-rw-r--r--.gitignore44
-rw-r--r--Documentation/ABI/obsolete/proc-sys-vm-nr_pdflush_threads5
-rw-r--r--Documentation/ABI/obsolete/sysfs-gpio2
-rw-r--r--Documentation/ABI/stable/sysfs-bus-vmbus70
-rw-r--r--Documentation/ABI/stable/sysfs-devices2
-rw-r--r--Documentation/ABI/testing/evm48
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio7
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-cros-ec10
-rw-r--r--Documentation/ABI/testing/sysfs-bus-mmc4
-rw-r--r--Documentation/ABI/testing/sysfs-devices-power20
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu6
-rw-r--r--Documentation/ABI/testing/sysfs-driver-w1_ds28e1721
-rw-r--r--Documentation/ABI/testing/sysfs-power6
-rw-r--r--Documentation/IPMI.txt4
-rw-r--r--Documentation/Makefile6
-rw-r--r--Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html2
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Diagram.html9
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html707
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg486
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-registry.svg655
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-dyntick.svg700
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg1126
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-fqs.svg1309
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg656
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-2.svg656
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg632
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg5135
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-hotplug.svg775
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg1095
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/rcu_node-lock.svg229
-rw-r--r--Documentation/RCU/stallwarn.txt200
-rw-r--r--Documentation/acpi/lpit.txt25
-rw-r--r--Documentation/admin-guide/README.rst2
-rw-r--r--Documentation/admin-guide/bug-hunting.rst8
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt93
-rw-r--r--Documentation/admin-guide/reporting-bugs.rst4
-rw-r--r--Documentation/arm64/cpu-feature-registers.txt18
-rw-r--r--Documentation/arm64/elf_hwcaps.txt160
-rw-r--r--Documentation/arm64/memory.txt10
-rw-r--r--Documentation/arm64/silicon-errata.txt1
-rw-r--r--Documentation/arm64/sve.txt508
-rw-r--r--Documentation/block/biodoc.txt11
-rw-r--r--Documentation/block/null_blk.txt19
-rw-r--r--Documentation/cdrom/ide-cd6
-rw-r--r--Documentation/cgroup-v2.txt42
-rw-r--r--Documentation/core-api/kernel-api.rst55
-rw-r--r--Documentation/cpu-freq/cpufreq-stats.txt3
-rw-r--r--Documentation/crypto/api-samples.rst52
-rw-r--r--Documentation/dev-tools/coccinelle.rst2
-rw-r--r--Documentation/dev-tools/index.rst1
-rw-r--r--Documentation/dev-tools/kmemcheck.rst733
-rw-r--r--Documentation/dev-tools/kselftest.rst34
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/pmu.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/sp810.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/spe-pmu.txt20
-rw-r--r--Documentation/devicetree/bindings/arm/vexpress-sysreg.txt2
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-ceva.txt39
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-platform.txt2
-rw-r--r--Documentation/devicetree/bindings/ata/imx-sata.txt2
-rw-r--r--Documentation/devicetree/bindings/bus/imx-weim.txt2
-rw-r--r--Documentation/devicetree/bindings/bus/sunxi-rsb.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/arm-syscon-icst.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/clk-exynos-audss.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/clk-s5pv210-audss.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/dove-divider-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/imx1-clock.txt4
-rw-r--r--Documentation/devicetree/bindings/clock/imx6q-clock.txt4
-rw-r--r--Documentation/devicetree/bindings/clock/maxim,max77686.txt4
-rw-r--r--Documentation/devicetree/bindings/clock/st/st,clkgen.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi-ccu.txt4
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi.txt16
-rw-r--r--Documentation/devicetree/bindings/clock/ti,cdce706.txt2
-rw-r--r--Documentation/devicetree/bindings/common-properties.txt26
-rw-r--r--Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt (renamed from Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt)0
-rw-r--r--Documentation/devicetree/bindings/crypto/sun4i-ss.txt2
-rw-r--r--Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt4
-rw-r--r--Documentation/devicetree/bindings/display/bridge/sii9234.txt49
-rw-r--r--Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt2
-rw-r--r--Documentation/devicetree/bindings/display/faraday,tve200.txt54
-rw-r--r--Documentation/devicetree/bindings/display/imx/hdmi.txt4
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi.txt36
-rw-r--r--Documentation/devicetree/bindings/display/msm/edp.txt20
-rw-r--r--Documentation/devicetree/bindings/display/msm/hdmi.txt8
-rw-r--r--Documentation/devicetree/bindings/display/msm/mdp5.txt32
-rw-r--r--Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt21
-rw-r--r--Documentation/devicetree/bindings/display/panel/raspberrypi,7inch-touchscreen.txt49
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6e63j0x03.txt24
-rw-r--r--Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.txt23
-rw-r--r--Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt8
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt99
-rw-r--r--Documentation/devicetree/bindings/display/simple-framebuffer.txt2
-rw-r--r--Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt16
-rw-r--r--Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt4
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt2
-rw-r--r--Documentation/devicetree/bindings/dma/sprd-dma.txt41
-rw-r--r--Documentation/devicetree/bindings/dma/stm32-dma.txt6
-rw-r--r--Documentation/devicetree/bindings/dma/stm32-dmamux.txt84
-rw-r--r--Documentation/devicetree/bindings/dma/stm32-mdma.txt94
-rw-r--r--Documentation/devicetree/bindings/dma/sun4i-dma.txt4
-rw-r--r--Documentation/devicetree/bindings/dma/sun6i-dma.txt28
-rw-r--r--Documentation/devicetree/bindings/dma/ti-edma.txt6
-rw-r--r--Documentation/devicetree/bindings/dma/zxdma.txt2
-rw-r--r--Documentation/devicetree/bindings/eeprom/eeprom.txt2
-rw-r--r--Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-dsp-keystone.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-max3191x.txt59
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-tz1090-pdc.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-tz1090.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-uniphier.txt52
-rw-r--r--Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt1
-rw-r--r--Documentation/devicetree/bindings/hsi/omap-ssi.txt13
-rw-r--r--Documentation/devicetree/bindings/hwmon/gpio-fan.txt (renamed from Documentation/devicetree/bindings/gpio/gpio-fan.txt)0
-rw-r--r--Documentation/devicetree/bindings/hwmon/max1619.txt12
-rw-r--r--Documentation/devicetree/bindings/hwmon/max31785.txt22
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-aspeed.txt7
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-axxia.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-davinci.txt12
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-gpio.txt32
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-rcar.txt1
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c.txt4
-rw-r--r--Documentation/devicetree/bindings/iio/adc/mcp320x.txt14
-rw-r--r--Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt1
-rw-r--r--Documentation/devicetree/bindings/iio/dac/ds4424.txt20
-rw-r--r--Documentation/devicetree/bindings/iio/dac/ti-dac082s085.txt34
-rw-r--r--Documentation/devicetree/bindings/iio/health/max30100.txt4
-rw-r--r--Documentation/devicetree/bindings/iio/health/max30102.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/magnetometer/ak8974.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/st-sensors.txt4
-rw-r--r--Documentation/devicetree/bindings/input/gpio-mouse.txt32
-rw-r--r--Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt2
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt2
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/exc3000.txt27
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/goodix.txt3
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/hideep.txt42
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt2
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt34
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt36
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt4
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt3
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/openrisc,ompic.txt22
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt3
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt32
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt4
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.txt2
-rw-r--r--Documentation/devicetree/bindings/iommu/qcom,iommu.txt2
-rw-r--r--Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt2
-rw-r--r--Documentation/devicetree/bindings/leds/register-bit-led.txt16
-rw-r--r--Documentation/devicetree/bindings/mailbox/ti,message-manager.txt2
-rw-r--r--Documentation/devicetree/bindings/marvell.txt4
-rw-r--r--Documentation/devicetree/bindings/media/cec-gpio.txt32
-rw-r--r--Documentation/devicetree/bindings/media/exynos5-gsc.txt9
-rw-r--r--Documentation/devicetree/bindings/media/i2c/imx274.txt33
-rw-r--r--Documentation/devicetree/bindings/media/i2c/nokia,smia.txt2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/tc358743.txt2
-rw-r--r--Documentation/devicetree/bindings/media/img-ir-rev1.txt2
-rw-r--r--Documentation/devicetree/bindings/media/renesas,vsp1.txt2
-rw-r--r--Documentation/devicetree/bindings/media/rockchip-rga.txt33
-rw-r--r--Documentation/devicetree/bindings/media/stih-cec.txt2
-rw-r--r--Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt2
-rw-r--r--Documentation/devicetree/bindings/media/sunxi-ir.txt2
-rw-r--r--Documentation/devicetree/bindings/media/tango-ir.txt21
-rw-r--r--Documentation/devicetree/bindings/media/tegra-cec.txt27
-rw-r--r--Documentation/devicetree/bindings/media/video-interfaces.txt24
-rw-r--r--Documentation/devicetree/bindings/mfd/arizona.txt40
-rw-r--r--Documentation/devicetree/bindings/mfd/aspeed-scu.txt6
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,iproc-cdru.txt16
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,iproc-mhb.txt18
-rw-r--r--Documentation/devicetree/bindings/mfd/max77686.txt2
-rw-r--r--Documentation/devicetree/bindings/mfd/max77693.txt6
-rw-r--r--Documentation/devicetree/bindings/mfd/max77802.txt2
-rw-r--r--Documentation/devicetree/bindings/mfd/mc13xxx.txt1
-rw-r--r--Documentation/devicetree/bindings/mfd/mfd.txt2
-rw-r--r--Documentation/devicetree/bindings/mfd/sprd,sc27xx-pmic.txt40
-rw-r--r--Documentation/devicetree/bindings/mfd/sun4i-gpadc.txt4
-rw-r--r--Documentation/devicetree/bindings/mfd/sun6i-prcm.txt2
-rw-r--r--Documentation/devicetree/bindings/mfd/syscon.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/amlogic,meson-mx-sdio.txt54
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc.txt5
-rw-r--r--Documentation/devicetree/bindings/mmc/mtk-sd.txt18
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-fujitsu.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-msm.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-omap.txt16
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-st.txt4
-rw-r--r--Documentation/devicetree/bindings/mmc/sunxi-mmc.txt4
-rw-r--r--Documentation/devicetree/bindings/mmc/tmio_mmc.txt70
-rw-r--r--Documentation/devicetree/bindings/mtd/sunxi-nand.txt2
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt2
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt4
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.txt2
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bcmgenet.txt2
-rw-r--r--Documentation/devicetree/bindings/net/can/m_can.txt2
-rw-r--r--Documentation/devicetree/bindings/net/can/sun4i_can.txt4
-rw-r--r--Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt2
-rw-r--r--Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt5
-rw-r--r--Documentation/devicetree/bindings/nvmem/amlogic-efuse.txt2
-rw-r--r--Documentation/devicetree/bindings/nvmem/amlogic-meson-mx-efuse.txt22
-rw-r--r--Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt2
-rw-r--r--Documentation/devicetree/bindings/nvmem/imx-ocotp.txt2
-rw-r--r--Documentation/devicetree/bindings/nvmem/nvmem.txt2
-rw-r--r--Documentation/devicetree/bindings/nvmem/qfprom.txt2
-rw-r--r--Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt1
-rw-r--r--Documentation/devicetree/bindings/nvmem/snvs-lpgpr.txt20
-rw-r--r--Documentation/devicetree/bindings/nvmem/uniphier-efuse.txt49
-rw-r--r--Documentation/devicetree/bindings/openrisc/opencores/or1ksim.txt39
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie-ecam.txt42
-rw-r--r--Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt68
-rw-r--r--Documentation/devicetree/bindings/pci/layerscape-pci.txt1
-rw-r--r--Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt146
-rw-r--r--Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt10
-rw-r--r--Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt75
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.txt43
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/brcm-sata-phy.txt11
-rw-r--r--Documentation/devicetree/bindings/phy/mxs-usb-phy.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt7
-rw-r--r--Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/sun9i-usb-phy.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt10
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx-pinctrl.txt4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-xusb-padctl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt (renamed from Documentation/devicetree/bindings/gpio/gpio-mcp23s08.txt)59
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rza1-pinctrl.txt6
-rw-r--r--Documentation/devicetree/bindings/power/fsl,imx-gpc.txt4
-rw-r--r--Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt2
-rw-r--r--Documentation/devicetree/bindings/power/reset/keystone-reset.txt4
-rw-r--r--Documentation/devicetree/bindings/power/supply/sbs,sbs-manager.txt66
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt2
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-sun4i.txt2
-rw-r--r--Documentation/devicetree/bindings/regulator/da9211.txt82
-rw-r--r--Documentation/devicetree/bindings/regulator/max77686.txt2
-rw-r--r--Documentation/devicetree/bindings/regulator/max77802.txt2
-rw-r--r--Documentation/devicetree/bindings/regulator/pfuze100.txt6
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt13
-rw-r--r--Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt2
-rw-r--r--Documentation/devicetree/bindings/reset/fsl,imx-src.txt6
-rw-r--r--Documentation/devicetree/bindings/reset/ti-syscon-reset.txt2
-rw-r--r--Documentation/devicetree/bindings/riscv/cpus.txt162
-rw-r--r--Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt4
-rw-r--r--Documentation/devicetree/bindings/rtc/dallas,ds1339.txt18
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-ds1307.txt44
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-m41t80.txt31
-rw-r--r--Documentation/devicetree/bindings/rtc/sirf,prima2-sysrtc.txt13
-rw-r--r--Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt17
-rw-r--r--Documentation/devicetree/bindings/rtc/sun6i-rtc.txt2
-rw-r--r--Documentation/devicetree/bindings/rtc/sunxi-rtc.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/atmel-usart.txt1
-rw-r--r--Documentation/devicetree/bindings/serial/brcm,bcm6345-uart.txt6
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-imx-uart.txt1
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-lpuart.txt1
-rw-r--r--Documentation/devicetree/bindings/serial/omap_serial.txt1
-rw-r--r--Documentation/devicetree/bindings/serial/st,stm32-usart.txt10
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/par_io.txt2
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/pincfg.txt2
-rw-r--r--Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/audio-graph-card.txt1
-rw-r--r--Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/cdns,xtfpga-i2s.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/cs42l56.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,asrc.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,esai.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,spdif.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/imx-audmux.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/rt5514.txt13
-rw-r--r--Documentation/devicetree/bindings/sound/rt5663.txt16
-rw-r--r--Documentation/devicetree/bindings/sound/samsung-i2s.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/sgtl5000.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/st,stm32-sai.txt14
-rw-r--r--Documentation/devicetree/bindings/sound/sun4i-codec.txt4
-rw-r--r--Documentation/devicetree/bindings/sound/sun4i-i2s.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/sun8i-a33-codec.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/sunxi,sun4i-spdif.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/tfa9879.txt23
-rw-r--r--Documentation/devicetree/bindings/sound/wlf,arizona.txt53
-rw-r--r--Documentation/devicetree/bindings/sound/zte,zx-spdif.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/sh-msiof.txt6
-rw-r--r--Documentation/devicetree/bindings/spi/spi-davinci.txt10
-rw-r--r--Documentation/devicetree/bindings/spi/spi-rspi.txt5
-rw-r--r--Documentation/devicetree/bindings/spi/spi-sprd-adi.txt58
-rw-r--r--Documentation/devicetree/bindings/spi/spi-sun4i.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/spi-sun6i.txt4
-rw-r--r--Documentation/devicetree/bindings/sram/samsung-sram.txt2
-rw-r--r--Documentation/devicetree/bindings/sram/sunxi-sram.txt4
-rw-r--r--Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt2
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.txt14
-rw-r--r--Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/am33xx-usb.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/atmel-usb.txt4
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/maxim,max3421.txt26
-rw-r--r--Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt20
-rw-r--r--Documentation/devicetree/bindings/usb/mediatek,mtu3.txt19
-rw-r--r--Documentation/devicetree/bindings/usb/ohci-da8xx.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/renesas_usb3.txt4
-rw-r--r--Documentation/devicetree/bindings/usb/renesas_usbhs.txt5
-rw-r--r--Documentation/devicetree/bindings/usb/usb-ehci.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/usb-nop-xceiv.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/usb-ohci.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/usb-xhci.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/usb251xb.txt46
-rw-r--r--Documentation/devicetree/bindings/usb/usb3503.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/usbmisc-imx.txt2
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt10
-rw-r--r--Documentation/devicetree/bindings/watchdog/mtk-wdt.txt2
-rw-r--r--Documentation/devicetree/bindings/watchdog/sunxi-wdt.txt2
-rw-r--r--Documentation/devicetree/overlay-notes.txt12
-rw-r--r--Documentation/dmaengine/00-INDEX8
-rw-r--r--Documentation/dmaengine/client.txt222
-rw-r--r--Documentation/dmaengine/provider.txt424
-rw-r--r--Documentation/dmaengine/pxa_dma.txt153
-rw-r--r--Documentation/doc-guide/kernel-doc.rst2
-rw-r--r--Documentation/driver-api/dmaengine/client.rst275
-rw-r--r--Documentation/driver-api/dmaengine/dmatest.rst (renamed from Documentation/dmaengine/dmatest.txt)96
-rw-r--r--Documentation/driver-api/dmaengine/index.rst55
-rw-r--r--Documentation/driver-api/dmaengine/provider.rst508
-rw-r--r--Documentation/driver-api/dmaengine/pxa_dma.rst190
-rw-r--r--Documentation/driver-api/index.rst1
-rw-r--r--Documentation/driver-api/pinctl.rst6
-rw-r--r--Documentation/driver-api/pm/devices.rst61
-rw-r--r--Documentation/driver-api/scsi.rst2
-rw-r--r--Documentation/driver-api/usb/usb.rst4
-rw-r--r--Documentation/fault-injection/notifier-error-inject.txt30
-rw-r--r--Documentation/fb/fbcon.txt4
-rw-r--r--Documentation/features/debug/KASAN/arch-support.txt2
-rw-r--r--Documentation/filesystems/dnotify.txt2
-rw-r--r--Documentation/filesystems/ext4.txt8
-rw-r--r--Documentation/filesystems/fscrypt.rst610
-rw-r--r--Documentation/filesystems/index.rst11
-rw-r--r--Documentation/filesystems/path-lookup.md6
-rw-r--r--Documentation/filesystems/proc.txt1
-rw-r--r--Documentation/filesystems/udf.txt8
-rw-r--r--Documentation/gpio/consumer.txt63
-rw-r--r--Documentation/gpio/driver.txt6
-rw-r--r--Documentation/gpio/gpio-legacy.txt10
-rw-r--r--Documentation/gpu/drm-uapi.rst55
-rw-r--r--Documentation/gpu/index.rst1
-rw-r--r--Documentation/gpu/todo.rst55
-rw-r--r--Documentation/gpu/tve200.rst6
-rw-r--r--Documentation/hid/hiddev.txt2
-rw-r--r--Documentation/hwmon/max3178551
-rw-r--r--Documentation/hwmon/sht153
-rw-r--r--Documentation/input/devices/xpad.rst3
-rw-r--r--Documentation/kbuild/makefiles.txt1
-rw-r--r--Documentation/kprobes.txt159
-rw-r--r--Documentation/laptops/laptop-mode.txt6
-rw-r--r--Documentation/leds/00-INDEX10
-rw-r--r--Documentation/livepatch/callbacks.txt605
-rw-r--r--Documentation/livepatch/shadow-vars.txt192
-rw-r--r--Documentation/locking/rt-mutex-design.txt2
-rw-r--r--Documentation/md/md-cluster.txt3
-rw-r--r--Documentation/media/cec.h.rst.exceptions2
-rw-r--r--Documentation/media/dvb-drivers/bt8xx.rst8
-rw-r--r--Documentation/media/kapi/cec-core.rst7
-rw-r--r--Documentation/media/kapi/dtv-ca.rst4
-rw-r--r--Documentation/media/kapi/dtv-common.rst55
-rw-r--r--Documentation/media/kapi/dtv-core.rst574
-rw-r--r--Documentation/media/kapi/dtv-demux.rst82
-rw-r--r--Documentation/media/kapi/dtv-frontend.rst443
-rw-r--r--Documentation/media/kapi/dtv-net.rst4
-rw-r--r--Documentation/media/kapi/v4l2-async.rst3
-rw-r--r--Documentation/media/kapi/v4l2-core.rst1
-rw-r--r--Documentation/media/uapi/cec/cec-ioc-dqevent.rst22
-rw-r--r--Documentation/media/uapi/cec/cec-ioc-receive.rst10
-rw-r--r--Documentation/media/uapi/dvb/examples.rst378
-rw-r--r--Documentation/media/uapi/dvb/fe-get-property.rst7
-rw-r--r--Documentation/media/uapi/dvb/net-types.rst2
-rw-r--r--Documentation/media/uapi/v4l/dev-sliced-vbi.rst2
-rw-r--r--Documentation/media/uapi/v4l/extended-controls.rst2
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-reserved.rst2
-rw-r--r--Documentation/media/v4l-drivers/bttv.rst2
-rw-r--r--Documentation/media/v4l-drivers/max2175.rst2
-rw-r--r--Documentation/memory-barriers.txt209
-rw-r--r--Documentation/networking/cdc_mbim.txt4
-rw-r--r--Documentation/networking/checksum-offloads.txt2
-rw-r--r--Documentation/networking/gtp.txt103
-rw-r--r--Documentation/networking/netdev-FAQ.txt5
-rw-r--r--Documentation/networking/packet_mmap.txt2
-rw-r--r--Documentation/openrisc/README (renamed from arch/openrisc/README.openrisc)65
-rw-r--r--Documentation/openrisc/TODO (renamed from arch/openrisc/TODO.openrisc)0
-rw-r--r--Documentation/perf/hisi-pmu.txt53
-rw-r--r--Documentation/pi-futex.txt2
-rw-r--r--Documentation/power/interface.txt5
-rw-r--r--Documentation/power/pci.txt43
-rw-r--r--Documentation/power/pm_qos_interface.txt13
-rw-r--r--Documentation/power/runtime_pm.txt2
-rw-r--r--Documentation/power/suspend-and-cpuhotplug.txt9
-rw-r--r--Documentation/process/3.Early-stage.rst2
-rw-r--r--Documentation/process/4.Coding.rst2
-rw-r--r--Documentation/process/index.rst1
-rw-r--r--Documentation/process/kernel-driver-statement.rst199
-rw-r--r--Documentation/process/submitting-drivers.rst2
-rw-r--r--Documentation/process/submitting-patches.rst8
-rw-r--r--Documentation/scsi/scsi-parameters.txt13
-rw-r--r--Documentation/scsi/smartpqi.txt2
-rw-r--r--Documentation/security/LSM.rst2
-rw-r--r--Documentation/security/credentials.rst2
-rw-r--r--Documentation/security/keys/request-key.rst2
-rw-r--r--Documentation/sound/cards/joystick.rst2
-rw-r--r--Documentation/sound/hd-audio/models.rst2
-rw-r--r--Documentation/sound/hd-audio/notes.rst2
-rw-r--r--Documentation/sound/kernel-api/writing-an-alsa-driver.rst2
-rw-r--r--Documentation/sound/oss/ALS66
-rw-r--r--Documentation/sound/oss/AudioExcelDSP16101
-rw-r--r--Documentation/sound/oss/CMI8330152
-rw-r--r--Documentation/sound/oss/ESS34
-rw-r--r--Documentation/sound/oss/ESS186855
-rw-r--r--Documentation/sound/oss/Introduction459
-rw-r--r--Documentation/sound/oss/MultiSound1137
-rw-r--r--Documentation/sound/oss/OPL36
-rw-r--r--Documentation/sound/oss/Opti218
-rw-r--r--Documentation/sound/oss/PAS16162
-rw-r--r--Documentation/sound/oss/PSS41
-rw-r--r--Documentation/sound/oss/PSS-updates88
-rw-r--r--Documentation/sound/oss/README.OSS1455
-rw-r--r--Documentation/sound/oss/README.modules106
-rw-r--r--Documentation/sound/oss/README.ymfsb107
-rw-r--r--Documentation/sound/oss/SoundPro105
-rw-r--r--Documentation/sound/oss/Soundblaster53
-rw-r--r--Documentation/sound/oss/Tropez+26
-rw-r--r--Documentation/sound/oss/VIBRA1680
-rw-r--r--Documentation/sound/oss/WaveArtist170
-rw-r--r--Documentation/sound/oss/btaudio92
-rw-r--r--Documentation/sound/oss/mwave185
-rw-r--r--Documentation/sound/oss/oss-parameters.txt51
-rw-r--r--Documentation/sound/oss/ultrasound30
-rw-r--r--Documentation/sysctl/README2
-rw-r--r--Documentation/sysctl/fs.txt2
-rw-r--r--Documentation/sysctl/vm.txt25
-rw-r--r--Documentation/timers/highres.txt4
-rw-r--r--Documentation/trace/coresight-cpu-debug.txt22
-rw-r--r--Documentation/trace/ftrace-uses.rst293
-rw-r--r--Documentation/trace/intel_th.txt2
-rw-r--r--Documentation/translations/ko_KR/memory-barriers.txt12
-rw-r--r--Documentation/translations/zh_CN/gpio.txt6
-rw-r--r--Documentation/usb/gadget-testing.txt2
-rw-r--r--Documentation/vm/mmu_notifier.txt93
-rw-r--r--Documentation/w1/slaves/00-INDEX2
-rw-r--r--Documentation/w1/slaves/w1_ds28e1768
-rw-r--r--Documentation/watchdog/hpwdt.txt2
-rw-r--r--Documentation/watchdog/pcwd-watchdog.txt2
-rw-r--r--Documentation/x86/amd-memory-encryption.txt30
-rw-r--r--Documentation/x86/intel_rdt_ui.txt11
-rw-r--r--Documentation/x86/orc-unwinder.txt2
-rw-r--r--Documentation/x86/x86_64/mm.txt2
-rw-r--r--MAINTAINERS197
-rw-r--r--Makefile14
-rw-r--r--arch/Kconfig2
-rw-r--r--arch/alpha/Kconfig2
-rw-r--r--arch/alpha/include/asm/atomic.h13
-rw-r--r--arch/alpha/include/asm/dma-mapping.h2
-rw-r--r--arch/alpha/include/asm/floppy.h2
-rw-r--r--arch/alpha/include/asm/pci.h5
-rw-r--r--arch/alpha/include/asm/rwsem.h21
-rw-r--r--arch/alpha/include/asm/spinlock.h14
-rw-r--r--arch/alpha/kernel/pci.c11
-rw-r--r--arch/alpha/kernel/pci_impl.h8
-rw-r--r--arch/arc/Kconfig6
-rw-r--r--arch/arc/boot/.gitignore1
-rw-r--r--arch/arc/boot/dts/Makefile8
-rw-r--r--arch/arc/include/asm/spinlock.h11
-rw-r--r--arch/arc/kernel/smp.c2
-rw-r--r--arch/arm/boot/.gitignore1
-rw-r--r--arch/arm/boot/dts/Makefile6
-rw-r--r--arch/arm/boot/dts/omap3-n9.dts1
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi4
-rw-r--r--arch/arm/boot/dts/omap3-n950.dts1
-rw-r--r--arch/arm/boot/dts/stm32f429.dtsi12
-rw-r--r--arch/arm/boot/dts/stm32f746.dtsi12
-rw-r--r--arch/arm/boot/dts/stm32h743.dtsi4
-rw-r--r--arch/arm/common/locomo.c24
-rw-r--r--arch/arm/configs/dove_defconfig2
-rw-r--r--arch/arm/configs/multi_v5_defconfig2
-rw-r--r--arch/arm/configs/orion5x_defconfig2
-rw-r--r--arch/arm/configs/pxa_defconfig3
-rw-r--r--arch/arm/configs/raumfeld_defconfig3
-rw-r--r--arch/arm/include/asm/arch_gicv3.h5
-rw-r--r--arch/arm/include/asm/arch_timer.h1
-rw-r--r--arch/arm/include/asm/dma-iommu.h1
-rw-r--r--arch/arm/include/asm/hardware/locomo.h2
-rw-r--r--arch/arm/include/asm/kvm_host.h3
-rw-r--r--arch/arm/include/asm/pgalloc.h2
-rw-r--r--arch/arm/include/asm/ptrace.h3
-rw-r--r--arch/arm/include/asm/spinlock.h17
-rw-r--r--arch/arm/include/asm/topology.h8
-rw-r--r--arch/arm/mach-bcm/Kconfig6
-rw-r--r--arch/arm/mach-ep93xx/core.c41
-rw-r--r--arch/arm/mach-ep93xx/edb93xx.c15
-rw-r--r--arch/arm/mach-ep93xx/include/mach/platform.h4
-rw-r--r--arch/arm/mach-ep93xx/simone.c12
-rw-r--r--arch/arm/mach-ep93xx/snappercl15.c12
-rw-r--r--arch/arm/mach-ep93xx/vision_ep9307.c7
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c88
-rw-r--r--arch/arm/mach-ixp4xx/avila-setup.c17
-rw-r--r--arch/arm/mach-ixp4xx/dsmg600-setup.c16
-rw-r--r--arch/arm/mach-ixp4xx/fsg-setup.c16
-rw-r--r--arch/arm/mach-ixp4xx/goramo_mlr.c24
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c16
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-setup.c16
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-setup.c16
-rw-r--r--arch/arm/mach-ks8695/board-acs5k.c15
-rw-r--r--arch/arm/mach-pxa/palmz72.c14
-rw-r--r--arch/arm/mach-pxa/stargate2.c17
-rw-r--r--arch/arm/mach-pxa/viper.c27
-rw-r--r--arch/arm/mach-sa1100/simpad.c14
-rw-r--r--arch/arm/mach-shmobile/pm-rmobile.c8
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra20.c2
-rw-r--r--arch/arm/mm/pgd.c2
-rw-r--r--arch/arm/plat-samsung/Kconfig2
-rw-r--r--arch/arm/probes/kprobes/test-core.c59
-rw-r--r--arch/arm/vdso/vgettimeofday.c2
-rw-r--r--arch/arm64/Kconfig58
-rw-r--r--arch/arm64/Kconfig.platforms3
-rw-r--r--arch/arm64/Makefile10
-rw-r--r--arch/arm64/boot/dts/.gitignore1
-rw-r--r--arch/arm64/boot/dts/Makefile58
-rw-r--r--arch/arm64/boot/dts/actions/Makefile4
-rw-r--r--arch/arm64/boot/dts/al/Makefile4
-rw-r--r--arch/arm64/boot/dts/allwinner/Makefile4
-rw-r--r--arch/arm64/boot/dts/altera/Makefile4
-rw-r--r--arch/arm64/boot/dts/amd/Makefile4
-rw-r--r--arch/arm64/boot/dts/amlogic/Makefile4
-rw-r--r--arch/arm64/boot/dts/apm/Makefile4
-rw-r--r--arch/arm64/boot/dts/arm/Makefile4
-rw-r--r--arch/arm64/boot/dts/broadcom/Makefile7
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/Makefile4
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/Makefile4
-rw-r--r--arch/arm64/boot/dts/cavium/Makefile4
-rw-r--r--arch/arm64/boot/dts/exynos/Makefile4
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi31
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi75
-rw-r--r--arch/arm64/boot/dts/hisilicon/Makefile4
-rw-r--r--arch/arm64/boot/dts/lg/Makefile4
-rw-r--r--arch/arm64/boot/dts/marvell/Makefile4
-rw-r--r--arch/arm64/boot/dts/mediatek/Makefile4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi12
-rw-r--r--arch/arm64/boot/dts/nvidia/Makefile3
-rw-r--r--arch/arm64/boot/dts/qcom/Makefile4
-rw-r--r--arch/arm64/boot/dts/realtek/Makefile4
-rw-r--r--arch/arm64/boot/dts/renesas/Makefile3
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile4
-rw-r--r--arch/arm64/boot/dts/socionext/Makefile3
-rw-r--r--arch/arm64/boot/dts/sprd/Makefile4
-rw-r--r--arch/arm64/boot/dts/xilinx/Makefile4
-rw-r--r--arch/arm64/boot/dts/zte/Makefile4
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/acpi.h12
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h5
-rw-r--r--arch/arm64/include/asm/arch_timer.h1
-rw-r--r--arch/arm64/include/asm/asm-bug.h8
-rw-r--r--arch/arm64/include/asm/assembler.h51
-rw-r--r--arch/arm64/include/asm/barrier.h2
-rw-r--r--arch/arm64/include/asm/cpu.h4
-rw-r--r--arch/arm64/include/asm/cpucaps.h3
-rw-r--r--arch/arm64/include/asm/cpufeature.h42
-rw-r--r--arch/arm64/include/asm/daifflags.h72
-rw-r--r--arch/arm64/include/asm/elf.h4
-rw-r--r--arch/arm64/include/asm/esr.h3
-rw-r--r--arch/arm64/include/asm/fixmap.h7
-rw-r--r--arch/arm64/include/asm/fpsimd.h71
-rw-r--r--arch/arm64/include/asm/fpsimdmacros.h148
-rw-r--r--arch/arm64/include/asm/irqflags.h40
-rw-r--r--arch/arm64/include/asm/kvm_arm.h5
-rw-r--r--arch/arm64/include/asm/kvm_host.h11
-rw-r--r--arch/arm64/include/asm/memory.h15
-rw-r--r--arch/arm64/include/asm/pgalloc.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h14
-rw-r--r--arch/arm64/include/asm/processor.h28
-rw-r--r--arch/arm64/include/asm/spinlock.h173
-rw-r--r--arch/arm64/include/asm/spinlock_types.h6
-rw-r--r--arch/arm64/include/asm/sysreg.h121
-rw-r--r--arch/arm64/include/asm/thread_info.h5
-rw-r--r--arch/arm64/include/asm/topology.h8
-rw-r--r--arch/arm64/include/asm/traps.h8
-rw-r--r--arch/arm64/include/uapi/asm/hwcap.h6
-rw-r--r--arch/arm64/include/uapi/asm/ptrace.h139
-rw-r--r--arch/arm64/include/uapi/asm/sigcontext.h120
-rw-r--r--arch/arm64/kernel/Makefile2
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c23
-rw-r--r--arch/arm64/kernel/cpufeature.c204
-rw-r--r--arch/arm64/kernel/cpuinfo.c12
-rw-r--r--arch/arm64/kernel/debug-monitors.c5
-rw-r--r--arch/arm64/kernel/entry-fpsimd.S17
-rw-r--r--arch/arm64/kernel/entry-ftrace.S12
-rw-r--r--arch/arm64/kernel/entry.S128
-rw-r--r--arch/arm64/kernel/fpsimd.c908
-rw-r--r--arch/arm64/kernel/head.S30
-rw-r--r--arch/arm64/kernel/hibernate.c5
-rw-r--r--arch/arm64/kernel/io.c12
-rw-r--r--arch/arm64/kernel/machine_kexec.c4
-rw-r--r--arch/arm64/kernel/process.c64
-rw-r--r--arch/arm64/kernel/ptrace.c280
-rw-r--r--arch/arm64/kernel/setup.c15
-rw-r--r--arch/arm64/kernel/signal.c179
-rw-r--r--arch/arm64/kernel/signal32.c2
-rw-r--r--arch/arm64/kernel/smp.c18
-rw-r--r--arch/arm64/kernel/suspend.c8
-rw-r--r--arch/arm64/kernel/traps.c109
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S2
-rw-r--r--arch/arm64/kvm/handle_exit.c8
-rw-r--r--arch/arm64/kvm/hyp/debug-sr.c24
-rw-r--r--arch/arm64/kvm/hyp/switch.c12
-rw-r--r--arch/arm64/kvm/sys_regs.c292
-rw-r--r--arch/arm64/lib/Makefile2
-rw-r--r--arch/arm64/lib/delay.c23
-rw-r--r--arch/arm64/lib/tishift.S80
-rw-r--r--arch/arm64/mm/dma-mapping.c5
-rw-r--r--arch/arm64/mm/fault.c72
-rw-r--r--arch/arm64/mm/kasan_init.c130
-rw-r--r--arch/arm64/mm/mmu.c4
-rw-r--r--arch/arm64/mm/proc.S9
-rw-r--r--arch/blackfin/Kconfig7
-rw-r--r--arch/blackfin/Kconfig.debug1
-rw-r--r--arch/blackfin/include/asm/gpio.h20
-rw-r--r--arch/blackfin/include/asm/spinlock.h20
-rw-r--r--arch/blackfin/kernel/bfin_gpio.c3
-rw-r--r--arch/blackfin/kernel/debug-mmrs.c2
-rw-r--r--arch/blackfin/mach-bf533/boards/blackstamp.c19
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c18
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c18
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c18
-rw-r--r--arch/blackfin/mach-common/ints-priority.c2
-rw-r--r--arch/blackfin/mach-common/pm.c2
-rw-r--r--arch/c6x/boot/dts/Makefile2
-rw-r--r--arch/cris/boot/dts/Makefile2
-rw-r--r--arch/cris/include/asm/dma-mapping.h6
-rw-r--r--arch/cris/include/asm/pci.h9
-rw-r--r--arch/frv/include/asm/dma-mapping.h7
-rw-r--r--arch/frv/include/asm/pci.h4
-rw-r--r--arch/frv/mm/init.c14
-rw-r--r--arch/h8300/boot/dts/Makefile6
-rw-r--r--arch/h8300/mm/init.c13
-rw-r--r--arch/hexagon/include/asm/dma-mapping.h3
-rw-r--r--arch/hexagon/include/asm/spinlock.h15
-rw-r--r--arch/ia64/Kconfig2
-rw-r--r--arch/ia64/include/asm/dma-mapping.h11
-rw-r--r--arch/ia64/include/asm/pci.h4
-rw-r--r--arch/ia64/include/asm/rwsem.h25
-rw-r--r--arch/ia64/include/asm/spinlock.h20
-rw-r--r--arch/ia64/kernel/asm-offsets.c2
-rw-r--r--arch/ia64/kernel/fsys.S8
-rw-r--r--arch/ia64/kernel/fsyscall_gtod_data.h10
-rw-r--r--arch/ia64/kernel/time.c40
-rw-r--r--arch/m32r/Kconfig4
-rw-r--r--arch/m32r/include/asm/dma-mapping.h5
-rw-r--r--arch/m32r/include/asm/spinlock.h20
-rw-r--r--arch/m68k/Kconfig.cpu2
-rw-r--r--arch/m68k/Kconfig.machine6
-rw-r--r--arch/m68k/coldfire/Makefile3
-rw-r--r--arch/m68k/coldfire/m5441x.c3
-rw-r--r--arch/m68k/coldfire/m54xx.c4
-rw-r--r--arch/m68k/coldfire/stmark2.c119
-rw-r--r--arch/m68k/configs/amiga_defconfig6
-rw-r--r--arch/m68k/configs/apollo_defconfig6
-rw-r--r--arch/m68k/configs/atari_defconfig6
-rw-r--r--arch/m68k/configs/bvme6000_defconfig6
-rw-r--r--arch/m68k/configs/hp300_defconfig6
-rw-r--r--arch/m68k/configs/mac_defconfig6
-rw-r--r--arch/m68k/configs/multi_defconfig6
-rw-r--r--arch/m68k/configs/mvme147_defconfig6
-rw-r--r--arch/m68k/configs/mvme16x_defconfig6
-rw-r--r--arch/m68k/configs/q40_defconfig6
-rw-r--r--arch/m68k/configs/stmark2_defconfig92
-rw-r--r--arch/m68k/configs/sun3_defconfig6
-rw-r--r--arch/m68k/configs/sun3x_defconfig6
-rw-r--r--arch/m68k/include/asm/dma-mapping.h6
-rw-r--r--arch/m68k/include/asm/m5441xsim.h6
-rw-r--r--arch/m68k/include/asm/mac_iop.h1
-rw-r--r--arch/m68k/include/asm/mcfmmu.h1
-rw-r--r--arch/m68k/include/asm/mmu_context.h1
-rw-r--r--arch/m68k/kernel/setup.c5
-rw-r--r--arch/m68k/kernel/setup_mm.c6
-rw-r--r--arch/m68k/mac/baboon.c2
-rw-r--r--arch/m68k/mac/config.c2
-rw-r--r--arch/m68k/mac/iop.c13
-rw-r--r--arch/m68k/mac/oss.c16
-rw-r--r--arch/m68k/mac/psc.c6
-rw-r--r--arch/m68k/mac/via.c53
-rw-r--r--arch/m68k/mm/mcfmmu.c4
-rw-r--r--arch/metag/boot/.gitignore1
-rw-r--r--arch/metag/boot/dts/Makefile6
-rw-r--r--arch/metag/include/asm/dma-mapping.h10
-rw-r--r--arch/metag/include/asm/spinlock.h9
-rw-r--r--arch/metag/include/asm/spinlock_lnkget.h37
-rw-r--r--arch/metag/include/asm/spinlock_lock1.h20
-rw-r--r--arch/microblaze/boot/.gitignore1
-rw-r--r--arch/microblaze/boot/Makefile2
-rw-r--r--arch/microblaze/include/asm/dma-mapping.h39
-rw-r--r--arch/microblaze/kernel/dma.c17
-rw-r--r--arch/mips/Kbuild.platforms1
-rw-r--r--arch/mips/Kconfig42
-rw-r--r--arch/mips/Makefile6
-rw-r--r--arch/mips/alchemy/board-gpr.c23
-rw-r--r--arch/mips/alchemy/common/clock.c10
-rw-r--r--arch/mips/ath79/mach-pb44.c16
-rw-r--r--arch/mips/bcm47xx/leds.c2
-rw-r--r--arch/mips/bcm63xx/clk.c242
-rw-r--r--arch/mips/boot/.gitignore1
-rw-r--r--arch/mips/boot/dts/Makefile33
-rw-r--r--arch/mips/boot/dts/brcm/Makefile4
-rw-r--r--arch/mips/boot/dts/brcm/bcm3368.dtsi2
-rw-r--r--arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts2
-rw-r--r--arch/mips/boot/dts/brcm/bcm63268.dtsi2
-rw-r--r--arch/mips/boot/dts/brcm/bcm6328.dtsi2
-rw-r--r--arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts2
-rw-r--r--arch/mips/boot/dts/brcm/bcm6358.dtsi2
-rw-r--r--arch/mips/boot/dts/brcm/bcm6362.dtsi2
-rw-r--r--arch/mips/boot/dts/brcm/bcm6368.dtsi2
-rw-r--r--arch/mips/boot/dts/cavium-octeon/Makefile3
-rw-r--r--arch/mips/boot/dts/img/Makefile3
-rw-r--r--arch/mips/boot/dts/img/pistachio.dtsi1
-rw-r--r--arch/mips/boot/dts/ingenic/Makefile3
-rw-r--r--arch/mips/boot/dts/ingenic/jz4780.dtsi5
-rw-r--r--arch/mips/boot/dts/lantiq/Makefile3
-rw-r--r--arch/mips/boot/dts/mti/Makefile3
-rw-r--r--arch/mips/boot/dts/netlogic/Makefile3
-rw-r--r--arch/mips/boot/dts/ni/Makefile3
-rw-r--r--arch/mips/boot/dts/pic32/Makefile3
-rw-r--r--arch/mips/boot/dts/qca/Makefile3
-rw-r--r--arch/mips/boot/dts/ralink/Makefile3
-rw-r--r--arch/mips/boot/dts/ralink/rt3052_eval.dts2
-rw-r--r--arch/mips/boot/dts/xilfpga/Makefile5
-rw-r--r--arch/mips/boot/dts/xilfpga/nexys4ddr.dts8
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper.c2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-spi.c10
-rw-r--r--arch/mips/configs/ci20_defconfig7
-rw-r--r--arch/mips/configs/db1xxx_defconfig1
-rw-r--r--arch/mips/configs/generic/board-xilfpga.config22
-rw-r--r--arch/mips/configs/ip22_defconfig1
-rw-r--r--arch/mips/configs/xilfpga_defconfig75
-rw-r--r--arch/mips/generic/Kconfig6
-rw-r--r--arch/mips/generic/board-xilfpga.its.S22
-rw-r--r--arch/mips/include/asm/asmmacro.h16
-rw-r--r--arch/mips/include/asm/bitops.h1
-rw-r--r--arch/mips/include/asm/cmpxchg.h2
-rw-r--r--arch/mips/include/asm/dma-mapping.h3
-rw-r--r--arch/mips/include/asm/mipsregs.h14
-rw-r--r--arch/mips/include/asm/octeon/cvmx-fpa.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx.h15
-rw-r--r--arch/mips/include/asm/page.h4
-rw-r--r--arch/mips/include/asm/pci.h4
-rw-r--r--arch/mips/include/asm/pgtable-64.h8
-rw-r--r--arch/mips/include/asm/processor.h2
-rw-r--r--arch/mips/include/asm/smp.h2
-rw-r--r--arch/mips/include/asm/spinlock.h7
-rw-r--r--arch/mips/include/asm/syscall.h29
-rw-r--r--arch/mips/include/asm/vdso.h2
-rw-r--r--arch/mips/kernel/pm-cps.c2
-rw-r--r--arch/mips/kernel/process.c2
-rw-r--r--arch/mips/kernel/ptrace.c41
-rw-r--r--arch/mips/kernel/ptrace32.c7
-rw-r--r--arch/mips/kernel/r4k_fpu.S20
-rw-r--r--arch/mips/kernel/setup.c4
-rw-r--r--arch/mips/kernel/smp.c2
-rw-r--r--arch/mips/lantiq/xway/sysctrl.c6
-rw-r--r--arch/mips/lasat/picvue_proc.c3
-rw-r--r--arch/mips/math-emu/cp1emu.c46
-rw-r--r--arch/mips/math-emu/dp_maddf.c8
-rw-r--r--arch/mips/math-emu/dp_mul.c8
-rw-r--r--arch/mips/math-emu/dp_sqrt.c4
-rw-r--r--arch/mips/math-emu/ieee754.h15
-rw-r--r--arch/mips/math-emu/ieee754int.h6
-rw-r--r--arch/mips/math-emu/ieee754sp.c4
-rw-r--r--arch/mips/math-emu/ieee754sp.h2
-rw-r--r--arch/mips/math-emu/sp_div.c4
-rw-r--r--arch/mips/math-emu/sp_fint.c2
-rw-r--r--arch/mips/math-emu/sp_maddf.c6
-rw-r--r--arch/mips/math-emu/sp_mul.c10
-rw-r--r--arch/mips/mm/dma-default.c9
-rw-r--r--arch/mips/mm/init.c4
-rw-r--r--arch/mips/pci/pci-mt7620.c15
-rw-r--r--arch/mips/pci/pcie-octeon.c12
-rw-r--r--arch/mips/ralink/Kconfig1
-rw-r--r--arch/mips/ralink/mt7620.c4
-rw-r--r--arch/mips/xilfpga/Kconfig10
-rw-r--r--arch/mips/xilfpga/Makefile7
-rw-r--r--arch/mips/xilfpga/Platform3
-rw-r--r--arch/mips/xilfpga/init.c44
-rw-r--r--arch/mips/xilfpga/intc.c22
-rw-r--r--arch/mips/xilfpga/time.c41
-rw-r--r--arch/mn10300/include/asm/dma-mapping.h10
-rw-r--r--arch/mn10300/include/asm/pci.h4
-rw-r--r--arch/mn10300/include/asm/spinlock.h16
-rw-r--r--arch/mn10300/kernel/head.S8
-rw-r--r--arch/mn10300/kernel/mn10300-serial.c4
-rw-r--r--arch/mn10300/unit-asb2305/pci-asb2305.h3
-rw-r--r--arch/nios2/boot/.gitignore1
-rw-r--r--arch/nios2/boot/Makefile2
-rw-r--r--arch/nios2/include/asm/dma-mapping.h9
-rw-r--r--arch/openrisc/Kconfig49
-rw-r--r--arch/openrisc/Makefile1
-rw-r--r--arch/openrisc/boot/dts/Makefile2
-rw-r--r--arch/openrisc/boot/dts/or1ksim.dts7
-rw-r--r--arch/openrisc/boot/dts/simple_smp.dts63
-rw-r--r--arch/openrisc/configs/simple_smp_defconfig66
-rw-r--r--arch/openrisc/include/asm/Kbuild5
-rw-r--r--arch/openrisc/include/asm/cacheflush.h96
-rw-r--r--arch/openrisc/include/asm/cmpxchg.h147
-rw-r--r--arch/openrisc/include/asm/cpuinfo.h7
-rw-r--r--arch/openrisc/include/asm/dma-mapping.h1
-rw-r--r--arch/openrisc/include/asm/mmu_context.h2
-rw-r--r--arch/openrisc/include/asm/pgtable.h18
-rw-r--r--arch/openrisc/include/asm/serial.h2
-rw-r--r--arch/openrisc/include/asm/smp.h26
-rw-r--r--arch/openrisc/include/asm/spinlock.h12
-rw-r--r--arch/openrisc/include/asm/spinlock_types.h7
-rw-r--r--arch/openrisc/include/asm/spr_defs.h14
-rw-r--r--arch/openrisc/include/asm/thread_info.h2
-rw-r--r--arch/openrisc/include/asm/time.h23
-rw-r--r--arch/openrisc/include/asm/tlbflush.h25
-rw-r--r--arch/openrisc/include/asm/unwinder.h20
-rw-r--r--arch/openrisc/kernel/Makefile4
-rw-r--r--arch/openrisc/kernel/dma.c14
-rw-r--r--arch/openrisc/kernel/entry.S74
-rw-r--r--arch/openrisc/kernel/head.S239
-rw-r--r--arch/openrisc/kernel/setup.c165
-rw-r--r--arch/openrisc/kernel/smp.c259
-rw-r--r--arch/openrisc/kernel/stacktrace.c86
-rw-r--r--arch/openrisc/kernel/sync-timer.c120
-rw-r--r--arch/openrisc/kernel/time.c66
-rw-r--r--arch/openrisc/kernel/traps.c54
-rw-r--r--arch/openrisc/kernel/unwinder.c105
-rw-r--r--arch/openrisc/lib/delay.c2
-rw-r--r--arch/openrisc/mm/Makefile2
-rw-r--r--arch/openrisc/mm/cache.c61
-rw-r--r--arch/openrisc/mm/fault.c4
-rw-r--r--arch/openrisc/mm/init.c2
-rw-r--r--arch/openrisc/mm/tlb.c16
-rw-r--r--arch/parisc/include/asm/atomic.h2
-rw-r--r--arch/parisc/include/asm/dma-mapping.h8
-rw-r--r--arch/parisc/include/asm/pci.h8
-rw-r--r--arch/parisc/include/asm/spinlock.h22
-rw-r--r--arch/parisc/kernel/pci-dma.c8
-rw-r--r--arch/powerpc/boot/.gitignore1
-rw-r--r--arch/powerpc/boot/Makefile2
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h7
-rw-r--r--arch/powerpc/include/asm/floppy.h2
-rw-r--r--arch/powerpc/include/asm/pci.h2
-rw-r--r--arch/powerpc/include/asm/pgalloc.h2
-rw-r--r--arch/powerpc/include/asm/spinlock.h7
-rw-r--r--arch/powerpc/kernel/eeh_driver.c4
-rw-r--r--arch/powerpc/kernel/machine_kexec_file_64.c12
-rw-r--r--arch/powerpc/kernel/pci-common.c12
-rw-r--r--arch/powerpc/kernel/rtas.c2
-rw-r--r--arch/powerpc/mm/hugetlbpage.c1
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c2
-rw-r--r--arch/powerpc/mm/pgtable_64.c2
-rw-r--r--arch/powerpc/platforms/Kconfig2
-rw-r--r--arch/powerpc/platforms/powernv/opal-msglog.c2
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c2
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c24
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c2
-rw-r--r--arch/riscv/Kconfig310
-rw-r--r--arch/riscv/Makefile72
-rw-r--r--arch/riscv/configs/defconfig0
-rw-r--r--arch/riscv/include/asm/Kbuild61
-rw-r--r--arch/riscv/include/asm/asm-offsets.h1
-rw-r--r--arch/riscv/include/asm/asm.h76
-rw-r--r--arch/riscv/include/asm/atomic.h375
-rw-r--r--arch/riscv/include/asm/barrier.h68
-rw-r--r--arch/riscv/include/asm/bitops.h218
-rw-r--r--arch/riscv/include/asm/bug.h88
-rw-r--r--arch/riscv/include/asm/cache.h22
-rw-r--r--arch/riscv/include/asm/cacheflush.h39
-rw-r--r--arch/riscv/include/asm/cmpxchg.h134
-rw-r--r--arch/riscv/include/asm/compat.h29
-rw-r--r--arch/riscv/include/asm/csr.h132
-rw-r--r--arch/riscv/include/asm/current.h45
-rw-r--r--arch/riscv/include/asm/delay.h28
-rw-r--r--arch/riscv/include/asm/dma-mapping.h38
-rw-r--r--arch/riscv/include/asm/elf.h84
-rw-r--r--arch/riscv/include/asm/hwcap.h37
-rw-r--r--arch/riscv/include/asm/io.h303
-rw-r--r--arch/riscv/include/asm/irq.h28
-rw-r--r--arch/riscv/include/asm/irqflags.h63
-rw-r--r--arch/riscv/include/asm/kprobes.h22
-rw-r--r--arch/riscv/include/asm/linkage.h20
-rw-r--r--arch/riscv/include/asm/mmu.h26
-rw-r--r--arch/riscv/include/asm/mmu_context.h69
-rw-r--r--arch/riscv/include/asm/page.h130
-rw-r--r--arch/riscv/include/asm/pci.h48
-rw-r--r--arch/riscv/include/asm/pgalloc.h124
-rw-r--r--arch/riscv/include/asm/pgtable-32.h25
-rw-r--r--arch/riscv/include/asm/pgtable-64.h84
-rw-r--r--arch/riscv/include/asm/pgtable-bits.h48
-rw-r--r--arch/riscv/include/asm/pgtable.h430
-rw-r--r--arch/riscv/include/asm/processor.h97
-rw-r--r--arch/riscv/include/asm/ptrace.h118
-rw-r--r--arch/riscv/include/asm/sbi.h100
-rw-r--r--arch/riscv/include/asm/smp.h52
-rw-r--r--arch/riscv/include/asm/spinlock.h151
-rw-r--r--arch/riscv/include/asm/spinlock_types.h33
-rw-r--r--arch/riscv/include/asm/string.h26
-rw-r--r--arch/riscv/include/asm/switch_to.h69
-rw-r--r--arch/riscv/include/asm/syscall.h102
-rw-r--r--arch/riscv/include/asm/thread_info.h94
-rw-r--r--arch/riscv/include/asm/timex.h59
-rw-r--r--arch/riscv/include/asm/tlb.h24
-rw-r--r--arch/riscv/include/asm/tlbflush.h64
-rw-r--r--arch/riscv/include/asm/uaccess.h513
-rw-r--r--arch/riscv/include/asm/unistd.h16
-rw-r--r--arch/riscv/include/asm/vdso.h41
-rw-r--r--arch/riscv/include/asm/word-at-a-time.h55
-rw-r--r--arch/riscv/include/uapi/asm/Kbuild27
-rw-r--r--arch/riscv/include/uapi/asm/auxvec.h (renamed from drivers/staging/ccree/dx_reg_base_host.h)17
-rw-r--r--arch/riscv/include/uapi/asm/bitsperlong.h25
-rw-r--r--arch/riscv/include/uapi/asm/byteorder.h23
-rw-r--r--arch/riscv/include/uapi/asm/elf.h83
-rw-r--r--arch/riscv/include/uapi/asm/hwcap.h36
-rw-r--r--arch/riscv/include/uapi/asm/ptrace.h90
-rw-r--r--arch/riscv/include/uapi/asm/sigcontext.h30
-rw-r--r--arch/riscv/include/uapi/asm/siginfo.h24
-rw-r--r--arch/riscv/include/uapi/asm/ucontext.h45
-rw-r--r--arch/riscv/kernel/.gitignore1
-rw-r--r--arch/riscv/kernel/Makefile33
-rw-r--r--arch/riscv/kernel/asm-offsets.c322
-rw-r--r--arch/riscv/kernel/cacheinfo.c105
-rw-r--r--arch/riscv/kernel/cpu.c108
-rw-r--r--arch/riscv/kernel/cpufeature.c61
-rw-r--r--arch/riscv/kernel/entry.S464
-rw-r--r--arch/riscv/kernel/head.S157
-rw-r--r--arch/riscv/kernel/irq.c39
-rw-r--r--arch/riscv/kernel/module.c217
-rw-r--r--arch/riscv/kernel/process.c129
-rw-r--r--arch/riscv/kernel/ptrace.c125
-rw-r--r--arch/riscv/kernel/reset.c36
-rw-r--r--arch/riscv/kernel/riscv_ksyms.c15
-rw-r--r--arch/riscv/kernel/setup.c257
-rw-r--r--arch/riscv/kernel/signal.c292
-rw-r--r--arch/riscv/kernel/smp.c110
-rw-r--r--arch/riscv/kernel/smpboot.c114
-rw-r--r--arch/riscv/kernel/stacktrace.c177
-rw-r--r--arch/riscv/kernel/sys_riscv.c49
-rw-r--r--arch/riscv/kernel/syscall_table.c25
-rw-r--r--arch/riscv/kernel/time.c61
-rw-r--r--arch/riscv/kernel/traps.c180
-rw-r--r--arch/riscv/kernel/vdso.c125
-rw-r--r--arch/riscv/kernel/vdso/.gitignore2
-rw-r--r--arch/riscv/kernel/vdso/Makefile63
-rw-r--r--arch/riscv/kernel/vdso/rt_sigreturn.S24
-rw-r--r--arch/riscv/kernel/vdso/vdso.S27
-rw-r--r--arch/riscv/kernel/vdso/vdso.lds.S77
-rw-r--r--arch/riscv/kernel/vmlinux.lds.S92
-rw-r--r--arch/riscv/lib/Makefile6
-rw-r--r--arch/riscv/lib/delay.c110
-rw-r--r--arch/riscv/lib/memcpy.S115
-rw-r--r--arch/riscv/lib/memset.S120
-rw-r--r--arch/riscv/lib/uaccess.S117
-rw-r--r--arch/riscv/lib/udivdi3.S38
-rw-r--r--arch/riscv/mm/Makefile4
-rw-r--r--arch/riscv/mm/extable.c37
-rw-r--r--arch/riscv/mm/fault.c282
-rw-r--r--arch/riscv/mm/init.c70
-rw-r--r--arch/riscv/mm/ioremap.c92
-rw-r--r--arch/s390/Kconfig43
-rw-r--r--arch/s390/Makefile3
-rw-r--r--arch/s390/boot/compressed/Makefile2
-rw-r--r--arch/s390/boot/compressed/misc.c2
-rw-r--r--arch/s390/configs/default_defconfig11
-rw-r--r--arch/s390/configs/gcov_defconfig9
-rw-r--r--arch/s390/configs/performance_defconfig9
-rw-r--r--arch/s390/crypto/aes_s390.c296
-rw-r--r--arch/s390/defconfig3
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/alternative.h163
-rw-r--r--arch/s390/include/asm/archrandom.h26
-rw-r--r--arch/s390/include/asm/atomic_ops.h32
-rw-r--r--arch/s390/include/asm/ccwgroup.h2
-rw-r--r--arch/s390/include/asm/cpacf.h52
-rw-r--r--arch/s390/include/asm/ctl_reg.h32
-rw-r--r--arch/s390/include/asm/debug.h190
-rw-r--r--arch/s390/include/asm/dis.h28
-rw-r--r--arch/s390/include/asm/dma-mapping.h5
-rw-r--r--arch/s390/include/asm/ipl.h3
-rw-r--r--arch/s390/include/asm/kprobes.h2
-rw-r--r--arch/s390/include/asm/kvm_host.h1
-rw-r--r--arch/s390/include/asm/lowcore.h5
-rw-r--r--arch/s390/include/asm/mmu_context.h4
-rw-r--r--arch/s390/include/asm/nmi.h19
-rw-r--r--arch/s390/include/asm/pci_debug.h6
-rw-r--r--arch/s390/include/asm/pci_insn.h2
-rw-r--r--arch/s390/include/asm/pgalloc.h18
-rw-r--r--arch/s390/include/asm/processor.h8
-rw-r--r--arch/s390/include/asm/runtime_instr.h86
-rw-r--r--arch/s390/include/asm/rwsem.h211
-rw-r--r--arch/s390/include/asm/sections.h2
-rw-r--r--arch/s390/include/asm/setup.h3
-rw-r--r--arch/s390/include/asm/smp.h5
-rw-r--r--arch/s390/include/asm/spinlock.h179
-rw-r--r--arch/s390/include/asm/spinlock_types.h4
-rw-r--r--arch/s390/include/asm/string.h46
-rw-r--r--arch/s390/include/asm/switch_to.h2
-rw-r--r--arch/s390/include/asm/sysinfo.h4
-rw-r--r--arch/s390/include/asm/topology.h2
-rw-r--r--arch/s390/include/asm/vdso.h1
-rw-r--r--arch/s390/include/uapi/asm/kvm_virtio.h65
-rw-r--r--arch/s390/include/uapi/asm/sthyi.h6
-rw-r--r--arch/s390/include/uapi/asm/unistd.h3
-rw-r--r--arch/s390/kernel/Makefile5
-rw-r--r--arch/s390/kernel/alternative.c110
-rw-r--r--arch/s390/kernel/asm-offsets.c5
-rw-r--r--arch/s390/kernel/compat_wrapper.c1
-rw-r--r--arch/s390/kernel/debug.c916
-rw-r--r--arch/s390/kernel/dis.c2039
-rw-r--r--arch/s390/kernel/early.c145
-rw-r--r--arch/s390/kernel/entry.S60
-rw-r--r--arch/s390/kernel/entry.h1
-rw-r--r--arch/s390/kernel/guarded_storage.c7
-rw-r--r--arch/s390/kernel/ipl.c36
-rw-r--r--arch/s390/kernel/kprobes.c7
-rw-r--r--arch/s390/kernel/machine_kexec.c22
-rw-r--r--arch/s390/kernel/module.c17
-rw-r--r--arch/s390/kernel/nmi.c203
-rw-r--r--arch/s390/kernel/perf_cpum_cf_events.c278
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c6
-rw-r--r--arch/s390/kernel/process.c18
-rw-r--r--arch/s390/kernel/ptrace.c172
-rw-r--r--arch/s390/kernel/relocate_kernel.S3
-rw-r--r--arch/s390/kernel/runtime_instr.c42
-rw-r--r--arch/s390/kernel/setup.c21
-rw-r--r--arch/s390/kernel/smp.c87
-rw-r--r--arch/s390/kernel/sthyi.c (renamed from arch/s390/kvm/sthyi.c)172
-rw-r--r--arch/s390/kernel/suspend.c8
-rw-r--r--arch/s390/kernel/syscalls.S1
-rw-r--r--arch/s390/kernel/topology.c43
-rw-r--r--arch/s390/kernel/vdso.c20
-rw-r--r--arch/s390/kernel/vmlinux.lds.S28
-rw-r--r--arch/s390/kvm/Makefile2
-rw-r--r--arch/s390/kvm/intercept.c56
-rw-r--r--arch/s390/kvm/interrupt.c6
-rw-r--r--arch/s390/kvm/kvm-s390.c4
-rw-r--r--arch/s390/kvm/kvm-s390.h5
-rw-r--r--arch/s390/lib/mem.S64
-rw-r--r--arch/s390/lib/spinlock.c343
-rw-r--r--arch/s390/lib/string.c28
-rw-r--r--arch/s390/mm/init.c4
-rw-r--r--arch/s390/mm/pgalloc.c14
-rw-r--r--arch/s390/mm/vmem.c16
-rw-r--r--arch/s390/net/bpf_jit.h7
-rw-r--r--arch/s390/net/bpf_jit_comp.c26
-rw-r--r--arch/s390/pci/pci.c5
-rw-r--r--arch/s390/pci/pci_insn.c6
-rw-r--r--arch/s390/tools/Makefile10
-rw-r--r--arch/s390/tools/gen_opcode_table.c336
-rw-r--r--arch/s390/tools/opcodes.txt1183
-rw-r--r--arch/sh/boot/dts/Makefile2
-rw-r--r--arch/sh/include/asm/dma-mapping.h7
-rw-r--r--arch/sh/include/asm/pci.h4
-rw-r--r--arch/sh/include/asm/spinlock-cas.h20
-rw-r--r--arch/sh/include/asm/spinlock-llsc.h20
-rw-r--r--arch/sh/kernel/dma-nommu.c17
-rw-r--r--arch/sh/kernel/dwarf.c4
-rw-r--r--arch/sh/kernel/head_64.S8
-rw-r--r--arch/sh/kernel/process.c2
-rw-r--r--arch/sh/mm/consistent.c6
-rw-r--r--arch/sparc/include/asm/atomic_32.h2
-rw-r--r--arch/sparc/include/asm/dma-mapping.h8
-rw-r--r--arch/sparc/include/asm/floppy_32.h1
-rw-r--r--arch/sparc/include/asm/floppy_64.h1
-rw-r--r--arch/sparc/include/asm/pci_32.h2
-rw-r--r--arch/sparc/include/asm/pgtable_64.h30
-rw-r--r--arch/sparc/include/asm/ptrace.h1
-rw-r--r--arch/sparc/include/asm/spinlock_32.h11
-rw-r--r--arch/sparc/include/asm/spinlock_64.h7
-rw-r--r--arch/sparc/mm/hugetlbpage.c3
-rw-r--r--arch/sparc/mm/init_64.c38
-rw-r--r--arch/tile/gxio/dma_queue.c4
-rw-r--r--arch/tile/include/asm/dma-mapping.h9
-rw-r--r--arch/tile/include/asm/spinlock_32.h22
-rw-r--r--arch/tile/include/asm/spinlock_64.h24
-rw-r--r--arch/tile/include/gxio/dma_queue.h2
-rw-r--r--arch/tile/kernel/ptrace.c2
-rw-r--r--arch/tile/mm/homecache.c2
-rw-r--r--arch/um/include/shared/init.h2
-rw-r--r--arch/um/kernel/mem.c3
-rw-r--r--arch/unicore32/Kconfig2
-rw-r--r--arch/unicore32/include/asm/cacheflush.h9
-rw-r--r--arch/unicore32/include/asm/dma-mapping.h22
-rw-r--r--arch/unicore32/include/asm/pgalloc.h2
-rw-r--r--arch/unicore32/mm/pgd.c2
-rw-r--r--arch/unicore32/mm/proc-syms.c3
-rw-r--r--arch/x86/Kconfig22
-rw-r--r--arch/x86/Kconfig.debug39
-rw-r--r--arch/x86/Makefile5
-rw-r--r--arch/x86/boot/.gitignore3
-rw-r--r--arch/x86/boot/Makefile59
-rw-r--r--arch/x86/boot/compressed/Makefile2
-rw-r--r--arch/x86/boot/compressed/head_64.S16
-rw-r--r--arch/x86/boot/compressed/mem_encrypt.S120
-rw-r--r--arch/x86/boot/compressed/misc.h2
-rw-r--r--arch/x86/boot/compressed/pagetable.c8
-rw-r--r--arch/x86/boot/genimage.sh124
-rw-r--r--arch/x86/boot/header.S1
-rw-r--r--arch/x86/boot/video-vga.c6
-rw-r--r--arch/x86/configs/tiny.config4
-rw-r--r--arch/x86/configs/x86_64_defconfig1
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c10
-rw-r--r--arch/x86/crypto/crc32-pclmul_asm.S17
-rw-r--r--arch/x86/entry/calling.h69
-rw-r--r--arch/x86/entry/common.c6
-rw-r--r--arch/x86/entry/entry_64.S141
-rw-r--r--arch/x86/entry/entry_64_compat.S3
-rw-r--r--arch/x86/entry/syscalls/Makefile4
-rw-r--r--arch/x86/entry/vdso/vclock_gettime.c2
-rw-r--r--arch/x86/entry/vdso/vdso2c.c3
-rw-r--r--arch/x86/entry/vdso/vma.c5
-rw-r--r--arch/x86/events/amd/iommu.c2
-rw-r--r--arch/x86/events/core.c4
-rw-r--r--arch/x86/events/intel/core.c4
-rw-r--r--arch/x86/events/perf_event.h24
-rw-r--r--arch/x86/hyperv/hv_init.c17
-rw-r--r--arch/x86/include/asm/apic.h270
-rw-r--r--arch/x86/include/asm/archrandom.h8
-rw-r--r--arch/x86/include/asm/barrier.h12
-rw-r--r--arch/x86/include/asm/bitops.h10
-rw-r--r--arch/x86/include/asm/compat.h1
-rw-r--r--arch/x86/include/asm/cpufeature.h9
-rw-r--r--arch/x86/include/asm/cpufeatures.h537
-rw-r--r--arch/x86/include/asm/desc.h2
-rw-r--r--arch/x86/include/asm/disabled-features.h8
-rw-r--r--arch/x86/include/asm/dma-mapping.h8
-rw-r--r--arch/x86/include/asm/elf.h2
-rw-r--r--arch/x86/include/asm/fixmap.h6
-rw-r--r--arch/x86/include/asm/hw_irq.h6
-rw-r--r--arch/x86/include/asm/hypervisor.h46
-rw-r--r--arch/x86/include/asm/inat.h10
-rw-r--r--arch/x86/include/asm/insn-eval.h23
-rw-r--r--arch/x86/include/asm/io.h43
-rw-r--r--arch/x86/include/asm/io_apic.h2
-rw-r--r--arch/x86/include/asm/irq.h4
-rw-r--r--arch/x86/include/asm/irq_vectors.h8
-rw-r--r--arch/x86/include/asm/irqdomain.h5
-rw-r--r--arch/x86/include/asm/kmemcheck.h42
-rw-r--r--arch/x86/include/asm/kprobes.h4
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/kvm_para.h2
-rw-r--r--arch/x86/include/asm/mem_encrypt.h14
-rw-r--r--arch/x86/include/asm/mmu_context.h4
-rw-r--r--arch/x86/include/asm/module.h2
-rw-r--r--arch/x86/include/asm/mpspec_def.h2
-rw-r--r--arch/x86/include/asm/mshyperv.h2
-rw-r--r--arch/x86/include/asm/msr-index.h3
-rw-r--r--arch/x86/include/asm/paravirt.h5
-rw-r--r--arch/x86/include/asm/paravirt_types.h2
-rw-r--r--arch/x86/include/asm/pci.h2
-rw-r--r--arch/x86/include/asm/percpu.h2
-rw-r--r--arch/x86/include/asm/pgtable.h5
-rw-r--r--arch/x86/include/asm/pgtable_types.h16
-rw-r--r--arch/x86/include/asm/processor.h52
-rw-r--r--arch/x86/include/asm/ptrace.h6
-rw-r--r--arch/x86/include/asm/qspinlock.h11
-rw-r--r--arch/x86/include/asm/refcount.h2
-rw-r--r--arch/x86/include/asm/rmwcc.h2
-rw-r--r--arch/x86/include/asm/rwsem.h84
-rw-r--r--arch/x86/include/asm/spinlock.h7
-rw-r--r--arch/x86/include/asm/string_32.h9
-rw-r--r--arch/x86/include/asm/string_64.h8
-rw-r--r--arch/x86/include/asm/switch_to.h24
-rw-r--r--arch/x86/include/asm/syscalls.h2
-rw-r--r--arch/x86/include/asm/timer.h2
-rw-r--r--arch/x86/include/asm/trace/fpu.h10
-rw-r--r--arch/x86/include/asm/trace/irq_vectors.h248
-rw-r--r--arch/x86/include/asm/traps.h20
-rw-r--r--arch/x86/include/asm/tsc.h7
-rw-r--r--arch/x86/include/asm/umip.h12
-rw-r--r--arch/x86/include/asm/unwind.h8
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h23
-rw-r--r--arch/x86/include/asm/vgtod.h2
-rw-r--r--arch/x86/include/asm/x2apic.h50
-rw-r--r--arch/x86/include/asm/x86_init.h29
-rw-r--r--arch/x86/include/asm/xor.h5
-rw-r--r--arch/x86/include/uapi/asm/kvm_para.h1
-rw-r--r--arch/x86/include/uapi/asm/processor-flags.h5
-rw-r--r--arch/x86/kernel/Makefile11
-rw-r--r--arch/x86/kernel/acpi/apei.c5
-rw-r--r--arch/x86/kernel/acpi/boot.c5
-rw-r--r--arch/x86/kernel/alternative.c26
-rw-r--r--arch/x86/kernel/apic/Makefile2
-rw-r--r--arch/x86/kernel/apic/apic.c243
-rw-r--r--arch/x86/kernel/apic/apic_common.c46
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c10
-rw-r--r--arch/x86/kernel/apic/apic_noop.c25
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c12
-rw-r--r--arch/x86/kernel/apic/bigsmp_32.c8
-rw-r--r--arch/x86/kernel/apic/htirq.c5
-rw-r--r--arch/x86/kernel/apic/io_apic.c139
-rw-r--r--arch/x86/kernel/apic/probe_32.c29
-rw-r--r--arch/x86/kernel/apic/vector.c1099
-rw-r--r--arch/x86/kernel/apic/x2apic.h9
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c196
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c44
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c65
-rw-r--r--arch/x86/kernel/cpu/Makefile1
-rw-r--r--arch/x86/kernel/cpu/aperfmperf.c11
-rw-r--r--arch/x86/kernel/cpu/common.c58
-rw-r--r--arch/x86/kernel/cpu/cpuid-deps.c121
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c64
-rw-r--r--arch/x86/kernel/cpu/intel.c15
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.c1
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.h7
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c50
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_monitor.c2
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_rdtgroup.c131
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-severity.c9
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c13
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c6
-rw-r--r--arch/x86/kernel/cpu/vmware.c8
-rw-r--r--arch/x86/kernel/crash.c18
-rw-r--r--arch/x86/kernel/espfix_64.c8
-rw-r--r--arch/x86/kernel/fpu/init.c11
-rw-r--r--arch/x86/kernel/fpu/xstate.c43
-rw-r--r--arch/x86/kernel/head_32.S5
-rw-r--r--arch/x86/kernel/head_64.S45
-rw-r--r--arch/x86/kernel/i8259.c1
-rw-r--r--arch/x86/kernel/idt.c14
-rw-r--r--arch/x86/kernel/irq.c101
-rw-r--r--arch/x86/kernel/irqinit.c4
-rw-r--r--arch/x86/kernel/kprobes/common.h6
-rw-r--r--arch/x86/kernel/kprobes/core.c61
-rw-r--r--arch/x86/kernel/kprobes/ftrace.c32
-rw-r--r--arch/x86/kernel/kprobes/opt.c79
-rw-r--r--arch/x86/kernel/kvm.c49
-rw-r--r--arch/x86/kernel/kvmclock.c65
-rw-r--r--arch/x86/kernel/ldt.c18
-rw-r--r--arch/x86/kernel/nmi.c2
-rw-r--r--arch/x86/kernel/paravirt.c14
-rw-r--r--arch/x86/kernel/pmem.c2
-rw-r--r--arch/x86/kernel/process.c8
-rw-r--r--arch/x86/kernel/process_32.c6
-rw-r--r--arch/x86/kernel/process_64.c5
-rw-r--r--arch/x86/kernel/setup.c51
-rw-r--r--arch/x86/kernel/smpboot.c120
-rw-r--r--arch/x86/kernel/stacktrace.c10
-rw-r--r--arch/x86/kernel/time.c5
-rw-r--r--arch/x86/kernel/traps.c26
-rw-r--r--arch/x86/kernel/tsc.c47
-rw-r--r--arch/x86/kernel/tsc_sync.c56
-rw-r--r--arch/x86/kernel/umip.c366
-rw-r--r--arch/x86/kernel/unwind_orc.c2
-rw-r--r--arch/x86/kernel/uprobes.c15
-rw-r--r--arch/x86/kernel/verify_cpu.S3
-rw-r--r--arch/x86/kernel/vm86_32.c20
-rw-r--r--arch/x86/kernel/vsmp_64.c19
-rw-r--r--arch/x86/kernel/x86_init.c11
-rw-r--r--arch/x86/kvm/mmu.c4
-rw-r--r--arch/x86/kvm/page_track.c2
-rw-r--r--arch/x86/lib/Makefile2
-rw-r--r--arch/x86/lib/insn-eval.c1364
-rw-r--r--arch/x86/lib/rwsem.S12
-rw-r--r--arch/x86/mm/Makefile2
-rw-r--r--arch/x86/mm/extable.c7
-rw-r--r--arch/x86/mm/fault.c94
-rw-r--r--arch/x86/mm/init.c10
-rw-r--r--arch/x86/mm/init_64.c23
-rw-r--r--arch/x86/mm/ioremap.c123
-rw-r--r--arch/x86/mm/kasan_init_64.c244
-rw-r--r--arch/x86/mm/kmemcheck/Makefile1
-rw-r--r--arch/x86/mm/kmemcheck/error.c227
-rw-r--r--arch/x86/mm/kmemcheck/error.h15
-rw-r--r--arch/x86/mm/kmemcheck/kmemcheck.c658
-rw-r--r--arch/x86/mm/kmemcheck/opcode.c106
-rw-r--r--arch/x86/mm/kmemcheck/opcode.h9
-rw-r--r--arch/x86/mm/kmemcheck/pte.c22
-rw-r--r--arch/x86/mm/kmemcheck/pte.h10
-rw-r--r--arch/x86/mm/kmemcheck/selftest.c70
-rw-r--r--arch/x86/mm/kmemcheck/selftest.h6
-rw-r--r--arch/x86/mm/kmemcheck/shadow.c173
-rw-r--r--arch/x86/mm/kmemcheck/shadow.h18
-rw-r--r--arch/x86/mm/mem_encrypt.c301
-rw-r--r--arch/x86/mm/mpx.c120
-rw-r--r--arch/x86/mm/pageattr.c14
-rw-r--r--arch/x86/mm/pgtable.c2
-rw-r--r--arch/x86/oprofile/nmi_int.c2
-rw-r--r--arch/x86/oprofile/op_model_ppro.c4
-rw-r--r--arch/x86/pci/fixup.c85
-rw-r--r--arch/x86/pci/intel_mid_pci.c2
-rw-r--r--arch/x86/platform/efi/efi_64.c18
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c5
-rw-r--r--arch/x86/platform/uv/uv_irq.c5
-rw-r--r--arch/x86/realmode/init.c5
-rw-r--r--arch/x86/um/ldt.c7
-rw-r--r--arch/x86/xen/apic.c6
-rw-r--r--arch/x86/xen/enlighten_hvm.c36
-rw-r--r--arch/x86/xen/enlighten_pv.c16
-rw-r--r--arch/x86/xen/enlighten_pvh.c9
-rw-r--r--arch/x86/xen/mmu_pv.c159
-rw-r--r--arch/x86/xen/p2m.c2
-rw-r--r--arch/x86/xen/smp_pv.c17
-rw-r--r--arch/x86/xen/spinlock.c6
-rw-r--r--arch/x86/xen/xen-asm_64.S2
-rw-r--r--arch/x86/xen/xen-head.S11
-rw-r--r--arch/xtensa/Kconfig2
-rw-r--r--arch/xtensa/boot/.gitignore1
-rw-r--r--arch/xtensa/boot/dts/Makefile9
-rw-r--r--arch/xtensa/include/asm/dma-mapping.h3
-rw-r--r--arch/xtensa/include/asm/pci.h2
-rw-r--r--arch/xtensa/include/asm/spinlock.h7
-rw-r--r--arch/xtensa/kernel/pci-dma.c23
-rw-r--r--arch/xtensa/platforms/iss/simdisk.c4
-rw-r--r--arch/xtensa/platforms/xtfpga/lcd.c14
-rw-r--r--block/badblocks.c2
-rw-r--r--block/bfq-iosched.c225
-rw-r--r--block/bio-integrity.c7
-rw-r--r--block/bio.c59
-rw-r--r--block/blk-cgroup.c9
-rw-r--r--block/blk-core.c274
-rw-r--r--block/blk-flush.c37
-rw-r--r--block/blk-lib.c108
-rw-r--r--block/blk-mq-debugfs.c3
-rw-r--r--block/blk-mq-sched.c203
-rw-r--r--block/blk-mq-tag.c11
-rw-r--r--block/blk-mq-tag.h7
-rw-r--r--block/blk-mq.c424
-rw-r--r--block/blk-mq.h60
-rw-r--r--block/blk-settings.c2
-rw-r--r--block/blk-stat.c45
-rw-r--r--block/blk-throttle.c12
-rw-r--r--block/blk-timeout.c5
-rw-r--r--block/blk-wbt.c4
-rw-r--r--block/blk.h46
-rw-r--r--block/bsg.c18
-rw-r--r--block/elevator.c67
-rw-r--r--block/genhd.c80
-rw-r--r--block/ioctl.c19
-rw-r--r--block/kyber-iosched.c12
-rw-r--r--block/mq-deadline.c1
-rw-r--r--block/scsi_ioctl.c8
-rw-r--r--crypto/Kconfig11
-rw-r--r--crypto/Makefile1
-rw-r--r--crypto/af_alg.c27
-rw-r--r--crypto/ahash.c12
-rw-r--r--crypto/algapi.c6
-rw-r--r--crypto/algboss.c1
-rw-r--r--crypto/algif_aead.c8
-rw-r--r--crypto/algif_hash.c30
-rw-r--r--crypto/algif_skcipher.c9
-rw-r--r--crypto/api.c13
-rw-r--r--crypto/asymmetric_keys/public_key.c28
-rw-r--r--crypto/cryptd.c4
-rw-r--r--crypto/cts.c6
-rw-r--r--crypto/dh.c36
-rw-r--r--crypto/dh_helper.c20
-rw-r--r--crypto/drbg.c36
-rw-r--r--crypto/ecdh.c6
-rw-r--r--crypto/ecdh_helper.c2
-rw-r--r--crypto/gcm.c55
-rw-r--r--crypto/gf128mul.c13
-rw-r--r--crypto/keywrap.c84
-rw-r--r--crypto/lrw.c17
-rw-r--r--crypto/rmd128.c2
-rw-r--r--crypto/rmd160.c2
-rw-r--r--crypto/rmd256.c2
-rw-r--r--crypto/rmd320.c2
-rw-r--r--crypto/rsa-pkcs1pad.c16
-rw-r--r--crypto/sm3_generic.c210
-rw-r--r--crypto/tcrypt.c209
-rw-r--r--crypto/testmgr.c210
-rw-r--r--crypto/testmgr.h67
-rw-r--r--crypto/xor.c7
-rw-r--r--crypto/xts.c8
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Kconfig27
-rw-r--r--drivers/acpi/Makefile4
-rw-r--r--drivers/acpi/ac.c1
-rw-r--r--drivers/acpi/acpi_apd.c5
-rw-r--r--drivers/acpi/acpi_configfs.c6
-rw-r--r--drivers/acpi/acpi_lpit.c162
-rw-r--r--drivers/acpi/acpi_lpss.c97
-rw-r--r--drivers/acpi/acpi_processor.c1
-rw-r--r--drivers/acpi/acpica/Makefile1
-rw-r--r--drivers/acpi/acpica/achware.h4
-rw-r--r--drivers/acpi/acpica/acinterp.h6
-rw-r--r--drivers/acpi/acpica/acutils.h33
-rw-r--r--drivers/acpi/acpica/dbconvert.c5
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c6
-rw-r--r--drivers/acpi/acpica/exconcat.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c30
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c4
-rw-r--r--drivers/acpi/acpica/hwregs.c72
-rw-r--r--drivers/acpi/acpica/hwtimer.c10
-rw-r--r--drivers/acpi/acpica/hwxface.c118
-rw-r--r--drivers/acpi/acpica/nsconvert.c4
-rw-r--r--drivers/acpi/acpica/tbxface.c9
-rw-r--r--drivers/acpi/acpica/utstrsuppt.c438
-rw-r--r--drivers/acpi/acpica/utstrtoul64.c442
-rw-r--r--drivers/acpi/apei/erst.c2
-rw-r--r--drivers/acpi/apei/ghes.c117
-rw-r--r--drivers/acpi/arm64/gtdt.c2
-rw-r--r--drivers/acpi/arm64/iort.c258
-rw-r--r--drivers/acpi/button.c32
-rw-r--r--drivers/acpi/cppc_acpi.c240
-rw-r--r--drivers/acpi/device_pm.c277
-rw-r--r--drivers/acpi/dock.c1
-rw-r--r--drivers/acpi/ec.c18
-rw-r--r--drivers/acpi/internal.h6
-rw-r--r--drivers/acpi/osl.c42
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtdc_ti.c137
-rw-r--r--drivers/acpi/pmic/tps68470_pmic.c455
-rw-r--r--drivers/acpi/resource.c1
-rw-r--r--drivers/acpi/scan.c38
-rw-r--r--drivers/acpi/sysfs.c11
-rw-r--r--drivers/acpi/x86/utils.c18
-rw-r--r--drivers/amba/bus.c1
-rw-r--r--drivers/android/binder.c4
-rw-r--r--drivers/android/binder_alloc.c18
-rw-r--r--drivers/ata/ahci.c9
-rw-r--r--drivers/ata/ahci_ceva.c197
-rw-r--r--drivers/ata/ahci_imx.c2
-rw-r--r--drivers/ata/libahci_platform.c1
-rw-r--r--drivers/ata/libata-core.c8
-rw-r--r--drivers/ata/libata-eh.c14
-rw-r--r--drivers/ata/libata-scsi.c19
-rw-r--r--drivers/ata/libata.h2
-rw-r--r--drivers/ata/pata_artop.c2
-rw-r--r--drivers/ata/pata_atp867x.c2
-rw-r--r--drivers/ata/pata_pdc2027x.c6
-rw-r--r--drivers/ata/sata_dwc_460ex.c1
-rw-r--r--drivers/ata/sata_mv.c8
-rw-r--r--drivers/ata/sata_rcar.c7
-rw-r--r--drivers/atm/horizon.c2
-rw-r--r--drivers/base/arch_topology.c31
-rw-r--r--drivers/base/core.c6
-rw-r--r--drivers/base/cpu.c14
-rw-r--r--drivers/base/dd.c18
-rw-r--r--drivers/base/platform.c1
-rw-r--r--drivers/base/power/Makefile1
-rw-r--r--drivers/base/power/domain.c226
-rw-r--r--drivers/base/power/domain_governor.c73
-rw-r--r--drivers/base/power/generic_ops.c23
-rw-r--r--drivers/base/power/main.c53
-rw-r--r--drivers/base/power/qos.c5
-rw-r--r--drivers/base/power/runtime.c13
-rw-r--r--drivers/base/power/sysfs.c53
-rw-r--r--drivers/base/power/wakeup.c11
-rw-r--r--drivers/base/property.c9
-rw-r--r--drivers/base/regmap/Kconfig4
-rw-r--r--drivers/base/regmap/internal.h2
-rw-r--r--drivers/base/regmap/regmap-spi.c2
-rw-r--r--drivers/base/regmap/regmap-spmi.c4
-rw-r--r--drivers/base/regmap/regmap.c111
-rw-r--r--drivers/base/test/test_async_driver_probe.c6
-rw-r--r--drivers/bcma/driver_gpio.c2
-rw-r--r--drivers/block/Kconfig5
-rw-r--r--drivers/block/brd.c3
-rw-r--r--drivers/block/cryptoloop.c2
-rw-r--r--drivers/block/floppy.c4
-rw-r--r--drivers/block/loop.c13
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c7
-rw-r--r--drivers/block/nbd.c26
-rw-r--r--drivers/block/null_blk.c14
-rw-r--r--drivers/block/paride/Kconfig1
-rw-r--r--drivers/block/skd_main.c3
-rw-r--r--drivers/block/zram/zcomp.c6
-rw-r--r--drivers/block/zram/zram_drv.c18
-rw-r--r--drivers/bus/arm-ccn.c1
-rw-r--r--drivers/cdrom/Makefile15
-rw-r--r--drivers/char/hw_random/Kconfig6
-rw-r--r--drivers/char/hw_random/core.c53
-rw-r--r--drivers/char/hw_random/iproc-rng200.c1
-rw-r--r--drivers/char/hw_random/pseries-rng.c2
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c7
-rw-r--r--drivers/char/hw_random/virtio-rng.c21
-rw-r--r--drivers/char/ipmi/Kconfig35
-rw-r--r--drivers/char/ipmi/Makefile10
-rw-r--r--drivers/char/ipmi/ipmi_dmi.c76
-rw-r--r--drivers/char/ipmi/ipmi_dmi.h8
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c1275
-rw-r--r--drivers/char/ipmi/ipmi_powernv.c4
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c4
-rw-r--r--drivers/char/ipmi/ipmi_si.h49
-rw-r--r--drivers/char/ipmi/ipmi_si_hardcode.c146
-rw-r--r--drivers/char/ipmi/ipmi_si_hotmod.c242
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c2122
-rw-r--r--drivers/char/ipmi/ipmi_si_mem_io.c144
-rw-r--r--drivers/char/ipmi/ipmi_si_parisc.c58
-rw-r--r--drivers/char/ipmi/ipmi_si_pci.c166
-rw-r--r--drivers/char/ipmi/ipmi_si_platform.c593
-rw-r--r--drivers/char/ipmi/ipmi_si_port_io.c112
-rw-r--r--drivers/char/ipmi/ipmi_si_sm.h23
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c112
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c11
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c6
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c7
-rw-r--r--drivers/char/pcmcia/synclink_cs.c8
-rw-r--r--drivers/char/random.c5
-rw-r--r--drivers/char/tpm/tpm-dev-common.c6
-rw-r--r--drivers/char/tpm/tpm-sysfs.c87
-rw-r--r--drivers/char/tpm/tpm.h15
-rw-r--r--drivers/char/tpm/tpm2-cmd.c73
-rw-r--r--drivers/char/tpm/tpm2-space.c4
-rw-r--r--drivers/char/tpm/tpm_crb.c59
-rw-r--r--drivers/char/tpm/tpm_tis.c5
-rw-r--r--drivers/char/tpm/tpm_tis_core.c6
-rw-r--r--drivers/char/tpm/tpm_tis_core.h4
-rw-r--r--drivers/char/tpm/tpm_tis_spi.c73
-rw-r--r--drivers/clocksource/arm_arch_timer.c25
-rw-r--r--drivers/clocksource/bcm2835_timer.c2
-rw-r--r--drivers/cpufreq/arm_big_little.c16
-rw-r--r--drivers/cpufreq/arm_big_little.h4
-rw-r--r--drivers/cpufreq/arm_big_little_dt.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c3
-rw-r--r--drivers/cpufreq/cpufreq-dt.c12
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cpufreq/cpufreq_stats.c7
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c85
-rw-r--r--drivers/cpufreq/powernow-k8.c2
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c191
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c2
-rw-r--r--drivers/cpufreq/spear-cpufreq.c4
-rw-r--r--drivers/cpufreq/speedstep-lib.c2
-rw-r--r--drivers/cpufreq/ti-cpufreq.c6
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c2
-rw-r--r--drivers/cpuidle/cpuidle-arm.c153
-rw-r--r--drivers/cpuidle/cpuidle.c14
-rw-r--r--drivers/cpuidle/governors/ladder.c7
-rw-r--r--drivers/cpuidle/governors/menu.c4
-rw-r--r--drivers/crypto/Kconfig40
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/amcc/Makefile2
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c512
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c831
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h199
-rw-r--r--drivers/crypto/amcc/crypto4xx_reg_def.h3
-rw-r--r--drivers/crypto/amcc/crypto4xx_sa.c85
-rw-r--r--drivers/crypto/amcc/crypto4xx_sa.h99
-rw-r--r--drivers/crypto/atmel-aes.c80
-rw-r--r--drivers/crypto/atmel-sha.c5
-rw-r--r--drivers/crypto/atmel-tdes.c23
-rw-r--r--drivers/crypto/bcm/cipher.c116
-rw-r--r--drivers/crypto/bcm/cipher.h3
-rw-r--r--drivers/crypto/bcm/util.c14
-rw-r--r--drivers/crypto/caam/caamalg.c10
-rw-r--r--drivers/crypto/caam/caamalg_qi.c7
-rw-r--r--drivers/crypto/caam/caamhash.c12
-rw-r--r--drivers/crypto/caam/compat.h1
-rw-r--r--drivers/crypto/caam/desc.h2
-rw-r--r--drivers/crypto/caam/jr.c4
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-galois.c9
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c8
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c3
-rw-r--r--drivers/crypto/ccp/ccp-dev.c7
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c5
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c1798
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h57
-rw-r--r--drivers/crypto/chelsio/chcr_core.c10
-rw-r--r--drivers/crypto/chelsio/chcr_core.h2
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h121
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c6
-rw-r--r--drivers/crypto/ixp4xx_crypto.c1
-rw-r--r--drivers/crypto/marvell/cesa.c29
-rw-r--r--drivers/crypto/marvell/cesa.h27
-rw-r--r--drivers/crypto/marvell/cipher.c476
-rw-r--r--drivers/crypto/marvell/tdma.c5
-rw-r--r--drivers/crypto/mediatek/mtk-aes.c39
-rw-r--r--drivers/crypto/mv_cesa.c1216
-rw-r--r--drivers/crypto/mv_cesa.h151
-rw-r--r--drivers/crypto/n2_core.c12
-rw-r--r--drivers/crypto/nx/nx-842-powernv.c2
-rw-r--r--drivers/crypto/nx/nx-842-pseries.c2
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c9
-rw-r--r--drivers/crypto/nx/nx.c2
-rw-r--r--drivers/crypto/omap-aes-gcm.c11
-rw-r--r--drivers/crypto/omap-aes.c12
-rw-r--r--drivers/crypto/omap-des.c7
-rw-r--r--drivers/crypto/omap-sham.c7
-rw-r--r--drivers/crypto/padlock-aes.c2
-rw-r--r--drivers/crypto/padlock-sha.c2
-rw-r--r--drivers/crypto/qat/qat_common/adf_dev_mgr.c3
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c18
-rw-r--r--drivers/crypto/qat/qat_common/qat_uclo.c15
-rw-r--r--drivers/crypto/qce/ablkcipher.c5
-rw-r--r--drivers/crypto/qce/sha.c30
-rw-r--r--drivers/crypto/s5p-sss.c1596
-rw-r--r--drivers/crypto/stm32/stm32-hash.c20
-rw-r--r--drivers/crypto/talitos.c582
-rw-r--r--drivers/crypto/talitos.h7
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c1
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c2
-rw-r--r--drivers/crypto/vmx/aes_ctr.c33
-rw-r--r--drivers/devfreq/devfreq.c139
-rw-r--r--drivers/devfreq/exynos-bus.c5
-rw-r--r--drivers/devfreq/governor_passive.c2
-rw-r--r--drivers/devfreq/governor_performance.c2
-rw-r--r--drivers/devfreq/governor_powersave.c2
-rw-r--r--drivers/devfreq/governor_simpleondemand.c2
-rw-r--r--drivers/devfreq/governor_userspace.c2
-rw-r--r--drivers/devfreq/rk3399_dmc.c2
-rw-r--r--drivers/dma-buf/dma-buf.c2
-rw-r--r--drivers/dma-buf/reservation.c56
-rw-r--r--drivers/dma-buf/sw_sync.c10
-rw-r--r--drivers/dma/Kconfig31
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/at_hdmac_regs.h2
-rw-r--r--drivers/dma/bcm-sba-raid.c117
-rw-r--r--drivers/dma/coh901318.c6
-rw-r--r--drivers/dma/dma-axi-dmac.c75
-rw-r--r--drivers/dma/dmatest.c1
-rw-r--r--drivers/dma/edma.c5
-rw-r--r--drivers/dma/img-mdc-dma.c98
-rw-r--r--drivers/dma/imx-dma.c8
-rw-r--r--drivers/dma/imx-sdma.c14
-rw-r--r--drivers/dma/ioat/dma.c6
-rw-r--r--drivers/dma/ioat/dma.h3
-rw-r--r--drivers/dma/ioat/init.c2
-rw-r--r--drivers/dma/nbpfaxi.c5
-rw-r--r--drivers/dma/omap-dma.c5
-rw-r--r--drivers/dma/pch_dma.c12
-rw-r--r--drivers/dma/pl330.c39
-rw-r--r--drivers/dma/qcom/bam_dma.c169
-rw-r--r--drivers/dma/sa11x0-dma.c11
-rw-r--r--drivers/dma/sprd-dma.c988
-rw-r--r--drivers/dma/stm32-dmamux.c327
-rw-r--r--drivers/dma/stm32-mdma.c1682
-rw-r--r--drivers/dma/sun6i-dma.c257
-rw-r--r--drivers/dma/ti-dma-crossbar.c8
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c14
-rw-r--r--drivers/edac/altera_edac.c10
-rw-r--r--drivers/edac/amd64_edac.c5
-rw-r--r--drivers/edac/edac_mc.c7
-rw-r--r--drivers/edac/edac_mc.h8
-rw-r--r--drivers/edac/edac_mc_sysfs.c2
-rw-r--r--drivers/edac/edac_module.c3
-rw-r--r--drivers/edac/ghes_edac.c137
-rw-r--r--drivers/edac/i7core_edac.c11
-rw-r--r--drivers/edac/pnd2_edac.c9
-rw-r--r--drivers/edac/sb_edac.c43
-rw-r--r--drivers/edac/skx_edac.c30
-rw-r--r--drivers/edac/thunderx_edac.c25
-rw-r--r--drivers/extcon/extcon-adc-jack.c2
-rw-r--r--drivers/extcon/extcon-arizona.c2
-rw-r--r--drivers/extcon/extcon-axp288.c2
-rw-r--r--drivers/extcon/extcon-gpio.c2
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c2
-rw-r--r--drivers/extcon/extcon-intel-int3496.c2
-rw-r--r--drivers/extcon/extcon-max14577.c4
-rw-r--r--drivers/extcon/extcon-max3355.c2
-rw-r--r--drivers/extcon/extcon-max77693.c2
-rw-r--r--drivers/extcon/extcon-max77843.c95
-rw-r--r--drivers/extcon/extcon-max8997.c2
-rw-r--r--drivers/extcon/extcon-qcom-spmi-misc.c2
-rw-r--r--drivers/extcon/extcon-rt8973a.c2
-rw-r--r--drivers/extcon/extcon-sm5502.c2
-rw-r--r--drivers/extcon/extcon-usb-gpio.c2
-rw-r--r--drivers/extcon/extcon-usbc-cros-ec.c2
-rw-r--r--drivers/extcon/extcon.c2
-rw-r--r--drivers/extcon/extcon.h2
-rw-r--r--drivers/firewire/ohci.c10
-rw-r--r--drivers/firmware/tegra/ivc.c24
-rw-r--r--drivers/fpga/xilinx-pr-decoupler.c2
-rw-r--r--drivers/fsi/fsi-core.c6
-rw-r--r--drivers/gpio/Kconfig41
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c2
-rw-r--r--drivers/gpio/gpio-104-idi-48.c2
-rw-r--r--drivers/gpio/gpio-104-idio-16.c2
-rw-r--r--drivers/gpio/gpio-adnp.c31
-rw-r--r--drivers/gpio/gpio-altera.c4
-rw-r--r--drivers/gpio/gpio-aspeed.c13
-rw-r--r--drivers/gpio/gpio-ath79.c3
-rw-r--r--drivers/gpio/gpio-brcmstb.c422
-rw-r--r--drivers/gpio/gpio-crystalcove.c2
-rw-r--r--drivers/gpio/gpio-dln2.c2
-rw-r--r--drivers/gpio/gpio-dwapb.c36
-rw-r--r--drivers/gpio/gpio-em.c4
-rw-r--r--drivers/gpio/gpio-ftgpio010.c2
-rw-r--r--drivers/gpio/gpio-grgpio.c6
-rw-r--r--drivers/gpio/gpio-ingenic.c2
-rw-r--r--drivers/gpio/gpio-intel-mid.c2
-rw-r--r--drivers/gpio/gpio-loongson1.c7
-rw-r--r--drivers/gpio/gpio-lynxpoint.c2
-rw-r--r--drivers/gpio/gpio-max3191x.c492
-rw-r--r--drivers/gpio/gpio-max732x.c2
-rw-r--r--drivers/gpio/gpio-mb86s7x.c12
-rw-r--r--drivers/gpio/gpio-merrifield.c2
-rw-r--r--drivers/gpio/gpio-mmio.c130
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c23
-rw-r--r--drivers/gpio/gpio-omap.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-pcf857x.c2
-rw-r--r--drivers/gpio/gpio-pci-idio-16.c2
-rw-r--r--drivers/gpio/gpio-pl061.c2
-rw-r--r--drivers/gpio/gpio-pxa.c14
-rw-r--r--drivers/gpio/gpio-rcar.c28
-rw-r--r--drivers/gpio/gpio-reg.c4
-rw-r--r--drivers/gpio/gpio-stmpe.c10
-rw-r--r--drivers/gpio/gpio-tb10x.c3
-rw-r--r--drivers/gpio/gpio-tc3589x.c2
-rw-r--r--drivers/gpio/gpio-tegra.c4
-rw-r--r--drivers/gpio/gpio-tegra186.c620
-rw-r--r--drivers/gpio/gpio-thunderx.c13
-rw-r--r--drivers/gpio/gpio-tz1090.c4
-rw-r--r--drivers/gpio/gpio-uniphier.c508
-rw-r--r--drivers/gpio/gpio-vf610.c2
-rw-r--r--drivers/gpio/gpio-wcove.c2
-rw-r--r--drivers/gpio/gpio-ws16c48.c2
-rw-r--r--drivers/gpio/gpio-xgene-sb.c23
-rw-r--r--drivers/gpio/gpio-xlp.c2
-rw-r--r--drivers/gpio/gpio-zx.c2
-rw-r--r--drivers/gpio/gpio-zynq.c2
-rw-r--r--drivers/gpio/gpiolib-of.c4
-rw-r--r--drivers/gpio/gpiolib.c599
-rw-r--r--drivers/gpio/gpiolib.h6
-rw-r--r--drivers/gpu/drm/Kconfig7
-rw-r--r--drivers/gpu/drm/Makefile8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h121
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c161
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c153
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c390
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c114
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c265
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c134
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c329
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c104
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h (renamed from drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h)18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c500
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h191
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c688
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_dpm.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c349
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c373
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c104
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c499
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vid.h10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_int.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c115
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c459
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c50
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c615
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.h18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c83
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c34
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c29
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c90
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h128
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c175
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c73
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h129
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h15
-rw-r--r--drivers/gpu/drm/amd/include/atombios.h1
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h19
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h32
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h6
-rw-r--r--drivers/gpu/drm/amd/include/linux/chash.h366
-rw-r--r--drivers/gpu/drm/amd/include/v9_structs.h12
-rw-r--r--drivers/gpu/drm/amd/include/vi_structs.h259
-rw-r--r--drivers/gpu/drm/amd/lib/Kconfig28
-rw-r--r--drivers/gpu/drm/amd/lib/Makefile11
-rw-r--r--drivers/gpu/drm/amd/lib/chash.c638
-rw-r--r--drivers/gpu/drm/amd/powerplay/Makefile5
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c528
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/Makefile11
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c291
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h62
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c195
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c215
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h59
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c104
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c410
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h100
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c445
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h89
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/psm.c119
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c46
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c640
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c161
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c151
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c417
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c2489
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c250
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h (renamed from drivers/gpu/drm/amd/powerplay/eventmgr/psm.h)28
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c88
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c20
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c157
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c510
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c66
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c473
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h9
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c110
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c183
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c499
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c129
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c35
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c281
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h14
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h152
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/eventmanager.h109
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/eventmgr.h124
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h10299
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h200
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h11792
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/pp_instance.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h211
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/Makefile6
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c)2198
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h78
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c308
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c2498
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h53
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c2537
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h9
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c2567
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c2364
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c2380
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c130
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c263
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h40
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c261
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c3275
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h62
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c3181
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h20
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c194
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h16
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c68
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h19
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c3
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c3
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c3
-rw-r--r--drivers/gpu/drm/armada/Makefile2
-rw-r--r--drivers/gpu/drm/armada/armada_510.c1
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c22
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c5
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c9
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c7
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c49
-rw-r--r--drivers/gpu/drm/armada/armada_gem.h4
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c4
-rw-r--r--drivers/gpu/drm/armada/armada_trace.h2
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h1
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c2
-rw-r--r--drivers/gpu/drm/bridge/Kconfig10
-rw-r--r--drivers/gpu/drm/bridge/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig8
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h45
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c337
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c140
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c38
-rw-r--r--drivers/gpu/drm/bridge/panel.c10
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c994
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c96
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c108
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c2
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c45
-rw-r--r--drivers/gpu/drm/drm_atomic.c36
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c325
-rw-r--r--drivers/gpu/drm/drm_auth.c32
-rw-r--r--drivers/gpu/drm/drm_bridge.c7
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c4
-rw-r--r--drivers/gpu/drm/drm_connector.c26
-rw-r--r--drivers/gpu/drm/drm_crtc.c15
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c4
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h1
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c2
-rw-r--r--drivers/gpu/drm/drm_dp_dual_mode_helper.c16
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c7
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c74
-rw-r--r--drivers/gpu/drm/drm_drv.c54
-rw-r--r--drivers/gpu/drm/drm_edid.c15
-rw-r--r--drivers/gpu/drm/drm_edid_load.c16
-rw-r--r--drivers/gpu/drm/drm_encoder.c7
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c77
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c8
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c10
-rw-r--r--drivers/gpu/drm/drm_gem.c17
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c86
-rw-r--r--drivers/gpu/drm/drm_internal.h7
-rw-r--r--drivers/gpu/drm/drm_ioctl.c8
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c28
-rw-r--r--drivers/gpu/drm/drm_lease.c767
-rw-r--r--drivers/gpu/drm/drm_mode_config.c23
-rw-r--r--drivers/gpu/drm/drm_mode_object.c37
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c98
-rw-r--r--drivers/gpu/drm/drm_of.c2
-rw-r--r--drivers/gpu/drm/drm_pci.c2
-rw-r--r--drivers/gpu/drm/drm_plane.c89
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c2
-rw-r--r--drivers/gpu/drm/drm_prime.c4
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c21
-rw-r--r--drivers/gpu/drm/drm_property.c6
-rw-r--r--drivers/gpu/drm/drm_scdc_helper.c12
-rw-r--r--drivers/gpu/drm/drm_syncobj.c93
-rw-r--r--drivers/gpu/drm/drm_trace.h2
-rw-r--r--drivers/gpu/drm/drm_vblank.c457
-rw-r--r--drivers/gpu/drm/etnaviv/Kconfig2
-rw-r--r--drivers/gpu/drm/etnaviv/Makefile3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c36
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c15
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c43
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c25
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c69
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c217
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h13
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.c197
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.h7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c120
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c106
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.h36
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c495
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.h49
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c310
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c460
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h8
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c6
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c9
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c2
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c3
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c7
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c2
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/Makefile10
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c26
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c30
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c37
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c127
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h14
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c44
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c47
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c115
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c638
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c95
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h368
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c649
-rw-r--r--drivers/gpu/drm/i915/i915_gem_clflush.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c123
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c47
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c91
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c728
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h66
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h42
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c52
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.h48
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c131
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c21
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c148
-rw-r--r--drivers/gpu/drm/i915/i915_gemfs.c74
-rw-r--r--drivers/gpu/drm/i915/i915_gemfs.h34
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c43
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c476
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.h80
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1101
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt2.c109
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt2.h34
-rw-r--r--drivers/gpu/drm/i915/i915_params.c207
-rw-r--r--drivers/gpu/drm/i915/i915_params.h85
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c341
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c14
-rw-r--r--drivers/gpu/drm/i915/i915_pvinfo.h1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h89
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c45
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c76
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h118
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h18
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h6
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c144
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h49
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c49
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c2
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c392
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c20
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c318
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c136
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c39
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c1039
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c41
-rw-r--r--drivers/gpu/drm/i915/intel_display.c747
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c293
-rw-r--r--drivers/gpu/drm/i915/intel_dp_aux_backlight.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c76
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h157
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c76
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c45
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c534
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c41
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c7
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c14
-rw-r--r--drivers/gpu/drm/i915/intel_guc.c369
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h120
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.c (renamed from drivers/gpu/drm/i915/intel_guc_loader.c)252
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.h33
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h62
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.c32
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.h59
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c12
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c183
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c172
-rw-r--r--drivers/gpu/drm/i915/intel_huc.h41
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c707
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h37
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c22
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c51
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c2
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c8
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.c80
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1244
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c450
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c194
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h184
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c128
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c40
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c6
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c39
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c16
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c363
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h247
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.c318
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.h121
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c268
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h18
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h339
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_gem_object.c14
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c1734
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c162
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c15
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_object.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_request.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_timeline.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.h5
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_sw_fence.c42
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c50
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c78
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.h42
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_context.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_engine.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c20
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c11
-rw-r--r--drivers/gpu/drm/i915/selftests/scatterlist.c15
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c3
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c6
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c2
-rw-r--r--drivers/gpu/drm/msm/Makefile4
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c261
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h109
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c9
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c305
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c78
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c297
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h66
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c8
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c30
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c2
-rw-r--r--drivers/gpu/drm/msm/edp/edp_ctrl.c22
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c10
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c7
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c160
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c71
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h7
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c232
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c41
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c83
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h37
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c2
-rw-r--r--drivers/gpu/drm/msm/msm_fence.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c22
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h5
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c37
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c235
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h51
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c142
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c36
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.h33
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c152
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c5
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl506e.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl506f.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl826e.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl826f.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl906f.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cla06f.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0008.h42
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if000a.h22
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if000b.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if000c.h64
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if000d.h12
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if500b.h25
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if500d.h21
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if900b.h23
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if900d.h21
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ifb00d.h27
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ifc00d.h21
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/mem.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/mmu.h56
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/os.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/vmm.h42
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/engine.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/memory.h64
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/mm.h32
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/object.h41
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h31
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/os.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h36
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h140
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c391
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c51
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c161
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h33
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c58
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c123
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c198
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.h30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c54
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c280
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.c135
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.h31
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvif/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvif/mem.c88
-rw-r--r--drivers/gpu/drm/nouveau/nvif/mmu.c117
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvif/vmm.c167
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/client.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/engine.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ioctl.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/memory.c100
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/mm.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/object.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/oproxy.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/ramht.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/falcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c51
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/v1.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c151
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c174
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c144
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c94
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c266
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c61
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c329
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c696
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c228
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c41
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h)49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h)37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.0
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c242
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c94
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c69
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c88
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c122
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c104
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c190
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c231
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c192
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c178
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c352
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c1513
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h310
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c403
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c102
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c185
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c347
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c140
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c113
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c230
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c385
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c56
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c8
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c18
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Kconfig8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c62
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c381
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.h4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h1
-rw-r--r--drivers/gpu/drm/panel/Kconfig33
-rw-r--r--drivers/gpu/drm/panel/Makefile4
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c491
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c514
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c532
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c372
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c51
-rw-r--r--drivers/gpu/drm/pl111/Kconfig3
-rw-r--r--drivers/gpu/drm/pl111/Makefile4
-rw-r--r--drivers/gpu/drm/pl111/pl111_connector.c126
-rw-r--r--drivers/gpu/drm/pl111/pl111_debugfs.c6
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c79
-rw-r--r--drivers/gpu/drm/pl111/pl111_drm.h37
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c149
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c270
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.h9
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c22
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c49
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h28
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c13
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c5
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c4
-rw-r--r--drivers/gpu/drm/radeon/Makefile5
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c46
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c22
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.h1
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c21
-rw-r--r--drivers/gpu/drm/radeon/cik.c14
-rw-r--r--drivers/gpu/drm/radeon/cikd.h2
-rw-r--r--drivers/gpu/drm/radeon/r100.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c870
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c35
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c3
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig9
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c40
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c9
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c2
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c86
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c19
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c18
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c32
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c586
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.h114
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.c3
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c3
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c6
-rw-r--r--drivers/gpu/drm/stm/drv.c3
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c8
-rw-r--r--drivers/gpu/drm/stm/ltdc.c16
-rw-r--r--drivers/gpu/drm/stm/ltdc.h2
-rw-r--r--drivers/gpu/drm/sun4i/Makefile33
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c113
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.h3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c32
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c76
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h112
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c39
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c277
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c227
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c69
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c29
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c466
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h30
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c12
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/dc.c84
-rw-r--r--drivers/gpu/drm/tegra/dc.h120
-rw-r--r--drivers/gpu/drm/tegra/drm.c33
-rw-r--r--drivers/gpu/drm/tegra/drm.h106
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c2
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c2
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c17
-rw-r--r--drivers/gpu/drm/tegra/output.c6
-rw-r--r--drivers/gpu/drm/tegra/sor.c5
-rw-r--r--drivers/gpu/drm/tegra/vic.c22
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c57
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c5
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c16
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c4
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c3
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c6
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c22
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c7
-rw-r--r--drivers/gpu/drm/tinydrm/repaper.c45
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c25
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c43
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c405
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c256
-rw-r--r--drivers/gpu/drm/tve200/Kconfig16
-rw-r--r--drivers/gpu/drm/tve200/Makefile4
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c338
-rw-r--r--drivers/gpu/drm/tve200/tve200_drm.h126
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c303
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c156
-rw-r--r--drivers/gpu/drm/udl/udl_connector.h13
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c4
-rw-r--r--drivers/gpu/drm/udl/udl_main.c5
-rw-r--r--drivers/gpu/drm/vc4/Makefile2
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c287
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c17
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c10
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h30
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c144
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c156
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c40
-rw-r--r--drivers/gpu/drm/vc4/vc4_trace.h2
-rw-r--r--drivers/gpu/drm/via/via_verifier.c10
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c2
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c3
-rw-r--r--drivers/gpu/host1x/Makefile3
-rw-r--r--drivers/gpu/host1x/bus.c4
-rw-r--r--drivers/gpu/host1x/channel.c3
-rw-r--r--drivers/gpu/host1x/debug.c14
-rw-r--r--drivers/gpu/host1x/debug.h14
-rw-r--r--drivers/gpu/host1x/dev.c72
-rw-r--r--drivers/gpu/host1x/dev.h19
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c49
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c24
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c240
-rw-r--r--drivers/gpu/host1x/hw/debug_hw_1x01.c154
-rw-r--r--drivers/gpu/host1x/hw/debug_hw_1x06.c135
-rw-r--r--drivers/gpu/host1x/hw/host1x01.c2
-rw-r--r--drivers/gpu/host1x/hw/host1x02.c2
-rw-r--r--drivers/gpu/host1x/hw/host1x04.c2
-rw-r--r--drivers/gpu/host1x/hw/host1x05.c2
-rw-r--r--drivers/gpu/host1x/hw/host1x06.c44
-rw-r--r--drivers/gpu/host1x/hw/host1x06.h26
-rw-r--r--drivers/gpu/host1x/hw/host1x06_hardware.h142
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x04_channel.h12
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x05_channel.h12
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x06_hypervisor.h32
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x06_uclass.h181
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x06_vm.h47
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c29
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c46
-rw-r--r--drivers/gpu/host1x/syncpt.c24
-rw-r--r--drivers/gpu/host1x/syncpt.h2
-rw-r--r--drivers/gpu/vga/vgaarb.c72
-rw-r--r--drivers/hid/Kconfig5
-rw-r--r--drivers/hid/hid-alps.c520
-rw-r--r--drivers/hid/hid-asus.c31
-rw-r--r--drivers/hid/hid-core.c8
-rw-r--r--drivers/hid/hid-cp2112.c10
-rw-r--r--drivers/hid/hid-hyperv.c2
-rw-r--r--drivers/hid/hid-ids.h10
-rw-r--r--drivers/hid/hid-input.c9
-rw-r--r--drivers/hid/hid-lg.c4
-rw-r--r--drivers/hid/hid-lg4ff.c4
-rw-r--r--drivers/hid/hid-logitech-hidpp.c3
-rw-r--r--drivers/hid/hid-magicmouse.c3
-rw-r--r--drivers/hid/hid-multitouch.c52
-rw-r--r--drivers/hid/hid-rmi.c13
-rw-r--r--drivers/hid/hid-sony.c14
-rw-r--r--drivers/hid/hid-tmff.c2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c7
-rw-r--r--drivers/hid/usbhid/hid-core.c8
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/wacom_sys.c7
-rw-r--r--drivers/hid/wacom_wac.c44
-rw-r--r--drivers/hid/wacom_wac.h3
-rw-r--r--drivers/hsi/clients/hsi_char.c4
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c6
-rw-r--r--drivers/hv/Makefile4
-rw-r--r--drivers/hv/channel.c23
-rw-r--r--drivers/hv/channel_mgmt.c36
-rw-r--r--drivers/hv/connection.c7
-rw-r--r--drivers/hv/hv_trace.c4
-rw-r--r--drivers/hv/hv_trace.h327
-rw-r--r--drivers/hv/hyperv_vmbus.h4
-rw-r--r--drivers/hv/vmbus_drv.c211
-rw-r--r--drivers/hwmon/Kconfig15
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/asc7621.c1
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c8
-rw-r--r--drivers/hwmon/gpio-fan.c224
-rw-r--r--drivers/hwmon/k10temp.c108
-rw-r--r--drivers/hwmon/max1619.c10
-rw-r--r--drivers/hwmon/max6621.c593
-rw-r--r--drivers/hwmon/pmbus/Kconfig10
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/max31785.c116
-rw-r--r--drivers/hwmon/pmbus/pmbus.h6
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c25
-rw-r--r--drivers/hwmon/sht15.c175
-rw-r--r--drivers/hwmon/stts751.c18
-rw-r--r--drivers/hwmon/w83793.c4
-rw-r--r--drivers/hwmon/xgene-hwmon.c39
-rw-r--r--drivers/hwtracing/coresight/coresight-dynamic-replicator.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c24
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c4
-rw-r--r--drivers/hwtracing/stm/policy.c10
-rw-r--r--drivers/i2c/busses/Kconfig5
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c12
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c51
-rw-r--r--drivers/i2c/busses/i2c-davinci.c69
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c71
-rw-r--r--drivers/i2c/busses/i2c-gpio.c212
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c104
-rw-r--r--drivers/i2c/busses/i2c-mpc.c4
-rw-r--r--drivers/i2c/busses/i2c-omap.c25
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c1
-rw-r--r--drivers/i2c/busses/i2c-parport.c1
-rw-r--r--drivers/i2c/busses/i2c-pnx.c8
-rw-r--r--drivers/i2c/busses/i2c-riic.c115
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c8
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c3
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c9
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c30
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c44
-rw-r--r--drivers/i2c/i2c-core-base.c34
-rw-r--r--drivers/i2c/i2c-core-smbus.c55
-rw-r--r--drivers/i2c/i2c-smbus.c81
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c95
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c9
-rw-r--r--drivers/ide/Kconfig2
-rw-r--r--drivers/ide/ide-atapi.c6
-rw-r--r--drivers/ide/ide-pm.c4
-rw-r--r--drivers/ide/ide.c4
-rw-r--r--drivers/idle/intel_idle.c23
-rw-r--r--drivers/iio/accel/Kconfig15
-rw-r--r--drivers/iio/accel/Makefile2
-rw-r--r--drivers/iio/accel/adxl345_core.c1
-rw-r--r--drivers/iio/accel/bma180.c2
-rw-r--r--drivers/iio/accel/bma220_spi.c1
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c3
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c423
-rw-r--r--drivers/iio/accel/da280.c1
-rw-r--r--drivers/iio/accel/da311.c1
-rw-r--r--drivers/iio/accel/dmard06.c1
-rw-r--r--drivers/iio/accel/dmard09.c1
-rw-r--r--drivers/iio/accel/dmard10.c1
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c1
-rw-r--r--drivers/iio/accel/kxcjk-1013.c356
-rw-r--r--drivers/iio/accel/kxsd9.c1
-rw-r--r--drivers/iio/accel/mc3230.c1
-rw-r--r--drivers/iio/accel/mma7455_core.c1
-rw-r--r--drivers/iio/accel/mma7660.c1
-rw-r--r--drivers/iio/accel/mma8452.c383
-rw-r--r--drivers/iio/accel/mma9551.c1
-rw-r--r--drivers/iio/accel/mma9553.c1
-rw-r--r--drivers/iio/accel/mxc4005.c2
-rw-r--r--drivers/iio/accel/mxc6255.c1
-rw-r--r--drivers/iio/accel/sca3000.c1
-rw-r--r--drivers/iio/accel/st_accel.h4
-rw-r--r--drivers/iio/accel/st_accel_core.c248
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/accel/st_accel_spi.c10
-rw-r--r--drivers/iio/accel/stk8312.c2
-rw-r--r--drivers/iio/accel/stk8ba50.c2
-rw-r--r--drivers/iio/adc/Kconfig7
-rw-r--r--drivers/iio/adc/ad7266.c1
-rw-r--r--drivers/iio/adc/ad7291.c1
-rw-r--r--drivers/iio/adc/ad7298.c1
-rw-r--r--drivers/iio/adc/ad7476.c1
-rw-r--r--drivers/iio/adc/ad7766.c2
-rw-r--r--drivers/iio/adc/ad7791.c2
-rw-r--r--drivers/iio/adc/ad7793.c2
-rw-r--r--drivers/iio/adc/ad7887.c1
-rw-r--r--drivers/iio/adc/ad7923.c1
-rw-r--r--drivers/iio/adc/ad799x.c3
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c1
-rw-r--r--drivers/iio/adc/aspeed_adc.c1
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c2
-rw-r--r--drivers/iio/adc/at91_adc.c2
-rw-r--r--drivers/iio/adc/axp20x_adc.c2
-rw-r--r--drivers/iio/adc/axp288_adc.c1
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c1
-rw-r--r--drivers/iio/adc/berlin2-adc.c1
-rw-r--r--drivers/iio/adc/cc10001_adc.c1
-rw-r--r--drivers/iio/adc/cpcap-adc.c1
-rw-r--r--drivers/iio/adc/da9150-gpadc.c1
-rw-r--r--drivers/iio/adc/dln2-adc.c6
-rw-r--r--drivers/iio/adc/envelope-detector.c1
-rw-r--r--drivers/iio/adc/ep93xx_adc.c1
-rw-r--r--drivers/iio/adc/exynos_adc.c1
-rw-r--r--drivers/iio/adc/hi8435.c1
-rw-r--r--drivers/iio/adc/hx711.c1
-rw-r--r--drivers/iio/adc/imx7d_adc.c1
-rw-r--r--drivers/iio/adc/ina2xx-adc.c20
-rw-r--r--drivers/iio/adc/lp8788_adc.c1
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c1
-rw-r--r--drivers/iio/adc/lpc32xx_adc.c1
-rw-r--r--drivers/iio/adc/ltc2471.c1
-rw-r--r--drivers/iio/adc/ltc2485.c1
-rw-r--r--drivers/iio/adc/ltc2497.c1
-rw-r--r--drivers/iio/adc/max1027.c2
-rw-r--r--drivers/iio/adc/max11100.c1
-rw-r--r--drivers/iio/adc/max1118.c1
-rw-r--r--drivers/iio/adc/max1363.c2
-rw-r--r--drivers/iio/adc/max9611.c2
-rw-r--r--drivers/iio/adc/mcp320x.c235
-rw-r--r--drivers/iio/adc/mcp3422.c1
-rw-r--r--drivers/iio/adc/men_z188_adc.c1
-rw-r--r--drivers/iio/adc/meson_saradc.c1
-rw-r--r--drivers/iio/adc/mt6577_auxadc.c2
-rw-r--r--drivers/iio/adc/mxs-lradc-adc.c2
-rw-r--r--drivers/iio/adc/nau7802.c1
-rw-r--r--drivers/iio/adc/palmas_gpadc.c1
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c1
-rw-r--r--drivers/iio/adc/qcom-spmi-iadc.c1
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c1
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c8
-rw-r--r--drivers/iio/adc/rockchip_saradc.c1
-rw-r--r--drivers/iio/adc/spear_adc.c1
-rw-r--r--drivers/iio/adc/stm32-adc-core.c13
-rw-r--r--drivers/iio/adc/stm32-adc.c2
-rw-r--r--drivers/iio/adc/stx104.c1
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c45
-rw-r--r--drivers/iio/adc/ti-adc081c.c1
-rw-r--r--drivers/iio/adc/ti-adc0832.c1
-rw-r--r--drivers/iio/adc/ti-adc084s021.c1
-rw-r--r--drivers/iio/adc/ti-adc108s102.c1
-rw-r--r--drivers/iio/adc/ti-adc12138.c3
-rw-r--r--drivers/iio/adc/ti-adc128s052.c1
-rw-r--r--drivers/iio/adc/ti-adc161s626.c1
-rw-r--r--drivers/iio/adc/ti-ads1015.c22
-rw-r--r--drivers/iio/adc/ti-ads7950.c1
-rw-r--r--drivers/iio/adc/ti-ads8688.c2
-rw-r--r--drivers/iio/adc/ti-tlc4541.c1
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c1
-rw-r--r--drivers/iio/adc/twl4030-madc.c1
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c1
-rw-r--r--drivers/iio/adc/vf610_adc.c1
-rw-r--r--drivers/iio/adc/viperboard_adc.c1
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c2
-rw-r--r--drivers/iio/amplifiers/ad8366.c1
-rw-r--r--drivers/iio/chemical/ams-iaq-core.c1
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c2
-rw-r--r--drivers/iio/chemical/ccs811.c87
-rw-r--r--drivers/iio/chemical/vz89x.c1
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c1
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c13
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c59
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c16
-rw-r--r--drivers/iio/counter/104-quad-8.c1
-rw-r--r--drivers/iio/counter/stm32-lptimer-cnt.c1
-rw-r--r--drivers/iio/dac/Kconfig22
-rw-r--r--drivers/iio/dac/Makefile2
-rw-r--r--drivers/iio/dac/ad5064.c1
-rw-r--r--drivers/iio/dac/ad5360.c1
-rw-r--r--drivers/iio/dac/ad5380.c1
-rw-r--r--drivers/iio/dac/ad5421.c1
-rw-r--r--drivers/iio/dac/ad5446.c14
-rw-r--r--drivers/iio/dac/ad5449.c1
-rw-r--r--drivers/iio/dac/ad5504.c1
-rw-r--r--drivers/iio/dac/ad5592r-base.c1
-rw-r--r--drivers/iio/dac/ad5624r_spi.c1
-rw-r--r--drivers/iio/dac/ad5686.c1
-rw-r--r--drivers/iio/dac/ad5755.c1
-rw-r--r--drivers/iio/dac/ad5761.c1
-rw-r--r--drivers/iio/dac/ad5764.c1
-rw-r--r--drivers/iio/dac/ad5791.c1
-rw-r--r--drivers/iio/dac/ad7303.c1
-rw-r--r--drivers/iio/dac/ad8801.c1
-rw-r--r--drivers/iio/dac/cio-dac.c1
-rw-r--r--drivers/iio/dac/dpot-dac.c1
-rw-r--r--drivers/iio/dac/ds4424.c341
-rw-r--r--drivers/iio/dac/lpc18xx_dac.c1
-rw-r--r--drivers/iio/dac/ltc2632.c1
-rw-r--r--drivers/iio/dac/m62332.c1
-rw-r--r--drivers/iio/dac/max517.c1
-rw-r--r--drivers/iio/dac/max5821.c1
-rw-r--r--drivers/iio/dac/mcp4725.c1
-rw-r--r--drivers/iio/dac/mcp4922.c1
-rw-r--r--drivers/iio/dac/stm32-dac.c1
-rw-r--r--drivers/iio/dac/ti-dac082s085.c368
-rw-r--r--drivers/iio/dac/vf610_dac.c1
-rw-r--r--drivers/iio/dummy/Kconfig2
-rw-r--r--drivers/iio/dummy/iio_dummy_evgen.c89
-rw-r--r--drivers/iio/dummy/iio_simple_dummy.c3
-rw-r--r--drivers/iio/frequency/ad9523.c1
-rw-r--r--drivers/iio/frequency/adf4350.c1
-rw-r--r--drivers/iio/gyro/adis16080.c1
-rw-r--r--drivers/iio/gyro/adis16130.c1
-rw-r--r--drivers/iio/gyro/adis16136.c1
-rw-r--r--drivers/iio/gyro/adis16260.c1
-rw-r--r--drivers/iio/gyro/adxrs450.c1
-rw-r--r--drivers/iio/gyro/bmg160_core.c2
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c1
-rw-r--r--drivers/iio/gyro/itg3200_buffer.c1
-rw-r--r--drivers/iio/gyro/itg3200_core.c1
-rw-r--r--drivers/iio/gyro/mpu3050-core.c2
-rw-r--r--drivers/iio/gyro/st_gyro_core.c47
-rw-r--r--drivers/iio/health/afe4403.c2
-rw-r--r--drivers/iio/health/afe4404.c2
-rw-r--r--drivers/iio/health/max30100.c1
-rw-r--r--drivers/iio/health/max30102.c1
-rw-r--r--drivers/iio/humidity/am2315.c1
-rw-r--r--drivers/iio/humidity/dht11.c1
-rw-r--r--drivers/iio/humidity/hdc100x.c1
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c1
-rw-r--r--drivers/iio/humidity/hts221_buffer.c1
-rw-r--r--drivers/iio/humidity/hts221_core.c1
-rw-r--r--drivers/iio/humidity/htu21.c1
-rw-r--r--drivers/iio/humidity/si7005.c1
-rw-r--r--drivers/iio/humidity/si7020.c1
-rw-r--r--drivers/iio/imu/adis16400_core.c1
-rw-r--r--drivers/iio/imu/adis16480.c1
-rw-r--r--drivers/iio/imu/adis_trigger.c1
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c1
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c13
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c1
-rw-r--r--drivers/iio/imu/kmx61.c3
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h32
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c72
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c98
-rw-r--r--drivers/iio/industrialio-configfs.c2
-rw-r--r--drivers/iio/industrialio-core.c35
-rw-r--r--drivers/iio/industrialio-sw-device.c6
-rw-r--r--drivers/iio/industrialio-sw-trigger.c6
-rw-r--r--drivers/iio/industrialio-trigger.c35
-rw-r--r--drivers/iio/light/acpi-als.c1
-rw-r--r--drivers/iio/light/adjd_s311.c1
-rw-r--r--drivers/iio/light/al3320a.c1
-rw-r--r--drivers/iio/light/apds9300.c2
-rw-r--r--drivers/iio/light/apds9960.c1
-rw-r--r--drivers/iio/light/bh1750.c1
-rw-r--r--drivers/iio/light/bh1780.c1
-rw-r--r--drivers/iio/light/cm32181.c1
-rw-r--r--drivers/iio/light/cm3232.c1
-rw-r--r--drivers/iio/light/cm3323.c1
-rw-r--r--drivers/iio/light/cm3605.c1
-rw-r--r--drivers/iio/light/cm36651.c1
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c1
-rw-r--r--drivers/iio/light/gp2ap020a00f.c2
-rw-r--r--drivers/iio/light/hid-sensor-als.c1
-rw-r--r--drivers/iio/light/hid-sensor-prox.c1
-rw-r--r--drivers/iio/light/isl29018.c2
-rw-r--r--drivers/iio/light/isl29028.c1
-rw-r--r--drivers/iio/light/isl29125.c1
-rw-r--r--drivers/iio/light/jsa1212.c1
-rw-r--r--drivers/iio/light/lm3533-als.c1
-rw-r--r--drivers/iio/light/ltr501.c4
-rw-r--r--drivers/iio/light/max44000.c1
-rw-r--r--drivers/iio/light/opt3001.c1
-rw-r--r--drivers/iio/light/pa12203001.c1
-rw-r--r--drivers/iio/light/rpr0521.c2
-rw-r--r--drivers/iio/light/si1145.c3
-rw-r--r--drivers/iio/light/stk3310.c1
-rw-r--r--drivers/iio/light/tcs3414.c1
-rw-r--r--drivers/iio/light/tcs3472.c263
-rw-r--r--drivers/iio/light/tsl2563.c2
-rw-r--r--drivers/iio/light/tsl2583.c1
-rw-r--r--drivers/iio/light/tsl4531.c1
-rw-r--r--drivers/iio/light/us5182d.c1
-rw-r--r--drivers/iio/light/vcnl4000.c1
-rw-r--r--drivers/iio/light/veml6070.c1
-rw-r--r--drivers/iio/light/vl6180.c127
-rw-r--r--drivers/iio/magnetometer/ak8974.c1
-rw-r--r--drivers/iio/magnetometer/ak8975.c1
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c2
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c1
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c1
-rw-r--r--drivers/iio/magnetometer/mag3110.c1
-rw-r--r--drivers/iio/magnetometer/mmc35240.c1
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c22
-rw-r--r--drivers/iio/multiplexer/iio-mux.c7
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c1
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c1
-rw-r--r--drivers/iio/potentiometer/ds1803.c1
-rw-r--r--drivers/iio/potentiometer/max5481.c2
-rw-r--r--drivers/iio/potentiometer/max5487.c2
-rw-r--r--drivers/iio/potentiometer/mcp4131.c1
-rw-r--r--drivers/iio/potentiometer/mcp4531.c1
-rw-r--r--drivers/iio/potentiometer/tpl0102.c1
-rw-r--r--drivers/iio/potentiostat/lmp91000.c2
-rw-r--r--drivers/iio/pressure/abp060mg.c1
-rw-r--r--drivers/iio/pressure/bmp280-core.c1
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c3
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c1
-rw-r--r--drivers/iio/pressure/hp03.c1
-rw-r--r--drivers/iio/pressure/hp206c.c1
-rw-r--r--drivers/iio/pressure/mpl115.c1
-rw-r--r--drivers/iio/pressure/mpl3115.c1
-rw-r--r--drivers/iio/pressure/ms5611_core.c1
-rw-r--r--drivers/iio/pressure/ms5637.c1
-rw-r--r--drivers/iio/pressure/st_pressure.h4
-rw-r--r--drivers/iio/pressure/st_pressure_core.c78
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c10
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c10
-rw-r--r--drivers/iio/pressure/t5403.c1
-rw-r--r--drivers/iio/pressure/zpa2326.c2
-rw-r--r--drivers/iio/proximity/Kconfig10
-rw-r--r--drivers/iio/proximity/Makefile1
-rw-r--r--drivers/iio/proximity/as3935.c2
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c1
-rw-r--r--drivers/iio/proximity/rfd77402.c352
-rw-r--r--drivers/iio/proximity/srf04.c1
-rw-r--r--drivers/iio/proximity/srf08.c2
-rw-r--r--drivers/iio/proximity/sx9500.c2
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c1
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c1
-rw-r--r--drivers/iio/temperature/mlx90614.c1
-rw-r--r--drivers/iio/temperature/tmp006.c1
-rw-r--r--drivers/iio/temperature/tmp007.c1
-rw-r--r--drivers/iio/temperature/tsys01.c1
-rw-r--r--drivers/iio/temperature/tsys02d.c1
-rw-r--r--drivers/iio/trigger/iio-trig-hrtimer.c3
-rw-r--r--drivers/iio/trigger/iio-trig-interrupt.c1
-rw-r--r--drivers/iio/trigger/iio-trig-loop.c3
-rw-r--r--drivers/iio/trigger/iio-trig-sysfs.c1
-rw-r--r--drivers/iio/trigger/stm32-lptimer-trigger.c1
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c2
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/core/Makefile2
-rw-r--r--drivers/infiniband/core/addr.c29
-rw-r--r--drivers/infiniband/core/cm.c38
-rw-r--r--drivers/infiniband/core/cma.c19
-rw-r--r--drivers/infiniband/core/cma_configfs.c8
-rw-r--r--drivers/infiniband/core/iwcm.c3
-rw-r--r--drivers/infiniband/core/mad.c3
-rw-r--r--drivers/infiniband/core/rw.c24
-rw-r--r--drivers/infiniband/core/security.c66
-rw-r--r--drivers/infiniband/core/sysfs.c16
-rw-r--r--drivers/infiniband/core/umem_odp.c72
-rw-r--r--drivers/infiniband/core/umem_rbtree.c109
-rw-r--r--drivers/infiniband/core/user_mad.c13
-rw-r--r--drivers/infiniband/core/uverbs.h36
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c189
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c13
-rw-r--r--drivers/infiniband/core/uverbs_ioctl_merge.c2
-rw-r--r--drivers/infiniband/core/uverbs_main.c23
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c13
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c20
-rw-r--r--drivers/infiniband/core/verbs.c52
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c78
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c19
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c39
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c18
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h6
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c5
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/Kconfig2
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c18
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/Kconfig2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c330
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c127
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c69
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/id_table.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h95
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c268
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c66
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c186
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c46
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h29
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h4
-rw-r--r--drivers/infiniband/hw/hfi1/aspm.h7
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c385
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h6
-rw-r--r--drivers/infiniband/hw/hfi1/common.h1
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c80
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c22
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c486
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c113
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h35
-rw-r--r--drivers/infiniband/hw/hfi1/init.c53
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c57
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c144
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h4
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c24
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c23
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h6
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c7
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c13
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c44
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h2
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c2
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c27
-rw-r--r--drivers/infiniband/hw/hfi1/trace.h10
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ibhdrs.h49
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rx.h11
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c7
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c12
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c9
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c100
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h29
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c65
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h2
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c44
-rw-r--r--drivers/infiniband/hw/hns/Kconfig25
-rw-r--r--drivers/infiniband/hw/hns/Makefile8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c16
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c35
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c107
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h54
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h23
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c95
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h134
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_eq.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c719
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h33
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c609
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c3296
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h1177
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c384
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c692
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c20
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c226
-rw-r--r--drivers/infiniband/hw/i40iw/Kconfig1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c30
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c76
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h30
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c48
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_p.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c19
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_type.h13
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c76
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_user.h23
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c30
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c47
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c8
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c10
-rw-r--r--drivers/infiniband/hw/mlx4/main.c23
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c1
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h19
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c284
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c26
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c4
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c38
-rw-r--r--drivers/infiniband/hw/mlx5/main.c57
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h18
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c4
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c6
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c149
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c10
-rw-r--r--drivers/infiniband/hw/nes/nes.c33
-rw-r--r--drivers/infiniband/hw/nes/nes.h6
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c14
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c27
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h1
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.c9
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c12
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c24
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c22
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c15
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c14
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c4
-rw-r--r--drivers/infiniband/hw/qedr/Kconfig1
-rw-r--r--drivers/infiniband/hw/qedr/Makefile2
-rw-r--r--drivers/infiniband/hw/qedr/main.c118
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h31
-rw-r--r--drivers/infiniband/hw/qedr/qedr_hsi_rdma.h6
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c749
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.h49
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.c (renamed from drivers/infiniband/hw/qedr/qedr_cm.c)31
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.h (renamed from drivers/infiniband/hw/qedr/qedr_cm.h)0
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c359
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h2
-rw-r--r--drivers/infiniband/hw/qib/Kconfig1
-rw-r--r--drivers/infiniband/hw/qib/qib.h30
-rw-r--r--drivers/infiniband/hw/qib/qib_7220.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c81
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c95
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c200
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c34
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c16
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c128
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c9
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h25
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c25
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/Makefile2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h25
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h54
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c59
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c55
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c319
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h18
-rw-r--r--drivers/infiniband/sw/rdmavt/Kconfig1
-rw-r--r--drivers/infiniband/sw/rdmavt/mcast.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c21
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c16
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c11
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h16
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c56
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c135
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c29
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c17
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c14
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c42
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h22
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h7
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c44
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c1
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c22
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c90
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h3
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c333
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h9
-rw-r--r--drivers/input/ff-memless.c8
-rw-r--r--drivers/input/input.c11
-rw-r--r--drivers/input/joystick/gamecon.c3
-rw-r--r--drivers/input/joystick/sidewinder.c10
-rw-r--r--drivers/input/joystick/spaceball.c4
-rw-r--r--drivers/input/keyboard/bf54x-keys.c7
-rw-r--r--drivers/input/keyboard/gpio_keys.c7
-rw-r--r--drivers/input/keyboard/imx_keypad.c8
-rw-r--r--drivers/input/keyboard/locomokbd.c7
-rw-r--r--drivers/input/keyboard/omap-keypad.c6
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c7
-rw-r--r--drivers/input/keyboard/tegra-kbc.c6
-rw-r--r--drivers/input/misc/adxl34x.c2
-rw-r--r--drivers/input/misc/regulator-haptic.c2
-rw-r--r--drivers/input/misc/uinput.c305
-rw-r--r--drivers/input/mouse/alps.c8
-rw-r--r--drivers/input/mouse/byd.c10
-rw-r--r--drivers/input/mouse/elan_i2c_core.c11
-rw-r--r--drivers/input/mouse/gpio_mouse.c206
-rw-r--r--drivers/input/mouse/vmmouse.c10
-rw-r--r--drivers/input/rmi4/rmi_f54.c2
-rw-r--r--drivers/input/serio/hil_mlc.c4
-rw-r--r--drivers/input/serio/hp_sdc.c5
-rw-r--r--drivers/input/serio/ps2-gpio.c1
-rw-r--r--drivers/input/serio/sa1111ps2.c69
-rw-r--r--drivers/input/touchscreen/Kconfig36
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/ad7877.c6
-rw-r--r--drivers/input/touchscreen/ad7879.c19
-rw-r--r--drivers/input/touchscreen/atmel-wm97xx.c10
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c59
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c173
-rw-r--r--drivers/input/touchscreen/elants_i2c.c19
-rw-r--r--drivers/input/touchscreen/exc3000.c223
-rw-r--r--drivers/input/touchscreen/goodix.c125
-rw-r--r--drivers/input/touchscreen/hideep.c1120
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c17
-rw-r--r--drivers/input/touchscreen/mxs-lradc-ts.c2
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c18
-rw-r--r--drivers/input/touchscreen/rohm_bu21023.c17
-rw-r--r--drivers/input/touchscreen/s6sy761.c559
-rw-r--r--drivers/input/touchscreen/st1232.c16
-rw-r--r--drivers/input/touchscreen/stmfts.c4
-rw-r--r--drivers/input/touchscreen/tsc2007_iio.c1
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c6
-rw-r--r--drivers/input/touchscreen/w90p910_ts.c7
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c10
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c252
-rw-r--r--drivers/iommu/amd_iommu.c87
-rw-r--r--drivers/iommu/arm-smmu-v3.c214
-rw-r--r--drivers/iommu/arm-smmu.c31
-rw-r--r--drivers/iommu/dma-iommu.c24
-rw-r--r--drivers/iommu/dmar.c10
-rw-r--r--drivers/iommu/exynos-iommu.c23
-rw-r--r--drivers/iommu/intel-iommu.c28
-rw-r--r--drivers/iommu/intel-svm.c4
-rw-r--r--drivers/iommu/intel_irq_remapping.c43
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c7
-rw-r--r--drivers/iommu/io-pgtable-arm.c7
-rw-r--r--drivers/iommu/iova.c220
-rw-r--r--drivers/iommu/ipmmu-vmsa.c527
-rw-r--r--drivers/iommu/mtk_iommu.c7
-rw-r--r--drivers/iommu/mtk_iommu_v1.c2
-rw-r--r--drivers/iommu/omap-iommu.c375
-rw-r--r--drivers/iommu/omap-iommu.h30
-rw-r--r--drivers/iommu/qcom_iommu.c33
-rw-r--r--drivers/irqchip/Kconfig16
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/irq-aspeed-i2c-ic.c4
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c171
-rw-r--r--drivers/irqchip/irq-gic-common.c5
-rw-r--r--drivers/irqchip/irq-gic-common.h2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c390
-rw-r--r--drivers/irqchip/irq-gic-v3.c50
-rw-r--r--drivers/irqchip/irq-gic.c71
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c1
-rw-r--r--drivers/irqchip/irq-meson-gpio.c419
-rw-r--r--drivers/irqchip/irq-mips-gic.c226
-rw-r--r--drivers/irqchip/irq-omap-intc.c16
-rw-r--r--drivers/irqchip/irq-ompic.c202
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c9
-rw-r--r--drivers/irqchip/irq-sni-exiu.c227
-rw-r--r--drivers/irqchip/irq-stm32-exti.c206
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c2
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c2
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/isdn/hardware/mISDN/speedfax.c2
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c2
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/led-core.c7
-rw-r--r--drivers/leds/leds-apu.c278
-rw-r--r--drivers/leds/leds-lp5523.c2
-rw-r--r--drivers/leds/leds-pca955x.c17
-rw-r--r--drivers/leds/leds-tca6507.c2
-rw-r--r--drivers/leds/trigger/Kconfig9
-rw-r--r--drivers/leds/trigger/Makefile1
-rw-r--r--drivers/leds/trigger/ledtrig-activity.c275
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c16
-rw-r--r--drivers/leds/trigger/ledtrig-transient.c12
-rw-r--r--drivers/lightnvm/Kconfig3
-rw-r--r--drivers/lightnvm/core.c176
-rw-r--r--drivers/lightnvm/pblk-cache.c24
-rw-r--r--drivers/lightnvm/pblk-core.c512
-rw-r--r--drivers/lightnvm/pblk-gc.c289
-rw-r--r--drivers/lightnvm/pblk-init.c197
-rw-r--r--drivers/lightnvm/pblk-map.c28
-rw-r--r--drivers/lightnvm/pblk-rb.c30
-rw-r--r--drivers/lightnvm/pblk-read.c274
-rw-r--r--drivers/lightnvm/pblk-recovery.c129
-rw-r--r--drivers/lightnvm/pblk-rl.c43
-rw-r--r--drivers/lightnvm/pblk-sysfs.c2
-rw-r--r--drivers/lightnvm/pblk-write.c229
-rw-r--r--drivers/lightnvm/pblk.h132
-rw-r--r--drivers/macintosh/adb-iop.c4
-rw-r--r--drivers/mailbox/Kconfig3
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c66
-rw-r--r--drivers/mailbox/mailbox-test.c11
-rw-r--r--drivers/mailbox/mailbox.c4
-rw-r--r--drivers/mailbox/omap-mailbox.c6
-rw-r--r--drivers/mailbox/pcc.c5
-rw-r--r--drivers/md/Kconfig5
-rw-r--r--drivers/md/Makefile5
-rw-r--r--drivers/md/bcache/alloc.c15
-rw-r--r--drivers/md/bcache/bcache.h19
-rw-r--r--drivers/md/bcache/btree.c17
-rw-r--r--drivers/md/bcache/btree.h2
-rw-r--r--drivers/md/bcache/closure.h6
-rw-r--r--drivers/md/bcache/request.c36
-rw-r--r--drivers/md/bcache/super.c52
-rw-r--r--drivers/md/bcache/sysfs.c28
-rw-r--r--drivers/md/bcache/util.c10
-rw-r--r--drivers/md/bcache/util.h4
-rw-r--r--drivers/md/bcache/writeback.c117
-rw-r--r--drivers/md/bcache/writeback.h6
-rw-r--r--drivers/md/dm-bufio.c10
-rw-r--r--drivers/md/dm-cache-background-tracker.c18
-rw-r--r--drivers/md/dm-cache-metadata.c9
-rw-r--r--drivers/md/dm-cache-policy-smq.c42
-rw-r--r--drivers/md/dm-cache-target.c326
-rw-r--r--drivers/md/dm-core.h3
-rw-r--r--drivers/md/dm-crypt.c4
-rw-r--r--drivers/md/dm-integrity.c17
-rw-r--r--drivers/md/dm-kcopyd.c4
-rw-r--r--drivers/md/dm-log-writes.c175
-rw-r--r--drivers/md/dm-mpath.c42
-rw-r--r--drivers/md/dm-raid.c33
-rw-r--r--drivers/md/dm-rq.c2
-rw-r--r--drivers/md/dm-stats.c36
-rw-r--r--drivers/md/dm-switch.c2
-rw-r--r--drivers/md/dm-table.c21
-rw-r--r--drivers/md/dm-thin.c2
-rw-r--r--drivers/md/dm-verity-target.c83
-rw-r--r--drivers/md/dm-verity.h5
-rw-r--r--drivers/md/dm-zoned-target.c13
-rw-r--r--drivers/md/dm.c60
-rw-r--r--drivers/md/dm.h3
-rw-r--r--drivers/md/md-bitmap.c (renamed from drivers/md/bitmap.c)29
-rw-r--r--drivers/md/md-bitmap.h (renamed from drivers/md/bitmap.h)0
-rw-r--r--drivers/md/md-cluster.c12
-rw-r--r--drivers/md/md-faulty.c (renamed from drivers/md/faulty.c)0
-rw-r--r--drivers/md/md-linear.c (renamed from drivers/md/linear.c)2
-rw-r--r--drivers/md/md-linear.h (renamed from drivers/md/linear.h)0
-rw-r--r--drivers/md/md-multipath.c (renamed from drivers/md/multipath.c)4
-rw-r--r--drivers/md/md-multipath.h (renamed from drivers/md/multipath.h)0
-rw-r--r--drivers/md/md.c155
-rw-r--r--drivers/md/md.h20
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c3
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c78
-rw-r--r--drivers/md/raid10.c169
-rw-r--r--drivers/md/raid10.h6
-rw-r--r--drivers/md/raid5-cache.c44
-rw-r--r--drivers/md/raid5-log.h2
-rw-r--r--drivers/md/raid5-ppl.c6
-rw-r--r--drivers/md/raid5.c81
-rw-r--r--drivers/media/cec/cec-adap.c18
-rw-r--r--drivers/media/cec/cec-api.c19
-rw-r--r--drivers/media/cec/cec-core.c9
-rw-r--r--drivers/media/cec/cec-pin-priv.h133
-rw-r--r--drivers/media/cec/cec-pin.c40
-rw-r--r--drivers/media/common/cypress_firmware.c6
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c6
-rw-r--r--drivers/media/common/saa7146/saa7146_vbi.c14
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c5
-rw-r--r--drivers/media/common/siano/smscoreapi.c39
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c12
-rw-r--r--drivers/media/dvb-core/dmxdev.c8
-rw-r--r--drivers/media/dvb-core/dmxdev.h90
-rw-r--r--drivers/media/dvb-core/dvb_demux.c17
-rw-r--r--drivers/media/dvb-core/dvb_demux.h248
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c518
-rw-r--r--drivers/media/dvb-core/dvb_frontend.h117
-rw-r--r--drivers/media/dvb-core/dvb_net.h34
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.c8
-rw-r--r--drivers/media/dvb-core/dvbdev.c32
-rw-r--r--drivers/media/dvb-core/dvbdev.h137
-rw-r--r--drivers/media/dvb-frontends/Kconfig6
-rw-r--r--drivers/media/dvb-frontends/as102_fe.c7
-rw-r--r--drivers/media/dvb-frontends/cx24113.c10
-rw-r--r--drivers/media/dvb-frontends/cx24116.c22
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c9
-rw-r--r--drivers/media/dvb-frontends/ds3000.c22
-rw-r--r--drivers/media/dvb-frontends/lg2160.c14
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c3
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c23
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c2
-rw-r--r--drivers/media/dvb-frontends/si2168.c1
-rw-r--r--drivers/media/dvb-frontends/sp2.c9
-rw-r--r--drivers/media/dvb-frontends/stv0288.c7
-rw-r--r--drivers/media/dvb-frontends/stv6110.c9
-rw-r--r--drivers/media/i2c/Kconfig16
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/adv7180.c2
-rw-r--r--drivers/media/i2c/adv748x/adv748x-afe.c7
-rw-r--r--drivers/media/i2c/adv7604.c10
-rw-r--r--drivers/media/i2c/adv7842.c6
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c2
-rw-r--r--drivers/media/i2c/dw9714.c7
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c2
-rw-r--r--drivers/media/i2c/imx274.c1811
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c1
-rw-r--r--drivers/media/i2c/max2175.c2
-rw-r--r--drivers/media/i2c/mt9m111.c2
-rw-r--r--drivers/media/i2c/ov13858.c61
-rw-r--r--drivers/media/i2c/ov2640.c17
-rw-r--r--drivers/media/i2c/ov5640.c2
-rw-r--r--drivers/media/i2c/ov5647.c51
-rw-r--r--drivers/media/i2c/ov5670.c37
-rw-r--r--drivers/media/i2c/ov6650.c5
-rw-r--r--drivers/media/i2c/ov7670.c129
-rw-r--r--drivers/media/i2c/ov9650.c1
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c149
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.c3
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h1
-rw-r--r--drivers/media/i2c/soc_camera/ov9640.c11
-rw-r--r--drivers/media/i2c/soc_camera/ov9740.c6
-rw-r--r--drivers/media/i2c/tc358743.c220
-rw-r--r--drivers/media/i2c/tc358743_regs.h94
-rw-r--r--drivers/media/i2c/tvaudio.c8
-rw-r--r--drivers/media/media-entity.c13
-rw-r--r--drivers/media/pci/b2c2/Kconfig4
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c19
-rw-r--r--drivers/media/pci/bt8xx/bttv-vbi.c2
-rw-r--r--drivers/media/pci/bt8xx/bttvp.h3
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c5
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c28
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c4
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.h2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-i2c.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-vbi.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885.h2
-rw-r--r--drivers/media/pci/cx25821/cx25821-i2c.c2
-rw-r--r--drivers/media/pci/cx88/cx88-input.c4
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-io.h4
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c3
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.h2
-rw-r--r--drivers/media/pci/mantis/hopper_cards.c9
-rw-r--r--drivers/media/pci/mantis/mantis_cards.c8
-rw-r--r--drivers/media/pci/meye/meye.c20
-rw-r--r--drivers/media/pci/netup_unidvb/Kconfig12
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c7
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c6
-rw-r--r--drivers/media/pci/saa7134/saa7134-i2c.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c9
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134.h4
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c7
-rw-r--r--drivers/media/pci/saa7146/hexium_orion.c10
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c8
-rw-r--r--drivers/media/pci/saa7164/saa7164-i2c.c2
-rw-r--r--drivers/media/pci/ttpci/av7110.c8
-rw-r--r--drivers/media/pci/ttpci/av7110.h2
-rw-r--r--drivers/media/pci/ttpci/av7110_ir.c56
-rw-r--r--drivers/media/pci/ttpci/budget-core.c2
-rw-r--r--drivers/media/pci/tw686x/tw686x-core.c11
-rw-r--r--drivers/media/pci/zoran/zoran_card.h2
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c2
-rw-r--r--drivers/media/platform/Kconfig36
-rw-r--r--drivers/media/platform/Makefile6
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c8
-rw-r--r--drivers/media/platform/atmel/atmel-isc-regs.h1
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c652
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c24
-rw-r--r--drivers/media/platform/blackfin/ppi.c1
-rw-r--r--drivers/media/platform/cec-gpio/Makefile1
-rw-r--r--drivers/media/platform/cec-gpio/cec-gpio.c239
-rw-r--r--drivers/media/platform/coda/coda-bit.c4
-rw-r--r--drivers/media/platform/davinci/ccdc_hw_device.h4
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c2
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c2
-rw-r--r--drivers/media/platform/davinci/isif.c2
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c37
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c6
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c8
-rw-r--r--drivers/media/platform/davinci/vpif_display.c8
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c127
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c8
-rw-r--r--drivers/media/platform/omap/omap_vout.c3
-rw-r--r--drivers/media/platform/omap3isp/isp.c133
-rw-r--r--drivers/media/platform/omap3isp/isp.h5
-rw-r--r--drivers/media/platform/pxa_camera.c8
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.c3
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-video.c1
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss.c8
-rw-r--r--drivers/media/platform/qcom/venus/core.h2
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c7
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c1
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c12
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c34
-rw-r--r--drivers/media/platform/qcom/venus/venc.c7
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c117
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c10
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c14
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h4
-rw-r--r--drivers/media/platform/rcar_drif.c12
-rw-r--r--drivers/media/platform/rockchip/rga/Makefile3
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c154
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.c421
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.h437
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c1010
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h125
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c18
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c14
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c2
-rw-r--r--drivers/media/platform/sti/hva/hva-h264.c5
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c10
-rw-r--r--drivers/media/platform/tegra-cec/Makefile1
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.c495
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.h127
-rw-r--r--drivers/media/platform/ti-vpe/cal.c8
-rw-r--r--drivers/media/platform/vimc/vimc-core.c5
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c16
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c8
-rw-r--r--drivers/media/radio/radio-cadet.c7
-rw-r--r--drivers/media/radio/radio-raremono.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h2
-rw-r--r--drivers/media/radio/wl128x/Kconfig10
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c7
-rw-r--r--drivers/media/rc/Kconfig16
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/ati_remote.c2
-rw-r--r--drivers/media/rc/ene_ir.c7
-rw-r--r--drivers/media/rc/gpio-ir-recv.c192
-rw-r--r--drivers/media/rc/igorplugusb.c8
-rw-r--r--drivers/media/rc/img-ir/img-ir-core.c5
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c13
-rw-r--r--drivers/media/rc/img-ir/img-ir-raw.c6
-rw-r--r--drivers/media/rc/imon.c30
-rw-r--r--drivers/media/rc/ir-lirc-codec.c65
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c7
-rw-r--r--drivers/media/rc/ir-nec-decoder.c29
-rw-r--r--drivers/media/rc/keymaps/Makefile4
-rw-r--r--drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c70
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m135a.c3
-rw-r--r--drivers/media/rc/keymaps/rc-hisi-poplar.c69
-rw-r--r--drivers/media/rc/keymaps/rc-hisi-tv-demo.c81
-rw-r--r--drivers/media/rc/keymaps/rc-tango.c92
-rw-r--r--drivers/media/rc/keymaps/rc-twinhan1027.c2
-rw-r--r--drivers/media/rc/lirc_dev.c515
-rw-r--r--drivers/media/rc/mceusb.c20
-rw-r--r--drivers/media/rc/rc-core-priv.h2
-rw-r--r--drivers/media/rc/rc-ir-raw.c8
-rw-r--r--drivers/media/rc/rc-main.c79
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/rc/serial_ir.c5
-rw-r--r--drivers/media/rc/sir_ir.c4
-rw-r--r--drivers/media/rc/streamzap.c2
-rw-r--r--drivers/media/rc/tango-ir.c281
-rw-r--r--drivers/media/usb/as102/as102_fw.c28
-rw-r--r--drivers/media/usb/au0828/au0828-i2c.c2
-rw-r--r--drivers/media/usb/au0828/au0828-input.c2
-rw-r--r--drivers/media/usb/au0828/au0828-vbi.c2
-rw-r--r--drivers/media/usb/au0828/au0828-video.c4
-rw-r--r--drivers/media/usb/au0828/au0828.h2
-rw-r--r--drivers/media/usb/b2c2/Kconfig6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.h2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/dvb-usb/a800.c65
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c24
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-remote.c3
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h1
-rw-r--r--drivers/media/usb/dvb-usb/friio-fe.c24
-rw-r--r--drivers/media/usb/dvb-usb/vp7045.c88
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-v4l.h2
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c4
-rw-r--r--drivers/media/usb/gspca/Kconfig16
-rw-r--r--drivers/media/usb/gspca/gspca.c1
-rw-r--r--drivers/media/usb/gspca/ov519.c22
-rw-r--r--drivers/media/usb/msi2500/msi2500.c2
-rw-r--r--drivers/media/usb/pvrusb2/Kconfig1
-rw-r--r--drivers/media/usb/pwc/pwc-if.c3
-rw-r--r--drivers/media/usb/s2255/s2255drv.c7
-rw-r--r--drivers/media/usb/stk1160/stk1160-i2c.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-cards.c27
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c15
-rw-r--r--drivers/media/usb/tm6000/tm6000-input.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c21
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c4
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c4
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c32
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c516
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c22
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c702
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c9
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c4
-rw-r--r--drivers/message/fusion/mptbase.c4
-rw-r--r--drivers/mfd/Kconfig50
-rw-r--r--drivers/mfd/Makefile3
-rw-r--r--drivers/mfd/arizona-core.c132
-rw-r--r--drivers/mfd/axp20x.c2
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c17
-rw-r--r--drivers/mfd/intel-lpss.h7
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c184
-rw-r--r--drivers/mfd/lpc_ich.c1
-rw-r--r--drivers/mfd/max77693.c5
-rw-r--r--drivers/mfd/mxs-lradc.c6
-rw-r--r--drivers/mfd/rts5249.c155
-rw-r--r--drivers/mfd/rtsx_pcr.c142
-rw-r--r--drivers/mfd/rtsx_pcr.h14
-rw-r--r--drivers/mfd/sm501.c49
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c259
-rw-r--r--drivers/mfd/ssbi.c2
-rw-r--r--drivers/mfd/stw481x.c10
-rw-r--r--drivers/mfd/tps65217.c28
-rw-r--r--drivers/mfd/tps65218.c8
-rw-r--r--drivers/mfd/wm97xx-core.c366
-rw-r--r--drivers/misc/Kconfig10
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/altera-stapl/Kconfig3
-rw-r--r--drivers/misc/c2port/core.c2
-rw-r--r--drivers/misc/eeprom/at24.c112
-rw-r--r--drivers/misc/genwqe/card_base.h7
-rw-r--r--drivers/misc/genwqe/card_dev.c6
-rw-r--r--drivers/misc/genwqe/card_utils.c43
-rw-r--r--drivers/misc/ibmasm/event.c2
-rw-r--r--drivers/misc/ibmasm/module.c6
-rw-r--r--drivers/misc/kgdbts.c3
-rw-r--r--drivers/misc/lkdtm_core.c172
-rw-r--r--drivers/misc/mei/mei-trace.c1
-rw-r--r--drivers/misc/mei/mei-trace.h19
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/mei/pci-txe.c2
-rw-r--r--drivers/misc/mic/Kconfig4
-rw-r--r--drivers/misc/mic/scif/scif_rb.c8
-rw-r--r--drivers/misc/mic/scif/scif_rma.c3
-rw-r--r--drivers/misc/mic/scif/scif_rma_list.c2
-rw-r--r--drivers/misc/pci_endpoint_test.c33
-rw-r--r--drivers/misc/ti_dac7512.c103
-rw-r--r--drivers/misc/vmw_balloon.c2
-rw-r--r--drivers/mmc/core/block.c346
-rw-r--r--drivers/mmc/core/bus.c7
-rw-r--r--drivers/mmc/core/core.c262
-rw-r--r--drivers/mmc/core/core.h16
-rw-r--r--drivers/mmc/core/host.c20
-rw-r--r--drivers/mmc/core/host.h7
-rw-r--r--drivers/mmc/core/mmc.c46
-rw-r--r--drivers/mmc/core/mmc_ops.c6
-rw-r--r--drivers/mmc/core/queue.c41
-rw-r--r--drivers/mmc/core/queue.h4
-rw-r--r--drivers/mmc/core/sd.c51
-rw-r--r--drivers/mmc/core/sdio_irq.c3
-rw-r--r--drivers/mmc/host/Kconfig28
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/atmel-mci.c13
-rw-r--r--drivers/mmc/host/cavium.c2
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c84
-rw-r--r--drivers/mmc/host/dw_mmc.h3
-rw-r--r--drivers/mmc/host/jz4740_mmc.c7
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c2
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c768
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c285
-rw-r--r--drivers/mmc/host/mvsdio.c6
-rw-r--r--drivers/mmc/host/mxcmmc.c11
-rw-r--r--drivers/mmc/host/omap.c20
-rw-r--r--drivers/mmc/host/omap_hsmmc.c35
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c1
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c5
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c38
-rw-r--r--drivers/mmc/host/sdhci-acpi.c174
-rw-r--r--drivers/mmc/host/sdhci-cadence.c28
-rw-r--r--drivers/mmc/host/sdhci-msm.c326
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c3
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c58
-rw-r--r--drivers/mmc/host/sdhci-omap.c607
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c11
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c35
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.h73
-rw-r--r--drivers/mmc/host/sdhci-pci.h13
-rw-r--r--drivers/mmc/host/sdhci-s3c.c18
-rw-r--r--drivers/mmc/host/sdhci-tegra.c10
-rw-r--r--drivers/mmc/host/sdhci.c15
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c14
-rw-r--r--drivers/mmc/host/sunxi-mmc.c5
-rw-r--r--drivers/mmc/host/tifm_sd.c6
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c36
-rw-r--r--drivers/mmc/host/usdhi6rol0.c2
-rw-r--r--drivers/mmc/host/via-sdmmc.c8
-rw-r--r--drivers/mmc/host/vub300.c41
-rw-r--r--drivers/mmc/host/wbsd.c8
-rw-r--r--drivers/mtd/devices/block2mtd.c2
-rw-r--r--drivers/mtd/devices/phram.c2
-rw-r--r--drivers/mtd/ubi/build.c2
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_main.c6
-rw-r--r--drivers/net/dsa/lan9303-core.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c2
-rw-r--r--drivers/net/ethernet/aquantia/Kconfig2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c13
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3cdev.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h38
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c253
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c62
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h68
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c107
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h17
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c153
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c15
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c10
-rw-r--r--drivers/net/ethernet/sfc/efx.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.h6
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c6
-rw-r--r--drivers/net/ethernet/sfc/farch.c8
-rw-r--r--drivers/net/ethernet/sfc/nic.h6
-rw-r--r--drivers/net/ethernet/sfc/ptp.c10
-rw-r--r--drivers/net/ethernet/sfc/rx.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c19
-rw-r--r--drivers/net/ethernet/sun/niu.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ethernet/tile/tilegx.c6
-rw-r--r--drivers/net/geneve.c25
-rw-r--r--drivers/net/netconsole.c4
-rw-r--r--drivers/net/phy/realtek.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/tap.c2
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c4
-rw-r--r--drivers/net/usb/ipheth.c30
-rw-r--r--drivers/net/virtio_net.c1
-rw-r--r--drivers/net/vxlan.c35
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c10
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c4
-rw-r--r--drivers/nfc/st-nci/Kconfig4
-rw-r--r--drivers/nubus/nubus.c13
-rw-r--r--drivers/nvdimm/Kconfig2
-rw-r--r--drivers/nvdimm/btt.c3
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/nvme/Kconfig4
-rw-r--r--drivers/nvme/host/Kconfig9
-rw-r--r--drivers/nvme/host/Makefile1
-rw-r--r--drivers/nvme/host/core.c1301
-rw-r--r--drivers/nvme/host/fabrics.c16
-rw-r--r--drivers/nvme/host/fabrics.h14
-rw-r--r--drivers/nvme/host/fc.c793
-rw-r--r--drivers/nvme/host/lightnvm.c86
-rw-r--r--drivers/nvme/host/multipath.c291
-rw-r--r--drivers/nvme/host/nvme.h169
-rw-r--r--drivers/nvme/host/pci.c243
-rw-r--r--drivers/nvme/host/rdma.c246
-rw-r--r--drivers/nvme/target/admin-cmd.c21
-rw-r--r--drivers/nvme/target/configfs.c30
-rw-r--r--drivers/nvme/target/core.c23
-rw-r--r--drivers/nvme/target/fc.c48
-rw-r--r--drivers/nvme/target/io-cmd.c20
-rw-r--r--drivers/nvme/target/loop.c66
-rw-r--r--drivers/nvme/target/nvmet.h6
-rw-r--r--drivers/nvme/target/rdma.c16
-rw-r--r--drivers/nvmem/Kconfig35
-rw-r--r--drivers/nvmem/Makefile6
-rw-r--r--drivers/nvmem/bcm-ocotp.c1
-rw-r--r--drivers/nvmem/core.c13
-rw-r--r--drivers/nvmem/imx-iim.c24
-rw-r--r--drivers/nvmem/imx-ocotp.c193
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c1
-rw-r--r--drivers/nvmem/lpc18xx_otp.c1
-rw-r--r--drivers/nvmem/meson-efuse.c5
-rw-r--r--drivers/nvmem/meson-mx-efuse.c265
-rw-r--r--drivers/nvmem/mtk-efuse.c47
-rw-r--r--drivers/nvmem/mxs-ocotp.c1
-rw-r--r--drivers/nvmem/qfprom.c27
-rw-r--r--drivers/nvmem/rockchip-efuse.c5
-rw-r--r--drivers/nvmem/snvs_lpgpr.c156
-rw-r--r--drivers/nvmem/sunxi_sid.c7
-rw-r--r--drivers/nvmem/uniphier-efuse.c97
-rw-r--r--drivers/nvmem/vf610-ocotp.c1
-rw-r--r--drivers/of/Kconfig4
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/address.c19
-rw-r--r--drivers/of/base.c135
-rw-r--r--drivers/of/device.c8
-rw-r--r--drivers/of/dynamic.c190
-rw-r--r--drivers/of/fdt.c91
-rw-r--r--drivers/of/kobj.c164
-rw-r--r--drivers/of/of_private.h51
-rw-r--r--drivers/of/overlay.c1049
-rw-r--r--drivers/of/property.c4
-rw-r--r--drivers/of/resolver.c15
-rw-r--r--drivers/of/unittest-data/.gitignore2
-rw-r--r--drivers/of/unittest.c83
-rw-r--r--drivers/opp/Kconfig13
-rw-r--r--drivers/opp/Makefile (renamed from drivers/base/power/opp/Makefile)0
-rw-r--r--drivers/opp/core.c (renamed from drivers/base/power/opp/core.c)143
-rw-r--r--drivers/opp/cpu.c (renamed from drivers/base/power/opp/cpu.c)0
-rw-r--r--drivers/opp/debugfs.c (renamed from drivers/base/power/opp/debugfs.c)10
-rw-r--r--drivers/opp/of.c (renamed from drivers/base/power/opp/of.c)6
-rw-r--r--drivers/opp/opp.h (renamed from drivers/base/power/opp/opp.h)6
-rw-r--r--drivers/parport/parport_ip32.c2
-rw-r--r--drivers/pci/Kconfig17
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/dwc/Kconfig10
-rw-r--r--drivers/pci/dwc/Makefile1
-rw-r--r--drivers/pci/dwc/pci-dra7xx.c33
-rw-r--r--drivers/pci/dwc/pci-layerscape.c12
-rw-r--r--drivers/pci/dwc/pcie-histb.c470
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c12
-rw-r--r--drivers/pci/host/Kconfig6
-rw-r--r--drivers/pci/host/Makefile1
-rw-r--r--drivers/pci/host/pci-ftpci100.c22
-rw-r--r--drivers/pci/host/pci-host-generic.c43
-rw-r--r--drivers/pci/host/pci-hyperv.c8
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c20
-rw-r--r--drivers/pci/host/pci-tegra.c158
-rw-r--r--drivers/pci/host/pci-v3-semi.c959
-rw-r--r--drivers/pci/host/pci-xgene.c24
-rw-r--r--drivers/pci/host/pcie-altera.c8
-rw-r--r--drivers/pci/host/pcie-iproc-msi.c19
-rw-r--r--drivers/pci/host/pcie-iproc.c20
-rw-r--r--drivers/pci/host/pcie-rcar.c20
-rw-r--r--drivers/pci/host/pcie-tango.c205
-rw-r--r--drivers/pci/host/pcie-xilinx.c6
-rw-r--r--drivers/pci/hotplug-pci.c29
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c15
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c7
-rw-r--r--drivers/pci/hotplug/cpqphp.h2
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c3
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c19
-rw-r--r--drivers/pci/hotplug/ibmphp_pci.c19
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c7
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c25
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c11
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c9
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c6
-rw-r--r--drivers/pci/iov.c34
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/pci/pci-driver.c135
-rw-r--r--drivers/pci/pci-sysfs.c35
-rw-r--r--drivers/pci/pci.c157
-rw-r--r--drivers/pci/pci.h11
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c9
-rw-r--r--drivers/pci/pcie/aspm.c51
-rw-r--r--drivers/pci/pcie/pme.c5
-rw-r--r--drivers/pci/pcie/portdrv_core.c171
-rw-r--r--drivers/pci/pcie/portdrv_pci.c1
-rw-r--r--drivers/pci/probe.c187
-rw-r--r--drivers/pci/quirks.c42
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pci/rom.c19
-rw-r--r--drivers/pci/setup-bus.c299
-rw-r--r--drivers/pci/setup-res.c58
-rw-r--r--drivers/pci/switch/switchtec.c2
-rw-r--r--drivers/pcmcia/cardbus.c5
-rw-r--r--drivers/pcmcia/cistpl.c2
-rw-r--r--drivers/pcmcia/cs_internal.h2
-rw-r--r--drivers/pcmcia/m32r_cfc.c7
-rw-r--r--drivers/pcmcia/m32r_pcc.c7
-rw-r--r--drivers/perf/Kconfig15
-rw-r--r--drivers/perf/Makefile2
-rw-r--r--drivers/perf/arm_pmu.c10
-rw-r--r--drivers/perf/arm_pmu_acpi.c3
-rw-r--r--drivers/perf/arm_pmu_platform.c4
-rw-r--r--drivers/perf/arm_spe_pmu.c1249
-rw-r--r--drivers/perf/hisilicon/Makefile1
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c463
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c473
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c463
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c447
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.h102
-rw-r--r--drivers/perf/qcom_l2_pmu.c54
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c3
-rw-r--r--drivers/phy/broadcom/Kconfig13
-rw-r--r--drivers/phy/broadcom/Makefile3
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c2
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c74
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c1017
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.h50
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c459
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c1
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c39
-rw-r--r--drivers/phy/phy-core.c15
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-i.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c29
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c29
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs.c42
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c72
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c217
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c101
-rw-r--r--drivers/pinctrl/Kconfig32
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c21
-rw-r--r--drivers/pinctrl/core.c12
-rw-r--r--drivers/pinctrl/core.h2
-rw-r--r--drivers/pinctrl/intel/Kconfig11
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-cedarfork.c375
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c8
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c22
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h3
-rw-r--r--drivers/pinctrl/meson/Kconfig41
-rw-r--r--drivers/pinctrl/meson/Makefile9
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c892
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c852
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c200
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h47
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8-pmx.c108
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8-pmx.h48
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c992
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c808
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c70
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c4
-rw-r--r--drivers/pinctrl/pinconf-generic.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c6
-rw-r--r--drivers/pinctrl/pinctrl-at91.c2
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c2
-rw-r--r--drivers/pinctrl/pinctrl-gemini.c333
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c2
-rw-r--r--drivers/pinctrl/pinctrl-max77620.c21
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c22
-rw-r--r--drivers/pinctrl/pinctrl-oxnas.c2
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c2
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c2
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c43
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c134
-rw-r--r--drivers/pinctrl/pinctrl-single.c4
-rw-r--r--drivers/pinctrl/pinctrl-st.c2
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c6
-rw-r--r--drivers/pinctrl/samsung/Kconfig2
-rw-r--r--drivers/pinctrl/sh-pfc/core.c131
-rw-r--r--drivers/pinctrl/sh-pfc/core.h11
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c403
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7794.c1
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c542
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c1904
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c573
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77995.c394
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c12
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h24
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c6
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c6
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c8
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c3
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c3
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c13
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h1
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c4
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-core.c14
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c4
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c4
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c4
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/intel_int0002_vgpio.c6
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c1
-rw-r--r--drivers/power/avs/smartreflex.c10
-rw-r--r--drivers/power/supply/Kconfig16
-rw-r--r--drivers/power/supply/Makefile1
-rw-r--r--drivers/power/supply/cpcap-charger.c2
-rw-r--r--drivers/power/supply/generic-adc-battery.c2
-rw-r--r--drivers/power/supply/max8997_charger.c3
-rw-r--r--drivers/power/supply/pcf50633-charger.c2
-rw-r--r--drivers/power/supply/power_supply_core.c2
-rw-r--r--drivers/power/supply/qcom_smbb.c2
-rw-r--r--drivers/power/supply/sbs-battery.c35
-rw-r--r--drivers/power/supply/sbs-manager.c445
-rw-r--r--drivers/power/supply/twl4030_charger.c2
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c4
-rw-r--r--drivers/regulator/Kconfig2
-rw-r--r--drivers/regulator/axp20x-regulator.c92
-rw-r--r--drivers/regulator/da9211-regulator.c14
-rw-r--r--drivers/regulator/da9211-regulator.h2
-rw-r--r--drivers/regulator/pbias-regulator.c21
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c48
-rw-r--r--drivers/regulator/tps65217-regulator.c5
-rw-r--r--drivers/regulator/tps65218-regulator.c2
-rw-r--r--drivers/s390/block/dasd_eer.c16
-rw-r--r--drivers/s390/block/dasd_int.h16
-rw-r--r--drivers/s390/block/scm_blk.h8
-rw-r--r--drivers/s390/char/sclp_con.c7
-rw-r--r--drivers/s390/char/sclp_tty.c7
-rw-r--r--drivers/s390/char/tape_class.c3
-rw-r--r--drivers/s390/char/vmlogrdr.c3
-rw-r--r--drivers/s390/char/vmur.c11
-rw-r--r--drivers/s390/char/vmur.h4
-rw-r--r--drivers/s390/cio/ccwgroup.c6
-rw-r--r--drivers/s390/cio/chsc_sch.c6
-rw-r--r--drivers/s390/cio/cio_debug.h8
-rw-r--r--drivers/s390/cio/cmf.c278
-rw-r--r--drivers/s390/cio/eadm_sch.c8
-rw-r--r--drivers/s390/cio/qdio_debug.h18
-rw-r--r--drivers/s390/cio/qdio_thinint.c10
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c24
-rw-r--r--drivers/s390/crypto/ap_asm.h43
-rw-r--r--drivers/s390/crypto/ap_bus.c74
-rw-r--r--drivers/s390/crypto/ap_bus.h4
-rw-r--r--drivers/s390/crypto/ap_card.c12
-rw-r--r--drivers/s390/crypto/ap_queue.c4
-rw-r--r--drivers/s390/crypto/pkey_api.c3
-rw-r--r--drivers/s390/crypto/zcrypt_api.h1
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c48
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c6
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c3
-rw-r--r--drivers/s390/net/ctcm_main.c1
-rw-r--r--drivers/s390/net/lcs.c1
-rw-r--r--drivers/s390/net/qeth_core_main.c1
-rw-r--r--drivers/s390/virtio/Makefile6
-rw-r--r--drivers/s390/virtio/kvm_virtio.c515
-rw-r--r--drivers/scsi/.gitignore1
-rw-r--r--drivers/scsi/Kconfig5
-rw-r--r--drivers/scsi/Makefile8
-rw-r--r--drivers/scsi/NCR5380.c18
-rw-r--r--drivers/scsi/aacraid/commsup.c26
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c3
-rw-r--r--drivers/scsi/be2iscsi/be.h19
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c55
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h48
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c54
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c114
-rw-r--r--drivers/scsi/be2iscsi/be_main.h51
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c278
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h10
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c7
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c5
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c10
-rw-r--r--drivers/scsi/csiostor/csio_hw.h3
-rw-r--r--drivers/scsi/csiostor/csio_init.c3
-rw-r--r--drivers/scsi/csiostor/csio_mb.c3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c1
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c50
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h1
-rw-r--r--drivers/scsi/cxlflash/main.c24
-rw-r--r--drivers/scsi/cxlflash/main.h3
-rw-r--r--drivers/scsi/cxlflash/sislite.h3
-rw-r--r--drivers/scsi/cxlflash/superpipe.c6
-rw-r--r--drivers/scsi/cxlflash/vlun.c6
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c10
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c6
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c12
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c6
-rw-r--r--drivers/scsi/fcoe/fcoe.c81
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c20
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h13
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c46
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c253
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c257
-rw-r--r--drivers/scsi/hpsa.c356
-rw-r--r--drivers/scsi/hpsa_cmd.h3
-rw-r--r--drivers/scsi/libfc/fc_lport.c3
-rw-r--r--drivers/scsi/libsas/sas_dump.c10
-rw-r--r--drivers/scsi/libsas/sas_dump.h1
-rw-r--r--drivers/scsi/libsas/sas_event.c26
-rw-r--r--drivers/scsi/libsas/sas_init.c12
-rw-r--r--drivers/scsi/lpfc/lpfc.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c30
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c174
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c148
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c232
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c9
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h64
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c1025
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c20
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c676
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h29
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h43
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h564
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_init.h11
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h282
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_pci.h111
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_tool.h14
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c660
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h177
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c100
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c164
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c2219
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_warpdrive.c2
-rw-r--r--drivers/scsi/nsp32.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c54
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c11
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c13
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c124
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h10
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c62
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h102
-rw-r--r--drivers/scsi/qedi/qedi_fw.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h29
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c195
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c73
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c132
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c40
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi_debug.c31
-rw-r--r--drivers/scsi/scsi_devinfo.c72
-rw-r--r--drivers/scsi/scsi_dh.c36
-rw-r--r--drivers/scsi/scsi_error.c13
-rw-r--r--drivers/scsi/scsi_lib.c108
-rw-r--r--drivers/scsi/scsi_logging.h8
-rw-r--r--drivers/scsi/scsi_priv.h4
-rw-r--r--drivers/scsi/scsi_scan.c1
-rw-r--r--drivers/scsi/scsi_sysfs.c45
-rw-r--r--drivers/scsi/scsi_transport_fc.c2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c2
-rw-r--r--drivers/scsi/sd.c32
-rw-r--r--drivers/scsi/sd_zbc.c169
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c18
-rw-r--r--drivers/scsi/storvsc_drv.c52
-rw-r--r--drivers/scsi/ufs/tc-dwc-g210.c10
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c43
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h7
-rw-r--r--drivers/scsi/ufs/ufshcd.c40
-rw-r--r--drivers/scsi/ufs/ufshcd.h16
-rw-r--r--drivers/scsi/ufs/ufshci.h70
-rw-r--r--drivers/sh/maple/maple.c5
-rw-r--r--drivers/soc/bcm/brcmstb/common.c12
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c14
-rw-r--r--drivers/soc/rockchip/pm_domains.c14
-rw-r--r--drivers/spi/Kconfig13
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-armada-3700.c17
-rw-r--r--drivers/spi/spi-axi-spi-engine.c4
-rw-r--r--drivers/spi/spi-fsl-dspi.c66
-rw-r--r--drivers/spi/spi-imx.c256
-rw-r--r--drivers/spi/spi-mxs.c120
-rw-r--r--drivers/spi/spi-orion.c1
-rw-r--r--drivers/spi/spi-rspi.c6
-rw-r--r--drivers/spi/spi-s3c64xx.c3
-rw-r--r--drivers/spi/spi-sh-msiof.c12
-rw-r--r--drivers/spi/spi-sprd-adi.c418
-rw-r--r--drivers/spi/spi-tegra114.c6
-rw-r--r--drivers/spi/spi.c9
-rw-r--r--drivers/staging/android/TODO1
-rw-r--r--drivers/staging/android/ion/ion-ioctl.c11
-rw-r--r--drivers/staging/android/ion/ion.c2
-rw-r--r--drivers/staging/android/ion/ion.h2
-rw-r--r--drivers/staging/ccree/cc_hal.h33
-rw-r--r--drivers/staging/ccree/cc_lli_defs.h2
-rw-r--r--drivers/staging/ccree/cc_regs.h42
-rw-r--r--drivers/staging/ccree/ssi_aead.c258
-rw-r--r--drivers/staging/ccree/ssi_buffer_mgr.c438
-rw-r--r--drivers/staging/ccree/ssi_buffer_mgr.h5
-rw-r--r--drivers/staging/ccree/ssi_cipher.c189
-rw-r--r--drivers/staging/ccree/ssi_cipher.h13
-rw-r--r--drivers/staging/ccree/ssi_driver.c381
-rw-r--r--drivers/staging/ccree/ssi_driver.h55
-rw-r--r--drivers/staging/ccree/ssi_fips.c26
-rw-r--r--drivers/staging/ccree/ssi_fips.h4
-rw-r--r--drivers/staging/ccree/ssi_hash.c377
-rw-r--r--drivers/staging/ccree/ssi_ivgen.c18
-rw-r--r--drivers/staging/ccree/ssi_pm.c35
-rw-r--r--drivers/staging/ccree/ssi_request_mgr.c195
-rw-r--r--drivers/staging/ccree/ssi_sram_mgr.c33
-rw-r--r--drivers/staging/ccree/ssi_sysfs.c282
-rw-r--r--drivers/staging/comedi/Kconfig4
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c26
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c18
-rw-r--r--drivers/staging/comedi/drivers/das16.c17
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c2
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c10
-rw-r--r--drivers/staging/comedi/drivers/s526.c5
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c8
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h16
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c11
-rw-r--r--drivers/staging/fbtft/fb_ra8875.c10
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c8
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c2
-rw-r--r--drivers/staging/fbtft/fb_uc1701.c2
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c24
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c281
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h54
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c14
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h5
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni.c32
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni.h5
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio-service.c4
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-msi.c2
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-io.h6
-rw-r--r--drivers/staging/fwserial/fwserial.c16
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c19
-rw-r--r--drivers/staging/greybus/Documentation/firmware/authenticate.c1
-rw-r--r--drivers/staging/greybus/Documentation/firmware/firmware.c1
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c3
-rw-r--r--drivers/staging/greybus/arche-platform.c3
-rw-r--r--drivers/staging/greybus/arche_platform.h3
-rw-r--r--drivers/staging/greybus/arpc.h1
-rw-r--r--drivers/staging/greybus/audio_apbridgea.c3
-rw-r--r--drivers/staging/greybus/audio_apbridgea.h1
-rw-r--r--drivers/staging/greybus/audio_codec.c3
-rw-r--r--drivers/staging/greybus/audio_codec.h3
-rw-r--r--drivers/staging/greybus/audio_gb.c3
-rw-r--r--drivers/staging/greybus/audio_manager.c5
-rw-r--r--drivers/staging/greybus/audio_manager.h3
-rw-r--r--drivers/staging/greybus/audio_manager_module.c3
-rw-r--r--drivers/staging/greybus/audio_manager_private.h3
-rw-r--r--drivers/staging/greybus/audio_manager_sysfs.c3
-rw-r--r--drivers/staging/greybus/audio_module.c3
-rw-r--r--drivers/staging/greybus/audio_topology.c3
-rw-r--r--drivers/staging/greybus/authentication.c3
-rw-r--r--drivers/staging/greybus/bootrom.c3
-rw-r--r--drivers/staging/greybus/bundle.c3
-rw-r--r--drivers/staging/greybus/bundle.h3
-rw-r--r--drivers/staging/greybus/camera.c3
-rw-r--r--drivers/staging/greybus/connection.c3
-rw-r--r--drivers/staging/greybus/connection.h3
-rw-r--r--drivers/staging/greybus/control.c3
-rw-r--r--drivers/staging/greybus/control.h3
-rw-r--r--drivers/staging/greybus/core.c3
-rw-r--r--drivers/staging/greybus/debugfs.c3
-rw-r--r--drivers/staging/greybus/es2.c4
-rw-r--r--drivers/staging/greybus/firmware.h3
-rw-r--r--drivers/staging/greybus/fw-core.c3
-rw-r--r--drivers/staging/greybus/fw-download.c3
-rw-r--r--drivers/staging/greybus/fw-management.c3
-rw-r--r--drivers/staging/greybus/gb-camera.h3
-rw-r--r--drivers/staging/greybus/gbphy.c3
-rw-r--r--drivers/staging/greybus/gbphy.h3
-rw-r--r--drivers/staging/greybus/gpio.c3
-rw-r--r--drivers/staging/greybus/greybus.h3
-rw-r--r--drivers/staging/greybus/greybus_authentication.h1
-rw-r--r--drivers/staging/greybus/greybus_firmware.h1
-rw-r--r--drivers/staging/greybus/greybus_manifest.h1
-rw-r--r--drivers/staging/greybus/greybus_protocols.h1
-rw-r--r--drivers/staging/greybus/greybus_trace.h3
-rw-r--r--drivers/staging/greybus/hd.c3
-rw-r--r--drivers/staging/greybus/hd.h3
-rw-r--r--drivers/staging/greybus/hid.c3
-rw-r--r--drivers/staging/greybus/i2c.c3
-rw-r--r--drivers/staging/greybus/interface.c3
-rw-r--r--drivers/staging/greybus/interface.h3
-rw-r--r--drivers/staging/greybus/light.c11
-rw-r--r--drivers/staging/greybus/log.c3
-rw-r--r--drivers/staging/greybus/loopback.c242
-rw-r--r--drivers/staging/greybus/manifest.c3
-rw-r--r--drivers/staging/greybus/manifest.h3
-rw-r--r--drivers/staging/greybus/module.c3
-rw-r--r--drivers/staging/greybus/module.h3
-rw-r--r--drivers/staging/greybus/operation.c3
-rw-r--r--drivers/staging/greybus/operation.h16
-rw-r--r--drivers/staging/greybus/power_supply.c3
-rw-r--r--drivers/staging/greybus/pwm.c3
-rw-r--r--drivers/staging/greybus/raw.c3
-rw-r--r--drivers/staging/greybus/sdio.c3
-rw-r--r--drivers/staging/greybus/spi.c3
-rw-r--r--drivers/staging/greybus/spilib.c11
-rw-r--r--drivers/staging/greybus/spilib.h1
-rw-r--r--drivers/staging/greybus/svc.c3
-rw-r--r--drivers/staging/greybus/svc.h3
-rw-r--r--drivers/staging/greybus/svc_watchdog.c3
-rwxr-xr-xdrivers/staging/greybus/tools/lbtest1
-rw-r--r--drivers/staging/greybus/tools/loopback_test.c1
-rw-r--r--drivers/staging/greybus/uart.c3
-rw-r--r--drivers/staging/greybus/usb.c4
-rw-r--r--drivers/staging/greybus/vibrator.c3
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c2
-rw-r--r--drivers/staging/iio/Documentation/device.txt3
-rw-r--r--drivers/staging/iio/Documentation/trigger.txt4
-rw-r--r--drivers/staging/iio/accel/adis16201.c1
-rw-r--r--drivers/staging/iio/accel/adis16203.c1
-rw-r--r--drivers/staging/iio/accel/adis16209.c1
-rw-r--r--drivers/staging/iio/accel/adis16240.c1
-rw-r--r--drivers/staging/iio/adc/ad7192.c11
-rw-r--r--drivers/staging/iio/adc/ad7280a.c1
-rw-r--r--drivers/staging/iio/adc/ad7606.c4
-rw-r--r--drivers/staging/iio/adc/ad7780.c1
-rw-r--r--drivers/staging/iio/adc/ad7816.c1
-rw-r--r--drivers/staging/iio/addac/adt7316.c2
-rw-r--r--drivers/staging/iio/cdc/ad7150.c1
-rw-r--r--drivers/staging/iio/cdc/ad7152.c1
-rw-r--r--drivers/staging/iio/cdc/ad7746.c1
-rw-r--r--drivers/staging/iio/frequency/ad9832.c1
-rw-r--r--drivers/staging/iio/frequency/ad9834.c2
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c1
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c1
-rw-r--r--drivers/staging/iio/light/tsl2x7x.c665
-rw-r--r--drivers/staging/iio/light/tsl2x7x.h13
-rw-r--r--drivers/staging/iio/meter/ade7753.c27
-rw-r--r--drivers/staging/iio/meter/ade7754.c1
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c1
-rw-r--r--drivers/staging/iio/meter/ade7758_trigger.c1
-rw-r--r--drivers/staging/iio/meter/ade7759.c28
-rw-r--r--drivers/staging/iio/meter/ade7854.c1
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c1
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c1
-rw-r--r--drivers/staging/iio/resolver/ad2s90.c1
-rw-r--r--drivers/staging/iio/trigger/iio-trig-bfin-timer.c1
-rw-r--r--drivers/staging/irda/drivers/ali-ircc.c3
-rw-r--r--drivers/staging/irda/drivers/au1k_ir.c40
-rw-r--r--drivers/staging/irda/drivers/bfin_sir.c12
-rw-r--r--drivers/staging/irda/drivers/esi-sir.c22
-rw-r--r--drivers/staging/irda/drivers/irda-usb.c24
-rw-r--r--drivers/staging/irda/drivers/irda-usb.h1
-rw-r--r--drivers/staging/irda/drivers/mcs7780.c9
-rw-r--r--drivers/staging/irda/drivers/vlsi_ir.c18
-rw-r--r--drivers/staging/irda/include/net/irda/irlmp_event.h6
-rw-r--r--drivers/staging/irda/include/net/irda/qos.h20
-rw-r--r--drivers/staging/irda/include/net/irda/timer.h11
-rw-r--r--drivers/staging/irda/net/af_irda.c7
-rw-r--r--drivers/staging/irda/net/discovery.c4
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_tty.c2
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_tty_attach.c8
-rw-r--r--drivers/staging/irda/net/irda_device.c36
-rw-r--r--drivers/staging/irda/net/iriap.c10
-rw-r--r--drivers/staging/irda/net/irlan/irlan_client.c6
-rw-r--r--drivers/staging/irda/net/irlan/irlan_common.c4
-rw-r--r--drivers/staging/irda/net/irlap.c16
-rw-r--r--drivers/staging/irda/net/irlap_event.c6
-rw-r--r--drivers/staging/irda/net/irlmp.c8
-rw-r--r--drivers/staging/irda/net/irlmp_event.c10
-rw-r--r--drivers/staging/irda/net/irqueue.c3
-rw-r--r--drivers/staging/irda/net/irttp.c11
-rw-r--r--drivers/staging/irda/net/timer.c54
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c15
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/curproc.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_time.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h1
-rw-r--r--drivers/staging/lustre/include/linux/lnet/api.h1
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h1
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h29
-rw-r--r--drivers/staging/lustre/include/linux/lnet/socklnd.h1
-rw-r--r--drivers/staging/lustre/lnet/Kconfig2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c17
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/prng.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/net_fault.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/peer.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c10
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c1
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c1
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c1
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h1
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c1
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h1
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c1
-rw-r--r--drivers/staging/lustre/lnet/selftest/module.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c1
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c14
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h1
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h1
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c1
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.h1
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_lib.c1
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c4
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c1
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c1
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c4
-rw-r--r--drivers/staging/lustre/lustre/fld/lproc_fld.c1
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h1
-rw-r--r--drivers/staging/lustre/lustre/include/interval_tree.h1
-rw-r--r--drivers/staging/lustre/lustre/include/llog_swab.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h5
-rw-r--r--drivers/staging/lustre/lustre/include/lu_ref.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_acl.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_compat.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_debug.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h7
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_errno.h51
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_export.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ha.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_handles.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_intent.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_kernelcomm.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_linkea.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lmv.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mds.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_obdo.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_patchless_compat.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_swab.h1
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h1
-rw-r--r--drivers/staging/lustre/lustre/include/obd_cksum.h1
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h1
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h1
-rw-r--r--drivers/staging/lustre/lustre/include/seq_range.h1
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c1
-rw-r--r--drivers/staging/lustre/lustre/ldlm/l_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c215
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c1
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h3
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c1
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c34
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c7
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_plain.c1
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c7
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c44
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c21
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c14
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h19
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c30
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c20
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.h1
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c18
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_security.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_page.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lproc_lov.c1
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c1
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c1
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c1
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c1
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c3
-rw-r--r--drivers/staging/lustre/lustre/mgc/lproc_mgc.c1
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/kernelcomm.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linkea.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_ref.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_handles.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/statfs_pack.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/uuid.c1
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c1
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/errno.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_net.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pers.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c3
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_gc.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_null.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c1
-rw-r--r--drivers/staging/media/atomisp/Kconfig11
-rw-r--r--drivers/staging/media/atomisp/TODO24
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig100
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile19
-rw-r--r--drivers/staging/media/atomisp/i2c/ap1302.c1255
-rw-r--r--drivers/staging/media/atomisp/i2c/ap1302.h198
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c (renamed from drivers/staging/media/atomisp/i2c/gc0310.c)53
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c (renamed from drivers/staging/media/atomisp/i2c/gc2235.c)54
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c (renamed from drivers/staging/media/atomisp/i2c/libmsrlisthelper.c)4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-lm3554.c (renamed from drivers/staging/media/atomisp/i2c/lm3554.c)47
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c (renamed from drivers/staging/media/atomisp/i2c/mt9m114.c)51
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2680.c (renamed from drivers/staging/media/atomisp/i2c/ov2680.c)51
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c (renamed from drivers/staging/media/atomisp/i2c/ov2722.c)54
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.h11
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h7
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/Kconfig9
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/Makefile14
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/ad5816g.c217
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/ad5816g.h50
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/common.h66
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/drv201.c210
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/drv201.h39
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9714.c224
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9714.h64
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9718.c233
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9718.h64
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9719.c198
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9719.h58
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.c2480
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.h737
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx132.h566
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx134.h2465
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx135.h3374
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx175.h1960
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx208.h550
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx219.h228
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx227.h727
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp.c39
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c80
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c89
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp_imx.c191
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/vcm.c45
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.h9
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.h14
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h11
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Kconfig12
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Makefile2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ad5823.h4
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c (renamed from drivers/staging/media/atomisp/i2c/ov5693/ov5693.c)59
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.h11
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.c65
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.h5
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858_btns.h5
-rw-r--r--drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h38
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h4
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h3
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h25
-rw-r--r--drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h4
-rw-r--r--drivers/staging/media/atomisp/include/media/lm3554.h5
-rw-r--r--drivers/staging/media/atomisp/include/media/lm3642.h153
-rw-r--r--drivers/staging/media/atomisp/pci/Kconfig17
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c33
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c5
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c67
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h15
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c80
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h64
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h60
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h5
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h25
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c133
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c19
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c54
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c4
-rw-r--r--drivers/staging/media/atomisp/platform/Makefile1
-rw-r--r--drivers/staging/media/atomisp/platform/clock/Makefile6
-rw-r--r--drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c40
-rw-r--r--drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h27
-rw-r--r--drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c247
-rw-r--r--drivers/staging/media/atomisp/platform/intel-mid/Makefile1
-rw-r--r--drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c141
-rw-r--r--drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c298
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c60
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c5
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c8
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c231
-rw-r--r--drivers/staging/netlogic/xlr_net.c82
-rw-r--r--drivers/staging/pi433/Documentation/pi433.txt24
-rw-r--r--drivers/staging/pi433/pi433_if.c67
-rw-r--r--drivers/staging/pi433/rf69.c236
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c7
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c49
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c89
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c35
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c144
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c64
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_HWConfig.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c15
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c1
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h33
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h3
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h28
-rw-r--r--drivers/staging/rtl8188eu/include/basic_types.h14
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h3
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h6
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h114
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h46
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_intf.h2
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseq.h44
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h29
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h157
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h222
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_eeprom.h9
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_event.h40
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h7
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h44
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h33
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h19
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h22
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_qos.h3
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h82
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_rf.h6
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_security.h27
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h12
-rw-r--r--drivers/staging/rtl8188eu/include/sta_info.h12
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h25
-rw-r--r--drivers/staging/rtl8188eu/include/wlan_bssdef.h22
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mlme_linux.c19
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mon.c34
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c1
-rw-r--r--drivers/staging/rtl8192e/dot11d.c8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c25
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c11
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c15
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c56
-rw-r--r--drivers/staging/rtl8192e/rtllib.h6
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c23
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h118
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c27
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c15
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c16
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c46
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c20
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c9
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.h2
-rw-r--r--drivers/staging/rtl8712/hal_init.c4
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c48
-rw-r--r--drivers/staging/rtl8712/os_intfs.c8
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c4
-rw-r--r--drivers/staging/rtl8712/rtl8712_event.h1
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c3
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c8
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.h2
-rw-r--r--drivers/staging/rtl8712/usb_intf.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_btcoex.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c144
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_debug.c8
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c21
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c140
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c34
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c33
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_rf.c7
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c9
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c7
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c11
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c69
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_CfoTracking.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_HWConfig.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c10
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h3
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h6
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service_linux.h13
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_pwrctrl.h3
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c16
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c45
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c62
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c243
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c24
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c5
-rw-r--r--drivers/staging/rtl8723bs/os_dep/rtw_proc.c6
-rw-r--r--drivers/staging/rtlwifi/base.c58
-rw-r--r--drivers/staging/rtlwifi/base.h4
-rw-r--r--drivers/staging/rtlwifi/core.c72
-rw-r--r--drivers/staging/rtlwifi/core.h4
-rw-r--r--drivers/staging/rtlwifi/debug.c36
-rw-r--r--drivers/staging/rtlwifi/efuse.c5
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c2
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c50
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.c2
-rw-r--r--drivers/staging/rtlwifi/pci.c2
-rw-r--r--drivers/staging/rtlwifi/phydm/halphyrf_ce.c2
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.c8
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adaptivity.c4
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_debug.c12
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dig.c24
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.c23
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.h11
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c10
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c4
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c13
-rw-r--r--drivers/staging/rtlwifi/ps.c2
-rw-r--r--drivers/staging/rtlwifi/rc.c16
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c9
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/led.c2
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/phy.c23
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/trx.c24
-rw-r--r--drivers/staging/rts5208/sd.c6
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.h2
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c4
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.h4
-rw-r--r--drivers/staging/sm750fb/ddk750_power.c2
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h7
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c3
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.h4
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.c4
-rw-r--r--drivers/staging/speakup/buffers.c4
-rw-r--r--drivers/staging/speakup/main.c4
-rw-r--r--drivers/staging/speakup/speakup_acntsa.c2
-rw-r--r--drivers/staging/speakup/speakup_apollo.c2
-rw-r--r--drivers/staging/speakup/speakup_audptr.c2
-rw-r--r--drivers/staging/speakup/speakup_bns.c2
-rw-r--r--drivers/staging/speakup/speakup_decext.c2
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c2
-rw-r--r--drivers/staging/speakup/speakup_dummy.c2
-rw-r--r--drivers/staging/speakup/speakup_ltlk.c2
-rw-r--r--drivers/staging/speakup/speakup_spkout.c2
-rw-r--r--drivers/staging/speakup/speakup_txprt.c2
-rw-r--r--drivers/staging/speakup/spk_ttyio.c10
-rw-r--r--drivers/staging/typec/Kconfig10
-rw-r--r--drivers/staging/typec/Makefile2
-rw-r--r--drivers/staging/typec/TODO10
-rw-r--r--drivers/staging/typec/fusb302/TODO10
-rw-r--r--drivers/staging/typec/tcpci.c5
-rw-r--r--drivers/staging/unisys/MAINTAINERS2
-rw-r--r--drivers/staging/unisys/include/iochannel.h9
-rw-r--r--drivers/staging/unisys/include/visorbus.h159
-rw-r--r--drivers/staging/unisys/include/visorchannel.h (renamed from drivers/staging/unisys/include/channel.h)12
-rw-r--r--drivers/staging/unisys/visorbus/controlvmchannel.h3
-rw-r--r--drivers/staging/unisys/visorbus/vbuschannel.h12
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c271
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_private.h8
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c81
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c298
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c12
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c6
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c10
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.h8
-rw-r--r--drivers/staging/vboxvideo/vbox_irq.c4
-rw-r--r--drivers/staging/vboxvideo/vbox_mode.c28
-rw-r--r--drivers/staging/vc04_services/Kconfig12
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c11
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c11
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h2
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h24
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c2
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchi/connections/connection.h3
-rw-r--r--drivers/staging/vc04_services/interface/vchi/message_drivers/message.h5
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_common.h6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c57
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c177
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c13
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h1
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c3
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c47
-rw-r--r--drivers/staging/vme/devices/Kconfig13
-rw-r--r--drivers/staging/vme/devices/Makefile3
-rw-r--r--drivers/staging/vme/devices/vme_pio2.h245
-rw-r--r--drivers/staging/vme/devices/vme_pio2_cntr.c71
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c493
-rw-r--r--drivers/staging/vme/devices/vme_pio2_gpio.c220
-rw-r--r--drivers/staging/vt6655/device_main.c3
-rw-r--r--drivers/staging/vt6655/key.c4
-rw-r--r--drivers/staging/vt6656/card.c2
-rw-r--r--drivers/staging/wilc1000/host_interface.c45
-rw-r--r--drivers/staging/wilc1000/host_interface.h5
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c3
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c2
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c33
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c78
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h2
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c6
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c4
-rw-r--r--drivers/staging/xgifb/vb_setmode.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c12
-rw-r--r--drivers/target/target_core_configfs.c14
-rw-r--r--drivers/target/target_core_stat.c16
-rw-r--r--drivers/target/target_core_user.c2
-rw-r--r--drivers/thunderbolt/tb.c1
-rw-r--r--drivers/tty/amiserial.c1
-rw-r--r--drivers/tty/bfin_jtag_comm.c3
-rw-r--r--drivers/tty/cyclades.c17
-rw-r--r--drivers/tty/ehv_bytechan.c9
-rw-r--r--drivers/tty/goldfish.c11
-rw-r--r--drivers/tty/hvc/hvc_bfin_jtag.c3
-rw-r--r--drivers/tty/hvc/hvc_console.c15
-rw-r--r--drivers/tty/hvc/hvc_console.h15
-rw-r--r--drivers/tty/hvc/hvc_dcc.c13
-rw-r--r--drivers/tty/hvc/hvc_opal.c16
-rw-r--r--drivers/tty/hvc/hvc_rtas.c15
-rw-r--r--drivers/tty/hvc/hvc_tile.c11
-rw-r--r--drivers/tty/hvc/hvc_udbg.c15
-rw-r--r--drivers/tty/hvc/hvc_vio.c15
-rw-r--r--drivers/tty/hvc/hvc_xen.c15
-rw-r--r--drivers/tty/hvc/hvcs.c15
-rw-r--r--drivers/tty/hvc/hvsi.c15
-rw-r--r--drivers/tty/ipwireless/main.c1
-rw-r--r--drivers/tty/isicom.c6
-rw-r--r--drivers/tty/metag_da.c16
-rw-r--r--drivers/tty/mips_ejtag_fdc.c13
-rw-r--r--drivers/tty/moxa.c6
-rw-r--r--drivers/tty/mxser.c22
-rw-r--r--drivers/tty/n_gsm.c18
-rw-r--r--drivers/tty/n_hdlc.c3
-rw-r--r--drivers/tty/n_null.c14
-rw-r--r--drivers/tty/n_r3964.c4
-rw-r--r--drivers/tty/n_tracerouter.c12
-rw-r--r--drivers/tty/n_tracesink.c12
-rw-r--r--drivers/tty/n_tracesink.h12
-rw-r--r--drivers/tty/n_tty.c4
-rw-r--r--drivers/tty/nozomi.c15
-rw-r--r--drivers/tty/rocket.c15
-rw-r--r--drivers/tty/serdev/Kconfig8
-rw-r--r--drivers/tty/serdev/core.c152
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c31
-rw-r--r--drivers/tty/serial/21285.c1
-rw-r--r--drivers/tty/serial/8250/8250.h6
-rw-r--r--drivers/tty/serial/8250/8250_accent.c5
-rw-r--r--drivers/tty/serial/8250/8250_acorn.c5
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c6
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c6
-rw-r--r--drivers/tty/serial/8250/8250_boca.c5
-rw-r--r--drivers/tty/serial/8250/8250_core.c23
-rw-r--r--drivers/tty/serial/8250/8250_dma.c6
-rw-r--r--drivers/tty/serial/8250/8250_dw.c36
-rw-r--r--drivers/tty/serial/8250/8250_early.c5
-rw-r--r--drivers/tty/serial/8250/8250_em.c14
-rw-r--r--drivers/tty/serial/8250/8250_exar.c5
-rw-r--r--drivers/tty/serial/8250/8250_exar_st16c554.c5
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c123
-rw-r--r--drivers/tty/serial/8250/8250_fourport.c5
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c5
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c6
-rw-r--r--drivers/tty/serial/8250/8250_hp300.c1
-rw-r--r--drivers/tty/serial/8250/8250_hub6.c5
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c10
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c6
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c5
-rw-r--r--drivers/tty/serial/8250/8250_mid.c11
-rw-r--r--drivers/tty/serial/8250/8250_moxa.c5
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c13
-rw-r--r--drivers/tty/serial/8250/8250_of.c7
-rw-r--r--drivers/tty/serial/8250/8250_omap.c5
-rw-r--r--drivers/tty/serial/8250/8250_pci.c6
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c5
-rw-r--r--drivers/tty/serial/8250/8250_port.c16
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c7
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c11
-rw-r--r--drivers/tty/serial/8250/serial_cs.c1
-rw-r--r--drivers/tty/serial/altera_jtaguart.c6
-rw-r--r--drivers/tty/serial/altera_uart.c14
-rw-r--r--drivers/tty/serial/amba-pl010.c15
-rw-r--r--drivers/tty/serial/amba-pl011.c32
-rw-r--r--drivers/tty/serial/apbuart.c1
-rw-r--r--drivers/tty/serial/ar933x_uart.c5
-rw-r--r--drivers/tty/serial/arc_uart.c5
-rw-r--r--drivers/tty/serial/atmel_serial.c65
-rw-r--r--drivers/tty/serial/atmel_serial.h6
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c11
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c5
-rw-r--r--drivers/tty/serial/bfin_sport_uart.h3
-rw-r--r--drivers/tty/serial/bfin_uart.c10
-rw-r--r--drivers/tty/serial/clps711x.c6
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart.h6
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c16
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c16
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c16
-rw-r--r--drivers/tty/serial/digicolor-usart.c6
-rw-r--r--drivers/tty/serial/dz.c1
-rw-r--r--drivers/tty/serial/earlycon-arm-semihost.c13
-rw-r--r--drivers/tty/serial/earlycon.c5
-rw-r--r--drivers/tty/serial/efm32-uart.c1
-rw-r--r--drivers/tty/serial/fsl_lpuart.c33
-rw-r--r--drivers/tty/serial/icom.c16
-rw-r--r--drivers/tty/serial/icom.h15
-rw-r--r--drivers/tty/serial/ifx6x60.c20
-rw-r--r--drivers/tty/serial/ifx6x60.h18
-rw-r--r--drivers/tty/serial/imx.c103
-rw-r--r--drivers/tty/serial/ioc3_serial.c5
-rw-r--r--drivers/tty/serial/ioc4_serial.c5
-rw-r--r--drivers/tty/serial/ip22zilog.c1
-rw-r--r--drivers/tty/serial/jsm/jsm.h11
-rw-r--r--drivers/tty/serial/jsm/jsm_cls.c11
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c11
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c11
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c27
-rw-r--r--drivers/tty/serial/kgdb_nmi.c5
-rw-r--r--drivers/tty/serial/kgdboc.c8
-rw-r--r--drivers/tty/serial/lantiq.c14
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c11
-rw-r--r--drivers/tty/serial/m32r_sio.c14
-rw-r--r--drivers/tty/serial/m32r_sio_reg.h4
-rw-r--r--drivers/tty/serial/max3100.c16
-rw-r--r--drivers/tty/serial/max310x.c6
-rw-r--r--drivers/tty/serial/mcf.c6
-rw-r--r--drivers/tty/serial/men_z135_uart.c5
-rw-r--r--drivers/tty/serial/meson_uart.c31
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c5
-rw-r--r--drivers/tty/serial/mps2-uart.c5
-rw-r--r--drivers/tty/serial/mpsc.c6
-rw-r--r--drivers/tty/serial/msm_serial.c10
-rw-r--r--drivers/tty/serial/mux.c9
-rw-r--r--drivers/tty/serial/mvebu-uart.c495
-rw-r--r--drivers/tty/serial/mxs-auart.c5
-rw-r--r--drivers/tty/serial/netx-serial.c14
-rw-r--r--drivers/tty/serial/omap-serial.c23
-rw-r--r--drivers/tty/serial/owl-uart.c14
-rw-r--r--drivers/tty/serial/pch_uart.c14
-rw-r--r--drivers/tty/serial/pic32_uart.c3
-rw-r--r--drivers/tty/serial/pic32_uart.h3
-rw-r--r--drivers/tty/serial/pmac_zilog.c15
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c11
-rw-r--r--drivers/tty/serial/pxa.c6
-rw-r--r--drivers/tty/serial/rp2.c5
-rw-r--r--drivers/tty/serial/sa1100.c20
-rw-r--r--drivers/tty/serial/samsung.c5
-rw-r--r--drivers/tty/serial/samsung.h5
-rw-r--r--drivers/tty/serial/sb1250-duart.c6
-rw-r--r--drivers/tty/serial/sc16is7xx.c7
-rw-r--r--drivers/tty/serial/sccnxp.c13
-rw-r--r--drivers/tty/serial/serial-tegra.c13
-rw-r--r--drivers/tty/serial/serial_core.c54
-rw-r--r--drivers/tty/serial/serial_ks8695.c7
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c11
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.h12
-rw-r--r--drivers/tty/serial/serial_txx9.c5
-rw-r--r--drivers/tty/serial/sh-sci.c103
-rw-r--r--drivers/tty/serial/sh-sci.h3
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c3
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h3
-rw-r--r--drivers/tty/serial/sn_console.c23
-rw-r--r--drivers/tty/serial/sprd_serial.c10
-rw-r--r--drivers/tty/serial/st-asc.c7
-rw-r--r--drivers/tty/serial/stm32-usart.c5
-rw-r--r--drivers/tty/serial/stm32-usart.h2
-rw-r--r--drivers/tty/serial/suncore.c1
-rw-r--r--drivers/tty/serial/sunhv.c1
-rw-r--r--drivers/tty/serial/sunsab.c1
-rw-r--r--drivers/tty/serial/sunsu.c1
-rw-r--r--drivers/tty/serial/sunzilog.c1
-rw-r--r--drivers/tty/serial/tilegx.c11
-rw-r--r--drivers/tty/serial/timbuart.c14
-rw-r--r--drivers/tty/serial/timbuart.h14
-rw-r--r--drivers/tty/serial/uartlite.c7
-rw-r--r--drivers/tty/serial/ucc_uart.c6
-rw-r--r--drivers/tty/serial/vr41xx_siu.c15
-rw-r--r--drivers/tty/serial/vt8500_serial.c10
-rw-r--r--drivers/tty/serial/xilinx_uartps.c9
-rw-r--r--drivers/tty/serial/zs.c1
-rw-r--r--drivers/tty/synclink.c6
-rw-r--r--drivers/tty/synclink_gt.c3
-rw-r--r--drivers/tty/synclinkmp.c2
-rw-r--r--drivers/tty/sysrq.c18
-rw-r--r--drivers/tty/tty_audit.c6
-rw-r--r--drivers/tty/tty_baudrate.c1
-rw-r--r--drivers/tty/tty_buffer.c3
-rw-r--r--drivers/tty/tty_io.c1
-rw-r--r--drivers/tty/tty_ioctl.c1
-rw-r--r--drivers/tty/tty_jobctrl.c1
-rw-r--r--drivers/tty/tty_ldisc.c1
-rw-r--r--drivers/tty/tty_ldsem.c4
-rw-r--r--drivers/tty/tty_port.c5
-rw-r--r--drivers/tty/vcc.c25
-rw-r--r--drivers/tty/vt/consolemap.c1
-rw-r--r--drivers/tty/vt/keyboard.c1
-rw-r--r--drivers/tty/vt/vt.c107
-rw-r--r--drivers/usb/Kconfig9
-rw-r--r--drivers/usb/atm/cxacru.c22
-rw-r--r--drivers/usb/atm/speedtch.c27
-rw-r--r--drivers/usb/atm/ueagle-atm.c1
-rw-r--r--drivers/usb/atm/usbatm.c16
-rw-r--r--drivers/usb/atm/usbatm.h16
-rw-r--r--drivers/usb/atm/xusbatm.c16
-rw-r--r--drivers/usb/c67x00/Makefile1
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c16
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.c16
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.h16
-rw-r--r--drivers/usb/c67x00/c67x00-ll-hpi.c16
-rw-r--r--drivers/usb/c67x00/c67x00-sched.c18
-rw-r--r--drivers/usb/c67x00/c67x00.h16
-rw-r--r--drivers/usb/chipidea/Kconfig2
-rw-r--r--drivers/usb/chipidea/bits.h5
-rw-r--r--drivers/usb/chipidea/ci.h5
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c8
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h8
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c8
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c5
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c5
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c5
-rw-r--r--drivers/usb/chipidea/ci_hdrc_zevio.c6
-rw-r--r--drivers/usb/chipidea/core.c5
-rw-r--r--drivers/usb/chipidea/host.c14
-rw-r--r--drivers/usb/chipidea/otg.c5
-rw-r--r--drivers/usb/chipidea/otg.h5
-rw-r--r--drivers/usb/chipidea/otg_fsm.c5
-rw-r--r--drivers/usb/chipidea/otg_fsm.h5
-rw-r--r--drivers/usb/chipidea/udc.c9
-rw-r--r--drivers/usb/chipidea/udc.h5
-rw-r--r--drivers/usb/chipidea/ulpi.c10
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c8
-rw-r--r--drivers/usb/class/Makefile1
-rw-r--r--drivers/usb/class/cdc-acm.c15
-rw-r--r--drivers/usb/class/cdc-wdm.c3
-rw-r--r--drivers/usb/class/usblp.c17
-rw-r--r--drivers/usb/class/usbtmc.c15
-rw-r--r--drivers/usb/common/common.c5
-rw-r--r--drivers/usb/common/led.c6
-rw-r--r--drivers/usb/common/ulpi.c5
-rw-r--r--drivers/usb/common/usb-otg-fsm.c15
-rw-r--r--drivers/usb/core/buffer.c2
-rw-r--r--drivers/usb/core/config.c2
-rw-r--r--drivers/usb/core/devices.c15
-rw-r--r--drivers/usb/core/devio.c31
-rw-r--r--drivers/usb/core/driver.c16
-rw-r--r--drivers/usb/core/endpoint.c2
-rw-r--r--drivers/usb/core/file.c2
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hcd-pci.c15
-rw-r--r--drivers/usb/core/hcd.c19
-rw-r--r--drivers/usb/core/hub.c23
-rw-r--r--drivers/usb/core/hub.h10
-rw-r--r--drivers/usb/core/ledtrig-usbport.c5
-rw-r--r--drivers/usb/core/message.c53
-rw-r--r--drivers/usb/core/notify.c2
-rw-r--r--drivers/usb/core/of.c13
-rw-r--r--drivers/usb/core/otg_whitelist.h6
-rw-r--r--drivers/usb/core/port.c11
-rw-r--r--drivers/usb/core/quirks.c16
-rw-r--r--drivers/usb/core/sysfs.c9
-rw-r--r--drivers/usb/core/urb.c33
-rw-r--r--drivers/usb/core/usb-acpi.c6
-rw-r--r--drivers/usb/core/usb.c2
-rw-r--r--drivers/usb/core/usb.h3
-rw-r--r--drivers/usb/dwc2/core.c1
-rw-r--r--drivers/usb/dwc2/core.h5
-rw-r--r--drivers/usb/dwc2/core_intr.c1
-rw-r--r--drivers/usb/dwc2/debug.h10
-rw-r--r--drivers/usb/dwc2/debugfs.c10
-rw-r--r--drivers/usb/dwc2/gadget.c12
-rw-r--r--drivers/usb/dwc2/hcd.c13
-rw-r--r--drivers/usb/dwc2/hcd.h1
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c1
-rw-r--r--drivers/usb/dwc2/hcd_intr.c1
-rw-r--r--drivers/usb/dwc2/hcd_queue.c1
-rw-r--r--drivers/usb/dwc2/hw.h1
-rw-r--r--drivers/usb/dwc2/params.c15
-rw-r--r--drivers/usb/dwc2/pci.c1
-rw-r--r--drivers/usb/dwc2/platform.c1
-rw-r--r--drivers/usb/dwc3/core.c74
-rw-r--r--drivers/usb/dwc3/core.h12
-rw-r--r--drivers/usb/dwc3/debug.h10
-rw-r--r--drivers/usb/dwc3/debugfs.c10
-rw-r--r--drivers/usb/dwc3/drd.c13
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c10
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c10
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c39
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c10
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c39
-rw-r--r--drivers/usb/dwc3/dwc3-st.c6
-rw-r--r--drivers/usb/dwc3/ep0.c30
-rw-r--r--drivers/usb/dwc3/gadget.c72
-rw-r--r--drivers/usb/dwc3/gadget.h10
-rw-r--r--drivers/usb/dwc3/host.c10
-rw-r--r--drivers/usb/dwc3/io.h10
-rw-r--r--drivers/usb/dwc3/trace.c10
-rw-r--r--drivers/usb/dwc3/trace.h10
-rw-r--r--drivers/usb/dwc3/ulpi.c5
-rw-r--r--drivers/usb/early/Makefile1
-rw-r--r--drivers/usb/early/ehci-dbgp.c1
-rw-r--r--drivers/usb/early/xhci-dbc.c5
-rw-r--r--drivers/usb/early/xhci-dbc.h11
-rw-r--r--drivers/usb/gadget/composite.c13
-rw-r--r--drivers/usb/gadget/config.c6
-rw-r--r--drivers/usb/gadget/configfs.c11
-rw-r--r--drivers/usb/gadget/epautoconf.c6
-rw-r--r--drivers/usb/gadget/function/f_acm.c7
-rw-r--r--drivers/usb/gadget/function/f_ecm.c8
-rw-r--r--drivers/usb/gadget/function/f_eem.c8
-rw-r--r--drivers/usb/gadget/function/f_fs.c9
-rw-r--r--drivers/usb/gadget/function/f_hid.c8
-rw-r--r--drivers/usb/gadget/function/f_loopback.c8
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c5
-rw-r--r--drivers/usb/gadget/function/f_midi.c5
-rw-r--r--drivers/usb/gadget/function/f_ncm.c8
-rw-r--r--drivers/usb/gadget/function/f_obex.c8
-rw-r--r--drivers/usb/gadget/function/f_phonet.c9
-rw-r--r--drivers/usb/gadget/function/f_printer.c8
-rw-r--r--drivers/usb/gadget/function/f_rndis.c8
-rw-r--r--drivers/usb/gadget/function/f_serial.c7
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c8
-rw-r--r--drivers/usb/gadget/function/f_subset.c8
-rw-r--r--drivers/usb/gadget/function/f_tcm.c5
-rw-r--r--drivers/usb/gadget/function/f_uac1.c8
-rw-r--r--drivers/usb/gadget/function/f_uac1_legacy.c5
-rw-r--r--drivers/usb/gadget/function/f_uac2.c8
-rw-r--r--drivers/usb/gadget/function/f_uvc.c6
-rw-r--r--drivers/usb/gadget/function/f_uvc.h6
-rw-r--r--drivers/usb/gadget/function/rndis.c5
-rw-r--r--drivers/usb/gadget/function/rndis.h5
-rw-r--r--drivers/usb/gadget/function/storage_common.c6
-rw-r--r--drivers/usb/gadget/function/u_audio.c11
-rw-r--r--drivers/usb/gadget/function/u_audio.h12
-rw-r--r--drivers/usb/gadget/function/u_ecm.h5
-rw-r--r--drivers/usb/gadget/function/u_eem.h5
-rw-r--r--drivers/usb/gadget/function/u_ether.c6
-rw-r--r--drivers/usb/gadget/function/u_ether.h6
-rw-r--r--drivers/usb/gadget/function/u_ether_configfs.h5
-rw-r--r--drivers/usb/gadget/function/u_fs.h5
-rw-r--r--drivers/usb/gadget/function/u_gether.h5
-rw-r--r--drivers/usb/gadget/function/u_hid.h5
-rw-r--r--drivers/usb/gadget/function/u_midi.h5
-rw-r--r--drivers/usb/gadget/function/u_ncm.h5
-rw-r--r--drivers/usb/gadget/function/u_phonet.h5
-rw-r--r--drivers/usb/gadget/function/u_printer.h5
-rw-r--r--drivers/usb/gadget/function/u_rndis.h5
-rw-r--r--drivers/usb/gadget/function/u_serial.c6
-rw-r--r--drivers/usb/gadget/function/u_serial.h5
-rw-r--r--drivers/usb/gadget/function/u_tcm.h5
-rw-r--r--drivers/usb/gadget/function/u_uac1.h5
-rw-r--r--drivers/usb/gadget/function/u_uac1_legacy.c3
-rw-r--r--drivers/usb/gadget/function/u_uac1_legacy.h3
-rw-r--r--drivers/usb/gadget/function/u_uac2.h5
-rw-r--r--drivers/usb/gadget/function/u_uvc.h5
-rw-r--r--drivers/usb/gadget/function/uvc.h6
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c55
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h5
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c6
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c8
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.h7
-rw-r--r--drivers/usb/gadget/function/uvc_video.c6
-rw-r--r--drivers/usb/gadget/function/uvc_video.h5
-rw-r--r--drivers/usb/gadget/functions.c1
-rw-r--r--drivers/usb/gadget/legacy/acm_ms.c6
-rw-r--r--drivers/usb/gadget/legacy/audio.c3
-rw-r--r--drivers/usb/gadget/legacy/cdc2.c6
-rw-r--r--drivers/usb/gadget/legacy/dbgp.c1
-rw-r--r--drivers/usb/gadget/legacy/ether.c6
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c6
-rw-r--r--drivers/usb/gadget/legacy/gmidi.c4
-rw-r--r--drivers/usb/gadget/legacy/hid.c6
-rw-r--r--drivers/usb/gadget/legacy/inode.c6
-rw-r--r--drivers/usb/gadget/legacy/mass_storage.c6
-rw-r--r--drivers/usb/gadget/legacy/multi.c6
-rw-r--r--drivers/usb/gadget/legacy/ncm.c6
-rw-r--r--drivers/usb/gadget/legacy/nokia.c5
-rw-r--r--drivers/usb/gadget/legacy/printer.c6
-rw-r--r--drivers/usb/gadget/legacy/serial.c5
-rw-r--r--drivers/usb/gadget/legacy/tcm_usb_gadget.c2
-rw-r--r--drivers/usb/gadget/legacy/webcam.c6
-rw-r--r--drivers/usb/gadget/legacy/zero.c14
-rw-r--r--drivers/usb/gadget/u_f.c5
-rw-r--r--drivers/usb/gadget/u_f.h5
-rw-r--r--drivers/usb/gadget/u_os_desc.h5
-rw-r--r--drivers/usb/gadget/udc/amd5536udc.h6
-rw-r--r--drivers/usb/gadget/udc/amd5536udc_pci.c6
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c6
-rw-r--r--drivers/usb/gadget/udc/at91_udc.h6
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c5
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.h5
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c6
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc.h7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.c7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.h7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_dbg.c7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_dbg.h7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.h7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_pci.c7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c7
-rw-r--r--drivers/usb/gadget/udc/core.c15
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c82
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c5
-rw-r--r--drivers/usb/gadget/udc/fotg210.h6
-rw-r--r--drivers/usb/gadget/udc/fsl_mxc_udc.c6
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c6
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.h6
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c6
-rw-r--r--drivers/usb/gadget/udc/fsl_usb2_udc.h6
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c5
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.h5
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c19
-rw-r--r--drivers/usb/gadget/udc/goku_udc.h5
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c12
-rw-r--r--drivers/usb/gadget/udc/gr_udc.h6
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c15
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c9
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.h5
-rw-r--r--drivers/usb/gadget/udc/mv_u3d.h5
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c5
-rw-r--r--drivers/usb/gadget/udc/mv_udc.h6
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c6
-rw-r--r--drivers/usb/gadget/udc/net2272.c15
-rw-r--r--drivers/usb/gadget/udc/net2272.h15
-rw-r--r--drivers/usb/gadget/udc/net2280.c6
-rw-r--r--drivers/usb/gadget/udc/net2280.h6
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c6
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c5
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c10
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.h7
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c6
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.h6
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c9
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.h5
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c80
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c7
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c6
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.h6
-rw-r--r--drivers/usb/gadget/udc/snps_udc_core.c29
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c10
-rw-r--r--drivers/usb/gadget/udc/trace.c10
-rw-r--r--drivers/usb/gadget/udc/trace.h13
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c7
-rw-r--r--drivers/usb/gadget/usbstring.c6
-rw-r--r--drivers/usb/host/Kconfig16
-rw-r--r--drivers/usb/host/Makefile5
-rw-r--r--drivers/usb/host/bcma-hcd.c3
-rw-r--r--drivers/usb/host/ehci-atmel.c8
-rw-r--r--drivers/usb/host/ehci-dbg.c12
-rw-r--r--drivers/usb/host/ehci-exynos.c7
-rw-r--r--drivers/usb/host/ehci-fsl.c15
-rw-r--r--drivers/usb/host/ehci-fsl.h15
-rw-r--r--drivers/usb/host/ehci-grlib.c15
-rw-r--r--drivers/usb/host/ehci-hcd.c17
-rw-r--r--drivers/usb/host/ehci-hub.c15
-rw-r--r--drivers/usb/host/ehci-mem.c15
-rw-r--r--drivers/usb/host/ehci-msm.c265
-rw-r--r--drivers/usb/host/ehci-mv.c6
-rw-r--r--drivers/usb/host/ehci-mxc.c15
-rw-r--r--drivers/usb/host/ehci-omap.c16
-rw-r--r--drivers/usb/host/ehci-orion.c5
-rw-r--r--drivers/usb/host/ehci-pci.c15
-rw-r--r--drivers/usb/host/ehci-platform.c38
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c6
-rw-r--r--drivers/usb/host/ehci-ppc-of.c1
-rw-r--r--drivers/usb/host/ehci-ps3.c14
-rw-r--r--drivers/usb/host/ehci-q.c15
-rw-r--r--drivers/usb/host/ehci-sched.c15
-rw-r--r--drivers/usb/host/ehci-sh.c5
-rw-r--r--drivers/usb/host/ehci-spear.c5
-rw-r--r--drivers/usb/host/ehci-st.c5
-rw-r--r--drivers/usb/host/ehci-sysfs.c15
-rw-r--r--drivers/usb/host/ehci-tegra.c12
-rw-r--r--drivers/usb/host/ehci-tilegx.c11
-rw-r--r--drivers/usb/host/ehci-timer.c11
-rw-r--r--drivers/usb/host/ehci-w90x900.c6
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c16
-rw-r--r--drivers/usb/host/ehci.h15
-rw-r--r--drivers/usb/host/fhci-dbg.c6
-rw-r--r--drivers/usb/host/fhci-hcd.c6
-rw-r--r--drivers/usb/host/fhci-hub.c6
-rw-r--r--drivers/usb/host/fhci-mem.c6
-rw-r--r--drivers/usb/host/fhci-q.c6
-rw-r--r--drivers/usb/host/fhci-sched.c6
-rw-r--r--drivers/usb/host/fhci-tds.c6
-rw-r--r--drivers/usb/host/fhci.h6
-rw-r--r--drivers/usb/host/fotg210-hcd.c17
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c6
-rw-r--r--drivers/usb/host/hwa-hc.c16
-rw-r--r--drivers/usb/host/imx21-dbg.c15
-rw-r--r--drivers/usb/host/imx21-hcd.c15
-rw-r--r--drivers/usb/host/imx21-hcd.h15
-rw-r--r--drivers/usb/host/isp116x-hcd.c4
-rw-r--r--drivers/usb/host/isp1362-hcd.c5
-rw-r--r--drivers/usb/host/max3421-hcd.c81
-rw-r--r--drivers/usb/host/ohci-at91.c1
-rw-r--r--drivers/usb/host/ohci-da8xx.c5
-rw-r--r--drivers/usb/host/ohci-dbg.c1
-rw-r--r--drivers/usb/host/ohci-exynos.c7
-rw-r--r--drivers/usb/host/ohci-hcd.c5
-rw-r--r--drivers/usb/host/ohci-hub.c1
-rw-r--r--drivers/usb/host/ohci-mem.c1
-rw-r--r--drivers/usb/host/ohci-nxp.c6
-rw-r--r--drivers/usb/host/ohci-omap.c1
-rw-r--r--drivers/usb/host/ohci-pci.c1
-rw-r--r--drivers/usb/host/ohci-platform.c40
-rw-r--r--drivers/usb/host/ohci-ppc-of.c1
-rw-r--r--drivers/usb/host/ohci-ps3.c14
-rw-r--r--drivers/usb/host/ohci-pxa27x.c1
-rw-r--r--drivers/usb/host/ohci-q.c1
-rw-r--r--drivers/usb/host/ohci-s3c2410.c1
-rw-r--r--drivers/usb/host/ohci-sa1111.c26
-rw-r--r--drivers/usb/host/ohci-sm501.c1
-rw-r--r--drivers/usb/host/ohci-spear.c5
-rw-r--r--drivers/usb/host/ohci-st.c5
-rw-r--r--drivers/usb/host/ohci-tilegx.c11
-rw-r--r--drivers/usb/host/ohci-tmio.c5
-rw-r--r--drivers/usb/host/ohci.h1
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c17
-rw-r--r--drivers/usb/host/pci-quirks.c3
-rw-r--r--drivers/usb/host/r8a66597-hcd.c42
-rw-r--r--drivers/usb/host/r8a66597.h26
-rw-r--r--drivers/usb/host/sl811-hcd.c1
-rw-r--r--drivers/usb/host/sl811_cs.c1
-rw-r--r--drivers/usb/host/ssb-hcd.c3
-rw-r--r--drivers/usb/host/u132-hcd.c6
-rw-r--r--drivers/usb/host/uhci-hcd.c1
-rw-r--r--drivers/usb/host/uhci-hcd.h4
-rw-r--r--drivers/usb/host/whci/asl.c13
-rw-r--r--drivers/usb/host/whci/debug.c13
-rw-r--r--drivers/usb/host/whci/hcd.c13
-rw-r--r--drivers/usb/host/whci/hw.c13
-rw-r--r--drivers/usb/host/whci/init.c13
-rw-r--r--drivers/usb/host/whci/int.c13
-rw-r--r--drivers/usb/host/whci/pzl.c13
-rw-r--r--drivers/usb/host/whci/qset.c13
-rw-r--r--drivers/usb/host/whci/whcd.h15
-rw-r--r--drivers/usb/host/whci/whci-hc.h15
-rw-r--r--drivers/usb/host/whci/wusb.c13
-rw-r--r--drivers/usb/host/xhci-dbg.c14
-rw-r--r--drivers/usb/host/xhci-debugfs.c523
-rw-r--r--drivers/usb/host/xhci-debugfs.h134
-rw-r--r--drivers/usb/host/xhci-ext-caps.h14
-rw-r--r--drivers/usb/host/xhci-hub.c20
-rw-r--r--drivers/usb/host/xhci-mem.c24
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c14
-rw-r--r--drivers/usb/host/xhci-mtk.c144
-rw-r--r--drivers/usb/host/xhci-mtk.h16
-rw-r--r--drivers/usb/host/xhci-mvebu.c5
-rw-r--r--drivers/usb/host/xhci-mvebu.h5
-rw-r--r--drivers/usb/host/xhci-pci.c14
-rw-r--r--drivers/usb/host/xhci-plat.c16
-rw-r--r--drivers/usb/host/xhci-plat.h5
-rw-r--r--drivers/usb/host/xhci-rcar.c5
-rw-r--r--drivers/usb/host/xhci-rcar.h5
-rw-r--r--drivers/usb/host/xhci-ring.c35
-rw-r--r--drivers/usb/host/xhci-tegra.c9
-rw-r--r--drivers/usb/host/xhci-trace.c5
-rw-r--r--drivers/usb/host/xhci-trace.h10
-rw-r--r--drivers/usb/host/xhci.c103
-rw-r--r--drivers/usb/host/xhci.h34
-rw-r--r--drivers/usb/image/Makefile1
-rw-r--r--drivers/usb/image/mdc800.c16
-rw-r--r--drivers/usb/image/microtek.c1
-rw-r--r--drivers/usb/isp1760/Makefile1
-rw-r--r--drivers/usb/isp1760/isp1760-core.c5
-rw-r--r--drivers/usb/isp1760/isp1760-core.h5
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c8
-rw-r--r--drivers/usb/isp1760/isp1760-regs.h5
-rw-r--r--drivers/usb/isp1760/isp1760-udc.c12
-rw-r--r--drivers/usb/isp1760/isp1760-udc.h5
-rw-r--r--drivers/usb/misc/Kconfig4
-rw-r--r--drivers/usb/misc/adutux.c8
-rw-r--r--drivers/usb/misc/appledisplay.c15
-rw-r--r--drivers/usb/misc/chaoskey.c10
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c5
-rw-r--r--drivers/usb/misc/cytherm.c6
-rw-r--r--drivers/usb/misc/ehset.c10
-rw-r--r--drivers/usb/misc/emi26.c5
-rw-r--r--drivers/usb/misc/emi62.c5
-rw-r--r--drivers/usb/misc/ezusb.c5
-rw-r--r--drivers/usb/misc/ftdi-elan.c6
-rw-r--r--drivers/usb/misc/idmouse.c6
-rw-r--r--drivers/usb/misc/iowarrior.c1
-rw-r--r--drivers/usb/misc/isight_firmware.c5
-rw-r--r--drivers/usb/misc/ldusb.c6
-rw-r--r--drivers/usb/misc/legousbtower.c6
-rw-r--r--drivers/usb/misc/lvstest.c5
-rw-r--r--drivers/usb/misc/rio500.c15
-rw-r--r--drivers/usb/misc/rio500_usb.h19
-rw-r--r--drivers/usb/misc/sisusbvga/Makefile1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.h1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.c1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.h1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_struct.h1
-rw-r--r--drivers/usb/misc/trancevibrator.c15
-rw-r--r--drivers/usb/misc/usb251xb.c177
-rw-r--r--drivers/usb/misc/usb3503.c15
-rw-r--r--drivers/usb/misc/usb4604.c11
-rw-r--r--drivers/usb/misc/usb_u132.h6
-rw-r--r--drivers/usb/misc/usblcd.c1
-rw-r--r--drivers/usb/misc/usbsevseg.c6
-rw-r--r--drivers/usb/misc/usbtest.c11
-rw-r--r--drivers/usb/misc/uss720.c15
-rw-r--r--drivers/usb/misc/yurex.c6
-rw-r--r--drivers/usb/mon/Makefile1
-rw-r--r--drivers/usb/mon/mon_main.c1
-rw-r--r--drivers/usb/mtu3/mtu3.h59
-rw-r--r--drivers/usb/mtu3/mtu3_core.c72
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c72
-rw-r--r--drivers/usb/mtu3/mtu3_dr.h17
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c14
-rw-r--r--drivers/usb/mtu3/mtu3_gadget_ep0.c27
-rw-r--r--drivers/usb/mtu3/mtu3_host.c89
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h24
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c176
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c113
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.h11
-rw-r--r--drivers/usb/musb/am35x.c42
-rw-r--r--drivers/usb/musb/blackfin.c16
-rw-r--r--drivers/usb/musb/blackfin.h8
-rw-r--r--drivers/usb/musb/cppi_dma.c1
-rw-r--r--drivers/usb/musb/da8xx.c47
-rw-r--r--drivers/usb/musb/davinci.c38
-rw-r--r--drivers/usb/musb/davinci.h6
-rw-r--r--drivers/usb/musb/jz4740.c10
-rw-r--r--drivers/usb/musb/musb_am335x.c1
-rw-r--r--drivers/usb/musb/musb_core.c34
-rw-r--r--drivers/usb/musb/musb_core.h28
-rw-r--r--drivers/usb/musb/musb_cppi41.c1
-rw-r--r--drivers/usb/musb/musb_debug.h27
-rw-r--r--drivers/usb/musb/musb_debugfs.c27
-rw-r--r--drivers/usb/musb/musb_dma.h27
-rw-r--r--drivers/usb/musb/musb_dsps.c35
-rw-r--r--drivers/usb/musb/musb_gadget.c27
-rw-r--r--drivers/usb/musb/musb_gadget.h27
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c27
-rw-r--r--drivers/usb/musb/musb_host.c27
-rw-r--r--drivers/usb/musb/musb_host.h27
-rw-r--r--drivers/usb/musb/musb_io.h27
-rw-r--r--drivers/usb/musb/musb_regs.h27
-rw-r--r--drivers/usb/musb/musb_trace.c10
-rw-r--r--drivers/usb/musb/musb_trace.h10
-rw-r--r--drivers/usb/musb/musb_virthub.c27
-rw-r--r--drivers/usb/musb/musbhsdma.c27
-rw-r--r--drivers/usb/musb/musbhsdma.h27
-rw-r--r--drivers/usb/musb/omap2430.c18
-rw-r--r--drivers/usb/musb/omap2430.h6
-rw-r--r--drivers/usb/musb/sunxi.c11
-rw-r--r--drivers/usb/musb/tusb6010.c25
-rw-r--r--drivers/usb/musb/tusb6010.h5
-rw-r--r--drivers/usb/musb/tusb6010_omap.c5
-rw-r--r--drivers/usb/musb/ux500.c15
-rw-r--r--drivers/usb/musb/ux500_dma.c14
-rw-r--r--drivers/usb/phy/Kconfig29
-rw-r--r--drivers/usb/phy/Makefile2
-rw-r--r--drivers/usb/phy/of.c6
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c16
-rw-r--r--drivers/usb/phy/phy-am335x-control.c1
-rw-r--r--drivers/usb/phy/phy-am335x.c1
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c15
-rw-r--r--drivers/usb/phy/phy-fsl-usb.h18
-rw-r--r--drivers/usb/phy/phy-generic.c20
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c5
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c26
-rw-r--r--drivers/usb/phy/phy-isp1301.c5
-rw-r--r--drivers/usb/phy/phy-keystone.c11
-rw-r--r--drivers/usb/phy/phy-msm-usb.c2085
-rw-r--r--drivers/usb/phy/phy-mv-usb.c23
-rw-r--r--drivers/usb/phy/phy-mv-usb.h6
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c162
-rw-r--r--drivers/usb/phy/phy-omap-otg.c10
-rw-r--r--drivers/usb/phy/phy-qcom-8x16-usb.c366
-rw-r--r--drivers/usb/phy/phy-tahvo.c15
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c11
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c15
-rw-r--r--drivers/usb/phy/phy-ulpi-viewport.c11
-rw-r--r--drivers/usb/phy/phy-ulpi.c15
-rw-r--r--drivers/usb/phy/phy.c6
-rw-r--r--drivers/usb/renesas_usbhs/common.c39
-rw-r--r--drivers/usb/renesas_usbhs/common.h11
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c11
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h11
-rw-r--r--drivers/usb/renesas_usbhs/mod.c11
-rw-r--r--drivers/usb/renesas_usbhs/mod.h11
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c11
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c11
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c11
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h11
-rw-r--r--drivers/usb/renesas_usbhs/rcar2.c7
-rw-r--r--drivers/usb/renesas_usbhs/rcar2.h1
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c54
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.h2
-rw-r--r--drivers/usb/serial/Makefile-keyspan_pda_fw1
-rw-r--r--drivers/usb/serial/aircable.c7
-rw-r--r--drivers/usb/serial/ark3116.c6
-rw-r--r--drivers/usb/serial/belkin_sa.c6
-rw-r--r--drivers/usb/serial/belkin_sa.h6
-rw-r--r--drivers/usb/serial/bus.c5
-rw-r--r--drivers/usb/serial/ch341.c7
-rw-r--r--drivers/usb/serial/console.c5
-rw-r--r--drivers/usb/serial/cp210x.c7
-rw-r--r--drivers/usb/serial/cyberjack.c6
-rw-r--r--drivers/usb/serial/cypress_m8.c6
-rw-r--r--drivers/usb/serial/digi_acceleport.c6
-rw-r--r--drivers/usb/serial/empeg.c7
-rw-r--r--drivers/usb/serial/f81232.c6
-rw-r--r--drivers/usb/serial/f81534.c96
-rw-r--r--drivers/usb/serial/ftdi_sio.c6
-rw-r--r--drivers/usb/serial/garmin_gps.c82
-rw-r--r--drivers/usb/serial/generic.c5
-rw-r--r--drivers/usb/serial/io_16654.h5
-rw-r--r--drivers/usb/serial/io_edgeport.c6
-rw-r--r--drivers/usb/serial/io_edgeport.h7
-rw-r--r--drivers/usb/serial/io_ionsp.h6
-rw-r--r--drivers/usb/serial/io_ti.c6
-rw-r--r--drivers/usb/serial/io_ti.h7
-rw-r--r--drivers/usb/serial/io_usbvend.h5
-rw-r--r--drivers/usb/serial/ipaq.c6
-rw-r--r--drivers/usb/serial/ipw.c6
-rw-r--r--drivers/usb/serial/ir-usb.c6
-rw-r--r--drivers/usb/serial/iuu_phoenix.c7
-rw-r--r--drivers/usb/serial/iuu_phoenix.h7
-rw-r--r--drivers/usb/serial/keyspan.c6
-rw-r--r--drivers/usb/serial/keyspan_pda.c6
-rw-r--r--drivers/usb/serial/kl5kusb105.c6
-rw-r--r--drivers/usb/serial/kobil_sct.c7
-rw-r--r--drivers/usb/serial/mct_u232.c6
-rw-r--r--drivers/usb/serial/mct_u232.h6
-rw-r--r--drivers/usb/serial/metro-usb.c44
-rw-r--r--drivers/usb/serial/mos7720.c7
-rw-r--r--drivers/usb/serial/mos7840.c15
-rw-r--r--drivers/usb/serial/mxuport.c6
-rw-r--r--drivers/usb/serial/navman.c7
-rw-r--r--drivers/usb/serial/omninet.c7
-rw-r--r--drivers/usb/serial/opticon.c7
-rw-r--r--drivers/usb/serial/option.c7
-rw-r--r--drivers/usb/serial/oti6858.c7
-rw-r--r--drivers/usb/serial/oti6858.h6
-rw-r--r--drivers/usb/serial/pl2303.c7
-rw-r--r--drivers/usb/serial/pl2303.h7
-rw-r--r--drivers/usb/serial/qcaux.c7
-rw-r--r--drivers/usb/serial/qcserial.c7
-rw-r--r--drivers/usb/serial/quatech2.c8
-rw-r--r--drivers/usb/serial/safe_serial.c6
-rw-r--r--drivers/usb/serial/sierra.c7
-rw-r--r--drivers/usb/serial/spcp8x5.c6
-rw-r--r--drivers/usb/serial/ssu100.c3
-rw-r--r--drivers/usb/serial/symbolserial.c7
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c6
-rw-r--r--drivers/usb/serial/upd78f0730.c5
-rw-r--r--drivers/usb/serial/usb-serial-simple.c7
-rw-r--r--drivers/usb/serial/usb-serial.c29
-rw-r--r--drivers/usb/serial/usb_debug.c11
-rw-r--r--drivers/usb/serial/usb_wwan.c7
-rw-r--r--drivers/usb/serial/visor.c7
-rw-r--r--drivers/usb/serial/visor.h6
-rw-r--r--drivers/usb/serial/whiteheat.c6
-rw-r--r--drivers/usb/serial/whiteheat.h6
-rw-r--r--drivers/usb/serial/wishbone-serial.c6
-rw-r--r--drivers/usb/serial/xsens_mt.c7
-rw-r--r--drivers/usb/storage/alauda.c15
-rw-r--r--drivers/usb/storage/cypress_atacb.c15
-rw-r--r--drivers/usb/storage/datafab.c15
-rw-r--r--drivers/usb/storage/debug.c15
-rw-r--r--drivers/usb/storage/debug.h15
-rw-r--r--drivers/usb/storage/ene_ub6250.c17
-rw-r--r--drivers/usb/storage/freecom.c15
-rw-r--r--drivers/usb/storage/initializers.c15
-rw-r--r--drivers/usb/storage/initializers.h15
-rw-r--r--drivers/usb/storage/isd200.c15
-rw-r--r--drivers/usb/storage/jumpshot.c15
-rw-r--r--drivers/usb/storage/karma.c15
-rw-r--r--drivers/usb/storage/onetouch.c19
-rw-r--r--drivers/usb/storage/option_ms.c19
-rw-r--r--drivers/usb/storage/protocol.c15
-rw-r--r--drivers/usb/storage/protocol.h15
-rw-r--r--drivers/usb/storage/realtek_cr.c14
-rw-r--r--drivers/usb/storage/scsiglue.c15
-rw-r--r--drivers/usb/storage/scsiglue.h15
-rw-r--r--drivers/usb/storage/sddr09.c15
-rw-r--r--drivers/usb/storage/sddr55.c16
-rw-r--r--drivers/usb/storage/shuttle_usbat.c15
-rw-r--r--drivers/usb/storage/transport.c15
-rw-r--r--drivers/usb/storage/transport.h15
-rw-r--r--drivers/usb/storage/uas.c4
-rw-r--r--drivers/usb/storage/unusual_alauda.h15
-rw-r--r--drivers/usb/storage/unusual_cypress.h15
-rw-r--r--drivers/usb/storage/unusual_datafab.h15
-rw-r--r--drivers/usb/storage/unusual_devs.h15
-rw-r--r--drivers/usb/storage/unusual_ene_ub6250.h18
-rw-r--r--drivers/usb/storage/unusual_freecom.h15
-rw-r--r--drivers/usb/storage/unusual_isd200.h15
-rw-r--r--drivers/usb/storage/unusual_jumpshot.h15
-rw-r--r--drivers/usb/storage/unusual_karma.h15
-rw-r--r--drivers/usb/storage/unusual_onetouch.h15
-rw-r--r--drivers/usb/storage/unusual_realtek.h14
-rw-r--r--drivers/usb/storage/unusual_sddr09.h15
-rw-r--r--drivers/usb/storage/unusual_sddr55.h15
-rw-r--r--drivers/usb/storage/unusual_uas.h15
-rw-r--r--drivers/usb/storage/unusual_usbat.h15
-rw-r--r--drivers/usb/storage/usb.c49
-rw-r--r--drivers/usb/storage/usb.h15
-rw-r--r--drivers/usb/storage/usual-tables.c15
-rw-r--r--drivers/usb/typec/Kconfig25
-rw-r--r--drivers/usb/typec/Makefile4
-rw-r--r--drivers/usb/typec/fusb302/Kconfig (renamed from drivers/staging/typec/fusb302/Kconfig)0
-rw-r--r--drivers/usb/typec/fusb302/Makefile (renamed from drivers/staging/typec/fusb302/Makefile)1
-rw-r--r--drivers/usb/typec/fusb302/fusb302.c (renamed from drivers/staging/typec/fusb302/fusb302.c)15
-rw-r--r--drivers/usb/typec/fusb302/fusb302_reg.h (renamed from drivers/staging/typec/fusb302/fusb302_reg.h)11
-rw-r--r--drivers/usb/typec/tcpm.c (renamed from drivers/staging/typec/tcpm.c)60
-rw-r--r--drivers/usb/typec/tps6598x.c473
-rw-r--r--drivers/usb/typec/typec.c5
-rw-r--r--drivers/usb/typec/typec_wcove.c599
-rw-r--r--drivers/usb/typec/ucsi/trace.c1
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c5
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c5
-rw-r--r--drivers/usb/usb-skeleton.c6
-rw-r--r--drivers/usb/usbip/stub.h16
-rw-r--r--drivers/usb/usbip/stub_dev.c16
-rw-r--r--drivers/usb/usbip/stub_main.c16
-rw-r--r--drivers/usb/usbip/stub_rx.c16
-rw-r--r--drivers/usb/usbip/stub_tx.c16
-rw-r--r--drivers/usb/usbip/usbip_common.c16
-rw-r--r--drivers/usb/usbip/usbip_common.h16
-rw-r--r--drivers/usb/usbip/usbip_event.c16
-rw-r--r--drivers/usb/usbip/vhci.h7
-rw-r--r--drivers/usb/usbip/vhci_hcd.c16
-rw-r--r--drivers/usb/usbip/vhci_rx.c16
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c16
-rw-r--r--drivers/usb/usbip/vhci_tx.c16
-rw-r--r--drivers/usb/usbip/vudc.h16
-rw-r--r--drivers/usb/usbip/vudc_dev.c23
-rw-r--r--drivers/usb/usbip/vudc_main.c14
-rw-r--r--drivers/usb/usbip/vudc_rx.c14
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c16
-rw-r--r--drivers/usb/usbip/vudc_transfer.c20
-rw-r--r--drivers/usb/usbip/vudc_tx.c14
-rw-r--r--drivers/usb/wusbcore/cbaf.c16
-rw-r--r--drivers/usb/wusbcore/crypto.c16
-rw-r--r--drivers/usb/wusbcore/dev-sysfs.c16
-rw-r--r--drivers/usb/wusbcore/devconnect.c16
-rw-r--r--drivers/usb/wusbcore/mmc.c16
-rw-r--r--drivers/usb/wusbcore/pal.c13
-rw-r--r--drivers/usb/wusbcore/reservation.c13
-rw-r--r--drivers/usb/wusbcore/rh.c16
-rw-r--r--drivers/usb/wusbcore/security.c25
-rw-r--r--drivers/usb/wusbcore/wa-hc.c16
-rw-r--r--drivers/usb/wusbcore/wa-hc.h16
-rw-r--r--drivers/usb/wusbcore/wa-nep.c16
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c16
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c17
-rw-r--r--drivers/usb/wusbcore/wusbhc.c16
-rw-r--r--drivers/usb/wusbcore/wusbhc.h16
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c31
-rw-r--r--drivers/vfio/platform/reset/Kconfig9
-rw-r--r--drivers/vfio/platform/reset/Makefile1
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c113
-rw-r--r--drivers/vfio/vfio.c2
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c2
-rw-r--r--drivers/vfio/vfio_iommu_type1.c3
-rw-r--r--drivers/vhost/net.c2
-rw-r--r--drivers/vhost/scsi.c2
-rw-r--r--drivers/video/backlight/ili922x.c3
-rw-r--r--drivers/video/backlight/pwm_bl.c7
-rw-r--r--drivers/video/backlight/tps65217_bl.c17
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c73
-rw-r--r--drivers/vme/bridges/vme_fake.c35
-rw-r--r--drivers/vme/bridges/vme_tsi148.c83
-rw-r--r--drivers/vme/vme.c214
-rw-r--r--drivers/w1/slaves/Kconfig15
-rw-r--r--drivers/w1/slaves/Makefile1
-rw-r--r--drivers/w1/slaves/w1_ds28e17.c771
-rw-r--r--drivers/w1/slaves/w1_therm.c59
-rw-r--r--drivers/w1/w1_io.c3
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--fs/afs/write.c17
-rw-r--r--fs/aio.c2
-rw-r--r--fs/binfmt_elf.c10
-rw-r--r--fs/block_dev.c20
-rw-r--r--fs/btrfs/Kconfig11
-rw-r--r--fs/btrfs/Makefile3
-rw-r--r--fs/btrfs/async-thread.c2
-rw-r--r--fs/btrfs/backref.c72
-rw-r--r--fs/btrfs/backref.h8
-rw-r--r--fs/btrfs/btrfs_inode.h29
-rw-r--r--fs/btrfs/check-integrity.c8
-rw-r--r--fs/btrfs/compression.c493
-rw-r--r--fs/btrfs/compression.h6
-rw-r--r--fs/btrfs/ctree.c17
-rw-r--r--fs/btrfs/ctree.h30
-rw-r--r--fs/btrfs/delayed-inode.c46
-rw-r--r--fs/btrfs/delayed-ref.c296
-rw-r--r--fs/btrfs/delayed-ref.h54
-rw-r--r--fs/btrfs/disk-io.c227
-rw-r--r--fs/btrfs/extent-tree.c829
-rw-r--r--fs/btrfs/extent_io.c67
-rw-r--r--fs/btrfs/extent_io.h1
-rw-r--r--fs/btrfs/file.c50
-rw-r--r--fs/btrfs/free-space-tree.c4
-rw-r--r--fs/btrfs/inode-map.c3
-rw-r--r--fs/btrfs/inode.c327
-rw-r--r--fs/btrfs/ioctl.c156
-rw-r--r--fs/btrfs/lzo.c5
-rw-r--r--fs/btrfs/ordered-data.c21
-rw-r--r--fs/btrfs/qgroup.c8
-rw-r--r--fs/btrfs/raid56.c30
-rw-r--r--fs/btrfs/ref-verify.c1031
-rw-r--r--fs/btrfs/ref-verify.h62
-rw-r--r--fs/btrfs/relocation.c17
-rw-r--r--fs/btrfs/root-tree.c4
-rw-r--r--fs/btrfs/scrub.c22
-rw-r--r--fs/btrfs/send.c74
-rw-r--r--fs/btrfs/send.h2
-rw-r--r--fs/btrfs/super.c37
-rw-r--r--fs/btrfs/sysfs.c63
-rw-r--r--fs/btrfs/sysfs.h26
-rw-r--r--fs/btrfs/tests/free-space-tree-tests.c3
-rw-r--r--fs/btrfs/tests/inode-tests.c20
-rw-r--r--fs/btrfs/tests/qgroup-tests.c30
-rw-r--r--fs/btrfs/transaction.c16
-rw-r--r--fs/btrfs/tree-checker.c425
-rw-r--r--fs/btrfs/tree-checker.h26
-rw-r--r--fs/btrfs/tree-log.c34
-rw-r--r--fs/btrfs/volumes.c168
-rw-r--r--fs/btrfs/volumes.h2
-rw-r--r--fs/btrfs/zlib.c15
-rw-r--r--fs/btrfs/zstd.c5
-rw-r--r--fs/buffer.c81
-rw-r--r--fs/cachefiles/rdwr.c10
-rw-r--r--fs/ceph/addr.c30
-rw-r--r--fs/cifs/file.c21
-rw-r--r--fs/cifs/smb2ops.c30
-rw-r--r--fs/configfs/dir.c10
-rw-r--r--fs/configfs/file.c12
-rw-r--r--fs/configfs/item.c6
-rw-r--r--fs/configfs/symlink.c4
-rw-r--r--fs/crypto/Makefile2
-rw-r--r--fs/crypto/crypto.c37
-rw-r--r--fs/crypto/fname.c39
-rw-r--r--fs/crypto/fscrypt_private.h13
-rw-r--r--fs/crypto/hooks.c112
-rw-r--r--fs/crypto/keyinfo.c25
-rw-r--r--fs/crypto/policy.c6
-rw-r--r--fs/dax.c15
-rw-r--r--fs/dcache.c24
-rw-r--r--fs/debugfs/file.c216
-rw-r--r--fs/debugfs/inode.c62
-rw-r--r--fs/debugfs/internal.h20
-rw-r--r--fs/direct-io.c4
-rw-r--r--fs/dlm/ast.c2
-rw-r--r--fs/dlm/config.c16
-rw-r--r--fs/dlm/lock.c43
-rw-r--r--fs/dlm/lowcomms.c218
-rw-r--r--fs/dlm/rcom.c26
-rw-r--r--fs/dlm/rcom.h1
-rw-r--r--fs/dlm/recover.c4
-rw-r--r--fs/dlm/recoverd.c16
-rw-r--r--fs/ecryptfs/main.c2
-rw-r--r--fs/exec.c2
-rw-r--r--fs/ext2/inode.c4
-rw-r--r--fs/ext2/super.c161
-rw-r--r--fs/ext4/Kconfig1
-rw-r--r--fs/ext4/balloc.c15
-rw-r--r--fs/ext4/ext4.h58
-rw-r--r--fs/ext4/extents.c6
-rw-r--r--fs/ext4/file.c286
-rw-r--r--fs/ext4/ialloc.c4
-rw-r--r--fs/ext4/inline.c43
-rw-r--r--fs/ext4/inode.c190
-rw-r--r--fs/ext4/ioctl.c30
-rw-r--r--fs/ext4/mballoc.c28
-rw-r--r--fs/ext4/namei.c62
-rw-r--r--fs/ext4/resize.c104
-rw-r--r--fs/ext4/super.c42
-rw-r--r--fs/f2fs/checkpoint.c15
-rw-r--r--fs/f2fs/data.c11
-rw-r--r--fs/f2fs/f2fs.h9
-rw-r--r--fs/f2fs/file.c13
-rw-r--r--fs/f2fs/inode.c5
-rw-r--r--fs/f2fs/node.c73
-rw-r--r--fs/f2fs/super.c7
-rw-r--r--fs/fcntl.c2
-rw-r--r--fs/file_table.c4
-rw-r--r--fs/fs-writeback.c153
-rw-r--r--fs/fs_pin.c4
-rw-r--r--fs/fscache/page.c2
-rw-r--r--fs/fuse/dev.c4
-rw-r--r--fs/fuse/inode.c10
-rw-r--r--fs/gfs2/Kconfig1
-rw-r--r--fs/gfs2/acl.c1
-rw-r--r--fs/gfs2/aops.c22
-rw-r--r--fs/gfs2/bmap.c322
-rw-r--r--fs/gfs2/bmap.h4
-rw-r--r--fs/gfs2/file.c124
-rw-r--r--fs/gfs2/inode.c89
-rw-r--r--fs/gfs2/inode.h2
-rw-r--r--fs/gfs2/super.c5
-rw-r--r--fs/gfs2/trace_gfs2.h65
-rw-r--r--fs/gfs2/trans.c2
-rw-r--r--fs/gfs2/xattr.c63
-rw-r--r--fs/hugetlbfs/inode.c3
-rw-r--r--fs/inode.c2
-rw-r--r--fs/iomap.c15
-rw-r--r--fs/isofs/isofs.h22
-rw-r--r--fs/isofs/rock.h64
-rw-r--r--fs/isofs/util.c2
-rw-r--r--fs/jbd2/journal.c9
-rw-r--r--fs/jfs/jfs_metapage.c2
-rw-r--r--fs/jfs/super.c1
-rw-r--r--fs/lockd/svc.c2
-rw-r--r--fs/namei.c4
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/ncpfs/dir.c9
-rw-r--r--fs/nfs/dir.c8
-rw-r--r--fs/nfsd/blocklayout.c4
-rw-r--r--fs/nilfs2/btree.c6
-rw-r--r--fs/nilfs2/page.c15
-rw-r--r--fs/nilfs2/segment.c16
-rw-r--r--fs/notify/dnotify/dnotify.c7
-rw-r--r--fs/notify/fanotify/Kconfig2
-rw-r--r--fs/notify/fanotify/fanotify.c57
-rw-r--r--fs/notify/fanotify/fanotify.h8
-rw-r--r--fs/notify/fanotify/fanotify_user.c59
-rw-r--r--fs/notify/fdinfo.c3
-rw-r--r--fs/notify/fsnotify.c101
-rw-r--r--fs/notify/group.c6
-rw-r--r--fs/notify/inotify/inotify_user.c4
-rw-r--r--fs/notify/mark.c121
-rw-r--r--fs/ntfs/aops.c2
-rw-r--r--fs/ntfs/mft.c2
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/aops.c26
-rw-r--r--fs/ocfs2/buffer_head_io.h3
-rw-r--r--fs/ocfs2/cluster/heartbeat.c4
-rw-r--r--fs/ocfs2/cluster/heartbeat.h2
-rw-r--r--fs/ocfs2/cluster/nodemanager.c71
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c1
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c4
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c1
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c5
-rw-r--r--fs/ocfs2/file.c9
-rw-r--r--fs/ocfs2/suballoc.c5
-rw-r--r--fs/ocfs2/super.c4
-rw-r--r--fs/ocfs2/super.h3
-rw-r--r--fs/overlayfs/ovl_entry.h2
-rw-r--r--fs/overlayfs/readdir.c2
-rw-r--r--fs/proc/array.c4
-rw-r--r--fs/proc/proc_tty.c3
-rw-r--r--fs/proc/task_mmu.c8
-rw-r--r--fs/proc_namespace.c2
-rw-r--r--fs/quota/dquot.c13
-rw-r--r--fs/readdir.c11
-rw-r--r--fs/splice.c2
-rw-r--r--fs/sync.c2
-rw-r--r--fs/ubifs/crypto.c1
-rw-r--r--fs/ubifs/ioctl.c5
-rw-r--r--fs/ubifs/super.c8
-rw-r--r--fs/ubifs/ubifs.h18
-rw-r--r--fs/ubifs/xattr.c1
-rw-r--r--fs/udf/balloc.c29
-rw-r--r--fs/udf/dir.c2
-rw-r--r--fs/udf/directory.c9
-rw-r--r--fs/udf/ialloc.c2
-rw-r--r--fs/udf/inode.c50
-rw-r--r--fs/udf/misc.c8
-rw-r--r--fs/udf/namei.c13
-rw-r--r--fs/udf/partition.c6
-rw-r--r--fs/udf/super.c56
-rw-r--r--fs/udf/truncate.c2
-rw-r--r--fs/udf/udfdecl.h21
-rw-r--r--fs/udf/unicode.c2
-rw-r--r--fs/userfaultfd.c10
-rw-r--r--fs/xfs/Kconfig17
-rw-r--r--fs/xfs/Makefile29
-rw-r--r--fs/xfs/kmem.h5
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c1
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c50
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h4
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c6
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c2061
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h66
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c250
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.h22
-rw-r--r--fs/xfs/libxfs/xfs_btree.c259
-rw-r--r--fs/xfs/libxfs/xfs_btree.h32
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c22
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c24
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h17
-rw-r--r--fs/xfs/libxfs/xfs_errortag.h106
-rw-r--r--fs/xfs/libxfs/xfs_format.h37
-rw-r--r--fs/xfs/libxfs/xfs_fs.h77
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c91
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.h7
-rw-r--r--fs/xfs/libxfs/xfs_iext_tree.c1043
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c1
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c1333
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h138
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h24
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c1
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c1
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c13
-rw-r--r--fs/xfs/libxfs/xfs_types.h22
-rw-r--r--fs/xfs/scrub/agheader.c658
-rw-r--r--fs/xfs/scrub/alloc.c102
-rw-r--r--fs/xfs/scrub/attr.c471
-rw-r--r--fs/xfs/scrub/bmap.c363
-rw-r--r--fs/xfs/scrub/btree.c516
-rw-r--r--fs/xfs/scrub/btree.h57
-rw-r--r--fs/xfs/scrub/common.c574
-rw-r--r--fs/xfs/scrub/common.h144
-rw-r--r--fs/xfs/scrub/dabtree.c591
-rw-r--r--fs/xfs/scrub/dabtree.h59
-rw-r--r--fs/xfs/scrub/dir.c816
-rw-r--r--fs/xfs/scrub/ialloc.c337
-rw-r--r--fs/xfs/scrub/inode.c611
-rw-r--r--fs/xfs/scrub/parent.c317
-rw-r--r--fs/xfs/scrub/quota.c304
-rw-r--r--fs/xfs/scrub/refcount.c99
-rw-r--r--fs/xfs/scrub/rmap.c138
-rw-r--r--fs/xfs/scrub/rtbitmap.c108
-rw-r--r--fs/xfs/scrub/scrub.c392
-rw-r--r--fs/xfs/scrub/scrub.h115
-rw-r--r--fs/xfs/scrub/symlink.c92
-rw-r--r--fs/xfs/scrub/trace.c59
-rw-r--r--fs/xfs/scrub/trace.h499
-rw-r--r--fs/xfs/scrub/xfs_scrub.h29
-rw-r--r--fs/xfs/xfs.h1
-rw-r--r--fs/xfs/xfs_attr.h5
-rw-r--r--fs/xfs/xfs_attr_inactive.c69
-rw-r--r--fs/xfs/xfs_attr_list.c161
-rw-r--r--fs/xfs/xfs_bmap_util.c746
-rw-r--r--fs/xfs/xfs_bmap_util.h10
-rw-r--r--fs/xfs/xfs_buf.c16
-rw-r--r--fs/xfs/xfs_buf.h5
-rw-r--r--fs/xfs/xfs_dir2_readdir.c10
-rw-r--r--fs/xfs/xfs_dquot.c21
-rw-r--r--fs/xfs/xfs_error.c6
-rw-r--r--fs/xfs/xfs_error.h81
-rw-r--r--fs/xfs/xfs_file.c2
-rw-r--r--fs/xfs/xfs_icache.c2
-rw-r--r--fs/xfs/xfs_inode.c33
-rw-r--r--fs/xfs/xfs_inode.h4
-rw-r--r--fs/xfs/xfs_inode_item.c29
-rw-r--r--fs/xfs/xfs_inode_item.h2
-rw-r--r--fs/xfs/xfs_ioctl.c158
-rw-r--r--fs/xfs/xfs_ioctl.h4
-rw-r--r--fs/xfs/xfs_ioctl32.c1
-rw-r--r--fs/xfs/xfs_iomap.c21
-rw-r--r--fs/xfs/xfs_iops.c52
-rw-r--r--fs/xfs/xfs_itable.c13
-rw-r--r--fs/xfs/xfs_itable.h2
-rw-r--r--fs/xfs/xfs_linux.h21
-rw-r--r--fs/xfs/xfs_log.c33
-rw-r--r--fs/xfs/xfs_log_priv.h4
-rw-r--r--fs/xfs/xfs_log_recover.c62
-rw-r--r--fs/xfs/xfs_mount.c15
-rw-r--r--fs/xfs/xfs_reflink.c108
-rw-r--r--fs/xfs/xfs_rtalloc.h2
-rw-r--r--fs/xfs/xfs_trace.h64
-rw-r--r--fs/xfs/xfs_trans_ail.c22
-rw-r--r--include/acpi/acexcep.h16
-rw-r--r--include/acpi/acpi_bus.h2
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actbl1.h30
-rw-r--r--include/acpi/apei.h1
-rw-r--r--include/acpi/pcc.h1
-rw-r--r--include/asm-generic/atomic-long.h3
-rw-r--r--include/asm-generic/audit_dir_write.h3
-rw-r--r--include/asm-generic/audit_write.h3
-rw-r--r--include/asm-generic/div64.h14
-rw-r--r--include/asm-generic/qrwlock.h57
-rw-r--r--include/asm-generic/qrwlock_types.h15
-rw-r--r--include/asm-generic/qspinlock.h1
-rw-r--r--include/asm-generic/rwsem.h10
-rw-r--r--include/asm-generic/vmlinux.lds.h22
-rw-r--r--include/clocksource/arm_arch_timer.h10
-rw-r--r--include/crypto/dh.h2
-rw-r--r--include/crypto/drbg.h3
-rw-r--r--include/crypto/ecdh.h2
-rw-r--r--include/crypto/gcm.h8
-rw-r--r--include/crypto/gf128mul.h2
-rw-r--r--include/crypto/hash.h28
-rw-r--r--include/crypto/if_alg.h15
-rw-r--r--include/crypto/sm3.h40
-rw-r--r--include/crypto/sm3_base.h117
-rw-r--r--include/drm/amd_asic_type.h (renamed from drivers/gpu/drm/radeon/radeon_kfd.h)51
-rw-r--r--include/drm/bridge/mhl.h4
-rw-r--r--include/drm/drmP.h4
-rw-r--r--include/drm/drm_atomic.h188
-rw-r--r--include/drm/drm_auth.h21
-rw-r--r--include/drm/drm_bridge.h2
-rw-r--r--include/drm/drm_connector.h14
-rw-r--r--include/drm/drm_crtc.h37
-rw-r--r--include/drm/drm_dp_helper.h23
-rw-r--r--include/drm/drm_dp_mst_helper.h2
-rw-r--r--include/drm/drm_drv.h7
-rw-r--r--include/drm/drm_edid.h2
-rw-r--r--include/drm/drm_encoder.h4
-rw-r--r--include/drm/drm_fb_cma_helper.h13
-rw-r--r--include/drm/drm_framebuffer.h1
-rw-r--r--include/drm/drm_gem_framebuffer_helper.h2
-rw-r--r--include/drm/drm_lease.h46
-rw-r--r--include/drm/drm_mode_config.h13
-rw-r--r--include/drm/drm_mode_object.h5
-rw-r--r--include/drm/drm_modeset_helper_vtables.h2
-rw-r--r--include/drm/drm_modeset_lock.h12
-rw-r--r--include/drm/drm_of.h31
-rw-r--r--include/drm/drm_plane.h20
-rw-r--r--include/drm/drm_property.h6
-rw-r--r--include/drm/drm_syncobj.h5
-rw-r--r--include/drm/drm_vblank.h25
-rw-r--r--include/drm/i915_pciids.h154
-rw-r--r--include/drm/ttm/ttm_debug.h31
-rw-r--r--include/drm/ttm/ttm_memory.h5
-rw-r--r--include/drm/ttm/ttm_page_alloc.h33
-rw-r--r--include/dt-bindings/gpio/gpio.h2
-rw-r--r--include/dt-bindings/gpio/meson-gxbb-gpio.h2
-rw-r--r--include/dt-bindings/gpio/meson-gxl-gpio.h2
-rw-r--r--include/dt-bindings/gpio/uniphier-gpio.h18
-rw-r--r--include/dt-bindings/msm/msm-bus-ids.h887
-rw-r--r--include/dt-bindings/phy/phy.h1
-rw-r--r--include/lib/libgcc.h43
-rw-r--r--include/linux/acpi.h30
-rw-r--r--include/linux/acpi_iort.h4
-rw-r--r--include/linux/arch_topology.h17
-rw-r--r--include/linux/atomic.h4
-rw-r--r--include/linux/audit.h18
-rw-r--r--include/linux/average.h10
-rw-r--r--include/linux/backing-dev-defs.h24
-rw-r--r--include/linux/backing-dev.h14
-rw-r--r--include/linux/bio.h25
-rw-r--r--include/linux/bitmap.h114
-rw-r--r--include/linux/bitops.h28
-rw-r--r--include/linux/blk-cgroup.h25
-rw-r--r--include/linux/blk-mq.h40
-rw-r--r--include/linux/blk_types.h16
-rw-r--r--include/linux/blkdev.h36
-rw-r--r--include/linux/bootmem.h27
-rw-r--r--include/linux/buffer_head.h2
-rw-r--r--include/linux/c2port.h4
-rw-r--r--include/linux/cgroup-defs.h59
-rw-r--r--include/linux/cgroup.h58
-rw-r--r--include/linux/compiler-clang.h2
-rw-r--r--include/linux/compiler-gcc.h2
-rw-r--r--include/linux/compiler-intel.h2
-rw-r--r--include/linux/compiler.h286
-rw-r--r--include/linux/compiler_types.h274
-rw-r--r--include/linux/completion.h18
-rw-r--r--include/linux/configfs.h8
-rw-r--r--include/linux/cpu.h30
-rw-r--r--include/linux/cpufreq.h3
-rw-r--r--include/linux/cpuhotplug.h4
-rw-r--r--include/linux/cpumask.h16
-rw-r--r--include/linux/crypto.h40
-rw-r--r--include/linux/cyclades.h3
-rw-r--r--include/linux/dcache.h4
-rw-r--r--include/linux/debugfs.h38
-rw-r--r--include/linux/devfreq.h16
-rw-r--r--include/linux/device.h19
-rw-r--r--include/linux/dma-fence.h7
-rw-r--r--include/linux/dma-mapping.h21
-rw-r--r--include/linux/dma/xilinx_dma.h14
-rw-r--r--include/linux/dmaengine.h30
-rw-r--r--include/linux/dmar.h1
-rw-r--r--include/linux/dynamic_queue_limits.h2
-rw-r--r--include/linux/elevator.h1
-rw-r--r--include/linux/extcon-provider.h142
-rw-r--r--include/linux/extcon.h109
-rw-r--r--include/linux/filter.h2
-rw-r--r--include/linux/freezer.h2
-rw-r--r--include/linux/fs.h3
-rw-r--r--include/linux/fscrypt.h294
-rw-r--r--include/linux/fscrypt_common.h142
-rw-r--r--include/linux/fscrypt_notsupp.h39
-rw-r--r--include/linux/fscrypt_supp.h17
-rw-r--r--include/linux/fsnotify_backend.h8
-rw-r--r--include/linux/fwnode.h2
-rw-r--r--include/linux/genetlink.h2
-rw-r--r--include/linux/genhd.h26
-rw-r--r--include/linux/gfp.h18
-rw-r--r--include/linux/gpio-fan.h36
-rw-r--r--include/linux/gpio/consumer.h49
-rw-r--r--include/linux/gpio/driver.h215
-rw-r--r--include/linux/gpio/machine.h2
-rw-r--r--include/linux/gpio_mouse.h61
-rw-r--r--include/linux/hid-sensor-hub.h1
-rw-r--r--include/linux/hid.h2
-rw-r--r--include/linux/hmm.h4
-rw-r--r--include/linux/host1x.h2
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/hyperv.h10
-rw-r--r--include/linux/hypervisor.h8
-rw-r--r--include/linux/i2c-gpio.h4
-rw-r--r--include/linux/i2c-smbus.h10
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/if_team.h2
-rw-r--r--include/linux/iio/common/st_sensors.h35
-rw-r--r--include/linux/iio/iio.h35
-rw-r--r--include/linux/iio/sw_device.h2
-rw-r--r--include/linux/iio/sw_trigger.h2
-rw-r--r--include/linux/iio/trigger.h21
-rw-r--r--include/linux/intel-iommu.h1
-rw-r--r--include/linux/interrupt.h15
-rw-r--r--include/linux/iomap.h18
-rw-r--r--include/linux/ioport.h7
-rw-r--r--include/linux/ioprio.h3
-rw-r--r--include/linux/iova.h14
-rw-r--r--include/linux/ipmi.h8
-rw-r--r--include/linux/ipmi_smi.h27
-rw-r--r--include/linux/irq.h22
-rw-r--r--include/linux/irq_work.h3
-rw-r--r--include/linux/irqchip/arm-gic-v3.h4
-rw-r--r--include/linux/irqchip/arm-gic-v4.h9
-rw-r--r--include/linux/irqchip/irq-omap-intc.h2
-rw-r--r--include/linux/irqdesc.h9
-rw-r--r--include/linux/irqdomain.h20
-rw-r--r--include/linux/jump_label.h14
-rw-r--r--include/linux/jump_label_ratelimit.h6
-rw-r--r--include/linux/kallsyms.h14
-rw-r--r--include/linux/kasan.h4
-rw-r--r--include/linux/kexec.h2
-rw-r--r--include/linux/kfifo.h6
-rw-r--r--include/linux/kmemcheck.h171
-rw-r--r--include/linux/kmemleak.h8
-rw-r--r--include/linux/kprobes.h36
-rw-r--r--include/linux/kthread.h11
-rw-r--r--include/linux/leds.h18
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/lightnvm.h11
-rw-r--r--include/linux/linkage.h2
-rw-r--r--include/linux/livepatch.h34
-rw-r--r--include/linux/llist.h2
-rw-r--r--include/linux/lockdep.h20
-rw-r--r--include/linux/log2.h42
-rw-r--r--include/linux/math64.h27
-rw-r--r--include/linux/mem_encrypt.h7
-rw-r--r--include/linux/memblock.h24
-rw-r--r--include/linux/mfd/arizona/pdata.h3
-rw-r--r--include/linux/mfd/axp20x.h3
-rw-r--r--include/linux/mfd/max77843-private.h5
-rw-r--r--include/linux/mfd/palmas.h2
-rw-r--r--include/linux/mfd/rtsx_pci.h85
-rw-r--r--include/linux/mfd/tps65217.h6
-rw-r--r--include/linux/mfd/tps65218.h19
-rw-r--r--include/linux/mfd/wm97xx.h25
-rw-r--r--include/linux/mlx4/cq.h3
-rw-r--r--include/linux/mlx5/cq.h9
-rw-r--r--include/linux/mlx5/mlx5_ifc.h9
-rw-r--r--include/linux/mm.h85
-rw-r--r--include/linux/mm_types.h19
-rw-r--r--include/linux/mmc/host.h11
-rw-r--r--include/linux/mmc/sdhci-pci-data.h3
-rw-r--r--include/linux/mmu_notifier.h20
-rw-r--r--include/linux/mmzone.h15
-rw-r--r--include/linux/module.h7
-rw-r--r--include/linux/moduleparam.h16
-rw-r--r--include/linux/msi.h5
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/netfilter/nfnetlink.h2
-rw-r--r--include/linux/netlink.h3
-rw-r--r--include/linux/nodemask.h4
-rw-r--r--include/linux/nvme-fc-driver.h15
-rw-r--r--include/linux/nvme.h30
-rw-r--r--include/linux/of.h45
-rw-r--r--include/linux/of_address.h10
-rw-r--r--include/linux/of_gpio.h2
-rw-r--r--include/linux/padata.h4
-rw-r--r--include/linux/page-flags.h2
-rw-r--r--include/linux/page-isolation.h2
-rw-r--r--include/linux/pagemap.h26
-rw-r--r--include/linux/pagevec.h20
-rw-r--r--include/linux/pci.h23
-rw-r--r--include/linux/percpu-defs.h15
-rw-r--r--include/linux/perf_event.h25
-rw-r--r--include/linux/phy/phy-qcom-ufs.h3
-rw-r--r--include/linux/phy/phy.h14
-rw-r--r--include/linux/pinctrl/consumer.h8
-rw-r--r--include/linux/pinctrl/pinconf-generic.h5
-rw-r--r--include/linux/platform_data/i2c-nuc900.h10
-rw-r--r--include/linux/platform_data/media/gpio-ir-recv.h23
-rw-r--r--include/linux/platform_data/sht15.h38
-rw-r--r--include/linux/platform_data/st1232_pdata.h14
-rw-r--r--include/linux/pm.h31
-rw-r--r--include/linux/pm_domain.h20
-rw-r--r--include/linux/pm_opp.h14
-rw-r--r--include/linux/pm_qos.h27
-rw-r--r--include/linux/pm_runtime.h2
-rw-r--r--include/linux/printk.h3
-rw-r--r--include/linux/property.h6
-rw-r--r--include/linux/radix-tree.h7
-rw-r--r--include/linux/rculist.h4
-rw-r--r--include/linux/rcupdate.h4
-rw-r--r--include/linux/regmap.h64
-rw-r--r--include/linux/regset.h67
-rw-r--r--include/linux/regulator/da9211.h5
-rw-r--r--include/linux/ring_buffer.h3
-rw-r--r--include/linux/rtnetlink.h2
-rw-r--r--include/linux/rwlock.h12
-rw-r--r--include/linux/rwlock_api_smp.h2
-rw-r--r--include/linux/rwsem.h1
-rw-r--r--include/linux/sbitmap.h64
-rw-r--r--include/linux/scatterlist.h17
-rw-r--r--include/linux/sched.h19
-rw-r--r--include/linux/sched/cputime.h3
-rw-r--r--include/linux/sched/isolation.h51
-rw-r--r--include/linux/sched/rt.h11
-rw-r--r--include/linux/sched/sysctl.h6
-rw-r--r--include/linux/serial_core.h5
-rw-r--r--include/linux/shmem_fs.h2
-rw-r--r--include/linux/skbuff.h5
-rw-r--r--include/linux/slab.h81
-rw-r--r--include/linux/slab_def.h2
-rw-r--r--include/linux/slub_def.h2
-rw-r--r--include/linux/soc/brcmstb/brcmstb.h17
-rw-r--r--include/linux/spi/spi-fsl-dspi.h31
-rw-r--r--include/linux/spinlock.h15
-rw-r--r--include/linux/spinlock_up.h11
-rw-r--r--include/linux/swap.h35
-rw-r--r--include/linux/sync_file.h4
-rw-r--r--include/linux/thread_info.h5
-rw-r--r--include/linux/tick.h39
-rw-r--r--include/linux/types.h1
-rw-r--r--include/linux/uinput.h81
-rw-r--r--include/linux/usb.h18
-rw-r--r--include/linux/usb/association.h1
-rw-r--r--include/linux/usb/audio-v2.h1
-rw-r--r--include/linux/usb/audio.h1
-rw-r--r--include/linux/usb/c67x00.h1
-rw-r--r--include/linux/usb/cdc-wdm.h1
-rw-r--r--include/linux/usb/cdc.h1
-rw-r--r--include/linux/usb/cdc_ncm.h1
-rw-r--r--include/linux/usb/composite.h1
-rw-r--r--include/linux/usb/ehci_def.h1
-rw-r--r--include/linux/usb/ehci_pdriver.h1
-rw-r--r--include/linux/usb/g_hid.h1
-rw-r--r--include/linux/usb/gadget.h6
-rw-r--r--include/linux/usb/gpio_vbus.h1
-rw-r--r--include/linux/usb/hcd.h1
-rw-r--r--include/linux/usb/input.h1
-rw-r--r--include/linux/usb/isp1301.h1
-rw-r--r--include/linux/usb/m66592.h1
-rw-r--r--include/linux/usb/msm_hsusb_hw.h77
-rw-r--r--include/linux/usb/musb-ux500.h1
-rw-r--r--include/linux/usb/net2280.h1
-rw-r--r--include/linux/usb/of.h1
-rw-r--r--include/linux/usb/ohci_pdriver.h1
-rw-r--r--include/linux/usb/otg-fsm.h1
-rw-r--r--include/linux/usb/pd.h (renamed from drivers/staging/typec/pd.h)5
-rw-r--r--include/linux/usb/pd_bdo.h (renamed from drivers/staging/typec/pd_bdo.h)0
-rw-r--r--include/linux/usb/pd_vdo.h (renamed from drivers/staging/typec/pd_vdo.h)0
-rw-r--r--include/linux/usb/phy_companion.h1
-rw-r--r--include/linux/usb/quirks.h6
-rw-r--r--include/linux/usb/r8a66597.h1
-rw-r--r--include/linux/usb/renesas_usbhs.h6
-rw-r--r--include/linux/usb/rndis_host.h1
-rw-r--r--include/linux/usb/samsung_usb_phy.h1
-rw-r--r--include/linux/usb/serial.h1
-rw-r--r--include/linux/usb/storage.h1
-rw-r--r--include/linux/usb/tcpm.h (renamed from drivers/staging/typec/tcpm.h)57
-rw-r--r--include/linux/usb/tegra_usb_phy.h1
-rw-r--r--include/linux/usb/tilegx.h1
-rw-r--r--include/linux/usb/ulpi.h1
-rw-r--r--include/linux/usb/usb338x.h1
-rw-r--r--include/linux/usb/usbnet.h1
-rw-r--r--include/linux/usb/wusb-wa.h1
-rw-r--r--include/linux/usb/wusb.h1
-rw-r--r--include/linux/usb/xhci-dbgp.h1
-rw-r--r--include/linux/usbdevice_fs.h1
-rw-r--r--include/linux/vmstat.h10
-rw-r--r--include/linux/w1.h1
-rw-r--r--include/linux/workqueue.h4
-rw-r--r--include/linux/writeback.h30
-rw-r--r--include/media/cec-pin.h111
-rw-r--r--include/media/cec.h16
-rw-r--r--include/media/drv-intf/saa7146_vv.h7
-rw-r--r--include/media/lirc_dev.h100
-rw-r--r--include/media/rc-map.h4
-rw-r--r--include/media/v4l2-async.h91
-rw-r--r--include/media/v4l2-fwnode.h228
-rw-r--r--include/media/v4l2-subdev.h3
-rw-r--r--include/net/inet_sock.h3
-rw-r--r--include/net/inet_timewait_sock.h4
-rw-r--r--include/net/ip_vs.h6
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_tables.h4
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/sock.h7
-rw-r--r--include/net/tcp.h23
-rw-r--r--include/net/tls.h23
-rw-r--r--include/rdma/ib_addr.h16
-rw-r--r--include/rdma/ib_pack.h19
-rw-r--r--include/rdma/ib_sa.h12
-rw-r--r--include/rdma/ib_umem_odp.h4
-rw-r--r--include/rdma/ib_verbs.h35
-rw-r--r--include/rdma/opa_addr.h6
-rw-r--r--include/rdma/rdmavt_qp.h6
-rw-r--r--include/scsi/libsas.h56
-rw-r--r--include/scsi/scsi_device.h4
-rw-r--r--include/scsi/scsi_devinfo.h79
-rw-r--r--include/scsi/scsi_proto.h45
-rw-r--r--include/scsi/scsi_transport_fc.h2
-rw-r--r--include/sound/ac97/codec.h118
-rw-r--r--include/sound/ac97/compat.h20
-rw-r--r--include/sound/ac97/controller.h85
-rw-r--r--include/sound/ac97/regs.h262
-rw-r--r--include/sound/ac97_codec.h239
-rw-r--r--include/sound/core.h2
-rw-r--r--include/sound/hdaudio.h3
-rw-r--r--include/sound/pxa2xx-lib.h15
-rw-r--r--include/sound/rt5651.h8
-rw-r--r--include/sound/rt5663.h3
-rw-r--r--include/sound/snd_wavefront.h1
-rw-r--r--include/sound/soc-acpi-intel-match.h32
-rw-r--r--include/sound/soc-acpi.h111
-rw-r--r--include/sound/soc.h115
-rw-r--r--include/target/iscsi/iscsi_target_stat.h12
-rw-r--r--include/trace/events/btrfs.h41
-rw-r--r--include/trace/events/irq_matrix.h201
-rw-r--r--include/trace/events/kmem.h11
-rw-r--r--include/trace/events/mmflags.h2
-rw-r--r--include/trace/events/sched.h2
-rw-r--r--include/trace/events/writeback.h1
-rw-r--r--include/uapi/drm/amdgpu_drm.h50
-rw-r--r--include/uapi/drm/drm.h41
-rw-r--r--include/uapi/drm/drm_mode.h70
-rw-r--r--include/uapi/drm/etnaviv_drm.h43
-rw-r--r--include/uapi/drm/i915_drm.h33
-rw-r--r--include/uapi/drm/msm_drm.h24
-rw-r--r--include/uapi/drm/vc4_drm.h19
-rw-r--r--include/uapi/linux/audit.h9
-rw-r--r--include/uapi/linux/btrfs.h8
-rw-r--r--include/uapi/linux/btrfs_tree.h1
-rw-r--r--include/uapi/linux/cec.h2
-rw-r--r--include/uapi/linux/dvb/frontend.h2
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/fanotify.h3
-rw-r--r--include/uapi/linux/input-event-codes.h1
-rw-r--r--include/uapi/linux/iso_fs.h162
-rw-r--r--include/uapi/linux/kfd_ioctl.h2
-rw-r--r--include/uapi/linux/pci_regs.h44
-rw-r--r--include/uapi/linux/perf_event.h1
-rw-r--r--include/uapi/linux/prctl.h9
-rw-r--r--include/uapi/linux/rxrpc.h10
-rw-r--r--include/uapi/linux/serial_reg.h1
-rw-r--r--include/uapi/linux/stddef.h2
-rw-r--r--include/uapi/linux/tls.h4
-rw-r--r--include/uapi/linux/usb/ch9.h4
-rw-r--r--include/uapi/linux/xattr.h3
-rw-r--r--include/uapi/rdma/ib_user_verbs.h22
-rw-r--r--include/uapi/rdma/mlx5-abi.h52
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h2
-rw-r--r--init/Kconfig24
-rw-r--r--init/do_mounts.c3
-rw-r--r--init/main.c5
-rw-r--r--kernel/acct.c4
-rw-r--r--kernel/audit.c76
-rw-r--r--kernel/audit.h2
-rw-r--r--kernel/audit_tree.c2
-rw-r--r--kernel/auditfilter.c39
-rw-r--r--kernel/auditsc.c29
-rw-r--r--kernel/bpf/core.c6
-rw-r--r--kernel/bpf/percpu_freelist.c8
-rw-r--r--kernel/bpf/verifier.c40
-rw-r--r--kernel/cgroup/Makefile2
-rw-r--r--kernel/cgroup/cgroup-internal.h9
-rw-r--r--kernel/cgroup/cgroup.c157
-rw-r--r--kernel/cgroup/cpuset.c15
-rw-r--r--kernel/cgroup/stat.c334
-rw-r--r--kernel/events/core.c498
-rw-r--r--kernel/events/ring_buffer.c6
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/extable.c2
-rw-r--r--kernel/fork.c24
-rw-r--r--kernel/irq/Kconfig6
-rw-r--r--kernel/irq/Makefile1
-rw-r--r--kernel/irq/autoprobe.c2
-rw-r--r--kernel/irq/chip.c35
-rw-r--r--kernel/irq/debugfs.c12
-rw-r--r--kernel/irq/internals.h19
-rw-r--r--kernel/irq/irqdesc.c10
-rw-r--r--kernel/irq/irqdomain.c60
-rw-r--r--kernel/irq/manage.c23
-rw-r--r--kernel/irq/matrix.c443
-rw-r--r--kernel/irq/msi.c32
-rw-r--r--kernel/irq/proc.c5
-rw-r--r--kernel/irq/timings.c2
-rw-r--r--kernel/irq_work.c21
-rw-r--r--kernel/jump_label.c14
-rw-r--r--kernel/kallsyms.c43
-rw-r--r--kernel/kexec_file.c5
-rw-r--r--kernel/kprobes.c18
-rw-r--r--kernel/kthread.c66
-rw-r--r--kernel/livepatch/Makefile2
-rw-r--r--kernel/livepatch/core.c52
-rw-r--r--kernel/livepatch/core.h40
-rw-r--r--kernel/livepatch/patch.c1
-rw-r--r--kernel/livepatch/shadow.c277
-rw-r--r--kernel/livepatch/transition.c45
-rw-r--r--kernel/locking/lockdep.c26
-rw-r--r--kernel/locking/qrwlock.c86
-rw-r--r--kernel/locking/qspinlock_paravirt.h47
-rw-r--r--kernel/locking/rwsem.c16
-rw-r--r--kernel/locking/spinlock.c9
-rw-r--r--kernel/module.c36
-rw-r--r--kernel/padata.c71
-rw-r--r--kernel/power/Kconfig14
-rw-r--r--kernel/power/qos.c4
-rw-r--r--kernel/power/snapshot.c39
-rw-r--r--kernel/power/suspend.c2
-rw-r--r--kernel/power/swap.c128
-rw-r--r--kernel/rcu/rcu.h21
-rw-r--r--kernel/rcu/rcu_segcblist.c1
-rw-r--r--kernel/rcu/rcutorture.c24
-rw-r--r--kernel/rcu/tree.c175
-rw-r--r--kernel/rcu/tree.h5
-rw-r--r--kernel/rcu/tree_plugin.h27
-rw-r--r--kernel/rcu/update.c28
-rw-r--r--kernel/resource.c76
-rw-r--r--kernel/sched/Makefile1
-rw-r--r--kernel/sched/clock.c2
-rw-r--r--kernel/sched/core.c235
-rw-r--r--kernel/sched/cpuacct.h18
-rw-r--r--kernel/sched/cpufreq_schedutil.c6
-rw-r--r--kernel/sched/cputime.c17
-rw-r--r--kernel/sched/deadline.c23
-rw-r--r--kernel/sched/debug.c18
-rw-r--r--kernel/sched/fair.c1051
-rw-r--r--kernel/sched/idle.c4
-rw-r--r--kernel/sched/isolation.c155
-rw-r--r--kernel/sched/rt.c318
-rw-r--r--kernel/sched/sched.h75
-rw-r--r--kernel/sched/stop_task.c2
-rw-r--r--kernel/sched/topology.c49
-rw-r--r--kernel/seccomp.c2
-rw-r--r--kernel/signal.c3
-rw-r--r--kernel/smp.c2
-rw-r--r--kernel/softirq.c18
-rw-r--r--kernel/sys.c12
-rw-r--r--kernel/sysctl.c24
-rw-r--r--kernel/task_work.c2
-rw-r--r--kernel/test_kprobes.c29
-rw-r--r--kernel/time/hrtimer.c4
-rw-r--r--kernel/time/posix-cpu-timers.c6
-rw-r--r--kernel/time/tick-sched.c38
-rw-r--r--kernel/trace/Kconfig4
-rw-r--r--kernel/trace/blktrace.c90
-rw-r--r--kernel/trace/bpf_trace.c8
-rw-r--r--kernel/trace/ring_buffer.c5
-rw-r--r--kernel/trace/trace.h2
-rw-r--r--kernel/trace/trace_output.c12
-rw-r--r--kernel/trace/trace_sched_wakeup.c8
-rw-r--r--kernel/trace/trace_stack.c2
-rw-r--r--kernel/user_namespace.c2
-rw-r--r--kernel/watchdog.c13
-rw-r--r--kernel/workqueue.c28
-rw-r--r--lib/Kconfig18
-rw-r--r--lib/Kconfig.debug28
-rw-r--r--lib/Kconfig.kmemcheck94
-rw-r--r--lib/Makefile8
-rw-r--r--lib/ashldi3.c44
-rw-r--r--lib/ashrdi3.c46
-rw-r--r--lib/assoc_array.c20
-rw-r--r--lib/bitmap.c4
-rw-r--r--lib/cmpdi2.c42
-rw-r--r--lib/crc32.c2
-rw-r--r--lib/crc4.c2
-rw-r--r--lib/crc8.c22
-rw-r--r--lib/div64.c6
-rw-r--r--lib/dynamic_queue_limits.c2
-rw-r--r--lib/gcd.c6
-rw-r--r--lib/idr.c2
-rw-r--r--lib/llist.c2
-rw-r--r--lib/lshrdi3.c45
-rw-r--r--lib/mpi/mpi-pow.c2
-rw-r--r--lib/muldi3.c72
-rw-r--r--lib/radix-tree.c30
-rw-r--r--lib/scatterlist.c95
-rw-r--r--lib/swiotlb.c5
-rw-r--r--lib/ucmpdi2.c35
-rw-r--r--lib/vsprintf.c4
-rw-r--r--lib/xz/xz_dec_stream.c16
-rw-r--r--mm/Kconfig.debug1
-rw-r--r--mm/Makefile2
-rw-r--r--mm/backing-dev.c20
-rw-r--r--mm/cma.c2
-rw-r--r--mm/debug.c5
-rw-r--r--mm/filemap.c221
-rw-r--r--mm/gup.c97
-rw-r--r--mm/hmm.c3
-rw-r--r--mm/huge_memory.c88
-rw-r--r--mm/hugetlb.c16
-rw-r--r--mm/kasan/kasan.c2
-rw-r--r--mm/khugepaged.c2
-rw-r--r--mm/kmemcheck.c125
-rw-r--r--mm/kmemleak.c11
-rw-r--r--mm/ksm.c15
-rw-r--r--mm/list_lru.c1
-rw-r--r--mm/memblock.c68
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/memory-failure.c2
-rw-r--r--mm/memory.c96
-rw-r--r--mm/memory_hotplug.c50
-rw-r--r--mm/mempolicy.c16
-rw-r--r--mm/mempool.c2
-rw-r--r--mm/migrate.c15
-rw-r--r--mm/mlock.c9
-rw-r--r--mm/mmu_notifier.c11
-rw-r--r--mm/oom_kill.c60
-rw-r--r--mm/page-writeback.c83
-rw-r--r--mm/page_alloc.c465
-rw-r--r--mm/page_ext.c4
-rw-r--r--mm/page_io.c8
-rw-r--r--mm/page_isolation.c10
-rw-r--r--mm/page_owner.c4
-rw-r--r--mm/pagewalk.c6
-rw-r--r--mm/percpu-vm.c2
-rw-r--r--mm/percpu.c3
-rw-r--r--mm/rmap.c65
-rw-r--r--mm/shmem.c47
-rw-r--r--mm/slab.c45
-rw-r--r--mm/slab.h43
-rw-r--r--mm/slab_common.c59
-rw-r--r--mm/slob.c4
-rw-r--r--mm/slub.c67
-rw-r--r--mm/sparse-vmemmap.c34
-rw-r--r--mm/sparse.c33
-rw-r--r--mm/swap.c35
-rw-r--r--mm/swap_slots.c11
-rw-r--r--mm/swap_state.c11
-rw-r--r--mm/swapfile.c21
-rw-r--r--mm/truncate.c149
-rw-r--r--mm/vmscan.c10
-rw-r--r--mm/vmstat.c77
-rw-r--r--mm/workingset.c10
-rw-r--r--mm/zsmalloc.c2
-rw-r--r--net/bridge/br_netlink.c1
-rw-r--r--net/core/dev.c66
-rw-r--r--net/core/fib_notifier.c10
-rw-r--r--net/core/fib_rules.c6
-rw-r--r--net/core/net-sysfs.c17
-rw-r--r--net/core/netpoll.c2
-rw-r--r--net/core/pktgen.c2
-rw-r--r--net/core/skbuff.c9
-rw-r--r--net/core/sock.c3
-rw-r--r--net/dsa/Kconfig1
-rw-r--r--net/dsa/tag_lan9303.c24
-rw-r--r--net/ipv4/ah4.c2
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_semantics.c4
-rw-r--r--net/ipv4/inet_fragment.c2
-rw-r--r--net/ipv4/inet_timewait_sock.c3
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c19
-rw-r--r--net/ipv4/tcp_cong.c76
-rw-r--r--net/ipv4/tcp_input.c9
-rw-r--r--net/ipv4/tcp_ipv4.c9
-rw-r--r--net/ipv4/tcp_output.c6
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/ah6.c2
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/ip6_tunnel.c8
-rw-r--r--net/ipv6/route.c3
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/ipv6/xfrm6_tunnel.c8
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l2tp/l2tp_core.c4
-rw-r--r--net/llc/llc_input.c4
-rw-r--r--net/mac80211/sta_info.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/netfilter/nf_nat_ftp.c2
-rw-r--r--net/netfilter/nf_nat_irc.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c4
-rw-r--r--net/netlabel/netlabel_calipso.c2
-rw-r--r--net/netlink/af_netlink.c1
-rw-r--r--net/openvswitch/flow_netlink.c2
-rw-r--r--net/openvswitch/meter.c13
-rw-r--r--net/packet/af_packet.c1
-rw-r--r--net/phonet/pn_dev.c3
-rw-r--r--net/rds/ib_fmr.c4
-rw-r--r--net/rxrpc/af_rxrpc.c16
-rw-r--r--net/rxrpc/ar-internal.h1
-rw-r--r--net/rxrpc/call_event.c2
-rw-r--r--net/rxrpc/call_object.c1
-rw-r--r--net/rxrpc/input.c2
-rw-r--r--net/rxrpc/output.c19
-rw-r--r--net/rxrpc/recvmsg.c2
-rw-r--r--net/sched/sch_netem.c17
-rw-r--r--net/socket.c1
-rw-r--r--net/sunrpc/svc.c4
-rw-r--r--net/tls/tls_main.c96
-rw-r--r--net/tls/tls_sw.c24
-rw-r--r--net/wireless/nl80211.c2
-rw-r--r--samples/Kconfig5
-rw-r--r--samples/bpf/xdp_router_ipv4_user.c5
-rw-r--r--samples/configfs/configfs_sample.c8
-rw-r--r--samples/connector/cn_test.c6
-rw-r--r--samples/kprobes/Makefile2
-rw-r--r--samples/kprobes/jprobe_example.c67
-rw-r--r--samples/kprobes/kprobe_example.c8
-rw-r--r--samples/livepatch/Makefile6
-rw-r--r--samples/livepatch/livepatch-callbacks-busymod.c72
-rw-r--r--samples/livepatch/livepatch-callbacks-demo.c234
-rw-r--r--samples/livepatch/livepatch-callbacks-mod.c53
-rw-r--r--samples/livepatch/livepatch-shadow-fix1.c173
-rw-r--r--samples/livepatch/livepatch-shadow-fix2.c168
-rw-r--r--samples/livepatch/livepatch-shadow-mod.c224
-rw-r--r--samples/mic/mpssd/mpssd.c6
-rw-r--r--samples/trace_events/trace-events-sample.h10
-rw-r--r--samples/v4l/v4l2-pci-skeleton.c6
-rw-r--r--samples/vfio-mdev/mtty.c2
-rw-r--r--scripts/Makefile.build2
-rw-r--r--scripts/Makefile.dtbinst6
-rw-r--r--scripts/Makefile.lib5
-rwxr-xr-xscripts/bloat-o-meter89
-rw-r--r--scripts/coccinelle/api/drm-get-put.cocci5
-rwxr-xr-xscripts/documentation-file-ref-check15
-rw-r--r--scripts/dtc/checks.c291
-rw-r--r--scripts/dtc/dtc-lexer.lex.c_shipped10
-rw-r--r--scripts/dtc/dtc-parser.tab.c_shipped430
-rw-r--r--scripts/dtc/dtc-parser.y20
-rw-r--r--scripts/dtc/dtc.c2
-rw-r--r--scripts/dtc/dtc.h3
-rw-r--r--scripts/dtc/libfdt/fdt_addresses.c96
-rw-r--r--scripts/dtc/libfdt/fdt_empty_tree.c1
-rw-r--r--scripts/dtc/libfdt/fdt_overlay.c861
-rw-r--r--scripts/dtc/libfdt/fdt_ro.c4
-rw-r--r--scripts/dtc/libfdt/fdt_rw.c24
-rw-r--r--scripts/dtc/libfdt/fdt_sw.c16
-rw-r--r--scripts/dtc/libfdt/fdt_wip.c4
-rw-r--r--scripts/dtc/libfdt/libfdt.h47
-rw-r--r--scripts/dtc/livetree.c31
-rwxr-xr-xscripts/dtc/update-dtc-source.sh4
-rw-r--r--scripts/dtc/version_gen.h2
-rwxr-xr-xscripts/find-unused-docs.sh62
-rwxr-xr-xscripts/headers_install.sh2
-rwxr-xr-xscripts/kernel-doc19
-rwxr-xr-xscripts/leaking_addresses.pl370
-rw-r--r--scripts/mod/modpost.c2
-rw-r--r--security/apparmor/include/lib.h11
-rw-r--r--security/apparmor/label.c8
-rw-r--r--security/apparmor/lsm.c16
-rw-r--r--security/commoncap.c193
-rw-r--r--security/integrity/digsig.c14
-rw-r--r--security/integrity/evm/evm.h3
-rw-r--r--security/integrity/evm/evm_crypto.c2
-rw-r--r--security/integrity/evm/evm_main.c3
-rw-r--r--security/integrity/evm/evm_secfs.c29
-rw-r--r--security/integrity/iint.c49
-rw-r--r--security/integrity/ima/ima_api.c67
-rw-r--r--security/integrity/ima/ima_appraise.c4
-rw-r--r--security/integrity/ima/ima_crypto.c66
-rw-r--r--security/integrity/ima/ima_fs.c6
-rw-r--r--security/integrity/ima/ima_main.c23
-rw-r--r--security/integrity/ima/ima_policy.c6
-rw-r--r--security/integrity/integrity.h2
-rw-r--r--security/selinux/hooks.c55
-rw-r--r--security/selinux/ss/conditional.c1
-rw-r--r--security/selinux/ss/hashtab.c19
-rw-r--r--security/selinux/ss/hashtab.h4
-rw-r--r--security/selinux/ss/services.c4
-rw-r--r--security/smack/smack_lsm.c79
-rw-r--r--security/tomoyo/audit.c2
-rw-r--r--security/tomoyo/common.c4
-rw-r--r--security/tomoyo/common.h2
-rw-r--r--security/tomoyo/util.c39
-rw-r--r--sound/Kconfig35
-rw-r--r--sound/Makefile4
-rw-r--r--sound/ac97/Kconfig19
-rw-r--r--sound/ac97/Makefile8
-rw-r--r--sound/ac97/ac97_core.h16
-rw-r--r--sound/ac97/bus.c539
-rw-r--r--sound/ac97/codec.c15
-rw-r--r--sound/ac97/snd_ac97_compat.c108
-rw-r--r--sound/arm/pxa2xx-ac97-lib.c37
-rw-r--r--sound/arm/pxa2xx-ac97.c35
-rw-r--r--sound/core/hrtimer.c2
-rw-r--r--sound/core/hwdep.c2
-rw-r--r--sound/core/init.c32
-rw-r--r--sound/core/jack.c2
-rw-r--r--sound/core/pcm.c20
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/core/seq/seq_clientmgr.c4
-rw-r--r--sound/core/timer.c11
-rw-r--r--sound/drivers/aloop.c7
-rw-r--r--sound/drivers/dummy.c7
-rw-r--r--sound/drivers/mpu401/mpu401_uart.c7
-rw-r--r--sound/drivers/mtpav.c7
-rw-r--r--sound/drivers/opl3/opl3_midi.c4
-rw-r--r--sound/drivers/opl3/opl3_seq.c2
-rw-r--r--sound/drivers/opl3/opl3_voice.h2
-rw-r--r--sound/drivers/serial-u16550.c7
-rw-r--r--sound/firewire/amdtp-am824.c6
-rw-r--r--sound/firewire/amdtp-stream.c23
-rw-r--r--sound/firewire/amdtp-stream.h2
-rw-r--r--sound/firewire/digi00x/amdtp-dot.c6
-rw-r--r--sound/firewire/fireface/amdtp-ff.c4
-rw-r--r--sound/firewire/fireface/ff-midi.c10
-rw-r--r--sound/firewire/fireface/ff-transaction.c8
-rw-r--r--sound/firewire/isight.c18
-rw-r--r--sound/firewire/motu/amdtp-motu.c4
-rw-r--r--sound/firewire/oxfw/oxfw-scs1x.c12
-rw-r--r--sound/firewire/tascam/amdtp-tascam.c4
-rw-r--r--sound/firewire/tascam/tascam-transaction.c6
-rw-r--r--sound/hda/hdac_controller.c3
-rw-r--r--sound/hda/hdac_device.c43
-rw-r--r--sound/hda/hdac_sysfs.c47
-rw-r--r--sound/hda/local.h2
-rw-r--r--sound/i2c/other/ak4117.c8
-rw-r--r--sound/isa/sb/emu8000_pcm.c6
-rw-r--r--sound/isa/sb/sb8_midi.c11
-rw-r--r--sound/isa/wavefront/wavefront_midi.c10
-rw-r--r--sound/oss/CHANGELOG369
-rw-r--r--sound/oss/Kconfig533
-rw-r--r--sound/oss/Makefile108
-rw-r--r--sound/oss/README.FIRST6
-rw-r--r--sound/oss/ad1848.c3062
-rw-r--r--sound/oss/ad1848.h25
-rw-r--r--sound/oss/ad1848_mixer.h253
-rw-r--r--sound/oss/aedsp16.c1373
-rw-r--r--sound/oss/audio.c985
-rw-r--r--sound/oss/bin2hex.c40
-rw-r--r--sound/oss/coproc.h12
-rw-r--r--sound/oss/dev_table.c256
-rw-r--r--sound/oss/dev_table.h390
-rw-r--r--sound/oss/dmabuf.c1268
-rw-r--r--sound/oss/hex2hex.c102
-rw-r--r--sound/oss/kahlua.c229
-rw-r--r--sound/oss/midi_ctrl.h23
-rw-r--r--sound/oss/midi_synth.c712
-rw-r--r--sound/oss/midi_synth.h48
-rw-r--r--sound/oss/midibuf.c427
-rw-r--r--sound/oss/mpu401.c1804
-rw-r--r--sound/oss/mpu401.h12
-rw-r--r--sound/oss/msnd.c413
-rw-r--r--sound/oss/msnd.h278
-rw-r--r--sound/oss/msnd_classic.c3
-rw-r--r--sound/oss/msnd_classic.h185
-rw-r--r--sound/oss/msnd_pinnacle.c1941
-rw-r--r--sound/oss/msnd_pinnacle.h246
-rw-r--r--sound/oss/opl3.c1255
-rw-r--r--sound/oss/opl3_hw.h246
-rw-r--r--sound/oss/os.h46
-rw-r--r--sound/oss/pas2.h21
-rw-r--r--sound/oss/pas2_card.c458
-rw-r--r--sound/oss/pas2_midi.c262
-rw-r--r--sound/oss/pas2_mixer.c327
-rw-r--r--sound/oss/pas2_pcm.c419
-rw-r--r--sound/oss/pss.c1270
-rw-r--r--sound/oss/sb.h186
-rw-r--r--sound/oss/sb_audio.c1097
-rw-r--r--sound/oss/sb_card.c354
-rw-r--r--sound/oss/sb_card.h149
-rw-r--r--sound/oss/sb_common.c1287
-rw-r--r--sound/oss/sb_ess.c1823
-rw-r--r--sound/oss/sb_ess.h35
-rw-r--r--sound/oss/sb_midi.c206
-rw-r--r--sound/oss/sb_mixer.c770
-rw-r--r--sound/oss/sb_mixer.h105
-rw-r--r--sound/oss/sequencer.c1661
-rw-r--r--sound/oss/sleep.h19
-rw-r--r--sound/oss/sound_calls.h88
-rw-r--r--sound/oss/sound_config.h144
-rw-r--r--sound/oss/sound_firmware.h30
-rw-r--r--sound/oss/sound_timer.c327
-rw-r--r--sound/oss/soundcard.c733
-rw-r--r--sound/oss/soundvers.h2
-rw-r--r--sound/oss/swarm_cs4297a.c2781
-rw-r--r--sound/oss/sys_timer.c280
-rw-r--r--sound/oss/trix.c525
-rw-r--r--sound/oss/tuning.h24
-rw-r--r--sound/oss/uart401.c477
-rw-r--r--sound/oss/uart6850.c361
-rw-r--r--sound/oss/ulaw.h70
-rw-r--r--sound/oss/v_midi.c290
-rw-r--r--sound/oss/v_midi.h15
-rw-r--r--sound/oss/vidc.c557
-rw-r--r--sound/oss/vidc.h63
-rw-r--r--sound/oss/vidc_fill.S218
-rw-r--r--sound/oss/waveartist.c2043
-rw-r--r--sound/oss/waveartist.h93
-rw-r--r--sound/pci/asihpi/asihpi.c15
-rw-r--r--sound/pci/au88x0/au88x0_core.c2
-rw-r--r--sound/pci/ctxfi/cttimer.c7
-rw-r--r--sound/pci/echoaudio/midi.c10
-rw-r--r--sound/pci/emu10k1/emuproc.c1
-rw-r--r--sound/pci/ens1370.c3
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_generic.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c8
-rw-r--r--sound/pci/hda/patch_realtek.c24
-rw-r--r--sound/pci/ice1712/ice1712.c2
-rw-r--r--sound/pci/ice1712/ice1712.h3
-rw-r--r--sound/pci/korg1212/korg1212.c7
-rw-r--r--sound/pci/oxygen/xonar_dg.h2
-rw-r--r--sound/pci/oxygen/xonar_dg_mixer.c2
-rw-r--r--sound/pci/rme9652/hdsp.c8
-rw-r--r--sound/pci/rme9652/hdspm.c8
-rw-r--r--sound/sh/aica.c20
-rw-r--r--sound/soc/Kconfig3
-rw-r--r--sound/soc/Makefile6
-rw-r--r--sound/soc/amd/Kconfig7
-rw-r--r--sound/soc/amd/Makefile6
-rw-r--r--sound/soc/amd/acp-pcm-dma.c356
-rw-r--r--sound/soc/amd/acp-rt5645.c199
-rw-r--r--sound/soc/amd/acp.h19
-rw-r--r--sound/soc/bcm/bcm2835-i2s.c391
-rw-r--r--sound/soc/bcm/cygnus-ssp.c34
-rw-r--r--sound/soc/codecs/Kconfig13
-rw-r--r--sound/soc/codecs/Makefile1
-rw-r--r--sound/soc/codecs/arizona.c166
-rw-r--r--sound/soc/codecs/arizona.h6
-rw-r--r--sound/soc/codecs/cs43130.c16
-rw-r--r--sound/soc/codecs/cs47l24.c14
-rw-r--r--sound/soc/codecs/da7213.c58
-rw-r--r--sound/soc/codecs/da7213.h1
-rw-r--r--sound/soc/codecs/hdac_hdmi.c51
-rw-r--r--sound/soc/codecs/hdmi-codec.c5
-rw-r--r--sound/soc/codecs/max98090.c2
-rw-r--r--sound/soc/codecs/max98925.c23
-rw-r--r--sound/soc/codecs/max98927.c155
-rw-r--r--sound/soc/codecs/max98927.h7
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c120
-rw-r--r--sound/soc/codecs/msm8916-wcd-digital.c4
-rw-r--r--sound/soc/codecs/pcm512x-i2c.c4
-rw-r--r--sound/soc/codecs/pcm512x-spi.c2
-rw-r--r--sound/soc/codecs/pcm512x.c4
-rw-r--r--sound/soc/codecs/pcm512x.h2
-rw-r--r--sound/soc/codecs/rl6231.c5
-rw-r--r--sound/soc/codecs/rt5514-spi.c46
-rw-r--r--sound/soc/codecs/rt5514.c6
-rw-r--r--sound/soc/codecs/rt5645.c39
-rw-r--r--sound/soc/codecs/rt5651.c219
-rw-r--r--sound/soc/codecs/rt5651.h4
-rw-r--r--sound/soc/codecs/rt5659.c26
-rw-r--r--sound/soc/codecs/rt5663.c272
-rw-r--r--sound/soc/codecs/rt5670.c143
-rw-r--r--sound/soc/codecs/rt5670.h4
-rw-r--r--sound/soc/codecs/tas571x.c3
-rw-r--r--sound/soc/codecs/tfa9879.c6
-rw-r--r--sound/soc/codecs/tlv320aic23.c1
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c2
-rw-r--r--sound/soc/codecs/tpa6130a2.c1
-rw-r--r--sound/soc/codecs/ts3a227e.c10
-rw-r--r--sound/soc/codecs/wm5102.c14
-rw-r--r--sound/soc/codecs/wm5110.c14
-rw-r--r--sound/soc/codecs/wm8741.c39
-rw-r--r--sound/soc/codecs/wm8753.c4
-rw-r--r--sound/soc/codecs/wm8993.c2
-rw-r--r--sound/soc/codecs/wm8994.c2
-rw-r--r--sound/soc/codecs/wm8997.c15
-rw-r--r--sound/soc/codecs/wm8998.c95
-rw-r--r--sound/soc/codecs/wm9705.c68
-rw-r--r--sound/soc/codecs/wm9712.c48
-rw-r--r--sound/soc/codecs/wm9713.c39
-rw-r--r--sound/soc/davinci/davinci-mcasp.c21
-rw-r--r--sound/soc/dwc/Kconfig4
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c14
-rw-r--r--sound/soc/fsl/fsl_spdif.c4
-rw-r--r--sound/soc/fsl/fsl_ssi.c46
-rw-r--r--sound/soc/generic/audio-graph-card.c47
-rw-r--r--sound/soc/img/img-i2s-in.c130
-rw-r--r--sound/soc/img/img-i2s-out.c120
-rw-r--r--sound/soc/img/img-parallel-out.c6
-rw-r--r--sound/soc/img/img-spdif-in.c110
-rw-r--r--sound/soc/img/img-spdif-out.c87
-rw-r--r--sound/soc/intel/Kconfig302
-rw-r--r--sound/soc/intel/Makefile2
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-compress.c2
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform.h2
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c312
-rw-r--r--sound/soc/intel/atom/sst/sst_loader.c1
-rw-r--r--sound/soc/intel/atom/sst/sst_stream.c1
-rw-r--r--sound/soc/intel/boards/Kconfig265
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c16
-rw-r--r--sound/soc/intel/boards/bytcht_da7213.c23
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c27
-rw-r--r--sound/soc/intel/boards/bytcht_nocodec.c10
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c118
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c297
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c185
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c128
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c68
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c76
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c3
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_max98357a.c16
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_ssm4567.c16
-rw-r--r--sound/soc/intel/common/Makefile4
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-byt-match.c196
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cht-match.c194
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c64
-rw-r--r--sound/soc/intel/common/sst-acpi.c36
-rw-r--r--sound/soc/intel/common/sst-acpi.h82
-rw-r--r--sound/soc/intel/common/sst-firmware.c3
-rw-r--r--sound/soc/intel/skylake/skl-messages.c32
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c9
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c49
-rw-r--r--sound/soc/intel/skylake/skl-sst-utils.c15
-rw-r--r--sound/soc/intel/skylake/skl-topology.c70
-rw-r--r--sound/soc/intel/skylake/skl-topology.h10
-rw-r--r--sound/soc/intel/skylake/skl.c50
-rw-r--r--sound/soc/intel/skylake/skl.h4
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c2
-rw-r--r--sound/soc/kirkwood/kirkwood.h2
-rw-r--r--sound/soc/omap/ams-delta.c4
-rw-r--r--sound/soc/omap/omap-hdmi-audio.c3
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c32
-rw-r--r--sound/soc/qcom/lpass-platform.c2
-rw-r--r--sound/soc/rockchip/rk3399_gru_sound.c159
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c1
-rw-r--r--sound/soc/samsung/i2s.c10
-rw-r--r--sound/soc/samsung/i2s.h3
-rw-r--r--sound/soc/samsung/tm2_wm5110.c7
-rw-r--r--sound/soc/sh/fsi.c11
-rw-r--r--sound/soc/sh/rcar/adg.c72
-rw-r--r--sound/soc/sh/rcar/core.c51
-rw-r--r--sound/soc/sh/rcar/ctu.c88
-rw-r--r--sound/soc/sh/rcar/dma.c84
-rw-r--r--sound/soc/sh/rcar/dvc.c60
-rw-r--r--sound/soc/sh/rcar/mix.c158
-rw-r--r--sound/soc/sh/rcar/rsnd.h22
-rw-r--r--sound/soc/sh/rcar/ssi.c58
-rw-r--r--sound/soc/soc-acpi.c (renamed from sound/soc/intel/common/sst-match-acpi.c)56
-rw-r--r--sound/soc/soc-compress.c461
-rw-r--r--sound/soc/soc-core.c222
-rw-r--r--sound/soc/soc-dapm.c158
-rw-r--r--sound/soc/soc-io.c14
-rw-r--r--sound/soc/soc-pcm.c462
-rw-r--r--sound/soc/stm/stm32_sai.c162
-rw-r--r--sound/soc/stm/stm32_sai.h22
-rw-r--r--sound/soc/stm/stm32_sai_sub.c162
-rw-r--r--sound/soc/stm/stm32_spdifrx.c23
-rw-r--r--sound/soc/sunxi/sun4i-codec.c29
-rw-r--r--sound/soc/sunxi/sun8i-codec.c72
-rw-r--r--sound/soc/xtensa/xtfpga-i2s.c6
-rw-r--r--sound/soc/zte/zx-spdif.c4
-rw-r--r--sound/synth/emux/emux.c2
-rw-r--r--sound/synth/emux/emux_oss.c2
-rw-r--r--sound/synth/emux/emux_synth.c4
-rw-r--r--sound/synth/emux/emux_voice.h2
-rw-r--r--sound/usb/6fire/chip.c2
-rw-r--r--sound/usb/bcd2000/bcd2000.c11
-rw-r--r--sound/usb/caiaq/device.c7
-rw-r--r--sound/usb/caiaq/input.c9
-rw-r--r--sound/usb/hiface/pcm.c9
-rw-r--r--sound/usb/line6/capture.c2
-rw-r--r--sound/usb/line6/capture.h2
-rw-r--r--sound/usb/line6/driver.c34
-rw-r--r--sound/usb/line6/driver.h3
-rw-r--r--sound/usb/line6/midi.c17
-rw-r--r--sound/usb/line6/playback.c2
-rw-r--r--sound/usb/line6/playback.h2
-rw-r--r--sound/usb/line6/pod.c11
-rw-r--r--sound/usb/line6/podhd.c26
-rw-r--r--sound/usb/line6/toneport.c7
-rw-r--r--sound/usb/line6/variax.c32
-rw-r--r--sound/usb/midi.c45
-rw-r--r--sound/usb/quirks.c24
-rw-r--r--sound/usb/usx2y/usb_stream.c23
-rw-r--r--sound/usb/usx2y/usbusx2y.c5
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c3
-rw-r--r--tools/arch/x86/include/asm/atomic.h2
-rw-r--r--tools/gpio/gpio-utils.c17
-rw-r--r--tools/include/asm-generic/atomic-gcc.h2
-rw-r--r--tools/include/linux/kmemcheck.h8
-rw-r--r--tools/include/linux/poison.h5
-rw-r--r--tools/include/uapi/drm/i915_drm.h1
-rw-r--r--tools/include/uapi/linux/kcmp.h27
-rw-r--r--tools/include/uapi/linux/prctl.h200
-rw-r--r--tools/objtool/check.c7
-rw-r--r--tools/objtool/objtool.c6
-rw-r--r--tools/perf/Documentation/perf-list.txt11
-rw-r--r--tools/perf/Documentation/perf-record.txt2
-rw-r--r--tools/perf/Documentation/perf-report.txt3
-rw-r--r--tools/perf/Documentation/perf-sched.txt8
-rw-r--r--tools/perf/Documentation/perf-script.txt11
-rw-r--r--tools/perf/Documentation/perf-stat.txt7
-rw-r--r--tools/perf/Documentation/perf-top.txt3
-rw-r--r--tools/perf/Makefile.perf39
-rw-r--r--tools/perf/arch/arm/annotate/instructions.c3
-rw-r--r--tools/perf/arch/arm64/annotate/instructions.c3
-rw-r--r--tools/perf/arch/powerpc/annotate/instructions.c4
-rw-r--r--tools/perf/arch/s390/annotate/instructions.c4
-rw-r--r--tools/perf/arch/x86/annotate/instructions.c14
-rw-r--r--tools/perf/arch/x86/include/arch-tests.h1
-rw-r--r--tools/perf/arch/x86/tests/Build1
-rw-r--r--tools/perf/arch/x86/tests/arch-tests.c4
-rw-r--r--tools/perf/builtin-annotate.c10
-rw-r--r--tools/perf/builtin-buildid-cache.c8
-rw-r--r--tools/perf/builtin-buildid-list.c16
-rw-r--r--tools/perf/builtin-c2c.c11
-rw-r--r--tools/perf/builtin-config.c22
-rw-r--r--tools/perf/builtin-diff.c18
-rw-r--r--tools/perf/builtin-evlist.c12
-rw-r--r--tools/perf/builtin-inject.c36
-rw-r--r--tools/perf/builtin-kmem.c13
-rw-r--r--tools/perf/builtin-kvm.c18
-rw-r--r--tools/perf/builtin-list.c7
-rw-r--r--tools/perf/builtin-lock.c12
-rw-r--r--tools/perf/builtin-mem.c13
-rw-r--r--tools/perf/builtin-record.c159
-rw-r--r--tools/perf/builtin-report.c14
-rw-r--r--tools/perf/builtin-sched.c28
-rw-r--r--tools/perf/builtin-script.c714
-rw-r--r--tools/perf/builtin-stat.c121
-rw-r--r--tools/perf/builtin-timechart.c18
-rw-r--r--tools/perf/builtin-top.c13
-rw-r--r--tools/perf/builtin-trace.c86
-rwxr-xr-xtools/perf/check-headers.sh7
-rw-r--r--tools/perf/perf.h1
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json164
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json164
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json164
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/cache.json1453
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json62
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/memory.json38
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/other.json98
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json544
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json218
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json158
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json158
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json164
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json164
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json140
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json140
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json164
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json164
-rw-r--r--tools/perf/pmu-events/jevents.c24
-rw-r--r--tools/perf/pmu-events/jevents.h2
-rw-r--r--tools/perf/pmu-events/pmu-events.h1
-rw-r--r--tools/perf/tests/attr.c2
-rw-r--r--tools/perf/tests/attr.py6
-rw-r--r--tools/perf/tests/attr/base-record2
-rw-r--r--tools/perf/tests/attr/test-record-group1
-rw-r--r--tools/perf/tests/attr/test-record-group-sampling2
-rw-r--r--tools/perf/tests/attr/test-record-group11
-rw-r--r--tools/perf/tests/attr/test-stat-C01
-rw-r--r--tools/perf/tests/attr/test-stat-basic1
-rw-r--r--tools/perf/tests/attr/test-stat-default4
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-18
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-213
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-313
-rw-r--r--tools/perf/tests/attr/test-stat-group2
-rw-r--r--tools/perf/tests/attr/test-stat-group12
-rw-r--r--tools/perf/tests/attr/test-stat-no-inherit1
-rw-r--r--tools/perf/tests/builtin-test.c1
-rw-r--r--tools/perf/tests/mmap-thread-lookup.c2
-rw-r--r--tools/perf/tests/topology.c22
-rw-r--r--tools/perf/trace/beauty/Build2
-rw-r--r--tools/perf/trace/beauty/beauty.h18
-rw-r--r--tools/perf/trace/beauty/kcmp.c44
-rwxr-xr-xtools/perf/trace/beauty/kcmp_type.sh10
-rwxr-xr-xtools/perf/trace/beauty/madvise_behavior.sh10
-rw-r--r--tools/perf/trace/beauty/mmap.c38
-rw-r--r--tools/perf/trace/beauty/prctl.c82
-rwxr-xr-xtools/perf/trace/beauty/prctl_option.sh17
-rw-r--r--tools/perf/ui/browsers/hists.c180
-rw-r--r--tools/perf/ui/progress.c6
-rw-r--r--tools/perf/ui/progress.h12
-rw-r--r--tools/perf/ui/stdio/hist.c77
-rw-r--r--tools/perf/ui/tui/progress.c32
-rw-r--r--tools/perf/util/Build3
-rw-r--r--tools/perf/util/annotate.c10
-rw-r--r--tools/perf/util/auxtrace.c4
-rw-r--r--tools/perf/util/auxtrace.h4
-rw-r--r--tools/perf/util/callchain.c179
-rw-r--r--tools/perf/util/callchain.h6
-rw-r--r--tools/perf/util/comm.c18
-rw-r--r--tools/perf/util/config.c5
-rw-r--r--tools/perf/util/data-convert-bt.c12
-rw-r--r--tools/perf/util/data.c95
-rw-r--r--tools/perf/util/data.h38
-rw-r--r--tools/perf/util/debug.c31
-rw-r--r--tools/perf/util/dso.c20
-rw-r--r--tools/perf/util/dso.h6
-rw-r--r--tools/perf/util/event.c162
-rw-r--r--tools/perf/util/event.h3
-rw-r--r--tools/perf/util/evlist.c248
-rw-r--r--tools/perf/util/evlist.h77
-rw-r--r--tools/perf/util/evsel.c7
-rw-r--r--tools/perf/util/evsel.h4
-rw-r--r--tools/perf/util/evsel_fprintf.c37
-rw-r--r--tools/perf/util/header.c20
-rw-r--r--tools/perf/util/hist.c7
-rw-r--r--tools/perf/util/intel-bts.c6
-rw-r--r--tools/perf/util/intel-pt.c6
-rw-r--r--tools/perf/util/jit.h2
-rw-r--r--tools/perf/util/jitdump.c10
-rw-r--r--tools/perf/util/machine.c228
-rw-r--r--tools/perf/util/machine.h33
-rw-r--r--tools/perf/util/map.c34
-rw-r--r--tools/perf/util/map.h3
-rw-r--r--tools/perf/util/metricgroup.c490
-rw-r--r--tools/perf/util/metricgroup.h31
-rw-r--r--tools/perf/util/mmap.c352
-rw-r--r--tools/perf/util/mmap.h97
-rw-r--r--tools/perf/util/namespaces.c1
-rw-r--r--tools/perf/util/namespaces.h5
-rw-r--r--tools/perf/util/parse-events.c29
-rw-r--r--tools/perf/util/parse-events.h3
-rw-r--r--tools/perf/util/parse-events.l8
-rw-r--r--tools/perf/util/pmu.c55
-rw-r--r--tools/perf/util/pmu.h2
-rw-r--r--tools/perf/util/print_binary.c30
-rw-r--r--tools/perf/util/print_binary.h18
-rw-r--r--tools/perf/util/probe-file.c1
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/rb_resort.h5
-rw-r--r--tools/perf/util/rwsem.c32
-rw-r--r--tools/perf/util/rwsem.h19
-rw-r--r--tools/perf/util/session.c46
-rw-r--r--tools/perf/util/session.h6
-rw-r--r--tools/perf/util/sort.c6
-rw-r--r--tools/perf/util/sort.h1
-rw-r--r--tools/perf/util/srcline.c296
-rw-r--r--tools/perf/util/srcline.h26
-rw-r--r--tools/perf/util/stat-shadow.c158
-rw-r--r--tools/perf/util/stat.c24
-rw-r--r--tools/perf/util/stat.h6
-rw-r--r--tools/perf/util/symbol.c9
-rw-r--r--tools/perf/util/symbol.h2
-rw-r--r--tools/perf/util/thread.c57
-rw-r--r--tools/perf/util/thread.h3
-rw-r--r--tools/perf/util/top.h1
-rw-r--r--tools/perf/util/trace-event-info.c1
-rw-r--r--tools/perf/util/trace-event-read.c1
-rw-r--r--tools/perf/util/util.c16
-rw-r--r--tools/perf/util/util.h7
-rw-r--r--tools/perf/util/vdso.c4
-rw-r--r--tools/perf/util/zlib.c1
-rw-r--r--tools/power/acpi/tools/acpidump/Makefile1
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c3
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c4
-rw-r--r--tools/power/cpupower/.gitignore3
-rw-r--r--tools/power/cpupower/Makefile6
-rw-r--r--tools/power/cpupower/utils/cpufreq-info.c2
-rw-r--r--tools/testing/radix-tree/multiorder.c2
-rw-r--r--tools/testing/scatterlist/Makefile30
-rw-r--r--tools/testing/scatterlist/linux/mm.h125
-rw-r--r--tools/testing/scatterlist/main.c79
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c131
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr.h2
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_default_test.c2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/config_override.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configcheck.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh4
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-build.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-torture.sh2
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h5
-rw-r--r--tools/testing/selftests/x86/entry_from_vm86.c89
-rw-r--r--tools/testing/selftests/x86/ldt_gdt.c88
-rw-r--r--tools/testing/selftests/x86/protection_keys.c24
-rw-r--r--tools/usb/usbip/Makefile.am3
-rw-r--r--tools/virtio/ringtest/main.h4
-rw-r--r--tools/vm/slabinfo.c11
-rw-r--r--virt/kvm/arm/arm.c3
-rw-r--r--virt/kvm/kvm_main.c2
7588 files changed, 266137 insertions, 199348 deletions
diff --git a/.gitignore b/.gitignore
index 0c39aa20b6ba..6c119eab5d46 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,38 +7,40 @@
# command after changing this file, to see if there are
# any tracked files which get ignored after the change.
#
-# Normal rules
+# Normal rules (sorted alphabetically)
#
.*
+*.a
+*.bin
+*.bz2
+*.c.[012]*.*
+*.dtb
+*.dtb.S
+*.dwo
+*.elf
+*.gcno
+*.gz
+*.i
+*.ko
+*.ll
+*.lst
+*.lz4
+*.lzma
+*.lzo
+*.mod.c
*.o
*.o.*
-*.a
+*.order
+*.patch
*.s
-*.ko
*.so
*.so.dbg
-*.mod.c
-*.i
-*.lst
+*.su
*.symtypes
-*.order
-*.elf
-*.bin
*.tar
-*.gz
-*.bz2
-*.lzma
*.xz
-*.lz4
-*.lzo
-*.patch
-*.gcno
-*.ll
-modules.builtin
Module.symvers
-*.dwo
-*.su
-*.c.[012]*.*
+modules.builtin
#
# Top-level generic files
diff --git a/Documentation/ABI/obsolete/proc-sys-vm-nr_pdflush_threads b/Documentation/ABI/obsolete/proc-sys-vm-nr_pdflush_threads
deleted file mode 100644
index b0b0eeb20fe3..000000000000
--- a/Documentation/ABI/obsolete/proc-sys-vm-nr_pdflush_threads
+++ /dev/null
@@ -1,5 +0,0 @@
-What: /proc/sys/vm/nr_pdflush_threads
-Date: June 2012
-Contact: Wanpeng Li <liwp@linux.vnet.ibm.com>
-Description: Since pdflush is replaced by per-BDI flusher, the interface of old pdflush
- exported in /proc/sys/vm/ should be removed.
diff --git a/Documentation/ABI/obsolete/sysfs-gpio b/Documentation/ABI/obsolete/sysfs-gpio
index 867c1fab20e2..32513dc2eec9 100644
--- a/Documentation/ABI/obsolete/sysfs-gpio
+++ b/Documentation/ABI/obsolete/sysfs-gpio
@@ -11,7 +11,7 @@ Description:
Kernel code may export it for complete or partial access.
GPIOs are identified as they are inside the kernel, using integers in
- the range 0..INT_MAX. See Documentation/gpio.txt for more information.
+ the range 0..INT_MAX. See Documentation/gpio/gpio.txt for more information.
/sys/class/gpio
/export ... asks the kernel to export a GPIO to userspace
diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus
index 5d0125f7bcaf..d4077cc60d55 100644
--- a/Documentation/ABI/stable/sysfs-bus-vmbus
+++ b/Documentation/ABI/stable/sysfs-bus-vmbus
@@ -41,3 +41,73 @@ KernelVersion: 4.5
Contact: K. Y. Srinivasan <kys@microsoft.com>
Description: The 16 bit vendor ID of the device
Users: tools/hv/lsvmbus and user level RDMA libraries
+
+What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/cpu
+Date: September 2017
+KernelVersion: 4.14
+Contact: Stephen Hemminger <sthemmin@microsoft.com>
+Description: VCPU (sub)channel is affinitized to
+Users: tools/hv/lsvmbus and other debugging tools
+
+What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/in_mask
+Date: September 2017
+KernelVersion: 4.14
+Contact: Stephen Hemminger <sthemmin@microsoft.com>
+Description: Inbound channel signaling state
+Users: Debugging tools
+
+What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/latency
+Date: September 2017
+KernelVersion: 4.14
+Contact: Stephen Hemminger <sthemmin@microsoft.com>
+Description: Channel signaling latency
+Users: Debugging tools
+
+What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/out_mask
+Date: September 2017
+KernelVersion: 4.14
+Contact: Stephen Hemminger <sthemmin@microsoft.com>
+Description: Outbound channel signaling state
+Users: Debugging tools
+
+What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/pending
+Date: September 2017
+KernelVersion: 4.14
+Contact: Stephen Hemminger <sthemmin@microsoft.com>
+Description: Channel interrupt pending state
+Users: Debugging tools
+
+What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/read_avail
+Date: September 2017
+KernelVersion: 4.14
+Contact: Stephen Hemminger <sthemmin@microsoft.com>
+Description: Bytes available to read
+Users: Debugging tools
+
+What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/write_avail
+Date: September 2017
+KernelVersion: 4.14
+Contact: Stephen Hemminger <sthemmin@microsoft.com>
+Description: Bytes available to write
+Users: Debugging tools
+
+What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/events
+Date: September 2017
+KernelVersion: 4.14
+Contact: Stephen Hemminger <sthemmin@microsoft.com>
+Description: Number of times we have signaled the host
+Users: Debugging tools
+
+What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/interrupts
+Date: September 2017
+KernelVersion: 4.14
+Contact: Stephen Hemminger <sthemmin@microsoft.com>
+Description: Number of times we have taken an interrupt (incoming)
+Users: Debugging tools
diff --git a/Documentation/ABI/stable/sysfs-devices b/Documentation/ABI/stable/sysfs-devices
index 35c457f8ce73..4404bd9b96c1 100644
--- a/Documentation/ABI/stable/sysfs-devices
+++ b/Documentation/ABI/stable/sysfs-devices
@@ -1,5 +1,5 @@
# Note: This documents additional properties of any device beyond what
-# is documented in Documentation/sysfs-rules.txt
+# is documented in Documentation/admin-guide/sysfs-rules.rst
What: /sys/devices/*/of_node
Date: February 2015
diff --git a/Documentation/ABI/testing/evm b/Documentation/ABI/testing/evm
index 8374d4557e5d..9578247e1792 100644
--- a/Documentation/ABI/testing/evm
+++ b/Documentation/ABI/testing/evm
@@ -7,17 +7,37 @@ Description:
HMAC-sha1 value across the extended attributes, storing the
value as the extended attribute 'security.evm'.
- EVM depends on the Kernel Key Retention System to provide it
- with a trusted/encrypted key for the HMAC-sha1 operation.
- The key is loaded onto the root's keyring using keyctl. Until
- EVM receives notification that the key has been successfully
- loaded onto the keyring (echo 1 > <securityfs>/evm), EVM
- can not create or validate the 'security.evm' xattr, but
- returns INTEGRITY_UNKNOWN. Loading the key and signaling EVM
- should be done as early as possible. Normally this is done
- in the initramfs, which has already been measured as part
- of the trusted boot. For more information on creating and
- loading existing trusted/encrypted keys, refer to:
- Documentation/keys-trusted-encrypted.txt. (A sample dracut
- patch, which loads the trusted/encrypted key and enables
- EVM, is available from http://linux-ima.sourceforge.net/#EVM.)
+ EVM supports two classes of security.evm. The first is
+ an HMAC-sha1 generated locally with a
+ trusted/encrypted key stored in the Kernel Key
+ Retention System. The second is a digital signature
+ generated either locally or remotely using an
+ asymmetric key. These keys are loaded onto root's
+ keyring using keyctl, and EVM is then enabled by
+ echoing a value to <securityfs>/evm:
+
+ 1: enable HMAC validation and creation
+ 2: enable digital signature validation
+ 3: enable HMAC and digital signature validation and HMAC
+ creation
+
+ Further writes will be blocked if HMAC support is enabled or
+ if bit 32 is set:
+
+ echo 0x80000002 ><securityfs>/evm
+
+ will enable digital signature validation and block
+ further writes to <securityfs>/evm.
+
+ Until this is done, EVM can not create or validate the
+ 'security.evm' xattr, but returns INTEGRITY_UNKNOWN.
+ Loading keys and signaling EVM should be done as early
+ as possible. Normally this is done in the initramfs,
+ which has already been measured as part of the trusted
+ boot. For more information on creating and loading
+ existing trusted/encrypted keys, refer to:
+
+ Documentation/security/keys/trusted-encrypted.rst. Both dracut
+ (via 97masterkey and 98integrity) and systemd (via
+ core/ima-setup) have support for loading keys at boot
+ time.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 7eead5f97e02..2e3f919485f4 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -522,6 +522,7 @@ Description:
Specifies the output powerdown mode.
DAC output stage is disconnected from the amplifier and
1kohm_to_gnd: connected to ground via a 1kOhm resistor,
+ 2.5kohm_to_gnd: connected to ground via a 2.5kOhm resistor,
6kohm_to_gnd: connected to ground via a 6kOhm resistor,
20kohm_to_gnd: connected to ground via a 20kOhm resistor,
90kohm_to_gnd: connected to ground via a 90kOhm resistor,
@@ -1242,9 +1243,9 @@ What: /sys/.../iio:deviceX/in_distance_raw
KernelVersion: 4.0
Contact: linux-iio@vger.kernel.org
Description:
- This attribute is used to read the distance covered by the user
- since the last reboot while activated. Units after application
- of scale are meters.
+ This attribute is used to read the measured distance to an object
+ or the distance covered by the user since the last reboot while
+ activated. Units after application of scale are meters.
What: /sys/bus/iio/devices/iio:deviceX/store_eeprom
KernelVersion: 3.4.0
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-cros-ec b/Documentation/ABI/testing/sysfs-bus-iio-cros-ec
index 297b9720f024..0e95c2ca105c 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-cros-ec
+++ b/Documentation/ABI/testing/sysfs-bus-iio-cros-ec
@@ -16,3 +16,13 @@ Description:
the motion sensor is placed. For example, in a laptop a motion
sensor can be located on the base or on the lid. Current valid
values are 'base' and 'lid'.
+
+What: /sys/bus/iio/devices/iio:deviceX/id
+Date: September 2017
+KernelVersion: 4.14
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute is exposed by the CrOS EC legacy accelerometer
+ driver and represents the sensor ID as exposed by the EC. This
+ ID is used by the Android sensor service hardware abstraction
+ layer (sensor HAL) through the Android container on ChromeOS.
diff --git a/Documentation/ABI/testing/sysfs-bus-mmc b/Documentation/ABI/testing/sysfs-bus-mmc
new file mode 100644
index 000000000000..519f028d19cc
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-mmc
@@ -0,0 +1,4 @@
+What: /sys/bus/mmc/devices/.../rev
+Date: October 2017
+Contact: Jin Qian <jinqian@android.com>
+Description: Extended CSD revision number
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power
index 676fdf5f2a99..80a00f7b6667 100644
--- a/Documentation/ABI/testing/sysfs-devices-power
+++ b/Documentation/ABI/testing/sysfs-devices-power
@@ -211,7 +211,9 @@ Description:
device, after it has been suspended at run time, from a resume
request to the moment the device will be ready to process I/O,
in microseconds. If it is equal to 0, however, this means that
- the PM QoS resume latency may be arbitrary.
+ the PM QoS resume latency may be arbitrary and the special value
+ "n/a" means that user space cannot accept any resume latency at
+ all for the given device.
Not all drivers support this attribute. If it isn't supported,
it is not present.
@@ -258,19 +260,3 @@ Description:
This attribute has no effect on system-wide suspend/resume and
hibernation.
-
-What: /sys/devices/.../power/pm_qos_remote_wakeup
-Date: September 2012
-Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
-Description:
- The /sys/devices/.../power/pm_qos_remote_wakeup attribute
- is used for manipulating the PM QoS "remote wakeup required"
- flag. If set, this flag indicates to the kernel that the
- device is a source of user events that have to be signaled from
- its low-power states.
-
- Not all drivers support this attribute. If it isn't supported,
- it is not present.
-
- This attribute has no effect on system-wide suspend/resume and
- hibernation.
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index f3d5817c4ef0..d6d862db3b5d 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -187,7 +187,8 @@ Description: Processor frequency boosting control
This switch controls the boost setting for the whole system.
Boosting allows the CPU and the firmware to run at a frequency
beyond its nominal limit.
- More details can be found in Documentation/cpu-freq/boost.txt
+ More details can be found in
+ Documentation/admin-guide/pm/cpufreq.rst
What: /sys/devices/system/cpu/cpu#/crash_notes
@@ -223,7 +224,8 @@ Description: Parameters for the Intel P-state driver
no_turbo: limits the driver to selecting P states below the turbo
frequency range.
- More details can be found in Documentation/cpu-freq/intel-pstate.txt
+ More details can be found in
+ Documentation/admin-guide/pm/intel_pstate.rst
What: /sys/devices/system/cpu/cpu*/cache/index*/<set_of_attributes_mentioned_below>
Date: July 2014(documented, existed before August 2008)
diff --git a/Documentation/ABI/testing/sysfs-driver-w1_ds28e17 b/Documentation/ABI/testing/sysfs-driver-w1_ds28e17
new file mode 100644
index 000000000000..d301e7017afe
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-w1_ds28e17
@@ -0,0 +1,21 @@
+What: /sys/bus/w1/devices/19-<id>/speed
+Date: Sep 2017
+KernelVersion: 4.14
+Contact: Jan Kandziora <jjj@gmx.de>
+Description: When written, this file sets the I2C speed on the connected
+ DS28E17 chip. When read, it reads the current setting from
+ the DS28E17 chip.
+ Valid values: 100, 400, 900 [kBaud].
+ Default 100, can be set by w1_ds28e17.speed= module parameter.
+Users: w1_ds28e17 driver
+
+What: /sys/bus/w1/devices/19-<id>/stretch
+Date: Sep 2017
+KernelVersion: 4.14
+Contact: Jan Kandziora <jjj@gmx.de>
+Description: When written, this file sets the multiplier used to calculate
+ the busy timeout for I2C operations on the connected DS28E17
+ chip. When read, returns the current setting.
+ Valid values: 1 to 9.
+ Default 1, can be set by w1_ds28e17.stretch= module parameter.
+Users: w1_ds28e17 driver
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index a1d1612f3651..1e0d1dac706b 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -18,7 +18,8 @@ Description:
Writing one of the above strings to this file causes the system
to transition into the corresponding state, if available.
- See Documentation/power/states.txt for more information.
+ See Documentation/admin-guide/pm/sleep-states.rst for more
+ information.
What: /sys/power/mem_sleep
Date: November 2016
@@ -35,7 +36,8 @@ Description:
represented by it to be used on subsequent attempts to suspend
the system.
- See Documentation/power/states.txt for more information.
+ See Documentation/admin-guide/pm/sleep-states.rst for more
+ information.
What: /sys/power/disk
Date: September 2006
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index aa77a25a0940..5ef1047e2e66 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -81,7 +81,9 @@ If you want the driver to put an event into the event log on a panic,
enable the 'Generate a panic event to all BMCs on a panic' option. If
you want the whole panic string put into the event log using OEM
events, enable the 'Generate OEM events containing the panic string'
-option.
+option. You can also enable these dynamically by setting the module
+parameter named "panic_op" in the ipmi_msghandler module to "event"
+or "string". Setting that parameter to "none" disables this function.
Basic Design
------------
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 85f7856f0092..2ca77ad0f238 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -97,6 +97,9 @@ endif # HAVE_SPHINX
# The following targets are independent of HAVE_SPHINX, and the rules should
# work or silently pass without Sphinx.
+refcheckdocs:
+ $(Q)cd $(srctree);scripts/documentation-file-ref-check
+
cleandocs:
$(Q)rm -rf $(BUILDDIR)
$(Q)$(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/media clean
@@ -109,6 +112,7 @@ dochelp:
@echo ' epubdocs - EPUB'
@echo ' xmldocs - XML'
@echo ' linkcheckdocs - check for broken external links (will connect to external hosts)'
+ @echo ' refcheckdocs - check for references to non-existing files under Documentation'
@echo ' cleandocs - clean all generated files'
@echo
@echo ' make SPHINXDIRS="s1 s2" [target] Generate only docs of folder s1, s2'
@@ -116,3 +120,5 @@ dochelp:
@echo
@echo ' make SPHINX_CONF={conf-file} [target] use *additional* sphinx-build'
@echo ' configuration. This is e.g. useful to build with nit-picking config.'
+ @echo
+ @echo ' Default location for the generated documents is Documentation/output'
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
index e5d0bbd0230b..7394f034be65 100644
--- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
+++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
@@ -527,7 +527,7 @@ grace period also drove it to completion.
This straightforward approach had the disadvantage of needing to
account for POSIX signals sent to user tasks,
so more recent implementations use the Linux kernel's
-<a href="https://www.kernel.org/doc/Documentation/workqueue.txt">workqueues</a>.
+<a href="https://www.kernel.org/doc/Documentation/core-api/workqueue.rst">workqueues</a>.
<p>
The requesting task still does counter snapshotting and funnel-lock
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Diagram.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Diagram.html
new file mode 100644
index 000000000000..e5b42a798ff3
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Diagram.html
@@ -0,0 +1,9 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+ "http://www.w3.org/TR/html4/loose.dtd">
+ <html>
+ <head><title>A Diagram of TREE_RCU's Grace-Period Memory Ordering</title>
+ <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+
+<p><img src="TreeRCU-gp.svg" alt="TreeRCU-gp.svg">
+
+</body></html>
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
new file mode 100644
index 000000000000..8651b0b4fd79
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
@@ -0,0 +1,707 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+ "http://www.w3.org/TR/html4/loose.dtd">
+ <html>
+ <head><title>A Tour Through TREE_RCU's Grace-Period Memory Ordering</title>
+ <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+
+ <p>August 8, 2017</p>
+ <p>This article was contributed by Paul E.&nbsp;McKenney</p>
+
+<h3>Introduction</h3>
+
+<p>This document gives a rough visual overview of how Tree RCU's
+grace-period memory ordering guarantee is provided.
+
+<ol>
+<li> <a href="#What Is Tree RCU's Grace Period Memory Ordering Guarantee?">
+ What Is Tree RCU's Grace Period Memory Ordering Guarantee?</a>
+<li> <a href="#Tree RCU Grace Period Memory Ordering Building Blocks">
+ Tree RCU Grace Period Memory Ordering Building Blocks</a>
+<li> <a href="#Tree RCU Grace Period Memory Ordering Components">
+ Tree RCU Grace Period Memory Ordering Components</a>
+<li> <a href="#Putting It All Together">Putting It All Together</a>
+</ol>
+
+<h3><a name="What Is Tree RCU's Grace Period Memory Ordering Guarantee?">
+What Is Tree RCU's Grace Period Memory Ordering Guarantee?</a></h3>
+
+<p>RCU grace periods provide extremely strong memory-ordering guarantees
+for non-idle non-offline code.
+Any code that happens after the end of a given RCU grace period is guaranteed
+to see the effects of all accesses prior to the beginning of that grace
+period that are within RCU read-side critical sections.
+Similarly, any code that happens before the beginning of a given RCU grace
+period is guaranteed to see the effects of all accesses following the end
+of that grace period that are within RCU read-side critical sections.
+
+<p>This guarantee is particularly pervasive for <tt>synchronize_sched()</tt>,
+for which RCU-sched read-side critical sections include any region
+of code for which preemption is disabled.
+Given that each individual machine instruction can be thought of as
+an extremely small region of preemption-disabled code, one can think of
+<tt>synchronize_sched()</tt> as <tt>smp_mb()</tt> on steroids.
+
+<p>RCU updaters use this guarantee by splitting their updates into
+two phases, one of which is executed before the grace period and
+the other of which is executed after the grace period.
+In the most common use case, phase one removes an element from
+a linked RCU-protected data structure, and phase two frees that element.
+For this to work, any readers that have witnessed state prior to the
+phase-one update (in the common case, removal) must not witness state
+following the phase-two update (in the common case, freeing).
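+
+<p>For concreteness, the following is a minimal sketch of this two-phase
+pattern using the RCU list primitives.
+The <tt>struct foo</tt> type, its fields, and the <tt>foo_reclaim()</tt>
+and <tt>foo_remove()</tt> helpers are purely illustrative:
+
+<pre>
+ 1 struct foo {
+ 2   struct list_head list;
+ 3   struct rcu_head rcu;
+ 4   int data;
+ 5 };
+ 6
+ 7 static void foo_reclaim(struct rcu_head *rhp)
+ 8 {
+ 9   kfree(container_of(rhp, struct foo, rcu)); /* Phase two: free. */
+10 }
+11
+12 void foo_remove(struct foo *p)
+13 {
+14   list_del_rcu(&amp;p-&gt;list);         /* Phase one: unlink. */
+15   call_rcu(&amp;p-&gt;rcu, foo_reclaim); /* Free after a grace period. */
+16 }
+</pre>
+
+<p>Readers traversing the list within RCU read-side critical sections
+might still hold references to the element that phase one just unlinked,
+which is exactly why the phase-two free must wait for a grace period.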
+
+<p>The RCU implementation provides this guarantee using a network
+of lock-based critical sections, memory barriers, and per-CPU
+processing, as is described in the following sections.
+
+<h3><a name="Tree RCU Grace Period Memory Ordering Building Blocks">
+Tree RCU Grace Period Memory Ordering Building Blocks</a></h3>
+
+<p>The workhorse for RCU's grace-period memory ordering is the
+critical section for the <tt>rcu_node</tt> structure's
+<tt>-&gt;lock</tt>.
+These critical sections use helper functions for lock acquisition, including
+<tt>raw_spin_lock_rcu_node()</tt>,
+<tt>raw_spin_lock_irq_rcu_node()</tt>, and
+<tt>raw_spin_lock_irqsave_rcu_node()</tt>.
+Their lock-release counterparts are
+<tt>raw_spin_unlock_rcu_node()</tt>,
+<tt>raw_spin_unlock_irq_rcu_node()</tt>, and
+<tt>raw_spin_unlock_irqrestore_rcu_node()</tt>,
+respectively.
+For completeness, a
+<tt>raw_spin_trylock_rcu_node()</tt>
+is also provided.
+The key point is that the lock-acquisition functions, including
+<tt>raw_spin_trylock_rcu_node()</tt>, all invoke
+<tt>smp_mb__after_unlock_lock()</tt> immediately after successful
+acquisition of the lock.
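+
+<p>As a rough sketch, such a lock-acquisition helper simply pairs the
+lock operation with the additional memory barrier (the actual kernel
+macros are slightly more elaborate, for example going through a
+private-field accessor for lockdep purposes):
+
+<pre>
+ 1 #define raw_spin_lock_rcu_node(rnp)          \
+ 2 do {                                         \
+ 3   raw_spin_lock(&amp;(rnp)-&gt;lock);               \
+ 4   smp_mb__after_unlock_lock();               \
+ 5 } while (0)
+</pre>
+
+<p>On most architectures <tt>smp_mb__after_unlock_lock()</tt> is a no-op
+because unlock followed by lock already implies full ordering; on those
+(such as PowerPC) where it does not, it expands to <tt>smp_mb()</tt>.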
+
+<p>Therefore, for any given <tt>rcu_node</tt> structure, any access
+happening before one of the above lock-release functions will be seen
+by all CPUs as happening before any access happening after a later
+one of the above lock-acquisition functions.
+Furthermore, any access happening before one of the
+above lock-release functions on any given CPU will be seen by all
+CPUs as happening before any access happening after a later one
+of the above lock-acquisition functions executing on that same CPU,
+even if the lock-release and lock-acquisition functions are operating
+on different <tt>rcu_node</tt> structures.
+Tree RCU uses these two ordering guarantees to form an ordering
+network among all CPUs that were in any way involved in the grace
+period, including any CPUs that came online or went offline during
+the grace period in question.
+
+<p>The following litmus test exhibits the ordering effects of these
+lock-acquisition and lock-release functions:
+
+<pre>
+ 1 int x, y, z;
+ 2
+ 3 void task0(void)
+ 4 {
+ 5 raw_spin_lock_rcu_node(rnp);
+ 6 WRITE_ONCE(x, 1);
+ 7 r1 = READ_ONCE(y);
+ 8 raw_spin_unlock_rcu_node(rnp);
+ 9 }
+10
+11 void task1(void)
+12 {
+13 raw_spin_lock_rcu_node(rnp);
+14 WRITE_ONCE(y, 1);
+15 r2 = READ_ONCE(z);
+16 raw_spin_unlock_rcu_node(rnp);
+17 }
+18
+19 void task2(void)
+20 {
+21 WRITE_ONCE(z, 1);
+22 smp_mb();
+23 r3 = READ_ONCE(x);
+24 }
+25
+26 WARN_ON(r1 == 0 &amp;&amp; r2 == 0 &amp;&amp; r3 == 0);
+</pre>
+
+<p>The <tt>WARN_ON()</tt> is evaluated at &ldquo;the end of time&rdquo;,
+after all changes have propagated throughout the system.
+Without the <tt>smp_mb__after_unlock_lock()</tt> provided by the
+acquisition functions, this <tt>WARN_ON()</tt> could trigger, for example
+on PowerPC.
+The <tt>smp_mb__after_unlock_lock()</tt> invocations prevent this
+<tt>WARN_ON()</tt> from triggering.
+
+<p>This approach must be extended to include idle CPUs, which need
+RCU's grace-period memory ordering guarantee to extend to any
+RCU read-side critical sections preceding and following the current
+idle sojourn.
+This case is handled by calls to the strongly ordered
+<tt>atomic_add_return()</tt> read-modify-write atomic operation that
+is invoked within <tt>rcu_dynticks_eqs_enter()</tt> at idle-entry
+time and within <tt>rcu_dynticks_eqs_exit()</tt> at idle-exit time.
+The grace-period kthread invokes <tt>rcu_dynticks_snap()</tt> and
+<tt>rcu_dynticks_in_eqs_since()</tt> (both of which invoke
+an <tt>atomic_add_return()</tt> of zero) to detect idle CPUs.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+ But what about CPUs that remain offline for the entire
+ grace period?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+ Such CPUs will be offline at the beginning of the grace period,
+ so the grace period won't expect quiescent states from them.
+ Races between grace-period start and CPU-hotplug operations
+ are mediated by the CPU's leaf <tt>rcu_node</tt> structure's
+ <tt>-&gt;lock</tt> as described above.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<p>The approach must be extended to handle one final case, that
+of waking a task blocked in <tt>synchronize_rcu()</tt>.
+This task might be affinitied to a CPU that is not yet aware that
+the grace period has ended, and thus might not yet be subject to
+the grace period's memory ordering.
+Therefore, there is an <tt>smp_mb()</tt> after the return from
+<tt>wait_for_completion()</tt> in the <tt>synchronize_rcu()</tt>
+code path.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+ What? Where???
+ I don't see any <tt>smp_mb()</tt> after the return from
+ <tt>wait_for_completion()</tt>!!!
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+ That would be because I spotted the need for that
+ <tt>smp_mb()</tt> during the creation of this documentation,
+ and it is therefore unlikely to hit mainline before v4.14.
+ Kudos to Lance Roy, Will Deacon, Peter Zijlstra, and
+ Jonathan Cameron for asking questions that sensitized me
+ to the rather elaborate sequence of events that demonstrate
+ the need for this memory barrier.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<p>Tree RCU's grace-period memory-ordering guarantees rely most
+heavily on the <tt>rcu_node</tt> structure's <tt>-&gt;lock</tt>
+field, so much so that it is necessary to abbreviate this pattern
+in the diagrams in the next section.
+For example, consider the <tt>rcu_prepare_for_idle()</tt> function
+shown below, which is one of several functions that enforce ordering
+of newly arrived RCU callbacks against future grace periods:
+
+<pre>
+ 1 static void rcu_prepare_for_idle(void)
+ 2 {
+ 3 bool needwake;
+ 4 struct rcu_data *rdp;
+ 5 struct rcu_dynticks *rdtp = this_cpu_ptr(&amp;rcu_dynticks);
+ 6 struct rcu_node *rnp;
+ 7 struct rcu_state *rsp;
+ 8 int tne;
+ 9
+10 if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ||
+11 rcu_is_nocb_cpu(smp_processor_id()))
+12 return;
+13 tne = READ_ONCE(tick_nohz_active);
+14 if (tne != rdtp-&gt;tick_nohz_enabled_snap) {
+15 if (rcu_cpu_has_callbacks(NULL))
+16 invoke_rcu_core();
+17 rdtp-&gt;tick_nohz_enabled_snap = tne;
+18 return;
+19 }
+20 if (!tne)
+21 return;
+22 if (rdtp-&gt;all_lazy &amp;&amp;
+23 rdtp-&gt;nonlazy_posted != rdtp-&gt;nonlazy_posted_snap) {
+24 rdtp-&gt;all_lazy = false;
+25 rdtp-&gt;nonlazy_posted_snap = rdtp-&gt;nonlazy_posted;
+26 invoke_rcu_core();
+27 return;
+28 }
+29 if (rdtp-&gt;last_accelerate == jiffies)
+30 return;
+31 rdtp-&gt;last_accelerate = jiffies;
+32 for_each_rcu_flavor(rsp) {
+33 rdp = this_cpu_ptr(rsp-&gt;rda);
+34 if (rcu_segcblist_pend_cbs(&amp;rdp-&gt;cblist))
+35 continue;
+36 rnp = rdp-&gt;mynode;
+37 raw_spin_lock_rcu_node(rnp);
+38 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+39 raw_spin_unlock_rcu_node(rnp);
+40 if (needwake)
+41 rcu_gp_kthread_wake(rsp);
+42 }
+43 }
+</pre>
+
+<p>But the only part of <tt>rcu_prepare_for_idle()</tt> that really
+matters for this discussion is lines&nbsp;37&ndash;39.
+We will therefore abbreviate this function as follows:
+
+</p><p><img src="rcu_node-lock.svg" alt="rcu_node-lock.svg">
+
+<p>The box represents the <tt>rcu_node</tt> structure's <tt>-&gt;lock</tt>
+critical section, with the double line on top representing the additional
+<tt>smp_mb__after_unlock_lock()</tt>.
+
+<h3><a name="Tree RCU Grace Period Memory Ordering Components">
+Tree RCU Grace Period Memory Ordering Components</a></h3>
+
+<p>Tree RCU's grace-period memory-ordering guarantee is provided by
+a number of RCU components:
+
+<ol>
+<li> <a href="#Callback Registry">Callback Registry</a>
+<li> <a href="#Grace-Period Initialization">Grace-Period Initialization</a>
+<li> <a href="#Self-Reported Quiescent States">
+ Self-Reported Quiescent States</a>
+<li> <a href="#Dynamic Tick Interface">Dynamic Tick Interface</a>
+<li> <a href="#CPU-Hotplug Interface">CPU-Hotplug Interface</a>
+<li> <a href="Forcing Quiescent States">Forcing Quiescent States</a>
+<li> <a href="Grace-Period Cleanup">Grace-Period Cleanup</a>
+<li> <a href="Callback Invocation">Callback Invocation</a>
+</ol>
+
+<p>Each of the following sections looks at the corresponding component
+in detail.
+
+<h4><a name="Callback Registry">Callback Registry</a></h4>
+
+<p>If RCU's grace-period guarantee is to mean anything at all, any
+access that happens before a given invocation of <tt>call_rcu()</tt>
+must also happen before the corresponding grace period.
+The implementation of this portion of RCU's grace period guarantee
+is shown in the following figure:
+
+</p><p><img src="TreeRCU-callback-registry.svg" alt="TreeRCU-callback-registry.svg">
+
+<p>Because <tt>call_rcu()</tt> normally acts only on CPU-local state,
+it provides no ordering guarantees, either for itself or for
+phase one of the update (which again will usually be removal of
+an element from an RCU-protected data structure).
+It simply enqueues the <tt>rcu_head</tt> structure on a per-CPU list,
+which cannot become associated with a grace period until a later
+call to <tt>rcu_accelerate_cbs()</tt>, as shown in the diagram above.
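+
+<p>As a rough sketch (eliding debug checks and special cases such as
+no-CBs CPUs), the enqueue step performed on behalf of
+<tt>call_rcu()</tt> is just a handful of CPU-local operations, which is
+why it implies no ordering:
+
+<pre>
+ 1 head-&gt;func = func;
+ 2 head-&gt;next = NULL;
+ 3 local_irq_save(flags);
+ 4 rdp = this_cpu_ptr(rsp-&gt;rda);
+ 5 rcu_segcblist_enqueue(&amp;rdp-&gt;cblist, head, lazy);
+ 6 local_irq_restore(flags);
+</pre>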
+
+<p>One set of code paths shown on the left invokes
+<tt>rcu_accelerate_cbs()</tt> via
+<tt>note_gp_changes()</tt>, either directly from <tt>call_rcu()</tt> (if
+the current CPU is inundated with queued <tt>rcu_head</tt> structures)
+or more likely from an <tt>RCU_SOFTIRQ</tt> handler.
+Another code path in the middle is taken only in kernels built with
+<tt>CONFIG_RCU_FAST_NO_HZ=y</tt>, which invokes
+<tt>rcu_accelerate_cbs()</tt> via <tt>rcu_prepare_for_idle()</tt>.
+The final code path on the right is taken only in kernels built with
+<tt>CONFIG_HOTPLUG_CPU=y</tt>, which invokes
+<tt>rcu_accelerate_cbs()</tt> via
+<tt>rcu_advance_cbs()</tt>, <tt>rcu_migrate_callbacks()</tt>,
+<tt>rcutree_migrate_callbacks()</tt>, and <tt>takedown_cpu()</tt>,
+which in turn is invoked on a surviving CPU after the outgoing
+CPU has been completely offlined.
+
+<p>There are a few other code paths within grace-period processing
+that opportunistically invoke <tt>rcu_accelerate_cbs()</tt>.
+However, either way, all of the CPU's recently queued <tt>rcu_head</tt>
+structures are associated with a future grace-period number under
+the protection of the CPU's leaf <tt>rcu_node</tt> structure's
+<tt>-&gt;lock</tt>.
+In all cases, there is full ordering against any prior critical section
+for that same <tt>rcu_node</tt> structure's <tt>-&gt;lock</tt>, and
+also full ordering against any of the current task's or CPU's prior critical
+sections for any <tt>rcu_node</tt> structure's <tt>-&gt;lock</tt>.
+
+<p>The next section will show how this ordering ensures that any
+accesses prior to the <tt>call_rcu()</tt> (particularly including phase
+one of the update)
+happen before the start of the corresponding grace period.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+ But what about <tt>synchronize_rcu()</tt>?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+ The <tt>synchronize_rcu()</tt> passes <tt>call_rcu()</tt>
+ to <tt>wait_rcu_gp()</tt>, which invokes it.
+ So either way, it eventually comes down to <tt>call_rcu()</tt>.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<h4><a name="Grace-Period Initialization">Grace-Period Initialization</a></h4>
+
+<p>Grace-period initialization is carried out by
+the grace-period kernel thread, which makes several passes over the
+<tt>rcu_node</tt> tree within the <tt>rcu_gp_init()</tt> function.
+This means that showing the full flow of ordering through the
+grace-period computation will require duplicating this tree.
+If you find this confusing, please note that the state of the
+<tt>rcu_node</tt> changes over time, just like Heraclitus's river.
+However, to keep the <tt>rcu_node</tt> river tractable, the
+grace-period kernel thread's traversals are presented in multiple
+parts, starting in this section with the various phases of
+grace-period initialization.
+
+<p>The first ordering-related grace-period initialization action is to
+increment the <tt>rcu_state</tt> structure's <tt>-&gt;gpnum</tt>
+grace-period-number counter, as shown below:
+
+</p><p><img src="TreeRCU-gp-init-1.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
+
+<p>The actual increment is carried out using <tt>smp_store_release()</tt>,
+which helps reject false-positive RCU CPU stall detection.
+Note that only the root <tt>rcu_node</tt> structure is touched.
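+
+<p>In <tt>rcu_gp_init()</tt>, this step looks roughly as follows, with
+the release store ensuring that the just-recorded grace-period start
+time is visible before the new <tt>-&gt;gpnum</tt> value:
+
+<pre>
+ 1 record_gp_stall_check_time(rsp);
+ 2 /* Record GP times before starting GP, hence smp_store_release(). */
+ 3 smp_store_release(&amp;rsp-&gt;gpnum, rsp-&gt;gpnum + 1);
+</pre>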
+
+<p>The first pass through the <tt>rcu_node</tt> tree updates bitmasks
+based on CPUs having come online or gone offline since the start of
+the previous grace period.
+In the common case where the number of online CPUs for this <tt>rcu_node</tt>
+structure has not transitioned to or from zero,
+this pass will scan only the leaf <tt>rcu_node</tt> structures.
+However, if the number of online CPUs for a given leaf <tt>rcu_node</tt>
+structure has transitioned from zero,
+<tt>rcu_init_new_rnp()</tt> will be invoked for the first incoming CPU.
+Similarly, if the number of online CPUs for a given leaf <tt>rcu_node</tt>
+structure has transitioned to zero,
+<tt>rcu_cleanup_dead_rnp()</tt> will be invoked for the last outgoing CPU.
+The diagram below shows the path of ordering if the leftmost
+<tt>rcu_node</tt> structure onlines its first CPU and if the next
+<tt>rcu_node</tt> structure has no online CPUs
+(or, alternatively if the leftmost <tt>rcu_node</tt> structure offlines
+its last CPU and if the next <tt>rcu_node</tt> structure has no online CPUs).
+
+</p><p><img src="TreeRCU-gp-init-2.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
+
+<p>The final <tt>rcu_gp_init()</tt> pass through the <tt>rcu_node</tt>
+tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
+<tt>-&gt;gpnum</tt> field to the newly incremented value from the
+<tt>rcu_state</tt> structure, as shown in the following diagram.
+
+</p><p><img src="TreeRCU-gp-init-3.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
+
+<p>This change will also cause each CPU's next call to
+<tt>__note_gp_changes()</tt>
+to notice that a new grace period has started, as described in the next
+section.
+But because the grace-period kthread started the grace period at the
+root (with the increment of the <tt>rcu_state</tt> structure's
+<tt>-&gt;gpnum</tt> field) before setting each leaf <tt>rcu_node</tt>
+structure's <tt>-&gt;gpnum</tt> field, each CPU's observation of
+the start of the grace period will happen after the actual start
+of the grace period.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+ But what about the CPU that started the grace period?
+ Why wouldn't it see the start of the grace period right when
+ it started that grace period?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	In some deep philosophical and overly anthropomorphized
+ sense, yes, the CPU starting the grace period is immediately
+ aware of having done so.
+ However, if we instead assume that RCU is not self-aware,
+ then even the CPU starting the grace period does not really
+ become aware of the start of this grace period until its
+ first call to <tt>__note_gp_changes()</tt>.
+ On the other hand, this CPU potentially gets early notification
+ because it invokes <tt>__note_gp_changes()</tt> during its
+ last <tt>rcu_gp_init()</tt> pass through its leaf
+ <tt>rcu_node</tt> structure.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<h4><a name="Self-Reported Quiescent States">
+Self-Reported Quiescent States</a></h4>
+
+<p>When all entities that might block the grace period have reported
+quiescent states (or as described in a later section, had quiescent
+states reported on their behalf), the grace period can end.
+Online non-idle CPUs report their own quiescent states, as shown
+in the following diagram:
+
+</p><p><img src="TreeRCU-qs.svg" alt="TreeRCU-qs.svg" width="75%">
+
+<p>This is for the last CPU to report a quiescent state, which signals
+the end of the grace period.
+Earlier quiescent states would push up the <tt>rcu_node</tt> tree
+only until they encountered an <tt>rcu_node</tt> structure that
+is waiting for additional quiescent states.
+However, ordering is nevertheless preserved because some later quiescent
+state will acquire that <tt>rcu_node</tt> structure's <tt>-&gt;lock</tt>.
+
+<p>Any number of events can lead up to a CPU invoking
+<tt>note_gp_changes()</tt> (or alternatively, directly invoking
+<tt>__note_gp_changes()</tt>), at which point that CPU will notice
+the start of a new grace period while holding its leaf
+<tt>rcu_node</tt> lock.
+Therefore, all execution shown in this diagram happens after the
+start of the grace period.
+In addition, this CPU will consider any RCU read-side critical
+section that started before the invocation of <tt>__note_gp_changes()</tt>
+to have started before the grace period, and thus a critical
+section that the grace period must wait on.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	But an RCU read-side critical section might have started
+ after the beginning of the grace period
+ (the <tt>-&gt;gpnum++</tt> from earlier), so why should
+ the grace period wait on such a critical section?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+ It is indeed not necessary for the grace period to wait on such
+ a critical section.
+ However, it is permissible to wait on it.
+ And it is furthermore important to wait on it, as this
+ lazy approach is far more scalable than a &ldquo;big bang&rdquo;
+ all-at-once grace-period start could possibly be.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<p>If the CPU does a context switch, a quiescent state will be
+noted by <tt>rcu_note_context_switch()</tt> on the left.
+On the other hand, if the CPU takes a scheduler-clock interrupt
+while executing in usermode, a quiescent state will be noted by
+<tt>rcu_check_callbacks()</tt> on the right.
+Either way, the passage through a quiescent state will be noted
+in a per-CPU variable.
+
+<p>The next time an <tt>RCU_SOFTIRQ</tt> handler executes on
+this CPU (for example, after the next scheduler-clock
+interrupt), <tt>__rcu_process_callbacks()</tt> will invoke
+<tt>rcu_check_quiescent_state()</tt>, which will notice the
+recorded quiescent state, and invoke
+<tt>rcu_report_qs_rdp()</tt>.
+If <tt>rcu_report_qs_rdp()</tt> verifies that the quiescent state
+really does apply to the current grace period, it invokes
+<tt>rcu_report_qs_rnp()</tt>, which traverses up the <tt>rcu_node</tt>
+tree as shown at the bottom of the diagram, clearing bits from
+each <tt>rcu_node</tt> structure's <tt>-&gt;qsmask</tt> field,
+and propagating up the tree when the result is zero.
+
+<p>Note that traversal passes upwards out of a given <tt>rcu_node</tt>
+structure only if the current CPU is reporting the last quiescent
+state for the subtree headed by that <tt>rcu_node</tt> structure.
+A key point is that if a CPU's traversal stops at a given <tt>rcu_node</tt>
+structure, then there will be a later traversal by another CPU
+(or perhaps the same one) that proceeds upwards
+from that point, and the <tt>rcu_node</tt> <tt>-&gt;lock</tt>
+guarantees that the first CPU's quiescent state happens before the
+remainder of the second CPU's traversal.
+Applying this line of thought repeatedly shows that all CPUs'
+quiescent states happen before the last CPU traverses through
+the root <tt>rcu_node</tt> structure, the &ldquo;last CPU&rdquo;
+being the one that clears the last bit in the root <tt>rcu_node</tt>
+structure's <tt>-&gt;qsmask</tt> field.
+
+<h4><a name="Dynamic Tick Interface">Dynamic Tick Interface</a></h4>
+
+<p>Due to energy-efficiency considerations, RCU is forbidden from
+disturbing idle CPUs.
+CPUs are therefore required to notify RCU when entering or leaving idle
+state, which they do via fully ordered value-returning atomic operations
+on a per-CPU variable.
+The ordering effects are as shown below:
+
+</p><p><img src="TreeRCU-dyntick.svg" alt="TreeRCU-dyntick.svg" width="50%">
+
+<p>The RCU grace-period kernel thread samples the per-CPU idleness
+variable while holding the corresponding CPU's leaf <tt>rcu_node</tt>
+structure's <tt>-&gt;lock</tt>.
+This means that any RCU read-side critical sections that precede the
+idle period (the oval near the top of the diagram above) will happen
+before the end of the current grace period.
+Similarly, the beginning of the current grace period will happen before
+any RCU read-side critical sections that follow the
+idle period (the oval near the bottom of the diagram above).
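+
+<p>A rough sketch of the two sides of this handshake follows; the real
+code packs additional state into the counter, so the exact increment
+and the decision logic are simplified here:
+
+<pre>
+ 1 /* Idle entry or exit: fully ordered update of the per-CPU counter. */
+ 2 seq = atomic_add_return(1, &amp;rdtp-&gt;dynticks);
+ 3
+ 4 /* Grace-period kthread, holding the CPU's leaf rcu_node -&gt;lock: */
+ 5 snap = atomic_add_return(0, &amp;rdtp-&gt;dynticks); /* ordered snapshot */
+ 6 /* ... later, compare a second snapshot against snap to detect an
+ 7    intervening idle sojourn ... */
+</pre>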
+
+<p>Plumbing this into the full grace-period execution is described
+<a href="#Forcing Quiescent States">below</a>.
+
+<h4><a name="CPU-Hotplug Interface">CPU-Hotplug Interface</a></h4>
+
+<p>RCU is also forbidden from disturbing offline CPUs, which might well
+be powered off and removed from the system completely.
+CPUs are therefore required to notify RCU of their comings and goings
+as part of the corresponding CPU hotplug operations.
+The ordering effects are shown below:
+
+</p><p><img src="TreeRCU-hotplug.svg" alt="TreeRCU-hotplug.svg" width="50%">
+
+<p>Because CPU hotplug operations are much less frequent than idle transitions,
+they are heavier weight, and thus acquire the CPU's leaf <tt>rcu_node</tt>
+structure's <tt>-&gt;lock</tt> and update this structure's
+<tt>-&gt;qsmaskinitnext</tt>.
+The RCU grace-period kernel thread samples this mask to detect CPUs
+having gone offline since the beginning of this grace period.
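+
+<p>A rough sketch of the CPU-online side of this interface is shown
+below; the offline side clears the same bit in the same
+<tt>-&gt;qsmaskinitnext</tt> field under the same lock:
+
+<pre>
+ 1 mask = rdp-&gt;grpmask;
+ 2 raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ 3 rnp-&gt;qsmaskinitnext |= mask;  /* Mark this CPU as present. */
+ 4 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+</pre>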
+
+<p>Plumbing this into the full grace-period execution is described
+<a href="#Forcing Quiescent States">below</a>.
+
+<h4><a name="Forcing Quiescent States">Forcing Quiescent States</a></h4>
+
+<p>As noted above, idle and offline CPUs cannot report their own
+quiescent states, and therefore the grace-period kernel thread
+must do the reporting on their behalf.
+This process is called &ldquo;forcing quiescent states&rdquo;; it is
+repeated every few jiffies, and its ordering effects are shown below:
+
+</p><p><img src="TreeRCU-gp-fqs.svg" alt="TreeRCU-gp-fqs.svg" width="100%">
+
+<p>Each pass of quiescent state forcing is guaranteed to traverse the
+leaf <tt>rcu_node</tt> structures, and if there are no new quiescent
+states due to recently idled and/or offlined CPUs, then only the
+leaves are traversed.
+However, if there is a newly offlined CPU as illustrated on the left
+or a newly idled CPU as illustrated on the right, the corresponding
+quiescent state will be driven up towards the root.
+As with self-reported quiescent states, the upwards driving stops
+once it reaches an <tt>rcu_node</tt> structure that has quiescent
+states outstanding from other CPUs.
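+
+<p>In heavily simplified pseudocode (not the actual forcing code path),
+each forcing pass can be pictured as a scan of the leaves followed by
+the same upward walk used for self-reported quiescent states; the
+iterators and helpers below are hypothetical:
+
+<blockquote>
+<pre>
+static void force_quiescent_states(void)
+{
+        struct rcu_node *rnp;
+
+        for_each_leaf_rcu_node(rnp) {                    /* hypothetical iterator */
+                unsigned long mask = 0;
+                int cpu;
+
+                raw_spin_lock_irq(&amp;rnp-&gt;lock);
+                for_each_cpu_waited_on(cpu, rnp)         /* hypothetical iterator */
+                        if (cpu_is_idle_or_offline(cpu)) /* hypothetical check */
+                                mask |= leaf_grpmask_of(cpu);
+                raw_spin_unlock_irq(&amp;rnp-&gt;lock);
+                if (mask)
+                        report_qs(rnp, mask);  /* Upward walk sketched earlier. */
+        }
+}
+</pre>
+</blockquote>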
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+ The leftmost drive to root stopped before it reached
+ the root <tt>rcu_node</tt> structure, which means that
+ there are still CPUs subordinate to that structure on
+ which the current grace period is waiting.
+ Given that, how is it possible that the rightmost drive
+ to root ended the grace period?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="#ffffff">
+ Good analysis!
+ It is in fact impossible in the absence of bugs in RCU.
+ But this diagram is complex enough as it is, so simplicity
+ overrode accuracy.
+ You can think of it as poetic license, or you can think of
+ it as misdirection that is resolved in the
+ <a href="#Putting It All Together">stitched-together diagram</a>.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<h4><a name="Grace-Period Cleanup">Grace-Period Cleanup</a></h4>
+
+<p>Grace-period cleanup first scans the <tt>rcu_node</tt> tree
+breadth-first, setting all the <tt>-&gt;completed</tt> fields equal
+to the number of the newly completed grace period, then it sets
+the <tt>rcu_state</tt> structure's <tt>-&gt;completed</tt> field,
+again to the number of the newly completed grace period.
+The ordering effects are shown below:
+
+</p><p><img src="TreeRCU-gp-cleanup.svg" alt="TreeRCU-gp-cleanup.svg" width="75%">
+
+<p>As indicated by the oval at the bottom of the diagram, once
+grace-period cleanup is complete, the next grace period can begin.
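+
+<p>A simplified sketch of this cleanup scan follows; the breadth-first
+iterator is hypothetical, and the real code also handles wakeups,
+priority boosting, and much else:
+
+<blockquote>
+<pre>
+static void grace_period_cleanup(struct rcu_state *rsp, unsigned long gp_num)
+{
+        struct rcu_node *rnp;
+
+        /* Breadth-first: each parent is updated before its children. */
+        for_each_rcu_node_breadth_first(rsp, rnp) {      /* hypothetical iterator */
+                raw_spin_lock_irq(&amp;rnp-&gt;lock);
+                rnp-&gt;completed = gp_num;  /* CPUs below may now invoke callbacks. */
+                raw_spin_unlock_irq(&amp;rnp-&gt;lock);
+        }
+        /* Only then is the grace period marked completed globally. */
+        WRITE_ONCE(rsp-&gt;completed, gp_num);
+}
+</pre>
+</blockquote>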
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+ But when precisely does the grace period end?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="#ffffff">
+ There is no useful single point at which the grace period
+ can be said to end.
+ The earliest reasonable candidate is as soon as the last
+ CPU has reported its quiescent state, but it may be some
+ milliseconds before RCU becomes aware of this.
+ The latest reasonable candidate is once the <tt>rcu_state</tt>
+ structure's <tt>-&gt;completed</tt> field has been updated,
+ but it is quite possible that some CPUs have already completed
+ phase two of their updates by that time.
+ In short, if you are going to work with RCU, you need to
+ learn to embrace uncertainty.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+
+<h4><a name="Callback Invocation">Callback Invocation</a></h4>
+
+<p>Once a given CPU's leaf <tt>rcu_node</tt> structure's
+<tt>-&gt;completed</tt> field has been updated, that CPU can begin
+invoking its RCU callbacks that were waiting for this grace period
+to end.
+These callbacks are identified by <tt>rcu_advance_cbs()</tt>,
+which is usually invoked by <tt>__note_gp_changes()</tt>.
+As shown in the diagram below, this invocation can be triggered by
+the scheduling-clock interrupt (<tt>rcu_check_callbacks()</tt> on
+the left) or by idle entry (<tt>rcu_cleanup_after_idle()</tt> on
+the right, but only for kernels built with
+<tt>CONFIG_RCU_FAST_NO_HZ=y</tt>).
+Either way, <tt>RCU_SOFTIRQ</tt> is raised, which results in
+<tt>rcu_do_batch()</tt> invoking the callbacks, which in turn
+allows those callbacks to carry out (either directly or indirectly
+via wakeup) the needed phase-two processing for each update.
+
+</p><p><img src="TreeRCU-callback-invocation.svg" alt="TreeRCU-callback-invocation.svg" width="60%">
+
+<p>Please note that callback invocation can also be prompted by any
+number of corner-case code paths, for example, when a CPU notes that
+it has an excessive number of callbacks queued.
+In all cases, the CPU acquires its leaf <tt>rcu_node</tt> structure's
+<tt>-&gt;lock</tt> before invoking callbacks, which preserves the
+required ordering against the newly completed grace period.
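+
+<p>From an updater's point of view, the callback that
+<tt>rcu_do_batch()</tt> eventually invokes is simply the function that
+was passed to <tt>call_rcu()</tt>.  One illustrative (and very common)
+phase-two action is freeing the old version of a structure:
+
+<blockquote>
+<pre>
+struct foo {
+        struct list_head list;
+        int data;
+        struct rcu_head rh;
+};
+
+/* Phase two: runs only after the grace period has ended. */
+static void foo_reclaim(struct rcu_head *rhp)
+{
+        struct foo *fp = container_of(rhp, struct foo, rh);
+
+        kfree(fp);
+}
+
+/* Phase one: unlink the old version, then hand it to RCU. */
+static void foo_remove(struct foo *fp)
+{
+        list_del_rcu(&amp;fp-&gt;list);  /* Assumes update-side locking is held. */
+        call_rcu(&amp;fp-&gt;rh, foo_reclaim);
+}
+</pre>
+</blockquote>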
+
+<p>However, if the callback function communicates to other CPUs,
+for example, doing a wakeup, then it is that function's responsibility
+to maintain ordering.
+For example, if the callback function wakes up a task that runs on
+some other CPU, proper ordering must be in place in both the callback
+function and the task being awakened.
+To see why this is important, consider the top half of the
+<a href="#Grace-Period Cleanup">grace-period cleanup</a> diagram.
+The callback might be running on a CPU corresponding to the leftmost
+leaf <tt>rcu_node</tt> structure, and awaken a task that is to run on
+a CPU corresponding to the rightmost leaf <tt>rcu_node</tt> structure,
+and the grace-period kernel thread might not yet have reached the
+rightmost leaf.
+In this case, the grace period's memory ordering might not yet have
+reached that CPU, so again the callback function and the awakened
+task must supply proper ordering.
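+
+<p>One common way of supplying that ordering (an illustrative pattern,
+not something mandated by RCU) is to use a completion, which orders the
+<tt>complete()</tt> call in the callback before the corresponding
+return from <tt>wait_for_completion()</tt> in the awakened task:
+
+<blockquote>
+<pre>
+struct gp_waiter {
+        struct rcu_head rh;
+        struct completion done;
+};
+
+static void gp_wakeup_cb(struct rcu_head *rhp)
+{
+        struct gp_waiter *w = container_of(rhp, struct gp_waiter, rh);
+
+        complete(&amp;w-&gt;done);
+}
+
+static void wait_for_gp(void)
+{
+        struct gp_waiter w;
+
+        init_completion(&amp;w.done);
+        call_rcu(&amp;w.rh, gp_wakeup_cb);
+        wait_for_completion(&amp;w.done);  /* Awakened task now sees the grace period's effects. */
+}
+</pre>
+</blockquote>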
+
+<h3><a name="Putting It All Together">Putting It All Together</a></h3>
+
+<p>A stitched-together diagram is
+<a href="Tree-RCU-Diagram.html">here</a>.
+
+<h3><a name="Legal Statement">
+Legal Statement</a></h3>
+
+<p>This work represents the view of the author and does not necessarily
+represent the view of IBM.
+
+</p><p>Linux is a registered trademark of Linus Torvalds.
+
+</p><p>Other company, product, and service names may be trademarks or
+service marks of others.
+
+</body></html>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg
new file mode 100644
index 000000000000..832408313d93
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg
@@ -0,0 +1,486 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec 9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="592.12805"
+ height="469.83038"
+ viewBox="-44 -44 7874.1949 6244.9802"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="TreeRCU-callback-invocation.svg">
+ <metadata
+ id="metadata212">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs210">
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible">
+ <path
+ id="path3940"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutS"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutS"
+ style="overflow:visible">
+ <path
+ id="path4073"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.2,0.2)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM"
+ style="overflow:visible">
+ <path
+ id="path4070"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.4,0.4)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible">
+ <path
+ id="path3952"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible">
+ <path
+ id="path3946"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path3970"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-7"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3952-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-3"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-1"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-0"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1087"
+ inkscape:window-height="1144"
+ id="namedview208"
+ showgrid="true"
+ inkscape:zoom="1.2009216"
+ inkscape:cx="289.88715"
+ inkscape:cy="219.06265"
+ inkscape:window-x="713"
+ inkscape:window-y="28"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="g3058"
+ fit-margin-top="5"
+ fit-margin-right="5"
+ fit-margin-left="5"
+ fit-margin-bottom="5">
+ <inkscape:grid
+ type="xygrid"
+ id="grid3079"
+ empspacing="5"
+ visible="true"
+ enabled="true"
+ snapvisiblegridlinesonly="true"
+ originx="-116.00011px"
+ originy="-87.2081px" />
+ </sodipodi:namedview>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g4"
+ transform="translate(-2296.0293,-2364.1166)">
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)"
+ d="m 6161.6776,2411.7612 0,4920.3076"
+ id="path3134-9-0-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)"
+ d="m 6161.6776,4672.443 -2393.6631,0.5116 0,1196.8316 2393.6631,-0.5116"
+ id="path3134-9-0"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccc" />
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)"
+ d="m 6161.6776,4672.443 2393.6631,0.5116 0,1196.8316 -2393.6631,-0.5116"
+ id="path3134-9-0-7"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccc" />
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 5250 8100 - 5710 5790-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4050 9300 - 4512 7140-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1040 9300 - 1502 7140-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 2240 8100 - 2702 5940-->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 2250 8100 - 2250 6240-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1050 9300 - 1050 7440-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4050 9300 - 4050 7440-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 5250 8100 - 5250 6240-->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 9300 3150 - 10860 3150-->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 11400 3600 - 11400 4410-->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 11400 5100 - 11400 5910-->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 9900 4650 - 10860 4650-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 9600 6150 - 10860 6150-->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <rect
+ x="2333.5203"
+ y="5109.5566"
+ width="2844.0974"
+ height="360.77411"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.0005789, 60.00115781;stroke-dashoffset:0"
+ id="rect118-3"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="2562.135"
+ y="5357.9937"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_check_callbacks()</text>
+ <rect
+ x="7069.6187"
+ y="5087.4678"
+ width="2975.115"
+ height="382.86298"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057902, 60.00115804;stroke-dashoffset:0"
+ id="rect118-36"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="7165.2524"
+ y="5333.4927"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-9-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_cleanup_after_idle()</text>
+ <g
+ id="g3058"
+ transform="translate(-53.192514,-2819.2063)">
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"
+ id="text202"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="6532.0293"
+ x="5073.3374"
+ xml:space="preserve">rcu_advance_cbs()</text>
+ <rect
+ id="rect112"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="5650.2598"
+ x="4800.2563" />
+ <rect
+ id="rect112-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="5726.2852"
+ x="4800.2563" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-3-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="6961.395"
+ x="7220.106"
+ xml:space="preserve"><tspan
+ id="tspan3104-6-5"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">Leaf</tspan></text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-3"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="6321.9248"
+ x="5073.3374"
+ xml:space="preserve">__note_gp_changes()</text>
+ </g>
+ <g
+ id="g3049"
+ transform="translate(26.596257,6090.5512)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,1872.6808,-2726.4833)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-3"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-6"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="1785.2073"
+ x="5717.4517"
+ xml:space="preserve"><tspan
+ id="tspan3104-7"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">Phase Two</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-5"
+ y="2005.6624"
+ x="6119.668"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="2005.6624"
+ x="6119.668"
+ id="tspan3112-3"
+ sodipodi:role="line">of Update</tspan></text>
+ </g>
+ <rect
+ x="5097.8271"
+ y="6268.2183"
+ width="1994.7195"
+ height="664.90662"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057858, 60.00115716;stroke-dashoffset:0"
+ id="rect118-36-3"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="5363.7886"
+ y="6534.1812"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-9-6-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">RCU_SOFTIRQ</text>
+ <text
+ xml:space="preserve"
+ x="5363.7886"
+ y="6800.1436"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-9-6-6-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_do_batch()</text>
+ </g>
+</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-registry.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-registry.svg
new file mode 100644
index 000000000000..7ac6f9269806
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-registry.svg
@@ -0,0 +1,655 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec 9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="816.04761"
+ height="636.55627"
+ viewBox="-44 -44 10851.906 8461.0989"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="TreeRCU-callback-registry.svg">
+ <metadata
+ id="metadata212">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs210">
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible">
+ <path
+ id="path3940"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutS"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutS"
+ style="overflow:visible">
+ <path
+ id="path4073"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.2,0.2)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM"
+ style="overflow:visible">
+ <path
+ id="path4070"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.4,0.4)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible">
+ <path
+ id="path3952"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible">
+ <path
+ id="path3946"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path3970"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-7"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3952-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-3"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-1"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-0"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1087"
+ inkscape:window-height="1144"
+ id="namedview208"
+ showgrid="true"
+ inkscape:zoom="1.2009216"
+ inkscape:cx="408.02381"
+ inkscape:cy="254.38856"
+ inkscape:window-x="713"
+ inkscape:window-y="28"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="g4"
+ fit-margin-top="5"
+ fit-margin-right="5"
+ fit-margin-left="5"
+ fit-margin-bottom="5">
+ <inkscape:grid
+ type="xygrid"
+ id="grid3079"
+ empspacing="5"
+ visible="true"
+ enabled="true"
+ snapvisiblegridlinesonly="true"
+ originx="5.2596966e-08px"
+ originy="-4.5963961e-06px" />
+ </sodipodi:namedview>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g4"
+ transform="translate(-753.44492,-1306.6788)">
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)"
+ d="m 6161.6776,2411.7612 0,6117.1391"
+ id="path3134-9-0-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)"
+ d="m 6161.6776,3342.6302 -3856.4573,0 10.6979,5757.1962 2918.1436,-2e-4"
+ id="path3134-9-0"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccc" />
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)"
+ d="m 6161.6776,3342.6302 3856.4574,0 -12.188,5757.1963 -2918.1436,-3e-4"
+ id="path3134-9-0-7"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccc" />
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 5250 8100 - 5710 5790-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4050 9300 - 4512 7140-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1040 9300 - 1502 7140-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 2240 8100 - 2702 5940-->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 2250 8100 - 2250 6240-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1050 9300 - 1050 7440-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4050 9300 - 4050 7440-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 5250 8100 - 5250 6240-->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 9300 3150 - 10860 3150-->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 11400 3600 - 11400 4410-->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 11400 5100 - 11400 5910-->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 9900 4650 - 10860 4650-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 9600 6150 - 10860 6150-->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <rect
+ x="4544.7305"
+ y="4603.417"
+ width="3240.0088"
+ height="2650.6289"
+ rx="0"
+ style="stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057884, 60.00115769;stroke-dashoffset:0"
+ id="rect118"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="5073.3374"
+ y="6372.4521"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rcu_accelerate_cbs()</text>
+ <g
+ id="g3107"
+ transform="translate(2715.7065,4700.8888)">
+ <rect
+ id="rect112"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="4773.3452"
+ y="4825.2578"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_prepare_for_idle()</text>
+ <rect
+ x="790.93585"
+ y="4630.8252"
+ width="3240.0088"
+ height="2650.6289"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.0005789, 60.00115781;stroke-dashoffset:0"
+ id="rect118-3"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="1319.5447"
+ y="6639.2261"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_accelerate_cbs()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-7"
+ transform="translate(-1038.0776,4728.2971)">
+ <rect
+ id="rect112-5"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="1019.5512"
+ y="4852.666"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">note_gp_changes()</text>
+ <text
+ xml:space="preserve"
+ x="1319.5447"
+ y="6376.6328"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_advance_cbs()</text>
+ <text
+ xml:space="preserve"
+ x="1340.6649"
+ y="6111.4473"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-6-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">__note_gp_changes()</text>
+ <rect
+ x="5422.6279"
+ y="3041.8311"
+ width="1480.4871"
+ height="379.24637"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.0005789, 60.00115794;stroke-dashoffset:0"
+ id="rect118-3-9"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="5607.2734"
+ y="3283.3892"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">call_rcu()</text>
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ id="path3084"
+ sodipodi:cx="319.379"
+ sodipodi:cy="345.54001"
+ sodipodi:rx="65.917107"
+ sodipodi:ry="39.550262"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ transform="matrix(13.298129,0,0,13.298129,1915.7286,4523.6528)" />
+ <text
+ xml:space="preserve"
+ x="5853.9238"
+ y="8902.3623"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104">Wake up</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ x="6165.7158"
+ y="9122.8174"
+ id="text3110"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan3112"
+ x="6165.7158"
+ y="9122.8174">grace-period</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ x="6162.8716"
+ y="9364.3564"
+ id="text3114"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan3116"
+ x="6162.8716"
+ y="9364.3564">kernel thread</tspan></text>
+ <rect
+ x="8239.8516"
+ y="4608.7363"
+ width="3240.0088"
+ height="2650.6289"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057902, 60.00115804;stroke-dashoffset:0"
+ id="rect118-36"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="8768.4678"
+ y="6484.1562"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-75"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_accelerate_cbs()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-3"
+ transform="translate(6410.833,4706.2127)">
+ <rect
+ id="rect112-56"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="8329.5352"
+ y="4830.5771"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-9"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">takedown_cpu()</text>
+ <text
+ xml:space="preserve"
+ x="8335.4873"
+ y="5094.127"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-9-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcutree_migrate_callbacks()</text>
+ <text
+ xml:space="preserve"
+ x="8335.4873"
+ y="5357.1006"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-9-6-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_migrate_callbacks()</text>
+ <text
+ xml:space="preserve"
+ x="8768.4678"
+ y="6224.9038"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-6-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_advance_cbs()</text>
+ <text
+ xml:space="preserve"
+ x="3467.9963"
+ y="6987.9912"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7220.106"
+ y="6961.395"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="10905.331"
+ y="6961.395"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-5">Leaf</tspan></text>
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ id="path3084-3"
+ sodipodi:cx="319.379"
+ sodipodi:cy="345.54001"
+ sodipodi:rx="65.917107"
+ sodipodi:ry="39.550262"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ transform="matrix(13.298129,0,0,13.298129,1872.6808,-2726.4833)" />
+ <text
+ xml:space="preserve"
+ x="5717.4517"
+ y="1785.2073"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-7">Phase One</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ x="6119.668"
+ y="2005.6624"
+ id="text3110-5"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan3112-3"
+ x="6119.668"
+ y="2005.6624">of Update</tspan></text>
+ </g>
+</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-dyntick.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-dyntick.svg
new file mode 100644
index 000000000000..423df00c4df9
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-dyntick.svg
@@ -0,0 +1,700 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec 9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="670.61804"
+ height="557.16394"
+ viewBox="-44 -44 8917.9652 7405.8166"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="TreeRCU-dyntick.svg">
+ <metadata
+ id="metadata212">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs210">
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible">
+ <path
+ id="path3940"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutS"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutS"
+ style="overflow:visible">
+ <path
+ id="path4073"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.2,0.2)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM"
+ style="overflow:visible">
+ <path
+ id="path4070"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.4,0.4)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible">
+ <path
+ id="path3952"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible">
+ <path
+ id="path3946"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path3970"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-7"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3952-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-3"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-1"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-0"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-3"
+ style="overflow:visible">
+ <path
+ id="path3946-1"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-4"
+ style="overflow:visible">
+ <path
+ id="path3946-7"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker4880"
+ style="overflow:visible">
+ <path
+ id="path4882"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-5"
+ style="overflow:visible">
+ <path
+ id="path3946-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-6"
+ style="overflow:visible">
+ <path
+ id="path3946-10"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-36"
+ style="overflow:visible">
+ <path
+ id="path3940-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-6"
+ style="overflow:visible">
+ <path
+ id="path3940-26"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-8"
+ style="overflow:visible">
+ <path
+ id="path3940-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-367"
+ style="overflow:visible">
+ <path
+ id="path3940-5"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-56"
+ style="overflow:visible">
+ <path
+ id="path3946-2"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3081"
+ style="overflow:visible">
+ <path
+ id="path3083"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3085"
+ style="overflow:visible">
+ <path
+ id="path3087"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3089"
+ style="overflow:visible">
+ <path
+ id="path3091"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3093"
+ style="overflow:visible">
+ <path
+ id="path3095"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3097"
+ style="overflow:visible">
+ <path
+ id="path3099"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-9"
+ style="overflow:visible">
+ <path
+ id="path3940-1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-3675"
+ style="overflow:visible">
+ <path
+ id="path3940-3"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1087"
+ inkscape:window-height="1148"
+ id="namedview208"
+ showgrid="true"
+ inkscape:zoom="1.4142136"
+ inkscape:cx="381.32663"
+ inkscape:cy="239.67141"
+ inkscape:window-x="833"
+ inkscape:window-y="24"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="svg2"
+ fit-margin-top="5"
+ fit-margin-right="5"
+ fit-margin-left="5"
+ fit-margin-bottom="5"
+ inkscape:snap-global="false">
+ <inkscape:grid
+ type="xygrid"
+ id="grid3154"
+ empspacing="5"
+ visible="true"
+ enabled="true"
+ snapvisiblegridlinesonly="true"
+ originx="-235.14935px"
+ originy="-709.25071px" />
+ </sodipodi:namedview>
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1-3-5"
+ d="m 3754.1051,47.378296 -2.828,7173.860804"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1-3"
+ d="m 6681.1176,1435.1734 -2.828,1578.9586 -2861.3912,7.7159"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1"
+ d="m 3748.8929,3772.1176 2904.1747,-0.8434 26.8008,1842.1825"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <g
+ id="g3115"
+ transform="translate(-2341.8794,10827.399)">
+ <rect
+ ry="0"
+ id="rect118-3"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057923, 60.00115859;stroke-dashoffset:0"
+ rx="0"
+ height="2349.7295"
+ width="5308.7119"
+ y="-8909.5498"
+ x="2379.3704" />
+ <g
+ transform="translate(2576.8841,-9085.2783)"
+ id="g3107-7"
+ style="fill:none;stroke-width:0.025in">
+ <rect
+ x="2084.55"
+ y="949.37109"
+ width="2809.1992"
+ height="1370.8721"
+ rx="0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ id="rect112-5" />
+ <rect
+ x="2084.55"
+ y="1025.3964"
+ width="2809.1992"
+ height="1294.8468"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ id="rect112-3-3" />
+ </g>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-6-6-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-7356.375"
+ x="4769.4536"
+ xml:space="preserve">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-3"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-6825.5815"
+ x="7082.9585"
+ xml:space="preserve"><tspan
+ id="tspan3104-6"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">Leaf</tspan></text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2-7-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-8652.5312"
+ x="2466.7822"
+ xml:space="preserve">dyntick_save_progress_counter()</text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2-7-2-0"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-8368.1475"
+ x="2463.3262"
+ xml:space="preserve">rcu_implicit_dynticks_qs()</text>
+ </g>
+ <g
+ id="g4504"
+ transform="translate(2063.5184,-16111.739)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g3148-9-9"
+ transform="translate(2035.3087,6370.5796)">
+ <rect
+ x="3592.3828"
+ y="-4715.7246"
+ width="3164.783"
+ height="769.99048"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ id="rect118-3-5-1-3"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4418.6582"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_dynticks_eqs_enter()</text>
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4165.7954"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-0-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">atomic_add_return()</text>
+ </g>
+ <g
+ id="g3148-9-9-2"
+ transform="translate(2035.3089,9031.6839)">
+ <rect
+ x="3592.3828"
+ y="-4715.7246"
+ width="3164.783"
+ height="769.99048"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ id="rect118-3-5-1-3-6"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4418.6582"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-6-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_dynticks_eqs_exit()</text>
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4165.7954"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-0-0-8"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">atomic_add_return()</text>
+ </g>
+ <g
+ id="g4504-7"
+ transform="translate(2082.3248,-10883.562)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-9"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-0"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-2"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-3"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-7"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-5"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
new file mode 100644
index 000000000000..754f426b297a
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
@@ -0,0 +1,1126 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec 9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="1026.1281"
+ height="1246.2428"
+ viewBox="-44 -44 13645.583 16565.045"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="TreeRCU-gp-cleanup.svg">
+ <metadata
+ id="metadata212">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs210">
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible">
+ <path
+ id="path3940"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutS"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutS"
+ style="overflow:visible">
+ <path
+ id="path4073"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.2,0.2)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM"
+ style="overflow:visible">
+ <path
+ id="path4070"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.4,0.4)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible">
+ <path
+ id="path3952"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible">
+ <path
+ id="path3946"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path3970"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-7"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3952-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-3"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-1"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-0"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-3"
+ style="overflow:visible">
+ <path
+ id="path3946-1"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-4"
+ style="overflow:visible">
+ <path
+ id="path3946-7"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker4880"
+ style="overflow:visible">
+ <path
+ id="path4882"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-5"
+ style="overflow:visible">
+ <path
+ id="path3946-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-6"
+ style="overflow:visible">
+ <path
+ id="path3946-10"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-1"
+ style="overflow:visible">
+ <path
+ id="path3946-2"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3130"
+ style="overflow:visible">
+ <path
+ id="path3132"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3134"
+ style="overflow:visible">
+ <path
+ id="path3136"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3138"
+ style="overflow:visible">
+ <path
+ id="path3140"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3142"
+ style="overflow:visible">
+ <path
+ id="path3144"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3146"
+ style="overflow:visible">
+ <path
+ id="path3148"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-7"
+ style="overflow:visible">
+ <path
+ id="path3940-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-36"
+ style="overflow:visible">
+ <path
+ id="path3940-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-36-7"
+ style="overflow:visible">
+ <path
+ id="path3940-7-4"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1087"
+ inkscape:window-height="1144"
+ id="namedview208"
+ showgrid="true"
+ inkscape:zoom="0.70710678"
+ inkscape:cx="617.89017"
+ inkscape:cy="542.52419"
+ inkscape:window-x="86"
+ inkscape:window-y="28"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="g3188-3"
+ fit-margin-top="5"
+ fit-margin-right="5"
+ fit-margin-left="5"
+ fit-margin-bottom="5">
+ <inkscape:grid
+ type="xygrid"
+ id="grid3391"
+ empspacing="5"
+ visible="true"
+ enabled="true"
+ snapvisiblegridlinesonly="true"
+ originx="-1.7575793e-05px"
+ originy="70.717956px" />
+ </sodipodi:namedview>
+ <path
+ sodipodi:nodetypes="cccccccccccccccccccccccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3"
+ d="m 6899.303,45.238347 -2.8276,2480.757053 -2316.0141,-1.687 -2.8276,2179.855 2321.1758,-0.844 -2.7042,-1843.237 2404.5142,-0.211 16.1022,1993.267 -7783.8345,-4.728 -16.7936,2120.3945 2033.1033,-23.5344 2.0128,-1866.5611 2051.9097,14.079 2.0128,1838.2983 1280.8475,-4.728 14.608,-1830.1043 1312.2492,12.923 14.608,1818.337 2000.0061,20.4217 -12.279,-1841.4117 1304.168,1.616 -12.279,2032.7057 -4638.6513,1.6154 19.5828,569.0378"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(2450.4073,-11647.612)"
+ id="g3188">
+ <text
+ xml:space="preserve"
+ x="3199.1516"
+ y="13255.592"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ <g
+ id="g3107"
+ transform="translate(947.90548,11584.029)">
+ <rect
+ id="rect112"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5452.3052"
+ y="13844.535"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5">Root</tspan></text>
+ </g>
+ <rect
+ ry="0"
+ id="rect118"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057884, 60.00115769;stroke-dashoffset:0"
+ rx="0"
+ height="14649.609"
+ width="13482.601"
+ y="403.13776"
+ x="37.490932" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="662.59283"
+ x="153.2673"
+ xml:space="preserve">rcu_gp_cleanup()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(2329.9437,-11611.245)"
+ id="g3147">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5324.5371"
+ y="15414.598"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-753"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(3181.0244,-11647.612)"
+ id="g3153">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7479.5796"
+ y="17699.943"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-9"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-9"
+ d="m 3710.957,19425.516 -20.9546,8604.655"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(-737.93887,7732.6672)"
+ id="g3188-3">
+ <text
+ xml:space="preserve"
+ x="3225.7478"
+ y="13175.802"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-60"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;completed =</text>
+ <g
+ id="g3107-62"
+ transform="translate(947.90548,11584.029)">
+ <rect
+ id="rect112-6"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-1"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5452.3052"
+ y="13844.535"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-8"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-7">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="3225.7478"
+ y="13390.038"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-60-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"> rnp-&gt;completed</text>
+ <flowRoot
+ xml:space="preserve"
+ id="flowRoot3356"
+ style="font-size:12px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ transform="matrix(13.298129,0,0,13.298129,-2487.0857,3868.8376)"><flowRegion
+ id="flowRegion3358"><rect
+ id="rect3360"
+ width="373.35239"
+ height="63.63961"
+ x="332.34018"
+ y="681.87292" /></flowRegion><flowPara
+ id="flowPara3362" /></flowRoot> </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(-858.40227,7769.0342)"
+ id="g3147-9">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-2"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-02"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-5"
+ transform="translate(5205.6909,23741.476)">
+ <rect
+ id="rect112-7-1-9"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9710.0928"
+ y="26001.982"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-8">Leaf</tspan></text>
+ <g
+ transform="translate(-4830.8839,7769.0342)"
+ id="g3147-3-7"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-6-3"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-0-6"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-6-1"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ </g>
+ <g
+ transform="translate(-3340.0639,7732.6672)"
+ id="g3153-2-9"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-6-3"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-1-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-8-9"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-7-4"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-9-7">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-6672.8049,7732.6672)"
+ id="g3153-20-8"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-2-4"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-3-5"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-7-0"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-5-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-92-6">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-10005.546,7732.6672)"
+ id="g3153-28-0"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-9-6"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-7-3"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-3-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-6-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-1-6">Leaf</tspan></text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 2285.411,21615.005 -582.9982,865.094"
+ id="path3414-5"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 5094.193,21615.267 582.998,865.094"
+ id="path3414-9-5"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 334.77783,23828.182 -582.9982,865.094"
+ id="path3414-8-4"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 7079.8249,23828.444 582.9999,865.094"
+ id="path3414-9-4-7"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 1751.2742,23828.182 0,846.288"
+ id="path3414-8-3-65"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 5628.2495,23854.778 0,846.288"
+ id="path3414-8-3-6-6"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ </g>
+ <g
+ transform="translate(-1642.5377,-11611.245)"
+ id="g3147-3"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-6"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5327.3057"
+ y="15428.84"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ </g>
+ <g
+ transform="translate(-151.71746,-11647.612)"
+ id="g3153-2"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-6"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-8"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-9">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-3484.4587,-11647.612)"
+ id="g3153-20"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-2"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-3"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-7"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7486.4907"
+ y="17670.119"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ </g>
+ <g
+ transform="translate(-6817.1997,-11647.612)"
+ id="g3153-28"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-7"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7474.1382"
+ y="17688.926"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 5473.757,2234.7264 -582.9982,865.094"
+ id="path3414"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8282.5389,2234.9884 582.9982,865.094"
+ id="path3414-9"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 3523.1239,4447.9034 -582.9982,865.094"
+ id="path3414-8"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 10268.171,4448.1654 583,865.094"
+ id="path3414-9-4"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4939.6203,4447.9034 0,846.288"
+ id="path3414-8-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8816.5956,4474.4994 0,846.288"
+ id="path3414-8-3-6"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ x="7318.9653"
+ y="6031.6353"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g4504-3-9"
+ transform="translate(4866.6205,-1197.2204)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-6-1"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-7-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16888.277"
+ x="4344.877"
+ xml:space="preserve"><tspan
+ id="tspan3104-5-7"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">Start of</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-3-0"
+ y="17119.1"
+ x="4578.7886"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17119.1"
+ x="4578.7886"
+ id="tspan3112-5-9"
+ sodipodi:role="line">Next Grace</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-6-3"
+ y="17350.271"
+ x="4581.7886"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17350.271"
+ x="4581.7886"
+ id="tspan3116-2-6"
+ sodipodi:role="line">Period</tspan></text>
+ </g>
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-5"
+ d="m 6875.6003,15833.906 1595.7755,0"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send-36)" />
+</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-fqs.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-fqs.svg
new file mode 100644
index 000000000000..7ddc094d7f28
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-fqs.svg
@@ -0,0 +1,1309 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec 9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="1626.5847"
+ height="843.1416"
+ viewBox="-44 -44 21630.534 11207.028"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="TreeRCU-gp-fqs.svg">
+ <metadata
+ id="metadata212">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs210">
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible">
+ <path
+ id="path3940"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutS"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutS"
+ style="overflow:visible">
+ <path
+ id="path4073"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.2,0.2)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM"
+ style="overflow:visible">
+ <path
+ id="path4070"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.4,0.4)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible">
+ <path
+ id="path3952"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible">
+ <path
+ id="path3946"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path3970"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-7"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3952-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-3"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-1"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-0"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-3"
+ style="overflow:visible">
+ <path
+ id="path3946-1"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-4"
+ style="overflow:visible">
+ <path
+ id="path3946-7"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker4880"
+ style="overflow:visible">
+ <path
+ id="path4882"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-5"
+ style="overflow:visible">
+ <path
+ id="path3946-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-6"
+ style="overflow:visible">
+ <path
+ id="path3946-10"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-36"
+ style="overflow:visible">
+ <path
+ id="path3940-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-6"
+ style="overflow:visible">
+ <path
+ id="path3940-26"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-8"
+ style="overflow:visible">
+ <path
+ id="path3940-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-367"
+ style="overflow:visible">
+ <path
+ id="path3940-5"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-56"
+ style="overflow:visible">
+ <path
+ id="path3946-2"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3081"
+ style="overflow:visible">
+ <path
+ id="path3083"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3085"
+ style="overflow:visible">
+ <path
+ id="path3087"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3089"
+ style="overflow:visible">
+ <path
+ id="path3091"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3093"
+ style="overflow:visible">
+ <path
+ id="path3095"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3097"
+ style="overflow:visible">
+ <path
+ id="path3099"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-9"
+ style="overflow:visible">
+ <path
+ id="path3940-1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-91"
+ style="overflow:visible">
+ <path
+ id="path3940-27"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3082"
+ style="overflow:visible">
+ <path
+ id="path3084"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-09"
+ style="overflow:visible">
+ <path
+ id="path3940-3"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3093-6"
+ style="overflow:visible">
+ <path
+ id="path3095-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-3675"
+ style="overflow:visible">
+ <path
+ id="path3940-35"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1087"
+ inkscape:window-height="1144"
+ id="namedview208"
+ showgrid="true"
+ inkscape:zoom="0.5"
+ inkscape:cx="843.3925"
+ inkscape:cy="528.22238"
+ inkscape:window-x="860"
+ inkscape:window-y="65"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="svg2"
+ fit-margin-top="5"
+ fit-margin-right="5"
+ fit-margin-left="5"
+ fit-margin-bottom="5"
+ inkscape:snap-global="false">
+ <inkscape:grid
+ type="xygrid"
+ id="grid3154"
+ empspacing="5"
+ visible="true"
+ enabled="true"
+ snapvisiblegridlinesonly="true"
+ originx="306.04964px"
+ originy="286.40704px" />
+ </sodipodi:namedview>
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1"
+ d="m 16000.705,7361.3625 3383.738,-0.8434 7.995,1860.9894"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1-3"
+ d="m 19393.687,5043.2247 -2.828,1541.346 -3303.342,-1.6876"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1-6"
+ d="m 5568.2242,7353.9621 -3929.1209,17.9634 20.2153,2632.0515"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1-3-2"
+ d="m 1629.8598,3926.2473 12.2312,2669.7292 3867.5308,7.7168"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ sodipodi:nodetypes="cccccccccccccccccccccccccccccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3"
+ d="m 10932.061,46.910528 -2.827,638.638602 -5325.0378,35.9259 -21.6339,7219.96837 2057.8863,-38.4562 -21.5106,-2087.7208 -491.6705,-0.211 -2.7042,-1993.689 1393.686,-4.728 39.6256,4057.454 2379.6691,32.779 14.608,-1848.911 1312.249,12.923 14.608,1818.337 2000.007,20.422 -12.28,-1841.412 1191.331,1.616 15.929,1289.8537 520.344,0.202 m 0,0 -15.641,-1570.1327 -2629.727,-18.604 3.166,-2124.92 -2385.245,19.007 21.973,-2444.6293 5551.053,37.8148 1.584,7165.3369 m 0,0 -5602.722,0.1016 19.583,813.521"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <rect
+ ry="0"
+ id="rect118"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057925, 60.0011585;stroke-dashoffset:0"
+ rx="0"
+ height="8254.9336"
+ width="14128.912"
+ y="443.33136"
+ x="4032.6365" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="720.02423"
+ x="4178.2354"
+ xml:space="preserve">rcu_gp_fqs()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(6381.5083,-10649.537)"
+ id="g3147">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5250.5327"
+ y="15512.733"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-35"
+ style="font-size:192.00001526px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(7232.589,-10685.904)"
+ id="g3153">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(2409.0267,-10649.537)"
+ id="g3147-3"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-6"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5284.6885"
+ y="15500.379"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6"
+ style="font-size:192.00001526px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ </g>
+ <g
+ transform="translate(3899.8472,-10685.904)"
+ id="g3153-2"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-6"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-8"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-9">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(567.10542,-10685.904)"
+ id="g3153-20"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-2"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-3"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-7"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-2765.6353,-10685.904)"
+ id="g3153-28"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-7"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7428.2939"
+ y="17707.271"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-75"
+ style="font-size:192.00001526px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 9525.3217,3196.4324 -582.9982,865.094"
+ id="path3414"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 12334.103,3196.6944 582.999,865.094"
+ id="path3414-9"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 7574.6885,5409.6094 -582.9983,865.094"
+ id="path3414-8"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 14319.735,5409.8714 583.001,865.094"
+ id="path3414-9-4"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8991.1849,5409.6094 0,846.288"
+ id="path3414-8-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 12868.16,5436.2054 0,846.288"
+ id="path3414-8-3-6"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <rect
+ ry="0"
+ id="rect118-1"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057965, 60.00115916;stroke-dashoffset:0"
+ rx="0"
+ height="7164.1621"
+ width="13301.43"
+ y="984.91095"
+ x="4277.6021" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="1236.326"
+ x="4409.96"
+ xml:space="preserve"
+ sodipodi:linespacing="125%">force_qs_rnp()<tspan
+ style="font-size:192px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3307" /></text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="1547.8876"
+ x="4417.6396"
+ xml:space="preserve">dyntick_save_progress_counter()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(6501.9719,-10685.904)"
+ id="g3188">
+ <g
+ id="g3107"
+ transform="translate(947.90548,11584.029)">
+ <rect
+ id="rect112"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5452.3052"
+ y="13844.535"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="3158.8521"
+ y="13313.027"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202"
+ style="font-size:192.00001526px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ </g>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2-7-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="1858.8729"
+ x="4414.1836"
+ xml:space="preserve">rcu_implicit_dynticks_qs()</text>
+ <text
+ xml:space="preserve"
+ x="14659.87"
+ y="7002.561"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-62"
+ style="font-size:192.00001526px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ <g
+ id="g4504"
+ transform="translate(14776.087,-12503.687)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3089"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g3148-9-9"
+ transform="translate(14747.877,9978.6315)">
+ <rect
+ x="3592.3828"
+ y="-4715.7246"
+ width="3164.783"
+ height="769.99048"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ id="rect118-3-5-1-3"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4418.6582"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_dynticks_eqs_enter()</text>
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4165.7954"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-0-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">atomic_add_return()</text>
+ </g>
+ <g
+ id="g3148-9-9-2"
+ transform="translate(14747.877,12639.736)">
+ <rect
+ x="3592.3828"
+ y="-4715.7246"
+ width="3164.783"
+ height="769.99048"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ id="rect118-3-5-1-3-6"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4418.6582"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-6-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_dynticks_eqs_exit()</text>
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4165.7954"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-0-0-8"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">atomic_add_return()</text>
+ </g>
+ <g
+ id="g4504-7"
+ transform="translate(14794.893,-7275.5109)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-9"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-0"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-2"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-3"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-7"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-5"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g4504-6"
+ transform="translate(-2953.0872,-13662.506)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-1"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-8"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-7"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-9"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-2"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-0"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-2"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g3148-9-9-3"
+ transform="translate(-3554.8919,9313.0075)">
+ <rect
+ x="3592.3828"
+ y="-4981.6865"
+ width="3728.9751"
+ height="2265.0989"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ id="rect118-3-5-1-3-7"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4684.6201"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-6-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_report_dead()</text>
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4431.7573"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-0-0-9"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_cleanup_dying_idle_cpu()</text>
+ <g
+ transform="translate(1783.3183,-5255.3491)"
+ id="g3107-7-5"
+ style="fill:none;stroke-width:0.025in">
+ <rect
+ x="2084.55"
+ y="949.37109"
+ width="2809.1992"
+ height="1370.8721"
+ rx="0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ id="rect112-5-3" />
+ <rect
+ x="2084.55"
+ y="1025.3964"
+ width="2809.1992"
+ height="1294.8468"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ id="rect112-3-3-5" />
+ </g>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-6-6-2-6"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-3526.4448"
+ x="4241.8574"
+ xml:space="preserve">-&gt;qsmaskinitnext</text>
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-3-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-2987.4167"
+ x="6305.1484"
+ xml:space="preserve"><tspan
+ id="tspan3104-6-9"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">Leaf</tspan></text>
+ </g>
+ <g
+ id="g4504-7-2"
+ transform="translate(-2934.2807,-6492.8204)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-9-2"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-2-8"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-0-9"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-2-7"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-3-3"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-7-6"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-5-1"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g3206"
+ transform="translate(3999.5374,3999.1768)">
+ <rect
+ ry="0"
+ id="rect118-3-5-1-3-1"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00058007, 60.00116001;stroke-dashoffset:0"
+ rx="0"
+ height="2265.0989"
+ width="3728.9751"
+ y="3382.2036"
+ x="-3958.3845" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3-27-6-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="3679.27"
+ x="-3804.9949"
+ xml:space="preserve">rcu_cpu_starting()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-7-5-0"
+ transform="translate(-5767.4491,3108.5424)">
+ <rect
+ id="rect112-5-3-9"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-3-5-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="-3308.9099"
+ y="4837.4453"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-6-2-6-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmaskinitnext</text>
+ <text
+ xml:space="preserve"
+ x="-1245.6189"
+ y="5376.4731"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-2-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-9-6">Leaf</tspan></text>
+ </g>
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1-3-6"
+ d="m 15475.193,7360.7089 467.332,8.6247"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
new file mode 100644
index 000000000000..0161262904ec
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
@@ -0,0 +1,656 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec 9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="1039.3743"
+ height="677.72852"
+ viewBox="-44 -44 13821.733 9008.3597"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="TreeRCU-gp-init-1.svg">
+ <metadata
+ id="metadata212">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs210">
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible">
+ <path
+ id="path3940"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutS"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutS"
+ style="overflow:visible">
+ <path
+ id="path4073"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.2,0.2)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM"
+ style="overflow:visible">
+ <path
+ id="path4070"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.4,0.4)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible">
+ <path
+ id="path3952"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible">
+ <path
+ id="path3946"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path3970"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-7"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3952-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-3"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-1"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-0"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-3"
+ style="overflow:visible">
+ <path
+ id="path3946-1"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-4"
+ style="overflow:visible">
+ <path
+ id="path3946-7"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker4880"
+ style="overflow:visible">
+ <path
+ id="path4882"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-5"
+ style="overflow:visible">
+ <path
+ id="path3946-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-6"
+ style="overflow:visible">
+ <path
+ id="path3946-10"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-36"
+ style="overflow:visible">
+ <path
+ id="path3940-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1087"
+ inkscape:window-height="1144"
+ id="namedview208"
+ showgrid="true"
+ inkscape:zoom="0.70710678"
+ inkscape:cx="617.89019"
+ inkscape:cy="636.57143"
+ inkscape:window-x="697"
+ inkscape:window-y="28"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="svg2"
+ fit-margin-top="5"
+ fit-margin-right="5"
+ fit-margin-left="5"
+ fit-margin-bottom="5">
+ <inkscape:grid
+ type="xygrid"
+ id="grid3059"
+ empspacing="5"
+ visible="true"
+ enabled="true"
+ snapvisiblegridlinesonly="true"
+ originx="1.6062488e-07px"
+ originy="10.7285px" />
+ </sodipodi:namedview>
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3"
+ d="m 6871.027,46.883461 0,8777.144039"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(2450.4075,-10679.115)"
+ id="g3188">
+ <text
+ xml:space="preserve"
+ x="3305.5364"
+ y="13255.592"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
+ <g
+ id="g3107"
+ transform="translate(947.90548,11584.029)">
+ <rect
+ id="rect112"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5452.3052"
+ y="13844.535"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5">Root</tspan></text>
+ </g>
+ <rect
+ ry="0"
+ id="rect118"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057884, 60.00115769;stroke-dashoffset:0"
+ rx="0"
+ height="6844.4546"
+ width="13658.751"
+ y="1371.6335"
+ x="37.490932" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="1631.0878"
+ x="153.26733"
+ xml:space="preserve">rcu_gp_init()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(2329.9439,-10642.748)"
+ id="g3147">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(3181.0246,-10679.115)"
+ id="g3153">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-1642.5375,-10642.748)"
+ id="g3147-3"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-6"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ </g>
+ <g
+ transform="translate(-151.71726,-10679.115)"
+ id="g3153-2"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-6"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-8"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-9">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-3484.4587,-10679.115)"
+ id="g3153-20"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-2"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-3"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-7"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-6817.1998,-10679.115)"
+ id="g3153-28"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-7"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 5473.7572,3203.2219 -582.9982,865.094"
+ id="path3414"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8282.5391,3203.4839 582.9982,865.094"
+ id="path3414-9"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 3523.1241,5416.3989 -582.9982,865.094"
+ id="path3414-8"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 10268.171,5416.6609 583,865.094"
+ id="path3414-9-4"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4939.6205,5416.3989 0,846.288"
+ id="path3414-8-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8816.5958,5442.9949 0,846.288"
+ id="path3414-8-3-6"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <g
+ id="g4504-3-9"
+ transform="translate(4866.0367,-16425.339)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-6-1"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-7-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16888.277"
+ x="4344.877"
+ xml:space="preserve"><tspan
+ id="tspan3104-5-7"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">End of</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-3-0"
+ y="17119.1"
+ x="4578.7886"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17119.1"
+ x="4578.7886"
+ id="tspan3112-5-9"
+ sodipodi:role="line">Last Grace</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-6-3"
+ y="17350.271"
+ x="4581.7886"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17350.271"
+ x="4581.7886"
+ id="tspan3116-2-6"
+ sodipodi:role="line">Period</tspan></text>
+ </g>
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-5"
+ d="m 8546.5914,605.78414 -1595.7755,0"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send-36)" />
+</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-2.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-2.svg
new file mode 100644
index 000000000000..4d956a732685
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-2.svg
@@ -0,0 +1,656 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec 9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="1037.9602"
+ height="666.38184"
+ viewBox="-44 -44 13802.928 8857.5401"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="TreeRCU-gp-init-2.svg">
+ <metadata
+ id="metadata212">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs210">
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible">
+ <path
+ id="path3940"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutS"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutS"
+ style="overflow:visible">
+ <path
+ id="path4073"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.2,0.2)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM"
+ style="overflow:visible">
+ <path
+ id="path4070"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.4,0.4)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible">
+ <path
+ id="path3952"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible">
+ <path
+ id="path3946"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path3970"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-7"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3952-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-3"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-1"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-0"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-3"
+ style="overflow:visible">
+ <path
+ id="path3946-1"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-4"
+ style="overflow:visible">
+ <path
+ id="path3946-7"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker4880"
+ style="overflow:visible">
+ <path
+ id="path4882"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-5"
+ style="overflow:visible">
+ <path
+ id="path3946-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-6"
+ style="overflow:visible">
+ <path
+ id="path3946-10"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1087"
+ inkscape:window-height="1144"
+ id="namedview208"
+ showgrid="false"
+ inkscape:zoom="0.79710462"
+ inkscape:cx="564.27119"
+ inkscape:cy="397.32188"
+ inkscape:window-x="833"
+ inkscape:window-y="28"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="svg2"
+ fit-margin-top="5"
+ fit-margin-right="5"
+ fit-margin-left="5"
+ fit-margin-bottom="5" />
+ <path
+ sodipodi:nodetypes="cccccccccccccccccccccccccccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3"
+ d="m 6861.6904,46.438525 -2.8276,1315.668775 -5343.8436,17.1194 -2.8276,6561.7446 2039.0799,17.963 -2.7042,-2144.1399 -491.6705,-0.2109 -2.7042,-1993.6887 1487.7179,-4.7279 -17.8,1812.453 2017.2374,-7.6434 4.9532,-2151.5723 -1405.5264,11.163 -10.919,-1891.1468 1739.2164,-2.7175 -13.2006,4234.2295 -1701.3595,1.3953 -8.7841,2107.7116 1702.6392,-4.8334 33.4144,-1867.7167 1312.2492,12.9229 14.608,1818.3367 2000.0063,20.4217 -12.279,-1841.4113 1304.168,1.6154 -12.279,2032.7059 -4638.6515,1.6154 19.5828,569.0378"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <rect
+ ry="0"
+ id="rect118"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057884, 60.00115769;stroke-dashoffset:0"
+ rx="0"
+ height="7653.1299"
+ width="13639.945"
+ y="555.69745"
+ x="37.490929" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="799.34259"
+ x="134.46091"
+ xml:space="preserve">rcu_gp_init()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(2311.1375,-10650.009)"
+ id="g3147">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(3162.2182,-10686.376)"
+ id="g3153">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-1661.3439,-10650.009)"
+ id="g3147-3"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-6"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5398.415"
+ y="15310.093"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-8"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmaskinit</text>
+ <text
+ xml:space="preserve"
+ x="5398.415"
+ y="15545.01"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-5-8"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmaskinitnext</text>
+ </g>
+ <g
+ transform="translate(-170.52359,-10686.376)"
+ id="g3153-2"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-6"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-8"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-9">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-3503.2651,-10686.376)"
+ id="g3153-20"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-2"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-3"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-7"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-6836.0062,-10686.376)"
+ id="g3153-28"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-7"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7699.7246"
+ y="17734.791"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-4"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmaskinit</text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 5454.9508,3195.9607 -582.9982,865.094"
+ id="path3414"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8263.7327,3196.2227 582.9982,865.094"
+ id="path3414-9"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 3504.3177,5409.1377 -582.9982,865.094"
+ id="path3414-8"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 10249.365,5409.3997 583,865.094"
+ id="path3414-9-4"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4920.8141,5409.1377 0,846.288"
+ id="path3414-8-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8797.7894,5435.7337 0,846.288"
+ id="path3414-8-3-6"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <rect
+ ry="0"
+ id="rect118-1"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057923, 60.00115846;stroke-dashoffset:0"
+ rx="0"
+ height="4418.4302"
+ width="4932.5845"
+ y="1492.2119"
+ x="2087.8708" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="1690.4336"
+ x="2223.3145"
+ xml:space="preserve"
+ sodipodi:linespacing="125%">rcu_init_new_rnp()<tspan
+ style="font-size:192px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3307"> or</tspan></text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="1958.5066"
+ x="2223.3145"
+ xml:space="preserve">rcu_cleanup_dead_rnp()</text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2-7-6"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="2227.4531"
+ x="2226.1592"
+ xml:space="preserve"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:192px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3327">(optional)</tspan></text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(2431.6011,-10686.376)"
+ id="g3188">
+ <text
+ xml:space="preserve"
+ x="3305.5364"
+ y="13255.592"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;qsmaskinit</text>
+ <g
+ id="g3107"
+ transform="translate(947.90548,11584.029)">
+ <rect
+ id="rect112"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5452.3052"
+ y="13844.535"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="3305.5364"
+ y="13490.509"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmaskinitnext</text>
+ </g>
+</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
new file mode 100644
index 000000000000..de6ecc51b00e
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
@@ -0,0 +1,632 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec 9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="1039.3743"
+ height="594.19171"
+ viewBox="-44 -44 13821.733 7897.9895"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="TreeRCU-gp-init-2.svg">
+ <metadata
+ id="metadata212">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs210">
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible">
+ <path
+ id="path3940"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutS"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutS"
+ style="overflow:visible">
+ <path
+ id="path4073"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.2,0.2)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM"
+ style="overflow:visible">
+ <path
+ id="path4070"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.4,0.4)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible">
+ <path
+ id="path3952"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible">
+ <path
+ id="path3946"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path3970"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-7"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3952-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-3"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-1"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-0"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-3"
+ style="overflow:visible">
+ <path
+ id="path3946-1"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-4"
+ style="overflow:visible">
+ <path
+ id="path3946-7"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker4880"
+ style="overflow:visible">
+ <path
+ id="path4882"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-5"
+ style="overflow:visible">
+ <path
+ id="path3946-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-6"
+ style="overflow:visible">
+ <path
+ id="path3946-10"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1087"
+ inkscape:window-height="1144"
+ id="namedview208"
+ showgrid="false"
+ inkscape:zoom="0.70710678"
+ inkscape:cx="617.89019"
+ inkscape:cy="625.84293"
+ inkscape:window-x="697"
+ inkscape:window-y="28"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="svg2"
+ fit-margin-top="5"
+ fit-margin-right="5"
+ fit-margin-left="5"
+ fit-margin-bottom="5" />
+ <path
+ sodipodi:nodetypes="cccccccccccccccccccccccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3"
+ d="m 6899.3032,45.520244 -2.8276,2480.757056 -2316.0141,-1.687 -2.8276,2179.8547 2321.1758,-0.8434 -2.7042,-1843.2376 2404.5142,-0.2109 16.1022,1993.2669 -7783.8345,-4.7279 -16.7936,2120.3946 2033.1033,-23.5344 2.0128,-1866.561 2051.9097,14.0785 2.0128,1838.2987 1280.8475,-4.728 14.608,-1830.1039 1312.2492,12.9229 14.608,1818.3367 2000.0059,20.4217 -12.279,-1841.4113 1304.168,1.6154 -12.279,2032.7059 -4638.6511,1.6154 19.5828,569.0378"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(2450.4075,-11647.329)"
+ id="g3188">
+ <text
+ xml:space="preserve"
+ x="3305.5364"
+ y="13255.592"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ <g
+ id="g3107"
+ transform="translate(947.90548,11584.029)">
+ <rect
+ id="rect112"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5452.3052"
+ y="13844.535"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5">Root</tspan></text>
+ </g>
+ <rect
+ ry="0"
+ id="rect118"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057884, 60.00115769;stroke-dashoffset:0"
+ rx="0"
+ height="6844.4546"
+ width="13658.751"
+ y="403.41983"
+ x="37.490932" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="662.8739"
+ x="153.26733"
+ xml:space="preserve">rcu_gp_init()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(2329.9439,-11610.962)"
+ id="g3147">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5392.3345"
+ y="15407.104"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(3181.0246,-11647.329)"
+ id="g3153">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7536.4883"
+ y="17640.934"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-9"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ </g>
+ <g
+ transform="translate(-1642.5375,-11610.962)"
+ id="g3147-3"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-6"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5378.4146"
+ y="15436.927"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ </g>
+ <g
+ transform="translate(-151.71726,-11647.329)"
+ id="g3153-2"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-6"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-8"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-9">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-3484.4587,-11647.329)"
+ id="g3153-20"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-2"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-3"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-7"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7520.1294"
+ y="17673.639"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-35"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ </g>
+ <g
+ transform="translate(-6817.1998,-11647.329)"
+ id="g3153-28"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-7"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7521.4663"
+ y="17666.062"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-75"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 5473.7572,2235.0081 -582.9982,865.094"
+ id="path3414"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8282.5391,2235.2701 582.9982,865.094"
+ id="path3414-9"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 3523.1241,4448.1851 -582.9982,865.094"
+ id="path3414-8"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 10268.171,4448.4471 583,865.094"
+ id="path3414-9-4"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4939.6205,4448.1851 0,846.288"
+ id="path3414-8-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8816.5958,4474.7811 0,846.288"
+ id="path3414-8-3-6"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ x="7370.856"
+ y="5997.5972"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-62"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
new file mode 100644
index 000000000000..b13b7b01bb3a
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
@@ -0,0 +1,5135 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec 9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="1626.5841"
+ height="6394.5298"
+ viewBox="-44 -44 21630.525 84996.019"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="TreeRCU-gp.svg">
+ <metadata
+ id="metadata212">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs210">
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible">
+ <path
+ id="path3940"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutS"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutS"
+ style="overflow:visible">
+ <path
+ id="path4073"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.2,0.2)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM"
+ style="overflow:visible">
+ <path
+ id="path4070"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.4,0.4)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible">
+ <path
+ id="path3952"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible">
+ <path
+ id="path3946"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path3970"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-7"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3952-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-3"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-1"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-0"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-36"
+ style="overflow:visible">
+ <path
+ id="path3940-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-3"
+ style="overflow:visible">
+ <path
+ id="path3946-6"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3085"
+ style="overflow:visible">
+ <path
+ id="path3087"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3089"
+ style="overflow:visible">
+ <path
+ id="path3091"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3093"
+ style="overflow:visible">
+ <path
+ id="path3095"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3097"
+ style="overflow:visible">
+ <path
+ id="path3099"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3101"
+ style="overflow:visible">
+ <path
+ id="path3103"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-7"
+ style="overflow:visible">
+ <path
+ id="path3940-5"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-79"
+ style="overflow:visible">
+ <path
+ id="path3940-20"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-37"
+ style="overflow:visible">
+ <path
+ id="path3946-5"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3081"
+ style="overflow:visible">
+ <path
+ id="path3083"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3085-9"
+ style="overflow:visible">
+ <path
+ id="path3087-2"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3089-2"
+ style="overflow:visible">
+ <path
+ id="path3091-8"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3093-9"
+ style="overflow:visible">
+ <path
+ id="path3095-7"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3097-3"
+ style="overflow:visible">
+ <path
+ id="path3099-6"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-12"
+ style="overflow:visible">
+ <path
+ id="path3940-93"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-2"
+ style="overflow:visible">
+ <path
+ id="path3946-66"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3077"
+ style="overflow:visible">
+ <path
+ id="path3079"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3081-4"
+ style="overflow:visible">
+ <path
+ id="path3083-9"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3085-5"
+ style="overflow:visible">
+ <path
+ id="path3087-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3089-4"
+ style="overflow:visible">
+ <path
+ id="path3091-87"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3093-1"
+ style="overflow:visible">
+ <path
+ id="path3095-72"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-72"
+ style="overflow:visible">
+ <path
+ id="path3940-26"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-6"
+ style="overflow:visible">
+ <path
+ id="path3940-25"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-8"
+ style="overflow:visible">
+ <path
+ id="path3946-62"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3179"
+ style="overflow:visible">
+ <path
+ id="path3181"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3183"
+ style="overflow:visible">
+ <path
+ id="path3185"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3187"
+ style="overflow:visible">
+ <path
+ id="path3189"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3191"
+ style="overflow:visible">
+ <path
+ id="path3193"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3195"
+ style="overflow:visible">
+ <path
+ id="path3197"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3199"
+ style="overflow:visible">
+ <path
+ id="path3201"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3203"
+ style="overflow:visible">
+ <path
+ id="path3205"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3207"
+ style="overflow:visible">
+ <path
+ id="path3209"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3211"
+ style="overflow:visible">
+ <path
+ id="path3213"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3215"
+ style="overflow:visible">
+ <path
+ id="path3217"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-5"
+ style="overflow:visible">
+ <path
+ id="path3940-52"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3150"
+ style="overflow:visible">
+ <path
+ id="path3152"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-9"
+ style="overflow:visible">
+ <path
+ id="path3946-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3156"
+ style="overflow:visible">
+ <path
+ id="path3158"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3160"
+ style="overflow:visible">
+ <path
+ id="path3162"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3164"
+ style="overflow:visible">
+ <path
+ id="path3166"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3168"
+ style="overflow:visible">
+ <path
+ id="path3170"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3172"
+ style="overflow:visible">
+ <path
+ id="path3174"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-8"
+ style="overflow:visible">
+ <path
+ id="path3940-7-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-17"
+ style="overflow:visible">
+ <path
+ id="path3940-8"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-36-4"
+ style="overflow:visible">
+ <path
+ id="path3940-7-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-94"
+ style="overflow:visible">
+ <path
+ id="path3946-59"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3157"
+ style="overflow:visible">
+ <path
+ id="path3159"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3161"
+ style="overflow:visible">
+ <path
+ id="path3163"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3165"
+ style="overflow:visible">
+ <path
+ id="path3167"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3169"
+ style="overflow:visible">
+ <path
+ id="path3171"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3173"
+ style="overflow:visible">
+ <path
+ id="path3175"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3177"
+ style="overflow:visible">
+ <path
+ id="path3179"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3181"
+ style="overflow:visible">
+ <path
+ id="path3183"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3185"
+ style="overflow:visible">
+ <path
+ id="path3187"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3189"
+ style="overflow:visible">
+ <path
+ id="path3191"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3193"
+ style="overflow:visible">
+ <path
+ id="path3195"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3197"
+ style="overflow:visible">
+ <path
+ id="path3199"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-35"
+ style="overflow:visible">
+ <path
+ id="path3940-70"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3203-8"
+ style="overflow:visible">
+ <path
+ id="path3205-1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-83"
+ style="overflow:visible">
+ <path
+ id="path3940-79"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3038"
+ style="overflow:visible">
+ <path
+ id="path3040"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3042"
+ style="overflow:visible">
+ <path
+ id="path3044"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1087"
+ inkscape:window-height="1144"
+ id="namedview208"
+ showgrid="true"
+ inkscape:zoom="0.6004608"
+ inkscape:cx="826.65969"
+ inkscape:cy="483.3047"
+ inkscape:window-x="66"
+ inkscape:window-y="28"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="svg2"
+ fit-margin-top="5"
+ fit-margin-right="5"
+ fit-margin-left="5"
+ fit-margin-bottom="5">
+ <inkscape:grid
+ type="xygrid"
+ id="grid3079"
+ empspacing="5"
+ visible="true"
+ enabled="true"
+ snapvisiblegridlinesonly="true"
+ originx="413.99932px"
+ originy="5758.0031px" />
+ </sodipodi:namedview>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g4"
+ transform="translate(4751.9713,-1307.071)">
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)"
+ d="m 6161.6776,2411.7612 0,6117.1391"
+ id="path3134-9-0-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)"
+ d="m 6161.6776,3342.6302 -3856.4573,0 10.6979,5757.1962 2918.1436,-2e-4"
+ id="path3134-9-0"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccc" />
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)"
+ d="m 6161.6776,3342.6302 3856.4574,0 -12.188,5757.1963 -2918.1436,-3e-4"
+ id="path3134-9-0-7"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccc" />
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 5250 8100 - 5710 5790-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4050 9300 - 4512 7140-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1040 9300 - 1502 7140-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 2240 8100 - 2702 5940-->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 2250 8100 - 2250 6240-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1050 9300 - 1050 7440-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4050 9300 - 4050 7440-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 5250 8100 - 5250 6240-->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 9300 3150 - 10860 3150-->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 11400 3600 - 11400 4410-->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 11400 5100 - 11400 5910-->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 9900 4650 - 10860 4650-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 9600 6150 - 10860 6150-->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <rect
+ x="4544.7305"
+ y="4603.417"
+ width="3240.0088"
+ height="2650.6289"
+ rx="0"
+ style="stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057884, 60.00115769;stroke-dashoffset:0"
+ id="rect118"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="5073.3374"
+ y="6372.4521"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rcu_accelerate_cbs()</text>
+ <g
+ id="g3107"
+ transform="translate(2715.7065,4700.8888)">
+ <rect
+ id="rect112"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="4773.3452"
+ y="4825.2578"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_prepare_for_idle()</text>
+ <rect
+ x="790.93585"
+ y="4630.8252"
+ width="3240.0088"
+ height="2650.6289"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.0005789, 60.00115781;stroke-dashoffset:0"
+ id="rect118-3"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="1319.5447"
+ y="6639.2261"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_accelerate_cbs()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-7"
+ transform="translate(-1038.0776,4728.2971)">
+ <rect
+ id="rect112-5"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="1019.5512"
+ y="4852.666"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">note_gp_changes()</text>
+ <text
+ xml:space="preserve"
+ x="1319.5447"
+ y="6376.6328"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_advance_cbs()</text>
+ <text
+ xml:space="preserve"
+ x="1340.6649"
+ y="6111.4473"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-6-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">__note_gp_changes()</text>
+ <rect
+ x="5422.6279"
+ y="3041.8311"
+ width="1480.4871"
+ height="379.24637"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.0005789, 60.00115794;stroke-dashoffset:0"
+ id="rect118-3-9"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="5607.2734"
+ y="3283.3892"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">call_rcu()</text>
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ id="path3084"
+ sodipodi:cx="319.379"
+ sodipodi:cy="345.54001"
+ sodipodi:rx="65.917107"
+ sodipodi:ry="39.550262"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ transform="matrix(13.298129,0,0,13.298129,1915.7286,4523.6528)" />
+ <text
+ xml:space="preserve"
+ x="5853.9238"
+ y="8902.3623"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104">Wake up</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ x="6165.7158"
+ y="9122.8174"
+ id="text3110"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan3112"
+ x="6165.7158"
+ y="9122.8174">grace-period</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ x="6162.8716"
+ y="9364.3564"
+ id="text3114"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan3116"
+ x="6162.8716"
+ y="9364.3564">kernel thread</tspan></text>
+ <rect
+ x="8239.8516"
+ y="4608.7363"
+ width="3240.0088"
+ height="2650.6289"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057902, 60.00115804;stroke-dashoffset:0"
+ id="rect118-36"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="8768.4678"
+ y="6484.1562"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-75"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_accelerate_cbs()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-3"
+ transform="translate(6410.833,4706.2127)">
+ <rect
+ id="rect112-56"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="8329.5352"
+ y="4830.5771"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-9"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">takedown_cpu()</text>
+ <text
+ xml:space="preserve"
+ x="8335.4873"
+ y="5094.127"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-9-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcutree_migrate_callbacks()</text>
+ <text
+ xml:space="preserve"
+ x="8335.4873"
+ y="5357.1006"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-9-6-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_migrate_callbacks()</text>
+ <text
+ xml:space="preserve"
+ x="8768.4678"
+ y="6224.9038"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-6-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_advance_cbs()</text>
+ <text
+ xml:space="preserve"
+ x="3467.9963"
+ y="6987.9912"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7220.106"
+ y="6961.395"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="10905.331"
+ y="6961.395"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-5">Leaf</tspan></text>
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ id="path3084-3"
+ sodipodi:cx="319.379"
+ sodipodi:cy="345.54001"
+ sodipodi:rx="65.917107"
+ sodipodi:ry="39.550262"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ transform="matrix(13.298129,0,0,13.298129,1872.6808,-2726.4833)" />
+ <text
+ xml:space="preserve"
+ x="5717.4517"
+ y="1785.2073"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-7">Phase One</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ x="6119.668"
+ y="2005.6624"
+ id="text3110-5"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan3112-3"
+ x="6119.668"
+ y="2005.6624">of Update</tspan></text>
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-3"
+ d="m 6169.6477,11384.719 0,8777.145"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(1749.0282,658.72243)"
+ id="g3188">
+ <text
+ xml:space="preserve"
+ x="3305.5364"
+ y="13255.592"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
+ <g
+ id="g3107-62"
+ transform="translate(947.90548,11584.029)">
+ <rect
+ id="rect112-9"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-1"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5452.3052"
+ y="13844.535"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-7">Root</tspan></text>
+ </g>
+ <rect
+ ry="0"
+ id="rect118-0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057845, 60.00115689;stroke-dashoffset:0"
+ rx="0"
+ height="23612.516"
+ width="13607.611"
+ y="12709.474"
+ x="-663.88806" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-93"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="12968.928"
+ x="-548.11169"
+ xml:space="preserve">rcu_gp_init()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(1628.5648,695.08943)"
+ id="g3147">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(2479.6454,658.72243)"
+ id="g3153">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-2343.9166,695.08943)"
+ id="g3147-3"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-6"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ </g>
+ <g
+ transform="translate(-853.09625,658.72243)"
+ id="g3153-2"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-6"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-8"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-9">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-4185.8377,658.72243)"
+ id="g3153-20"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-2"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-3"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-7"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-7518.5789,658.72243)"
+ id="g3153-28"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-7"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4772.378,14541.058 -582.9982,865.094"
+ id="path3414"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 7581.1599,14541.32 582.9982,865.094"
+ id="path3414-9"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 2821.7449,16754.235 -582.9982,865.094"
+ id="path3414-8"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 9566.7916,16754.497 583.0014,865.094"
+ id="path3414-9-4"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4238.2414,16754.235 0,846.288"
+ id="path3414-8-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8115.2166,16780.831 0,846.288"
+ id="path3414-8-3-6"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <g
+ id="g4504-3-9"
+ transform="translate(4164.6575,-5087.5013)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-6-1"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-7-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16888.277"
+ x="4344.877"
+ xml:space="preserve"><tspan
+ id="tspan3104-5-7"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">End of</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-3-0"
+ y="17119.1"
+ x="4578.7886"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17119.1"
+ x="4578.7886"
+ id="tspan3112-5-9"
+ sodipodi:role="line">Last Grace</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-6-3"
+ y="17350.271"
+ x="4581.7886"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17350.271"
+ x="4581.7886"
+ id="tspan3116-2-6"
+ sodipodi:role="line">Period</tspan></text>
+ </g>
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-5"
+ d="m 7845.2122,11943.62 -1595.7756,0"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send-36)" />
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ id="path3084-6"
+ sodipodi:cx="319.379"
+ sodipodi:cy="345.54001"
+ sodipodi:rx="65.917107"
+ sodipodi:ry="39.550262"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ transform="matrix(13.298129,0,0,13.298129,1915.7264,6279.0065)" />
+ <text
+ xml:space="preserve"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ x="6165.6357"
+ y="10691.992"
+ id="text3110-0"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan3112-6"
+ x="6165.6357"
+ y="10691.992">Grace-period</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ x="6162.8696"
+ y="10947.994"
+ id="text3114-2"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan3116-6"
+ x="6162.8696"
+ y="10947.994">kernel thread</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ x="6165.3237"
+ y="11188.528"
+ id="text3114-1"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan3116-8"
+ x="6165.3237"
+ y="11188.528">awakened</tspan></text>
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-3-2"
+ d="m 6161.6774,9725.7319 0,531.9251"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ sodipodi:nodetypes="cccccccccccccccccccccccccccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1"
+ d="m 6169.1878,20208.525 -2.8277,1315.668 -5343.84363,17.12 -2.8276,6561.744 2039.08003,17.963 -2.7042,-2144.14 -491.6705,-0.211 -2.7042,-1993.689 1487.7179,-4.728 -17.7999,1812.453 2017.2372,-7.643 4.9533,-2151.572 -1405.5264,11.163 -10.9189,-1891.147 1739.2163,-2.718 -13.2006,4234.23 -1701.3596,1.395 -8.784,2107.712 1702.6392,-4.834 33.4144,-1867.716 1312.2491,12.923 14.608,1818.336 2000.0062,20.422 -12.279,-1841.411 1304.1668,1.615 -12.279,2032.706 -4638.6501,1.615 19.5828,569.038"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(1618.635,9512.0768)"
+ id="g3147-7">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-8"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-4"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-5"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(2469.7158,9475.7098)"
+ id="g3153-0">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-3"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-6"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-1"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-6">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-2353.8464,9512.0768)"
+ id="g3147-3-3"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-6-2"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-0-0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-6-6"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5398.415"
+ y="15310.093"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-8"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmaskinit</text>
+ <text
+ xml:space="preserve"
+ x="5398.415"
+ y="15545.01"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-5-8"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmaskinitnext</text>
+ </g>
+ <g
+ transform="translate(-863.02623,9475.7098)"
+ id="g3153-2-1"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-6-5"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-1-5"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-8-4"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-7-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-9-6">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-4195.7676,9475.7098)"
+ id="g3153-20-5"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-2-6"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-3-9"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-7-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-5-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-92-4">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-7528.5086,9475.7098)"
+ id="g3153-28-5"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-9-2"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-7-5"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-3-4"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-6-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-1-4">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7699.7246"
+ y="17734.791"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-4"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmaskinit</text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="M 4762.4482,23358.047 4179.45,24223.141"
+ id="path3414-4"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 7571.23,23358.309 582.9982,865.094"
+ id="path3414-9-3"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 2811.8152,25571.224 -582.9982,865.094"
+ id="path3414-8-0"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 9556.8622,25571.486 582.9988,865.094"
+ id="path3414-9-4-7"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4228.3115,25571.224 0,846.288"
+ id="path3414-8-3-8"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8105.2867,25597.82 0,846.288"
+ id="path3414-8-3-6-6"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <rect
+ ry="0"
+ id="rect118-1"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057884, 60.00115756;stroke-dashoffset:0"
+ rx="0"
+ height="4418.4302"
+ width="4932.5845"
+ y="21654.297"
+ x="1395.3682" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="21852.52"
+ x="1530.812"
+ xml:space="preserve"
+ sodipodi:linespacing="125%">rcu_init_new_rnp()<tspan
+ style="font-size:192px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3307"> or</tspan></text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="22120.592"
+ x="1530.812"
+ xml:space="preserve">rcu_cleanup_dead_rnp()</text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2-7-6"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="22389.539"
+ x="1533.6567"
+ xml:space="preserve"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:192px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3327">(optional)</tspan></text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(1739.0986,9475.7098)"
+ id="g3188-8">
+ <text
+ xml:space="preserve"
+ x="3305.5364"
+ y="13255.592"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-84"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;qsmaskinit</text>
+ <g
+ id="g3107-31"
+ transform="translate(947.90548,11584.029)">
+ <rect
+ id="rect112-4"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-9"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5452.3052"
+ y="13844.535"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-20"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="3305.5364"
+ y="13490.509"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-5-89"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmaskinitnext</text>
+ </g>
+ <path
+ sodipodi:nodetypes="cccccccccccccccccccccccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-10"
+ d="m 6187.9943,28881.474 -2.8275,2480.757 -2316.0141,-1.687 -2.8276,2179.854 2321.1757,-0.843 -2.7041,-1843.237 2404.5141,-0.212 16.1022,1993.267 -7783.83443,-4.728 -16.7937,2120.395 2033.10343,-23.534 2.0128,-1866.562 2051.9098,14.079 2.0128,1838.299 1280.8474,-4.728 14.608,-1830.104 1312.2492,12.923 14.608,1818.336 2000.0057,20.422 -12.279,-1841.411 1304.167,1.615 -12.279,2032.706 -4638.6499,1.615 19.5828,569.038"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(1739.0986,17188.625)"
+ id="g3188-6">
+ <text
+ xml:space="preserve"
+ x="3305.5364"
+ y="13255.592"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ <g
+ id="g3107-5"
+ transform="translate(947.90548,11584.029)">
+ <rect
+ id="rect112-94"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-90"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5452.3052"
+ y="13844.535"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-9"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-1">Root</tspan></text>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(1618.6352,17224.992)"
+ id="g3147-1">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-1"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-5"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-9"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5392.3345"
+ y="15407.104"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(2469.7158,17188.625)"
+ id="g3153-7">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-67"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-36"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-5"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-63"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-94">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7536.4883"
+ y="17640.934"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-9"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ </g>
+ <g
+ transform="translate(-2353.8462,17224.992)"
+ id="g3147-3-8"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-6-1"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-0-2"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-6-9"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5378.4146"
+ y="15436.927"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ </g>
+ <g
+ transform="translate(-863.02613,17188.625)"
+ id="g3153-2-3"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-6-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-1-0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-8-8"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-7-8"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-9-5">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-4195.7673,17188.625)"
+ id="g3153-20-0"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-2-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-3-6"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-7-38"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-5-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-92-6">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7520.1294"
+ y="17673.639"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-35"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ </g>
+ <g
+ transform="translate(-7528.5085,17188.625)"
+ id="g3153-28-1"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-9-1"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-7-59"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-3-8"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-6-4"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-1-8">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7521.4663"
+ y="17666.062"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-75-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4762.4484,31070.961 -582.9982,865.095"
+ id="path3414-0"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 7571.2303,31071.223 582.9982,865.095"
+ id="path3414-9-30"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 2811.8153,33284.138 -582.9982,865.094"
+ id="path3414-8-4"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 9556.862,33284.401 582.999,865.093"
+ id="path3414-9-4-4"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4228.3118,33284.138 0,846.288"
+ id="path3414-8-3-4"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8105.287,33310.734 0,846.288"
+ id="path3414-8-3-6-4"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ x="6659.5469"
+ y="34833.551"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-62"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1-8"
+ d="m 11248.729,43927.515 3383.749,-0.843 7.995,1860.989"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1-3"
+ d="m 14641.723,41609.377 -2.828,1541.346 -3303.353,-1.688"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1-6"
+ d="m 816.24399,43920.114 -3929.12029,17.964 20.2152,2632.051"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1-3-2"
+ d="m -3122.1199,40492.4 12.2312,2669.729 3867.53038,7.717"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ sodipodi:nodetypes="cccccccccccccccccccccccccccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-4"
+ d="m 6180.0812,36613.063 -2.827,638.638 -5325.0381,35.926 -9.78989,7279.202 2659.62569,0 0,-2260.682 -1196.8316,0 0,-1861.738 1462.7942,0 0,2127.7 3723.476,0 0,1861.738 2035.5457,-11.246 -12.28,-1788.219 1191.3338,1.616 15.928,1289.854 520.347,0.202 m 0,0 -15.641,-1570.133 -2629.7318,-18.604 3.165,-2124.92 -2305.4983,-7.354 0,-2287.279 5319.2511,0 0,7180.99 m 0,0 0,19229.094 -4441.5746,0"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <rect
+ ry="0"
+ id="rect118-7"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057884, 60.00115769;stroke-dashoffset:0"
+ rx="0"
+ height="8254.9336"
+ width="14128.912"
+ y="37009.492"
+ x="-719.34235" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-24"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="37286.184"
+ x="-573.74298"
+ xml:space="preserve">rcu_gp_fqs()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(1629.528,25916.616)"
+ id="g3147-0">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-62"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-9"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-90"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5250.5327"
+ y="15512.733"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-35-8"
+ style="font-size:192.00001526px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(2480.6088,25880.249)"
+ id="g3153-1">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-31"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-10"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-34"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-03"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-91">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-2342.9531,25916.616)"
+ id="g3147-3-9"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-6-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-0-9"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-6-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5284.6885"
+ y="15500.379"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-3"
+ style="font-size:192.00001526px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ </g>
+ <g
+ transform="translate(-852.13285,25880.249)"
+ id="g3153-2-8"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-6-0"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-1-56"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-8-6"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-7-4"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-9-0">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-4184.8743,25880.249)"
+ id="g3153-20-04"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-2-62"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-3-67"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-7-5"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-5-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-92-9">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-7517.6112,25880.249)"
+ id="g3153-28-8"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-9-7"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-7-2"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-3-82"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-6-9"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-1-9">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7428.2939"
+ y="17707.271"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-75-6"
+ style="font-size:192.00001526px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4773.3421,39762.585 -582.9986,865.094"
+ id="path3414-02"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 7582.1232,39762.847 582.999,865.094"
+ id="path3414-9-7"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 2822.7083,41975.762 -582.9982,865.094"
+ id="path3414-8-6"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 9567.7542,41976.024 583.0018,865.094"
+ id="path3414-9-4-1"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4239.2048,41975.762 0,846.288"
+ id="path3414-8-3-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8116.1802,42002.358 0,846.288"
+ id="path3414-8-3-6-2"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <rect
+ ry="0"
+ id="rect118-1-1"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057924, 60.00115835;stroke-dashoffset:0"
+ rx="0"
+ height="7164.1621"
+ width="13301.43"
+ y="37551.07"
+ x="-474.37598" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2-5"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="37802.488"
+ x="-342.01831"
+ xml:space="preserve"
+ sodipodi:linespacing="125%">force_qs_rnp()<tspan
+ style="font-size:192px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3307-9" /></text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2-7-9"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="38114.047"
+ x="-334.33856"
+ xml:space="preserve">dyntick_save_progress_counter()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(1749.9916,25880.249)"
+ id="g3188-1">
+ <g
+ id="g3107-4"
+ transform="translate(947.90548,11584.029)">
+ <rect
+ id="rect112-91"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-0"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5452.3052"
+ y="13844.535"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-58">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="3158.8521"
+ y="13313.027"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-70"
+ style="font-size:192.00001526px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ </g>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2-7-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="38425.035"
+ x="-337.79462"
+ xml:space="preserve">rcu_implicit_dynticks_qs()</text>
+ <text
+ xml:space="preserve"
+ x="9907.8887"
+ y="43568.723"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-62-4"
+ style="font-size:192.00001526px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ <g
+ id="g4504"
+ transform="translate(10024.106,24062.466)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3089"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-80"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-4"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-29"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-61"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-04"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-22"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g3148-9-9"
+ transform="translate(9995.8972,46544.783)">
+ <rect
+ x="3592.3828"
+ y="-4715.7246"
+ width="3164.783"
+ height="769.99048"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ id="rect118-3-5-1-3"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4418.6582"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_dynticks_eqs_enter()</text>
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4165.7954"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-0-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">atomic_add_return()</text>
+ </g>
+ <g
+ id="g3148-9-9-2"
+ transform="translate(9995.8972,49205.888)">
+ <rect
+ x="3592.3828"
+ y="-4715.7246"
+ width="3164.783"
+ height="769.99048"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ id="rect118-3-5-1-3-6"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4418.6582"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-6-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_dynticks_eqs_exit()</text>
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4165.7954"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-0-0-8"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">atomic_add_return()</text>
+ </g>
+ <g
+ id="g4504-7"
+ transform="translate(10042.913,29290.642)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-9"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-0"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-2"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-3-2"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-7"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-5"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g4504-6"
+ transform="translate(-7705.0623,22903.647)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-1"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-8"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-7-0"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-9"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-2"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-0"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-2"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g3148-9-9-3"
+ transform="translate(-8306.8632,45879.159)">
+ <rect
+ x="3592.3828"
+ y="-4981.6865"
+ width="3728.9751"
+ height="2265.0989"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ id="rect118-3-5-1-3-7"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4684.6201"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-6-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_report_dead()</text>
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4431.7573"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-0-0-9"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_cleanup_dying_idle_cpu()</text>
+ <g
+ transform="translate(1783.3183,-5255.3491)"
+ id="g3107-7-5"
+ style="fill:none;stroke-width:0.025in">
+ <rect
+ x="2084.55"
+ y="949.37109"
+ width="2809.1992"
+ height="1370.8721"
+ rx="0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ id="rect112-5-3" />
+ <rect
+ x="2084.55"
+ y="1025.3964"
+ width="2809.1992"
+ height="1294.8468"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ id="rect112-3-3-5" />
+ </g>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-6-6-2-6"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-3526.4448"
+ x="4241.8574"
+ xml:space="preserve">-&gt;qsmaskinitnext</text>
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-3-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-2987.4167"
+ x="6305.1484"
+ xml:space="preserve"><tspan
+ id="tspan3104-6-9"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">Leaf</tspan></text>
+ </g>
+ <g
+ id="g4504-7-2"
+ transform="translate(-7686.2563,30073.332)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-9-2"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-2-8"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-0-9"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-2-7"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-3-3"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-7-6"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-5-1"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g3206"
+ transform="translate(-752.44253,40565.329)">
+ <rect
+ ry="0"
+ id="rect118-3-5-1-3-1"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00058007, 60.00116001;stroke-dashoffset:0"
+ rx="0"
+ height="2265.0989"
+ width="3728.9751"
+ y="3382.2036"
+ x="-3958.3845" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3-27-6-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="3679.27"
+ x="-3804.9949"
+ xml:space="preserve">rcu_cpu_starting()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-7-5-0"
+ transform="translate(-5767.4491,3108.5424)">
+ <rect
+ id="rect112-5-3-9"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-3-5-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="-3308.9099"
+ y="4837.4453"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-6-2-6-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmaskinitnext</text>
+ <text
+ xml:space="preserve"
+ x="-1245.6189"
+ y="5376.4731"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-2-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-9-6">Leaf</tspan></text>
+ </g>
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1-3-6"
+ d="m 10723.215,43926.861 467.335,8.625"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send-8)"
+ d="m 4431.0572,60276.11 16.472,2346.582"
+ id="path3134-9-0-3-1-9-9"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(-59.697399,41012.242)"
+ id="g3188-83">
+ <text
+ xml:space="preserve"
+ x="3172.5554"
+ y="13255.592"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-80"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ <g
+ id="g3107-40"
+ transform="translate(947.90548,11584.029)">
+ <rect
+ id="rect112-919"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-6"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5452.3052"
+ y="13844.535"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-25"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-4">Root</tspan></text>
+ </g>
+ <rect
+ ry="0"
+ id="rect118-4"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057845, 60.00115689;stroke-dashoffset:0"
+ rx="0"
+ height="7164.1641"
+ width="13639.945"
+ y="52743.297"
+ x="-2453.8081" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-99"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="52950.113"
+ x="-2356.8381"
+ xml:space="preserve">rcu_report_rnp()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(-180.16099,41048.609)"
+ id="g3147-36">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-0"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-50"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-29"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(670.91971,41012.242)"
+ id="g3153-4">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-35"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-17"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-4"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-14">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-4152.6419,41048.609)"
+ id="g3147-3-6"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-6-9"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-0-4"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-6-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5284.9155"
+ y="15386.685"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-3-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ </g>
+ <g
+ transform="translate(-2661.8217,41012.242)"
+ id="g3153-2-6"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-6-4"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-1-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-8-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-7-88"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-9-9">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-5994.5632,41012.242)"
+ id="g3153-20-2"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-2-8"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-3-8"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-7-8"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-5-68"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-92-3">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-9327.3041,41012.242)"
+ id="g3153-28-83"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-9-3"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-7-3"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-3-80"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-6-47"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-1-6">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7422.3945"
+ y="17661.012"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-67"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 2963.6526,54894.579 -582.9982,865.092"
+ id="path3414-89"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 5772.4344,54894.841 582.9982,865.092"
+ id="path3414-9-0"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 1013.0193,57107.754 -582.99819,865.094"
+ id="path3414-8-68"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 7758.0666,57108.016 583,865.094"
+ id="path3414-9-4-79"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 2429.5159,57107.754 0,846.288"
+ id="path3414-8-3-0"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 6306.4911,57134.35 0,846.288"
+ id="path3414-8-3-6-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ sodipodi:nodetypes="cccccccccccccccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-33"
+ d="m 4421.0737,51833.378 -2.8276,1315.669 -5343.84362,17.119 -2.8276,6561.745 2039.08002,17.963 -2.7043,-2144.141 -491.67069,-0.211 -2.7042,-1993.689 1487.71819,-4.728 -17.8001,1812.453 2017.2374,-7.643 4.9532,-2151.571 -1405.5263,11.162 -10.9191,-1891.146 1739.2165,-2.718 0.1197,7086.03"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)"
+ d="m 4432.9209,44194.481 8.8008,4666.688 -2616.9163,17.119 15.9788,1446.406 2603.2718,-0.843 -29.6181,2086.665"
+ id="path3134-9-0-3-1-7"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccccc" />
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:none"
+ d="m 4423.9777,48861.171 2616.9159,17.119 -15.979,1465.213 -2584.4649,-19.65"
+ id="path3134-9-0-3-1-9"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccc" />
+ <g
+ transform="translate(-1706.1312,54634.242)"
+ id="g3115">
+ <rect
+ x="4485.6865"
+ y="-8571.0352"
+ width="3296.428"
+ height="2199.2754"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057923, 60.00115859;stroke-dashoffset:0"
+ id="rect118-3-3"
+ ry="0" />
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-7-2"
+ transform="translate(2656.673,-8952.2968)">
+ <rect
+ id="rect112-5-6"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-3-52"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="4714.3018"
+ y="-8349.1943"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">note_gp_changes()</text>
+ <text
+ xml:space="preserve"
+ x="5014.2954"
+ y="-7170.978"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-6-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text>
+ <text
+ xml:space="preserve"
+ x="5035.4155"
+ y="-7436.1636"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-6-2-8"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">__note_gp_changes()</text>
+ <text
+ xml:space="preserve"
+ x="7162.7471"
+ y="-6692.6006"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-79"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-6">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-3299.9731,54048.57)"
+ id="g3148">
+ <rect
+ ry="0"
+ id="rect118-3-5"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ rx="0"
+ height="412.66794"
+ width="3240.0085"
+ y="-4640.499"
+ x="3517.1572" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-4418.6582"
+ x="3745.7725"
+ xml:space="preserve">rcu_note_context_switch()</text>
+ </g>
+ <g
+ transform="translate(1881.1886,54048.57)"
+ id="g3148-5">
+ <rect
+ ry="0"
+ id="rect118-3-5-6"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ rx="0"
+ height="412.66794"
+ width="3240.0085"
+ y="-4640.499"
+ x="3517.1572" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-4418.6582"
+ x="3745.7725"
+ xml:space="preserve">rcu_check_callbacks()</text>
+ </g>
+ <g
+ transform="translate(-850.30204,55463.106)"
+ id="g3148-9">
+ <rect
+ ry="0"
+ id="rect118-3-5-1"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ rx="0"
+ height="864.02148"
+ width="3540.9114"
+ y="-4640.499"
+ x="3517.1572" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3-27"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-4418.6582"
+ x="3745.7725"
+ xml:space="preserve">rcu_process_callbacks()</text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3-27-0"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-4165.7954"
+ x="3745.7725"
+ xml:space="preserve">rcu_check_quiescent_state()</text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3-27-0-9"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-3914.085"
+ x="3745.7725"
+ xml:space="preserve">rcu_report_qs_rdp()</text>
+ </g>
+ <g
+ id="g4504-3"
+ transform="translate(3886.2577,30763.697)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-6-0"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-5"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-3"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-5"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-6"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-2-4"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g4504-3-9-1"
+ transform="translate(3886.2577,34216.283)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-6-1-0"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-7-2-4"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-5-7-8"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-3-0-7"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-5-9-0"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-6-3-8"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-2-6-6"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g4504-3-0"
+ transform="translate(-4075.0211,30763.697)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,228.84485,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-6-6"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-7-26"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-5-1"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-3-8"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-5-7"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-6-9"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-2-2"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g4504-3-9-0"
+ transform="translate(-4181.4064,34216.283)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-6-1-2"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-7-2-3"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-5-7-7"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-3-0-5"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-5-9-9"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-6-3-2"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-2-6-2"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:none"
+ d="m 8448.9566,48370.097 0,2393.663"
+ id="path3134-9-0-3-1-9-8"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:none"
+ d="m 390.28991,48370.097 0,2393.663"
+ id="path3134-9-0-3-1-9-8-9"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <g
+ id="g4504-2"
+ transform="translate(-143.72569,46137.076)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-4"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-79"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4273.4326"
+ xml:space="preserve"><tspan
+ id="tspan3104-3"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">Wake up</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-92"
+ y="17055.541"
+ x="4585.2246"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4585.2246"
+ id="tspan3112-8"
+ sodipodi:role="line">grace-period</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-3"
+ y="17297.08"
+ x="4582.3804"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4582.3804"
+ id="tspan3116-0"
+ sodipodi:role="line">kernel thread</tspan></text>
+ </g>
+ <g
+ transform="translate(-707.64089,66256.889)"
+ id="g3148-2">
+ <rect
+ ry="0"
+ id="rect118-3-5-2"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ rx="0"
+ height="412.66794"
+ width="3240.0085"
+ y="-4640.499"
+ x="3517.1572" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3-8"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-4418.6582"
+ x="4064.9268"
+ xml:space="preserve">rcu_report_qs_rsp()</text>
+ </g>
+ <path
+ sodipodi:type="arc"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ id="path3084-6-9"
+ sodipodi:cx="319.379"
+ sodipodi:cy="345.54001"
+ sodipodi:rx="65.917107"
+ sodipodi:ry="39.550262"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ transform="matrix(13.298129,0,0,13.298129,2044.7501,59781.881)" />
+ <text
+ xml:space="preserve"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ x="6294.6587"
+ y="64194.863"
+ id="text3110-0-1"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan3112-6-5"
+ x="6294.6587"
+ y="64194.863">Grace-period</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ x="6291.8931"
+ y="64450.863"
+ id="text3114-2-4"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan3116-6-9"
+ x="6291.8931"
+ y="64450.863">kernel thread</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ x="6294.3472"
+ y="64691.398"
+ id="text3114-1-2"
+ sodipodi:linespacing="125%"><tspan
+ sodipodi:role="line"
+ id="tspan3116-8-5"
+ x="6294.3472"
+ y="64691.398">awakened</tspan></text>
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-3-2-7"
+ d="m 5310.5974,63210.805 984.0615,0 -3.9578,549.726"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ sodipodi:nodetypes="cccccccccccccccccccccccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-99"
+ d="m 6322.9337,64896.388 -2.8276,2480.757 -2316.0141,-1.687 -2.8276,2179.855 2321.1758,-0.844 -2.7042,-1843.237 2404.5142,-0.212 16.1023,1993.267 -7783.83452,-4.728 -16.79346,2120.395 2033.10318,-23.535 2.0128,-1866.561 2051.9096,14.08 2.0128,1838.298 1280.8474,-4.728 14.6081,-1830.105 1312.2491,12.923 14.608,1818.337 2000.0093,20.422 -12.279,-1841.412 1304.1722,1.616 -12.279,2032.706 -4638.6586,1.616 19.5827,569.037"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(1874.038,53203.538)"
+ id="g3188-7">
+ <text
+ xml:space="preserve"
+ x="3199.1516"
+ y="13255.592"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-82"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ <g
+ id="g3107-53"
+ transform="translate(947.90548,11584.029)">
+ <rect
+ id="rect112-49"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-02"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5452.3052"
+ y="13844.535"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-19">Root</tspan></text>
+ </g>
+ <rect
+ ry="0"
+ id="rect118-6"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057845, 60.00115689;stroke-dashoffset:0"
+ rx="0"
+ height="14649.609"
+ width="13482.601"
+ y="65254.539"
+ x="-538.87689" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-21"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="65513.996"
+ x="-423.10056"
+ xml:space="preserve">rcu_gp_cleanup()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(1753.5744,53239.905)"
+ id="g3147-2">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-07"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-3"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-1"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5324.5371"
+ y="15414.598"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-753"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-1"
+ transform="translate(7817.6676,69212.346)">
+ <rect
+ id="rect112-7-1-90"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-56"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="12322.059"
+ y="71472.641"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-77"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-4">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="10084.225"
+ y="70903.312"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-9-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-9"
+ d="m 6315.6122,72629.054 -20.9533,8108.684 1648.968,0"
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <text
+ xml:space="preserve"
+ x="5092.4683"
+ y="74111.672"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-60"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rsp-&gt;completed =</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-62-6"
+ transform="translate(2814.6217,72520.234)">
+ <rect
+ id="rect112-6"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-1-4"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="7319.022"
+ y="74780.406"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-8"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-7-7">Root</tspan></text>
+ <text
+ xml:space="preserve"
+ x="5092.4683"
+ y="74325.906"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-60-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"> rnp-&gt;completed</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(1746.2528,60972.572)"
+ id="g3147-9">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-2"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-02"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-5"
+ transform="translate(7810.3459,76945.013)">
+ <rect
+ id="rect112-7-1-9"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="12314.736"
+ y="79205.188"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-8">Leaf</tspan></text>
+ <g
+ transform="translate(-2226.2288,60972.572)"
+ id="g3147-3-7"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-6-3"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-0-6"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-6-1"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ </g>
+ <g
+ transform="translate(-735.4075,60936.205)"
+ id="g3153-2-9"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-6-3"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-1-1-4"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-8-9"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-7-4-8"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-9-7">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-4068.1496,60936.205)"
+ id="g3153-20-8"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-2-4"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-3-5"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-7-0"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-5-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-92-6-5">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-7400.8907,60936.205)"
+ id="g3153-28-0"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-9-6"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-7-3-8"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-3-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-6-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-1-6-2">Leaf</tspan></text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4890.0661,74818.542 -582.9982,865.094"
+ id="path3414-5"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 7698.8481,74818.804 582.998,865.094"
+ id="path3414-9-5"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 2939.433,77031.719 -582.9982,865.094"
+ id="path3414-8-4-6"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 9684.4834,77031.981 583.0036,865.094"
+ id="path3414-9-4-7-0"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4355.9293,77031.719 0,846.288"
+ id="path3414-8-3-65"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8232.9046,77058.315 0,846.288"
+ id="path3414-8-3-6-6-6"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <g
+ transform="translate(-2218.9069,53239.905)"
+ id="g3147-3-64"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-6-62"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-0-8"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-6-96"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5327.3057"
+ y="15428.84"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-36"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ </g>
+ <g
+ transform="translate(-728.08545,53203.538)"
+ id="g3153-2-0"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-6-7"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-1-01"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-8-0"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-7-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-9-3">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-4060.8278,53203.538)"
+ id="g3153-20-7"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-2-7"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-3-2"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-7-6"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-5-4"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-92-5">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7486.4907"
+ y="17670.119"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ </g>
+ <g
+ transform="translate(-7393.5687,53203.538)"
+ id="g3153-28-02"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-9-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-7-0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-3-9"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-6-94"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-1-5">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7474.1382"
+ y="17688.926"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-5-1"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4897.3878,67085.876 -582.9982,865.094"
+ id="path3414-03"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 7706.1695,67086.138 582.9982,865.094"
+ id="path3414-9-78"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 2946.7546,69299.053 -582.9981,865.094"
+ id="path3414-8-8"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 9691.8054,69299.315 583.0036,865.094"
+ id="path3414-9-4-6"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4363.251,69299.053 0,846.288"
+ id="path3414-8-3-04"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8240.2262,69325.649 0,846.288"
+ id="path3414-8-3-6-67"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ x="6742.6001"
+ y="70882.617"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g4504-3-9-6"
+ transform="translate(4290.2512,63653.93)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-6-1-09"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-7-2-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16888.277"
+ x="4344.877"
+ xml:space="preserve"><tspan
+ id="tspan3104-5-7-5"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">Start of</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-3-0-9"
+ y="17119.1"
+ x="4578.7886"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17119.1"
+ x="4578.7886"
+ id="tspan3112-5-9-7"
+ sodipodi:role="line">Next Grace</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-6-3-85"
+ y="17350.271"
+ x="4581.7886"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17350.271"
+ x="4581.7886"
+ id="tspan3116-2-6-3"
+ sodipodi:role="line">Period</tspan></text>
+ </g>
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)"
+ d="m 4406.3256,79248.348 -0.01,5813.579"
+ id="path3134-9-0-3-37"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)"
+ d="m 4406.3181,82402.301 -2393.663,0.512 0,1196.832 2393.663,-0.512"
+ id="path3134-9-0-8"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccc" />
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)"
+ d="m 4406.3181,82402.301 2393.6631,0.512 0,1196.832 -2393.6631,-0.512"
+ id="path3134-9-0-7-7"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccc" />
+ <rect
+ x="578.16779"
+ y="82839.773"
+ width="2844.0972"
+ height="360.77411"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057845, 60.00115702;stroke-dashoffset:0"
+ id="rect118-3-4"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="806.7832"
+ y="83088.211"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-19"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_check_callbacks()</text>
+ <rect
+ x="5314.2671"
+ y="82817.688"
+ width="2975.115"
+ height="382.86298"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057858, 60.00115716;stroke-dashoffset:0"
+ id="rect118-36-0"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="5409.8989"
+ y="83063.711"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-9-6-9"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_cleanup_after_idle()</text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-88"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="81443.047"
+ x="3264.7983"
+ xml:space="preserve">rcu_advance_cbs()</text>
+ <rect
+ id="rect112-58"
+ style="fill:none;stroke:#000000;stroke-width:29.99999809;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="80561.273"
+ x="2991.7173" />
+ <rect
+ id="rect112-3-4"
+ style="fill:none;stroke:#000000;stroke-width:29.99999809;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="80637.297"
+ x="2991.7173" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-3-7-37"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="81872.406"
+ x="5411.5601"
+ xml:space="preserve"><tspan
+ id="tspan3104-6-5-13"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">Leaf</tspan></text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-3-8"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="81232.938"
+ x="3264.7983"
+ xml:space="preserve">__note_gp_changes()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3049"
+ transform="translate(-1728.7601,83820.41)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,1872.6808,-2726.4833)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-3-0"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-6-9"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="1785.2073"
+ x="5717.4517"
+ xml:space="preserve"><tspan
+ id="tspan3104-7-7"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">Phase Two</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-5-9"
+ y="2005.6624"
+ x="6119.668"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="2005.6624"
+ x="6119.668"
+ id="tspan3112-3-9"
+ sodipodi:role="line">of Update</tspan></text>
+ </g>
+ <rect
+ x="3342.4805"
+ y="83998.438"
+ width="1994.7195"
+ height="664.90662"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057818, 60.00115636;stroke-dashoffset:0"
+ id="rect118-36-3"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="3608.4419"
+ y="84264.398"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-9-6-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">RCU_SOFTIRQ</text>
+ <text
+ xml:space="preserve"
+ x="3608.4419"
+ y="84530.367"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-9-6-6-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_do_batch()</text>
+ </g>
+</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-hotplug.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-hotplug.svg
new file mode 100644
index 000000000000..2c9310ba29ba
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-hotplug.svg
@@ -0,0 +1,775 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec 9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="613.22784"
+ height="707.07056"
+ viewBox="-44 -44 8154.7829 9398.3736"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="TreeRCU-hotplug.svg">
+ <metadata
+ id="metadata212">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs210">
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible">
+ <path
+ id="path3940"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutS"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutS"
+ style="overflow:visible">
+ <path
+ id="path4073"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.2,0.2)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM"
+ style="overflow:visible">
+ <path
+ id="path4070"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.4,0.4)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible">
+ <path
+ id="path3952"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible">
+ <path
+ id="path3946"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path3970"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-7"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3952-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-3"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-1"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-0"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-3"
+ style="overflow:visible">
+ <path
+ id="path3946-1"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-4"
+ style="overflow:visible">
+ <path
+ id="path3946-7"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker4880"
+ style="overflow:visible">
+ <path
+ id="path4882"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-5"
+ style="overflow:visible">
+ <path
+ id="path3946-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-6"
+ style="overflow:visible">
+ <path
+ id="path3946-10"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-36"
+ style="overflow:visible">
+ <path
+ id="path3940-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-6"
+ style="overflow:visible">
+ <path
+ id="path3940-26"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-8"
+ style="overflow:visible">
+ <path
+ id="path3940-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-367"
+ style="overflow:visible">
+ <path
+ id="path3940-5"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-56"
+ style="overflow:visible">
+ <path
+ id="path3946-2"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3081"
+ style="overflow:visible">
+ <path
+ id="path3083"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3085"
+ style="overflow:visible">
+ <path
+ id="path3087"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3089"
+ style="overflow:visible">
+ <path
+ id="path3091"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3093"
+ style="overflow:visible">
+ <path
+ id="path3095"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker3097"
+ style="overflow:visible">
+ <path
+ id="path3099"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-9"
+ style="overflow:visible">
+ <path
+ id="path3940-1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-3675"
+ style="overflow:visible">
+ <path
+ id="path3940-3"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1087"
+ inkscape:window-height="1148"
+ id="namedview208"
+ showgrid="true"
+ inkscape:zoom="1.4142136"
+ inkscape:cx="325.41695"
+ inkscape:cy="364.94502"
+ inkscape:window-x="833"
+ inkscape:window-y="24"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="svg2"
+ fit-margin-top="5"
+ fit-margin-right="5"
+ fit-margin-left="5"
+ fit-margin-bottom="5"
+ inkscape:snap-global="false">
+ <inkscape:grid
+ type="xygrid"
+ id="grid3154"
+ empspacing="5"
+ visible="true"
+ enabled="true"
+ snapvisiblegridlinesonly="true"
+ originx="65.610033px"
+ originy="-659.12429px" />
+ </sodipodi:namedview>
+ <path
+ sodipodi:nodetypes="cc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1-3-5"
+ d="m 5749.1555,47.151064 2.828,9167.338436"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1"
+ d="m 5746.8844,5080.2018 -4107.7813,-0.8434 20.2152,2632.0511"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <path
+ sodipodi:nodetypes="ccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3-1-3"
+ d="m 1629.8595,1633.6804 12.2312,2669.7294 4055.5945,7.7159"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <g
+ id="g3115"
+ transform="translate(1657.6576,12154.29)">
+ <rect
+ ry="0"
+ id="rect118-3"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057923, 60.00115859;stroke-dashoffset:0"
+ rx="0"
+ height="2349.7295"
+ width="3992.2642"
+ y="-8909.5498"
+ x="2379.3704" />
+ <g
+ transform="translate(582.16224,-9085.2783)"
+ id="g3107-7"
+ style="fill:none;stroke-width:0.025in">
+ <rect
+ x="2084.55"
+ y="949.37109"
+ width="2809.1992"
+ height="1370.8721"
+ rx="0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ id="rect112-5" />
+ <rect
+ x="2084.55"
+ y="1025.3964"
+ width="2809.1992"
+ height="1294.8468"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ id="rect112-3-3" />
+ </g>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-6-6-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-7356.375"
+ x="2774.7393"
+ xml:space="preserve">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2-7-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-8652.5312"
+ x="2466.7822"
+ xml:space="preserve">dyntick_save_progress_counter()</text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-2-7-2-0"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-8368.1475"
+ x="2463.3262"
+ xml:space="preserve">rcu_implicit_dynticks_qs()</text>
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-3"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-6817.3472"
+ x="5103.9922"
+ xml:space="preserve"><tspan
+ id="tspan3104-6"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">Leaf</tspan></text>
+ </g>
+ <g
+ id="g4504"
+ transform="translate(-2953.0872,-15955.072)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g3148-9-9"
+ transform="translate(-3554.8919,7020.44)">
+ <rect
+ x="3592.3828"
+ y="-4981.6865"
+ width="3728.9751"
+ height="2265.0989"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ id="rect118-3-5-1-3"
+ ry="0" />
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4684.6201"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_report_dead()</text>
+ <text
+ xml:space="preserve"
+ x="3745.7725"
+ y="-4431.7573"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-3-27-0-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_cleanup_dying_idle_cpu()</text>
+ <g
+ transform="translate(1783.3183,-5255.3491)"
+ id="g3107-7-5"
+ style="fill:none;stroke-width:0.025in">
+ <rect
+ x="2084.55"
+ y="949.37109"
+ width="2809.1992"
+ height="1370.8721"
+ rx="0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ id="rect112-5-3" />
+ <rect
+ x="2084.55"
+ y="1025.3964"
+ width="2809.1992"
+ height="1294.8468"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ id="rect112-3-3-5" />
+ </g>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-6-6-2-6"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-3526.4448"
+ x="4241.8574"
+ xml:space="preserve">-&gt;qsmaskinitnext</text>
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-3-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-2987.4167"
+ x="6305.1484"
+ xml:space="preserve"><tspan
+ id="tspan3104-6-9"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">Leaf</tspan></text>
+ </g>
+ <g
+ id="g4504-7"
+ transform="translate(-2934.2808,-8785.3871)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-9"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-0"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-2"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-3"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-7"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-5"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g3206"
+ transform="translate(3999.537,1706.6099)">
+ <rect
+ ry="0"
+ id="rect118-3-5-1-3-1"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00058007, 60.00116001;stroke-dashoffset:0"
+ rx="0"
+ height="2265.0989"
+ width="3728.9751"
+ y="3382.2036"
+ x="-3958.3845" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3-27-6-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="3679.27"
+ x="-3804.9949"
+ xml:space="preserve">rcu_cpu_starting()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-7-5-0"
+ transform="translate(-5767.4491,3108.5424)">
+ <rect
+ id="rect112-5-3-9"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-3-5-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="-3308.9099"
+ y="4837.4453"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-6-2-6-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmaskinitnext</text>
+ <text
+ xml:space="preserve"
+ x="-1245.6189"
+ y="5376.4731"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-2-0"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-9-6">Leaf</tspan></text>
+ </g>
+</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
new file mode 100644
index 000000000000..de3992f4cbe1
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
@@ -0,0 +1,1095 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec 9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="1037.9601"
+ height="1373.2583"
+ viewBox="-44 -44 13802.927 18253.333"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="TreeRCU-qs.svg">
+ <metadata
+ id="metadata212">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs210">
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible">
+ <path
+ id="path3940"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutS"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutS"
+ style="overflow:visible">
+ <path
+ id="path4073"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.2,0.2)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM"
+ style="overflow:visible">
+ <path
+ id="path4070"
+ d="m 5.77,0 -8.65,5 0,-10 8.65,5 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="scale(0.4,0.4)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible">
+ <path
+ id="path3952"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend"
+ style="overflow:visible">
+ <path
+ id="path3946"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path3970"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-7"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3952-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6,-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-3"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-1"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-2"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-0"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3940-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-3"
+ style="overflow:visible">
+ <path
+ id="path3946-1"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-4"
+ style="overflow:visible">
+ <path
+ id="path3946-7"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker4880"
+ style="overflow:visible">
+ <path
+ id="path4882"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-5"
+ style="overflow:visible">
+ <path
+ id="path3946-0"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lend-6"
+ style="overflow:visible">
+ <path
+ id="path3946-10"
+ style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-36"
+ style="overflow:visible">
+ <path
+ id="path3940-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-6"
+ style="overflow:visible">
+ <path
+ id="path3940-26"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send-8"
+ style="overflow:visible">
+ <path
+ id="path3940-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1087"
+ inkscape:window-height="1144"
+ id="namedview208"
+ showgrid="true"
+ inkscape:zoom="0.70710678"
+ inkscape:cx="616.47598"
+ inkscape:cy="595.41964"
+ inkscape:window-x="813"
+ inkscape:window-y="28"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="g4405"
+ fit-margin-top="5"
+ fit-margin-right="5"
+ fit-margin-left="5"
+ fit-margin-bottom="5">
+ <inkscape:grid
+ type="xygrid"
+ id="grid3381" />
+ </sodipodi:namedview>
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send-8)"
+ d="m 6922.3555,14693.733 16.472,2346.582"
+ id="path3134-9-0-3-1-9-9"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(2431.6011,-4570.136)"
+ id="g3188">
+ <text
+ xml:space="preserve"
+ x="3172.5554"
+ y="13255.592"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ <g
+ id="g3107"
+ transform="translate(947.90548,11584.029)">
+ <rect
+ id="rect112"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5452.3052"
+ y="13844.535"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5">Root</tspan></text>
+ </g>
+ <rect
+ ry="0"
+ id="rect118"
+ style="fill:none;stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057884, 60.00115769;stroke-dashoffset:0"
+ rx="0"
+ height="7164.1636"
+ width="13639.945"
+ y="7160.9038"
+ x="37.490932" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="7367.7192"
+ x="134.46094"
+ xml:space="preserve">rcu_report_rnp()</text>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(2311.1375,-4533.769)"
+ id="g3147">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ </g>
+ <g
+ style="fill:none;stroke-width:0.025in"
+ transform="translate(3162.2182,-4570.136)"
+ id="g3153">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-1661.3439,-4533.769)"
+ id="g3147-3"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-6"
+ transform="translate(3054.6101,13760.052)">
+ <rect
+ id="rect112-7-0"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-6"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="5284.9155"
+ y="15386.685"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ </g>
+ <g
+ transform="translate(-170.52365,-4570.136)"
+ id="g3153-2"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-6"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-1"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-8"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-9">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-3503.2651,-4570.136)"
+ id="g3153-20"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-2"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-3"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-7"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-6836.0062,-4570.136)"
+ id="g3153-28"
+ style="fill:none;stroke-width:0.025in">
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-6-9-9"
+ transform="translate(5213.0126,16008.808)">
+ <rect
+ id="rect112-7-1-7"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-5-2-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="9717.4141"
+ y="18269.314"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3-7-35-7-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
+ <text
+ xml:space="preserve"
+ x="7422.3945"
+ y="17661.012"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-67"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;qsmask &amp;= ~-&gt;grpmask</text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 5454.9508,9312.2011 -582.9982,865.0929"
+ id="path3414"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8263.7327,9312.4631 582.9982,865.0929"
+ id="path3414-9"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 3504.3177,11525.377 -582.9982,865.094"
+ id="path3414-8"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 10249.365,11525.639 583,865.094"
+ id="path3414-9-4"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 4920.8141,11525.377 0,846.288"
+ id="path3414-8-3"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:13.29812717px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
+ d="m 8797.7894,11551.973 0,846.288"
+ id="path3414-8-3-6"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ sodipodi:nodetypes="cccccccccccccccc"
+ inkscape:connector-curvature="0"
+ id="path3134-9-0-3"
+ d="m 6912.3719,6251.0009 -2.8276,1315.669 -5343.8436,17.119 -2.8276,6561.7441 2039.08,17.963 -2.7042,-2144.14 -491.6706,-0.211 -2.7042,-1993.689 1487.718,-4.728 -17.8001,1812.453 2017.2374,-7.643 4.9532,-2151.5715 -1405.5263,11.1629 -10.9191,-1891.1465 1739.2165,-2.718 0.1141,7086.0301"
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
+ <g
+ id="g4405"
+ transform="translate(1241.222,9051.8644)">
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)"
+ d="m 5694.6259,-9006.994 -2.828,3233.9212 -2616.9163,17.1191 15.9788,1446.406 2603.2719,-0.8434 -29.6182,2086.6656"
+ id="path3134-9-0-3-1"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccccc" />
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:none"
+ d="m 5674.0539,-5773.0705 2616.9163,17.1191 -15.9788,1465.2124 -2584.4655,-19.6498"
+ id="path3134-9-0-3-1-9"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cccc" />
+ <g
+ transform="translate(-456.05505,0)"
+ id="g3115">
+ <rect
+ x="4485.6865"
+ y="-8571.0352"
+ width="3296.428"
+ height="2199.2754"
+ rx="0"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057923, 60.00115859;stroke-dashoffset:0"
+ id="rect118-3"
+ ry="0" />
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g3107-7"
+ transform="translate(2656.673,-8952.2968)">
+ <rect
+ id="rect112-5"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="4714.3018"
+ y="-8349.1943"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">note_gp_changes()</text>
+ <text
+ xml:space="preserve"
+ x="5014.2954"
+ y="-7170.978"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-6"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text>
+ <text
+ xml:space="preserve"
+ x="5035.4155"
+ y="-7436.1636"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-6-6-2"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">__note_gp_changes()</text>
+ <text
+ xml:space="preserve"
+ x="7162.7471"
+ y="-6692.6006"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7-5-1-2-3"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ sodipodi:linespacing="125%"><tspan
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
+ id="tspan3104-6">Leaf</tspan></text>
+ </g>
+ <g
+ transform="translate(-2049.897,-585.6713)"
+ id="g3148">
+ <rect
+ ry="0"
+ id="rect118-3-5"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ rx="0"
+ height="412.66794"
+ width="3240.0085"
+ y="-4640.499"
+ x="3517.1572" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-4418.6582"
+ x="3745.7725"
+ xml:space="preserve">rcu_node_context_switch()</text>
+ </g>
+ <g
+ transform="translate(3131.2648,-585.6713)"
+ id="g3148-5">
+ <rect
+ ry="0"
+ id="rect118-3-5-6"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ rx="0"
+ height="412.66794"
+ width="3240.0085"
+ y="-4640.499"
+ x="3517.1572" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-4418.6582"
+ x="3745.7725"
+ xml:space="preserve">rcu_check_callbacks()</text>
+ </g>
+ <g
+ transform="translate(399.7744,828.86448)"
+ id="g3148-9">
+ <rect
+ ry="0"
+ id="rect118-3-5-1"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ rx="0"
+ height="864.02148"
+ width="3540.9114"
+ y="-4640.499"
+ x="3517.1572" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3-27"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-4418.6582"
+ x="3745.7725"
+ xml:space="preserve">rcu_process_callbacks()</text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3-27-0"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-4165.7954"
+ x="3745.7725"
+ xml:space="preserve">rcu_check_quiescent_state())</text>
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3-27-0-9"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-3914.085"
+ x="3745.7725"
+ xml:space="preserve">rcu__report_qs_rdp())</text>
+ </g>
+ <g
+ id="g4504-3"
+ transform="translate(5136.3339,-23870.546)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-6"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-7"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-5"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-3"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-5"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-6"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-2"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g4504-3-9"
+ transform="translate(5136.3339,-20417.959)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-6-1"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-7-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-5-7"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-3-0"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-5-9"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-6-3"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-2-6"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g4504-3-0"
+ transform="translate(-2824.9451,-23870.546)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,228.84485,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-6-6"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-7-26"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-5-1"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-3-8"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-5-7"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-6-9"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-2-2"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <g
+ id="g4504-3-9-0"
+ transform="translate(-2931.3303,-20417.959)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084-6-1-2"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2-7-2-3"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4409.043"
+ xml:space="preserve"><tspan
+ id="tspan3104-5-7-7"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">RCU</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110-3-0-5"
+ y="17055.541"
+ x="4579.373"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4579.373"
+ id="tspan3112-5-9-9"
+ sodipodi:role="line">read-side</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114-6-3-2"
+ y="17297.08"
+ x="4584.8276"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4584.8276"
+ id="tspan3116-2-6-2"
+ sodipodi:role="line">critical section</tspan></text>
+ </g>
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:none"
+ d="m 9699.0326,-6264.1445 0,2393.6632"
+ id="path3134-9-0-3-1-9-8"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <path
+ style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:none"
+ d="m 1640.3664,-6264.1445 0,2393.6632"
+ id="path3134-9-0-3-1-9-8-9"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ </g>
+ <g
+ id="g4504"
+ transform="translate(2347.5727,554.69889)">
+ <path
+ transform="matrix(13.298129,0,0,13.298129,335.22989,12456.379)"
+ d="m 385.2961,345.54001 c 0,21.84301 -29.51209,39.55026 -65.9171,39.55026 -36.40501,0 -65.91711,-17.70725 -65.91711,-39.55026 0,-21.84301 29.5121,-39.55026 65.91711,-39.55026 36.40501,0 65.9171,17.70725 65.9171,39.55026 z"
+ sodipodi:ry="39.550262"
+ sodipodi:rx="65.917107"
+ sodipodi:cy="345.54001"
+ sodipodi:cx="319.379"
+ id="path3084"
+ style="fill:#ffffa1;fill-opacity:0;stroke:#000000;stroke-width:2.25600004;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.256, 4.512;stroke-dashoffset:0"
+ sodipodi:type="arc" />
+ <text
+ sodipodi:linespacing="125%"
+ style="font-size:192px;font-style:normal;font-weight:bold;line-height:125%;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-1-2"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="16835.086"
+ x="4273.4326"
+ xml:space="preserve"><tspan
+ id="tspan3104"
+ style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans">Wake up</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3110"
+ y="17055.541"
+ x="4585.2246"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17055.541"
+ x="4585.2246"
+ id="tspan3112"
+ sodipodi:role="line">grace-period</tspan></text>
+ <text
+ sodipodi:linespacing="125%"
+ id="text3114"
+ y="17297.08"
+ x="4582.3804"
+ style="font-size:159.57754517px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ y="17297.08"
+ x="4582.3804"
+ id="tspan3116"
+ sodipodi:role="line">kernel thread</tspan></text>
+ </g>
+ <g
+ transform="translate(1783.6576,20674.512)"
+ id="g3148-2">
+ <rect
+ ry="0"
+ id="rect118-3-5-2"
+ style="fill:none;stroke:#000000;stroke-width:30.00057983;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057963, 60.00115926;stroke-dashoffset:0"
+ rx="0"
+ height="412.66794"
+ width="3240.0085"
+ y="-4640.499"
+ x="3517.1572" />
+ <text
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"
+ id="text202-7-5-3-8"
+ font-size="192"
+ font-weight="bold"
+ font-style="normal"
+ y="-4418.6582"
+ x="4064.9268"
+ xml:space="preserve">rcu_report_qs_rsp()</text>
+ </g>
+</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/rcu_node-lock.svg b/Documentation/RCU/Design/Memory-Ordering/rcu_node-lock.svg
new file mode 100644
index 000000000000..94c96c595aed
--- /dev/null
+++ b/Documentation/RCU/Design/Memory-Ordering/rcu_node-lock.svg
@@ -0,0 +1,229 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec 9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="303.54147"
+ height="211.57945"
+ viewBox="-44 -44 4036.5336 2812.3117"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="rcu_node-lock.svg">
+ <metadata
+ id="metadata212">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs210">
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path3970"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1087"
+ inkscape:window-height="1144"
+ id="namedview208"
+ showgrid="false"
+ inkscape:zoom="1.0495049"
+ inkscape:cx="311.2033"
+ inkscape:cy="168.10913"
+ inkscape:window-x="833"
+ inkscape:window-y="28"
+ inkscape:window-maximized="0"
+ inkscape:current-layer="g4"
+ fit-margin-top="5"
+ fit-margin-right="5"
+ fit-margin-left="5"
+ fit-margin-bottom="5" />
+ <g
+ style="fill:none;stroke-width:0.025in"
+ id="g4"
+ transform="translate(-1905.5784,-4568.3024)">
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 5250 8100 - 5710 5790-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4050 9300 - 4512 7140-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1040 9300 - 1502 7140-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 2240 8100 - 2702 5940-->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 2250 8100 - 2250 6240-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 1050 9300 - 1050 7440-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 4050 9300 - 4050 7440-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 5250 8100 - 5250 6240-->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Circle -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 9300 3150 - 10860 3150-->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 11400 3600 - 11400 4410-->
+ <!-- Line: box -->
+ <rect
+ x="1943.0693"
+ y="4603.417"
+ width="3873.5518"
+ height="2650.6289"
+ rx="0"
+ style="stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:30.00057884, 60.00115769;stroke-dashoffset:0"
+ id="rect118"
+ ry="0" />
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 11400 5100 - 11400 5910-->
+ <!-- Line: box -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 9900 4650 - 10860 4650-->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 9600 6150 - 10860 6150-->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Line -->
+ <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Line: box -->
+ <!-- Text -->
+ <!-- Text -->
+ <!-- Text -->
+ <text
+ xml:space="preserve"
+ x="3105.219"
+ y="6425.6445"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rcu_accelerate_cbs()</text>
+ <!-- Text -->
+ <!-- Text -->
+ <g
+ id="g3107"
+ transform="translate(747.5807,4700.8888)">
+ <rect
+ id="rect112"
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1370.8721"
+ width="2809.1992"
+ y="949.37109"
+ x="2084.55" />
+ <rect
+ id="rect112-3"
+ style="fill:none;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
+ rx="0"
+ height="1294.8468"
+ width="2809.1992"
+ y="1025.3964"
+ x="2084.55" />
+ </g>
+ <text
+ xml:space="preserve"
+ x="2025.5763"
+ y="4825.2578"
+ font-style="normal"
+ font-weight="bold"
+ font-size="192"
+ id="text202-7"
+ style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_prepare_for_idle()</text>
+ </g>
+</svg>
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 96a3d81837e1..a08f928c8557 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -40,7 +40,9 @@ o Booting Linux using a console connection that is too slow to
o Anything that prevents RCU's grace-period kthreads from running.
This can result in the "All QSes seen" console-log message.
This message will include information on when the kthread last
- ran and how often it should be expected to run.
+ ran and how often it should be expected to run. It can also
+ result in the "rcu_.*kthread starved for" console-log message,
+ which will include additional debugging information.
o A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
happen to preempt a low-priority task in the middle of an RCU
@@ -60,6 +62,20 @@ o A CPU-bound real-time task in a CONFIG_PREEMPT_RT kernel that
CONFIG_PREEMPT_RCU case, you might see stall-warning
messages.
+o A periodic interrupt whose handler takes longer than the time
+ interval between successive pairs of interrupts. This can
+ prevent RCU's kthreads and softirq handlers from running.
+ Note that certain high-overhead debugging options, for example
+	the function_graph tracer, can result in interrupt handlers taking
+ considerably longer than normal, which can in turn result in
+ RCU CPU stall warnings.
+
+o Testing a workload on a fast system, tuning the stall-warning
+ timeout down to just barely avoid RCU CPU stall warnings, and then
+ running the same workload with the same stall-warning timeout on a
+ slow system. Note that thermal throttling and on-demand governors
+ can cause a single system to be sometimes fast and sometimes slow!
+
o A hardware or software issue shuts off the scheduler-clock
interrupt on a CPU that is not in dyntick-idle mode. This
problem really has happened, and seems to be most likely to
@@ -155,67 +171,32 @@ Interpreting RCU's CPU Stall-Detector "Splats"
For non-RCU-tasks flavors of RCU, when a CPU detects that it is stalling,
it will print a message similar to the following:
-INFO: rcu_sched_state detected stall on CPU 5 (t=2500 jiffies)
-
-This message indicates that CPU 5 detected that it was causing a stall,
-and that the stall was affecting RCU-sched. This message will normally be
-followed by a stack dump of the offending CPU. On TREE_RCU kernel builds,
-RCU and RCU-sched are implemented by the same underlying mechanism,
-while on PREEMPT_RCU kernel builds, RCU is instead implemented
-by rcu_preempt_state.
-
-On the other hand, if the offending CPU fails to print out a stall-warning
-message quickly enough, some other CPU will print a message similar to
-the following:
-
-INFO: rcu_bh_state detected stalls on CPUs/tasks: { 3 5 } (detected by 2, 2502 jiffies)
+ INFO: rcu_sched detected stalls on CPUs/tasks:
+ 2-...: (3 GPs behind) idle=06c/0/0 softirq=1453/1455 fqs=0
+ 16-...: (0 ticks this GP) idle=81c/0/0 softirq=764/764 fqs=0
+ (detected by 32, t=2603 jiffies, g=7073, c=7072, q=625)
-This message indicates that CPU 2 detected that CPUs 3 and 5 were both
-causing stalls, and that the stall was affecting RCU-bh. This message
+This message indicates that CPU 32 detected that CPUs 2 and 16 were both
+causing stalls, and that the stall was affecting RCU-sched. This message
will normally be followed by stack dumps for each CPU. Please note that
-PREEMPT_RCU builds can be stalled by tasks as well as by CPUs,
-and that the tasks will be indicated by PID, for example, "P3421".
-It is even possible for a rcu_preempt_state stall to be caused by both
-CPUs -and- tasks, in which case the offending CPUs and tasks will all
-be called out in the list.
-
-Finally, if the grace period ends just as the stall warning starts
-printing, there will be a spurious stall-warning message:
-
-INFO: rcu_bh_state detected stalls on CPUs/tasks: { } (detected by 4, 2502 jiffies)
-
-This is rare, but does happen from time to time in real life. It is also
-possible for a zero-jiffy stall to be flagged in this case, depending
-on how the stall warning and the grace-period initialization happen to
-interact. Please note that it is not possible to entirely eliminate this
-sort of false positive without resorting to things like stop_machine(),
-which is overkill for this sort of problem.
-
-Recent kernels will print a long form of the stall-warning message:
-
- INFO: rcu_preempt detected stall on CPU
- 0: (63959 ticks this GP) idle=241/3fffffffffffffff/0 softirq=82/543
- (t=65000 jiffies)
-
-In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed:
-
- INFO: rcu_preempt detected stall on CPU
- 0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 nonlazy_posted: 25 .D
- (t=65000 jiffies)
+PREEMPT_RCU builds can be stalled by tasks as well as by CPUs, and that
+the tasks will be indicated by PID, for example, "P3421". It is even
+possible for a rcu_preempt_state stall to be caused by both CPUs -and-
+tasks, in which case the offending CPUs and tasks will all be called
+out in the list.
-The "(64628 ticks this GP)" indicates that this CPU has taken more
-than 64,000 scheduling-clock interrupts during the current stalled
-grace period. If the CPU was not yet aware of the current grace
-period (for example, if it was offline), then this part of the message
-indicates how many grace periods behind the CPU is.
+CPU 2's "(3 GPs behind)" indicates that this CPU has not interacted with
+the RCU core for the past three grace periods. In contrast, CPU 16's "(0
+ticks this GP)" indicates that this CPU has not taken any scheduling-clock
+interrupts during the current stalled grace period.
The "idle=" portion of the message prints the dyntick-idle state.
The hex number before the first "/" is the low-order 12 bits of the
-dynticks counter, which will have an even-numbered value if the CPU is
-in dyntick-idle mode and an odd-numbered value otherwise. The hex
-number between the two "/"s is the value of the nesting, which will
-be a small positive number if in the idle loop and a very large positive
-number (as shown above) otherwise.
+dynticks counter, which will have an even-numbered value if the CPU
+is in dyntick-idle mode and an odd-numbered value otherwise. The hex
+number between the two "/"s is the value of the nesting, which will be
+a small non-negative number if in the idle loop (as shown above) and a
+very large positive number otherwise.
The "softirq=" portion of the message tracks the number of RCU softirq
handlers that the stalled CPU has executed. The number before the "/"
@@ -230,24 +211,72 @@ handlers are no longer able to execute on this CPU. This can happen if
the stalled CPU is spinning with interrupts disabled, or, in -rt
kernels, if a high-priority process is starving RCU's softirq handler.
-For CONFIG_RCU_FAST_NO_HZ kernels, the "last_accelerate:" prints the
-low-order 16 bits (in hex) of the jiffies counter when this CPU last
-invoked rcu_try_advance_all_cbs() from rcu_needs_cpu() or last invoked
-rcu_accelerate_cbs() from rcu_prepare_for_idle(). The "nonlazy_posted:"
-prints the number of non-lazy callbacks posted since the last call to
-rcu_needs_cpu(). Finally, an "L" indicates that there are currently
-no non-lazy callbacks ("." is printed otherwise, as shown above) and
-"D" indicates that dyntick-idle processing is enabled ("." is printed
-otherwise, for example, if disabled via the "nohz=" kernel boot parameter).
+The "fps=" shows the number of force-quiescent-state idle/offline
+detection passes that the grace-period kthread has made across this
+CPU since the last time that this CPU noted the beginning of a grace
+period.
+
+The "detected by" line indicates which CPU detected the stall (in this
+case, CPU 32), how many jiffies have elapsed since the start of the
+grace period (in this case 2603), the number of the last grace period
+to start and to complete (7073 and 7072, respectively), and an estimate
+of the total number of RCU callbacks queued across all CPUs (625 in
+this case).
+
+In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed
+for each CPU:
+
+ 0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 nonlazy_posted: 25 .D
+
+The "last_accelerate:" prints the low-order 16 bits (in hex) of the
+jiffies counter when this CPU last invoked rcu_try_advance_all_cbs()
+from rcu_needs_cpu() or last invoked rcu_accelerate_cbs() from
+rcu_prepare_for_idle(). The "nonlazy_posted:" prints the number
+of non-lazy callbacks posted since the last call to rcu_needs_cpu().
+Finally, an "L" indicates that there are currently no non-lazy callbacks
+("." is printed otherwise, as shown above) and "D" indicates that
+dyntick-idle processing is enabled ("." is printed otherwise, for example,
+if disabled via the "nohz=" kernel boot parameter).
+
+If the grace period ends just as the stall warning starts printing,
+there will be a spurious stall-warning message, which will include
+the following:
+
+ INFO: Stall ended before state dump start
+
+This is rare, but does happen from time to time in real life. It is also
+possible for a zero-jiffy stall to be flagged in this case, depending
+on how the stall warning and the grace-period initialization happen to
+interact. Please note that it is not possible to entirely eliminate this
+sort of false positive without resorting to things like stop_machine(),
+which is overkill for this sort of problem.
+
+If all CPUs and tasks have passed through quiescent states, but the
+grace period has nevertheless failed to end, the stall-warning splat
+will include something like the following:
+
+ All QSes seen, last rcu_preempt kthread activity 23807 (4297905177-4297881370), jiffies_till_next_fqs=3, root ->qsmask 0x0
+
+The "23807" indicates that it has been more than 23 thousand jiffies
+since the grace-period kthread ran. The "jiffies_till_next_fqs"
+indicates how frequently that kthread should run, giving the number
+of jiffies between force-quiescent-state scans, in this case three,
+which is way less than 23807. Finally, the root rcu_node structure's
+->qsmask field is printed, which will normally be zero.
If the relevant grace-period kthread has been unable to run prior to
-the stall warning, the following additional line is printed:
+the stall warning, as was the case in the "All QSes seen" line above,
+the following additional line is printed:
- rcu_preempt kthread starved for 2023 jiffies!
+ kthread starved for 23807 jiffies! g7073 c7072 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1
-Starving the grace-period kthreads of CPU time can of course result in
-RCU CPU stall warnings even when all CPUs and tasks have passed through
-the required quiescent states.
+Starving the grace-period kthreads of CPU time can of course result
+in RCU CPU stall warnings even when all CPUs and tasks have passed
+through the required quiescent states. The "g" and "c" numbers flag the
+number of the last grace period started and completed, respectively,
+the "f" precedes the ->gp_flags command to the grace-period kthread,
+the "RCU_GP_WAIT_FQS" indicates that the kthread is waiting for a short
+timeout, and the "state" precedes the value of the task_struct ->state field.
Multiple Warnings From One Stall
@@ -264,13 +293,28 @@ Stall Warnings for Expedited Grace Periods
If an expedited grace period detects a stall, it will place a message
like the following in dmesg:
- INFO: rcu_sched detected expedited stalls on CPUs: { 1 2 6 } 26009 jiffies s: 1043
-
-This indicates that CPUs 1, 2, and 6 have failed to respond to a
-reschedule IPI, that the expedited grace period has been going on for
-26,009 jiffies, and that the expedited grace-period sequence counter is
-1043. The fact that this last value is odd indicates that an expedited
-grace period is in flight.
+ INFO: rcu_sched detected expedited stalls on CPUs/tasks: { 7-... } 21119 jiffies s: 73 root: 0x2/.
+
+This indicates that CPU 7 has failed to respond to a reschedule IPI.
+The three periods (".") following the CPU number indicate that the CPU
+is online (otherwise the first period would instead have been "O"),
+that the CPU was online at the beginning of the expedited grace period
+(otherwise the second period would have instead been "o"), and that
+the CPU has been online at least once since boot (otherwise, the third
+period would instead have been "N"). The number before the "jiffies"
+indicates that the expedited grace period has been going on for 21,119
+jiffies. The number following the "s:" indicates that the expedited
+grace-period sequence counter is 73. The fact that this last value is
+odd indicates that an expedited grace period is in flight. The number
+following "root:" is a bitmask that indicates which children of the root
+rcu_node structure correspond to CPUs and/or tasks that are blocking the
+current expedited grace period. If the tree had more than one level,
+additional hex numbers would be printed for the states of the other
+rcu_node structures in the tree.
+
+As with normal grace periods, PREEMPT_RCU builds can be stalled by
+tasks as well as by CPUs, and the tasks will be indicated by PID,
+for example, "P3421".
It is entirely possible to see stall warnings from normal and from
-expedited grace periods at about the same time from the same run.
+expedited grace periods at about the same time during the same run.
diff --git a/Documentation/acpi/lpit.txt b/Documentation/acpi/lpit.txt
new file mode 100644
index 000000000000..b426398d2e97
--- /dev/null
+++ b/Documentation/acpi/lpit.txt
@@ -0,0 +1,25 @@
+To enumerate platform Low Power Idle states, Intel platforms use the
+"Low Power Idle Table" (LPIT). More details about this table can be
+found at:
+http://www.uefi.org/sites/default/files/resources/Intel_ACPI_Low_Power_S0_Idle.pdf
+
+Residencies for each low power state can be read via FFH
+(Function fixed hardware) or a memory mapped interface.
+
+On platforms supporting S0ix sleep states, there can be two types of
+residencies:
+- CPU PKG C10 (Read via FFH interface)
+- Platform Controller Hub (PCH) SLP_S0 (Read via memory mapped interface)
+
+The following attributes are added dynamically to the cpuidle
+sysfs attribute group:
+ /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us
+ /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us
+
+The "low_power_idle_cpu_residency_us" attribute shows time spent
+by the CPU package in PKG C10
+
+The "low_power_idle_system_residency_us" attribute shows SLP_S0
+residency, or system time spent with the SLP_S0# signal asserted.
+This is the lowest possible system power state, achieved only when the CPU is
+in PKG C10 and all functional blocks in the PCH are in a low power state.
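+
+As an informal illustration (not part of the interface description above;
+the helper name is arbitrary and error handling is minimal), the attributes
+can be read like any other sysfs file:
+
+#include <stdio.h>
+
+/* Read one residency counter (in microseconds); returns -1 on error. */
+static long long read_residency(const char *path)
+{
+	FILE *f = fopen(path, "r");
+	long long us = -1;
+
+	if (!f)
+		return -1;
+	if (fscanf(f, "%lld", &us) != 1)
+		us = -1;
+	fclose(f);
+	return us;
+}
+
+int main(void)
+{
+	printf("PKG C10 residency: %lld us\n", read_residency(
+		"/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us"));
+	printf("SLP_S0 residency:  %lld us\n", read_residency(
+		"/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us"));
+	return 0;
+}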
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index b5343c5aa224..63066db39910 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -350,7 +350,7 @@ If something goes wrong
help debugging the problem. The text above the dump is also
important: it tells something about why the kernel dumped code (in
the above example, it's due to a bad kernel pointer). More information
- on making sense of the dump is in Documentation/admin-guide/oops-tracing.rst
+ on making sense of the dump is in Documentation/admin-guide/bug-hunting.rst
- If you compiled the kernel with CONFIG_KALLSYMS you can send the dump
as is, otherwise you will have to use the ``ksymoops`` program to make
diff --git a/Documentation/admin-guide/bug-hunting.rst b/Documentation/admin-guide/bug-hunting.rst
index 08c4b1308189..f278b289e260 100644
--- a/Documentation/admin-guide/bug-hunting.rst
+++ b/Documentation/admin-guide/bug-hunting.rst
@@ -240,7 +240,7 @@ In order to report it upstream, you should identify the mailing list
used for the development of the affected code. This can be done by using
the ``get_maintainer.pl`` script.
-For example, if you find a bug at the gspca's conex.c file, you can get
+For example, if you find a bug in the gspca's sonixj.c file, you can get
their maintainers with::
$ ./scripts/get_maintainer.pl -f drivers/media/usb/gspca/sonixj.c
@@ -257,7 +257,7 @@ Please notice that it will point to:
Tejun and Bhaktipriya (in this specific case, none really involved in the
development of this file);
- The driver maintainer (Hans Verkuil);
-- The subsystem maintainer (Mauro Carvalho Chehab)
+- The subsystem maintainer (Mauro Carvalho Chehab);
- The driver and/or subsystem mailing list (linux-media@vger.kernel.org);
- the Linux Kernel mailing list (linux-kernel@vger.kernel.org).
@@ -274,14 +274,14 @@ Fixing the bug
--------------
If you know programming, you could help us by not only reporting the bug,
-but also providing us with a solution. After all open source is about
+but also providing us with a solution. After all, open source is about
sharing what you do and don't you want to be recognised for your genius?
If you decide to take this way, once you have worked out a fix please submit
it upstream.
Please do read
-ref:`Documentation/process/submitting-patches.rst <submittingpatches>` though
+:ref:`Documentation/process/submitting-patches.rst <submittingpatches>` though
to help your code get accepted.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 05496622b4ef..5ab1089d1422 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -314,7 +314,7 @@
amijoy.map= [HW,JOY] Amiga joystick support
Map of devices attached to JOY0DAT and JOY1DAT
Format: <a>,<b>
- See also Documentation/input/joystick.txt
+ See also Documentation/input/joydev/joystick.rst
analog.map= [HW,JOY] Analog joystick and gamepad support
Specifies type or capabilities of an analog joystick
@@ -439,7 +439,7 @@
bttv.card= [HW,V4L] bttv (bt848 + bt878 based grabber cards)
bttv.radio= Most important insmod options are available as
kernel args too.
- bttv.pll= See Documentation/video4linux/bttv/Insmod-options
+ bttv.pll= See Documentation/media/v4l-drivers/bttv.rst
bttv.tuner=
bulk_remove=off [PPC] This parameter disables the use of the pSeries
@@ -641,8 +641,8 @@
For now, only VisioBraille is supported.
consoleblank= [KNL] The console blank (screen saver) timeout in
- seconds. Defaults to 10*60 = 10mins. A value of 0
- disables the blank timer.
+ seconds. A value of 0 disables the blank timer.
+ Defaults to 0.
coredump_filter=
[KNL] Change the default value for
@@ -709,6 +709,9 @@
It will be ignored when crashkernel=X,high is not used
or memory reserved is below 4G.
+ crossrelease_fullstack
+			[KNL] Allow recording a full stack trace in cross-release
+
cryptomgr.notests
[KNL] Disable crypto self-tests
@@ -724,7 +727,7 @@
db9.dev[2|3]= [HW,JOY] Multisystem joystick support via parallel port
(one device per port)
Format: <port#>,<type>
- See also Documentation/input/joystick-parport.txt
+ See also Documentation/input/devices/joystick-parport.rst
ddebug_query= [KNL,DYNAMIC_DEBUG] Enable debug messages at early boot
time. See
@@ -854,7 +857,7 @@
The filter can be disabled or changed to another
driver later using sysfs.
- drm_kms_helper.edid_firmware=[<connector>:]<file>[,[<connector>:]<file>]
+ drm.edid_firmware=[<connector>:]<file>[,[<connector>:]<file>]
Broken monitors, graphic adapters, KVMs and EDIDless
panels may send no or incorrect EDID data sets.
This parameter allows specifying EDID data sets
@@ -1220,7 +1223,7 @@
[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
support via parallel port (up to 5 devices per port)
Format: <port#>,<pad1>,<pad2>,<pad3>,<pad4>,<pad5>
- See also Documentation/input/joystick-parport.txt
+ See also Documentation/input/devices/joystick-parport.rst
gamma= [HW,DRM]
@@ -1713,6 +1716,13 @@
irqaffinity= [SMP] Set the default irq affinity mask
The argument is a cpu list, as described above.
+ irqchip.gicv2_force_probe=
+ [ARM, ARM64]
+ Format: <bool>
+ Force the kernel to look for the second 4kB page
+ of a GICv2 controller even if the memory range
+ exposed by the device tree is too small.
+
irqfixup [HW]
When an interrupt is not handled search all handlers
for it. Intended to get systems with badly broken
@@ -1727,20 +1737,33 @@
isapnp= [ISAPNP]
Format: <RDP>,<reset>,<pci_scan>,<verbosity>
- isolcpus= [KNL,SMP] Isolate CPUs from the general scheduler.
- The argument is a cpu list, as described above.
+ isolcpus= [KNL,SMP] Isolate a given set of CPUs from disturbance.
+ [Deprecated - use cpusets instead]
+ Format: [flag-list,]<cpu-list>
+
+ Specify one or more CPUs to isolate from disturbances
+ specified in the flag list (default: domain):
+
+ nohz
+ Disable the tick when a single task runs.
+ domain
+ Isolate from the general SMP balancing and scheduling
+ algorithms. Note that performing domain isolation this way
+			  is irreversible: it's not possible to bring a CPU back to
+ the domains once isolated through isolcpus. It's strongly
+ advised to use cpusets instead to disable scheduler load
+ balancing through the "cpuset.sched_load_balance" file.
+ It offers a much more flexible interface where CPUs can
+ move in and out of an isolated set anytime.
+
+ You can move a process onto or off an "isolated" CPU via
+ the CPU affinity syscalls or cpuset.
+ <cpu number> begins at 0 and the maximum value is
+ "number of CPUs in system - 1".
+
+ The format of <cpu-list> is described above.
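+
+			As an illustrative example (the cpu-list is
+			arbitrary), "isolcpus=nohz,domain,2-7" applies
+			both flags to CPUs 2 through 7.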
- This option can be used to specify one or more CPUs
- to isolate from the general SMP balancing and scheduling
- algorithms. You can move a process onto or off an
- "isolated" CPU via the CPU affinity syscalls or cpuset.
- <cpu number> begins at 0 and the maximum value is
- "number of CPUs in system - 1".
- This option is the preferred way to isolate CPUs. The
- alternative -- manually setting the CPU mask of all
- tasks in the system -- can cause problems and
- suboptimal load balancer performance.
iucv= [HW,NET]
@@ -1766,7 +1789,7 @@
ivrs_acpihid[00:14.5]=AMD0020:0
js= [HW,JOY] Analog joystick
- See Documentation/input/joystick.txt.
+ See Documentation/input/joydev/joystick.rst.
nokaslr [KNL]
When CONFIG_RANDOMIZE_BASE is set, this disables
@@ -1841,13 +1864,6 @@
Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
the default is off.
- kmemcheck= [X86] Boot-time kmemcheck enable/disable/one-shot mode
- Valid arguments: 0, 1, 2
- kmemcheck=0 (disabled)
- kmemcheck=1 (enabled)
- kmemcheck=2 (one-shot mode)
- Default: 2 (one-shot mode)
-
kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
Default is 0 (don't ignore, but inject #GP)
@@ -2248,10 +2264,10 @@
s2idle - Suspend-To-Idle
shallow - Power-On Suspend or equivalent (if supported)
deep - Suspend-To-RAM or equivalent (if supported)
- See Documentation/power/states.txt.
+ See Documentation/admin-guide/pm/sleep-states.rst.
meye.*= [HW] Set MotionEye Camera parameters
- See Documentation/video4linux/meye.txt.
+ See Documentation/media/v4l-drivers/meye.rst.
mfgpt_irq= [IA-32] Specify the IRQ to use for the
Multi-Function General Purpose Timers on AMD Geode
@@ -2548,6 +2564,9 @@
noalign [KNL,ARM]
+ noaltinstr [S390] Disables alternative instructions patching
+ (CPU alternatives feature).
+
noapic [SMP,APIC] Tells the kernel to not make use of any
IOAPICs that may be present in the system.
@@ -3134,7 +3153,7 @@
plip= [PPT,NET] Parallel port network link
Format: { parport<nr> | timid | 0 }
- See also Documentation/parport.txt.
+ See also Documentation/admin-guide/parport.rst.
pmtmr= [X86] Manual setup of pmtmr I/O Port.
Override pmtimer IOPort with a hex value.
@@ -3539,6 +3558,9 @@
rcutorture.stall_cpu_holdoff= [KNL]
Time to wait (s) after boot before inducing stall.
+ rcutorture.stall_cpu_irqsoff= [KNL]
+ Disable interrupts while stalling if set.
+
rcutorture.stat_interval= [KNL]
Time (s) between statistics printk()s.
@@ -3885,6 +3907,12 @@
[KNL] Should the soft-lockup detector generate panics.
Format: <integer>
+			A nonzero value instructs the soft-lockup detector
+			to panic the machine when a soft-lockup occurs. This
+			behaviour is also controlled by
+			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC, the corresponding
+			build-time switch for this functionality.
+
softlockup_all_cpu_backtrace=
[KNL] Should the soft-lockup detector generate
backtraces on all cpus.
@@ -4194,12 +4222,15 @@
Used to run time disable IRQ_TIME_ACCOUNTING on any
platforms where RDTSC is slow and this accounting
can add overhead.
+			[x86] unstable: mark the TSC clocksource as unstable.
+			This marks the TSC unconditionally unstable at bootup
+			and avoids any further wobbles once the TSC watchdog
+			notices.
turbografx.map[2|3]= [HW,JOY]
TurboGraFX parallel port interface
Format:
<port#>,<js1>,<js2>,<js3>,<js4>,<js5>,<js6>,<js7>
- See also Documentation/input/joystick-parport.txt
+ See also Documentation/input/devices/joystick-parport.rst
udbg-immortal [PPC] When debugging early kernel crashes that
happen after console_init() and before a proper
diff --git a/Documentation/admin-guide/reporting-bugs.rst b/Documentation/admin-guide/reporting-bugs.rst
index 26b60b419652..4650edb8840a 100644
--- a/Documentation/admin-guide/reporting-bugs.rst
+++ b/Documentation/admin-guide/reporting-bugs.rst
@@ -94,7 +94,7 @@ step-by-step instructions for how a user can trigger the bug.
If the failure includes an "OOPS:", take a picture of the screen, capture
a netconsole trace, or type the message from your screen into the bug
-report. Please read "Documentation/admin-guide/oops-tracing.rst" before posting your
+report. Please read "Documentation/admin-guide/bug-hunting.rst" before posting your
bug report. This explains what you should do with the "Oops" information
to make it useful to the recipient.
@@ -120,7 +120,7 @@ summary from [1.]>" for easy identification by the developers::
[4.2.] Kernel .config file:
[5.] Most recent kernel version which did not have the bug:
[6.] Output of Oops.. message (if applicable) with symbolic information
- resolved (see Documentation/admin-guide/oops-tracing.rst)
+ resolved (see Documentation/admin-guide/bug-hunting.rst)
[7.] A small shell script or example program which triggers the
problem (if possible)
[8.] Environment
diff --git a/Documentation/arm64/cpu-feature-registers.txt b/Documentation/arm64/cpu-feature-registers.txt
index dad411d635d8..bd9b3faab2c4 100644
--- a/Documentation/arm64/cpu-feature-registers.txt
+++ b/Documentation/arm64/cpu-feature-registers.txt
@@ -110,10 +110,20 @@ infrastructure:
x--------------------------------------------------x
| Name | bits | visible |
|--------------------------------------------------|
- | RES0 | [63-32] | n |
+ | RES0 | [63-48] | n |
+ |--------------------------------------------------|
+ | DP | [47-44] | y |
+ |--------------------------------------------------|
+ | SM4 | [43-40] | y |
+ |--------------------------------------------------|
+ | SM3 | [39-36] | y |
+ |--------------------------------------------------|
+ | SHA3 | [35-32] | y |
|--------------------------------------------------|
| RDM | [31-28] | y |
|--------------------------------------------------|
+ | RES0 | [27-24] | n |
+ |--------------------------------------------------|
| ATOMICS | [23-20] | y |
|--------------------------------------------------|
| CRC32 | [19-16] | y |
@@ -132,7 +142,11 @@ infrastructure:
x--------------------------------------------------x
| Name | bits | visible |
|--------------------------------------------------|
- | RES0 | [63-28] | n |
+ | RES0 | [63-36] | n |
+ |--------------------------------------------------|
+ | SVE | [35-32] | y |
+ |--------------------------------------------------|
+ | RES0 | [31-28] | n |
|--------------------------------------------------|
| GIC | [27-24] | n |
|--------------------------------------------------|
diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt
new file mode 100644
index 000000000000..89edba12a9e0
--- /dev/null
+++ b/Documentation/arm64/elf_hwcaps.txt
@@ -0,0 +1,160 @@
+ARM64 ELF hwcaps
+================
+
+This document describes the usage and semantics of the arm64 ELF hwcaps.
+
+
+1. Introduction
+---------------
+
+Some hardware or software features are only available on some CPU
+implementations, and/or with certain kernel configurations, but have no
+architected discovery mechanism available to userspace code at EL0. The
+kernel exposes the presence of these features to userspace through a set
+of flags called hwcaps, exposed in the auxiliary vector.
+
+Userspace software can test for features by acquiring the AT_HWCAP entry
+of the auxiliary vector, and testing whether the relevant flags are
+set, e.g.
+
+/* getauxval() and AT_HWCAP come from <sys/auxv.h>; HWCAP_* flags are in the
+ * <asm/hwcap.h> uapi header (many C libraries also provide them). */
+#include <stdbool.h>
+#include <sys/auxv.h>
+#include <asm/hwcap.h>
+
+bool floating_point_is_present(void)
+{
+	unsigned long hwcaps = getauxval(AT_HWCAP);
+
+	if (hwcaps & HWCAP_FP)
+		return true;
+
+	return false;
+}
+
+Where software relies on a feature described by a hwcap, it should check
+the relevant hwcap flag to verify that the feature is present before
+attempting to make use of the feature.
+
+Features cannot be probed reliably through other means. When a feature
+is not available, attempting to use it may result in unpredictable
+behaviour, and is not guaranteed to result in any reliable indication
+that the feature is unavailable, such as a SIGILL.
+
+
+2. Interpretation of hwcaps
+---------------------------
+
+The majority of hwcaps are intended to indicate the presence of features
+which are described by architected ID registers inaccessible to
+userspace code at EL0. These hwcaps are defined in terms of ID register
+fields, and should be interpreted with reference to the definition of
+these fields in the ARM Architecture Reference Manual (ARM ARM).
+
+Such hwcaps are described below in the form:
+
+ Functionality implied by idreg.field == val.
+
+Such hwcaps indicate the availability of functionality that the ARM ARM
+defines as being present when idreg.field has value val, but do not
+indicate that idreg.field is precisely equal to val, nor do they
+indicate the absence of functionality implied by other values of
+idreg.field.
+
+Other hwcaps may indicate the presence of features which cannot be
+described by ID registers alone. These may be described without
+reference to ID registers, and may refer to other documentation.
+
+
+3. The hwcaps exposed in AT_HWCAP
+---------------------------------
+
+HWCAP_FP
+
+ Functionality implied by ID_AA64PFR0_EL1.FP == 0b0000.
+
+HWCAP_ASIMD
+
+ Functionality implied by ID_AA64PFR0_EL1.AdvSIMD == 0b0000.
+
+HWCAP_EVTSTRM
+
+ The generic timer is configured to generate events at a frequency of
+ approximately 100KHz.
+
+HWCAP_AES
+
+    Functionality implied by ID_AA64ISAR0_EL1.AES == 0b0001.
+
+HWCAP_PMULL
+
+    Functionality implied by ID_AA64ISAR0_EL1.AES == 0b0010.
+
+HWCAP_SHA1
+
+ Functionality implied by ID_AA64ISAR0_EL1.SHA1 == 0b0001.
+
+HWCAP_SHA2
+
+ Functionality implied by ID_AA64ISAR0_EL1.SHA2 == 0b0001.
+
+HWCAP_CRC32
+
+ Functionality implied by ID_AA64ISAR0_EL1.CRC32 == 0b0001.
+
+HWCAP_ATOMICS
+
+ Functionality implied by ID_AA64ISAR0_EL1.Atomic == 0b0010.
+
+HWCAP_FPHP
+
+ Functionality implied by ID_AA64PFR0_EL1.FP == 0b0001.
+
+HWCAP_ASIMDHP
+
+ Functionality implied by ID_AA64PFR0_EL1.AdvSIMD == 0b0001.
+
+HWCAP_CPUID
+
+ EL0 access to certain ID registers is available, to the extent
+ described by Documentation/arm64/cpu-feature-registers.txt.
+
+ These ID registers may imply the availability of features.
+
+HWCAP_ASIMDRDM
+
+ Functionality implied by ID_AA64ISAR0_EL1.RDM == 0b0001.
+
+HWCAP_JSCVT
+
+ Functionality implied by ID_AA64ISAR1_EL1.JSCVT == 0b0001.
+
+HWCAP_FCMA
+
+ Functionality implied by ID_AA64ISAR1_EL1.FCMA == 0b0001.
+
+HWCAP_LRCPC
+
+ Functionality implied by ID_AA64ISAR1_EL1.LRCPC == 0b0001.
+
+HWCAP_DCPOP
+
+ Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0001.
+
+HWCAP_SHA3
+
+ Functionality implied by ID_AA64ISAR0_EL1.SHA3 == 0b0001.
+
+HWCAP_SM3
+
+ Functionality implied by ID_AA64ISAR0_EL1.SM3 == 0b0001.
+
+HWCAP_SM4
+
+ Functionality implied by ID_AA64ISAR0_EL1.SM4 == 0b0001.
+
+HWCAP_ASIMDDP
+
+ Functionality implied by ID_AA64ISAR0_EL1.DP == 0b0001.
+
+HWCAP_SHA512
+
+    Functionality implied by ID_AA64ISAR0_EL1.SHA2 == 0b0010.
+
+HWCAP_SVE
+
+ Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001.
diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
index d7273a5f6456..671bc0639262 100644
--- a/Documentation/arm64/memory.txt
+++ b/Documentation/arm64/memory.txt
@@ -86,9 +86,9 @@ Translation table lookup with 64KB pages:
+-------------------------------------------------> [63] TTBR0/1
-When using KVM, the hypervisor maps kernel pages in EL2, at a fixed
-offset from the kernel VA (top 24bits of the kernel VA set to zero):
+When using KVM without the Virtualization Host Extensions, the hypervisor
+maps kernel pages in EL2 at a fixed offset from the kernel VA. See the
+kern_hyp_va macro for more details.
-Start End Size Use
------------------------------------------------------------------------
-0000004000000000 0000007fffffffff 256GB kernel objects mapped in HYP
+When using KVM with the Virtualization Host Extensions, no additional
+mappings are created, since the host kernel runs directly in EL2.
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 66e8ce14d23d..304bf22bb83c 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -70,6 +70,7 @@ stable kernels.
| | | | |
| Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 |
| Hisilicon | Hip0{6,7} | #161010701 | N/A |
+| Hisilicon | Hip07 | #161600802 | HISILICON_ERRATUM_161600802 |
| | | | |
| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
| Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt
new file mode 100644
index 000000000000..f128f736b4a5
--- /dev/null
+++ b/Documentation/arm64/sve.txt
@@ -0,0 +1,508 @@
+ Scalable Vector Extension support for AArch64 Linux
+ ===================================================
+
+Author: Dave Martin <Dave.Martin@arm.com>
+Date: 4 August 2017
+
+This document outlines briefly the interface provided to userspace by Linux in
+order to support use of the ARM Scalable Vector Extension (SVE).
+
+This is an outline of the most important features and issues only and not
+intended to be exhaustive.
+
+This document does not aim to describe the SVE architecture or programmer's
+model. To aid understanding, a minimal description of relevant programmer's
+model features for SVE is included in Appendix A.
+
+
+1. General
+-----------
+
+* The SVE registers Z0..Z31, P0..P15 and FFR, and the current vector length
+  VL, are tracked per-thread.
+
+* The presence of SVE is reported to userspace via HWCAP_SVE in the aux vector
+ AT_HWCAP entry. Presence of this flag implies the presence of the SVE
+ instructions and registers, and the Linux-specific system interfaces
+ described in this document. SVE is reported in /proc/cpuinfo as "sve".
+
+* Support for the execution of SVE instructions in userspace can also be
+ detected by reading the CPU ID register ID_AA64PFR0_EL1 using an MRS
+ instruction, and checking that the value of the SVE field is nonzero. [3]
+
+  This check does not guarantee the presence of the system interfaces
+  described in the following sections: software that needs to verify that
+  those interfaces are present must check for HWCAP_SVE instead.
+
+* Debuggers should restrict themselves to interacting with the target via the
+ NT_ARM_SVE regset. The recommended way of detecting support for this regset
+ is to connect to a target process first and then attempt a
+ ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov).
+
+
+2. Vector length terminology
+-----------------------------
+
+The size of an SVE vector (Z) register is referred to as the "vector length".
+
+To avoid confusion about the units used to express vector length, the kernel
+adopts the following conventions:
+
+* Vector length (VL) = size of a Z-register in bytes
+
+* Vector quadwords (VQ) = size of a Z-register in units of 128 bits
+
+(So, VL = 16 * VQ.)
+
+The VQ convention is used where the underlying granularity is important, such
+as in data structure definitions. In most other situations, the VL convention
+is used. This is consistent with the meaning of the "VL" pseudo-register in
+the SVE instruction set architecture.
+
+
+3. System call behaviour
+-------------------------
+
+* On syscall, V0..V31 are preserved (as without SVE). Thus, bits [127:0] of
+ Z0..Z31 are preserved. All other bits of Z0..Z31, and all of P0..P15 and FFR
+ become unspecified on return from a syscall.
+
+* The SVE registers are not used to pass arguments to or receive results from
+ any syscall.
+
+* In practice the affected registers/bits will be preserved or will be replaced
+ with zeros on return from a syscall, but userspace should not make
+ assumptions about this. The kernel behaviour may vary on a case-by-case
+ basis.
+
+* All other SVE state of a thread, including the currently configured vector
+ length, the state of the PR_SVE_VL_INHERIT flag, and the deferred vector
+ length (if any), is preserved across all syscalls, subject to the specific
+ exceptions for execve() described in section 6.
+
+ In particular, on return from a fork() or clone(), the parent and new child
+ process or thread share identical SVE configuration, matching that of the
+ parent before the call.
+
+
+4. Signal handling
+-------------------
+
+* A new signal frame record sve_context encodes the SVE registers on signal
+ delivery. [1]
+
+* This record is supplementary to fpsimd_context. The FPSR and FPCR registers
+ are only present in fpsimd_context. For convenience, the content of V0..V31
+ is duplicated between sve_context and fpsimd_context.
+
+* The signal frame record for SVE always contains basic metadata, in particular
+ the thread's vector length (in sve_context.vl).
+
+* The SVE registers may or may not be included in the record, depending on
+ whether the registers are live for the thread. The registers are present if
+ and only if:
+ sve_context.head.size >= SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl)).
+
+* If the registers are present, the remainder of the record has a vl-dependent
+ size and layout. Macros SVE_SIG_* are defined [1] to facilitate access to
+ the members.
+
+* If the SVE context is too big to fit in sigcontext.__reserved[], then extra
+  space is allocated on the stack, and an extra_context record is written in
+ __reserved[] referencing this space. sve_context is then written in the
+ extra space. Refer to [1] for further details about this mechanism.
+
+
+5. Signal return
+-----------------
+
+When returning from a signal handler:
+
+* If there is no sve_context record in the signal frame, or if the record is
+  present but contains no register data as described in the previous section,
+ then the SVE registers/bits become non-live and take unspecified values.
+
+* If sve_context is present in the signal frame and contains full register
+ data, the SVE registers become live and are populated with the specified
+ data. However, for backward compatibility reasons, bits [127:0] of Z0..Z31
+ are always restored from the corresponding members of fpsimd_context.vregs[]
+ and not from sve_context. The remaining bits are restored from sve_context.
+
+* Inclusion of fpsimd_context in the signal frame remains mandatory,
+ irrespective of whether sve_context is present or not.
+
+* The vector length cannot be changed via signal return. If sve_context.vl in
+ the signal frame does not match the current vector length, the signal return
+ attempt is treated as illegal, resulting in a forced SIGSEGV.
+
+
+6. prctl extensions
+--------------------
+
+Some new prctl() calls are added to allow programs to manage the SVE vector
+length:
+
+prctl(PR_SVE_SET_VL, unsigned long arg)
+
+ Sets the vector length of the calling thread and related flags, where
+ arg == vl | flags. Other threads of the calling process are unaffected.
+
+ vl is the desired vector length, where sve_vl_valid(vl) must be true.
+
+ flags:
+
+    PR_SVE_VL_INHERIT
+
+ Inherit the current vector length across execve(). Otherwise, the
+ vector length is reset to the system default at execve(). (See
+ Section 9.)
+
+ PR_SVE_SET_VL_ONEXEC
+
+ Defer the requested vector length change until the next execve()
+ performed by this thread.
+
+	The effect is equivalent to implicit execution of the following
+ call immediately after the next execve() (if any) by the thread:
+
+ prctl(PR_SVE_SET_VL, arg & ~PR_SVE_SET_VL_ONEXEC)
+
+ This allows launching of a new program with a different vector
+ length, while avoiding runtime side effects in the caller.
+
+
+ Without PR_SVE_SET_VL_ONEXEC, the requested change takes effect
+ immediately.
+
+
+    Return value: a nonnegative value on success, or a negative value on error:
+ EINVAL: SVE not supported, invalid vector length requested, or
+ invalid flags.
+
+
+ On success:
+
+ * Either the calling thread's vector length or the deferred vector length
+ to be applied at the next execve() by the thread (dependent on whether
+ PR_SVE_SET_VL_ONEXEC is present in arg), is set to the largest value
+ supported by the system that is less than or equal to vl. If vl ==
+ SVE_VL_MAX, the value set will be the largest value supported by the
+ system.
+
+ * Any previously outstanding deferred vector length change in the calling
+ thread is cancelled.
+
+ * The returned value describes the resulting configuration, encoded as for
+ PR_SVE_GET_VL. The vector length reported in this value is the new
+ current vector length for this thread if PR_SVE_SET_VL_ONEXEC was not
+ present in arg; otherwise, the reported vector length is the deferred
+ vector length that will be applied at the next execve() by the calling
+ thread.
+
+ * Changing the vector length causes all of P0..P15, FFR and all bits of
+      Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become
+ unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current
+ vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC
+ flag, does not constitute a change to the vector length for this purpose.
+
+
+prctl(PR_SVE_GET_VL)
+
+ Gets the vector length of the calling thread.
+
+ The following flag may be OR-ed into the result:
+
+      PR_SVE_VL_INHERIT
+
+ Vector length will be inherited across execve().
+
+ There is no way to determine whether there is an outstanding deferred
+ vector length change (which would only normally be the case between a
+ fork() or vfork() and the corresponding execve() in typical use).
+
+    To extract the vector length from the result, bitwise AND it with
+ PR_SVE_VL_LEN_MASK.
+
+ Return value: a nonnegative value on success, or a negative value on error:
+ EINVAL: SVE not supported.
+
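+The following fragment is an informal sketch only, not part of the interface
+description above: it assumes that <sys/prctl.h> exposes the PR_SVE_*
+constants (a sufficiently new <linux/prctl.h> may be needed instead), and the
+function names are arbitrary.
+
+	#include <sys/prctl.h>
+
+	/* Ask for a 256-bit (32-byte) vector length for this thread. */
+	int request_vl_256(void)
+	{
+		return prctl(PR_SVE_SET_VL, 32);	/* vl = 32, no flags */
+	}
+
+	/* Return the current vector length in bytes, or -1 on error. */
+	int current_vl(void)
+	{
+		int ret = prctl(PR_SVE_GET_VL);
+
+		if (ret < 0)
+			return -1;
+		return ret & PR_SVE_VL_LEN_MASK;
+	}
+
+As described above, the vector length actually set may be smaller than the
+one requested, so a caller that needs the exact value should inspect the
+return value (or call PR_SVE_GET_VL again) rather than assume the request
+was honoured exactly.
+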
+
+7. ptrace extensions
+---------------------
+
+* A new regset NT_ARM_SVE is defined for use with PTRACE_GETREGSET and
+ PTRACE_SETREGSET.
+
+ Refer to [2] for definitions.
+
+The regset data starts with struct user_sve_header, containing:
+
+ size
+
+ Size of the complete regset, in bytes.
+ This depends on vl and possibly on other things in the future.
+
+ If a call to PTRACE_GETREGSET requests less data than the value of
+ size, the caller can allocate a larger buffer and retry in order to
+ read the complete regset.
+
+ max_size
+
+ Maximum size in bytes that the regset can grow to for the target
+ thread. The regset won't grow bigger than this even if the target
+ thread changes its vector length etc.
+
+ vl
+
+ Target thread's current vector length, in bytes.
+
+ max_vl
+
+ Maximum possible vector length for the target thread.
+
+ flags
+
+ either
+
+ SVE_PT_REGS_FPSIMD
+
+ SVE registers are not live (GETREGSET) or are to be made
+ non-live (SETREGSET).
+
+ The payload is of type struct user_fpsimd_state, with the same
+ meaning as for NT_PRFPREG, starting at offset
+ SVE_PT_FPSIMD_OFFSET from the start of user_sve_header.
+
+ Extra data might be appended in the future: the size of the
+ payload should be obtained using SVE_PT_FPSIMD_SIZE(vq, flags).
+
+ vq should be obtained using sve_vq_from_vl(vl).
+
+ or
+
+ SVE_PT_REGS_SVE
+
+ SVE registers are live (GETREGSET) or are to be made live
+ (SETREGSET).
+
+ The payload contains the SVE register data, starting at offset
+ SVE_PT_SVE_OFFSET from the start of user_sve_header, and with
+ size SVE_PT_SVE_SIZE(vq, flags);
+
+ ... OR-ed with zero or more of the following flags, which have the same
+	meaning and behaviour as the corresponding PR_SVE_* prctl() flags:
+
+ SVE_PT_VL_INHERIT
+
+ SVE_PT_VL_ONEXEC (SETREGSET only).
+
+* The effects of changing the vector length and/or flags are equivalent to
+ those documented for PR_SVE_SET_VL.
+
+ The caller must make a further GETREGSET call if it needs to know what VL is
+  actually set by SETREGSET, unless it is known in advance that the requested
+ VL is supported.
+
+* In the SVE_PT_REGS_SVE case, the size and layout of the payload depends on
+ the header fields. The SVE_PT_SVE_*() macros are provided to facilitate
+ access to the members.
+
+* In either case, for SETREGSET it is permissible to omit the payload, in which
+ case only the vector length and flags are changed (along with any
+ consequences of those changes).
+
+* For SETREGSET, if an SVE_PT_REGS_SVE payload is present and the
+ requested VL is not supported, the effect will be the same as if the
+ payload were omitted, except that an EIO error is reported. No
+ attempt is made to translate the payload data to the correct layout
+ for the vector length actually set. The thread's FPSIMD state is
+ preserved, but the remaining bits of the SVE registers become
+ unspecified. It is up to the caller to translate the payload layout
+ for the actual VL and retry.
+
+* The effect of writing a partial, incomplete payload is unspecified.
+
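+As an informal sketch of the size-probing step described above (header names
+may vary between C libraries, NT_ARM_SVE may need to be defined by hand on
+older ones, and the function name is arbitrary; [2] remains the authoritative
+reference):
+
+	#include <stdlib.h>
+	#include <sys/types.h>
+	#include <sys/ptrace.h>
+	#include <sys/uio.h>
+	#include <elf.h>		/* NT_ARM_SVE */
+	#include <asm/ptrace.h>		/* struct user_sve_header, SVE_PT_* */
+
+	/*
+	 * Read the NT_ARM_SVE regset of a stopped tracee.  Returns a
+	 * malloc()ed buffer of header.size bytes (header included), or NULL
+	 * on error; the caller is responsible for freeing it.
+	 */
+	void *read_sve_regset(pid_t pid)
+	{
+		struct user_sve_header header;
+		struct iovec iov = {
+			.iov_base = &header,
+			.iov_len = sizeof(header),
+		};
+		void *buf;
+
+		/* Read just the header to discover the full regset size. */
+		if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov) != 0)
+			return NULL;
+
+		buf = malloc(header.size);
+		if (!buf)
+			return NULL;
+
+		/* Retry with a buffer large enough for the whole regset. */
+		iov.iov_base = buf;
+		iov.iov_len = header.size;
+		if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov) != 0) {
+			free(buf);
+			return NULL;
+		}
+
+		return buf;
+	}
+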
+
+8. ELF coredump extensions
+---------------------------
+
+* A NT_ARM_SVE note will be added to each coredump for each thread of the
+ dumped process. The contents will be equivalent to the data that would have
+ been read if a PTRACE_GETREGSET of NT_ARM_SVE were executed for each thread
+ when the coredump was generated.
+
+
+9. System runtime configuration
+--------------------------------
+
+* To mitigate the ABI impact of expansion of the signal frame, a policy
+ mechanism is provided for administrators, distro maintainers and developers
+ to set the default vector length for userspace processes:
+
+/proc/sys/abi/sve_default_vector_length
+
+ Writing the text representation of an integer to this file sets the system
+ default vector length to the specified value, unless the value is greater
+  than the maximum vector length supported by the system, in which case the
+ default vector length is set to that maximum.
+
+ The result can be determined by reopening the file and reading its
+ contents.
+
+ At boot, the default vector length is initially set to 64 or the maximum
+ supported vector length, whichever is smaller. This determines the initial
+ vector length of the init process (PID 1).
+
+ Reading this file returns the current system default vector length.
+
+* At every execve() call, the vector length of the new process is set to
+ the system default vector length, unless
+
+  * PR_SVE_VL_INHERIT (or equivalently SVE_PT_VL_INHERIT) is set for the
+ calling thread, or
+
+ * a deferred vector length change is pending, established via the
+ PR_SVE_SET_VL_ONEXEC flag (or SVE_PT_VL_ONEXEC).
+
+* Modifying the system default vector length does not affect the vector length
+ of any existing process or thread that does not make an execve() call.
+
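+As an informal illustration only (the path is the one described above, the
+function name is arbitrary, and error handling is minimal), a privileged
+process could change and confirm the system default as follows:
+
+	#include <stdio.h>
+
+	/* Request a new default VL; returns the value actually set, or -1. */
+	int set_default_vl(int vl)
+	{
+		const char *path = "/proc/sys/abi/sve_default_vector_length";
+		FILE *f = fopen(path, "w");
+		int new_vl = -1;
+
+		if (!f)
+			return -1;
+		fprintf(f, "%d\n", vl);
+		fclose(f);
+
+		/* As noted above, re-open and re-read to see the chosen value. */
+		f = fopen(path, "r");
+		if (!f)
+			return -1;
+		if (fscanf(f, "%d", &new_vl) != 1)
+			new_vl = -1;
+		fclose(f);
+		return new_vl;
+	}
+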
+
+Appendix A. SVE programmer's model (informative)
+=================================================
+
+This section provides a minimal description of the additions made by SVE to the
+ARMv8-A programmer's model that are relevant to this document.
+
+Note: This section is for information only and not intended to be complete or
+to replace any architectural specification.
+
+A.1. Registers
+---------------
+
+In A64 state, SVE adds the following:
+
+* 32 8VL-bit vector registers Z0..Z31
+ For each Zn, Zn bits [127:0] alias the ARMv8-A vector register Vn.
+
+ A register write using a Vn register name zeros all bits of the corresponding
+ Zn except for bits [127:0].
+
+* 16 VL-bit predicate registers P0..P15
+
+* 1 VL-bit special-purpose predicate register FFR (the "first-fault register")
+
+* a VL "pseudo-register" that determines the size of each vector register
+
+ The SVE instruction set architecture provides no way to write VL directly.
+ Instead, it can be modified only by EL1 and above, by writing appropriate
+ system registers.
+
+* The value of VL can be configured at runtime by EL1 and above:
+ 16 <= VL <= VLmax, where VL must be a multiple of 16.
+
+* The maximum vector length is determined by the hardware:
+ 16 <= VLmax <= 256.
+
+ (The SVE architecture specifies 256, but permits future architecture
+ revisions to raise this limit.)
+
+* FPSR and FPCR are retained from ARMv8-A, and interact with SVE floating-point
+  operations in a similar way to how they interact with ARMv8 floating-point
+  operations.
+
+ 8VL-1 128 0 bit index
+ +---- //// -----------------+
+ Z0 | : V0 |
+ : :
+ Z7 | : V7 |
+ Z8 | : * V8 |
+ : : :
+ Z15 | : *V15 |
+ Z16 | : V16 |
+ : :
+ Z31 | : V31 |
+ +---- //// -----------------+
+ 31 0
+ VL-1 0 +-------+
+ +---- //// --+ FPSR | |
+ P0 | | +-------+
+ : | | *FPCR | |
+ P15 | | +-------+
+ +---- //// --+
+ FFR | | +-----+
+ +---- //// --+ VL | |
+ +-----+
+
+(*) callee-save:
+ This only applies to bits [63:0] of Z-/V-registers.
+ FPCR contains callee-save and caller-save bits. See [4] for details.
+
+
+A.2. Procedure call standard
+-----------------------------
+
+The ARMv8-A base procedure call standard is extended as follows with respect to
+the additional SVE register state:
+
+* All SVE register bits that are not shared with FP/SIMD are caller-save.
+
+* Z8 bits [63:0] .. Z15 bits [63:0] are callee-save.
+
+  This follows from the way these bits are mapped to V8..V15, which are
+  callee-save in the base procedure call standard.
+
+
+Appendix B. ARMv8-A FP/SIMD programmer's model
+===============================================
+
+Note: This section is for information only and not intended to be complete or
+to replace any architectural specification.
+
+Refer to [4] for more information.
+
+ARMv8-A defines the following floating-point / SIMD register state:
+
+* 32 128-bit vector registers V0..V31
+* 2 32-bit status/control registers FPSR, FPCR
+
+ 127 0 bit index
+ +---------------+
+ V0 | |
+ : : :
+ V7 | |
+ * V8 | |
+ : : : :
+ *V15 | |
+ V16 | |
+ : : :
+ V31 | |
+ +---------------+
+
+ 31 0
+ +-------+
+ FPSR | |
+ +-------+
+ *FPCR | |
+ +-------+
+
+(*) callee-save:
+ This only applies to bits [63:0] of V-registers.
+ FPCR contains a mixture of callee-save and caller-save bits.
+
+
+References
+==========
+
+[1] arch/arm64/include/uapi/asm/sigcontext.h
+ AArch64 Linux signal ABI definitions
+
+[2] arch/arm64/include/uapi/asm/ptrace.h
+ AArch64 Linux ptrace ABI definitions
+
+[3] linux/Documentation/arm64/cpu-feature-registers.txt
+
+[4] ARM IHI0055C
+ http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf
+ http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html
+ Procedure Call Standard for the ARM 64-bit Architecture (AArch64)
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 9490f2845f06..86927029a52d 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -216,10 +216,9 @@ may need to abort DMA operations and revert to PIO for the transfer, in
which case a virtual mapping of the page is required. For SCSI it is also
done in some scenarios where the low level driver cannot be trusted to
handle a single sg entry correctly. The driver is expected to perform the
-kmaps as needed on such occasions using the __bio_kmap_atomic and bio_kmap_irq
-routines as appropriate. A driver could also use the blk_queue_bounce()
-routine on its own to bounce highmem i/o to low memory for specific requests
-if so desired.
+kmaps as needed on such occasions as appropriate. A driver could also use
+the blk_queue_bounce() routine on its own to bounce highmem i/o to low
+memory for specific requests if so desired.
iii. The i/o scheduler algorithm itself can be replaced/set as appropriate
@@ -1137,8 +1136,8 @@ use dma_map_sg for scatter gather) to be able to ship it to the driver. For
PIO drivers (or drivers that need to revert to PIO transfer once in a
while (IDE for example)), where the CPU is doing the actual data
transfer a virtual mapping is needed. If the driver supports highmem I/O,
-(Sec 1.1, (ii) ) it needs to use __bio_kmap_atomic and bio_kmap_irq to
-temporarily map a bio into the virtual address space.
+(Sec 1.1, (ii) ) it needs to use kmap_atomic or similar to temporarily map
+a bio into the virtual address space.
8. Prior/Related/Impacted patches
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index 3140dbd860d8..733927a7b501 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -38,7 +38,7 @@ gb=[Size in GB]: Default: 250GB
bs=[Block size (in bytes)]: Default: 512 bytes
The block size reported to the system.
-nr_devices=[Number of devices]: Default: 2
+nr_devices=[Number of devices]: Default: 1
Number of block devices instantiated. They are instantiated as /dev/nullb0,
etc.
@@ -52,13 +52,13 @@ irqmode=[0-2]: Default: 1-Soft-irq
2: Timer: Waits a specific period (completion_nsec) for each IO before
completion.
-completion_nsec=[ns]: Default: 10.000ns
+completion_nsec=[ns]: Default: 10,000ns
Combined with irqmode=2 (timer). The time each completion event must wait.
-submit_queues=[0..nr_cpus]:
+submit_queues=[1..nr_cpus]:
The number of submission queues attached to the device driver. If unset, it
- defaults to 1 on single-queue and bio-based instances. For multi-queue,
- it is ignored when use_per_node_hctx module parameter is 1.
+ defaults to 1. For multi-queue, it is ignored when use_per_node_hctx module
+ parameter is 1.
hw_queue_depth=[0..qdepth]: Default: 64
The hardware queue depth of the device.
@@ -73,3 +73,12 @@ use_per_node_hctx=[0/1]: Default: 0
use_lightnvm=[0/1]: Default: 0
Register device with LightNVM. Requires blk-mq and CONFIG_NVM to be enabled.
+
+no_sched=[0/1]: Default: 0
+ 0: nullb* use default blk-mq io scheduler.
+ 1: nullb* doesn't use io scheduler.
+
+shared_tags=[0/1]: Default: 0
+ 0: Tag set is not shared.
+ 1: Tag set shared between devices for blk-mq. Only makes sense with
+ nr_devices > 1, otherwise there's no tag set to share.
diff --git a/Documentation/cdrom/ide-cd b/Documentation/cdrom/ide-cd
index f4dc9de2694e..a5f2a7f1ff46 100644
--- a/Documentation/cdrom/ide-cd
+++ b/Documentation/cdrom/ide-cd
@@ -55,13 +55,9 @@ This driver provides the following features:
(to compile support as a module which can be loaded and unloaded)
to the options:
- Enhanced IDE/MFM/RLL disk/cdrom/tape/floppy support
+ ATA/ATAPI/MFM/RLL support
Include IDE/ATAPI CDROM support
- and `no' to
-
- Use old disk-only driver on primary interface
-
Depending on what type of IDE interface you have, you may need to
specify additional configuration options. See
Documentation/ide/ide.txt.
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index dc44785dc0fa..779211fbb69f 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -893,10 +893,6 @@ Controllers
CPU
---
-.. note::
-
- The interface for the cpu controller hasn't been merged yet
-
The "cpu" controllers regulates distribution of CPU cycles. This
controller implements weight and absolute bandwidth limit models for
normal scheduling policy and absolute bandwidth allocation model for
@@ -910,12 +906,16 @@ All time durations are in microseconds.
cpu.stat
A read-only flat-keyed file which exists on non-root cgroups.
+ This file exists whether the controller is enabled or not.
- It reports the following six stats:
+ It always reports the following three stats:
- usage_usec
- user_usec
- system_usec
+
+ and the following three when the controller is enabled:
+
- nr_periods
- nr_throttled
- throttled_usec
@@ -926,6 +926,18 @@ All time durations are in microseconds.
The weight in the range [1, 10000].
+ cpu.weight.nice
+ A read-write single value file which exists on non-root
+ cgroups. The default is "0".
+
+ The nice value is in the range [-20, 19].
+
+ This interface file is an alternative interface for
+ "cpu.weight" and allows reading and setting weight using the
+ same values used by nice(2). Because the range is smaller and
+ granularity is coarser for the nice values, the read value is
+ the closest approximation of the current weight.
+
cpu.max
A read-write two value file which exists on non-root cgroups.
The default is "max 100000".
@@ -938,26 +950,6 @@ All time durations are in microseconds.
$PERIOD duration. "max" for $MAX indicates no limit. If only
one number is written, $MAX is updated.
- cpu.rt.max
- .. note::
-
- The semantics of this file is still under discussion and the
- interface hasn't been merged yet
-
- A read-write two value file which exists on all cgroups.
- The default is "0 100000".
-
- The maximum realtime runtime allocation. Over-committing
- configurations are disallowed and process migrations are
- rejected if not enough bandwidth is available. It's in the
- following format::
-
- $MAX $PERIOD
-
- which indicates that the group may consume upto $MAX in each
- $PERIOD duration. If only one number is written, $MAX is
- updated.
-
Memory
------
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 5da10184d908..2d9da6c40a4d 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -2,11 +2,9 @@
The Linux Kernel API
====================
-Data Types
-==========
-Doubly Linked Lists
--------------------
+List Management Functions
+=========================
.. kernel-doc:: include/linux/list.h
:internal:
@@ -56,11 +54,26 @@ Bitmap Operations
-----------------
.. kernel-doc:: lib/bitmap.c
+ :doc: bitmap introduction
+
+.. kernel-doc:: include/linux/bitmap.h
+ :doc: declare bitmap
+
+.. kernel-doc:: include/linux/bitmap.h
+ :doc: bitmap overview
+
+.. kernel-doc:: include/linux/bitmap.h
+ :doc: bitmap bitops
+
+.. kernel-doc:: lib/bitmap.c
:export:
.. kernel-doc:: lib/bitmap.c
:internal:
+.. kernel-doc:: include/linux/bitmap.h
+ :internal:
+
Command-line Parsing
--------------------
@@ -70,13 +83,16 @@ Command-line Parsing
CRC Functions
-------------
+.. kernel-doc:: lib/crc4.c
+ :export:
+
.. kernel-doc:: lib/crc7.c
:export:
-.. kernel-doc:: lib/crc16.c
+.. kernel-doc:: lib/crc8.c
:export:
-.. kernel-doc:: lib/crc-itu-t.c
+.. kernel-doc:: lib/crc16.c
:export:
.. kernel-doc:: lib/crc32.c
@@ -84,6 +100,9 @@ CRC Functions
.. kernel-doc:: lib/crc-ccitt.c
:export:
+.. kernel-doc:: lib/crc-itu-t.c
+ :export:
+
idr/ida Functions
-----------------
@@ -96,6 +115,30 @@ idr/ida Functions
.. kernel-doc:: lib/idr.c
:export:
+Math Functions in Linux
+=======================
+
+Base 2 log and power Functions
+------------------------------
+
+.. kernel-doc:: include/linux/log2.h
+ :internal:
+
+Division Functions
+------------------
+
+.. kernel-doc:: include/asm-generic/div64.h
+ :functions: do_div
+
+.. kernel-doc:: include/linux/math64.h
+ :internal:
+
+.. kernel-doc:: lib/div64.c
+ :functions: div_s64_rem div64_u64_rem div64_u64 div64_s64
+
+.. kernel-doc:: lib/gcd.c
+ :export:
+
Memory Management in Linux
==========================
diff --git a/Documentation/cpu-freq/cpufreq-stats.txt b/Documentation/cpu-freq/cpufreq-stats.txt
index 2bbe207354ed..a873855c811d 100644
--- a/Documentation/cpu-freq/cpufreq-stats.txt
+++ b/Documentation/cpu-freq/cpufreq-stats.txt
@@ -90,6 +90,9 @@ Freq_i to Freq_j. Freq_i is in descending order with increasing rows and
Freq_j is in descending order with increasing columns. The output here also
contains the actual freq values for each row and column for better readability.
+If the transition table is bigger than PAGE_SIZE, reading this will
+return an -EFBIG error.
+
--------------------------------------------------------------------------------
<mysystem>:/sys/devices/system/cpu/cpu0/cpufreq/stats # cat trans_table
From : To
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
index 2531948db89f..006827e30d06 100644
--- a/Documentation/crypto/api-samples.rst
+++ b/Documentation/crypto/api-samples.rst
@@ -7,59 +7,27 @@ Code Example For Symmetric Key Cipher Operation
::
- struct tcrypt_result {
- struct completion completion;
- int err;
- };
-
/* tie all data structures together */
struct skcipher_def {
struct scatterlist sg;
struct crypto_skcipher *tfm;
struct skcipher_request *req;
- struct tcrypt_result result;
+ struct crypto_wait wait;
};
- /* Callback function */
- static void test_skcipher_cb(struct crypto_async_request *req, int error)
- {
- struct tcrypt_result *result = req->data;
-
- if (error == -EINPROGRESS)
- return;
- result->err = error;
- complete(&result->completion);
- pr_info("Encryption finished successfully\n");
- }
-
/* Perform cipher operation */
static unsigned int test_skcipher_encdec(struct skcipher_def *sk,
int enc)
{
- int rc = 0;
+ int rc;
if (enc)
- rc = crypto_skcipher_encrypt(sk->req);
+ rc = crypto_wait_req(crypto_skcipher_encrypt(sk->req), &sk->wait);
else
- rc = crypto_skcipher_decrypt(sk->req);
-
- switch (rc) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- rc = wait_for_completion_interruptible(
- &sk->result.completion);
- if (!rc && !sk->result.err) {
- reinit_completion(&sk->result.completion);
- break;
- }
- default:
- pr_info("skcipher encrypt returned with %d result %d\n",
- rc, sk->result.err);
- break;
- }
- init_completion(&sk->result.completion);
+ rc = crypto_wait_req(crypto_skcipher_decrypt(sk->req), &sk->wait);
+
+ if (rc)
+ pr_info("skcipher encrypt returned with result %d\n", rc);
return rc;
}
@@ -89,8 +57,8 @@ Code Example For Symmetric Key Cipher Operation
}
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- test_skcipher_cb,
- &sk.result);
+ crypto_req_done,
+ &sk.wait);
/* AES 256 with random key */
get_random_bytes(&key, 32);
@@ -122,7 +90,7 @@ Code Example For Symmetric Key Cipher Operation
/* We encrypt one block */
sg_init_one(&sk.sg, scratchpad, 16);
skcipher_request_set_crypt(req, &sk.sg, &sk.sg, 16, ivdata);
- init_completion(&sk.result.completion);
+ crypto_init_wait(&sk.wait);
/* encrypt data */
ret = test_skcipher_encdec(&sk, 1);
diff --git a/Documentation/dev-tools/coccinelle.rst b/Documentation/dev-tools/coccinelle.rst
index 4a64b4c69d3f..37e474ff6911 100644
--- a/Documentation/dev-tools/coccinelle.rst
+++ b/Documentation/dev-tools/coccinelle.rst
@@ -209,7 +209,7 @@ err.log will now have the profiling information, while stdout will
provide some progress information as Coccinelle moves forward with
work.
-DEBUG_FILE support is only supported when using coccinelle >= 1.2.
+DEBUG_FILE support is only available when using coccinelle >= 1.0.2.
.cocciconfig support
--------------------
diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst
index a81787cd47d7..e313925fb0fa 100644
--- a/Documentation/dev-tools/index.rst
+++ b/Documentation/dev-tools/index.rst
@@ -21,7 +21,6 @@ whole; patches welcome!
kasan
ubsan
kmemleak
- kmemcheck
gdb-kernel-debugging
kgdb
kselftest
diff --git a/Documentation/dev-tools/kmemcheck.rst b/Documentation/dev-tools/kmemcheck.rst
deleted file mode 100644
index 7f3d1985de74..000000000000
--- a/Documentation/dev-tools/kmemcheck.rst
+++ /dev/null
@@ -1,733 +0,0 @@
-Getting started with kmemcheck
-==============================
-
-Vegard Nossum <vegardno@ifi.uio.no>
-
-
-Introduction
-------------
-
-kmemcheck is a debugging feature for the Linux Kernel. More specifically, it
-is a dynamic checker that detects and warns about some uses of uninitialized
-memory.
-
-Userspace programmers might be familiar with Valgrind's memcheck. The main
-difference between memcheck and kmemcheck is that memcheck works for userspace
-programs only, and kmemcheck works for the kernel only. The implementations
-are of course vastly different. Because of this, kmemcheck is not as accurate
-as memcheck, but it turns out to be good enough in practice to discover real
-programmer errors that the compiler is not able to find through static
-analysis.
-
-Enabling kmemcheck on a kernel will probably slow it down to the extent that
-the machine will not be usable for normal workloads such as e.g. an
-interactive desktop. kmemcheck will also cause the kernel to use about twice
-as much memory as normal. For this reason, kmemcheck is strictly a debugging
-feature.
-
-
-Downloading
------------
-
-As of version 2.6.31-rc1, kmemcheck is included in the mainline kernel.
-
-
-Configuring and compiling
--------------------------
-
-kmemcheck only works for the x86 (both 32- and 64-bit) platform. A number of
-configuration variables must have specific settings in order for the kmemcheck
-menu to even appear in "menuconfig". These are:
-
-- ``CONFIG_CC_OPTIMIZE_FOR_SIZE=n``
- This option is located under "General setup" / "Optimize for size".
-
- Without this, gcc will use certain optimizations that usually lead to
- false positive warnings from kmemcheck. An example of this is a 16-bit
- field in a struct, where gcc may load 32 bits, then discard the upper
- 16 bits. kmemcheck sees only the 32-bit load, and may trigger a
- warning for the upper 16 bits (if they're uninitialized).
-
-- ``CONFIG_SLAB=y`` or ``CONFIG_SLUB=y``
- This option is located under "General setup" / "Choose SLAB
- allocator".
-
-- ``CONFIG_FUNCTION_TRACER=n``
- This option is located under "Kernel hacking" / "Tracers" / "Kernel
- Function Tracer"
-
- When function tracing is compiled in, gcc emits a call to another
- function at the beginning of every function. This means that when the
- page fault handler is called, the ftrace framework will be called
- before kmemcheck has had a chance to handle the fault. If ftrace then
- modifies memory that was tracked by kmemcheck, the result is an
- endless recursive page fault.
-
-- ``CONFIG_DEBUG_PAGEALLOC=n``
- This option is located under "Kernel hacking" / "Memory Debugging"
- / "Debug page memory allocations".
-
-In addition, I highly recommend turning on ``CONFIG_DEBUG_INFO=y``. This is also
-located under "Kernel hacking". With this, you will be able to get line number
-information from the kmemcheck warnings, which is extremely valuable in
-debugging a problem. This option is not mandatory, however, because it slows
-down the compilation process and produces a much bigger kernel image.
-
-Now the kmemcheck menu should be visible (under "Kernel hacking" / "Memory
-Debugging" / "kmemcheck: trap use of uninitialized memory"). Here follows
-a description of the kmemcheck configuration variables:
-
-- ``CONFIG_KMEMCHECK``
- This must be enabled in order to use kmemcheck at all...
-
-- ``CONFIG_KMEMCHECK_``[``DISABLED`` | ``ENABLED`` | ``ONESHOT``]``_BY_DEFAULT``
- This option controls the status of kmemcheck at boot-time. "Enabled"
- will enable kmemcheck right from the start, "disabled" will boot the
- kernel as normal (but with the kmemcheck code compiled in, so it can
- be enabled at run-time after the kernel has booted), and "one-shot" is
- a special mode which will turn kmemcheck off automatically after
- detecting the first use of uninitialized memory.
-
- If you are using kmemcheck to actively debug a problem, then you
- probably want to choose "enabled" here.
-
- The one-shot mode is mostly useful in automated test setups because it
- can prevent floods of warnings and increase the chances of the machine
- surviving in case something is really wrong. In other cases, the one-
- shot mode could actually be counter-productive because it would turn
- itself off at the very first error -- in the case of a false positive
- too -- and this would come in the way of debugging the specific
- problem you were interested in.
-
- If you would like to use your kernel as normal, but with a chance to
- enable kmemcheck in case of some problem, it might be a good idea to
- choose "disabled" here. When kmemcheck is disabled, most of the run-
- time overhead is not incurred, and the kernel will be almost as fast
- as normal.
-
-- ``CONFIG_KMEMCHECK_QUEUE_SIZE``
- Select the maximum number of error reports to store in an internal
- (fixed-size) buffer. Since errors can occur virtually anywhere and in
- any context, we need a temporary storage area which is guaranteed not
- to generate any other page faults when accessed. The queue will be
- emptied as soon as a tasklet may be scheduled. If the queue is full,
- new error reports will be lost.
-
- The default value of 64 is probably fine. If some code produces more
- than 64 errors within an irqs-off section, then the code is likely to
- produce many, many more, too, and these additional reports seldom give
- any more information (the first report is usually the most valuable
- anyway).
-
- This number might have to be adjusted if you are not using serial
- console or similar to capture the kernel log. If you are using the
- "dmesg" command to save the log, then getting a lot of kmemcheck
- warnings might overflow the kernel log itself, and the earlier reports
- will get lost in that way instead. Try setting this to 10 or so on
- such a setup.
-
-- ``CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT``
- Select the number of shadow bytes to save along with each entry of the
- error-report queue. These bytes indicate what parts of an allocation
- are initialized, uninitialized, etc. and will be displayed when an
- error is detected to help the debugging of a particular problem.
-
- The number entered here is actually the logarithm of the number of
- bytes that will be saved. So if you pick for example 5 here, kmemcheck
- will save 2^5 = 32 bytes.
-
- The default value should be fine for debugging most problems. It also
- fits nicely within 80 columns.
-
-- ``CONFIG_KMEMCHECK_PARTIAL_OK``
- This option (when enabled) works around certain GCC optimizations that
- produce 32-bit reads from 16-bit variables where the upper 16 bits are
- thrown away afterwards.
-
- The default value (enabled) is recommended. This may of course hide
- some real errors, but disabling it would probably produce a lot of
- false positives.
-
-- ``CONFIG_KMEMCHECK_BITOPS_OK``
- This option silences warnings that would be generated for bit-field
- accesses where not all the bits are initialized at the same time. This
- may also hide some real bugs.
-
- This option is probably obsolete, or it should be replaced with
- the kmemcheck-/bitfield-annotations for the code in question. The
- default value is therefore fine.
-
-Now compile the kernel as usual.
-
-
-How to use
-----------
-
-Booting
-~~~~~~~
-
-First some information about the command-line options. There is only one
-option specific to kmemcheck, and this is called "kmemcheck". It can be used
-to override the default mode as chosen by the ``CONFIG_KMEMCHECK_*_BY_DEFAULT``
-option. Its possible settings are:
-
-- ``kmemcheck=0`` (disabled)
-- ``kmemcheck=1`` (enabled)
-- ``kmemcheck=2`` (one-shot mode)
-
-If SLUB debugging has been enabled in the kernel, it may take precedence over
-kmemcheck in such a way that the slab caches which are under SLUB debugging
-will not be tracked by kmemcheck. In order to ensure that this doesn't happen
-(even though it shouldn't by default), use SLUB's boot option ``slub_debug``,
-like this: ``slub_debug=-``
-
-In fact, this option may also be used for fine-grained control over SLUB vs.
-kmemcheck. For example, if the command line includes
-``kmemcheck=1 slub_debug=,dentry``, then SLUB debugging will be used only
-for the "dentry" slab cache, and with kmemcheck tracking all the other
-caches. This is advanced usage, however, and is not generally recommended.
-
-
-Run-time enable/disable
-~~~~~~~~~~~~~~~~~~~~~~~
-
-When the kernel has booted, it is possible to enable or disable kmemcheck at
-run-time. WARNING: This feature is still experimental and may cause false
-positive warnings to appear. Therefore, try not to use this. If you find that
-it doesn't work properly (e.g. you see an unreasonable amount of warnings), I
-will be happy to take bug reports.
-
-Use the file ``/proc/sys/kernel/kmemcheck`` for this purpose, e.g.::
-
- $ echo 0 > /proc/sys/kernel/kmemcheck # disables kmemcheck
-
-The numbers are the same as for the ``kmemcheck=`` command-line option.
-
-
-Debugging
-~~~~~~~~~
-
-A typical report will look something like this::
-
- WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
- 80000000000000000000000000000000000000000088ffff0000000000000000
- i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
- ^
-
- Pid: 1856, comm: ntpdate Not tainted 2.6.29-rc5 #264 945P-A
- RIP: 0010:[<ffffffff8104ede8>] [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
- RSP: 0018:ffff88003cdf7d98 EFLAGS: 00210002
- RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
- RDX: ffff88003e5d6018 RSI: ffff88003e5d6024 RDI: ffff88003cdf7e84
- RBP: ffff88003cdf7db8 R08: ffff88003e5d6000 R09: 0000000000000000
- R10: 0000000000000080 R11: 0000000000000000 R12: 000000000000000e
- R13: ffff88003cdf7e78 R14: ffff88003d530710 R15: ffff88003d5a98c8
- FS: 0000000000000000(0000) GS:ffff880001982000(0063) knlGS:00000
- CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033
- CR2: ffff88003f806ea0 CR3: 000000003c036000 CR4: 00000000000006a0
- DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
- DR3: 0000000000000000 DR6: 00000000ffff4ff0 DR7: 0000000000000400
- [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
- [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
- [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
- [<ffffffff8100c7b5>] int_signal+0x12/0x17
- [<ffffffffffffffff>] 0xffffffffffffffff
-
-The single most valuable information in this report is the RIP (or EIP on 32-
-bit) value. This will help us pinpoint exactly which instruction that caused
-the warning.
-
-If your kernel was compiled with ``CONFIG_DEBUG_INFO=y``, then all we have to do
-is give this address to the addr2line program, like this::
-
- $ addr2line -e vmlinux -i ffffffff8104ede8
- arch/x86/include/asm/string_64.h:12
- include/asm-generic/siginfo.h:287
- kernel/signal.c:380
- kernel/signal.c:410
-
-The "``-e vmlinux``" tells addr2line which file to look in. **IMPORTANT:**
-This must be the vmlinux of the kernel that produced the warning in the
-first place! If not, the line number information will almost certainly be
-wrong.
-
-The "``-i``" tells addr2line to also print the line numbers of inlined
-functions. In this case, the flag was very important, because otherwise,
-it would only have printed the first line, which is just a call to
-``memcpy()``, which could be called from a thousand places in the kernel, and
-is therefore not very useful. These inlined functions would not show up in
-the stack trace above, simply because the kernel doesn't load the extra
-debugging information. This technique can of course be used with ordinary
-kernel oopses as well.
-
-In this case, it's the caller of ``memcpy()`` that is interesting, and it can be
-found in ``include/asm-generic/siginfo.h``, line 287::
-
- 281 static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
- 282 {
- 283 if (from->si_code < 0)
- 284 memcpy(to, from, sizeof(*to));
- 285 else
- 286 /* _sigchld is currently the largest know union member */
- 287 memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
- 288 }
-
-Since this was a read (kmemcheck usually warns about reads only, though it can
-warn about writes to unallocated or freed memory as well), it was probably the
-"from" argument which contained some uninitialized bytes. Following the chain
-of calls, we move upwards to see where "from" was allocated or initialized,
-``kernel/signal.c``, line 380::
-
- 359 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
- 360 {
- ...
- 367 list_for_each_entry(q, &list->list, list) {
- 368 if (q->info.si_signo == sig) {
- 369 if (first)
- 370 goto still_pending;
- 371 first = q;
- ...
- 377 if (first) {
- 378 still_pending:
- 379 list_del_init(&first->list);
- 380 copy_siginfo(info, &first->info);
- 381 __sigqueue_free(first);
- ...
- 392 }
- 393 }
-
-Here, it is ``&first->info`` that is being passed on to ``copy_siginfo()``. The
-variable ``first`` was found on a list -- passed in as the second argument to
-``collect_signal()``. We continue our journey through the stack, to figure out
-where the item on "list" was allocated or initialized. We move to line 410::
-
- 395 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
- 396 siginfo_t *info)
- 397 {
- ...
- 410 collect_signal(sig, pending, info);
- ...
- 414 }
-
-Now we need to follow the ``pending`` pointer, since that is being passed on to
-``collect_signal()`` as ``list``. At this point, we've run out of lines from the
-"addr2line" output. Not to worry, we just paste the next addresses from the
-kmemcheck stack dump, i.e.::
-
- [<ffffffff8104f04e>] dequeue_signal+0x8e/0x170
- [<ffffffff81050bd8>] get_signal_to_deliver+0x98/0x390
- [<ffffffff8100b87d>] do_notify_resume+0xad/0x7d0
- [<ffffffff8100c7b5>] int_signal+0x12/0x17
-
- $ addr2line -e vmlinux -i ffffffff8104f04e ffffffff81050bd8 \
- ffffffff8100b87d ffffffff8100c7b5
- kernel/signal.c:446
- kernel/signal.c:1806
- arch/x86/kernel/signal.c:805
- arch/x86/kernel/signal.c:871
- arch/x86/kernel/entry_64.S:694
-
-Remember that since these addresses were found on the stack and not as the
-RIP value, they actually point to the _next_ instruction (they are return
-addresses). This becomes obvious when we look at the code for line 446::
-
- 422 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
- 423 {
- ...
- 431 signr = __dequeue_signal(&tsk->signal->shared_pending,
- 432 mask, info);
- 433 /*
- 434 * itimer signal ?
- 435 *
- 436 * itimers are process shared and we restart periodic
- 437 * itimers in the signal delivery path to prevent DoS
- 438 * attacks in the high resolution timer case. This is
- 439 * compliant with the old way of self restarting
- 440 * itimers, as the SIGALRM is a legacy signal and only
- 441 * queued once. Changing the restart behaviour to
- 442 * restart the timer in the signal dequeue path is
- 443 * reducing the timer noise on heavy loaded !highres
- 444 * systems too.
- 445 */
- 446 if (unlikely(signr == SIGALRM)) {
- ...
- 489 }
-
-So instead of looking at 446, we should be looking at 431, which is the line
-that executes just before 446. Here we see that what we are looking for is
-``&tsk->signal->shared_pending``.
-
-Our next task is now to figure out which function that puts items on this
-``shared_pending`` list. A crude, but efficient tool, is ``git grep``::
-
- $ git grep -n 'shared_pending' kernel/
- ...
- kernel/signal.c:828: pending = group ? &t->signal->shared_pending : &t->pending;
- kernel/signal.c:1339: pending = group ? &t->signal->shared_pending : &t->pending;
- ...
-
-There were more results, but none of them were related to list operations,
-and these were the only assignments. We inspect the line numbers more closely
-and find that this is indeed where items are being added to the list::
-
- 816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
- 817 int group)
- 818 {
- ...
- 828 pending = group ? &t->signal->shared_pending : &t->pending;
- ...
- 851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
- 852 (is_si_special(info) ||
- 853 info->si_code >= 0)));
- 854 if (q) {
- 855 list_add_tail(&q->list, &pending->list);
- ...
- 890 }
-
-and::
-
- 1309 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
- 1310 {
- ....
- 1339 pending = group ? &t->signal->shared_pending : &t->pending;
- 1340 list_add_tail(&q->list, &pending->list);
- ....
- 1347 }
-
-In the first case, the list element we are looking for, ``q``, is being
-returned from the function ``__sigqueue_alloc()``, which looks like an
-allocation function. Let's take a look at it::
-
- 187 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
- 188 int override_rlimit)
- 189 {
- 190 struct sigqueue *q = NULL;
- 191 struct user_struct *user;
- 192
- 193 /*
- 194 * We won't get problems with the target's UID changing under us
- 195 * because changing it requires RCU be used, and if t != current, the
- 196 * caller must be holding the RCU readlock (by way of a spinlock) and
- 197 * we use RCU protection here
- 198 */
- 199 user = get_uid(__task_cred(t)->user);
- 200 atomic_inc(&user->sigpending);
- 201 if (override_rlimit ||
- 202 atomic_read(&user->sigpending) <=
- 203 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
- 204 q = kmem_cache_alloc(sigqueue_cachep, flags);
- 205 if (unlikely(q == NULL)) {
- 206 atomic_dec(&user->sigpending);
- 207 free_uid(user);
- 208 } else {
- 209 INIT_LIST_HEAD(&q->list);
- 210 q->flags = 0;
- 211 q->user = user;
- 212 }
- 213
- 214 return q;
- 215 }
-
-We see that this function initializes ``q->list``, ``q->flags``, and
-``q->user``. It seems that now is the time to look at the definition of
-``struct sigqueue``, e.g.::
-
- 14 struct sigqueue {
- 15 struct list_head list;
- 16 int flags;
- 17 siginfo_t info;
- 18 struct user_struct *user;
- 19 };
-
-And, you might remember, it was a ``memcpy()`` on ``&first->info`` that
-caused the warning, so this makes perfect sense. It also seems reasonable
-to assume that it is the caller of ``__sigqueue_alloc()`` that has the
-responsibility of filling out (initializing) this member.
-
-But just which fields of the struct were uninitialized? Let's look at
-kmemcheck's report again::
-
- WARNING: kmemcheck: Caught 32-bit read from uninitialized memory (ffff88003e4a2024)
- 80000000000000000000000000000000000000000088ffff0000000000000000
- i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
- ^
-
-These first two lines are the memory dump of the memory object itself, and
-the shadow bytemap, respectively. The memory object itself is in this case
-``&first->info``. Just beware that the start of this dump is NOT the start
-of the object itself! The position of the caret (^) corresponds with the
-address of the read (ffff88003e4a2024).
-
-The shadow bytemap dump legend is as follows:
-
-- i: initialized
-- u: uninitialized
-- a: unallocated (memory has been allocated by the slab layer, but has not
- yet been handed off to anybody)
-- f: freed (memory has been allocated by the slab layer, but has been freed
- by the previous owner)
-
-In order to figure out where (relative to the start of the object) the
-uninitialized memory was located, we have to look at the disassembly. For
-that, we'll need the RIP address again::
-
- RIP: 0010:[<ffffffff8104ede8>] [<ffffffff8104ede8>] __dequeue_signal+0xc8/0x190
-
- $ objdump -d --no-show-raw-insn vmlinux | grep -C 8 ffffffff8104ede8:
- ffffffff8104edc8: mov %r8,0x8(%r8)
- ffffffff8104edcc: test %r10d,%r10d
- ffffffff8104edcf: js ffffffff8104ee88 <__dequeue_signal+0x168>
- ffffffff8104edd5: mov %rax,%rdx
- ffffffff8104edd8: mov $0xc,%ecx
- ffffffff8104eddd: mov %r13,%rdi
- ffffffff8104ede0: mov $0x30,%eax
- ffffffff8104ede5: mov %rdx,%rsi
- ffffffff8104ede8: rep movsl %ds:(%rsi),%es:(%rdi)
- ffffffff8104edea: test $0x2,%al
- ffffffff8104edec: je ffffffff8104edf0 <__dequeue_signal+0xd0>
- ffffffff8104edee: movsw %ds:(%rsi),%es:(%rdi)
- ffffffff8104edf0: test $0x1,%al
- ffffffff8104edf2: je ffffffff8104edf5 <__dequeue_signal+0xd5>
- ffffffff8104edf4: movsb %ds:(%rsi),%es:(%rdi)
- ffffffff8104edf5: mov %r8,%rdi
- ffffffff8104edf8: callq ffffffff8104de60 <__sigqueue_free>
-
-As expected, it's the "``rep movsl``" instruction from the ``memcpy()``
-that causes the warning. We know about ``REP MOVSL`` that it uses the register
-``RCX`` to count the number of remaining iterations. By taking a look at the
-register dump again (from the kmemcheck report), we can figure out how many
-bytes were left to copy::
-
- RAX: 0000000000000030 RBX: ffff88003d4ea968 RCX: 0000000000000009
-
-By looking at the disassembly, we also see that ``%ecx`` is being loaded
-with the value ``$0xc`` just before (ffffffff8104edd8), so we are very
-lucky. Keep in mind that this is the number of iterations, not bytes. And
-since this is a "long" operation, we need to multiply by 4 to get the
-number of bytes. So this means that the uninitialized value was encountered
-at 4 * (0xc - 0x9) = 12 bytes from the start of the object.
-
-We can now try to figure out which field of the "``struct siginfo``" that
-was not initialized. This is the beginning of the struct::
-
- 40 typedef struct siginfo {
- 41 int si_signo;
- 42 int si_errno;
- 43 int si_code;
- 44
- 45 union {
- ..
- 92 } _sifields;
- 93 } siginfo_t;
-
-On 64-bit, the int is 4 bytes long, so it must the union member that has
-not been initialized. We can verify this using gdb::
-
- $ gdb vmlinux
- ...
- (gdb) p &((struct siginfo *) 0)->_sifields
- $1 = (union {...} *) 0x10
-
-Actually, it seems that the union member is located at offset 0x10 -- which
-means that gcc has inserted 4 bytes of padding between the members ``si_code``
-and ``_sifields``. We can now get a fuller picture of the memory dump::
-
- _----------------------------=> si_code
- / _--------------------=> (padding)
- | / _------------=> _sifields(._kill._pid)
- | | / _----=> _sifields(._kill._uid)
- | | | /
- -------|-------|-------|-------|
- 80000000000000000000000000000000000000000088ffff0000000000000000
- i i i i u u u u i i i i i i i i u u u u u u u u u u u u u u u u
-
-This allows us to realize another important fact: ``si_code`` contains the
-value 0x80. Remember that x86 is little endian, so the first 4 bytes
-"80000000" are really the number 0x00000080. With a bit of research, we
-find that this is actually the constant ``SI_KERNEL`` defined in
-``include/asm-generic/siginfo.h``::
-
- 144 #define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
-
-This macro is used in exactly one place in the x86 kernel: In ``send_signal()``
-in ``kernel/signal.c``::
-
- 816 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
- 817 int group)
- 818 {
- ...
- 828 pending = group ? &t->signal->shared_pending : &t->pending;
- ...
- 851 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
- 852 (is_si_special(info) ||
- 853 info->si_code >= 0)));
- 854 if (q) {
- 855 list_add_tail(&q->list, &pending->list);
- 856 switch ((unsigned long) info) {
- ...
- 865 case (unsigned long) SEND_SIG_PRIV:
- 866 q->info.si_signo = sig;
- 867 q->info.si_errno = 0;
- 868 q->info.si_code = SI_KERNEL;
- 869 q->info.si_pid = 0;
- 870 q->info.si_uid = 0;
- 871 break;
- ...
- 890 }
-
-Not only does this match with the ``.si_code`` member, it also matches the place
-we found earlier when looking for where siginfo_t objects are enqueued on the
-``shared_pending`` list.
-
-So to sum up: It seems that it is the padding introduced by the compiler
-between two struct fields that is uninitialized, and this gets reported when
-we do a ``memcpy()`` on the struct. This means that we have identified a false
-positive warning.
-
-Normally, kmemcheck will not report uninitialized accesses in ``memcpy()`` calls
-when both the source and destination addresses are tracked. (Instead, we copy
-the shadow bytemap as well). In this case, the destination address clearly
-was not tracked. We can dig a little deeper into the stack trace from above::
-
- arch/x86/kernel/signal.c:805
- arch/x86/kernel/signal.c:871
- arch/x86/kernel/entry_64.S:694
-
-And we clearly see that the destination siginfo object is located on the
-stack::
-
- 782 static void do_signal(struct pt_regs *regs)
- 783 {
- 784 struct k_sigaction ka;
- 785 siginfo_t info;
- ...
- 804 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- ...
- 854 }
-
-And this ``&info`` is what eventually gets passed to ``copy_siginfo()`` as the
-destination argument.
-
-Now, even though we didn't find an actual error here, the example is still a
-good one, because it shows how one would go about to find out what the report
-was all about.
-
-
-Annotating false positives
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There are a few different ways to make annotations in the source code that
-will keep kmemcheck from checking and reporting certain allocations. Here
-they are:
-
-- ``__GFP_NOTRACK_FALSE_POSITIVE``
- This flag can be passed to ``kmalloc()`` or ``kmem_cache_alloc()``
- (therefore also to other functions that end up calling one of
- these) to indicate that the allocation should not be tracked
- because it would lead to a false positive report. This is a "big
- hammer" way of silencing kmemcheck; after all, even if the false
- positive pertains to particular field in a struct, for example, we
- will now lose the ability to find (real) errors in other parts of
- the same struct.
-
- Example::
-
- /* No warnings will ever trigger on accessing any part of x */
- x = kmalloc(sizeof *x, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);
-
-- ``kmemcheck_bitfield_begin(name)``/``kmemcheck_bitfield_end(name)`` and
- ``kmemcheck_annotate_bitfield(ptr, name)``
- The first two of these three macros can be used inside struct
- definitions to signal, respectively, the beginning and end of a
- bitfield. Additionally, this will assign the bitfield a name, which
- is given as an argument to the macros.
-
- Having used these markers, one can later use
- kmemcheck_annotate_bitfield() at the point of allocation, to indicate
- which parts of the allocation is part of a bitfield.
-
- Example::
-
- struct foo {
- int x;
-
- kmemcheck_bitfield_begin(flags);
- int flag_a:1;
- int flag_b:1;
- kmemcheck_bitfield_end(flags);
-
- int y;
- };
-
- struct foo *x = kmalloc(sizeof *x);
-
- /* No warnings will trigger on accessing the bitfield of x */
- kmemcheck_annotate_bitfield(x, flags);
-
- Note that ``kmemcheck_annotate_bitfield()`` can be used even before the
- return value of ``kmalloc()`` is checked -- in other words, passing NULL
- as the first argument is legal (and will do nothing).
-
-
-Reporting errors
-----------------
-
-As we have seen, kmemcheck will produce false positive reports. Therefore, it
-is not very wise to blindly post kmemcheck warnings to mailing lists and
-maintainers. Instead, I encourage maintainers and developers to find errors
-in their own code. If you get a warning, you can try to work around it, try
-to figure out if it's a real error or not, or simply ignore it. Most
-developers know their own code and will quickly and efficiently determine the
-root cause of a kmemcheck report. This is therefore also the most efficient
-way to work with kmemcheck.
-
-That said, we (the kmemcheck maintainers) will always be on the lookout for
-false positives that we can annotate and silence. So whatever you find,
-please drop us a note privately! Kernel configs and steps to reproduce (if
-available) are of course a great help too.
-
-Happy hacking!
-
-
-Technical description
----------------------
-
-kmemcheck works by marking memory pages non-present. This means that whenever
-somebody attempts to access the page, a page fault is generated. The page
-fault handler notices that the page was in fact only hidden, and so it calls
-on the kmemcheck code to make further investigations.
-
-When the investigations are completed, kmemcheck "shows" the page by marking
-it present (as it would be under normal circumstances). This way, the
-interrupted code can continue as usual.
-
-But after the instruction has been executed, we should hide the page again, so
-that we can catch the next access too! Now kmemcheck makes use of a debugging
-feature of the processor, namely single-stepping. When the processor has
-finished the one instruction that generated the memory access, a debug
-exception is raised. From here, we simply hide the page again and continue
-execution, this time with the single-stepping feature turned off.
-
-kmemcheck requires some assistance from the memory allocator in order to work.
-The memory allocator needs to
-
- 1. Tell kmemcheck about newly allocated pages and pages that are about to
- be freed. This allows kmemcheck to set up and tear down the shadow memory
- for the pages in question. The shadow memory stores the status of each
- byte in the allocation proper, e.g. whether it is initialized or
- uninitialized.
-
- 2. Tell kmemcheck which parts of memory should be marked uninitialized.
- There are actually a few more states, such as "not yet allocated" and
- "recently freed".
-
-If a slab cache is set up using the SLAB_NOTRACK flag, it will never return
-memory that can take page faults because of kmemcheck.
-
-If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
-request memory with the __GFP_NOTRACK or __GFP_NOTRACK_FALSE_POSITIVE flags.
-This does not prevent the page faults from occurring, however, but marks the
-object in question as being initialized so that no warnings will ever be
-produced for this object.
-
-Currently, the SLAB and SLUB allocators are supported by kmemcheck.
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index ebd03d11d2c2..e80850eefe13 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -31,6 +31,17 @@ To build and run the tests with a single command, use::
Note that some tests will require root privileges.
+Build and run from user specific object directory (make O=dir)::
+
+ $ make O=/tmp/kselftest kselftest
+
+Build and run from the KBUILD_OUTPUT directory (make KBUILD_OUTPUT=)::
+
+ $ make KBUILD_OUTPUT=/tmp/kselftest kselftest
+
+The above commands run the tests and print a pass/fail summary to make the
+test results easier to understand. The detailed results for each individual
+test can be found in /tmp/testname file(s).
Running a subset of selftests
=============================
@@ -46,10 +57,21 @@ You can specify multiple tests to build and run::
$ make TARGETS="size timers" kselftest
+Build and run from user specific object directory (make O=dir)::
+
+ $ make O=/tmp/kselftest TARGETS="size timers" kselftest
+
+Build and run from the KBUILD_OUTPUT directory (make KBUILD_OUTPUT=)::
+
+ $ make KBUILD_OUTPUT=/tmp/kselftest TARGETS="size timers" kselftest
+
+The above commands run the tests and print a pass/fail summary to make the
+test results easier to understand. The detailed results for each individual
+test can be found in /tmp/testname file(s).
+
See the top-level tools/testing/selftests/Makefile for the list of all
possible targets.
-
Running the full range hotplug selftests
========================================
@@ -113,9 +135,17 @@ Contributing new tests (details)
* Use TEST_GEN_XXX if such binaries or files are generated during
compiling.
- TEST_PROGS, TEST_GEN_PROGS mean it is the excutable tested by
+ TEST_PROGS, TEST_GEN_PROGS mean it is the executable tested by
default.
+    TEST_CUSTOM_PROGS should be used by tests that require a custom build
+    rule and prevent use of the common build rule.
+
+    TEST_PROGS are for test shell scripts. Please ensure the shell script
+    has its exec bit set. Otherwise, lib.mk run_tests will generate a warning.
+
+ TEST_CUSTOM_PROGS and TEST_PROGS will be run by common run_tests.
+
TEST_PROGS_EXTENDED, TEST_GEN_PROGS_EXTENDED mean it is the
executable which is not tested by default.
TEST_FILES, TEST_GEN_FILES mean it is the file which is used by
diff --git a/Documentation/devicetree/bindings/arm/samsung/pmu.txt b/Documentation/devicetree/bindings/arm/samsung/pmu.txt
index bf5fc59a6938..088584a98cf4 100644
--- a/Documentation/devicetree/bindings/arm/samsung/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/pmu.txt
@@ -62,7 +62,7 @@ pmu_system_controller: system-controller@10040000 {
Example of clock consumer :
-usb3503: usb3503@08 {
+usb3503: usb3503@8 {
/* ... */
clock-names = "refclk";
clocks = <&pmu_system_controller 0>;
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
index 3c551894f621..fa674818e7e8 100644
--- a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
@@ -71,7 +71,7 @@ Optional nodes:
- compatible: only "samsung,secure-firmware" is currently supported
- reg: address of non-secure SYSRAM used for communication with firmware
- firmware@0203F000 {
+ firmware@203F000 {
compatible = "samsung,secure-firmware";
reg = <0x0203F000 0x1000>;
};
diff --git a/Documentation/devicetree/bindings/arm/sp810.txt b/Documentation/devicetree/bindings/arm/sp810.txt
index 6808fb5dee40..1b2ab1ff5587 100644
--- a/Documentation/devicetree/bindings/arm/sp810.txt
+++ b/Documentation/devicetree/bindings/arm/sp810.txt
@@ -33,7 +33,7 @@ Required properties:
property with the highest frequency
Example:
- v2m_sysctl: sysctl@020000 {
+ v2m_sysctl: sysctl@20000 {
compatible = "arm,sp810", "arm,primecell";
reg = <0x020000 0x1000>;
clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&smbclk>;
diff --git a/Documentation/devicetree/bindings/arm/spe-pmu.txt b/Documentation/devicetree/bindings/arm/spe-pmu.txt
new file mode 100644
index 000000000000..93372f2a7df9
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/spe-pmu.txt
@@ -0,0 +1,20 @@
+* ARMv8.2 Statistical Profiling Extension (SPE) Performance Monitor Units (PMU)
+
+ARMv8.2 introduces the optional Statistical Profiling Extension for collecting
+performance sample data using an in-memory trace buffer.
+
+** SPE Required properties:
+
+- compatible : should be one of:
+ "arm,statistical-profiling-extension-v1"
+
+- interrupts : Exactly 1 PPI must be listed. For heterogeneous systems where
+ SPE is only supported on a subset of the CPUs, please consult
+ the arm,gic-v3 binding for details on describing a PPI partition.
+
+** Example:
+
+spe-pmu {
+ compatible = "arm,statistical-profiling-extension-v1";
+ interrupts = <GIC_PPI 05 IRQ_TYPE_LEVEL_HIGH &part1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/vexpress-sysreg.txt b/Documentation/devicetree/bindings/arm/vexpress-sysreg.txt
index 00318d083c9e..50095802fb4a 100644
--- a/Documentation/devicetree/bindings/arm/vexpress-sysreg.txt
+++ b/Documentation/devicetree/bindings/arm/vexpress-sysreg.txt
@@ -37,7 +37,7 @@ Example:
compatible = "arm,vexpress-sysreg";
reg = <0x10000000 0x1000>;
- v2m_led_gpios: sys_led@08 {
+ v2m_led_gpios: sys_led@8 {
compatible = "arm,vexpress-sysreg,sys_led";
gpio-controller;
#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/ata/ahci-ceva.txt b/Documentation/devicetree/bindings/ata/ahci-ceva.txt
index 7ca8b976c13a..7561cc4de371 100644
--- a/Documentation/devicetree/bindings/ata/ahci-ceva.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-ceva.txt
@@ -5,6 +5,36 @@ Required properties:
- compatible: Compatibility string. Must be 'ceva,ahci-1v84'.
- clocks: Input clock specifier. Refer to common clock bindings.
- interrupts: Interrupt specifier. Refer to interrupt binding.
+ - ceva,p0-cominit-params: OOB timing value for COMINIT parameter for port 0.
+ - ceva,p1-cominit-params: OOB timing value for COMINIT parameter for port 1.
+ The fields for the above parameter must be as shown below:
+ ceva,pN-cominit-params = /bits/ 8 <CIBGMN CIBGMX CIBGN CINMP>;
+ CINMP : COMINIT Negate Minimum Period.
+ CIBGN : COMINIT Burst Gap Nominal.
+ CIBGMX: COMINIT Burst Gap Maximum.
+ CIBGMN: COMINIT Burst Gap Minimum.
+ - ceva,p0-comwake-params: OOB timing value for COMWAKE parameter for port 0.
+ - ceva,p1-comwake-params: OOB timing value for COMWAKE parameter for port 1.
+ The fields for the above parameter must be as shown below:
+ ceva,pN-comwake-params = /bits/ 8 <CWBGMN CWBGMX CWBGN CWNMP>;
+ CWBGMN: COMWAKE Burst Gap Minimum.
+ CWBGMX: COMWAKE Burst Gap Maximum.
+ CWBGN: COMWAKE Burst Gap Nominal.
+ CWNMP: COMWAKE Negate Minimum Period.
+ - ceva,p0-burst-params: Burst timing value for COM parameter for port 0.
+ - ceva,p1-burst-params: Burst timing value for COM parameter for port 1.
+ The fields for the above parameter must be as shown below:
+ ceva,pN-burst-params = /bits/ 8 <BMX BNM SFD PTST>;
+ BMX: COM Burst Maximum.
+ BNM: COM Burst Nominal.
+ SFD: Signal Failure Detection value.
+ PTST: Partial to Slumber timer value.
+ - ceva,p0-retry-params: Retry interval timing value for port 0.
+ - ceva,p1-retry-params: Retry interval timing value for port 1.
+ The fields for the above parameter must be as shown below:
+ ceva,pN-retry-params = /bits/ 16 <RIT RCT>;
+ RIT: Retry Interval Timer.
+ RCT: Rate Change Timer.
Optional properties:
- ceva,broken-gen2: limit to gen1 speed instead of gen2.
@@ -16,5 +46,14 @@ Examples:
interrupt-parent = <&gic>;
interrupts = <0 133 4>;
clocks = <&clkc SATA_CLK_ID>;
+ ceva,p0-cominit-params = /bits/ 8 <0x0F 0x25 0x18 0x29>;
+ ceva,p0-comwake-params = /bits/ 8 <0x04 0x0B 0x08 0x0F>;
+ ceva,p0-burst-params = /bits/ 8 <0x0A 0x08 0x4A 0x06>;
+ ceva,p0-retry-params = /bits/ 16 <0x0216 0x7F06>;
+
+ ceva,p1-cominit-params = /bits/ 8 <0x0F 0x25 0x18 0x29>;
+ ceva,p1-comwake-params = /bits/ 8 <0x04 0x0B 0x08 0x0F>;
+ ceva,p1-burst-params = /bits/ 8 <0x0A 0x08 0x4A 0x06>;
+ ceva,p1-retry-params = /bits/ 16 <0x0216 0x7F06>;
ceva,broken-gen2;
};
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index fedc213b5f1a..c760ecb81381 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -56,7 +56,7 @@ Examples:
interrupts = <115>;
};
- ahci: sata@01c18000 {
+ ahci: sata@1c18000 {
compatible = "allwinner,sun4i-a10-ahci";
reg = <0x01c18000 0x1000>;
interrupts = <56>;
diff --git a/Documentation/devicetree/bindings/ata/imx-sata.txt b/Documentation/devicetree/bindings/ata/imx-sata.txt
index fa511db18408..a3d14719e478 100644
--- a/Documentation/devicetree/bindings/ata/imx-sata.txt
+++ b/Documentation/devicetree/bindings/ata/imx-sata.txt
@@ -25,7 +25,7 @@ Optional properties:
Examples:
-sata@02200000 {
+sata@2200000 {
compatible = "fsl,imx6q-ahci";
reg = <0x02200000 0x4000>;
interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/bus/imx-weim.txt b/Documentation/devicetree/bindings/bus/imx-weim.txt
index 6630d842c7a3..683eaf3aed79 100644
--- a/Documentation/devicetree/bindings/bus/imx-weim.txt
+++ b/Documentation/devicetree/bindings/bus/imx-weim.txt
@@ -61,7 +61,7 @@ Timing property for child nodes. It is mandatory, not optional.
Example for an imx6q-sabreauto board, the NOR flash connected to the WEIM:
- weim: weim@021b8000 {
+ weim: weim@21b8000 {
compatible = "fsl,imx6q-weim";
reg = <0x021b8000 0x4000>;
clocks = <&clks 196>;
diff --git a/Documentation/devicetree/bindings/bus/sunxi-rsb.txt b/Documentation/devicetree/bindings/bus/sunxi-rsb.txt
index 3dd28343b6ce..eb3ed628c6f1 100644
--- a/Documentation/devicetree/bindings/bus/sunxi-rsb.txt
+++ b/Documentation/devicetree/bindings/bus/sunxi-rsb.txt
@@ -28,7 +28,7 @@ which can normally be found in the datasheet.
Example:
- rsb@01f03400 {
+ rsb@1f03400 {
compatible = "allwinner,sun8i-a23-rsb";
reg = <0x01f03400 0x400>;
interrupts = <0 39 4>;
diff --git a/Documentation/devicetree/bindings/clock/arm-syscon-icst.txt b/Documentation/devicetree/bindings/clock/arm-syscon-icst.txt
index 27468119fd94..4cd81742038f 100644
--- a/Documentation/devicetree/bindings/clock/arm-syscon-icst.txt
+++ b/Documentation/devicetree/bindings/clock/arm-syscon-icst.txt
@@ -59,7 +59,7 @@ syscon: syscon@10000000 {
compatible = "syscon";
reg = <0x10000000 0x1000>;
- oscclk0: osc0@0c {
+ oscclk0: osc0@c {
compatible = "arm,syscon-icst307";
#clock-cells = <0>;
lock-offset = <0x20>;
diff --git a/Documentation/devicetree/bindings/clock/clk-exynos-audss.txt b/Documentation/devicetree/bindings/clock/clk-exynos-audss.txt
index 0c3d6015868d..2cba012f5af0 100644
--- a/Documentation/devicetree/bindings/clock/clk-exynos-audss.txt
+++ b/Documentation/devicetree/bindings/clock/clk-exynos-audss.txt
@@ -80,7 +80,7 @@ Example 3: I2S controller node that consumes the clock generated by the clock
controller. Refer to the standard clock bindings for information
about 'clocks' and 'clock-names' property.
-i2s0: i2s@03830000 {
+i2s0: i2s@3830000 {
compatible = "samsung,i2s-v5";
reg = <0x03830000 0x100>;
dmas = <&pdma0 10
diff --git a/Documentation/devicetree/bindings/clock/clk-s5pv210-audss.txt b/Documentation/devicetree/bindings/clock/clk-s5pv210-audss.txt
index 4fc869b69d4a..f6272dcd96f4 100644
--- a/Documentation/devicetree/bindings/clock/clk-s5pv210-audss.txt
+++ b/Documentation/devicetree/bindings/clock/clk-s5pv210-audss.txt
@@ -43,7 +43,7 @@ Example: I2S controller node that consumes the clock generated by the clock
controller. Refer to the standard clock bindings for information
about 'clocks' and 'clock-names' property.
- i2s0: i2s@03830000 {
+ i2s0: i2s@3830000 {
/* ... */
clock-names = "iis", "i2s_opclk0",
"i2s_opclk1";
diff --git a/Documentation/devicetree/bindings/clock/dove-divider-clock.txt b/Documentation/devicetree/bindings/clock/dove-divider-clock.txt
index e3eb0f657c5e..217871f483c0 100644
--- a/Documentation/devicetree/bindings/clock/dove-divider-clock.txt
+++ b/Documentation/devicetree/bindings/clock/dove-divider-clock.txt
@@ -21,7 +21,7 @@ Required properties:
a size of 8.
- #clock-cells : from common clock binding; shall be set to 1
-divider_clk: core-clock@0064 {
+divider_clk: core-clock@64 {
compatible = "marvell,dove-divider-clock";
reg = <0x0064 0x8>;
#clock-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/imx1-clock.txt b/Documentation/devicetree/bindings/clock/imx1-clock.txt
index b7adf4e3ea98..9823baf7acb6 100644
--- a/Documentation/devicetree/bindings/clock/imx1-clock.txt
+++ b/Documentation/devicetree/bindings/clock/imx1-clock.txt
@@ -10,13 +10,13 @@ ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx1-clock.h
for the full list of i.MX1 clock IDs.
Examples:
- clks: ccm@0021b000 {
+ clks: ccm@21b000 {
#clock-cells = <1>;
compatible = "fsl,imx1-ccm";
reg = <0x0021b000 0x1000>;
};
- pwm: pwm@00208000 {
+ pwm: pwm@208000 {
#pwm-cells = <2>;
compatible = "fsl,imx1-pwm";
reg = <0x00208000 0x1000>;
diff --git a/Documentation/devicetree/bindings/clock/imx6q-clock.txt b/Documentation/devicetree/bindings/clock/imx6q-clock.txt
index aa0a4d423ef5..a45ca67a9d5f 100644
--- a/Documentation/devicetree/bindings/clock/imx6q-clock.txt
+++ b/Documentation/devicetree/bindings/clock/imx6q-clock.txt
@@ -14,14 +14,14 @@ Examples:
#include <dt-bindings/clock/imx6qdl-clock.h>
-clks: ccm@020c4000 {
+clks: ccm@20c4000 {
compatible = "fsl,imx6q-ccm";
reg = <0x020c4000 0x4000>;
interrupts = <0 87 0x04 0 88 0x04>;
#clock-cells = <1>;
};
-uart1: serial@02020000 {
+uart1: serial@2020000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x02020000 0x4000>;
interrupts = <0 26 0x04>;
diff --git a/Documentation/devicetree/bindings/clock/maxim,max77686.txt b/Documentation/devicetree/bindings/clock/maxim,max77686.txt
index 8398a3a5e106..3472b461ca93 100644
--- a/Documentation/devicetree/bindings/clock/maxim,max77686.txt
+++ b/Documentation/devicetree/bindings/clock/maxim,max77686.txt
@@ -46,7 +46,7 @@ Example:
/* ... */
Node of the MFD chip
- max77686: max77686@09 {
+ max77686: max77686@9 {
compatible = "maxim,max77686";
interrupt-parent = <&wakeup_eint>;
interrupts = <26 0>;
@@ -71,7 +71,7 @@ Example:
/* ... */
Node of the MFD chip
- max77802: max77802@09 {
+ max77802: max77802@9 {
compatible = "maxim,max77802";
interrupt-parent = <&wakeup_eint>;
interrupts = <26 0>;
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen.txt
index c35390f60545..7364953d0d0b 100644
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,clkgen.txt
@@ -42,7 +42,7 @@ Required properties:
Example:
- clockgen-a@090ff000 {
+ clockgen-a@90ff000 {
compatible = "st,clkgen-c32";
reg = <0x90ff000 0x1000>;
diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
index 7eda08eb8a1e..4ca21c3a6fc9 100644
--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
@@ -36,7 +36,7 @@ For the PRCM CCUs on A83T/H3/A64, two more clocks are needed:
- "iosc": the SoC's internal frequency oscillator
Example for generic CCU:
-ccu: clock@01c20000 {
+ccu: clock@1c20000 {
compatible = "allwinner,sun8i-h3-ccu";
reg = <0x01c20000 0x400>;
clocks = <&osc24M>, <&osc32k>;
@@ -46,7 +46,7 @@ ccu: clock@01c20000 {
};
Example for PRCM CCU:
-r_ccu: clock@01f01400 {
+r_ccu: clock@1f01400 {
compatible = "allwinner,sun50i-a64-r-ccu";
reg = <0x01f01400 0x100>;
clocks = <&osc24M>, <&osc32k>, <&iosc>, <&ccu CLK_PLL_PERIPH0>;
diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt
index 8f7619d8c8d8..1a042e20b115 100644
--- a/Documentation/devicetree/bindings/clock/sunxi.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi.txt
@@ -137,7 +137,7 @@ the address block, which is related to the overall mmc block.
For example:
-osc24M: clk@01c20050 {
+osc24M: clk@1c20050 {
#clock-cells = <0>;
compatible = "allwinner,sun4i-a10-osc-clk";
reg = <0x01c20050 0x4>;
@@ -145,7 +145,7 @@ osc24M: clk@01c20050 {
clock-output-names = "osc24M";
};
-pll1: clk@01c20000 {
+pll1: clk@1c20000 {
#clock-cells = <0>;
compatible = "allwinner,sun4i-a10-pll1-clk";
reg = <0x01c20000 0x4>;
@@ -153,7 +153,7 @@ pll1: clk@01c20000 {
clock-output-names = "pll1";
};
-pll5: clk@01c20020 {
+pll5: clk@1c20020 {
#clock-cells = <1>;
compatible = "allwinner,sun4i-pll5-clk";
reg = <0x01c20020 0x4>;
@@ -161,7 +161,7 @@ pll5: clk@01c20020 {
clock-output-names = "pll5_ddr", "pll5_other";
};
-pll6: clk@01c20028 {
+pll6: clk@1c20028 {
#clock-cells = <1>;
compatible = "allwinner,sun6i-a31-pll6-clk";
reg = <0x01c20028 0x4>;
@@ -169,7 +169,7 @@ pll6: clk@01c20028 {
clock-output-names = "pll6", "pll6x2";
};
-cpu: cpu@01c20054 {
+cpu: cpu@1c20054 {
#clock-cells = <0>;
compatible = "allwinner,sun4i-a10-cpu-clk";
reg = <0x01c20054 0x4>;
@@ -177,7 +177,7 @@ cpu: cpu@01c20054 {
clock-output-names = "cpu";
};
-mmc0_clk: clk@01c20088 {
+mmc0_clk: clk@1c20088 {
#clock-cells = <1>;
compatible = "allwinner,sun4i-a10-mmc-clk";
reg = <0x01c20088 0x4>;
@@ -199,7 +199,7 @@ gmac_int_tx_clk: clk@3 {
clock-output-names = "gmac_int_tx";
};
-gmac_clk: clk@01c20164 {
+gmac_clk: clk@1c20164 {
#clock-cells = <0>;
compatible = "allwinner,sun7i-a20-gmac-clk";
reg = <0x01c20164 0x4>;
@@ -211,7 +211,7 @@ gmac_clk: clk@01c20164 {
clock-output-names = "gmac";
};
-mmc_config_clk: clk@01c13000 {
+mmc_config_clk: clk@1c13000 {
compatible = "allwinner,sun9i-a80-mmc-config-clk";
reg = <0x01c13000 0x10>;
clocks = <&ahb0_gates 8>;
diff --git a/Documentation/devicetree/bindings/clock/ti,cdce706.txt b/Documentation/devicetree/bindings/clock/ti,cdce706.txt
index 616836e7e1e2..959d96632f5d 100644
--- a/Documentation/devicetree/bindings/clock/ti,cdce706.txt
+++ b/Documentation/devicetree/bindings/clock/ti,cdce706.txt
@@ -25,7 +25,7 @@ Example:
};
};
...
- i2c0: i2c-master@0d090000 {
+ i2c0: i2c-master@d090000 {
...
cdce706: clock-synth@69 {
compatible = "ti,cdce706";
diff --git a/Documentation/devicetree/bindings/common-properties.txt b/Documentation/devicetree/bindings/common-properties.txt
index 697714f8d75c..a3448bfa1c82 100644
--- a/Documentation/devicetree/bindings/common-properties.txt
+++ b/Documentation/devicetree/bindings/common-properties.txt
@@ -1,4 +1,8 @@
Common properties
+=================
+
+Endianness
+----------
The Devicetree Specification does not define any properties related to hardware
byteswapping, but endianness issues show up frequently in porting Linux to
@@ -58,3 +62,25 @@ dev: dev@40031000 {
...
little-endian;
};
+
+Daisy-chained devices
+---------------------
+
+Many serially-attached GPIO and IIO devices are daisy-chainable. To the
+host controller, a daisy-chain appears as a single device, but the number
+of inputs and outputs it provides is the sum of inputs and outputs provided
+by all of its devices. The driver needs to know how many devices the
+daisy-chain comprises to determine the amount of data exchanged, how many
+inputs and outputs to register and so on.
+
+Optional properties:
+ - #daisy-chained-devices: Number of devices in the daisy-chain (default is 1).
+
+Example:
+gpio@0 {
+ compatible = "name";
+ reg = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ #daisy-chained-devices = <3>;
+};
diff --git a/Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt b/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
index 4ca8dd4d7e66..4ca8dd4d7e66 100644
--- a/Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt
+++ b/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
diff --git a/Documentation/devicetree/bindings/crypto/sun4i-ss.txt b/Documentation/devicetree/bindings/crypto/sun4i-ss.txt
index 5d38e9b7033f..f2dc3d9bca92 100644
--- a/Documentation/devicetree/bindings/crypto/sun4i-ss.txt
+++ b/Documentation/devicetree/bindings/crypto/sun4i-ss.txt
@@ -14,7 +14,7 @@ Optional properties:
- reset-names : must contain "ahb"
Example:
- crypto: crypto-engine@01c15000 {
+ crypto: crypto-engine@1c15000 {
compatible = "allwinner,sun4i-a10-crypto";
reg = <0x01c15000 0x1000>;
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
index 06668bca7ffc..0047b1394c70 100644
--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
@@ -68,6 +68,8 @@ Optional properties:
- adi,disable-timing-generator: Only for ADV7533. Disables the internal timing
generator. The chip will rely on the sync signals in the DSI data lanes,
rather than generate its own timings for HDMI output.
+- clocks: from common clock binding: reference to the CEC clock.
+- clock-names: from common clock binding: must be "cec".
Required nodes:
@@ -89,6 +91,8 @@ Example
reg = <39>;
interrupt-parent = <&gpio3>;
interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
+ clocks = <&cec_clock>;
+ clock-names = "cec";
adi,input-depth = <8>;
adi,input-colorspace = "rgb";
diff --git a/Documentation/devicetree/bindings/display/bridge/sii9234.txt b/Documentation/devicetree/bindings/display/bridge/sii9234.txt
new file mode 100644
index 000000000000..88041ba23d56
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/sii9234.txt
@@ -0,0 +1,49 @@
+Silicon Image SiI9234 HDMI/MHL bridge bindings
+
+Required properties:
+ - compatible : "sil,sii9234".
+ - reg : I2C address for TPI interface, use 0x39
+ - avcc33-supply : MHL/USB Switch Supply Voltage (3.3V)
+ - iovcc18-supply : I/O Supply Voltage (1.8V)
+ - avcc12-supply : TMDS Analog Supply Voltage (1.2V)
+ - cvcc12-supply : Digital Core Supply Voltage (1.2V)
+ - interrupts, interrupt-parent: interrupt specifier of INT pin
+ - reset-gpios: gpio specifier of RESET pin (active low)
+ - video interfaces: Device node can contain two video interface port
+ nodes for HDMI encoder and connector according to [1].
+ - port@0 - MHL to HDMI
+ - port@1 - MHL to connector
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+
+Example:
+ sii9234@39 {
+ compatible = "sil,sii9234";
+ reg = <0x39>;
+ avcc33-supply = <&vcc33mhl>;
+ iovcc18-supply = <&vcc18mhl>;
+ avcc12-supply = <&vsil12>;
+ cvcc12-supply = <&vsil12>;
+ reset-gpios = <&gpf3 4 GPIO_ACTIVE_LOW>;
+ interrupt-parent = <&gpf3>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mhl_to_hdmi: endpoint {
+ remote-endpoint = <&hdmi_to_mhl>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ mhl_to_connector: endpoint {
+ remote-endpoint = <&connector_to_mhl>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt b/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt
index ed5e0a7894ad..05176f1ae108 100644
--- a/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt
+++ b/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt
@@ -42,7 +42,7 @@ Optional properties:
example:
-gpu_3d: gpu@00130000 {
+gpu_3d: gpu@130000 {
compatible = "vivante,gc";
reg = <0x00130000 0x4000>;
interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/display/faraday,tve200.txt b/Documentation/devicetree/bindings/display/faraday,tve200.txt
new file mode 100644
index 000000000000..82e3bc0b7485
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/faraday,tve200.txt
@@ -0,0 +1,54 @@
+* Faraday TV Encoder TVE200
+
+Required properties:
+
+- compatible: must be one of:
+ "faraday,tve200"
+ "cortina,gemini-tvc", "faraday,tve200"
+
+- reg: base address and size of the control registers block
+
+- interrupts: contains an interrupt specifier for the interrupt
+ line from the TVE200
+
+- clock-names: should contain "PCLK" for the clock line clocking the
+ silicon and "TVE" for the 27MHz clock to the video driver
+
+- clocks: contains phandle and clock specifier pairs for the entries
+ in the clock-names property. See
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Optional properties:
+
+- resets: contains the reset line phandle for the block
+
+Required sub-nodes:
+
+- port: describes LCD panel signals, following the common binding
+ for video transmitter interfaces; see
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+ This port should have the properties:
+ reg = <0>;
+ It should have one endpoint connected to a remote endpoint where
+ the display is connected.
+
+Example:
+
+display-controller@6a000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "faraday,tve200";
+ reg = <0x6a000000 0x1000>;
+ interrupts = <13 IRQ_TYPE_EDGE_RISING>;
+ resets = <&syscon GEMINI_RESET_TVC>;
+ clocks = <&syscon GEMINI_CLK_GATE_TVC>,
+ <&syscon GEMINI_CLK_TVC>;
+ clock-names = "PCLK", "TVE";
+
+ port@0 {
+ reg = <0>;
+ display_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/imx/hdmi.txt b/Documentation/devicetree/bindings/display/imx/hdmi.txt
index 66a8f86e5d12..6d021e71c9cf 100644
--- a/Documentation/devicetree/bindings/display/imx/hdmi.txt
+++ b/Documentation/devicetree/bindings/display/imx/hdmi.txt
@@ -32,11 +32,11 @@ Optional properties
Example:
- gpr: iomuxc-gpr@020e0000 {
+ gpr: iomuxc-gpr@20e0000 {
/* ... */
};
- hdmi: hdmi@0120000 {
+ hdmi: hdmi@120000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx6q-hdmi";
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
index fa00e62e1cf6..a6671bd2c85a 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
@@ -13,16 +13,16 @@ Required properties:
- power-domains: Should be <&mmcc MDSS_GDSC>.
- clocks: Phandles to device clocks.
- clock-names: the following clocks are required:
- * "mdp_core_clk"
- * "iface_clk"
- * "bus_clk"
- * "core_mmss_clk"
- * "byte_clk"
- * "pixel_clk"
- * "core_clk"
+ * "mdp_core"
+ * "iface"
+ * "bus"
+ * "core_mmss"
+ * "byte"
+ * "pixel"
+ * "core"
For DSIv2, we need an additional clock:
- * "src_clk"
-- assigned-clocks: Parents of "byte_clk" and "pixel_clk" for the given platform.
+ * "src"
+- assigned-clocks: Parents of "byte" and "pixel" for the given platform.
- assigned-clock-parents: The Byte clock and Pixel clock PLL outputs provided
by a DSI PHY block. See [1] for details on clock bindings.
- vdd-supply: phandle to vdd regulator device node
@@ -101,7 +101,7 @@ Required properties:
- power-domains: Should be <&mmcc MDSS_GDSC>.
- clocks: Phandles to device clocks. See [1] for details on clock bindings.
- clock-names: the following clocks are required:
- * "iface_clk"
+ * "iface"
- vddio-supply: phandle to vdd-io regulator device node
Optional properties:
@@ -123,13 +123,13 @@ Example:
reg = <0xfd922800 0x200>;
power-domains = <&mmcc MDSS_GDSC>;
clock-names =
- "bus_clk",
- "byte_clk",
- "core_clk",
- "core_mmss_clk",
- "iface_clk",
- "mdp_core_clk",
- "pixel_clk";
+ "bus",
+ "byte",
+ "core",
+ "core_mmss",
+ "iface",
+ "mdp_core",
+ "pixel";
clocks =
<&mmcc MDSS_AXI_CLK>,
<&mmcc MDSS_BYTE0_CLK>,
@@ -207,7 +207,7 @@ Example:
reg = <0xfd922a00 0xd4>,
<0xfd922b00 0x2b0>,
<0xfd922d80 0x7b>;
- clock-names = "iface_clk";
+ clock-names = "iface";
clocks = <&mmcc MDSS_AHB_CLK>;
#clock-cells = <1>;
vddio-supply = <&pma8084_l12>;
diff --git a/Documentation/devicetree/bindings/display/msm/edp.txt b/Documentation/devicetree/bindings/display/msm/edp.txt
index e63032be5401..95ce19ca7bc5 100644
--- a/Documentation/devicetree/bindings/display/msm/edp.txt
+++ b/Documentation/devicetree/bindings/display/msm/edp.txt
@@ -12,11 +12,11 @@ Required properties:
- clocks: device clocks
See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
- clock-names: the following clocks are required:
- * "core_clk"
- * "iface_clk"
- * "mdp_core_clk"
- * "pixel_clk"
- * "link_clk"
+ * "core"
+ * "iface"
+ * "mdp_core"
+ * "pixel"
+ * "link"
- #clock-cells: The value should be 1.
- vdda-supply: phandle to vdda regulator device node
- lvl-vdd-supply: phandle to regulator device node which is used to supply power
@@ -41,11 +41,11 @@ Example:
interrupts = <12 0>;
power-domains = <&mmcc MDSS_GDSC>;
clock-names =
- "core_clk",
- "pixel_clk",
- "iface_clk",
- "link_clk",
- "mdp_core_clk";
+ "core",
+ "pixel",
+ "iface",
+ "link",
+ "mdp_core";
clocks =
<&mmcc MDSS_EDPAUX_CLK>,
<&mmcc MDSS_EDPPIXEL_CLK>,
diff --git a/Documentation/devicetree/bindings/display/msm/hdmi.txt b/Documentation/devicetree/bindings/display/msm/hdmi.txt
index 2d306f402d18..5f90a40da51b 100644
--- a/Documentation/devicetree/bindings/display/msm/hdmi.txt
+++ b/Documentation/devicetree/bindings/display/msm/hdmi.txt
@@ -64,9 +64,9 @@ Example:
interrupts = <GIC_SPI 79 0>;
power-domains = <&mmcc MDSS_GDSC>;
clock-names =
- "core_clk",
- "master_iface_clk",
- "slave_iface_clk";
+ "core",
+ "master_iface",
+ "slave_iface";
clocks =
<&mmcc HDMI_APP_CLK>,
<&mmcc HDMI_M_AHB_CLK>,
@@ -92,7 +92,7 @@ Example:
<0x4a00500 0x100>;
#phy-cells = <0>;
power-domains = <&mmcc MDSS_GDSC>;
- clock-names = "slave_iface_clk";
+ clock-names = "slave_iface";
clocks = <&mmcc HDMI_S_AHB_CLK>;
core-vdda-supply = <&pm8921_hdmi_mvs>;
};
diff --git a/Documentation/devicetree/bindings/display/msm/mdp5.txt b/Documentation/devicetree/bindings/display/msm/mdp5.txt
index 30c11ea83754..1b31977a68ba 100644
--- a/Documentation/devicetree/bindings/display/msm/mdp5.txt
+++ b/Documentation/devicetree/bindings/display/msm/mdp5.txt
@@ -22,16 +22,16 @@ Required properties:
Documentation/devicetree/bindings/power/power_domain.txt
- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required.
- * "iface_clk"
- * "bus_clk"
- * "vsync_clk"
+ * "iface"
+ * "bus"
+ * "vsync"
- #address-cells: number of address cells for the MDSS children. Should be 1.
- #size-cells: Should be 1.
- ranges: parent bus address space is the same as the child bus address space.
Optional properties:
- clock-names: the following clocks are optional:
- * "lut_clk"
+ * "lut"
MDP5:
Required properties:
@@ -45,10 +45,10 @@ Required properties:
through MDP block
- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required.
-- * "bus_clk"
-- * "iface_clk"
-- * "core_clk"
-- * "vsync_clk"
+- * "bus"
+- * "iface"
+- * "core"
+- * "vsync"
- ports: contains the list of output ports from MDP. These connect to interfaces
that are external to the MDP hardware, such as HDMI, DSI, EDP etc (LVDS is a
special case since it is a part of the MDP block itself).
@@ -77,7 +77,7 @@ Required properties:
Optional properties:
- clock-names: the following clocks are optional:
- * "lut_clk"
+ * "lut"
Example:
@@ -95,9 +95,9 @@ Example:
clocks = <&gcc GCC_MDSS_AHB_CLK>,
<&gcc GCC_MDSS_AXI_CLK>,
<&gcc GCC_MDSS_VSYNC_CLK>;
- clock-names = "iface_clk",
- "bus_clk",
- "vsync_clk"
+ clock-names = "iface",
+ "bus",
+ "vsync"
interrupts = <0 72 0>;
@@ -120,10 +120,10 @@ Example:
<&gcc GCC_MDSS_AXI_CLK>,
<&gcc GCC_MDSS_MDP_CLK>,
<&gcc GCC_MDSS_VSYNC_CLK>;
- clock-names = "iface_clk",
- "bus_clk",
- "core_clk",
- "vsync_clk";
+ clock-names = "iface",
+ "bus",
+ "core",
+ "vsync";
ports {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt
new file mode 100644
index 000000000000..6862028e7b2e
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt
@@ -0,0 +1,21 @@
+Orise Tech OTM8009A 3.97" 480x800 TFT LCD panel (MIPI-DSI video mode)
+
+The Orise Tech OTM8009A is a 3.97" 480x800 TFT LCD panel connected using
+a MIPI-DSI video interface. Its backlight is managed through the DSI link.
+
+Required properties:
+ - compatible: "orisetech,otm8009a"
+ - reg: the virtual channel number of a DSI peripheral
+
+Optional properties:
+ - reset-gpios: a GPIO spec for the reset pin (active low).
+
+Example:
+&dsi {
+ ...
+ panel@0 {
+ compatible = "orisetech,otm8009a";
+ reg = <0>;
+ reset-gpios = <&gpioh 7 GPIO_ACTIVE_LOW>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/raspberrypi,7inch-touchscreen.txt b/Documentation/devicetree/bindings/display/panel/raspberrypi,7inch-touchscreen.txt
new file mode 100644
index 000000000000..e9e19c059260
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/raspberrypi,7inch-touchscreen.txt
@@ -0,0 +1,49 @@
+This binding covers the official 7" (800x480) Raspberry Pi touchscreen
+panel.
+
+This DSI panel contains:
+
+- TC358762 DSI->DPI bridge
+- Atmel microcontroller on I2C for power sequencing the DSI bridge and
+ controlling backlight
+- Touchscreen controller on I2C for touch input
+
+and this binding covers the DSI display parts but not its touch input.
+
+Required properties:
+- compatible: Must be "raspberrypi,7inch-touchscreen-panel"
+- reg: Must be "45"
+- port: See panel-common.txt
+
+Example:
+
+dsi1: dsi@7e700000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ <...>
+
+ port {
+ dsi_out_port: endpoint {
+ remote-endpoint = <&panel_dsi_port>;
+ };
+ };
+};
+
+i2c_dsi: i2c {
+ compatible = "i2c-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpios = <&gpio 28 0
+ &gpio 29 0>;
+
+ lcd@45 {
+ compatible = "raspberrypi,7inch-touchscreen-panel";
+ reg = <0x45>;
+
+ port {
+ panel_dsi_port: endpoint {
+ remote-endpoint = <&dsi_out_port>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e63j0x03.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6e63j0x03.txt
new file mode 100644
index 000000000000..3f1a8392af7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e63j0x03.txt
@@ -0,0 +1,24 @@
+Samsung S6E63J0X03 1.63" 320x320 AMOLED panel (interface: MIPI-DSI command mode)
+
+Required properties:
+ - compatible: "samsung,s6e63j0x03"
+ - reg: the virtual channel number of a DSI peripheral
+ - vdd3-supply: I/O voltage supply
+ - vci-supply: voltage supply for analog circuits
+ - reset-gpios: a GPIO spec for the reset pin (active low)
+ - te-gpios: a GPIO spec for the tearing effect synchronization signal
+ gpio pin (active high)
+
+Example:
+&dsi {
+ ...
+
+ panel@0 {
+ compatible = "samsung,s6e63j0x03";
+ reg = <0>;
+ vdd3-supply = <&ldo16_reg>;
+ vci-supply = <&ldo20_reg>;
+ reset-gpios = <&gpe0 1 GPIO_ACTIVE_LOW>;
+ te-gpios = <&gpx0 6 GPIO_ACTIVE_HIGH>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.txt b/Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.txt
new file mode 100644
index 000000000000..aae57ef36cdd
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/seiko,43wvf1g.txt
@@ -0,0 +1,23 @@
+Seiko Instruments Inc. 4.3" WVGA (800 x RGB x 480) TFT with Touch-Panel
+
+Required properties:
+- compatible: should be "sii,43wvf1g".
+- "dvdd-supply": 3v3 digital regulator.
+- "avdd-supply": 5v analog regulator.
+
+Optional properties:
+- backlight: phandle for the backlight control.
+
+Example:
+
+ panel {
+ compatible = "sii,43wvf1g";
+ backlight = <&backlight_display>;
+ dvdd-supply = <&reg_lcd_3v3>;
+ avdd-supply = <&reg_lcd_5v>;
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt b/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt
new file mode 100644
index 000000000000..4c0caaf246c9
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt
@@ -0,0 +1,8 @@
+Toshiba 8.9" WXGA (1280x768) TFT LCD panel
+
+Required properties:
+- compatible: should be "toshiba,lt089ac29000"
+- power-supply: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt
new file mode 100644
index 000000000000..da6939efdb43
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt
@@ -0,0 +1,99 @@
+Rockchip RK3288 LVDS interface
+================================
+
+Required properties:
+- compatible: matching the soc type, one of
+ - "rockchip,rk3288-lvds";
+
+- reg: physical base address of the controller and length
+ of memory mapped region.
+- clocks: must include clock specifiers corresponding to entries in the
+ clock-names property.
+- clock-names: must contain "pclk_lvds"
+
+- avdd1v0-supply: regulator phandle for 1.0V analog power
+- avdd1v8-supply: regulator phandle for 1.8V analog power
+- avdd3v3-supply: regulator phandle for 3.3V analog power
+
+- rockchip,grf: phandle to the general register files syscon
+- rockchip,output: "rgb", "lvds" or "duallvds". This describes the output interface.
+
+Optional properties:
+- pinctrl-names: must contain a "lcdc" entry.
+- pinctrl-0: pin control group to be used for this controller.
+
+Required nodes:
+
+The LVDS interface has two video ports, as described by
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+Their connections are modeled using the OF graph bindings specified in
+ Documentation/devicetree/bindings/graph.txt.
+
+- video port 0 for the VOP input; the remote endpoint may be vopb or vopl
+- video port 1 for either a panel or subsequent encoder
+
+The LVDS panel is described by
+ Documentation/devicetree/bindings/display/panel/simple-panel.txt
+
+Panel required properties:
+- ports for remote LVDS output
+
+Panel optional properties:
+- data-mapping: should be "vesa-24", "jeida-24" or "jeida-18".
+This property is described in:
+ Documentation/devicetree/bindings/display/panel/panel-lvds.txt
+
+Example:
+
+lvds_panel: lvds-panel {
+ compatible = "auo,b101ean01";
+ enable-gpios = <&gpio7 21 GPIO_ACTIVE_HIGH>;
+ data-mapping = "jeida-24";
+
+ ports {
+ panel_in_lvds: endpoint {
+ remote-endpoint = <&lvds_out_panel>;
+ };
+ };
+};
+
+For Rockchip RK3288:
+
+ lvds: lvds@ff96c000 {
+ compatible = "rockchip,rk3288-lvds";
+ rockchip,grf = <&grf>;
+ reg = <0xff96c000 0x4000>;
+ clocks = <&cru PCLK_LVDS_PHY>;
+ clock-names = "pclk_lvds";
+ pinctrl-names = "lcdc";
+ pinctrl-0 = <&lcdc_ctl>;
+ avdd1v0-supply = <&vdd10_lcd>;
+ avdd1v8-supply = <&vcc18_lcd>;
+ avdd3v3-supply = <&vcca_33>;
+ rockchip,output = "rgb";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ lvds_in: port@0 {
+ reg = <0>;
+
+ lvds_in_vopb: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vopb_out_lvds>;
+ };
+ lvds_in_vopl: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vopl_out_lvds>;
+ };
+ };
+
+ lvds_out: port@1 {
+ reg = <1>;
+
+ lvds_out_panel: endpoint {
+ remote-endpoint = <&panel_in_lvds>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/simple-framebuffer.txt b/Documentation/devicetree/bindings/display/simple-framebuffer.txt
index 8c9e9f515c87..5a9ce511be88 100644
--- a/Documentation/devicetree/bindings/display/simple-framebuffer.txt
+++ b/Documentation/devicetree/bindings/display/simple-framebuffer.txt
@@ -78,7 +78,7 @@ chosen {
stdout-path = "display0";
};
-soc@01c00000 {
+soc@1c00000 {
lcdc0: lcdc@1c0c000 {
compatible = "allwinner,sun4i-a10-lcdc";
...
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index 92441086caba..50cc72ee1168 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -40,15 +40,19 @@ CEC. It is one end of the pipeline.
Required properties:
- compatible: value must be one of:
+ * allwinner,sun4i-a10-hdmi
* allwinner,sun5i-a10s-hdmi
+ * allwinner,sun6i-a31-hdmi
- reg: base address and size of memory-mapped region
- interrupts: interrupt associated to this IP
- clocks: phandles to the clocks feeding the HDMI encoder
* ahb: the HDMI interface clock
* mod: the HDMI module clock
+ * ddc: the HDMI ddc clock (A31 only)
* pll-0: the first video PLL
* pll-1: the second video PLL
- clock-names: the clock names mentioned above
+ - resets: phandle to the reset control for the HDMI encoder (A31 only)
- dmas: phandles to the DMA channels used by the HDMI encoder
* ddc-tx: The channel for DDC transmission
* ddc-rx: The channel for DDC reception
@@ -83,9 +87,11 @@ The TCON acts as a timing controller for RGB, LVDS and TV interfaces.
Required properties:
- compatible: value must be either:
+ * allwinner,sun4i-a10-tcon
* allwinner,sun5i-a13-tcon
* allwinner,sun6i-a31-tcon
* allwinner,sun6i-a31s-tcon
+ * allwinner,sun7i-a20-tcon
* allwinner,sun8i-a33-tcon
* allwinner,sun8i-v3s-tcon
- reg: base address and size of memory-mapped region
@@ -150,8 +156,10 @@ system.
Required properties:
- compatible: value must be one of:
+ * allwinner,sun4i-a10-display-backend
* allwinner,sun5i-a13-display-backend
* allwinner,sun6i-a31-display-backend
+ * allwinner,sun7i-a20-display-backend
* allwinner,sun8i-a33-display-backend
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
@@ -182,8 +190,10 @@ deinterlacing and color space conversion.
Required properties:
- compatible: value must be one of:
+ * allwinner,sun4i-a10-display-frontend
* allwinner,sun5i-a13-display-frontend
* allwinner,sun6i-a31-display-frontend
+ * allwinner,sun7i-a20-display-frontend
* allwinner,sun8i-a33-display-frontend
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
@@ -228,10 +238,12 @@ extra node.
Required properties:
- compatible: value must be one of:
+ * allwinner,sun4i-a10-display-engine
* allwinner,sun5i-a10s-display-engine
* allwinner,sun5i-a13-display-engine
* allwinner,sun6i-a31-display-engine
* allwinner,sun6i-a31s-display-engine
+ * allwinner,sun7i-a20-display-engine
* allwinner,sun8i-a33-display-engine
* allwinner,sun8i-v3s-display-engine
@@ -266,7 +278,7 @@ connector {
};
};
-hdmi: hdmi@01c16000 {
+hdmi: hdmi@1c16000 {
compatible = "allwinner,sun5i-a10s-hdmi";
reg = <0x01c16000 0x1000>;
interrupts = <58>;
@@ -305,7 +317,7 @@ hdmi: hdmi@01c16000 {
};
};
-tve0: tv-encoder@01c0a000 {
+tve0: tv-encoder@1c0a000 {
compatible = "allwinner,sun4i-a10-tv-encoder";
reg = <0x01c0a000 0x1000>;
clocks = <&ahb_gates 34>;
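The in-file example above keeps the A10s encoder; a minimal sketch of an A31
node using the newly documented "ddc" clock and reset line could look like the
following (the clock/reset indices, interrupt number and DMA channel numbers
are placeholders, not values taken from this patch):

hdmi: hdmi@1c16000 {
	compatible = "allwinner,sun6i-a31-hdmi";
	reg = <0x01c16000 0x1000>;
	interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
	clocks = <&ccu 61>, <&ccu 110>, <&ccu 62>, <&ccu 119>, <&ccu 120>;
	clock-names = "ahb", "mod", "ddc", "pll-0", "pll-1";
	resets = <&ccu 36>;		/* placeholder reset specifier */
	dmas = <&dma 13>, <&dma 13>;	/* placeholder channels */
	dma-names = "ddc-tx", "ddc-rx";
	/* ports { ... } as in the A10s example above */
};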
diff --git a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
index 74e1e8add5a1..844e0103fb0d 100644
--- a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
@@ -3,6 +3,10 @@ NVIDIA Tegra host1x
Required properties:
- compatible: "nvidia,tegra<chip>-host1x"
- reg: Physical base address and length of the controller's registers.
+ For pre-Tegra186, one entry describing the whole register area.
+ For Tegra186, one entry for each entry in reg-names:
+ "vm" - VM region assigned to Linux
+ "hypervisor" - Hypervisor region (only if Linux acts as hypervisor)
- interrupts: The interrupt outputs from the controller.
- #address-cells: The number of cells used to represent physical base addresses
in the host1x address space. Should be 1.
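The file's full example sits outside this hunk; a minimal sketch of a Tegra186
node with the split register space described above (addresses and the interrupt
number are illustrative placeholders, other required properties omitted):

host1x@13e00000 {
	compatible = "nvidia,tegra186-host1x";
	reg = <0x13e10000 0x10000>,	/* "vm" region, placeholder address */
	      <0x13e00000 0x10000>;	/* "hypervisor" region, placeholder address */
	reg-names = "vm", "hypervisor";
	interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
	#address-cells = <1>;
	#size-cells = <1>;
};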
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
index 1be6941ac1e5..f3d1f151ba80 100644
--- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
@@ -3,6 +3,8 @@
Required Properties:
-compatible: "renesas,<soctype>-usb-dmac", "renesas,usb-dmac" as fallback.
Examples with soctypes are:
+ - "renesas,r8a7743-usb-dmac" (RZ/G1M)
+ - "renesas,r8a7745-usb-dmac" (RZ/G1E)
- "renesas,r8a7790-usb-dmac" (R-Car H2)
- "renesas,r8a7791-usb-dmac" (R-Car M2-W)
- "renesas,r8a7793-usb-dmac" (R-Car M2-N)
diff --git a/Documentation/devicetree/bindings/dma/sprd-dma.txt b/Documentation/devicetree/bindings/dma/sprd-dma.txt
new file mode 100644
index 000000000000..7a10fea2e51b
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/sprd-dma.txt
@@ -0,0 +1,41 @@
+* Spreadtrum DMA controller
+
+This binding follows the generic DMA bindings defined in dma.txt.
+
+Required properties:
+- compatible: Should be "sprd,sc9860-dma".
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain one interrupt shared by all channels.
+- #dma-cells: must be <1>. Used to represent the number of integer
+ cells in the dmas property of a client device.
+- #dma-channels : Number of DMA channels supported. Should be 32.
+- clock-names: Should contain the clock of the DMA controller.
+- clocks: Should contain a clock specifier for each entry in clock-names.
+
+Example:
+
+Controller:
+apdma: dma-controller@20100000 {
+ compatible = "sprd,sc9860-dma";
+ reg = <0x20100000 0x4000>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ #dma-channels = <32>;
+ clock-names = "enable";
+ clocks = <&clk_ap_ahb_gates 5>;
+};
+
+
+Client:
+DMA clients connected to the Spreadtrum DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel.
+The two cells in order are:
+1. A phandle pointing to the DMA controller.
+2. The channel id.
+
+spi0: spi@70a00000 {
+ ...
+ dma-names = "rx_chn", "tx_chn";
+ dmas = <&apdma 11>, <&apdma 12>;
+ ...
+};
diff --git a/Documentation/devicetree/bindings/dma/stm32-dma.txt b/Documentation/devicetree/bindings/dma/stm32-dma.txt
index 4408af693d0c..0b55718bf889 100644
--- a/Documentation/devicetree/bindings/dma/stm32-dma.txt
+++ b/Documentation/devicetree/bindings/dma/stm32-dma.txt
@@ -13,6 +13,7 @@ Required properties:
- #dma-cells : Must be <4>. See DMA client paragraph for more details.
Optional properties:
+- dma-requests : Number of DMA requests supported.
- resets: Reference to a reset controller asserting the DMA controller
- st,mem2mem: boolean; if defined, it indicates that the controller supports
memory-to-memory transfer
@@ -34,12 +35,13 @@ Example:
#dma-cells = <4>;
st,mem2mem;
resets = <&rcc 150>;
+ dma-requests = <8>;
};
* DMA client
DMA clients connected to the STM32 DMA controller must use the format
-described in the dma.txt file, using a five-cell specifier for each
+described in the dma.txt file, using a four-cell specifier for each
channel: a phandle to the DMA controller plus the following four integer cells:
1. The channel id
@@ -71,7 +73,7 @@ channel: a phandle to the DMA controller plus the following four integer cells:
Example:
usart1: serial@40011000 {
- compatible = "st,stm32-usart", "st,stm32-uart";
+ compatible = "st,stm32-uart";
reg = <0x40011000 0x400>;
interrupts = <37>;
clocks = <&clk_pclk2>;
diff --git a/Documentation/devicetree/bindings/dma/stm32-dmamux.txt b/Documentation/devicetree/bindings/dma/stm32-dmamux.txt
new file mode 100644
index 000000000000..1b893b235507
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/stm32-dmamux.txt
@@ -0,0 +1,84 @@
+STM32 DMA MUX (DMA request router)
+
+Required properties:
+- compatible: "st,stm32h7-dmamux"
+- reg: Memory map for accessing module
+- #dma-cells: Should be set to <3>.
+ First parameter is the request line number.
+ Second is the DMA channel configuration.
+ Third is the FIFO threshold.
+ For more details about the three cells, please see
+ stm32-dma.txt documentation binding file
+- dma-masters: Phandle pointing to the DMA controllers.
+ Several controllers are allowed. Only "st,stm32-dma" compatible
+ controllers are supported.
+
+Optional properties:
+- dma-channels : Number of DMA requests supported.
+- dma-requests : Number of DMAMUX requests supported.
+- resets: Reference to a reset controller asserting the DMA controller
+- clocks: Input clock of the DMAMUX instance.
+
+Example:
+
+/* DMA controller 1 */
+dma1: dma-controller@40020000 {
+ compatible = "st,stm32-dma";
+ reg = <0x40020000 0x400>;
+ interrupts = <11>,
+ <12>,
+ <13>,
+ <14>,
+ <15>,
+ <16>,
+ <17>,
+ <47>;
+ clocks = <&timer_clk>;
+ #dma-cells = <4>;
+ st,mem2mem;
+ resets = <&rcc 150>;
+ dma-channels = <8>;
+ dma-requests = <8>;
+};
+
+/* DMA controller 2 */
+dma2: dma@40020400 {
+ compatible = "st,stm32-dma";
+ reg = <0x40020400 0x400>;
+ interrupts = <56>,
+ <57>,
+ <58>,
+ <59>,
+ <60>,
+ <68>,
+ <69>,
+ <70>;
+ clocks = <&timer_clk>;
+ #dma-cells = <4>;
+ st,mem2mem;
+ resets = <&rcc 150>;
+ dma-channels = <8>;
+ dma-requests = <8>;
+};
+
+/* DMA mux */
+dmamux1: dma-router@40020800 {
+ compatible = "st,stm32h7-dmamux";
+ reg = <0x40020800 0x3c>;
+ #dma-cells = <3>;
+ dma-requests = <128>;
+ dma-channels = <16>;
+ dma-masters = <&dma1 &dma2>;
+ clocks = <&timer_clk>;
+};
+
+/* DMA client */
+usart1: serial@40011000 {
+ compatible = "st,stm32-usart", "st,stm32-uart";
+ reg = <0x40011000 0x400>;
+ interrupts = <37>;
+ clocks = <&timer_clk>;
+ dmas = <&dmamux1 41 0x414 0>,
+ <&dmamux1 42 0x414 0>;
+ dma-names = "rx", "tx";
+};
diff --git a/Documentation/devicetree/bindings/dma/stm32-mdma.txt b/Documentation/devicetree/bindings/dma/stm32-mdma.txt
new file mode 100644
index 000000000000..d18772d6bc65
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/stm32-mdma.txt
@@ -0,0 +1,94 @@
+* STMicroelectronics STM32 MDMA controller
+
+The STM32 MDMA is a general-purpose direct memory access controller capable of
+supporting 64 independent DMA channels with 256 HW requests.
+
+Required properties:
+- compatible: Should be "st,stm32h7-mdma"
+- reg: Should contain MDMA registers location and length. This should include
+ all of the per-channel registers.
+- interrupts: Should contain the MDMA interrupt.
+- clocks: Should contain the input clock of the DMA instance.
+- resets: Reference to a reset controller asserting the DMA controller.
+- #dma-cells : Must be <5>. See DMA client paragraph for more details.
+
+Optional properties:
+- dma-channels: Number of DMA channels supported by the controller.
+- dma-requests: Number of DMA request signals supported by the controller.
+- st,ahb-addr-masks: Array of u32 masks listing memory devices addressed via
+ the AHB bus.
+
+Example:
+
+ mdma1: dma@52000000 {
+ compatible = "st,stm32h7-mdma";
+ reg = <0x52000000 0x1000>;
+ interrupts = <122>;
+ clocks = <&timer_clk>;
+ resets = <&rcc 992>;
+ #dma-cells = <5>;
+ dma-channels = <16>;
+ dma-requests = <32>;
+ st,ahb-addr-masks = <0x20000000>, <0x00000000>;
+ };
+
+* DMA client
+
+DMA clients connected to the STM32 MDMA controller must use the format
+described in the dma.txt file, using a five-cell specifier for each channel:
+a phandle to the MDMA controller plus the following five integer cells:
+
+1. The request line number
+2. The priority level
+ 0x00: Low
+ 0x01: Medium
+ 0x10: High
+ 0x11: Very high
+3. A 32bit mask specifying the DMA channel configuration
+ -bit 0-1: Source increment mode
+ 0x00: Source address pointer is fixed
+ 0x10: Source address pointer is incremented after each data transfer
+ 0x11: Source address pointer is decremented after each data transfer
+ -bit 2-3: Destination increment mode
+ 0x00: Destination address pointer is fixed
+ 0x10: Destination address pointer is incremented after each data
+ transfer
+ 0x11: Destination address pointer is decremented after each data
+ transfer
+ -bit 8-9: Source increment offset size
+ 0x00: byte (8bit)
+ 0x01: half-word (16bit)
+ 0x10: word (32bit)
+ 0x11: double-word (64bit)
+ -bit 10-11: Destination increment offset size
+ 0x00: byte (8bit)
+ 0x01: half-word (16bit)
+ 0x10: word (32bit)
+ 0x11: double-word (64bit)
+ -bit 18-25: The number of bytes to be transferred in a single transfer
+ (min = 1 byte, max = 128 bytes)
+ -bit 28-29: Trigger Mode
+ 0x00: Each MDMA request triggers a buffer transfer (max 128 bytes)
+ 0x01: Each MDMA request triggers a block transfer (max 64K bytes)
+ 0x10: Each MDMA request triggers a repeated block transfer
+ 0x11: Each MDMA request triggers a linked list transfer
+4. A 32bit value specifying the register to be used to acknowledge the request
+ if no HW ack signal is used by the MDMA client
+5. A 32bit mask specifying the value to be written to acknowledge the request
+ if no HW ack signal is used by the MDMA client
+
+Example:
+
+ i2c4: i2c@5c002000 {
+ compatible = "st,stm32f7-i2c";
+ reg = <0x5c002000 0x400>;
+ interrupts = <95>,
+ <96>;
+ clocks = <&timer_clk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ dmas = <&mdma1 36 0x0 0x40008 0x0 0x0>,
+ <&mdma1 37 0x0 0x40002 0x0 0x0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/dma/sun4i-dma.txt b/Documentation/devicetree/bindings/dma/sun4i-dma.txt
index 3b484380c56a..8ad556aca70b 100644
--- a/Documentation/devicetree/bindings/dma/sun4i-dma.txt
+++ b/Documentation/devicetree/bindings/dma/sun4i-dma.txt
@@ -12,7 +12,7 @@ Required properties:
second cell holding the request line number.
Example:
- dma: dma-controller@01c02000 {
+ dma: dma-controller@1c02000 {
compatible = "allwinner,sun4i-a10-dma";
reg = <0x01c02000 0x1000>;
interrupts = <27>;
@@ -32,7 +32,7 @@ The three cells in order are:
3. The port ID as specified in the datasheet
Example:
- spi2: spi@01c17000 {
+ spi2: spi@1c17000 {
compatible = "allwinner,sun4i-a10-spi";
reg = <0x01c17000 0x1000>;
interrupts = <0 12 4>;
diff --git a/Documentation/devicetree/bindings/dma/sun6i-dma.txt b/Documentation/devicetree/bindings/dma/sun6i-dma.txt
index 98fbe1a5c6dd..d4c34774d626 100644
--- a/Documentation/devicetree/bindings/dma/sun6i-dma.txt
+++ b/Documentation/devicetree/bindings/dma/sun6i-dma.txt
@@ -27,6 +27,32 @@ Example:
#dma-cells = <1>;
};
+------------------------------------------------------------------------------
+For A64 DMA controller:
+
+Required properties:
+- compatible: "allwinner,sun50i-a64-dma"
+- dma-channels: Number of DMA channels supported by the controller.
+ Refer to Documentation/devicetree/bindings/dma/dma.txt
+- all properties above, i.e. reg, interrupts, clocks, resets and #dma-cells
+
+Optional properties:
+- dma-requests: Number of DMA request signals supported by the controller.
+ Refer to Documentation/devicetree/bindings/dma/dma.txt
+
+Example:
+ dma: dma-controller@1c02000 {
+ compatible = "allwinner,sun50i-a64-dma";
+ reg = <0x01c02000 0x1000>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_DMA>;
+ dma-channels = <8>;
+ dma-requests = <27>;
+ resets = <&ccu RST_BUS_DMA>;
+ #dma-cells = <1>;
+ };
+------------------------------------------------------------------------------
+
Clients:
DMA clients connected to the A31 DMA controller must use the format
@@ -38,7 +64,7 @@ The two cells in order are:
2. The port ID as specified in the datasheet
Example:
-spi2: spi@01c6a000 {
+spi2: spi@1c6a000 {
compatible = "allwinner,sun6i-a31-spi";
reg = <0x01c6a000 0x1000>;
interrupts = <0 67 4>;
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 41f0c1a07c56..66026dcf53e1 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -142,7 +142,7 @@ mcasp0: mcasp@48038000 {
};
2.
-edma1: edma@02728000 {
+edma1: edma@2728000 {
compatible = "ti,k2g-edma3-tpcc", "ti,edma3-tpcc";
reg = <0x02728000 0x8000>;
reg-names = "edma3_cc";
@@ -165,13 +165,13 @@ edma1: edma@02728000 {
power-domains = <&k2g_pds 0x4f>;
};
-edma1_tptc0: tptc@027b0000 {
+edma1_tptc0: tptc@27b0000 {
compatible = "ti,k2g-edma3-tptc", "ti,edma3-tptc";
reg = <0x027b0000 0x400>;
power-domains = <&k2g_pds 0x4f>;
};
-edma1_tptc1: tptc@027b8000 {
+edma1_tptc1: tptc@27b8000 {
compatible = "ti, k2g-edma3-tptc", "ti,edma3-tptc";
reg = <0x027b8000 0x400>;
power-domains = <&k2g_pds 0x4f>;
diff --git a/Documentation/devicetree/bindings/dma/zxdma.txt b/Documentation/devicetree/bindings/dma/zxdma.txt
index 3207ceb04d0b..abec59f35fde 100644
--- a/Documentation/devicetree/bindings/dma/zxdma.txt
+++ b/Documentation/devicetree/bindings/dma/zxdma.txt
@@ -26,7 +26,7 @@ Controller:
Client:
Use specific request line passing from dmax
For example, spdif0 tx channel request line is 4
- spdif0: spdif0@0b004000 {
+ spdif0: spdif0@b004000 {
#sound-dai-cells = <0>;
compatible = "zte,zx296702-spdif";
reg = <0x0b004000 0x1000>;
diff --git a/Documentation/devicetree/bindings/eeprom/eeprom.txt b/Documentation/devicetree/bindings/eeprom/eeprom.txt
index afc04589eadf..27f2bc15298a 100644
--- a/Documentation/devicetree/bindings/eeprom/eeprom.txt
+++ b/Documentation/devicetree/bindings/eeprom/eeprom.txt
@@ -36,6 +36,8 @@ Optional properties:
- read-only: this parameterless property disables writes to the eeprom
+ - size: total eeprom size in bytes
+
Example:
eeprom@52 {
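The rest of the in-file example is cut off by the diff context; a minimal
sketch of a node using the new property, with the compatible string and size
chosen purely for illustration:

eeprom@52 {
	compatible = "atmel,24c32";
	reg = <0x52>;
	size = <4096>;		/* total EEPROM size in bytes */
};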
diff --git a/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt b/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt
index e821e16ad65b..0c10802c8327 100644
--- a/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt
+++ b/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt
@@ -66,7 +66,7 @@ See ".../sram/sram.txt" for the bindings.
Example:
-hsp_top0: hsp@03c00000 {
+hsp_top0: hsp@3c00000 {
...
#mbox-cells = <2>;
};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-dsp-keystone.txt b/Documentation/devicetree/bindings/gpio/gpio-dsp-keystone.txt
index 6c7e6c7302f5..0423699d74c7 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-dsp-keystone.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-dsp-keystone.txt
@@ -25,7 +25,7 @@ Please refer to gpio.txt in this directory for details of the common GPIO
bindings used by client devices.
Example:
- dspgpio0: keystone_dsp_gpio@02620240 {
+ dspgpio0: keystone_dsp_gpio@2620240 {
compatible = "ti,keystone-dsp-gpio";
ti,syscon-dev = <&devctrl 0x240>;
gpio-controller;
diff --git a/Documentation/devicetree/bindings/gpio/gpio-max3191x.txt b/Documentation/devicetree/bindings/gpio/gpio-max3191x.txt
new file mode 100644
index 000000000000..b3a6444b8f45
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-max3191x.txt
@@ -0,0 +1,59 @@
+GPIO driver for Maxim MAX3191x industrial serializer
+
+Required properties:
+ - compatible: Must be one of:
+ "maxim,max31910"
+ "maxim,max31911"
+ "maxim,max31912"
+ "maxim,max31913"
+ "maxim,max31953"
+ "maxim,max31963"
+ - reg: Chip select number.
+ - gpio-controller: Marks the device node as a GPIO controller.
+ - #gpio-cells: Should be two. For consumer use see gpio.txt.
+
+Optional properties:
+ - #daisy-chained-devices:
+ Number of chips in the daisy-chain (default is 1).
+ - maxim,modesel-gpios: GPIO pins to configure modesel of each chip.
+ The number of GPIOs must equal "#daisy-chained-devices"
+ (if each chip is driven by a separate pin) or 1
+ (if all chips are wired to the same pin).
+ - maxim,fault-gpios: GPIO pins to read fault of each chip.
+ The number of GPIOs must equal "#daisy-chained-devices"
+ or 1.
+ - maxim,db0-gpios: GPIO pins to configure debounce of each chip.
+ The number of GPIOs must equal "#daisy-chained-devices"
+ or 1.
+ - maxim,db1-gpios: GPIO pins to configure debounce of each chip.
+ The number of GPIOs must equal "maxim,db0-gpios".
+ - maxim,modesel-8bit: Boolean whether the modesel pin of the chips is
+ pulled high (8-bit mode). Use this if the modesel pin
+ is hardwired and consequently "maxim,modesel-gpios"
+ cannot be specified. By default if neither this nor
+ "maxim,modesel-gpios" is given, the driver assumes
+ that modesel is pulled low (16-bit mode).
+ - maxim,ignore-undervoltage:
+ Boolean whether to ignore undervoltage alarms signaled
+ by the "maxim,fault-gpios" or by the status byte
+ (in 16-bit mode). Use this if the chips are powered
+ through 5VOUT instead of VCC24V, in which case they
+ will constantly signal undervoltage.
+
+For other required and optional properties of SPI slave nodes please refer to
+../spi/spi-bus.txt.
+
+Example:
+ gpio@0 {
+ compatible = "maxim,max31913";
+ reg = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ maxim,modesel-gpios = <&gpio2 23>;
+ maxim,fault-gpios = <&gpio2 24 GPIO_ACTIVE_LOW>;
+ maxim,db0-gpios = <&gpio2 25>;
+ maxim,db1-gpios = <&gpio2 26>;
+
+ spi-max-frequency = <25000000>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-tz1090-pdc.txt b/Documentation/devicetree/bindings/gpio/gpio-tz1090-pdc.txt
index 1fd98ffa8cb7..528f5ef5a893 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-tz1090-pdc.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-tz1090-pdc.txt
@@ -30,7 +30,7 @@ Optional properties:
Example:
- pdc_gpios: gpio-controller@02006500 {
+ pdc_gpios: gpio-controller@2006500 {
gpio-controller;
#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/gpio/gpio-tz1090.txt b/Documentation/devicetree/bindings/gpio/gpio-tz1090.txt
index 174cdf309170..b05a90e0ab29 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-tz1090.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-tz1090.txt
@@ -59,7 +59,7 @@ Required properties:
Example:
- gpios: gpio-controller@02005800 {
+ gpios: gpio-controller@2005800 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "img,tz1090-gpio";
diff --git a/Documentation/devicetree/bindings/gpio/gpio-uniphier.txt b/Documentation/devicetree/bindings/gpio/gpio-uniphier.txt
new file mode 100644
index 000000000000..fed9158dd913
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-uniphier.txt
@@ -0,0 +1,52 @@
+UniPhier GPIO controller
+
+Required properties:
+- compatible: Should be "socionext,uniphier-gpio".
+- reg: Specifies offset and length of the register set for the device.
+- gpio-controller: Marks the device node as a GPIO controller.
+- #gpio-cells: Should be 2. The first cell is the pin number and the second
+ cell is used to specify optional parameters.
+- interrupt-parent: Specifies the parent interrupt controller.
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells: Should be 2. The first cell defines the interrupt number.
+ Bits [3:0] of the second cell specify the trigger type as follows:
+ 1 = low-to-high edge triggered
+ 2 = high-to-low edge triggered
+ 4 = active high level-sensitive
+ 8 = active low level-sensitive
+ Valid combinations are 1, 2, 3, 4, 8.
+- ngpios: Specifies the number of GPIO lines.
+- gpio-ranges: Mapping to pin controller pins (as described in gpio.txt)
+- socionext,interrupt-ranges: Specifies an interrupt number mapping between
+ this GPIO controller and its interrupt parent, as an arbitrary number
+ of <child-interrupt-base parent-interrupt-base length> triplets.
+
+Optional properties:
+- gpio-ranges-group-names: Used for named gpio ranges (as described in gpio.txt)
+
+Example:
+ gpio: gpio@55000000 {
+ compatible = "socionext,uniphier-gpio";
+ reg = <0x55000000 0x200>;
+ interrupt-parent = <&aidet>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 0 0>;
+ gpio-ranges-group-names = "gpio_range";
+ ngpios = <248>;
+ socionext,interrupt-ranges = <0 48 16>, <16 154 5>, <21 217 3>;
+ };
+
+Consumer Example:
+
+ sdhci0_pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpio UNIPHIER_GPIO_PORT(29, 4) GPIO_ACTIVE_LOW>;
+ };
+
+Please note UNIPHIER_GPIO_PORT(29, 4) represents PORT294 in the SoC document.
+Unfortunately, only the one's place is octal in the port numbering. (That is,
+PORT 8, 9, 18, 19, 28, 29, ... are missing.) UNIPHIER_GPIO_PORT() is a helper
+macro to calculate 29 * 8 + 4.
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index 51c86f69995e..a7ac460ad657 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -14,6 +14,8 @@ Required Properties:
- "renesas,gpio-r8a7794": for R8A7794 (R-Car E2) compatible GPIO controller.
- "renesas,gpio-r8a7795": for R8A7795 (R-Car H3) compatible GPIO controller.
- "renesas,gpio-r8a7796": for R8A7796 (R-Car M3-W) compatible GPIO controller.
+ - "renesas,gpio-r8a77970": for R8A77970 (R-Car V3M) compatible GPIO controller.
+ - "renesas,gpio-r8a77995": for R8A77995 (R-Car D3) compatible GPIO controller.
- "renesas,rcar-gen1-gpio": for a generic R-Car Gen1 GPIO controller.
- "renesas,rcar-gen2-gpio": for a generic R-Car Gen2 or RZ/G1 GPIO controller.
- "renesas,rcar-gen3-gpio": for a generic R-Car Gen3 GPIO controller.
diff --git a/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt b/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
index 4d6c8cdc8586..4a75da7051bd 100644
--- a/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
@@ -29,6 +29,7 @@ controller.
- interrupts : The interrupt to the parent controller raised when GPIOs
generate the interrupts.
- snps,nr-gpios : The number of pins in the port, a single cell.
+- resets : Reset line for the controller.
Example:
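The file's example follows below this hunk and is not updated here; a minimal
sketch of a controller node carrying the new resets line (the reset specifier
is a placeholder):

gpio: gpio@20000 {
	compatible = "snps,dw-apb-gpio";
	reg = <0x20000 0x1000>;
	#address-cells = <1>;
	#size-cells = <0>;
	resets = <&gpio_rst>;	/* placeholder reset specifier */

	porta: gpio-port@0 {
		compatible = "snps,dw-apb-gpio-port";
		reg = <0>;
		gpio-controller;
		#gpio-cells = <2>;
		snps,nr-gpios = <32>;
	};
};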
diff --git a/Documentation/devicetree/bindings/hsi/omap-ssi.txt b/Documentation/devicetree/bindings/hsi/omap-ssi.txt
index b8eca3c7810d..955e335e7e56 100644
--- a/Documentation/devicetree/bindings/hsi/omap-ssi.txt
+++ b/Documentation/devicetree/bindings/hsi/omap-ssi.txt
@@ -1,10 +1,12 @@
OMAP SSI controller bindings
-OMAP Synchronous Serial Interface (SSI) controller implements a legacy
-variant of MIPI's High Speed Synchronous Serial Interface (HSI).
+OMAP3's Synchronous Serial Interface (SSI) controller implements a
+legacy variant of MIPI's High Speed Synchronous Serial Interface (HSI),
+while the controller found inside OMAP4 is supposed to be fully compliant
+with the HSI standard.
Required properties:
-- compatible: Should include "ti,omap3-ssi".
+- compatible: Should include "ti,omap3-ssi" or "ti,omap4-hsi"
- reg-names: Contains the values "sys" and "gdd" (in this order).
- reg: Contains a matching register specifier for each entry
in reg-names.
@@ -27,6 +29,7 @@ Each port is represented as a sub-node of the ti,omap3-ssi device.
Required Port sub-node properties:
- compatible: Should be set to the following value
ti,omap3-ssi-port (applicable to OMAP34xx devices)
+ ti,omap4-hsi-port (applicable to OMAP44xx devices)
- reg-names: Contains the values "tx" and "rx" (in this order).
- reg: Contains a matching register specifier for each entry
in reg-names.
@@ -38,6 +41,10 @@ Required Port sub-node properties:
property. If it's missing the port will not be
enabled.
+Optional properties:
+- ti,hwmods: Shall contain TI interconnect module name if needed
+ by the SoC
+
Example for Nokia N900:
ssi-controller@48058000 {
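The N900 example is OMAP3-based and truncated by the diff context; a sketch of
how the new OMAP4 compatibles and the optional ti,hwmods property might be used
(addresses and the hwmod name are assumptions; interrupt and DMA properties are
omitted):

ssi-controller@4a058000 {
	compatible = "ti,omap4-hsi";
	reg-names = "sys", "gdd";
	reg = <0x4a058000 0x1000>,
	      <0x4a059000 0x1000>;
	ti,hwmods = "hsi";		/* assumed hwmod name */

	ssi-port@4a05a000 {
		compatible = "ti,omap4-hsi-port";
		reg-names = "tx", "rx";
		reg = <0x4a05a000 0x800>,
		      <0x4a05a800 0x800>;
	};
};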
diff --git a/Documentation/devicetree/bindings/gpio/gpio-fan.txt b/Documentation/devicetree/bindings/hwmon/gpio-fan.txt
index 439a7430fc68..439a7430fc68 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-fan.txt
+++ b/Documentation/devicetree/bindings/hwmon/gpio-fan.txt
diff --git a/Documentation/devicetree/bindings/hwmon/max1619.txt b/Documentation/devicetree/bindings/hwmon/max1619.txt
new file mode 100644
index 000000000000..c70dbbe1e56f
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/max1619.txt
@@ -0,0 +1,12 @@
+Bindings for MAX1619 Temperature Sensor
+
+Required properties:
+- compatible : "maxim,max1619"
+- reg : I2C address, one of 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c,
+ 0x4d, or 0x4e
+
+Example:
+ temp@4c {
+ compatible = "maxim,max1619";
+ reg = <0x4c>;
+ };
diff --git a/Documentation/devicetree/bindings/hwmon/max31785.txt b/Documentation/devicetree/bindings/hwmon/max31785.txt
new file mode 100644
index 000000000000..106e08c56aaa
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/max31785.txt
@@ -0,0 +1,22 @@
+Bindings for the Maxim MAX31785 Intelligent Fan Controller
+==========================================================
+
+Reference:
+
+https://datasheets.maximintegrated.com/en/ds/MAX31785.pdf
+
+The Maxim MAX31785 is a PMBus device providing closed-loop, multi-channel fan
+management with temperature and remote voltage sensing. Various fan control
+features are provided, including PWM frequency control, temperature hysteresis,
+dual tachometer measurements, and fan health monitoring.
+
+Required properties:
+- compatible : One of "maxim,max31785" or "maxim,max31785a"
+- reg : I2C address, one of 0x52, 0x53, 0x54, 0x55.
+
+Example:
+
+ fans@52 {
+ compatible = "maxim,max31785";
+ reg = <0x52>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt b/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
index bd6480b19535..e7106bfc1f13 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
@@ -7,7 +7,9 @@ Required Properties:
- compatible : should be "aspeed,ast2400-i2c-bus"
or "aspeed,ast2500-i2c-bus"
- clocks : root clock of bus, should reference the APB
- clock
+ clock in the second cell
+- resets : phandle to reset controller with the reset number in
+ the second cell
- interrupts : interrupt number
- interrupt-parent : interrupt controller for bus, should reference a
aspeed,ast2400-i2c-ic or aspeed,ast2500-i2c-ic
@@ -40,7 +42,8 @@ i2c {
#interrupt-cells = <1>;
reg = <0x40 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
- clocks = <&clk_apb>;
+ clocks = <&syscon ASPEED_CLK_APB>;
+ resets = <&syscon ASPEED_RESET_I2C>;
bus-frequency = <100000>;
interrupts = <0>;
interrupt-parent = <&i2c_ic>;
diff --git a/Documentation/devicetree/bindings/i2c/i2c-axxia.txt b/Documentation/devicetree/bindings/i2c/i2c-axxia.txt
index 2296d782b4c2..7d53a2b79553 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-axxia.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-axxia.txt
@@ -17,7 +17,7 @@ Optional properties :
Example :
-i2c@02010084000 {
+i2c@2010084000 {
compatible = "lsi,api2c";
device_type = "i2c";
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/i2c/i2c-davinci.txt b/Documentation/devicetree/bindings/i2c/i2c-davinci.txt
index 5b123e0e4cc2..64e6e656c345 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-davinci.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-davinci.txt
@@ -6,6 +6,18 @@ davinci/keystone i2c interface contains.
Required properties:
- compatible: "ti,davinci-i2c" or "ti,keystone-i2c";
- reg : Offset and length of the register set for the device
+- clocks: I2C functional clock phandle.
+ For 66AK2G this property should be set per binding,
+ Documentation/devicetree/bindings/clock/ti,sci-clk.txt
+
+SoC-specific Required Properties:
+
+The following are mandatory properties for Keystone 2 66AK2G SoCs only:
+
+- power-domains: Should contain a phandle to a PM domain provider node
+ and an args specifier containing the I2C device id
+ value. This property is as per the binding,
+ Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
Recommended properties :
- interrupts : standard interrupt property.
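For 66AK2G, a node combining the new clocks and power-domains properties
might look like the sketch below. This is only an illustrative sketch: the
&k2g_clks and &k2g_pds phandle labels, the register address, the interrupt
number and the TI-SCI device/clock ID values are assumptions, not taken from
the binding text above.

	i2c0: i2c@2530000 {
		compatible = "ti,keystone-i2c";
		reg = <0x02530000 0x400>;
		/* TI-SCI clock specifier; device and clock IDs are illustrative */
		clocks = <&k2g_clks 0x003a 0>;
		/* TI-SCI PM domain specifier; device ID is illustrative */
		power-domains = <&k2g_pds 0x003a>;
		interrupts = <GIC_SPI 88 IRQ_TYPE_EDGE_RISING>;
		#address-cells = <1>;
		#size-cells = <0>;
	};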
diff --git a/Documentation/devicetree/bindings/i2c/i2c-gpio.txt b/Documentation/devicetree/bindings/i2c/i2c-gpio.txt
index 4f8ec947c6bd..38a05562d1d2 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-gpio.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-gpio.txt
@@ -2,25 +2,39 @@ Device-Tree bindings for i2c gpio driver
Required properties:
- compatible = "i2c-gpio";
- - gpios: sda and scl gpio
-
+ - sda-gpios: gpio used for the sda signal, this should be flagged as
+ active high using open drain with (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)
+ from <dt-bindings/gpio/gpio.h> since the signal is by definition
+ open drain.
+ - scl-gpios: gpio used for the scl signal, this should be flagged as
+ active high using open drain with (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)
+ from <dt-bindings/gpio/gpio.h> since the signal is by definition
+ open drain.
Optional properties:
- - i2c-gpio,sda-open-drain: sda as open drain
- - i2c-gpio,scl-open-drain: scl as open drain
- i2c-gpio,scl-output-only: scl as output only
- i2c-gpio,delay-us: delay between GPIO operations (may depend on each platform)
- i2c-gpio,timeout-ms: timeout to get data
+Deprecated properties, do not use in new device tree sources:
+ - gpios: sda and scl gpio, alternative for {sda,scl}-gpios
+ - i2c-gpio,sda-open-drain: this means that something outside of our
+ control has put the GPIO line used for SDA into open drain mode, and
+ that something is not the GPIO chip. It is essentially an
+ inconsistency flag.
+ - i2c-gpio,scl-open-drain: this means that something outside of our
+ control has put the GPIO line used for SCL into open drain mode, and
+ that something is not the GPIO chip. It is essentially an
+ inconsistency flag.
+
Example nodes:
+#include <dt-bindings/gpio/gpio.h>
+
i2c@0 {
compatible = "i2c-gpio";
- gpios = <&pioA 23 0 /* sda */
- &pioA 24 0 /* scl */
- >;
- i2c-gpio,sda-open-drain;
- i2c-gpio,scl-open-drain;
+ sda-gpios = <&pioA 23 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&pioA 24 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
i2c-gpio,delay-us = <2>; /* ~100 kHz */
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux.txt b/Documentation/devicetree/bindings/i2c/i2c-mux.txt
index 212e6779dc5c..b38f58a1c878 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux.txt
@@ -6,10 +6,10 @@ multiplexer/switch will have one child node for each child bus.
Optional properties:
- #address-cells = <1>;
- This property is required is the i2c-mux child node does not exist.
+ This property is required if the i2c-mux child node does not exist.
- #size-cells = <0>;
- This property is required is the i2c-mux child node does not exist.
+ This property is required if the i2c-mux child node does not exist.
- i2c-mux
For i2c multiplexers/switches that have child nodes that are a mixture
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
index cad39aee9f73..a777477e4547 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
@@ -13,6 +13,7 @@ Required properties:
"renesas,i2c-r8a7794" if the device is a part of a R8A7794 SoC.
"renesas,i2c-r8a7795" if the device is a part of a R8A7795 SoC.
"renesas,i2c-r8a7796" if the device is a part of a R8A7796 SoC.
+ "renesas,i2c-r8a77970" if the device is a part of a R8A77970 SoC.
"renesas,rcar-gen1-i2c" for a generic R-Car Gen1 compatible device.
"renesas,rcar-gen2-i2c" for a generic R-Car Gen2 or RZ/G1 compatible
device.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt b/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt
index 6b765485af7d..49df0053347a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt
@@ -24,7 +24,7 @@ Slave device properties:
Example:
- p2wi@01f03400 {
+ p2wi@1f03400 {
compatible = "allwinner,sun6i-a31-p2wi";
reg = <0x01f03400 0x400>;
interrupts = <0 39 4>;
diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt
index cee9d5055fa2..11263982470e 100644
--- a/Documentation/devicetree/bindings/i2c/i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c.txt
@@ -59,8 +59,8 @@ wants to support one of the below features, it should adapt the bindings below.
interrupts used by the device.
- interrupt-names
- "irq" and "wakeup" names are recognized by I2C core, other names are
- left to individual drivers.
+ "irq", "wakeup" and "smbus_alert" names are recognized by I2C core,
+ other names are left to individual drivers.
- host-notify
device uses SMBus host notify protocol instead of interrupt line.
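As an illustrative sketch only (the compatible string, I2C address and GPIO
numbers below are hypothetical), a client device wired to both a dedicated
interrupt and an SMBus alert line could name them as follows:

	temp-sensor@4c {
		compatible = "vendor,example-sensor";	/* hypothetical device */
		reg = <0x4c>;
		interrupt-parent = <&gpio3>;
		interrupts = <9 IRQ_TYPE_LEVEL_LOW>,	/* dedicated IRQ line */
			     <10 IRQ_TYPE_LEVEL_LOW>;	/* SMBus alert */
		interrupt-names = "irq", "smbus_alert";
	};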
diff --git a/Documentation/devicetree/bindings/iio/adc/mcp320x.txt b/Documentation/devicetree/bindings/iio/adc/mcp320x.txt
index bcd3ac8e6e0c..7d64753df949 100644
--- a/Documentation/devicetree/bindings/iio/adc/mcp320x.txt
+++ b/Documentation/devicetree/bindings/iio/adc/mcp320x.txt
@@ -29,15 +29,29 @@ Required properties:
"microchip,mcp3204"
"microchip,mcp3208"
"microchip,mcp3301"
+ "microchip,mcp3550-50"
+ "microchip,mcp3550-60"
+ "microchip,mcp3551"
+ "microchip,mcp3553"
NOTE: The use of the compatibles with no vendor prefix
is deprecated and only listed because old DTs use them.
+ - spi-cpha, spi-cpol (boolean):
+ Either SPI mode (0,0) or (1,1) must be used, so specify
+ none or both of spi-cpha, spi-cpol. The MCP3550/1/3
+ is more efficient in mode (1,1) as only 3 instead of
+ 4 bytes need to be read from the ADC, but not all SPI
+ masters support it.
+
+ - vref-supply: Phandle to the external reference voltage supply.
+
Examples:
spi_controller {
mcp3x0x@0 {
compatible = "mcp3002";
reg = <0>;
spi-max-frequency = <1000000>;
+ vref-supply = <&vref_reg>;
};
};
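For the newly added MCP3550/1/3 parts, a slave node selecting SPI mode (1,1)
as recommended above might look like this sketch (the chip-select number,
clock frequency and regulator phandle are illustrative assumptions):

	spi_controller {
		adc@1 {
			compatible = "microchip,mcp3550-50";
			reg = <1>;
			spi-max-frequency = <1000000>;
			spi-cpha;	/* together with spi-cpol selects mode (1,1) */
			spi-cpol;
			vref-supply = <&vref_reg>;
		};
	};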
diff --git a/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt b/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt
index 64dc4843c180..0df9befdaecc 100644
--- a/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt
@@ -12,6 +12,7 @@ for the Thermal Controller which holds a phandle to the AUXADC.
Required properties:
- compatible: Should be one of:
- "mediatek,mt2701-auxadc": For MT2701 family of SoCs
+ - "mediatek,mt2712-auxadc": For MT2712 family of SoCs
- "mediatek,mt7622-auxadc": For MT7622 family of SoCs
- "mediatek,mt8173-auxadc": For MT8173 family of SoCs
- reg: Address range of the AUXADC unit.
diff --git a/Documentation/devicetree/bindings/iio/dac/ds4424.txt b/Documentation/devicetree/bindings/iio/dac/ds4424.txt
new file mode 100644
index 000000000000..eaebbf8dab40
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/ds4424.txt
@@ -0,0 +1,20 @@
+Maxim Integrated DS4422/DS4424 7-bit Sink/Source Current DAC Device Driver
+
+Datasheet publicly available at:
+https://datasheets.maximintegrated.com/en/ds/DS4422-DS4424.pdf
+
+Required properties:
+ - compatible: Should be one of
+ maxim,ds4422
+ maxim,ds4424
+ - reg: Should contain the DAC I2C address
+
+Optional properties:
+ - vcc-supply: Phandle to the power supply. If not defined, the driver ignores it.
+
+Example:
+ ds4224@10 {
+ compatible = "maxim,ds4424";
+ reg = <0x10>; /* When A0, A1 pins are ground */
+ vcc-supply = <&vcc_3v3>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/dac/ti-dac082s085.txt b/Documentation/devicetree/bindings/iio/dac/ti-dac082s085.txt
new file mode 100644
index 000000000000..9cb0e10df704
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/ti-dac082s085.txt
@@ -0,0 +1,34 @@
+Texas Instruments 8/10/12-bit 2/4-channel DAC driver
+
+Required properties:
+ - compatible: Must be one of:
+ "ti,dac082s085"
+ "ti,dac102s085"
+ "ti,dac122s085"
+ "ti,dac084s085"
+ "ti,dac104s085"
+ "ti,dac124s085"
+ - reg: Chip select number.
+ - spi-cpha, spi-cpol: SPI mode (0,1) or (1,0) must be used, so specify
+ either spi-cpha or spi-cpol (but not both).
+ - vref-supply: Phandle to the external reference voltage supply.
+
+For other required and optional properties of SPI slave nodes please refer to
+../../spi/spi-bus.txt.
+
+Example:
+ vref_2v5_reg: regulator-vref {
+ compatible = "regulator-fixed";
+ regulator-name = "2v5";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-always-on;
+ };
+
+ dac@0 {
+ compatible = "ti,dac082s085";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+ spi-cpol;
+ vref-supply = <&vref_2v5_reg>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/health/max30100.txt b/Documentation/devicetree/bindings/iio/health/max30100.txt
index 295a9edfa4fd..8d8176459d09 100644
--- a/Documentation/devicetree/bindings/iio/health/max30100.txt
+++ b/Documentation/devicetree/bindings/iio/health/max30100.txt
@@ -20,9 +20,9 @@ Optional properties:
Example:
-max30100@057 {
+max30100@57 {
compatible = "maxim,max30100";
- reg = <57>;
+ reg = <0x57>;
maxim,led-current-microamp = <24000 50000>;
interrupt-parent = <&gpio1>;
interrupts = <16 2>;
diff --git a/Documentation/devicetree/bindings/iio/health/max30102.txt b/Documentation/devicetree/bindings/iio/health/max30102.txt
index c695e7cbeefb..8629c18b0e78 100644
--- a/Documentation/devicetree/bindings/iio/health/max30102.txt
+++ b/Documentation/devicetree/bindings/iio/health/max30102.txt
@@ -20,7 +20,7 @@ Optional properties:
Example:
-max30100@57 {
+max30102@57 {
compatible = "maxim,max30102";
reg = <0x57>;
maxim,red-led-current-microamp = <7000>;
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/ak8974.txt b/Documentation/devicetree/bindings/iio/magnetometer/ak8974.txt
index 77d5aba1bd8c..baecc4a85197 100644
--- a/Documentation/devicetree/bindings/iio/magnetometer/ak8974.txt
+++ b/Documentation/devicetree/bindings/iio/magnetometer/ak8974.txt
@@ -19,7 +19,7 @@ Optional properties:
Example:
-ak8974@0f {
+ak8974@f {
compatible = "asahi-kasei,ak8974";
reg = <0x0f>;
avdd-supply = <&foo_reg>;
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt b/Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt
index e1e7dd3259f6..aa67ceb0d4e0 100644
--- a/Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt
+++ b/Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt
@@ -13,7 +13,7 @@ Optional properties:
Example:
-ak8975@0c {
+ak8975@c {
compatible = "asahi-kasei,ak8975";
reg = <0x0c>;
gpios = <&gpj0 7 0>;
diff --git a/Documentation/devicetree/bindings/iio/st-sensors.txt b/Documentation/devicetree/bindings/iio/st-sensors.txt
index 9ec6f5ce54fc..6f626f73417e 100644
--- a/Documentation/devicetree/bindings/iio/st-sensors.txt
+++ b/Documentation/devicetree/bindings/iio/st-sensors.txt
@@ -46,6 +46,8 @@ Accelerometers:
- st,h3lis331dl-accel
- st,lng2dm-accel
- st,lis3l02dq
+- st,lis2dw12
+- st,lis3dhh
Gyroscopes:
- st,l3g4200d-gyro
@@ -71,3 +73,5 @@ Pressure sensors:
- st,lps25h-press
- st,lps331ap-press
- st,lps22hb-press
+- st,lps33hw
+- st,lps35hw
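As a hypothetical illustration of one of the newly listed parts (the I2C
address, interrupt wiring and node name below are assumptions, not mandated
by the binding), an LPS33HW pressure sensor on I2C could be described as:

	pressure-sensor@5c {
		compatible = "st,lps33hw";
		reg = <0x5c>;
		interrupt-parent = <&gpio2>;
		interrupts = <6 IRQ_TYPE_EDGE_RISING>;
	};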
diff --git a/Documentation/devicetree/bindings/input/gpio-mouse.txt b/Documentation/devicetree/bindings/input/gpio-mouse.txt
new file mode 100644
index 000000000000..519510a11af9
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/gpio-mouse.txt
@@ -0,0 +1,32 @@
+Device-Tree bindings for GPIO attached mice
+
+This simply uses standard GPIO handles to define a simple mouse connected
+to 5-7 GPIO lines.
+
+Required properties:
+ - compatible: must be "gpio-mouse"
+ - scan-interval-ms: The scanning interval in milliseconds
+ - up-gpios: GPIO line phandle to the line indicating "up"
+ - down-gpios: GPIO line phandle to the line indicating "down"
+ - left-gpios: GPIO line phandle to the line indicating "left"
+ - right-gpios: GPIO line phandle to the line indicating "right"
+
+Optional properties:
+ - button-left-gpios: GPIO line handle to the left mouse button
+ - button-middle-gpios: GPIO line handle to the middle mouse button
+ - button-right-gpios: GPIO line handle to the right mouse button
+Example:
+
+#include <dt-bindings/gpio/gpio.h>
+
+gpio-mouse {
+ compatible = "gpio-mouse";
+ scan-interval-ms = <50>;
+ up-gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
+ down-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
+ left-gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
+ right-gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
+ button-left-gpios = <&gpio0 4 GPIO_ACTIVE_LOW>;
+ button-middle-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+ button-right-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+};
diff --git a/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt b/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
index 4357e498ef04..1458c3179a63 100644
--- a/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
+++ b/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
@@ -19,7 +19,7 @@ Example:
#include <dt-bindings/input/input.h>
- lradc: lradc@01c22800 {
+ lradc: lradc@1c22800 {
compatible = "allwinner,sun4i-a10-lradc-keys";
reg = <0x01c22800 0x100>;
interrupts = <31>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt
index 49fa14ed155c..298e3442f143 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/egalax-ts.txt
@@ -10,7 +10,7 @@ Required properties:
Example:
- egalax_ts@04 {
+ touchscreen@4 {
compatible = "eeti,egalax_ts";
reg = <0x04>;
interrupt-parent = <&gpio1>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/exc3000.txt b/Documentation/devicetree/bindings/input/touchscreen/exc3000.txt
new file mode 100644
index 000000000000..1dcff4a43eaa
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/exc3000.txt
@@ -0,0 +1,27 @@
+* EETI EXC3000 Multiple Touch Controller
+
+Required properties:
+- compatible: must be "eeti,exc3000"
+- reg: i2c slave address
+- interrupt-parent: the phandle for the interrupt controller
+- interrupts: touch controller interrupt
+- touchscreen-size-x: See touchscreen.txt
+- touchscreen-size-y: See touchscreen.txt
+
+Optional properties:
+- touchscreen-inverted-x: See touchscreen.txt
+- touchscreen-inverted-y: See touchscreen.txt
+- touchscreen-swapped-x-y: See touchscreen.txt
+
+Example:
+
+ touchscreen@2a {
+ compatible = "eeti,exc3000";
+ reg = <0x2a>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+ touchscreen-size-x = <4096>;
+ touchscreen-size-y = <4096>;
+ touchscreen-inverted-x;
+ touchscreen-swapped-x-y;
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
index c98757a69110..0c369d8ebcab 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
@@ -2,7 +2,8 @@ Device tree bindings for Goodix GT9xx series touchscreen controller
Required properties:
- - compatible : Should be "goodix,gt911"
+ - compatible : Should be "goodix,gt1151"
+ or "goodix,gt911"
or "goodix,gt9110"
or "goodix,gt912"
or "goodix,gt927"
diff --git a/Documentation/devicetree/bindings/input/touchscreen/hideep.txt b/Documentation/devicetree/bindings/input/touchscreen/hideep.txt
new file mode 100644
index 000000000000..121d9b7c79a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/hideep.txt
@@ -0,0 +1,42 @@
+* HiDeep Finger and Stylus touchscreen controller
+
+Required properties:
+- compatible : must be "hideep,hideep-ts"
+- reg : I2C slave address (e.g. 0x6C).
+- interrupt-parent : Interrupt controller to which the chip is connected.
+- interrupts : Interrupt to which the chip is connected.
+
+Optional properties:
+- vdd-supply : Regulator supplying the controller's main
+               voltage (3.3V).
+- vid-supply : Regulator supplying the controller's IO
+               voltage (1.8V).
+- reset-gpios : GPIO specifier for the reset pin,
+                used to reset the IC.
+- touchscreen-size-x : X axis size of touchscreen
+- touchscreen-size-y : Y axis size of touchscreen
+- linux,keycodes : Specifies an array of numeric keycode values to
+ be used for reporting button presses. The array can
+ contain up to 3 entries.
+
+Example:
+
+#include "dt-bindings/input/input.h"
+
+i2c@00000000 {
+
+ /* ... */
+
+ touchscreen@6c {
+ compatible = "hideep,hideep-ts";
+ reg = <0x6c>;
+ interrupt-parent = <&gpx1>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ vdd-supply = <&ldo15_reg>";
+ vid-supply = <&ldo18_reg>;
+ reset-gpios = <&gpx1 5 0>;
+ touchscreen-size-x = <1080>;
+ touchscreen-size-y = <1920>;
+ linux,keycodes = <KEY_HOME>, <KEY_MENU>, <KEY_BACK>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt b/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt
index e67e58b61706..164915004424 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt
@@ -21,7 +21,7 @@ Optional properties:
each read. Valid values are 1, 4, 8, 16 and 32.
Example:
- tsc: tsc@02040000 {
+ tsc: tsc@2040000 {
compatible = "fsl,imx6ul-tsc";
reg = <0x02040000 0x4000>, <0x0219c000 0x4000>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt b/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt
new file mode 100644
index 000000000000..d9b7c2ff611e
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/samsung,s6sy761.txt
@@ -0,0 +1,34 @@
+* Samsung S6SY761 touchscreen controller
+
+Required properties:
+- compatible : must be "samsung,s6sy761"
+- reg : I2C slave address (e.g. 0x48)
+- interrupt-parent : the phandle to the interrupt controller which provides
+ the interrupt
+- interrupts : interrupt specification
+- avdd-supply : analog power supply
+- vdd-supply : power supply
+
+Optional properties:
+- touchscreen-size-x : see touchscreen.txt. The x resolution is stored in the
+                       device; if defined, this property overrides it.
+- touchscreen-size-y : see touchscreen.txt. The y resolution is stored in the
+                       device; if defined, this property overrides it.
+
+Example:
+
+i2c@00000000 {
+
+ /* ... */
+
+ touchscreen@48 {
+ compatible = "samsung,s6sy761";
+ reg = <0x48>;
+ interrupt-parent = <&gpa1>;
+ interrupts = <1 IRQ_TYPE_NONE>;
+ avdd-supply = <&ldo30_reg>;
+ vdd-supply = <&ldo31_reg>;
+ touchscreen-size-x = <4096>;
+ touchscreen-size-y = <4096>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt
index 4ae553eb333d..4903fb72d883 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sunxi-nmi.txt
@@ -20,7 +20,7 @@ Required properties:
Example:
-sc-nmi-intc@01c00030 {
+sc-nmi-intc@1c00030 {
compatible = "allwinner,sun7i-a20-sc-nmi";
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt
new file mode 100644
index 000000000000..a83f9a5734ca
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt
@@ -0,0 +1,36 @@
+Amlogic meson GPIO interrupt controller
+
+Meson SoCs contain an interrupt controller which is able to watch the SoC
+pads and generate an interrupt on edge or level. The controller is essentially
+a 256-pad to 8 GIC interrupt multiplexer, with a filter block to select edge
+or level and polarity. It does not expose all 256 mux inputs because the
+documentation shows that the upper part is not mapped to any pad. The actual
+number of interrupts exposed depends on the SoC.
+
+Required properties:
+
+- compatible : must have "amlogic,meson-gpio-intc" and either
+   "amlogic,meson8-gpio-intc" for meson8 SoCs (S802) or
+   "amlogic,meson8b-gpio-intc" for meson8b SoCs (S805) or
+   "amlogic,meson-gxbb-gpio-intc" for GXBB SoCs (S905) or
+   "amlogic,meson-gxl-gpio-intc" for GXL SoCs (S905X, S912)
+- interrupt-parent : a phandle to the GIC the interrupts are routed to.
+ Usually this is provided at the root level of the device tree as it is
+ common to most of the SoC.
+- reg : Specifies base physical address and size of the registers.
+- interrupt-controller : Identifies the node as an interrupt controller.
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt source. The value must be 2.
+- meson,channel-interrupts: Array with the 8 upstream hwirq numbers. These
+ are the hwirqs used on the parent interrupt controller.
+
+Example:
+
+gpio_interrupt: interrupt-controller@9880 {
+ compatible = "amlogic,meson-gxbb-gpio-intc",
+ "amlogic,meson-gpio-intc";
+ reg = <0x0 0x9880 0x0 0x10>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ meson,channel-interrupts = <64 65 66 67 68 69 70 71>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 5eb108e180fa..0a57f2f4167d 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -75,6 +75,10 @@ These nodes must have the following properties:
- reg: Specifies the base physical address and size of the ITS
registers.
+Optional:
+- socionext,synquacer-pre-its: (u32, u32) tuple describing the untranslated
+ address and size of the pre-ITS window.
+
The main GIC node must contain the appropriate #address-cells,
#size-cells and ranges properties for the reg property of all ITS
nodes.
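On SynQuacer platforms the new property could be used roughly as in the
sketch below, on an ITS child node of the GIC; the register address and the
pre-ITS window address/size are illustrative assumptions, not values taken
from the binding text:

	gic-its@30020000 {
		compatible = "arm,gic-v3-its";
		msi-controller;
		reg = <0x0 0x30020000 0x0 0x20000>;
		/* untranslated base address and size of the pre-ITS window */
		socionext,synquacer-pre-its = <0x58000000 0x200000>;
	};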
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
index 448273a30a11..36df06c5c567 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
@@ -2,7 +2,8 @@ Broadcom Generic Level 2 Interrupt Controller
Required properties:
-- compatible: should be "brcm,l2-intc"
+- compatible: should be "brcm,l2-intc" for latched interrupt controllers
+ should be "brcm,bcm7271-l2-intc" for level interrupt controllers
- reg: specifies the base physical address and size of the registers
- interrupt-controller: identifies the node as an interrupt controller
- #interrupt-cells: specifies the number of cells needed to encode an
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
index 49ccabbfa6f3..a4ff93d6b7f3 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
@@ -8,6 +8,7 @@ Required properties:
"fsl,ls1043a-msi"
"fsl,ls1046a-msi"
"fsl,ls1043a-v1.1-msi"
+ "fsl,ls1012a-msi"
- msi-controller: indicates that this is a PCIe MSI controller node
- reg: physical base address of the controller and length of memory mapped.
- interrupts: an interrupt to the parent interrupt controller.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/openrisc,ompic.txt b/Documentation/devicetree/bindings/interrupt-controller/openrisc,ompic.txt
new file mode 100644
index 000000000000..caec07cc7149
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/openrisc,ompic.txt
@@ -0,0 +1,22 @@
+Open Multi-Processor Interrupt Controller
+
+Required properties:
+
+- compatible : This should be "openrisc,ompic"
+- reg : Specifies base physical address and size of the register space. The
+ size is based on the number of cores the controller has been configured
+  to handle; this should be set to 8 bytes per cpu core.
+- interrupt-controller : Identifies the node as an interrupt controller.
+- #interrupt-cells : This should be set to 0 as this will not be an irq
+ parent.
+- interrupts : Specifies the interrupt line to which the ompic is wired.
+
+Example:
+
+ompic: interrupt-controller@98000000 {
+ compatible = "openrisc,ompic";
+ reg = <0x98000000 16>;
+ interrupt-controller;
+ #interrupt-cells = <0>;
+ interrupts = <1>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
index e3f052d8c11a..33c9a10fdc91 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
@@ -13,6 +13,9 @@ Required properties:
- "renesas,irqc-r8a7793" (R-Car M2-N)
- "renesas,irqc-r8a7794" (R-Car E2)
- "renesas,intc-ex-r8a7795" (R-Car H3)
+ - "renesas,intc-ex-r8a7796" (R-Car M3-W)
+ - "renesas,intc-ex-r8a77970" (R-Car V3M)
+ - "renesas,intc-ex-r8a77995" (R-Car D3)
- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
interrupts.txt in this directory
- clocks: Must contain a reference to the functional clock.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt b/Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt
new file mode 100644
index 000000000000..8b2faefe29ca
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/socionext,synquacer-exiu.txt
@@ -0,0 +1,32 @@
+Socionext SynQuacer External Interrupt Unit (EXIU)
+
+The Socionext Synquacer SoC has an external interrupt unit (EXIU)
+that forwards a block of 32 configurable input lines to 32 adjacent
+level-high type GICv3 SPIs.
+
+Required properties:
+
+- compatible : Should be "socionext,synquacer-exiu".
+- reg : Specifies base physical address and size of the
+ control registers.
+- interrupt-controller : Identifies the node as an interrupt controller.
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt source. The value must be 3.
+- interrupt-parent : phandle of the GIC these interrupts are routed to.
+- socionext,spi-base : The SPI number of the first SPI of the 32 adjacent
+  ones the EXIU forwards its interrupts to.
+
+Notes:
+
+- Only SPIs can use the EXIU as an interrupt parent.
+
+Example:
+
+ exiu: interrupt-controller@510c0000 {
+ compatible = "socionext,synquacer-exiu";
+ reg = <0x0 0x510c0000 0x0 0x20>;
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ #interrupt-cells = <3>;
+ socionext,spi-base = <112>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
index 6e7703d4ff5b..edf03f09244b 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
@@ -2,7 +2,9 @@ STM32 External Interrupt Controller
Required properties:
-- compatible: Should be "st,stm32-exti"
+- compatible: Should be:
+ "st,stm32-exti"
+ "st,stm32h7-exti"
- reg: Specifies base physical address and size of the registers
- interrupt-controller: Identifies the node as an interrupt controller
- #interrupt-cells: Specifies the number of cells to encode an interrupt
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.txt
index d9bb106bdd16..5f94d7739d8d 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.txt
@@ -20,7 +20,7 @@ Please refer to interrupts.txt in this directory for details of the common
Interrupt Controllers bindings used by client devices.
Example:
- kirq0: keystone_irq0@026202a0 {
+ kirq0: keystone_irq0@26202a0 {
compatible = "ti,keystone-irq";
ti,syscon-dev = <&devctrl 0x2a0>;
interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
diff --git a/Documentation/devicetree/bindings/iommu/qcom,iommu.txt b/Documentation/devicetree/bindings/iommu/qcom,iommu.txt
index b2641ceb2b40..059139abce35 100644
--- a/Documentation/devicetree/bindings/iommu/qcom,iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/qcom,iommu.txt
@@ -115,7 +115,7 @@ to non-secure vs secure interrupt line.
iommus = <&apps_iommu 4>;
};
- gpu@01c00000 {
+ gpu@1c00000 {
...
iommus = <&gpu_iommu 1>, <&gpu_iommu 2>;
};
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
index 3ed027cfca95..857df929a654 100644
--- a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
+++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
@@ -53,7 +53,7 @@ Example: R8A7791 IPMMU-MX and VSP1-D0 bus master
#iommu-cells = <1>;
};
- vsp1@fe928000 {
+ vsp@fe928000 {
...
iommus = <&ipmmu_mx 13>;
...
diff --git a/Documentation/devicetree/bindings/leds/register-bit-led.txt b/Documentation/devicetree/bindings/leds/register-bit-led.txt
index 59b56365f648..cf1ea403ba7a 100644
--- a/Documentation/devicetree/bindings/leds/register-bit-led.txt
+++ b/Documentation/devicetree/bindings/leds/register-bit-led.txt
@@ -32,7 +32,7 @@ syscon: syscon@10000000 {
compatible = "arm,realview-pb1176-syscon", "syscon";
reg = <0x10000000 0x1000>;
- led@08.0 {
+ led@8.0 {
compatible = "register-bit-led";
offset = <0x08>;
mask = <0x01>;
@@ -40,7 +40,7 @@ syscon: syscon@10000000 {
linux,default-trigger = "heartbeat";
default-state = "on";
};
- led@08.1 {
+ led@8.1 {
compatible = "register-bit-led";
offset = <0x08>;
mask = <0x02>;
@@ -48,7 +48,7 @@ syscon: syscon@10000000 {
linux,default-trigger = "mmc0";
default-state = "off";
};
- led@08.2 {
+ led@8.2 {
compatible = "register-bit-led";
offset = <0x08>;
mask = <0x04>;
@@ -56,35 +56,35 @@ syscon: syscon@10000000 {
linux,default-trigger = "cpu0";
default-state = "off";
};
- led@08.3 {
+ led@8.3 {
compatible = "register-bit-led";
offset = <0x08>;
mask = <0x08>;
label = "versatile:3";
default-state = "off";
};
- led@08.4 {
+ led@8.4 {
compatible = "register-bit-led";
offset = <0x08>;
mask = <0x10>;
label = "versatile:4";
default-state = "off";
};
- led@08.5 {
+ led@8.5 {
compatible = "register-bit-led";
offset = <0x08>;
mask = <0x20>;
label = "versatile:5";
default-state = "off";
};
- led@08.6 {
+ led@8.6 {
compatible = "register-bit-led";
offset = <0x08>;
mask = <0x40>;
label = "versatile:6";
default-state = "off";
};
- led@08.7 {
+ led@8.7 {
compatible = "register-bit-led";
offset = <0x08>;
mask = <0x80>;
diff --git a/Documentation/devicetree/bindings/mailbox/ti,message-manager.txt b/Documentation/devicetree/bindings/mailbox/ti,message-manager.txt
index b449d025049f..c3b55b3ede8a 100644
--- a/Documentation/devicetree/bindings/mailbox/ti,message-manager.txt
+++ b/Documentation/devicetree/bindings/mailbox/ti,message-manager.txt
@@ -29,7 +29,7 @@ Required properties:
Example(K2G):
------------
- msgmgr: msgmgr@02a00000 {
+ msgmgr: msgmgr@2a00000 {
compatible = "ti,k2g-message-manager";
#mbox-cells = <2>;
reg-names = "queue_proxy_region", "queue_state_debug_region";
diff --git a/Documentation/devicetree/bindings/marvell.txt b/Documentation/devicetree/bindings/marvell.txt
index ea2b16ced49b..7f722316458a 100644
--- a/Documentation/devicetree/bindings/marvell.txt
+++ b/Documentation/devicetree/bindings/marvell.txt
@@ -446,7 +446,7 @@ prefixed with the string "marvell,", for Marvell Technology Group Ltd.
that services interrupts for this device.
Example Discovery CPU Error node:
- cpu-error@0070 {
+ cpu-error@70 {
compatible = "marvell,mv64360-cpu-error";
reg = <0x70 0x10 0x128 0x28>;
interrupts = <3>;
@@ -466,7 +466,7 @@ prefixed with the string "marvell,", for Marvell Technology Group Ltd.
that services interrupts for this device.
Example Discovery SRAM Controller node:
- sram-ctrl@0380 {
+ sram-ctrl@380 {
compatible = "marvell,mv64360-sram-ctrl";
reg = <0x380 0x80>;
interrupts = <13>;
diff --git a/Documentation/devicetree/bindings/media/cec-gpio.txt b/Documentation/devicetree/bindings/media/cec-gpio.txt
new file mode 100644
index 000000000000..46a0bac8b3b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/cec-gpio.txt
@@ -0,0 +1,32 @@
+* HDMI CEC GPIO driver
+
+The HDMI CEC GPIO module supports CEC implementations where the CEC line
+is hooked up to a pull-up GPIO line and - optionally - the HPD line is
+hooked up to another GPIO line.
+
+Required properties:
+ - compatible: value must be "cec-gpio".
+ - cec-gpios: gpio that the CEC line is connected to. The line should be
+ tagged as open drain.
+
+If the CEC line is associated with an HDMI receiver/transmitter, then the
+following property is also required:
+
+ - hdmi-phandle : phandle to the HDMI controller, see also cec.txt.
+
+If the CEC line is not associated with an HDMI receiver/transmitter, then
+the following property is optional:
+
+ - hpd-gpios: gpio that the HPD line is connected to.
+
+Example for the Raspberry Pi 3 where the CEC line is connected to
+pin 26 aka BCM7 aka CE1 on the GPIO pin header and the HPD line is
+connected to pin 11 aka BCM17:
+
+#include <dt-bindings/gpio/gpio.h>
+
+cec-gpio {
+ compatible = "cec-gpio";
+ cec-gpios = <&gpio 7 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ hpd-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/media/exynos5-gsc.txt b/Documentation/devicetree/bindings/media/exynos5-gsc.txt
index 26ca25b6d264..0d4fdaedc6f1 100644
--- a/Documentation/devicetree/bindings/media/exynos5-gsc.txt
+++ b/Documentation/devicetree/bindings/media/exynos5-gsc.txt
@@ -3,8 +3,11 @@
G-Scaler is used for scaling and color space conversion on EXYNOS5 SoCs.
Required properties:
-- compatible: should be "samsung,exynos5-gsc" (for Exynos 5250, 5420 and
- 5422 SoCs) or "samsung,exynos5433-gsc" (Exynos 5433)
+- compatible: should be one of
+ "samsung,exynos5250-gsc"
+ "samsung,exynos5420-gsc"
+ "samsung,exynos5433-gsc"
+ "samsung,exynos5-gsc" (deprecated)
- reg: should contain G-Scaler physical address location and length.
- interrupts: should contain G-Scaler interrupt number
@@ -15,7 +18,7 @@ Optional properties:
Example:
gsc_0: gsc@0x13e00000 {
- compatible = "samsung,exynos5-gsc";
+ compatible = "samsung,exynos5250-gsc";
reg = <0x13e00000 0x1000>;
interrupts = <0 85 0>;
};
diff --git a/Documentation/devicetree/bindings/media/i2c/imx274.txt b/Documentation/devicetree/bindings/media/i2c/imx274.txt
new file mode 100644
index 000000000000..80f2e89568e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/imx274.txt
@@ -0,0 +1,33 @@
+* Sony 1/2.5-Inch 8.51Mp CMOS Digital Image Sensor
+
+The Sony imx274 is a 1/2.5-inch CMOS active pixel digital image sensor with
+an active array size of 3864H x 2202V. It is programmable through I2C
+interface. The I2C address is fixed to 0x1a as per sensor data sheet.
+Image data is sent through MIPI CSI-2, which is configured as 4 lanes
+at 1440 Mbps.
+
+
+Required Properties:
+- compatible: value should be "sony,imx274" for imx274 sensor
+- reg: I2C bus address of the device
+
+Optional Properties:
+- reset-gpios: Sensor reset GPIO
+
+The imx274 device node should contain one 'port' child node with
+an 'endpoint' subnode. For further reading on port node refer to
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+ sensor@1a {
+ compatible = "sony,imx274";
+ reg = <0x1a>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reset-gpios = <&gpio_sensor 0 0>;
+ port {
+ sensor_out: endpoint {
+ remote-endpoint = <&csiss_in>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt b/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
index 855e1faf73e2..33f10a94c381 100644
--- a/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
+++ b/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
@@ -27,6 +27,8 @@ Optional properties
- nokia,nvm-size: The size of the NVM, in bytes. If the size is not given,
the NVM contents will not be read.
- reset-gpios: XSHUTDOWN GPIO
+- flash-leds: See ../video-interfaces.txt
+- lens-focus: See ../video-interfaces.txt
Endpoint node mandatory properties
diff --git a/Documentation/devicetree/bindings/media/i2c/tc358743.txt b/Documentation/devicetree/bindings/media/i2c/tc358743.txt
index 5218921629ed..49f8bcc2ea4d 100644
--- a/Documentation/devicetree/bindings/media/i2c/tc358743.txt
+++ b/Documentation/devicetree/bindings/media/i2c/tc358743.txt
@@ -27,7 +27,7 @@ Documentation/devicetree/bindings/media/video-interfaces.txt.
Example:
- tc358743@0f {
+ tc358743@f {
compatible = "toshiba,tc358743";
reg = <0x0f>;
clocks = <&hdmi_osc>;
diff --git a/Documentation/devicetree/bindings/media/img-ir-rev1.txt b/Documentation/devicetree/bindings/media/img-ir-rev1.txt
index 5434ce61b925..ed9ec52b77e0 100644
--- a/Documentation/devicetree/bindings/media/img-ir-rev1.txt
+++ b/Documentation/devicetree/bindings/media/img-ir-rev1.txt
@@ -25,7 +25,7 @@ Optional properties:
Example:
- ir@02006200 {
+ ir@2006200 {
compatible = "img,ir-rev1";
reg = <0x02006200 0x100>;
interrupts = <29 4>;
diff --git a/Documentation/devicetree/bindings/media/renesas,vsp1.txt b/Documentation/devicetree/bindings/media/renesas,vsp1.txt
index 9b695bcbf219..16427017cb45 100644
--- a/Documentation/devicetree/bindings/media/renesas,vsp1.txt
+++ b/Documentation/devicetree/bindings/media/renesas,vsp1.txt
@@ -22,7 +22,7 @@ Optional properties:
Example: R8A7790 (R-Car H2) VSP1-S node
- vsp1@fe928000 {
+ vsp@fe928000 {
compatible = "renesas,vsp1";
reg = <0 0xfe928000 0 0x8000>;
interrupts = <0 267 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/media/rockchip-rga.txt b/Documentation/devicetree/bindings/media/rockchip-rga.txt
new file mode 100644
index 000000000000..fd5276abfad6
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/rockchip-rga.txt
@@ -0,0 +1,33 @@
+Device-tree bindings for the Rockchip 2D raster graphic acceleration controller (RGA)
+
+RGA is a standalone 2D raster graphic acceleration unit. It accelerates 2D
+graphics operations, such as point/line drawing, image scaling, rotation,
+BitBLT, alpha blending and image blur/sharpness.
+
+Required properties:
+- compatible: value should be one of the following
+ "rockchip,rk3288-rga";
+ "rockchip,rk3399-rga";
+
+- interrupts: RGA interrupt specifier.
+
+- clocks: phandle to RGA sclk/hclk/aclk clocks
+
+- clock-names: should be "aclk", "hclk" and "sclk"
+
+- resets: Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names: should be "core", "axi" and "ahb"
+
+Example:
+SoC-specific DT entry:
+ rga: rga@ff680000 {
+ compatible = "rockchip,rk3399-rga";
+ reg = <0xff680000 0x10000>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_RGA>, <&cru HCLK_RGA>, <&cru SCLK_RGA_CORE>;
+ clock-names = "aclk", "hclk", "sclk";
+
+ resets = <&cru SRST_RGA_CORE>, <&cru SRST_A_RGA>, <&cru SRST_H_RGA>;
+ reset-names = "core, "axi", "ahb";
+ };
diff --git a/Documentation/devicetree/bindings/media/stih-cec.txt b/Documentation/devicetree/bindings/media/stih-cec.txt
index 8be2a040c6c6..ece0832fdeaf 100644
--- a/Documentation/devicetree/bindings/media/stih-cec.txt
+++ b/Documentation/devicetree/bindings/media/stih-cec.txt
@@ -13,7 +13,7 @@ Required properties:
Example for STIH407:
-sti-cec@094a087c {
+sti-cec@94a087c {
compatible = "st,stih-cec";
reg = <0x94a087c 0x64>;
clocks = <&clk_sysin>;
diff --git a/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt b/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt
index 6af3fc210ecc..c7888d6f6408 100644
--- a/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt
+++ b/Documentation/devicetree/bindings/media/stih407-c8sectpfe.txt
@@ -50,7 +50,7 @@ Example:
/* stih410 SoC b2120 + b2004a + stv0367-pll(NIMB) + stv0367-tda18212 (NIMA) DT example) */
- c8sectpfe@08a20000 {
+ c8sectpfe@8a20000 {
compatible = "st,stih407-c8sectpfe";
reg = <0x08a20000 0x10000>, <0x08a00000 0x4000>;
reg-names = "stfe", "stfe-ram";
diff --git a/Documentation/devicetree/bindings/media/sunxi-ir.txt b/Documentation/devicetree/bindings/media/sunxi-ir.txt
index 302a0b183cb8..91648c569b1e 100644
--- a/Documentation/devicetree/bindings/media/sunxi-ir.txt
+++ b/Documentation/devicetree/bindings/media/sunxi-ir.txt
@@ -14,7 +14,7 @@ Optional properties:
Example:
-ir0: ir@01c21800 {
+ir0: ir@1c21800 {
compatible = "allwinner,sun4i-a10-ir";
clocks = <&apb0_gates 6>, <&ir0_clk>;
clock-names = "apb", "ir";
diff --git a/Documentation/devicetree/bindings/media/tango-ir.txt b/Documentation/devicetree/bindings/media/tango-ir.txt
new file mode 100644
index 000000000000..a9f00c2bf897
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/tango-ir.txt
@@ -0,0 +1,21 @@
+Sigma Designs Tango IR NEC/RC-5/RC-6 decoder (SMP86xx and SMP87xx)
+
+Required properties:
+
+- compatible: "sigma,smp8642-ir"
+- reg: address/size of NEC+RC5 area, address/size of RC6 area
+- interrupts: spec for IR IRQ
+- clocks: spec for IR clock (typically the crystal oscillator)
+
+Optional properties:
+
+- linux,rc-map-name: see Documentation/devicetree/bindings/media/rc.txt
+
+Example:
+
+ ir@10518 {
+ compatible = "sigma,smp8642-ir";
+ reg = <0x10518 0x18>, <0x105e0 0x1c>;
+ interrupts = <21 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&xtal>;
+ };
diff --git a/Documentation/devicetree/bindings/media/tegra-cec.txt b/Documentation/devicetree/bindings/media/tegra-cec.txt
new file mode 100644
index 000000000000..c503f06f3b84
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/tegra-cec.txt
@@ -0,0 +1,27 @@
+* Tegra HDMI CEC hardware
+
+The HDMI CEC module is present in Tegra SoCs and its purpose is to
+handle communication between HDMI connected devices over the CEC bus.
+
+Required properties:
+ - compatible : value should be one of the following:
+ "nvidia,tegra114-cec"
+ "nvidia,tegra124-cec"
+ "nvidia,tegra210-cec"
+ - reg : Physical base address of the IP registers and length of memory
+ mapped region.
+ - interrupts : HDMI CEC interrupt number to the CPU.
+ - clocks : from common clock binding: handle to HDMI CEC clock.
+ - clock-names : from common clock binding: must contain "cec",
+ corresponding to the entry in the clocks property.
+ - hdmi-phandle : phandle to the HDMI controller, see also cec.txt.
+
+Example:
+
+cec@70015000 {
+ compatible = "nvidia,tegra124-cec";
+ reg = <0x0 0x70015000 0x0 0x00001000>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA124_CLK_CEC>;
+ clock-names = "cec";
+};
diff --git a/Documentation/devicetree/bindings/media/video-interfaces.txt b/Documentation/devicetree/bindings/media/video-interfaces.txt
index 852041a7480c..3994b0143dd1 100644
--- a/Documentation/devicetree/bindings/media/video-interfaces.txt
+++ b/Documentation/devicetree/bindings/media/video-interfaces.txt
@@ -55,6 +55,15 @@ divided into two separate ITU-R BT.656 8-bit busses. In such case bus-width
and data-shift properties can be used to assign physical data lines to each
endpoint node (logical bus).
+Documenting bindings for devices
+--------------------------------
+
+All required and optional bindings the device supports shall be explicitly
+documented in device DT binding documentation. This also includes port and
+endpoint nodes for the device, including unit-addresses and reg properties where
+relevant.
+
+Please also see Documentation/devicetree/bindings/graph.txt .
Required properties
-------------------
@@ -67,6 +76,16 @@ are required in a relevant parent node:
identifier, should be 1.
- #size-cells : should be zero.
+
+Optional properties
+-------------------
+
+- flash-leds: An array of phandles, each referring to a flash LED, a sub-node
+ of the LED driver device node.
+
+- lens-focus: A phandle to the node of the focus lens controller.
+
+
Optional endpoint properties
----------------------------
@@ -99,7 +118,10 @@ Optional endpoint properties
determines the logical lane number, while the value of an entry indicates
physical lane, e.g. for 2-lane MIPI CSI-2 bus we could have
"data-lanes = <1 2>;", assuming the clock lane is on hardware lane 0.
- This property is valid for serial busses only (e.g. MIPI CSI-2).
+ If the hardware does not support lane reordering, monotonically
+ incremented values shall be used from 0 or 1 onwards, depending on
+ whether or not there is also a clock lane. This property is valid for
+ serial busses only (e.g. MIPI CSI-2).
- clock-lanes: an array of physical clock lane indexes. Position of an entry
determines the logical lane number, while the value of an entry indicates
physical lane, e.g. for a MIPI CSI-2 bus we could have "clock-lanes = <0>;",
diff --git a/Documentation/devicetree/bindings/mfd/arizona.txt b/Documentation/devicetree/bindings/mfd/arizona.txt
index b37bdde5cfda..bdd017686ea5 100644
--- a/Documentation/devicetree/bindings/mfd/arizona.txt
+++ b/Documentation/devicetree/bindings/mfd/arizona.txt
@@ -65,45 +65,6 @@ Optional properties:
a value that is out of range for a 16 bit register then the chip default
will be used. If present exactly five values must be specified.
- - wlf,inmode : A list of INn_MODE register values, where n is the number
- of input signals. Valid values are 0 (Differential), 1 (Single-ended) and
- 2 (Digital Microphone). If absent, INn_MODE registers set to 0 by default.
- If present, values must be specified less than or equal to the number of
- input signals. If values less than the number of input signals, elements
- that have not been specified are set to 0 by default. Entries are:
- <IN1, IN2, IN3, IN4> (wm5102, wm5110, wm8280, wm8997)
- <IN1A, IN2A, IN1B, IN2B> (wm8998, wm1814)
- - wlf,out-mono : A list of boolean values indicating whether each output is
- mono or stereo. Position within the list indicates the output affected
- (eg. First entry in the list corresponds to output 1). A non-zero value
- indicates a mono output. If present, the number of values should be less
- than or equal to the number of outputs, if less values are supplied the
- additional outputs will be treated as stereo.
-
- - wlf,dmic-ref : DMIC reference voltage source for each input, can be
- selected from either MICVDD or one of the MICBIAS's, defines
- (ARIZONA_DMIC_xxxx) are provided in <dt-bindings/mfd/arizona.txt>. If
- present, the number of values should be less than or equal to the
- number of inputs, unspecified inputs will use the chip default.
-
- - wlf,max-channels-clocked : The maximum number of channels to be clocked on
- each AIF, useful for I2S systems with multiple data lines being mastered.
- Specify one cell for each AIF to be configured, specify zero for AIFs that
- should be handled normally.
- If present, number of cells must be less than or equal to the number of
- AIFs. If less than the number of AIFs, for cells that have not been
- specified the corresponding AIFs will be treated as default setting.
-
- - wlf,spk-fmt : PDM speaker data format, must contain 2 cells (OUT5 and OUT6).
- See the datasheet for values.
- The second cell is ignored for codecs that do not have OUT6 (wm5102, wm8997,
- wm8998, wm1814)
-
- - wlf,spk-mute : PDM speaker mute setting, must contain 2 cells (OUT5 and OUT6).
- See the datasheet for values.
- The second cell is ignored for codecs that do not have OUT6 (wm5102, wm8997,
- wm8998, wm1814)
-
- DCVDD-supply, MICVDD-supply : Power supplies, only need to be specified if
they are being externally supplied. As covered in
Documentation/devicetree/bindings/regulator/regulator.txt
@@ -112,6 +73,7 @@ Optional properties:
Also see child specific device properties:
Regulator - ../regulator/arizona-regulator.txt
Extcon - ../extcon/extcon-arizona.txt
+ Sound - ../sound/arizona.txt
Example:
diff --git a/Documentation/devicetree/bindings/mfd/aspeed-scu.txt b/Documentation/devicetree/bindings/mfd/aspeed-scu.txt
index 4fc5b83726d6..ce8cf0ec6279 100644
--- a/Documentation/devicetree/bindings/mfd/aspeed-scu.txt
+++ b/Documentation/devicetree/bindings/mfd/aspeed-scu.txt
@@ -9,10 +9,16 @@ Required properties:
"aspeed,g5-scu", "syscon", "simple-mfd"
- reg: contains the offset and length of the SCU memory region
+- #clock-cells: should be set to <1> - the system controller is also a
+ clock provider
+- #reset-cells: should be set to <1> - the system controller is also a
+ reset line provider
Example:
syscon: syscon@1e6e2000 {
compatible = "aspeed,ast2400-scu", "syscon", "simple-mfd";
reg = <0x1e6e2000 0x1a8>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
};
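With the SCU now exposing clock and reset cells, peripherals can reference it
directly as their clock and reset provider. A hypothetical consumer fragment,
mirroring the i2c-aspeed example earlier in this series (the unit address and
register range are illustrative; the ASPEED_CLK_APB and ASPEED_RESET_I2C
identifiers are the same ones used in that example):

	i2c@40 {
		compatible = "aspeed,ast2400-i2c-bus";
		reg = <0x40 0x40>;
		clocks = <&syscon ASPEED_CLK_APB>;
		resets = <&syscon ASPEED_RESET_I2C>;
	};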
diff --git a/Documentation/devicetree/bindings/mfd/brcm,iproc-cdru.txt b/Documentation/devicetree/bindings/mfd/brcm,iproc-cdru.txt
new file mode 100644
index 000000000000..82f82e069563
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/brcm,iproc-cdru.txt
@@ -0,0 +1,16 @@
+Broadcom iProc Chip Device Resource Unit (CDRU)
+
+Various Broadcom iProc SoCs have a set of registers that provide various
+chip specific device and resource configurations. This node allows access to
+these CDRU registers via syscon.
+
+Required properties:
+- compatible: should contain:
+ "brcm,sr-cdru", "syscon" for Stingray
+- reg: base address and range of the CDRU registers
+
+Example:
+ cdru: syscon@6641d000 {
+ compatible = "brcm,sr-cdru", "syscon";
+ reg = <0 0x6641d000 0 0x400>;
+ };
diff --git a/Documentation/devicetree/bindings/mfd/brcm,iproc-mhb.txt b/Documentation/devicetree/bindings/mfd/brcm,iproc-mhb.txt
new file mode 100644
index 000000000000..4421e9771b8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/brcm,iproc-mhb.txt
@@ -0,0 +1,18 @@
+Broadcom iProc Multi Host Bridge (MHB)
+
+Certain Broadcom iProc SoCs have a multi host bridge (MHB) block that controls
+the connection and configuration of 1) internal PCIe serdes; 2) PCIe endpoint
+interface; 3) access to the Nitro (network processing) engine
+
+This node allows access to these MHB registers via syscon.
+
+Required properties:
+- compatible: should contain:
+ "brcm,sr-mhb", "syscon" for Stingray
+- reg: base address and range of the MHB registers
+
+Example:
+ mhb: syscon@60401000 {
+ compatible = "brcm,sr-mhb", "syscon";
+ reg = <0 0x60401000 0 0x38c>;
+ };
diff --git a/Documentation/devicetree/bindings/mfd/max77686.txt b/Documentation/devicetree/bindings/mfd/max77686.txt
index 741e76688cf2..0f2587fa42cb 100644
--- a/Documentation/devicetree/bindings/mfd/max77686.txt
+++ b/Documentation/devicetree/bindings/mfd/max77686.txt
@@ -19,7 +19,7 @@ Required properties:
Example:
- max77686: pmic@09 {
+ max77686: pmic@9 {
compatible = "maxim,max77686";
interrupt-parent = <&wakeup_eint>;
interrupts = <26 0>;
diff --git a/Documentation/devicetree/bindings/mfd/max77693.txt b/Documentation/devicetree/bindings/mfd/max77693.txt
index 6a1ae3a2b77f..e6754974a745 100644
--- a/Documentation/devicetree/bindings/mfd/max77693.txt
+++ b/Documentation/devicetree/bindings/mfd/max77693.txt
@@ -127,6 +127,12 @@ Required properties for the LED child node:
Optional properties for the LED child node:
- label : see Documentation/devicetree/bindings/leds/common.txt
+Optional nodes:
+- max77693-muic :
+ Node used only by extcon consumers.
+ Required properties:
+ - compatible : "maxim,max77693-muic"
+
Example:
#include <dt-bindings/leds/common.h>
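A minimal sketch of how the new MUIC child node could appear inside the main
PMIC node; the I2C address and interrupt wiring are illustrative, and the
rest of the node is abridged:

	max77693@66 {
		compatible = "maxim,max77693";
		reg = <0x66>;
		interrupt-parent = <&gpx1>;
		interrupts = <5 IRQ_TYPE_EDGE_FALLING>;

		max77693-muic {
			compatible = "maxim,max77693-muic";
		};
	};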
diff --git a/Documentation/devicetree/bindings/mfd/max77802.txt b/Documentation/devicetree/bindings/mfd/max77802.txt
index 51fc1a60caa5..f2f3fe75901c 100644
--- a/Documentation/devicetree/bindings/mfd/max77802.txt
+++ b/Documentation/devicetree/bindings/mfd/max77802.txt
@@ -18,7 +18,7 @@ Required properties:
Example:
- max77802: pmic@09 {
+ max77802: pmic@9 {
compatible = "maxim,max77802";
interrupt-parent = <&intc>;
interrupts = <26 IRQ_TYPE_NONE>;
diff --git a/Documentation/devicetree/bindings/mfd/mc13xxx.txt b/Documentation/devicetree/bindings/mfd/mc13xxx.txt
index 39ba4146769d..ac235fe385fc 100644
--- a/Documentation/devicetree/bindings/mfd/mc13xxx.txt
+++ b/Documentation/devicetree/bindings/mfd/mc13xxx.txt
@@ -113,7 +113,6 @@ MC13892 regulators:
Examples:
ecspi@70010000 { /* ECSPI1 */
- fsl,spi-num-chipselects = <2>;
cs-gpios = <&gpio4 24 0>, /* GPIO4_24 */
<&gpio4 25 0>; /* GPIO4_25 */
diff --git a/Documentation/devicetree/bindings/mfd/mfd.txt b/Documentation/devicetree/bindings/mfd/mfd.txt
index bcb6abb9d413..336c0495c8a3 100644
--- a/Documentation/devicetree/bindings/mfd/mfd.txt
+++ b/Documentation/devicetree/bindings/mfd/mfd.txt
@@ -41,7 +41,7 @@ foo@1000 {
compatible = "syscon", "simple-mfd";
reg = <0x01000 0x1000>;
- led@08.0 {
+ led@8.0 {
compatible = "register-bit-led";
offset = <0x08>;
mask = <0x01>;
diff --git a/Documentation/devicetree/bindings/mfd/sprd,sc27xx-pmic.txt b/Documentation/devicetree/bindings/mfd/sprd,sc27xx-pmic.txt
new file mode 100644
index 000000000000..21b9a897fca5
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/sprd,sc27xx-pmic.txt
@@ -0,0 +1,40 @@
+Spreadtrum SC27xx Power Management Integrated Circuit (PMIC)
+
+The Spreadtrum SC27xx series PMICs contain SC2720, SC2721, SC2723, SC2730
+and SC2731. The Spreadtrum PMIC belonging to SC27xx series integrates all
+mobile handset power management, audio codec, battery management and user
+interface support function in a single chip. It has 6 major functional
+blocks:
+- DCDCs to support CPU, memory.
+- LDOs to support both internal and external requirement.
+- Battery management system, such as charger, fuel gauge.
+- Audio codec.
+- User interface function, such as indicator, flash LED and so on.
+- IC level interface, such as power on/off control, RTC, Type-C and so on.
+
+Required properties:
+- compatible: Should be one of the following:
+ "sprd,sc2720"
+ "sprd,sc2721"
+ "sprd,sc2723"
+ "sprd,sc2730"
+ "sprd,sc2731"
+- reg: The address of the device chip select, should be 0.
+- spi-max-frequency: Typically set to 26000000.
+- interrupts: The interrupt line the device is connected to.
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells: The number of cells to describe a PMIC IRQ, must be 2.
+- #address-cells: Child device offset number of cells, must be 1.
+- #size-cells: Child device size number of cells, must be 0.
+
+Example:
+pmic@0 {
+ compatible = "sprd,sc2731";
+ reg = <0>;
+ spi-max-frequency = <26000000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+};
diff --git a/Documentation/devicetree/bindings/mfd/sun4i-gpadc.txt b/Documentation/devicetree/bindings/mfd/sun4i-gpadc.txt
index badff3611a98..86dd8191b04c 100644
--- a/Documentation/devicetree/bindings/mfd/sun4i-gpadc.txt
+++ b/Documentation/devicetree/bindings/mfd/sun4i-gpadc.txt
@@ -10,7 +10,7 @@ Required properties:
- #io-channel-cells: shall be 0,
Example:
- ths: ths@01c25000 {
+ ths: ths@1c25000 {
compatible = "allwinner,sun8i-a33-ths";
reg = <0x01c25000 0x100>;
#thermal-sensor-cells = <0>;
@@ -47,7 +47,7 @@ Optional properties:
Example:
- rtp: rtp@01c25000 {
+ rtp: rtp@1c25000 {
compatible = "allwinner,sun4i-a10-ts";
reg = <0x01c25000 0x100>;
interrupts = <29>;
diff --git a/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt b/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt
index 03c5a551da55..dd2c06540485 100644
--- a/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt
+++ b/Documentation/devicetree/bindings/mfd/sun6i-prcm.txt
@@ -15,7 +15,7 @@ The prcm node may contain several subdevices definitions:
Example:
- prcm: prcm@01f01400 {
+ prcm: prcm@1f01400 {
compatible = "allwinner,sun6i-a31-prcm";
reg = <0x01f01400 0x200>;
diff --git a/Documentation/devicetree/bindings/mfd/syscon.txt b/Documentation/devicetree/bindings/mfd/syscon.txt
index 408f768686f1..8b92d4576c42 100644
--- a/Documentation/devicetree/bindings/mfd/syscon.txt
+++ b/Documentation/devicetree/bindings/mfd/syscon.txt
@@ -18,7 +18,7 @@ Optional property:
performed on the device.
Examples:
-gpr: iomuxc-gpr@020e0000 {
+gpr: iomuxc-gpr@20e0000 {
compatible = "fsl,imx6q-iomuxc-gpr", "syscon";
reg = <0x020e0000 0x38>;
};
diff --git a/Documentation/devicetree/bindings/mmc/amlogic,meson-mx-sdio.txt b/Documentation/devicetree/bindings/mmc/amlogic,meson-mx-sdio.txt
new file mode 100644
index 000000000000..8765c605e6bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/amlogic,meson-mx-sdio.txt
@@ -0,0 +1,54 @@
+* Amlogic Meson6, Meson8 and Meson8b SDIO/MMC controller
+
+The highspeed MMC host controller on Amlogic SoCs provides an interface
+for MMC, SD, SDIO and SDHC types of memory cards.
+
+Supported maximum speeds are the ones of the eMMC standard 4.41 as well
+as the speed of SD standard 2.0.
+
+The hardware provides an internal "mux" which allows up to three slots
+to be controlled. Only one slot can be accessed at a time.
+
+Required properties:
+ - compatible : must be one of
+ - "amlogic,meson8-sdio"
+ - "amlogic,meson8b-sdio"
+ along with the generic "amlogic,meson-mx-sdio"
+ - reg : mmc controller base registers
+ - interrupts : mmc controller interrupt
+ - #address-cells : must be 1
+ - #size-cells : must be 0
+ - clocks : phandle to clock providers
+ - clock-names : must contain "core" and "clkin"
+
+Required child nodes:
+A node for each slot provided by the MMC controller is required.
+NOTE: due to a driver limitation currently only one slot (= child node)
+ is supported!
+
+Required properties on each child node (= slot):
+ - compatible : must be "mmc-slot" (see mmc.txt within this directory)
+ - reg : the slot (or "port") ID
+
+Optional properties on each child node (= slot):
+ - bus-width : must be 1 or 4 (8-bit bus is not supported)
+ - for cd and all other additional generic mmc parameters
+ please refer to mmc.txt within this directory
+
+Examples:
+ mmc@c1108c20 {
+ compatible = "amlogic,meson8-sdio", "amlogic,meson-mx-sdio";
+ reg = <0xc1108c20 0x20>;
+ interrupts = <0 28 1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkc CLKID_SDIO>, <&clkc CLKID_CLK81>;
+ clock-names = "core", "clkin";
+
+ slot@1 {
+ compatible = "mmc-slot";
+ reg = <1>;
+
+ bus-width = <4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index b32ade645ad9..fb11ae8b3b72 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -53,6 +53,9 @@ Optional properties:
- no-sdio: controller is limited to send sdio cmd during initialization
- no-sd: controller is limited to send sd cmd during initialization
- no-mmc: controller is limited to send mmc cmd during initialization
+- fixed-emmc-driver-type: for non-removable eMMC, enforce this driver type.
+ The value <n> is the driver type as specified in the eMMC specification
+ (table 206 in spec version 5.1).
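+ For example, to pin a non-removable eMMC to driver type 4 (a sketch; the
+ chosen value must be one the eMMC device actually supports):
+	fixed-emmc-driver-type = <4>;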
*NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
polarity properties, we have to fix the meaning of the "normal" and "inverted"
@@ -143,7 +146,7 @@ sdhci@ab000000 {
Example with sdio function subnode:
-mmc3: mmc@01c12000 {
+mmc3: mmc@1c12000 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
index 4182ea36ca5b..72d2a734ab85 100644
--- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt
+++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
@@ -7,10 +7,18 @@ This file documents differences between the core properties in mmc.txt
and the properties used by the msdc driver.
Required properties:
-- compatible: Should be "mediatek,mt8173-mmc","mediatek,mt8135-mmc"
+- compatible: value should be one of the following.
+ "mediatek,mt8135-mmc": for mmc host ip compatible with mt8135
+ "mediatek,mt8173-mmc": for mmc host ip compatible with mt8173
+ "mediatek,mt2701-mmc": for mmc host ip compatible with mt2701
+ "mediatek,mt2712-mmc": for mmc host ip compatible with mt2712
+- reg: physical base address of the controller and length
- interrupts: Should contain MSDC interrupt number
-- clocks: MSDC source clock, HCLK
-- clock-names: "source", "hclk"
+- clocks: Should contain phandle for the clock feeding the MMC controller
+- clock-names: Should contain the following:
+ "source" - source clock (required)
+ "hclk" - HCLK which used for host (required)
+ "source_cg" - independent source clock gate (required for MT2712)
- pinctrl-names: should be "default", "state_uhs"
- pinctrl-0: should contain default/high speed pin ctrl
- pinctrl-1: should contain uhs mode pin ctrl
@@ -30,6 +38,10 @@ Optional properties:
- mediatek,hs400-cmd-resp-sel-rising: HS400 command response sample selection
If present,HS400 command responses are sampled on rising edges.
If not present,HS400 command responses are sampled on falling edges.
+- mediatek,latch-ck: Some SoCs do not support enhance_rx and need a correct
+  latch-ck setting to avoid data CRC errors caused by stopping the clock
+  (FIFO full). Valid range = [0:0x7]. If not present, the default value is 0.
+  Applies to the compatible "mediatek,mt2701-mmc".
Examples:
mmc0: mmc@11230000 {
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-fujitsu.txt b/Documentation/devicetree/bindings/mmc/sdhci-fujitsu.txt
index de2c53cff4f1..3ee9263adf73 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-fujitsu.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-fujitsu.txt
@@ -15,6 +15,8 @@ Required properties:
Optional properties:
- vqmmc-supply: phandle to the regulator device tree node, mentioned
as the VCCQ/VDD_IO supply in the eMMC/SD specs.
+- fujitsu,cmd-dat-delay-select: boolean property indicating that this host
+ requires the CMD_DAT_DELAY control to be enabled.
Example:
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 0576264eab5e..bfdcdc4ccdff 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -18,6 +18,8 @@ Required properties:
"core" - SDC MMC clock (MCLK) (required)
"bus" - SDCC bus voter clock (optional)
"xo" - TCXO clock (optional)
+ "cal" - reference clock for RCLK delay calibration (optional)
+ "sleep" - sleep clock for RCLK delay calibration (optional)
Example:
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-omap.txt b/Documentation/devicetree/bindings/mmc/sdhci-omap.txt
new file mode 100644
index 000000000000..51775a372c06
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/sdhci-omap.txt
@@ -0,0 +1,16 @@
+* TI OMAP SDHCI Controller
+
+Refer to mmc.txt for standard MMC bindings.
+
+Required properties:
+- compatible: Should be "ti,dra7-sdhci" for DRA7 and DRA72 controllers
+- ti,hwmods: Must be "mmc<n>", <n> is controller instance starting 1
+
+Example:
+ mmc1: mmc@4809c000 {
+ compatible = "ti,dra7-sdhci";
+ reg = <0x4809c000 0x400>;
+ ti,hwmods = "mmc1";
+ bus-width = <4>;
+ vmmc-supply = <&vmmc>; /* phandle to regulator node */
+ };
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-st.txt b/Documentation/devicetree/bindings/mmc/sdhci-st.txt
index e35645598315..6b3d40ca395e 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-st.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-st.txt
@@ -74,7 +74,7 @@ mmc0: sdhci@fe81e000 {
/* Example SD stih407 family configuration */
-mmc1: sdhci@09080000 {
+mmc1: sdhci@9080000 {
compatible = "st,sdhci-stih407", "st,sdhci";
reg = <0x09080000 0x7ff>;
reg-names = "mmc";
@@ -90,7 +90,7 @@ mmc1: sdhci@09080000 {
/* Example eMMC stih407 family configuration */
-mmc0: sdhci@09060000 {
+mmc0: sdhci@9060000 {
compatible = "st,sdhci-stih407", "st,sdhci";
reg = <0x09060000 0x7ff>, <0x9061008 0x20>;
reg-names = "mmc", "top-mmc-delay";
diff --git a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
index 63b57e2a10fb..132e0007d7d6 100644
--- a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
@@ -29,7 +29,7 @@ Optional properties:
Examples:
- Within .dtsi:
- mmc0: mmc@01c0f000 {
+ mmc0: mmc@1c0f000 {
compatible = "allwinner,sun5i-a13-mmc";
reg = <0x01c0f000 0x1000>;
clocks = <&ahb_gates 8>, <&mmc0_clk>, <&mmc0_output_clk>, <&mmc0_sample_clk>;
@@ -39,7 +39,7 @@ Examples:
};
- Within dts:
- mmc0: mmc@01c0f000 {
+ mmc0: mmc@1c0f000 {
pinctrl-names = "default", "default";
pinctrl-0 = <&mmc0_pins_a>;
pinctrl-1 = <&mmc0_cd_pin_reference_design>;
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index 54ef642f23a0..3c6762430fd9 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -10,7 +10,7 @@ described in mmc.txt, can be used. Additionally the following tmio_mmc-specific
optional bindings can be used.
Required properties:
-- compatible: "renesas,sdhi-shmobile" - a generic sh-mobile SDHI unit
+- compatible: should contain one or more of the following:
"renesas,sdhi-sh73a0" - SDHI IP on SH73A0 SoC
"renesas,sdhi-r7s72100" - SDHI IP on R7S72100 SoC
"renesas,sdhi-r8a73a4" - SDHI IP on R8A73A4 SoC
@@ -26,6 +26,16 @@ Required properties:
"renesas,sdhi-r8a7794" - SDHI IP on R8A7794 SoC
"renesas,sdhi-r8a7795" - SDHI IP on R8A7795 SoC
"renesas,sdhi-r8a7796" - SDHI IP on R8A7796 SoC
+ "renesas,sdhi-shmobile" - a generic sh-mobile SDHI controller
+ "renesas,rcar-gen1-sdhi" - a generic R-Car Gen1 SDHI controller
+ "renesas,rcar-gen2-sdhi" - a generic R-Car Gen2 or RZ/G1
+ SDHI controller
+ "renesas,rcar-gen3-sdhi" - a generic R-Car Gen3 SDHI controller
+
+
+ When compatible with the generic version, nodes must list
+ the SoC-specific version corresponding to the platform
+ first, followed by the generic version.
- clocks: Most controllers only have 1 clock source per channel. However, on
some variations of this controller, the internal card detection
@@ -43,3 +53,61 @@ Optional properties:
- pinctrl-names: should be "default", "state_uhs"
- pinctrl-0: should contain default/high speed pin ctrl
- pinctrl-1: should contain uhs mode pin ctrl
+
+Example: R8A7790 (R-Car H2) SDHI controller nodes
+
+ sdhi0: sd@ee100000 {
+ compatible = "renesas,sdhi-r8a7790", "renesas,rcar-gen2-sdhi";
+ reg = <0 0xee100000 0 0x328>;
+ interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 314>;
+ dmas = <&dmac0 0xcd>, <&dmac0 0xce>,
+ <&dmac1 0xcd>, <&dmac1 0xce>;
+ dma-names = "tx", "rx", "tx", "rx";
+ max-frequency = <195000000>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 314>;
+ status = "disabled";
+ };
+
+ sdhi1: sd@ee120000 {
+ compatible = "renesas,sdhi-r8a7790", "renesas,rcar-gen2-sdhi";
+ reg = <0 0xee120000 0 0x328>;
+ interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 313>;
+ dmas = <&dmac0 0xc9>, <&dmac0 0xca>,
+ <&dmac1 0xc9>, <&dmac1 0xca>;
+ dma-names = "tx", "rx", "tx", "rx";
+ max-frequency = <195000000>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 313>;
+ status = "disabled";
+ };
+
+ sdhi2: sd@ee140000 {
+ compatible = "renesas,sdhi-r8a7790", "renesas,rcar-gen2-sdhi";
+ reg = <0 0xee140000 0 0x100>;
+ interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 312>;
+ dmas = <&dmac0 0xc1>, <&dmac0 0xc2>,
+ <&dmac1 0xc1>, <&dmac1 0xc2>;
+ dma-names = "tx", "rx", "tx", "rx";
+ max-frequency = <97500000>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 312>;
+ status = "disabled";
+ };
+
+ sdhi3: sd@ee160000 {
+ compatible = "renesas,sdhi-r8a7790", "renesas,rcar-gen2-sdhi";
+ reg = <0 0xee160000 0 0x100>;
+ interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 311>;
+ dmas = <&dmac0 0xd3>, <&dmac0 0xd4>,
+ <&dmac1 0xd3>, <&dmac1 0xd4>;
+ dma-names = "tx", "rx", "tx", "rx";
+ max-frequency = <97500000>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 311>;
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
index a37c67bcb43b..5e13a5cdff03 100644
--- a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
@@ -31,7 +31,7 @@ see Documentation/devicetree/bindings/mtd/nand.txt for generic bindings.
Examples:
-nfc: nand@01c03000 {
+nfc: nand@1c03000 {
compatible = "allwinner,sun4i-a10-nand";
reg = <0x01c03000 0x1000>;
interrupts = <0 37 1>;
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
index 10640b17c866..e98118aef5f6 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
@@ -10,7 +10,7 @@ Required properties:
Example:
-emac: ethernet@01c0b000 {
+emac: ethernet@1c0b000 {
compatible = "allwinner,sun4i-a10-emac";
reg = <0x01c0b000 0x1000>;
interrupts = <55>;
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
index 4ec56413779d..ab5b8613b0ef 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
@@ -9,7 +9,7 @@ Optional properties:
- phy-supply: phandle to a regulator if the PHY needs one
Example at the SoC level:
-mdio@01c0b080 {
+mdio@1c0b080 {
compatible = "allwinner,sun4i-a10-mdio";
reg = <0x01c0b080 0x14>;
#address-cells = <1>;
@@ -18,7 +18,7 @@ mdio@01c0b080 {
And at the board level:
-mdio@01c0b080 {
+mdio@1c0b080 {
phy-supply = <&reg_emac_3v3>;
phy0: ethernet-phy@0 {
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.txt b/Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.txt
index ea4d752389a2..8b3f953656e3 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.txt
@@ -15,7 +15,7 @@ Optional properties:
Examples:
- gmac: ethernet@01c50000 {
+ gmac: ethernet@1c50000 {
compatible = "allwinner,sun7i-a20-gmac";
reg = <0x01c50000 0x10000>,
<0x01c20164 0x4>;
diff --git a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
index 26c77d985faf..3956af1d30f3 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
@@ -109,7 +109,7 @@ ethernet@f0ba0000 {
reg = <0xf0ba0000 0xfc4c>;
interrupts = <0x0 0x18 0x0>, <0x0 0x19 0x0>;
- mdio@0e14 {
+ mdio@e14 {
compatible = "brcm,genet-mdio-v4";
#address-cells = <0x1>;
#size-cells = <0x0>;
diff --git a/Documentation/devicetree/bindings/net/can/m_can.txt b/Documentation/devicetree/bindings/net/can/m_can.txt
index 78138333ff7a..63e90421d029 100644
--- a/Documentation/devicetree/bindings/net/can/m_can.txt
+++ b/Documentation/devicetree/bindings/net/can/m_can.txt
@@ -45,7 +45,7 @@ Required properties:
Example:
SoC dtsi:
-m_can1: can@020e8000 {
+m_can1: can@20e8000 {
compatible = "bosch,m_can";
reg = <0x020e8000 0x4000>, <0x02298000 0x4000>;
reg-names = "m_can", "message_ram";
diff --git a/Documentation/devicetree/bindings/net/can/sun4i_can.txt b/Documentation/devicetree/bindings/net/can/sun4i_can.txt
index 84ed1909df76..f69845e6feaf 100644
--- a/Documentation/devicetree/bindings/net/can/sun4i_can.txt
+++ b/Documentation/devicetree/bindings/net/can/sun4i_can.txt
@@ -19,7 +19,7 @@ SoC common .dtsi file:
allwinner,pull = <0>;
};
...
- can0: can@01c2bc00 {
+ can0: can@1c2bc00 {
compatible = "allwinner,sun4i-a10-can";
reg = <0x01c2bc00 0x400>;
interrupts = <0 26 4>;
@@ -29,7 +29,7 @@ SoC common .dtsi file:
Board specific .dts file:
- can0: can@01c2bc00 {
+ can0: can@1c2bc00 {
pinctrl-names = "default";
pinctrl-0 = <&can0_pins_a>;
status = "okay";
diff --git a/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt b/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt
index b2bd4704f859..86602f264dce 100644
--- a/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt
+++ b/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt
@@ -20,7 +20,7 @@ Optional properties:
Example:
-mmc3: mmc@01c12000 {
+mmc3: mmc@1c12000 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt b/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt
index ef06d061913c..d69543701d5d 100644
--- a/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt
+++ b/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt
@@ -5,6 +5,7 @@ Required properties:
"allwinner,sun4i-a10-sid"
"allwinner,sun7i-a20-sid"
"allwinner,sun8i-h3-sid"
+ "allwinner,sun50i-a64-sid"
- reg: Should contain registers location and length
@@ -13,13 +14,13 @@ Are child nodes of qfprom, bindings of which as described in
bindings/nvmem/nvmem.txt
Example for sun4i:
- sid@01c23800 {
+ sid@1c23800 {
compatible = "allwinner,sun4i-a10-sid";
reg = <0x01c23800 0x10>
};
Example for sun7i:
- sid@01c23800 {
+ sid@1c23800 {
compatible = "allwinner,sun7i-a20-sid";
reg = <0x01c23800 0x200>
};
diff --git a/Documentation/devicetree/bindings/nvmem/amlogic-efuse.txt b/Documentation/devicetree/bindings/nvmem/amlogic-efuse.txt
index fafd85bd67a6..e3298e18de26 100644
--- a/Documentation/devicetree/bindings/nvmem/amlogic-efuse.txt
+++ b/Documentation/devicetree/bindings/nvmem/amlogic-efuse.txt
@@ -1,4 +1,4 @@
-= Amlogic eFuse device tree bindings =
+= Amlogic Meson GX eFuse device tree bindings =
Required properties:
- compatible: should be "amlogic,meson-gxbb-efuse"
diff --git a/Documentation/devicetree/bindings/nvmem/amlogic-meson-mx-efuse.txt b/Documentation/devicetree/bindings/nvmem/amlogic-meson-mx-efuse.txt
new file mode 100644
index 000000000000..a3c63954a1a4
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/amlogic-meson-mx-efuse.txt
@@ -0,0 +1,22 @@
+Amlogic Meson6/Meson8/Meson8b efuse
+
+Required Properties:
+- compatible: depending on the SoC this should be one of:
+ - "amlogic,meson6-efuse"
+ - "amlogic,meson8-efuse"
+ - "amlogic,meson8b-efuse"
+- reg: base address and size of the efuse registers
+- clocks: a reference to the efuse core gate clock
+- clock-names: must be "core"
+
+All properties and sub-nodes as well as the consumer bindings
+defined in nvmem.txt in this directory are also supported.
+
+
+Example:
+ efuse: nvmem@0 {
+ compatible = "amlogic,meson8-efuse";
+ reg = <0x0 0x2000>;
+ clocks = <&clkc CLKID_EFUSE>;
+ clock-names = "core";
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt b/Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt
index 6462e12d8de6..0415265c215a 100644
--- a/Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt
+++ b/Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt
@@ -10,7 +10,7 @@ Required Properties:
Example:
-otp: otp@0301c800 {
+otp: otp@301c800 {
compatible = "brcm,ocotp";
reg = <0x0301c800 0x2c>;
brcm,ocotp-size = <2048>;
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
index 70d791b03ea1..f162c72b4e36 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
@@ -19,7 +19,7 @@ Optional properties:
Example:
- ocotp: ocotp@021bc000 {
+ ocotp: ocotp@21bc000 {
compatible = "fsl,imx6q-ocotp", "syscon";
reg = <0x021bc000 0x4000>;
clocks = <&clks IMX6QDL_CLK_IIM>;
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.txt b/Documentation/devicetree/bindings/nvmem/nvmem.txt
index b52bc11e9597..fd06c09b822b 100644
--- a/Documentation/devicetree/bindings/nvmem/nvmem.txt
+++ b/Documentation/devicetree/bindings/nvmem/nvmem.txt
@@ -33,7 +33,7 @@ bits: Is pair of bit location and number of bits, which specifies offset
For example:
/* Provider */
- qfprom: qfprom@00700000 {
+ qfprom: qfprom@700000 {
...
/* Data cells */
diff --git a/Documentation/devicetree/bindings/nvmem/qfprom.txt b/Documentation/devicetree/bindings/nvmem/qfprom.txt
index 4ad68b7f5c18..26fe878d5c86 100644
--- a/Documentation/devicetree/bindings/nvmem/qfprom.txt
+++ b/Documentation/devicetree/bindings/nvmem/qfprom.txt
@@ -12,7 +12,7 @@ bindings/nvmem/nvmem.txt
Example:
- qfprom: qfprom@00700000 {
+ qfprom: qfprom@700000 {
compatible = "qcom,qfprom";
reg = <0x00700000 0x8000>;
...
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
index 1ff02afdc55a..60bec4782806 100644
--- a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
+++ b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
@@ -6,6 +6,7 @@ Required properties:
- "rockchip,rk3188-efuse" - for RK3188 SoCs.
- "rockchip,rk3228-efuse" - for RK3228 SoCs.
- "rockchip,rk3288-efuse" - for RK3288 SoCs.
+ - "rockchip,rk3368-efuse" - for RK3368 SoCs.
- "rockchip,rk3399-efuse" - for RK3399 SoCs.
- reg: Should contain the registers location and exact eFuse size
- clocks: Should be the clock id of eFuse
diff --git a/Documentation/devicetree/bindings/nvmem/snvs-lpgpr.txt b/Documentation/devicetree/bindings/nvmem/snvs-lpgpr.txt
new file mode 100644
index 000000000000..20bc49b49799
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/snvs-lpgpr.txt
@@ -0,0 +1,20 @@
+Device tree bindings for Low Power General Purpose Register found in i.MX6Q/D
+Secure Non-Volatile Storage.
+
+This DT node should be represented as a sub-node of a "syscon",
+"simple-mfd" node.
+
+Required properties:
+- compatible: should be one of the following variants:
+ "fsl,imx6q-snvs-lpgpr" for Freescale i.MX6Q/D/DL/S
+ "fsl,imx6ul-snvs-lpgpr" for Freescale i.MX6UL
+
+Example:
+snvs: snvs@020cc000 {
+ compatible = "fsl,sec-v4.0-mon", "syscon", "simple-mfd";
+ reg = <0x020cc000 0x4000>;
+
+ snvs_lpgpr: snvs-lpgpr {
+ compatible = "fsl,imx6q-snvs-lpgpr";
+ };
+};
diff --git a/Documentation/devicetree/bindings/nvmem/uniphier-efuse.txt b/Documentation/devicetree/bindings/nvmem/uniphier-efuse.txt
new file mode 100644
index 000000000000..eccf490d5a6d
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/uniphier-efuse.txt
@@ -0,0 +1,49 @@
+= UniPhier eFuse device tree bindings =
+
+This UniPhier eFuse node must be a child of the soc-glue node.
+
+Required properties:
+- compatible: should be "socionext,uniphier-efuse"
+- reg: should contain the register location and length
+
+= Data cells =
+Are child nodes of efuse, bindings of which as described in
+bindings/nvmem/nvmem.txt
+
+Example:
+
+ soc-glue@5f900000 {
+ compatible = "socionext,uniphier-ld20-soc-glue-debug",
+ "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x5f900000 0x2000>;
+
+ efuse@100 {
+ compatible = "socionext,uniphier-efuse";
+ reg = <0x100 0x28>;
+ };
+
+ efuse@200 {
+ compatible = "socionext,uniphier-efuse";
+ reg = <0x200 0x68>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* Data cells */
+ usb_mon: usb-mon@54 {
+ reg = <0x54 0xc>;
+ };
+ };
+ };
+
+= Data consumers =
+Are device nodes which consume nvmem data cells.
+
+Example:
+
+ usb {
+ ...
+ nvmem-cells = <&usb_mon>;
+ nvmem-cell-names = "usb_mon";
+ };
diff --git a/Documentation/devicetree/bindings/openrisc/opencores/or1ksim.txt b/Documentation/devicetree/bindings/openrisc/opencores/or1ksim.txt
new file mode 100644
index 000000000000..4950c794ecbb
--- /dev/null
+++ b/Documentation/devicetree/bindings/openrisc/opencores/or1ksim.txt
@@ -0,0 +1,39 @@
+OpenRISC Generic SoC
+====================
+
+Boards and FPGA SoCs which support the OpenRISC standard platform. The
+platform essentially follows the conventions of the OpenRISC architecture
+specification; however, some aspects, such as the boot protocol, have been
+defined by the Linux port.
+
+Required properties
+-------------------
+ - compatible: Must include "opencores,or1ksim"
+
+CPU nodes:
+----------
+A "cpus" node is required. Required properties:
+ - #address-cells: Must be 1.
+ - #size-cells: Must be 0.
+A CPU sub-node is also required for at least CPU 0. Since the topology may
+be probed via CPS, it is not necessary to specify secondary CPUs. Required
+properties:
+ - compatible: Must be "opencores,or1200-rtlsvn481".
+ - reg: CPU number.
+ - clock-frequency: The CPU clock frequency in Hz.
+Example:
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ compatible = "opencores,or1200-rtlsvn481";
+ reg = <0>;
+ clock-frequency = <20000000>;
+ };
+ };
+
+
+Boot protocol
+-------------
+The bootloader may pass the following arguments to the kernel:
+ - r3: address of a flattened device-tree blob or 0x0.
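+
+Example root node (a sketch; only the "opencores,or1ksim" compatible is
+mandated by this binding):
+
+	/ {
+		compatible = "opencores,or1ksim";
+		/* cpus node as shown above, plus memory, UART, etc. */
+	};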
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie-ecam.txt b/Documentation/devicetree/bindings/pci/designware-pcie-ecam.txt
new file mode 100644
index 000000000000..515b2f9542e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/designware-pcie-ecam.txt
@@ -0,0 +1,42 @@
+* Synopsys DesignWare PCIe root complex in ECAM shift mode
+
+In some cases, firmware may already have configured the Synopsys DesignWare
+PCIe controller in RC mode with static ATU window mappings that cover all
+config, MMIO and I/O spaces in a [mostly] ECAM compatible fashion.
+In this case, there is no need for the OS to perform any low level setup
+of clocks, PHYs or device registers, nor is there any reason for the driver
+to reconfigure ATU windows for config and/or IO space accesses at runtime.
+
+In cases where the IP was synthesized with a minimum ATU window size of
+64 KB, it cannot be supported by the generic ECAM driver, because it
+requires special config space accessors that filter accesses to device #1
+and beyond on the first bus.
+
+Required properties:
+- compatible: "marvell,armada8k-pcie-ecam" or
+ "socionext,synquacer-pcie-ecam" or
+ "snps,dw-pcie-ecam" (must be preceded by a more specific match)
+
+Please refer to the binding document of "pci-host-ecam-generic" in the
+file host-generic-pci.txt for a description of the remaining required
+and optional properties.
+
+Example:
+
+ pcie1: pcie@7f000000 {
+ compatible = "socionext,synquacer-pcie-ecam", "snps,dw-pcie-ecam";
+ device_type = "pci";
+ reg = <0x0 0x7f000000 0x0 0xf00000>;
+ bus-range = <0x0 0xe>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x1000000 0x00 0x00010000 0x00 0x7ff00000 0x0 0x00010000>,
+ <0x2000000 0x00 0x70000000 0x00 0x70000000 0x0 0x0f000000>,
+ <0x3000000 0x3f 0x00000000 0x3f 0x00000000 0x1 0x00000000>;
+
+ #interrupt-cells = <0x1>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x0>;
+ interrupt-map = <0x0 0x0 0x0 0x0 &gic 0x0 0x0 0x0 182 0x4>;
+ msi-map = <0x0 &its 0x0 0x10000>;
+ dma-coherent;
+ };
diff --git a/Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt b/Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
new file mode 100644
index 000000000000..c84bc027930b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
@@ -0,0 +1,68 @@
+HiSilicon STB PCIe host bridge DT description
+
+The HiSilicon STB PCIe host controller is based on the DesignWare PCIe core.
+It shares common functions with the DesignWare PCIe core driver and inherits
+common properties defined in
+Documentation/devicetree/bindings/pci/designware-pcie.txt.
+
+Additional properties are described here:
+
+Required properties:
+- compatible: Should be one of the following strings:
+ "hisilicon,hi3798cv200-pcie"
+- reg: Should contain sysctl, rc_dbi, config registers location and length.
+- reg-names: Must include the following entries:
+ "control": control registers of PCIe controller;
+ "rc-dbi": configuration space of PCIe controller;
+ "config": configuration transaction space of PCIe controller.
+- bus-range: PCI bus numbers covered.
+- interrupts: MSI interrupt.
+- interrupt-names: Must include the "msi" entry.
+- clocks: List of phandle and clock specifier pairs as listed in clock-names
+ property.
+- clock-names: Must include the following entries:
+ "aux": auxiliary gate clock;
+ "pipe": pipe gate clock;
+ "sys": sys gate clock;
+ "bus": bus gate clock.
+- resets: List of phandle and reset specifier pairs as listed in reset-names
+ property.
+- reset-names: Must include the following entries:
+ "soft": soft reset;
+ "sys": sys reset;
+ "bus": bus reset.
+
+Optional properties:
+- reset-gpios: The gpio to generate PCIe PERST# assert and deassert signal.
+- phys: List of phandle and phy mode specifier, should be 0.
+- phy-names: Must be "phy".
+
+Example:
+ pcie@f9860000 {
+ compatible = "hisilicon,hi3798cv200-pcie";
+ reg = <0xf9860000 0x1000>,
+ <0xf0000000 0x2000>,
+ <0xf2000000 0x01000000>;
+ reg-names = "control", "rc-dbi", "config";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ bus-range = <0 15>;
+ num-lanes = <1>;
+ ranges=<0x81000000 0 0 0xf4000000 0 0x00010000
+ 0x82000000 0 0xf3000000 0xf3000000 0 0x01000000>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg PCIE_AUX_CLK>,
+ <&crg PCIE_PIPE_CLK>,
+ <&crg PCIE_SYS_CLK>,
+ <&crg PCIE_BUS_CLK>;
+ clock-names = "aux", "pipe", "sys", "bus";
+ resets = <&crg 0x18c 6>, <&crg 0x18c 5>, <&crg 0x18c 4>;
+ reset-names = "soft", "sys", "bus";
+ phys = <&combphy1 PHY_TYPE_PCIE>;
+ phy-names = "phy";
+ };
diff --git a/Documentation/devicetree/bindings/pci/layerscape-pci.txt b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
index c0484da0f20d..66df1e81e0b8 100644
--- a/Documentation/devicetree/bindings/pci/layerscape-pci.txt
+++ b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
@@ -18,6 +18,7 @@ Required properties:
"fsl,ls2088a-pcie"
"fsl,ls1088a-pcie"
"fsl,ls1046a-pcie"
+ "fsl,ls1012a-pcie"
- reg: base addresses and lengths of the PCIe controller register blocks.
- interrupts: A list of interrupt outputs of the controller. Must contain an
entry for each entry in the interrupt-names property.
diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
index 982a74ea6df9..145a4f04194f 100644
--- a/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
@@ -1,10 +1,15 @@
NVIDIA Tegra PCIe controller
Required properties:
-- compatible: For Tegra20, must contain "nvidia,tegra20-pcie". For Tegra30,
- "nvidia,tegra30-pcie". For Tegra124, must contain "nvidia,tegra124-pcie".
- Otherwise, must contain "nvidia,<chip>-pcie", plus one of the above, where
- <chip> is tegra132 or tegra210.
+- compatible: Must be:
+ - "nvidia,tegra20-pcie": for Tegra20
+ - "nvidia,tegra30-pcie": for Tegra30
+ - "nvidia,tegra124-pcie": for Tegra124 and Tegra132
+ - "nvidia,tegra210-pcie": for Tegra210
+ - "nvidia,tegra186-pcie": for Tegra186
+- power-domains: To ungate the power partition via the BPMP powergate driver.
+  Must contain the BPMP phandle and the PCIe power partition ID. This is
+  required only for Tegra186.
- device_type: Must be "pci"
- reg: A list of physical base address and length for each set of controller
registers. Must contain an entry for each entry in the reg-names property.
@@ -124,6 +129,16 @@ Power supplies for Tegra210:
- vddio-pex-ctl-supply: Power supply for PCIe control I/O partition. Must
supply 1.8 V.
+Power supplies for Tegra186:
+- Required:
+ - dvdd-pex-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.
+ - hvdd-pex-pll-supply: High-voltage supply for PLLE (shared with USB3). Must
+ supply 1.8 V.
+ - hvdd-pex-supply: High-voltage supply for PCIe I/O and PCIe output clocks.
+ Must supply 1.8 V.
+ - vddio-pexctl-aud-supply: Power supply for PCIe side band signals. Must
+ supply 1.8 V.
+
Root ports are defined as subnodes of the PCIe controller node.
Required properties:
@@ -255,7 +270,7 @@ Tegra30:
SoC DTSI:
- pcie-controller@00003000 {
+ pcie-controller@3000 {
compatible = "nvidia,tegra30-pcie";
device_type = "pci";
reg = <0x00003000 0x00000800 /* PADS registers */
@@ -334,7 +349,7 @@ SoC DTSI:
Board DTS:
- pcie-controller@00003000 {
+ pcie-controller@3000 {
status = "okay";
avdd-pexa-supply = <&ldo1_reg>;
@@ -360,7 +375,7 @@ Tegra124:
SoC DTSI:
- pcie-controller@01003000 {
+ pcie-controller@1003000 {
compatible = "nvidia,tegra124-pcie";
device_type = "pci";
reg = <0x0 0x01003000 0x0 0x00000800 /* PADS registers */
@@ -425,7 +440,7 @@ SoC DTSI:
Board DTS:
- pcie-controller@01003000 {
+ pcie-controller@1003000 {
status = "okay";
avddio-pex-supply = <&vdd_1v05_run>;
@@ -456,7 +471,7 @@ Tegra210:
SoC DTSI:
- pcie-controller@01003000 {
+ pcie-controller@1003000 {
compatible = "nvidia,tegra210-pcie";
device_type = "pci";
reg = <0x0 0x01003000 0x0 0x00000800 /* PADS registers */
@@ -521,7 +536,7 @@ SoC DTSI:
Board DTS:
- pcie-controller@01003000 {
+ pcie-controller@1003000 {
status = "okay";
avdd-pll-uerefe-supply = <&avdd_1v05_pll>;
@@ -546,3 +561,114 @@ Board DTS:
status = "okay";
};
};
+
+Tegra186:
+---------
+
+SoC DTSI:
+
+ pcie@10003000 {
+ compatible = "nvidia,tegra186-pcie";
+ power-domains = <&bpmp TEGRA186_POWER_DOMAIN_PCX>;
+ device_type = "pci";
+ reg = <0x0 0x10003000 0x0 0x00000800 /* PADS registers */
+ 0x0 0x10003800 0x0 0x00000800 /* AFI registers */
+ 0x0 0x40000000 0x0 0x10000000>; /* configuration space */
+ reg-names = "pads", "afi", "cs";
+
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>; /* MSI interrupt */
+ interrupt-names = "intr", "msi";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+
+ bus-range = <0x00 0xff>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ranges = <0x82000000 0 0x10000000 0x0 0x10000000 0 0x00001000 /* port 0 configuration space */
+ 0x82000000 0 0x10001000 0x0 0x10001000 0 0x00001000 /* port 1 configuration space */
+ 0x82000000 0 0x10004000 0x0 0x10004000 0 0x00001000 /* port 2 configuration space */
+ 0x81000000 0 0x0 0x0 0x50000000 0 0x00010000 /* downstream I/O (64 KiB) */
+ 0x82000000 0 0x50100000 0x0 0x50100000 0 0x07F00000 /* non-prefetchable memory (127 MiB) */
+ 0xc2000000 0 0x58000000 0x0 0x58000000 0 0x28000000>; /* prefetchable memory (640 MiB) */
+
+ clocks = <&bpmp TEGRA186_CLK_AFI>,
+ <&bpmp TEGRA186_CLK_PCIE>,
+ <&bpmp TEGRA186_CLK_PLLE>;
+ clock-names = "afi", "pex", "pll_e";
+
+ resets = <&bpmp TEGRA186_RESET_AFI>,
+ <&bpmp TEGRA186_RESET_PCIE>,
+ <&bpmp TEGRA186_RESET_PCIEXCLK>;
+ reset-names = "afi", "pex", "pcie_x";
+
+ status = "disabled";
+
+ pci@1,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82000800 0 0x10000000 0 0x1000>;
+ reg = <0x000800 0 0 0 0>;
+ status = "disabled";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+
+ nvidia,num-lanes = <2>;
+ };
+
+ pci@2,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82001000 0 0x10001000 0 0x1000>;
+ reg = <0x001000 0 0 0 0>;
+ status = "disabled";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+
+ nvidia,num-lanes = <1>;
+ };
+
+ pci@3,0 {
+ device_type = "pci";
+ assigned-addresses = <0x82001800 0 0x10004000 0 0x1000>;
+ reg = <0x001800 0 0 0 0>;
+ status = "disabled";
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+
+ nvidia,num-lanes = <1>;
+ };
+ };
+
+Board DTS:
+
+ pcie@10003000 {
+ status = "okay";
+
+ dvdd-pex-supply = <&vdd_pex>;
+ hvdd-pex-pll-supply = <&vdd_1v8>;
+ hvdd-pex-supply = <&vdd_1v8>;
+ vddio-pexctl-aud-supply = <&vdd_1v8>;
+
+ pci@1,0 {
+ nvidia,num-lanes = <4>;
+ status = "okay";
+ };
+
+ pci@2,0 {
+ nvidia,num-lanes = <0>;
+ status = "disabled";
+ };
+
+ pci@3,0 {
+ nvidia,num-lanes = <1>;
+ status = "disabled";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
index 3d038638612b..9fe7e12a7bf3 100644
--- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
+++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
@@ -60,17 +60,15 @@ Example SoC configuration:
0x0800 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
0x1000 0 0 2 &gic 0 108 IRQ_TYPE_LEVEL_HIGH>;
- pci@0,1 {
+ usb@1,0 {
reg = <0x800 0 0 0 0>;
- device_type = "pci";
- phys = <&usbphy 0 0>;
+ phys = <&usb0 0>;
phy-names = "usb";
};
- pci@0,2 {
+ usb@2,0 {
reg = <0x1000 0 0 0 0>;
- device_type = "pci";
- phys = <&usbphy 0 0>;
+ phys = <&usb0 0>;
phy-names = "usb";
};
};
diff --git a/Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt b/Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt
index 30b364e504ba..11063293f761 100644
--- a/Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt
+++ b/Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt
@@ -2,14 +2,75 @@ V3 Semiconductor V360 EPC PCI bridge
This bridge is found in the ARM Integrator/AP (Application Platform)
-Integrator-specific notes:
+Required properties:
+- compatible: should be one of:
+ "v3,v360epc-pci"
+ "arm,integrator-ap-pci", "v3,v360epc-pci"
+- reg: should contain two register areas:
+ first the base address of the V3 host bridge controller, 64KB
+ second the configuration area register space, 16MB
+- interrupts: should contain a reference to the V3 error interrupt
+ as routed on the system.
+- bus-range: see pci.txt
+- ranges: this follows the standard PCI bindings in the IEEE Std
+ 1275-1994 (see pci.txt) with the following restrictions:
+ - The non-prefetchable and prefetchable memory windows must
+ each be exactly 256MB (0x10000000) in size.
+ - The prefetchable memory window must be immediately adjacent
+ to the non-prefetchable memory window.
+- dma-ranges: three ranges for the inbound memory region. The ranges must
+ be aligned to a 1MB boundary, and may be 1MB, 2MB, 4MB, 8MB, 16MB, 32MB,
+ 64MB, 128MB, 256MB, 512MB, 1GB or 2GB in size. The memory should be marked
+ as pre-fetchable. Two ranges are supported by the hardware.
-- syscon: should contain a link to the syscon device node (since
+Integrator-specific required properties:
+- syscon: should contain a link to the syscon device node, since
on the Integrator, some registers in the syscon are required to
- operate the V3).
+ operate the V3 host bridge.
-V360 EPC specific notes:
+Example:
-- reg: should contain the base address of the V3 adapter.
-- interrupts: should contain a reference to the V3 error interrupt
- as routed on the system.
+pci: pciv3@62000000 {
+ compatible = "arm,integrator-ap-pci", "v3,v360epc-pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0x62000000 0x10000>, <0x61000000 0x01000000>;
+ interrupt-parent = <&pic>;
+ interrupts = <17>; /* Bus error IRQ */
+ clocks = <&pciclk>;
+ bus-range = <0x00 0xff>;
+ ranges = <0x01000000 0 0x00000000 /* I/O space @00000000 */
+ 0x60000000 0 0x01000000 /* 16 MiB @ LB 60000000 */
+ 0x02000000 0 0x40000000 /* non-prefetchable memory @40000000 */
+ 0x40000000 0 0x10000000 /* 256 MiB @ LB 40000000 1:1 */
+ 0x42000000 0 0x50000000 /* prefetchable memory @50000000 */
+ 0x50000000 0 0x10000000>; /* 256 MiB @ LB 50000000 1:1 */
+ dma-ranges = <0x02000000 0 0x20000000 /* EBI memory space */
+ 0x20000000 0 0x20000000 /* 512 MB @ LB 20000000 1:1 */
+ 0x02000000 0 0x80000000 /* Core module alias memory */
+ 0x80000000 0 0x40000000>; /* 1GB @ LB 80000000 */
+ interrupt-map-mask = <0xf800 0 0 0x7>;
+ interrupt-map = <
+ /* IDSEL 9 */
+ 0x4800 0 0 1 &pic 13 /* INT A on slot 9 is irq 13 */
+ 0x4800 0 0 2 &pic 14 /* INT B on slot 9 is irq 14 */
+ 0x4800 0 0 3 &pic 15 /* INT C on slot 9 is irq 15 */
+ 0x4800 0 0 4 &pic 16 /* INT D on slot 9 is irq 16 */
+ /* IDSEL 10 */
+ 0x5000 0 0 1 &pic 14 /* INT A on slot 10 is irq 14 */
+ 0x5000 0 0 2 &pic 15 /* INT B on slot 10 is irq 15 */
+ 0x5000 0 0 3 &pic 16 /* INT C on slot 10 is irq 16 */
+ 0x5000 0 0 4 &pic 13 /* INT D on slot 10 is irq 13 */
+ /* IDSEL 11 */
+ 0x5800 0 0 1 &pic 15 /* INT A on slot 11 is irq 15 */
+ 0x5800 0 0 2 &pic 16 /* INT B on slot 11 is irq 16 */
+ 0x5800 0 0 3 &pic 13 /* INT C on slot 11 is irq 13 */
+ 0x5800 0 0 4 &pic 14 /* INT D on slot 11 is irq 14 */
+ /* IDSEL 12 */
+ 0x6000 0 0 1 &pic 16 /* INT A on slot 12 is irq 16 */
+ 0x6000 0 0 2 &pic 13 /* INT B on slot 12 is irq 13 */
+ 0x6000 0 0 3 &pic 14 /* INT C on slot 12 is irq 14 */
+ 0x6000 0 0 4 &pic 15 /* INT D on slot 12 is irq 15 */
+ >;
+};
diff --git a/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.txt b/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.txt
new file mode 100644
index 000000000000..24a0d06acd1d
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/brcm,brcmstb-usb-phy.txt
@@ -0,0 +1,43 @@
+Broadcom STB USB PHY
+
+Required properties:
+ - compatible: brcm,brcmstb-usb-phy
+ - reg: two offset and length pairs.
+ The first pair specifies a mandatory set of memory mapped
+ registers used for general control of the PHY.
+ The second pair specifies optional registers used by some of
+ the SoCs that support USB 3.x
+ - #phy-cells: Shall be 1 as it expects one argument for setting
+ the type of the PHY. Possible values are:
+ - PHY_TYPE_USB2 for USB1.1/2.0 PHY
+ - PHY_TYPE_USB3 for USB3.x PHY
+
+Optional Properties:
+- clocks : clock phandles.
+- clock-names: String, clock name.
+- brcm,ipp: Boolean, Invert Port Power.
+ Possible values are: 0 (Don't invert), 1 (Invert)
+- brcm,ioc: Boolean, Invert Over Current detection.
+ Possible values are: 0 (Don't invert), 1 (Invert)
+NOTE: one or both of the following two properties must be set
+- brcm,has-xhci: Boolean indicating the phy has an XHCI phy.
+- brcm,has-eohci: Boolean indicating the phy has an EHCI/OHCI phy.
+- dr_mode: String, PHY Device mode.
+ Possible values are: "host", "peripheral ", "drd" or "typec-pd"
+ If this property is not defined, the phy will default to "host" mode.
+
+Example:
+
+usbphy_0: usb-phy@f0470200 {
+ reg = <0xf0470200 0xb8>,
+ <0xf0471940 0x6c0>;
+ compatible = "brcm,brcmstb-usb-phy";
+ #phy-cells = <1>;
+ dr_mode = "host"
+ brcm,ioc = <1>;
+ brcm,ipp = <1>;
+ brcm,has-xhci;
+ brcm,has-eohci;
+ clocks = <&usb20>, <&usb30>;
+ clock-names = "sw_usb", "sw_usb3";
+};
diff --git a/Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.txt b/Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.txt
index 761c4bc24a9b..10efff28b52b 100644
--- a/Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.txt
+++ b/Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.txt
@@ -15,7 +15,7 @@ Required properties For the child node:
- #phy-cells: must be 0
Example:
- pcie_phy: phy@0301d0a0 {
+ pcie_phy: phy@301d0a0 {
compatible = "brcm,cygnus-pcie-phy";
reg = <0x0301d0a0 0x14>;
diff --git a/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt b/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
index 97977cd29a98..0aced97d8092 100644
--- a/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
+++ b/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
@@ -27,7 +27,16 @@ Sub-nodes optional properties:
This property is not applicable for "brcm,iproc-ns2-sata-phy",
"brcm,iproc-nsp-sata-phy" and "brcm,iproc-sr-sata-phy".
-Example:
+- brcm,rxaeq-mode: string that indicates the desired RX equalizer
+ mode, possible values are:
+ "off" (equivalent to not specifying the property)
+ "auto"
+ "manual" (brcm,rxaeq-value is used in that case)
+
+- brcm,rxaeq-value: when 'rxaeq-mode' is set to "manual", provides the RX
+ equalizer value that should be used. Allowed range is 0..63.
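+
+  For instance, to force manual RX equalization in a port sub-node (a sketch;
+  the value 20 is only an illustration and must be tuned per board):
+	brcm,rxaeq-mode = "manual";
+	brcm,rxaeq-value = <20>;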
+
+Example
sata-phy@f0458100 {
compatible = "brcm,bcm7445-sata-phy", "brcm,phy-sata3";
reg = <0xf0458100 0x1e00>, <0xf045804c 0x10>;
diff --git a/Documentation/devicetree/bindings/phy/mxs-usb-phy.txt b/Documentation/devicetree/bindings/phy/mxs-usb-phy.txt
index 1d25b04cd05e..6ac98b3b5f57 100644
--- a/Documentation/devicetree/bindings/phy/mxs-usb-phy.txt
+++ b/Documentation/devicetree/bindings/phy/mxs-usb-phy.txt
@@ -23,7 +23,7 @@ Optional properties:
the 17.78mA TX reference current. Default: 100
Example:
-usbphy1: usbphy@020c9000 {
+usbphy1: usbphy@20c9000 {
compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
reg = <0x020c9000 0x1000>;
interrupts = <0 44 0x04>;
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt b/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt
index 91da947ae9b6..eeb9e1874ea6 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt
@@ -4,10 +4,13 @@ This file provides information on what the device node for the R-Car generation
2 USB PHY contains.
Required properties:
-- compatible: "renesas,usb-phy-r8a7790" if the device is a part of R8A7790 SoC.
+- compatible: "renesas,usb-phy-r8a7743" if the device is a part of R8A7743 SoC.
+ "renesas,usb-phy-r8a7745" if the device is a part of R8A7745 SoC.
+ "renesas,usb-phy-r8a7790" if the device is a part of R8A7790 SoC.
"renesas,usb-phy-r8a7791" if the device is a part of R8A7791 SoC.
"renesas,usb-phy-r8a7794" if the device is a part of R8A7794 SoC.
- "renesas,rcar-gen2-usb-phy" for a generic R-Car Gen2 compatible device.
+ "renesas,rcar-gen2-usb-phy" for a generic R-Car Gen2 or
+ RZ/G1 compatible device.
When compatible with the generic version, nodes must list the
SoC-specific version corresponding to the platform first
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
index ace9cce2704a..99b651b33110 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
@@ -8,6 +8,8 @@ Required properties:
SoC.
"renesas,usb2-phy-r8a7796" if the device is a part of an R8A7796
SoC.
+ "renesas,usb2-phy-r8a77995" if the device is a part of an
+ R8A77995 SoC.
"renesas,rcar-gen3-usb2-phy" for a generic R-Car Gen3 compatible device.
When compatible with the generic version, nodes must list the
diff --git a/Documentation/devicetree/bindings/phy/sun9i-usb-phy.txt b/Documentation/devicetree/bindings/phy/sun9i-usb-phy.txt
index f9853156e311..64f7109aea1f 100644
--- a/Documentation/devicetree/bindings/phy/sun9i-usb-phy.txt
+++ b/Documentation/devicetree/bindings/phy/sun9i-usb-phy.txt
@@ -25,7 +25,7 @@ It is recommended to list all clocks and resets available.
The driver will only use those matching the phy_type.
Example:
- usbphy1: phy@00a01800 {
+ usbphy1: phy@a01800 {
compatible = "allwinner,sun9i-a80-usb-phy";
reg = <0x00a01800 0x4>;
clocks = <&usb_phy_clk 2>, <&usb_phy_clk 10>,
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
index 6f2ec9af0de2..09789fdfa749 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
@@ -89,7 +89,7 @@ Optional subnode-properties:
Examples:
-pio: pinctrl@01c20800 {
+pio: pinctrl@1c20800 {
compatible = "allwinner,sun5i-a13-pinctrl";
reg = <0x01c20800 0x400>;
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
index 61466c58faae..d857b67fab72 100644
--- a/Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
@@ -9,8 +9,14 @@ The pin controller node must be a subnode of the system controller node.
Required properties:
- compatible: "cortina,gemini-pinctrl"
-Subnodes of the pin controller contain pin control multiplexing set-up.
-Please refer to pinctrl-bindings.txt for generic pin multiplexing nodes.
+Subnodes of the pin controller contain pin control multiplexing set-up
+and pin configuration of individual pins.
+
+Please refer to pinctrl-bindings.txt for generic pin multiplexing nodes
+and generic pin config nodes.
+
+Supported configurations:
+- skew-delay is supported on the Ethernet pins
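+
+For instance, a pin configuration node for the Ethernet pins could set
+(a sketch; the delay value is illustrative only):
+	skew-delay = <7>;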
Example:
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx-pinctrl.txt
index 42d74f8a1bcc..a1050b5982ec 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx-pinctrl.txt
@@ -58,14 +58,14 @@ Some requirements for using fsl,imx-pinctrl binding:
configurations by referring to the phandle of that pin configuration node.
Examples:
-usdhc@0219c000 { /* uSDHC4 */
+usdhc@219c000 { /* uSDHC4 */
non-removable;
vmmc-supply = <&reg_3p3v>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc4_1>;
};
-iomuxc@020e0000 {
+iomuxc@20e0000 {
compatible = "fsl,imx6q-iomuxc";
reg = <0x020e0000 0x4000>;
diff --git a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt
index 51b943cc9770..cf9ccdff4455 100644
--- a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt
@@ -89,7 +89,7 @@ Valid values for pin and group names are:
Example:
- pinctrl_pdc: pinctrl@02006500 {
+ pinctrl_pdc: pinctrl@2006500 {
#gpio-range-cells = <3>;
compatible = "img,tz1090-pdc-pinctrl";
reg = <0x02006500 0x100>;
@@ -121,7 +121,7 @@ Example board file extracts:
};
};
- ir: ir@02006200 {
+ ir: ir@2006200 {
pinctrl-names = "default";
pinctrl-0 = <&irmod_default>;
};
diff --git a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
index 509faa87ad0e..2dfd9a3fc1e4 100644
--- a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
@@ -197,7 +197,7 @@ Valid values for pin and group names are:
Example:
- pinctrl: pinctrl@02005800 {
+ pinctrl: pinctrl@2005800 {
#gpio-range-cells = <3>;
compatible = "img,tz1090-pinctrl";
reg = <0x02005800 0xe4>;
@@ -221,7 +221,7 @@ Example board file extract:
};
};
- uart@02004b00 {
+ uart@2004b00 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_default>;
};
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
index f64060908d5a..c7c088d2dd50 100644
--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
@@ -97,8 +97,8 @@ group spi_quad
- pins 15-16
- functions spi, gpio
-group uart_2
- - pins 9-10
+group uart2
+ - pins 9-10 and 18-19
- functions uart, gpio
Available groups and functions for the South bridge:
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-xusb-padctl.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-xusb-padctl.txt
index 4048f43a9d29..02e971c39d81 100644
--- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-xusb-padctl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-xusb-padctl.txt
@@ -97,7 +97,7 @@ SoC file extract:
Board file extract:
-------------------
- pcie-controller@01003000 {
+ pcie-controller@1003000 {
...
phys = <&padctl 0>;
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index 4483cc31e531..ad9bbbba36e9 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -271,6 +271,10 @@ output-high - set the pin to output mode with high level
sleep-hardware-state - indicate this is sleep related state which will be programmed
into the registers for the sleep state.
slew-rate - set the slew rate
+skew-delay - this affects the expected clock skew on input pins
+ and the delay before latching a value to an output
+ pin. Typically indicates how many double-inverters are
+ used to delay the signal.
For example:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mcp23s08.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt
index c934106b10aa..9c451c20dda4 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mcp23s08.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mcp23s08.txt
@@ -13,6 +13,7 @@ Required properties:
- "microchip,mcp23s18" for 16 GPIO SPI version
- "microchip,mcp23008" for 8 GPIO I2C version or
- "microchip,mcp23017" for 16 GPIO I2C version of the chip
+ - "microchip,mcp23018" for 16 GPIO I2C version
NOTE: Do not use the old mcp prefix any more. It is deprecated and will be
removed.
- #gpio-cells : Should be two.
@@ -81,3 +82,61 @@ gpiom1: gpio@0 {
reg = <0>;
spi-max-frequency = <1000000>;
};
+
+Pull-up configuration
+=====================
+
+If pins are used as output, they can also be configured with pull-ups. This is
+done with pinctrl.
+
+Please refer to the file <devicetree/bindings/pinctrl/pinctrl-bindings.txt>
+for details of the common pinctrl bindings used by client devices,
+including the meaning of the phrase "pin configuration node".
+
+Optional Pinmux properties:
+--------------------------
+The following properties are required if a default pin configuration is
+required at boot.
+- pinctrl-names: A pinctrl state named per <pinctrl-binding.txt>.
+- pinctrl[0...n]: Properties to contain the phandle for pinctrl states per
+ <pinctrl-binding.txt>.
+
+The pin configurations are defined as children of the pinctrl state nodes. Each
+sub-node has the following properties:
+
+Required properties:
+------------------
+- pins: List of pins. Valid values of pins properties are:
+ gpio0 ... gpio7 for the devices with 8 GPIO pins and
+ gpio0 ... gpio15 for the devices with 16 GPIO pins.
+
+Optional properties:
+-------------------
+The following optional property is defined in the pinmux DT binding document
+<pinctrl-bindings.txt>. Absence of this property will leave the configuration
+in its default state.
+ bias-pull-up
+
+Example with pinctrl to pull-up output pins:
+gpio21: gpio@21 {
+ compatible = "microchip,mcp23017";
+ gpio-controller;
+ #gpio-cells = <0x2>;
+ reg = <0x21>;
+ interrupt-parent = <&socgpio>;
+ interrupts = <0x17 0x8>;
+ interrupt-names = "mcp23017@21 irq";
+ interrupt-controller;
+ #interrupt-cells = <0x2>;
+ microchip,irq-mirror;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2cgpio0irq &gpio21pullups>;
+
+ gpio21pullups: pinmux {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3",
+ "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11",
+ "gpio12", "gpio13", "gpio14", "gpio15";
+ bias-pull-up;
+ };
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
index 37d744750579..231fa1db7c5e 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
@@ -86,7 +86,7 @@ Examples:
reg = <0 0x1020C020 0 0x1000>;
};
- pinctrl@01c20800 {
+ pinctrl@1c20800 {
compatible = "mediatek,mt8135-pinctrl";
reg = <0 0x1000B000 0 0x1000>;
mediatek,pctl-regmap = <&syscfg_pctl_a &syscfg_pctl_b>;
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
index 013c675b5b64..48b9be48af18 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
@@ -89,7 +89,7 @@ Example:
interrupt-names = "irqmux";
ranges = <0 0x09610000 0x6000>;
- pio0: gpio@09610000 {
+ pio0: gpio@9610000 {
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.txt
index e312a71b2f94..aaf01e929eea 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8996-pinctrl.txt
@@ -175,7 +175,7 @@ to specify in a pin configuration subnode:
Example:
- tlmm: pinctrl@01010000 {
+ tlmm: pinctrl@1010000 {
compatible = "qcom,msm8996-pinctrl";
reg = <0x01010000 0x300000>;
interrupts = <0 208 0>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
index 5b12c57e7f02..5c25fcb29fb5 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
@@ -15,6 +15,7 @@ PMIC's from Qualcomm.
"qcom,pm8921-gpio"
"qcom,pm8941-gpio"
"qcom,pm8994-gpio"
+ "qcom,pmi8994-gpio"
"qcom,pma8084-gpio"
"qcom,pmi8994-gpio"
@@ -85,6 +86,7 @@ to specify in a pin configuration subnode:
gpio1-gpio44 for pm8921
gpio1-gpio36 for pm8941
gpio1-gpio22 for pm8994
+ gpio1-gpio10 for pmi8994
gpio1-gpio22 for pma8084
gpio1-gpio10 for pmi8994
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rza1-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,rza1-pinctrl.txt
index 43e21474528a..fd3696eb36bf 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,rza1-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rza1-pinctrl.txt
@@ -12,8 +12,10 @@ Pin controller node
-------------------
Required properties:
- - compatible
- this shall be "renesas,r7s72100-ports".
+ - compatible: should be:
+ - "renesas,r7s72100-ports": for RZ/A1H
+ - "renesas,r7s72101-ports", "renesas,r7s72100-ports": for RZ/A1M
+ - "renesas,r7s72102-ports": for RZ/A1L
- reg
address base and length of the memory area where the pin controller
diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt b/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt
index 6c1498958d48..e371b262d709 100644
--- a/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt
+++ b/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt
@@ -40,7 +40,7 @@ Optional properties:
Example:
- gpc: gpc@020dc000 {
+ gpc: gpc@20dc000 {
compatible = "fsl,imx6q-gpc";
reg = <0x020dc000 0x4000>;
interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>,
@@ -80,7 +80,7 @@ that is a phandle pointing to the power domain the device belongs to.
Example of a device that is part of the PU power domain:
- vpu: vpu@02040000 {
+ vpu: vpu@2040000 {
reg = <0x02040000 0x3c000>;
/* ... */
power-domains = <&pd_pu>;
diff --git a/Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt b/Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt
index dc7c9bad63ea..1b81fcd9fb72 100644
--- a/Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt
+++ b/Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt
@@ -10,7 +10,7 @@ Required Properties:
-reg: Specifies the physical address of the SNVS_LPCR register
Example:
- snvs@020cc000 {
+ snvs@20cc000 {
compatible = "fsl,sec-v4.0-mon", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/Documentation/devicetree/bindings/power/reset/keystone-reset.txt b/Documentation/devicetree/bindings/power/reset/keystone-reset.txt
index c82f12e2d85c..c5c03789ed1e 100644
--- a/Documentation/devicetree/bindings/power/reset/keystone-reset.txt
+++ b/Documentation/devicetree/bindings/power/reset/keystone-reset.txt
@@ -37,12 +37,12 @@ Example 1:
Setup keystone reset so that in case software reset or
WDT0 is triggered it issues hard reset for SoC.
-pllctrl: pll-controller@02310000 {
+pllctrl: pll-controller@2310000 {
compatible = "ti,keystone-pllctrl", "syscon";
reg = <0x02310000 0x200>;
};
-devctrl: device-state-control@02620000 {
+devctrl: device-state-control@2620000 {
compatible = "ti,keystone-devctrl", "syscon";
reg = <0x02620000 0x1000>;
};
diff --git a/Documentation/devicetree/bindings/power/supply/sbs,sbs-manager.txt b/Documentation/devicetree/bindings/power/supply/sbs,sbs-manager.txt
new file mode 100644
index 000000000000..4b2195571a49
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/sbs,sbs-manager.txt
@@ -0,0 +1,66 @@
+Binding for sbs-manager
+
+Required properties:
+- compatible: "<vendor>,<part-number>", "sbs,sbs-manager" as fallback. The part
+ number compatible string might be used in order to take care of vendor
+ specific registers.
+- reg: integer, i2c address of the device. Should be <0xa>.
+Optional properties:
+- gpio-controller: Marks the port as GPIO controller.
+ See "gpio-specifier" in .../devicetree/bindings/gpio/gpio.txt.
+- #gpio-cells: Should be <2>. The first cell is the pin number, the second cell
+ is used to specify optional parameters:
+ See "gpio-specifier" in .../devicetree/bindings/gpio/gpio.txt.
+
+From the OS point of view the device is basically an i2c-mux used to communicate
+with up to four smart battery devices at address 0xb. The driver implements this
+behaviour, so standard i2c-mux nodes can be used to register up to four slave
+batteries. Channels are numbered from 1 to 4.
+
+Example:
+
+batman@a {
+ compatible = "lltc,ltc1760", "sbs,sbs-manager";
+ reg = <0x0a>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ battery@b {
+ compatible = "ti,bq2060", "sbs,sbs-battery";
+ reg = <0x0b>;
+ sbs,battery-detect-gpios = <&batman 1 1>;
+ };
+ };
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ battery@b {
+ compatible = "ti,bq2060", "sbs,sbs-battery";
+ reg = <0x0b>;
+ sbs,battery-detect-gpios = <&batman 2 1>;
+ };
+ };
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ battery@b {
+ compatible = "ti,bq2060", "sbs,sbs-battery";
+ reg = <0x0b>;
+ sbs,battery-detect-gpios = <&batman 3 1>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt b/Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt
index 0f766333b6eb..37f91fa57654 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt
@@ -8,7 +8,7 @@ Required properties:
Example:
-mcu@0a {
+mcu@a {
#gpio-cells = <2>;
compatible = "fsl,mc9s08qg8-mpc8349emitx",
"fsl,mcu-mpc8349emitx";
diff --git a/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt b/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt
index c5171660eaf9..51ff54c8b8ef 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt
@@ -14,7 +14,7 @@ Required properties:
Example:
- pwm: pwm@01c20e00 {
+ pwm: pwm@1c20e00 {
compatible = "allwinner,sun7i-a20-pwm";
reg = <0x01c20e00 0xc>;
clocks = <&osc24M>;
diff --git a/Documentation/devicetree/bindings/regulator/da9211.txt b/Documentation/devicetree/bindings/regulator/da9211.txt
index 0f2a6f8fcafd..27717e816e71 100644
--- a/Documentation/devicetree/bindings/regulator/da9211.txt
+++ b/Documentation/devicetree/bindings/regulator/da9211.txt
@@ -1,8 +1,9 @@
-* Dialog Semiconductor DA9211/DA9212/DA9213/DA9214/DA9215 Voltage Regulator
+* Dialog Semiconductor DA9211/DA9212/DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
+ Voltage Regulator
Required properties:
-- compatible: "dlg,da9211" or "dlg,da9212" or "dlg,da9213"
- or "dlg,da9214" or "dlg,da9215"
+- compatible: "dlg,da9211" or "dlg,da9212" or "dlg,da9213" or "dlg,da9223"
+ or "dlg,da9214" or "dlg,da9224" or "dlg,da9215" or "dlg,da9225"
- reg: I2C slave address, usually 0x68.
- interrupts: the interrupt outputs of the controller
- regulators: A node that houses a sub-node for each regulator within the
@@ -16,7 +17,6 @@ Optional properties:
- Any optional property defined in regulator.txt
Example 1) DA9211
-
pmic: da9211@68 {
compatible = "dlg,da9211";
reg = <0x68>;
@@ -35,7 +35,6 @@ Example 1) DA9211
};
Example 2) DA9212
-
pmic: da9212@68 {
compatible = "dlg,da9212";
reg = <0x68>;
@@ -79,7 +78,25 @@ Example 3) DA9213
};
};
-Example 4) DA9214
+Example 4) DA9223
+ pmic: da9223@68 {
+ compatible = "dlg,da9223";
+ reg = <0x68>;
+ interrupts = <3 27>;
+
+ regulators {
+ BUCKA {
+ regulator-name = "VBUCKA";
+ regulator-min-microvolt = < 300000>;
+ regulator-max-microvolt = <1570000>;
+ regulator-min-microamp = <3000000>;
+ regulator-max-microamp = <6000000>;
+ enable-gpios = <&gpio 27 0>;
+ };
+ };
+ };
+
+Example 5) DA9214
pmic: da9214@68 {
compatible = "dlg,da9214";
reg = <0x68>;
@@ -105,7 +122,33 @@ Example 4) DA9214
};
};
-Example 5) DA9215
+Example 6) DA9224
+ pmic: da9224@68 {
+ compatible = "dlg,da9224";
+ reg = <0x68>;
+ interrupts = <3 27>;
+
+ regulators {
+ BUCKA {
+ regulator-name = "VBUCKA";
+ regulator-min-microvolt = < 300000>;
+ regulator-max-microvolt = <1570000>;
+ regulator-min-microamp = <3000000>;
+ regulator-max-microamp = <6000000>;
+ enable-gpios = <&gpio 27 0>;
+ };
+ BUCKB {
+ regulator-name = "VBUCKB";
+ regulator-min-microvolt = < 300000>;
+ regulator-max-microvolt = <1570000>;
+ regulator-min-microamp = <3000000>;
+ regulator-max-microamp = <6000000>;
+ enable-gpios = <&gpio 17 0>;
+ };
+ };
+ };
+
+Example 7) DA9215
pmic: da9215@68 {
compatible = "dlg,da9215";
reg = <0x68>;
@@ -131,3 +174,28 @@ Example 5) DA9215
};
};
+Example 8) DA9225
+ pmic: da9225@68 {
+ compatible = "dlg,da9225";
+ reg = <0x68>;
+ interrupts = <3 27>;
+
+ regulators {
+ BUCKA {
+ regulator-name = "VBUCKA";
+ regulator-min-microvolt = < 300000>;
+ regulator-max-microvolt = <1570000>;
+ regulator-min-microamp = <4000000>;
+ regulator-max-microamp = <7000000>;
+ enable-gpios = <&gpio 27 0>;
+ };
+ BUCKB {
+ regulator-name = "VBUCKB";
+ regulator-min-microvolt = < 300000>;
+ regulator-max-microvolt = <1570000>;
+ regulator-min-microamp = <4000000>;
+ regulator-max-microamp = <7000000>;
+ enable-gpios = <&gpio 17 0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/max77686.txt b/Documentation/devicetree/bindings/regulator/max77686.txt
index 0dded64d89d3..e9f7578ca09a 100644
--- a/Documentation/devicetree/bindings/regulator/max77686.txt
+++ b/Documentation/devicetree/bindings/regulator/max77686.txt
@@ -40,7 +40,7 @@ to get matched with their hardware counterparts as follow:
Example:
- max77686: pmic@09 {
+ max77686: pmic@9 {
compatible = "maxim,max77686";
interrupt-parent = <&wakeup_eint>;
interrupts = <26 IRQ_TYPE_NONE>;
diff --git a/Documentation/devicetree/bindings/regulator/max77802.txt b/Documentation/devicetree/bindings/regulator/max77802.txt
index 879e98d3b9aa..b82943d83677 100644
--- a/Documentation/devicetree/bindings/regulator/max77802.txt
+++ b/Documentation/devicetree/bindings/regulator/max77802.txt
@@ -71,7 +71,7 @@ has not been disabled for that state using "regulator-off-in-suspend".
Example:
- max77802@09 {
+ max77802@9 {
compatible = "maxim,max77802";
interrupt-parent = <&wakeup_eint>;
interrupts = <26 0>;
diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt
index 444c47831a40..c6dd3f5e485b 100644
--- a/Documentation/devicetree/bindings/regulator/pfuze100.txt
+++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt
@@ -21,7 +21,7 @@ Each regulator is defined using the standard binding for regulators.
Example 1: PFUZE100
- pmic: pfuze100@08 {
+ pmic: pfuze100@8 {
compatible = "fsl,pfuze100";
reg = <0x08>;
@@ -122,7 +122,7 @@ Example 1: PFUZE100
Example 2: PFUZE200
- pmic: pfuze200@08 {
+ pmic: pfuze200@8 {
compatible = "fsl,pfuze200";
reg = <0x08>;
@@ -216,7 +216,7 @@ Example 2: PFUZE200
Example 3: PFUZE3000
- pmic: pfuze3000@08 {
+ pmic: pfuze3000@8 {
compatible = "fsl,pfuze3000";
reg = <0x08>;
diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
index 0fa3b0fac129..57d2c65899df 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
@@ -8,6 +8,7 @@ Qualcomm SPMI Regulators
"qcom,pm8916-regulators"
"qcom,pm8941-regulators"
"qcom,pm8994-regulators"
+ "qcom,pmi8994-regulators"
- interrupts:
Usage: optional
@@ -100,6 +101,15 @@ Qualcomm SPMI Regulators
Definition: Reference to regulator supplying the input pin, as
described in the data sheet.
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
+- vdd_l1-supply:
+ Usage: optional (pmi8994 only)
+ Value type: <phandle>
+ Definition: Reference to regulator supplying the input pin, as
+ described in the data sheet.
+
The regulator node houses sub-nodes for each regulator within the device. Each
sub-node is identified using the node's name, with valid values listed for each
@@ -122,6 +132,9 @@ pm8994:
l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16, l17, l18, l19, l20,
l21, l22, l23, l24, l25, l26, l27, l28, l29, l30, l31, l32, lvs1, lvs2
+pmi8994:
+ s1, s2, s3, l1
+
The content of each sub-node is defined by the standard binding for regulators -
see regulator.txt - with additional custom properties described below:
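
(As a sketch of how the new pmi8994 support might be used; the supply phandles and voltage values below are assumptions for illustration, not taken from this patch:)

	pmi8994_regulators: regulators {
		compatible = "qcom,pmi8994-regulators";
		vdd_s1-supply = <&vph_pwr>;
		vdd_s2-supply = <&vph_pwr>;
		vdd_l1-supply = <&pmi8994_s2>;

		pmi8994_s2: s2 {
			regulator-min-microvolt = <980000>;
			regulator-max-microvolt = <980000>;
		};
	};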
diff --git a/Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt b/Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt
index c8f775714887..4ca66c96fe97 100644
--- a/Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt
+++ b/Documentation/devicetree/bindings/reset/allwinner,sunxi-clock-reset.txt
@@ -14,7 +14,7 @@ Required properties:
example:
-ahb1_rst: reset@01c202c0 {
+ahb1_rst: reset@1c202c0 {
#reset-cells = <1>;
compatible = "allwinner,sun6i-a31-ahb1-reset";
reg = <0x01c202c0 0xc>;
diff --git a/Documentation/devicetree/bindings/reset/fsl,imx-src.txt b/Documentation/devicetree/bindings/reset/fsl,imx-src.txt
index 13301777e11c..6ed79e60248a 100644
--- a/Documentation/devicetree/bindings/reset/fsl,imx-src.txt
+++ b/Documentation/devicetree/bindings/reset/fsl,imx-src.txt
@@ -14,7 +14,7 @@ Required properties:
example:
-src: src@020d8000 {
+src: src@20d8000 {
compatible = "fsl,imx6q-src";
reg = <0x020d8000 0x4000>;
interrupts = <0 91 0x04 0 96 0x04>;
@@ -33,10 +33,10 @@ reset.txt
example:
- ipu1: ipu@02400000 {
+ ipu1: ipu@2400000 {
resets = <&src 2>;
};
- ipu2: ipu@02800000 {
+ ipu2: ipu@2800000 {
resets = <&src 4>;
};
diff --git a/Documentation/devicetree/bindings/reset/ti-syscon-reset.txt b/Documentation/devicetree/bindings/reset/ti-syscon-reset.txt
index c516d24959f2..86945502ccb5 100644
--- a/Documentation/devicetree/bindings/reset/ti-syscon-reset.txt
+++ b/Documentation/devicetree/bindings/reset/ti-syscon-reset.txt
@@ -67,7 +67,7 @@ using the syscon node, and a consumer (a DSP device) on the TI Keystone 2
/ {
soc {
- psc: power-sleep-controller@02350000 {
+ psc: power-sleep-controller@2350000 {
compatible = "syscon", "simple-mfd";
reg = <0x02350000 0x1000>;
diff --git a/Documentation/devicetree/bindings/riscv/cpus.txt b/Documentation/devicetree/bindings/riscv/cpus.txt
new file mode 100644
index 000000000000..adf7b7af5dc3
--- /dev/null
+++ b/Documentation/devicetree/bindings/riscv/cpus.txt
@@ -0,0 +1,162 @@
+===================
+RISC-V CPU Bindings
+===================
+
+The device tree allows one to describe the layout of CPUs in a system through
+the "cpus" node, which in turn contains a number of subnodes (i.e. "cpu")
+defining properties for every cpu.
+
+Bindings for CPU nodes follow the Devicetree Specification, available from:
+
+https://www.devicetree.org/specifications/
+
+with updates for 32-bit and 64-bit RISC-V systems provided in this document.
+
+===========
+Terminology
+===========
+
+This document uses some terminology common to the RISC-V community that is not
+widely used, the definitions of which are listed here:
+
+* hart: A hardware execution context, which contains all the state mandated by
+ the RISC-V ISA: a PC and some registers. This terminology is designed to
+ disambiguate software's view of execution contexts from any particular
+ microarchitectural implementation strategy. For example, my Intel laptop is
+ described as having one socket with two cores, each of which has two hyper
+ threads. Therefore this system has four harts.
+
+=====================================
+cpus and cpu node bindings definition
+=====================================
+
+The RISC-V architecture, in accordance with the Devicetree Specification,
+requires the cpus and cpu nodes to be present and contain the properties
+described below.
+
+- cpus node
+
+ Description: Container of cpu nodes
+
+ The node name must be "cpus".
+
+ A cpus node must define the following properties:
+
+ - #address-cells
+ Usage: required
+ Value type: <u32>
+ Definition: must be set to 1
+ - #size-cells
+ Usage: required
+ Value type: <u32>
+ Definition: must be set to 0
+
+- cpu node
+
+ Description: Describes a hart context
+
+ PROPERTIES
+
+ - device_type
+ Usage: required
+ Value type: <string>
+ Definition: must be "cpu"
+ - reg
+ Usage: required
+ Value type: <u32>
+ Definition: The hart ID of this CPU node
+ - compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must contain "riscv", may contain one of
+ "sifive,rocket0"
+ - mmu-type:
+ Usage: optional
+ Value type: <string>
+ Definition: Specifies the CPU's MMU type. Possible values are
+ "riscv,sv32"
+ "riscv,sv39"
+ "riscv,sv48"
+ - riscv,isa:
+ Usage: required
+ Value type: <string>
+ Definition: Contains the RISC-V ISA string of this hart. These
+ ISA strings are defined by the RISC-V ISA manual.
+
+Example: SiFive Freedom U540G Development Kit
+---------------------------------------------
+
+This system contains two harts: a hart marked as disabled that's used for
+low-level system tasks and should be ignored by Linux, and a second hart that
+Linux is allowed to run on.
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ timebase-frequency = <1000000>;
+ cpu@0 {
+ clock-frequency = <1600000000>;
+ compatible = "sifive,rocket0", "riscv";
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <128>;
+ i-cache-size = <16384>;
+ next-level-cache = <&L15 &L0>;
+ reg = <0>;
+ riscv,isa = "rv64imac";
+ status = "disabled";
+ L10: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+ cpu@1 {
+ clock-frequency = <1600000000>;
+ compatible = "sifive,rocket0", "riscv";
+ d-cache-block-size = <64>;
+ d-cache-sets = <64>;
+ d-cache-size = <32768>;
+ d-tlb-sets = <1>;
+ d-tlb-size = <32>;
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <64>;
+ i-cache-size = <32768>;
+ i-tlb-sets = <1>;
+ i-tlb-size = <32>;
+ mmu-type = "riscv,sv39";
+ next-level-cache = <&L15 &L0>;
+ reg = <1>;
+ riscv,isa = "rv64imafdc";
+ status = "okay";
+ tlb-split;
+ L13: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+ };
+
+Example: Spike ISA Simulator with 1 Hart
+----------------------------------------
+
+This device tree matches the Spike ISA golden model as run with `spike -p1`.
+
+ cpus {
+ cpu@0 {
+ device_type = "cpu";
+ reg = <0x00000000>;
+ status = "okay";
+ compatible = "riscv";
+ riscv,isa = "rv64imafdc";
+ mmu-type = "riscv,sv48";
+ clock-frequency = <0x3b9aca00>;
+ interrupt-controller {
+ #interrupt-cells = <0x00000001>;
+ interrupt-controller;
+ compatible = "riscv,cpu-intc";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt b/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
index e25a456664b9..0014da9145af 100644
--- a/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
+++ b/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
@@ -1,7 +1,9 @@
HWRNG support for the iproc-rng200 driver
Required properties:
-- compatible : "brcm,iproc-rng200"
+- compatible : Must be one of:
+ "brcm,bcm7278-rng200"
+ "brcm,iproc-rng200"
- reg : base address and size of control register block
Example:
diff --git a/Documentation/devicetree/bindings/rtc/dallas,ds1339.txt b/Documentation/devicetree/bindings/rtc/dallas,ds1339.txt
deleted file mode 100644
index 916f57601a8f..000000000000
--- a/Documentation/devicetree/bindings/rtc/dallas,ds1339.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-* Dallas DS1339 I2C Serial Real-Time Clock
-
-Required properties:
-- compatible: Should contain "dallas,ds1339".
-- reg: I2C address for chip
-
-Optional properties:
-- trickle-resistor-ohms : Selected resistor for trickle charger
- Values usable for ds1339 are 250, 2000, 4000
- Should be given if trickle charger should be enabled
-- trickle-diode-disable : Do not use internal trickle charger diode
- Should be given if internal trickle charger diode should be disabled
-Example:
- ds1339: rtc@68 {
- compatible = "dallas,ds1339";
- trickle-resistor-ohms = <250>;
- reg = <0x68>;
- };
diff --git a/Documentation/devicetree/bindings/rtc/rtc-ds1307.txt b/Documentation/devicetree/bindings/rtc/rtc-ds1307.txt
new file mode 100644
index 000000000000..d28d6e7f6ae8
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-ds1307.txt
@@ -0,0 +1,44 @@
+Dallas DS1307 and compatible RTC
+
+Required properties:
+- compatible: should be one of:
+ "dallas,ds1307",
+ "dallas,ds1308",
+ "dallas,ds1337",
+ "dallas,ds1338",
+ "dallas,ds1339",
+ "dallas,ds1388",
+ "dallas,ds1340",
+ "dallas,ds1341",
+ "maxim,ds3231",
+ "st,m41t0",
+ "st,m41t00",
+ "microchip,mcp7940x",
+ "microchip,mcp7941x",
+ "pericom,pt7c4338",
+ "epson,rx8025",
+ "isil,isl12057"
+- reg: I2C bus address of the device
+
+Optional properties:
+- interrupt-parent: phandle for the interrupt controller.
+- interrupts: rtc alarm interrupt.
+- clock-output-names: From common clock binding to override the default output
+ clock name
+- wakeup-source: Enables wake up of host system on alarm
+- trickle-resistor-ohms : ds1339, ds1340 and ds1388 only
+ Selected resistor for trickle charger
+ Possible values are 250, 2000, 4000
+ Should be given if trickle charger should be enabled
+- trickle-diode-disable : ds1339, ds1340 and ds1388 only
+ Do not use internal trickle charger diode
+ Should be given if internal trickle charger diode should be disabled
+
+Example:
+ rtc1: ds1339@68 {
+ compatible = "dallas,ds1339";
+ reg = <0x68>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <20 0>;
+ trickle-resistor-ohms = <250>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt b/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt
new file mode 100644
index 000000000000..717d93860af1
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-m41t80.txt
@@ -0,0 +1,31 @@
+ST M41T80 family of RTC and compatible
+
+Required properties:
+- compatible: should be one of:
+ "st,m41t62",
+ "st,m41t65",
+ "st,m41t80",
+ "st,m41t81",
+ "st,m41t81s",
+ "st,m41t82",
+ "st,m41t83",
+ "st,m41t84",
+ "st,m41t85",
+ "st,m41t87",
+ "microcrystal,rv4162",
+- reg: I2C bus address of the device
+
+Optional properties:
+- interrupt-parent: phandle for the interrupt controller.
+- interrupts: rtc alarm interrupt.
+- clock-output-names: From common clock binding to override the default output
+ clock name
+- wakeup-source: Enables wake up of host system on alarm
+
+Example:
+ rtc@68 {
+ compatible = "st,m41t80";
+ reg = <0x68>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <0x9 0x8>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/sirf,prima2-sysrtc.txt b/Documentation/devicetree/bindings/rtc/sirf,prima2-sysrtc.txt
new file mode 100644
index 000000000000..58885b55da21
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/sirf,prima2-sysrtc.txt
@@ -0,0 +1,13 @@
+SiRFSoC Real Time Clock
+
+Required properties:
+- compatible: must be "sirf,prima2-sysrtc"
+- reg: address range of rtc register set.
+- interrupts: rtc alarm interrupts.
+
+Example:
+ rtc@2000 {
+ compatible = "sirf,prima2-sysrtc";
+ reg = <0x2000 0x1000>;
+ interrupts = <52 53 54>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt b/Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt
new file mode 100644
index 000000000000..3ebeb311335f
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/stericsson,coh901331.txt
@@ -0,0 +1,17 @@
+ST-Ericsson COH 901 331 Real Time Clock
+
+Required properties:
+- compatible: must be "stericsson,coh901331"
+- reg: address range of rtc register set.
+- interrupt-parent: phandle for the interrupt controller.
+- interrupts: rtc alarm interrupt.
+- clocks: phandle to the rtc clock source
+
+Example:
+ rtc: rtc@c0017000 {
+ compatible = "stericsson,coh901331";
+ reg = <0xc0017000 0x1000>;
+ interrupt-parent = <&vicb>;
+ interrupts = <10>;
+ clocks = <&rtc_clk>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt b/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt
index d5e26d313f62..12c083c1140a 100644
--- a/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt
@@ -17,7 +17,7 @@ Required properties for new device trees
Example:
-rtc: rtc@01f00000 {
+rtc: rtc@1f00000 {
compatible = "allwinner,sun6i-a31-rtc";
reg = <0x01f00000 0x54>;
interrupts = <0 40 4>, <0 41 4>;
diff --git a/Documentation/devicetree/bindings/rtc/sunxi-rtc.txt b/Documentation/devicetree/bindings/rtc/sunxi-rtc.txt
index 6983aad376c3..4a8d79c1cf08 100644
--- a/Documentation/devicetree/bindings/rtc/sunxi-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/sunxi-rtc.txt
@@ -10,7 +10,7 @@ Required properties:
Example:
-rtc: rtc@01c20d00 {
+rtc: rtc@1c20d00 {
compatible = "allwinner,sun4i-a10-rtc";
reg = <0x01c20d00 0x20>;
interrupts = <24>;
diff --git a/Documentation/devicetree/bindings/serial/atmel-usart.txt b/Documentation/devicetree/bindings/serial/atmel-usart.txt
index e6e6142e33ac..7c0d6b2f53e4 100644
--- a/Documentation/devicetree/bindings/serial/atmel-usart.txt
+++ b/Documentation/devicetree/bindings/serial/atmel-usart.txt
@@ -24,6 +24,7 @@ Optional properties:
- dma-names: "rx" for RX channel, "tx" for TX channel.
- atmel,fifo-size: maximum number of data the RX and TX FIFOs can store for FIFO
capable USARTs.
+- rs485-rts-delay, rs485-rx-during-tx, linux,rs485-enabled-at-boot-time: see rs485.txt
<chip> compatible description:
- at91rm9200: legacy USART support
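
(As a minimal sketch of the rs485 properties referenced above; the register address, compatible string and delay values are illustrative assumptions, not taken from this patch:)

	usart1: serial@f001c000 {
		compatible = "atmel,at91sam9260-usart";
		reg = <0xf001c000 0x100>;
		...
		linux,rs485-enabled-at-boot-time;
		rs485-rts-delay = <0 200>;	/* delay before/after send, see rs485.txt */
		rs485-rx-during-tx;
	};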
diff --git a/Documentation/devicetree/bindings/serial/brcm,bcm6345-uart.txt b/Documentation/devicetree/bindings/serial/brcm,bcm6345-uart.txt
index 5c52e5eef16d..8b2b0460259a 100644
--- a/Documentation/devicetree/bindings/serial/brcm,bcm6345-uart.txt
+++ b/Documentation/devicetree/bindings/serial/brcm,bcm6345-uart.txt
@@ -11,6 +11,11 @@ Required properties:
- clocks: Clock driving the hardware; used to figure out the baud rate
divisor.
+
+Optional properties:
+
+- clock-names: Should be "refclk".
+
Example:
uart0: serial@14e00520 {
@@ -19,6 +24,7 @@ Example:
interrupt-parent = <&periph_intc>;
interrupts = <2>;
clocks = <&periph_clk>;
+ clock-names = "refclk";
};
clocks {
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
index 574c3a2c77d5..860a9559839a 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
@@ -9,6 +9,7 @@ Optional properties:
- fsl,irda-mode : Indicate the uart supports irda mode
- fsl,dte-mode : Indicate the uart works in DTE mode. The uart works
in DCE mode by default.
+- rs485-rts-delay, rs485-rx-during-tx, linux,rs485-enabled-at-boot-time: see rs485.txt
Please check Documentation/devicetree/bindings/serial/serial.txt
for the complete list of generic properties.
diff --git a/Documentation/devicetree/bindings/serial/fsl-lpuart.txt b/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
index a1252a047f78..59567b51cf09 100644
--- a/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
@@ -16,6 +16,7 @@ Required properties:
Optional properties:
- dmas: A list of two dma specifiers, one for each entry in dma-names.
- dma-names: should contain "tx" and "rx".
+- rs485-rts-delay, rs485-rx-during-tx, linux,rs485-enabled-at-boot-time: see rs485.txt
Note: Optional properties for DMA support. Write them both or both not.
diff --git a/Documentation/devicetree/bindings/serial/omap_serial.txt b/Documentation/devicetree/bindings/serial/omap_serial.txt
index 7a71b5de77d6..43eac675f21f 100644
--- a/Documentation/devicetree/bindings/serial/omap_serial.txt
+++ b/Documentation/devicetree/bindings/serial/omap_serial.txt
@@ -19,6 +19,7 @@ Optional properties:
- dmas : DMA specifier, consisting of a phandle to the DMA controller
node and a DMA channel number.
- dma-names : "rx" for receive channel, "tx" for transmit channel.
+- rs485-rts-delay, rs485-rx-during-tx, linux,rs485-enabled-at-boot-time: see rs485.txt
Example:
diff --git a/Documentation/devicetree/bindings/serial/st,stm32-usart.txt b/Documentation/devicetree/bindings/serial/st,stm32-usart.txt
index 3657f9f9d17a..d150b04a6229 100644
--- a/Documentation/devicetree/bindings/serial/st,stm32-usart.txt
+++ b/Documentation/devicetree/bindings/serial/st,stm32-usart.txt
@@ -2,14 +2,10 @@
Required properties:
- compatible: can be either:
- - "st,stm32-usart",
- "st,stm32-uart",
- - "st,stm32f7-usart",
- "st,stm32f7-uart",
- - "st,stm32h7-usart"
- "st,stm32h7-uart".
- depending on whether the device supports synchronous mode
- and is compatible with stm32(f4), stm32f7 or stm32h7.
+ depending on whether the device is compatible with stm32(f4), stm32f7 or stm32h7.
- reg: The address and length of the peripheral registers space
- interrupts:
- The interrupt line for the USART instance,
@@ -33,7 +29,7 @@ usart4: serial@40004c00 {
};
usart2: serial@40004400 {
- compatible = "st,stm32-usart", "st,stm32-uart";
+ compatible = "st,stm32-uart";
reg = <0x40004400 0x400>;
interrupts = <38>;
clocks = <&clk_pclk1>;
@@ -43,7 +39,7 @@ usart2: serial@40004400 {
};
usart1: serial@40011000 {
- compatible = "st,stm32-usart", "st,stm32-uart";
+ compatible = "st,stm32-uart";
reg = <0x40011000 0x400>;
interrupts = <37>;
clocks = <&rcc 0 164>;
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/par_io.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/par_io.txt
index 60984260207b..09b1b05fa677 100644
--- a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/par_io.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/par_io.txt
@@ -18,7 +18,7 @@ par_io@1400 {
#size-cells = <0>;
device_type = "par_io";
num-ports = <7>;
- ucc_pin@01 {
+ ucc_pin@1 {
......
};
diff --git a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/pincfg.txt b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/pincfg.txt
index ec6ee2e864a2..5bde8b98a8c9 100644
--- a/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/pincfg.txt
+++ b/Documentation/devicetree/bindings/soc/fsl/cpm_qe/qe/pincfg.txt
@@ -26,7 +26,7 @@ Required properties:
interrupts.
Example:
- ucc_pin@01 {
+ ucc_pin@1 {
pio-map = <
/* port pin dir open_drain assignment has_irq */
0 3 1 0 1 0 /* TxD0 */
diff --git a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
index 66e6265fb0aa..f7b00a7c0f68 100644
--- a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+++ b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
@@ -51,7 +51,7 @@ of valid identifiers for k2g.
Example (K2G):
--------------------
- uart0: serial@02530c00 {
+ uart0: serial@2530c00 {
compatible = "ns16550a";
...
power-domains = <&k2g_pds 0x002c>;
diff --git a/Documentation/devicetree/bindings/sound/audio-graph-card.txt b/Documentation/devicetree/bindings/sound/audio-graph-card.txt
index 6e6720aa33f1..d04ea3b1a1dd 100644
--- a/Documentation/devicetree/bindings/sound/audio-graph-card.txt
+++ b/Documentation/devicetree/bindings/sound/audio-graph-card.txt
@@ -17,6 +17,7 @@ Below are same as Simple-Card.
- bitclock-master
- bitclock-inversion
- frame-inversion
+- mclk-fs
- dai-tdm-slot-num
- dai-tdm-slot-width
- clocks / system-clock-frequency
diff --git a/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt b/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt
index 8b8afe9fcb31..441dd6f29df1 100644
--- a/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt
+++ b/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt
@@ -43,7 +43,7 @@ Example 1. Sampling Rate Conversion
label = "sound-card";
prefix = "codec";
routing = "codec Playback", "DAI0 Playback",
- "codec Playback", "DAI1 Playback";
+ "DAI0 Capture", "codec Capture";
convert-rate = <48000>;
dais = <&cpu_port>;
@@ -79,7 +79,8 @@ Example 2. 2 CPU 1 Codec (Mixing)
label = "sound-card";
prefix = "codec";
routing = "codec Playback", "DAI0 Playback",
- "codec Playback", "DAI1 Playback";
+ "codec Playback", "DAI1 Playback",
+ "DAI0 Capture", "codec Capture";
convert-rate = <48000>;
dais = <&cpu_port0
diff --git a/Documentation/devicetree/bindings/sound/cdns,xtfpga-i2s.txt b/Documentation/devicetree/bindings/sound/cdns,xtfpga-i2s.txt
index befd125d18bb..860fc0da39c0 100644
--- a/Documentation/devicetree/bindings/sound/cdns,xtfpga-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/cdns,xtfpga-i2s.txt
@@ -9,7 +9,7 @@ Required properties:
Examples:
- i2s0: xtfpga-i2s@0d080000 {
+ i2s0: xtfpga-i2s@d080000 {
#sound-dai-cells = <0>;
compatible = "cdns,xtfpga-i2s";
reg = <0x0d080000 0x40>;
diff --git a/Documentation/devicetree/bindings/sound/cs42l56.txt b/Documentation/devicetree/bindings/sound/cs42l56.txt
index 4feb0eb27ea4..4ba520a28ae8 100644
--- a/Documentation/devicetree/bindings/sound/cs42l56.txt
+++ b/Documentation/devicetree/bindings/sound/cs42l56.txt
@@ -55,7 +55,7 @@ Example:
codec: codec@4b {
compatible = "cirrus,cs42l56";
reg = <0x4b>;
- gpio-reset = <&gpio 10 0>;
+ cirrus,gpio-nreset = <&gpio 10 0>;
cirrus,chgfreq-divisor = <0x05>;
cirrus,ain1_ref_cfg;
cirrus,micbias-lvl = <5>;
diff --git a/Documentation/devicetree/bindings/sound/fsl,asrc.txt b/Documentation/devicetree/bindings/sound/fsl,asrc.txt
index 65979b205893..f5a14115b459 100644
--- a/Documentation/devicetree/bindings/sound/fsl,asrc.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,asrc.txt
@@ -41,7 +41,7 @@ Required properties:
Example:
-asrc: asrc@02034000 {
+asrc: asrc@2034000 {
compatible = "fsl,imx53-asrc";
reg = <0x02034000 0x4000>;
interrupts = <0 50 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/sound/fsl,esai.txt b/Documentation/devicetree/bindings/sound/fsl,esai.txt
index 21c401e2ccda..cacd18bb9ba6 100644
--- a/Documentation/devicetree/bindings/sound/fsl,esai.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,esai.txt
@@ -48,7 +48,7 @@ Required properties:
Example:
-esai: esai@02024000 {
+esai: esai@2024000 {
compatible = "fsl,imx35-esai";
reg = <0x02024000 0x4000>;
interrupts = <0 51 0x04>;
diff --git a/Documentation/devicetree/bindings/sound/fsl,spdif.txt b/Documentation/devicetree/bindings/sound/fsl,spdif.txt
index 0f97e54c3d43..38cfa7573441 100644
--- a/Documentation/devicetree/bindings/sound/fsl,spdif.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,spdif.txt
@@ -39,7 +39,7 @@ Required properties:
Example:
-spdif: spdif@02004000 {
+spdif: spdif@2004000 {
compatible = "fsl,imx35-spdif";
reg = <0x02004000 0x4000>;
interrupts = <0 52 0x04>;
diff --git a/Documentation/devicetree/bindings/sound/imx-audmux.txt b/Documentation/devicetree/bindings/sound/imx-audmux.txt
index b30a737e209e..2db4dcbee1b9 100644
--- a/Documentation/devicetree/bindings/sound/imx-audmux.txt
+++ b/Documentation/devicetree/bindings/sound/imx-audmux.txt
@@ -22,7 +22,7 @@ Required properties of optional child nodes:
Example:
-audmux@021d8000 {
+audmux@21d8000 {
compatible = "fsl,imx6q-audmux", "fsl,imx31-audmux";
reg = <0x021d8000 0x4000>;
};
diff --git a/Documentation/devicetree/bindings/sound/rt5514.txt b/Documentation/devicetree/bindings/sound/rt5514.txt
index 929ca6756b02..4f33b0d96afe 100644
--- a/Documentation/devicetree/bindings/sound/rt5514.txt
+++ b/Documentation/devicetree/bindings/sound/rt5514.txt
@@ -1,22 +1,27 @@
RT5514 audio CODEC
-This device supports I2C only.
+This device supports both I2C and SPI.
Required properties:
- compatible : "realtek,rt5514".
-- reg : The I2C address of the device.
+- reg : the I2C address of the device for I2C, the chip select
+ number for SPI.
Optional properties:
- clocks: The phandle of the master clock to the CODEC
- clock-names: Should be "mclk"
+- interrupt-parent: The phandle for the interrupt controller.
+- interrupts: The interrupt number to the cpu. The interrupt specifier format
+ depends on the interrupt controller.
+
- realtek,dmic-init-delay-ms
- Set the DMIC initial delay (ms) to wait it ready.
+ Set the DMIC initial delay (in ms) to wait until it is ready (I2C mode only).
-Pins on the device (for linking into audio routes) for RT5514:
+Pins on the device (for linking into audio routes) for I2C:
* DMIC1L
* DMIC1R
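
(As an illustrative sketch of the newly documented SPI hookup; the chip select, bus frequency and interrupt wiring below are assumptions, not taken from this patch, and the interrupt specifier format depends on the parent controller:)

	rt5514: codec@0 {
		compatible = "realtek,rt5514";
		reg = <0>;			/* chip select on the SPI bus */
		spi-max-frequency = <5000000>;	/* assumed bus speed */
		interrupt-parent = <&gpio1>;
		interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
	};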
diff --git a/Documentation/devicetree/bindings/sound/rt5663.txt b/Documentation/devicetree/bindings/sound/rt5663.txt
index ff381718c517..497bcfc58b71 100644
--- a/Documentation/devicetree/bindings/sound/rt5663.txt
+++ b/Documentation/devicetree/bindings/sound/rt5663.txt
@@ -19,6 +19,22 @@ Optional properties:
Based on the different PCB layout, add the manual offset value to
compensate the DC offset for each L and R channel, and they are different
between headphone and headset.
+- "realtek,impedance_sensing_num"
+ The matrix row number of the impedance sensing table.
+ If the value is 0, it means the impedance sensing is not supported.
+- "realtek,impedance_sensing_table"
+ The matrix rows of the impedance sensing table are consisted by impedance
+ minimum, impedance maximun, volume, DC offset w/o and w/ mic of each L and
+ R channel accordingly. Example is shown as following.
+ < 0 300 7 0xffd160 0xffd1c0 0xff8a10 0xff8ab0
+ 301 65535 4 0xffe470 0xffe470 0xffb8e0 0xffb8e0>
+  The first and second columns define the impedance range. If the
+  detected impedance value falls within the range, the volume value in the
+  third column is applied to the codec. In this codec design, each volume
+  value must compensate for a different DC offset to avoid pop noise, and
+  the offset also differs between headphone and headset. In the example,
+  "realtek,impedance_sensing_num" is 2, meaning the impedance sensing
+  function covers 2 impedance ranges.
Pins on the device (for linking into audio routes) for RT5663:
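
(As an illustrative sketch tying the two properties together; the node name and I2C address are assumptions, while the table values are the ones from the example above:)

	rt5663: codec@12 {
		compatible = "realtek,rt5663";
		reg = <0x12>;
		realtek,impedance_sensing_num = <2>;
		realtek,impedance_sensing_table =
			<0     300 7 0xffd160 0xffd1c0 0xff8a10 0xff8ab0
			 301 65535 4 0xffe470 0xffe470 0xffb8e0 0xffb8e0>;
	};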
diff --git a/Documentation/devicetree/bindings/sound/samsung-i2s.txt b/Documentation/devicetree/bindings/sound/samsung-i2s.txt
index 09e0e18591ae..bf100cd0d0f7 100644
--- a/Documentation/devicetree/bindings/sound/samsung-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/samsung-i2s.txt
@@ -63,7 +63,7 @@ Optional SoC Specific Properties:
Example:
-i2s0: i2s@03830000 {
+i2s0: i2s@3830000 {
compatible = "samsung,s5pv210-i2s";
reg = <0x03830000 0x100>;
dmas = <&pdma0 10
diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.txt b/Documentation/devicetree/bindings/sound/sgtl5000.txt
index 7a73a9d62015..060cb4a3b47e 100644
--- a/Documentation/devicetree/bindings/sound/sgtl5000.txt
+++ b/Documentation/devicetree/bindings/sound/sgtl5000.txt
@@ -37,7 +37,7 @@ VDDIO 1.8V 2.5V 3.3V
Example:
-codec: sgtl5000@0a {
+codec: sgtl5000@a {
compatible = "fsl,sgtl5000";
reg = <0x0a>;
clocks = <&clks 150>;
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
index f1c5ae59e7c9..1f9cd7095337 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
+++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
@@ -10,13 +10,21 @@ Required properties:
- reg: Base address and size of SAI common register set.
- clocks: Must contain phandle and clock specifier pairs for each entry
in clock-names.
- - clock-names: Must contain "x8k" and "x11k"
+ - clock-names: Must contain "pclk" "x8k" and "x11k"
+ "pclk": Clock which feeds the peripheral bus interface.
+ Mandatory for "st,stm32h7-sai" compatible.
+ Not used for "st,stm32f4-sai" compatible.
"x8k": SAI parent clock for sampling rates multiple of 8kHz.
"x11k": SAI parent clock for sampling rates multiple of 11.025kHz.
- interrupts: cpu DAI interrupt line shared by SAI sub-blocks
Optional properties:
- resets: Reference to a reset controller asserting the SAI
+ - st,sync: specify synchronization mode.
+ By default the SAI sub-block is in asynchronous mode.
+ This property sets the SAI sub-block as a slave of another SAI sub-block.
+ Must contain the phandle and index of the SAI sub-block providing
+ the synchronization.
SAI subnodes:
Two subnodes corresponding to SAI sub-block instances A and B can be defined.
@@ -52,8 +60,8 @@ sai1: sai1@40015800 {
#size-cells = <1>;
ranges = <0 0x40015800 0x400>;
reg = <0x40015800 0x4>;
- clocks = <&rcc PLL1_Q>, <&rcc PLL2_P>;
- clock-names = "x8k", "x11k";
+ clocks = <&rcc SAI1_CK>, <&rcc PLL1_Q>, <&rcc PLL2_P>;
+ clock-names = "pclk", "x8k", "x11k";
interrupts = <87>;
sai1a: audio-controller@40015804 {
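
(As a sketch of the new st,sync property on a sub-block; the sub-block compatible string, register offsets and clock name below are assumptions not shown in this patch:)

	sai1b: audio-controller@40015824 {
		#sound-dai-cells = <0>;
		compatible = "st,stm32-sai-sub-b";
		reg = <0x24 0x1c>;
		clocks = <&rcc SAI1_CK>;
		clock-names = "sai_ck";
		st,sync = <&sai1a 0>;	/* synchronized to (slave of) sub-block A */
	};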
diff --git a/Documentation/devicetree/bindings/sound/sun4i-codec.txt b/Documentation/devicetree/bindings/sound/sun4i-codec.txt
index 2d4e10deb6f4..66579bbd3294 100644
--- a/Documentation/devicetree/bindings/sound/sun4i-codec.txt
+++ b/Documentation/devicetree/bindings/sound/sun4i-codec.txt
@@ -62,7 +62,7 @@ Required properties for the following compatibles:
block in the PRCM.
Example:
-codec: codec@01c22c00 {
+codec: codec@1c22c00 {
#sound-dai-cells = <0>;
compatible = "allwinner,sun7i-a20-codec";
reg = <0x01c22c00 0x40>;
@@ -73,7 +73,7 @@ codec: codec@01c22c00 {
dma-names = "rx", "tx";
};
-codec: codec@01c22c00 {
+codec: codec@1c22c00 {
#sound-dai-cells = <0>;
compatible = "allwinner,sun6i-a31-codec";
reg = <0x01c22c00 0x98>;
diff --git a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
index fc5da6080759..05d7135a8d2f 100644
--- a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
@@ -28,7 +28,7 @@ Required properties for the following compatibles:
Example:
-i2s0: i2s@01c22400 {
+i2s0: i2s@1c22400 {
#sound-dai-cells = <0>;
compatible = "allwinner,sun4i-a10-i2s";
reg = <0x01c22400 0x400>;
diff --git a/Documentation/devicetree/bindings/sound/sun8i-a33-codec.txt b/Documentation/devicetree/bindings/sound/sun8i-a33-codec.txt
index 399b1b4bae22..2ca3d138528e 100644
--- a/Documentation/devicetree/bindings/sound/sun8i-a33-codec.txt
+++ b/Documentation/devicetree/bindings/sound/sun8i-a33-codec.txt
@@ -48,7 +48,7 @@ are similar to A33 using simple-card:
sound-dai = <&codec>;
};
- soc@01c00000 {
+ soc@1c00000 {
[...]
audio-codec@1c22e00 {
diff --git a/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt b/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
index 1b6e7c4e50ab..07356758bd91 100644
--- a/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
+++ b/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
@@ -10,7 +10,7 @@ Required properties if not a sub-node of the PRCM node:
- reg: must contain the registers location and length
Example:
-prcm: prcm@01f01400 {
+prcm: prcm@1f01400 {
codec_analog: codec-analog {
compatible = "allwinner,sun8i-a23-codec-analog";
};
diff --git a/Documentation/devicetree/bindings/sound/sunxi,sun4i-spdif.txt b/Documentation/devicetree/bindings/sound/sunxi,sun4i-spdif.txt
index 70ee177901d3..0c64a209c2e9 100644
--- a/Documentation/devicetree/bindings/sound/sunxi,sun4i-spdif.txt
+++ b/Documentation/devicetree/bindings/sound/sunxi,sun4i-spdif.txt
@@ -31,7 +31,7 @@ Required properties:
Example:
-spdif: spdif@01c21000 {
+spdif: spdif@1c21000 {
compatible = "allwinner,sun4i-a10-spdif";
reg = <0x01c21000 0x40>;
interrupts = <13>;
diff --git a/Documentation/devicetree/bindings/sound/tfa9879.txt b/Documentation/devicetree/bindings/sound/tfa9879.txt
new file mode 100644
index 000000000000..23ba522d9e2b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tfa9879.txt
@@ -0,0 +1,23 @@
+NXP TFA9879 class-D audio amplifier
+
+Required properties:
+
+- compatible : "nxp,tfa9879"
+
+- reg : the I2C address of the device
+
+Example:
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ codec: tfa9879@6c {
+ #sound-dai-cells = <0>;
+ compatible = "nxp,tfa9879";
+ reg = <0x6c>;
+ };
+};
+
diff --git a/Documentation/devicetree/bindings/sound/wlf,arizona.txt b/Documentation/devicetree/bindings/sound/wlf,arizona.txt
new file mode 100644
index 000000000000..e172c62dc2df
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/wlf,arizona.txt
@@ -0,0 +1,53 @@
+Cirrus Logic Arizona class audio SoCs
+
+These devices are audio SoCs with extensive digital capabilities and a range
+of analogue I/O.
+
+This document lists sound specific bindings, see the primary binding
+document:
+ ../mfd/arizona.txt
+
+Optional properties:
+
+ - wlf,inmode : A list of INn_MODE register values, where n is the number
+ of input signals. Valid values are 0 (Differential), 1 (Single-ended) and
+ 2 (Digital Microphone). If absent, the INn_MODE registers are set to 0 by
+ default. If present, the number of values must be less than or equal to the
+ number of input signals; elements that have not been specified are set to 0
+ by default. Entries are:
+ <IN1, IN2, IN3, IN4> (wm5102, wm5110, wm8280, wm8997)
+ <IN1A, IN2A, IN1B, IN2B> (wm8998, wm1814)
+ - wlf,out-mono : A list of boolean values indicating whether each output is
+ mono or stereo. Position within the list indicates the output affected
+ (e.g. the first entry in the list corresponds to output 1). A non-zero value
+ indicates a mono output. If present, the number of values should be less
+ than or equal to the number of outputs; if fewer values are supplied, the
+ remaining outputs will be treated as stereo.
+
+ - wlf,dmic-ref : DMIC reference voltage source for each input, can be
+ selected from either MICVDD or one of the MICBIAS's, defines
+ (ARIZONA_DMIC_xxxx) are provided in <dt-bindings/mfd/arizona.txt>. If
+ present, the number of values should be less than or equal to the
+ number of inputs, unspecified inputs will use the chip default.
+
+ - wlf,max-channels-clocked : The maximum number of channels to be clocked on
+ each AIF, useful for I2S systems with multiple data lines being mastered.
+ Specify one cell for each AIF to be configured, specify zero for AIFs that
+ should be handled normally.
+ If present, the number of cells must be less than or equal to the number of
+ AIFs. If fewer cells than AIFs are specified, the remaining AIFs will be
+ treated as using the default setting.
+
+ - wlf,spk-fmt : PDM speaker data format, must contain 2 cells (OUT5 and OUT6).
+ See the datasheet for values.
+ The second cell is ignored for codecs that do not have OUT6 (wm5102, wm8997,
+ wm8998, wm1814)
+
+ - wlf,spk-mute : PDM speaker mute setting, must contain 2 cells (OUT5 and OUT6).
+ See the datasheet for values.
+ The second cell is ignored for codecs that do not have OUT6 (wm5102, wm8997,
+ wm8998, wm1814)
+
+ - wlf,out-volume-limit : The volume limit value that should be applied to each
+ output channel. See the datasheet for exact values. Channels are specified
+ in the order OUT1L, OUT1R, OUT2L, OUT2R, etc.
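
(As an illustrative sketch of how a few of these properties could appear in an Arizona codec node; the compatible string, I2C address and values are assumptions chosen for the example, and the full node is described in ../mfd/arizona.txt:)

	codec: wm5102@1a {
		compatible = "wlf,wm5102";
		reg = <0x1a>;
		/* IN1 differential, IN2 single-ended, IN3 digital microphone */
		wlf,inmode = <0 1 2>;
		/* OUT1 stereo, OUT2 mono; remaining outputs default to stereo */
		wlf,out-mono = <0 1>;
		/* clock at most 2 channels on AIF1; AIF2/AIF3 handled normally */
		wlf,max-channels-clocked = <2 0 0>;
	};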
diff --git a/Documentation/devicetree/bindings/sound/zte,zx-spdif.txt b/Documentation/devicetree/bindings/sound/zte,zx-spdif.txt
index b5a5ca4502f9..09231d7586b2 100644
--- a/Documentation/devicetree/bindings/sound/zte,zx-spdif.txt
+++ b/Documentation/devicetree/bindings/sound/zte,zx-spdif.txt
@@ -16,7 +16,7 @@ please check:
* dma/dma.txt
Example:
- spdif0: spdif0@0b004000 {
+ spdif0: spdif0@b004000 {
compatible = "zte,zx296702-spdif";
reg = <0x0b004000 0x1000>;
clocks = <&lsp0clk ZX296702_SPDIF0_DIV>;
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index e865855726a2..bdd83959019c 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -1,7 +1,9 @@
Renesas MSIOF spi controller
Required properties:
-- compatible : "renesas,msiof-r8a7790" (R-Car H2)
+- compatible : "renesas,msiof-r8a7743" (RZ/G1M)
+ "renesas,msiof-r8a7745" (RZ/G1E)
+ "renesas,msiof-r8a7790" (R-Car H2)
"renesas,msiof-r8a7791" (R-Car M2-W)
"renesas,msiof-r8a7792" (R-Car V2H)
"renesas,msiof-r8a7793" (R-Car M2-N)
@@ -10,7 +12,7 @@ Required properties:
"renesas,msiof-r8a7796" (R-Car M3-W)
"renesas,msiof-sh73a0" (SH-Mobile AG5)
"renesas,sh-mobile-msiof" (generic SH-Mobile compatibile device)
- "renesas,rcar-gen2-msiof" (generic R-Car Gen2 compatible device)
+ "renesas,rcar-gen2-msiof" (generic R-Car Gen2 and RZ/G1 compatible device)
"renesas,rcar-gen3-msiof" (generic R-Car Gen3 compatible device)
"renesas,sh-msiof" (deprecated)
diff --git a/Documentation/devicetree/bindings/spi/spi-davinci.txt b/Documentation/devicetree/bindings/spi/spi-davinci.txt
index f5916c92fe91..1925277bfc1e 100644
--- a/Documentation/devicetree/bindings/spi/spi-davinci.txt
+++ b/Documentation/devicetree/bindings/spi/spi-davinci.txt
@@ -24,6 +24,16 @@ Required properties:
based on a specific SoC configuration.
- interrupts: interrupt number mapped to CPU.
- clocks: spi clk phandle
+ For 66AK2G this property should be set per binding,
+ Documentation/devicetree/bindings/clock/ti,sci-clk.txt
+
+SoC-specific Required Properties:
+
+The following are mandatory properties for Keystone 2 66AK2G SoCs only:
+
+- power-domains: Should contain a phandle to a PM domain provider node
+ and an args specifier containing the SPI device id
+ value. This property is as per the binding,
Optional:
- cs-gpios: gpio chip selects
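
(As a sketch of the 66AK2G-specific properties; the device id and clock specifier values below are assumptions for illustration, the real values come from the TI SCI bindings referenced above:)

	spi0: spi@21805400 {
		compatible = "ti,keystone-spi";
		...
		clocks = <&k2g_clks 0x0011 0>;
		power-domains = <&k2g_pds 0x0011>;
	};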
diff --git a/Documentation/devicetree/bindings/spi/spi-rspi.txt b/Documentation/devicetree/bindings/spi/spi-rspi.txt
index 8f4169f63936..3b02b3a7cfb2 100644
--- a/Documentation/devicetree/bindings/spi/spi-rspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-rspi.txt
@@ -5,11 +5,14 @@ Required properties:
"renesas,rspi-<soctype>", "renesas,rspi" as fallback.
For Renesas Serial Peripheral Interface on RZ/A1H:
"renesas,rspi-<soctype>", "renesas,rspi-rz" as fallback.
- For Quad Serial Peripheral Interface on R-Car Gen2:
+ For Quad Serial Peripheral Interface on R-Car Gen2 and
+ RZ/G1 devices:
"renesas,qspi-<soctype>", "renesas,qspi" as fallback.
Examples with soctypes are:
- "renesas,rspi-sh7757" (SH)
- "renesas,rspi-r7s72100" (RZ/A1H)
+ - "renesas,qspi-r8a7743" (RZ/G1M)
+ - "renesas,qspi-r8a7745" (RZ/G1E)
- "renesas,qspi-r8a7790" (R-Car H2)
- "renesas,qspi-r8a7791" (R-Car M2-W)
- "renesas,qspi-r8a7792" (R-Car V2H)
diff --git a/Documentation/devicetree/bindings/spi/spi-sprd-adi.txt b/Documentation/devicetree/bindings/spi/spi-sprd-adi.txt
new file mode 100644
index 000000000000..8de589b376ce
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-sprd-adi.txt
@@ -0,0 +1,58 @@
+Spreadtrum ADI controller
+
+ADI is the abbreviation of Analog-Digital Interface, which is used to access an
+analog chip (such as a PMIC) from a digital chip. The ADI controller follows the
+SPI framework because its hardware implementation is similar to a SPI bus and
+its timing is compatible with SPI timing.
+
+The ADI controller has 50 channels, including 2 software read/write channels and
+48 hardware channels, to access the analog chip. For the 2 software read/write
+channels, users set ADI registers to access the analog chip. The hardware
+channels can be configured so that other hardware components use them
+independently: one analog chip address is linked to one hardware channel, and
+the mapped address is then accessed through that hardware channel, triggered by
+the hardware components instead of the ADI software channels.
+
+Thus one property named "sprd,hw-channels" is introduced to configure hardware
+channels: the first value specifies the hardware channel id used to transfer
+data triggered automatically by hardware, and the second value specifies the
+analog chip address that the hardware components should access.
+
+Since multiple subsystems use the same ADI to access the analog chip, access
+through the ADI software channels must be protected by a hardware spinlock to
+prevent other subsystems from reading/writing at the same time; otherwise two
+parallel routines setting ADI registers would corrupt the controller state and
+lead to incorrect results. Therefore one hardware spinlock is used to
+synchronize the multiple subsystems.
+
+Required properties:
+- compatible: Should be "sprd,sc9860-adi".
+- reg: Offset and length of ADI-SPI controller register space.
+- hwlocks: Reference to a phandle of a hwlock provider node.
+- hwlock-names: Reference to hwlock name strings defined in the same order
+ as the hwlocks, should be "adi".
+- #address-cells: Number of cells required to define a chip select address
+ on the ADI-SPI bus. Should be set to 1.
+- #size-cells: Size of cells required to define a chip select address size
+ on the ADI-SPI bus. Should be set to 0.
+
+Optional properties:
+- sprd,hw-channels: An array of channel values, for up to 49 channels.
+ The first value specifies the hardware channel id which is used to
+ transfer data triggered by hardware automatically, and the second
+ value specifies the analog chip address that the hardware components
+ should access.
+
+SPI slave nodes must be children of the SPI controller node and can contain
+properties described in Documentation/devicetree/bindings/spi/spi-bus.txt.
+
+Example:
+ adi_bus: spi@40030000 {
+ compatible = "sprd,sc9860-adi";
+ reg = <0 0x40030000 0 0x10000>;
+ hwlocks = <&hwlock1 0>;
+ hwlock-names = "adi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ sprd,hw-channels = <30 0x8c20>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/spi-sun4i.txt b/Documentation/devicetree/bindings/spi/spi-sun4i.txt
index 484bbff5337e..c75d604a8290 100644
--- a/Documentation/devicetree/bindings/spi/spi-sun4i.txt
+++ b/Documentation/devicetree/bindings/spi/spi-sun4i.txt
@@ -12,7 +12,7 @@ Required properties:
Example:
-spi1: spi@01c06000 {
+spi1: spi@1c06000 {
compatible = "allwinner,sun4i-a10-spi";
reg = <0x01c06000 0x1000>;
interrupts = <11>;
diff --git a/Documentation/devicetree/bindings/spi/spi-sun6i.txt b/Documentation/devicetree/bindings/spi/spi-sun6i.txt
index ab1811354cce..435a8e0731ac 100644
--- a/Documentation/devicetree/bindings/spi/spi-sun6i.txt
+++ b/Documentation/devicetree/bindings/spi/spi-sun6i.txt
@@ -19,7 +19,7 @@ Optional properties:
Example:
-spi1: spi@01c69000 {
+spi1: spi@1c69000 {
compatible = "allwinner,sun6i-a31-spi";
reg = <0x01c69000 0x1000>;
interrupts = <0 66 4>;
@@ -28,7 +28,7 @@ spi1: spi@01c69000 {
resets = <&ahb1_rst 21>;
};
-spi0: spi@01c68000 {
+spi0: spi@1c68000 {
compatible = "allwinner,sun8i-h3-spi";
reg = <0x01c68000 0x1000>;
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/sram/samsung-sram.txt b/Documentation/devicetree/bindings/sram/samsung-sram.txt
index 6bc474b2b885..61a9bbed303d 100644
--- a/Documentation/devicetree/bindings/sram/samsung-sram.txt
+++ b/Documentation/devicetree/bindings/sram/samsung-sram.txt
@@ -19,7 +19,7 @@ found in Documentation/devicetree/bindings/sram/sram.txt
Example:
- sysram@02020000 {
+ sysram@2020000 {
compatible = "mmio-sram";
reg = <0x02020000 0x54000>;
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/sram/sunxi-sram.txt b/Documentation/devicetree/bindings/sram/sunxi-sram.txt
index 6bb92a1df753..d087f04a4d7f 100644
--- a/Documentation/devicetree/bindings/sram/sunxi-sram.txt
+++ b/Documentation/devicetree/bindings/sram/sunxi-sram.txt
@@ -47,7 +47,7 @@ This valid values for this argument are:
Example
-------
-sram-controller@01c00000 {
+sram-controller@1c00000 {
compatible = "allwinner,sun4i-a10-sram-controller";
reg = <0x01c00000 0x30>;
#address-cells = <1>;
@@ -68,7 +68,7 @@ sram-controller@01c00000 {
};
};
-emac: ethernet@01c0b000 {
+emac: ethernet@1c0b000 {
compatible = "allwinner,sun4i-a10-emac";
...
diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt b/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt
index 8d6e4fd2468e..2c5c1be78360 100644
--- a/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt
+++ b/Documentation/devicetree/bindings/timer/allwinner,sun5i-a13-hstimer.txt
@@ -14,7 +14,7 @@ Optional properties:
Example:
-timer@01c60000 {
+timer@1c60000 {
compatible = "allwinner,sun7i-a20-hstimer";
reg = <0x01c60000 0x1000>;
interrupts = <0 51 1>,
diff --git a/Documentation/devicetree/bindings/trivial-devices.txt b/Documentation/devicetree/bindings/trivial-devices.txt
index af284fbd4d23..27dce08edd73 100644
--- a/Documentation/devicetree/bindings/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/trivial-devices.txt
@@ -36,11 +36,9 @@ atmel,at97sc3204t i2c trusted platform module (TPM)
capella,cm32181 CM32181: Ambient Light Sensor
capella,cm3232 CM3232: Ambient Light Sensor
cirrus,cs42l51 Cirrus Logic CS42L51 audio codec
-dallas,ds1307 64 x 8, Serial, I2C Real-Time Clock
-dallas,ds1338 I2C RTC with 56-Byte NV RAM
-dallas,ds1340 I2C RTC with Trickle Charger
dallas,ds1374 I2C, 32-Bit Binary Counter Watchdog RTC with Trickle Charger and Reset Input/Output
dallas,ds1631 High-Precision Digital Thermometer
+dallas,ds1672 Dallas DS1672 Real-time Clock
dallas,ds1682 Total-Elapsed-Time Recorder with Alarm
dallas,ds1775 Tiny Digital Thermometer and Thermostat
dallas,ds3232 Extremely Accurate I²C RTC with Integrated Crystal and SRAM
@@ -54,8 +52,8 @@ dlg,da9063 DA9063: system PMIC for quad-core application processors
domintech,dmard09 DMARD09: 3-axis Accelerometer
domintech,dmard10 DMARD10: 3-axis Accelerometer
epson,rx8010 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
-epson,rx8025 High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
epson,rx8581 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
+emmicro,em3027 EM Microelectronic EM3027 Real-time Clock
fsl,mag3110 MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
fsl,mc13892 MC13892: Power Management Integrated Circuit (PMIC) for i.MX35/51
fsl,mma7660 MMA7660FC: 3-Axis Orientation/Motion Detection Sensor
@@ -67,10 +65,13 @@ gmt,g751 G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire In
infineon,slb9635tt Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz)
infineon,slb9645tt Infineon SLB9645 I2C TPM (new protocol, max 400khz)
isil,isl1208 Intersil ISL1208 Low Power RTC with Battery Backed SRAM
+isil,isl1218 Intersil ISL1218 Low Power RTC with Battery Backed SRAM
+isil,isl12022 Intersil ISL12022 Real-time Clock
isil,isl29028 Intersil ISL29028 Ambient Light and Proximity Sensor
isil,isl29030 Intersil ISL29030 Ambient Light and Proximity Sensor
maxim,ds1050 5 Bit Programmable, Pulse-Width Modulator
maxim,max1237 Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
+maxim,max6621 PECI-to-I2C translator for PECI-to-SMBus/I2C protocol conversion
maxim,max6625 9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
mc,rv3029c2 Real Time Clock Module with I2C-Bus
mcube,mc3230 mCube 3-axis 8-bit digital accelerometer
@@ -155,6 +156,7 @@ nxp,pca9556 Octal SMBus and I2C registered interface
nxp,pca9557 8-bit I2C-bus and SMBus I/O port with reset
nxp,pcf2127 Real-time clock
nxp,pcf2129 Real-time clock
+nxp,pcf8523 Real-time Clock
nxp,pcf8563 Real-time clock/calendar
nxp,pcf85063 Tiny Real-Time Clock
oki,ml86v7667 OKI ML86V7667 video decoder
@@ -174,10 +176,6 @@ sii,s35390a 2-wire CMOS real-time clock
silabs,si7020 Relative Humidity and Temperature Sensors
skyworks,sky81452 Skyworks SKY81452: Six-Channel White LED Driver with Touch Panel Bias Supply
st,24c256 i2c serial eeprom (24cxx)
-st,m41t0 Serial real-time clock (RTC)
-st,m41t00 Serial real-time clock (RTC)
-st,m41t62 Serial real-time clock (RTC) with alarm
-st,m41t80 M41T80 - SERIAL ACCESS RTC WITH ALARMS
taos,tsl2550 Ambient Light Sensor with SMBUS/Two Wire Serial Interface
ti,ads7828 8-Channels, 12-bit ADC
ti,ads7830 8-Channels, 8-bit ADC
diff --git a/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt
index cb2bd83fa89a..50abb20fe319 100644
--- a/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt
+++ b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt
@@ -16,7 +16,7 @@ Required properties:
Example:
- usb_otg: usb@01c13000 {
+ usb_otg: usb@1c13000 {
compatible = "allwinner,sun4i-a10-musb";
reg = <0x01c13000 0x0400>;
clocks = <&ahb_gates 0>;
diff --git a/Documentation/devicetree/bindings/usb/am33xx-usb.txt b/Documentation/devicetree/bindings/usb/am33xx-usb.txt
index 16920d78e1b8..7a33f22c815a 100644
--- a/Documentation/devicetree/bindings/usb/am33xx-usb.txt
+++ b/Documentation/devicetree/bindings/usb/am33xx-usb.txt
@@ -181,7 +181,7 @@ usb: usb@47400000 {
"tx14", "tx15";
};
- cppi41dma: dma-controller@07402000 {
+ cppi41dma: dma-controller@7402000 {
compatible = "ti,am3359-cppi41";
reg = <0x47400000 0x1000
0x47402000 0x1000
diff --git a/Documentation/devicetree/bindings/usb/atmel-usb.txt b/Documentation/devicetree/bindings/usb/atmel-usb.txt
index ad8ea56a9ed3..44e80153b148 100644
--- a/Documentation/devicetree/bindings/usb/atmel-usb.txt
+++ b/Documentation/devicetree/bindings/usb/atmel-usb.txt
@@ -18,7 +18,7 @@ Required properties:
- atmel,oc-gpio: If present, specifies a gpio that needs to be
activated for the overcurrent detection.
-usb0: ohci@00500000 {
+usb0: ohci@500000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00500000 0x100000>;
clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
@@ -39,7 +39,7 @@ Required properties:
"ehci_clk" for the peripheral clock
"usb_clk" for the UTMI clock
-usb1: ehci@00800000 {
+usb1: ehci@800000 {
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00800000 0x100000>;
interrupts = <22 4>;
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index fcf199b64d3d..e64d903bcbe8 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -19,6 +19,8 @@ Required properties:
configured in FS mode;
- "st,stm32f4x9-hsotg": The DWC2 USB HS controller instance in STM32F4x9 SoCs
configured in HS mode;
+ - "st,stm32f7xx-hsotg": The DWC2 USB HS controller instance in STM32F7xx SoCs
+ configured in HS mode;
- reg : Should contain 1 register range (address and length)
- interrupts : Should contain 1 interrupt
- clocks: clock provider specifier
diff --git a/Documentation/devicetree/bindings/usb/maxim,max3421.txt b/Documentation/devicetree/bindings/usb/maxim,max3421.txt
new file mode 100644
index 000000000000..8cdbe0c85188
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/maxim,max3421.txt
@@ -0,0 +1,26 @@
+Maxim Integrated SPI-based USB 2.0 host controller MAX3421E
+
+Required properties:
+ - compatible: Should be "maxim,max3421"
+ - spi-max-frequency: maximum frequency for this device must not exceed 26 MHz.
+ - reg: chip select number to which this device is connected.
+ - maxim,vbus-en-pin: <GPOUTx ACTIVE_LEVEL>
+ GPOUTx is the number (1-8) of the GPOUT pin of MAX3421E to drive Vbus.
+ ACTIVE_LEVEL is 0 or 1.
+ - interrupts: the interrupt line description for the interrupt controller.
+ The driver configures the MAX3421E for active-low, level-triggered interrupts;
+ configure your interrupt line accordingly.
+
+Optional property:
+ - interrupt-parent: the phandle to the associated interrupt controller.
+
+Example:
+
+ usb@0 {
+ compatible = "maxim,max3421";
+ reg = <0>;
+ maxim,vbus-en-pin = <3 1>;
+ spi-max-frequency = <26000000>;
+ interrupt-parent = <&PIC>;
+ interrupts = <42>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
index 5611a2e4ddf0..30595964876a 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
@@ -26,10 +26,11 @@ Required properties:
- clocks : a list of phandle + clock-specifier pairs, one for each
entry in clock-names
- clock-names : must contain
- "sys_ck": for clock of xHCI MAC
- "ref_ck": for reference clock of xHCI MAC
- "wakeup_deb_p0": for USB wakeup debounce clock of port0
- "wakeup_deb_p1": for USB wakeup debounce clock of port1
+ "sys_ck": controller clock used by normal mode,
+ the following ones are optional:
+ "ref_ck": reference clock used by low power mode etc,
+ "mcu_ck": mcu_bus clock for register access,
+ "dma_ck": dma_bus clock for data transfer by DMA
- phys : a list of phandle + phy specifier pairs
@@ -38,6 +39,8 @@ Optional properties:
mode;
- mediatek,syscon-wakeup : phandle to syscon used to access USB wakeup
control register, it depends on "mediatek,wakeup-src".
+ - mediatek,u3p-dis-msk : mask to disable u3ports, bit0 for u3port0,
+ bit1 for u3port1, ... etc;
- vbus-supply : reference to the VBUS regulator;
- usb3-lpm-capable : supports USB3.0 LPM
- pinctrl-names : a pinctrl state named "default" must be defined
@@ -55,9 +58,7 @@ usb30: usb@11270000 {
clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>,
<&pericfg CLK_PERI_USB0>,
<&pericfg CLK_PERI_USB1>;
- clock-names = "sys_ck", "ref_ck",
- "wakeup_deb_p0",
- "wakeup_deb_p1";
+ clock-names = "sys_ck", "ref_ck";
phys = <&phy_port0 PHY_TYPE_USB3>,
<&phy_port1 PHY_TYPE_USB2>;
vusb33-supply = <&mt6397_vusb_reg>;
@@ -89,9 +90,8 @@ Required properties:
- clocks : a list of phandle + clock-specifier pairs, one for each
entry in clock-names
- - clock-names : must be
- "sys_ck": for clock of xHCI MAC
- "ref_ck": for reference clock of xHCI MAC
+ - clock-names : must contain "sys_ck", and the following ones are optional:
+ "ref_ck", "mcu_ck" and "dma_ck"
Optional properties:
- vbus-supply : reference to the VBUS regulator;
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
index 49f54767cd21..b2271d8e6b50 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
@@ -14,9 +14,9 @@ Required properties:
- vusb33-supply : regulator of USB avdd3.3v
- clocks : a list of phandle + clock-specifier pairs, one for each
entry in clock-names
- - clock-names : must contain "sys_ck" and "ref_ck" for clock of controller;
- "wakeup_deb_p0" and "wakeup_deb_p1" are optional, they are
- depends on "mediatek,enable-wakeup"
+ - clock-names : must contain "sys_ck" for the controller clock;
+ the following clocks are optional:
+ "ref_ck", "mcu_ck" and "dma_ck";
- phys : a list of phandle + phy specifier pairs
- dr_mode : should be one of "host", "peripheral" or "otg",
refer to usb/generic.txt
@@ -30,9 +30,10 @@ Optional properties:
when supports dual-role mode.
- vbus-supply : reference to the VBUS regulator, needed when supports
dual-role mode.
- - pinctl-names : a pinctrl state named "default" must be defined,
- "id_float" and "id_ground" are optinal which depends on
- "mediatek,enable-manual-drd"
+ - pinctrl-names : a pinctrl state named "default" is optional, and needs to
+ be defined if the auto drd switch is enabled, i.e. the property dr_mode
+ is set to "otg" while the property "mediatek,enable-manual-drd"
+ is not set.
- pinctrl-0 : pin control group
See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -44,6 +45,8 @@ Optional properties:
- mediatek,enable-wakeup : supports ip sleep wakeup used by host mode
- mediatek,syscon-wakeup : phandle to syscon used to access USB wakeup
control register, it depends on "mediatek,enable-wakeup".
+ - mediatek,u3p-dis-msk : mask to disable u3ports, bit0 for u3port0,
+ bit1 for u3port1, ... etc;
Sub-nodes:
The xhci should be added as subnode to mtu3 as shown in the following example
@@ -63,9 +66,7 @@ ssusb: usb@11271000 {
clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>,
<&pericfg CLK_PERI_USB0>,
<&pericfg CLK_PERI_USB1>;
- clock-names = "sys_ck", "ref_ck",
- "wakeup_deb_p0",
- "wakeup_deb_p1";
+ clock-names = "sys_ck", "ref_ck";
vusb33-supply = <&mt6397_vusb_reg>;
vbus-supply = <&usb_p0_vbus>;
extcon = <&extcon_usb>;
diff --git a/Documentation/devicetree/bindings/usb/ohci-da8xx.txt b/Documentation/devicetree/bindings/usb/ohci-da8xx.txt
index 2dc8f67eda39..24a826d5015e 100644
--- a/Documentation/devicetree/bindings/usb/ohci-da8xx.txt
+++ b/Documentation/devicetree/bindings/usb/ohci-da8xx.txt
@@ -13,7 +13,7 @@ Optional properties:
Example:
-ohci: usb@0225000 {
+ohci: usb@225000 {
compatible = "ti,da830-ohci";
reg = <0x225000 0x1000>;
interrupts = <59>;
diff --git a/Documentation/devicetree/bindings/usb/renesas_usb3.txt b/Documentation/devicetree/bindings/usb/renesas_usb3.txt
index e28025883b79..87a45e2f9b7f 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usb3.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usb3.txt
@@ -15,6 +15,10 @@ Required properties:
- interrupts: Interrupt specifier for the USB3.0 Peripheral
- clocks: clock phandle and specifier pair
+Optional properties:
+ - phys: phandle + phy specifier pair
+ - phy-names: must be "usb"
+
Example of R-Car H3 ES1.x:
usb3_peri0: usb@ee020000 {
compatible = "renesas,r8a7795-usb3-peri",
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
index 9e18e000339e..47394ab788e3 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
@@ -3,6 +3,8 @@ Renesas Electronics USBHS driver
Required properties:
- compatible: Must contain one or more of the following:
+ - "renesas,usbhs-r8a7743" for r8a7743 (RZ/G1M) compatible device
+ - "renesas,usbhs-r8a7745" for r8a7745 (RZ/G1E) compatible device
- "renesas,usbhs-r8a7790" for r8a7790 (R-Car H2) compatible device
- "renesas,usbhs-r8a7791" for r8a7791 (R-Car M2-W) compatible device
- "renesas,usbhs-r8a7792" for r8a7792 (R-Car V2H) compatible device
@@ -10,7 +12,8 @@ Required properties:
- "renesas,usbhs-r8a7794" for r8a7794 (R-Car E2) compatible device
- "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device
- "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device
- - "renesas,rcar-gen2-usbhs" for R-Car Gen2 compatible device
+ - "renesas,usbhs-r8a77995" for r8a77995 (R-Car D3) compatible device
+ - "renesas,rcar-gen2-usbhs" for R-Car Gen2 or RZ/G1 compatible devices
- "renesas,rcar-gen3-usbhs" for R-Car Gen3 compatible device
When compatible with the generic version, nodes must list the
diff --git a/Documentation/devicetree/bindings/usb/usb-ehci.txt b/Documentation/devicetree/bindings/usb/usb-ehci.txt
index a12d6012a40f..3efde12b5d68 100644
--- a/Documentation/devicetree/bindings/usb/usb-ehci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-ehci.txt
@@ -30,7 +30,7 @@ Example (Sequoia 440EPx):
};
Example (Allwinner sun4i A10 SoC):
- ehci0: usb@01c14000 {
+ ehci0: usb@1c14000 {
compatible = "allwinner,sun4i-a10-ehci", "generic-ehci";
reg = <0x01c14000 0x100>;
interrupts = <39>;
diff --git a/Documentation/devicetree/bindings/usb/usb-nop-xceiv.txt b/Documentation/devicetree/bindings/usb/usb-nop-xceiv.txt
index 5be01c859b7a..4dc6a8ee3071 100644
--- a/Documentation/devicetree/bindings/usb/usb-nop-xceiv.txt
+++ b/Documentation/devicetree/bindings/usb/usb-nop-xceiv.txt
@@ -2,6 +2,7 @@ USB NOP PHY
Required properties:
- compatible: should be usb-nop-xceiv
+- #phy-cells: Must be 0
Optional properties:
- clocks: phandle to the PHY clock. Use as per Documentation/devicetree
@@ -33,6 +34,7 @@ Example:
reset-gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
vbus-detect-gpio = <&gpio2 13 GPIO_ACTIVE_HIGH>;
vbus-regulator = <&vbus_regulator>;
+ #phy-cells = <0>;
};
hsusb1_phy is a NOP USB PHY device that gets its clock from an oscillator
diff --git a/Documentation/devicetree/bindings/usb/usb-ohci.txt b/Documentation/devicetree/bindings/usb/usb-ohci.txt
index e8766b08c93b..09e70c875bc6 100644
--- a/Documentation/devicetree/bindings/usb/usb-ohci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-ohci.txt
@@ -19,7 +19,7 @@ Optional properties:
Example:
- ohci0: usb@01c14400 {
+ ohci0: usb@1c14400 {
compatible = "allwinner,sun4i-a10-ohci", "generic-ohci";
reg = <0x01c14400 0x100>;
interrupts = <64>;
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index 2d80b60eeabe..ae6e484a8d7c 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -26,6 +26,7 @@ Required properties:
Optional properties:
- clocks: reference to a clock
+ - usb2-lpm-disable: indicate if we don't want to enable USB2 HW LPM
- usb3-lpm-capable: determines if platform is USB3 LPM capable
- quirk-broken-port-ped: set if the controller has broken port disable mechanism
diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt
index 3957d4edaa74..168ff819e827 100644
--- a/Documentation/devicetree/bindings/usb/usb251xb.txt
+++ b/Documentation/devicetree/bindings/usb/usb251xb.txt
@@ -1,16 +1,17 @@
Microchip USB 2.0 Hi-Speed Hub Controller
-The device node for the configuration of a Microchip USB251xB/xBi USB 2.0
+The device node for the configuration of a Microchip USB251x/xBi USB 2.0
Hi-Speed Controller.
Required properties :
- compatible : Should be "microchip,usb251xb" or one of the specific types:
"microchip,usb2512b", "microchip,usb2512bi", "microchip,usb2513b",
- "microchip,usb2513bi", "microchip,usb2514b", "microchip,usb2514bi"
- - reset-gpios : Should specify the gpio for hub reset
+ "microchip,usb2513bi", "microchip,usb2514b", "microchip,usb2514bi",
+ "microchip,usb2517", "microchip,usb2517i"
- reg : I2C address on the selected bus (default is <0x2C>)
Optional properties :
+ - reset-gpios : Should specify the gpio for hub reset
- skip-config : Skip Hub configuration, but only send the USB-Attach command
- vendor-id : Set USB Vendor ID of the hub (16 bit, default is 0x0424)
- product-id : Set USB Product ID of the hub (16 bit, default depends on type)
@@ -19,29 +20,47 @@ Optional properties :
- manufacturer : Set USB Manufacturer string (max 31 characters long)
- product : Set USB Product string (max 31 characters long)
- serial : Set USB Serial string (max 31 characters long)
- - {bus,self}-powered : selects between self- and bus-powered operation (default
- is self-powered)
- - disable-hi-speed : disable USB Hi-Speed support
+ - {bus,self}-powered : selects between self- and bus-powered operation
+ (boolean, default is self-powered)
+ - disable-hi-speed : disable USB Hi-Speed support (boolean)
- {multi,single}-tt : selects between multi- and single-transaction-translator
- (default is multi-tt)
- - disable-eop : disable End of Packet generation in full-speed mode
+ (boolean, default is multi-tt)
+ - disable-eop : disable End of Packet generation in full-speed mode (boolean)
- {ganged,individual}-sensing : select over-current sense type in self-powered
- mode (default is individual)
+ mode (boolean, default is individual)
- {ganged,individual}-port-switching : select port power switching mode
- (default is individual)
+ (boolean, default is individual)
- dynamic-power-switching : enable auto-switching from self- to bus-powered
- operation if the local power source is removed or unavailable
+ operation if the local power source is removed or unavailable (boolean)
- oc-delay-us : Delay time (in microseconds) for filtering the over-current
sense inputs. Valid values are 100, 4000, 8000 (default) and 16000. If
an invalid value is given, the default is used instead.
- - compound-device : indicate the hub is part of a compound device
- - port-mapping-mode : enable port mapping mode
+ - compound-device : indicate the hub is part of a compound device (boolean)
+ - port-mapping-mode : enable port mapping mode (boolean)
+ - led-{usb,speed}-mode : led usb/speed indication mode selection
+ (boolean, default is speed mode)
- string-support : enable string descriptor support (required for manufacturer,
product and serial string configuration)
- non-removable-ports : Should specify the ports which have a non-removable
device connected.
- sp-disabled-ports : Specifies the ports which will be self-power disabled
- bp-disabled-ports : Specifies the ports which will be bus-power disabled
+ - sp-max-total-current-microamp: Specifies max current consumed by the hub
+ from VBUS when operating as a self-powered hub. It includes the hub
+ silicon along with all associated circuitry including a permanently
+ attached peripheral (range: 0 - 100000 uA, default 1000 uA)
+ - bp-max-total-current-microamp: Specifies max current consumed by the hub
+ from VBUS when operating as a bus-powered hub. It includes the hub
+ silicon along with all associated circuitry including a permanently
+ attached peripheral (range: 0 - 510000 uA, default 100000 uA)
+ - sp-max-removable-current-microamp: Specifies max current consumed by the hub
+ from VBUS when operating as a self-powered hub. It includes the hub
+ silicon along with all associated circuitry excluding a permanently
+ attached peripheral (range: 0 - 100000 uA, default 1000 uA)
+ - bp-max-removable-current-microamp: Specifies max current consumed by the hub
+ from VBUS when operating as a bus-powered hub. It includes the hub
+ silicon along with all associated circuitry excluding a permanently
+ attached peripheral (range: 0 - 510000 uA, default 100000 uA)
- power-on-time-ms : Specifies the time it takes from the time the host
initiates the power-on sequence to a port until the port has adequate
power. The value is given in ms in a 0 - 510 range (default is 100ms).
@@ -56,7 +75,6 @@ Examples:
usb2514b@2c {
compatible = "microchip,usb2514b";
reg = <0x2c>;
- reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
vendor-id = /bits/ 16 <0x0000>;
product-id = /bits/ 16 <0x0000>;
string-support;
diff --git a/Documentation/devicetree/bindings/usb/usb3503.txt b/Documentation/devicetree/bindings/usb/usb3503.txt
index c1a0a9191d26..057dd384d473 100644
--- a/Documentation/devicetree/bindings/usb/usb3503.txt
+++ b/Documentation/devicetree/bindings/usb/usb3503.txt
@@ -26,7 +26,7 @@ Optional properties:
clock frequencies table is used)
Examples:
- usb3503@08 {
+ usb3503@8 {
compatible = "smsc,usb3503";
reg = <0x08>;
connect-gpios = <&gpx3 0 1>;
diff --git a/Documentation/devicetree/bindings/usb/usbmisc-imx.txt b/Documentation/devicetree/bindings/usb/usbmisc-imx.txt
index f1e27faf528e..a85a631ec434 100644
--- a/Documentation/devicetree/bindings/usb/usbmisc-imx.txt
+++ b/Documentation/devicetree/bindings/usb/usbmisc-imx.txt
@@ -10,7 +10,7 @@ Required properties:
- reg: Should contain registers location and length
Examples:
-usbmisc@02184800 {
+usbmisc@2184800 {
#index-cells = <1>;
compatible = "fsl,imx6q-usbmisc";
reg = <0x02184800 0x200>;
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 1afd298eddd7..1db9dbef3e56 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -83,6 +83,7 @@ davicom DAVICOM Semiconductor, Inc.
delta Delta Electronics, Inc.
denx Denx Software Engineering
devantech Devantech, Ltd.
+dh DH electronics GmbH
digi Digi International Inc.
digilent Diglent, Inc.
dioo Dioo Microcircuit Co., Ltd
@@ -137,6 +138,7 @@ gw Gateworks Corporation
hannstar HannStar Display Corporation
haoyu Haoyu Microelectronic Co. Ltd.
hardkernel Hardkernel Co., Ltd
+hideep HiDeep Inc.
himax Himax Technologies, Inc.
hisilicon Hisilicon Limited.
hit Hitachi Ltd.
@@ -229,12 +231,14 @@ netlogic Broadcom Corporation (formerly NetLogic Microsystems)
netron-dy Netron DY
netxeon Shenzhen Netxeon Technology CO., LTD
nexbox Nexbox
+nextthing Next Thing Co.
newhaven Newhaven Display International
ni National Instruments
nintendo Nintendo
nlt NLT Technologies, Ltd.
nokia Nokia
nordic Nordic Semiconductor
+nutsboard NutsBoard
nuvoton Nuvoton Technology Corporation
nvd New Vision Display
nvidia NVIDIA
@@ -245,9 +249,12 @@ olimex OLIMEX Ltd.
onion Onion Corporation
onnn ON Semiconductor Corp.
ontat On Tat Industrial Company
+opalkelly Opal Kelly Incorporated
opencores OpenCores.org
+openrisc OpenRISC.io
option Option NV
ORCL Oracle Corporation
+orisetech Orise Technology
ortustech Ortus Technology Co., Ltd.
ovti OmniVision Technologies
oxsemi Oxford Semiconductor, Ltd.
@@ -296,6 +303,7 @@ sensirion Sensirion AG
sff Small Form Factor Committee
sgx SGX Sensortech
sharp Sharp Corporation
+shimafuji Shimafuji Electric, Inc.
si-en Si-En Technology Ltd.
sigma Sigma Designs, Inc.
sii Seiko Instruments, Inc.
@@ -317,6 +325,7 @@ solomon Solomon Systech Limited
sony Sony Corporation
spansion Spansion Inc.
sprd Spreadtrum Communications Inc.
+sst Silicon Storage Technology, Inc.
st STMicroelectronics
starry Starry Electronic Technology (ShenZhen) Co., LTD
startek Startek
@@ -338,6 +347,7 @@ thine THine Electronics, Inc.
ti Texas Instruments
tianma Tianma Micro-electronics Co., Ltd.
tlm Trusted Logic Mobility
+tmt Tecon Microprocessor Technologies, LLC.
topeet Topeet
toradex Toradex AG
toshiba Toshiba Corporation
diff --git a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
index 235de0683bb6..5b38a30e608c 100644
--- a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
@@ -13,7 +13,7 @@ Required properties:
Example:
-wdt: watchdog@010000000 {
+wdt: watchdog@10000000 {
compatible = "mediatek,mt6589-wdt";
reg = <0x10000000 0x18>;
};
diff --git a/Documentation/devicetree/bindings/watchdog/sunxi-wdt.txt b/Documentation/devicetree/bindings/watchdog/sunxi-wdt.txt
index b8f75c51453a..62dd5baad70e 100644
--- a/Documentation/devicetree/bindings/watchdog/sunxi-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/sunxi-wdt.txt
@@ -8,7 +8,7 @@ Required properties:
Example:
-wdt: watchdog@01c20c90 {
+wdt: watchdog@1c20c90 {
compatible = "allwinner,sun4i-a10-wdt";
reg = <0x01c20c90 0x10>;
};
diff --git a/Documentation/devicetree/overlay-notes.txt b/Documentation/devicetree/overlay-notes.txt
index eb7f2685fda1..c4aa0adf13ec 100644
--- a/Documentation/devicetree/overlay-notes.txt
+++ b/Documentation/devicetree/overlay-notes.txt
@@ -87,15 +87,15 @@ Overlay in-kernel API
The API is quite easy to use.
-1. Call of_overlay_create() to create and apply an overlay. The return value
-is a cookie identifying this overlay.
+1. Call of_overlay_apply() to create and apply an overlay changeset. The return
+value is an error or a cookie identifying this overlay.
-2. Call of_overlay_destroy() to remove and cleanup the overlay previously
-created via the call to of_overlay_create(). Removal of an overlay that
-is stacked by another will not be permitted.
+2. Call of_overlay_remove() to remove and cleanup the overlay changeset
+previously created via the call to of_overlay_apply(). Removal of an overlay
+changeset that is stacked by another will not be permitted.
Finally, if you need to remove all overlays in one-go, just call
-of_overlay_destroy_all() which will remove every single one in the correct
+of_overlay_remove_all() which will remove every single one in the correct
order.
Overlay DTS Format
diff --git a/Documentation/dmaengine/00-INDEX b/Documentation/dmaengine/00-INDEX
deleted file mode 100644
index 07de6573d22b..000000000000
--- a/Documentation/dmaengine/00-INDEX
+++ /dev/null
@@ -1,8 +0,0 @@
-00-INDEX
- - this file.
-client.txt
- -the DMA Engine API Guide.
-dmatest.txt
- - how to compile, configure and use the dmatest system.
-provider.txt
- - the DMA controller API. \ No newline at end of file
diff --git a/Documentation/dmaengine/client.txt b/Documentation/dmaengine/client.txt
deleted file mode 100644
index c72b4563de10..000000000000
--- a/Documentation/dmaengine/client.txt
+++ /dev/null
@@ -1,222 +0,0 @@
- DMA Engine API Guide
- ====================
-
- Vinod Koul <vinod dot koul at intel.com>
-
-NOTE: For DMA Engine usage in async_tx please see:
- Documentation/crypto/async-tx-api.txt
-
-
-Below is a guide for device driver writers on how to use the Slave-DMA API of the
-DMA Engine. This is applicable for slave DMA usage only.
-
-The slave DMA usage consists of following steps:
-1. Allocate a DMA slave channel
-2. Set slave and controller specific parameters
-3. Get a descriptor for transaction
-4. Submit the transaction
-5. Issue pending requests and wait for callback notification
-
-1. Allocate a DMA slave channel
-
- Channel allocation is slightly different in the slave DMA context:
- client drivers typically need a channel from a particular DMA
- controller only, and in some cases even a specific channel is desired.
- To request a channel, the dma_request_chan() API is used.
-
- Interface:
- struct dma_chan *dma_request_chan(struct device *dev, const char *name);
-
- Which will find and return the 'name' DMA channel associated with the 'dev'
- device. The association is done via DT, ACPI or board file based
- dma_slave_map matching table.
-
- A channel allocated via this interface is exclusive to the caller,
- until dma_release_channel() is called.
-
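   As a rough illustration (the device pointer and the "rx" channel name
   below are only placeholders, not taken from the text above), a client
   might request and later release a channel like this:

	struct dma_chan *chan;

	chan = dma_request_chan(&pdev->dev, "rx");	/* name from DT/ACPI/board map */
	if (IS_ERR(chan))
		return PTR_ERR(chan);			/* may be -EPROBE_DEFER */

	/* ... configure and use the channel ... */

	dma_release_channel(chan);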
-2. Set slave and controller specific parameters
-
- Next step is always to pass some specific information to the DMA
- driver. Most of the generic information which a slave DMA can use
- is in struct dma_slave_config. This allows the clients to specify
- DMA direction, DMA addresses, bus widths, DMA burst lengths etc
- for the peripheral.
-
- If some DMA controllers have more parameters to be sent then they
- should try to embed struct dma_slave_config in their controller
- specific structure. That gives the client the flexibility to pass more
- parameters, if required.
-
- Interface:
- int dmaengine_slave_config(struct dma_chan *chan,
- struct dma_slave_config *config)
-
- Please see the dma_slave_config structure definition in dmaengine.h
- for a detailed explanation of the struct members. Please note
- that the 'direction' member will be going away as it duplicates the
- direction given in the prepare call.
-
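   A minimal sketch of this step, assuming a memory-to-device transfer
   towards a peripheral FIFO whose bus address is held in a hypothetical
   fifo_phys variable (chan is the channel obtained in step 1):

	struct dma_slave_config cfg = { };
	int ret;

	cfg.direction = DMA_MEM_TO_DEV;		/* see the note on 'direction' above */
	cfg.dst_addr = fifo_phys;		/* bus address of the device FIFO */
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = 8;			/* words per burst */

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;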
-3. Get a descriptor for transaction
-
- For slave usage the various modes of slave transfers supported by the
- DMA-engine are:
-
- slave_sg - DMA a list of scatter gather buffers from/to a peripheral
- dma_cyclic - Perform a cyclic DMA operation from/to a peripheral till the
- operation is explicitly stopped.
- interleaved_dma - This is common to Slave as well as M2M clients. For slave
- usage, the address of the device's fifo could already be known to the driver.
- Various types of operations could be expressed by setting
- appropriate values to the 'dma_interleaved_template' members.
-
- A non-NULL return of this transfer API represents a "descriptor" for
- the given transaction.
-
- Interface:
- struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
- unsigned long flags);
-
- struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
- struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction);
-
- struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
- struct dma_chan *chan, struct dma_interleaved_template *xt,
- unsigned long flags);
-
- The peripheral driver is expected to have mapped the scatterlist for
- the DMA operation prior to calling dmaengine_prep_slave_sg(), and must
- keep the scatterlist mapped until the DMA operation has completed.
- The scatterlist must be mapped using the DMA struct device.
- If a mapping needs to be synchronized later, dma_sync_*_for_*() must be
- called using the DMA struct device, too.
- So, normal setup should look like this:
-
- nr_sg = dma_map_sg(chan->device->dev, sgl, sg_len);
- if (nr_sg == 0)
- /* error */
-
- desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, direction, flags);
-
- Once a descriptor has been obtained, the callback information can be
- added and the descriptor must then be submitted. Some DMA engine
- drivers may hold a spinlock between a successful preparation and
- submission so it is important that these two operations are closely
- paired.
-
- Note:
- Although the async_tx API specifies that completion callback
- routines cannot submit any new operations, this is not the
- case for slave/cyclic DMA.
-
- For slave DMA, the subsequent transaction may not be available
- for submission prior to the callback function being invoked, so
- slave DMA callbacks are permitted to prepare and submit a new
- transaction.
-
- For cyclic DMA, a callback function may wish to terminate the
- DMA via dmaengine_terminate_async().
-
- Therefore, it is important that DMA engine drivers drop any
- locks before calling the callback function, as holding them
- may cause a deadlock.
-
- Note that callbacks will always be invoked from the DMA
- engine's tasklet, never from interrupt context.
-
-4. Submit the transaction
-
- Once the descriptor has been prepared and the callback information
- added, it must be placed on the DMA engine driver's pending queue.
-
- Interface:
- dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
-
- This returns a cookie that can be used to check the progress of DMA engine
- activity via other DMA engine calls not covered in this document.
-
- dmaengine_submit() will not start the DMA operation, it merely adds
- it to the pending queue. For this, see step 5, dma_async_issue_pending.
-
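   Continuing the sketch above, the callback information is attached before
   submission; my_dma_complete and my_data are hypothetical names used only
   for this example:

	dma_cookie_t cookie;

	desc->callback = my_dma_complete;	/* invoked from the engine's tasklet */
	desc->callback_param = my_data;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;			/* descriptor was not queued */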
-5. Issue pending DMA requests and wait for callback notification
-
- The transactions in the pending queue can be activated by calling the
- issue_pending API. If the channel is idle then the first transaction in
- the queue is started and subsequent ones are queued up.
-
- On completion of each DMA operation, the next in queue is started and
- a tasklet triggered. The tasklet will then call the client driver
- completion callback routine for notification, if set.
-
- Interface:
- void dma_async_issue_pending(struct dma_chan *chan);
-
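   For illustration only, a driver could pair the issue_pending call with a
   completion that its (hypothetical) callback signals; struct my_dev and its
   dma_done member are assumptions for this sketch:

	static void my_dma_complete(void *param)
	{
		struct my_dev *md = param;	/* hypothetical driver data */

		complete(&md->dma_done);
	}

	/* ... after dmaengine_submit() ... */
	reinit_completion(&md->dma_done);
	dma_async_issue_pending(chan);
	wait_for_completion(&md->dma_done);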
-Further APIs:
-
-1. int dmaengine_terminate_sync(struct dma_chan *chan)
- int dmaengine_terminate_async(struct dma_chan *chan)
- int dmaengine_terminate_all(struct dma_chan *chan) /* DEPRECATED */
-
- This causes all activity for the DMA channel to be stopped, and may
- discard data in the DMA FIFO which hasn't been fully transferred.
- No callback functions will be called for any incomplete transfers.
-
- Two variants of this function are available.
-
- dmaengine_terminate_async() might not wait until the DMA has been fully
- stopped or until any running complete callbacks have finished. But it is
- possible to call dmaengine_terminate_async() from atomic context or from
- within a complete callback. dmaengine_synchronize() must be called before it
- is safe to free the memory accessed by the DMA transfer or free resources
- accessed from within the complete callback.
-
- dmaengine_terminate_sync() will wait for the transfer and any running
- complete callbacks to finish before it returns. But the function must not be
- called from atomic context or from within a complete callback.
-
- dmaengine_terminate_all() is deprecated and should not be used in new code.
-
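   As a sketch of how these variants are typically combined (not prescriptive):

	/* e.g. from an error interrupt handler (atomic context): */
	dmaengine_terminate_async(chan);

	/* later, from process context, before freeing DMA'd memory: */
	dmaengine_synchronize(chan);

	/* or, when already in process context, a single call does both: */
	dmaengine_terminate_sync(chan);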
-2. int dmaengine_pause(struct dma_chan *chan)
-
- This pauses activity on the DMA channel without data loss.
-
-3. int dmaengine_resume(struct dma_chan *chan)
-
- Resume a previously paused DMA channel. It is invalid to resume a
- channel which is not currently paused.
-
-4. enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
- dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
-
- This can be used to check the status of the channel. Please see
- the documentation in include/linux/dmaengine.h for a more complete
- description of this API.
-
- This can be used in conjunction with dma_async_is_complete() and
- the cookie returned from dmaengine_submit() to check for
- completion of a specific DMA transaction.
-
- Note:
- Not all DMA engine drivers can return reliable information for
- a running DMA channel. It is recommended that DMA engine users
- pause or stop (via dmaengine_terminate_all()) the channel before
- using this API.
-
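   A small usage sketch, checking the cookie obtained from dmaengine_submit():

	dma_cookie_t last, used;
	enum dma_status status;

	status = dma_async_is_tx_complete(chan, cookie, &last, &used);
	if (status == DMA_COMPLETE)
		pr_debug("transaction %d has completed\n", cookie);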
-5. void dmaengine_synchronize(struct dma_chan *chan)
-
- Synchronize the termination of the DMA channel to the current context.
-
- This function should be used after dmaengine_terminate_async() to synchronize
- the termination of the DMA channel to the current context. The function will
- wait for the transfer and any running complete callbacks to finish before it
- returns.
-
- If dmaengine_terminate_async() is used to stop the DMA channel this function
- must be called before it is safe to free memory accessed by previously
- submitted descriptors or to free any resources accessed within the complete
- callback of previously submitted descriptors.
-
- The behavior of this function is undefined if dma_async_issue_pending() has
- been called between dmaengine_terminate_async() and this function.
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
deleted file mode 100644
index 5dbe054a40ad..000000000000
--- a/Documentation/dmaengine/provider.txt
+++ /dev/null
@@ -1,424 +0,0 @@
-DMAengine controller documentation
-==================================
-
-Hardware Introduction
-+++++++++++++++++++++
-
-Most of the Slave DMA controllers have the same general principles of
-operations.
-
-They have a given number of channels to use for the DMA transfers, and
-a given number of requests lines.
-
-Requests and channels are pretty much orthogonal. Channels can be used
-to serve any of the requests. To simplify, channels are the
-entities that will be doing the copy, and requests what endpoints are
-involved.
-
-The request lines actually correspond to physical lines going from the
-DMA-eligible devices to the controller itself. Whenever the device
-will want to start a transfer, it will assert a DMA request (DRQ) by
-asserting that request line.
-
-A very simple DMA controller would only take into account a single
-parameter: the transfer size. At each clock cycle, it would transfer a
-byte of data from one buffer to another, until the transfer size has
-been reached.
-
-That wouldn't work well in the real world, since slave devices might
-require a specific number of bits to be transferred in a single
-cycle. For example, we may want to transfer as much data as the
-physical bus allows to maximize performance when doing a simple
-memory copy operation, but our audio device could have a narrower FIFO
-that requires data to be written exactly 16 or 24 bits at a time. This
-is why most if not all of the DMA controllers can adjust this, using a
-parameter called the transfer width.
-
-Moreover, some DMA controllers, whenever the RAM is used as a source
-or destination, can group the reads or writes in memory into a buffer,
-so instead of having a lot of small memory accesses, which is not
-really efficient, you'll get several bigger transfers. This is done
-using a parameter called the burst size, that defines how many single
-reads/writes it's allowed to do without the controller splitting the
-transfer into smaller sub-transfers.
-
-Our theoretical DMA controller would then only be able to do transfers
-that involve a single contiguous block of data. However, some of the
-transfers we usually have are not, and we want to copy data from
-non-contiguous buffers to a contiguous buffer, which is called
-scatter-gather.
-
-DMAEngine, at least for mem2dev transfers, requires support for
-scatter-gather. So we're left with two cases here: either we have a
-quite simple DMA controller that doesn't support it, and we'll have to
-implement it in software, or we have a more advanced DMA controller
-that implements scatter-gather in hardware.
-
-The latter are usually programmed using a collection of chunks to
-transfer, and whenever the transfer is started, the controller will go
-over that collection, doing whatever we programmed there.
-
-This collection is usually either a table or a linked list. You will
-then push either the address of the table and its number of elements,
-or the first item of the list to one channel of the DMA controller,
-and whenever a DRQ will be asserted, it will go through the collection
-to know where to fetch the data from.
-
-Either way, the format of this collection is completely dependent on
-your hardware. Each DMA controller will require a different structure,
-but all of them will require, for every chunk, at least the source and
-destination addresses, whether it should increment these addresses or
-not and the three parameters we saw earlier: the burst size, the
-transfer width and the transfer size.
-
-The one last thing is that usually, slave devices won't issue DRQ by
-default, and you have to enable this in your slave device driver first
-whenever you're willing to use DMA.
-
-These were just the general memory-to-memory (also called mem2mem) or
-memory-to-device (mem2dev) kinds of transfers. Most devices often
-support other kinds of transfers or memory operations that dmaengine
-supports, and these will be detailed later in this document.
-
-DMA Support in Linux
-++++++++++++++++++++
-
-Historically, DMA controller drivers have been implemented using the
-async TX API, to offload operations such as memory copy, XOR,
-cryptography, etc., basically any memory to memory operation.
-
-Over time, the need for memory to device transfers arose, and
-dmaengine was extended. Nowadays, the async TX API is written as a
-layer on top of dmaengine, and acts as a client. Still, dmaengine
-accommodates that API in some cases, and made some design choices to
-ensure that it stayed compatible.
-
-For more information on the Async TX API, please look at the relevant
-documentation file in Documentation/crypto/async-tx-api.txt.
-
-DMAEngine Registration
-++++++++++++++++++++++
-
-struct dma_device Initialization
---------------------------------
-
-Just like any other kernel framework, the whole DMAEngine registration
-relies on the driver filling a structure and registering against the
-framework. In our case, that structure is dma_device.
-
-The first thing you need to do in your driver is to allocate this
-structure. Any of the usual memory allocators will do, but you'll also
-need to initialize a few fields in there:
-
- * channels: should be initialized as a list using the
- INIT_LIST_HEAD macro for example
-
- * src_addr_widths:
- - should contain a bitmask of the supported source transfer width
-
- * dst_addr_widths:
- - should contain a bitmask of the supported destination transfer
- width
-
- * directions:
- - should contain a bitmask of the supported slave directions
- (i.e. excluding mem2mem transfers)
-
- * residue_granularity:
- - Granularity of the transfer residue reported to dma_set_residue.
- - This can be either:
- + Descriptor
- -> Your device doesn't support any kind of residue
- reporting. The framework will only know that a particular
- transaction descriptor is done.
- + Segment
- -> Your device is able to report which chunks have been
- transferred
- + Burst
- -> Your device is able to report which bursts have been
- transferred
-
- * dev: should hold the pointer to the struct device associated
- to your current driver instance.
-
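   As a loose sketch, assuming a driver-private structure that embeds a
   struct dma_device called 'ddev' (priv and pdev are hypothetical names),
   a probe routine could initialize these fields roughly as follows; the
   capability mask set at the end is described in the next section:

	struct dma_device *dd = &priv->ddev;	/* 'priv' is hypothetical driver data */

	INIT_LIST_HEAD(&dd->channels);
	dd->dev = &pdev->dev;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dma_cap_set(DMA_SLAVE, dd->cap_mask);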
-Supported transaction types
----------------------------
-
-The next thing you need is to set which transaction types your device
-(and driver) supports.
-
-Our dma_device structure has a field called cap_mask that holds the
-various types of transaction supported, and you need to modify this
-mask using the dma_cap_set function, passing as an argument the flags
-for the transaction types you support.
-
-All those capabilities are defined in the dma_transaction_type enum,
-in include/linux/dmaengine.h
-
-Currently, the types available are:
- * DMA_MEMCPY
- - The device is able to do memory to memory copies
-
- * DMA_XOR
- - The device is able to perform XOR operations on memory areas
- - Used to accelerate XOR intensive tasks, such as RAID5
-
- * DMA_XOR_VAL
- - The device is able to perform parity check using the XOR
- algorithm against a memory buffer.
-
- * DMA_PQ
- - The device is able to perform RAID6 P+Q computations, P being a
- simple XOR, and Q being a Reed-Solomon algorithm.
-
- * DMA_PQ_VAL
- - The device is able to perform parity check using RAID6 P+Q
- algorithm against a memory buffer.
-
- * DMA_INTERRUPT
- - The device is able to trigger a dummy transfer that will
- generate periodic interrupts
- - Used by the client drivers to register a callback that will be
- called on a regular basis through the DMA controller interrupt
-
- * DMA_PRIVATE
- - The device only supports slave transfers, and as such isn't
- available for async transfers.
-
- * DMA_ASYNC_TX
- - Must not be set by the device, and will be set by the framework
- if needed
- - /* TODO: What is it about? */
-
- * DMA_SLAVE
- - The device can handle device to memory transfers, including
- scatter-gather transfers.
- - While in the mem2mem case we were having two distinct types to
- deal with a single chunk to copy or a collection of them, here,
- we just have a single transaction type that is supposed to
- handle both.
- - If you want to transfer a single contiguous memory buffer,
- simply build a scatter list with only one item.
-
- * DMA_CYCLIC
- - The device can handle cyclic transfers.
- - A cyclic transfer is a transfer where the chunk collection will
- loop over itself, with the last item pointing to the first.
- - It's usually used for audio transfers, where you want to operate
- on a single ring buffer that you will fill with your audio data.
-
- * DMA_INTERLEAVE
- - The device supports interleaved transfer.
- - These transfers can transfer data from a non-contiguous buffer
- to a non-contiguous buffer, as opposed to DMA_SLAVE, which can
- transfer data from a non-contiguous data set to a contiguous
- destination buffer.
- - It's usually used for 2d content transfers, in which case you
- want to transfer a portion of uncompressed data directly to the
- display.
-
-These various types will also affect how the source and destination
-addresses change over time.
-
-Addresses pointing to RAM are typically incremented (or decremented)
-after each transfer. In case of a ring buffer, they may loop
-(DMA_CYCLIC). Addresses pointing to a device's register (e.g. a FIFO)
-are typically fixed.
-
-Device operations
------------------
-
-Our dma_device structure also requires a few function pointers in
-order to implement the actual logic, now that we described what
-operations we were able to perform.
-
-The functions that we have to fill in there, and hence have to
-implement, obviously depend on the transaction types you reported as
-supported.
-
- * device_alloc_chan_resources
- * device_free_chan_resources
- - These functions will be called whenever a driver will call
- dma_request_channel or dma_release_channel for the first/last
- time on the channel associated to that driver.
- - They are in charge of allocating/freeing all the needed
- resources in order for that channel to be useful for your
- driver.
- - These functions can sleep.
-
- * device_prep_dma_*
- - These functions are matching the capabilities you registered
- previously.
- - These functions all take the buffer or the scatterlist relevant
- for the transfer being prepared, and should create a hardware
- descriptor or a list of hardware descriptors from it
- - These functions can be called from an interrupt context
- - Any allocation you might do should be using the GFP_NOWAIT
- flag, in order not to potentially sleep, but without depleting
- the emergency pool either.
- - Drivers should try to pre-allocate any memory they might need
- during the transfer setup at probe time to avoid putting too
- much pressure on the nowait allocator.
-
- - It should return a unique instance of the
- dma_async_tx_descriptor structure, that further represents this
- particular transfer.
-
- - This structure can be initialized using the function
- dma_async_tx_descriptor_init.
- - You'll also need to set two fields in this structure:
- + flags:
- TODO: Can it be modified by the driver itself, or
- should it be always the flags passed in the arguments
-
- + tx_submit: A pointer to a function you have to implement,
- that is supposed to push the current
- transaction descriptor to a pending queue, waiting
- for issue_pending to be called.
- - In this structure the function pointer callback_result can be
- initialized in order for the submitter to be notified that a
- transaction has completed. In earlier code the function pointer
- callback was used. However, it does not provide any status for the
- transaction and will be deprecated. The result structure defined as
- dmaengine_result that is passed in to callback_result has two fields:
- + result: This provides the transfer result defined by
- dmaengine_tx_result. Either success or some error
- condition.
- + residue: Provides the residue bytes of the transfer for those that
- support residue.
-
- * device_issue_pending
- - Takes the first transaction descriptor in the pending queue,
- and starts the transfer. Whenever that transfer is done, it
- should move to the next transaction in the list.
- - This function can be called in an interrupt context
-
- * device_tx_status
- - Should report the bytes left to go over on the given channel
- - Should only care about the transaction descriptor passed as
- argument, not the currently active one on a given channel
- - The tx_state argument might be NULL
- - Should use dma_set_residue to report it
- - In the case of a cyclic transfer, it should only take into
- account the current period.
- - This function can be called in an interrupt context.
-
- * device_config
- - Reconfigures the channel with the configuration given as
- argument
- - This command should NOT perform synchronously, or on any
- currently queued transfers, but only on subsequent ones
- - In this case, the function will receive a dma_slave_config
- structure pointer as an argument, that will detail which
- configuration to use.
- - Even though that structure contains a direction field, this
- field is deprecated in favor of the direction argument given to
- the prep_* functions
- - This call is mandatory for slave operations only. This should NOT be
- set or expected to be set for memcpy operations.
- If a driver supports both, it should use this call for slave
- operations only and not for memcpy ones.
-
- * device_pause
- - Pauses a transfer on the channel
- - This command should operate synchronously on the channel,
- pausing right away the work of the given channel
-
- * device_resume
- - Resumes a transfer on the channel
- - This command should operate synchronously on the channel,
- resuming right away the work of the given channel
-
- * device_terminate_all
- - Aborts all the pending and ongoing transfers on the channel
- - For aborted transfers the complete callback should not be called
- - Can be called from atomic context or from within a complete
- callback of a descriptor. Must not sleep. Drivers must be able
- to handle this correctly.
- - Termination may be asynchronous. The driver does not have to
- wait until the currently active transfer has completely stopped.
- See device_synchronize.
-
- * device_synchronize
- - Must synchronize the termination of a channel to the current
- context.
- - Must make sure that memory for previously submitted
- descriptors is no longer accessed by the DMA controller.
- - Must make sure that all complete callbacks for previously
- submitted descriptors have finished running and none are
- scheduled to run.
- - May sleep.
-
-
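   To tie the prep and submit callbacks above together, here is a rough,
   heavily simplified sketch; my_desc, my_chan and the to_my_*() helpers are
   hypothetical, and dma_cookie_assign() is one of the dmaengine core's
   internal driver helpers:

	static dma_cookie_t my_tx_submit(struct dma_async_tx_descriptor *tx)
	{
		struct my_chan *c = to_my_chan(tx->chan);
		struct my_desc *d = to_my_desc(tx);
		unsigned long flags;
		dma_cookie_t cookie;

		spin_lock_irqsave(&c->lock, flags);
		cookie = dma_cookie_assign(tx);
		list_add_tail(&d->node, &c->pending);	/* waits for issue_pending */
		spin_unlock_irqrestore(&c->lock, flags);
		return cookie;
	}

	static struct dma_async_tx_descriptor *my_prep_slave_sg(
			struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
	{
		struct my_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

		if (!d)
			return NULL;
		/* ... build the hardware descriptor chain from sgl here ... */
		dma_async_tx_descriptor_init(&d->tx, chan);
		d->tx.tx_submit = my_tx_submit;
		d->tx.flags = flags;
		return &d->tx;
	}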
-Misc notes (stuff that should be documented, but don't really know
-where to put them)
-------------------------------------------------------------------
- * dma_run_dependencies
- - Should be called at the end of an async TX transfer, and can be
- ignored in the slave transfers case.
- - Makes sure that dependent operations are run before marking it
- as complete.
-
- * dma_cookie_t
- - it's a DMA transaction ID that will increment over time.
- - Not really relevant any more since the introduction of virt-dma
- that abstracts it away.
-
- * DMA_CTRL_ACK
- - If clear, the descriptor cannot be reused by provider until the
- client acknowledges receipt, i.e. has had a chance to establish any
- dependency chains
- - This can be acked by invoking async_tx_ack()
- - If set, does not mean descriptor can be reused
-
- * DMA_CTRL_REUSE
- - If set, the descriptor can be reused after being completed. It should
- not be freed by provider if this flag is set.
- - The descriptor should be prepared for reuse by invoking
- dmaengine_desc_set_reuse() which will set DMA_CTRL_REUSE.
- - dmaengine_desc_set_reuse() will succeed only when the channel supports
- reusable descriptors, as exhibited by its capabilities
- - As a consequence, if a device driver wants to skip the dma_map_sg() and
- dma_unmap_sg() in between 2 transfers, because the DMA'd data wasn't used,
- it can resubmit the transfer right after its completion.
- - Descriptor can be freed in few ways
- - Clearing DMA_CTRL_REUSE by invoking dmaengine_desc_clear_reuse()
- and submitting for last txn
- - Explicitly invoking dmaengine_desc_free(), this can succeed only
- when DMA_CTRL_REUSE is already set
- - Terminating the channel
-
- * DMA_PREP_CMD
- - If set, the client driver tells the DMA controller that the data passed
- via the DMA API is command data.
- - Interpretation of command data is DMA controller specific. It can be
- used for issuing commands to other peripherals/register reads/register
- writes for which the descriptor should be in different format from
- normal data descriptors.
-
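   A short client-side sketch of the reuse flow described above, assuming
   the channel advertises reusable descriptors (chan, sgl and nr_sg come from
   the usual slave setup):

	desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_REUSE);
	if (!desc)
		return -ENOMEM;
	if (dmaengine_desc_set_reuse(desc))
		return -ENOSYS;		/* channel cannot reuse descriptors */

	/* submit/issue as usual; once completed, the very same descriptor
	 * can be resubmitted without re-mapping the buffers */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* once the transfer has completed and the descriptor is finally
	 * no longer needed: */
	dmaengine_desc_free(desc);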
-General Design Notes
---------------------
-
-Most of the DMAEngine drivers you'll see are based on a similar design
-that handles the end of transfer interrupts in the handler, but defer
-most work to a tasklet, including the start of a new transfer whenever
-the previous transfer ended.
-
-This is a rather inefficient design though, because the inter-transfer
-latency will be not only the interrupt latency, but also the
-scheduling latency of the tasklet, leaving the channel idle
-in between and slowing down the global transfer rate.
-
-You should avoid this kind of practice, and instead of electing a new
-transfer in your tasklet, move that part to the interrupt handler in
-order to have a shorter idle window (that we can't really avoid
-anyway).
-
-Glossary
---------
-
-Burst: A number of consecutive read or write operations
- that can be queued to buffers before being flushed to
- memory.
-Chunk: A contiguous collection of bursts
-Transfer: A collection of chunks (be it contiguous or not)
diff --git a/Documentation/dmaengine/pxa_dma.txt b/Documentation/dmaengine/pxa_dma.txt
deleted file mode 100644
index 0736d44b5438..000000000000
--- a/Documentation/dmaengine/pxa_dma.txt
+++ /dev/null
@@ -1,153 +0,0 @@
-PXA/MMP - DMA Slave controller
-==============================
-
-Constraints
------------
- a) Transfers hot queuing
- A driver submitting a transfer and issuing it should be guaranteed that the
- transfer is queued even on a running DMA channel.
- This implies that the queuing doesn't wait for the previous transfer end,
- and that the descriptor chaining is not only done in the irq/tasklet code
- triggered by the end of the transfer.
- A transfer which is submitted and issued on a phy doesn't wait for a phy to
- stop and restart, but is submitted on a "running channel". The other
- drivers, especially mmp_pdma, waited for the phy to stop before relaunching
- a new transfer.
-
- b) All transfers having asked for confirmation should be signaled
- Any issued transfer with DMA_PREP_INTERRUPT should trigger a callback call.
- This implies that even if an irq/tasklet is triggered by end of tx1, but
- at the time of irq/dma tx2 is already finished, tx1->complete() and
- tx2->complete() should be called.
-
- c) Channel running state
- A driver should be able to query if a channel is running or not. For the
- multimedia case, such as video capture, if a transfer is submitted and then
- a check of the DMA channel reports a "stopped channel", the transfer should
- not be issued until the next "start of frame interrupt", hence the need to
- know if a channel is in running or stopped state.
-
- d) Bandwidth guarantee
- The PXA architecture has 4 levels of DMA priorities : high, normal, low.
- The high priorities get twice as much bandwidth as the normal, which get twice
- as much as the low priorities.
- A driver should be able to request a priority, especially the real-time
- ones such as pxa_camera with (big) throughputs.
-
-Design
-------
- a) Virtual channels
- Same concept as in sa11x0 driver, ie. a driver was assigned a "virtual
- channel" linked to the requestor line, and the physical DMA channel is
- assigned on the fly when the transfer is issued.
-
- b) Transfer anatomy for a scatter-gather transfer
- +------------+-----+---------------+----------------+-----------------+
- | desc-sg[0] | ... | desc-sg[last] | status updater | finisher/linker |
- +------------+-----+---------------+----------------+-----------------+
-
- This structure is pointed by dma->sg_cpu.
- The descriptors are used as follows :
- - desc-sg[i]: i-th descriptor, transferring the i-th sg
- element to the video buffer scatter gather
- - status updater
- Transfers a single u32 to a well known dma coherent memory to leave
- a trace that this transfer is done. The "well known" is unique per
- physical channel, meaning that a read of this value will tell which
- is the last finished transfer at that point in time.
- - finisher: has ddadr=DADDR_STOP, dcmd=ENDIRQEN
- - linker: has ddadr= desc-sg[0] of next transfer, dcmd=0
-
- c) Transfers hot-chaining
- Suppose the running chain is :
- Buffer 1 Buffer 2
- +---------+----+---+ +----+----+----+---+
- | d0 | .. | dN | l | | d0 | .. | dN | f |
- +---------+----+-|-+ ^----+----+----+---+
- | |
- +----+
-
- After a call to dmaengine_submit(b3), the chain will look like :
- Buffer 1 Buffer 2 Buffer 3
- +---------+----+---+ +----+----+----+---+ +----+----+----+---+
- | d0 | .. | dN | l | | d0 | .. | dN | l | | d0 | .. | dN | f |
- +---------+----+-|-+ ^----+----+----+-|-+ ^----+----+----+---+
- | | | |
- +----+ +----+
- new_link
-
- If while new_link was created the DMA channel stopped, it is _not_
- restarted. Hot-chaining doesn't break the assumption that
- dma_async_issue_pending() is to be used to ensure the transfer is actually started.
-
- One exception to this rule :
- - if Buffer1 and Buffer2 had all their addresses 8 bytes aligned
- - and if Buffer3 has at least one address not 4 bytes aligned
- - then hot-chaining cannot happen, as the channel must be stopped, the
- "align bit" must be set, and the channel restarted As a consequence,
- such a transfer tx_submit() will be queued on the submitted queue, and
- this specific case if the DMA is already running in aligned mode.
-
- d) Transfers completion updater
- Each time a transfer is completed on a channel, an interrupt might be
- generated or not, up to the client's request. But in each case, the last
- descriptor of a transfer, the "status updater", will write the latest
- transfer being completed into the physical channel's completion mark.
-
- This will speed up residue calculation, for large transfers such as video
- buffers which hold around 6k descriptors or more. This also allows without
- any lock to find out what is the latest completed transfer in a running
- DMA chain.
-
- e) Transfers completion, irq and tasklet
- When a transfer flagged as "DMA_PREP_INTERRUPT" is finished, the dma irq
- is raised. Upon this interrupt, a tasklet is scheduled for the physical
- channel.
- The tasklet is responsible for :
- - reading the physical channel last updater mark
- - calling all the transfer callbacks of finished transfers, based on
- that mark, and each transfer flags.
- If a transfer is completed while this handling is done, a dma irq will
- be raised, and the tasklet will be scheduled once again, having a new
- updater mark.
-
- f) Residue
- Residue granularity will be descriptor based. The issued but not completed
- transfers will be scanned for all of their descriptors against the
- currently running descriptor.
-
- g) Most complicated case of driver's tx queues
- The most tricky situation is when :
- - there are not "acked" transfers (tx0)
- - a driver submitted an aligned tx1, not chained
- - a driver submitted an aligned tx2 => tx2 is cold chained to tx1
- - a driver issued tx1+tx2 => channel is running in aligned mode
- - a driver submitted an aligned tx3 => tx3 is hot-chained
- - a driver submitted an unaligned tx4 => tx4 is put in submitted queue,
- not chained
- - a driver issued tx4 => tx4 is put in issued queue, not chained
- - a driver submitted an aligned tx5 => tx5 is put in submitted queue, not
- chained
- - a driver submitted an aligned tx6 => tx6 is put in submitted queue,
- cold chained to tx5
-
- This translates into (after tx4 is issued) :
- - issued queue
- +-----+ +-----+ +-----+ +-----+
- | tx1 | | tx2 | | tx3 | | tx4 |
- +---|-+ ^---|-+ ^-----+ +-----+
- | | | |
- +---+ +---+
- - submitted queue
- +-----+ +-----+
- | tx5 | | tx6 |
- +---|-+ ^-----+
- | |
- +---+
- - completed queue : empty
- - allocated queue : tx0
-
- It should be noted that after tx3 is completed, the channel is stopped, and
- restarted in "unaligned mode" to handle tx4.
-
-Author: Robert Jarzmik <robert.jarzmik@free.fr>
diff --git a/Documentation/doc-guide/kernel-doc.rst b/Documentation/doc-guide/kernel-doc.rst
index b24854b5d6be..0268335414ce 100644
--- a/Documentation/doc-guide/kernel-doc.rst
+++ b/Documentation/doc-guide/kernel-doc.rst
@@ -65,7 +65,7 @@ Without options, the kernel-doc directive includes all documentation comments
from the source file.
The kernel-doc extension is included in the kernel source tree, at
-``Documentation/sphinx/kernel-doc.py``. Internally, it uses the
+``Documentation/sphinx/kerneldoc.py``. Internally, it uses the
``scripts/kernel-doc`` script to extract the documentation comments from the
source.
diff --git a/Documentation/driver-api/dmaengine/client.rst b/Documentation/driver-api/dmaengine/client.rst
new file mode 100644
index 000000000000..6245c99af8c1
--- /dev/null
+++ b/Documentation/driver-api/dmaengine/client.rst
@@ -0,0 +1,275 @@
+====================
+DMA Engine API Guide
+====================
+
+Vinod Koul <vinod dot koul at intel.com>
+
+.. note:: For DMA Engine usage in async_tx please see:
+ ``Documentation/crypto/async-tx-api.txt``
+
+
+Below is a guide for device driver writers on how to use the Slave-DMA API of
+the DMA Engine. It is applicable to slave DMA usage only.
+
+DMA usage
+=========
+
+The slave DMA usage consists of following steps:
+
+- Allocate a DMA slave channel
+
+- Set slave and controller specific parameters
+
+- Get a descriptor for transaction
+
+- Submit the transaction
+
+- Issue pending requests and wait for callback notification
+
+The details of these operations are:
+
+1. Allocate a DMA slave channel
+
+ Channel allocation is slightly different in the slave DMA context:
+ client drivers typically need a channel from a particular DMA
+ controller only, and in some cases even a specific channel is desired.
+ To request a channel, the dma_request_chan() API is used.
+
+ Interface:
+
+ .. code-block:: c
+
+ struct dma_chan *dma_request_chan(struct device *dev, const char *name);
+
+ This will find and return the ``name`` DMA channel associated with the 'dev'
+ device. The association is done via DT, ACPI or a board-file based
+ dma_slave_map matching table.
+
+ A channel allocated via this interface is exclusive to the caller,
+ until dma_release_channel() is called.
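+
+ For example, a platform driver might request its channel at probe time as
+ in the following sketch (the "rx" channel name and the error handling are
+ illustrative only):
+
+ .. code-block:: c
+
+    struct dma_chan *chan;
+
+    chan = dma_request_chan(&pdev->dev, "rx");
+    if (IS_ERR(chan))
+            return PTR_ERR(chan); /* also propagates -EPROBE_DEFER */
+
+    /* ... use the channel ... */
+
+    dma_release_channel(chan);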
+
+2. Set slave and controller specific parameters
+
+ The next step is always to pass some specific information to the DMA
+ driver. Most of the generic information which a slave DMA can use
+ is in struct dma_slave_config. This allows the clients to specify
+ DMA direction, DMA addresses, bus widths, DMA burst lengths etc.
+ for the peripheral.
+
+ If a DMA controller has more parameters to be sent, it should
+ embed struct dma_slave_config in its controller-specific
+ structure. That gives flexibility to the client to pass more
+ parameters, if required.
+
+ Interface:
+
+ .. code-block:: c
+
+ int dmaengine_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+
+ Please see the dma_slave_config structure definition in dmaengine.h
+ for a detailed explanation of the struct members. Please note
+ that the 'direction' member will be going away as it duplicates the
+ direction given in the prepare call.
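+
+ As an illustration, a driver preparing device-to-memory transfers might
+ configure its channel as in this sketch (the FIFO address, bus width and
+ burst length are hypothetical values):
+
+ .. code-block:: c
+
+    struct dma_slave_config cfg = { };
+    int ret;
+
+    cfg.direction = DMA_DEV_TO_MEM;
+    cfg.src_addr = fifo_phys_addr; /* hypothetical FIFO bus address */
+    cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+    cfg.src_maxburst = 8;
+
+    ret = dmaengine_slave_config(chan, &cfg);
+    if (ret)
+            return ret;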
+
+3. Get a descriptor for transaction
+
+ For slave usage the various modes of slave transfers supported by the
+ DMA-engine are:
+
+ - slave_sg: DMA a list of scatter gather buffers from/to a peripheral
+
+ - dma_cyclic: Perform a cyclic DMA operation from/to a peripheral until the
+ operation is explicitly stopped.
+
+ - interleaved_dma: This is common to Slave as well as M2M clients. For slave
+ usage the address of the device's FIFO may already be known to the driver.
+ Various types of operations can be expressed by setting
+ appropriate values in the 'dma_interleaved_template' members.
+
+ A non-NULL return of this transfer API represents a "descriptor" for
+ the given transaction.
+
+ Interface:
+
+ .. code-block:: c
+
+ struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags);
+
+ struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags);
+
+ struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags);
+
+ The peripheral driver is expected to have mapped the scatterlist for
+ the DMA operation prior to calling dmaengine_prep_slave_sg(), and must
+ keep the scatterlist mapped until the DMA operation has completed.
+ The scatterlist must be mapped using the DMA struct device.
+ If a mapping needs to be synchronized later, dma_sync_*_for_*() must be
+ called using the DMA struct device, too.
+ So, normal setup should look like this:
+
+ .. code-block:: c
+
+ /* dir: the dma_data_direction matching the transfer direction */
+ nr_sg = dma_map_sg(chan->device->dev, sgl, sg_len, dir);
+ if (nr_sg == 0)
+ /* error */
+
+ desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, direction, flags);
+
+ Once a descriptor has been obtained, the callback information can be
+ added and the descriptor must then be submitted. Some DMA engine
+ drivers may hold a spinlock between a successful preparation and
+ submission so it is important that these two operations are closely
+ paired.
+
+ .. note::
+
+ Although the async_tx API specifies that completion callback
+ routines cannot submit any new operations, this is not the
+ case for slave/cyclic DMA.
+
+ For slave DMA, the subsequent transaction may not be available
+ for submission prior to callback function being invoked, so
+ slave DMA callbacks are permitted to prepare and submit a new
+ transaction.
+
+ For cyclic DMA, a callback function may wish to terminate the
+ DMA via dmaengine_terminate_async().
+
+ Therefore, it is important that DMA engine drivers drop any
+ locks before calling the callback function which may cause a
+ deadlock.
+
+ Note that callbacks will always be invoked from the DMA
+ engine's tasklet, never from interrupt context.
+
+4. Submit the transaction
+
+ Once the descriptor has been prepared and the callback information
+ added, it must be placed on the DMA engine driver's pending queue.
+
+ Interface:
+
+ .. code-block:: c
+
+ dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
+
+ This returns a cookie that can be used to check the progress of DMA engine
+ activity via other DMA engine calls not covered in this document.
+
+ dmaengine_submit() will not start the DMA operation; it merely adds
+ it to the pending queue. For this, see step 5, dma_async_issue_pending.
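+
+ Putting steps 3 and 4 together, a typical sequence looks roughly like the
+ sketch below (the callback and its context pointer are driver-specific and
+ purely illustrative):
+
+ .. code-block:: c
+
+    desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, DMA_DEV_TO_MEM,
+                                   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+    if (!desc)
+            return -ENOMEM;
+
+    desc->callback = foo_dma_callback; /* hypothetical client callback */
+    desc->callback_param = foo_priv;   /* hypothetical client context */
+
+    cookie = dmaengine_submit(desc);
+    if (dma_submit_error(cookie))
+            return -EIO;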
+
+5. Issue pending DMA requests and wait for callback notification
+
+ The transactions in the pending queue can be activated by calling the
+ issue_pending API. If the channel is idle then the first transaction in
+ the queue is started and subsequent ones are queued up.
+
+ On completion of each DMA operation, the next in queue is started and
+ a tasklet triggered. The tasklet will then call the client driver
+ completion callback routine for notification, if set.
+
+ Interface:
+
+ .. code-block:: c
+
+ void dma_async_issue_pending(struct dma_chan *chan);
+
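+ A common pattern for a client that must wait for a single transfer is to
+ have the callback complete a completion object and to wait for it after
+ issuing, as in this sketch (the names and the timeout are illustrative
+ only):
+
+ .. code-block:: c
+
+    static void my_dma_complete(void *param)
+    {
+            complete(param);
+    }
+
+    /* submission path (continuing from step 4's descriptor) */
+    DECLARE_COMPLETION_ONSTACK(dma_done);
+
+    desc->callback = my_dma_complete;
+    desc->callback_param = &dma_done;
+    dmaengine_submit(desc);
+
+    dma_async_issue_pending(chan);
+
+    if (!wait_for_completion_timeout(&dma_done, msecs_to_jiffies(100)))
+            dmaengine_terminate_sync(chan);
+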
+Further APIs
+------------
+
+1. Terminate APIs
+
+ .. code-block:: c
+
+ int dmaengine_terminate_sync(struct dma_chan *chan)
+ int dmaengine_terminate_async(struct dma_chan *chan)
+ int dmaengine_terminate_all(struct dma_chan *chan) /* DEPRECATED */
+
+ This causes all activity for the DMA channel to be stopped, and may
+ discard data in the DMA FIFO which hasn't been fully transferred.
+ No callback functions will be called for any incomplete transfers.
+
+ Two variants of this function are available.
+
+ dmaengine_terminate_async() might not wait until the DMA has been fully
+ stopped or until any running complete callbacks have finished. But it is
+ possible to call dmaengine_terminate_async() from atomic context or from
+ within a complete callback. dmaengine_synchronize() must be called before it
+ is safe to free the memory accessed by the DMA transfer or free resources
+ accessed from within the complete callback.
+
+ dmaengine_terminate_sync() will wait for the transfer and any running
+ complete callbacks to finish before it returns. But the function must not be
+ called from atomic context or from within a complete callback.
+
+ dmaengine_terminate_all() is deprecated and should not be used in new code.
+
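+ For instance, a teardown path split across contexts could look like this
+ purely illustrative sketch:
+
+ .. code-block:: c
+
+    /* e.g. from a timeout handler running in atomic context */
+    dmaengine_terminate_async(chan);
+
+    /* later, from process context, before freeing DMA'd memory */
+    dmaengine_synchronize(chan);
+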
+2. Pause API
+
+ .. code-block:: c
+
+ int dmaengine_pause(struct dma_chan *chan)
+
+ This pauses activity on the DMA channel without data loss.
+
+3. Resume API
+
+ .. code-block:: c
+
+ int dmaengine_resume(struct dma_chan *chan)
+
+ Resume a previously paused DMA channel. It is invalid to resume a
+ channel which is not currently paused.
+
+4. Check Txn complete
+
+ .. code-block:: c
+
+ enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
+ dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
+
+ This can be used to check the status of the channel. Please see
+ the documentation in include/linux/dmaengine.h for a more complete
+ description of this API.
+
+ This can be used in conjunction with dma_async_is_complete() and
+ the cookie returned from dmaengine_submit() to check for
+ completion of a specific DMA transaction.
+
+ .. note::
+
+ Not all DMA engine drivers can return reliable information for
+ a running DMA channel. It is recommended that DMA engine users
+ pause or stop (via dmaengine_terminate_all()) the channel before
+ using this API.
+
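+ For example, a client polling for completion of one transaction might do
+ something like the following sketch:
+
+ .. code-block:: c
+
+    enum dma_status status;
+    dma_cookie_t last, used;
+
+    status = dma_async_is_tx_complete(chan, cookie, &last, &used);
+    if (status == DMA_COMPLETE)
+            /* the transaction identified by 'cookie' has finished */;
+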
+5. Synchronize termination API
+
+ .. code-block:: c
+
+ void dmaengine_synchronize(struct dma_chan *chan)
+
+ Synchronize the termination of the DMA channel to the current context.
+
+ This function should be used after dmaengine_terminate_async() to synchronize
+ the termination of the DMA channel to the current context. The function will
+ wait for the transfer and any running complete callbacks to finish before it
+ returns.
+
+ If dmaengine_terminate_async() is used to stop the DMA channel this function
+ must be called before it is safe to free memory accessed by previously
+ submitted descriptors or to free any resources accessed within the complete
+ callback of previously submitted descriptors.
+
+ The behavior of this function is undefined if dma_async_issue_pending() has
+ been called between dmaengine_terminate_async() and this function.
diff --git a/Documentation/dmaengine/dmatest.txt b/Documentation/driver-api/dmaengine/dmatest.rst
index fb683c72dea8..3922c0a3f0c0 100644
--- a/Documentation/dmaengine/dmatest.txt
+++ b/Documentation/driver-api/dmaengine/dmatest.rst
@@ -1,11 +1,13 @@
- DMA Test Guide
- ==============
+==============
+DMA Test Guide
+==============
- Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Andy Shevchenko <andriy.shevchenko@linux.intel.com>
This small document introduces how to test DMA drivers using dmatest module.
- Part 1 - How to build the test module
+Part 1 - How to build the test module
+=====================================
The menuconfig contains an option that could be found by following path:
Device Drivers -> DMA Engine support -> DMA Test client
@@ -13,25 +15,31 @@ The menuconfig contains an option that could be found by following path:
In the configuration file the option called CONFIG_DMATEST. The dmatest could
be built as module or inside kernel. Let's consider those cases.
- Part 2 - When dmatest is built as a module...
+Part 2 - When dmatest is built as a module
+==========================================
-Example of usage:
- % modprobe dmatest channel=dma0chan0 timeout=2000 iterations=1 run=1
+Example of usage: ::
-...or:
- % modprobe dmatest
- % echo dma0chan0 > /sys/module/dmatest/parameters/channel
- % echo 2000 > /sys/module/dmatest/parameters/timeout
- % echo 1 > /sys/module/dmatest/parameters/iterations
- % echo 1 > /sys/module/dmatest/parameters/run
+ % modprobe dmatest channel=dma0chan0 timeout=2000 iterations=1 run=1
-...or on the kernel command line:
+...or: ::
- dmatest.channel=dma0chan0 dmatest.timeout=2000 dmatest.iterations=1 dmatest.run=1
+ % modprobe dmatest
+ % echo dma0chan0 > /sys/module/dmatest/parameters/channel
+ % echo 2000 > /sys/module/dmatest/parameters/timeout
+ % echo 1 > /sys/module/dmatest/parameters/iterations
+ % echo 1 > /sys/module/dmatest/parameters/run
-Hint: available channel list could be extracted by running the following
-command:
- % ls -1 /sys/class/dma/
+...or on the kernel command line: ::
+
+ dmatest.channel=dma0chan0 dmatest.timeout=2000 dmatest.iterations=1 dmatest.run=1
+
+.. hint:: The list of available channels can be obtained by running the
+ following command:
+
+::
+
+ % ls -1 /sys/class/dma/
Once started a message like "dmatest: Started 1 threads using dma0chan0" is
emitted. After that only test failure messages are reported until the test
@@ -39,8 +47,9 @@ stops.
Note that running a new test will not stop any in progress test.
-The following command returns the state of the test.
- % cat /sys/module/dmatest/parameters/run
+The following command returns the state of the test. ::
+
+ % cat /sys/module/dmatest/parameters/run
To wait for test completion userpace can poll 'run' until it is false, or use
the wait parameter. Specifying 'wait=1' when loading the module causes module
@@ -50,15 +59,19 @@ before returning. For example, the following scripts wait for 42 tests
to complete before exiting. Note that if 'iterations' is set to 'infinite' then
waiting is disabled.
-Example:
- % modprobe dmatest run=1 iterations=42 wait=1
- % modprobe -r dmatest
-...or:
- % modprobe dmatest run=1 iterations=42
- % cat /sys/module/dmatest/parameters/wait
- % modprobe -r dmatest
+Example: ::
+
+ % modprobe dmatest run=1 iterations=42 wait=1
+ % modprobe -r dmatest
- Part 3 - When built-in in the kernel...
+...or: ::
+
+ % modprobe dmatest run=1 iterations=42
+ % cat /sys/module/dmatest/parameters/wait
+ % modprobe -r dmatest
+
+Part 3 - When built-in in the kernel
+====================================
The module parameters that is supplied to the kernel command line will be used
for the first performed test. After user gets a control, the test could be
@@ -66,27 +79,32 @@ re-run with the same or different parameters. For the details see the above
section "Part 2 - When dmatest is built as a module..."
In both cases the module parameters are used as the actual values for the test
-case. You always could check them at run-time by running
- % grep -H . /sys/module/dmatest/parameters/*
+case. You always could check them at run-time by running ::
+
+ % grep -H . /sys/module/dmatest/parameters/*
- Part 4 - Gathering the test results
+Part 4 - Gathering the test results
+===================================
-Test results are printed to the kernel log buffer with the format:
+Test results are printed to the kernel log buffer with the format: ::
-"dmatest: result <channel>: <test id>: '<error msg>' with src_off=<val> dst_off=<val> len=<val> (<err code>)"
+ "dmatest: result <channel>: <test id>: '<error msg>' with src_off=<val> dst_off=<val> len=<val> (<err code>)"
-Example of output:
- % dmesg | tail -n 1
- dmatest: result dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0)
+Example of output: ::
+
+ % dmesg | tail -n 1
+ dmatest: result dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0)
The message format is unified across the different types of errors. A number in
the parens represents additional information, e.g. error code, error counter,
or status. A test thread also emits a summary line at completion listing the
number of tests executed, number that failed, and a result code.
-Example:
- % dmesg | tail -n 1
- dmatest: dma0chan0-copy0: summary 1 test, 0 failures 1000 iops 100000 KB/s (0)
+Example: ::
+
+ % dmesg | tail -n 1
+ dmatest: dma0chan0-copy0: summary 1 test, 0 failures 1000 iops 100000 KB/s (0)
The details of a data miscompare error are also emitted, but do not follow the
above format.
diff --git a/Documentation/driver-api/dmaengine/index.rst b/Documentation/driver-api/dmaengine/index.rst
new file mode 100644
index 000000000000..3026fa975937
--- /dev/null
+++ b/Documentation/driver-api/dmaengine/index.rst
@@ -0,0 +1,55 @@
+=======================
+DMAEngine documentation
+=======================
+
+DMAEngine documentation provides documents for various aspects of the DMAEngine
+framework.
+
+DMAEngine documentation
+-----------------------
+
+This book covers the DMAEngine internal APIs and serves as a guide for DMAEngine
+device driver writers.
+
+.. toctree::
+ :maxdepth: 1
+
+ provider
+
+DMAEngine client documentation
+------------------------------
+
+This book is a guide for device driver writers on how to use the Slave-DMA
+API of the DMAEngine. It is applicable to slave DMA usage only.
+
+.. toctree::
+ :maxdepth: 1
+
+ client
+
+DMA Test documentation
+----------------------
+
+This book introduces how to test DMA drivers using dmatest module.
+
+.. toctree::
+ :maxdepth: 1
+
+ dmatest
+
+PXA DMA documentation
+----------------------
+
+This book adds some notes about PXA DMA.
+
+.. toctree::
+ :maxdepth: 1
+
+ pxa_dma
+
+.. only:: subproject
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst
new file mode 100644
index 000000000000..814acb4d2294
--- /dev/null
+++ b/Documentation/driver-api/dmaengine/provider.rst
@@ -0,0 +1,508 @@
+==================================
+DMAengine controller documentation
+==================================
+
+Hardware Introduction
+=====================
+
+Most of the Slave DMA controllers have the same general principles of
+operation.
+
+They have a given number of channels to use for the DMA transfers, and
+a given number of request lines.
+
+Requests and channels are pretty much orthogonal. Channels can be used
+to serve any of the requests. To simplify, channels are the
+entities that will be doing the copy, and requests define which endpoints are
+involved.
+
+The request lines actually correspond to physical lines going from the
+DMA-eligible devices to the controller itself. Whenever the device
+wants to start a transfer, it will assert a DMA request (DRQ) by
+asserting that request line.
+
+A very simple DMA controller would only take into account a single
+parameter: the transfer size. At each clock cycle, it would transfer a
+byte of data from one buffer to another, until the transfer size has
+been reached.
+
+That wouldn't work well in the real world, since slave devices might
+require a specific number of bits to be transferred in a single
+cycle. For example, we may want to transfer as much data as the
+physical bus allows to maximize performance when doing a simple
+memory copy operation, but our audio device could have a narrower FIFO
+that requires data to be written exactly 16 or 24 bits at a time. This
+is why most if not all of the DMA controllers can adjust this, using a
+parameter called the transfer width.
+
+Moreover, some DMA controllers, whenever the RAM is used as a source
+or destination, can group the reads or writes in memory into a buffer,
+so instead of having a lot of small memory accesses, which is not
+really efficient, you'll get several bigger transfers. This is done
+using a parameter called the burst size, that defines how many single
+reads/writes it's allowed to do without the controller splitting the
+transfer into smaller sub-transfers.
+
+Our theoretical DMA controller would then only be able to do transfers
+that involve a single contiguous block of data. However, some of the
+transfers we usually have are not like that: they copy data from
+non-contiguous buffers to a contiguous buffer, which is called
+scatter-gather.
+
+DMAEngine, at least for mem2dev transfers, requires support for
+scatter-gather. So we're left with two cases here: either we have a
+quite simple DMA controller that doesn't support it, and we'll have to
+implement it in software, or we have a more advanced DMA controller
+that implements scatter-gather in hardware.
+
+The latter are usually programmed using a collection of chunks to
+transfer, and whenever the transfer is started, the controller will go
+over that collection, doing whatever we programmed there.
+
+This collection is usually either a table or a linked list. You will
+then push either the address of the table and its number of elements,
+or the first item of the list to one channel of the DMA controller,
+and whenever a DRQ will be asserted, it will go through the collection
+to know where to fetch the data from.
+
+Either way, the format of this collection is completely dependent on
+your hardware. Each DMA controller will require a different structure,
+but all of them will require, for every chunk, at least the source and
+destination addresses, whether it should increment these addresses or
+not and the three parameters we saw earlier: the burst size, the
+transfer width and the transfer size.
+
+One last thing: usually, slave devices won't issue a DRQ by
+default, and you have to enable this in your slave device driver first
+whenever you're willing to use DMA.
+
+These were just the general memory-to-memory (also called mem2mem) or
+memory-to-device (mem2dev) kinds of transfers. Most devices
+support other kinds of transfers or memory operations that dmaengine
+supports, and those will be detailed later in this document.
+
+DMA Support in Linux
+====================
+
+Historically, DMA controller drivers have been implemented using the
+async TX API, to offload operations such as memory copy, XOR,
+cryptography, etc., basically any memory to memory operation.
+
+Over time, the need for memory to device transfers arose, and
+dmaengine was extended. Nowadays, the async TX API is written as a
+layer on top of dmaengine, and acts as a client. Still, dmaengine
+accommodates that API in some cases, and made some design choices to
+ensure that it stayed compatible.
+
+For more information on the Async TX API, please refer to the relevant
+documentation file in Documentation/crypto/async-tx-api.txt.
+
+DMAEngine APIs
+==============
+
+``struct dma_device`` Initialization
+------------------------------------
+
+Just like any other kernel framework, the whole DMAEngine registration
+relies on the driver filling a structure and registering against the
+framework. In our case, that structure is dma_device.
+
+The first thing you need to do in your driver is to allocate this
+structure. Any of the usual memory allocators will do, but you'll also
+need to initialize a few fields in there:
+
+- channels: should be initialized as a list using the
+ INIT_LIST_HEAD macro for example
+
+- src_addr_widths:
+ should contain a bitmask of the supported source transfer widths
+
+- dst_addr_widths:
+ should contain a bitmask of the supported destination transfer widths
+
+- directions:
+ should contain a bitmask of the supported slave directions
+ (i.e. excluding mem2mem transfers)
+
+- residue_granularity:
+
+ - Granularity of the transfer residue reported to dma_set_residue.
+ This can be either:
+
+ - Descriptor
+
+ - Your device doesn't support any kind of residue
+ reporting. The framework will only know that a particular
+ transaction descriptor is done.
+
+ - Segment
+
+ - Your device is able to report which chunks have been transferred
+
+ - Burst
+
+ - Your device is able to report which bursts have been transferred
+
+ - dev: should hold the pointer to the ``struct device`` associated
+ with your current driver instance.
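+
+As a rough sketch, that initialization in a hypothetical driver's probe
+could look like the following (the ``foo_dma_dev`` structure and the chosen
+widths, directions and granularity are examples only):
+
+.. code-block:: c
+
+   struct foo_dma_dev *fd; /* hypothetical driver private structure */
+   struct dma_device *dd;
+
+   fd = devm_kzalloc(&pdev->dev, sizeof(*fd), GFP_KERNEL);
+   if (!fd)
+           return -ENOMEM;
+
+   dd = &fd->ddev; /* embedded struct dma_device */
+   INIT_LIST_HEAD(&dd->channels);
+   dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+   dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+   dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+   dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+   dd->dev = &pdev->dev;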
+
+Supported transaction types
+---------------------------
+
+The next thing you need is to set which transaction types your device
+(and driver) supports.
+
+Our ``dma_device`` structure has a field called cap_mask that holds the
+various types of transaction supported, and you need to modify this
+mask using the dma_cap_set function, with various flags depending on
+transaction types you support as an argument.
+
+All those capabilities are defined in the ``dma_transaction_type`` enum,
+in ``include/linux/dmaengine.h``.
+
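+Continuing the hypothetical driver sketched in the previous section, a
+controller supporting slave and cyclic transfers would set its mask as
+follows:
+
+.. code-block:: c
+
+   dma_cap_zero(dd->cap_mask);
+   dma_cap_set(DMA_SLAVE, dd->cap_mask);
+   dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+   dma_cap_set(DMA_PRIVATE, dd->cap_mask);
+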
+Currently, the types available are:
+
+- DMA_MEMCPY
+
+ - The device is able to do memory to memory copies
+
+- DMA_XOR
+
+ - The device is able to perform XOR operations on memory areas
+
+ - Used to accelerate XOR intensive tasks, such as RAID5
+
+- DMA_XOR_VAL
+
+ - The device is able to perform parity check using the XOR
+ algorithm against a memory buffer.
+
+- DMA_PQ
+
+ - The device is able to perform RAID6 P+Q computations, P being a
+ simple XOR, and Q being a Reed-Solomon algorithm.
+
+- DMA_PQ_VAL
+
+ - The device is able to perform parity check using RAID6 P+Q
+ algorithm against a memory buffer.
+
+- DMA_INTERRUPT
+
+ - The device is able to trigger a dummy transfer that will
+ generate periodic interrupts
+
+ - Used by the client drivers to register a callback that will be
+ called on a regular basis through the DMA controller interrupt
+
+- DMA_PRIVATE
+
+ - The device only supports slave transfers, and as such isn't
+ available for async transfers.
+
+- DMA_ASYNC_TX
+
+ - Must not be set by the device, and will be set by the framework
+ if needed
+
+ - TODO: What is it about?
+
+- DMA_SLAVE
+
+ - The device can handle device to memory transfers, including
+ scatter-gather transfers.
+
+ - While in the mem2mem case we were having two distinct types to
+ deal with a single chunk to copy or a collection of them, here,
+ we just have a single transaction type that is supposed to
+ handle both.
+
+ - If you want to transfer a single contiguous memory buffer,
+ simply build a scatter list with only one item.
+
+- DMA_CYCLIC
+
+ - The device can handle cyclic transfers.
+
+ - A cyclic transfer is a transfer where the chunk collection will
+ loop over itself, with the last item pointing to the first.
+
+ - It's usually used for audio transfers, where you want to operate
+ on a single ring buffer that you will fill with your audio data.
+
+- DMA_INTERLEAVE
+
+ - The device supports interleaved transfer.
+
+ - These transfers can transfer data from a non-contiguous buffer
+ to a non-contiguous buffer, as opposed to DMA_SLAVE, which can
+ transfer data from a non-contiguous data set to a contiguous
+ destination buffer.
+
+ - It's usually used for 2D content transfers, in which case you
+ want to transfer a portion of uncompressed data directly to the
+ display to print it.
+
+These various types will also affect how the source and destination
+addresses change over time.
+
+Addresses pointing to RAM are typically incremented (or decremented)
+after each transfer. In case of a ring buffer, they may loop
+(DMA_CYCLIC). Addresses pointing to a device's register (e.g. a FIFO)
+are typically fixed.
+
+Device operations
+-----------------
+
+Our dma_device structure also requires a few function pointers in
+order to implement the actual logic, now that we described what
+operations we were able to perform.
+
+The functions that we have to fill in there, and hence have to
+implement, obviously depend on the transaction types you reported as
+supported.
+
+- ``device_alloc_chan_resources``
+
+- ``device_free_chan_resources``
+
+ - These functions will be called whenever a driver will call
+ ``dma_request_channel`` or ``dma_release_channel`` for the first/last
+ time on the channel associated to that driver.
+
+ - They are in charge of allocating/freeing all the needed
+ resources in order for that channel to be useful for your driver.
+
+ - These functions can sleep.
+
+- ``device_prep_dma_*``
+
+ - These functions match the capabilities you registered
+ previously.
+
+ - These functions all take the buffer or the scatterlist relevant
+ for the transfer being prepared, and should create a hardware
+ descriptor or a list of hardware descriptors from it
+
+ - These functions can be called from an interrupt context
+
+ - Any allocation you might do should be using the GFP_NOWAIT
+ flag, in order not to potentially sleep, but without depleting
+ the emergency pool either.
+
+ - Drivers should try to pre-allocate any memory they might need
+ during the transfer setup at probe time to avoid putting too
+ much pressure on the nowait allocator.
+
+ - It should return a unique instance of the
+ ``dma_async_tx_descriptor`` structure, which further represents this
+ particular transfer (a minimal sketch of a prep callback appears at the
+ end of this section).
+
+ - This structure can be initialized using the function
+ ``dma_async_tx_descriptor_init``.
+
+ - You'll also need to set two fields in this structure:
+
+ - flags:
+ TODO: Can it be modified by the driver itself, or
+ should it be always the flags passed in the arguments
+
+ - tx_submit: A pointer to a function you have to implement,
+ that is supposed to push the current transaction descriptor to a
+ pending queue, waiting for issue_pending to be called.
+
+ - In this structure the function pointer callback_result can be
+ initialized in order for the submitter to be notified that a
+ transaction has completed. In earlier code the function pointer
+ callback was used. However, it does not provide any status of the
+ transaction and will be deprecated. The result structure, defined as
+ ``dmaengine_result``, that is passed in to callback_result
+ has two fields:
+
+ - result: This provides the transfer result defined by
+ ``dmaengine_tx_result``. Either success or some error condition.
+
+ - residue: Provides the residue bytes of the transfer for those that
+ support residue.
+
+- ``device_issue_pending``
+
+ - Takes the first transaction descriptor in the pending queue,
+ and starts the transfer. Whenever that transfer is done, it
+ should move to the next transaction in the list.
+
+ - This function can be called in an interrupt context
+
+- ``device_tx_status``
+
+ - Should report the bytes left to go over on the given channel
+
+ - Should only care about the transaction descriptor passed as
+ argument, not the currently active one on a given channel
+
+ - The tx_state argument might be NULL
+
+ - Should use dma_set_residue to report it
+
+ - In the case of a cyclic transfer, it should only take into
+ account the current period.
+
+ - This function can be called in an interrupt context.
+
+- device_config
+
+ - Reconfigures the channel with the configuration given as argument
+
+ - This command should NOT perform synchronously, or on any
+ currently queued transfers, but only on subsequent ones
+
+ - In this case, the function will receive a ``dma_slave_config``
+ structure pointer as an argument, that will detail which
+ configuration to use.
+
+ - Even though that structure contains a direction field, this
+ field is deprecated in favor of the direction argument given to
+ the prep_* functions
+
+ - This call is mandatory for slave operations only. This should NOT be
+ set or expected to be set for memcpy operations.
+ If a driver supports both, it should use this call for slave
+ operations only and not for memcpy ones.
+
+- device_pause
+
+ - Pauses a transfer on the channel
+
+ - This command should operate synchronously on the channel,
+ pausing right away the work of the given channel
+
+- device_resume
+
+ - Resumes a transfer on the channel
+
+ - This command should operate synchronously on the channel,
+ resuming right away the work of the given channel
+
+- device_terminate_all
+
+ - Aborts all the pending and ongoing transfers on the channel
+
+ - For aborted transfers the complete callback should not be called
+
+ - Can be called from atomic context or from within a complete
+ callback of a descriptor. Must not sleep. Drivers must be able
+ to handle this correctly.
+
+ - Termination may be asynchronous. The driver does not have to
+ wait until the currently active transfer has completely stopped.
+ See device_synchronize.
+
+- device_synchronize
+
+ - Must synchronize the termination of a channel to the current
+ context.
+
+ - Must make sure that memory for previously submitted
+ descriptors is no longer accessed by the DMA controller.
+
+ - Must make sure that all complete callbacks for previously
+ submitted descriptors have finished running and none are
+ scheduled to run.
+
+ - May sleep.
+
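+The prep callback mentioned above could, as a very rough sketch, look like
+the following; note that most real drivers build on the ``virt-dma`` helpers
+instead, all ``foo_*`` names are hypothetical, and locking as well as the
+actual hardware descriptor setup are omitted:
+
+.. code-block:: c
+
+   static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
+   {
+           /* assign a cookie and move the descriptor to a pending list */
+           return dma_cookie_assign(tx);
+   }
+
+   static struct dma_async_tx_descriptor *
+   foo_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+                     unsigned int sg_len, enum dma_transfer_direction dir,
+                     unsigned long flags, void *context)
+   {
+           struct foo_desc *fd; /* hypothetical, embeds the tx descriptor */
+
+           fd = kzalloc(sizeof(*fd), GFP_NOWAIT);
+           if (!fd)
+                   return NULL;
+
+           /* ... build the hardware descriptors from sgl here ... */
+
+           dma_async_tx_descriptor_init(&fd->tx, chan);
+           fd->tx.flags = flags;
+           fd->tx.tx_submit = foo_tx_submit;
+
+           return &fd->tx;
+   }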
+
+Misc notes
+==========
+
+(stuff that should be documented, but doesn't really fit
+anywhere else)
+
+``dma_run_dependencies``
+
+- Should be called at the end of an async TX transfer, and can be
+ ignored in the slave transfers case.
+
+- Makes sure that dependent operations are run before marking it
+ as complete.
+
+dma_cookie_t
+
+- it's a DMA transaction ID that will increment over time.
+
+- Not really relevant any more since the introduction of ``virt-dma``
+ that abstracts it away.
+
+DMA_CTRL_ACK
+
+- If clear, the descriptor cannot be reused by the provider until the
+ client acknowledges receipt, i.e. has had a chance to establish any
+ dependency chains
+
+- This can be acked by invoking async_tx_ack()
+
+- If set, does not mean descriptor can be reused
+
+DMA_CTRL_REUSE
+
+- If set, the descriptor can be reused after being completed. It should
+ not be freed by provider if this flag is set.
+
+- The descriptor should be prepared for reuse by invoking
+ ``dmaengine_desc_set_reuse()`` which will set DMA_CTRL_REUSE.
+
+- ``dmaengine_desc_set_reuse()`` will succeed only when the channel supports
+ reusable descriptors, as exhibited by its capabilities
+
+- As a consequence, if a device driver wants to skip the
+ ``dma_map_sg()`` and ``dma_unmap_sg()`` in between 2 transfers,
+ because the DMA'd data wasn't used, it can resubmit the transfer right after
+ its completion.
+
+- A descriptor can be freed in a few ways (see the sketch after these notes)
+
+ - Clearing DMA_CTRL_REUSE by invoking
+ ``dmaengine_desc_clear_reuse()`` and submitting it for the last transaction
+
+ - Explicitly invoking ``dmaengine_desc_free()``; this can succeed only
+ when DMA_CTRL_REUSE is already set
+
+ - Terminating the channel
+
+- DMA_PREP_CMD
+
+ - If set, the client driver tells DMA controller that passed data in DMA
+ API is command data.
+
+ - Interpretation of command data is DMA controller specific. It can be
+ used for issuing commands to other peripherals/register reads/register
+ writes for which the descriptor should be in different format from
+ normal data descriptors.
+
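+As a sketch of the descriptor reuse flow from a client's point of view
+(assuming the channel advertises reusable descriptors; error handling
+trimmed):
+
+.. code-block:: c
+
+   desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, DMA_MEM_TO_DEV,
+                                  DMA_PREP_INTERRUPT);
+   if (!desc)
+           return -ENOMEM;
+
+   if (dmaengine_desc_set_reuse(desc))
+           /* the channel does not support reusable descriptors */;
+
+   cookie = dmaengine_submit(desc);
+   dma_async_issue_pending(chan);
+
+   /* after completion the same descriptor may be resubmitted, and it
+    * must eventually be released explicitly: */
+   dmaengine_desc_free(desc);
+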
+General Design Notes
+====================
+
+Most of the DMAEngine drivers you'll see are based on a similar design
+that handles the end of transfer interrupts in the handler, but defer
+most work to a tasklet, including the start of a new transfer whenever
+the previous transfer ended.
+
+This is a rather inefficient design though, because the inter-transfer
+latency will be not only the interrupt latency, but also the
+scheduling latency of the tasklet, which will leave the channel idle
+in between and slow down the global transfer rate.
+
+You should avoid this kind of practice, and instead of electing the next
+transfer from your tasklet, move that part to the interrupt handler in
+order to have a shorter idle window (that we can't really avoid
+anyway).
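+
+A rough sketch of that pattern, in which the interrupt handler immediately
+starts the next queued descriptor and only the completion callbacks are
+deferred to the tasklet (all ``foo_*`` names and the list handling are
+hypothetical):
+
+.. code-block:: c
+
+   static irqreturn_t foo_dma_irq(int irq, void *data)
+   {
+           struct foo_chan *fc = data;
+
+           spin_lock(&fc->lock);
+           /* the descriptor that just finished goes to the completed list */
+           list_move_tail(&fc->active->node, &fc->completed);
+
+           /* start the next pending descriptor right away ... */
+           fc->active = list_first_entry_or_null(&fc->pending,
+                                                 struct foo_desc, node);
+           if (fc->active)
+                   foo_start_hw(fc, fc->active);
+           spin_unlock(&fc->lock);
+
+           /* ... and defer only the completion callbacks to the tasklet */
+           tasklet_schedule(&fc->task);
+
+           return IRQ_HANDLED;
+   }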
+
+Glossary
+========
+
+- Burst: A number of consecutive read or write operations that
+ can be queued to buffers before being flushed to memory.
+
+- Chunk: A contiguous collection of bursts
+
+- Transfer: A collection of chunks (be it contiguous or not)
diff --git a/Documentation/driver-api/dmaengine/pxa_dma.rst b/Documentation/driver-api/dmaengine/pxa_dma.rst
new file mode 100644
index 000000000000..442ee691a190
--- /dev/null
+++ b/Documentation/driver-api/dmaengine/pxa_dma.rst
@@ -0,0 +1,190 @@
+==============================
+PXA/MMP - DMA Slave controller
+==============================
+
+Constraints
+===========
+
+a) Transfers hot queuing
+A driver submitting a transfer and issuing it should be granted that the
+transfer is queued even on a running DMA channel.
+This implies that the queuing doesn't wait for the previous transfer to end,
+and that the descriptor chaining is not only done in the irq/tasklet code
+triggered by the end of the transfer.
+A transfer which is submitted and issued on a phy doesn't wait for a phy to
+stop and restart, but is submitted on a "running channel". The other
+drivers, especially mmp_pdma, waited for the phy to stop before relaunching
+a new transfer.
+
+b) All transfers having asked for confirmation should be signaled
+Any issued transfer with DMA_PREP_INTERRUPT should trigger a callback call.
+This implies that even if an irq/tasklet is triggered by end of tx1, but
+at the time of irq/dma tx2 is already finished, tx1->complete() and
+tx2->complete() should be called.
+
+c) Channel running state
+A driver should be able to query if a channel is running or not. For the
+multimedia case, such as video capture, if a transfer is submitted and then
+a check of the DMA channel reports a "stopped channel", the transfer should
+not be issued until the next "start of frame interrupt", hence the need to
+know if a channel is in running or stopped state.
+
+d) Bandwidth guarantee
+The PXA architecture has 4 levels of DMA priorities: high, normal, low.
+The high priorities get twice as much bandwidth as the normal, which get twice
+as much as the low priorities.
+A driver should be able to request a priority, especially the real-time
+ones such as pxa_camera with (big) throughputs.
+
+Design
+======
+
+a) Virtual channels
+Same concept as in the sa11x0 driver, i.e. a driver was assigned a "virtual
+channel" linked to the requestor line, and the physical DMA channel is
+assigned on the fly when the transfer is issued.
+
+b) Transfer anatomy for a scatter-gather transfer
+
+::
+
+ +------------+-----+---------------+----------------+-----------------+
+ | desc-sg[0] | ... | desc-sg[last] | status updater | finisher/linker |
+ +------------+-----+---------------+----------------+-----------------+
+
+This structure is pointed to by dma->sg_cpu.
+The descriptors are used as follows:
+
+ - desc-sg[i]: i-th descriptor, transferring the i-th sg
+ element to the video buffer scatter gather
+
+ - status updater
+ Transfers a single u32 to a well known dma coherent memory to leave
+ a trace that this transfer is done. The "well known" is unique per
+ physical channel, meaning that a read of this value will tell which
+ is the last finished transfer at that point in time.
+
+ - finisher: has ddadr=DADDR_STOP, dcmd=ENDIRQEN
+
+ - linker: has ddadr= desc-sg[0] of next transfer, dcmd=0
+
+c) Transfers hot-chaining
+Suppose the running chain is:
+
+::
+
+ Buffer 1 Buffer 2
+ +---------+----+---+ +----+----+----+---+
+ | d0 | .. | dN | l | | d0 | .. | dN | f |
+ +---------+----+-|-+ ^----+----+----+---+
+ | |
+ +----+
+
+After a call to dmaengine_submit(b3), the chain will look like:
+
+::
+
+ Buffer 1 Buffer 2 Buffer 3
+ +---------+----+---+ +----+----+----+---+ +----+----+----+---+
+ | d0 | .. | dN | l | | d0 | .. | dN | l | | d0 | .. | dN | f |
+ +---------+----+-|-+ ^----+----+----+-|-+ ^----+----+----+---+
+ | | | |
+ +----+ +----+
+ new_link
+
+If while new_link was created the DMA channel stopped, it is _not_
+restarted. Hot-chaining doesn't break the assumption that
+dma_async_issue_pending() is to be used to ensure the transfer is actually started.
+
+One exception to this rule:
+
+- if Buffer1 and Buffer2 had all their addresses 8 bytes aligned
+
+- and if Buffer3 has at least one address not 4 bytes aligned
+
+- then hot-chaining cannot happen, as the channel must be stopped, the
+ "align bit" must be set, and the channel restarted. As a consequence,
+ such a transfer's tx_submit() will put it on the submitted queue in
+ this specific case, if the DMA is already running in aligned mode.
+
+d) Transfers completion updater
+Each time a transfer is completed on a channel, an interrupt might be
+generated or not, up to the client's request. But in each case, the last
+descriptor of a transfer, the "status updater", will write the latest
+transfer being completed into the physical channel's completion mark.
+
+This will speed up residue calculation for large transfers such as video
+buffers, which hold around 6k descriptors or more. This also allows finding
+out, without any lock, which is the latest completed transfer in a running
+DMA chain.
+
+e) Transfers completion, irq and tasklet
+When a transfer flagged as "DMA_PREP_INTERRUPT" is finished, the dma irq
+is raised. Upon this interrupt, a tasklet is scheduled for the physical
+channel.
+
+The tasklet is responsible for:
+
+- reading the physical channel last updater mark
+
+- calling all the transfer callbacks of finished transfers, based on
+ that mark, and each transfer's flags.
+
+If a transfer is completed while this handling is done, a dma irq will
+be raised, and the tasklet will be scheduled once again, having a new
+updater mark.
+
+f) Residue
+Residue granularity will be descriptor based. The issued but not completed
+transfers will be scanned for all of their descriptors against the
+currently running descriptor.
+
+g) Most complicated case of driver's tx queues
+The trickiest situation is when:
+
+ - there are non-"acked" transfers (tx0)
+
+ - a driver submitted an aligned tx1, not chained
+
+ - a driver submitted an aligned tx2 => tx2 is cold chained to tx1
+
+ - a driver issued tx1+tx2 => channel is running in aligned mode
+
+ - a driver submitted an aligned tx3 => tx3 is hot-chained
+
+ - a driver submitted an unaligned tx4 => tx4 is put in submitted queue,
+ not chained
+
+ - a driver issued tx4 => tx4 is put in issued queue, not chained
+
+ - a driver submitted an aligned tx5 => tx5 is put in submitted queue, not
+ chained
+
+ - a driver submitted an aligned tx6 => tx6 is put in submitted queue,
+ cold chained to tx5
+
+ This translates into (after tx4 is issued):
+
+ - issued queue
+
+ ::
+
+ +-----+ +-----+ +-----+ +-----+
+ | tx1 | | tx2 | | tx3 | | tx4 |
+ +---|-+ ^---|-+ ^-----+ +-----+
+ | | | |
+ +---+ +---+
+ - submitted queue
+
+ ::
+
+ +-----+ +-----+
+ | tx5 | | tx6 |
+ +---|-+ ^-----+
+ | |
+ +---+
+
+- completed queue: empty
+
+- allocated queue: tx0
+
+It should be noted that after tx3 is completed, the channel is stopped, and
+restarted in "unaligned mode" to handle tx4.
+
+Author: Robert Jarzmik <robert.jarzmik@free.fr>
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 9c20624842b7..d17a9876b473 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -46,6 +46,7 @@ available subsections can be seen below.
pinctl
gpio
misc_devices
+ dmaengine/index
.. only:: subproject and html
diff --git a/Documentation/driver-api/pinctl.rst b/Documentation/driver-api/pinctl.rst
index 48f15b4f9d3e..6cb68d67fa75 100644
--- a/Documentation/driver-api/pinctl.rst
+++ b/Documentation/driver-api/pinctl.rst
@@ -757,8 +757,8 @@ that your datasheet calls "GPIO mode", but actually is just an electrical
configuration for a certain device. See the section below named
"GPIO mode pitfalls" for more details on this scenario.
-The public pinmux API contains two functions named pinctrl_request_gpio()
-and pinctrl_free_gpio(). These two functions shall *ONLY* be called from
+The public pinmux API contains two functions named pinctrl_gpio_request()
+and pinctrl_gpio_free(). These two functions shall *ONLY* be called from
gpiolib-based drivers as part of their gpio_request() and
gpio_free() semantics. Likewise the pinctrl_gpio_direction_[input|output]
shall only be called from within respective gpio_direction_[input|output]
@@ -790,7 +790,7 @@ gpiolib driver and the affected GPIO range, pin offset and desired direction
will be passed along to this function.
Alternatively to using these special functions, it is fully allowed to use
-named functions for each GPIO pin, the pinctrl_request_gpio() will attempt to
+named functions for each GPIO pin, the pinctrl_gpio_request() will attempt to
obtain the function "gpioN" where "N" is the global GPIO pin number if no
special GPIO-handler is registered.
diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst
index a0dc2879a152..53c1b0b06da5 100644
--- a/Documentation/driver-api/pm/devices.rst
+++ b/Documentation/driver-api/pm/devices.rst
@@ -274,7 +274,7 @@ sleep states and the hibernation state ("suspend-to-disk"). Each phase involves
executing callbacks for every device before the next phase begins. Not all
buses or classes support all these callbacks and not all drivers use all the
callbacks. The various phases always run after tasks have been frozen and
-before they are unfrozen. Furthermore, the ``*_noirq phases`` run at a time
+before they are unfrozen. Furthermore, the ``*_noirq`` phases run at a time
when IRQ handlers have been disabled (except for those marked with the
IRQF_NO_SUSPEND flag).
@@ -328,7 +328,10 @@ the phases are: ``prepare``, ``suspend``, ``suspend_late``, ``suspend_noirq``.
After the ``->prepare`` callback method returns, no new children may be
registered below the device. The method may also prepare the device or
driver in some way for the upcoming system power transition, but it
- should not put the device into a low-power state.
+ should not put the device into a low-power state. Moreover, if the
+ device supports runtime power management, the ``->prepare`` callback
+ method must not update its state in case it is necessary to resume it
+ from runtime suspend later on.
For devices supporting runtime power management, the return value of the
prepare callback can be used to indicate to the PM core that it may
@@ -351,11 +354,35 @@ the phases are: ``prepare``, ``suspend``, ``suspend_late``, ``suspend_noirq``.
is because all such devices are initially set to runtime-suspended with
runtime PM disabled.
+ This feature also can be controlled by device drivers by using the
+ ``DPM_FLAG_NEVER_SKIP`` and ``DPM_FLAG_SMART_PREPARE`` driver power
+ management flags. [Typically, they are set at the time the driver is
+ probed against the device in question by passing them to the
+ :c:func:`dev_pm_set_driver_flags` helper function.] If the first of
+ these flags is set, the PM core will not apply the direct-complete
+ procedure described above to the given device and, consequently, to any
+ of its ancestors. The second flag, when set, informs the middle layer
+ code (bus types, device types, PM domains, classes) that it should take
+ the return value of the ``->prepare`` callback provided by the driver
+ into account and it may only return a positive value from its own
+ ``->prepare`` callback if the driver's one also has returned a positive
+ value.
+
2. The ``->suspend`` methods should quiesce the device to stop it from
performing I/O. They also may save the device registers and put it into
the appropriate low-power state, depending on the bus type the device is
on, and they may enable wakeup events.
+ However, for devices supporting runtime power management, the
+ ``->suspend`` methods provided by subsystems (bus types and PM domains
+ in particular) must follow an additional rule regarding what can be done
+ to the devices before their drivers' ``->suspend`` methods are called.
+ Namely, they can only resume the devices from runtime suspend by
+ calling :c:func:`pm_runtime_resume` for them, if that is necessary, and
+ they must not update the state of the devices in any other way at that
+ time (in case the drivers need to resume the devices from runtime
+ suspend in their ``->suspend`` methods).
+
3. For a number of devices it is convenient to split suspend into the
"quiesce device" and "save device state" phases, in which cases
``suspend_late`` is meant to do the latter. It is always executed after
@@ -729,6 +756,36 @@ state temporarily, for example so that its system wakeup capability can be
disabled. This all depends on the hardware and the design of the subsystem and
device driver in question.
+If it is necessary to resume a device from runtime suspend during a system-wide
+transition into a sleep state, that can be done by calling
+:c:func:`pm_runtime_resume` for it from the ``->suspend`` callback (or its
+counterpart for transitions related to hibernation) of either the device's driver
+or a subsystem responsible for it (for example, a bus type or a PM domain).
+That is guaranteed to work by the requirement that subsystems must not change
+the state of devices (possibly except for resuming them from runtime suspend)
+from their ``->prepare`` and ``->suspend`` callbacks (or equivalent) *before*
+invoking device drivers' ``->suspend`` callbacks (or equivalent).
+
+Some bus types and PM domains have a policy to resume all devices from runtime
+suspend upfront in their ``->suspend`` callbacks, but that may not be really
+necessary if the driver of the device can cope with runtime-suspended devices.
+The driver can indicate that by setting ``DPM_FLAG_SMART_SUSPEND`` in
+:c:member:`power.driver_flags` at the probe time, by passing it to the
+:c:func:`dev_pm_set_driver_flags` helper. That also may cause middle-layer code
+(bus types, PM domains etc.) to skip the ``->suspend_late`` and
+``->suspend_noirq`` callbacks provided by the driver if the device remains in
+runtime suspend at the beginning of the ``suspend_late`` phase of system-wide
+suspend (or in the ``poweroff_late`` phase of hibernation), when runtime PM
+has been disabled for it, under the assumption that its state should not change
+after that point until the system-wide transition is over. If that happens, the
+driver's system-wide resume callbacks, if present, may still be invoked during
+the subsequent system-wide resume transition and the device's runtime power
+management status may be set to "active" before enabling runtime PM for it,
+so the driver must be prepared to cope with the invocation of its system-wide
+resume callbacks back-to-back with its ``->runtime_suspend`` one (without the
+intervening ``->runtime_resume`` and so on) and the final state of the device
+must reflect the "active" status for runtime PM in that case.
+
During system-wide resume from a sleep state it's easiest to put devices into
the full-power state, as explained in :file:`Documentation/power/runtime_pm.txt`.
Refer to that document for more information regarding this particular issue as
diff --git a/Documentation/driver-api/scsi.rst b/Documentation/driver-api/scsi.rst
index 5a2aa7a377d9..9ae03171daca 100644
--- a/Documentation/driver-api/scsi.rst
+++ b/Documentation/driver-api/scsi.rst
@@ -28,7 +28,7 @@ SCSI commands can be transported over just about any kind of bus, and
are the default protocol for storage devices attached to USB, SATA, SAS,
Fibre Channel, FireWire, and ATAPI devices. SCSI packets are also
commonly exchanged over Infiniband,
-`I20 <http://i2o.shadowconnect.com/faq.php>`__, TCP/IP
+`I2O <http://i2o.shadowconnect.com/faq.php>`__, TCP/IP
(`iSCSI <https://en.wikipedia.org/wiki/ISCSI>`__), even `Parallel
ports <http://cyberelk.net/tim/parport/parscsi.html>`__.
diff --git a/Documentation/driver-api/usb/usb.rst b/Documentation/driver-api/usb/usb.rst
index dba0f876b36f..078e981e2b16 100644
--- a/Documentation/driver-api/usb/usb.rst
+++ b/Documentation/driver-api/usb/usb.rst
@@ -690,9 +690,7 @@ The USB devices are now exported via debugfs:
This file is handy for status viewing tools in user mode, which can scan
the text format and ignore most of it. More detailed device status
(including class and vendor status) is available from device-specific
-files. For information about the current format of this file, see the
-``Documentation/usb/proc_usb_info.txt`` file in your Linux kernel
-sources.
+files. For information about the current format of this file, see below.
This file, in combination with the poll() system call, can also be used
to detect when devices are added or removed::
diff --git a/Documentation/fault-injection/notifier-error-inject.txt b/Documentation/fault-injection/notifier-error-inject.txt
index 83d3f4e43e91..e861d761de24 100644
--- a/Documentation/fault-injection/notifier-error-inject.txt
+++ b/Documentation/fault-injection/notifier-error-inject.txt
@@ -6,41 +6,11 @@ specified notifier chain callbacks. It is useful to test the error handling of
notifier call chain failures which is rarely executed. There are kernel
modules that can be used to test the following notifiers.
- * CPU notifier
* PM notifier
* Memory hotplug notifier
* powerpc pSeries reconfig notifier
* Netdevice notifier
-CPU notifier error injection module
------------------------------------
-This feature can be used to test the error handling of the CPU notifiers by
-injecting artificial errors to CPU notifier chain callbacks.
-
-If the notifier call chain should be failed with some events notified, write
-the error code to debugfs interface
-/sys/kernel/debug/notifier-error-inject/cpu/actions/<notifier event>/error
-
-Possible CPU notifier events to be failed are:
-
- * CPU_UP_PREPARE
- * CPU_UP_PREPARE_FROZEN
- * CPU_DOWN_PREPARE
- * CPU_DOWN_PREPARE_FROZEN
-
-Example1: Inject CPU offline error (-1 == -EPERM)
-
- # cd /sys/kernel/debug/notifier-error-inject/cpu
- # echo -1 > actions/CPU_DOWN_PREPARE/error
- # echo 0 > /sys/devices/system/cpu/cpu1/online
- bash: echo: write error: Operation not permitted
-
-Example2: inject CPU online error (-2 == -ENOENT)
-
- # echo -2 > actions/CPU_UP_PREPARE/error
- # echo 1 > /sys/devices/system/cpu/cpu1/online
- bash: echo: write error: No such file or directory
-
PM notifier error injection module
----------------------------------
This feature is controlled through debugfs interface
diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt
index a38d3aa4d189..79c22d096bbc 100644
--- a/Documentation/fb/fbcon.txt
+++ b/Documentation/fb/fbcon.txt
@@ -77,8 +77,8 @@ C. Boot options
1. fbcon=font:<name>
Select the initial font to use. The value 'name' can be any of the
- compiled-in fonts: VGA8x16, 7x14, 10x18, VGA8x8, MINI4x6, RomanLarge,
- SUN8x16, SUN12x22, ProFont6x11, Acorn8x8, PEARL8x8.
+ compiled-in fonts: 10x18, 6x10, 7x14, Acorn8x8, MINI4x6,
+ PEARL8x8, ProFont6x11, SUN12x22, SUN8x16, VGA8x16, VGA8x8.
Note, not all drivers can handle font with widths not divisible by 8,
such as vga16fb.
diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
index 76bbd7fe27b3..f377290fe48e 100644
--- a/Documentation/features/debug/KASAN/arch-support.txt
+++ b/Documentation/features/debug/KASAN/arch-support.txt
@@ -34,6 +34,6 @@
| tile: | TODO |
| um: | TODO |
| unicore32: | TODO |
- | x86: | ok |
+ | x86: | ok | 64-bit only
| xtensa: | TODO |
-----------------------
diff --git a/Documentation/filesystems/dnotify.txt b/Documentation/filesystems/dnotify.txt
index 6baf88f46859..15156883d321 100644
--- a/Documentation/filesystems/dnotify.txt
+++ b/Documentation/filesystems/dnotify.txt
@@ -62,7 +62,7 @@ disabled, fcntl(fd, F_NOTIFY, ...) will return -EINVAL.
Example
-------
-See Documentation/filesystems/dnotify_test.c for an example.
+See tools/testing/selftests/filesystems/dnotify_test.c for an example.
NOTE
----
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 5a8f7f4d2bca..75236c0c2ac2 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -94,10 +94,10 @@ Note: More extensive information for getting started with ext4 can be
* ability to pack bitmaps and inode tables into larger virtual groups via the
flex_bg feature
* large file support
-* Inode allocation using large virtual block groups via flex_bg
+* inode allocation using large virtual block groups via flex_bg
* delayed allocation
* large block (up to pagesize) support
-* efficient new ordered mode in JBD2 and ext4(avoid using buffer head to force
+* efficient new ordered mode in JBD2 and ext4 (avoid using buffer head to force
the ordering)
[1] Filesystems with a block size of 1k may see a limit imposed by the
@@ -105,7 +105,7 @@ directory hash tree having a maximum depth of two.
2.2 Candidate features for future inclusion
-* Online defrag (patches available but not well tested)
+* online defrag (patches available but not well tested)
* reduced mke2fs time via lazy itable initialization in conjunction with
the uninit_bg feature (capability to do this is available in e2fsprogs
but a kernel thread to do lazy zeroing of unused inode table blocks
@@ -602,7 +602,7 @@ Table of Ext4 specific ioctls
bitmaps and inode table, the userspace tool thus
just passes the new number of blocks.
-EXT4_IOC_SWAP_BOOT Swap i_blocks and associated attributes
+ EXT4_IOC_SWAP_BOOT Swap i_blocks and associated attributes
(like i_blocks, i_size, i_flags, ...) from
the specified inode with inode
EXT4_BOOT_LOADER_INO (#5). This is typically
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
new file mode 100644
index 000000000000..776ddc655f79
--- /dev/null
+++ b/Documentation/filesystems/fscrypt.rst
@@ -0,0 +1,610 @@
+=====================================
+Filesystem-level encryption (fscrypt)
+=====================================
+
+Introduction
+============
+
+fscrypt is a library which filesystems can hook into to support
+transparent encryption of files and directories.
+
+Note: "fscrypt" in this document refers to the kernel-level portion,
+implemented in ``fs/crypto/``, as opposed to the userspace tool
+`fscrypt <https://github.com/google/fscrypt>`_. This document only
+covers the kernel-level portion. For command-line examples of how to
+use encryption, see the documentation for the userspace tool `fscrypt
+<https://github.com/google/fscrypt>`_. Also, it is recommended to use
+the fscrypt userspace tool, or other existing userspace tools such as
+`fscryptctl <https://github.com/google/fscryptctl>`_ or `Android's key
+management system
+<https://source.android.com/security/encryption/file-based>`_, over
+using the kernel's API directly. Using existing tools reduces the
+chance of introducing your own security bugs. (Nevertheless, for
+completeness this documentation covers the kernel's API anyway.)
+
+Unlike dm-crypt, fscrypt operates at the filesystem level rather than
+at the block device level. This allows it to encrypt different files
+with different keys and to have unencrypted files on the same
+filesystem. This is useful for multi-user systems where each user's
+data-at-rest needs to be cryptographically isolated from the others.
+However, except for filenames, fscrypt does not encrypt filesystem
+metadata.
+
+Unlike eCryptfs, which is a stacked filesystem, fscrypt is integrated
+directly into supported filesystems --- currently ext4, F2FS, and
+UBIFS. This allows encrypted files to be read and written without
+caching both the decrypted and encrypted pages in the pagecache,
+thereby nearly halving the memory used and bringing it in line with
+unencrypted files. Similarly, half as many dentries and inodes are
+needed. eCryptfs also limits encrypted filenames to 143 bytes,
+causing application compatibility issues; fscrypt allows the full 255
+bytes (NAME_MAX). Finally, unlike eCryptfs, the fscrypt API can be
+used by unprivileged users, with no need to mount anything.
+
+fscrypt does not support encrypting files in-place. Instead, it
+supports marking an empty directory as encrypted. Then, after
+userspace provides the key, all regular files, directories, and
+symbolic links created in that directory tree are transparently
+encrypted.
+
+Threat model
+============
+
+Offline attacks
+---------------
+
+Provided that userspace chooses a strong encryption key, fscrypt
+protects the confidentiality of file contents and filenames in the
+event of a single point-in-time permanent offline compromise of the
+block device content. fscrypt does not protect the confidentiality of
+non-filename metadata, e.g. file sizes, file permissions, file
+timestamps, and extended attributes. Also, the existence and location
+of holes (unallocated blocks which logically contain all zeroes) in
+files are not protected.
+
+fscrypt is not guaranteed to protect confidentiality or authenticity
+if an attacker is able to manipulate the filesystem offline prior to
+an authorized user later accessing the filesystem.
+
+Online attacks
+--------------
+
+fscrypt (and storage encryption in general) can only provide limited
+protection, if any at all, against online attacks. In detail:
+
+fscrypt is only resistant to side-channel attacks, such as timing or
+electromagnetic attacks, to the extent that the underlying Linux
+Cryptographic API algorithms are. If a vulnerable algorithm is used,
+such as a table-based implementation of AES, it may be possible for an
+attacker to mount a side channel attack against the online system.
+Side channel attacks may also be mounted against applications
+consuming decrypted data.
+
+After an encryption key has been provided, fscrypt is not designed to
+hide the plaintext file contents or filenames from other users on the
+same system, regardless of the visibility of the keyring key.
+Instead, existing access control mechanisms such as file mode bits,
+POSIX ACLs, LSMs, or mount namespaces should be used for this purpose.
+Also note that as long as the encryption keys are *anywhere* in
+memory, an online attacker can necessarily compromise them by mounting
+a physical attack or by exploiting any kernel security vulnerability
+which provides an arbitrary memory read primitive.
+
+While it is ostensibly possible to "evict" keys from the system,
+recently accessed encrypted files will remain accessible at least
+until the filesystem is unmounted or the VFS caches are dropped, e.g.
+using ``echo 2 > /proc/sys/vm/drop_caches``. Even after that, if the
+RAM is compromised before being powered off, it will likely still be
+possible to recover portions of the plaintext file contents, if not
+some of the encryption keys as well. (Since Linux v4.12, all
+in-kernel keys related to fscrypt are sanitized before being freed.
+However, userspace would need to do its part as well.)
+
+Currently, fscrypt does not prevent a user from maliciously providing
+an incorrect key for another user's existing encrypted files. A
+protection against this is planned.
+
+Key hierarchy
+=============
+
+Master Keys
+-----------
+
+Each encrypted directory tree is protected by a *master key*. Master
+keys can be up to 64 bytes long, and must be at least as long as the
+greater of the key length needed by the contents and filenames
+encryption modes being used. For example, if AES-256-XTS is used for
+contents encryption, the master key must be 64 bytes (512 bits). Note
+that the XTS mode is defined to require a key twice as long as that
+required by the underlying block cipher.
+
+To "unlock" an encrypted directory tree, userspace must provide the
+appropriate master key. There can be any number of master keys, each
+of which protects any number of directory trees on any number of
+filesystems.
+
+Userspace should generate master keys either using a cryptographically
+secure random number generator, or by using a KDF (Key Derivation
+Function). Note that whenever a KDF is used to "stretch" a
+lower-entropy secret such as a passphrase, it is critical that a KDF
+designed for this purpose be used, such as scrypt, PBKDF2, or Argon2.
+
+Per-file keys
+-------------
+
+Master keys are not used to encrypt file contents or names directly.
+Instead, a unique key is derived for each encrypted file, including
+each regular file, directory, and symbolic link. This has several
+advantages:
+
+- In cryptosystems, the same key material should never be used for
+ different purposes. Using the master key as both an XTS key for
+ contents encryption and as a CTS-CBC key for filenames encryption
+ would violate this rule.
+- Per-file keys simplify the choice of IVs (Initialization Vectors)
+ for contents encryption. Without per-file keys, to ensure IV
+ uniqueness both the inode and logical block number would need to be
+ encoded in the IVs. This would make it impossible to renumber
+ inodes, which e.g. ``resize2fs`` can do when resizing an ext4
+ filesystem. With per-file keys, it is sufficient to encode just the
+ logical block number in the IVs.
+- Per-file keys strengthen the encryption of filenames, where IVs are
+ reused out of necessity. With a unique key per directory, IV reuse
+ is limited to within a single directory.
+- Per-file keys allow individual files to be securely erased simply by
+ securely erasing their keys. (Not yet implemented.)
+
+A KDF (Key Derivation Function) is used to derive per-file keys from
+the master key. This is done instead of wrapping a randomly-generated
+key for each file because it reduces the size of the encryption xattr,
+which for some filesystems makes the xattr more likely to fit in-line
+in the filesystem's inode table. With a KDF, only a 16-byte nonce is
+required --- long enough to make key reuse extremely unlikely. A
+wrapped key, on the other hand, would need to be up to 64 bytes ---
+the length of an AES-256-XTS key. Furthermore, currently there is no
+requirement to support unlocking a file with multiple alternative
+master keys or to support rotating master keys. Instead, the master
+keys may be wrapped in userspace, e.g. as done by the `fscrypt
+<https://github.com/google/fscrypt>`_ tool.
+
+The current KDF encrypts the master key using the 16-byte nonce as an
+AES-128-ECB key. The output is used as the derived key. If the
+output is longer than needed, then it is truncated to the needed
+length. Truncation is the norm for directories and symlinks, since
+those use the CTS-CBC encryption mode which requires a key half as
+long as that required by the XTS encryption mode.
+
+Note: this KDF meets the primary security requirement, which is to
+produce unique derived keys that preserve the entropy of the master
+key, assuming that the master key is already a good pseudorandom key.
+However, it is nonstandard and has some problems such as being
+reversible, so it is generally considered to be a mistake! It may be
+replaced with HKDF or another more standard KDF in the future.
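+
+As an illustration only (a userspace sketch of the derivation described above,
+using OpenSSL's low-level AES API; it assumes the master key length is a
+multiple of 16 bytes and at most 64 bytes, and that the derived key is no
+longer than the master key)::
+
+    #include <openssl/aes.h>
+    #include <string.h>
+
+    static void derive_per_file_key(const unsigned char nonce[16],
+                                    const unsigned char *master_key,
+                                    size_t master_key_size,
+                                    unsigned char *derived_key,
+                                    size_t derived_key_size)
+    {
+            unsigned char buf[64];
+            AES_KEY aes;
+            size_t i;
+
+            /* The 16-byte nonce is used as an AES-128 key. */
+            AES_set_encrypt_key(nonce, 128, &aes);
+
+            /* ECB mode: encrypt each 16-byte block of the master key. */
+            for (i = 0; i < master_key_size; i += 16)
+                    AES_encrypt(master_key + i, buf + i, &aes);
+
+            /* Truncate the output to the length the encryption mode needs. */
+            memcpy(derived_key, buf, derived_key_size);
+    }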
+
+Encryption modes and usage
+==========================
+
+fscrypt allows one encryption mode to be specified for file contents
+and one encryption mode to be specified for filenames. Different
+directory trees are permitted to use different encryption modes.
+Currently, the following pairs of encryption modes are supported:
+
+- AES-256-XTS for contents and AES-256-CTS-CBC for filenames
+- AES-128-CBC for contents and AES-128-CTS-CBC for filenames
+
+It is strongly recommended to use AES-256-XTS for contents encryption.
+AES-128-CBC was added only for low-powered embedded devices with
+crypto accelerators such as CAAM or CESA that do not support XTS.
+
+New encryption modes can be added relatively easily, without changes
+to individual filesystems. However, authenticated encryption (AE)
+modes are not currently supported because of the difficulty of dealing
+with ciphertext expansion.
+
+For file contents, each filesystem block is encrypted independently.
+Currently, only the case where the filesystem block size is equal to
+the system's page size (usually 4096 bytes) is supported. With the
+XTS mode of operation (recommended), the logical block number within
+the file is used as the IV. With the CBC mode of operation (not
+recommended), ESSIV is used; specifically, the IV for CBC is the
+logical block number encrypted with AES-256, where the AES-256 key is
+the SHA-256 hash of the inode's data encryption key.
+
+For filenames, the full filename is encrypted at once. Because of the
+requirements to retain support for efficient directory lookups and
+filenames of up to 255 bytes, a constant initialization vector (IV) is
+used. However, each encrypted directory uses a unique key, which
+limits IV reuse to within a single directory. Note that IV reuse in
+the context of CTS-CBC encryption means that when the original
+filenames share a common prefix at least as long as the cipher block
+size (16 bytes for AES), the corresponding encrypted filenames will
+also share a common prefix. This is undesirable; it may be fixed in
+the future by switching to an encryption mode that is a strong
+pseudorandom permutation on arbitrary-length messages, e.g. the HEH
+(Hash-Encrypt-Hash) mode.
+
+Since filenames are encrypted with the CTS-CBC mode of operation, the
+plaintext and ciphertext filenames need not be multiples of the AES
+block size, i.e. 16 bytes. However, the minimum size that can be
+encrypted is 16 bytes, so shorter filenames are NUL-padded to 16 bytes
+before being encrypted. In addition, to reduce leakage of filename
+lengths via their ciphertexts, all filenames are NUL-padded to the
+next 4, 8, 16, or 32-byte boundary (configurable). 32 is recommended
+since this provides the best confidentiality, at the cost of making
+directory entries consume slightly more space. Note that since NUL
+(``\0``) is not otherwise a valid character in filenames, the padding
+will never produce duplicate plaintexts.
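+
+As an illustration only (a sketch of the length calculation implied by the
+above, not code taken from the kernel), the ciphertext length for a filename
+could be computed as::
+
+    /* 'pad' is the configured padding amount: 4, 8, 16, or 32 bytes. */
+    static size_t padded_name_len(size_t plaintext_len, size_t pad)
+    {
+            if (plaintext_len < 16)         /* minimum of one cipher block */
+                    plaintext_len = 16;
+            return (plaintext_len + pad - 1) & ~(pad - 1);
+    }
+
+With 32-byte padding, a 13-byte filename would thus produce a 32-byte
+ciphertext, and a 40-byte filename a 64-byte ciphertext.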
+
+Symbolic link targets are considered a type of filename and are
+encrypted in the same way as filenames in directory entries. Each
+symlink also uses a unique key; hence, the hardcoded IV is not a
+problem for symlinks.
+
+User API
+========
+
+Setting an encryption policy
+----------------------------
+
+The FS_IOC_SET_ENCRYPTION_POLICY ioctl sets an encryption policy on an
+empty directory or verifies that a directory or regular file already
+has the specified encryption policy. It takes in a pointer to a
+:c:type:`struct fscrypt_policy`, defined as follows::
+
+ #define FS_KEY_DESCRIPTOR_SIZE 8
+
+ struct fscrypt_policy {
+ __u8 version;
+ __u8 contents_encryption_mode;
+ __u8 filenames_encryption_mode;
+ __u8 flags;
+ __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+ };
+
+This structure must be initialized as follows:
+
+- ``version`` must be 0.
+
+- ``contents_encryption_mode`` and ``filenames_encryption_mode`` must
+ be set to constants from ``<linux/fs.h>`` which identify the
+ encryption modes to use. If unsure, use
+ FS_ENCRYPTION_MODE_AES_256_XTS (1) for ``contents_encryption_mode``
+ and FS_ENCRYPTION_MODE_AES_256_CTS (4) for
+ ``filenames_encryption_mode``.
+
+- ``flags`` must be set to a value from ``<linux/fs.h>`` which
+ identifies the amount of NUL-padding to use when encrypting
+ filenames. If unsure, use FS_POLICY_FLAGS_PAD_32 (0x3).
+
+- ``master_key_descriptor`` specifies how to find the master key in
+ the keyring; see `Adding keys`_. It is up to userspace to choose a
+ unique ``master_key_descriptor`` for each master key. The e4crypt
+ and fscrypt tools use the first 8 bytes of
+ ``SHA-512(SHA-512(master_key))``, but this particular scheme is not
+ required. Also, the master key need not be in the keyring yet when
+ FS_IOC_SET_ENCRYPTION_POLICY is executed. However, it must be added
+ before any files can be created in the encrypted directory.
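+
+As an illustration only (a minimal userspace sketch with error handling
+reduced; ``set_policy()`` and the particular choice of modes are examples, not
+part of the kernel API)::
+
+    #include <fcntl.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <unistd.h>
+    #include <linux/fs.h>
+
+    static int set_policy(const char *dir,
+                          const __u8 key_descriptor[FS_KEY_DESCRIPTOR_SIZE])
+    {
+            struct fscrypt_policy policy = {
+                    .version = 0,
+                    .contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS,
+                    .filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS,
+                    .flags = FS_POLICY_FLAGS_PAD_32,
+            };
+            int fd, ret;
+
+            memcpy(policy.master_key_descriptor, key_descriptor,
+                   FS_KEY_DESCRIPTOR_SIZE);
+
+            fd = open(dir, O_RDONLY | O_DIRECTORY);
+            if (fd < 0)
+                    return -1;
+            ret = ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
+            close(fd);
+            return ret;
+    }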
+
+If the file is not yet encrypted, then FS_IOC_SET_ENCRYPTION_POLICY
+verifies that the file is an empty directory. If so, the specified
+encryption policy is assigned to the directory, turning it into an
+encrypted directory. After that, and after providing the
+corresponding master key as described in `Adding keys`_, all regular
+files, directories (recursively), and symlinks created in the
+directory will be encrypted, inheriting the same encryption policy.
+The filenames in the directory's entries will be encrypted as well.
+
+Alternatively, if the file is already encrypted, then
+FS_IOC_SET_ENCRYPTION_POLICY validates that the specified encryption
+policy exactly matches the actual one. If they match, then the ioctl
+returns 0. Otherwise, it fails with EEXIST. This works on both
+regular files and directories, including nonempty directories.
+
+Note that the ext4 filesystem does not allow the root directory to be
+encrypted, even if it is empty. Users who want to encrypt an entire
+filesystem with one key should consider using dm-crypt instead.
+
+FS_IOC_SET_ENCRYPTION_POLICY can fail with the following errors:
+
+- ``EACCES``: the file is not owned by the process's uid, nor does the
+ process have the CAP_FOWNER capability in a namespace with the file
+ owner's uid mapped
+- ``EEXIST``: the file is already encrypted with an encryption policy
+ different from the one specified
+- ``EINVAL``: an invalid encryption policy was specified (invalid
+ version, mode(s), or flags)
+- ``ENOTDIR``: the file is unencrypted and is a regular file, not a
+ directory
+- ``ENOTEMPTY``: the file is unencrypted and is a nonempty directory
+- ``ENOTTY``: this type of filesystem does not implement encryption
+- ``EOPNOTSUPP``: the kernel was not configured with encryption
+ support for this filesystem, or the filesystem superblock has not
+ had encryption enabled on it. (For example, to use encryption on an
+ ext4 filesystem, CONFIG_EXT4_ENCRYPTION must be enabled in the
+ kernel config, and the superblock must have had the "encrypt"
+ feature flag enabled using ``tune2fs -O encrypt`` or ``mkfs.ext4 -O
+ encrypt``.)
+- ``EPERM``: this directory may not be encrypted, e.g. because it is
+ the root directory of an ext4 filesystem
+- ``EROFS``: the filesystem is readonly
+
+Getting an encryption policy
+----------------------------
+
+The FS_IOC_GET_ENCRYPTION_POLICY ioctl retrieves the :c:type:`struct
+fscrypt_policy`, if any, for a directory or regular file. See above
+for the struct definition. No additional permissions are required
+beyond the ability to open the file.
+
+FS_IOC_GET_ENCRYPTION_POLICY can fail with the following errors:
+
+- ``EINVAL``: the file is encrypted, but it uses an unrecognized
+ encryption context format
+- ``ENODATA``: the file is not encrypted
+- ``ENOTTY``: this type of filesystem does not implement encryption
+- ``EOPNOTSUPP``: the kernel was not configured with encryption
+ support for this filesystem
+
+Note: if you only need to know whether a file is encrypted or not, on
+most filesystems it is also possible to use the FS_IOC_GETFLAGS ioctl
+and check for FS_ENCRYPT_FL, or to use the statx() system call and
+check for STATX_ATTR_ENCRYPTED in stx_attributes.
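+
+As an illustration only (a small userspace sketch; ``get_policy()`` is an
+example helper, not part of the kernel API)::
+
+    #include <errno.h>
+    #include <sys/ioctl.h>
+    #include <linux/fs.h>
+
+    /* Returns 1 if encrypted, 0 if unencrypted, -1 on any other error. */
+    static int get_policy(int fd, struct fscrypt_policy *policy)
+    {
+            if (ioctl(fd, FS_IOC_GET_ENCRYPTION_POLICY, policy) == 0)
+                    return 1;
+            if (errno == ENODATA)
+                    return 0;
+            return -1;
+    }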
+
+Getting the per-filesystem salt
+-------------------------------
+
+Some filesystems, such as ext4 and F2FS, also support the deprecated
+ioctl FS_IOC_GET_ENCRYPTION_PWSALT. This ioctl retrieves a randomly
+generated 16-byte value stored in the filesystem superblock. This
+value is intended to be used as a salt when deriving an encryption key
+from a passphrase or other low-entropy user credential.
+
+FS_IOC_GET_ENCRYPTION_PWSALT is deprecated. Instead, prefer to
+generate and manage any needed salt(s) in userspace.
+
+Adding keys
+-----------
+
+To provide a master key, userspace must add it to an appropriate
+keyring using the add_key() system call (see:
+``Documentation/security/keys/core.rst``). The key type must be
+"logon"; keys of this type are kept in kernel memory and cannot be
+read back by userspace. The key description must be "fscrypt:"
+followed by the 16-character lower case hex representation of the
+``master_key_descriptor`` that was set in the encryption policy. The
+key payload must conform to the following structure::
+
+ #define FS_MAX_KEY_SIZE 64
+
+ struct fscrypt_key {
+ u32 mode;
+ u8 raw[FS_MAX_KEY_SIZE];
+ u32 size;
+ };
+
+``mode`` is ignored; just set it to 0. The actual key is provided in
+``raw`` with ``size`` indicating its size in bytes. That is, the
+bytes ``raw[0..size-1]`` (inclusive) are the actual key.
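+
+As an illustration only (a minimal sketch using the add_key() wrapper from
+libkeyutils and the caller's user keyring; the helper name and the choice of
+keyring are examples, and the caller must ensure ``size`` does not exceed
+FS_MAX_KEY_SIZE)::
+
+    #include <keyutils.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <linux/fs.h>
+
+    static key_serial_t add_master_key(const char *hex_descriptor,
+                                       const unsigned char *raw,
+                                       unsigned int size)
+    {
+            struct fscrypt_key key = { .mode = 0, .size = size };
+            char description[32];
+
+            memcpy(key.raw, raw, size);
+            snprintf(description, sizeof(description), "fscrypt:%s",
+                     hex_descriptor);
+
+            /* "logon" keys can be added but not read back by userspace. */
+            return add_key("logon", description, &key, sizeof(key),
+                           KEY_SPEC_USER_KEYRING);
+    }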
+
+The key description prefix "fscrypt:" may alternatively be replaced
+with a filesystem-specific prefix such as "ext4:". However, the
+filesystem-specific prefixes are deprecated and should not be used in
+new programs.
+
+There are several different types of keyrings in which encryption keys
+may be placed, such as a session keyring, a user session keyring, or a
+user keyring. Each key must be placed in a keyring that is "attached"
+to all processes that might need to access files encrypted with it, in
+the sense that request_key() will find the key. Generally, if only
+processes belonging to a specific user need to access a given
+encrypted directory and no session keyring has been installed, then
+that directory's key should be placed in that user's user session
+keyring or user keyring. Otherwise, a session keyring should be
+installed if needed, and the key should be linked into that session
+keyring, or in a keyring linked into that session keyring.
+
+Note: introducing the complex visibility semantics of keyrings here
+was arguably a mistake --- especially given that by design, after any
+process successfully opens an encrypted file (thereby setting up the
+per-file key), possessing the keyring key is not actually required for
+any process to read/write the file until its in-memory inode is
+evicted. In the future there probably should be a way to provide keys
+directly to the filesystem instead, which would make the intended
+semantics clearer.
+
+Access semantics
+================
+
+With the key
+------------
+
+With the encryption key, encrypted regular files, directories, and
+symlinks behave very similarly to their unencrypted counterparts ---
+after all, the encryption is intended to be transparent. However,
+astute users may notice some differences in behavior:
+
+- Unencrypted files, or files encrypted with a different encryption
+ policy (i.e. different key, modes, or flags), cannot be renamed or
+ linked into an encrypted directory; see `Encryption policy
+ enforcement`_. Attempts to do so will fail with EPERM. However,
+ encrypted files can be renamed within an encrypted directory, or
+ into an unencrypted directory.
+
+- Direct I/O is not supported on encrypted files. Attempts to use
+ direct I/O on such files will fall back to buffered I/O.
+
+- The fallocate operations FALLOC_FL_COLLAPSE_RANGE,
+ FALLOC_FL_INSERT_RANGE, and FALLOC_FL_ZERO_RANGE are not supported
+ on encrypted files and will fail with EOPNOTSUPP.
+
+- Online defragmentation of encrypted files is not supported. The
+ EXT4_IOC_MOVE_EXT and F2FS_IOC_MOVE_RANGE ioctls will fail with
+ EOPNOTSUPP.
+
+- The ext4 filesystem does not support data journaling with encrypted
+ regular files. It will fall back to ordered data mode instead.
+
+- DAX (Direct Access) is not supported on encrypted files.
+
+- The st_size of an encrypted symlink will not necessarily give the
+ length of the symlink target as required by POSIX. It will actually
+ give the length of the ciphertext, which may be slightly longer than
+ the plaintext due to the NUL-padding.
+
+Note that mmap *is* supported. This is possible because the pagecache
+for an encrypted file contains the plaintext, not the ciphertext.
+
+Without the key
+---------------
+
+Some filesystem operations may be performed on encrypted regular
+files, directories, and symlinks even before their encryption key has
+been provided:
+
+- File metadata may be read, e.g. using stat().
+
+- Directories may be listed, in which case the filenames will be
+ listed in an encoded form derived from their ciphertext. The
+ current encoding algorithm is described in `Filename hashing and
+ encoding`_. The algorithm is subject to change, but it is
+ guaranteed that the presented filenames will be no longer than
+ NAME_MAX bytes, will not contain the ``/`` or ``\0`` characters, and
+ will uniquely identify directory entries.
+
+ The ``.`` and ``..`` directory entries are special. They are always
+ present and are not encrypted or encoded.
+
+- Files may be deleted. That is, nondirectory files may be deleted
+ with unlink() as usual, and empty directories may be deleted with
+ rmdir() as usual. Therefore, ``rm`` and ``rm -r`` will work as
+ expected.
+
+- Symlink targets may be read and followed, but they will be presented
+ in encrypted form, similar to filenames in directories. Hence, they
+ are unlikely to point to anywhere useful.
+
+Without the key, regular files cannot be opened or truncated.
+Attempts to do so will fail with ENOKEY. This implies that any
+regular file operations that require a file descriptor, such as
+read(), write(), mmap(), fallocate(), and ioctl(), are also forbidden.
+
+Also without the key, files of any type (including directories) cannot
+be created or linked into an encrypted directory, nor can a name in an
+encrypted directory be the source or target of a rename, nor can an
+O_TMPFILE temporary file be created in an encrypted directory. All
+such operations will fail with ENOKEY.
+
+It is not currently possible to backup and restore encrypted files
+without the encryption key. This would require special APIs which
+have not yet been implemented.
+
+Encryption policy enforcement
+=============================
+
+After an encryption policy has been set on a directory, all regular
+files, directories, and symbolic links created in that directory
+(recursively) will inherit that encryption policy. Special files ---
+that is, named pipes, device nodes, and UNIX domain sockets --- will
+not be encrypted.
+
+Except for those special files, it is forbidden to have unencrypted
+files, or files encrypted with a different encryption policy, in an
+encrypted directory tree. Attempts to link or rename such a file into
+an encrypted directory will fail with EPERM. This is also enforced
+during ->lookup() to provide limited protection against offline
+attacks that try to disable or downgrade encryption in known locations
+where applications may later write sensitive data. It is recommended
+that systems implementing a form of "verified boot" take advantage of
+this by validating all top-level encryption policies prior to access.
+
+Implementation details
+======================
+
+Encryption context
+------------------
+
+An encryption policy is represented on-disk by a :c:type:`struct
+fscrypt_context`. It is up to individual filesystems to decide where
+to store it, but normally it would be stored in a hidden extended
+attribute. It should *not* be exposed by the xattr-related system
+calls such as getxattr() and setxattr() because of the special
+semantics of the encryption xattr. (In particular, there would be
+much confusion if an encryption policy were to be added to or removed
+from anything other than an empty directory.) The struct is defined
+as follows::
+
+ #define FS_KEY_DESCRIPTOR_SIZE 8
+ #define FS_KEY_DERIVATION_NONCE_SIZE 16
+
+ struct fscrypt_context {
+ u8 format;
+ u8 contents_encryption_mode;
+ u8 filenames_encryption_mode;
+ u8 flags;
+ u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+ u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
+ };
+
+Note that :c:type:`struct fscrypt_context` contains the same
+information as :c:type:`struct fscrypt_policy` (see `Setting an
+encryption policy`_), except that :c:type:`struct fscrypt_context`
+also contains a nonce. The nonce is randomly generated by the kernel
+and is used to derive the inode's encryption key as described in
+`Per-file keys`_.
+
+Data path changes
+-----------------
+
+For the read path (->readpage()) of regular files, filesystems can
+read the ciphertext into the page cache and decrypt it in-place. The
+page lock must be held until decryption has finished, to prevent the
+page from becoming visible to userspace prematurely.
+
+For the write path (->writepage()) of regular files, filesystems
+cannot encrypt data in-place in the page cache, since the cached
+plaintext must be preserved. Instead, filesystems must encrypt into a
+temporary buffer or "bounce page", then write out the temporary
+buffer. Some filesystems, such as UBIFS, already use temporary
+buffers regardless of encryption. Other filesystems, such as ext4 and
+F2FS, have to allocate bounce pages specially for encryption.
+
+Filename hashing and encoding
+-----------------------------
+
+Modern filesystems accelerate directory lookups by using indexed
+directories. An indexed directory is organized as a tree keyed by
+filename hashes. When a ->lookup() is requested, the filesystem
+normally hashes the filename being looked up so that it can quickly
+find the corresponding directory entry, if any.
+
+With encryption, lookups must be supported and efficient both with and
+without the encryption key. Clearly, it would not work to hash the
+plaintext filenames, since the plaintext filenames are unavailable
+without the key. (Hashing the plaintext filenames would also make it
+impossible for the filesystem's fsck tool to optimize encrypted
+directories.) Instead, filesystems hash the ciphertext filenames,
+i.e. the bytes actually stored on-disk in the directory entries. When
+asked to do a ->lookup() with the key, the filesystem just encrypts
+the user-supplied name to get the ciphertext.
+
+Lookups without the key are more complicated. The raw ciphertext may
+contain the ``\0`` and ``/`` characters, which are illegal in
+filenames. Therefore, readdir() must base64-encode the ciphertext for
+presentation. For most filenames, this works fine; on ->lookup(), the
+filesystem just base64-decodes the user-supplied name to get back to
+the raw ciphertext.
+
+However, for very long filenames, base64 encoding would cause the
+filename length to exceed NAME_MAX. To prevent this, readdir()
+actually presents long filenames in an abbreviated form which encodes
+a strong "hash" of the ciphertext filename, along with the optional
+filesystem-specific hash(es) needed for directory lookups. This
+allows the filesystem to still, with a high degree of confidence, map
+the filename given in ->lookup() back to a particular directory entry
+that was previously listed by readdir(). See :c:type:`struct
+fscrypt_digested_name` in the source for more details.
+
+Note that the precise way that filenames are presented to userspace
+without the key is subject to change in the future. It is only meant
+as a way to temporarily present valid filenames so that commands like
+``rm -r`` work as expected on encrypted directories.
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index 256e10eedba4..53b89d0edc15 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -315,3 +315,14 @@ exported for use by modules.
:internal:
.. kernel-doc:: fs/pipe.c
+
+Encryption API
+==============
+
+A library which filesystems can hook into to support transparent
+encryption of files and directories.
+
+.. toctree::
+ :maxdepth: 2
+
+ fscrypt
diff --git a/Documentation/filesystems/path-lookup.md b/Documentation/filesystems/path-lookup.md
index 1b39e084a2b2..1933ef734e63 100644
--- a/Documentation/filesystems/path-lookup.md
+++ b/Documentation/filesystems/path-lookup.md
@@ -826,9 +826,9 @@ If the filesystem may need to revalidate dcache entries, then
*is* passed the dentry but does not have access to the `inode` or the
`seq` number from the `nameidata`, so it needs to be extra careful
when accessing fields in the dentry. This "extra care" typically
-involves using `ACCESS_ONCE()` or the newer [`READ_ONCE()`] to access
-fields, and verifying the result is not NULL before using it. This
-pattern can be see in `nfs_lookup_revalidate()`.
+involves using [`READ_ONCE()`] to access fields, and verifying the
+result is not NULL before using it. This pattern can be seen in
+`nfs_lookup_revalidate()`.
A pair of patterns
------------------
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index adba21b5ada7..ec571b9bb18a 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -250,7 +250,6 @@ Table 1-2: Contents of the status files (as of 4.8)
VmExe size of text segment
VmLib size of shared library code
VmPTE size of page table entries
- VmPMD size of second level page tables
VmSwap amount of swap used by anonymous private data
(shmem swap usage is not included)
HugetlbPages size of hugetlb memory portions
diff --git a/Documentation/filesystems/udf.txt b/Documentation/filesystems/udf.txt
index 902b95d0ee51..d3d0e3218f86 100644
--- a/Documentation/filesystems/udf.txt
+++ b/Documentation/filesystems/udf.txt
@@ -1,11 +1,9 @@
*
* Documentation/filesystems/udf.txt
*
-UDF Filesystem version 0.9.8.1
If you encounter problems with reading UDF discs using this driver,
-please report them to linux_udf@hpesjro.fc.hp.com, which is the
-developer's list.
+please report them according to the MAINTAINERS file.
Write support requires a block driver which supports writing. Currently
dvd+rw drives and media support true random sector writes, and so a udf
@@ -73,10 +71,8 @@ The following expect a offset from the partition root.
For the latest version and toolset see:
- http://linux-udf.sourceforge.net/
+ https://github.com/pali/udftools
Documentation on UDF and ECMA 167 is available FREE from:
http://www.osta.org/
http://www.ecma-international.org/
-
-Ben Fennema <bfennema@falcon.csc.calpoly.edu>
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
index 912568baabb9..63e1bd1d88e3 100644
--- a/Documentation/gpio/consumer.txt
+++ b/Documentation/gpio/consumer.txt
@@ -10,14 +10,30 @@ Guidelines for GPIOs consumers
==============================
Drivers that can't work without standard GPIO calls should have Kconfig entries
-that depend on GPIOLIB. The functions that allow a driver to obtain and use
-GPIOs are available by including the following file:
+that depend on GPIOLIB or select GPIOLIB. The functions that allow a driver to
+obtain and use GPIOs are available by including the following file:
#include <linux/gpio/consumer.h>
+There are static inline stubs for all functions in the header file in the case
+where GPIOLIB is disabled. When these stubs are called they will emit
+warnings. These stubs are used for two use cases:
+
+- Simple compile coverage with e.g. COMPILE_TEST - it does not matter that
+ the current platform does not enable or select GPIOLIB because we are not
+ going to run the code on that system anyway.
+
+- Truly optional GPIOLIB support - where the driver does not really make use
+ of the GPIOs on certain compile-time configurations for certain systems, but
+ will use it under other compile-time configurations. In this case the
+ consumer must make sure not to call into these functions, or the user will
+ be met with console warnings that may be perceived as intimidating.
+
All the functions that work with the descriptor-based GPIO interface are
prefixed with gpiod_. The gpio_ prefix is used for the legacy interface. No
-other function in the kernel should use these prefixes.
+other function in the kernel should use these prefixes. The use of the legacy
+functions is strongly discouraged, new code should use <linux/gpio/consumer.h>
+and descriptors exclusively.
Obtaining and Disposing GPIOs
@@ -279,9 +295,22 @@ as possible, especially by drivers which should not care about the actual
physical line level and worry about the logical value instead.
-Set multiple GPIO outputs with a single function call
------------------------------------------------------
-The following functions set the output values of an array of GPIOs:
+Access multiple GPIOs with a single function call
+-------------------------------------------------
+The following functions get or set the values of an array of GPIOs:
+
+ int gpiod_get_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
+ int gpiod_get_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
+ int gpiod_get_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
+ int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
void gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -296,34 +325,40 @@ The following functions set the output values of an array of GPIOs:
struct gpio_desc **desc_array,
int *value_array)
-The array can be an arbitrary set of GPIOs. The functions will try to set
+The array can be an arbitrary set of GPIOs. The functions will try to access
GPIOs belonging to the same bank or chip simultaneously if supported by the
corresponding chip driver. In that case a significantly improved performance
-can be expected. If simultaneous setting is not possible the GPIOs will be set
-sequentially.
+can be expected. If simultaneous access is not possible the GPIOs will be
+accessed sequentially.
-The gpiod_set_array() functions take three arguments:
+The functions take three arguments:
* array_size - the number of array elements
* desc_array - an array of GPIO descriptors
- * value_array - an array of values to assign to the GPIOs
+ * value_array - an array to store the GPIOs' values (get) or
+ an array of values to assign to the GPIOs (set)
The descriptor array can be obtained using the gpiod_get_array() function
or one of its variants. If the group of descriptors returned by that function
-matches the desired group of GPIOs, those GPIOs can be set by simply using
+matches the desired group of GPIOs, those GPIOs can be accessed by simply using
the struct gpio_descs returned by gpiod_get_array():
struct gpio_descs *my_gpio_descs = gpiod_get_array(...);
gpiod_set_array_value(my_gpio_descs->ndescs, my_gpio_descs->desc,
my_gpio_values);
-It is also possible to set a completely arbitrary array of descriptors. The
+It is also possible to access a completely arbitrary array of descriptors. The
descriptors may be obtained using any combination of gpiod_get() and
gpiod_get_array(). Afterwards the array of descriptors has to be setup
-manually before it can be used with gpiod_set_array().
+manually before it can be passed to one of the above functions.
Note that for optimal performance GPIOs belonging to the same chip should be
contiguous within the array of descriptors.
+The return value of gpiod_get_array_value() and its variants is 0 on success
+or negative on error. Note the difference to gpiod_get_value(), which returns
+0 or 1 on success to convey the GPIO value. With the array functions, the GPIO
+values are stored in value_array rather than passed back as return value.
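+
+For example (an illustrative snippet; error handling is abbreviated), the
+values of a group of descriptors obtained with gpiod_get_array() can be read
+back into a plain int array:
+
+	int values[8];	/* must hold at least my_gpio_descs->ndescs values */
+	int err;
+
+	err = gpiod_get_array_value(my_gpio_descs->ndescs,
+				    my_gpio_descs->desc, values);
+	if (err)
+		return err;	/* values[] contents are not valid on error */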
+
GPIOs mapped to IRQs
--------------------
diff --git a/Documentation/gpio/driver.txt b/Documentation/gpio/driver.txt
index fc1d2f83564d..d8de1c7de85a 100644
--- a/Documentation/gpio/driver.txt
+++ b/Documentation/gpio/driver.txt
@@ -254,7 +254,7 @@ GPIO irqchips usually fall in one of two categories:
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
unsigned long wa_lock_flags;
raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);
- generic_handle_irq(irq_find_mapping(bank->chip.irqdomain, bit));
+ generic_handle_irq(irq_find_mapping(bank->chip.irq.domain, bit));
raw_spin_unlock_irqrestore(&bank->wa_lock, wa_lock_flags);
* GENERIC CHAINED GPIO irqchips: these are the same as "CHAINED GPIO irqchips",
@@ -313,8 +313,8 @@ symbol:
mark all the child IRQs as having the other IRQ as parent.
If there is a need to exclude certain GPIOs from the IRQ domain, you can
-set .irq_need_valid_mask of the gpiochip before gpiochip_add_data() is
-called. This allocates an .irq_valid_mask with as many bits set as there
+set .irq.need_valid_mask of the gpiochip before gpiochip_add_data() is
+called. This allocates an .irq.valid_mask with as many bits set as there
are GPIOs in the chip. Drivers can exclude GPIOs by clearing bits from this
mask. The mask must be filled in before gpiochip_irqchip_add() or
gpiochip_irqchip_add_nested() is called.
diff --git a/Documentation/gpio/gpio-legacy.txt b/Documentation/gpio/gpio-legacy.txt
index 5eacc147ea87..8356d0e78f67 100644
--- a/Documentation/gpio/gpio-legacy.txt
+++ b/Documentation/gpio/gpio-legacy.txt
@@ -273,8 +273,8 @@ easily, gating off unused clocks.
For GPIOs that use pins known to the pinctrl subsystem, that subsystem should
be informed of their use; a gpiolib driver's .request() operation may call
-pinctrl_request_gpio(), and a gpiolib driver's .free() operation may call
-pinctrl_free_gpio(). The pinctrl subsystem allows a pinctrl_request_gpio()
+pinctrl_gpio_request(), and a gpiolib driver's .free() operation may call
+pinctrl_gpio_free(). The pinctrl subsystem allows a pinctrl_gpio_request()
to succeed concurrently with a pin or pingroup being "owned" by a device for
pin multiplexing.
@@ -448,8 +448,8 @@ together with an optional gpio feature. We have already covered the
case where e.g. a GPIO controller need to reserve a pin or set the
direction of a pin by calling any of:
-pinctrl_request_gpio()
-pinctrl_free_gpio()
+pinctrl_gpio_request()
+pinctrl_gpio_free()
pinctrl_gpio_direction_input()
pinctrl_gpio_direction_output()
@@ -466,7 +466,7 @@ gpio (under gpiolib) is still maintained by gpio drivers. It may happen
that different pin ranges in a SoC is managed by different gpio drivers.
This makes it logical to let gpio drivers announce their pin ranges to
-the pin ctrl subsystem before it will call 'pinctrl_request_gpio' in order
+the pin ctrl subsystem before it will call 'pinctrl_gpio_request' in order
to request the corresponding pin to be prepared by the pinctrl subsystem
before any gpio usage.
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index 679373b4a03f..a2214cc1f821 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -168,6 +168,61 @@ IOCTL Support on Device Nodes
.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
:doc: driver specific ioctls
+Recommended IOCTL Return Values
+-------------------------------
+
+In theory a driver's IOCTL callback is only allowed to return very few error
+codes. In practice it's good to abuse a few more. This section documents common
+practice within the DRM subsystem:
+
+ENOENT:
+ Strictly this should only be used when a file doesn't exist e.g. when
+ calling the open() syscall. We reuse that to signal any kind of object
+ lookup failure, e.g. for unknown GEM buffer object handles, unknown KMS
+ object handles and similar cases.
+
+ENOSPC:
+ Some drivers use this to differentiate "out of kernel memory" from "out
+ of VRAM". Sometimes also applies to other limited gpu resources used for
+ rendering (e.g. when you have a special limited compression buffer).
+ Sometimes resource allocation/reservation issues in command submission
+ IOCTLs are also signalled through EDEADLK.
+
+ Simply running out of kernel/system memory is signalled through ENOMEM.
+
+EPERM/EACCES:
+ Returned for an operation that is valid, but needs more privileges.
+ E.g. root-only or, much more commonly, DRM master-only operations return
+ this when called by unprivileged clients. There's no clear
+ difference between EACCES and EPERM.
+
+ENODEV:
+ Feature (like PRIME, modesetting, GEM) is not supported by the driver.
+
+ENXIO:
+ Remote failure, either of a hardware transaction (like i2c), or a case
+ where the exporting driver of a shared dma-buf or fence doesn't support a
+ needed feature.
+
+EINTR:
+ DRM drivers assume that userspace restarts all IOCTLs. Any DRM IOCTL can
+ return EINTR and in such a case should be restarted with the IOCTL
+ parameters left unchanged; see the restart sketch after this list.
+
+EIO:
+ The GPU died and couldn't be resurrected through a reset. Modesetting
+ hardware failures are signalled through the "link status" connector
+ property.
+
+EINVAL:
+ Catch-all for anything that is an invalid argument combination which
+ cannot work.
+
+IOCTLs also use other error codes like ETIME, EFAULT, EBUSY and ENOTTY, but
+their usage is in line with the common meanings. The above list only tries to
+document DRM-specific patterns. Note that ENOTTY has the slightly unintuitive
+meaning of "this IOCTL does not exist", and is used exactly as such in DRM.
+
.. kernel-doc:: include/drm/drm_ioctl.h
:internal:
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
index 35d673bf9b56..c36586dad29d 100644
--- a/Documentation/gpu/index.rst
+++ b/Documentation/gpu/index.rst
@@ -15,6 +15,7 @@ Linux GPU Driver Developer's Guide
pl111
tegra
tinydrm
+ tve200
vc4
vga-switcheroo
vgaarbiter
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 22af55d06ab8..96f8ec7dbe4e 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -75,17 +75,6 @@ helpers.
Contact: Ville Syrjälä, Daniel Vetter, driver maintainers
-Implement deferred fbdev setup in the helper
---------------------------------------------
-
-Many (especially embedded drivers) want to delay fbdev setup until there's a
-real screen plugged in. This is to avoid the dreaded fallback to the low-res
-fbdev default. Many drivers have a hacked-up (and often broken) version of this,
-better to do it once in the shared helpers. Thierry has a patch series, but that
-one needs to be rebased and final polish applied.
-
-Contact: Thierry Reding, Daniel Vetter, driver maintainers
-
Convert early atomic drivers to async commit helpers
----------------------------------------------------
@@ -138,6 +127,8 @@ interfaces to fix these issues:
the acquire context explicitly on stack and then also pass it down into
drivers explicitly so that the legacy-on-atomic functions can use them.
+ Except for some driver code this is done.
+
* A bunch of the vtable hooks are now in the wrong place: DRM has a split
between core vfunc tables (named ``drm_foo_funcs``), which are used to
implement the userspace ABI. And then there's the optional hooks for the
@@ -151,6 +142,8 @@ interfaces to fix these issues:
connector at runtime. That's almost all of them, and would allow us to get
rid of a lot of ``best_encoder`` boilerplate in drivers.
+ This was almost done, but new drivers added a few more cases again.
+
Contact: Daniel Vetter
Get rid of dev->struct_mutex from GEM drivers
@@ -177,14 +170,19 @@ following drivers still use ``struct_mutex``: ``msm``, ``omapdrm`` and
Contact: Daniel Vetter, respective driver maintainers
-Core refactorings
-=================
+Convert instances of dev_info/dev_err/dev_warn to their DRM_DEV_* equivalent
+----------------------------------------------------------------------------
-Use new IDR deletion interface to clean up drm_gem_handle_delete()
-------------------------------------------------------------------
+For drivers which could have multiple instances, it is necessary to
+differentiate between which is which in the logs. Since DRM_INFO/WARN/ERROR
+don't do this, drivers used dev_info/warn/err to make this differentiation. We
+now have DRM_DEV_* variants of the drm print macros, so we can start to convert
+those drivers back to using drm-formatted specific log messages.
-See the "This is gross" comment -- apparently the IDR system now can return an
-error code instead of oopsing.
+Contact: Sean Paul, Maintainer of the driver you plan to convert
+
+Core refactorings
+=================
Clean up the DRM header mess
----------------------------
@@ -306,6 +304,18 @@ There's a bunch of issues with it:
Contact: Daniel Vetter
+KMS cleanups
+------------
+
+Some of these date from the very introduction of KMS in 2008 ...
+
+- drm_mode_config.crtc_idr is misnamed, since it contains all KMS objects. Should
+ be renamed to drm_mode_config.object_idr.
+
+- drm_display_mode doesn't need to be derived from drm_mode_object. That's
+ leftovers from older (never merged into upstream) KMS designs where modes
+ were set using their ID, including support to add/remove modes.
+
Better Testing
==============
@@ -353,7 +363,16 @@ those drivers as simple as possible, so lots of room for refactoring:
- backlight helpers, probably best to put them into a new drm_backlight.c.
This is because drivers/video is de-facto unmaintained. We could also
move drivers/video/backlight to drivers/gpu/backlight and take it all
- over within drm-misc, but that's more work.
+ over within drm-misc, but that's more work. Backlight helpers require a fair
+ bit of reworking and refactoring. A simple example is the enabling of a backlight.
+ Tinydrm has helpers for this. It would be good if other drivers can also use the
+ helper. However, there are various cases we need to consider i.e different
+ drivers seem to have different ways of enabling/disabling a backlight.
+ We also need to consider the backlight drivers (like gpio_backlight). The situation
+ is further complicated by the fact that the backlight is tied to fbdev
+ via fb_notifier_callback() which has complicated logic. For further details, refer
+ to the following discussion thread:
+ https://groups.google.com/forum/#!topic/outreachy-kernel/8rBe30lwtdA
- spi helpers, probably best put into spi core/helper code. Thierry said
the spi maintainer is fast&reactive, so shouldn't be a big issue.
diff --git a/Documentation/gpu/tve200.rst b/Documentation/gpu/tve200.rst
new file mode 100644
index 000000000000..69b17b324e12
--- /dev/null
+++ b/Documentation/gpu/tve200.rst
@@ -0,0 +1,6 @@
+==================================
+ drm/tve200 Faraday TV Encoder 200
+==================================
+
+.. kernel-doc:: drivers/gpu/drm/tve200/tve200_drv.c
+ :doc: Faraday TV Encoder 200
diff --git a/Documentation/hid/hiddev.txt b/Documentation/hid/hiddev.txt
index 6e8c9f1d2f22..638448707aa2 100644
--- a/Documentation/hid/hiddev.txt
+++ b/Documentation/hid/hiddev.txt
@@ -12,7 +12,7 @@ To support these disparate requirements, the Linux USB system provides
HID events to two separate interfaces:
* the input subsystem, which converts HID events into normal input
device interfaces (such as keyboard, mouse and joystick) and a
-normalised event interface - see Documentation/input/input.txt
+normalised event interface - see Documentation/input/input.rst
* the hiddev interface, which provides fairly raw HID events
The data flow for a HID event produced by a device is something like
diff --git a/Documentation/hwmon/max31785 b/Documentation/hwmon/max31785
new file mode 100644
index 000000000000..45fb6093dec2
--- /dev/null
+++ b/Documentation/hwmon/max31785
@@ -0,0 +1,51 @@
+Kernel driver max31785
+======================
+
+Supported chips:
+ * Maxim MAX31785, MAX31785A
+ Prefix: 'max31785' or 'max31785a'
+ Addresses scanned: -
+ Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX31785.pdf
+
+Author: Andrew Jeffery <andrew@aj.id.au>
+
+Description
+-----------
+
+The Maxim MAX31785 is a PMBus device providing closed-loop, multi-channel fan
+management with temperature and remote voltage sensing. Various fan control
+features are provided, including PWM frequency control, temperature hysteresis,
+dual tachometer measurements, and fan health monitoring.
+
+For dual rotor fan configuration, the MAX31785 exposes the slowest rotor of the
+two in the fan[1-4]_input attributes.
+
+Usage Notes
+-----------
+
+This driver does not probe for PMBus devices. You will have to instantiate
+devices explicitly.
+
+Sysfs attributes
+----------------
+
+fan[1-4]_alarm Fan alarm.
+fan[1-4]_fault Fan fault.
+fan[1-4]_input Fan RPM.
+
+in[1-6]_crit Critical maximum output voltage
+in[1-6]_crit_alarm Output voltage critical high alarm
+in[1-6]_input Measured output voltage
+in[1-6]_label "vout[18-23]"
+in[1-6]_lcrit Critical minimum output voltage
+in[1-6]_lcrit_alarm Output voltage critical low alarm
+in[1-6]_max Maximum output voltage
+in[1-6]_max_alarm Output voltage high alarm
+in[1-6]_min Minimum output voltage
+in[1-6]_min_alarm Output voltage low alarm
+
+temp[1-11]_crit Critical high temperature
+temp[1-11]_crit_alarm Chip temperature critical high alarm
+temp[1-11]_input Measured temperature
+temp[1-11]_max Maximum temperature
+temp[1-11]_max_alarm Chip temperature high alarm
diff --git a/Documentation/hwmon/sht15 b/Documentation/hwmon/sht15
index 778987d1856f..5e3207c3b177 100644
--- a/Documentation/hwmon/sht15
+++ b/Documentation/hwmon/sht15
@@ -42,8 +42,7 @@ chip. These coefficients are used to internally calibrate the signals from the
sensors. Disabling the reload of those coefficients allows saving 10ms for each
measurement and decrease power consumption, while losing on precision.
-Some options may be set directly in the sht15_platform_data structure
-or via sysfs attributes.
+Some options may be set via sysfs attributes.
Notes:
* The regulator supply name is set to "vcc".
diff --git a/Documentation/input/devices/xpad.rst b/Documentation/input/devices/xpad.rst
index 5a709ab77c8d..b8bd65962dd8 100644
--- a/Documentation/input/devices/xpad.rst
+++ b/Documentation/input/devices/xpad.rst
@@ -230,4 +230,5 @@ Historic Edits
2005-03-19 - Dominic Cerquetti <binary1230@yahoo.com>
- added stuff for dance pads, new d-pad->axes mappings
-Later changes may be viewed with 'git log Documentation/input/xpad.txt'
+Later changes may be viewed with
+'git log --follow Documentation/input/devices/xpad.rst'
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index f6f80380dff2..71e9feefb63c 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -1158,7 +1158,6 @@ When kbuild executes, the following steps are followed (roughly):
Example:
targets += $(dtb-y)
- clean-files += *.dtb
DTC_FLAGS ?= -p 1024
--- 6.8 Custom kbuild commands
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 2335715bf471..22208bf2386d 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -8,7 +8,7 @@ Kernel Probes (Kprobes)
.. CONTENTS
- 1. Concepts: Kprobes, Jprobes, Return Probes
+ 1. Concepts: Kprobes and Return Probes
2. Architectures Supported
3. Configuring Kprobes
4. API Reference
@@ -16,12 +16,12 @@ Kernel Probes (Kprobes)
6. Probe Overhead
7. TODO
8. Kprobes Example
- 9. Jprobes Example
- 10. Kretprobes Example
+ 9. Kretprobes Example
+ 10. Deprecated Features
Appendix A: The kprobes debugfs interface
Appendix B: The kprobes sysctl interface
-Concepts: Kprobes, Jprobes, Return Probes
+Concepts: Kprobes and Return Probes
=========================================
Kprobes enables you to dynamically break into any kernel routine and
@@ -32,12 +32,10 @@ routine to be invoked when the breakpoint is hit.
.. [1] some parts of the kernel code can not be trapped, see
:ref:`kprobes_blacklist`)
-There are currently three types of probes: kprobes, jprobes, and
-kretprobes (also called return probes). A kprobe can be inserted
-on virtually any instruction in the kernel. A jprobe is inserted at
-the entry to a kernel function, and provides convenient access to the
-function's arguments. A return probe fires when a specified function
-returns.
+There are currently two types of probes: kprobes and kretprobes
+(also called return probes). A kprobe can be inserted on virtually
+any instruction in the kernel. A return probe fires when a specified
+function returns.
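+
+For orientation, a minimal kprobe module might look like the following
+sketch (the probed symbol is only an example; see
+samples/kprobes/kprobe_example.c for the maintained, tested version)::
+
+    #include <linux/kprobes.h>
+    #include <linux/module.h>
+    #include <linux/ptrace.h>
+
+    /* Runs just before the probed instruction executes. */
+    static int handler_pre(struct kprobe *p, struct pt_regs *regs)
+    {
+            pr_info("kprobe hit at %pS\n", (void *)instruction_pointer(regs));
+            return 0;
+    }
+
+    static struct kprobe kp = {
+            .symbol_name = "do_sys_open",   /* example target only */
+            .pre_handler = handler_pre,
+    };
+
+    static int __init kp_init(void)
+    {
+            return register_kprobe(&kp);
+    }
+
+    static void __exit kp_exit(void)
+    {
+            unregister_kprobe(&kp);
+    }
+
+    module_init(kp_init);
+    module_exit(kp_exit);
+    MODULE_LICENSE("GPL");
+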
In the typical case, Kprobes-based instrumentation is packaged as
a kernel module. The module's init function installs ("registers")
@@ -82,45 +80,6 @@ After the instruction is single-stepped, Kprobes executes the
"post_handler," if any, that is associated with the kprobe.
Execution then continues with the instruction following the probepoint.
-How Does a Jprobe Work?
------------------------
-
-A jprobe is implemented using a kprobe that is placed on a function's
-entry point. It employs a simple mirroring principle to allow
-seamless access to the probed function's arguments. The jprobe
-handler routine should have the same signature (arg list and return
-type) as the function being probed, and must always end by calling
-the Kprobes function jprobe_return().
-
-Here's how it works. When the probe is hit, Kprobes makes a copy of
-the saved registers and a generous portion of the stack (see below).
-Kprobes then points the saved instruction pointer at the jprobe's
-handler routine, and returns from the trap. As a result, control
-passes to the handler, which is presented with the same register and
-stack contents as the probed function. When it is done, the handler
-calls jprobe_return(), which traps again to restore the original stack
-contents and processor state and switch to the probed function.
-
-By convention, the callee owns its arguments, so gcc may produce code
-that unexpectedly modifies that portion of the stack. This is why
-Kprobes saves a copy of the stack and restores it after the jprobe
-handler has run. Up to MAX_STACK_SIZE bytes are copied -- e.g.,
-64 bytes on i386.
-
-Note that the probed function's args may be passed on the stack
-or in registers. The jprobe will work in either case, so long as the
-handler's prototype matches that of the probed function.
-
-Note that in some architectures (e.g.: arm64 and sparc64) the stack
-copy is not done, as the actual location of stacked parameters may be
-outside of a reasonable MAX_STACK_SIZE value and because that location
-cannot be determined by the jprobes code. In this case the jprobes
-user must be careful to make certain the calling signature of the
-function does not cause parameters to be passed on the stack (e.g.:
-more than eight function arguments, an argument of more than sixteen
-bytes, or more than 64 bytes of argument data, depending on
-architecture).
-
Return Probes
-------------
@@ -245,8 +204,7 @@ Pre-optimization
After preparing the detour buffer, Kprobes verifies that none of the
following situations exist:
-- The probe has either a break_handler (i.e., it's a jprobe) or a
- post_handler.
+- The probe has a post_handler.
- Other instructions in the optimized region are probed.
- The probe is disabled.
@@ -331,7 +289,7 @@ rejects registering it, if the given address is in the blacklist.
Architectures Supported
=======================
-Kprobes, jprobes, and return probes are implemented on the following
+Kprobes and return probes are implemented on the following
architectures:
- i386 (Supports jump optimization)
@@ -446,27 +404,6 @@ architecture-specific trap number associated with the fault (e.g.,
on i386, 13 for a general protection fault or 14 for a page fault).
Returns 1 if it successfully handled the exception.
-register_jprobe
----------------
-
-::
-
- #include <linux/kprobes.h>
- int register_jprobe(struct jprobe *jp)
-
-Sets a breakpoint at the address jp->kp.addr, which must be the address
-of the first instruction of a function. When the breakpoint is hit,
-Kprobes runs the handler whose address is jp->entry.
-
-The handler should have the same arg list and return type as the probed
-function; and just before it returns, it must call jprobe_return().
-(The handler never actually returns, since jprobe_return() returns
-control to Kprobes.) If the probed function is declared asmlinkage
-or anything else that affects how args are passed, the handler's
-declaration must match.
-
-register_jprobe() returns 0 on success, or a negative errno otherwise.
-
register_kretprobe
------------------
@@ -513,7 +450,6 @@ unregister_*probe
#include <linux/kprobes.h>
void unregister_kprobe(struct kprobe *kp);
- void unregister_jprobe(struct jprobe *jp);
void unregister_kretprobe(struct kretprobe *rp);
Removes the specified probe. The unregister function can be called
@@ -532,7 +468,6 @@ register_*probes
#include <linux/kprobes.h>
int register_kprobes(struct kprobe **kps, int num);
int register_kretprobes(struct kretprobe **rps, int num);
- int register_jprobes(struct jprobe **jps, int num);
Registers each of the num probes in the specified array. If any
error occurs during registration, all probes in the array, up to
@@ -555,7 +490,6 @@ unregister_*probes
#include <linux/kprobes.h>
void unregister_kprobes(struct kprobe **kps, int num);
void unregister_kretprobes(struct kretprobe **rps, int num);
- void unregister_jprobes(struct jprobe **jps, int num);
Removes each of the num probes in the specified array at once.
@@ -574,7 +508,6 @@ disable_*probe
#include <linux/kprobes.h>
int disable_kprobe(struct kprobe *kp);
int disable_kretprobe(struct kretprobe *rp);
- int disable_jprobe(struct jprobe *jp);
Temporarily disables the specified ``*probe``. You can enable it again by using
enable_*probe(). You must specify the probe which has been registered.
@@ -587,7 +520,6 @@ enable_*probe
#include <linux/kprobes.h>
int enable_kprobe(struct kprobe *kp);
int enable_kretprobe(struct kretprobe *rp);
- int enable_jprobe(struct jprobe *jp);
Enables ``*probe`` which has been disabled by disable_*probe(). You must specify
the probe which has been registered.
@@ -595,12 +527,10 @@ the probe which has been registered.
Kprobes Features and Limitations
================================
-Kprobes allows multiple probes at the same address. Currently,
-however, there cannot be multiple jprobes on the same function at
-the same time. Also, a probepoint for which there is a jprobe or
-a post_handler cannot be optimized. So if you install a jprobe,
-or a kprobe with a post_handler, at an optimized probepoint, the
-probepoint will be unoptimized automatically.
+Kprobes allows multiple probes at the same address. Also,
+a probepoint for which there is a post_handler cannot be optimized.
+So if you install a kprobe with a post_handler at an optimized
+probepoint, the probepoint will be unoptimized automatically.
In general, you can install a probe anywhere in the kernel.
In particular, you can probe interrupt handlers. Known exceptions
@@ -662,7 +592,7 @@ We're unaware of other specific cases where this could be a problem.
If, upon entry to or exit from a function, the CPU is running on
a stack other than that of the current task, registering a return
probe on that function may produce undesirable results. For this
-reason, Kprobes doesn't support return probes (or kprobes or jprobes)
+reason, Kprobes doesn't support return probes (or kprobes)
on the x86_64 version of __switch_to(); the registration functions
return -EINVAL.
@@ -706,24 +636,24 @@ Probe Overhead
On a typical CPU in use in 2005, a kprobe hit takes 0.5 to 1.0
microseconds to process. Specifically, a benchmark that hits the same
probepoint repeatedly, firing a simple handler each time, reports 1-2
-million hits per second, depending on the architecture. A jprobe or
-return-probe hit typically takes 50-75% longer than a kprobe hit.
+million hits per second, depending on the architecture. A return-probe
+hit typically takes 50-75% longer than a kprobe hit.
When you have a return probe set on a function, adding a kprobe at
the entry to that function adds essentially no overhead.
Here are sample overhead figures (in usec) for different architectures::
- k = kprobe; j = jprobe; r = return probe; kr = kprobe + return probe
- on same function; jr = jprobe + return probe on same function::
+ k = kprobe; r = return probe; kr = kprobe + return probe
+ on same function
i386: Intel Pentium M, 1495 MHz, 2957.31 bogomips
- k = 0.57 usec; j = 1.00; r = 0.92; kr = 0.99; jr = 1.40
+ k = 0.57 usec; r = 0.92; kr = 0.99
x86_64: AMD Opteron 246, 1994 MHz, 3971.48 bogomips
- k = 0.49 usec; j = 0.76; r = 0.80; kr = 0.82; jr = 1.07
+ k = 0.49 usec; r = 0.80; kr = 0.82
ppc64: POWER5 (gr), 1656 MHz (SMT disabled, 1 virtual CPU per physical CPU)
- k = 0.77 usec; j = 1.31; r = 1.26; kr = 1.45; jr = 1.99
+ k = 0.77 usec; r = 1.26; kr = 1.45
Optimized Probe Overhead
------------------------
@@ -755,11 +685,6 @@ Kprobes Example
See samples/kprobes/kprobe_example.c
-Jprobes Example
-===============
-
-See samples/kprobes/jprobe_example.c
-
Kretprobes Example
==================
@@ -772,6 +697,37 @@ For additional information on Kprobes, refer to the following URLs:
- http://www-users.cs.umn.edu/~boutcher/kprobes/
- http://www.linuxsymposium.org/2006/linuxsymposium_procv2.pdf (pages 101-115)
+Deprecated Features
+===================
+
+Jprobes is now a deprecated feature. People who depend on it should
+migrate to other tracing features or use older kernels. Please consider
+migrating your tools to one of the following options:
+
+- Use trace events to trace the target function with arguments.
+
+ A trace event is a low-overhead, statically defined event interface (with
+ almost no visible overhead when it is off). You can define new events and
+ trace them via ftrace or any other tracing tool (a small example follows
+ this list).
+
+ See the following URLs:
+
+ - https://lwn.net/Articles/379903/
+ - https://lwn.net/Articles/381064/
+ - https://lwn.net/Articles/383362/
+
+- Use ftrace dynamic events (kprobe events) with perf-probe.
+
+ If you build your kernel with debug info (CONFIG_DEBUG_INFO=y), you can
+ find out which register or stack slot holds which local variable or
+ argument by using perf-probe, and set up a new event to trace it.
+
+ See the following documents:
+
+ - Documentation/trace/kprobetrace.txt
+ - Documentation/trace/events.txt
+ - tools/perf/Documentation/perf-probe.txt
+
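+As a sketch of the first option, a statically defined trace event for a
+function's entry might look like this (the names and the argument are
+illustrative; see samples/trace_events/ and the URLs above for complete,
+buildable examples)::
+
+    TRACE_EVENT(my_func_entry,
+
+            TP_PROTO(unsigned long arg0),
+
+            TP_ARGS(arg0),
+
+            TP_STRUCT__entry(
+                    __field(unsigned long, arg0)
+            ),
+
+            TP_fast_assign(
+                    __entry->arg0 = arg0;
+            ),
+
+            TP_printk("arg0=%lu", __entry->arg0)
+    );
+
+The instrumented function then calls trace_my_func_entry(arg0) at its entry,
+and the event can be enabled and read through tracefs like any other event.
+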
The kprobes debugfs interface
=============================
@@ -783,14 +739,13 @@ under the /sys/kernel/debug/kprobes/ directory (assuming debugfs is mounted at /
/sys/kernel/debug/kprobes/list: Lists all registered probes on the system::
c015d71a k vfs_read+0x0
- c011a316 j do_fork+0x0
c03dedc5 r tcp_v4_rcv+0x0
The first column provides the kernel address where the probe is inserted.
-The second column identifies the type of probe (k - kprobe, r - kretprobe
-and j - jprobe), while the third column specifies the symbol+offset of
-the probe. If the probed function belongs to a module, the module name
-is also specified. Following columns show probe status. If the probe is on
+The second column identifies the type of probe (k - kprobe and r - kretprobe),
+while the third column specifies the symbol+offset of the probe.
+If the probed function belongs to a module, the module name is also
+specified. The following columns show the probe status. If the probe is on
a virtual address that is no longer valid (module init sections, module
virtual addresses that correspond to modules that've been unloaded),
such probes are marked with [GONE]. If the probe is temporarily disabled,
diff --git a/Documentation/laptops/laptop-mode.txt b/Documentation/laptops/laptop-mode.txt
index 19276f5d195c..1c707fc9b141 100644
--- a/Documentation/laptops/laptop-mode.txt
+++ b/Documentation/laptops/laptop-mode.txt
@@ -184,7 +184,7 @@ is done when dirty_ratio is reached.
DO_CPU:
Enable CPU frequency scaling when in laptop mode. (Requires CPUFreq to be setup.
-See Documentation/cpu-freq/user-guide.txt for more info. Disabled by default.)
+See Documentation/admin-guide/pm/cpufreq.rst for more info. Disabled by default.)
CPU_MAXFREQ:
@@ -287,7 +287,7 @@ MINIMUM_BATTERY_MINUTES=10
# Should the maximum CPU frequency be adjusted down while on battery?
# Requires CPUFreq to be setup.
-# See Documentation/cpu-freq/user-guide.txt for more info
+# See Documentation/admin-guide/pm/cpufreq.rst for more info
#DO_CPU=0
# When on battery what is the maximum CPU speed that the system should
@@ -378,7 +378,7 @@ BATT_HD=${BATT_HD:-'4'}
DIRTY_RATIO=${DIRTY_RATIO:-'40'}
# cpu frequency scaling
-# See Documentation/cpu-freq/user-guide.txt for more info
+# See Documentation/admin-guide/pm/cpufreq.rst for more info
DO_CPU=${CPU_MANAGE:-'0'}
CPU_MAXFREQ=${CPU_MAXFREQ:-'slowest'}
diff --git a/Documentation/leds/00-INDEX b/Documentation/leds/00-INDEX
index b4ef1f34e25f..ae626b29a740 100644
--- a/Documentation/leds/00-INDEX
+++ b/Documentation/leds/00-INDEX
@@ -4,6 +4,10 @@ leds-blinkm.txt
- Driver for BlinkM LED-devices.
leds-class.txt
- documents LED handling under Linux.
+leds-class-flash.txt
+ - documents flash LED handling under Linux.
+leds-lm3556.txt
+ - notes on how to use the leds-lm3556 driver.
leds-lp3944.txt
- notes on how to use the leds-lp3944 driver.
leds-lp5521.txt
@@ -16,7 +20,13 @@ leds-lp55xx.txt
- description about lp55xx common driver.
leds-lm3556.txt
- notes on how to use the leds-lm3556 driver.
+leds-mlxcpld.txt
+ - notes on how to use the leds-mlxcpld driver.
ledtrig-oneshot.txt
- One-shot LED trigger for both sporadic and dense events.
ledtrig-transient.txt
- LED Transient Trigger, one shot timer activation.
+ledtrig-usbport.txt
+ - notes on how to use the drivers/usb/core/ledtrig-usbport.c trigger.
+uleds.txt
+ - notes on how to use the uleds driver.
diff --git a/Documentation/livepatch/callbacks.txt b/Documentation/livepatch/callbacks.txt
new file mode 100644
index 000000000000..c9776f48e458
--- /dev/null
+++ b/Documentation/livepatch/callbacks.txt
@@ -0,0 +1,605 @@
+======================
+(Un)patching Callbacks
+======================
+
+Livepatch (un)patch-callbacks provide a mechanism for livepatch modules
+to execute callback functions when a kernel object is (un)patched. They
+can be considered a "power feature" that extends livepatching abilities
+to include:
+
+ - Safe updates to global data
+
+ - "Patches" to init and probe functions
+
+ - Patching otherwise unpatchable code (i.e. assembly)
+
+In most cases, (un)patch callbacks will need to be used in conjunction
+with memory barriers and kernel synchronization primitives, like
+mutexes/spinlocks, or even stop_machine(), to avoid concurrency issues.
+
+Callbacks differ from existing kernel facilities:
+
+ - Module init/exit code doesn't run when disabling and re-enabling a
+ patch.
+
+ - A module notifier can't stop a to-be-patched module from loading.
+
+Callbacks are part of the klp_object structure and their implementation
+is specific to that klp_object. Other livepatch objects may or may not
+be patched, irrespective of the target klp_object's current state.
+
+Callbacks can be registered for the following livepatch actions:
+
+ * Pre-patch - before a klp_object is patched
+
+ * Post-patch - after a klp_object has been patched and is active
+ across all tasks
+
+ * Pre-unpatch - before a klp_object is unpatched (ie, patched code is
+ active), used to clean up post-patch callback
+ resources
+
+ * Post-unpatch - after a klp_object has been patched, all code has
+ been restored and no tasks are running patched code,
+ used to cleanup pre-patch callback resources
+
+Each callback is optional; omitting one does not preclude specifying any
+other. However, the livepatching core executes the handlers in
+symmetry: pre-patch callbacks have a post-unpatch counterpart and
+post-patch callbacks have a pre-unpatch counterpart. An unpatch
+callback will only be executed if its corresponding patch callback was
+executed. Typical use cases pair a patch handler that acquires and
+configures resources with an unpatch handler that tears down and
+releases those same resources.
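+
+Wiring the callbacks up happens in the livepatch module's klp_object
+definitions. The following is a minimal sketch, loosely modeled on
+samples/livepatch/livepatch-callbacks-demo.c; the handler names are
+illustrative and the .funcs function redirections are omitted:
+
+#include <linux/livepatch.h>
+#include <linux/module.h>
+
+static int demo_pre_patch(struct klp_object *obj)
+{
+	pr_info("pre_patch: %s\n", obj->name ? obj->name : "vmlinux");
+	return 0;	/* returning non-zero aborts patching of this object */
+}
+
+static void demo_post_patch(struct klp_object *obj)
+{
+	pr_info("post_patch: %s\n", obj->name ? obj->name : "vmlinux");
+}
+
+static void demo_pre_unpatch(struct klp_object *obj)
+{
+	pr_info("pre_unpatch: %s\n", obj->name ? obj->name : "vmlinux");
+}
+
+static void demo_post_unpatch(struct klp_object *obj)
+{
+	pr_info("post_unpatch: %s\n", obj->name ? obj->name : "vmlinux");
+}
+
+static struct klp_object objs[] = {
+	{
+		/* .name = NULL targets vmlinux; .funcs (not shown here)
+		 * lists the function redirections for this object */
+		.callbacks = {
+			.pre_patch = demo_pre_patch,
+			.post_patch = demo_post_patch,
+			.pre_unpatch = demo_pre_unpatch,
+			.post_unpatch = demo_post_unpatch,
+		},
+	}, { }
+};
+
+static struct klp_patch patch = {
+	.mod = THIS_MODULE,
+	.objs = objs,
+};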
+
+A callback is only executed if its host klp_object is loaded. For
+in-kernel vmlinux targets, this means that callbacks will always execute
+when a livepatch is enabled/disabled. For patch target kernel modules,
+callbacks will only execute if the target module is loaded. When a
+module target is (un)loaded, its callbacks will execute only if the
+livepatch module is enabled.
+
+The pre-patch callback, if specified, is expected to return a status
+code (0 for success, -ERRNO on error). An error status code indicates
+to the livepatching core that patching of the current klp_object is not
+safe and to stop the current patching request. (When no pre-patch
+callback is provided, the transition is assumed to be safe.) If a
+pre-patch callback returns failure, the kernel's module loader will:
+
+ - Refuse to load a livepatch, if the livepatch is loaded after
+ targeted code.
+
+ or:
+
+ - Refuse to load a module, if the livepatch was already successfully
+ loaded.
+
+No post-patch, pre-unpatch, or post-unpatch callbacks will be executed
+for a given klp_object if the object failed to patch, due to a failed
+pre_patch callback or for any other reason.
+
+If a patch transition is reversed, no pre-unpatch handlers will be run
+(this follows the previously mentioned symmetry -- pre-unpatch callbacks
+will only occur if their corresponding post-patch callback executed).
+
+If the object did successfully patch, but the patch transition never
+started for some reason (e.g., if another object failed to patch),
+only the post-unpatch callback will be called.
+
+
+Example Use-cases
+=================
+
+Update global data
+------------------
+
+A pre-patch callback can be useful to update a global variable. For
+example, 75ff39ccc1bd ("tcp: make challenge acks less predictable")
+changes a global sysctl as well as patching the tcp_send_challenge_ack()
+function.
+
+In this case, if we're being super paranoid, it might make sense to
+update the data *after* patching is complete, using a post-patch callback,
+so that tcp_send_challenge_ack() could first be changed to read
+sysctl_tcp_challenge_ack_limit with READ_ONCE.
+
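+Sketched as code (the handler name and the value are illustrative, and
+whether the sysctl variable is directly reachable from a patch module is an
+assumption made only for this sketch):
+
+/* assumption: global sysctl, as in the era of the original fix */
+extern int sysctl_tcp_challenge_ack_limit;
+
+static void demo_post_patch_callback(struct klp_object *obj)
+{
+	/*
+	 * Runs only after the transition completes, i.e. once every task is
+	 * guaranteed to call the patched tcp_send_challenge_ack(), which
+	 * reads the limit with READ_ONCE().
+	 */
+	WRITE_ONCE(sysctl_tcp_challenge_ack_limit, 1000);	/* example value */
+}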
+
+Support __init and probe function patches
+-----------------------------------------
+
+Although __init and probe functions are not directly livepatch-able, it
+may be possible to implement similar updates via pre/post-patch
+callbacks.
+
+48900cb6af42 ("virtio-net: drop NETIF_F_FRAGLIST") changed the way that
+virtnet_probe() initialized its driver's net_device features. A
+pre/post-patch callback could iterate over all such devices, making a
+similar change to their hw_features value. (Client functions of the
+value may need to be updated accordingly.)
+
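+A rough sketch of such a callback follows (the handler name is illustrative,
+and a real patch would additionally have to identify which net_devices are
+actually virtio_net instances before touching them):
+
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/net_namespace.h>
+
+static void demo_post_patch_callback(struct klp_object *obj)
+{
+	struct net_device *dev;
+
+	rtnl_lock();
+	for_each_netdev(&init_net, dev) {
+		/* assumption: dev has been verified to be a virtio_net device;
+		 * a complete fix may also need to re-evaluate dev->features */
+		dev->hw_features &= ~NETIF_F_FRAGLIST;
+	}
+	rtnl_unlock();
+}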
+
+Test cases
+==========
+
+What follows is not an exhaustive test suite of every possible livepatch
+pre/post-(un)patch combination, but a selection that demonstrates a few
+important concepts. Each test case uses the kernel modules located in
+the samples/livepatch/ directory and assumes that no livepatches are loaded at the
+beginning of the test.
+
+
+Test 1
+------
+
+Test a combination of loading a kernel module and a livepatch that
+patches a function in the first module. (Un)load the target module
+before the livepatch module:
+
+- load target module
+- load livepatch
+- disable livepatch
+- unload target module
+- unload livepatch
+
+First load a target module:
+
+ % insmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 34.475708] livepatch_callbacks_mod: livepatch_callbacks_mod_init
+
+On livepatch enable, before the livepatch transition starts, pre-patch
+callbacks are executed for vmlinux and livepatch_callbacks_mod (those
+klp_objects currently loaded). After klp_objects are patched according
+to the klp_patch, their post-patch callbacks run and the transition
+completes:
+
+ % insmod samples/livepatch/livepatch-callbacks-demo.ko
+ [ 36.503719] livepatch: enabling patch 'livepatch_callbacks_demo'
+ [ 36.504213] livepatch: 'livepatch_callbacks_demo': initializing patching transition
+ [ 36.504238] livepatch_callbacks_demo: pre_patch_callback: vmlinux
+ [ 36.504721] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state
+ [ 36.505849] livepatch: 'livepatch_callbacks_demo': starting patching transition
+ [ 37.727133] livepatch: 'livepatch_callbacks_demo': completing patching transition
+ [ 37.727232] livepatch_callbacks_demo: post_patch_callback: vmlinux
+ [ 37.727860] livepatch_callbacks_demo: post_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state
+ [ 37.728792] livepatch: 'livepatch_callbacks_demo': patching complete
+
+Similarly, on livepatch disable, pre-unpatch callbacks run before the
+unpatching transition starts. klp_objects are reverted, post-unpatch
+callbacks execute and the transition completes:
+
+ % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled
+ [ 38.510209] livepatch: 'livepatch_callbacks_demo': initializing unpatching transition
+ [ 38.510234] livepatch_callbacks_demo: pre_unpatch_callback: vmlinux
+ [ 38.510982] livepatch_callbacks_demo: pre_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state
+ [ 38.512209] livepatch: 'livepatch_callbacks_demo': starting unpatching transition
+ [ 39.711132] livepatch: 'livepatch_callbacks_demo': completing unpatching transition
+ [ 39.711210] livepatch_callbacks_demo: post_unpatch_callback: vmlinux
+ [ 39.711779] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state
+ [ 39.712735] livepatch: 'livepatch_callbacks_demo': unpatching complete
+
+ % rmmod samples/livepatch/livepatch-callbacks-demo.ko
+ % rmmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 42.534183] livepatch_callbacks_mod: livepatch_callbacks_mod_exit
+
+
+Test 2
+------
+
+This test is similar to the previous test, but (un)load the livepatch
+module before the target kernel module. This tests the livepatch core's
+module_coming handler:
+
+- load livepatch
+- load target module
+- disable livepatch
+- unload livepatch
+- unload target module
+
+
+On livepatch enable, only pre/post-patch callbacks are executed for
+currently loaded klp_objects, in this case, vmlinux:
+
+ % insmod samples/livepatch/livepatch-callbacks-demo.ko
+ [ 44.553328] livepatch: enabling patch 'livepatch_callbacks_demo'
+ [ 44.553997] livepatch: 'livepatch_callbacks_demo': initializing patching transition
+ [ 44.554049] livepatch_callbacks_demo: pre_patch_callback: vmlinux
+ [ 44.554845] livepatch: 'livepatch_callbacks_demo': starting patching transition
+ [ 45.727128] livepatch: 'livepatch_callbacks_demo': completing patching transition
+ [ 45.727212] livepatch_callbacks_demo: post_patch_callback: vmlinux
+ [ 45.727961] livepatch: 'livepatch_callbacks_demo': patching complete
+
+When a targeted module is subsequently loaded, only its pre/post-patch
+callbacks are executed:
+
+ % insmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 46.560845] livepatch: applying patch 'livepatch_callbacks_demo' to loading module 'livepatch_callbacks_mod'
+ [ 46.561988] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init
+ [ 46.563452] livepatch_callbacks_demo: post_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init
+ [ 46.565495] livepatch_callbacks_mod: livepatch_callbacks_mod_init
+
+On livepatch disable, all currently loaded klp_objects' (vmlinux and
+livepatch_callbacks_mod) pre/post-unpatch callbacks are executed:
+
+ % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled
+ [ 48.568885] livepatch: 'livepatch_callbacks_demo': initializing unpatching transition
+ [ 48.568910] livepatch_callbacks_demo: pre_unpatch_callback: vmlinux
+ [ 48.569441] livepatch_callbacks_demo: pre_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state
+ [ 48.570502] livepatch: 'livepatch_callbacks_demo': starting unpatching transition
+ [ 49.759091] livepatch: 'livepatch_callbacks_demo': completing unpatching transition
+ [ 49.759171] livepatch_callbacks_demo: post_unpatch_callback: vmlinux
+ [ 49.759742] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state
+ [ 49.760690] livepatch: 'livepatch_callbacks_demo': unpatching complete
+
+ % rmmod samples/livepatch/livepatch-callbacks-demo.ko
+ % rmmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 52.592283] livepatch_callbacks_mod: livepatch_callbacks_mod_exit
+
+
+Test 3
+------
+
+Test loading the livepatch after a targeted kernel module, then unload
+the kernel module before disabling the livepatch. This tests the
+livepatch core's module_going handler:
+
+- load target module
+- load livepatch
+- unload target module
+- disable livepatch
+- unload livepatch
+
+First load a target module, then the livepatch:
+
+ % insmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 54.607948] livepatch_callbacks_mod: livepatch_callbacks_mod_init
+
+ % insmod samples/livepatch/livepatch-callbacks-demo.ko
+ [ 56.613919] livepatch: enabling patch 'livepatch_callbacks_demo'
+ [ 56.614411] livepatch: 'livepatch_callbacks_demo': initializing patching transition
+ [ 56.614436] livepatch_callbacks_demo: pre_patch_callback: vmlinux
+ [ 56.614818] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state
+ [ 56.615656] livepatch: 'livepatch_callbacks_demo': starting patching transition
+ [ 57.759070] livepatch: 'livepatch_callbacks_demo': completing patching transition
+ [ 57.759147] livepatch_callbacks_demo: post_patch_callback: vmlinux
+ [ 57.759621] livepatch_callbacks_demo: post_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state
+ [ 57.760307] livepatch: 'livepatch_callbacks_demo': patching complete
+
+When a target module is unloaded, the livepatch is only reverted from
+that klp_object (livepatch_callbacks_mod). As such, only its pre and
+post-unpatch callbacks are executed when this occurs:
+
+ % rmmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 58.623409] livepatch_callbacks_mod: livepatch_callbacks_mod_exit
+ [ 58.623903] livepatch_callbacks_demo: pre_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_GOING] Going away
+ [ 58.624658] livepatch: reverting patch 'livepatch_callbacks_demo' on unloading module 'livepatch_callbacks_mod'
+ [ 58.625305] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_GOING] Going away
+
+When the livepatch is disabled, pre and post-unpatch callbacks are run
+for the remaining klp_object, vmlinux:
+
+ % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled
+ [ 60.638420] livepatch: 'livepatch_callbacks_demo': initializing unpatching transition
+ [ 60.638444] livepatch_callbacks_demo: pre_unpatch_callback: vmlinux
+ [ 60.638996] livepatch: 'livepatch_callbacks_demo': starting unpatching transition
+ [ 61.727088] livepatch: 'livepatch_callbacks_demo': completing unpatching transition
+ [ 61.727165] livepatch_callbacks_demo: post_unpatch_callback: vmlinux
+ [ 61.727985] livepatch: 'livepatch_callbacks_demo': unpatching complete
+
+ % rmmod samples/livepatch/livepatch-callbacks-demo.ko
+
+
+Test 4
+------
+
+This test is similar to the previous test, however the livepatch is
+loaded first. This tests the livepatch core's module_coming and
+module_going handlers:
+
+- load livepatch
+- load target module
+- unload target module
+- disable livepatch
+- unload livepatch
+
+First load the livepatch:
+
+ % insmod samples/livepatch/livepatch-callbacks-demo.ko
+ [ 64.661552] livepatch: enabling patch 'livepatch_callbacks_demo'
+ [ 64.662147] livepatch: 'livepatch_callbacks_demo': initializing patching transition
+ [ 64.662175] livepatch_callbacks_demo: pre_patch_callback: vmlinux
+ [ 64.662850] livepatch: 'livepatch_callbacks_demo': starting patching transition
+ [ 65.695056] livepatch: 'livepatch_callbacks_demo': completing patching transition
+ [ 65.695147] livepatch_callbacks_demo: post_patch_callback: vmlinux
+ [ 65.695561] livepatch: 'livepatch_callbacks_demo': patching complete
+
+When a targeted kernel module is subsequently loaded, only its
+pre/post-patch callbacks are executed:
+
+ % insmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 66.669196] livepatch: applying patch 'livepatch_callbacks_demo' to loading module 'livepatch_callbacks_mod'
+ [ 66.669882] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init
+ [ 66.670744] livepatch_callbacks_demo: post_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init
+ [ 66.672873] livepatch_callbacks_mod: livepatch_callbacks_mod_init
+
+When the target module is unloaded, the livepatch is only reverted from
+the livepatch_callbacks_mod klp_object. As such, only pre and
+post-unpatch callbacks are executed when this occurs:
+
+ % rmmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 68.680065] livepatch_callbacks_mod: livepatch_callbacks_mod_exit
+ [ 68.680688] livepatch_callbacks_demo: pre_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_GOING] Going away
+ [ 68.681452] livepatch: reverting patch 'livepatch_callbacks_demo' on unloading module 'livepatch_callbacks_mod'
+ [ 68.682094] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_GOING] Going away
+
+ % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled
+ [ 70.689225] livepatch: 'livepatch_callbacks_demo': initializing unpatching transition
+ [ 70.689256] livepatch_callbacks_demo: pre_unpatch_callback: vmlinux
+ [ 70.689882] livepatch: 'livepatch_callbacks_demo': starting unpatching transition
+ [ 71.711080] livepatch: 'livepatch_callbacks_demo': completing unpatching transition
+ [ 71.711481] livepatch_callbacks_demo: post_unpatch_callback: vmlinux
+ [ 71.711988] livepatch: 'livepatch_callbacks_demo': unpatching complete
+
+ % rmmod samples/livepatch/livepatch-callbacks-demo.ko
+
+
+Test 5
+------
+
+A simple test of loading a livepatch without one of its patch target
+klp_objects ever loaded (livepatch_callbacks_mod):
+
+- load livepatch
+- disable livepatch
+- unload livepatch
+
+Load the livepatch:
+
+ % insmod samples/livepatch/livepatch-callbacks-demo.ko
+ [ 74.711081] livepatch: enabling patch 'livepatch_callbacks_demo'
+ [ 74.711595] livepatch: 'livepatch_callbacks_demo': initializing patching transition
+ [ 74.711639] livepatch_callbacks_demo: pre_patch_callback: vmlinux
+ [ 74.712272] livepatch: 'livepatch_callbacks_demo': starting patching transition
+ [ 75.743137] livepatch: 'livepatch_callbacks_demo': completing patching transition
+ [ 75.743219] livepatch_callbacks_demo: post_patch_callback: vmlinux
+ [ 75.743867] livepatch: 'livepatch_callbacks_demo': patching complete
+
+As expected, only pre/post-(un)patch handlers are executed for vmlinux:
+
+ % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled
+ [ 76.716254] livepatch: 'livepatch_callbacks_demo': initializing unpatching transition
+ [ 76.716278] livepatch_callbacks_demo: pre_unpatch_callback: vmlinux
+ [ 76.716666] livepatch: 'livepatch_callbacks_demo': starting unpatching transition
+ [ 77.727089] livepatch: 'livepatch_callbacks_demo': completing unpatching transition
+ [ 77.727194] livepatch_callbacks_demo: post_unpatch_callback: vmlinux
+ [ 77.727907] livepatch: 'livepatch_callbacks_demo': unpatching complete
+
+ % rmmod samples/livepatch/livepatch-callbacks-demo.ko
+
+
+Test 6
+------
+
+Test a scenario where a vmlinux pre-patch callback returns a non-zero
+status (ie, failure):
+
+- load target module
+- load livepatch -ENODEV
+- unload target module
+
+First load a target module:
+
+ % insmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 80.740520] livepatch_callbacks_mod: livepatch_callbacks_mod_init
+
+Load the livepatch module, setting its 'pre_patch_ret' value to -19
+(-ENODEV). When its vmlinux pre-patch callback executes, this status
+code will propagate back to the module-loading subsystem. The result is
+that the insmod command refuses to load the livepatch module:
+
+ % insmod samples/livepatch/livepatch-callbacks-demo.ko pre_patch_ret=-19
+ [ 82.747326] livepatch: enabling patch 'livepatch_callbacks_demo'
+ [ 82.747743] livepatch: 'livepatch_callbacks_demo': initializing patching transition
+ [ 82.747767] livepatch_callbacks_demo: pre_patch_callback: vmlinux
+ [ 82.748237] livepatch: pre-patch callback failed for object 'vmlinux'
+ [ 82.748637] livepatch: failed to enable patch 'livepatch_callbacks_demo'
+ [ 82.749059] livepatch: 'livepatch_callbacks_demo': canceling transition, going to unpatch
+ [ 82.749060] livepatch: 'livepatch_callbacks_demo': completing unpatching transition
+ [ 82.749868] livepatch: 'livepatch_callbacks_demo': unpatching complete
+ [ 82.765809] insmod: ERROR: could not insert module samples/livepatch/livepatch-callbacks-demo.ko: No such device
+
+ % rmmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 84.774238] livepatch_callbacks_mod: livepatch_callbacks_mod_exit
+
+
+Test 7
+------
+
+Similar to the previous test, set up a livepatch such that its vmlinux
+pre-patch callback returns success. However, when a targeted kernel
+module is later loaded, have the livepatch return a failing status code:
+
+- load livepatch
+- setup -ENODEV
+- load target module
+- disable livepatch
+- unload livepatch
+
+Load the livepatch, notice vmlinux pre-patch callback succeeds:
+
+ % insmod samples/livepatch/livepatch-callbacks-demo.ko
+ [ 86.787845] livepatch: enabling patch 'livepatch_callbacks_demo'
+ [ 86.788325] livepatch: 'livepatch_callbacks_demo': initializing patching transition
+ [ 86.788427] livepatch_callbacks_demo: pre_patch_callback: vmlinux
+ [ 86.788821] livepatch: 'livepatch_callbacks_demo': starting patching transition
+ [ 87.711069] livepatch: 'livepatch_callbacks_demo': completing patching transition
+ [ 87.711143] livepatch_callbacks_demo: post_patch_callback: vmlinux
+ [ 87.711886] livepatch: 'livepatch_callbacks_demo': patching complete
+
+Set a trap so subsequent pre-patch callbacks to this livepatch will
+return -ENODEV:
+
+ % echo -19 > /sys/module/livepatch_callbacks_demo/parameters/pre_patch_ret
+
+The livepatch pre-patch callback for subsequently loaded target modules
+will return failure, so the module loader refuses to load the kernel
+module. Notice that no post-patch or pre/post-unpatch callbacks are
+executed for this klp_object:
+
+ % insmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 90.796976] livepatch: applying patch 'livepatch_callbacks_demo' to loading module 'livepatch_callbacks_mod'
+ [ 90.797834] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init
+ [ 90.798900] livepatch: pre-patch callback failed for object 'livepatch_callbacks_mod'
+ [ 90.799652] livepatch: patch 'livepatch_callbacks_demo' failed for module 'livepatch_callbacks_mod', refusing to load module 'livepatch_callbacks_mod'
+ [ 90.819737] insmod: ERROR: could not insert module samples/livepatch/livepatch-callbacks-mod.ko: No such device
+
+However, pre/post-unpatch callbacks run for the vmlinux klp_object:
+
+ % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled
+ [ 92.823547] livepatch: 'livepatch_callbacks_demo': initializing unpatching transition
+ [ 92.823573] livepatch_callbacks_demo: pre_unpatch_callback: vmlinux
+ [ 92.824331] livepatch: 'livepatch_callbacks_demo': starting unpatching transition
+ [ 93.727128] livepatch: 'livepatch_callbacks_demo': completing unpatching transition
+ [ 93.727327] livepatch_callbacks_demo: post_unpatch_callback: vmlinux
+ [ 93.727861] livepatch: 'livepatch_callbacks_demo': unpatching complete
+
+ % rmmod samples/livepatch/livepatch-callbacks-demo.ko
+
+
+Test 8
+------
+
+Test loading multiple targeted kernel modules. This test case is
+mainly here for comparison with the next one.
+
+- load busy target module (0s sleep),
+- load livepatch
+- load target module
+- unload target module
+- disable livepatch
+- unload livepatch
+- unload busy target module
+
+
+Load a target "busy" kernel module which kicks off a worker function
+that immediately exits:
+
+ % insmod samples/livepatch/livepatch-callbacks-busymod.ko sleep_secs=0
+ [ 96.910107] livepatch_callbacks_busymod: livepatch_callbacks_mod_init
+ [ 96.910600] livepatch_callbacks_busymod: busymod_work_func, sleeping 0 seconds ...
+ [ 96.913024] livepatch_callbacks_busymod: busymod_work_func exit
+
+Proceed with loading the livepatch and another ordinary target module;
+notice that the post-patch callbacks are executed and the transition
+completes quickly:
+
+ % insmod samples/livepatch/livepatch-callbacks-demo.ko
+ [ 98.917892] livepatch: enabling patch 'livepatch_callbacks_demo'
+ [ 98.918426] livepatch: 'livepatch_callbacks_demo': initializing patching transition
+ [ 98.918453] livepatch_callbacks_demo: pre_patch_callback: vmlinux
+ [ 98.918955] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_busymod -> [MODULE_STATE_LIVE] Normal state
+ [ 98.923835] livepatch: 'livepatch_callbacks_demo': starting patching transition
+ [ 99.743104] livepatch: 'livepatch_callbacks_demo': completing patching transition
+ [ 99.743156] livepatch_callbacks_demo: post_patch_callback: vmlinux
+ [ 99.743679] livepatch_callbacks_demo: post_patch_callback: livepatch_callbacks_busymod -> [MODULE_STATE_LIVE] Normal state
+ [ 99.744616] livepatch: 'livepatch_callbacks_demo': patching complete
+
+ % insmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 100.930955] livepatch: applying patch 'livepatch_callbacks_demo' to loading module 'livepatch_callbacks_mod'
+ [ 100.931668] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init
+ [ 100.932645] livepatch_callbacks_demo: post_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init
+ [ 100.934125] livepatch_callbacks_mod: livepatch_callbacks_mod_init
+
+ % rmmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 102.942805] livepatch_callbacks_mod: livepatch_callbacks_mod_exit
+ [ 102.943640] livepatch_callbacks_demo: pre_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_GOING] Going away
+ [ 102.944585] livepatch: reverting patch 'livepatch_callbacks_demo' on unloading module 'livepatch_callbacks_mod'
+ [ 102.945455] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_GOING] Going away
+
+ % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled
+ [ 104.953815] livepatch: 'livepatch_callbacks_demo': initializing unpatching transition
+ [ 104.953838] livepatch_callbacks_demo: pre_unpatch_callback: vmlinux
+ [ 104.954431] livepatch_callbacks_demo: pre_unpatch_callback: livepatch_callbacks_busymod -> [MODULE_STATE_LIVE] Normal state
+ [ 104.955426] livepatch: 'livepatch_callbacks_demo': starting unpatching transition
+ [ 106.719073] livepatch: 'livepatch_callbacks_demo': completing unpatching transition
+ [ 106.722633] livepatch_callbacks_demo: post_unpatch_callback: vmlinux
+ [ 106.723282] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_busymod -> [MODULE_STATE_LIVE] Normal state
+ [ 106.724279] livepatch: 'livepatch_callbacks_demo': unpatching complete
+
+ % rmmod samples/livepatch/livepatch-callbacks-demo.ko
+ % rmmod samples/livepatch/livepatch-callbacks-busymod.ko
+ [ 108.975660] livepatch_callbacks_busymod: livepatch_callbacks_mod_exit
+
+
+Test 9
+------
+
+A test similar to the previous one, but this time force the "busy" kernel
+module to do longer work.
+
+The livepatching core will refuse to patch a task that is currently
+executing a to-be-patched function -- the consistency model stalls the
+current patch transition until this safety-check is met. Test a
+scenario where one of a livepatch's target klp_objects sits on such a
+function for a long time. Meanwhile, load and unload other target
+kernel modules while the livepatch transition is in progress.
+
+- load busy target module (30s sleep)
+- load livepatch
+- load target module
+- unload target module
+- disable livepatch
+- unload livepatch
+- unload busy target module
+
+
+Load the "busy" kernel module; this time make it do 30 seconds' worth of
+work:
+
+ % insmod samples/livepatch/livepatch-callbacks-busymod.ko sleep_secs=30
+ [ 110.993362] livepatch_callbacks_busymod: livepatch_callbacks_mod_init
+ [ 110.994059] livepatch_callbacks_busymod: busymod_work_func, sleeping 30 seconds ...
+
+Meanwhile, the livepatch is loaded. Notice that the patch transition
+does not complete as the targeted "busy" module is sitting on a
+to-be-patched function:
+
+ % insmod samples/livepatch/livepatch-callbacks-demo.ko
+ [ 113.000309] livepatch: enabling patch 'livepatch_callbacks_demo'
+ [ 113.000764] livepatch: 'livepatch_callbacks_demo': initializing patching transition
+ [ 113.000791] livepatch_callbacks_demo: pre_patch_callback: vmlinux
+ [ 113.001289] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_busymod -> [MODULE_STATE_LIVE] Normal state
+ [ 113.005208] livepatch: 'livepatch_callbacks_demo': starting patching transition
+
+Load a second target module (this one is an ordinary idle kernel
+module). Note that *no* post-patch callbacks will be executed while the
+livepatch is still in transition:
+
+ % insmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 115.012740] livepatch: applying patch 'livepatch_callbacks_demo' to loading module 'livepatch_callbacks_mod'
+ [ 115.013406] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init
+ [ 115.015315] livepatch_callbacks_mod: livepatch_callbacks_mod_init
+
+Request an unload of the simple kernel module. The patch is still
+transitioning, so its pre-unpatch callbacks are skipped:
+
+ % rmmod samples/livepatch/livepatch-callbacks-mod.ko
+ [ 117.022626] livepatch_callbacks_mod: livepatch_callbacks_mod_exit
+ [ 117.023376] livepatch: reverting patch 'livepatch_callbacks_demo' on unloading module 'livepatch_callbacks_mod'
+ [ 117.024533] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_GOING] Going away
+
+Finally, the livepatch is disabled. Since none of the patch's
+klp_objects' post-patch callbacks executed, the remaining klp_objects'
+pre-unpatch callbacks are skipped:
+
+ % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled
+ [ 119.035408] livepatch: 'livepatch_callbacks_demo': reversing transition from patching to unpatching
+ [ 119.035485] livepatch: 'livepatch_callbacks_demo': starting unpatching transition
+ [ 119.711166] livepatch: 'livepatch_callbacks_demo': completing unpatching transition
+ [ 119.714179] livepatch_callbacks_demo: post_unpatch_callback: vmlinux
+ [ 119.714653] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_busymod -> [MODULE_STATE_LIVE] Normal state
+ [ 119.715437] livepatch: 'livepatch_callbacks_demo': unpatching complete
+
+ % rmmod samples/livepatch/livepatch-callbacks-demo.ko
+ % rmmod samples/livepatch/livepatch-callbacks-busymod.ko
+ [ 141.279111] livepatch_callbacks_busymod: busymod_work_func exit
+ [ 141.279760] livepatch_callbacks_busymod: livepatch_callbacks_mod_exit
diff --git a/Documentation/livepatch/shadow-vars.txt b/Documentation/livepatch/shadow-vars.txt
new file mode 100644
index 000000000000..89c66634d600
--- /dev/null
+++ b/Documentation/livepatch/shadow-vars.txt
@@ -0,0 +1,192 @@
+================
+Shadow Variables
+================
+
+Shadow variables are a simple way for livepatch modules to associate
+additional "shadow" data with existing data structures. Shadow data is
+allocated separately from parent data structures, which are left
+unmodified. The shadow variable API described in this document is used
+to allocate/add and remove/free shadow variables to/from their parents.
+
+The implementation introduces a global, in-kernel hashtable that
+associates pointers to parent objects with a numeric identifier of the
+shadow data. The numeric identifier is a simple enumeration that may be
+used to describe shadow variable version, class or type, etc. More
+specifically, the parent pointer serves as the hashtable key while the
+numeric id subsequently filters hashtable queries. Multiple shadow
+variables may attach to the same parent object, but their numeric
+identifier distinguishes between them.
+
+
+1. Brief API summary
+====================
+
+(See the full API usage docbook notes in livepatch/shadow.c.)
+
+A hashtable references all shadow variables. These references are
+stored and retrieved through a <obj, id> pair.
+
+* The klp_shadow variable data structure encapsulates both tracking
+meta-data and shadow-data:
+ - meta-data
+ - obj - pointer to parent object
+ - id - data identifier
+ - data[] - storage for shadow data
+
+It is important to note that the klp_shadow_alloc() and
+klp_shadow_get_or_alloc() calls, described below, store a *copy* of the
+data that the functions are provided. Callers should provide whatever
+mutual exclusion is required of the shadow data.
+
+* klp_shadow_get() - retrieve a shadow variable data pointer
+ - search hashtable for <obj, id> pair
+
+* klp_shadow_alloc() - allocate and add a new shadow variable
+ - search hashtable for <obj, id> pair
+ - if exists
+ - WARN and return NULL
+ - if <obj, id> doesn't already exist
+ - allocate a new shadow variable
+ - copy data into the new shadow variable
+ - add <obj, id> to the global hashtable
+
+* klp_shadow_get_or_alloc() - get existing or alloc a new shadow variable
+ - search hashtable for <obj, id> pair
+ - if exists
+ - return existing shadow variable
+ - if <obj, id> doesn't already exist
+ - allocate a new shadow variable
+ - copy data into the new shadow variable
+ - add <obj, id> pair to the global hashtable
+
+* klp_shadow_free() - detach and free a <obj, id> shadow variable
+ - find and remove a <obj, id> reference from global hashtable
+ - if found, free shadow variable
+
+* klp_shadow_free_all() - detach and free all <*, id> shadow variables
+ - find and remove any <*, id> references from global hashtable
+ - if found, free shadow variable
+
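+For reference, the prototypes as used by the examples in this document (a
+summary only; consult include/linux/livepatch.h for the authoritative
+declarations):
+
+void *klp_shadow_get(void *obj, unsigned long id);
+void *klp_shadow_alloc(void *obj, unsigned long id, void *data,
+		       size_t size, gfp_t gfp_flags);
+void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
+			      size_t size, gfp_t gfp_flags);
+void klp_shadow_free(void *obj, unsigned long id);
+void klp_shadow_free_all(unsigned long id);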
+
+2. Use cases
+============
+
+(See the example shadow variable livepatch modules in samples/livepatch/
+for full working demonstrations.)
+
+For the following use-case examples, consider commit 1d147bfa6429
+("mac80211: fix AP powersave TX vs. wakeup race"), which added a
+spinlock to net/mac80211/sta_info.h :: struct sta_info. Each use-case
+example can be considered a stand-alone livepatch implementation of this
+fix.
+
+
+Matching parent's lifecycle
+---------------------------
+
+If parent data structures are frequently created and destroyed, it may
+be easiest to align their shadow variables' lifetimes with the same
+allocation and release functions. In this case, the parent data
+structure is typically allocated, initialized, then registered in some
+manner. Shadow variable allocation and setup can then be considered
+part of the parent's initialization and should be completed before the
+parent "goes live" (ie, any shadow variable get-API requests are made
+for this <obj, id> pair.)
+
+For commit 1d147bfa6429, when a parent sta_info structure is allocated,
+allocate a shadow ps_lock spinlock, then initialize it:
+
+#define PS_LOCK 1
+struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr, gfp_t gfp)
+{
+ struct sta_info *sta;
+ spinlock_t *ps_lock;
+
+ /* Parent structure is created */
+ sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
+
+ /* Attach a corresponding shadow variable, then initialize it */
+ ps_lock = klp_shadow_alloc(sta, PS_LOCK, NULL, sizeof(*ps_lock), gfp);
+ if (!ps_lock)
+ goto shadow_fail;
+ spin_lock_init(ps_lock);
+ ...
+
+When requiring a ps_lock, query the shadow variable API to retrieve one
+for a specific struct sta_info:
+
+void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+{
+ spinlock_t *ps_lock;
+
+ /* sync with ieee80211_tx_h_unicast_ps_buf */
+ ps_lock = klp_shadow_get(sta, PS_LOCK);
+ if (ps_lock)
+ spin_lock(ps_lock);
+ ...
+
+When the parent sta_info structure is freed, first free the shadow
+variable:
+
+void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
+{
+ klp_shadow_free(sta, PS_LOCK);
+ kfree(sta);
+ ...
+
+
+In-flight parent objects
+------------------------
+
+Sometimes it may not be convenient or possible to allocate shadow
+variables alongside their parent objects. Or a livepatch fix may
+require shadow variables for only a subset of parent object instances. In
+these cases, the klp_shadow_get_or_alloc() call can be used to attach
+shadow variables to parents already in-flight.
+
+For commit 1d147bfa6429, a good spot to allocate a shadow spinlock is
+inside ieee80211_sta_ps_deliver_wakeup():
+
+#define PS_LOCK 1
+void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+{
+ DEFINE_SPINLOCK(ps_lock_fallback);
+ spinlock_t *ps_lock;
+
+ /* sync with ieee80211_tx_h_unicast_ps_buf */
+ ps_lock = klp_shadow_get_or_alloc(sta, PS_LOCK,
+ &ps_lock_fallback, sizeof(ps_lock_fallback),
+ GFP_ATOMIC);
+ if (ps_lock)
+ spin_lock(ps_lock);
+ ...
+
+This usage will create a shadow variable only if needed; otherwise it
+will use the one that was already created for this <obj, id> pair.
+
+As in the previous use-case, the shadow spinlock needs to be cleaned up.
+A shadow variable can be freed just before its parent object is freed,
+or even when the shadow variable itself is no longer required.
+
+
+Other use-cases
+---------------
+
+A shadow variable can also be used as a flag indicating that a data
+structure was allocated by new, livepatched code. In this case, it
+doesn't matter what data value the shadow variable holds; its mere
+existence suggests how to handle the parent object.
+
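+A minimal sketch of this flag pattern (the id value and the surrounding code
+are illustrative):
+
+#define SHADOW_NEW_DATA_ID	2	/* illustrative numeric id */
+
+/* In code added by the livepatch, tag the objects it allocates: */
+bool tag = true;
+klp_shadow_alloc(obj, SHADOW_NEW_DATA_ID, &tag, sizeof(tag), GFP_KERNEL);
+
+/* Elsewhere, the mere presence of the shadow variable is the signal: */
+if (klp_shadow_get(obj, SHADOW_NEW_DATA_ID))
+	handle_as_patched_allocation(obj);	/* hypothetical helper */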
+
+3. References
+=============
+
+* https://github.com/dynup/kpatch
+The livepatch implementation is based on the kpatch version of shadow
+variables.
+
+* http://files.mkgnu.net/files/dynamos/doc/papers/dynamos_eurosys_07.pdf
+Dynamic and Adaptive Updates of Non-Quiescent Subsystems in Commodity
+Operating System Kernels (Kritis Makris, Kyung Dong Ryu 2007) presented
+a datatype update technique called "shadow data structures".
diff --git a/Documentation/locking/rt-mutex-design.txt b/Documentation/locking/rt-mutex-design.txt
index 6c6e8c2410de..3d7b865539cc 100644
--- a/Documentation/locking/rt-mutex-design.txt
+++ b/Documentation/locking/rt-mutex-design.txt
@@ -8,7 +8,7 @@ RT-mutex implementation design
This document tries to describe the design of the rtmutex.c implementation.
It doesn't describe the reasons why rtmutex.c exists. For that please see
-Documentation/rt-mutex.txt. Although this document does explain problems
+Documentation/locking/rt-mutex.txt. Although this document does explain problems
that happen without this code, but that is in the concept to understand
what the code actually is doing.
diff --git a/Documentation/md/md-cluster.txt b/Documentation/md/md-cluster.txt
index 82ee51604e9a..e1055f105cf5 100644
--- a/Documentation/md/md-cluster.txt
+++ b/Documentation/md/md-cluster.txt
@@ -1,4 +1,5 @@
-The cluster MD is a shared-device RAID for a cluster.
+The cluster MD is a shared-device RAID for a cluster. It supports
+two levels: raid1 and raid10 (limited support).
1. On-disk format
diff --git a/Documentation/media/cec.h.rst.exceptions b/Documentation/media/cec.h.rst.exceptions
index b1687532742f..d9fd092de6f8 100644
--- a/Documentation/media/cec.h.rst.exceptions
+++ b/Documentation/media/cec.h.rst.exceptions
@@ -24,8 +24,6 @@ ignore define CEC_VENDOR_ID_NONE
ignore define CEC_MODE_INITIATOR_MSK
ignore define CEC_MODE_FOLLOWER_MSK
-ignore define CEC_EVENT_FL_INITIAL_STATE
-
# Part of CEC 2.0 spec - shouldn't be documented too?
ignore define CEC_LOG_ADDR_TV
ignore define CEC_LOG_ADDR_RECORD_1
diff --git a/Documentation/media/dvb-drivers/bt8xx.rst b/Documentation/media/dvb-drivers/bt8xx.rst
index b43958b7340c..e3e387bdf498 100644
--- a/Documentation/media/dvb-drivers/bt8xx.rst
+++ b/Documentation/media/dvb-drivers/bt8xx.rst
@@ -18,7 +18,7 @@ General information
This class of cards has a bt878a as the PCI interface, and require the bttv driver
for accessing the i2c bus and the gpio pins of the bt8xx chipset.
-Please see Documentation/dvb/cards.txt => o Cards based on the Conexant Bt8xx PCI bridge:
+Please see Documentation/media/dvb-drivers/cards.rst => o Cards based on the Conexant Bt8xx PCI bridge:
Compiling kernel please enable:
@@ -45,7 +45,7 @@ Loading Modules
Regular case: If the bttv driver detects a bt8xx-based DVB card, all frontend and backend modules will be loaded automatically.
Exceptions are:
- Old TwinHan DST cards or clones with or without CA slot and not containing an Eeprom.
-People running udev please see Documentation/dvb/udev.txt.
+People running udev please see Documentation/media/dvb-drivers/udev.rst.
In the following cases overriding the PCI type detection for dvb-bt8xx might be necessary:
@@ -72,7 +72,7 @@ Useful parameters for verbosity level and debugging the dst module:
The autodetected values are determined by the cards' "response string".
In your logs see f. ex.: dst_get_device_id: Recognize [DSTMCI].
For bug reports please send in a complete log with verbose=4 activated.
-Please also see Documentation/dvb/ci.txt.
+Please also see Documentation/media/dvb-drivers/ci.rst.
Running multiple cards
~~~~~~~~~~~~~~~~~~~~~~
@@ -100,7 +100,7 @@ Examples of card ID's:
$ modprobe bttv card=113 card=135
-For a full list of card ID's please see Documentation/video4linux/CARDLIST.bttv.
+For a full list of card ID's please see Documentation/media/v4l-drivers/bttv-cardlist.rst.
In case of further problems please subscribe and send questions to the mailing list: linux-dvb@linuxtv.org.
Probing the cards with broken PCI subsystem ID
diff --git a/Documentation/media/kapi/cec-core.rst b/Documentation/media/kapi/cec-core.rst
index 28866259998c..d37e107f2fde 100644
--- a/Documentation/media/kapi/cec-core.rst
+++ b/Documentation/media/kapi/cec-core.rst
@@ -227,8 +227,8 @@ CEC_TX_STATUS_LOW_DRIVE:
retransmission.
CEC_TX_STATUS_ERROR:
- some unspecified error occurred: this can be one of
- the previous two if the hardware cannot differentiate or something
+ some unspecified error occurred: this can be one of ARB_LOST
+ or LOW_DRIVE if the hardware cannot differentiate or something
else entirely.
CEC_TX_STATUS_MAX_RETRIES:
@@ -238,6 +238,9 @@ CEC_TX_STATUS_MAX_RETRIES:
doesn't have to make another attempt to transmit the message
since the hardware did that already.
+The hardware must be able to differentiate between OK, NACK and 'something
+else'.
+
The \*_cnt arguments are the number of error conditions that were seen.
This may be 0 if no information is available. Drivers that do not support
hardware retry can just set the counter corresponding to the transmit error
diff --git a/Documentation/media/kapi/dtv-ca.rst b/Documentation/media/kapi/dtv-ca.rst
new file mode 100644
index 000000000000..a4dd700189b0
--- /dev/null
+++ b/Documentation/media/kapi/dtv-ca.rst
@@ -0,0 +1,4 @@
+Digital TV Conditional Access kABI
+----------------------------------
+
+.. kernel-doc:: drivers/media/dvb-core/dvb_ca_en50221.h
diff --git a/Documentation/media/kapi/dtv-common.rst b/Documentation/media/kapi/dtv-common.rst
new file mode 100644
index 000000000000..40cf1033b5e1
--- /dev/null
+++ b/Documentation/media/kapi/dtv-common.rst
@@ -0,0 +1,55 @@
+Digital TV Common functions
+---------------------------
+
+Math functions
+~~~~~~~~~~~~~~
+
+Provide some commonly-used math functions, usually required in order to
+estimate signal strength and signal to noise measurements in dB.
+
+.. kernel-doc:: drivers/media/dvb-core/dvb_math.h
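+
+As an example of how these helpers are typically used, the sketch below
+converts a signal/noise power ratio into the 0.001 dB units used by the
+DVBv5 ``FE_SCALE_DECIBEL`` statistics. It is only an illustration:
+``signal`` and ``noise`` stand for hypothetical register readings, and the
+ratio is assumed to be >= 1.
+
+.. code-block:: c
+
+   /*
+    * intlog10() returns log10(value) in fixed point, with 24 bits for
+    * the fractional part. As dB = 10 * log10(signal/noise), multiplying
+    * the difference by 10000 and shifting right by 24 bits gives the
+    * result in units of 0.001 dB.
+    */
+   u64 cnr_millidb;
+
+   cnr_millidb = ((u64)(intlog10(signal) - intlog10(noise)) * 10000) >> 24;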
+
+
+DVB devices
+~~~~~~~~~~~
+
+Those functions are responsible for handling the DVB device nodes.
+
+.. kernel-doc:: drivers/media/dvb-core/dvbdev.h
+
+Digital TV Ring buffer
+~~~~~~~~~~~~~~~~~~~~~~
+
+Those routines implement ring buffers used to handle digital TV data and
+copy it from/to userspace.
+
+.. note::
+
+ 1) For performance reasons read and write routines don't check buffer sizes
+ and/or number of bytes free/available. This has to be done before these
+ routines are called. For example:
+
+ .. code-block:: c
+
+ /* write @buflen: bytes */
+ free = dvb_ringbuffer_free(rbuf);
+ if (free >= buflen)
+ count = dvb_ringbuffer_write(rbuf, buffer, buflen);
+ else
+ /* do something */
+
+ /* read min. 1000, max. @bufsize: bytes */
+ avail = dvb_ringbuffer_avail(rbuf);
+ if (avail >= 1000)
+ count = dvb_ringbuffer_read(rbuf, buffer, min(avail, bufsize));
+ else
+ /* do something */
+
+ 2) If there is exactly one reader and one writer, there is no need
+ to lock read or write operations.
+ Two or more readers must be locked against each other.
+ Flushing the buffer counts as a read operation.
+ Resetting the buffer counts as a read and write operation.
+ Two or more writers must be locked against each other.
+
+.. kernel-doc:: drivers/media/dvb-core/dvb_ringbuffer.h
diff --git a/Documentation/media/kapi/dtv-core.rst b/Documentation/media/kapi/dtv-core.rst
index de9a228aca8a..bca743dc6b43 100644
--- a/Documentation/media/kapi/dtv-core.rst
+++ b/Documentation/media/kapi/dtv-core.rst
@@ -26,572 +26,12 @@ I2C bus.
abandoned standard, not used anymore) and ATSC version 3.0 current
proposals. Currently, the DVB subsystem doesn't implement those standards.
-Digital TV Common functions
----------------------------
-.. kernel-doc:: drivers/media/dvb-core/dvb_math.h
+.. toctree::
+ :maxdepth: 1
-.. kernel-doc:: drivers/media/dvb-core/dvbdev.h
-
-Digital TV Ring buffer
-----------------------
-
-Those routines implement ring buffers used to handle digital TV data and
-copy it from/to userspace.
-
-.. note::
-
- 1) For performance reasons read and write routines don't check buffer sizes
- and/or number of bytes free/available. This has to be done before these
- routines are called. For example:
-
- .. code-block:: c
-
- /* write @buflen: bytes */
- free = dvb_ringbuffer_free(rbuf);
- if (free >= buflen)
- count = dvb_ringbuffer_write(rbuf, buffer, buflen);
- else
- /* do something */
-
- /* read min. 1000, max. @bufsize: bytes */
- avail = dvb_ringbuffer_avail(rbuf);
- if (avail >= 1000)
- count = dvb_ringbuffer_read(rbuf, buffer, min(avail, bufsize));
- else
- /* do something */
-
- 2) If there is exactly one reader and one writer, there is no need
- to lock read or write operations.
- Two or more readers must be locked against each other.
- Flushing the buffer counts as a read operation.
- Resetting the buffer counts as a read and write operation.
- Two or more writers must be locked against each other.
-
-.. kernel-doc:: drivers/media/dvb-core/dvb_ringbuffer.h
-
-
-Digital TV Frontend kABI
-------------------------
-
-Digital TV Frontend
-~~~~~~~~~~~~~~~~~~~
-
-The Digital TV Frontend kABI defines a driver-internal interface for
-registering low-level, hardware specific driver to a hardware independent
-frontend layer. It is only of interest for Digital TV device driver writers.
-The header file for this API is named ``dvb_frontend.h`` and located in
-``drivers/media/dvb-core``.
-
-Demodulator driver
-^^^^^^^^^^^^^^^^^^
-
-The demodulator driver is responsible to talk with the decoding part of the
-hardware. Such driver should implement :c:type:`dvb_frontend_ops`, with
-tells what type of digital TV standards are supported, and points to a
-series of functions that allow the DVB core to command the hardware via
-the code under ``drivers/media/dvb-core/dvb_frontend.c``.
-
-A typical example of such struct in a driver ``foo`` is::
-
- static struct dvb_frontend_ops foo_ops = {
- .delsys = { SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A },
- .info = {
- .name = "foo DVB-T/T2/C driver",
- .caps = FE_CAN_FEC_1_2 |
- FE_CAN_FEC_2_3 |
- FE_CAN_FEC_3_4 |
- FE_CAN_FEC_5_6 |
- FE_CAN_FEC_7_8 |
- FE_CAN_FEC_AUTO |
- FE_CAN_QPSK |
- FE_CAN_QAM_16 |
- FE_CAN_QAM_32 |
- FE_CAN_QAM_64 |
- FE_CAN_QAM_128 |
- FE_CAN_QAM_256 |
- FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO |
- FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_HIERARCHY_AUTO |
- FE_CAN_MUTE_TS |
- FE_CAN_2G_MODULATION,
- .frequency_min = 42000000, /* Hz */
- .frequency_max = 1002000000, /* Hz */
- .symbol_rate_min = 870000,
- .symbol_rate_max = 11700000
- },
- .init = foo_init,
- .sleep = foo_sleep,
- .release = foo_release,
- .set_frontend = foo_set_frontend,
- .get_frontend = foo_get_frontend,
- .read_status = foo_get_status_and_stats,
- .tune = foo_tune,
- .i2c_gate_ctrl = foo_i2c_gate_ctrl,
- .get_frontend_algo = foo_get_algo,
- };
-
-A typical example of such struct in a driver ``bar`` meant to be used on
-Satellite TV reception is::
-
- static const struct dvb_frontend_ops bar_ops = {
- .delsys = { SYS_DVBS, SYS_DVBS2 },
- .info = {
- .name = "Bar DVB-S/S2 demodulator",
- .frequency_min = 500000, /* KHz */
- .frequency_max = 2500000, /* KHz */
- .frequency_stepsize = 0,
- .symbol_rate_min = 1000000,
- .symbol_rate_max = 45000000,
- .symbol_rate_tolerance = 500,
- .caps = FE_CAN_INVERSION_AUTO |
- FE_CAN_FEC_AUTO |
- FE_CAN_QPSK,
- },
- .init = bar_init,
- .sleep = bar_sleep,
- .release = bar_release,
- .set_frontend = bar_set_frontend,
- .get_frontend = bar_get_frontend,
- .read_status = bar_get_status_and_stats,
- .i2c_gate_ctrl = bar_i2c_gate_ctrl,
- .get_frontend_algo = bar_get_algo,
- .tune = bar_tune,
-
- /* Satellite-specific */
- .diseqc_send_master_cmd = bar_send_diseqc_msg,
- .diseqc_send_burst = bar_send_burst,
- .set_tone = bar_set_tone,
- .set_voltage = bar_set_voltage,
- };
-
-.. note::
-
- #) For satellite digital TV standards (DVB-S, DVB-S2, ISDB-S), the
- frequencies are specified in kHz, while, for terrestrial and cable
- standards, they're specified in Hz. Due to that, if the same frontend
- supports both types, you'll need to have two separate
- :c:type:`dvb_frontend_ops` structures, one for each standard.
- #) The ``.i2c_gate_ctrl`` field is present only when the hardware has
- allows controlling an I2C gate (either directly of via some GPIO pin),
- in order to remove the tuner from the I2C bus after a channel is
- tuned.
- #) All new drivers should implement the
- :ref:`DVBv5 statistics <dvbv5_stats>` via ``.read_status``.
- Yet, there are a number of callbacks meant to get statistics for
- signal strength, S/N and UCB. Those are there to provide backward
- compatibility with legacy applications that don't support the DVBv5
- API. Implementing those callbacks are optional. Those callbacks may be
- removed in the future, after we have all existing drivers supporting
- DVBv5 stats.
- #) Other callbacks are required for satellite TV standards, in order to
- control LNBf and DiSEqC: ``.diseqc_send_master_cmd``,
- ``.diseqc_send_burst``, ``.set_tone``, ``.set_voltage``.
-
-.. |delta| unicode:: U+00394
-
-The ``drivers/media/dvb-core/dvb_frontend.c`` has a kernel thread with is
-responsible for tuning the device. It supports multiple algoritms to
-detect a channel, as defined at enum :c:func:`dvbfe_algo`.
-
-The algorithm to be used is obtained via ``.get_frontend_algo``. If the driver
-doesn't fill its field at struct :c:type:`dvb_frontend_ops`, it will default to
-``DVBFE_ALGO_SW``, meaning that the dvb-core will do a zigzag when tuning,
-e. g. it will try first to use the specified center frequency ``f``,
-then, it will do ``f`` + |delta|, ``f`` - |delta|, ``f`` + 2 x |delta|,
-``f`` - 2 x |delta| and so on.
-
-If the hardware has internally a some sort of zigzag algorithm, you should
-define a ``.get_frontend_algo`` function that would return ``DVBFE_ALGO_HW``.
-
-.. note::
-
- The core frontend support also supports
- a third type (``DVBFE_ALGO_CUSTOM``), in order to allow the driver to
- define its own hardware-assisted algorithm. Very few hardware need to
- use it nowadays. Using ``DVBFE_ALGO_CUSTOM`` require to provide other
- function callbacks at struct :c:type:`dvb_frontend_ops`.
-
-Attaching frontend driver to the bridge driver
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Before using the Digital TV frontend core, the bridge driver should attach
-the frontend demod, tuner and SEC devices and call
-:c:func:`dvb_register_frontend()`,
-in order to register the new frontend at the subsystem. At device
-detach/removal, the bridge driver should call
-:c:func:`dvb_unregister_frontend()` to
-remove the frontend from the core and then :c:func:`dvb_frontend_detach()`
-to free the memory allocated by the frontend drivers.
-
-The drivers should also call :c:func:`dvb_frontend_suspend()` as part of
-their handler for the :c:type:`device_driver`.\ ``suspend()``, and
-:c:func:`dvb_frontend_resume()` as
-part of their handler for :c:type:`device_driver`.\ ``resume()``.
-
-A few other optional functions are provided to handle some special cases.
-
-.. _dvbv5_stats:
-
-Digital TV Frontend statistics
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Introduction
-^^^^^^^^^^^^
-
-Digital TV frontends provide a range of
-:ref:`statistics <frontend-stat-properties>` meant to help tuning the device
-and measuring the quality of service.
-
-For each statistics measurement, the driver should set the type of scale used,
-or ``FE_SCALE_NOT_AVAILABLE`` if the statistics is not available on a given
-time. Drivers should also provide the number of statistics for each type.
-that's usually 1 for most video standards [#f2]_.
-
-Drivers should initialize each statistic counters with length and
-scale at its init code. For example, if the frontend provides signal
-strength, it should have, on its init code::
-
- struct dtv_frontend_properties *c = &state->fe.dtv_property_cache;
-
- c->strength.len = 1;
- c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
-
-And, when the statistics got updated, set the scale::
-
- c->strength.stat[0].scale = FE_SCALE_DECIBEL;
- c->strength.stat[0].uvalue = strength;
-
-.. [#f2] For ISDB-T, it may provide both a global statistics and a per-layer
- set of statistics. On such cases, len should be equal to 4. The first
- value corresponds to the global stat; the other ones to each layer, e. g.:
-
- - c->cnr.stat[0] for global S/N carrier ratio,
- - c->cnr.stat[1] for Layer A S/N carrier ratio,
- - c->cnr.stat[2] for layer B S/N carrier ratio,
- - c->cnr.stat[3] for layer C S/N carrier ratio.
-
-.. note:: Please prefer to use ``FE_SCALE_DECIBEL`` instead of
- ``FE_SCALE_RELATIVE`` for signal strength and CNR measurements.
-
-Groups of statistics
-^^^^^^^^^^^^^^^^^^^^
-
-There are several groups of statistics currently supported:
-
-Signal strength (:ref:`DTV-STAT-SIGNAL-STRENGTH`)
- - Measures the signal strength level at the analog part of the tuner or
- demod.
-
- - Typically obtained from the gain applied to the tuner and/or frontend
- in order to detect the carrier. When no carrier is detected, the gain is
- at the maximum value (so, strength is on its minimal).
-
- - As the gain is visible through the set of registers that adjust the gain,
- typically, this statistics is always available [#f3]_.
-
- - Drivers should try to make it available all the times, as this statistics
- can be used when adjusting an antenna position and to check for troubles
- at the cabling.
-
- .. [#f3] On a few devices, the gain keeps floating if no carrier.
- On such devices, strength report should check first if carrier is
- detected at the tuner (``FE_HAS_CARRIER``, see :c:type:`fe_status`),
- and otherwise return the lowest possible value.
-
-Carrier Signal to Noise ratio (:ref:`DTV-STAT-CNR`)
- - Signal to Noise ratio for the main carrier.
-
- - Signal to Noise measurement depends on the device. On some hardware, is
- available when the main carrier is detected. On those hardware, CNR
- measurement usually comes from the tuner (e. g. after ``FE_HAS_CARRIER``,
- see :c:type:`fe_status`).
-
- On other devices, it requires inner FEC decoding,
- as the frontend measures it indirectly from other parameters (e. g. after
- ``FE_HAS_VITERBI``, see :c:type:`fe_status`).
-
- Having it available after inner FEC is more common.
-
-Bit counts post-FEC (:ref:`DTV-STAT-POST-ERROR-BIT-COUNT` and :ref:`DTV-STAT-POST-TOTAL-BIT-COUNT`)
- - Those counters measure the number of bits and bit errors errors after
- the forward error correction (FEC) on the inner coding block
- (after Viterbi, LDPC or other inner code).
-
- - Due to its nature, those statistics depend on full coding lock
- (e. g. after ``FE_HAS_SYNC`` or after ``FE_HAS_LOCK``,
- see :c:type:`fe_status`).
-
-Bit counts pre-FEC (:ref:`DTV-STAT-PRE-ERROR-BIT-COUNT` and :ref:`DTV-STAT-PRE-TOTAL-BIT-COUNT`)
- - Those counters measure the number of bits and bit errors errors before
- the forward error correction (FEC) on the inner coding block
- (before Viterbi, LDPC or other inner code).
-
- - Not all frontends provide this kind of statistics.
-
- - Due to its nature, those statistics depend on inner coding lock (e. g.
- after ``FE_HAS_VITERBI``, see :c:type:`fe_status`).
-
-Block counts (:ref:`DTV-STAT-ERROR-BLOCK-COUNT` and :ref:`DTV-STAT-TOTAL-BLOCK-COUNT`)
- - Those counters measure the number of blocks and block errors errors after
- the forward error correction (FEC) on the inner coding block
- (before Viterbi, LDPC or other inner code).
-
- - Due to its nature, those statistics depend on full coding lock
- (e. g. after ``FE_HAS_SYNC`` or after
- ``FE_HAS_LOCK``, see :c:type:`fe_status`).
-
-.. note:: All counters should be monotonically increased as they're
- collected from the hardware.
-
-A typical example of the logic that handle status and statistics is::
-
- static int foo_get_status_and_stats(struct dvb_frontend *fe)
- {
- struct foo_state *state = fe->demodulator_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
-
- int rc;
- enum fe_status *status;
-
- /* Both status and strength are always available */
- rc = foo_read_status(fe, &status);
- if (rc < 0)
- return rc;
-
- rc = foo_read_strength(fe);
- if (rc < 0)
- return rc;
-
- /* Check if CNR is available */
- if (!(fe->status & FE_HAS_CARRIER))
- return 0;
-
- rc = foo_read_cnr(fe);
- if (rc < 0)
- return rc;
-
- /* Check if pre-BER stats are available */
- if (!(fe->status & FE_HAS_VITERBI))
- return 0;
-
- rc = foo_get_pre_ber(fe);
- if (rc < 0)
- return rc;
-
- /* Check if post-BER stats are available */
- if (!(fe->status & FE_HAS_SYNC))
- return 0;
-
- rc = foo_get_post_ber(fe);
- if (rc < 0)
- return rc;
- }
-
- static const struct dvb_frontend_ops ops = {
- /* ... */
- .read_status = foo_get_status_and_stats,
- };
-
-Statistics collect
-^^^^^^^^^^^^^^^^^^
-
-On almost all frontend hardware, the bit and byte counts are stored by
-the hardware after a certain amount of time or after the total bit/block
-counter reaches a certain value (usually programable), for example, on
-every 1000 ms or after receiving 1,000,000 bits.
-
-So, if you read the registers too soon, you'll end by reading the same
-value as in the previous reading, causing the monotonic value to be
-incremented too often.
-
-Drivers should take the responsibility to avoid too often reads. That
-can be done using two approaches:
-
-if the driver have a bit that indicates when a collected data is ready
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-Driver should check such bit before making the statistics available.
-
-An example of such behavior can be found at this code snippet (adapted
-from mb86a20s driver's logic)::
-
- static int foo_get_pre_ber(struct dvb_frontend *fe)
- {
- struct foo_state *state = fe->demodulator_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int rc, bit_error;
-
- /* Check if the BER measures are already available */
- rc = foo_read_u8(state, 0x54);
- if (rc < 0)
- return rc;
-
- if (!rc)
- return 0;
-
- /* Read Bit Error Count */
- bit_error = foo_read_u32(state, 0x55);
- if (bit_error < 0)
- return bit_error;
-
- /* Read Total Bit Count */
- rc = foo_read_u32(state, 0x51);
- if (rc < 0)
- return rc;
-
- c->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
- c->pre_bit_error.stat[0].uvalue += bit_error;
- c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
- c->pre_bit_count.stat[0].uvalue += rc;
-
- return 0;
- }
-
-If the driver doesn't provide a statistics available check bit
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-A few devices, however, may not provide a way to check if the stats are
-available (or the way to check it is unknown). They may not even provide
-a way to directly read the total number of bits or blocks.
-
-On those devices, the driver need to ensure that it won't be reading from
-the register too often and/or estimate the total number of bits/blocks.
-
-On such drivers, a typical routine to get statistics would be like
-(adapted from dib8000 driver's logic)::
-
- struct foo_state {
- /* ... */
-
- unsigned long per_jiffies_stats;
- }
-
- static int foo_get_pre_ber(struct dvb_frontend *fe)
- {
- struct foo_state *state = fe->demodulator_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int rc, bit_error;
- u64 bits;
-
- /* Check if time for stats was elapsed */
- if (!time_after(jiffies, state->per_jiffies_stats))
- return 0;
-
- /* Next stat should be collected in 1000 ms */
- state->per_jiffies_stats = jiffies + msecs_to_jiffies(1000);
-
- /* Read Bit Error Count */
- bit_error = foo_read_u32(state, 0x55);
- if (bit_error < 0)
- return bit_error;
-
- /*
- * On this particular frontend, there's no register that
- * would provide the number of bits per 1000ms sample. So,
- * some function would calculate it based on DTV properties
- */
- bits = get_number_of_bits_per_1000ms(fe);
-
- c->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
- c->pre_bit_error.stat[0].uvalue += bit_error;
- c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
- c->pre_bit_count.stat[0].uvalue += bits;
-
- return 0;
- }
-
-Please notice that, on both cases, we're getting the statistics using the
-:c:type:`dvb_frontend_ops` ``.read_status`` callback. The rationale is that
-the frontend core will automatically call this function periodically
-(usually, 3 times per second, when the frontend is locked).
-
-That warrants that we won't miss to collect a counter and increment the
-monotonic stats at the right time.
-
-Digital TV Frontend functions and types
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. kernel-doc:: drivers/media/dvb-core/dvb_frontend.h
-
-
-Digital TV Demux kABI
----------------------
-
-Digital TV Demux
-~~~~~~~~~~~~~~~~
-
-The Kernel Digital TV Demux kABI defines a driver-internal interface for
-registering low-level, hardware specific driver to a hardware independent
-demux layer. It is only of interest for Digital TV device driver writers.
-The header file for this kABI is named demux.h and located in
-drivers/media/dvb-core.
-
-The demux kABI should be implemented for each demux in the system. It is
-used to select the TS source of a demux and to manage the demux resources.
-When the demux client allocates a resource via the demux kABI, it receives
-a pointer to the kABI of that resource.
-
-Each demux receives its TS input from a DVB front-end or from memory, as
-set via this demux kABI. In a system with more than one front-end, the kABI
-can be used to select one of the DVB front-ends as a TS source for a demux,
-unless this is fixed in the HW platform.
-
-The demux kABI only controls front-ends regarding to their connections with
-demuxes; the kABI used to set the other front-end parameters, such as
-tuning, are devined via the Digital TV Frontend kABI.
-
-The functions that implement the abstract interface demux should be defined
-static or module private and registered to the Demux core for external
-access. It is not necessary to implement every function in the struct
-&dmx_demux. For example, a demux interface might support Section filtering,
-but not PES filtering. The kABI client is expected to check the value of any
-function pointer before calling the function: the value of ``NULL`` means
-that the function is not available.
-
-Whenever the functions of the demux API modify shared data, the
-possibilities of lost update and race condition problems should be
-addressed, e.g. by protecting parts of code with mutexes.
-
-Note that functions called from a bottom half context must not sleep.
-Even a simple memory allocation without using ``GFP_ATOMIC`` can result in a
-kernel thread being put to sleep if swapping is needed. For example, the
-Linux Kernel calls the functions of a network device interface from a
-bottom half context. Thus, if a demux kABI function is called from network
-device code, the function must not sleep.
-
-
-
-Demux Callback API
-------------------
-
-Demux Callback
-~~~~~~~~~~~~~~
-
-This kernel-space API comprises the callback functions that deliver filtered
-data to the demux client. Unlike the other DVB kABIs, these functions are
-provided by the client and called from the demux code.
-
-The function pointers of this abstract interface are not packed into a
-structure as in the other demux APIs, because the callback functions are
-registered and used independent of each other. As an example, it is possible
-for the API client to provide several callback functions for receiving TS
-packets and no callbacks for PES packets or sections.
-
-The functions that implement the callback API need not be re-entrant: when
-a demux driver calls one of these functions, the driver is not allowed to
-call the function again before the original call returns. If a callback is
-triggered by a hardware interrupt, it is recommended to use the Linux
-bottom half mechanism or start a tasklet instead of making the callback
-function call directly from a hardware interrupt.
-
-This mechanism is implemented by :c:func:`dmx_ts_cb()` and :c:func:`dmx_section_cb()`
-callbacks.
-
-.. kernel-doc:: drivers/media/dvb-core/demux.h
-
-Digital TV Conditional Access kABI
-----------------------------------
-
-.. kernel-doc:: drivers/media/dvb-core/dvb_ca_en50221.h
+ dtv-common
+ dtv-frontend
+ dtv-demux
+ dtv-ca
+ dtv-net
diff --git a/Documentation/media/kapi/dtv-demux.rst b/Documentation/media/kapi/dtv-demux.rst
new file mode 100644
index 000000000000..7aa865a2b43f
--- /dev/null
+++ b/Documentation/media/kapi/dtv-demux.rst
@@ -0,0 +1,82 @@
+Digital TV Demux kABI
+---------------------
+
+Digital TV Demux
+~~~~~~~~~~~~~~~~
+
+The Kernel Digital TV Demux kABI defines a driver-internal interface for
+registering a low-level, hardware-specific driver to a hardware-independent
+demux layer. It is only of interest for Digital TV device driver writers.
+The header file for this kABI is named ``demux.h`` and located in
+``drivers/media/dvb-core``.
+
+The demux kABI should be implemented for each demux in the system. It is
+used to select the TS source of a demux and to manage the demux resources.
+When the demux client allocates a resource via the demux kABI, it receives
+a pointer to the kABI of that resource.
+
+Each demux receives its TS input from a DVB front-end or from memory, as
+set via this demux kABI. In a system with more than one front-end, the kABI
+can be used to select one of the DVB front-ends as a TS source for a demux,
+unless this is fixed in the HW platform.
+
+The demux kABI only controls front-ends regarding their connections with
+demuxes; the kABI used to set the other front-end parameters, such as
+tuning, is defined via the Digital TV Frontend kABI.
+
+The functions that implement the abstract demux interface should be defined
+static or module private and registered to the Demux core for external
+access. It is not necessary to implement every function in the struct
+:c:type:`dmx_demux`. For example, a demux interface might support Section filtering,
+but not PES filtering. The kABI client is expected to check the value of any
+function pointer before calling the function: the value of ``NULL`` means
+that the function is not available.
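+
+A minimal sketch of such a check, as seen from the kABI client side (the
+``write`` op is used here only as an illustration of an optional function
+pointer in struct :c:type:`dmx_demux`)::
+
+   /* Only call the op if the demux driver actually provides it */
+   if (demux->write)
+       ret = demux->write(demux, buf, count);
+   else
+       ret = -EINVAL;  /* operation not supported by this demux */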
+
+Whenever the functions of the demux API modify shared data, the
+possibilities of lost update and race condition problems should be
+addressed, e.g. by protecting parts of code with mutexes.
+
+Note that functions called from a bottom half context must not sleep.
+Even a simple memory allocation without using ``GFP_ATOMIC`` can result in a
+kernel thread being put to sleep if swapping is needed. For example, the
+Linux Kernel calls the functions of a network device interface from a
+bottom half context. Thus, if a demux kABI function is called from network
+device code, the function must not sleep.
+
+Demux Callback API
+~~~~~~~~~~~~~~~~~~
+
+This kernel-space API comprises the callback functions that deliver filtered
+data to the demux client. Unlike the other DVB kABIs, these functions are
+provided by the client and called from the demux code.
+
+The function pointers of this abstract interface are not packed into a
+structure as in the other demux APIs, because the callback functions are
+registered and used independently of each other. As an example, it is possible
+for the API client to provide several callback functions for receiving TS
+packets and no callbacks for PES packets or sections.
+
+The functions that implement the callback API need not be re-entrant: when
+a demux driver calls one of these functions, the driver is not allowed to
+call the function again before the original call returns. If a callback is
+triggered by a hardware interrupt, it is recommended to use the Linux
+bottom half mechanism or start a tasklet instead of making the callback
+function call directly from a hardware interrupt.
+
+This mechanism is implemented by the :c:func:`dmx_ts_cb()` and
+:c:func:`dmx_section_cb()` callbacks.
+
+Digital TV Demux device registration functions and data structures
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/media/dvb-core/dmxdev.h
+
+High-level Digital TV demux interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/media/dvb-core/dvb_demux.h
+
+Driver-internal low-level hardware specific driver demux interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/media/dvb-core/demux.h
diff --git a/Documentation/media/kapi/dtv-frontend.rst b/Documentation/media/kapi/dtv-frontend.rst
new file mode 100644
index 000000000000..f1a2fdaab5ba
--- /dev/null
+++ b/Documentation/media/kapi/dtv-frontend.rst
@@ -0,0 +1,443 @@
+Digital TV Frontend kABI
+------------------------
+
+Digital TV Frontend
+~~~~~~~~~~~~~~~~~~~
+
+The Digital TV Frontend kABI defines a driver-internal interface for
+registering low-level, hardware specific driver to a hardware independent
+frontend layer. It is only of interest for Digital TV device driver writers.
+The header file for this API is named ``dvb_frontend.h`` and located in
+``drivers/media/dvb-core``.
+
+Demodulator driver
+^^^^^^^^^^^^^^^^^^
+
+The demodulator driver is responsible for talking with the decoding part of
+the hardware. Such a driver should implement :c:type:`dvb_frontend_ops`,
+which tells which digital TV standards are supported, and points to a
+series of functions that allow the DVB core to command the hardware via
+the code under ``drivers/media/dvb-core/dvb_frontend.c``.
+
+A typical example of such a struct in a driver ``foo`` is::
+
+ static struct dvb_frontend_ops foo_ops = {
+ .delsys = { SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A },
+ .info = {
+ .name = "foo DVB-T/T2/C driver",
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_32 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_128 |
+ FE_CAN_QAM_256 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO |
+ FE_CAN_MUTE_TS |
+ FE_CAN_2G_MODULATION,
+ .frequency_min = 42000000, /* Hz */
+ .frequency_max = 1002000000, /* Hz */
+ .symbol_rate_min = 870000,
+ .symbol_rate_max = 11700000
+ },
+ .init = foo_init,
+ .sleep = foo_sleep,
+ .release = foo_release,
+ .set_frontend = foo_set_frontend,
+ .get_frontend = foo_get_frontend,
+ .read_status = foo_get_status_and_stats,
+ .tune = foo_tune,
+ .i2c_gate_ctrl = foo_i2c_gate_ctrl,
+ .get_frontend_algo = foo_get_algo,
+ };
+
+A typical example of such a struct in a driver ``bar`` meant to be used for
+satellite TV reception is::
+
+ static const struct dvb_frontend_ops bar_ops = {
+ .delsys = { SYS_DVBS, SYS_DVBS2 },
+ .info = {
+ .name = "Bar DVB-S/S2 demodulator",
+ .frequency_min = 500000, /* KHz */
+ .frequency_max = 2500000, /* KHz */
+ .frequency_stepsize = 0,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+ .symbol_rate_tolerance = 500,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK,
+ },
+ .init = bar_init,
+ .sleep = bar_sleep,
+ .release = bar_release,
+ .set_frontend = bar_set_frontend,
+ .get_frontend = bar_get_frontend,
+ .read_status = bar_get_status_and_stats,
+ .i2c_gate_ctrl = bar_i2c_gate_ctrl,
+ .get_frontend_algo = bar_get_algo,
+ .tune = bar_tune,
+
+ /* Satellite-specific */
+ .diseqc_send_master_cmd = bar_send_diseqc_msg,
+ .diseqc_send_burst = bar_send_burst,
+ .set_tone = bar_set_tone,
+ .set_voltage = bar_set_voltage,
+ };
+
+.. note::
+
+ #) For satellite digital TV standards (DVB-S, DVB-S2, ISDB-S), the
+ frequencies are specified in kHz, while, for terrestrial and cable
+ standards, they're specified in Hz. Due to that, if the same frontend
+ supports both types, you'll need to have two separate
+ :c:type:`dvb_frontend_ops` structures, one for each standard.
+ #) The ``.i2c_gate_ctrl`` field is present only when the hardware
+ allows controlling an I2C gate (either directly or via some GPIO pin),
+ in order to remove the tuner from the I2C bus after a channel is
+ tuned.
+ #) All new drivers should implement the
+ :ref:`DVBv5 statistics <dvbv5_stats>` via ``.read_status``.
+ Yet, there are a number of callbacks meant to get statistics for
+ signal strength, S/N and UCB. Those are there to provide backward
+ compatibility with legacy applications that don't support the DVBv5
+ API. Implementing those callbacks is optional. Those callbacks may be
+ removed in the future, after we have all existing drivers supporting
+ DVBv5 stats.
+ #) Other callbacks are required for satellite TV standards, in order to
+ control LNBf and DiSEqC: ``.diseqc_send_master_cmd``,
+ ``.diseqc_send_burst``, ``.set_tone``, ``.set_voltage``.
+
+.. |delta| unicode:: U+00394
+
+The ``drivers/media/dvb-core/dvb_frontend.c`` has a kernel thread which is
+responsible for tuning the device. It supports multiple algorithms to
+detect a channel, as defined by enum :c:func:`dvbfe_algo`.
+
+The algorithm to be used is obtained via ``.get_frontend_algo``. If the driver
+doesn't fill this field in struct :c:type:`dvb_frontend_ops`, it will default to
+``DVBFE_ALGO_SW``, meaning that the dvb-core will do a zigzag when tuning,
+e. g. it will try first to use the specified center frequency ``f``,
+then, it will do ``f`` + |delta|, ``f`` - |delta|, ``f`` + 2 x |delta|,
+``f`` - 2 x |delta| and so on.
+
+If the hardware internally has some sort of zigzag algorithm, you should
+define a ``.get_frontend_algo`` function that returns ``DVBFE_ALGO_HW``.
+
+.. note::
+
+ The frontend core also supports
+ a third type (``DVBFE_ALGO_CUSTOM``), in order to allow the driver to
+ define its own hardware-assisted algorithm. Very few devices need to
+ use it nowadays. Using ``DVBFE_ALGO_CUSTOM`` requires providing other
+ function callbacks in struct :c:type:`dvb_frontend_ops`.
+
+Attaching frontend driver to the bridge driver
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Before using the Digital TV frontend core, the bridge driver should attach
+the frontend demod, tuner and SEC devices and call
+:c:func:`dvb_register_frontend()`,
+in order to register the new frontend at the subsystem. At device
+detach/removal, the bridge driver should call
+:c:func:`dvb_unregister_frontend()` to
+remove the frontend from the core and then :c:func:`dvb_frontend_detach()`
+to free the memory allocated by the frontend drivers.
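+
+A minimal sketch of that flow, assuming a bridge driver private structure
+with ``dvb_adapter`` and ``fe`` members (``foo_demod_attach`` and its config
+are placeholders for the attach function and configuration of the actual
+demod driver)::
+
+   /* at bridge probe time */
+   priv->fe = dvb_attach(foo_demod_attach, &foo_demod_config, i2c_adap);
+   if (!priv->fe)
+       return -ENODEV;
+
+   ret = dvb_register_frontend(&priv->dvb_adapter, priv->fe);
+   if (ret < 0) {
+       dvb_frontend_detach(priv->fe);
+       return ret;
+   }
+
+   /* at bridge detach/removal time */
+   dvb_unregister_frontend(priv->fe);
+   dvb_frontend_detach(priv->fe);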
+
+The drivers should also call :c:func:`dvb_frontend_suspend()` as part of
+their handler for the :c:type:`device_driver`.\ ``suspend()``, and
+:c:func:`dvb_frontend_resume()` as
+part of their handler for :c:type:`device_driver`.\ ``resume()``.
+
+A few other optional functions are provided to handle some special cases.
+
+.. _dvbv5_stats:
+
+Digital TV Frontend statistics
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Introduction
+^^^^^^^^^^^^
+
+Digital TV frontends provide a range of
+:ref:`statistics <frontend-stat-properties>` meant to help tuning the device
+and measuring the quality of service.
+
+For each statistics measurement, the driver should set the type of scale used,
+or ``FE_SCALE_NOT_AVAILABLE`` if the statistic is not available at a given
+time. Drivers should also provide the number of statistics for each type;
+that's usually 1 for most video standards [#f2]_.
+
+Drivers should initialize each statistic counter with length and
+scale in their init code. For example, if the frontend provides signal
+strength, it should have, in its init code::
+
+ struct dtv_frontend_properties *c = &state->fe.dtv_property_cache;
+
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+And, when the statistics get updated, set the scale::
+
+ c->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ c->strength.stat[0].uvalue = strength;
+
+.. [#f2] For ISDB-T, it may provide both global statistics and a per-layer
+ set of statistics. In such cases, len should be equal to 4. The first
+ value corresponds to the global stat; the other ones to each layer, e. g.:
+
+ - c->cnr.stat[0] for global S/N carrier ratio,
+ - c->cnr.stat[1] for Layer A S/N carrier ratio,
+ - c->cnr.stat[2] for layer B S/N carrier ratio,
+ - c->cnr.stat[3] for layer C S/N carrier ratio.
+
+.. note:: Please prefer to use ``FE_SCALE_DECIBEL`` instead of
+ ``FE_SCALE_RELATIVE`` for signal strength and CNR measurements.
+
+Groups of statistics
+^^^^^^^^^^^^^^^^^^^^
+
+There are several groups of statistics currently supported:
+
+Signal strength (:ref:`DTV-STAT-SIGNAL-STRENGTH`)
+ - Measures the signal strength level at the analog part of the tuner or
+ demod.
+
+ - Typically obtained from the gain applied to the tuner and/or frontend
+ in order to detect the carrier. When no carrier is detected, the gain is
+ at the maximum value (so, strength is at its minimum).
+
+ - As the gain is visible through the set of registers that adjust the gain,
+ typically, this statistic is always available [#f3]_.
+
+ - Drivers should try to make it available at all times, as this statistic
+ can be used when adjusting an antenna position and to check for troubles
+ in the cabling.
+
+ .. [#f3] On a few devices, the gain keeps floating if there is no carrier.
+ On such devices, the strength report should first check whether a carrier is
+ detected at the tuner (``FE_HAS_CARRIER``, see :c:type:`fe_status`),
+ and otherwise return the lowest possible value.
+
+Carrier Signal to Noise ratio (:ref:`DTV-STAT-CNR`)
+ - Signal to Noise ratio for the main carrier.
+
+ - Signal to Noise measurement depends on the device. On some hardware, it is
+ available when the main carrier is detected. On such hardware, the CNR
+ measurement usually comes from the tuner (e. g. after ``FE_HAS_CARRIER``,
+ see :c:type:`fe_status`).
+
+ On other devices, it requires inner FEC decoding,
+ as the frontend measures it indirectly from other parameters (e. g. after
+ ``FE_HAS_VITERBI``, see :c:type:`fe_status`).
+
+ Having it available after inner FEC is more common.
+
+Bit counts post-FEC (:ref:`DTV-STAT-POST-ERROR-BIT-COUNT` and :ref:`DTV-STAT-POST-TOTAL-BIT-COUNT`)
+ - Those counters measure the number of bits and bit errors after
+ the forward error correction (FEC) on the inner coding block
+ (after Viterbi, LDPC or other inner code).
+
+ - Due to its nature, those statistics depend on full coding lock
+ (e. g. after ``FE_HAS_SYNC`` or after ``FE_HAS_LOCK``,
+ see :c:type:`fe_status`).
+
+Bit counts pre-FEC (:ref:`DTV-STAT-PRE-ERROR-BIT-COUNT` and :ref:`DTV-STAT-PRE-TOTAL-BIT-COUNT`)
+ - Those counters measure the number of bits and bit errors before
+ the forward error correction (FEC) on the inner coding block
+ (before Viterbi, LDPC or other inner code).
+
+ - Not all frontends provide this kind of statistics.
+
+ - Due to its nature, those statistics depend on inner coding lock (e. g.
+ after ``FE_HAS_VITERBI``, see :c:type:`fe_status`).
+
+Block counts (:ref:`DTV-STAT-ERROR-BLOCK-COUNT` and :ref:`DTV-STAT-TOTAL-BLOCK-COUNT`)
+ - Those counters measure the number of blocks and block errors after
+ the forward error correction (FEC) on the inner coding block
+ (before Viterbi, LDPC or other inner code).
+
+ - Due to its nature, those statistics depend on full coding lock
+ (e. g. after ``FE_HAS_SYNC`` or after
+ ``FE_HAS_LOCK``, see :c:type:`fe_status`).
+
+.. note:: All counters should be monotonically increased as they're
+ collected from the hardware.
+
+A typical example of the logic that handles status and statistics is::
+
+ static int foo_get_status_and_stats(struct dvb_frontend *fe)
+ {
+ struct foo_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ int rc;
+ enum fe_status status;
+
+ /* Both status and strength are always available */
+ rc = foo_read_status(fe, &status);
+ if (rc < 0)
+ return rc;
+
+ rc = foo_read_strength(fe);
+ if (rc < 0)
+ return rc;
+
+ /* Check if CNR is available */
+ if (!(fe->status & FE_HAS_CARRIER))
+ return 0;
+
+ rc = foo_read_cnr(fe);
+ if (rc < 0)
+ return rc;
+
+ /* Check if pre-BER stats are available */
+ if (!(fe->status & FE_HAS_VITERBI))
+ return 0;
+
+ rc = foo_get_pre_ber(fe);
+ if (rc < 0)
+ return rc;
+
+ /* Check if post-BER stats are available */
+ if (!(fe->status & FE_HAS_SYNC))
+ return 0;
+
+ rc = foo_get_post_ber(fe);
+ if (rc < 0)
+ return rc;
+ }
+
+ static const struct dvb_frontend_ops ops = {
+ /* ... */
+ .read_status = foo_get_status_and_stats,
+ };
+
+Statistics collection
+^^^^^^^^^^^^^^^^^^^^^
+
+On almost all frontend hardware, the bit and byte counts are stored by
+the hardware after a certain amount of time or after the total bit/block
+counter reaches a certain value (usually programmable), for example,
+every 1000 ms or after receiving 1,000,000 bits.
+
+So, if you read the registers too soon, you'll end up reading the same
+value as in the previous reading, causing the monotonic value to be
+incremented too often.
+
+Drivers are responsible for avoiding reading the registers too often. That
+can be done using one of two approaches:
+
+If the driver has a bit that indicates when collected data is ready
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+The driver should check such a bit before making the statistics available.
+
+An example of such behavior can be found in this code snippet (adapted
+from the mb86a20s driver's logic)::
+
+ static int foo_get_pre_ber(struct dvb_frontend *fe)
+ {
+ struct foo_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int rc, bit_error;
+
+ /* Check if the BER measures are already available */
+ rc = foo_read_u8(state, 0x54);
+ if (rc < 0)
+ return rc;
+
+ if (!rc)
+ return 0;
+
+ /* Read Bit Error Count */
+ bit_error = foo_read_u32(state, 0x55);
+ if (bit_error < 0)
+ return bit_error;
+
+ /* Read Total Bit Count */
+ rc = foo_read_u32(state, 0x51);
+ if (rc < 0)
+ return rc;
+
+ c->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->pre_bit_error.stat[0].uvalue += bit_error;
+ c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->pre_bit_count.stat[0].uvalue += rc;
+
+ return 0;
+ }
+
+If the driver doesn't provide a statistics available check bit
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+A few devices, however, may not provide a way to check if the stats are
+available (or the way to check it is unknown). They may not even provide
+a way to directly read the total number of bits or blocks.
+
+On those devices, the driver needs to ensure that it won't be reading from
+the register too often and/or estimate the total number of bits/blocks.
+
+On such drivers, a typical routine to get statistics would look like this
+(adapted from the dib8000 driver's logic)::
+
+ struct foo_state {
+ /* ... */
+
+ unsigned long per_jiffies_stats;
+ };
+
+ static int foo_get_pre_ber(struct dvb_frontend *fe)
+ {
+ struct foo_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int rc, bit_error;
+ u64 bits;
+
+ /* Check if time for stats has elapsed */
+ if (!time_after(jiffies, state->per_jiffies_stats))
+ return 0;
+
+ /* Next stat should be collected in 1000 ms */
+ state->per_jiffies_stats = jiffies + msecs_to_jiffies(1000);
+
+ /* Read Bit Error Count */
+ bit_error = foo_read_u32(state, 0x55);
+ if (bit_error < 0)
+ return bit_error;
+
+ /*
+ * On this particular frontend, there's no register that
+ * would provide the number of bits per 1000ms sample. So,
+ * some function would calculate it based on DTV properties
+ */
+ bits = get_number_of_bits_per_1000ms(fe);
+
+ c->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->pre_bit_error.stat[0].uvalue += bit_error;
+ c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->pre_bit_count.stat[0].uvalue += bits;
+
+ return 0;
+ }
+
+Please notice that, in both cases, we're getting the statistics using the
+:c:type:`dvb_frontend_ops` ``.read_status`` callback. The rationale is that
+the frontend core will automatically call this function periodically
+(usually, 3 times per second, when the frontend is locked).
+
+That ensures that we won't fail to collect a counter and increment the
+monotonic stats at the right time.
+
+Digital TV Frontend functions and types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/media/dvb-core/dvb_frontend.h
diff --git a/Documentation/media/kapi/dtv-net.rst b/Documentation/media/kapi/dtv-net.rst
new file mode 100644
index 000000000000..ced991b73d69
--- /dev/null
+++ b/Documentation/media/kapi/dtv-net.rst
@@ -0,0 +1,4 @@
+Digital TV Network kABI
+-----------------------
+
+.. kernel-doc:: drivers/media/dvb-core/dvb_net.h
diff --git a/Documentation/media/kapi/v4l2-async.rst b/Documentation/media/kapi/v4l2-async.rst
new file mode 100644
index 000000000000..523ff9eb09a0
--- /dev/null
+++ b/Documentation/media/kapi/v4l2-async.rst
@@ -0,0 +1,3 @@
+V4L2 async kAPI
+^^^^^^^^^^^^^^^
+.. kernel-doc:: include/media/v4l2-async.h
diff --git a/Documentation/media/kapi/v4l2-core.rst b/Documentation/media/kapi/v4l2-core.rst
index c7434f38fd9c..5cf292037a48 100644
--- a/Documentation/media/kapi/v4l2-core.rst
+++ b/Documentation/media/kapi/v4l2-core.rst
@@ -19,6 +19,7 @@ Video4Linux devices
v4l2-mc
v4l2-mediabus
v4l2-mem2mem
+ v4l2-async
v4l2-fwnode
v4l2-rect
v4l2-tuner
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index a5c821809cc6..b6fd86424fbb 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -161,6 +161,24 @@ it is guaranteed that the state did change in between the two events.
- Generated if the CEC pin goes from a low voltage to a high voltage.
Only applies to adapters that have the ``CEC_CAP_MONITOR_PIN``
capability set.
+ * .. _`CEC-EVENT-PIN-HPD-LOW`:
+
+ - ``CEC_EVENT_PIN_HPD_LOW``
+ - 5
+ - Generated if the HPD pin goes from a high voltage to a low voltage.
+ Only applies to adapters that have the ``CEC_CAP_MONITOR_PIN``
+ capability set. When open() is called, the HPD pin can be read and
+ if the HPD is low, then an initial event will be generated for that
+ filehandle.
+ * .. _`CEC-EVENT-PIN-HPD-HIGH`:
+
+ - ``CEC_EVENT_PIN_HPD_HIGH``
+ - 6
+ - Generated if the HPD pin goes from a low voltage to a high voltage.
+ Only applies to adapters that have the ``CEC_CAP_MONITOR_PIN``
+ capability set. When open() is called, the HPD pin can be read and
+ if the HPD is high, then an initial event will be generated for that
+ filehandle.
.. tabularcolumns:: |p{6.0cm}|p{0.6cm}|p{10.9cm}|
@@ -172,9 +190,9 @@ it is guaranteed that the state did change in between the two events.
:stub-columns: 0
:widths: 3 1 8
- * .. _`CEC-EVENT-FL-INITIAL-VALUE`:
+ * .. _`CEC-EVENT-FL-INITIAL-STATE`:
- - ``CEC_EVENT_FL_INITIAL_VALUE``
+ - ``CEC_EVENT_FL_INITIAL_STATE``
- 1
- Set for the initial events that are generated when the device is
opened. See the table above for which events do this. This allows
diff --git a/Documentation/media/uapi/cec/cec-ioc-receive.rst b/Documentation/media/uapi/cec/cec-ioc-receive.rst
index 0f397c535a4c..bdad4b197bcd 100644
--- a/Documentation/media/uapi/cec/cec-ioc-receive.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-receive.rst
@@ -131,7 +131,7 @@ View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV').
- ``tx_status``
- The status bits of the transmitted message. See
:ref:`cec-tx-status` for the possible status values. It is 0 if
- this messages was received, not transmitted.
+ this message was received, not transmitted.
* - __u8
- ``msg[16]``
- The message payload. For :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` this is filled in by the
@@ -168,7 +168,7 @@ View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV').
- ``tx_status``
- The status bits of the transmitted message. See
:ref:`cec-tx-status` for the possible status values. It is 0 if
- this messages was received, not transmitted.
+ this message was received, not transmitted.
* - __u8
- ``tx_arb_lost_cnt``
- A counter of the number of transmit attempts that resulted in the
@@ -256,9 +256,9 @@ View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV').
- ``CEC_TX_STATUS_ERROR``
- 0x10
- Some error occurred. This is used for any errors that do not fit
- the previous two, either because the hardware could not tell which
- error occurred, or because the hardware tested for other
- conditions besides those two.
+ ``CEC_TX_STATUS_ARB_LOST`` or ``CEC_TX_STATUS_LOW_DRIVE``, either because
+ the hardware could not tell which error occurred, or because the hardware
+ tested for other conditions besides those two.
* .. _`CEC-TX-STATUS-MAX-RETRIES`:
- ``CEC_TX_STATUS_MAX_RETRIES``
diff --git a/Documentation/media/uapi/dvb/examples.rst b/Documentation/media/uapi/dvb/examples.rst
index e0f627ca2e4d..16dd90fa9e94 100644
--- a/Documentation/media/uapi/dvb/examples.rst
+++ b/Documentation/media/uapi/dvb/examples.rst
@@ -6,377 +6,11 @@
Examples
********
-In this section we would like to present some examples for using the Digital
-TV API.
+In the past, we used to have a set of examples here. However, those
+examples got out of date and don't even compile nowadays.
-.. note::
+Also, nowadays, the best option is to use the libdvbv5 DVB API,
+which is fully documented.
- This section is out of date, and the code below won't even
- compile. Please refer to the
- `libdvbv5 <https://linuxtv.org/docs/libdvbv5/index.html>`__ for
- updated/recommended examples.
-
-
-.. _tuning:
-
-Example: Tuning
-===============
-
-We will start with a generic tuning subroutine that uses the frontend
-and SEC, as well as the demux devices. The example is given for QPSK
-tuners, but can easily be adjusted for QAM.
-
-
-.. code-block:: c
-
- #include <sys/ioctl.h>
- #include <stdio.h>
- #include <stdint.h>
- #include <sys/types.h>
- #include <sys/stat.h>
- #include <fcntl.h>
- #include <time.h>
- #include <unistd.h>
-
- #include <linux/dvb/dmx.h>
- #include <linux/dvb/frontend.h>
- #include <linux/dvb/sec.h>
- #include <sys/poll.h>
-
- #define DMX "/dev/dvb/adapter0/demux1"
- #define FRONT "/dev/dvb/adapter0/frontend1"
- #define SEC "/dev/dvb/adapter0/sec1"
-
- /* routine for checking if we have a signal and other status information*/
- int FEReadStatus(int fd, fe_status_t *stat)
- {
- int ans;
-
- if ( (ans = ioctl(fd,FE_READ_STATUS,stat) < 0)){
- perror("FE READ STATUS: ");
- return -1;
- }
-
- if (*stat & FE_HAS_POWER)
- printf("FE HAS POWER\\n");
-
- if (*stat & FE_HAS_SIGNAL)
- printf("FE HAS SIGNAL\\n");
-
- if (*stat & FE_SPECTRUM_INV)
- printf("SPEKTRUM INV\\n");
-
- return 0;
- }
-
-
- /* tune qpsk */
- /* freq: frequency of transponder */
- /* vpid, apid, tpid: PIDs of video, audio and teletext TS packets */
- /* diseqc: DiSEqC address of the used LNB */
- /* pol: Polarisation */
- /* srate: Symbol Rate */
- /* fec. FEC */
- /* lnb_lof1: local frequency of lower LNB band */
- /* lnb_lof2: local frequency of upper LNB band */
- /* lnb_slof: switch frequency of LNB */
-
- int set_qpsk_channel(int freq, int vpid, int apid, int tpid,
- int diseqc, int pol, int srate, int fec, int lnb_lof1,
- int lnb_lof2, int lnb_slof)
- {
- struct secCommand scmd;
- struct secCmdSequence scmds;
- struct dmx_pes_filter_params pesFilterParams;
- FrontendParameters frp;
- struct pollfd pfd[1];
- FrontendEvent event;
- int demux1, demux2, demux3, front;
-
- frequency = (uint32_t) freq;
- symbolrate = (uint32_t) srate;
-
- if((front = open(FRONT,O_RDWR)) < 0){
- perror("FRONTEND DEVICE: ");
- return -1;
- }
-
- if((sec = open(SEC,O_RDWR)) < 0){
- perror("SEC DEVICE: ");
- return -1;
- }
-
- if (demux1 < 0){
- if ((demux1=open(DMX, O_RDWR|O_NONBLOCK))
- < 0){
- perror("DEMUX DEVICE: ");
- return -1;
- }
- }
-
- if (demux2 < 0){
- if ((demux2=open(DMX, O_RDWR|O_NONBLOCK))
- < 0){
- perror("DEMUX DEVICE: ");
- return -1;
- }
- }
-
- if (demux3 < 0){
- if ((demux3=open(DMX, O_RDWR|O_NONBLOCK))
- < 0){
- perror("DEMUX DEVICE: ");
- return -1;
- }
- }
-
- if (freq < lnb_slof) {
- frp.Frequency = (freq - lnb_lof1);
- scmds.continuousTone = SEC_TONE_OFF;
- } else {
- frp.Frequency = (freq - lnb_lof2);
- scmds.continuousTone = SEC_TONE_ON;
- }
- frp.Inversion = INVERSION_AUTO;
- if (pol) scmds.voltage = SEC_VOLTAGE_18;
- else scmds.voltage = SEC_VOLTAGE_13;
-
- scmd.type=0;
- scmd.u.diseqc.addr=0x10;
- scmd.u.diseqc.cmd=0x38;
- scmd.u.diseqc.numParams=1;
- scmd.u.diseqc.params[0] = 0xF0 | ((diseqc * 4) & 0x0F) |
- (scmds.continuousTone == SEC_TONE_ON ? 1 : 0) |
- (scmds.voltage==SEC_VOLTAGE_18 ? 2 : 0);
-
- scmds.miniCommand=SEC_MINI_NONE;
- scmds.numCommands=1;
- scmds.commands=&scmd;
- if (ioctl(sec, SEC_SEND_SEQUENCE, &scmds) < 0){
- perror("SEC SEND: ");
- return -1;
- }
-
- if (ioctl(sec, SEC_SEND_SEQUENCE, &scmds) < 0){
- perror("SEC SEND: ");
- return -1;
- }
-
- frp.u.qpsk.SymbolRate = srate;
- frp.u.qpsk.FEC_inner = fec;
-
- if (ioctl(front, FE_SET_FRONTEND, &frp) < 0){
- perror("QPSK TUNE: ");
- return -1;
- }
-
- pfd[0].fd = front;
- pfd[0].events = POLLIN;
-
- if (poll(pfd,1,3000)){
- if (pfd[0].revents & POLLIN){
- printf("Getting QPSK event\\n");
- if ( ioctl(front, FE_GET_EVENT, &event)
-
- == -EOVERFLOW){
- perror("qpsk get event");
- return -1;
- }
- printf("Received ");
- switch(event.type){
- case FE_UNEXPECTED_EV:
- printf("unexpected event\\n");
- return -1;
- case FE_FAILURE_EV:
- printf("failure event\\n");
- return -1;
-
- case FE_COMPLETION_EV:
- printf("completion event\\n");
- }
- }
- }
-
-
- pesFilterParams.pid = vpid;
- pesFilterParams.input = DMX_IN_FRONTEND;
- pesFilterParams.output = DMX_OUT_DECODER;
- pesFilterParams.pes_type = DMX_PES_VIDEO;
- pesFilterParams.flags = DMX_IMMEDIATE_START;
- if (ioctl(demux1, DMX_SET_PES_FILTER, &pesFilterParams) < 0){
- perror("set_vpid");
- return -1;
- }
-
- pesFilterParams.pid = apid;
- pesFilterParams.input = DMX_IN_FRONTEND;
- pesFilterParams.output = DMX_OUT_DECODER;
- pesFilterParams.pes_type = DMX_PES_AUDIO;
- pesFilterParams.flags = DMX_IMMEDIATE_START;
- if (ioctl(demux2, DMX_SET_PES_FILTER, &pesFilterParams) < 0){
- perror("set_apid");
- return -1;
- }
-
- pesFilterParams.pid = tpid;
- pesFilterParams.input = DMX_IN_FRONTEND;
- pesFilterParams.output = DMX_OUT_DECODER;
- pesFilterParams.pes_type = DMX_PES_TELETEXT;
- pesFilterParams.flags = DMX_IMMEDIATE_START;
- if (ioctl(demux3, DMX_SET_PES_FILTER, &pesFilterParams) < 0){
- perror("set_tpid");
- return -1;
- }
-
- return has_signal(fds);
- }
-
-The program assumes that you are using a universal LNB and a standard
-DiSEqC switch with up to 4 addresses. Of course, you could build in some
-more checking if tuning was successful and maybe try to repeat the
-tuning process. Depending on the external hardware, i.e. LNB and DiSEqC
-switch, and weather conditions this may be necessary.
-
-
-.. _the_dvr_device:
-
-Example: The DVR device
-========================
-
-The following program code shows how to use the DVR device for
-recording.
-
-
-.. code-block:: c
-
- #include <sys/ioctl.h>
- #include <stdio.h>
- #include <stdint.h>
- #include <sys/types.h>
- #include <sys/stat.h>
- #include <fcntl.h>
- #include <time.h>
- #include <unistd.h>
-
- #include <linux/dvb/dmx.h>
- #include <linux/dvb/video.h>
- #include <sys/poll.h>
- #define DVR "/dev/dvb/adapter0/dvr1"
- #define AUDIO "/dev/dvb/adapter0/audio1"
- #define VIDEO "/dev/dvb/adapter0/video1"
-
- #define BUFFY (188*20)
- #define MAX_LENGTH (1024*1024*5) /* record 5MB */
-
-
- /* switch the demuxes to recording, assuming the transponder is tuned */
-
- /* demux1, demux2: file descriptor of video and audio filters */
- /* vpid, apid: PIDs of video and audio channels */
-
- int switch_to_record(int demux1, int demux2, uint16_t vpid, uint16_t apid)
- {
- struct dmx_pes_filter_params pesFilterParams;
-
- if (demux1 < 0){
- if ((demux1=open(DMX, O_RDWR|O_NONBLOCK))
- < 0){
- perror("DEMUX DEVICE: ");
- return -1;
- }
- }
-
- if (demux2 < 0){
- if ((demux2=open(DMX, O_RDWR|O_NONBLOCK))
- < 0){
- perror("DEMUX DEVICE: ");
- return -1;
- }
- }
-
- pesFilterParams.pid = vpid;
- pesFilterParams.input = DMX_IN_FRONTEND;
- pesFilterParams.output = DMX_OUT_TS_TAP;
- pesFilterParams.pes_type = DMX_PES_VIDEO;
- pesFilterParams.flags = DMX_IMMEDIATE_START;
- if (ioctl(demux1, DMX_SET_PES_FILTER, &pesFilterParams) < 0){
- perror("DEMUX DEVICE");
- return -1;
- }
- pesFilterParams.pid = apid;
- pesFilterParams.input = DMX_IN_FRONTEND;
- pesFilterParams.output = DMX_OUT_TS_TAP;
- pesFilterParams.pes_type = DMX_PES_AUDIO;
- pesFilterParams.flags = DMX_IMMEDIATE_START;
- if (ioctl(demux2, DMX_SET_PES_FILTER, &pesFilterParams) < 0){
- perror("DEMUX DEVICE");
- return -1;
- }
- return 0;
- }
-
- /* start recording MAX_LENGTH , assuming the transponder is tuned */
-
- /* demux1, demux2: file descriptor of video and audio filters */
- /* vpid, apid: PIDs of video and audio channels */
- int record_dvr(int demux1, int demux2, uint16_t vpid, uint16_t apid)
- {
- int i;
- int len;
- int written;
- uint8_t buf[BUFFY];
- uint64_t length;
- struct pollfd pfd[1];
- int dvr, dvr_out;
-
- /* open dvr device */
- if ((dvr = open(DVR, O_RDONLY|O_NONBLOCK)) < 0){
- perror("DVR DEVICE");
- return -1;
- }
-
- /* switch video and audio demuxes to dvr */
- printf ("Switching dvr on\\n");
- i = switch_to_record(demux1, demux2, vpid, apid);
- printf("finished: ");
-
- printf("Recording %2.0f MB of test file in TS format\\n",
- MAX_LENGTH/(1024.0*1024.0));
- length = 0;
-
- /* open output file */
- if ((dvr_out = open(DVR_FILE,O_WRONLY|O_CREAT
- |O_TRUNC, S_IRUSR|S_IWUSR
- |S_IRGRP|S_IWGRP|S_IROTH|
- S_IWOTH)) < 0){
- perror("Can't open file for dvr test");
- return -1;
- }
-
- pfd[0].fd = dvr;
- pfd[0].events = POLLIN;
-
- /* poll for dvr data and write to file */
- while (length < MAX_LENGTH ) {
- if (poll(pfd,1,1)){
- if (pfd[0].revents & POLLIN){
- len = read(dvr, buf, BUFFY);
- if (len < 0){
- perror("recording");
- return -1;
- }
- if (len > 0){
- written = 0;
- while (written < len)
- written +=
- write (dvr_out,
- buf, len);
- length += len;
- printf("written %2.0f MB\\r",
- length/1024./1024.);
- }
- }
- }
- }
- return 0;
- }
+Please refer to the `libdvbv5 <https://linuxtv.org/docs/libdvbv5/index.html>`__
+for updated/recommended examples.
diff --git a/Documentation/media/uapi/dvb/fe-get-property.rst b/Documentation/media/uapi/dvb/fe-get-property.rst
index 948d2ba84f2c..b69741d9cedf 100644
--- a/Documentation/media/uapi/dvb/fe-get-property.rst
+++ b/Documentation/media/uapi/dvb/fe-get-property.rst
@@ -48,8 +48,11 @@ depends on the delivery system and on the device:
- This call requires read/write access to the device.
- - At return, the values are updated to reflect the actual parameters
- used.
+.. note::
+
+ On return, the values aren't updated to reflect the actual
+ parameters used. If the actual parameters are needed, an explicit
+ call to ``FE_GET_PROPERTY`` is required.
- ``FE_GET_PROPERTY:``
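
    As an aside, the note above implies a follow-up read of the frontend state.
    A minimal userspace sketch of such an explicit FE_GET_PROPERTY call is shown
    below; the adapter path and the two properties queried are assumptions, not
    taken from this document.

        /* Sketch only: read back the actual frontend parameters after tuning.
         * The device path and the properties queried are assumptions. */
        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <linux/dvb/frontend.h>

        int main(void)
        {
            struct dtv_property props[2] = {
                { .cmd = DTV_DELIVERY_SYSTEM },
                { .cmd = DTV_FREQUENCY },
            };
            struct dtv_properties cmdseq = { .num = 2, .props = props };
            int fd = open("/dev/dvb/adapter0/frontend0", O_RDONLY);

            if (fd < 0)
                return 1;
            if (ioctl(fd, FE_GET_PROPERTY, &cmdseq) < 0)
                return 1;
            printf("delivery system: %u, frequency: %u\n",
                   props[0].u.data, props[1].u.data);
            return 0;
        }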
diff --git a/Documentation/media/uapi/dvb/net-types.rst b/Documentation/media/uapi/dvb/net-types.rst
index e1177bdcd623..8fa3292eaa42 100644
--- a/Documentation/media/uapi/dvb/net-types.rst
+++ b/Documentation/media/uapi/dvb/net-types.rst
@@ -1,6 +1,6 @@
.. -*- coding: utf-8; mode: rst -*-
-.. _dmx_types:
+.. _net_types:
**************
Net Data Types
diff --git a/Documentation/media/uapi/v4l/dev-sliced-vbi.rst b/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
index 9d6c860271cb..d311a6866b3b 100644
--- a/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
+++ b/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
@@ -431,7 +431,7 @@ MPEG stream.
*Historical context*: This format specification originates from a
custom, embedded, sliced VBI data format used by the ``ivtv`` driver.
This format has already been informally specified in the kernel sources
-in the file ``Documentation/video4linux/cx2341x/README.vbi`` . The
+in the file ``Documentation/media/v4l-drivers/cx2341x.rst`` . The
maximum size of the payload and other aspects of this format are driven
by the CX23415 MPEG decoder's capabilities and limitations with respect
to extracting, decoding, and displaying sliced VBI data embedded within
diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst
index a3e81c1d276b..dfe49ae57e78 100644
--- a/Documentation/media/uapi/v4l/extended-controls.rst
+++ b/Documentation/media/uapi/v4l/extended-controls.rst
@@ -284,7 +284,7 @@ enum v4l2_mpeg_stream_vbi_fmt -
* - ``V4L2_MPEG_STREAM_VBI_FMT_IVTV``
- VBI in private packets, IVTV format (documented in the kernel
sources in the file
- ``Documentation/video4linux/cx2341x/README.vbi``)
+ ``Documentation/media/v4l-drivers/cx2341x.rst``)
diff --git a/Documentation/media/uapi/v4l/pixfmt-reserved.rst b/Documentation/media/uapi/v4l/pixfmt-reserved.rst
index 521adb795535..38af1472a4b4 100644
--- a/Documentation/media/uapi/v4l/pixfmt-reserved.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-reserved.rst
@@ -52,7 +52,7 @@ please make a proposal on the linux-media mailing list.
`http://www.ivtvdriver.org/ <http://www.ivtvdriver.org/>`__
The format is documented in the kernel sources in the file
- ``Documentation/video4linux/cx2341x/README.hm12``
+ ``Documentation/media/v4l-drivers/cx2341x.rst``
* .. _V4L2-PIX-FMT-CPIA1:
- ``V4L2_PIX_FMT_CPIA1``
diff --git a/Documentation/media/v4l-drivers/bttv.rst b/Documentation/media/v4l-drivers/bttv.rst
index 195ccaac2816..5f35e2fb5afa 100644
--- a/Documentation/media/v4l-drivers/bttv.rst
+++ b/Documentation/media/v4l-drivers/bttv.rst
@@ -307,7 +307,7 @@ console and let some terminal application log the messages. /me uses
screen. See Documentation/admin-guide/serial-console.rst for details on setting
up a serial console.
-Read Documentation/admin-guide/oops-tracing.rst to learn how to get any useful
+Read Documentation/admin-guide/bug-hunting.rst to learn how to get any useful
information out of a register+stack dump printed by the kernel on
protection faults (so-called "kernel oops").
diff --git a/Documentation/media/v4l-drivers/max2175.rst b/Documentation/media/v4l-drivers/max2175.rst
index 04478c25d57a..b1a4c89fd869 100644
--- a/Documentation/media/v4l-drivers/max2175.rst
+++ b/Documentation/media/v4l-drivers/max2175.rst
@@ -7,7 +7,7 @@ The MAX2175 driver implements the following driver-specific controls:
-------------------------------
Enable/Disable I2S output of the tuner. This is a private control
that can be accessed only using the subdev interface.
- Refer to Documentation/media/kapi/v4l2-controls for more details.
+ Refer to Documentation/media/kapi/v4l2-controls.rst for more details.
.. flat-table::
:header-rows: 0
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index b759a60624fd..479ecec80593 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -53,7 +53,7 @@ CONTENTS
- SMP barrier pairing.
- Examples of memory barrier sequences.
- Read memory barriers vs load speculation.
- - Transitivity
+ - Multicopy atomicity.
(*) Explicit kernel barriers.
@@ -383,8 +383,8 @@ Memory barriers come in four basic varieties:
to have any effect on loads.
A CPU can be viewed as committing a sequence of store operations to the
- memory system as time progresses. All stores before a write barrier will
- occur in the sequence _before_ all the stores after the write barrier.
+ memory system as time progresses. All stores _before_ a write barrier
+ will occur _before_ all the stores after the write barrier.
[!] Note that write barriers should normally be paired with read or data
dependency barriers; see the "SMP barrier pairing" subsection.
@@ -635,6 +635,11 @@ can be used to record rare error conditions and the like, and the CPUs'
naturally occurring ordering prevents such records from being lost.
+Note well that the ordering provided by a data dependency is local to
+the CPU containing it. See the section on "Multicopy atomicity" for
+more information.
+
+
The data dependency barrier is very important to the RCU system,
for example. See rcu_assign_pointer() and rcu_dereference() in
include/linux/rcupdate.h. This permits the current target of an RCU'd
@@ -851,38 +856,11 @@ In short, control dependencies apply only to the stores in the then-clause
and else-clause of the if-statement in question (including functions
invoked by those two clauses), not to code following that if-statement.
-Finally, control dependencies do -not- provide transitivity. This is
-demonstrated by two related examples, with the initial values of
-'x' and 'y' both being zero:
-
- CPU 0 CPU 1
- ======================= =======================
- r1 = READ_ONCE(x); r2 = READ_ONCE(y);
- if (r1 > 0) if (r2 > 0)
- WRITE_ONCE(y, 1); WRITE_ONCE(x, 1);
-
- assert(!(r1 == 1 && r2 == 1));
-
-The above two-CPU example will never trigger the assert(). However,
-if control dependencies guaranteed transitivity (which they do not),
-then adding the following CPU would guarantee a related assertion:
- CPU 2
- =====================
- WRITE_ONCE(x, 2);
+Note well that the ordering provided by a control dependency is local
+to the CPU containing it. See the section on "Multicopy atomicity"
+for more information.
- assert(!(r1 == 2 && r2 == 1 && x == 2)); /* FAILS!!! */
-
-But because control dependencies do -not- provide transitivity, the above
-assertion can fail after the combined three-CPU example completes. If you
-need the three-CPU example to provide ordering, you will need smp_mb()
-between the loads and stores in the CPU 0 and CPU 1 code fragments,
-that is, just before or just after the "if" statements. Furthermore,
-the original two-CPU example is very fragile and should be avoided.
-
-These two examples are the LB and WWC litmus tests from this paper:
-http://www.cl.cam.ac.uk/users/pes20/ppc-supplemental/test6.pdf and this
-site: https://www.cl.cam.ac.uk/~pes20/ppcmem/index.html.
In summary:
@@ -922,8 +900,8 @@ In summary:
(*) Control dependencies pair normally with other types of barriers.
- (*) Control dependencies do -not- provide transitivity. If you
- need transitivity, use smp_mb().
+ (*) Control dependencies do -not- provide multicopy atomicity. If you
+ need all the CPUs to see a given store at the same time, use smp_mb().
(*) Compilers do not understand control dependencies. It is therefore
your job to ensure that they do not break your code.
@@ -936,13 +914,14 @@ When dealing with CPU-CPU interactions, certain types of memory barrier should
always be paired. A lack of appropriate pairing is almost certainly an error.
General barriers pair with each other, though they also pair with most
-other types of barriers, albeit without transitivity. An acquire barrier
-pairs with a release barrier, but both may also pair with other barriers,
-including of course general barriers. A write barrier pairs with a data
-dependency barrier, a control dependency, an acquire barrier, a release
-barrier, a read barrier, or a general barrier. Similarly a read barrier,
-control dependency, or a data dependency barrier pairs with a write
-barrier, an acquire barrier, a release barrier, or a general barrier:
+other types of barriers, albeit without multicopy atomicity. An acquire
+barrier pairs with a release barrier, but both may also pair with other
+barriers, including of course general barriers. A write barrier pairs
+with a data dependency barrier, a control dependency, an acquire barrier,
+a release barrier, a read barrier, or a general barrier. Similarly a
+read barrier, control dependency, or a data dependency barrier pairs
+with a write barrier, an acquire barrier, a release barrier, or a
+general barrier:
CPU 1 CPU 2
=============== ===============
@@ -968,7 +947,7 @@ Or even:
=============== ===============================
r1 = READ_ONCE(y);
<general barrier>
- WRITE_ONCE(y, 1); if (r2 = READ_ONCE(x)) {
+ WRITE_ONCE(x, 1); if (r2 = READ_ONCE(x)) {
<implicit control dependency>
WRITE_ONCE(y, 1);
}
@@ -1359,64 +1338,79 @@ the speculation will be cancelled and the value reloaded:
retrieved : : +-------+
-TRANSITIVITY
-------------
+MULTICOPY ATOMICITY
+--------------------
-Transitivity is a deeply intuitive notion about ordering that is not
-always provided by real computer systems. The following example
-demonstrates transitivity:
+Multicopy atomicity is a deeply intuitive notion about ordering that is
+not always provided by real computer systems, namely that a given store
+becomes visible at the same time to all CPUs, or, alternatively, that all
+CPUs agree on the order in which all stores become visible. However,
+support of full multicopy atomicity would rule out valuable hardware
+optimizations, so a weaker form called ``other multicopy atomicity''
+instead guarantees only that a given store becomes visible at the same
+time to all -other- CPUs. The remainder of this document discusses this
+weaker form, but for brevity will call it simply ``multicopy atomicity''.
+
+The following example demonstrates multicopy atomicity:
CPU 1 CPU 2 CPU 3
======================= ======================= =======================
{ X = 0, Y = 0 }
- STORE X=1 LOAD X STORE Y=1
- <general barrier> <general barrier>
- LOAD Y LOAD X
-
-Suppose that CPU 2's load from X returns 1 and its load from Y returns 0.
-This indicates that CPU 2's load from X in some sense follows CPU 1's
-store to X and that CPU 2's load from Y in some sense preceded CPU 3's
-store to Y. The question is then "Can CPU 3's load from X return 0?"
-
-Because CPU 2's load from X in some sense came after CPU 1's store, it
+ STORE X=1 r1=LOAD X (reads 1) LOAD Y (reads 1)
+ <general barrier> <read barrier>
+ STORE Y=r1 LOAD X
+
+Suppose that CPU 2's load from X returns 1, which it then stores to Y,
+and CPU 3's load from Y returns 1. This indicates that CPU 1's store
+to X precedes CPU 2's load from X and that CPU 2's store to Y precedes
+CPU 3's load from Y. In addition, the memory barriers guarantee that
+CPU 2 executes its load before its store, and CPU 3 loads from Y before
+it loads from X. The question is then "Can CPU 3's load from X return 0?"
+
+Because CPU 3's load from X in some sense comes after CPU 2's load, it
is natural to expect that CPU 3's load from X must therefore return 1.
-This expectation is an example of transitivity: if a load executing on
-CPU A follows a load from the same variable executing on CPU B, then
-CPU A's load must either return the same value that CPU B's load did,
-or must return some later value.
-
-In the Linux kernel, use of general memory barriers guarantees
-transitivity. Therefore, in the above example, if CPU 2's load from X
-returns 1 and its load from Y returns 0, then CPU 3's load from X must
-also return 1.
-
-However, transitivity is -not- guaranteed for read or write barriers.
-For example, suppose that CPU 2's general barrier in the above example
-is changed to a read barrier as shown below:
+This expectation follows from multicopy atomicity: if a load executing
+on CPU B follows a load from the same variable executing on CPU A (and
+CPU A did not originally store the value which it read), then on
+multicopy-atomic systems, CPU B's load must return either the same value
+that CPU A's load did or some later value. However, the Linux kernel
+does not require systems to be multicopy atomic.
+
+The use of a general memory barrier in the example above compensates
+for any lack of multicopy atomicity. In the example, if CPU 2's load
+from X returns 1 and CPU 3's load from Y returns 1, then CPU 3's load
+from X must indeed also return 1.
+
+However, dependencies, read barriers, and write barriers are not always
+able to compensate for non-multicopy atomicity. For example, suppose
+that CPU 2's general barrier is removed from the above example, leaving
+only the data dependency shown below:
CPU 1 CPU 2 CPU 3
======================= ======================= =======================
{ X = 0, Y = 0 }
- STORE X=1 LOAD X STORE Y=1
- <read barrier> <general barrier>
- LOAD Y LOAD X
-
-This substitution destroys transitivity: in this example, it is perfectly
-legal for CPU 2's load from X to return 1, its load from Y to return 0,
-and CPU 3's load from X to return 0.
-
-The key point is that although CPU 2's read barrier orders its pair
-of loads, it does not guarantee to order CPU 1's store. Therefore, if
-this example runs on a system where CPUs 1 and 2 share a store buffer
-or a level of cache, CPU 2 might have early access to CPU 1's writes.
-General barriers are therefore required to ensure that all CPUs agree
-on the combined order of CPU 1's and CPU 2's accesses.
-
-General barriers provide "global transitivity", so that all CPUs will
-agree on the order of operations. In contrast, a chain of release-acquire
-pairs provides only "local transitivity", so that only those CPUs on
-the chain are guaranteed to agree on the combined order of the accesses.
-For example, switching to C code in deference to Herman Hollerith:
+ STORE X=1 r1=LOAD X (reads 1) LOAD Y (reads 1)
+ <data dependency> <read barrier>
+ STORE Y=r1 LOAD X (reads 0)
+
+This substitution allows non-multicopy atomicity to run rampant: in
+this example, it is perfectly legal for CPU 2's load from X to return 1,
+CPU 3's load from Y to return 1, and its load from X to return 0.
+
+The key point is that although CPU 2's data dependency orders its load
+and store, it does not guarantee to order CPU 1's store. Thus, if this
+example runs on a non-multicopy-atomic system where CPUs 1 and 2 share a
+store buffer or a level of cache, CPU 2 might have early access to CPU 1's
+writes. General barriers are therefore required to ensure that all CPUs
+agree on the combined order of multiple accesses.
+
+General barriers can compensate not only for non-multicopy atomicity,
+but can also generate additional ordering that can ensure that -all-
+CPUs will perceive the same order of -all- operations. In contrast, a
+chain of release-acquire pairs does not provide this additional ordering,
+which means that only those CPUs on the chain are guaranteed to agree
+on the combined order of the accesses. For example, switching to C code
+in deference to the ghost of Herman Hollerith:
int u, v, x, y, z;
@@ -1448,9 +1442,9 @@ For example, switching to C code in deference to Herman Hollerith:
r3 = READ_ONCE(u);
}
-Because cpu0(), cpu1(), and cpu2() participate in a local transitive
-chain of smp_store_release()/smp_load_acquire() pairs, the following
-outcome is prohibited:
+Because cpu0(), cpu1(), and cpu2() participate in a chain of
+smp_store_release()/smp_load_acquire() pairs, the following outcome
+is prohibited:
r0 == 1 && r1 == 1 && r2 == 1
@@ -1460,9 +1454,9 @@ outcome is prohibited:
r1 == 1 && r5 == 0
-However, the transitivity of release-acquire is local to the participating
-CPUs and does not apply to cpu3(). Therefore, the following outcome
-is possible:
+However, the ordering provided by a release-acquire chain is local
+to the CPUs participating in that chain and does not apply to cpu3(),
+at least aside from stores. Therefore, the following outcome is possible:
r0 == 0 && r1 == 1 && r2 == 1 && r3 == 0 && r4 == 0
@@ -1490,8 +1484,8 @@ following outcome is possible:
Note that this outcome can happen even on a mythical sequentially
consistent system where nothing is ever reordered.
-To reiterate, if your code requires global transitivity, use general
-barriers throughout.
+To reiterate, if your code requires full ordering of all operations,
+use general barriers throughout.
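
    For readers more familiar with userspace code, a rough C11 analogue of one
    hop of a release-acquire chain is sketched below. It is not kernel code;
    smp_store_release()/smp_load_acquire() are only approximated here by the
    C11 memory orders.

        /* Userspace sketch (C11 atomics) of one release-acquire hop.
         * If cpu1() observes y == 1 it must also observe x == 1; ordering for
         * CPUs outside the chain is NOT implied. */
        #include <stdatomic.h>
        #include <pthread.h>
        #include <stdio.h>

        static atomic_int x, y;
        static int r1, r2;

        static void *cpu0(void *arg)
        {
            atomic_store_explicit(&x, 1, memory_order_relaxed);
            atomic_store_explicit(&y, 1, memory_order_release);  /* "release" */
            return NULL;
        }

        static void *cpu1(void *arg)
        {
            r1 = atomic_load_explicit(&y, memory_order_acquire);  /* "acquire" */
            r2 = atomic_load_explicit(&x, memory_order_relaxed);
            return NULL;
        }

        int main(void)
        {
            pthread_t t0, t1;

            pthread_create(&t0, NULL, cpu0, NULL);
            pthread_create(&t1, NULL, cpu1, NULL);
            pthread_join(t0, NULL);
            pthread_join(t1, NULL);
            printf("r1=%d r2=%d\n", r1, r2);  /* r1 == 1 && r2 == 0 is forbidden */
            return 0;
        }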
========================
@@ -1886,18 +1880,6 @@ There are some more advanced barrier functions:
See Documentation/atomic_{t,bitops}.txt for more information.
- (*) lockless_dereference();
-
- This can be thought of as a pointer-fetch wrapper around the
- smp_read_barrier_depends() data-dependency barrier.
-
- This is also similar to rcu_dereference(), but in cases where
- object lifetime is handled by some mechanism other than RCU, for
- example, when the objects removed only when the system goes down.
- In addition, lockless_dereference() is used in some data structures
- that can be used both with and without RCU.
-
-
(*) dma_wmb();
(*) dma_rmb();
@@ -3101,6 +3083,9 @@ AMD64 Architecture Programmer's Manual Volume 2: System Programming
Chapter 7.1: Memory-Access Ordering
Chapter 7.4: Buffering and Combining Memory Writes
+ARM Architecture Reference Manual (ARMv8, for ARMv8-A architecture profile)
+ Chapter B2: The AArch64 Application Level Memory Model
+
IA-32 Intel Architecture Software Developer's Manual, Volume 3:
System Programming Guide
Chapter 7.1: Locked Atomic Operations
@@ -3112,6 +3097,8 @@ The SPARC Architecture Manual, Version 9
Appendix D: Formal Specification of the Memory Models
Appendix J: Programming with the Memory Models
+Storage in the PowerPC (Stone and Fitzgerald)
+
UltraSPARC Programmer Reference Manual
Chapter 5: Memory Accesses and Cacheability
Chapter 15: Sparc-V9 Memory Models
diff --git a/Documentation/networking/cdc_mbim.txt b/Documentation/networking/cdc_mbim.txt
index e4c376abbdad..4e68f0bc5dba 100644
--- a/Documentation/networking/cdc_mbim.txt
+++ b/Documentation/networking/cdc_mbim.txt
@@ -332,8 +332,8 @@ References
[5] "MBIM (Mobile Broadband Interface Model) Registry"
- http://compliance.usb.org/mbim/
-[6] "/dev/bus/usb filesystem output"
- - Documentation/usb/proc_usb_info.txt
+[6] "/sys/kernel/debug/usb/devices output format"
+ - Documentation/driver-api/usb/usb.rst
[7] "/sys/bus/usb/devices/.../descriptors"
- Documentation/ABI/stable/sysfs-bus-usb
diff --git a/Documentation/networking/checksum-offloads.txt b/Documentation/networking/checksum-offloads.txt
index d52d191bbb0c..27bc09cfcf6d 100644
--- a/Documentation/networking/checksum-offloads.txt
+++ b/Documentation/networking/checksum-offloads.txt
@@ -47,7 +47,7 @@ The requirements for GSO are more complicated, because when segmenting an
(section 'E') for more details.
A driver declares its offload capabilities in netdev->hw_features; see
- Documentation/networking/netdev-features for more. Note that a device
+ Documentation/networking/netdev-features.txt for more. Note that a device
which only advertises NETIF_F_IP[V6]_CSUM must still obey the csum_start
and csum_offset given in the SKB; if it tries to deduce these itself in
hardware (as some NICs do) the driver should check that the values in the
diff --git a/Documentation/networking/gtp.txt b/Documentation/networking/gtp.txt
index 93e96750f103..0d9c18f05ec6 100644
--- a/Documentation/networking/gtp.txt
+++ b/Documentation/networking/gtp.txt
@@ -1,6 +1,7 @@
The Linux kernel GTP tunneling module
======================================================================
-Documentation by Harald Welte <laforge@gnumonks.org>
+Documentation by Harald Welte <laforge@gnumonks.org> and
+ Andreas Schultz <aschultz@tpip.net>
In 'drivers/net/gtp.c' you will find a kernel-level implementation
of a GTP tunnel endpoint.
@@ -91,9 +92,13 @@ http://git.osmocom.org/libgtpnl/
== Protocol Versions ==
-There are two different versions of GTP-U: v0 and v1. Both are
-implemented in the Kernel GTP module. Version 0 is a legacy version,
-and deprecated from recent 3GPP specifications.
+There are two different versions of GTP-U: v0 [GSM TS 09.60] and v1
+[3GPP TS 29.281]. Both are implemented in the Kernel GTP module.
+Version 0 is a legacy version, and deprecated from recent 3GPP
+specifications.
+
+GTP-U uses UDP for transporting PDUs. The receiving UDP port is 2152
+for GTPv1-U and 3386 for GTPv0-U.
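
    As a side note, a userspace GTP-U entity would simply bind the well-known
    UDP port. The sketch below is a hypothetical helper (IPv4 only, minimal
    error handling), not part of the kernel module.

        /* Sketch: bind the GTPv1-U receive port (2152); GTPv0-U would use 3386. */
        #include <arpa/inet.h>
        #include <netinet/in.h>
        #include <sys/socket.h>
        #include <unistd.h>

        int open_gtpu_socket(void)
        {
            struct sockaddr_in addr = {
                .sin_family = AF_INET,
                .sin_port = htons(2152),
                .sin_addr.s_addr = htonl(INADDR_ANY),
            };
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                return -1;
            if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                close(fd);
                return -1;
            }
            return fd;
        }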
There are three versions of GTP-C: v0, v1, and v2. As the kernel
doesn't implement GTP-C, we don't have to worry about this. It's the
@@ -133,3 +138,93 @@ doe to a lack of user interest, it never got merged.
In 2015, Andreas Schultz came to the rescue and fixed lots more bugs,
extended it with new features and finally pushed all of us to get it
mainline, where it was merged in 4.7.0.
+
+== Architectural Details ==
+
+=== Local GTP-U entity and tunnel identification ===
+
+GTP-U uses UDP for transporting PDUs. The receiving UDP port is 2152
+for GTPv1-U and 3386 for GTPv0-U.
+
+There is only one GTP-U entity (and therefore SGSN/GGSN/S-GW/PDN-GW
+instance) per IP address. Tunnel Endpoint Identifiers (TEIDs) are unique
+per GTP-U entity.
+
+A specific tunnel is only defined by the destination entity. Since the
+destination port is constant, only the destination IP and TEID define
+a tunnel. The source IP and port have no meaning for the tunnel.
+
+Therefore:
+
+ * when sending, the remote entity is defined by the remote IP and
+ the tunnel endpoint id. The source IP and port have no meaning and
+ can be changed at any time.
+
+ * when receiving, the local entity is defined by the local
+ destination IP and the tunnel endpoint id. The source IP and port
+ have no meaning and can change at any time.
+
+[3GPP TS 29.281] Section 4.3.0 defines this as follows:
+
+> The TEID in the GTP-U header is used to de-multiplex traffic
+> incoming from remote tunnel endpoints so that it is delivered to the
+> User plane entities in a way that allows multiplexing of different
+> users, different packet protocols and different QoS levels.
+> Therefore no two remote GTP-U endpoints shall send traffic to a
+> GTP-U protocol entity using the same TEID value except
+> for data forwarding as part of mobility procedures.
+
+The definition above only states that two remote GTP-U endpoints
+*should not* send to the same TEID; it *does not* forbid or exclude
+such a scenario. In fact, the mentioned mobility procedures make it
+necessary that the GTP-U entity accepts traffic for TEIDs from
+multiple or unknown peers.
+
+Therefore, the receiving side identifies tunnels exclusively based on
+TEIDs, not based on the source IP!
+
+== APN vs. Network Device ==
+
+The GTP-U driver creates a Linux network device for each Gi/SGi
+interface.
+
+[3GPP TS 29.281] calls the Gi/SGi reference point an interface. This
+may lead to the impression that the GGSN/P-GW can have only one such
+interface.
+
+What is correct is that the Gi/SGi reference point defines the
+interworking between the 3GPP packet domain (PDN), based on GTP-U
+tunnels, and IP-based networks.
+
+There is no provision in any of the 3GPP documents that limits the
+number of Gi/SGi interfaces implemented by a GGSN/P-GW.
+
+[3GPP TS 29.061] Section 11.3 makes it clear that the selection of a
+specific Gi/SGi interface is made through the Access Point Name
+(APN):
+
+> 2. each private network manages its own addressing. In general this
+> will result in different private networks having overlapping
+> address ranges. A logically separate connection (e.g. an IP in IP
+> tunnel or layer 2 virtual circuit) is used between the GGSN/P-GW
+> and each private network.
+>
+> In this case the IP address alone is not necessarily unique. The
+> pair of values, Access Point Name (APN) and IPv4 address and/or
+> IPv6 prefixes, is unique.
+
+In order to support the overlapping address range use case, each APN
+is mapped to a separate Gi/SGi interface (network device).
+
+NOTE: The Access Point Name is purely a control plane (GTP-C) concept.
+At the GTP-U level, only Tunnel Endpoint Identifiers (carried in
+GTP-U packets) and network devices are known.
+
+Therefore, for a given UE, the mapping from the IP network to the PDN
+network is:
+ * network device + MS IP -> Peer IP + Peer TEID,
+
+and from the PDN to the IP network:
+ * local GTP-U IP + TEID -> network device
+
+Furthermore, before a received T-PDU is injected into the network
+device, the MS IP is checked against the IP recorded in the PDP context.
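
    To make the mapping concrete, here is a purely hypothetical sketch of the
    receive-side lookup described above; the types and field names are invented
    for illustration and are not the GTP driver's data structures.

        /* Hypothetical sketch of the receive-side rule: a tunnel is selected
         * only by the TEID (on the local GTP-U IP the packet arrived at),
         * never by the sender's address or port. */
        #include <stdint.h>
        #include <stddef.h>
        #include <netinet/in.h>

        struct pdp_ctx {                    /* invented for illustration */
            uint32_t teid;                  /* local Tunnel Endpoint Identifier */
            struct in_addr ms_addr;         /* MS IP expected inside the T-PDU */
            int netdev_index;               /* Gi/SGi network device (one per APN) */
            struct pdp_ctx *next;
        };

        static struct pdp_ctx *lookup_by_teid(struct pdp_ctx *head, uint32_t teid)
        {
            for (; head; head = head->next)
                if (head->teid == teid)
                    return head;
            return NULL;                    /* unknown tunnel: drop the T-PDU */
        }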
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
index cfc66ea72329..2a3278d5cf35 100644
--- a/Documentation/networking/netdev-FAQ.txt
+++ b/Documentation/networking/netdev-FAQ.txt
@@ -64,7 +64,10 @@ A: To understand this, you need to know a bit of background information
If you aren't subscribed to netdev and/or are simply unsure if net-next
has re-opened yet, simply check the net-next git repository link above for
- any new networking-related commits.
+ any new networking-related commits. You may also check the following
+ website for the current status:
+
+ http://vger.kernel.org/~davem/net-next.html
The "net" tree continues to collect fixes for the vX.Y content, and
is fed back to Linus at regular (~weekly) intervals. Meaning that the
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index f3b9e507ab05..bf654845556e 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -1055,7 +1055,7 @@ TX_RING part only TP_STATUS_AVAILABLE is set, then the tp_sec and tp_{n,u}sec
members do not contain a valid value. For TX_RINGs, by default no timestamp
is generated!
-See include/linux/net_tstamp.h and Documentation/networking/timestamping
+See include/linux/net_tstamp.h and Documentation/networking/timestamping.txt
for more information on hardware timestamps.
-------------------------------------------------------------------------------
diff --git a/arch/openrisc/README.openrisc b/Documentation/openrisc/README
index 072069ab5100..777a893d533d 100644
--- a/arch/openrisc/README.openrisc
+++ b/Documentation/openrisc/README
@@ -7,13 +7,7 @@ target architecture, specifically, is the 32-bit OpenRISC 1000 family (or1k).
For information about OpenRISC processors and ongoing development:
website http://openrisc.io
-
-For more information about Linux on OpenRISC, please contact South Pole AB.
-
- email: info@southpole.se
-
- website: http://southpole.se
- http://southpoleconsulting.com
+ email openrisc@lists.librecores.org
---------------------------------------------------------------------
@@ -24,37 +18,54 @@ In order to build and run Linux for OpenRISC, you'll need at least a basic
toolchain and, perhaps, the architectural simulator. Steps to get these bits
in place are outlined here.
-1) The toolchain can be obtained from openrisc.io. Instructions for building
-a toolchain can be found at:
+1) Toolchain
+
+Toolchain binaries can be obtained from openrisc.io or our github releases page.
+Instructions for building the different toolchains can be found on openrisc.io
+or in Stafford's toolchain build and release scripts.
+
+ binaries https://github.com/openrisc/or1k-gcc/releases
+ toolchains https://openrisc.io/software
+ building https://github.com/stffrdhrn/or1k-toolchain-build
-https://github.com/openrisc/tutorials
+2) Building
-2) or1ksim (optional)
+Build the Linux kernel as usual
-or1ksim is the architectural simulator which will allow you to actually run
-your OpenRISC Linux kernel if you don't have an OpenRISC processor at hand.
+ make ARCH=openrisc defconfig
+ make ARCH=openrisc
- git clone https://github.com/openrisc/or1ksim.git
+3) Running on FPGA (optional)
- cd or1ksim
- ./configure --prefix=$OPENRISC_PREFIX
- make
- make install
+The OpenRISC community typically uses FuseSoC to manage building and programming
+an SoC into an FPGA. The below is an example of programming a De0 Nano
+development board with the OpenRISC SoC. During the build, the FPGA RTL code is
+downloaded from the FuseSoC IP cores repository and built using the FPGA vendor
+tools. Binaries are loaded onto the board with openocd.
-3) Linux kernel
+ git clone https://github.com/olofk/fusesoc
+ cd fusesoc
+ sudo pip install -e .
-Build the kernel as usual
+ fusesoc init
+ fusesoc build de0_nano
+ fusesoc pgm de0_nano
- make ARCH=openrisc defconfig
- make ARCH=openrisc
+ openocd -f interface/altera-usb-blaster.cfg \
+ -f board/or1k_generic.cfg
+
+ telnet localhost 4444
+ > init
+ > halt; load_image vmlinux ; reset
-4) Run in architectural simulator
+4) Running on a Simulator (optional)
-Grab the or1ksim platform configuration file (from the or1ksim source) and
-together with your freshly built vmlinux, run your kernel with the following
-incantation:
+QEMU is a processor emulator which we recommend for simulating the OpenRISC
+platform. Please follow the OpenRISC instructions on the QEMU website to get
+Linux running on QEMU. You can build QEMU yourself, but your Linux distribution
+likely provides binary packages to support OpenRISC.
- sim -f arch/openrisc/or1ksim.cfg vmlinux
+ qemu openrisc https://wiki.qemu.org/Documentation/Platforms/OpenRISC
---------------------------------------------------------------------
diff --git a/arch/openrisc/TODO.openrisc b/Documentation/openrisc/TODO
index c43d4e1d14eb..c43d4e1d14eb 100644
--- a/arch/openrisc/TODO.openrisc
+++ b/Documentation/openrisc/TODO
diff --git a/Documentation/perf/hisi-pmu.txt b/Documentation/perf/hisi-pmu.txt
new file mode 100644
index 000000000000..267a028b2741
--- /dev/null
+++ b/Documentation/perf/hisi-pmu.txt
@@ -0,0 +1,53 @@
+HiSilicon SoC uncore Performance Monitoring Unit (PMU)
+======================================================
+The HiSilicon SoC chip includes various independent system device PMUs
+such as L3 cache (L3C), Hydra Home Agent (HHA) and DDRC. These PMUs are
+independent and have hardware logic to gather statistics and performance
+information.
+
+The HiSilicon SoC encapsulates multiple CPU and IO dies. Each CPU cluster
+(CCL) is made up of 4 CPU cores sharing one L3 cache; each CPU die is
+called a Super CPU cluster (SCCL) and is made up of 6 CCLs. Each SCCL
+has two HHAs (0 - 1) and four DDRCs (0 - 3).
+
+HiSilicon SoC uncore PMU driver
+---------------------------------------
+Each device PMU has separate registers for event counting, control and
+interrupt, and the PMU driver registers a separate perf PMU for each of
+them (L3C, HHA, DDRC, etc.). The available events and configuration
+options are described in sysfs; see:
+/sys/devices/hisi_sccl{X}_<l3c{Y}/hha{Y}/ddrc{Y}>/, or
+/sys/bus/event_source/devices/hisi_sccl{X}_<l3c{Y}/hha{Y}/ddrc{Y}>.
+The "perf list" command lists the available events from sysfs.
+
+Each L3C, HHA and DDRC is registered as a separate PMU with perf. The PMU
+name will appear in the event listing as hisi_sccl<sccl-id>_module<index-id>,
+where "sccl-id" is the identifier of the SCCL and "index-id" is the index
+of the module.
+e.g. hisi_sccl3_l3c0/rd_hit_cpipe is READ_HIT_CPIPE event of L3C index #0 in
+SCCL ID #3.
+e.g. hisi_sccl1_hha0/rx_operations is RX_OPERATIONS event of HHA index #0 in
+SCCL ID #1.
+
+The driver also provides a "cpumask" sysfs attribute, which shows the CPU core
+ID used to count the uncore PMU event.
+
+Example usage of perf:
+$# perf list
+hisi_sccl3_l3c0/rd_hit_cpipe/ [kernel PMU event]
+------------------------------------------
+hisi_sccl3_l3c0/wr_hit_cpipe/ [kernel PMU event]
+------------------------------------------
+hisi_sccl1_l3c0/rd_hit_cpipe/ [kernel PMU event]
+------------------------------------------
+hisi_sccl1_l3c0/wr_hit_cpipe/ [kernel PMU event]
+------------------------------------------
+
+$# perf stat -a -e hisi_sccl3_l3c0/rd_hit_cpipe/ sleep 5
+$# perf stat -a -e hisi_sccl3_l3c0/config=0x02/ sleep 5
+
+The current driver does not support sampling, so "perf record" is
+unsupported. Attaching to a task is also unsupported as the events are
+all uncore.
+
+Note: Please contact the maintainer for a complete list of the events
+supported by the PMU devices in the SoC, and for further information if
+needed.
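
    Beyond perf(1), an uncore event can also be counted directly with the
    perf_event_open() system call. The sketch below is illustrative only: the
    PMU instance, the raw config value taken from the perf stat example above,
    and the choice of CPU 0 are assumptions.

        /* Sketch: count one HiSilicon uncore event via perf_event_open().
         * The PMU type is read from sysfs; pid = -1 with an explicit CPU is
         * used because uncore events cannot be attached to a task. */
        #include <stdio.h>
        #include <stdint.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/perf_event.h>

        int main(void)
        {
            struct perf_event_attr attr = { 0 };
            unsigned int type;
            uint64_t count;
            FILE *f;
            int fd;

            f = fopen("/sys/bus/event_source/devices/hisi_sccl3_l3c0/type", "r");
            if (!f)
                return 1;
            if (fscanf(f, "%u", &type) != 1) {
                fclose(f);
                return 1;
            }
            fclose(f);

            attr.type = type;
            attr.size = sizeof(attr);
            attr.config = 0x02;     /* raw config value, as in the perf stat example */

            /* pid = -1, cpu = 0: system-wide on one CPU from the PMU's cpumask */
            fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
            if (fd < 0)
                return 1;
            sleep(5);
            if (read(fd, &count, sizeof(count)) != sizeof(count))
                return 1;
            printf("events: %llu\n", (unsigned long long)count);
            return 0;
        }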
diff --git a/Documentation/pi-futex.txt b/Documentation/pi-futex.txt
index aafddbee7377..b154f6c0c36e 100644
--- a/Documentation/pi-futex.txt
+++ b/Documentation/pi-futex.txt
@@ -119,4 +119,4 @@ properties of futexes, and all four combinations are possible: futex,
robust-futex, PI-futex, robust+PI-futex.
More details about priority inheritance can be found in
-Documentation/rt-mutex.txt.
+Documentation/locking/rt-mutex.txt.
diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt
index 974916ff6608..27df7f98668a 100644
--- a/Documentation/power/interface.txt
+++ b/Documentation/power/interface.txt
@@ -24,7 +24,8 @@ platform.
If one of the strings listed in /sys/power/state is written to it, the system
will attempt to transition into the corresponding sleep state. Refer to
-Documentation/power/states.txt for a description of each of those states.
+Documentation/admin-guide/pm/sleep-states.rst for a description of each of
+those states.
/sys/power/disk controls the operating mode of hibernation (Suspend-to-Disk).
Specifically, it tells the kernel what to do after creating a hibernation image.
@@ -42,7 +43,7 @@ The currently selected option is printed in square brackets.
The 'platform' option is only available if the platform provides a special
mechanism to put the system to sleep after creating a hibernation image (ACPI
does that, for example). The 'suspend' option is available if Suspend-to-RAM
-is supported. Refer to Documentation/power/basic_pm_debugging.txt for the
+is supported. Refer to Documentation/power/basic-pm-debugging.txt for the
description of the 'test_resume' option.
To select an option, write the string representing it to /sys/power/disk.
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt
index a1b7f7158930..704cd36079b8 100644
--- a/Documentation/power/pci.txt
+++ b/Documentation/power/pci.txt
@@ -8,7 +8,7 @@ management. Based on previous work by Patrick Mochel <mochel@transmeta.com>
This document only covers the aspects of power management specific to PCI
devices. For general description of the kernel's interfaces related to device
-power management refer to Documentation/power/admin-guide/devices.rst and
+power management refer to Documentation/driver-api/pm/devices.rst and
Documentation/power/runtime_pm.txt.
---------------------------------------------------------------------------
@@ -417,7 +417,7 @@ pm->runtime_idle() callback.
2.4. System-Wide Power Transitions
----------------------------------
There are a few different types of system-wide power transitions, described in
-Documentation/power/admin-guide/devices.rst. Each of them requires devices to be handled
+Documentation/driver-api/pm/devices.rst. Each of them requires devices to be handled
in a specific way and the PM core executes subsystem-level power management
callbacks for this purpose. They are executed in phases such that each phase
involves executing the same subsystem-level callback for every device belonging
@@ -623,7 +623,7 @@ System restore requires a hibernation image to be loaded into memory and the
pre-hibernation memory contents to be restored before the pre-hibernation system
activity can be resumed.
-As described in Documentation/power/admin-guide/devices.rst, the hibernation image is loaded
+As described in Documentation/driver-api/pm/devices.rst, the hibernation image is loaded
into memory by a fresh instance of the kernel, called the boot kernel, which in
turn is loaded and run by a boot loader in the usual way. After the boot kernel
has loaded the image, it needs to replace its own code and data with the code
@@ -677,7 +677,7 @@ controlling the runtime power management of their devices.
At the time of this writing there are two ways to define power management
callbacks for a PCI device driver, the recommended one, based on using a
-dev_pm_ops structure described in Documentation/power/admin-guide/devices.rst, and the
+dev_pm_ops structure described in Documentation/driver-api/pm/devices.rst, and the
"legacy" one, in which the .suspend(), .suspend_late(), .resume_early(), and
.resume() callbacks from struct pci_driver are used. The legacy approach,
however, doesn't allow one to define runtime power management callbacks and is
@@ -961,6 +961,39 @@ dev_pm_ops to indicate that one suspend routine is to be pointed to by the
.suspend(), .freeze(), and .poweroff() members and one resume routine is to
be pointed to by the .resume(), .thaw(), and .restore() members.
+3.1.19. Driver Flags for Power Management
+
+The PM core allows device drivers to set flags that influence the handling of
+power management for the devices by the core itself and by middle layer code
+including the PCI bus type. The flags should be set once at the driver probe
+time with the help of the dev_pm_set_driver_flags() function and they should not
+be updated directly afterwards.
+
+The DPM_FLAG_NEVER_SKIP flag prevents the PM core from using the direct-complete
+mechanism allowing device suspend/resume callbacks to be skipped if the device
+is in runtime suspend when the system suspend starts. That also affects all of
+the ancestors of the device, so this flag should only be used if absolutely
+necessary.
+
+The DPM_FLAG_SMART_PREPARE flag instructs the PCI bus type to only return a
+positive value from pci_pm_prepare() if the ->prepare callback provided by the
+driver of the device returns a positive value. That allows the driver to opt
+out from using the direct-complete mechanism dynamically.
+
+The DPM_FLAG_SMART_SUSPEND flag tells the PCI bus type that from the driver's
+perspective the device can be safely left in runtime suspend during system
+suspend. That causes pci_pm_suspend(), pci_pm_freeze() and pci_pm_poweroff()
+to skip resuming the device from runtime suspend unless there are PCI-specific
+reasons for doing that. Also, it causes pci_pm_suspend_late/noirq(),
+pci_pm_freeze_late/noirq() and pci_pm_poweroff_late/noirq() to return early
+if the device remains in runtime suspend in the beginning of the "late" phase
+of the system-wide transition under way. Moreover, if the device is in
+runtime suspend in pci_pm_resume_noirq() or pci_pm_restore_noirq(), its runtime
+power management status will be changed to "active" (as it is going to be put
+into D0 going forward), but if it is in runtime suspend in pci_pm_thaw_noirq(),
+the function will set the power.direct_complete flag for it (to make the PM core
+skip the subsequent "thaw" callbacks for it) and return.
+
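    As an illustration of the interface just described (a hypothetical driver;
    the flag combination is only an example), the flags would be set once in
    the probe path:

        /* Illustrative sketch only: setting PM driver flags once at probe time.
         * The driver name and the chosen flag combination are hypothetical. */
        #include <linux/pci.h>
        #include <linux/pm.h>

        static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        {
            /* Leave the device in runtime suspend across system suspend when
             * possible, and let ->prepare opt out of direct-complete. */
            dev_pm_set_driver_flags(&pdev->dev,
                                    DPM_FLAG_SMART_SUSPEND | DPM_FLAG_SMART_PREPARE);
            return 0;
        }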
3.2. Device Runtime Power Management
------------------------------------
In addition to providing device power management callbacks PCI device drivers
@@ -1046,5 +1079,5 @@ PCI Local Bus Specification, Rev. 3.0
PCI Bus Power Management Interface Specification, Rev. 1.2
Advanced Configuration and Power Interface (ACPI) Specification, Rev. 3.0b
PCI Express Base Specification, Rev. 2.0
-Documentation/power/admin-guide/devices.rst
+Documentation/driver-api/pm/devices.rst
Documentation/power/runtime_pm.txt
diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt
index 21d2d48f87a2..19c5f7b1a7ba 100644
--- a/Documentation/power/pm_qos_interface.txt
+++ b/Documentation/power/pm_qos_interface.txt
@@ -98,8 +98,7 @@ Values are updated in response to changes of the request list.
The target values of resume latency and active state latency tolerance are
simply the minimum of the request values held in the parameter list elements.
The PM QoS flags aggregate value is a gather (bitwise OR) of all list elements'
-values. Two device PM QoS flags are defined currently: PM_QOS_FLAG_NO_POWER_OFF
-and PM_QOS_FLAG_REMOTE_WAKEUP.
+values. One device PM QoS flag is defined currently: PM_QOS_FLAG_NO_POWER_OFF.
Note: The aggregated target values are implemented in such a way that reading
the aggregated value does not require any locking mechanism.
@@ -153,14 +152,14 @@ PM QoS list of resume latency constraints and remove sysfs attribute
pm_qos_resume_latency_us from the device's power directory.
int dev_pm_qos_expose_flags(device, value)
-Add a request to the device's PM QoS list of flags and create sysfs attributes
-pm_qos_no_power_off and pm_qos_remote_wakeup under the device's power directory
-allowing user space to change these flags' value.
+Add a request to the device's PM QoS list of flags and create sysfs attribute
+pm_qos_no_power_off under the device's power directory allowing user space to
+change the value of the PM_QOS_FLAG_NO_POWER_OFF flag.
void dev_pm_qos_hide_flags(device)
Drop the request added by dev_pm_qos_expose_flags() from the device's PM QoS list
-of flags and remove sysfs attributes pm_qos_no_power_off and pm_qos_remote_wakeup
-under the device's power directory.
+of flags and remove sysfs attribute pm_qos_no_power_off from the device's power
+directory.
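
    For illustration (a hypothetical caller, not from the original text), a
    driver could expose the remaining flag for one of its devices like this:

        /* Illustrative sketch: expose PM_QOS_FLAG_NO_POWER_OFF so that user
         * space can toggle it through the pm_qos_no_power_off attribute. */
        #include <linux/device.h>
        #include <linux/pm_qos.h>

        static int example_expose_qos(struct device *dev)
        {
            /* Request "no power off" by default; user space may clear it. */
            return dev_pm_qos_expose_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
        }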
Notification mechanisms:
The per-device PM QoS framework has a per-device notification tree.
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 625549d4c74a..57af2f7963ee 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -680,7 +680,7 @@ left in runtime suspend. If that happens, the PM core will not execute any
system suspend and resume callbacks for all of those devices, except for the
complete callback, which is then entirely responsible for handling the device
as appropriate. This only applies to system suspend transitions that are not
-related to hibernation (see Documentation/power/admin-guide/devices.rst for more
+related to hibernation (see Documentation/driver-api/pm/devices.rst for more
information).
The PM core does its best to reduce the probability of race conditions between
diff --git a/Documentation/power/suspend-and-cpuhotplug.txt b/Documentation/power/suspend-and-cpuhotplug.txt
index 2fc909502db5..31abd04b9572 100644
--- a/Documentation/power/suspend-and-cpuhotplug.txt
+++ b/Documentation/power/suspend-and-cpuhotplug.txt
@@ -232,7 +232,7 @@ d. Handling microcode update during suspend/hibernate:
hibernate/restore cycle.]
In the current design of the kernel however, during a CPU offline operation
- as part of the suspend/hibernate cycle (the CPU_DEAD_FROZEN notification),
+ as part of the suspend/hibernate cycle (cpuhp_tasks_frozen is set),
the existing copy of microcode image in the kernel is not freed up.
And during the CPU online operations (during resume/restore), since the
kernel finds that it already has copies of the microcode images for all the
@@ -252,10 +252,9 @@ Yes, they are listed below:
the _cpu_down() and _cpu_up() functions is *always* 0.
This might not reflect the true current state of the system, since the
tasks could have been frozen by an out-of-band event such as a suspend
- operation in progress. Hence, it will lead to wrong notifications being
- sent during the cpu online/offline events (eg, CPU_ONLINE notification
- instead of CPU_ONLINE_FROZEN) which in turn will lead to execution of
- inappropriate code by the callbacks registered for such CPU hotplug events.
+ operation in progress. Hence, the cpuhp_tasks_frozen variable will not
+ reflect the frozen state and the CPU hotplug callbacks which evaluate
+ that variable might execute the wrong code path.
2. If a regular CPU hotplug stress test happens to race with the freezer due
to a suspend operation in progress at the same time, then we could hit the
diff --git a/Documentation/process/3.Early-stage.rst b/Documentation/process/3.Early-stage.rst
index af2c0af931d6..be00716071d4 100644
--- a/Documentation/process/3.Early-stage.rst
+++ b/Documentation/process/3.Early-stage.rst
@@ -178,7 +178,7 @@ matter is (1) kernel developers tend to be busy, (2) there is no shortage
of people with grand plans and little code (or even prospect of code) to
back them up, and (3) nobody is obligated to review or comment on ideas
posted by others. Beyond that, high-level designs often hide problems
-which are only reviewed when somebody actually tries to implement those
+which are only revealed when somebody actually tries to implement those
designs; for that reason, kernel developers would rather see the code.
If a request-for-comments posting yields little in the way of comments, do
diff --git a/Documentation/process/4.Coding.rst b/Documentation/process/4.Coding.rst
index 6df19943dd4d..26b106071364 100644
--- a/Documentation/process/4.Coding.rst
+++ b/Documentation/process/4.Coding.rst
@@ -307,7 +307,7 @@ variety of potential coding problems; it can also propose fixes for those
problems. Quite a few "semantic patches" for the kernel have been packaged
under the scripts/coccinelle directory; running "make coccicheck" will run
through those semantic patches and report on any problems found. See
-Documentation/coccinelle.txt for more information.
+Documentation/dev-tools/coccinelle.rst for more information.
Other kinds of portability errors are best found by compiling your code for
other architectures. If you do not happen to have an S/390 system or a
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 61e43cc3ed17..a430f6eee756 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -26,6 +26,7 @@ Below are the essential guides that every developer should read.
coding-style
email-clients
kernel-enforcement-statement
+ kernel-driver-statement
Other guides to the community that are of interest to most developers are:
diff --git a/Documentation/process/kernel-driver-statement.rst b/Documentation/process/kernel-driver-statement.rst
new file mode 100644
index 000000000000..60d9d868f300
--- /dev/null
+++ b/Documentation/process/kernel-driver-statement.rst
@@ -0,0 +1,199 @@
+Kernel Driver Statement
+-----------------------
+
+Position Statement on Linux Kernel Modules
+==========================================
+
+
+We, the undersigned Linux kernel developers, consider any closed-source
+Linux kernel module or driver to be harmful and undesirable. We have
+repeatedly found them to be detrimental to Linux users, businesses, and
+the greater Linux ecosystem. Such modules negate the openness,
+stability, flexibility, and maintainability of the Linux development
+model and shut their users off from the expertise of the Linux
+community. Vendors that provide closed-source kernel modules force their
+customers to give up key Linux advantages or choose new vendors.
+Therefore, in order to take full advantage of the cost savings and
+shared support benefits open source has to offer, we urge vendors to
+adopt a policy of supporting their customers on Linux with open-source
+kernel code.
+
+We speak only for ourselves, and not for any company we might work for
+today, have in the past, or will in the future.
+
+ - Dave Airlie
+ - Nick Andrew
+ - Jens Axboe
+ - Ralf Baechle
+ - Felipe Balbi
+ - Ohad Ben-Cohen
+ - Muli Ben-Yehuda
+ - Jiri Benc
+ - Arnd Bergmann
+ - Thomas Bogendoerfer
+ - Vitaly Bordug
+ - James Bottomley
+ - Josh Boyer
+ - Neil Brown
+ - Mark Brown
+ - David Brownell
+ - Michael Buesch
+ - Franck Bui-Huu
+ - Adrian Bunk
+ - François Cami
+ - Ralph Campbell
+ - Luiz Fernando N. Capitulino
+ - Mauro Carvalho Chehab
+ - Denis Cheng
+ - Jonathan Corbet
+ - Glauber Costa
+ - Alan Cox
+ - Magnus Damm
+ - Ahmed S. Darwish
+ - Robert P. J. Day
+ - Hans de Goede
+ - Arnaldo Carvalho de Melo
+ - Helge Deller
+ - Jean Delvare
+ - Mathieu Desnoyers
+ - Sven-Thorsten Dietrich
+ - Alexey Dobriyan
+ - Daniel Drake
+ - Alex Dubov
+ - Randy Dunlap
+ - Michael Ellerman
+ - Pekka Enberg
+ - Jan Engelhardt
+ - Mark Fasheh
+ - J. Bruce Fields
+ - Larry Finger
+ - Jeremy Fitzhardinge
+ - Mike Frysinger
+ - Kumar Gala
+ - Robin Getz
+ - Liam Girdwood
+ - Jan-Benedict Glaw
+ - Thomas Gleixner
+ - Brice Goglin
+ - Cyrill Gorcunov
+ - Andy Gospodarek
+ - Thomas Graf
+ - Krzysztof Halasa
+ - Harvey Harrison
+ - Stephen Hemminger
+ - Michael Hennerich
+ - Tejun Heo
+ - Benjamin Herrenschmidt
+ - Kristian Høgsberg
+ - Henrique de Moraes Holschuh
+ - Marcel Holtmann
+ - Mike Isely
+ - Takashi Iwai
+ - Olof Johansson
+ - Dave Jones
+ - Jesper Juhl
+ - Matthias Kaehlcke
+ - Kenji Kaneshige
+ - Jan Kara
+ - Jeremy Kerr
+ - Russell King
+ - Olaf Kirch
+ - Roel Kluin
+ - Hans-Jürgen Koch
+ - Auke Kok
+ - Peter Korsgaard
+ - Jiri Kosina
+ - Mariusz Kozlowski
+ - Greg Kroah-Hartman
+ - Michael Krufky
+ - Aneesh Kumar
+ - Clemens Ladisch
+ - Christoph Lameter
+ - Gunnar Larisch
+ - Anders Larsen
+ - Grant Likely
+ - John W. Linville
+ - Yinghai Lu
+ - Tony Luck
+ - Pavel Machek
+ - Matt Mackall
+ - Paul Mackerras
+ - Roland McGrath
+ - Patrick McHardy
+ - Kyle McMartin
+ - Paul Menage
+ - Thierry Merle
+ - Eric Miao
+ - Akinobu Mita
+ - Ingo Molnar
+ - James Morris
+ - Andrew Morton
+ - Paul Mundt
+ - Oleg Nesterov
+ - Luca Olivetti
+ - S.Çağlar Onur
+ - Pierre Ossman
+ - Keith Owens
+ - Venkatesh Pallipadi
+ - Nick Piggin
+ - Nicolas Pitre
+ - Evgeniy Polyakov
+ - Richard Purdie
+ - Mike Rapoport
+ - Sam Ravnborg
+ - Gerrit Renker
+ - Stefan Richter
+ - David Rientjes
+ - Luis R. Rodriguez
+ - Stefan Roese
+ - Francois Romieu
+ - Rami Rosen
+ - Stephen Rothwell
+ - Maciej W. Rozycki
+ - Mark Salyzyn
+ - Yoshinori Sato
+ - Deepak Saxena
+ - Holger Schurig
+ - Amit Shah
+ - Yoshihiro Shimoda
+ - Sergei Shtylyov
+ - Kay Sievers
+ - Sebastian Siewior
+ - Rik Snel
+ - Jes Sorensen
+ - Alexey Starikovskiy
+ - Alan Stern
+ - Timur Tabi
+ - Hirokazu Takata
+ - Eliezer Tamir
+ - Eugene Teo
+ - Doug Thompson
+ - FUJITA Tomonori
+ - Dmitry Torokhov
+ - Marcelo Tosatti
+ - Steven Toth
+ - Theodore Tso
+ - Matthias Urlichs
+ - Geert Uytterhoeven
+ - Arjan van de Ven
+ - Ivo van Doorn
+ - Rik van Riel
+ - Wim Van Sebroeck
+ - Hans Verkuil
+ - Horst H. von Brand
+ - Dmitri Vorobiev
+ - Anton Vorontsov
+ - Daniel Walker
+ - Johannes Weiner
+ - Harald Welte
+ - Matthew Wilcox
+ - Dan J. Williams
+ - Darrick J. Wong
+ - David Woodhouse
+ - Chris Wright
+ - Bryan Wu
+ - Rafael J. Wysocki
+ - Herbert Xu
+ - Vlad Yasevich
+ - Peter Zijlstra
+ - Bartlomiej Zolnierkiewicz
diff --git a/Documentation/process/submitting-drivers.rst b/Documentation/process/submitting-drivers.rst
index afb82ee0cbea..b38bf2054ce3 100644
--- a/Documentation/process/submitting-drivers.rst
+++ b/Documentation/process/submitting-drivers.rst
@@ -117,7 +117,7 @@ PM support:
anything. For the driver testing instructions see
Documentation/power/drivers-testing.txt and for a relatively
complete overview of the power management issues related to
- drivers see Documentation/power/admin-guide/devices.rst .
+ drivers see Documentation/driver-api/pm/devices.rst.
Control:
In general if there is active maintenance of a driver by
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index 733478ade91b..1ef19d3a3eee 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -621,14 +621,14 @@ The canonical patch subject line is::
The canonical patch message body contains the following:
- - A ``from`` line specifying the patch author (only needed if the person
- sending the patch is not the author).
-
- - An empty line.
+ - A ``from`` line specifying the patch author, followed by an empty
+ line (only needed if the person sending the patch is not the author).
- The body of the explanation, line wrapped at 75 columns, which will
be copied to the permanent changelog to describe this patch.
+ - An empty line.
+
- The ``Signed-off-by:`` lines, described above, which will
also go in the changelog.
diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.txt
index 8477655c0e46..453d4b79c78d 100644
--- a/Documentation/scsi/scsi-parameters.txt
+++ b/Documentation/scsi/scsi-parameters.txt
@@ -50,10 +50,11 @@ parameters may be changed at runtime by the command
mac5380= [HW,SCSI]
See drivers/scsi/mac_scsi.c.
- max_luns= [SCSI] Maximum number of LUNs to probe.
+ scsi_mod.max_luns=
+ [SCSI] Maximum number of LUNs to probe.
Should be between 1 and 2^32-1.
- max_report_luns=
+ scsi_mod.max_report_luns=
[SCSI] Maximum number of LUNs received.
Should be between 1 and 16384.
@@ -80,15 +81,17 @@ parameters may be changed at runtime by the command
scsi_debug_*= [SCSI]
See drivers/scsi/scsi_debug.c.
- scsi_default_dev_flags=
+ scsi_mod.default_dev_flags=
[SCSI] SCSI default device flags
Format: <integer>
- scsi_dev_flags= [SCSI] Black/white list entry for vendor and model
+ scsi_mod.dev_flags=
+ [SCSI] Black/white list entry for vendor and model
Format: <vendor>:<model>:<flags>
(flags are integer value)
- scsi_logging_level= [SCSI] a bit mask of logging levels
+ scsi_mod.scsi_logging_level=
+ [SCSI] a bit mask of logging levels
See drivers/scsi/scsi_logging.h for bits. Also
settable via sysctl at dev.scsi.logging_level
(/proc/sys/dev/scsi/logging_level).
diff --git a/Documentation/scsi/smartpqi.txt b/Documentation/scsi/smartpqi.txt
index ab377d9e5d1b..201f80c7c050 100644
--- a/Documentation/scsi/smartpqi.txt
+++ b/Documentation/scsi/smartpqi.txt
@@ -21,7 +21,7 @@ http://www.t10.org/members/w_pqi2.htm
Supported devices:
------------------
-<Controller names to be added as they become publically available.>
+<Controller names to be added as they become publicly available.>
smartpqi specific entries in /sys
-----------------------------
diff --git a/Documentation/security/LSM.rst b/Documentation/security/LSM.rst
index d75778b0fa10..98522e0e1ee2 100644
--- a/Documentation/security/LSM.rst
+++ b/Documentation/security/LSM.rst
@@ -5,7 +5,7 @@ Linux Security Module Development
Based on https://lkml.org/lkml/2007/10/26/215,
a new LSM is accepted into the kernel when its intent (a description of
what it tries to protect against and in what cases one would expect to
-use it) has been appropriately documented in ``Documentation/security/LSM``.
+use it) has been appropriately documented in ``Documentation/security/LSM.rst``.
This allows an LSM's code to be easily compared to its goals, and so
that end users and distros can make a more informed decision about which
LSMs suit their requirements.
diff --git a/Documentation/security/credentials.rst b/Documentation/security/credentials.rst
index 038a7e19eff9..66a2e24939d8 100644
--- a/Documentation/security/credentials.rst
+++ b/Documentation/security/credentials.rst
@@ -196,7 +196,7 @@ The Linux kernel supports the following types of credentials:
When a process accesses a key, if not already present, it will normally be
cached on one of these keyrings for future accesses to find.
- For more information on using keys, see Documentation/security/keys.txt.
+ For more information on using keys, see ``Documentation/security/keys/*``.
5. LSM
diff --git a/Documentation/security/keys/request-key.rst b/Documentation/security/keys/request-key.rst
index b2d16abaa9e9..21e27238cec6 100644
--- a/Documentation/security/keys/request-key.rst
+++ b/Documentation/security/keys/request-key.rst
@@ -3,7 +3,7 @@ Key Request Service
===================
The key request service is part of the key retention service (refer to
-Documentation/security/core.rst). This document explains more fully how
+Documentation/security/keys/core.rst). This document explains more fully how
the requesting algorithm works.
The process starts by either the kernel requesting a service by calling
diff --git a/Documentation/sound/cards/joystick.rst b/Documentation/sound/cards/joystick.rst
index a6e468c81d02..488946fc1079 100644
--- a/Documentation/sound/cards/joystick.rst
+++ b/Documentation/sound/cards/joystick.rst
@@ -11,7 +11,7 @@ General
First of all, you need to enable GAMEPORT support on Linux kernel for
using a joystick with the ALSA driver. For the details of gameport
-support, refer to Documentation/input/joystick.txt.
+support, refer to Documentation/input/joydev/joystick.rst.
The joystick support of ALSA drivers is different between ISA and PCI
cards. In the case of ISA (PnP) cards, it's usually handled by the
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
index 773d2bfacc6c..1fee5a4f6660 100644
--- a/Documentation/sound/hd-audio/models.rst
+++ b/Documentation/sound/hd-audio/models.rst
@@ -82,6 +82,8 @@ tpt460
Lenovo Thinkpad T460/560 setup
dual-codecs
Lenovo laptops with dual codecs
+alc700-ref
+ Intel reference board with ALC700 codec
ALC66x/67x/892
==============
diff --git a/Documentation/sound/hd-audio/notes.rst b/Documentation/sound/hd-audio/notes.rst
index f59c3cdbfaf4..9f7347830ba4 100644
--- a/Documentation/sound/hd-audio/notes.rst
+++ b/Documentation/sound/hd-audio/notes.rst
@@ -192,7 +192,7 @@ preset model instead of PCI (and codec-) SSID look-up.
What ``model`` option values are available depends on the codec chip.
Check your codec chip from the codec proc file (see "Codec Proc-File"
section below). It will show the vendor/product name of your codec
-chip. Then, see Documentation/sound/HD-Audio-Models.rst file,
+chip. Then, see Documentation/sound/hd-audio/models.rst file,
the section of HD-audio driver. You can find a list of codecs
and ``model`` options belonging to each codec. For example, for Realtek
ALC262 codec chip, pass ``model=ultra`` for devices that are compatible
diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
index 58ffa3f5bda7..a0b268466cb1 100644
--- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
+++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
@@ -2498,7 +2498,7 @@ Mic boost
Mic-boost switch is set as “Mic Boost” or “Mic Boost (6dB)”.
More precise information can be found in
-``Documentation/sound/alsa/ControlNames.txt``.
+``Documentation/sound/designs/control-names.rst``.
Access Flags
------------
diff --git a/Documentation/sound/oss/ALS b/Documentation/sound/oss/ALS
deleted file mode 100644
index bf10bed4574b..000000000000
--- a/Documentation/sound/oss/ALS
+++ /dev/null
@@ -1,66 +0,0 @@
-ALS-007/ALS-100/ALS-200 based sound cards
-=========================================
-
-Support for sound cards based around the Avance Logic
-ALS-007/ALS-100/ALS-200 chip is included. These chips are a single
-chip PnP sound solution which is mostly hardware compatible with the
-Sound Blaster 16 card, with most differences occurring in the use of
-the mixer registers. For this reason the ALS code is integrated
-as part of the Sound Blaster 16 driver (adding only 800 bytes to the
-SB16 driver).
-
-To use an ALS sound card under Linux, enable the following options as
-modules in the sound configuration section of the kernel config:
- - 100% Sound Blaster compatibles (SB16/32/64, ESS, Jazz16) support
- - FM synthesizer (YM3812/OPL-3) support
- - standalone MPU401 support may be required for some cards; for the
- ALS-007, when using isapnptools, it is required
-Since the ALS-007/100/200 are PnP cards, ISAPnP support should probably be
-compiled in. If kernel level PnP support is not included, isapnptools will
-be required to configure the card before the sound modules are loaded.
-
-When using kernel level ISAPnP, the kernel should correctly identify and
-configure all resources required by the card when the "sb" module is
-inserted. Note that the ALS-007 does not have a 16 bit DMA channel and that
-the MPU401 interface on this card uses a different interrupt to the audio
-section. This should all be correctly configured by the kernel; if problems
-with the MPU401 interface surface, try using the standalone MPU401 module,
-passing "0" as the "sb" module's "mpu_io" module parameter to prevent the
-soundblaster driver attempting to register the MPU401 itself. The onboard
-synth device can be accessed using the "opl3" module.
-
-If isapnptools is used to wake up the sound card (as in 2.2.x), the settings
-of the card's resources should be passed to the kernel modules ("sb", "opl3"
-and "mpu401") using the module parameters. When configuring an ALS-007, be
-sure to specify different IRQs for the audio and MPU401 sections - this card
-requires they be different. For "sb", "io", "irq" and "dma" should be set
-to the same values used to configure the audio section of the card with
-isapnp. "dma16" should be explicitly set to "-1" for an ALS-007 since this
-card does not have a 16 bit dma channel; if not specified the kernel will
-default to using channel 5 anyway which will cause audio not to work.
-"mpu_io" should be set to 0. The "io" parameter of the "opl3" module should
-also agree with the setting used by isapnp. To get the MPU401 interface
-working on an ALS-007 card, the "mpu401" module will be required since this
-card uses separate IRQs for the audio and MPU401 sections and there is no
-parameter available to pass a different IRQ to the "sb" driver (whose
-inbuilt MPU401 driver would otherwise be fine). Insert the mpu401 module
-passing appropriate values using the "io" and "irq" parameters.
-
-The resulting sound driver will provide the following capabilities:
- - 8 and 16 bit audio playback
- - 8 and 16 bit audio recording
- - Software selection of record source (line in, CD, FM, mic, master)
- - Record and playback of midi data via the external MPU-401
- - Playback of midi data using inbuilt FM synthesizer
- - Control of the ALS-007 mixer via any OSS-compatible mixer programs.
- Controls available are Master (L&R), Line in (L&R), CD (L&R),
- DSP/PCM/audio out (L&R), FM (L&R) and Mic in (mono).
-
-Jonathan Woithe
-jwoithe@just42.net
-30 March 1998
-
-Modified 2000-02-26 by Dave Forrest, drf5n@virginia.edu to add ALS100/ALS200
-Modified 2000-04-10 by Paul Laufer, pelaufer@csupomona.edu to add ISAPnP info.
-Modified 2000-11-19 by Jonathan Woithe, jwoithe@just42.net
- - updated information for kernel 2.4.x.
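As a worked example of the parameter description above (the I/O, IRQ and DMA
values are illustrative only - substitute whatever your isapnp configuration
actually assigned to the card), the module load sequence would look roughly
like this:

# example resource values - match them to your isapnp settings
/sbin/insmod sb io=0x220 irq=5 dma=1 dma16=-1 mpu_io=0
/sbin/insmod opl3 io=0x388
/sbin/insmod mpu401 io=0x330 irq=7
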
diff --git a/Documentation/sound/oss/AudioExcelDSP16 b/Documentation/sound/oss/AudioExcelDSP16
deleted file mode 100644
index ea8549faede9..000000000000
--- a/Documentation/sound/oss/AudioExcelDSP16
+++ /dev/null
@@ -1,101 +0,0 @@
-Driver
-------
-
-Information about Audio Excel DSP 16 driver can be found in the source
-file aedsp16.c
-Please read the head of the source file before using it; it contains useful
-information.
-
-Configuration
--------------
-
-The Audio Excel configuration is now done with the standard Linux setup.
-You have to configure the sound card (Sound Blaster or Microsoft Sound System)
-and, if you want it, the Roland MPU-401 (do not use the Sound Blaster MPU-401,
-SB-MPU401) in the main driver menu. Activate the lowlevel drivers then select
-the Audio Excel hardware that you want to initialize. Check the IRQ/DMA/MIRQ
-of the Audio Excel initialization: it must be the same as the SBPRO (or MSS)
-setup. If the parameters are different, correct it.
-If you own a Gallant audio card based on the SC-6600, activate the SC-6600 support.
-If you want to change the configuration of the sound board, be sure to
-check off all the configuration items before re-configuring it.
-
-Module parameters
------------------
-To use this driver as a module, you must configure some module parameters, to
-set up I/O addresses, IRQ lines and DMA channels. Some parameters are
-mandatory while others are optional. Here is a list of parameters you can
-use with this module:
-
-Name Description
-==== ===========
-MANDATORY
-io I/O base address (0x220 or 0x240)
-irq irq line (5, 7, 9, 10 or 11)
-dma dma channel (0, 1 or 3)
-
-OPTIONAL
-mss_base I/O base address for activate MSS mode (default SBPRO)
- (0x530 or 0xE80)
-mpu_base I/O base address for activate MPU-401 mode
- (0x300, 0x310, 0x320 or 0x330)
-mpu_irq MPU-401 irq line (5, 7, 9, 10 or 0)
-
-A configuration file in /etc/modprobe.d/ directory will have lines like this:
-
-options opl3 io=0x388
-options ad1848 io=0x530 irq=11 dma=3
-options aedsp16 io=0x220 irq=11 dma=3 mss_base=0x530
-
-Where the aedsp16 options are the options for this driver while opl3 and
-ad1848 are the corresponding options for the MSS and OPL3 modules.
-
-Loading MSS and OPL3 requires pre-loading the aedsp16 module to set up the
-sound card correctly. Installation dependencies must be written in configuration
-files under /etc/modprobe.d/ directory:
-
-softdep ad1848 pre: aedsp16
-softdep opl3 pre: aedsp16
-
-Then you must load the sound modules stack in this order:
-sound -> aedsp16 -> [ ad1848, opl3 ]
-
-With the above configuration, loading the ad1848 or opl3 module will
-automatically load the whole sound stack.
-
-Sound cards supported
----------------------
-This driver supports the SC-6000 and SC-6600 based Gallant sound cards.
-It does not support the Audio Excel DSP 16 III (try the SC-6600 code).
-I'm working on the III version of the card: if someone has useful
-information about it, please let me know.
-For all the non-supported audio cards, you have to boot MS-DOS (or WIN95)
-activating the audio card with the MS-DOS device driver, then you have to
-<ctrl>-<alt>-<del> and boot Linux.
-Follow these steps:
-
-1) Compile Linux kernel with standard sound driver, using the emulation
- you want, with the parameters of your audio card,
- e.g. Microsoft Sound System irq10 dma3
-2) Install your new kernel as the default boot kernel.
-3) Boot MS-DOS and configure the audio card with the boot time device
- driver, for MSS irq10 dma3 in our example.
-4) <ctrl>-<alt>-<del> and boot Linux. This will maintain the DOS configuration
- and will boot the new kernel with sound driver. The sound driver will find
- the audio card and will recognize and attach it.
-
-Reports on User successes
--------------------------
-
-> Date: Mon, 29 Jul 1996 08:35:40 +0100
-> From: Mr S J Greenaway <sjg95@unixfe.rl.ac.uk>
-> To: riccardo@cdc8g5.cdc.polimi.it (Riccardo Facchetti)
-> Subject: Re: Audio Excel DSP 16 initialization code
->
-> Just to let you know got my Audio Excel (emulating a MSS) working
-> with my original SB16, thanks for the driver!
-
-
-Last revised: 20 August 1998
-Riccardo Facchetti
-fizban@tin.it
diff --git a/Documentation/sound/oss/CMI8330 b/Documentation/sound/oss/CMI8330
deleted file mode 100644
index 8a5fd1611c6f..000000000000
--- a/Documentation/sound/oss/CMI8330
+++ /dev/null
@@ -1,152 +0,0 @@
-Documentation for CMI 8330 (SoundPRO)
--------------------------------------
-Alessandro Zummo <azummo@ita.flashnet.it>
-
-( Be sure to read Documentation/sound/oss/SoundPro too )
-
-
-This adapter is now directly supported by the sb driver.
-
- The only thing you have to do is to compile the kernel sound
-support as a module and to enable kernel ISAPnP support,
-as shown below.
-
-
-CONFIG_SOUND=m
-CONFIG_SOUND_SB=m
-
-CONFIG_PNP=y
-CONFIG_ISAPNP=y
-
-
-and optionally:
-
-
-CONFIG_SOUND_MPU401=m
-
- for MPU401 support.
-
-
-(I suggest you use "make menuconfig" or "make xconfig"
- for more comfortable configuration editing)
-
-
-
-Then you can do
-
- modprobe sb
-
-and everything will be (hopefully) configured.
-
-You should get something similar in syslog:
-
-sb: CMI8330 detected.
-sb: CMI8330 sb base located at 0x220
-sb: CMI8330 mpu base located at 0x330
-sb: CMI8330 mail reports to Alessandro Zummo <azummo@ita.flashnet.it>
-sb: ISAPnP reports CMI 8330 SoundPRO at i/o 0x220, irq 7, dma 1,5
-
-
-
-
-The old documentation file follows for reference
-purposes.
-
-
-How to enable CMI 8330 (SOUNDPRO) soundchip on Linux
-------------------------------------------
-Stefan Laudat <Stefan.Laudat@asit.ro>
-
-[Note: The CMI 8338 is unrelated and is supported by cmpci.o]
-
-
- In order to use CMI8330 under Linux you just have to use a proper isapnp.conf, a good isapnp and a little bit of patience. I use isapnp 1.17, but
-you may find a newer one at http://www.roestock.demon.co.uk/isapnptools/.
-
- Of course you will have to compile kernel sound support as module, as shown below:
-
-CONFIG_SOUND=m
-CONFIG_SOUND_OSS=m
-CONFIG_SOUND_SB=m
-CONFIG_SOUND_ADLIB=m
-CONFIG_SOUND_MPU401=m
-# Mikro$chaft sound system (kinda useful here ;))
-CONFIG_SOUND_MSS=m
-
- The /etc/isapnp.conf file will be:
-
-<snip below>
-
-
-(READPORT 0x0203)
-(ISOLATE PRESERVE)
-(IDENTIFY *)
-(VERBOSITY 2)
-(CONFLICT (IO FATAL)(IRQ FATAL)(DMA FATAL)(MEM FATAL)) # or WARNING
-(VERIFYLD N)
-
-
-# WSS
-
-(CONFIGURE CMI0001/16777472 (LD 0
-(IO 0 (SIZE 8) (BASE 0x0530))
-(IO 1 (SIZE 8) (BASE 0x0388))
-(INT 0 (IRQ 7 (MODE +E)))
-(DMA 0 (CHANNEL 0))
-(NAME "CMI0001/16777472[0]{CMI8330/C3D Audio Adapter}")
-(ACT Y)
-))
-
-# MPU
-
-(CONFIGURE CMI0001/16777472 (LD 1
-(IO 0 (SIZE 2) (BASE 0x0330))
-(INT 0 (IRQ 11 (MODE +E)))
-(NAME "CMI0001/16777472[1]{CMI8330/C3D Audio Adapter}")
-(ACT Y)
-))
-
-# Joystick
-
-(CONFIGURE CMI0001/16777472 (LD 2
-(IO 0 (SIZE 8) (BASE 0x0200))
-(NAME "CMI0001/16777472[2]{CMI8330/C3D Audio Adapter}")
-(ACT Y)
-))
-
-# SoundBlaster
-
-(CONFIGURE CMI0001/16777472 (LD 3
-(IO 0 (SIZE 16) (BASE 0x0220))
-(INT 0 (IRQ 5 (MODE +E)))
-(DMA 0 (CHANNEL 1))
-(DMA 1 (CHANNEL 5))
-(NAME "CMI0001/16777472[3]{CMI8330/C3D Audio Adapter}")
-(ACT Y)
-))
-
-
-(WAITFORKEY)
-
-<end of snip>
-
- The module sequence is trivial:
-
-/sbin/insmod soundcore
-/sbin/insmod sound
-/sbin/insmod uart401
-# insert this first
-/sbin/insmod ad1848 io=0x530 irq=7 dma=0 soundpro=1
-# The sb module is an alternative to the ad1848 (Microsoft Sound System)
-# Anyhow, this is full duplex and has MIDI
-/sbin/insmod sb io=0x220 dma=1 dma16=5 irq=5 mpu_io=0x330
-
-
-
-Alma Chao <elysian@ethereal.torsion.org> suggests the following in
-a /etc/modprobe.d/*conf file:
-
-alias sound ad1848
-alias synth0 opl3
-options ad1848 io=0x530 irq=7 dma=0 soundpro=1
-options opl3 io=0x388
diff --git a/Documentation/sound/oss/ESS b/Documentation/sound/oss/ESS
deleted file mode 100644
index bba93b4d2def..000000000000
--- a/Documentation/sound/oss/ESS
+++ /dev/null
@@ -1,34 +0,0 @@
-Documentation for the ESS AudioDrive chips
-
-In 2.4 kernels the SoundBlaster driver not only tries to detect an ESS chip, it
-tries to detect the type of ESS chip too. The correct detection of the chip
-doesn't always succeed however, so unless you use the kernel isapnp facilities
-(and your chip is PnP capable) the default behaviour is 2.0 behaviour, which
-means: only detect ES688 and ES1688.
-
-All ESS chips now have a recording level setting. This is a need-to-have for
-people who want to use their ESS for recording sound.
-
-Every chip that's detected as a later-than-es1688 chip has a 6 bits logarithmic
-master volume control.
-
-Every chip that's detected as an ES1887 now has Full Duplex support. I made a
-little test program that shows that it works, but haven't seen a real program that
-needs this however.
-
-For ESS chips an additional parameter "esstype" can be specified. This controls
-the (auto) detection of the ESS chips. It can have 3 kinds of values:
-
--1 Act like 2.0 kernels: only detect ES688 or ES1688.
-0 Try to auto-detect the chip (may fail for ES1688)
-688 The chip will be treated as ES688
-1688 ,, ,, ,, ,, ,, ,, ES1688
-1868 ,, ,, ,, ,, ,, ,, ES1868
-1869 ,, ,, ,, ,, ,, ,, ES1869
-1788 ,, ,, ,, ,, ,, ,, ES1788
-1887 ,, ,, ,, ,, ,, ,, ES1887
-1888 ,, ,, ,, ,, ,, ,, ES1888
-
-Because Full Duplex is supported for ES1887 you can specify a second DMA
-channel by specifying module parameter dma16. It can be one of: 0, 1, 3 or 5.
-
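As an illustration of the "esstype" and "dma16" parameters described above
(the resource values are examples only - match them to your card), an ES1887
could be configured in a modprobe options line such as:

options sb io=0x220 irq=5 dma=1 dma16=5 esstype=1887
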
diff --git a/Documentation/sound/oss/ESS1868 b/Documentation/sound/oss/ESS1868
deleted file mode 100644
index 55e922f21bc0..000000000000
--- a/Documentation/sound/oss/ESS1868
+++ /dev/null
@@ -1,55 +0,0 @@
-Documentation for the ESS1868F AudioDrive PnP sound card
-
-The ESS1868 sound card is a PnP ESS1688-compatible 16-bit sound card.
-
-It should be automatically detected by the Linux Kernel isapnp support when you
-load the sb.o module. Otherwise you should take care of:
-
- * The ESS1868 does not allow use of a 16-bit DMA, thus DMA 0, 1, 2, and 3
- may only be used.
-
- * isapnptools version 1.14 does work with ESS1868. Earlier versions might
- not.
-
- * Sound support MUST be compiled as MODULES, not statically linked
- into the kernel.
-
-
-NOTE: this is only needed when not using the kernel isapnp support!
-
-For configuring the sound card's I/O addresses, IRQ and DMA, here is a
-sample copy of the isapnp.conf directives regarding the ESS1868:
-
-(CONFIGURE ESS1868/-1 (LD 1
-(IO 0 (BASE 0x0220))
-(IO 1 (BASE 0x0388))
-(IO 2 (BASE 0x0330))
-(DMA 0 (CHANNEL 1))
-(INT 0 (IRQ 5 (MODE +E)))
-(ACT Y)
-))
-
-(for a full working isapnp.conf file, remember the
-(ISOLATE)
-(IDENTIFY *)
-at the beginning and the
-(WAITFORKEY)
-at the end.)
-
-In this setup, the main card I/O is 0x0220, FM synthesizer is 0x0388, and
-the MPU-401 MIDI port is located at 0x0330. IRQ is IRQ 5, DMA is channel 1.
-
-After configuring the sound card via isapnp, to use the card you must load
-the sound modules with the proper I/O information. Here is my setup:
-
-# ESS1868F AudioDrive initialization
-
-/sbin/modprobe sound
-/sbin/insmod uart401
-/sbin/insmod sb io=0x220 irq=5 dma=1 dma16=-1
-/sbin/insmod mpu401 io=0x330
-/sbin/insmod opl3 io=0x388
-/sbin/insmod v_midi
-
-opl3 is the FM synthesizer
-/sbin/insmod opl3 io=0x388
diff --git a/Documentation/sound/oss/Introduction b/Documentation/sound/oss/Introduction
deleted file mode 100644
index 42da2d8fa372..000000000000
--- a/Documentation/sound/oss/Introduction
+++ /dev/null
@@ -1,459 +0,0 @@
-Introduction Notes on Modular Sound Drivers and Soundcore
-Wade Hampton
-2/14/2001
-
-Purpose:
-========
-This document provides some general notes on the modular
-sound drivers and their configuration, along with the
-support modules sound.o and soundcore.o.
-
-Note, some of this probably should be added to the Sound-HOWTO!
-
-Note, soundlow.o was present with 2.2 kernels but is not
-required for 2.4.x kernels. References have been removed
-to this.
-
-
-Copying:
-========
-none
-
-
-History:
-========
-0.1.0 11/20/1998 First version, draft
-1.0.0 11/1998 Alan Cox changes, incorporation in 2.2.0
- as Documentation/sound/oss/Introduction
-1.1.0 6/30/1999 Second version, added notes on making the drivers,
- added info on multiple sound cards of similar types,]
- added more diagnostics info, added info about esd.
- added info on OSS and ALSA.
-1.1.1 19991031 Added notes on sound-slot- and sound-service.
- (Alan Cox)
-1.1.2 20000920 Modified for Kernel 2.4 (Christoph Hellwig)
-1.1.3 20010214 Minor notes and corrections (Wade Hampton)
- Added examples of sound-slot-0, etc.
-
-
-Modular Sound Drivers:
-======================
-
-Thanks to the GREAT work by Alan Cox (alan@lxorguk.ukuu.org.uk),
-
-[And Oleg Drokin, Thomas Sailer, Andrew Veliath and more than a few
- others - not to mention Hannu's original code being designed well
- enough to cope with that kind of chopping up](Alan)
-
-the standard Linux kernels support a modular sound driver. From
-Alan's comments in linux/drivers/sound/README.FIRST:
-
- The modular sound driver patches were funded by Red Hat Software
- (www.redhat.com). The sound driver here is thus a modified version of
- Hannu's code. Please bear that in mind when considering the appropriate
- forums for bug reporting.
-
-The modular sound drivers may be loaded via insmod or modprobe.
-To support all the various sound modules, there are two general
-support modules that must be loaded first:
-
- soundcore.o: Top level handler for the sound system, provides
- a set of functions for registration of devices
- by type.
-
- sound.o: Common sound functions required by all modules.
-
-For the specific sound modules (e.g., sb.o for the Soundblaster),
-read the documentation on that module to determine what options
-are available, for example IRQ, address, DMA.
-
-Warning, the options for different cards sometime use different names
-for the same or a similar feature (dma1= versus dma16=). As a last
-resort, inspect the code (search for module_param).
-
-Notes:
-
-1. There is a new OpenSource sound driver called ALSA which is
- currently under development: http://www.alsa-project.org/
- The ALSA drivers support some newer hardware that may not
- be supported by this sound driver and also provide some
- additional features.
-
-2. The commercial OSS driver may be obtained from the site:
- http://www.opensound.com. This may be used for cards that
- are unsupported by the kernel driver, or may be used
- by other operating systems.
-
-3. The enlightenment sound daemon may be used for playing
- multiple sounds at the same time via a single card, eliminating
- some of the requirements for multiple sound card systems. For
- more information, see: http://www.tux.org/~ricdude/EsounD.html
- The "esd" program may be used with the real-player and mpeg
- players like mpg123 and x11amp. The newer real-player
- and some games even include built-in support for ESD!
-
-
-Building the Modules:
-=====================
-
-This document does not provide full details on building the
-kernel, etc. The notes below apply only to making the kernel
-sound modules. If this conflicts with the kernel's README,
-the README takes precedence.
-
-1. To make the kernel sound modules, cd to your /usr/src/linux
- directory (typically) and type make config, make menuconfig,
- or make xconfig (to start the command line, dialog, or x-based
- configuration tool).
-
-2. Select the Sound option and a dialog will be displayed.
-
-3. Select M (module) for "Sound card support".
-
-4. Select your sound driver(s) as a module. For ProAudio, Sound
- Blaster, etc., select M (module) for OSS sound modules.
-   [thanks to Marvin Stodolsky <stodolsk@erols.com>]
-
-5. Make the kernel (e.g., make bzImage), and install the kernel.
-
-6. Make the modules and install them (make modules; make modules_install).
-
-Note, for 2.5.x kernels, make sure you have the newer module-init-tools
-installed or modules will not be loaded properly. 2.5.x requires an
-updated module-init-tools.
-
-
-Plug and Play (PnP):
-====================
-
-If the sound card is an ISA PnP card, isapnp may be used
-to configure the card. See the file isapnp.txt in the
-directory one level up (e.g., /usr/src/linux/Documentation).
-
-Also the 2.4.x kernels provide PnP capabilities, see the
-file NEWS in this directory.
-
-PCI sound cards are highly recommended, as they are far
-easier to configure and from what I have read, they use
-less resources and are more CPU efficient.
-
-
-INSMOD:
-=======
-
-If loading via insmod, the common modules must be loaded in the
-order below BEFORE loading the other sound modules. The card-specific
-modules may then be loaded (most require parameters). For example,
-I use the following via a shell script to load my SoundBlaster:
-
-SB_BASE=0x240
-SB_IRQ=9
-SB_DMA=3
-SB_DMA2=5
-SB_MPU=0x300
-#
-echo Starting sound
-/sbin/insmod soundcore
-/sbin/insmod sound
-#
-echo Starting sound blaster....
-/sbin/insmod uart401
-/sbin/insmod sb io=$SB_BASE irq=$SB_IRQ dma=$SB_DMA dma16=$SB_DMA2 mpu_io=$SB_MPU
-
-When using sound as a module, I typically put these commands
-in a file such as /root/soundon.sh.
-
-
-MODPROBE:
-=========
-
-If loading via modprobe, these common files are automatically loaded when
-requested by modprobe. For example, my /etc/modprobe.d/oss.conf contains:
-
-alias sound sb
-options sb io=0x240 irq=9 dma=3 dma16=5 mpu_io=0x300
-
-All you need to do to load the module is:
-
- /sbin/modprobe sb
-
-
-Sound Status:
-=============
-
-The status of sound may be read/checked by:
- cat (anyfile).au >/dev/audio
-
-[WWH: This may not work properly for SoundBlaster PCI 128 cards
-such as the es1370/1 (see the es1370/1 files in this directory)
-as they do not automatically support uLaw on /dev/audio.]
-
-The status of the modules and which modules depend on
-which other modules may be checked by:
- /sbin/lsmod
-
-/sbin/lsmod should show something like the following:
- sb 26280 0
- uart401 5640 0 [sb]
- sound 57112 0 [sb uart401]
- soundcore 1968 8 [sb sound]
-
-
-Removing Sound:
-===============
-
-Sound may be removed by using /sbin/rmmod in the reverse order
-in which you load the modules. Note, if a program has a sound device
-open (e.g., xmixer), that module (and the modules on which it
-depends) may not be unloaded.
-
-For example, I use the following to remove my Soundblaster (rmmod
-in the reverse order in which I loaded the modules):
-
-/sbin/rmmod sb
-/sbin/rmmod uart401
-/sbin/rmmod sound
-/sbin/rmmod soundcore
-
-When using sound as a module, I typically put these commands
-in a script such as /root/soundoff.sh.
-
-
-Removing Sound for use with OSS:
-================================
-
-If you get really stuck or have a card that the kernel modules
-will not support, you can get a commercial sound driver from
-http://www.opensound.com. Before loading the commercial sound
-driver, you should do the following:
-
-1. remove sound modules (detailed above)
-2. remove the sound modules from /etc/modprobe.d/*.conf
-3. move the sound modules from /lib/modules/<kernel>/misc
- (for example, I make a /lib/modules/<kernel>/misc/tmp
- directory and copy the sound module files to that
- directory).
-
-
-Multiple Sound Cards:
-=====================
-
-The sound drivers will support multiple sound cards and there
-are some great applications like multitrack that support them.
-Typically, you need two sound cards of different types. Note, this
-uses more precious interrupts and DMA channels and sometimes
-can be a configuration nightmare. I have heard reports of 3-4
-sound cards (typically I only use 2). You can sometimes use
-multiple PCI sound cards of the same type.
-
-On my machine I have two sound cards (cs4232 and Soundblaster Vibra
-16). By loading sound as modules, I can control which is the first
-sound device (/dev/dsp, /dev/audio, /dev/mixer) and which is
-the second. Normally, the cs4232 (Dell sound on the motherboard)
-would be the first sound device, but I prefer the Soundblaster.
-All you have to do is to load the one you want as /dev/dsp
-first (in my case "sb") and then load the other one
-(in my case "cs4232").
-
-If you have two cards of the same type that are jumpered
-cards or different PnP revisions, you may load the same
-module twice. For example, I have a SoundBlaster vibra 16
-and an older SoundBlaster 16 (jumpers). To load the module
-twice, you need to do the following:
-
-1. Copy the sound modules to a new name. For example
- sb.o could be copied (or symlinked) to sb1.o for the
- second SoundBlaster.
-
-2. Make a second entry in /etc/modprobe.d/*conf, for example,
- sound1 or sb1. This second entry should refer to the
- new module names for example sb1, and should include
- the I/O, etc. for the second sound card.
-
-3. Update your soundon.sh script, etc.
-
-Warning: I have never been able to get two PnP sound cards of the
-same type to load at the same time. I have tried this several times
-with the Soundblaster Vibra 16 cards. OSS has indicated that this
-is a PnP problem.... If anyone has any luck doing this, please
-send me an E-MAIL. PCI sound cards should not have this problem.
-Since this was originally released, I have received a couple of
-mails from people who have accomplished this!
-
-NOTE: In Linux 2.4 the Sound Blaster driver (and only this one yet)
-supports multiple cards with one module by default.
-Read the file 'Soundblaster' in this directory for details.
-
-
-Sound Problems:
-===============
-
-First RTFM (including the troubleshooting section
-in the Sound-HOWTO).
-
-1) If you are having problems loading the modules (for
- example, if you get device conflict errors) try the
- following:
-
- A) If you have Win95 or NT on the same computer,
- write down what addresses, IRQ, and DMA channels
- those were using for the same hardware. You probably
- can use these addresses, IRQs, and DMA channels.
- You should really do this BEFORE attempting to get
- sound working!
-
- B) Check (cat) /proc/interrupts, /proc/ioports,
- and /proc/dma. Are you trying to use an address,
- IRQ or DMA port that another device is using?
-
- C) Check (cat) /proc/isapnp
-
- D) Inspect your /var/log/messages file. Often that will
- indicate what IRQ or IO port could not be obtained.
-
- E) Try another port or IRQ. Note this may involve
- using the PnP tools to move the sound card to
- another location. Sometimes this is the only way
- and it is more or less trial and error.
-
-2) If you get motor-boating (the same sound or part of a
- sound clip repeated), you probably have either an IRQ
- or DMA conflict. Move the card to another IRQ or DMA
- port. This has happened to me when playing long files
- when I had an IRQ conflict.
-
-3. If you get dropouts or pauses when playing high sample
- rate files such as using mpg123 or x11amp/xmms, you may
- have too slow of a CPU and may have to use the options to
- play the files at 1/2 speed. For example, you may use
- the -2 or -4 option on mpg123. You may also get this
- when trying to play mpeg files stored on a CD-ROM
- (my Toshiba T8000 PII/366 sometimes has this problem).
-
-4. If you get "cannot access device" errors, your /dev/dsp
- files, etc. may be set to owner root, mode 600. You
- may have to use the command:
- chmod 666 /dev/dsp /dev/mixer /dev/audio
-
-5. If you get "device busy" errors, another program has the
- sound device open. For example, if using the Enlightenment
- sound daemon "esd", the "esd" program has the sound device.
- If using "esd", please RTFM the docs on ESD. For example,
- esddsp <program> may be used to play files via a non-esd
- aware program.
-
-6) Ask for help on the sound list or send E-MAIL to the
- sound driver author/maintainer.
-
-7) Turn on debug in drivers/sound/sound_config.h (DEB, DDB, MDB).
-
-8) If the system reports insufficient DMA memory then you may want to
- load sound with the "dmabufs=1" option. Or in /etc/conf.modules add
-
- preinstall sound dmabufs=1
-
- This makes the sound system allocate its buffers and hang onto them.
-
- You may also set persistent DMA when building a 2.4.x kernel.
-
-
-Configuring Sound:
-==================
-
-There are several ways of configuring your sound:
-
-1) On the kernel command line (when using the sound driver(s)
- compiled in the kernel). Check the driver source and
- documentation for details.
-
-2) On the command line when using insmod or in a bash script
- using command line calls to load sound.
-
-3) In /etc/modprobe.d/*conf when using modprobe.
-
-4) Via Red Hat's GPL'd /usr/sbin/sndconfig program (text based).
-
-5) Via the OSS soundconf program (with the commercial version
-   of the OSS driver).
-
-6) By just loading the module and letting isapnp do everything relevant
-   for you. This works with only a few drivers so far and - of course -
- only with isapnp hardware.
-
-And I am sure, several other ways.
-
-Anyone want to write a linuxconf module for configuring sound?
-
-
-Module Loading:
-===============
-
-When a sound card is first referenced and sound is modular, the sound system
-will ask for the sound devices to be loaded. Initially it requests that
-the driver for the sound system is loaded. It then will ask for
-sound-slot-0, where 0 is the first sound card. (sound-slot-1 the second and
-so on). Thus you can do
-
-alias sound-slot-0 sb
-
-To load a soundblaster at this point. If the slot loading does not provide
-the desired device - for example a soundblaster does not directly provide
-a midi synth in all cases - then it will request "sound-service-0-n" where n
-is
-
- 0 Mixer
-
- 2 MIDI
-
- 3, 4 DSP audio
-
-
-For example, I use the following to load my Soundblaster PCI 128
-(ES 1371) card first, followed by my SoundBlaster Vibra 16 card,
-then by my TV card:
-
-# Load the Soundblaster PCI 128 as /dev/dsp, /dev/dsp1, /dev/mixer
-alias sound-slot-0 es1371
-
-# Load the Soundblaster Vibra 16 as /dev/dsp2, /dev/mixer1
-alias sound-slot-1 sb
-options sb io=0x240 irq=5 dma=1 dma16=5 mpu_io=0x330
-
-# Load the BTTV (TV card) as /dev/mixer2
-alias sound-slot-2 bttv
-alias sound-service-2-0 tvmixer
-
-pre-install bttv modprobe tuner ; modprobe tvmixer
-pre-install tvmixer modprobe msp3400; modprobe tvaudio
-options tuner debug=0 type=8
-options bttv card=0 radio=0 pll=0
-
-
-For More Information (RTFM):
-============================
-1) Information on kernel modules: manual pages for insmod and modprobe.
-
-2) Information on PnP, RTFM manual pages for isapnp.
-
-3) Sound-HOWTO and Sound-Playing-HOWTO.
-
-4) OSS's WWW site at http://www.opensound.com.
-
-5) All the files in Documentation/sound.
-
-6) The comments and code in linux/drivers/sound.
-
-7) The sndconfig and rhsound documentation from Red Hat.
-
-8) The Linux-sound mailing list: sound-list@redhat.com.
-
-9) Enlightenment documentation (for info on esd)
- http://www.tux.org/~ricdude/EsounD.html.
-
-10) ALSA home page: http://www.alsa-project.org/
-
-
-Contact Information:
-====================
-Wade Hampton: (whampton@staffnet.com)
-
diff --git a/Documentation/sound/oss/MultiSound b/Documentation/sound/oss/MultiSound
deleted file mode 100644
index e4a18bb7f73a..000000000000
--- a/Documentation/sound/oss/MultiSound
+++ /dev/null
@@ -1,1137 +0,0 @@
-#! /bin/sh
-#
-# Turtle Beach MultiSound Driver Notes
-# -- Andrew Veliath <andrewtv@usa.net>
-#
-# Last update: September 10, 1998
-# Corresponding msnd driver: 0.8.3
-#
-# ** This file is a README (top part) and shell archive (bottom part).
-# The corresponding archived utility sources can be unpacked by
-# running `sh MultiSound' (the utilities are only needed for the
-# Pinnacle and Fiji cards). **
-#
-#
-# -=-=- Getting Firmware -=-=-
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#
-# See the section `Obtaining and Creating Firmware Files' in this
-# document for instructions on obtaining the necessary firmware
-# files.
-#
-#
-# Supported Features
-# ~~~~~~~~~~~~~~~~~~
-#
-# Currently, full-duplex digital audio (/dev/dsp only, /dev/audio is
-# not currently available) and mixer functionality (/dev/mixer) are
-# supported (memory mapped digital audio is not yet supported).
-# Digital transfers and monitoring can be done as well if you have
-# the digital daughterboard (see the section on using the S/PDIF port
-# for more information).
-#
-# Support for the Turtle Beach MultiSound Hurricane architecture is
-# composed of the following modules (these can also operate compiled
-# into the kernel):
-#
-# msnd - MultiSound base (requires soundcore)
-#
-#	msnd_classic	- Base audio/mixer support for Classic, Monterey and
-# Tahiti cards
-#
-# msnd_pinnacle - Base audio/mixer support for Pinnacle and Fiji cards
-#
-#
-# Important Notes - Read Before Using
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#
-# The firmware files are not included (may change in future). You
-# must obtain these images from Turtle Beach (they are included in
-# the MultiSound Development Kits), and place them in /etc/sound for
-# example, and give the full paths in the Linux configuration. If
-# you are compiling in support for the MultiSound driver rather than
-# using it as a module, these firmware files must be accessible
-# during kernel compilation.
-#
-# Please note these files must be binary files, not assembler. See
-# the section later in this document for instructions to obtain these
-# files.
-#
-#
-# Configuring Card Resources
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~
-#
-# ** This section is very important, as your card may not work at all
-# or your machine may crash if you do not do this correctly. **
-#
-# * Classic/Monterey/Tahiti
-#
-# These cards are configured through the driver msnd_classic. You must
-# know the io port, then the driver will select the irq and memory resources
-# on the card. It is up to you to know if these are free locations or not;
-# a conflict can lock the machine up.
-#
-# * Pinnacle/Fiji
-#
-# The Pinnacle and Fiji cards have an extra config port, either
-# 0x250, 0x260 or 0x270. This port can be disabled to have the card
-# configured strictly through PnP, however you lose the ability to
-# access the IDE controller and joystick devices on this card when
-# using PnP. The included pinnaclecfg program in this shell archive
-# can be used to configure the card in non-PnP mode, and in PnP mode
-# you can use isapnptools. These are described briefly here.
-#
-# pinnaclecfg is not required; you can use the msnd_pinnacle module
-# to fully configure the card as well. However, pinnaclecfg can be
-# used to change the resource values of a particular device after the
-# msnd_pinnacle module has been loaded. If you are compiling the
-# driver into the kernel, you must set these values during compile
-# time, however other peripheral resource values can be changed with
-# the pinnaclecfg program after the kernel is loaded.
-#
-#
-# *** PnP mode
-#
-# Use pnpdump to obtain a sample configuration if you can; I was able
-# to obtain one with the command `pnpdump 1 0x203' -- this may vary
-# for you (running pnpdump by itself did not work for me). Then,
-# edit this file and use isapnp to uncomment and set the card values.
-# Use these values when inserting the msnd_pinnacle module. Using
-# this method, you can set the resources for the DSP and the Kurzweil
-# synth (Pinnacle). Since Linux does not directly support PnP
-# devices, you may have difficulty when using the card in PnP mode
-# when it the driver is compiled into the kernel. Using non-PnP mode
-# is preferable in this case.
-#
-# Here is an example mypinnacle.conf for isapnp that sets the card to
-# io base 0x210, irq 5 and mem 0xd8000, and also sets the Kurzweil
-# synth to 0x330 and irq 9 (may need editing for your system):
-#
-# (READPORT 0x0203)
-# (CSN 2)
-# (IDENTIFY *)
-#
-# # DSP
-# (CONFIGURE BVJ0440/-1 (LD 0
-# (INT 0 (IRQ 5 (MODE +E))) (IO 0 (BASE 0x0210)) (MEM 0 (BASE 0x0d8000))
-# (ACT Y)))
-#
-# # Kurzweil Synth (Pinnacle Only)
-# (CONFIGURE BVJ0440/-1 (LD 1
-# (IO 0 (BASE 0x0330)) (INT 0 (IRQ 9 (MODE +E)))
-# (ACT Y)))
-#
-# (WAITFORKEY)
-#
-#
-# *** Non-PnP mode
-#
-# The second way is by running the card in non-PnP mode. This
-# actually has some advantages in that you can access some other
-# devices on the card, such as the joystick and IDE controller. To
-# configure the card, unpack this shell archive and build the
-# pinnaclecfg program. Using this program, you can assign the
-# resource values to the card's devices, or disable the devices. As
-# an alternative to using pinnaclecfg, you can specify many of the
-# configuration values when loading the msnd_pinnacle module (or
-# during kernel configuration when compiling the driver into the
-# kernel).
-#
-# If you specify cfg=0x250 for the msnd_pinnacle module, it
-# will automatically configure the card to the given io, irq and memory
-# values using that config port (the config port is jumper selectable
-# on the card to 0x250, 0x260 or 0x270).
-#
-# See the `msnd_pinnacle Additional Options' section below for more
-# information on these parameters (also, if you compile the driver
-# directly into the kernel, these extra parameters can be useful
-# here).
-#
-#
-# ** It is very easy to cause problems in your machine if you choose a
-# resource value which is incorrect. **
-#
-#
-# Examples
-# ~~~~~~~~
-#
-# * MultiSound Classic/Monterey/Tahiti:
-#
-# modprobe soundcore
-# insmod msnd
-# insmod msnd_classic io=0x290 irq=7 mem=0xd0000
-#
-# * MultiSound Pinnacle in PnP mode:
-#
-# modprobe soundcore
-# insmod msnd
-# isapnp mypinnacle.conf
-# insmod msnd_pinnacle io=0x210 irq=5 mem=0xd8000 <-- match mypinnacle.conf values
-#
-# * MultiSound Pinnacle in non-PnP mode (replace 0x250 with your configuration port,
-# one of 0x250, 0x260 or 0x270):
-#
-# insmod soundcore
-# insmod msnd
-# insmod msnd_pinnacle cfg=0x250 io=0x290 irq=5 mem=0xd0000
-#
-# * To use the MPU-compatible Kurzweil synth on the Pinnacle in PnP
-# mode, add the following (assumes you did `isapnp mypinnacle.conf'):
-#
-# insmod sound
-# insmod mpu401 io=0x330 irq=9 <-- match mypinnacle.conf values
-#
-# * To use the MPU-compatible Kurzweil synth on the Pinnacle in non-PnP
-# mode, add the following. Note how we first configure the peripheral's
-# resources, _then_ install a Linux driver for it:
-#
-# insmod sound
-# pinnaclecfg 0x250 mpu 0x330 9
-# insmod mpu401 io=0x330 irq=9
-#
-# -- OR you can use the following sequence without pinnaclecfg in non-PnP mode:
-#
-# insmod soundcore
-# insmod msnd
-# insmod msnd_pinnacle cfg=0x250 io=0x290 irq=5 mem=0xd0000 mpu_io=0x330 mpu_irq=9
-# insmod sound
-# insmod mpu401 io=0x330 irq=9
-#
-# * To setup the joystick port on the Pinnacle in non-PnP mode (though
-# you have to find the actual Linux joystick driver elsewhere), you
-# can use pinnaclecfg:
-#
-# pinnaclecfg 0x250 joystick 0x200
-#
-# -- OR you can configure this using msnd_pinnacle with the following:
-#
-# insmod soundcore
-# insmod msnd
-# insmod msnd_pinnacle cfg=0x250 io=0x290 irq=5 mem=0xd0000 joystick_io=0x200
-#
-#
-# msnd_classic, msnd_pinnacle Required Options
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#
-# If the following options are not given, the module will not load.
-# Examine the kernel message log for informative error messages.
-# WARNING--probing isn't supported so try to make sure you have the
-# correct shared memory area, otherwise you may experience problems.
-#
-# io I/O base of DSP, e.g. io=0x210
-# irq IRQ number, e.g. irq=5
-# mem Shared memory area, e.g. mem=0xd8000
-#
-#
-# msnd_classic, msnd_pinnacle Additional Options
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#
-# fifosize The digital audio FIFOs, in kilobytes. If not
-# specified, the default will be used. Increasing
-# this value will reduce the chance of a FIFO
-# underflow at the expense of increasing overall
-# latency. For example, fifosize=512 will
-# allocate 512kB read and write FIFOs (1MB total).
-# While this may reduce dropouts, a heavy machine
-# load will undoubtedly starve the FIFO of data
-# and you will eventually get dropouts. One
-# option is to alter the scheduling priority of
-# the playback process, using `nice' or some form
-# of POSIX soft real-time scheduling.
-#
-# calibrate_signal Setting this to one calibrates the ADCs to the
-# signal, zero calibrates to the card (defaults
-# to zero).
-#
-#
-# msnd_pinnacle Additional Options
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#
-# digital Specify digital=1 to enable the S/PDIF input
-# if you have the digital daughterboard
-# adapter. This will enable access to the
-# DIGITAL1 input for the soundcard in the mixer.
-# Some mixer programs might have trouble setting
-# the DIGITAL1 source as an input. If you have
-# trouble, you can try the setdigital.c program
-# at the bottom of this document.
-#
-# cfg Non-PnP configuration port for the Pinnacle
-# and Fiji (typically 0x250, 0x260 or 0x270,
-# depending on the jumper configuration). If
-# this option is omitted, then it is assumed
-# that the card is in PnP mode, and that the
-# specified DSP resource values are already
-# configured with PnP (i.e. it won't attempt to
-# do any sort of configuration).
-#
-# When the Pinnacle is in non-PnP mode, you can use the following
-# options to configure particular devices. If a full specification
-# for a device is not given, then the device is not configured. Note
-# that you still must use a Linux driver for any of these devices
-# once their resources are setup (such as the Linux joystick driver,
-# or the MPU401 driver from OSS for the Kurzweil synth).
-#
-# mpu_io I/O port of MPU (on-board Kurzweil synth)
-# mpu_irq IRQ of MPU (on-board Kurzweil synth)
-# ide_io0 First I/O port of IDE controller
-# ide_io1 Second I/O port of IDE controller
-# ide_irq IRQ IDE controller
-# joystick_io I/O port of joystick
-#
-#
-# Obtaining and Creating Firmware Files
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#
-# For the Classic/Tahiti/Monterey
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#
-# Download to /tmp and unzip the following file from Turtle Beach:
-#
-# ftp://ftp.voyetra.com/pub/tbs/msndcl/msndvkit.zip
-#
-# When unzipped, unzip the file named MsndFiles.zip. Then copy the
-# following firmware files to /etc/sound (note the file renaming):
-#
-# cp DSPCODE/MSNDINIT.BIN /etc/sound/msndinit.bin
-# cp DSPCODE/MSNDPERM.REB /etc/sound/msndperm.bin
-#
-# When configuring the Linux kernel, specify /etc/sound/msndinit.bin and
-# /etc/sound/msndperm.bin for the two firmware files (Linux kernel
-# versions older than 2.2 do not ask for firmware paths, and are
-# hardcoded to /etc/sound).
-#
-# If you are compiling the driver into the kernel, these files must
-# be accessible during compilation, but will not be needed later.
-# The files must remain, however, if the driver is used as a module.
-#
-#
-# For the Pinnacle/Fiji
-# ~~~~~~~~~~~~~~~~~~~~~
-#
-# Download to /tmp and unzip the following file from Turtle Beach (be
-# sure to use the entire URL; some have had trouble navigating to the
-# URL):
-#
-# ftp://ftp.voyetra.com/pub/tbs/pinn/pnddk100.zip
-#
-# Unpack this shell archive, and run make in the created directory
-# (you need a C compiler and flex to build the utilities). This
-# should give you the executables conv, pinnaclecfg and setdigital.
-# conv is only used temporarily here to create the firmware files,
-# while pinnaclecfg is used to configure the Pinnacle or Fiji card in
-# non-PnP mode, and setdigital can be used to set the S/PDIF input on
-# the mixer (pinnaclecfg and setdigital should be copied to a
-# convenient place, possibly run during system initialization).
-#
-# To generate the firmware files with the `conv' program, we create
-# the binary firmware files by doing the following conversion
-# (assuming the archive unpacked into a directory named PINNDDK):
-#
-# ./conv < PINNDDK/dspcode/pndspini.asm > /etc/sound/pndspini.bin
-# ./conv < PINNDDK/dspcode/pndsperm.asm > /etc/sound/pndsperm.bin
-#
-# The conv (and conv.l) program is not needed after conversion and can
-# be safely deleted. Then, when configuring the Linux kernel, specify
-# /etc/sound/pndspini.bin and /etc/sound/pndsperm.bin for the two
-# firmware files (Linux kernel versions older than 2.2 do not ask for
-# firmware paths, and are hardcoded to /etc/sound).
-#
-# If you are compiling the driver into the kernel, these files must
-# be accessible during compilation, but will not be needed later.
-# The files must remain, however, if the driver is used as a module.
-#
-#
-# Using Digital I/O with the S/PDIF Port
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#
-# If you have a Pinnacle or Fiji with the digital daughterboard and
-# want to set it as the input source, you can use this program if you
-# have trouble trying to do it with a mixer program (be sure to
-# insert the module with the digital=1 option, or say Y to the option
-# during compiled-in kernel operation). Upon selection of the S/PDIF
-# port, you should be able to monitor and record from it.
-#
-# There is something to note about using the S/PDIF port. Digital
-# timing is taken from the digital signal, so if a signal is not
-# connected to the port and it is selected as recording input, you
-# will find PCM playback to be distorted in playback rate. Also,
-# attempting to record at a sampling rate other than the DAT rate may
-# be problematic (i.e. trying to record at 8000Hz when the DAT signal
-# is 44100Hz). If you have a problem with this, set the recording
-# input to analog if you need to record at a rate other than that of
-# the DAT rate.
-#
-#
-# -- Shell archive attached below, just run `sh MultiSound' to extract.
-# Contains Pinnacle/Fiji utilities to convert firmware, configure
-# in non-PnP mode, and select the DIGITAL1 input for the mixer.
-#
-#
-#!/bin/sh
-# This is a shell archive (produced by GNU sharutils 4.2).
-# To extract the files from this archive, save it to some FILE, remove
-# everything before the `!/bin/sh' line above, then type `sh FILE'.
-#
-# Made on 1998-12-04 10:07 EST by <andrewtv@ztransform.velsoft.com>.
-# Source directory was `/home/andrewtv/programming/pinnacle/pinnacle'.
-#
-# Existing files will *not* be overwritten unless `-c' is specified.
-#
-# This shar contains:
-# length mode name
-# ------ ---------- ------------------------------------------
-# 2046 -rw-rw-r-- MultiSound.d/setdigital.c
-# 10235 -rw-rw-r-- MultiSound.d/pinnaclecfg.c
-# 106 -rw-rw-r-- MultiSound.d/Makefile
-# 141 -rw-rw-r-- MultiSound.d/conv.l
-# 1472 -rw-rw-r-- MultiSound.d/msndreset.c
-#
-save_IFS="${IFS}"
-IFS="${IFS}:"
-gettext_dir=FAILED
-locale_dir=FAILED
-first_param="$1"
-for dir in $PATH
-do
- if test "$gettext_dir" = FAILED && test -f $dir/gettext \
- && ($dir/gettext --version >/dev/null 2>&1)
- then
- set `$dir/gettext --version 2>&1`
- if test "$3" = GNU
- then
- gettext_dir=$dir
- fi
- fi
- if test "$locale_dir" = FAILED && test -f $dir/shar \
- && ($dir/shar --print-text-domain-dir >/dev/null 2>&1)
- then
- locale_dir=`$dir/shar --print-text-domain-dir`
- fi
-done
-IFS="$save_IFS"
-if test "$locale_dir" = FAILED || test "$gettext_dir" = FAILED
-then
- echo=echo
-else
- TEXTDOMAINDIR=$locale_dir
- export TEXTDOMAINDIR
- TEXTDOMAIN=sharutils
- export TEXTDOMAIN
- echo="$gettext_dir/gettext -s"
-fi
-touch -am 1231235999 $$.touch >/dev/null 2>&1
-if test ! -f 1231235999 && test -f $$.touch; then
- shar_touch=touch
-else
- shar_touch=:
- echo
- $echo 'WARNING: not restoring timestamps. Consider getting and'
- $echo "installing GNU \`touch', distributed in GNU File Utilities..."
- echo
-fi
-rm -f 1231235999 $$.touch
-#
-if mkdir _sh01426; then
- $echo 'x -' 'creating lock directory'
-else
- $echo 'failed to create lock directory'
- exit 1
-fi
-# ============= MultiSound.d/setdigital.c ==============
-if test ! -d 'MultiSound.d'; then
- $echo 'x -' 'creating directory' 'MultiSound.d'
- mkdir 'MultiSound.d'
-fi
-if test -f 'MultiSound.d/setdigital.c' && test "$first_param" != -c; then
- $echo 'x -' SKIPPING 'MultiSound.d/setdigital.c' '(file already exists)'
-else
- $echo 'x -' extracting 'MultiSound.d/setdigital.c' '(text)'
- sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/setdigital.c' &&
-/*********************************************************************
-X *
-X * setdigital.c - sets the DIGITAL1 input for a mixer
-X *
-X * Copyright (C) 1998 Andrew Veliath
-X *
-X * This program is free software; you can redistribute it and/or modify
-X * it under the terms of the GNU General Public License as published by
-X * the Free Software Foundation; either version 2 of the License, or
-X * (at your option) any later version.
-X *
-X * This program is distributed in the hope that it will be useful,
-X * but WITHOUT ANY WARRANTY; without even the implied warranty of
-X * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-X * GNU General Public License for more details.
-X *
-X * You should have received a copy of the GNU General Public License
-X * along with this program; if not, write to the Free Software
-X * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-X *
-X ********************************************************************/
-X
-#include <stdio.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/ioctl.h>
-#include <sys/soundcard.h>
-X
-int main(int argc, char *argv[])
-{
-X int fd;
-X unsigned long recmask, recsrc;
-X
-X if (argc != 2) {
-X fprintf(stderr, "usage: setdigital <mixer device>\n");
-X exit(1);
-X }
-X
-X if ((fd = open(argv[1], O_RDWR)) < 0) {
-X perror(argv[1]);
-X exit(1);
-X }
-X
-X if (ioctl(fd, SOUND_MIXER_READ_RECMASK, &recmask) < 0) {
-X fprintf(stderr, "error: ioctl read recording mask failed\n");
-X perror("ioctl");
-X close(fd);
-X exit(1);
-X }
-X
-X if (!(recmask & SOUND_MASK_DIGITAL1)) {
-X fprintf(stderr, "error: cannot find DIGITAL1 device in mixer\n");
-X close(fd);
-X exit(1);
-X }
-X
-X if (ioctl(fd, SOUND_MIXER_READ_RECSRC, &recsrc) < 0) {
-X fprintf(stderr, "error: ioctl read recording source failed\n");
-X perror("ioctl");
-X close(fd);
-X exit(1);
-X }
-X
-X recsrc |= SOUND_MASK_DIGITAL1;
-X
-X if (ioctl(fd, SOUND_MIXER_WRITE_RECSRC, &recsrc) < 0) {
-X fprintf(stderr, "error: ioctl write recording source failed\n");
-X perror("ioctl");
-X close(fd);
-X exit(1);
-X }
-X
-X close(fd);
-X
-X return 0;
-}
-SHAR_EOF
- $shar_touch -am 1204092598 'MultiSound.d/setdigital.c' &&
- chmod 0664 'MultiSound.d/setdigital.c' ||
- $echo 'restore of' 'MultiSound.d/setdigital.c' 'failed'
- if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
- && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
- md5sum -c << SHAR_EOF >/dev/null 2>&1 \
- || $echo 'MultiSound.d/setdigital.c:' 'MD5 check failed'
-e87217fc3e71288102ba41fd81f71ec4 MultiSound.d/setdigital.c
-SHAR_EOF
- else
- shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/setdigital.c'`"
- test 2046 -eq "$shar_count" ||
- $echo 'MultiSound.d/setdigital.c:' 'original size' '2046,' 'current size' "$shar_count!"
- fi
-fi
-# ============= MultiSound.d/pinnaclecfg.c ==============
-if test -f 'MultiSound.d/pinnaclecfg.c' && test "$first_param" != -c; then
- $echo 'x -' SKIPPING 'MultiSound.d/pinnaclecfg.c' '(file already exists)'
-else
- $echo 'x -' extracting 'MultiSound.d/pinnaclecfg.c' '(text)'
- sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/pinnaclecfg.c' &&
-/*********************************************************************
-X *
-X * pinnaclecfg.c - Pinnacle/Fiji Device Configuration Program
-X *
-X * This is for NON-PnP mode only. For PnP mode, use isapnptools.
-X *
-X * This is Linux-specific, and must be run with root permissions.
-X *
-X * Part of the Turtle Beach MultiSound Sound Card Driver for Linux
-X *
-X * Copyright (C) 1998 Andrew Veliath
-X *
-X * This program is free software; you can redistribute it and/or modify
-X * it under the terms of the GNU General Public License as published by
-X * the Free Software Foundation; either version 2 of the License, or
-X * (at your option) any later version.
-X *
-X * This program is distributed in the hope that it will be useful,
-X * but WITHOUT ANY WARRANTY; without even the implied warranty of
-X * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-X * GNU General Public License for more details.
-X *
-X * You should have received a copy of the GNU General Public License
-X * along with this program; if not, write to the Free Software
-X * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-X *
-X ********************************************************************/
-X
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <unistd.h>
-#include <asm/io.h>
-#include <asm/types.h>
-X
-#define IREG_LOGDEVICE 0x07
-#define IREG_ACTIVATE 0x30
-#define LD_ACTIVATE 0x01
-#define LD_DISACTIVATE 0x00
-#define IREG_EECONTROL 0x3F
-#define IREG_MEMBASEHI 0x40
-#define IREG_MEMBASELO 0x41
-#define IREG_MEMCONTROL 0x42
-#define IREG_MEMRANGEHI 0x43
-#define IREG_MEMRANGELO 0x44
-#define MEMTYPE_8BIT 0x00
-#define MEMTYPE_16BIT 0x02
-#define MEMTYPE_RANGE 0x00
-#define MEMTYPE_HIADDR 0x01
-#define IREG_IO0_BASEHI 0x60
-#define IREG_IO0_BASELO 0x61
-#define IREG_IO1_BASEHI 0x62
-#define IREG_IO1_BASELO 0x63
-#define IREG_IRQ_NUMBER 0x70
-#define IREG_IRQ_TYPE 0x71
-#define IRQTYPE_HIGH 0x02
-#define IRQTYPE_LOW 0x00
-#define IRQTYPE_LEVEL 0x01
-#define IRQTYPE_EDGE 0x00
-X
-#define HIBYTE(w) ((BYTE)(((WORD)(w) >> 8) & 0xFF))
-#define LOBYTE(w) ((BYTE)(w))
-#define MAKEWORD(low,hi) ((WORD)(((BYTE)(low))|(((WORD)((BYTE)(hi)))<<8)))
-X
-typedef __u8 BYTE;
-typedef __u16 USHORT;
-typedef __u16 WORD;
-X
-static int config_port = -1;
-X
-static int msnd_write_cfg(int cfg, int reg, int value)
-{
-X outb(reg, cfg);
-X outb(value, cfg + 1);
-X if (value != inb(cfg + 1)) {
-X fprintf(stderr, "error: msnd_write_cfg: I/O error\n");
-X return -EIO;
-X }
-X return 0;
-}
-X
-static int msnd_read_cfg(int cfg, int reg)
-{
-X outb(reg, cfg);
-X return inb(cfg + 1);
-}
-X
-static int msnd_write_cfg_io0(int cfg, int num, WORD io)
-{
-X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
-X return -EIO;
-X if (msnd_write_cfg(cfg, IREG_IO0_BASEHI, HIBYTE(io)))
-X return -EIO;
-X if (msnd_write_cfg(cfg, IREG_IO0_BASELO, LOBYTE(io)))
-X return -EIO;
-X return 0;
-}
-X
-static int msnd_read_cfg_io0(int cfg, int num, WORD *io)
-{
-X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
-X return -EIO;
-X
-X *io = MAKEWORD(msnd_read_cfg(cfg, IREG_IO0_BASELO),
-X msnd_read_cfg(cfg, IREG_IO0_BASEHI));
-X
-X return 0;
-}
-X
-static int msnd_write_cfg_io1(int cfg, int num, WORD io)
-{
-X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
-X return -EIO;
-X if (msnd_write_cfg(cfg, IREG_IO1_BASEHI, HIBYTE(io)))
-X return -EIO;
-X if (msnd_write_cfg(cfg, IREG_IO1_BASELO, LOBYTE(io)))
-X return -EIO;
-X return 0;
-}
-X
-static int msnd_read_cfg_io1(int cfg, int num, WORD *io)
-{
-X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
-X return -EIO;
-X
-X *io = MAKEWORD(msnd_read_cfg(cfg, IREG_IO1_BASELO),
-X msnd_read_cfg(cfg, IREG_IO1_BASEHI));
-X
-X return 0;
-}
-X
-static int msnd_write_cfg_irq(int cfg, int num, WORD irq)
-{
-X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
-X return -EIO;
-X if (msnd_write_cfg(cfg, IREG_IRQ_NUMBER, LOBYTE(irq)))
-X return -EIO;
-X if (msnd_write_cfg(cfg, IREG_IRQ_TYPE, IRQTYPE_EDGE))
-X return -EIO;
-X return 0;
-}
-X
-static int msnd_read_cfg_irq(int cfg, int num, WORD *irq)
-{
-X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
-X return -EIO;
-X
-X *irq = msnd_read_cfg(cfg, IREG_IRQ_NUMBER);
-X
-X return 0;
-}
-X
-static int msnd_write_cfg_mem(int cfg, int num, int mem)
-{
-X WORD wmem;
-X
-X mem >>= 8;
-X mem &= 0xfff;
-X wmem = (WORD)mem;
-X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
-X return -EIO;
-X if (msnd_write_cfg(cfg, IREG_MEMBASEHI, HIBYTE(wmem)))
-X return -EIO;
-X if (msnd_write_cfg(cfg, IREG_MEMBASELO, LOBYTE(wmem)))
-X return -EIO;
-X if (wmem && msnd_write_cfg(cfg, IREG_MEMCONTROL, (MEMTYPE_HIADDR | MEMTYPE_16BIT)))
-X return -EIO;
-X return 0;
-}
-X
-static int msnd_read_cfg_mem(int cfg, int num, int *mem)
-{
-X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
-X return -EIO;
-X
-X *mem = MAKEWORD(msnd_read_cfg(cfg, IREG_MEMBASELO),
-X msnd_read_cfg(cfg, IREG_MEMBASEHI));
-X *mem <<= 8;
-X
-X return 0;
-}
-X
-static int msnd_activate_logical(int cfg, int num)
-{
-X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
-X return -EIO;
-X if (msnd_write_cfg(cfg, IREG_ACTIVATE, LD_ACTIVATE))
-X return -EIO;
-X return 0;
-}
-X
-static int msnd_write_cfg_logical(int cfg, int num, WORD io0, WORD io1, WORD irq, int mem)
-{
-X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
-X return -EIO;
-X if (msnd_write_cfg_io0(cfg, num, io0))
-X return -EIO;
-X if (msnd_write_cfg_io1(cfg, num, io1))
-X return -EIO;
-X if (msnd_write_cfg_irq(cfg, num, irq))
-X return -EIO;
-X if (msnd_write_cfg_mem(cfg, num, mem))
-X return -EIO;
-X if (msnd_activate_logical(cfg, num))
-X return -EIO;
-X return 0;
-}
-X
-static int msnd_read_cfg_logical(int cfg, int num, WORD *io0, WORD *io1, WORD *irq, int *mem)
-{
-X if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
-X return -EIO;
-X if (msnd_read_cfg_io0(cfg, num, io0))
-X return -EIO;
-X if (msnd_read_cfg_io1(cfg, num, io1))
-X return -EIO;
-X if (msnd_read_cfg_irq(cfg, num, irq))
-X return -EIO;
-X if (msnd_read_cfg_mem(cfg, num, mem))
-X return -EIO;
-X return 0;
-}
-X
-static void usage(void)
-{
-X fprintf(stderr,
-X "\n"
-X "pinnaclecfg 1.0\n"
-X "\n"
-X "usage: pinnaclecfg <config port> [device config]\n"
-X "\n"
-X "This is for use with the card in NON-PnP mode only.\n"
-X "\n"
-X "Available devices (not all available for Fiji):\n"
-X "\n"
-X " Device Description\n"
-X " -------------------------------------------------------------------\n"
-X " reset Reset all devices (i.e. disable)\n"
-X " show Display current device configurations\n"
-X "\n"
-X " dsp <io> <irq> <mem> Audio device\n"
-X " mpu <io> <irq> Internal Kurzweil synth\n"
-X " ide <io0> <io1> <irq> On-board IDE controller\n"
-X " joystick <io> Joystick port\n"
-X "\n");
-X exit(1);
-}
-X
-static int cfg_reset(void)
-{
-X int i;
-X
-X for (i = 0; i < 4; ++i)
-X msnd_write_cfg_logical(config_port, i, 0, 0, 0, 0);
-X
-X return 0;
-}
-X
-static int cfg_show(void)
-{
-X int i;
-X int count = 0;
-X
-X for (i = 0; i < 4; ++i) {
-X WORD io0, io1, irq;
-X int mem;
-X msnd_read_cfg_logical(config_port, i, &io0, &io1, &irq, &mem);
-X switch (i) {
-X case 0:
-X if (io0 || irq || mem) {
-X printf("dsp 0x%x %d 0x%x\n", io0, irq, mem);
-X ++count;
-X }
-X break;
-X case 1:
-X if (io0 || irq) {
-X printf("mpu 0x%x %d\n", io0, irq);
-X ++count;
-X }
-X break;
-X case 2:
-X if (io0 || io1 || irq) {
-X printf("ide 0x%x 0x%x %d\n", io0, io1, irq);
-X ++count;
-X }
-X break;
-X case 3:
-X if (io0) {
-X printf("joystick 0x%x\n", io0);
-X ++count;
-X }
-X break;
-X }
-X }
-X
-X if (count == 0)
-X fprintf(stderr, "no devices configured\n");
-X
-X return 0;
-}
-X
-static int cfg_dsp(int argc, char *argv[])
-{
-X int io, irq, mem;
-X
-X if (argc < 3 ||
-X sscanf(argv[0], "0x%x", &io) != 1 ||
-X sscanf(argv[1], "%d", &irq) != 1 ||
-X sscanf(argv[2], "0x%x", &mem) != 1)
-X usage();
-X
-X if (!(io == 0x290 ||
-X io == 0x260 ||
-X io == 0x250 ||
-X io == 0x240 ||
-X io == 0x230 ||
-X io == 0x220 ||
-X io == 0x210 ||
-X io == 0x3e0)) {
-X fprintf(stderr, "error: io must be one of "
-X "210, 220, 230, 240, 250, 260, 290, or 3E0\n");
-X usage();
-X }
-X
-X if (!(irq == 5 ||
-X irq == 7 ||
-X irq == 9 ||
-X irq == 10 ||
-X irq == 11 ||
-X irq == 12)) {
-X fprintf(stderr, "error: irq must be one of "
-X "5, 7, 9, 10, 11 or 12\n");
-X usage();
-X }
-X
-X if (!(mem == 0xb0000 ||
-X mem == 0xc8000 ||
-X mem == 0xd0000 ||
-X mem == 0xd8000 ||
-X mem == 0xe0000 ||
-X mem == 0xe8000)) {
-X fprintf(stderr, "error: mem must be one of "
-X "0xb0000, 0xc8000, 0xd0000, 0xd8000, 0xe0000 or 0xe8000\n");
-X usage();
-X }
-X
-X return msnd_write_cfg_logical(config_port, 0, io, 0, irq, mem);
-}
-X
-static int cfg_mpu(int argc, char *argv[])
-{
-X int io, irq;
-X
-X if (argc < 2 ||
-X sscanf(argv[0], "0x%x", &io) != 1 ||
-X sscanf(argv[1], "%d", &irq) != 1)
-X usage();
-X
-X return msnd_write_cfg_logical(config_port, 1, io, 0, irq, 0);
-}
-X
-static int cfg_ide(int argc, char *argv[])
-{
-X int io0, io1, irq;
-X
-X if (argc < 3 ||
-X sscanf(argv[0], "0x%x", &io0) != 1 ||
-X sscanf(argv[1], "0x%x", &io1) != 1 ||
-X sscanf(argv[2], "%d", &irq) != 1)
-X usage();
-X
-X return msnd_write_cfg_logical(config_port, 2, io0, io1, irq, 0);
-}
-X
-static int cfg_joystick(int argc, char *argv[])
-{
-X int io;
-X
-X if (argc < 1 ||
-X sscanf(argv[0], "0x%x", &io) != 1)
-X usage();
-X
-X return msnd_write_cfg_logical(config_port, 3, io, 0, 0, 0);
-}
-X
-int main(int argc, char *argv[])
-{
-X char *device;
-X int rv = 0;
-X
-X --argc; ++argv;
-X
-X if (argc < 2)
-X usage();
-X
-X sscanf(argv[0], "0x%x", &config_port);
-X if (config_port != 0x250 && config_port != 0x260 && config_port != 0x270) {
-X fprintf(stderr, "error: <config port> must be 0x250, 0x260 or 0x270\n");
-X exit(1);
-X }
-X if (ioperm(config_port, 2, 1)) {
-X perror("ioperm");
-X fprintf(stderr, "note: pinnaclecfg must be run as root\n");
-X exit(1);
-X }
-X device = argv[1];
-X
-X argc -= 2; argv += 2;
-X
-X if (strcmp(device, "reset") == 0)
-X rv = cfg_reset();
-X else if (strcmp(device, "show") == 0)
-X rv = cfg_show();
-X else if (strcmp(device, "dsp") == 0)
-X rv = cfg_dsp(argc, argv);
-X else if (strcmp(device, "mpu") == 0)
-X rv = cfg_mpu(argc, argv);
-X else if (strcmp(device, "ide") == 0)
-X rv = cfg_ide(argc, argv);
-X else if (strcmp(device, "joystick") == 0)
-X rv = cfg_joystick(argc, argv);
-X else {
-X fprintf(stderr, "error: unknown device %s\n", device);
-X usage();
-X }
-X
-X if (rv)
-X fprintf(stderr, "error: device configuration failed\n");
-X
-X return rv ? 1 : 0;
-}
-SHAR_EOF
- $shar_touch -am 1204092598 'MultiSound.d/pinnaclecfg.c' &&
- chmod 0664 'MultiSound.d/pinnaclecfg.c' ||
- $echo 'restore of' 'MultiSound.d/pinnaclecfg.c' 'failed'
- if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
- && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
- md5sum -c << SHAR_EOF >/dev/null 2>&1 \
- || $echo 'MultiSound.d/pinnaclecfg.c:' 'MD5 check failed'
-366bdf27f0db767a3c7921d0a6db20fe MultiSound.d/pinnaclecfg.c
-SHAR_EOF
- else
- shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/pinnaclecfg.c'`"
- test 10235 -eq "$shar_count" ||
- $echo 'MultiSound.d/pinnaclecfg.c:' 'original size' '10235,' 'current size' "$shar_count!"
- fi
-fi
-# ============= MultiSound.d/Makefile ==============
-if test -f 'MultiSound.d/Makefile' && test "$first_param" != -c; then
- $echo 'x -' SKIPPING 'MultiSound.d/Makefile' '(file already exists)'
-else
- $echo 'x -' extracting 'MultiSound.d/Makefile' '(text)'
- sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/Makefile' &&
-CC = gcc
-CFLAGS = -O
-PROGS = setdigital msndreset pinnaclecfg conv
-X
-all: $(PROGS)
-X
-clean:
-X rm -f $(PROGS)
-SHAR_EOF
- $shar_touch -am 1204092398 'MultiSound.d/Makefile' &&
- chmod 0664 'MultiSound.d/Makefile' ||
- $echo 'restore of' 'MultiSound.d/Makefile' 'failed'
- if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
- && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
- md5sum -c << SHAR_EOF >/dev/null 2>&1 \
- || $echo 'MultiSound.d/Makefile:' 'MD5 check failed'
-76ca8bb44e3882edcf79c97df6c81845 MultiSound.d/Makefile
-SHAR_EOF
- else
- shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/Makefile'`"
- test 106 -eq "$shar_count" ||
- $echo 'MultiSound.d/Makefile:' 'original size' '106,' 'current size' "$shar_count!"
- fi
-fi
-# ============= MultiSound.d/conv.l ==============
-if test -f 'MultiSound.d/conv.l' && test "$first_param" != -c; then
- $echo 'x -' SKIPPING 'MultiSound.d/conv.l' '(file already exists)'
-else
- $echo 'x -' extracting 'MultiSound.d/conv.l' '(text)'
- sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/conv.l' &&
-%%
-[ \n\t,\r]
-\;.*
-DB
-[0-9A-Fa-f]+H { int n; sscanf(yytext, "%xH", &n); printf("%c", n); }
-%%
-int yywrap() { return 1; }
-main() { yylex(); }
-SHAR_EOF
- $shar_touch -am 0828231798 'MultiSound.d/conv.l' &&
- chmod 0664 'MultiSound.d/conv.l' ||
- $echo 'restore of' 'MultiSound.d/conv.l' 'failed'
- if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
- && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
- md5sum -c << SHAR_EOF >/dev/null 2>&1 \
- || $echo 'MultiSound.d/conv.l:' 'MD5 check failed'
-d2411fc32cd71a00dcdc1f009e858dd2 MultiSound.d/conv.l
-SHAR_EOF
- else
- shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/conv.l'`"
- test 141 -eq "$shar_count" ||
- $echo 'MultiSound.d/conv.l:' 'original size' '141,' 'current size' "$shar_count!"
- fi
-fi
-# ============= MultiSound.d/msndreset.c ==============
-if test -f 'MultiSound.d/msndreset.c' && test "$first_param" != -c; then
- $echo 'x -' SKIPPING 'MultiSound.d/msndreset.c' '(file already exists)'
-else
- $echo 'x -' extracting 'MultiSound.d/msndreset.c' '(text)'
- sed 's/^X//' << 'SHAR_EOF' > 'MultiSound.d/msndreset.c' &&
-/*********************************************************************
-X *
-X * msndreset.c - resets the MultiSound card
-X *
-X * Copyright (C) 1998 Andrew Veliath
-X *
-X * This program is free software; you can redistribute it and/or modify
-X * it under the terms of the GNU General Public License as published by
-X * the Free Software Foundation; either version 2 of the License, or
-X * (at your option) any later version.
-X *
-X * This program is distributed in the hope that it will be useful,
-X * but WITHOUT ANY WARRANTY; without even the implied warranty of
-X * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-X * GNU General Public License for more details.
-X *
-X * You should have received a copy of the GNU General Public License
-X * along with this program; if not, write to the Free Software
-X * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-X *
-X ********************************************************************/
-X
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/ioctl.h>
-#include <sys/soundcard.h>
-X
-int main(int argc, char *argv[])
-{
-X int fd;
-X
-X if (argc != 2) {
-X fprintf(stderr, "usage: msndreset <mixer device>\n");
-X exit(1);
-X }
-X
-X if ((fd = open(argv[1], O_RDWR)) < 0) {
-X perror(argv[1]);
-X exit(1);
-X }
-X
-X if (ioctl(fd, SOUND_MIXER_PRIVATE1, 0) < 0) {
-X fprintf(stderr, "error: msnd ioctl reset failed\n");
-X perror("ioctl");
-X close(fd);
-X exit(1);
-X }
-X
-X close(fd);
-X
-X return 0;
-}
-SHAR_EOF
- $shar_touch -am 1204100698 'MultiSound.d/msndreset.c' &&
- chmod 0664 'MultiSound.d/msndreset.c' ||
- $echo 'restore of' 'MultiSound.d/msndreset.c' 'failed'
- if ( md5sum --help 2>&1 | grep 'sage: md5sum \[' ) >/dev/null 2>&1 \
- && ( md5sum --version 2>&1 | grep -v 'textutils 1.12' ) >/dev/null; then
- md5sum -c << SHAR_EOF >/dev/null 2>&1 \
- || $echo 'MultiSound.d/msndreset.c:' 'MD5 check failed'
-c52f876521084e8eb25e12e01dcccb8a MultiSound.d/msndreset.c
-SHAR_EOF
- else
- shar_count="`LC_ALL= LC_CTYPE= LANG= wc -c < 'MultiSound.d/msndreset.c'`"
- test 1472 -eq "$shar_count" ||
- $echo 'MultiSound.d/msndreset.c:' 'original size' '1472,' 'current size' "$shar_count!"
- fi
-fi
-rm -fr _sh01426
-exit 0
diff --git a/Documentation/sound/oss/OPL3 b/Documentation/sound/oss/OPL3
deleted file mode 100644
index 2468ff827688..000000000000
--- a/Documentation/sound/oss/OPL3
+++ /dev/null
@@ -1,6 +0,0 @@
-A pure OPL3 card is nice and easy to configure. Simply do
-
-insmod opl3 io=0x388
-
-Change the I/O address in the very unlikely case that this card is
-configured differently.
diff --git a/Documentation/sound/oss/Opti b/Documentation/sound/oss/Opti
deleted file mode 100644
index 4cd5d9ab3580..000000000000
--- a/Documentation/sound/oss/Opti
+++ /dev/null
@@ -1,218 +0,0 @@
-Support for the OPTi 82C931 chip
---------------------------------
-Note: parts of this README file apply also to other
-cards that use the mad16 driver.
-
-Some items in this README file are based on features
-added to the sound driver after Linux-2.1.91 was out.
-At the time of writing I do not know which official
-kernel release will include these features.
-Please do not report inconsistencies on older Linux
-kernels.
-
-The OPTi 82C931 is supported in its non-PnP mode.
-Usually you do not need to set jumpers, etc. The sound driver
-will check the card status and if it is required it will
-force the card into a mode in which it can be programmed.
-
-If you have another OS installed on your computer it is recommended
-that Linux and the other OS use the same resources.
-
-Also, it is recommended that resources specified in /etc/modprobe.d/*.conf
-and resources specified in /etc/isapnp.conf agree.
-
-Compiling the sound driver
---------------------------
-I highly recommend that you build a modularized sound driver.
-This document does not cover a sound-driver which is built in
-the kernel.
-
-Sound card support should be enabled as a module (choose m).
-Answer 'm' for these items:
- Generic OPL2/OPL3 FM synthesizer support (CONFIG_SOUND_ADLIB)
- Microsoft Sound System support (CONFIG_SOUND_MSS)
- Support for OPTi MAD16 and/or Mozart based cards (CONFIG_SOUND_MAD16)
- FM synthesizer (YM3812/OPL-3) support (CONFIG_SOUND_YM3812)
-
-The configuration menu may ask for addresses, IRQ lines or DMA
-channels. If the card is used as a module the module loading
-options will override these values.
-
-For the OPTi 931 you can answer 'n' to:
- Support MIDI in older MAD16 based cards (requires SB) (CONFIG_SOUND_MAD16_OLDCARD)
-If you do need MIDI support in a Mozart or C928 based card you
-need to answer 'm' to the above question. In that case you will
-also need to answer 'm' to:
- '100% Sound Blaster compatibles (SB16/32/64, ESS, Jazz16) support' (CONFIG_SOUND_SB)
-
-Go on and compile your kernel and modules. Install the modules. Run depmod -a.
-
-Using isapnptools
------------------
-In most systems with a PnP BIOS you do not need to use isapnp. The
-initialization provided by the BIOS is sufficient for the driver
-to pick up the card and continue initialization.
-
-If that fails, or if you have other PnP cards, you need to use isapnp
-to initialize the card.
-This was tested with isapnptools-1.11 but I recommend that you use
-isapnptools-1.13 (or newer). Run pnpdump to dump the information
-about your PnP cards. Then edit the resulting file and select
-the options of your choice. This file is normally installed as
-/etc/isapnp.conf.
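-
-For reference, the workflow described above (using the standard pnpdump and
-isapnp tools from the isapnptools package) might look roughly like this:
-
-pnpdump > /etc/isapnp.conf   # dump resource templates for all PnP cards
-vi /etc/isapnp.conf          # uncomment and adjust the IO/IRQ/DMA entries
-isapnp /etc/isapnp.conf      # program the cards with the chosen values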
-
-The driver has one limitation with respect to I/O port resources:
-IO3 base must be 0x0E0C. Although isapnp allows other ports, this
-address is hard-coded into the driver.
-
-Using kmod and autoloading the sound driver
--------------------------------------------
-Config files in '/etc/modprobe.d/' are used as below:
-
-alias mixer0 mad16
-alias audio0 mad16
-alias midi0 mad16
-alias synth0 opl3
-options sb mad16=1
-options mad16 irq=10 dma=0 dma16=1 io=0x530 joystick=1 cdtype=0
-options opl3 io=0x388
-install mad16 /sbin/modprobe -i mad16 && /sbin/ad1848_mixer_reroute 14 8 15 3 16 6
-
-If you have an MPU daughtercard or onboard MPU you will want to add to the
-"options mad16" line - eg
-
-options mad16 irq=5 dma=0 dma16=3 io=0x530 mpu_io=0x330 mpu_irq=9
-
-This sets the I/O and IRQ of the MPU.
-
-
-Explanation:
-
-alias mixer0 mad16
-alias audio0 mad16
-alias midi0 mad16
-alias synth0 opl3
-
-When any sound device is opened the kernel requests auto-loading
-of char-major-14. There is a built-in alias that translates this
-request to loading the main sound module.
-
-The sound module in its turn will request loading of a sub-driver
-for mixer, audio, midi or synthesizer device. The first 3 are
-supported by the mad16 driver. The synth device is supported
-by the opl3 driver.
-
-There is currently no way to autoload the sound device driver
-if more than one card is installed.
-
-options sb mad16=1
-
-This is left for historical reasons. If you enable the
-config option 'Support MIDI in older MAD16 based cards (requires SB)'
-or if you use an older mad16 driver it will force loading of the
-SoundBlaster driver. This option tells the SB driver not to look
-for a SB card but to wait for the mad16 driver.
-
-options mad16 irq=10 dma=0 dma16=1 io=0x530 joystick=1 cdtype=0
-options opl3 io=0x388
-
-install mad16 /sbin/modprobe -i mad16 && /sbin/ad1848_mixer_reroute 14 8 15 3 16 6
-
-This sets resources and options for the mad16 and opl3 drivers.
-I use two DMA channels (only one is required) to enable full duplex.
-joystick=1 enables the joystick port. cdtype=0 disables the cd port.
-You can also set mpu_io and mpu_irq in the mad16 options for the
-uart401 driver.
-
-This tells modprobe to run /sbin/ad1848_mixer_reroute after
-mad16 is successfully loaded and initialized. The source
-for ad1848_mixer_reroute is appended to the end of this readme
-file. It is impossible for the sound driver to know the actual
-connections to the mixer. The 3 inputs intended for cd, synth
-and line-in are mapped to the generic inputs line1, line2 and
-line3. This program reroutes these mixer channels to their
-proper names (note that the correct mapping depends on the actual
-sound card that you use).
-The numeric parameters mean:
- 14=line1 8=cd - reroute line1 to the CD input.
- 15=line2 3=synth - reroute line2 to the synthesizer input.
- 16=line3 6=line - reroute line3 to the line input.
-For reference on other input names look at the file
-/usr/include/linux/soundcard.h.
-
-Using a joystick
------------------
-You must enable the joystick port in the mad16 options (and also
-in /etc/isapnp.conf if you use it).
-Tested with regular analog joysticks.
-
-A CDROM drive connected to the sound card
------------------------------------------
-The 82C931 chip has support only for a secondary ATAPI CD-ROM
-(cdtype=8). Loading the mad16 driver resets the C931 chip
-and if a cdrom was already mounted it may cause a complete
-system hang. Do not use the sound card if you have an alternative.
-If you do use the sound card it is important that you load
-the mad16 driver (use "modprobe mad16" to prevent auto-unloading)
-before the cdrom is accessed the first time.
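-
-A minimal sketch of that load order (the resource values are just the ones
-used in the example earlier in this file and may differ on your system):
-
-modprobe mad16 irq=10 dma=0 dma16=1 io=0x530 cdtype=8
-# only after mad16 is loaded, load your CDROM driver and mount the disc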
-
-Using the sound driver built-in to the kernel may help here, but...
-Most new systems have a PnP BIOS and also two IDE controllers.
-The IDE controller on the sound card may be needed only on older
-systems (which have only one IDE controller) but these systems
-also do not have a PnP BIOS - requiring isapnptools and a modularized
-driver.
-
-Known problems
---------------
-1. See the section on "A CDROM drive connected to the sound card".
-
-2. On my system the codec cannot capture companded sound samples.
- (e.g., recording from /dev/audio). When any companded capture is
- requested I get stereo-16 bit samples instead. Playback of
- companded samples works well. Apparently this problem is not common
- to all C931 based cards. I do not know how to identify cards that
- have this problem.
-
-Source for ad1848_mixer_reroute.c
----------------------------------
-#include <stdio.h>
-#include <stdlib.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <linux/soundcard.h>
-
-static char *mixer_names[SOUND_MIXER_NRDEVICES] =
- SOUND_DEVICE_LABELS;
-
-int
-main(int argc, char **argv) {
- int val, from, to;
- int i, fd;
-
- fd = open("/dev/mixer", O_RDWR);
- if(fd < 0) {
- perror("/dev/mixer");
- return 1;
- }
-
- for(i = 2; i < argc; i += 2) {
- from = atoi(argv[i-1]);
- to = atoi(argv[i]);
-
- if(to == SOUND_MIXER_NONE)
- fprintf(stderr, "%s: turning off mixer %s\n",
- argv[0], mixer_names[from]);
- else
- fprintf(stderr, "%s: rerouting mixer %s to %s\n",
- argv[0], mixer_names[from], mixer_names[to]);
-
- val = from << 8 | to;
-
- if(ioctl(fd, SOUND_MIXER_PRIVATE2, &val)) {
- perror("AD1848 mixer reroute");
- return 1;
- }
- }
-
- return 0;
-}
-
diff --git a/Documentation/sound/oss/PAS16 b/Documentation/sound/oss/PAS16
deleted file mode 100644
index 5c27229eec8c..000000000000
--- a/Documentation/sound/oss/PAS16
+++ /dev/null
@@ -1,162 +0,0 @@
-Pro Audio Spectrum 16 for 2.3.99 and later
-=========================================
-by Thomas Molina (tmolina@home.com)
-last modified 3 Mar 2001
-Acknowledgement to Axel Boldt (boldt@math.ucsb.edu) for stuff taken
-from Configure.help, Riccardo Facchetti for stuff from README.OSS,
-and others whose names I could not find.
-
-This documentation is relevant for the PAS16 driver (pas2_card.c and
-friends) under kernel version 2.3.99 and later. If you are
-unfamiliar with configuring sound under Linux, please read the
-Sound-HOWTO, Documentation/sound/oss/Introduction and other
-relevant docs first.
-
-The following information is relevant information from README.OSS
-and legacy docs for the Pro Audio Spectrum 16 (PAS16):
-==================================================================
-
-The pas2_card.c driver supports the following cards --
-Pro Audio Spectrum 16 (PAS16) and compatibles:
- Pro Audio Spectrum 16
- Pro Audio Studio 16
- Logitech Sound Man 16
- NOTE! The original Pro Audio Spectrum as well as the PAS+ are not
- and will not be supported by the driver.
-
-The sound driver configuration dialog
--------------------------------------
-
-Sound configuration starts by asking some yes/no questions. Be careful
-when answering these questions, since answering y to a question may
-prevent some later ones from being asked. For example, don't answer y to
-the question about PAS16 if you don't really have a PAS16. Sound
-configuration may also be made modular by answering m to the configuration
-options presented.
-
-Note also that all questions may not be asked. The configuration program
-may disable some questions depending on the earlier choices. It may also
-select some options automatically as well.
-
- "ProAudioSpectrum 16 support",
- - Answer 'y'_ONLY_ if you have a Pro Audio Spectrum _16_,
- Pro Audio Studio 16 or Logitech SoundMan 16 (be sure that
- you read the above list correctly). Don't answer 'y' if you
- have some other card made by Media Vision or Logitech since they
- are not PAS16 compatible.
- NOTE! Since 3.5-beta10 you need to enable SB support (next question)
- if you want to use the SB emulation of PAS16. It's also possible to
- disable the emulation if you want to use a true SB card together with PAS16
- (there is another question about this that is asked later).
-
- "Generic OPL2/OPL3 FM synthesizer support",
- - Answer 'y' if your card has an FM chip made by Yamaha (OPL2/OPL3/OPL4).
- The PAS16 has an OPL3-compatible FM chip.
-
-With PAS16 you can use two audio device files at the same time. /dev/dsp (and
-/dev/audio) is connected to the 8/16 bit native codec and the /dev/dsp1 (and
-/dev/audio1) is connected to the SB emulation (8 bit mono only).
-
-
-The new stuff for 2.3.99 and later
-============================================================================
-The following configuration options are relevant to configuring the PAS16:
-
-Sound card support
-CONFIG_SOUND
- If you have a sound card in your computer, i.e. if it can say more
- than an occasional beep, say Y. Be sure to have all the information
- about your sound card and its configuration down (I/O port,
- interrupt and DMA channel), because you will be asked for it.
-
- You want to read the Sound-HOWTO, available from
- http://www.tldp.org/docs.html#howto . General information
- about the modular sound system is contained in the file
- Documentation/sound/oss/Introduction. The file
- Documentation/sound/oss/README.OSS contains some slightly outdated but
- still useful information as well.
-
-OSS sound modules
-CONFIG_SOUND_OSS
- OSS is the Open Sound System suite of sound card drivers. They make
- sound programming easier since they provide a common API. Say Y or M
- here (the module will be called sound.o) if you haven't found a
- driver for your sound card above, then pick your driver from the
- list below.
-
-Persistent DMA buffers
-CONFIG_SOUND_DMAP
- Linux can often have problems allocating DMA buffers for ISA sound
- cards on machines with more than 16MB of RAM. This is because ISA
- DMA buffers must exist below the 16MB boundary and it is quite
- possible that a large enough free block in this region cannot be
- found after the machine has been running for a while. If you say Y
- here the DMA buffers (64Kb) will be allocated at boot time and kept
- until the shutdown. This option is only useful if you said Y to
- "OSS sound modules", above. If you said M to "OSS sound modules"
- then you can get the persistent DMA buffer functionality by passing
- the command-line argument "dmabuf=1" to the sound.o module.
-
- Say y here for PAS16.
-
-ProAudioSpectrum 16 support
-CONFIG_SOUND_PAS
- Answer Y only if you have a Pro Audio Spectrum 16, ProAudio Studio
- 16 or Logitech SoundMan 16 sound card. Don't answer Y if you have
- some other card made by Media Vision or Logitech since they are not
- PAS16 compatible. It is not necessary to enable the separate
- Sound Blaster support; it is included in the PAS driver.
-
- If you compile the driver into the kernel, you have to add
- "pas2=<io>,<irq>,<dma>,<dma2>,<sbio>,<sbirq>,<sbdma>,<sbdma2>
- to the kernel command line.
-
-FM Synthesizer (YM3812/OPL-3) support
-CONFIG_SOUND_YM3812
- Answer Y if your card has an FM chip made by Yamaha (OPL2/OPL3/OPL4).
- Answering Y is usually a safe and recommended choice, however some
- cards may have software (TSR) FM emulation. Enabling FM support with
- these cards may cause trouble (I don't currently know of any such
- cards, however).
- Please read the file Documentation/sound/oss/OPL3 if your card has an
- OPL3 chip.
- If you compile the driver into the kernel, you have to add
- "opl3=<io>" to the kernel command line.
-
- If you compile your drivers into the kernel, you MUST configure
- OPL3 support as a module for PAS16 support to work properly.
- You can then get OPL3 functionality by issuing the command:
- insmod opl3
- In addition, you must either add the following line to
- /etc/modprobe.d/*.conf:
- options opl3 io=0x388
- or else add the following line to /etc/lilo.conf:
- opl3=0x388
-
-
-EXAMPLES
-===================================================================
-To use the PAS16 in my computer I have enabled the following sound
-configuration options:
-
-CONFIG_SOUND=y
-CONFIG_SOUND_OSS=y
-CONFIG_SOUND_TRACEINIT=y
-CONFIG_SOUND_DMAP=y
-CONFIG_SOUND_PAS=y
-CONFIG_SOUND_SB=n
-CONFIG_SOUND_YM3812=m
-
-I have also included the following append line in /etc/lilo.conf:
-append="pas2=0x388,10,3,-1,0x220,5,1,-1 sb=0x220,5,1,-1 opl3=0x388"
-
-The io address of 0x388 is the default configuration on the PAS16. The
-irq of 10 and dma of 3 may not match your installation. The above
-configuration enables PAS16, 8-bit Soundblaster and OPL3
-functionality. If Soundblaster functionality is not desired, the
-following line would be appropriate:
-append="pas2=0x388,10,3,-1,0,-1,-1,-1 opl3=0x388"
-
-If sound is built totally modular, the above options may be
-specified in /etc/modprobe.d/*.conf for pas2, sb and opl3
-respectively.
diff --git a/Documentation/sound/oss/PSS b/Documentation/sound/oss/PSS
deleted file mode 100644
index 187b9525e1f6..000000000000
--- a/Documentation/sound/oss/PSS
+++ /dev/null
@@ -1,41 +0,0 @@
-The PSS cards and other ECHO based cards provide an onboard DSP with
-downloadable programs and also have an AD1848 "Microsoft Sound System"
-device. The PSS driver enables MSS and MPU401 modes of the card. SB
-is not enabled since it doesn't work concurrently with MSS.
-
-If you build this driver as a module then the driver takes the following
-parameters
-
-pss_io The I/O base the PSS card is configured at (normally 0x220
- or 0x240)
-
-mss_io The base address of the Microsoft Sound System interface.
- This is normally 0x530, but may be 0x604 or other addresses.
-
-mss_irq The interrupt assigned to the Microsoft Sound System
- emulation. IRQs 3, 5, 7, 9, 10, 11 and 12 are available. If you
- get IRQ errors be sure to check the interrupt is set to
- "ISA/Legacy" in the BIOS on modern machines.
-
-mss_dma The DMA channel used by the Microsoft Sound System.
- This can be 0, 1, or 3. DMA 0 is not available on older
- machines and will cause a crash on them.
-
-mpu_io The MPU emulation base address. This sets the base of the
- synthesizer. It is typically 0x330 but can be altered.
-
-mpu_irq The interrupt to use for the synthesizer. It must differ
- from the IRQ used by the Microsoft Sound System port.
-
-
-The mpu_io/mpu_irq fields are optional. If they are not specified the
-synthesizer parts are not configured.
-
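-Putting these parameters together, an illustrative module load line (the
-values below are examples only and must match your hardware settings) could
-look like this:
-
-/sbin/modprobe pss pss_io=0x220 mss_io=0x530 mss_irq=10 mss_dma=1 mpu_io=0x330 mpu_irq=5
-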
-When the module is loaded it looks for a file called
-/etc/sound/pss_synth. This is the firmware file from the DOS install disks.
-This file holds a general MIDI emulation. The file expected is called
-genmidi.ld on newer DOS driver install disks and synth.ld on older ones.
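-
-For example (the source path below is just a placeholder for wherever you
-keep the DOS driver files), installing the newer firmware file would be:
-
-cp /path/to/dos/disks/genmidi.ld /etc/sound/pss_synth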
-
-You can also load alternative DSP algorithms into the card if you wish. One
-alternative driver can be found at http://www.mpg123.de/
-
diff --git a/Documentation/sound/oss/PSS-updates b/Documentation/sound/oss/PSS-updates
deleted file mode 100644
index 11914a1dc7e7..000000000000
--- a/Documentation/sound/oss/PSS-updates
+++ /dev/null
@@ -1,88 +0,0 @@
- This file contains notes for users of PSS sound cards who wish to use the
-newly added features of the newest version of this driver.
-
- The major enhancement present in this new revision of this driver is the
-addition of two new module parameters that allow you to take full advantage of
-all the features present on your PSS sound card. These features include the
-ability to enable both the builtin CDROM and joystick ports.
-
-pss_enable_joystick
-
- This parameter is basically a flag. A 0 will leave the joystick port
-disabled, while a non-zero value would enable the joystick port. The default
-setting is pss_enable_joystick=0 as this keeps this driver fully compatible
-with systems that were using previous versions of this driver. If you wish to
-enable the joystick port you will have to add pss_enable_joystick=1 as an
-argument to the driver. To actually use the joystick port you will then have
-to load the joystick driver itself. Just remember to load the joystick driver
-AFTER the pss sound driver.
-
-pss_cdrom_port
-
- This parameter takes a port address as its parameter. Any available port
-address can be specified to enable the CDROM port, except for 0x0 and -1 as
-these values would leave the port disabled. Like the joystick port, the cdrom
-port will require that an appropriate CDROM driver be loaded before you can make
-use of the newly enabled CDROM port. Like the joystick port option above,
-remember to load the CDROM driver AFTER the pss sound driver. While it may
-differ on some PSS sound cards, all the PSS sound cards that I have seen have a
-builtin Wearnes CDROM port. If this is the case with your PSS sound card you
-should load aztcd with the appropriate port option that matches the port you
-assigned to the CDROM port when you loaded your pss sound driver. (ex.
-modprobe pss pss_cdrom_port=0x340 && modprobe aztcd aztcd=0x340) The default
-setting of this parameter leaves the CDROM port disabled to maintain full
-compatibility with systems using previous versions of this driver.
-
- Other options have also been added for the added convenience and utility
-of the user. These options are only available if this driver is loaded as a
-module.
-
-pss_no_sound
-
- This module parameter is a flag that can be used to tell the driver to
-just configure non-sound components. 0 configures all components, a non-0
-value will only attempt to configure the CDROM and joystick ports. This
-parameter can be used by a user who only wishes to use the builtin joystick
-and/or CDROM port(s) of his PSS sound card. If this driver is loaded with this
-parameter and with the parameter below set to true then a user can safely unload
-this driver with the following command "rmmod pss && rmmod ad1848 && rmmod
-mpu401 && rmmod sound && rmmod soundcore" and retain the full functionality of
-his CDROM and/or joystick port(s) while gaining back the memory previously used
-by the sound drivers. The default setting of this parameter is 0 to retain
-full behavioral compatibility with previous versions of this driver.
-
-pss_keep_settings
-
- This parameter can be used to specify whether you want the driver to reset
-all emulations whenever it is unloaded. This can be useful for those who are
-sharing resources (io ports, IRQ's, DMA's) between different ISA cards. This
-flag can also be useful in that future versions of this driver may reset all
-emulations by default on the driver's unloading (as it probably should), so
-specifying it now will ensure that all future versions of this driver will
-continue to work as expected. The default value of this parameter is 1 to
-retain full behavioral compatibility with previous versions of this driver.
-
-pss_firmware
-
- This parameter can be used to specify the file containing the firmware
-code so that a user could tell the driver where that file is located instead
-of having to put it in a predefined location with a predefined name. The
-default setting of this parameter is "/etc/sound/pss_synth" as this was the
-path and filename hardcoded in previous versions of this driver.
-
-Examples:
-
-# Normal PSS sound card system, loading of drivers.
-# Should be specified in an rc file (ex. Slackware uses /etc/rc.d/rc.modules).
-
-/sbin/modprobe pss pss_io=0x220 mpu_io=0x338 mpu_irq=9 mss_io=0x530 mss_irq=10 mss_dma=1 pss_cdrom_port=0x340 pss_enable_joystick=1
-/sbin/modprobe aztcd aztcd=0x340
-/sbin/modprobe joystick
-
-# System using the PSS sound card just for its CDROM and joystick ports.
-# Should be specified in an rc file (ex. Slackware uses /etc/rc.d/rc.modules).
-
-/sbin/modprobe pss pss_io=0x220 pss_cdrom_port=0x340 pss_enable_joystick=1 pss_no_sound=1
-/sbin/rmmod pss && /sbin/rmmod ad1848 && /sbin/rmmod mpu401 && /sbin/rmmod sound && /sbin/rmmod soundcore # This line not needed, but saves memory.
-/sbin/modprobe aztcd aztcd=0x340
-/sbin/modprobe joystick
diff --git a/Documentation/sound/oss/README.OSS b/Documentation/sound/oss/README.OSS
deleted file mode 100644
index a085ea3611a1..000000000000
--- a/Documentation/sound/oss/README.OSS
+++ /dev/null
@@ -1,1455 +0,0 @@
-Introduction
-------------
-
-This file is a collection of all the old Readme files distributed with
-OSS/Lite by Hannu Savolainen. Since the new Linux sound driver is founded
-on it, I think this information may still be interesting for users that
-have to configure their sound system.
-
-Be warned: Alan Cox is the current maintainer of the Linux sound driver so if
-you have problems with it, please contact him or the current device-specific
-driver maintainer (e.g. for aedsp16 specific problems contact me). If you have
-patches, contributions or suggestions send them to Alan: I'm sure they are
-welcome.
-
-In this document you will find a lot of references to OSS/Lite or ossfree:
-they are gone forever. Keeping this in mind, and taking it with a grain of
-salt, this document can still be interesting and very helpful.
-
-[ File edited 17.01.1999 - Riccardo Facchetti ]
-[ Edited miroSOUND section 19.04.2001 - Robert Siemer ]
-
-OSS/Free version 3.8 release notes
-----------------------------------
-
-Please read the SOUND-HOWTO (available from sunsite.unc.edu and other Linux FTP
-sites). It gives instructions about using sound with Linux. It's a bit out of
-date but still very useful. Information about bug fixes and such things
-is available from the web page (see above).
-
-Please check http://www.opensound.com/pguide for more info about programming
-with OSS API.
-
- ====================================================
-- THIS VERSION ____REQUIRES____ Linux 2.1.57 OR LATER.
- ====================================================
-
-Packages "snd-util-3.8.tar.gz" and "snd-data-0.1.tar.Z"
-contain useful utilities to be used with this driver.
-See http://www.opensound.com/ossfree/ for
-download instructions.
-
-If you are looking for the installation instructions, please
-look forward into this document.
-
-Supported sound cards
----------------------
-
-See below.
-
-Contributors
-------------
-
-This driver contains code by several contributors. In addition several other
-persons have given useful suggestions. The following is a list of major
-contributors. (I could have forgotten some names.)
-
- Craig Metz 1/2 of the PAS16 Mixer and PCM support
- Rob Hooft Volume computation algorithm for the FM synth.
- Mika Liljeberg uLaw encoding and decoding routines
- Jeff Tranter Linux SOUND HOWTO document
- Greg Lee Volume computation algorithm for the GUS and
- lots of valuable suggestions.
- Andy Warner ISC port
- Jim Lowe,
- Amancio Hasty Jr FreeBSD/NetBSD port
- Anders Baekgaard Bug hunting and valuable suggestions.
- Joerg Schubert SB16 DSP support (initial version).
- Andrew Robinson Improvements to the GUS driver
- Megens SA MIDI recording for SB and SB Pro (initial version).
- Mikael Nordqvist Linear volume support for GUS and
- nonblocking /dev/sequencer.
- Ian Hartas SVR4.2 port
- Markus Aroharju and
- Risto Kankkunen Major contributions to the mixer support
- of GUS v3.7.
- Hunyue Yau Mixer support for SG NX Pro.
- Marc Hoffman PSS support (initial version).
- Rainer Vranken Initialization for Jazz16 (initial version).
- Peter Trattler Initial version of loadable module support for Linux.
- JRA Gibson 16 bit mode for Jazz16 (initial version)
- Davor Jadrijevic MAD16 support (initial version)
- Gregor Hoffleit Mozart support (initial version)
- Riccardo Facchetti Audio Excel DSP 16 (aedsp16) support
- James Hightower Spotting a tiny but important bug in CS423x support.
- Denis Sablic OPTi 82C924 specific enhancements (non PnP mode)
- Tim MacKenzie Full duplex support for OPTi 82C930.
-
- Please look at lowlevel/README for more contributors.
-
-There are probably many other names missing. If you have sent me some
-patches and your name is not in the above list, please inform me.
-
-Sending your contributions or patches
--------------------------------------
-
-First of all it's highly recommended to contact me before sending anything
-or before even starting to do any work. Tell me what you suggest to be
-changed or what you have planned to do. Also ensure you are using the
-very latest (development) version of OSS/Free since the change may already be
-implemented there. In general it's a major waste of time to try to improve a
-several months old version. Information about the latest version can be found
-from http://www.opensound.com/ossfree. In general there is no point in
-sending me patches relative to production kernels.
-
-Sponsors etc.
--------------
-
-The following companies have greatly helped development of this driver
-in form of a free copy of their product:
-
-Novell, Inc. UnixWare personal edition + SDK
-The Santa Cruz Operation, Inc. A SCO OpenServer + SDK
-Ensoniq Corp, a SoundScape card and extensive amount of assistance
-MediaTrix Peripherals Inc, a AudioTrix Pro card + SDK
-Acer, Inc. a pair of AcerMagic S23 cards.
-
-In addition the following companies have provided me with a sufficient amount
-of technical information about at least some of their products (free or $$$):
-
-Advanced Gravis Computer Technology Ltd.
-Media Vision Inc.
-Analog Devices Inc.
-Logitech Inc.
-Aztech Labs Inc.
-Crystal Semiconductor Corporation,
-Integrated Circuit Systems Inc.
-OAK Technology
-OPTi
-Turtle Beach
-miro
-Ad Lib Inc. ($$)
-Music Quest Inc. ($$)
-Creative Labs ($$$)
-
-If you have some problems
-=========================
-
-Read the sound HOWTO (sunsite.unc.edu:/pub/Linux/docs/...?).
-Also look at the home page (http://www.opensound.com/ossfree). It may
-contain info about some recent bug fixes.
-
-It's likely that you will have some problems when trying to use the sound
-driver for the first time. Sound cards don't have a standard configuration,
-so there is no good default configuration to use. Please try to use the same
-I/O, DMA and IRQ values for the sound card as under DOS.
-
-If you get an error message when trying to use the driver, please look
-at /var/adm/messages for a more verbose error message.
-
-
-The following errors are likely with /dev/dsp and /dev/audio.
-
- - "No such device or address".
- This error indicates that there is no suitable hardware for the
- device file or the sound driver has been compiled without support for
- this particular device. For example /dev/audio and /dev/dsp will not
- work if "digitized voice support" was not enabled during "make config".
-
- - "Device or resource busy". Probably the IRQ (or DMA) channel
- required by the sound card is in use by some other device/driver.
-
- - "I/O error". Almost certainly (99%) it's an IRQ or DMA conflict.
- Look at the kernel messages in /var/adm/notice for more info.
-
- - "Invalid argument". The application is calling ioctl()
- with impossible parameters. Check that the application is
- for sound driver version 2.X or later.
-
-Linux installation
-==================
-
-IMPORTANT! Read this if you are installing a separately
- distributed version of this driver.
-
- Check that your kernel version works with this
- release of the driver (see Readme). Also verify
- that your current kernel version doesn't have more
- recent sound driver version than this one. IT'S HIGHLY
- RECOMMENDED THAT YOU USE THE SOUND DRIVER VERSION THAT
- IS DISTRIBUTED WITH KERNEL SOURCES.
-
-- When installing a separately distributed sound driver you should first
- read the above notice. Then try to find proper directory where and how
- to install the driver sources. You should not try to install a separately
- distributed driver version if you are not able to find the proper way
- yourself (in this case use the version that is distributed with kernel
- sources). Remove old version of linux/drivers/sound directory before
- installing new files.
-
-- To build the device files you need to run the enclosed shell script
- (see below). You need to do this only when installing the sound driver for
- the first time or when upgrading to a much more recent version than the
- earlier one.
-
-- Configure and compile Linux as normally (remember to include the
- sound support during "make config"). Please refer to kernel documentation
- for instructions about configuring and compiling kernel. File Readme.cards
- contains card specific instructions for configuring this driver for
- use with various sound cards.
-
-Boot time configuration (using lilo and insmod)
------------------------------------------------
-
-This information has been removed. Too many users didn't believe
-that it's really not necessary to use this method. Please look at
-Readme of sound driver version 3.0.1 if you still want to use this method.
-
-Problems
---------
-
-Common error messages:
-
-- /dev/???????: No such file or directory.
-Run the script at the end of this file.
-
-- /dev/???????: No such device.
-You are not running kernel which contains the sound driver. When using
-modularized sound driver this error means that the sound driver is not
-loaded.
-
-- /dev/????: No such device or address.
-The sound driver didn't detect a suitable card when initializing. Please look at
-Readme.cards for info about configuring the driver with your card. Also
-check for possible boot (insmod) time error messages in /var/adm/messages.
-
-- Other messages or problems
-Please check http://www.opensound.com/ossfree for more info.
-
-Configuring version 3.8 (for Linux) with some common sound cards
-================================================================
-
-This document describes configuring sound cards with the freeware version of
-Open Sound Systems (OSS/Free). Information about the commercial version
-(OSS/Linux) and its configuration is available from
-http://www.opensound.com/linux.html. Information presented here is
-not valid for OSS/Linux.
-
-If you are unsure about how to configure OSS/Free
-you can download the free evaluation version of OSS/Linux from the above
-address. There is a chance that it can autodetect your sound card. In this case
-you can use the information included in soundon.log when configuring OSS/Free.
-
-
-IMPORTANT! This document covers only cards that were "known" when
- this driver version was released. Please look at
- http://www.opensound.com/ossfree for info about
- cards introduced recently.
-
- When configuring the sound driver, you should carefully
- check each sound configuration option (particularly
- "Support for /dev/dsp and /dev/audio"). The default values
- offered by these programs are not necessarily valid.
-
-
-THE BIGGEST MISTAKES YOU CAN MAKE
-=================================
-
-1. Assuming that the card is Sound Blaster compatible when it's not.
---------------------------------------------------------------------
-
-The number one mistake is to assume that your card is compatible with
-Sound Blaster. Only the cards made by Creative Technology or which have
-one or more chips labeled by Creative are SB compatible. In addition there
-are few sound chipsets which are SB compatible in Linux such as ESS1688 or
-Jazz16. Note that SB compatibility in DOS/Windows does _NOT_ mean anything
-in Linux.
-
-IF YOU REALLY ARE 150% SURE YOU HAVE A SOUND BLASTER YOU CAN SKIP THE REST OF
-THIS CHAPTER.
-
-For most other "supposed to be SB compatible" cards you have to use other
-than SB drivers (see below). It is possible to get most sound cards to work
-in SB mode but in general it's a complete waste of time. There are several
-problems which you will encounter by using SB mode with cards that are not
-truly SB compatible:
-
-- The SB emulation is at most SB Pro (DSP version 3.x) which means that
-you get only 8 bit audio (there is always an another ("native") mode which
-gives the 16 bit capability). The 8 bit only operation is the reason why
-many users claim that sound quality in Linux is much worse than in DOS.
-In addition some applications require 16 bit mode and they produce just
-noise with a 8 bit only device.
-- The card may work only in some cases but refuse to work most of the
-time. The SB compatible mode always requires special initialization which is
-done by the DOS/Windows drivers. This kind of card works in Linux after
-you have warm booted into it from DOS, but it doesn't work after a cold boot
-(power on or reset).
-- You get the famous "DMA timed out" messages. Usually all SB clones have
-software selectable IRQ and DMA settings. If the (power on default) values
-currently used by the card don't match configuration of the driver you will
-get the above error message whenever you try to record or play. There are
-few other reasons to the DMA timeout message but using the SB mode seems
-to be the most common cause.
-
-2. Trying to use a PnP (Plug & Play) card just like an ordinary sound card
---------------------------------------------------------------------------
-
-Plug & Play is a protocol defined by Intel and Microsoft. It lets operating
-systems easily identify and reconfigure the I/O ports, IRQs and DMAs of ISA
-cards. The problem with PnP cards is that standard Linux (versions 2.1.x and
-earlier) doesn't currently support PnP. This means that you will have
-to use some special tricks (see later) to get a PnP card alive. Many PnP cards
-work after they have been initialized but this is not always the case.
-
-There are sometimes both PnP and non-PnP versions of the same sound card.
-The non-PnP version is the original model which usually has been discontinued
-more than a year ago. The PnP version has the same name but with "PnP"
-appended to it (sometimes not). This causes major confusion since the non-PnP
-model works with Linux but the PnP one doesn't.
-
-You should carefully check if "Plug & Play" or "PnP" is mentioned in the name
-of the card or in the documentation or package that came with the card.
-Everything described in the rest of this document is not necessarily valid for
-PnP models of sound cards even if you have managed to wake up the card properly.
-Many PnP cards are simply too different from their non-PnP ancestors which are
-covered by this document.
-
-
-Cards that are not (fully) supported by this driver
-===================================================
-
-See http://www.opensound.com/ossfree for information about sound cards
-to be supported in future.
-
-
-How to use sound without recompiling kernel and/or sound driver
-===============================================================
-
-There is a commercial sound driver which comes in precompiled form and doesn't
-require recompiling of the kernel. See http://www.4Front-tech.com/oss.html for
-more info.
-
-
-Configuring PnP cards
-=====================
-
-New versions of most sound cards use the so-called ISA PnP protocol for
-soft configuring their I/O, IRQ, DMA and shared memory resources.
-Currently at least cards made by Creative Technology (SB32 and SB32AWE
-PnP), Gravis (GUS PnP and GUS PnP Pro), Ensoniq (Soundscape PnP) and
-Aztech (some Sound Galaxy models) use PnP technology. The CS4232/4236 audio
-chip by Crystal Semiconductor (Intel Atlantis, HP Pavilion and many other
-motherboards) is also based on PnP technology but there is a "native" driver
-available for it (see information about CS4232 later in this document).
-
-PnP sound cards (as well as most other PnP ISA cards) are not supported
-by this version of the driver. Proper
-support for them should be released during 97 once the kernel level
-PnP support is available.
-
-There is a method to get most of the PnP cards to work. The basic method
-is the following:
-
-1) Boot DOS so the card's DOS drivers have a chance to initialize it.
-2) _Cold_ boot to Linux by using "loadlin.exe". Hitting ctrl-alt-del
-works with older machines but causes a hard reset of all cards on recent
-(Pentium) machines.
-3) If you have the sound driver in Linux configured properly, the card should
-work now. "Proper" means that I/O, IRQ and DMA settings are the same as in
-DOS. The hard part is to find which settings were used. See the documentation of
-your card for more info.
-
-Windows 95 could work as well as DOS but running loadlin may be difficult.
-Probably you should "shut down" your machine to MS-DOS mode before running it.
-
-Some machines have a BIOS utility for setting PnP resources. This is a good
-way to configure some cards. In this case you don't need to boot DOS/Win95
-before starting Linux.
-
-Another way to initialize PnP cards without DOS/Win95 is a Linux based
-PnP isolation tool. At the time of writing there is a pre-alpha test version
-of such a tool available from ftp://ftp.demon.co.uk/pub/unix/linux/utils. The
-file is called isapnptools-*. Please note that this tool is just a temporary
-solution which may be incompatible with future kernel versions having proper
-support for PnP cards. There are bugs in setting DMA channels in earlier
-versions of isapnptools so at least version 1.6 is required with sound cards.
-
-Yet another way to use PnP cards is to use (commercial) OSS/Linux drivers. See
-http://www.opensound.com/linux.html for more info. This is probably the way you
-should do it if you don't want to spend time recompiling the kernel and
-required tools.
-
-
-Read this before trying to configure the driver
-===============================================
-
-There are currently many cards that work with this driver. Some of the cards
-have native support while others work since they emulate some other
-card (usually SB, MSS/WSS and/or MPU401). The following cards have native
-support in the driver. Detailed instructions for configuring these cards
-will be given later in this document.
-
-Pro Audio Spectrum 16 (PAS16) and compatibles:
- Pro Audio Spectrum 16
- Pro Audio Studio 16
- Logitech Sound Man 16
- NOTE! The original Pro Audio Spectrum as well as the PAS+ are not
- and will not be supported by the driver.
-
-Media Vision Jazz16 based cards
- Pro Sonic 16
- Logitech SoundMan Wave
- (Other Jazz based cards should work but I don't have any reports
- about them).
-
-Sound Blasters
- SB 1.0 to 2.0
- SB Pro
- SB 16
- SB32/64/AWE
- Configure SB32/64/AWE just like SB16. See lowlevel/README.awe
- for information about using the wave table synth.
- NOTE! AWE63/Gold and 16/32/AWE "PnP" cards need to be activated
- using isapnptools before they work with OSS/Free.
- SB16 compatible cards by other manufacturers than Creative.
- You have been fooled since there are _no_ SB16 compatible
- cards on the market (as of May 1997). It's likely that your card
- is compatible just with SB Pro but there is also a non-SB-
- compatible 16 bit mode. Usually it's MSS/WSS but it could also
- be a proprietary one like MV Jazz16 or ESS ES688. OPTi
- MAD16 chips are very common in so called "SB 16 bit cards"
- (try with the MAD16 driver).
-
- ======================================================================
- "Supposed to be SB compatible" cards.
- Forget the SB compatibility and check for other alternatives
- first. The only cards that work with the SB driver in
- Linux have been made by Creative Technology (there is at least
- one chip on the card with "CREATIVE" printed on it). The
- only other SB compatible chips are ESS and Jazz16 chips
- (maybe ALSxxx chips too but they probably don't work).
- Most other "16 bit SB compatible" cards such as "OPTi/MAD16" or
- "Crystal" are _NOT_ SB compatible in Linux.
-
- Practically all sound cards have some kind of SB emulation mode
- in addition to their native (16 bit) mode. In most cases this
- (8 bit only) SB compatible mode doesn't work with Linux. If
- you get it working it may cause problems with games and
- applications which require 16 bit audio. Some 16 bit only
- applications don't check if the card actually supports 16 bits.
- They just dump 16 bit data to an 8 bit card which produces just
- noise.
-
- In most cases the 16 bit native mode is supported by Linux.
- Use the SB mode with "clones" only if you don't find anything
- better from the rest of this doc.
- ======================================================================
-
-Gravis Ultrasound (GUS)
- GUS
- GUS + the 16 bit option
- GUS MAX
- GUS ACE (No MIDI port and audio recording)
- GUS PnP (with RAM)
-
-MPU-401 and compatibles
- The driver works both with the full (intelligent mode) MPU-401
- cards (such as MPU IPC-T and MQX-32M) and with the UART only
- dumb MIDI ports. MPU-401 is currently the most common MIDI
- interface. Most sound cards are compatible with it. However,
- don't enable MPU401 mode blindly. Many cards with native support
- in the driver have their own MPU401 driver. Enabling the standard one
- will cause a conflict with these cards. So check if your card is
- in the list of supported cards before enabling MPU401.
-
-Windows Sound System (MSS/WSS)
- Even when Microsoft has discontinued their own Sound System card
- they managed to make it a standard. MSS compatible cards are based on
- a codec chip which is easily available from at least two manufacturers
- (AD1848 by Analog Devices and CS4231/CS4248 by Crystal Semiconductor).
- Currently most sound cards are based on one of the MSS compatible codec
- chips. The CS4231 is used in the high quality cards such as GUS MAX,
- MediaTrix AudioTrix Pro and TB Tropez (GUS MAX is not MSS compatible).
-
- Having an AD1848, CS4248 or CS4231 codec chip on the card is a good
- sign. Even if the card is not MSS compatible, it could be easy to write
- support for it. Note also that most MSS compatible cards
- require special boot time initialization which may not be present
- in the driver. Also, some MSS compatible cards have native support.
- Enabling the MSS support with these cards is likely to
- cause a conflict. So check if your card is listed in this file before
- enabling the MSS support.
-
-Yamaha FM synthesizers (OPL2, OPL3 (not OPL3-SA) and OPL4)
- Most sound cards have a FM synthesizer chip. The OPL2 is a 2
- operator chip used in the original AdLib card. Currently it's used
- only in the cheapest (8 bit mono) cards. The OPL3 is a 4 operator
- FM chip which provides better sound quality and/or more available
- voices than the OPL2. The OPL4 is a new chip that has an OPL3 and
- a wave table synthesizer packed onto the same chip. The driver supports
- just the OPL3 mode directly. Most cards with an OPL4 (like
- SM Wave and AudioTrix Pro) support the OPL4 mode using MPU401
- emulation. Writing a native OPL4 support is difficult
- since Yamaha doesn't give information about their sample ROM chip.
-
- Enable the generic OPL2/OPL3 FM synthesizer support if your
-	card has an FM chip made by Yamaha. Don't enable it if your card
-	has a software (TSR) based FM emulator.
-
- ----------------------------------------------------------------
-	NOTE! OPL3-SA is a different chip from the ordinary OPL3. In addition
-	to the FM synth this chip also has digital audio (WSS) and
- MIDI (MPU401) capabilities. Support for OPL3-SA is described below.
- ----------------------------------------------------------------
-
-Yamaha OPL3-SA1
-
- Yamaha OPL3-SA1 (YMF701) is an audio controller chip used on some
- (Intel) motherboards and on cheap sound cards. It should not be
-	confused with the original OPL3 chip (YMF278) which is an entirely
-	different chip. OPL3-SA1 has support for MSS, MPU401 and SB Pro
- (not used in OSS/Free) in addition to the OPL3 FM synth.
-
- There are also chips called OPL3-SA2, OPL3-SA3, ..., OPL3SA-N. They
- are PnP chips and will not work with the OPL3-SA1 driver. You should
-	use the standard MSS, MPU401 and OPL3 options with these chips and
-	activate the card using isapnptools.
-
-4Front Technologies SoftOSS
-
- SoftOSS is a software based wave table emulation which works with
-	any 16 bit stereo sound card. Due to its nature a fast CPU is
-	required (a P133 is the minimum). Although SoftOSS does _not_ use MMX
-	instructions, it has turned out that recent processors (which happen
-	to have MMX) perform significantly better with SoftOSS than earlier
-	ones. For example a P166MMX beats a PPro200. SoftOSS should not be used
- on 486 or 386 machines.
-
- The amount of CPU load caused by SoftOSS can be controlled by
- selecting the CONFIG_SOFTOSS_RATE and CONFIG_SOFTOSS_VOICES
- parameters properly (they will be prompted by make config). It's
- recommended to set CONFIG_SOFTOSS_VOICES to 32. If you have a
-	P166MMX or faster (a PPro200 is not faster) you can set
-	CONFIG_SOFTOSS_RATE to 44100 (Hz). However, with slower systems it is
-	recommended to use sampling rates around 22050 or even 16000 Hz.
-	Selecting too high values for these parameters may hang your
-	system when playing MIDI files with a high degree of polyphony
- (number of concurrently playing notes). It's also possible to
- decrease CONFIG_SOFTOSS_VOICES. This makes it possible to use
- higher sampling rates. However using fewer voices decreases
- playback quality more than decreasing the sampling rate.
-
-	SoftOSS keeps the samples loaded in the system's RAM, so plenty of RAM is
-	required. SoftOSS should never be used on machines with less than 16 MB
- of RAM since this is potentially dangerous (you may accidentally run out
- of memory which probably crashes the machine).
-
- SoftOSS implements the wave table API originally designed for GUS. For
- this reason all applications designed for GUS should work (at least
- after minor modifications). For example gmod/xgmod and playmidi -g are
- known to work.
-
-	To work, SoftOSS requires GUS compatible
-	patch files to be installed on the system (in /dos/ultrasnd/midi). You
- can use the public domain MIDIA patchset available from several ftp
- sites.
-
- *********************************************************************
- IMPORTANT NOTICE! The original patch set distributed with the Gravis
-	Ultrasound card is not in the public domain (even though it's available from
-	some FTP sites). You should contact Voice Crystal (www.voicecrystal.com)
-	if you would like to use these patches with SoftOSS included in OSS/Free.
- *********************************************************************
-
-PSS based cards (AD1848 + ADSP-2115 + Echo ESC614 ASIC)
- Analog Devices and Echo Speech have together defined a sound card
- architecture based on the above chips. The DSP chip is used
- for emulation of SB Pro, FM and General MIDI/MT32.
-
-	There are several cards based on this architecture. The best known
-	ones are the Orchid SW32 and Cardinal DSP16.
-
- The driver supports downloading DSP algorithms to these cards.
-
- NOTE! You will have to use the "old" config script when configuring
- PSS cards.
-
-MediaTrix AudioTrix Pro
-	The ATP card is built around a CS4231 codec and an OPL4 synthesizer
-	chip. The OPL4 mode is supported by a microcontroller running a
- General MIDI emulator. There is also a SB 1.5 compatible playback mode.
-
-Ensoniq SoundScape and compatibles
- Ensoniq has designed a sound card architecture based on the
- OTTO synthesizer chip used in their professional MIDI synthesizers.
- Several companies (including Ensoniq, Reveal and Spea) are selling
- cards based on this architecture.
-
- NOTE! The SoundScape PnP is not supported by OSS/Free. Ensoniq VIVO and
- VIVO90 cards are not compatible with Soundscapes so the Soundscape
- driver will not work with them. You may want to use OSS/Linux with these
- cards.
-
-OPTi MAD16 and Mozart based cards
- The Mozart (OAK OTI-601), MAD16 (OPTi 82C928), MAD16 Pro (OPTi 82C929),
- OPTi 82C924/82C925 (in _non_ PnP mode) and OPTi 82C930 interface
- chips are used in many different sound cards, including some
-	cards by Reveal, miro and Turtle Beach (Tropez). The purpose of these
- chips is to connect other audio components to the PC bus. The
- interface chip performs address decoding for the other chips.
- NOTE! Tropez Plus is not MAD16 but CS4232 based.
- NOTE! MAD16 PnP cards (82C924, 82C925, 82C931) are not MAD16 compatible
- in the PnP mode. You will have to use them in MSS mode after having
- initialized them using isapnptools or DOS. 82C931 probably requires
- initialization using DOS/Windows (running isapnptools is not enough).
- It's possible to use 82C931 with OSS/Free by jumpering it to non-PnP
- mode (provided that the card has a jumper for this). In non-PnP mode
- 82C931 is compatible with 82C930 and should work with the MAD16 driver
-	(without the need to use isapnptools or DOS to initialize it). All OPTi
- chips are supported by OSS/Linux (both in PnP and non-PnP modes).
-
-Audio Excel DSP16
- Support for this card was written by Riccardo Faccetti
-	(riccardo@cdc8g5.cdc.polimi.it). The AEDSP16 driver is included in
- the lowlevel/ directory. To use it you should enable the
- "Additional low level drivers" option.
-
-Crystal CS4232 and CS4236 based cards such as AcerMagic S23, TB Tropez _Plus_ and
- many PC motherboards (Compaq, HP, Intel, ...)
-	CS4232 is a PnP multimedia chip which contains a CS4231A codec,
- SB and MPU401 emulations. There is support for OPL3 too.
- Unfortunately the MPU401 mode doesn't work (I don't know how to
- initialize it). CS4236 is an enhanced (compatible) version of CS4232.
- NOTE! Don't ever try to use isapnptools with CS4232 since this will just
- freeze your machine (due to chip bugs). If you have problems in getting
- CS4232 working you could try initializing it with DOS (CS4232C.EXE) and
- then booting Linux using loadlin. CS4232C.EXE loads a secret firmware
- patch which is not documented by Crystal.
-
-Turtle Beach Maui and Tropez "classic"
- This driver version supports sample, patch and program loading commands
- described in the Maui/Tropez User's manual.
- There is now full initialization support too. The audio side of
- the Tropez is based on the MAD16 chip (see above).
-	NOTE! Tropez Plus is a different card from the Tropez "classic" and will not
- work fully in Linux. You can get audio features working by configuring
- the card as a CS4232 based card (above).
-
-
-Jumpers and software configuration
-==================================
-
-Some of the earliest sound cards were jumper configurable. You have to
-configure the driver to use I/O, IRQ and DMA settings
-that match the jumpers. Just a few 8 bit cards are fully jumper
-configurable (SB 1.x/2.x, SB Pro and clones).
-Some cards made by Aztech have an EEPROM which contains the
-config info. These cards behave much like hardware jumpered cards.
-
-Most cards have a jumper for the base I/O address but other parameters
-are software configurable. Sometimes there are a few other jumpers too.
-
-The latest cards are fully software configurable or they are PnP ISA
-compatible. There are no jumpers on the board.
-
-The driver handles software configurable cards automatically. Just configure
-the driver to use I/O, IRQ and DMA settings which are known to work.
-You can usually use the same values as with DOS and/or Windows.
-Using different settings is possible but not recommended since it may cause
-some trouble (for example when warm booting from an OS to another or
-when installing new hardware to the machine).
-
-The sound driver sets the software configurable parameters of the card automatically
-during boot. Usually you don't need to run any extra initialization
-programs when booting Linux but there are some exceptions. See the
-card-specific instructions below for more info.
-
-The drawback of software configuration is that the driver needs to know
-how the card must be initialized. It cannot initialize unknown cards
-even if they are otherwise compatible with some other cards (like SB,
-MPU401 or Windows Sound System).
-
-
-What if your card was not listed above?
-=======================================
-
-The first thing to do is to look at the major IC chips on the card.
-Many of the latest sound cards are based on some standard chips. If you
-are lucky, all of them could be supported by the driver. The most common ones
-are the OPTi MAD16, Mozart, SoundScape (Ensoniq) and the PSS architectures
-listed above. Also look at the end of this file for a list of unsupported
-cards and the ones which could be supported later.
-
-The last resort is to send the _exact_ name and model information of the card
-together with a list of the major IC chips (manufacturer, model) to
-me. I can then try to check if your card looks like something familiar.
-
-There are many more cards in the world than listed above. The first thing to
-do with these cards is to check if they emulate some other card or interface
-such as SB, MSS and/or MPU401. In this case there is a chance to get the
-card to work by booting DOS before starting Linux (boot DOS, hit ctrl-alt-del
-and boot Linux without hard resetting the machine). In this method the
-DOS based driver initializes the hardware to use known I/O, IRQ and DMA
-settings. If the sound driver is configured to use the same settings, everything
-should work OK.
-
-
-Configuring sound driver (with Linux)
-=====================================
-
-The sound driver is currently distributed as part of the Linux kernel. The
-files are in /usr/src/linux/drivers/sound/.
-
-****************************************************************************
-* ALWAYS USE THE SOUND DRIVER VERSION WHICH IS DISTRIBUTED WITH *
-* THE KERNEL SOURCE PACKAGE YOU ARE USING. SOME ALPHA AND BETA TEST *
-* VERSIONS CAN BE INSTALLED FROM A SEPARATELY DISTRIBUTED PACKAGE *
-* BUT CHECK THAT THE PACKAGE IS NOT MUCH OLDER (OR NEWER) THAN THE *
-* KERNEL YOU ARE USING. IT'S POSSIBLE THAT THE KERNEL/DRIVER *
-* INTERFACE CHANGES BETWEEN KERNEL RELEASES WHICH MAY CAUSE SOME *
-* INCOMPATIBILITY PROBLEMS. *
-* *
-* IN CASE YOU INSTALL A SEPARATELY DISTRIBUTED SOUND DRIVER VERSION, *
-* BE SURE TO REMOVE OR RENAME THE OLD SOUND DRIVER DIRECTORY BEFORE *
-* INSTALLING THE NEW ONE. LEAVING OLD FILES TO THE SOUND DRIVER *
-* DIRECTORY _WILL_ CAUSE PROBLEMS WHEN THE DRIVER IS USED OR *
-* COMPILED. *
-****************************************************************************
-
-To configure the driver, run "make config" in the kernel source directory
-(/usr/src/linux). Answer "y" or "m" to the question about Sound card support
-(after the questions about mouse, CD-ROM, ftape, etc. support). Questions
-about options for sound will then be asked.
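-
-For example, the whole sequence looks roughly like this (just a sketch; the
-exact questions asked depend on your kernel version):
-
-  cd /usr/src/linux
-  make config          # answer "y" or "m" to "Sound card support"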
-
-After configuring the kernel and sound driver, compile the kernel
-following the instructions in the kernel README.
-
-The sound driver configuration dialog
--------------------------------------
-
-Sound configuration starts with some yes/no questions. Be careful
-when answering these questions since answering y to a question may
-prevent some later ones from being asked. For example don't answer y to
-the first question (PAS16) if you don't really have a PAS16. Don't enable
-more cards than you really need since they just consume memory. Also
-some drivers (like MPU401) may conflict with your SCSI controller and
-prevent the kernel from booting. If your card was in the list of supported
-cards (above), please look at the card specific config instructions
-(later in this file) before starting to configure. Some cards must be
-configured in a way which is not obvious.
-
-So here is the beginning of the config dialog. Answer 'y' or 'n' to these
-questions. The default answer is shown so that (y/n) means 'y' by default and
-(n/y) means 'n'. To use the default value, just hit ENTER. But be careful
-since using the default _doesn't_ guarantee anything.
-
-Note also that not all questions may be asked. The configuration program
-may disable some questions depending on the earlier choices. It may also
-select some options automatically.
-
- "ProAudioSpectrum 16 support",
-	- Answer 'y' _ONLY_ if you have a Pro Audio Spectrum _16_,
- Pro Audio Studio 16 or Logitech SoundMan 16 (be sure that
- you read the above list correctly). Don't answer 'y' if you
- have some other card made by Media Vision or Logitech since they
- are not PAS16 compatible.
- NOTE! Since 3.5-beta10 you need to enable SB support (next question)
- if you want to use the SB emulation of PAS16. It's also possible to
-	disable the emulation if you want to use a true SB card together with PAS16
- (there is another question about this that is asked later).
- "Sound Blaster support",
- - Answer 'y' if you have an original SB card made by Creative Labs
- or a full 100% hardware compatible clone (like Thunderboard or
- SM Games). If your card was in the list of supported cards (above),
- please look at the card specific instructions later in this file
- before answering this question. For an unknown card you may answer
- 'y' if the card claims to be SB compatible.
- Enable this option also with PAS16 (changed since v3.5-beta9).
-
- Don't enable SB if you have a MAD16 or Mozart compatible card.
-
- "Generic OPL2/OPL3 FM synthesizer support",
-	- Answer 'y' if your card has an FM chip made by Yamaha (OPL2/OPL3/OPL4).
- Answering 'y' is usually a safe and recommended choice. However some
- cards may have software (TSR) FM emulation. Enabling FM support
-	with these cards may cause trouble. However, I don't currently know
-	of any such cards.
- "Gravis Ultrasound support",
-	- Answer 'y' if you have a GUS or GUS MAX. Answer 'n' if you don't
-	have a GUS since the GUS driver consumes a lot of memory.
-	Currently I don't have experience with the GUS ACE so I don't
-	know what to recommend for it.
- "MPU-401 support (NOT for SB16)",
- - Be careful with this question. The MPU401 interface is supported
- by almost any sound card today. However some natively supported cards
- have their own driver for MPU401. Enabling the MPU401 option with
- these cards will cause a conflict. Also enabling MPU401 on a system
-	that doesn't really have an MPU401 could cause some trouble. If your
- card was in the list of supported cards (above), please look at
- the card specific instructions later in this file.
-
- In MOST cases this MPU401 driver should only be used with "true"
- MIDI-only MPU401 professional cards. In most other cases there
- is another way to get the MPU401 compatible interface of a
- sound card to work.
- Support for the MPU401 compatible MIDI port of SB16, ESS1688
- and MV Jazz16 cards is included in the SB driver. Use it instead
-	of this separate MPU401 driver with these cards. Likewise, the
- Soundscape, PSS and Maui drivers include their own MPU401
- options.
-
- It's safe to answer 'y' if you have a true MPU401 MIDI interface
- card.
- "6850 UART Midi support",
- - It's safe to answer 'n' to this question in all cases. The 6850
-	UART interface is very rarely used.
- "PSS (ECHO-ADI2111) support",
- - Answer 'y' only if you have Orchid SW32, Cardinal DSP16 or some
- other card based on the PSS chipset (AD1848 codec + ADSP-2115
-	DSP chip + Echo ESC614 ASIC chip).
- "16 bit sampling option of GUS (_NOT_ GUS MAX)",
- - Answer 'y' if you have installed the 16 bit sampling daughtercard
- to your GUS. Answer 'n' if you have GUS MAX. Enabling this option
- disables GUS MAX support.
- "GUS MAX support",
- - Answer 'y' only if you have a GUS MAX.
- "Microsoft Sound System support",
- - Again think carefully before answering 'y' to this question. It's
- safe to answer 'y' in case you have the original Windows Sound
- System card made by Microsoft or Aztech SG 16 Pro (or NX16 Pro).
- Also you may answer 'y' in case your card was not listed earlier
- in this file. For cards having native support in the driver, consult
- the card specific instructions later in this file. Some drivers
- have their own MSS support and enabling this option will cause a
- conflict.
- Note! The MSS driver permits configuring two DMA channels. This is a
- "nonstandard" feature and works only with very few cards (if any).
- In most cases the second DMA channel should be disabled or set to
-	the same channel as the first one. Trying to configure two separate
- channels with cards that don't support this feature will prevent
- audio (at least recording) from working.
- "Ensoniq Soundscape support",
- - Answer 'y' if you have a sound card based on the Ensoniq SoundScape
- chipset. Such cards are being manufactured at least by Ensoniq,
- Spea and Reveal (note that Reveal makes other cards also). The oldest
- cards made by Spea don't work properly with Linux.
- Soundscape PnP as well as Ensoniq VIVO work only with the commercial
- OSS/Linux version.
- "MediaTrix AudioTrix Pro support",
- - Answer 'y' if you have the AudioTrix Pro.
- "Support for MAD16 and/or Mozart based cards",
-	- Answer 'y' if your card has a Mozart (OAK OTI-601) or MAD16
- (OPTi 82C928, 82C929, 82C924/82C925 or 82C930) audio interface chip.
- These chips are
- currently quite common so it's possible that many no-name cards
- have one of them. In addition the MAD16 chip is used in some
- cards made by known manufacturers such as Turtle Beach (Tropez),
- Reveal (some models) and Diamond (some recent models).
-	Note that OPTi 82C924 and 82C925 are MAD16 compatible only in non-PnP
-	mode (jumper selectable on many cards).
- "Support for TB Maui"
- - This enables TB Maui specific initialization. Works with TB Maui
- and TB Tropez (may not work with Tropez Plus).
-
-
-Then the configuration program asks some y/n questions about the higher
-level services. It's recommended to answer 'y' to each of these questions.
-Answer 'n' only if you know you will not need the option.
-
- "MIDI interface support",
- - Answering 'n' disables /dev/midi## devices and access to any
- MIDI ports using /dev/sequencer and /dev/music. This option
- also affects any MPU401 and/or General MIDI compatible devices.
- "FM synthesizer (YM3812/OPL-3) support",
- - Answer 'y' here.
- "/dev/sequencer support",
- - Answering 'n' disables /dev/sequencer and /dev/music.
-
-Entering the I/O, IRQ and DMA config parameters
------------------------------------------------
-
-After the above questions the configuration program prompts for the
-card specific configuration information. Usually just a set of
-I/O address, IRQ and DMA numbers is asked for. With some cards the program
-asks for some files to be used during initialization of the card. For example
-many cards have a DSP chip or microprocessor which must be initialized by
-downloading a program (microcode) file to the card.
-
-Instructions for answering these questions are given in the next section.
-
-
-Card specific information
-=========================
-
-This section gives additional instructions about configuring some cards.
-Please refer to the manual of your card for valid I/O, IRQ and DMA numbers. Using
-the same settings with DOS/Windows and Linux is recommended. Using
-different values could cause some problems when switching between
-different operating systems.
-
-Sound Blasters (the original ones by Creative)
----------------------------------------------
-
-NOTE! Check if you have a PnP Sound Blaster (cards sold after summer 1995
- are almost certainly PnP ones). With PnP cards you should use isapnptools
- to activate them (see above).
-
-It's possible to configure these cards to use different I/O, IRQ and
-DMA settings. Since the possible/default settings have changed between various
-models, you have to consult the manual of your card for the proper ones. It's
-a good idea to use the same values as with DOS/Windows. With SB and SB Pro
-it's the only choice. SB16 has software selectable IRQ and DMA channels but
-using different values with DOS and Linux is likely to cause trouble. The
-DOS driver is not able to reset the card properly after warm boot from Linux
-if Linux has used different IRQ or DMA values.
-
-The original (steam) Sound Blaster (versions 1.x and 2.x) always uses
-DMA1. There is no way to change it.
-
-The SB16 needs two DMA channels. An 8 bit one (1 or 3) is required for
-8 bit operation and a 16 bit one (5, 6 or 7) for the 16 bit mode. In theory
-it's possible to use just one (8 bit) DMA channel by answering the 8 bit
-one when the configuration program asks for the 16 bit one. This may work
-in some systems but is likely to cause terrible noise on some other systems.
-
-It's possible to use two SB16/32/64 at the same time. To do this you should
-first configure OSS/Free for one card. Then edit local.h manually and define
-SB2_BASE, SB2_IRQ, SB2_DMA and SB2_DMA2 for the second one. You can't get
-the OPL3, MIDI and EMU8000 devices of the second card to work. If you are
-going to use two PnP Sound Blasters, ensure that they are of different model
-and have different PnP IDs. There is no way to get two cards with the same
-card ID and serial number to work. The easiest way to check this is to see
-whether isapnptools detects both cards or just one.
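-
-As a rough sketch, the manually added defines in local.h could look like the
-following (the address, IRQ and DMA values below are only placeholders; use
-settings that actually match your second card):
-
-  #define SB2_BASE 0x240
-  #define SB2_IRQ  10
-  #define SB2_DMA  0
-  #define SB2_DMA2 6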
-
-NOTE! Don't enable the SM Games option (asked by the configuration program)
- if you are not 101% sure that your card is a Logitech Soundman Games
- (not a SM Wave or SM16).
-
-SB Clones
----------
-
-First of all: There are no SB16 clones. There are SB Pro clones with a
-16 bit mode which is not SB16 compatible. The most likely alternative is that
-the 16 bit mode means MSS/WSS.
-
-There are just a few fully 100% hardware SB or SB Pro compatible cards.
-I know of just the Thunderboard and SM Games. Other cards require some kind of
-hardware initialization before they become SB compatible. Check if your card
-was listed at the beginning of this file. In that case you should follow
-the instructions for your card later in this file.
-
-For other, not fully compatible SB clones you may try initialization using DOS in
-the following way:
-
- - Boot DOS so that the card specific driver gets run.
- - Hit ctrl-alt-del (or use loadlin) to boot Linux. Don't
- switch off power or press the reset button.
- - If you use the same I/O, IRQ and DMA settings in Linux, the
- card should work.
-
-If your card is both SB and MSS compatible, I recommend using the MSS mode.
-Most cards of this kind are not able to work in the SB and the MSS mode
-simultaneously. Using the MSS mode provides 16 bit recording and playback.
-
-ProAudioSpectrum 16 and compatibles
------------------------------------
-
-PAS16 has a SB emulation chip which can be used together with the native
-(16 bit) mode of the card. To enable this emulation you should configure
-the driver to have SB support too (this has been changed since version
-3.5-beta9 of this driver).
-
-With current driver versions it's also possible to use PAS16 together with
-another SB compatible card. In this case you should configure SB support
-for the other card and disable the SB emulation of PAS16 (there is a
-separate question about this).
-
-With PAS16 you can use two audio device files at the same time. /dev/dsp (and
-/dev/audio) is connected to the 8/16 bit native codec and the /dev/dsp1 (and
-/dev/audio1) is connected to the SB emulation (8 bit mono only).
-
-Gravis Ultrasound
------------------
-
-There are many different revisions of the Ultrasound card (GUS). The
-earliest ones (pre 3.7) don't have a hardware mixer. With these cards
-the driver uses a software emulation for synth and PCM playback. It's
-also possible to switch some of the inputs (line in, mic) off by setting
-the mixer volume of the channel below 10%. For recording you have
-to select the channel as a recording source and use a volume above 10%.
-
-GUS 3.7 has a hardware mixer.
-
-GUS MAX and the 16 bit sampling daughtercard have a CS4231 codec chip which
-also contains a mixer.
-
-Configuring GUS is simple. Just enable the GUS support and GUS MAX or
-the 16 bit daughtercard if you have them. Note that enabling the daughter
-card disables the GUS MAX driver.
-
-NOTE for owners of the 16 bit daughtercard: By default the daughtercard
-uses /dev/dsp (and /dev/audio). Command "ln -sf /dev/dsp1 /dev/dsp"
-selects the daughter card as the default device.
-
-With just the standard GUS enabled the configuration program prompts
-for the I/O, IRQ and DMA numbers for the card. Use the same values as
-with DOS.
-
-With the daughter card option enabled you will be prompted for the I/O,
-IRQ and DMA numbers for the daughter card. You have to use different I/O
-and DMA values than for the standard GUS. The daughter card permits
-simultaneous recording and playback. Use /dev/dsp (the daughtercard) for
-recording and /dev/dsp1 (GUS GF1) for playback.
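-
-As a quick test of the simultaneous operation (just a sketch using the raw OSS
-devices; the file names are arbitrary):
-
-  dd if=/dev/dsp of=capture.raw bs=4k count=100 &   # record via the daughtercard
-  cat sample.raw > /dev/dsp1                        # play back via the GUS GF1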
-
-GUS MAX uses the same I/O address and IRQ settings as the original GUS
-(GUS MAX = GUS + a CS4231 codec). In addition an extra DMA channel may be used.
-Using two DMA channels permits simultaneous playback using two devices
-(/dev/dsp0 and /dev/dsp1). The second DMA channel is required for
-full duplex audio.
-To enable the second DMA channel, give a valid DMA channel when the config
-program asks for the GUS MAX DMA (entering -1 disables the second DMA).
-Using 16 bit DMA channels (5,6 or 7) is recommended.
-
-If you have problems in recording with GUS MAX, you could try to use
-just one 8 bit DMA channel. Recording will not work with one DMA
-channel if it's a 16 bit one.
-
-The microphone input of GUS MAX is connected to the mixer in a slightly
-nonstandard way. There are actually two microphone volume controls. The normal
-"mic" control affects only the recording level. The mixer control "speaker" is
-used to control the volume of the microphone signal connected directly to the
-line/speaker out. So just decrease the volume of "speaker" if you have
-problems with microphone feedback.
-
-GUS ACE works too but any attempt to record or to use the MIDI port
-will fail.
-
-GUS PnP (with RAM) is partially supported but it needs to be initialized using
-DOS or isapnptools before starting the driver.
-
-MPU401 and Windows Sound System
--------------------------------
-
-Again: don't enable these options if your card is listed
-somewhere else in this file.
-
-Configuring these cards is obvious (or it should be). With MSS
-you should probably enable the OPL3 synth also since
-most MSS compatible cards have it. However check that this is true
-before enabling OPL3.
-
-The sound driver supports more than one MPU401 compatible card at the same time
-but the config program asks for config info for just the first of them.
-Adding a second or third MPU interface must be done manually by
-editing sound/local.h (after running the config program). Add defines for
-MPU2_BASE & MPU2_IRQ (and MPU3_BASE & MPU3_IRQ) to the file.
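-
-For example, the added lines could look like this (the I/O address and IRQ
-below are just placeholders for your second interface):
-
-  #define MPU2_BASE 0x300
-  #define MPU2_IRQ  5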
-
-CAUTION!
-
-The default I/O base of Adaptec AHA-1542 SCSI controller is 0x330 which
-is also the default of the MPU401 driver. Don't configure the sound driver to
-use 0x330 as the MPU401 base if you have an AHA-1542. The kernel will not boot
-if you make this mistake.
-
-PSS
----
-
-Even though the PSS cards are compatible with SB, MSS and MPU401, you must not
-enable these options when configuring the driver. The configuration
-program handles these options itself. (You may use the SB, MPU and MSS options
-together with PSS if you have another card on the system).
-
-The PSS driver enables MSS and MPU401 modes of the card. SB is not enabled
-since it doesn't work concurrently with MSS. The driver also loads a
-DSP algorithm which is used for the General MIDI emulation. The
-algorithm file (.ld) is read by the config program and written to a
-file which is included when pss.c is compiled. For this reason the config
-program asks if you want to download the file. Use the genmidi.ld file
-distributed with the DOS/Windows drivers of the card (don't use the mt32.ld).
-With some cards the file is called 'synth.ld'. You must have access to
-the file when configuring the driver. The easiest way is to mount the DOS
-partition containing the file with Linux.
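-
-For example, assuming the DOS drivers live on the first primary DOS partition
-(the device name is only an example):
-
-  mount -t msdos /dev/hda1 /dos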
-
-It's possible to load your own DSP algorithms and run them with the card.
-Look at the directory pss_test of snd-util-3.0.tar.gz for more info.
-
-AudioTrix Pro
--------------
-
-You have to enable the OPL3 and SB (not SB Pro or SB16) drivers in addition
-to the native AudioTrix driver. Don't enable MSS or MPU drivers.
-
-Configuring the ATP is a little bit tricky since it uses so many I/O, IRQ and
-DMA numbers. Using the same values as with DOS/Windows is a good idea. Don't
-attempt to use the same IRQ or DMA channels twice.
-
-The SB mode of the ATP is implemented so that the ATP driver just enables SB
-at the proper address. The SB driver handles the rest. You have to configure
-both the SB driver and the SB mode of ATP to use the same IRQ, DMA and I/O
-settings.
-
-Also the ATP has a microcontroller for the General MIDI emulation (OPL4).
-For this reason the driver asks for the name of a file containing the
-microcode (TRXPRO.HEX). This file is usually located in the directory
-where the DOS drivers were installed. You must have access to this file
-when configuring the driver.
-
-If you have the effects daughtercard, it must be initialized by running
-the setfx program of snd-util-3.0.tar.gz package. This step is not required
-when using the (future) binary distribution version of the driver.
-
-Ensoniq SoundScape
-------------------
-
-NOTE! The new PnP SoundScape is not supported yet. Soundscape compatible
- cards made by Reveal don't work with Linux. They use older revision
- of the Soundscape chipset which is not fully compatible with
- newer cards made by Ensoniq.
-
-The SoundScape driver handles initialization of the MSS and MPU support
-itself, so you don't need to enable drivers other than SoundScape
-(also enable the /dev/dsp, /dev/sequencer and MIDI support).
-
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-!!!!! !!!!
-!!!!! NOTE! Before version 3.5-beta6 there WERE two sets of audio !!!!
-!!!!! device files (/dev/dsp0 and /dev/dsp1). The first one WAS !!!!
-!!!!! used only for card initialization and the second for audio !!!!
-!!!!! purposes. It WAS required to change /dev/dsp (a symlink) to !!!!
-!!!!! point to /dev/dsp1. !!!!
-!!!!! !!!!
-!!!!! This is not required with OSS versions 3.5-beta6 and later !!!!
-!!!!! since there is now just one audio device file. Please !!!!
-!!!!! change /dev/dsp to point back to /dev/dsp0 if you are !!!!
-!!!!! upgrading from an earlier driver version using !!!!
-!!!!! (cd /dev;rm dsp;ln -s dsp0 dsp). !!!!
-!!!!! !!!!
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-The configuration program asks for one DMA channel and two interrupts. One IRQ
-and one DMA channel are used by the MSS codec. The second IRQ is required for the
-MPU401 mode (you have to use different IRQs for the two purposes).
-Earlier there were two DMA channels for SoundScape but the current driver
-version requires just one.
-
-The SoundScape card has a Motorola microcontroller which must be initialized
-_after_ boot (the driver doesn't initialize it during boot).
-The initialization is done by running the 'ssinit' program which is
-distributed in the snd-util-3.0.tar.gz package. You have to edit two
-defines in the ssinit.c and then compile the program. You may run ssinit
-manually (after each boot) or add it to /etc/rc.d/rc.local.
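-
-For example, assuming ssinit was compiled and installed to /usr/local/bin, the
-line added to /etc/rc.d/rc.local could simply be:
-
-  /usr/local/bin/ssinit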
-
-The ssinit program needs the microcode file that comes with the DOS/Windows
-driver of the card. You will need to use version 1.30.00 or later
-of the microcode file (sndscape.co0 or sndscape.co1 depending on
-your card model). THE OLD sndscape.cod WILL NOT WORK. IT WILL HANG YOUR
-MACHINE. The only way to get the new microcode file is to download
-and install the DOS/Windows driver from ftp://ftp.ensoniq.com/pub.
-
-Then you have to select the proper microcode file to use: sndscape.co0
-is the right one for most cards and sndscape.co1 is for a few (older) cards
-made by Reveal and/or Spea. The driver has the capability to detect the card
-version during boot. Look at the boot log messages in /var/adm/messages
-and locate the sound driver initialization message for the SoundScape
-card. If the driver displays the string <Ensoniq Soundscape (old)>, you have
-an old card and you will need to use sndscape.co1. For other cards use
-sndscape.co0. New Soundscape revisions such as Elite and PnP use
-code files with higher numbers (.co2, .co3, etc.).
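-
-One way to locate that message (the log file location varies between
-distributions):
-
-  grep -i soundscape /var/adm/messages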
-
-NOTE! Ensoniq Soundscape VIVO is not compatible with other Soundscape cards.
- Currently it's possible to use it in Linux only with OSS/Linux
- drivers.
-
-Check /var/adm/messages after running ssinit. The driver prints
-the board version after downloading the microcode file. That version
-number must match the number in the name of the microcode file (extension).
-
-Running ssinit with a wrong version of the sndscape.co? file is not
-dangerous as long as you don't try to use a file called sndscape.cod.
-If you have initialized the card using a wrong microcode file (sounds
-are terrible), just modify ssinit.c to use another microcode file and try
-again. It's possible to use an earlier version of sndscape.co[01] but it
-may sound weird.
-
-MAD16 (Pro) and Mozart
-----------------------
-
-You need to enable just the MAD16/Mozart support when configuring
-the driver. _Don't_ enable SB, MPU401 or MSS. However you will need the
-/dev/audio, /dev/sequencer and MIDI support.
-
-Mozart and OPTi 82C928 (the original MAD16) chips don't support
-MPU401 mode so enter just 0 when the configuration program asks for the
-MPU/MIDI I/O base. The MAD16 Pro (OPTi 82C929) and 82C930 chips have MPU401
-mode.
-
-TB Tropez is based on the 82C929 chip. It has two MIDI ports.
-The one connected to the MAD16 chip is the second one (there is a second
-MIDI connector/pins somewhere??). If you have not connected the second MIDI
-port, just disable the MIDI port of MAD16. The 'Maui' compatible synth of
-Tropez is jumper configurable and not connected to the MAD16 chip (the
-Maui driver can be used with it).
-
-Some MAD16 based cards may cause feedback, whistling or terrible noise if the
-line3 mixer channel is turned up too high. This happens at least with the Shuttle
-Sound System. Current driver versions set the volume of line3 low enough so
-this should not be a problem.
-
-If you have a MAD16 card which has an OPL4 (FM + Wave table) synthesizer
-chip (_not_ an OPL3), you have to append a line containing #define MAD16_OPL4
-to the file linux/drivers/sound/local.h (after running make config).
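-
-For example, from the top level kernel source directory (assuming local.h
-already exists after make config):
-
-  echo '#define MAD16_OPL4' >> drivers/sound/local.h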
-
-MAD16 cards having a CS4231 codec support full duplex mode. This mode
-can be enabled by configuring the card to use two DMA channels. Possible
-DMA channel pairs are: 0&1, 1&0 and 3&0.
-
-NOTE! Cards having an OPTi 82C924/82C925 chip work with OSS/Free only in
-non-PnP mode (usually jumper selectable). The PnP mode is supported only
-by OSS/Linux.
-
-MV Jazz (ProSonic)
-------------------
-
-The Jazz16 driver is just a hack made to the SB Pro driver. However it works
-fairly well. You have to enable SB, SB Pro (_not_ SB16) and MPU401 support
-when configuring the driver. The configuration program later asks if you
-want support for MV Jazz16 based cards (after asking for the SB base address).
-Answer 'y' here and the driver asks for the second (16 bit) DMA channel.
-
-The Jazz16 driver uses the MPU401 driver in a way which will cause
-problems if you have another MPU401 compatible card. In this case you must
-give the address of the Jazz16 based MPU401 interface when the config
-program prompts for the MPU401 information. Then look at the MPU401
-specific section for instructions about configuring more than one MPU401 card.
-
-Logitech Soundman Wave
-----------------------
-
-Read the above MV Jazz specific instructions first.
-
-The Logitech SoundMan Wave (don't confuse this with the SM16 or SM Games) is
-a MV Jazz based card which has an additional OPL4 based wave table
-synthesizer. The OPL4 chip is handled by an on board microcontroller
-which must be initialized during boot. The config program asks if
-you have an SM Wave immediately after asking for the second DMA channel of the
-Jazz16. If you answer 'y', the config program will ask for the name of the file
-containing the code to be loaded to the microcontroller. The file is usually called
-MIDI0001.BIN and it's located in the DOS/Windows driver directory. The file
-may also be called TSUNAMI.BIN or something else (older cards?).
-
-The OPL4 synth will be inaccessible without loading the microcontroller code.
-
-Also remember to enable SB MPU401 support if you want to use the OPL4 mode.
-(Don't enable the 'normal' MPU401 device as with some earlier driver
-versions (pre 3.5-alpha8)).
-
-NOTE! Don't answer 'y' when the driver asks about SM Games support
- (the next question after the MIDI0001.BIN name). However
-	answering 'y' doesn't damage your computer so don't panic.
-
-Sound Galaxies
---------------
-
-There are many different Sound Galaxy cards made by Aztech. The 8 bit
-ones are fully SB or SB Pro compatible and there should be no problems
-with them.
-
-The older 16 bit cards (SG Pro16, SG NX Pro16, Nova and Lyra) have
-an EEPROM chip for storing the configuration data. There is a microcontroller
-which initializes the card to match the EEPROM settings when the machine
-is powered on. These cards actually behave just like they have jumpers
-for all of the settings. Configure the driver for MSS, MPU, SB/SB Pro and OPL3
-support with these cards.
-
-There are some new Sound Galaxies on the market. I have no experience with
-them so read the card's manual carefully.
-
-ESS ES1688 and ES688 'AudioDrive' based cards
----------------------------------------------
-
-Support for these two ESS chips is embedded in the SB driver.
-Configure these cards just like SB. Enable the 'SB MPU401 MIDI port'
-if you want to use MIDI features of ES1688. ES688 doesn't have MPU mode
-so you don't need to enable it (the driver uses normal SB MIDI automatically
-with ES688).
-
-NOTE! ESS cards are not compatible with MSS/WSS so don't worry if MSS support
-of OSS doesn't work with it.
-
-There are some ES1688/688 based sound cards and (particularly) motherboards
-which use software configurable I/O port relocation feature of the chip.
-This ESS proprietary feature is supported only by OSS/Linux.
-
-There are ES1688 based cards which use a different interrupt pin assignment than
-the one recommended by ESS (5, 7, 9/2 and 10). In this case not all IRQs work.
-At least a card called (Pearl?) Hypersound 16 supports IRQ 15 but it doesn't
-work.
-
-The ES1868 is a PnP chip which is (supposed to be) compatible with the ES1688 and
-probably works with OSS/Free after initialization using isapnptools.
-
-Reveal cards
-------------
-
-There are several different cards made/marketed by Reveal. Some of them
-are compatible with SoundScape and some use the MAD16 chip. You may have
-to look at the card and try to identify its origin.
-
-Diamond
--------
-
-The oldest (Sierra Aria based) sound cards made by Diamond are not supported
-(they may work if the card is initialized using DOS). The recent (LX?)
-models are based on the MAD16 chip which is supported by the driver.
-
-Audio Excel DSP16
------------------
-
-Support for this card is currently not functional. A new driver for it
-should be available later this year.
-
-PCMCIA cards
-------------
-
-Sorry, can't help. Some cards may work and some don't.
-
-TI TM4000M notebooks
---------------------
-
-These computers have built-in sound support based on the Jazz chipset.
-Look at the instructions for MV Jazz (above). It's also important to note
-that there is something wrong with the mouse port and sound at least on
-some TM models. Don't enable the "C&T 82C710 mouse port support" when
-configuring Linux. Having it enabled is likely to cause mysterious problems
-and kernel failures when sound is used.
-
-miroSOUND
----------
-
-The miroSOUND PCM1-pro, PCM12 and PCM20 radio have been used
-successfully. These cards are based on the MAD16, OPL4, and CS4231A chips
-and everything said in the section about MAD16 cards applies here,
-too. The only major difference between the PCMxx and other MAD16 cards
-is that instead of the mixer in the CS4231 codec a separate mixer
-controlled by an on-board 80C32 microcontroller is used. Control of
-the mixer takes place via the ACI (miro's audio control interface)
-protocol that is implemented in a separate lowlevel driver. Make sure
-you compile this ACI driver together with the normal MAD16 support
-when you use a miroSOUND PCMxx card. The ACI mixer is controlled by
-/dev/mixer and the CS4231 mixer by /dev/mixer1 (depending on load
-time). Only in special cases will you want to change something on
-the CS4231 mixer.
-
-The miroSOUND PCM12 and PCM20 radio are capable of full duplex
-operation (simultaneous PCM replay and recording), which allows you to
-implement nice real-time signal processing audio effect software and
-network telephones. The ACI mixer has to be switched into the "solo"
-mode for duplex operation in order to avoid feedback caused by the
-mixer (the input hears the output signal). You can activate or deactivate this
-mode by toggling the record button for the wave controller with an
-OSS mixer.
-
-The PCM20 contains a radio tuner, which is also controlled by
-ACI. This radio tuner is supported by the ACI driver together with the
-miropcm20.o module. The 7-band equalizer is also integrated
-(limited by the OSS design). Development has started, and may be
-finished, on the RDS decoder for this card, too. You will be able to
-read RadioText, the Programme Service name, Programme TYpe and
-others. Even the v4l radio module benefits from it with a refined
-strength value. See aci.[ch] and miropcm20*.[ch] for more details.
-
-The following configuration parameters have worked fine for the PCM12
-in Markus Kuhn's system, many other configurations might work, too:
-CONFIG_MAD16_BASE=0x530, CONFIG_MAD16_IRQ=11, CONFIG_MAD16_DMA=3,
-CONFIG_MAD16_DMA2=0, CONFIG_MAD16_MPU_BASE=0x330, CONFIG_MAD16_MPU_IRQ=10,
-DSP_BUFFSIZE=65536, SELECTED_SOUND_OPTIONS=0x00281000.
-
-Bas van der Linden is using his PCM1-pro with a configuration that
-differs in: CONFIG_MAD16_IRQ=7, CONFIG_MAD16_DMA=1, CONFIG_MAD16_MPU_IRQ=9
-
-Compaq Deskpro XL
------------------
-
-The built-in sound hardware of the Compaq Deskpro XL is now supported.
-You need to configure the driver with MSS and OPL3 support enabled.
-In addition you need to manually edit linux/drivers/sound/local.h and
-add a line containing "#define DESKPROXL" if you used
-make menuconfig/xconfig.
-
-Others?
--------
-
-Since there are so many different sound cards, it's likely that I have
-forgotten to mention many of them. If you know of yet another
-card which works with Linux, please inform me (or is anybody else
-willing to maintain a database of supported cards, just like in XF86?).
-
-Cards not supported yet
-=======================
-
-Please check the version of sound driver you are using before
-complaining that your card is not supported. It's possible you are
-using a driver version which was released months before your card was
-introduced.
-
-First of all, there is an easy way to make most sound cards work with Linux.
-Just use the DOS based driver to initialize the card to a known state, then use
-loadlin.exe to boot Linux. If Linux is configured to use the same I/O, IRQ and
-DMA numbers as DOS, the card could work.
-(ctrl-alt-del can be used in place of loadlin.exe but it doesn't work with
-new motherboards). This method also works with all/most PnP sound cards.
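-
-A rough example of the loadlin step (the kernel image name and root device
-below are only placeholders):
-
-  loadlin c:\vmlinuz root=/dev/hda2 ro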
-
-Don't get fooled by SB compatibility. Most cards are compatible with
-SB but that may require a TSR which is not possible with Linux. If
-the card is compatible with MSS, it's a better choice. Some cards
-don't work in the SB and MSS modes at the same time.
-
-Then there are cards which are no longer manufactured and/or which
-are relatively rarely used (such as the 8 bit ProAudioSpectrum
-models). It's extremely unlikely that such cards ever get supported.
-Adding support for a new card requires a lot of work and increases the time
-required to maintain the driver (some changes need to be made
-to all low level drivers and tested too, maybe with multiple
-operating systems). For this reason I have decided not to support
-obsolete cards. It's possible that someone else makes a separately
-distributed driver (diffs) for the card.
-
-Writing a driver for a new card is not possible if there is no
-programming information available about the card. If you don't
-find your new card in this file, look at the home page
-(http://www.opensound.com/ossfree). Then please contact the
-manufacturer of the card and ask if they have released (or are willing
-to release) technical details of the card. Do this before contacting me. I
-can only answer 'no' if there is no programming information available.
-
-I have decided not to accept code based on reverse engineering
-into the driver. There are three main reasons: First, I don't want to damage
-relationships with sound card manufacturers. The second reason is that
-maintaining and supporting a driver without any specs will be a pain.
-The third reason is that companies have the freedom to refuse to sell their
-products to anyone other than Windows users.
-
-Some companies don't give low level technical information about their
-products to the public, or at least they require signing an NDA. It's not
-possible to implement a freeware driver for them. However it's possible
-that support for such cards becomes available in the commercial version
-of this driver (see http://www.4Front-tech.com/oss.html for more info).
-
-There are some common audio chipsets that are not supported yet. For example
-Sierra Aria and IBM Mwave. It's possible that these architectures
-get some support in the future but I can't make any promises. Just look
-at the home page (http://www.opensound.com/ossfree/)
-for latest info.
-
-Information about unsupported sound cards and chipsets is welcome as well
-as free copies of sound cards, SDKs and operating systems.
-
-If you have any corrections and/or comments, please contact me.
-
-Hannu Savolainen
-hannu@opensound.com
-
-home page of OSS/Free: http://www.opensound.com/ossfree
-
-home page of commercial OSS
-(Open Sound System) drivers: http://www.opensound.com/oss.html
diff --git a/Documentation/sound/oss/README.modules b/Documentation/sound/oss/README.modules
deleted file mode 100644
index cdc039421a46..000000000000
--- a/Documentation/sound/oss/README.modules
+++ /dev/null
@@ -1,106 +0,0 @@
-Building a modular sound driver
-================================
-
- The following information is current as of linux-2.1.85. Check the other
-readme files, especially README.OSS, for information not specific to
-making sound modular.
-
- First, configure your kernel. This is an idea of what you should be
-setting in the sound section:
-
-<M> Sound card support
-
-<M> 100% Sound Blaster compatibles (SB16/32/64, ESS, Jazz16) support
-
- I have SoundBlaster. Select your card from the list.
-
-<M> Generic OPL2/OPL3 FM synthesizer support
-<M> FM synthesizer (YM3812/OPL-3) support
-
- If you don't set these, you will probably find you can play .wav files
-but not .midi. As the help for them says, set them unless you know your
-card does not use one of these chips for FM support.
-
- Once you are configured, make zlilo, modules, modules_install; reboot.
-Note that it is no longer necessary or possible to configure sound in the
-drivers/sound dir. Now one simply configures and makes one's kernel and
-modules in the usual way.
-
- Then, add to your /etc/modprobe.d/oss.conf something like:
-
-alias char-major-14-* sb
-install sb /sbin/modprobe -i sb && /sbin/modprobe adlib_card
-options sb io=0x220 irq=7 dma=1 dma16=5 mpu_io=0x330
-options adlib_card io=0x388 # FM synthesizer
-
- Alternatively, if you have compiled in kernel level ISAPnP support:
-
-alias char-major-14 sb
-softdep sb post: adlib_card
-options adlib_card io=0x388
-
- The effect of this is that the sound driver and all necessary bits and
-pieces autoload on demand, assuming you use kerneld (a sound choice) and
-autoclean when not in use. Also, options for the device drivers are
-set. They will not work without them. Change as appropriate for your card.
-If you are not yet using the very cool kerneld, you will have to "modprobe
--k sb" yourself to get things going. Eventually things may be fixed so
-that this kludgery is not necessary; for the time being, it seems to work
-well.
-
- Replace 'sb' with the driver for your card, and give it the right
-options. To find the filename of the driver, look in
-/lib/modules/<kernel-version>/misc. Mine looks like:
-
-adlib_card.o # This is the generic OPLx driver
-opl3.o # The OPL3 driver
-sb.o # <<The SoundBlaster driver. Yours may differ.>>
-sound.o # The sound driver
-uart401.o # Used by sb, maybe other cards
-
- Whichever card you have, try feeding it the options that would be the
-default if you were making the driver wired, not as modules. You can
-look at the function referred to by module_init() for the card to see what
-args are expected.
-
- Note that at present there is no way to configure the io, irq and other
-parameters for the modular drivers as one does for the wired drivers. One
-needs to pass the modules the necessary parameters as arguments, either
-with /etc/modprobe.d/*.conf or with command-line args to modprobe, e.g.
-
-modprobe sb io=0x220 irq=7 dma=1 dma16=5 mpu_io=0x330
-modprobe adlib_card io=0x388
-
- I recommend using /etc/modprobe.d/*.conf.
-
-Persistent DMA Buffers:
-
-The sound modules normally allocate DMA buffers during open() and
-deallocate them during close(). Linux can often have problems allocating
-DMA buffers for ISA cards on machines with more than 16MB RAM. This is
-because ISA DMA buffers must exist below the 16MB boundary and it is quite
-possible that we can't find a large enough free block in this region after
-the machine has been running for any amount of time. The way to avoid this
-problem is to allocate the DMA buffers during module load and deallocate
-them when the module is unloaded. For this to be effective we need to load
-the sound modules right after the kernel boots, either manually or by an
-init script, and keep them around until we shut down. This is a little
-wasteful of RAM, but it guarantees that sound always works.
-
-To make the sound driver use persistent DMA buffers we need to pass the
-sound.o module a "dmabuf=1" command-line argument. This is normally done
-in /etc/modprobe.d/*.conf files like so:
-
-options sound dmabuf=1
-
-If you have 16MB or less RAM or a PCI sound card, this is wasteful and
-unnecessary. It is possible that a machine with 16MB or less RAM will find
-this option useful, but if your machine is so memory-starved that it
-cannot find a 64K block free, you will be wasting even more RAM by keeping
-the sound modules loaded and the DMA buffers allocated when they are not
-needed. The proper solution is to upgrade your RAM. But you do also have
-this improper solution as well. Use it wisely.
-
- I'm afraid I know nothing about anything but my setup, being more of a
-text-mode guy anyway. If you have options for other cards or other helpful
-hints, send them to me, Jim Bray, jb@as220.org, http://as220.org/jb.
diff --git a/Documentation/sound/oss/README.ymfsb b/Documentation/sound/oss/README.ymfsb
deleted file mode 100644
index b6b77906b58d..000000000000
--- a/Documentation/sound/oss/README.ymfsb
+++ /dev/null
@@ -1,107 +0,0 @@
-Legacy audio driver for YMF7xx PCI cards.
-
-
-FIRST OF ALL
-============
-
-  This code references YAMAHA's sample code and data sheets.
-  I respect and thank all the people who made the information
-  about YMF7xx cards open.
-
-  This code is also heavily based on Jeff Garzik <jgarzik@pobox.com>'s
-  old VIA 82Cxxx driver (via82cxxx.c). I respect him as well.
-
-
-DISCLAIMER
-==========
-
-  This driver is currently at an early ALPHA stage. It may cause serious
- damage to your computer when used.
- PLEASE USE IT AT YOUR OWN RISK.
-
-
-ABOUT THIS DRIVER
-=================
-
-  This code enables you to use your YMF724[A-F], YMF740[A-C], YMF744, YMF754
-  cards. When enabled, your card acts as a "SoundBlaster Pro" compatible card.
-  It can only play 22.05kHz / 8bit / stereo samples and control the external
-  MIDI port.
-  If you want to use your card as a recent "16-bit" card, you should use the
-  ALSA or OSS/Linux driver. Of course you can write a native PCI driver for
-  your cards :)
-
-
-USAGE
-=====
-
- # modprobe ymfsb (options)
-
-
-OPTIONS FOR MODULE
-==================
-
- io : SB base address (0x220, 0x240, 0x260, 0x280)
- synth_io : OPL3 base address (0x388, 0x398, 0x3a0, 0x3a8)
- dma : DMA number (0,1,3)
- master_volume: AC'97 PCM out Vol (0-100)
- spdif_out : SPDIF-out flag (0:disable 1:enable)
-
-  These options will change in the future...
-
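-  For example (the values are picked from the lists above; adjust them for
-  your own setup):
-
-  # modprobe ymfsb io=0x220 synth_io=0x388 dma=1 spdif_out=0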
-
-FREQUENCY
-=========
-
-  When playing sounds via this driver, you will hear that the pitch is slightly
-  lower than the original sounds. This is because the driver runs your card
-  at a 21.739kHz sample rate rather than 22.050kHz (I think it must be a
-  hardware restriction), so many players sound slightly off pitch.
-  To prevent this, you should pass options to your sound player
-  that specify the correct sample frequency. For example, to play your MP3 file
-  correctly with mpg123, specify the frequency as follows:
-
- % mpg123 -r 21739 foo.mp3
-
-
-SPDIF OUT
-=========
-
-  By loading the module with the option 'spdif_out=1', you can enjoy your
-  sounds from the SPDIF-out of your card (if it has one).
-  Its Fs is fixed at 48kHz (this does not mean the sample frequency goes
-  up to 48kHz; all sounds via SPDIF-out are still 22kHz samples). So your
-  digital-in capable components have to be able to handle a 48kHz Fs.
-
-
-COPYING
-=======
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-
-TODO
-====
- * support for multiple cards
- (set the different SB_IO,MPU_IO,OPL_IO for each cards)
-
- * support for OPL (dmfm) : There will be no requirements... :-<
-
-
-AUTHOR
-======
-
- Daisuke Nagano <breeze.nagano@nifty.ne.jp>
-
diff --git a/Documentation/sound/oss/SoundPro b/Documentation/sound/oss/SoundPro
deleted file mode 100644
index 9d4db1f29d3c..000000000000
--- a/Documentation/sound/oss/SoundPro
+++ /dev/null
@@ -1,105 +0,0 @@
-Documentation for the SoundPro CMI8330 extensions in the WSS driver (ad1848.o)
-------------------------------------------------------------------------------
-
-( Be sure to read Documentation/sound/oss/CMI8330 too )
-
-Ion Badulescu, ionut@cs.columbia.edu
-February 24, 1999
-
-(derived from the OPL3-SA2 documentation by Scott Murray)
-
-The SoundPro CMI8330 (ISA) is a chip usually found on some Taiwanese
-motherboards. The official name in the documentation is CMI8330, SoundPro
-is the nickname and the big inscription on the chip itself.
-
-The chip emulates a WSS as well as a SB16, but it has certain differences
-in the mixer section which require separate support. It also emulates an
-MPU401 and an OPL3 synthesizer, so you probably want to enable support
-for these, too.
-
-The chip identifies itself as an AD1848, but its mixer is significantly
-more advanced than the original AD1848 one. If your system works with
-either WSS or SB16 and you are having problems with some mixer controls
-(no CD audio, no line-in, etc), you might want to give this driver a try.
-Detection should work, but it hasn't been widely tested, so it might still
-mis-identify the chip. You can still force soundpro=1 in the modprobe
-parameters for ad1848. Please let me know if it happens to you, so I can
-adjust the detection routine.
-
-The chip is capable of doing full-duplex, but since the driver sees it as an
-AD1848, it cannot take advantage of this. Moreover, the full-duplex mode is
-not achievable through the WSS interface, because it needs a dma16 line which
-is assigned only to the SB16 subdevice (with isapnp). Windows documentation
-says the user must use WSS Playback and SB16 Recording for full-duplex, so
-it might be possible to do the same thing under Linux. You can try loading
-up both ad1848 and sb and then use one for playback and the other for
-recording. I don't know if this works, because I haven't tested it. Anyway, if
-you try it, be very careful: the SB16 mixer *mostly* works, but certain
-settings can have unexpected effects. Use the WSS mixer for best results.
-
-There is also a PCI SoundPro chip. I have not seen this chip, so I have
-no idea if the driver will work with it. I suspect it won't.
-
-As with PnP cards, some configuration is required. There are two ways
-of doing this. The most common is to use the isapnptools package to
-initialize the card, and use the kernel module form of the sound
-subsystem and sound drivers. Alternatively, some BIOS's allow manual
-configuration of installed PnP devices in a BIOS menu, which should
-allow using the non-modular sound drivers, i.e. built into the kernel.
-Since in this latter case you cannot use module parameters, you will
-have to enable support for the SoundPro at compile time.
-
-The IRQ and DMA values can be any that are considered acceptable for a
-WSS. Assuming you've got isapnp all happy, then you should be able to
-do something like the following (which *must* match the isapnp/BIOS
-configuration):
-
-modprobe ad1848 io=0x530 irq=11 dma=0 soundpro=1
--and maybe-
-modprobe sb io=0x220 irq=5 dma=1 dma16=5
-
--then-
-modprobe mpu401 io=0x330 irq=9
-modprobe opl3 io=0x388
-
-If all goes well and you see no error messages, you should be able to
-start using the sound capabilities of your system. If you get an
-error message while trying to insert the module(s), then make
-sure that the values of the various arguments match what you specified
-in your isapnp configuration file, and that there is no conflict with
-another device for an I/O port or interrupt. Checking the contents of
-/proc/ioports and /proc/interrupts can be useful to see if you're
-butting heads with another device.
-
-If you do not see the chipset version message, and none of the other
-messages present in the system log are helpful, try adding 'debug=1'
-to the ad1848 parameters, email me the syslog results and I'll do
-my best to help.
-
-Lastly, if you're using modules and want to set up automatic module
-loading with kmod, the kernel module loader, here is the section I
-currently use in my conf.modules file:
-
-# Sound
-post-install sound modprobe -k ad1848; modprobe -k mpu401; modprobe -k opl3
-options ad1848 io=0x530 irq=11 dma=0
-options sb io=0x220 irq=5 dma=1 dma16=5
-options mpu401 io=0x330 irq=9
-options opl3 io=0x388
-
-The above ensures that ad1848 will be loaded whenever the sound system
-is being used.
-
-Good luck.
-
-Ion
-
-NOT REALLY TESTED:
-- recording
-- recording device selection
-- full-duplex
-
-TODO:
-- implement mixer support for surround, loud, digital CD switches.
-- come up with a scheme which allows recording volumes for each subdevice.
-This is a major OSS API change.
diff --git a/Documentation/sound/oss/Soundblaster b/Documentation/sound/oss/Soundblaster
deleted file mode 100644
index b288d464ba8b..000000000000
--- a/Documentation/sound/oss/Soundblaster
+++ /dev/null
@@ -1,53 +0,0 @@
-modprobe sound
-insmod uart401
-insmod sb ...
-
-This loads the driver for the Sound Blaster and assorted clones. Cards that
-are covered by other drivers should not be using this driver.
-
-The Sound Blaster module takes the following arguments
-
-io I/O address of the Sound Blaster chip (0x220,0x240,0x260,0x280)
-irq IRQ of the Sound Blaster chip (5,7,9,10)
-dma 8-bit DMA channel for the Sound Blaster (0,1,3)
-dma16 16-bit DMA channel for SB16 and equivalent cards (5,6,7)
-mpu_io I/O for MPU chip if present (0x300,0x330)
-
-sm_games=1 Set if you have a Logitech soundman games
-acer=1 Set this to detect cards in some ACER notebooks
-mwave_bug=1 Set if you are trying to use this driver with mwave (see on)
-type Use this to specify a specific card type
-
-The following arguments are taken if ISAPnP support is compiled in
-
-isapnp=0 Set this to disable ISAPnP detection (use io=0xXXX etc. above)
-multiple=0 Set to disable detection of multiple Soundblaster cards.
- Consider it a bug if this option is needed, and send in a
- report.
-pnplegacy=1 Set this to be able to use a PnP card(s) along with a single
- non-PnP (legacy) card. Above options for io, irq, etc. are
- needed, and will apply only to the legacy card.
-reverse=1 Reverses the order of the search in the PnP table.
-uart401=1 Set to enable detection of mpu devices on some clones.
-isapnpjump=n Jumps to slot n in the driver's PnP table. Use the source,
- Luke.
-
-You may well want to load the opl3 driver for synth music on most SB and
-clone SB devices
-
-insmod opl3 io=0x388
-
-Using Mwave
-
-To make this driver work with Mwave you must set mwave_bug. You also need
-to warm boot from DOS/Windows with the required firmware loaded under this
-OS. IBM are being difficult about documenting how to load this firmware.
-
-Avance Logic ALS007
-
-This card is supported; see the separate file ALS007 for full details.
-
-Avance Logic ALS100
-
-This card is supported; setup should be as for a standard Sound Blaster 16.
-The driver will identify the audio device as a "Sound Blaster 16 (ALS-100)".
diff --git a/Documentation/sound/oss/Tropez+ b/Documentation/sound/oss/Tropez+
deleted file mode 100644
index b93a6b734fc0..000000000000
--- a/Documentation/sound/oss/Tropez+
+++ /dev/null
@@ -1,26 +0,0 @@
-From: Paul Barton-Davis <pbd@op.net>
-
-Here is the configuration I use with a Tropez+ and my modular
-driver:
-
- alias char-major-14 wavefront
- alias synth0 wavefront
- alias mixer0 cs4232
- alias audio0 cs4232
- pre-install wavefront modprobe "-k" "cs4232"
- post-install wavefront modprobe "-k" "opl3"
- options wavefront io=0x200 irq=9
- options cs4232 synthirq=9 synthio=0x200 io=0x530 irq=5 dma=1 dma2=0
- options opl3 io=0x388
-
-Things to note:
-
- the wavefront options "io" and "irq" ***MUST*** match the "synthio"
- and "synthirq" cs4232 options.
-
- you can do without the opl3 module if you don't
- want to use the OPL/[34] synth on the soundcard
-
- the opl3 io parameter is conventionally not adjustable.
-
-Please see drivers/sound/README.wavefront for more details.
diff --git a/Documentation/sound/oss/VIBRA16 b/Documentation/sound/oss/VIBRA16
deleted file mode 100644
index 68a5a46beb88..000000000000
--- a/Documentation/sound/oss/VIBRA16
+++ /dev/null
@@ -1,80 +0,0 @@
-Sound Blaster 16X Vibra addendum
---------------------------------
-by Marius Ilioaea <mariusi@protv.ro>
- Stefan Laudat <stefan@asit.ro>
-
-Sat Mar 6 23:55:27 EET 1999
-
- Hello again,
-
- Playing with an SB Vibra 16x soundcard, we found it very difficult
-to set up because the kernel reported a lot of DMA errors and simply
-wouldn't play any sound.
- A good starting point is that the vibra16x chip's full-duplex facility
-is not yet exploited by the sb driver found in the Linux kernel
-(tried with 2.2.2-ac7), nor by the commercial OSS package (which reports
-it as a half-duplex soundcard). Oh, I almost forgot, the RedHat sndconfig
-failed to detect it ;)
- So, the big problem still remains, because the sb module wants an
-8-bit and a 16-bit DMA channel, which we could not allocate for the vibra...
-it supports only two 8-bit DMA channels; the second one will be passed to
-the module as a 16-bit channel, the kernel will complain about that but
-everything will be okay, trust us.
- The only inconvenience you may find is some playback jitter
-if you have HDD DMA support enabled - but this
-will happen with almost all soundcards...
-
- A fully working isapnp.conf is just here:
-
-<snip here>
-
-(READPORT 0x0203)
-(ISOLATE PRESERVE)
-(IDENTIFY *)
-(VERBOSITY 2)
-(CONFLICT (IO FATAL)(IRQ FATAL)(DMA FATAL)(MEM FATAL)) # or WARNING
-# SB 16 and OPL3 devices
-(CONFIGURE CTL00f0/-1 (LD 0
-(INT 0 (IRQ 5 (MODE +E)))
-(DMA 0 (CHANNEL 1))
-(DMA 1 (CHANNEL 3))
-(IO 0 (SIZE 16) (BASE 0x0220))
-(IO 2 (SIZE 4) (BASE 0x0388))
-(NAME "CTL00f0/-1[0]{Audio }")
-(ACT Y)
-))
-
-# Joystick device - only if you need it :-/
-
-(CONFIGURE CTL00f0/-1 (LD 1
-(IO 0 (SIZE 1) (BASE 0x0200))
-(NAME "CTL00f0/-1[1]{Game }")
-(ACT Y)
-))
-(WAITFORKEY)
-
-<end of snipping>
-
- So, after a good kernel modules compilation and a 'depmod -a kernel_ver'
-you may want to:
-
-modprobe sb io=0x220 irq=5 dma=1 dma16=3
-
- Or, take the hard way:
-
-modprobe soundcore
-modprobe sound
-modprobe uart401
-modprobe sb io=0x220 irq=5 dma=1 dma16=3
-# do you need MIDI?
-modprobe opl3 io=0x388
-
- Just in case, the kernel sound support should be:
-
-CONFIG_SOUND=m
-CONFIG_SOUND_OSS=m
-CONFIG_SOUND_SB=m
-
- Enjoy your new noisy Linux box! ;)
-
-
diff --git a/Documentation/sound/oss/WaveArtist b/Documentation/sound/oss/WaveArtist
deleted file mode 100644
index f4f3407cd818..000000000000
--- a/Documentation/sound/oss/WaveArtist
+++ /dev/null
@@ -1,170 +0,0 @@
-
- (the following is from the armlinux CVS)
-
- WaveArtist mixer and volume levels can be accessed via these commands:
-
- nn30 read registers nn, where nn = 00 - 09 for mixer settings
- 0a - 13 for channel volumes
- mm31 write the volume setting in pairs, where mm = (nn - 10) / 2
- rr32 write the mixer settings in pairs, where rr = nn/2
- xx33 reset all settings to default
- 0y34 select mono source, y=0 = left, y=1 = right
-
- bits
- nn 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 00 | 0 | 0 0 1 1 | left line mixer gain | left aux1 mixer gain |lmute|
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 01 | 0 | 0 1 0 1 | left aux2 mixer gain | right 2 left mic gain |mmute|
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 02 | 0 | 0 1 1 1 | left mic mixer gain | left mic | left mixer gain |dith |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 03 | 0 | 1 0 0 1 | left mixer input select |lrfg | left ADC gain |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 04 | 0 | 1 0 1 1 | right line mixer gain | right aux1 mixer gain |rmute|
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 05 | 0 | 1 1 0 1 | right aux2 mixer gain | left 2 right mic gain |test |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 06 | 0 | 1 1 1 1 | right mic mixer gain | right mic |right mixer gain |rbyps|
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 07 | 1 | 0 0 0 1 | right mixer select |rrfg | right ADC gain |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 08 | 1 | 0 0 1 1 | mono mixer gain |right ADC mux sel|left ADC mux sel |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 09 | 1 | 0 1 0 1 |loopb|left linout|loop|ADCch|TxFch|OffCD|test |loopb|loopb|osamp|
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 0a | 0 | left PCM channel volume |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 0b | 0 | right PCM channel volume |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 0c | 0 | left FM channel volume |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 0d | 0 | right FM channel volume |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 0e | 0 | left wavetable channel volume |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 0f | 0 | right wavetable channel volume |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 10 | 0 | left PCM expansion channel volume |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 11 | 0 | right PCM expansion channel volume |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 12 | 0 | left FM expansion channel volume |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
- 13 | 0 | right FM expansion channel volume |
-----+---+------------+-----+-----+-----+----+-----+-----+-----+-----+-----+-----+-----+
-
- lmute: left mute
- mmute: mono mute
- dith: dithds
- lrfg:
- rmute: right mute
- rbyps: right bypass
- rrfg:
- ADCch:
- TxFch:
- OffCD:
- osamp:
-
- And the following diagram is derived from the description in the CVS archive:
-
- MIC L (mouthpiece)
- +------+
- -->PreAmp>-\
- +--^---+ |
- | |
- r2b4-5 | +--------+
- /----*-------------------------------->5 |
- | | |
- | /----------------------------------->4 |
- | | | |
- | | /--------------------------------->3 1of5 | +---+
- | | | | mux >-->AMP>--> ADC L
- | | | /------------------------------->2 | +-^-+
- | | | | | | |
- Line | | | | +----+ +------+ +---+ /---->1 | r3b3-0
- ------------*->mute>--> Gain >--> | | | |
- L | | | +----+ +------+ | | | *->0 |
- | | | | | | +---^----+
- Aux2 | | | +----+ +------+ | | | |
- ----------*--->mute>--> Gain >--> M | | r8b0-2
- L | | +----+ +------+ | | |
- | | | | \------\
- Aux1 | | +----+ +------+ | | |
- --------*----->mute>--> Gain >--> I | |
- L | +----+ +------+ | | |
- | | | |
- | +----+ +------+ | | +---+ |
- *------->mute>--> Gain >--> X >-->AMP>--*
- | +----+ +------+ | | +-^-+ |
- | | | | |
- | +----+ +------+ | | r2b1-3 |
- | /----->mute>--> Gain >--> E | |
- | | +----+ +------+ | | |
- | | | | |
- | | +----+ +------+ | | |
- | | /--->mute>--> Gain >--> R | |
- | | | +----+ +------+ | | |
- | | | | | | r9b8-9
- | | | +----+ +------+ | | | |
- | | | /->mute>--> Gain >--> | | +---v---+
- | | | | +----+ +------+ +---+ /-*->0 |
- DAC | | | | | | |
- ------------*----------------------------------->? | +----+
- L | | | | | Mux >-->mute>--> L output
- | | | | /->? | +--^-+
- | | | | | | | |
- | | | /--------->? | r0b0
- | | | | | | +-------+
- | | | | | |
- Mono | | | | | | +-------+
- ----------* | \---> | +----+
- | | | | | | Mix >-->mute>--> Mono output
- | | | | *-> | +--^-+
- | | | | | +-------+ |
- | | | | | r1b0
- DAC | | | | | +-------+
- ------------*-------------------------*--------->1 | +----+
- R | | | | | | Mux >-->mute>--> R output
- | | | | +----+ +------+ +---+ *->0 | +--^-+
- | | | \->mute>--> Gain >--> | | +---^---+ |
- | | | +----+ +------+ | | | | r5b0
- | | | | | | r6b0
- | | | +----+ +------+ | | |
- | | \--->mute>--> Gain >--> M | |
- | | +----+ +------+ | | |
- | | | | |
- | | +----+ +------+ | | |
- | *----->mute>--> Gain >--> I | |
- | | +----+ +------+ | | |
- | | | | |
- | | +----+ +------+ | | +---+ |
- \------->mute>--> Gain >--> X >-->AMP>--*
- | +----+ +------+ | | +-^-+ |
- /--/ | | | |
- Aux1 | +----+ +------+ | | r6b1-3 |
- -------*------>mute>--> Gain >--> E | |
- R | | +----+ +------+ | | |
- | | | | |
- Aux2 | | +----+ +------+ | | /------/
- ---------*---->mute>--> Gain >--> R | |
- R | | | +----+ +------+ | | |
- | | | | | | +--------+
- Line | | | +----+ +------+ | | | *->0 |
- -----------*-->mute>--> Gain >--> | | | |
- R | | | | +----+ +------+ +---+ \---->1 |
- | | | | | |
- | | | \-------------------------------->2 | +---+
- | | | | Mux >-->AMP>--> ADC R
- | | \---------------------------------->3 | +-^-+
- | | | | |
- | \------------------------------------>4 | r7b3-0
- | | |
- \-----*-------------------------------->5 |
- | +---^----+
- r6b4-5 | |
- | | r8b3-5
- +--v---+ |
- -->PreAmp>-/
- +------+
- MIC R (electret mic)
diff --git a/Documentation/sound/oss/btaudio b/Documentation/sound/oss/btaudio
deleted file mode 100644
index effdb9a3f898..000000000000
--- a/Documentation/sound/oss/btaudio
+++ /dev/null
@@ -1,92 +0,0 @@
-
-Intro
-=====
-
-People have started bugging me about this with questions, so it looks like I
-should write up some documentation for this beast. That way I hope I
-don't have to answer that many mails. Yes, I'm lazy...
-
-
-You might have noticed that the bt878 grabber cards have actually
-_two_ PCI functions:
-
-$ lspci
-[ ... ]
-00:0a.0 Multimedia video controller: Brooktree Corporation Bt878 (rev 02)
-00:0a.1 Multimedia controller: Brooktree Corporation Bt878 (rev 02)
-[ ... ]
-
-The first does video, it is backward compatible to the bt848. The second
-does audio. btaudio is a driver for the second function. It's a sound
-driver which can be used for recording sound (and _only_ recording, no
-playback). As most TV cards come with a short cable which can be plugged
-into your sound card's line-in you probably don't need this driver if all
-you want to do is just watching TV...
-
-
-Driver Status
-=============
-
-Still somewhat experimental. The driver should be stable, i.e. it
-shouldn't crash your box. It might not work as expected, have bugs, or
-not be fully OSS API compliant, ...
-
-Latest versions are available from http://bytesex.org/bttv/, the
-driver is in the bttv tarball. Kernel patches might be available too,
-have a look at http://bytesex.org/bttv/listing.html.
-
-The chip knows two different modes. btaudio registers two dsp
-devices, one for each mode. They can not be used at the same time.
-
-
-Digital audio mode
-==================
-
-The chip gives you 16 bit stereo sound. The sample rate depends on
-the external source which feeds the bt878 with digital sound via the I2S
-interface. There is an insmod option (rate) to tell the driver which
-sample rate the hardware uses (32000 is the default).
-
-One possible source for digital sound is the msp34xx audio processor
-chip which provides digital sound via I2S with 32 kHz sample rate. My
-Hauppauge board works this way.
-
-The Osprey-200 reportedly gives you digital sound with a 44100 Hz sample
-rate. It is also possible that you get no sound at all.
-
-
-analog mode (A/D)
-=================
-
-You can tell the driver to use this mode with the insmod option "analog=1".
-The chip has three analog inputs. Consequently you'll get a mixer device
-to control these.
-
-The analog mode supports mono only. Both 8 + 16 bit. Both are _signed_
-int, which is uncommon for the 8 bit case. Sample rate range is 119 kHz
-to 448 kHz. Yes, the number of digits is correct. The driver supports
-downsampling by powers of two, so you can ask for more usual sample rates
-like 44 kHz too.
-
-With my Hauppauge I get noisy sound on the second input (mapped to line2
-by the mixer device). Others get a useable signal on line1.
-
-
-some examples
-=============
-
-* read audio data from btaudio (dsp2), send to es1730 (dsp,dsp1):
- $ sox -w -r 32000 -t ossdsp /dev/dsp2 -t ossdsp /dev/dsp
-
-* read audio data from btaudio, send to esound daemon (which might be
- running on another host):
- $ sox -c 2 -w -r 32000 -t ossdsp /dev/dsp2 -t sw - | esdcat -r 32000
- $ sox -c 1 -w -r 32000 -t ossdsp /dev/dsp2 -t sw - | esdcat -m -r 32000
-
-
-Have fun,
-
- Gerd
-
---
-Gerd Knorr <kraxel@bytesex.org>
diff --git a/Documentation/sound/oss/mwave b/Documentation/sound/oss/mwave
deleted file mode 100644
index 5fbcb1609275..000000000000
--- a/Documentation/sound/oss/mwave
+++ /dev/null
@@ -1,185 +0,0 @@
- How to try to survive an IBM Mwave under Linux SB drivers
-
-
-+ IBM have now released documentation of sorts and Torsten is busy
- trying to make the Mwave work. This is not however a trivial task.
-
-----------------------------------------------------------------------------
-
-OK, first thing - the IRQ problem IS a problem, whether the test is bypassed or
-not. It is NOT a Linux problem, but an MWAVE problem that is fixed with the
-latest MWAVE patches. So, in other words, don't bypass the test for MWAVES!
-
-I have Windows 95 on /dev/hda1, swap on /dev/hda2, and Red Hat 5 on /dev/hda3.
-
-The steps, then:
-
- Boot to Linux.
- Mount Windows 95 file system (assume mount point = /dos95).
- mkdir /dos95/linux
- mkdir /dos95/linux/boot
- mkdir /dos95/linux/boot/parms
-
- Copy the kernel, any initrd image, and loadlin to /dos95/linux/boot/.
-
- Reboot to Windows 95.
-
- Edit C:/msdos.sys and add or change the following:
-
- Logo=0
- BootGUI=0
-
- Note that msdos.sys is a text file but it needs to be made 'unhidden',
- readable and writable before it can be edited. This can be done with
- DOS' "attrib" command.
-
- Edit config.sys to have multiple config menus. I have one for windows 95 and
- five for Linux, like this:
-------------
-[menu]
-menuitem=W95, Windows 95
-menuitem=LINTP, Linux - ThinkPad
-menuitem=LINTP3, Linux - ThinkPad Console
-menuitem=LINDOC, Linux - Docked
-menuitem=LINDOC3, Linux - Docked Console
-menuitem=LIN1, Linux - Single User Mode
-REM menudefault=W95,10
-
-[W95]
-
-[LINTP]
-
-[LINDOC]
-
-[LINTP3]
-
-[LINDOC3]
-
-[LIN1]
-
-[COMMON]
-FILES=30
-REM Please read README.TXT in C:\MWW subdirectory before changing the DOS= statement.
-DOS=HIGH,UMB
-DEVICE=C:\MWW\MANAGER\MWD50430.EXE
-SHELL=c:\command.com /e:2048
--------------------
-
-The important things are the SHELL and DEVICE statements.
-
- Then change autoexec.bat. Basically everything in there originally should be
- done ONLY when Windows 95 is booted. Then you add new things specifically
- for Linux. Mine is as follows
-
----------------
-@ECHO OFF
-if "%CONFIG%" == "W95" goto W95
-
-REM
-REM Linux stuff
-REM
-SET MWPATH=C:\MWW\DLL;C:\MWW\MWGAMES;C:\MWW\DSP
-SET BLASTER=A220 I5 D1
-SET MWROOT=C:\MWW
-SET LIBPATH=C:\MWW\DLL
-SET PATH=C:\WINDOWS;C:\MWW\DLL;
-CALL MWAVE START NOSHOW
-c:\linux\boot\loadlin.exe @c:\linux\boot\parms\%CONFIG%.par
-
-:W95
-REM
-REM Windows 95 stuff
-REM
-c:\toolkit\guard
-SET MSINPUT=C:\MSINPUT
-SET MWPATH=C:\MWW\DLL;C:\MWW\MWGAMES;C:\MWW\DSP
-REM The following is used by DOS games to recognize Sound Blaster hardware.
-REM If hardware settings are changed, please change this line as well.
-REM See the Mwave README file for instructions.
-SET BLASTER=A220 I5 D1
-SET MWROOT=C:\MWW
-SET LIBPATH=C:\MWW\DLL
-SET PATH=C:\WINDOWS;C:\WINDOWS\COMMAND;E:\ORAWIN95\BIN;f:\msdev\bin;e:\v30\bin.dbg;v:\devt\v30\bin;c:\JavaSDK\Bin;C:\MWW\DLL;
-SET INCLUDE=f:\MSDEV\INCLUDE;F:\MSDEV\MFC\INCLUDE
-SET LIB=F:\MSDEV\LIB;F:\MSDEV\MFC\LIB
-win
-
-------------------------
-
-Now build a file in c:\linux\boot\parms for each Linux config that you have.
-
-For example, my LINDOC3 config is for a docked Thinkpad at runlevel 3 with no
-initrd image, and has a parameter file named LINDOC3.PAR in c:\linux\boot\parms:
-
------------------------
-# LOADLIN @param_file image=other_image root=/dev/other
-#
-# Linux Console in docking station
-#
-c:\linux\boot\zImage.krn # First value must be filename of Linux kernel.
-root=/dev/hda3 # device which gets mounted as root FS
-ro # Other kernel arguments go here.
-apm=off
-doc=yes
-3
------------------------
-
-The doc=yes parameter is an environment variable used by my init scripts, not
-a kernel argument.
-
-However, the apm=off parameter IS a kernel argument! APM, at least in my setup,
-causes the kernel to crash when loaded via loadlin (but NOT when loaded via
-LILO). The APM stuff COULD be forced out of the kernel via the kernel compile
-options. Instead, I got an unofficial patch to the APM drivers that allows them
-to be dynamically deactivated via kernel arguments. Whichever way you choose
-to do it, APM, it seems, MUST be off for setups like mine.
-
-Now make sure C:\MWW\MWCONFIG.REF looks like this:
-
-----------------------
-[NativeDOS]
-Default=SB1.5
-SBInputSource=CD
-SYNTH=FM
-QSound=OFF
-Reverb=OFF
-Chorus=OFF
-ReverbDepth=5
-ChorusDepth=5
-SBInputVolume=5
-SBMainVolume=10
-SBWaveVolume=10
-SBSynthVolume=10
-WaveTableVolume=10
-AudioPowerDriver=ON
-
-[FastCFG]
-Show=No
-HideOption=Off
------------------------------
-
-OR the Default= line COULD be
-
-Default=SBPRO
-
-Reboot to Windows 95 and choose Linux. When booted, use sndconfig to configure
-the sound modules and voilà - ThinkPad sound with Linux.
-
-Now the gotchas - you can either have CD sound OR Mixers but not both. That's a
-problem with the SB1.5 (CD sound) or SBPRO (Mixers) settings. No one knows why
-this is!
-
-For some reason MPEG3 files, when played through mpg123, sound like they
-are playing at 1/8th speed - not very useful! If you have ANY insight
-on why this second thing might be happening, I would be grateful.
-
-===========================================================
- _/ _/_/_/_/
- _/_/ _/_/ _/
- _/ _/_/ _/_/_/_/ Martin John Bartlett
- _/ _/ _/ _/ (martin@nitram.demon.co.uk)
-_/ _/_/_/_/
- _/
-_/ _/
- _/_/
-===========================================================
diff --git a/Documentation/sound/oss/oss-parameters.txt b/Documentation/sound/oss/oss-parameters.txt
deleted file mode 100644
index cc675f25eee4..000000000000
--- a/Documentation/sound/oss/oss-parameters.txt
+++ /dev/null
@@ -1,51 +0,0 @@
- OSS Kernel Parameters
- ~~~~~~~~~~~~~~~~~~~~~
-
-See Documentation/admin-guide/kernel-parameters.rst for general information on
-specifying module parameters.
-
-This document may not be entirely up to date and comprehensive. The command
-"modinfo -p ${modulename}" shows a current list of all parameters of a loadable
-module. Loadable modules, after being loaded into the running kernel, also
-reveal their parameters in /sys/module/${modulename}/parameters/. Some of these
-parameters may be changed at runtime by the command
-"echo -n ${value} > /sys/module/${modulename}/parameters/${parm}".
-
-
- ad1848= [HW,OSS]
- Format: <io>,<irq>,<dma>,<dma2>,<type>
-
- aedsp16= [HW,OSS] Audio Excel DSP 16
- Format: <io>,<irq>,<dma>,<mss_io>,<mpu_io>,<mpu_irq>
- See also header of sound/oss/aedsp16.c.
-
- dmasound= [HW,OSS] Sound subsystem buffers
-
- mpu401= [HW,OSS]
- Format: <io>,<irq>
-
- opl3= [HW,OSS]
- Format: <io>
-
- pas2= [HW,OSS] Format:
- <io>,<irq>,<dma>,<dma16>,<sb_io>,<sb_irq>,<sb_dma>,<sb_dma16>
-
- pss= [HW,OSS] Personal Sound System (ECHO ESC614)
- Format:
- <io>,<mss_io>,<mss_irq>,<mss_dma>,<mpu_io>,<mpu_irq>
-
- sscape= [HW,OSS]
- Format: <io>,<irq>,<dma>,<mpu_io>,<mpu_irq>
-
- trix= [HW,OSS] MediaTrix AudioTrix Pro
- Format:
- <io>,<irq>,<dma>,<dma2>,<sb_io>,<sb_irq>,<sb_dma>,<mpu_io>,<mpu_irq>
-
- uart401= [HW,OSS]
- Format: <io>,<irq>
-
- uart6850= [HW,OSS]
- Format: <io>,<irq>
-
- waveartist= [HW,OSS]
- Format: <io>,<irq>,<dma>,<dma2>
diff --git a/Documentation/sound/oss/ultrasound b/Documentation/sound/oss/ultrasound
deleted file mode 100644
index eed331c738a3..000000000000
--- a/Documentation/sound/oss/ultrasound
+++ /dev/null
@@ -1,30 +0,0 @@
-modprobe sound
-insmod ad1848
-insmod gus io=* irq=* dma=* ...
-
-This loads the driver for the Gravis Ultrasound family of sound cards.
-
-The gus module takes the following arguments
-
-io I/O address of the Ultrasound card (eg. io=0x220)
-irq IRQ of the Ultrasound card
-dma DMA channel for the Ultrasound card
-dma16 2nd DMA channel, only needed for full duplex operation
-type 1 for PnP card
-gus16 1 for using 16 bit sampling daughter board
-no_wave_dma Set to disable DMA usage for wavetable (see note)
-db16 ???
-
-
-no_wave_dma option
-
-This option defaults to a value of 0, which allows the Ultrasound wavetable
-DSP to use DMA for playback and downloading samples. This is the same
-as the old behaviour. If set to 1, no DMA is used for downloading samples,
-which allows owners of a GUS MAX to make use of simultaneous digital audio
-(/dev/dsp), MIDI, and wavetable playback.
-
-
-If you have problems in recording with GUS MAX, you could try to use
-just one 8 bit DMA channel. Recording will not work with one DMA
-channel if it's a 16 bit one.
diff --git a/Documentation/sysctl/README b/Documentation/sysctl/README
index 91f54ffa0077..d5f24ab0ecc3 100644
--- a/Documentation/sysctl/README
+++ b/Documentation/sysctl/README
@@ -60,7 +60,7 @@ debug/ <empty>
dev/ device specific information (eg dev/cdrom/info)
fs/ specific filesystems
filehandle, inode, dentry and quota tuning
- binfmt_misc <Documentation/binfmt_misc.txt>
+ binfmt_misc <Documentation/admin-guide/binfmt-misc.rst>
kernel/ global kernel info / tuning
miscellaneous stuff
net/ networking stuff, for documentation look in:
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 35e17f748ca7..6c00c1e2743f 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -277,7 +277,7 @@ in a mount namespace.
----------------------------------------------------------
Documentation for the files in /proc/sys/fs/binfmt_misc is
-in Documentation/binfmt_misc.txt.
+in Documentation/admin-guide/binfmt-misc.rst.
3. /proc/sys/fs/mqueue - POSIX message queues filesystem
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 9baf66a9ef4e..055c8b3e1018 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -58,6 +58,7 @@ Currently, these files are in /proc/sys/vm:
- percpu_pagelist_fraction
- stat_interval
- stat_refresh
+- numa_stat
- swappiness
- user_reserve_kbytes
- vfs_cache_pressure
@@ -157,6 +158,10 @@ Note: the minimum value allowed for dirty_bytes is two pages (in bytes); any
value lower than this limit will be ignored and the old configuration will be
retained.
+Note: the value of dirty_bytes must also be set greater than
+dirty_background_bytes or the amount of memory corresponding to
+dirty_background_ratio.
+
==============================================================
dirty_expire_centisecs
@@ -176,6 +181,9 @@ generating disk writes will itself start writing out dirty data.
The total available memory is not equal to total system memory.
+Note: dirty_ratio must be set greater than dirty_background_ratio or
+the ratio corresponding to dirty_background_bytes.
+
==============================================================
dirty_writeback_centisecs
@@ -622,7 +630,7 @@ oom_dump_tasks
Enables a system-wide task dump (excluding kernel threads) to be produced
when the kernel performs an OOM-killing and includes such information as
-pid, uid, tgid, vm size, rss, nr_ptes, nr_pmds, swapents, oom_score_adj
+pid, uid, tgid, vm size, rss, pgtables_bytes, swapents, oom_score_adj
score, and name. This is helpful to determine why the OOM killer was
invoked, to identify the rogue task that caused it, and to determine why
the OOM killer chose the task it did to kill.
@@ -792,6 +800,21 @@ with no ill effects: errors and warnings on these stats are suppressed.)
==============================================================
+numa_stat
+
+This interface allows runtime configuration of numa statistics.
+
+When page allocation performance becomes a bottleneck and you can tolerate
+some possible tool breakage and decreased numa counter precision, you can
+do:
+ echo 0 > /proc/sys/vm/numa_stat
+
+When page allocation performance is not a bottleneck and you want all
+tooling to work, you can do:
+ echo 1 > /proc/sys/vm/numa_stat
+
+==============================================================
+
swappiness
This control is used to define how aggressive the kernel will swap
diff --git a/Documentation/timers/highres.txt b/Documentation/timers/highres.txt
index e8789976e77c..9d88f67781c2 100644
--- a/Documentation/timers/highres.txt
+++ b/Documentation/timers/highres.txt
@@ -4,10 +4,10 @@ High resolution timers and dynamic ticks design notes
Further information can be found in the paper of the OLS 2006 talk "hrtimers
and beyond". The paper is part of the OLS 2006 Proceedings Volume 1, which can
be found on the OLS website:
-http://www.linuxsymposium.org/2006/linuxsymposium_procv1.pdf
+https://www.kernel.org/doc/ols/2006/ols2006v1-pages-333-346.pdf
The slides to this talk are available from:
-http://tglx.de/projects/hrtimers/ols2006-hrtimers.pdf
+http://www.cs.columbia.edu/~nahum/w6998/papers/ols2006-hrtimers-slides.pdf
The slides contain five figures (pages 2, 15, 18, 20, 22), which illustrate the
changes in the time(r) related Linux subsystems. Figure #1 (p. 2) shows the
diff --git a/Documentation/trace/coresight-cpu-debug.txt b/Documentation/trace/coresight-cpu-debug.txt
index b3da1f90b861..2b9b51cd501e 100644
--- a/Documentation/trace/coresight-cpu-debug.txt
+++ b/Documentation/trace/coresight-cpu-debug.txt
@@ -149,11 +149,23 @@ If you want to limit idle states at boot time, you can use "nohlt" or
At runtime you can disable idle states with the methods below:
-Set latency request to /dev/cpu_dma_latency to disable all CPUs specific idle
-states (if latency = 0uS then disable all idle states):
-# echo "what_ever_latency_you_need_in_uS" > /dev/cpu_dma_latency
-
-Disable specific CPU's specific idle state:
+It is possible to disable CPU idle states by way of the PM QoS
+subsystem, more specifically by using the "/dev/cpu_dma_latency"
+interface (see Documentation/power/pm_qos_interface.txt for more
+details). As specified in the PM QoS documentation, the requested
+parameter will stay in effect until the file descriptor is released.
+For example:
+
+# exec 3<> /dev/cpu_dma_latency; echo 0 >&3
+...
+Do some work...
+...
+# exec 3>&-
+
+The same can also be done from an application program.
+
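+A minimal sketch of such a program (error handling trimmed; the latency value
+and the use of /dev/cpu_dma_latency follow the PM QoS documentation, the rest
+is only an illustration):
+
+  #include <fcntl.h>
+  #include <stdint.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          int32_t latency_us = 0;   /* 0 = do not enter any idle state */
+          int fd = open("/dev/cpu_dma_latency", O_RDWR);
+
+          if (fd < 0)
+                  return 1;
+          if (write(fd, &latency_us, sizeof(latency_us)) != sizeof(latency_us))
+                  return 1;
+
+          /* ... do the latency-sensitive work here ... */
+
+          close(fd);                /* closing the fd drops the request */
+          return 0;
+  }
+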
+Disable specific CPU's specific idle state from cpuidle sysfs (see
+Documentation/cpuidle/sysfs.txt):
# echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable
diff --git a/Documentation/trace/ftrace-uses.rst b/Documentation/trace/ftrace-uses.rst
new file mode 100644
index 000000000000..8494a801d341
--- /dev/null
+++ b/Documentation/trace/ftrace-uses.rst
@@ -0,0 +1,293 @@
+=================================
+Using ftrace to hook to functions
+=================================
+
+.. Copyright 2017 VMware Inc.
+.. Author: Steven Rostedt <srostedt@goodmis.org>
+.. License: The GNU Free Documentation License, Version 1.2
+.. (dual licensed under the GPL v2)
+
+Written for: 4.14
+
+Introduction
+============
+
+The ftrace infrastructure was originally created to attach callbacks to the
+beginning of functions in order to record and trace the flow of the kernel.
+But callbacks to the start of a function can have other use cases, such as
+live kernel patching or security monitoring. This document describes
+how to use ftrace to implement your own function callbacks.
+
+
+The ftrace context
+==================
+
+WARNING: The ability to add a callback to almost any function within the
+kernel comes with risks. A callback can be called from any context
+(normal, softirq, irq, and NMI). Callbacks can also be called just before
+going to idle, during CPU bring up and takedown, or when going to user space.
+This requires extra care as to what can be done inside a callback. A callback
+can be called outside the protective scope of RCU.
+
+The ftrace infrastructure has some protections against recursion and RCU,
+but one must still be very careful how the callbacks are used.
+
+
+The ftrace_ops structure
+========================
+
+To register a function callback, an ftrace_ops structure is required. This
+structure is used to tell ftrace what function should be called as the
+callback, as well as what protections the callback performs itself and does
+not require ftrace to handle.
+
+Only one field needs to be set when registering
+an ftrace_ops with ftrace:
+
+.. code-block:: c
+
+ struct ftrace_ops ops = {
+ .func = my_callback_func,
+ .flags = MY_FTRACE_FLAGS,
+ .private = any_private_data_structure,
+ };
+
+Both .flags and .private are optional. Only .func is required.
+
+To enable tracing call::
+
+ register_ftrace_function(&ops);
+
+To disable tracing call::
+
+ unregister_ftrace_function(&ops);
+
+The above functions are declared in the header::
+
+ #include <linux/ftrace.h>
+
+The registered callback will start being called some time after the
+register_ftrace_function() is called and before it returns. The exact time
+that callbacks start being called is dependent upon architecture and scheduling
+of services. The callback itself will have to handle any synchronization if it
+must begin at an exact moment.
+
+The unregister_ftrace_function() will guarantee that the callback is
+no longer being called by functions after the unregister_ftrace_function()
+returns. Note that to perform this guarantee, the unregister_ftrace_function()
+may take some time to finish.
+
+
+The callback function
+=====================
+
+The prototype of the callback function is as follows (as of v4.14):
+
+.. code-block:: c
+
+ void callback_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs);
+
+@ip
+ This is the instruction pointer of the function that is being traced.
+ (where the fentry or mcount is within the function)
+
+@parent_ip
+ This is the instruction pointer of the function that called the
+ function being traced (where the call of the function occurred).
+
+@op
+ This is a pointer to ftrace_ops that was used to register the callback.
+ This can be used to pass data to the callback via the private pointer.
+
+@regs
+ If the FTRACE_OPS_FL_SAVE_REGS or FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED
+ flags are set in the ftrace_ops structure, then this will be pointing
+ to the pt_regs structure as it would be if a breakpoint were placed
+ at the start of the function where ftrace was tracing. Otherwise it
+ either contains garbage or is NULL.
+
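+As an illustration, a minimal callback that only counts how often it has been
+invoked could look like the sketch below (the my_callback_count counter is
+made up for this example; it is not part of the ftrace API):
+
+.. code-block:: c
+
+ /* needs <linux/ftrace.h> and <linux/atomic.h> */
+ static atomic_t my_callback_count = ATOMIC_INIT(0);
+
+ static void notrace my_callback_func(unsigned long ip, unsigned long parent_ip,
+                                      struct ftrace_ops *op, struct pt_regs *regs)
+ {
+         /* Must be safe in any context: no locks taken, no sleeping. */
+         atomic_inc(&my_callback_count);
+ }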
+
+The ftrace FLAGS
+================
+
+The ftrace_ops flags are all defined and documented in include/linux/ftrace.h.
+Some of the flags are used for internal infrastructure of ftrace, but the
+ones that users should be aware of are the following:
+
+FTRACE_OPS_FL_SAVE_REGS
+ If the callback requires reading or modifying the pt_regs
+ passed to the callback, then it must set this flag. Registering
+ a ftrace_ops with this flag set on an architecture that does not
+ support passing of pt_regs to the callback will fail.
+
+FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED
+ Similar to SAVE_REGS but the registering of a
+ ftrace_ops on an architecture that does not support passing of regs
+ will not fail with this flag set. But the callback must check if
+ regs is NULL or not to determine if the architecture supports it.
+
+FTRACE_OPS_FL_RECURSION_SAFE
+ By default, a wrapper is added around the callback to
+ make sure that recursion of the function does not occur. That is,
+ if a function that is called as a result of the callback's execution
+ is also traced, ftrace will prevent the callback from being called
+ again. But this wrapper adds some overhead, and if the callback is
+ safe from recursion, it can set this flag to disable the ftrace
+ protection.
+
+ Note, if this flag is set, and recursion does occur, it could cause
+ the system to crash, and possibly reboot via a triple fault.
+
+ It is OK if another callback traces a function that is called by a
+ callback that is marked recursion safe. Recursion safe callbacks
+ must never trace any function that are called by the callback
+ itself or any nested functions that those functions call.
+
+ If this flag is set, it is possible that the callback will also
+ be called with preemption enabled (when CONFIG_PREEMPT is set),
+ but this is not guaranteed.
+
+FTRACE_OPS_FL_IPMODIFY
+ Requires FTRACE_OPS_FL_SAVE_REGS set. If the callback is to "hijack"
+ the traced function (have another function called instead of the
+ traced function), it requires setting this flag. This is what live
+ kernel patching uses. Without this flag the pt_regs->ip cannot be
+ modified.
+
+ Note, only one ftrace_ops with FTRACE_OPS_FL_IPMODIFY set may be
+ registered to any given function at a time.
+
+FTRACE_OPS_FL_RCU
+ If this is set, then the callback will only be called by functions
+ where RCU is "watching". This is required if the callback function
+ performs any rcu_read_lock() operation.
+
+ RCU stops watching when the system goes idle, the time when a CPU
+ is taken down and comes back online, and when entering from kernel
+ to user space and back to kernel space. During these transitions,
+ a callback may be executed and RCU synchronization will not protect
+ it.
+
+
+Filtering which functions to trace
+==================================
+
+If a callback is only to be called from specific functions, a filter must be
+set up. The filters are added by name, or ip if it is known.
+
+.. code-block:: c
+
+ int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset);
+
+@ops
+ The ops to set the filter with
+
+@buf
+ The string that holds the function filter text.
+@len
+ The length of the string.
+
+@reset
+ Non-zero to reset all filters before applying this filter.
+
+Filters denote which functions should be enabled when tracing is enabled.
+If @buf is NULL and reset is set, all functions will be enabled for tracing.
+
+The @buf can also be a glob expression to enable all functions that
+match a specific pattern.
+
+See Filter Commands in :file:`Documentation/trace/ftrace.txt`.
+
+To just trace the schedule function:
+
+.. code-block:: c
+
+ ret = ftrace_set_filter(&ops, "schedule", strlen("schedule"), 0);
+
+To add more functions, call the ftrace_set_filter() more than once with the
+@reset parameter set to zero. To remove the current filter set and replace it
+with new functions defined by @buf, have @reset be non-zero.
+
+To remove all the filtered functions and trace all functions:
+
+.. code-block:: c
+
+ ret = ftrace_set_filter(&ops, NULL, 0, 1);
+
+
+Sometimes more than one function has the same name. To trace just a specific
+function in this case, ftrace_set_filter_ip() can be used.
+
+.. code-block:: c
+
+ ret = ftrace_set_filter_ip(&ops, ip, 0, 0);
+
+Note that the ip must be the address where the call to fentry or mcount is
+located in the function. This function is used by perf and kprobes, which
+get the ip address from the user (usually using debug info from the kernel).
+
+If a glob is used to set the filter, functions can be added to a "notrace"
+list that will prevent those functions from calling the callback.
+The "notrace" list takes precedence over the "filter" list. If the
+two lists are non-empty and contain the same functions, the callback will not
+be called by any function.
+
+An empty "notrace" list means to allow all functions defined by the filter
+to be traced.
+
+.. code-block:: c
+
+ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset);
+
+This takes the same parameters as ftrace_set_filter() but will add the
+functions it finds to not be traced. This is a separate list from the
+filter list, and this function does not modify the filter list.
+
+A non-zero @reset will clear the "notrace" list before adding functions
+that match @buf to it.
+
+Clearing the "notrace" list is the same as clearing the filter list:
+
+.. code-block:: c
+
+ ret = ftrace_set_notrace(&ops, NULL, 0, 1);
+
+The filter and notrace lists may be changed at any time. If only a set of
+functions should call the callback, it is best to set the filters before
+registering the callback. But the changes may also happen after the callback
+has been registered.
+
+If a filter is in place, @reset is non-zero, and @buf contains a glob that
+matches functions, the switch will happen at the time of the
+ftrace_set_filter() call. At no time will all functions call the callback.
+
+.. code-block:: c
+
+ ftrace_set_filter(&ops, "schedule", strlen("schedule"), 1);
+
+ register_ftrace_function(&ops);
+
+ msleep(10);
+
+ ftrace_set_filter(&ops, "try_to_wake_up", strlen("try_to_wake_up"), 1);
+
+is not the same as:
+
+.. code-block:: c
+
+ ftrace_set_filter(&ops, "schedule", strlen("schedule"), 1);
+
+ register_ftrace_function(&ops);
+
+ msleep(10);
+
+ ftrace_set_filter(&ops, NULL, 0, 1);
+
+ ftrace_set_filter(&ops, "try_to_wake_up", strlen("try_to_wake_up"), 0);
+
+The latter has a short window, between the reset and the setting of the
+new filter, in which all functions will call the callback.
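+
+For completeness, here is a rough sketch of a module that ties the above
+together (my_callback_func is the example callback sketched earlier, and
+filtering on "schedule" is just an arbitrary choice for the example):
+
+.. code-block:: c
+
+ #include <linux/ftrace.h>
+ #include <linux/module.h>
+ #include <linux/string.h>
+
+ static struct ftrace_ops ops = {
+         .func  = my_callback_func,
+         .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+ };
+
+ static int __init my_tracer_init(void)
+ {
+         int ret;
+
+         /* Set the filter before registering so only schedule() is traced. */
+         ret = ftrace_set_filter(&ops, "schedule", strlen("schedule"), 1);
+         if (ret)
+                 return ret;
+
+         return register_ftrace_function(&ops);
+ }
+
+ static void __exit my_tracer_exit(void)
+ {
+         unregister_ftrace_function(&ops);
+ }
+
+ module_init(my_tracer_init);
+ module_exit(my_tracer_exit);
+ MODULE_LICENSE("GPL");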
diff --git a/Documentation/trace/intel_th.txt b/Documentation/trace/intel_th.txt
index f92070e7dde0..7a57165c2492 100644
--- a/Documentation/trace/intel_th.txt
+++ b/Documentation/trace/intel_th.txt
@@ -37,7 +37,7 @@ description is at Documentation/ABI/testing/sysfs-bus-intel_th-devices-gth.
STH registers an stm class device, through which it provides interface
to userspace and kernelspace software trace sources. See
-Documentation/tracing/stm.txt for more information on that.
+Documentation/trace/stm.txt for more information on that.
MSU can be configured to collect trace data into a system memory
buffer, which can later on be read from its device nodes via read() or
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index a7a813258013..ec3b46e27b7a 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -1858,18 +1858,6 @@ Mandatory ë°°ë¦¬ì–´ë“¤ì€ SMP 시스템ì—ì„œë„ UP 시스템ì—ì„œë„ SMP 효ê³
참고하세요.
- (*) lockless_dereference();
-
- ì´ í•¨ìˆ˜ëŠ” smp_read_barrier_depends() ë°ì´í„° ì˜ì¡´ì„± 배리어를 사용하는
- í¬ì¸í„° ì½ì–´ì˜¤ê¸° 래í¼(wrapper) 함수로 ìƒê°ë  수 있습니다.
-
- ê°ì²´ì˜ ë¼ì´í”„íƒ€ìž„ì´ RCU ì™¸ì˜ ë©”ì»¤ë‹ˆì¦˜ìœ¼ë¡œ 관리ëœë‹¤ëŠ” ì ì„ 제외하면
- rcu_dereference() ì™€ë„ ìœ ì‚¬í•œë°, 예를 들면 ê°ì²´ê°€ ì‹œìŠ¤í…œì´ êº¼ì§ˆ ë•Œì—만
- 제거ë˜ëŠ” 경우 등입니다. ë˜í•œ, lockless_dereference() ì€ RCU 와 함께
- 사용ë ìˆ˜ë„, RCU ì—†ì´ ì‚¬ìš©ë  ìˆ˜ë„ ìžˆëŠ” ì¼ë¶€ ë°ì´í„° êµ¬ì¡°ì— ì‚¬ìš©ë˜ê³ 
- 있습니다.
-
-
(*) dma_wmb();
(*) dma_rmb();
diff --git a/Documentation/translations/zh_CN/gpio.txt b/Documentation/translations/zh_CN/gpio.txt
index bce972521065..4f8bf30a41dc 100644
--- a/Documentation/translations/zh_CN/gpio.txt
+++ b/Documentation/translations/zh_CN/gpio.txt
@@ -257,9 +257,9 @@ GPIO 值的命令需è¦ç­‰å¾…其信æ¯æŽ’到队首æ‰å‘é€å‘½ä»¤ï¼Œå†èŽ·å¾—å…¶
简å•åœ°å…³é—­æœªä½¿ç”¨æ—¶é’Ÿ)。
对于 GPIO 使用 pinctrl å­ç³»ç»Ÿå·²çŸ¥çš„引脚,å­ç³»ç»Ÿåº”该被告知其使用情况;
-一个 gpiolib 驱动的 .request()æ“作应调用 pinctrl_request_gpio(),
-而 gpiolib 驱动的 .free()æ“作应调用 pinctrl_free_gpio()。pinctrl
-å­ç³»ç»Ÿå…许 pinctrl_request_gpio()在æŸä¸ªå¼•è„šæˆ–引脚组以å¤ç”¨å½¢å¼â€œå±žäºŽâ€
+一个 gpiolib 驱动的 .request()æ“作应调用 pinctrl_gpio_request(),
+而 gpiolib 驱动的 .free()æ“作应调用 pinctrl_gpio_free()。pinctrl
+å­ç³»ç»Ÿå…许 pinctrl_gpio_request()在æŸä¸ªå¼•è„šæˆ–引脚组以å¤ç”¨å½¢å¼â€œå±žäºŽâ€
一个设备时都æˆåŠŸè¿”回。
任何须将 GPIO ä¿¡å·å¯¼å‘适当引脚的引脚å¤ç”¨ç¡¬ä»¶çš„编程应该å‘生在 GPIO
diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt
index fbc397d17e98..441a4b9b666f 100644
--- a/Documentation/usb/gadget-testing.txt
+++ b/Documentation/usb/gadget-testing.txt
@@ -773,7 +773,7 @@ host:
# cat /dev/usb/lp0
More advanced testing can be done with the prn_example
-described in Documentation/usb/gadget-printer.txt.
+described in Documentation/usb/gadget_printer.txt.
20. UAC1 function (virtual ALSA card, using u_audio API)
diff --git a/Documentation/vm/mmu_notifier.txt b/Documentation/vm/mmu_notifier.txt
new file mode 100644
index 000000000000..23b462566bb7
--- /dev/null
+++ b/Documentation/vm/mmu_notifier.txt
@@ -0,0 +1,93 @@
+When do you need to notify inside the page table lock?
+
+When clearing a pte/pmd we are given a choice to notify the event under the
+page table lock (the notify version of *_clear_flush calls
+mmu_notifier_invalidate_range()). But that notification is not necessary in
+all cases.
+
+Secondary TLBs (non-CPU TLBs) are, for instance, IOMMU TLBs or device TLBs
+(when a device uses something like ATS/PASID to get the IOMMU to walk the CPU
+page table to access a process virtual address space). There are only 2 cases
+when you need to notify those secondary TLBs while holding the page table lock
+when clearing a pte/pmd:
+
+ A) the page backing the address is freed before mmu_notifier_invalidate_range_end()
+ B) a page table entry is updated to point to a new page (COW, write fault
+ on zero page, __replace_page(), ...)
+
+Case A is obvious: you do not want to take the risk of the device writing to
+a page that might now be used by some completely different task.
+
+Case B is more subtle. For correctness it requires the following sequence to
+happen:
+ - take page table lock
+ - clear page table entry and notify ([pmd/pte]p_huge_clear_flush_notify())
+ - set page table entry to point to new page
+
+If clearing the page table entry is not followed by a notify before setting
+the new pte/pmd value, then you can break the memory model (e.g. C11 or C++11)
+for the device.
+
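+As a rough sketch of case B (assuming an enclosing
+mmu_notifier_invalidate_range_start()/end() pair and a new_page that has
+already been prepared; this is not a complete COW implementation), the
+sequence under the page table lock looks like:
+
+  pte_t *ptep;
+  spinlock_t *ptl;
+
+  ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
+  /* Clear the old entry and notify the secondary TLBs ... */
+  ptep_clear_flush_notify(vma, addr, ptep);
+  /* ... before the new page becomes visible through the page table. */
+  set_pte_at(mm, addr, ptep, mk_pte(new_page, vma->vm_page_prot));
+  pte_unmap_unlock(ptep, ptl);
+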
+Consider the following scenario (the device uses a feature similar to
+ATS/PASID):
+
+Take two addresses addrA and addrB such that |addrA - addrB| >= PAGE_SIZE; we
+assume they are write protected for COW (the other cases of B apply too).
+
+[Time N] --------------------------------------------------------------------
+CPU-thread-0 {try to write to addrA}
+CPU-thread-1 {try to write to addrB}
+CPU-thread-2 {}
+CPU-thread-3 {}
+DEV-thread-0 {read addrA and populate device TLB}
+DEV-thread-2 {read addrB and populate device TLB}
+[Time N+1] ------------------------------------------------------------------
+CPU-thread-0 {COW_step0: {mmu_notifier_invalidate_range_start(addrA)}}
+CPU-thread-1 {COW_step0: {mmu_notifier_invalidate_range_start(addrB)}}
+CPU-thread-2 {}
+CPU-thread-3 {}
+DEV-thread-0 {}
+DEV-thread-2 {}
+[Time N+2] ------------------------------------------------------------------
+CPU-thread-0 {COW_step1: {update page table to point to new page for addrA}}
+CPU-thread-1 {COW_step1: {update page table to point to new page for addrB}}
+CPU-thread-2 {}
+CPU-thread-3 {}
+DEV-thread-0 {}
+DEV-thread-2 {}
+[Time N+3] ------------------------------------------------------------------
+CPU-thread-0 {preempted}
+CPU-thread-1 {preempted}
+CPU-thread-2 {write to addrA which is a write to new page}
+CPU-thread-3 {}
+DEV-thread-0 {}
+DEV-thread-2 {}
+[Time N+3] ------------------------------------------------------------------
+CPU-thread-0 {preempted}
+CPU-thread-1 {preempted}
+CPU-thread-2 {}
+CPU-thread-3 {write to addrB which is a write to new page}
+DEV-thread-0 {}
+DEV-thread-2 {}
+[Time N+4] ------------------------------------------------------------------
+CPU-thread-0 {preempted}
+CPU-thread-1 {COW_step3: {mmu_notifier_invalidate_range_end(addrB)}}
+CPU-thread-2 {}
+CPU-thread-3 {}
+DEV-thread-0 {}
+DEV-thread-2 {}
+[Time N+5] ------------------------------------------------------------------
+CPU-thread-0 {preempted}
+CPU-thread-1 {}
+CPU-thread-2 {}
+CPU-thread-3 {}
+DEV-thread-0 {read addrA from old page}
+DEV-thread-2 {read addrB from new page}
+
+So here, because at time N+2 the clearing of the page table entry was not
+paired with a notification to invalidate the secondary TLB, the device sees the
+new value for addrB before seeing the new value for addrA. This breaks total
+memory ordering for the device.
+
+When changing a pte to write protect it, or to point to a new write-protected
+page with the same content (KSM), it is fine to delay the
+mmu_notifier_invalidate_range call to mmu_notifier_invalidate_range_end()
+outside the page table lock. This is true even if the thread doing the page
+table update is preempted right after releasing the page table lock but before
+calling mmu_notifier_invalidate_range_end().
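+
+For contrast, a rough sketch of this write-protect case (again assuming an
+enclosing mmu_notifier_invalidate_range_start()/end() pair; not a complete
+implementation):
+
+  pte_t *ptep, entry;
+  spinlock_t *ptl;
+
+  ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
+  entry = ptep_clear_flush(vma, addr, ptep);    /* no notify needed here */
+  /* The content seen through the secondary TLB stays valid, so the
+     invalidate can wait until mmu_notifier_invalidate_range_end(). */
+  set_pte_at(mm, addr, ptep, pte_wrprotect(entry));
+  pte_unmap_unlock(ptep, ptl);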
diff --git a/Documentation/w1/slaves/00-INDEX b/Documentation/w1/slaves/00-INDEX
index 8d76718e1ea2..68946f83e579 100644
--- a/Documentation/w1/slaves/00-INDEX
+++ b/Documentation/w1/slaves/00-INDEX
@@ -10,3 +10,5 @@ w1_ds2438
- The Maxim/Dallas Semiconductor ds2438 smart battery monitor.
w1_ds28e04
- The Maxim/Dallas Semiconductor ds28e04 eeprom.
+w1_ds28e17
+ - The Maxim/Dallas Semiconductor ds28e17 1-Wire-to-I2C Master Bridge.
diff --git a/Documentation/w1/slaves/w1_ds28e17 b/Documentation/w1/slaves/w1_ds28e17
new file mode 100644
index 000000000000..7fcfad5b4a37
--- /dev/null
+++ b/Documentation/w1/slaves/w1_ds28e17
@@ -0,0 +1,68 @@
+Kernel driver w1_ds28e17
+========================
+
+Supported chips:
+ * Maxim DS28E17 1-Wire-to-I2C Master Bridge
+
+supported family codes:
+ W1_FAMILY_DS28E17 0x19
+
+Author: Jan Kandziora <jjj@gmx.de>
+
+
+Description
+-----------
+The DS28E17 is a Onewire slave device which acts as an I2C bus master.
+
+This driver creates a new I2C bus for any DS28E17 device detected. I2C buses
+come and go as the DS28E17 devices come and go. I2C slave devices connected to
+a DS28E17 can be accessed by the kernel or userspace tools as if they were
+connected to a "native" I2C bus master.
+
+
+A udev rule like the following
+-------------------------------------------------------------------------------
+SUBSYSTEM=="i2c-dev", KERNEL=="i2c-[0-9]*", ATTRS{name}=="w1-19-*", \
+ SYMLINK+="i2c-$attr{name}"
+-------------------------------------------------------------------------------
+may be used to create stable /dev/i2c- entries based on the unique id of the
+DS28E17 chip.
+
+
+Driver parameters are:
+
+speed:
+ This sets up the default I2C speed a DS28E17 gets configured for as soon
+ as it is connected. The power-on default of the DS28E17 is 400kBaud, but
+ chips may come and go on the Onewire bus without being de-powered, and
+ as soon as the "w1_ds28e17" driver notices a freshly connected or
+ reconnected DS28E17 device on the Onewire bus, it will re-apply this
+ setting.
+
+ Valid values are 100, 400, 900 [kBaud]. Any other value means the
+ current DS28E17 setting is left alone on detect. The default value is 100.
+
+stretch:
+ This sets up the default stretch value used for freshly connected
+ DS28E17 devices. It is a multiplier used on the calculation of the busy
+ wait time for an I2C transfer. This is to account for I2C slave devices
+ which make heavy use of the I2C clock stretching feature, for which the
+ needed timeout cannot be pre-calculated correctly. As the w1_ds28e17
+ driver checks the DS28E17's busy flag in a loop after the precalculated
+ wait time, this setting should rarely need tweaking.
+
+ Leave it at 1 unless you get ETIMEDOUT errors and a "w1_slave_driver
+ 19-00000002dbd8: busy timeout" in the kernel log.
+
+ Valid values are 1 to 9. The default is 1.
+
+
+The driver creates sysfs files /sys/bus/w1/devices/19-<id>/speed and
+/sys/bus/w1/devices/19-<id>/stretch for each device, preloaded with the default
+settings from the driver parameters. They may be changed at any time. In addition, a
+directory /sys/bus/w1/devices/19-<id>/i2c-<nnn> for the I2C bus master sysfs
+structure is created.
+
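+For illustration, a slave device behind the bridge can then be accessed like
+on any other I2C adapter, e.g. from userspace through i2c-dev (the bus number
+5 and the slave address 0x50 below are made-up examples):
+
+  #include <fcntl.h>
+  #include <linux/i2c-dev.h>
+  #include <sys/ioctl.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          unsigned char buf[16];
+          int fd = open("/dev/i2c-5", O_RDWR);  /* bus created by w1_ds28e17 */
+
+          if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x50) < 0)  /* e.g. an EEPROM */
+                  return 1;
+          read(fd, buf, sizeof(buf));           /* plain I2C read transfer */
+          close(fd);
+          return 0;
+  }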
+
+See https://github.com/ianka/w1_ds28e17 for even more information.
+
diff --git a/Documentation/watchdog/hpwdt.txt b/Documentation/watchdog/hpwdt.txt
index 7a9f635d0258..6d866c537127 100644
--- a/Documentation/watchdog/hpwdt.txt
+++ b/Documentation/watchdog/hpwdt.txt
@@ -15,7 +15,7 @@ Last reviewed: 05/20/2016
Watchdog functionality is enabled like any other common watchdog driver. That
is, an application needs to be started that kicks off the watchdog timer. A
- basic application exists in the Documentation/watchdog/src directory called
+ basic application exists in tools/testing/selftests/watchdog/ named
watchdog-test.c. Simply compile the C file and kick it off. If the system
gets into a bad state and hangs, the HPE ProLiant iLO timer register will
not be updated in a timely fashion and a hardware system reset (also known as
diff --git a/Documentation/watchdog/pcwd-watchdog.txt b/Documentation/watchdog/pcwd-watchdog.txt
index 4f68052395c0..b8e60a441a43 100644
--- a/Documentation/watchdog/pcwd-watchdog.txt
+++ b/Documentation/watchdog/pcwd-watchdog.txt
@@ -25,7 +25,7 @@ Last reviewed: 10/05/2007
If you want to write a program to be compatible with the PC Watchdog
driver, simply use of modify the watchdog test program:
- Documentation/watchdog/src/watchdog-test.c
+ tools/testing/selftests/watchdog/watchdog-test.c
Other IOCTL functions include:
diff --git a/Documentation/x86/amd-memory-encryption.txt b/Documentation/x86/amd-memory-encryption.txt
index f512ab718541..afc41f544dab 100644
--- a/Documentation/x86/amd-memory-encryption.txt
+++ b/Documentation/x86/amd-memory-encryption.txt
@@ -1,4 +1,5 @@
-Secure Memory Encryption (SME) is a feature found on AMD processors.
+Secure Memory Encryption (SME) and Secure Encrypted Virtualization (SEV) are
+features found on AMD processors.
SME provides the ability to mark individual pages of memory as encrypted using
the standard x86 page tables. A page that is marked encrypted will be
@@ -6,24 +7,38 @@ automatically decrypted when read from DRAM and encrypted when written to
DRAM. SME can therefore be used to protect the contents of DRAM from physical
attacks on the system.
+SEV enables running encrypted virtual machines (VMs) in which the code and data
+of the guest VM are secured so that a decrypted version is available only
+within the VM itself. SEV guest VMs have the concept of private and shared
+memory. Private memory is encrypted with the guest-specific key, while shared
+memory may be encrypted with the hypervisor key. When SME is enabled, the
+hypervisor key is the same key that is used by SME.
+
A page is encrypted when a page table entry has the encryption bit set (see
below on how to determine its position). The encryption bit can also be
specified in the cr3 register, allowing the PGD table to be encrypted. Each
successive level of page tables can also be encrypted by setting the encryption
bit in the page table entry that points to the next table. This allows the full
page table hierarchy to be encrypted. Note, this means that just because the
-encryption bit is set in cr3, doesn't imply the full hierarchy is encyrpted.
+encryption bit is set in cr3, doesn't imply the full hierarchy is encrypted.
Each page table entry in the hierarchy needs to have the encryption bit set to
achieve that. So, theoretically, you could have the encryption bit set in cr3
so that the PGD is encrypted, but not set the encryption bit in the PGD entry
for a PUD which results in the PUD pointed to by that entry to not be
encrypted.
-Support for SME can be determined through the CPUID instruction. The CPUID
-function 0x8000001f reports information related to SME:
+When SEV is enabled, instruction pages and guest page tables are always treated
+as private. All DMA operations inside the guest must be performed on shared
+memory. The memory encryption bit is controlled by the guest OS when it is
+operating in 64-bit or 32-bit PAE mode; in all other modes the SEV hardware
+forces the memory encryption bit to 1.
+
+Support for SME and SEV can be determined through the CPUID instruction. The
+CPUID function 0x8000001f reports information related to SME:
0x8000001f[eax]:
Bit[0] indicates support for SME
+ Bit[1] indicates support for SEV
0x8000001f[ebx]:
Bits[5:0] pagetable bit number used to activate memory
encryption
@@ -39,6 +54,13 @@ determine if SME is enabled and/or to enable memory encryption:
Bit[23] 0 = memory encryption features are disabled
1 = memory encryption features are enabled
+If SEV is supported, MSR 0xc0010131 (MSR_AMD64_SEV) can be used to determine if
+SEV is active:
+
+ 0xc0010131:
+ Bit[0] 0 = memory encryption is not active
+ 1 = memory encryption is active
+
Linux relies on BIOS to set this bit if BIOS has determined that the reduction
in the physical address space as a result of enabling memory encryption (see
CPUID information above) will not conflict with the address space resource
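
As an illustration (not part of this patch), a small user-space sketch using
GCC's <cpuid.h> helper can query the support bits described above; note that
whether SEV is actually active can only be determined from MSR 0xc0010131,
which is not readable from unprivileged user space:

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID leaf 0x8000001f reports the memory encryption features. */
		if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx)) {
			printf("CPUID leaf 0x8000001f is not available\n");
			return 1;
		}
		printf("SME supported: %s\n", (eax & (1u << 0)) ? "yes" : "no");
		printf("SEV supported: %s\n", (eax & (1u << 1)) ? "yes" : "no");
		printf("C-bit position: %u\n", ebx & 0x3f);
		return 0;
	}
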
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt
index 4d8848e4e224..6851854cf69d 100644
--- a/Documentation/x86/intel_rdt_ui.txt
+++ b/Documentation/x86/intel_rdt_ui.txt
@@ -87,6 +87,17 @@ with the following files:
bytes) at which a previously used LLC_occupancy
counter can be considered for re-use.
+Finally, in the top level of the "info" directory there is a file
+named "last_cmd_status". This is reset with every "command" issued
+via the file system (making new directories or writing to any of the
+control files). If the command was successful, it will read as "ok".
+If the command failed, it will provide more information than can be
+conveyed in the error returns from file operations. E.g.
+
+ # echo L3:0=f7 > schemata
+ bash: echo: write error: Invalid argument
+ # cat info/last_cmd_status
+ mask f7 has non-consecutive 1-bits
Resource alloc and monitor groups
---------------------------------
diff --git a/Documentation/x86/orc-unwinder.txt b/Documentation/x86/orc-unwinder.txt
index af0c9a4c65a6..cd4b29be29af 100644
--- a/Documentation/x86/orc-unwinder.txt
+++ b/Documentation/x86/orc-unwinder.txt
@@ -4,7 +4,7 @@ ORC unwinder
Overview
--------
-The kernel CONFIG_ORC_UNWINDER option enables the ORC unwinder, which is
+The kernel CONFIG_UNWINDER_ORC option enables the ORC unwinder, which is
similar in concept to a DWARF unwinder. The difference is that the
format of the ORC data is much simpler than DWARF, which in turn allows
the ORC unwinder to be much simpler and faster.
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index b0798e281aa6..3448e675b462 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -34,7 +34,7 @@ ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space
ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole
ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
... unused hole ...
-ffd8000000000000 - fff7ffffffffffff (=53 bits) kasan shadow memory (8PB)
+ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
... unused hole ...
ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
... unused hole ...
diff --git a/MAINTAINERS b/MAINTAINERS
index 170d46bd5692..c8dfe6cdc4dd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -527,11 +527,6 @@ W: http://ez.analog.com/community/linux-device-drivers
S: Supported
F: drivers/input/misc/adxl34x.c
-AEDSP16 DRIVER
-M: Riccardo Facchetti <fizban@tin.it>
-S: Maintained
-F: sound/oss/aedsp16.c
-
AF9013 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -759,8 +754,6 @@ F: drivers/gpu/drm/amd/amdkfd/
F: drivers/gpu/drm/amd/include/cik_structs.h
F: drivers/gpu/drm/amd/include/kgd_kfd_interface.h
F: drivers/gpu/drm/amd/include/vi_structs.h
-F: drivers/gpu/drm/radeon/radeon_kfd.c
-F: drivers/gpu/drm/radeon/radeon_kfd.h
F: include/uapi/linux/kfd_ioctl.h
AMD SEATTLE DEVICE TREE SUPPORT
@@ -1961,6 +1954,14 @@ M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+ARM/TEGRA HDMI CEC SUBSYSTEM SUPPORT
+M: Hans Verkuil <hans.verkuil@cisco.com>
+L: linux-tegra@vger.kernel.org
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/platform/tegra-cec/
+F: Documentation/devicetree/bindings/media/tegra-cec.txt
+
ARM/TETON BGA MACHINE SUPPORT
M: "Mark F. Brown" <mark.brown314@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2029,6 +2030,7 @@ M: Masahiro Yamada <yamada.masahiro@socionext.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-uniphier.git
S: Maintained
+F: Documentation/devicetree/bindings/gpio/gpio-uniphier.txt
F: arch/arm/boot/dts/uniphier*
F: arch/arm/include/asm/hardware/cache-uniphier.h
F: arch/arm/mach-uniphier/
@@ -2036,6 +2038,7 @@ F: arch/arm/mm/cache-uniphier.c
F: arch/arm64/boot/dts/socionext/
F: drivers/bus/uniphier-system-bus.c
F: drivers/clk/uniphier/
+F: drivers/gpio/gpio-uniphier.c
F: drivers/i2c/busses/i2c-uniphier*
F: drivers/irqchip/irq-uniphier-aidet.c
F: drivers/pinctrl/uniphier/
@@ -2247,7 +2250,7 @@ F: include/linux/dmaengine.h
F: include/linux/async_tx.h
AT24 EEPROM DRIVER
-M: Wolfram Sang <wsa@the-dreams.de>
+M: Bartosz Golaszewski <brgl@bgdev.pl>
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/misc/eeprom/at24.c
@@ -2562,10 +2565,12 @@ S: Maintained
F: drivers/net/hamradio/baycom*
BCACHE (BLOCK LAYER CACHE)
+M: Michael Lyle <mlyle@lyle.org>
M: Kent Overstreet <kent.overstreet@gmail.com>
L: linux-bcache@vger.kernel.org
W: http://bcache.evilpiepirate.org
-S: Orphan
+C: irc://irc.oftc.net/bcache
+S: Maintained
F: drivers/md/bcache/
BDISP ST MEDIA DRIVER
@@ -2897,6 +2902,13 @@ S: Supported
F: drivers/gpio/gpio-brcmstb.c
F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
+BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER
+M: Al Cooper <alcooperx@gmail.com>
+L: linux-kernel@vger.kernel.org
+L: bcm-kernel-feedback-list@broadcom.com
+S: Maintained
+F: drivers/phy/broadcom/phy-brcm-usb*
+
BROADCOM GENET ETHERNET DRIVER
M: Doug Berger <opendmb@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
@@ -3258,6 +3270,15 @@ F: include/uapi/linux/cec.h
F: include/uapi/linux/cec-funcs.h
F: Documentation/devicetree/bindings/media/cec.txt
+CEC GPIO DRIVER
+M: Hans Verkuil <hans.verkuil@cisco.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Supported
+F: drivers/media/platform/cec-gpio/
+F: Documentation/devicetree/bindings/media/cec-gpio.txt
+
CELL BROADBAND ENGINE ARCHITECTURE
M: Arnd Bergmann <arnd@arndb.de>
L: linuxppc-dev@lists.ozlabs.org
@@ -3472,7 +3493,7 @@ COCCINELLE/Semantic Patches (SmPL)
M: Julia Lawall <Julia.Lawall@lip6.fr>
M: Gilles Muller <Gilles.Muller@lip6.fr>
M: Nicolas Palix <nicolas.palix@imag.fr>
-M: Michal Marek <mmarek@suse.com>
+M: Michal Marek <michal.lkml@markovi.net>
L: cocci@systeme.lip6.fr (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc
W: http://coccinelle.lip6.fr/
@@ -3586,7 +3607,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
S: Maintained
F: Documentation/cgroup-v1/cpusets.txt
F: include/linux/cpuset.h
-F: kernel/cpuset.c
+F: kernel/cgroup/cpuset.c
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
@@ -3643,6 +3664,8 @@ F: drivers/cpufreq/arm_big_little_dt.c
CPU POWER MONITORING SUBSYSTEM
M: Thomas Renninger <trenn@suse.com>
+M: Shuah Khan <shuahkh@osg.samsung.com>
+M: Shuah Khan <shuah@kernel.org>
L: linux-pm@vger.kernel.org
S: Maintained
F: tools/power/cpupower/
@@ -4098,6 +4121,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git
T: quilt http://people.redhat.com/agk/patches/linux/editing/
S: Maintained
F: Documentation/device-mapper/
+F: drivers/md/Makefile
+F: drivers/md/Kconfig
F: drivers/md/dm*
F: drivers/md/persistent-data/
F: include/linux/device-mapper.h
@@ -4121,7 +4146,7 @@ F: Documentation/devicetree/bindings/mfd/da90*.txt
F: Documentation/devicetree/bindings/input/da90??-onkey.txt
F: Documentation/devicetree/bindings/thermal/da90??-thermal.txt
F: Documentation/devicetree/bindings/regulator/da92*.txt
-F: Documentation/devicetree/bindings/watchdog/da92??-wdt.txt
+F: Documentation/devicetree/bindings/watchdog/da90??-wdt.txt
F: Documentation/devicetree/bindings/sound/da[79]*.txt
F: drivers/gpio/gpio-da90??.c
F: drivers/hwmon/da90??-hwmon.c
@@ -4241,7 +4266,7 @@ S: Maintained
F: drivers/dma/
F: include/linux/dmaengine.h
F: Documentation/devicetree/bindings/dma/
-F: Documentation/dmaengine/
+F: Documentation/driver-api/dmaengine/
T: git git://git.infradead.org/users/vkoul/slave-dma.git
DMA MAPPING HELPERS
@@ -4373,6 +4398,12 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/bochs/
+DRM DRIVER FOR FARADAY TVE200 TV ENCODER
+M: Linus Walleij <linus.walleij@linaro.org>
+T: git git://anongit.freedesktop.org/drm/drm-misc
+S: Maintained
+F: drivers/gpu/drm/tve200/
+
DRM DRIVER FOR INTEL I810 VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/i810/
@@ -4516,7 +4547,7 @@ L: dri-devel@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/sun4i/
F: Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mripard/linux.git
+T: git git://anongit.freedesktop.org/drm/drm-misc
DRM DRIVERS FOR AMLOGIC SOCS
M: Neil Armstrong <narmstrong@baylibre.com>
@@ -4700,7 +4731,7 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
DRM PANEL DRIVERS
M: Thierry Reding <thierry.reding@gmail.com>
L: dri-devel@lists.freedesktop.org
-T: git git://anongit.freedesktop.org/tegra/linux.git
+T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/drm_panel.c
F: drivers/gpu/drm/panel/
@@ -4913,13 +4944,19 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/highbank*
-EDAC-CAVIUM
+EDAC-CAVIUM OCTEON
M: Ralf Baechle <ralf@linux-mips.org>
M: David Daney <david.daney@cavium.com>
L: linux-edac@vger.kernel.org
L: linux-mips@linux-mips.org
S: Supported
F: drivers/edac/octeon_edac*
+
+EDAC-CAVIUM THUNDERX
+M: David Daney <david.daney@cavium.com>
+M: Jan Glauber <jglauber@cavium.com>
+L: linux-edac@vger.kernel.org
+S: Supported
F: drivers/edac/thunderx_edac*
EDAC-CORE
@@ -5220,8 +5257,7 @@ F: fs/ext4/
Extended Verification Module (EVM)
M: Mimi Zohar <zohar@linux.vnet.ibm.com>
-L: linux-ima-devel@lists.sourceforge.net
-L: linux-security-module@vger.kernel.org
+L: linux-integrity@vger.kernel.org
S: Supported
F: security/integrity/evm/
@@ -5438,7 +5474,7 @@ K: fmc_d.*register
FPGA MANAGER FRAMEWORK
M: Alan Tull <atull@kernel.org>
-R: Moritz Fischer <mdf@kernel.org>
+M: Moritz Fischer <mdf@kernel.org>
L: linux-fpga@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
@@ -5463,6 +5499,7 @@ F: drivers/net/wan/sdla.c
FRAMEBUFFER LAYER
M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+L: dri-devel@lists.freedesktop.org
L: linux-fbdev@vger.kernel.org
T: git git://github.com/bzolnier/linux.git
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
@@ -5476,7 +5513,7 @@ F: include/uapi/linux/fb.h
FREESCALE CAAM (Cryptographic Acceleration and Assurance Module) DRIVER
M: Horia Geantă <horia.geanta@nxp.com>
-M: Dan Douglass <dan.douglass@nxp.com>
+M: Aymen Sghaier <aymen.sghaier@nxp.com>
L: linux-crypto@vger.kernel.org
S: Maintained
F: drivers/crypto/caam/
@@ -5656,6 +5693,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/fscrypt.git
S: Supported
F: fs/crypto/
F: include/linux/fscrypt*.h
+F: Documentation/filesystems/fscrypt.rst
FUJITSU FR-V (FRV) PORT
S: Orphan
@@ -6249,6 +6287,13 @@ S: Maintained
F: drivers/net/ethernet/hisilicon/
F: Documentation/devicetree/bindings/net/hisilicon*.txt
+HISILICON PMU DRIVER
+M: Shaokun Zhang <zhangshaokun@hisilicon.com>
+W: http://www.hisilicon.com
+S: Supported
+F: drivers/perf/hisilicon
+F: Documentation/perf/hisi-pmu.txt
+
HISILICON ROCE DRIVER
M: Lijun Ou <oulijun@huawei.com>
M: Wei Hu(Xavier) <xavier.huwei@huawei.com>
@@ -6782,8 +6827,6 @@ F: drivers/ipack/
INFINIBAND SUBSYSTEM
M: Doug Ledford <dledford@redhat.com>
-M: Sean Hefty <sean.hefty@intel.com>
-M: Hal Rosenstock <hal.rosenstock@gmail.com>
L: linux-rdma@vger.kernel.org
W: http://www.openfabrics.org/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
@@ -6848,9 +6891,7 @@ L: linux-crypto@vger.kernel.org
INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
M: Mimi Zohar <zohar@linux.vnet.ibm.com>
M: Dmitry Kasatkin <dmitry.kasatkin@gmail.com>
-L: linux-ima-devel@lists.sourceforge.net
-L: linux-ima-user@lists.sourceforge.net
-L: linux-security-module@vger.kernel.org
+L: linux-integrity@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git
S: Supported
F: security/integrity/ima/
@@ -7440,10 +7481,8 @@ F: mm/kasan/
F: scripts/Makefile.kasan
KCONFIG
-M: "Yann E. MORIN" <yann.morin.1998@free.fr>
L: linux-kbuild@vger.kernel.org
-T: git git://gitorious.org/linux-kconfig/linux-kconfig
-S: Maintained
+S: Orphan
F: Documentation/kbuild/kconfig-language.txt
F: scripts/kconfig/
@@ -7472,7 +7511,7 @@ F: fs/autofs4/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
M: Masahiro Yamada <yamada.masahiro@socionext.com>
-M: Michal Marek <mmarek@suse.com>
+M: Michal Marek <michal.lkml@markovi.net>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
L: linux-kbuild@vger.kernel.org
S: Maintained
@@ -7633,8 +7672,7 @@ F: kernel/kexec*
KEYS-ENCRYPTED
M: Mimi Zohar <zohar@linux.vnet.ibm.com>
-M: David Safford <safford@us.ibm.com>
-L: linux-security-module@vger.kernel.org
+L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
S: Supported
F: Documentation/security/keys/trusted-encrypted.rst
@@ -7642,9 +7680,8 @@ F: include/keys/encrypted-type.h
F: security/keys/encrypted-keys/
KEYS-TRUSTED
-M: David Safford <safford@us.ibm.com>
M: Mimi Zohar <zohar@linux.vnet.ibm.com>
-L: linux-security-module@vger.kernel.org
+L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
S: Supported
F: Documentation/security/keys/trusted-encrypted.rst
@@ -7677,16 +7714,6 @@ F: include/linux/kdb.h
F: include/linux/kgdb.h
F: kernel/debug/
-KMEMCHECK
-M: Vegard Nossum <vegardno@ifi.uio.no>
-M: Pekka Enberg <penberg@kernel.org>
-S: Maintained
-F: Documentation/dev-tools/kmemcheck.rst
-F: arch/x86/include/asm/kmemcheck.h
-F: arch/x86/mm/kmemcheck/
-F: include/linux/kmemcheck.h
-F: mm/kmemcheck.c
-
KMEMLEAK
M: Catalin Marinas <catalin.marinas@arm.com>
S: Maintained
@@ -9212,12 +9239,6 @@ F: include/linux/dt-bindings/mux/
F: include/linux/mux/
F: drivers/mux/
-MULTISOUND SOUND DRIVER
-M: Andrew Veliath <andrewtv@usa.net>
-S: Maintained
-F: Documentation/sound/oss/MultiSound
-F: sound/oss/msnd*
-
MULTITECH MULTIPORT CARD (ISICOM)
S: Orphan
F: drivers/tty/isicom.c
@@ -10045,7 +10066,11 @@ T: git git://github.com/openrisc/linux.git
L: openrisc@lists.librecores.org
W: http://openrisc.io
S: Maintained
+F: Documentation/devicetree/bindings/openrisc/
+F: Documentation/openrisc/
F: arch/openrisc/
+F: drivers/irqchip/irq-ompic.c
+F: drivers/irqchip/irq-or1k-*
OPENVSWITCH
M: Pravin Shelar <pshelar@nicira.com>
@@ -10063,7 +10088,7 @@ M: Stephen Boyd <sboyd@codeaurora.org>
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
-F: drivers/base/power/opp/
+F: drivers/opp/
F: include/linux/pm_opp.h
F: Documentation/power/opp.txt
F: Documentation/devicetree/bindings/opp/
@@ -10507,6 +10532,14 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/pcie-kirin.txt
F: drivers/pci/dwc/pcie-kirin.c
+PCIE DRIVER FOR HISILICON STB
+M: Jianguo Sun <sunjianguo1@huawei.com>
+M: Shawn Guo <shawn.guo@linaro.org>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
+F: drivers/pci/dwc/pcie-histb.c
+
PCIE DRIVER FOR MEDIATEK
M: Ryder Lee <ryder.lee@mediatek.com>
L: linux-pci@vger.kernel.org
@@ -10530,6 +10563,13 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/rockchip-pcie.txt
F: drivers/pci/host/pcie-rockchip.c
+PCI DRIVER FOR V3 SEMICONDUCTOR V360EPC
+M: Linus Walleij <linus.walleij@linaro.org>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/v3-v360epc-pci.txt
+F: drivers/pci/host/pci-v3-semi.c
+
PCIE DRIVER FOR ST SPEAR13XX
M: Pratyush Anand <pratyush.anand@gmail.com>
L: linux-pci@vger.kernel.org
@@ -10673,6 +10713,7 @@ PIN CONTROLLER - RENESAS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
M: Geert Uytterhoeven <geert+renesas@glider.be>
L: linux-renesas-soc@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git sh-pfc
S: Maintained
F: drivers/pinctrl/sh-pfc/
@@ -11054,7 +11095,6 @@ F: drivers/mtd/nand/pxa3xx_nand.c
QAT DRIVER
M: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
-M: Salvatore Benedetto <salvatore.benedetto@intel.com>
L: qat-linux@intel.com
S: Supported
F: drivers/crypto/qat/
@@ -11102,6 +11142,7 @@ F: drivers/net/ethernet/qlogic/qede/
QLOGIC QL4xxx RDMA DRIVER
M: Ram Amrani <Ram.Amrani@cavium.com>
+M: Michal Kalderon <Michal.Kalderon@cavium.com>
M: Ariel Elior <Ariel.Elior@cavium.com>
L: linux-rdma@vger.kernel.org
S: Supported
@@ -11536,6 +11577,16 @@ S: Maintained
F: drivers/mtd/nand/r852.c
F: drivers/mtd/nand/r852.h
+RISC-V ARCHITECTURE
+M: Palmer Dabbelt <palmer@sifive.com>
+M: Albert Ou <albert@sifive.com>
+L: patches@groups.riscv.org
+T: git https://github.com/riscv/riscv-linux
+S: Supported
+F: arch/riscv/
+K: riscv
+N: riscv
+
ROCCAT DRIVERS
M: Stefan Achatz <erazor_de@users.sourceforge.net>
W: http://sourceforge.net/projects/roccat/
@@ -11544,6 +11595,13 @@ F: drivers/hid/hid-roccat*
F: include/linux/hid-roccat*
F: Documentation/ABI/*/sysfs-driver-hid-roccat*
+ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER
+M: Jacob chen <jacob2.chen@rock-chips.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/platform/rockchip/rga/
+F: Documentation/devicetree/bindings/media/rockchip-rga.txt
+
ROCKER DRIVER
M: Jiri Pirko <jiri@resnulli.us>
L: netdev@vger.kernel.org
@@ -11788,7 +11846,7 @@ L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
F: drivers/crypto/exynos-rng.c
-F: Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt
+F: Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
SAMSUNG FRAMEBUFFER DRIVER
M: Jingoo Han <jingoohan1@gmail.com>
@@ -12071,10 +12129,15 @@ L: linux-mmc@vger.kernel.org
S: Maintained
F: drivers/mmc/host/sdhci-spear.c
+SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) TI OMAP DRIVER
+M: Kishon Vijay Abraham I <kishon@ti.com>
+L: linux-mmc@vger.kernel.org
+S: Maintained
+F: drivers/mmc/host/sdhci-omap.c
+
SECURE ENCRYPTING DEVICE (SED) OPAL DRIVER
M: Scott Bauer <scott.bauer@intel.com>
M: Jonathan Derrick <jonathan.derrick@intel.com>
-M: Rafael Antognolli <rafael.antognolli@intel.com>
L: linux-block@vger.kernel.org
S: Supported
F: block/sed*
@@ -12475,7 +12538,10 @@ M: Shaohua Li <shli@kernel.org>
L: linux-raid@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git
S: Supported
-F: drivers/md/
+F: drivers/md/Makefile
+F: drivers/md/Kconfig
+F: drivers/md/md*
+F: drivers/md/raid*
F: include/linux/raid/
F: include/uapi/linux/raid/
@@ -12685,6 +12751,13 @@ L: stable@vger.kernel.org
S: Supported
F: Documentation/process/stable-kernel-rules.rst
+STAGING - ATOMISP DRIVER
+M: Alan Cox <alan@linux.intel.com>
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/staging/media/atomisp/
+
STAGING - COMEDI
M: Ian Abbott <abbotti@mev.co.uk>
M: H Hartley Sweeten <hsweeten@visionengravers.com>
@@ -12928,9 +13001,16 @@ F: arch/arc/plat-axs10x
F: arch/arc/boot/dts/ax*
F: Documentation/devicetree/bindings/arc/axs10*
+SYNOPSYS DESIGNWARE APB GPIO DRIVER
+M: Hoan Tran <hotran@apm.com>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+F: drivers/gpio/gpio-dwapb.c
+F: Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
+
SYNOPSYS DESIGNWARE DMAC DRIVER
M: Viresh Kumar <vireshk@kernel.org>
-M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
S: Maintained
F: include/linux/dma/dw.h
F: include/linux/platform_data/dma-dw.h
@@ -13311,6 +13391,7 @@ M: Andreas Noever <andreas.noever@gmail.com>
M: Michael Jamet <michael.jamet@intel.com>
M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Yehezkel Bernat <yehezkel.bernat@intel.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
S: Maintained
F: drivers/thunderbolt/
F: include/linux/thunderbolt.h
@@ -13778,7 +13859,7 @@ UDRAW TABLET
M: Bastien Nocera <hadess@hadess.net>
L: linux-input@vger.kernel.org
S: Maintained
-F: drivers/hid/hid-udraw.c
+F: drivers/hid/hid-udraw-ps3.c
UFS FILESYSTEM
M: Evgeniy Dushistov <dushistov@mail.ru>
@@ -14350,6 +14431,7 @@ L: virtualization@lists.linux-foundation.org
L: kvm@vger.kernel.org
S: Supported
F: drivers/s390/virtio/
+F: arch/s390/include/uapi/asm/virtio-ccw.h
VIRTIO GPU DRIVER
M: David Airlie <airlied@linux.ie>
@@ -14405,7 +14487,7 @@ M: Manohar Vanga <manohar.vanga@gmail.com>
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: devel@driverdev.osuosl.org
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
F: Documentation/driver-api/vme.rst
F: drivers/staging/vme/
F: drivers/vme/
@@ -14622,6 +14704,7 @@ F: Documentation/devicetree/bindings/extcon/extcon-arizona.txt
F: Documentation/devicetree/bindings/regulator/arizona-regulator.txt
F: Documentation/devicetree/bindings/mfd/arizona.txt
F: Documentation/devicetree/bindings/mfd/wm831x.txt
+F: Documentation/devicetree/bindings/sound/wlf,arizona.txt
F: arch/arm/mach-s3c64xx/mach-crag6410*
F: drivers/clk/clk-wm83*.c
F: drivers/extcon/extcon-arizona.c
diff --git a/Makefile b/Makefile
index bee2033e7d1d..763ab35df12a 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
NAME = Fearless Coyote
# *DOCUMENTATION*
@@ -226,7 +226,8 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
-e s/arm.*/arm/ -e s/sa110/arm/ \
-e s/s390x/s390/ -e s/parisc64/parisc/ \
-e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
- -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ )
+ -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
+ -e s/riscv.*/riscv/)
# Cross compiling and selecting different set of gcc/bin-utils
# ---------------------------------------------------------------------------
@@ -934,8 +935,8 @@ ifdef CONFIG_STACK_VALIDATION
ifeq ($(has_libelf),1)
objtool_target := tools/objtool FORCE
else
- ifdef CONFIG_ORC_UNWINDER
- $(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+ ifdef CONFIG_UNWINDER_ORC
+ $(error "Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
else
$(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
endif
@@ -1459,7 +1460,8 @@ $(help-board-dirs): help-%:
# Documentation targets
# ---------------------------------------------------------------------------
-DOC_TARGETS := xmldocs latexdocs pdfdocs htmldocs epubdocs cleandocs linkcheckdocs
+DOC_TARGETS := xmldocs latexdocs pdfdocs htmldocs epubdocs cleandocs \
+ linkcheckdocs dochelp refcheckdocs
PHONY += $(DOC_TARGETS)
$(DOC_TARGETS): scripts_basic FORCE
$(Q)$(MAKE) $(build)=Documentation $@
@@ -1549,7 +1551,7 @@ clean: $(clean-dirs)
$(call cmd,rmfiles)
@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
\( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
- -o -name '*.ko.*' \
+ -o -name '*.ko.*' -o -name '*.dtb' -o -name '*.dtb.S' \
-o -name '*.dwo' \
-o -name '*.su' \
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
diff --git a/arch/Kconfig b/arch/Kconfig
index 057370a0ac4e..400b9e1b2f27 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -91,7 +91,7 @@ config STATIC_KEYS_SELFTEST
config OPTPROBES
def_bool y
depends on KPROBES && HAVE_OPTPROBES
- depends on !PREEMPT
+ select TASKS_RCU if PREEMPT
config KPROBES_ON_FTRACE
def_bool y
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 69b875880754..b31b974a03cb 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -506,7 +506,7 @@ config ALPHA_QEMU
Generic kernels will auto-detect QEMU. But when building a
system-specific kernel, the assumption is that we want to
- elimiate as many runtime tests as possible.
+ eliminate as many runtime tests as possible.
If unsure, say N.
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 85867d3cea64..767bfdd42992 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -14,6 +14,15 @@
* than regular operations.
*/
+/*
+ * To ensure dependency ordering is preserved for the _relaxed and
+ * _release atomics, an smp_read_barrier_depends() is unconditionally
+ * inserted into the _relaxed variants, which are used to build the
+ * barriered versions. To avoid redundant back-to-back fences, we can
+ * define the _acquire and _fence versions explicitly.
+ */
+#define __atomic_op_acquire(op, args...) op##_relaxed(args)
+#define __atomic_op_fence __atomic_op_release
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
@@ -61,6 +70,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -78,6 +88,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -112,6 +123,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
@@ -129,6 +141,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
".previous" \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
:"Ir" (i), "m" (v->counter) : "memory"); \
+ smp_read_barrier_depends(); \
return result; \
}
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index e542cb272b67..b78f61f20796 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -9,6 +9,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
}
-#define dma_cache_sync(dev, va, size, dir) ((void)0)
-
#endif /* _ALPHA_DMA_MAPPING_H */
diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h
index bae97eb19d26..942924756cf2 100644
--- a/arch/alpha/include/asm/floppy.h
+++ b/arch/alpha/include/asm/floppy.h
@@ -24,7 +24,6 @@
#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
-#define fd_cacheflush(addr,size) /* nothing */
#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\
0, "floppy", NULL)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
@@ -62,7 +61,6 @@ alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
prev_dir = dir;
fd_clear_dma_ff();
- fd_cacheflush(addr, size);
fd_set_dma_mode(mode);
set_dma_addr(FLOPPY_DMA, bus_addr);
fd_set_dma_count(size);
diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h
index fc988c16e894..b9ec55351924 100644
--- a/arch/alpha/include/asm/pci.h
+++ b/arch/alpha/include/asm/pci.h
@@ -13,9 +13,6 @@
* The following structure is used to manage multiple PCI busses.
*/
-struct pci_dev;
-struct pci_bus;
-struct resource;
struct pci_iommu_arena;
struct page;
@@ -57,8 +54,6 @@ struct pci_controller {
#define PCIBIOS_MIN_IO alpha_mv.min_io_address
#define PCIBIOS_MIN_MEM alpha_mv.min_mem_address
-extern void pcibios_set_master(struct pci_dev *dev);
-
/* IOMMU controls. */
/* The PCI address space does not equal the physical memory address space.
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
index 3925f06afd6b..cf8fc8f9a2ed 100644
--- a/arch/alpha/include/asm/rwsem.h
+++ b/arch/alpha/include/asm/rwsem.h
@@ -22,7 +22,7 @@
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-static inline void __down_read(struct rw_semaphore *sem)
+static inline int ___down_read(struct rw_semaphore *sem)
{
long oldcount;
#ifndef CONFIG_SMP
@@ -42,10 +42,24 @@ static inline void __down_read(struct rw_semaphore *sem)
:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
- if (unlikely(oldcount < 0))
+ return (oldcount < 0);
+}
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ if (unlikely(___down_read(sem)))
rwsem_down_read_failed(sem);
}
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+ if (unlikely(___down_read(sem)))
+ if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+ return -EINTR;
+
+ return 0;
+}
+
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
@@ -95,9 +109,10 @@ static inline void __down_write(struct rw_semaphore *sem)
static inline int __down_write_killable(struct rw_semaphore *sem)
{
- if (unlikely(___down_write(sem)))
+ if (unlikely(___down_write(sem))) {
if (IS_ERR(rwsem_down_write_failed_killable(sem)))
return -EINTR;
+ }
return 0;
}
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index aa4304afbea6..1221cbb86a6f 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -14,7 +14,6 @@
* We make no fairness assumptions. They have a cost.
*/
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_is_locked(x) ((x)->lock != 0)
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
@@ -55,16 +54,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
/***********************************************************/
-static inline int arch_read_can_lock(arch_rwlock_t *lock)
-{
- return (lock->lock & 1) == 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *lock)
-{
- return lock->lock == 0;
-}
-
static inline void arch_read_lock(arch_rwlock_t *lock)
{
long regx;
@@ -171,7 +160,4 @@ static inline void arch_write_unlock(arch_rwlock_t * lock)
lock->lock = 0;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif /* _ALPHA_SPINLOCK_H */
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 08235bb1f035..87da00579946 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -197,9 +197,16 @@ pcibios_init(void)
subsys_initcall(pcibios_init);
#ifdef ALPHA_RESTORE_SRM_SETUP
+/* Store PCI device configuration left by SRM here. */
+struct pdev_srm_saved_conf
+{
+ struct pdev_srm_saved_conf *next;
+ struct pci_dev *dev;
+};
+
static struct pdev_srm_saved_conf *srm_saved_configs;
-void pdev_save_srm_config(struct pci_dev *dev)
+static void pdev_save_srm_config(struct pci_dev *dev)
{
struct pdev_srm_saved_conf *tmp;
static int printed = 0;
@@ -239,6 +246,8 @@ pci_restore_srm_config(void)
pci_restore_state(tmp->dev);
}
}
+#else
+#define pdev_save_srm_config(dev) do {} while (0)
#endif
void pcibios_fixup_bus(struct pci_bus *bus)
diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h
index 26231601630e..2e4cb74fdc41 100644
--- a/arch/alpha/kernel/pci_impl.h
+++ b/arch/alpha/kernel/pci_impl.h
@@ -157,16 +157,8 @@ struct pci_iommu_arena
#endif
#ifdef ALPHA_RESTORE_SRM_SETUP
-/* Store PCI device configuration left by SRM here. */
-struct pdev_srm_saved_conf
-{
- struct pdev_srm_saved_conf *next;
- struct pci_dev *dev;
-};
-
extern void pci_restore_srm_config(void);
#else
-#define pdev_save_srm_config(dev) do {} while (0)
#define pci_restore_srm_config() do {} while (0)
#endif
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index c84e67fdea09..5c7adf100a58 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -298,7 +298,7 @@ config ARC_MMU_V1
config ARC_MMU_V2
bool "MMU v2"
help
- Fixed the deficiency of v1 - possible thrashing in memcpy sceanrio
+ Fixed the deficiency of v1 - possible thrashing in memcpy scenario
when 2 D-TLB and 1 I-TLB entries index into same 2way set.
config ARC_MMU_V3
@@ -371,7 +371,7 @@ config ARC_FPU_SAVE_RESTORE
bool "Enable FPU state persistence across context switch"
default n
help
- Double Precision Floating Point unit had dedictaed regs which
+ Double Precision Floating Point unit had dedicated regs which
need to be saved/restored across context-switch.
Note that ARC FPU is overly simplistic, unlike say x86, which has
hardware pieces to allow software to conditionally save/restore,
@@ -467,7 +467,7 @@ config ARC_PLAT_NEEDS_PHYS_TO_DMA
bool
config ARC_KVADDR_SIZE
- int "Kernel Virtaul Address Space size (MB)"
+ int "Kernel Virtual Address Space size (MB)"
range 0 512
default "256"
help
diff --git a/arch/arc/boot/.gitignore b/arch/arc/boot/.gitignore
index 5246969a20c5..c4c5fd529c25 100644
--- a/arch/arc/boot/.gitignore
+++ b/arch/arc/boot/.gitignore
@@ -1,2 +1 @@
-*.dtb*
uImage
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
index 83c9e076ef63..22a4c5d4702f 100644
--- a/arch/arc/boot/dts/Makefile
+++ b/arch/arc/boot/dts/Makefile
@@ -11,8 +11,6 @@ dtb-y := $(builtindtb-y).dtb
.SECONDARY: $(obj)/$(builtindtb-y).dtb.S
-dtstree := $(srctree)/$(src)
-dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
-
-always := $(dtb-y)
-clean-files := *.dtb *.dtb.S
+# for CONFIG_OF_ALL_DTBS test
+dtstree := $(srctree)/$(src)
+dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 47efc8451b70..2ba04a7db621 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -14,7 +14,6 @@
#include <asm/barrier.h>
#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#ifdef CONFIG_ARC_HAS_LLSC
@@ -410,14 +409,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
#endif
-#define arch_read_can_lock(x) ((x)->counter > 0)
-#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 6df9d94a9537..efe8b4200a67 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -250,7 +250,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
* and read back old value
*/
do {
- new = old = ACCESS_ONCE(*ipi_data_ptr);
+ new = old = READ_ONCE(*ipi_data_ptr);
new |= 1U << msg;
} while (cmpxchg(ipi_data_ptr, old, new) != old);
diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore
index 3c79f85975aa..ce1c5ff746e7 100644
--- a/arch/arm/boot/.gitignore
+++ b/arch/arm/boot/.gitignore
@@ -3,4 +3,3 @@ zImage
xipImage
bootpImage
uImage
-*.dtb
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index eff87a344566..25dcf4e534e6 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -1070,9 +1070,3 @@ dtb-$(CONFIG_ARCH_ASPEED) += aspeed-bmc-opp-palmetto.dtb \
aspeed-bmc-opp-romulus.dtb \
aspeed-ast2500-evb.dtb
endif
-
-dtstree := $(srctree)/$(src)
-dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
-
-always := $(dtb-y)
-clean-files := *.dtb
diff --git a/arch/arm/boot/dts/omap3-n9.dts b/arch/arm/boot/dts/omap3-n9.dts
index b9e58c536afd..39e35f8b8206 100644
--- a/arch/arm/boot/dts/omap3-n9.dts
+++ b/arch/arm/boot/dts/omap3-n9.dts
@@ -26,6 +26,7 @@
clocks = <&isp 0>;
clock-frequency = <9600000>;
nokia,nvm-size = <(16 * 64)>;
+ flash-leds = <&as3645a_flash &as3645a_indicator>;
port {
smia_1_1: endpoint {
link-frequencies = /bits/ 64 <199200000 210000000 499200000>;
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 1b0bd72945f2..12fbb3da5fce 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -271,14 +271,14 @@
#size-cells = <0>;
reg = <0x30>;
compatible = "ams,as3645a";
- flash@0 {
+ as3645a_flash: flash@0 {
reg = <0x0>;
flash-timeout-us = <150000>;
flash-max-microamp = <320000>;
led-max-microamp = <60000>;
ams,input-max-microamp = <1750000>;
};
- indicator@1 {
+ as3645a_indicator: indicator@1 {
reg = <0x1>;
led-max-microamp = <10000>;
};
diff --git a/arch/arm/boot/dts/omap3-n950.dts b/arch/arm/boot/dts/omap3-n950.dts
index 646601a3ebd8..c354a1ed1e70 100644
--- a/arch/arm/boot/dts/omap3-n950.dts
+++ b/arch/arm/boot/dts/omap3-n950.dts
@@ -60,6 +60,7 @@
clocks = <&isp 0>;
clock-frequency = <9600000>;
nokia,nvm-size = <(16 * 64)>;
+ flash-leds = <&as3645a_flash &as3645a_indicator>;
port {
smia_1_1: endpoint {
link-frequencies = /bits/ 64 <210000000 333600000 398400000>;
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index 5b36eb114ddc..10099df8b73e 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -314,7 +314,7 @@
};
usart2: serial@40004400 {
- compatible = "st,stm32-usart", "st,stm32-uart";
+ compatible = "st,stm32-uart";
reg = <0x40004400 0x400>;
interrupts = <38>;
clocks = <&rcc 0 STM32F4_APB1_CLOCK(UART2)>;
@@ -322,7 +322,7 @@
};
usart3: serial@40004800 {
- compatible = "st,stm32-usart", "st,stm32-uart";
+ compatible = "st,stm32-uart";
reg = <0x40004800 0x400>;
interrupts = <39>;
clocks = <&rcc 0 STM32F4_APB1_CLOCK(UART3)>;
@@ -386,7 +386,7 @@
};
usart7: serial@40007800 {
- compatible = "st,stm32-usart", "st,stm32-uart";
+ compatible = "st,stm32-uart";
reg = <0x40007800 0x400>;
interrupts = <82>;
clocks = <&rcc 0 STM32F4_APB1_CLOCK(UART7)>;
@@ -394,7 +394,7 @@
};
usart8: serial@40007c00 {
- compatible = "st,stm32-usart", "st,stm32-uart";
+ compatible = "st,stm32-uart";
reg = <0x40007c00 0x400>;
interrupts = <83>;
clocks = <&rcc 0 STM32F4_APB1_CLOCK(UART8)>;
@@ -444,7 +444,7 @@
};
usart1: serial@40011000 {
- compatible = "st,stm32-usart", "st,stm32-uart";
+ compatible = "st,stm32-uart";
reg = <0x40011000 0x400>;
interrupts = <37>;
clocks = <&rcc 0 STM32F4_APB2_CLOCK(USART1)>;
@@ -455,7 +455,7 @@
};
usart6: serial@40011400 {
- compatible = "st,stm32-usart", "st,stm32-uart";
+ compatible = "st,stm32-uart";
reg = <0x40011400 0x400>;
interrupts = <71>;
clocks = <&rcc 0 STM32F4_APB2_CLOCK(USART6)>;
diff --git a/arch/arm/boot/dts/stm32f746.dtsi b/arch/arm/boot/dts/stm32f746.dtsi
index 5633860037d2..5f9417894059 100644
--- a/arch/arm/boot/dts/stm32f746.dtsi
+++ b/arch/arm/boot/dts/stm32f746.dtsi
@@ -136,7 +136,7 @@
};
usart2: serial@40004400 {
- compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+ compatible = "st,stm32f7-uart";
reg = <0x40004400 0x400>;
interrupts = <38>;
clocks = <&rcc 1 CLK_USART2>;
@@ -144,7 +144,7 @@
};
usart3: serial@40004800 {
- compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+ compatible = "st,stm32f7-uart";
reg = <0x40004800 0x400>;
interrupts = <39>;
clocks = <&rcc 1 CLK_USART3>;
@@ -177,7 +177,7 @@
};
usart7: serial@40007800 {
- compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+ compatible = "st,stm32f7-uart";
reg = <0x40007800 0x400>;
interrupts = <82>;
clocks = <&rcc 1 CLK_UART7>;
@@ -185,7 +185,7 @@
};
usart8: serial@40007c00 {
- compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+ compatible = "st,stm32f7-uart";
reg = <0x40007c00 0x400>;
interrupts = <83>;
clocks = <&rcc 1 CLK_UART8>;
@@ -193,7 +193,7 @@
};
usart1: serial@40011000 {
- compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+ compatible = "st,stm32f7-uart";
reg = <0x40011000 0x400>;
interrupts = <37>;
clocks = <&rcc 1 CLK_USART1>;
@@ -201,7 +201,7 @@
};
usart6: serial@40011400 {
- compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+ compatible = "st,stm32f7-uart";
reg = <0x40011400 0x400>;
interrupts = <71>;
clocks = <&rcc 1 CLK_USART6>;
diff --git a/arch/arm/boot/dts/stm32h743.dtsi b/arch/arm/boot/dts/stm32h743.dtsi
index 58ec2275181e..26de31578701 100644
--- a/arch/arm/boot/dts/stm32h743.dtsi
+++ b/arch/arm/boot/dts/stm32h743.dtsi
@@ -67,7 +67,7 @@
};
usart2: serial@40004400 {
- compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+ compatible = "st,stm32f7-uart";
reg = <0x40004400 0x400>;
interrupts = <38>;
status = "disabled";
@@ -99,7 +99,7 @@
};
usart1: serial@40011000 {
- compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+ compatible = "st,stm32f7-uart";
reg = <0x40011000 0x400>;
interrupts = <37>;
status = "disabled";
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 6c7b06854fce..51936bde1eb2 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -826,28 +826,6 @@ static int locomo_match(struct device *_dev, struct device_driver *_drv)
return dev->devid == drv->devid;
}
-static int locomo_bus_suspend(struct device *dev, pm_message_t state)
-{
- struct locomo_dev *ldev = LOCOMO_DEV(dev);
- struct locomo_driver *drv = LOCOMO_DRV(dev->driver);
- int ret = 0;
-
- if (drv && drv->suspend)
- ret = drv->suspend(ldev, state);
- return ret;
-}
-
-static int locomo_bus_resume(struct device *dev)
-{
- struct locomo_dev *ldev = LOCOMO_DEV(dev);
- struct locomo_driver *drv = LOCOMO_DRV(dev->driver);
- int ret = 0;
-
- if (drv && drv->resume)
- ret = drv->resume(ldev);
- return ret;
-}
-
static int locomo_bus_probe(struct device *dev)
{
struct locomo_dev *ldev = LOCOMO_DEV(dev);
@@ -875,8 +853,6 @@ struct bus_type locomo_bus_type = {
.match = locomo_match,
.probe = locomo_bus_probe,
.remove = locomo_bus_remove,
- .suspend = locomo_bus_suspend,
- .resume = locomo_bus_resume,
};
int locomo_driver_register(struct locomo_driver *driver)
diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig
index a93cc2fcf791..2f01e84b3d8c 100644
--- a/arch/arm/configs/dove_defconfig
+++ b/arch/arm/configs/dove_defconfig
@@ -140,6 +140,6 @@ CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
CONFIG_CRC_CCITT=y
CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
index 69a4bd13eea5..7c41bee28463 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -279,6 +279,6 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_USER=y
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
CONFIG_CRC_CCITT=y
CONFIG_LIBCRC32C=y
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index e39ee282e6ca..b831baddae02 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -163,5 +163,5 @@ CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_MV_CESA=y
+CONFIG_CRYPTO_DEV_MARVELL_CESA=y
CONFIG_CRC_T10DIF=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index d5e1370ec303..830e817a028a 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -219,7 +219,8 @@ CONFIG_AD525X_DPOT_I2C=m
CONFIG_ICS932S401=m
CONFIG_APDS9802ALS=m
CONFIG_ISL29003=m
-CONFIG_TI_DAC7512=m
+CONFIG_IIO=m
+CONFIG_AD5446=m
CONFIG_EEPROM_AT24=m
CONFIG_SENSORS_LIS3_SPI=m
CONFIG_IDE=m
diff --git a/arch/arm/configs/raumfeld_defconfig b/arch/arm/configs/raumfeld_defconfig
index e3dc80ead465..77a56c23c6ef 100644
--- a/arch/arm/configs/raumfeld_defconfig
+++ b/arch/arm/configs/raumfeld_defconfig
@@ -37,7 +37,8 @@ CONFIG_MTD_NAND_PXA3xx=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_ISL29003=y
-CONFIG_TI_DAC7512=y
+CONFIG_IIO=y
+CONFIG_AD5446=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index eee269321923..1070044f5c3f 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -196,6 +196,11 @@ static inline void gic_write_ctlr(u32 val)
isb();
}
+static inline u32 gic_read_ctlr(void)
+{
+ return read_sysreg(ICC_CTLR);
+}
+
static inline void gic_write_grpen1(u32 val)
{
write_sysreg(val, ICC_IGRPEN1);
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 9327e3a101dc..0a8d7bba2cb0 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -107,6 +107,7 @@ static inline u32 arch_timer_get_cntkctl(void)
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
+ isb();
}
#endif
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 0722ec6be692..6821f1249300 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -7,7 +7,6 @@
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
-#include <linux/kmemcheck.h>
#include <linux/kref.h>
#define ARM_MAPPING_ERROR (~(dma_addr_t)0x0)
diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h
index 74e51d6bd93f..f8712e3c29cf 100644
--- a/arch/arm/include/asm/hardware/locomo.h
+++ b/arch/arm/include/asm/hardware/locomo.h
@@ -189,8 +189,6 @@ struct locomo_driver {
unsigned int devid;
int (*probe)(struct locomo_dev *);
int (*remove)(struct locomo_dev *);
- int (*suspend)(struct locomo_dev *, pm_message_t);
- int (*resume)(struct locomo_dev *);
};
#define LOCOMO_DRV(_d) container_of((_d), struct locomo_driver, drv)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 4a879f6ff13b..242151ea6908 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -293,4 +293,7 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
+/* All host FP/SIMD state is restored on guest exit, so nothing to save: */
+static inline void kvm_fpsimd_flush_cpu_state(void) {}
+
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index b2902a5cd780..2d7344f0e208 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -57,7 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
static inline void clean_pte_table(pte_t *pte)
{
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index e9c9a117bd25..c7cdbb43ae7c 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -126,8 +126,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
/*
* kprobe-based event tracer support
*/
-#include <linux/stddef.h>
-#include <linux/types.h>
+#include <linux/compiler.h>
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0))
extern int regs_query_register_offset(const char *name);
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 25cb465c8538..099c78fcf62d 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -53,8 +53,6 @@ static inline void dsb_sev(void)
* memory.
*/
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -74,7 +72,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
while (lockval.tickets.next != lockval.tickets.owner) {
wfe();
- lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+ lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
}
smp_mb();
@@ -194,9 +192,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
dsb_sev();
}
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0)
-
/*
* Read locks are a bit more hairy:
* - Exclusively load the lock value.
@@ -274,14 +269,4 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
}
}
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000)
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index f59ab9bcbaf9..5d88d2f22b2c 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -25,6 +25,14 @@ void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+#include <linux/arch_topology.h>
+
+/* Replace task scheduler's default frequency-invariant accounting */
+#define arch_scale_freq_capacity topology_get_freq_scale
+
+/* Replace task scheduler's default cpu-invariant accounting */
+#define arch_scale_cpu_capacity topology_get_cpu_scale
+
#else
static inline void init_cpu_topology(void) { }
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 5d2925e2ce1f..53efe8b22126 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -23,7 +23,7 @@ config ARCH_BCM_IPROC
help
This enables support for systems based on Broadcom IPROC architected SoCs.
The IPROC complex contains one or more ARM CPUs along with common
- core periperals. Application specific SoCs are created by adding a
+ core peripherals. Application specific SoCs are created by adding a
uArchitecture containing peripherals outside of the IPROC complex.
Currently supported SoCs are Cygnus.
@@ -69,8 +69,8 @@ config ARCH_BCM_5301X
Support for Broadcom BCM470X and BCM5301X SoCs with ARM CPU cores.
This is a network SoC line mostly used in home routers and
- wifi access points, it's internal name is Northstar.
- This inclused the following SoC: BCM53010, BCM53011, BCM53012,
+ wifi access points, its internal name is Northstar.
+ This includes the following SoC: BCM53010, BCM53011, BCM53012,
BCM53014, BCM53015, BCM53016, BCM53017, BCM53018, BCM4707,
BCM4708 and BCM4709.
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index f53c61813998..e70feec6fad5 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -31,7 +31,7 @@
#include <linux/amba/serial.h>
#include <linux/mtd/physmap.h>
#include <linux/i2c.h>
-#include <linux/i2c-gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/spi/spi.h>
#include <linux/export.h>
#include <linux/irqchip/arm-vic.h>
@@ -320,42 +320,47 @@ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr)
/*************************************************************************
* EP93xx i2c peripheral handling
*************************************************************************/
-static struct i2c_gpio_platform_data ep93xx_i2c_data;
+
+/* All EP93xx devices use the same two GPIO pins for I2C bit-banging */
+static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ /* Use local offsets on gpiochip/port "G" */
+ GPIO_LOOKUP_IDX("G", 1, NULL, 0,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("G", 0, NULL, 1,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
+};
static struct platform_device ep93xx_i2c_device = {
.name = "i2c-gpio",
.id = 0,
.dev = {
- .platform_data = &ep93xx_i2c_data,
+ .platform_data = NULL,
},
};
/**
* ep93xx_register_i2c - Register the i2c platform device.
- * @data: platform specific i2c-gpio configuration (__initdata)
* @devices: platform specific i2c bus device information (__initdata)
* @num: the number of devices on the i2c bus
*/
-void __init ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
- struct i2c_board_info *devices, int num)
+void __init ep93xx_register_i2c(struct i2c_board_info *devices, int num)
{
/*
- * Set the EEPROM interface pin drive type control.
- * Defines the driver type for the EECLK and EEDAT pins as either
- * open drain, which will require an external pull-up, or a normal
- * CMOS driver.
+ * FIXME: this just sets the two pins as non-opendrain, as no
+	 * platform tries to do that anyway. Flag the applicable lines
+ * as open drain in the GPIO_LOOKUP above and the driver or
+ * gpiolib will handle open drain/open drain emulation as need
+ * be. Right now i2c-gpio emulates open drain which is not
+ * optimal.
*/
- if (data->sda_is_open_drain && data->sda_pin != EP93XX_GPIO_LINE_EEDAT)
- pr_warning("sda != EEDAT, open drain has no effect\n");
- if (data->scl_is_open_drain && data->scl_pin != EP93XX_GPIO_LINE_EECLK)
- pr_warning("scl != EECLK, open drain has no effect\n");
-
- __raw_writel((data->sda_is_open_drain << 1) |
- (data->scl_is_open_drain << 0),
+ __raw_writel((0 << 1) | (0 << 0),
EP93XX_GPIO_EEDRIVE);
- ep93xx_i2c_data = *data;
i2c_register_board_info(0, devices, num);
+ gpiod_add_lookup_table(&ep93xx_i2c_gpiod_table);
platform_device_register(&ep93xx_i2c_device);
}
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index 7a7f280b07d7..8e89ec8b6f0f 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -28,7 +28,6 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-#include <linux/i2c-gpio.h>
#include <linux/spi/spi.h>
#include <sound/cs4271.h>
@@ -61,14 +60,6 @@ static struct ep93xx_eth_data __initdata edb93xx_eth_data = {
/*************************************************************************
* EDB93xx i2c peripheral handling
*************************************************************************/
-static struct i2c_gpio_platform_data __initdata edb93xx_i2c_gpio_data = {
- .sda_pin = EP93XX_GPIO_LINE_EEDAT,
- .sda_is_open_drain = 0,
- .scl_pin = EP93XX_GPIO_LINE_EECLK,
- .scl_is_open_drain = 0,
- .udelay = 0, /* default to 100 kHz */
- .timeout = 0, /* default to 100 ms */
-};
static struct i2c_board_info __initdata edb93xxa_i2c_board_info[] = {
{
@@ -86,13 +77,11 @@ static void __init edb93xx_register_i2c(void)
{
if (machine_is_edb9302a() || machine_is_edb9307a() ||
machine_is_edb9315a()) {
- ep93xx_register_i2c(&edb93xx_i2c_gpio_data,
- edb93xxa_i2c_board_info,
+ ep93xx_register_i2c(edb93xxa_i2c_board_info,
ARRAY_SIZE(edb93xxa_i2c_board_info));
} else if (machine_is_edb9302() || machine_is_edb9307()
|| machine_is_edb9312() || machine_is_edb9315()) {
- ep93xx_register_i2c(&edb93xx_i2c_gpio_data,
- edb93xx_i2c_board_info,
+ ep93xx_register_i2c(edb93xx_i2c_board_info,
ARRAY_SIZE(edb93xx_i2c_board_info));
}
}
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h
index 3bbe1591013e..6c41c794bed5 100644
--- a/arch/arm/mach-ep93xx/include/mach/platform.h
+++ b/arch/arm/mach-ep93xx/include/mach/platform.h
@@ -8,7 +8,6 @@
#include <linux/reboot.h>
struct device;
-struct i2c_gpio_platform_data;
struct i2c_board_info;
struct spi_board_info;
struct platform_device;
@@ -37,8 +36,7 @@ void ep93xx_register_flash(unsigned int width,
resource_size_t start, resource_size_t size);
void ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr);
-void ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
- struct i2c_board_info *devices, int num);
+void ep93xx_register_i2c(struct i2c_board_info *devices, int num);
void ep93xx_register_spi(struct ep93xx_spi_info *info,
struct spi_board_info *devices, int num);
void ep93xx_register_fb(struct ep93xxfb_mach_info *data);
diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
index c7a40f245892..e61f3dee24c2 100644
--- a/arch/arm/mach-ep93xx/simone.c
+++ b/arch/arm/mach-ep93xx/simone.c
@@ -19,7 +19,6 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-#include <linux/i2c-gpio.h>
#include <linux/mmc/host.h>
#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>
@@ -129,15 +128,6 @@ static struct ep93xx_spi_info simone_spi_info __initdata = {
.use_dma = 1,
};
-static struct i2c_gpio_platform_data __initdata simone_i2c_gpio_data = {
- .sda_pin = EP93XX_GPIO_LINE_EEDAT,
- .sda_is_open_drain = 0,
- .scl_pin = EP93XX_GPIO_LINE_EECLK,
- .scl_is_open_drain = 0,
- .udelay = 0,
- .timeout = 0,
-};
-
static struct i2c_board_info __initdata simone_i2c_board_info[] = {
{
I2C_BOARD_INFO("ds1337", 0x68),
@@ -161,7 +151,7 @@ static void __init simone_init_machine(void)
ep93xx_register_flash(2, EP93XX_CS6_PHYS_BASE, SZ_8M);
ep93xx_register_eth(&simone_eth_data, 1);
ep93xx_register_fb(&simone_fb_info);
- ep93xx_register_i2c(&simone_i2c_gpio_data, simone_i2c_board_info,
+ ep93xx_register_i2c(simone_i2c_board_info,
ARRAY_SIZE(simone_i2c_board_info));
ep93xx_register_spi(&simone_spi_info, simone_spi_devices,
ARRAY_SIZE(simone_spi_devices));
diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c
index 8b29398f4dc7..45940c1d7787 100644
--- a/arch/arm/mach-ep93xx/snappercl15.c
+++ b/arch/arm/mach-ep93xx/snappercl15.c
@@ -21,7 +21,6 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/i2c.h>
-#include <linux/i2c-gpio.h>
#include <linux/fb.h>
#include <linux/mtd/partitions.h>
@@ -127,15 +126,6 @@ static struct ep93xx_eth_data __initdata snappercl15_eth_data = {
.phy_id = 1,
};
-static struct i2c_gpio_platform_data __initdata snappercl15_i2c_gpio_data = {
- .sda_pin = EP93XX_GPIO_LINE_EEDAT,
- .sda_is_open_drain = 0,
- .scl_pin = EP93XX_GPIO_LINE_EECLK,
- .scl_is_open_drain = 0,
- .udelay = 0,
- .timeout = 0,
-};
-
static struct i2c_board_info __initdata snappercl15_i2c_data[] = {
{
/* Audio codec */
@@ -161,7 +151,7 @@ static void __init snappercl15_init_machine(void)
{
ep93xx_init_devices();
ep93xx_register_eth(&snappercl15_eth_data, 1);
- ep93xx_register_i2c(&snappercl15_i2c_gpio_data, snappercl15_i2c_data,
+ ep93xx_register_i2c(snappercl15_i2c_data,
ARRAY_SIZE(snappercl15_i2c_data));
ep93xx_register_fb(&snappercl15_fb_info);
snappercl15_register_audio();
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index 1daf9441058c..5a0b6187990a 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -22,7 +22,6 @@
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/i2c.h>
-#include <linux/i2c-gpio.h>
#include <linux/platform_data/pca953x.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
@@ -144,10 +143,6 @@ static struct pca953x_platform_data pca953x_77_gpio_data = {
/*************************************************************************
* I2C Bus
*************************************************************************/
-static struct i2c_gpio_platform_data vision_i2c_gpio_data __initdata = {
- .sda_pin = EP93XX_GPIO_LINE_EEDAT,
- .scl_pin = EP93XX_GPIO_LINE_EECLK,
-};
static struct i2c_board_info vision_i2c_info[] __initdata = {
{
@@ -289,7 +284,7 @@ static void __init vision_init_machine(void)
vision_i2c_info[1].irq = gpio_to_irq(EP93XX_GPIO_LINE_F(7));
- ep93xx_register_i2c(&vision_i2c_gpio_data, vision_i2c_info,
+ ep93xx_register_i2c(vision_i2c_info,
ARRAY_SIZE(vision_i2c_info));
ep93xx_register_spi(&vision_spi_master, vision_spi_board_info,
ARRAY_SIZE(vision_spi_board_info));
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 45801b27ee5c..b5f89fdbbb4b 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -286,88 +286,6 @@ static void __init imx6q_init_machine(void)
imx6q_axi_init();
}
-#define OCOTP_CFG3 0x440
-#define OCOTP_CFG3_SPEED_SHIFT 16
-#define OCOTP_CFG3_SPEED_1P2GHZ 0x3
-#define OCOTP_CFG3_SPEED_996MHZ 0x2
-#define OCOTP_CFG3_SPEED_852MHZ 0x1
-
-static void __init imx6q_opp_check_speed_grading(struct device *cpu_dev)
-{
- struct device_node *np;
- void __iomem *base;
- u32 val;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
- if (!np) {
- pr_warn("failed to find ocotp node\n");
- return;
- }
-
- base = of_iomap(np, 0);
- if (!base) {
- pr_warn("failed to map ocotp\n");
- goto put_node;
- }
-
- /*
- * SPEED_GRADING[1:0] defines the max speed of ARM:
- * 2b'11: 1200000000Hz;
- * 2b'10: 996000000Hz;
- * 2b'01: 852000000Hz; -- i.MX6Q Only, exclusive with 996MHz.
- * 2b'00: 792000000Hz;
- * We need to set the max speed of ARM according to fuse map.
- */
- val = readl_relaxed(base + OCOTP_CFG3);
- val >>= OCOTP_CFG3_SPEED_SHIFT;
- val &= 0x3;
-
- if ((val != OCOTP_CFG3_SPEED_1P2GHZ) && cpu_is_imx6q())
- if (dev_pm_opp_disable(cpu_dev, 1200000000))
- pr_warn("failed to disable 1.2 GHz OPP\n");
- if (val < OCOTP_CFG3_SPEED_996MHZ)
- if (dev_pm_opp_disable(cpu_dev, 996000000))
- pr_warn("failed to disable 996 MHz OPP\n");
- if (cpu_is_imx6q()) {
- if (val != OCOTP_CFG3_SPEED_852MHZ)
- if (dev_pm_opp_disable(cpu_dev, 852000000))
- pr_warn("failed to disable 852 MHz OPP\n");
- }
- iounmap(base);
-put_node:
- of_node_put(np);
-}
-
-static void __init imx6q_opp_init(void)
-{
- struct device_node *np;
- struct device *cpu_dev = get_cpu_device(0);
-
- if (!cpu_dev) {
- pr_warn("failed to get cpu0 device\n");
- return;
- }
- np = of_node_get(cpu_dev->of_node);
- if (!np) {
- pr_warn("failed to find cpu0 node\n");
- return;
- }
-
- if (dev_pm_opp_of_add_table(cpu_dev)) {
- pr_warn("failed to init OPP table\n");
- goto put_node;
- }
-
- imx6q_opp_check_speed_grading(cpu_dev);
-
-put_node:
- of_node_put(np);
-}
-
-static struct platform_device imx6q_cpufreq_pdev = {
- .name = "imx6q-cpufreq",
-};
-
static void __init imx6q_init_late(void)
{
/*
@@ -377,10 +295,8 @@ static void __init imx6q_init_late(void)
if (imx_get_soc_revision() > IMX_CHIP_REVISION_1_1)
imx6q_cpuidle_init();
- if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ)) {
- imx6q_opp_init();
- platform_device_register(&imx6q_cpufreq_pdev);
- }
+ if (IS_ENABLED(CONFIG_ARM_IMX6Q_CPUFREQ))
+ platform_device_register_simple("imx6q-cpufreq", -1, NULL, 0);
}
static void __init imx6q_map_io(void)
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c
index 186df64ceae7..77def6169f50 100644
--- a/arch/arm/mach-ixp4xx/avila-setup.c
+++ b/arch/arm/mach-ixp4xx/avila-setup.c
@@ -18,7 +18,7 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_8250.h>
-#include <linux/i2c-gpio.h>
+#include <linux/gpio/machine.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
@@ -50,16 +50,21 @@ static struct platform_device avila_flash = {
.resource = &avila_flash_resource,
};
-static struct i2c_gpio_platform_data avila_i2c_gpio_data = {
- .sda_pin = AVILA_SDA_PIN,
- .scl_pin = AVILA_SCL_PIN,
+static struct gpiod_lookup_table avila_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", AVILA_SDA_PIN,
+ NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", AVILA_SCL_PIN,
+ NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
};
static struct platform_device avila_i2c_gpio = {
.name = "i2c-gpio",
.id = 0,
.dev = {
- .platform_data = &avila_i2c_gpio_data,
+ .platform_data = NULL,
},
};
@@ -148,6 +153,8 @@ static void __init avila_init(void)
avila_flash_resource.end =
IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;
+ gpiod_add_lookup_table(&avila_i2c_gpiod_table);
+
platform_add_devices(avila_devices, ARRAY_SIZE(avila_devices));
avila_pata_resources[0].start = IXP4XX_EXP_BUS_BASE(1);
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index 19839bba7f17..ac97a4599034 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -26,7 +26,7 @@
#include <linux/leds.h>
#include <linux/reboot.h>
#include <linux/i2c.h>
-#include <linux/i2c-gpio.h>
+#include <linux/gpio/machine.h>
#include <mach/hardware.h>
@@ -69,16 +69,21 @@ static struct platform_device dsmg600_flash = {
.resource = &dsmg600_flash_resource,
};
-static struct i2c_gpio_platform_data dsmg600_i2c_gpio_data = {
- .sda_pin = DSMG600_SDA_PIN,
- .scl_pin = DSMG600_SCL_PIN,
+static struct gpiod_lookup_table dsmg600_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", DSMG600_SDA_PIN,
+ NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", DSMG600_SCL_PIN,
+ NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
};
static struct platform_device dsmg600_i2c_gpio = {
.name = "i2c-gpio",
.id = 0,
.dev = {
- .platform_data = &dsmg600_i2c_gpio_data,
+ .platform_data = NULL,
},
};
@@ -270,6 +275,7 @@ static void __init dsmg600_init(void)
dsmg600_flash_resource.end =
IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;
+ gpiod_add_lookup_table(&dsmg600_i2c_gpiod_table);
i2c_register_board_info(0, dsmg600_i2c_board_info,
ARRAY_SIZE(dsmg600_i2c_board_info));
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c
index 6e32cbc4f590..033f79b35d51 100644
--- a/arch/arm/mach-ixp4xx/fsg-setup.c
+++ b/arch/arm/mach-ixp4xx/fsg-setup.c
@@ -23,7 +23,7 @@
#include <linux/leds.h>
#include <linux/reboot.h>
#include <linux/i2c.h>
-#include <linux/i2c-gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/io.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -55,16 +55,21 @@ static struct platform_device fsg_flash = {
.resource = &fsg_flash_resource,
};
-static struct i2c_gpio_platform_data fsg_i2c_gpio_data = {
- .sda_pin = FSG_SDA_PIN,
- .scl_pin = FSG_SCL_PIN,
+static struct gpiod_lookup_table fsg_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", FSG_SDA_PIN,
+ NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", FSG_SCL_PIN,
+ NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
};
static struct platform_device fsg_i2c_gpio = {
.name = "i2c-gpio",
.id = 0,
.dev = {
- .platform_data = &fsg_i2c_gpio_data,
+ .platform_data = NULL,
},
};
@@ -197,6 +202,7 @@ static void __init fsg_init(void)
/* Configure CS2 for operation, 8bit and writable */
*IXP4XX_EXP_CS2 = 0xbfff0002;
+ gpiod_add_lookup_table(&fsg_i2c_gpiod_table);
i2c_register_board_info(0, fsg_i2c_board_info,
ARRAY_SIZE(fsg_i2c_board_info));
diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c
index 145ec5c1b0eb..4d805080020e 100644
--- a/arch/arm/mach-ixp4xx/goramo_mlr.c
+++ b/arch/arm/mach-ixp4xx/goramo_mlr.c
@@ -7,7 +7,6 @@
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/hdlc.h>
-#include <linux/i2c-gpio.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
@@ -79,6 +78,12 @@
static u32 hw_bits = 0xFFFFFFFD; /* assume all hardware present */;
static u8 control_value;
+/*
+ * FIXME: this is reimplementing I2C bit-banging. Move this
+ * over to using drivers/i2c/busses/i2c-gpio.c like all other boards
+ * and register proper I2C device(s) on the bus for this. (See
+ * other IXP4xx boards for examples.)
+ */
static void set_scl(u8 value)
{
gpio_set_value(GPIO_SCL, !!value);
@@ -217,20 +222,6 @@ static struct platform_device device_flash = {
.resource = &flash_resource,
};
-
-/* I^2C interface */
-static struct i2c_gpio_platform_data i2c_data = {
- .sda_pin = GPIO_SDA,
- .scl_pin = GPIO_SCL,
-};
-
-static struct platform_device device_i2c = {
- .name = "i2c-gpio",
- .id = 0,
- .dev = { .platform_data = &i2c_data },
-};
-
-
/* IXP425 2 UART ports */
static struct resource uart_resources[] = {
{
@@ -412,9 +403,6 @@ static void __init gmlr_init(void)
if (hw_bits & CFG_HW_HAS_HSS1)
device_tab[devices++] = &device_hss_tab[1]; /* max index 5 */
- if (hw_bits & CFG_HW_HAS_EEPROM)
- device_tab[devices++] = &device_i2c; /* max index 6 */
-
gpio_request(GPIO_SCL, "SCL/clock");
gpio_request(GPIO_SDA, "SDA/data");
gpio_request(GPIO_STR, "strobe");
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index 8f5e01527b1b..b168e2fbdbeb 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -15,7 +15,7 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_8250.h>
-#include <linux/i2c-gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
@@ -123,16 +123,21 @@ static struct platform_device ixdp425_flash_nand = {
};
#endif /* CONFIG_MTD_NAND_PLATFORM */
-static struct i2c_gpio_platform_data ixdp425_i2c_gpio_data = {
- .sda_pin = IXDP425_SDA_PIN,
- .scl_pin = IXDP425_SCL_PIN,
+static struct gpiod_lookup_table ixdp425_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", IXDP425_SDA_PIN,
+ NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", IXDP425_SCL_PIN,
+ NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
};
static struct platform_device ixdp425_i2c_gpio = {
.name = "i2c-gpio",
.id = 0,
.dev = {
- .platform_data = &ixdp425_i2c_gpio_data,
+ .platform_data = NULL,
},
};
@@ -246,6 +251,7 @@ static void __init ixdp425_init(void)
ixdp425_uart_data[1].flags = 0;
}
+ gpiod_add_lookup_table(&ixdp425_i2c_gpiod_table);
platform_add_devices(ixdp425_devices, ARRAY_SIZE(ixdp425_devices));
}
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index b6d731241317..435602085408 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -28,7 +28,7 @@
#include <linux/leds.h>
#include <linux/reboot.h>
#include <linux/i2c.h>
-#include <linux/i2c-gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/io.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -101,16 +101,21 @@ static struct platform_device nas100d_leds = {
.dev.platform_data = &nas100d_led_data,
};
-static struct i2c_gpio_platform_data nas100d_i2c_gpio_data = {
- .sda_pin = NAS100D_SDA_PIN,
- .scl_pin = NAS100D_SCL_PIN,
+static struct gpiod_lookup_table nas100d_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NAS100D_SDA_PIN,
+ NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NAS100D_SCL_PIN,
+ NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
};
static struct platform_device nas100d_i2c_gpio = {
.name = "i2c-gpio",
.id = 0,
.dev = {
- .platform_data = &nas100d_i2c_gpio_data,
+ .platform_data = NULL,
},
};
@@ -281,6 +286,7 @@ static void __init nas100d_init(void)
nas100d_flash_resource.end =
IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;
+ gpiod_add_lookup_table(&nas100d_i2c_gpiod_table);
i2c_register_board_info(0, nas100d_i2c_board_info,
ARRAY_SIZE(nas100d_i2c_board_info));
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index bd8dc65b4ffc..91da63a7d7b5 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -25,7 +25,7 @@
#include <linux/leds.h>
#include <linux/reboot.h>
#include <linux/i2c.h>
-#include <linux/i2c-gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/io.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -69,9 +69,14 @@ static struct platform_device nslu2_flash = {
.resource = &nslu2_flash_resource,
};
-static struct i2c_gpio_platform_data nslu2_i2c_gpio_data = {
- .sda_pin = NSLU2_SDA_PIN,
- .scl_pin = NSLU2_SCL_PIN,
+static struct gpiod_lookup_table nslu2_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NSLU2_SDA_PIN,
+ NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NSLU2_SCL_PIN,
+ NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
};
static struct i2c_board_info __initdata nslu2_i2c_board_info [] = {
@@ -116,7 +121,7 @@ static struct platform_device nslu2_i2c_gpio = {
.name = "i2c-gpio",
.id = 0,
.dev = {
- .platform_data = &nslu2_i2c_gpio_data,
+ .platform_data = NULL,
},
};
@@ -251,6 +256,7 @@ static void __init nslu2_init(void)
nslu2_flash_resource.end =
IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;
+ gpiod_add_lookup_table(&nslu2_i2c_gpiod_table);
i2c_register_board_info(0, nslu2_i2c_board_info,
ARRAY_SIZE(nslu2_i2c_board_info));
diff --git a/arch/arm/mach-ks8695/board-acs5k.c b/arch/arm/mach-ks8695/board-acs5k.c
index e4d709c8ed32..937eb1d47e7b 100644
--- a/arch/arm/mach-ks8695/board-acs5k.c
+++ b/arch/arm/mach-ks8695/board-acs5k.c
@@ -16,7 +16,7 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-
+#include <linux/gpio/machine.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c-gpio.h>
@@ -38,9 +38,17 @@
#include "generic.h"
+static struct gpiod_lookup_table acs5k_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("KS8695", 4, NULL, 0,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("KS8695", 5, NULL, 1,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
+};
+
static struct i2c_gpio_platform_data acs5k_i2c_device_platdata = {
- .sda_pin = 4,
- .scl_pin = 5,
.udelay = 10,
};
@@ -95,6 +103,7 @@ static struct i2c_board_info acs5k_i2c_devs[] __initdata = {
static void acs5k_i2c_init(void)
{
/* The gpio interface */
+ gpiod_add_lookup_table(&acs5k_i2c_gpiod_table);
platform_device_register(&acs5k_i2c_device);
/* I2C devices */
i2c_register_board_info(0, acs5k_i2c_devs,
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index 29630061e700..5877e547cecd 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -31,6 +31,7 @@
#include <linux/power_supply.h>
#include <linux/usb/gpio_vbus.h>
#include <linux/i2c-gpio.h>
+#include <linux/gpio/machine.h>
#include <asm/mach-types.h>
#include <asm/suspend.h>
@@ -320,9 +321,17 @@ static struct soc_camera_link palmz72_iclink = {
.flags = SOCAM_DATAWIDTH_8,
};
+static struct gpiod_lookup_table palmz72_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("gpio-pxa", 118, NULL, 0,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("gpio-pxa", 117, NULL, 1,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
+};
+
static struct i2c_gpio_platform_data palmz72_i2c_bus_data = {
- .sda_pin = 118,
- .scl_pin = 117,
.udelay = 10,
.timeout = 100,
};
@@ -369,6 +378,7 @@ static void __init palmz72_camera_init(void)
{
palmz72_cam_gpio_init();
pxa_set_camera_info(&palmz72_pxacamera_platform_data);
+ gpiod_add_lookup_table(&palmz72_i2c_gpiod_table);
platform_device_register(&palmz72_i2c_bus_device);
platform_device_register(&palmz72_camera);
}
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 2d45d18b1a5e..6b7df6fd2448 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -29,6 +29,7 @@
#include <linux/platform_data/pcf857x.h>
#include <linux/platform_data/at24.h>
#include <linux/smc91x.h>
+#include <linux/gpio/machine.h>
#include <linux/gpio.h>
#include <linux/leds.h>
@@ -52,7 +53,6 @@
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/mfd/da903x.h>
-#include <linux/platform_data/sht15.h>
#include "devices.h"
#include "generic.h"
@@ -137,17 +137,18 @@ static unsigned long sg2_im2_unified_pin_config[] __initdata = {
GPIO10_GPIO, /* large basic connector pin 23 */
};
-static struct sht15_platform_data platform_data_sht15 = {
- .gpio_data = 100,
- .gpio_sck = 98,
+static struct gpiod_lookup_table sht15_gpiod_table = {
+ .dev_id = "sht15",
+ .table = {
+ /* FIXME: should this have |GPIO_OPEN_DRAIN set? */
+ GPIO_LOOKUP("gpio-pxa", 100, "data", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", 98, "clk", GPIO_ACTIVE_HIGH),
+ },
};
static struct platform_device sht15 = {
.name = "sht15",
.id = -1,
- .dev = {
- .platform_data = &platform_data_sht15,
- },
};
static struct regulator_consumer_supply stargate2_sensor_3_con[] = {
@@ -608,6 +609,7 @@ static void __init imote2_init(void)
imote2_stargate2_init();
+ gpiod_add_lookup_table(&sht15_gpiod_table);
platform_add_devices(imote2_devices, ARRAY_SIZE(imote2_devices));
i2c_register_board_info(0, imote2_i2c_board_info,
@@ -988,6 +990,7 @@ static void __init stargate2_init(void)
imote2_stargate2_init();
+ gpiod_add_lookup_table(&sht15_gpiod_table);
platform_add_devices(ARRAY_AND_SIZE(stargate2_devices));
i2c_register_board_info(0, ARRAY_AND_SIZE(stargate2_i2c_board_info));
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 8e89d91b206b..4185e7ff073f 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -36,6 +36,7 @@
#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <linux/i2c-gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/i2c/pxa-i2c.h>
#include <linux/serial_8250.h>
#include <linux/smc91x.h>
@@ -458,9 +459,17 @@ static struct platform_device smc91x_device = {
};
/* i2c */
+static struct gpiod_lookup_table viper_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("gpio-pxa", VIPER_RTC_I2C_SDA_GPIO,
+ NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("gpio-pxa", VIPER_RTC_I2C_SCL_GPIO,
+ NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
+};
+
static struct i2c_gpio_platform_data i2c_bus_data = {
- .sda_pin = VIPER_RTC_I2C_SDA_GPIO,
- .scl_pin = VIPER_RTC_I2C_SCL_GPIO,
.udelay = 10,
.timeout = HZ,
};
@@ -779,12 +788,20 @@ static int __init viper_tpm_setup(char *str)
__setup("tpm=", viper_tpm_setup);
+struct gpiod_lookup_table viper_tpm_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("gpio-pxa", VIPER_TPM_I2C_SDA_GPIO,
+ NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("gpio-pxa", VIPER_TPM_I2C_SCL_GPIO,
+ NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
+};
+
static void __init viper_tpm_init(void)
{
struct platform_device *tpm_device;
struct i2c_gpio_platform_data i2c_tpm_data = {
- .sda_pin = VIPER_TPM_I2C_SDA_GPIO,
- .scl_pin = VIPER_TPM_I2C_SCL_GPIO,
.udelay = 10,
.timeout = HZ,
};
@@ -794,6 +811,7 @@ static void __init viper_tpm_init(void)
if (!viper_tpm)
return;
+ gpiod_add_lookup_table(&viper_tpm_i2c_gpiod_table);
tpm_device = platform_device_alloc("i2c-gpio", 2);
if (tpm_device) {
if (!platform_device_add_data(tpm_device,
@@ -943,6 +961,7 @@ static void __init viper_init(void)
smc91x_device.num_resources--;
pxa_set_i2c_info(NULL);
+ gpiod_add_lookup_table(&viper_i2c_gpiod_table);
pwm_add_table(viper_pwm_lookup, ARRAY_SIZE(viper_pwm_lookup));
platform_add_devices(viper_devs, ARRAY_SIZE(viper_devs));
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index e8d25a7bbcb8..7d4feb8a49ac 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -17,6 +17,7 @@
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
#include <mach/hardware.h>
#include <asm/setup.h>
@@ -324,9 +325,17 @@ static struct platform_device simpad_gpio_leds = {
/*
* i2c
*/
+static struct gpiod_lookup_table simpad_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("gpio", 21, NULL, 0,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("gpio", 25, NULL, 1,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
+};
+
static struct i2c_gpio_platform_data simpad_i2c_data = {
- .sda_pin = GPIO_GPIO21,
- .scl_pin = GPIO_GPIO25,
.udelay = 10,
.timeout = HZ,
};
@@ -381,6 +390,7 @@ static int __init simpad_init(void)
ARRAY_SIZE(simpad_flash_resources));
sa11x0_register_mcp(&simpad_mcp_data);
+ gpiod_add_lookup_table(&simpad_i2c_gpiod_table);
ret = platform_add_devices(devices, ARRAY_SIZE(devices));
if(ret)
printk(KERN_WARNING "simpad: Unable to register mq200 framebuffer device");
diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c
index 3a4ed4c33a68..e348bcfe389d 100644
--- a/arch/arm/mach-shmobile/pm-rmobile.c
+++ b/arch/arm/mach-shmobile/pm-rmobile.c
@@ -120,18 +120,12 @@ static int rmobile_pd_power_up(struct generic_pm_domain *genpd)
return __rmobile_pd_power_up(to_rmobile_pd(genpd), true);
}
-static bool rmobile_pd_active_wakeup(struct device *dev)
-{
- return true;
-}
-
static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
{
struct generic_pm_domain *genpd = &rmobile_pd->genpd;
struct dev_power_governor *gov = rmobile_pd->gov;
- genpd->flags |= GENPD_FLAG_PM_CLK;
- genpd->dev_ops.active_wakeup = rmobile_pd_active_wakeup;
+ genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
genpd->power_off = rmobile_pd_power_down;
genpd->power_on = rmobile_pd_power_up;
genpd->attach_dev = cpg_mstp_attach_dev;
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 76e4c83cd5c8..3f24addd7972 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
bool entered_lp2 = false;
if (tegra_pending_sgi())
- ACCESS_ONCE(abort_flag) = true;
+ WRITE_ONCE(abort_flag, true);
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index c1c1a5c67da1..61e281cb29fb 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -141,7 +141,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
pte = pmd_pgtable(*pmd);
pmd_clear(pmd);
pte_free(mm, pte);
- atomic_long_dec(&mm->nr_ptes);
+ mm_dec_nr_ptes(mm);
no_pmd:
pud_clear(pud);
pmd_free(mm, pmd);
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index e8229b9fee4a..8d4a64cc644c 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -278,7 +278,7 @@ config SAMSUNG_PM_CHECK_CHUNKSIZE
help
Set the chunksize in Kilobytes of the CRC for checking memory
corruption over suspend and resume. A smaller value will mean that
- the CRC data block will take more memory, but wil identify any
+ the CRC data block will take more memory, but will identify any
faults with better precision.
See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt>
diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
index 1c98a87786ca..9ed0129bed3c 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -227,7 +227,6 @@ static bool test_regs_ok;
static int test_func_instance;
static int pre_handler_called;
static int post_handler_called;
-static int jprobe_func_called;
static int kretprobe_handler_called;
static int tests_failed;
@@ -370,50 +369,6 @@ static int test_kprobe(long (*func)(long, long))
return 0;
}
-static void __kprobes jprobe_func(long r0, long r1)
-{
- jprobe_func_called = test_func_instance;
- if (r0 == FUNC_ARG1 && r1 == FUNC_ARG2)
- test_regs_ok = true;
- jprobe_return();
-}
-
-static struct jprobe the_jprobe = {
- .entry = jprobe_func,
-};
-
-static int test_jprobe(long (*func)(long, long))
-{
- int ret;
-
- the_jprobe.kp.addr = (kprobe_opcode_t *)func;
- ret = register_jprobe(&the_jprobe);
- if (ret < 0) {
- pr_err("FAIL: register_jprobe failed with %d\n", ret);
- return ret;
- }
-
- ret = call_test_func(func, true);
-
- unregister_jprobe(&the_jprobe);
- the_jprobe.kp.flags = 0; /* Clear disable flag to allow reuse */
-
- if (!ret)
- return -EINVAL;
- if (jprobe_func_called != test_func_instance) {
- pr_err("FAIL: jprobe handler function not called\n");
- return -EINVAL;
- }
- if (!call_test_func(func, false))
- return -EINVAL;
- if (jprobe_func_called == test_func_instance) {
- pr_err("FAIL: probe called after unregistering\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int __kprobes
kretprobe_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
@@ -451,7 +406,7 @@ static int test_kretprobe(long (*func)(long, long))
}
if (!call_test_func(func, false))
return -EINVAL;
- if (jprobe_func_called == test_func_instance) {
+ if (kretprobe_handler_called == test_func_instance) {
pr_err("FAIL: kretprobe called after unregistering\n");
return -EINVAL;
}
@@ -468,18 +423,6 @@ static int run_api_tests(long (*func)(long, long))
if (ret < 0)
return ret;
- pr_info(" jprobe\n");
- ret = test_jprobe(func);
-#if defined(CONFIG_THUMB2_KERNEL) && !defined(MODULE)
- if (ret == -EINVAL) {
- pr_err("FAIL: Known longtime bug with jprobe on Thumb kernels\n");
- tests_failed = ret;
- ret = 0;
- }
-#endif
- if (ret < 0)
- return ret;
-
pr_info(" kretprobe\n");
ret = test_kretprobe(func);
if (ret < 0)
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
index 79214d5ff097..a9dd619c6c29 100644
--- a/arch/arm/vdso/vgettimeofday.c
+++ b/arch/arm/vdso/vgettimeofday.c
@@ -35,7 +35,7 @@ static notrace u32 __vdso_read_begin(const struct vdso_data *vdata)
{
u32 seq;
repeat:
- seq = ACCESS_ONCE(vdata->seq_count);
+ seq = READ_ONCE(vdata->seq_count);
if (seq & 1) {
cpu_relax();
goto repeat;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0df64a6a56d4..a93339f5178f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -21,8 +21,25 @@ config ARM64
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- select ARCH_HAVE_NMI_SAFE_CMPXCHG if ACPI_APEI_SEA
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ select ARCH_INLINE_READ_LOCK if !PREEMPT
+ select ARCH_INLINE_READ_LOCK_BH if !PREEMPT
+ select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT
+ select ARCH_INLINE_READ_UNLOCK if !PREEMPT
+ select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT
+ select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT
+ select ARCH_INLINE_WRITE_LOCK if !PREEMPT
+ select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT
+ select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT
+ select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT
+ select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
+ select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
+ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_USE_QUEUED_RWLOCKS
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING
@@ -68,7 +85,7 @@ config ARM64
select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
- select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+ select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
@@ -98,7 +115,7 @@ config ARM64
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP if NUMA
- select HAVE_NMI if ACPI_APEI_SEA
+ select HAVE_NMI
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -119,6 +136,7 @@ config ARM64
select PCI_ECAM if ACPI
select POWER_RESET
select POWER_SUPPLY
+ select REFCOUNT_FULL
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
@@ -539,6 +557,25 @@ config QCOM_QDF2400_ERRATUM_0065
If unsure, say Y.
+
+config SOCIONEXT_SYNQUACER_PREITS
+ bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
+ default y
+ help
+ Socionext Synquacer SoCs implement a separate h/w block to generate
+ MSI doorbell writes with non-zero values for the device ID.
+
+ If unsure, say Y.
+
+config HISILICON_ERRATUM_161600802
+ bool "Hip07 161600802: Erroneous redistributor VLPI base"
+ default y
+ help
+ The HiSilicon Hip07 SoC uses the wrong redistributor base
+ when issued ITS commands such as VMOVP and VMAPP, and requires
+ a 128kB offset to be applied to the target address in these commands.
+
+ If unsure, say Y.
endmenu
@@ -806,6 +843,7 @@ config FORCE_MAX_ZONEORDER
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
+ depends on SYSCTL
help
Legacy software support may require certain instructions
that have been deprecated or obsoleted in the architecture.
@@ -946,7 +984,7 @@ config ARM64_UAO
help
User Access Override (UAO; part of the ARMv8.2 Extensions)
causes the 'unprivileged' variant of the load/store instructions to
- be overriden to be privileged.
+ be overridden to be privileged.
This option changes get_user() and friends to use the 'unprivileged'
variant of the load/store instructions. This ensures that user-space
@@ -975,6 +1013,17 @@ config ARM64_PMEM
endmenu
+config ARM64_SVE
+ bool "ARM Scalable Vector Extension support"
+ default y
+ help
+ The Scalable Vector Extension (SVE) is an extension to the AArch64
+ execution state which complements and extends the SIMD functionality
+ of the base architecture to support much larger vectors and to enable
+ additional vectorisation opportunities.
+
+ To enable use of this extension on CPUs that implement it, say Y.
+
config ARM64_MODULE_CMODEL_LARGE
bool
@@ -1063,6 +1112,7 @@ config EFI_STUB
config EFI
bool "UEFI runtime support"
depends on OF && !CPU_BIG_ENDIAN
+ depends on KERNEL_MODE_NEON
select LIBFDT
select UCS2_STRING
select EFI_PARAMS_FROM_FDT
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 6b54ee8c1262..1d03ef54295a 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -161,6 +161,9 @@ config ARCH_SEATTLE
config ARCH_SHMOBILE
bool
+config ARCH_SYNQUACER
+ bool "Socionext SynQuacer SoC Family"
+
config ARCH_RENESAS
bool "Renesas SoC Platforms"
select ARCH_SHMOBILE
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 939b310913cf..b35788c909f1 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -14,8 +14,12 @@ LDFLAGS_vmlinux :=-p --no-undefined -X
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
GZFLAGS :=-9
-ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux += -pie -shared -Bsymbolic
+ifeq ($(CONFIG_RELOCATABLE), y)
+# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
+# for relative relocs, since this leads to better Image compression
+# with the relocation offsets always being zero.
+LDFLAGS_vmlinux += -pie -shared -Bsymbolic \
+ $(call ld-option, --no-apply-dynamic-relocs)
endif
ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
@@ -53,6 +57,8 @@ KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
+KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128)
+
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
diff --git a/arch/arm64/boot/dts/.gitignore b/arch/arm64/boot/dts/.gitignore
deleted file mode 100644
index b60ed208c779..000000000000
--- a/arch/arm64/boot/dts/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.dtb
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index c6684ab8e201..d7c22d51bc50 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,34 +1,26 @@
# SPDX-License-Identifier: GPL-2.0
-dts-dirs += actions
-dts-dirs += al
-dts-dirs += allwinner
-dts-dirs += altera
-dts-dirs += amd
-dts-dirs += amlogic
-dts-dirs += apm
-dts-dirs += arm
-dts-dirs += broadcom
-dts-dirs += cavium
-dts-dirs += exynos
-dts-dirs += freescale
-dts-dirs += hisilicon
-dts-dirs += marvell
-dts-dirs += mediatek
-dts-dirs += nvidia
-dts-dirs += qcom
-dts-dirs += realtek
-dts-dirs += renesas
-dts-dirs += rockchip
-dts-dirs += socionext
-dts-dirs += sprd
-dts-dirs += xilinx
-dts-dirs += lg
-dts-dirs += zte
-
-subdir-y := $(dts-dirs)
-
-dtstree := $(srctree)/$(src)
-
-dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(foreach d,$(dts-dirs), $(wildcard $(dtstree)/$(d)/*.dts)))
-
-always := $(dtb-y)
+subdir-y += actions
+subdir-y += al
+subdir-y += allwinner
+subdir-y += altera
+subdir-y += amd
+subdir-y += amlogic
+subdir-y += apm
+subdir-y += arm
+subdir-y += broadcom
+subdir-y += cavium
+subdir-y += exynos
+subdir-y += freescale
+subdir-y += hisilicon
+subdir-y += marvell
+subdir-y += mediatek
+subdir-y += nvidia
+subdir-y += qcom
+subdir-y += realtek
+subdir-y += renesas
+subdir-y += rockchip
+subdir-y += socionext
+subdir-y += sprd
+subdir-y += xilinx
+subdir-y += lg
+subdir-y += zte
diff --git a/arch/arm64/boot/dts/actions/Makefile b/arch/arm64/boot/dts/actions/Makefile
index 62922d688ce3..cc4661256356 100644
--- a/arch/arm64/boot/dts/actions/Makefile
+++ b/arch/arm64/boot/dts/actions/Makefile
@@ -1,5 +1 @@
dtb-$(CONFIG_ARCH_ACTIONS) += s900-bubblegum-96.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/al/Makefile b/arch/arm64/boot/dts/al/Makefile
index 8a6cde4f9b23..036e387112ed 100644
--- a/arch/arm64/boot/dts/al/Makefile
+++ b/arch/arm64/boot/dts/al/Makefile
@@ -1,5 +1 @@
dtb-$(CONFIG_ARCH_ALPINE) += alpine-v2-evp.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
index ff35e184e422..7d3acb355ff3 100644
--- a/arch/arm64/boot/dts/allwinner/Makefile
+++ b/arch/arm64/boot/dts/allwinner/Makefile
@@ -9,7 +9,3 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-pc2.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-prime.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-zero-plus2.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-nanopi-neo2.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/altera/Makefile b/arch/arm64/boot/dts/altera/Makefile
index d7a641698d77..68ba0882a8bb 100644
--- a/arch/arm64/boot/dts/altera/Makefile
+++ b/arch/arm64/boot/dts/altera/Makefile
@@ -1,5 +1 @@
dtb-$(CONFIG_ARCH_STRATIX10) += socfpga_stratix10_socdk.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/amd/Makefile b/arch/arm64/boot/dts/amd/Makefile
index f9963d63006d..6a6093064a32 100644
--- a/arch/arm64/boot/dts/amd/Makefile
+++ b/arch/arm64/boot/dts/amd/Makefile
@@ -2,7 +2,3 @@
dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive.dtb \
amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb \
husky.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index 543416b8dff5..f84b83bb9809 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -20,7 +20,3 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxm-nexbox-a1.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q200.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q201.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-rbox-pro.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/apm/Makefile b/arch/arm64/boot/dts/apm/Makefile
index a10fbdb34229..55b5cdca13b8 100644
--- a/arch/arm64/boot/dts/apm/Makefile
+++ b/arch/arm64/boot/dts/apm/Makefile
@@ -1,7 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb
dtb-$(CONFIG_ARCH_XGENE) += apm-merlin.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/arm/Makefile b/arch/arm64/boot/dts/arm/Makefile
index 470378addca4..4256bae99925 100644
--- a/arch/arm64/boot/dts/arm/Makefile
+++ b/arch/arm64/boot/dts/arm/Makefile
@@ -3,7 +3,3 @@ dtb-$(CONFIG_ARCH_VEXPRESS) += foundation-v8.dtb foundation-v8-gicv3.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb juno-r1.dtb juno-r2.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2f-1xv7-ca53x2.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile
index 3df2db7f8878..2a2591ef1fee 100644
--- a/arch/arm64/boot/dts/broadcom/Makefile
+++ b/arch/arm64/boot/dts/broadcom/Makefile
@@ -1,8 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_BCM2835) += bcm2837-rpi-3-b.dtb
-dts-dirs += northstar2
-dts-dirs += stingray
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
+subdir-y += northstar2
+subdir-y += stingray
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/Makefile b/arch/arm64/boot/dts/broadcom/northstar2/Makefile
index e01a1485b813..83736004336d 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/Makefile
+++ b/arch/arm64/boot/dts/broadcom/northstar2/Makefile
@@ -1,6 +1,2 @@
dtb-$(CONFIG_ARCH_BCM_IPROC) += ns2-svk.dtb
dtb-$(CONFIG_ARCH_BCM_IPROC) += ns2-xmc.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/broadcom/stingray/Makefile b/arch/arm64/boot/dts/broadcom/stingray/Makefile
index 04bb302f3233..c4d06cffcb11 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/Makefile
+++ b/arch/arm64/boot/dts/broadcom/stingray/Makefile
@@ -1,7 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_BCM_IPROC) += bcm958742k.dtb
dtb-$(CONFIG_ARCH_BCM_IPROC) += bcm958742t.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/cavium/Makefile b/arch/arm64/boot/dts/cavium/Makefile
index 9f68c277302b..c178f7e06e18 100644
--- a/arch/arm64/boot/dts/cavium/Makefile
+++ b/arch/arm64/boot/dts/cavium/Makefile
@@ -1,7 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_THUNDER) += thunder-88xx.dtb
dtb-$(CONFIG_ARCH_THUNDER2) += thunder2-99xx.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/exynos/Makefile b/arch/arm64/boot/dts/exynos/Makefile
index 6914b2cbd397..e0a2facde6a2 100644
--- a/arch/arm64/boot/dts/exynos/Makefile
+++ b/arch/arm64/boot/dts/exynos/Makefile
@@ -3,7 +3,3 @@ dtb-$(CONFIG_ARCH_EXYNOS) += \
exynos5433-tm2.dtb \
exynos5433-tm2e.dtb \
exynos7-espresso.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index dc02e82aba7c..86e18adb695a 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -13,7 +13,3 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-rdb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index df83915d6ea6..fe1ea5d707a8 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -471,5 +471,36 @@
dr_mode = "host";
phy_type = "ulpi";
};
+
+ msi: msi-controller1@1572000 {
+ compatible = "fsl,ls1012a-msi";
+ reg = <0x0 0x1572000 0x0 0x8>;
+ msi-controller;
+ interrupts = <0 126 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pcie@3400000 {
+ compatible = "fsl,ls1012a-pcie", "snps,dw-pcie";
+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
+ 0x40 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg-names = "regs", "config";
+ interrupts = <0 118 0x4>, /* controller interrupt */
+ <0 117 0x4>; /* PME interrupt */
+ interrupt-names = "aer", "pme";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ num-lanes = <4>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ msi-parent = <&msi>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic 0 110 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic 0 111 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic 0 112 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic 0 113 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index c8ff0baddf1d..e8a478ca1485 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -661,6 +661,81 @@
<GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
};
+ pcie@3400000 {
+ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie";
+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
+ 0x40 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg-names = "regs", "config";
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>; /* PME interrupt */
+ interrupt-names = "aer", "pme";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ num-lanes = <4>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ msi-parent = <&msi1>, <&msi2>, <&msi3>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pcie@3500000 {
+ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie";
+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */
+ 0x48 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg-names = "regs", "config";
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
+ <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>; /* PME interrupt */
+ interrupt-names = "aer", "pme";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ num-lanes = <2>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ msi-parent = <&msi2>, <&msi3>, <&msi1>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pcie@3600000 {
+ compatible = "fsl,ls1046a-pcie", "snps,dw-pcie";
+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */
+ 0x50 0x00000000 0x0 0x00002000>; /* configuration space */
+ reg-names = "regs", "config";
+ interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>; /* PME interrupt */
+ interrupt-names = "aer", "pme";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ dma-coherent;
+ num-lanes = <2>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
+ 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
+ msi-parent = <&msi3>, <&msi1>, <&msi2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0000 0 0 1 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 2 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 3 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <0000 0 0 4 &gic GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
};
reserved-memory {
diff --git a/arch/arm64/boot/dts/hisilicon/Makefile b/arch/arm64/boot/dts/hisilicon/Makefile
index 521ed484a5d1..03d93f8ef8a9 100644
--- a/arch/arm64/boot/dts/hisilicon/Makefile
+++ b/arch/arm64/boot/dts/hisilicon/Makefile
@@ -5,7 +5,3 @@ dtb-$(CONFIG_ARCH_HISI) += hi6220-hikey.dtb
dtb-$(CONFIG_ARCH_HISI) += hip05-d02.dtb
dtb-$(CONFIG_ARCH_HISI) += hip06-d03.dtb
dtb-$(CONFIG_ARCH_HISI) += hip07-d05.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/lg/Makefile b/arch/arm64/boot/dts/lg/Makefile
index e345b8e58efe..4c3959e24e1b 100644
--- a/arch/arm64/boot/dts/lg/Makefile
+++ b/arch/arm64/boot/dts/lg/Makefile
@@ -1,7 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_LG1K) += lg1312-ref.dtb
dtb-$(CONFIG_ARCH_LG1K) += lg1313-ref.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/marvell/Makefile b/arch/arm64/boot/dts/marvell/Makefile
index 5633676fa9d0..cb454beede55 100644
--- a/arch/arm64/boot/dts/marvell/Makefile
+++ b/arch/arm64/boot/dts/marvell/Makefile
@@ -10,7 +10,3 @@ dtb-$(CONFIG_ARCH_MVEBU) += armada-7040-db.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-8040-db.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-8040-mcbin.dtb
dtb-$(CONFIG_ARCH_MVEBU) += armada-8080-db.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index 1d05d1824fa9..ac17f60f998c 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -5,7 +5,3 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt6795-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt6797-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7622-rfb1.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index b99a27372965..26396ef53bde 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -682,8 +682,7 @@
};
mmc0: mmc@11230000 {
- compatible = "mediatek,mt8173-mmc",
- "mediatek,mt8135-mmc";
+ compatible = "mediatek,mt8173-mmc";
reg = <0 0x11230000 0 0x1000>;
interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_MSDC30_0>,
@@ -693,8 +692,7 @@
};
mmc1: mmc@11240000 {
- compatible = "mediatek,mt8173-mmc",
- "mediatek,mt8135-mmc";
+ compatible = "mediatek,mt8173-mmc";
reg = <0 0x11240000 0 0x1000>;
interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_MSDC30_1>,
@@ -704,8 +702,7 @@
};
mmc2: mmc@11250000 {
- compatible = "mediatek,mt8173-mmc",
- "mediatek,mt8135-mmc";
+ compatible = "mediatek,mt8173-mmc";
reg = <0 0x11250000 0 0x1000>;
interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_MSDC30_2>,
@@ -715,8 +712,7 @@
};
mmc3: mmc@11260000 {
- compatible = "mediatek,mt8173-mmc",
- "mediatek,mt8135-mmc";
+ compatible = "mediatek,mt8173-mmc";
reg = <0 0x11260000 0 0x1000>;
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_MSDC30_3>,
diff --git a/arch/arm64/boot/dts/nvidia/Makefile b/arch/arm64/boot/dts/nvidia/Makefile
index 6bc0c6ab4b7f..676aa2f238d1 100644
--- a/arch/arm64/boot/dts/nvidia/Makefile
+++ b/arch/arm64/boot/dts/nvidia/Makefile
@@ -5,6 +5,3 @@ dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2371-2180.dtb
dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2571.dtb
dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-smaug.dtb
dtb-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186-p2771-0000.dtb
-
-always := $(dtb-y)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index e7b25bee3f1e..55ec5ee7f7e8 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -6,7 +6,3 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8916-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8992-bullhead-rev-101.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8994-angler-rev-101.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8996-mtp.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/realtek/Makefile b/arch/arm64/boot/dts/realtek/Makefile
index 8521e921e59a..6e2ae59a3745 100644
--- a/arch/arm64/boot/dts/realtek/Makefile
+++ b/arch/arm64/boot/dts/realtek/Makefile
@@ -1,5 +1 @@
dtb-$(CONFIG_ARCH_REALTEK) += rtd1295-zidoo-x9s.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index d417701640bd..6b282283f1bf 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -4,6 +4,3 @@ dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-xs.dtb
dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-es1-salvator-x.dtb r8a7795-es1-h3ulcb.dtb
dtb-$(CONFIG_ARCH_R8A7796) += r8a7796-salvator-x.dtb r8a7796-m3ulcb.dtb
dtb-$(CONFIG_ARCH_R8A77995) += r8a77995-draak.dtb
-
-always := $(dtb-y)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index 84801892ee61..ce2701e37d00 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -11,7 +11,3 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-firefly.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-kevin.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-puma-haikou.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-sapphire-excavator.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/socionext/Makefile b/arch/arm64/boot/dts/socionext/Makefile
index 72dbe8acd9fd..d45441249cb5 100644
--- a/arch/arm64/boot/dts/socionext/Makefile
+++ b/arch/arm64/boot/dts/socionext/Makefile
@@ -5,6 +5,3 @@ dtb-$(CONFIG_ARCH_UNIPHIER) += \
uniphier-ld20-global.dtb \
uniphier-ld20-ref.dtb \
uniphier-pxs3-ref.dtb
-
-always := $(dtb-y)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/sprd/Makefile b/arch/arm64/boot/dts/sprd/Makefile
index d7188be103c5..2bdc23804f40 100644
--- a/arch/arm64/boot/dts/sprd/Makefile
+++ b/arch/arm64/boot/dts/sprd/Makefile
@@ -1,7 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_SPRD) += sc9836-openphone.dtb \
sp9860g-1h10.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/xilinx/Makefile b/arch/arm64/boot/dts/xilinx/Makefile
index ae16427f6a4a..a2d67084a514 100644
--- a/arch/arm64/boot/dts/xilinx/Makefile
+++ b/arch/arm64/boot/dts/xilinx/Makefile
@@ -1,5 +1 @@
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-ep108.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/zte/Makefile b/arch/arm64/boot/dts/zte/Makefile
index d86c4def6bc9..14a1cdfc1559 100644
--- a/arch/arm64/boot/dts/zte/Makefile
+++ b/arch/arm64/boot/dts/zte/Makefile
@@ -1,6 +1,2 @@
dtb-$(CONFIG_ARCH_ZX) += zx296718-evb.dtb
dtb-$(CONFIG_ARCH_ZX) += zx296718-pcbox.dtb
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 2326e39d5892..e63d0a8312de 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += msi.h
generic-y += preempt.h
+generic-y += qrwlock.h
generic-y += rwsem.h
generic-y += segment.h
generic-y += serial.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 59cca1d6ec54..32f465a80e4e 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -126,18 +126,6 @@ static inline const char *acpi_get_enable_method(int cpu)
*/
#define acpi_disable_cmcff 1
pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr);
-
-/*
- * Despite its name, this function must still broadcast the TLB
- * invalidation in order to ensure other CPUs don't end up with junk
- * entries as a result of speculation. Unusually, its also called in
- * IRQ context (ghes_iounmap_irq) so if we ever need to use IPIs for
- * TLB broadcasting, then we're in trouble here.
- */
-static inline void arch_apei_flush_tlb_one(unsigned long addr)
-{
- flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
-}
#endif /* CONFIG_ACPI_APEI */
#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index b7e3f74822da..9becba9ab392 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -87,6 +87,11 @@ static inline void gic_write_ctlr(u32 val)
isb();
}
+static inline u32 gic_read_ctlr(void)
+{
+ return read_sysreg_s(SYS_ICC_CTLR_EL1);
+}
+
static inline void gic_write_grpen1(u32 val)
{
write_sysreg_s(val, SYS_ICC_IGRPEN1_EL1);
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index a652ce0a5cb2..bdedd8f748d1 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -144,6 +144,7 @@ static inline u32 arch_timer_get_cntkctl(void)
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
write_sysreg(cntkctl, cntkctl_el1);
+ isb();
}
static inline u64 arch_counter_get_cntpct(void)
diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h
index 636e755bcdca..b3552c4a405f 100644
--- a/arch/arm64/include/asm/asm-bug.h
+++ b/arch/arm64/include/asm/asm-bug.h
@@ -22,10 +22,10 @@
#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
#define __BUGVERBOSE_LOCATION(file, line) \
.pushsection .rodata.str,"aMS",@progbits,1; \
- 2: .string file; \
+ 14472: .string file; \
.popsection; \
\
- .long 2b - 0b; \
+ .long 14472b - 14470b; \
.short line;
#else
#define _BUGVERBOSE_LOCATION(file, line)
@@ -36,11 +36,11 @@
#define __BUG_ENTRY(flags) \
.pushsection __bug_table,"aw"; \
.align 2; \
- 0: .long 1f - 0b; \
+ 14470: .long 14471f - 14470b; \
_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
.short flags; \
.popsection; \
- 1:
+ 14471:
#else
#define __BUG_ENTRY(flags)
#endif
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index d58a6253c6ab..aef72d886677 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -25,12 +25,41 @@
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
+#include <asm/debug-monitors.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
+ .macro save_and_disable_daif, flags
+ mrs \flags, daif
+ msr daifset, #0xf
+ .endm
+
+ .macro disable_daif
+ msr daifset, #0xf
+ .endm
+
+ .macro enable_daif
+ msr daifclr, #0xf
+ .endm
+
+ .macro restore_daif, flags:req
+ msr daif, \flags
+ .endm
+
+	/* Only valid for an AArch64 pstate; PSR_D_BIT is different for AArch32 */
+ .macro inherit_daif, pstate:req, tmp:req
+ and \tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+ msr daif, \tmp
+ .endm
+
+	/* IRQ is the lowest-priority flag; unconditionally unmask the rest. */
+ .macro enable_da_f
+ msr daifclr, #(8 | 4 | 1)
+ .endm
+
/*
* Enable and disable interrupts.
*/
@@ -51,13 +80,6 @@
msr daif, \flags
.endm
-/*
- * Enable and disable debug exceptions.
- */
- .macro disable_dbg
- msr daifset, #8
- .endm
-
.macro enable_dbg
msr daifclr, #8
.endm
@@ -65,31 +87,22 @@
.macro disable_step_tsk, flgs, tmp
tbz \flgs, #TIF_SINGLESTEP, 9990f
mrs \tmp, mdscr_el1
- bic \tmp, \tmp, #1
+ bic \tmp, \tmp, #DBG_MDSCR_SS
msr mdscr_el1, \tmp
isb // Synchronise with enable_dbg
9990:
.endm
+ /* call with daif masked */
.macro enable_step_tsk, flgs, tmp
tbz \flgs, #TIF_SINGLESTEP, 9990f
- disable_dbg
mrs \tmp, mdscr_el1
- orr \tmp, \tmp, #1
+ orr \tmp, \tmp, #DBG_MDSCR_SS
msr mdscr_el1, \tmp
9990:
.endm
/*
- * Enable both debug exceptions and interrupts. This is likely to be
- * faster than two daifclr operations, since writes to this register
- * are self-synchronising.
- */
- .macro enable_dbg_and_irq
- msr daifclr, #(8 | 2)
- .endm
-
-/*
* SMP data memory barrier
*/
.macro smp_dmb, opt
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 0fe7e43b7fbc..77651c49ef44 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -31,6 +31,8 @@
#define dmb(opt) asm volatile("dmb " #opt : : : "memory")
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
+#define psb_csync() asm volatile("hint #17" : : : "memory")
+
#define mb() dsb(sy)
#define rmb() dsb(ld)
#define wmb() dsb(st)
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 889226b4c6e1..88392272250e 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -41,6 +41,7 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64mmfr2;
u64 reg_id_aa64pfr0;
u64 reg_id_aa64pfr1;
+ u64 reg_id_aa64zfr0;
u32 reg_id_dfr0;
u32 reg_id_isar0;
@@ -59,6 +60,9 @@ struct cpuinfo_arm64 {
u32 reg_mvfr0;
u32 reg_mvfr1;
u32 reg_mvfr2;
+
+ /* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */
+ u64 reg_zcr;
};
DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 8da621627d7c..2ff7c5e8efab 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -40,7 +40,8 @@
#define ARM64_WORKAROUND_858921 19
#define ARM64_WORKAROUND_CAVIUM_30115 20
#define ARM64_HAS_DCPOP 21
+#define ARM64_SVE 22
-#define ARM64_NCAPS 22
+#define ARM64_NCAPS 23
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 428ee1f2468c..ac67cfc2585a 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -10,7 +10,9 @@
#define __ASM_CPUFEATURE_H
#include <asm/cpucaps.h>
+#include <asm/fpsimd.h>
#include <asm/hwcap.h>
+#include <asm/sigcontext.h>
#include <asm/sysreg.h>
/*
@@ -223,6 +225,13 @@ static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
return val == ID_AA64PFR0_EL0_32BIT_64BIT;
}
+static inline bool id_aa64pfr0_sve(u64 pfr0)
+{
+ u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);
+
+ return val > 0;
+}
+
void __init setup_cpu_features(void);
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
@@ -262,6 +271,39 @@ static inline bool system_uses_ttbr0_pan(void)
!cpus_have_const_cap(ARM64_HAS_PAN);
}
+static inline bool system_supports_sve(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_SVE) &&
+ cpus_have_const_cap(ARM64_SVE);
+}
+
+/*
+ * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
+ * vector length.
+ *
+ * Use only if SVE is present.
+ * This function clobbers the SVE vector length.
+ */
+static inline u64 read_zcr_features(void)
+{
+ u64 zcr;
+ unsigned int vq_max;
+
+ /*
+ * Set the maximum possible VL, and write zeroes to all other
+ * bits to see if they stick.
+ */
+ sve_kernel_enable(NULL);
+ write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
+
+ zcr = read_sysreg_s(SYS_ZCR_EL1);
+ zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
+ vq_max = sve_vq_from_vl(sve_get_vl());
+ zcr |= vq_max - 1; /* set LEN field to maximum effective value */
+
+ return zcr;
+}
+
#endif /* __ASSEMBLY__ */
#endif
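
The pseudo-ZCR value assembled by read_zcr_features() above records the largest supported vector length in the LEN field as (vq - 1), where vq counts 128-bit quadwords. A minimal sketch of the corresponding arithmetic, assuming the ZCR_ELx_LEN_MASK and SVE_VQ_BYTES definitions introduced elsewhere in this series:

/* Illustration only: recover the maximum vector length from a pseudo-ZCR. */
static inline unsigned int zcr_to_max_vl_bytes(u64 zcr)
{
	unsigned int vq = (zcr & ZCR_ELx_LEN_MASK) + 1;	/* LEN holds vq - 1 */

	return vq * SVE_VQ_BYTES;			/* 16 bytes per quadword */
}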
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
new file mode 100644
index 000000000000..22e4c83de5a5
--- /dev/null
+++ b/arch/arm64/include/asm/daifflags.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2017 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_DAIFFLAGS_H
+#define __ASM_DAIFFLAGS_H
+
+#include <linux/irqflags.h>
+
+#define DAIF_PROCCTX 0
+#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
+
+/* mask/save/unmask/restore all exceptions, including interrupts. */
+static inline void local_daif_mask(void)
+{
+ asm volatile(
+ "msr daifset, #0xf // local_daif_mask\n"
+ :
+ :
+ : "memory");
+ trace_hardirqs_off();
+}
+
+static inline unsigned long local_daif_save(void)
+{
+ unsigned long flags;
+
+ asm volatile(
+ "mrs %0, daif // local_daif_save\n"
+ : "=r" (flags)
+ :
+ : "memory");
+ local_daif_mask();
+
+ return flags;
+}
+
+static inline void local_daif_unmask(void)
+{
+ trace_hardirqs_on();
+ asm volatile(
+ "msr daifclr, #0xf // local_daif_unmask"
+ :
+ :
+ : "memory");
+}
+
+static inline void local_daif_restore(unsigned long flags)
+{
+ if (!arch_irqs_disabled_flags(flags))
+ trace_hardirqs_on();
+ asm volatile(
+ "msr daif, %0 // local_daif_restore"
+ :
+ : "r" (flags)
+ : "memory");
+ if (arch_irqs_disabled_flags(flags))
+ trace_hardirqs_off();
+}
+
+#endif
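
The helpers in this new header are meant to be used as matched pairs so that the hardirq tracing state stays consistent with the I bit. A minimal sketch of the expected pattern, assuming a caller running in process context:

static void daif_critical_section_example(void)
{
	unsigned long flags;

	flags = local_daif_save();	/* mask D, A, I and F; remember old state */
	/* ... work that must not be interrupted by any exception ... */
	local_daif_restore(flags);	/* return to whatever was unmasked before */
}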
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 33be513ef24c..fac1c4de7898 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -188,8 +188,8 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
#define compat_start_thread compat_start_thread
/*
- * Unlike the native SET_PERSONALITY macro, the compat version inherits
- * READ_IMPLIES_EXEC across a fork() since this is the behaviour on
+ * Unlike the native SET_PERSONALITY macro, the compat version maintains
+ * READ_IMPLIES_EXEC across an execve() since this is the behaviour on
* arch/arm/.
*/
#define COMPAT_SET_PERSONALITY(ex) \
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 66ed8b6b9976..014d7d8edcf9 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -43,7 +43,8 @@
#define ESR_ELx_EC_HVC64 (0x16)
#define ESR_ELx_EC_SMC64 (0x17)
#define ESR_ELx_EC_SYS64 (0x18)
-/* Unallocated EC: 0x19 - 0x1E */
+#define ESR_ELx_EC_SVE (0x19)
+/* Unallocated EC: 0x1A - 0x1E */
#define ESR_ELx_EC_IMP_DEF (0x1f)
#define ESR_ELx_EC_IABT_LOW (0x20)
#define ESR_ELx_EC_IABT_CUR (0x21)
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index caf86be815ba..4052ec39e8db 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -51,6 +51,13 @@ enum fixed_addresses {
FIX_EARLYCON_MEM_BASE,
FIX_TEXT_POKE0,
+
+#ifdef CONFIG_ACPI_APEI_GHES
+ /* Used for GHES mapping from assorted contexts */
+ FIX_APEI_GHES_IRQ,
+ FIX_APEI_GHES_NMI,
+#endif /* CONFIG_ACPI_APEI_GHES */
+
__end_of_permanent_fixed_addresses,
/*
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 410c48163c6a..74f34392a531 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -17,9 +17,13 @@
#define __ASM_FP_H
#include <asm/ptrace.h>
+#include <asm/errno.h>
#ifndef __ASSEMBLY__
+#include <linux/cache.h>
+#include <linux/stddef.h>
+
/*
* FP/SIMD storage area has:
* - FPSR and FPCR
@@ -35,13 +39,16 @@ struct fpsimd_state {
__uint128_t vregs[32];
u32 fpsr;
u32 fpcr;
+ /*
+ * For ptrace compatibility, pad to next 128-bit
+ * boundary here if extending this struct.
+ */
};
};
/* the id of the last cpu to have restored this state */
unsigned int cpu;
};
-
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK 0xf800009f
@@ -61,11 +68,73 @@ extern void fpsimd_load_state(struct fpsimd_state *state);
extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void);
+extern void fpsimd_signal_preserve_current_state(void);
extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct fpsimd_state *state);
extern void fpsimd_flush_task_state(struct task_struct *target);
+extern void sve_flush_cpu_state(void);
+
+/* Maximum VL that SVE VL-agnostic software can transparently support */
+#define SVE_VL_ARCH_MAX 0x100
+
+extern void sve_save_state(void *state, u32 *pfpsr);
+extern void sve_load_state(void const *state, u32 const *pfpsr,
+ unsigned long vq_minus_1);
+extern unsigned int sve_get_vl(void);
+extern int sve_kernel_enable(void *);
+
+extern int __ro_after_init sve_max_vl;
+
+#ifdef CONFIG_ARM64_SVE
+
+extern size_t sve_state_size(struct task_struct const *task);
+
+extern void sve_alloc(struct task_struct *task);
+extern void fpsimd_release_task(struct task_struct *task);
+extern void fpsimd_sync_to_sve(struct task_struct *task);
+extern void sve_sync_to_fpsimd(struct task_struct *task);
+extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);
+
+extern int sve_set_vector_length(struct task_struct *task,
+ unsigned long vl, unsigned long flags);
+
+extern int sve_set_current_vl(unsigned long arg);
+extern int sve_get_current_vl(void);
+
+/*
+ * Probing and setup functions.
+ * Calls to these functions must be serialised with one another.
+ */
+extern void __init sve_init_vq_map(void);
+extern void sve_update_vq_map(void);
+extern int sve_verify_vq_map(void);
+extern void __init sve_setup(void);
+
+#else /* ! CONFIG_ARM64_SVE */
+
+static inline void sve_alloc(struct task_struct *task) { }
+static inline void fpsimd_release_task(struct task_struct *task) { }
+static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
+static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }
+
+static inline int sve_set_current_vl(unsigned long arg)
+{
+ return -EINVAL;
+}
+
+static inline int sve_get_current_vl(void)
+{
+ return -EINVAL;
+}
+
+static inline void sve_init_vq_map(void) { }
+static inline void sve_update_vq_map(void) { }
+static inline int sve_verify_vq_map(void) { return 0; }
+static inline void sve_setup(void) { }
+
+#endif /* ! CONFIG_ARM64_SVE */
/* For use by EFI runtime services calls only */
extern void __efi_fpsimd_begin(void);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 0f5fdd388b0d..e050d765ca9e 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -75,3 +75,151 @@
ldr w\tmpnr, [\state, #16 * 2 + 4]
fpsimd_restore_fpcr x\tmpnr, \state
.endm
+
+/* Sanity-check macros to help avoid encoding garbage instructions */
+
+.macro _check_general_reg nr
+ .if (\nr) < 0 || (\nr) > 30
+ .error "Bad register number \nr."
+ .endif
+.endm
+
+.macro _sve_check_zreg znr
+ .if (\znr) < 0 || (\znr) > 31
+ .error "Bad Scalable Vector Extension vector register number \znr."
+ .endif
+.endm
+
+.macro _sve_check_preg pnr
+ .if (\pnr) < 0 || (\pnr) > 15
+ .error "Bad Scalable Vector Extension predicate register number \pnr."
+ .endif
+.endm
+
+.macro _check_num n, min, max
+ .if (\n) < (\min) || (\n) > (\max)
+ .error "Number \n out of range [\min,\max]"
+ .endif
+.endm
+
+/* SVE instruction encodings for non-SVE-capable assemblers */
+
+/* STR (vector): STR Z\nz, [X\nxbase, #\offset, MUL VL] */
+.macro _sve_str_v nz, nxbase, offset=0
+ _sve_check_zreg \nz
+ _check_general_reg \nxbase
+ _check_num (\offset), -0x100, 0xff
+ .inst 0xe5804000 \
+ | (\nz) \
+ | ((\nxbase) << 5) \
+ | (((\offset) & 7) << 10) \
+ | (((\offset) & 0x1f8) << 13)
+.endm
+
+/* LDR (vector): LDR Z\nz, [X\nxbase, #\offset, MUL VL] */
+.macro _sve_ldr_v nz, nxbase, offset=0
+ _sve_check_zreg \nz
+ _check_general_reg \nxbase
+ _check_num (\offset), -0x100, 0xff
+ .inst 0x85804000 \
+ | (\nz) \
+ | ((\nxbase) << 5) \
+ | (((\offset) & 7) << 10) \
+ | (((\offset) & 0x1f8) << 13)
+.endm
+
+/* STR (predicate): STR P\np, [X\nxbase, #\offset, MUL VL] */
+.macro _sve_str_p np, nxbase, offset=0
+ _sve_check_preg \np
+ _check_general_reg \nxbase
+ _check_num (\offset), -0x100, 0xff
+ .inst 0xe5800000 \
+ | (\np) \
+ | ((\nxbase) << 5) \
+ | (((\offset) & 7) << 10) \
+ | (((\offset) & 0x1f8) << 13)
+.endm
+
+/* LDR (predicate): LDR P\np, [X\nxbase, #\offset, MUL VL] */
+.macro _sve_ldr_p np, nxbase, offset=0
+ _sve_check_preg \np
+ _check_general_reg \nxbase
+ _check_num (\offset), -0x100, 0xff
+ .inst 0x85800000 \
+ | (\np) \
+ | ((\nxbase) << 5) \
+ | (((\offset) & 7) << 10) \
+ | (((\offset) & 0x1f8) << 13)
+.endm
+
+/* RDVL X\nx, #\imm */
+.macro _sve_rdvl nx, imm
+ _check_general_reg \nx
+ _check_num (\imm), -0x20, 0x1f
+ .inst 0x04bf5000 \
+ | (\nx) \
+ | (((\imm) & 0x3f) << 5)
+.endm
+
+/* RDFFR (unpredicated): RDFFR P\np.B */
+.macro _sve_rdffr np
+ _sve_check_preg \np
+ .inst 0x2519f000 \
+ | (\np)
+.endm
+
+/* WRFFR P\np.B */
+.macro _sve_wrffr np
+ _sve_check_preg \np
+ .inst 0x25289000 \
+ | ((\np) << 5)
+.endm
+
+.macro __for from:req, to:req
+ .if (\from) == (\to)
+ _for__body \from
+ .else
+ __for \from, (\from) + ((\to) - (\from)) / 2
+ __for (\from) + ((\to) - (\from)) / 2 + 1, \to
+ .endif
+.endm
+
+.macro _for var:req, from:req, to:req, insn:vararg
+ .macro _for__body \var:req
+ \insn
+ .endm
+
+ __for \from, \to
+
+ .purgem _for__body
+.endm
+
+.macro sve_save nxbase, xpfpsr, nxtmp
+ _for n, 0, 31, _sve_str_v \n, \nxbase, \n - 34
+ _for n, 0, 15, _sve_str_p \n, \nxbase, \n - 16
+ _sve_rdffr 0
+ _sve_str_p 0, \nxbase
+ _sve_ldr_p 0, \nxbase, -16
+
+ mrs x\nxtmp, fpsr
+ str w\nxtmp, [\xpfpsr]
+ mrs x\nxtmp, fpcr
+ str w\nxtmp, [\xpfpsr, #4]
+.endm
+
+.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp
+ mrs_s x\nxtmp, SYS_ZCR_EL1
+ bic x\nxtmp, x\nxtmp, ZCR_ELx_LEN_MASK
+ orr x\nxtmp, x\nxtmp, \xvqminus1
+ msr_s SYS_ZCR_EL1, x\nxtmp // self-synchronising
+
+ _for n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34
+ _sve_ldr_p 0, \nxbase
+ _sve_wrffr 0
+ _for n, 0, 15, _sve_ldr_p \n, \nxbase, \n - 16
+
+ ldr w\nxtmp, [\xpfpsr]
+ msr fpsr, x\nxtmp
+ ldr w\nxtmp, [\xpfpsr, #4]
+ msr fpcr, x\nxtmp
+.endm
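
The _sve_* macros above hand-assemble SVE instructions so that the file still builds with assemblers that predate SVE: each macro ORs register numbers and immediates into a fixed opcode and emits it with .inst. As a rough C restatement of the RDVL case (illustration only, mirroring the macro's own bit-packing):

/* Same packing as _sve_rdvl: RDVL X<nx>, #<imm>, with a signed 6-bit imm. */
static inline u32 sve_rdvl_encoding(unsigned int nx, int imm)
{
	return 0x04bf5000 | nx | (((u32)imm & 0x3f) << 5);
}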
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 8c581281fa12..24692edf1a69 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -21,6 +21,19 @@
#include <asm/ptrace.h>
/*
+ * AArch64 has flags for masking: Debug, Asynchronous (SError), Interrupts and
+ * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
+ * order:
+ * Masking debug exceptions causes all other exceptions to be masked too.
+ * Masking SError masks irq, but not debug exceptions. Masking irqs has no
+ * side effects for other flags. Keeping to this order makes it easier for
+ * entry.S to know which exceptions should be unmasked.
+ *
+ * FIQ is never expected, but we mask it when we disable debug exceptions, and
+ * unmask it at all other times.
+ */
+
+/*
* CPU interrupt mask handling.
*/
static inline unsigned long arch_local_irq_save(void)
@@ -53,12 +66,6 @@ static inline void arch_local_irq_disable(void)
: "memory");
}
-#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory")
-#define local_fiq_disable() asm("msr daifset, #1" : : : "memory")
-
-#define local_async_enable() asm("msr daifclr, #4" : : : "memory")
-#define local_async_disable() asm("msr daifset, #4" : : : "memory")
-
/*
* Save the current interrupt enable state.
*/
@@ -89,26 +96,5 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
{
return flags & PSR_I_BIT;
}
-
-/*
- * save and restore debug state
- */
-#define local_dbg_save(flags) \
- do { \
- typecheck(unsigned long, flags); \
- asm volatile( \
- "mrs %0, daif // local_dbg_save\n" \
- "msr daifset, #8" \
- : "=r" (flags) : : "memory"); \
- } while (0)
-
-#define local_dbg_restore(flags) \
- do { \
- typecheck(unsigned long, flags); \
- asm volatile( \
- "msr daif, %0 // local_dbg_restore\n" \
- : : "r" (flags) : "memory"); \
- } while (0)
-
#endif
#endif
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 61d694c2eae5..7f069ff37f06 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -185,7 +185,9 @@
#define CPTR_EL2_TCPAC (1 << 31)
#define CPTR_EL2_TTA (1 << 20)
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
-#define CPTR_EL2_DEFAULT 0x000033ff
+#define CPTR_EL2_TZ (1 << 8)
+#define CPTR_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 */
+#define CPTR_EL2_DEFAULT CPTR_EL2_RES1
/* Hyp Debug Configuration Register bits */
#define MDCR_EL2_TPMS (1 << 14)
@@ -236,5 +238,6 @@
#define CPACR_EL1_FPEN (3 << 20)
#define CPACR_EL1_TTA (1 << 28)
+#define CPACR_EL1_DEFAULT (CPACR_EL1_FPEN | CPACR_EL1_ZEN_EL1EN)
#endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e923b58606e2..674912d7a571 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cpufeature.h>
+#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
@@ -384,4 +385,14 @@ static inline void __cpu_init_stage2(void)
"PARange is %d bits, unsupported configuration!", parange);
}
+/*
+ * All host FP/SIMD state is restored on guest exit, so nothing needs
+ * doing here except in the SVE case:
+ */
+static inline void kvm_fpsimd_flush_cpu_state(void)
+{
+ if (system_supports_sve())
+ sve_flush_cpu_state();
+}
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index f7c4d2146aed..d4bae7d6e0d8 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -61,8 +61,6 @@
* KIMAGE_VADDR - the virtual address of the start of the kernel image
* VA_BITS - the maximum number of bits for virtual addresses.
* VA_START - the first kernel virtual address.
- * TASK_SIZE - the maximum size of a user space task.
- * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
*/
#define VA_BITS (CONFIG_ARM64_VA_BITS)
#define VA_START (UL(0xffffffffffffffff) - \
@@ -77,19 +75,6 @@
#define PCI_IO_END (VMEMMAP_START - SZ_2M)
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
-#define TASK_SIZE_64 (UL(1) << VA_BITS)
-
-#ifdef CONFIG_COMPAT
-#define TASK_SIZE_32 UL(0x100000000)
-#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
- TASK_SIZE_32 : TASK_SIZE_64)
-#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
- TASK_SIZE_32 : TASK_SIZE_64)
-#else
-#define TASK_SIZE TASK_SIZE_64
-#endif /* CONFIG_COMPAT */
-
-#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
#define KERNEL_START _text
#define KERNEL_END _end
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index d25f4f137c2a..5ca6a573a701 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -26,7 +26,7 @@
#define check_pgt_cache() do { } while (0)
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
#if CONFIG_PGTABLE_LEVELS > 2
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b46e54c2399b..c9530b5b5ca8 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -98,6 +98,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
#define pte_valid_young(pte) \
((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+#define pte_valid_user(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
/*
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
@@ -107,6 +109,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_accessible(mm, pte) \
(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
+/*
+ * p??_access_permitted() is true for valid user mappings (subject to the
+ * write permission check) other than user execute-only mappings, which do
+ * not have the PTE_USER bit set. PROT_NONE mappings do not have the
+ * PTE_VALID bit set.
+ */
+#define pte_access_permitted(pte, write) \
+ (pte_valid_user(pte) && (!(write) || pte_write(pte)))
+#define pmd_access_permitted(pmd, write) \
+ (pte_access_permitted(pmd_pte(pmd), (write)))
+#define pud_access_permitted(pud, write) \
+ (pte_access_permitted(pud_pte(pud), (write)))
+
static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
pte_val(pte) &= ~pgprot_val(prot);
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 29adab8138c3..023cacb946c3 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -19,6 +19,10 @@
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H
+#define TASK_SIZE_64 (UL(1) << VA_BITS)
+
+#ifndef __ASSEMBLY__
+
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
@@ -37,6 +41,22 @@
#include <asm/ptrace.h>
#include <asm/types.h>
+/*
+ * TASK_SIZE - the maximum size of a user space task.
+ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
+ */
+#ifdef CONFIG_COMPAT
+#define TASK_SIZE_32 UL(0x100000000)
+#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
+#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
+#else
+#define TASK_SIZE TASK_SIZE_64
+#endif /* CONFIG_COMPAT */
+
+#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
+
#define STACK_TOP_MAX TASK_SIZE_64
#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE 0xffff0000
@@ -85,6 +105,9 @@ struct thread_struct {
unsigned long tp2_value;
#endif
struct fpsimd_state fpsimd_state;
+ void *sve_state; /* SVE registers, if any */
+ unsigned int sve_vl; /* SVE vector length */
+ unsigned int sve_vl_onexec; /* SVE vl after next exec */
unsigned long fault_address; /* fault info */
unsigned long fault_code; /* ESR_EL1 value */
struct debug_info debug; /* debugging */
@@ -194,4 +217,9 @@ static inline void spin_lock_prefetch(const void *ptr)
int cpu_enable_pan(void *__unused);
int cpu_enable_cache_maint_trap(void *__unused);
+/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
+#define SVE_SET_VL(arg) sve_set_current_vl(arg)
+#define SVE_GET_VL() sve_get_current_vl()
+
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
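
SVE_SET_VL() and SVE_GET_VL() hook sve_set_current_vl()/sve_get_current_vl() into the generic prctl() plumbing. A hedged userspace sketch of the resulting interface, assuming the PR_SVE_* constants added to <linux/prctl.h> elsewhere in this series:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Request a 256-bit (32-byte) vector length, inherited across exec. */
	if (prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT) < 0)
		perror("PR_SVE_SET_VL");

	/* Returns the current VL with the flag bits ORed in, or -1 on error. */
	printf("PR_SVE_GET_VL -> %d\n", prctl(PR_SVE_GET_VL));
	return 0;
}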
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 95ad7102b63c..fdb827c7832f 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -27,8 +27,6 @@
* instructions.
*/
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int tmp;
@@ -139,176 +137,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
}
#define arch_spin_is_contended arch_spin_is_contended
-/*
- * Write lock implementation.
- *
- * Write locks set bit 31. Unlocking, is done by writing 0 since the lock is
- * exclusively held.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- */
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- unsigned int tmp;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- " sevl\n"
- "1: wfe\n"
- "2: ldaxr %w0, %1\n"
- " cbnz %w0, 1b\n"
- " stxr %w0, %w2, %1\n"
- " cbnz %w0, 2b\n"
- __nops(1),
- /* LSE atomics */
- "1: mov %w0, wzr\n"
- "2: casa %w0, %w2, %1\n"
- " cbz %w0, 3f\n"
- " ldxr %w0, %1\n"
- " cbz %w0, 2b\n"
- " wfe\n"
- " b 1b\n"
- "3:")
- : "=&r" (tmp), "+Q" (rw->lock)
- : "r" (0x80000000)
- : "memory");
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
- unsigned int tmp;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- "1: ldaxr %w0, %1\n"
- " cbnz %w0, 2f\n"
- " stxr %w0, %w2, %1\n"
- " cbnz %w0, 1b\n"
- "2:",
- /* LSE atomics */
- " mov %w0, wzr\n"
- " casa %w0, %w2, %1\n"
- __nops(2))
- : "=&r" (tmp), "+Q" (rw->lock)
- : "r" (0x80000000)
- : "memory");
-
- return !tmp;
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- " stlr wzr, %0",
- " swpl wzr, wzr, %0")
- : "=Q" (rw->lock) :: "memory");
-}
-
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->lock == 0)
-
-/*
- * Read lock implementation.
- *
- * It exclusively loads the lock value, increments it and stores the new value
- * back if positive and the CPU still exclusively owns the location. If the
- * value is negative, the lock is already held.
- *
- * During unlocking there may be multiple active read locks but no write lock.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- *
- * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
- * and LSE implementations may exhibit different behaviour (although this
- * will have no effect on lockdep).
- */
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- unsigned int tmp, tmp2;
-
- asm volatile(
- " sevl\n"
- ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- "1: wfe\n"
- "2: ldaxr %w0, %2\n"
- " add %w0, %w0, #1\n"
- " tbnz %w0, #31, 1b\n"
- " stxr %w1, %w0, %2\n"
- " cbnz %w1, 2b\n"
- __nops(1),
- /* LSE atomics */
- "1: wfe\n"
- "2: ldxr %w0, %2\n"
- " adds %w1, %w0, #1\n"
- " tbnz %w1, #31, 1b\n"
- " casa %w0, %w1, %2\n"
- " sbc %w0, %w1, %w0\n"
- " cbnz %w0, 2b")
- : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
- :
- : "cc", "memory");
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- unsigned int tmp, tmp2;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- "1: ldxr %w0, %2\n"
- " sub %w0, %w0, #1\n"
- " stlxr %w1, %w0, %2\n"
- " cbnz %w1, 1b",
- /* LSE atomics */
- " movn %w0, #0\n"
- " staddl %w0, %2\n"
- __nops(2))
- : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
- :
- : "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- unsigned int tmp, tmp2;
-
- asm volatile(ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
- " mov %w1, #1\n"
- "1: ldaxr %w0, %2\n"
- " add %w0, %w0, #1\n"
- " tbnz %w0, #31, 2f\n"
- " stxr %w1, %w0, %2\n"
- " cbnz %w1, 1b\n"
- "2:",
- /* LSE atomics */
- " ldr %w0, %2\n"
- " adds %w1, %w0, #1\n"
- " tbnz %w1, #31, 1f\n"
- " casa %w0, %w1, %2\n"
- " sbc %w1, %w1, %w0\n"
- __nops(1)
- "1:")
- : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
- :
- : "cc", "memory");
-
- return !tmp2;
-}
-
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
+#include <asm/qrwlock.h>
/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 55be59a35e3f..6b856012c51b 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -36,10 +36,6 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
-typedef struct {
- volatile unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+#include <asm-generic/qrwlock_types.h>
#endif
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index f707fed5886f..08cc88574659 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -145,10 +145,14 @@
#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0)
#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1)
+#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4)
#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0)
#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1)
+#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4)
+#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5)
+
#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0)
#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1)
@@ -160,6 +164,8 @@
#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2)
+#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0)
+
#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0)
#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1)
#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2)
@@ -172,6 +178,99 @@
#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
+/*** Statistical Profiling Extension ***/
+/* ID registers */
+#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7)
+#define SYS_PMSIDR_EL1_FE_SHIFT 0
+#define SYS_PMSIDR_EL1_FT_SHIFT 1
+#define SYS_PMSIDR_EL1_FL_SHIFT 2
+#define SYS_PMSIDR_EL1_ARCHINST_SHIFT 3
+#define SYS_PMSIDR_EL1_LDS_SHIFT 4
+#define SYS_PMSIDR_EL1_ERND_SHIFT 5
+#define SYS_PMSIDR_EL1_INTERVAL_SHIFT 8
+#define SYS_PMSIDR_EL1_INTERVAL_MASK 0xfUL
+#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT 12
+#define SYS_PMSIDR_EL1_MAXSIZE_MASK 0xfUL
+#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT 16
+#define SYS_PMSIDR_EL1_COUNTSIZE_MASK 0xfUL
+
+#define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7)
+#define SYS_PMBIDR_EL1_ALIGN_SHIFT 0
+#define SYS_PMBIDR_EL1_ALIGN_MASK 0xfU
+#define SYS_PMBIDR_EL1_P_SHIFT 4
+#define SYS_PMBIDR_EL1_F_SHIFT 5
+
+/* Sampling controls */
+#define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0)
+#define SYS_PMSCR_EL1_E0SPE_SHIFT 0
+#define SYS_PMSCR_EL1_E1SPE_SHIFT 1
+#define SYS_PMSCR_EL1_CX_SHIFT 3
+#define SYS_PMSCR_EL1_PA_SHIFT 4
+#define SYS_PMSCR_EL1_TS_SHIFT 5
+#define SYS_PMSCR_EL1_PCT_SHIFT 6
+
+#define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0)
+#define SYS_PMSCR_EL2_E0HSPE_SHIFT 0
+#define SYS_PMSCR_EL2_E2SPE_SHIFT 1
+#define SYS_PMSCR_EL2_CX_SHIFT 3
+#define SYS_PMSCR_EL2_PA_SHIFT 4
+#define SYS_PMSCR_EL2_TS_SHIFT 5
+#define SYS_PMSCR_EL2_PCT_SHIFT 6
+
+#define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2)
+
+#define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3)
+#define SYS_PMSIRR_EL1_RND_SHIFT 0
+#define SYS_PMSIRR_EL1_INTERVAL_SHIFT 8
+#define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL
+
+/* Filtering controls */
+#define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4)
+#define SYS_PMSFCR_EL1_FE_SHIFT 0
+#define SYS_PMSFCR_EL1_FT_SHIFT 1
+#define SYS_PMSFCR_EL1_FL_SHIFT 2
+#define SYS_PMSFCR_EL1_B_SHIFT 16
+#define SYS_PMSFCR_EL1_LD_SHIFT 17
+#define SYS_PMSFCR_EL1_ST_SHIFT 18
+
+#define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5)
+#define SYS_PMSEVFR_EL1_RES0 0x0000ffff00ff0f55UL
+
+#define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6)
+#define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0
+
+/* Buffer controls */
+#define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0)
+#define SYS_PMBLIMITR_EL1_E_SHIFT 0
+#define SYS_PMBLIMITR_EL1_FM_SHIFT 1
+#define SYS_PMBLIMITR_EL1_FM_MASK 0x3UL
+#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ (0 << SYS_PMBLIMITR_EL1_FM_SHIFT)
+
+#define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1)
+
+/* Buffer error reporting */
+#define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3)
+#define SYS_PMBSR_EL1_COLL_SHIFT 16
+#define SYS_PMBSR_EL1_S_SHIFT 17
+#define SYS_PMBSR_EL1_EA_SHIFT 18
+#define SYS_PMBSR_EL1_DL_SHIFT 19
+#define SYS_PMBSR_EL1_EC_SHIFT 26
+#define SYS_PMBSR_EL1_EC_MASK 0x3fUL
+
+#define SYS_PMBSR_EL1_EC_BUF (0x0UL << SYS_PMBSR_EL1_EC_SHIFT)
+#define SYS_PMBSR_EL1_EC_FAULT_S1 (0x24UL << SYS_PMBSR_EL1_EC_SHIFT)
+#define SYS_PMBSR_EL1_EC_FAULT_S2 (0x25UL << SYS_PMBSR_EL1_EC_SHIFT)
+
+#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT 0
+#define SYS_PMBSR_EL1_FAULT_FSC_MASK 0x3fUL
+
+#define SYS_PMBSR_EL1_BUF_BSC_SHIFT 0
+#define SYS_PMBSR_EL1_BUF_BSC_MASK 0x3fUL
+
+#define SYS_PMBSR_EL1_BUF_BSC_FULL (0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT)
+
+/*** End of Statistical Profiling Extension ***/
+
#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
@@ -250,6 +349,8 @@
#define SYS_PMCCFILTR_EL0 sys_reg (3, 3, 14, 15, 7)
+#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
+
#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
@@ -318,6 +419,10 @@
#define SCTLR_EL1_CP15BEN (1 << 5)
/* id_aa64isar0 */
+#define ID_AA64ISAR0_DP_SHIFT 44
+#define ID_AA64ISAR0_SM4_SHIFT 40
+#define ID_AA64ISAR0_SM3_SHIFT 36
+#define ID_AA64ISAR0_SHA3_SHIFT 32
#define ID_AA64ISAR0_RDM_SHIFT 28
#define ID_AA64ISAR0_ATOMICS_SHIFT 20
#define ID_AA64ISAR0_CRC32_SHIFT 16
@@ -332,6 +437,7 @@
#define ID_AA64ISAR1_DPB_SHIFT 0
/* id_aa64pfr0 */
+#define ID_AA64PFR0_SVE_SHIFT 32
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
#define ID_AA64PFR0_FP_SHIFT 16
@@ -340,6 +446,7 @@
#define ID_AA64PFR0_EL1_SHIFT 4
#define ID_AA64PFR0_EL0_SHIFT 0
+#define ID_AA64PFR0_SVE 0x1
#define ID_AA64PFR0_FP_NI 0xf
#define ID_AA64PFR0_FP_SUPPORTED 0x0
#define ID_AA64PFR0_ASIMD_NI 0xf
@@ -441,6 +548,20 @@
#endif
+/*
+ * The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which
+ * are reserved by the SVE architecture for future expansion of the LEN
+ * field, with compatible semantics.
+ */
+#define ZCR_ELx_LEN_SHIFT 0
+#define ZCR_ELx_LEN_SIZE 9
+#define ZCR_ELx_LEN_MASK 0x1ff
+
+#define CPACR_EL1_ZEN_EL1EN (1 << 16) /* enable EL1 access */
+#define CPACR_EL1_ZEN_EL0EN (1 << 17) /* enable EL0 access, if EL1EN set */
+#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
+
+
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL (1UL << 31)
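
The *_SHIFT/*_MASK pairs above follow the usual convention of giving the mask at field position zero, so a field is read by shifting first and masking second. A one-line sketch using the PMBSR error-code field (illustration only, built from the definitions above):

/* Extract the event class from a previously read PMBSR_EL1 value. */
static inline u64 pmbsr_event_class(u64 pmbsr)
{
	return (pmbsr >> SYS_PMBSR_EL1_EC_SHIFT) & SYS_PMBSR_EL1_EC_MASK;
}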
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index ddded6497a8a..eb431286bacd 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -63,6 +63,8 @@ struct thread_info {
void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
+void arch_release_task_struct(struct task_struct *tsk);
+
#endif
/*
@@ -92,6 +94,8 @@ void arch_setup_new_exec(void);
#define TIF_RESTORE_SIGMASK 20
#define TIF_SINGLESTEP 21
#define TIF_32BIT 22 /* 32bit process */
+#define TIF_SVE 23 /* Scalable Vector Extension in use */
+#define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -105,6 +109,7 @@ void arch_setup_new_exec(void);
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
#define _TIF_32BIT (1 << TIF_32BIT)
+#define _TIF_SVE (1 << TIF_SVE)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index b3202284568b..c4f2d50491eb 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -33,6 +33,14 @@ int pcibus_to_node(struct pci_bus *bus);
#endif /* CONFIG_NUMA */
+#include <linux/arch_topology.h>
+
+/* Replace task scheduler's default frequency-invariant accounting */
+#define arch_scale_freq_capacity topology_get_freq_scale
+
+/* Replace task scheduler's default cpu-invariant accounting */
+#define arch_scale_cpu_capacity topology_get_cpu_scale
+
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index d131501c6222..1696f9de9359 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -34,9 +34,17 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
+void force_signal_inject(int signal, int code, struct pt_regs *regs,
+ unsigned long address);
void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr);
+/*
+ * Move regs->pc to the next instruction and do any necessary setup before
+ * it is executed.
+ */
+void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size);
+
static inline int __in_irqentry_text(unsigned long ptr)
{
return ptr >= (unsigned long)&__irqentry_text_start &&
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index b3fdeee739ea..cda76fa8b9b2 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -37,5 +37,11 @@
#define HWCAP_FCMA (1 << 14)
#define HWCAP_LRCPC (1 << 15)
#define HWCAP_DCPOP (1 << 16)
+#define HWCAP_SHA3 (1 << 17)
+#define HWCAP_SM3 (1 << 18)
+#define HWCAP_SM4 (1 << 19)
+#define HWCAP_ASIMDDP (1 << 20)
+#define HWCAP_SHA512 (1 << 21)
+#define HWCAP_SVE (1 << 22)
#endif /* _UAPI__ASM_HWCAP_H */
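
Userspace discovers these capability bits through the ELF auxiliary vector rather than by probing ID registers directly. A minimal sketch, assuming a libc that provides getauxval():

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	printf("SVE:     %s\n", (hwcaps & HWCAP_SVE) ? "yes" : "no");
	printf("ASIMDDP: %s\n", (hwcaps & HWCAP_ASIMDDP) ? "yes" : "no");
	return 0;
}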
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 67d4c33974e8..98c4ce55d9c3 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <asm/hwcap.h>
+#include <asm/sigcontext.h>
/*
@@ -47,7 +48,6 @@
#define PSR_D_BIT 0x00000200
#define PSR_PAN_BIT 0x00400000
#define PSR_UAO_BIT 0x00800000
-#define PSR_Q_BIT 0x08000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
@@ -64,6 +64,8 @@
#ifndef __ASSEMBLY__
+#include <linux/prctl.h>
+
/*
* User structures for general purpose, floating point and debug registers.
*/
@@ -91,6 +93,141 @@ struct user_hwdebug_state {
} dbg_regs[16];
};
+/* SVE/FP/SIMD state (NT_ARM_SVE) */
+
+struct user_sve_header {
+ __u32 size; /* total meaningful regset content in bytes */
+ __u32 max_size; /* maximum possible size for this thread */
+ __u16 vl; /* current vector length */
+ __u16 max_vl; /* maximum possible vector length */
+ __u16 flags;
+ __u16 __reserved;
+};
+
+/* Definitions for user_sve_header.flags: */
+#define SVE_PT_REGS_MASK (1 << 0)
+
+#define SVE_PT_REGS_FPSIMD 0
+#define SVE_PT_REGS_SVE SVE_PT_REGS_MASK
+
+/*
+ * Common SVE_PT_* flags:
+ * These must be kept in sync with the prctl interface in <linux/prctl.h>
+ */
+#define SVE_PT_VL_INHERIT (PR_SVE_VL_INHERIT >> 16)
+#define SVE_PT_VL_ONEXEC (PR_SVE_SET_VL_ONEXEC >> 16)
+
+
+/*
+ * The remainder of the SVE state follows struct user_sve_header. The
+ * total size of the SVE state (including header) depends on the
+ * metadata in the header: SVE_PT_SIZE(vq, flags) gives the total size
+ * of the state in bytes, including the header.
+ *
+ * Refer to <asm/sigcontext.h> for details of how to pass the correct
+ * "vq" argument to these macros.
+ */
+
+/* Offset from the start of struct user_sve_header to the register data */
+#define SVE_PT_REGS_OFFSET \
+ ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
+ / SVE_VQ_BYTES * SVE_VQ_BYTES)
+
+/*
+ * The register data content and layout depends on the value of the
+ * flags field.
+ */
+
+/*
+ * (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD case:
+ *
+ * The payload starts at offset SVE_PT_FPSIMD_OFFSET, and is of type
+ * struct user_fpsimd_state. Additional data might be appended in the
+ * future: use SVE_PT_FPSIMD_SIZE(vq, flags) to compute the total size.
+ * SVE_PT_FPSIMD_SIZE(vq, flags) will never be less than
+ * sizeof(struct user_fpsimd_state).
+ */
+
+#define SVE_PT_FPSIMD_OFFSET SVE_PT_REGS_OFFSET
+
+#define SVE_PT_FPSIMD_SIZE(vq, flags) (sizeof(struct user_fpsimd_state))
+
+/*
+ * (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE case:
+ *
+ * The payload starts at offset SVE_PT_SVE_OFFSET, and is of size
+ * SVE_PT_SVE_SIZE(vq, flags).
+ *
+ * Additional macros describe the contents and layout of the payload.
+ * For each, SVE_PT_SVE_x_OFFSET(args) is the start offset relative to
+ * the start of struct user_sve_header, and SVE_PT_SVE_x_SIZE(args) is
+ * the size in bytes:
+ *
+ * x type description
+ * - ---- -----------
+ * ZREGS \
+ * ZREG |
+ * PREGS | refer to <asm/sigcontext.h>
+ * PREG |
+ * FFR /
+ *
+ * FPSR uint32_t FPSR
+ * FPCR uint32_t FPCR
+ *
+ * Additional data might be appended in the future.
+ */
+
+#define SVE_PT_SVE_ZREG_SIZE(vq) SVE_SIG_ZREG_SIZE(vq)
+#define SVE_PT_SVE_PREG_SIZE(vq) SVE_SIG_PREG_SIZE(vq)
+#define SVE_PT_SVE_FFR_SIZE(vq) SVE_SIG_FFR_SIZE(vq)
+#define SVE_PT_SVE_FPSR_SIZE sizeof(__u32)
+#define SVE_PT_SVE_FPCR_SIZE sizeof(__u32)
+
+#define __SVE_SIG_TO_PT(offset) \
+ ((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET)
+
+#define SVE_PT_SVE_OFFSET SVE_PT_REGS_OFFSET
+
+#define SVE_PT_SVE_ZREGS_OFFSET \
+ __SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET)
+#define SVE_PT_SVE_ZREG_OFFSET(vq, n) \
+ __SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n))
+#define SVE_PT_SVE_ZREGS_SIZE(vq) \
+ (SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
+
+#define SVE_PT_SVE_PREGS_OFFSET(vq) \
+ __SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq))
+#define SVE_PT_SVE_PREG_OFFSET(vq, n) \
+ __SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n))
+#define SVE_PT_SVE_PREGS_SIZE(vq) \
+ (SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \
+ SVE_PT_SVE_PREGS_OFFSET(vq))
+
+#define SVE_PT_SVE_FFR_OFFSET(vq) \
+ __SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq))
+
+#define SVE_PT_SVE_FPSR_OFFSET(vq) \
+ ((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + \
+ (SVE_VQ_BYTES - 1)) \
+ / SVE_VQ_BYTES * SVE_VQ_BYTES)
+#define SVE_PT_SVE_FPCR_OFFSET(vq) \
+ (SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE)
+
+/*
+ * Any future extension appended after FPCR must be aligned to the next
+ * 128-bit boundary.
+ */
+
+#define SVE_PT_SVE_SIZE(vq, flags) \
+ ((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE \
+ - SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1)) \
+ / SVE_VQ_BYTES * SVE_VQ_BYTES)
+
+#define SVE_PT_SIZE(vq, flags) \
+ (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \
+ SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \
+ : SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags))
+
#endif /* __ASSEMBLY__ */
#endif /* _UAPI__ASM_PTRACE_H */
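
A debugger reads this regset with PTRACE_GETREGSET; a short read covering only struct user_sve_header is enough to learn the tracee's vector length and whether full SVE data would follow. A sketch under the assumption that NT_ARM_SVE is defined in <linux/elf.h> (added elsewhere in this series) and that 'pid' names a stopped tracee:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/elf.h>
#include <asm/ptrace.h>

static int show_sve_header(pid_t pid)
{
	struct user_sve_header hdr;
	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov))
		return -1;

	printf("vl = %u bytes (vq = %u), payload is %s\n",
	       hdr.vl, (unsigned int)sve_vq_from_vl(hdr.vl),
	       (hdr.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ?
			"full SVE state" : "FPSIMD only");
	return 0;
}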
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index f6cc3061b1ae..dca8f8b5168b 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -17,6 +17,8 @@
#ifndef _UAPI__ASM_SIGCONTEXT_H
#define _UAPI__ASM_SIGCONTEXT_H
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
/*
@@ -42,10 +44,11 @@ struct sigcontext {
*
* 0x210 fpsimd_context
* 0x10 esr_context
+ * 0x8a0 sve_context (vl <= 64) (optional)
* 0x20 extra_context (optional)
* 0x10 terminator (null _aarch64_ctx)
*
- * 0xdb0 (reserved for future allocation)
+ * 0x510 (reserved for future allocation)
*
* New records that can exceed this space need to be opt-in for userspace, so
* that an expanded signal frame is not generated unexpectedly. The mechanism
@@ -117,4 +120,119 @@ struct extra_context {
__u32 __reserved[3];
};
+#define SVE_MAGIC 0x53564501
+
+struct sve_context {
+ struct _aarch64_ctx head;
+ __u16 vl;
+ __u16 __reserved[3];
+};
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * The SVE architecture leaves space for future expansion of the
+ * vector length beyond its initial architectural limit of 2048 bits
+ * (16 quadwords).
+ *
+ * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ
+ * terminology.
+ */
+#define SVE_VQ_BYTES 16 /* number of bytes per quadword */
+
+#define SVE_VQ_MIN 1
+#define SVE_VQ_MAX 512
+
+#define SVE_VL_MIN (SVE_VQ_MIN * SVE_VQ_BYTES)
+#define SVE_VL_MAX (SVE_VQ_MAX * SVE_VQ_BYTES)
+
+#define SVE_NUM_ZREGS 32
+#define SVE_NUM_PREGS 16
+
+#define sve_vl_valid(vl) \
+ ((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
+#define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES)
+#define sve_vl_from_vq(vq) ((vq) * SVE_VQ_BYTES)
+
+/*
+ * If the SVE registers are currently live for the thread at signal delivery,
+ * sve_context.head.size >=
+ * SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl))
+ * and the register data may be accessed using the SVE_SIG_*() macros.
+ *
+ * If sve_context.head.size <
+ * SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl)),
+ * the SVE registers were not live for the thread and no register data
+ * is included: in this case, the SVE_SIG_*() macros should not be
+ * used except for this check.
+ *
+ * The same convention applies when returning from a signal: a caller
+ * will need to remove or resize the sve_context block if it wants to
+ * make the SVE registers live when they were previously non-live or
+ * vice-versa. This may require the caller to allocate fresh
+ * memory and/or move other context blocks in the signal frame.
+ *
+ * Changing the vector length during signal return is not permitted:
+ * sve_context.vl must equal the thread's current vector length when
+ * doing a sigreturn.
+ *
+ *
+ * Note: for all these macros, the "vq" argument denotes the SVE
+ * vector length in quadwords (i.e., units of 128 bits).
+ *
+ * The correct way to obtain vq is to use sve_vq_from_vl(vl). The
+ * result is valid if and only if sve_vl_valid(vl) is true. This is
+ * guaranteed for a struct sve_context written by the kernel.
+ *
+ *
+ * Additional macros describe the contents and layout of the payload.
+ * For each, SVE_SIG_x_OFFSET(args) is the start offset relative to
+ * the start of struct sve_context, and SVE_SIG_x_SIZE(args) is the
+ * size in bytes:
+ *
+ * x type description
+ * - ---- -----------
+ * REGS the entire SVE context
+ *
+ * ZREGS __uint128_t[SVE_NUM_ZREGS][vq] all Z-registers
+ * ZREG __uint128_t[vq] individual Z-register Zn
+ *
+ * PREGS uint16_t[SVE_NUM_PREGS][vq] all P-registers
+ * PREG uint16_t[vq] individual P-register Pn
+ *
+ * FFR uint16_t[vq] first-fault status register
+ *
+ * Additional data might be appended in the future.
+ */
+
+#define SVE_SIG_ZREG_SIZE(vq) ((__u32)(vq) * SVE_VQ_BYTES)
+#define SVE_SIG_PREG_SIZE(vq) ((__u32)(vq) * (SVE_VQ_BYTES / 8))
+#define SVE_SIG_FFR_SIZE(vq) SVE_SIG_PREG_SIZE(vq)
+
+#define SVE_SIG_REGS_OFFSET \
+ ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
+ / SVE_VQ_BYTES * SVE_VQ_BYTES)
+
+#define SVE_SIG_ZREGS_OFFSET SVE_SIG_REGS_OFFSET
+#define SVE_SIG_ZREG_OFFSET(vq, n) \
+ (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n))
+#define SVE_SIG_ZREGS_SIZE(vq) \
+ (SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET)
+
+#define SVE_SIG_PREGS_OFFSET(vq) \
+ (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq))
+#define SVE_SIG_PREG_OFFSET(vq, n) \
+ (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n))
+#define SVE_SIG_PREGS_SIZE(vq) \
+ (SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq))
+
+#define SVE_SIG_FFR_OFFSET(vq) \
+ (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq))
+
+#define SVE_SIG_REGS_SIZE(vq) \
+ (SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET)
+
+#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
+
+
#endif /* _UAPI__ASM_SIGCONTEXT_H */
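
At signal delivery the sve_context record lives among the _aarch64_ctx blocks in the signal frame, and the size test described above tells a handler whether register data follows the header. A sketch of that walk, assuming an SA_SIGINFO handler and ignoring the extra_context indirection for brevity:

#include <stddef.h>
#include <ucontext.h>
#include <asm/sigcontext.h>

/* Returns the sve_context if SVE register data is present, else NULL. */
static struct sve_context *find_live_sve_context(ucontext_t *uc)
{
	struct _aarch64_ctx *head =
		(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

	while (head->magic) {
		if (head->magic == SVE_MAGIC) {
			struct sve_context *sve = (struct sve_context *)head;
			unsigned int vq = sve_vq_from_vl(sve->vl);

			/* Register data follows only if the record is full-sized. */
			return sve->head.size >= SVE_SIG_CONTEXT_SIZE(vq) ?
				sve : NULL;
		}
		head = (struct _aarch64_ctx *)((char *)head + head->size);
	}

	return NULL;
}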
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 0029e13adb59..8265dd790895 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -11,8 +11,6 @@ CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_insn.o = -pg
CFLAGS_REMOVE_return_address.o = -pg
-CFLAGS_setup.o = -DUTS_MACHINE='"$(UTS_MACHINE)"'
-
# Object file lists.
arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index d06fbe4cd38d..c33b5e4010ab 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -228,15 +228,7 @@ ret:
return ret;
}
-static struct ctl_table ctl_abi[] = {
- {
- .procname = "abi",
- .mode = 0555,
- },
- { }
-};
-
-static void __init register_insn_emulation_sysctl(struct ctl_table *table)
+static void __init register_insn_emulation_sysctl(void)
{
unsigned long flags;
int i = 0;
@@ -262,8 +254,7 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
}
raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
- table->child = insns_sysctl;
- register_sysctl_table(table);
+ register_sysctl("abi", insns_sysctl);
}
/*
@@ -431,7 +422,7 @@ ret:
pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
current->comm, (unsigned long)current->pid, regs->pc);
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, 4);
return 0;
fault:
@@ -512,7 +503,7 @@ ret:
pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
current->comm, (unsigned long)current->pid, regs->pc);
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, 4);
return 0;
}
@@ -586,14 +577,14 @@ static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
static int a32_setend_handler(struct pt_regs *regs, u32 instr)
{
int rc = compat_setend_handler(regs, (instr >> 9) & 1);
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, 4);
return rc;
}
static int t16_setend_handler(struct pt_regs *regs, u32 instr)
{
int rc = compat_setend_handler(regs, (instr >> 3) & 1);
- regs->pc += 2;
+ arm64_skip_faulting_instruction(regs, 2);
return rc;
}
@@ -644,7 +635,7 @@ static int __init armv8_deprecated_init(void)
cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
"arm64/isndep:starting",
run_all_insn_set_hw_mode, NULL);
- register_insn_emulation_sysctl(ctl_abi);
+ register_insn_emulation_sysctl();
return 0;
}
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 21e2c95d24e7..c5ba0097887f 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -27,6 +27,7 @@
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
+#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
@@ -51,6 +52,21 @@ unsigned int compat_elf_hwcap2 __read_mostly;
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
+/*
+ * Flag to indicate if we have computed the system wide
+ * capabilities based on the boot time active CPUs. This
+ * will be used to determine if a new booting CPU should
+ * go through the verification process to make sure that it
+ * supports the system capabilities, without using a hotplug
+ * notifier.
+ */
+static bool sys_caps_initialised;
+
+static inline void set_sys_caps_initialised(void)
+{
+ sys_caps_initialised = true;
+}
+
static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
{
/* file-wide pr_fmt adds "CPU features: " prefix */
@@ -107,7 +123,11 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
* sync with the documentation of the CPU feature register ABI.
*/
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
@@ -117,34 +137,35 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
/* Linux doesn't care about the EL3 */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
/* Linux shouldn't care about secure memory */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
/*
* Differing PARange is fine as long as all peripherals and memory are mapped
* within the minimum PARange of all CPUs
@@ -155,20 +176,20 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -193,14 +214,14 @@ struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
};
static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 28, 4, 0xf), /* InnerShr */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 24, 4, 0), /* FCSE */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf), /* InnerShr */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0), /* FCSE */
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 16, 4, 0), /* TCM */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 12, 4, 0), /* ShareLvl */
- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 8, 4, 0xf), /* OuterShr */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* PMSA */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* VMSA */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), /* TCM */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* ShareLvl */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf), /* OuterShr */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* PMSA */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* VMSA */
ARM64_FTR_END,
};
@@ -221,8 +242,8 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
};
static const struct arm64_ftr_bits ftr_mvfr2[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* FPMisc */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* SIMDMisc */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* FPMisc */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* SIMDMisc */
ARM64_FTR_END,
};
@@ -234,25 +255,25 @@ static const struct arm64_ftr_bits ftr_dczid[] = {
static const struct arm64_ftr_bits ftr_id_isar5[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* ac2 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* ac2 */
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_pfr0[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 12, 4, 0), /* State3 */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 8, 4, 0), /* State2 */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* State1 */
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* State0 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0), /* State1 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* State0 */
ARM64_FTR_END,
};
@@ -268,6 +289,12 @@ static const struct arm64_ftr_bits ftr_id_dfr0[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_zcr[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
+ ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0), /* LEN */
+ ARM64_FTR_END,
+};
+
/*
* Common ftr bits for a 32bit register with all hidden, strict
* attributes, with 4bit feature fields and a default safe value of
@@ -334,6 +361,7 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 0, CRm = 4 */
ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
+ ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
/* Op1 = 0, CRn = 0, CRm = 5 */
ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
@@ -348,6 +376,9 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
+ /* Op1 = 0, CRn = 1, CRm = 2 */
+ ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
+
/* Op1 = 3, CRn = 0, CRm = 0 */
{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
@@ -485,6 +516,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
+ init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
@@ -505,6 +537,10 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
}
+ if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
+ init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
+ sve_init_vq_map();
+ }
}
static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
@@ -608,6 +644,9 @@ void update_cpu_features(int cpu,
taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
+ taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
+ info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
+
/*
* If we have AArch32, we care about 32-bit features for compat.
* If the system doesn't support AArch32, don't update them.
@@ -655,6 +694,16 @@ void update_cpu_features(int cpu,
info->reg_mvfr2, boot->reg_mvfr2);
}
+ if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
+ taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
+ info->reg_zcr, boot->reg_zcr);
+
+ /* Probe vector lengths, unless we already gave up on SVE */
+ if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
+ !sys_caps_initialised)
+ sve_update_vq_map();
+ }
+
/*
* Mismatched CPU features are a recipe for disaster. Don't even
* pretend to support them.
@@ -900,6 +949,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.min_field_value = 1,
},
#endif
+#ifdef CONFIG_ARM64_SVE
+ {
+ .desc = "Scalable Vector Extension",
+ .capability = ARM64_SVE,
+ .def_scope = SCOPE_SYSTEM,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR0_SVE_SHIFT,
+ .min_field_value = ID_AA64PFR0_SVE,
+ .matches = has_cpuid_feature,
+ .enable = sve_kernel_enable,
+ },
+#endif /* CONFIG_ARM64_SVE */
{},
};
@@ -921,9 +983,14 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
@@ -932,6 +999,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
+#ifdef CONFIG_ARM64_SVE
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
+#endif
{},
};
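
The hwcap bits added above surface to userspace through the ELF auxiliary vector and the /proc/cpuinfo "Features" line. A minimal detection sketch, assuming getauxval() and HWCAP_* constants matching this series' uapi header (the fallback values below follow the hwcap bit order and are only needed with older headers):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_SHA512		/* fallbacks, mirroring the hwcap bit order */
#define HWCAP_SHA3	(1 << 17)
#define HWCAP_SM3	(1 << 18)
#define HWCAP_SM4	(1 << 19)
#define HWCAP_ASIMDDP	(1 << 20)
#define HWCAP_SHA512	(1 << 21)
#define HWCAP_SVE	(1 << 22)
#endif

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	/* Each bit corresponds to one name reported in /proc/cpuinfo */
	printf("sha512:  %d\n", !!(hwcaps & HWCAP_SHA512));
	printf("asimddp: %d\n", !!(hwcaps & HWCAP_ASIMDDP));
	printf("sve:     %d\n", !!(hwcaps & HWCAP_SVE));
	return 0;
}
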
@@ -1041,21 +1111,6 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
}
/*
- * Flag to indicate if we have computed the system wide
- * capabilities based on the boot time active CPUs. This
- * will be used to determine if a new booting CPU should
- * go through the verification process to make sure that it
- * supports the system capabilities, without using a hotplug
- * notifier.
- */
-static bool sys_caps_initialised;
-
-static inline void set_sys_caps_initialised(void)
-{
- sys_caps_initialised = true;
-}
-
-/*
* Check for CPU features that are used in early boot
* based on the Boot CPU value.
*/
@@ -1097,6 +1152,23 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
}
}
+static void verify_sve_features(void)
+{
+ u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
+ u64 zcr = read_zcr_features();
+
+ unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
+ unsigned int len = zcr & ZCR_ELx_LEN_MASK;
+
+ if (len < safe_len || sve_verify_vq_map()) {
+ pr_crit("CPU%d: SVE: required vector length(s) missing\n",
+ smp_processor_id());
+ cpu_die_early();
+ }
+
+ /* Add checks on other ZCR bits here if necessary */
+}
+
/*
* Run through the enabled system capabilities and enable() it on this CPU.
* The capabilities were decided based on the available CPUs at the boot time.
@@ -1110,8 +1182,12 @@ static void verify_local_cpu_capabilities(void)
verify_local_cpu_errata_workarounds();
verify_local_cpu_features(arm64_features);
verify_local_elf_hwcaps(arm64_elf_hwcaps);
+
if (system_supports_32bit_el0())
verify_local_elf_hwcaps(compat_elf_hwcaps);
+
+ if (system_supports_sve())
+ verify_sve_features();
}
void check_local_cpu_capabilities(void)
@@ -1189,6 +1265,8 @@ void __init setup_cpu_features(void)
if (system_supports_32bit_el0())
setup_elf_hwcaps(compat_elf_hwcaps);
+ sve_setup();
+
/* Advertise that we have computed the system capabilities */
set_sys_caps_initialised();
@@ -1287,7 +1365,7 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn)
if (!rc) {
dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
pt_regs_write_reg(regs, dst, val);
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
return rc;
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 311885962830..1e2554543506 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -19,6 +19,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#include <asm/fpsimd.h>
#include <linux/bitops.h>
#include <linux/bug.h>
@@ -69,6 +70,12 @@ static const char *const hwcap_str[] = {
"fcma",
"lrcpc",
"dcpop",
+ "sha3",
+ "sm3",
+ "sm4",
+ "asimddp",
+ "sha512",
+ "sve",
NULL
};
@@ -326,6 +333,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
+ info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);
/* Update the 32bit ID registers only if AArch32 is implemented */
if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
@@ -348,6 +356,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
}
+ if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+ id_aa64pfr0_sve(info->reg_id_aa64pfr0))
+ info->reg_zcr = read_zcr_features();
+
cpuinfo_detect_icache_policy(info);
}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index c7ef99904934..a88b6ccebbb4 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -30,6 +30,7 @@
#include <asm/cpufeature.h>
#include <asm/cputype.h>
+#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
@@ -46,9 +47,9 @@ u8 debug_monitors_arch(void)
static void mdscr_write(u32 mdscr)
{
unsigned long flags;
- local_dbg_save(flags);
+ flags = local_daif_save();
write_sysreg(mdscr, mdscr_el1);
- local_dbg_restore(flags);
+ local_daif_restore(flags);
}
NOKPROBE_SYMBOL(mdscr_write);
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 6a27cd6dbfa6..73f17bffcd23 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -41,3 +41,20 @@ ENTRY(fpsimd_load_state)
fpsimd_restore x0, 8
ret
ENDPROC(fpsimd_load_state)
+
+#ifdef CONFIG_ARM64_SVE
+ENTRY(sve_save_state)
+ sve_save 0, x1, 2
+ ret
+ENDPROC(sve_save_state)
+
+ENTRY(sve_load_state)
+ sve_load 0, x1, x2, 3
+ ret
+ENDPROC(sve_load_state)
+
+ENTRY(sve_get_vl)
+ _sve_rdvl 0, 1
+ ret
+ENDPROC(sve_get_vl)
+#endif /* CONFIG_ARM64_SVE */
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index e1be42e11ff5..1175f5827ae1 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -108,13 +108,8 @@ ENTRY(_mcount)
mcount_get_lr x1 // function's lr (= parent's pc)
blr x2 // (*ftrace_trace_function)(pc, lr);
-#ifndef CONFIG_FUNCTION_GRAPH_TRACER
-skip_ftrace_call: // return;
- mcount_exit // }
-#else
- mcount_exit // return;
- // }
-skip_ftrace_call:
+skip_ftrace_call: // }
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ldr_l x2, ftrace_graph_return
cmp x0, x2 // if ((ftrace_graph_return
b.ne ftrace_graph_caller // != ftrace_stub)
@@ -123,9 +118,8 @@ skip_ftrace_call:
adr_l x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub))
cmp x0, x2
b.ne ftrace_graph_caller // ftrace_graph_caller();
-
- mcount_exit
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+ mcount_exit
ENDPROC(_mcount)
#else /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e1c59d4008a8..6d14b8f29b5f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -28,7 +28,7 @@
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
-#include <asm/memory.h>
+#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
@@ -221,6 +221,8 @@ alternative_else_nop_endif
.macro kernel_exit, el
.if \el != 0
+ disable_daif
+
/* Restore the task's original addr_limit. */
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
@@ -373,18 +375,18 @@ ENTRY(vectors)
kernel_ventry el1_sync // Synchronous EL1h
kernel_ventry el1_irq // IRQ EL1h
kernel_ventry el1_fiq_invalid // FIQ EL1h
- kernel_ventry el1_error_invalid // Error EL1h
+ kernel_ventry el1_error // Error EL1h
kernel_ventry el0_sync // Synchronous 64-bit EL0
kernel_ventry el0_irq // IRQ 64-bit EL0
kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0
- kernel_ventry el0_error_invalid // Error 64-bit EL0
+ kernel_ventry el0_error // Error 64-bit EL0
#ifdef CONFIG_COMPAT
kernel_ventry el0_sync_compat // Synchronous 32-bit EL0
kernel_ventry el0_irq_compat // IRQ 32-bit EL0
kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0
- kernel_ventry el0_error_invalid_compat // Error 32-bit EL0
+ kernel_ventry el0_error_compat // Error 32-bit EL0
#else
kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0
kernel_ventry el0_irq_invalid // IRQ 32-bit EL0
@@ -453,10 +455,6 @@ ENDPROC(el0_error_invalid)
el0_fiq_invalid_compat:
inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
-
-el0_error_invalid_compat:
- inv_entry 0, BAD_ERROR, 32
-ENDPROC(el0_error_invalid_compat)
#endif
el1_sync_invalid:
@@ -508,24 +506,18 @@ el1_da:
* Data abort handling
*/
mrs x3, far_el1
- enable_dbg
- // re-enable interrupts if they were enabled in the aborted context
- tbnz x23, #7, 1f // PSR_I_BIT
- enable_irq
-1:
+ inherit_daif pstate=x23, tmp=x2
clear_address_tag x0, x3
mov x2, sp // struct pt_regs
bl do_mem_abort
- // disable interrupts before pulling preserved data off the stack
- disable_irq
kernel_exit 1
el1_sp_pc:
/*
* Stack or PC alignment exception handling
*/
mrs x0, far_el1
- enable_dbg
+ inherit_daif pstate=x23, tmp=x2
mov x2, sp
bl do_sp_pc_abort
ASM_BUG()
@@ -533,7 +525,7 @@ el1_undef:
/*
* Undefined instruction
*/
- enable_dbg
+ inherit_daif pstate=x23, tmp=x2
mov x0, sp
bl do_undefinstr
ASM_BUG()
@@ -550,7 +542,7 @@ el1_dbg:
kernel_exit 1
el1_inv:
// TODO: add support for undefined instructions in kernel mode
- enable_dbg
+ inherit_daif pstate=x23, tmp=x2
mov x0, sp
mov x2, x1
mov x1, #BAD_SYNC
@@ -561,7 +553,7 @@ ENDPROC(el1_sync)
.align 6
el1_irq:
kernel_entry 1
- enable_dbg
+ enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
@@ -607,6 +599,8 @@ el0_sync:
b.eq el0_ia
cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
+ cmp x24, #ESR_ELx_EC_SVE // SVE access
+ b.eq el0_sve_acc
cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
@@ -658,6 +652,7 @@ el0_svc_compat:
/*
* AArch32 syscall handling
*/
+ ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags
adrp stbl, compat_sys_call_table // load compat syscall table pointer
mov wscno, w7 // syscall number in w7 (r7)
mov wsc_nr, #__NR_compat_syscalls
@@ -667,6 +662,10 @@ el0_svc_compat:
el0_irq_compat:
kernel_entry 0, 32
b el0_irq_naked
+
+el0_error_compat:
+ kernel_entry 0, 32
+ b el0_error_naked
#endif
el0_da:
@@ -674,8 +673,7 @@ el0_da:
* Data abort handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
clear_address_tag x0, x26
mov x1, x25
@@ -687,8 +685,7 @@ el0_ia:
* Instruction abort handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, x26
mov x1, x25
@@ -699,17 +696,27 @@ el0_fpsimd_acc:
/*
* Floating Point or Advanced SIMD access
*/
- enable_dbg
+ enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
bl do_fpsimd_acc
b ret_to_user
+el0_sve_acc:
+ /*
+ * Scalable Vector Extension access
+ */
+ enable_daif
+ ct_user_exit
+ mov x0, x25
+ mov x1, sp
+ bl do_sve_acc
+ b ret_to_user
el0_fpsimd_exc:
/*
- * Floating Point or Advanced SIMD exception
+ * Floating Point, Advanced SIMD or SVE exception
*/
- enable_dbg
+ enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
@@ -720,8 +727,7 @@ el0_sp_pc:
* Stack or PC alignment exception handling
*/
mrs x26, far_el1
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, x26
mov x1, x25
@@ -732,8 +738,7 @@ el0_undef:
/*
* Undefined instruction
*/
- // enable interrupts before calling the main handler
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, sp
bl do_undefinstr
@@ -742,7 +747,7 @@ el0_sys:
/*
* System instructions, for trapped cache maintenance instructions
*/
- enable_dbg_and_irq
+ enable_daif
ct_user_exit
mov x0, x25
mov x1, sp
@@ -757,11 +762,11 @@ el0_dbg:
mov x1, x25
mov x2, sp
bl do_debug_exception
- enable_dbg
+ enable_daif
ct_user_exit
b ret_to_user
el0_inv:
- enable_dbg
+ enable_daif
ct_user_exit
mov x0, sp
mov x1, #BAD_SYNC
@@ -774,7 +779,7 @@ ENDPROC(el0_sync)
el0_irq:
kernel_entry 0
el0_irq_naked:
- enable_dbg
+ enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
@@ -788,12 +793,34 @@ el0_irq_naked:
b ret_to_user
ENDPROC(el0_irq)
+el1_error:
+ kernel_entry 1
+ mrs x1, esr_el1
+ enable_dbg
+ mov x0, sp
+ bl do_serror
+ kernel_exit 1
+ENDPROC(el1_error)
+
+el0_error:
+ kernel_entry 0
+el0_error_naked:
+ mrs x1, esr_el1
+ enable_dbg
+ mov x0, sp
+ bl do_serror
+ enable_daif
+ ct_user_exit
+ b ret_to_user
+ENDPROC(el0_error)
+
+
/*
* This is the fast syscall return path. We do as little as possible here,
* and this includes saving x0 back into the kernel stack.
*/
ret_fast_syscall:
- disable_irq // disable interrupts
+ disable_daif
str x0, [sp, #S_X0] // returned x0
ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing
and x2, x1, #_TIF_SYSCALL_WORK
@@ -803,7 +830,7 @@ ret_fast_syscall:
enable_step_tsk x1, x2
kernel_exit 0
ret_fast_syscall_trace:
- enable_irq // enable interrupts
+ enable_daif
b __sys_trace_return_skipped // we already saved x0
/*
@@ -821,7 +848,7 @@ work_pending:
* "slow" syscall return path.
*/
ret_to_user:
- disable_irq // disable interrupts
+ disable_daif
ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
@@ -835,16 +862,37 @@ ENDPROC(ret_to_user)
*/
.align 6
el0_svc:
+ ldr x16, [tsk, #TSK_TI_FLAGS] // load thread flags
adrp stbl, sys_call_table // load syscall table pointer
mov wscno, w8 // syscall number in w8
mov wsc_nr, #__NR_syscalls
+
+#ifdef CONFIG_ARM64_SVE
+alternative_if_not ARM64_SVE
+ b el0_svc_naked
+alternative_else_nop_endif
+ tbz x16, #TIF_SVE, el0_svc_naked // Skip unless TIF_SVE set:
+ bic x16, x16, #_TIF_SVE // discard SVE state
+ str x16, [tsk, #TSK_TI_FLAGS]
+
+ /*
+ * task_fpsimd_load() won't be called to update CPACR_EL1 in
+ * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
+ * happens if a context switch or kernel_neon_begin() or context
+ * modification (sigreturn, ptrace) intervenes.
+ * So, ensure that CPACR_EL1 is already correct for the fast-path case:
+ */
+ mrs x9, cpacr_el1
+ bic x9, x9, #CPACR_EL1_ZEN_EL0EN // disable SVE for el0
+ msr cpacr_el1, x9 // synchronised by eret to el0
+#endif
+
el0_svc_naked: // compat entry point
stp x0, xscno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
- enable_dbg_and_irq
+ enable_daif
ct_user_exit 1
- ldr x16, [tsk, #TSK_TI_FLAGS] // check for syscall hooks
- tst x16, #_TIF_SYSCALL_WORK
+ tst x16, #_TIF_SYSCALL_WORK // check for syscall hooks
b.ne __sys_trace
cmp wscno, wsc_nr // check upper syscall limit
b.hs ni_sys
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 5d547deb6996..143b3e72c25e 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -17,19 +17,34 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/bitmap.h>
#include <linux/bottom_half.h>
+#include <linux/bug.h>
+#include <linux/cache.h>
+#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/irqflags.h>
#include <linux/init.h>
#include <linux/percpu.h>
+#include <linux/prctl.h>
#include <linux/preempt.h>
+#include <linux/ptrace.h>
#include <linux/sched/signal.h>
+#include <linux/sched/task_stack.h>
#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
#include <asm/fpsimd.h>
#include <asm/cputype.h>
#include <asm/simd.h>
+#include <asm/sigcontext.h>
+#include <asm/sysreg.h>
+#include <asm/traps.h>
#define FPEXC_IOF (1 << 0)
#define FPEXC_DZF (1 << 1)
@@ -39,6 +54,8 @@
#define FPEXC_IDF (1 << 7)
/*
+ * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
+ *
* In order to reduce the number of times the FPSIMD state is needlessly saved
* and restored, we need to keep track of two things:
* (a) for each task, we need to remember which CPU was the last one to have
@@ -99,10 +116,741 @@
*/
static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);
+/* Default VL for tasks that don't set it explicitly: */
+static int sve_default_vl = -1;
+
+#ifdef CONFIG_ARM64_SVE
+
+/* Maximum supported vector length across all CPUs (initially poisoned) */
+int __ro_after_init sve_max_vl = -1;
+/* Set of available vector lengths, as vq_to_bit(vq): */
+static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+static void __percpu *efi_sve_state;
+
+#else /* ! CONFIG_ARM64_SVE */
+
+/* Dummy declaration for code that will be optimised out: */
+extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+extern void __percpu *efi_sve_state;
+
+#endif /* ! CONFIG_ARM64_SVE */
+
+/*
+ * Call __sve_free() directly only if you know task can't be scheduled
+ * or preempted.
+ */
+static void __sve_free(struct task_struct *task)
+{
+ kfree(task->thread.sve_state);
+ task->thread.sve_state = NULL;
+}
+
+static void sve_free(struct task_struct *task)
+{
+ WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
+
+ __sve_free(task);
+}
+
+
+/* Offset of FFR in the SVE register dump */
+static size_t sve_ffr_offset(int vl)
+{
+ return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
+}
+
+static void *sve_pffr(struct task_struct *task)
+{
+ return (char *)task->thread.sve_state +
+ sve_ffr_offset(task->thread.sve_vl);
+}
+
+static void change_cpacr(u64 val, u64 mask)
+{
+ u64 cpacr = read_sysreg(CPACR_EL1);
+ u64 new = (cpacr & ~mask) | val;
+
+ if (new != cpacr)
+ write_sysreg(new, CPACR_EL1);
+}
+
+static void sve_user_disable(void)
+{
+ change_cpacr(0, CPACR_EL1_ZEN_EL0EN);
+}
+
+static void sve_user_enable(void)
+{
+ change_cpacr(CPACR_EL1_ZEN_EL0EN, CPACR_EL1_ZEN_EL0EN);
+}
+
+/*
+ * TIF_SVE controls whether a task can use SVE without trapping while
+ * in userspace, and also the way a task's FPSIMD/SVE state is stored
+ * in thread_struct.
+ *
+ * The kernel uses this flag to track whether a user task is actively
+ * using SVE, and therefore whether full SVE register state needs to
+ * be tracked. If not, the cheaper FPSIMD context handling code can
+ * be used instead of the more costly SVE equivalents.
+ *
+ * * TIF_SVE set:
+ *
+ * The task can execute SVE instructions while in userspace without
+ * trapping to the kernel.
+ *
+ * When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
+ * corresponding Zn), P0-P15 and FFR are encoded in
+ * task->thread.sve_state, formatted appropriately for vector
+ * length task->thread.sve_vl.
+ *
+ * task->thread.sve_state must point to a valid buffer at least
+ * sve_state_size(task) bytes in size.
+ *
+ * During any syscall, the kernel may optionally clear TIF_SVE and
+ * discard the vector state except for the FPSIMD subset.
+ *
+ * * TIF_SVE clear:
+ *
+ * An attempt by the user task to execute an SVE instruction causes
+ * do_sve_acc() to be called, which does some preparation and then
+ * sets TIF_SVE.
+ *
+ * When stored, FPSIMD registers V0-V31 are encoded in
+ * task->fpsimd_state; bits [max : 128] for each of Z0-Z31 are
+ * logically zero but not stored anywhere; P0-P15 and FFR are not
+ * stored and have unspecified values from userspace's point of
+ * view. For hygiene purposes, the kernel zeroes them on next use,
+ * but userspace is discouraged from relying on this.
+ *
+ * task->thread.sve_state does not need to be non-NULL, valid or any
+ * particular size: it must not be dereferenced.
+ *
+ * * FPSR and FPCR are always stored in task->fpsimd_state irrespective of
+ * whether TIF_SVE is clear or set, since these are not vector length
+ * dependent.
+ */
+
+/*
+ * Update current's FPSIMD/SVE registers from thread_struct.
+ *
+ * This function should be called only when the FPSIMD/SVE state in
+ * thread_struct is known to be up to date, when preparing to enter
+ * userspace.
+ *
+ * Softirqs (and preemption) must be disabled.
+ */
+static void task_fpsimd_load(void)
+{
+ WARN_ON(!in_softirq() && !irqs_disabled());
+
+ if (system_supports_sve() && test_thread_flag(TIF_SVE))
+ sve_load_state(sve_pffr(current),
+ &current->thread.fpsimd_state.fpsr,
+ sve_vq_from_vl(current->thread.sve_vl) - 1);
+ else
+ fpsimd_load_state(&current->thread.fpsimd_state);
+
+ if (system_supports_sve()) {
+ /* Toggle SVE trapping for userspace if needed */
+ if (test_thread_flag(TIF_SVE))
+ sve_user_enable();
+ else
+ sve_user_disable();
+
+ /* Serialised by exception return to user */
+ }
+}
+
+/*
+ * Ensure current's FPSIMD/SVE storage in thread_struct is up to date
+ * with respect to the CPU registers.
+ *
+ * Softirqs (and preemption) must be disabled.
+ */
+static void task_fpsimd_save(void)
+{
+ WARN_ON(!in_softirq() && !irqs_disabled());
+
+ if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
+ if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
+ if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
+ /*
+ * Can't save the user regs, so current would
+ * re-enter user with corrupt state.
+ * There's no way to recover, so kill it:
+ */
+ force_signal_inject(
+ SIGKILL, 0, current_pt_regs(), 0);
+ return;
+ }
+
+ sve_save_state(sve_pffr(current),
+ &current->thread.fpsimd_state.fpsr);
+ } else
+ fpsimd_save_state(&current->thread.fpsimd_state);
+ }
+}
+
+/*
+ * Helpers to translate bit indices in sve_vq_map to VQ values (and
+ * vice versa). This allows find_next_bit() to be used to find the
+ * _maximum_ VQ not exceeding a certain value.
+ */
+
+static unsigned int vq_to_bit(unsigned int vq)
+{
+ return SVE_VQ_MAX - vq;
+}
+
+static unsigned int bit_to_vq(unsigned int bit)
+{
+ if (WARN_ON(bit >= SVE_VQ_MAX))
+ bit = SVE_VQ_MAX - 1;
+
+ return SVE_VQ_MAX - bit;
+}
+
+/*
+ * All vector length selection from userspace comes through here.
+ * We're on a slow path, so some sanity-checks are included.
+ * If things go wrong there's a bug somewhere, but try to fall back to a
+ * safe choice.
+ */
+static unsigned int find_supported_vector_length(unsigned int vl)
+{
+ int bit;
+ int max_vl = sve_max_vl;
+
+ if (WARN_ON(!sve_vl_valid(vl)))
+ vl = SVE_VL_MIN;
+
+ if (WARN_ON(!sve_vl_valid(max_vl)))
+ max_vl = SVE_VL_MIN;
+
+ if (vl > max_vl)
+ vl = max_vl;
+
+ bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
+ vq_to_bit(sve_vq_from_vl(vl)));
+ return sve_vl_from_vq(bit_to_vq(bit));
+}
+
+#ifdef CONFIG_SYSCTL
+
+static int sve_proc_do_default_vl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+ int vl = sve_default_vl;
+ struct ctl_table tmp_table = {
+ .data = &vl,
+ .maxlen = sizeof(vl),
+ };
+
+ ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
+ if (ret || !write)
+ return ret;
+
+ /* Writing -1 has the special meaning "set to max": */
+ if (vl == -1) {
+ /* Fail safe if sve_max_vl wasn't initialised */
+ if (WARN_ON(!sve_vl_valid(sve_max_vl)))
+ vl = SVE_VL_MIN;
+ else
+ vl = sve_max_vl;
+
+ goto chosen;
+ }
+
+ if (!sve_vl_valid(vl))
+ return -EINVAL;
+
+ vl = find_supported_vector_length(vl);
+chosen:
+ sve_default_vl = vl;
+ return 0;
+}
+
+static struct ctl_table sve_default_vl_table[] = {
+ {
+ .procname = "sve_default_vector_length",
+ .mode = 0644,
+ .proc_handler = sve_proc_do_default_vl,
+ },
+ { }
+};
+
+static int __init sve_sysctl_init(void)
+{
+ if (system_supports_sve())
+ if (!register_sysctl("abi", sve_default_vl_table))
+ return -EINVAL;
+
+ return 0;
+}
+
+#else /* ! CONFIG_SYSCTL */
+static int __init sve_sysctl_init(void) { return 0; }
+#endif /* ! CONFIG_SYSCTL */
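
Since the table registers under "abi", the default vector length appears as /proc/sys/abi/sve_default_vector_length, and writing -1 requests the maximum supported length. A rough userspace sketch of reading and setting it (minimal error handling, path as implied by the registration above):

#include <stdio.h>

#define SVE_DEFAULT_VL_PATH "/proc/sys/abi/sve_default_vector_length"

static int read_default_vl(void)
{
	FILE *f = fopen(SVE_DEFAULT_VL_PATH, "r");
	int vl = -1;

	if (f) {
		if (fscanf(f, "%d", &vl) != 1)
			vl = -1;
		fclose(f);
	}
	return vl;
}

static int write_default_vl(int vl)	/* vl in bytes, or -1 for "maximum" */
{
	FILE *f = fopen(SVE_DEFAULT_VL_PATH, "w");
	int ret = -1;

	if (f) {
		ret = (fprintf(f, "%d\n", vl) > 0) ? 0 : -1;
		fclose(f);
	}
	return ret;
}

int main(void)
{
	printf("default VL: %d bytes\n", read_default_vl());
	return 0;
}
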
+
+#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
+ (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
+
+/*
+ * Transfer the FPSIMD state in task->thread.fpsimd_state to
+ * task->thread.sve_state.
+ *
+ * Task can be a non-runnable task, or current. In the latter case,
+ * softirqs (and preemption) must be disabled.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ * task->thread.fpsimd_state must be up to date before calling this function.
+ */
+static void fpsimd_to_sve(struct task_struct *task)
+{
+ unsigned int vq;
+ void *sst = task->thread.sve_state;
+ struct fpsimd_state const *fst = &task->thread.fpsimd_state;
+ unsigned int i;
+
+ if (!system_supports_sve())
+ return;
+
+ vq = sve_vq_from_vl(task->thread.sve_vl);
+ for (i = 0; i < 32; ++i)
+ memcpy(ZREG(sst, vq, i), &fst->vregs[i],
+ sizeof(fst->vregs[i]));
+}
+
+/*
+ * Transfer the SVE state in task->thread.sve_state to
+ * task->thread.fpsimd_state.
+ *
+ * Task can be a non-runnable task, or current. In the latter case,
+ * softirqs (and preemption) must be disabled.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ * task->thread.sve_state must be up to date before calling this function.
+ */
+static void sve_to_fpsimd(struct task_struct *task)
+{
+ unsigned int vq;
+ void const *sst = task->thread.sve_state;
+ struct fpsimd_state *fst = &task->thread.fpsimd_state;
+ unsigned int i;
+
+ if (!system_supports_sve())
+ return;
+
+ vq = sve_vq_from_vl(task->thread.sve_vl);
+ for (i = 0; i < 32; ++i)
+ memcpy(&fst->vregs[i], ZREG(sst, vq, i),
+ sizeof(fst->vregs[i]));
+}
+
+#ifdef CONFIG_ARM64_SVE
+
+/*
+ * Return how many bytes of memory are required to store the full SVE
+ * state for task, given task's currently configured vector length.
+ */
+size_t sve_state_size(struct task_struct const *task)
+{
+ return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
+}
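
As a rough size check for the buffer this returns: each Z register takes VL bytes, while P0-P15 and FFR take VL/8 bytes each. An illustrative calculation (architectural register sizes only, ignoring any padding the SVE_SIG_* macros may add):

#include <stdio.h>

/* Illustrative only: per-register sizes from the SVE architecture. */
static unsigned int sve_state_bytes(unsigned int vl)
{
	unsigned int zregs = 32 * vl;		/* Z0-Z31: VL bytes each */
	unsigned int pregs = 16 * (vl / 8);	/* P0-P15: one bit per vector byte */
	unsigned int ffr   = vl / 8;		/* FFR has the predicate layout */

	return zregs + pregs + ffr;
}

int main(void)
{
	unsigned int vl;

	for (vl = 16; vl <= 256; vl *= 2)
		printf("VL %3u bytes -> ~%u bytes of state\n",
		       vl, sve_state_bytes(vl));
	/* VL 256, the current architectural maximum, gives 8736 bytes */
	return 0;
}
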
+
+/*
+ * Ensure that task->thread.sve_state is allocated and sufficiently large.
+ *
+ * This function should be used only in preparation for replacing
+ * task->thread.sve_state with new data. The memory is always zeroed
+ * here to prevent stale data from showing through: this is done in
+ * the interest of testability and predictability: except in the
+ * do_sve_acc() case, there is no ABI requirement to hide stale data
+ * written previously by the task.
+ */
+void sve_alloc(struct task_struct *task)
+{
+ if (task->thread.sve_state) {
+ memset(task->thread.sve_state, 0, sve_state_size(task));
+ return;
+ }
+
+ /* This is a small allocation (maximum ~8KB) and Should Not Fail. */
+ task->thread.sve_state =
+ kzalloc(sve_state_size(task), GFP_KERNEL);
+
+ /*
+ * If future SVE revisions can have larger vectors though,
+ * this may cease to be true:
+ */
+ BUG_ON(!task->thread.sve_state);
+}
+
+
+/*
+ * Ensure that task->thread.sve_state is up to date with respect to
+ * the user task, irrespective of whether SVE is in use or not.
+ *
+ * This should only be called by ptrace. task must be non-runnable.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ */
+void fpsimd_sync_to_sve(struct task_struct *task)
+{
+ if (!test_tsk_thread_flag(task, TIF_SVE))
+ fpsimd_to_sve(task);
+}
+
+/*
+ * Ensure that task->thread.fpsimd_state is up to date with respect to
+ * the user task, irrespective of whether SVE is in use or not.
+ *
+ * This should only be called by ptrace. task must be non-runnable.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ */
+void sve_sync_to_fpsimd(struct task_struct *task)
+{
+ if (test_tsk_thread_flag(task, TIF_SVE))
+ sve_to_fpsimd(task);
+}
+
+/*
+ * Ensure that task->thread.sve_state is up to date with respect to
+ * the task->thread.fpsimd_state.
+ *
+ * This should only be called by ptrace to merge new FPSIMD register
+ * values into a task for which SVE is currently active.
+ * task must be non-runnable.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ * task->thread.fpsimd_state must already have been initialised with
+ * the new FPSIMD register values to be merged in.
+ */
+void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
+{
+ unsigned int vq;
+ void *sst = task->thread.sve_state;
+ struct fpsimd_state const *fst = &task->thread.fpsimd_state;
+ unsigned int i;
+
+ if (!test_tsk_thread_flag(task, TIF_SVE))
+ return;
+
+ vq = sve_vq_from_vl(task->thread.sve_vl);
+
+ memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
+
+ for (i = 0; i < 32; ++i)
+ memcpy(ZREG(sst, vq, i), &fst->vregs[i],
+ sizeof(fst->vregs[i]));
+}
+
+int sve_set_vector_length(struct task_struct *task,
+ unsigned long vl, unsigned long flags)
+{
+ if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
+ PR_SVE_SET_VL_ONEXEC))
+ return -EINVAL;
+
+ if (!sve_vl_valid(vl))
+ return -EINVAL;
+
+ /*
+ * Clamp to the maximum vector length that VL-agnostic SVE code can
+ * work with. A flag may be assigned in the future to allow setting
+ * of larger vector lengths without confusing older software.
+ */
+ if (vl > SVE_VL_ARCH_MAX)
+ vl = SVE_VL_ARCH_MAX;
+
+ vl = find_supported_vector_length(vl);
+
+ if (flags & (PR_SVE_VL_INHERIT |
+ PR_SVE_SET_VL_ONEXEC))
+ task->thread.sve_vl_onexec = vl;
+ else
+ /* Reset VL to system default on next exec: */
+ task->thread.sve_vl_onexec = 0;
+
+ /* Only actually set the VL if not deferred: */
+ if (flags & PR_SVE_SET_VL_ONEXEC)
+ goto out;
+
+ if (vl == task->thread.sve_vl)
+ goto out;
+
+ /*
+ * To ensure the FPSIMD bits of the SVE vector registers are preserved,
+ * write any live register state back to task_struct, and convert to a
+ * non-SVE thread.
+ */
+ if (task == current) {
+ local_bh_disable();
+
+ task_fpsimd_save();
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+ }
+
+ fpsimd_flush_task_state(task);
+ if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
+ sve_to_fpsimd(task);
+
+ if (task == current)
+ local_bh_enable();
+
+ /*
+ * Force reallocation of task SVE state to the correct size
+ * on next use:
+ */
+ sve_free(task);
+
+ task->thread.sve_vl = vl;
+
+out:
+ if (flags & PR_SVE_VL_INHERIT)
+ set_tsk_thread_flag(task, TIF_SVE_VL_INHERIT);
+ else
+ clear_tsk_thread_flag(task, TIF_SVE_VL_INHERIT);
+
+ return 0;
+}
+
+/*
+ * Encode the current vector length and flags for return.
+ * This is only required for prctl(): ptrace has separate fields
+ *
+ * flags are as for sve_set_vector_length().
+ */
+static int sve_prctl_status(unsigned long flags)
+{
+ int ret;
+
+ if (flags & PR_SVE_SET_VL_ONEXEC)
+ ret = current->thread.sve_vl_onexec;
+ else
+ ret = current->thread.sve_vl;
+
+ if (test_thread_flag(TIF_SVE_VL_INHERIT))
+ ret |= PR_SVE_VL_INHERIT;
+
+ return ret;
+}
+
+/* PR_SVE_SET_VL */
+int sve_set_current_vl(unsigned long arg)
+{
+ unsigned long vl, flags;
+ int ret;
+
+ vl = arg & PR_SVE_VL_LEN_MASK;
+ flags = arg & ~vl;
+
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ ret = sve_set_vector_length(current, vl, flags);
+ if (ret)
+ return ret;
+
+ return sve_prctl_status(flags);
+}
+
+/* PR_SVE_GET_VL */
+int sve_get_current_vl(void)
+{
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ return sve_prctl_status(0);
+}
+
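These handlers back prctl(PR_SVE_SET_VL) and prctl(PR_SVE_GET_VL). A hedged userspace sketch; the fallback constants mirror the values this series adds to linux/prctl.h and are only needed if installed headers predate it:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SVE_SET_VL			/* fallbacks from this series' uapi header */
#define PR_SVE_SET_VL		50
#define PR_SVE_SET_VL_ONEXEC	(1 << 18)
#define PR_SVE_GET_VL		51
#define PR_SVE_VL_LEN_MASK	0xffff
#define PR_SVE_VL_INHERIT	(1 << 17)
#endif

int main(void)
{
	/* Ask for a 32-byte (256-bit) vector length, inherited across exec */
	if (prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT) < 0) {
		perror("PR_SVE_SET_VL");
		return 1;
	}

	/* The kernel may have rounded the request down to a supported VL */
	int ret = prctl(PR_SVE_GET_VL);
	if (ret < 0) {
		perror("PR_SVE_GET_VL");
		return 1;
	}

	printf("vector length: %d bytes, inherit: %s\n",
	       ret & PR_SVE_VL_LEN_MASK,
	       (ret & PR_SVE_VL_INHERIT) ? "yes" : "no");
	return 0;
}
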
+/*
+ * Bitmap for temporary storage of the per-CPU set of supported vector lengths
+ * during secondary boot.
+ */
+static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX);
+
+static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
+{
+ unsigned int vq, vl;
+ unsigned long zcr;
+
+ bitmap_zero(map, SVE_VQ_MAX);
+
+ zcr = ZCR_ELx_LEN_MASK;
+ zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;
+
+ for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
+ write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
+ vl = sve_get_vl();
+ vq = sve_vq_from_vl(vl); /* skip intervening lengths */
+ set_bit(vq_to_bit(vq), map);
+ }
+}
+
+void __init sve_init_vq_map(void)
+{
+ sve_probe_vqs(sve_vq_map);
+}
+
+/*
+ * If we haven't committed to the set of supported VQs yet, filter out
+ * those not supported by the current CPU.
+ */
+void sve_update_vq_map(void)
+{
+ sve_probe_vqs(sve_secondary_vq_map);
+ bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX);
+}
+
+/* Check whether the current CPU supports all VQs in the committed set */
+int sve_verify_vq_map(void)
+{
+ int ret = 0;
+
+ sve_probe_vqs(sve_secondary_vq_map);
+ bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map,
+ SVE_VQ_MAX);
+ if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) {
+ pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
+ smp_processor_id());
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void __init sve_efi_setup(void)
+{
+ if (!IS_ENABLED(CONFIG_EFI))
+ return;
+
+ /*
+ * alloc_percpu() warns and prints a backtrace if this goes wrong.
+ * This is evidence of a crippled system and we are returning void,
+ * so no attempt is made to handle this situation here.
+ */
+ if (!sve_vl_valid(sve_max_vl))
+ goto fail;
+
+ efi_sve_state = __alloc_percpu(
+ SVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);
+ if (!efi_sve_state)
+ goto fail;
+
+ return;
+
+fail:
+ panic("Cannot allocate percpu memory for EFI SVE save/restore");
+}
+
+/*
+ * Enable SVE for EL1.
+ * Intended for use by the cpufeatures code during CPU boot.
+ */
+int sve_kernel_enable(void *__always_unused p)
+{
+ write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
+ isb();
+
+ return 0;
+}
+
+void __init sve_setup(void)
+{
+ u64 zcr;
+
+ if (!system_supports_sve())
+ return;
+
+ /*
+ * The SVE architecture mandates support for 128-bit vectors,
+ * so sve_vq_map must have at least SVE_VQ_MIN set.
+ * If something went wrong, at least try to patch it up:
+ */
+ if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
+ set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
+
+ zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
+ sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
+
+ /*
+ * Sanity-check that the max VL we determined through CPU features
+ * corresponds properly to sve_vq_map. If not, do our best:
+ */
+ if (WARN_ON(sve_max_vl != find_supported_vector_length(sve_max_vl)))
+ sve_max_vl = find_supported_vector_length(sve_max_vl);
+
+ /*
+ * For the default VL, pick the maximum supported value <= 64.
+ * VL == 64 is guaranteed not to grow the signal frame.
+ */
+ sve_default_vl = find_supported_vector_length(64);
+
+ pr_info("SVE: maximum available vector length %u bytes per vector\n",
+ sve_max_vl);
+ pr_info("SVE: default vector length %u bytes per vector\n",
+ sve_default_vl);
+
+ sve_efi_setup();
+}
+
+/*
+ * Called from the put_task_struct() path, which cannot get here
+ * unless dead_task is really dead and not schedulable.
+ */
+void fpsimd_release_task(struct task_struct *dead_task)
+{
+ __sve_free(dead_task);
+}
+
+#endif /* CONFIG_ARM64_SVE */
+
+/*
+ * Trapped SVE access
+ *
+ * Storage is allocated for the full SVE state, the current FPSIMD
+ * register contents are migrated across, and TIF_SVE is set so that
+ * the SVE access trap will be disabled the next time this task
+ * reaches ret_to_user.
+ *
+ * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
+ * would have disabled the SVE access trap for userspace during
+ * ret_to_user, making an SVE access trap impossible in that case.
+ */
+asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
+{
+ /* Even if we chose not to use SVE, the hardware could still trap: */
+ if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+ return;
+ }
+
+ sve_alloc(current);
+
+ local_bh_disable();
+
+ task_fpsimd_save();
+ fpsimd_to_sve(current);
+
+ /* Force ret_to_user to reload the registers: */
+ fpsimd_flush_task_state(current);
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+
+ if (test_and_set_thread_flag(TIF_SVE))
+ WARN_ON(1); /* SVE access shouldn't have trapped */
+
+ local_bh_enable();
+}
+
/*
* Trapped FP/ASIMD access.
*/
-void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
+asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
/* TODO: implement lazy context saving/restoring */
WARN_ON(1);
@@ -111,7 +859,7 @@ void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
/*
* Raise a SIGFPE for the current process.
*/
-void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
+asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
siginfo_t info;
unsigned int si_code = 0;
@@ -144,8 +892,8 @@ void fpsimd_thread_switch(struct task_struct *next)
* the registers is in fact the most recent userland FPSIMD state of
* 'current'.
*/
- if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
- fpsimd_save_state(&current->thread.fpsimd_state);
+ if (current->mm)
+ task_fpsimd_save();
if (next->mm) {
/*
@@ -159,16 +907,16 @@ void fpsimd_thread_switch(struct task_struct *next)
if (__this_cpu_read(fpsimd_last_state) == st
&& st->cpu == smp_processor_id())
- clear_ti_thread_flag(task_thread_info(next),
- TIF_FOREIGN_FPSTATE);
+ clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
else
- set_ti_thread_flag(task_thread_info(next),
- TIF_FOREIGN_FPSTATE);
+ set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
}
}
void fpsimd_flush_thread(void)
{
+ int vl, supported_vl;
+
if (!system_supports_fpsimd())
return;
@@ -176,6 +924,42 @@ void fpsimd_flush_thread(void)
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_flush_task_state(current);
+
+ if (system_supports_sve()) {
+ clear_thread_flag(TIF_SVE);
+ sve_free(current);
+
+ /*
+ * Reset the task vector length as required.
+ * This is where we ensure that all user tasks have a valid
+ * vector length configured: no kernel task can become a user
+ * task without an exec and hence a call to this function.
+ * By the time the first call to this function is made, all
+ * early hardware probing is complete, so sve_default_vl
+ * should be valid.
+ * If a bug causes this to go wrong, we make some noise and
+ * try to fudge thread.sve_vl to a safe value here.
+ */
+ vl = current->thread.sve_vl_onexec ?
+ current->thread.sve_vl_onexec : sve_default_vl;
+
+ if (WARN_ON(!sve_vl_valid(vl)))
+ vl = SVE_VL_MIN;
+
+ supported_vl = find_supported_vector_length(vl);
+ if (WARN_ON(supported_vl != vl))
+ vl = supported_vl;
+
+ current->thread.sve_vl = vl;
+
+ /*
+ * If the task is not set to inherit, ensure that the vector
+ * length will be reset by a subsequent exec:
+ */
+ if (!test_thread_flag(TIF_SVE_VL_INHERIT))
+ current->thread.sve_vl_onexec = 0;
+ }
+
set_thread_flag(TIF_FOREIGN_FPSTATE);
local_bh_enable();
@@ -191,14 +975,23 @@ void fpsimd_preserve_current_state(void)
return;
local_bh_disable();
-
- if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
- fpsimd_save_state(&current->thread.fpsimd_state);
-
+ task_fpsimd_save();
local_bh_enable();
}
/*
+ * Like fpsimd_preserve_current_state(), but ensure that
+ * current->thread.fpsimd_state is updated so that it can be copied to
+ * the signal frame.
+ */
+void fpsimd_signal_preserve_current_state(void)
+{
+ fpsimd_preserve_current_state();
+ if (system_supports_sve() && test_thread_flag(TIF_SVE))
+ sve_to_fpsimd(current);
+}
+
+/*
* Load the userland FPSIMD state of 'current' from memory, but only if the
* FPSIMD state already held in the registers is /not/ the most recent FPSIMD
* state of 'current'
@@ -213,7 +1006,7 @@ void fpsimd_restore_current_state(void)
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
struct fpsimd_state *st = &current->thread.fpsimd_state;
- fpsimd_load_state(st);
+ task_fpsimd_load();
__this_cpu_write(fpsimd_last_state, st);
st->cpu = smp_processor_id();
}
@@ -233,7 +1026,12 @@ void fpsimd_update_current_state(struct fpsimd_state *state)
local_bh_disable();
- fpsimd_load_state(state);
+ if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
+ current->thread.fpsimd_state = *state;
+ fpsimd_to_sve(current);
+ }
+ task_fpsimd_load();
+
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
struct fpsimd_state *st = &current->thread.fpsimd_state;
@@ -252,6 +1050,33 @@ void fpsimd_flush_task_state(struct task_struct *t)
t->thread.fpsimd_state.cpu = NR_CPUS;
}
+static inline void fpsimd_flush_cpu_state(void)
+{
+ __this_cpu_write(fpsimd_last_state, NULL);
+}
+
+/*
+ * Invalidate any task SVE state currently held in this CPU's regs.
+ *
+ * This is used to prevent the kernel from trying to reuse SVE register data
+ * that is destroyed by KVM guest enter/exit. This function should go away when
+ * KVM SVE support is implemented. Don't use it for anything else.
+ */
+#ifdef CONFIG_ARM64_SVE
+void sve_flush_cpu_state(void)
+{
+ struct fpsimd_state *const fpstate = __this_cpu_read(fpsimd_last_state);
+ struct task_struct *tsk;
+
+ if (!fpstate)
+ return;
+
+ tsk = container_of(fpstate, struct task_struct, thread.fpsimd_state);
+ if (test_tsk_thread_flag(tsk, TIF_SVE))
+ fpsimd_flush_cpu_state();
+}
+#endif /* CONFIG_ARM64_SVE */
+
#ifdef CONFIG_KERNEL_MODE_NEON
DEFINE_PER_CPU(bool, kernel_neon_busy);
@@ -286,11 +1111,13 @@ void kernel_neon_begin(void)
__this_cpu_write(kernel_neon_busy, true);
/* Save unsaved task fpsimd state, if any: */
- if (current->mm && !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
- fpsimd_save_state(&current->thread.fpsimd_state);
+ if (current->mm) {
+ task_fpsimd_save();
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+ }
/* Invalidate any task state remaining in the fpsimd regs: */
- __this_cpu_write(fpsimd_last_state, NULL);
+ fpsimd_flush_cpu_state();
preempt_disable();
@@ -325,6 +1152,7 @@ EXPORT_SYMBOL(kernel_neon_end);
static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
+static DEFINE_PER_CPU(bool, efi_sve_state_used);
/*
* EFI runtime services support functions
@@ -350,10 +1178,24 @@ void __efi_fpsimd_begin(void)
WARN_ON(preemptible());
- if (may_use_simd())
+ if (may_use_simd()) {
kernel_neon_begin();
- else {
- fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
+ } else {
+ /*
+ * If !efi_sve_state, SVE can't be in use yet and doesn't need
+ * preserving:
+ */
+ if (system_supports_sve() && likely(efi_sve_state)) {
+ char *sve_state = this_cpu_ptr(efi_sve_state);
+
+ __this_cpu_write(efi_sve_state_used, true);
+
+ sve_save_state(sve_state + sve_ffr_offset(sve_max_vl),
+ &this_cpu_ptr(&efi_fpsimd_state)->fpsr);
+ } else {
+ fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
+ }
+
__this_cpu_write(efi_fpsimd_state_used, true);
}
}
@@ -366,10 +1208,22 @@ void __efi_fpsimd_end(void)
if (!system_supports_fpsimd())
return;
- if (__this_cpu_xchg(efi_fpsimd_state_used, false))
- fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
- else
+ if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
kernel_neon_end();
+ } else {
+ if (system_supports_sve() &&
+ likely(__this_cpu_read(efi_sve_state_used))) {
+ char const *sve_state = this_cpu_ptr(efi_sve_state);
+
+ sve_load_state(sve_state + sve_ffr_offset(sve_max_vl),
+ &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
+ sve_vq_from_vl(sve_get_vl()) - 1);
+
+ __this_cpu_write(efi_sve_state_used, false);
+ } else {
+ fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
+ }
+ }
}
#endif /* CONFIG_EFI */
@@ -382,9 +1236,9 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
{
switch (cmd) {
case CPU_PM_ENTER:
- if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE))
- fpsimd_save_state(&current->thread.fpsimd_state);
- this_cpu_write(fpsimd_last_state, NULL);
+ if (current->mm)
+ task_fpsimd_save();
+ fpsimd_flush_cpu_state();
break;
case CPU_PM_EXIT:
if (current->mm)
@@ -442,6 +1296,6 @@ static int __init fpsimd_init(void)
if (!(elf_hwcap & HWCAP_ASIMD))
pr_notice("Advanced SIMD is not implemented\n");
- return 0;
+ return sve_sysctl_init();
}
core_initcall(fpsimd_init);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0b243ecaf7ac..67e86a0f57ac 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -480,14 +480,21 @@ set_hcr:
/* Statistical profiling */
ubfx x0, x1, #32, #4 // Check ID_AA64DFR0_EL1 PMSVer
- cbz x0, 6f // Skip if SPE not present
- cbnz x2, 5f // VHE?
+ cbz x0, 7f // Skip if SPE not present
+ cbnz x2, 6f // VHE?
+ mrs_s x4, SYS_PMBIDR_EL1 // If SPE available at EL2,
+ and x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
+ cbnz x4, 5f // then permit sampling of physical
+ mov x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
+ 1 << SYS_PMSCR_EL2_PA_SHIFT)
+ msr_s SYS_PMSCR_EL2, x4 // addresses and physical counter
+5:
mov x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
orr x3, x3, x1 // If we don't have VHE, then
- b 6f // use EL1&0 translation.
-5: // For VHE, use EL2 translation
+ b 7f // use EL1&0 translation.
+6: // For VHE, use EL2 translation
orr x3, x3, #MDCR_EL2_TPMS // and disable access from EL1
-6:
+7:
msr mdcr_el2, x3 // Configure debug traps
/* Stage-2 translation */
@@ -517,8 +524,19 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
mov x0, #0x33ff
msr cptr_el2, x0 // Disable copro. traps to EL2
+ /* SVE register access */
+ mrs x1, id_aa64pfr0_el1
+ ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
+ cbz x1, 7f
+
+ bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps
+ msr cptr_el2, x0 // Disable copro. traps to EL2
+ isb
+ mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector
+ msr_s SYS_ZCR_EL2, x1 // length for EL1.
+
/* Hypervisor stub */
- adr_l x0, __hyp_stub_vectors
+7: adr_l x0, __hyp_stub_vectors
msr vbar_el2, x0
/* spsr */
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 095d3c170f5d..3009b8b80f08 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -27,6 +27,7 @@
#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/daifflags.h>
#include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h>
@@ -285,7 +286,7 @@ int swsusp_arch_suspend(void)
return -EBUSY;
}
- local_dbg_save(flags);
+ flags = local_daif_save();
if (__cpu_suspend_enter(&state)) {
/* make the crash dump kernel image visible/saveable */
@@ -315,7 +316,7 @@ int swsusp_arch_suspend(void)
__cpu_suspend_exit();
}
- local_dbg_restore(flags);
+ local_daif_restore(flags);
return ret;
}
diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c
index 354be2a872ae..79b17384effa 100644
--- a/arch/arm64/kernel/io.c
+++ b/arch/arm64/kernel/io.c
@@ -25,8 +25,7 @@
*/
void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
{
- while (count && (!IS_ALIGNED((unsigned long)from, 8) ||
- !IS_ALIGNED((unsigned long)to, 8))) {
+ while (count && !IS_ALIGNED((unsigned long)from, 8)) {
*(u8 *)to = __raw_readb(from);
from++;
to++;
@@ -54,23 +53,22 @@ EXPORT_SYMBOL(__memcpy_fromio);
*/
void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
{
- while (count && (!IS_ALIGNED((unsigned long)to, 8) ||
- !IS_ALIGNED((unsigned long)from, 8))) {
- __raw_writeb(*(volatile u8 *)from, to);
+ while (count && !IS_ALIGNED((unsigned long)to, 8)) {
+ __raw_writeb(*(u8 *)from, to);
from++;
to++;
count--;
}
while (count >= 8) {
- __raw_writeq(*(volatile u64 *)from, to);
+ __raw_writeq(*(u64 *)from, to);
from += 8;
to += 8;
count -= 8;
}
while (count) {
- __raw_writeb(*(volatile u8 *)from, to);
+ __raw_writeb(*(u8 *)from, to);
from++;
to++;
count--;
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 11121f608eb5..f76ea92dff91 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -18,6 +18,7 @@
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
+#include <asm/daifflags.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
@@ -195,8 +196,7 @@ void machine_kexec(struct kimage *kimage)
pr_info("Bye!\n");
- /* Disable all DAIF exceptions. */
- asm volatile ("msr daifset, #0xf" : : : "memory");
+ local_daif_mask();
/*
* cpu_soft_restart will shutdown the MMU, disable data caches, then
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 2dc0f8482210..b2adcce7bc18 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -49,6 +49,7 @@
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
+#include <linux/thread_info.h>
#include <asm/alternative.h>
#include <asm/compat.h>
@@ -170,6 +171,39 @@ void machine_restart(char *cmd)
while (1);
}
+static void print_pstate(struct pt_regs *regs)
+{
+ u64 pstate = regs->pstate;
+
+ if (compat_user_mode(regs)) {
+ printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
+ pstate,
+ pstate & COMPAT_PSR_N_BIT ? 'N' : 'n',
+ pstate & COMPAT_PSR_Z_BIT ? 'Z' : 'z',
+ pstate & COMPAT_PSR_C_BIT ? 'C' : 'c',
+ pstate & COMPAT_PSR_V_BIT ? 'V' : 'v',
+ pstate & COMPAT_PSR_Q_BIT ? 'Q' : 'q',
+ pstate & COMPAT_PSR_T_BIT ? "T32" : "A32",
+ pstate & COMPAT_PSR_E_BIT ? "BE" : "LE",
+ pstate & COMPAT_PSR_A_BIT ? 'A' : 'a',
+ pstate & COMPAT_PSR_I_BIT ? 'I' : 'i',
+ pstate & COMPAT_PSR_F_BIT ? 'F' : 'f');
+ } else {
+ printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n",
+ pstate,
+ pstate & PSR_N_BIT ? 'N' : 'n',
+ pstate & PSR_Z_BIT ? 'Z' : 'z',
+ pstate & PSR_C_BIT ? 'C' : 'c',
+ pstate & PSR_V_BIT ? 'V' : 'v',
+ pstate & PSR_D_BIT ? 'D' : 'd',
+ pstate & PSR_A_BIT ? 'A' : 'a',
+ pstate & PSR_I_BIT ? 'I' : 'i',
+ pstate & PSR_F_BIT ? 'F' : 'f',
+ pstate & PSR_PAN_BIT ? '+' : '-',
+ pstate & PSR_UAO_BIT ? '+' : '-');
+ }
+}
+
void __show_regs(struct pt_regs *regs)
{
int i, top_reg;
@@ -186,10 +220,9 @@ void __show_regs(struct pt_regs *regs)
}
show_regs_print_info(KERN_DEFAULT);
- print_symbol("PC is at %s\n", instruction_pointer(regs));
- print_symbol("LR is at %s\n", lr);
- printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
- regs->pc, lr, regs->pstate);
+ print_pstate(regs);
+ print_symbol("pc : %s\n", regs->pc);
+ print_symbol("lr : %s\n", lr);
printk("sp : %016llx\n", sp);
i = top_reg;
@@ -241,11 +274,27 @@ void release_thread(struct task_struct *dead_task)
{
}
+void arch_release_task_struct(struct task_struct *tsk)
+{
+ fpsimd_release_task(tsk);
+}
+
+/*
+ * src and dst may temporarily have aliased sve_state after task_struct
+ * is copied. We cannot fix this properly here, because src may have
+ * live SVE state and dst's thread_info may not exist yet, so tweaking
+ * either src's or dst's TIF_SVE is not safe.
+ *
+ * The unaliasing is done in copy_thread() instead. This works because
+ * dst is not schedulable or traceable until both of these functions
+ * have been called.
+ */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
if (current->mm)
fpsimd_preserve_current_state();
*dst = *src;
+
return 0;
}
@@ -258,6 +307,13 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
+ /*
+ * Unalias p->thread.sve_state (if any) from the parent task
+ * and disable/discard SVE state for p:
+ */
+ clear_tsk_thread_flag(p, TIF_SVE);
+ p->thread.sve_state = NULL;
+
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
childregs->regs[0] = 0;
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 9cbb6123208f..7c44658b316d 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -32,6 +32,7 @@
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
+#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
@@ -40,6 +41,7 @@
#include <linux/elf.h>
#include <asm/compat.h>
+#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/stacktrace.h>
@@ -618,17 +620,56 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
/*
* TODO: update fp accessors for lazy context switching (sync/flush hwstate)
*/
-static int fpr_get(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+static int __fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf, unsigned int start_pos)
{
struct user_fpsimd_state *uregs;
+
+ sve_sync_to_fpsimd(target);
+
uregs = &target->thread.fpsimd_state.user_fpsimd;
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
+ start_pos, start_pos + sizeof(*uregs));
+}
+
+static int fpr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
if (target == current)
fpsimd_preserve_current_state();
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
+ return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
+}
+
+static int __fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf,
+ unsigned int start_pos)
+{
+ int ret;
+ struct user_fpsimd_state newstate;
+
+ /*
+ * Ensure target->thread.fpsimd_state is up to date, so that a
+ * short copyin can't resurrect stale data.
+ */
+ sve_sync_to_fpsimd(target);
+
+ newstate = target->thread.fpsimd_state.user_fpsimd;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
+ start_pos, start_pos + sizeof(newstate));
+ if (ret)
+ return ret;
+
+ target->thread.fpsimd_state.user_fpsimd = newstate;
+
+ return ret;
}
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
@@ -636,15 +677,14 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct user_fpsimd_state newstate =
- target->thread.fpsimd_state.user_fpsimd;
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
+ ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
if (ret)
return ret;
- target->thread.fpsimd_state.user_fpsimd = newstate;
+ sve_sync_from_fpsimd_zeropad(target);
fpsimd_flush_task_state(target);
+
return ret;
}
@@ -702,6 +742,215 @@ static int system_call_set(struct task_struct *target,
return ret;
}
+#ifdef CONFIG_ARM64_SVE
+
+static void sve_init_header_from_task(struct user_sve_header *header,
+ struct task_struct *target)
+{
+ unsigned int vq;
+
+ memset(header, 0, sizeof(*header));
+
+ header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
+ SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
+ if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
+ header->flags |= SVE_PT_VL_INHERIT;
+
+ header->vl = target->thread.sve_vl;
+ vq = sve_vq_from_vl(header->vl);
+
+ header->max_vl = sve_max_vl;
+ if (WARN_ON(!sve_vl_valid(sve_max_vl)))
+ header->max_vl = header->vl;
+
+ header->size = SVE_PT_SIZE(vq, header->flags);
+ header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
+ SVE_PT_REGS_SVE);
+}
+
+static unsigned int sve_size_from_header(struct user_sve_header const *header)
+{
+ return ALIGN(header->size, SVE_VQ_BYTES);
+}
+
+static unsigned int sve_get_size(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ struct user_sve_header header;
+
+ if (!system_supports_sve())
+ return 0;
+
+ sve_init_header_from_task(&header, target);
+ return sve_size_from_header(&header);
+}
+
+static int sve_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+ struct user_sve_header header;
+ unsigned int vq;
+ unsigned long start, end;
+
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ /* Header */
+ sve_init_header_from_task(&header, target);
+ vq = sve_vq_from_vl(header.vl);
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
+ 0, sizeof(header));
+ if (ret)
+ return ret;
+
+ if (target == current)
+ fpsimd_preserve_current_state();
+
+ /* Registers: FPSIMD-only case */
+
+ BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
+ if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
+ return __fpr_get(target, regset, pos, count, kbuf, ubuf,
+ SVE_PT_FPSIMD_OFFSET);
+
+ /* Otherwise: full SVE case */
+
+ BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
+ start = SVE_PT_SVE_OFFSET;
+ end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ target->thread.sve_state,
+ start, end);
+ if (ret)
+ return ret;
+
+ start = end;
+ end = SVE_PT_SVE_FPSR_OFFSET(vq);
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ start, end);
+ if (ret)
+ return ret;
+
+ /*
+ * Copy fpsr and fpcr, which must follow contiguously in
+ * struct fpsimd_state:
+ */
+ start = end;
+ end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpsimd_state.fpsr,
+ start, end);
+ if (ret)
+ return ret;
+
+ start = end;
+ end = sve_size_from_header(&header);
+ return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ start, end);
+}
+
+static int sve_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct user_sve_header header;
+ unsigned int vq;
+ unsigned long start, end;
+
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ /* Header */
+ if (count < sizeof(header))
+ return -EINVAL;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
+ 0, sizeof(header));
+ if (ret)
+ goto out;
+
+ /*
+ * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
+ * sve_set_vector_length(), which will also validate them for us:
+ */
+ ret = sve_set_vector_length(target, header.vl,
+ ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
+ if (ret)
+ goto out;
+
+ /* Actual VL set may be less than the user asked for: */
+ vq = sve_vq_from_vl(target->thread.sve_vl);
+
+ /* Registers: FPSIMD-only case */
+
+ BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
+ if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
+ ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
+ SVE_PT_FPSIMD_OFFSET);
+ clear_tsk_thread_flag(target, TIF_SVE);
+ goto out;
+ }
+
+ /* Otherwise: full SVE case */
+
+ /*
+ * If setting a different VL from the requested VL and there is
+ * register data, the data layout will be wrong: don't even
+ * try to set the registers in this case.
+ */
+ if (count && vq != sve_vq_from_vl(header.vl)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ sve_alloc(target);
+
+ /*
+ * Ensure target->thread.sve_state is up to date with target's
+ * FPSIMD regs, so that a short copyin leaves trailing registers
+ * unmodified.
+ */
+ fpsimd_sync_to_sve(target);
+ set_tsk_thread_flag(target, TIF_SVE);
+
+ BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
+ start = SVE_PT_SVE_OFFSET;
+ end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ target->thread.sve_state,
+ start, end);
+ if (ret)
+ goto out;
+
+ start = end;
+ end = SVE_PT_SVE_FPSR_OFFSET(vq);
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ start, end);
+ if (ret)
+ goto out;
+
+ /*
+ * Copy fpsr and fpcr, which must follow contiguously in
+ * struct fpsimd_state:
+ */
+ start = end;
+ end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpsimd_state.fpsr,
+ start, end);
+
+out:
+ fpsimd_flush_task_state(target);
+ return ret;
+}
+
+#endif /* CONFIG_ARM64_SVE */
+
enum aarch64_regset {
REGSET_GPR,
REGSET_FPR,
@@ -711,6 +960,9 @@ enum aarch64_regset {
REGSET_HW_WATCH,
#endif
REGSET_SYSTEM_CALL,
+#ifdef CONFIG_ARM64_SVE
+ REGSET_SVE,
+#endif
};
static const struct user_regset aarch64_regsets[] = {
@@ -768,6 +1020,18 @@ static const struct user_regset aarch64_regsets[] = {
.get = system_call_get,
.set = system_call_set,
},
+#ifdef CONFIG_ARM64_SVE
+ [REGSET_SVE] = { /* Scalable Vector Extension */
+ .core_note_type = NT_ARM_SVE,
+ .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
+ SVE_VQ_BYTES),
+ .size = SVE_VQ_BYTES,
+ .align = SVE_VQ_BYTES,
+ .get = sve_get,
+ .set = sve_set,
+ .get_size = sve_get_size,
+ },
+#endif
};
static const struct user_regset_view user_aarch64_view = {
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index d4b740538ad5..30ad2f085d1f 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -23,7 +23,6 @@
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
-#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
@@ -48,6 +47,7 @@
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
+#include <asm/daifflags.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
@@ -103,7 +103,8 @@ void __init smp_setup_processor_id(void)
* access percpu variable inside lock_release
*/
set_my_cpu_offset(0);
- pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
+ pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
+ (unsigned long)mpidr, read_cpuid_id());
}
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
@@ -244,9 +245,6 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
void __init setup_arch(char **cmdline_p)
{
- pr_info("Boot CPU: AArch64 Processor [%08x]\n", read_cpuid_id());
-
- sprintf(init_utsname()->machine, UTS_MACHINE);
init_mm.start_code = (unsigned long) _text;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
@@ -262,10 +260,11 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
/*
- * Unmask asynchronous aborts after bringing up possible earlycon.
- * (Report possible System Errors once we can report this occurred)
+ * Unmask asynchronous aborts and FIQ after bringing up possible
+ * earlycon. (Report possible System Errors once we can report that
+ * this occurred).
*/
- local_async_enable();
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 0bdc96c61bc0..b120111a46be 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -31,6 +31,7 @@
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
+#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
@@ -63,6 +64,7 @@ struct rt_sigframe_user_layout {
unsigned long fpsimd_offset;
unsigned long esr_offset;
+ unsigned long sve_offset;
unsigned long extra_offset;
unsigned long end_offset;
};
@@ -179,9 +181,6 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
int err;
- /* dump the hardware registers to the fpsimd_state structure */
- fpsimd_preserve_current_state();
-
/* copy the FP and status/control registers */
err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
@@ -214,6 +213,8 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
__get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
+ clear_thread_flag(TIF_SVE);
+
/* load the hardware registers from the fpsimd_state structure */
if (!err)
fpsimd_update_current_state(&fpsimd);
@@ -221,10 +222,118 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
return err ? -EFAULT : 0;
}
+
struct user_ctxs {
struct fpsimd_context __user *fpsimd;
+ struct sve_context __user *sve;
};
+#ifdef CONFIG_ARM64_SVE
+
+static int preserve_sve_context(struct sve_context __user *ctx)
+{
+ int err = 0;
+ u16 reserved[ARRAY_SIZE(ctx->__reserved)];
+ unsigned int vl = current->thread.sve_vl;
+ unsigned int vq = 0;
+
+ if (test_thread_flag(TIF_SVE))
+ vq = sve_vq_from_vl(vl);
+
+ memset(reserved, 0, sizeof(reserved));
+
+ __put_user_error(SVE_MAGIC, &ctx->head.magic, err);
+ __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
+ &ctx->head.size, err);
+ __put_user_error(vl, &ctx->vl, err);
+ BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
+ err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
+
+ if (vq) {
+ /*
+ * This assumes that the SVE state has already been saved to
+ * the task struct by calling preserve_fpsimd_context().
+ */
+ err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
+ current->thread.sve_state,
+ SVE_SIG_REGS_SIZE(vq));
+ }
+
+ return err ? -EFAULT : 0;
+}
+
+static int restore_sve_fpsimd_context(struct user_ctxs *user)
+{
+ int err;
+ unsigned int vq;
+ struct fpsimd_state fpsimd;
+ struct sve_context sve;
+
+ if (__copy_from_user(&sve, user->sve, sizeof(sve)))
+ return -EFAULT;
+
+ if (sve.vl != current->thread.sve_vl)
+ return -EINVAL;
+
+ if (sve.head.size <= sizeof(*user->sve)) {
+ clear_thread_flag(TIF_SVE);
+ goto fpsimd_only;
+ }
+
+ vq = sve_vq_from_vl(sve.vl);
+
+ if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
+ return -EINVAL;
+
+ /*
+ * Careful: we are about to __copy_from_user() directly into
+ * thread.sve_state with preemption enabled, so protection is
+ * needed to prevent a racing context switch from writing stale
+ * registers back over the new data.
+ */
+
+ fpsimd_flush_task_state(current);
+ barrier();
+ /* From now, fpsimd_thread_switch() won't clear TIF_FOREIGN_FPSTATE */
+
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+ barrier();
+ /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
+
+ sve_alloc(current);
+ err = __copy_from_user(current->thread.sve_state,
+ (char __user const *)user->sve +
+ SVE_SIG_REGS_OFFSET,
+ SVE_SIG_REGS_SIZE(vq));
+ if (err)
+ return -EFAULT;
+
+ set_thread_flag(TIF_SVE);
+
+fpsimd_only:
+ /* copy the FP and status/control registers */
+ /* restore_sigframe() already checked that user->fpsimd != NULL. */
+ err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
+ sizeof(fpsimd.vregs));
+ __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
+ __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);
+
+ /* load the hardware registers from the fpsimd_state structure */
+ if (!err)
+ fpsimd_update_current_state(&fpsimd);
+
+ return err ? -EFAULT : 0;
+}
+
+#else /* ! CONFIG_ARM64_SVE */
+
+/* Turn any non-optimised-out attempts to use these into a link error: */
+extern int preserve_sve_context(void __user *ctx);
+extern int restore_sve_fpsimd_context(struct user_ctxs *user);
+
+#endif /* ! CONFIG_ARM64_SVE */
+
+
static int parse_user_sigframe(struct user_ctxs *user,
struct rt_sigframe __user *sf)
{
@@ -237,6 +346,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
char const __user *const sfp = (char const __user *)sf;
user->fpsimd = NULL;
+ user->sve = NULL;
if (!IS_ALIGNED((unsigned long)base, 16))
goto invalid;
@@ -287,6 +397,19 @@ static int parse_user_sigframe(struct user_ctxs *user,
/* ignore */
break;
+ case SVE_MAGIC:
+ if (!system_supports_sve())
+ goto invalid;
+
+ if (user->sve)
+ goto invalid;
+
+ if (size < sizeof(*user->sve))
+ goto invalid;
+
+ user->sve = (struct sve_context __user *)head;
+ break;
+
case EXTRA_MAGIC:
if (have_extra_context)
goto invalid;
@@ -343,6 +466,10 @@ static int parse_user_sigframe(struct user_ctxs *user,
*/
offset = 0;
limit = extra_size;
+
+ if (!access_ok(VERIFY_READ, base, limit))
+ goto invalid;
+
continue;
default:
@@ -359,9 +486,6 @@ static int parse_user_sigframe(struct user_ctxs *user,
}
done:
- if (!user->fpsimd)
- goto invalid;
-
return 0;
invalid:
@@ -395,8 +519,19 @@ static int restore_sigframe(struct pt_regs *regs,
if (err == 0)
err = parse_user_sigframe(&user, sf);
- if (err == 0)
- err = restore_fpsimd_context(user.fpsimd);
+ if (err == 0) {
+ if (!user.fpsimd)
+ return -EINVAL;
+
+ if (user.sve) {
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ err = restore_sve_fpsimd_context(&user);
+ } else {
+ err = restore_fpsimd_context(user.fpsimd);
+ }
+ }
return err;
}
@@ -455,6 +590,18 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user)
return err;
}
+ if (system_supports_sve()) {
+ unsigned int vq = 0;
+
+ if (test_thread_flag(TIF_SVE))
+ vq = sve_vq_from_vl(current->thread.sve_vl);
+
+ err = sigframe_alloc(user, &user->sve_offset,
+ SVE_SIG_CONTEXT_SIZE(vq));
+ if (err)
+ return err;
+ }
+
return sigframe_alloc_end(user);
}
@@ -496,6 +643,13 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
}
+ /* Scalable Vector Extension state, if present */
+ if (system_supports_sve() && err == 0 && user->sve_offset) {
+ struct sve_context __user *sve_ctx =
+ apply_user_offset(user, user->sve_offset);
+ err |= preserve_sve_context(sve_ctx);
+ }
+
if (err == 0 && user->extra_offset) {
char __user *sfp = (char __user *)user->sigframe;
char __user *userp =
@@ -595,6 +749,8 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct rt_sigframe __user *frame;
int err = 0;
+ fpsimd_signal_preserve_current_state();
+
if (get_sigframe(&user, ksig, regs))
return 1;
@@ -756,9 +912,12 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
addr_limit_user_check();
if (thread_flags & _TIF_NEED_RESCHED) {
+ /* Unmask Debug and SError for the next task */
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
schedule();
} else {
- local_irq_enable();
+ local_daif_restore(DAIF_PROCCTX);
if (thread_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
@@ -775,7 +934,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
fpsimd_restore_current_state();
}
- local_irq_disable();
+ local_daif_mask();
thread_flags = READ_ONCE(current_thread_info()->flags);
} while (thread_flags & _TIF_WORK_MASK);
}
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index e09bf5d15606..22711ee8e36c 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -239,7 +239,7 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
* Note that this also saves V16-31, which aren't visible
* in AArch32.
*/
- fpsimd_preserve_current_state();
+ fpsimd_signal_preserve_current_state();
/* Place structure header on the stack */
__put_user_error(magic, &frame->magic, err);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 9f7195a5773e..551eb07c53b6 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -47,6 +47,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
+#include <asm/daifflags.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
@@ -216,6 +217,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
*/
asmlinkage void secondary_start_kernel(void)
{
+ u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
struct mm_struct *mm = &init_mm;
unsigned int cpu;
@@ -265,14 +267,14 @@ asmlinkage void secondary_start_kernel(void)
* the CPU migration code to notice that the CPU is online
* before we continue.
*/
- pr_info("CPU%u: Booted secondary processor [%08x]\n",
- cpu, read_cpuid_id());
+ pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
+ cpu, (unsigned long)mpidr,
+ read_cpuid_id());
update_cpu_boot_status(CPU_BOOT_SUCCESS);
set_cpu_online(cpu, true);
complete(&cpu_running);
- local_irq_enable();
- local_async_enable();
+ local_daif_restore(DAIF_PROCCTX);
/*
* OK, it's off to the idle thread for us
@@ -368,10 +370,6 @@ void __cpu_die(unsigned int cpu)
/*
* Called from the idle thread for the CPU which has been shutdown.
*
- * Note that we disable IRQs here, but do not re-enable them
- * before returning to the caller. This is also the behaviour
- * of the other hotplug-cpu capable cores, so presumably coming
- * out of idle fixes this.
*/
void cpu_die(void)
{
@@ -379,7 +377,7 @@ void cpu_die(void)
idle_task_exit();
- local_irq_disable();
+ local_daif_mask();
/* Tell __cpu_die() that this CPU is now safe to dispose of */
(void)cpu_report_death();
@@ -837,7 +835,7 @@ static void ipi_cpu_stop(unsigned int cpu)
{
set_cpu_online(cpu, false);
- local_irq_disable();
+ local_daif_mask();
while (1)
cpu_relax();
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 77cd655e6eb7..3fe5ad884418 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -5,6 +5,7 @@
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
+#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
#include <asm/pgtable.h>
@@ -12,7 +13,6 @@
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
-#include <asm/tlbflush.h>
/*
* This is allocated by cpu_suspend_init(), and used to store a pointer to
@@ -58,7 +58,7 @@ void notrace __cpu_suspend_exit(void)
/*
* Restore HW breakpoint registers to sane values
* before debug exceptions are possibly reenabled
- * through local_dbg_restore.
+ * by cpu_suspend()'s local_daif_restore() call.
*/
if (hw_breakpoint_restore)
hw_breakpoint_restore(cpu);
@@ -82,7 +82,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* updates to mdscr register (saved and restored along with
* general purpose registers) from kernel debuggers.
*/
- local_dbg_save(flags);
+ flags = local_daif_save();
/*
* Function graph tracer state gets inconsistent when the kernel
@@ -115,7 +115,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* restored, so from this point onwards, debugging is fully
* re-enabled if it was enabled when the core started shutdown.
*/
- local_dbg_restore(flags);
+ local_daif_restore(flags);
return ret;
}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 8383af15a759..3d3588fcd1c7 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -38,6 +38,7 @@
#include <asm/atomic.h>
#include <asm/bug.h>
+#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
@@ -58,55 +59,9 @@ static const char *handler[]= {
int show_unhandled_signals = 1;
-/*
- * Dump out the contents of some kernel memory nicely...
- */
-static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
- unsigned long top)
-{
- unsigned long first;
- mm_segment_t fs;
- int i;
-
- /*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space.
- */
- fs = get_fs();
- set_fs(KERNEL_DS);
-
- printk("%s%s(0x%016lx to 0x%016lx)\n", lvl, str, bottom, top);
-
- for (first = bottom & ~31; first < top; first += 32) {
- unsigned long p;
- char str[sizeof(" 12345678") * 8 + 1];
-
- memset(str, ' ', sizeof(str));
- str[sizeof(str) - 1] = '\0';
-
- for (p = first, i = 0; i < (32 / 8)
- && p < top; i++, p += 8) {
- if (p >= bottom && p < top) {
- unsigned long val;
-
- if (__get_user(val, (unsigned long *)p) == 0)
- sprintf(str + i * 17, " %016lx", val);
- else
- sprintf(str + i * 17, " ????????????????");
- }
- }
- printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
- }
-
- set_fs(fs);
-}
-
static void dump_backtrace_entry(unsigned long where)
{
- /*
- * Note that 'where' can have a physical address, but it's not handled.
- */
- print_ip_sym(where);
+ printk(" %pS\n", (void *)where);
}
static void __dump_instr(const char *lvl, struct pt_regs *regs)
@@ -171,10 +126,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
skip = !!regs;
printk("Call trace:\n");
- while (1) {
- unsigned long stack;
- int ret;
-
+ do {
/* skip until specified stack frame */
if (!skip) {
dump_backtrace_entry(frame.pc);
@@ -189,17 +141,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
*/
dump_backtrace_entry(regs->pc);
}
- ret = unwind_frame(tsk, &frame);
- if (ret < 0)
- break;
- if (in_entry_text(frame.pc)) {
- stack = frame.fp - offsetof(struct pt_regs, stackframe);
-
- if (on_accessible_stack(tsk, stack))
- dump_mem("", "Exception stack", stack,
- stack + sizeof(struct pt_regs));
- }
- }
+ } while (!unwind_frame(tsk, &frame));
put_task_stack(tsk);
}
@@ -293,6 +235,17 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
}
}
+void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
+{
+ regs->pc += size;
+
+ /*
+ * If we were single stepping, we want to get the step exception after
+ * we return from the trap.
+ */
+ user_fastforward_single_step(current);
+}
+
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);
@@ -358,8 +311,8 @@ exit:
return fn ? fn(regs, instr) : 1;
}
-static void force_signal_inject(int signal, int code, struct pt_regs *regs,
- unsigned long address)
+void force_signal_inject(int signal, int code, struct pt_regs *regs,
+ unsigned long address)
{
siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
@@ -373,7 +326,7 @@ static void force_signal_inject(int signal, int code, struct pt_regs *regs,
desc = "illegal memory access";
break;
default:
- desc = "bad mode";
+ desc = "unknown or unrecoverable error";
break;
}
@@ -480,7 +433,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
if (ret)
arm64_notify_segfault(regs, address);
else
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
@@ -490,7 +443,7 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
pt_regs_write_reg(regs, rt, val);
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
@@ -498,7 +451,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
@@ -506,7 +459,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
pt_regs_write_reg(regs, rt, arch_timer_get_rate());
- regs->pc += 4;
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
struct sys64_hook {
@@ -603,6 +556,7 @@ static const char *esr_class_str[] = {
[ESR_ELx_EC_HVC64] = "HVC (AArch64)",
[ESR_ELx_EC_SMC64] = "SMC (AArch64)",
[ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
+ [ESR_ELx_EC_SVE] = "SVE",
[ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
[ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
[ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
@@ -642,7 +596,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
esr_get_class_string(esr));
die("Oops - bad mode", regs, 0);
- local_irq_disable();
+ local_daif_mask();
panic("bad mode");
}
@@ -708,6 +662,19 @@ asmlinkage void handle_bad_stack(struct pt_regs *regs)
}
#endif
+asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
+{
+ nmi_enter();
+
+ console_verbose();
+
+ pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
+ smp_processor_id(), esr, esr_get_class_string(esr));
+ __show_regs(regs);
+
+ panic("Asynchronous SError Interrupt");
+}
+
void __pte_error(const char *file, int line, unsigned long val)
{
pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
@@ -761,7 +728,7 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr)
}
/* If thread survives, skip over the BUG instruction and continue: */
- regs->pc += AARCH64_INSN_SIZE; /* skip BRK and resume */
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
return DBG_HOOK_HANDLED;
}
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index 76320e920965..c39872a7b03c 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -309,7 +309,7 @@ ENTRY(__kernel_clock_getres)
b.ne 4f
ldr x2, 6f
2:
- cbz w1, 3f
+ cbz x1, 3f
stp xzr, x2, [x1]
3: /* res == NULL. */
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 7debb74843a0..b71247995469 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -147,6 +147,13 @@ static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ /* Until SVE is supported for guests: */
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
static exit_handle_fn arm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
@@ -160,6 +167,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_HVC64] = handle_hvc,
[ESR_ELx_EC_SMC64] = handle_smc,
[ESR_ELx_EC_SYS64] = kvm_handle_sys_reg,
+ [ESR_ELx_EC_SVE] = handle_sve,
[ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
[ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort,
[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index f5154ed3da6c..321c9c05dd9e 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -65,16 +65,6 @@
default: write_debug(ptr[0], reg, 0); \
}
-#define PMSCR_EL1 sys_reg(3, 0, 9, 9, 0)
-
-#define PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0)
-#define PMBLIMITR_EL1_E BIT(0)
-
-#define PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7)
-#define PMBIDR_EL1_P BIT(4)
-
-#define psb_csync() asm volatile("hint #17")
-
static void __hyp_text __debug_save_spe_vhe(u64 *pmscr_el1)
{
/* The vcpu can run, but it can't hide. */
@@ -90,18 +80,18 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
return;
/* Yes; is it owned by EL3? */
- reg = read_sysreg_s(PMBIDR_EL1);
- if (reg & PMBIDR_EL1_P)
+ reg = read_sysreg_s(SYS_PMBIDR_EL1);
+ if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT))
return;
/* No; is the host actually using the thing? */
- reg = read_sysreg_s(PMBLIMITR_EL1);
- if (!(reg & PMBLIMITR_EL1_E))
+ reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
+ if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)))
return;
/* Yes; save the control register and disable data generation */
- *pmscr_el1 = read_sysreg_s(PMSCR_EL1);
- write_sysreg_s(0, PMSCR_EL1);
+ *pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
+ write_sysreg_s(0, SYS_PMSCR_EL1);
isb();
/* Now drain all buffered data to memory */
@@ -122,7 +112,7 @@ static void __hyp_text __debug_restore_spe(u64 pmscr_el1)
isb();
/* Re-enable data generation */
- write_sysreg_s(pmscr_el1, PMSCR_EL1);
+ write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
}
void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 945e79c641c4..951f3ebaff26 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -48,7 +48,7 @@ static void __hyp_text __activate_traps_vhe(void)
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
- val &= ~CPACR_EL1_FPEN;
+ val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
write_sysreg(val, cpacr_el1);
write_sysreg(__kvm_hyp_vector, vbar_el1);
@@ -59,7 +59,7 @@ static void __hyp_text __activate_traps_nvhe(void)
u64 val;
val = CPTR_EL2_DEFAULT;
- val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
+ val |= CPTR_EL2_TTA | CPTR_EL2_TFP | CPTR_EL2_TZ;
write_sysreg(val, cptr_el2);
}
@@ -81,11 +81,17 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
* it will cause an exception.
*/
val = vcpu->arch.hcr_el2;
+
if (!(val & HCR_RW) && system_supports_fpsimd()) {
write_sysreg(1 << 30, fpexc32_el2);
isb();
}
+
+ if (val & HCR_RW) /* for AArch64 only: */
+ val |= HCR_TID3; /* TID3: trap feature register accesses */
+
write_sysreg(val, hcr_el2);
+
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
/*
@@ -111,7 +117,7 @@ static void __hyp_text __deactivate_traps_vhe(void)
write_sysreg(mdcr_el2, mdcr_el2);
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
- write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
+ write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
write_sysreg(vectors, vbar_el1);
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2e070d3baf9f..a0ee9b05e3d4 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -23,6 +23,7 @@
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
+#include <linux/printk.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
@@ -892,6 +893,146 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu,
return true;
}
+/* Read a sanitised cpufeature ID register by sys_reg_desc */
+static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
+{
+ u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
+ (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
+
+ if (id == SYS_ID_AA64PFR0_EL1) {
+ if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
+ pr_err_once("kvm [%i]: SVE unsupported for guests, suppressing\n",
+ task_pid_nr(current));
+
+ val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
+ }
+
+ return val;
+}
+
+/* cpufeature ID register access trap handlers */
+
+static bool __access_id_reg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r,
+ bool raz)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ p->regval = read_id_reg(r, raz);
+ return true;
+}
+
+static bool access_id_reg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ return __access_id_reg(vcpu, p, r, false);
+}
+
+static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ return __access_id_reg(vcpu, p, r, true);
+}
+
+static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
+static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
+static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
+
+/*
+ * cpufeature ID register user accessors
+ *
+ * For now, these registers are immutable for userspace, so no values
+ * are stored, and for set_id_reg() we don't allow the effective value
+ * to be changed.
+ */
+static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
+ bool raz)
+{
+ const u64 id = sys_reg_to_index(rd);
+ const u64 val = read_id_reg(rd, raz);
+
+ return reg_to_user(uaddr, &val, id);
+}
+
+static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
+ bool raz)
+{
+ const u64 id = sys_reg_to_index(rd);
+ int err;
+ u64 val;
+
+ err = reg_from_user(&val, uaddr, id);
+ if (err)
+ return err;
+
+ /* This is what we mean by invariant: you can't change it. */
+ if (val != read_id_reg(rd, raz))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ return __get_id_reg(rd, uaddr, false);
+}
+
+static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ return __set_id_reg(rd, uaddr, false);
+}
+
+static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ return __get_id_reg(rd, uaddr, true);
+}
+
+static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ return __set_id_reg(rd, uaddr, true);
+}
+
+/* sys_reg_desc initialiser for known cpufeature ID registers */
+#define ID_SANITISED(name) { \
+ SYS_DESC(SYS_##name), \
+ .access = access_id_reg, \
+ .get_user = get_id_reg, \
+ .set_user = set_id_reg, \
+}
+
+/*
+ * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
+ * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
+ * (1 <= crm < 8, 0 <= Op2 < 8).
+ */
+#define ID_UNALLOCATED(crm, op2) { \
+ Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
+ .access = access_raz_id_reg, \
+ .get_user = get_raz_id_reg, \
+ .set_user = set_raz_id_reg, \
+}
+
+/*
+ * sys_reg_desc initialiser for known ID registers that we hide from guests.
+ * For now, these are exposed just like unallocated ID regs: they appear
+ * RAZ for the guest.
+ */
+#define ID_HIDDEN(name) { \
+ SYS_DESC(SYS_##name), \
+ .access = access_raz_id_reg, \
+ .get_user = get_raz_id_reg, \
+ .set_user = set_raz_id_reg, \
+}
+
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -944,6 +1085,84 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
+
+ /*
+ * ID regs: all ID_SANITISED() entries here must have corresponding
+ * entries in arm64_ftr_regs[].
+ */
+
+ /* AArch64 mappings of the AArch32 ID registers */
+ /* CRm=1 */
+ ID_SANITISED(ID_PFR0_EL1),
+ ID_SANITISED(ID_PFR1_EL1),
+ ID_SANITISED(ID_DFR0_EL1),
+ ID_HIDDEN(ID_AFR0_EL1),
+ ID_SANITISED(ID_MMFR0_EL1),
+ ID_SANITISED(ID_MMFR1_EL1),
+ ID_SANITISED(ID_MMFR2_EL1),
+ ID_SANITISED(ID_MMFR3_EL1),
+
+ /* CRm=2 */
+ ID_SANITISED(ID_ISAR0_EL1),
+ ID_SANITISED(ID_ISAR1_EL1),
+ ID_SANITISED(ID_ISAR2_EL1),
+ ID_SANITISED(ID_ISAR3_EL1),
+ ID_SANITISED(ID_ISAR4_EL1),
+ ID_SANITISED(ID_ISAR5_EL1),
+ ID_SANITISED(ID_MMFR4_EL1),
+ ID_UNALLOCATED(2,7),
+
+ /* CRm=3 */
+ ID_SANITISED(MVFR0_EL1),
+ ID_SANITISED(MVFR1_EL1),
+ ID_SANITISED(MVFR2_EL1),
+ ID_UNALLOCATED(3,3),
+ ID_UNALLOCATED(3,4),
+ ID_UNALLOCATED(3,5),
+ ID_UNALLOCATED(3,6),
+ ID_UNALLOCATED(3,7),
+
+ /* AArch64 ID registers */
+ /* CRm=4 */
+ ID_SANITISED(ID_AA64PFR0_EL1),
+ ID_SANITISED(ID_AA64PFR1_EL1),
+ ID_UNALLOCATED(4,2),
+ ID_UNALLOCATED(4,3),
+ ID_UNALLOCATED(4,4),
+ ID_UNALLOCATED(4,5),
+ ID_UNALLOCATED(4,6),
+ ID_UNALLOCATED(4,7),
+
+ /* CRm=5 */
+ ID_SANITISED(ID_AA64DFR0_EL1),
+ ID_SANITISED(ID_AA64DFR1_EL1),
+ ID_UNALLOCATED(5,2),
+ ID_UNALLOCATED(5,3),
+ ID_HIDDEN(ID_AA64AFR0_EL1),
+ ID_HIDDEN(ID_AA64AFR1_EL1),
+ ID_UNALLOCATED(5,6),
+ ID_UNALLOCATED(5,7),
+
+ /* CRm=6 */
+ ID_SANITISED(ID_AA64ISAR0_EL1),
+ ID_SANITISED(ID_AA64ISAR1_EL1),
+ ID_UNALLOCATED(6,2),
+ ID_UNALLOCATED(6,3),
+ ID_UNALLOCATED(6,4),
+ ID_UNALLOCATED(6,5),
+ ID_UNALLOCATED(6,6),
+ ID_UNALLOCATED(6,7),
+
+ /* CRm=7 */
+ ID_SANITISED(ID_AA64MMFR0_EL1),
+ ID_SANITISED(ID_AA64MMFR1_EL1),
+ ID_SANITISED(ID_AA64MMFR2_EL1),
+ ID_UNALLOCATED(7,3),
+ ID_UNALLOCATED(7,4),
+ ID_UNALLOCATED(7,5),
+ ID_UNALLOCATED(7,6),
+ ID_UNALLOCATED(7,7),
+
{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
@@ -1790,8 +2009,8 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
if (!r)
r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
- /* Not saved in the sys_reg array? */
- if (r && !r->reg)
+ /* Not saved in the sys_reg array and not otherwise accessible? */
+ if (r && !(r->reg || r->get_user))
r = NULL;
return r;
@@ -1815,20 +2034,6 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
-FUNCTION_INVARIANT(id_pfr0_el1)
-FUNCTION_INVARIANT(id_pfr1_el1)
-FUNCTION_INVARIANT(id_dfr0_el1)
-FUNCTION_INVARIANT(id_afr0_el1)
-FUNCTION_INVARIANT(id_mmfr0_el1)
-FUNCTION_INVARIANT(id_mmfr1_el1)
-FUNCTION_INVARIANT(id_mmfr2_el1)
-FUNCTION_INVARIANT(id_mmfr3_el1)
-FUNCTION_INVARIANT(id_isar0_el1)
-FUNCTION_INVARIANT(id_isar1_el1)
-FUNCTION_INVARIANT(id_isar2_el1)
-FUNCTION_INVARIANT(id_isar3_el1)
-FUNCTION_INVARIANT(id_isar4_el1)
-FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
@@ -1836,20 +2041,6 @@ FUNCTION_INVARIANT(aidr_el1)
static struct sys_reg_desc invariant_sys_regs[] = {
{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
- { SYS_DESC(SYS_ID_PFR0_EL1), NULL, get_id_pfr0_el1 },
- { SYS_DESC(SYS_ID_PFR1_EL1), NULL, get_id_pfr1_el1 },
- { SYS_DESC(SYS_ID_DFR0_EL1), NULL, get_id_dfr0_el1 },
- { SYS_DESC(SYS_ID_AFR0_EL1), NULL, get_id_afr0_el1 },
- { SYS_DESC(SYS_ID_MMFR0_EL1), NULL, get_id_mmfr0_el1 },
- { SYS_DESC(SYS_ID_MMFR1_EL1), NULL, get_id_mmfr1_el1 },
- { SYS_DESC(SYS_ID_MMFR2_EL1), NULL, get_id_mmfr2_el1 },
- { SYS_DESC(SYS_ID_MMFR3_EL1), NULL, get_id_mmfr3_el1 },
- { SYS_DESC(SYS_ID_ISAR0_EL1), NULL, get_id_isar0_el1 },
- { SYS_DESC(SYS_ID_ISAR1_EL1), NULL, get_id_isar1_el1 },
- { SYS_DESC(SYS_ID_ISAR2_EL1), NULL, get_id_isar2_el1 },
- { SYS_DESC(SYS_ID_ISAR3_EL1), NULL, get_id_isar3_el1 },
- { SYS_DESC(SYS_ID_ISAR4_EL1), NULL, get_id_isar4_el1 },
- { SYS_DESC(SYS_ID_ISAR5_EL1), NULL, get_id_isar5_el1 },
{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
@@ -2079,12 +2270,31 @@ static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
return true;
}
+static int walk_one_sys_reg(const struct sys_reg_desc *rd,
+ u64 __user **uind,
+ unsigned int *total)
+{
+ /*
+ * Ignore registers we trap but don't save,
+ * and for which no custom user accessor is provided.
+ */
+ if (!(rd->reg || rd->get_user))
+ return 0;
+
+ if (!copy_reg_to_user(rd, uind))
+ return -EFAULT;
+
+ (*total)++;
+ return 0;
+}
+
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
const struct sys_reg_desc *i1, *i2, *end1, *end2;
unsigned int total = 0;
size_t num;
+ int err;
/* We check for duplicates here, to allow arch-specific overrides. */
i1 = get_target_table(vcpu->arch.target, true, &num);
@@ -2098,21 +2308,13 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
while (i1 || i2) {
int cmp = cmp_sys_reg(i1, i2);
/* target-specific overrides generic entry. */
- if (cmp <= 0) {
- /* Ignore registers we trap but don't save. */
- if (i1->reg) {
- if (!copy_reg_to_user(i1, &uind))
- return -EFAULT;
- total++;
- }
- } else {
- /* Ignore registers we trap but don't save. */
- if (i2->reg) {
- if (!copy_reg_to_user(i2, &uind))
- return -EFAULT;
- total++;
- }
- }
+ if (cmp <= 0)
+ err = walk_one_sys_reg(i1, &uind, &total);
+ else
+ err = walk_one_sys_reg(i2, &uind, &total);
+
+ if (err)
+ return err;
if (cmp <= 0 && ++i1 == end1)
i1 = NULL;
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 9a8cb96555d6..4e696f96451f 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -3,7 +3,7 @@ lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
copy_to_user.o copy_in_user.o copy_page.o \
clear_page.o memchr.o memcpy.o memmove.o memset.o \
memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
- strchr.o strrchr.o
+ strchr.o strrchr.o tishift.o
# Tell the compiler to treat all general purpose registers (with the
# exception of the IP registers, which are already handled by the caller
diff --git a/arch/arm64/lib/delay.c b/arch/arm64/lib/delay.c
index dad4ec9bbfd1..e48ac402e7be 100644
--- a/arch/arm64/lib/delay.c
+++ b/arch/arm64/lib/delay.c
@@ -24,10 +24,28 @@
#include <linux/module.h>
#include <linux/timex.h>
+#include <clocksource/arm_arch_timer.h>
+
+#define USECS_TO_CYCLES(time_usecs) \
+ xloops_to_cycles((time_usecs) * 0x10C7UL)
+
+static inline unsigned long xloops_to_cycles(unsigned long xloops)
+{
+ return (xloops * loops_per_jiffy * HZ) >> 32;
+}
+
void __delay(unsigned long cycles)
{
cycles_t start = get_cycles();
+ if (arch_timer_evtstrm_available()) {
+ const cycles_t timer_evt_period =
+ USECS_TO_CYCLES(ARCH_TIMER_EVT_STREAM_PERIOD_US);
+
+ while ((get_cycles() - start + timer_evt_period) < cycles)
+ wfe();
+ }
+
while ((get_cycles() - start) < cycles)
cpu_relax();
}
@@ -35,10 +53,7 @@ EXPORT_SYMBOL(__delay);
inline void __const_udelay(unsigned long xloops)
{
- unsigned long loops;
-
- loops = xloops * loops_per_jiffy * HZ;
- __delay(loops >> 32);
+ __delay(xloops_to_cycles(xloops));
}
EXPORT_SYMBOL(__const_udelay);
diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S
new file mode 100644
index 000000000000..0179a43cc045
--- /dev/null
+++ b/arch/arm64/lib/tishift.S
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+ENTRY(__ashlti3)
+ cbz x2, 1f
+ mov x3, #64
+ sub x3, x3, x2
+ cmp x3, #0
+ b.le 2f
+ lsl x1, x1, x2
+ lsr x3, x0, x3
+ lsl x2, x0, x2
+ orr x1, x1, x3
+ mov x0, x2
+1:
+ ret
+2:
+ neg w1, w3
+ mov x2, #0
+ lsl x1, x0, x1
+ mov x0, x2
+ ret
+ENDPROC(__ashlti3)
+
+ENTRY(__ashrti3)
+ cbz x2, 3f
+ mov x3, #64
+ sub x3, x3, x2
+ cmp x3, #0
+ b.le 4f
+ lsr x0, x0, x2
+ lsl x3, x1, x3
+ asr x2, x1, x2
+ orr x0, x0, x3
+ mov x1, x2
+3:
+ ret
+4:
+ neg w0, w3
+ asr x2, x1, #63
+ asr x0, x1, x0
+ mov x1, x2
+ ret
+ENDPROC(__ashrti3)
+
+ENTRY(__lshrti3)
+ cbz x2, 1f
+ mov x3, #64
+ sub x3, x3, x2
+ cmp x3, #0
+ b.le 2f
+ lsr x0, x0, x2
+ lsl x3, x1, x3
+ lsr x2, x1, x2
+ orr x0, x0, x3
+ mov x1, x2
+1:
+ ret
+2:
+ neg w0, w3
+ mov x2, #0
+ lsr x0, x1, x0
+ mov x1, x2
+ ret
+ENDPROC(__lshrti3)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 614af886b7ef..b45c5bcaeccb 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -166,7 +166,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
/* create a coherent mapping */
page = virt_to_page(ptr);
coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
- prot, NULL);
+ prot, __builtin_return_address(0));
if (!coherent_ptr)
goto no_map;
@@ -303,8 +303,7 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
unsigned long pfn, size_t size)
{
int ret = -ENXIO;
- unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
- PAGE_SHIFT;
+ unsigned long nr_vma_pages = vma_pages(vma);
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index b64958b23a7f..22168cd0dde7 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -105,13 +105,11 @@ static void data_abort_decode(unsigned int esr)
(esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT);
}
-/*
- * Decode mem abort information
- */
static void mem_abort_decode(unsigned int esr)
{
pr_alert("Mem abort info:\n");
+ pr_alert(" ESR = 0x%08x\n", esr);
pr_alert(" Exception class = %s, IL = %u bits\n",
esr_get_class_string(esr),
(esr & ESR_ELx_IL) ? 32 : 16);
@@ -249,9 +247,6 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs,
return false;
}
-/*
- * The kernel tried to access some page that wasn't present.
- */
static void __do_kernel_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -264,9 +259,6 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
return;
- /*
- * No handler, we'll have to terminate things with extreme prejudice.
- */
bust_spinlocks(1);
if (is_permission_fault(esr, regs, addr)) {
@@ -291,10 +283,6 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
do_exit(SIGKILL);
}
-/*
- * Something tried to access memory that isn't in our memory map. User mode
- * accesses just cause a SIGSEGV
- */
static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
unsigned int esr, unsigned int sig, int code,
struct pt_regs *regs, int fault)
@@ -559,23 +547,6 @@ no_context:
return 0;
}
-/*
- * First Level Translation Fault Handler
- *
- * We enter here because the first level page table doesn't contain a valid
- * entry for the address.
- *
- * If the address is in kernel space (>= TASK_SIZE), then we are probably
- * faulting in the vmalloc() area.
- *
- * If the init_task's first level page tables contains the relevant entry, we
- * copy the it to this task. If not, we send the process a signal, fixup the
- * exception, or oops the kernel.
- *
- * NOTE! We MUST NOT take any locks for this case. We may be in an interrupt
- * or a critical region, and should only copy the information from the master
- * page table, nothing more.
- */
static int __kprobes do_translation_fault(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
@@ -594,18 +565,11 @@ static int do_alignment_fault(unsigned long addr, unsigned int esr,
return 0;
}
-/*
- * This abort handler always returns "fault".
- */
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
- return 1;
+ return 1; /* "fault" */
}
-/*
- * This abort handler deals with Synchronous External Abort.
- * It calls notifiers, and then returns "fault".
- */
static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
struct siginfo info;
@@ -668,14 +632,14 @@ static const struct fault_info fault_info[] = {
{ do_sea, SIGBUS, 0, "level 1 (translation table walk)" },
{ do_sea, SIGBUS, 0, "level 2 (translation table walk)" },
{ do_sea, SIGBUS, 0, "level 3 (translation table walk)" },
- { do_sea, SIGBUS, 0, "synchronous parity or ECC error" },
+ { do_sea, SIGBUS, 0, "synchronous parity or ECC error" }, // Reserved when RAS is implemented
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
- { do_sea, SIGBUS, 0, "level 0 synchronous parity error (translation table walk)" },
- { do_sea, SIGBUS, 0, "level 1 synchronous parity error (translation table walk)" },
- { do_sea, SIGBUS, 0, "level 2 synchronous parity error (translation table walk)" },
- { do_sea, SIGBUS, 0, "level 3 synchronous parity error (translation table walk)" },
+ { do_sea, SIGBUS, 0, "level 0 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
+ { do_sea, SIGBUS, 0, "level 1 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
+ { do_sea, SIGBUS, 0, "level 2 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
+ { do_sea, SIGBUS, 0, "level 3 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
{ do_bad, SIGBUS, 0, "unknown 32" },
{ do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" },
{ do_bad, SIGBUS, 0, "unknown 34" },
@@ -693,7 +657,7 @@ static const struct fault_info fault_info[] = {
{ do_bad, SIGBUS, 0, "unknown 46" },
{ do_bad, SIGBUS, 0, "unknown 47" },
{ do_bad, SIGBUS, 0, "TLB conflict abort" },
- { do_bad, SIGBUS, 0, "unknown 49" },
+ { do_bad, SIGBUS, 0, "Unsupported atomic hardware update fault" },
{ do_bad, SIGBUS, 0, "unknown 50" },
{ do_bad, SIGBUS, 0, "unknown 51" },
{ do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
@@ -710,13 +674,6 @@ static const struct fault_info fault_info[] = {
{ do_bad, SIGBUS, 0, "unknown 63" },
};
-/*
- * Handle Synchronous External Aborts that occur in a guest kernel.
- *
- * The return value will be zero if the SEA was successfully handled
- * and non-zero if there was an error processing the error or there was
- * no error to process.
- */
int handle_guest_sea(phys_addr_t addr, unsigned int esr)
{
int ret = -ENOENT;
@@ -727,9 +684,6 @@ int handle_guest_sea(phys_addr_t addr, unsigned int esr)
return ret;
}
-/*
- * Dispatch a data abort to the relevant handler.
- */
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -739,11 +693,14 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
if (!inf->fn(addr, esr, regs))
return;
- pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n",
- inf->name, esr, addr);
+ pr_alert("Unhandled fault: %s at 0x%016lx\n",
+ inf->name, addr);
mem_abort_decode(esr);
+ if (!user_mode(regs))
+ show_pte(addr);
+
info.si_signo = inf->sig;
info.si_errno = 0;
info.si_code = inf->code;
@@ -751,9 +708,6 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
arm64_notify_die("", regs, &info, esr);
}
-/*
- * Handle stack alignment exceptions.
- */
asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
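
The fault.c hunks above keep the existing shape: every ESR fault code is dispatched through the fault_info table, and an entry's handler either deals with the fault (returns 0) or falls through to the report-and-signal path, which now also dumps the PTE for kernel-mode faults. A minimal user-space sketch of that table-driven dispatch; the types, handler names and indexing here are illustrative, not the kernel's:

#include <stdio.h>

struct fault_info {
	int (*fn)(unsigned long addr, unsigned int esr);
	int sig;
	const char *name;
};

/* toy handlers: return 0 when the fault was dealt with, non-zero otherwise */
static int handled_ok(unsigned long addr, unsigned int esr)
{
	(void)addr; (void)esr;
	return 0;
}

static int always_fault(unsigned long addr, unsigned int esr)
{
	(void)addr; (void)esr;
	return 1;
}

static const struct fault_info table[] = {
	{ handled_ok,   0, "translation fault" },
	{ always_fault, 7, "unknown" },
};

static void do_mem_abort(unsigned long addr, unsigned int esr)
{
	const struct fault_info *inf = &table[esr & 1];	/* toy indexing */

	if (!inf->fn(addr, esr))
		return;		/* handler dealt with it */

	printf("Unhandled fault: %s at 0x%016lx\n", inf->name, addr);
	/* the real path then decodes the ESR, dumps the PTE for kernel
	 * faults and raises inf->sig via arm64_notify_die() */
}

int main(void)
{
	do_mem_abort(0xdead0000, 0);	/* handled silently */
	do_mem_abort(0xdead0000, 1);	/* reported */
	return 0;
}

The table-driven shape is what lets the hunks above change behaviour for a single fault code (entry 49, the RAS-reserved parity entries) by touching a single row.
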
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 81f03959a4ab..acba49fb5aac 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -11,6 +11,7 @@
*/
#define pr_fmt(fmt) "kasan: " fmt
+#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
@@ -35,77 +36,117 @@ static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
* with the physical address from __pa_symbol.
*/
-static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
- unsigned long end)
+static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
- pte_t *pte;
- unsigned long next;
+ void *p = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_ACCESSIBLE, node);
+ return __pa(p);
+}
+
+static pte_t *__init kasan_pte_offset(pmd_t *pmd, unsigned long addr, int node,
+ bool early)
+{
+ if (pmd_none(*pmd)) {
+ phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
+ : kasan_alloc_zeroed_page(node);
+ __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
+ }
+
+ return early ? pte_offset_kimg(pmd, addr)
+ : pte_offset_kernel(pmd, addr);
+}
- if (pmd_none(*pmd))
- __pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);
+static pmd_t *__init kasan_pmd_offset(pud_t *pud, unsigned long addr, int node,
+ bool early)
+{
+ if (pud_none(*pud)) {
+ phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
+ : kasan_alloc_zeroed_page(node);
+ __pud_populate(pud, pmd_phys, PMD_TYPE_TABLE);
+ }
+
+ return early ? pmd_offset_kimg(pud, addr) : pmd_offset(pud, addr);
+}
+
+static pud_t *__init kasan_pud_offset(pgd_t *pgd, unsigned long addr, int node,
+ bool early)
+{
+ if (pgd_none(*pgd)) {
+ phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
+ : kasan_alloc_zeroed_page(node);
+ __pgd_populate(pgd, pud_phys, PMD_TYPE_TABLE);
+ }
+
+ return early ? pud_offset_kimg(pgd, addr) : pud_offset(pgd, addr);
+}
+
+static void __init kasan_pte_populate(pmd_t *pmd, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ pte_t *pte = kasan_pte_offset(pmd, addr, node, early);
- pte = pte_offset_kimg(pmd, addr);
do {
+ phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
+ : kasan_alloc_zeroed_page(node);
next = addr + PAGE_SIZE;
- set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
- PAGE_KERNEL));
+ set_pte(pte, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
} while (pte++, addr = next, addr != end && pte_none(*pte));
}
-static void __init kasan_early_pmd_populate(pud_t *pud,
- unsigned long addr,
- unsigned long end)
+static void __init kasan_pmd_populate(pud_t *pud, unsigned long addr,
+ unsigned long end, int node, bool early)
{
- pmd_t *pmd;
unsigned long next;
+ pmd_t *pmd = kasan_pmd_offset(pud, addr, node, early);
- if (pud_none(*pud))
- __pud_populate(pud, __pa_symbol(kasan_zero_pmd), PMD_TYPE_TABLE);
-
- pmd = pmd_offset_kimg(pud, addr);
do {
next = pmd_addr_end(addr, end);
- kasan_early_pte_populate(pmd, addr, next);
+ kasan_pte_populate(pmd, addr, next, node, early);
} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
}
-static void __init kasan_early_pud_populate(pgd_t *pgd,
- unsigned long addr,
- unsigned long end)
+static void __init kasan_pud_populate(pgd_t *pgd, unsigned long addr,
+ unsigned long end, int node, bool early)
{
- pud_t *pud;
unsigned long next;
+ pud_t *pud = kasan_pud_offset(pgd, addr, node, early);
- if (pgd_none(*pgd))
- __pgd_populate(pgd, __pa_symbol(kasan_zero_pud), PUD_TYPE_TABLE);
-
- pud = pud_offset_kimg(pgd, addr);
do {
next = pud_addr_end(addr, end);
- kasan_early_pmd_populate(pud, addr, next);
+ kasan_pmd_populate(pud, addr, next, node, early);
} while (pud++, addr = next, addr != end && pud_none(*pud));
}
-static void __init kasan_map_early_shadow(void)
+static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
+ int node, bool early)
{
- unsigned long addr = KASAN_SHADOW_START;
- unsigned long end = KASAN_SHADOW_END;
unsigned long next;
pgd_t *pgd;
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
- kasan_early_pud_populate(pgd, addr, next);
+ kasan_pud_populate(pgd, addr, next, node, early);
} while (pgd++, addr = next, addr != end);
}
+/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
- kasan_map_early_shadow();
+ kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
+ true);
+}
+
+/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
+static void __init kasan_map_populate(unsigned long start, unsigned long end,
+ int node)
+{
+ kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}
/*
@@ -142,8 +183,8 @@ void __init kasan_init(void)
struct memblock_region *reg;
int i;
- kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
- kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);
+ kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
+ kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));
mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
@@ -161,19 +202,8 @@ void __init kasan_init(void)
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
- vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
- pfn_to_nid(virt_to_pfn(lm_alias(_text))));
-
- /*
- * vmemmap_populate() has populated the shadow region that covers the
- * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
- * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
- * kasan_populate_zero_shadow() from replacing the page table entries
- * (PMD or PTE) at the edges of the shadow region for the kernel
- * image.
- */
- kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
- kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);
+ kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
+ pfn_to_nid(virt_to_pfn(lm_alias(_text))));
kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
(void *)mod_shadow_start);
@@ -191,9 +221,9 @@ void __init kasan_init(void)
if (start >= end)
break;
- vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
- (unsigned long)kasan_mem_to_shadow(end),
- pfn_to_nid(virt_to_pfn(start)));
+ kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
+ (unsigned long)kasan_mem_to_shadow(end),
+ pfn_to_nid(virt_to_pfn(start)));
}
/*
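
The kasan_init.c rework above folds the old early-only helpers into per-level helpers that take a NUMA node and an "early" flag: during early boot every shadow slot aliases the single zero page, while the later kasan_init() pass tears that down and allocates fresh zeroed pages per range. A compressed user-space sketch of the two-mode idea, assuming a flat array in place of the pgd/pud/pmd/pte walk; all names are illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SLOTS 8
#define PAGE_SZ 64

static char zero_page[PAGE_SZ];		/* the shared early "zero page" */
static char *shadow[SLOTS];		/* one entry per shadow "page" */

static char *alloc_zeroed_page(void)
{
	return calloc(1, PAGE_SZ);
}

static void populate(unsigned int start, unsigned int end, bool early)
{
	for (unsigned int i = start; i < end; i++) {
		if (shadow[i])
			continue;	/* already populated */
		shadow[i] = early ? zero_page : alloc_zeroed_page();
	}
}

int main(void)
{
	/* kasan_early_init(): everything aliases the zero page */
	populate(0, SLOTS, true);

	/* kasan_init(): clear the early mapping (clear_pgds), map real
	 * shadow where needed, and zero shadow for the rest */
	memset(shadow, 0, sizeof(shadow));
	populate(2, 5, false);
	populate(0, 2, true);
	populate(5, SLOTS, true);

	for (unsigned int i = 0; i < SLOTS; i++)
		printf("slot %u -> %s page\n", i,
		       shadow[i] == zero_page ? "shared zero" : "private");
	return 0;
}

The per-level kasan_*_offset() helpers in the hunk apply the same "populate on demand, then descend" step at each page-table level, with the early flag selecting __pa_symbol(kasan_zero_*) instead of a fresh allocation.
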
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f1eb15e0e864..267d2b79d52d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -778,6 +778,10 @@ void __init early_fixmap_init(void)
}
}
+/*
+ * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
+ * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
+ */
void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 877d42fb0df6..95233dfc4c39 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -109,10 +109,10 @@ ENTRY(cpu_do_resume)
/*
* __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
* debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
- * exception. Mask them until local_dbg_restore() in cpu_suspend()
+ * exception. Mask them until local_daif_restore() in cpu_suspend()
* resets them.
*/
- disable_dbg
+ disable_daif
msr mdscr_el1, x10
msr sctlr_el1, x12
@@ -155,8 +155,7 @@ ENDPROC(cpu_do_switch_mm)
* called by anything else. It can only be executed from a TTBR0 mapping.
*/
ENTRY(idmap_cpu_replace_ttbr1)
- mrs x2, daif
- msr daifset, #0xf
+ save_and_disable_daif flags=x2
adrp x1, empty_zero_page
msr ttbr1_el1, x1
@@ -169,7 +168,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
msr ttbr1_el1, x0
isb
- msr daif, x2
+ restore_daif x2
ret
ENDPROC(idmap_cpu_replace_ttbr1)
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index af5369422032..d9c2866ba618 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -321,11 +321,14 @@ config BF53x
config GPIO_ADI
def_bool y
+ depends on !PINCTRL
depends on (BF51x || BF52x || BF53x || BF538 || BF539 || BF561)
-config PINCTRL
+config PINCTRL_BLACKFIN_ADI2
def_bool y
- depends on BF54x || BF60x
+ depends on (BF54x || BF60x)
+ select PINCTRL
+ select PINCTRL_ADI2
config MEM_MT48LC64M4A2FB_7E
bool
diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
index 4ddd1b73ee3e..c8d957274cc2 100644
--- a/arch/blackfin/Kconfig.debug
+++ b/arch/blackfin/Kconfig.debug
@@ -18,6 +18,7 @@ config DEBUG_VERBOSE
config DEBUG_MMRS
tristate "Generate Blackfin MMR tree"
+ depends on !PINCTRL
select DEBUG_FS
help
Create a tree of Blackfin MMRs via the debugfs tree. If
diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h
index 99d338ca2ea4..a2579321c7f1 100644
--- a/arch/blackfin/include/asm/gpio.h
+++ b/arch/blackfin/include/asm/gpio.h
@@ -183,6 +183,26 @@ static inline int irq_to_gpio(unsigned irq)
{
return irq - GPIO_IRQ_BASE;
}
+
+#else /* CONFIG_PINCTRL */
+
+/*
+ * CONFIG_PM is not working with pin control and should probably
+ * avoid being selected when pin control is active, but so far,
+ * these stubs are here to make allyesconfig and allmodconfig
+ * compile properly. These functions are normally backed by the
+ * CONFIG_ADI_GPIO custom GPIO implementation.
+ */
+
+static inline int bfin_pm_standby_setup(void)
+{
+ return 0;
+}
+
+static inline void bfin_pm_standby_restore(void)
+{
+}
+
#endif /* CONFIG_PINCTRL */
#include <asm/irq.h>
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index f6431439d15d..839d1441af3a 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -36,8 +36,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
__raw_spin_lock_asm(&lock->lock);
}
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __raw_spin_trylock_asm(&lock->lock);
@@ -48,23 +46,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
__raw_spin_unlock_asm(&lock->lock);
}
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
- return __raw_uncached_fetch_asm(&rw->lock) > 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
- return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
-}
-
static inline void arch_read_lock(arch_rwlock_t *rw)
{
__raw_read_lock_asm(&rw->lock);
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
return __raw_read_trylock_asm(&rw->lock);
@@ -80,8 +66,6 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
__raw_write_lock_asm(&rw->lock);
}
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
return __raw_write_trylock_asm(&rw->lock);
@@ -92,10 +76,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
__raw_write_unlock_asm(&rw->lock);
}
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif
#endif /* !__BFIN_SPINLOCK_H */
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index c5d31287de01..63da80bbadf6 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -15,6 +15,9 @@
/* FIXME: consumer API required for gpio_set_value() etc, get rid of this */
#include <linux/gpio.h>
#include <linux/irq.h>
+#include <asm/gpio.h>
+#include <asm/irq_handler.h>
+#include <asm/portmux.h>
#if ANOMALY_05000311 || ANOMALY_05000323
enum {
diff --git a/arch/blackfin/kernel/debug-mmrs.c b/arch/blackfin/kernel/debug-mmrs.c
index f31ace221392..194773ce109e 100644
--- a/arch/blackfin/kernel/debug-mmrs.c
+++ b/arch/blackfin/kernel/debug-mmrs.c
@@ -10,7 +10,6 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/gpio.h>
#include <asm/blackfin.h>
#include <asm/gptimers.h>
@@ -20,6 +19,7 @@
#include <asm/bfin_serial.h>
#include <asm/bfin5xx_spi.h>
#include <asm/bfin_twi.h>
+#include <asm/gpio.h>
/* Common code defines PORT_MUX on us, so redirect the MMR back locally */
#ifdef BFIN_PORT_MUX
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 0ccf0cf4daaf..fab69c736515 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -22,6 +22,7 @@
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
+#include <linux/gpio/machine.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
@@ -362,11 +363,17 @@ static struct platform_device bfin_device_gpiokeys = {
#if IS_ENABLED(CONFIG_I2C_GPIO)
#include <linux/i2c-gpio.h>
+static struct gpiod_lookup_table bfin_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("BFIN-GPIO", GPIO_PF8, NULL, 0,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("BFIN-GPIO", GPIO_PF9, NULL, 1,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
+};
+
static struct i2c_gpio_platform_data i2c_gpio_data = {
- .sda_pin = GPIO_PF8,
- .scl_pin = GPIO_PF9,
- .sda_is_open_drain = 0,
- .scl_is_open_drain = 0,
.udelay = 40,
}; /* This hasn't actually been used these pins
* are (currently) free pins on the expansion connector */
@@ -462,7 +469,9 @@ static int __init blackstamp_init(void)
int ret;
printk(KERN_INFO "%s(): registering device resources\n", __func__);
-
+#if IS_ENABLED(CONFIG_I2C_GPIO)
+ gpiod_add_lookup_table(&bfin_i2c_gpiod_table);
+#endif
i2c_register_board_info(0, bfin_i2c_board_info,
ARRAY_SIZE(bfin_i2c_board_info));
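
This conversion (repeated for the other Blackfin boards below) moves the SDA/SCL pin numbers out of i2c_gpio_platform_data and into a gpiod_lookup_table keyed by the consumer device name, so the driver can ask for descriptors by index instead of reading board-specific pin fields. A user-space sketch of the lookup idea with toy struct and function names (the real kernel side is gpiod_add_lookup_table() plus the gpiod consumer calls):

#include <stdio.h>
#include <string.h>

struct gpio_lookup {
	const char *chip;	/* provider label, e.g. "BFIN-GPIO" */
	unsigned int line;	/* line offset on that chip */
	unsigned int idx;	/* consumer-side index (0 = SDA, 1 = SCL) */
};

struct gpio_lookup_table {
	const char *dev_id;	/* consumer device, e.g. "i2c-gpio" */
	const struct gpio_lookup *table;
	unsigned int n;
};

static const struct gpio_lookup bfin_i2c[] = {
	{ "BFIN-GPIO", 8, 0 },	/* SDA on PF8 */
	{ "BFIN-GPIO", 9, 1 },	/* SCL on PF9 */
};

static const struct gpio_lookup_table bfin_i2c_table = {
	.dev_id = "i2c-gpio", .table = bfin_i2c, .n = 2,
};

/* what the consumer conceptually does instead of reading .sda_pin/.scl_pin */
static const struct gpio_lookup *lookup_gpio(const struct gpio_lookup_table *t,
					     const char *dev, unsigned int idx)
{
	if (strcmp(t->dev_id, dev) != 0)
		return NULL;
	for (unsigned int i = 0; i < t->n; i++)
		if (t->table[i].idx == idx)
			return &t->table[i];
	return NULL;
}

int main(void)
{
	const struct gpio_lookup *sda = lookup_gpio(&bfin_i2c_table, "i2c-gpio", 0);
	const struct gpio_lookup *scl = lookup_gpio(&bfin_i2c_table, "i2c-gpio", 1);

	if (sda && scl)
		printf("SDA = %s line %u, SCL = %s line %u\n",
		       sda->chip, sda->line, scl->chip, scl->line);
	return 0;
}
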
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index 3625e9eaa8a8..d64d270e9e62 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -19,6 +19,7 @@
#endif
#include <linux/irq.h>
#include <linux/i2c.h>
+#include <linux/gpio/machine.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
@@ -390,11 +391,17 @@ static struct platform_device bfin_device_gpiokeys = {
#if IS_ENABLED(CONFIG_I2C_GPIO)
#include <linux/i2c-gpio.h>
+static struct gpiod_lookup_table bfin_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("BFIN-GPIO", GPIO_PF1, NULL, 0,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("BFIN-GPIO", GPIO_PF0, NULL, 1,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
+};
+
static struct i2c_gpio_platform_data i2c_gpio_data = {
- .sda_pin = GPIO_PF1,
- .scl_pin = GPIO_PF0,
- .sda_is_open_drain = 0,
- .scl_is_open_drain = 0,
.udelay = 40,
};
@@ -516,6 +523,9 @@ static struct platform_device *ezkit_devices[] __initdata = {
static int __init ezkit_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
+#if IS_ENABLED(CONFIG_I2C_GPIO)
+ gpiod_add_lookup_table(&bfin_i2c_gpiod_table);
+#endif
platform_add_devices(ezkit_devices, ARRAY_SIZE(ezkit_devices));
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
i2c_register_board_info(0, bfin_i2c_board_info,
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 23eada79439c..27cbf2fa2c62 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -21,6 +21,7 @@
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/i2c.h>
+#include <linux/gpio/machine.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/reboot.h>
@@ -512,11 +513,17 @@ static struct platform_device bfin_device_gpiokeys = {
#if IS_ENABLED(CONFIG_I2C_GPIO)
#include <linux/i2c-gpio.h>
+static struct gpiod_lookup_table bfin_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("BFIN-GPIO", GPIO_PF2, NULL, 0,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("BFIN-GPIO", GPIO_PF3, NULL, 1,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
+};
+
static struct i2c_gpio_platform_data i2c_gpio_data = {
- .sda_pin = GPIO_PF2,
- .scl_pin = GPIO_PF3,
- .sda_is_open_drain = 0,
- .scl_is_open_drain = 0,
.udelay = 10,
};
@@ -848,6 +855,9 @@ static int __init stamp_init(void)
printk(KERN_INFO "%s(): registering device resources\n", __func__);
+#if IS_ENABLED(CONFIG_I2C_GPIO)
+ gpiod_add_lookup_table(&bfin_i2c_gpiod_table);
+#endif
i2c_register_board_info(0, bfin_i2c_board_info,
ARRAY_SIZE(bfin_i2c_board_info));
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 57d1c43726d9..acc5363f60c6 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/delay.h>
+#include <linux/gpio/machine.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
@@ -379,11 +380,17 @@ static struct platform_device bfin_device_gpiokeys = {
#if IS_ENABLED(CONFIG_I2C_GPIO)
#include <linux/i2c-gpio.h>
+static struct gpiod_lookup_table bfin_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("BFIN-GPIO", GPIO_PF1, NULL, 0,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("BFIN-GPIO", GPIO_PF0, NULL, 1,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
+};
+
static struct i2c_gpio_platform_data i2c_gpio_data = {
- .sda_pin = GPIO_PF1,
- .scl_pin = GPIO_PF0,
- .sda_is_open_drain = 0,
- .scl_is_open_drain = 0,
.udelay = 10,
};
@@ -633,6 +640,9 @@ static int __init ezkit_init(void)
printk(KERN_INFO "%s(): registering device resources\n", __func__);
+#if IS_ENABLED(CONFIG_I2C_GPIO)
+ gpiod_add_lookup_table(&bfin_i2c_gpiod_table);
+#endif
ret = platform_add_devices(ezkit_devices, ARRAY_SIZE(ezkit_devices));
if (ret < 0)
return ret;
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 13e94bf9d8ba..e81a5b7dabdc 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -18,7 +18,6 @@
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/syscore_ops.h>
-#include <linux/gpio.h>
#include <asm/delay.h>
#ifdef CONFIG_IPIPE
#include <linux/ipipe.h>
@@ -28,6 +27,7 @@
#include <asm/irq_handler.h>
#include <asm/dpmc.h>
#include <asm/traps.h>
+#include <asm/gpio.h>
/*
* NOTES:
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index 5ece38a5b758..f57b5fe5355e 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -15,12 +15,12 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <asm/cplb.h>
#include <asm/dma.h>
#include <asm/dpmc.h>
#include <asm/pm.h>
+#include <asm/gpio.h>
#ifdef CONFIG_BF60x
struct bfin_cpu_pm_fns *bfin_cpu_pm;
diff --git a/arch/c6x/boot/dts/Makefile b/arch/c6x/boot/dts/Makefile
index 7368838c6e71..b212d278ebc4 100644
--- a/arch/c6x/boot/dts/Makefile
+++ b/arch/c6x/boot/dts/Makefile
@@ -17,5 +17,3 @@ $(obj)/builtin.dtb: $(obj)/$(DTB).dtb
$(call if_changed,cp)
$(obj)/linked_dtb.o: $(obj)/builtin.dtb
-
-clean-files := *.dtb
diff --git a/arch/cris/boot/dts/Makefile b/arch/cris/boot/dts/Makefile
index 3318c630caa2..118fe990a173 100644
--- a/arch/cris/boot/dts/Makefile
+++ b/arch/cris/boot/dts/Makefile
@@ -3,5 +3,3 @@ BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
ifneq ($(CONFIG_BUILTIN_DTB),"")
obj-$(CONFIG_OF) += $(BUILTIN_DTB)
endif
-
-clean-files := *.dtb.S
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
index 1c9bf14807db..1553bdb30a0c 100644
--- a/arch/cris/include/asm/dma-mapping.h
+++ b/arch/cris/include/asm/dma-mapping.h
@@ -17,10 +17,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
}
#endif
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
-}
-
#endif
diff --git a/arch/cris/include/asm/pci.h b/arch/cris/include/asm/pci.h
index 6f8b366a226a..dcfef6407ae6 100644
--- a/arch/cris/include/asm/pci.h
+++ b/arch/cris/include/asm/pci.h
@@ -17,13 +17,6 @@
#define PCIBIOS_MIN_CARDBUS_IO 0x4000
-void pcibios_config_init(void);
-struct pci_bus * pcibios_scan_root(int bus);
-
-void pcibios_set_master(struct pci_dev *dev);
-struct irq_routing_table *pcibios_get_irq_routing_table(void);
-int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-
/* Dynamic DMA mapping stuff.
* i386 has everything mapped statically.
*/
@@ -34,8 +27,6 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
#include <linux/string.h>
#include <asm/io.h>
-struct pci_dev;
-
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions.
diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h
index 273defa02a02..fd80e840a1e6 100644
--- a/arch/frv/include/asm/dma-mapping.h
+++ b/arch/frv/include/asm/dma-mapping.h
@@ -15,11 +15,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &frv_dma_ops;
}
-static inline
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- flush_write_buffers();
-}
-
#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/frv/include/asm/pci.h b/arch/frv/include/asm/pci.h
index 809cfc6707ab..895af9d558ba 100644
--- a/arch/frv/include/asm/pci.h
+++ b/arch/frv/include/asm/pci.h
@@ -17,12 +17,8 @@
#include <linux/scatterlist.h>
#include <asm-generic/pci.h>
-struct pci_dev;
-
#define pcibios_assign_all_busses() 0
-extern void pcibios_set_master(struct pci_dev *dev);
-
#ifdef CONFIG_MMU
extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle);
extern void consistent_free(void *vaddr);
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index 328f0a292316..cf464100e838 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -42,21 +42,9 @@
#undef DEBUG
/*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving a inode
- * unused etc..
- *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
*/
-static unsigned long empty_bad_page_table;
-static unsigned long empty_bad_page;
-
unsigned long empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
@@ -72,8 +60,6 @@ void __init paging_init(void)
unsigned long zones_size[MAX_NR_ZONES] = {0, };
/* allocate some pages for kernel housekeeping tasks */
- empty_bad_page_table = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
- empty_bad_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
memset((void *) empty_zero_page, 0, PAGE_SIZE);
diff --git a/arch/h8300/boot/dts/Makefile b/arch/h8300/boot/dts/Makefile
index 14593b51b2b2..69fcd817892c 100644
--- a/arch/h8300/boot/dts/Makefile
+++ b/arch/h8300/boot/dts/Makefile
@@ -8,9 +8,3 @@ obj-y += $(BUILTIN_DTB)
dtb-$(CONFIG_H8300H_SIM) := h8300h_sim.dtb
dtb-$(CONFIG_H8S_SIM) := h8s_sim.dtb
dtb-$(CONFIG_H8S_EDOSK2674) := edosk2674.dtb
-
-dtstree := $(srctree)/$(src)
-dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
-
-always := $(dtb-y)
-clean-files := *.dtb.S *.dtb
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index eeead51bed2d..015287ac8ce8 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -40,20 +40,9 @@
#include <asm/sections.h>
/*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving a inode
- * unused etc..
- *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
*/
-static unsigned long empty_bad_page_table;
-static unsigned long empty_bad_page;
unsigned long empty_zero_page;
/*
@@ -78,8 +67,6 @@ void __init paging_init(void)
* Initialize the bad page table and bad page to point
* to a couple of allocated pages.
*/
- empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
- empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
memset((void *)empty_zero_page, 0, PAGE_SIZE);
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index 463dbc18f853..5208de242e79 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -37,9 +37,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
}
-extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction);
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
index 53a8d5885887..48020863f53a 100644
--- a/arch/hexagon/include/asm/spinlock.h
+++ b/arch/hexagon/include/asm/spinlock.h
@@ -86,16 +86,6 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
return temp;
}
-static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
-{
- return rwlock->lock == 0;
-}
-
-static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
-{
- return rwlock->lock == 0;
-}
-
/* Stuffs a -1 in the lock value? */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
@@ -177,11 +167,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
/*
* SMP spinlocks are intended to allow only a single CPU at the lock
*/
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
#define arch_spin_is_locked(x) ((x)->lock != 0)
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1efc444f5fa1..49583c5a5d44 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -47,7 +47,7 @@ config IA64
select ARCH_TASK_STRUCT_ALLOCATOR
select ARCH_THREAD_STACK_ALLOCATOR
select ARCH_CLOCKSOURCE_DATA
- select GENERIC_TIME_VSYSCALL_OLD
+ select GENERIC_TIME_VSYSCALL
select SYSCTL_ARCH_UNALIGN_NO_WARN
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 5da9421fb0ff..c1bab526a046 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -45,15 +45,4 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
return daddr;
}
-static inline void
-dma_cache_sync (struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
-{
- /*
- * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to
- * ensure that dma_cache_sync() enforces order, hence the mb().
- */
- mb();
-}
-
#endif /* _ASM_IA64_DMA_MAPPING_H */
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index e20d77f6a3c1..b1d04e8bafc8 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -30,10 +30,6 @@ struct pci_vector_struct {
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
-void pcibios_config_init(void);
-
-struct pci_dev;
-
/*
* PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct
* correspondence between device bus addresses and CPU physical addresses.
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index 7d6fceb3d567..917910607e0e 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -38,15 +38,31 @@
/*
* lock for reading
*/
-static inline void
-__down_read (struct rw_semaphore *sem)
+static inline int
+___down_read (struct rw_semaphore *sem)
{
long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);
- if (result < 0)
+ return (result < 0);
+}
+
+static inline void
+__down_read (struct rw_semaphore *sem)
+{
+ if (___down_read(sem))
rwsem_down_read_failed(sem);
}
+static inline int
+__down_read_killable (struct rw_semaphore *sem)
+{
+ if (___down_read(sem))
+ if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+ return -EINTR;
+
+ return 0;
+}
+
/*
* lock for writing
*/
@@ -73,9 +89,10 @@ __down_write (struct rw_semaphore *sem)
static inline int
__down_write_killable (struct rw_semaphore *sem)
{
- if (___down_write(sem))
+ if (___down_write(sem)) {
if (IS_ERR(rwsem_down_write_failed_killable(sem)))
return -EINTR;
+ }
return 0;
}
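
The rwsem hunk above splits the old fastpath into ___down_read(), which only reports whether the slowpath is needed, so the new killable variant can reuse it and turn a killed slowpath into -EINTR. A user-space sketch of that split, with stand-in slowpath functions where the real ones sleep on the semaphore's wait list:

#include <stdatomic.h>
#include <stdio.h>

#define EINTR 4

struct rw_semaphore { _Atomic long count; };

/* fastpath: bump the reader count, report whether the slowpath is needed */
static int __down_read_fast(struct rw_semaphore *sem)
{
	return atomic_fetch_add(&sem->count, 1) < 0;
}

/* stand-ins for the real sleeping slowpaths */
static int rwsem_slowpath(struct rw_semaphore *sem)
{
	(void)sem;
	return 0;		/* pretend we eventually got the lock */
}

static int rwsem_slowpath_killable(struct rw_semaphore *sem)
{
	(void)sem;
	return -EINTR;		/* pretend we were killed while waiting */
}

static void down_read(struct rw_semaphore *sem)
{
	if (__down_read_fast(sem))
		rwsem_slowpath(sem);
}

static int down_read_killable(struct rw_semaphore *sem)
{
	if (__down_read_fast(sem)) {
		if (rwsem_slowpath_killable(sem) < 0)
			return -EINTR;
	}
	return 0;
}

int main(void)
{
	struct rw_semaphore free_sem = { 0 };		/* uncontended */
	struct rw_semaphore held_sem = { -100 };	/* writer holds it */

	down_read(&free_sem);
	printf("free: %d\n", down_read_killable(&free_sem));	/* 0 */
	printf("held: %d\n", down_read_killable(&held_sem));	/* -EINTR */
	return 0;
}
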
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index aa057abd948e..afd0b3121b4c 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -62,7 +62,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
- int tmp = ACCESS_ONCE(lock->lock);
+ int tmp = READ_ONCE(lock->lock);
if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
@@ -74,19 +74,19 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+ WRITE_ONCE(*p, (tmp + 2) & ~1);
}
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
- long tmp = ACCESS_ONCE(lock->lock);
+ long tmp = READ_ONCE(lock->lock);
return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}
static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
- long tmp = ACCESS_ONCE(lock->lock);
+ long tmp = READ_ONCE(lock->lock);
return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
@@ -127,9 +127,7 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
{
arch_spin_lock(lock);
}
-
-#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
-#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)
+#define arch_spin_lock_flags arch_spin_lock_flags
#ifdef ASM_SUPPORTED
@@ -157,6 +155,7 @@ arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
: "p6", "p7", "r2", "memory");
}
+#define arch_read_lock_flags arch_read_lock_flags
#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
#else /* !ASM_SUPPORTED */
@@ -209,6 +208,7 @@ arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}
+#define arch_write_lock_flags arch_write_lock_flags
#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
#define arch_write_trylock(rw) \
@@ -232,8 +232,6 @@ static inline void arch_write_unlock(arch_rwlock_t *x)
#else /* !ASM_SUPPORTED */
-#define arch_write_lock_flags(l, flags) arch_write_lock(l)
-
#define arch_write_lock(l) \
({ \
__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
@@ -273,8 +271,4 @@ static inline int arch_read_trylock(arch_rwlock_t *x)
return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* _ASM_IA64_SPINLOCK_H */
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index b385ff2bf6ce..f7693f49c573 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -212,6 +212,8 @@ void foo(void)
BLANK();
DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET,
offsetof (struct timespec, tv_nsec));
+ DEFINE(IA64_TIME_SN_SPEC_SNSEC_OFFSET,
+ offsetof (struct time_sn_spec, snsec));
DEFINE(CLONE_SETTLS_BIT, 19);
#if CLONE_SETTLS != (1<<19)
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index c0e7c9af2bb9..fe742ffafc7a 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -236,9 +236,9 @@ ENTRY(fsys_gettimeofday)
MOV_FROM_ITC(p8, p6, r2, r10) // CPU_TIMER. 36 clocks latency!!!
(p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues..
(p13) ld8 r25 = [r19] // get itc_lastcycle value
- ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec
+ ld8 r9 = [r22],IA64_TIME_SN_SPEC_SNSEC_OFFSET // sec
;;
- ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET // tv_nsec
+ ld8 r8 = [r22],-IA64_TIME_SN_SPEC_SNSEC_OFFSET // snsec
(p13) sub r3 = r25,r2 // Diff needed before comparison (thanks davidm)
;;
(p13) cmp.gt.unc p6,p7 = r3,r0 // check if it is less than last. p6,p7 cleared
@@ -266,9 +266,9 @@ EX(.fail_efault, probe.w.fault r31, 3)
mf
;;
ld4 r10 = [r20] // gtod_lock.sequence
- shr.u r2 = r2,r23 // shift by factor
- ;;
add r8 = r8,r2 // Add xtime.nsecs
+ ;;
+ shr.u r8 = r8,r23 // shift by factor
cmp4.ne p7,p0 = r28,r10
(p7) br.cond.dpnt.few .time_redo // sequence number changed, redo
// End critical section.
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
index 0914c02a1eb0..cc2861445965 100644
--- a/arch/ia64/kernel/fsyscall_gtod_data.h
+++ b/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -6,10 +6,16 @@
* fsyscall gettimeofday data
*/
+/* like timespec, but includes "shifted nanoseconds" */
+struct time_sn_spec {
+ u64 sec;
+ u64 snsec;
+};
+
struct fsyscall_gtod_data_t {
seqcount_t seq;
- struct timespec wall_time;
- struct timespec monotonic_time;
+ struct time_sn_spec wall_time;
+ struct time_sn_spec monotonic_time;
u64 clk_mask;
u32 clk_mult;
u32 clk_shift;
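
time_sn_spec stores nanoseconds pre-shifted by the clocksource shift, which is why the fsys.S hunk above now adds the scaled cycle delta first and shifts once at the end, and why update_vsyscall() below normalizes against NSEC_PER_SEC << shift. A user-space sketch of that arithmetic; the values and function names are made up, only the add-then-shift order matters:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct time_sn_spec { uint64_t sec; uint64_t snsec; };

static void sn_to_timespec(const struct time_sn_spec *sn, uint64_t delta_cycles,
			   uint32_t mult, uint32_t shift,
			   uint64_t *sec, uint64_t *nsec)
{
	/* snsec and delta_cycles * mult are both "shifted" quantities,
	 * so add first and divide down once */
	uint64_t ns = (sn->snsec + delta_cycles * mult) >> shift;

	*sec = sn->sec;
	while (ns >= NSEC_PER_SEC) {	/* normalize, as update_vsyscall() does */
		ns -= NSEC_PER_SEC;
		(*sec)++;
	}
	*nsec = ns;
}

int main(void)
{
	/* 1.5 s stored with shift = 4, plus 100 cycles at mult = 16 (~100 ns) */
	struct time_sn_spec sn = { 1, (NSEC_PER_SEC / 2) << 4 };
	uint64_t sec, nsec;

	sn_to_timespec(&sn, 100, 16, 4, &sec, &nsec);
	printf("%llu.%09llu\n", (unsigned long long)sec, (unsigned long long)nsec);
	return 0;
}

Keeping the nanoseconds shifted avoids a per-reader rounding step and matches what the shared timekeeper already stores in tkr_mono.xtime_nsec.
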
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index aa7be020a904..c6ecb97151a2 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -430,30 +430,32 @@ void update_vsyscall_tz(void)
{
}
-void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
- struct clocksource *c, u32 mult, u64 cycle_last)
+void update_vsyscall(struct timekeeper *tk)
{
write_seqcount_begin(&fsyscall_gtod_data.seq);
- /* copy fsyscall clock data */
- fsyscall_gtod_data.clk_mask = c->mask;
- fsyscall_gtod_data.clk_mult = mult;
- fsyscall_gtod_data.clk_shift = c->shift;
- fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
- fsyscall_gtod_data.clk_cycle_last = cycle_last;
-
- /* copy kernel time structures */
- fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
- fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
- fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
- + wall->tv_sec;
- fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
- + wall->tv_nsec;
+ /* copy vsyscall data */
+ fsyscall_gtod_data.clk_mask = tk->tkr_mono.mask;
+ fsyscall_gtod_data.clk_mult = tk->tkr_mono.mult;
+ fsyscall_gtod_data.clk_shift = tk->tkr_mono.shift;
+ fsyscall_gtod_data.clk_fsys_mmio = tk->tkr_mono.clock->archdata.fsys_mmio;
+ fsyscall_gtod_data.clk_cycle_last = tk->tkr_mono.cycle_last;
+
+ fsyscall_gtod_data.wall_time.sec = tk->xtime_sec;
+ fsyscall_gtod_data.wall_time.snsec = tk->tkr_mono.xtime_nsec;
+
+ fsyscall_gtod_data.monotonic_time.sec = tk->xtime_sec
+ + tk->wall_to_monotonic.tv_sec;
+ fsyscall_gtod_data.monotonic_time.snsec = tk->tkr_mono.xtime_nsec
+ + ((u64)tk->wall_to_monotonic.tv_nsec
+ << tk->tkr_mono.shift);
/* normalize */
- while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
- fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
- fsyscall_gtod_data.monotonic_time.tv_sec++;
+ while (fsyscall_gtod_data.monotonic_time.snsec >=
+ (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+ fsyscall_gtod_data.monotonic_time.snsec -=
+ ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
+ fsyscall_gtod_data.monotonic_time.sec++;
}
write_seqcount_end(&fsyscall_gtod_data.seq);
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 0d9446c37ae8..498398d915c1 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -196,8 +196,8 @@ config TIMER_DIVIDE
default "128"
config CPU_BIG_ENDIAN
- bool "Generate big endian code"
- default n
+ bool
+ default !CPU_LITTLE_ENDIAN
config CPU_LITTLE_ENDIAN
bool "Generate little endian code"
diff --git a/arch/m32r/include/asm/dma-mapping.h b/arch/m32r/include/asm/dma-mapping.h
index 4abfc07f4762..336ffe60814b 100644
--- a/arch/m32r/include/asm/dma-mapping.h
+++ b/arch/m32r/include/asm/dma-mapping.h
@@ -14,11 +14,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &dma_noop_ops;
}
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
-}
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index 604af84427ff..0189f410f8f5 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -29,7 +29,6 @@
*/
#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/**
* arch_spin_trylock - Try spin lock and return a result
@@ -138,18 +137,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
* semaphore.h for details. -ben
*/
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((int)(x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -318,11 +305,4 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
return 0;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* _ASM_M32R_SPINLOCK_H */
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index ff5f0896318b..21f00349af52 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -284,7 +284,7 @@ config M548x
config M5441x
bool "MCF5441x"
- depends on !MMU
+ select MMU_COLDFIRE if MMU
select GENERIC_CLOCKEVENTS
select HAVE_CACHE_CB
help
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 5cd57b4d3615..64a641467736 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -266,6 +266,12 @@ config AMCORE
help
Support for the Sysam AMCORE open-hardware generic board.
+config STMARK2
+ bool "Sysam stmark2 board support"
+ depends on M5441x
+ help
+ Support for the Sysam stmark2 open-hardware generic board.
+
config FIREBEE
bool "FireBee board support"
depends on M547x
diff --git a/arch/m68k/coldfire/Makefile b/arch/m68k/coldfire/Makefile
index f8cef9681416..573eabca1a3a 100644
--- a/arch/m68k/coldfire/Makefile
+++ b/arch/m68k/coldfire/Makefile
@@ -35,7 +35,8 @@ obj-$(CONFIG_NETtel) += nettel.o
obj-$(CONFIG_CLEOPATRA) += nettel.o
obj-$(CONFIG_FIREBEE) += firebee.o
obj-$(CONFIG_MCF8390) += mcf8390.o
-obj-$(CONFIG_AMCORE) += amcore.o
+obj-$(CONFIG_AMCORE) += amcore.o
+obj-$(CONFIG_STMARK2) += stmark2.o
obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/m68k/coldfire/m5441x.c b/arch/m68k/coldfire/m5441x.c
index 315d14b0dca0..55392af845fb 100644
--- a/arch/m68k/coldfire/m5441x.c
+++ b/arch/m68k/coldfire/m5441x.c
@@ -27,7 +27,7 @@ DEFINE_CLK(0, "intc.0", 18, MCF_CLK);
DEFINE_CLK(0, "intc.1", 19, MCF_CLK);
DEFINE_CLK(0, "intc.2", 20, MCF_CLK);
DEFINE_CLK(0, "imx1-i2c.0", 22, MCF_CLK);
-DEFINE_CLK(0, "mcfdspi.0", 23, MCF_CLK);
+DEFINE_CLK(0, "fsl-dspi.0", 23, MCF_CLK);
DEFINE_CLK(0, "mcfuart.0", 24, MCF_BUSCLK);
DEFINE_CLK(0, "mcfuart.1", 25, MCF_BUSCLK);
DEFINE_CLK(0, "mcfuart.2", 26, MCF_BUSCLK);
@@ -140,6 +140,7 @@ static struct clk * const enable_clks[] __initconst = {
&__clk_0_18, /* intc0 */
&__clk_0_19, /* intc0 */
&__clk_0_20, /* intc0 */
+ &__clk_0_23, /* dspi.0 */
&__clk_0_24, /* uart0 */
&__clk_0_25, /* uart1 */
&__clk_0_26, /* uart2 */
diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c
index e53ffed13ba8..adad03ca6e11 100644
--- a/arch/m68k/coldfire/m54xx.c
+++ b/arch/m68k/coldfire/m54xx.c
@@ -96,10 +96,6 @@ static void mcf54xx_reset(void)
void __init config_BSP(char *commandp, int size)
{
-#ifdef CONFIG_MMU
- cf_bootmem_alloc();
- mmu_context_init();
-#endif
mach_reset = mcf54xx_reset;
mach_sched_init = hw_timer_init;
m54xx_uarts_init();
diff --git a/arch/m68k/coldfire/stmark2.c b/arch/m68k/coldfire/stmark2.c
new file mode 100644
index 000000000000..a8d2b3d172f9
--- /dev/null
+++ b/arch/m68k/coldfire/stmark2.c
@@ -0,0 +1,119 @@
+/*
+ * stmark2.c -- Support for Sysam stmark2 open board
+ *
+ * (C) Copyright 2017, Angelo Dureghello <angelo@sysam.it>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-fsl-dspi.h>
+#include <linux/spi/flash.h>
+#include <asm/mcfsim.h>
+
+/*
+ * Partitioning of parallel NOR flash (39VF3201B)
+ */
+static struct mtd_partition stmark2_partitions[] = {
+ {
+ .name = "U-Boot (1024K)",
+ .size = 0x100000,
+ .offset = 0x0
+ }, {
+ .name = "Kernel+initramfs (7168K)",
+ .size = 0x700000,
+ .offset = MTDPART_OFS_APPEND
+ }, {
+ .name = "Flash Free Space (8192K)",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND
+ }
+};
+
+static struct flash_platform_data stmark2_spi_flash_data = {
+ .name = "is25lp128",
+ .parts = stmark2_partitions,
+ .nr_parts = ARRAY_SIZE(stmark2_partitions),
+ .type = "is25lp128",
+};
+
+static struct spi_board_info stmark2_board_info[] __initdata = {
+ {
+ .modalias = "m25p80",
+ .max_speed_hz = 5000000,
+ .bus_num = 0,
+ .chip_select = 1,
+ .platform_data = &stmark2_spi_flash_data,
+ .mode = SPI_MODE_3,
+ }
+};
+
+/* SPI controller data, SPI (0) */
+static struct fsl_dspi_platform_data dspi_spi0_info = {
+ .cs_num = 4,
+ .bus_num = 0,
+ .sck_cs_delay = 100,
+ .cs_sck_delay = 100,
+};
+
+static struct resource dspi_spi0_resource[] = {
+ [0] = {
+ .start = MCFDSPI_BASE0,
+ .end = MCFDSPI_BASE0 + 0xFF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 12,
+ .end = 13,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = MCF_IRQ_DSPI0,
+ .end = MCF_IRQ_DSPI0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* SPI controller, id = bus number */
+static struct platform_device dspi_spi0_device = {
+ .name = "fsl-dspi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(dspi_spi0_resource),
+ .resource = dspi_spi0_resource,
+ .dev = {
+ .platform_data = &dspi_spi0_info,
+ },
+};
+
+static struct platform_device *stmark2_devices[] __initdata = {
+ &dspi_spi0_device,
+};
+
+/*
+ * Note: proper pin-mux setup is mandatory for proper SPI functionality.
+ */
+static int __init init_stmark2(void)
+{
+ /* DSPI0, all pins as DSPI, and using CS1 */
+ __raw_writeb(0x80, MCFGPIO_PAR_DSPIOWL);
+ __raw_writeb(0xfc, MCFGPIO_PAR_DSPIOWH);
+
+ /* Board gpio setup */
+ __raw_writeb(0x00, MCFGPIO_PAR_BE);
+ __raw_writeb(0x00, MCFGPIO_PAR_FBCTL);
+ __raw_writeb(0x00, MCFGPIO_PAR_CS);
+ __raw_writeb(0x00, MCFGPIO_PAR_CANI2C);
+
+ platform_add_devices(stmark2_devices, ARRAY_SIZE(stmark2_devices));
+
+ spi_register_board_info(stmark2_board_info,
+ ARRAY_SIZE(stmark2_board_info));
+
+ return 0;
+}
+
+late_initcall(init_stmark2);
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 54191f6fc715..5b5fa9831b4d 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -123,6 +123,7 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -302,6 +303,7 @@ CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
@@ -400,6 +402,7 @@ CONFIG_ARIADNE=y
# CONFIG_NET_VENDOR_CIRRUS is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -451,6 +454,7 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
+# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FB_CIRRUS=y
CONFIG_FB_AMIGA=y
@@ -607,12 +611,10 @@ CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index fb4663904428..72a7764b74ed 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -121,6 +121,7 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -300,6 +301,7 @@ CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
@@ -377,6 +379,7 @@ CONFIG_VETH=m
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -419,6 +422,7 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
+# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -566,12 +570,10 @@ CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 4ab393e86e52..884b43a2f0d9 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -121,6 +121,7 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -300,6 +301,7 @@ CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
@@ -387,6 +389,7 @@ CONFIG_ATARILANCE=y
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -434,6 +437,7 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
+# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FB_ATARI=y
CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -588,12 +592,10 @@ CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 1dd8d697545b..fcfa60d31499 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -119,6 +119,7 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -298,6 +299,7 @@ CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
@@ -376,6 +378,7 @@ CONFIG_VETH=m
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_BVME6000_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -417,6 +420,7 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
+# CONFIG_RC_CORE is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
@@ -558,12 +562,10 @@ CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 02b39f50076e..9d597bbbbbfe 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -121,6 +121,7 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -300,6 +301,7 @@ CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
@@ -378,6 +380,7 @@ CONFIG_HPLANCE=y
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -422,6 +425,7 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
+# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -568,12 +572,10 @@ CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 044dcb2bf8fb..45da20d1286c 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -120,6 +120,7 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -302,6 +303,7 @@ CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
@@ -395,6 +397,7 @@ CONFIG_MACMACE=y
# CONFIG_NET_VENDOR_BROADCOM is not set
CONFIG_MAC89x0=y
# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -444,6 +447,7 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
+# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FB_VALKYRIE=y
CONFIG_FB_MAC=y
@@ -590,12 +594,10 @@ CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 3ad04682077a..fda880c10861 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -130,6 +130,7 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -312,6 +313,7 @@ CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
@@ -436,6 +438,7 @@ CONFIG_MACMACE=y
CONFIG_MAC89x0=y
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_BVME6000_NET=y
CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -501,6 +504,7 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
+# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FB_CIRRUS=y
CONFIG_FB_AMIGA=y
@@ -670,12 +674,10 @@ CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index dc2dd61948cd..7d5e4863efec 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -118,6 +118,7 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -297,6 +298,7 @@ CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
@@ -376,6 +378,7 @@ CONFIG_MVME147_NET=y
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -417,6 +420,7 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
+# CONFIG_RC_CORE is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
@@ -558,12 +562,10 @@ CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 54e7b523fc3d..7763b71a9c49 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -119,6 +119,7 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -298,6 +299,7 @@ CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
@@ -376,6 +378,7 @@ CONFIG_VETH=m
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -417,6 +420,7 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
+# CONFIG_RC_CORE is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
@@ -558,12 +562,10 @@ CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index d63d8a15f6db..17eaebfa3e19 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -119,6 +119,7 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -298,6 +299,7 @@ CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
@@ -386,6 +388,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_CIRRUS is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -434,6 +437,7 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
+# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -581,12 +585,10 @@ CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/m68k/configs/stmark2_defconfig b/arch/m68k/configs/stmark2_defconfig
new file mode 100644
index 000000000000..55e55dbc2fb6
--- /dev/null
+++ b/arch/m68k/configs/stmark2_defconfig
@@ -0,0 +1,92 @@
+CONFIG_LOCALVERSION="stmark2-001"
+CONFIG_DEFAULT_HOSTNAME="stmark2"
+CONFIG_SYSVIPC=y
+# CONFIG_FHANDLE is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../uClinux-dist/romfs"
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_AIO is not set
+# CONFIG_ADVISE_SYSCALLS is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_BLK_CMDLINE_PARSER=y
+# CONFIG_MMU is not set
+CONFIG_M5441x=y
+CONFIG_CLOCK_FREQ=240000000
+CONFIG_STMARK2=y
+CONFIG_RAMBASE=0x40000000
+CONFIG_RAMSIZE=0x8000000
+CONFIG_VECTORBASE=0x40000000
+CONFIG_KERNELBASE=0x40001000
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_MISC=y
+# CONFIG_UEVENT_HELPER is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_LE_BYTE_SWAP=y
+CONFIG_MTD_CFI_GEOMETRY=y
+# CONFIG_MTD_CFI_I2 is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_SPI_NOR=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_UNIX98_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MCF=y
+CONFIG_SERIAL_MCF_BAUDRATE=115200
+CONFIG_SERIAL_MCF_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_SPI=y
+CONFIG_SPI_DEBUG=y
+CONFIG_SPI_FSL_DSPI=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
+# CONFIG_HWMON is not set
+# CONFIG_RC_CORE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_FILE_LOCKING is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+CONFIG_FSCACHE=y
+# CONFIG_PROC_SYSCTL is not set
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_PANIC_ON_OOPS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_BOOTPARAM=y
+CONFIG_BOOTPARAM_STRING="console=ttyS0,115200 root=/dev/ram0 rw rootfstype=ramfs rdinit=/bin/init devtmpfs.mount=1"
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_ECHAINIV is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC16=y
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index d0924c22f52a..d1cb7a04ae1d 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -116,6 +116,7 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -295,6 +296,7 @@ CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
@@ -373,6 +375,7 @@ CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_SUN3_82586=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -416,6 +419,7 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
+# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -559,12 +563,10 @@ CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 3001ee1e5dc5..ea3a331c62d5 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -116,6 +116,7 @@ CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -295,6 +296,7 @@ CONFIG_MPLS=y
CONFIG_NET_MPLS_GSO=m
CONFIG_MPLS_ROUTING=m
CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
@@ -374,6 +376,7 @@ CONFIG_SUN3LANCE=y
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -416,6 +419,7 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
+# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -560,12 +564,10 @@ CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
index 3e1a3ffba291..e3722ed04fbb 100644
--- a/arch/m68k/include/asm/dma-mapping.h
+++ b/arch/m68k/include/asm/dma-mapping.h
@@ -9,10 +9,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &m68k_dma_ops;
}
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
-{
- /* we use coherent allocation, so not much to do here. */
-}
-
#endif /* _M68K_DMA_MAPPING_H */
diff --git a/arch/m68k/include/asm/m5441xsim.h b/arch/m68k/include/asm/m5441xsim.h
index 4e9095b9480a..c87556d5581c 100644
--- a/arch/m68k/include/asm/m5441xsim.h
+++ b/arch/m68k/include/asm/m5441xsim.h
@@ -278,4 +278,10 @@
#define MCFGPIO_IRQ_VECBASE (MCFINT_VECBASE - MCFGPIO_IRQ_MIN)
#define MCFGPIO_PIN_MAX 87
+/*
+ * DSPI module.
+ */
+#define MCFDSPI_BASE0 0xfc05c000
+#define MCF_IRQ_DSPI0 (MCFINT0_VECBASE + MCFINT0_DSPI0)
+
#endif /* m5441xsim_h */
diff --git a/arch/m68k/include/asm/mac_iop.h b/arch/m68k/include/asm/mac_iop.h
index 73dae2abeba3..32f1c79c818f 100644
--- a/arch/m68k/include/asm/mac_iop.h
+++ b/arch/m68k/include/asm/mac_iop.h
@@ -159,6 +159,7 @@ extern void iop_complete_message(struct iop_msg *);
extern void iop_upload_code(uint, __u8 *, uint, __u16);
extern void iop_download_code(uint, __u8 *, uint, __u16);
extern __u8 *iop_compare_code(uint, __u8 *, uint, __u16);
+extern void iop_ism_irq_poll(uint);
extern void iop_register_interrupts(void);
diff --git a/arch/m68k/include/asm/mcfmmu.h b/arch/m68k/include/asm/mcfmmu.h
index 10f9930ec49a..283352ab0d5d 100644
--- a/arch/m68k/include/asm/mcfmmu.h
+++ b/arch/m68k/include/asm/mcfmmu.h
@@ -106,6 +106,7 @@ static inline void mmu_write(u32 a, u32 v)
}
void cf_bootmem_alloc(void);
+void cf_mmu_context_init(void);
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word);
#endif
diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h
index 836acea8f758..f5b1852b4663 100644
--- a/arch/m68k/include/asm/mmu_context.h
+++ b/arch/m68k/include/asm/mmu_context.h
@@ -92,7 +92,6 @@ static inline void activate_mm(struct mm_struct *active_mm,
#define deactivate_mm(tsk, mm) do { } while (0)
-extern void mmu_context_init(void);
#define prepare_arch_switch(next) load_ksp_mmu(next)
static inline void load_ksp_mmu(struct task_struct *task)
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index 854e09f403e7..19a92982629a 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -4,3 +4,8 @@
#else
#include "setup_no.c"
#endif
+
+#if IS_ENABLED(CONFIG_INPUT_M68K_BEEP)
+void (*mach_beep)(unsigned int, unsigned int);
+EXPORT_SYMBOL(mach_beep);
+#endif
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 657a9843ebfa..dd25bfc22fb4 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -106,10 +106,6 @@ EXPORT_SYMBOL(mach_heartbeat);
#ifdef CONFIG_M68K_L2_CACHE
void (*mach_l2_flush) (int);
#endif
-#if IS_ENABLED(CONFIG_INPUT_M68K_BEEP)
-void (*mach_beep)(unsigned int, unsigned int);
-EXPORT_SYMBOL(mach_beep);
-#endif
#if defined(CONFIG_ISA) && defined(MULTI_ISA)
int isa_type;
int isa_sex;
@@ -344,6 +340,8 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_COLDFIRE
case MACH_M54XX:
case MACH_M5441X:
+ cf_bootmem_alloc();
+ cf_mmu_context_init();
config_BSP(NULL, 0);
break;
#endif
diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c
index 850f0dc284ca..c7ea6475ef9b 100644
--- a/arch/m68k/mac/baboon.c
+++ b/arch/m68k/mac/baboon.c
@@ -37,7 +37,7 @@ void __init baboon_init(void)
baboon = (struct baboon *) BABOON_BASE;
baboon_present = 1;
- printk("Baboon detected at %p\n", baboon);
+ pr_debug("Baboon detected at %p\n", baboon);
}
/*
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 22123f7e8f75..16cd5cea5207 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -898,8 +898,8 @@ static void __init mac_identify(void)
mac_bi_data.id, mac_bi_data.cpuid, mac_bi_data.memsize);
iop_init();
- via_init();
oss_init();
+ via_init();
psc_init();
baboon_init();
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index 4c1e606e7d03..9bfa17015768 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -273,10 +273,10 @@ void __init iop_init(void)
int i;
if (iop_scc_present) {
- pr_info("IOP: detected SCC IOP at %p\n", iop_base[IOP_NUM_SCC]);
+ pr_debug("SCC IOP detected at %p\n", iop_base[IOP_NUM_SCC]);
}
if (iop_ism_present) {
- pr_info("IOP: detected ISM IOP at %p\n", iop_base[IOP_NUM_ISM]);
+ pr_debug("ISM IOP detected at %p\n", iop_base[IOP_NUM_ISM]);
iop_start(iop_base[IOP_NUM_ISM]);
iop_alive(iop_base[IOP_NUM_ISM]); /* clears the alive flag */
}
@@ -598,3 +598,12 @@ irqreturn_t iop_ism_irq(int irq, void *dev_id)
}
return IRQ_HANDLED;
}
+
+void iop_ism_irq_poll(uint iop_num)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ iop_ism_irq(0, (void *)iop_num);
+ local_irq_restore(flags);
+}
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index 34c0993dc689..3f81892527ad 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -32,18 +32,18 @@ volatile struct mac_oss *oss;
/*
* Initialize the OSS
- *
- * The OSS "detection" code is actually in via_init() which is always called
- * before us. Thus we can count on oss_present being valid on entry.
*/
void __init oss_init(void)
{
int i;
- if (!oss_present) return;
+ if (macintosh_config->ident != MAC_MODEL_IIFX)
+ return;
oss = (struct mac_oss *) OSS_BASE;
+ pr_debug("OSS detected at %p", oss);
+ oss_present = 1;
/* Disable all interrupts. Unlike a VIA it looks like we */
/* do this by setting the source's interrupt level to zero. */
@@ -53,14 +53,6 @@ void __init oss_init(void)
}
/*
- * Initialize OSS for Nubus access
- */
-
-void __init oss_nubus_init(void)
-{
-}
-
-/*
* Handle miscellaneous OSS interrupts.
*/
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index 439a2a2e5874..8d547df4e16c 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -42,7 +42,7 @@ static void psc_debug_dump(void)
return;
for (i = 0x30 ; i < 0x70 ; i += 0x10) {
- printk("PSC #%d: IFR = 0x%02X IER = 0x%02X\n",
+ printk(KERN_DEBUG "PSC #%d: IFR = 0x%02X IER = 0x%02X\n",
i >> 4,
(int) psc_read_byte(pIFRbase + i),
(int) psc_read_byte(pIERbase + i));
@@ -59,14 +59,12 @@ static __init void psc_dma_die_die_die(void)
{
int i;
- printk("Killing all PSC DMA channels...");
for (i = 0 ; i < 9 ; i++) {
psc_write_word(PSC_CTL_BASE + (i << 4), 0x8800);
psc_write_word(PSC_CTL_BASE + (i << 4), 0x1000);
psc_write_word(PSC_CMD_BASE + (i << 5), 0x1100);
psc_write_word(PSC_CMD_BASE + (i << 5) + 0x10, 0x1100);
}
- printk("done!\n");
}
/*
@@ -92,7 +90,7 @@ void __init psc_init(void)
psc = (void *) PSC_BASE;
- printk("PSC detected at %p\n", psc);
+ pr_debug("PSC detected at %p\n", psc);
psc_dma_die_die_die();
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index 9f59a662ace5..acdabbeecfd2 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -107,6 +107,7 @@ static int gIER,gIFR,gBufA,gBufB;
static u8 nubus_disabled;
void via_debug_dump(void);
+static void via_nubus_init(void);
/*
* Initialize the VIAs
@@ -114,29 +115,25 @@ void via_debug_dump(void);
* First we figure out where they actually _are_ as well as what type of
* VIA we have for VIA2 (it could be a real VIA or an RBV or even an OSS.)
* Then we pretty much clear them out and disable all IRQ sources.
- *
- * Note: the OSS is actually "detected" here and not in oss_init(). It just
- * seems more logical to do it here since via_init() needs to know
- * these things anyways.
*/
void __init via_init(void)
{
- switch(macintosh_config->via_type) {
+ via1 = (void *)VIA1_BASE;
+ pr_debug("VIA1 detected at %p\n", via1);
+
+ if (oss_present) {
+ via2 = NULL;
+ rbv_present = 0;
+ } else {
+ switch (macintosh_config->via_type) {
/* IIci, IIsi, IIvx, IIvi (P6xx), LC series */
case MAC_VIA_IICI:
- via1 = (void *) VIA1_BASE;
- if (macintosh_config->ident == MAC_MODEL_IIFX) {
- via2 = NULL;
- rbv_present = 0;
- oss_present = 1;
- } else {
- via2 = (void *) RBV_BASE;
- rbv_present = 1;
- oss_present = 0;
- }
+ via2 = (void *)RBV_BASE;
+ pr_debug("VIA2 (RBV) detected at %p\n", via2);
+ rbv_present = 1;
if (macintosh_config->ident == MAC_MODEL_LCIII) {
rbv_clear = 0x00;
} else {
@@ -155,29 +152,19 @@ void __init via_init(void)
case MAC_VIA_QUADRA:
case MAC_VIA_II:
- via1 = (void *) VIA1_BASE;
via2 = (void *) VIA2_BASE;
+ pr_debug("VIA2 detected at %p\n", via2);
rbv_present = 0;
- oss_present = 0;
rbv_clear = 0x00;
gIER = vIER;
gIFR = vIFR;
gBufA = vBufA;
gBufB = vBufB;
break;
+
default:
panic("UNKNOWN VIA TYPE");
- }
-
- printk(KERN_INFO "VIA1 at %p is a 6522 or clone\n", via1);
-
- printk(KERN_INFO "VIA2 at %p is ", via2);
- if (rbv_present) {
- printk("an RBV\n");
- } else if (oss_present) {
- printk("an OSS\n");
- } else {
- printk("a 6522 or clone\n");
+ }
}
#ifdef DEBUG_VIA
@@ -253,6 +240,8 @@ void __init via_init(void)
via2[vACR] &= ~0x03; /* disable port A & B latches */
}
+ via_nubus_init();
+
/* Everything below this point is VIA2 only... */
if (rbv_present)
@@ -304,9 +293,9 @@ void via_debug_dump(void)
(uint) via1[vDirA], (uint) via1[vDirB], (uint) via1[vACR]);
printk(KERN_DEBUG " PCR = 0x%02X IFR = 0x%02X IER = 0x%02X\n",
(uint) via1[vPCR], (uint) via1[vIFR], (uint) via1[vIER]);
- if (oss_present) {
- printk(KERN_DEBUG "VIA2: <OSS>\n");
- } else if (rbv_present) {
+ if (!via2)
+ return;
+ if (rbv_present) {
printk(KERN_DEBUG "VIA2: IFR = 0x%02X IER = 0x%02X\n",
(uint) via2[rIFR], (uint) via2[rIER]);
printk(KERN_DEBUG " SIFR = 0x%02X SIER = 0x%02X\n",
@@ -374,7 +363,7 @@ int via_get_cache_disable(void)
* Initialize VIA2 for Nubus access
*/
-void __init via_nubus_init(void)
+static void __init via_nubus_init(void)
{
/* unlock nubus transactions */
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 8d1408583cf4..2925d795d71a 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -170,7 +170,7 @@ void __init cf_bootmem_alloc(void)
max_pfn = max_low_pfn = PFN_DOWN(_ramend);
high_memory = (void *)_ramend;
- m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
+ m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
module_fixup(NULL, __start_fixup, __stop_fixup);
/* setup bootmem data */
@@ -184,7 +184,7 @@ void __init cf_bootmem_alloc(void)
* Initialize the context management stuff.
* The following was taken from arch/ppc/mmu_context.c
*/
-void __init mmu_context_init(void)
+void __init cf_mmu_context_init(void)
{
/*
* Some processors have too few contexts to reserve one for
diff --git a/arch/metag/boot/.gitignore b/arch/metag/boot/.gitignore
index 2d6c0c160884..6c662ddb909a 100644
--- a/arch/metag/boot/.gitignore
+++ b/arch/metag/boot/.gitignore
@@ -1,4 +1,3 @@
vmlinux*
uImage*
ramdisk.*
-*.dtb*
diff --git a/arch/metag/boot/dts/Makefile b/arch/metag/boot/dts/Makefile
index ad5dde558db1..f0a180f62766 100644
--- a/arch/metag/boot/dts/Makefile
+++ b/arch/metag/boot/dts/Makefile
@@ -13,10 +13,4 @@ endif
dtb-$(CONFIG_METAG_BUILTIN_DTB) += $(builtindtb-y).dtb
obj-$(CONFIG_METAG_BUILTIN_DTB) += $(builtindtb-y).dtb.o
-dtstree := $(srctree)/$(src)
-dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
-
.SECONDARY: $(obj)/$(builtindtb-y).dtb.S
-
-always += $(dtb-y)
-clean-files += *.dtb *.dtb.S
diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h
index 7465ce54a4a9..cfd6a0505b56 100644
--- a/arch/metag/include/asm/dma-mapping.h
+++ b/arch/metag/include/asm/dma-mapping.h
@@ -9,14 +9,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &metag_dma_ops;
}
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
-}
-
#endif
diff --git a/arch/metag/include/asm/spinlock.h b/arch/metag/include/asm/spinlock.h
index 349938c35f2d..4497c232d9c1 100644
--- a/arch/metag/include/asm/spinlock.h
+++ b/arch/metag/include/asm/spinlock.h
@@ -16,13 +16,4 @@
* locked.
*/
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/metag/include/asm/spinlock_lnkget.h b/arch/metag/include/asm/spinlock_lnkget.h
index 029935560b7f..dfd780eab350 100644
--- a/arch/metag/include/asm/spinlock_lnkget.h
+++ b/arch/metag/include/asm/spinlock_lnkget.h
@@ -137,21 +137,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
: "memory");
}
-/* write_can_lock - would write_trylock() succeed? */
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
- int ret;
-
- asm volatile ("LNKGETD %0, [%1]\n"
- "CMP %0, #0\n"
- "MOV %0, #1\n"
- "XORNZ %0, %0, %0\n"
- : "=&d" (ret)
- : "da" (&rw->lock)
- : "cc");
- return ret;
-}
-
/*
* Read locks are a bit more hairy:
* - Exclusively load the lock value.
@@ -225,26 +210,4 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
return tmp;
}
-/* read_can_lock - would read_trylock() succeed? */
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
- int tmp;
-
- asm volatile ("LNKGETD %0, [%1]\n"
- "CMP %0, %2\n"
- "MOV %0, #1\n"
- "XORZ %0, %0, %0\n"
- : "=&d" (tmp)
- : "da" (&rw->lock), "bd" (0x80000000)
- : "cc");
- return tmp;
-}
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SPINLOCK_LNKGET_H */
diff --git a/arch/metag/include/asm/spinlock_lock1.h b/arch/metag/include/asm/spinlock_lock1.h
index 12de9862d190..c0bd81bbe18c 100644
--- a/arch/metag/include/asm/spinlock_lock1.h
+++ b/arch/metag/include/asm/spinlock_lock1.h
@@ -105,16 +105,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
rw->lock = 0;
}
-/* write_can_lock - would write_trylock() succeed? */
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
- unsigned int ret;
-
- barrier();
- ret = rw->lock;
- return (ret == 0);
-}
-
/*
* Read locks are a bit more hairy:
* - Exclusively load the lock value.
@@ -172,14 +162,4 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
return (ret < 0x80000000);
}
-/* read_can_lock - would read_trylock() succeed? */
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
- unsigned int ret;
-
- barrier();
- ret = rw->lock;
- return (ret < 0x80000000);
-}
-
#endif /* __ASM_SPINLOCK_LOCK1_H */
diff --git a/arch/microblaze/boot/.gitignore b/arch/microblaze/boot/.gitignore
index bf0459186027..679502d64a97 100644
--- a/arch/microblaze/boot/.gitignore
+++ b/arch/microblaze/boot/.gitignore
@@ -1,3 +1,2 @@
-*.dtb
linux.bin*
simpleImage.*
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 47f94cc383b6..fd46385a4c97 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -35,4 +35,4 @@ $(obj)/simpleImage.%: vmlinux FORCE
$(call if_changed,strip)
@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
-clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb
+clean-files += simpleImage.*.unstrip linux.bin.ub
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index e15cd2f76e23..6b9ea39405b8 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -16,22 +16,6 @@
#define _ASM_MICROBLAZE_DMA_MAPPING_H
/*
- * See Documentation/DMA-API-HOWTO.txt and
- * Documentation/DMA-API.txt for documentation.
- */
-
-#include <linux/types.h>
-#include <linux/cache.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <asm/io.h>
-#include <asm/cacheflush.h>
-
-#define __dma_alloc_coherent(dev, gfp, size, handle) NULL
-#define __dma_free_coherent(size, addr) ((void)0)
-
-/*
* Available generic sets of operations
*/
extern const struct dma_map_ops dma_direct_ops;
@@ -41,27 +25,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &dma_direct_ops;
}
-static inline void __dma_sync(unsigned long paddr,
- size_t size, enum dma_data_direction direction)
-{
- switch (direction) {
- case DMA_TO_DEVICE:
- case DMA_BIDIRECTIONAL:
- flush_dcache_range(paddr, paddr + size);
- break;
- case DMA_FROM_DEVICE:
- invalidate_dcache_range(paddr, paddr + size);
- break;
- default:
- BUG();
- }
-}
-
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- __dma_sync(virt_to_phys(vaddr), size, (int)direction);
-}
-
#endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index e63f154be964..990bf9ea0ec6 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -13,6 +13,7 @@
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>
+#include <asm/cacheflush.h>
#define NOT_COHERENT_CACHE
@@ -52,6 +53,22 @@ static void dma_direct_free_coherent(struct device *dev, size_t size,
#endif
}
+static inline void __dma_sync(unsigned long paddr,
+ size_t size, enum dma_data_direction direction)
+{
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ flush_dcache_range(paddr, paddr + size);
+ break;
+ case DMA_FROM_DEVICE:
+ invalidate_dcache_range(paddr, paddr + size);
+ break;
+ default:
+ BUG();
+ }
+}
+
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
unsigned long attrs)
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index f5f1bdb292de..ac7ad54f984f 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -34,7 +34,6 @@ platforms += sibyte
platforms += sni
platforms += txx9
platforms += vr41xx
-platforms += xilfpga
# include the platform specific files
include $(patsubst %, $(srctree)/arch/mips/%/Platform, $(platforms))
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5d3284d20678..350a990fc719 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -65,7 +65,7 @@ config MIPS
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_VIRT_CPU_ACCOUNTING_GEN
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
select IRQ_FORCED_THREADING
select MODULES_USE_ELF_RELA if MODULES && 64BIT
select MODULES_USE_ELF_REL if MODULES
@@ -78,7 +78,7 @@ menu "Machine selection"
choice
prompt "System type"
- default SGI_IP22
+ default MIPS_GENERIC
config MIPS_GENERIC
bool "Generic board-agnostic MIPS kernel"
@@ -233,6 +233,7 @@ config BMIPS_GENERIC
select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
+ select HARDIRQS_SW_RESEND
help
Build a generic DT-based kernel image that boots on select
BCM33xx cable modem chips, BCM63xx DSL chips, and BCM7xxx set-top
@@ -276,6 +277,7 @@ config BCM63XX
select GPIOLIB
select HAVE_CLK
select MIPS_L1_CACHE_SHIFT_4
+ select CLKDEV_LOOKUP
help
Support for BCM63XX based boards
@@ -468,29 +470,6 @@ config MACH_PISTACHIO
help
This enables support for the IMG Pistachio SoC platform.
-config MACH_XILFPGA
- bool "MIPSfpga Xilinx based boards"
- select BOOT_ELF32
- select BOOT_RAW
- select BUILTIN_DTB
- select CEVT_R4K
- select COMMON_CLK
- select CSRC_R4K
- select GPIOLIB
- select IRQ_MIPS_CPU
- select LIBFDT
- select MIPS_CPU_SCACHE
- select SYS_HAS_EARLY_PRINTK
- select SYS_HAS_CPU_MIPS32_R2
- select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_LITTLE_ENDIAN
- select SYS_SUPPORTS_ZBOOT_UART16550
- select USE_OF
- select USE_GENERIC_EARLY_PRINTK_8250
- select XILINX_INTC
- help
- This enables support for the IMG University Program MIPSfpga platform.
-
config MIPS_MALTA
bool "MIPS Malta board"
select ARCH_MAY_HAVE_PC_FDC
@@ -916,7 +895,8 @@ config CAVIUM_OCTEON_SOC
select USE_OF
select ARCH_SPARSEMEM_ENABLE
select SYS_SUPPORTS_SMP
- select NR_CPUS_DEFAULT_16
+ select NR_CPUS_DEFAULT_64
+ select MIPS_NR_CPU_NR_MAP_1024
select BUILTIN_DTB
select MTD_COMPLEX_MAPPINGS
select SYS_SUPPORTS_RELOCATABLE
@@ -1034,7 +1014,6 @@ source "arch/mips/loongson32/Kconfig"
source "arch/mips/loongson64/Kconfig"
source "arch/mips/netlogic/Kconfig"
source "arch/mips/paravirt/Kconfig"
-source "arch/mips/xilfpga/Kconfig"
endmenu
@@ -2726,6 +2705,15 @@ config NR_CPUS
config MIPS_PERF_SHARED_TC_COUNTERS
bool
+config MIPS_NR_CPU_NR_MAP_1024
+ bool
+
+config MIPS_NR_CPU_NR_MAP
+ int
+ depends on SMP
+ default 1024 if MIPS_NR_CPU_NR_MAP_1024
+ default NR_CPUS if !MIPS_NR_CPU_NR_MAP_1024
+
#
# Timer Interrupt Frequency Configuration
#
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index a96d97a806c9..9f6a26d72f9f 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -15,7 +15,7 @@
archscripts: scripts_basic
$(Q)$(MAKE) $(build)=arch/mips/boot/tools relocs
-KBUILD_DEFCONFIG := ip22_defconfig
+KBUILD_DEFCONFIG := 32r2el_defconfig
#
# Select the object file format to substitute into the linker script.
@@ -544,3 +544,7 @@ sead3_defconfig:
.PHONY: sead3micro_defconfig
sead3micro_defconfig:
$(Q)$(MAKE) -f $(srctree)/Makefile micro32r2el_defconfig BOARDS=sead-3
+
+.PHONY: xilfpga_defconfig
+xilfpga_defconfig:
+ $(Q)$(MAKE) -f $(srctree)/Makefile 32r2el_defconfig BOARDS=xilfpga
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index 6fb6b3faa158..328d697e72b4 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -30,6 +30,7 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
+#include <linux/gpio/machine.h>
#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/reboot.h>
@@ -218,10 +219,27 @@ static struct platform_device gpr_led_devices = {
/*
* I2C
*/
+static struct gpiod_lookup_table gpr_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ /*
+ * This should be on "GPIO2" which has base at 200 so
+ * the global numbers 209 and 210 should correspond to
+ * local offsets 9 and 10.
+ */
+ GPIO_LOOKUP_IDX("alchemy-gpio2", 9, NULL, 0,
+ GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("alchemy-gpio2", 10, NULL, 1,
+ GPIO_ACTIVE_HIGH),
+ },
+};
+
static struct i2c_gpio_platform_data gpr_i2c_data = {
- .sda_pin = 209,
+ /*
+ * The open drain mode is hardwired somewhere or an electrical
+ * property of the alchemy GPIO controller.
+ */
.sda_is_open_drain = 1,
- .scl_pin = 210,
.scl_is_open_drain = 1,
.udelay = 2, /* ~100 kHz */
.timeout = HZ,
@@ -295,6 +313,7 @@ arch_initcall(gpr_pci_init);
static int __init gpr_dev_init(void)
{
+ gpiod_add_lookup_table(&gpr_i2c_gpiod_table);
i2c_register_board_info(0, gpr_i2c_info, ARRAY_SIZE(gpr_i2c_info));
return platform_add_devices(gpr_devices, ARRAY_SIZE(gpr_devices));
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
index a83c7b7e2eb1..6b6f6851df92 100644
--- a/arch/mips/alchemy/common/clock.c
+++ b/arch/mips/alchemy/common/clock.c
@@ -143,7 +143,7 @@ void __init alchemy_set_lpj(void)
preset_lpj /= 2 * HZ;
}
-static struct clk_ops alchemy_clkops_cpu = {
+static const struct clk_ops alchemy_clkops_cpu = {
.recalc_rate = alchemy_clk_cpu_recalc,
};
@@ -224,7 +224,7 @@ static long alchemy_clk_aux_roundr(struct clk_hw *hw,
return (*parent_rate) * mult;
}
-static struct clk_ops alchemy_clkops_aux = {
+static const struct clk_ops alchemy_clkops_aux = {
.recalc_rate = alchemy_clk_aux_recalc,
.set_rate = alchemy_clk_aux_setr,
.round_rate = alchemy_clk_aux_roundr,
@@ -576,7 +576,7 @@ static int alchemy_clk_fgv1_detr(struct clk_hw *hw,
}
/* Au1000, Au1100, Au15x0, Au12x0 */
-static struct clk_ops alchemy_clkops_fgenv1 = {
+static const struct clk_ops alchemy_clkops_fgenv1 = {
.recalc_rate = alchemy_clk_fgv1_recalc,
.determine_rate = alchemy_clk_fgv1_detr,
.set_rate = alchemy_clk_fgv1_setr,
@@ -717,7 +717,7 @@ static int alchemy_clk_fgv2_detr(struct clk_hw *hw,
}
/* Au1300 larger input mux, no separate disable bit, flexible divider */
-static struct clk_ops alchemy_clkops_fgenv2 = {
+static const struct clk_ops alchemy_clkops_fgenv2 = {
.recalc_rate = alchemy_clk_fgv2_recalc,
.determine_rate = alchemy_clk_fgv2_detr,
.set_rate = alchemy_clk_fgv2_setr,
@@ -925,7 +925,7 @@ static int alchemy_clk_csrc_detr(struct clk_hw *hw,
return alchemy_clk_fgcs_detr(hw, req, scale, 4);
}
-static struct clk_ops alchemy_clkops_csrc = {
+static const struct clk_ops alchemy_clkops_csrc = {
.recalc_rate = alchemy_clk_csrc_recalc,
.determine_rate = alchemy_clk_csrc_detr,
.set_rate = alchemy_clk_csrc_setr,
diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
index be78298dffb4..6b2c6f3baefa 100644
--- a/arch/mips/ath79/mach-pb44.c
+++ b/arch/mips/ath79/mach-pb44.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-#include <linux/i2c-gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/platform_data/pcf857x.h>
#include "machtypes.h"
@@ -33,16 +33,21 @@
#define PB44_KEYS_POLL_INTERVAL 20 /* msecs */
#define PB44_KEYS_DEBOUNCE_INTERVAL (3 * PB44_KEYS_POLL_INTERVAL)
-static struct i2c_gpio_platform_data pb44_i2c_gpio_data = {
- .sda_pin = PB44_GPIO_I2C_SDA,
- .scl_pin = PB44_GPIO_I2C_SCL,
+static struct gpiod_lookup_table pb44_i2c_gpiod_table = {
+ .dev_id = "i2c-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA,
+ NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SCL,
+ NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
};
static struct platform_device pb44_i2c_gpio_device = {
.name = "i2c-gpio",
.id = 0,
.dev = {
- .platform_data = &pb44_i2c_gpio_data,
+ .platform_data = NULL,
}
};
@@ -103,6 +108,7 @@ static struct ath79_spi_platform_data pb44_spi_data = {
static void __init pb44_init(void)
{
+ gpiod_add_lookup_table(&pb44_i2c_gpiod_table);
i2c_register_board_info(0, pb44_i2c_board_info,
ARRAY_SIZE(pb44_i2c_board_info));
platform_device_register(&pb44_i2c_gpio_device);
diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
index d4f2407a42c6..8307a8a02667 100644
--- a/arch/mips/bcm47xx/leds.c
+++ b/arch/mips/bcm47xx/leds.c
@@ -331,7 +331,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = {
/* Verified on: WRT54GS V1.0 */
static const struct gpio_led
bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = {
- BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
};
diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
index 19577f771c1f..164115944a7f 100644
--- a/arch/mips/bcm63xx/clk.c
+++ b/arch/mips/bcm63xx/clk.c
@@ -11,6 +11,7 @@
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/clkdev.h>
#include <linux/delay.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_io.h>
@@ -121,21 +122,56 @@ static struct clk clk_ephy = {
};
/*
+ * Ethernet switch SAR clock
+ */
+static void swpkt_sar_set(struct clk *clk, int enable)
+{
+ if (BCMCPU_IS_6368())
+ bcm_hwclock_set(CKCTL_6368_SWPKT_SAR_EN, enable);
+ else
+ return;
+}
+
+static struct clk clk_swpkt_sar = {
+ .set = swpkt_sar_set,
+};
+
+/*
+ * Ethernet switch USB clock
+ */
+static void swpkt_usb_set(struct clk *clk, int enable)
+{
+ if (BCMCPU_IS_6368())
+ bcm_hwclock_set(CKCTL_6368_SWPKT_USB_EN, enable);
+ else
+ return;
+}
+
+static struct clk clk_swpkt_usb = {
+ .set = swpkt_usb_set,
+};
+
+/*
* Ethernet switch clock
*/
static void enetsw_set(struct clk *clk, int enable)
{
- if (BCMCPU_IS_6328())
+ if (BCMCPU_IS_6328()) {
bcm_hwclock_set(CKCTL_6328_ROBOSW_EN, enable);
- else if (BCMCPU_IS_6362())
+ } else if (BCMCPU_IS_6362()) {
bcm_hwclock_set(CKCTL_6362_ROBOSW_EN, enable);
- else if (BCMCPU_IS_6368())
- bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
- CKCTL_6368_SWPKT_USB_EN |
- CKCTL_6368_SWPKT_SAR_EN,
- enable);
- else
+ } else if (BCMCPU_IS_6368()) {
+ if (enable) {
+ clk_enable_unlocked(&clk_swpkt_sar);
+ clk_enable_unlocked(&clk_swpkt_usb);
+ } else {
+ clk_disable_unlocked(&clk_swpkt_usb);
+ clk_disable_unlocked(&clk_swpkt_sar);
+ }
+ bcm_hwclock_set(CKCTL_6368_ROBOSW_EN, enable);
+ } else {
return;
+ }
if (enable) {
/* reset switch core afer clock change */
@@ -247,6 +283,10 @@ static struct clk clk_hsspi = {
.set = hsspi_set,
};
+/*
+ * HSSPI PLL
+ */
+static struct clk clk_hsspi_pll;
/*
* XTM clock
@@ -256,8 +296,12 @@ static void xtm_set(struct clk *clk, int enable)
if (!BCMCPU_IS_6368())
return;
- bcm_hwclock_set(CKCTL_6368_SAR_EN |
- CKCTL_6368_SWPKT_SAR_EN, enable);
+ if (enable)
+ clk_enable_unlocked(&clk_swpkt_sar);
+ else
+ clk_disable_unlocked(&clk_swpkt_sar);
+
+ bcm_hwclock_set(CKCTL_6368_SAR_EN, enable);
if (enable) {
/* reset sar core afer clock change */
@@ -359,44 +403,128 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
}
EXPORT_SYMBOL_GPL(clk_round_rate);
-struct clk *clk_get(struct device *dev, const char *id)
-{
- if (!strcmp(id, "enet0"))
- return &clk_enet0;
- if (!strcmp(id, "enet1"))
- return &clk_enet1;
- if (!strcmp(id, "enetsw"))
- return &clk_enetsw;
- if (!strcmp(id, "ephy"))
- return &clk_ephy;
- if (!strcmp(id, "usbh"))
- return &clk_usbh;
- if (!strcmp(id, "usbd"))
- return &clk_usbd;
- if (!strcmp(id, "spi"))
- return &clk_spi;
- if (!strcmp(id, "hsspi"))
- return &clk_hsspi;
- if (!strcmp(id, "xtm"))
- return &clk_xtm;
- if (!strcmp(id, "periph"))
- return &clk_periph;
- if ((BCMCPU_IS_3368() || BCMCPU_IS_6358()) && !strcmp(id, "pcm"))
- return &clk_pcm;
- if ((BCMCPU_IS_6362() || BCMCPU_IS_6368()) && !strcmp(id, "ipsec"))
- return &clk_ipsec;
- if ((BCMCPU_IS_6328() || BCMCPU_IS_6362()) && !strcmp(id, "pcie"))
- return &clk_pcie;
- return ERR_PTR(-ENOENT);
-}
+static struct clk_lookup bcm3368_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enet0", &clk_enet0),
+ CLKDEV_INIT(NULL, "enet1", &clk_enet1),
+ CLKDEV_INIT(NULL, "ephy", &clk_ephy),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "spi", &clk_spi),
+ CLKDEV_INIT(NULL, "pcm", &clk_pcm),
+ CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet0),
+ CLKDEV_INIT("bcm63xx_enet.1", "enet", &clk_enet1),
+};
-EXPORT_SYMBOL(clk_get);
+static struct clk_lookup bcm6328_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
+ CLKDEV_INIT("bcm63xx-hsspi.0", "pll", &clk_hsspi_pll),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enetsw", &clk_enetsw),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "hsspi", &clk_hsspi),
+ CLKDEV_INIT(NULL, "pcie", &clk_pcie),
+};
-void clk_put(struct clk *clk)
-{
-}
+static struct clk_lookup bcm6338_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enet0", &clk_enet0),
+ CLKDEV_INIT(NULL, "enet1", &clk_enet1),
+ CLKDEV_INIT(NULL, "ephy", &clk_ephy),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "spi", &clk_spi),
+ CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet_misc),
+};
-EXPORT_SYMBOL(clk_put);
+static struct clk_lookup bcm6345_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enet0", &clk_enet0),
+ CLKDEV_INIT(NULL, "enet1", &clk_enet1),
+ CLKDEV_INIT(NULL, "ephy", &clk_ephy),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "spi", &clk_spi),
+ CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet_misc),
+};
+
+static struct clk_lookup bcm6348_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enet0", &clk_enet0),
+ CLKDEV_INIT(NULL, "enet1", &clk_enet1),
+ CLKDEV_INIT(NULL, "ephy", &clk_ephy),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "spi", &clk_spi),
+ CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet_misc),
+ CLKDEV_INIT("bcm63xx_enet.1", "enet", &clk_enet_misc),
+};
+
+static struct clk_lookup bcm6358_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enet0", &clk_enet0),
+ CLKDEV_INIT(NULL, "enet1", &clk_enet1),
+ CLKDEV_INIT(NULL, "ephy", &clk_ephy),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "spi", &clk_spi),
+ CLKDEV_INIT(NULL, "pcm", &clk_pcm),
+ CLKDEV_INIT(NULL, "swpkt_sar", &clk_swpkt_sar),
+ CLKDEV_INIT(NULL, "swpkt_usb", &clk_swpkt_usb),
+ CLKDEV_INIT("bcm63xx_enet.0", "enet", &clk_enet0),
+ CLKDEV_INIT("bcm63xx_enet.1", "enet", &clk_enet1),
+};
+
+static struct clk_lookup bcm6362_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
+ CLKDEV_INIT("bcm63xx-hsspi.0", "pll", &clk_hsspi_pll),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enetsw", &clk_enetsw),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "spi", &clk_spi),
+ CLKDEV_INIT(NULL, "hsspi", &clk_hsspi),
+ CLKDEV_INIT(NULL, "pcie", &clk_pcie),
+ CLKDEV_INIT(NULL, "ipsec", &clk_ipsec),
+};
+
+static struct clk_lookup bcm6368_clks[] = {
+ /* fixed rate clocks */
+ CLKDEV_INIT(NULL, "periph", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.0", "refclk", &clk_periph),
+ CLKDEV_INIT("bcm63xx_uart.1", "refclk", &clk_periph),
+ /* gated clocks */
+ CLKDEV_INIT(NULL, "enetsw", &clk_enetsw),
+ CLKDEV_INIT(NULL, "usbh", &clk_usbh),
+ CLKDEV_INIT(NULL, "usbd", &clk_usbd),
+ CLKDEV_INIT(NULL, "spi", &clk_spi),
+ CLKDEV_INIT(NULL, "xtm", &clk_xtm),
+ CLKDEV_INIT(NULL, "ipsec", &clk_ipsec),
+};
#define HSSPI_PLL_HZ_6328 133333333
#define HSSPI_PLL_HZ_6362 400000000
@@ -404,11 +532,31 @@ EXPORT_SYMBOL(clk_put);
static int __init bcm63xx_clk_init(void)
{
switch (bcm63xx_get_cpu_id()) {
+ case BCM3368_CPU_ID:
+ clkdev_add_table(bcm3368_clks, ARRAY_SIZE(bcm3368_clks));
+ break;
case BCM6328_CPU_ID:
- clk_hsspi.rate = HSSPI_PLL_HZ_6328;
+ clk_hsspi_pll.rate = HSSPI_PLL_HZ_6328;
+ clkdev_add_table(bcm6328_clks, ARRAY_SIZE(bcm6328_clks));
+ break;
+ case BCM6338_CPU_ID:
+ clkdev_add_table(bcm6338_clks, ARRAY_SIZE(bcm6338_clks));
+ break;
+ case BCM6345_CPU_ID:
+ clkdev_add_table(bcm6345_clks, ARRAY_SIZE(bcm6345_clks));
+ break;
+ case BCM6348_CPU_ID:
+ clkdev_add_table(bcm6348_clks, ARRAY_SIZE(bcm6348_clks));
+ break;
+ case BCM6358_CPU_ID:
+ clkdev_add_table(bcm6358_clks, ARRAY_SIZE(bcm6358_clks));
break;
case BCM6362_CPU_ID:
- clk_hsspi.rate = HSSPI_PLL_HZ_6362;
+ clk_hsspi_pll.rate = HSSPI_PLL_HZ_6362;
+ clkdev_add_table(bcm6362_clks, ARRAY_SIZE(bcm6362_clks));
+ break;
+ case BCM6368_CPU_ID:
+ clkdev_add_table(bcm6368_clks, ARRAY_SIZE(bcm6368_clks));
break;
}
diff --git a/arch/mips/boot/.gitignore b/arch/mips/boot/.gitignore
index d3962cd5ce0c..a73d6e2c4f64 100644
--- a/arch/mips/boot/.gitignore
+++ b/arch/mips/boot/.gitignore
@@ -5,4 +5,3 @@ zImage
zImage.tmp
calc_vmlinuz_load_addr
uImage
-*.dtb
diff --git a/arch/mips/boot/dts/Makefile b/arch/mips/boot/dts/Makefile
index e0a4e939f843..e2c6f131c8eb 100644
--- a/arch/mips/boot/dts/Makefile
+++ b/arch/mips/boot/dts/Makefile
@@ -1,22 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
-dts-dirs += brcm
-dts-dirs += cavium-octeon
-dts-dirs += img
-dts-dirs += ingenic
-dts-dirs += lantiq
-dts-dirs += mti
-dts-dirs += netlogic
-dts-dirs += ni
-dts-dirs += pic32
-dts-dirs += qca
-dts-dirs += ralink
-dts-dirs += xilfpga
+subdir-y += brcm
+subdir-y += cavium-octeon
+subdir-y += img
+subdir-y += ingenic
+subdir-y += lantiq
+subdir-y += mti
+subdir-y += netlogic
+subdir-y += ni
+subdir-y += pic32
+subdir-y += qca
+subdir-y += ralink
+subdir-y += xilfpga
-obj-y := $(addsuffix /, $(dts-dirs))
-
-dtstree := $(srctree)/$(src)
-dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(foreach d,$(dts-dirs), $(wildcard $(dtstree)/$(d)/*.dts)))
-
-always := $(dtb-y)
-subdir-y := $(dts-dirs)
-clean-files := *.dtb *.dtb.S
+obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y))
diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
index 9e09cc4556b3..09ba7e894bad 100644
--- a/arch/mips/boot/dts/brcm/Makefile
+++ b/arch/mips/boot/dts/brcm/Makefile
@@ -23,7 +23,6 @@ dtb-$(CONFIG_DT_NONE) += \
bcm63268-comtrend-vr-3032u.dtb \
bcm93384wvg.dtb \
bcm93384wvg_viper.dtb \
- bcm96358nb4ser.dtb \
bcm96368mvwg.dtb \
bcm9ejtagprb.dtb \
bcm97125cbmb.dtb \
@@ -39,6 +38,3 @@ obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
# Force kbuild to make empty built-in.o if necessary
obj- += dummy.o
-
-always := $(dtb-y)
-clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi
index 277cde02b744..7a3e5c8943ca 100644
--- a/arch/mips/boot/dts/brcm/bcm3368.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi
@@ -83,6 +83,7 @@
interrupts = <2>;
clocks = <&periph_clk>;
+ clock-names = "refclk";
status = "disabled";
};
@@ -95,6 +96,7 @@
interrupts = <3>;
clocks = <&periph_clk>;
+ clock-names = "refclk";
status = "disabled";
};
diff --git a/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts b/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts
index 2bc0d8401ad6..8d010b919de2 100644
--- a/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts
+++ b/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts
@@ -19,7 +19,7 @@
};
&leds0 {
- status = "ok";
+ status = "okay";
brcm,serial-leds;
brcm,serial-dat-low;
brcm,serial-shift-inv;
diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi
index 3b09f44e67fb..58790b173bb2 100644
--- a/arch/mips/boot/dts/brcm/bcm63268.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi
@@ -84,6 +84,7 @@
interrupts = <5>;
clocks = <&periph_clk>;
+ clock-names = "refclk";
status = "disabled";
};
@@ -96,6 +97,7 @@
interrupts = <34>;
clocks = <&periph_clk>;
+ clock-names = "refclk";
status = "disabled";
};
diff --git a/arch/mips/boot/dts/brcm/bcm6328.dtsi b/arch/mips/boot/dts/brcm/bcm6328.dtsi
index 644486fe4159..bf6716aa425a 100644
--- a/arch/mips/boot/dts/brcm/bcm6328.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6328.dtsi
@@ -69,6 +69,7 @@
interrupt-parent = <&periph_intc>;
interrupts = <28>;
clocks = <&periph_clk>;
+ clock-names = "refclk";
status = "disabled";
};
@@ -78,6 +79,7 @@
interrupt-parent = <&periph_intc>;
interrupts = <39>;
clocks = <&periph_clk>;
+ clock-names = "refclk";
status = "disabled";
};
diff --git a/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts b/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts
index 5e62190aa3d5..53e57cc29291 100644
--- a/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts
+++ b/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts
@@ -19,7 +19,7 @@
};
&leds0 {
- status = "ok";
+ status = "okay";
led@0 {
reg = <0>;
diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi
index 682df7fb7069..26ddae5a4247 100644
--- a/arch/mips/boot/dts/brcm/bcm6358.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi
@@ -93,6 +93,7 @@
interrupts = <2>;
clocks = <&periph_clk>;
+ clock-names = "refclk";
status = "disabled";
};
@@ -105,6 +106,7 @@
interrupts = <3>;
clocks = <&periph_clk>;
+ clock-names = "refclk";
status = "disabled";
};
diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi b/arch/mips/boot/dts/brcm/bcm6362.dtsi
index a82a5e5de672..c387793525dd 100644
--- a/arch/mips/boot/dts/brcm/bcm6362.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi
@@ -84,6 +84,7 @@
interrupts = <3>;
clocks = <&periph_clk>;
+ clock-names = "refclk";
status = "disabled";
};
@@ -96,6 +97,7 @@
interrupts = <4>;
clocks = <&periph_clk>;
+ clock-names = "refclk";
status = "disabled";
};
diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi
index 7a72f59ae457..e116a385525f 100644
--- a/arch/mips/boot/dts/brcm/bcm6368.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi
@@ -90,6 +90,7 @@
interrupt-parent = <&periph_intc>;
interrupts = <2>;
clocks = <&periph_clk>;
+ clock-names = "refclk";
status = "disabled";
};
@@ -99,6 +100,7 @@
interrupt-parent = <&periph_intc>;
interrupts = <3>;
clocks = <&periph_clk>;
+ clock-names = "refclk";
status = "disabled";
};
diff --git a/arch/mips/boot/dts/cavium-octeon/Makefile b/arch/mips/boot/dts/cavium-octeon/Makefile
index 35300e091573..f5d01b31df50 100644
--- a/arch/mips/boot/dts/cavium-octeon/Makefile
+++ b/arch/mips/boot/dts/cavium-octeon/Makefile
@@ -5,6 +5,3 @@ obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
# Force kbuild to make empty built-in.o if necessary
obj- += dummy.o
-
-always := $(dtb-y)
-clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/img/Makefile b/arch/mips/boot/dts/img/Makefile
index 139bcd887b86..3eb2597a4d6c 100644
--- a/arch/mips/boot/dts/img/Makefile
+++ b/arch/mips/boot/dts/img/Makefile
@@ -6,6 +6,3 @@ obj-$(CONFIG_MACH_PISTACHIO) += pistachio_marduk.dtb.o
# Force kbuild to make empty built-in.o if necessary
obj- += dummy.o
-
-always := $(dtb-y)
-clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/img/pistachio.dtsi b/arch/mips/boot/dts/img/pistachio.dtsi
index 57809f6a5864..f8d7e6f622cb 100644
--- a/arch/mips/boot/dts/img/pistachio.dtsi
+++ b/arch/mips/boot/dts/img/pistachio.dtsi
@@ -805,7 +805,6 @@
pinctrl-0 = <&sdhost_pins>;
pinctrl-names = "default";
fifo-depth = <0x20>;
- num-slots = <1>;
clock-frequency = <50000000>;
bus-width = <8>;
cap-mmc-highspeed;
diff --git a/arch/mips/boot/dts/ingenic/Makefile b/arch/mips/boot/dts/ingenic/Makefile
index 7798262570da..035769269cbc 100644
--- a/arch/mips/boot/dts/ingenic/Makefile
+++ b/arch/mips/boot/dts/ingenic/Makefile
@@ -6,6 +6,3 @@ obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
# Force kbuild to make empty built-in.o if necessary
obj- += dummy.o
-
-always := $(dtb-y)
-clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
index ff3298f29ec4..9b5794667aee 100644
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -219,6 +219,11 @@
status = "disabled";
};
+ watchdog: watchdog@10002000 {
+ compatible = "ingenic,jz4780-watchdog";
+ reg = <0x10002000 0x100>;
+ };
+
nemc: nemc@13410000 {
compatible = "ingenic,jz4780-nemc";
reg = <0x13410000 0x10000>;
diff --git a/arch/mips/boot/dts/lantiq/Makefile b/arch/mips/boot/dts/lantiq/Makefile
index 0c50e3246a63..00e2e540ed3f 100644
--- a/arch/mips/boot/dts/lantiq/Makefile
+++ b/arch/mips/boot/dts/lantiq/Makefile
@@ -5,6 +5,3 @@ obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
# Force kbuild to make empty built-in.o if necessary
obj- += dummy.o
-
-always := $(dtb-y)
-clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/mti/Makefile b/arch/mips/boot/dts/mti/Makefile
index 5ee06f73c348..480af498a9dd 100644
--- a/arch/mips/boot/dts/mti/Makefile
+++ b/arch/mips/boot/dts/mti/Makefile
@@ -6,6 +6,3 @@ obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
# Force kbuild to make empty built-in.o if necessary
obj- += dummy.o
-
-always := $(dtb-y)
-clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/netlogic/Makefile b/arch/mips/boot/dts/netlogic/Makefile
index 1cb2fdbd8949..2b99450d7433 100644
--- a/arch/mips/boot/dts/netlogic/Makefile
+++ b/arch/mips/boot/dts/netlogic/Makefile
@@ -9,6 +9,3 @@ obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
# Force kbuild to make empty built-in.o if necessary
obj- += dummy.o
-
-always := $(dtb-y)
-clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/ni/Makefile b/arch/mips/boot/dts/ni/Makefile
index 66cfdffc51c2..6cd9c606f025 100644
--- a/arch/mips/boot/dts/ni/Makefile
+++ b/arch/mips/boot/dts/ni/Makefile
@@ -2,6 +2,3 @@ dtb-$(CONFIG_FIT_IMAGE_FDT_NI169445) += 169445.dtb
# Force kbuild to make empty built-in.o if necessary
obj- += dummy.o
-
-always := $(dtb-y)
-clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/pic32/Makefile b/arch/mips/boot/dts/pic32/Makefile
index a86ddd289cfd..a139a0fbd7b7 100644
--- a/arch/mips/boot/dts/pic32/Makefile
+++ b/arch/mips/boot/dts/pic32/Makefile
@@ -8,6 +8,3 @@ obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
# Force kbuild to make empty built-in.o if necessary
obj- += dummy.o
-
-always := $(dtb-y)
-clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/qca/Makefile b/arch/mips/boot/dts/qca/Makefile
index eabd94eb59db..639adeac90af 100644
--- a/arch/mips/boot/dts/qca/Makefile
+++ b/arch/mips/boot/dts/qca/Makefile
@@ -8,6 +8,3 @@ dtb-$(CONFIG_ATH79) += ar9331_tl_mr3020.dtb
# Force kbuild to make empty built-in.o if necessary
obj- += dummy.o
-
-always := $(dtb-y)
-clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/ralink/Makefile b/arch/mips/boot/dts/ralink/Makefile
index a80eeeecf613..323c8bcfb602 100644
--- a/arch/mips/boot/dts/ralink/Makefile
+++ b/arch/mips/boot/dts/ralink/Makefile
@@ -10,6 +10,3 @@ obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
# Force kbuild to make empty built-in.o if necessary
obj- += dummy.o
-
-always := $(dtb-y)
-clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/ralink/rt3052_eval.dts b/arch/mips/boot/dts/ralink/rt3052_eval.dts
index 674efdd42e74..6408ff629d5a 100644
--- a/arch/mips/boot/dts/ralink/rt3052_eval.dts
+++ b/arch/mips/boot/dts/ralink/rt3052_eval.dts
@@ -47,6 +47,6 @@
};
usb@101c0000 {
- status = "ok";
+ status = "okay";
};
};
diff --git a/arch/mips/boot/dts/xilfpga/Makefile b/arch/mips/boot/dts/xilfpga/Makefile
index 498ac081e2fe..616322405ade 100644
--- a/arch/mips/boot/dts/xilfpga/Makefile
+++ b/arch/mips/boot/dts/xilfpga/Makefile
@@ -1,10 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_XILFPGA_NEXYS4DDR) += nexys4ddr.dtb
+dtb-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += nexys4ddr.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
# Force kbuild to make empty built-in.o if necessary
obj- += dummy.o
-
-always := $(dtb-y)
-clean-files := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
index 41fee03dc312..2152b7ba65fb 100644
--- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
+++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
@@ -6,6 +6,14 @@
/ {
compatible = "digilent,nexys4ddr";
+ aliases {
+ serial0 = &axi_uart16550;
+ };
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = "serial0:115200n8";
+ };
+
memory {
device_type = "memory";
reg = <0x0 0x08000000>;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index f24be0b5db50..75108ec669eb 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -862,7 +862,7 @@ int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
*/
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 0);
- cvmx_wait(100000000ull);
+ __delay(100000000ull);
for (retry_loop_cnt = 0; retry_loop_cnt < 10; retry_loop_cnt++) {
retry_cnt = 100000;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-spi.c b/arch/mips/cavium-octeon/executive/cvmx-spi.c
index 459e3b1eb61f..f51957a3e915 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-spi.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-spi.c
@@ -215,7 +215,7 @@ int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
spxx_clk_ctl.u64 = 0;
spxx_clk_ctl.s.runbist = 1;
cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
- cvmx_wait(10 * MS);
+ __delay(10 * MS);
spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
if (spxx_bist_stat.s.stat0)
cvmx_dprintf
@@ -265,14 +265,14 @@ int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
spxx_clk_ctl.s.rcvtrn = 0;
spxx_clk_ctl.s.srxdlck = 0;
cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
- cvmx_wait(100 * MS);
+ __delay(100 * MS);
/* Reset SRX0 DLL */
spxx_clk_ctl.s.srxdlck = 1;
cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
/* Waiting for Inf0 Spi4 RX DLL to lock */
- cvmx_wait(100 * MS);
+ __delay(100 * MS);
/* Enable dynamic alignment */
spxx_trn4_ctl.s.trntest = 0;
@@ -527,7 +527,7 @@ int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
spxx_clk_ctl.s.rcvtrn = 1;
spxx_clk_ctl.s.srxdlck = 1;
cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
- cvmx_wait(1000 * MS);
+ __delay(1000 * MS);
/* SRX0 clear the boot bit */
spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
@@ -536,7 +536,7 @@ int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
/* Wait for the training sequence to complete */
cvmx_dprintf("SPI%d: Waiting for training\n", interface);
- cvmx_wait(1000 * MS);
+ __delay(1000 * MS);
/* Wait a really long time here */
timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;
/*
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
index 5ea3104a3aca..b5f4ad8f2c45 100644
--- a/arch/mips/configs/ci20_defconfig
+++ b/arch/mips/configs/ci20_defconfig
@@ -38,6 +38,8 @@ CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
@@ -93,6 +95,8 @@ CONFIG_I2C_JZ4780=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_INGENIC=y
# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_JZ4740_WDT=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_DEBUG=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -110,7 +114,8 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CONFIGFS_FS=y
CONFIG_UBIFS_FS=y
-# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
CONFIG_NLS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=y
diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig
index f0c8971030c4..0108bb9f1e37 100644
--- a/arch/mips/configs/db1xxx_defconfig
+++ b/arch/mips/configs/db1xxx_defconfig
@@ -77,7 +77,6 @@ CONFIG_IPV6_MROUTE=y
CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
CONFIG_IPV6_PIMSM_V2=y
CONFIG_BRIDGE=y
-CONFIG_NETLINK_MMAP=y
CONFIG_NETLINK_DIAG=y
CONFIG_IRDA=y
CONFIG_IRLAN=y
diff --git a/arch/mips/configs/generic/board-xilfpga.config b/arch/mips/configs/generic/board-xilfpga.config
new file mode 100644
index 000000000000..9cce57385b03
--- /dev/null
+++ b/arch/mips/configs/generic/board-xilfpga.config
@@ -0,0 +1,22 @@
+# require CONFIG_CPU_MIPS32_R2=y
+# require CONFIG_CPU_LITTLE_ENDIAN=y
+
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_XILINX=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_FIT_IMAGE_FDT_XILFPGA=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_XILINX=y
+CONFIG_SENSORS_ADT7410=y
+CONFIG_TMPFS=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_NETDEVICES=y
+CONFIG_XILINX_EMACLITE=y
+CONFIG_SMSC_PHY=y
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 83e8fe2064aa..7ddfb4ef9479 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -1,3 +1,4 @@
+CONFIG_SGI_IP22=y
CONFIG_ARC_CONSOLE=y
CONFIG_CPU_R5000=y
CONFIG_NO_HZ=y
diff --git a/arch/mips/configs/xilfpga_defconfig b/arch/mips/configs/xilfpga_defconfig
deleted file mode 100644
index 829c637be3fc..000000000000
--- a/arch/mips/configs/xilfpga_defconfig
+++ /dev/null
@@ -1,75 +0,0 @@
-CONFIG_MACH_XILFPGA=y
-# CONFIG_COMPACTION is not set
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_EMBEDDED=y
-# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_COMPAT_BRK is not set
-CONFIG_SLAB=y
-# CONFIG_BLOCK is not set
-# CONFIG_SUSPEND is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-# CONFIG_UEVENT_HELPER is not set
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FW_LOADER is not set
-# CONFIG_ALLOW_DEV_COREDUMP is not set
-CONFIG_NETDEVICES=y
-# CONFIG_NET_CORE is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
-CONFIG_XILINX_EMACLITE=y
-CONFIG_SMSC_PHY=y
-# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
-# CONFIG_UNIX98_PTYS is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-# CONFIG_I2C_HELPER_AUTO is not set
-CONFIG_I2C_XILINX=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_XILINX=y
-CONFIG_SENSORS_ADT7410=y
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_MIPS_PLATFORM_DEVICES is not set
-# CONFIG_IOMMU_SUPPORT is not set
-# CONFIG_PROC_PAGE_MONITOR is not set
-CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_PANIC_ON_OOPS=y
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_FTRACE is not set
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0,115200"
diff --git a/arch/mips/generic/Kconfig b/arch/mips/generic/Kconfig
index 0b67c46666cc..52e0286a1612 100644
--- a/arch/mips/generic/Kconfig
+++ b/arch/mips/generic/Kconfig
@@ -43,4 +43,10 @@ config FIT_IMAGE_FDT_NI169445
Enable this to include the FDT for the 169445 platform from
National Instruments in the FIT kernel image.
+config FIT_IMAGE_FDT_XILFPGA
+ bool "Include FDT for Xilfpga"
+ help
+ Enable this to include the FDT for the MIPSfpga platform
+ from Imagination Technologies in the FIT kernel image.
+
endif
diff --git a/arch/mips/generic/board-xilfpga.its.S b/arch/mips/generic/board-xilfpga.its.S
new file mode 100644
index 000000000000..a2e773d3f14f
--- /dev/null
+++ b/arch/mips/generic/board-xilfpga.its.S
@@ -0,0 +1,22 @@
+/ {
+ images {
+ fdt@xilfpga {
+ description = "MIPSfpga (xilfpga) Device Tree";
+ data = /incbin/("boot/dts/xilfpga/nexys4ddr.dtb");
+ type = "flat_dt";
+ arch = "mips";
+ compression = "none";
+ hash@0 {
+ algo = "sha1";
+ };
+ };
+ };
+
+ configurations {
+ conf@xilfpga {
+ description = "MIPSfpga Linux kernel";
+ kernel = "kernel@0";
+ fdt = "fdt@xilfpga";
+ };
+ };
+};
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 83054f79f72a..feb069cbf44e 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -19,6 +19,9 @@
#include <asm/asmmacro-64.h>
#endif
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
/*
* Helper macros for generating raw instruction encodings.
*/
@@ -105,6 +108,7 @@
.macro fpu_save_16odd thread
.set push
.set mips64r2
+ .set fp=64
SET_HARDFLOAT
sdc1 $f1, THREAD_FPR1(\thread)
sdc1 $f3, THREAD_FPR3(\thread)
@@ -126,8 +130,8 @@
.endm
.macro fpu_save_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
- defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR6)
sll \tmp, \status, 5
bgez \tmp, 10f
fpu_save_16odd \thread
@@ -163,6 +167,7 @@
.macro fpu_restore_16odd thread
.set push
.set mips64r2
+ .set fp=64
SET_HARDFLOAT
ldc1 $f1, THREAD_FPR1(\thread)
ldc1 $f3, THREAD_FPR3(\thread)
@@ -184,8 +189,8 @@
.endm
.macro fpu_restore_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
- defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR6)
sll \tmp, \status, 5
bgez \tmp, 10f # 16 register mode?
@@ -234,9 +239,6 @@
.endm
#ifdef TOOLCHAIN_SUPPORTS_MSA
-/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
-#undef fp
-
.macro _cfcmsa rd, cs
.set push
.set mips32r2
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index fa57cef12a46..da1b8718861e 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -456,6 +456,7 @@ static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *
{
smp_mb__before_llsc();
__clear_bit(nr, addr);
+ nudge_writes();
}
/*
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 7e25c5cc353a..89e9fb7976fe 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -204,8 +204,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#ifndef CONFIG_SMP
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif
+#endif
#undef __scbeqz
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 42f8cbad6c23..0d9418d264f9 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -27,9 +27,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
static inline void dma_mark_clean(void *addr, size_t size) {}
-extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction);
-
#define arch_setup_dma_ops arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
u64 size, const struct iommu_ops *iommu,
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index a6810923b3f0..6b1f1ad0542c 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1355,19 +1355,17 @@ do { \
if (sel == 0) \
__asm__ __volatile__( \
".set\tmips64\n\t" \
- "dmfc0\t%M0, " #source "\n\t" \
- "dsll\t%L0, %M0, 32\n\t" \
- "dsra\t%M0, %M0, 32\n\t" \
- "dsra\t%L0, %L0, 32\n\t" \
+ "dmfc0\t%L0, " #source "\n\t" \
+ "dsra\t%M0, %L0, 32\n\t" \
+ "sll\t%L0, %L0, 0\n\t" \
".set\tmips0" \
: "=r" (__val)); \
else \
__asm__ __volatile__( \
".set\tmips64\n\t" \
- "dmfc0\t%M0, " #source ", " #sel "\n\t" \
- "dsll\t%L0, %M0, 32\n\t" \
- "dsra\t%M0, %M0, 32\n\t" \
- "dsra\t%L0, %L0, 32\n\t" \
+ "dmfc0\t%L0, " #source ", " #sel "\n\t" \
+ "dsra\t%M0, %L0, 32\n\t" \
+ "sll\t%L0, %L0, 0\n\t" \
".set\tmips0" \
: "=r" (__val)); \
local_irq_restore(__flags); \
diff --git a/arch/mips/include/asm/octeon/cvmx-fpa.h b/arch/mips/include/asm/octeon/cvmx-fpa.h
index c00501d0f7ae..29ae63606ab4 100644
--- a/arch/mips/include/asm/octeon/cvmx-fpa.h
+++ b/arch/mips/include/asm/octeon/cvmx-fpa.h
@@ -36,6 +36,8 @@
#ifndef __CVMX_FPA_H__
#define __CVMX_FPA_H__
+#include <linux/delay.h>
+
#include <asm/octeon/cvmx-address.h>
#include <asm/octeon/cvmx-fpa-defs.h>
@@ -165,7 +167,7 @@ static inline void cvmx_fpa_enable(void)
}
/* Enforce a 10 cycle delay between config and enable */
- cvmx_wait(10);
+ __delay(10);
}
/* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index 205ab2ce10f8..25854abc95f8 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -30,6 +30,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/delay.h>
enum cvmx_mips_space {
CVMX_MIPS_SPACE_XKSEG = 3LL,
@@ -429,18 +430,6 @@ static inline uint64_t cvmx_get_cycle(void)
}
/**
- * Wait for the specified number of cycle
- *
- */
-static inline void cvmx_wait(uint64_t cycles)
-{
- uint64_t done = cvmx_get_cycle() + cycles;
-
- while (cvmx_get_cycle() < done)
- ; /* Spin */
-}
-
-/**
* Reads a chip global cycle counter. This counts CPU cycles since
* chip reset. The counter is 64 bit.
* This register does not exist on CN38XX pass 1 silicon
@@ -481,7 +470,7 @@ static inline uint64_t cvmx_get_cycle_global(void)
result = -1; \
break; \
} else \
- cvmx_wait(100); \
+ __delay(100); \
} \
} while (0); \
result; \
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 5f987598054f..ad461216b5a1 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -240,8 +240,8 @@ static inline int pfn_valid(unsigned long pfn)
#endif
-#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys((void *) \
- (kaddr))))
+#define virt_to_pfn(kaddr) PFN_DOWN(virt_to_phys((void *)(kaddr)))
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
extern int __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr) \
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 52f551ee492d..2339f42f047a 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -106,8 +106,6 @@ extern unsigned long PCIBIOS_MIN_MEM;
#define PCIBIOS_MIN_CARDBUS_IO 0x4000
-extern void pcibios_set_master(struct pci_dev *dev);
-
#define HAVE_PCI_MMAP
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
@@ -123,8 +121,6 @@ extern void pcibios_set_master(struct pci_dev *dev);
#include <linux/string.h>
#include <asm/io.h>
-struct pci_dev;
-
/*
* The PCI address space does equal the physical memory address space.
* The networking and block device layers use this boolean for bounce
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 67fe6dc5211c..0036ea0c7173 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -31,12 +31,7 @@
* tables. Each page table is also a single 4K page, giving 512 (==
* PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
* invalid_pmd_table, each pmd entry is initialized to point to
- * invalid_pte_table, each pte is initialized to 0. When memory is low,
- * and a pmd table or a page table allocation fails, empty_bad_pmd_table
- * and empty_bad_page_table is returned back to higher layer code, so
- * that the failure is recognized later on. Linux does not seem to
- * handle these failures very well though. The empty_bad_page_table has
- * invalid pte entries in it, to force page faults.
+ * invalid_pte_table, each pte is initialized to 0.
*
* Kernel mappings: kernel mappings are held in the swapper_pg_table.
* The layout is identical to userspace except it's indexed with the
@@ -175,7 +170,6 @@
printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
extern pte_t invalid_pte_table[PTRS_PER_PTE];
-extern pte_t empty_bad_page_table[PTRS_PER_PTE];
#ifndef __PAGETABLE_PUD_FOLDED
/*
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 95b8c471f572..af34afbc32d9 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -368,8 +368,6 @@ struct task_struct;
/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
/*
* Do necessary setup to start up a newly executed thread.
*/
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 9e494f8d9c03..88ebd83b3bf9 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -29,7 +29,7 @@ extern cpumask_t cpu_foreign_map[];
/* Map from cpu id to sequential logical cpu number. This will only
not be idempotent when cpus failed to come on-line. */
-extern int __cpu_number_map[NR_CPUS];
+extern int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];
#define cpu_number_map(cpu) __cpu_number_map[cpu]
/* The reverse map from sequential logical cpu number to cpu id. */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index a7d21da16b6a..ee81297d9117 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -13,11 +13,4 @@
#include <asm/qrwlock.h>
#include <asm/qspinlock.h>
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* _ASM_SPINLOCK_H */
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 7c713025b23f..0170602a1e4e 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -26,12 +26,34 @@
#define __NR_syscall 4000
#endif
+static inline bool mips_syscall_is_indirect(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
+ return (IS_ENABLED(CONFIG_32BIT) ||
+ test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
+ (regs->regs[2] == __NR_syscall);
+}
+
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
return current_thread_info()->syscall;
}
+static inline void mips_syscall_update_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /*
+ * v0 is the system call number, except for O32 ABI syscall(), where it
+ * ends up in a0.
+ */
+ if (mips_syscall_is_indirect(task, regs))
+ task_thread_info(task)->syscall = regs->regs[4];
+ else
+ task_thread_info(task)->syscall = regs->regs[2];
+}
+
static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
struct task_struct *task, struct pt_regs *regs, unsigned int n)
{
@@ -98,10 +120,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned long *args)
{
int ret;
- /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
- if ((IS_ENABLED(CONFIG_32BIT) ||
- test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
- (regs->regs[2] == __NR_syscall))
+
+ /* O32 ABI syscall() */
+ if (mips_syscall_is_indirect(task, regs))
i++;
while (n--)
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h
index b7cd6cf77b83..91bf0c2c265c 100644
--- a/arch/mips/include/asm/vdso.h
+++ b/arch/mips/include/asm/vdso.h
@@ -99,7 +99,7 @@ static inline u32 vdso_data_read_begin(const union mips_vdso_data *data)
u32 seq;
while (true) {
- seq = ACCESS_ONCE(data->seq_count);
+ seq = READ_ONCE(data->seq_count);
if (likely(!(seq & 1))) {
/* Paired with smp_wmb() in vdso_data_write_*(). */
smp_rmb();
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 9dd624c2fe56..421e06dfee72 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -166,7 +166,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
nc_core_ready_count = nc_addr;
/* Ensure ready_count is zero-initialised before the assembly runs */
- ACCESS_ONCE(*nc_core_ready_count) = 0;
+ WRITE_ONCE(*nc_core_ready_count, 0);
coupled_barrier(&per_cpu(pm_barrier, core), online);
/* Run the generated entry code */
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c5ff6bfe2825..45d0b6b037ee 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -487,7 +487,7 @@ arch_initcall(frame_info_init);
/*
* Return saved PC of a blocked thread.
*/
-unsigned long thread_saved_pc(struct task_struct *tsk)
+static unsigned long thread_saved_pc(struct task_struct *tsk)
{
struct thread_struct *t = &tsk->thread;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 1395654cfc8d..efbd8df8b665 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -144,6 +144,9 @@ int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
/* badvaddr, status, and cause may not be written. */
+ /* System call number may have been changed */
+ mips_syscall_update_nr(child, regs);
+
return 0;
}
@@ -345,6 +348,9 @@ static int gpr32_set(struct task_struct *target,
}
}
+ /* System call number may have been changed */
+ mips_syscall_update_nr(target, regs);
+
return 0;
}
@@ -405,6 +411,9 @@ static int gpr64_set(struct task_struct *target,
}
}
+ /* System call number may have been changed */
+ mips_syscall_update_nr(target, regs);
+
return 0;
}
@@ -618,6 +627,19 @@ static const struct user_regset_view user_mips64_view = {
.n = ARRAY_SIZE(mips64_regsets),
};
+#ifdef CONFIG_MIPS32_N32
+
+static const struct user_regset_view user_mipsn32_view = {
+ .name = "mipsn32",
+ .e_flags = EF_MIPS_ABI2,
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips64_regsets,
+ .n = ARRAY_SIZE(mips64_regsets),
+};
+
+#endif /* CONFIG_MIPS32_N32 */
+
#endif /* CONFIG_64BIT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
@@ -629,6 +651,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
return &user_mips_view;
#endif
+#ifdef CONFIG_MIPS32_N32
+ if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
+ return &user_mipsn32_view;
+#endif
return &user_mips64_view;
#endif
}
@@ -753,6 +779,12 @@ long arch_ptrace(struct task_struct *child, long request,
switch (addr) {
case 0 ... 31:
regs->regs[addr] = data;
+ /* System call number may have been changed */
+ if (addr == 2)
+ mips_syscall_update_nr(child, regs);
+ else if (addr == 4 &&
+ mips_syscall_is_indirect(child, regs))
+ mips_syscall_update_nr(child, regs);
break;
case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs = get_fpu_regs(child);
@@ -864,9 +896,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
current_thread_info()->syscall = syscall;
- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs))
- return -1;
+ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+ if (tracehook_report_syscall_entry(regs))
+ return -1;
+ syscall = current_thread_info()->syscall;
+ }
#ifdef CONFIG_SECCOMP
if (unlikely(test_thread_flag(TIF_SECCOMP))) {
@@ -884,6 +918,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
ret = __secure_computing(&sd);
if (ret == -1)
return ret;
+ syscall = current_thread_info()->syscall;
}
#endif
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 40e212d6b26b..2b9260f92ccd 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -33,6 +33,7 @@
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/reg.h>
+#include <asm/syscall.h>
#include <linux/uaccess.h>
#include <asm/bootinfo.h>
@@ -195,6 +196,12 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
switch (addr) {
case 0 ... 31:
regs->regs[addr] = data;
+ /* System call number may have been changed */
+ if (addr == 2)
+ mips_syscall_update_nr(child, regs);
+ else if (addr == 4 &&
+ mips_syscall_is_indirect(child, regs))
+ mips_syscall_update_nr(child, regs);
break;
case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs = get_fpu_regs(child);
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 0a83b1708b3c..8e3a6020c613 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -40,8 +40,8 @@
*/
LEAF(_save_fp)
EXPORT_SYMBOL(_save_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
- defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR6)
mfc0 t0, CP0_STATUS
#endif
fpu_save_double a0 t0 t1 # clobbers t1
@@ -52,8 +52,8 @@ EXPORT_SYMBOL(_save_fp)
* Restore a thread's fp context.
*/
LEAF(_restore_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
- defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR6)
mfc0 t0, CP0_STATUS
#endif
fpu_restore_double a0 t0 t1 # clobbers t1
@@ -246,11 +246,11 @@ LEAF(_save_fp_context)
cfc1 t1, fcr31
.set pop
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
- defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR6)
.set push
SET_HARDFLOAT
-#ifdef CONFIG_CPU_MIPS32_R2
+#ifdef CONFIG_CPU_MIPSR2
.set mips32r2
.set fp=64
mfc0 t0, CP0_STATUS
@@ -314,11 +314,11 @@ LEAF(_save_fp_context)
LEAF(_restore_fp_context)
EX lw t1, 0(a1)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
- defined(CONFIG_CPU_MIPS32_R6)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR6)
.set push
SET_HARDFLOAT
-#ifdef CONFIG_CPU_MIPS32_R2
+#ifdef CONFIG_CPU_MIPSR2
.set mips32r2
.set fp=64
mfc0 t0, CP0_STATUS
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index fe3939726765..702c678de116 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -80,6 +80,7 @@ EXPORT_SYMBOL(mips_io_port_base);
static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
+static struct resource bss_resource = { .name = "Kernel bss", };
static void *detect_magic __initdata = detect_memory_region;
@@ -927,6 +928,8 @@ static void __init resource_init(void)
code_resource.end = __pa_symbol(&_etext) - 1;
data_resource.start = __pa_symbol(&_etext);
data_resource.end = __pa_symbol(&_edata) - 1;
+ bss_resource.start = __pa_symbol(&__bss_start);
+ bss_resource.end = __pa_symbol(&__bss_stop) - 1;
for (i = 0; i < boot_mem_map.nr_map; i++) {
struct resource *res;
@@ -966,6 +969,7 @@ static void __init resource_init(void)
*/
request_resource(res, &code_resource);
request_resource(res, &data_resource);
+ request_resource(res, &bss_resource);
request_crashkernel(res);
}
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 88be966d3e61..d84b9066b465 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -48,7 +48,7 @@
#include <asm/setup.h>
#include <asm/maar.h>
-int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
+int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP]; /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 7611c3013793..52500d3b7004 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -446,9 +446,9 @@ void __init ltq_soc_init(void)
/* add our generic xway clocks */
clkdev_add_pmu("10000000.fpi", NULL, 0, 0, PMU_FPI);
- clkdev_add_pmu("1e100400.serial", NULL, 0, 0, PMU_ASC0);
clkdev_add_pmu("1e100a00.gptu", NULL, 1, 0, PMU_GPT);
clkdev_add_pmu("1e100bb0.stp", NULL, 1, 0, PMU_STP);
+ clkdev_add_pmu("1e100c00.serial", NULL, 0, 0, PMU_ASC1);
clkdev_add_pmu("1e104100.dma", NULL, 1, 0, PMU_DMA);
clkdev_add_pmu("1e100800.spi", NULL, 1, 0, PMU_SPI);
clkdev_add_pmu("1e105300.ebu", NULL, 0, 0, PMU_EBU);
@@ -462,10 +462,8 @@ void __init ltq_soc_init(void)
clkdev_add_pmu("1e180000.etop", NULL, 1, 0, PMU_PPE);
}
- if (!of_machine_is_compatible("lantiq,ase")) {
- clkdev_add_pmu("1e100c00.serial", NULL, 0, 0, PMU_ASC1);
+ if (!of_machine_is_compatible("lantiq,ase"))
clkdev_add_pci();
- }
if (of_machine_is_compatible("lantiq,grx390") ||
of_machine_is_compatible("lantiq,ar10")) {
diff --git a/arch/mips/lasat/picvue_proc.c b/arch/mips/lasat/picvue_proc.c
index dd292dcec684..a8103f6972cd 100644
--- a/arch/mips/lasat/picvue_proc.c
+++ b/arch/mips/lasat/picvue_proc.c
@@ -197,8 +197,7 @@ static int __init pvc_proc_init(void)
if (proc_entry == NULL)
goto error;
- init_timer(&timer);
- timer.function = pvc_proc_timerfunc;
+ setup_timer(&timer, pvc_proc_timerfunc, 0UL);
return 0;
error:
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 16d9ef5a78c5..da6c1c0c30c1 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -810,7 +810,7 @@ do { \
#define SITOREG(si, x) \
do { \
if (cop1_64bit(xcp) && !hybrid_fprs()) { \
- unsigned i; \
+ unsigned int i; \
set_fpr32(&ctx->fpr[x], 0, si); \
for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \
set_fpr32(&ctx->fpr[x], i, 0); \
@@ -823,7 +823,7 @@ do { \
#define SITOHREG(si, x) \
do { \
- unsigned i; \
+ unsigned int i; \
set_fpr32(&ctx->fpr[x], 1, si); \
for (i = 2; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \
set_fpr32(&ctx->fpr[x], i, 0); \
@@ -834,7 +834,7 @@ do { \
#define DITOREG(di, x) \
do { \
- unsigned fpr, i; \
+ unsigned int fpr, i; \
fpr = (x) & ~(cop1_64bit(xcp) ^ 1); \
set_fpr64(&ctx->fpr[fpr], 0, di); \
for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val64); i++) \
@@ -1465,7 +1465,7 @@ DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg);
static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
mips_instruction ir, void __user **fault_addr)
{
- unsigned rcsr = 0; /* resulting csr */
+ unsigned int rcsr = 0; /* resulting csr */
MIPS_FPU_EMU_INC_STATS(cp1xops);
@@ -1661,10 +1661,10 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
mips_instruction ir)
{
int rfmt; /* resulting format */
- unsigned rcsr = 0; /* resulting csr */
+ unsigned int rcsr = 0; /* resulting csr */
unsigned int oldrm;
unsigned int cbit;
- unsigned cond;
+ unsigned int cond;
union {
union ieee754dp d;
union ieee754sp s;
@@ -1795,7 +1795,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(fd, MIPSInst_FD(ir));
rv.s = ieee754sp_maddf(fd, fs, ft);
- break;
+ goto copcsr;
}
case fmsubf_op: {
@@ -1809,7 +1809,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(fd, MIPSInst_FD(ir));
rv.s = ieee754sp_msubf(fd, fs, ft);
- break;
+ goto copcsr;
}
case frint_op: {
@@ -1834,7 +1834,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(fs, MIPSInst_FS(ir));
rv.w = ieee754sp_2008class(fs);
rfmt = w_fmt;
- break;
+ goto copcsr;
}
case fmin_op: {
@@ -1847,7 +1847,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmin(fs, ft);
- break;
+ goto copcsr;
}
case fmina_op: {
@@ -1860,7 +1860,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmina(fs, ft);
- break;
+ goto copcsr;
}
case fmax_op: {
@@ -1873,7 +1873,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmax(fs, ft);
- break;
+ goto copcsr;
}
case fmaxa_op: {
@@ -1886,7 +1886,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
SPFROMREG(ft, MIPSInst_FT(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fmaxa(fs, ft);
- break;
+ goto copcsr;
}
case fabs_op:
@@ -2029,9 +2029,10 @@ copcsr:
default:
if (!NO_R6EMU && MIPSInst_FUNC(ir) >= fcmp_op) {
- unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
+ unsigned int cmpop;
union ieee754sp fs, ft;
+ cmpop = MIPSInst_FUNC(ir) - fcmp_op;
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(ft, MIPSInst_FT(ir));
rv.w = ieee754sp_cmp(fs, ft,
@@ -2165,7 +2166,7 @@ copcsr:
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(fd, MIPSInst_FD(ir));
rv.d = ieee754dp_maddf(fd, fs, ft);
- break;
+ goto copcsr;
}
case fmsubf_op: {
@@ -2179,7 +2180,7 @@ copcsr:
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(fd, MIPSInst_FD(ir));
rv.d = ieee754dp_msubf(fd, fs, ft);
- break;
+ goto copcsr;
}
case frint_op: {
@@ -2204,7 +2205,7 @@ copcsr:
DPFROMREG(fs, MIPSInst_FS(ir));
rv.l = ieee754dp_2008class(fs);
rfmt = l_fmt;
- break;
+ goto copcsr;
}
case fmin_op: {
@@ -2217,7 +2218,7 @@ copcsr:
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmin(fs, ft);
- break;
+ goto copcsr;
}
case fmina_op: {
@@ -2230,7 +2231,7 @@ copcsr:
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmina(fs, ft);
- break;
+ goto copcsr;
}
case fmax_op: {
@@ -2243,7 +2244,7 @@ copcsr:
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmax(fs, ft);
- break;
+ goto copcsr;
}
case fmaxa_op: {
@@ -2256,7 +2257,7 @@ copcsr:
DPFROMREG(ft, MIPSInst_FT(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fmaxa(fs, ft);
- break;
+ goto copcsr;
}
case fabs_op:
@@ -2379,9 +2380,10 @@ dcopuop:
default:
if (!NO_R6EMU && MIPSInst_FUNC(ir) >= fcmp_op) {
- unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
+ unsigned int cmpop;
union ieee754dp fs, ft;
+ cmpop = MIPSInst_FUNC(ir) - fcmp_op;
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(ft, MIPSInst_FT(ir));
rv.w = ieee754dp_cmp(fs, ft,
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
index e0d9be5fbf4c..7ad79ed411f5 100644
--- a/arch/mips/math-emu/dp_maddf.c
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -45,10 +45,10 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
{
int re;
int rs;
- unsigned lxm;
- unsigned hxm;
- unsigned lym;
- unsigned hym;
+ unsigned int lxm;
+ unsigned int hxm;
+ unsigned int lym;
+ unsigned int hym;
u64 lrm;
u64 hrm;
u64 lzm;
diff --git a/arch/mips/math-emu/dp_mul.c b/arch/mips/math-emu/dp_mul.c
index 87d0b44b0614..60c8bfe40947 100644
--- a/arch/mips/math-emu/dp_mul.c
+++ b/arch/mips/math-emu/dp_mul.c
@@ -26,10 +26,10 @@ union ieee754dp ieee754dp_mul(union ieee754dp x, union ieee754dp y)
int re;
int rs;
u64 rm;
- unsigned lxm;
- unsigned hxm;
- unsigned lym;
- unsigned hym;
+ unsigned int lxm;
+ unsigned int hxm;
+ unsigned int lym;
+ unsigned int hym;
u64 lrm;
u64 hrm;
u64 t;
diff --git a/arch/mips/math-emu/dp_sqrt.c b/arch/mips/math-emu/dp_sqrt.c
index cd5bc083001e..cea907b83146 100644
--- a/arch/mips/math-emu/dp_sqrt.c
+++ b/arch/mips/math-emu/dp_sqrt.c
@@ -21,7 +21,7 @@
#include "ieee754dp.h"
-static const unsigned table[] = {
+static const unsigned int table[] = {
0, 1204, 3062, 5746, 9193, 13348, 18162, 23592,
29598, 36145, 43202, 50740, 58733, 67158, 75992,
85215, 83599, 71378, 60428, 50647, 41945, 34246,
@@ -33,7 +33,7 @@ union ieee754dp ieee754dp_sqrt(union ieee754dp x)
{
struct _ieee754_csr oldcsr;
union ieee754dp y, z, t;
- unsigned scalx, yh;
+ unsigned int scalx, yh;
COMPXDP;
EXPLODEXDP;
diff --git a/arch/mips/math-emu/ieee754.h b/arch/mips/math-emu/ieee754.h
index 92dc8fa565cb..e0eb7a965fdf 100644
--- a/arch/mips/math-emu/ieee754.h
+++ b/arch/mips/math-emu/ieee754.h
@@ -165,11 +165,12 @@ struct _ieee754_csr {
};
#define ieee754_csr (*(struct _ieee754_csr *)(&current->thread.fpu.fcr31))
-static inline unsigned ieee754_getrm(void)
+static inline unsigned int ieee754_getrm(void)
{
return (ieee754_csr.rm);
}
-static inline unsigned ieee754_setrm(unsigned rm)
+
+static inline unsigned int ieee754_setrm(unsigned int rm)
{
return (ieee754_csr.rm = rm);
}
@@ -177,14 +178,14 @@ static inline unsigned ieee754_setrm(unsigned rm)
/*
* get current exceptions
*/
-static inline unsigned ieee754_getcx(void)
+static inline unsigned int ieee754_getcx(void)
{
return (ieee754_csr.cx);
}
/* test for current exception condition
*/
-static inline int ieee754_cxtest(unsigned n)
+static inline int ieee754_cxtest(unsigned int n)
{
return (ieee754_csr.cx & n);
}
@@ -192,21 +193,21 @@ static inline int ieee754_cxtest(unsigned n)
/*
* get sticky exceptions
*/
-static inline unsigned ieee754_getsx(void)
+static inline unsigned int ieee754_getsx(void)
{
return (ieee754_csr.sx);
}
/* clear sticky conditions
*/
-static inline unsigned ieee754_clrsx(void)
+static inline unsigned int ieee754_clrsx(void)
{
return (ieee754_csr.sx = 0);
}
/* test for sticky exception condition
*/
-static inline int ieee754_sxtest(unsigned n)
+static inline int ieee754_sxtest(unsigned int n)
{
return (ieee754_csr.sx & n);
}
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index dd2071f430e0..06ac0e2ac7ac 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -54,13 +54,13 @@ static inline int ieee754_class_nan(int xc)
}
#define COMPXSP \
- unsigned xm; int xe; int xs __maybe_unused; int xc
+ unsigned int xm; int xe; int xs __maybe_unused; int xc
#define COMPYSP \
- unsigned ym; int ye; int ys; int yc
+ unsigned int ym; int ye; int ys; int yc
#define COMPZSP \
- unsigned zm; int ze; int zs; int zc
+ unsigned int zm; int ze; int zs; int zc
#define EXPLODESP(v, vc, vs, ve, vm) \
{ \
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
index 260e68965907..8423e4c5e415 100644
--- a/arch/mips/math-emu/ieee754sp.c
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -65,7 +65,7 @@ union ieee754sp __cold ieee754sp_nanxcpt(union ieee754sp r)
return r;
}
-static unsigned ieee754sp_get_rounding(int sn, unsigned xm)
+static unsigned int ieee754sp_get_rounding(int sn, unsigned int xm)
{
/* inexact must round off 3 bits
*/
@@ -96,7 +96,7 @@ static unsigned ieee754sp_get_rounding(int sn, unsigned xm)
* xe is an unbiased exponent
* xm is 3bit extended precision value.
*/
-union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
+union ieee754sp ieee754sp_format(int sn, int xe, unsigned int xm)
{
assert(xm); /* we don't gen exact zeros (probably should) */
diff --git a/arch/mips/math-emu/ieee754sp.h b/arch/mips/math-emu/ieee754sp.h
index 0f63e4202cff..8c5a63804873 100644
--- a/arch/mips/math-emu/ieee754sp.h
+++ b/arch/mips/math-emu/ieee754sp.h
@@ -69,7 +69,7 @@ static inline int ieee754sp_finite(union ieee754sp x)
#define SPDNORMY SPDNORMx(ym, ye)
#define SPDNORMZ SPDNORMx(zm, ze)
-static inline union ieee754sp buildsp(int s, int bx, unsigned m)
+static inline union ieee754sp buildsp(int s, int bx, unsigned int m)
{
union ieee754sp r;
diff --git a/arch/mips/math-emu/sp_div.c b/arch/mips/math-emu/sp_div.c
index 27f6db3a0a4c..23587b31ca87 100644
--- a/arch/mips/math-emu/sp_div.c
+++ b/arch/mips/math-emu/sp_div.c
@@ -23,9 +23,9 @@
union ieee754sp ieee754sp_div(union ieee754sp x, union ieee754sp y)
{
- unsigned rm;
+ unsigned int rm;
int re;
- unsigned bm;
+ unsigned int bm;
COMPXSP;
COMPYSP;
diff --git a/arch/mips/math-emu/sp_fint.c b/arch/mips/math-emu/sp_fint.c
index d5d8495b2cc4..1a35d12b6fc8 100644
--- a/arch/mips/math-emu/sp_fint.c
+++ b/arch/mips/math-emu/sp_fint.c
@@ -23,7 +23,7 @@
union ieee754sp ieee754sp_fint(int x)
{
- unsigned xm;
+ unsigned int xm;
int xe;
int xs;
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
index 7195fe785d81..f823338dbb65 100644
--- a/arch/mips/math-emu/sp_maddf.c
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -20,9 +20,9 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
{
int re;
int rs;
- unsigned rm;
- uint64_t rm64;
- uint64_t zm64;
+ unsigned int rm;
+ u64 rm64;
+ u64 zm64;
int s;
COMPXSP;
diff --git a/arch/mips/math-emu/sp_mul.c b/arch/mips/math-emu/sp_mul.c
index d910c43a6f30..4015101fbc37 100644
--- a/arch/mips/math-emu/sp_mul.c
+++ b/arch/mips/math-emu/sp_mul.c
@@ -25,15 +25,15 @@ union ieee754sp ieee754sp_mul(union ieee754sp x, union ieee754sp y)
{
int re;
int rs;
- unsigned rm;
+ unsigned int rm;
unsigned short lxm;
unsigned short hxm;
unsigned short lym;
unsigned short hym;
- unsigned lrm;
- unsigned hrm;
- unsigned t;
- unsigned at;
+ unsigned int lrm;
+ unsigned int hrm;
+ unsigned int t;
+ unsigned int at;
COMPXSP;
COMPYSP;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index c01bd20d0208..e3e94d05f0fd 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -179,7 +179,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
- unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long addr = (unsigned long)cpu_addr;
unsigned long off = vma->vm_pgoff;
@@ -383,7 +383,7 @@ static int mips_dma_supported(struct device *dev, u64 mask)
return plat_dma_supported(dev, mask);
}
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+static void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
@@ -392,8 +392,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
__dma_sync_virtual(vaddr, size, direction);
}
-EXPORT_SYMBOL(dma_cache_sync);
-
static const struct dma_map_ops mips_default_dma_map_ops = {
.alloc = mips_dma_alloc_coherent,
.free = mips_dma_free_coherent,
@@ -407,7 +405,8 @@ static const struct dma_map_ops mips_default_dma_map_ops = {
.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
.sync_sg_for_device = mips_dma_sync_sg_for_device,
.mapping_error = mips_dma_mapping_error,
- .dma_supported = mips_dma_supported
+ .dma_supported = mips_dma_supported,
+ .cache_sync = mips_dma_cache_sync,
};
const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 5f6ea7d746de..84b7b592b834 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -402,7 +402,6 @@ int page_is_ram(unsigned long pagenr)
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
- unsigned long lastpfn __maybe_unused;
pagetable_init();
@@ -416,17 +415,14 @@ void __init paging_init(void)
max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
- lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
- lastpfn = highend_pfn;
if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
printk(KERN_WARNING "This processor doesn't support highmem."
" %ldk highmem ignored\n",
(highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
- lastpfn = max_low_pfn;
}
#endif
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
index 90fba9bf98da..407f155f0bb6 100644
--- a/arch/mips/pci/pci-mt7620.c
+++ b/arch/mips/pci/pci-mt7620.c
@@ -33,14 +33,13 @@
#define RALINK_GPIOMODE 0x60
#define PPLL_CFG1 0x9c
-#define PDRV_SW_SET BIT(23)
#define PPLL_DRV 0xa0
-#define PDRV_SW_SET (1<<31)
-#define LC_CKDRVPD (1<<19)
-#define LC_CKDRVOHZ (1<<18)
-#define LC_CKDRVHZ (1<<17)
-#define LC_CKTEST (1<<16)
+#define PDRV_SW_SET BIT(31)
+#define LC_CKDRVPD BIT(19)
+#define LC_CKDRVOHZ BIT(18)
+#define LC_CKDRVHZ BIT(17)
+#define LC_CKTEST BIT(16)
/* PCI Bridge registers */
#define RALINK_PCI_PCICFG_ADDR 0x00
@@ -66,7 +65,7 @@
#define PCIEPHY0_CFG 0x90
#define RALINK_PCIEPHY_P0_CTL_OFFSET 0x7498
-#define RALINK_PCIE0_CLK_EN (1 << 26)
+#define RALINK_PCIE0_CLK_EN BIT(26)
#define BUSY 0x80000000
#define WAITRETRY_MAX 10
@@ -121,7 +120,7 @@ static int wait_pciephy_busy(void)
else
break;
if (retry++ > WAITRETRY_MAX) {
- printk(KERN_WARN "PCIE-PHY retry failed.\n");
+ pr_warn("PCIE-PHY retry failed.\n");
return -1;
}
}
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index fd2887415bc8..87ba86bd8696 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -639,7 +639,7 @@ static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
return -1;
}
- cvmx_wait(10000);
+ __delay(10000);
pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
} while (pciercx_cfg032.s.dlla == 0);
@@ -821,7 +821,7 @@ retry:
* don't poll PESCX_CTL_STATUS2[PCIERST], but simply wait a
* fixed number of cycles.
*/
- cvmx_wait(400000);
+ __delay(400000);
/*
* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of
@@ -1018,7 +1018,7 @@ retry:
i = in_p_offset;
while (i--) {
cvmx_write64_uint32(write_address, 0);
- cvmx_wait(10000);
+ __delay(10000);
}
/*
@@ -1034,7 +1034,7 @@ retry:
dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
old_in_fif_p_count = dbg_data.s.data & 0xff;
cvmx_write64_uint32(write_address, 0);
- cvmx_wait(10000);
+ __delay(10000);
dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
in_fif_p_count = dbg_data.s.data & 0xff;
} while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
@@ -1053,7 +1053,7 @@ retry:
cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
while (in_fif_p_count != 0) {
cvmx_write64_uint32(write_address, 0);
- cvmx_wait(10000);
+ __delay(10000);
in_fif_p_count = (in_fif_p_count + 1) & 0xff;
}
/*
@@ -1105,7 +1105,7 @@ static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
do {
if (cvmx_get_cycle() - start_cycle > octeon_get_clock_rate())
return -1;
- cvmx_wait(10000);
+ __delay(10000);
pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
} while ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1));
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index f26736b7080b..1f9cb0e3c79a 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -31,7 +31,6 @@ choice
config SOC_RT305X
bool "RT305x"
- select USB_ARCH_HAS_HCD
config SOC_RT3883
bool "RT3883"
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 9be8b08ae46b..41b71c4352c2 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -145,8 +145,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
FUNC("i2c", 0, 4, 2),
};
-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
-static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
+static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
+static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
diff --git a/arch/mips/xilfpga/Kconfig b/arch/mips/xilfpga/Kconfig
deleted file mode 100644
index ca7b2368eab7..000000000000
--- a/arch/mips/xilfpga/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-choice
- prompt "Machine type"
- depends on MACH_XILFPGA
- default XILFPGA_NEXYS4DDR
-
-config XILFPGA_NEXYS4DDR
- bool "Nexys4DDR by Digilent"
-
-endchoice
diff --git a/arch/mips/xilfpga/Makefile b/arch/mips/xilfpga/Makefile
deleted file mode 100644
index a4deec6fadbc..000000000000
--- a/arch/mips/xilfpga/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the Xilfpga
-#
-
-obj-y += init.o
-obj-y += intc.o
-obj-y += time.o
diff --git a/arch/mips/xilfpga/Platform b/arch/mips/xilfpga/Platform
deleted file mode 100644
index ed375afe3d39..000000000000
--- a/arch/mips/xilfpga/Platform
+++ /dev/null
@@ -1,3 +0,0 @@
-platform-$(CONFIG_MACH_XILFPGA) += xilfpga/
-cflags-$(CONFIG_MACH_XILFPGA) += -I$(srctree)/arch/mips/include/asm/mach-xilfpga
-load-$(CONFIG_MACH_XILFPGA) += 0xffffffff80100000
diff --git a/arch/mips/xilfpga/init.c b/arch/mips/xilfpga/init.c
deleted file mode 100644
index 602e384a26a2..000000000000
--- a/arch/mips/xilfpga/init.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Xilfpga platform setup
- *
- * Copyright (C) 2015 Imagination Technologies
- * Author: Zubair Lutfullah Kakakhel <Zubair.Kakakhel@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- */
-
-#include <linux/of_fdt.h>
-
-#include <asm/prom.h>
-
-#define XILFPGA_UART_BASE 0xb0401000
-
-const char *get_system_type(void)
-{
- return "MIPSfpga";
-}
-
-void __init plat_mem_setup(void)
-{
- __dt_setup_arch(__dtb_start);
- strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
-}
-
-void __init prom_init(void)
-{
- setup_8250_early_printk_port(XILFPGA_UART_BASE, 2, 50000);
-}
-
-void __init prom_free_prom_memory(void)
-{
-}
-
-void __init device_tree_init(void)
-{
- if (!initial_boot_params)
- return;
-
- unflatten_and_copy_device_tree();
-}
diff --git a/arch/mips/xilfpga/intc.c b/arch/mips/xilfpga/intc.c
deleted file mode 100644
index a127cca3ae8c..000000000000
--- a/arch/mips/xilfpga/intc.c
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Xilfpga interrupt controller setup
- *
- * Copyright (C) 2015 Imagination Technologies
- * Author: Zubair Lutfullah Kakakhel <Zubair.Kakakhel@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- */
-
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/irqchip.h>
-
-#include <asm/irq_cpu.h>
-
-
-void __init arch_init_irq(void)
-{
- irqchip_init();
-}
diff --git a/arch/mips/xilfpga/time.c b/arch/mips/xilfpga/time.c
deleted file mode 100644
index 36f3f1870ee2..000000000000
--- a/arch/mips/xilfpga/time.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Xilfpga clocksource/timer setup
- *
- * Copyright (C) 2015 Imagination Technologies
- * Author: Zubair Lutfullah Kakakhel <Zubair.Kakakhel@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/clocksource.h>
-#include <linux/of.h>
-
-#include <asm/time.h>
-
-void __init plat_time_init(void)
-{
- struct device_node *np;
- struct clk *clk;
-
- of_clk_init(NULL);
- timer_probe();
-
- np = of_get_cpu_node(0, NULL);
- if (!np) {
- pr_err("Failed to get CPU node\n");
- return;
- }
-
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk));
- return;
- }
-
- mips_hpt_frequency = clk_get_rate(clk) / 2;
- clk_put(clk);
-}
diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h
index 737ef574b3ea..439e474ed6d7 100644
--- a/arch/mn10300/include/asm/dma-mapping.h
+++ b/arch/mn10300/include/asm/dma-mapping.h
@@ -11,9 +11,6 @@
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
-#include <asm/cache.h>
-#include <asm/io.h>
-
extern const struct dma_map_ops mn10300_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
@@ -21,11 +18,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &mn10300_dma_ops;
}
-static inline
-void dma_cache_sync(void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- mn10300_dcache_flush_inv();
-}
-
#endif
diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h
index d27654902f28..5b75a1b2c4f6 100644
--- a/arch/mn10300/include/asm/pci.h
+++ b/arch/mn10300/include/asm/pci.h
@@ -47,8 +47,6 @@ extern void unit_pci_init(void);
#define PCIBIOS_MIN_IO 0xBE000004
#define PCIBIOS_MIN_MEM 0xB8000000
-void pcibios_set_master(struct pci_dev *dev);
-
/* Dynamic DMA mapping stuff.
* i386 has everything mapped statically.
*/
@@ -59,8 +57,6 @@ void pcibios_set_master(struct pci_dev *dev);
#include <linux/string.h>
#include <asm/io.h>
-struct pci_dev;
-
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions.
diff --git a/arch/mn10300/include/asm/spinlock.h b/arch/mn10300/include/asm/spinlock.h
index fe413b41df6c..879cd0df53ba 100644
--- a/arch/mn10300/include/asm/spinlock.h
+++ b/arch/mn10300/include/asm/spinlock.h
@@ -84,6 +84,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
: "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
: "memory", "cc");
}
+#define arch_spin_lock_flags arch_spin_lock_flags
#ifdef __KERNEL__
@@ -98,18 +99,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
* read-locks.
*/
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((int)(x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
/*
* On mn10300, we implement read-write locks as a 32-bit counter
* with the high bit (sign) being the "contended" bit.
@@ -183,9 +172,6 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
return 0;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
diff --git a/arch/mn10300/kernel/head.S b/arch/mn10300/kernel/head.S
index 73e00fc78072..0b15f759e0d2 100644
--- a/arch/mn10300/kernel/head.S
+++ b/arch/mn10300/kernel/head.S
@@ -434,14 +434,6 @@ ENTRY(empty_zero_page)
.space PAGE_SIZE
.balign PAGE_SIZE
-ENTRY(empty_bad_page)
- .space PAGE_SIZE
-
- .balign PAGE_SIZE
-ENTRY(empty_bad_pte_table)
- .space PAGE_SIZE
-
- .balign PAGE_SIZE
ENTRY(large_page_table)
.space PAGE_SIZE
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index 7ecf69879e2d..d7ef1232a82a 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -543,7 +543,7 @@ static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port)
try_again:
/* pull chars out of the hat */
- ix = ACCESS_ONCE(port->rx_outp);
+ ix = READ_ONCE(port->rx_outp);
if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) {
if (push && !tport->low_latency)
tty_flip_buffer_push(tport);
@@ -1724,7 +1724,7 @@ static int mn10300_serial_poll_get_char(struct uart_port *_port)
if (mn10300_serial_int_tbl[port->rx_irq].port != NULL) {
do {
/* pull chars out of the hat */
- ix = ACCESS_ONCE(port->rx_outp);
+ ix = READ_ONCE(port->rx_outp);
if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0)
return NO_POLL_CHAR;
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.h b/arch/mn10300/unit-asb2305/pci-asb2305.h
index 96c484b12226..0667f613b023 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.h
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.h
@@ -30,9 +30,6 @@ extern void pcibios_resource_survey(void);
extern struct pci_ops *pci_root_ops;
-extern struct irq_routing_table *pcibios_get_irq_routing_table(void);
-extern int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
-
/* pci-irq.c */
struct irq_info {
diff --git a/arch/nios2/boot/.gitignore b/arch/nios2/boot/.gitignore
index 109279ca5a4d..64386a8dedd8 100644
--- a/arch/nios2/boot/.gitignore
+++ b/arch/nios2/boot/.gitignore
@@ -1,2 +1 @@
-*.dtb
vmImage
diff --git a/arch/nios2/boot/Makefile b/arch/nios2/boot/Makefile
index c899876320df..2ba23a679732 100644
--- a/arch/nios2/boot/Makefile
+++ b/arch/nios2/boot/Makefile
@@ -53,7 +53,5 @@ $(obj)/%.dtb: $(src)/dts/%.dts FORCE
$(obj)/dtbs: $(addprefix $(obj)/, $(dtb-y))
-clean-files := *.dtb
-
install:
sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
diff --git a/arch/nios2/include/asm/dma-mapping.h b/arch/nios2/include/asm/dma-mapping.h
index f8dc62222741..6ceb92251da0 100644
--- a/arch/nios2/include/asm/dma-mapping.h
+++ b/arch/nios2/include/asm/dma-mapping.h
@@ -17,13 +17,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &nios2_dma_ops;
}
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
-}
-
#endif /* _ASM_NIOS2_DMA_MAPPING_H */
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index df2136ab1dcc..339df7324e9c 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -22,13 +22,19 @@ config OPENRISC
select HAVE_UID16
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
+ select GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
+ select GENERIC_SMP_IDLE_THREAD
select MODULES_USE_ELF_RELA
select HAVE_DEBUG_STACKOVERFLOW
select OR1K_PIC
select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1
select NO_BOOTMEM
+ select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_USE_QUEUED_RWLOCKS
+ select OMPIC if SMP
+ select ARCH_WANT_FRAME_POINTERS
config CPU_BIG_ENDIAN
def_bool y
@@ -56,6 +62,12 @@ config TRACE_IRQFLAGS_SUPPORT
config GENERIC_CSUM
def_bool y
+config STACKTRACE_SUPPORT
+ def_bool y
+
+config LOCKDEP_SUPPORT
+ def_bool y
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -73,6 +85,17 @@ config OR1K_1200
endchoice
+config DCACHE_WRITETHROUGH
+ bool "Have write through data caches"
+ default n
+ help
+ Select this if your implementation features write through data caches.
+ Selecting 'N' here will allow the kernel to force flushing of data
+ caches at relevant times. Most OpenRISC implementations support write-
+ through data caches.
+
+ If unsure, say N here.
+
config OPENRISC_BUILTIN_DTB
string "Builtin DTB"
default ""
@@ -105,8 +128,19 @@ config OPENRISC_HAVE_INST_DIV
endmenu
config NR_CPUS
- int
- default "1"
+ int "Maximum number of CPUs (2-32)"
+ range 2 32
+ depends on SMP
+ default "2"
+
+config SMP
+ bool "Symmetric Multi-Processing support"
+ help
+ This enables support for systems with more than one CPU. If you have
+ a system with only one CPU, say N. If you have a system with more
+ than one CPU, say Y.
+
+ If you don't know what to do here, say N.
source kernel/Kconfig.hz
source kernel/Kconfig.preempt
@@ -125,6 +159,17 @@ config OPENRISC_NO_SPR_SR_DSX
Say N here if you know that your OpenRISC processor has
SPR_SR_DSX bit implemented. Say Y if you are unsure.
+config OPENRISC_HAVE_SHADOW_GPRS
+ bool "Support for shadow gpr files" if !SMP
+ default y if SMP
+ help
+ Say Y here if your OpenRISC processor features shadowed
+ register files. In that case they will be used as
+ scratch register storage on exception entry.
+
+ On SMP systems, this feature is mandatory.
+ On a unicore system it's safe to say N here if you are unsure.
+
config CMDLINE
string "Default kernel command string"
default ""
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
index 89076a66eee2..cf8802962864 100644
--- a/arch/openrisc/Makefile
+++ b/arch/openrisc/Makefile
@@ -25,6 +25,7 @@ LDFLAGS_vmlinux :=
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__
+CHECKFLAGS += -mbig-endian
ifeq ($(CONFIG_OPENRISC_HAVE_INST_MUL),y)
KBUILD_CFLAGS += $(call cc-option,-mhard-mul)
diff --git a/arch/openrisc/boot/dts/Makefile b/arch/openrisc/boot/dts/Makefile
index 792ce7143c3a..17dd791a833f 100644
--- a/arch/openrisc/boot/dts/Makefile
+++ b/arch/openrisc/boot/dts/Makefile
@@ -6,6 +6,4 @@ BUILTIN_DTB :=
endif
obj-y += $(BUILTIN_DTB)
-clean-files := *.dtb.S
-
#DTC_FLAGS ?= -p 1024
diff --git a/arch/openrisc/boot/dts/or1ksim.dts b/arch/openrisc/boot/dts/or1ksim.dts
index 9f4b856da580..d8aa8309c9d3 100644
--- a/arch/openrisc/boot/dts/or1ksim.dts
+++ b/arch/openrisc/boot/dts/or1ksim.dts
@@ -6,8 +6,13 @@
#size-cells = <1>;
interrupt-parent = <&pic>;
+ aliases {
+ uart0 = &serial0;
+ };
+
chosen {
- bootargs = "console=uart,mmio,0x90000000,115200";
+ bootargs = "earlycon";
+ stdout-path = "uart0:115200";
};
memory@0 {
diff --git a/arch/openrisc/boot/dts/simple_smp.dts b/arch/openrisc/boot/dts/simple_smp.dts
new file mode 100644
index 000000000000..defbb92714ec
--- /dev/null
+++ b/arch/openrisc/boot/dts/simple_smp.dts
@@ -0,0 +1,63 @@
+/dts-v1/;
+/ {
+ compatible = "opencores,or1ksim";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&pic>;
+
+ aliases {
+ uart0 = &serial0;
+ };
+
+ chosen {
+ bootargs = "earlycon";
+ stdout-path = "uart0:115200";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x02000000>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu@0 {
+ compatible = "opencores,or1200-rtlsvn481";
+ reg = <0>;
+ clock-frequency = <20000000>;
+ };
+ cpu@1 {
+ compatible = "opencores,or1200-rtlsvn481";
+ reg = <1>;
+ clock-frequency = <20000000>;
+ };
+ };
+
+ ompic: ompic@98000000 {
+ compatible = "openrisc,ompic";
+ reg = <0x98000000 16>;
+ interrupt-controller;
+ #interrupt-cells = <0>;
+ interrupts = <1>;
+ };
+
+ /*
+ * OR1K PIC is built into CPU and accessed via special purpose
+ * registers. It is not addressable and, hence, has no 'reg'
+ * property.
+ */
+ pic: pic {
+ compatible = "opencores,or1k-pic-level";
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ };
+
+ serial0: serial@90000000 {
+ compatible = "opencores,uart16550-rtlsvn105", "ns16550a";
+ reg = <0x90000000 0x100>;
+ interrupts = <2>;
+ clock-frequency = <20000000>;
+ };
+
+};
diff --git a/arch/openrisc/configs/simple_smp_defconfig b/arch/openrisc/configs/simple_smp_defconfig
new file mode 100644
index 000000000000..b6e3c7e158e7
--- /dev/null
+++ b/arch/openrisc/configs/simple_smp_defconfig
@@ -0,0 +1,66 @@
+CONFIG_CROSS_COMPILE="or1k-linux-"
+CONFIG_LOCALVERSION="-simple-smp"
+CONFIG_NO_HZ=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_EXPERT=y
+# CONFIG_KALLSYMS is not set
+# CONFIG_EPOLL is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLOB=y
+CONFIG_MODULES=y
+# CONFIG_BLOCK is not set
+CONFIG_OPENRISC_BUILTIN_DTB="simple_smp"
+CONFIG_SMP=y
+CONFIG_HZ_100=y
+CONFIG_OPENRISC_HAVE_SHADOW_GPRS=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_CUBIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_NETDEVICES=y
+CONFIG_ETHOC=y
+CONFIG_MICREL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_XZ_DEC=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_RCU_TRACE is not set
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 5bea416a7792..6eb16719549e 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -1,7 +1,6 @@
generic-y += barrier.h
generic-y += bug.h
generic-y += bugs.h
-generic-y += cacheflush.h
generic-y += checksum.h
generic-y += clkdev.h
generic-y += current.h
@@ -28,6 +27,10 @@ generic-y += module.h
generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
+generic-y += qspinlock_types.h
+generic-y += qspinlock.h
+generic-y += qrwlock_types.h
+generic-y += qrwlock.h
generic-y += sections.h
generic-y += segment.h
generic-y += string.h
diff --git a/arch/openrisc/include/asm/cacheflush.h b/arch/openrisc/include/asm/cacheflush.h
new file mode 100644
index 000000000000..70f46fd7a074
--- /dev/null
+++ b/arch/openrisc/include/asm/cacheflush.h
@@ -0,0 +1,96 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) Jan Henrik Weinstock <jan.weinstock@rwth-aachen.de>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_CACHEFLUSH_H
+#define __ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+
+/*
+ * Helper function for flushing or invalidating entire pages from data
+ * and instruction caches. SMP needs a little extra work, since we need
+ * to flush the pages on all cpus.
+ */
+extern void local_dcache_page_flush(struct page *page);
+extern void local_icache_page_inv(struct page *page);
+
+/*
+ * Data cache flushing always happens on the local cpu. Instruction cache
+ * invalidations need to be broadcast to all other cpus in the system in
+ * case of SMP configurations.
+ */
+#ifndef CONFIG_SMP
+#define dcache_page_flush(page) local_dcache_page_flush(page)
+#define icache_page_inv(page) local_icache_page_inv(page)
+#else /* CONFIG_SMP */
+#define dcache_page_flush(page) local_dcache_page_flush(page)
+#define icache_page_inv(page) smp_icache_page_inv(page)
+extern void smp_icache_page_inv(struct page *page);
+#endif /* CONFIG_SMP */
+
+/*
+ * Synchronizes caches. Whenever a cpu writes executable code to memory, this
+ * should be called to make sure the processor sees the newly written code.
+ */
+static inline void sync_icache_dcache(struct page *page)
+{
+ if (!IS_ENABLED(CONFIG_DCACHE_WRITETHROUGH))
+ dcache_page_flush(page);
+ icache_page_inv(page);
+}
+
+/*
+ * Pages with this bit set need not be flushed/invalidated, since
+ * they have not changed since last flush. New pages start with
+ * PG_arch_1 not set and are therefore dirty by default.
+ */
+#define PG_dc_clean PG_arch_1
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+ clear_bit(PG_dc_clean, &page->flags);
+}
+
+/*
+ * Other interfaces are not required since we do not have virtually
+ * indexed or tagged caches. So we can use the default here.
+ */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_icache_range(start, end) do { } while (0)
+#define flush_icache_page(vma, pg) do { } while (0)
+#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ if (vma->vm_flags & VM_EXEC) \
+ sync_icache_dcache(page); \
+ } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+#endif /* __ASM_CACHEFLUSH_H */
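[Editor's note] The PG_dc_clean handling above implements a lazy flush: flush_dcache_page() only records that a page needs maintenance, and the costly dcache flush plus icache invalidate is deferred until the page is actually mapped (via update_cache(), added in pgtable.h further below). A minimal userspace toy model of that deferral is sketched here; the toy_* names are hypothetical stand-ins for the kernel helpers, not part of the patch.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model only, not kernel code: mimics the PG_dc_clean bookkeeping
 * described above with hypothetical stand-in names.
 */
struct toy_page {
	bool dc_clean;                    /* plays the role of PG_dc_clean */
};

static void toy_flush_dcache_page(struct toy_page *page)
{
	page->dc_clean = false;           /* cheap: just record "needs maintenance" */
}

static void toy_sync_icache_dcache(struct toy_page *page)
{
	printf("dcache flush + icache invalidate\n");  /* the expensive part */
	page->dc_clean = true;
}

/* Called when the page is mapped, cf. update_cache()/update_mmu_cache(). */
static void toy_update_cache(struct toy_page *page, bool executable)
{
	if (executable && !page->dc_clean)
		toy_sync_icache_dcache(page);
}

int main(void)
{
	struct toy_page page = { .dc_clean = true };

	toy_flush_dcache_page(&page);     /* a writer dirtied the page */
	toy_update_cache(&page, true);    /* first executable mapping: flush now */
	toy_update_cache(&page, true);    /* still clean: nothing to do */
	return 0;
}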
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index f0a5d8b844d6..d29f7db53906 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -1,32 +1,29 @@
/*
+ * 1, 2 and 4 byte cmpxchg and xchg implementations for OpenRISC.
+ *
* Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
+ *
+ * Note:
+ * The portable implementations of 1 and 2 byte xchg and cmpxchg using a 4
+ * byte cmpxchg are sourced heavily from the sh and mips implementations.
*/
#ifndef __ASM_OPENRISC_CMPXCHG_H
#define __ASM_OPENRISC_CMPXCHG_H
#include <linux/types.h>
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid cmpxchg().
- */
-extern void __cmpxchg_called_with_bad_pointer(void);
+#include <linux/bitops.h>
#define __HAVE_ARCH_CMPXCHG 1
-static inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+static inline unsigned long cmpxchg_u32(volatile void *ptr,
+ unsigned long old, unsigned long new)
{
- if (size != 4) {
- __cmpxchg_called_with_bad_pointer();
- return old;
- }
-
__asm__ __volatile__(
"1: l.lwa %0, 0(%1) \n"
" l.sfeq %0, %2 \n"
@@ -43,6 +40,97 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
return old;
}
+static inline unsigned long xchg_u32(volatile void *ptr,
+ unsigned long val)
+{
+ __asm__ __volatile__(
+ "1: l.lwa %0, 0(%1) \n"
+ " l.swa 0(%1), %2 \n"
+ " l.bnf 1b \n"
+ " l.nop \n"
+ : "=&r"(val)
+ : "r"(ptr), "r"(val)
+ : "cc", "memory");
+
+ return val;
+}
+
+static inline u32 cmpxchg_small(volatile void *ptr, u32 old, u32 new,
+ int size)
+{
+ int off = (unsigned long)ptr % sizeof(u32);
+ volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+ int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
+#else
+ int bitoff = off * BITS_PER_BYTE;
+#endif
+ u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+ u32 load32, old32, new32;
+ u32 ret;
+
+ load32 = READ_ONCE(*p);
+
+ while (true) {
+ ret = (load32 & bitmask) >> bitoff;
+ if (old != ret)
+ return ret;
+
+ old32 = (load32 & ~bitmask) | (old << bitoff);
+ new32 = (load32 & ~bitmask) | (new << bitoff);
+
+ /* Do 32 bit cmpxchg */
+ load32 = cmpxchg_u32(p, old32, new32);
+ if (load32 == old32)
+ return old;
+ }
+}
+
+/* xchg */
+
+static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
+{
+ int off = (unsigned long)ptr % sizeof(u32);
+ volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+ int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
+#else
+ int bitoff = off * BITS_PER_BYTE;
+#endif
+ u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+ u32 oldv, newv;
+ u32 ret;
+
+ do {
+ oldv = READ_ONCE(*p);
+ ret = (oldv & bitmask) >> bitoff;
+ newv = (oldv & ~bitmask) | (x << bitoff);
+ } while (cmpxchg_u32(p, oldv, newv) != oldv);
+
+ return ret;
+}
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg().
+ */
+extern unsigned long __cmpxchg_called_with_bad_pointer(void)
+ __compiletime_error("Bad argument size for cmpxchg");
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 1:
+ case 2:
+ return cmpxchg_small(ptr, old, new, size);
+ case 4:
+ return cmpxchg_u32(ptr, old, new);
+ default:
+ return __cmpxchg_called_with_bad_pointer();
+ }
+}
+
#define cmpxchg(ptr, o, n) \
({ \
(__typeof__(*(ptr))) __cmpxchg((ptr), \
@@ -55,32 +143,27 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
* This function doesn't exist, so you'll get a linker error if
* something tries to do an invalidly-sized xchg().
*/
-extern void __xchg_called_with_bad_pointer(void);
+extern unsigned long __xchg_called_with_bad_pointer(void)
+ __compiletime_error("Bad argument size for xchg");
-static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
- int size)
+static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
+ int size)
{
- if (size != 4) {
- __xchg_called_with_bad_pointer();
- return val;
+ switch (size) {
+ case 1:
+ case 2:
+ return xchg_small(ptr, with, size);
+ case 4:
+ return xchg_u32(ptr, with);
+ default:
+ return __xchg_called_with_bad_pointer();
}
-
- __asm__ __volatile__(
- "1: l.lwa %0, 0(%1) \n"
- " l.swa 0(%1), %2 \n"
- " l.bnf 1b \n"
- " l.nop \n"
- : "=&r"(val)
- : "r"(ptr), "r"(val)
- : "cc", "memory");
-
- return val;
}
#define xchg(ptr, with) \
({ \
- (__typeof__(*(ptr))) __xchg((unsigned long)(with), \
- (ptr), \
+ (__typeof__(*(ptr))) __xchg((ptr), \
+ (unsigned long)(with), \
sizeof(*(ptr))); \
})
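[Editor's note] The cmpxchg_small()/xchg_small() helpers above emulate 1- and 2-byte atomics by operating on the containing aligned 32-bit word: mask out the target byte(s), splice the old/new values into that word, and retry the 4-byte cmpxchg until it succeeds without interference from neighbouring bytes. Below is a rough userspace sketch of the same masking idea, not part of the patch; it assumes a little-endian host (the kernel version also handles big endian) and uses GCC's __sync_val_compare_and_swap() as a stand-in for cmpxchg_u32().

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch only: 1-byte cmpxchg built from a 4-byte
 * compare-and-swap, mirroring cmpxchg_small() above.
 */
static uint8_t cmpxchg_u8(volatile uint8_t *ptr, uint8_t old, uint8_t new)
{
	uintptr_t off = (uintptr_t)ptr % sizeof(uint32_t);
	volatile uint32_t *p = (volatile uint32_t *)((uintptr_t)ptr - off);
	unsigned int bitoff = off * 8;               /* little-endian byte order */
	uint32_t mask = 0xffu << bitoff;
	uint32_t load32 = *p;

	for (;;) {
		uint8_t cur = (load32 & mask) >> bitoff;

		if (cur != old)
			return cur;                  /* comparison failed */

		uint32_t old32 = (load32 & ~mask) | ((uint32_t)old << bitoff);
		uint32_t new32 = (load32 & ~mask) | ((uint32_t)new << bitoff);

		/* Retry if a neighbouring byte changed under us. */
		load32 = __sync_val_compare_and_swap(p, old32, new32);
		if (load32 == old32)
			return old;                  /* swap succeeded */
	}
}

int main(void)
{
	static volatile uint32_t word = 0x11223344;  /* aligned backing word */
	volatile uint8_t *bytes = (volatile uint8_t *)&word;

	printf("before: 0x%08x\n", (unsigned int)word);
	cmpxchg_u8(&bytes[1], bytes[1], 0xaa);       /* swap a single byte */
	printf("after : 0x%08x\n", (unsigned int)word);
	return 0;
}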
diff --git a/arch/openrisc/include/asm/cpuinfo.h b/arch/openrisc/include/asm/cpuinfo.h
index ec10679d6429..4ea0a33eba6c 100644
--- a/arch/openrisc/include/asm/cpuinfo.h
+++ b/arch/openrisc/include/asm/cpuinfo.h
@@ -19,7 +19,7 @@
#ifndef __ASM_OPENRISC_CPUINFO_H
#define __ASM_OPENRISC_CPUINFO_H
-struct cpuinfo {
+struct cpuinfo_or1k {
u32 clock_frequency;
u32 icache_size;
@@ -29,8 +29,11 @@ struct cpuinfo {
u32 dcache_size;
u32 dcache_block_size;
u32 dcache_ways;
+
+ u16 coreid;
};
-extern struct cpuinfo cpuinfo;
+extern struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS];
+extern void setup_cpuinfo(void);
#endif /* __ASM_OPENRISC_CPUINFO_H */
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
index f41bd3cb76d9..e212a1f0b6d2 100644
--- a/arch/openrisc/include/asm/dma-mapping.h
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -23,7 +23,6 @@
*/
#include <linux/dma-debug.h>
-#include <linux/kmemcheck.h>
#include <linux/dma-mapping.h>
extern const struct dma_map_ops or1k_dma_map_ops;
diff --git a/arch/openrisc/include/asm/mmu_context.h b/arch/openrisc/include/asm/mmu_context.h
index e94b814d2e3c..c380d8caf84f 100644
--- a/arch/openrisc/include/asm/mmu_context.h
+++ b/arch/openrisc/include/asm/mmu_context.h
@@ -34,7 +34,7 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* registers like cr3 on the i386
*/
-extern volatile pgd_t *current_pgd; /* defined in arch/openrisc/mm/fault.c */
+extern volatile pgd_t *current_pgd[]; /* defined in arch/openrisc/mm/fault.c */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 71a6f08de8f2..21c71303012f 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -94,7 +94,7 @@ extern void paging_init(void);
* 64 MB of vmalloc area is comparable to what's available on other arches.
*/
-#define VMALLOC_START (PAGE_OFFSET-0x04000000)
+#define VMALLOC_START (PAGE_OFFSET-0x04000000UL)
#define VMALLOC_END (PAGE_OFFSET)
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
@@ -416,15 +416,19 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
struct vm_area_struct;
-/*
- * or32 doesn't have any external MMU info: the kernel page
- * tables contain all the necessary information.
- *
- * Actually I am not sure on what this could be used for.
- */
+static inline void update_tlb(struct vm_area_struct *vma,
+ unsigned long address, pte_t *pte)
+{
+}
+
+extern void update_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *pte);
+
static inline void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *pte)
{
+ update_tlb(vma, address, pte);
+ update_cache(vma, address, pte);
}
/* __PHX__ FIXME, SWAP, this probably doesn't work */
diff --git a/arch/openrisc/include/asm/serial.h b/arch/openrisc/include/asm/serial.h
index 270a45241639..cb5932f5447a 100644
--- a/arch/openrisc/include/asm/serial.h
+++ b/arch/openrisc/include/asm/serial.h
@@ -29,7 +29,7 @@
* it needs to be correct to get the early console working.
*/
-#define BASE_BAUD (cpuinfo.clock_frequency/16)
+#define BASE_BAUD (cpuinfo_or1k[smp_processor_id()].clock_frequency/16)
#endif /* __KERNEL__ */
diff --git a/arch/openrisc/include/asm/smp.h b/arch/openrisc/include/asm/smp.h
new file mode 100644
index 000000000000..e21d2f12b5b6
--- /dev/null
+++ b/arch/openrisc/include/asm/smp.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_OPENRISC_SMP_H
+#define __ASM_OPENRISC_SMP_H
+
+#include <asm/spr.h>
+#include <asm/spr_defs.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define hard_smp_processor_id() mfspr(SPR_COREID)
+
+extern void smp_init_cpus(void);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
+extern void handle_IPI(unsigned int ipi_msg);
+
+#endif /* __ASM_OPENRISC_SMP_H */
diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h
index fd00a3a24123..9b761e0e22c3 100644
--- a/arch/openrisc/include/asm/spinlock.h
+++ b/arch/openrisc/include/asm/spinlock.h
@@ -19,6 +19,16 @@
#ifndef __ASM_OPENRISC_SPINLOCK_H
#define __ASM_OPENRISC_SPINLOCK_H
-#error "or32 doesn't do SMP yet"
+#include <asm/qspinlock.h>
+
+#include <asm/qrwlock.h>
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
#endif
diff --git a/arch/openrisc/include/asm/spinlock_types.h b/arch/openrisc/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..7c6fb1208c88
--- /dev/null
+++ b/arch/openrisc/include/asm/spinlock_types.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_OPENRISC_SPINLOCK_TYPES_H
+#define _ASM_OPENRISC_SPINLOCK_TYPES_H
+
+#include <asm/qspinlock_types.h>
+#include <asm/qrwlock_types.h>
+
+#endif /* _ASM_OPENRISC_SPINLOCK_TYPES_H */
diff --git a/arch/openrisc/include/asm/spr_defs.h b/arch/openrisc/include/asm/spr_defs.h
index 367dac70326a..154b5a1ee579 100644
--- a/arch/openrisc/include/asm/spr_defs.h
+++ b/arch/openrisc/include/asm/spr_defs.h
@@ -51,6 +51,11 @@
#define SPR_ICCFGR (SPRGROUP_SYS + 6)
#define SPR_DCFGR (SPRGROUP_SYS + 7)
#define SPR_PCCFGR (SPRGROUP_SYS + 8)
+#define SPR_VR2 (SPRGROUP_SYS + 9)
+#define SPR_AVR (SPRGROUP_SYS + 10)
+#define SPR_EVBAR (SPRGROUP_SYS + 11)
+#define SPR_AECR (SPRGROUP_SYS + 12)
+#define SPR_AESR (SPRGROUP_SYS + 13)
#define SPR_NPC (SPRGROUP_SYS + 16) /* CZ 21/06/01 */
#define SPR_SR (SPRGROUP_SYS + 17) /* CZ 21/06/01 */
#define SPR_PPC (SPRGROUP_SYS + 18) /* CZ 21/06/01 */
@@ -61,6 +66,8 @@
#define SPR_EEAR_LAST (SPRGROUP_SYS + 63)
#define SPR_ESR_BASE (SPRGROUP_SYS + 64)
#define SPR_ESR_LAST (SPRGROUP_SYS + 79)
+#define SPR_COREID (SPRGROUP_SYS + 128)
+#define SPR_NUMCORES (SPRGROUP_SYS + 129)
#define SPR_GPR_BASE (SPRGROUP_SYS + 1024)
/* Data MMU group */
@@ -135,12 +142,19 @@
#define SPR_VR_CFG 0x00ff0000 /* Processor configuration */
#define SPR_VR_RES 0x0000ffc0 /* Reserved */
#define SPR_VR_REV 0x0000003f /* Processor revision */
+#define SPR_VR_UVRP 0x00000040 /* Updated Version Registers Present */
#define SPR_VR_VER_OFF 24
#define SPR_VR_CFG_OFF 16
#define SPR_VR_REV_OFF 0
/*
+ * Bit definitions for the Version Register 2
+ */
+#define SPR_VR2_CPUID 0xff000000 /* Processor ID */
+#define SPR_VR2_VER 0x00ffffff /* Processor version */
+
+/*
* Bit definitions for the Unit Present Register
*
*/
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h
index 6e619a79a401..c229aa6bb502 100644
--- a/arch/openrisc/include/asm/thread_info.h
+++ b/arch/openrisc/include/asm/thread_info.h
@@ -74,7 +74,7 @@ struct thread_info {
.task = &tsk, \
.flags = 0, \
.cpu = 0, \
- .preempt_count = 1, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
.ksp = 0, \
}
diff --git a/arch/openrisc/include/asm/time.h b/arch/openrisc/include/asm/time.h
new file mode 100644
index 000000000000..313ee975774b
--- /dev/null
+++ b/arch/openrisc/include/asm/time.h
@@ -0,0 +1,23 @@
+/*
+ * OpenRISC timer API
+ *
+ * Copyright (C) 2017 by Stafford Horne (shorne@gmail.com)
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_OR1K_TIME_H
+#define __ASM_OR1K_TIME_H
+
+extern void openrisc_clockevent_init(void);
+
+extern void openrisc_timer_set(unsigned long count);
+extern void openrisc_timer_set_next(unsigned long delta);
+
+#ifdef CONFIG_SMP
+extern void synchronise_count_master(int cpu);
+extern void synchronise_count_slave(int cpu);
+#endif
+
+#endif /* __ASM_OR1K_TIME_H */
diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h
index 6a2accd6cb67..94227f0eaf6d 100644
--- a/arch/openrisc/include/asm/tlbflush.h
+++ b/arch/openrisc/include/asm/tlbflush.h
@@ -33,13 +33,26 @@
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(mm, start, end) flushes a range of pages
*/
+extern void local_flush_tlb_all(void);
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr);
+extern void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end);
-void flush_tlb_all(void);
-void flush_tlb_mm(struct mm_struct *mm);
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end);
+#ifndef CONFIG_SMP
+#define flush_tlb_all local_flush_tlb_all
+#define flush_tlb_mm local_flush_tlb_mm
+#define flush_tlb_page local_flush_tlb_page
+#define flush_tlb_range local_flush_tlb_range
+#else
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+#endif
static inline void flush_tlb(void)
{
diff --git a/arch/openrisc/include/asm/unwinder.h b/arch/openrisc/include/asm/unwinder.h
new file mode 100644
index 000000000000..165ec6f02ab8
--- /dev/null
+++ b/arch/openrisc/include/asm/unwinder.h
@@ -0,0 +1,20 @@
+/*
+ * OpenRISC unwinder.h
+ *
+ * Architecture API for unwinding stacks.
+ *
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_OPENRISC_UNWINDER_H
+#define __ASM_OPENRISC_UNWINDER_H
+
+void unwind_stack(void *data, unsigned long *stack,
+ void (*trace)(void *data, unsigned long addr,
+ int reliable));
+
+#endif /* __ASM_OPENRISC_UNWINDER_H */
diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile
index c4ea6cabad46..2d172e79f58d 100644
--- a/arch/openrisc/kernel/Makefile
+++ b/arch/openrisc/kernel/Makefile
@@ -7,8 +7,10 @@ extra-y := head.o vmlinux.lds
obj-y := setup.o or32_ksyms.o process.o dma.o \
traps.o time.o irq.o entry.o ptrace.o signal.o \
- sys_call_table.o
+ sys_call_table.o unwinder.o
+obj-$(CONFIG_SMP) += smp.o sync-timer.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_OF) += prom.o
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index b10369b7e31b..a945f00011b4 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -32,6 +32,7 @@ page_set_nocache(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
unsigned long cl;
+ struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
pte_val(*pte) |= _PAGE_CI;
@@ -42,7 +43,7 @@ page_set_nocache(pte_t *pte, unsigned long addr,
flush_tlb_page(NULL, addr);
/* Flush page out of dcache */
- for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
+ for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
mtspr(SPR_DCBFR, cl);
return 0;
@@ -140,6 +141,7 @@ or1k_map_page(struct device *dev, struct page *page,
{
unsigned long cl;
dma_addr_t addr = page_to_phys(page) + offset;
+ struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return addr;
@@ -148,13 +150,13 @@ or1k_map_page(struct device *dev, struct page *page,
case DMA_TO_DEVICE:
/* Flush the dcache for the requested range */
for (cl = addr; cl < addr + size;
- cl += cpuinfo.dcache_block_size)
+ cl += cpuinfo->dcache_block_size)
mtspr(SPR_DCBFR, cl);
break;
case DMA_FROM_DEVICE:
/* Invalidate the dcache for the requested range */
for (cl = addr; cl < addr + size;
- cl += cpuinfo.dcache_block_size)
+ cl += cpuinfo->dcache_block_size)
mtspr(SPR_DCBIR, cl);
break;
default:
@@ -213,9 +215,10 @@ or1k_sync_single_for_cpu(struct device *dev,
{
unsigned long cl;
dma_addr_t addr = dma_handle;
+ struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
/* Invalidate the dcache for the requested range */
- for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
+ for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
mtspr(SPR_DCBIR, cl);
}
@@ -226,9 +229,10 @@ or1k_sync_single_for_device(struct device *dev,
{
unsigned long cl;
dma_addr_t addr = dma_handle;
+ struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
/* Flush the dcache for the requested range */
- for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
+ for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
mtspr(SPR_DCBFR, cl);
}
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index 1b7160c79646..690d55272ba6 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -42,6 +42,61 @@
/* =========================================================[ macros ]=== */
+#ifdef CONFIG_TRACE_IRQFLAGS
+/*
+ * Trace irq on/off creating a stack frame.
+ */
+#define TRACE_IRQS_OP(trace_op) \
+ l.sw -8(r1),r2 /* store frame pointer */ ;\
+ l.sw -4(r1),r9 /* store return address */ ;\
+ l.addi r2,r1,0 /* move sp to fp */ ;\
+ l.jal trace_op ;\
+ l.addi r1,r1,-8 ;\
+ l.ori r1,r2,0 /* restore sp */ ;\
+ l.lwz r9,-4(r1) /* restore return address */ ;\
+ l.lwz r2,-8(r1) /* restore fp */ ;\
+/*
+ * Trace irq on/off and save registers we need that would otherwise be
+ * clobbered.
+ */
+#define TRACE_IRQS_SAVE(t1,trace_op) \
+ l.sw -12(r1),t1 /* save extra reg */ ;\
+ l.sw -8(r1),r2 /* store frame pointer */ ;\
+ l.sw -4(r1),r9 /* store return address */ ;\
+ l.addi r2,r1,0 /* move sp to fp */ ;\
+ l.jal trace_op ;\
+ l.addi r1,r1,-12 ;\
+ l.ori r1,r2,0 /* restore sp */ ;\
+ l.lwz r9,-4(r1) /* restore return address */ ;\
+ l.lwz r2,-8(r1) /* restore fp */ ;\
+ l.lwz t1,-12(r1) /* restore extra reg */
+
+#define TRACE_IRQS_OFF TRACE_IRQS_OP(trace_hardirqs_off)
+#define TRACE_IRQS_ON TRACE_IRQS_OP(trace_hardirqs_on)
+#define TRACE_IRQS_ON_SYSCALL \
+ TRACE_IRQS_SAVE(r10,trace_hardirqs_on) ;\
+ l.lwz r3,PT_GPR3(r1) ;\
+ l.lwz r4,PT_GPR4(r1) ;\
+ l.lwz r5,PT_GPR5(r1) ;\
+ l.lwz r6,PT_GPR6(r1) ;\
+ l.lwz r7,PT_GPR7(r1) ;\
+ l.lwz r8,PT_GPR8(r1) ;\
+ l.lwz r11,PT_GPR11(r1)
+#define TRACE_IRQS_OFF_ENTRY \
+ l.lwz r5,PT_SR(r1) ;\
+ l.andi r3,r5,(SPR_SR_IEE|SPR_SR_TEE) ;\
+ l.sfeq r5,r0 /* skip trace if irqs were already off */;\
+ l.bf 1f ;\
+ l.nop ;\
+ TRACE_IRQS_SAVE(r4,trace_hardirqs_off) ;\
+1:
+#else
+#define TRACE_IRQS_OFF
+#define TRACE_IRQS_ON
+#define TRACE_IRQS_OFF_ENTRY
+#define TRACE_IRQS_ON_SYSCALL
+#endif
+
/*
* We need to disable interrupts at beginning of RESTORE_ALL
* since interrupt might come in after we've loaded EPC return address
@@ -124,6 +179,7 @@ handler: ;\
/* r30 already save */ ;\
/* l.sw PT_GPR30(r1),r30*/ ;\
l.sw PT_GPR31(r1),r31 ;\
+ TRACE_IRQS_OFF_ENTRY ;\
/* Store -1 in orig_gpr11 for non-syscall exceptions */ ;\
l.addi r30,r0,-1 ;\
l.sw PT_ORIG_GPR11(r1),r30
@@ -557,9 +613,6 @@ _string_syscall_return:
.align 4
ENTRY(_sys_call_handler)
- /* syscalls run with interrupts enabled */
- ENABLE_INTERRUPTS(r29) // enable interrupts, r29 is temp
-
/* r1, EPCR, ESR a already saved */
l.sw PT_GPR2(r1),r2
/* r3-r8 must be saved because syscall restart relies
@@ -597,6 +650,10 @@ ENTRY(_sys_call_handler)
/* l.sw PT_GPR30(r1),r30 */
_syscall_check_trace_enter:
+ /* syscalls run with interrupts enabled */
+ TRACE_IRQS_ON_SYSCALL
+ ENABLE_INTERRUPTS(r29) // enable interrupts, r29 is temp
+
/* If TIF_SYSCALL_TRACE is set, then we want to do syscall tracing */
l.lwz r30,TI_FLAGS(r10)
l.andi r30,r30,_TIF_SYSCALL_TRACE
@@ -657,6 +714,7 @@ _syscall_check_trace_leave:
_syscall_check_work:
/* Here we need to disable interrupts */
DISABLE_INTERRUPTS(r27,r29)
+ TRACE_IRQS_OFF
l.lwz r30,TI_FLAGS(r10)
l.andi r30,r30,_TIF_WORK_MASK
l.sfne r30,r0
@@ -871,6 +929,7 @@ UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00)
_resume_userspace:
DISABLE_INTERRUPTS(r3,r4)
+ TRACE_IRQS_OFF
l.lwz r4,TI_FLAGS(r10)
l.andi r13,r4,_TIF_WORK_MASK
l.sfeqi r13,0
@@ -909,6 +968,15 @@ _work_pending:
l.lwz r8,PT_GPR8(r1)
_restore_all:
+#ifdef CONFIG_TRACE_IRQFLAGS
+ l.lwz r4,PT_SR(r1)
+ l.andi r3,r4,(SPR_SR_IEE|SPR_SR_TEE)
+ l.sfeq r3,r0 /* skip trace if irqs were off */
+ l.bf skip_hardirqs_on
+ l.nop
+ TRACE_IRQS_ON
+skip_hardirqs_on:
+#endif
RESTORE_ALL
/* This returns to userspace code */
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index 1e87913576e3..fb02b2a1d6f2 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -49,9 +49,31 @@
/* ============================================[ tmp store locations ]=== */
+#define SPR_SHADOW_GPR(x) ((x) + SPR_GPR_BASE + 32)
+
/*
* emergency_print temporary stores
*/
+#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
+#define EMERGENCY_PRINT_STORE_GPR4 l.mtspr r0,r4,SPR_SHADOW_GPR(14)
+#define EMERGENCY_PRINT_LOAD_GPR4 l.mfspr r4,r0,SPR_SHADOW_GPR(14)
+
+#define EMERGENCY_PRINT_STORE_GPR5 l.mtspr r0,r5,SPR_SHADOW_GPR(15)
+#define EMERGENCY_PRINT_LOAD_GPR5 l.mfspr r5,r0,SPR_SHADOW_GPR(15)
+
+#define EMERGENCY_PRINT_STORE_GPR6 l.mtspr r0,r6,SPR_SHADOW_GPR(16)
+#define EMERGENCY_PRINT_LOAD_GPR6 l.mfspr r6,r0,SPR_SHADOW_GPR(16)
+
+#define EMERGENCY_PRINT_STORE_GPR7 l.mtspr r0,r7,SPR_SHADOW_GPR(7)
+#define EMERGENCY_PRINT_LOAD_GPR7 l.mfspr r7,r0,SPR_SHADOW_GPR(7)
+
+#define EMERGENCY_PRINT_STORE_GPR8 l.mtspr r0,r8,SPR_SHADOW_GPR(8)
+#define EMERGENCY_PRINT_LOAD_GPR8 l.mfspr r8,r0,SPR_SHADOW_GPR(8)
+
+#define EMERGENCY_PRINT_STORE_GPR9 l.mtspr r0,r9,SPR_SHADOW_GPR(9)
+#define EMERGENCY_PRINT_LOAD_GPR9 l.mfspr r9,r0,SPR_SHADOW_GPR(9)
+
+#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EMERGENCY_PRINT_STORE_GPR4 l.sw 0x20(r0),r4
#define EMERGENCY_PRINT_LOAD_GPR4 l.lwz r4,0x20(r0)
@@ -70,13 +92,28 @@
#define EMERGENCY_PRINT_STORE_GPR9 l.sw 0x34(r0),r9
#define EMERGENCY_PRINT_LOAD_GPR9 l.lwz r9,0x34(r0)
+#endif
/*
* TLB miss handlers temorary stores
*/
-#define EXCEPTION_STORE_GPR9 l.sw 0x10(r0),r9
-#define EXCEPTION_LOAD_GPR9 l.lwz r9,0x10(r0)
+#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
+#define EXCEPTION_STORE_GPR2 l.mtspr r0,r2,SPR_SHADOW_GPR(2)
+#define EXCEPTION_LOAD_GPR2 l.mfspr r2,r0,SPR_SHADOW_GPR(2)
+
+#define EXCEPTION_STORE_GPR3 l.mtspr r0,r3,SPR_SHADOW_GPR(3)
+#define EXCEPTION_LOAD_GPR3 l.mfspr r3,r0,SPR_SHADOW_GPR(3)
+
+#define EXCEPTION_STORE_GPR4 l.mtspr r0,r4,SPR_SHADOW_GPR(4)
+#define EXCEPTION_LOAD_GPR4 l.mfspr r4,r0,SPR_SHADOW_GPR(4)
+
+#define EXCEPTION_STORE_GPR5 l.mtspr r0,r5,SPR_SHADOW_GPR(5)
+#define EXCEPTION_LOAD_GPR5 l.mfspr r5,r0,SPR_SHADOW_GPR(5)
+
+#define EXCEPTION_STORE_GPR6 l.mtspr r0,r6,SPR_SHADOW_GPR(6)
+#define EXCEPTION_LOAD_GPR6 l.mfspr r6,r0,SPR_SHADOW_GPR(6)
+#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EXCEPTION_STORE_GPR2 l.sw 0x64(r0),r2
#define EXCEPTION_LOAD_GPR2 l.lwz r2,0x64(r0)
@@ -92,35 +129,67 @@
#define EXCEPTION_STORE_GPR6 l.sw 0x74(r0),r6
#define EXCEPTION_LOAD_GPR6 l.lwz r6,0x74(r0)
+#endif
/*
* EXCEPTION_HANDLE temporary stores
*/
+#ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS
+#define EXCEPTION_T_STORE_GPR30 l.mtspr r0,r30,SPR_SHADOW_GPR(30)
+#define EXCEPTION_T_LOAD_GPR30(reg) l.mfspr reg,r0,SPR_SHADOW_GPR(30)
+
+#define EXCEPTION_T_STORE_GPR10 l.mtspr r0,r10,SPR_SHADOW_GPR(10)
+#define EXCEPTION_T_LOAD_GPR10(reg) l.mfspr reg,r0,SPR_SHADOW_GPR(10)
+
+#define EXCEPTION_T_STORE_SP l.mtspr r0,r1,SPR_SHADOW_GPR(1)
+#define EXCEPTION_T_LOAD_SP(reg) l.mfspr reg,r0,SPR_SHADOW_GPR(1)
+
+#else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */
#define EXCEPTION_T_STORE_GPR30 l.sw 0x78(r0),r30
#define EXCEPTION_T_LOAD_GPR30(reg) l.lwz reg,0x78(r0)
#define EXCEPTION_T_STORE_GPR10 l.sw 0x7c(r0),r10
#define EXCEPTION_T_LOAD_GPR10(reg) l.lwz reg,0x7c(r0)
-#define EXCEPTION_T_STORE_SP l.sw 0x80(r0),r1
+#define EXCEPTION_T_STORE_SP l.sw 0x80(r0),r1
#define EXCEPTION_T_LOAD_SP(reg) l.lwz reg,0x80(r0)
-
-/*
- * For UNHANLDED_EXCEPTION
- */
-
-#define EXCEPTION_T_STORE_GPR31 l.sw 0x84(r0),r31
-#define EXCEPTION_T_LOAD_GPR31(reg) l.lwz reg,0x84(r0)
+#endif
/* =========================================================[ macros ]=== */
-
+#ifdef CONFIG_SMP
+#define GET_CURRENT_PGD(reg,t1) \
+ LOAD_SYMBOL_2_GPR(reg,current_pgd) ;\
+ l.mfspr t1,r0,SPR_COREID ;\
+ l.slli t1,t1,2 ;\
+ l.add reg,reg,t1 ;\
+ tophys (t1,reg) ;\
+ l.lwz reg,0(t1)
+#else
#define GET_CURRENT_PGD(reg,t1) \
LOAD_SYMBOL_2_GPR(reg,current_pgd) ;\
tophys (t1,reg) ;\
l.lwz reg,0(t1)
+#endif
+/* Load r10 from current_thread_info_set - clobbers r1 and r30 */
+#ifdef CONFIG_SMP
+#define GET_CURRENT_THREAD_INFO \
+ LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\
+ tophys (r30,r1) ;\
+ l.mfspr r10,r0,SPR_COREID ;\
+ l.slli r10,r10,2 ;\
+ l.add r30,r30,r10 ;\
+ /* r10: current_thread_info */ ;\
+ l.lwz r10,0(r30)
+#else
+#define GET_CURRENT_THREAD_INFO \
+ LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\
+ tophys (r30,r1) ;\
+ /* r10: current_thread_info */ ;\
+ l.lwz r10,0(r30)
+#endif
/*
* DSCR: this is a common hook for handling exceptions. it will save
@@ -163,10 +232,7 @@
l.bnf 2f /* kernel_mode */ ;\
EXCEPTION_T_STORE_SP /* delay slot */ ;\
1: /* user_mode: */ ;\
- LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\
- tophys (r30,r1) ;\
- /* r10: current_thread_info */ ;\
- l.lwz r10,0(r30) ;\
+ GET_CURRENT_THREAD_INFO ;\
tophys (r30,r10) ;\
l.lwz r1,(TI_KSP)(r30) ;\
/* fall through */ ;\
@@ -226,7 +292,7 @@
*
*/
#define UNHANDLED_EXCEPTION(handler) \
- EXCEPTION_T_STORE_GPR31 ;\
+ EXCEPTION_T_STORE_GPR30 ;\
EXCEPTION_T_STORE_GPR10 ;\
EXCEPTION_T_STORE_SP ;\
/* temporary store r3, r9 into r1, r10 */ ;\
@@ -255,35 +321,35 @@
/* r1: KSP, r10: current, r31: __pa(KSP) */ ;\
/* r12: temp, syscall indicator, r13 temp */ ;\
l.addi r1,r1,-(INT_FRAME_SIZE) ;\
- /* r1 is KSP, r31 is __pa(KSP) */ ;\
- tophys (r31,r1) ;\
- l.sw PT_GPR12(r31),r12 ;\
+ /* r1 is KSP, r30 is __pa(KSP) */ ;\
+ tophys (r30,r1) ;\
+ l.sw PT_GPR12(r30),r12 ;\
l.mfspr r12,r0,SPR_EPCR_BASE ;\
- l.sw PT_PC(r31),r12 ;\
+ l.sw PT_PC(r30),r12 ;\
l.mfspr r12,r0,SPR_ESR_BASE ;\
- l.sw PT_SR(r31),r12 ;\
+ l.sw PT_SR(r30),r12 ;\
/* save r31 */ ;\
- EXCEPTION_T_LOAD_GPR31(r12) ;\
- l.sw PT_GPR31(r31),r12 ;\
+ EXCEPTION_T_LOAD_GPR30(r12) ;\
+ l.sw PT_GPR30(r30),r12 ;\
/* save r10 as was prior to exception */ ;\
EXCEPTION_T_LOAD_GPR10(r12) ;\
- l.sw PT_GPR10(r31),r12 ;\
+ l.sw PT_GPR10(r30),r12 ;\
/* save PT_SP as was prior to exception */ ;\
EXCEPTION_T_LOAD_SP(r12) ;\
- l.sw PT_SP(r31),r12 ;\
- l.sw PT_GPR13(r31),r13 ;\
+ l.sw PT_SP(r30),r12 ;\
+ l.sw PT_GPR13(r30),r13 ;\
/* --> */ ;\
/* save exception r4, set r4 = EA */ ;\
- l.sw PT_GPR4(r31),r4 ;\
+ l.sw PT_GPR4(r30),r4 ;\
l.mfspr r4,r0,SPR_EEAR_BASE ;\
/* r12 == 1 if we come from syscall */ ;\
CLEAR_GPR(r12) ;\
/* ----- play a MMU trick ----- */ ;\
- l.ori r31,r0,(EXCEPTION_SR) ;\
- l.mtspr r0,r31,SPR_ESR_BASE ;\
+ l.ori r30,r0,(EXCEPTION_SR) ;\
+ l.mtspr r0,r30,SPR_ESR_BASE ;\
/* r31: EA address of handler */ ;\
- LOAD_SYMBOL_2_GPR(r31,handler) ;\
- l.mtspr r0,r31,SPR_EPCR_BASE ;\
+ LOAD_SYMBOL_2_GPR(r30,handler) ;\
+ l.mtspr r0,r30,SPR_EPCR_BASE ;\
l.rfe
/* =====================================================[ exceptions] === */
@@ -487,6 +553,12 @@ _start:
CLEAR_GPR(r30)
CLEAR_GPR(r31)
+#ifdef CONFIG_SMP
+ l.mfspr r26,r0,SPR_COREID
+ l.sfeq r26,r0
+ l.bnf secondary_wait
+ l.nop
+#endif
/*
* set up initial ksp and current
*/
@@ -638,6 +710,100 @@ _flush_tlb:
l.jr r9
l.nop
+#ifdef CONFIG_SMP
+secondary_wait:
+ /* Doze the cpu until we are asked to run */
+ /* If we don't have power management, skip doze */
+ l.mfspr r25,r0,SPR_UPR
+ l.andi r25,r25,SPR_UPR_PMP
+ l.sfeq r25,r0
+ l.bf secondary_check_release
+ l.nop
+
+ /* Setup special secondary exception handler */
+ LOAD_SYMBOL_2_GPR(r3, _secondary_evbar)
+ tophys(r25,r3)
+ l.mtspr r0,r25,SPR_EVBAR
+
+ /* Enable Interrupts */
+ l.mfspr r25,r0,SPR_SR
+ l.ori r25,r25,SPR_SR_IEE
+ l.mtspr r0,r25,SPR_SR
+
+ /* Unmask interrupts */
+ l.mfspr r25,r0,SPR_PICMR
+ l.ori r25,r25,0xffff
+ l.mtspr r0,r25,SPR_PICMR
+
+ /* Doze */
+ l.mfspr r25,r0,SPR_PMR
+ LOAD_SYMBOL_2_GPR(r3, SPR_PMR_DME)
+ l.or r25,r25,r3
+ l.mtspr r0,r25,SPR_PMR
+
+ /* Wakeup - Restore exception handler */
+ l.mtspr r0,r0,SPR_EVBAR
+
+secondary_check_release:
+ /*
+ * Check if we actually got the release signal; if not, go back to
+ * sleep.
+ */
+ l.mfspr r25,r0,SPR_COREID
+ LOAD_SYMBOL_2_GPR(r3, secondary_release)
+ tophys(r4, r3)
+ l.lwz r3,0(r4)
+ l.sfeq r25,r3
+ l.bnf secondary_wait
+ l.nop
+ /* fall through to secondary_init */
+
+secondary_init:
+ /*
+ * set up initial ksp and current
+ */
+ LOAD_SYMBOL_2_GPR(r10, secondary_thread_info)
+ tophys (r30,r10)
+ l.lwz r10,0(r30)
+ l.addi r1,r10,THREAD_SIZE
+ tophys (r30,r10)
+ l.sw TI_KSP(r30),r1
+
+ l.jal _ic_enable
+ l.nop
+
+ l.jal _dc_enable
+ l.nop
+
+ l.jal _flush_tlb
+ l.nop
+
+ /*
+ * enable dmmu & immu
+ */
+ l.mfspr r30,r0,SPR_SR
+ l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME)
+ l.ori r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
+ l.or r30,r30,r28
+ /*
+ * This is a bit tricky, we need to switch over from physical addresses
+ * to virtual addresses on the fly.
+ * To do that, we first set up ESR with the IME and DME bits set.
+ * Then EPCR is set to secondary_start and then a l.rfe is issued to
+ * "jump" to that.
+ */
+ l.mtspr r0,r30,SPR_ESR_BASE
+ LOAD_SYMBOL_2_GPR(r30, secondary_start)
+ l.mtspr r0,r30,SPR_EPCR_BASE
+ l.rfe
+
+secondary_start:
+ LOAD_SYMBOL_2_GPR(r30, secondary_start_kernel)
+ l.jr r30
+ l.nop
+
+#endif
+
/* ========================================[ cache ]=== */
/* alignment here so we don't change memory offsets with
@@ -1533,6 +1699,17 @@ ENTRY(_early_uart_init)
l.jr r9
l.nop
+ .align 0x1000
+ .global _secondary_evbar
+_secondary_evbar:
+
+ .space 0x800
+ /* Just disable interrupts and Return */
+ l.ori r3,r0,SPR_SR_SM
+ l.mtspr r0,r3,SPR_ESR_BASE
+ l.rfe
+
+
.section .rodata
_string_unhandled_exception:
.string "\n\rRunarunaround: Unhandled exception 0x\0"
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index dbf5ee95a0d5..9d28ab14d139 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -93,7 +93,7 @@ static void __init setup_memory(void)
memblock_dump_all();
}
-struct cpuinfo cpuinfo;
+struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS];
static void print_cpuinfo(void)
{
@@ -101,12 +101,13 @@ static void print_cpuinfo(void)
unsigned long vr = mfspr(SPR_VR);
unsigned int version;
unsigned int revision;
+ struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
version = (vr & SPR_VR_VER) >> 24;
revision = (vr & SPR_VR_REV);
printk(KERN_INFO "CPU: OpenRISC-%x (revision %d) @%d MHz\n",
- version, revision, cpuinfo.clock_frequency / 1000000);
+ version, revision, cpuinfo->clock_frequency / 1000000);
if (!(upr & SPR_UPR_UP)) {
printk(KERN_INFO
@@ -117,15 +118,15 @@ static void print_cpuinfo(void)
if (upr & SPR_UPR_DCP)
printk(KERN_INFO
"-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n",
- cpuinfo.dcache_size, cpuinfo.dcache_block_size,
- cpuinfo.dcache_ways);
+ cpuinfo->dcache_size, cpuinfo->dcache_block_size,
+ cpuinfo->dcache_ways);
else
printk(KERN_INFO "-- dcache disabled\n");
if (upr & SPR_UPR_ICP)
printk(KERN_INFO
"-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n",
- cpuinfo.icache_size, cpuinfo.icache_block_size,
- cpuinfo.icache_ways);
+ cpuinfo->icache_size, cpuinfo->icache_block_size,
+ cpuinfo->icache_ways);
else
printk(KERN_INFO "-- icache disabled\n");
@@ -153,38 +154,58 @@ static void print_cpuinfo(void)
printk(KERN_INFO "-- custom unit(s)\n");
}
+static struct device_node *setup_find_cpu_node(int cpu)
+{
+ u32 hwid;
+ struct device_node *cpun;
+ struct device_node *cpus = of_find_node_by_path("/cpus");
+
+ for_each_available_child_of_node(cpus, cpun) {
+ if (of_property_read_u32(cpun, "reg", &hwid))
+ continue;
+ if (hwid == cpu)
+ return cpun;
+ }
+
+ return NULL;
+}
+
void __init setup_cpuinfo(void)
{
struct device_node *cpu;
unsigned long iccfgr, dccfgr;
unsigned long cache_set_size;
+ int cpu_id = smp_processor_id();
+ struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu_id];
- cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481");
+ cpu = setup_find_cpu_node(cpu_id);
if (!cpu)
- panic("No compatible CPU found in device tree...\n");
+ panic("Couldn't find CPU%d in device tree...\n", cpu_id);
iccfgr = mfspr(SPR_ICCFGR);
- cpuinfo.icache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
+ cpuinfo->icache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3);
- cpuinfo.icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
- cpuinfo.icache_size =
- cache_set_size * cpuinfo.icache_ways * cpuinfo.icache_block_size;
+ cpuinfo->icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
+ cpuinfo->icache_size =
+ cache_set_size * cpuinfo->icache_ways * cpuinfo->icache_block_size;
dccfgr = mfspr(SPR_DCCFGR);
- cpuinfo.dcache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
+ cpuinfo->dcache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3);
- cpuinfo.dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
- cpuinfo.dcache_size =
- cache_set_size * cpuinfo.dcache_ways * cpuinfo.dcache_block_size;
+ cpuinfo->dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
+ cpuinfo->dcache_size =
+ cache_set_size * cpuinfo->dcache_ways * cpuinfo->dcache_block_size;
if (of_property_read_u32(cpu, "clock-frequency",
- &cpuinfo.clock_frequency)) {
+ &cpuinfo->clock_frequency)) {
printk(KERN_WARNING
"Device tree missing CPU 'clock-frequency' parameter."
"Assuming frequency 25MHZ"
"This is probably not what you want.");
}
+ cpuinfo->coreid = mfspr(SPR_COREID);
+
of_node_put(cpu);
print_cpuinfo();
@@ -251,8 +272,8 @@ void __init detect_unit_config(unsigned long upr, unsigned long mask,
void calibrate_delay(void)
{
const int *val;
- struct device_node *cpu = NULL;
- cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481");
+ struct device_node *cpu = setup_find_cpu_node(smp_processor_id());
+
val = of_get_property(cpu, "clock-frequency", NULL);
if (!val)
panic("no cpu 'clock-frequency' parameter in device tree");
@@ -268,6 +289,10 @@ void __init setup_arch(char **cmdline_p)
setup_cpuinfo();
+#ifdef CONFIG_SMP
+ smp_init_cpus();
+#endif
+
/* process 1's initial memory region is the kernel code/data */
init_mm.start_code = (unsigned long)_stext;
init_mm.end_code = (unsigned long)_etext;
@@ -302,54 +327,78 @@ void __init setup_arch(char **cmdline_p)
static int show_cpuinfo(struct seq_file *m, void *v)
{
- unsigned long vr;
- int version, revision;
+ unsigned int vr, cpucfgr;
+ unsigned int avr;
+ unsigned int version;
+ struct cpuinfo_or1k *cpuinfo = v;
vr = mfspr(SPR_VR);
- version = (vr & SPR_VR_VER) >> 24;
- revision = vr & SPR_VR_REV;
-
- seq_printf(m,
- "cpu\t\t: OpenRISC-%x\n"
- "revision\t: %d\n"
- "frequency\t: %ld\n"
- "dcache size\t: %d bytes\n"
- "dcache block size\t: %d bytes\n"
- "dcache ways\t: %d\n"
- "icache size\t: %d bytes\n"
- "icache block size\t: %d bytes\n"
- "icache ways\t: %d\n"
- "immu\t\t: %d entries, %lu ways\n"
- "dmmu\t\t: %d entries, %lu ways\n"
- "bogomips\t: %lu.%02lu\n",
- version,
- revision,
- loops_per_jiffy * HZ,
- cpuinfo.dcache_size,
- cpuinfo.dcache_block_size,
- cpuinfo.dcache_ways,
- cpuinfo.icache_size,
- cpuinfo.icache_block_size,
- cpuinfo.icache_ways,
- 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
- 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW),
- 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
- 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW),
- (loops_per_jiffy * HZ) / 500000,
- ((loops_per_jiffy * HZ) / 5000) % 100);
+ cpucfgr = mfspr(SPR_CPUCFGR);
+
+#ifdef CONFIG_SMP
+ seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid);
+#endif
+ if (vr & SPR_VR_UVRP) {
+ vr = mfspr(SPR_VR2);
+ version = vr & SPR_VR2_VER;
+ avr = mfspr(SPR_AVR);
+ seq_printf(m, "cpu architecture\t: "
+ "OpenRISC 1000 (%d.%d-rev%d)\n",
+ (avr >> 24) & 0xff,
+ (avr >> 16) & 0xff,
+ (avr >> 8) & 0xff);
+ seq_printf(m, "cpu implementation id\t: 0x%x\n",
+ (vr & SPR_VR2_CPUID) >> 24);
+ seq_printf(m, "cpu version\t\t: 0x%x\n", version);
+ } else {
+ version = (vr & SPR_VR_VER) >> 24;
+ seq_printf(m, "cpu\t\t\t: OpenRISC-%x\n", version);
+ seq_printf(m, "revision\t\t: %d\n", vr & SPR_VR_REV);
+ }
+ seq_printf(m, "frequency\t\t: %ld\n", loops_per_jiffy * HZ);
+ seq_printf(m, "dcache size\t\t: %d bytes\n", cpuinfo->dcache_size);
+ seq_printf(m, "dcache block size\t: %d bytes\n",
+ cpuinfo->dcache_block_size);
+ seq_printf(m, "dcache ways\t\t: %d\n", cpuinfo->dcache_ways);
+ seq_printf(m, "icache size\t\t: %d bytes\n", cpuinfo->icache_size);
+ seq_printf(m, "icache block size\t: %d bytes\n",
+ cpuinfo->icache_block_size);
+ seq_printf(m, "icache ways\t\t: %d\n", cpuinfo->icache_ways);
+ seq_printf(m, "immu\t\t\t: %d entries, %lu ways\n",
+ 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
+ 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW));
+ seq_printf(m, "dmmu\t\t\t: %d entries, %lu ways\n",
+ 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
+ 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW));
+ seq_printf(m, "bogomips\t\t: %lu.%02lu\n",
+ (loops_per_jiffy * HZ) / 500000,
+ ((loops_per_jiffy * HZ) / 5000) % 100);
+
+ seq_puts(m, "features\t\t: ");
+ seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OB32S ? "orbis32" : "");
+ seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OB64S ? "orbis64" : "");
+ seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OF32S ? "orfpx32" : "");
+ seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OF64S ? "orfpx64" : "");
+ seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OV64S ? "orvdx64" : "");
+ seq_puts(m, "\n");
+
+ seq_puts(m, "\n");
+
return 0;
}
-static void *c_start(struct seq_file *m, loff_t * pos)
+static void *c_start(struct seq_file *m, loff_t *pos)
{
- /* We only have one CPU... */
- return *pos < 1 ? (void *)1 : NULL;
+ *pos = cpumask_next(*pos - 1, cpu_online_mask);
+ if ((*pos) < nr_cpu_ids)
+ return &cpuinfo_or1k[*pos];
+ return NULL;
}
-static void *c_next(struct seq_file *m, void *v, loff_t * pos)
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
- ++*pos;
- return NULL;
+ (*pos)++;
+ return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
new file mode 100644
index 000000000000..7d518ee8bddc
--- /dev/null
+++ b/arch/openrisc/kernel/smp.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * Based on arm64 and arc implementations
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+#include <asm/cpuinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/time.h>
+
+static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+
+unsigned long secondary_release = -1;
+struct thread_info *secondary_thread_info;
+
+enum ipi_msg_type {
+ IPI_WAKEUP,
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
+};
+
+static DEFINE_SPINLOCK(boot_lock);
+
+static void boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ /*
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ spin_lock(&boot_lock);
+
+ secondary_release = cpu;
+ smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);
+
+ /*
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+ spin_unlock(&boot_lock);
+}
+
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+void __init smp_init_cpus(void)
+{
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++)
+ set_cpu_possible(i, true);
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ int i;
+
+ /*
+ * Initialise the present map, which describes the set of CPUs
+ * actually populated at the present time.
+ */
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static DECLARE_COMPLETION(cpu_running);
+
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ if (smp_cross_call == NULL) {
+ pr_warn("CPU%u: failed to start, IPI controller missing",
+ cpu);
+ return -EIO;
+ }
+
+ secondary_thread_info = task_thread_info(idle);
+ current_pgd[cpu] = init_mm.pgd;
+
+ boot_secondary(cpu, idle);
+ if (!wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(1000))) {
+ pr_crit("CPU%u: failed to start\n", cpu);
+ return -EIO;
+ }
+ synchronise_count_master(cpu);
+
+ return 0;
+}
+
+asmlinkage __init void secondary_start_kernel(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+ /*
+ * All kernel threads share the same mm context; grab a
+ * reference and switch to it.
+ */
+ atomic_inc(&mm->mm_count);
+ current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+
+ pr_info("CPU%u: Booted secondary processor\n", cpu);
+
+ setup_cpuinfo();
+ openrisc_clockevent_init();
+
+ notify_cpu_starting(cpu);
+
+ /*
+ * OK, now it's safe to let the boot CPU continue
+ */
+ complete(&cpu_running);
+
+ synchronise_count_slave(cpu);
+ set_cpu_online(cpu, true);
+
+ local_irq_enable();
+
+ preempt_disable();
+ /*
+ * OK, it's off to the idle thread for us
+ */
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+void handle_IPI(unsigned int ipi_msg)
+{
+ unsigned int cpu = smp_processor_id();
+
+ switch (ipi_msg) {
+ case IPI_WAKEUP:
+ break;
+
+ case IPI_RESCHEDULE:
+ scheduler_ipi();
+ break;
+
+ case IPI_CALL_FUNC:
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
+
+ default:
+ WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
+ break;
+ }
+}
+
+void smp_send_reschedule(int cpu)
+{
+ smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+static void stop_this_cpu(void *dummy)
+{
+ /* Remove this CPU */
+ set_cpu_online(smp_processor_id(), false);
+
+ local_irq_disable();
+ /* CPU Doze */
+ if (mfspr(SPR_UPR) & SPR_UPR_PMP)
+ mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
+ /* If that didn't work, infinite loop */
+ while (1)
+ ;
+}
+
+void smp_send_stop(void)
+{
+ smp_call_function(stop_this_cpu, NULL, 0);
+}
+
+/* not supported, yet */
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+
+void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
+{
+ smp_cross_call = fn;
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+/* TLB flush operations - Performed on each CPU*/
+static inline void ipi_flush_tlb_all(void *ignored)
+{
+ local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+/*
+ * FIXME: implement proper functionality instead of flush_tlb_all.
+ * *But*, as things currently stand, the local_flush_tlb_* functions will
+ * all boil down to local_flush_tlb_all anyway.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+}
+
+/* Instruction cache invalidate - performed on each cpu */
+static void ipi_icache_page_inv(void *arg)
+{
+ struct page *page = arg;
+
+ local_icache_page_inv(page);
+}
+
+void smp_icache_page_inv(struct page *page)
+{
+ on_each_cpu(ipi_icache_page_inv, page, 1);
+}
+EXPORT_SYMBOL(smp_icache_page_inv);
diff --git a/arch/openrisc/kernel/stacktrace.c b/arch/openrisc/kernel/stacktrace.c
new file mode 100644
index 000000000000..43f140a28bc7
--- /dev/null
+++ b/arch/openrisc/kernel/stacktrace.c
@@ -0,0 +1,86 @@
+/*
+ * Stack trace utility for OpenRISC
+ *
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Loosely based on work from sh and powerpc.
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/stacktrace.h>
+
+#include <asm/processor.h>
+#include <asm/unwinder.h>
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+static void
+save_stack_address(void *data, unsigned long addr, int reliable)
+{
+ struct stack_trace *trace = data;
+
+ if (!reliable)
+ return;
+
+ if (trace->skip > 0) {
+ trace->skip--;
+ return;
+ }
+
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = addr;
+}
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ unwind_stack(trace, (unsigned long *) &trace, save_stack_address);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+static void
+save_stack_address_nosched(void *data, unsigned long addr, int reliable)
+{
+ struct stack_trace *trace = (struct stack_trace *)data;
+
+ if (!reliable)
+ return;
+
+ if (in_sched_functions(addr))
+ return;
+
+ if (trace->skip > 0) {
+ trace->skip--;
+ return;
+ }
+
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = addr;
+}
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ unsigned long *sp = NULL;
+
+ if (tsk == current)
+ sp = (unsigned long *) &sp;
+ else
+ sp = (unsigned long *) KSTK_ESP(tsk);
+
+ unwind_stack(trace, sp, save_stack_address_nosched);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void
+save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+ unwind_stack(trace, (unsigned long *) regs->sp,
+ save_stack_address_nosched);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_regs);
diff --git a/arch/openrisc/kernel/sync-timer.c b/arch/openrisc/kernel/sync-timer.c
new file mode 100644
index 000000000000..ed8d835caca1
--- /dev/null
+++ b/arch/openrisc/kernel/sync-timer.c
@@ -0,0 +1,120 @@
+/*
+ * OR1K timer synchronisation
+ *
+ * Based on work from MIPS implementation.
+ *
+ * All CPUs will have their count registers synchronised to the CPU0 next time
+ * value. This can cause a small timewarp for CPU0. All other CPUs should
+ * not have done anything significant (but they may have had interrupts
+ * enabled briefly - prom_smp_finish() should not be responsible for enabling
+ * interrupts...)
+ */
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+#include <linux/cpumask.h>
+
+#include <asm/time.h>
+#include <asm/timex.h>
+#include <linux/atomic.h>
+#include <asm/barrier.h>
+
+#include <asm/spr.h>
+
+static unsigned int initcount;
+static atomic_t count_count_start = ATOMIC_INIT(0);
+static atomic_t count_count_stop = ATOMIC_INIT(0);
+
+#define COUNTON 100
+#define NR_LOOPS 3
+
+void synchronise_count_master(int cpu)
+{
+ int i;
+ unsigned long flags;
+
+ pr_info("Synchronize counters for CPU %u: ", cpu);
+
+ local_irq_save(flags);
+
+ /*
+ * We loop a few times to get a primed instruction cache,
+ * then the last pass is more or less synchronised and
+ * the master and slaves each set their cycle counters to a known
+ * value all at once. This reduces the chance of having random offsets
+ * between the processors, and guarantees that the maximum
+ * delay between the cycle counters is never bigger than
+ * the latency of information-passing (cachelines) between
+ * two CPUs.
+ */
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ /* slaves loop on '!= 2' */
+ while (atomic_read(&count_count_start) != 1)
+ mb();
+ atomic_set(&count_count_stop, 0);
+ smp_wmb();
+
+ /* Let the slave write its count register */
+ atomic_inc(&count_count_start);
+
+ /* Count will be initialised to current timer */
+ if (i == 1)
+ initcount = get_cycles();
+
+ /*
+ * Everyone initialises count in the last loop:
+ */
+ if (i == NR_LOOPS-1)
+ openrisc_timer_set(initcount);
+
+ /*
+ * Wait for slave to leave the synchronization point:
+ */
+ while (atomic_read(&count_count_stop) != 1)
+ mb();
+ atomic_set(&count_count_start, 0);
+ smp_wmb();
+ atomic_inc(&count_count_stop);
+ }
+ /* Arrange for an interrupt in a short while */
+ openrisc_timer_set_next(COUNTON);
+
+ local_irq_restore(flags);
+
+ /*
+ * i386 code reported the skew here, but the
+ * count registers were almost certainly out of sync
+ * so no point in alarming people
+ */
+ pr_cont("done.\n");
+}
+
+void synchronise_count_slave(int cpu)
+{
+ int i;
+
+ /*
+ * Not every cpu is online at the time this gets called,
+ * so we first wait for the master to say everyone is ready
+ */
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ atomic_inc(&count_count_start);
+ while (atomic_read(&count_count_start) != 2)
+ mb();
+
+ /*
+ * Everyone initialises count in the last loop:
+ */
+ if (i == NR_LOOPS-1)
+ openrisc_timer_set(initcount);
+
+ atomic_inc(&count_count_stop);
+ while (atomic_read(&count_count_stop) != 2)
+ mb();
+ }
+ /* Arrange for an interrupt in a short while */
+ openrisc_timer_set_next(COUNTON);
+}
+#undef NR_LOOPS
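The master/slave handshake in synchronise_count_master()/synchronise_count_slave() above is easier to follow when reduced to one iteration between a single pair of threads. The sketch below is illustrative only (C11 atomics; master_step and slave_step are hypothetical names): in the kernel code the published value is initcount, it is written a full iteration before it is consumed, and ordering comes from the two counters plus smp_wmb().

#include <stdatomic.h>

static atomic_int start_cnt;
static atomic_int stop_cnt;
static unsigned long shared_count;

/* Master: wait for the slave, publish the value, then leave together. */
static void master_step(unsigned long now)
{
	while (atomic_load(&start_cnt) != 1)	/* slave has arrived */
		;
	atomic_store(&stop_cnt, 0);
	shared_count = now;			/* value both sides will use */
	atomic_fetch_add(&start_cnt, 1);	/* release the slave */

	while (atomic_load(&stop_cnt) != 1)	/* slave is done */
		;
	atomic_store(&start_cnt, 0);
	atomic_fetch_add(&stop_cnt, 1);		/* release the slave again */
}

/* Slave: announce arrival, pick up the value, then leave together. */
static void slave_step(unsigned long *count_out)
{
	atomic_fetch_add(&start_cnt, 1);
	while (atomic_load(&start_cnt) != 2)
		;
	*count_out = shared_count;

	atomic_fetch_add(&stop_cnt, 1);
	while (atomic_load(&stop_cnt) != 2)
		;
}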
diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c
index 687c11d048d7..6baecea27080 100644
--- a/arch/openrisc/kernel/time.c
+++ b/arch/openrisc/kernel/time.c
@@ -27,8 +27,14 @@
#include <asm/cpuinfo.h>
-static int openrisc_timer_set_next_event(unsigned long delta,
- struct clock_event_device *dev)
+/* Set the timer tick counter; used in the sync routine */
+inline void openrisc_timer_set(unsigned long count)
+{
+ mtspr(SPR_TTCR, count);
+}
+
+/* Set the timer to trigger in delta cycles */
+inline void openrisc_timer_set_next(unsigned long delta)
{
u32 c;
@@ -44,7 +50,12 @@ static int openrisc_timer_set_next_event(unsigned long delta,
* Keep timer in continuous mode always.
*/
mtspr(SPR_TTMR, SPR_TTMR_CR | SPR_TTMR_IE | c);
+}
+static int openrisc_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ openrisc_timer_set_next(delta);
return 0;
}
@@ -53,13 +64,32 @@ static int openrisc_timer_set_next_event(unsigned long delta,
* timers) we cannot enable the PERIODIC feature. The tick timer can run using
* one-shot events, so no problem.
*/
+DEFINE_PER_CPU(struct clock_event_device, clockevent_openrisc_timer);
-static struct clock_event_device clockevent_openrisc_timer = {
- .name = "openrisc_timer_clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .rating = 300,
- .set_next_event = openrisc_timer_set_next_event,
-};
+void openrisc_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *evt =
+ &per_cpu(clockevent_openrisc_timer, cpu);
+ struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu];
+
+ mtspr(SPR_TTMR, SPR_TTMR_CR);
+
+#ifdef CONFIG_SMP
+ evt->broadcast = tick_broadcast;
+#endif
+ evt->name = "openrisc_timer_clockevent";
+ evt->features = CLOCK_EVT_FEAT_ONESHOT;
+ evt->rating = 300;
+ evt->set_next_event = openrisc_timer_set_next_event;
+
+ evt->cpumask = cpumask_of(cpu);
+
+ /* We only have 28 bits */
+ clockevents_config_and_register(evt, cpuinfo->clock_frequency,
+ 100, 0x0fffffff);
+
+}
static inline void timer_ack(void)
{
@@ -83,7 +113,9 @@ static inline void timer_ack(void)
irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- struct clock_event_device *evt = &clockevent_openrisc_timer;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *evt =
+ &per_cpu(clockevent_openrisc_timer, cpu);
timer_ack();
@@ -99,24 +131,12 @@ irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs)
return IRQ_HANDLED;
}
-static __init void openrisc_clockevent_init(void)
-{
- clockevent_openrisc_timer.cpumask = cpumask_of(0);
-
- /* We only have 28 bits */
- clockevents_config_and_register(&clockevent_openrisc_timer,
- cpuinfo.clock_frequency,
- 100, 0x0fffffff);
-
-}
-
/**
* Clocksource: Based on OpenRISC timer/counter
*
* This sets up the OpenRISC Tick Timer as a clock source. The tick timer
* is 32 bits wide and runs at the CPU clock frequency.
*/
-
static u64 openrisc_timer_read(struct clocksource *cs)
{
return (u64) mfspr(SPR_TTCR);
@@ -132,7 +152,9 @@ static struct clocksource openrisc_timer = {
static int __init openrisc_timer_init(void)
{
- if (clocksource_register_hz(&openrisc_timer, cpuinfo.clock_frequency))
+ struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
+
+ if (clocksource_register_hz(&openrisc_timer, cpuinfo->clock_frequency))
panic("failed to register clocksource");
/* Enable the incrementer: 'continuous' mode with interrupt disabled */
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 803e9e756f77..4085d72fa5ae 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -38,6 +38,7 @@
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/pgtable.h>
+#include <asm/unwinder.h>
extern char _etext, _stext;
@@ -45,61 +46,20 @@ int kstack_depth_to_print = 0x180;
int lwa_flag;
unsigned long __user *lwa_addr;
-static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
+void print_trace(void *data, unsigned long addr, int reliable)
{
- return p > (void *)tinfo && p < (void *)tinfo + THREAD_SIZE - 3;
-}
-
-void show_trace(struct task_struct *task, unsigned long *stack)
-{
- struct thread_info *context;
- unsigned long addr;
-
- context = (struct thread_info *)
- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
-
- while (valid_stack_ptr(context, stack)) {
- addr = *stack++;
- if (__kernel_text_address(addr)) {
- printk(" [<%08lx>]", addr);
- print_symbol(" %s", addr);
- printk("\n");
- }
- }
- printk(" =======================\n");
+ pr_emerg("[<%p>] %s%pS\n", (void *) addr, reliable ? "" : "? ",
+ (void *) addr);
}
/* displays a short stack trace */
void show_stack(struct task_struct *task, unsigned long *esp)
{
- unsigned long addr, *stack;
- int i;
-
if (esp == NULL)
esp = (unsigned long *)&esp;
- stack = esp;
-
- printk("Stack dump [0x%08lx]:\n", (unsigned long)esp);
- for (i = 0; i < kstack_depth_to_print; i++) {
- if (kstack_end(stack))
- break;
- if (__get_user(addr, stack)) {
- /* This message matches "failing address" marked
- s390 in ksymoops, so lines containing it will
- not be filtered out by ksymoops. */
- printk("Failing address 0x%lx\n", (unsigned long)stack);
- break;
- }
- stack++;
-
- printk("sp + %02d: 0x%08lx\n", i * 4, addr);
- }
- printk("\n");
-
- show_trace(task, esp);
-
- return;
+ pr_emerg("Call trace:\n");
+ unwind_stack(NULL, esp, print_trace);
}
void show_trace_task(struct task_struct *tsk)
@@ -115,7 +75,7 @@ void show_registers(struct pt_regs *regs)
int in_kernel = 1;
unsigned long esp;
- esp = (unsigned long)(&regs->sp);
+ esp = (unsigned long)(regs->sp);
if (user_mode(regs))
in_kernel = 0;
diff --git a/arch/openrisc/kernel/unwinder.c b/arch/openrisc/kernel/unwinder.c
new file mode 100644
index 000000000000..8ae15c2c1845
--- /dev/null
+++ b/arch/openrisc/kernel/unwinder.c
@@ -0,0 +1,105 @@
+/*
+ * OpenRISC unwinder.c
+ *
+ * Reusable arch specific api for unwinding stacks.
+ *
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/sched/task_stack.h>
+#include <linux/kernel.h>
+
+#include <asm/unwinder.h>
+
+#ifdef CONFIG_FRAME_POINTER
+struct or1k_frameinfo {
+ unsigned long *fp;
+ unsigned long ra;
+ unsigned long top;
+};
+
+/*
+ * Verify a frameinfo structure. The return address should be a valid text
+ * address. The frame pointer may be null if its the last frame, otherwise
+ * the frame pointer should point to a location in the stack after the the
+ * top of the next frame up.
+ */
+static inline int or1k_frameinfo_valid(struct or1k_frameinfo *frameinfo)
+{
+ return (frameinfo->fp == NULL ||
+ (!kstack_end(frameinfo->fp) &&
+ frameinfo->fp > &frameinfo->top)) &&
+ __kernel_text_address(frameinfo->ra);
+}
+
+/*
+ * Create a stack trace by scanning the stack in a frame-pointer-aware way. We can
+ * get reliable stack traces by matching the previously found frame
+ * pointer with the top of the stack address every time we find a valid
+ * or1k_frameinfo.
+ *
+ * Ideally the stack parameter will be passed as FP, but that cannot be
+ * guaranteed. Therefore we scan each address looking for the first sign
+ * of a return address.
+ *
+ * The OpenRISC stack frame looks something like the following. The
+ * location SP is held in r1 and location FP is held in r2 when frame pointers
+ * are enabled.
+ *
+ * SP -> (top of stack)
+ * - (callee saved registers)
+ * - (local variables)
+ * FP-8 -> previous FP \
+ * FP-4 -> return address |- or1k_frameinfo
+ * FP -> (previous top of stack) /
+ */
+void unwind_stack(void *data, unsigned long *stack,
+ void (*trace)(void *data, unsigned long addr, int reliable))
+{
+ unsigned long *next_fp = NULL;
+ struct or1k_frameinfo *frameinfo = NULL;
+ int reliable = 0;
+
+ while (!kstack_end(stack)) {
+ frameinfo = container_of(stack,
+ struct or1k_frameinfo,
+ top);
+
+ if (__kernel_text_address(frameinfo->ra)) {
+ if (or1k_frameinfo_valid(frameinfo) &&
+ (next_fp == NULL ||
+ next_fp == &frameinfo->top)) {
+ reliable = 1;
+ next_fp = frameinfo->fp;
+ } else
+ reliable = 0;
+
+ trace(data, frameinfo->ra, reliable);
+ }
+ stack++;
+ }
+}
+
+#else /* CONFIG_FRAME_POINTER */
+
+/*
+ * Create a stack trace by doing a simple scan treating all text addresses
+ * as return addresses.
+ */
+void unwind_stack(void *data, unsigned long *stack,
+ void (*trace)(void *data, unsigned long addr, int reliable))
+{
+ unsigned long addr;
+
+ while (!kstack_end(stack)) {
+ addr = *stack++;
+ if (__kernel_text_address(addr))
+ trace(data, addr, 0);
+ }
+}
+#endif /* CONFIG_FRAME_POINTER */
+
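A caller of unwind_stack() supplies a callback with the (data, addr, reliable) signature used above; print_trace() in traps.c and the save_stack_address*() helpers in stacktrace.c are the in-tree users. A minimal hypothetical example (count_frames and count_reliable_frames are made-up names, not part of this patch) could look like:

#include <asm/unwinder.h>

static void count_frames(void *data, unsigned long addr, int reliable)
{
	unsigned int *nframes = data;

	if (reliable)
		(*nframes)++;
}

static unsigned int count_reliable_frames(void)
{
	unsigned int nframes = 0;

	/* Start from the current stack, as save_stack_trace() does above. */
	unwind_stack(&nframes, (unsigned long *)&nframes, count_frames);
	return nframes;
}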
diff --git a/arch/openrisc/lib/delay.c b/arch/openrisc/lib/delay.c
index 8b13fdf43ec6..a92bd621aa1f 100644
--- a/arch/openrisc/lib/delay.c
+++ b/arch/openrisc/lib/delay.c
@@ -25,7 +25,7 @@
int read_current_timer(unsigned long *timer_value)
{
- *timer_value = mfspr(SPR_TTCR);
+ *timer_value = get_cycles();
return 0;
}
diff --git a/arch/openrisc/mm/Makefile b/arch/openrisc/mm/Makefile
index 324ba2634529..a31b2a42e966 100644
--- a/arch/openrisc/mm/Makefile
+++ b/arch/openrisc/mm/Makefile
@@ -2,4 +2,4 @@
# Makefile for the linux openrisc-specific parts of the memory manager.
#
-obj-y := fault.o tlb.o init.o ioremap.o
+obj-y := fault.o cache.o tlb.o init.o ioremap.o
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
new file mode 100644
index 000000000000..b747bf1fc1b6
--- /dev/null
+++ b/arch/openrisc/mm/cache.c
@@ -0,0 +1,61 @@
+/*
+ * OpenRISC cache.c
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others. All original copyrights apply as per the original source
+ * declaration.
+ *
+ * Modifications for the OpenRISC architecture:
+ * Copyright (C) 2015 Jan Henrik Weinstock <jan.weinstock@rwth-aachen.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/spr.h>
+#include <asm/spr_defs.h>
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+static void cache_loop(struct page *page, const unsigned int reg)
+{
+ unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
+ unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
+
+ while (line < paddr + PAGE_SIZE) {
+ mtspr(reg, line);
+ line += L1_CACHE_BYTES;
+ }
+}
+
+void local_dcache_page_flush(struct page *page)
+{
+ cache_loop(page, SPR_DCBFR);
+}
+EXPORT_SYMBOL(local_dcache_page_flush);
+
+void local_icache_page_inv(struct page *page)
+{
+ cache_loop(page, SPR_ICBIR);
+}
+EXPORT_SYMBOL(local_icache_page_inv);
+
+void update_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *pte)
+{
+ unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
+ struct page *page = pfn_to_page(pfn);
+ int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
+
+ /*
+ * Since icaches do not snoop for updated data on OpenRISC, we
+ * must write back and invalidate any dirty pages manually. We
+ * can skip data pages, since they will not end up in icaches.
+ */
+ if ((vma->vm_flags & VM_EXEC) && dirty)
+ sync_icache_dcache(page);
+}
+
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index e310ab499385..d0021dfae20a 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -33,7 +33,7 @@ unsigned long pte_errors; /* updated by do_page_fault() */
/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
* - also look into include/asm-or32/mmu_context.h
*/
-volatile pgd_t *current_pgd;
+volatile pgd_t *current_pgd[NR_CPUS];
extern void die(char *, struct pt_regs *, long);
@@ -319,7 +319,7 @@ vmalloc_fault:
phx_mmu("vmalloc_fault");
*/
- pgd = (pgd_t *)current_pgd + offset;
+ pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset;
pgd_k = init_mm.pgd + offset;
/* Since we're two-level, we don't need to do both
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index f67d82b9d22f..6972d5d6f23f 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -147,7 +147,7 @@ void __init paging_init(void)
* (even if it is most probably not used until the next
* switch_mm)
*/
- current_pgd = init_mm.pgd;
+ current_pgd[smp_processor_id()] = init_mm.pgd;
end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
index 683bd4d31c7c..6c253a2e86bc 100644
--- a/arch/openrisc/mm/tlb.c
+++ b/arch/openrisc/mm/tlb.c
@@ -49,7 +49,7 @@
*
*/
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
{
int i;
unsigned long num_tlb_sets;
@@ -86,7 +86,7 @@ void flush_tlb_all(void)
#define flush_itlb_page_no_eir(addr) \
mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0);
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
if (have_dtlbeir)
flush_dtlb_page_eir(addr);
@@ -99,8 +99,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
flush_itlb_page_no_eir(addr);
}
-void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
{
int addr;
bool dtlbeir;
@@ -129,13 +129,13 @@ void flush_tlb_range(struct vm_area_struct *vma,
* This should be changed to loop over over mm and call flush_tlb_range.
*/
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
{
/* Was seeing bugs with the mm struct passed to us. Scrapped most of
this function. */
/* Several architctures do this */
- flush_tlb_all();
+ local_flush_tlb_all();
}
/* called in schedule() just before actually doing the switch_to */
@@ -149,14 +149,14 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* might be invalid at points where we still need to derefer
* the pgd.
*/
- current_pgd = next->pgd;
+ current_pgd[smp_processor_id()] = next->pgd;
/* We don't have context support implemented, so flush all
* entries belonging to previous map
*/
if (prev != next)
- flush_tlb_mm(prev);
+ local_flush_tlb_mm(prev);
}
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index bc54addd589f..88bae6676c9b 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -261,7 +261,7 @@ atomic64_set(atomic64_t *v, s64 i)
static __inline__ s64
atomic64_read(const atomic64_t *v)
{
- return ACCESS_ONCE((v)->counter);
+ return READ_ONCE((v)->counter);
}
#define atomic64_inc(v) (atomic64_add( 1,(v)))
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 7af4a00b5ce2..01e1fc057c83 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -33,14 +33,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return hppa_dma_ops;
}
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- if (hppa_dma_ops->sync_single_for_cpu)
- flush_kernel_dcache_range((unsigned long)vaddr, size);
-}
-
static inline void *
parisc_walk_tree(struct device *dev)
{
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h
index 6108e9df0296..96b7deec512d 100644
--- a/arch/parisc/include/asm/pci.h
+++ b/arch/parisc/include/asm/pci.h
@@ -88,13 +88,6 @@ struct pci_hba_data {
#endif /* !CONFIG_64BIT */
/*
-** KLUGE: linux/pci.h include asm/pci.h BEFORE declaring struct pci_bus
-** (This eliminates some of the warnings).
-*/
-struct pci_bus;
-struct pci_dev;
-
-/*
* If the PCI device's view of memory is the same as the CPU's view of memory,
* PCI_DMA_BUS_IS_PHYS is true. The networking and block device layers use
* this boolean for bounce buffer decisions.
@@ -162,7 +155,6 @@ extern struct pci_bios_ops *pci_bios;
#ifdef CONFIG_PCI
extern void pcibios_register_hba(struct pci_hba_data *);
-extern void pcibios_set_master(struct pci_dev *);
#else
static inline void pcibios_register_hba(struct pci_hba_data *x)
{
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index af03359e6ac5..6f84b6acc86e 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -32,6 +32,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
cpu_relax();
mb();
}
+#define arch_spin_lock_flags arch_spin_lock_flags
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
@@ -169,25 +170,4 @@ static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
return result;
}
-/*
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
-{
- return rw->counter >= 0;
-}
-
-/*
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
-{
- return !rw->counter;
-}
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 412231d101f9..c0dfd892f70c 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -572,6 +572,12 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
flush_kernel_vmap_range(sg_virt(sg), sg->length);
}
+static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ flush_kernel_dcache_range((unsigned long)vaddr, size);
+}
+
const struct dma_map_ops pcxl_dma_ops = {
.dma_supported = pa11_dma_supported,
.alloc = pa11_dma_alloc,
@@ -584,6 +590,7 @@ const struct dma_map_ops pcxl_dma_ops = {
.sync_single_for_device = pa11_dma_sync_single_for_device,
.sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
.sync_sg_for_device = pa11_dma_sync_sg_for_device,
+ .cache_sync = pa11_dma_cache_sync,
};
static void *pcx_dma_alloc(struct device *dev, size_t size,
@@ -620,4 +627,5 @@ const struct dma_map_ops pcx_dma_ops = {
.sync_single_for_device = pa11_dma_sync_single_for_device,
.sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
.sync_sg_for_device = pa11_dma_sync_sg_for_device,
+ .cache_sync = pa11_dma_cache_sync,
};
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 84774ccba1c2..f92d0530ceb1 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -18,7 +18,6 @@ otheros.bld
uImage
cuImage.*
dtbImage.*
-*.dtb
treeImage.*
vmlinux.strip
zImage
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 651974192c4d..08782f55b89f 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -440,7 +440,7 @@ zInstall: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \
zImage.miboot zImage.pmac zImage.pseries \
- zImage.maple simpleImage.* otheros.bld *.dtb
+ zImage.maple simpleImage.* otheros.bld
# clean up files cached by wrapper
clean-kernel-base := vmlinux.strip vmlinux.bin
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index ee1e38ff1b77..5a6cbe11db6f 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -142,12 +142,5 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
#define ARCH_HAS_DMA_MMAP_COHERENT
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- __dma_sync(vaddr, size, (int)direction);
-}
-
#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
index 936a904ae78c..167c44b58848 100644
--- a/arch/powerpc/include/asm/floppy.h
+++ b/arch/powerpc/include/asm/floppy.h
@@ -25,7 +25,6 @@
#define fd_get_dma_residue() fd_ops->_get_dma_residue(FLOPPY_DMA)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
-#define fd_cacheflush(addr,size) /* nothing */
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
#include <linux/pci.h>
@@ -152,7 +151,6 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
prev_dir = dir;
fd_clear_dma_ff();
- fd_cacheflush(addr, size);
fd_set_dma_mode(mode);
set_dma_addr(FLOPPY_DMA, bus_addr);
fd_set_dma_count(size);
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index c8975dac535f..8dc32eacc97c 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -28,8 +28,6 @@
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
-struct pci_dev;
-
/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
#define IOBASE_BRIDGE_NUMBER 0
#define IOBASE_MEMORY 1
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index a14203c005f1..e11f03007b57 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -18,7 +18,7 @@ static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
}
#endif /* MODULE */
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgalloc.h>
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index edbe571bcc54..b9ebc3085fb7 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -161,6 +161,7 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
local_irq_restore(flags_dis);
}
}
+#define arch_spin_lock_flags arch_spin_lock_flags
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
@@ -181,9 +182,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
* read-locks.
*/
-#define arch_read_can_lock(rw) ((rw)->lock >= 0)
-#define arch_write_can_lock(rw) (!(rw)->lock)
-
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND "extsw %0,%0\n"
#define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */
@@ -302,9 +300,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
rw->lock = 0;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#define arch_spin_relax(lock) __spin_yield(lock)
#define arch_read_relax(lock) __rw_yield(lock)
#define arch_write_relax(lock) __rw_yield(lock)
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 8b840191df59..4e1b433f6cb5 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -441,7 +441,7 @@ static void *eeh_add_virt_device(void *data, void *userdata)
}
#ifdef CONFIG_PPC_POWERNV
- pci_iov_add_virtfn(edev->physfn, pdn->vf_index, 0);
+ pci_iov_add_virtfn(edev->physfn, pdn->vf_index);
#endif
return NULL;
}
@@ -499,7 +499,7 @@ static void *eeh_rmv_device(void *data, void *userdata)
#ifdef CONFIG_PPC_POWERNV
struct pci_dn *pdn = eeh_dev_to_pdn(edev);
- pci_iov_remove_virtfn(edev->physfn, pdn->vf_index, 0);
+ pci_iov_remove_virtfn(edev->physfn, pdn->vf_index);
edev->pdev = NULL;
/*
diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c
index 992c0d258e5d..e4395f937d63 100644
--- a/arch/powerpc/kernel/machine_kexec_file_64.c
+++ b/arch/powerpc/kernel/machine_kexec_file_64.c
@@ -91,11 +91,13 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image)
* and that value will be returned. If all free regions are visited without
* func returning non-zero, then zero will be returned.
*/
-int arch_kexec_walk_mem(struct kexec_buf *kbuf, int (*func)(u64, u64, void *))
+int arch_kexec_walk_mem(struct kexec_buf *kbuf,
+ int (*func)(struct resource *, void *))
{
int ret = 0;
u64 i;
phys_addr_t mstart, mend;
+ struct resource res = { };
if (kbuf->top_down) {
for_each_free_mem_range_reverse(i, NUMA_NO_NODE, 0,
@@ -105,7 +107,9 @@ int arch_kexec_walk_mem(struct kexec_buf *kbuf, int (*func)(u64, u64, void *))
* range while in kexec, end points to the last byte
* in the range.
*/
- ret = func(mstart, mend - 1, kbuf);
+ res.start = mstart;
+ res.end = mend - 1;
+ ret = func(&res, kbuf);
if (ret)
break;
}
@@ -117,7 +121,9 @@ int arch_kexec_walk_mem(struct kexec_buf *kbuf, int (*func)(u64, u64, void *))
* range while in kexec, end points to the last byte
* in the range.
*/
- ret = func(mstart, mend - 1, kbuf);
+ res.start = mstart;
+ res.end = mend - 1;
+ ret = func(&res, kbuf);
if (ret)
break;
}
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 02831a396419..0ac7aa346c69 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1740,15 +1740,3 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
-
-static void fixup_vga(struct pci_dev *pdev)
-{
- u16 cmd;
-
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device())
- vga_set_default_device(pdev);
-
-}
-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 1643e9e53655..3f1c4fcbe0aa 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -78,7 +78,7 @@ static unsigned long lock_rtas(void)
local_irq_save(flags);
preempt_disable();
- arch_spin_lock_flags(&rtas.lock, flags);
+ arch_spin_lock(&rtas.lock);
return flags;
}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 1571a498a33f..a9b9083c5e49 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -433,6 +433,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud = pud_offset(pgd, start);
pgd_clear(pgd);
pud_free_tlb(tlb, pud, start);
+ mm_dec_nr_puds(tlb->mm);
}
/*
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 05e15386d4cb..a7e998158f37 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -200,7 +200,7 @@ static void destroy_pagetable_page(struct mm_struct *mm)
/* We allow PTE_FRAG_NR fragments from a PTE page */
if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
pgtable_page_dtor(page);
- free_hot_cold_page(page, 0);
+ free_unref_page(page);
}
}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ac0717a90ca6..1ec3aee43624 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -404,7 +404,7 @@ void pte_fragment_free(unsigned long *table, int kernel)
if (put_page_testzero(page)) {
if (!kernel)
pgtable_page_dtor(page);
- free_hot_cold_page(page, 0);
+ free_unref_page(page);
}
}
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index d5e34ce5fd5d..5a96a2763e4a 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -79,7 +79,7 @@ config UDBG_RTAS_CONSOLE
config PPC_SMP_MUXED_IPI
bool
help
- Select this opton if your platform supports SMP and your
+ Select this option if your platform supports SMP and your
interrupt controller provides less than 4 interrupts to each
cpu. This will enable the generic code to multiplex the 4
messages on to one ipi.
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
index 7a9cde0cfbd1..acd3206dfae3 100644
--- a/arch/powerpc/platforms/powernv/opal-msglog.c
+++ b/arch/powerpc/platforms/powernv/opal-msglog.c
@@ -43,7 +43,7 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count)
if (!opal_memcons)
return -ENODEV;
- out_pos = be32_to_cpu(ACCESS_ONCE(opal_memcons->out_pos));
+ out_pos = be32_to_cpu(READ_ONCE(opal_memcons->out_pos));
/* Now we've read out_pos, put a barrier in before reading the new
* data it points to in conbuf. */
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 4ac419c7eb4c..560aefde06c0 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -742,7 +742,7 @@ static void cmm_exit(void)
* Return value:
* 0 on success / other on failure
**/
-static int cmm_set_disable(const char *val, struct kernel_param *kp)
+static int cmm_set_disable(const char *val, const struct kernel_param *kp)
{
int disable = simple_strtoul(val, NULL, 10);
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index e45b5f10645a..6e35780c5962 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -75,24 +75,17 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
return prop;
}
-static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
- const char *path)
+static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
{
struct device_node *dn;
- char *name;
-
- /* If parent node path is "/" advance path to NULL terminator to
- * prevent double leading slashs in full_name.
- */
- if (!path[1])
- path++;
+ const char *name;
dn = kzalloc(sizeof(*dn), GFP_KERNEL);
if (!dn)
return NULL;
- name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
- dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
+ name = (const char *)ccwa + be32_to_cpu(ccwa->name_offset);
+ dn->full_name = kstrdup(name, GFP_KERNEL);
if (!dn->full_name) {
kfree(dn);
return NULL;
@@ -148,7 +141,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
struct property *last_property = NULL;
struct cc_workarea *ccwa;
char *data_buf;
- const char *parent_path = parent->full_name;
int cc_token;
int rc = -1;
@@ -182,7 +174,7 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
break;
case NEXT_SIBLING:
- dn = dlpar_parse_cc_node(ccwa, parent_path);
+ dn = dlpar_parse_cc_node(ccwa);
if (!dn)
goto cc_error;
@@ -192,10 +184,7 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
break;
case NEXT_CHILD:
- if (first_dn)
- parent_path = last_dn->full_name;
-
- dn = dlpar_parse_cc_node(ccwa, parent_path);
+ dn = dlpar_parse_cc_node(ccwa);
if (!dn)
goto cc_error;
@@ -226,7 +215,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
case PREV_PARENT:
last_dn = last_dn->parent;
- parent_path = last_dn->parent->full_name;
break;
case CALL_AGAIN:
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 296c188fd5ca..f24d8159c9e1 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -33,7 +33,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
if (!np)
goto out_err;
- np->full_name = kstrdup(path, GFP_KERNEL);
+ np->full_name = kstrdup(kbasename(path), GFP_KERNEL);
if (!np->full_name)
goto out_err;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
new file mode 100644
index 000000000000..2c6adf12713a
--- /dev/null
+++ b/arch/riscv/Kconfig
@@ -0,0 +1,310 @@
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+
+config RISCV
+ def_bool y
+ select OF
+ select OF_EARLY_FLATTREE
+ select OF_IRQ
+ select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_WANT_FRAME_POINTERS
+ select CLONE_BACKWARDS
+ select COMMON_CLK
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CPU_DEVICES
+ select GENERIC_IRQ_SHOW
+ select GENERIC_PCI_IOMAP
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
+ select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select HAVE_MEMBLOCK
+ select HAVE_DMA_API_DEBUG
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_GENERIC_DMA_COHERENT
+ select IRQ_DOMAIN
+ select NO_BOOTMEM
+ select RISCV_ISA_A if SMP
+ select SPARSE_IRQ
+ select SYSCTL_EXCEPTION_TRACE
+ select HAVE_ARCH_TRACEHOOK
+ select MODULES_USE_ELF_RELA if MODULES
+ select THREAD_INFO_IN_TASK
+ select RISCV_IRQ_INTC
+ select RISCV_TIMER
+
+config MMU
+ def_bool y
+
+# even on 32-bit, physical (and DMA) addresses are wider than 32 bits
+config ARCH_PHYS_ADDR_T_64BIT
+ def_bool y
+
+config ARCH_DMA_ADDR_T_64BIT
+ def_bool y
+
+config PAGE_OFFSET
+ hex
+ default 0xC0000000 if 32BIT && MAXPHYSMEM_2GB
+ default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
+ default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
+
+config STACKTRACE_SUPPORT
+ def_bool y
+
+config RWSEM_GENERIC_SPINLOCK
+ def_bool y
+
+config GENERIC_BUG
+ def_bool y
+ depends on BUG
+ select GENERIC_BUG_RELATIVE_POINTERS if 64BIT
+
+config GENERIC_BUG_RELATIVE_POINTERS
+ bool
+
+config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+config GENERIC_CSUM
+ def_bool y
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config PGTABLE_LEVELS
+ int
+ default 3 if 64BIT
+ default 2
+
+config HAVE_KPROBES
+ def_bool n
+
+config DMA_NOOP_OPS
+ def_bool y
+
+menu "Platform type"
+
+choice
+ prompt "Base ISA"
+ default ARCH_RV64I
+ help
+ This selects the base ISA that this kernel will target; it must match
+ the target platform.
+
+config ARCH_RV32I
+ bool "RV32I"
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select 32BIT
+ select GENERIC_ASHLDI3
+ select GENERIC_ASHRDI3
+ select GENERIC_LSHRDI3
+
+config ARCH_RV64I
+ bool "RV64I"
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select 64BIT
+
+endchoice
+
+# We must be able to map all physical memory into the kernel, but the compiler
+# is still a bit more efficient when generating code if it's set up in a manner
+# such that it can only map 2GiB of memory.
+choice
+ prompt "Kernel Code Model"
+ default CMODEL_MEDLOW if 32BIT
+ default CMODEL_MEDANY if 64BIT
+
+ config CMODEL_MEDLOW
+ bool "medium low code model"
+ config CMODEL_MEDANY
+ bool "medium any code model"
+endchoice
+
+choice
+ prompt "Maximum Physical Memory"
+ default MAXPHYSMEM_2GB if 32BIT
+ default MAXPHYSMEM_2GB if 64BIT && CMODEL_MEDLOW
+ default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
+
+ config MAXPHYSMEM_2GB
+ bool "2GiB"
+ config MAXPHYSMEM_128GB
+ depends on 64BIT && CMODEL_MEDANY
+ bool "128GiB"
+endchoice
+
+
+config SMP
+ bool "Symmetric Multi-Processing"
+ help
+ This enables support for systems with more than one CPU. If
+ you say N here, the kernel will run on single and
+ multiprocessor machines, but will use only one CPU of a
+ multiprocessor machine. If you say Y here, the kernel will run
+ on many, but not all, single processor machines. On a single
+ processor machine, the kernel will run faster if you say N
+ here.
+
+ If you don't know what to do here, say N.
+
+config NR_CPUS
+ int "Maximum number of CPUs (2-32)"
+ range 2 32
+ depends on SMP
+ default "8"
+
+config CPU_SUPPORTS_32BIT_KERNEL
+ bool
+config CPU_SUPPORTS_64BIT_KERNEL
+ bool
+
+choice
+ prompt "CPU Tuning"
+ default TUNE_GENERIC
+
+config TUNE_GENERIC
+ bool "generic"
+
+endchoice
+
+config RISCV_ISA_C
+ bool "Emit compressed instructions when building Linux"
+ default y
+ help
+ Adds "C" to the ISA subsets that the toolchain is allowed to emit
+ when building Linux, which results in compressed instructions in the
+ Linux binary.
+
+ If you don't know what to do here, say Y.
+
+config RISCV_ISA_A
+ def_bool y
+
+endmenu
+
+menu "Kernel type"
+
+choice
+ prompt "Kernel code model"
+ default 64BIT
+
+config 32BIT
+ bool "32-bit kernel"
+ depends on CPU_SUPPORTS_32BIT_KERNEL
+ help
+ Select this option to build a 32-bit kernel.
+
+config 64BIT
+ bool "64-bit kernel"
+ depends on CPU_SUPPORTS_64BIT_KERNEL
+ help
+ Select this option to build a 64-bit kernel.
+
+endchoice
+
+source "mm/Kconfig"
+
+source "kernel/Kconfig.preempt"
+
+source "kernel/Kconfig.hz"
+
+endmenu
+
+menu "Bus support"
+
+config PCI
+ bool "PCI support"
+ select PCI_MSI
+ help
+ This feature enables support for the PCI bus system. If you say Y
+ here, the kernel will include drivers and infrastructure code
+ to support PCI bus devices.
+
+ If you don't know what to do here, say Y.
+
+config PCI_DOMAINS
+ def_bool PCI
+
+config PCI_DOMAINS_GENERIC
+ def_bool PCI
+
+source "drivers/pci/Kconfig"
+
+endmenu
+
+source "init/Kconfig"
+
+source "kernel/Kconfig.freezer"
+
+menu "Executable file formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+menu "Power management options"
+
+source kernel/power/Kconfig
+
+endmenu
+
+source "net/Kconfig"
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+menu "Kernel hacking"
+
+config CMDLINE_BOOL
+ bool "Built-in kernel command line"
+ help
+ For most platforms, the firmware or second-stage bootloader
+ specifies the default kernel command line options.
+ However, it might be necessary or advantageous to either override
+ the default kernel command line or add a few extra options to it.
+ For such cases, this option allows hardcoding command line options
+ directly into the kernel.
+
+ For that, choose 'Y' here and fill in the extra boot parameters
+ in CONFIG_CMDLINE.
+
+ The built-in options will be concatenated to the default command
+ line if CMDLINE_OVERRIDE is set to 'N'. Otherwise, the default
+ command line will be ignored and replaced by the built-in string.
+
+config CMDLINE
+ string "Built-in kernel command string"
+ depends on CMDLINE_BOOL
+ default ""
+ help
+ Supply command-line options at build time by entering them here.
+
+config CMDLINE_OVERRIDE
+ bool "Built-in command line overrides bootloader arguments"
+ depends on CMDLINE_BOOL
+ help
+ Set this option to 'Y' to have the kernel ignore the bootloader
+ or firmware command line. Instead, the built-in command line
+ will be used exclusively.
+
+ If you don't know what to do here, say N.
+
+config EARLY_PRINTK
+ def_bool y
+
+source "lib/Kconfig.debug"
+
+config CMDLINE_BOOL
+ bool
+endmenu
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
new file mode 100644
index 000000000000..6719dd30ec5b
--- /dev/null
+++ b/arch/riscv/Makefile
@@ -0,0 +1,72 @@
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+LDFLAGS :=
+OBJCOPYFLAGS := -O binary
+LDFLAGS_vmlinux :=
+KBUILD_AFLAGS_MODULE += -fPIC
+KBUILD_CFLAGS_MODULE += -fPIC
+
+KBUILD_DEFCONFIG = defconfig
+
+export BITS
+ifeq ($(CONFIG_ARCH_RV64I),y)
+ BITS := 64
+ UTS_MACHINE := riscv64
+
+ KBUILD_CFLAGS += -mabi=lp64
+ KBUILD_AFLAGS += -mabi=lp64
+ KBUILD_MARCH = rv64im
+ LDFLAGS += -melf64lriscv
+else
+ BITS := 32
+ UTS_MACHINE := riscv32
+
+ KBUILD_CFLAGS += -mabi=ilp32
+ KBUILD_AFLAGS += -mabi=ilp32
+ KBUILD_MARCH = rv32im
+ LDFLAGS += -melf32lriscv
+endif
+
+KBUILD_CFLAGS += -Wall
+
+ifeq ($(CONFIG_RISCV_ISA_A),y)
+ KBUILD_ARCH_A = a
+endif
+ifeq ($(CONFIG_RISCV_ISA_C),y)
+ KBUILD_ARCH_C = c
+endif
+
+KBUILD_AFLAGS += -march=$(KBUILD_MARCH)$(KBUILD_ARCH_A)fd$(KBUILD_ARCH_C)
+
+KBUILD_CFLAGS += -march=$(KBUILD_MARCH)$(KBUILD_ARCH_A)$(KBUILD_ARCH_C)
+KBUILD_CFLAGS += -mno-save-restore
+KBUILD_CFLAGS += -DCONFIG_PAGE_OFFSET=$(CONFIG_PAGE_OFFSET)
+
+ifeq ($(CONFIG_CMODEL_MEDLOW),y)
+ KBUILD_CFLAGS += -mcmodel=medlow
+endif
+ifeq ($(CONFIG_CMODEL_MEDANY),y)
+ KBUILD_CFLAGS += -mcmodel=medany
+endif
+
+# GCC versions that support the "-mstrict-align" option default to allowing
+# unaligned accesses. While unaligned accesses are explicitly allowed in the
+# RISC-V ISA, they're emulated by machine mode traps on all extant
+# architectures. It's faster to have GCC emit only aligned accesses.
+KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
+
+head-y := arch/riscv/kernel/head.o
+
+core-y += arch/riscv/kernel/ arch/riscv/mm/
+
+libs-y += arch/riscv/lib/
+
+all: vmlinux
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/arch/riscv/configs/defconfig
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
new file mode 100644
index 000000000000..18158be62a2b
--- /dev/null
+++ b/arch/riscv/include/asm/Kbuild
@@ -0,0 +1,61 @@
+generic-y += bugs.h
+generic-y += cacheflush.h
+generic-y += checksum.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += dma.h
+generic-y += dma-contiguous.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += exec.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += ftrace.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += hash.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += mm-arch-hooks.h
+generic-y += mman.h
+generic-y += module.h
+generic-y += msgbuf.h
+generic-y += mutex.h
+generic-y += param.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += preempt.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += sembuf.h
+generic-y += setup.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += swab.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += types.h
+generic-y += unaligned.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += vmlinux.lds.h
+generic-y += xor.h
diff --git a/arch/riscv/include/asm/asm-offsets.h b/arch/riscv/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/riscv/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
new file mode 100644
index 000000000000..6cbbb6a68d76
--- /dev/null
+++ b/arch/riscv/include/asm/asm.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_ASM_H
+#define _ASM_RISCV_ASM_H
+
+#ifdef __ASSEMBLY__
+#define __ASM_STR(x) x
+#else
+#define __ASM_STR(x) #x
+#endif
+
+#if __riscv_xlen == 64
+#define __REG_SEL(a, b) __ASM_STR(a)
+#elif __riscv_xlen == 32
+#define __REG_SEL(a, b) __ASM_STR(b)
+#else
+#error "Unexpected __riscv_xlen"
+#endif
+
+#define REG_L __REG_SEL(ld, lw)
+#define REG_S __REG_SEL(sd, sw)
+#define SZREG __REG_SEL(8, 4)
+#define LGREG __REG_SEL(3, 2)
+
+#if __SIZEOF_POINTER__ == 8
+#ifdef __ASSEMBLY__
+#define RISCV_PTR .dword
+#define RISCV_SZPTR 8
+#define RISCV_LGPTR 3
+#else
+#define RISCV_PTR ".dword"
+#define RISCV_SZPTR "8"
+#define RISCV_LGPTR "3"
+#endif
+#elif __SIZEOF_POINTER__ == 4
+#ifdef __ASSEMBLY__
+#define RISCV_PTR .word
+#define RISCV_SZPTR 4
+#define RISCV_LGPTR 2
+#else
+#define RISCV_PTR ".word"
+#define RISCV_SZPTR "4"
+#define RISCV_LGPTR "2"
+#endif
+#else
+#error "Unexpected __SIZEOF_POINTER__"
+#endif
+
+#if (__SIZEOF_INT__ == 4)
+#define INT __ASM_STR(.word)
+#define SZINT __ASM_STR(4)
+#define LGINT __ASM_STR(2)
+#else
+#error "Unexpected __SIZEOF_INT__"
+#endif
+
+#if (__SIZEOF_SHORT__ == 2)
+#define SHORT __ASM_STR(.half)
+#define SZSHORT __ASM_STR(2)
+#define LGSHORT __ASM_STR(1)
+#else
+#error "Unexpected __SIZEOF_SHORT__"
+#endif
+
+#endif /* _ASM_RISCV_ASM_H */
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
new file mode 100644
index 000000000000..e2e37c57cbeb
--- /dev/null
+++ b/arch/riscv/include/asm/atomic.h
@@ -0,0 +1,375 @@
+/*
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_RISCV_ATOMIC_H
+#define _ASM_RISCV_ATOMIC_H
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+# include <asm-generic/atomic64.h>
+#else
+# if (__riscv_xlen < 64)
+# error "64-bit atomics require XLEN to be at least 64"
+# endif
+#endif
+
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+#define ATOMIC_INIT(i) { (i) }
+static __always_inline int atomic_read(const atomic_t *v)
+{
+ return READ_ONCE(v->counter);
+}
+static __always_inline void atomic_set(atomic_t *v, int i)
+{
+ WRITE_ONCE(v->counter, i);
+}
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC64_INIT(i) { (i) }
+static __always_inline long atomic64_read(const atomic64_t *v)
+{
+ return READ_ONCE(v->counter);
+}
+static __always_inline void atomic64_set(atomic64_t *v, long i)
+{
+ WRITE_ONCE(v->counter, i);
+}
+#endif
+
+/*
+ * First, the atomic ops that have no ordering constraints and therefore don't
+ * have the AQ or RL bits set. These don't return anything, so there's only
+ * one version to worry about.
+ */
+#define ATOMIC_OP(op, asm_op, c_op, I, asm_type, c_type, prefix) \
+static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
+{ \
+ __asm__ __volatile__ ( \
+ "amo" #asm_op "." #asm_type " zero, %1, %0" \
+ : "+A" (v->counter) \
+ : "r" (I) \
+ : "memory"); \
+}
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(op, asm_op, c_op, I) \
+ ATOMIC_OP (op, asm_op, c_op, I, w, int, )
+#else
+#define ATOMIC_OPS(op, asm_op, c_op, I) \
+ ATOMIC_OP (op, asm_op, c_op, I, w, int, ) \
+ ATOMIC_OP (op, asm_op, c_op, I, d, long, 64)
+#endif
+
+ATOMIC_OPS(add, add, +, i)
+ATOMIC_OPS(sub, add, +, -i)
+ATOMIC_OPS(and, and, &, i)
+ATOMIC_OPS( or, or, |, i)
+ATOMIC_OPS(xor, xor, ^, i)
+
+#undef ATOMIC_OP
+#undef ATOMIC_OPS
+
+/*
+ * Atomic ops that have ordered, relaxed, acquire, and release variants.
+ * There are two flavors of these: the arithmetic ops have both fetch and return
+ * versions, while the logical ops only have fetch versions.
+ */
+#define ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix) \
+static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v) \
+{ \
+ register c_type ret; \
+ __asm__ __volatile__ ( \
+ "amo" #asm_op "." #asm_type #asm_or " %1, %2, %0" \
+ : "+A" (v->counter), "=r" (ret) \
+ : "r" (I) \
+ : "memory"); \
+ return ret; \
+}
+
+#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix) \
+static __always_inline c_type atomic##prefix##_##op##_return##c_or(c_type i, atomic##prefix##_t *v) \
+{ \
+ return atomic##prefix##_fetch_##op##c_or(i, v) c_op I; \
+}
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
+ ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int, )
+#else
+#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
+ ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
+ ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
+#endif
+
+ATOMIC_OPS(add, add, +, i, , _relaxed)
+ATOMIC_OPS(add, add, +, i, .aq , _acquire)
+ATOMIC_OPS(add, add, +, i, .rl , _release)
+ATOMIC_OPS(add, add, +, i, .aqrl, )
+
+ATOMIC_OPS(sub, add, +, -i, , _relaxed)
+ATOMIC_OPS(sub, add, +, -i, .aq , _acquire)
+ATOMIC_OPS(sub, add, +, -i, .rl , _release)
+ATOMIC_OPS(sub, add, +, -i, .aqrl, )
+
+#undef ATOMIC_OPS
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
+ ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int, )
+#else
+#define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
+ ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int, ) \
+ ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
+#endif
+
+ATOMIC_OPS(and, and, &, i, , _relaxed)
+ATOMIC_OPS(and, and, &, i, .aq , _acquire)
+ATOMIC_OPS(and, and, &, i, .rl , _release)
+ATOMIC_OPS(and, and, &, i, .aqrl, )
+
+ATOMIC_OPS( or, or, |, i, , _relaxed)
+ATOMIC_OPS( or, or, |, i, .aq , _acquire)
+ATOMIC_OPS( or, or, |, i, .rl , _release)
+ATOMIC_OPS( or, or, |, i, .aqrl, )
+
+ATOMIC_OPS(xor, xor, ^, i, , _relaxed)
+ATOMIC_OPS(xor, xor, ^, i, .aq , _acquire)
+ATOMIC_OPS(xor, xor, ^, i, .rl , _release)
+ATOMIC_OPS(xor, xor, ^, i, .aqrl, )
+
+#undef ATOMIC_OPS
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+
+/*
+ * The extra atomic operations that are constructed from one of the core
+ * AMO-based operations above (aside from sub, which is easier to fit above).
+ * These are required to perform a barrier, but they're OK this way because
+ * atomic_*_return is also required to perform a barrier.
+ */
+#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix) \
+static __always_inline bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
+{ \
+ return atomic##prefix##_##func_op##_return(i, v) comp_op I; \
+}
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(op, func_op, comp_op, I) \
+ ATOMIC_OP (op, func_op, comp_op, I, int, )
+#else
+#define ATOMIC_OPS(op, func_op, comp_op, I) \
+ ATOMIC_OP (op, func_op, comp_op, I, int, ) \
+ ATOMIC_OP (op, func_op, comp_op, I, long, 64)
+#endif
+
+ATOMIC_OPS(add_and_test, add, ==, 0)
+ATOMIC_OPS(sub_and_test, sub, ==, 0)
+ATOMIC_OPS(add_negative, add, <, 0)
+
+#undef ATOMIC_OP
+#undef ATOMIC_OPS
+
+#define ATOMIC_OP(op, func_op, c_op, I, c_type, prefix) \
+static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v) \
+{ \
+ atomic##prefix##_##func_op(I, v); \
+}
+
+#define ATOMIC_FETCH_OP(op, func_op, c_op, I, c_type, prefix) \
+static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \
+{ \
+ return atomic##prefix##_fetch_##func_op(I, v); \
+}
+
+#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix) \
+static __always_inline c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v) \
+{ \
+ return atomic##prefix##_fetch_##op(v) c_op I; \
+}
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(op, asm_op, c_op, I) \
+ ATOMIC_OP (op, asm_op, c_op, I, int, ) \
+ ATOMIC_FETCH_OP (op, asm_op, c_op, I, int, ) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, )
+#else
+#define ATOMIC_OPS(op, asm_op, c_op, I) \
+ ATOMIC_OP (op, asm_op, c_op, I, int, ) \
+ ATOMIC_FETCH_OP (op, asm_op, c_op, I, int, ) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, int, ) \
+ ATOMIC_OP (op, asm_op, c_op, I, long, 64) \
+ ATOMIC_FETCH_OP (op, asm_op, c_op, I, long, 64) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
+#endif
+
+ATOMIC_OPS(inc, add, +, 1)
+ATOMIC_OPS(dec, add, +, -1)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+
+#define ATOMIC_OP(op, func_op, comp_op, I, prefix) \
+static __always_inline bool atomic##prefix##_##op(atomic##prefix##_t *v) \
+{ \
+ return atomic##prefix##_##func_op##_return(v) comp_op I; \
+}
+
+ATOMIC_OP(inc_and_test, inc, ==, 0, )
+ATOMIC_OP(dec_and_test, dec, ==, 0, )
+#ifndef CONFIG_GENERIC_ATOMIC64
+ATOMIC_OP(inc_and_test, inc, ==, 0, 64)
+ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
+#endif
+
+#undef ATOMIC_OP
+
+/* This is required to provide a barrier on success. */
+static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0:\n\t"
+ "lr.w.aqrl %[p], %[c]\n\t"
+ "beq %[p], %[u], 1f\n\t"
+ "add %[rc], %[p], %[a]\n\t"
+ "sc.w.aqrl %[rc], %[rc], %[c]\n\t"
+ "bnez %[rc], 0b\n\t"
+ "1:"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ : [a]"r" (a), [u]"r" (u)
+ : "memory");
+ return prev;
+}
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long prev, rc;
+
+ __asm__ __volatile__ (
+ "0:\n\t"
+ "lr.d.aqrl %[p], %[c]\n\t"
+ "beq %[p], %[u], 1f\n\t"
+ "add %[rc], %[p], %[a]\n\t"
+ "sc.d.aqrl %[rc], %[rc], %[c]\n\t"
+ "bnez %[rc], 0b\n\t"
+ "1:"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ : [a]"r" (a), [u]"r" (u)
+ : "memory");
+ return prev;
+}
+
+static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ return __atomic64_add_unless(v, a, u) != u;
+}
+#endif
+
+/*
+ * The extra atomic operations that are constructed from one of the core
+ * LR/SC-based operations above.
+ */
+static __always_inline int atomic_inc_not_zero(atomic_t *v)
+{
+ return __atomic_add_unless(v, 1, 0);
+}
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
+{
+ return atomic64_add_unless(v, 1, 0);
+}
+#endif
+
+/*
+ * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
+ * {cmp,}xchg and the operations that return, so they need a barrier. We just
+ * use the other implementations directly.
+ */
+#define ATOMIC_OP(c_t, prefix, c_or, size, asm_or) \
+static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n) \
+{ \
+ return __cmpxchg(&(v->counter), o, n, size, asm_or, asm_or); \
+} \
+static __always_inline c_t atomic##prefix##_xchg##c_or(atomic##prefix##_t *v, c_t n) \
+{ \
+ return __xchg(n, &(v->counter), size, asm_or); \
+}
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(c_or, asm_or) \
+ ATOMIC_OP( int, , c_or, 4, asm_or)
+#else
+#define ATOMIC_OPS(c_or, asm_or) \
+ ATOMIC_OP( int, , c_or, 4, asm_or) \
+ ATOMIC_OP(long, 64, c_or, 8, asm_or)
+#endif
+
+ATOMIC_OPS( , .aqrl)
+ATOMIC_OPS(_acquire, .aq)
+ATOMIC_OPS(_release, .rl)
+ATOMIC_OPS(_relaxed, )
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP
+
+static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0:\n\t"
+ "lr.w.aqrl %[p], %[c]\n\t"
+ "sub %[rc], %[p], %[o]\n\t"
+ "bltz %[rc], 1f\n\t"
+ "sc.w.aqrl %[rc], %[rc], %[c]\n\t"
+ "bnez %[rc], 0b\n\t"
+ "1:"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ : [o]"r" (offset)
+ : "memory");
+ return prev - offset;
+}
+
+#define atomic_dec_if_positive(v) atomic_sub_if_positive(v, 1)
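+
+/*
+ * For example, with an atomic_t v holding 1, atomic_dec_if_positive(&v)
+ * stores 0 and returns 0; with v holding 0 no store is performed and -1 is
+ * returned, so callers can check the sign of the result to see whether the
+ * decrement took effect.
+ */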
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
+{
+ long prev, rc;
+
+ __asm__ __volatile__ (
+ "0:\n\t"
+ "lr.d.aqrl %[p], %[c]\n\t"
+ "sub %[rc], %[p], %[o]\n\t"
+ "bltz %[rc], 1f\n\t"
+ "sc.d.aqrl %[rc], %[rc], %[c]\n\t"
+ "bnez %[rc], 0b\n\t"
+ "1:"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ : [o]"r" (offset)
+ : "memory");
+ return prev - offset;
+}
+
+#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(v, 1)
+#endif
+
+#endif /* _ASM_RISCV_ATOMIC_H */
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
new file mode 100644
index 000000000000..183534b7c39b
--- /dev/null
+++ b/arch/riscv/include/asm/barrier.h
@@ -0,0 +1,68 @@
+/*
+ * Based on arch/arm/include/asm/barrier.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2013 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_RISCV_BARRIER_H
+#define _ASM_RISCV_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define RISCV_FENCE(p, s) \
+ __asm__ __volatile__ ("fence " #p "," #s : : : "memory")
+
+/* These barriers need to enforce ordering on both devices and memory. */
+#define mb() RISCV_FENCE(iorw,iorw)
+#define rmb() RISCV_FENCE(ir,ir)
+#define wmb() RISCV_FENCE(ow,ow)
+
+/* These barriers do not need to enforce ordering on devices, just memory. */
+#define smp_mb() RISCV_FENCE(rw,rw)
+#define smp_rmb() RISCV_FENCE(r,r)
+#define smp_wmb() RISCV_FENCE(w,w)
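+
+/*
+ * For example, smp_mb() above emits a single "fence rw,rw" instruction, which
+ * orders all earlier loads and stores before all later loads and stores as
+ * observed by other harts.
+ */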
+
+/*
+ * These fences exist to enforce ordering around the relaxed AMOs. The
+ * documentation defines that
+ * "
+ * atomic_fetch_add();
+ * is equivalent to:
+ * smp_mb__before_atomic();
+ * atomic_fetch_add_relaxed();
+ * smp_mb__after_atomic();
+ * "
+ * So we emit full fences on both sides.
+ */
+#define __smp_mb__before_atomic()	smp_mb()
+#define __smp_mb__after_atomic()	smp_mb()
+
+/*
+ * These barriers prevent accesses performed outside a spinlock from being moved
+ * inside a spinlock. Since our spinlocks set only the aq/rl bits, which enforce
+ * release consistency alone, we need full fences here.
+ */
+#define smp_mb__before_spinlock()	smp_mb()
+#define smp_mb__after_spinlock()	smp_mb()
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_BARRIER_H */
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
new file mode 100644
index 000000000000..7c281ef1d583
--- /dev/null
+++ b/arch/riscv/include/asm/bitops.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_BITOPS_H
+#define _ASM_RISCV_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error "Only <linux/bitops.h> can be included directly"
+#endif /* _LINUX_BITOPS_H */
+
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <asm/barrier.h>
+#include <asm/bitsperlong.h>
+
+#ifndef smp_mb__before_clear_bit
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
+#endif /* smp_mb__before_clear_bit */
+
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffs.h>
+
+#include <asm-generic/bitops/hweight.h>
+
+#if (BITS_PER_LONG == 64)
+#define __AMO(op) "amo" #op ".d"
+#elif (BITS_PER_LONG == 32)
+#define __AMO(op) "amo" #op ".w"
+#else
+#error "Unexpected BITS_PER_LONG"
+#endif
+
+#define __test_and_op_bit_ord(op, mod, nr, addr, ord) \
+({ \
+ unsigned long __res, __mask; \
+ __mask = BIT_MASK(nr); \
+ __asm__ __volatile__ ( \
+ __AMO(op) #ord " %0, %2, %1" \
+ : "=r" (__res), "+A" (addr[BIT_WORD(nr)]) \
+ : "r" (mod(__mask)) \
+ : "memory"); \
+ ((__res & __mask) != 0); \
+})
+
+#define __op_bit_ord(op, mod, nr, addr, ord) \
+ __asm__ __volatile__ ( \
+ __AMO(op) #ord " zero, %1, %0" \
+ : "+A" (addr[BIT_WORD(nr)]) \
+ : "r" (mod(BIT_MASK(nr))) \
+ : "memory");
+
+#define __test_and_op_bit(op, mod, nr, addr) \
+ __test_and_op_bit_ord(op, mod, nr, addr, )
+#define __op_bit(op, mod, nr, addr) \
+ __op_bit_ord(op, mod, nr, addr, )
+
+/* Bitmask modifiers */
+#define __NOP(x) (x)
+#define __NOT(x) (~(x))
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation may be reordered on architectures other than x86.
+ */
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit(or, __NOP, nr, addr);
+}
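+
+/*
+ * On RV64 this becomes a single "amoor.d" on the word containing the bit; the
+ * old value of that word is then used to compute the return value.
+ */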
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation can be reordered on architectures other than x86.
+ */
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit(and, __NOT, nr, addr);
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit(xor, __NOP, nr, addr);
+}
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile unsigned long *addr)
+{
+ __op_bit(or, __NOP, nr, addr);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ */
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+ __op_bit(and, __NOT, nr, addr);
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() may be reordered on architectures other than x86.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile unsigned long *addr)
+{
+ __op_bit(xor, __NOP, nr, addr);
+}
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics.
+ * It can be used to implement bit locks.
+ */
+static inline int test_and_set_bit_lock(
+ unsigned long nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
+}
+
+/**
+ * clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+static inline void clear_bit_unlock(
+ unsigned long nr, volatile unsigned long *addr)
+{
+ __op_bit_ord(and, __NOT, nr, addr, .rl);
+}
+
+/**
+ * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This operation is like clear_bit_unlock, however it is not atomic.
+ * It does provide release barrier semantics so it can be used to unlock
+ * a bit lock; however, it should only be used if no other CPU can modify
+ * any bits in the memory until the lock is released (a good example is
+ * if the bit lock itself protects access to the other bits in the word).
+ *
+ * On RISC-V systems there seems to be no benefit to taking advantage of the
+ * non-atomic property here: it's a lot more instructions and we still have to
+ * provide release semantics anyway.
+ */
+static inline void __clear_bit_unlock(
+ unsigned long nr, volatile unsigned long *addr)
+{
+ clear_bit_unlock(nr, addr);
+}
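+
+/*
+ * A minimal bit-lock sketch (LOCK_BIT and flags are just illustrative names):
+ *
+ *	while (test_and_set_bit_lock(LOCK_BIT, &flags))
+ *		cpu_relax();
+ *	... critical section ...
+ *	clear_bit_unlock(LOCK_BIT, &flags);
+ *
+ * The .aq/.rl suffixes used above supply the acquire/release ordering this
+ * pattern relies on.
+ */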
+
+#undef __test_and_op_bit
+#undef __op_bit
+#undef __NOP
+#undef __NOT
+#undef __AMO
+
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* _ASM_RISCV_BITOPS_H */
diff --git a/arch/riscv/include/asm/bug.h b/arch/riscv/include/asm/bug.h
new file mode 100644
index 000000000000..c3e13764a943
--- /dev/null
+++ b/arch/riscv/include/asm/bug.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_BUG_H
+#define _ASM_RISCV_BUG_H
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+
+#include <asm/asm.h>
+
+#ifdef CONFIG_GENERIC_BUG
+#define __BUG_INSN _AC(0x00100073, UL) /* ebreak */
+
+#ifndef __ASSEMBLY__
+typedef u32 bug_insn_t;
+
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+#define __BUG_ENTRY_ADDR INT " 1b - 2b"
+#define __BUG_ENTRY_FILE INT " %0 - 2b"
+#else
+#define __BUG_ENTRY_ADDR RISCV_PTR " 1b"
+#define __BUG_ENTRY_FILE RISCV_PTR " %0"
+#endif
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define __BUG_ENTRY \
+ __BUG_ENTRY_ADDR "\n\t" \
+ __BUG_ENTRY_FILE "\n\t" \
+ SHORT " %1"
+#else
+#define __BUG_ENTRY \
+ __BUG_ENTRY_ADDR
+#endif
+
+#define BUG() \
+do { \
+ __asm__ __volatile__ ( \
+ "1:\n\t" \
+ "ebreak\n" \
+ ".pushsection __bug_table,\"a\"\n\t" \
+ "2:\n\t" \
+ __BUG_ENTRY "\n\t" \
+ ".org 2b + %2\n\t" \
+ ".popsection" \
+ : \
+ : "i" (__FILE__), "i" (__LINE__), \
+ "i" (sizeof(struct bug_entry))); \
+ unreachable(); \
+} while (0)
+#endif /* !__ASSEMBLY__ */
+#else /* CONFIG_GENERIC_BUG */
+#ifndef __ASSEMBLY__
+#define BUG() \
+do { \
+ __asm__ __volatile__ ("ebreak\n"); \
+ unreachable(); \
+} while (0)
+#endif /* !__ASSEMBLY__ */
+#endif /* CONFIG_GENERIC_BUG */
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs;
+struct task_struct;
+
+extern void die(struct pt_regs *regs, const char *str);
+extern void do_trap(struct pt_regs *regs, int signo, int code,
+ unsigned long addr, struct task_struct *tsk);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_BUG_H */
diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h
new file mode 100644
index 000000000000..e8f0d1110d74
--- /dev/null
+++ b/arch/riscv/include/asm/cache.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_CACHE_H
+#define _ASM_RISCV_CACHE_H
+
+#define L1_CACHE_SHIFT 6
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#endif /* _ASM_RISCV_CACHE_H */
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
new file mode 100644
index 000000000000..0595585013b0
--- /dev/null
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_CACHEFLUSH_H
+#define _ASM_RISCV_CACHEFLUSH_H
+
+#include <asm-generic/cacheflush.h>
+
+#undef flush_icache_range
+#undef flush_icache_user_range
+
+static inline void local_flush_icache_all(void)
+{
+ asm volatile ("fence.i" ::: "memory");
+}
+
+#ifndef CONFIG_SMP
+
+#define flush_icache_range(start, end) local_flush_icache_all()
+#define flush_icache_user_range(vma, pg, addr, len) local_flush_icache_all()
+
+#else /* CONFIG_SMP */
+
+#define flush_icache_range(start, end) sbi_remote_fence_i(0)
+#define flush_icache_user_range(vma, pg, addr, len) sbi_remote_fence_i(0)
+
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_RISCV_CACHEFLUSH_H */
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..db249dbc7b97
--- /dev/null
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2014 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_CMPXCHG_H
+#define _ASM_RISCV_CMPXCHG_H
+
+#include <linux/bug.h>
+
+#include <asm/barrier.h>
+
+#define __xchg(new, ptr, size, asm_or) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ "amoswap.w" #asm_or " %0, %2, %1" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ "amoswap.d" #asm_or " %0, %2, %1" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define xchg(ptr, x) (__xchg((x), (ptr), sizeof(*(ptr)), .aqrl))
+
+#define xchg32(ptr, x) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ xchg((ptr), (x)); \
+})
+
+#define xchg64(ptr, x) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ xchg((ptr), (x)); \
+})
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#define __cmpxchg(ptr, old, new, size, lrb, scb) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ "0:" \
+ "lr.w" #scb " %0, %2\n" \
+ "bne %0, %z3, 1f\n" \
+ "sc.w" #lrb " %1, %z4, %2\n" \
+ "bnez %1, 0b\n" \
+ "1:" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" (__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ "0:" \
+ "lr.d" #scb " %0, %2\n" \
+ "bne %0, %z3, 1f\n" \
+ "sc.d" #lrb " %1, %z4, %2\n" \
+ "bnez %1, 0b\n" \
+ "1:" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" (__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define cmpxchg(ptr, o, n) \
+ (__cmpxchg((ptr), (o), (n), sizeof(*(ptr)), .aqrl, .aqrl))
+
+#define cmpxchg_local(ptr, o, n) \
+ (__cmpxchg((ptr), (o), (n), sizeof(*(ptr)), , ))
+
+#define cmpxchg32(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ cmpxchg((ptr), (o), (n)); \
+})
+
+#define cmpxchg32_local(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ cmpxchg_local((ptr), (o), (n)); \
+})
+
+#define cmpxchg64(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg((ptr), (o), (n)); \
+})
+
+#define cmpxchg64_local(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+})
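+
+/*
+ * A typical lock-free update built on cmpxchg() might look like this, with
+ * "counter" standing in for some unsigned long the caller owns:
+ *
+ *	unsigned long old, new;
+ *
+ *	do {
+ *		old = READ_ONCE(counter);
+ *		new = old + 1;
+ *	} while (cmpxchg(&counter, old, new) != old);
+ *
+ * Success is detected by the return value matching the expected old value.
+ */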
+
+#endif /* _ASM_RISCV_CMPXCHG_H */
diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
new file mode 100644
index 000000000000..044aecff8854
--- /dev/null
+++ b/arch/riscv/include/asm/compat.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_COMPAT_H
+#define __ASM_COMPAT_H
+#ifdef CONFIG_COMPAT
+
+#if defined(CONFIG_64BIT)
+#define COMPAT_UTS_MACHINE "riscv64\0\0"
+#elif defined(CONFIG_32BIT)
+#define COMPAT_UTS_MACHINE "riscv32\0\0"
+#else
+#error "Unknown RISC-V base ISA"
+#endif
+
+#endif /*CONFIG_COMPAT*/
+#endif /*__ASM_COMPAT_H*/
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
new file mode 100644
index 000000000000..0d64bc9f4f91
--- /dev/null
+++ b/arch/riscv/include/asm/csr.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_CSR_H
+#define _ASM_RISCV_CSR_H
+
+#include <linux/const.h>
+
+/* Status register flags */
+#define SR_IE _AC(0x00000002, UL) /* Interrupt Enable */
+#define SR_PIE _AC(0x00000020, UL) /* Previous IE */
+#define SR_PS _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */
+
+#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
+#define SR_FS_OFF _AC(0x00000000, UL)
+#define SR_FS_INITIAL _AC(0x00002000, UL)
+#define SR_FS_CLEAN _AC(0x00004000, UL)
+#define SR_FS_DIRTY _AC(0x00006000, UL)
+
+#define SR_XS _AC(0x00018000, UL) /* Extension Status */
+#define SR_XS_OFF _AC(0x00000000, UL)
+#define SR_XS_INITIAL _AC(0x00008000, UL)
+#define SR_XS_CLEAN _AC(0x00010000, UL)
+#define SR_XS_DIRTY _AC(0x00018000, UL)
+
+#ifndef CONFIG_64BIT
+#define SR_SD _AC(0x80000000, UL) /* FS/XS dirty */
+#else
+#define SR_SD _AC(0x8000000000000000, UL) /* FS/XS dirty */
+#endif
+
+/* SPTBR flags */
+#if __riscv_xlen == 32
+#define SPTBR_PPN _AC(0x003FFFFF, UL)
+#define SPTBR_MODE_32 _AC(0x80000000, UL)
+#define SPTBR_MODE SPTBR_MODE_32
+#else
+#define SPTBR_PPN _AC(0x00000FFFFFFFFFFF, UL)
+#define SPTBR_MODE_39 _AC(0x8000000000000000, UL)
+#define SPTBR_MODE SPTBR_MODE_39
+#endif
+
+/* Interrupt Enable and Interrupt Pending flags */
+#define SIE_SSIE _AC(0x00000002, UL) /* Software Interrupt Enable */
+#define SIE_STIE _AC(0x00000020, UL) /* Timer Interrupt Enable */
+
+#define EXC_INST_MISALIGNED 0
+#define EXC_INST_ACCESS 1
+#define EXC_BREAKPOINT 3
+#define EXC_LOAD_ACCESS 5
+#define EXC_STORE_ACCESS 7
+#define EXC_SYSCALL 8
+#define EXC_INST_PAGE_FAULT 12
+#define EXC_LOAD_PAGE_FAULT 13
+#define EXC_STORE_PAGE_FAULT 15
+
+#ifndef __ASSEMBLY__
+
+#define csr_swap(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrw %0, " #csr ", %1" \
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_read(csr) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__ ("csrr %0, " #csr \
+ : "=r" (__v) : \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_write(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrw " #csr ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
+
+#define csr_read_set(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrs %0, " #csr ", %1" \
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_set(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrs " #csr ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
+
+#define csr_read_clear(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrc %0, " #csr ", %1" \
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_clear(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrc " #csr ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
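+
+/*
+ * Example usage, mirroring the irqflags code: reading sstatus while atomically
+ * clearing the interrupt-enable bit, and later restoring it, can be written as
+ *
+ *	unsigned long flags = csr_read_clear(sstatus, SR_IE);
+ *	...
+ *	csr_set(sstatus, flags & SR_IE);
+ */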
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_CSR_H */
diff --git a/arch/riscv/include/asm/current.h b/arch/riscv/include/asm/current.h
new file mode 100644
index 000000000000..2cf6336ef600
--- /dev/null
+++ b/arch/riscv/include/asm/current.h
@@ -0,0 +1,45 @@
+/*
+ * Based on arm/arm64/include/asm/current.h
+ *
+ * Copyright (C) 2016 ARM
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <linux/bug.h>
+#include <linux/compiler.h>
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+/*
+ * This only works because "struct thread_info" is at offset 0 from "struct
+ * task_struct". This constraint seems to be necessary on other architectures
+ * as well, but __switch_to enforces it. We can't check TASK_TI here because
+ * <asm/asm-offsets.h> includes this, and I can't get the definition of "struct
+ * task_struct" here due to some header ordering problems.
+ */
+static __always_inline struct task_struct *get_current(void)
+{
+ register struct task_struct *tp __asm__("tp");
+ return tp;
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
diff --git a/arch/riscv/include/asm/delay.h b/arch/riscv/include/asm/delay.h
new file mode 100644
index 000000000000..cbb0c9eb96cb
--- /dev/null
+++ b/arch/riscv/include/asm/delay.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2016 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_DELAY_H
+#define _ASM_RISCV_DELAY_H
+
+extern unsigned long riscv_timebase;
+
+#define udelay udelay
+extern void udelay(unsigned long usecs);
+
+#define ndelay ndelay
+extern void ndelay(unsigned long nsecs);
+
+extern void __delay(unsigned long cycles);
+
+#endif /* _ASM_RISCV_DELAY_H */
diff --git a/arch/riscv/include/asm/dma-mapping.h b/arch/riscv/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..3eec1000196d
--- /dev/null
+++ b/arch/riscv/include/asm/dma-mapping.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2003-2004 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2016 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_RISCV_DMA_MAPPING_H
+#define __ASM_RISCV_DMA_MAPPING_H
+
+/* Use ops->dma_mapping_error (if it exists) or assume success */
+// #undef DMA_ERROR_CODE
+
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+ return &dma_noop_ops;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
+#endif /* __ASM_RISCV_DMA_MAPPING_H */
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
new file mode 100644
index 000000000000..a1ef503d616e
--- /dev/null
+++ b/arch/riscv/include/asm/elf.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ASM_RISCV_ELF_H
+#define _ASM_RISCV_ELF_H
+
+#include <uapi/asm/elf.h>
+#include <asm/auxvec.h>
+#include <asm/byteorder.h>
+
+/* TODO: Move definition into include/uapi/linux/elf-em.h */
+#define EM_RISCV 0xF3
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_ARCH EM_RISCV
+
+#ifdef CONFIG_64BIT
+#define ELF_CLASS ELFCLASS64
+#else
+#define ELF_CLASS ELFCLASS32
+#endif
+
+#if defined(__LITTLE_ENDIAN)
+#define ELF_DATA ELFDATA2LSB
+#elif defined(__BIG_ENDIAN)
+#define ELF_DATA ELFDATA2MSB
+#else
+#error "Unknown endianness"
+#endif
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_RISCV)
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE (PAGE_SIZE)
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE ((TASK_SIZE / 3) * 2)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this CPU supports. This could be done in user space,
+ * but it's not easy, and we've already done it here.
+ */
+#define ELF_HWCAP (elf_hwcap)
+extern unsigned long elf_hwcap;
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM (NULL)
+
+#define ARCH_DLINFO \
+do { \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (elf_addr_t)current->mm->context.vdso); \
+} while (0)
+
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+
+#endif /* _ASM_RISCV_ELF_H */
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
new file mode 100644
index 000000000000..8a4ed7bbcbea
--- /dev/null
+++ b/arch/riscv/include/asm/hwcap.h
@@ -0,0 +1,37 @@
+/*
+ * Copied from arch/arm64/include/asm/hwcap.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_HWCAP_H
+#define __ASM_HWCAP_H
+
+#include <uapi/asm/hwcap.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP (elf_hwcap)
+
+enum {
+ CAP_HWCAP = 1,
+};
+
+extern unsigned long elf_hwcap;
+#endif
+#endif
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
new file mode 100644
index 000000000000..c1f32cfcc79b
--- /dev/null
+++ b/arch/riscv/include/asm/io.h
@@ -0,0 +1,303 @@
+/*
+ * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
+ * which was based on arch/arm/include/io.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2014 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_IO_H
+#define _ASM_RISCV_IO_H
+
+#ifdef CONFIG_MMU
+
+extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
+
+/*
+ * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
+ * change the properties of memory regions. This should be fixed by the
+ * upcoming platform spec.
+ */
+#define ioremap_nocache(addr, size) ioremap((addr), (size))
+#define ioremap_wc(addr, size) ioremap((addr), (size))
+#define ioremap_wt(addr, size) ioremap((addr), (size))
+
+extern void iounmap(void __iomem *addr);
+
+#endif /* CONFIG_MMU */
+
+/* Generic IO read/write. These perform native-endian accesses. */
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+{
+ asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+{
+ asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+{
+ asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#ifdef CONFIG_64BIT
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
+{
+ asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+#endif
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 val;
+
+ asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 val;
+
+ asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 val;
+
+ asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#ifdef CONFIG_64BIT
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+#endif
+
+/*
+ * FIXME: I'm flip-flopping on whether or not we should keep this or enforce
+ * the ordering with I/O on spinlocks like PowerPC does. The worry is that
+ * drivers won't get this correct, but I also don't want to introduce a fence
+ * into the lock code that otherwise only uses AMOs (and is essentially defined
+ * by the ISA to be correct). For now I'm leaving this here: "o,w" is
+ * sufficient to ensure that all writes to the device have completed before the
+ * write to the spinlock is allowed to commit. I surmised this from reading
+ * "ACQUIRES VS I/O ACCESSES" in memory-barriers.txt.
+ */
+#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
+
+/*
+ * Unordered I/O memory access primitives. These are even more relaxed than
+ * the relaxed versions, as they don't even order accesses between successive
+ * operations to the I/O regions.
+ */
+#define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; })
+#define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
+#define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
+
+#define writeb_cpu(v,c) ((void)__raw_writeb((v),(c)))
+#define writew_cpu(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
+#define writel_cpu(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
+
+#ifdef CONFIG_64BIT
+#define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
+#define writeq_cpu(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
+#endif
+
+/*
+ * Relaxed I/O memory access primitives. These follow the Device memory
+ * ordering rules but do not guarantee any ordering relative to Normal memory
+ * accesses. These are defined to order the indicated access (either a read or
+ * write) with all other I/O memory accesses. Since the platform specification
+ * defines that all I/O regions are strongly ordered on channel 2, no explicit
+ * fences are required to enforce this ordering.
+ */
+/* FIXME: These are now the same as asm-generic */
+#define __io_rbr() do {} while (0)
+#define __io_rar() do {} while (0)
+#define __io_rbw() do {} while (0)
+#define __io_raw() do {} while (0)
+
+#define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
+#define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
+#define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
+
+#define writeb_relaxed(v,c) ({ __io_rbw(); writeb_cpu((v),(c)); __io_raw(); })
+#define writew_relaxed(v,c) ({ __io_rbw(); writew_cpu((v),(c)); __io_raw(); })
+#define writel_relaxed(v,c) ({ __io_rbw(); writel_cpu((v),(c)); __io_raw(); })
+
+#ifdef CONFIG_64BIT
+#define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
+#define writeq_relaxed(v,c) ({ __io_rbw(); writeq_cpu((v),(c)); __io_raw(); })
+#endif
+
+/*
+ * I/O memory access primitives. Reads are ordered relative to any
+ * following Normal memory access. Writes are ordered relative to any prior
+ * Normal memory access. The memory barriers here are necessary as RISC-V
+ * doesn't define any ordering between the memory space and the I/O space.
+ */
+#define __io_br() do {} while (0)
+#define __io_ar() __asm__ __volatile__ ("fence i,r" : : : "memory");
+#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory");
+#define __io_aw() do {} while (0)
+
+#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(); __v; })
+#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(); __v; })
+#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(); __v; })
+
+#define writeb(v,c) ({ __io_bw(); writeb_cpu((v),(c)); __io_aw(); })
+#define writew(v,c) ({ __io_bw(); writew_cpu((v),(c)); __io_aw(); })
+#define writel(v,c) ({ __io_bw(); writel_cpu((v),(c)); __io_aw(); })
+
+#ifdef CONFIG_64BIT
+#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(); __v; })
+#define writeq(v,c) ({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); })
+#endif
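+
+/*
+ * With the fences above, a driver doing something like (buf and UART_TX being
+ * made-up names)
+ *
+ *	buf[0] = cmd;
+ *	writel(val, base + UART_TX);
+ *
+ * gets the normal-memory store to buf ordered before the MMIO write, thanks
+ * to the "fence w,o" emitted by __io_bw().
+ */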
+
+/*
+ * Emulation routines for the port-mapped IO space used by some PCI drivers.
+ * These are defined as being "fully synchronous", but also "not guaranteed to
+ * be fully ordered with respect to other memory and I/O operations". We're
+ * going to be on the safe side here and just make them:
+ * - Fully ordered WRT each other, by bracketing them with two fences. The
+ * outer fences order both I/O directions, so inX is ordered with outX, while the
+ * inner fences only need the type of the access (I for inX and O for outX).
+ * - Ordered in the same manner as readX/writeX WRT memory by subsuming their
+ * fences.
+ * - Ordered WRT timer reads, so udelay and friends don't get elided by the
+ * implementation.
+ * Note that there is no way to actually enforce that outX is a non-posted
+ * operation on RISC-V, but hopefully the timer ordering constraint is
+ * sufficient to ensure this works sanely on controllers that support I/O
+ * writes.
+ */
+#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory");
+#define __io_par() __asm__ __volatile__ ("fence i,ior" : : : "memory");
+#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
+#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
+
+#define inb(c) ({ u8 __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
+#define inw(c) ({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
+#define inl(c) ({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; })
+
+#define outb(v,c) ({ __io_pbw(); writeb_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
+#define outw(v,c) ({ __io_pbw(); writew_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
+#define outl(v,c) ({ __io_pbw(); writel_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
+
+#ifdef CONFIG_64BIT
+#define inq(c) ({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(c)); __io_par(); __v; })
+#define outq(v,c) ({ __io_pbw(); writeq_cpu((v),(void*)(c)); __io_paw(); })
+#endif
+
+/*
+ * Accesses from a single hart to a single I/O address must be ordered. This
+ * allows us to use the raw read macros, but we still need to fence before and
+ * after the block to ensure ordering WRT other macros. These are defined to
+ * perform host-endian accesses so we use __raw instead of __cpu.
+ */
+#define __io_reads_ins(port, ctype, len, bfence, afence) \
+ static inline void __ ## port ## len(const volatile void __iomem *addr, \
+ void *buffer, \
+ unsigned int count) \
+ { \
+ bfence; \
+ if (count) { \
+ ctype *buf = buffer; \
+ \
+ do { \
+ ctype x = __raw_read ## len(addr); \
+ *buf++ = x; \
+ } while (--count); \
+ } \
+ afence; \
+ }
+
+#define __io_writes_outs(port, ctype, len, bfence, afence) \
+ static inline void __ ## port ## len(volatile void __iomem *addr, \
+ const void *buffer, \
+ unsigned int count) \
+ { \
+ bfence; \
+ if (count) { \
+ const ctype *buf = buffer; \
+ \
+ do { \
+			__raw_write ## len(*buf++, addr);	\
+ } while (--count); \
+ } \
+ afence; \
+ }
+
+__io_reads_ins(reads, u8, b, __io_br(), __io_ar())
+__io_reads_ins(reads, u16, w, __io_br(), __io_ar())
+__io_reads_ins(reads, u32, l, __io_br(), __io_ar())
+#define readsb(addr, buffer, count) __readsb(addr, buffer, count)
+#define readsw(addr, buffer, count) __readsw(addr, buffer, count)
+#define readsl(addr, buffer, count) __readsl(addr, buffer, count)
+
+__io_reads_ins(ins, u8, b, __io_pbr(), __io_par())
+__io_reads_ins(ins, u16, w, __io_pbr(), __io_par())
+__io_reads_ins(ins, u32, l, __io_pbr(), __io_par())
+#define insb(addr, buffer, count) __insb((void __iomem *)addr, buffer, count)
+#define insw(addr, buffer, count) __insw((void __iomem *)addr, buffer, count)
+#define insl(addr, buffer, count) __insl((void __iomem *)addr, buffer, count)
+
+__io_writes_outs(writes, u8, b, __io_bw(), __io_aw())
+__io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
+__io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
+#define writesb(addr, buffer, count) __writesb(addr, buffer, count)
+#define writesw(addr, buffer, count) __writesw(addr, buffer, count)
+#define writesl(addr, buffer, count) __writesl(addr, buffer, count)
+
+__io_writes_outs(outs, u8, b, __io_pbw(), __io_paw())
+__io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
+__io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
+#define outsb(addr, buffer, count) __outsb((void __iomem *)addr, buffer, count)
+#define outsw(addr, buffer, count) __outsw((void __iomem *)addr, buffer, count)
+#define outsl(addr, buffer, count) __outsl((void __iomem *)addr, buffer, count)
+
+#ifdef CONFIG_64BIT
+__io_reads_ins(reads, u64, q, __io_br(), __io_ar())
+#define readsq(addr, buffer, count) __readsq(addr, buffer, count)
+
+__io_reads_ins(ins, u64, q, __io_pbr(), __io_par())
+#define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count)
+
+__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
+#define writesq(addr, buffer, count) __writesq(addr, buffer, count)
+
+__io_writes_outs(outs, u64, q, __io_pbw(), __io_paw())
+#define outsq(addr, buffer, count) __outsq((void __iomem *)addr, buffer, count)
+#endif
+
+#include <asm-generic/io.h>
+
+#endif /* _ASM_RISCV_IO_H */
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
new file mode 100644
index 000000000000..4dee9d4c13c0
--- /dev/null
+++ b/arch/riscv/include/asm/irq.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_IRQ_H
+#define _ASM_RISCV_IRQ_H
+
+#define NR_IRQS 0
+
+#define INTERRUPT_CAUSE_SOFTWARE 1
+#define INTERRUPT_CAUSE_TIMER 5
+#define INTERRUPT_CAUSE_EXTERNAL 9
+
+void riscv_timer_interrupt(void);
+
+#include <asm-generic/irq.h>
+
+#endif /* _ASM_RISCV_IRQ_H */
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
new file mode 100644
index 000000000000..6fdc860d7f84
--- /dev/null
+++ b/arch/riscv/include/asm/irqflags.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef _ASM_RISCV_IRQFLAGS_H
+#define _ASM_RISCV_IRQFLAGS_H
+
+#include <asm/processor.h>
+#include <asm/csr.h>
+
+/* read interrupt enabled status */
+static inline unsigned long arch_local_save_flags(void)
+{
+ return csr_read(sstatus);
+}
+
+/* unconditionally enable interrupts */
+static inline void arch_local_irq_enable(void)
+{
+ csr_set(sstatus, SR_IE);
+}
+
+/* unconditionally disable interrupts */
+static inline void arch_local_irq_disable(void)
+{
+ csr_clear(sstatus, SR_IE);
+}
+
+/* get status and disable interrupts */
+static inline unsigned long arch_local_irq_save(void)
+{
+ return csr_read_clear(sstatus, SR_IE);
+}
+
+/* test flags */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & SR_IE);
+}
+
+/* test hardware interrupt enable bit */
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+/* set interrupt enabled status */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ csr_set(sstatus, flags & SR_IE);
+}
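+
+/*
+ * These hooks back the generic local_irq_save()/local_irq_restore() helpers,
+ * so typical callers look like:
+ *
+ *	unsigned long flags;
+ *
+ *	local_irq_save(flags);
+ *	... code that must not be interrupted ...
+ *	local_irq_restore(flags);
+ */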
+
+#endif /* _ASM_RISCV_IRQFLAGS_H */
diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h
new file mode 100644
index 000000000000..c7eb010d1528
--- /dev/null
+++ b/arch/riscv/include/asm/kprobes.h
@@ -0,0 +1,22 @@
+/*
+ * Copied from arch/arm64/include/asm/kprobes.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _RISCV_KPROBES_H
+#define _RISCV_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+
+#endif /* _RISCV_KPROBES_H */
diff --git a/arch/riscv/include/asm/linkage.h b/arch/riscv/include/asm/linkage.h
new file mode 100644
index 000000000000..b7b304ca89c4
--- /dev/null
+++ b/arch/riscv/include/asm/linkage.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_LINKAGE_H
+#define _ASM_RISCV_LINKAGE_H
+
+#define __ALIGN .balign 4
+#define __ALIGN_STR ".balign 4"
+
+#endif /* _ASM_RISCV_LINKAGE_H */
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
new file mode 100644
index 000000000000..66805cba9a27
--- /dev/null
+++ b/arch/riscv/include/asm/mmu.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef _ASM_RISCV_MMU_H
+#define _ASM_RISCV_MMU_H
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ void *vdso;
+} mm_context_t;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_MMU_H */
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
new file mode 100644
index 000000000000..de1fc1631fc4
--- /dev/null
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_MMU_CONTEXT_H
+#define _ASM_RISCV_MMU_CONTEXT_H
+
+#include <asm-generic/mm_hooks.h>
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/tlbflush.h>
+
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *task)
+{
+}
+
+/* Initialize context-related info for a new mm_struct */
+static inline int init_new_context(struct task_struct *task,
+ struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+
+static inline pgd_t *current_pgdir(void)
+{
+ return pfn_to_virt(csr_read(sptbr) & SPTBR_PPN);
+}
+
+static inline void set_pgdir(pgd_t *pgd)
+{
+ csr_write(sptbr, virt_to_pfn(pgd) | SPTBR_MODE);
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next, struct task_struct *task)
+{
+ if (likely(prev != next)) {
+ set_pgdir(next->pgd);
+ local_flush_tlb_all();
+ }
+}
+
+static inline void activate_mm(struct mm_struct *prev,
+ struct mm_struct *next)
+{
+ switch_mm(prev, next, NULL);
+}
+
+static inline void deactivate_mm(struct task_struct *task,
+ struct mm_struct *mm)
+{
+}
+
+#endif /* _ASM_RISCV_MMU_CONTEXT_H */
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
new file mode 100644
index 000000000000..06cfbb3aacbb
--- /dev/null
+++ b/arch/riscv/include/asm/page.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_PAGE_H
+#define _ASM_RISCV_PAGE_H
+
+#include <linux/pfn.h>
+#include <linux/const.h>
+
+#define PAGE_SHIFT (12)
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+/*
+ * PAGE_OFFSET -- the first address of the first page of memory.
+ * When not using MMU this corresponds to the first free page in
+ * physical memory (aligned on a page boundary).
+ */
+#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+
+#define KERN_VIRT_SIZE (-PAGE_OFFSET)
+
+#ifndef __ASSEMBLY__
+
+#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
+#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
+
+/* align addr on a size boundary - adjust address up/down if needed */
+#define _ALIGN_UP(addr, size) (((addr)+((size)-1))&(~((size)-1)))
+#define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1)))
+
+/* align addr on a size boundary - adjust address up if needed */
+#define _ALIGN(addr, size) _ALIGN_UP(addr, size)
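+
+/*
+ * For example, with 4 KiB pages PAGE_UP(0x1234) == 0x2000 and
+ * PAGE_DOWN(0x1234) == 0x1000; _ALIGN_UP()/_ALIGN_DOWN() generalize this to
+ * any power-of-two size.
+ */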
+
+#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
+#define copy_user_page(vto, vfrom, vaddr, topg) \
+ memcpy((vto), (vfrom), PAGE_SIZE)
+
+/*
+ * Use struct definitions to apply C type checking
+ */
+
+/* Page Global Directory entry */
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+
+/* Page Table entry */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#ifdef CONFIG_64BIT
+#define PTE_FMT "%016lx"
+#else
+#define PTE_FMT "%08lx"
+#endif
+
+extern unsigned long va_pa_offset;
+extern unsigned long pfn_base;
+
+extern unsigned long max_low_pfn;
+extern unsigned long min_low_pfn;
+
+#define __pa(x) ((unsigned long)(x) - va_pa_offset)
+#define __va(x) ((void *)((unsigned long) (x) + va_pa_offset))
+
+#define phys_to_pfn(phys) (PFN_DOWN(phys))
+#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
+
+#define virt_to_pfn(vaddr) (phys_to_pfn(__pa(vaddr)))
+#define pfn_to_virt(pfn) (__va(pfn_to_phys(pfn)))
+
+#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
+#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
+
+#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
+#define page_to_bus(page) (page_to_phys(page))
+#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
+
+#define pfn_valid(pfn) \
+ (((pfn) >= pfn_base) && (((pfn)-pfn_base) < max_mapnr))
+
+#define ARCH_PFN_OFFSET (pfn_base)
+
+#endif /* __ASSEMBLY__ */
+
+#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+/* vDSO support */
+/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
+#define __HAVE_ARCH_GATE_AREA
+
+#endif /* _ASM_RISCV_PAGE_H */
diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h
new file mode 100644
index 000000000000..0f2fc9ef20fc
--- /dev/null
+++ b/arch/riscv/include/asm/pci.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2016 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_RISCV_PCI_H
+#define __ASM_RISCV_PCI_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+
+#define PCIBIOS_MIN_IO 0
+#define PCIBIOS_MIN_MEM 0
+
+/* RISC-V shim does not initialize PCI bus */
+#define pcibios_assign_all_busses() 1
+
+/* We do not have an IOMMU */
+#define PCI_DMA_BUS_IS_PHYS 1
+
+extern int isa_dma_bridge_buggy;
+
+#ifdef CONFIG_PCI
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ /* no legacy IRQ on risc-v */
+ return -ENODEV;
+}
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ /* always show the domain in /proc */
+ return 1;
+}
+#endif /* CONFIG_PCI */
+
+#endif /* __ASM_RISCV_PCI_H */
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
new file mode 100644
index 000000000000..a79ed5faff3a
--- /dev/null
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_PGALLOC_H
+#define _ASM_RISCV_PGALLOC_H
+
+#include <linux/mm.h>
+#include <asm/tlb.h>
+
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+ pmd_t *pmd, pte_t *pte)
+{
+ unsigned long pfn = virt_to_pfn(pte);
+
+ set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+}
+
+static inline void pmd_populate(struct mm_struct *mm,
+ pmd_t *pmd, pgtable_t pte)
+{
+ unsigned long pfn = virt_to_pfn(page_address(pte));
+
+ set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ unsigned long pfn = virt_to_pfn(pmd);
+
+ set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+}
+#endif /* __PAGETABLE_PMD_FOLDED */
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *pgd;
+
+ pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+ if (likely(pgd != NULL)) {
+ memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ /* Copy kernel mappings */
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ init_mm.pgd + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return pgd;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_page((unsigned long)pgd);
+}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return (pmd_t *)__get_free_page(
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ free_page((unsigned long)pmd);
+}
+
+#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
+
+#endif /* __PAGETABLE_PMD_FOLDED */
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ return (pte_t *)__get_free_page(
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
+}
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *pte;
+
+ pte = alloc_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
+ if (likely(pte != NULL))
+ pgtable_page_ctor(pte);
+ return pte;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+ pgtable_page_dtor(pte);
+ __free_page(pte);
+}
+
+#define __pte_free_tlb(tlb, pte, buf) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), pte); \
+} while (0)
+
+static inline void check_pgt_cache(void)
+{
+}
+
+#endif /* _ASM_RISCV_PGALLOC_H */
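pgd_alloc() above zeroes the user half of a fresh page global directory and copies the kernel half from init_mm, so every address space shares the kernel mappings. Below is a host-side sketch of that split; the entry counts and the USER_PTRS_PER_PGD boundary are illustrative assumptions, not values taken from the patch.

/* User-space sketch of the pgd_alloc() split; values are illustrative. */
#include <stdio.h>
#include <string.h>

#define EX_PTRS_PER_PGD      512	/* 4 KiB page / 8-byte entries */
#define EX_USER_PTRS_PER_PGD 256	/* assumed: lower half for user space */

static unsigned long init_pgd[EX_PTRS_PER_PGD];	/* stands in for init_mm.pgd */

static void ex_pgd_alloc(unsigned long *pgd)
{
	/* User entries start empty ... */
	memset(pgd, 0, EX_USER_PTRS_PER_PGD * sizeof(pgd[0]));
	/* ... kernel entries are shared with the reference page table. */
	memcpy(pgd + EX_USER_PTRS_PER_PGD,
	       init_pgd + EX_USER_PTRS_PER_PGD,
	       (EX_PTRS_PER_PGD - EX_USER_PTRS_PER_PGD) * sizeof(pgd[0]));
}

int main(void)
{
	unsigned long pgd[EX_PTRS_PER_PGD];

	init_pgd[300] = 0xdeadbeef;	/* pretend kernel mapping */
	ex_pgd_alloc(pgd);
	printf("user entry 10    = %#lx\n", pgd[10]);
	printf("kernel entry 300 = %#lx\n", pgd[300]);
	return 0;
}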
diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h
new file mode 100644
index 000000000000..d61974b74182
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable-32.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_32_H
+#define _ASM_RISCV_PGTABLE_32_H
+
+#include <asm-generic/pgtable-nopmd.h>
+#include <linux/const.h>
+
+/* Size of region mapped by a page global directory */
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+#endif /* _ASM_RISCV_PGTABLE_32_H */
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
new file mode 100644
index 000000000000..7aa0ea9bd8bb
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_64_H
+#define _ASM_RISCV_PGTABLE_64_H
+
+#include <linux/const.h>
+
+#define PGDIR_SHIFT 30
+/* Size of region mapped by a page global directory */
+#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+#define PMD_SHIFT 21
+/* Size of region mapped by a page middle directory */
+#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE - 1))
+
+/* Page Middle Directory entry */
+typedef struct {
+ unsigned long pmd;
+} pmd_t;
+
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) })
+
+#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
+
+static inline int pud_present(pud_t pud)
+{
+ return (pud_val(pud) & _PAGE_PRESENT);
+}
+
+static inline int pud_none(pud_t pud)
+{
+ return (pud_val(pud) == 0);
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ return !pud_present(pud);
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+ *pudp = pud;
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+ set_pud(pudp, __pud(0));
+}
+
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+ return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
+}
+
+#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+ return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
+}
+
+static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
+{
+ return __pmd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+#define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+
+#endif /* _ASM_RISCV_PGTABLE_64_H */
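With PAGE_SHIFT 12, PMD_SHIFT 21 and PGDIR_SHIFT 30, a Sv39 virtual address splits into a 9-bit index at each level plus a 12-bit page offset. A throwaway sketch of that decomposition (the address is arbitrary), matching the pgd_index/pmd_index/pte_index macros:

/* Decompose a virtual address into PGD/PMD/PTE indices; illustrative only. */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT  12
#define EX_PMD_SHIFT   21
#define EX_PGDIR_SHIFT 30
#define EX_PTRS        512	/* entries per level: PAGE_SIZE / 8 */

int main(void)
{
	uint64_t addr = 0x12345678ULL;

	printf("pgd_index = %llu\n",
	       (unsigned long long)((addr >> EX_PGDIR_SHIFT) & (EX_PTRS - 1)));
	printf("pmd_index = %llu\n",
	       (unsigned long long)((addr >> EX_PMD_SHIFT) & (EX_PTRS - 1)));
	printf("pte_index = %llu\n",
	       (unsigned long long)((addr >> EX_PAGE_SHIFT) & (EX_PTRS - 1)));
	printf("offset    = 0x%llx\n",
	       (unsigned long long)(addr & ((1ULL << EX_PAGE_SHIFT) - 1)));
	return 0;
}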
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
new file mode 100644
index 000000000000..997ddbb1d370
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_BITS_H
+#define _ASM_RISCV_PGTABLE_BITS_H
+
+/*
+ * PTE format:
+ * | XLEN-1 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * PFN reserved for SW D A G U X W R V
+ */
+
+#define _PAGE_ACCESSED_OFFSET 6
+
+#define _PAGE_PRESENT (1 << 0)
+#define _PAGE_READ (1 << 1) /* Readable */
+#define _PAGE_WRITE (1 << 2) /* Writable */
+#define _PAGE_EXEC (1 << 3) /* Executable */
+#define _PAGE_USER (1 << 4) /* User */
+#define _PAGE_GLOBAL (1 << 5) /* Global */
+#define _PAGE_ACCESSED (1 << 6) /* Set by hardware on any access */
+#define _PAGE_DIRTY (1 << 7) /* Set by hardware on any write */
+#define _PAGE_SOFT (1 << 8) /* Reserved for software */
+
+#define _PAGE_SPECIAL _PAGE_SOFT
+#define _PAGE_TABLE _PAGE_PRESENT
+
+#define _PAGE_PFN_SHIFT 10
+
+/* Set of bits to preserve across pte_modify() */
+#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_EXEC | \
+ _PAGE_USER | _PAGE_GLOBAL))
+
+/* Advertise support for _PAGE_SPECIAL */
+#define __HAVE_ARCH_PTE_SPECIAL
+
+#endif /* _ASM_RISCV_PGTABLE_BITS_H */
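The PTE layout documented above keeps the PFN from bit 10 upwards and the permission bits in the low ten bits, so building and decoding an entry is plain shifting and masking. A host-runnable sketch using the same bit positions:

/* Encode/decode a PTE per the layout comment above; user-space sketch. */
#include <stdio.h>

#define EX_PRESENT   (1UL << 0)
#define EX_READ      (1UL << 1)
#define EX_WRITE     (1UL << 2)
#define EX_PFN_SHIFT 10

int main(void)
{
	unsigned long pfn = 0x80201;
	unsigned long pte = (pfn << EX_PFN_SHIFT) | EX_PRESENT | EX_READ | EX_WRITE;

	printf("pte         = %#lx\n", pte);
	printf("decoded pfn = %#lx\n", pte >> EX_PFN_SHIFT);
	printf("present     = %d\n", (pte & EX_PRESENT) != 0);
	printf("writable    = %d\n", (pte & EX_WRITE) != 0);
	return 0;
}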
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
new file mode 100644
index 000000000000..3399257780b2
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_H
+#define _ASM_RISCV_PGTABLE_H
+
+#include <linux/mmzone.h>
+
+#include <asm/pgtable-bits.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_MMU
+
+/* Page Upper Directory not used in RISC-V */
+#include <asm-generic/pgtable-nopud.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+#include <linux/mm_types.h>
+
+#ifdef CONFIG_64BIT
+#include <asm/pgtable-64.h>
+#else
+#include <asm/pgtable-32.h>
+#endif /* CONFIG_64BIT */
+
+/* Number of entries in the page global directory */
+#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
+/* Number of entries in the page table */
+#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
+
+/* Number of PGD entries that a user-mode program can use */
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0
+
+/* Page protection bits */
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
+
+#define PAGE_NONE __pgprot(0)
+#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
+#define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
+#define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | \
+ _PAGE_EXEC | _PAGE_WRITE)
+
+#define PAGE_COPY PAGE_READ
+#define PAGE_COPY_EXEC PAGE_EXEC
+#define PAGE_COPY_READ_EXEC PAGE_READ_EXEC
+#define PAGE_SHARED PAGE_WRITE
+#define PAGE_SHARED_EXEC PAGE_WRITE_EXEC
+
+#define _PAGE_KERNEL (_PAGE_READ \
+ | _PAGE_WRITE \
+ | _PAGE_PRESENT \
+ | _PAGE_ACCESSED \
+ | _PAGE_DIRTY)
+
+#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
+
+extern pgd_t swapper_pg_dir[];
+
+/* MAP_PRIVATE permissions: xwr (copy-on-write) */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READ
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_EXEC
+#define __P101 PAGE_READ_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_READ_EXEC
+
+/* MAP_SHARED permissions: xwr */
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READ
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_EXEC
+#define __S101 PAGE_READ_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero,
+ * used for zero-mapped memory areas, etc.
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+static inline int pmd_present(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _PAGE_PRESENT);
+}
+
+static inline int pmd_none(pmd_t pmd)
+{
+ return (pmd_val(pmd) == 0);
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+ return !pmd_present(pmd);
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ set_pmd(pmdp, __pmd(0));
+}
+
+
+static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
+{
+ return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+
+/* Locate an entry in the page global directory */
+static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
+{
+ return mm->pgd + pgd_index(addr);
+}
+/* Locate an entry in the kernel page global directory */
+#define pgd_offset_k(addr) pgd_offset(&init_mm, (addr))
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+ return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
+}
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
+}
+
+/* Yields the page frame number (PFN) of a page table entry */
+static inline unsigned long pte_pfn(pte_t pte)
+{
+ return (pte_val(pte) >> _PAGE_PFN_SHIFT);
+}
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+/* Constructs a page table entry */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+ return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+static inline pte_t mk_pte(struct page *page, pgprot_t prot)
+{
+ return pfn_pte(page_to_pfn(page), prot);
+}
+
+#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
+{
+ return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
+}
+
+#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte) ((void)(pte))
+
+/*
+ * Certain architectures need to do special things when PTEs within
+ * a page table are directly modified. Thus, the following hook is
+ * made available.
+ */
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+}
+
+static inline void set_pte_at(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep, pte_t pteval)
+{
+ set_pte(ptep, pteval);
+}
+
+static inline void pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ set_pte_at(mm, addr, ptep, __pte(0));
+}
+
+static inline int pte_present(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_PRESENT);
+}
+
+static inline int pte_none(pte_t pte)
+{
+ return (pte_val(pte) == 0);
+}
+
+/* static inline int pte_read(pte_t pte) */
+
+static inline int pte_write(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_WRITE;
+}
+
+static inline int pte_huge(pte_t pte)
+{
+ return pte_present(pte)
+ && (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+}
+
+/* static inline int pte_exec(pte_t pte) */
+
+static inline int pte_dirty(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+static inline int pte_special(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SPECIAL;
+}
+
+/* static inline pte_t pte_rdprotect(pte_t pte) */
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_WRITE));
+}
+
+/* static inline pte_t pte_mkread(pte_t pte) */
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_WRITE);
+}
+
+/* static inline pte_t pte_mkexec(pte_t pte) */
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+/* Modify page protection bits */
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
+
+
+/* Commit new configuration to MMU hardware */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ /*
+ * The kernel assumes that TLBs don't cache invalid entries, but
+ * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
+ * cache flush; it is necessary even after writing invalid entries.
+ * Relying on flush_tlb_fix_spurious_fault would suffice, but
+ * the extra traps reduce performance. So, eagerly SFENCE.VMA.
+ */
+ local_flush_tlb_page(address);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return pte_val(pte_a) == pte_val(pte_b);
+}
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty)
+{
+ if (!pte_same(*ptep, entry))
+ set_pte_at(vma->vm_mm, address, ptep, entry);
+ /*
+ * update_mmu_cache will unconditionally execute, handling both
+ * the case that the PTE changed and the spurious fault case.
+ */
+ return true;
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
+}
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ if (!pte_young(*ptep))
+ return 0;
+ return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
+}
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ /*
+ * This comment is borrowed from x86, but applies equally to RISC-V:
+ *
+ * Clearing the accessed bit without a TLB flush
+ * doesn't cause data corruption. [ It could cause incorrect
+ * page aging and the (mistaken) reclaim of hot pages, but the
+ * chance of that should be relatively low. ]
+ *
+ * So as a performance optimization don't flush the TLB when
+ * clearing the accessed bit, it will eventually be flushed by
+ * a context switch or a VM operation anyway. [ In the rare
+ * event of it not getting flushed for a long time the delay
+ * shouldn't really matter because there's no real memory
+ * pressure for swapout to react to. ]
+ */
+ return ptep_test_and_clear_young(vma, address, ptep);
+}
+
+/*
+ * Encode and decode a swap entry
+ *
+ * Format of swap PTE:
+ * bit 0: _PAGE_PRESENT (zero)
+ * bit 1: reserved for future use (zero)
+ * bits 2 to 6: swap type
+ * bits 7 to XLEN-1: swap offset
+ */
+#define __SWP_TYPE_SHIFT 2
+#define __SWP_TYPE_BITS 5
+#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+
+#define MAX_SWAPFILES_CHECK() \
+ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
+
+#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t) \
+ { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#ifdef CONFIG_FLATMEM
+#define kern_addr_valid(addr) (1) /* FIXME */
+#endif
+
+extern void paging_init(void);
+
+static inline void pgtable_cache_init(void)
+{
+ /* No page table caches to initialize */
+}
+
+#endif /* CONFIG_MMU */
+
+#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END (PAGE_OFFSET - 1)
+#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
+
+/*
+ * Task size is 0x4000000000 for RV64 or 0xb800000 for RV32.
+ * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
+ */
+#ifdef CONFIG_64BIT
+#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
+#else
+#define TASK_SIZE VMALLOC_START
+#endif
+
+#include <asm-generic/pgtable.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PGTABLE_H */
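The swap-PTE format documented above packs the swap type into bits 2 to 6 and the offset from bit 7 up, leaving bit 0 clear so a swap entry can never look present. A quick round-trip check in plain C; the type and offset values are arbitrary:

/* Round-trip the swap entry encoding described above; user-space check. */
#include <stdio.h>
#include <assert.h>

#define EX_TYPE_SHIFT   2
#define EX_TYPE_BITS    5
#define EX_TYPE_MASK    ((1UL << EX_TYPE_BITS) - 1)
#define EX_OFFSET_SHIFT (EX_TYPE_BITS + EX_TYPE_SHIFT)

int main(void)
{
	unsigned long type = 3, offset = 0x12345;
	unsigned long entry = (type << EX_TYPE_SHIFT) | (offset << EX_OFFSET_SHIFT);

	assert(((entry >> EX_TYPE_SHIFT) & EX_TYPE_MASK) == type);
	assert((entry >> EX_OFFSET_SHIFT) == offset);
	assert((entry & 1) == 0);	/* never looks _PAGE_PRESENT */
	printf("swap entry = %#lx (type %lu, offset %#lx)\n", entry, type, offset);
	return 0;
}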
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
new file mode 100644
index 000000000000..3fe4af8147d2
--- /dev/null
+++ b/arch/riscv/include/asm/processor.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_PROCESSOR_H
+#define _ASM_RISCV_PROCESSOR_H
+
+#include <linux/const.h>
+
+#include <asm/ptrace.h>
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1)
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+#define STACK_ALIGN 16
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+struct pt_regs;
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+/* CPU-specific state of a task */
+struct thread_struct {
+ /* Callee-saved registers */
+ unsigned long ra;
+ unsigned long sp; /* Kernel mode stack */
+ unsigned long s[12]; /* s[0]: frame pointer */
+ struct __riscv_d_ext_state fstate;
+};
+
+#define INIT_THREAD { \
+ .sp = sizeof(init_stack) + (long)&init_stack, \
+}
+
+#define task_pt_regs(tsk) \
+ ((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE \
+ - ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->sepc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
+
+
+/* Do necessary setup to start up a newly executed thread. */
+extern void start_thread(struct pt_regs *regs,
+ unsigned long pc, unsigned long sp);
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+
+static inline void cpu_relax(void)
+{
+#ifdef __riscv_muldiv
+ int dummy;
+ /* In lieu of a halt instruction, induce a long-latency stall. */
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+#endif
+ barrier();
+}
+
+static inline void wait_for_interrupt(void)
+{
+ __asm__ __volatile__ ("wfi");
+}
+
+struct device_node;
+extern int riscv_of_processor_hart(struct device_node *node);
+
+extern void riscv_fill_hwcap(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
new file mode 100644
index 000000000000..93b8956e25e4
--- /dev/null
+++ b/arch/riscv/include/asm/ptrace.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_PTRACE_H
+#define _ASM_RISCV_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+#include <asm/csr.h>
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs {
+ unsigned long sepc;
+ unsigned long ra;
+ unsigned long sp;
+ unsigned long gp;
+ unsigned long tp;
+ unsigned long t0;
+ unsigned long t1;
+ unsigned long t2;
+ unsigned long s0;
+ unsigned long s1;
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long s2;
+ unsigned long s3;
+ unsigned long s4;
+ unsigned long s5;
+ unsigned long s6;
+ unsigned long s7;
+ unsigned long s8;
+ unsigned long s9;
+ unsigned long s10;
+ unsigned long s11;
+ unsigned long t3;
+ unsigned long t4;
+ unsigned long t5;
+ unsigned long t6;
+ /* Supervisor CSRs */
+ unsigned long sstatus;
+ unsigned long sbadaddr;
+ unsigned long scause;
+ /* a0 value before the syscall */
+ unsigned long orig_a0;
+};
+
+#ifdef CONFIG_64BIT
+#define REG_FMT "%016lx"
+#else
+#define REG_FMT "%08lx"
+#endif
+
+#define user_mode(regs) (((regs)->sstatus & SR_PS) == 0)
+
+
+/* Helpers for working with the instruction pointer */
+#define GET_IP(regs) ((regs)->sepc)
+#define SET_IP(regs, val) (GET_IP(regs) = (val))
+
+static inline unsigned long instruction_pointer(struct pt_regs *regs)
+{
+ return GET_IP(regs);
+}
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ SET_IP(regs, val);
+}
+
+#define profile_pc(regs) instruction_pointer(regs)
+
+/* Helpers for working with the user stack pointer */
+#define GET_USP(regs) ((regs)->sp)
+#define SET_USP(regs, val) (GET_USP(regs) = (val))
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return GET_USP(regs);
+}
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ SET_USP(regs, val);
+}
+
+/* Helpers for working with the frame pointer */
+#define GET_FP(regs) ((regs)->s0)
+#define SET_FP(regs, val) (GET_FP(regs) = (val))
+
+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+ return GET_FP(regs);
+}
+static inline void frame_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ SET_FP(regs, val);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PTRACE_H */
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
new file mode 100644
index 000000000000..b6bb10b92fe2
--- /dev/null
+++ b/arch/riscv/include/asm/sbi.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_SBI_H
+#define _ASM_RISCV_SBI_H
+
+#include <linux/types.h>
+
+#define SBI_SET_TIMER 0
+#define SBI_CONSOLE_PUTCHAR 1
+#define SBI_CONSOLE_GETCHAR 2
+#define SBI_CLEAR_IPI 3
+#define SBI_SEND_IPI 4
+#define SBI_REMOTE_FENCE_I 5
+#define SBI_REMOTE_SFENCE_VMA 6
+#define SBI_REMOTE_SFENCE_VMA_ASID 7
+#define SBI_SHUTDOWN 8
+
+#define SBI_CALL(which, arg0, arg1, arg2) ({ \
+ register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0); \
+ register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1); \
+ register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2); \
+ register uintptr_t a7 asm ("a7") = (uintptr_t)(which); \
+ asm volatile ("ecall" \
+ : "+r" (a0) \
+ : "r" (a1), "r" (a2), "r" (a7) \
+ : "memory"); \
+ a0; \
+})
+
+/* Lazy implementations until SBI is finalized */
+#define SBI_CALL_0(which) SBI_CALL(which, 0, 0, 0)
+#define SBI_CALL_1(which, arg0) SBI_CALL(which, arg0, 0, 0)
+#define SBI_CALL_2(which, arg0, arg1) SBI_CALL(which, arg0, arg1, 0)
+
+static inline void sbi_console_putchar(int ch)
+{
+ SBI_CALL_1(SBI_CONSOLE_PUTCHAR, ch);
+}
+
+static inline int sbi_console_getchar(void)
+{
+ return SBI_CALL_0(SBI_CONSOLE_GETCHAR);
+}
+
+static inline void sbi_set_timer(uint64_t stime_value)
+{
+#if __riscv_xlen == 32
+ SBI_CALL_2(SBI_SET_TIMER, stime_value, stime_value >> 32);
+#else
+ SBI_CALL_1(SBI_SET_TIMER, stime_value);
+#endif
+}
+
+static inline void sbi_shutdown(void)
+{
+ SBI_CALL_0(SBI_SHUTDOWN);
+}
+
+static inline void sbi_clear_ipi(void)
+{
+ SBI_CALL_0(SBI_CLEAR_IPI);
+}
+
+static inline void sbi_send_ipi(const unsigned long *hart_mask)
+{
+ SBI_CALL_1(SBI_SEND_IPI, hart_mask);
+}
+
+static inline void sbi_remote_fence_i(const unsigned long *hart_mask)
+{
+ SBI_CALL_1(SBI_REMOTE_FENCE_I, hart_mask);
+}
+
+static inline void sbi_remote_sfence_vma(const unsigned long *hart_mask,
+ unsigned long start,
+ unsigned long size)
+{
+ SBI_CALL_1(SBI_REMOTE_SFENCE_VMA, hart_mask);
+}
+
+static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
+ unsigned long start,
+ unsigned long size,
+ unsigned long asid)
+{
+ SBI_CALL_1(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask);
+}
+
+#endif /* _ASM_RISCV_SBI_H */
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
new file mode 100644
index 000000000000..85e4220839b0
--- /dev/null
+++ b/arch/riscv/include/asm/smp.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_SMP_H
+#define _ASM_RISCV_SMP_H
+
+/* This both needs asm-offsets.h and is used when generating it. */
+#ifndef GENERATING_ASM_OFFSETS
+#include <asm/asm-offsets.h>
+#endif
+
+#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
+
+#ifdef CONFIG_SMP
+
+/* SMP initialization hook for setup_arch */
+void __init init_clockevent(void);
+
+/* SMP initialization hook for setup_arch */
+void __init setup_smp(void);
+
+/* Hook for the generic smp_call_function_many() routine. */
+void arch_send_call_function_ipi_mask(struct cpumask *mask);
+
+/* Hook for the generic smp_call_function_single() routine. */
+void arch_send_call_function_single_ipi(int cpu);
+
+/*
+ * This is particularly ugly: it appears we can't actually get the definition
+ * of task_struct here, but we need access to the CPU this task is running on.
+ * Instead of using C we're using asm-offsets.h to get the current processor
+ * ID.
+ */
+#define raw_smp_processor_id() (*((int*)((char*)get_current() + TASK_TI_CPU)))
+
+/* Interprocessor interrupt handler */
+irqreturn_t handle_ipi(void);
+
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_RISCV_SMP_H */
diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
new file mode 100644
index 000000000000..04c71d938afd
--- /dev/null
+++ b/arch/riscv/include/asm/spinlock.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_SPINLOCK_H
+#define _ASM_RISCV_SPINLOCK_H
+
+#include <linux/kernel.h>
+#include <asm/current.h>
+
+/*
+ * Simple spin lock operations. These provide no fairness guarantees.
+ */
+
+/* FIXME: Replace this with a ticket lock, like MIPS. */
+
+#define arch_spin_is_locked(x) ((x)->lock != 0)
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ __asm__ __volatile__ (
+ "amoswap.w.rl x0, x0, %0"
+ : "=A" (lock->lock)
+ :: "memory");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ int tmp = 1, busy;
+
+ __asm__ __volatile__ (
+ "amoswap.w.aq %0, %2, %1"
+ : "=r" (busy), "+A" (lock->lock)
+ : "r" (tmp)
+ : "memory");
+
+ return !busy;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ while (1) {
+ if (arch_spin_is_locked(lock))
+ continue;
+
+ if (arch_spin_trylock(lock))
+ break;
+ }
+}
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ smp_rmb();
+ do {
+ cpu_relax();
+ } while (arch_spin_is_locked(lock));
+ smp_acquire__after_ctrl_dep();
+}
+
+/***********************************************************/
+
+static inline void arch_read_lock(arch_rwlock_t *lock)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ "1: lr.w %1, %0\n"
+ " bltz %1, 1b\n"
+ " addi %1, %1, 1\n"
+ " sc.w.aq %1, %1, %0\n"
+ " bnez %1, 1b\n"
+ : "+A" (lock->lock), "=&r" (tmp)
+ :: "memory");
+}
+
+static inline void arch_write_lock(arch_rwlock_t *lock)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ "1: lr.w %1, %0\n"
+ " bnez %1, 1b\n"
+ " li %1, -1\n"
+ " sc.w.aq %1, %1, %0\n"
+ " bnez %1, 1b\n"
+ : "+A" (lock->lock), "=&r" (tmp)
+ :: "memory");
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+ int busy;
+
+ __asm__ __volatile__(
+ "1: lr.w %1, %0\n"
+ " bltz %1, 1f\n"
+ " addi %1, %1, 1\n"
+ " sc.w.aq %1, %1, %0\n"
+ " bnez %1, 1b\n"
+ "1:\n"
+ : "+A" (lock->lock), "=&r" (busy)
+ :: "memory");
+
+ return !busy;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+ int busy;
+
+ __asm__ __volatile__(
+ "1: lr.w %1, %0\n"
+ " bnez %1, 1f\n"
+ " li %1, -1\n"
+ " sc.w.aq %1, %1, %0\n"
+ " bnez %1, 1b\n"
+ "1:\n"
+ : "+A" (lock->lock), "=&r" (busy)
+ :: "memory");
+
+ return !busy;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *lock)
+{
+ __asm__ __volatile__(
+ "amoadd.w.rl x0, %1, %0"
+ : "+A" (lock->lock)
+ : "r" (-1)
+ : "memory");
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *lock)
+{
+ __asm__ __volatile__ (
+ "amoswap.w.rl x0, x0, %0"
+ : "=A" (lock->lock)
+ :: "memory");
+}
+
+#endif /* _ASM_RISCV_SPINLOCK_H */
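The lock above is a simple test-and-test-and-set: spin on plain reads until the word looks free, then attempt an atomic swap. The following is a portable user-space model of the same idea using C11 atomics; it is not the kernel's amoswap path and does not reproduce the exact acquire/release annotations of the real code.

/* Test-and-test-and-set spinlock modelled with C11 atomics; sketch only. */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int lock; } ex_spinlock_t;

static int ex_trylock(ex_spinlock_t *l)
{
	/* Atomic swap, analogous to amoswap.w.aq: returns the old value. */
	return atomic_exchange_explicit(&l->lock, 1, memory_order_acquire) == 0;
}

static void ex_lock(ex_spinlock_t *l)
{
	for (;;) {
		if (atomic_load_explicit(&l->lock, memory_order_relaxed))
			continue;	/* spin on a plain read first */
		if (ex_trylock(l))
			break;		/* swap only once it looked free */
	}
}

static void ex_unlock(ex_spinlock_t *l)
{
	atomic_store_explicit(&l->lock, 0, memory_order_release);
}

int main(void)
{
	ex_spinlock_t l = { 0 };

	ex_lock(&l);
	printf("locked: %d\n", atomic_load(&l.lock));
	ex_unlock(&l);
	printf("unlocked: %d\n", atomic_load(&l.lock));
	return 0;
}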
diff --git a/arch/riscv/include/asm/spinlock_types.h b/arch/riscv/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..83ac4ac9e2ac
--- /dev/null
+++ b/arch/riscv/include/asm/spinlock_types.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_SPINLOCK_TYPES_H
+#define _ASM_RISCV_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int lock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+
+#endif /* _ASM_RISCV_SPINLOCK_TYPES_H */
diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
new file mode 100644
index 000000000000..9210fcf4ff52
--- /dev/null
+++ b/arch/riscv/include/asm/string.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2013 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_STRING_H
+#define _ASM_RISCV_STRING_H
+
+#include <linux/types.h>
+#include <linux/linkage.h>
+
+#define __HAVE_ARCH_MEMSET
+extern asmlinkage void *memset(void *, int, size_t);
+
+#define __HAVE_ARCH_MEMCPY
+extern asmlinkage void *memcpy(void *, const void *, size_t);
+
+#endif /* _ASM_RISCV_STRING_H */
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
new file mode 100644
index 000000000000..dd6b05bff75b
--- /dev/null
+++ b/arch/riscv/include/asm/switch_to.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_SWITCH_TO_H
+#define _ASM_RISCV_SWITCH_TO_H
+
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/csr.h>
+
+extern void __fstate_save(struct task_struct *save_to);
+extern void __fstate_restore(struct task_struct *restore_from);
+
+static inline void __fstate_clean(struct pt_regs *regs)
+{
+	regs->sstatus = (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
+}
+
+static inline void fstate_save(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((regs->sstatus & SR_FS) == SR_FS_DIRTY) {
+ __fstate_save(task);
+ __fstate_clean(regs);
+ }
+}
+
+static inline void fstate_restore(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((regs->sstatus & SR_FS) != SR_FS_OFF) {
+ __fstate_restore(task);
+ __fstate_clean(regs);
+ }
+}
+
+static inline void __switch_to_aux(struct task_struct *prev,
+ struct task_struct *next)
+{
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(prev);
+ if (unlikely(regs->sstatus & SR_SD))
+ fstate_save(prev, regs);
+ fstate_restore(next, task_pt_regs(next));
+}
+
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+
+#define switch_to(prev, next, last) \
+do { \
+ struct task_struct *__prev = (prev); \
+ struct task_struct *__next = (next); \
+ __switch_to_aux(__prev, __next); \
+ ((last) = __switch_to(__prev, __next)); \
+} while (0)
+
+#endif /* _ASM_RISCV_SWITCH_TO_H */
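switch_to() only spills the FPU registers when the sstatus FS field says they are dirty, and skips the restore entirely when the next task has the FPU off. A tiny state-machine sketch of that policy follows; the state names mirror the FS encoding only loosely and the counters exist purely for illustration.

/* Lazy FP save/restore policy as a user-space state machine; sketch only. */
#include <stdio.h>

enum ex_fs { FS_OFF, FS_INITIAL, FS_CLEAN, FS_DIRTY };

static int saves, restores;

static enum ex_fs ex_fstate_save(enum ex_fs fs)
{
	if (fs == FS_DIRTY) {	/* only dirty state needs spilling */
		saves++;
		return FS_CLEAN;
	}
	return fs;
}

static enum ex_fs ex_fstate_restore(enum ex_fs fs)
{
	if (fs != FS_OFF) {	/* nothing to reload if the FPU is off */
		restores++;
		return FS_CLEAN;
	}
	return fs;
}

int main(void)
{
	enum ex_fs prev = FS_DIRTY, next = FS_OFF;

	prev = ex_fstate_save(prev);	/* spills once */
	next = ex_fstate_restore(next);	/* skipped: next task never used the FPU */
	printf("saves=%d restores=%d prev=%d next=%d\n", saves, restores, prev, next);
	return 0;
}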
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
new file mode 100644
index 000000000000..8d25f8904c00
--- /dev/null
+++ b/arch/riscv/include/asm/syscall.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ * Copyright 2015 Regents of the University of California, Berkeley
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_RISCV_SYSCALL_H
+#define _ASM_RISCV_SYSCALL_H
+
+#include <linux/sched.h>
+#include <linux/err.h>
+
+/* The array of function pointers for syscalls. */
+extern void *sys_call_table[];
+
+/*
+ * Only the low 32 bits of orig_r0 are meaningful, so we return int.
+ * This importantly ignores the high bits on 64-bit, so comparisons
+ * sign-extend the low 32 bits.
+ */
+static inline int syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->a7;
+}
+
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int sysno)
+{
+ regs->a7 = sysno;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->a0 = regs->orig_a0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long error = regs->a0;
+
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->a0 = (long) error ?: val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+ if (i == 0) {
+ args[0] = regs->orig_a0;
+ args++;
+ i++;
+ n--;
+ }
+	/* Pointer arithmetic on &regs->a1 is already scaled by the element size. */
+	memcpy(args, &regs->a1 + i, n * sizeof(args[0]));
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+ if (i == 0) {
+ regs->orig_a0 = args[0];
+ args++;
+ i++;
+ n--;
+ }
+	memcpy(&regs->a1 + i, args, n * sizeof(regs->a0));
+}
+
+#endif /* _ASM_RISCV_SYSCALL_H */
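The memcpy calls above rely on C pointer arithmetic being scaled by the element size: &regs->a1 + i already lands on the i-th register after a1, so multiplying by sizeof() again would overshoot the array. A small demonstration with a stand-in structure:

/* Why '&regs->a1 + i' is already scaled; user-space demonstration only. */
#include <stdio.h>
#include <stddef.h>

struct ex_regs { unsigned long a1, a2, a3, a4, a5; };

int main(void)
{
	struct ex_regs r = { 1, 2, 3, 4, 5 };
	unsigned long *base = &r.a1;
	size_t i = 2;

	printf("base[%zu]    = %lu\n", i, *(base + i));	/* a3 == 3 */
	printf("byte offset = %zu\n",
	       (size_t)((char *)(base + i) - (char *)base));	/* 16 on LP64, not 2 */
	return 0;
}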
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
new file mode 100644
index 000000000000..22c3536ed281
--- /dev/null
+++ b/arch/riscv/include/asm/thread_info.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_THREAD_INFO_H
+#define _ASM_RISCV_THREAD_INFO_H
+
+#include <asm/page.h>
+#include <linux/const.h>
+
+/* thread information allocation */
+#define THREAD_SIZE_ORDER (1)
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/processor.h>
+#include <asm/csr.h>
+
+typedef unsigned long mm_segment_t;
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - if the members of this struct change, the assembly constants
+ * in asm-offsets.c must be updated accordingly
+ * - thread_info is included in task_struct at an offset of 0. This means that
+ * tp points to both thread_info and task_struct.
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0=>preemptible, <0=>BUG */
+ mm_segment_t addr_limit;
+ /*
+ * These stack pointers are overwritten on every system call or
+	 * exception. SP is also saved to the stack so it can be recovered when
+ * overwritten.
+ */
+ long kernel_sp; /* Kernel stack pointer */
+ long user_sp; /* User stack pointer */
+ int cpu;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .flags = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+}
+
+#define init_stack (init_thread_union.stack)
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in lowest half-word
+ * - other flags in upper half-word(s)
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
+#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
+#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+
+#define _TIF_WORK_MASK \
+ (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)
+
+#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
new file mode 100644
index 000000000000..3df4932d8964
--- /dev/null
+++ b/arch/riscv/include/asm/timex.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_TIMEX_H
+#define _ASM_RISCV_TIMEX_H
+
+#include <asm/param.h>
+
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+ cycles_t n;
+
+ __asm__ __volatile__ (
+ "rdtime %0"
+ : "=r" (n));
+ return n;
+}
+
+#ifdef CONFIG_64BIT
+static inline uint64_t get_cycles64(void)
+{
+ return get_cycles();
+}
+#else
+static inline uint64_t get_cycles64(void)
+{
+ u32 lo, hi, tmp;
+ __asm__ __volatile__ (
+ "1:\n"
+ "rdtimeh %0\n"
+ "rdtime %1\n"
+ "rdtimeh %2\n"
+ "bne %0, %2, 1b"
+ : "=&r" (hi), "=&r" (lo), "=&r" (tmp));
+ return ((u64)hi << 32) | lo;
+}
+#endif
+
+#define ARCH_HAS_READ_CURRENT_TIMER
+
+static inline int read_current_timer(unsigned long *timer_val)
+{
+ *timer_val = get_cycles();
+ return 0;
+}
+
+#endif /* _ASM_RISCV_TIMEX_H */
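On RV32 the 64-bit timer has to be read as two 32-bit halves, and the high half is re-read to detect a carry between the two reads. The same retry loop in portable C against a simulated counter; the starting value is contrived to force one retry:

/* hi/lo/hi retry read of a split 64-bit counter; user-space model only. */
#include <stdio.h>
#include <stdint.h>

static uint64_t fake_time = 0xffffffffULL;	/* about to carry into the high word */

static uint32_t read_lo(void) { return (uint32_t)fake_time; }
static uint32_t read_hi(void)
{
	uint32_t hi = (uint32_t)(fake_time >> 32);

	fake_time++;		/* pretend time advances between reads */
	return hi;
}

static uint64_t ex_get_cycles64(void)
{
	uint32_t hi, lo, tmp;

	do {
		hi  = read_hi();
		lo  = read_lo();
		tmp = read_hi();
	} while (hi != tmp);	/* high word changed: a carry slipped in, retry */
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("cycles = 0x%llx\n", (unsigned long long)ex_get_cycles64());
	return 0;
}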
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
new file mode 100644
index 000000000000..c229509288ea
--- /dev/null
+++ b/arch/riscv/include/asm/tlb.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_TLB_H
+#define _ASM_RISCV_TLB_H
+
+#include <asm-generic/tlb.h>
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ flush_tlb_mm(tlb->mm);
+}
+
+#endif /* _ASM_RISCV_TLB_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
new file mode 100644
index 000000000000..5ee4ae370b5e
--- /dev/null
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_TLBFLUSH_H
+#define _ASM_RISCV_TLBFLUSH_H
+
+#ifdef CONFIG_MMU
+
+/* Flush entire local TLB */
+static inline void local_flush_tlb_all(void)
+{
+ __asm__ __volatile__ ("sfence.vma" : : : "memory");
+}
+
+/* Flush one page from local TLB */
+static inline void local_flush_tlb_page(unsigned long addr)
+{
+ __asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
+}
+
+#ifndef CONFIG_SMP
+
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
+#define flush_tlb_range(vma, start, end) local_flush_tlb_all()
+
+#else /* CONFIG_SMP */
+
+#include <asm/sbi.h>
+
+#define flush_tlb_all() sbi_remote_sfence_vma(0, 0, -1)
+#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0)
+#define flush_tlb_range(vma, start, end) \
+ sbi_remote_sfence_vma(0, start, (end) - (start))
+
+#endif /* CONFIG_SMP */
+
+/* Flush the TLB entries of the specified mm context */
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ flush_tlb_all();
+}
+
+/* Flush a range of kernel pages */
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all();
+}
+
+#endif /* CONFIG_MMU */
+
+#endif /* _ASM_RISCV_TLBFLUSH_H */
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
new file mode 100644
index 000000000000..27b90d64814b
--- /dev/null
+++ b/arch/riscv/include/asm/uaccess.h
@@ -0,0 +1,513 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file was copied from include/asm-generic/uaccess.h
+ */
+
+#ifndef _ASM_RISCV_UACCESS_H
+#define _ASM_RISCV_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <linux/thread_info.h>
+#include <asm/byteorder.h>
+#include <asm/asm.h>
+
+#define __enable_user_access() \
+ __asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
+#define __disable_user_access() \
+ __asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed; with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define KERNEL_DS (~0UL)
+#define USER_DS (TASK_SIZE)
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current_thread_info()->addr_limit)
+
+static inline void set_fs(mm_segment_t fs)
+{
+ current_thread_info()->addr_limit = fs;
+}
+
+#define segment_eq(a, b) ((a) == (b))
+
+#define user_addr_max() (get_fs())
+
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/**
+ * access_ok: - Checks if a user space pointer is valid
+ * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
+ * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
+ * to write to a block, it is always safe to read from it.
+ * @addr: User space pointer to start of block to check
+ * @size: Size of block to check
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Checks if a pointer to a block of memory in user space is valid.
+ *
+ * Returns true (nonzero) if the memory block may be valid, false (zero)
+ * if it is definitely invalid.
+ *
+ * Note that, depending on architecture, this function probably just
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+#define access_ok(type, addr, size) ({ \
+ __chk_user_ptr(addr); \
+ likely(__access_ok((unsigned long __force)(addr), (size))); \
+})
+
+/*
+ * Ensure that the range [addr, addr+size) is within the process's
+ * address space
+ */
+static inline int __access_ok(unsigned long addr, unsigned long size)
+{
+ const mm_segment_t fs = get_fs();
+
+ return (size <= fs) && (addr <= (fs - size));
+}
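The check above is written as "size <= fs && addr <= fs - size" rather than "addr + size <= fs" so that an enormous size cannot wrap addr + size around to a small value and slip past the limit. A quick user-space illustration of the difference; the limit here is an arbitrary stand-in, not the real TASK_SIZE:

/* Overflow-safe range check vs. the naive form; user-space illustration. */
#include <stdio.h>

#define EX_LIMIT 0x40000000UL	/* stand-in for the user address limit */

static int naive_ok(unsigned long addr, unsigned long size)
{
	return addr + size <= EX_LIMIT;		/* wraps for huge sizes */
}

static int safe_ok(unsigned long addr, unsigned long size)
{
	return size <= EX_LIMIT && addr <= EX_LIMIT - size;
}

int main(void)
{
	unsigned long addr = 0x1000, size = ~0UL;	/* size so large the sum overflows */

	printf("naive: %d (wrongly accepts)\n", naive_ok(addr, size));
	printf("safe : %d\n", safe_ok(addr, size));
	return 0;
}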
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+ unsigned long insn, fixup;
+};
+
+extern int fixup_exception(struct pt_regs *state);
+
+#if defined(__LITTLE_ENDIAN)
+#define __MSW 1
+#define __LSW 0
+#elif defined(__BIG_ENDIAN)
+#define __MSW 0
+#define __LSW 1
+#else
+#error "Unknown endianness"
+#endif
+
+/*
+ * The "__xxx" versions of the user access functions do not verify the address
+ * space - it must have been done previously with a separate "access_ok()"
+ * call.
+ */
+
+#ifdef CONFIG_MMU
+#define __get_user_asm(insn, x, ptr, err) \
+do { \
+ uintptr_t __tmp; \
+ __typeof__(x) __x; \
+ __enable_user_access(); \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " " insn " %1, %3\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .balign 4\n" \
+ "3:\n" \
+ " li %0, %4\n" \
+ " li %1, 0\n" \
+ " jump 2b, %2\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .balign " RISCV_SZPTR "\n" \
+ " " RISCV_PTR " 1b, 3b\n" \
+ " .previous" \
+ : "+r" (err), "=&r" (__x), "=r" (__tmp) \
+ : "m" (*(ptr)), "i" (-EFAULT)); \
+ __disable_user_access(); \
+ (x) = __x; \
+} while (0)
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_64BIT
+#define __get_user_8(x, ptr, err) \
+ __get_user_asm("ld", x, ptr, err)
+#else /* !CONFIG_64BIT */
+#ifdef CONFIG_MMU
+#define __get_user_8(x, ptr, err) \
+do { \
+ u32 __user *__ptr = (u32 __user *)(ptr); \
+ u32 __lo, __hi; \
+ uintptr_t __tmp; \
+ __enable_user_access(); \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " lw %1, %4\n" \
+ "2:\n" \
+ " lw %2, %5\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .balign 4\n" \
+ "4:\n" \
+ " li %0, %6\n" \
+ " li %1, 0\n" \
+ " li %2, 0\n" \
+ " jump 3b, %3\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .balign " RISCV_SZPTR "\n" \
+ " " RISCV_PTR " 1b, 4b\n" \
+ " " RISCV_PTR " 2b, 4b\n" \
+ " .previous" \
+ : "+r" (err), "=&r" (__lo), "=r" (__hi), \
+ "=r" (__tmp) \
+ : "m" (__ptr[__LSW]), "m" (__ptr[__MSW]), \
+ "i" (-EFAULT)); \
+ __disable_user_access(); \
+ (x) = (__typeof__(x))((__typeof__((x)-(x)))( \
+ (((u64)__hi << 32) | __lo))); \
+} while (0)
+#endif /* CONFIG_MMU */
+#endif /* CONFIG_64BIT */
+
+
+/**
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user(x, ptr) \
+({ \
+ register long __gu_err = 0; \
+ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ __chk_user_ptr(__gu_ptr); \
+ switch (sizeof(*__gu_ptr)) { \
+ case 1: \
+ __get_user_asm("lb", (x), __gu_ptr, __gu_err); \
+ break; \
+ case 2: \
+ __get_user_asm("lh", (x), __gu_ptr, __gu_err); \
+ break; \
+ case 4: \
+ __get_user_asm("lw", (x), __gu_ptr, __gu_err); \
+ break; \
+ case 8: \
+ __get_user_8((x), __gu_ptr, __gu_err); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __gu_err; \
+})
+
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user(x, ptr) \
+({ \
+ const __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
+ __get_user((x), __p) : \
+ ((x) = 0, -EFAULT); \
+})
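[Editor's note: a hypothetical caller, not from this patch, showing the checked get_user() variant a driver or syscall handler would normally use to read one scalar from userspace.]

    /* Hypothetical example: fetch a single int argument from userspace. */
    static int example_read_flag(int __user *uptr, int *out)
    {
    	int val;

    	if (get_user(val, uptr))	/* performs access_ok() internally */
    		return -EFAULT;
    	*out = val;
    	return 0;
    }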
+
+
+#ifdef CONFIG_MMU
+#define __put_user_asm(insn, x, ptr, err) \
+do { \
+ uintptr_t __tmp; \
+ __typeof__(*(ptr)) __x = x; \
+ __enable_user_access(); \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " " insn " %z3, %2\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .balign 4\n" \
+ "3:\n" \
+ " li %0, %4\n" \
+ " jump 2b, %1\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .balign " RISCV_SZPTR "\n" \
+ " " RISCV_PTR " 1b, 3b\n" \
+ " .previous" \
+ : "+r" (err), "=r" (__tmp), "=m" (*(ptr)) \
+ : "rJ" (__x), "i" (-EFAULT)); \
+ __disable_user_access(); \
+} while (0)
+#endif /* CONFIG_MMU */
+
+
+#ifdef CONFIG_64BIT
+#define __put_user_8(x, ptr, err) \
+ __put_user_asm("sd", x, ptr, err)
+#else /* !CONFIG_64BIT */
+#ifdef CONFIG_MMU
+#define __put_user_8(x, ptr, err) \
+do { \
+ u32 __user *__ptr = (u32 __user *)(ptr); \
+ u64 __x = (__typeof__((x)-(x)))(x); \
+ uintptr_t __tmp; \
+ __enable_user_access(); \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " sw %z4, %2\n" \
+ "2:\n" \
+ " sw %z5, %3\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .balign 4\n" \
+ "4:\n" \
+ " li %0, %6\n" \
+ " jump 2b, %1\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .balign " RISCV_SZPTR "\n" \
+ " " RISCV_PTR " 1b, 4b\n" \
+ " " RISCV_PTR " 2b, 4b\n" \
+ " .previous" \
+ : "+r" (err), "=r" (__tmp), \
+ "=m" (__ptr[__LSW]), \
+ "=m" (__ptr[__MSW]) \
+ : "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \
+ __disable_user_access(); \
+} while (0)
+#endif /* CONFIG_MMU */
+#endif /* CONFIG_64BIT */
+
+
+/**
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define __put_user(x, ptr) \
+({ \
+ register long __pu_err = 0; \
+ __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ __chk_user_ptr(__gu_ptr); \
+ switch (sizeof(*__gu_ptr)) { \
+ case 1: \
+ __put_user_asm("sb", (x), __gu_ptr, __pu_err); \
+ break; \
+ case 2: \
+ __put_user_asm("sh", (x), __gu_ptr, __pu_err); \
+ break; \
+ case 4: \
+ __put_user_asm("sw", (x), __gu_ptr, __pu_err); \
+ break; \
+ case 8: \
+ __put_user_8((x), __gu_ptr, __pu_err); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __pu_err; \
+})
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
+ __put_user((x), __p) : \
+ -EFAULT; \
+})
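[Editor's note: the mirror-image sketch for writes, again with a hypothetical caller.]

    /* Hypothetical example: publish a status value to a user-supplied pointer. */
    static int example_report_status(int __user *uptr, int status)
    {
    	return put_user(status, uptr);	/* 0 on success, -EFAULT on fault */
    }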
+
+
+extern unsigned long __must_check __copy_user(void __user *to,
+ const void __user *from, unsigned long n);
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ return __copy_user(to, from, n);
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ return __copy_user(to, from, n);
+}
+
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+
+extern long __must_check strlen_user(const char __user *str);
+extern long __must_check strnlen_user(const char __user *str, long n);
+
+extern
+unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+
+static inline
+unsigned long __must_check clear_user(void __user *to, unsigned long n)
+{
+ might_fault();
+ return access_ok(VERIFY_WRITE, to, n) ?
+ __clear_user(to, n) : n;
+}
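[Editor's note: clear_user() returns the number of bytes that could not be zeroed, so callers normally treat any non-zero return as -EFAULT. A hypothetical sketch of zero-padding the tail of a user buffer:]

    /* Hypothetical example: zero-fill the uncopied tail of a read() buffer. */
    static ssize_t example_pad_tail(char __user *buf, size_t len, size_t copied)
    {
    	if (copied < len && clear_user(buf + copied, len - copied))
    		return -EFAULT;
    	return copied;
    }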
+
+/*
+ * Atomic compare-and-exchange, but with a fixup for userspace faults. Faults
+ * will set "err" to -EFAULT, while successful accesses return the previous
+ * value.
+ */
+#ifdef CONFIG_MMU
+#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ __typeof__(err) __err = 0; \
+ register unsigned int __rc; \
+ __enable_user_access(); \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ "0:\n" \
+ " lr.w" #scb " %[ret], %[ptr]\n" \
+ " bne %[ret], %z[old], 1f\n" \
+ " sc.w" #lrb " %[rc], %z[new], %[ptr]\n" \
+ " bnez %[rc], 0b\n" \
+ "1:\n" \
+ ".section .fixup,\"ax\"\n" \
+ ".balign 4\n" \
+ "2:\n" \
+ " li %[err], %[efault]\n" \
+ " jump 1b, %[rc]\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".balign " RISCV_SZPTR "\n" \
+ " " RISCV_PTR " 1b, 2b\n" \
+ ".previous\n" \
+ : [ret] "=&r" (__ret), \
+ [rc] "=&r" (__rc), \
+ [ptr] "+A" (*__ptr), \
+ [err] "=&r" (__err) \
+ : [old] "rJ" (__old), \
+ [new] "rJ" (__new), \
+ [efault] "i" (-EFAULT)); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ "0:\n" \
+ " lr.d" #scb " %[ret], %[ptr]\n" \
+ " bne %[ret], %z[old], 1f\n" \
+ " sc.d" #lrb " %[rc], %z[new], %[ptr]\n" \
+ " bnez %[rc], 0b\n" \
+ "1:\n" \
+ ".section .fixup,\"ax\"\n" \
+ ".balign 4\n" \
+ "2:\n" \
+ " li %[err], %[efault]\n" \
+ " jump 1b, %[rc]\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".balign " RISCV_SZPTR "\n" \
+ " " RISCV_PTR " 1b, 2b\n" \
+ ".previous\n" \
+ : [ret] "=&r" (__ret), \
+ [rc] "=&r" (__rc), \
+ [ptr] "+A" (*__ptr), \
+ [err] "=&r" (__err) \
+ : [old] "rJ" (__old), \
+ [new] "rJ" (__new), \
+ [efault] "i" (-EFAULT)); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __disable_user_access(); \
+ (err) = __err; \
+ __ret; \
+})
+#endif /* CONFIG_MMU */
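[Editor's note: a sketch of how a caller (for example, futex-style code) might wrap this macro. The .rl/.aq ordering suffixes passed for lrb/scb are an assumption about how acquire/release semantics would be requested, not something this patch defines.]

    /* Hypothetical example: compare-and-exchange a u32 in userspace. */
    static int example_cmpxchg_u32(u32 __user *uaddr, u32 old, u32 new, u32 *prev)
    {
    	int err = 0;

    	/* lr.w gets the ".aq" suffix, sc.w the ".rl" suffix (assumed ordering). */
    	*prev = __cmpxchg_user(uaddr, old, new, err, sizeof(u32), .rl, .aq);
    	return err;	/* 0 on success, -EFAULT if the user access faulted */
    }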
+
+#endif /* _ASM_RISCV_UACCESS_H */
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
new file mode 100644
index 000000000000..9f250ed007cd
--- /dev/null
+++ b/arch/riscv/include/asm/unistd.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define __ARCH_HAVE_MMU
+#define __ARCH_WANT_SYS_CLONE
+#include <uapi/asm/unistd.h>
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
new file mode 100644
index 000000000000..602f61257553
--- /dev/null
+++ b/arch/riscv/include/asm/vdso.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2012 ARM Limited
+ * Copyright (C) 2014 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_RISCV_VDSO_H
+#define _ASM_RISCV_VDSO_H
+
+#include <linux/types.h>
+
+struct vdso_data {
+};
+
+/*
+ * The VDSO symbols are mapped into Linux so we can just use regular symbol
+ * addressing to get their offsets in userspace. The symbols are mapped at an
+ * offset of 0, but since the linker must support setting weak undefined
+ * symbols to the absolute address 0 it also happens to support other low
+ * addresses even when the code model suggests those low addresses would not
+ * otherwise be available.
+ */
+#define VDSO_SYMBOL(base, name) \
+({ \
+ extern const char __vdso_##name[]; \
+ (void __user *)((unsigned long)(base) + __vdso_##name); \
+})
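[Editor's note: a sketch of how later signal-handling code could resolve the return trampoline in a task's mapped vDSO; the __vdso_rt_sigreturn symbol name is an assumption about the vDSO's contents, not defined by this patch.]

    /* Hypothetical example: address of the sigreturn trampoline in the vDSO. */
    static void __user *example_sigreturn_addr(void *vdso_base)
    {
    	return VDSO_SYMBOL(vdso_base, rt_sigreturn);
    }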
+
+#endif /* _ASM_RISCV_VDSO_H */
diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..aa6238791d3e
--- /dev/null
+++ b/arch/riscv/include/asm/word-at-a-time.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * Derived from arch/x86/include/asm/word-at-a-time.h
+ */
+
+#ifndef _ASM_RISCV_WORD_AT_A_TIME_H
+#define _ASM_RISCV_WORD_AT_A_TIME_H
+
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long val,
+ unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((val - c->one_bits) & ~val) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long val,
+ unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return fls64(mask) >> 3;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+#endif /* _ASM_RISCV_WORD_AT_A_TIME_H */
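[Editor's note: a sketch of how these primitives fit together when scanning for a string terminator one word at a time, mirroring the way the generic strnlen/dcache code uses the API; the helper below is hypothetical.]

    /* Hypothetical example: byte index of the first NUL within one word. */
    static unsigned long example_bytes_before_nul(unsigned long word)
    {
    	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
    	unsigned long bits;

    	if (!has_zero(word, &bits, &constants))
    		return sizeof(word);			/* no NUL in this word */
    	bits = prep_zero_mask(word, bits, &constants);
    	return find_zero(create_zero_mask(bits));	/* 0..7 on 64-bit */
    }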
diff --git a/arch/riscv/include/uapi/asm/Kbuild b/arch/riscv/include/uapi/asm/Kbuild
new file mode 100644
index 000000000000..5ded96b06352
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/Kbuild
@@ -0,0 +1,27 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += setup.h
+generic-y += unistd.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += swab.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += types.h
diff --git a/drivers/staging/ccree/dx_reg_base_host.h b/arch/riscv/include/uapi/asm/auxvec.h
index 47bbadbcd1df..1376515547cd 100644
--- a/drivers/staging/ccree/dx_reg_base_host.h
+++ b/arch/riscv/include/uapi/asm/auxvec.h
@@ -1,5 +1,6 @@
/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -11,15 +12,13 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __DX_REG_BASE_HOST_H__
-#define __DX_REG_BASE_HOST_H__
+#ifndef _UAPI_ASM_RISCV_AUXVEC_H
+#define _UAPI_ASM_RISCV_AUXVEC_H
-#define DX_BASE_CC 0x80000000
-#define DX_BASE_HOST_RGF 0x0UL
-#define DX_BASE_CRY_KERNEL 0x0UL
-#define DX_BASE_ROM 0x40000000
+/* vDSO location */
+#define AT_SYSINFO_EHDR 33
-#endif /*__DX_REG_BASE_HOST_H__*/
+#endif /* _UAPI_ASM_RISCV_AUXVEC_H */
diff --git a/arch/riscv/include/uapi/asm/bitsperlong.h b/arch/riscv/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000000..0b3cb52fd29d
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
+#define _UAPI_ASM_RISCV_BITSPERLONG_H
+
+#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
diff --git a/arch/riscv/include/uapi/asm/byteorder.h b/arch/riscv/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000000..4ca38af2cd32
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/byteorder.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _UAPI_ASM_RISCV_BYTEORDER_H
+#define _UAPI_ASM_RISCV_BYTEORDER_H
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _UAPI_ASM_RISCV_BYTEORDER_H */
diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h
new file mode 100644
index 000000000000..a510edfa8226
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/elf.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _UAPI_ASM_ELF_H
+#define _UAPI_ASM_ELF_H
+
+#include <asm/ptrace.h>
+
+/* ELF register definitions */
+typedef unsigned long elf_greg_t;
+typedef struct user_regs_struct elf_gregset_t;
+#define ELF_NGREG (sizeof(elf_gregset_t) / sizeof(elf_greg_t))
+
+typedef union __riscv_fp_state elf_fpregset_t;
+
+#define ELF_RISCV_R_SYM(r_info) ((r_info) >> 32)
+#define ELF_RISCV_R_TYPE(r_info) ((r_info) & 0xffffffff)
+
+/*
+ * RISC-V relocation types
+ */
+
+/* Relocation types used by the dynamic linker */
+#define R_RISCV_NONE 0
+#define R_RISCV_32 1
+#define R_RISCV_64 2
+#define R_RISCV_RELATIVE 3
+#define R_RISCV_COPY 4
+#define R_RISCV_JUMP_SLOT 5
+#define R_RISCV_TLS_DTPMOD32 6
+#define R_RISCV_TLS_DTPMOD64 7
+#define R_RISCV_TLS_DTPREL32 8
+#define R_RISCV_TLS_DTPREL64 9
+#define R_RISCV_TLS_TPREL32 10
+#define R_RISCV_TLS_TPREL64 11
+
+/* Relocation types not used by the dynamic linker */
+#define R_RISCV_BRANCH 16
+#define R_RISCV_JAL 17
+#define R_RISCV_CALL 18
+#define R_RISCV_CALL_PLT 19
+#define R_RISCV_GOT_HI20 20
+#define R_RISCV_TLS_GOT_HI20 21
+#define R_RISCV_TLS_GD_HI20 22
+#define R_RISCV_PCREL_HI20 23
+#define R_RISCV_PCREL_LO12_I 24
+#define R_RISCV_PCREL_LO12_S 25
+#define R_RISCV_HI20 26
+#define R_RISCV_LO12_I 27
+#define R_RISCV_LO12_S 28
+#define R_RISCV_TPREL_HI20 29
+#define R_RISCV_TPREL_LO12_I 30
+#define R_RISCV_TPREL_LO12_S 31
+#define R_RISCV_TPREL_ADD 32
+#define R_RISCV_ADD8 33
+#define R_RISCV_ADD16 34
+#define R_RISCV_ADD32 35
+#define R_RISCV_ADD64 36
+#define R_RISCV_SUB8 37
+#define R_RISCV_SUB16 38
+#define R_RISCV_SUB32 39
+#define R_RISCV_SUB64 40
+#define R_RISCV_GNU_VTINHERIT 41
+#define R_RISCV_GNU_VTENTRY 42
+#define R_RISCV_ALIGN 43
+#define R_RISCV_RVC_BRANCH 44
+#define R_RISCV_RVC_JUMP 45
+#define R_RISCV_LUI 46
+#define R_RISCV_GPREL_I 47
+#define R_RISCV_GPREL_S 48
+#define R_RISCV_TPREL_I 49
+#define R_RISCV_TPREL_S 50
+#define R_RISCV_RELAX 51
+
+#endif /* _UAPI_ASM_ELF_H */
diff --git a/arch/riscv/include/uapi/asm/hwcap.h b/arch/riscv/include/uapi/asm/hwcap.h
new file mode 100644
index 000000000000..f333221c9ab2
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/hwcap.h
@@ -0,0 +1,36 @@
+/*
+ * Copied from arch/arm64/include/asm/hwcap.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __UAPI_ASM_HWCAP_H
+#define __UAPI_ASM_HWCAP_H
+
+/*
+ * Linux saves the floating-point registers according to the ISA Linux is
+ * executing on, as opposed to the ISA the user program is compiled for. This
+ * is necessary for a handful of esoteric use cases: for example, userspace
+ * threading libraries must be able to examine the actual machine state in
+ * order to fully reconstruct the state of a thread.
+ */
+#define COMPAT_HWCAP_ISA_I (1 << ('I' - 'A'))
+#define COMPAT_HWCAP_ISA_M (1 << ('M' - 'A'))
+#define COMPAT_HWCAP_ISA_A (1 << ('A' - 'A'))
+#define COMPAT_HWCAP_ISA_F (1 << ('F' - 'A'))
+#define COMPAT_HWCAP_ISA_D (1 << ('D' - 'A'))
+#define COMPAT_HWCAP_ISA_C (1 << ('C' - 'A'))
+
+#endif
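[Editor's note: from userspace these bits surface through the ELF auxiliary vector; a hypothetical consumer (not part of this patch) could test for hardware floating point like so.]

    /* Hypothetical userspace example: check for the F and D extensions. */
    #include <sys/auxv.h>

    static int example_has_hard_float(void)
    {
    	unsigned long hwcap = getauxval(AT_HWCAP);

    	return (hwcap & COMPAT_HWCAP_ISA_F) && (hwcap & COMPAT_HWCAP_ISA_D);
    }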
diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..1a9e4cdd37e2
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/ptrace.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_ASM_RISCV_PTRACE_H
+#define _UAPI_ASM_RISCV_PTRACE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/*
+ * User-mode register state for core dumps, ptrace, sigcontext
+ *
+ * This decouples struct pt_regs from the userspace ABI.
+ * struct user_regs_struct must form a prefix of struct pt_regs.
+ */
+struct user_regs_struct {
+ unsigned long pc;
+ unsigned long ra;
+ unsigned long sp;
+ unsigned long gp;
+ unsigned long tp;
+ unsigned long t0;
+ unsigned long t1;
+ unsigned long t2;
+ unsigned long s0;
+ unsigned long s1;
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long s2;
+ unsigned long s3;
+ unsigned long s4;
+ unsigned long s5;
+ unsigned long s6;
+ unsigned long s7;
+ unsigned long s8;
+ unsigned long s9;
+ unsigned long s10;
+ unsigned long s11;
+ unsigned long t3;
+ unsigned long t4;
+ unsigned long t5;
+ unsigned long t6;
+};
+
+struct __riscv_f_ext_state {
+ __u32 f[32];
+ __u32 fcsr;
+};
+
+struct __riscv_d_ext_state {
+ __u64 f[32];
+ __u32 fcsr;
+};
+
+struct __riscv_q_ext_state {
+ __u64 f[64] __attribute__((aligned(16)));
+ __u32 fcsr;
+ /*
+ * Reserved for expansion of sigcontext structure. Currently zeroed
+ * upon signal, and must be zero upon sigreturn.
+ */
+ __u32 reserved[3];
+};
+
+union __riscv_fp_state {
+ struct __riscv_f_ext_state f;
+ struct __riscv_d_ext_state d;
+ struct __riscv_q_ext_state q;
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI_ASM_RISCV_PTRACE_H */
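[Editor's note: since user_regs_struct decouples the ptrace view from struct pt_regs, a debugger would typically fetch it through the regset interface; a hypothetical userspace sketch follows.]

    /* Hypothetical userspace example: read a tracee's general registers. */
    #include <elf.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    static long example_read_regs(pid_t pid, struct user_regs_struct *regs)
    {
    	struct iovec iov = { .iov_base = regs, .iov_len = sizeof(*regs) };

    	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
    }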
diff --git a/arch/riscv/include/uapi/asm/sigcontext.h b/arch/riscv/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000000..ed7372b277fa
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/sigcontext.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_ASM_RISCV_SIGCONTEXT_H
+#define _UAPI_ASM_RISCV_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Signal context structure
+ *
+ * This contains the context saved before a signal handler is invoked;
+ * it is restored by sys_sigreturn / sys_rt_sigreturn.
+ */
+struct sigcontext {
+ struct user_regs_struct sc_regs;
+ union __riscv_fp_state sc_fpregs;
+};
+
+#endif /* _UAPI_ASM_RISCV_SIGCONTEXT_H */
diff --git a/arch/riscv/include/uapi/asm/siginfo.h b/arch/riscv/include/uapi/asm/siginfo.h
new file mode 100644
index 000000000000..f96849aac662
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/siginfo.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2016 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_SIGINFO_H
+#define __ASM_SIGINFO_H
+
+#define __ARCH_SI_PREAMBLE_SIZE (__SIZEOF_POINTER__ == 4 ? 12 : 16)
+
+#include <asm-generic/siginfo.h>
+
+#endif
diff --git a/arch/riscv/include/uapi/asm/ucontext.h b/arch/riscv/include/uapi/asm/ucontext.h
new file mode 100644
index 000000000000..1fae8b1697e0
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/ucontext.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2017 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file was copied from arch/arm64/include/uapi/asm/ucontext.h
+ */
+#ifndef _UAPI__ASM_UCONTEXT_H
+#define _UAPI__ASM_UCONTEXT_H
+
+#include <linux/types.h>
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ sigset_t uc_sigmask;
+ /* There's some padding here to allow sigset_t to be expanded in the
+ * future. Though this is unlikely, other architectures put uc_sigmask
+ * at the end of this structure and explicitly state it can be
+ * expanded, so we didn't want to box ourselves in here. */
+ __u8 __unused[1024 / 8 - sizeof(sigset_t)];
+ /* We can't put uc_sigmask at the end of this structure because we need
+ * to be able to expand sigcontext in the future. For example, the
+ * vector ISA extension will almost certainly add ISA state. We want
+ * to ensure all user-visible ISA state can be saved and restored via a
+ * ucontext, so we're putting this at the end in order to allow for
+ * infinite extensibility. Since we know this will be extended and we
+ * assume sigset_t won't be extended an extreme amount, we're
+ * prioritizing this. */
+ struct sigcontext uc_mcontext;
+};
+
+#endif /* _UAPI__ASM_UCONTEXT_H */
diff --git a/arch/riscv/kernel/.gitignore b/arch/riscv/kernel/.gitignore
new file mode 100644
index 000000000000..b51634f6a7cd
--- /dev/null
+++ b/arch/riscv/kernel/.gitignore
@@ -0,0 +1 @@
+/vmlinux.lds
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
new file mode 100644
index 000000000000..ab8baf7bd142
--- /dev/null
+++ b/arch/riscv/kernel/Makefile
@@ -0,0 +1,33 @@
+#
+# Makefile for the RISC-V Linux kernel
+#
+
+extra-y += head.o
+extra-y += vmlinux.lds
+
+obj-y += cpu.o
+obj-y += cpufeature.o
+obj-y += entry.o
+obj-y += irq.o
+obj-y += process.o
+obj-y += ptrace.o
+obj-y += reset.o
+obj-y += setup.o
+obj-y += signal.o
+obj-y += syscall_table.o
+obj-y += sys_riscv.o
+obj-y += time.o
+obj-y += traps.o
+obj-y += riscv_ksyms.o
+obj-y += stacktrace.o
+obj-y += vdso.o
+obj-y += cacheinfo.o
+obj-y += vdso/
+
+CFLAGS_setup.o := -mcmodel=medany
+
+obj-$(CONFIG_SMP) += smpboot.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_MODULES) += module.o
+
+clean:
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
new file mode 100644
index 000000000000..6a92a2fe198e
--- /dev/null
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define GENERATING_ASM_OFFSETS
+
+#include <linux/kbuild.h>
+#include <linux/sched.h>
+#include <asm/thread_info.h>
+#include <asm/ptrace.h>
+
+void asm_offsets(void)
+{
+ OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
+ OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
+ OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);
+ OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]);
+ OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]);
+ OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]);
+ OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]);
+ OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]);
+ OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]);
+ OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]);
+ OFFSET(TASK_THREAD_S8, task_struct, thread.s[8]);
+ OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
+ OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
+ OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
+ OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
+ OFFSET(TASK_STACK, task_struct, stack);
+ OFFSET(TASK_TI, task_struct, thread_info);
+ OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
+ OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
+ OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
+ OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
+
+ OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
+ OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
+ OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);
+ OFFSET(TASK_THREAD_F3, task_struct, thread.fstate.f[3]);
+ OFFSET(TASK_THREAD_F4, task_struct, thread.fstate.f[4]);
+ OFFSET(TASK_THREAD_F5, task_struct, thread.fstate.f[5]);
+ OFFSET(TASK_THREAD_F6, task_struct, thread.fstate.f[6]);
+ OFFSET(TASK_THREAD_F7, task_struct, thread.fstate.f[7]);
+ OFFSET(TASK_THREAD_F8, task_struct, thread.fstate.f[8]);
+ OFFSET(TASK_THREAD_F9, task_struct, thread.fstate.f[9]);
+ OFFSET(TASK_THREAD_F10, task_struct, thread.fstate.f[10]);
+ OFFSET(TASK_THREAD_F11, task_struct, thread.fstate.f[11]);
+ OFFSET(TASK_THREAD_F12, task_struct, thread.fstate.f[12]);
+ OFFSET(TASK_THREAD_F13, task_struct, thread.fstate.f[13]);
+ OFFSET(TASK_THREAD_F14, task_struct, thread.fstate.f[14]);
+ OFFSET(TASK_THREAD_F15, task_struct, thread.fstate.f[15]);
+ OFFSET(TASK_THREAD_F16, task_struct, thread.fstate.f[16]);
+ OFFSET(TASK_THREAD_F17, task_struct, thread.fstate.f[17]);
+ OFFSET(TASK_THREAD_F18, task_struct, thread.fstate.f[18]);
+ OFFSET(TASK_THREAD_F19, task_struct, thread.fstate.f[19]);
+ OFFSET(TASK_THREAD_F20, task_struct, thread.fstate.f[20]);
+ OFFSET(TASK_THREAD_F21, task_struct, thread.fstate.f[21]);
+ OFFSET(TASK_THREAD_F22, task_struct, thread.fstate.f[22]);
+ OFFSET(TASK_THREAD_F23, task_struct, thread.fstate.f[23]);
+ OFFSET(TASK_THREAD_F24, task_struct, thread.fstate.f[24]);
+ OFFSET(TASK_THREAD_F25, task_struct, thread.fstate.f[25]);
+ OFFSET(TASK_THREAD_F26, task_struct, thread.fstate.f[26]);
+ OFFSET(TASK_THREAD_F27, task_struct, thread.fstate.f[27]);
+ OFFSET(TASK_THREAD_F28, task_struct, thread.fstate.f[28]);
+ OFFSET(TASK_THREAD_F29, task_struct, thread.fstate.f[29]);
+ OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]);
+ OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
+ OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
+
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ OFFSET(PT_SEPC, pt_regs, sepc);
+ OFFSET(PT_RA, pt_regs, ra);
+ OFFSET(PT_FP, pt_regs, s0);
+ OFFSET(PT_S0, pt_regs, s0);
+ OFFSET(PT_S1, pt_regs, s1);
+ OFFSET(PT_S2, pt_regs, s2);
+ OFFSET(PT_S3, pt_regs, s3);
+ OFFSET(PT_S4, pt_regs, s4);
+ OFFSET(PT_S5, pt_regs, s5);
+ OFFSET(PT_S6, pt_regs, s6);
+ OFFSET(PT_S7, pt_regs, s7);
+ OFFSET(PT_S8, pt_regs, s8);
+ OFFSET(PT_S9, pt_regs, s9);
+ OFFSET(PT_S10, pt_regs, s10);
+ OFFSET(PT_S11, pt_regs, s11);
+ OFFSET(PT_SP, pt_regs, sp);
+ OFFSET(PT_TP, pt_regs, tp);
+ OFFSET(PT_A0, pt_regs, a0);
+ OFFSET(PT_A1, pt_regs, a1);
+ OFFSET(PT_A2, pt_regs, a2);
+ OFFSET(PT_A3, pt_regs, a3);
+ OFFSET(PT_A4, pt_regs, a4);
+ OFFSET(PT_A5, pt_regs, a5);
+ OFFSET(PT_A6, pt_regs, a6);
+ OFFSET(PT_A7, pt_regs, a7);
+ OFFSET(PT_T0, pt_regs, t0);
+ OFFSET(PT_T1, pt_regs, t1);
+ OFFSET(PT_T2, pt_regs, t2);
+ OFFSET(PT_T3, pt_regs, t3);
+ OFFSET(PT_T4, pt_regs, t4);
+ OFFSET(PT_T5, pt_regs, t5);
+ OFFSET(PT_T6, pt_regs, t6);
+ OFFSET(PT_GP, pt_regs, gp);
+ OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
+ OFFSET(PT_SSTATUS, pt_regs, sstatus);
+ OFFSET(PT_SBADADDR, pt_regs, sbadaddr);
+ OFFSET(PT_SCAUSE, pt_regs, scause);
+
+ /*
+ * THREAD_{F,X}* might be larger than a S-type offset can handle, but
+ * these are used in performance-sensitive assembly so we can't resort
+ * to loading the long immediate every time.
+ */
+ DEFINE(TASK_THREAD_RA_RA,
+ offsetof(struct task_struct, thread.ra)
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_SP_RA,
+ offsetof(struct task_struct, thread.sp)
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S0_RA,
+ offsetof(struct task_struct, thread.s[0])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S1_RA,
+ offsetof(struct task_struct, thread.s[1])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S2_RA,
+ offsetof(struct task_struct, thread.s[2])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S3_RA,
+ offsetof(struct task_struct, thread.s[3])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S4_RA,
+ offsetof(struct task_struct, thread.s[4])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S5_RA,
+ offsetof(struct task_struct, thread.s[5])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S6_RA,
+ offsetof(struct task_struct, thread.s[6])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S7_RA,
+ offsetof(struct task_struct, thread.s[7])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S8_RA,
+ offsetof(struct task_struct, thread.s[8])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S9_RA,
+ offsetof(struct task_struct, thread.s[9])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S10_RA,
+ offsetof(struct task_struct, thread.s[10])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S11_RA,
+ offsetof(struct task_struct, thread.s[11])
+ - offsetof(struct task_struct, thread.ra)
+ );
+
+ DEFINE(TASK_THREAD_F0_F0,
+ offsetof(struct task_struct, thread.fstate.f[0])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F1_F0,
+ offsetof(struct task_struct, thread.fstate.f[1])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F2_F0,
+ offsetof(struct task_struct, thread.fstate.f[2])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F3_F0,
+ offsetof(struct task_struct, thread.fstate.f[3])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F4_F0,
+ offsetof(struct task_struct, thread.fstate.f[4])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F5_F0,
+ offsetof(struct task_struct, thread.fstate.f[5])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F6_F0,
+ offsetof(struct task_struct, thread.fstate.f[6])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F7_F0,
+ offsetof(struct task_struct, thread.fstate.f[7])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F8_F0,
+ offsetof(struct task_struct, thread.fstate.f[8])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F9_F0,
+ offsetof(struct task_struct, thread.fstate.f[9])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F10_F0,
+ offsetof(struct task_struct, thread.fstate.f[10])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F11_F0,
+ offsetof(struct task_struct, thread.fstate.f[11])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F12_F0,
+ offsetof(struct task_struct, thread.fstate.f[12])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F13_F0,
+ offsetof(struct task_struct, thread.fstate.f[13])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F14_F0,
+ offsetof(struct task_struct, thread.fstate.f[14])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F15_F0,
+ offsetof(struct task_struct, thread.fstate.f[15])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F16_F0,
+ offsetof(struct task_struct, thread.fstate.f[16])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F17_F0,
+ offsetof(struct task_struct, thread.fstate.f[17])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F18_F0,
+ offsetof(struct task_struct, thread.fstate.f[18])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F19_F0,
+ offsetof(struct task_struct, thread.fstate.f[19])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F20_F0,
+ offsetof(struct task_struct, thread.fstate.f[20])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F21_F0,
+ offsetof(struct task_struct, thread.fstate.f[21])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F22_F0,
+ offsetof(struct task_struct, thread.fstate.f[22])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F23_F0,
+ offsetof(struct task_struct, thread.fstate.f[23])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F24_F0,
+ offsetof(struct task_struct, thread.fstate.f[24])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F25_F0,
+ offsetof(struct task_struct, thread.fstate.f[25])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F26_F0,
+ offsetof(struct task_struct, thread.fstate.f[26])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F27_F0,
+ offsetof(struct task_struct, thread.fstate.f[27])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F28_F0,
+ offsetof(struct task_struct, thread.fstate.f[28])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F29_F0,
+ offsetof(struct task_struct, thread.fstate.f[29])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F30_F0,
+ offsetof(struct task_struct, thread.fstate.f[30])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F31_F0,
+ offsetof(struct task_struct, thread.fstate.f[31])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_FCSR_F0,
+ offsetof(struct task_struct, thread.fstate.fcsr)
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+
+ /* The assembler needs access to THREAD_SIZE as well. */
+ DEFINE(ASM_THREAD_SIZE, THREAD_SIZE);
+
+ /*
+ * We allocate a pt_regs on the stack when entering the kernel. This
+ * ensures the alignment is sane.
+ */
+ DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN));
+}
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
new file mode 100644
index 000000000000..10ed2749e246
--- /dev/null
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+static void ci_leaf_init(struct cacheinfo *this_leaf,
+ struct device_node *node,
+ enum cache_type type, unsigned int level)
+{
+ this_leaf->of_node = node;
+ this_leaf->level = level;
+ this_leaf->type = type;
+ /* not a sector cache */
+ this_leaf->physical_line_partition = 1;
+ /* TODO: Add to DTS */
+ this_leaf->attributes =
+ CACHE_WRITE_BACK
+ | CACHE_READ_ALLOCATE
+ | CACHE_WRITE_ALLOCATE;
+}
+
+static int __init_cache_level(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct device_node *np = of_cpu_device_node_get(cpu);
+ int levels = 0, leaves = 0, level;
+
+ if (of_property_read_bool(np, "cache-size"))
+ ++leaves;
+ if (of_property_read_bool(np, "i-cache-size"))
+ ++leaves;
+ if (of_property_read_bool(np, "d-cache-size"))
+ ++leaves;
+ if (leaves > 0)
+ levels = 1;
+
+ while ((np = of_find_next_cache_node(np))) {
+ if (!of_device_is_compatible(np, "cache"))
+ break;
+ if (of_property_read_u32(np, "cache-level", &level))
+ break;
+ if (level <= levels)
+ break;
+ if (of_property_read_bool(np, "cache-size"))
+ ++leaves;
+ if (of_property_read_bool(np, "i-cache-size"))
+ ++leaves;
+ if (of_property_read_bool(np, "d-cache-size"))
+ ++leaves;
+ levels = level;
+ }
+
+ this_cpu_ci->num_levels = levels;
+ this_cpu_ci->num_leaves = leaves;
+ return 0;
+}
+
+static int __populate_cache_leaves(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+ struct device_node *np = of_cpu_device_node_get(cpu);
+ int levels = 1, level = 1;
+
+ if (of_property_read_bool(np, "cache-size"))
+ ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
+ if (of_property_read_bool(np, "i-cache-size"))
+ ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level);
+ if (of_property_read_bool(np, "d-cache-size"))
+ ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
+
+ while ((np = of_find_next_cache_node(np))) {
+ if (!of_device_is_compatible(np, "cache"))
+ break;
+ if (of_property_read_u32(np, "cache-level", &level))
+ break;
+ if (level <= levels)
+ break;
+ if (of_property_read_bool(np, "cache-size"))
+ ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
+ if (of_property_read_bool(np, "i-cache-size"))
+ ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level);
+ if (of_property_read_bool(np, "d-cache-size"))
+ ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
+ levels = level;
+ }
+
+ return 0;
+}
+
+DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
+DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
new file mode 100644
index 000000000000..ca6c81e54e37
--- /dev/null
+++ b/arch/riscv/kernel/cpu.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/of.h>
+
+/* Return -1 if not a valid hart */
+int riscv_of_processor_hart(struct device_node *node)
+{
+ const char *isa, *status;
+ u32 hart;
+
+ if (!of_device_is_compatible(node, "riscv")) {
+ pr_warn("Found incompatible CPU\n");
+ return -(ENODEV);
+ }
+
+ if (of_property_read_u32(node, "reg", &hart)) {
+ pr_warn("Found CPU without hart ID\n");
+ return -(ENODEV);
+ }
+ if (hart >= NR_CPUS) {
+ pr_info("Found hart ID %d, which is above NR_CPUS. Disabling this hart\n", hart);
+ return -(ENODEV);
+ }
+
+ if (of_property_read_string(node, "status", &status)) {
+ pr_warn("CPU with hartid=%d has no \"status\" property\n", hart);
+ return -(ENODEV);
+ }
+ if (strcmp(status, "okay")) {
+ pr_info("CPU with hartid=%d has a non-okay status of \"%s\"\n", hart, status);
+ return -(ENODEV);
+ }
+
+ if (of_property_read_string(node, "riscv,isa", &isa)) {
+ pr_warn("CPU with hartid=%d has no \"riscv,isa\" property\n", hart);
+ return -(ENODEV);
+ }
+ if (isa[0] != 'r' || isa[1] != 'v') {
+ pr_warn("CPU with hartid=%d has an invalid ISA of \"%s\"\n", hart, isa);
+ return -(ENODEV);
+ }
+
+ return hart;
+}
+
+#ifdef CONFIG_PROC_FS
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ *pos = cpumask_next(*pos - 1, cpu_online_mask);
+ if ((*pos) < nr_cpu_ids)
+ return (void *)(uintptr_t)(1 + *pos);
+ return NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+static int c_show(struct seq_file *m, void *v)
+{
+ unsigned long hart_id = (unsigned long)v - 1;
+ struct device_node *node = of_get_cpu_node(hart_id, NULL);
+ const char *compat, *isa, *mmu;
+
+ seq_printf(m, "hart\t: %lu\n", hart_id);
+ if (!of_property_read_string(node, "riscv,isa", &isa)
+ && isa[0] == 'r'
+ && isa[1] == 'v')
+ seq_printf(m, "isa\t: %s\n", isa);
+ if (!of_property_read_string(node, "mmu-type", &mmu)
+ && !strncmp(mmu, "riscv,", 6))
+ seq_printf(m, "mmu\t: %s\n", mmu+6);
+ if (!of_property_read_string(node, "compatible", &compat)
+ && strcmp(compat, "riscv"))
+ seq_printf(m, "uarch\t: %s\n", compat);
+ seq_puts(m, "\n");
+
+ return 0;
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = c_show
+};
+
+#endif /* CONFIG_PROC_FS */
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
new file mode 100644
index 000000000000..17011a870044
--- /dev/null
+++ b/arch/riscv/kernel/cpufeature.c
@@ -0,0 +1,61 @@
+/*
+ * Copied from arch/arm64/kernel/cpufeature.c
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of.h>
+#include <asm/processor.h>
+#include <asm/hwcap.h>
+
+unsigned long elf_hwcap __read_mostly;
+
+void riscv_fill_hwcap(void)
+{
+ struct device_node *node;
+ const char *isa;
+ size_t i;
+ static unsigned long isa2hwcap[256] = {0};
+
+ isa2hwcap['i'] = isa2hwcap['I'] = COMPAT_HWCAP_ISA_I;
+ isa2hwcap['m'] = isa2hwcap['M'] = COMPAT_HWCAP_ISA_M;
+ isa2hwcap['a'] = isa2hwcap['A'] = COMPAT_HWCAP_ISA_A;
+ isa2hwcap['f'] = isa2hwcap['F'] = COMPAT_HWCAP_ISA_F;
+ isa2hwcap['d'] = isa2hwcap['D'] = COMPAT_HWCAP_ISA_D;
+ isa2hwcap['c'] = isa2hwcap['C'] = COMPAT_HWCAP_ISA_C;
+
+ elf_hwcap = 0;
+
+ /*
+ * We don't support running Linux on heterogeneous ISA systems. For
+ * now, we just check the ISA of the first processor.
+ */
+ node = of_find_node_by_type(NULL, "cpu");
+ if (!node) {
+ pr_warning("Unable to find \"cpu\" devicetree entry");
+ return;
+ }
+
+ if (of_property_read_string(node, "riscv,isa", &isa)) {
+ pr_warning("Unable to find \"riscv,isa\" devicetree entry");
+ return;
+ }
+
+ for (i = 0; i < strlen(isa); ++i)
+ elf_hwcap |= isa2hwcap[(unsigned char)(isa[i])];
+
+ pr_info("elf_hwcap is 0x%lx", elf_hwcap);
+}
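[Editor's note: as a worked example (illustrative only), a devicetree riscv,isa string of "rv64imafdc" would leave elf_hwcap at 0x112d.]

    /*
     * 'r', 'v', '6', '4' map to 0 in isa2hwcap; the remaining letters map to
     * their COMPAT_HWCAP_ISA_* bits:
     *
     *   I = 1 << 8  = 0x0100
     *   M = 1 << 12 = 0x1000
     *   A = 1 << 0  = 0x0001
     *   F = 1 << 5  = 0x0020
     *   D = 1 << 3  = 0x0008
     *   C = 1 << 2  = 0x0004
     *
     * OR-ing these together gives elf_hwcap = 0x112d.
     */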
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
new file mode 100644
index 000000000000..20ee86f782a9
--- /dev/null
+++ b/arch/riscv/kernel/entry.S
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+ .text
+ .altmacro
+
+/*
+ * Prepares to enter a system call or exception by saving all registers to the
+ * stack.
+ */
+ .macro SAVE_ALL
+ LOCAL _restore_kernel_tpsp
+ LOCAL _save_context
+
+ /*
+ * If coming from userspace, preserve the user thread pointer and load
+ * the kernel thread pointer. If we came from the kernel, sscratch
+ * will contain 0, and we should continue on the current TP.
+ */
+ csrrw tp, sscratch, tp
+ bnez tp, _save_context
+
+_restore_kernel_tpsp:
+ csrr tp, sscratch
+ REG_S sp, TASK_TI_KERNEL_SP(tp)
+_save_context:
+ REG_S sp, TASK_TI_USER_SP(tp)
+ REG_L sp, TASK_TI_KERNEL_SP(tp)
+ addi sp, sp, -(PT_SIZE_ON_STACK)
+ REG_S x1, PT_RA(sp)
+ REG_S x3, PT_GP(sp)
+ REG_S x5, PT_T0(sp)
+ REG_S x6, PT_T1(sp)
+ REG_S x7, PT_T2(sp)
+ REG_S x8, PT_S0(sp)
+ REG_S x9, PT_S1(sp)
+ REG_S x10, PT_A0(sp)
+ REG_S x11, PT_A1(sp)
+ REG_S x12, PT_A2(sp)
+ REG_S x13, PT_A3(sp)
+ REG_S x14, PT_A4(sp)
+ REG_S x15, PT_A5(sp)
+ REG_S x16, PT_A6(sp)
+ REG_S x17, PT_A7(sp)
+ REG_S x18, PT_S2(sp)
+ REG_S x19, PT_S3(sp)
+ REG_S x20, PT_S4(sp)
+ REG_S x21, PT_S5(sp)
+ REG_S x22, PT_S6(sp)
+ REG_S x23, PT_S7(sp)
+ REG_S x24, PT_S8(sp)
+ REG_S x25, PT_S9(sp)
+ REG_S x26, PT_S10(sp)
+ REG_S x27, PT_S11(sp)
+ REG_S x28, PT_T3(sp)
+ REG_S x29, PT_T4(sp)
+ REG_S x30, PT_T5(sp)
+ REG_S x31, PT_T6(sp)
+
+ /*
+ * Disable FPU to detect illegal usage of
+ * floating point in kernel space
+ */
+ li t0, SR_FS
+
+ REG_L s0, TASK_TI_USER_SP(tp)
+ csrrc s1, sstatus, t0
+ csrr s2, sepc
+ csrr s3, sbadaddr
+ csrr s4, scause
+ csrr s5, sscratch
+ REG_S s0, PT_SP(sp)
+ REG_S s1, PT_SSTATUS(sp)
+ REG_S s2, PT_SEPC(sp)
+ REG_S s3, PT_SBADADDR(sp)
+ REG_S s4, PT_SCAUSE(sp)
+ REG_S s5, PT_TP(sp)
+ .endm
+
+/*
+ * Prepares to return from a system call or exception by restoring all
+ * registers from the stack.
+ */
+ .macro RESTORE_ALL
+ REG_L a0, PT_SSTATUS(sp)
+ REG_L a2, PT_SEPC(sp)
+ csrw sstatus, a0
+ csrw sepc, a2
+
+ REG_L x1, PT_RA(sp)
+ REG_L x3, PT_GP(sp)
+ REG_L x4, PT_TP(sp)
+ REG_L x5, PT_T0(sp)
+ REG_L x6, PT_T1(sp)
+ REG_L x7, PT_T2(sp)
+ REG_L x8, PT_S0(sp)
+ REG_L x9, PT_S1(sp)
+ REG_L x10, PT_A0(sp)
+ REG_L x11, PT_A1(sp)
+ REG_L x12, PT_A2(sp)
+ REG_L x13, PT_A3(sp)
+ REG_L x14, PT_A4(sp)
+ REG_L x15, PT_A5(sp)
+ REG_L x16, PT_A6(sp)
+ REG_L x17, PT_A7(sp)
+ REG_L x18, PT_S2(sp)
+ REG_L x19, PT_S3(sp)
+ REG_L x20, PT_S4(sp)
+ REG_L x21, PT_S5(sp)
+ REG_L x22, PT_S6(sp)
+ REG_L x23, PT_S7(sp)
+ REG_L x24, PT_S8(sp)
+ REG_L x25, PT_S9(sp)
+ REG_L x26, PT_S10(sp)
+ REG_L x27, PT_S11(sp)
+ REG_L x28, PT_T3(sp)
+ REG_L x29, PT_T4(sp)
+ REG_L x30, PT_T5(sp)
+ REG_L x31, PT_T6(sp)
+
+ REG_L x2, PT_SP(sp)
+ .endm
+
+ENTRY(handle_exception)
+ SAVE_ALL
+
+ /*
+ * Set sscratch register to 0, so that if a recursive exception
+ * occurs, the exception vector knows it came from the kernel
+ */
+ csrw sscratch, x0
+
+ /* Load the global pointer */
+.option push
+.option norelax
+ la gp, __global_pointer$
+.option pop
+
+ la ra, ret_from_exception
+ /*
+ * MSB of cause differentiates between
+ * interrupts and exceptions
+ */
+ bge s4, zero, 1f
+
+ /* Handle interrupts */
+ slli a0, s4, 1
+ srli a0, a0, 1
+ move a1, sp /* pt_regs */
+ tail do_IRQ
+1:
+ /* Handle syscalls */
+ li t0, EXC_SYSCALL
+ beq s4, t0, handle_syscall
+
+ /* Handle other exceptions */
+ slli t0, s4, RISCV_LGPTR
+ la t1, excp_vect_table
+ la t2, excp_vect_table_end
+ move a0, sp /* pt_regs */
+ add t0, t1, t0
+ /* Check if exception code lies within bounds */
+ bgeu t0, t2, 1f
+ REG_L t0, 0(t0)
+ jr t0
+1:
+ tail do_trap_unknown
+
+handle_syscall:
+ /* save the initial A0 value (needed in signal handlers) */
+ REG_S a0, PT_ORIG_A0(sp)
+ /*
+ * Advance SEPC to avoid executing the original
+ * scall instruction on sret
+ */
+ addi s2, s2, 0x4
+ REG_S s2, PT_SEPC(sp)
+ /* System calls run with interrupts enabled */
+ csrs sstatus, SR_IE
+ /* Trace syscalls, but only if requested by the user. */
+ REG_L t0, TASK_TI_FLAGS(tp)
+ andi t0, t0, _TIF_SYSCALL_TRACE
+ bnez t0, handle_syscall_trace_enter
+check_syscall_nr:
+ /* Check to make sure we don't jump to a bogus syscall number. */
+ li t0, __NR_syscalls
+ la s0, sys_ni_syscall
+ /* Syscall number held in a7 */
+ bgeu a7, t0, 1f
+ la s0, sys_call_table
+ slli t0, a7, RISCV_LGPTR
+ add s0, s0, t0
+ REG_L s0, 0(s0)
+1:
+ jalr s0
+
+ret_from_syscall:
+ /* Set user a0 to kernel a0 */
+ REG_S a0, PT_A0(sp)
+ /* Trace syscalls, but only if requested by the user. */
+ REG_L t0, TASK_TI_FLAGS(tp)
+ andi t0, t0, _TIF_SYSCALL_TRACE
+ bnez t0, handle_syscall_trace_exit
+
+ret_from_exception:
+ REG_L s0, PT_SSTATUS(sp)
+ csrc sstatus, SR_IE
+ andi s0, s0, SR_PS
+ bnez s0, restore_all
+
+resume_userspace:
+ /* Interrupts must be disabled here so flags are checked atomically */
+ REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
+ andi s1, s0, _TIF_WORK_MASK
+ bnez s1, work_pending
+
+ /* Save unwound kernel stack pointer in thread_info */
+ addi s0, sp, PT_SIZE_ON_STACK
+ REG_S s0, TASK_TI_KERNEL_SP(tp)
+
+ /*
+ * Save TP into sscratch, so we can find the kernel data structures
+ * again.
+ */
+ csrw sscratch, tp
+
+restore_all:
+ RESTORE_ALL
+ sret
+
+work_pending:
+ /* Enter slow path for supplementary processing */
+ la ra, ret_from_exception
+ andi s1, s0, _TIF_NEED_RESCHED
+ bnez s1, work_resched
+work_notifysig:
+ /* Handle pending signals and notify-resume requests */
+ csrs sstatus, SR_IE /* Enable interrupts for do_notify_resume() */
+ move a0, sp /* pt_regs */
+ move a1, s0 /* current_thread_info->flags */
+ tail do_notify_resume
+work_resched:
+ tail schedule
+
+/* Slow paths for ptrace. */
+handle_syscall_trace_enter:
+ move a0, sp
+ call do_syscall_trace_enter
+ REG_L a0, PT_A0(sp)
+ REG_L a1, PT_A1(sp)
+ REG_L a2, PT_A2(sp)
+ REG_L a3, PT_A3(sp)
+ REG_L a4, PT_A4(sp)
+ REG_L a5, PT_A5(sp)
+ REG_L a6, PT_A6(sp)
+ REG_L a7, PT_A7(sp)
+ j check_syscall_nr
+handle_syscall_trace_exit:
+ move a0, sp
+ call do_syscall_trace_exit
+ j ret_from_exception
+
+END(handle_exception)
+
+ENTRY(ret_from_fork)
+ la ra, ret_from_exception
+ tail schedule_tail
+ENDPROC(ret_from_fork)
+
+ENTRY(ret_from_kernel_thread)
+ call schedule_tail
+ /* Call fn(arg) */
+ la ra, ret_from_exception
+ move a0, s1
+ jr s0
+ENDPROC(ret_from_kernel_thread)
+
+
+/*
+ * Integer register context switch
+ * The callee-saved registers must be saved and restored.
+ *
+ * a0: previous task_struct (must be preserved across the switch)
+ * a1: next task_struct
+ *
+ * The value of a0 and a1 must be preserved by this function, as that's how
+ * arguments are passed to schedule_tail.
+ */
+ENTRY(__switch_to)
+ /* Save context into prev->thread */
+ li a4, TASK_THREAD_RA
+ add a3, a0, a4
+ add a4, a1, a4
+ REG_S ra, TASK_THREAD_RA_RA(a3)
+ REG_S sp, TASK_THREAD_SP_RA(a3)
+ REG_S s0, TASK_THREAD_S0_RA(a3)
+ REG_S s1, TASK_THREAD_S1_RA(a3)
+ REG_S s2, TASK_THREAD_S2_RA(a3)
+ REG_S s3, TASK_THREAD_S3_RA(a3)
+ REG_S s4, TASK_THREAD_S4_RA(a3)
+ REG_S s5, TASK_THREAD_S5_RA(a3)
+ REG_S s6, TASK_THREAD_S6_RA(a3)
+ REG_S s7, TASK_THREAD_S7_RA(a3)
+ REG_S s8, TASK_THREAD_S8_RA(a3)
+ REG_S s9, TASK_THREAD_S9_RA(a3)
+ REG_S s10, TASK_THREAD_S10_RA(a3)
+ REG_S s11, TASK_THREAD_S11_RA(a3)
+ /* Restore context from next->thread */
+ REG_L ra, TASK_THREAD_RA_RA(a4)
+ REG_L sp, TASK_THREAD_SP_RA(a4)
+ REG_L s0, TASK_THREAD_S0_RA(a4)
+ REG_L s1, TASK_THREAD_S1_RA(a4)
+ REG_L s2, TASK_THREAD_S2_RA(a4)
+ REG_L s3, TASK_THREAD_S3_RA(a4)
+ REG_L s4, TASK_THREAD_S4_RA(a4)
+ REG_L s5, TASK_THREAD_S5_RA(a4)
+ REG_L s6, TASK_THREAD_S6_RA(a4)
+ REG_L s7, TASK_THREAD_S7_RA(a4)
+ REG_L s8, TASK_THREAD_S8_RA(a4)
+ REG_L s9, TASK_THREAD_S9_RA(a4)
+ REG_L s10, TASK_THREAD_S10_RA(a4)
+ REG_L s11, TASK_THREAD_S11_RA(a4)
+ /* Swap the CPU entry around. */
+ lw a3, TASK_TI_CPU(a0)
+ lw a4, TASK_TI_CPU(a1)
+ sw a3, TASK_TI_CPU(a1)
+ sw a4, TASK_TI_CPU(a0)
+#if TASK_TI != 0
+#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
+ addi tp, a1, TASK_TI
+#else
+ move tp, a1
+#endif
+ ret
+ENDPROC(__switch_to)
+
+ENTRY(__fstate_save)
+ li a2, TASK_THREAD_F0
+ add a0, a0, a2
+ li t1, SR_FS
+ csrs sstatus, t1
+ frcsr t0
+ fsd f0, TASK_THREAD_F0_F0(a0)
+ fsd f1, TASK_THREAD_F1_F0(a0)
+ fsd f2, TASK_THREAD_F2_F0(a0)
+ fsd f3, TASK_THREAD_F3_F0(a0)
+ fsd f4, TASK_THREAD_F4_F0(a0)
+ fsd f5, TASK_THREAD_F5_F0(a0)
+ fsd f6, TASK_THREAD_F6_F0(a0)
+ fsd f7, TASK_THREAD_F7_F0(a0)
+ fsd f8, TASK_THREAD_F8_F0(a0)
+ fsd f9, TASK_THREAD_F9_F0(a0)
+ fsd f10, TASK_THREAD_F10_F0(a0)
+ fsd f11, TASK_THREAD_F11_F0(a0)
+ fsd f12, TASK_THREAD_F12_F0(a0)
+ fsd f13, TASK_THREAD_F13_F0(a0)
+ fsd f14, TASK_THREAD_F14_F0(a0)
+ fsd f15, TASK_THREAD_F15_F0(a0)
+ fsd f16, TASK_THREAD_F16_F0(a0)
+ fsd f17, TASK_THREAD_F17_F0(a0)
+ fsd f18, TASK_THREAD_F18_F0(a0)
+ fsd f19, TASK_THREAD_F19_F0(a0)
+ fsd f20, TASK_THREAD_F20_F0(a0)
+ fsd f21, TASK_THREAD_F21_F0(a0)
+ fsd f22, TASK_THREAD_F22_F0(a0)
+ fsd f23, TASK_THREAD_F23_F0(a0)
+ fsd f24, TASK_THREAD_F24_F0(a0)
+ fsd f25, TASK_THREAD_F25_F0(a0)
+ fsd f26, TASK_THREAD_F26_F0(a0)
+ fsd f27, TASK_THREAD_F27_F0(a0)
+ fsd f28, TASK_THREAD_F28_F0(a0)
+ fsd f29, TASK_THREAD_F29_F0(a0)
+ fsd f30, TASK_THREAD_F30_F0(a0)
+ fsd f31, TASK_THREAD_F31_F0(a0)
+ sw t0, TASK_THREAD_FCSR_F0(a0)
+ csrc sstatus, t1
+ ret
+ENDPROC(__fstate_save)
+
+ENTRY(__fstate_restore)
+ li a2, TASK_THREAD_F0
+ add a0, a0, a2
+ li t1, SR_FS
+ lw t0, TASK_THREAD_FCSR_F0(a0)
+ csrs sstatus, t1
+ fld f0, TASK_THREAD_F0_F0(a0)
+ fld f1, TASK_THREAD_F1_F0(a0)
+ fld f2, TASK_THREAD_F2_F0(a0)
+ fld f3, TASK_THREAD_F3_F0(a0)
+ fld f4, TASK_THREAD_F4_F0(a0)
+ fld f5, TASK_THREAD_F5_F0(a0)
+ fld f6, TASK_THREAD_F6_F0(a0)
+ fld f7, TASK_THREAD_F7_F0(a0)
+ fld f8, TASK_THREAD_F8_F0(a0)
+ fld f9, TASK_THREAD_F9_F0(a0)
+ fld f10, TASK_THREAD_F10_F0(a0)
+ fld f11, TASK_THREAD_F11_F0(a0)
+ fld f12, TASK_THREAD_F12_F0(a0)
+ fld f13, TASK_THREAD_F13_F0(a0)
+ fld f14, TASK_THREAD_F14_F0(a0)
+ fld f15, TASK_THREAD_F15_F0(a0)
+ fld f16, TASK_THREAD_F16_F0(a0)
+ fld f17, TASK_THREAD_F17_F0(a0)
+ fld f18, TASK_THREAD_F18_F0(a0)
+ fld f19, TASK_THREAD_F19_F0(a0)
+ fld f20, TASK_THREAD_F20_F0(a0)
+ fld f21, TASK_THREAD_F21_F0(a0)
+ fld f22, TASK_THREAD_F22_F0(a0)
+ fld f23, TASK_THREAD_F23_F0(a0)
+ fld f24, TASK_THREAD_F24_F0(a0)
+ fld f25, TASK_THREAD_F25_F0(a0)
+ fld f26, TASK_THREAD_F26_F0(a0)
+ fld f27, TASK_THREAD_F27_F0(a0)
+ fld f28, TASK_THREAD_F28_F0(a0)
+ fld f29, TASK_THREAD_F29_F0(a0)
+ fld f30, TASK_THREAD_F30_F0(a0)
+ fld f31, TASK_THREAD_F31_F0(a0)
+ fscsr t0
+ csrc sstatus, t1
+ ret
+ENDPROC(__fstate_restore)
+
+
+ .section ".rodata"
+ /* Exception vector table */
+ENTRY(excp_vect_table)
+ RISCV_PTR do_trap_insn_misaligned
+ RISCV_PTR do_trap_insn_fault
+ RISCV_PTR do_trap_insn_illegal
+ RISCV_PTR do_trap_break
+ RISCV_PTR do_trap_load_misaligned
+ RISCV_PTR do_trap_load_fault
+ RISCV_PTR do_trap_store_misaligned
+ RISCV_PTR do_trap_store_fault
+ RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
+ RISCV_PTR do_trap_ecall_s
+ RISCV_PTR do_trap_unknown
+ RISCV_PTR do_trap_ecall_m
+ RISCV_PTR do_page_fault /* instruction page fault */
+ RISCV_PTR do_page_fault /* load page fault */
+ RISCV_PTR do_trap_unknown
+ RISCV_PTR do_page_fault /* store page fault */
+excp_vect_table_end:
+END(excp_vect_table)
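
The check_syscall_nr path above is a bounds-checked indirect call: an out-of-range number in a7 falls back to sys_ni_syscall, otherwise the handler is loaded from sys_call_table at index a7 scaled by the pointer size. A rough C rendering of that dispatch, purely as an illustration (the typedef and local names here are not from the patch):

    /* Illustrative sketch of check_syscall_nr; not part of the patch. */
    typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long,
                               unsigned long, unsigned long, unsigned long);
    syscall_fn fn = (syscall_fn)sys_ni_syscall;        /* la s0, sys_ni_syscall */
    if (regs->a7 < __NR_syscalls)                      /* bgeu a7, t0, 1f */
        fn = (syscall_fn)sys_call_table[regs->a7];     /* slli + add + REG_L */
    regs->a0 = fn(regs->a0, regs->a1, regs->a2,        /* jalr s0 */
                  regs->a3, regs->a4, regs->a5);       /* REG_S a0, PT_A0(sp) */
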
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
new file mode 100644
index 000000000000..76af908f87c1
--- /dev/null
+++ b/arch/riscv/kernel/head.S
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/asm.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/csr.h>
+
+__INIT
+ENTRY(_start)
+ /* Mask all interrupts */
+ csrw sie, zero
+
+ /* Load the global pointer */
+.option push
+.option norelax
+ la gp, __global_pointer$
+.option pop
+
+ /*
+ * Disable FPU to detect illegal usage of
+ * floating point in kernel space
+ */
+ li t0, SR_FS
+ csrc sstatus, t0
+
+ /* Pick one hart to run the main boot sequence */
+ la a3, hart_lottery
+ li a2, 1
+ amoadd.w a3, a2, (a3)
+ bnez a3, .Lsecondary_start
+
+ /* Save hart ID and DTB physical address */
+ mv s0, a0
+ mv s1, a1
+
+ /* Initialize page tables and relocate to virtual addresses */
+ la sp, init_thread_union + THREAD_SIZE
+ call setup_vm
+ call relocate
+
+ /* Restore C environment */
+ la tp, init_task
+ sw s0, TASK_TI_CPU(tp)
+
+ la sp, init_thread_union
+ li a0, ASM_THREAD_SIZE
+ add sp, sp, a0
+
+ /* Start the kernel */
+ mv a0, s0
+ mv a1, s1
+ call sbi_save
+ tail start_kernel
+
+relocate:
+ /* Relocate return address */
+ li a1, PAGE_OFFSET
+ la a0, _start
+ sub a1, a1, a0
+ add ra, ra, a1
+
+ /* Point stvec to the virtual address of the instruction after the sptbr write */
+ la a0, 1f
+ add a0, a0, a1
+ csrw stvec, a0
+
+ /* Compute sptbr for kernel page tables, but don't load it yet */
+ la a2, swapper_pg_dir
+ srl a2, a2, PAGE_SHIFT
+ li a1, SPTBR_MODE
+ or a2, a2, a1
+
+ /*
+ * Load trampoline page directory, which will cause us to trap to
+ * stvec if VA != PA, or simply fall through if VA == PA
+ */
+ la a0, trampoline_pg_dir
+ srl a0, a0, PAGE_SHIFT
+ or a0, a0, a1
+ sfence.vma
+ csrw sptbr, a0
+1:
+ /* Set trap vector to spin forever to help debug */
+ la a0, .Lsecondary_park
+ csrw stvec, a0
+
+ /* Reload the global pointer */
+.option push
+.option norelax
+ la gp, __global_pointer$
+.option pop
+
+ /* Switch to kernel page tables */
+ csrw sptbr, a2
+
+ ret
+
+.Lsecondary_start:
+#ifdef CONFIG_SMP
+ li a1, CONFIG_NR_CPUS
+ bgeu a0, a1, .Lsecondary_park
+
+ /* Set trap vector to spin forever to help debug */
+ la a3, .Lsecondary_park
+ csrw stvec, a3
+
+ slli a3, a0, LGREG
+ la a1, __cpu_up_stack_pointer
+ la a2, __cpu_up_task_pointer
+ add a1, a3, a1
+ add a2, a3, a2
+
+ /*
+ * This hart didn't win the lottery, so we wait until the winning hart has
+ * gotten far enough along the boot process for this hart to continue.
+ */
+.Lwait_for_cpu_up:
+ /* FIXME: We should WFI to save some energy here. */
+ REG_L sp, (a1)
+ REG_L tp, (a2)
+ beqz sp, .Lwait_for_cpu_up
+ beqz tp, .Lwait_for_cpu_up
+ fence
+
+ /* Enable virtual memory and relocate to virtual address */
+ call relocate
+
+ tail smp_callin
+#endif
+
+.Lsecondary_park:
+ /* We lack SMP support or have too many harts, so park this hart */
+ wfi
+ j .Lsecondary_park
+END(_start)
+
+__PAGE_ALIGNED_BSS
+ /* Empty zero page */
+ .balign PAGE_SIZE
+ENTRY(empty_zero_page)
+ .fill (empty_zero_page + PAGE_SIZE) - ., 1, 0x00
+END(empty_zero_page)
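
The relocate routine above works because the offset between the kernel's virtual base and its physical load address is a single constant; adding it to ra (and pointing stvec at the virtual address of label 1) moves execution into the virtual mapping as soon as sptbr is written. A minimal sketch of that arithmetic, assuming the same symbols (illustrative only):

    /* Illustrative: the VA/PA fixup applied by relocate. */
    unsigned long load_pa = (unsigned long)&_start;        /* where we are actually running */
    unsigned long va_pa_offset = PAGE_OFFSET - load_pa;    /* li a1, PAGE_OFFSET; sub a1, a1, a0 */
    return_address += va_pa_offset;                        /* add ra, ra, a1 */
    /* Writing sptbr with the trampoline table then either traps to the VA of
     * label 1 (VA != PA) or falls straight through (VA == PA). */
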
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
new file mode 100644
index 000000000000..328718e8026e
--- /dev/null
+++ b/arch/riscv/kernel/irq.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+
+#ifdef CONFIG_RISCV_INTC
+#include <linux/irqchip/irq-riscv-intc.h>
+#endif
+
+void __init init_IRQ(void)
+{
+ irqchip_init();
+}
+
+asmlinkage void __irq_entry do_IRQ(unsigned int cause, struct pt_regs *regs)
+{
+#ifdef CONFIG_RISCV_INTC
+ /*
+ * FIXME: We don't want a direct call to riscv_intc_irq here. The plan
+ * is to put an IRQ domain here and let the interrupt controller
+ * register with that, but I poked around the arm64 code a bit and
+ * there might be a better way to do it (i.e., something fully generic).
+ */
+ riscv_intc_irq(cause, regs);
+#endif
+}
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
new file mode 100644
index 000000000000..e0f05034fc21
--- /dev/null
+++ b/arch/riscv/kernel/module.c
@@ -0,0 +1,217 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2017 Zihao Yu
+ */
+
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/moduleloader.h>
+
+static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ *(u64 *)location = v;
+ return 0;
+}
+
+static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ s64 offset = (void *)v - (void *)location;
+ u32 imm12 = (offset & 0x1000) << (31 - 12);
+ u32 imm11 = (offset & 0x800) >> (11 - 7);
+ u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
+ u32 imm4_1 = (offset & 0x1e) << (11 - 4);
+
+ *location = (*location & 0x1fff07f) | imm12 | imm11 | imm10_5 | imm4_1;
+ return 0;
+}
+
+static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ s64 offset = (void *)v - (void *)location;
+ u32 imm20 = (offset & 0x100000) << (31 - 20);
+ u32 imm19_12 = (offset & 0xff000);
+ u32 imm11 = (offset & 0x800) << (20 - 11);
+ u32 imm10_1 = (offset & 0x7fe) << (30 - 10);
+
+ *location = (*location & 0xfff) | imm20 | imm19_12 | imm11 | imm10_1;
+ return 0;
+}
+
+static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ s64 offset = (void *)v - (void *)location;
+ s32 hi20;
+
+ if (offset != (s32)offset) {
+ pr_err(
+ "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
+ me->name, v, location);
+ return -EINVAL;
+ }
+
+ hi20 = (offset + 0x800) & 0xfffff000;
+ *location = (*location & 0xfff) | hi20;
+ return 0;
+}
+
+static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ /*
+ * v is the lo12 value to fill. It is calculated before calling this
+ * handler.
+ */
+ *location = (*location & 0xfffff) | ((v & 0xfff) << 20);
+ return 0;
+}
+
+static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ /*
+ * v is the lo12 value to fill. It is calculated before calling this
+ * handler.
+ */
+ u32 imm11_5 = (v & 0xfe0) << (31 - 11);
+ u32 imm4_0 = (v & 0x1f) << (11 - 4);
+
+ *location = (*location & 0x1fff07f) | imm11_5 | imm4_0;
+ return 0;
+}
+
+static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ s64 offset = (void *)v - (void *)location;
+ s32 fill_v = offset;
+ u32 hi20, lo12;
+
+ if (offset != fill_v) {
+ pr_err(
+ "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
+ me->name, v, location);
+ return -EINVAL;
+ }
+
+ hi20 = (offset + 0x800) & 0xfffff000;
+ lo12 = (offset - hi20) & 0xfff;
+ *location = (*location & 0xfff) | hi20;
+ *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20);
+ return 0;
+}
+
+static int apply_r_riscv_relax_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ return 0;
+}
+
+static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
+ Elf_Addr v) = {
+ [R_RISCV_64] = apply_r_riscv_64_rela,
+ [R_RISCV_BRANCH] = apply_r_riscv_branch_rela,
+ [R_RISCV_JAL] = apply_r_riscv_jal_rela,
+ [R_RISCV_PCREL_HI20] = apply_r_riscv_pcrel_hi20_rela,
+ [R_RISCV_PCREL_LO12_I] = apply_r_riscv_pcrel_lo12_i_rela,
+ [R_RISCV_PCREL_LO12_S] = apply_r_riscv_pcrel_lo12_s_rela,
+ [R_RISCV_CALL_PLT] = apply_r_riscv_call_plt_rela,
+ [R_RISCV_RELAX] = apply_r_riscv_relax_rela,
+};
+
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+ int (*handler)(struct module *me, u32 *location, Elf_Addr v);
+ Elf_Sym *sym;
+ u32 *location;
+ unsigned int i, type;
+ Elf_Addr v;
+ int res;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ + ELF_RISCV_R_SYM(rel[i].r_info);
+ if (IS_ERR_VALUE(sym->st_value)) {
+ /* Ignore unresolved weak symbol */
+ if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
+ pr_warning("%s: Unknown symbol %s\n",
+ me->name, strtab + sym->st_name);
+ return -ENOENT;
+ }
+
+ type = ELF_RISCV_R_TYPE(rel[i].r_info);
+
+ if (type < ARRAY_SIZE(reloc_handlers_rela))
+ handler = reloc_handlers_rela[type];
+ else
+ handler = NULL;
+
+ if (!handler) {
+ pr_err("%s: Unknown relocation type %u\n",
+ me->name, type);
+ return -EINVAL;
+ }
+
+ v = sym->st_value + rel[i].r_addend;
+
+ if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) {
+ unsigned int j;
+
+ for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
+ u64 hi20_loc =
+ sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[j].r_offset;
+ /* Find the corresponding HI20 PC-relative relocation entry */
+ if (hi20_loc == sym->st_value) {
+ Elf_Sym *hi20_sym =
+ (Elf_Sym *)sechdrs[symindex].sh_addr
+ + ELF_RISCV_R_SYM(rel[j].r_info);
+ u64 hi20_sym_val =
+ hi20_sym->st_value
+ + rel[j].r_addend;
+ /* Calculate lo12 */
+ s64 offset = hi20_sym_val - hi20_loc;
+ s32 hi20 = (offset + 0x800) & 0xfffff000;
+ s32 lo12 = offset - hi20;
+ v = lo12;
+ break;
+ }
+ }
+ if (j == sechdrs[relsec].sh_size / sizeof(*rel)) {
+ pr_err(
+ "%s: Can not find HI20 PC-relative relocation information\n",
+ me->name);
+ return -EINVAL;
+ }
+ }
+
+ res = handler(me, location, v);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
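
The hi20/lo12 arithmetic used throughout the handlers above relies on a 32-bit offset splitting into a rounded upper part plus a 12-bit signed remainder that sum back to the original value. A standalone sketch of that split with a made-up offset (illustrative only):

    /* Illustrative: splitting a PC-relative offset for AUIPC + addi/load/store. */
    s32 offset = 0x12345;                          /* hypothetical example value */
    s32 hi20 = (offset + 0x800) & 0xfffff000;      /* round so the remainder fits 12 signed bits */
    s32 lo12 = offset - hi20;                      /* always in [-2048, 2047] */
    /* AUIPC adds hi20 to the PC, the paired I/S-type instruction adds lo12,
     * and hi20 + lo12 == offset exactly, as apply_r_riscv_call_plt_rela relies on. */
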
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
new file mode 100644
index 000000000000..0d90dcc1fbd3
--- /dev/null
+++ b/arch/riscv/kernel/process.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ * Chen Liqin <liqin.chen@sunplusct.com>
+ * Lennox Wu <lennox.wu@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/tick.h>
+#include <linux/ptrace.h>
+
+#include <asm/unistd.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/csr.h>
+#include <asm/string.h>
+#include <asm/switch_to.h>
+
+extern asmlinkage void ret_from_fork(void);
+extern asmlinkage void ret_from_kernel_thread(void);
+
+void arch_cpu_idle(void)
+{
+ wait_for_interrupt();
+ local_irq_enable();
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ show_regs_print_info(KERN_DEFAULT);
+
+ pr_cont("sepc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
+ regs->sepc, regs->ra, regs->sp);
+ pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
+ regs->gp, regs->tp, regs->t0);
+ pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
+ regs->t1, regs->t2, regs->s0);
+ pr_cont(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
+ regs->s1, regs->a0, regs->a1);
+ pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
+ regs->a2, regs->a3, regs->a4);
+ pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
+ regs->a5, regs->a6, regs->a7);
+ pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
+ regs->s2, regs->s3, regs->s4);
+ pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
+ regs->s5, regs->s6, regs->s7);
+ pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
+ regs->s8, regs->s9, regs->s10);
+ pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
+ regs->s11, regs->t3, regs->t4);
+ pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
+ regs->t5, regs->t6);
+
+ pr_cont("sstatus: " REG_FMT " sbadaddr: " REG_FMT " scause: " REG_FMT "\n",
+ regs->sstatus, regs->sbadaddr, regs->scause);
+}
+
+void start_thread(struct pt_regs *regs, unsigned long pc,
+ unsigned long sp)
+{
+ regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL;
+ regs->sepc = pc;
+ regs->sp = sp;
+ set_fs(USER_DS);
+}
+
+void flush_thread(void)
+{
+ /*
+ * Reset FPU context
+ * frm: round to nearest, ties to even (IEEE default)
+ * fflags: accrued exceptions cleared
+ */
+ memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+ fstate_save(src, task_pt_regs(src));
+ *dst = *src;
+ return 0;
+}
+
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long arg, struct task_struct *p)
+{
+ struct pt_regs *childregs = task_pt_regs(p);
+
+ /* p->thread holds context to be restored by __switch_to() */
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ /* Kernel thread */
+ const register unsigned long gp __asm__ ("gp");
+ memset(childregs, 0, sizeof(struct pt_regs));
+ childregs->gp = gp;
+ childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */
+
+ p->thread.ra = (unsigned long)ret_from_kernel_thread;
+ p->thread.s[0] = usp; /* fn */
+ p->thread.s[1] = arg;
+ } else {
+ *childregs = *(current_pt_regs());
+ if (usp) /* User fork */
+ childregs->sp = usp;
+ if (clone_flags & CLONE_SETTLS)
+ childregs->tp = childregs->a5;
+ childregs->a0 = 0; /* Return value of fork() */
+ p->thread.ra = (unsigned long)ret_from_fork;
+ }
+ p->thread.sp = (unsigned long)childregs; /* kernel sp */
+ return 0;
+}
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
new file mode 100644
index 000000000000..ba3e80712797
--- /dev/null
+++ b/arch/riscv/kernel/ptrace.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ * Copyright 2015 Regents of the University of California
+ * Copyright 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copied from arch/tile/kernel/ptrace.c
+ */
+
+#include <asm/ptrace.h>
+#include <asm/syscall.h>
+#include <asm/thread_info.h>
+#include <linux/ptrace.h>
+#include <linux/elf.h>
+#include <linux/regset.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/tracehook.h>
+#include <trace/events/syscalls.h>
+
+enum riscv_regset {
+ REGSET_X,
+};
+
+static int riscv_gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(target);
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
+}
+
+static int riscv_gpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(target);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
+ return ret;
+}
+
+
+static const struct user_regset riscv_user_regset[] = {
+ [REGSET_X] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(elf_greg_t),
+ .align = sizeof(elf_greg_t),
+ .get = &riscv_gpr_get,
+ .set = &riscv_gpr_set,
+ },
+};
+
+static const struct user_regset_view riscv_user_native_view = {
+ .name = "riscv",
+ .e_machine = EM_RISCV,
+ .regsets = riscv_user_regset,
+ .n = ARRAY_SIZE(riscv_user_regset),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &riscv_user_native_view;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ long ret = -EIO;
+
+ switch (request) {
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Allows PTRACE_SYSCALL to work. These are called from entry.S in
+ * {handle,ret_from}_syscall.
+ */
+void do_syscall_trace_enter(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ if (tracehook_report_syscall_entry(regs))
+ syscall_set_nr(current, regs, -1);
+
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_enter(regs, syscall_get_nr(current, regs));
+#endif
+}
+
+void do_syscall_trace_exit(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, 0);
+
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_exit(regs, regs->a0);
+#endif
+}
diff --git a/arch/riscv/kernel/reset.c b/arch/riscv/kernel/reset.c
new file mode 100644
index 000000000000..2a53d26ffdd6
--- /dev/null
+++ b/arch/riscv/kernel/reset.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/reboot.h>
+#include <linux/export.h>
+#include <asm/sbi.h>
+
+void (*pm_power_off)(void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);
+
+void machine_restart(char *cmd)
+{
+ do_kernel_restart(cmd);
+ while (1);
+}
+
+void machine_halt(void)
+{
+ machine_power_off();
+}
+
+void machine_power_off(void)
+{
+ sbi_shutdown();
+ while (1);
+}
diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c
new file mode 100644
index 000000000000..23cc81ec9e94
--- /dev/null
+++ b/arch/riscv/kernel/riscv_ksyms.c
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2017 Zihao Yu
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/export.h>
+#include <linux/uaccess.h>
+
+/*
+ * Assembly functions that may be used (directly or indirectly) by modules
+ */
+EXPORT_SYMBOL(__copy_user);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
new file mode 100644
index 000000000000..de7db114c315
--- /dev/null
+++ b/arch/riscv/kernel/setup.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ * Chen Liqin <liqin.chen@sunplusct.com>
+ * Lennox Wu <lennox.wu@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/sched.h>
+#include <linux/initrd.h>
+#include <linux/console.h>
+#include <linux/screen_info.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/sched/task.h>
+
+#include <asm/setup.h>
+#include <asm/sections.h>
+#include <asm/pgtable.h>
+#include <asm/smp.h>
+#include <asm/sbi.h>
+#include <asm/tlbflush.h>
+#include <asm/thread_info.h>
+
+#ifdef CONFIG_HVC_RISCV_SBI
+#include <asm/hvc_riscv_sbi.h>
+#endif
+
+#ifdef CONFIG_DUMMY_CONSOLE
+struct screen_info screen_info = {
+ .orig_video_lines = 30,
+ .orig_video_cols = 80,
+ .orig_video_mode = 0,
+ .orig_video_ega_bx = 0,
+ .orig_video_isVGA = 1,
+ .orig_video_points = 8
+};
+#endif
+
+#ifdef CONFIG_CMDLINE_BOOL
+static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+#endif /* CONFIG_CMDLINE_BOOL */
+
+unsigned long va_pa_offset;
+unsigned long pfn_base;
+
+/* The lucky hart to first increment this variable will boot the other cores */
+atomic_t hart_lottery;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+static void __init setup_initrd(void)
+{
+ extern char __initramfs_start[];
+ extern unsigned long __initramfs_size;
+ unsigned long size;
+
+ if (__initramfs_size > 0) {
+ initrd_start = (unsigned long)(&__initramfs_start);
+ initrd_end = initrd_start + __initramfs_size;
+ }
+
+ if (initrd_start >= initrd_end) {
+ printk(KERN_INFO "initrd not found or empty");
+ goto disable;
+ }
+ if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
+ printk(KERN_ERR "initrd extends beyond end of memory");
+ goto disable;
+ }
+
+ size = initrd_end - initrd_start;
+ memblock_reserve(__pa(initrd_start), size);
+ initrd_below_start_ok = 1;
+
+ printk(KERN_INFO "Initial ramdisk at: 0x%p (%lu bytes)\n",
+ (void *)(initrd_start), size);
+ return;
+disable:
+ pr_cont(" - disabling initrd\n");
+ initrd_start = 0;
+ initrd_end = 0;
+}
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+#define NUM_SWAPPER_PMDS ((uintptr_t)-PAGE_OFFSET >> PGDIR_SHIFT)
+pmd_t swapper_pmd[PTRS_PER_PMD*((-PAGE_OFFSET)/PGDIR_SIZE)] __page_aligned_bss;
+pmd_t trampoline_pmd[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+#endif
+
+asmlinkage void __init setup_vm(void)
+{
+ extern char _start;
+ uintptr_t i;
+ uintptr_t pa = (uintptr_t) &_start;
+ pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_EXEC);
+
+ va_pa_offset = PAGE_OFFSET - pa;
+ pfn_base = PFN_DOWN(pa);
+
+ /* Sanity check alignment and size */
+ BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
+ BUG_ON((pa % (PAGE_SIZE * PTRS_PER_PTE)) != 0);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+ trampoline_pg_dir[(PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD] =
+ pfn_pgd(PFN_DOWN((uintptr_t)trampoline_pmd),
+ __pgprot(_PAGE_TABLE));
+ trampoline_pmd[0] = pfn_pmd(PFN_DOWN(pa), prot);
+
+ for (i = 0; i < (-PAGE_OFFSET)/PGDIR_SIZE; ++i) {
+ size_t o = (PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD + i;
+ swapper_pg_dir[o] =
+ pfn_pgd(PFN_DOWN((uintptr_t)swapper_pmd) + i,
+ __pgprot(_PAGE_TABLE));
+ }
+ for (i = 0; i < ARRAY_SIZE(swapper_pmd); i++)
+ swapper_pmd[i] = pfn_pmd(PFN_DOWN(pa + i * PMD_SIZE), prot);
+#else
+ trampoline_pg_dir[(PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD] =
+ pfn_pgd(PFN_DOWN(pa), prot);
+
+ for (i = 0; i < (-PAGE_OFFSET)/PGDIR_SIZE; ++i) {
+ size_t o = (PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD + i;
+ swapper_pg_dir[o] =
+ pfn_pgd(PFN_DOWN(pa + i * PGDIR_SIZE), prot);
+ }
+#endif
+}
+
+void __init sbi_save(unsigned int hartid, void *dtb)
+{
+ early_init_dt_scan(__va(dtb));
+}
+
+/*
+ * Allow the user to manually add a memory region (in case DTS is broken);
+ * "mem_end=nn[KkMmGg]"
+ */
+static int __init mem_end_override(char *p)
+{
+ resource_size_t base, end;
+
+ if (!p)
+ return -EINVAL;
+ base = (uintptr_t) __pa(PAGE_OFFSET);
+ end = memparse(p, &p) & PMD_MASK;
+ if (end == 0)
+ return -EINVAL;
+ memblock_add(base, end - base);
+ return 0;
+}
+early_param("mem_end", mem_end_override);
+
+static void __init setup_bootmem(void)
+{
+ struct memblock_region *reg;
+ phys_addr_t mem_size = 0;
+
+ /* Find the memory region containing the kernel */
+ for_each_memblock(memory, reg) {
+ phys_addr_t vmlinux_end = __pa(_end);
+ phys_addr_t end = reg->base + reg->size;
+
+ if (reg->base <= vmlinux_end && vmlinux_end <= end) {
+ /*
+ * Reserve from the start of the region to the end of
+ * the kernel
+ */
+ memblock_reserve(reg->base, vmlinux_end - reg->base);
+ mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
+ }
+ }
+ BUG_ON(mem_size == 0);
+
+ set_max_mapnr(PFN_DOWN(mem_size));
+ max_low_pfn = pfn_base + PFN_DOWN(mem_size);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ setup_initrd();
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+ memblock_allow_resize();
+ memblock_dump_all();
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+#if defined(CONFIG_HVC_RISCV_SBI)
+ if (likely(early_console == NULL)) {
+ early_console = &riscv_sbi_early_console_dev;
+ register_console(early_console);
+ }
+#endif
+
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+ if (builtin_cmdline[0] != '\0') {
+ /* Append bootloader command line to built-in */
+ strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
+ strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+ }
+#endif /* CONFIG_CMDLINE_OVERRIDE */
+#endif /* CONFIG_CMDLINE_BOOL */
+ *cmdline_p = boot_command_line;
+
+ parse_early_param();
+
+ init_mm.start_code = (unsigned long) _stext;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = (unsigned long) _end;
+
+ setup_bootmem();
+ paging_init();
+ unflatten_device_tree();
+
+#ifdef CONFIG_SMP
+ setup_smp();
+#endif
+
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#endif
+
+ riscv_fill_hwcap();
+}
+
+static int __init riscv_device_init(void)
+{
+ return of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}
+subsys_initcall_sync(riscv_device_init);
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
new file mode 100644
index 000000000000..718d0c984ef0
--- /dev/null
+++ b/arch/riscv/kernel/signal.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ * Chen Liqin <liqin.chen@sunplusct.com>
+ * Lennox Wu <lennox.wu@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ */
+
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+#include <linux/linkage.h>
+
+#include <asm/ucontext.h>
+#include <asm/vdso.h>
+#include <asm/switch_to.h>
+#include <asm/csr.h>
+
+#define DEBUG_SIG 0
+
+struct rt_sigframe {
+ struct siginfo info;
+ struct ucontext uc;
+};
+
+static long restore_d_state(struct pt_regs *regs,
+ struct __riscv_d_ext_state __user *state)
+{
+ long err;
+ err = __copy_from_user(&current->thread.fstate, state, sizeof(*state));
+ if (likely(!err))
+ fstate_restore(current, regs);
+ return err;
+}
+
+static long save_d_state(struct pt_regs *regs,
+ struct __riscv_d_ext_state __user *state)
+{
+ fstate_save(current, regs);
+ return __copy_to_user(state, &current->thread.fstate, sizeof(*state));
+}
+
+static long restore_sigcontext(struct pt_regs *regs,
+ struct sigcontext __user *sc)
+{
+ long err;
+ size_t i;
+ /* sc_regs is structured the same as the start of pt_regs */
+ err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs));
+ if (unlikely(err))
+ return err;
+ /* Restore the floating-point state. */
+ err = restore_d_state(regs, &sc->sc_fpregs.d);
+ if (unlikely(err))
+ return err;
+ /* We support no other extension state at this time. */
+ for (i = 0; i < ARRAY_SIZE(sc->sc_fpregs.q.reserved); i++) {
+ u32 value;
+ err = __get_user(value, &sc->sc_fpregs.q.reserved[i]);
+ if (unlikely(err))
+ break;
+ if (value != 0)
+ return -EINVAL;
+ }
+ return err;
+}
+
+SYSCALL_DEFINE0(rt_sigreturn)
+{
+ struct pt_regs *regs = current_pt_regs();
+ struct rt_sigframe __user *frame;
+ struct task_struct *task;
+ sigset_t set;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ frame = (struct rt_sigframe __user *)regs->sp;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+ goto badframe;
+
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
+ return regs->a0;
+
+badframe:
+ task = current;
+ if (show_unhandled_signals) {
+ pr_info_ratelimited(
+ "%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n",
+ task->comm, task_pid_nr(task), __func__,
+ frame, (void *)regs->sepc, (void *)regs->sp);
+ }
+ force_sig(SIGSEGV, task);
+ return 0;
+}
+
+static long setup_sigcontext(struct rt_sigframe __user *frame,
+ struct pt_regs *regs)
+{
+ struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+ long err;
+ size_t i;
+ /* sc_regs is structured the same as the start of pt_regs */
+ err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs));
+ /* Save the floating-point state. */
+ err |= save_d_state(regs, &sc->sc_fpregs.d);
+ /* We support no other extension state at this time. */
+ for (i = 0; i < ARRAY_SIZE(sc->sc_fpregs.q.reserved); i++)
+ err |= __put_user(0, &sc->sc_fpregs.q.reserved[i]);
+ return err;
+}
+
+static inline void __user *get_sigframe(struct ksignal *ksig,
+ struct pt_regs *regs, size_t framesize)
+{
+ unsigned long sp;
+ /* Default to using normal stack */
+ sp = regs->sp;
+
+ /*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
+ return (void __user __force *)(-1UL);
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ sp = sigsp(sp, ksig) - framesize;
+
+ /* Align the stack frame. */
+ sp &= ~0xfUL;
+
+ return (void __user *)sp;
+}
+
+
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ long err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ return -EFAULT;
+
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(NULL, &frame->uc.uc_link);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+ err |= setup_sigcontext(frame, regs);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ if (err)
+ return -EFAULT;
+
+ /* Set up to return from userspace. */
+ regs->ra = (unsigned long)VDSO_SYMBOL(
+ current->mm->context.vdso, rt_sigreturn);
+
+ /*
+ * Set up registers for signal handler.
+ * Registers that we don't modify keep the value they had from
+ * user-space at the time we took the signal.
+ * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+ * since some things rely on this (e.g. glibc's debug/segfault.c).
+ */
+ regs->sepc = (unsigned long)ksig->ka.sa.sa_handler;
+ regs->sp = (unsigned long)frame;
+ regs->a0 = ksig->sig; /* a0: signal number */
+ regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */
+ regs->a2 = (unsigned long)(&frame->uc); /* a2: ucontext pointer */
+
+#if DEBUG_SIG
+ pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n",
+ current->comm, task_pid_nr(current), ksig->sig,
+ (void *)regs->sepc, (void *)regs->ra, frame);
+#endif
+
+ return 0;
+}
+
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ sigset_t *oldset = sigmask_to_save();
+ int ret;
+
+ /* Are we from a system call? */
+ if (regs->scause == EXC_SYSCALL) {
+ /* If so, check system call restarting.. */
+ switch (regs->a0) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->a0 = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+ regs->a0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->a0 = regs->orig_a0;
+ regs->sepc -= 0x4;
+ break;
+ }
+ }
+
+ /* Set up the stack frame */
+ ret = setup_rt_frame(ksig, oldset, regs);
+
+ signal_setup_done(ret, ksig, 0);
+}
+
+static void do_signal(struct pt_regs *regs)
+{
+ struct ksignal ksig;
+
+ if (get_signal(&ksig)) {
+ /* Actually deliver the signal */
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ /* Did we come from a system call? */
+ if (regs->scause == EXC_SYSCALL) {
+ /* Restart the system call - no handlers present */
+ switch (regs->a0) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ regs->a0 = regs->orig_a0;
+ regs->sepc -= 0x4;
+ break;
+ case -ERESTART_RESTARTBLOCK:
+ regs->a0 = regs->orig_a0;
+ regs->a7 = __NR_restart_syscall;
+ regs->sepc -= 0x4;
+ break;
+ }
+ }
+
+ /*
+ * If there is no signal to deliver, we just put the saved
+ * sigmask back.
+ */
+ restore_saved_sigmask();
+}
+
+/*
+ * notification of userspace execution resumption
+ * - triggered by the _TIF_WORK_MASK flags
+ */
+asmlinkage void do_notify_resume(struct pt_regs *regs,
+ unsigned long thread_info_flags)
+{
+ /* Handle pending signal delivery */
+ if (thread_info_flags & _TIF_SIGPENDING)
+ do_signal(regs);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ }
+}
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
new file mode 100644
index 000000000000..b4a71ec5906f
--- /dev/null
+++ b/arch/riscv/kernel/smp.c
@@ -0,0 +1,110 @@
+/*
+ * SMP initialisation and IPI support
+ * Based on arch/arm64/kernel/smp.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+
+#include <asm/sbi.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/* A collection of single bit ipi messages. */
+static struct {
+ unsigned long bits ____cacheline_aligned;
+} ipi_data[NR_CPUS] __cacheline_aligned;
+
+enum ipi_message_type {
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNC,
+ IPI_MAX
+};
+
+irqreturn_t handle_ipi(void)
+{
+ unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
+
+ /* Clear pending IPI */
+ csr_clear(sip, SIE_SSIE);
+
+ while (true) {
+ unsigned long ops;
+
+ /* Order bit clearing and data access. */
+ mb();
+
+ ops = xchg(pending_ipis, 0);
+ if (ops == 0)
+ return IRQ_HANDLED;
+
+ if (ops & (1 << IPI_RESCHEDULE))
+ scheduler_ipi();
+
+ if (ops & (1 << IPI_CALL_FUNC))
+ generic_smp_call_function_interrupt();
+
+ BUG_ON((ops >> IPI_MAX) != 0);
+
+ /* Order data access and bit testing. */
+ mb();
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void
+send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
+{
+ int i;
+
+ mb();
+ for_each_cpu(i, to_whom)
+ set_bit(operation, &ipi_data[i].bits);
+
+ mb();
+ sbi_send_ipi(cpumask_bits(to_whom));
+}
+
+void arch_send_call_function_ipi_mask(struct cpumask *mask)
+{
+ send_ipi_message(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
+}
+
+static void ipi_stop(void *unused)
+{
+ while (1)
+ wait_for_interrupt();
+}
+
+void smp_send_stop(void)
+{
+ on_each_cpu(ipi_stop, NULL, 1);
+}
+
+void smp_send_reschedule(int cpu)
+{
+ send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+}
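
The IPI path above is a per-CPU bitmask mailbox: the sender sets a message bit, the barriers order that write against the SBI call, and the receiver atomically swaps the word to zero and handles every bit it finds. One round for a single target CPU looks roughly like this (illustrative, using the same helpers):

    /* Illustrative: one reschedule IPI from sender to receiver. */
    set_bit(IPI_RESCHEDULE, &ipi_data[cpu].bits);      /* sender: mark the request */
    mb();                                              /* order the bit before the doorbell */
    sbi_send_ipi(cpumask_bits(cpumask_of(cpu)));       /* raise the supervisor software IRQ */

    /* receiver, inside handle_ipi(): */
    unsigned long ops = xchg(&ipi_data[smp_processor_id()].bits, 0);
    if (ops & (1 << IPI_RESCHEDULE))
        scheduler_ipi();
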
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
new file mode 100644
index 000000000000..f741458c5a3f
--- /dev/null
+++ b/arch/riscv/kernel/smpboot.c
@@ -0,0 +1,114 @@
+/*
+ * SMP initialisation and IPI support
+ * Based on arch/arm64/kernel/smp.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/percpu.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/sched/task_stack.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/sections.h>
+#include <asm/sbi.h>
+
+void *__cpu_up_stack_pointer[NR_CPUS];
+void *__cpu_up_task_pointer[NR_CPUS];
+
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+}
+
+void __init setup_smp(void)
+{
+ struct device_node *dn = NULL;
+ int hart, im_okay_therefore_i_am = 0;
+
+ while ((dn = of_find_node_by_type(dn, "cpu"))) {
+ hart = riscv_of_processor_hart(dn);
+ if (hart >= 0) {
+ set_cpu_possible(hart, true);
+ set_cpu_present(hart, true);
+ if (hart == smp_processor_id()) {
+ BUG_ON(im_okay_therefore_i_am);
+ im_okay_therefore_i_am = 1;
+ }
+ }
+ }
+
+ BUG_ON(!im_okay_therefore_i_am);
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+ tidle->thread_info.cpu = cpu;
+
+ /*
+ * On RISC-V systems, all harts boot on their own accord. Our _start
+ * selects the first hart to boot the kernel and causes the remainder
+ * of the harts to spin in a loop waiting for their stack pointer to be
+ * set up by that main hart. Writing __cpu_up_stack_pointer signals to
+ * the spinning harts that they can continue the boot process.
+ */
+ smp_mb();
+ __cpu_up_stack_pointer[cpu] = task_stack_page(tidle) + THREAD_SIZE;
+ __cpu_up_task_pointer[cpu] = tidle;
+
+ while (!cpu_online(cpu))
+ cpu_relax();
+
+ return 0;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+/*
+ * C entry point for a secondary processor.
+ */
+asmlinkage void __init smp_callin(void)
+{
+ struct mm_struct *mm = &init_mm;
+
+ /* All kernel threads share the same mm context. */
+ atomic_inc(&mm->mm_count);
+ current->active_mm = mm;
+
+ trap_init();
+ init_clockevent();
+ notify_cpu_starting(smp_processor_id());
+ set_cpu_online(smp_processor_id(), 1);
+ local_flush_tlb_all();
+ local_irq_enable();
+ preempt_disable();
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
new file mode 100644
index 000000000000..559aae781154
--- /dev/null
+++ b/arch/riscv/kernel/stacktrace.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2008 ARM Limited
+ * Copyright (C) 2014 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/kallsyms.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/stacktrace.h>
+
+#ifdef CONFIG_FRAME_POINTER
+
+struct stackframe {
+ unsigned long fp;
+ unsigned long ra;
+};
+
+static void notrace walk_stackframe(struct task_struct *task,
+ struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+{
+ unsigned long fp, sp, pc;
+
+ if (regs) {
+ fp = GET_FP(regs);
+ sp = GET_USP(regs);
+ pc = GET_IP(regs);
+ } else if (task == NULL || task == current) {
+ const register unsigned long current_sp __asm__ ("sp");
+ fp = (unsigned long)__builtin_frame_address(0);
+ sp = current_sp;
+ pc = (unsigned long)walk_stackframe;
+ } else {
+ /* task blocked in __switch_to */
+ fp = task->thread.s[0];
+ sp = task->thread.sp;
+ pc = task->thread.ra;
+ }
+
+ for (;;) {
+ unsigned long low, high;
+ struct stackframe *frame;
+
+ if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
+ break;
+
+ /* Validate frame pointer */
+ low = sp + sizeof(struct stackframe);
+ high = ALIGN(sp, THREAD_SIZE);
+ if (unlikely(fp < low || fp > high || fp & 0x7))
+ break;
+ /* Unwind stack frame */
+ frame = (struct stackframe *)fp - 1;
+ sp = fp;
+ fp = frame->fp;
+ pc = frame->ra - 0x4;
+ }
+}
+
+#else /* !CONFIG_FRAME_POINTER */
+
+static void notrace walk_stackframe(struct task_struct *task,
+ struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+{
+ unsigned long sp, pc;
+ unsigned long *ksp;
+
+ if (regs) {
+ sp = GET_USP(regs);
+ pc = GET_IP(regs);
+ } else if (task == NULL || task == current) {
+ const register unsigned long current_sp __asm__ ("sp");
+ sp = current_sp;
+ pc = (unsigned long)walk_stackframe;
+ } else {
+ /* task blocked in __switch_to */
+ sp = task->thread.sp;
+ pc = task->thread.ra;
+ }
+
+ if (unlikely(sp & 0x7))
+ return;
+
+ ksp = (unsigned long *)sp;
+ while (!kstack_end(ksp)) {
+ if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
+ break;
+ pc = (*ksp++) - 0x4;
+ }
+}
+
+#endif /* CONFIG_FRAME_POINTER */
+
+
+static bool print_trace_address(unsigned long pc, void *arg)
+{
+ print_ip_sym(pc);
+ return false;
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+ pr_cont("Call Trace:\n");
+ walk_stackframe(task, NULL, print_trace_address, NULL);
+}
+
+
+static bool save_wchan(unsigned long pc, void *arg)
+{
+ if (!in_sched_functions(pc)) {
+ unsigned long *p = arg;
+ *p = pc;
+ return true;
+ }
+ return false;
+}
+
+unsigned long get_wchan(struct task_struct *task)
+{
+ unsigned long pc = 0;
+
+ if (likely(task && task != current && task->state != TASK_RUNNING))
+ walk_stackframe(task, NULL, save_wchan, &pc);
+ return pc;
+}
+
+
+#ifdef CONFIG_STACKTRACE
+
+static bool __save_trace(unsigned long pc, void *arg, bool nosched)
+{
+ struct stack_trace *trace = arg;
+
+ if (unlikely(nosched && in_sched_functions(pc)))
+ return false;
+ if (unlikely(trace->skip > 0)) {
+ trace->skip--;
+ return false;
+ }
+
+ trace->entries[trace->nr_entries++] = pc;
+ return (trace->nr_entries >= trace->max_entries);
+}
+
+static bool save_trace(unsigned long pc, void *arg)
+{
+ return __save_trace(pc, arg, false);
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ walk_stackframe(tsk, NULL, save_trace, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ save_stack_trace_tsk(NULL, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif /* CONFIG_STACKTRACE */
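
The frame-pointer walk above assumes the standard RISC-V GCC layout in which the saved return address and previous frame pointer sit immediately below the current frame pointer, which is why each step reads `(struct stackframe *)fp - 1`. One unwind step, spelled out (illustrative):

    /* Illustrative: a single unwind step under the fp convention used above. */
    struct stackframe { unsigned long fp, ra; };
    struct stackframe *frame = (struct stackframe *)fp - 1;  /* fp - 16 on RV64 */
    unsigned long caller_pc = frame->ra - 0x4;   /* step back over the call instruction */
    unsigned long caller_fp = frame->fp;         /* continue from the caller's frame */
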
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
new file mode 100644
index 000000000000..4351be7d0533
--- /dev/null
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2014 Darius Rad <darius@bluespec.com>
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/syscalls.h>
+#include <asm/cmpxchg.h>
+#include <asm/unistd.h>
+
+static long riscv_sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, off_t offset,
+ unsigned long page_shift_offset)
+{
+ if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
+ return -EINVAL;
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ offset >> (PAGE_SHIFT - page_shift_offset));
+}
+
+#ifdef CONFIG_64BIT
+SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, off_t, offset)
+{
+ return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
+}
+#else
+SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, off_t, offset)
+{
+ /*
+ * Note that the shift for mmap2 is constant (12),
+ * regardless of PAGE_SIZE
+ */
+ return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
+}
+#endif /* !CONFIG_64BIT */
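
For mmap2 the userspace offset is always given in 4096-byte units, so the shift applied here is PAGE_SHIFT - 12 whatever the kernel's page size, and the alignment check rejects offsets that are not a multiple of a full page. A worked example assuming a hypothetical 16 KiB page size (PAGE_SHIFT = 14):

    /* Illustrative only: mmap2 offset handling with an assumed PAGE_SHIFT of 14. */
    off_t offset = 8;                              /* 8 * 4096 bytes = 32 KiB from userspace */
    /* ~PAGE_MASK >> 12 == 0x3: the low two bits of offset must be clear,
     * i.e. the byte offset must be a multiple of the 16 KiB page. */
    unsigned long pgoff = offset >> (14 - 12);     /* 2 pages of 16 KiB = 32 KiB, as expected */
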
diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c
new file mode 100644
index 000000000000..4e30dc5fb593
--- /dev/null
+++ b/arch/riscv/kernel/syscall_table.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2009 Arnd Bergmann <arnd@arndb.de>
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <linux/syscalls.h>
+#include <asm-generic/syscalls.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+void *sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
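
The range designator first points every slot at sys_ni_syscall, and the __SYSCALL expansion pulled in from <asm/unistd.h> then overwrites the entries that are actually implemented. Roughly what the compiler sees after preprocessing (the two sample entries are illustrative):

    /* Illustrative expansion; only two sample entries are shown. */
    void *sys_call_table[__NR_syscalls] = {
        [0 ... __NR_syscalls - 1] = sys_ni_syscall,  /* default: not implemented */
        [64] = sys_write,    /* from __SYSCALL(__NR_write, sys_write) */
        [93] = sys_exit,     /* from __SYSCALL(__NR_exit,  sys_exit)  */
    };
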
diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
new file mode 100644
index 000000000000..2463fcca719e
--- /dev/null
+++ b/arch/riscv/kernel/time.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+
+#ifdef CONFIG_RISCV_TIMER
+#include <linux/timer_riscv.h>
+#endif
+
+#include <asm/sbi.h>
+
+unsigned long riscv_timebase;
+
+DECLARE_PER_CPU(struct clock_event_device, riscv_clock_event);
+
+void riscv_timer_interrupt(void)
+{
+#ifdef CONFIG_RISCV_TIMER
+ /*
+ * FIXME: This needs to be cleaned up along with the rest of the IRQ
+ * handling cleanup. See irq.c for more details.
+ */
+ struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
+
+ evdev->event_handler(evdev);
+#endif
+}
+
+void __init init_clockevent(void)
+{
+ timer_probe();
+ csr_set(sie, SIE_STIE);
+}
+
+void __init time_init(void)
+{
+ struct device_node *cpu;
+ u32 prop;
+
+ cpu = of_find_node_by_path("/cpus");
+ if (!cpu || of_property_read_u32(cpu, "timebase-frequency", &prop))
+ panic("RISC-V system with no 'timebase-frequency' in DTS\n");
+ riscv_timebase = prop;
+
+ lpj_fine = riscv_timebase / HZ;
+
+ init_clockevent();
+}
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
new file mode 100644
index 000000000000..93132cb59184
--- /dev/null
+++ b/arch/riscv/kernel/traps.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
+#include <linux/signal.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/csr.h>
+
+int show_unhandled_signals = 1;
+
+extern asmlinkage void handle_exception(void);
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(struct pt_regs *regs, const char *str)
+{
+ static int die_counter;
+ int ret;
+
+ oops_enter();
+
+ spin_lock_irq(&die_lock);
+ console_verbose();
+ bust_spinlocks(1);
+
+ pr_emerg("%s [#%d]\n", str, ++die_counter);
+ print_modules();
+ show_regs(regs);
+
+ ret = notify_die(DIE_OOPS, str, regs, 0, regs->scause, SIGSEGV);
+
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ spin_unlock_irq(&die_lock);
+ oops_exit();
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+ if (ret != NOTIFY_STOP)
+ do_exit(SIGSEGV);
+}
+
+static inline void do_trap_siginfo(int signo, int code,
+ unsigned long addr, struct task_struct *tsk)
+{
+ siginfo_t info;
+
+ info.si_signo = signo;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = (void __user *)addr;
+ force_sig_info(signo, &info, tsk);
+}
+
+void do_trap(struct pt_regs *regs, int signo, int code,
+ unsigned long addr, struct task_struct *tsk)
+{
+ if (show_unhandled_signals && unhandled_signal(tsk, signo)
+ && printk_ratelimit()) {
+ pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
+ tsk->comm, task_pid_nr(tsk), signo, code, addr);
+ print_vma_addr(KERN_CONT " in ", GET_IP(regs));
+ pr_cont("\n");
+ show_regs(regs);
+ }
+
+ do_trap_siginfo(signo, code, addr, tsk);
+}
+
+static void do_trap_error(struct pt_regs *regs, int signo, int code,
+ unsigned long addr, const char *str)
+{
+ if (user_mode(regs)) {
+ do_trap(regs, signo, code, addr, current);
+ } else {
+ if (!fixup_exception(regs))
+ die(regs, str);
+ }
+}
+
+#define DO_ERROR_INFO(name, signo, code, str) \
+asmlinkage void name(struct pt_regs *regs) \
+{ \
+ do_trap_error(regs, signo, code, regs->sepc, "Oops - " str); \
+}
+
+DO_ERROR_INFO(do_trap_unknown,
+ SIGILL, ILL_ILLTRP, "unknown exception");
+DO_ERROR_INFO(do_trap_insn_misaligned,
+ SIGBUS, BUS_ADRALN, "instruction address misaligned");
+DO_ERROR_INFO(do_trap_insn_fault,
+ SIGSEGV, SEGV_ACCERR, "instruction access fault");
+DO_ERROR_INFO(do_trap_insn_illegal,
+ SIGILL, ILL_ILLOPC, "illegal instruction");
+DO_ERROR_INFO(do_trap_load_misaligned,
+ SIGBUS, BUS_ADRALN, "load address misaligned");
+DO_ERROR_INFO(do_trap_load_fault,
+ SIGSEGV, SEGV_ACCERR, "load access fault");
+DO_ERROR_INFO(do_trap_store_misaligned,
+ SIGBUS, BUS_ADRALN, "store (or AMO) address misaligned");
+DO_ERROR_INFO(do_trap_store_fault,
+ SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
+DO_ERROR_INFO(do_trap_ecall_u,
+ SIGILL, ILL_ILLTRP, "environment call from U-mode");
+DO_ERROR_INFO(do_trap_ecall_s,
+ SIGILL, ILL_ILLTRP, "environment call from S-mode");
+DO_ERROR_INFO(do_trap_ecall_m,
+ SIGILL, ILL_ILLTRP, "environment call from M-mode");
+
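
For reference, each DO_ERROR_INFO() line above stamps out one small trap handler; the first one, for example, expands to roughly the following (a sketch of the preprocessor output):

asmlinkage void do_trap_unknown(struct pt_regs *regs)
{
	/* raise SIGILL/ILL_ILLTRP at the faulting PC, or die in kernel mode */
	do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->sepc,
		      "Oops - unknown exception");
}
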
+asmlinkage void do_trap_break(struct pt_regs *regs)
+{
+#ifdef CONFIG_GENERIC_BUG
+ if (!user_mode(regs)) {
+ enum bug_trap_type type;
+
+ type = report_bug(regs->sepc, regs);
+ switch (type) {
+ case BUG_TRAP_TYPE_NONE:
+ break;
+ case BUG_TRAP_TYPE_WARN:
+ regs->sepc += sizeof(bug_insn_t);
+ return;
+ case BUG_TRAP_TYPE_BUG:
+ die(regs, "Kernel BUG");
+ }
+ }
+#endif /* CONFIG_GENERIC_BUG */
+
+ do_trap_siginfo(SIGTRAP, TRAP_BRKPT, regs->sepc, current);
+ regs->sepc += 0x4;
+}
+
+#ifdef CONFIG_GENERIC_BUG
+int is_valid_bugaddr(unsigned long pc)
+{
+ bug_insn_t insn;
+
+ if (pc < PAGE_OFFSET)
+ return 0;
+ if (probe_kernel_address((bug_insn_t __user *)pc, insn))
+ return 0;
+ return (insn == __BUG_INSN);
+}
+#endif /* CONFIG_GENERIC_BUG */
+
+void __init trap_init(void)
+{
+ /*
+ * Set the sscratch register to 0, indicating to the exception
+ * vector that we are presently executing in the kernel
+ */
+ csr_write(sscratch, 0);
+ /* Set the exception vector address */
+ csr_write(stvec, &handle_exception);
+ /* Enable all interrupts */
+ csr_write(sie, -1);
+}
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
new file mode 100644
index 000000000000..e8a178df8144
--- /dev/null
+++ b/arch/riscv/kernel/vdso.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ * Copyright (C) 2012 ARM Limited
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/binfmts.h>
+#include <linux/err.h>
+
+#include <asm/vdso.h>
+
+extern char vdso_start[], vdso_end[];
+
+static unsigned int vdso_pages;
+static struct page **vdso_pagelist;
+
+/*
+ * The vDSO data page.
+ */
+static union {
+ struct vdso_data data;
+ u8 page[PAGE_SIZE];
+} vdso_data_store __page_aligned_data;
+struct vdso_data *vdso_data = &vdso_data_store.data;
+
+static int __init vdso_init(void)
+{
+ unsigned int i;
+
+ vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+ vdso_pagelist =
+ kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
+ if (unlikely(vdso_pagelist == NULL)) {
+ pr_err("vdso: pagelist allocation failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < vdso_pages; i++) {
+ struct page *pg;
+
+ pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
+ ClearPageReserved(pg);
+ vdso_pagelist[i] = pg;
+ }
+ vdso_pagelist[i] = virt_to_page(vdso_data);
+
+ return 0;
+}
+arch_initcall(vdso_init);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long vdso_base, vdso_len;
+ int ret;
+
+ vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
+
+ down_write(&mm->mmap_sem);
+ vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
+ if (unlikely(IS_ERR_VALUE(vdso_base))) {
+ ret = vdso_base;
+ goto end;
+ }
+
+ /*
+ * Put vDSO base into mm struct. We need to do this before calling
+ * install_special_mapping or the perf counter mmap tracking code
+ * will fail to recognise it as a vDSO (since arch_vma_name fails).
+ */
+ mm->context.vdso = (void *)vdso_base;
+
+ ret = install_special_mapping(mm, vdso_base, vdso_len,
+ (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
+ vdso_pagelist);
+
+ if (unlikely(ret))
+ mm->context.vdso = NULL;
+
+end:
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
+ return "[vdso]";
+ return NULL;
+}
+
+/*
+ * Function stubs to prevent linker errors when AT_SYSINFO_EHDR is defined
+ */
+
+int in_gate_area_no_mm(unsigned long addr)
+{
+ return 0;
+}
+
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+ return 0;
+}
+
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+ return NULL;
+}
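
The pages installed by arch_setup_additional_pages() are advertised to each new process through the AT_SYSINFO_EHDR auxiliary-vector entry, which is why the gate-area stubs above exist. A small userspace sketch (illustrative, not part of the patch) that locates the mapping:

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* Address of the vDSO ELF header the kernel mapped, 0 if absent */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	printf("vDSO mapped at %#lx\n", vdso);
	return 0;
}
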
diff --git a/arch/riscv/kernel/vdso/.gitignore b/arch/riscv/kernel/vdso/.gitignore
new file mode 100644
index 000000000000..97c2d69d0289
--- /dev/null
+++ b/arch/riscv/kernel/vdso/.gitignore
@@ -0,0 +1,2 @@
+vdso.lds
+*.tmp
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
new file mode 100644
index 000000000000..523d0a8ac8db
--- /dev/null
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -0,0 +1,63 @@
+# Copied from arch/tile/kernel/vdso/Makefile
+
+# Symbols present in the vdso
+vdso-syms = rt_sigreturn
+
+# Files to link into the vdso
+obj-vdso = $(patsubst %, %.o, $(vdso-syms))
+
+# Build rules
+targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
+obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+obj-y += vdso.o vdso-syms.o
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+
+# Force dependency
+$(obj)/vdso.o: $(obj)/vdso.so
+
+# link rule for the .so file, .lds has to be first
+SYSCFLAGS_vdso.so.dbg = $(c_flags)
+$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold)
+
+# We also create a special relocatable object that should mirror the symbol
+# table and layout of the linked DSO. With ld -R we can then refer to
+# these symbols in the kernel code rather than hand-coded addresses.
+
+SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+ $(call cc-ldoption, -Wl$(comma)--hash-style=both)
+$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
+ $(call if_changed,vdsold)
+
+LDFLAGS_vdso-syms.o := -r -R
+$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
+ $(call if_changed,ld)
+
+# strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+# actual build commands
+# The DSO images are built using a special linker script
+# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
+# Make sure only to export the intended __vdso_xxx symbol offsets.
+quiet_cmd_vdsold = VDSOLD $@
+ cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \
+ -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
+ $(CROSS_COMPILE)objcopy \
+ $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
+
+# install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso.so: $(obj)/vdso.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/riscv/kernel/vdso/rt_sigreturn.S b/arch/riscv/kernel/vdso/rt_sigreturn.S
new file mode 100644
index 000000000000..f5aa3d72acfb
--- /dev/null
+++ b/arch/riscv/kernel/vdso/rt_sigreturn.S
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2014 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+ .text
+ENTRY(__vdso_rt_sigreturn)
+ .cfi_startproc
+ .cfi_signal_frame
+ li a7, __NR_rt_sigreturn
+ scall
+ .cfi_endproc
+ENDPROC(__vdso_rt_sigreturn)
diff --git a/arch/riscv/kernel/vdso/vdso.S b/arch/riscv/kernel/vdso/vdso.S
new file mode 100644
index 000000000000..7055de5f9174
--- /dev/null
+++ b/arch/riscv/kernel/vdso/vdso.S
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2014 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+ __PAGE_ALIGNED_DATA
+
+ .globl vdso_start, vdso_end
+ .balign PAGE_SIZE
+vdso_start:
+ .incbin "arch/riscv/kernel/vdso/vdso.so"
+ .balign PAGE_SIZE
+vdso_end:
+
+ .previous
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
new file mode 100644
index 000000000000..8c9dce95c11d
--- /dev/null
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+OUTPUT_ARCH(riscv)
+
+SECTIONS
+{
+ . = SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+
+ /*
+ * This linker script is used both with -r and with -shared.
+ * For the layouts to match, we need to skip more than enough
+ * space for the dynamic symbol table, etc. If this amount is
+ * insufficient, ld -shared will error; simply increase it here.
+ */
+ . = 0x800;
+ .text : { *(.text .text.*) } :text
+
+ .data : {
+ *(.got.plt) *(.got)
+ *(.data .data.* .gnu.linkonce.d.*)
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ }
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+ LINUX_4.15 {
+ global:
+ __vdso_rt_sigreturn;
+ __vdso_cmpxchg32;
+ __vdso_cmpxchg64;
+ local: *;
+ };
+}
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..ece84991609c
--- /dev/null
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define LOAD_OFFSET PAGE_OFFSET
+#include <asm/vmlinux.lds.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+
+OUTPUT_ARCH(riscv)
+ENTRY(_start)
+
+jiffies = jiffies_64;
+
+SECTIONS
+{
+ /* Beginning of code and text segment */
+ . = LOAD_OFFSET;
+ _start = .;
+ __init_begin = .;
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
+ /* we have to discard exit text and such at runtime, not link time */
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
+ .exit.data :
+ {
+ EXIT_DATA
+ }
+ PERCPU_SECTION(L1_CACHE_BYTES)
+ __init_end = .;
+
+ .text : {
+ _text = .;
+ _stext = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ ENTRY_TEXT
+ IRQENTRY_TEXT
+ *(.fixup)
+ _etext = .;
+ }
+
+ /* Start of data section */
+ _sdata = .;
+ RO_DATA_SECTION(L1_CACHE_BYTES)
+ .srodata : {
+ *(.srodata*)
+ }
+
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ .sdata : {
+ __global_pointer$ = . + 0x800;
+ *(.sdata*)
+ /* End of data section */
+ _edata = .;
+ *(.sbss*)
+ }
+
+ BSS_SECTION(0, 0, 0)
+
+ EXCEPTION_TABLE(0x10)
+ NOTES
+
+ .rel.dyn : {
+ *(.rel.dyn*)
+ }
+
+ _end = .;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+
+ DISCARDS
+}
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
new file mode 100644
index 000000000000..596c2ca40d63
--- /dev/null
+++ b/arch/riscv/lib/Makefile
@@ -0,0 +1,6 @@
+lib-y += delay.o
+lib-y += memcpy.o
+lib-y += memset.o
+lib-y += uaccess.o
+
+lib-$(CONFIG_32BIT) += udivdi3.o
diff --git a/arch/riscv/lib/delay.c b/arch/riscv/lib/delay.c
new file mode 100644
index 000000000000..1cc4ac3964b4
--- /dev/null
+++ b/arch/riscv/lib/delay.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/param.h>
+#include <linux/timex.h>
+#include <linux/export.h>
+
+/*
+ * This is copied from arch/arm/include/asm/delay.h
+ *
+ * Loop (or tick) based delay:
+ *
+ * loops = loops_per_jiffy * jiffies_per_sec * delay_us / us_per_sec
+ *
+ * where:
+ *
+ * jiffies_per_sec = HZ
+ * us_per_sec = 1000000
+ *
+ * Therefore the constant part is HZ / 1000000 which is a small
+ * fractional number. To make this usable with integer math, we
+ * scale up this constant by 2^31, perform the actual multiplication,
+ * and scale the result back down by 2^31 with a simple shift:
+ *
+ * loops = (loops_per_jiffy * delay_us * UDELAY_MULT) >> 31
+ *
+ * where:
+ *
+ * UDELAY_MULT = 2^31 * HZ / 1000000
+ * = (2^31 / 1000000) * HZ
+ * = 2147.483648 * HZ
+ * = 2147 * HZ + 483648 * HZ / 1000000
+ *
+ * 31 is the biggest scale shift value that won't overflow 32 bits for
+ * delay_us * UDELAY_MULT assuming HZ <= 1000 and delay_us <= 2000.
+ */
+#define MAX_UDELAY_US 2000
+#define MAX_UDELAY_HZ 1000
+#define UDELAY_MULT (2147UL * HZ + 483648UL * HZ / 1000000UL)
+#define UDELAY_SHIFT 31
+
+#if HZ > MAX_UDELAY_HZ
+#error "HZ > MAX_UDELAY_HZ"
+#endif
+
+/*
+ * RISC-V supports both UDELAY and NDELAY. This is largely the same as above,
+ * but with different constants. I added 10 bits to the shift to get this, but
+ * the result is that I need a 64-bit multiply, which is slow on 32-bit
+ * platforms.
+ *
+ * NDELAY_MULT = 2^41 * HZ / 1000000000
+ * = (2^41 / 1000000000) * HZ
+ * = 2199.02325555 * HZ
+ * = 2199 * HZ + 23255550 * HZ / 1000000000
+ *
+ * The maximum here is to avoid 64-bit overflow, but it isn't checked as it
+ * won't happen.
+ */
+#define MAX_NDELAY_NS (1ULL << 42)
+#define MAX_NDELAY_HZ MAX_UDELAY_HZ
+#define NDELAY_MULT ((unsigned long long)(2199ULL * HZ + 23255550ULL * HZ / 1000000000ULL))
+#define NDELAY_SHIFT 41
+
+#if HZ > MAX_NDELAY_HZ
+#error "HZ > MAX_NDELAY_HZ"
+#endif
+
+void __delay(unsigned long cycles)
+{
+ u64 t0 = get_cycles();
+
+ while ((unsigned long)(get_cycles() - t0) < cycles)
+ cpu_relax();
+}
+
+void udelay(unsigned long usecs)
+{
+ unsigned long ucycles = usecs * lpj_fine * UDELAY_MULT;
+
+ if (unlikely(usecs > MAX_UDELAY_US)) {
+ __delay((u64)usecs * riscv_timebase / 1000000ULL);
+ return;
+ }
+
+ __delay(ucycles >> UDELAY_SHIFT);
+}
+EXPORT_SYMBOL(udelay);
+
+void ndelay(unsigned long nsecs)
+{
+ /*
+ * This doesn't bother checking for overflow, as it won't happen: it
+ * would take over an hour of delay to overflow.
+ */
+ unsigned long long ncycles = nsecs * lpj_fine * NDELAY_MULT;
+ __delay(ncycles >> NDELAY_SHIFT);
+}
+EXPORT_SYMBOL(ndelay);
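
To make the fixed-point arithmetic above concrete, here is a worked example as a standalone sketch with assumed numbers (HZ = 100 and a 10 MHz timebase), showing that (lpj_fine * usecs * UDELAY_MULT) >> 31 lands within a cycle of the exact usecs * timebase / 10^6:

#include <stdio.h>

int main(void)
{
	const unsigned long hz = 100;                    /* assumed HZ */
	const unsigned long timebase = 10000000;         /* assumed ticks per second */
	const unsigned long lpj_fine = timebase / hz;    /* 100000 ticks per jiffy */
	const unsigned long udelay_mult = 2147UL * hz + 483648UL * hz / 1000000UL;
	const unsigned long usecs = 100;

	unsigned long long cycles =
		((unsigned long long)usecs * lpj_fine * udelay_mult) >> 31;

	printf("udelay(%lu): ~%llu cycles, exact %lu\n",
	       usecs, cycles, usecs * timebase / 1000000);
	return 0;
}
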
diff --git a/arch/riscv/lib/memcpy.S b/arch/riscv/lib/memcpy.S
new file mode 100644
index 000000000000..80f9c1a5c598
--- /dev/null
+++ b/arch/riscv/lib/memcpy.S
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2013 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+/* void *memcpy(void *, const void *, size_t) */
+ENTRY(memcpy)
+ move t6, a0 /* Preserve return value */
+
+ /* Defer to byte-oriented copy for small sizes */
+ sltiu a3, a2, 128
+ bnez a3, 4f
+ /* Use word-oriented copy only if low-order bits match */
+ andi a3, t6, SZREG-1
+ andi a4, a1, SZREG-1
+ bne a3, a4, 4f
+
+ beqz a3, 2f /* Skip if already aligned */
+ /*
+ * Round to nearest XLEN-aligned address
+ * greater than or equal to start address
+ */
+ andi a3, a1, ~(SZREG-1)
+ addi a3, a3, SZREG
+ /* Handle initial misalignment */
+ sub a4, a3, a1
+1:
+ lb a5, 0(a1)
+ addi a1, a1, 1
+ sb a5, 0(t6)
+ addi t6, t6, 1
+ bltu a1, a3, 1b
+ sub a2, a2, a4 /* Update count */
+
+2:
+ andi a4, a2, ~((16*SZREG)-1)
+ beqz a4, 4f
+ add a3, a1, a4
+3:
+ REG_L a4, 0(a1)
+ REG_L a5, SZREG(a1)
+ REG_L a6, 2*SZREG(a1)
+ REG_L a7, 3*SZREG(a1)
+ REG_L t0, 4*SZREG(a1)
+ REG_L t1, 5*SZREG(a1)
+ REG_L t2, 6*SZREG(a1)
+ REG_L t3, 7*SZREG(a1)
+ REG_L t4, 8*SZREG(a1)
+ REG_L t5, 9*SZREG(a1)
+ REG_S a4, 0(t6)
+ REG_S a5, SZREG(t6)
+ REG_S a6, 2*SZREG(t6)
+ REG_S a7, 3*SZREG(t6)
+ REG_S t0, 4*SZREG(t6)
+ REG_S t1, 5*SZREG(t6)
+ REG_S t2, 6*SZREG(t6)
+ REG_S t3, 7*SZREG(t6)
+ REG_S t4, 8*SZREG(t6)
+ REG_S t5, 9*SZREG(t6)
+ REG_L a4, 10*SZREG(a1)
+ REG_L a5, 11*SZREG(a1)
+ REG_L a6, 12*SZREG(a1)
+ REG_L a7, 13*SZREG(a1)
+ REG_L t0, 14*SZREG(a1)
+ REG_L t1, 15*SZREG(a1)
+ addi a1, a1, 16*SZREG
+ REG_S a4, 10*SZREG(t6)
+ REG_S a5, 11*SZREG(t6)
+ REG_S a6, 12*SZREG(t6)
+ REG_S a7, 13*SZREG(t6)
+ REG_S t0, 14*SZREG(t6)
+ REG_S t1, 15*SZREG(t6)
+ addi t6, t6, 16*SZREG
+ bltu a1, a3, 3b
+ andi a2, a2, (16*SZREG)-1 /* Update count */
+
+4:
+ /* Handle trailing misalignment */
+ beqz a2, 6f
+ add a3, a1, a2
+
+ /* Use word-oriented copy if co-aligned to word boundary */
+ or a5, a1, t6
+ or a5, a5, a3
+ andi a5, a5, 3
+ bnez a5, 5f
+7:
+ lw a4, 0(a1)
+ addi a1, a1, 4
+ sw a4, 0(t6)
+ addi t6, t6, 4
+ bltu a1, a3, 7b
+
+ ret
+
+5:
+ lb a4, 0(a1)
+ addi a1, a1, 1
+ sb a4, 0(t6)
+ addi t6, t6, 1
+ bltu a1, a3, 5b
+6:
+ ret
+END(memcpy)
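
In rough C, the strategy of the routine above is: fall back to byte copies for small or non-co-aligned buffers, byte-copy up to the next register-width boundary, then move whole registers (the assembly additionally moves 16 registers per loop pass). A sketch only, assuming the usual flat memory model:

#include <stddef.h>
#include <stdint.h>

void *memcpy_sketch(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	/* Register-width copy only pays off when src and dst are co-aligned */
	if (n >= 128 && (((uintptr_t)d ^ (uintptr_t)s) & (sizeof(long) - 1)) == 0) {
		/* Handle initial misalignment byte by byte */
		while ((uintptr_t)s & (sizeof(long) - 1)) {
			*d++ = *s++;
			n--;
		}
		/* Bulk copy one register at a time */
		for (; n >= sizeof(long); n -= sizeof(long)) {
			*(long *)d = *(const long *)s;
			d += sizeof(long);
			s += sizeof(long);
		}
	}
	/* Small, unaligned, and trailing bytes */
	while (n--)
		*d++ = *s++;
	return dst;
}
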
diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S
new file mode 100644
index 000000000000..a790107cf4c9
--- /dev/null
+++ b/arch/riscv/lib/memset.S
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2013 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+/* void *memset(void *, int, size_t) */
+ENTRY(memset)
+ move t0, a0 /* Preserve return value */
+
+ /* Defer to byte-oriented fill for small sizes */
+ sltiu a3, a2, 16
+ bnez a3, 4f
+
+ /*
+ * Round to nearest XLEN-aligned address
+ * greater than or equal to start address
+ */
+ addi a3, t0, SZREG-1
+ andi a3, a3, ~(SZREG-1)
+ beq a3, t0, 2f /* Skip if already aligned */
+ /* Handle initial misalignment */
+ sub a4, a3, t0
+1:
+ sb a1, 0(t0)
+ addi t0, t0, 1
+ bltu t0, a3, 1b
+ sub a2, a2, a4 /* Update count */
+
+2: /* Duff's device with 32 XLEN stores per iteration */
+ /* Broadcast value into all bytes */
+ andi a1, a1, 0xff
+ slli a3, a1, 8
+ or a1, a3, a1
+ slli a3, a1, 16
+ or a1, a3, a1
+#ifdef CONFIG_64BIT
+ slli a3, a1, 32
+ or a1, a3, a1
+#endif
+
+ /* Calculate end address */
+ andi a4, a2, ~(SZREG-1)
+ add a3, t0, a4
+
+ andi a4, a4, 31*SZREG /* Calculate remainder */
+ beqz a4, 3f /* Shortcut if no remainder */
+ neg a4, a4
+ addi a4, a4, 32*SZREG /* Calculate initial offset */
+
+ /* Adjust start address with offset */
+ sub t0, t0, a4
+
+ /* Jump into loop body */
+ /* Assumes 32-bit instruction lengths */
+ la a5, 3f
+#ifdef CONFIG_64BIT
+ srli a4, a4, 1
+#endif
+ add a5, a5, a4
+ jr a5
+3:
+ REG_S a1, 0(t0)
+ REG_S a1, SZREG(t0)
+ REG_S a1, 2*SZREG(t0)
+ REG_S a1, 3*SZREG(t0)
+ REG_S a1, 4*SZREG(t0)
+ REG_S a1, 5*SZREG(t0)
+ REG_S a1, 6*SZREG(t0)
+ REG_S a1, 7*SZREG(t0)
+ REG_S a1, 8*SZREG(t0)
+ REG_S a1, 9*SZREG(t0)
+ REG_S a1, 10*SZREG(t0)
+ REG_S a1, 11*SZREG(t0)
+ REG_S a1, 12*SZREG(t0)
+ REG_S a1, 13*SZREG(t0)
+ REG_S a1, 14*SZREG(t0)
+ REG_S a1, 15*SZREG(t0)
+ REG_S a1, 16*SZREG(t0)
+ REG_S a1, 17*SZREG(t0)
+ REG_S a1, 18*SZREG(t0)
+ REG_S a1, 19*SZREG(t0)
+ REG_S a1, 20*SZREG(t0)
+ REG_S a1, 21*SZREG(t0)
+ REG_S a1, 22*SZREG(t0)
+ REG_S a1, 23*SZREG(t0)
+ REG_S a1, 24*SZREG(t0)
+ REG_S a1, 25*SZREG(t0)
+ REG_S a1, 26*SZREG(t0)
+ REG_S a1, 27*SZREG(t0)
+ REG_S a1, 28*SZREG(t0)
+ REG_S a1, 29*SZREG(t0)
+ REG_S a1, 30*SZREG(t0)
+ REG_S a1, 31*SZREG(t0)
+ addi t0, t0, 32*SZREG
+ bltu t0, a3, 3b
+ andi a2, a2, SZREG-1 /* Update count */
+
+4:
+ /* Handle trailing misalignment */
+ beqz a2, 6f
+ add a3, t0, a2
+5:
+ sb a1, 0(t0)
+ addi t0, t0, 1
+ bltu t0, a3, 5b
+6:
+ ret
+END(memset)
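
The "Duff's device" comment above refers to entering an unrolled store loop part-way through so the remainder needs no separate tail loop; the assembly does this by computing a jump offset into the block of 32 stores. The classic C form of the same idea, as a sketch (8-way unrolled and byte-wide rather than 32 register-wide stores):

#include <stddef.h>

void *memset_sketch(void *dst, int c, size_t n)
{
	unsigned char *p = dst;
	size_t iters = (n + 7) / 8;     /* passes through the unrolled body */

	if (n == 0)
		return dst;

	switch (n % 8) {                /* enter part-way through the first pass */
	case 0: do { *p++ = (unsigned char)c;
	case 7:      *p++ = (unsigned char)c;
	case 6:      *p++ = (unsigned char)c;
	case 5:      *p++ = (unsigned char)c;
	case 4:      *p++ = (unsigned char)c;
	case 3:      *p++ = (unsigned char)c;
	case 2:      *p++ = (unsigned char)c;
	case 1:      *p++ = (unsigned char)c;
		} while (--iters > 0);
	}
	return dst;
}
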
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
new file mode 100644
index 000000000000..58fb2877c865
--- /dev/null
+++ b/arch/riscv/lib/uaccess.S
@@ -0,0 +1,117 @@
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/csr.h>
+
+ .altmacro
+ .macro fixup op reg addr lbl
+ LOCAL _epc
+_epc:
+ \op \reg, \addr
+ .section __ex_table,"a"
+ .balign RISCV_SZPTR
+ RISCV_PTR _epc, \lbl
+ .previous
+ .endm
+
+ENTRY(__copy_user)
+
+ /* Enable access to user memory */
+ li t6, SR_SUM
+ csrs sstatus, t6
+
+ add a3, a1, a2
+ /* Use word-oriented copy only if low-order bits match */
+ andi t0, a0, SZREG-1
+ andi t1, a1, SZREG-1
+ bne t0, t1, 2f
+
+ addi t0, a1, SZREG-1
+ andi t1, a3, ~(SZREG-1)
+ andi t0, t0, ~(SZREG-1)
+ /*
+ * a3: terminal address of source region
+ * t0: lowest XLEN-aligned address in source
+ * t1: highest XLEN-aligned address in source
+ */
+ bgeu t0, t1, 2f
+ bltu a1, t0, 4f
+1:
+ fixup REG_L, t2, (a1), 10f
+ fixup REG_S, t2, (a0), 10f
+ addi a1, a1, SZREG
+ addi a0, a0, SZREG
+ bltu a1, t1, 1b
+2:
+ bltu a1, a3, 5f
+
+3:
+ /* Disable access to user memory */
+ csrc sstatus, t6
+ li a0, 0
+ ret
+4: /* Edge case: unalignment */
+ fixup lbu, t2, (a1), 10f
+ fixup sb, t2, (a0), 10f
+ addi a1, a1, 1
+ addi a0, a0, 1
+ bltu a1, t0, 4b
+ j 1b
+5: /* Edge case: remainder */
+ fixup lbu, t2, (a1), 10f
+ fixup sb, t2, (a0), 10f
+ addi a1, a1, 1
+ addi a0, a0, 1
+ bltu a1, a3, 5b
+ j 3b
+ENDPROC(__copy_user)
+
+
+ENTRY(__clear_user)
+
+ /* Enable access to user memory */
+ li t6, SR_SUM
+ csrs sstatus, t6
+
+ add a3, a0, a1
+ addi t0, a0, SZREG-1
+ andi t1, a3, ~(SZREG-1)
+ andi t0, t0, ~(SZREG-1)
+ /*
+ * a3: terminal address of target region
+ * t0: lowest XLEN-aligned address in target region
+ * t1: highest XLEN-aligned address in target region
+ */
+ bgeu t0, t1, 2f
+ bltu a0, t0, 4f
+1:
+ fixup REG_S, zero, (a0), 10f
+ addi a0, a0, SZREG
+ bltu a0, t1, 1b
+2:
+ bltu a0, a3, 5f
+
+3:
+ /* Disable access to user memory */
+ csrc sstatus, t6
+ li a0, 0
+ ret
+4: /* Edge case: unalignment */
+ fixup sb, zero, (a0), 10f
+ addi a0, a0, 1
+ bltu a0, t0, 4b
+ j 1b
+5: /* Edge case: remainder */
+ fixup sb, zero, (a0), 10f
+ addi a0, a0, 1
+ bltu a0, a3, 5b
+ j 3b
+ENDPROC(__clear_user)
+
+ .section .fixup,"ax"
+ .balign 4
+10:
+ /* Disable access to user memory */
+ csrc sstatus, t6
+ sub a0, a3, a0
+ ret
+ .previous
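
The fixup macro above pairs the address of each user-memory access with a recovery label in the __ex_table section; when such an access faults, fixup_exception() (added in arch/riscv/mm/extable.c below) rewrites sepc to that label, so __copy_user/__clear_user resume at 10:, disable SUM, and return the number of bytes left, as their callers expect. Conceptually each macro use emits one entry like this (a sketch; the table is populated from assembly, not C):

struct exception_table_entry {
	unsigned long insn;	/* address of the load/store that may fault */
	unsigned long fixup;	/* label 10b: clean up and report bytes not done */
};
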
diff --git a/arch/riscv/lib/udivdi3.S b/arch/riscv/lib/udivdi3.S
new file mode 100644
index 000000000000..cb01ae5b181a
--- /dev/null
+++ b/arch/riscv/lib/udivdi3.S
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016-2017 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+ .globl __udivdi3
+__udivdi3:
+ mv a2, a1
+ mv a1, a0
+ li a0, -1
+ beqz a2, .L5
+ li a3, 1
+ bgeu a2, a1, .L2
+.L1:
+ blez a2, .L2
+ slli a2, a2, 1
+ slli a3, a3, 1
+ bgtu a1, a2, .L1
+.L2:
+ li a0, 0
+.L3:
+ bltu a1, a2, .L4
+ sub a1, a1, a2
+ or a0, a0, a3
+.L4:
+ srli a3, a3, 1
+ srli a2, a2, 1
+ bnez a3, .L3
+.L5:
+ ret
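
The routine above is a plain shift-and-subtract divider: scale the divisor (and a quotient bit) up until it reaches the dividend, then walk back down, subtracting wherever it still fits. An equivalent C sketch of the register-level flow (illustrative only):

unsigned long udiv_sketch(unsigned long num, unsigned long den)
{
	unsigned long bit = 1, quot = 0;

	if (den == 0)
		return -1UL;			/* the assembly also returns -1 here */

	/* Scale the divisor up while it is still below the dividend */
	while (den < num && (long)den > 0) {	/* stop once the top bit is set */
		den <<= 1;
		bit <<= 1;
	}
	/* Walk back down, subtracting and setting quotient bits */
	while (bit) {
		if (num >= den) {
			num -= den;
			quot |= bit;
		}
		den >>= 1;
		bit >>= 1;
	}
	return quot;
}
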
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
new file mode 100644
index 000000000000..81f7d9ce6d88
--- /dev/null
+++ b/arch/riscv/mm/Makefile
@@ -0,0 +1,4 @@
+obj-y += init.o
+obj-y += fault.o
+obj-y += extable.o
+obj-y += ioremap.o
diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c
new file mode 100644
index 000000000000..11bb9417123b
--- /dev/null
+++ b/arch/riscv/mm/extable.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ * Lennox Wu <lennox.wu@sunplusct.com>
+ * Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2013 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ */
+
+
+#include <linux/extable.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(regs->sepc);
+ if (fixup) {
+ regs->sepc = fixup->fixup;
+ return 1;
+ }
+ return 0;
+}
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
new file mode 100644
index 000000000000..df2ca3c65048
--- /dev/null
+++ b/arch/riscv/mm/fault.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ * Lennox Wu <lennox.wu@sunplusct.com>
+ * Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ */
+
+
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/perf_event.h>
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+
+#include <asm/pgalloc.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+/*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs)
+{
+ struct task_struct *tsk;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ unsigned long addr, cause;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ int fault, code = SEGV_MAPERR;
+
+ cause = regs->scause;
+ addr = regs->sbadaddr;
+
+ tsk = current;
+ mm = tsk->mm;
+
+ /*
+ * Fault-in kernel-space virtual memory on-demand.
+ * The 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
+ goto vmalloc_fault;
+
+ /* Enable interrupts if they were enabled in the parent context. */
+ if (likely(regs->sstatus & SR_PIE))
+ local_irq_enable();
+
+ /*
+ * If we're in an interrupt, have no user context, or are running
+ * in an atomic region, then we must not take the fault.
+ */
+ if (unlikely(faulthandler_disabled() || !mm))
+ goto no_context;
+
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+
+retry:
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, addr);
+ if (unlikely(!vma))
+ goto bad_area;
+ if (likely(vma->vm_start <= addr))
+ goto good_area;
+ if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
+ goto bad_area;
+ if (unlikely(expand_stack(vma, addr)))
+ goto bad_area;
+
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it.
+ */
+good_area:
+ code = SEGV_ACCERR;
+
+ switch (cause) {
+ case EXC_INST_PAGE_FAULT:
+ if (!(vma->vm_flags & VM_EXEC))
+ goto bad_area;
+ break;
+ case EXC_LOAD_PAGE_FAULT:
+ if (!(vma->vm_flags & VM_READ))
+ goto bad_area;
+ break;
+ case EXC_STORE_PAGE_FAULT:
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ flags |= FAULT_FLAG_WRITE;
+ break;
+ default:
+ panic("%s: unhandled cause %lu", __func__, cause);
+ }
+
+ /*
+ * If for any reason at all we could not handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(vma, addr, flags);
+
+ /*
+ * If we need to retry but a fatal signal is pending, handle the
+ * signal first. We do not need to release the mmap_sem because it
+ * would already be released in __lock_page_or_retry in mm/filemap.c.
+ */
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(tsk))
+ return;
+
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+ BUG();
+ }
+
+ /*
+ * Major/minor page fault accounting is only done on the
+ * initial attempt. If we go through a retry, it is extremely
+ * likely that the page will be found in page cache at that point.
+ */
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR) {
+ tsk->maj_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+ 1, regs, addr);
+ } else {
+ tsk->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+ 1, regs, addr);
+ }
+ if (fault & VM_FAULT_RETRY) {
+ /*
+ * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+ * of starvation.
+ */
+ flags &= ~(FAULT_FLAG_ALLOW_RETRY);
+ flags |= FAULT_FLAG_TRIED;
+
+ /*
+ * No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+ goto retry;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
+ return;
+
+ /*
+ * Something tried to access memory that isn't in our memory map.
+ * Fix it, but check if it's kernel or user first.
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
+ do_trap(regs, SIGSEGV, code, addr, tsk);
+ return;
+ }
+
+no_context:
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs))
+ return;
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ bust_spinlocks(1);
+ pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
+ (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+ "paging request", addr);
+ die(regs, "Oops");
+ do_exit(SIGKILL);
+
+ /*
+ * We ran out of memory, call the OOM killer, and return to userspace
+ * (which will retry the fault, or kill us if we got oom-killed).
+ */
+out_of_memory:
+ up_read(&mm->mmap_sem);
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
+ return;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ goto no_context;
+ do_trap(regs, SIGBUS, BUS_ADRERR, addr, tsk);
+ return;
+
+vmalloc_fault:
+ {
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ p4d_t *p4d, *p4d_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+ int index;
+
+ if (user_mode(regs))
+ goto bad_area;
+
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk->active_mm->pgd" here.
+ * We might be inside an interrupt in the middle
+ * of a task switch.
+ */
+ index = pgd_index(addr);
+ pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr)) + index;
+ pgd_k = init_mm.pgd + index;
+
+ if (!pgd_present(*pgd_k))
+ goto no_context;
+ set_pgd(pgd, *pgd_k);
+
+ p4d = p4d_offset(pgd, addr);
+ p4d_k = p4d_offset(pgd_k, addr);
+ if (!p4d_present(*p4d_k))
+ goto no_context;
+
+ pud = pud_offset(p4d, addr);
+ pud_k = pud_offset(p4d_k, addr);
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ /*
+ * Since the vmalloc area is global, it is unnecessary
+ * to copy individual PTEs
+ */
+ pmd = pmd_offset(pud, addr);
+ pmd_k = pmd_offset(pud_k, addr);
+ if (!pmd_present(*pmd_k))
+ goto no_context;
+ set_pmd(pmd, *pmd_k);
+
+ /*
+ * Make sure the actual PTE exists as well to
+ * catch kernel vmalloc-area accesses to non-mapped
+ * addresses. If we don't do this, this will just
+ * silently loop forever.
+ */
+ pte_k = pte_offset_kernel(pmd_k, addr);
+ if (!pte_present(*pte_k))
+ goto no_context;
+ return;
+ }
+}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
new file mode 100644
index 000000000000..9f4bee5e51fd
--- /dev/null
+++ b/arch/riscv/mm/init.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/initrd.h>
+#include <linux/memblock.h>
+#include <linux/swap.h>
+
+#include <asm/tlbflush.h>
+#include <asm/sections.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+
+static void __init zone_sizes_init(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES];
+
+ memset(zones_size, 0, sizeof(zones_size));
+ zones_size[ZONE_NORMAL] = max_mapnr;
+ free_area_init_node(0, zones_size, pfn_base, NULL);
+}
+
+void setup_zero_page(void)
+{
+ memset((void *)empty_zero_page, 0, PAGE_SIZE);
+}
+
+void __init paging_init(void)
+{
+ init_mm.pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr));
+
+ setup_zero_page();
+ local_flush_tlb_all();
+ zone_sizes_init();
+}
+
+void __init mem_init(void)
+{
+#ifdef CONFIG_FLATMEM
+ BUG_ON(!mem_map);
+#endif /* CONFIG_FLATMEM */
+
+ high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
+ free_all_bootmem();
+
+ mem_init_print_info(NULL);
+}
+
+void free_initmem(void)
+{
+ free_initmem_default(0);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+}
+#endif /* CONFIG_BLK_DEV_INITRD */
diff --git a/arch/riscv/mm/ioremap.c b/arch/riscv/mm/ioremap.c
new file mode 100644
index 000000000000..e99194a4077e
--- /dev/null
+++ b/arch/riscv/mm/ioremap.c
@@ -0,0 +1,92 @@
+/*
+ * (C) Copyright 1995 1996 Linus Torvalds
+ * (C) Copyright 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+
+#include <asm/pgtable.h>
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
+ pgprot_t prot, void *caller)
+{
+ phys_addr_t last_addr;
+ unsigned long offset, vaddr;
+ struct vm_struct *area;
+
+ /* Disallow wrap-around or zero size */
+ last_addr = addr + size - 1;
+ if (!size || last_addr < addr)
+ return NULL;
+
+ /* Page-align mappings */
+ offset = addr & (~PAGE_MASK);
+ addr &= PAGE_MASK;
+ size = PAGE_ALIGN(size + offset);
+
+ area = get_vm_area_caller(size, VM_IOREMAP, caller);
+ if (!area)
+ return NULL;
+ vaddr = (unsigned long)area->addr;
+
+ if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
+ free_vm_area(area);
+ return NULL;
+ }
+
+ return (void __iomem *)(vaddr + offset);
+}
+
+/*
+ * ioremap - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * Must be freed with iounmap.
+ */
+void __iomem *ioremap(phys_addr_t offset, unsigned long size)
+{
+ return __ioremap_caller(offset, size, PAGE_KERNEL,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap);
+
+
+/**
+ * iounmap - Free an I/O remapping
+ * @addr: virtual address from ioremap_*
+ *
+ * Caller must ensure there is only one unmapping for the same pointer.
+ */
+void iounmap(void __iomem *addr)
+{
+ vunmap((void *)((unsigned long)addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
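
To make the alignment handling in __ioremap_caller() concrete: a request that is not page aligned is widened to whole pages, and the caller receives a pointer offset back into the mapping. A small standalone sketch, assuming a 4 KiB page size and a made-up device address:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long phys = 0x10013004UL, len = 0x20UL;	/* hypothetical register block */
	unsigned long offset = phys & ~PAGE_MASK;		/* 0x004 */
	unsigned long base = phys & PAGE_MASK;			/* 0x10013000 */
	unsigned long mapped = PAGE_ALIGN(len + offset);	/* 0x1000: one full page */

	printf("map %#lx for %#lx bytes, return vaddr + %#lx\n", base, mapped, offset);
	return 0;
}
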
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ae55e715cc74..863a62a6de3c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -68,6 +68,7 @@ config S390
select ARCH_BINFMT_ELF_STATE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
select ARCH_HAS_KCOV
@@ -143,7 +144,6 @@ config S390
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
- select HAVE_EXIT_THREAD
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
@@ -538,6 +538,22 @@ config ARCH_RANDOM
If unsure, say Y.
+config ALTERNATIVES
+ def_bool y
+ prompt "Patch optimized instructions for running CPU type"
+ help
+ When enabled, the kernel code is compiled with additional
+ alternative instruction blocks optimized for newer CPU types.
+ These alternative instruction blocks are patched at kernel boot
+ time when the running CPU supports them. This mechanism is used
+ to optimize some critical code paths (e.g. spinlocks) for newer
+ CPUs even if the kernel is built to support older machine
+ generations.
+
+ This mechanism can be disabled by appending the "noaltinstr"
+ option to the kernel command line.
+
+ If unsure, say Y.
+
endmenu
menu "Memory setup"
@@ -809,18 +825,6 @@ config PFAULT
Everybody who wants to run Linux under VM != VM4.2 should select
this option.
-config SHARED_KERNEL
- bool "VM shared kernel support"
- depends on !JUMP_LABEL
- help
- Select this option, if you want to share the text segment of the
- Linux kernel between different VM guests. This reduces memory
- usage with lots of guests but greatly increases kernel size.
- Also if a kernel was IPL'ed from a shared segment the kexec system
- call will not work.
- You should only select this option if you know what you are
- doing and want to exploit this feature.
-
config CMM
def_tristate n
prompt "Cooperative memory management"
@@ -930,17 +934,4 @@ config S390_GUEST
Select this option if you want to run the kernel as a guest under
the KVM hypervisor.
-config S390_GUEST_OLD_TRANSPORT
- def_bool y
- prompt "Guest support for old s390 virtio transport (DEPRECATED)"
- depends on S390_GUEST
- help
- Enable this option to add support for the old s390-virtio
- transport (i.e. virtio devices NOT based on virtio-ccw). This
- type of virtio devices is only available on the experimental
- kuli userspace or with old (< 2.6) qemu. If you are running
- with a modern version of qemu (which supports virtio-ccw since
- 1.4 and uses it by default since version 2.4), you probably won't
- need this.
-
endmenu
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index dac821cfcd43..6b3f41985f28 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -21,7 +21,7 @@ KBUILD_CFLAGS += -m64
KBUILD_AFLAGS += -m64
UTS_MACHINE := s390x
STACK_SIZE := 16384
-CHECKFLAGS += -D__s390__ -D__s390x__
+CHECKFLAGS += -D__s390__ -D__s390x__ -mbig-endian
export LD_BFD
@@ -133,6 +133,7 @@ archclean:
archprepare:
$(Q)$(MAKE) $(build)=$(tools) include/generated/facilities.h
+ $(Q)$(MAKE) $(build)=$(tools) include/generated/dis.h
# Don't use tabs in echo arguments
define archhelp
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 3df10c989893..29e3dc99b916 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -12,7 +12,7 @@ targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += misc.o piggy.o sizes.h head.o
KBUILD_CFLAGS := -m64 -D__KERNEL__ -O2
-KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 77633200f42c..cecf38b9ec82 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -170,9 +170,7 @@ unsigned long decompress_kernel(void)
free_mem_ptr = (unsigned long) &_end;
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
- puts("Uncompressing Linux... ");
__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
- puts("Ok, booting the kernel.\n");
return (unsigned long) output;
}
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 282072206df7..84eccc88c065 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -69,7 +69,6 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
-CONFIG_CMA=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_MEM_SOFT_DIRTY=y
@@ -379,7 +378,6 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_BLK_DEV_RAM_DAX=y
@@ -416,7 +414,6 @@ CONFIG_SCSI_OSD_ULD=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
@@ -483,6 +480,8 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
+CONFIG_VFIO=m
+CONFIG_VFIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -599,7 +598,6 @@ CONFIG_DETECT_HUNG_TASK=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
@@ -629,10 +627,8 @@ CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
-CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
CONFIG_TEST_SORT=y
@@ -649,6 +645,7 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -705,12 +702,12 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 3c6b78189fbc..f7202358e6d7 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -70,7 +70,6 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
-CONFIG_CMA=y
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
@@ -376,7 +375,6 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_BLK_DEV_RAM_DAX=y
@@ -412,7 +410,6 @@ CONFIG_SCSI_OSD_ULD=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
@@ -479,6 +476,8 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
+CONFIG_VFIO=m
+CONFIG_VFIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -575,10 +574,8 @@ CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
-CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
@@ -650,12 +647,12 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRC7=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 653d72bcc007..03100fe74ea8 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -68,7 +68,6 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
-CONFIG_CMA=y
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
@@ -374,7 +373,6 @@ CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_BLK_DEV_RAM_DAX=y
@@ -410,7 +408,6 @@ CONFIG_SCSI_OSD_ULD=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
@@ -477,6 +474,8 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
+CONFIG_VFIO=m
+CONFIG_VFIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -573,10 +572,8 @@ CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
-CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
@@ -648,12 +645,12 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRC7=m
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 591cbdf615af..b48e20dd94e9 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -4,9 +4,11 @@
* s390 implementation of the AES Cipher Algorithm.
*
* s390 Version:
- * Copyright IBM Corp. 2005, 2007
+ * Copyright IBM Corp. 2005, 2017
* Author(s): Jan Glauber (jang@de.ibm.com)
* Sebastian Siewior (sebastian@breakpoint.cc> SW-Fallback
+ * Patrick Steuer <patrick.steuer@de.ibm.com>
+ * Harald Freudenberger <freude@de.ibm.com>
*
* Derived from "crypto/aes_generic.c"
*
@@ -22,20 +24,25 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
+#include <crypto/ghash.h>
+#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fips.h>
+#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
-static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
+static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
+ kma_functions;
struct s390_aes_ctx {
u8 key[AES_MAX_KEY_SIZE];
@@ -55,6 +62,17 @@ struct s390_xts_ctx {
struct crypto_skcipher *fallback;
};
+struct gcm_sg_walk {
+ struct scatter_walk walk;
+ unsigned int walk_bytes;
+ u8 *walk_ptr;
+ unsigned int walk_bytes_remain;
+ u8 buf[AES_BLOCK_SIZE];
+ unsigned int buf_bytes;
+ u8 *ptr;
+ unsigned int nbytes;
+};
+
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -771,6 +789,267 @@ static struct crypto_alg ctr_aes_alg = {
}
};
+static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ ctx->fc = CPACF_KMA_GCM_AES_128;
+ break;
+ case AES_KEYSIZE_192:
+ ctx->fc = CPACF_KMA_GCM_AES_192;
+ break;
+ case AES_KEYSIZE_256:
+ ctx->fc = CPACF_KMA_GCM_AES_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_len = keylen;
+ return 0;
+}
+
+static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 8:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
+ unsigned int len)
+{
+ memset(gw, 0, sizeof(*gw));
+ gw->walk_bytes_remain = len;
+ scatterwalk_start(&gw->walk, sg);
+}
+
+static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
+{
+ int n;
+
+ /* minbytesneeded <= AES_BLOCK_SIZE */
+ if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
+ gw->ptr = gw->buf;
+ gw->nbytes = gw->buf_bytes;
+ goto out;
+ }
+
+ if (gw->walk_bytes_remain == 0) {
+ gw->ptr = NULL;
+ gw->nbytes = 0;
+ goto out;
+ }
+
+ gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
+ if (!gw->walk_bytes) {
+ scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
+ gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+ gw->walk_bytes_remain);
+ }
+ gw->walk_ptr = scatterwalk_map(&gw->walk);
+
+ if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
+ gw->ptr = gw->walk_ptr;
+ gw->nbytes = gw->walk_bytes;
+ goto out;
+ }
+
+ while (1) {
+ n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
+ memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
+ gw->buf_bytes += n;
+ gw->walk_bytes_remain -= n;
+ scatterwalk_unmap(&gw->walk);
+ scatterwalk_advance(&gw->walk, n);
+ scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+
+ if (gw->buf_bytes >= minbytesneeded) {
+ gw->ptr = gw->buf;
+ gw->nbytes = gw->buf_bytes;
+ goto out;
+ }
+
+ gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+ gw->walk_bytes_remain);
+ if (!gw->walk_bytes) {
+ scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
+ gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+ gw->walk_bytes_remain);
+ }
+ gw->walk_ptr = scatterwalk_map(&gw->walk);
+ }
+
+out:
+ return gw->nbytes;
+}
+
+static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+{
+ int n;
+
+ if (gw->ptr == NULL)
+ return;
+
+ if (gw->ptr == gw->buf) {
+ n = gw->buf_bytes - bytesdone;
+ if (n > 0) {
+ memmove(gw->buf, gw->buf + bytesdone, n);
+ gw->buf_bytes -= n;
+ } else
+ gw->buf_bytes = 0;
+ } else {
+ gw->walk_bytes_remain -= bytesdone;
+ scatterwalk_unmap(&gw->walk);
+ scatterwalk_advance(&gw->walk, bytesdone);
+ scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+ }
+}
+
+static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ unsigned int ivsize = crypto_aead_ivsize(tfm);
+ unsigned int taglen = crypto_aead_authsize(tfm);
+ unsigned int aadlen = req->assoclen;
+ unsigned int pclen = req->cryptlen;
+ int ret = 0;
+
+ unsigned int len, in_bytes, out_bytes,
+ min_bytes, bytes, aad_bytes, pc_bytes;
+ struct gcm_sg_walk gw_in, gw_out;
+ u8 tag[GHASH_DIGEST_SIZE];
+
+ struct {
+ u32 _[3]; /* reserved */
+ u32 cv; /* Counter Value */
+ u8 t[GHASH_DIGEST_SIZE];/* Tag */
+ u8 h[AES_BLOCK_SIZE]; /* Hash-subkey */
+ u64 taadl; /* Total AAD Length */
+ u64 tpcl; /* Total Plain-/Cipher-text Length */
+ u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
+ u8 k[AES_MAX_KEY_SIZE]; /* Key */
+ } param;
+
+ /*
+ * encrypt
+ * req->src: aad||plaintext
+ * req->dst: aad||ciphertext||tag
+ * decrypt
+ * req->src: aad||ciphertext||tag
+ * req->dst: aad||plaintext, return 0 or -EBADMSG
+ * aad, plaintext and ciphertext may be empty.
+ */
+ if (flags & CPACF_DECRYPT)
+ pclen -= taglen;
+ len = aadlen + pclen;
+
+ memset(&param, 0, sizeof(param));
+ param.cv = 1;
+ param.taadl = aadlen * 8;
+ param.tpcl = pclen * 8;
+ memcpy(param.j0, req->iv, ivsize);
+ *(u32 *)(param.j0 + ivsize) = 1;
+ memcpy(param.k, ctx->key, ctx->key_len);
+
+ gcm_sg_walk_start(&gw_in, req->src, len);
+ gcm_sg_walk_start(&gw_out, req->dst, len);
+
+ do {
+ min_bytes = min_t(unsigned int,
+ aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
+ in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
+ out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
+ bytes = min(in_bytes, out_bytes);
+
+ if (aadlen + pclen <= bytes) {
+ aad_bytes = aadlen;
+ pc_bytes = pclen;
+ flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
+ } else {
+ if (aadlen <= bytes) {
+ aad_bytes = aadlen;
+ pc_bytes = (bytes - aadlen) &
+ ~(AES_BLOCK_SIZE - 1);
+ flags |= CPACF_KMA_LAAD;
+ } else {
+ aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
+ pc_bytes = 0;
+ }
+ }
+
+ if (aad_bytes > 0)
+ memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);
+
+ cpacf_kma(ctx->fc | flags, &param,
+ gw_out.ptr + aad_bytes,
+ gw_in.ptr + aad_bytes, pc_bytes,
+ gw_in.ptr, aad_bytes);
+
+ gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
+ gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
+ aadlen -= aad_bytes;
+ pclen -= pc_bytes;
+ } while (aadlen + pclen > 0);
+
+ if (flags & CPACF_DECRYPT) {
+ scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
+ if (crypto_memneq(tag, param.t, taglen))
+ ret = -EBADMSG;
+ } else
+ scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);
+
+ memzero_explicit(&param, sizeof(param));
+ return ret;
+}
+
+static int gcm_aes_encrypt(struct aead_request *req)
+{
+ return gcm_aes_crypt(req, CPACF_ENCRYPT);
+}
+
+static int gcm_aes_decrypt(struct aead_request *req)
+{
+ return gcm_aes_crypt(req, CPACF_DECRYPT);
+}
+
+static struct aead_alg gcm_aes_aead = {
+ .setkey = gcm_aes_setkey,
+ .setauthsize = gcm_aes_setauthsize,
+ .encrypt = gcm_aes_encrypt,
+ .decrypt = gcm_aes_decrypt,
+
+ .ivsize = GHASH_BLOCK_SIZE - sizeof(u32),
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+
+ .base = {
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct s390_aes_ctx),
+ .cra_priority = 900,
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-s390",
+ .cra_module = THIS_MODULE,
+ },
+};
+
static struct crypto_alg *aes_s390_algs_ptr[5];
static int aes_s390_algs_num;
@@ -790,16 +1069,19 @@ static void aes_s390_fini(void)
crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
if (ctrblk)
free_page((unsigned long) ctrblk);
+
+ crypto_unregister_aead(&gcm_aes_aead);
}
static int __init aes_s390_init(void)
{
int ret;
- /* Query available functions for KM, KMC and KMCTR */
+ /* Query available functions for KM, KMC, KMCTR and KMA */
cpacf_query(CPACF_KM, &km_functions);
cpacf_query(CPACF_KMC, &kmc_functions);
cpacf_query(CPACF_KMCTR, &kmctr_functions);
+ cpacf_query(CPACF_KMA, &kma_functions);
if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
@@ -840,6 +1122,14 @@ static int __init aes_s390_init(void)
goto out_err;
}
+ if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
+ cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
+ cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
+ ret = crypto_register_aead(&gcm_aes_aead);
+ if (ret)
+ goto out_err;
+ }
+
return 0;
out_err:
aes_s390_fini();
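
For illustration, a minimal in-kernel caller of the new "gcm(aes)" implementation registered above could look like the sketch below. It follows the src/dst layout documented in gcm_aes_crypt(): the buffer carries aad || plaintext and leaves room for the tag, the IV is 12 bytes, and a 256-bit key selects CPACF_KMA_GCM_AES_256. The function name and the AAD_LEN/PT_LEN/TAG_LEN sizes are illustrative, not part of the patch.

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

#define AAD_LEN  16     /* illustrative sizes */
#define PT_LEN   64
#define TAG_LEN  16

/* buf holds aad || plaintext and has TAG_LEN spare bytes for the tag */
static int gcm_aes_s390_example(u8 *buf, const u8 *key, u8 *iv)
{
        struct crypto_aead *tfm;
        struct aead_request *req;
        struct scatterlist sg;
        int ret;

        tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        ret = crypto_aead_setkey(tfm, key, 32); /* AES-256 -> CPACF_KMA_GCM_AES_256 */
        if (!ret)
                ret = crypto_aead_setauthsize(tfm, TAG_LEN);
        req = ret ? NULL : aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_aead(tfm);
                return ret ? ret : -ENOMEM;
        }

        sg_init_one(&sg, buf, AAD_LEN + PT_LEN + TAG_LEN);
        aead_request_set_ad(req, AAD_LEN);
        aead_request_set_crypt(req, &sg, &sg, PT_LEN, iv); /* 12-byte IV */
        ret = crypto_aead_encrypt(req); /* async backends may return -EINPROGRESS */

        aead_request_free(req);
        crypto_free_aead(tfm);
        return ret;
}
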
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 20244a38c886..46a3178d8bc6 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -53,7 +53,6 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
-CONFIG_CMA=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
@@ -163,7 +162,6 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_PANIC_ON_OOPS=y
-CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y
@@ -179,7 +177,6 @@ CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_FUNCTION_PROFILER=y
-CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_S390_PTDUMP=y
CONFIG_CRYPTO_CRYPTD=m
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 6e2c9f7e47fa..41c211a4d8b1 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -15,6 +15,7 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += preempt.h
+generic-y += rwsem.h
generic-y += trace_clock.h
generic-y += unaligned.h
generic-y += word-at-a-time.h
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
new file mode 100644
index 000000000000..6c268f6a51d3
--- /dev/null
+++ b/arch/s390/include/asm/alternative.h
@@ -0,0 +1,163 @@
+#ifndef _ASM_S390_ALTERNATIVE_H
+#define _ASM_S390_ALTERNATIVE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+struct alt_instr {
+ s32 instr_offset; /* original instruction */
+ s32 repl_offset; /* offset to replacement instruction */
+ u16 facility; /* facility bit set for replacement */
+ u8 instrlen; /* length of original instruction */
+ u8 replacementlen; /* length of new instruction */
+} __packed;
+
+#ifdef CONFIG_ALTERNATIVES
+extern void apply_alternative_instructions(void);
+extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+#else
+static inline void apply_alternative_instructions(void) {};
+static inline void apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end) {};
+#endif
+/*
+ * |661: |662: |6620 |663:
+ * +-----------+---------------------+
+ * | oldinstr | oldinstr_padding |
+ * | +----------+----------+
+ * | | | |
+ * | | >6 bytes |6/4/2 nops|
+ * | |6 bytes jg----------->
+ * +-----------+---------------------+
+ * ^^ static padding ^^
+ *
+ * .altinstr_replacement section
+ * +---------------------+-----------+
+ * |6641: |6651:
+ * | alternative instr 1 |
+ * +-----------+---------+- - - - - -+
+ * |6642: |6652: |
+ * | alternative instr 2 | padding
+ * +---------------------+- - - - - -+
+ * ^ runtime ^
+ *
+ * .altinstructions section
+ * +---------------------------------+
+ * | alt_instr entries for each |
+ * | alternative instr |
+ * +---------------------------------+
+ */
+
+#define b_altinstr(num) "664"#num
+#define e_altinstr(num) "665"#num
+
+#define e_oldinstr_pad_end "663"
+#define oldinstr_len "662b-661b"
+#define oldinstr_total_len e_oldinstr_pad_end"b-661b"
+#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b"
+#define oldinstr_pad_len(num) \
+ "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
+ "((" altinstr_len(num) ")-(" oldinstr_len "))"
+
+#define INSTR_LEN_SANITY_CHECK(len) \
+ ".if " len " > 254\n" \
+ "\t.error \"cpu alternatives does not support instructions " \
+ "blocks > 254 bytes\"\n" \
+ ".endif\n" \
+ ".if (" len ") %% 2\n" \
+ "\t.error \"cpu alternatives instructions length is odd\"\n" \
+ ".endif\n"
+
+#define OLDINSTR_PADDING(oldinstr, num) \
+ ".if " oldinstr_pad_len(num) " > 6\n" \
+ "\tjg " e_oldinstr_pad_end "f\n" \
+ "6620:\n" \
+ "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
+ ".else\n" \
+ "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \
+ "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \
+ "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \
+ ".endif\n"
+
+#define OLDINSTR(oldinstr, num) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ OLDINSTR_PADDING(oldinstr, num) \
+ e_oldinstr_pad_end ":\n" \
+ INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define OLDINSTR_2(oldinstr, num1, num2) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \
+ OLDINSTR_PADDING(oldinstr, num2) \
+ ".else\n" \
+ OLDINSTR_PADDING(oldinstr, num1) \
+ ".endif\n" \
+ e_oldinstr_pad_end ":\n" \
+ INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define ALTINSTR_ENTRY(facility, num) \
+ "\t.long 661b - .\n" /* old instruction */ \
+ "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
+ "\t.word " __stringify(facility) "\n" /* facility bit */ \
+ "\t.byte " oldinstr_total_len "\n" /* source len */ \
+ "\t.byte " altinstr_len(num) "\n" /* alt instruction len */
+
+#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \
+ b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
+ INSTR_LEN_SANITY_CHECK(altinstr_len(num))
+
+#ifdef CONFIG_ALTERNATIVES
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, altinstr, facility) \
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(altinstr, 1) \
+ ".popsection\n" \
+ OLDINSTR(oldinstr, 1) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(facility, 1) \
+ ".popsection\n"
+
+#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(altinstr1, 1) \
+ ALTINSTR_REPLACEMENT(altinstr2, 2) \
+ ".popsection\n" \
+ OLDINSTR_2(oldinstr, 1, 2) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(facility1, 1) \
+ ALTINSTR_ENTRY(facility2, 2) \
+ ".popsection\n"
+#else
+/* Alternative instructions are disabled, let's put just oldinstr in */
+#define ALTERNATIVE(oldinstr, altinstr, facility) \
+ oldinstr "\n"
+
+#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
+ oldinstr "\n"
+#endif
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows the use of optimized instructions even on generic binary
+ * kernels.
+ *
+ * oldinstr is padded with jump and nops at compile time if altinstr is
+ * longer. altinstr is padded with jump and nops at run-time during patching.
+ *
+ * For non-barrier-like inlines, please define new variants
+ * without volatile and the memory clobber.
+ */
+#define alternative(oldinstr, altinstr, facility) \
+ asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+
+#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
+ asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
+ altinstr2, facility2) ::: "memory")
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_ALTERNATIVE_H */
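
As a usage sketch (not part of the patch): the C-level alternative() wrapper defined above emits the same patching machinery that the reworked arch_spin_unlock() later in this patch open-codes with ALTERNATIVE(), namely a NIAI 7 hint when facility bit 49 is installed and nothing otherwise. The function name is hypothetical.

static inline void niai_hint_example(void)
{
        /* execute nothing by default, NIAI 7 once facility 49 is patched in */
        alternative("", ".long 0xb2fa0070", 49);
}
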
diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h
index e9f7d7a57f99..09aed1095336 100644
--- a/arch/s390/include/asm/archrandom.h
+++ b/arch/s390/include/asm/archrandom.h
@@ -28,42 +28,42 @@ static void s390_arch_random_generate(u8 *buf, unsigned int nbytes)
static inline bool arch_has_random(void)
{
- if (static_branch_likely(&s390_arch_random_available))
- return true;
return false;
}
static inline bool arch_has_random_seed(void)
{
- return arch_has_random();
+ if (static_branch_likely(&s390_arch_random_available))
+ return true;
+ return false;
}
static inline bool arch_get_random_long(unsigned long *v)
{
- if (static_branch_likely(&s390_arch_random_available)) {
- s390_arch_random_generate((u8 *)v, sizeof(*v));
- return true;
- }
return false;
}
static inline bool arch_get_random_int(unsigned int *v)
{
- if (static_branch_likely(&s390_arch_random_available)) {
- s390_arch_random_generate((u8 *)v, sizeof(*v));
- return true;
- }
return false;
}
static inline bool arch_get_random_seed_long(unsigned long *v)
{
- return arch_get_random_long(v);
+ if (static_branch_likely(&s390_arch_random_available)) {
+ s390_arch_random_generate((u8 *)v, sizeof(*v));
+ return true;
+ }
+ return false;
}
static inline bool arch_get_random_seed_int(unsigned int *v)
{
- return arch_get_random_int(v);
+ if (static_branch_likely(&s390_arch_random_available)) {
+ s390_arch_random_generate((u8 *)v, sizeof(*v));
+ return true;
+ }
+ return false;
}
#endif /* CONFIG_ARCH_RANDOM */
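
After this change only the seed hooks report data from the CPACF TRNG; the fast arch_get_random_long()/_int() hooks unconditionally return false on s390. A minimal illustrative caller (the function name is hypothetical):

static bool trng_seed_example(unsigned long *v)
{
        if (arch_get_random_seed_long(v))       /* CPACF TRNG, if available */
                return true;
        return arch_get_random_long(v);         /* now always false on s390 */
}
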
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index f479e4c0b87e..d3f09526ee19 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -40,19 +40,24 @@ __ATOMIC_OPS(__atomic64_xor, long, "laxg")
#undef __ATOMIC_OPS
#undef __ATOMIC_OP
-static inline void __atomic_add_const(int val, int *ptr)
-{
- asm volatile(
- " asi %[ptr],%[val]\n"
- : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+#define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier) \
+static inline void op_name(op_type val, op_type *ptr) \
+{ \
+ asm volatile( \
+ op_string " %[ptr],%[val]\n" \
+ op_barrier \
+ : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc", "memory");\
}
-static inline void __atomic64_add_const(long val, long *ptr)
-{
- asm volatile(
- " agsi %[ptr],%[val]\n"
- : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
-}
+#define __ATOMIC_CONST_OPS(op_name, op_type, op_string) \
+ __ATOMIC_CONST_OP(op_name, op_type, op_string, "\n") \
+ __ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+
+__ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
+__ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
+
+#undef __ATOMIC_CONST_OPS
+#undef __ATOMIC_CONST_OP
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
@@ -108,6 +113,11 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
#undef __ATOMIC64_OPS
+#define __atomic_add_const(val, ptr) __atomic_add(val, ptr)
+#define __atomic_add_const_barrier(val, ptr) __atomic_add(val, ptr)
+#define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr)
+#define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr)
+
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
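
For readability, expanding __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi") from the macros above yields roughly the following pair (whitespace reflowed); the reworked rwlock code later in this patch uses the barrier form via __atomic_add_const_barrier(-1, &rw->cnts).

static inline void __atomic64_add_const(long val, long *ptr)
{
        asm volatile(
                "agsi   %[ptr],%[val]\n"
                : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc", "memory");
}

static inline void __atomic64_add_const_barrier(long val, long *ptr)
{
        asm volatile(
                "agsi   %[ptr],%[val]\n"
                "bcr    14,0\n"
                : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc", "memory");
}
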
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index b00777ce93b4..99aa817dad32 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -42,6 +42,7 @@ struct ccwgroup_device {
* @thaw: undo work done in @freeze
* @restore: callback for restoring after hibernation
* @driver: embedded driver structure
+ * @ccw_driver: supported ccw_driver (optional)
*/
struct ccwgroup_driver {
int (*setup) (struct ccwgroup_device *);
@@ -56,6 +57,7 @@ struct ccwgroup_driver {
int (*restore)(struct ccwgroup_device *);
struct device_driver driver;
+ struct ccw_driver *ccw_driver;
};
extern int ccwgroup_driver_register (struct ccwgroup_driver *cdriver);
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 056670ebba67..3cc52e37b4b2 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -2,7 +2,7 @@
/*
* CP Assist for Cryptographic Functions (CPACF)
*
- * Copyright IBM Corp. 2003, 2016
+ * Copyright IBM Corp. 2003, 2017
* Author(s): Thomas Spatzier
* Jan Glauber
* Harald Freudenberger (freude@de.ibm.com)
@@ -134,6 +134,22 @@
#define CPACF_PRNO_TRNG_Q_R2C_RATIO 0x70
#define CPACF_PRNO_TRNG 0x72
+/*
+ * Function codes for the KMA (CIPHER MESSAGE WITH AUTHENTICATION)
+ * instruction
+ */
+#define CPACF_KMA_QUERY 0x00
+#define CPACF_KMA_GCM_AES_128 0x12
+#define CPACF_KMA_GCM_AES_192 0x13
+#define CPACF_KMA_GCM_AES_256 0x14
+
+/*
+ * Flags for the KMA (CIPHER MESSAGE WITH AUTHENTICATION) instruction
+ */
+#define CPACF_KMA_LPC 0x100 /* Last-Plaintext/Ciphertext */
+#define CPACF_KMA_LAAD 0x200 /* Last-AAD */
+#define CPACF_KMA_HS 0x400 /* Hash-subkey Supplied */
+
typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
/**
@@ -179,6 +195,8 @@ static inline int __cpacf_check_opcode(unsigned int opcode)
return test_facility(77); /* check for MSA4 */
case CPACF_PRNO:
return test_facility(57); /* check for MSA5 */
+ case CPACF_KMA:
+ return test_facility(146); /* check for MSA8 */
default:
BUG();
}
@@ -470,4 +488,36 @@ static inline void cpacf_pckmo(long func, void *param)
: "cc", "memory");
}
+/**
+ * cpacf_kma() - executes the KMA (CIPHER MESSAGE WITH AUTHENTICATION)
+ * instruction
+ * @func: the function code passed to KMA; see CPACF_KMA_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ * @aad: address of additional authenticated data memory area
+ * @aad_len: length of aad operand in bytes
+ */
+static inline void cpacf_kma(unsigned long func, void *param, u8 *dest,
+ const u8 *src, unsigned long src_len,
+ const u8 *aad, unsigned long aad_len)
+{
+ register unsigned long r0 asm("0") = (unsigned long) func;
+ register unsigned long r1 asm("1") = (unsigned long) param;
+ register unsigned long r2 asm("2") = (unsigned long) src;
+ register unsigned long r3 asm("3") = (unsigned long) src_len;
+ register unsigned long r4 asm("4") = (unsigned long) aad;
+ register unsigned long r5 asm("5") = (unsigned long) aad_len;
+ register unsigned long r6 asm("6") = (unsigned long) dest;
+
+ asm volatile(
+ "0: .insn rrf,%[opc] << 16,%[dst],%[src],%[aad],0\n"
+ " brc 1,0b\n" /* handle partial completion */
+ : [dst] "+a" (r6), [src] "+a" (r2), [slen] "+d" (r3),
+ [aad] "+a" (r4), [alen] "+d" (r5)
+ : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMA)
+ : "cc", "memory");
+}
+
#endif /* _ASM_S390_CPACF_H */
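
A sketch of the usage pattern this patch establishes for KMA (function names below are hypothetical; aes_s390_init() and gcm_aes_crypt() earlier in this patch contain the real code): query the facility once, then drive the instruction with the GCM parameter block, setting CPACF_KMA_LAAD/CPACF_KMA_LPC on the final AAD and text chunks.

static cpacf_mask_t kma_functions;

static bool kma_gcm_available(void)
{
        cpacf_query(CPACF_KMA, &kma_functions);
        return cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
               cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
               cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256);
}

static void kma_gcm_one_shot(void *param, u8 *dst, const u8 *src,
                             unsigned long len, const u8 *aad,
                             unsigned long aadlen)
{
        /* single call covering the whole message: both "last" flags set */
        cpacf_kma(CPACF_KMA_GCM_AES_256 | CPACF_KMA_LAAD | CPACF_KMA_LPC,
                  param, dst, src, len, aad, aadlen);
}
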
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 93e0d72f6c94..99c93d0346f9 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -8,6 +8,18 @@
#ifndef __ASM_CTL_REG_H
#define __ASM_CTL_REG_H
+#include <linux/const.h>
+
+#define CR2_GUARDED_STORAGE _BITUL(63 - 59)
+
+#define CR14_CHANNEL_REPORT_SUBMASK _BITUL(63 - 35)
+#define CR14_RECOVERY_SUBMASK _BITUL(63 - 36)
+#define CR14_DEGRADATION_SUBMASK _BITUL(63 - 37)
+#define CR14_EXTERNAL_DAMAGE_SUBMASK _BITUL(63 - 38)
+#define CR14_WARNING_SUBMASK _BITUL(63 - 39)
+
+#ifndef __ASSEMBLY__
+
#include <linux/bug.h>
#define __ctl_load(array, low, high) do { \
@@ -55,7 +67,11 @@ void smp_ctl_clear_bit(int cr, int bit);
union ctlreg0 {
unsigned long val;
struct {
- unsigned long : 32;
+ unsigned long : 8;
+ unsigned long tcx : 1; /* Transactional-Execution control */
+ unsigned long pifo : 1; /* Transactional-Execution Program-
+ Interruption-Filtering Override */
+ unsigned long : 22;
unsigned long : 3;
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
@@ -71,6 +87,19 @@ union ctlreg0 {
};
};
+union ctlreg2 {
+ unsigned long val;
+ struct {
+ unsigned long : 33;
+ unsigned long ducto : 25;
+ unsigned long : 1;
+ unsigned long gse : 1;
+ unsigned long : 1;
+ unsigned long tds : 1;
+ unsigned long tdc : 2;
+ };
+};
+
#ifdef CONFIG_SMP
# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
@@ -79,4 +108,5 @@ union ctlreg0 {
# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
#endif
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_CTL_REG_H */
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index a4ed25dd3278..c305d39f5016 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -14,71 +14,71 @@
#include <linux/refcount.h>
#include <uapi/asm/debug.h>
-#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
-#define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */
-#define DEBUG_FLUSH_ALL -1 /* parameter to flush all areas */
-#define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */
-#define DEBUG_MAX_NAME_LEN 64 /* max length for a debugfs file name */
-#define DEBUG_DEFAULT_LEVEL 3 /* initial debug level */
+#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
+#define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */
+#define DEBUG_FLUSH_ALL -1 /* parameter to flush all areas */
+#define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */
+#define DEBUG_MAX_NAME_LEN 64 /* max length for a debugfs file name */
+#define DEBUG_DEFAULT_LEVEL 3 /* initial debug level */
#define DEBUG_DIR_ROOT "s390dbf" /* name of debug root directory in proc fs */
-#define DEBUG_DATA(entry) (char*)(entry + 1) /* data is stored behind */
- /* the entry information */
+#define DEBUG_DATA(entry) (char *)(entry + 1) /* data is stored behind */
+ /* the entry information */
typedef struct __debug_entry debug_entry_t;
struct debug_view;
-typedef struct debug_info {
- struct debug_info* next;
- struct debug_info* prev;
+typedef struct debug_info {
+ struct debug_info *next;
+ struct debug_info *prev;
refcount_t ref_count;
- spinlock_t lock;
+ spinlock_t lock;
int level;
int nr_areas;
int pages_per_area;
int buf_size;
- int entry_size;
- debug_entry_t*** areas;
+ int entry_size;
+ debug_entry_t ***areas;
int active_area;
int *active_pages;
int *active_entries;
- struct dentry* debugfs_root_entry;
- struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
- struct debug_view* views[DEBUG_MAX_VIEWS];
+ struct dentry *debugfs_root_entry;
+ struct dentry *debugfs_entries[DEBUG_MAX_VIEWS];
+ struct debug_view *views[DEBUG_MAX_VIEWS];
char name[DEBUG_MAX_NAME_LEN];
umode_t mode;
} debug_info_t;
-typedef int (debug_header_proc_t) (debug_info_t* id,
- struct debug_view* view,
+typedef int (debug_header_proc_t) (debug_info_t *id,
+ struct debug_view *view,
int area,
- debug_entry_t* entry,
- char* out_buf);
-
-typedef int (debug_format_proc_t) (debug_info_t* id,
- struct debug_view* view, char* out_buf,
- const char* in_buf);
-typedef int (debug_prolog_proc_t) (debug_info_t* id,
- struct debug_view* view,
- char* out_buf);
-typedef int (debug_input_proc_t) (debug_info_t* id,
- struct debug_view* view,
- struct file* file,
+ debug_entry_t *entry,
+ char *out_buf);
+
+typedef int (debug_format_proc_t) (debug_info_t *id,
+ struct debug_view *view, char *out_buf,
+ const char *in_buf);
+typedef int (debug_prolog_proc_t) (debug_info_t *id,
+ struct debug_view *view,
+ char *out_buf);
+typedef int (debug_input_proc_t) (debug_info_t *id,
+ struct debug_view *view,
+ struct file *file,
const char __user *user_buf,
- size_t in_buf_size, loff_t* offset);
+ size_t in_buf_size, loff_t *offset);
+
+int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
+ int area, debug_entry_t *entry, char *out_buf);
-int debug_dflt_header_fn(debug_info_t* id, struct debug_view* view,
- int area, debug_entry_t* entry, char* out_buf);
-
struct debug_view {
char name[DEBUG_MAX_NAME_LEN];
- debug_prolog_proc_t* prolog_proc;
- debug_header_proc_t* header_proc;
- debug_format_proc_t* format_proc;
- debug_input_proc_t* input_proc;
- void* private_data;
+ debug_prolog_proc_t *prolog_proc;
+ debug_header_proc_t *header_proc;
+ debug_format_proc_t *format_proc;
+ debug_input_proc_t *input_proc;
+ void *private_data;
};
extern struct debug_view debug_hex_ascii_view;
@@ -87,65 +87,67 @@ extern struct debug_view debug_sprintf_view;
/* do NOT use the _common functions */
-debug_entry_t* debug_event_common(debug_info_t* id, int level,
- const void* data, int length);
+debug_entry_t *debug_event_common(debug_info_t *id, int level,
+ const void *data, int length);
-debug_entry_t* debug_exception_common(debug_info_t* id, int level,
- const void* data, int length);
+debug_entry_t *debug_exception_common(debug_info_t *id, int level,
+ const void *data, int length);
/* Debug Feature API: */
debug_info_t *debug_register(const char *name, int pages, int nr_areas,
- int buf_size);
+ int buf_size);
debug_info_t *debug_register_mode(const char *name, int pages, int nr_areas,
int buf_size, umode_t mode, uid_t uid,
gid_t gid);
-void debug_unregister(debug_info_t* id);
+void debug_unregister(debug_info_t *id);
-void debug_set_level(debug_info_t* id, int new_level);
+void debug_set_level(debug_info_t *id, int new_level);
void debug_set_critical(void);
void debug_stop_all(void);
-static inline bool debug_level_enabled(debug_info_t* id, int level)
+static inline bool debug_level_enabled(debug_info_t *id, int level)
{
return level <= id->level;
}
-static inline debug_entry_t*
-debug_event(debug_info_t* id, int level, void* data, int length)
+static inline debug_entry_t *debug_event(debug_info_t *id, int level,
+ void *data, int length)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_event_common(id,level,data,length);
+ return debug_event_common(id, level, data, length);
}
-static inline debug_entry_t*
-debug_int_event(debug_info_t* id, int level, unsigned int tag)
+static inline debug_entry_t *debug_int_event(debug_info_t *id, int level,
+ unsigned int tag)
{
- unsigned int t=tag;
+ unsigned int t = tag;
+
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_event_common(id,level,&t,sizeof(unsigned int));
+ return debug_event_common(id, level, &t, sizeof(unsigned int));
}
-static inline debug_entry_t *
-debug_long_event (debug_info_t* id, int level, unsigned long tag)
+static inline debug_entry_t *debug_long_event(debug_info_t *id, int level,
+ unsigned long tag)
{
- unsigned long t=tag;
+ unsigned long t = tag;
+
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_event_common(id,level,&t,sizeof(unsigned long));
+ return debug_event_common(id, level, &t, sizeof(unsigned long));
}
-static inline debug_entry_t*
-debug_text_event(debug_info_t* id, int level, const char* txt)
+static inline debug_entry_t *debug_text_event(debug_info_t *id, int level,
+ const char *txt)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_event_common(id,level,txt,strlen(txt));
+ return debug_event_common(id, level, txt, strlen(txt));
}
/*
@@ -161,6 +163,7 @@ __debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
debug_entry_t *__ret; \
debug_info_t *__id = _id; \
int __level = _level; \
+ \
if ((!__id) || (__level > __id->level)) \
__ret = NULL; \
else \
@@ -169,38 +172,40 @@ __debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
__ret; \
})
-static inline debug_entry_t*
-debug_exception(debug_info_t* id, int level, void* data, int length)
+static inline debug_entry_t *debug_exception(debug_info_t *id, int level,
+ void *data, int length)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_exception_common(id,level,data,length);
+ return debug_exception_common(id, level, data, length);
}
-static inline debug_entry_t*
-debug_int_exception(debug_info_t* id, int level, unsigned int tag)
+static inline debug_entry_t *debug_int_exception(debug_info_t *id, int level,
+ unsigned int tag)
{
- unsigned int t=tag;
+ unsigned int t = tag;
+
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_exception_common(id,level,&t,sizeof(unsigned int));
+ return debug_exception_common(id, level, &t, sizeof(unsigned int));
}
-static inline debug_entry_t *
-debug_long_exception (debug_info_t* id, int level, unsigned long tag)
+static inline debug_entry_t *debug_long_exception (debug_info_t *id, int level,
+ unsigned long tag)
{
- unsigned long t=tag;
+ unsigned long t = tag;
+
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_exception_common(id,level,&t,sizeof(unsigned long));
+ return debug_exception_common(id, level, &t, sizeof(unsigned long));
}
-static inline debug_entry_t*
-debug_text_exception(debug_info_t* id, int level, const char* txt)
+static inline debug_entry_t *debug_text_exception(debug_info_t *id, int level,
+ const char *txt)
{
if ((!id) || (level > id->level) || (id->pages_per_area == 0))
return NULL;
- return debug_exception_common(id,level,txt,strlen(txt));
+ return debug_exception_common(id, level, txt, strlen(txt));
}
/*
@@ -216,6 +221,7 @@ __debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
debug_entry_t *__ret; \
debug_info_t *__id = _id; \
int __level = _level; \
+ \
if ((!__id) || (__level > __id->level)) \
__ret = NULL; \
else \
@@ -224,13 +230,13 @@ __debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
__ret; \
})
-int debug_register_view(debug_info_t* id, struct debug_view* view);
-int debug_unregister_view(debug_info_t* id, struct debug_view* view);
+int debug_register_view(debug_info_t *id, struct debug_view *view);
+int debug_unregister_view(debug_info_t *id, struct debug_view *view);
/*
define the debug levels:
- 0 No debugging output to console or syslog
- - 1 Log internal errors to syslog, ignore check conditions
+ - 1 Log internal errors to syslog, ignore check conditions
- 2 Log internal errors and check conditions to syslog
- 3 Log internal errors to console, log check conditions to syslog
- 4 Log internal errors and check conditions to console
@@ -248,17 +254,17 @@ int debug_unregister_view(debug_info_t* id, struct debug_view* view);
#define INTERNAL_DEBMSG(x,y...) "D" __FILE__ "%d: " x, __LINE__, y
#if DEBUG_LEVEL > 0
-#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_INFO(x...) printk ( KERN_INFO PRINTK_HEADER x )
-#define PRINT_WARN(x...) printk ( KERN_WARNING PRINTK_HEADER x )
-#define PRINT_ERR(x...) printk ( KERN_ERR PRINTK_HEADER x )
-#define PRINT_FATAL(x...) panic ( PRINTK_HEADER x )
+#define PRINT_DEBUG(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_INFO(x...) printk(KERN_INFO PRINTK_HEADER x)
+#define PRINT_WARN(x...) printk(KERN_WARNING PRINTK_HEADER x)
+#define PRINT_ERR(x...) printk(KERN_ERR PRINTK_HEADER x)
+#define PRINT_FATAL(x...) panic(PRINTK_HEADER x)
#else
-#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_INFO(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_WARN(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_ERR(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_FATAL(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#endif /* DASD_DEBUG */
-
-#endif /* DEBUG_H */
+#define PRINT_DEBUG(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_INFO(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_WARN(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_ERR(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_FATAL(x...) printk(KERN_DEBUG PRINTK_HEADER x)
+#endif /* DASD_DEBUG */
+
+#endif /* DEBUG_H */
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
index 78d1b2d725b9..b0480c60a8e1 100644
--- a/arch/s390/include/asm/dis.h
+++ b/arch/s390/include/asm/dis.h
@@ -9,32 +9,7 @@
#ifndef __ASM_S390_DIS_H__
#define __ASM_S390_DIS_H__
-/* Type of operand */
-#define OPERAND_GPR 0x1 /* Operand printed as %rx */
-#define OPERAND_FPR 0x2 /* Operand printed as %fx */
-#define OPERAND_AR 0x4 /* Operand printed as %ax */
-#define OPERAND_CR 0x8 /* Operand printed as %cx */
-#define OPERAND_VR 0x10 /* Operand printed as %vx */
-#define OPERAND_DISP 0x20 /* Operand printed as displacement */
-#define OPERAND_BASE 0x40 /* Operand printed as base register */
-#define OPERAND_INDEX 0x80 /* Operand printed as index register */
-#define OPERAND_PCREL 0x100 /* Operand printed as pc-relative symbol */
-#define OPERAND_SIGNED 0x200 /* Operand printed as signed value */
-#define OPERAND_LENGTH 0x400 /* Operand printed as length (+1) */
-
-
-struct s390_operand {
- int bits; /* The number of bits in the operand. */
- int shift; /* The number of bits to shift. */
- int flags; /* One bit syntax flags. */
-};
-
-struct s390_insn {
- const char name[5];
- unsigned char opfrag;
- unsigned char format;
-};
-
+#include <generated/dis.h>
static inline int insn_length(unsigned char code)
{
@@ -45,7 +20,6 @@ struct pt_regs;
void show_code(struct pt_regs *regs);
void print_fn_code(unsigned char *code, unsigned long len);
-int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
struct s390_insn *find_insn(unsigned char *code);
static inline int is_known_insn(unsigned char *code)
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 8fc8764fe5ee..eaf490f9c5bc 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -16,11 +16,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &dma_noop_ops;
}
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
-}
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 5a8d92758a58..186c7b5f5511 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -13,6 +13,8 @@
#include <asm/cio.h>
#include <asm/setup.h>
+#define NSS_NAME_SIZE 8
+
#define IPL_PARMBLOCK_ORIGIN 0x2000
#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
@@ -106,7 +108,6 @@ extern size_t append_ipl_scpdata(char *, size_t);
enum {
IPL_DEVNO_VALID = 1,
IPL_PARMBLOCK_VALID = 2,
- IPL_NSS_VALID = 4,
};
enum ipl_type {
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 28792ef82c83..921391f2341e 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -63,8 +63,6 @@ typedef u16 kprobe_opcode_t;
#define kretprobe_blacklist_size 0
-#define KPROBE_SWAP_INST 0x10
-
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
/* copy of original instruction */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 51375e766e90..fd006a272024 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -736,7 +736,6 @@ struct kvm_arch{
wait_queue_head_t ipte_wq;
int ipte_lock_count;
struct mutex ipte_mutex;
- struct ratelimit_state sthyi_limit;
spinlock_t start_stop_lock;
struct sie_page2 *sie_page2;
struct kvm_s390_cpu_model model;
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 917f7344cab6..9eb36a1592c7 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -134,8 +134,9 @@ struct lowcore {
__u8 pad_0x03b4[0x03b8-0x03b4]; /* 0x03b4 */
__u64 gmap; /* 0x03b8 */
__u32 spinlock_lockval; /* 0x03c0 */
- __u32 fpu_flags; /* 0x03c4 */
- __u8 pad_0x03c8[0x0400-0x03c8]; /* 0x03c8 */
+ __u32 spinlock_index; /* 0x03c4 */
+ __u32 fpu_flags; /* 0x03c8 */
+ __u8 pad_0x03cc[0x0400-0x03cc]; /* 0x03cc */
/* Per cpu primary space access list */
__u32 paste[16]; /* 0x0400 */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 43607bb12cc2..cf4c1cb17dcd 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -44,6 +44,8 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.asce_limit = STACK_TOP_MAX;
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+ /* pgd_alloc() did not account this pud */
+ mm_inc_nr_puds(mm);
break;
case -PAGE_SIZE:
/* forked 5-level task, set new asce with new_mm->pgd */
@@ -59,7 +61,7 @@ static inline int init_new_context(struct task_struct *tsk,
/* forked 2-level compat task, set new asce with new mm->pgd */
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
- /* pgd_alloc() did not increase mm->nr_pmds */
+ /* pgd_alloc() did not account this pmd */
mm_inc_nr_pmds(mm);
}
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index c8a7beadd3d4..1e5dc4537bf2 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -26,12 +26,9 @@
#define MCCK_CODE_CPU_TIMER_VALID _BITUL(63 - 46)
#define MCCK_CODE_PSW_MWP_VALID _BITUL(63 - 20)
#define MCCK_CODE_PSW_IA_VALID _BITUL(63 - 23)
-
-#define MCCK_CR14_CR_PENDING_SUB_MASK (1 << 28)
-#define MCCK_CR14_RECOVERY_SUB_MASK (1 << 27)
-#define MCCK_CR14_DEGRAD_SUB_MASK (1 << 26)
-#define MCCK_CR14_EXT_DAMAGE_SUB_MASK (1 << 25)
-#define MCCK_CR14_WARN_SUB_MASK (1 << 24)
+#define MCCK_CODE_CR_VALID _BITUL(63 - 29)
+#define MCCK_CODE_GS_VALID _BITUL(63 - 36)
+#define MCCK_CODE_FC_VALID _BITUL(63 - 43)
#ifndef __ASSEMBLY__
@@ -87,6 +84,8 @@ union mci {
#define MCESA_ORIGIN_MASK (~0x3ffUL)
#define MCESA_LC_MASK (0xfUL)
+#define MCESA_MIN_SIZE (1024)
+#define MCESA_MAX_SIZE (2048)
struct mcesa {
u8 vector_save_area[1024];
@@ -95,8 +94,12 @@ struct mcesa {
struct pt_regs;
-extern void s390_handle_mcck(void);
-extern void s390_do_machine_check(struct pt_regs *regs);
+void nmi_alloc_boot_cpu(struct lowcore *lc);
+int nmi_alloc_per_cpu(struct lowcore *lc);
+void nmi_free_per_cpu(struct lowcore *lc);
+
+void s390_handle_mcck(void);
+void s390_do_machine_check(struct pt_regs *regs);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_NMI_H */
diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h
index 6c2c38060f8b..5dfe47588277 100644
--- a/arch/s390/include/asm/pci_debug.h
+++ b/arch/s390/include/asm/pci_debug.h
@@ -19,11 +19,7 @@ extern debug_info_t *pci_debug_err_id;
static inline void zpci_err_hex(void *addr, int len)
{
- while (len > 0) {
- debug_event(pci_debug_err_id, 0, (void *) addr, len);
- len -= pci_debug_err_id->buf_size;
- addr += pci_debug_err_id->buf_size;
- }
+ debug_event(pci_debug_err_id, 0, addr, len);
}
#endif
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index 419e83fa4721..ba22a6ea51a1 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -82,6 +82,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
int zpci_load(u64 *data, u64 req, u64 offset);
int zpci_store(u64 data, u64 req, u64 offset);
int zpci_store_block(const u64 *data, u64 req, u64 offset);
-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
#endif
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index bbe99cb8219d..c7b4333d1de0 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -13,6 +13,7 @@
#define _S390_PGALLOC_H
#include <linux/threads.h>
+#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/mm.h>
@@ -28,24 +29,9 @@ void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;
-static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
-{
- struct addrtype { char _[256]; };
- int i;
-
- for (i = 0; i < n; i += 256) {
- *s = val;
- asm volatile(
- "mvc 8(248,%[s]),0(%[s])\n"
- : "+m" (*(struct addrtype *) s)
- : [s] "a" (s));
- s += 256 / sizeof(long);
- }
-}
-
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
- clear_table(crst, entry, _CRST_TABLE_SIZE);
+ memset64((u64 *)crst, entry, _CRST_ENTRIES);
}
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 9cf92abe23c3..f25bfe888933 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -22,6 +22,7 @@
#define CIF_IGNORE_IRQ 5 /* ignore interrupt (for udelay) */
#define CIF_ENABLED_WAIT 6 /* in enabled wait state */
#define CIF_MCCK_GUEST 7 /* machine check happening in guest */
+#define CIF_DEDICATED_CPU 8 /* this CPU is dedicated */
#define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING)
#define _CIF_ASCE_PRIMARY _BITUL(CIF_ASCE_PRIMARY)
@@ -31,6 +32,7 @@
#define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ)
#define _CIF_ENABLED_WAIT _BITUL(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST _BITUL(CIF_MCCK_GUEST)
+#define _CIF_DEDICATED_CPU _BITUL(CIF_DEDICATED_CPU)
#ifndef __ASSEMBLY__
@@ -219,10 +221,10 @@ void show_registers(struct pt_regs *regs);
void show_cacheinfo(struct seq_file *m);
/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
+static inline void release_thread(struct task_struct *tsk) { }
-/* Free guarded storage control block for current */
-void exit_thread_gs(void);
+/* Free guarded storage control block */
+void guarded_storage_release(struct task_struct *tsk);
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
index ea8896ba5afc..6b1540337ed6 100644
--- a/arch/s390/include/asm/runtime_instr.h
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -6,55 +6,55 @@
#define S390_RUNTIME_INSTR_STOP 0x2
struct runtime_instr_cb {
- __u64 buf_current;
- __u64 buf_origin;
- __u64 buf_limit;
+ __u64 rca;
+ __u64 roa;
+ __u64 rla;
- __u32 valid : 1;
- __u32 pstate : 1;
- __u32 pstate_set_buf : 1;
- __u32 home_space : 1;
- __u32 altered : 1;
- __u32 : 3;
- __u32 pstate_sample : 1;
- __u32 sstate_sample : 1;
- __u32 pstate_collect : 1;
- __u32 sstate_collect : 1;
- __u32 : 1;
- __u32 halted_int : 1;
- __u32 int_requested : 1;
- __u32 buffer_full_int : 1;
+ __u32 v : 1;
+ __u32 s : 1;
+ __u32 k : 1;
+ __u32 h : 1;
+ __u32 a : 1;
+ __u32 reserved1 : 3;
+ __u32 ps : 1;
+ __u32 qs : 1;
+ __u32 pc : 1;
+ __u32 qc : 1;
+ __u32 reserved2 : 1;
+ __u32 g : 1;
+ __u32 u : 1;
+ __u32 l : 1;
__u32 key : 4;
- __u32 : 9;
+ __u32 reserved3 : 8;
+ __u32 t : 1;
__u32 rgs : 3;
- __u32 mode : 4;
- __u32 next : 1;
+ __u32 m : 4;
+ __u32 n : 1;
__u32 mae : 1;
- __u32 : 2;
- __u32 call_type_br : 1;
- __u32 return_type_br : 1;
- __u32 other_type_br : 1;
- __u32 bc_other_type : 1;
- __u32 emit : 1;
- __u32 tx_abort : 1;
- __u32 : 2;
- __u32 bp_xn : 1;
- __u32 bp_xt : 1;
- __u32 bp_ti : 1;
- __u32 bp_ni : 1;
- __u32 suppr_y : 1;
- __u32 suppr_z : 1;
+ __u32 reserved4 : 2;
+ __u32 c : 1;
+ __u32 r : 1;
+ __u32 b : 1;
+ __u32 j : 1;
+ __u32 e : 1;
+ __u32 x : 1;
+ __u32 reserved5 : 2;
+ __u32 bpxn : 1;
+ __u32 bpxt : 1;
+ __u32 bpti : 1;
+ __u32 bpni : 1;
+ __u32 reserved6 : 2;
- __u32 dc_miss_extra : 1;
- __u32 lat_lev_ignore : 1;
- __u32 ic_lat_lev : 4;
- __u32 dc_lat_lev : 4;
+ __u32 d : 1;
+ __u32 f : 1;
+ __u32 ic : 4;
+ __u32 dc : 4;
- __u64 reserved1;
- __u64 scaling_factor;
+ __u64 reserved7;
+ __u64 sf;
__u64 rsic;
- __u64 reserved2;
+ __u64 reserved8;
} __packed __aligned(8);
extern struct runtime_instr_cb runtime_instr_empty_cb;
@@ -86,6 +86,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
load_runtime_instr_cb(&runtime_instr_empty_cb);
}
-void exit_thread_runtime_instr(void);
+struct task_struct;
+
+void runtime_instr_release(struct task_struct *tsk);
#endif /* _RUNTIME_INSTR_H */
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
deleted file mode 100644
index f731b7b518bd..000000000000
--- a/arch/s390/include/asm/rwsem.h
+++ /dev/null
@@ -1,211 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _S390_RWSEM_H
-#define _S390_RWSEM_H
-
-/*
- * S390 version
- * Copyright IBM Corp. 2002
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
- */
-
-/*
- *
- * The MSW of the count is the negated number of active writers and waiting
- * lockers, and the LSW is the total number of active locks
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
- * uncontended lock. This can be determined because XADD returns the old value.
- * Readers increment by 1 and see a positive value when uncontended, negative
- * if there are writers (and maybe) readers waiting (in which case it goes to
- * sleep).
- *
- * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
- * be extended to 65534 by manually checking the whole MSW rather than relying
- * on the S flag.
- *
- * The value of ACTIVE_BIAS supports up to 65535 active processes.
- *
- * This should be totally fair - if anything is waiting, a process that wants a
- * lock will go to the back of the queue. When the currently active lock is
- * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consecutive readers at the
- * front, then they'll all be woken up, but no other readers will be.
- */
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
-#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
-#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
-#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
- signed long old, new;
-
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " aghi %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
- : "cc", "memory");
- if (old < 0)
- rwsem_down_read_failed(sem);
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
- signed long old, new;
-
- asm volatile(
- " lg %0,%2\n"
- "0: ltgr %1,%0\n"
- " jm 1f\n"
- " aghi %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b\n"
- "1:"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
- : "cc", "memory");
- return old >= 0 ? 1 : 0;
-}
-
-/*
- * lock for writing
- */
-static inline long ___down_write(struct rw_semaphore *sem)
-{
- signed long old, new, tmp;
-
- tmp = RWSEM_ACTIVE_WRITE_BIAS;
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " ag %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "m" (tmp)
- : "cc", "memory");
-
- return old;
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
- if (___down_write(sem))
- rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
- if (___down_write(sem))
- if (IS_ERR(rwsem_down_write_failed_killable(sem)))
- return -EINTR;
-
- return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
- signed long old;
-
- asm volatile(
- " lg %0,%1\n"
- "0: ltgr %0,%0\n"
- " jnz 1f\n"
- " csg %0,%3,%1\n"
- " jl 0b\n"
- "1:"
- : "=&d" (old), "=Q" (sem->count)
- : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
- : "cc", "memory");
- return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
- signed long old, new;
-
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " aghi %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
- : "cc", "memory");
- if (new < 0)
- if ((new & RWSEM_ACTIVE_MASK) == 0)
- rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
- signed long old, new, tmp;
-
- tmp = -RWSEM_ACTIVE_WRITE_BIAS;
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " ag %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "m" (tmp)
- : "cc", "memory");
- if (new < 0)
- if ((new & RWSEM_ACTIVE_MASK) == 0)
- rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
- signed long old, new, tmp;
-
- tmp = -RWSEM_WAITING_BIAS;
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " ag %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "m" (tmp)
- : "cc", "memory");
- if (new > 1)
- rwsem_downgrade_wake(sem);
-}
-
-#endif /* _S390_RWSEM_H */
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 0ac3e8166e85..54f81f8ed662 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -4,6 +4,6 @@
#include <asm-generic/sections.h>
-extern char _eshared[], _ehead[];
+extern char _ehead[];
#endif
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index f2c2b7cd9099..8bc87dcb10eb 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -98,9 +98,6 @@ extern char vmpoff_cmd[];
#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0)
#define SET_CONSOLE_HVC do { console_mode = 5; } while (0)
-#define NSS_NAME_SIZE 8
-extern char kernel_nss_name[];
-
#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index babe83ed416c..3907ead27ffa 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -28,6 +28,7 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void smp_call_online_cpu(void (*func)(void *), void *);
extern void smp_call_ipl_cpu(void (*func)(void *), void *);
+extern void smp_emergency_stop(void);
extern int smp_find_processor_id(u16 address);
extern int smp_store_status(int cpu);
@@ -53,6 +54,10 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
func(data);
}
+static inline void smp_emergency_stop(void)
+{
+}
+
static inline int smp_find_processor_id(u16 address) { return 0; }
static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index f3f5e0155b10..0a29588aa00b 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -14,6 +14,7 @@
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
+#include <asm/alternative.h>
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
@@ -36,20 +37,16 @@ bool arch_vcpu_is_preempted(int cpu);
* (the type definitions are in asm/spinlock_types.h)
*/
-void arch_lock_relax(int cpu);
+void arch_spin_relax(arch_spinlock_t *lock);
+#define arch_spin_relax arch_spin_relax
void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
-void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
-
-static inline void arch_spin_relax(arch_spinlock_t *lock)
-{
- arch_lock_relax(lock->lock);
-}
+void arch_spin_lock_setup(int cpu);
static inline u32 arch_spin_lockval(int cpu)
{
- return ~cpu;
+ return cpu + 1;
}
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
@@ -65,8 +62,7 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp)
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
barrier();
- return likely(arch_spin_value_unlocked(*lp) &&
- __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
+ return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}
static inline void arch_spin_lock(arch_spinlock_t *lp)
@@ -79,8 +75,9 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
unsigned long flags)
{
if (!arch_spin_trylock_once(lp))
- arch_spin_lock_wait_flags(lp, flags);
+ arch_spin_lock_wait(lp);
}
+#define arch_spin_lock_flags arch_spin_lock_flags
static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
@@ -93,11 +90,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
typecheck(int, lp->lock);
asm volatile(
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- " .long 0xb2fa0070\n" /* NIAI 7 */
-#endif
- " st %1,%0\n"
- : "=Q" (lp->lock) : "d" (0) : "cc", "memory");
+ ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */
+ " sth %1,%0\n"
+ : "=Q" (((unsigned short *) &lp->lock)[1])
+ : "d" (0) : "cc", "memory");
}
/*
@@ -111,168 +107,53 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
* read-locks.
*/
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == 0)
-
-extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
-extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
-
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-static inline int arch_read_trylock_once(arch_rwlock_t *rw)
-{
- int old = ACCESS_ONCE(rw->lock);
- return likely(old >= 0 &&
- __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
-}
-
-static inline int arch_write_trylock_once(arch_rwlock_t *rw)
-{
- int old = ACCESS_ONCE(rw->lock);
- return likely(old == 0 &&
- __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
-}
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __RAW_OP_OR "lao"
-#define __RAW_OP_AND "lan"
-#define __RAW_OP_ADD "laa"
-
-#define __RAW_LOCK(ptr, op_val, op_string) \
-({ \
- int old_val; \
- \
- typecheck(int *, ptr); \
- asm volatile( \
- op_string " %0,%2,%1\n" \
- "bcr 14,0\n" \
- : "=d" (old_val), "+Q" (*ptr) \
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
-
-#define __RAW_UNLOCK(ptr, op_val, op_string) \
-({ \
- int old_val; \
- \
- typecheck(int *, ptr); \
- asm volatile( \
- op_string " %0,%2,%1\n" \
- : "=d" (old_val), "+Q" (*ptr) \
- : "d" (op_val) \
- : "cc", "memory"); \
- old_val; \
-})
+#define arch_read_relax(rw) barrier()
+#define arch_write_relax(rw) barrier()
-extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);
+void arch_read_lock_wait(arch_rwlock_t *lp);
+void arch_write_lock_wait(arch_rwlock_t *lp);
static inline void arch_read_lock(arch_rwlock_t *rw)
{
int old;
- old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
- if (old < 0)
- _raw_read_lock_wait(rw);
+ old = __atomic_add(1, &rw->cnts);
+ if (old & 0xffff0000)
+ arch_read_lock_wait(rw);
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
+ __atomic_add_const_barrier(-1, &rw->cnts);
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
- int old;
-
- old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
- if (old != 0)
- _raw_write_lock_wait(rw, old);
- rw->owner = SPINLOCK_LOCKVAL;
+ if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
+ arch_write_lock_wait(rw);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- rw->owner = 0;
- __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
+ __atomic_add_barrier(-0x30000, &rw->cnts);
}
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp);
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- if (!arch_read_trylock_once(rw))
- _raw_read_lock_wait(rw);
-}
-static inline void arch_read_unlock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int old;
- do {
- old = ACCESS_ONCE(rw->lock);
- } while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- if (!arch_write_trylock_once(rw))
- _raw_write_lock_wait(rw);
- rw->owner = SPINLOCK_LOCKVAL;
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- typecheck(int, rw->lock);
-
- rw->owner = 0;
- asm volatile(
- "st %1,%0\n"
- : "+Q" (rw->lock)
- : "d" (0)
- : "cc", "memory");
-}
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- if (!arch_read_trylock_once(rw))
- return _raw_read_trylock_retry(rw);
- return 1;
+ old = READ_ONCE(rw->cnts);
+ return (!(old & 0xffff0000) &&
+ __atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
- return 0;
- rw->owner = SPINLOCK_LOCKVAL;
- return 1;
-}
-
-static inline void arch_read_relax(arch_rwlock_t *rw)
-{
- arch_lock_relax(rw->owner);
-}
+ int old;
-static inline void arch_write_relax(arch_rwlock_t *rw)
-{
- arch_lock_relax(rw->owner);
+ old = READ_ONCE(rw->cnts);
+ return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
}
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index 1861a0c5dd47..cfed272e4fd5 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -13,8 +13,8 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
typedef struct {
- int lock;
- int owner;
+ int cnts;
+ arch_spinlock_t wait;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 27ce494198f5..50f26fc9acb2 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -18,6 +18,9 @@
#define __HAVE_ARCH_MEMMOVE /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMSCAN /* inline & arch function */
#define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */
+#define __HAVE_ARCH_MEMSET16 /* arch function */
+#define __HAVE_ARCH_MEMSET32 /* arch function */
+#define __HAVE_ARCH_MEMSET64 /* arch function */
#define __HAVE_ARCH_STRCAT /* inline & arch function */
#define __HAVE_ARCH_STRCMP /* arch function */
#define __HAVE_ARCH_STRCPY /* inline & arch function */
@@ -31,17 +34,17 @@
#define __HAVE_ARCH_STRSTR /* arch function */
/* Prototypes for non-inlined arch strings functions. */
-extern int memcmp(const void *, const void *, size_t);
-extern void *memcpy(void *, const void *, size_t);
-extern void *memset(void *, int, size_t);
-extern void *memmove(void *, const void *, size_t);
-extern int strcmp(const char *,const char *);
-extern size_t strlcat(char *, const char *, size_t);
-extern size_t strlcpy(char *, const char *, size_t);
-extern char *strncat(char *, const char *, size_t);
-extern char *strncpy(char *, const char *, size_t);
-extern char *strrchr(const char *, int);
-extern char *strstr(const char *, const char *);
+int memcmp(const void *s1, const void *s2, size_t n);
+void *memcpy(void *dest, const void *src, size_t n);
+void *memset(void *s, int c, size_t n);
+void *memmove(void *dest, const void *src, size_t n);
+int strcmp(const char *s1, const char *s2);
+size_t strlcat(char *dest, const char *src, size_t n);
+size_t strlcpy(char *dest, const char *src, size_t size);
+char *strncat(char *dest, const char *src, size_t n);
+char *strncpy(char *dest, const char *src, size_t n);
+char *strrchr(const char *s, int c);
+char *strstr(const char *s1, const char *s2);
#undef __HAVE_ARCH_STRCHR
#undef __HAVE_ARCH_STRNCHR
@@ -50,7 +53,26 @@ extern char *strstr(const char *, const char *);
#undef __HAVE_ARCH_STRSEP
#undef __HAVE_ARCH_STRSPN
-#if !defined(IN_ARCH_STRING_C)
+void *__memset16(uint16_t *s, uint16_t v, size_t count);
+void *__memset32(uint32_t *s, uint32_t v, size_t count);
+void *__memset64(uint64_t *s, uint64_t v, size_t count);
+
+static inline void *memset16(uint16_t *s, uint16_t v, size_t count)
+{
+ return __memset16(s, v, count * sizeof(v));
+}
+
+static inline void *memset32(uint32_t *s, uint32_t v, size_t count)
+{
+ return __memset32(s, v, count * sizeof(v));
+}
+
+static inline void *memset64(uint64_t *s, uint64_t v, size_t count)
+{
+ return __memset64(s, v, count * sizeof(v));
+}
+
+#if !defined(IN_ARCH_STRING_C) && (!defined(CONFIG_FORTIFY_SOURCE) || defined(__NO_FORTIFY))
static inline void *memchr(const void * s, int c, size_t n)
{
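string.h now advertises arch implementations of the generic memset16/32/64 helpers; the inline wrappers only convert an element count into the byte count expected by __memset16/32/64. A usage sketch of the generic API (not part of the patch):

    /*
     * Usage sketch: fill a buffer of 32-bit values with a single pattern
     * using the newly advertised arch-accelerated helper.
     */
    #include <linux/string.h>
    #include <linux/types.h>

    static void fill_pattern(u32 *buf, size_t nr_words)
    {
            /* writes nr_words * sizeof(u32) bytes of the repeated pattern */
            memset32(buf, 0xdeadbeef, nr_words);
    }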
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index c21fe1d57c00..ec7b476c1ac5 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -37,8 +37,8 @@ static inline void restore_access_regs(unsigned int *acrs)
save_ri_cb(prev->thread.ri_cb); \
save_gs_cb(prev->thread.gs_cb); \
} \
+ update_cr_regs(next); \
if (next->mm) { \
- update_cr_regs(next); \
set_cpu_flag(CIF_FPU); \
restore_access_regs(&next->thread.acrs[0]); \
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 2b498e58b914..a702cb9d4269 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -156,7 +156,8 @@ static inline unsigned char topology_mnest_limit(void)
struct topology_core {
unsigned char nl;
unsigned char reserved0[3];
- unsigned char :6;
+ unsigned char :5;
+ unsigned char d:1;
unsigned char pp:2;
unsigned char reserved1;
unsigned short origin;
@@ -198,4 +199,5 @@ struct service_level {
int register_service_level(struct service_level *);
int unregister_service_level(struct service_level *);
+int sthyi_fill(void *dst, u64 *rc);
#endif /* __ASM_S390_SYSINFO_H */
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 55de4eb73604..1807229b292f 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -17,6 +17,7 @@ struct cpu_topology_s390 {
unsigned short book_id;
unsigned short drawer_id;
unsigned short node_id;
+ unsigned short dedicated : 1;
cpumask_t thread_mask;
cpumask_t core_mask;
cpumask_t book_mask;
@@ -35,6 +36,7 @@ extern cpumask_t cpus_with_topology;
#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)
#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask)
+#define topology_cpu_dedicated(cpu) (cpu_topology[cpu].dedicated)
#define mc_capable() 1
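The topology_core SYSIB block gains a d (dedicated) bit, mirrored by the new dedicated flag in cpu_topology_s390 and the topology_cpu_dedicated() accessor. A small illustrative consumer, assuming the usual cpumask iteration helpers, might read:

    /*
     * Illustration only: report which online CPUs run on dedicated cores
     * using the new topology_cpu_dedicated() accessor.
     */
    #include <linux/cpumask.h>
    #include <linux/printk.h>
    #include <asm/topology.h>

    static void report_dedicated_cpus(void)
    {
            int cpu;

            for_each_online_cpu(cpu)
                    if (topology_cpu_dedicated(cpu))
                            pr_info("cpu %d runs on a dedicated core\n", cpu);
    }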
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index bb2ce72300b0..ae6261ef97d5 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -47,6 +47,7 @@ struct vdso_per_cpu_data {
extern struct vdso_data *vdso_data;
+void vdso_alloc_boot_cpu(struct lowcore *lowcore);
int vdso_alloc_per_cpu(struct lowcore *lowcore);
void vdso_free_per_cpu(struct lowcore *lowcore);
diff --git a/arch/s390/include/uapi/asm/kvm_virtio.h b/arch/s390/include/uapi/asm/kvm_virtio.h
deleted file mode 100644
index 73283677a132..000000000000
--- a/arch/s390/include/uapi/asm/kvm_virtio.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * definition for virtio for kvm on s390
- *
- * Copyright IBM Corp. 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- */
-
-#ifndef __KVM_S390_VIRTIO_H
-#define __KVM_S390_VIRTIO_H
-
-#include <linux/types.h>
-
-struct kvm_device_desc {
- /* The device type: console, network, disk etc. Type 0 terminates. */
- __u8 type;
- /* The number of virtqueues (first in config array) */
- __u8 num_vq;
- /*
- * The number of bytes of feature bits. Multiply by 2: one for host
- * features and one for guest acknowledgements.
- */
- __u8 feature_len;
- /* The number of bytes of the config array after virtqueues. */
- __u8 config_len;
- /* A status byte, written by the Guest. */
- __u8 status;
- __u8 config[0];
-};
-
-/*
- * This is how we expect the device configuration field for a virtqueue
- * to be laid out in config space.
- */
-struct kvm_vqconfig {
- /* The token returned with an interrupt. Set by the guest */
- __u64 token;
- /* The address of the virtio ring */
- __u64 address;
- /* The number of entries in the virtio_ring */
- __u16 num;
-
-};
-
-#define KVM_S390_VIRTIO_NOTIFY 0
-#define KVM_S390_VIRTIO_RESET 1
-#define KVM_S390_VIRTIO_SET_STATUS 2
-
-/* The alignment to use between consumer and producer parts of vring.
- * This is pagesize for historical reasons. */
-#define KVM_S390_VIRTIO_RING_ALIGN 4096
-
-
-/* These values are supposed to be in ext_params on an interrupt */
-#define VIRTIO_PARAM_MASK 0xff
-#define VIRTIO_PARAM_VRING_INTERRUPT 0x0
-#define VIRTIO_PARAM_CONFIG_CHANGED 0x1
-#define VIRTIO_PARAM_DEV_ADD 0x2
-
-#endif
diff --git a/arch/s390/include/uapi/asm/sthyi.h b/arch/s390/include/uapi/asm/sthyi.h
new file mode 100644
index 000000000000..ec113db4eb7e
--- /dev/null
+++ b/arch/s390/include/uapi/asm/sthyi.h
@@ -0,0 +1,6 @@
+#ifndef _UAPI_ASM_STHYI_H
+#define _UAPI_ASM_STHYI_H
+
+#define STHYI_FC_CP_IFL_CAP 0
+
+#endif /* _UAPI_ASM_STHYI_H */
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index b52bce8ee941..725120939051 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -316,7 +316,8 @@
#define __NR_pwritev2 377
#define __NR_s390_guarded_storage 378
#define __NR_statx 379
-#define NR_syscalls 380
+#define __NR_s390_sthyi 380
+#define NR_syscalls 381
/*
* There are some system calls that are not present on 64 bit, some
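Together with the new STHYI_FC_CP_IFL_CAP function code, syscall 380 (s390_sthyi) exposes Store Hypervisor Information data to user space; the compat wrapper further down shows the argument order (code, info buffer, rc pointer, flags). A user-space sketch follows; the page-sized, page-aligned buffer and flags value of 0 are assumptions, not requirements stated in this patch:

    /*
     * User-space sketch.  Assumptions: a page-sized, page-aligned buffer is
     * sufficient and flags of 0 is accepted; consult the syscall
     * documentation before relying on either.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #define __NR_s390_sthyi     380
    #define STHYI_FC_CP_IFL_CAP 0

    int main(void)
    {
            uint64_t rc = 0;
            void *info;
            long ret;

            if (posix_memalign(&info, 4096, 4096))
                    return 1;
            ret = syscall(__NR_s390_sthyi, STHYI_FC_CP_IFL_CAP, info, &rc, 0);
            if (ret)
                    perror("s390_sthyi");
            else
                    printf("s390_sthyi rc value: %llu\n", (unsigned long long)rc);
            free(info);
            return 0;
    }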
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 4ce2d05929a7..83bc82001c06 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -34,6 +34,8 @@ AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
AFLAGS_head.o += -march=z900
endif
+CFLAGS_als.o += -D__NO_FORTIFY
+
#
# Passing null pointers is ok for smp code, since we access the lowcore here.
#
@@ -56,7 +58,7 @@ obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
-obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o
+obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o
extra-y += head.o head64.o vmlinux.lds
@@ -75,6 +77,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UPROBES) += uprobes.o
+obj-$(CONFIG_ALTERNATIVES) += alternative.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
new file mode 100644
index 000000000000..315986a06cf5
--- /dev/null
+++ b/arch/s390/kernel/alternative.c
@@ -0,0 +1,110 @@
+#include <linux/module.h>
+#include <asm/alternative.h>
+#include <asm/facility.h>
+
+#define MAX_PATCH_LEN (255 - 1)
+
+static int __initdata_or_module alt_instr_disabled;
+
+static int __init disable_alternative_instructions(char *str)
+{
+ alt_instr_disabled = 1;
+ return 0;
+}
+
+early_param("noaltinstr", disable_alternative_instructions);
+
+struct brcl_insn {
+ u16 opc;
+ s32 disp;
+} __packed;
+
+static u16 __initdata_or_module nop16 = 0x0700;
+static u32 __initdata_or_module nop32 = 0x47000000;
+static struct brcl_insn __initdata_or_module nop48 = {
+ 0xc004, 0
+};
+
+static const void *nops[] __initdata_or_module = {
+ &nop16,
+ &nop32,
+ &nop48
+};
+
+static void __init_or_module add_jump_padding(void *insns, unsigned int len)
+{
+ struct brcl_insn brcl = {
+ 0xc0f4,
+ len / 2
+ };
+
+ memcpy(insns, &brcl, sizeof(brcl));
+ insns += sizeof(brcl);
+ len -= sizeof(brcl);
+
+ while (len > 0) {
+ memcpy(insns, &nop16, 2);
+ insns += 2;
+ len -= 2;
+ }
+}
+
+static void __init_or_module add_padding(void *insns, unsigned int len)
+{
+ if (len > 6)
+ add_jump_padding(insns, len);
+ else if (len >= 2)
+ memcpy(insns, nops[len / 2 - 1], len);
+}
+
+static void __init_or_module __apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end)
+{
+ struct alt_instr *a;
+ u8 *instr, *replacement;
+ u8 insnbuf[MAX_PATCH_LEN];
+
+ /*
+ * The scan order should be from start to end. A later scanned
+ * alternative code can overwrite previously scanned alternative code.
+ */
+ for (a = start; a < end; a++) {
+ int insnbuf_sz = 0;
+
+ instr = (u8 *)&a->instr_offset + a->instr_offset;
+ replacement = (u8 *)&a->repl_offset + a->repl_offset;
+
+ if (!test_facility(a->facility))
+ continue;
+
+ if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
+ WARN_ONCE(1, "cpu alternatives instructions length is "
+ "odd, skipping patching\n");
+ continue;
+ }
+
+ memcpy(insnbuf, replacement, a->replacementlen);
+ insnbuf_sz = a->replacementlen;
+
+ if (a->instrlen > a->replacementlen) {
+ add_padding(insnbuf + a->replacementlen,
+ a->instrlen - a->replacementlen);
+ insnbuf_sz += a->instrlen - a->replacementlen;
+ }
+
+ s390_kernel_write(instr, insnbuf, insnbuf_sz);
+ }
+}
+
+void __init_or_module apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end)
+{
+ if (!alt_instr_disabled)
+ __apply_alternatives(start, end);
+}
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+void __init apply_alternative_instructions(void)
+{
+ apply_alternatives(__alt_instructions, __alt_instructions_end);
+}
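The new alternative.c patches facility-dependent instruction sequences in place; when a replacement is shorter than the original, add_padding() fills the (always even) gap with a single 2-, 4- or 6-byte nop, or with a brcl 15 branch over a run of 2-byte nops for anything larger. The standalone sketch below only models that padding rule and is not kernel code:

    /*
     * Standalone illustration of the padding rule in add_padding() and
     * add_jump_padding(): gaps of 2/4/6 bytes become a single nop of
     * matching size, larger gaps become a branch over 2-byte nops.
     */
    #include <stdio.h>

    static const char *padding_for(unsigned int gap)
    {
            if (gap > 6)
                    return "brcl 15 over the gap, rest filled with 2-byte nops";
            if (gap == 6)
                    return "brcl 0,0 (6-byte nop)";
            if (gap == 4)
                    return "4-byte nop";
            if (gap == 2)
                    return "2-byte nop";
            return "no padding";
    }

    int main(void)
    {
            unsigned int gap;

            for (gap = 0; gap <= 12; gap += 2)
                    printf("gap %2u bytes: %s\n", gap, padding_for(gap));
            return 0;
    }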
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 0e6d2b032484..33ec80df7ed4 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -14,6 +14,7 @@
#include <asm/vdso.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
+#include <asm/nmi.h>
/*
* Make sure that the compiler is new enough. We want a compiler that
@@ -159,6 +160,7 @@ int main(void)
OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock);
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
+ OFFSET(__LC_CLOCK_COMPARATOR, lowcore, clock_comparator);
OFFSET(__LC_BOOT_CLOCK, lowcore, boot_clock);
OFFSET(__LC_CURRENT, lowcore, current_task);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
@@ -194,6 +196,9 @@ int main(void)
OFFSET(__LC_CREGS_SAVE_AREA, lowcore, cregs_save_area);
OFFSET(__LC_PGM_TDB, lowcore, pgm_tdb);
BLANK();
+ /* extended machine check save area */
+ OFFSET(__MCESA_GS_SAVE_AREA, mcesa, guarded_storage_save_area);
+ BLANK();
/* gmap/sie offsets */
OFFSET(__GMAP_ASCE, gmap, asce);
OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c);
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index d04918583971..11e9d8b5c1b0 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -181,3 +181,4 @@ COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb);
COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
+COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags);
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 05a9cf4ae9c2..58b9e127b615 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -5,7 +5,7 @@
* Copyright IBM Corp. 1999, 2012
*
* Author(s): Michael Holzheu (holzheu@de.ibm.com),
- * Holger Smolinski (Holger.Smolinski@de.ibm.com)
+ * Holger Smolinski (Holger.Smolinski@de.ibm.com)
*
* Bugreports to: <Linux390@de.ibm.com>
*/
@@ -37,69 +37,67 @@
typedef struct file_private_info {
loff_t offset; /* offset of last read in file */
- int act_area; /* number of last formated area */
- int act_page; /* act page in given area */
- int act_entry; /* last formated entry (offset */
- /* relative to beginning of last */
- /* formated page) */
- size_t act_entry_offset; /* up to this offset we copied */
+ int act_area; /* number of last formated area */
+ int act_page; /* act page in given area */
+ int act_entry; /* last formated entry (offset */
+ /* relative to beginning of last */
+ /* formated page) */
+ size_t act_entry_offset; /* up to this offset we copied */
/* in last read the last formated */
/* entry to userland */
char temp_buf[2048]; /* buffer for output */
- debug_info_t *debug_info_org; /* original debug information */
+ debug_info_t *debug_info_org; /* original debug information */
debug_info_t *debug_info_snap; /* snapshot of debug information */
struct debug_view *view; /* used view of debug info */
} file_private_info_t;
-typedef struct
-{
+typedef struct {
char *string;
- /*
- * This assumes that all args are converted into longs
- * on L/390 this is the case for all types of parameter
- * except of floats, and long long (32 bit)
+ /*
+ * This assumes that all args are converted into longs
+ * on L/390 this is the case for all types of parameter
+ * except of floats, and long long (32 bit)
*
*/
long args[0];
} debug_sprintf_entry_t;
-
/* internal function prototypes */
static int debug_init(void);
static ssize_t debug_output(struct file *file, char __user *user_buf,
- size_t user_len, loff_t * offset);
+ size_t user_len, loff_t *offset);
static ssize_t debug_input(struct file *file, const char __user *user_buf,
- size_t user_len, loff_t * offset);
+ size_t user_len, loff_t *offset);
static int debug_open(struct inode *inode, struct file *file);
static int debug_close(struct inode *inode, struct file *file);
static debug_info_t *debug_info_create(const char *name, int pages_per_area,
- int nr_areas, int buf_size, umode_t mode);
+ int nr_areas, int buf_size, umode_t mode);
static void debug_info_get(debug_info_t *);
static void debug_info_put(debug_info_t *);
-static int debug_prolog_level_fn(debug_info_t * id,
- struct debug_view *view, char *out_buf);
-static int debug_input_level_fn(debug_info_t * id, struct debug_view *view,
- struct file *file, const char __user *user_buf,
- size_t user_buf_size, loff_t * offset);
-static int debug_prolog_pages_fn(debug_info_t * id,
- struct debug_view *view, char *out_buf);
-static int debug_input_pages_fn(debug_info_t * id, struct debug_view *view,
- struct file *file, const char __user *user_buf,
- size_t user_buf_size, loff_t * offset);
-static int debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
- struct file *file, const char __user *user_buf,
- size_t user_buf_size, loff_t * offset);
-static int debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
- char *out_buf, const char *in_buf);
-static int debug_raw_format_fn(debug_info_t * id,
- struct debug_view *view, char *out_buf,
- const char *in_buf);
-static int debug_raw_header_fn(debug_info_t * id, struct debug_view *view,
- int area, debug_entry_t * entry, char *out_buf);
-
-static int debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view,
- char *out_buf, debug_sprintf_entry_t *curr_event);
+static int debug_prolog_level_fn(debug_info_t *id,
+ struct debug_view *view, char *out_buf);
+static int debug_input_level_fn(debug_info_t *id, struct debug_view *view,
+ struct file *file, const char __user *user_buf,
+ size_t user_buf_size, loff_t *offset);
+static int debug_prolog_pages_fn(debug_info_t *id,
+ struct debug_view *view, char *out_buf);
+static int debug_input_pages_fn(debug_info_t *id, struct debug_view *view,
+ struct file *file, const char __user *user_buf,
+ size_t user_buf_size, loff_t *offset);
+static int debug_input_flush_fn(debug_info_t *id, struct debug_view *view,
+ struct file *file, const char __user *user_buf,
+ size_t user_buf_size, loff_t *offset);
+static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf, const char *in_buf);
+static int debug_raw_format_fn(debug_info_t *id,
+ struct debug_view *view, char *out_buf,
+ const char *in_buf);
+static int debug_raw_header_fn(debug_info_t *id, struct debug_view *view,
+ int area, debug_entry_t *entry, char *out_buf);
+
+static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf, debug_sprintf_entry_t *curr_event);
/* globals */
@@ -142,19 +140,19 @@ static struct debug_view debug_pages_view = {
};
static struct debug_view debug_flush_view = {
- "flush",
- NULL,
- NULL,
- NULL,
- &debug_input_flush_fn,
- NULL
+ "flush",
+ NULL,
+ NULL,
+ NULL,
+ &debug_input_flush_fn,
+ NULL
};
struct debug_view debug_sprintf_view = {
"sprintf",
NULL,
&debug_dflt_header_fn,
- (debug_format_proc_t*)&debug_sprintf_format_fn,
+ (debug_format_proc_t *)&debug_sprintf_format_fn,
NULL,
NULL
};
@@ -165,18 +163,18 @@ static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION;
/* static globals */
-static debug_info_t *debug_area_first = NULL;
-static debug_info_t *debug_area_last = NULL;
+static debug_info_t *debug_area_first;
+static debug_info_t *debug_area_last;
static DEFINE_MUTEX(debug_mutex);
static int initialized;
static int debug_critical;
static const struct file_operations debug_file_ops = {
- .owner = THIS_MODULE,
- .read = debug_output,
- .write = debug_input,
- .open = debug_open,
+ .owner = THIS_MODULE,
+ .read = debug_output,
+ .write = debug_input,
+ .open = debug_open,
.release = debug_close,
.llseek = no_llseek,
};
@@ -191,29 +189,23 @@ static struct dentry *debug_debugfs_root_entry;
* areas[areanumber][pagenumber][pageoffset]
*/
-static debug_entry_t***
-debug_areas_alloc(int pages_per_area, int nr_areas)
+static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas)
{
- debug_entry_t*** areas;
- int i,j;
+ debug_entry_t ***areas;
+ int i, j;
- areas = kmalloc(nr_areas *
- sizeof(debug_entry_t**),
- GFP_KERNEL);
+ areas = kmalloc(nr_areas * sizeof(debug_entry_t **), GFP_KERNEL);
if (!areas)
goto fail_malloc_areas;
for (i = 0; i < nr_areas; i++) {
- areas[i] = kmalloc(pages_per_area *
- sizeof(debug_entry_t*),GFP_KERNEL);
- if (!areas[i]) {
+ areas[i] = kmalloc(pages_per_area * sizeof(debug_entry_t *), GFP_KERNEL);
+ if (!areas[i])
goto fail_malloc_areas2;
- }
- for(j = 0; j < pages_per_area; j++) {
+ for (j = 0; j < pages_per_area; j++) {
areas[i][j] = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if(!areas[i][j]) {
- for(j--; j >=0 ; j--) {
+ if (!areas[i][j]) {
+ for (j--; j >= 0 ; j--)
kfree(areas[i][j]);
- }
kfree(areas[i]);
goto fail_malloc_areas2;
}
@@ -222,62 +214,55 @@ debug_areas_alloc(int pages_per_area, int nr_areas)
return areas;
fail_malloc_areas2:
- for(i--; i >= 0; i--){
- for(j=0; j < pages_per_area;j++){
+ for (i--; i >= 0; i--) {
+ for (j = 0; j < pages_per_area; j++)
kfree(areas[i][j]);
- }
kfree(areas[i]);
}
kfree(areas);
fail_malloc_areas:
return NULL;
-
}
-
/*
* debug_info_alloc
* - alloc new debug-info
*/
-
-static debug_info_t*
-debug_info_alloc(const char *name, int pages_per_area, int nr_areas,
- int buf_size, int level, int mode)
+static debug_info_t *debug_info_alloc(const char *name, int pages_per_area,
+ int nr_areas, int buf_size, int level,
+ int mode)
{
- debug_info_t* rc;
+ debug_info_t *rc;
/* alloc everything */
-
rc = kmalloc(sizeof(debug_info_t), GFP_KERNEL);
- if(!rc)
+ if (!rc)
goto fail_malloc_rc;
rc->active_entries = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
- if(!rc->active_entries)
+ if (!rc->active_entries)
goto fail_malloc_active_entries;
rc->active_pages = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
- if(!rc->active_pages)
+ if (!rc->active_pages)
goto fail_malloc_active_pages;
- if((mode == ALL_AREAS) && (pages_per_area != 0)){
+ if ((mode == ALL_AREAS) && (pages_per_area != 0)) {
rc->areas = debug_areas_alloc(pages_per_area, nr_areas);
- if(!rc->areas)
+ if (!rc->areas)
goto fail_malloc_areas;
} else {
rc->areas = NULL;
}
/* initialize members */
-
spin_lock_init(&rc->lock);
rc->pages_per_area = pages_per_area;
- rc->nr_areas = nr_areas;
+ rc->nr_areas = nr_areas;
rc->active_area = 0;
- rc->level = level;
- rc->buf_size = buf_size;
- rc->entry_size = sizeof(debug_entry_t) + buf_size;
+ rc->level = level;
+ rc->buf_size = buf_size;
+ rc->entry_size = sizeof(debug_entry_t) + buf_size;
strlcpy(rc->name, name, sizeof(rc->name));
memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
- memset(rc->debugfs_entries, 0 ,DEBUG_MAX_VIEWS *
- sizeof(struct dentry*));
+ memset(rc->debugfs_entries, 0, DEBUG_MAX_VIEWS * sizeof(struct dentry *));
refcount_set(&(rc->ref_count), 0);
return rc;
@@ -296,18 +281,15 @@ fail_malloc_rc:
* debug_areas_free
* - free all debug areas
*/
-
-static void
-debug_areas_free(debug_info_t* db_info)
+static void debug_areas_free(debug_info_t *db_info)
{
- int i,j;
+ int i, j;
- if(!db_info->areas)
+ if (!db_info->areas)
return;
for (i = 0; i < db_info->nr_areas; i++) {
- for(j = 0; j < db_info->pages_per_area; j++) {
+ for (j = 0; j < db_info->pages_per_area; j++)
kfree(db_info->areas[i][j]);
- }
kfree(db_info->areas[i]);
}
kfree(db_info->areas);
@@ -318,9 +300,8 @@ debug_areas_free(debug_info_t* db_info)
* debug_info_free
* - free memory debug-info
*/
-
-static void
-debug_info_free(debug_info_t* db_info){
+static void debug_info_free(debug_info_t *db_info)
+{
debug_areas_free(db_info);
kfree(db_info->active_entries);
kfree(db_info->active_pages);
@@ -332,35 +313,34 @@ debug_info_free(debug_info_t* db_info){
* - create new debug-info
*/
-static debug_info_t*
-debug_info_create(const char *name, int pages_per_area, int nr_areas,
- int buf_size, umode_t mode)
+static debug_info_t *debug_info_create(const char *name, int pages_per_area,
+ int nr_areas, int buf_size, umode_t mode)
{
- debug_info_t* rc;
+ debug_info_t *rc;
- rc = debug_info_alloc(name, pages_per_area, nr_areas, buf_size,
- DEBUG_DEFAULT_LEVEL, ALL_AREAS);
- if(!rc)
+ rc = debug_info_alloc(name, pages_per_area, nr_areas, buf_size,
+ DEBUG_DEFAULT_LEVEL, ALL_AREAS);
+ if (!rc)
goto out;
rc->mode = mode & ~S_IFMT;
/* create root directory */
- rc->debugfs_root_entry = debugfs_create_dir(rc->name,
- debug_debugfs_root_entry);
+ rc->debugfs_root_entry = debugfs_create_dir(rc->name,
+ debug_debugfs_root_entry);
/* append new element to linked list */
- if (!debug_area_first) {
- /* first element in list */
- debug_area_first = rc;
- rc->prev = NULL;
- } else {
- /* append element to end of list */
- debug_area_last->next = rc;
- rc->prev = debug_area_last;
- }
- debug_area_last = rc;
- rc->next = NULL;
+ if (!debug_area_first) {
+ /* first element in list */
+ debug_area_first = rc;
+ rc->prev = NULL;
+ } else {
+ /* append element to end of list */
+ debug_area_last->next = rc;
+ rc->prev = debug_area_last;
+ }
+ debug_area_last = rc;
+ rc->next = NULL;
refcount_set(&rc->ref_count, 1);
out:
@@ -371,24 +351,22 @@ out:
* debug_info_copy
* - copy debug-info
*/
-
-static debug_info_t*
-debug_info_copy(debug_info_t* in, int mode)
+static debug_info_t *debug_info_copy(debug_info_t *in, int mode)
{
- int i,j;
- debug_info_t* rc;
- unsigned long flags;
+ unsigned long flags;
+ debug_info_t *rc;
+ int i, j;
/* get a consistent copy of the debug areas */
do {
rc = debug_info_alloc(in->name, in->pages_per_area,
in->nr_areas, in->buf_size, in->level, mode);
spin_lock_irqsave(&in->lock, flags);
- if(!rc)
+ if (!rc)
goto out;
/* has something changed in the meantime ? */
- if((rc->pages_per_area == in->pages_per_area) &&
- (rc->nr_areas == in->nr_areas)) {
+ if ((rc->pages_per_area == in->pages_per_area) &&
+ (rc->nr_areas == in->nr_areas)) {
break;
}
spin_unlock_irqrestore(&in->lock, flags);
@@ -396,25 +374,22 @@ debug_info_copy(debug_info_t* in, int mode)
} while (1);
if (mode == NO_AREAS)
- goto out;
+ goto out;
- for(i = 0; i < in->nr_areas; i++){
- for(j = 0; j < in->pages_per_area; j++) {
- memcpy(rc->areas[i][j], in->areas[i][j],PAGE_SIZE);
- }
- }
+ for (i = 0; i < in->nr_areas; i++) {
+ for (j = 0; j < in->pages_per_area; j++)
+ memcpy(rc->areas[i][j], in->areas[i][j], PAGE_SIZE);
+ }
out:
- spin_unlock_irqrestore(&in->lock, flags);
- return rc;
+ spin_unlock_irqrestore(&in->lock, flags);
+ return rc;
}
/*
* debug_info_get
* - increments reference count for debug-info
*/
-
-static void
-debug_info_get(debug_info_t * db_info)
+static void debug_info_get(debug_info_t *db_info)
{
if (db_info)
refcount_inc(&db_info->ref_count);
@@ -424,9 +399,7 @@ debug_info_get(debug_info_t * db_info)
* debug_info_put:
* - decreases reference count for debug-info and frees it if necessary
*/
-
-static void
-debug_info_put(debug_info_t *db_info)
+static void debug_info_put(debug_info_t *db_info)
{
int i;
@@ -439,12 +412,14 @@ debug_info_put(debug_info_t *db_info)
debugfs_remove(db_info->debugfs_entries[i]);
}
debugfs_remove(db_info->debugfs_root_entry);
- if(db_info == debug_area_first)
+ if (db_info == debug_area_first)
debug_area_first = db_info->next;
- if(db_info == debug_area_last)
+ if (db_info == debug_area_last)
debug_area_last = db_info->prev;
- if(db_info->prev) db_info->prev->next = db_info->next;
- if(db_info->next) db_info->next->prev = db_info->prev;
+ if (db_info->prev)
+ db_info->prev->next = db_info->next;
+ if (db_info->next)
+ db_info->next->prev = db_info->prev;
debug_info_free(db_info);
}
}
@@ -453,71 +428,68 @@ debug_info_put(debug_info_t *db_info)
* debug_format_entry:
* - format one debug entry and return size of formated data
*/
-
-static int
-debug_format_entry(file_private_info_t *p_info)
+static int debug_format_entry(file_private_info_t *p_info)
{
- debug_info_t *id_snap = p_info->debug_info_snap;
+ debug_info_t *id_snap = p_info->debug_info_snap;
struct debug_view *view = p_info->view;
debug_entry_t *act_entry;
size_t len = 0;
- if(p_info->act_entry == DEBUG_PROLOG_ENTRY){
+
+ if (p_info->act_entry == DEBUG_PROLOG_ENTRY) {
/* print prolog */
- if (view->prolog_proc)
- len += view->prolog_proc(id_snap,view,p_info->temp_buf);
+ if (view->prolog_proc)
+ len += view->prolog_proc(id_snap, view, p_info->temp_buf);
goto out;
}
if (!id_snap->areas) /* this is true, if we have a prolog only view */
goto out; /* or if 'pages_per_area' is 0 */
- act_entry = (debug_entry_t *) ((char*)id_snap->areas[p_info->act_area]
- [p_info->act_page] + p_info->act_entry);
-
+ act_entry = (debug_entry_t *) ((char *)id_snap->areas[p_info->act_area]
+ [p_info->act_page] + p_info->act_entry);
+
if (act_entry->id.stck == 0LL)
- goto out; /* empty entry */
+ goto out; /* empty entry */
if (view->header_proc)
len += view->header_proc(id_snap, view, p_info->act_area,
- act_entry, p_info->temp_buf + len);
+ act_entry, p_info->temp_buf + len);
if (view->format_proc)
len += view->format_proc(id_snap, view, p_info->temp_buf + len,
- DEBUG_DATA(act_entry));
+ DEBUG_DATA(act_entry));
out:
- return len;
+ return len;
}
/*
* debug_next_entry:
* - goto next entry in p_info
*/
-
-static inline int
-debug_next_entry(file_private_info_t *p_info)
+static inline int debug_next_entry(file_private_info_t *p_info)
{
debug_info_t *id;
id = p_info->debug_info_snap;
- if(p_info->act_entry == DEBUG_PROLOG_ENTRY){
+ if (p_info->act_entry == DEBUG_PROLOG_ENTRY) {
p_info->act_entry = 0;
p_info->act_page = 0;
goto out;
}
- if(!id->areas)
+ if (!id->areas)
return 1;
p_info->act_entry += id->entry_size;
/* switch to next page, if we reached the end of the page */
- if (p_info->act_entry > (PAGE_SIZE - id->entry_size)){
+ if (p_info->act_entry > (PAGE_SIZE - id->entry_size)) {
/* next page */
p_info->act_entry = 0;
p_info->act_page += 1;
- if((p_info->act_page % id->pages_per_area) == 0) {
+ if ((p_info->act_page % id->pages_per_area) == 0) {
/* next area */
- p_info->act_area++;
- p_info->act_page=0;
+ p_info->act_area++;
+ p_info->act_page = 0;
}
- if(p_info->act_area >= id->nr_areas)
+ if (p_info->act_area >= id->nr_areas)
return 1;
}
out:
- return 0;
+ return 0;
}
/*
@@ -525,26 +497,24 @@ out:
* - called for user read()
* - copies formated debug entries to the user buffer
*/
-
-static ssize_t
-debug_output(struct file *file, /* file descriptor */
- char __user *user_buf, /* user buffer */
- size_t len, /* length of buffer */
- loff_t *offset) /* offset in the file */
+static ssize_t debug_output(struct file *file, /* file descriptor */
+ char __user *user_buf, /* user buffer */
+ size_t len, /* length of buffer */
+ loff_t *offset) /* offset in the file */
{
size_t count = 0;
size_t entry_offset;
file_private_info_t *p_info;
- p_info = ((file_private_info_t *) file->private_data);
- if (*offset != p_info->offset)
+ p_info = (file_private_info_t *) file->private_data;
+ if (*offset != p_info->offset)
return -EPIPE;
- if(p_info->act_area >= p_info->debug_info_snap->nr_areas)
+ if (p_info->act_area >= p_info->debug_info_snap->nr_areas)
return 0;
entry_offset = p_info->act_entry_offset;
- while(count < len){
- int formatted_line_size;
+ while (count < len) {
int formatted_line_residue;
+ int formatted_line_size;
int user_buf_residue;
size_t copy_size;
@@ -552,21 +522,21 @@ debug_output(struct file *file, /* file descriptor */
formatted_line_residue = formatted_line_size - entry_offset;
user_buf_residue = len-count;
copy_size = min(user_buf_residue, formatted_line_residue);
- if(copy_size){
+ if (copy_size) {
if (copy_to_user(user_buf + count, p_info->temp_buf
- + entry_offset, copy_size))
+ + entry_offset, copy_size))
return -EFAULT;
count += copy_size;
entry_offset += copy_size;
}
- if(copy_size == formatted_line_residue){
+ if (copy_size == formatted_line_residue) {
entry_offset = 0;
- if(debug_next_entry(p_info))
+ if (debug_next_entry(p_info))
goto out;
}
}
out:
- p_info->offset = *offset + count;
+ p_info->offset = *offset + count;
p_info->act_entry_offset = entry_offset;
*offset = p_info->offset;
return count;
@@ -577,24 +547,23 @@ out:
* - called for user write()
* - calls input function of view
*/
-
-static ssize_t
-debug_input(struct file *file, const char __user *user_buf, size_t length,
- loff_t *offset)
+static ssize_t debug_input(struct file *file, const char __user *user_buf,
+ size_t length, loff_t *offset)
{
- int rc = 0;
file_private_info_t *p_info;
+ int rc = 0;
mutex_lock(&debug_mutex);
p_info = ((file_private_info_t *) file->private_data);
- if (p_info->view->input_proc)
+ if (p_info->view->input_proc) {
rc = p_info->view->input_proc(p_info->debug_info_org,
p_info->view, file, user_buf,
length, offset);
- else
+ } else {
rc = -EPERM;
+ }
mutex_unlock(&debug_mutex);
- return rc; /* number of input characters */
+ return rc; /* number of input characters */
}
/*
@@ -603,13 +572,11 @@ debug_input(struct file *file, const char __user *user_buf, size_t length,
* - copies formated output to private_data area of the file
* handle
*/
-
-static int
-debug_open(struct inode *inode, struct file *file)
+static int debug_open(struct inode *inode, struct file *file)
{
- int i, rc = 0;
- file_private_info_t *p_info;
debug_info_t *debug_info, *debug_info_snapshot;
+ file_private_info_t *p_info;
+ int i, rc = 0;
mutex_lock(&debug_mutex);
debug_info = file_inode(file)->i_private;
@@ -617,10 +584,8 @@ debug_open(struct inode *inode, struct file *file)
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!debug_info->views[i])
continue;
- else if (debug_info->debugfs_entries[i] ==
- file->f_path.dentry) {
- goto found; /* found view ! */
- }
+ else if (debug_info->debugfs_entries[i] == file->f_path.dentry)
+ goto found; /* found view ! */
}
/* no entry found */
rc = -EINVAL;
@@ -628,31 +593,28 @@ debug_open(struct inode *inode, struct file *file)
found:
- /* Make snapshot of current debug areas to get it consistent. */
+ /* Make snapshot of current debug areas to get it consistent. */
/* To copy all the areas is only needed, if we have a view which */
/* formats the debug areas. */
- if(!debug_info->views[i]->format_proc &&
- !debug_info->views[i]->header_proc){
+ if (!debug_info->views[i]->format_proc && !debug_info->views[i]->header_proc)
debug_info_snapshot = debug_info_copy(debug_info, NO_AREAS);
- } else {
+ else
debug_info_snapshot = debug_info_copy(debug_info, ALL_AREAS);
- }
- if(!debug_info_snapshot){
+ if (!debug_info_snapshot) {
rc = -ENOMEM;
goto out;
}
- p_info = kmalloc(sizeof(file_private_info_t),
- GFP_KERNEL);
- if(!p_info){
+ p_info = kmalloc(sizeof(file_private_info_t), GFP_KERNEL);
+ if (!p_info) {
debug_info_free(debug_info_snapshot);
rc = -ENOMEM;
goto out;
}
p_info->offset = 0;
p_info->debug_info_snap = debug_info_snapshot;
- p_info->debug_info_org = debug_info;
+ p_info->debug_info_org = debug_info;
p_info->view = debug_info->views[i];
p_info->act_area = 0;
p_info->act_page = 0;
@@ -671,17 +633,16 @@ out:
* - called for user close()
* - deletes private_data area of the file handle
*/
-
-static int
-debug_close(struct inode *inode, struct file *file)
+static int debug_close(struct inode *inode, struct file *file)
{
file_private_info_t *p_info;
+
p_info = (file_private_info_t *) file->private_data;
- if(p_info->debug_info_snap)
+ if (p_info->debug_info_snap)
debug_info_free(p_info->debug_info_snap);
debug_info_put(p_info->debug_info_org);
kfree(file->private_data);
- return 0; /* success */
+ return 0; /* success */
}
/*
@@ -690,7 +651,6 @@ debug_close(struct inode *inode, struct file *file)
* The mode parameter allows to specify access rights for the s390dbf files
* - Returns handle for debug area
*/
-
debug_info_t *debug_register_mode(const char *name, int pages_per_area,
int nr_areas, int buf_size, umode_t mode,
uid_t uid, gid_t gid)
@@ -704,18 +664,16 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
BUG_ON(!initialized);
mutex_lock(&debug_mutex);
- /* create new debug_info */
-
+ /* create new debug_info */
rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
- if(!rc)
+ if (!rc)
goto out;
debug_register_view(rc, &debug_level_view);
- debug_register_view(rc, &debug_flush_view);
+ debug_register_view(rc, &debug_flush_view);
debug_register_view(rc, &debug_pages_view);
out:
- if (!rc){
+ if (!rc)
pr_err("Registering debug feature %s failed\n", name);
- }
mutex_unlock(&debug_mutex);
return rc;
}
@@ -726,7 +684,6 @@ EXPORT_SYMBOL(debug_register_mode);
* - creates and initializes debug area for the caller
* - returns handle for debug area
*/
-
debug_info_t *debug_register(const char *name, int pages_per_area,
int nr_areas, int buf_size)
{
@@ -739,18 +696,13 @@ EXPORT_SYMBOL(debug_register);
* debug_unregister:
* - give back debug area
*/
-
-void
-debug_unregister(debug_info_t * id)
+void debug_unregister(debug_info_t *id)
{
if (!id)
- goto out;
+ return;
mutex_lock(&debug_mutex);
debug_info_put(id);
mutex_unlock(&debug_mutex);
-
-out:
- return;
}
EXPORT_SYMBOL(debug_unregister);
@@ -758,18 +710,17 @@ EXPORT_SYMBOL(debug_unregister);
* debug_set_size:
* - set area size (number of pages) and number of areas
*/
-static int
-debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area)
+static int debug_set_size(debug_info_t *id, int nr_areas, int pages_per_area)
{
+ debug_entry_t ***new_areas;
unsigned long flags;
- debug_entry_t *** new_areas;
- int rc=0;
+ int rc = 0;
- if(!id || (nr_areas <= 0) || (pages_per_area < 0))
+ if (!id || (nr_areas <= 0) || (pages_per_area < 0))
return -EINVAL;
- if(pages_per_area > 0){
+ if (pages_per_area > 0) {
new_areas = debug_areas_alloc(pages_per_area, nr_areas);
- if(!new_areas) {
+ if (!new_areas) {
pr_info("Allocating memory for %i pages failed\n",
pages_per_area);
rc = -ENOMEM;
@@ -778,16 +729,16 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area)
} else {
new_areas = NULL;
}
- spin_lock_irqsave(&id->lock,flags);
+ spin_lock_irqsave(&id->lock, flags);
debug_areas_free(id);
id->areas = new_areas;
id->nr_areas = nr_areas;
id->pages_per_area = pages_per_area;
id->active_area = 0;
- memset(id->active_entries,0,sizeof(int)*id->nr_areas);
+ memset(id->active_entries, 0, sizeof(int)*id->nr_areas);
memset(id->active_pages, 0, sizeof(int)*id->nr_areas);
- spin_unlock_irqrestore(&id->lock,flags);
- pr_info("%s: set new size (%i pages)\n" ,id->name, pages_per_area);
+ spin_unlock_irqrestore(&id->lock, flags);
+ pr_info("%s: set new size (%i pages)\n", id->name, pages_per_area);
out:
return rc;
}
@@ -796,24 +747,23 @@ out:
* debug_set_level:
* - set actual debug level
*/
-
-void
-debug_set_level(debug_info_t* id, int new_level)
+void debug_set_level(debug_info_t *id, int new_level)
{
unsigned long flags;
- if(!id)
- return;
- spin_lock_irqsave(&id->lock,flags);
- if(new_level == DEBUG_OFF_LEVEL){
- id->level = DEBUG_OFF_LEVEL;
- pr_info("%s: switched off\n",id->name);
- } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) {
+
+ if (!id)
+ return;
+ spin_lock_irqsave(&id->lock, flags);
+ if (new_level == DEBUG_OFF_LEVEL) {
+ id->level = DEBUG_OFF_LEVEL;
+ pr_info("%s: switched off\n", id->name);
+ } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) {
pr_info("%s: level %i is out of range (%i - %i)\n",
- id->name, new_level, 0, DEBUG_MAX_LEVEL);
- } else {
- id->level = new_level;
- }
- spin_unlock_irqrestore(&id->lock,flags);
+ id->name, new_level, 0, DEBUG_MAX_LEVEL);
+ } else {
+ id->level = new_level;
+ }
+ spin_unlock_irqrestore(&id->lock, flags);
}
EXPORT_SYMBOL(debug_set_level);
@@ -821,12 +771,10 @@ EXPORT_SYMBOL(debug_set_level);
* proceed_active_entry:
* - set active entry to next in the ring buffer
*/
-
-static inline void
-proceed_active_entry(debug_info_t * id)
+static inline void proceed_active_entry(debug_info_t *id)
{
if ((id->active_entries[id->active_area] += id->entry_size)
- > (PAGE_SIZE - id->entry_size)){
+ > (PAGE_SIZE - id->entry_size)) {
id->active_entries[id->active_area] = 0;
id->active_pages[id->active_area] =
(id->active_pages[id->active_area] + 1) %
@@ -838,9 +786,7 @@ proceed_active_entry(debug_info_t * id)
* proceed_active_area:
* - set active area to next in the ring buffer
*/
-
-static inline void
-proceed_active_area(debug_info_t * id)
+static inline void proceed_active_area(debug_info_t *id)
{
id->active_area++;
id->active_area = id->active_area % id->nr_areas;
@@ -849,13 +795,11 @@ proceed_active_area(debug_info_t * id)
/*
* get_active_entry:
*/
-
-static inline debug_entry_t*
-get_active_entry(debug_info_t * id)
+static inline debug_entry_t *get_active_entry(debug_info_t *id)
{
return (debug_entry_t *) (((char *) id->areas[id->active_area]
- [id->active_pages[id->active_area]]) +
- id->active_entries[id->active_area]);
+ [id->active_pages[id->active_area]]) +
+ id->active_entries[id->active_area]);
}
/*
@@ -863,23 +807,22 @@ get_active_entry(debug_info_t * id)
* - set timestamp, caller address, cpu number etc.
*/
-static inline void
-debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
- int exception)
+static inline void debug_finish_entry(debug_info_t *id, debug_entry_t *active,
+ int level, int exception)
{
active->id.stck = get_tod_clock_fast() -
*(unsigned long long *) &tod_clock_base[1];
active->id.fields.cpuid = smp_processor_id();
active->caller = __builtin_return_address(0);
active->id.fields.exception = exception;
- active->id.fields.level = level;
+ active->id.fields.level = level;
proceed_active_entry(id);
- if(exception)
+ if (exception)
proceed_active_area(id);
}
-static int debug_stoppable=1;
-static int debug_active=1;
+static int debug_stoppable = 1;
+static int debug_active = 1;
#define CTL_S390DBF_STOPPABLE 5678
#define CTL_S390DBF_ACTIVE 5679
@@ -889,9 +832,8 @@ static int debug_active=1;
* always allow read, allow write only if debug_stoppable is set or
* if debug_active is already off
*/
-static int
-s390dbf_procactive(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+static int s390dbf_procactive(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
if (!write || debug_stoppable || !debug_active)
return proc_dointvec(table, write, buffer, lenp, ppos);
@@ -899,39 +841,37 @@ s390dbf_procactive(struct ctl_table *table, int write,
return 0;
}
-
static struct ctl_table s390dbf_table[] = {
{
- .procname = "debug_stoppable",
+ .procname = "debug_stoppable",
.data = &debug_stoppable,
.maxlen = sizeof(int),
- .mode = S_IRUGO | S_IWUSR,
- .proc_handler = proc_dointvec,
+ .mode = S_IRUGO | S_IWUSR,
+ .proc_handler = proc_dointvec,
},
- {
- .procname = "debug_active",
+ {
+ .procname = "debug_active",
.data = &debug_active,
.maxlen = sizeof(int),
- .mode = S_IRUGO | S_IWUSR,
- .proc_handler = s390dbf_procactive,
+ .mode = S_IRUGO | S_IWUSR,
+ .proc_handler = s390dbf_procactive,
},
{ }
};
static struct ctl_table s390dbf_dir_table[] = {
{
- .procname = "s390dbf",
- .maxlen = 0,
- .mode = S_IRUGO | S_IXUGO,
- .child = s390dbf_table,
+ .procname = "s390dbf",
+ .maxlen = 0,
+ .mode = S_IRUGO | S_IXUGO,
+ .child = s390dbf_table,
},
{ }
};
static struct ctl_table_header *s390dbf_sysctl_header;
-void
-debug_stop_all(void)
+void debug_stop_all(void)
{
if (debug_stoppable)
debug_active = 0;
@@ -947,26 +887,31 @@ void debug_set_critical(void)
* debug_event_common:
* - write debug entry with given size
*/
-
-debug_entry_t*
-debug_event_common(debug_info_t * id, int level, const void *buf, int len)
+debug_entry_t *debug_event_common(debug_info_t *id, int level, const void *buf,
+ int len)
{
- unsigned long flags;
debug_entry_t *active;
+ unsigned long flags;
if (!debug_active || !id->areas)
return NULL;
if (debug_critical) {
if (!spin_trylock_irqsave(&id->lock, flags))
return NULL;
- } else
+ } else {
spin_lock_irqsave(&id->lock, flags);
- active = get_active_entry(id);
- memset(DEBUG_DATA(active), 0, id->buf_size);
- memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
- debug_finish_entry(id, active, level, 0);
- spin_unlock_irqrestore(&id->lock, flags);
+ }
+ do {
+ active = get_active_entry(id);
+ memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
+ if (len < id->buf_size)
+ memset((DEBUG_DATA(active)) + len, 0, id->buf_size - len);
+ debug_finish_entry(id, active, level, 0);
+ len -= id->buf_size;
+ buf += id->buf_size;
+ } while (len > 0);
+ spin_unlock_irqrestore(&id->lock, flags);
return active;
}
EXPORT_SYMBOL(debug_event_common);
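Beyond the whitespace cleanup, debug_event_common() now loops over the input, so a record longer than the per-entry buffer is spread across consecutive entries instead of being truncated, and only the unused tail of a short record is zeroed (debug_exception_common() gets the same loop in the next hunk, marking just the final chunk as the exception). A usage sketch with the standard s390dbf API, assuming the usual exported debug_hex_ascii_view:

    /*
     * Usage sketch with the s390dbf API from <asm/debug.h>, not part of this
     * patch.  With a 16-byte entry size the 48-byte record below is now
     * written as three consecutive entries rather than being cut off.
     */
    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/module.h>
    #include <asm/debug.h>

    static debug_info_t *my_dbf;

    static int __init my_dbf_init(void)
    {
            char record[48] = "a record longer than one 16 byte entry";

            my_dbf = debug_register("mydriver", 4, 1, 16);
            if (!my_dbf)
                    return -ENOMEM;
            debug_register_view(my_dbf, &debug_hex_ascii_view);
            debug_event(my_dbf, 3, record, sizeof(record));
            return 0;
    }
    module_init(my_dbf_init);
    MODULE_LICENSE("GPL");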
@@ -975,26 +920,31 @@ EXPORT_SYMBOL(debug_event_common);
* debug_exception_common:
* - write debug entry with given size and switch to next debug area
*/
-
-debug_entry_t
-*debug_exception_common(debug_info_t * id, int level, const void *buf, int len)
+debug_entry_t *debug_exception_common(debug_info_t *id, int level,
+ const void *buf, int len)
{
- unsigned long flags;
debug_entry_t *active;
+ unsigned long flags;
if (!debug_active || !id->areas)
return NULL;
if (debug_critical) {
if (!spin_trylock_irqsave(&id->lock, flags))
return NULL;
- } else
+ } else {
spin_lock_irqsave(&id->lock, flags);
- active = get_active_entry(id);
- memset(DEBUG_DATA(active), 0, id->buf_size);
- memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
- debug_finish_entry(id, active, level, 1);
- spin_unlock_irqrestore(&id->lock, flags);
+ }
+ do {
+ active = get_active_entry(id);
+ memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size));
+ if (len < id->buf_size)
+ memset((DEBUG_DATA(active)) + len, 0, id->buf_size - len);
+ debug_finish_entry(id, active, level, len <= id->buf_size);
+ len -= id->buf_size;
+ buf += id->buf_size;
+ } while (len > 0);
+ spin_unlock_irqrestore(&id->lock, flags);
return active;
}
EXPORT_SYMBOL(debug_exception_common);
@@ -1002,47 +952,44 @@ EXPORT_SYMBOL(debug_exception_common);
/*
* counts arguments in format string for sprintf view
*/
-
-static inline int
-debug_count_numargs(char *string)
+static inline int debug_count_numargs(char *string)
{
- int numargs=0;
+ int numargs = 0;
- while(*string) {
- if(*string++=='%')
+ while (*string) {
+ if (*string++ == '%')
numargs++;
}
- return(numargs);
+ return numargs;
}
/*
* debug_sprintf_event:
*/
-
-debug_entry_t*
-__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
+debug_entry_t *__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
{
- va_list ap;
- int numargs,idx;
- unsigned long flags;
debug_sprintf_entry_t *curr_event;
debug_entry_t *active;
+ unsigned long flags;
+ int numargs, idx;
+ va_list ap;
if (!debug_active || !id->areas)
return NULL;
- numargs=debug_count_numargs(string);
+ numargs = debug_count_numargs(string);
if (debug_critical) {
if (!spin_trylock_irqsave(&id->lock, flags))
return NULL;
- } else
+ } else {
spin_lock_irqsave(&id->lock, flags);
+ }
active = get_active_entry(id);
- curr_event=(debug_sprintf_entry_t *) DEBUG_DATA(active);
- va_start(ap,string);
- curr_event->string=string;
- for(idx=0;idx<min(numargs,(int)(id->buf_size / sizeof(long))-1);idx++)
- curr_event->args[idx]=va_arg(ap,long);
+ curr_event = (debug_sprintf_entry_t *) DEBUG_DATA(active);
+ va_start(ap, string);
+ curr_event->string = string;
+ for (idx = 0; idx < min(numargs, (int)(id->buf_size / sizeof(long)) - 1); idx++)
+ curr_event->args[idx] = va_arg(ap, long);
va_end(ap);
debug_finish_entry(id, active, level, 0);
spin_unlock_irqrestore(&id->lock, flags);
@@ -1054,32 +1001,31 @@ EXPORT_SYMBOL(__debug_sprintf_event);
/*
* debug_sprintf_exception:
*/
-
-debug_entry_t*
-__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
+debug_entry_t *__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
{
- va_list ap;
- int numargs,idx;
- unsigned long flags;
debug_sprintf_entry_t *curr_event;
debug_entry_t *active;
+ unsigned long flags;
+ int numargs, idx;
+ va_list ap;
if (!debug_active || !id->areas)
return NULL;
- numargs=debug_count_numargs(string);
+ numargs = debug_count_numargs(string);
if (debug_critical) {
if (!spin_trylock_irqsave(&id->lock, flags))
return NULL;
- } else
+ } else {
spin_lock_irqsave(&id->lock, flags);
+ }
active = get_active_entry(id);
- curr_event=(debug_sprintf_entry_t *)DEBUG_DATA(active);
- va_start(ap,string);
- curr_event->string=string;
- for(idx=0;idx<min(numargs,(int)(id->buf_size / sizeof(long))-1);idx++)
- curr_event->args[idx]=va_arg(ap,long);
+ curr_event = (debug_sprintf_entry_t *)DEBUG_DATA(active);
+ va_start(ap, string);
+ curr_event->string = string;
+ for (idx = 0; idx < min(numargs, (int)(id->buf_size / sizeof(long)) - 1); idx++)
+ curr_event->args[idx] = va_arg(ap, long);
va_end(ap);
debug_finish_entry(id, active, level, 1);
spin_unlock_irqrestore(&id->lock, flags);
@@ -1091,15 +1037,13 @@ EXPORT_SYMBOL(__debug_sprintf_exception);
/*
* debug_register_view:
*/
-
-int
-debug_register_view(debug_info_t * id, struct debug_view *view)
+int debug_register_view(debug_info_t *id, struct debug_view *view)
{
- int rc = 0;
- int i;
unsigned long flags;
- umode_t mode;
struct dentry *pde;
+ umode_t mode;
+ int rc = 0;
+ int i;
if (!id)
goto out;
@@ -1109,10 +1053,10 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
if (!view->input_proc)
mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry,
- id , &debug_file_ops);
- if (!pde){
+ id, &debug_file_ops);
+ if (!pde) {
pr_err("Registering view %s/%s failed due to out of "
- "memory\n", id->name,view->name);
+ "memory\n", id->name, view->name);
rc = -1;
goto out;
}
@@ -1140,9 +1084,7 @@ EXPORT_SYMBOL(debug_register_view);
/*
* debug_unregister_view:
*/
-
-int
-debug_unregister_view(debug_info_t * id, struct debug_view *view)
+int debug_unregister_view(debug_info_t *id, struct debug_view *view)
{
struct dentry *dentry = NULL;
unsigned long flags;
@@ -1155,9 +1097,9 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view)
if (id->views[i] == view)
break;
}
- if (i == DEBUG_MAX_VIEWS)
+ if (i == DEBUG_MAX_VIEWS) {
rc = -1;
- else {
+ } else {
dentry = id->debugfs_entries[i];
id->views[i] = NULL;
id->debugfs_entries[i] = NULL;
@@ -1169,10 +1111,10 @@ out:
}
EXPORT_SYMBOL(debug_unregister_view);
-static inline char *
-debug_get_user_string(const char __user *user_buf, size_t user_len)
+static inline char *debug_get_user_string(const char __user *user_buf,
+ size_t user_len)
{
- char* buffer;
+ char *buffer;
buffer = kmalloc(user_len + 1, GFP_KERNEL);
if (!buffer)
@@ -1186,19 +1128,17 @@ debug_get_user_string(const char __user *user_buf, size_t user_len)
buffer[user_len - 1] = 0;
else
buffer[user_len] = 0;
- return buffer;
+ return buffer;
}
-static inline int
-debug_get_uint(char *buf)
+static inline int debug_get_uint(char *buf)
{
int rc;
buf = skip_spaces(buf);
rc = simple_strtoul(buf, &buf, 10);
- if(*buf){
+ if (*buf)
rc = -EINVAL;
- }
return rc;
}
@@ -1211,9 +1151,8 @@ debug_get_uint(char *buf)
* prints out actual debug level
*/
-static int
-debug_prolog_pages_fn(debug_info_t * id,
- struct debug_view *view, char *out_buf)
+static int debug_prolog_pages_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf)
{
return sprintf(out_buf, "%i\n", id->pages_per_area);
}
@@ -1222,32 +1161,31 @@ debug_prolog_pages_fn(debug_info_t * id,
* reads new size (number of pages per debug area)
*/
-static int
-debug_input_pages_fn(debug_info_t * id, struct debug_view *view,
- struct file *file, const char __user *user_buf,
- size_t user_len, loff_t * offset)
+static int debug_input_pages_fn(debug_info_t *id, struct debug_view *view,
+ struct file *file, const char __user *user_buf,
+ size_t user_len, loff_t *offset)
{
+ int rc, new_pages;
char *str;
- int rc,new_pages;
if (user_len > 0x10000)
- user_len = 0x10000;
- if (*offset != 0){
+ user_len = 0x10000;
+ if (*offset != 0) {
rc = -EPIPE;
goto out;
}
- str = debug_get_user_string(user_buf,user_len);
- if(IS_ERR(str)){
+ str = debug_get_user_string(user_buf, user_len);
+ if (IS_ERR(str)) {
rc = PTR_ERR(str);
goto out;
}
new_pages = debug_get_uint(str);
- if(new_pages < 0){
+ if (new_pages < 0) {
rc = -EINVAL;
goto free_str;
}
- rc = debug_set_size(id,id->nr_areas, new_pages);
- if(rc != 0){
+ rc = debug_set_size(id, id->nr_areas, new_pages);
+ if (rc != 0) {
rc = -EINVAL;
goto free_str;
}
@@ -1262,52 +1200,47 @@ out:
/*
* prints out actual debug level
*/
-
-static int
-debug_prolog_level_fn(debug_info_t * id, struct debug_view *view, char *out_buf)
+static int debug_prolog_level_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf)
{
int rc = 0;
- if(id->level == DEBUG_OFF_LEVEL) {
- rc = sprintf(out_buf,"-\n");
- }
- else {
+ if (id->level == DEBUG_OFF_LEVEL)
+ rc = sprintf(out_buf, "-\n");
+ else
rc = sprintf(out_buf, "%i\n", id->level);
- }
return rc;
}
/*
* reads new debug level
*/
-
-static int
-debug_input_level_fn(debug_info_t * id, struct debug_view *view,
- struct file *file, const char __user *user_buf,
- size_t user_len, loff_t * offset)
+static int debug_input_level_fn(debug_info_t *id, struct debug_view *view,
+ struct file *file, const char __user *user_buf,
+ size_t user_len, loff_t *offset)
{
+ int rc, new_level;
char *str;
- int rc,new_level;
if (user_len > 0x10000)
- user_len = 0x10000;
- if (*offset != 0){
+ user_len = 0x10000;
+ if (*offset != 0) {
rc = -EPIPE;
goto out;
}
- str = debug_get_user_string(user_buf,user_len);
- if(IS_ERR(str)){
+ str = debug_get_user_string(user_buf, user_len);
+ if (IS_ERR(str)) {
rc = PTR_ERR(str);
goto out;
}
- if(str[0] == '-'){
+ if (str[0] == '-') {
debug_set_level(id, DEBUG_OFF_LEVEL);
rc = user_len;
goto free_str;
} else {
new_level = debug_get_uint(str);
}
- if(new_level < 0) {
+ if (new_level < 0) {
pr_warn("%s is not a valid level for a debug feature\n", str);
rc = -EINVAL;
} else {
@@ -1321,99 +1254,90 @@ out:
return rc; /* number of input characters */
}
-
/*
* flushes debug areas
*/
-
-static void debug_flush(debug_info_t* id, int area)
+static void debug_flush(debug_info_t *id, int area)
{
- unsigned long flags;
- int i,j;
-
- if(!id || !id->areas)
- return;
- spin_lock_irqsave(&id->lock,flags);
- if(area == DEBUG_FLUSH_ALL){
- id->active_area = 0;
- memset(id->active_entries, 0, id->nr_areas * sizeof(int));
- for (i = 0; i < id->nr_areas; i++) {
+ unsigned long flags;
+ int i, j;
+
+ if (!id || !id->areas)
+ return;
+ spin_lock_irqsave(&id->lock, flags);
+ if (area == DEBUG_FLUSH_ALL) {
+ id->active_area = 0;
+ memset(id->active_entries, 0, id->nr_areas * sizeof(int));
+ for (i = 0; i < id->nr_areas; i++) {
id->active_pages[i] = 0;
- for(j = 0; j < id->pages_per_area; j++) {
- memset(id->areas[i][j], 0, PAGE_SIZE);
- }
+ for (j = 0; j < id->pages_per_area; j++)
+ memset(id->areas[i][j], 0, PAGE_SIZE);
}
- } else if(area >= 0 && area < id->nr_areas) {
- id->active_entries[area] = 0;
+ } else if (area >= 0 && area < id->nr_areas) {
+ id->active_entries[area] = 0;
id->active_pages[area] = 0;
- for(i = 0; i < id->pages_per_area; i++) {
- memset(id->areas[area][i],0,PAGE_SIZE);
- }
- }
- spin_unlock_irqrestore(&id->lock,flags);
+ for (i = 0; i < id->pages_per_area; i++)
+ memset(id->areas[area][i], 0, PAGE_SIZE);
+ }
+ spin_unlock_irqrestore(&id->lock, flags);
}
/*
- * view function: flushes debug areas
+ * view function: flushes debug areas
*/
-
-static int
-debug_input_flush_fn(debug_info_t * id, struct debug_view *view,
- struct file *file, const char __user *user_buf,
- size_t user_len, loff_t * offset)
+static int debug_input_flush_fn(debug_info_t *id, struct debug_view *view,
+ struct file *file, const char __user *user_buf,
+ size_t user_len, loff_t *offset)
{
- char input_buf[1];
- int rc = user_len;
+ char input_buf[1];
+ int rc = user_len;
if (user_len > 0x10000)
- user_len = 0x10000;
- if (*offset != 0){
+ user_len = 0x10000;
+ if (*offset != 0) {
rc = -EPIPE;
- goto out;
+ goto out;
+ }
+ if (copy_from_user(input_buf, user_buf, 1)) {
+ rc = -EFAULT;
+ goto out;
+ }
+ if (input_buf[0] == '-') {
+ debug_flush(id, DEBUG_FLUSH_ALL);
+ goto out;
+ }
+ if (isdigit(input_buf[0])) {
+ int area = ((int) input_buf[0] - (int) '0');
+
+ debug_flush(id, area);
+ goto out;
}
- if (copy_from_user(input_buf, user_buf, 1)){
- rc = -EFAULT;
- goto out;
- }
- if(input_buf[0] == '-') {
- debug_flush(id, DEBUG_FLUSH_ALL);
- goto out;
- }
- if (isdigit(input_buf[0])) {
- int area = ((int) input_buf[0] - (int) '0');
- debug_flush(id, area);
- goto out;
- }
pr_info("Flushing debug data failed because %c is not a valid "
"area\n", input_buf[0]);
out:
- *offset += user_len;
- return rc; /* number of input characters */
+ *offset += user_len;
+ return rc; /* number of input characters */
}
/*
* prints debug header in raw format
*/
-
-static int
-debug_raw_header_fn(debug_info_t * id, struct debug_view *view,
- int area, debug_entry_t * entry, char *out_buf)
+static int debug_raw_header_fn(debug_info_t *id, struct debug_view *view,
+ int area, debug_entry_t *entry, char *out_buf)
{
- int rc;
+ int rc;
rc = sizeof(debug_entry_t);
- memcpy(out_buf,entry,sizeof(debug_entry_t));
- return rc;
+ memcpy(out_buf, entry, sizeof(debug_entry_t));
+ return rc;
}
/*
* prints debug data in raw format
*/
-
-static int
-debug_raw_format_fn(debug_info_t * id, struct debug_view *view,
+static int debug_raw_format_fn(debug_info_t *id, struct debug_view *view,
char *out_buf, const char *in_buf)
{
int rc;
@@ -1426,20 +1350,17 @@ debug_raw_format_fn(debug_info_t * id, struct debug_view *view,
/*
* prints debug data in hex/ascii format
*/
-
-static int
-debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
- char *out_buf, const char *in_buf)
+static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf, const char *in_buf)
{
int i, rc = 0;
- for (i = 0; i < id->buf_size; i++) {
- rc += sprintf(out_buf + rc, "%02x ",
- ((unsigned char *) in_buf)[i]);
- }
+ for (i = 0; i < id->buf_size; i++)
+ rc += sprintf(out_buf + rc, "%02x ", ((unsigned char *) in_buf)[i]);
rc += sprintf(out_buf + rc, "| ");
for (i = 0; i < id->buf_size; i++) {
unsigned char c = in_buf[i];
+
if (isascii(c) && isprint(c))
rc += sprintf(out_buf + rc, "%c", c);
else
@@ -1452,16 +1373,14 @@ debug_hex_ascii_format_fn(debug_info_t * id, struct debug_view *view,
/*
* prints header for debug entry
*/
-
-int
-debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
- int area, debug_entry_t * entry, char *out_buf)
+int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
+ int area, debug_entry_t *entry, char *out_buf)
{
unsigned long base, sec, usec;
- char *except_str;
unsigned long caller;
- int rc = 0;
unsigned int level;
+ char *except_str;
+ int rc = 0;
level = entry->id.fields.level;
base = (*(unsigned long *) &tod_clock_base[0]) >> 4;
@@ -1487,19 +1406,18 @@ EXPORT_SYMBOL(debug_dflt_header_fn);
#define DEBUG_SPRINTF_MAX_ARGS 10
-static int
-debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view,
- char *out_buf, debug_sprintf_entry_t *curr_event)
+static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf, debug_sprintf_entry_t *curr_event)
{
- int num_longs, num_used_args = 0,i, rc = 0;
+ int num_longs, num_used_args = 0, i, rc = 0;
int index[DEBUG_SPRINTF_MAX_ARGS];
/* count of longs fit into one entry */
- num_longs = id->buf_size / sizeof(long);
+ num_longs = id->buf_size / sizeof(long);
- if(num_longs < 1)
+ if (num_longs < 1)
goto out; /* bufsize of entry too small */
- if(num_longs == 1) {
+ if (num_longs == 1) {
/* no args, we use only the string */
strcpy(out_buf, curr_event->string);
rc = strlen(curr_event->string);
@@ -1507,22 +1425,20 @@ debug_sprintf_format_fn(debug_info_t * id, struct debug_view *view,
}
/* number of arguments used for sprintf (without the format string) */
- num_used_args = min(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1));
+ num_used_args = min(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1));
- memset(index,0, DEBUG_SPRINTF_MAX_ARGS * sizeof(int));
+ memset(index, 0, DEBUG_SPRINTF_MAX_ARGS * sizeof(int));
- for(i = 0; i < num_used_args; i++)
+ for (i = 0; i < num_used_args; i++)
index[i] = i;
- rc = sprintf(out_buf, curr_event->string, curr_event->args[index[0]],
- curr_event->args[index[1]], curr_event->args[index[2]],
- curr_event->args[index[3]], curr_event->args[index[4]],
- curr_event->args[index[5]], curr_event->args[index[6]],
- curr_event->args[index[7]], curr_event->args[index[8]],
- curr_event->args[index[9]]);
-
+ rc = sprintf(out_buf, curr_event->string, curr_event->args[index[0]],
+ curr_event->args[index[1]], curr_event->args[index[2]],
+ curr_event->args[index[3]], curr_event->args[index[4]],
+ curr_event->args[index[5]], curr_event->args[index[6]],
+ curr_event->args[index[7]], curr_event->args[index[8]],
+ curr_event->args[index[9]]);
out:
-
return rc;
}
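Before the dis.c part of the diff: the hex_ascii view reworked a few hunks up is easy to check in isolation. A minimal user-space sketch of the same formatting idea (standalone, not the kernel code; it skips the isascii() check and the per-entry header):

#include <ctype.h>
#include <stdio.h>

/* Render "len" bytes as "xx xx ... | printable-or-dots", mirroring the
 * hex_ascii formatter above.  Returns the number of characters written. */
static int hex_ascii_line(char *out, const unsigned char *buf, int len)
{
	int i, rc = 0;

	for (i = 0; i < len; i++)
		rc += sprintf(out + rc, "%02x ", buf[i]);
	rc += sprintf(out + rc, "| ");
	for (i = 0; i < len; i++)
		rc += sprintf(out + rc, "%c", isprint(buf[i]) ? buf[i] : '.');
	rc += sprintf(out + rc, "\n");
	return rc;
}

int main(void)
{
	unsigned char data[] = { 0x73, 0x33, 0x39, 0x30, 0x00, 0xff };
	char line[64];

	hex_ascii_line(line, data, sizeof(data));
	fputs(line, stdout);	/* 73 33 39 30 00 ff | s390.. */
	return 0;
}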
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index f7e82302a71e..b811d3a8417d 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -21,52 +21,91 @@
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
-
#include <linux/uaccess.h>
+#include <linux/atomic.h>
#include <asm/dis.h>
#include <asm/io.h>
-#include <linux/atomic.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/irq.h>
+/* Type of operand */
+#define OPERAND_GPR 0x1 /* Operand printed as %rx */
+#define OPERAND_FPR 0x2 /* Operand printed as %fx */
+#define OPERAND_AR 0x4 /* Operand printed as %ax */
+#define OPERAND_CR 0x8 /* Operand printed as %cx */
+#define OPERAND_VR 0x10 /* Operand printed as %vx */
+#define OPERAND_DISP 0x20 /* Operand printed as displacement */
+#define OPERAND_BASE 0x40 /* Operand printed as base register */
+#define OPERAND_INDEX 0x80 /* Operand printed as index register */
+#define OPERAND_PCREL 0x100 /* Operand printed as pc-relative symbol */
+#define OPERAND_SIGNED 0x200 /* Operand printed as signed value */
+#define OPERAND_LENGTH 0x400 /* Operand printed as length (+1) */
+
+struct s390_operand {
+ unsigned char bits; /* The number of bits in the operand. */
+ unsigned char shift; /* The number of bits to shift. */
+ unsigned short flags; /* One bit syntax flags. */
+};
+
+struct s390_insn {
+ union {
+ const char name[5];
+ struct {
+ unsigned char zero;
+ unsigned int offset;
+ } __packed;
+ };
+ unsigned char opfrag;
+ unsigned char format;
+};
+
+struct s390_opcode_offset {
+ unsigned char opcode;
+ unsigned char mask;
+ unsigned char byte;
+ unsigned short offset;
+ unsigned short count;
+} __packed;
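A note on the new struct s390_insn (not part of the patch): the union stores a short mnemonic directly in name[], while the zero/offset pair, judging by the old { 0, LONG_INSN_xxx } table entries removed further down, appears to mark entries whose longer name lives out of line. A compilable sketch of that encoding; the helper name, string blob and entries here are invented for illustration:

#include <stdio.h>

/* Made-up long-name blob: "alghsik" at offset 0, "popcnt" at offset 8. */
static const char long_names[] = "alghsik\0popcnt";

struct insn {
	union {
		char name[5];	/* short mnemonic, NUL-terminated */
		struct {
			unsigned char zero;	/* 0 flags an out-of-line name */
			unsigned int offset;	/* offset into long_names[] */
		} __attribute__((packed));
	};
};

static const char *insn_name(const struct insn *insn)	/* hypothetical helper */
{
	return insn->name[0] ? insn->name : long_names + insn->offset;
}

int main(void)
{
	struct insn lgr = { .name = "lgr" };
	struct insn popcnt = { .zero = 0, .offset = 8 };

	printf("%s %s\n", insn_name(&lgr), insn_name(&popcnt));	/* lgr popcnt */
	return 0;
}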
+
enum {
- UNUSED, /* Indicates the end of the operand list */
- R_8, /* GPR starting at position 8 */
- R_12, /* GPR starting at position 12 */
- R_16, /* GPR starting at position 16 */
- R_20, /* GPR starting at position 20 */
- R_24, /* GPR starting at position 24 */
- R_28, /* GPR starting at position 28 */
- R_32, /* GPR starting at position 32 */
- F_8, /* FPR starting at position 8 */
- F_12, /* FPR starting at position 12 */
- F_16, /* FPR starting at position 16 */
- F_20, /* FPR starting at position 16 */
- F_24, /* FPR starting at position 24 */
- F_28, /* FPR starting at position 28 */
- F_32, /* FPR starting at position 32 */
+ UNUSED,
A_8, /* Access reg. starting at position 8 */
A_12, /* Access reg. starting at position 12 */
A_24, /* Access reg. starting at position 24 */
A_28, /* Access reg. starting at position 28 */
- C_8, /* Control reg. starting at position 8 */
- C_12, /* Control reg. starting at position 12 */
- V_8, /* Vector reg. starting at position 8, extension bit at 36 */
- V_12, /* Vector reg. starting at position 12, extension bit at 37 */
- V_16, /* Vector reg. starting at position 16, extension bit at 38 */
- V_32, /* Vector reg. starting at position 32, extension bit at 39 */
- W_12, /* Vector reg. at bit 12, extension at bit 37, used as index */
B_16, /* Base register starting at position 16 */
B_32, /* Base register starting at position 32 */
- X_12, /* Index register starting at position 12 */
+ C_8, /* Control reg. starting at position 8 */
+ C_12, /* Control reg. starting at position 12 */
+ D20_20, /* 20 bit displacement starting at 20 */
D_20, /* Displacement starting at position 20 */
D_36, /* Displacement starting at position 36 */
- D20_20, /* 20 bit displacement starting at 20 */
+ F_8, /* FPR starting at position 8 */
+ F_12, /* FPR starting at position 12 */
+ F_16, /* FPR starting at position 16 */
+ F_24, /* FPR starting at position 24 */
+ F_28, /* FPR starting at position 28 */
+ F_32, /* FPR starting at position 32 */
+ I8_8, /* 8 bit signed value starting at 8 */
+ I8_32, /* 8 bit signed value starting at 32 */
+ I16_16, /* 16 bit signed value starting at 16 */
+ I16_32, /* 16 bit signed value starting at 32 */
+ I32_16, /* 32 bit signed value starting at 16 */
+ J12_12, /* 12 bit PC relative offset at 12 */
+ J16_16, /* 16 bit PC relative offset at 16 */
+ J16_32, /* 16 bit PC relative offset at 32 */
+ J24_24, /* 24 bit PC relative offset at 24 */
+ J32_16, /* 32 bit PC relative offset at 16 */
L4_8, /* 4 bit length starting at position 8 */
L4_12, /* 4 bit length starting at position 12 */
L8_8, /* 8 bit length starting at position 8 */
+ R_8, /* GPR starting at position 8 */
+ R_12, /* GPR starting at position 12 */
+ R_16, /* GPR starting at position 16 */
+ R_24, /* GPR starting at position 24 */
+ R_28, /* GPR starting at position 28 */
U4_8, /* 4 bit unsigned value starting at 8 */
U4_12, /* 4 bit unsigned value starting at 12 */
U4_16, /* 4 bit unsigned value starting at 16 */
@@ -78,1651 +117,226 @@ enum {
U8_8, /* 8 bit unsigned value starting at 8 */
U8_16, /* 8 bit unsigned value starting at 16 */
U8_24, /* 8 bit unsigned value starting at 24 */
+ U8_28, /* 8 bit unsigned value starting at 28 */
U8_32, /* 8 bit unsigned value starting at 32 */
- I8_8, /* 8 bit signed value starting at 8 */
- I8_16, /* 8 bit signed value starting at 16 */
- I8_24, /* 8 bit signed value starting at 24 */
- I8_32, /* 8 bit signed value starting at 32 */
- J12_12, /* PC relative offset at 12 */
- I16_16, /* 16 bit signed value starting at 16 */
- I16_32, /* 32 bit signed value starting at 16 */
- U16_16, /* 16 bit unsigned value starting at 16 */
- U16_32, /* 32 bit unsigned value starting at 16 */
- J16_16, /* PC relative jump offset at 16 */
- J16_32, /* PC relative offset at 16 */
- I24_24, /* 24 bit signed value starting at 24 */
- J32_16, /* PC relative long offset at 16 */
- I32_16, /* 32 bit signed value starting at 16 */
- U32_16, /* 32 bit unsigned value starting at 16 */
- M_16, /* 4 bit optional mask starting at 16 */
- M_20, /* 4 bit optional mask starting at 20 */
- M_24, /* 4 bit optional mask starting at 24 */
- M_28, /* 4 bit optional mask starting at 28 */
- M_32, /* 4 bit optional mask starting at 32 */
- RO_28, /* optional GPR starting at position 28 */
-};
-
-/*
- * Enumeration of the different instruction formats.
- * For details consult the principles of operation.
- */
-enum {
- INSTR_INVALID,
- INSTR_E,
- INSTR_IE_UU,
- INSTR_MII_UPI,
- INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU,
- INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, INSTR_RIE_RRI0,
- INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP,
- INSTR_RIS_R0RDU, INSTR_RIS_R0UU, INSTR_RIS_RURDI, INSTR_RIS_RURDU,
- INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP,
- INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0,
- INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF,
- INSTR_RRE_RR, INSTR_RRE_RR_OPT,
- INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR,
- INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_FUFF2, INSTR_RRF_M0RR,
- INSTR_RRF_R0RR, INSTR_RRF_R0RR2, INSTR_RRF_RMRR, INSTR_RRF_RURR,
- INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR, INSTR_RRF_UUFF,
- INSTR_RRF_UUFR, INSTR_RRF_UURF,
- INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
- INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
- INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD,
- INSTR_RSI_RRP,
- INSTR_RSL_LRDFU, INSTR_RSL_R0RD,
- INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
- INSTR_RSY_RDRM, INSTR_RSY_RMRD,
- INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
- INSTR_RS_RURD,
- INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM,
- INSTR_RXF_FRRDF,
- INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RXY_URRD,
- INSTR_RX_FRRD, INSTR_RX_RRRD, INSTR_RX_URRD,
- INSTR_SIL_RDI, INSTR_SIL_RDU,
- INSTR_SIY_IRD, INSTR_SIY_URD,
- INSTR_SI_URD,
- INSTR_SMI_U0RDP,
- INSTR_SSE_RDRD,
- INSTR_SSF_RRDRD, INSTR_SSF_RRDRD2,
- INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
- INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3,
- INSTR_S_00, INSTR_S_RD,
- INSTR_VRI_V0IM, INSTR_VRI_V0I0, INSTR_VRI_V0IIM, INSTR_VRI_VVIM,
- INSTR_VRI_VVV0IM, INSTR_VRI_VVV0I0, INSTR_VRI_VVIMM,
- INSTR_VRR_VV00MMM, INSTR_VRR_VV000MM, INSTR_VRR_VV0000M,
- INSTR_VRR_VV00000, INSTR_VRR_VVV0M0M, INSTR_VRR_VV00M0M,
- INSTR_VRR_VVV000M, INSTR_VRR_VVV000V, INSTR_VRR_VVV0000,
- INSTR_VRR_VVV0MMM, INSTR_VRR_VVV00MM, INSTR_VRR_VVVMM0V,
- INSTR_VRR_VVVM0MV, INSTR_VRR_VVVM00V, INSTR_VRR_VRR0000,
- INSTR_VRS_VVRDM, INSTR_VRS_VVRD0, INSTR_VRS_VRRDM, INSTR_VRS_VRRD0,
- INSTR_VRS_RVRDM,
- INSTR_VRV_VVRDM, INSTR_VRV_VWRDM,
- INSTR_VRX_VRRDM, INSTR_VRX_VRRD0,
+ U12_16, /* 12 bit unsigned value starting at 16 */
+ U16_16, /* 16 bit unsigned value starting at 16 */
+ U16_32, /* 16 bit unsigned value starting at 32 */
+ U32_16, /* 32 bit unsigned value starting at 16 */
+ VX_12, /* Vector index register starting at position 12 */
+ V_8, /* Vector reg. starting at position 8 */
+ V_12, /* Vector reg. starting at position 12 */
+ V_16, /* Vector reg. starting at position 16 */
+ V_32, /* Vector reg. starting at position 32 */
+ X_12, /* Index register starting at position 12 */
};
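The reordered enum above indexes the operands[] descriptors that follow: each entry is { bits, shift, flags }, i.e. a field `bits` wide starting at bit `shift` of the instruction, with the flags deciding how it is printed (%r, %f, displacement, ...). A simplified extraction sketch, not the patch itself (instruction bytes packed MSB-first into a 64-bit word; the signed, length and vector-extension handling done by the disassembler's real extraction code is left out):

#include <stdint.h>
#include <stdio.h>

struct operand {
	unsigned char bits;	/* field width in bits */
	unsigned char shift;	/* bit offset from the start of the insn */
	unsigned short flags;	/* unused in this sketch */
};

/* Pull one operand field out of an "ilen"-byte instruction held in "code". */
static unsigned int extract(uint64_t code, int ilen, const struct operand *op)
{
	return (code >> (8 * ilen - op->shift - op->bits)) &
	       ((1ULL << op->bits) - 1);
}

int main(void)
{
	/* "lgr %r1,%r2" encodes as 0xb9040012; per the tables in this file,
	 * INSTR_RRE_RR takes R_24 (bits 24-27) and R_28 (bits 28-31). */
	uint64_t insn = 0xb9040012;
	struct operand r_24 = { 4, 24, 0 };
	struct operand r_28 = { 4, 28, 0 };

	printf("lgr\t%%r%u,%%r%u\n", extract(insn, 4, &r_24),
	       extract(insn, 4, &r_28));
	return 0;
}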
-static const struct s390_operand operands[] =
-{
- [UNUSED] = { 0, 0, 0 },
- [R_8] = { 4, 8, OPERAND_GPR },
- [R_12] = { 4, 12, OPERAND_GPR },
- [R_16] = { 4, 16, OPERAND_GPR },
- [R_20] = { 4, 20, OPERAND_GPR },
- [R_24] = { 4, 24, OPERAND_GPR },
- [R_28] = { 4, 28, OPERAND_GPR },
- [R_32] = { 4, 32, OPERAND_GPR },
- [F_8] = { 4, 8, OPERAND_FPR },
- [F_12] = { 4, 12, OPERAND_FPR },
- [F_16] = { 4, 16, OPERAND_FPR },
- [F_20] = { 4, 16, OPERAND_FPR },
- [F_24] = { 4, 24, OPERAND_FPR },
- [F_28] = { 4, 28, OPERAND_FPR },
- [F_32] = { 4, 32, OPERAND_FPR },
+static const struct s390_operand operands[] = {
+ [UNUSED] = { 0, 0, 0 },
[A_8] = { 4, 8, OPERAND_AR },
[A_12] = { 4, 12, OPERAND_AR },
[A_24] = { 4, 24, OPERAND_AR },
[A_28] = { 4, 28, OPERAND_AR },
- [C_8] = { 4, 8, OPERAND_CR },
- [C_12] = { 4, 12, OPERAND_CR },
- [V_8] = { 4, 8, OPERAND_VR },
- [V_12] = { 4, 12, OPERAND_VR },
- [V_16] = { 4, 16, OPERAND_VR },
- [V_32] = { 4, 32, OPERAND_VR },
- [W_12] = { 4, 12, OPERAND_INDEX | OPERAND_VR },
[B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR },
[B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR },
- [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR },
+ [C_8] = { 4, 8, OPERAND_CR },
+ [C_12] = { 4, 12, OPERAND_CR },
+ [D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED },
[D_20] = { 12, 20, OPERAND_DISP },
[D_36] = { 12, 36, OPERAND_DISP },
- [D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED },
+ [F_8] = { 4, 8, OPERAND_FPR },
+ [F_12] = { 4, 12, OPERAND_FPR },
+ [F_16] = { 4, 16, OPERAND_FPR },
+ [F_24] = { 4, 24, OPERAND_FPR },
+ [F_28] = { 4, 28, OPERAND_FPR },
+ [F_32] = { 4, 32, OPERAND_FPR },
+ [I8_8] = { 8, 8, OPERAND_SIGNED },
+ [I8_32] = { 8, 32, OPERAND_SIGNED },
+ [I16_16] = { 16, 16, OPERAND_SIGNED },
+ [I16_32] = { 16, 32, OPERAND_SIGNED },
+ [I32_16] = { 32, 16, OPERAND_SIGNED },
+ [J12_12] = { 12, 12, OPERAND_PCREL },
+ [J16_16] = { 16, 16, OPERAND_PCREL },
+ [J16_32] = { 16, 32, OPERAND_PCREL },
+ [J24_24] = { 24, 24, OPERAND_PCREL },
+ [J32_16] = { 32, 16, OPERAND_PCREL },
[L4_8] = { 4, 8, OPERAND_LENGTH },
- [L4_12] = { 4, 12, OPERAND_LENGTH },
+ [L4_12] = { 4, 12, OPERAND_LENGTH },
[L8_8] = { 8, 8, OPERAND_LENGTH },
+ [R_8] = { 4, 8, OPERAND_GPR },
+ [R_12] = { 4, 12, OPERAND_GPR },
+ [R_16] = { 4, 16, OPERAND_GPR },
+ [R_24] = { 4, 24, OPERAND_GPR },
+ [R_28] = { 4, 28, OPERAND_GPR },
[U4_8] = { 4, 8, 0 },
- [U4_12] = { 4, 12, 0 },
- [U4_16] = { 4, 16, 0 },
- [U4_20] = { 4, 20, 0 },
- [U4_24] = { 4, 24, 0 },
- [U4_28] = { 4, 28, 0 },
- [U4_32] = { 4, 32, 0 },
- [U4_36] = { 4, 36, 0 },
+ [U4_12] = { 4, 12, 0 },
+ [U4_16] = { 4, 16, 0 },
+ [U4_20] = { 4, 20, 0 },
+ [U4_24] = { 4, 24, 0 },
+ [U4_28] = { 4, 28, 0 },
+ [U4_32] = { 4, 32, 0 },
+ [U4_36] = { 4, 36, 0 },
[U8_8] = { 8, 8, 0 },
- [U8_16] = { 8, 16, 0 },
- [U8_24] = { 8, 24, 0 },
- [U8_32] = { 8, 32, 0 },
- [J12_12] = { 12, 12, OPERAND_PCREL },
- [I8_8] = { 8, 8, OPERAND_SIGNED },
- [I8_16] = { 8, 16, OPERAND_SIGNED },
- [I8_24] = { 8, 24, OPERAND_SIGNED },
- [I8_32] = { 8, 32, OPERAND_SIGNED },
- [I16_32] = { 16, 32, OPERAND_SIGNED },
- [I16_16] = { 16, 16, OPERAND_SIGNED },
+ [U8_16] = { 8, 16, 0 },
+ [U8_24] = { 8, 24, 0 },
+ [U8_28] = { 8, 28, 0 },
+ [U8_32] = { 8, 32, 0 },
+ [U12_16] = { 12, 16, 0 },
[U16_16] = { 16, 16, 0 },
[U16_32] = { 16, 32, 0 },
- [J16_16] = { 16, 16, OPERAND_PCREL },
- [J16_32] = { 16, 32, OPERAND_PCREL },
- [I24_24] = { 24, 24, OPERAND_SIGNED },
- [J32_16] = { 32, 16, OPERAND_PCREL },
- [I32_16] = { 32, 16, OPERAND_SIGNED },
[U32_16] = { 32, 16, 0 },
- [M_16] = { 4, 16, 0 },
- [M_20] = { 4, 20, 0 },
- [M_24] = { 4, 24, 0 },
- [M_28] = { 4, 28, 0 },
- [M_32] = { 4, 32, 0 },
- [RO_28] = { 4, 28, OPERAND_GPR }
-};
-
-static const unsigned char formats[][7] = {
- [INSTR_E] = { 0xff, 0,0,0,0,0,0 },
- [INSTR_IE_UU] = { 0xff, U4_24,U4_28,0,0,0,0 },
- [INSTR_MII_UPI] = { 0xff, U4_8,J12_12,I24_24 },
- [INSTR_RIE_R0IU] = { 0xff, R_8,I16_16,U4_32,0,0,0 },
- [INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 },
- [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 },
- [INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 },
- [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
- [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 },
- [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 },
- [INSTR_RIE_RUPU] = { 0xff, R_8,U8_32,U4_12,J16_16,0,0 },
- [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 },
- [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 },
- [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 },
- [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 },
- [INSTR_RIS_R0RDU] = { 0xff, R_8,U8_32,D_20,B_16,0,0 },
- [INSTR_RIS_RURDI] = { 0xff, R_8,I8_32,U4_12,D_20,B_16,0 },
- [INSTR_RIS_RURDU] = { 0xff, R_8,U8_32,U4_12,D_20,B_16,0 },
- [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 },
- [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 },
- [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 },
- [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 },
- [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 },
- [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 },
- [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 },
- [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 },
- [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 },
- [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 },
- [INSTR_RRE_FR] = { 0xff, F_24,R_28,0,0,0,0 },
- [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 },
- [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 },
- [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 },
- [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 },
- [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 },
- [INSTR_RRF_0UFF] = { 0xff, F_24,F_28,U4_20,0,0,0 },
- [INSTR_RRF_F0FF2] = { 0xff, F_24,F_16,F_28,0,0,0 },
- [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 },
- [INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 },
- [INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 },
- [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },
- [INSTR_RRF_FUFF2] = { 0xff, F_24,F_28,F_16,U4_20,0,0 },
- [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 },
- [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 },
- [INSTR_RRF_R0RR2] = { 0xff, R_24,R_28,R_16,0,0,0 },
- [INSTR_RRF_RMRR] = { 0xff, R_24,R_16,R_28,M_20,0,0 },
- [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },
- [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 },
- [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 },
- [INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 },
- [INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 },
- [INSTR_RRF_UUFR] = { 0xff, F_24,U4_16,R_28,U4_20,0,0 },
- [INSTR_RRF_UURF] = { 0xff, R_24,U4_16,F_28,U4_20,0,0 },
- [INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 },
- [INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 },
- [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 },
- [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 },
- [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 },
- [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 },
- [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 },
- [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
- [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
- [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
- [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
- [INSTR_RSL_LRDFU] = { 0xff, F_32,D_20,L4_8,B_16,U4_36,0 },
- [INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 },
- [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
- [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
- [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
- [INSTR_RSY_RMRD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
- [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
- [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
- [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
- [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
- [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 },
- [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
- [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
- [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
- [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
- [INSTR_RXE_RRRDM] = { 0xff, R_8,D_20,X_12,B_16,M_32,0 },
- [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 },
- [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },
- [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },
- [INSTR_RXY_URRD] = { 0xff, U4_8,D20_20,X_12,B_16,0,0 },
- [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 },
- [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 },
- [INSTR_RX_URRD] = { 0xff, U4_8,D_20,X_12,B_16,0,0 },
- [INSTR_SIL_RDI] = { 0xff, D_20,B_16,I16_32,0,0,0 },
- [INSTR_SIL_RDU] = { 0xff, D_20,B_16,U16_32,0,0,0 },
- [INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 },
- [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 },
- [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 },
- [INSTR_SMI_U0RDP] = { 0xff, U4_8,J16_32,D_20,B_16,0,0 },
- [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 },
- [INSTR_SSF_RRDRD] = { 0x0f, D_20,B_16,D_36,B_32,R_8,0 },
- [INSTR_SSF_RRDRD2]= { 0x0f, R_8,D_20,B_16,D_36,B_32,0 },
- [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
- [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
- [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
- [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 },
- [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 },
- [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 },
- [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 },
- [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 },
- [INSTR_VRI_V0IM] = { 0xff, V_8,I16_16,M_32,0,0,0 },
- [INSTR_VRI_V0I0] = { 0xff, V_8,I16_16,0,0,0,0 },
- [INSTR_VRI_V0IIM] = { 0xff, V_8,I8_16,I8_24,M_32,0,0 },
- [INSTR_VRI_VVIM] = { 0xff, V_8,I16_16,V_12,M_32,0,0 },
- [INSTR_VRI_VVV0IM]= { 0xff, V_8,V_12,V_16,I8_24,M_32,0 },
- [INSTR_VRI_VVV0I0]= { 0xff, V_8,V_12,V_16,I8_24,0,0 },
- [INSTR_VRI_VVIMM] = { 0xff, V_8,V_12,I16_16,M_32,M_28,0 },
- [INSTR_VRR_VV00MMM]={ 0xff, V_8,V_12,M_32,M_28,M_24,0 },
- [INSTR_VRR_VV000MM]={ 0xff, V_8,V_12,M_32,M_28,0,0 },
- [INSTR_VRR_VV0000M]={ 0xff, V_8,V_12,M_32,0,0,0 },
- [INSTR_VRR_VV00000]={ 0xff, V_8,V_12,0,0,0,0 },
- [INSTR_VRR_VVV0M0M]={ 0xff, V_8,V_12,V_16,M_32,M_24,0 },
- [INSTR_VRR_VV00M0M]={ 0xff, V_8,V_12,M_32,M_24,0,0 },
- [INSTR_VRR_VVV000M]={ 0xff, V_8,V_12,V_16,M_32,0,0 },
- [INSTR_VRR_VVV000V]={ 0xff, V_8,V_12,V_16,V_32,0,0 },
- [INSTR_VRR_VVV0000]={ 0xff, V_8,V_12,V_16,0,0,0 },
- [INSTR_VRR_VVV0MMM]={ 0xff, V_8,V_12,V_16,M_32,M_28,M_24 },
- [INSTR_VRR_VVV00MM]={ 0xff, V_8,V_12,V_16,M_32,M_28,0 },
- [INSTR_VRR_VVVMM0V]={ 0xff, V_8,V_12,V_16,V_32,M_20,M_24 },
- [INSTR_VRR_VVVM0MV]={ 0xff, V_8,V_12,V_16,V_32,M_28,M_20 },
- [INSTR_VRR_VVVM00V]={ 0xff, V_8,V_12,V_16,V_32,M_20,0 },
- [INSTR_VRR_VRR0000]={ 0xff, V_8,R_12,R_16,0,0,0 },
- [INSTR_VRS_VVRDM] = { 0xff, V_8,V_12,D_20,B_16,M_32,0 },
- [INSTR_VRS_VVRD0] = { 0xff, V_8,V_12,D_20,B_16,0,0 },
- [INSTR_VRS_VRRDM] = { 0xff, V_8,R_12,D_20,B_16,M_32,0 },
- [INSTR_VRS_VRRD0] = { 0xff, V_8,R_12,D_20,B_16,0,0 },
- [INSTR_VRS_RVRDM] = { 0xff, R_8,V_12,D_20,B_16,M_32,0 },
- [INSTR_VRV_VVRDM] = { 0xff, V_8,V_12,D_20,B_16,M_32,0 },
- [INSTR_VRV_VWRDM] = { 0xff, V_8,D_20,W_12,B_16,M_32,0 },
- [INSTR_VRX_VRRDM] = { 0xff, V_8,D_20,X_12,B_16,M_32,0 },
- [INSTR_VRX_VRRD0] = { 0xff, V_8,D_20,X_12,B_16,0,0 },
-};
-
-enum {
- LONG_INSN_ALGHSIK,
- LONG_INSN_ALHHHR,
- LONG_INSN_ALHHLR,
- LONG_INSN_ALHSIK,
- LONG_INSN_ALSIHN,
- LONG_INSN_CDFBRA,
- LONG_INSN_CDGBRA,
- LONG_INSN_CDGTRA,
- LONG_INSN_CDLFBR,
- LONG_INSN_CDLFTR,
- LONG_INSN_CDLGBR,
- LONG_INSN_CDLGTR,
- LONG_INSN_CEFBRA,
- LONG_INSN_CEGBRA,
- LONG_INSN_CELFBR,
- LONG_INSN_CELGBR,
- LONG_INSN_CFDBRA,
- LONG_INSN_CFEBRA,
- LONG_INSN_CFXBRA,
- LONG_INSN_CGDBRA,
- LONG_INSN_CGDTRA,
- LONG_INSN_CGEBRA,
- LONG_INSN_CGXBRA,
- LONG_INSN_CGXTRA,
- LONG_INSN_CLFDBR,
- LONG_INSN_CLFDTR,
- LONG_INSN_CLFEBR,
- LONG_INSN_CLFHSI,
- LONG_INSN_CLFXBR,
- LONG_INSN_CLFXTR,
- LONG_INSN_CLGDBR,
- LONG_INSN_CLGDTR,
- LONG_INSN_CLGEBR,
- LONG_INSN_CLGFRL,
- LONG_INSN_CLGHRL,
- LONG_INSN_CLGHSI,
- LONG_INSN_CLGXBR,
- LONG_INSN_CLGXTR,
- LONG_INSN_CLHHSI,
- LONG_INSN_CXFBRA,
- LONG_INSN_CXGBRA,
- LONG_INSN_CXGTRA,
- LONG_INSN_CXLFBR,
- LONG_INSN_CXLFTR,
- LONG_INSN_CXLGBR,
- LONG_INSN_CXLGTR,
- LONG_INSN_FIDBRA,
- LONG_INSN_FIEBRA,
- LONG_INSN_FIXBRA,
- LONG_INSN_LDXBRA,
- LONG_INSN_LEDBRA,
- LONG_INSN_LEXBRA,
- LONG_INSN_LLGFAT,
- LONG_INSN_LLGFRL,
- LONG_INSN_LLGHRL,
- LONG_INSN_LLGTAT,
- LONG_INSN_POPCNT,
- LONG_INSN_RIEMIT,
- LONG_INSN_RINEXT,
- LONG_INSN_RISBGN,
- LONG_INSN_RISBHG,
- LONG_INSN_RISBLG,
- LONG_INSN_SLHHHR,
- LONG_INSN_SLHHLR,
- LONG_INSN_TABORT,
- LONG_INSN_TBEGIN,
- LONG_INSN_TBEGINC,
- LONG_INSN_PCISTG,
- LONG_INSN_MPCIFC,
- LONG_INSN_STPCIFC,
- LONG_INSN_PCISTB,
- LONG_INSN_VPOPCT,
- LONG_INSN_VERLLV,
- LONG_INSN_VESRAV,
- LONG_INSN_VESRLV,
- LONG_INSN_VSBCBI,
- LONG_INSN_STCCTM
-};
-
-static char *long_insn_name[] = {
- [LONG_INSN_ALGHSIK] = "alghsik",
- [LONG_INSN_ALHHHR] = "alhhhr",
- [LONG_INSN_ALHHLR] = "alhhlr",
- [LONG_INSN_ALHSIK] = "alhsik",
- [LONG_INSN_ALSIHN] = "alsihn",
- [LONG_INSN_CDFBRA] = "cdfbra",
- [LONG_INSN_CDGBRA] = "cdgbra",
- [LONG_INSN_CDGTRA] = "cdgtra",
- [LONG_INSN_CDLFBR] = "cdlfbr",
- [LONG_INSN_CDLFTR] = "cdlftr",
- [LONG_INSN_CDLGBR] = "cdlgbr",
- [LONG_INSN_CDLGTR] = "cdlgtr",
- [LONG_INSN_CEFBRA] = "cefbra",
- [LONG_INSN_CEGBRA] = "cegbra",
- [LONG_INSN_CELFBR] = "celfbr",
- [LONG_INSN_CELGBR] = "celgbr",
- [LONG_INSN_CFDBRA] = "cfdbra",
- [LONG_INSN_CFEBRA] = "cfebra",
- [LONG_INSN_CFXBRA] = "cfxbra",
- [LONG_INSN_CGDBRA] = "cgdbra",
- [LONG_INSN_CGDTRA] = "cgdtra",
- [LONG_INSN_CGEBRA] = "cgebra",
- [LONG_INSN_CGXBRA] = "cgxbra",
- [LONG_INSN_CGXTRA] = "cgxtra",
- [LONG_INSN_CLFDBR] = "clfdbr",
- [LONG_INSN_CLFDTR] = "clfdtr",
- [LONG_INSN_CLFEBR] = "clfebr",
- [LONG_INSN_CLFHSI] = "clfhsi",
- [LONG_INSN_CLFXBR] = "clfxbr",
- [LONG_INSN_CLFXTR] = "clfxtr",
- [LONG_INSN_CLGDBR] = "clgdbr",
- [LONG_INSN_CLGDTR] = "clgdtr",
- [LONG_INSN_CLGEBR] = "clgebr",
- [LONG_INSN_CLGFRL] = "clgfrl",
- [LONG_INSN_CLGHRL] = "clghrl",
- [LONG_INSN_CLGHSI] = "clghsi",
- [LONG_INSN_CLGXBR] = "clgxbr",
- [LONG_INSN_CLGXTR] = "clgxtr",
- [LONG_INSN_CLHHSI] = "clhhsi",
- [LONG_INSN_CXFBRA] = "cxfbra",
- [LONG_INSN_CXGBRA] = "cxgbra",
- [LONG_INSN_CXGTRA] = "cxgtra",
- [LONG_INSN_CXLFBR] = "cxlfbr",
- [LONG_INSN_CXLFTR] = "cxlftr",
- [LONG_INSN_CXLGBR] = "cxlgbr",
- [LONG_INSN_CXLGTR] = "cxlgtr",
- [LONG_INSN_FIDBRA] = "fidbra",
- [LONG_INSN_FIEBRA] = "fiebra",
- [LONG_INSN_FIXBRA] = "fixbra",
- [LONG_INSN_LDXBRA] = "ldxbra",
- [LONG_INSN_LEDBRA] = "ledbra",
- [LONG_INSN_LEXBRA] = "lexbra",
- [LONG_INSN_LLGFAT] = "llgfat",
- [LONG_INSN_LLGFRL] = "llgfrl",
- [LONG_INSN_LLGHRL] = "llghrl",
- [LONG_INSN_LLGTAT] = "llgtat",
- [LONG_INSN_POPCNT] = "popcnt",
- [LONG_INSN_RIEMIT] = "riemit",
- [LONG_INSN_RINEXT] = "rinext",
- [LONG_INSN_RISBGN] = "risbgn",
- [LONG_INSN_RISBHG] = "risbhg",
- [LONG_INSN_RISBLG] = "risblg",
- [LONG_INSN_SLHHHR] = "slhhhr",
- [LONG_INSN_SLHHLR] = "slhhlr",
- [LONG_INSN_TABORT] = "tabort",
- [LONG_INSN_TBEGIN] = "tbegin",
- [LONG_INSN_TBEGINC] = "tbeginc",
- [LONG_INSN_PCISTG] = "pcistg",
- [LONG_INSN_MPCIFC] = "mpcifc",
- [LONG_INSN_STPCIFC] = "stpcifc",
- [LONG_INSN_PCISTB] = "pcistb",
- [LONG_INSN_VPOPCT] = "vpopct",
- [LONG_INSN_VERLLV] = "verllv",
- [LONG_INSN_VESRAV] = "vesrav",
- [LONG_INSN_VESRLV] = "vesrlv",
- [LONG_INSN_VSBCBI] = "vsbcbi",
- [LONG_INSN_STCCTM] = "stcctm",
-};
-
-static struct s390_insn opcode[] = {
- { "bprp", 0xc5, INSTR_MII_UPI },
- { "bpp", 0xc7, INSTR_SMI_U0RDP },
- { "trtr", 0xd0, INSTR_SS_L0RDRD },
- { "lmd", 0xef, INSTR_SS_RRRDRD3 },
- { "spm", 0x04, INSTR_RR_R0 },
- { "balr", 0x05, INSTR_RR_RR },
- { "bctr", 0x06, INSTR_RR_RR },
- { "bcr", 0x07, INSTR_RR_UR },
- { "svc", 0x0a, INSTR_RR_U0 },
- { "bsm", 0x0b, INSTR_RR_RR },
- { "bassm", 0x0c, INSTR_RR_RR },
- { "basr", 0x0d, INSTR_RR_RR },
- { "mvcl", 0x0e, INSTR_RR_RR },
- { "clcl", 0x0f, INSTR_RR_RR },
- { "lpr", 0x10, INSTR_RR_RR },
- { "lnr", 0x11, INSTR_RR_RR },
- { "ltr", 0x12, INSTR_RR_RR },
- { "lcr", 0x13, INSTR_RR_RR },
- { "nr", 0x14, INSTR_RR_RR },
- { "clr", 0x15, INSTR_RR_RR },
- { "or", 0x16, INSTR_RR_RR },
- { "xr", 0x17, INSTR_RR_RR },
- { "lr", 0x18, INSTR_RR_RR },
- { "cr", 0x19, INSTR_RR_RR },
- { "ar", 0x1a, INSTR_RR_RR },
- { "sr", 0x1b, INSTR_RR_RR },
- { "mr", 0x1c, INSTR_RR_RR },
- { "dr", 0x1d, INSTR_RR_RR },
- { "alr", 0x1e, INSTR_RR_RR },
- { "slr", 0x1f, INSTR_RR_RR },
- { "lpdr", 0x20, INSTR_RR_FF },
- { "lndr", 0x21, INSTR_RR_FF },
- { "ltdr", 0x22, INSTR_RR_FF },
- { "lcdr", 0x23, INSTR_RR_FF },
- { "hdr", 0x24, INSTR_RR_FF },
- { "ldxr", 0x25, INSTR_RR_FF },
- { "mxr", 0x26, INSTR_RR_FF },
- { "mxdr", 0x27, INSTR_RR_FF },
- { "ldr", 0x28, INSTR_RR_FF },
- { "cdr", 0x29, INSTR_RR_FF },
- { "adr", 0x2a, INSTR_RR_FF },
- { "sdr", 0x2b, INSTR_RR_FF },
- { "mdr", 0x2c, INSTR_RR_FF },
- { "ddr", 0x2d, INSTR_RR_FF },
- { "awr", 0x2e, INSTR_RR_FF },
- { "swr", 0x2f, INSTR_RR_FF },
- { "lper", 0x30, INSTR_RR_FF },
- { "lner", 0x31, INSTR_RR_FF },
- { "lter", 0x32, INSTR_RR_FF },
- { "lcer", 0x33, INSTR_RR_FF },
- { "her", 0x34, INSTR_RR_FF },
- { "ledr", 0x35, INSTR_RR_FF },
- { "axr", 0x36, INSTR_RR_FF },
- { "sxr", 0x37, INSTR_RR_FF },
- { "ler", 0x38, INSTR_RR_FF },
- { "cer", 0x39, INSTR_RR_FF },
- { "aer", 0x3a, INSTR_RR_FF },
- { "ser", 0x3b, INSTR_RR_FF },
- { "mder", 0x3c, INSTR_RR_FF },
- { "der", 0x3d, INSTR_RR_FF },
- { "aur", 0x3e, INSTR_RR_FF },
- { "sur", 0x3f, INSTR_RR_FF },
- { "sth", 0x40, INSTR_RX_RRRD },
- { "la", 0x41, INSTR_RX_RRRD },
- { "stc", 0x42, INSTR_RX_RRRD },
- { "ic", 0x43, INSTR_RX_RRRD },
- { "ex", 0x44, INSTR_RX_RRRD },
- { "bal", 0x45, INSTR_RX_RRRD },
- { "bct", 0x46, INSTR_RX_RRRD },
- { "bc", 0x47, INSTR_RX_URRD },
- { "lh", 0x48, INSTR_RX_RRRD },
- { "ch", 0x49, INSTR_RX_RRRD },
- { "ah", 0x4a, INSTR_RX_RRRD },
- { "sh", 0x4b, INSTR_RX_RRRD },
- { "mh", 0x4c, INSTR_RX_RRRD },
- { "bas", 0x4d, INSTR_RX_RRRD },
- { "cvd", 0x4e, INSTR_RX_RRRD },
- { "cvb", 0x4f, INSTR_RX_RRRD },
- { "st", 0x50, INSTR_RX_RRRD },
- { "lae", 0x51, INSTR_RX_RRRD },
- { "n", 0x54, INSTR_RX_RRRD },
- { "cl", 0x55, INSTR_RX_RRRD },
- { "o", 0x56, INSTR_RX_RRRD },
- { "x", 0x57, INSTR_RX_RRRD },
- { "l", 0x58, INSTR_RX_RRRD },
- { "c", 0x59, INSTR_RX_RRRD },
- { "a", 0x5a, INSTR_RX_RRRD },
- { "s", 0x5b, INSTR_RX_RRRD },
- { "m", 0x5c, INSTR_RX_RRRD },
- { "d", 0x5d, INSTR_RX_RRRD },
- { "al", 0x5e, INSTR_RX_RRRD },
- { "sl", 0x5f, INSTR_RX_RRRD },
- { "std", 0x60, INSTR_RX_FRRD },
- { "mxd", 0x67, INSTR_RX_FRRD },
- { "ld", 0x68, INSTR_RX_FRRD },
- { "cd", 0x69, INSTR_RX_FRRD },
- { "ad", 0x6a, INSTR_RX_FRRD },
- { "sd", 0x6b, INSTR_RX_FRRD },
- { "md", 0x6c, INSTR_RX_FRRD },
- { "dd", 0x6d, INSTR_RX_FRRD },
- { "aw", 0x6e, INSTR_RX_FRRD },
- { "sw", 0x6f, INSTR_RX_FRRD },
- { "ste", 0x70, INSTR_RX_FRRD },
- { "ms", 0x71, INSTR_RX_RRRD },
- { "le", 0x78, INSTR_RX_FRRD },
- { "ce", 0x79, INSTR_RX_FRRD },
- { "ae", 0x7a, INSTR_RX_FRRD },
- { "se", 0x7b, INSTR_RX_FRRD },
- { "mde", 0x7c, INSTR_RX_FRRD },
- { "de", 0x7d, INSTR_RX_FRRD },
- { "au", 0x7e, INSTR_RX_FRRD },
- { "su", 0x7f, INSTR_RX_FRRD },
- { "ssm", 0x80, INSTR_S_RD },
- { "lpsw", 0x82, INSTR_S_RD },
- { "diag", 0x83, INSTR_RS_RRRD },
- { "brxh", 0x84, INSTR_RSI_RRP },
- { "brxle", 0x85, INSTR_RSI_RRP },
- { "bxh", 0x86, INSTR_RS_RRRD },
- { "bxle", 0x87, INSTR_RS_RRRD },
- { "srl", 0x88, INSTR_RS_R0RD },
- { "sll", 0x89, INSTR_RS_R0RD },
- { "sra", 0x8a, INSTR_RS_R0RD },
- { "sla", 0x8b, INSTR_RS_R0RD },
- { "srdl", 0x8c, INSTR_RS_R0RD },
- { "sldl", 0x8d, INSTR_RS_R0RD },
- { "srda", 0x8e, INSTR_RS_R0RD },
- { "slda", 0x8f, INSTR_RS_R0RD },
- { "stm", 0x90, INSTR_RS_RRRD },
- { "tm", 0x91, INSTR_SI_URD },
- { "mvi", 0x92, INSTR_SI_URD },
- { "ts", 0x93, INSTR_S_RD },
- { "ni", 0x94, INSTR_SI_URD },
- { "cli", 0x95, INSTR_SI_URD },
- { "oi", 0x96, INSTR_SI_URD },
- { "xi", 0x97, INSTR_SI_URD },
- { "lm", 0x98, INSTR_RS_RRRD },
- { "trace", 0x99, INSTR_RS_RRRD },
- { "lam", 0x9a, INSTR_RS_AARD },
- { "stam", 0x9b, INSTR_RS_AARD },
- { "mvcle", 0xa8, INSTR_RS_RRRD },
- { "clcle", 0xa9, INSTR_RS_RRRD },
- { "stnsm", 0xac, INSTR_SI_URD },
- { "stosm", 0xad, INSTR_SI_URD },
- { "sigp", 0xae, INSTR_RS_RRRD },
- { "mc", 0xaf, INSTR_SI_URD },
- { "lra", 0xb1, INSTR_RX_RRRD },
- { "stctl", 0xb6, INSTR_RS_CCRD },
- { "lctl", 0xb7, INSTR_RS_CCRD },
- { "cs", 0xba, INSTR_RS_RRRD },
- { "cds", 0xbb, INSTR_RS_RRRD },
- { "clm", 0xbd, INSTR_RS_RURD },
- { "stcm", 0xbe, INSTR_RS_RURD },
- { "icm", 0xbf, INSTR_RS_RURD },
- { "mvn", 0xd1, INSTR_SS_L0RDRD },
- { "mvc", 0xd2, INSTR_SS_L0RDRD },
- { "mvz", 0xd3, INSTR_SS_L0RDRD },
- { "nc", 0xd4, INSTR_SS_L0RDRD },
- { "clc", 0xd5, INSTR_SS_L0RDRD },
- { "oc", 0xd6, INSTR_SS_L0RDRD },
- { "xc", 0xd7, INSTR_SS_L0RDRD },
- { "mvck", 0xd9, INSTR_SS_RRRDRD },
- { "mvcp", 0xda, INSTR_SS_RRRDRD },
- { "mvcs", 0xdb, INSTR_SS_RRRDRD },
- { "tr", 0xdc, INSTR_SS_L0RDRD },
- { "trt", 0xdd, INSTR_SS_L0RDRD },
- { "ed", 0xde, INSTR_SS_L0RDRD },
- { "edmk", 0xdf, INSTR_SS_L0RDRD },
- { "pku", 0xe1, INSTR_SS_L0RDRD },
- { "unpku", 0xe2, INSTR_SS_L0RDRD },
- { "mvcin", 0xe8, INSTR_SS_L0RDRD },
- { "pka", 0xe9, INSTR_SS_L0RDRD },
- { "unpka", 0xea, INSTR_SS_L0RDRD },
- { "plo", 0xee, INSTR_SS_RRRDRD2 },
- { "srp", 0xf0, INSTR_SS_LIRDRD },
- { "mvo", 0xf1, INSTR_SS_LLRDRD },
- { "pack", 0xf2, INSTR_SS_LLRDRD },
- { "unpk", 0xf3, INSTR_SS_LLRDRD },
- { "zap", 0xf8, INSTR_SS_LLRDRD },
- { "cp", 0xf9, INSTR_SS_LLRDRD },
- { "ap", 0xfa, INSTR_SS_LLRDRD },
- { "sp", 0xfb, INSTR_SS_LLRDRD },
- { "mp", 0xfc, INSTR_SS_LLRDRD },
- { "dp", 0xfd, INSTR_SS_LLRDRD },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_01[] = {
- { "ptff", 0x04, INSTR_E },
- { "pfpo", 0x0a, INSTR_E },
- { "sam64", 0x0e, INSTR_E },
- { "pr", 0x01, INSTR_E },
- { "upt", 0x02, INSTR_E },
- { "sckpf", 0x07, INSTR_E },
- { "tam", 0x0b, INSTR_E },
- { "sam24", 0x0c, INSTR_E },
- { "sam31", 0x0d, INSTR_E },
- { "trap2", 0xff, INSTR_E },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_a5[] = {
- { "iihh", 0x00, INSTR_RI_RU },
- { "iihl", 0x01, INSTR_RI_RU },
- { "iilh", 0x02, INSTR_RI_RU },
- { "iill", 0x03, INSTR_RI_RU },
- { "nihh", 0x04, INSTR_RI_RU },
- { "nihl", 0x05, INSTR_RI_RU },
- { "nilh", 0x06, INSTR_RI_RU },
- { "nill", 0x07, INSTR_RI_RU },
- { "oihh", 0x08, INSTR_RI_RU },
- { "oihl", 0x09, INSTR_RI_RU },
- { "oilh", 0x0a, INSTR_RI_RU },
- { "oill", 0x0b, INSTR_RI_RU },
- { "llihh", 0x0c, INSTR_RI_RU },
- { "llihl", 0x0d, INSTR_RI_RU },
- { "llilh", 0x0e, INSTR_RI_RU },
- { "llill", 0x0f, INSTR_RI_RU },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_a7[] = {
- { "tmhh", 0x02, INSTR_RI_RU },
- { "tmhl", 0x03, INSTR_RI_RU },
- { "brctg", 0x07, INSTR_RI_RP },
- { "lghi", 0x09, INSTR_RI_RI },
- { "aghi", 0x0b, INSTR_RI_RI },
- { "mghi", 0x0d, INSTR_RI_RI },
- { "cghi", 0x0f, INSTR_RI_RI },
- { "tmlh", 0x00, INSTR_RI_RU },
- { "tmll", 0x01, INSTR_RI_RU },
- { "brc", 0x04, INSTR_RI_UP },
- { "bras", 0x05, INSTR_RI_RP },
- { "brct", 0x06, INSTR_RI_RP },
- { "lhi", 0x08, INSTR_RI_RI },
- { "ahi", 0x0a, INSTR_RI_RI },
- { "mhi", 0x0c, INSTR_RI_RI },
- { "chi", 0x0e, INSTR_RI_RI },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_aa[] = {
- { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
- { "rion", 0x01, INSTR_RI_RI },
- { "tric", 0x02, INSTR_RI_RI },
- { "rioff", 0x03, INSTR_RI_RI },
- { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_b2[] = {
- { "stckf", 0x7c, INSTR_S_RD },
- { "lpp", 0x80, INSTR_S_RD },
- { "lcctl", 0x84, INSTR_S_RD },
- { "lpctl", 0x85, INSTR_S_RD },
- { "qsi", 0x86, INSTR_S_RD },
- { "lsctl", 0x87, INSTR_S_RD },
- { "qctri", 0x8e, INSTR_S_RD },
- { "stfle", 0xb0, INSTR_S_RD },
- { "lpswe", 0xb2, INSTR_S_RD },
- { "srnmb", 0xb8, INSTR_S_RD },
- { "srnmt", 0xb9, INSTR_S_RD },
- { "lfas", 0xbd, INSTR_S_RD },
- { "scctr", 0xe0, INSTR_RRE_RR },
- { "spctr", 0xe1, INSTR_RRE_RR },
- { "ecctr", 0xe4, INSTR_RRE_RR },
- { "epctr", 0xe5, INSTR_RRE_RR },
- { "ppa", 0xe8, INSTR_RRF_U0RR },
- { "etnd", 0xec, INSTR_RRE_R0 },
- { "ecpga", 0xed, INSTR_RRE_RR },
- { "tend", 0xf8, INSTR_S_00 },
- { "niai", 0xfa, INSTR_IE_UU },
- { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD },
- { "stidp", 0x02, INSTR_S_RD },
- { "sck", 0x04, INSTR_S_RD },
- { "stck", 0x05, INSTR_S_RD },
- { "sckc", 0x06, INSTR_S_RD },
- { "stckc", 0x07, INSTR_S_RD },
- { "spt", 0x08, INSTR_S_RD },
- { "stpt", 0x09, INSTR_S_RD },
- { "spka", 0x0a, INSTR_S_RD },
- { "ipk", 0x0b, INSTR_S_00 },
- { "ptlb", 0x0d, INSTR_S_00 },
- { "spx", 0x10, INSTR_S_RD },
- { "stpx", 0x11, INSTR_S_RD },
- { "stap", 0x12, INSTR_S_RD },
- { "sie", 0x14, INSTR_S_RD },
- { "pc", 0x18, INSTR_S_RD },
- { "sac", 0x19, INSTR_S_RD },
- { "cfc", 0x1a, INSTR_S_RD },
- { "servc", 0x20, INSTR_RRE_RR },
- { "ipte", 0x21, INSTR_RRE_RR },
- { "ipm", 0x22, INSTR_RRE_R0 },
- { "ivsk", 0x23, INSTR_RRE_RR },
- { "iac", 0x24, INSTR_RRE_R0 },
- { "ssar", 0x25, INSTR_RRE_R0 },
- { "epar", 0x26, INSTR_RRE_R0 },
- { "esar", 0x27, INSTR_RRE_R0 },
- { "pt", 0x28, INSTR_RRE_RR },
- { "iske", 0x29, INSTR_RRE_RR },
- { "rrbe", 0x2a, INSTR_RRE_RR },
- { "sske", 0x2b, INSTR_RRF_M0RR },
- { "tb", 0x2c, INSTR_RRE_0R },
- { "dxr", 0x2d, INSTR_RRE_FF },
- { "pgin", 0x2e, INSTR_RRE_RR },
- { "pgout", 0x2f, INSTR_RRE_RR },
- { "csch", 0x30, INSTR_S_00 },
- { "hsch", 0x31, INSTR_S_00 },
- { "msch", 0x32, INSTR_S_RD },
- { "ssch", 0x33, INSTR_S_RD },
- { "stsch", 0x34, INSTR_S_RD },
- { "tsch", 0x35, INSTR_S_RD },
- { "tpi", 0x36, INSTR_S_RD },
- { "sal", 0x37, INSTR_S_00 },
- { "rsch", 0x38, INSTR_S_00 },
- { "stcrw", 0x39, INSTR_S_RD },
- { "stcps", 0x3a, INSTR_S_RD },
- { "rchp", 0x3b, INSTR_S_00 },
- { "schm", 0x3c, INSTR_S_00 },
- { "bakr", 0x40, INSTR_RRE_RR },
- { "cksm", 0x41, INSTR_RRE_RR },
- { "sqdr", 0x44, INSTR_RRE_FF },
- { "sqer", 0x45, INSTR_RRE_FF },
- { "stura", 0x46, INSTR_RRE_RR },
- { "msta", 0x47, INSTR_RRE_R0 },
- { "palb", 0x48, INSTR_RRE_00 },
- { "ereg", 0x49, INSTR_RRE_RR },
- { "esta", 0x4a, INSTR_RRE_RR },
- { "lura", 0x4b, INSTR_RRE_RR },
- { "tar", 0x4c, INSTR_RRE_AR },
- { "cpya", 0x4d, INSTR_RRE_AA },
- { "sar", 0x4e, INSTR_RRE_AR },
- { "ear", 0x4f, INSTR_RRE_RA },
- { "csp", 0x50, INSTR_RRE_RR },
- { "msr", 0x52, INSTR_RRE_RR },
- { "mvpg", 0x54, INSTR_RRE_RR },
- { "mvst", 0x55, INSTR_RRE_RR },
- { "cuse", 0x57, INSTR_RRE_RR },
- { "bsg", 0x58, INSTR_RRE_RR },
- { "bsa", 0x5a, INSTR_RRE_RR },
- { "clst", 0x5d, INSTR_RRE_RR },
- { "srst", 0x5e, INSTR_RRE_RR },
- { "cmpsc", 0x63, INSTR_RRE_RR },
- { "siga", 0x74, INSTR_S_RD },
- { "xsch", 0x76, INSTR_S_00 },
- { "rp", 0x77, INSTR_S_RD },
- { "stcke", 0x78, INSTR_S_RD },
- { "sacf", 0x79, INSTR_S_RD },
- { "stsi", 0x7d, INSTR_S_RD },
- { "srnm", 0x99, INSTR_S_RD },
- { "stfpc", 0x9c, INSTR_S_RD },
- { "lfpc", 0x9d, INSTR_S_RD },
- { "tre", 0xa5, INSTR_RRE_RR },
- { "cuutf", 0xa6, INSTR_RRF_M0RR },
- { "cutfu", 0xa7, INSTR_RRF_M0RR },
- { "stfl", 0xb1, INSTR_S_RD },
- { "trap4", 0xff, INSTR_S_RD },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_b3[] = {
- { "maylr", 0x38, INSTR_RRF_F0FF },
- { "mylr", 0x39, INSTR_RRF_F0FF },
- { "mayr", 0x3a, INSTR_RRF_F0FF },
- { "myr", 0x3b, INSTR_RRF_F0FF },
- { "mayhr", 0x3c, INSTR_RRF_F0FF },
- { "myhr", 0x3d, INSTR_RRF_F0FF },
- { "lpdfr", 0x70, INSTR_RRE_FF },
- { "lndfr", 0x71, INSTR_RRE_FF },
- { "cpsdr", 0x72, INSTR_RRF_F0FF2 },
- { "lcdfr", 0x73, INSTR_RRE_FF },
- { "sfasr", 0x85, INSTR_RRE_R0 },
- { { 0, LONG_INSN_CELFBR }, 0x90, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CDLFBR }, 0x91, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CXLFBR }, 0x92, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CEFBRA }, 0x94, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CDFBRA }, 0x95, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CXFBRA }, 0x96, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CFEBRA }, 0x98, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CFDBRA }, 0x99, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CFXBRA }, 0x9a, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CLFEBR }, 0x9c, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CLFDBR }, 0x9d, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CLFXBR }, 0x9e, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CELGBR }, 0xa0, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CDLGBR }, 0xa1, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CXLGBR }, 0xa2, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CEGBRA }, 0xa4, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CDGBRA }, 0xa5, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CXGBRA }, 0xa6, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CGEBRA }, 0xa8, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CGDBRA }, 0xa9, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CGXBRA }, 0xaa, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CLGEBR }, 0xac, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CLGDBR }, 0xad, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CLGXBR }, 0xae, INSTR_RRF_UUFR },
- { "ldgr", 0xc1, INSTR_RRE_FR },
- { "cegr", 0xc4, INSTR_RRE_FR },
- { "cdgr", 0xc5, INSTR_RRE_FR },
- { "cxgr", 0xc6, INSTR_RRE_FR },
- { "cger", 0xc8, INSTR_RRF_U0RF },
- { "cgdr", 0xc9, INSTR_RRF_U0RF },
- { "cgxr", 0xca, INSTR_RRF_U0RF },
- { "lgdr", 0xcd, INSTR_RRE_RF },
- { "mdtra", 0xd0, INSTR_RRF_FUFF2 },
- { "ddtra", 0xd1, INSTR_RRF_FUFF2 },
- { "adtra", 0xd2, INSTR_RRF_FUFF2 },
- { "sdtra", 0xd3, INSTR_RRF_FUFF2 },
- { "ldetr", 0xd4, INSTR_RRF_0UFF },
- { "ledtr", 0xd5, INSTR_RRF_UUFF },
- { "ltdtr", 0xd6, INSTR_RRE_FF },
- { "fidtr", 0xd7, INSTR_RRF_UUFF },
- { "mxtra", 0xd8, INSTR_RRF_FUFF2 },
- { "dxtra", 0xd9, INSTR_RRF_FUFF2 },
- { "axtra", 0xda, INSTR_RRF_FUFF2 },
- { "sxtra", 0xdb, INSTR_RRF_FUFF2 },
- { "lxdtr", 0xdc, INSTR_RRF_0UFF },
- { "ldxtr", 0xdd, INSTR_RRF_UUFF },
- { "ltxtr", 0xde, INSTR_RRE_FF },
- { "fixtr", 0xdf, INSTR_RRF_UUFF },
- { "kdtr", 0xe0, INSTR_RRE_FF },
- { { 0, LONG_INSN_CGDTRA }, 0xe1, INSTR_RRF_UURF },
- { "cudtr", 0xe2, INSTR_RRE_RF },
- { "csdtr", 0xe3, INSTR_RRE_RF },
- { "cdtr", 0xe4, INSTR_RRE_FF },
- { "eedtr", 0xe5, INSTR_RRE_RF },
- { "esdtr", 0xe7, INSTR_RRE_RF },
- { "kxtr", 0xe8, INSTR_RRE_FF },
- { { 0, LONG_INSN_CGXTRA }, 0xe9, INSTR_RRF_UUFR },
- { "cuxtr", 0xea, INSTR_RRE_RF },
- { "csxtr", 0xeb, INSTR_RRE_RF },
- { "cxtr", 0xec, INSTR_RRE_FF },
- { "eextr", 0xed, INSTR_RRE_RF },
- { "esxtr", 0xef, INSTR_RRE_RF },
- { { 0, LONG_INSN_CDGTRA }, 0xf1, INSTR_RRF_UUFR },
- { "cdutr", 0xf2, INSTR_RRE_FR },
- { "cdstr", 0xf3, INSTR_RRE_FR },
- { "cedtr", 0xf4, INSTR_RRE_FF },
- { "qadtr", 0xf5, INSTR_RRF_FUFF },
- { "iedtr", 0xf6, INSTR_RRF_F0FR },
- { "rrdtr", 0xf7, INSTR_RRF_FFRU },
- { { 0, LONG_INSN_CXGTRA }, 0xf9, INSTR_RRF_UURF },
- { "cxutr", 0xfa, INSTR_RRE_FR },
- { "cxstr", 0xfb, INSTR_RRE_FR },
- { "cextr", 0xfc, INSTR_RRE_FF },
- { "qaxtr", 0xfd, INSTR_RRF_FUFF },
- { "iextr", 0xfe, INSTR_RRF_F0FR },
- { "rrxtr", 0xff, INSTR_RRF_FFRU },
- { "lpebr", 0x00, INSTR_RRE_FF },
- { "lnebr", 0x01, INSTR_RRE_FF },
- { "ltebr", 0x02, INSTR_RRE_FF },
- { "lcebr", 0x03, INSTR_RRE_FF },
- { "ldebr", 0x04, INSTR_RRE_FF },
- { "lxdbr", 0x05, INSTR_RRE_FF },
- { "lxebr", 0x06, INSTR_RRE_FF },
- { "mxdbr", 0x07, INSTR_RRE_FF },
- { "kebr", 0x08, INSTR_RRE_FF },
- { "cebr", 0x09, INSTR_RRE_FF },
- { "aebr", 0x0a, INSTR_RRE_FF },
- { "sebr", 0x0b, INSTR_RRE_FF },
- { "mdebr", 0x0c, INSTR_RRE_FF },
- { "debr", 0x0d, INSTR_RRE_FF },
- { "maebr", 0x0e, INSTR_RRF_F0FF },
- { "msebr", 0x0f, INSTR_RRF_F0FF },
- { "lpdbr", 0x10, INSTR_RRE_FF },
- { "lndbr", 0x11, INSTR_RRE_FF },
- { "ltdbr", 0x12, INSTR_RRE_FF },
- { "lcdbr", 0x13, INSTR_RRE_FF },
- { "sqebr", 0x14, INSTR_RRE_FF },
- { "sqdbr", 0x15, INSTR_RRE_FF },
- { "sqxbr", 0x16, INSTR_RRE_FF },
- { "meebr", 0x17, INSTR_RRE_FF },
- { "kdbr", 0x18, INSTR_RRE_FF },
- { "cdbr", 0x19, INSTR_RRE_FF },
- { "adbr", 0x1a, INSTR_RRE_FF },
- { "sdbr", 0x1b, INSTR_RRE_FF },
- { "mdbr", 0x1c, INSTR_RRE_FF },
- { "ddbr", 0x1d, INSTR_RRE_FF },
- { "madbr", 0x1e, INSTR_RRF_F0FF },
- { "msdbr", 0x1f, INSTR_RRF_F0FF },
- { "lder", 0x24, INSTR_RRE_FF },
- { "lxdr", 0x25, INSTR_RRE_FF },
- { "lxer", 0x26, INSTR_RRE_FF },
- { "maer", 0x2e, INSTR_RRF_F0FF },
- { "mser", 0x2f, INSTR_RRF_F0FF },
- { "sqxr", 0x36, INSTR_RRE_FF },
- { "meer", 0x37, INSTR_RRE_FF },
- { "madr", 0x3e, INSTR_RRF_F0FF },
- { "msdr", 0x3f, INSTR_RRF_F0FF },
- { "lpxbr", 0x40, INSTR_RRE_FF },
- { "lnxbr", 0x41, INSTR_RRE_FF },
- { "ltxbr", 0x42, INSTR_RRE_FF },
- { "lcxbr", 0x43, INSTR_RRE_FF },
- { { 0, LONG_INSN_LEDBRA }, 0x44, INSTR_RRF_UUFF },
- { { 0, LONG_INSN_LDXBRA }, 0x45, INSTR_RRF_UUFF },
- { { 0, LONG_INSN_LEXBRA }, 0x46, INSTR_RRF_UUFF },
- { { 0, LONG_INSN_FIXBRA }, 0x47, INSTR_RRF_UUFF },
- { "kxbr", 0x48, INSTR_RRE_FF },
- { "cxbr", 0x49, INSTR_RRE_FF },
- { "axbr", 0x4a, INSTR_RRE_FF },
- { "sxbr", 0x4b, INSTR_RRE_FF },
- { "mxbr", 0x4c, INSTR_RRE_FF },
- { "dxbr", 0x4d, INSTR_RRE_FF },
- { "tbedr", 0x50, INSTR_RRF_U0FF },
- { "tbdr", 0x51, INSTR_RRF_U0FF },
- { "diebr", 0x53, INSTR_RRF_FUFF },
- { { 0, LONG_INSN_FIEBRA }, 0x57, INSTR_RRF_UUFF },
- { "thder", 0x58, INSTR_RRE_FF },
- { "thdr", 0x59, INSTR_RRE_FF },
- { "didbr", 0x5b, INSTR_RRF_FUFF },
- { { 0, LONG_INSN_FIDBRA }, 0x5f, INSTR_RRF_UUFF },
- { "lpxr", 0x60, INSTR_RRE_FF },
- { "lnxr", 0x61, INSTR_RRE_FF },
- { "ltxr", 0x62, INSTR_RRE_FF },
- { "lcxr", 0x63, INSTR_RRE_FF },
- { "lxr", 0x65, INSTR_RRE_FF },
- { "lexr", 0x66, INSTR_RRE_FF },
- { "fixr", 0x67, INSTR_RRE_FF },
- { "cxr", 0x69, INSTR_RRE_FF },
- { "lzer", 0x74, INSTR_RRE_F0 },
- { "lzdr", 0x75, INSTR_RRE_F0 },
- { "lzxr", 0x76, INSTR_RRE_F0 },
- { "fier", 0x77, INSTR_RRE_FF },
- { "fidr", 0x7f, INSTR_RRE_FF },
- { "sfpc", 0x84, INSTR_RRE_RR_OPT },
- { "efpc", 0x8c, INSTR_RRE_RR_OPT },
- { "cefbr", 0x94, INSTR_RRE_RF },
- { "cdfbr", 0x95, INSTR_RRE_RF },
- { "cxfbr", 0x96, INSTR_RRE_RF },
- { "cfebr", 0x98, INSTR_RRF_U0RF },
- { "cfdbr", 0x99, INSTR_RRF_U0RF },
- { "cfxbr", 0x9a, INSTR_RRF_U0RF },
- { "cefr", 0xb4, INSTR_RRE_FR },
- { "cdfr", 0xb5, INSTR_RRE_FR },
- { "cxfr", 0xb6, INSTR_RRE_FR },
- { "cfer", 0xb8, INSTR_RRF_U0RF },
- { "cfdr", 0xb9, INSTR_RRF_U0RF },
- { "cfxr", 0xba, INSTR_RRF_U0RF },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_b9[] = {
- { "lpgr", 0x00, INSTR_RRE_RR },
- { "lngr", 0x01, INSTR_RRE_RR },
- { "ltgr", 0x02, INSTR_RRE_RR },
- { "lcgr", 0x03, INSTR_RRE_RR },
- { "lgr", 0x04, INSTR_RRE_RR },
- { "lurag", 0x05, INSTR_RRE_RR },
- { "lgbr", 0x06, INSTR_RRE_RR },
- { "lghr", 0x07, INSTR_RRE_RR },
- { "agr", 0x08, INSTR_RRE_RR },
- { "sgr", 0x09, INSTR_RRE_RR },
- { "algr", 0x0a, INSTR_RRE_RR },
- { "slgr", 0x0b, INSTR_RRE_RR },
- { "msgr", 0x0c, INSTR_RRE_RR },
- { "dsgr", 0x0d, INSTR_RRE_RR },
- { "eregg", 0x0e, INSTR_RRE_RR },
- { "lrvgr", 0x0f, INSTR_RRE_RR },
- { "lpgfr", 0x10, INSTR_RRE_RR },
- { "lngfr", 0x11, INSTR_RRE_RR },
- { "ltgfr", 0x12, INSTR_RRE_RR },
- { "lcgfr", 0x13, INSTR_RRE_RR },
- { "lgfr", 0x14, INSTR_RRE_RR },
- { "llgfr", 0x16, INSTR_RRE_RR },
- { "llgtr", 0x17, INSTR_RRE_RR },
- { "agfr", 0x18, INSTR_RRE_RR },
- { "sgfr", 0x19, INSTR_RRE_RR },
- { "algfr", 0x1a, INSTR_RRE_RR },
- { "slgfr", 0x1b, INSTR_RRE_RR },
- { "msgfr", 0x1c, INSTR_RRE_RR },
- { "dsgfr", 0x1d, INSTR_RRE_RR },
- { "cgr", 0x20, INSTR_RRE_RR },
- { "clgr", 0x21, INSTR_RRE_RR },
- { "sturg", 0x25, INSTR_RRE_RR },
- { "lbr", 0x26, INSTR_RRE_RR },
- { "lhr", 0x27, INSTR_RRE_RR },
- { "cgfr", 0x30, INSTR_RRE_RR },
- { "clgfr", 0x31, INSTR_RRE_RR },
- { "cfdtr", 0x41, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CLGDTR }, 0x42, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CLFDTR }, 0x43, INSTR_RRF_UURF },
- { "bctgr", 0x46, INSTR_RRE_RR },
- { "cfxtr", 0x49, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CLGXTR }, 0x4a, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CLFXTR }, 0x4b, INSTR_RRF_UUFR },
- { "cdftr", 0x51, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CDLGTR }, 0x52, INSTR_RRF_UUFR },
- { { 0, LONG_INSN_CDLFTR }, 0x53, INSTR_RRF_UUFR },
- { "cxftr", 0x59, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CXLGTR }, 0x5a, INSTR_RRF_UURF },
- { { 0, LONG_INSN_CXLFTR }, 0x5b, INSTR_RRF_UUFR },
- { "cgrt", 0x60, INSTR_RRF_U0RR },
- { "clgrt", 0x61, INSTR_RRF_U0RR },
- { "crt", 0x72, INSTR_RRF_U0RR },
- { "clrt", 0x73, INSTR_RRF_U0RR },
- { "ngr", 0x80, INSTR_RRE_RR },
- { "ogr", 0x81, INSTR_RRE_RR },
- { "xgr", 0x82, INSTR_RRE_RR },
- { "flogr", 0x83, INSTR_RRE_RR },
- { "llgcr", 0x84, INSTR_RRE_RR },
- { "llghr", 0x85, INSTR_RRE_RR },
- { "mlgr", 0x86, INSTR_RRE_RR },
- { "dlgr", 0x87, INSTR_RRE_RR },
- { "alcgr", 0x88, INSTR_RRE_RR },
- { "slbgr", 0x89, INSTR_RRE_RR },
- { "cspg", 0x8a, INSTR_RRE_RR },
- { "idte", 0x8e, INSTR_RRF_R0RR },
- { "crdte", 0x8f, INSTR_RRF_RMRR },
- { "llcr", 0x94, INSTR_RRE_RR },
- { "llhr", 0x95, INSTR_RRE_RR },
- { "esea", 0x9d, INSTR_RRE_R0 },
- { "ptf", 0xa2, INSTR_RRE_R0 },
- { "lptea", 0xaa, INSTR_RRF_RURR },
- { "rrbm", 0xae, INSTR_RRE_RR },
- { "pfmf", 0xaf, INSTR_RRE_RR },
- { "cu14", 0xb0, INSTR_RRF_M0RR },
- { "cu24", 0xb1, INSTR_RRF_M0RR },
- { "cu41", 0xb2, INSTR_RRE_RR },
- { "cu42", 0xb3, INSTR_RRE_RR },
- { "trtre", 0xbd, INSTR_RRF_M0RR },
- { "srstu", 0xbe, INSTR_RRE_RR },
- { "trte", 0xbf, INSTR_RRF_M0RR },
- { "ahhhr", 0xc8, INSTR_RRF_R0RR2 },
- { "shhhr", 0xc9, INSTR_RRF_R0RR2 },
- { { 0, LONG_INSN_ALHHHR }, 0xca, INSTR_RRF_R0RR2 },
- { { 0, LONG_INSN_SLHHHR }, 0xcb, INSTR_RRF_R0RR2 },
- { "chhr", 0xcd, INSTR_RRE_RR },
- { "clhhr", 0xcf, INSTR_RRE_RR },
- { { 0, LONG_INSN_PCISTG }, 0xd0, INSTR_RRE_RR },
- { "pcilg", 0xd2, INSTR_RRE_RR },
- { "rpcit", 0xd3, INSTR_RRE_RR },
- { "ahhlr", 0xd8, INSTR_RRF_R0RR2 },
- { "shhlr", 0xd9, INSTR_RRF_R0RR2 },
- { { 0, LONG_INSN_ALHHLR }, 0xda, INSTR_RRF_R0RR2 },
- { { 0, LONG_INSN_SLHHLR }, 0xdb, INSTR_RRF_R0RR2 },
- { "chlr", 0xdd, INSTR_RRE_RR },
- { "clhlr", 0xdf, INSTR_RRE_RR },
- { { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR },
- { "locgr", 0xe2, INSTR_RRF_M0RR },
- { "ngrk", 0xe4, INSTR_RRF_R0RR2 },
- { "ogrk", 0xe6, INSTR_RRF_R0RR2 },
- { "xgrk", 0xe7, INSTR_RRF_R0RR2 },
- { "agrk", 0xe8, INSTR_RRF_R0RR2 },
- { "sgrk", 0xe9, INSTR_RRF_R0RR2 },
- { "algrk", 0xea, INSTR_RRF_R0RR2 },
- { "slgrk", 0xeb, INSTR_RRF_R0RR2 },
- { "locr", 0xf2, INSTR_RRF_M0RR },
- { "nrk", 0xf4, INSTR_RRF_R0RR2 },
- { "ork", 0xf6, INSTR_RRF_R0RR2 },
- { "xrk", 0xf7, INSTR_RRF_R0RR2 },
- { "ark", 0xf8, INSTR_RRF_R0RR2 },
- { "srk", 0xf9, INSTR_RRF_R0RR2 },
- { "alrk", 0xfa, INSTR_RRF_R0RR2 },
- { "slrk", 0xfb, INSTR_RRF_R0RR2 },
- { "kmac", 0x1e, INSTR_RRE_RR },
- { "lrvr", 0x1f, INSTR_RRE_RR },
- { "km", 0x2e, INSTR_RRE_RR },
- { "kmc", 0x2f, INSTR_RRE_RR },
- { "kimd", 0x3e, INSTR_RRE_RR },
- { "klmd", 0x3f, INSTR_RRE_RR },
- { "epsw", 0x8d, INSTR_RRE_RR },
- { "trtt", 0x90, INSTR_RRF_M0RR },
- { "trto", 0x91, INSTR_RRF_M0RR },
- { "trot", 0x92, INSTR_RRF_M0RR },
- { "troo", 0x93, INSTR_RRF_M0RR },
- { "mlr", 0x96, INSTR_RRE_RR },
- { "dlr", 0x97, INSTR_RRE_RR },
- { "alcr", 0x98, INSTR_RRE_RR },
- { "slbr", 0x99, INSTR_RRE_RR },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_c0[] = {
- { "lgfi", 0x01, INSTR_RIL_RI },
- { "xihf", 0x06, INSTR_RIL_RU },
- { "xilf", 0x07, INSTR_RIL_RU },
- { "iihf", 0x08, INSTR_RIL_RU },
- { "iilf", 0x09, INSTR_RIL_RU },
- { "nihf", 0x0a, INSTR_RIL_RU },
- { "nilf", 0x0b, INSTR_RIL_RU },
- { "oihf", 0x0c, INSTR_RIL_RU },
- { "oilf", 0x0d, INSTR_RIL_RU },
- { "llihf", 0x0e, INSTR_RIL_RU },
- { "llilf", 0x0f, INSTR_RIL_RU },
- { "larl", 0x00, INSTR_RIL_RP },
- { "brcl", 0x04, INSTR_RIL_UP },
- { "brasl", 0x05, INSTR_RIL_RP },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_c2[] = {
- { "msgfi", 0x00, INSTR_RIL_RI },
- { "msfi", 0x01, INSTR_RIL_RI },
- { "slgfi", 0x04, INSTR_RIL_RU },
- { "slfi", 0x05, INSTR_RIL_RU },
- { "agfi", 0x08, INSTR_RIL_RI },
- { "afi", 0x09, INSTR_RIL_RI },
- { "algfi", 0x0a, INSTR_RIL_RU },
- { "alfi", 0x0b, INSTR_RIL_RU },
- { "cgfi", 0x0c, INSTR_RIL_RI },
- { "cfi", 0x0d, INSTR_RIL_RI },
- { "clgfi", 0x0e, INSTR_RIL_RU },
- { "clfi", 0x0f, INSTR_RIL_RU },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_c4[] = {
- { "llhrl", 0x02, INSTR_RIL_RP },
- { "lghrl", 0x04, INSTR_RIL_RP },
- { "lhrl", 0x05, INSTR_RIL_RP },
- { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP },
- { "sthrl", 0x07, INSTR_RIL_RP },
- { "lgrl", 0x08, INSTR_RIL_RP },
- { "stgrl", 0x0b, INSTR_RIL_RP },
- { "lgfrl", 0x0c, INSTR_RIL_RP },
- { "lrl", 0x0d, INSTR_RIL_RP },
- { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP },
- { "strl", 0x0f, INSTR_RIL_RP },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_c6[] = {
- { "exrl", 0x00, INSTR_RIL_RP },
- { "pfdrl", 0x02, INSTR_RIL_UP },
- { "cghrl", 0x04, INSTR_RIL_RP },
- { "chrl", 0x05, INSTR_RIL_RP },
- { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP },
- { "clhrl", 0x07, INSTR_RIL_RP },
- { "cgrl", 0x08, INSTR_RIL_RP },
- { "clgrl", 0x0a, INSTR_RIL_RP },
- { "cgfrl", 0x0c, INSTR_RIL_RP },
- { "crl", 0x0d, INSTR_RIL_RP },
- { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP },
- { "clrl", 0x0f, INSTR_RIL_RP },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_c8[] = {
- { "mvcos", 0x00, INSTR_SSF_RRDRD },
- { "ectg", 0x01, INSTR_SSF_RRDRD },
- { "csst", 0x02, INSTR_SSF_RRDRD },
- { "lpd", 0x04, INSTR_SSF_RRDRD2 },
- { "lpdg", 0x05, INSTR_SSF_RRDRD2 },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_cc[] = {
- { "brcth", 0x06, INSTR_RIL_RP },
- { "aih", 0x08, INSTR_RIL_RI },
- { "alsih", 0x0a, INSTR_RIL_RI },
- { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI },
- { "cih", 0x0d, INSTR_RIL_RI },
- { "clih", 0x0f, INSTR_RIL_RI },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_e3[] = {
- { "ltg", 0x02, INSTR_RXY_RRRD },
- { "lrag", 0x03, INSTR_RXY_RRRD },
- { "lg", 0x04, INSTR_RXY_RRRD },
- { "cvby", 0x06, INSTR_RXY_RRRD },
- { "ag", 0x08, INSTR_RXY_RRRD },
- { "sg", 0x09, INSTR_RXY_RRRD },
- { "alg", 0x0a, INSTR_RXY_RRRD },
- { "slg", 0x0b, INSTR_RXY_RRRD },
- { "msg", 0x0c, INSTR_RXY_RRRD },
- { "dsg", 0x0d, INSTR_RXY_RRRD },
- { "cvbg", 0x0e, INSTR_RXY_RRRD },
- { "lrvg", 0x0f, INSTR_RXY_RRRD },
- { "lt", 0x12, INSTR_RXY_RRRD },
- { "lray", 0x13, INSTR_RXY_RRRD },
- { "lgf", 0x14, INSTR_RXY_RRRD },
- { "lgh", 0x15, INSTR_RXY_RRRD },
- { "llgf", 0x16, INSTR_RXY_RRRD },
- { "llgt", 0x17, INSTR_RXY_RRRD },
- { "agf", 0x18, INSTR_RXY_RRRD },
- { "sgf", 0x19, INSTR_RXY_RRRD },
- { "algf", 0x1a, INSTR_RXY_RRRD },
- { "slgf", 0x1b, INSTR_RXY_RRRD },
- { "msgf", 0x1c, INSTR_RXY_RRRD },
- { "dsgf", 0x1d, INSTR_RXY_RRRD },
- { "cg", 0x20, INSTR_RXY_RRRD },
- { "clg", 0x21, INSTR_RXY_RRRD },
- { "stg", 0x24, INSTR_RXY_RRRD },
- { "ntstg", 0x25, INSTR_RXY_RRRD },
- { "cvdy", 0x26, INSTR_RXY_RRRD },
- { "cvdg", 0x2e, INSTR_RXY_RRRD },
- { "strvg", 0x2f, INSTR_RXY_RRRD },
- { "cgf", 0x30, INSTR_RXY_RRRD },
- { "clgf", 0x31, INSTR_RXY_RRRD },
- { "ltgf", 0x32, INSTR_RXY_RRRD },
- { "cgh", 0x34, INSTR_RXY_RRRD },
- { "pfd", 0x36, INSTR_RXY_URRD },
- { "strvh", 0x3f, INSTR_RXY_RRRD },
- { "bctg", 0x46, INSTR_RXY_RRRD },
- { "sty", 0x50, INSTR_RXY_RRRD },
- { "msy", 0x51, INSTR_RXY_RRRD },
- { "ny", 0x54, INSTR_RXY_RRRD },
- { "cly", 0x55, INSTR_RXY_RRRD },
- { "oy", 0x56, INSTR_RXY_RRRD },
- { "xy", 0x57, INSTR_RXY_RRRD },
- { "ly", 0x58, INSTR_RXY_RRRD },
- { "cy", 0x59, INSTR_RXY_RRRD },
- { "ay", 0x5a, INSTR_RXY_RRRD },
- { "sy", 0x5b, INSTR_RXY_RRRD },
- { "mfy", 0x5c, INSTR_RXY_RRRD },
- { "aly", 0x5e, INSTR_RXY_RRRD },
- { "sly", 0x5f, INSTR_RXY_RRRD },
- { "sthy", 0x70, INSTR_RXY_RRRD },
- { "lay", 0x71, INSTR_RXY_RRRD },
- { "stcy", 0x72, INSTR_RXY_RRRD },
- { "icy", 0x73, INSTR_RXY_RRRD },
- { "laey", 0x75, INSTR_RXY_RRRD },
- { "lb", 0x76, INSTR_RXY_RRRD },
- { "lgb", 0x77, INSTR_RXY_RRRD },
- { "lhy", 0x78, INSTR_RXY_RRRD },
- { "chy", 0x79, INSTR_RXY_RRRD },
- { "ahy", 0x7a, INSTR_RXY_RRRD },
- { "shy", 0x7b, INSTR_RXY_RRRD },
- { "mhy", 0x7c, INSTR_RXY_RRRD },
- { "ng", 0x80, INSTR_RXY_RRRD },
- { "og", 0x81, INSTR_RXY_RRRD },
- { "xg", 0x82, INSTR_RXY_RRRD },
- { "lgat", 0x85, INSTR_RXY_RRRD },
- { "mlg", 0x86, INSTR_RXY_RRRD },
- { "dlg", 0x87, INSTR_RXY_RRRD },
- { "alcg", 0x88, INSTR_RXY_RRRD },
- { "slbg", 0x89, INSTR_RXY_RRRD },
- { "stpq", 0x8e, INSTR_RXY_RRRD },
- { "lpq", 0x8f, INSTR_RXY_RRRD },
- { "llgc", 0x90, INSTR_RXY_RRRD },
- { "llgh", 0x91, INSTR_RXY_RRRD },
- { "llc", 0x94, INSTR_RXY_RRRD },
- { "llh", 0x95, INSTR_RXY_RRRD },
- { { 0, LONG_INSN_LLGTAT }, 0x9c, INSTR_RXY_RRRD },
- { { 0, LONG_INSN_LLGFAT }, 0x9d, INSTR_RXY_RRRD },
- { "lat", 0x9f, INSTR_RXY_RRRD },
- { "lbh", 0xc0, INSTR_RXY_RRRD },
- { "llch", 0xc2, INSTR_RXY_RRRD },
- { "stch", 0xc3, INSTR_RXY_RRRD },
- { "lhh", 0xc4, INSTR_RXY_RRRD },
- { "llhh", 0xc6, INSTR_RXY_RRRD },
- { "sthh", 0xc7, INSTR_RXY_RRRD },
- { "lfhat", 0xc8, INSTR_RXY_RRRD },
- { "lfh", 0xca, INSTR_RXY_RRRD },
- { "stfh", 0xcb, INSTR_RXY_RRRD },
- { "chf", 0xcd, INSTR_RXY_RRRD },
- { "clhf", 0xcf, INSTR_RXY_RRRD },
- { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD },
- { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD },
- { "lrv", 0x1e, INSTR_RXY_RRRD },
- { "lrvh", 0x1f, INSTR_RXY_RRRD },
- { "strv", 0x3e, INSTR_RXY_RRRD },
- { "ml", 0x96, INSTR_RXY_RRRD },
- { "dl", 0x97, INSTR_RXY_RRRD },
- { "alc", 0x98, INSTR_RXY_RRRD },
- { "slb", 0x99, INSTR_RXY_RRRD },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_e5[] = {
- { "strag", 0x02, INSTR_SSE_RDRD },
- { "mvhhi", 0x44, INSTR_SIL_RDI },
- { "mvghi", 0x48, INSTR_SIL_RDI },
- { "mvhi", 0x4c, INSTR_SIL_RDI },
- { "chhsi", 0x54, INSTR_SIL_RDI },
- { { 0, LONG_INSN_CLHHSI }, 0x55, INSTR_SIL_RDU },
- { "cghsi", 0x58, INSTR_SIL_RDI },
- { { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU },
- { "chsi", 0x5c, INSTR_SIL_RDI },
- { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
- { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU },
- { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU },
- { "lasp", 0x00, INSTR_SSE_RDRD },
- { "tprot", 0x01, INSTR_SSE_RDRD },
- { "mvcsk", 0x0e, INSTR_SSE_RDRD },
- { "mvcdk", 0x0f, INSTR_SSE_RDRD },
- { "", 0, INSTR_INVALID }
-};
-
-static struct s390_insn opcode_e7[] = {
- { "lcbb", 0x27, INSTR_RXE_RRRDM },
- { "vgef", 0x13, INSTR_VRV_VVRDM },
- { "vgeg", 0x12, INSTR_VRV_VVRDM },
- { "vgbm", 0x44, INSTR_VRI_V0I0 },
- { "vgm", 0x46, INSTR_VRI_V0IIM },
- { "vl", 0x06, INSTR_VRX_VRRD0 },
- { "vlr", 0x56, INSTR_VRR_VV00000 },
- { "vlrp", 0x05, INSTR_VRX_VRRDM },
- { "vleb", 0x00, INSTR_VRX_VRRDM },
- { "vleh", 0x01, INSTR_VRX_VRRDM },
- { "vlef", 0x03, INSTR_VRX_VRRDM },
- { "vleg", 0x02, INSTR_VRX_VRRDM },
- { "vleib", 0x40, INSTR_VRI_V0IM },
- { "vleih", 0x41, INSTR_VRI_V0IM },
- { "vleif", 0x43, INSTR_VRI_V0IM },
- { "vleig", 0x42, INSTR_VRI_V0IM },
- { "vlgv", 0x21, INSTR_VRS_RVRDM },
- { "vllez", 0x04, INSTR_VRX_VRRDM },
- { "vlm", 0x36, INSTR_VRS_VVRD0 },
- { "vlbb", 0x07, INSTR_VRX_VRRDM },
- { "vlvg", 0x22, INSTR_VRS_VRRDM },
- { "vlvgp", 0x62, INSTR_VRR_VRR0000 },
- { "vll", 0x37, INSTR_VRS_VRRD0 },
- { "vmrh", 0x61, INSTR_VRR_VVV000M },
- { "vmrl", 0x60, INSTR_VRR_VVV000M },
- { "vpk", 0x94, INSTR_VRR_VVV000M },
- { "vpks", 0x97, INSTR_VRR_VVV0M0M },
- { "vpkls", 0x95, INSTR_VRR_VVV0M0M },
- { "vperm", 0x8c, INSTR_VRR_VVV000V },
- { "vpdi", 0x84, INSTR_VRR_VVV000M },
- { "vrep", 0x4d, INSTR_VRI_VVIM },
- { "vrepi", 0x45, INSTR_VRI_V0IM },
- { "vscef", 0x1b, INSTR_VRV_VWRDM },
- { "vsceg", 0x1a, INSTR_VRV_VWRDM },
- { "vsel", 0x8d, INSTR_VRR_VVV000V },
- { "vseg", 0x5f, INSTR_VRR_VV0000M },
- { "vst", 0x0e, INSTR_VRX_VRRD0 },
- { "vsteb", 0x08, INSTR_VRX_VRRDM },
- { "vsteh", 0x09, INSTR_VRX_VRRDM },
- { "vstef", 0x0b, INSTR_VRX_VRRDM },
- { "vsteg", 0x0a, INSTR_VRX_VRRDM },
- { "vstm", 0x3e, INSTR_VRS_VVRD0 },
- { "vstl", 0x3f, INSTR_VRS_VRRD0 },
- { "vuph", 0xd7, INSTR_VRR_VV0000M },
- { "vuplh", 0xd5, INSTR_VRR_VV0000M },
- { "vupl", 0xd6, INSTR_VRR_VV0000M },
- { "vupll", 0xd4, INSTR_VRR_VV0000M },
- { "va", 0xf3, INSTR_VRR_VVV000M },
- { "vacc", 0xf1, INSTR_VRR_VVV000M },
- { "vac", 0xbb, INSTR_VRR_VVVM00V },
- { "vaccc", 0xb9, INSTR_VRR_VVVM00V },
- { "vn", 0x68, INSTR_VRR_VVV0000 },
- { "vnc", 0x69, INSTR_VRR_VVV0000 },
- { "vavg", 0xf2, INSTR_VRR_VVV000M },
- { "vavgl", 0xf0, INSTR_VRR_VVV000M },
- { "vcksm", 0x66, INSTR_VRR_VVV0000 },
- { "vec", 0xdb, INSTR_VRR_VV0000M },
- { "vecl", 0xd9, INSTR_VRR_VV0000M },
- { "vceq", 0xf8, INSTR_VRR_VVV0M0M },
- { "vch", 0xfb, INSTR_VRR_VVV0M0M },
- { "vchl", 0xf9, INSTR_VRR_VVV0M0M },
- { "vclz", 0x53, INSTR_VRR_VV0000M },
- { "vctz", 0x52, INSTR_VRR_VV0000M },
- { "vx", 0x6d, INSTR_VRR_VVV0000 },
- { "vgfm", 0xb4, INSTR_VRR_VVV000M },
- { "vgfma", 0xbc, INSTR_VRR_VVVM00V },
- { "vlc", 0xde, INSTR_VRR_VV0000M },
- { "vlp", 0xdf, INSTR_VRR_VV0000M },
- { "vmx", 0xff, INSTR_VRR_VVV000M },
- { "vmxl", 0xfd, INSTR_VRR_VVV000M },
- { "vmn", 0xfe, INSTR_VRR_VVV000M },
- { "vmnl", 0xfc, INSTR_VRR_VVV000M },
- { "vmal", 0xaa, INSTR_VRR_VVVM00V },
- { "vmae", 0xae, INSTR_VRR_VVVM00V },
- { "vmale", 0xac, INSTR_VRR_VVVM00V },
- { "vmah", 0xab, INSTR_VRR_VVVM00V },
- { "vmalh", 0xa9, INSTR_VRR_VVVM00V },
- { "vmao", 0xaf, INSTR_VRR_VVVM00V },
- { "vmalo", 0xad, INSTR_VRR_VVVM00V },
- { "vmh", 0xa3, INSTR_VRR_VVV000M },
- { "vmlh", 0xa1, INSTR_VRR_VVV000M },
- { "vml", 0xa2, INSTR_VRR_VVV000M },
- { "vme", 0xa6, INSTR_VRR_VVV000M },
- { "vmle", 0xa4, INSTR_VRR_VVV000M },
- { "vmo", 0xa7, INSTR_VRR_VVV000M },
- { "vmlo", 0xa5, INSTR_VRR_VVV000M },
- { "vno", 0x6b, INSTR_VRR_VVV0000 },
- { "vo", 0x6a, INSTR_VRR_VVV0000 },
- { { 0, LONG_INSN_VPOPCT }, 0x50, INSTR_VRR_VV0000M },
- { { 0, LONG_INSN_VERLLV }, 0x73, INSTR_VRR_VVV000M },
- { "verll", 0x33, INSTR_VRS_VVRDM },
- { "verim", 0x72, INSTR_VRI_VVV0IM },
- { "veslv", 0x70, INSTR_VRR_VVV000M },
- { "vesl", 0x30, INSTR_VRS_VVRDM },
- { { 0, LONG_INSN_VESRAV }, 0x7a, INSTR_VRR_VVV000M },
- { "vesra", 0x3a, INSTR_VRS_VVRDM },
- { { 0, LONG_INSN_VESRLV }, 0x78, INSTR_VRR_VVV000M },
- { "vesrl", 0x38, INSTR_VRS_VVRDM },
- { "vsl", 0x74, INSTR_VRR_VVV0000 },
- { "vslb", 0x75, INSTR_VRR_VVV0000 },
- { "vsldb", 0x77, INSTR_VRI_VVV0I0 },
- { "vsra", 0x7e, INSTR_VRR_VVV0000 },
- { "vsrab", 0x7f, INSTR_VRR_VVV0000 },
- { "vsrl", 0x7c, INSTR_VRR_VVV0000 },
- { "vsrlb", 0x7d, INSTR_VRR_VVV0000 },
- { "vs", 0xf7, INSTR_VRR_VVV000M },
- { "vscb", 0xf5, INSTR_VRR_VVV000M },
- { "vsb", 0xbf, INSTR_VRR_VVVM00V },
- { { 0, LONG_INSN_VSBCBI }, 0xbd, INSTR_VRR_VVVM00V },
- { "vsumg", 0x65, INSTR_VRR_VVV000M },
- { "vsumq", 0x67, INSTR_VRR_VVV000M },
- { "vsum", 0x64, INSTR_VRR_VVV000M },
- { "vtm", 0xd8, INSTR_VRR_VV00000 },
- { "vfae", 0x82, INSTR_VRR_VVV0M0M },
- { "vfee", 0x80, INSTR_VRR_VVV0M0M },
- { "vfene", 0x81, INSTR_VRR_VVV0M0M },
- { "vistr", 0x5c, INSTR_VRR_VV00M0M },
- { "vstrc", 0x8a, INSTR_VRR_VVVMM0V },
- { "vfa", 0xe3, INSTR_VRR_VVV00MM },
- { "wfc", 0xcb, INSTR_VRR_VV000MM },
- { "wfk", 0xca, INSTR_VRR_VV000MM },
- { "vfce", 0xe8, INSTR_VRR_VVV0MMM },
- { "vfch", 0xeb, INSTR_VRR_VVV0MMM },
- { "vfche", 0xea, INSTR_VRR_VVV0MMM },
- { "vcdg", 0xc3, INSTR_VRR_VV00MMM },
- { "vcdlg", 0xc1, INSTR_VRR_VV00MMM },
- { "vcgd", 0xc2, INSTR_VRR_VV00MMM },
- { "vclgd", 0xc0, INSTR_VRR_VV00MMM },
- { "vfd", 0xe5, INSTR_VRR_VVV00MM },
- { "vfi", 0xc7, INSTR_VRR_VV00MMM },
- { "vlde", 0xc4, INSTR_VRR_VV000MM },
- { "vled", 0xc5, INSTR_VRR_VV00MMM },
- { "vfm", 0xe7, INSTR_VRR_VVV00MM },
- { "vfma", 0x8f, INSTR_VRR_VVVM0MV },
- { "vfms", 0x8e, INSTR_VRR_VVVM0MV },
- { "vfpso", 0xcc, INSTR_VRR_VV00MMM },
- { "vfsq", 0xce, INSTR_VRR_VV000MM },
- { "vfs", 0xe2, INSTR_VRR_VVV00MM },
- { "vftci", 0x4a, INSTR_VRI_VVIMM },
-};
-
-static struct s390_insn opcode_eb[] = {
- { "lmg", 0x04, INSTR_RSY_RRRD },
- { "srag", 0x0a, INSTR_RSY_RRRD },
- { "slag", 0x0b, INSTR_RSY_RRRD },
- { "srlg", 0x0c, INSTR_RSY_RRRD },
- { "sllg", 0x0d, INSTR_RSY_RRRD },
- { "tracg", 0x0f, INSTR_RSY_RRRD },
- { "csy", 0x14, INSTR_RSY_RRRD },
- { "rllg", 0x1c, INSTR_RSY_RRRD },
- { "clmh", 0x20, INSTR_RSY_RURD },
- { "clmy", 0x21, INSTR_RSY_RURD },
- { "clt", 0x23, INSTR_RSY_RURD },
- { "stmg", 0x24, INSTR_RSY_RRRD },
- { "stctg", 0x25, INSTR_RSY_CCRD },
- { "stmh", 0x26, INSTR_RSY_RRRD },
- { "clgt", 0x2b, INSTR_RSY_RURD },
- { "stcmh", 0x2c, INSTR_RSY_RURD },
- { "stcmy", 0x2d, INSTR_RSY_RURD },
- { "lctlg", 0x2f, INSTR_RSY_CCRD },
- { "csg", 0x30, INSTR_RSY_RRRD },
- { "cdsy", 0x31, INSTR_RSY_RRRD },
- { "cdsg", 0x3e, INSTR_RSY_RRRD },
- { "bxhg", 0x44, INSTR_RSY_RRRD },
- { "bxleg", 0x45, INSTR_RSY_RRRD },
- { "ecag", 0x4c, INSTR_RSY_RRRD },
- { "tmy", 0x51, INSTR_SIY_URD },
- { "mviy", 0x52, INSTR_SIY_URD },
- { "niy", 0x54, INSTR_SIY_URD },
- { "cliy", 0x55, INSTR_SIY_URD },
- { "oiy", 0x56, INSTR_SIY_URD },
- { "xiy", 0x57, INSTR_SIY_URD },
- { "asi", 0x6a, INSTR_SIY_IRD },
- { "alsi", 0x6e, INSTR_SIY_IRD },
- { "agsi", 0x7a, INSTR_SIY_IRD },
- { "algsi", 0x7e, INSTR_SIY_IRD },
- { "icmh", 0x80, INSTR_RSY_RURD },
- { "icmy", 0x81, INSTR_RSY_RURD },
- { "clclu", 0x8f, INSTR_RSY_RRRD },
- { "stmy", 0x90, INSTR_RSY_RRRD },
- { "lmh", 0x96, INSTR_RSY_RRRD },
- { "lmy", 0x98, INSTR_RSY_RRRD },
- { "lamy", 0x9a, INSTR_RSY_AARD },
- { "stamy", 0x9b, INSTR_RSY_AARD },
- { { 0, LONG_INSN_PCISTB }, 0xd0, INSTR_RSY_RRRD },
- { "sic", 0xd1, INSTR_RSY_RRRD },
- { "srak", 0xdc, INSTR_RSY_RRRD },
- { "slak", 0xdd, INSTR_RSY_RRRD },
- { "srlk", 0xde, INSTR_RSY_RRRD },
- { "sllk", 0xdf, INSTR_RSY_RRRD },
- { "locg", 0xe2, INSTR_RSY_RDRM },
- { "stocg", 0xe3, INSTR_RSY_RDRM },
- { "lang", 0xe4, INSTR_RSY_RRRD },
- { "laog", 0xe6, INSTR_RSY_RRRD },
- { "laxg", 0xe7, INSTR_RSY_RRRD },
- { "laag", 0xe8, INSTR_RSY_RRRD },
- { "laalg", 0xea, INSTR_RSY_RRRD },
- { "loc", 0xf2, INSTR_RSY_RDRM },
- { "stoc", 0xf3, INSTR_RSY_RDRM },
- { "lan", 0xf4, INSTR_RSY_RRRD },
- { "lao", 0xf6, INSTR_RSY_RRRD },
- { "lax", 0xf7, INSTR_RSY_RRRD },
- { "laa", 0xf8, INSTR_RSY_RRRD },
- { "laal", 0xfa, INSTR_RSY_RRRD },
- { "lric", 0x60, INSTR_RSY_RDRM },
- { "stric", 0x61, INSTR_RSY_RDRM },
- { "mric", 0x62, INSTR_RSY_RDRM },
- { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
- { "rll", 0x1d, INSTR_RSY_RRRD },
- { "mvclu", 0x8e, INSTR_RSY_RRRD },
- { "tp", 0xc0, INSTR_RSL_R0RD },
- { "", 0, INSTR_INVALID }
+ [VX_12] = { 4, 12, OPERAND_INDEX | OPERAND_VR },
+ [V_8] = { 4, 8, OPERAND_VR },
+ [V_12] = { 4, 12, OPERAND_VR },
+ [V_16] = { 4, 16, OPERAND_VR },
+ [V_32] = { 4, 32, OPERAND_VR },
+ [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR },
};
-static struct s390_insn opcode_ec[] = {
- { "brxhg", 0x44, INSTR_RIE_RRP },
- { "brxlg", 0x45, INSTR_RIE_RRP },
- { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
- { "rnsbg", 0x54, INSTR_RIE_RRUUU },
- { "risbg", 0x55, INSTR_RIE_RRUUU },
- { "rosbg", 0x56, INSTR_RIE_RRUUU },
- { "rxsbg", 0x57, INSTR_RIE_RRUUU },
- { { 0, LONG_INSN_RISBGN }, 0x59, INSTR_RIE_RRUUU },
- { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU },
- { "cgrj", 0x64, INSTR_RIE_RRPU },
- { "clgrj", 0x65, INSTR_RIE_RRPU },
- { "cgit", 0x70, INSTR_RIE_R0IU },
- { "clgit", 0x71, INSTR_RIE_R0UU },
- { "cit", 0x72, INSTR_RIE_R0IU },
- { "clfit", 0x73, INSTR_RIE_R0UU },
- { "crj", 0x76, INSTR_RIE_RRPU },
- { "clrj", 0x77, INSTR_RIE_RRPU },
- { "cgij", 0x7c, INSTR_RIE_RUPI },
- { "clgij", 0x7d, INSTR_RIE_RUPU },
- { "cij", 0x7e, INSTR_RIE_RUPI },
- { "clij", 0x7f, INSTR_RIE_RUPU },
- { "ahik", 0xd8, INSTR_RIE_RRI0 },
- { "aghik", 0xd9, INSTR_RIE_RRI0 },
- { { 0, LONG_INSN_ALHSIK }, 0xda, INSTR_RIE_RRI0 },
- { { 0, LONG_INSN_ALGHSIK }, 0xdb, INSTR_RIE_RRI0 },
- { "cgrb", 0xe4, INSTR_RRS_RRRDU },
- { "clgrb", 0xe5, INSTR_RRS_RRRDU },
- { "crb", 0xf6, INSTR_RRS_RRRDU },
- { "clrb", 0xf7, INSTR_RRS_RRRDU },
- { "cgib", 0xfc, INSTR_RIS_RURDI },
- { "clgib", 0xfd, INSTR_RIS_RURDU },
- { "cib", 0xfe, INSTR_RIS_RURDI },
- { "clib", 0xff, INSTR_RIS_RURDU },
- { "", 0, INSTR_INVALID }
+static const unsigned char formats[][6] = {
+ [INSTR_E] = { 0, 0, 0, 0, 0, 0 },
+ [INSTR_IE_UU] = { U4_24, U4_28, 0, 0, 0, 0 },
+ [INSTR_MII_UPP] = { U4_8, J12_12, J24_24 },
+ [INSTR_RIE_R0IU] = { R_8, I16_16, U4_32, 0, 0, 0 },
+ [INSTR_RIE_R0UU] = { R_8, U16_16, U4_32, 0, 0, 0 },
+ [INSTR_RIE_RRI0] = { R_8, R_12, I16_16, 0, 0, 0 },
+ [INSTR_RIE_RRP] = { R_8, R_12, J16_16, 0, 0, 0 },
+ [INSTR_RIE_RRPU] = { R_8, R_12, U4_32, J16_16, 0, 0 },
+ [INSTR_RIE_RRUUU] = { R_8, R_12, U8_16, U8_24, U8_32, 0 },
+ [INSTR_RIE_RUI0] = { R_8, I16_16, U4_12, 0, 0, 0 },
+ [INSTR_RIE_RUPI] = { R_8, I8_32, U4_12, J16_16, 0, 0 },
+ [INSTR_RIE_RUPU] = { R_8, U8_32, U4_12, J16_16, 0, 0 },
+ [INSTR_RIL_RI] = { R_8, I32_16, 0, 0, 0, 0 },
+ [INSTR_RIL_RP] = { R_8, J32_16, 0, 0, 0, 0 },
+ [INSTR_RIL_RU] = { R_8, U32_16, 0, 0, 0, 0 },
+ [INSTR_RIL_UP] = { U4_8, J32_16, 0, 0, 0, 0 },
+ [INSTR_RIS_RURDI] = { R_8, I8_32, U4_12, D_20, B_16, 0 },
+ [INSTR_RIS_RURDU] = { R_8, U8_32, U4_12, D_20, B_16, 0 },
+ [INSTR_RI_RI] = { R_8, I16_16, 0, 0, 0, 0 },
+ [INSTR_RI_RP] = { R_8, J16_16, 0, 0, 0, 0 },
+ [INSTR_RI_RU] = { R_8, U16_16, 0, 0, 0, 0 },
+ [INSTR_RI_UP] = { U4_8, J16_16, 0, 0, 0, 0 },
+ [INSTR_RRE_00] = { 0, 0, 0, 0, 0, 0 },
+ [INSTR_RRE_AA] = { A_24, A_28, 0, 0, 0, 0 },
+ [INSTR_RRE_AR] = { A_24, R_28, 0, 0, 0, 0 },
+ [INSTR_RRE_F0] = { F_24, 0, 0, 0, 0, 0 },
+ [INSTR_RRE_FF] = { F_24, F_28, 0, 0, 0, 0 },
+ [INSTR_RRE_FR] = { F_24, R_28, 0, 0, 0, 0 },
+ [INSTR_RRE_R0] = { R_24, 0, 0, 0, 0, 0 },
+ [INSTR_RRE_RA] = { R_24, A_28, 0, 0, 0, 0 },
+ [INSTR_RRE_RF] = { R_24, F_28, 0, 0, 0, 0 },
+ [INSTR_RRE_RR] = { R_24, R_28, 0, 0, 0, 0 },
+ [INSTR_RRF_0UFF] = { F_24, F_28, U4_20, 0, 0, 0 },
+ [INSTR_RRF_0URF] = { R_24, F_28, U4_20, 0, 0, 0 },
+ [INSTR_RRF_F0FF] = { F_16, F_24, F_28, 0, 0, 0 },
+ [INSTR_RRF_F0FF2] = { F_24, F_16, F_28, 0, 0, 0 },
+ [INSTR_RRF_F0FR] = { F_24, F_16, R_28, 0, 0, 0 },
+ [INSTR_RRF_FFRU] = { F_24, F_16, R_28, U4_20, 0, 0 },
+ [INSTR_RRF_FUFF] = { F_24, F_16, F_28, U4_20, 0, 0 },
+ [INSTR_RRF_FUFF2] = { F_24, F_28, F_16, U4_20, 0, 0 },
+ [INSTR_RRF_R0RR] = { R_24, R_16, R_28, 0, 0, 0 },
+ [INSTR_RRF_R0RR2] = { R_24, R_28, R_16, 0, 0, 0 },
+ [INSTR_RRF_RURR] = { R_24, R_28, R_16, U4_20, 0, 0 },
+ [INSTR_RRF_RURR2] = { R_24, R_16, R_28, U4_20, 0, 0 },
+ [INSTR_RRF_U0FF] = { F_24, U4_16, F_28, 0, 0, 0 },
+ [INSTR_RRF_U0RF] = { R_24, U4_16, F_28, 0, 0, 0 },
+ [INSTR_RRF_U0RR] = { R_24, R_28, U4_16, 0, 0, 0 },
+ [INSTR_RRF_UUFF] = { F_24, U4_16, F_28, U4_20, 0, 0 },
+ [INSTR_RRF_UUFR] = { F_24, U4_16, R_28, U4_20, 0, 0 },
+ [INSTR_RRF_UURF] = { R_24, U4_16, F_28, U4_20, 0, 0 },
+ [INSTR_RRS_RRRDU] = { R_8, R_12, U4_32, D_20, B_16 },
+ [INSTR_RR_FF] = { F_8, F_12, 0, 0, 0, 0 },
+ [INSTR_RR_R0] = { R_8, 0, 0, 0, 0, 0 },
+ [INSTR_RR_RR] = { R_8, R_12, 0, 0, 0, 0 },
+ [INSTR_RR_U0] = { U8_8, 0, 0, 0, 0, 0 },
+ [INSTR_RR_UR] = { U4_8, R_12, 0, 0, 0, 0 },
+ [INSTR_RSI_RRP] = { R_8, R_12, J16_16, 0, 0, 0 },
+ [INSTR_RSL_LRDFU] = { F_32, D_20, L8_8, B_16, U4_36, 0 },
+ [INSTR_RSL_R0RD] = { D_20, L4_8, B_16, 0, 0, 0 },
+ [INSTR_RSY_AARD] = { A_8, A_12, D20_20, B_16, 0, 0 },
+ [INSTR_RSY_CCRD] = { C_8, C_12, D20_20, B_16, 0, 0 },
+ [INSTR_RSY_RDRU] = { R_8, D20_20, B_16, U4_12, 0, 0 },
+ [INSTR_RSY_RRRD] = { R_8, R_12, D20_20, B_16, 0, 0 },
+ [INSTR_RSY_RURD] = { R_8, U4_12, D20_20, B_16, 0, 0 },
+ [INSTR_RSY_RURD2] = { R_8, D20_20, B_16, U4_12, 0, 0 },
+ [INSTR_RS_AARD] = { A_8, A_12, D_20, B_16, 0, 0 },
+ [INSTR_RS_CCRD] = { C_8, C_12, D_20, B_16, 0, 0 },
+ [INSTR_RS_R0RD] = { R_8, D_20, B_16, 0, 0, 0 },
+ [INSTR_RS_RRRD] = { R_8, R_12, D_20, B_16, 0, 0 },
+ [INSTR_RS_RURD] = { R_8, U4_12, D_20, B_16, 0, 0 },
+ [INSTR_RXE_FRRD] = { F_8, D_20, X_12, B_16, 0, 0 },
+ [INSTR_RXE_RRRDU] = { R_8, D_20, X_12, B_16, U4_32, 0 },
+ [INSTR_RXF_FRRDF] = { F_32, F_8, D_20, X_12, B_16, 0 },
+ [INSTR_RXY_FRRD] = { F_8, D20_20, X_12, B_16, 0, 0 },
+ [INSTR_RXY_RRRD] = { R_8, D20_20, X_12, B_16, 0, 0 },
+ [INSTR_RXY_URRD] = { U4_8, D20_20, X_12, B_16, 0, 0 },
+ [INSTR_RX_FRRD] = { F_8, D_20, X_12, B_16, 0, 0 },
+ [INSTR_RX_RRRD] = { R_8, D_20, X_12, B_16, 0, 0 },
+ [INSTR_RX_URRD] = { U4_8, D_20, X_12, B_16, 0, 0 },
+ [INSTR_SIL_RDI] = { D_20, B_16, I16_32, 0, 0, 0 },
+ [INSTR_SIL_RDU] = { D_20, B_16, U16_32, 0, 0, 0 },
+ [INSTR_SIY_IRD] = { D20_20, B_16, I8_8, 0, 0, 0 },
+ [INSTR_SIY_URD] = { D20_20, B_16, U8_8, 0, 0, 0 },
+ [INSTR_SI_RD] = { D_20, B_16, 0, 0, 0, 0 },
+ [INSTR_SI_URD] = { D_20, B_16, U8_8, 0, 0, 0 },
+ [INSTR_SMI_U0RDP] = { U4_8, J16_32, D_20, B_16, 0, 0 },
+ [INSTR_SSE_RDRD] = { D_20, B_16, D_36, B_32, 0, 0 },
+ [INSTR_SSF_RRDRD] = { D_20, B_16, D_36, B_32, R_8, 0 },
+ [INSTR_SSF_RRDRD2] = { R_8, D_20, B_16, D_36, B_32, 0 },
+ [INSTR_SS_L0RDRD] = { D_20, L8_8, B_16, D_36, B_32, 0 },
+ [INSTR_SS_L2RDRD] = { D_20, B_16, D_36, L8_8, B_32, 0 },
+ [INSTR_SS_LIRDRD] = { D_20, L4_8, B_16, D_36, B_32, U4_12 },
+ [INSTR_SS_LLRDRD] = { D_20, L4_8, B_16, D_36, L4_12, B_32 },
+ [INSTR_SS_RRRDRD] = { D_20, R_8, B_16, D_36, B_32, R_12 },
+ [INSTR_SS_RRRDRD2] = { R_8, D_20, B_16, R_12, D_36, B_32 },
+ [INSTR_SS_RRRDRD3] = { R_8, R_12, D_20, B_16, D_36, B_32 },
+ [INSTR_S_00] = { 0, 0, 0, 0, 0, 0 },
+ [INSTR_S_RD] = { D_20, B_16, 0, 0, 0, 0 },
+ [INSTR_VRI_V0IU] = { V_8, I16_16, U4_32, 0, 0, 0 },
+ [INSTR_VRI_V0U] = { V_8, U16_16, 0, 0, 0, 0 },
+ [INSTR_VRI_V0UU2] = { V_8, U16_16, U4_32, 0, 0, 0 },
+ [INSTR_VRI_V0UUU] = { V_8, U8_16, U8_24, U4_32, 0, 0 },
+ [INSTR_VRI_VR0UU] = { V_8, R_12, U8_28, U4_24, 0, 0 },
+ [INSTR_VRI_VVUU] = { V_8, V_12, U16_16, U4_32, 0, 0 },
+ [INSTR_VRI_VVUUU] = { V_8, V_12, U12_16, U4_32, U4_28, 0 },
+ [INSTR_VRI_VVUUU2] = { V_8, V_12, U8_28, U8_16, U4_24, 0 },
+ [INSTR_VRI_VVV0U] = { V_8, V_12, V_16, U8_24, 0, 0 },
+ [INSTR_VRI_VVV0UU] = { V_8, V_12, V_16, U8_24, U4_32, 0 },
+ [INSTR_VRI_VVV0UU2] = { V_8, V_12, V_16, U8_28, U4_24, 0 },
+ [INSTR_VRR_0V] = { V_12, 0, 0, 0, 0, 0 },
+ [INSTR_VRR_0VV0U] = { V_12, V_16, U4_24, 0, 0, 0 },
+ [INSTR_VRR_RV0U] = { R_8, V_12, U4_24, 0, 0, 0 },
+ [INSTR_VRR_VRR] = { V_8, R_12, R_16, 0, 0, 0 },
+ [INSTR_VRR_VV] = { V_8, V_12, 0, 0, 0, 0 },
+ [INSTR_VRR_VV0U] = { V_8, V_12, U4_32, 0, 0, 0 },
+ [INSTR_VRR_VV0U0U] = { V_8, V_12, U4_32, U4_24, 0, 0 },
+ [INSTR_VRR_VV0UU2] = { V_8, V_12, U4_32, U4_28, 0, 0 },
+ [INSTR_VRR_VV0UUU] = { V_8, V_12, U4_32, U4_28, U4_24, 0 },
+ [INSTR_VRR_VVV] = { V_8, V_12, V_16, 0, 0, 0 },
+ [INSTR_VRR_VVV0U] = { V_8, V_12, V_16, U4_32, 0, 0 },
+ [INSTR_VRR_VVV0U0U] = { V_8, V_12, V_16, U4_32, U4_24, 0 },
+ [INSTR_VRR_VVV0UU] = { V_8, V_12, V_16, U4_32, U4_28, 0 },
+ [INSTR_VRR_VVV0UUU] = { V_8, V_12, V_16, U4_32, U4_28, U4_24 },
+ [INSTR_VRR_VVV0V] = { V_8, V_12, V_16, V_32, 0, 0 },
+ [INSTR_VRR_VVVU0UV] = { V_8, V_12, V_16, V_32, U4_28, U4_20 },
+ [INSTR_VRR_VVVU0V] = { V_8, V_12, V_16, V_32, U4_20, 0 },
+ [INSTR_VRR_VVVUU0V] = { V_8, V_12, V_16, V_32, U4_20, U4_24 },
+ [INSTR_VRS_RRDV] = { V_32, R_12, D_20, B_16, 0, 0 },
+ [INSTR_VRS_RVRDU] = { R_8, V_12, D_20, B_16, U4_32, 0 },
+ [INSTR_VRS_VRRD] = { V_8, R_12, D_20, B_16, 0, 0 },
+ [INSTR_VRS_VRRDU] = { V_8, R_12, D_20, B_16, U4_32, 0 },
+ [INSTR_VRS_VVRD] = { V_8, V_12, D_20, B_16, 0, 0 },
+ [INSTR_VRS_VVRDU] = { V_8, V_12, D_20, B_16, U4_32, 0 },
+ [INSTR_VRV_VVXRDU] = { V_8, D_20, VX_12, B_16, U4_32, 0 },
+ [INSTR_VRX_VRRD] = { V_8, D_20, X_12, B_16, 0, 0 },
+ [INSTR_VRX_VRRDU] = { V_8, D_20, X_12, B_16, U4_32, 0 },
+ [INSTR_VRX_VV] = { V_8, V_12, 0, 0, 0, 0 },
+ [INSTR_VSI_URDV] = { V_32, D_20, B_16, U8_8, 0, 0 },
};
-static struct s390_insn opcode_ed[] = {
- { "mayl", 0x38, INSTR_RXF_FRRDF },
- { "myl", 0x39, INSTR_RXF_FRRDF },
- { "may", 0x3a, INSTR_RXF_FRRDF },
- { "my", 0x3b, INSTR_RXF_FRRDF },
- { "mayh", 0x3c, INSTR_RXF_FRRDF },
- { "myh", 0x3d, INSTR_RXF_FRRDF },
- { "sldt", 0x40, INSTR_RXF_FRRDF },
- { "srdt", 0x41, INSTR_RXF_FRRDF },
- { "slxt", 0x48, INSTR_RXF_FRRDF },
- { "srxt", 0x49, INSTR_RXF_FRRDF },
- { "tdcet", 0x50, INSTR_RXE_FRRD },
- { "tdget", 0x51, INSTR_RXE_FRRD },
- { "tdcdt", 0x54, INSTR_RXE_FRRD },
- { "tdgdt", 0x55, INSTR_RXE_FRRD },
- { "tdcxt", 0x58, INSTR_RXE_FRRD },
- { "tdgxt", 0x59, INSTR_RXE_FRRD },
- { "ley", 0x64, INSTR_RXY_FRRD },
- { "ldy", 0x65, INSTR_RXY_FRRD },
- { "stey", 0x66, INSTR_RXY_FRRD },
- { "stdy", 0x67, INSTR_RXY_FRRD },
- { "czdt", 0xa8, INSTR_RSL_LRDFU },
- { "czxt", 0xa9, INSTR_RSL_LRDFU },
- { "cdzt", 0xaa, INSTR_RSL_LRDFU },
- { "cxzt", 0xab, INSTR_RSL_LRDFU },
- { "ldeb", 0x04, INSTR_RXE_FRRD },
- { "lxdb", 0x05, INSTR_RXE_FRRD },
- { "lxeb", 0x06, INSTR_RXE_FRRD },
- { "mxdb", 0x07, INSTR_RXE_FRRD },
- { "keb", 0x08, INSTR_RXE_FRRD },
- { "ceb", 0x09, INSTR_RXE_FRRD },
- { "aeb", 0x0a, INSTR_RXE_FRRD },
- { "seb", 0x0b, INSTR_RXE_FRRD },
- { "mdeb", 0x0c, INSTR_RXE_FRRD },
- { "deb", 0x0d, INSTR_RXE_FRRD },
- { "maeb", 0x0e, INSTR_RXF_FRRDF },
- { "mseb", 0x0f, INSTR_RXF_FRRDF },
- { "tceb", 0x10, INSTR_RXE_FRRD },
- { "tcdb", 0x11, INSTR_RXE_FRRD },
- { "tcxb", 0x12, INSTR_RXE_FRRD },
- { "sqeb", 0x14, INSTR_RXE_FRRD },
- { "sqdb", 0x15, INSTR_RXE_FRRD },
- { "meeb", 0x17, INSTR_RXE_FRRD },
- { "kdb", 0x18, INSTR_RXE_FRRD },
- { "cdb", 0x19, INSTR_RXE_FRRD },
- { "adb", 0x1a, INSTR_RXE_FRRD },
- { "sdb", 0x1b, INSTR_RXE_FRRD },
- { "mdb", 0x1c, INSTR_RXE_FRRD },
- { "ddb", 0x1d, INSTR_RXE_FRRD },
- { "madb", 0x1e, INSTR_RXF_FRRDF },
- { "msdb", 0x1f, INSTR_RXF_FRRDF },
- { "lde", 0x24, INSTR_RXE_FRRD },
- { "lxd", 0x25, INSTR_RXE_FRRD },
- { "lxe", 0x26, INSTR_RXE_FRRD },
- { "mae", 0x2e, INSTR_RXF_FRRDF },
- { "mse", 0x2f, INSTR_RXF_FRRDF },
- { "sqe", 0x34, INSTR_RXE_FRRD },
- { "sqd", 0x35, INSTR_RXE_FRRD },
- { "mee", 0x37, INSTR_RXE_FRRD },
- { "mad", 0x3e, INSTR_RXF_FRRDF },
- { "msd", 0x3f, INSTR_RXF_FRRDF },
- { "", 0, INSTR_INVALID }
-};
+static char long_insn_name[][7] = LONG_INSN_INITIALIZER;
+static struct s390_insn opcode[] = OPCODE_TABLE_INITIALIZER;
+static struct s390_opcode_offset opcode_offset[] = OPCODE_OFFSET_INITIALIZER;
/* Extracts an operand value from an instruction. */
static unsigned int extract_operand(unsigned char *code,
@@ -1777,114 +391,27 @@ static unsigned int extract_operand(unsigned char *code,
struct s390_insn *find_insn(unsigned char *code)
{
- unsigned char opfrag = code[1];
- unsigned char opmask;
- struct s390_insn *table;
+ struct s390_opcode_offset *entry;
+ struct s390_insn *insn;
+ unsigned char opfrag;
+ int i;
- switch (code[0]) {
- case 0x01:
- table = opcode_01;
- break;
- case 0xa5:
- table = opcode_a5;
- break;
- case 0xa7:
- table = opcode_a7;
- break;
- case 0xaa:
- table = opcode_aa;
- break;
- case 0xb2:
- table = opcode_b2;
- break;
- case 0xb3:
- table = opcode_b3;
- break;
- case 0xb9:
- table = opcode_b9;
- break;
- case 0xc0:
- table = opcode_c0;
- break;
- case 0xc2:
- table = opcode_c2;
- break;
- case 0xc4:
- table = opcode_c4;
- break;
- case 0xc6:
- table = opcode_c6;
- break;
- case 0xc8:
- table = opcode_c8;
- break;
- case 0xcc:
- table = opcode_cc;
- break;
- case 0xe3:
- table = opcode_e3;
- opfrag = code[5];
- break;
- case 0xe5:
- table = opcode_e5;
- break;
- case 0xe7:
- table = opcode_e7;
- opfrag = code[5];
- break;
- case 0xeb:
- table = opcode_eb;
- opfrag = code[5];
- break;
- case 0xec:
- table = opcode_ec;
- opfrag = code[5];
- break;
- case 0xed:
- table = opcode_ed;
- opfrag = code[5];
- break;
- default:
- table = opcode;
- opfrag = code[0];
- break;
- }
- while (table->format != INSTR_INVALID) {
- opmask = formats[table->format][0];
- if (table->opfrag == (opfrag & opmask))
- return table;
- table++;
+ for (i = 0; i < ARRAY_SIZE(opcode_offset); i++) {
+ entry = &opcode_offset[i];
+ if (entry->opcode == code[0] || entry->opcode == 0)
+ break;
}
- return NULL;
-}
-/**
- * insn_to_mnemonic - decode an s390 instruction
- * @instruction: instruction to decode
- * @buf: buffer to fill with mnemonic
- * @len: length of buffer
- *
- * Decode the instruction at @instruction and store the corresponding
- * mnemonic into @buf of length @len.
- * @buf is left unchanged if the instruction could not be decoded.
- * Returns:
- * %0 on success, %-ENOENT if the instruction was not found.
- */
-int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len)
-{
- struct s390_insn *insn;
+ opfrag = *(code + entry->byte) & entry->mask;
- insn = find_insn(instruction);
- if (!insn)
- return -ENOENT;
- if (insn->name[0] == '\0')
- snprintf(buf, len, "%s",
- long_insn_name[(int) insn->name[1]]);
- else
- snprintf(buf, len, "%.5s", insn->name);
- return 0;
+ insn = &opcode[entry->offset];
+ for (i = 0; i < entry->count; i++) {
+ if (insn->opfrag == opfrag)
+ return insn;
+ insn++;
+ }
+ return NULL;
}
-EXPORT_SYMBOL_GPL(insn_to_mnemonic);
static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
{
@@ -1899,14 +426,14 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
ptr = buffer;
insn = find_insn(code);
if (insn) {
- if (insn->name[0] == '\0')
- ptr += sprintf(ptr, "%s\t",
- long_insn_name[(int) insn->name[1]]);
+ if (insn->zero == 0)
+ ptr += sprintf(ptr, "%.7s\t",
+ long_insn_name[insn->offset]);
else
ptr += sprintf(ptr, "%.5s\t", insn->name);
/* Extract the operands. */
separator = 0;
- for (ops = formats[insn->format] + 1, i = 0;
+ for (ops = formats[insn->format], i = 0;
*ops != 0 && i < 6; ops++, i++) {
operand = operands + *ops;
value = extract_operand(code, operand);
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index b945448b9eae..497a92047591 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -31,14 +31,6 @@
#include <asm/facility.h>
#include "entry.h"
-/*
- * Create a Kernel NSS if the SAVESYS= parameter is defined
- */
-#define DEFSYS_CMD_SIZE 128
-#define SAVESYS_CMD_SIZE 32
-
-char kernel_nss_name[NSS_NAME_SIZE + 1];
-
static void __init setup_boot_command_line(void);
/*
@@ -59,134 +51,6 @@ static void __init reset_tod_clock(void)
S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}
-#ifdef CONFIG_SHARED_KERNEL
-int __init savesys_ipl_nss(char *cmd, const int cmdlen);
-
-asm(
- " .section .init.text,\"ax\",@progbits\n"
- " .align 4\n"
- " .type savesys_ipl_nss, @function\n"
- "savesys_ipl_nss:\n"
- " stmg 6,15,48(15)\n"
- " lgr 14,3\n"
- " sam31\n"
- " diag 2,14,0x8\n"
- " sam64\n"
- " lgr 2,14\n"
- " lmg 6,15,48(15)\n"
- " br 14\n"
- " .size savesys_ipl_nss, .-savesys_ipl_nss\n"
- " .previous\n");
-
-static __initdata char upper_command_line[COMMAND_LINE_SIZE];
-
-static noinline __init void create_kernel_nss(void)
-{
- unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
-#ifdef CONFIG_BLK_DEV_INITRD
- unsigned int sinitrd_pfn, einitrd_pfn;
-#endif
- int response;
- int hlen;
- size_t len;
- char *savesys_ptr;
- char defsys_cmd[DEFSYS_CMD_SIZE];
- char savesys_cmd[SAVESYS_CMD_SIZE];
-
- /* Do nothing if we are not running under VM */
- if (!MACHINE_IS_VM)
- return;
-
- /* Convert COMMAND_LINE to upper case */
- for (i = 0; i < strlen(boot_command_line); i++)
- upper_command_line[i] = toupper(boot_command_line[i]);
-
- savesys_ptr = strstr(upper_command_line, "SAVESYS=");
-
- if (!savesys_ptr)
- return;
-
- savesys_ptr += 8; /* Point to the beginning of the NSS name */
- for (i = 0; i < NSS_NAME_SIZE; i++) {
- if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
- break;
- kernel_nss_name[i] = savesys_ptr[i];
- }
-
- stext_pfn = PFN_DOWN(__pa(&_stext));
- eshared_pfn = PFN_DOWN(__pa(&_eshared));
- end_pfn = PFN_UP(__pa(&_end));
- min_size = end_pfn << 2;
-
- hlen = snprintf(defsys_cmd, DEFSYS_CMD_SIZE,
- "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
- kernel_nss_name, stext_pfn - 1, stext_pfn,
- eshared_pfn - 1, eshared_pfn, end_pfn);
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (INITRD_START && INITRD_SIZE) {
- sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
- einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
- min_size = einitrd_pfn << 2;
- hlen += snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
- " EW %.5X-%.5X", sinitrd_pfn, einitrd_pfn);
- }
-#endif
-
- snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
- " EW MINSIZE=%.7iK PARMREGS=0-13", min_size);
- defsys_cmd[DEFSYS_CMD_SIZE - 1] = '\0';
- snprintf(savesys_cmd, SAVESYS_CMD_SIZE, "SAVESYS %s \n IPL %s",
- kernel_nss_name, kernel_nss_name);
- savesys_cmd[SAVESYS_CMD_SIZE - 1] = '\0';
-
- __cpcmd(defsys_cmd, NULL, 0, &response);
-
- if (response != 0) {
- pr_err("Defining the Linux kernel NSS failed with rc=%d\n",
- response);
- kernel_nss_name[0] = '\0';
- return;
- }
-
- len = strlen(savesys_cmd);
- ASCEBC(savesys_cmd, len);
- response = savesys_ipl_nss(savesys_cmd, len);
-
- /* On success: response is equal to the command size,
- * max SAVESYS_CMD_SIZE
- * On error: response contains the numeric portion of cp error message.
- * for SAVESYS it will be >= 263
- * for missing privilege class, it will be 1
- */
- if (response > SAVESYS_CMD_SIZE || response == 1) {
- pr_err("Saving the Linux kernel NSS failed with rc=%d\n",
- response);
- kernel_nss_name[0] = '\0';
- return;
- }
-
- /* re-initialize cputime accounting. */
- get_tod_clock_ext(tod_clock_base);
- S390_lowcore.last_update_clock = *(__u64 *) &tod_clock_base[1];
- S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
- S390_lowcore.user_timer = 0;
- S390_lowcore.system_timer = 0;
- asm volatile("SPT 0(%0)" : : "a" (&S390_lowcore.last_update_timer));
-
- /* re-setup boot command line with new ipl vm parms */
- ipl_update_parameters();
- setup_boot_command_line();
-
- ipl_flags = IPL_NSS_VALID;
-}
-
-#else /* CONFIG_SHARED_KERNEL */
-
-static inline void create_kernel_nss(void) { }
-
-#endif /* CONFIG_SHARED_KERNEL */
-
/*
* Clear bss memory
*/
@@ -375,8 +239,10 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (test_facility(40))
S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
- if (test_facility(50) && test_facility(73))
+ if (test_facility(50) && test_facility(73)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
+ __ctl_set_bit(0, 55);
+ }
if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129)) {
@@ -549,10 +415,6 @@ static void __init setup_boot_command_line(void)
append_to_cmdline(append_ipl_scpdata);
}
-/*
- * Save ipl parameters, clear bss memory, initialize storage keys
- * and create a kernel NSS at startup if the SAVESYS= parm is defined
- */
void __init startup_init(void)
{
reset_tod_clock();
@@ -569,7 +431,6 @@ void __init startup_init(void)
setup_arch_string();
ipl_update_parameters();
setup_boot_command_line();
- create_kernel_nss();
detect_diag9c();
detect_diag44();
detect_machine_facilities();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 7c6904d616d8..f498d201f98d 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/cache.h>
+#include <asm/ctl_reg.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
@@ -952,15 +953,56 @@ load_fpu_regs:
*/
ENTRY(mcck_int_handler)
STCK __LC_MCCK_CLOCK
- la %r1,4095 # revalidate r1
- spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
- lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
+ la %r1,4095 # validate r1
+ spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
+ sckc __LC_CLOCK_COMPARATOR # validate comparator
+ lam %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
+ lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_MCK_OLD_PSW
TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
jo .Lmcck_panic # yes -> rest of mcck code invalid
- lghi %r14,__LC_CPU_TIMER_SAVE_AREA
+ TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID
+ jno .Lmcck_panic # control registers invalid -> panic
+ la %r14,4095
+ lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
+ ptlb
+ lg %r11,__LC_MCESAD-4095(%r14) # extended machine check save area
+ nill %r11,0xfc00 # MCESA_ORIGIN_MASK
+ TSTMSK __LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
+ jno 0f
+ TSTMSK __LC_MCCK_CODE,MCCK_CODE_GS_VALID
+ jno 0f
+ .insn rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
+0: l %r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
+ TSTMSK __LC_MCCK_CODE,MCCK_CODE_FC_VALID
+ jo 0f
+ sr %r14,%r14
+0: sfpc %r14
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
+ jo 0f
+ lghi %r14,__LC_FPREGS_SAVE_AREA
+ ld %f0,0(%r14)
+ ld %f1,8(%r14)
+ ld %f2,16(%r14)
+ ld %f3,24(%r14)
+ ld %f4,32(%r14)
+ ld %f5,40(%r14)
+ ld %f6,48(%r14)
+ ld %f7,56(%r14)
+ ld %f8,64(%r14)
+ ld %f9,72(%r14)
+ ld %f10,80(%r14)
+ ld %f11,88(%r14)
+ ld %f12,96(%r14)
+ ld %f13,104(%r14)
+ ld %f14,112(%r14)
+ ld %f15,120(%r14)
+ j 1f
+0: VLM %v0,%v15,0,%r11
+ VLM %v16,%v31,256,%r11
+1: lghi %r14,__LC_CPU_TIMER_SAVE_AREA
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
jo 3f
@@ -976,9 +1018,13 @@ ENTRY(mcck_int_handler)
la %r14,__LC_LAST_UPDATE_TIMER
2: spt 0(%r14)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
-3: TSTMSK __LC_MCCK_CODE,(MCCK_CODE_PSW_MWP_VALID|MCCK_CODE_PSW_IA_VALID)
- jno .Lmcck_panic # no -> skip cleanup critical
- SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
+3: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
+ jno .Lmcck_panic
+ tmhh %r8,0x0001 # interrupting from user ?
+ jnz 4f
+ TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
+ jno .Lmcck_panic
+4: SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
lghi %r14,__LC_GPREGS_SAVE_AREA+64
stmg %r0,%r7,__PT_R0(%r11)
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 905bde782490..e87758f8fbdc 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -78,6 +78,7 @@ long sys_s390_runtime_instr(int command, int signum);
long sys_s390_guarded_storage(int command, struct gs_cb __user *);
long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
+long sys_s390_sthyi(unsigned long function_code, void __user *buffer, u64 __user *return_code, unsigned long flags);
DECLARE_PER_CPU(u64, mt_cycles[8]);
diff --git a/arch/s390/kernel/guarded_storage.c b/arch/s390/kernel/guarded_storage.c
index bff39b66c9ff..d14dd1c2e524 100644
--- a/arch/s390/kernel/guarded_storage.c
+++ b/arch/s390/kernel/guarded_storage.c
@@ -12,11 +12,10 @@
#include <asm/guarded_storage.h>
#include "entry.h"
-void exit_thread_gs(void)
+void guarded_storage_release(struct task_struct *tsk)
{
- kfree(current->thread.gs_cb);
- kfree(current->thread.gs_bc_cb);
- current->thread.gs_cb = current->thread.gs_bc_cb = NULL;
+ kfree(tsk->thread.gs_cb);
+ kfree(tsk->thread.gs_bc_cb);
}
static int gs_enable(void)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 8e622bb52f7a..310e59e6eb4b 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -279,8 +279,6 @@ static __init enum ipl_type get_ipl_type(void)
{
struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
- if (ipl_flags & IPL_NSS_VALID)
- return IPL_TYPE_NSS;
if (!(ipl_flags & IPL_DEVNO_VALID))
return IPL_TYPE_UNKNOWN;
if (!(ipl_flags & IPL_PARMBLOCK_VALID))
@@ -533,22 +531,6 @@ static struct attribute_group ipl_ccw_attr_group_lpar = {
.attrs = ipl_ccw_attrs_lpar
};
-/* NSS ipl device attributes */
-
-DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name);
-
-static struct attribute *ipl_nss_attrs[] = {
- &sys_ipl_type_attr.attr,
- &sys_ipl_nss_name_attr.attr,
- &sys_ipl_ccw_loadparm_attr.attr,
- &sys_ipl_vm_parm_attr.attr,
- NULL,
-};
-
-static struct attribute_group ipl_nss_attr_group = {
- .attrs = ipl_nss_attrs,
-};
-
/* UNKNOWN ipl device attributes */
static struct attribute *ipl_unknown_attrs[] = {
@@ -598,9 +580,6 @@ static int __init ipl_init(void)
case IPL_TYPE_FCP_DUMP:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
break;
- case IPL_TYPE_NSS:
- rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group);
- break;
default:
rc = sysfs_create_group(&ipl_kset->kobj,
&ipl_unknown_attr_group);
@@ -1172,18 +1151,6 @@ static int __init reipl_nss_init(void)
return rc;
reipl_block_ccw_init(reipl_block_nss);
- if (ipl_info.type == IPL_TYPE_NSS) {
- memset(reipl_block_nss->ipl_info.ccw.nss_name,
- ' ', NSS_NAME_SIZE);
- memcpy(reipl_block_nss->ipl_info.ccw.nss_name,
- kernel_nss_name, strlen(kernel_nss_name));
- ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
- reipl_block_nss->ipl_info.ccw.vm_flags |=
- DIAG308_VM_FLAGS_NSS_VALID;
-
- reipl_block_ccw_fill_parms(reipl_block_nss);
- }
-
reipl_capabilities |= IPL_TYPE_NSS;
return 0;
}
@@ -1971,9 +1938,6 @@ void __init setup_ipl(void)
ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
break;
case IPL_TYPE_NSS:
- strncpy(ipl_info.data.nss.name, kernel_nss_name,
- sizeof(ipl_info.data.nss.name));
- break;
case IPL_TYPE_UNKNOWN:
/* We have no info to copy */
break;
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 6842e4501e2e..1a6521af1751 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -161,8 +161,6 @@ struct swap_insn_args {
static int swap_instruction(void *data)
{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long status = kcb->kprobe_status;
struct swap_insn_args *args = data;
struct ftrace_insn new_insn, *insn;
struct kprobe *p = args->p;
@@ -185,9 +183,7 @@ static int swap_instruction(void *data)
ftrace_generate_nop_insn(&new_insn);
}
skip_ftrace:
- kcb->kprobe_status = KPROBE_SWAP_INST;
s390_kernel_write(p->addr, &new_insn, len);
- kcb->kprobe_status = status;
return 0;
}
NOKPROBE_SYMBOL(swap_instruction);
@@ -574,9 +570,6 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
const struct exception_table_entry *entry;
switch(kcb->kprobe_status) {
- case KPROBE_SWAP_INST:
- /* We are here because the instruction replacement failed */
- return 0;
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index b0ba2c26b45e..a80050bbe2e4 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -106,7 +106,7 @@ static void __do_machine_kdump(void *image)
static noinline void __machine_kdump(void *image)
{
struct mcesa *mcesa;
- unsigned long cr2_old, cr2_new;
+ union ctlreg2 cr2_old, cr2_new;
int this_cpu, cpu;
lgr_info_log();
@@ -123,11 +123,12 @@ static noinline void __machine_kdump(void *image)
if (MACHINE_HAS_VX)
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
- __ctl_store(cr2_old, 2, 2);
- cr2_new = cr2_old | (1UL << 4);
- __ctl_load(cr2_new, 2, 2);
+ __ctl_store(cr2_old.val, 2, 2);
+ cr2_new = cr2_old;
+ cr2_new.gse = 1;
+ __ctl_load(cr2_new.val, 2, 2);
save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
- __ctl_load(cr2_old, 2, 2);
+ __ctl_load(cr2_old.val, 2, 2);
}
/*
* To create a good backchain for this CPU in the dump store_status
@@ -145,7 +146,7 @@ static noinline void __machine_kdump(void *image)
/*
* Check if kdump checksums are valid: We call purgatory with parameter "0"
*/
-static int kdump_csum_valid(struct kimage *image)
+static bool kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
int (*start_kdump)(int) = (void *)image->start;
@@ -154,9 +155,9 @@ static int kdump_csum_valid(struct kimage *image)
__arch_local_irq_stnsm(0xfb); /* disable DAT */
rc = start_kdump(0);
__arch_local_irq_stosm(0x04); /* enable DAT */
- return rc ? 0 : -EINVAL;
+ return rc == 0;
#else
- return -EINVAL;
+ return false;
#endif
}
@@ -219,10 +220,6 @@ int machine_kexec_prepare(struct kimage *image)
{
void *reboot_code_buffer;
- /* Can't replace kernel image since it is read-only. */
- if (ipl_flags & IPL_NSS_VALID)
- return -EOPNOTSUPP;
-
if (image->type == KEXEC_TYPE_CRASH)
return machine_kexec_prepare_kdump();
@@ -269,6 +266,7 @@ static void __do_machine_kexec(void *data)
s390_reset_system();
data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
+ __arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
/* Call the moving routine */
(*data_mover)(&image->head, image->start);
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 1a27f307a920..6d9f73bb4142 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
+#include <asm/alternative.h>
#if 0
#define DEBUGP printk
@@ -429,6 +430,22 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
+ const Elf_Shdr *s;
+ char *secstrings;
+
+ if (IS_ENABLED(CONFIG_ALTERNATIVES)) {
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ if (!strcmp(".altinstructions",
+ secstrings + s->sh_name)) {
+ /* patch .altinstructions */
+ void *aseg = (void *)s->sh_addr;
+
+ apply_alternatives(aseg, aseg + s->sh_size);
+ }
+ }
+ }
+
jump_label_apply_nops(me);
return 0;
}
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 31d03a84126c..3f3cda41f32a 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -12,6 +12,9 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
+#include <linux/log2.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
@@ -37,13 +40,94 @@ struct mcck_struct {
};
static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
+static struct kmem_cache *mcesa_cache;
+static unsigned long mcesa_origin_lc;
-static void s390_handle_damage(void)
+static inline int nmi_needs_mcesa(void)
{
- smp_send_stop();
+ return MACHINE_HAS_VX || MACHINE_HAS_GS;
+}
+
+static inline unsigned long nmi_get_mcesa_size(void)
+{
+ if (MACHINE_HAS_GS)
+ return MCESA_MAX_SIZE;
+ return MCESA_MIN_SIZE;
+}
+
+/*
+ * The initial machine check extended save area for the boot CPU.
+ * It will be replaced by nmi_init() with an allocated structure.
+ * The structure is required for machine check happening early in
+ * the boot process.
+ */
+static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);
+
+void __init nmi_alloc_boot_cpu(struct lowcore *lc)
+{
+ if (!nmi_needs_mcesa())
+ return;
+ lc->mcesad = (unsigned long) &boot_mcesa;
+ if (MACHINE_HAS_GS)
+ lc->mcesad |= ilog2(MCESA_MAX_SIZE);
+}
+
+static int __init nmi_init(void)
+{
+ unsigned long origin, cr0, size;
+
+ if (!nmi_needs_mcesa())
+ return 0;
+ size = nmi_get_mcesa_size();
+ if (size > MCESA_MIN_SIZE)
+ mcesa_origin_lc = ilog2(size);
+ /* create slab cache for the machine-check-extended-save-areas */
+ mcesa_cache = kmem_cache_create("nmi_save_areas", size, size, 0, NULL);
+ if (!mcesa_cache)
+ panic("Couldn't create nmi save area cache");
+ origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
+ if (!origin)
+ panic("Couldn't allocate nmi save area");
+ /* The pointer is stored with mcesa_bits ORed in */
+ kmemleak_not_leak((void *) origin);
+ __ctl_store(cr0, 0, 0);
+ __ctl_clear_bit(0, 28); /* disable lowcore protection */
+ /* Replace boot_mcesa on the boot CPU */
+ S390_lowcore.mcesad = origin | mcesa_origin_lc;
+ __ctl_load(cr0, 0, 0);
+ return 0;
+}
+early_initcall(nmi_init);
+
+int nmi_alloc_per_cpu(struct lowcore *lc)
+{
+ unsigned long origin;
+
+ if (!nmi_needs_mcesa())
+ return 0;
+ origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
+ if (!origin)
+ return -ENOMEM;
+ /* The pointer is stored with mcesa_bits ORed in */
+ kmemleak_not_leak((void *) origin);
+ lc->mcesad = origin | mcesa_origin_lc;
+ return 0;
+}
+
+void nmi_free_per_cpu(struct lowcore *lc)
+{
+ if (!nmi_needs_mcesa())
+ return;
+ kmem_cache_free(mcesa_cache, (void *)(lc->mcesad & MCESA_ORIGIN_MASK));
+}
+
+static notrace void s390_handle_damage(void)
+{
+ smp_emergency_stop();
disabled_wait((unsigned long) __builtin_return_address(0));
while (1);
}
+NOKPROBE_SYMBOL(s390_handle_damage);
/*
* Main machine check handler function. Will be called with interrupts enabled
@@ -100,18 +184,16 @@ void s390_handle_mcck(void)
EXPORT_SYMBOL_GPL(s390_handle_mcck);
/*
- * returns 0 if all registers could be validated
+ * returns 0 if all required registers are available
* returns 1 otherwise
*/
-static int notrace s390_validate_registers(union mci mci, int umode)
+static int notrace s390_check_registers(union mci mci, int umode)
{
+ union ctlreg2 cr2;
int kill_task;
- u64 zero;
void *fpt_save_area;
- struct mcesa *mcesa;
kill_task = 0;
- zero = 0;
if (!mci.gr) {
/*
@@ -122,18 +204,13 @@ static int notrace s390_validate_registers(union mci mci, int umode)
s390_handle_damage();
kill_task = 1;
}
- /* Validate control registers */
+ /* Check control registers */
if (!mci.cr) {
/*
* Control registers have unknown contents.
* Can't recover and therefore stopping machine.
*/
s390_handle_damage();
- } else {
- asm volatile(
- " lctlg 0,15,0(%0)\n"
- " ptlb\n"
- : : "a" (&S390_lowcore.cregs_save_area) : "memory");
}
if (!mci.fp) {
/*
@@ -141,7 +218,6 @@ static int notrace s390_validate_registers(union mci mci, int umode)
* kernel currently uses floating point registers the
* system is stopped. If the process has its floating
* point registers loaded it is terminated.
- * Otherwise just revalidate the registers.
*/
if (S390_lowcore.fpu_flags & KERNEL_VXR_V0V7)
s390_handle_damage();
@@ -155,72 +231,29 @@ static int notrace s390_validate_registers(union mci mci, int umode)
* If the kernel currently uses the floating point
* registers and needs the FPC register the system is
* stopped. If the process has its floating point
- * registers loaded it is terminated. Otherwiese the
- * FPC is just revalidated.
+ * registers loaded it is terminated.
*/
if (S390_lowcore.fpu_flags & KERNEL_FPC)
s390_handle_damage();
- asm volatile("lfpc %0" : : "Q" (zero));
if (!test_cpu_flag(CIF_FPU))
kill_task = 1;
- } else {
- asm volatile("lfpc %0"
- : : "Q" (S390_lowcore.fpt_creg_save_area));
}
- mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
- if (!MACHINE_HAS_VX) {
- /* Validate floating point registers */
- asm volatile(
- " ld 0,0(%0)\n"
- " ld 1,8(%0)\n"
- " ld 2,16(%0)\n"
- " ld 3,24(%0)\n"
- " ld 4,32(%0)\n"
- " ld 5,40(%0)\n"
- " ld 6,48(%0)\n"
- " ld 7,56(%0)\n"
- " ld 8,64(%0)\n"
- " ld 9,72(%0)\n"
- " ld 10,80(%0)\n"
- " ld 11,88(%0)\n"
- " ld 12,96(%0)\n"
- " ld 13,104(%0)\n"
- " ld 14,112(%0)\n"
- " ld 15,120(%0)\n"
- : : "a" (fpt_save_area) : "memory");
- } else {
- /* Validate vector registers */
- union ctlreg0 cr0;
-
+ if (MACHINE_HAS_VX) {
if (!mci.vr) {
/*
* Vector registers can't be restored. If the kernel
* currently uses vector registers the system is
* stopped. If the process has its vector registers
- * loaded it is terminated. Otherwise just revalidate
- * the registers.
+ * loaded it is terminated.
*/
if (S390_lowcore.fpu_flags & KERNEL_VXR)
s390_handle_damage();
if (!test_cpu_flag(CIF_FPU))
kill_task = 1;
}
- cr0.val = S390_lowcore.cregs_save_area[0];
- cr0.afp = cr0.vx = 1;
- __ctl_load(cr0.val, 0, 0);
- asm volatile(
- " la 1,%0\n"
- " .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
- " .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
- : : "Q" (*(struct vx_array *) mcesa->vector_save_area)
- : "1");
- __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
}
- /* Validate access registers */
- asm volatile(
- " lam 0,15,0(%0)"
- : : "a" (&S390_lowcore.access_regs_save_area));
+ /* Check if access registers are valid */
if (!mci.ar) {
/*
* Access registers have unknown contents.
@@ -228,53 +261,41 @@ static int notrace s390_validate_registers(union mci mci, int umode)
*/
kill_task = 1;
}
- /* Validate guarded storage registers */
- if (MACHINE_HAS_GS && (S390_lowcore.cregs_save_area[2] & (1UL << 4))) {
- if (!mci.gs)
+ /* Check guarded storage registers */
+ cr2.val = S390_lowcore.cregs_save_area[2];
+ if (cr2.gse) {
+ if (!mci.gs) {
/*
* Guarded storage register can't be restored and
* the current process uses guarded storage.
* It has to be terminated.
*/
kill_task = 1;
- else
- load_gs_cb((struct gs_cb *)
- mcesa->guarded_storage_save_area);
+ }
}
- /*
- * We don't even try to validate the TOD register, since we simply
- * can't write something sensible into that register.
- */
- /*
- * See if we can validate the TOD programmable register with its
- * old contents (should be zero) otherwise set it to zero.
- */
- if (!mci.pr)
- asm volatile(
- " sr 0,0\n"
- " sckpf"
- : : : "0", "cc");
- else
- asm volatile(
- " l 0,%0\n"
- " sckpf"
- : : "Q" (S390_lowcore.tod_progreg_save_area)
- : "0", "cc");
- /* Validate clock comparator register */
- set_clock_comparator(S390_lowcore.clock_comparator);
/* Check if old PSW is valid */
- if (!mci.wp)
+ if (!mci.wp) {
/*
* Can't tell if we come from user or kernel mode
* -> stopping machine.
*/
s390_handle_damage();
+ }
+ /* Check for invalid kernel instruction address */
+ if (!mci.ia && !umode) {
+ /*
+ * The instruction address got lost while running
+ * in the kernel -> stopping machine.
+ */
+ s390_handle_damage();
+ }
if (!mci.ms || !mci.pm || !mci.ia)
kill_task = 1;
return kill_task;
}
+NOKPROBE_SYMBOL(s390_check_registers);
/*
* Backup the guest's machine check info to its description block
@@ -300,6 +321,7 @@ static void notrace s390_backup_mcck_info(struct pt_regs *regs)
mcck_backup->failing_storage_address
= S390_lowcore.failing_storage_address;
}
+NOKPROBE_SYMBOL(s390_backup_mcck_info);
#define MAX_IPD_COUNT 29
#define MAX_IPD_TIME (5 * 60 * USEC_PER_SEC) /* 5 minutes */
@@ -372,7 +394,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
s390_handle_damage();
}
}
- if (s390_validate_registers(mci, user_mode(regs))) {
+ if (s390_check_registers(mci, user_mode(regs))) {
/*
* Couldn't restore all register contents for the
* user space process -> mark task for termination.
@@ -443,6 +465,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
clear_cpu_flag(CIF_MCCK_GUEST);
nmi_exit();
}
+NOKPROBE_SYMBOL(s390_do_machine_check);
static int __init machine_check_init(void)
{
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index 08bfa17ba0a0..94f90cefbffc 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -10,34 +10,42 @@
/* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */
-CPUMF_EVENT_ATTR(cf, CPU_CYCLES, 0x0000);
-CPUMF_EVENT_ATTR(cf, INSTRUCTIONS, 0x0001);
-CPUMF_EVENT_ATTR(cf, L1I_DIR_WRITES, 0x0002);
-CPUMF_EVENT_ATTR(cf, L1I_PENALTY_CYCLES, 0x0003);
-CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_CPU_CYCLES, 0x0020);
-CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
-CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_DIR_WRITES, 0x0022);
-CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES, 0x0023);
-CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_DIR_WRITES, 0x0024);
-CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES, 0x0025);
-CPUMF_EVENT_ATTR(cf, L1D_DIR_WRITES, 0x0004);
-CPUMF_EVENT_ATTR(cf, L1D_PENALTY_CYCLES, 0x0005);
-CPUMF_EVENT_ATTR(cf, PRNG_FUNCTIONS, 0x0040);
-CPUMF_EVENT_ATTR(cf, PRNG_CYCLES, 0x0041);
-CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_FUNCTIONS, 0x0042);
-CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_CYCLES, 0x0043);
-CPUMF_EVENT_ATTR(cf, SHA_FUNCTIONS, 0x0044);
-CPUMF_EVENT_ATTR(cf, SHA_CYCLES, 0x0045);
-CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_FUNCTIONS, 0x0046);
-CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_CYCLES, 0x0047);
-CPUMF_EVENT_ATTR(cf, DEA_FUNCTIONS, 0x0048);
-CPUMF_EVENT_ATTR(cf, DEA_CYCLES, 0x0049);
-CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_FUNCTIONS, 0x004a);
-CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_CYCLES, 0x004b);
-CPUMF_EVENT_ATTR(cf, AES_FUNCTIONS, 0x004c);
-CPUMF_EVENT_ATTR(cf, AES_CYCLES, 0x004d);
-CPUMF_EVENT_ATTR(cf, AES_BLOCKED_FUNCTIONS, 0x004e);
-CPUMF_EVENT_ATTR(cf, AES_BLOCKED_CYCLES, 0x004f);
+CPUMF_EVENT_ATTR(cf_fvn1, CPU_CYCLES, 0x0000);
+CPUMF_EVENT_ATTR(cf_fvn1, INSTRUCTIONS, 0x0001);
+CPUMF_EVENT_ATTR(cf_fvn1, L1I_DIR_WRITES, 0x0002);
+CPUMF_EVENT_ATTR(cf_fvn1, L1I_PENALTY_CYCLES, 0x0003);
+CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_CPU_CYCLES, 0x0020);
+CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
+CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1I_DIR_WRITES, 0x0022);
+CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1I_PENALTY_CYCLES, 0x0023);
+CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1D_DIR_WRITES, 0x0024);
+CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1D_PENALTY_CYCLES, 0x0025);
+CPUMF_EVENT_ATTR(cf_fvn1, L1D_DIR_WRITES, 0x0004);
+CPUMF_EVENT_ATTR(cf_fvn1, L1D_PENALTY_CYCLES, 0x0005);
+CPUMF_EVENT_ATTR(cf_fvn3, CPU_CYCLES, 0x0000);
+CPUMF_EVENT_ATTR(cf_fvn3, INSTRUCTIONS, 0x0001);
+CPUMF_EVENT_ATTR(cf_fvn3, L1I_DIR_WRITES, 0x0002);
+CPUMF_EVENT_ATTR(cf_fvn3, L1I_PENALTY_CYCLES, 0x0003);
+CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_CPU_CYCLES, 0x0020);
+CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
+CPUMF_EVENT_ATTR(cf_fvn3, L1D_DIR_WRITES, 0x0004);
+CPUMF_EVENT_ATTR(cf_fvn3, L1D_PENALTY_CYCLES, 0x0005);
+CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_FUNCTIONS, 0x0040);
+CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_CYCLES, 0x0041);
+CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_BLOCKED_FUNCTIONS, 0x0042);
+CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_BLOCKED_CYCLES, 0x0043);
+CPUMF_EVENT_ATTR(cf_svn_generic, SHA_FUNCTIONS, 0x0044);
+CPUMF_EVENT_ATTR(cf_svn_generic, SHA_CYCLES, 0x0045);
+CPUMF_EVENT_ATTR(cf_svn_generic, SHA_BLOCKED_FUNCTIONS, 0x0046);
+CPUMF_EVENT_ATTR(cf_svn_generic, SHA_BLOCKED_CYCLES, 0x0047);
+CPUMF_EVENT_ATTR(cf_svn_generic, DEA_FUNCTIONS, 0x0048);
+CPUMF_EVENT_ATTR(cf_svn_generic, DEA_CYCLES, 0x0049);
+CPUMF_EVENT_ATTR(cf_svn_generic, DEA_BLOCKED_FUNCTIONS, 0x004a);
+CPUMF_EVENT_ATTR(cf_svn_generic, DEA_BLOCKED_CYCLES, 0x004b);
+CPUMF_EVENT_ATTR(cf_svn_generic, AES_FUNCTIONS, 0x004c);
+CPUMF_EVENT_ATTR(cf_svn_generic, AES_CYCLES, 0x004d);
+CPUMF_EVENT_ATTR(cf_svn_generic, AES_BLOCKED_FUNCTIONS, 0x004e);
+CPUMF_EVENT_ATTR(cf_svn_generic, AES_BLOCKED_CYCLES, 0x004f);
CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082);
@@ -171,36 +179,105 @@ CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_NO_SPECIAL, 0x00db);
CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc);
CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
+CPUMF_EVENT_ATTR(cf_z14, L1D_WRITES_RO_EXCL, 0x0080);
+CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081);
+CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082);
+CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083);
+CPUMF_EVENT_ATTR(cf_z14, DTLB2_GPAGE_WRITES, 0x0084);
+CPUMF_EVENT_ATTR(cf_z14, L1D_L2D_SOURCED_WRITES, 0x0085);
+CPUMF_EVENT_ATTR(cf_z14, ITLB2_WRITES, 0x0086);
+CPUMF_EVENT_ATTR(cf_z14, ITLB2_MISSES, 0x0087);
+CPUMF_EVENT_ATTR(cf_z14, L1I_L2I_SOURCED_WRITES, 0x0088);
+CPUMF_EVENT_ATTR(cf_z14, TLB2_PTE_WRITES, 0x0089);
+CPUMF_EVENT_ATTR(cf_z14, TLB2_CRSTE_WRITES, 0x008a);
+CPUMF_EVENT_ATTR(cf_z14, TLB2_ENGINES_BUSY, 0x008b);
+CPUMF_EVENT_ATTR(cf_z14, TX_C_TEND, 0x008c);
+CPUMF_EVENT_ATTR(cf_z14, TX_NC_TEND, 0x008d);
+CPUMF_EVENT_ATTR(cf_z14, L1C_TLB2_MISSES, 0x008f);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_MEMORY_SOURCED_WRITES, 0x0091);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0092);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES, 0x0093);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONCLUSTER_MEMORY_SOURCED_WRITES, 0x0094);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES_IV, 0x0095);
+CPUMF_EVENT_ATTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES, 0x0096);
+CPUMF_EVENT_ATTR(cf_z14, L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES, 0x0097);
+CPUMF_EVENT_ATTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV, 0x0098);
+CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES, 0x0099);
+CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_MEMORY_SOURCED_WRITES, 0x009a);
+CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES_IV, 0x009b);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONDRAWER_L4_SOURCED_WRITES, 0x009c);
+CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_L4_SOURCED_WRITES, 0x009d);
+CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_RO, 0x009e);
+CPUMF_EVENT_ATTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES, 0x00a2);
+CPUMF_EVENT_ATTR(cf_z14, L1I_ONCHIP_MEMORY_SOURCED_WRITES, 0x00a3);
+CPUMF_EVENT_ATTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x00a4);
+CPUMF_EVENT_ATTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES, 0x00a5);
+CPUMF_EVENT_ATTR(cf_z14, L1I_ONCLUSTER_MEMORY_SOURCED_WRITES, 0x00a6);
+CPUMF_EVENT_ATTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES_IV, 0x00a7);
+CPUMF_EVENT_ATTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES, 0x00a8);
+CPUMF_EVENT_ATTR(cf_z14, L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES, 0x00a9);
+CPUMF_EVENT_ATTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV, 0x00aa);
+CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES, 0x00ab);
+CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_MEMORY_SOURCED_WRITES, 0x00ac);
+CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES_IV, 0x00ad);
+CPUMF_EVENT_ATTR(cf_z14, L1I_ONDRAWER_L4_SOURCED_WRITES, 0x00ae);
+CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L4_SOURCED_WRITES, 0x00af);
+CPUMF_EVENT_ATTR(cf_z14, BCD_DFP_EXECUTION_SLOTS, 0x00e0);
+CPUMF_EVENT_ATTR(cf_z14, VX_BCD_EXECUTION_SLOTS, 0x00e1);
+CPUMF_EVENT_ATTR(cf_z14, DECIMAL_INSTRUCTIONS, 0x00e2);
+CPUMF_EVENT_ATTR(cf_z14, LAST_HOST_TRANSLATIONS, 0x00e9);
+CPUMF_EVENT_ATTR(cf_z14, TX_NC_TABORT, 0x00f3);
+CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_NO_SPECIAL, 0x00f4);
+CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_SPECIAL, 0x00f5);
+CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
+CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
-static struct attribute *cpumcf_pmu_event_attr[] __initdata = {
- CPUMF_EVENT_PTR(cf, CPU_CYCLES),
- CPUMF_EVENT_PTR(cf, INSTRUCTIONS),
- CPUMF_EVENT_PTR(cf, L1I_DIR_WRITES),
- CPUMF_EVENT_PTR(cf, L1I_PENALTY_CYCLES),
- CPUMF_EVENT_PTR(cf, PROBLEM_STATE_CPU_CYCLES),
- CPUMF_EVENT_PTR(cf, PROBLEM_STATE_INSTRUCTIONS),
- CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_DIR_WRITES),
- CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES),
- CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_DIR_WRITES),
- CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES),
- CPUMF_EVENT_PTR(cf, L1D_DIR_WRITES),
- CPUMF_EVENT_PTR(cf, L1D_PENALTY_CYCLES),
- CPUMF_EVENT_PTR(cf, PRNG_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, PRNG_CYCLES),
- CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_CYCLES),
- CPUMF_EVENT_PTR(cf, SHA_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, SHA_CYCLES),
- CPUMF_EVENT_PTR(cf, SHA_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, SHA_BLOCKED_CYCLES),
- CPUMF_EVENT_PTR(cf, DEA_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, DEA_CYCLES),
- CPUMF_EVENT_PTR(cf, DEA_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, DEA_BLOCKED_CYCLES),
- CPUMF_EVENT_PTR(cf, AES_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, AES_CYCLES),
- CPUMF_EVENT_PTR(cf, AES_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf, AES_BLOCKED_CYCLES),
+static struct attribute *cpumcf_fvn1_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_fvn1, CPU_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn1, INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_fvn1, L1I_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf_fvn1, L1I_PENALTY_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_CPU_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1I_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1I_PENALTY_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1D_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1D_PENALTY_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn1, L1D_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf_fvn1, L1D_PENALTY_CYCLES),
+ NULL,
+};
+
+static struct attribute *cpumcf_fvn3_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_fvn3, CPU_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn3, INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_fvn3, L1I_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf_fvn3, L1I_PENALTY_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn3, PROBLEM_STATE_CPU_CYCLES),
+ CPUMF_EVENT_PTR(cf_fvn3, PROBLEM_STATE_INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_fvn3, L1D_DIR_WRITES),
+ CPUMF_EVENT_PTR(cf_fvn3, L1D_PENALTY_CYCLES),
+ NULL,
+};
+
+static struct attribute *cpumcf_svn_generic_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_svn_generic, PRNG_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, PRNG_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_generic, PRNG_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, PRNG_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_generic, SHA_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, SHA_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_generic, SHA_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, SHA_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_generic, DEA_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, DEA_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_generic, DEA_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, DEA_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_generic, AES_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, AES_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_generic, AES_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_generic, AES_BLOCKED_CYCLES),
NULL,
};
@@ -353,6 +430,63 @@ static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
NULL,
};
+static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_z14, L1D_WRITES_RO_EXCL),
+ CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, DTLB2_GPAGE_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_L2D_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, ITLB2_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, ITLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_L2I_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, TLB2_PTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, TLB2_CRSTE_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, TLB2_ENGINES_BUSY),
+ CPUMF_EVENT_PTR(cf_z14, TX_C_TEND),
+ CPUMF_EVENT_PTR(cf_z14, TX_NC_TEND),
+ CPUMF_EVENT_PTR(cf_z14, L1C_TLB2_MISSES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONCLUSTER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONDRAWER_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_RO),
+ CPUMF_EVENT_PTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_ONCHIP_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_ONCLUSTER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_MEMORY_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES_IV),
+ CPUMF_EVENT_PTR(cf_z14, L1I_ONDRAWER_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_L4_SOURCED_WRITES),
+ CPUMF_EVENT_PTR(cf_z14, BCD_DFP_EXECUTION_SLOTS),
+ CPUMF_EVENT_PTR(cf_z14, VX_BCD_EXECUTION_SLOTS),
+ CPUMF_EVENT_PTR(cf_z14, DECIMAL_INSTRUCTIONS),
+ CPUMF_EVENT_PTR(cf_z14, LAST_HOST_TRANSLATIONS),
+ CPUMF_EVENT_PTR(cf_z14, TX_NC_TABORT),
+ CPUMF_EVENT_PTR(cf_z14, TX_C_TABORT_NO_SPECIAL),
+ CPUMF_EVENT_PTR(cf_z14, TX_C_TABORT_SPECIAL),
+ CPUMF_EVENT_PTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
+ CPUMF_EVENT_PTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
+ NULL,
+};
+
/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */
static struct attribute_group cpumcf_pmu_events_group = {
@@ -379,7 +513,8 @@ static const struct attribute_group *cpumcf_pmu_attr_groups[] = {
static __init struct attribute **merge_attr(struct attribute **a,
- struct attribute **b)
+ struct attribute **b,
+ struct attribute **c)
{
struct attribute **new;
int j, i;
@@ -388,6 +523,8 @@ static __init struct attribute **merge_attr(struct attribute **a,
;
for (i = 0; b[i]; i++)
j++;
+ for (i = 0; c[i]; i++)
+ j++;
j++;
new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
@@ -398,6 +535,8 @@ static __init struct attribute **merge_attr(struct attribute **a,
new[j++] = a[i];
for (i = 0; b[i]; i++)
new[j++] = b[i];
+ for (i = 0; c[i]; i++)
+ new[j++] = c[i];
new[j] = NULL;
return new;
@@ -405,10 +544,26 @@ static __init struct attribute **merge_attr(struct attribute **a,
__init const struct attribute_group **cpumf_cf_event_group(void)
{
- struct attribute **combined, **model;
+ struct attribute **combined, **model, **cfvn, **csvn;
struct attribute *none[] = { NULL };
+ struct cpumf_ctr_info ci;
struct cpuid cpu_id;
+ /* Determine generic counters set(s) */
+ qctri(&ci);
+ switch (ci.cfvn) {
+ case 1:
+ cfvn = cpumcf_fvn1_pmu_event_attr;
+ break;
+ case 3:
+ cfvn = cpumcf_fvn3_pmu_event_attr;
+ break;
+ default:
+ cfvn = none;
+ }
+ csvn = cpumcf_svn_generic_pmu_event_attr;
+
+ /* Determine model-specific counter set(s) */
get_cpu_id(&cpu_id);
switch (cpu_id.machine) {
case 0x2097:
@@ -427,12 +582,15 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
case 0x2965:
model = cpumcf_z13_pmu_event_attr;
break;
+ case 0x3906:
+ model = cpumcf_z14_pmu_event_attr;
+ break;
default:
model = none;
break;
}
- combined = merge_attr(cpumcf_pmu_event_attr, model);
+ combined = merge_attr(cfvn, csvn, model);
if (combined)
cpumcf_pmu_events_group.attrs = combined;
return cpumcf_pmu_attr_groups;
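The z14 counters above are exported as raw event codes under the cpum_cf PMU, so user space can count them through perf_event_open(). The sketch below is only an assumed illustration: the cpum_cf sysfs path, the config value 0x008c (TX_C_TEND from the table above) and the trimmed error handling are not part of this patch.

/* Hypothetical user-space sketch: count TX_C_TEND (raw code 0x008c) via cpum_cf. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int type, fd;
	FILE *f;

	f = fopen("/sys/bus/event_source/devices/cpum_cf/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;		/* dynamic PMU type of cpum_cf */
	attr.config = 0x008c;		/* TX_C_TEND, as defined above */
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload to be measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("TX_C_TEND: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}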
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 7e1e40323b78..bd4bbf61aaf3 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -823,12 +823,8 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
}
/* Check online status of the CPU to which the event is pinned */
- if (event->cpu >= 0) {
- if ((unsigned int)event->cpu >= nr_cpumask_bits)
+ if (event->cpu >= 0 && !cpu_online(event->cpu))
return -ENODEV;
- if (!cpu_online(event->cpu))
- return -ENODEV;
- }
/* Force reset of idle/hv excludes regardless of what the
* user requested.
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index a4a84fb08046..70576a2f69cf 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -44,27 +44,14 @@ asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
extern void kernel_thread_starter(void);
-/*
- * Free current thread data structures etc..
- */
-void exit_thread(struct task_struct *tsk)
-{
- if (tsk == current) {
- exit_thread_runtime_instr();
- exit_thread_gs();
- }
-}
-
void flush_thread(void)
{
}
-void release_thread(struct task_struct *dead_task)
-{
-}
-
void arch_release_task_struct(struct task_struct *tsk)
{
+ runtime_instr_release(tsk);
+ guarded_storage_release(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -100,6 +87,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
+ p->thread.per_flags = 0;
/* Initialize per thread user and system timer values */
p->thread.user_timer = 0;
p->thread.guest_timer = 0;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 1427d60ce628..26c0523c1488 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -31,6 +31,9 @@
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
+#include <asm/runtime_instr.h>
+#include <asm/facility.h>
+
#include "entry.h"
#ifdef CONFIG_COMPAT
@@ -45,42 +48,42 @@ void update_cr_regs(struct task_struct *task)
struct pt_regs *regs = task_pt_regs(task);
struct thread_struct *thread = &task->thread;
struct per_regs old, new;
- unsigned long cr0_old, cr0_new;
- unsigned long cr2_old, cr2_new;
+ union ctlreg0 cr0_old, cr0_new;
+ union ctlreg2 cr2_old, cr2_new;
int cr0_changed, cr2_changed;
- __ctl_store(cr0_old, 0, 0);
- __ctl_store(cr2_old, 2, 2);
+ __ctl_store(cr0_old.val, 0, 0);
+ __ctl_store(cr2_old.val, 2, 2);
cr0_new = cr0_old;
cr2_new = cr2_old;
/* Take care of the enable/disable of transactional execution. */
if (MACHINE_HAS_TE) {
/* Set or clear transaction execution TXC bit 8. */
- cr0_new |= (1UL << 55);
+ cr0_new.tcx = 1;
if (task->thread.per_flags & PER_FLAG_NO_TE)
- cr0_new &= ~(1UL << 55);
+ cr0_new.tcx = 0;
/* Set or clear transaction execution TDC bits 62 and 63. */
- cr2_new &= ~3UL;
+ cr2_new.tdc = 0;
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
- cr2_new |= 1UL;
+ cr2_new.tdc = 1;
else
- cr2_new |= 2UL;
+ cr2_new.tdc = 2;
}
}
/* Take care of enable/disable of guarded storage. */
if (MACHINE_HAS_GS) {
- cr2_new &= ~(1UL << 4);
+ cr2_new.gse = 0;
if (task->thread.gs_cb)
- cr2_new |= (1UL << 4);
+ cr2_new.gse = 1;
}
/* Load control register 0/2 iff changed */
- cr0_changed = cr0_new != cr0_old;
- cr2_changed = cr2_new != cr2_old;
+ cr0_changed = cr0_new.val != cr0_old.val;
+ cr2_changed = cr2_new.val != cr2_old.val;
if (cr0_changed)
- __ctl_load(cr0_new, 0, 0);
+ __ctl_load(cr0_new.val, 0, 0);
if (cr2_changed)
- __ctl_load(cr2_new, 2, 2);
+ __ctl_load(cr2_new.val, 2, 2);
/* Copy user specified PER registers */
new.control = thread->per_user.control;
new.start = thread->per_user.start;
@@ -1172,26 +1175,37 @@ static int s390_gs_cb_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- struct gs_cb *data = target->thread.gs_cb;
+ struct gs_cb gs_cb = { }, *data = NULL;
int rc;
if (!MACHINE_HAS_GS)
return -ENODEV;
- if (!data) {
+ if (!target->thread.gs_cb) {
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->gsd = 25;
- target->thread.gs_cb = data;
- if (target == current)
- __ctl_set_bit(2, 4);
- } else if (target == current) {
- save_gs_cb(data);
}
+ if (!target->thread.gs_cb)
+ gs_cb.gsd = 25;
+ else if (target == current)
+ save_gs_cb(&gs_cb);
+ else
+ gs_cb = *target->thread.gs_cb;
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- data, 0, sizeof(struct gs_cb));
- if (target == current)
- restore_gs_cb(data);
+ &gs_cb, 0, sizeof(gs_cb));
+ if (rc) {
+ kfree(data);
+ return -EFAULT;
+ }
+ preempt_disable();
+ if (!target->thread.gs_cb)
+ target->thread.gs_cb = data;
+ *target->thread.gs_cb = gs_cb;
+ if (target == current) {
+ __ctl_set_bit(2, 4);
+ restore_gs_cb(target->thread.gs_cb);
+ }
+ preempt_enable();
return rc;
}
@@ -1229,6 +1243,96 @@ static int s390_gs_bc_set(struct task_struct *target,
data, 0, sizeof(struct gs_cb));
}
+static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
+{
+ return (cb->rca & 0x1f) == 0 &&
+ (cb->roa & 0xfff) == 0 &&
+ (cb->rla & 0xfff) == 0xfff &&
+ cb->s == 1 &&
+ cb->k == 1 &&
+ cb->h == 0 &&
+ cb->reserved1 == 0 &&
+ cb->ps == 1 &&
+ cb->qs == 0 &&
+ cb->pc == 1 &&
+ cb->qc == 0 &&
+ cb->reserved2 == 0 &&
+ cb->key == PAGE_DEFAULT_KEY &&
+ cb->reserved3 == 0 &&
+ cb->reserved4 == 0 &&
+ cb->reserved5 == 0 &&
+ cb->reserved6 == 0 &&
+ cb->reserved7 == 0 &&
+ cb->reserved8 == 0 &&
+ cb->rla >= cb->roa &&
+ cb->rca >= cb->roa &&
+ cb->rca <= cb->rla+1 &&
+ cb->m < 3;
+}
+
+static int s390_runtime_instr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct runtime_instr_cb *data = target->thread.ri_cb;
+
+ if (!test_facility(64))
+ return -ENODEV;
+ if (!data)
+ return -ENODATA;
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ data, 0, sizeof(struct runtime_instr_cb));
+}
+
+static int s390_runtime_instr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct runtime_instr_cb ri_cb = { }, *data = NULL;
+ int rc;
+
+ if (!test_facility(64))
+ return -ENODEV;
+
+ if (!target->thread.ri_cb) {
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ }
+
+ if (target->thread.ri_cb) {
+ if (target == current)
+ store_runtime_instr_cb(&ri_cb);
+ else
+ ri_cb = *target->thread.ri_cb;
+ }
+
+ rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &ri_cb, 0, sizeof(struct runtime_instr_cb));
+ if (rc) {
+ kfree(data);
+ return -EFAULT;
+ }
+
+ if (!is_ri_cb_valid(&ri_cb)) {
+ kfree(data);
+ return -EINVAL;
+ }
+
+ preempt_disable();
+ if (!target->thread.ri_cb)
+ target->thread.ri_cb = data;
+ *target->thread.ri_cb = ri_cb;
+ if (target == current)
+ load_runtime_instr_cb(target->thread.ri_cb);
+ preempt_enable();
+
+ return 0;
+}
+
static const struct user_regset s390_regsets[] = {
{
.core_note_type = NT_PRSTATUS,
@@ -1302,6 +1406,14 @@ static const struct user_regset s390_regsets[] = {
.get = s390_gs_bc_get,
.set = s390_gs_bc_set,
},
+ {
+ .core_note_type = NT_S390_RI_CB,
+ .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
+ .size = sizeof(__u64),
+ .align = sizeof(__u64),
+ .get = s390_runtime_instr_get,
+ .set = s390_runtime_instr_set,
+ },
};
static const struct user_regset_view user_s390_view = {
@@ -1538,6 +1650,14 @@ static const struct user_regset s390_compat_regsets[] = {
.get = s390_gs_cb_get,
.set = s390_gs_cb_set,
},
+ {
+ .core_note_type = NT_S390_RI_CB,
+ .n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
+ .size = sizeof(__u64),
+ .align = sizeof(__u64),
+ .get = s390_runtime_instr_get,
+ .set = s390_runtime_instr_set,
+ },
};
static const struct user_regset_view user_s390_compat_view = {
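With the two regset entries above in place, a tracer can read the runtime-instrumentation control block of a stopped tracee via PTRACE_GETREGSET. The sketch below is an assumed usage pattern rather than part of the patch; the NT_S390_RI_CB fallback value and the oversized buffer are illustrative (the kernel trims iov_len to the real regset size and returns -ENODATA while RI is disabled).

/* Hypothetical tracer-side sketch: fetch NT_S390_RI_CB from a stopped tracee. */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <linux/elf.h>

#ifndef NT_S390_RI_CB
#define NT_S390_RI_CB 0x30d	/* assumed fallback; normally provided by <linux/elf.h> */
#endif

int read_ri_cb(pid_t pid)
{
	unsigned char ri_cb[256];	/* generously sized; iov_len is trimmed by the kernel */
	struct iovec iov = { .iov_base = ri_cb, .iov_len = sizeof(ri_cb) };

	memset(ri_cb, 0, sizeof(ri_cb));
	if (ptrace(PTRACE_GETREGSET, pid, NT_S390_RI_CB, &iov) == -1) {
		perror("PTRACE_GETREGSET(NT_S390_RI_CB)");
		return -1;
	}
	printf("read %zu bytes of runtime_instr_cb\n", iov.iov_len);
	return 0;
}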
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index ca37e5d5b40c..9c2c96da23d0 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -29,7 +29,6 @@
ENTRY(relocate_kernel)
basr %r13,0 # base address
.base:
- stnsm sys_msk-.base(%r13),0xfb # disable DAT
stctg %c0,%c15,ctlregs-.base(%r13)
stmg %r0,%r15,gprregs-.base(%r13)
lghi %r0,3
@@ -103,8 +102,6 @@ ENTRY(relocate_kernel)
.align 8
load_psw:
.long 0x00080000,0x80000000
- sys_msk:
- .quad 0
ctlregs:
.rept 16
.quad 0
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index 32aefb215e59..09f5bf0d5c0c 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -21,11 +21,24 @@
/* empty control block to disable RI by loading it */
struct runtime_instr_cb runtime_instr_empty_cb;
+void runtime_instr_release(struct task_struct *tsk)
+{
+ kfree(tsk->thread.ri_cb);
+}
+
static void disable_runtime_instr(void)
{
- struct pt_regs *regs = task_pt_regs(current);
+ struct task_struct *task = current;
+ struct pt_regs *regs;
+ if (!task->thread.ri_cb)
+ return;
+ regs = task_pt_regs(task);
+ preempt_disable();
load_runtime_instr_cb(&runtime_instr_empty_cb);
+ kfree(task->thread.ri_cb);
+ task->thread.ri_cb = NULL;
+ preempt_enable();
/*
* Make sure the RI bit is deleted from the PSW. If the user did not
@@ -37,24 +50,13 @@ static void disable_runtime_instr(void)
static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
{
- cb->buf_limit = 0xfff;
- cb->pstate = 1;
- cb->pstate_set_buf = 1;
- cb->pstate_sample = 1;
- cb->pstate_collect = 1;
+ cb->rla = 0xfff;
+ cb->s = 1;
+ cb->k = 1;
+ cb->ps = 1;
+ cb->pc = 1;
cb->key = PAGE_DEFAULT_KEY;
- cb->valid = 1;
-}
-
-void exit_thread_runtime_instr(void)
-{
- struct task_struct *task = current;
-
- if (!task->thread.ri_cb)
- return;
- disable_runtime_instr();
- kfree(task->thread.ri_cb);
- task->thread.ri_cb = NULL;
+ cb->v = 1;
}
SYSCALL_DEFINE1(s390_runtime_instr, int, command)
@@ -65,9 +67,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
return -EOPNOTSUPP;
if (command == S390_RUNTIME_INSTR_STOP) {
- preempt_disable();
- exit_thread_runtime_instr();
- preempt_enable();
+ disable_runtime_instr();
return 0;
}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 164a1e16b53e..090053cf279b 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -55,17 +55,18 @@
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
+#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
-#include <asm/kvm_virtio.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
+#include <asm/alternative.h>
#include "entry.h"
/*
@@ -339,16 +340,8 @@ static void __init setup_lowcore(void)
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
- if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
- unsigned long bits, size;
-
- bits = MACHINE_HAS_GS ? 11 : 10;
- size = 1UL << bits;
- lc->mcesad = (__u64) memblock_virt_alloc(size, size);
- if (MACHINE_HAS_GS)
- lc->mcesad |= bits;
- }
- lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
+ nmi_alloc_boot_cpu(lc);
+ vdso_alloc_boot_cpu(lc);
lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
lc->async_enter_timer = S390_lowcore.async_enter_timer;
lc->exit_timer = S390_lowcore.exit_timer;
@@ -380,6 +373,8 @@ static void __init setup_lowcore(void)
#ifdef CONFIG_SMP
lc->spinlock_lockval = arch_spin_lockval(0);
+ lc->spinlock_index = 0;
+ arch_spin_lock_setup(0);
#endif
set_prefix((u32)(unsigned long) lc);
@@ -764,7 +759,7 @@ static int __init setup_hwcaps(void)
/*
* Transactional execution support HWCAP_S390_TE is bit 10.
*/
- if (test_facility(50) && test_facility(73))
+ if (MACHINE_HAS_TE)
elf_hwcap |= HWCAP_S390_TE;
/*
@@ -955,6 +950,8 @@ void __init setup_arch(char **cmdline_p)
conmode_default();
set_preferred_console();
+ apply_alternative_instructions();
+
/* Setup zfcpdump support */
setup_zfcpdump();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 092c4154abd7..cd4334e80b64 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -37,6 +37,7 @@
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/memblock.h>
+#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
@@ -81,8 +82,6 @@ struct pcpu {
static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];
-static struct kmem_cache *pcpu_mcesa_cache;
-
unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);
@@ -193,10 +192,8 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
unsigned long async_stack, panic_stack;
- unsigned long mcesa_origin, mcesa_bits;
struct lowcore *lc;
- mcesa_origin = mcesa_bits = 0;
if (pcpu != &pcpu_devices[0]) {
pcpu->lowcore = (struct lowcore *)
__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
@@ -204,39 +201,30 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
panic_stack = __get_free_page(GFP_KERNEL);
if (!pcpu->lowcore || !panic_stack || !async_stack)
goto out;
- if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
- mcesa_origin = (unsigned long)
- kmem_cache_alloc(pcpu_mcesa_cache, GFP_KERNEL);
- if (!mcesa_origin)
- goto out;
- /* The pointer is stored with mcesa_bits ORed in */
- kmemleak_not_leak((void *) mcesa_origin);
- mcesa_bits = MACHINE_HAS_GS ? 11 : 0;
- }
} else {
async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
- mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
- mcesa_bits = pcpu->lowcore->mcesad & MCESA_LC_MASK;
}
lc = pcpu->lowcore;
memcpy(lc, &S390_lowcore, 512);
memset((char *) lc + 512, 0, sizeof(*lc) - 512);
lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
- lc->mcesad = mcesa_origin | mcesa_bits;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
- if (vdso_alloc_per_cpu(lc))
+ lc->spinlock_index = 0;
+ if (nmi_alloc_per_cpu(lc))
goto out;
+ if (vdso_alloc_per_cpu(lc))
+ goto out_mcesa;
lowcore_ptr[cpu] = lc;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
return 0;
+
+out_mcesa:
+ nmi_free_per_cpu(lc);
out:
if (pcpu != &pcpu_devices[0]) {
- if (mcesa_origin)
- kmem_cache_free(pcpu_mcesa_cache,
- (void *) mcesa_origin);
free_page(panic_stack);
free_pages(async_stack, ASYNC_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
@@ -248,17 +236,12 @@ out:
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
- unsigned long mcesa_origin;
-
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[pcpu - pcpu_devices] = NULL;
vdso_free_per_cpu(pcpu->lowcore);
+ nmi_free_per_cpu(pcpu->lowcore);
if (pcpu == &pcpu_devices[0])
return;
- if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
- mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
- kmem_cache_free(pcpu_mcesa_cache, (void *) mcesa_origin);
- }
free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
@@ -274,6 +257,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
+ lc->spinlock_index = 0;
lc->percpu_offset = __per_cpu_offset[cpu];
lc->kernel_asce = S390_lowcore.kernel_asce;
lc->machine_flags = S390_lowcore.machine_flags;
@@ -282,6 +266,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
save_access_regs((unsigned int *) lc->access_regs_save_area);
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
+ arch_spin_lock_setup(cpu);
}
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
@@ -423,13 +408,17 @@ void smp_yield_cpu(int cpu)
* Send cpus emergency shutdown signal. This gives the cpus the
* opportunity to complete outstanding interrupts.
*/
-static void smp_emergency_stop(cpumask_t *cpumask)
+void notrace smp_emergency_stop(void)
{
+ cpumask_t cpumask;
u64 end;
int cpu;
+ cpumask_copy(&cpumask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &cpumask);
+
end = get_tod_clock() + (1000000UL << 12);
- for_each_cpu(cpu, cpumask) {
+ for_each_cpu(cpu, &cpumask) {
struct pcpu *pcpu = pcpu_devices + cpu;
set_bit(ec_stop_cpu, &pcpu->ec_mask);
while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
@@ -438,21 +427,21 @@ static void smp_emergency_stop(cpumask_t *cpumask)
cpu_relax();
}
while (get_tod_clock() < end) {
- for_each_cpu(cpu, cpumask)
+ for_each_cpu(cpu, &cpumask)
if (pcpu_stopped(pcpu_devices + cpu))
- cpumask_clear_cpu(cpu, cpumask);
- if (cpumask_empty(cpumask))
+ cpumask_clear_cpu(cpu, &cpumask);
+ if (cpumask_empty(&cpumask))
break;
cpu_relax();
}
}
+NOKPROBE_SYMBOL(smp_emergency_stop);
/*
* Stop all cpus but the current one.
*/
void smp_send_stop(void)
{
- cpumask_t cpumask;
int cpu;
/* Disable all interrupts/machine checks */
@@ -460,17 +449,16 @@ void smp_send_stop(void)
trace_hardirqs_off();
debug_set_critical();
- cpumask_copy(&cpumask, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), &cpumask);
if (oops_in_progress)
- smp_emergency_stop(&cpumask);
+ smp_emergency_stop();
/* stop all processors */
- for_each_cpu(cpu, &cpumask) {
- struct pcpu *pcpu = pcpu_devices + cpu;
- pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
- while (!pcpu_stopped(pcpu))
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+ pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
+ while (!pcpu_stopped(pcpu_devices + cpu))
cpu_relax();
}
}
@@ -804,6 +792,8 @@ void __init smp_detect_cpus(void)
*/
static void smp_start_secondary(void *cpuvoid)
{
+ int cpu = smp_processor_id();
+
S390_lowcore.last_update_clock = get_tod_clock();
S390_lowcore.restart_stack = (unsigned long) restart_stack;
S390_lowcore.restart_fn = (unsigned long) do_restart;
@@ -817,8 +807,12 @@ static void smp_start_secondary(void *cpuvoid)
init_cpu_timer();
vtime_init();
pfault_init();
- notify_cpu_starting(smp_processor_id());
- set_cpu_online(smp_processor_id(), true);
+ notify_cpu_starting(cpu);
+ if (topology_cpu_dedicated(cpu))
+ set_cpu_flag(CIF_DEDICATED_CPU);
+ else
+ clear_cpu_flag(CIF_DEDICATED_CPU);
+ set_cpu_online(cpu, true);
inc_irq_stat(CPU_RST);
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
@@ -927,22 +921,12 @@ void __init smp_fill_possible_mask(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- unsigned long size;
-
/* request the 0x1201 emergency signal external interrupt */
if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1201");
/* request the 0x1202 external call external interrupt */
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
- /* create slab cache for the machine-check-extended-save-areas */
- if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
- size = 1UL << (MACHINE_HAS_GS ? 11 : 10);
- pcpu_mcesa_cache = kmem_cache_create("nmi_save_areas",
- size, size, 0, NULL);
- if (!pcpu_mcesa_cache)
- panic("Couldn't create nmi save area cache");
- }
}
void __init smp_prepare_boot_cpu(void)
@@ -965,6 +949,7 @@ void __init smp_setup_processor_id(void)
pcpu_devices[0].address = stap();
S390_lowcore.cpu_nr = 0;
S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
+ S390_lowcore.spinlock_index = 0;
}
/*
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kernel/sthyi.c
index 395926b8c1ed..12981e197f01 100644
--- a/arch/s390/kvm/sthyi.c
+++ b/arch/s390/kernel/sthyi.c
@@ -8,22 +8,19 @@
* Copyright IBM Corp. 2016
* Author(s): Janosch Frank <frankja@linux.vnet.ibm.com>
*/
-#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
-#include <linux/ratelimit.h>
-
-#include <asm/kvm_host.h>
+#include <linux/syscalls.h>
+#include <linux/mutex.h>
#include <asm/asm-offsets.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/sysinfo.h>
#include <asm/ebcdic.h>
-
-#include "kvm-s390.h"
-#include "gaccess.h"
-#include "trace.h"
+#include <asm/facility.h>
+#include <asm/sthyi.h>
+#include "entry.h"
#define DED_WEIGHT 0xffff
/*
@@ -144,6 +141,21 @@ struct lpar_cpu_inf {
struct cpu_inf ifl;
};
+/*
+ * STHYI requires extensive locking in the higher hypervisors
+ * and is very compute and memory intensive. Therefore we
+ * cache the retrieved data; it stays valid for 1s.
+ */
+#define CACHE_VALID_JIFFIES HZ
+
+struct sthyi_info {
+ void *info;
+ unsigned long end;
+};
+
+static DEFINE_MUTEX(sthyi_mutex);
+static struct sthyi_info sthyi_cache;
+
static inline u64 cpu_id(u8 ctidx, void *diag224_buf)
{
return *((u64 *)(diag224_buf + (ctidx + 1) * DIAG204_CPU_NAME_LEN));
@@ -382,88 +394,124 @@ out:
vfree(diag204_buf);
}
-static int sthyi(u64 vaddr)
+static int sthyi(u64 vaddr, u64 *rc)
{
register u64 code asm("0") = 0;
register u64 addr asm("2") = vaddr;
+ register u64 rcode asm("3");
int cc;
asm volatile(
".insn rre,0xB2560000,%[code],%[addr]\n"
"ipm %[cc]\n"
"srl %[cc],28\n"
- : [cc] "=d" (cc)
+ : [cc] "=d" (cc), "=d" (rcode)
: [code] "d" (code), [addr] "a" (addr)
- : "3", "memory", "cc");
+ : "memory", "cc");
+ *rc = rcode;
return cc;
}
-int handle_sthyi(struct kvm_vcpu *vcpu)
+static int fill_dst(void *dst, u64 *rc)
{
- int reg1, reg2, r = 0;
- u64 code, addr, cc = 0;
- struct sthyi_sctns *sctns = NULL;
-
- if (!test_kvm_facility(vcpu->kvm, 74))
- return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+ struct sthyi_sctns *sctns = (struct sthyi_sctns *)dst;
/*
- * STHYI requires extensive locking in the higher hypervisors
- * and is very computational/memory expensive. Therefore we
- * ratelimit the executions per VM.
+ * If the facility is on, we don't want to emulate the instruction.
+ * We ask the hypervisor to provide the data.
*/
- if (!__ratelimit(&vcpu->kvm->arch.sthyi_limit)) {
- kvm_s390_retry_instr(vcpu);
+ if (test_facility(74))
+ return sthyi((u64)dst, rc);
+
+ fill_hdr(sctns);
+ fill_stsi(sctns);
+ fill_diag(sctns);
+ *rc = 0;
+ return 0;
+}
+
+static int sthyi_init_cache(void)
+{
+ if (sthyi_cache.info)
return 0;
- }
+ sthyi_cache.info = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!sthyi_cache.info)
+ return -ENOMEM;
+ sthyi_cache.end = jiffies - 1; /* expired */
+ return 0;
+}
- kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
- code = vcpu->run->s.regs.gprs[reg1];
- addr = vcpu->run->s.regs.gprs[reg2];
+static int sthyi_update_cache(u64 *rc)
+{
+ int r;
- vcpu->stat.instruction_sthyi++;
- VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
- trace_kvm_s390_handle_sthyi(vcpu, code, addr);
+ memset(sthyi_cache.info, 0, PAGE_SIZE);
+ r = fill_dst(sthyi_cache.info, rc);
+ if (r)
+ return r;
+ sthyi_cache.end = jiffies + CACHE_VALID_JIFFIES;
+ return r;
+}
- if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
- return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+/*
+ * sthyi_fill - Fill page with data returned by the STHYI instruction
+ *
+ * @dst: Pointer to zeroed page
+ * @rc: Pointer for storing the return code of the instruction
+ *
+ * Fills the destination with system information returned by the STHYI
+ * instruction. The data is generated by emulation or execution of STHYI,
+ * if available. The return value is the condition code that would be
+ * returned, the rc parameter is the return code which is passed in
+ * register R2 + 1.
+ */
+int sthyi_fill(void *dst, u64 *rc)
+{
+ int r;
- if (code & 0xffff) {
- cc = 3;
+ mutex_lock(&sthyi_mutex);
+ r = sthyi_init_cache();
+ if (r)
goto out;
- }
- if (addr & ~PAGE_MASK)
- return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ if (time_is_before_jiffies(sthyi_cache.end)) {
+ /* cache expired */
+ r = sthyi_update_cache(rc);
+ if (r)
+ goto out;
+ }
+ *rc = 0;
+ memcpy(dst, sthyi_cache.info, PAGE_SIZE);
+out:
+ mutex_unlock(&sthyi_mutex);
+ return r;
+}
+EXPORT_SYMBOL_GPL(sthyi_fill);
- sctns = (void *)get_zeroed_page(GFP_KERNEL);
- if (!sctns)
+SYSCALL_DEFINE4(s390_sthyi, unsigned long, function_code, void __user *, buffer,
+ u64 __user *, return_code, unsigned long, flags)
+{
+ u64 sthyi_rc;
+ void *info;
+ int r;
+
+ if (flags)
+ return -EINVAL;
+ if (function_code != STHYI_FC_CP_IFL_CAP)
+ return -EOPNOTSUPP;
+ info = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!info)
return -ENOMEM;
-
- /*
- * If we are a guest, we don't want to emulate an emulated
- * instruction. We ask the hypervisor to provide the data.
- */
- if (test_facility(74)) {
- cc = sthyi((u64)sctns);
+ r = sthyi_fill(info, &sthyi_rc);
+ if (r < 0)
+ goto out;
+ if (return_code && put_user(sthyi_rc, return_code)) {
+ r = -EFAULT;
goto out;
}
-
- fill_hdr(sctns);
- fill_stsi(sctns);
- fill_diag(sctns);
-
+ if (copy_to_user(buffer, info, PAGE_SIZE))
+ r = -EFAULT;
out:
- if (!cc) {
- r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
- if (r) {
- free_page((unsigned long)sctns);
- return kvm_s390_inject_prog_cond(vcpu, r);
- }
- }
-
- free_page((unsigned long)sctns);
- vcpu->run->s.regs.gprs[reg2 + 1] = cc ? 4 : 0;
- kvm_s390_set_psw_cc(vcpu, cc);
+ free_page((unsigned long)info);
return r;
}
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index a8af9c825628..ce329c876d8c 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -153,7 +153,7 @@ int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
- unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
+ unsigned long end_rodata_pfn = PFN_DOWN(__pa(&__end_rodata)) - 1;
unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
/* Always save lowcore pages (LC protection might be enabled). */
@@ -161,9 +161,9 @@ int pfn_is_nosave(unsigned long pfn)
return 0;
if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
return 1;
- /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
- if (pfn >= stext_pfn && pfn <= eshared_pfn)
- return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
+ /* Skip memory holes and read-only pages (DCSS, ...). */
+ if (pfn >= stext_pfn && pfn <= end_rodata_pfn)
+ return 0;
if (tprot(PFN_PHYS(pfn)))
return 1;
return 0;
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index d39f121e67a9..308a7b63348b 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -389,3 +389,4 @@ SYSCALL(sys_preadv2,compat_sys_preadv2)
SYSCALL(sys_pwritev2,compat_sys_pwritev2)
SYSCALL(sys_s390_guarded_storage,compat_sys_s390_guarded_storage) /* 378 */
SYSCALL(sys_statx,compat_sys_statx)
+SYSCALL(sys_s390_sthyi,compat_sys_s390_sthyi)
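The new entry follows sys_statx (379), so by the numbering visible above the syscall should resolve to 380; there is no libc wrapper, so callers go through syscall(2). The sketch below is only an assumed illustration of user-space usage: the fallback number and the STHYI_FC_CP_IFL_CAP value of 0 are inferred from this series rather than guaranteed here.

/* Hypothetical sketch: query CP/IFL capacity data through the s390_sthyi syscall. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_s390_sthyi
#define __NR_s390_sthyi 380	/* assumed: next slot after sys_statx (379) */
#endif

#define STHYI_FC_CP_IFL_CAP 0	/* assumed: the only function code accepted above */

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	uint64_t rc = 0;
	void *buf = NULL;
	long cc;

	if (posix_memalign(&buf, page_size, page_size))
		return 1;
	cc = syscall(__NR_s390_sthyi, STHYI_FC_CP_IFL_CAP, buf, &rc, 0UL);
	if (cc < 0) {
		perror("s390_sthyi");
		free(buf);
		return 1;
	}
	printf("s390_sthyi: condition code %ld, rc %llu\n",
	       cc, (unsigned long long)rc);
	free(buf);
	return 0;
}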
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index d7ece9888c29..f9b393d4a078 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -133,6 +133,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
topo->socket_id = socket->id;
topo->core_id = rcore;
topo->thread_id = lcpu + i;
+ topo->dedicated = tl_core->d;
cpumask_set_cpu(lcpu + i, &drawer->mask);
cpumask_set_cpu(lcpu + i, &book->mask);
cpumask_set_cpu(lcpu + i, &socket->mask);
@@ -273,6 +274,14 @@ void store_topology(struct sysinfo_15_1_x *info)
stsi(info, 15, 1, topology_mnest_limit());
}
+static void __arch_update_dedicated_flag(void *arg)
+{
+ if (topology_cpu_dedicated(smp_processor_id()))
+ set_cpu_flag(CIF_DEDICATED_CPU);
+ else
+ clear_cpu_flag(CIF_DEDICATED_CPU);
+}
+
static int __arch_update_cpu_topology(void)
{
struct sysinfo_15_1_x *info = tl_info;
@@ -298,6 +307,7 @@ int arch_update_cpu_topology(void)
int cpu, rc;
rc = __arch_update_cpu_topology();
+ on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -434,9 +444,39 @@ static struct attribute_group topology_cpu_attr_group = {
.attrs = topology_cpu_attrs,
};
+static ssize_t cpu_dedicated_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int cpu = dev->id;
+ ssize_t count;
+
+ mutex_lock(&smp_cpu_state_mutex);
+ count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
+ mutex_unlock(&smp_cpu_state_mutex);
+ return count;
+}
+static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);
+
+static struct attribute *topology_extra_cpu_attrs[] = {
+ &dev_attr_dedicated.attr,
+ NULL,
+};
+
+static struct attribute_group topology_extra_cpu_attr_group = {
+ .attrs = topology_extra_cpu_attrs,
+};
+
int topology_cpu_init(struct cpu *cpu)
{
- return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+ int rc;
+
+ rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+ if (rc || !MACHINE_HAS_TOPOLOGY)
+ return rc;
+ rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
+ if (rc)
+ sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+ return rc;
}
static const struct cpumask *cpu_thread_mask(int cpu)
@@ -508,6 +548,7 @@ void __init topology_init_early(void)
alloc_masks(info, &drawer_info, 3);
out:
__arch_update_cpu_topology();
+ __arch_update_dedicated_flag(NULL);
}
static inline int topology_get_mode(int enabled)
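The new attribute appears as a per-CPU read-only file; a minimal sketch that reads it back, assuming the usual /sys/devices/system/cpu/cpuN/ layout and a machine that provides topology information, is:

/* Hypothetical sketch: read the new 'dedicated' attribute for CPU 0. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/dedicated", "r");
	int dedicated = 0;

	if (!f) {
		perror("dedicated");	/* absent without MACHINE_HAS_TOPOLOGY */
		return 1;
	}
	if (fscanf(f, "%d", &dedicated) == 1)
		printf("cpu0 dedicated: %d\n", dedicated);
	fclose(f);
	return 0;
}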
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index eacda05b45d7..0520854a4dab 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -140,6 +140,20 @@ static void __init vdso_init_data(struct vdso_data *vd)
*/
#define SEGMENT_ORDER 2
+/*
+ * The initial vdso_data structure for the boot CPU. Eventually
+ * it is replaced with a properly allocated structure in vdso_init.
+ * This is necessary because a valid S390_lowcore.vdso_per_cpu_data
+ * pointer is required to be able to return from an interrupt or
+ * program check. See the exit paths in entry.S.
+ */
+struct vdso_data boot_vdso_data __initdata;
+
+void __init vdso_alloc_boot_cpu(struct lowcore *lowcore)
+{
+ lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data;
+}
+
int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
unsigned long segment_table, page_table, page_frame;
@@ -166,10 +180,8 @@ int vdso_alloc_per_cpu(struct lowcore *lowcore)
vd->node_id = cpu_to_node(vd->cpu_nr);
/* Set up access register mode page table */
- clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
- PAGE_SIZE << SEGMENT_ORDER);
- clear_table((unsigned long *) page_table, _PAGE_INVALID,
- 256*sizeof(unsigned long));
+ memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
+ memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);
*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 96a713a470e7..a049ff005f03 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -60,12 +60,7 @@ SECTIONS
RO_DATA_SECTION(PAGE_SIZE)
-#ifdef CONFIG_SHARED_KERNEL
- . = ALIGN(0x100000); /* VM shared segments are 1MB aligned */
-#endif
-
. = ALIGN(PAGE_SIZE);
- _eshared = .; /* End of shareable data */
_sdata = .; /* Start of data section */
. = ALIGN(PAGE_SIZE);
@@ -105,6 +100,29 @@ SECTIONS
EXIT_DATA
}
+ /*
+ * struct alt_instr entries. From the header (alternative.h):
+ * "Alternative instructions for different CPU types or capabilities"
+ * Think locking instructions on spinlocks.
+ * Note that it is part of the __init region.
+ */
+ . = ALIGN(8);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+
+ /*
+ * And here are the replacement instructions. The linker sticks
+ * them in as binary blobs. The .altinstructions section has enough
+ * data to get their address and length so the kernel can be patched
+ * safely. Note that it is part of the __init region.
+ */
+ .altinstr_replacement : {
+ *(.altinstr_replacement)
+ }
+
/* early.c uses stsi, which requires page aligned data. */
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 09a9e6dfc09f..6048b1c6e580 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -12,6 +12,6 @@ common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqch
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o
-kvm-objs += diag.o gaccess.o guestdbg.o sthyi.o vsie.o
+kvm-objs += diag.o gaccess.o guestdbg.o vsie.o
obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index a4752bf6b526..8fe034beb623 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -18,6 +18,7 @@
#include <asm/kvm_host.h>
#include <asm/asm-offsets.h>
#include <asm/irq.h>
+#include <asm/sysinfo.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -360,6 +361,61 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
+/*
+ * Handle the sthyi instruction that provides the guest with system
+ * information, like current CPU resources available at each level of
+ * the machine.
+ */
+int handle_sthyi(struct kvm_vcpu *vcpu)
+{
+ int reg1, reg2, r = 0;
+ u64 code, addr, cc = 0, rc = 0;
+ struct sthyi_sctns *sctns = NULL;
+
+ if (!test_kvm_facility(vcpu->kvm, 74))
+ return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+
+ kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
+ code = vcpu->run->s.regs.gprs[reg1];
+ addr = vcpu->run->s.regs.gprs[reg2];
+
+ vcpu->stat.instruction_sthyi++;
+ VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
+ trace_kvm_s390_handle_sthyi(vcpu, code, addr);
+
+ if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+ if (code & 0xffff) {
+ cc = 3;
+ rc = 4;
+ goto out;
+ }
+
+ if (addr & ~PAGE_MASK)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+ sctns = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!sctns)
+ return -ENOMEM;
+
+ cc = sthyi_fill(sctns, &rc);
+
+out:
+ if (!cc) {
+ r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
+ if (r) {
+ free_page((unsigned long)sctns);
+ return kvm_s390_inject_prog_cond(vcpu, r);
+ }
+ }
+
+ free_page((unsigned long)sctns);
+ vcpu->run->s.regs.gprs[reg2 + 1] = rc;
+ kvm_s390_set_psw_cc(vcpu, cc);
+ return r;
+}
+
static int handle_operexc(struct kvm_vcpu *vcpu)
{
psw_t oldpsw, newpsw;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index a832ad031cee..329b2843fee2 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2483,11 +2483,11 @@ void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
mci.val = mcck_info->mcic;
if (mci.sr)
- cr14 |= MCCK_CR14_RECOVERY_SUB_MASK;
+ cr14 |= CR14_RECOVERY_SUBMASK;
if (mci.dg)
- cr14 |= MCCK_CR14_DEGRAD_SUB_MASK;
+ cr14 |= CR14_DEGRADATION_SUBMASK;
if (mci.w)
- cr14 |= MCCK_CR14_WARN_SUB_MASK;
+ cr14 |= CR14_WARNING_SUBMASK;
mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
mchk->cr14 = cr14;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 40d0a1a97889..4bc70afe0a10 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1884,8 +1884,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
rc = -ENOMEM;
- ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);
-
kvm->arch.use_esca = 0; /* start with basic SCA */
if (!sclp.has_64bscao)
alloc_flags |= GFP_DMA;
@@ -3283,7 +3281,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
*/
if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
test_kvm_facility(vcpu->kvm, 64) &&
- riccb->valid &&
+ riccb->v &&
!(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
vcpu->arch.sie_block->ecb3 |= ECB3_RI;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 9f8fdd7b2311..10d65dfbc306 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -242,6 +242,8 @@ static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}
+int handle_sthyi(struct kvm_vcpu *vcpu);
+
/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
@@ -268,9 +270,6 @@ void kvm_s390_vsie_destroy(struct kvm *kvm);
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
-/* implemented in sthyi.c */
-int handle_sthyi(struct kvm_vcpu *vcpu);
-
/* implemented in kvm-s390.c */
void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
const struct kvm_s390_vm_tod_clock *gtod);
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index d66751397e72..495c9c4bacc7 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -79,21 +79,25 @@ ENTRY(memset)
ex %r4,0(%r3)
br %r14
.Lmemset_fill:
- stc %r3,0(%r2)
cghi %r4,1
lgr %r1,%r2
- ber %r14
+ je .Lmemset_fill_exit
aghi %r4,-2
- srlg %r3,%r4,8
- ltgr %r3,%r3
+ srlg %r5,%r4,8
+ ltgr %r5,%r5
jz .Lmemset_fill_remainder
.Lmemset_fill_loop:
- mvc 1(256,%r1),0(%r1)
+ stc %r3,0(%r1)
+ mvc 1(255,%r1),0(%r1)
la %r1,256(%r1)
- brctg %r3,.Lmemset_fill_loop
+ brctg %r5,.Lmemset_fill_loop
.Lmemset_fill_remainder:
- larl %r3,.Lmemset_mvc
- ex %r4,0(%r3)
+ stc %r3,0(%r1)
+ larl %r5,.Lmemset_mvc
+ ex %r4,0(%r5)
+ br %r14
+.Lmemset_fill_exit:
+ stc %r3,0(%r1)
br %r14
.Lmemset_xc:
xc 0(1,%r1),0(%r1)
@@ -127,3 +131,47 @@ ENTRY(memcpy)
.Lmemcpy_mvc:
mvc 0(1,%r1),0(%r3)
EXPORT_SYMBOL(memcpy)
+
+/*
+ * __memset16/32/64
+ *
+ * void *__memset16(uint16_t *s, uint16_t v, size_t count)
+ * void *__memset32(uint32_t *s, uint32_t v, size_t count)
+ * void *__memset64(uint64_t *s, uint64_t v, size_t count)
+ */
+.macro __MEMSET bits,bytes,insn
+ENTRY(__memset\bits)
+ ltgr %r4,%r4
+ bzr %r14
+ cghi %r4,\bytes
+ je .L__memset_exit\bits
+ aghi %r4,-(\bytes+1)
+ srlg %r5,%r4,8
+ ltgr %r5,%r5
+ lgr %r1,%r2
+ jz .L__memset_remainder\bits
+.L__memset_loop\bits:
+ \insn %r3,0(%r1)
+ mvc \bytes(256-\bytes,%r1),0(%r1)
+ la %r1,256(%r1)
+ brctg %r5,.L__memset_loop\bits
+.L__memset_remainder\bits:
+ \insn %r3,0(%r1)
+ larl %r5,.L__memset_mvc\bits
+ ex %r4,0(%r5)
+ br %r14
+.L__memset_exit\bits:
+ \insn %r3,0(%r2)
+ br %r14
+.L__memset_mvc\bits:
+ mvc \bytes(1,%r1),0(%r1)
+.endm
+
+__MEMSET 16,2,sth
+EXPORT_SYMBOL(__memset16)
+
+__MEMSET 32,4,st
+EXPORT_SYMBOL(__memset32)
+
+__MEMSET 64,8,stg
+EXPORT_SYMBOL(__memset64)
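Functionally the three routines are plain fill loops over 2, 4 and 8 byte cells, which is what the memset64() calls in the vdso.c hunk earlier rely on; the MVC-based loop just propagates the pattern 256 bytes at a time. A C sketch of the intended semantics of the 64-bit variant (not the actual kernel wrapper) is:

/* Reference-semantics sketch for __memset64(): store v into count 64-bit cells. */
#include <stddef.h>
#include <stdint.h>

static void *memset64_sketch(uint64_t *s, uint64_t v, size_t count)
{
	uint64_t *p = s;

	while (count--)
		*p++ = v;	/* the assembly does this block-wise with MVC instead */
	return s;
}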
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 1dc85f552f48..84c0faeaf7ea 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -9,8 +9,11 @@
#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
+#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <asm/alternative.h>
#include <asm/io.h>
int spin_retry = -1;
@@ -33,14 +36,46 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);
+struct spin_wait {
+ struct spin_wait *next, *prev;
+ int node_id;
+} __aligned(32);
+
+static DEFINE_PER_CPU_ALIGNED(struct spin_wait, spin_wait[4]);
+
+#define _Q_LOCK_CPU_OFFSET 0
+#define _Q_LOCK_STEAL_OFFSET 16
+#define _Q_TAIL_IDX_OFFSET 18
+#define _Q_TAIL_CPU_OFFSET 20
+
+#define _Q_LOCK_CPU_MASK 0x0000ffff
+#define _Q_LOCK_STEAL_ADD 0x00010000
+#define _Q_LOCK_STEAL_MASK 0x00030000
+#define _Q_TAIL_IDX_MASK 0x000c0000
+#define _Q_TAIL_CPU_MASK 0xfff00000
+
+#define _Q_LOCK_MASK (_Q_LOCK_CPU_MASK | _Q_LOCK_STEAL_MASK)
+#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
+void arch_spin_lock_setup(int cpu)
+{
+ struct spin_wait *node;
+ int ix;
+
+ node = per_cpu_ptr(&spin_wait[0], cpu);
+ for (ix = 0; ix < 4; ix++, node++) {
+ memset(node, 0, sizeof(*node));
+ node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
+ (ix << _Q_TAIL_IDX_OFFSET);
+ }
+}
+
static inline int arch_load_niai4(int *lock)
{
int owner;
asm volatile(
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- " .long 0xb2fa0040\n" /* NIAI 4 */
-#endif
+ ALTERNATIVE("", ".long 0xb2fa0040", 49) /* NIAI 4 */
" l %0,%1\n"
: "=d" (owner) : "Q" (*lock) : "memory");
return owner;
@@ -51,9 +86,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
int expected = old;
asm volatile(
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
- " .long 0xb2fa0080\n" /* NIAI 8 */
-#endif
+ ALTERNATIVE("", ".long 0xb2fa0080", 49) /* NIAI 8 */
" cs %0,%3,%1\n"
: "=d" (old), "=Q" (*lock)
: "0" (old), "d" (new), "Q" (*lock)
@@ -61,75 +94,160 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
return expected == old;
}
-void arch_spin_lock_wait(arch_spinlock_t *lp)
+static inline struct spin_wait *arch_spin_decode_tail(int lock)
{
- int cpu = SPINLOCK_LOCKVAL;
- int owner, count;
+ int ix, cpu;
+
+ ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
+ cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
+ return per_cpu_ptr(&spin_wait[ix], cpu - 1);
+}
+
+static inline int arch_spin_yield_target(int lock, struct spin_wait *node)
+{
+ if (lock & _Q_LOCK_CPU_MASK)
+ return lock & _Q_LOCK_CPU_MASK;
+ if (node == NULL || node->prev == NULL)
+ return 0; /* 0 -> no target cpu */
+ while (node->prev)
+ node = node->prev;
+ return node->node_id >> _Q_TAIL_CPU_OFFSET;
+}
+
+static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
+{
+ struct spin_wait *node, *next;
+ int lockval, ix, node_id, tail_id, old, new, owner, count;
+
+ ix = S390_lowcore.spinlock_index++;
+ barrier();
+ lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */
+ node = this_cpu_ptr(&spin_wait[ix]);
+ node->prev = node->next = NULL;
+ node_id = node->node_id;
+
+ /* Enqueue the node for this CPU in the spinlock wait queue */
+ while (1) {
+ old = READ_ONCE(lp->lock);
+ if ((old & _Q_LOCK_CPU_MASK) == 0 &&
+ (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
+ /*
+ * The lock is free but there may be waiters.
+ * With no waiters simply take the lock; if there
+ * are waiters, try to steal the lock. The lock may
+ * be stolen three times before the next queued
+ * waiter will get the lock.
+ */
+ new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
+ if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+ /* Got the lock */
+ goto out;
+ /* lock passing in progress */
+ continue;
+ }
+ /* Make the node of this CPU the new tail. */
+ new = node_id | (old & _Q_LOCK_MASK);
+ if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+ break;
+ }
+ /* Set the 'next' pointer of the tail node in the queue */
+ tail_id = old & _Q_TAIL_MASK;
+ if (tail_id != 0) {
+ node->prev = arch_spin_decode_tail(tail_id);
+ WRITE_ONCE(node->prev->next, node);
+ }
/* Pass the virtual CPU to the lock holder if it is not running */
- owner = arch_load_niai4(&lp->lock);
- if (owner && arch_vcpu_is_preempted(~owner))
- smp_yield_cpu(~owner);
+ owner = arch_spin_yield_target(old, node);
+ if (owner && arch_vcpu_is_preempted(owner - 1))
+ smp_yield_cpu(owner - 1);
+ /* Spin on the CPU local node->prev pointer */
+ if (tail_id != 0) {
+ count = spin_retry;
+ while (READ_ONCE(node->prev) != NULL) {
+ if (count-- >= 0)
+ continue;
+ count = spin_retry;
+ /* Query running state of lock holder again. */
+ owner = arch_spin_yield_target(old, node);
+ if (owner && arch_vcpu_is_preempted(owner - 1))
+ smp_yield_cpu(owner - 1);
+ }
+ }
+
+ /* Spin on the lock value in the spinlock_t */
count = spin_retry;
while (1) {
- owner = arch_load_niai4(&lp->lock);
- /* Try to get the lock if it is free. */
+ old = READ_ONCE(lp->lock);
+ owner = old & _Q_LOCK_CPU_MASK;
if (!owner) {
- if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
- return;
+ tail_id = old & _Q_TAIL_MASK;
+ new = ((tail_id != node_id) ? tail_id : 0) | lockval;
+ if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+ /* Got the lock */
+ break;
continue;
}
if (count-- >= 0)
continue;
count = spin_retry;
- /*
- * For multiple layers of hypervisors, e.g. z/VM + LPAR
- * yield the CPU unconditionally. For LPAR rely on the
- * sense running status.
- */
- if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
- smp_yield_cpu(~owner);
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
+ smp_yield_cpu(owner - 1);
}
+
+ /* Pass lock_spin job to next CPU in the queue */
+ if (node_id && tail_id != node_id) {
+ /* Wait until the next CPU has set up the 'next' pointer */
+ while ((next = READ_ONCE(node->next)) == NULL)
+ ;
+ next->prev = NULL;
+ }
+
+ out:
+ S390_lowcore.spinlock_index--;
}
-EXPORT_SYMBOL(arch_spin_lock_wait);
-void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
+static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
- int cpu = SPINLOCK_LOCKVAL;
- int owner, count;
+ int lockval, old, new, owner, count;
- local_irq_restore(flags);
+ lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */
/* Pass the virtual CPU to the lock holder if it is not running */
- owner = arch_load_niai4(&lp->lock);
- if (owner && arch_vcpu_is_preempted(~owner))
- smp_yield_cpu(~owner);
+ owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
+ if (owner && arch_vcpu_is_preempted(owner - 1))
+ smp_yield_cpu(owner - 1);
count = spin_retry;
while (1) {
- owner = arch_load_niai4(&lp->lock);
+ old = arch_load_niai4(&lp->lock);
+ owner = old & _Q_LOCK_CPU_MASK;
/* Try to get the lock if it is free. */
if (!owner) {
- local_irq_disable();
- if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
- return;
- local_irq_restore(flags);
+ new = (old & _Q_TAIL_MASK) | lockval;
+ if (arch_cmpxchg_niai8(&lp->lock, old, new))
+ /* Got the lock */
+ return;
continue;
}
if (count-- >= 0)
continue;
count = spin_retry;
- /*
- * For multiple layers of hypervisors, e.g. z/VM + LPAR
- * yield the CPU unconditionally. For LPAR rely on the
- * sense running status.
- */
- if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
- smp_yield_cpu(~owner);
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
+ smp_yield_cpu(owner - 1);
}
}
-EXPORT_SYMBOL(arch_spin_lock_wait_flags);
+
+void arch_spin_lock_wait(arch_spinlock_t *lp)
+{
+ /* Queued spinlocks on dedicated CPUs, else classic + niai (steal time >= 10%) */
+ if (test_cpu_flag(CIF_DEDICATED_CPU))
+ arch_spin_lock_queued(lp);
+ else
+ arch_spin_lock_classic(lp);
+}
+EXPORT_SYMBOL(arch_spin_lock_wait);
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
@@ -148,126 +266,59 @@ int arch_spin_trylock_retry(arch_spinlock_t *lp)
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
-void _raw_read_lock_wait(arch_rwlock_t *rw)
+void arch_read_lock_wait(arch_rwlock_t *rw)
{
- int count = spin_retry;
- int owner, old;
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
- __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
-#endif
- owner = 0;
- while (1) {
- if (count-- <= 0) {
- if (owner && arch_vcpu_is_preempted(~owner))
- smp_yield_cpu(~owner);
- count = spin_retry;
- }
- old = ACCESS_ONCE(rw->lock);
- owner = ACCESS_ONCE(rw->owner);
- if (old < 0)
- continue;
- if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
- return;
+ if (unlikely(in_interrupt())) {
+ while (READ_ONCE(rw->cnts) & 0x10000)
+ barrier();
+ return;
}
+
+ /* Remove this reader again to allow recursive read locking */
+ __atomic_add_const(-1, &rw->cnts);
+ /* Put the reader into the wait queue */
+ arch_spin_lock(&rw->wait);
+ /* Now add this reader to the count value again */
+ __atomic_add_const(1, &rw->cnts);
+ /* Loop until the writer is done */
+ while (READ_ONCE(rw->cnts) & 0x10000)
+ barrier();
+ arch_spin_unlock(&rw->wait);
}
-EXPORT_SYMBOL(_raw_read_lock_wait);
+EXPORT_SYMBOL(arch_read_lock_wait);
-int _raw_read_trylock_retry(arch_rwlock_t *rw)
+void arch_write_lock_wait(arch_rwlock_t *rw)
{
- int count = spin_retry;
int old;
- while (count-- > 0) {
- old = ACCESS_ONCE(rw->lock);
- if (old < 0)
- continue;
- if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL(_raw_read_trylock_retry);
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ /* Add this CPU to the write waiters */
+ __atomic_add(0x20000, &rw->cnts);
-void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
-{
- int count = spin_retry;
- int owner, old;
+ /* Put the writer into the wait queue */
+ arch_spin_lock(&rw->wait);
- owner = 0;
while (1) {
- if (count-- <= 0) {
- if (owner && arch_vcpu_is_preempted(~owner))
- smp_yield_cpu(~owner);
- count = spin_retry;
- }
- old = ACCESS_ONCE(rw->lock);
- owner = ACCESS_ONCE(rw->owner);
- smp_mb();
- if (old >= 0) {
- prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
- old = prev;
- }
- if ((old & 0x7fffffff) == 0 && prev >= 0)
+ old = READ_ONCE(rw->cnts);
+ if ((old & 0x1ffff) == 0 &&
+ __atomic_cmpxchg_bool(&rw->cnts, old, old | 0x10000))
+ /* Got the lock */
break;
+ barrier();
}
-}
-EXPORT_SYMBOL(_raw_write_lock_wait);
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-void _raw_write_lock_wait(arch_rwlock_t *rw)
-{
- int count = spin_retry;
- int owner, old, prev;
- prev = 0x80000000;
- owner = 0;
- while (1) {
- if (count-- <= 0) {
- if (owner && arch_vcpu_is_preempted(~owner))
- smp_yield_cpu(~owner);
- count = spin_retry;
- }
- old = ACCESS_ONCE(rw->lock);
- owner = ACCESS_ONCE(rw->owner);
- if (old >= 0 &&
- __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
- prev = old;
- else
- smp_mb();
- if ((old & 0x7fffffff) == 0 && prev >= 0)
- break;
- }
+ arch_spin_unlock(&rw->wait);
}
-EXPORT_SYMBOL(_raw_write_lock_wait);
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+EXPORT_SYMBOL(arch_write_lock_wait);
-int _raw_write_trylock_retry(arch_rwlock_t *rw)
+void arch_spin_relax(arch_spinlock_t *lp)
{
- int count = spin_retry;
- int old;
+ int cpu;
- while (count-- > 0) {
- old = ACCESS_ONCE(rw->lock);
- if (old)
- continue;
- if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL(_raw_write_trylock_retry);
-
-void arch_lock_relax(int cpu)
-{
+ cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
if (!cpu)
return;
- if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
+ if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
return;
- smp_yield_cpu(~cpu);
+ smp_yield_cpu(cpu - 1);
}
-EXPORT_SYMBOL(arch_lock_relax);
+EXPORT_SYMBOL(arch_spin_relax);
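A note on the new rwlock slow paths above: the patch keeps the whole lock state in the single rw->cnts word and only uses the arch_spinlock_t rw->wait to queue waiters. The bit layout is not spelled out in the hunk; reading it off the constants (0x10000, 0x20000, 0x1ffff), a plausible split is sketched below. The names are illustrative, not from the patch.

	/*
	 * Assumed rw->cnts layout (inferred from the constants used above):
	 *   bits  0..15 (0x0ffff) - active reader count (__atomic_add_const(+/-1))
	 *   bit   16    (0x10000) - a writer holds the lock
	 *   bits 17..   (0x20000) - number of waiting writers
	 */
	static inline int write_lock_available(unsigned int cnts)	/* illustrative helper */
	{
		/* no readers and no writer: exactly the (old & 0x1ffff) == 0 test above */
		return (cnts & 0x1ffff) == 0;
	}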
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index dbf2fdad2724..a10e11f7a5f7 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(strlen);
*
* returns the minimum of the length of @s and @n
*/
-size_t strnlen(const char * s, size_t n)
+size_t strnlen(const char *s, size_t n)
{
return __strnend(s, n) - s;
}
@@ -195,14 +195,14 @@ EXPORT_SYMBOL(strncat);
/**
* strcmp - Compare two strings
- * @cs: One string
- * @ct: Another string
+ * @s1: One string
+ * @s2: Another string
*
- * returns 0 if @cs and @ct are equal,
- * < 0 if @cs is less than @ct
- * > 0 if @cs is greater than @ct
+ * returns 0 if @s1 and @s2 are equal,
+ * < 0 if @s1 is less than @s2
+ * > 0 if @s1 is greater than @s2
*/
-int strcmp(const char *cs, const char *ct)
+int strcmp(const char *s1, const char *s2)
{
register int r0 asm("0") = 0;
int ret = 0;
@@ -214,7 +214,7 @@ int strcmp(const char *cs, const char *ct)
" ic %1,0(%3)\n"
" sr %0,%1\n"
"1:"
- : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct)
+ : "+d" (ret), "+d" (r0), "+a" (s1), "+a" (s2)
: : "cc", "memory");
return ret;
}
@@ -225,7 +225,7 @@ EXPORT_SYMBOL(strcmp);
* @s: The string to be searched
* @c: The character to search for
*/
-char * strrchr(const char * s, int c)
+char *strrchr(const char *s, int c)
{
size_t len = __strend(s) - s;
@@ -261,7 +261,7 @@ static inline int clcle(const char *s1, unsigned long l1,
* @s1: The string to be searched
* @s2: The string to search for
*/
-char * strstr(const char * s1,const char * s2)
+char *strstr(const char *s1, const char *s2)
{
int l1, l2;
@@ -307,15 +307,15 @@ EXPORT_SYMBOL(memchr);
/**
* memcmp - Compare two areas of memory
- * @cs: One area of memory
- * @ct: Another area of memory
+ * @s1: One area of memory
+ * @s2: Another area of memory
* @count: The size of the area.
*/
-int memcmp(const void *cs, const void *ct, size_t n)
+int memcmp(const void *s1, const void *s2, size_t n)
{
int ret;
- ret = clcle(cs, n, ct, n);
+ ret = clcle(s1, n, s2, n);
if (ret)
ret = ret == 1 ? -1 : 1;
return ret;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 41ba9bd53e48..817c9e16e83e 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -145,8 +145,8 @@ void __init mem_init(void)
void free_initmem(void)
{
- __set_memory((unsigned long) _sinittext,
- (_einittext - _sinittext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_sinittext,
+ (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RW | SET_MEMORY_NX);
free_initmem_default(POISON_FREE_INITMEM);
}
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index cc2faffa7d6e..4ad4c4f77b4d 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -159,13 +159,13 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
struct page *page;
- unsigned long *table;
+ u64 *table;
page = alloc_page(GFP_KERNEL);
if (page) {
- table = (unsigned long *) page_to_phys(page);
- clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
- clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+ table = (u64 *)page_to_phys(page);
+ memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
+ memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
}
return page;
}
@@ -222,12 +222,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
if (mm_alloc_pgste(mm)) {
/* Return 4K page table with PGSTEs */
atomic_set(&page->_mapcount, 3);
- clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
- clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+ memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
+ memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
} else {
/* Return the first 2K fragment of the page */
atomic_set(&page->_mapcount, 1);
- clear_table(table, _PAGE_INVALID, PAGE_SIZE);
+ memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
spin_lock_bh(&mm->context.lock);
list_add(&page->lru, &mm->context.pgtable_list);
spin_unlock_bh(&mm->context.lock);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index f2ada0bc08e6..3316d463fc29 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -60,7 +60,7 @@ pte_t __ref *vmem_pte_alloc(void)
pte = (pte_t *) memblock_alloc(size, size);
if (!pte)
return NULL;
- clear_table((unsigned long *) pte, _PAGE_INVALID, size);
+ memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
return pte;
}
@@ -403,17 +403,17 @@ void __init vmem_map_init(void)
for_each_memblock(memory, reg)
vmem_add_mem(reg->base, reg->size);
- __set_memory((unsigned long) _stext,
- (_etext - _stext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_stext,
+ (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
- __set_memory((unsigned long) _etext,
- (_eshared - _etext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_etext,
+ (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
SET_MEMORY_RO);
- __set_memory((unsigned long) _sinittext,
- (_einittext - _sinittext) >> PAGE_SHIFT,
+ __set_memory((unsigned long)_sinittext,
+ (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
pr_info("Write protected kernel read-only data: %luk\n",
- (_eshared - _stext) >> 10);
+ (unsigned long)(__end_rodata - _stext) >> 10);
}
/*
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index 7fa55ccffe48..5e1e5133132d 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -53,10 +53,13 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
*
* We get 160 bytes stack space from calling function, but only use
* 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt.
+ *
+ * The stack size used by the BPF program ("BPF stack" above) is passed
+ * via "aux->stack_depth".
*/
-#define STK_SPACE (MAX_BPF_STACK + 8 + 8 + 4 + 4 + 160)
+#define STK_SPACE_ADD (8 + 8 + 4 + 4 + 160)
#define STK_160_UNUSED (160 - 12 * 8)
-#define STK_OFF (STK_SPACE - STK_160_UNUSED)
+#define STK_OFF (STK_SPACE_ADD - STK_160_UNUSED)
#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */
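To make the new per-program frame sizing concrete, here is the arithmetic implied by the macros above (a hand-worked example, not text from the patch):

	/*
	 * STK_SPACE_ADD  = 8 + 8 + 4 + 4 + 160             = 184
	 * STK_160_UNUSED = 160 - 12 * 8                     =  64
	 * STK_OFF        = STK_SPACE_ADD - STK_160_UNUSED   = 120
	 *
	 * The prologue now emits "aghi %r15,-(STK_OFF + stack_depth)", so a
	 * program verified with aux->stack_depth == 512 still gets the old
	 * 632-byte frame (120 + 512), while one using only 64 bytes of BPF
	 * stack gets away with a 184-byte frame.
	 */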
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index b15cd2f0320f..e81c16838b90 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -320,12 +320,12 @@ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
/*
* Restore registers from "rs" (register start) to "re" (register end) on stack
*/
-static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
+static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
{
u32 off = STK_OFF_R6 + (rs - 6) * 8;
if (jit->seen & SEEN_STACK)
- off += STK_OFF;
+ off += STK_OFF + stack_depth;
if (rs == re)
/* lg %rs,off(%r15) */
@@ -369,7 +369,7 @@ static int get_end(struct bpf_jit *jit, int start)
* Save and restore clobbered registers (6-15) on stack.
* We save/restore registers in chunks with gap >= 2 registers.
*/
-static void save_restore_regs(struct bpf_jit *jit, int op)
+static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
{
int re = 6, rs;
@@ -382,7 +382,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op)
if (op == REGS_SAVE)
save_regs(jit, rs, re);
else
- restore_regs(jit, rs, re);
+ restore_regs(jit, rs, re, stack_depth);
re++;
} while (re <= 15);
}
@@ -414,7 +414,7 @@ static void emit_load_skb_data_hlen(struct bpf_jit *jit)
* Save registers and create stack frame if necessary.
* See stack frame layout description in "bpf_jit.h"!

*/
-static void bpf_jit_prologue(struct bpf_jit *jit)
+static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
{
if (jit->seen & SEEN_TAIL_CALL) {
/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
@@ -427,7 +427,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
/* Tail calls have to skip above initialization */
jit->tail_call_start = jit->prg;
/* Save registers */
- save_restore_regs(jit, REGS_SAVE);
+ save_restore_regs(jit, REGS_SAVE, stack_depth);
/* Setup literal pool */
if (jit->seen & SEEN_LITERAL) {
/* basr %r13,0 */
@@ -442,7 +442,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
/* aghi %r15,-STK_OFF */
- EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF);
+ EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
if (jit->seen & SEEN_FUNC)
/* stg %w1,152(%r15) (backchain) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
@@ -459,7 +459,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
/*
* Function epilogue
*/
-static void bpf_jit_epilogue(struct bpf_jit *jit)
+static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
{
/* Return 0 */
if (jit->seen & SEEN_RET0) {
@@ -471,7 +471,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
/* Load exit code: lgr %r2,%b0 */
EMIT4(0xb9040000, REG_2, BPF_REG_0);
/* Restore registers */
- save_restore_regs(jit, REGS_RESTORE);
+ save_restore_regs(jit, REGS_RESTORE, stack_depth);
/* br %r14 */
_EMIT2(0x07fe);
}
@@ -1019,7 +1019,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
*/
if (jit->seen & SEEN_STACK)
- off = STK_OFF_TCCNT + STK_OFF;
+ off = STK_OFF_TCCNT + STK_OFF + fp->aux->stack_depth;
else
off = STK_OFF_TCCNT;
/* lhi %w0,1 */
@@ -1047,7 +1047,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
/*
* Restore registers before calling function
*/
- save_restore_regs(jit, REGS_RESTORE);
+ save_restore_regs(jit, REGS_RESTORE, fp->aux->stack_depth);
/*
* goto *(prog->bpf_func + tail_call_start);
@@ -1273,7 +1273,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
jit->lit = jit->lit_start;
jit->prg = 0;
- bpf_jit_prologue(jit);
+ bpf_jit_prologue(jit, fp->aux->stack_depth);
for (i = 0; i < fp->len; i += insn_count) {
insn_count = bpf_jit_insn(jit, fp, i);
if (insn_count < 0)
@@ -1281,7 +1281,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
/* Next instruction address */
jit->addrs[i + insn_count] = jit->prg;
}
- bpf_jit_epilogue(jit);
+ bpf_jit_epilogue(jit, fp->aux->stack_depth);
jit->lit_start = jit->prg;
jit->size = jit->lit;
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index a25d95a6612d..0fe649c0d542 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -368,7 +368,8 @@ static void zpci_irq_handler(struct airq_struct *airq)
/* End of second scan with interrupts on. */
break;
/* First scan complete, reenable interrupts. */
- zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
+ break;
si = 0;
continue;
}
@@ -956,7 +957,7 @@ static int __init pci_base_init(void)
if (!s390_pci_probe)
return 0;
- if (!test_facility(69) || !test_facility(71) || !test_facility(72))
+ if (!test_facility(69) || !test_facility(71))
return 0;
rc = zpci_debug_init();
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index ea34086c8674..81b840bc6e4e 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -7,6 +7,7 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
+#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
#include <asm/processor.h>
@@ -91,11 +92,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
}
/* Set Interruption Controls */
-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
{
+ if (!test_facility(72))
+ return -EIO;
asm volatile (
" .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
+ return 0;
}
/* PCI Load */
diff --git a/arch/s390/tools/Makefile b/arch/s390/tools/Makefile
index d54c149fbb6b..2ebf2872cc16 100644
--- a/arch/s390/tools/Makefile
+++ b/arch/s390/tools/Makefile
@@ -4,11 +4,21 @@
#
hostprogs-y += gen_facilities
+hostprogs-y += gen_opcode_table
+
HOSTCFLAGS_gen_facilities.o += -Wall $(LINUXINCLUDE)
+HOSTCFLAGS_gen_opcode_table.o += -Wall $(LINUXINCLUDE)
define filechk_facilities.h
$(obj)/gen_facilities
endef
+define filechk_dis.h
+ ( $(obj)/gen_opcode_table < $(srctree)/arch/$(ARCH)/tools/opcodes.txt )
+endef
+
include/generated/facilities.h: $(obj)/gen_facilities FORCE
$(call filechk,facilities.h)
+
+include/generated/dis.h: $(obj)/gen_opcode_table FORCE
+ $(call filechk,dis.h,__FUN)
diff --git a/arch/s390/tools/gen_opcode_table.c b/arch/s390/tools/gen_opcode_table.c
new file mode 100644
index 000000000000..01d4c5a4bfe9
--- /dev/null
+++ b/arch/s390/tools/gen_opcode_table.c
@@ -0,0 +1,336 @@
+/*
+ * Generate opcode table initializers for the in-kernel disassembler.
+ *
+ * Copyright IBM Corp. 2017
+ *
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <stdio.h>
+
+#define STRING_SIZE_MAX 20
+
+struct insn_type {
+ unsigned char byte;
+ unsigned char mask;
+ char **format;
+};
+
+struct insn {
+ struct insn_type *type;
+ char opcode[STRING_SIZE_MAX];
+ char name[STRING_SIZE_MAX];
+ char upper[STRING_SIZE_MAX];
+ char format[STRING_SIZE_MAX];
+ unsigned int name_len;
+};
+
+struct insn_group {
+ struct insn_type *type;
+ int offset;
+ int count;
+ char opcode[2];
+};
+
+struct insn_format {
+ char *format;
+ int type;
+};
+
+struct gen_opcode {
+ struct insn *insn;
+ int nr;
+ struct insn_group *group;
+ int nr_groups;
+};
+
+/*
+ * Table of instruction format types. Each opcode is defined with at
+ * least one byte (two nibbles), three nibbles, or two bytes (four
+ * nibbles).
+ * The byte member of each instruction format type entry defines
+ * within which byte of an instruction the third (and fourth) nibble
+ * of an opcode can be found. The mask member is the AND mask that
+ * needs to be applied to this byte in order to get the third (and
+ * fourth) nibble of the opcode.
+ * The format array defines all instruction formats (as defined in the
+ * Principles of Operation) which have the same position of the opcode
+ * nibbles.
+ * A special case is instruction formats with 1-byte opcodes: there the
+ * byte member is always zero, so the mask is applied to the (only)
+ * byte that contains the opcode.
+ */
+static struct insn_type insn_type_table[] = {
+ {
+ .byte = 0,
+ .mask = 0xff,
+ .format = (char *[]) {
+ "MII",
+ "RR",
+ "RS",
+ "RSI",
+ "RX",
+ "SI",
+ "SMI",
+ "SS",
+ NULL,
+ },
+ },
+ {
+ .byte = 1,
+ .mask = 0x0f,
+ .format = (char *[]) {
+ "RI",
+ "RIL",
+ "SSF",
+ NULL,
+ },
+ },
+ {
+ .byte = 1,
+ .mask = 0xff,
+ .format = (char *[]) {
+ "E",
+ "IE",
+ "RRE",
+ "RRF",
+ "RRR",
+ "S",
+ "SIL",
+ "SSE",
+ NULL,
+ },
+ },
+ {
+ .byte = 5,
+ .mask = 0xff,
+ .format = (char *[]) {
+ "RIE",
+ "RIS",
+ "RRS",
+ "RSE",
+ "RSL",
+ "RSY",
+ "RXE",
+ "RXF",
+ "RXY",
+ "SIY",
+ "VRI",
+ "VRR",
+ "VRS",
+ "VRV",
+ "VRX",
+ "VSI",
+ NULL,
+ },
+ },
+};
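To see how a single opcodes.txt line travels through this table, here is a hand-traced example (illustrative, not generated output):

	/*
	 * "b2fa niai IE_UU": insn_format_to_type() takes the part before the
	 * first '_' ("IE"), which matches the { .byte = 1, .mask = 0xff }
	 * entry above.  Because type->byte != 0, print_opcode() skips the
	 * leading two hex digits and emits .opfrag = 0xfa, and the group
	 * emitted by print_opcode_table_offsets() keys on the first byte 0xb2.
	 *
	 * "0a svc RR_U0" has base format "RR", i.e. the { .byte = 0 } entry,
	 * so it is deferred to the trailing "1-byte opcode instructions"
	 * section of OPCODE_TABLE_INITIALIZER.
	 */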
+
+static struct insn_type *insn_format_to_type(char *format)
+{
+ char tmp[STRING_SIZE_MAX];
+ char *base_format, **ptr;
+ int i;
+
+ strcpy(tmp, format);
+ base_format = tmp;
+ base_format = strsep(&base_format, "_");
+ for (i = 0; i < sizeof(insn_type_table) / sizeof(insn_type_table[0]); i++) {
+ ptr = insn_type_table[i].format;
+ while (*ptr) {
+ if (!strcmp(base_format, *ptr))
+ return &insn_type_table[i];
+ ptr++;
+ }
+ }
+ exit(EXIT_FAILURE);
+}
+
+static void read_instructions(struct gen_opcode *desc)
+{
+ struct insn insn;
+ int rc, i;
+
+ while (1) {
+ rc = scanf("%s %s %s", insn.opcode, insn.name, insn.format);
+ if (rc == EOF)
+ break;
+ if (rc != 3)
+ exit(EXIT_FAILURE);
+ insn.type = insn_format_to_type(insn.format);
+ insn.name_len = strlen(insn.name);
+ for (i = 0; i <= insn.name_len; i++)
+ insn.upper[i] = toupper((unsigned char)insn.name[i]);
+ desc->nr++;
+ desc->insn = realloc(desc->insn, desc->nr * sizeof(*desc->insn));
+ if (!desc->insn)
+ exit(EXIT_FAILURE);
+ desc->insn[desc->nr - 1] = insn;
+ }
+}
+
+static int cmpformat(const void *a, const void *b)
+{
+ return strcmp(((struct insn *)a)->format, ((struct insn *)b)->format);
+}
+
+static void print_formats(struct gen_opcode *desc)
+{
+ char *format;
+ int i, count;
+
+ qsort(desc->insn, desc->nr, sizeof(*desc->insn), cmpformat);
+ format = "";
+ count = 0;
+ printf("enum {\n");
+ for (i = 0; i < desc->nr; i++) {
+ if (!strcmp(format, desc->insn[i].format))
+ continue;
+ count++;
+ format = desc->insn[i].format;
+ printf("\tINSTR_%s,\n", format);
+ }
+ printf("}; /* %d */\n\n", count);
+}
+
+static int cmp_long_insn(const void *a, const void *b)
+{
+ return strcmp(((struct insn *)a)->name, ((struct insn *)b)->name);
+}
+
+static void print_long_insn(struct gen_opcode *desc)
+{
+ struct insn *insn;
+ int i, count;
+
+ qsort(desc->insn, desc->nr, sizeof(*desc->insn), cmp_long_insn);
+ count = 0;
+ printf("enum {\n");
+ for (i = 0; i < desc->nr; i++) {
+ insn = &desc->insn[i];
+ if (insn->name_len < 6)
+ continue;
+ printf("\tLONG_INSN_%s,\n", insn->upper);
+ count++;
+ }
+ printf("}; /* %d */\n\n", count);
+
+ printf("#define LONG_INSN_INITIALIZER { \\\n");
+ for (i = 0; i < desc->nr; i++) {
+ insn = &desc->insn[i];
+ if (insn->name_len < 6)
+ continue;
+ printf("\t[LONG_INSN_%s] = \"%s\", \\\n", insn->upper, insn->name);
+ }
+ printf("}\n\n");
+}
+
+static void print_opcode(struct insn *insn, int nr)
+{
+ char *opcode;
+
+ opcode = insn->opcode;
+ if (insn->type->byte != 0)
+ opcode += 2;
+ printf("\t[%4d] = { .opfrag = 0x%s, .format = INSTR_%s, ", nr, opcode, insn->format);
+ if (insn->name_len < 6)
+ printf(".name = \"%s\" ", insn->name);
+ else
+ printf(".offset = LONG_INSN_%s ", insn->upper);
+ printf("}, \\\n");
+}
+
+static void add_to_group(struct gen_opcode *desc, struct insn *insn, int offset)
+{
+ struct insn_group *group;
+
+ group = desc->group ? &desc->group[desc->nr_groups - 1] : NULL;
+ if (group && (!strncmp(group->opcode, insn->opcode, 2) || group->type->byte == 0)) {
+ group->count++;
+ return;
+ }
+ desc->nr_groups++;
+ desc->group = realloc(desc->group, desc->nr_groups * sizeof(*desc->group));
+ if (!desc->group)
+ exit(EXIT_FAILURE);
+ group = &desc->group[desc->nr_groups - 1];
+ strncpy(group->opcode, insn->opcode, 2);
+ group->type = insn->type;
+ group->offset = offset;
+ group->count = 1;
+}
+
+static int cmpopcode(const void *a, const void *b)
+{
+ return strcmp(((struct insn *)a)->opcode, ((struct insn *)b)->opcode);
+}
+
+static void print_opcode_table(struct gen_opcode *desc)
+{
+ char opcode[2] = "";
+ struct insn *insn;
+ int i, offset;
+
+ qsort(desc->insn, desc->nr, sizeof(*desc->insn), cmpopcode);
+ printf("#define OPCODE_TABLE_INITIALIZER { \\\n");
+ offset = 0;
+ for (i = 0; i < desc->nr; i++) {
+ insn = &desc->insn[i];
+ if (insn->type->byte == 0)
+ continue;
+ add_to_group(desc, insn, offset);
+ if (strncmp(opcode, insn->opcode, 2)) {
+ strncpy(opcode, insn->opcode, 2);
+ printf("\t/* %.2s */ \\\n", opcode);
+ }
+ print_opcode(insn, offset);
+ offset++;
+ }
+ printf("\t/* 1-byte opcode instructions */ \\\n");
+ for (i = 0; i < desc->nr; i++) {
+ insn = &desc->insn[i];
+ if (insn->type->byte != 0)
+ continue;
+ add_to_group(desc, insn, offset);
+ print_opcode(insn, offset);
+ offset++;
+ }
+ printf("}\n\n");
+}
+
+static void print_opcode_table_offsets(struct gen_opcode *desc)
+{
+ struct insn_group *group;
+ int i;
+
+ printf("#define OPCODE_OFFSET_INITIALIZER { \\\n");
+ for (i = 0; i < desc->nr_groups; i++) {
+ group = &desc->group[i];
+ printf("\t{ .opcode = 0x%.2s, .mask = 0x%02x, .byte = %d, .offset = %d, .count = %d }, \\\n",
+ group->opcode, group->type->mask, group->type->byte, group->offset, group->count);
+ }
+ printf("}\n\n");
+}
+
+int main(int argc, char **argv)
+{
+ struct gen_opcode _desc = { 0 };
+ struct gen_opcode *desc = &_desc;
+
+ read_instructions(desc);
+ printf("#ifndef __S390_GENERATED_DIS_H__\n");
+ printf("#define __S390_GENERATED_DIS_H__\n");
+ printf("/*\n");
+ printf(" * DO NOT MODIFY.\n");
+ printf(" *\n");
+ printf(" * This file was generated by %s\n", __FILE__);
+ printf(" */\n\n");
+ print_formats(desc);
+ print_long_insn(desc);
+ print_opcode_table(desc);
+ print_opcode_table_offsets(desc);
+ printf("#endif\n");
+ exit(EXIT_SUCCESS);
+}
diff --git a/arch/s390/tools/opcodes.txt b/arch/s390/tools/opcodes.txt
new file mode 100644
index 000000000000..1cbed82cd17b
--- /dev/null
+++ b/arch/s390/tools/opcodes.txt
@@ -0,0 +1,1183 @@
+0101 pr E
+0102 upt E
+0104 ptff E
+0107 sckpf E
+010a pfpo E
+010b tam E
+010c sam24 E
+010d sam31 E
+010e sam64 E
+01ff trap2 E
+04 spm RR_R0
+05 balr RR_RR
+06 bctr RR_RR
+07 bcr RR_UR
+0a svc RR_U0
+0b bsm RR_RR
+0c bassm RR_RR
+0d basr RR_RR
+0e mvcl RR_RR
+0f clcl RR_RR
+10 lpr RR_RR
+11 lnr RR_RR
+12 ltr RR_RR
+13 lcr RR_RR
+14 nr RR_RR
+15 clr RR_RR
+16 or RR_RR
+17 xr RR_RR
+18 lr RR_RR
+19 cr RR_RR
+1a ar RR_RR
+1b sr RR_RR
+1c mr RR_RR
+1d dr RR_RR
+1e alr RR_RR
+1f slr RR_RR
+20 lpdr RR_FF
+21 lndr RR_FF
+22 ltdr RR_FF
+23 lcdr RR_FF
+24 hdr RR_FF
+25 ldxr RR_FF
+26 mxr RR_FF
+27 mxdr RR_FF
+28 ldr RR_FF
+29 cdr RR_FF
+2a adr RR_FF
+2b sdr RR_FF
+2c mdr RR_FF
+2d ddr RR_FF
+2e awr RR_FF
+2f swr RR_FF
+30 lper RR_FF
+31 lner RR_FF
+32 lter RR_FF
+33 lcer RR_FF
+34 her RR_FF
+35 ledr RR_FF
+36 axr RR_FF
+37 sxr RR_FF
+38 ler RR_FF
+39 cer RR_FF
+3a aer RR_FF
+3b ser RR_FF
+3c mder RR_FF
+3d der RR_FF
+3e aur RR_FF
+3f sur RR_FF
+40 sth RX_RRRD
+41 la RX_RRRD
+42 stc RX_RRRD
+43 ic RX_RRRD
+44 ex RX_RRRD
+45 bal RX_RRRD
+46 bct RX_RRRD
+47 bc RX_URRD
+48 lh RX_RRRD
+49 ch RX_RRRD
+4a ah RX_RRRD
+4b sh RX_RRRD
+4c mh RX_RRRD
+4d bas RX_RRRD
+4e cvd RX_RRRD
+4f cvb RX_RRRD
+50 st RX_RRRD
+51 lae RX_RRRD
+54 n RX_RRRD
+55 cl RX_RRRD
+56 o RX_RRRD
+57 x RX_RRRD
+58 l RX_RRRD
+59 c RX_RRRD
+5a a RX_RRRD
+5b s RX_RRRD
+5c m RX_RRRD
+5d d RX_RRRD
+5e al RX_RRRD
+5f sl RX_RRRD
+60 std RX_FRRD
+67 mxd RX_FRRD
+68 ld RX_FRRD
+69 cd RX_FRRD
+6a ad RX_FRRD
+6b sd RX_FRRD
+6c md RX_FRRD
+6d dd RX_FRRD
+6e aw RX_FRRD
+6f sw RX_FRRD
+70 ste RX_FRRD
+71 ms RX_RRRD
+78 le RX_FRRD
+79 ce RX_FRRD
+7a ae RX_FRRD
+7b se RX_FRRD
+7c mde RX_FRRD
+7d de RX_FRRD
+7e au RX_FRRD
+7f su RX_FRRD
+80 ssm SI_RD
+82 lpsw SI_RD
+83 diag RS_RRRD
+84 brxh RSI_RRP
+85 brxle RSI_RRP
+86 bxh RS_RRRD
+87 bxle RS_RRRD
+88 srl RS_R0RD
+89 sll RS_R0RD
+8a sra RS_R0RD
+8b sla RS_R0RD
+8c srdl RS_R0RD
+8d sldl RS_R0RD
+8e srda RS_R0RD
+8f slda RS_R0RD
+90 stm RS_RRRD
+91 tm SI_URD
+92 mvi SI_URD
+93 ts SI_RD
+94 ni SI_URD
+95 cli SI_URD
+96 oi SI_URD
+97 xi SI_URD
+98 lm RS_RRRD
+99 trace RS_RRRD
+9a lam RS_AARD
+9b stam RS_AARD
+a50 iihh RI_RU
+a51 iihl RI_RU
+a52 iilh RI_RU
+a53 iill RI_RU
+a54 nihh RI_RU
+a55 nihl RI_RU
+a56 nilh RI_RU
+a57 nill RI_RU
+a58 oihh RI_RU
+a59 oihl RI_RU
+a5a oilh RI_RU
+a5b oill RI_RU
+a5c llihh RI_RU
+a5d llihl RI_RU
+a5e llilh RI_RU
+a5f llill RI_RU
+a70 tmlh RI_RU
+a71 tmll RI_RU
+a72 tmhh RI_RU
+a73 tmhl RI_RU
+a74 brc RI_UP
+a75 bras RI_RP
+a76 brct RI_RP
+a77 brctg RI_RP
+a78 lhi RI_RI
+a79 lghi RI_RI
+a7a ahi RI_RI
+a7b aghi RI_RI
+a7c mhi RI_RI
+a7d mghi RI_RI
+a7e chi RI_RI
+a7f cghi RI_RI
+a8 mvcle RS_RRRD
+a9 clcle RS_RRRD
+aa0 rinext RI_RI
+aa1 rion RI_RI
+aa2 tric RI_RI
+aa3 rioff RI_RI
+aa4 riemit RI_RI
+ac stnsm SI_URD
+ad stosm SI_URD
+ae sigp RS_RRRD
+af mc SI_URD
+b1 lra RX_RRRD
+b202 stidp S_RD
+b204 sck S_RD
+b205 stck S_RD
+b206 sckc S_RD
+b207 stckc S_RD
+b208 spt S_RD
+b209 stpt S_RD
+b20a spka S_RD
+b20b ipk S_00
+b20d ptlb S_00
+b210 spx S_RD
+b211 stpx S_RD
+b212 stap S_RD
+b214 sie S_RD
+b218 pc S_RD
+b219 sac S_RD
+b21a cfc S_RD
+b220 servc RRE_RR
+b221 ipte RRF_RURR
+b222 ipm RRE_R0
+b223 ivsk RRE_RR
+b224 iac RRE_R0
+b225 ssar RRE_R0
+b226 epar RRE_R0
+b227 esar RRE_R0
+b228 pt RRE_RR
+b229 iske RRE_RR
+b22a rrbe RRE_RR
+b22b sske RRF_U0RR
+b22c tb RRE_RR
+b22d dxr RRE_FF
+b22e pgin RRE_RR
+b22f pgout RRE_RR
+b230 csch S_00
+b231 hsch S_00
+b232 msch S_RD
+b233 ssch S_RD
+b234 stsch S_RD
+b235 tsch S_RD
+b236 tpi S_RD
+b237 sal S_00
+b238 rsch S_00
+b239 stcrw S_RD
+b23a stcps S_RD
+b23b rchp S_00
+b23c schm S_00
+b240 bakr RRE_RR
+b241 cksm RRE_RR
+b244 sqdr RRE_FF
+b245 sqer RRE_FF
+b246 stura RRE_RR
+b247 msta RRE_R0
+b248 palb RRE_00
+b249 ereg RRE_RR
+b24a esta RRE_RR
+b24b lura RRE_RR
+b24c tar RRE_AR
+b24d cpya RRE_AA
+b24e sar RRE_AR
+b24f ear RRE_RA
+b250 csp RRE_RR
+b252 msr RRE_RR
+b254 mvpg RRE_RR
+b255 mvst RRE_RR
+b256 sthyi RRE_RR
+b257 cuse RRE_RR
+b258 bsg RRE_RR
+b25a bsa RRE_RR
+b25d clst RRE_RR
+b25e srst RRE_RR
+b263 cmpsc RRE_RR
+b274 siga S_RD
+b276 xsch S_00
+b277 rp S_RD
+b278 stcke S_RD
+b279 sacf S_RD
+b27c stckf S_RD
+b27d stsi S_RD
+b280 lpp S_RD
+b284 lcctl S_RD
+b285 lpctl S_RD
+b286 qsi S_RD
+b287 lsctl S_RD
+b28e qctri S_RD
+b299 srnm S_RD
+b29c stfpc S_RD
+b29d lfpc S_RD
+b2a5 tre RRE_RR
+b2a6 cu21 RRF_U0RR
+b2a7 cu12 RRF_U0RR
+b2b0 stfle S_RD
+b2b1 stfl S_RD
+b2b2 lpswe S_RD
+b2b8 srnmb S_RD
+b2b9 srnmt S_RD
+b2bd lfas S_RD
+b2e0 scctr RRE_RR
+b2e1 spctr RRE_RR
+b2e4 ecctr RRE_RR
+b2e5 epctr RRE_RR
+b2e8 ppa RRF_U0RR
+b2ec etnd RRE_R0
+b2ed ecpga RRE_RR
+b2f8 tend S_00
+b2fa niai IE_UU
+b2fc tabort S_RD
+b2ff trap4 S_RD
+b300 lpebr RRE_FF
+b301 lnebr RRE_FF
+b302 ltebr RRE_FF
+b303 lcebr RRE_FF
+b304 ldebr RRE_FF
+b305 lxdbr RRE_FF
+b306 lxebr RRE_FF
+b307 mxdbr RRE_FF
+b308 kebr RRE_FF
+b309 cebr RRE_FF
+b30a aebr RRE_FF
+b30b sebr RRE_FF
+b30c mdebr RRE_FF
+b30d debr RRE_FF
+b30e maebr RRF_F0FF
+b30f msebr RRF_F0FF
+b310 lpdbr RRE_FF
+b311 lndbr RRE_FF
+b312 ltdbr RRE_FF
+b313 lcdbr RRE_FF
+b314 sqebr RRE_FF
+b315 sqdbr RRE_FF
+b316 sqxbr RRE_FF
+b317 meebr RRE_FF
+b318 kdbr RRE_FF
+b319 cdbr RRE_FF
+b31a adbr RRE_FF
+b31b sdbr RRE_FF
+b31c mdbr RRE_FF
+b31d ddbr RRE_FF
+b31e madbr RRF_F0FF
+b31f msdbr RRF_F0FF
+b324 lder RRE_FF
+b325 lxdr RRE_FF
+b326 lxer RRE_FF
+b32e maer RRF_F0FF
+b32f mser RRF_F0FF
+b336 sqxr RRE_FF
+b337 meer RRE_FF
+b338 maylr RRF_F0FF
+b339 mylr RRF_F0FF
+b33a mayr RRF_F0FF
+b33b myr RRF_F0FF
+b33c mayhr RRF_F0FF
+b33d myhr RRF_F0FF
+b33e madr RRF_F0FF
+b33f msdr RRF_F0FF
+b340 lpxbr RRE_FF
+b341 lnxbr RRE_FF
+b342 ltxbr RRE_FF
+b343 lcxbr RRE_FF
+b344 ledbra RRF_UUFF
+b345 ldxbra RRF_UUFF
+b346 lexbra RRF_UUFF
+b347 fixbra RRF_UUFF
+b348 kxbr RRE_FF
+b349 cxbr RRE_FF
+b34a axbr RRE_FF
+b34b sxbr RRE_FF
+b34c mxbr RRE_FF
+b34d dxbr RRE_FF
+b350 tbedr RRF_U0FF
+b351 tbdr RRF_U0FF
+b353 diebr RRF_FUFF
+b357 fiebra RRF_UUFF
+b358 thder RRE_FF
+b359 thdr RRE_FF
+b35b didbr RRF_FUFF
+b35f fidbra RRF_UUFF
+b360 lpxr RRE_FF
+b361 lnxr RRE_FF
+b362 ltxr RRE_FF
+b363 lcxr RRE_FF
+b365 lxr RRE_FF
+b366 lexr RRE_FF
+b367 fixr RRE_FF
+b369 cxr RRE_FF
+b370 lpdfr RRE_FF
+b371 lndfr RRE_FF
+b372 cpsdr RRF_F0FF2
+b373 lcdfr RRE_FF
+b374 lzer RRE_F0
+b375 lzdr RRE_F0
+b376 lzxr RRE_F0
+b377 fier RRE_FF
+b37f fidr RRE_FF
+b384 sfpc RRE_RR
+b385 sfasr RRE_R0
+b38c efpc RRE_RR
+b390 celfbr RRF_UUFR
+b391 cdlfbr RRF_UUFR
+b392 cxlfbr RRF_UUFR
+b394 cefbra RRF_UUFR
+b395 cdfbra RRF_UUFR
+b396 cxfbra RRF_UUFR
+b398 cfebra RRF_UURF
+b399 cfdbra RRF_UURF
+b39a cfxbra RRF_UURF
+b39c clfebr RRF_UURF
+b39d clfdbr RRF_UURF
+b39e clfxbr RRF_UURF
+b3a0 celgbr RRF_UUFR
+b3a1 cdlgbr RRF_UUFR
+b3a2 cxlgbr RRF_UUFR
+b3a4 cegbra RRF_UUFR
+b3a5 cdgbra RRF_UUFR
+b3a6 cxgbra RRF_UUFR
+b3a8 cgebra RRF_UURF
+b3a9 cgdbra RRF_UURF
+b3aa cgxbra RRF_UURF
+b3ac clgebr RRF_UURF
+b3ad clgdbr RRF_UURF
+b3ae clgxbr RRF_UURF
+b3b4 cefr RRE_FR
+b3b5 cdfr RRE_FR
+b3b6 cxfr RRE_FR
+b3b8 cfer RRF_U0RF
+b3b9 cfdr RRF_U0RF
+b3ba cfxr RRF_U0RF
+b3c1 ldgr RRE_FR
+b3c4 cegr RRE_FR
+b3c5 cdgr RRE_FR
+b3c6 cxgr RRE_FR
+b3c8 cger RRF_U0RF
+b3c9 cgdr RRF_U0RF
+b3ca cgxr RRF_U0RF
+b3cd lgdr RRE_RF
+b3d0 mdtra RRF_FUFF2
+b3d1 ddtra RRF_FUFF2
+b3d2 adtra RRF_FUFF2
+b3d3 sdtra RRF_FUFF2
+b3d4 ldetr RRF_0UFF
+b3d5 ledtr RRF_UUFF
+b3d6 ltdtr RRE_FF
+b3d7 fidtr RRF_UUFF
+b3d8 mxtra RRF_FUFF2
+b3d9 dxtra RRF_FUFF2
+b3da axtra RRF_FUFF2
+b3db sxtra RRF_FUFF2
+b3dc lxdtr RRF_0UFF
+b3dd ldxtr RRF_UUFF
+b3de ltxtr RRE_FF
+b3df fixtr RRF_UUFF
+b3e0 kdtr RRE_FF
+b3e1 cgdtra RRF_UURF
+b3e2 cudtr RRE_RF
+b3e3 csdtr RRF_0URF
+b3e4 cdtr RRE_FF
+b3e5 eedtr RRE_RF
+b3e7 esdtr RRE_RF
+b3e8 kxtr RRE_FF
+b3e9 cgxtra RRF_UURF
+b3ea cuxtr RRE_RF
+b3eb csxtr RRF_0URF
+b3ec cxtr RRE_FF
+b3ed eextr RRE_RF
+b3ef esxtr RRE_RF
+b3f1 cdgtra RRF_UUFR
+b3f2 cdutr RRE_FR
+b3f3 cdstr RRE_FR
+b3f4 cedtr RRE_FF
+b3f5 qadtr RRF_FUFF
+b3f6 iedtr RRF_F0FR
+b3f7 rrdtr RRF_FFRU
+b3f9 cxgtra RRF_UUFR
+b3fa cxutr RRE_FR
+b3fb cxstr RRE_FR
+b3fc cextr RRE_FF
+b3fd qaxtr RRF_FUFF
+b3fe iextr RRF_F0FR
+b3ff rrxtr RRF_FFRU
+b6 stctl RS_CCRD
+b7 lctl RS_CCRD
+b900 lpgr RRE_RR
+b901 lngr RRE_RR
+b902 ltgr RRE_RR
+b903 lcgr RRE_RR
+b904 lgr RRE_RR
+b905 lurag RRE_RR
+b906 lgbr RRE_RR
+b907 lghr RRE_RR
+b908 agr RRE_RR
+b909 sgr RRE_RR
+b90a algr RRE_RR
+b90b slgr RRE_RR
+b90c msgr RRE_RR
+b90d dsgr RRE_RR
+b90e eregg RRE_RR
+b90f lrvgr RRE_RR
+b910 lpgfr RRE_RR
+b911 lngfr RRE_RR
+b912 ltgfr RRE_RR
+b913 lcgfr RRE_RR
+b914 lgfr RRE_RR
+b916 llgfr RRE_RR
+b917 llgtr RRE_RR
+b918 agfr RRE_RR
+b919 sgfr RRE_RR
+b91a algfr RRE_RR
+b91b slgfr RRE_RR
+b91c msgfr RRE_RR
+b91d dsgfr RRE_RR
+b91e kmac RRE_RR
+b91f lrvr RRE_RR
+b920 cgr RRE_RR
+b921 clgr RRE_RR
+b925 sturg RRE_RR
+b926 lbr RRE_RR
+b927 lhr RRE_RR
+b928 pckmo RRE_00
+b929 kma RRF_R0RR
+b92a kmf RRE_RR
+b92b kmo RRE_RR
+b92c pcc RRE_00
+b92d kmctr RRF_R0RR
+b92e km RRE_RR
+b92f kmc RRE_RR
+b930 cgfr RRE_RR
+b931 clgfr RRE_RR
+b93c ppno RRE_RR
+b93e kimd RRE_RR
+b93f klmd RRE_RR
+b941 cfdtr RRF_UURF
+b942 clgdtr RRF_UURF
+b943 clfdtr RRF_UURF
+b946 bctgr RRE_RR
+b949 cfxtr RRF_UURF
+b94a clgxtr RRF_UURF
+b94b clfxtr RRF_UURF
+b951 cdftr RRF_UUFR
+b952 cdlgtr RRF_UUFR
+b953 cdlftr RRF_UUFR
+b959 cxftr RRF_UUFR
+b95a cxlgtr RRF_UUFR
+b95b cxlftr RRF_UUFR
+b960 cgrt RRF_U0RR
+b961 clgrt RRF_U0RR
+b972 crt RRF_U0RR
+b973 clrt RRF_U0RR
+b980 ngr RRE_RR
+b981 ogr RRE_RR
+b982 xgr RRE_RR
+b983 flogr RRE_RR
+b984 llgcr RRE_RR
+b985 llghr RRE_RR
+b986 mlgr RRE_RR
+b987 dlgr RRE_RR
+b988 alcgr RRE_RR
+b989 slbgr RRE_RR
+b98a cspg RRE_RR
+b98d epsw RRE_RR
+b98e idte RRF_RURR2
+b98f crdte RRF_RURR2
+b990 trtt RRF_U0RR
+b991 trto RRF_U0RR
+b992 trot RRF_U0RR
+b993 troo RRF_U0RR
+b994 llcr RRE_RR
+b995 llhr RRE_RR
+b996 mlr RRE_RR
+b997 dlr RRE_RR
+b998 alcr RRE_RR
+b999 slbr RRE_RR
+b99a epair RRE_R0
+b99b esair RRE_R0
+b99d esea RRE_R0
+b99e pti RRE_RR
+b99f ssair RRE_R0
+b9a1 tpei RRE_RR
+b9a2 ptf RRE_R0
+b9aa lptea RRF_RURR2
+b9ac irbm RRE_RR
+b9ae rrbm RRE_RR
+b9af pfmf RRE_RR
+b9b0 cu14 RRF_U0RR
+b9b1 cu24 RRF_U0RR
+b9b2 cu41 RRE_RR
+b9b3 cu42 RRE_RR
+b9bd trtre RRF_U0RR
+b9be srstu RRE_RR
+b9bf trte RRF_U0RR
+b9c8 ahhhr RRF_R0RR2
+b9c9 shhhr RRF_R0RR2
+b9ca alhhhr RRF_R0RR2
+b9cb slhhhr RRF_R0RR2
+b9cd chhr RRE_RR
+b9cf clhhr RRE_RR
+b9d0 pcistg RRE_RR
+b9d2 pcilg RRE_RR
+b9d3 rpcit RRE_RR
+b9d8 ahhlr RRF_R0RR2
+b9d9 shhlr RRF_R0RR2
+b9da alhhlr RRF_R0RR2
+b9db slhhlr RRF_R0RR2
+b9dd chlr RRE_RR
+b9df clhlr RRE_RR
+b9e0 locfhr RRF_U0RR
+b9e1 popcnt RRE_RR
+b9e2 locgr RRF_U0RR
+b9e4 ngrk RRF_R0RR2
+b9e6 ogrk RRF_R0RR2
+b9e7 xgrk RRF_R0RR2
+b9e8 agrk RRF_R0RR2
+b9e9 sgrk RRF_R0RR2
+b9ea algrk RRF_R0RR2
+b9eb slgrk RRF_R0RR2
+b9ec mgrk RRF_R0RR2
+b9ed msgrkc RRF_R0RR2
+b9f2 locr RRF_U0RR
+b9f4 nrk RRF_R0RR2
+b9f6 ork RRF_R0RR2
+b9f7 xrk RRF_R0RR2
+b9f8 ark RRF_R0RR2
+b9f9 srk RRF_R0RR2
+b9fa alrk RRF_R0RR2
+b9fb slrk RRF_R0RR2
+b9fd msrkc RRF_R0RR2
+ba cs RS_RRRD
+bb cds RS_RRRD
+bd clm RS_RURD
+be stcm RS_RURD
+bf icm RS_RURD
+c00 larl RIL_RP
+c01 lgfi RIL_RI
+c04 brcl RIL_UP
+c05 brasl RIL_RP
+c06 xihf RIL_RU
+c07 xilf RIL_RU
+c08 iihf RIL_RU
+c09 iilf RIL_RU
+c0a nihf RIL_RU
+c0b nilf RIL_RU
+c0c oihf RIL_RU
+c0d oilf RIL_RU
+c0e llihf RIL_RU
+c0f llilf RIL_RU
+c20 msgfi RIL_RI
+c21 msfi RIL_RI
+c24 slgfi RIL_RU
+c25 slfi RIL_RU
+c28 agfi RIL_RI
+c29 afi RIL_RI
+c2a algfi RIL_RU
+c2b alfi RIL_RU
+c2c cgfi RIL_RI
+c2d cfi RIL_RI
+c2e clgfi RIL_RU
+c2f clfi RIL_RU
+c42 llhrl RIL_RP
+c44 lghrl RIL_RP
+c45 lhrl RIL_RP
+c46 llghrl RIL_RP
+c47 sthrl RIL_RP
+c48 lgrl RIL_RP
+c4b stgrl RIL_RP
+c4c lgfrl RIL_RP
+c4d lrl RIL_RP
+c4e llgfrl RIL_RP
+c4f strl RIL_RP
+c5 bprp MII_UPP
+c60 exrl RIL_RP
+c62 pfdrl RIL_UP
+c64 cghrl RIL_RP
+c65 chrl RIL_RP
+c66 clghrl RIL_RP
+c67 clhrl RIL_RP
+c68 cgrl RIL_RP
+c6a clgrl RIL_RP
+c6c cgfrl RIL_RP
+c6d crl RIL_RP
+c6e clgfrl RIL_RP
+c6f clrl RIL_RP
+c7 bpp SMI_U0RDP
+c80 mvcos SSF_RRDRD
+c81 ectg SSF_RRDRD
+c82 csst SSF_RRDRD
+c84 lpd SSF_RRDRD2
+c85 lpdg SSF_RRDRD2
+cc6 brcth RIL_RP
+cc8 aih RIL_RI
+cca alsih RIL_RI
+ccb alsihn RIL_RI
+ccd cih RIL_RI
+ccf clih RIL_RU
+d0 trtr SS_L0RDRD
+d1 mvn SS_L0RDRD
+d2 mvc SS_L0RDRD
+d3 mvz SS_L0RDRD
+d4 nc SS_L0RDRD
+d5 clc SS_L0RDRD
+d6 oc SS_L0RDRD
+d7 xc SS_L0RDRD
+d9 mvck SS_RRRDRD
+da mvcp SS_RRRDRD
+db mvcs SS_RRRDRD
+dc tr SS_L0RDRD
+dd trt SS_L0RDRD
+de ed SS_L0RDRD
+df edmk SS_L0RDRD
+e1 pku SS_L2RDRD
+e2 unpku SS_L0RDRD
+e302 ltg RXY_RRRD
+e303 lrag RXY_RRRD
+e304 lg RXY_RRRD
+e306 cvby RXY_RRRD
+e308 ag RXY_RRRD
+e309 sg RXY_RRRD
+e30a alg RXY_RRRD
+e30b slg RXY_RRRD
+e30c msg RXY_RRRD
+e30d dsg RXY_RRRD
+e30e cvbg RXY_RRRD
+e30f lrvg RXY_RRRD
+e312 lt RXY_RRRD
+e313 lray RXY_RRRD
+e314 lgf RXY_RRRD
+e315 lgh RXY_RRRD
+e316 llgf RXY_RRRD
+e317 llgt RXY_RRRD
+e318 agf RXY_RRRD
+e319 sgf RXY_RRRD
+e31a algf RXY_RRRD
+e31b slgf RXY_RRRD
+e31c msgf RXY_RRRD
+e31d dsgf RXY_RRRD
+e31e lrv RXY_RRRD
+e31f lrvh RXY_RRRD
+e320 cg RXY_RRRD
+e321 clg RXY_RRRD
+e324 stg RXY_RRRD
+e325 ntstg RXY_RRRD
+e326 cvdy RXY_RRRD
+e32a lzrg RXY_RRRD
+e32e cvdg RXY_RRRD
+e32f strvg RXY_RRRD
+e330 cgf RXY_RRRD
+e331 clgf RXY_RRRD
+e332 ltgf RXY_RRRD
+e334 cgh RXY_RRRD
+e336 pfd RXY_URRD
+e338 agh RXY_RRRD
+e339 sgh RXY_RRRD
+e33a llzrgf RXY_RRRD
+e33b lzrf RXY_RRRD
+e33c mgh RXY_RRRD
+e33e strv RXY_RRRD
+e33f strvh RXY_RRRD
+e346 bctg RXY_RRRD
+e347 bic RXY_URRD
+e348 llgfsg RXY_RRRD
+e349 stgsc RXY_RRRD
+e34c lgg RXY_RRRD
+e34d lgsc RXY_RRRD
+e350 sty RXY_RRRD
+e351 msy RXY_RRRD
+e353 msc RXY_RRRD
+e354 ny RXY_RRRD
+e355 cly RXY_RRRD
+e356 oy RXY_RRRD
+e357 xy RXY_RRRD
+e358 ly RXY_RRRD
+e359 cy RXY_RRRD
+e35a ay RXY_RRRD
+e35b sy RXY_RRRD
+e35c mfy RXY_RRRD
+e35e aly RXY_RRRD
+e35f sly RXY_RRRD
+e370 sthy RXY_RRRD
+e371 lay RXY_RRRD
+e372 stcy RXY_RRRD
+e373 icy RXY_RRRD
+e375 laey RXY_RRRD
+e376 lb RXY_RRRD
+e377 lgb RXY_RRRD
+e378 lhy RXY_RRRD
+e379 chy RXY_RRRD
+e37a ahy RXY_RRRD
+e37b shy RXY_RRRD
+e37c mhy RXY_RRRD
+e380 ng RXY_RRRD
+e381 og RXY_RRRD
+e382 xg RXY_RRRD
+e383 msgc RXY_RRRD
+e384 mg RXY_RRRD
+e385 lgat RXY_RRRD
+e386 mlg RXY_RRRD
+e387 dlg RXY_RRRD
+e388 alcg RXY_RRRD
+e389 slbg RXY_RRRD
+e38e stpq RXY_RRRD
+e38f lpq RXY_RRRD
+e390 llgc RXY_RRRD
+e391 llgh RXY_RRRD
+e394 llc RXY_RRRD
+e395 llh RXY_RRRD
+e396 ml RXY_RRRD
+e397 dl RXY_RRRD
+e398 alc RXY_RRRD
+e399 slb RXY_RRRD
+e39c llgtat RXY_RRRD
+e39d llgfat RXY_RRRD
+e39f lat RXY_RRRD
+e3c0 lbh RXY_RRRD
+e3c2 llch RXY_RRRD
+e3c3 stch RXY_RRRD
+e3c4 lhh RXY_RRRD
+e3c6 llhh RXY_RRRD
+e3c7 sthh RXY_RRRD
+e3c8 lfhat RXY_RRRD
+e3ca lfh RXY_RRRD
+e3cb stfh RXY_RRRD
+e3cd chf RXY_RRRD
+e3cf clhf RXY_RRRD
+e3d0 mpcifc RXY_RRRD
+e3d4 stpcifc RXY_RRRD
+e500 lasp SSE_RDRD
+e501 tprot SSE_RDRD
+e502 strag SSE_RDRD
+e50e mvcsk SSE_RDRD
+e50f mvcdk SSE_RDRD
+e544 mvhhi SIL_RDI
+e548 mvghi SIL_RDI
+e54c mvhi SIL_RDI
+e554 chhsi SIL_RDI
+e555 clhhsi SIL_RDU
+e558 cghsi SIL_RDI
+e559 clghsi SIL_RDU
+e55c chsi SIL_RDI
+e55d clfhsi SIL_RDU
+e560 tbegin SIL_RDU
+e561 tbeginc SIL_RDU
+e634 vpkz VSI_URDV
+e635 vlrl VSI_URDV
+e637 vlrlr VRS_RRDV
+e63c vupkz VSI_URDV
+e63d vstrl VSI_URDV
+e63f vstrlr VRS_RRDV
+e649 vlip VRI_V0UU2
+e650 vcvb VRR_RV0U
+e652 vcvbg VRR_RV0U
+e658 vcvd VRI_VR0UU
+e659 vsrp VRI_VVUUU2
+e65a vcvdg VRI_VR0UU
+e65b vpsop VRI_VVUUU2
+e65f vtp VRR_0V
+e671 vap VRI_VVV0UU2
+e673 vsp VRI_VVV0UU2
+e677 vcp VRR_0VV0U
+e678 vmp VRI_VVV0UU2
+e679 vmsp VRI_VVV0UU2
+e67a vdp VRI_VVV0UU2
+e67b vrp VRI_VVV0UU2
+e67e vsdp VRI_VVV0UU2
+e700 vleb VRX_VRRDU
+e701 vleh VRX_VRRDU
+e702 vleg VRX_VRRDU
+e703 vlef VRX_VRRDU
+e704 vllez VRX_VRRDU
+e705 vlrep VRX_VRRDU
+e706 vl VRX_VRRD
+e707 vlbb VRX_VRRDU
+e708 vsteb VRX_VRRDU
+e709 vsteh VRX_VRRDU
+e70a vsteg VRX_VRRDU
+e70b vstef VRX_VRRDU
+e70e vst VRX_VRRD
+e712 vgeg VRV_VVXRDU
+e713 vgef VRV_VVXRDU
+e71a vsceg VRV_VVXRDU
+e71b vscef VRV_VVXRDU
+e721 vlgv VRS_RVRDU
+e722 vlvg VRS_VRRDU
+e727 lcbb RXE_RRRDU
+e730 vesl VRS_VVRDU
+e733 verll VRS_VVRDU
+e736 vlm VRS_VVRD
+e737 vll VRS_VRRD
+e738 vesrl VRS_VVRDU
+e73a vesra VRS_VVRDU
+e73e vstm VRS_VVRD
+e73f vstl VRS_VRRD
+e740 vleib VRI_V0IU
+e741 vleih VRI_V0IU
+e742 vleig VRI_V0IU
+e743 vleif VRI_V0IU
+e744 vgbm VRI_V0U
+e745 vrepi VRI_V0IU
+e746 vgm VRI_V0UUU
+e74a vftci VRI_VVUUU
+e74d vrep VRI_VVUU
+e750 vpopct VRR_VV0U
+e752 vctz VRR_VV0U
+e753 vclz VRR_VV0U
+e756 vlr VRX_VV
+e75c vistr VRR_VV0U0U
+e75f vseg VRR_VV0U
+e760 vmrl VRR_VVV0U
+e761 vmrh VRR_VVV0U
+e762 vlvgp VRR_VRR
+e764 vsum VRR_VVV0U
+e765 vsumg VRR_VVV0U
+e766 vcksm VRR_VVV
+e767 vsumq VRR_VVV0U
+e768 vn VRR_VVV
+e769 vnc VRR_VVV
+e76a vo VRR_VVV
+e76b vno VRR_VVV
+e76c vnx VRR_VVV
+e76d vx VRR_VVV
+e76e vnn VRR_VVV
+e76f voc VRR_VVV
+e770 veslv VRR_VVV0U
+e772 verim VRI_VVV0UU
+e773 verllv VRR_VVV0U
+e774 vsl VRR_VVV
+e775 vslb VRR_VVV
+e777 vsldb VRI_VVV0U
+e778 vesrlv VRR_VVV0U
+e77a vesrav VRR_VVV0U
+e77c vsrl VRR_VVV
+e77d vsrlb VRR_VVV
+e77e vsra VRR_VVV
+e77f vsrab VRR_VVV
+e780 vfee VRR_VVV0U0U
+e781 vfene VRR_VVV0U0U
+e782 vfae VRR_VVV0U0U
+e784 vpdi VRR_VVV0U
+e785 vbperm VRR_VVV
+e78a vstrc VRR_VVVUU0V
+e78c vperm VRR_VVV0V
+e78d vsel VRR_VVV0V
+e78e vfms VRR_VVVU0UV
+e78f vfma VRR_VVVU0UV
+e794 vpk VRR_VVV0U
+e795 vpkls VRR_VVV0U0U
+e797 vpks VRR_VVV0U0U
+e79e vfnms VRR_VVVU0UV
+e79f vfnma VRR_VVVU0UV
+e7a1 vmlh VRR_VVV0U
+e7a2 vml VRR_VVV0U
+e7a3 vmh VRR_VVV0U
+e7a4 vmle VRR_VVV0U
+e7a5 vmlo VRR_VVV0U
+e7a6 vme VRR_VVV0U
+e7a7 vmo VRR_VVV0U
+e7a9 vmalh VRR_VVVU0V
+e7aa vmal VRR_VVVU0V
+e7ab vmah VRR_VVVU0V
+e7ac vmale VRR_VVVU0V
+e7ad vmalo VRR_VVVU0V
+e7ae vmae VRR_VVVU0V
+e7af vmao VRR_VVVU0V
+e7b4 vgfm VRR_VVV0U
+e7b8 vmsl VRR_VVVUU0V
+e7b9 vaccc VRR_VVVU0V
+e7bb vac VRR_VVVU0V
+e7bc vgfma VRR_VVVU0V
+e7bd vsbcbi VRR_VVVU0V
+e7bf vsbi VRR_VVVU0V
+e7c0 vclgd VRR_VV0UUU
+e7c1 vcdlg VRR_VV0UUU
+e7c2 vcgd VRR_VV0UUU
+e7c3 vcdg VRR_VV0UUU
+e7c4 vlde VRR_VV0UU2
+e7c5 vled VRR_VV0UUU
+e7c7 vfi VRR_VV0UUU
+e7ca wfk VRR_VV0UU2
+e7cb wfc VRR_VV0UU2
+e7cc vfpso VRR_VV0UUU
+e7ce vfsq VRR_VV0UU2
+e7d4 vupll VRR_VV0U
+e7d5 vuplh VRR_VV0U
+e7d6 vupl VRR_VV0U
+e7d7 vuph VRR_VV0U
+e7d8 vtm VRR_VV
+e7d9 vecl VRR_VV0U
+e7db vec VRR_VV0U
+e7de vlc VRR_VV0U
+e7df vlp VRR_VV0U
+e7e2 vfs VRR_VVV0UU
+e7e3 vfa VRR_VVV0UU
+e7e5 vfd VRR_VVV0UU
+e7e7 vfm VRR_VVV0UU
+e7e8 vfce VRR_VVV0UUU
+e7ea vfche VRR_VVV0UUU
+e7eb vfch VRR_VVV0UUU
+e7ee vfmin VRR_VVV0UUU
+e7ef vfmax VRR_VVV0UUU
+e7f0 vavgl VRR_VVV0U
+e7f1 vacc VRR_VVV0U
+e7f2 vavg VRR_VVV0U
+e7f3 va VRR_VVV0U
+e7f5 vscbi VRR_VVV0U
+e7f7 vs VRR_VVV0U
+e7f8 vceq VRR_VVV0U0U
+e7f9 vchl VRR_VVV0U0U
+e7fb vch VRR_VVV0U0U
+e7fc vmnl VRR_VVV0U
+e7fd vmxl VRR_VVV0U
+e7fe vmn VRR_VVV0U
+e7ff vmx VRR_VVV0U
+e8 mvcin SS_L0RDRD
+e9 pka SS_L2RDRD
+ea unpka SS_L0RDRD
+eb04 lmg RSY_RRRD
+eb0a srag RSY_RRRD
+eb0b slag RSY_RRRD
+eb0c srlg RSY_RRRD
+eb0d sllg RSY_RRRD
+eb0f tracg RSY_RRRD
+eb14 csy RSY_RRRD
+eb17 stcctm RSY_RURD
+eb1c rllg RSY_RRRD
+eb1d rll RSY_RRRD
+eb20 clmh RSY_RURD
+eb21 clmy RSY_RURD
+eb23 clt RSY_RURD
+eb24 stmg RSY_RRRD
+eb25 stctg RSY_CCRD
+eb26 stmh RSY_RRRD
+eb2b clgt RSY_RURD
+eb2c stcmh RSY_RURD
+eb2d stcmy RSY_RURD
+eb2f lctlg RSY_CCRD
+eb30 csg RSY_RRRD
+eb31 cdsy RSY_RRRD
+eb3e cdsg RSY_RRRD
+eb44 bxhg RSY_RRRD
+eb45 bxleg RSY_RRRD
+eb4c ecag RSY_RRRD
+eb51 tmy SIY_URD
+eb52 mviy SIY_URD
+eb54 niy SIY_URD
+eb55 cliy SIY_URD
+eb56 oiy SIY_URD
+eb57 xiy SIY_URD
+eb60 lric RSY_RDRU
+eb61 stric RSY_RDRU
+eb62 mric RSY_RDRU
+eb6a asi SIY_IRD
+eb6e alsi SIY_IRD
+eb7a agsi SIY_IRD
+eb7e algsi SIY_IRD
+eb80 icmh RSY_RURD
+eb81 icmy RSY_RURD
+eb8e mvclu RSY_RRRD
+eb8f clclu RSY_RRRD
+eb90 stmy RSY_RRRD
+eb96 lmh RSY_RRRD
+eb98 lmy RSY_RRRD
+eb9a lamy RSY_AARD
+eb9b stamy RSY_AARD
+ebc0 tp RSL_R0RD
+ebd0 pcistb RSY_RRRD
+ebd1 sic RSY_RRRD
+ebdc srak RSY_RRRD
+ebdd slak RSY_RRRD
+ebde srlk RSY_RRRD
+ebdf sllk RSY_RRRD
+ebe0 locfh RSY_RURD2
+ebe1 stocfh RSY_RURD2
+ebe2 locg RSY_RURD2
+ebe3 stocg RSY_RURD2
+ebe4 lang RSY_RRRD
+ebe6 laog RSY_RRRD
+ebe7 laxg RSY_RRRD
+ebe8 laag RSY_RRRD
+ebea laalg RSY_RRRD
+ebf2 loc RSY_RURD2
+ebf3 stoc RSY_RURD2
+ebf4 lan RSY_RRRD
+ebf6 lao RSY_RRRD
+ebf7 lax RSY_RRRD
+ebf8 laa RSY_RRRD
+ebfa laal RSY_RRRD
+ec42 lochi RIE_RUI0
+ec44 brxhg RIE_RRP
+ec45 brxlg RIE_RRP
+ec46 locghi RIE_RUI0
+ec4e lochhi RIE_RUI0
+ec51 risblg RIE_RRUUU
+ec54 rnsbg RIE_RRUUU
+ec55 risbg RIE_RRUUU
+ec56 rosbg RIE_RRUUU
+ec57 rxsbg RIE_RRUUU
+ec59 risbgn RIE_RRUUU
+ec5d risbhg RIE_RRUUU
+ec64 cgrj RIE_RRPU
+ec65 clgrj RIE_RRPU
+ec70 cgit RIE_R0IU
+ec71 clgit RIE_R0UU
+ec72 cit RIE_R0IU
+ec73 clfit RIE_R0UU
+ec76 crj RIE_RRPU
+ec77 clrj RIE_RRPU
+ec7c cgij RIE_RUPI
+ec7d clgij RIE_RUPU
+ec7e cij RIE_RUPI
+ec7f clij RIE_RUPU
+ecd8 ahik RIE_RRI0
+ecd9 aghik RIE_RRI0
+ecda alhsik RIE_RRI0
+ecdb alghsik RIE_RRI0
+ece4 cgrb RRS_RRRDU
+ece5 clgrb RRS_RRRDU
+ecf6 crb RRS_RRRDU
+ecf7 clrb RRS_RRRDU
+ecfc cgib RIS_RURDI
+ecfd clgib RIS_RURDU
+ecfe cib RIS_RURDI
+ecff clib RIS_RURDU
+ed04 ldeb RXE_FRRD
+ed05 lxdb RXE_FRRD
+ed06 lxeb RXE_FRRD
+ed07 mxdb RXE_FRRD
+ed08 keb RXE_FRRD
+ed09 ceb RXE_FRRD
+ed0a aeb RXE_FRRD
+ed0b seb RXE_FRRD
+ed0c mdeb RXE_FRRD
+ed0d deb RXE_FRRD
+ed0e maeb RXF_FRRDF
+ed0f mseb RXF_FRRDF
+ed10 tceb RXE_FRRD
+ed11 tcdb RXE_FRRD
+ed12 tcxb RXE_FRRD
+ed14 sqeb RXE_FRRD
+ed15 sqdb RXE_FRRD
+ed17 meeb RXE_FRRD
+ed18 kdb RXE_FRRD
+ed19 cdb RXE_FRRD
+ed1a adb RXE_FRRD
+ed1b sdb RXE_FRRD
+ed1c mdb RXE_FRRD
+ed1d ddb RXE_FRRD
+ed1e madb RXF_FRRDF
+ed1f msdb RXF_FRRDF
+ed24 lde RXE_FRRD
+ed25 lxd RXE_FRRD
+ed26 lxe RXE_FRRD
+ed2e mae RXF_FRRDF
+ed2f mse RXF_FRRDF
+ed34 sqe RXE_FRRD
+ed35 sqd RXE_FRRD
+ed37 mee RXE_FRRD
+ed38 mayl RXF_FRRDF
+ed39 myl RXF_FRRDF
+ed3a may RXF_FRRDF
+ed3b my RXF_FRRDF
+ed3c mayh RXF_FRRDF
+ed3d myh RXF_FRRDF
+ed3e mad RXF_FRRDF
+ed3f msd RXF_FRRDF
+ed40 sldt RXF_FRRDF
+ed41 srdt RXF_FRRDF
+ed48 slxt RXF_FRRDF
+ed49 srxt RXF_FRRDF
+ed50 tdcet RXE_FRRD
+ed51 tdget RXE_FRRD
+ed54 tdcdt RXE_FRRD
+ed55 tdgdt RXE_FRRD
+ed58 tdcxt RXE_FRRD
+ed59 tdgxt RXE_FRRD
+ed64 ley RXY_FRRD
+ed65 ldy RXY_FRRD
+ed66 stey RXY_FRRD
+ed67 stdy RXY_FRRD
+eda8 czdt RSL_LRDFU
+eda9 czxt RSL_LRDFU
+edaa cdzt RSL_LRDFU
+edab cxzt RSL_LRDFU
+edac cpdt RSL_LRDFU
+edad cpxt RSL_LRDFU
+edae cdpt RSL_LRDFU
+edaf cxpt RSL_LRDFU
+ee plo SS_RRRDRD2
+ef lmd SS_RRRDRD3
+f0 srp SS_LIRDRD
+f1 mvo SS_LLRDRD
+f2 pack SS_LLRDRD
+f3 unpk SS_LLRDRD
+f8 zap SS_LLRDRD
+f9 cp SS_LLRDRD
+fa ap SS_LLRDRD
+fb sp SS_LLRDRD
+fc mp SS_LLRDRD
+fd dp SS_LLRDRD
diff --git a/arch/sh/boot/dts/Makefile b/arch/sh/boot/dts/Makefile
index e5ce3a0de7f4..715def00a436 100644
--- a/arch/sh/boot/dts/Makefile
+++ b/arch/sh/boot/dts/Makefile
@@ -1,3 +1 @@
obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
-
-clean-files := *.dtb.S
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 68c1536b3aab..41167931e5d9 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -10,10 +10,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
}
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir);
-
-/* arch/sh/mm/consistent.c */
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
unsigned long attrs);
@@ -21,4 +17,7 @@ extern void dma_generic_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs);
+void sh_sync_dma_for_device(void *vaddr, size_t size,
+ enum dma_data_direction dir);
+
#endif /* __ASM_SH_DMA_MAPPING_H */
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index 53f7ae6abaa7..0033f0df2b3b 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -64,13 +64,9 @@ extern int pci_is_66mhz_capable(struct pci_channel *hose,
extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM;
-struct pci_dev;
-
#define HAVE_PCI_MMAP
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
-extern void pcibios_set_master(struct pci_dev *dev);
-
/* Dynamic DMA mapping stuff.
* SuperH has everything mapped statically like x86.
*/
diff --git a/arch/sh/include/asm/spinlock-cas.h b/arch/sh/include/asm/spinlock-cas.h
index 5ed7dbbd94ff..270ee4d3e25b 100644
--- a/arch/sh/include/asm/spinlock-cas.h
+++ b/arch/sh/include/asm/spinlock-cas.h
@@ -27,7 +27,6 @@ static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new
*/
#define arch_spin_is_locked(x) ((x)->lock <= 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
@@ -53,18 +52,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* read-locks.
*/
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned old;
@@ -102,11 +89,4 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SH_SPINLOCK_CAS_H */
diff --git a/arch/sh/include/asm/spinlock-llsc.h b/arch/sh/include/asm/spinlock-llsc.h
index f77263aae760..715595de286a 100644
--- a/arch/sh/include/asm/spinlock-llsc.h
+++ b/arch/sh/include/asm/spinlock-llsc.h
@@ -19,7 +19,6 @@
*/
#define arch_spin_is_locked(x) ((x)->lock <= 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
@@ -89,18 +88,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* read-locks.
*/
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_read_can_lock(x) ((x)->lock > 0)
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -209,11 +196,4 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
return (oldval > (RW_LOCK_BIAS - 1));
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SH_SPINLOCK_LLSC_H */
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
index d24c707b2181..62b485107eae 100644
--- a/arch/sh/kernel/dma-nommu.c
+++ b/arch/sh/kernel/dma-nommu.c
@@ -9,6 +9,7 @@
*/
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <asm/cacheflush.h>
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
@@ -20,7 +21,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
WARN_ON(size == 0);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_cache_sync(dev, page_address(page) + offset, size, dir);
+ sh_sync_dma_for_device(page_address(page) + offset, size, dir);
return addr;
}
@@ -38,7 +39,7 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
BUG_ON(!sg_page(s));
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_cache_sync(dev, sg_virt(s), s->length, dir);
+ sh_sync_dma_for_device(sg_virt(s), s->length, dir);
s->dma_address = sg_phys(s);
s->dma_length = s->length;
@@ -48,20 +49,20 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
}
#ifdef CONFIG_DMA_NONCOHERENT
-static void nommu_sync_single(struct device *dev, dma_addr_t addr,
+static void nommu_sync_single_for_device(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
{
- dma_cache_sync(dev, phys_to_virt(addr), size, dir);
+ sh_sync_dma_for_device(phys_to_virt(addr), size, dir);
}
-static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
+static void nommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nelems, i)
- dma_cache_sync(dev, sg_virt(s), s->length, dir);
+ sh_sync_dma_for_device(sg_virt(s), s->length, dir);
}
#endif
@@ -71,8 +72,8 @@ const struct dma_map_ops nommu_dma_ops = {
.map_page = nommu_map_page,
.map_sg = nommu_map_sg,
#ifdef CONFIG_DMA_NONCOHERENT
- .sync_single_for_device = nommu_sync_single,
- .sync_sg_for_device = nommu_sync_sg,
+ .sync_single_for_device = nommu_sync_single_for_device,
+ .sync_sg_for_device = nommu_sync_sg_for_device,
#endif
.is_phys = 1,
};
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index e1d751ae2498..1a2526676a87 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -1172,11 +1172,11 @@ static int __init dwarf_unwinder_init(void)
dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
sizeof(struct dwarf_frame), 0,
- SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+ SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
sizeof(struct dwarf_reg), 0,
- SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+ SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ,
dwarf_frame_cachep);
diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S
index defd851abefa..cca491397a28 100644
--- a/arch/sh/kernel/head_64.S
+++ b/arch/sh/kernel/head_64.S
@@ -101,14 +101,6 @@ empty_zero_page:
mmu_pdtp_cache:
.space PAGE_SIZE, 0
- .global empty_bad_page
-empty_bad_page:
- .space PAGE_SIZE, 0
-
- .global empty_bad_pte_table
-empty_bad_pte_table:
- .space PAGE_SIZE, 0
-
.global fpu_in_use
fpu_in_use: .quad 0
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index b2d9963d5978..68b1a67533ce 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -59,7 +59,7 @@ void arch_task_cache_init(void)
task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
__alignof__(union thread_xstate),
- SLAB_PANIC | SLAB_NOTRACK, NULL);
+ SLAB_PANIC, NULL);
}
#ifdef CONFIG_SH_FPU_EMU
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index d1275adfa0ef..6ea3aab508f2 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -49,7 +49,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
* Pages from the page allocator may have data present in
* cache. So flush the cache before using uncached memory.
*/
- dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);
+ sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL);
ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
if (!ret_nocache) {
@@ -78,7 +78,7 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
iounmap(vaddr);
}
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+void sh_sync_dma_for_device(void *vaddr, size_t size,
enum dma_data_direction direction)
{
void *addr;
@@ -100,7 +100,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
BUG();
}
}
-EXPORT_SYMBOL(dma_cache_sync);
+EXPORT_SYMBOL(sh_sync_dma_for_device);
static int __init memchunk_setup(char *str)
{
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 0c3b3b4a9963..d13ce517f4b9 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -32,7 +32,7 @@ void atomic_set(atomic_t *, int);
#define atomic_set_release(v, i) atomic_set((v), (i))
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 2f3490dd37de..12ae33daf52f 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -6,14 +6,6 @@
#include <linux/mm.h>
#include <linux/dma-debug.h>
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
-{
- /* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this
- * routine can be a nop.
- */
-}
-
extern const struct dma_map_ops *dma_ops;
extern const struct dma_map_ops pci32_dma_ops;
diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h
index dd63aa301658..b519acf4383d 100644
--- a/arch/sparc/include/asm/floppy_32.h
+++ b/arch/sparc/include/asm/floppy_32.h
@@ -71,7 +71,6 @@ static struct sun_floppy_ops sun_fdops;
#define fd_set_dma_count(count) sun_fd_set_dma_count(count)
#define fd_enable_irq() /* nothing... */
#define fd_disable_irq() /* nothing... */
-#define fd_cacheflush(addr, size) /* nothing... */
#define fd_request_irq() sun_fd_request_irq()
#define fd_free_irq() /* nothing... */
#if 0 /* P3: added by Alain, these cause a MMU corruption. 19960524 XXX */
diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h
index 22fbeab92a4c..2a050eab69a0 100644
--- a/arch/sparc/include/asm/floppy_64.h
+++ b/arch/sparc/include/asm/floppy_64.h
@@ -73,7 +73,6 @@ static struct sun_floppy_ops sun_fdops;
#define fd_set_dma_addr(addr) sun_fdops.fd_set_dma_addr(addr)
#define fd_set_dma_count(count) sun_fdops.fd_set_dma_count(count)
#define get_dma_residue(x) sun_fdops.get_dma_residue()
-#define fd_cacheflush(addr, size) /* nothing... */
#define fd_request_irq() sun_fdops.fd_request_irq()
#define fd_free_irq() sun_fdops.fd_free_irq()
#define fd_eject(drive) sun_fdops.fd_eject(drive)
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index fe361d3d180d..98917e48727d 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -21,8 +21,6 @@
*/
#define PCI_DMA_BUS_IS_PHYS (0)
-struct pci_dev;
-
#endif /* __KERNEL__ */
#ifndef CONFIG_LEON_PCI
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index fd9d9bac7cfa..5a9e96be1665 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -231,6 +231,36 @@ extern unsigned long _PAGE_ALL_SZ_BITS;
extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr) (mem_map_zero)
+/* This macro must be updated when the size of struct page grows above 80
+ * or shrinks below 64.
+ * The idea is that the compiler optimizes out the switch() statement
+ * and only leaves clrx instructions.
+ */
+#define mm_zero_struct_page(pp) do { \
+ unsigned long *_pp = (void *)(pp); \
+ \
+ /* Check that struct page is either 64, 72, or 80 bytes */ \
+ BUILD_BUG_ON(sizeof(struct page) & 7); \
+ BUILD_BUG_ON(sizeof(struct page) < 64); \
+ BUILD_BUG_ON(sizeof(struct page) > 80); \
+ \
+ switch (sizeof(struct page)) { \
+ case 80: \
+ _pp[9] = 0; /* fallthrough */ \
+ case 72: \
+ _pp[8] = 0; /* fallthrough */ \
+ default: \
+ _pp[7] = 0; \
+ _pp[6] = 0; \
+ _pp[5] = 0; \
+ _pp[4] = 0; \
+ _pp[3] = 0; \
+ _pp[2] = 0; \
+ _pp[1] = 0; \
+ _pp[0] = 0; \
+ } \
+} while (0)
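A minimal usage sketch (hypothetical caller, not part of the patch): with sizeof(struct page) == 64 only the default: branch survives, so the macro reduces to eight explicit 8-byte stores that the compiler can emit as clrx instructions instead of a memset() call.

	static inline void sparc_init_page(struct page *page)	/* hypothetical */
	{
		mm_zero_struct_page(page);	/* _pp[7].._pp[0] = 0 for a 64-byte struct page */
	}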
+
/* PFNs are real physical page numbers. However, mem_map only begins to record
* per-page information starting at pfn_base. This is to handle systems where
* the first physical page in the machine is at some huge physical address,
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index 6a339a78f4f4..71dd82b43cc5 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -7,6 +7,7 @@
#if defined(__sparc__) && defined(__arch64__)
#ifndef __ASSEMBLY__
+#include <linux/compiler.h>
#include <linux/threads.h>
#include <asm/switch_to.h>
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 26f00ac2b470..bc5aa6f61676 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -183,17 +183,6 @@ static inline int __arch_read_trylock(arch_rwlock_t *rw)
res; \
})
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
-#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define arch_write_can_lock(rw) (!(rw)->lock)
-
#endif /* !(__ASSEMBLY__) */
#endif /* __SPARC_SPINLOCK_H */
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 4822a7e94a30..7fc82a233f49 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -14,13 +14,6 @@
#include <asm/qrwlock.h>
#include <asm/qspinlock.h>
-#define arch_read_lock_flags(p, f) arch_read_lock(p)
-#define arch_write_lock_flags(p, f) arch_write_lock(p)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_SPINLOCK_H) */
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 5078b7f68890..0112d6942288 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -397,7 +397,7 @@ static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
pmd_clear(pmd);
pte_free_tlb(tlb, token, addr);
- atomic_long_dec(&tlb->mm->nr_ptes);
+ mm_dec_nr_ptes(tlb->mm);
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -472,6 +472,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud = pud_offset(pgd, start);
pgd_clear(pgd);
pud_free_tlb(tlb, pud, start);
+ mm_dec_nr_puds(tlb->mm);
}
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 61bdc1270d19..55ba62957e64 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2540,10 +2540,17 @@ void __init mem_init(void)
{
high_memory = __va(last_valid_pfn << PAGE_SHIFT);
- register_page_bootmem_info();
free_all_bootmem();
/*
+ * Must be done after boot memory is put on freelist, because here we
+ * might set fields in deferred struct pages that have not yet been
+ * initialized, and free_all_bootmem() initializes all the reserved
+ * deferred pages for us.
+ */
+ register_page_bootmem_info();
+
+ /*
* Set up the zero page, mark it reserved, so that page count
* is not manipulated when freeing the page from user ptes.
*/
@@ -2637,30 +2644,19 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
vstart = vstart & PMD_MASK;
vend = ALIGN(vend, PMD_SIZE);
for (; vstart < vend; vstart += PMD_SIZE) {
- pgd_t *pgd = pgd_offset_k(vstart);
+ pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
unsigned long pte;
pud_t *pud;
pmd_t *pmd;
- if (pgd_none(*pgd)) {
- pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
-
- if (!new)
- return -ENOMEM;
- pgd_populate(&init_mm, pgd, new);
- }
-
- pud = pud_offset(pgd, vstart);
- if (pud_none(*pud)) {
- pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
+ if (!pgd)
+ return -ENOMEM;
- if (!new)
- return -ENOMEM;
- pud_populate(&init_mm, pud, new);
- }
+ pud = vmemmap_pud_populate(pgd, vstart, node);
+ if (!pud)
+ return -ENOMEM;
pmd = pmd_offset(pud, vstart);
-
pte = pmd_val(*pmd);
if (!(pte & _PAGE_VALID)) {
void *block = vmemmap_alloc_block(PMD_SIZE, node);
@@ -2927,7 +2923,7 @@ void __flush_tlb_all(void)
pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
pte_t *pte = NULL;
if (page)
@@ -2939,11 +2935,11 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+ struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
if (!pgtable_page_ctor(page)) {
- free_hot_cold_page(page, 0);
+ free_unref_page(page);
return NULL;
}
return (pte_t *) page_address(page);
diff --git a/arch/tile/gxio/dma_queue.c b/arch/tile/gxio/dma_queue.c
index baa60357f8ba..b7ba577d82ca 100644
--- a/arch/tile/gxio/dma_queue.c
+++ b/arch/tile/gxio/dma_queue.c
@@ -163,14 +163,14 @@ int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
int64_t completion_slot, int update)
{
if (update) {
- if (ACCESS_ONCE(dma_queue->hw_complete_count) >
+ if (READ_ONCE(dma_queue->hw_complete_count) >
completion_slot)
return 1;
__gxio_dma_queue_update_credits(dma_queue);
}
- return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot;
+ return READ_ONCE(dma_queue->hw_complete_count) > completion_slot;
}
EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
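Several hunks in this series replace ACCESS_ONCE() with READ_ONCE(). Both force exactly one, untorn load of the variable; READ_ONCE() is simply the surviving interface. A simplified user-space approximation of the idea (not the kernel's full definition, which also handles sizes larger than a machine word):

#include <stdio.h>

/* Simplified: cast through a volatile pointer so the compiler cannot
 * merge, duplicate, or tear this load. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static unsigned long hw_complete_count;	/* imagine it updated elsewhere */

static int is_complete(long slot)
{
	/* One well-defined load per call, as in __gxio_dma_queue_is_complete(). */
	return READ_ONCE(hw_complete_count) > (unsigned long)slot;
}

int main(void)
{
	hw_complete_count = 5;
	printf("%d\n", is_complete(3));		/* prints 1 */
	return 0;
}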
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 7061dc8af43a..97ad62878290 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -67,13 +67,4 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
#define HAVE_ARCH_DMA_SET_MASK 1
int dma_set_mask(struct device *dev, u64 mask);
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
-}
-
#endif /* _ASM_TILE_DMA_MAPPING_H */
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index cba8ba9b8da6..fb5313d77315 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -51,9 +51,6 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
void arch_spin_lock(arch_spinlock_t *lock);
-/* We cannot take an interrupt after getting a ticket, so don't enable them. */
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
int arch_spin_trylock(arch_spinlock_t *lock);
static inline void arch_spin_unlock(arch_spinlock_t *lock)
@@ -80,22 +77,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#define _RD_COUNT_WIDTH 8
/**
- * arch_read_can_lock() - would read_trylock() succeed?
- */
-static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
-{
- return (rwlock->lock << _RD_COUNT_WIDTH) == 0;
-}
-
-/**
- * arch_write_can_lock() - would write_trylock() succeed?
- */
-static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
-{
- return rwlock->lock == 0;
-}
-
-/**
* arch_read_lock() - acquire a read lock.
*/
void arch_read_lock(arch_rwlock_t *rwlock);
@@ -125,7 +106,4 @@ void arch_read_unlock(arch_rwlock_t *rwlock);
*/
void arch_write_unlock(arch_rwlock_t *rwlock);
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif /* _ASM_TILE_SPINLOCK_32_H */
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index 9a2c2d605752..5b616ef642a8 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -75,9 +75,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
/* Try to get the lock, and return whether we succeeded. */
int arch_spin_trylock(arch_spinlock_t *lock);
-/* We cannot take an interrupt after getting a ticket, so don't enable them. */
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
@@ -93,24 +90,6 @@ static inline int arch_write_val_locked(int val)
return val < 0; /* Optimize "val & __WRITE_LOCK_BIT". */
}
-/**
- * read_can_lock - would read_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_read_can_lock(arch_rwlock_t *rw)
-{
- return !arch_write_val_locked(rw->lock);
-}
-
-/**
- * write_can_lock - would write_trylock() succeed?
- * @lock: the rwlock in question.
- */
-static inline int arch_write_can_lock(arch_rwlock_t *rw)
-{
- return rw->lock == 0;
-}
-
extern void __read_lock_failed(arch_rwlock_t *rw);
static inline void arch_read_lock(arch_rwlock_t *rw)
@@ -156,7 +135,4 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
return 0;
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif /* _ASM_TILE_SPINLOCK_64_H */
diff --git a/arch/tile/include/gxio/dma_queue.h b/arch/tile/include/gxio/dma_queue.h
index b9e45e37649e..c8fd47edba30 100644
--- a/arch/tile/include/gxio/dma_queue.h
+++ b/arch/tile/include/gxio/dma_queue.h
@@ -121,7 +121,7 @@ static inline int64_t __gxio_dma_queue_reserve(__gxio_dma_queue_t *dma_queue,
* if the result is LESS than "hw_complete_count".
*/
uint64_t complete;
- complete = ACCESS_ONCE(dma_queue->hw_complete_count);
+ complete = READ_ONCE(dma_queue->hw_complete_count);
slot |= (complete & 0xffffffffff000000);
if (slot < complete)
slot += 0x1000000;
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index e1a078e6828e..d516d61751c2 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -255,7 +255,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
int do_syscall_trace_enter(struct pt_regs *regs)
{
- u32 work = ACCESS_ONCE(current_thread_info()->flags);
+ u32 work = READ_ONCE(current_thread_info()->flags);
if ((work & _TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs)) {
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index b51cc28acd0a..4432f31e8479 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -409,7 +409,7 @@ void __homecache_free_pages(struct page *page, unsigned int order)
if (put_page_testzero(page)) {
homecache_change_page_home(page, order, PAGE_HOME_HASH);
if (order == 0) {
- free_hot_cold_page(page, false);
+ free_unref_page(page);
} else {
init_page_count(page);
__free_pages(page, order);
diff --git a/arch/um/include/shared/init.h b/arch/um/include/shared/init.h
index 390572daa40d..b3f5865a92c9 100644
--- a/arch/um/include/shared/init.h
+++ b/arch/um/include/shared/init.h
@@ -41,7 +41,7 @@
typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void);
-#include <linux/compiler.h>
+#include <linux/compiler_types.h>
/* These are for everybody (although not all archs will actually
discard it in modules) */
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index e7437ec62710..3c0e470ea646 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -22,8 +22,6 @@
/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);
-/* allocated in paging_init and unchanged thereafter */
-static unsigned long *empty_bad_page = NULL;
/*
* Initialized during boot, and readonly for initializing page tables
@@ -146,7 +144,6 @@ void __init paging_init(void)
int i;
empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
- empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
for (i = 0; i < ARRAY_SIZE(zones_size); i++)
zones_size[i] = 0;
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index d39f0bc6a046..462e59a7ae78 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -115,7 +115,7 @@ endif
source "arch/unicore32/mm/Kconfig"
-comment "Floating poing support"
+comment "Floating point support"
config UNICORE_FPU_F64
def_bool y if !ARCH_FPGA
diff --git a/arch/unicore32/include/asm/cacheflush.h b/arch/unicore32/include/asm/cacheflush.h
index c0301e6c8b81..a5e08e2d5d6d 100644
--- a/arch/unicore32/include/asm/cacheflush.h
+++ b/arch/unicore32/include/asm/cacheflush.h
@@ -102,15 +102,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
extern void __cpuc_flush_kern_dcache_area(void *addr, size_t size);
/*
- * These are private to the dma-mapping API. Do not use directly.
- * Their sole purpose is to ensure that data held in the cache
- * is visible to DMA, or data written by DMA to system memory is
- * visible to the CPU.
- */
-extern void __cpuc_dma_clean_range(unsigned long, unsigned long);
-extern void __cpuc_dma_flush_range(unsigned long, unsigned long);
-
-/*
* Copy user data from/to a page which is mapped into a different
* processes address space. Really, we want to allow our "user
* space" model to handle this.
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h
index 518ba5848dd6..ac608c2f6af6 100644
--- a/arch/unicore32/include/asm/dma-mapping.h
+++ b/arch/unicore32/include/asm/dma-mapping.h
@@ -18,9 +18,6 @@
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>
-#include <asm/memory.h>
-#include <asm/cacheflush.h>
-
extern const struct dma_map_ops swiotlb_dma_map_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
@@ -48,24 +45,5 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
static inline void dma_mark_clean(void *addr, size_t size) {}
-static inline void dma_cache_sync(struct device *dev, void *vaddr,
- size_t size, enum dma_data_direction direction)
-{
- unsigned long start = (unsigned long)vaddr;
- unsigned long end = start + size;
-
- switch (direction) {
- case DMA_NONE:
- BUG();
- case DMA_FROM_DEVICE:
- case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- __cpuc_dma_flush_range(start, end);
- break;
- case DMA_TO_DEVICE: /* writeback only */
- __cpuc_dma_clean_range(start, end);
- break;
- }
-}
-
#endif /* __KERNEL__ */
#endif
diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h
index 26775793c204..f0fdb268f8f2 100644
--- a/arch/unicore32/include/asm/pgalloc.h
+++ b/arch/unicore32/include/asm/pgalloc.h
@@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
#define pgd_alloc(mm) get_pgd_slow(mm)
#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
/*
* Allocate one PTE table.
diff --git a/arch/unicore32/mm/pgd.c b/arch/unicore32/mm/pgd.c
index c572a28c76c9..a830a300aaa1 100644
--- a/arch/unicore32/mm/pgd.c
+++ b/arch/unicore32/mm/pgd.c
@@ -97,7 +97,7 @@ void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
pte = pmd_pgtable(*pmd);
pmd_clear(pmd);
pte_free(mm, pte);
- atomic_long_dec(&mm->nr_ptes);
+ mm_dec_nr_ptes(mm);
pmd_free(mm, pmd);
mm_dec_nr_pmds(mm);
free:
diff --git a/arch/unicore32/mm/proc-syms.c b/arch/unicore32/mm/proc-syms.c
index 21c00fc85c99..df215fd6d639 100644
--- a/arch/unicore32/mm/proc-syms.c
+++ b/arch/unicore32/mm/proc-syms.c
@@ -20,6 +20,3 @@ EXPORT_SYMBOL(cpu_dcache_clean_area);
EXPORT_SYMBOL(cpu_set_pte);
EXPORT_SYMBOL(__cpuc_coherent_kern_range);
-
-EXPORT_SYMBOL(__cpuc_dma_flush_range);
-EXPORT_SYMBOL(__cpuc_dma_clean_range);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2fdb23313dd5..df3276d6bfe3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -56,7 +56,7 @@ config X86
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_PMEM_API if X86_64
# Causing hangs/crashes, see the commit that added this change for details.
- select ARCH_HAS_REFCOUNT if BROKEN
+ select ARCH_HAS_REFCOUNT
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
@@ -93,8 +93,10 @@ config X86
select GENERIC_FIND_FIRST_BIT
select GENERIC_IOMAP
select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
+ select GENERIC_IRQ_MATRIX_ALLOCATOR if X86_LOCAL_APIC
select GENERIC_IRQ_MIGRATION if SMP
select GENERIC_IRQ_PROBE
+ select GENERIC_IRQ_RESERVATION_MODE
select GENERIC_IRQ_SHOW
select GENERIC_PENDING_IRQ if SMP
select GENERIC_SMP_IDLE_THREAD
@@ -108,9 +110,8 @@ config X86
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
select HAVE_ARCH_JUMP_LABEL
- select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
+ select HAVE_ARCH_KASAN if X86_64
select HAVE_ARCH_KGDB
- select HAVE_ARCH_KMEMCHECK
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
select HAVE_ARCH_COMPAT_MMAP_BASES if MMU && COMPAT
@@ -171,7 +172,7 @@ config X86
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RELIABLE_STACKTRACE if X86_64 && FRAME_POINTER_UNWINDER && STACK_VALIDATION
+ select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
select HAVE_STACK_VALIDATION if X86_64
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UNSTABLE_SCHED_CLOCK
@@ -303,7 +304,6 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC
config KASAN_SHADOW_OFFSET
hex
depends on KASAN
- default 0xdff8000000000000 if X86_5LEVEL
default 0xdffffc0000000000
config HAVE_INTEL_TXT
@@ -1429,7 +1429,7 @@ config ARCH_DMA_ADDR_T_64BIT
config X86_DIRECT_GBPAGES
def_bool y
- depends on X86_64 && !DEBUG_PAGEALLOC && !KMEMCHECK
+ depends on X86_64 && !DEBUG_PAGEALLOC
---help---
Certain kernel features effectively disable kernel
linear 1 GB mappings (even if the CPU otherwise
@@ -1803,6 +1803,16 @@ config X86_SMAP
If unsure, say Y.
+config X86_INTEL_UMIP
+ def_bool n
+ depends on CPU_SUP_INTEL
+ prompt "Intel User Mode Instruction Prevention" if EXPERT
+ ---help---
+ The User Mode Instruction Prevention (UMIP) is a security
+ feature in newer Intel processors. If enabled, a general
+ protection fault is issued if the instructions SGDT, SLDT,
+ SIDT, SMSW and STR are executed in user mode.
+
config X86_INTEL_MPX
prompt "Intel MPX (Memory Protection Extensions)"
def_bool n
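To see what the new X86_INTEL_UMIP option changes from user space's point of view, the hypothetical probe below (not part of the patch) executes SMSW, one of the instructions named in the help text. Without UMIP it returns the low bits of CR0; with UMIP enforced the CPU raises a general protection fault at CPL 3 instead, unless the kernel chooses to emulate the instruction:

#include <stdio.h>

int main(void)
{
	unsigned long msw = 0;

	/* SMSW stores the machine status word (low bits of CR0).
	 * Under UMIP this faults when executed in user mode. */
	asm volatile("smsw %0" : "=r"(msw));
	printf("SMSW returned 0x%lx\n", msw);
	return 0;
}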
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 90b123056f4b..6293a8768a91 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -359,28 +359,14 @@ config PUNIT_ATOM_DEBUG
choice
prompt "Choose kernel unwinder"
- default FRAME_POINTER_UNWINDER
+ default UNWINDER_ORC if X86_64
+ default UNWINDER_FRAME_POINTER if X86_32
---help---
This determines which method will be used for unwinding kernel stack
traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
livepatch, lockdep, and more.
-config FRAME_POINTER_UNWINDER
- bool "Frame pointer unwinder"
- select FRAME_POINTER
- ---help---
- This option enables the frame pointer unwinder for unwinding kernel
- stack traces.
-
- The unwinder itself is fast and it uses less RAM than the ORC
- unwinder, but the kernel text size will grow by ~3% and the kernel's
- overall performance will degrade by roughly 5-10%.
-
- This option is recommended if you want to use the livepatch
- consistency model, as this is currently the only way to get a
- reliable stack trace (CONFIG_HAVE_RELIABLE_STACKTRACE).
-
-config ORC_UNWINDER
+config UNWINDER_ORC
bool "ORC unwinder"
depends on X86_64
select STACK_VALIDATION
@@ -396,7 +382,22 @@ config ORC_UNWINDER
Enabling this option will increase the kernel's runtime memory usage
by roughly 2-4MB, depending on your kernel config.
-config GUESS_UNWINDER
+config UNWINDER_FRAME_POINTER
+ bool "Frame pointer unwinder"
+ select FRAME_POINTER
+ ---help---
+ This option enables the frame pointer unwinder for unwinding kernel
+ stack traces.
+
+ The unwinder itself is fast and it uses less RAM than the ORC
+ unwinder, but the kernel text size will grow by ~3% and the kernel's
+ overall performance will degrade by roughly 5-10%.
+
+ This option is recommended if you want to use the livepatch
+ consistency model, as this is currently the only way to get a
+ reliable stack trace (CONFIG_HAVE_RELIABLE_STACKTRACE).
+
+config UNWINDER_GUESS
bool "Guess unwinder"
depends on EXPERT
---help---
@@ -411,7 +412,7 @@ config GUESS_UNWINDER
endchoice
config FRAME_POINTER
- depends on !ORC_UNWINDER && !GUESS_UNWINDER
+ depends on !UNWINDER_ORC && !UNWINDER_GUESS
bool
endmenu
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index a20eacd9c7e9..3e73bc255e4e 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -158,11 +158,6 @@ ifdef CONFIG_X86_X32
endif
export CONFIG_X86_X32_ABI
-# Don't unroll struct assignments with kmemcheck enabled
-ifeq ($(CONFIG_KMEMCHECK),y)
- KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
-endif
-
#
# If the function graph tracer is used with mcount instead of fentry,
# '-maccumulate-outgoing-args' is needed to prevent a GCC bug
diff --git a/arch/x86/boot/.gitignore b/arch/x86/boot/.gitignore
index e3cf9f682be5..09d25dd09307 100644
--- a/arch/x86/boot/.gitignore
+++ b/arch/x86/boot/.gitignore
@@ -7,3 +7,6 @@ zoffset.h
setup
setup.bin
setup.elf
+fdimage
+mtools.conf
+image.iso
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index d88a2fddba8c..9b5adae9cc40 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -123,63 +123,26 @@ image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,)
$(obj)/mtools.conf: $(src)/mtools.conf.in
sed -e 's|@OBJ@|$(obj)|g' < $< > $@
+quiet_cmd_genimage = GENIMAGE $3
+cmd_genimage = sh $(srctree)/$(src)/genimage.sh $2 $3 $(obj)/bzImage \
+ $(obj)/mtools.conf '$(image_cmdline)' $(FDINITRD)
+
# This requires write access to /dev/fd0
bzdisk: $(obj)/bzImage $(obj)/mtools.conf
- MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync
- syslinux /dev/fd0 ; sync
- echo '$(image_cmdline)' | \
- MTOOLSRC=$(src)/mtools.conf mcopy - a:syslinux.cfg
- if [ -f '$(FDINITRD)' ] ; then \
- MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' a:initrd.img ; \
- fi
- MTOOLSRC=$(obj)/mtools.conf mcopy $(obj)/bzImage a:linux ; sync
+ $(call cmd,genimage,bzdisk,/dev/fd0)
# These require being root or having syslinux 2.02 or higher installed
fdimage fdimage144: $(obj)/bzImage $(obj)/mtools.conf
- dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440
- MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync
- syslinux $(obj)/fdimage ; sync
- echo '$(image_cmdline)' | \
- MTOOLSRC=$(obj)/mtools.conf mcopy - v:syslinux.cfg
- if [ -f '$(FDINITRD)' ] ; then \
- MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' v:initrd.img ; \
- fi
- MTOOLSRC=$(obj)/mtools.conf mcopy $(obj)/bzImage v:linux ; sync
+ $(call cmd,genimage,fdimage144,$(obj)/fdimage)
+ @$(kecho) 'Kernel: $(obj)/fdimage is ready'
fdimage288: $(obj)/bzImage $(obj)/mtools.conf
- dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880
- MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync
- syslinux $(obj)/fdimage ; sync
- echo '$(image_cmdline)' | \
- MTOOLSRC=$(obj)/mtools.conf mcopy - w:syslinux.cfg
- if [ -f '$(FDINITRD)' ] ; then \
- MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' w:initrd.img ; \
- fi
- MTOOLSRC=$(obj)/mtools.conf mcopy $(obj)/bzImage w:linux ; sync
+ $(call cmd,genimage,fdimage288,$(obj)/fdimage)
+ @$(kecho) 'Kernel: $(obj)/fdimage is ready'
isoimage: $(obj)/bzImage
- -rm -rf $(obj)/isoimage
- mkdir $(obj)/isoimage
- for i in lib lib64 share end ; do \
- if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
- cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
- if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
- cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
- fi ; \
- break ; \
- fi ; \
- if [ $$i = end ] ; then exit 1 ; fi ; \
- done
- cp $(obj)/bzImage $(obj)/isoimage/linux
- echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
- if [ -f '$(FDINITRD)' ] ; then \
- cp '$(FDINITRD)' $(obj)/isoimage/initrd.img ; \
- fi
- mkisofs -J -r -o $(obj)/image.iso -b isolinux.bin -c boot.cat \
- -no-emul-boot -boot-load-size 4 -boot-info-table \
- $(obj)/isoimage
- isohybrid $(obj)/image.iso 2>/dev/null || true
- rm -rf $(obj)/isoimage
+ $(call cmd,genimage,isoimage,$(obj)/image.iso)
+ @$(kecho) 'Kernel: $(obj)/image.iso is ready'
bzlilo: $(obj)/bzImage
if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi
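The rewritten targets are invoked as before, e.g. make FDARGS="console=ttyS0" FDINITRD=/path/to/initrd.img isoimage (arguments and path here are illustrative); the difference is that each recipe now reduces to the single genimage.sh call defined by cmd_genimage above instead of the inlined mtools/mkisofs recipes.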
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 4b7575b00563..1e9c322e973a 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -36,6 +36,7 @@ KBUILD_CFLAGS += -mno-mmx -mno-sse
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
@@ -78,6 +79,7 @@ vmlinux-objs-$(CONFIG_EARLY_PRINTK) += $(obj)/early_serial_console.o
vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
ifdef CONFIG_X86_64
vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o
+ vmlinux-objs-y += $(obj)/mem_encrypt.o
endif
$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index beb255b66447..20919b4f3133 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -131,6 +131,19 @@ ENTRY(startup_32)
/*
* Build early 4G boot pagetable
*/
+ /*
+ * If SEV is active then set the encryption mask in the page tables.
+ * This will ensure that when the kernel is copied and decompressed,
+ * it is done encrypted.
+ */
+ call get_sev_encryption_bit
+ xorl %edx, %edx
+ testl %eax, %eax
+ jz 1f
+ subl $32, %eax /* Encryption bit is always above bit 31 */
+ bts %eax, %edx /* Set encryption mask for page tables */
+1:
+
/* Initialize Page tables to 0 */
leal pgtable(%ebx), %edi
xorl %eax, %eax
@@ -141,12 +154,14 @@ ENTRY(startup_32)
leal pgtable + 0(%ebx), %edi
leal 0x1007 (%edi), %eax
movl %eax, 0(%edi)
+ addl %edx, 4(%edi)
/* Build Level 3 */
leal pgtable + 0x1000(%ebx), %edi
leal 0x1007(%edi), %eax
movl $4, %ecx
1: movl %eax, 0x00(%edi)
+ addl %edx, 0x04(%edi)
addl $0x00001000, %eax
addl $8, %edi
decl %ecx
@@ -157,6 +172,7 @@ ENTRY(startup_32)
movl $0x00000183, %eax
movl $2048, %ecx
1: movl %eax, 0(%edi)
+ addl %edx, 4(%edi)
addl $0x00200000, %eax
addl $8, %edi
decl %ecx
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
new file mode 100644
index 000000000000..54f5f6625a73
--- /dev/null
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -0,0 +1,120 @@
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/processor-flags.h>
+#include <asm/msr.h>
+#include <asm/asm-offsets.h>
+
+ .text
+ .code32
+ENTRY(get_sev_encryption_bit)
+ xor %eax, %eax
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ push %ebx
+ push %ecx
+ push %edx
+ push %edi
+
+ /*
+ * RIP-relative addressing is needed to access the encryption bit
+ * variable. Since we are running in 32-bit mode we need this call/pop
+ * sequence to get the proper relative addressing.
+ */
+ call 1f
+1: popl %edi
+ subl $1b, %edi
+
+ movl enc_bit(%edi), %eax
+ cmpl $0, %eax
+ jge .Lsev_exit
+
+ /* Check if running under a hypervisor */
+ movl $1, %eax
+ cpuid
+ bt $31, %ecx /* Check the hypervisor bit */
+ jnc .Lno_sev
+
+ movl $0x80000000, %eax /* CPUID to check the highest leaf */
+ cpuid
+ cmpl $0x8000001f, %eax /* See if 0x8000001f is available */
+ jb .Lno_sev
+
+ /*
+ * Check for the SEV feature:
+ * CPUID Fn8000_001F[EAX] - Bit 1
+ * CPUID Fn8000_001F[EBX] - Bits 5:0
+ * Pagetable bit position used to indicate encryption
+ */
+ movl $0x8000001f, %eax
+ cpuid
+ bt $1, %eax /* Check if SEV is available */
+ jnc .Lno_sev
+
+ movl $MSR_AMD64_SEV, %ecx /* Read the SEV MSR */
+ rdmsr
+ bt $MSR_AMD64_SEV_ENABLED_BIT, %eax /* Check if SEV is active */
+ jnc .Lno_sev
+
+ movl %ebx, %eax
+ andl $0x3f, %eax /* Return the encryption bit location */
+ movl %eax, enc_bit(%edi)
+ jmp .Lsev_exit
+
+.Lno_sev:
+ xor %eax, %eax
+ movl %eax, enc_bit(%edi)
+
+.Lsev_exit:
+ pop %edi
+ pop %edx
+ pop %ecx
+ pop %ebx
+
+#endif /* CONFIG_AMD_MEM_ENCRYPT */
+
+ ret
+ENDPROC(get_sev_encryption_bit)
+
+ .code64
+ENTRY(get_sev_encryption_mask)
+ xor %rax, %rax
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ push %rbp
+ push %rdx
+
+ movq %rsp, %rbp /* Save current stack pointer */
+
+ call get_sev_encryption_bit /* Get the encryption bit position */
+ testl %eax, %eax
+ jz .Lno_sev_mask
+
+ xor %rdx, %rdx
+ bts %rax, %rdx /* Create the encryption mask */
+ mov %rdx, %rax /* ... and return it */
+
+.Lno_sev_mask:
+ movq %rbp, %rsp /* Restore original stack pointer */
+
+ pop %rdx
+ pop %rbp
+#endif
+
+ ret
+ENDPROC(get_sev_encryption_mask)
+
+ .data
+enc_bit:
+ .int 0xffffffff
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 32d4ec2e0243..9d323dc6b159 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -109,4 +109,6 @@ static inline void console_init(void)
{ }
#endif
+unsigned long get_sev_encryption_mask(void);
+
#endif
diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
index 972319ff5b01..d5364ca2e3f9 100644
--- a/arch/x86/boot/compressed/pagetable.c
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -77,16 +77,18 @@ static unsigned long top_level_pgt;
* Mapping information structure passed to kernel_ident_mapping_init().
* Due to relocation, pointers must be assigned at run time not build time.
*/
-static struct x86_mapping_info mapping_info = {
- .page_flag = __PAGE_KERNEL_LARGE_EXEC,
-};
+static struct x86_mapping_info mapping_info;
/* Locates and clears a region for a new top level page table. */
void initialize_identity_maps(void)
{
+ unsigned long sev_me_mask = get_sev_encryption_mask();
+
/* Init mapping_info with run-time function/buffer pointers. */
mapping_info.alloc_pgt_page = alloc_pgt_page;
mapping_info.context = &pgt_data;
+ mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sev_me_mask;
+ mapping_info.kernpg_flag = _KERNPG_TABLE | sev_me_mask;
/*
* It should be impossible for this not to already be true,
diff --git a/arch/x86/boot/genimage.sh b/arch/x86/boot/genimage.sh
new file mode 100644
index 000000000000..49f4970f693b
--- /dev/null
+++ b/arch/x86/boot/genimage.sh
@@ -0,0 +1,124 @@
+#!/bin/sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2017 by Changbin Du <changbin.du@intel.com>
+#
+# Adapted from code in arch/x86/boot/Makefile by H. Peter Anvin and others
+#
+# "make fdimage/fdimage144/fdimage288/isoimage" script for x86 architecture
+#
+# Arguments:
+# $1 - fdimage format
+# $2 - target image file
+# $3 - kernel bzImage file
+# $4 - mtools configuration file
+# $5 - kernel cmdline
+# $6 - initrd image file
+#
+
+# Use "make V=1" to debug this script
+case "${KBUILD_VERBOSE}" in
+*1*)
+ set -x
+ ;;
+esac
+
+verify () {
+ if [ ! -f "$1" ]; then
+ echo "" 1>&2
+ echo " *** Missing file: $1" 1>&2
+ echo "" 1>&2
+ exit 1
+ fi
+}
+
+
+export MTOOLSRC=$4
+FIMAGE=$2
+FBZIMAGE=$3
+KCMDLINE=$5
+FDINITRD=$6
+
+# Make sure the files actually exist
+verify "$FBZIMAGE"
+verify "$MTOOLSRC"
+
+genbzdisk() {
+ mformat a:
+ syslinux $FIMAGE
+ echo "$KCMDLINE" | mcopy - a:syslinux.cfg
+ if [ -f "$FDINITRD" ] ; then
+ mcopy "$FDINITRD" a:initrd.img
+ fi
+ mcopy $FBZIMAGE a:linux
+}
+
+genfdimage144() {
+ dd if=/dev/zero of=$FIMAGE bs=1024 count=1440 2> /dev/null
+ mformat v:
+ syslinux $FIMAGE
+ echo "$KCMDLINE" | mcopy - v:syslinux.cfg
+ if [ -f "$FDINITRD" ] ; then
+ mcopy "$FDINITRD" v:initrd.img
+ fi
+ mcopy $FBZIMAGE v:linux
+}
+
+genfdimage288() {
+ dd if=/dev/zero of=$FIMAGE bs=1024 count=2880 2> /dev/null
+ mformat w:
+ syslinux $FIMAGE
+ echo "$KCMDLINE" | mcopy - w:syslinux.cfg
+ if [ -f "$FDINITRD" ] ; then
+ mcopy "$FDINITRD" w:initrd.img
+ fi
+ mcopy $FBZIMAGE w:linux
+}
+
+genisoimage() {
+ tmp_dir=`dirname $FIMAGE`/isoimage
+ rm -rf $tmp_dir
+ mkdir $tmp_dir
+ for i in lib lib64 share end ; do
+ for j in syslinux ISOLINUX ; do
+ if [ -f /usr/$i/$j/isolinux.bin ] ; then
+ isolinux=/usr/$i/$j/isolinux.bin
+ cp $isolinux $tmp_dir
+ fi
+ done
+ for j in syslinux syslinux/modules/bios ; do
+ if [ -f /usr/$i/$j/ldlinux.c32 ]; then
+ ldlinux=/usr/$i/$j/ldlinux.c32
+ cp $ldlinux $tmp_dir
+ fi
+ done
+ if [ -n "$isolinux" -a -n "$ldlinux" ] ; then
+ break
+ fi
+ if [ $i = end -a -z "$isolinux" ] ; then
+ echo 'Need an isolinux.bin file, please install syslinux/isolinux.'
+ exit 1
+ fi
+ done
+ cp $FBZIMAGE $tmp_dir/linux
+ echo "$KCMDLINE" > $tmp_dir/isolinux.cfg
+ if [ -f "$FDINITRD" ] ; then
+ cp "$FDINITRD" $tmp_dir/initrd.img
+ fi
+ mkisofs -J -r -input-charset=utf-8 -quiet -o $FIMAGE -b isolinux.bin \
+ -c boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table \
+ $tmp_dir
+ isohybrid $FIMAGE 2>/dev/null || true
+ rm -rf $tmp_dir
+}
+
+case $1 in
+ bzdisk) genbzdisk;;
+ fdimage144) genfdimage144;;
+ fdimage288) genfdimage288;;
+ isoimage) genisoimage;;
+ *) echo 'Unknown image format'; exit 1;
+esac
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 9c7ea597eee6..850b8762e889 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -17,7 +17,6 @@
*/
#include <asm/segment.h>
-#include <generated/utsrelease.h>
#include <asm/boot.h>
#include <asm/page_types.h>
#include <asm/setup.h>
diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c
index 45bc9402aa49..a14c5178d4ba 100644
--- a/arch/x86/boot/video-vga.c
+++ b/arch/x86/boot/video-vga.c
@@ -241,9 +241,9 @@ static int vga_probe(void)
vga_modes,
};
static int mode_count[] = {
- sizeof(cga_modes)/sizeof(struct mode_info),
- sizeof(ega_modes)/sizeof(struct mode_info),
- sizeof(vga_modes)/sizeof(struct mode_info),
+ ARRAY_SIZE(cga_modes),
+ ARRAY_SIZE(ega_modes),
+ ARRAY_SIZE(vga_modes),
};
struct biosregs ireg, oreg;
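This hunk (like the vdso2c.c change later in the series) replaces an open-coded sizeof division with ARRAY_SIZE(). The helper is just that division behind a macro; a minimal stand-alone version, with an illustrative struct layout and without the kernel's added type-safety check:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct mode_info { int mode, x, y; };	/* field layout is illustrative */

static struct mode_info cga_modes[] = {
	{ 0, 40, 25 },
	{ 1, 80, 25 },
};

int main(void)
{
	/* Evaluates to 2 at compile time; no manual count to keep in sync. */
	printf("%zu modes\n", ARRAY_SIZE(cga_modes));
	return 0;
}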
diff --git a/arch/x86/configs/tiny.config b/arch/x86/configs/tiny.config
index 550cd5012b73..66c9e2aab16c 100644
--- a/arch/x86/configs/tiny.config
+++ b/arch/x86/configs/tiny.config
@@ -1,5 +1,5 @@
CONFIG_NOHIGHMEM=y
# CONFIG_HIGHMEM4G is not set
# CONFIG_HIGHMEM64G is not set
-CONFIG_GUESS_UNWINDER=y
-# CONFIG_FRAME_POINTER_UNWINDER is not set
+CONFIG_UNWINDER_GUESS=y
+# CONFIG_UNWINDER_FRAME_POINTER is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 4a4b16e56d35..e32fc1f274d8 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -299,6 +299,7 @@ CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_RODATA_TEST is not set
CONFIG_DEBUG_BOOT_PARAMS=y
CONFIG_OPTIMIZE_INLINING=y
+CONFIG_UNWINDER_ORC=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 5c15d6b57329..3bf3dcf29825 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -28,6 +28,7 @@
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
+#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
@@ -1067,9 +1068,10 @@ static struct skcipher_alg aesni_skciphers[] = {
}
};
+static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
-struct {
+static struct {
const char *algname;
const char *drvname;
const char *basename;
@@ -1131,7 +1133,7 @@ static struct aead_alg aesni_aead_algs[] = { {
.setauthsize = common_rfc4106_set_authsize,
.encrypt = helper_rfc4106_encrypt,
.decrypt = helper_rfc4106_decrypt,
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = 16,
.base = {
.cra_name = "__gcm-aes-aesni",
@@ -1149,7 +1151,7 @@ static struct aead_alg aesni_aead_algs[] = { {
.setauthsize = rfc4106_set_authsize,
.encrypt = rfc4106_encrypt,
.decrypt = rfc4106_decrypt,
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = 16,
.base = {
.cra_name = "rfc4106(gcm(aes))",
@@ -1165,7 +1167,7 @@ static struct aead_alg aesni_aead_algs[] = { {
.setauthsize = generic_gcmaes_set_authsize,
.encrypt = generic_gcmaes_encrypt,
.decrypt = generic_gcmaes_decrypt,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = 16,
.base = {
.cra_name = "gcm(aes)",
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S
index f247304299a2..1c099dc08cc3 100644
--- a/arch/x86/crypto/crc32-pclmul_asm.S
+++ b/arch/x86/crypto/crc32-pclmul_asm.S
@@ -41,6 +41,7 @@
#include <asm/inst.h>
+.section .rodata
.align 16
/*
* [x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4
@@ -111,19 +112,13 @@ ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
pxor CONSTANT, %xmm1
sub $0x40, LEN
add $0x40, BUF
-#ifndef __x86_64__
- /* This is for position independent code(-fPIC) support for 32bit */
- call delta
-delta:
- pop %ecx
-#endif
cmp $0x40, LEN
jb less_64
#ifdef __x86_64__
movdqa .Lconstant_R2R1(%rip), CONSTANT
#else
- movdqa .Lconstant_R2R1 - delta(%ecx), CONSTANT
+ movdqa .Lconstant_R2R1, CONSTANT
#endif
loop_64:/* 64 bytes Full cache line folding */
@@ -172,7 +167,7 @@ less_64:/* Folding cache line into 128bit */
#ifdef __x86_64__
movdqa .Lconstant_R4R3(%rip), CONSTANT
#else
- movdqa .Lconstant_R4R3 - delta(%ecx), CONSTANT
+ movdqa .Lconstant_R4R3, CONSTANT
#endif
prefetchnta (BUF)
@@ -220,8 +215,8 @@ fold_64:
movdqa .Lconstant_R5(%rip), CONSTANT
movdqa .Lconstant_mask32(%rip), %xmm3
#else
- movdqa .Lconstant_R5 - delta(%ecx), CONSTANT
- movdqa .Lconstant_mask32 - delta(%ecx), %xmm3
+ movdqa .Lconstant_R5, CONSTANT
+ movdqa .Lconstant_mask32, %xmm3
#endif
psrldq $0x04, %xmm2
pand %xmm3, %xmm1
@@ -232,7 +227,7 @@ fold_64:
#ifdef __x86_64__
movdqa .Lconstant_RUpoly(%rip), CONSTANT
#else
- movdqa .Lconstant_RUpoly - delta(%ecx), CONSTANT
+ movdqa .Lconstant_RUpoly, CONSTANT
#endif
movdqa %xmm1, %xmm2
pand %xmm3, %xmm1
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 6e160031cfea..3fd8bc560fae 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -142,56 +142,25 @@ For 32-bit we have the following conventions - kernel is built with
UNWIND_HINT_REGS offset=\offset
.endm
- .macro RESTORE_EXTRA_REGS offset=0
- movq 0*8+\offset(%rsp), %r15
- movq 1*8+\offset(%rsp), %r14
- movq 2*8+\offset(%rsp), %r13
- movq 3*8+\offset(%rsp), %r12
- movq 4*8+\offset(%rsp), %rbp
- movq 5*8+\offset(%rsp), %rbx
- UNWIND_HINT_REGS offset=\offset extra=0
- .endm
-
- .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
- .if \rstor_r11
- movq 6*8(%rsp), %r11
- .endif
- .if \rstor_r8910
- movq 7*8(%rsp), %r10
- movq 8*8(%rsp), %r9
- movq 9*8(%rsp), %r8
- .endif
- .if \rstor_rax
- movq 10*8(%rsp), %rax
- .endif
- .if \rstor_rcx
- movq 11*8(%rsp), %rcx
- .endif
- .if \rstor_rdx
- movq 12*8(%rsp), %rdx
- .endif
- movq 13*8(%rsp), %rsi
- movq 14*8(%rsp), %rdi
- UNWIND_HINT_IRET_REGS offset=16*8
- .endm
- .macro RESTORE_C_REGS
- RESTORE_C_REGS_HELPER 1,1,1,1,1
- .endm
- .macro RESTORE_C_REGS_EXCEPT_RAX
- RESTORE_C_REGS_HELPER 0,1,1,1,1
- .endm
- .macro RESTORE_C_REGS_EXCEPT_RCX
- RESTORE_C_REGS_HELPER 1,0,1,1,1
- .endm
- .macro RESTORE_C_REGS_EXCEPT_R11
- RESTORE_C_REGS_HELPER 1,1,0,1,1
- .endm
- .macro RESTORE_C_REGS_EXCEPT_RCX_R11
- RESTORE_C_REGS_HELPER 1,0,0,1,1
- .endm
-
- .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
- subq $-(15*8+\addskip), %rsp
+ .macro POP_EXTRA_REGS
+ popq %r15
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %rbp
+ popq %rbx
+ .endm
+
+ .macro POP_C_REGS
+ popq %r11
+ popq %r10
+ popq %r9
+ popq %r8
+ popq %rax
+ popq %rcx
+ popq %rdx
+ popq %rsi
+ popq %rdi
.endm
.macro icebp
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 03505ffbe1b6..d7d3cc24baf4 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -75,7 +75,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
BUG_ON(regs != task_pt_regs(current));
- work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
+ work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
if (unlikely(work & _TIF_SYSCALL_EMU))
emulated = true;
@@ -186,9 +186,7 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
addr_limit_user_check();
- if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
- local_irq_disable();
-
+ lockdep_assert_irqs_disabled();
lockdep_sys_exit();
cached_flags = READ_ONCE(ti->flags);
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index bcfc5668dcb2..a2b30ec69497 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -221,10 +221,9 @@ entry_SYSCALL_64_fastpath:
TRACE_IRQS_ON /* user mode is traced as IRQs on */
movq RIP(%rsp), %rcx
movq EFLAGS(%rsp), %r11
- RESTORE_C_REGS_EXCEPT_RCX_R11
- movq RSP(%rsp), %rsp
+ addq $6*8, %rsp /* skip extra regs -- they were preserved */
UNWIND_HINT_EMPTY
- USERGS_SYSRET64
+ jmp .Lpop_c_regs_except_rcx_r11_and_sysret
1:
/*
@@ -246,17 +245,18 @@ entry_SYSCALL64_slow_path:
call do_syscall_64 /* returns with IRQs disabled */
return_from_SYSCALL_64:
- RESTORE_EXTRA_REGS
TRACE_IRQS_IRETQ /* we're about to change IF */
/*
* Try to use SYSRET instead of IRET if we're returning to
- * a completely clean 64-bit userspace context.
+ * a completely clean 64-bit userspace context. If we're not,
+ * go to the slow exit path.
*/
movq RCX(%rsp), %rcx
movq RIP(%rsp), %r11
- cmpq %rcx, %r11 /* RCX == RIP */
- jne opportunistic_sysret_failed
+
+ cmpq %rcx, %r11 /* SYSRET requires RCX == RIP */
+ jne swapgs_restore_regs_and_return_to_usermode
/*
* On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
@@ -274,14 +274,14 @@ return_from_SYSCALL_64:
/* If this changed %rcx, it was not canonical */
cmpq %rcx, %r11
- jne opportunistic_sysret_failed
+ jne swapgs_restore_regs_and_return_to_usermode
cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */
- jne opportunistic_sysret_failed
+ jne swapgs_restore_regs_and_return_to_usermode
movq R11(%rsp), %r11
cmpq %r11, EFLAGS(%rsp) /* R11 == RFLAGS */
- jne opportunistic_sysret_failed
+ jne swapgs_restore_regs_and_return_to_usermode
/*
* SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
@@ -302,12 +302,12 @@ return_from_SYSCALL_64:
* would never get past 'stuck_here'.
*/
testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
- jnz opportunistic_sysret_failed
+ jnz swapgs_restore_regs_and_return_to_usermode
/* nothing to check for RSP */
cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */
- jne opportunistic_sysret_failed
+ jne swapgs_restore_regs_and_return_to_usermode
/*
* We win! This label is here just for ease of understanding
@@ -315,14 +315,20 @@ return_from_SYSCALL_64:
*/
syscall_return_via_sysret:
/* rcx and r11 are already restored (see code above) */
- RESTORE_C_REGS_EXCEPT_RCX_R11
- movq RSP(%rsp), %rsp
UNWIND_HINT_EMPTY
+ POP_EXTRA_REGS
+.Lpop_c_regs_except_rcx_r11_and_sysret:
+ popq %rsi /* skip r11 */
+ popq %r10
+ popq %r9
+ popq %r8
+ popq %rax
+ popq %rsi /* skip rcx */
+ popq %rdx
+ popq %rsi
+ popq %rdi
+ movq RSP-ORIG_RAX(%rsp), %rsp
USERGS_SYSRET64
-
-opportunistic_sysret_failed:
- SWAPGS
- jmp restore_c_regs_and_iret
END(entry_SYSCALL_64)
ENTRY(stub_ptregs_64)
@@ -423,8 +429,7 @@ ENTRY(ret_from_fork)
movq %rsp, %rdi
call syscall_return_slowpath /* returns with IRQs disabled */
TRACE_IRQS_ON /* user mode is traced as IRQS on */
- SWAPGS
- jmp restore_regs_and_iret
+ jmp swapgs_restore_regs_and_return_to_usermode
1:
/* kernel thread */
@@ -612,8 +617,21 @@ GLOBAL(retint_user)
mov %rsp,%rdi
call prepare_exit_to_usermode
TRACE_IRQS_IRETQ
+
+GLOBAL(swapgs_restore_regs_and_return_to_usermode)
+#ifdef CONFIG_DEBUG_ENTRY
+ /* Assert that pt_regs indicates user mode. */
+ testb $3, CS(%rsp)
+ jnz 1f
+ ud2
+1:
+#endif
SWAPGS
- jmp restore_regs_and_iret
+ POP_EXTRA_REGS
+ POP_C_REGS
+ addq $8, %rsp /* skip regs->orig_ax */
+ INTERRUPT_RETURN
+
/* Returning to kernel space */
retint_kernel:
@@ -633,15 +651,17 @@ retint_kernel:
*/
TRACE_IRQS_IRETQ
-/*
- * At this label, code paths which return to kernel and to user,
- * which come from interrupts/exception and from syscalls, merge.
- */
-GLOBAL(restore_regs_and_iret)
- RESTORE_EXTRA_REGS
-restore_c_regs_and_iret:
- RESTORE_C_REGS
- REMOVE_PT_GPREGS_FROM_STACK 8
+GLOBAL(restore_regs_and_return_to_kernel)
+#ifdef CONFIG_DEBUG_ENTRY
+ /* Assert that pt_regs indicates kernel mode. */
+ testb $3, CS(%rsp)
+ jz 1f
+ ud2
+1:
+#endif
+ POP_EXTRA_REGS
+ POP_C_REGS
+ addq $8, %rsp /* skip regs->orig_ax */
INTERRUPT_RETURN
ENTRY(native_iret)
@@ -818,7 +838,7 @@ ENTRY(\sym)
ASM_CLAC
- .ifeq \has_error_code
+ .if \has_error_code == 0
pushq $-1 /* ORIG_RAX: no syscall to restart */
.endif
@@ -1059,6 +1079,7 @@ idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment do_stack_segment has_error_code=1
#ifdef CONFIG_XEN
+idtentry xennmi do_nmi has_error_code=0
idtentry xendebug do_debug has_error_code=0
idtentry xenint3 do_int3 has_error_code=0
#endif
@@ -1112,17 +1133,14 @@ ENTRY(paranoid_exit)
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF_DEBUG
testl %ebx, %ebx /* swapgs needed? */
- jnz paranoid_exit_no_swapgs
+ jnz .Lparanoid_exit_no_swapgs
TRACE_IRQS_IRETQ
SWAPGS_UNSAFE_STACK
- jmp paranoid_exit_restore
-paranoid_exit_no_swapgs:
+ jmp .Lparanoid_exit_restore
+.Lparanoid_exit_no_swapgs:
TRACE_IRQS_IRETQ_DEBUG
-paranoid_exit_restore:
- RESTORE_EXTRA_REGS
- RESTORE_C_REGS
- REMOVE_PT_GPREGS_FROM_STACK 8
- INTERRUPT_RETURN
+.Lparanoid_exit_restore:
+ jmp restore_regs_and_return_to_kernel
END(paranoid_exit)
/*
@@ -1223,10 +1241,13 @@ ENTRY(error_exit)
jmp retint_user
END(error_exit)
-/* Runs on exception stack */
-/* XXX: broken on Xen PV */
+/*
+ * Runs on exception stack. Xen PV does not go through this path at all,
+ * so we can use real assembly here.
+ */
ENTRY(nmi)
UNWIND_HINT_IRET_REGS
+
/*
* We allow breakpoints in NMIs. If a breakpoint occurs, then
* the iretq it performs will take us out of NMI context.
@@ -1284,7 +1305,7 @@ ENTRY(nmi)
* stacks lest we corrupt the "NMI executing" variable.
*/
- SWAPGS_UNSAFE_STACK
+ swapgs
cld
movq %rsp, %rdx
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -1328,8 +1349,7 @@ ENTRY(nmi)
* Return back to user mode. We must *not* do the normal exit
* work, because we don't want to enable interrupts.
*/
- SWAPGS
- jmp restore_regs_and_iret
+ jmp swapgs_restore_regs_and_return_to_usermode
.Lnmi_from_kernel:
/*
@@ -1450,7 +1470,7 @@ nested_nmi_out:
popq %rdx
/* We are returning to kernel mode, so this cannot result in a fault. */
- INTERRUPT_RETURN
+ iretq
first_nmi:
/* Restore rdx. */
@@ -1481,7 +1501,7 @@ first_nmi:
pushfq /* RFLAGS */
pushq $__KERNEL_CS /* CS */
pushq $1f /* RIP */
- INTERRUPT_RETURN /* continues at repeat_nmi below */
+ iretq /* continues at repeat_nmi below */
UNWIND_HINT_IRET_REGS
1:
#endif
@@ -1544,29 +1564,34 @@ end_repeat_nmi:
nmi_swapgs:
SWAPGS_UNSAFE_STACK
nmi_restore:
- RESTORE_EXTRA_REGS
- RESTORE_C_REGS
+ POP_EXTRA_REGS
+ POP_C_REGS
- /* Point RSP at the "iret" frame. */
- REMOVE_PT_GPREGS_FROM_STACK 6*8
+ /*
+ * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
+ * frame.
+ */
+ addq $6*8, %rsp
/*
* Clear "NMI executing". Set DF first so that we can easily
* distinguish the remaining code between here and IRET from
- * the SYSCALL entry and exit paths. On a native kernel, we
- * could just inspect RIP, but, on paravirt kernels,
- * INTERRUPT_RETURN can translate into a jump into a
- * hypercall page.
+ * the SYSCALL entry and exit paths.
+ *
+ * We arguably should just inspect RIP instead, but I (Andy) wrote
+ * this code when I had the misapprehension that Xen PV supported
+ * NMIs, and Xen PV would break that approach.
*/
std
movq $0, 5*8(%rsp) /* clear "NMI executing" */
/*
- * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
- * stack in a single instruction. We are returning to kernel
- * mode, so this cannot result in a fault.
+ * iretq reads the "iret" frame and exits the NMI stack in a
+ * single instruction. We are returning to kernel mode, so this
+ * cannot result in a fault. Similarly, we don't need to worry
+ * about espfix64 on the way back to kernel mode.
*/
- INTERRUPT_RETURN
+ iretq
END(nmi)
ENTRY(ignore_sysret)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index b5c7a56ed256..568e130d932c 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -337,8 +337,7 @@ ENTRY(entry_INT80_compat)
/* Go back to user mode. */
TRACE_IRQS_ON
- SWAPGS
- jmp restore_regs_and_iret
+ jmp swapgs_restore_regs_and_return_to_usermode
END(entry_INT80_compat)
ENTRY(stub32_clone)
diff --git a/arch/x86/entry/syscalls/Makefile b/arch/x86/entry/syscalls/Makefile
index 331f1dca5085..6fb9b57ed5ba 100644
--- a/arch/x86/entry/syscalls/Makefile
+++ b/arch/x86/entry/syscalls/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-out := $(obj)/../../include/generated/asm
-uapi := $(obj)/../../include/generated/uapi/asm
+out := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
# Create output directory if not already present
_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index fa8dbfcf7ed3..11b13c4b43d5 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -318,7 +318,7 @@ int gettimeofday(struct timeval *, struct timezone *)
notrace time_t __vdso_time(time_t *t)
{
/* This is atomic on x86 so we don't need any locks. */
- time_t result = ACCESS_ONCE(gtod->wall_time_sec);
+ time_t result = READ_ONCE(gtod->wall_time_sec);
if (t)
*t = result;
diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c
index 0780a443a53b..4674f58581a1 100644
--- a/arch/x86/entry/vdso/vdso2c.c
+++ b/arch/x86/entry/vdso/vdso2c.c
@@ -65,6 +65,7 @@
#include <linux/elf.h>
#include <linux/types.h>
+#include <linux/kernel.h>
const char *outfilename;
@@ -151,7 +152,7 @@ extern void bad_put_le(void);
PLE(x, val, 64, PLE(x, val, 32, PLE(x, val, 16, LAST_PLE(x, val))))
-#define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))
+#define NSYMS ARRAY_SIZE(required_syms)
#define BITSFUNC3(name, bits, suffix) name##bits##suffix
#define BITSFUNC2(name, bits, suffix) BITSFUNC3(name, bits, suffix)
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 1911310959f8..d63053142b16 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -114,10 +114,11 @@ static int vvar_fault(const struct vm_special_mapping *sm,
struct pvclock_vsyscall_time_info *pvti =
pvclock_pvti_cpu0_va();
if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
- ret = vm_insert_pfn(
+ ret = vm_insert_pfn_prot(
vma,
vmf->address,
- __pa(pvti) >> PAGE_SHIFT);
+ __pa(pvti) >> PAGE_SHIFT,
+ pgprot_decrypted(vma->vm_page_prot));
}
} else if (sym_offset == image->sym_hvclock_page) {
struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index 3641e24fdac5..38b5d41b0c37 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -405,7 +405,7 @@ const struct attribute_group *amd_iommu_attr_groups[] = {
NULL,
};
-static struct pmu iommu_pmu = {
+static const struct pmu iommu_pmu __initconst = {
.event_init = perf_iommu_event_init,
.add = perf_iommu_add,
.del = perf_iommu_del,
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 80534d3c2480..140d33288e78 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2118,7 +2118,7 @@ static int x86_pmu_event_init(struct perf_event *event)
event->destroy(event);
}
- if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
+ if (READ_ONCE(x86_pmu.attr_rdpmc))
event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
return err;
@@ -2371,7 +2371,7 @@ static unsigned long get_segment_base(unsigned int segment)
struct ldt_struct *ldt;
/* IRQs are off, so this synchronizes with smp_store_release */
- ldt = lockless_dereference(current->active_mm->context.ldt);
+ ldt = READ_ONCE(current->active_mm->context.ldt);
if (!ldt || idx >= ldt->nr_entries)
return 0;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 9fb9a1f1e47b..43445da30cea 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2958,6 +2958,10 @@ static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
if (event->attr.use_clockid)
flags &= ~PERF_SAMPLE_TIME;
+ if (!event->attr.exclude_kernel)
+ flags &= ~PERF_SAMPLE_REGS_USER;
+ if (event->attr.sample_regs_user & ~PEBS_REGS)
+ flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
return flags;
}
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 4196f81ec0e1..f7aaadf9331f 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -85,13 +85,15 @@ struct amd_nb {
* Flags PEBS can handle without a PMI.
*
* TID can only be handled by flushing at context switch.
+ * REGS_USER can be handled for events limited to ring 3.
*
*/
#define PEBS_FREERUNNING_FLAGS \
(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
- PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR)
+ PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
+ PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)
/*
* A debug store configuration.
@@ -110,6 +112,26 @@ struct debug_store {
u64 pebs_event_reset[MAX_PEBS_EVENTS];
};
+#define PEBS_REGS \
+ (PERF_REG_X86_AX | \
+ PERF_REG_X86_BX | \
+ PERF_REG_X86_CX | \
+ PERF_REG_X86_DX | \
+ PERF_REG_X86_DI | \
+ PERF_REG_X86_SI | \
+ PERF_REG_X86_SP | \
+ PERF_REG_X86_BP | \
+ PERF_REG_X86_IP | \
+ PERF_REG_X86_FLAGS | \
+ PERF_REG_X86_R8 | \
+ PERF_REG_X86_R9 | \
+ PERF_REG_X86_R10 | \
+ PERF_REG_X86_R11 | \
+ PERF_REG_X86_R12 | \
+ PERF_REG_X86_R13 | \
+ PERF_REG_X86_R14 | \
+ PERF_REG_X86_R15)
+
/*
* Per register state.
*/
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index a5db63f728a2..189a398290db 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -113,7 +113,7 @@ void hyperv_init(void)
u64 guest_id;
union hv_x64_msr_hypercall_contents hypercall_msr;
- if (x86_hyper != &x86_hyper_ms_hyperv)
+ if (x86_hyper_type != X86_HYPER_MS_HYPERV)
return;
/* Allocate percpu VP index */
@@ -210,9 +210,10 @@ void hyperv_cleanup(void)
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);
-void hyperv_report_panic(struct pt_regs *regs)
+void hyperv_report_panic(struct pt_regs *regs, long err)
{
static bool panic_reported;
+ u64 guest_id;
/*
* We prefer to report panic on 'die' chain as we have proper
@@ -223,11 +224,13 @@ void hyperv_report_panic(struct pt_regs *regs)
return;
panic_reported = true;
- wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
- wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
- wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
- wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
- wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);
+ rdmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
+
+ wrmsrl(HV_X64_MSR_CRASH_P0, err);
+ wrmsrl(HV_X64_MSR_CRASH_P1, guest_id);
+ wrmsrl(HV_X64_MSR_CRASH_P2, regs->ip);
+ wrmsrl(HV_X64_MSR_CRASH_P3, regs->ax);
+ wrmsrl(HV_X64_MSR_CRASH_P4, regs->sp);
/*
* Let Hyper-V know there is crash data available
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 5f01671c68f2..a9e57f08bfa6 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -53,6 +53,15 @@ extern int local_apic_timer_c2_ok;
extern int disable_apic;
extern unsigned int lapic_timer_frequency;
+extern enum apic_intr_mode_id apic_intr_mode;
+enum apic_intr_mode_id {
+ APIC_PIC,
+ APIC_VIRTUAL_WIRE,
+ APIC_VIRTUAL_WIRE_NO_CONFIG,
+ APIC_SYMMETRIC_IO,
+ APIC_SYMMETRIC_IO_NO_ROUTING
+};
+
#ifdef CONFIG_SMP
extern void __inquire_remote_apic(int apicid);
#else /* CONFIG_SMP */
@@ -127,14 +136,13 @@ extern void disconnect_bsp_APIC(int virt_wire_setup);
extern void disable_local_APIC(void);
extern void lapic_shutdown(void);
extern void sync_Arb_IDs(void);
-extern void init_bsp_APIC(void);
+extern void apic_intr_mode_init(void);
extern void setup_local_APIC(void);
extern void init_apic_mappings(void);
void register_lapic_address(unsigned long address);
extern void setup_boot_APIC_clock(void);
extern void setup_secondary_APIC_clock(void);
extern void lapic_update_tsc_freq(void);
-extern int APIC_init_uniprocessor(void);
#ifdef CONFIG_X86_64
static inline int apic_force_enable(unsigned long addr)
@@ -145,7 +153,7 @@ static inline int apic_force_enable(unsigned long addr)
extern int apic_force_enable(unsigned long addr);
#endif
-extern int apic_bsp_setup(bool upmode);
+extern void apic_bsp_setup(bool upmode);
extern void apic_ap_setup(void);
/*
@@ -161,6 +169,10 @@ static inline int apic_is_clustered_box(void)
#endif
extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
+extern void lapic_assign_system_vectors(void);
+extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
+extern void lapic_online(void);
+extern void lapic_offline(void);
#else /* !CONFIG_X86_LOCAL_APIC */
static inline void lapic_shutdown(void) { }
@@ -170,6 +182,9 @@ static inline void disable_local_APIC(void) { }
# define setup_boot_APIC_clock x86_init_noop
# define setup_secondary_APIC_clock x86_init_noop
static inline void lapic_update_tsc_freq(void) { }
+static inline void apic_intr_mode_init(void) { }
+static inline void lapic_assign_system_vectors(void) { }
+static inline void lapic_assign_legacy_vector(unsigned int i, bool r) { }
#endif /* !CONFIG_X86_LOCAL_APIC */
#ifdef CONFIG_X86_X2APIC
@@ -265,73 +280,63 @@ struct irq_data;
* James Cleverdon.
*/
struct apic {
- char *name;
-
- int (*probe)(void);
- int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
- int (*apic_id_valid)(int apicid);
- int (*apic_id_registered)(void);
-
- u32 irq_delivery_mode;
- u32 irq_dest_mode;
-
- const struct cpumask *(*target_cpus)(void);
-
- int disable_esr;
-
- int dest_logical;
- unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
-
- void (*vector_allocation_domain)(int cpu, struct cpumask *retmask,
- const struct cpumask *mask);
- void (*init_apic_ldr)(void);
-
- void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
-
- void (*setup_apic_routing)(void);
- int (*cpu_present_to_apicid)(int mps_cpu);
- void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
- int (*check_phys_apicid_present)(int phys_apicid);
- int (*phys_pkg_id)(int cpuid_apic, int index_msb);
-
- unsigned int (*get_apic_id)(unsigned long x);
- /* Can't be NULL on 64-bit */
- unsigned long (*set_apic_id)(unsigned int id);
-
- int (*cpu_mask_to_apicid)(const struct cpumask *cpumask,
- struct irq_data *irqdata,
- unsigned int *apicid);
-
- /* ipi */
- void (*send_IPI)(int cpu, int vector);
- void (*send_IPI_mask)(const struct cpumask *mask, int vector);
- void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
- int vector);
- void (*send_IPI_allbutself)(int vector);
- void (*send_IPI_all)(int vector);
- void (*send_IPI_self)(int vector);
+ /* Hotpath functions first */
+ void (*eoi_write)(u32 reg, u32 v);
+ void (*native_eoi_write)(u32 reg, u32 v);
+ void (*write)(u32 reg, u32 v);
+ u32 (*read)(u32 reg);
+
+ /* IPI related functions */
+ void (*wait_icr_idle)(void);
+ u32 (*safe_wait_icr_idle)(void);
+
+ void (*send_IPI)(int cpu, int vector);
+ void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+ void (*send_IPI_mask_allbutself)(const struct cpumask *msk, int vec);
+ void (*send_IPI_allbutself)(int vector);
+ void (*send_IPI_all)(int vector);
+ void (*send_IPI_self)(int vector);
+
+ /* dest_logical is used by the IPI functions */
+ u32 dest_logical;
+ u32 disable_esr;
+ u32 irq_delivery_mode;
+ u32 irq_dest_mode;
+
+ /* Functions and data related to vector allocation */
+ void (*vector_allocation_domain)(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask);
+ int (*cpu_mask_to_apicid)(const struct cpumask *cpumask,
+ struct irq_data *irqdata,
+ unsigned int *apicid);
+ u32 (*calc_dest_apicid)(unsigned int cpu);
+
+ /* ICR related functions */
+ u64 (*icr_read)(void);
+ void (*icr_write)(u32 low, u32 high);
+
+ /* Probe, setup and smpboot functions */
+ int (*probe)(void);
+ int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
+ int (*apic_id_valid)(int apicid);
+ int (*apic_id_registered)(void);
+
+ bool (*check_apicid_used)(physid_mask_t *map, int apicid);
+ void (*init_apic_ldr)(void);
+ void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
+ void (*setup_apic_routing)(void);
+ int (*cpu_present_to_apicid)(int mps_cpu);
+ void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
+ int (*check_phys_apicid_present)(int phys_apicid);
+ int (*phys_pkg_id)(int cpuid_apic, int index_msb);
+
+ u32 (*get_apic_id)(unsigned long x);
+ u32 (*set_apic_id)(unsigned int id);
/* wakeup_secondary_cpu */
- int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
+ int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
- void (*inquire_remote_apic)(int apicid);
-
- /* apic ops */
- u32 (*read)(u32 reg);
- void (*write)(u32 reg, u32 v);
- /*
- * ->eoi_write() has the same signature as ->write().
- *
- * Drivers can support both ->eoi_write() and ->write() by passing the same
- * callback value. Kernel can override ->eoi_write() and fall back
- * on write for EOI.
- */
- void (*eoi_write)(u32 reg, u32 v);
- void (*native_eoi_write)(u32 reg, u32 v);
- u64 (*icr_read)(void);
- void (*icr_write)(u32 low, u32 high);
- void (*wait_icr_idle)(void);
- u32 (*safe_wait_icr_idle)(void);
+ void (*inquire_remote_apic)(int apicid);
#ifdef CONFIG_X86_32
/*
@@ -346,6 +351,7 @@ struct apic {
*/
int (*x86_32_early_logical_apicid)(int cpu);
#endif
+ char *name;
};
/*
@@ -380,6 +386,7 @@ extern struct apic *__apicdrivers[], *__apicdrivers_end[];
*/
#ifdef CONFIG_SMP
extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
+extern int lapic_can_unplug_cpu(void);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
@@ -463,84 +470,33 @@ static inline unsigned default_get_apic_id(unsigned long x)
extern void apic_send_IPI_self(int vector);
DECLARE_PER_CPU(int, x2apic_extra_bits);
-
-extern int default_cpu_present_to_apicid(int mps_cpu);
-extern int default_check_phys_apicid_present(int phys_apicid);
#endif
extern void generic_bigsmp_probe(void);
-
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/smp.h>
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
-static inline const struct cpumask *default_target_cpus(void)
-{
-#ifdef CONFIG_SMP
- return cpu_online_mask;
-#else
- return cpumask_of(0);
-#endif
-}
-
-static inline const struct cpumask *online_target_cpus(void)
-{
- return cpu_online_mask;
-}
-
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
+extern struct apic apic_noop;
static inline unsigned int read_apic_id(void)
{
- unsigned int reg;
-
- reg = apic_read(APIC_ID);
+ unsigned int reg = apic_read(APIC_ID);
return apic->get_apic_id(reg);
}
-static inline int default_apic_id_valid(int apicid)
-{
- return (apicid < 255);
-}
-
+extern int default_apic_id_valid(int apicid);
extern int default_acpi_madt_oem_check(char *, char *);
-
extern void default_setup_apic_routing(void);
-extern struct apic apic_noop;
-
-#ifdef CONFIG_X86_32
-
-static inline int noop_x86_32_early_logical_apicid(int cpu)
-{
- return BAD_APICID;
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116). So here it goes...
- */
-extern void default_init_apic_ldr(void);
-
-static inline int default_apic_id_registered(void)
-{
- return physid_isset(read_apic_id(), phys_cpu_present_map);
-}
-
-static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
-{
- return cpuid_apic >> index_msb;
-}
-
-#endif
+extern u32 apic_default_calc_apicid(unsigned int cpu);
+extern u32 apic_flat_calc_apicid(unsigned int cpu);
extern int flat_cpu_mask_to_apicid(const struct cpumask *cpumask,
struct irq_data *irqdata,
@@ -548,71 +504,17 @@ extern int flat_cpu_mask_to_apicid(const struct cpumask *cpumask,
extern int default_cpu_mask_to_apicid(const struct cpumask *cpumask,
struct irq_data *irqdata,
unsigned int *apicid);
-
-static inline void
-flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
- const struct cpumask *mask)
-{
- /* Careful. Some cpus do not strictly honor the set of cpus
- * specified in the interrupt destination when using lowest
- * priority interrupt delivery mode.
- *
- * In particular there was a hyperthreading cpu observed to
- * deliver interrupts to the wrong hyperthread when only one
- * hyperthread was specified in the interrupt desitination.
- */
- cpumask_clear(retmask);
- cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
-static inline void
-default_vector_allocation_domain(int cpu, struct cpumask *retmask,
- const struct cpumask *mask)
-{
- cpumask_copy(retmask, cpumask_of(cpu));
-}
-
-static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
-{
- return physid_isset(apicid, *map);
-}
-
-static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
-{
- *retmap = *phys_map;
-}
-
-static inline int __default_cpu_present_to_apicid(int mps_cpu)
-{
- if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
- return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
- else
- return BAD_APICID;
-}
-
-static inline int
-__default_check_phys_apicid_present(int phys_apicid)
-{
- return physid_isset(phys_apicid, phys_cpu_present_map);
-}
-
-#ifdef CONFIG_X86_32
-static inline int default_cpu_present_to_apicid(int mps_cpu)
-{
- return __default_cpu_present_to_apicid(mps_cpu);
-}
-
-static inline int
-default_check_phys_apicid_present(int phys_apicid)
-{
- return __default_check_phys_apicid_present(phys_apicid);
-}
-#else
+extern bool default_check_apicid_used(physid_mask_t *map, int apicid);
+extern void flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask);
+extern void default_vector_allocation_domain(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask);
+extern void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap);
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int phys_apicid);
-#endif
#endif /* CONFIG_X86_LOCAL_APIC */
+
extern void irq_enter(void);
extern void irq_exit(void);
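The struct apic reshuffle above groups the hot register-access and IPI callbacks at the front of the structure and moves cold data such as 'name' to the end; it also introduces the ->calc_dest_apicid() callback used by the reworked vector allocator. Drivers fill the structure with designated initializers, so the reordering needs no driver changes. A trimmed, standalone stand-in (field names copied from the diff, everything else an assumption):

#include <stddef.h>

struct apic_sketch {
        /* Hot path first, as in the patch */
        void         (*eoi_write)(unsigned int reg, unsigned int v);
        void         (*write)(unsigned int reg, unsigned int v);
        unsigned int (*read)(unsigned int reg);
        unsigned int (*calc_dest_apicid)(unsigned int cpu);
        char *name;                     /* cold, moved to the end */
};

/* In the spirit of apic_flat_calc_apicid(): map a CPU number to its
 * logical-flat destination, one bit per CPU. */
static unsigned int flat_calc_apicid(unsigned int cpu)
{
        return 1u << cpu;
}

static struct apic_sketch apic_flat_sketch = {
        .name             = "flat",
        .calc_dest_apicid = flat_calc_apicid,
        /* members not named here are zero-initialized; designated
         * initializers make the member reordering invisible to drivers */
};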
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h
index 5b0579abb398..3ac991d81e74 100644
--- a/arch/x86/include/asm/archrandom.h
+++ b/arch/x86/include/asm/archrandom.h
@@ -45,7 +45,7 @@ static inline bool rdrand_long(unsigned long *v)
bool ok;
unsigned int retry = RDRAND_RETRY_LOOPS;
do {
- asm volatile(RDRAND_LONG "\n\t"
+ asm volatile(RDRAND_LONG
CC_SET(c)
: CC_OUT(c) (ok), "=a" (*v));
if (ok)
@@ -59,7 +59,7 @@ static inline bool rdrand_int(unsigned int *v)
bool ok;
unsigned int retry = RDRAND_RETRY_LOOPS;
do {
- asm volatile(RDRAND_INT "\n\t"
+ asm volatile(RDRAND_INT
CC_SET(c)
: CC_OUT(c) (ok), "=a" (*v));
if (ok)
@@ -71,7 +71,7 @@ static inline bool rdrand_int(unsigned int *v)
static inline bool rdseed_long(unsigned long *v)
{
bool ok;
- asm volatile(RDSEED_LONG "\n\t"
+ asm volatile(RDSEED_LONG
CC_SET(c)
: CC_OUT(c) (ok), "=a" (*v));
return ok;
@@ -80,7 +80,7 @@ static inline bool rdseed_long(unsigned long *v)
static inline bool rdseed_int(unsigned int *v)
{
bool ok;
- asm volatile(RDSEED_INT "\n\t"
+ asm volatile(RDSEED_INT
CC_SET(c)
: CC_OUT(c) (ok), "=a" (*v));
return ok;
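The archrandom.h change only drops a stray "\n\t" that emitted an empty line between the RDRAND/RDSEED opcode bytes and the CC_SET(c) expansion; the generated code is unchanged. For reference, a userspace sketch of what rdrand_long() boils down to on compilers with flag-output asm operands, where CC_OUT(c) becomes the "=@ccc" constraint; the function name is an assumption, RDRAND_RETRY_LOOPS matches the kernel's value:

#include <stdbool.h>
#include <stdint.h>

#define RDRAND_RETRY_LOOPS 10

/* Retry a bounded number of times; the carry flag reports success. */
static inline bool rdrand_u64(uint64_t *v)
{
        bool ok;
        unsigned int retry = RDRAND_RETRY_LOOPS;

        do {
                asm volatile("rdrand %1"
                             : "=@ccc" (ok), "=r" (*v));
                if (ok)
                        return true;
        } while (--retry);

        return false;
}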
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 01727dbc294a..7fb336210e1b 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -12,11 +12,11 @@
*/
#ifdef CONFIG_X86_32
-#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
+#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
X86_FEATURE_XMM2) ::: "memory", "cc")
-#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
+#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
X86_FEATURE_XMM2) ::: "memory", "cc")
-#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
+#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb() asm volatile("mfence":::"memory")
@@ -31,7 +31,11 @@
#endif
#define dma_wmb() barrier()
-#define __smp_mb() mb()
+#ifdef CONFIG_X86_32
+#define __smp_mb() asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
+#else
+#define __smp_mb() asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
+#endif
#define __smp_rmb() dma_rmb()
#define __smp_wmb() barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
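The barrier.h hunk moves the locked-ADD trick from 0(%esp) to -4(%esp) and uses it for __smp_mb() on 64-bit too: a LOCK-prefixed read-modify-write is a full barrier and is cheaper than MFENCE on most CPUs, and the -4 offset keeps the barrier from touching, and so creating a false dependency on, the live value at the top of the stack. A minimal standalone sketch of the idiom, assuming an x86 target (on x86-64 the word below RSP lies in the red zone):

static inline void smp_mb_sketch(void)
{
#ifdef __x86_64__
        asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc");
#else
        asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc");
#endif
}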
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 2bcf47314959..3fa039855b8f 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -143,7 +143,7 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
bool negative;
- asm volatile(LOCK_PREFIX "andb %2,%1\n\t"
+ asm volatile(LOCK_PREFIX "andb %2,%1"
CC_SET(s)
: CC_OUT(s) (negative), ADDR
: "ir" ((char) ~(1 << nr)) : "memory");
@@ -246,7 +246,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
{
bool oldbit;
- asm("bts %2,%1\n\t"
+ asm("bts %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit), ADDR
: "Ir" (nr));
@@ -286,7 +286,7 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
{
bool oldbit;
- asm volatile("btr %2,%1\n\t"
+ asm volatile("btr %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit), ADDR
: "Ir" (nr));
@@ -298,7 +298,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
{
bool oldbit;
- asm volatile("btc %2,%1\n\t"
+ asm volatile("btc %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit), ADDR
: "Ir" (nr) : "memory");
@@ -329,7 +329,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
{
bool oldbit;
- asm volatile("bt %2,%1\n\t"
+ asm volatile("bt %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr));
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 9eef9cc64c68..a600a6cda9ec 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -7,6 +7,7 @@
*/
#include <linux/types.h>
#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
#include <asm/processor.h>
#include <asm/user32.h>
#include <asm/unistd.h>
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0dfa68438e80..bf6a76202a77 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -126,11 +126,10 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
-#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
-#define setup_clear_cpu_cap(bit) do { \
- clear_cpu_cap(&boot_cpu_data, bit); \
- set_bit(bit, (unsigned long *)cpu_caps_cleared); \
-} while (0)
+
+extern void setup_clear_cpu_cap(unsigned int bit);
+extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
+
#define setup_force_cpu_cap(bit) do { \
set_cpu_cap(&boot_cpu_data, bit); \
set_bit(bit, (unsigned long *)cpu_caps_set); \
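cpufeature.h turns clear_cpu_cap() and setup_clear_cpu_cap() from macros into out-of-line functions because, as the comment added to cpufeatures.h further down notes, clearing a feature must now also clear every feature that depends on it via the table in kernel/cpu/cpuid-deps.c. A standalone sketch of the idea with a made-up three-entry table; the kernel's real implementation walks a larger table iteratively rather than recursing:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { FEAT_XSAVE, FEAT_AVX, FEAT_AVX2, NR_FEATS };

struct cpuid_dep { int feature; int depends_on; };

static const struct cpuid_dep deps[] = {
        { FEAT_AVX,  FEAT_XSAVE },
        { FEAT_AVX2, FEAT_AVX   },
};

static bool caps[NR_FEATS] = { true, true, true };

static void clear_feature(int feat)
{
        if (!caps[feat])
                return;
        caps[feat] = false;
        /* Clear everything that depends on the feature just cleared. */
        for (size_t i = 0; i < sizeof(deps) / sizeof(deps[0]); i++)
                if (deps[i].depends_on == feat)
                        clear_feature(deps[i].feature);
}

int main(void)
{
        clear_feature(FEAT_XSAVE);
        printf("avx2 after clearing xsave: %d\n", caps[FEAT_AVX2]);
        return 0;
}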
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 793690fbda36..c0b0e9e8aa66 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -13,173 +13,176 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 18 /* N 32-bit words worth of info */
-#define NBUGINTS 1 /* N 32-bit bug flags */
+#define NCAPINTS 18 /* N 32-bit words worth of info */
+#define NBUGINTS 1 /* N 32-bit bug flags */
/*
* Note: If the comment begins with a quoted string, that string is used
* in /proc/cpuinfo instead of the macro name. If the string is "",
* this feature bit is not displayed in /proc/cpuinfo at all.
+ *
+ * When adding new features here that depend on other features,
+ * please update the table in kernel/cpu/cpuid-deps.c as well.
*/
-/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
-#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
-#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
-#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
-#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
-#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
-#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
-#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
-#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
-#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
-#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
-#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
-#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
-#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
- /* (plus FCMOVcc, FCOMI with FPU) */
-#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
-#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
-#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
-#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
-#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
-#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
-#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
-#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
-#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
-#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
-#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
-#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
-#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
+/* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
+#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
+#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
+#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
+#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
+#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
+#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
+#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
+#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
+#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
+#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
+#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
+#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
+#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
-#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
-#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
-#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
-#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
-#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
-#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
-#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
-#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
-#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
+#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */
+#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
+#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
+#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
+#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */
+#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */
+#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
-#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
-#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
-#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
+#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
+#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
-#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
-#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
-#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
-#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
-/* cpu types for specific tunings: */
-#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
-#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
-#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
-#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
-#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
-#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
-#define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */
-#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
-#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
-#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
-#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
-#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
-#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
-#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
-#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
-#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
-#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
-#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
-#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
-#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
-#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
-#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
-#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
-#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
+#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
+
+/* CPU types for specific tunings: */
+#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
+#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
+#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
+#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
+#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */
+#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */
+#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
+#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
+#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
+#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
+#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" MFENCE synchronizes RDTSC */
+#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
+#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
+#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
+#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
+#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
+#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
+#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
+#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
-/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
-#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
-#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
-#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
-#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
-#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
-#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
-#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
-#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
-#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
-#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
-#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
-#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
-#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
-#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
-#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
-#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
-#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
-#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
-#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
-#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
-#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
-#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
-#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
-#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
-#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
-#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
-#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
-#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
-#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
-#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
-#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
+/* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
+#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
+#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
+#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */
+#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
+#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
+#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */
+#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
+#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
+#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
+#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
+#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */
+#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */
+#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
+#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
+#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
+#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */
+#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
+#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */
+#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
+#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
+#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */
+#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */
+#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */
+#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
-#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
-#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
-#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
-#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
-#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
-#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
-#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
-#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
-#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
-#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
+#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
+#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
+#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
+#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
+#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
+#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
-/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
-#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
-#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
-#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
-#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
-#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
-#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
-#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
-#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
-#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
-#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
-#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
-#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
-#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
-#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
-#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
-#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
-#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
-#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
-#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
-#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
-#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
-#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
-#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */
-#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
-#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
+/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
+#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
+#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */
+#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
+#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
+#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
+#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
+#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
+#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
+#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
+#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
+#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */
+#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */
+#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */
+#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */
+#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
+#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
/*
* Auxiliary flags: Linux defined - For features scattered in various
@@ -187,146 +190,154 @@
*
* Reuse free bits when adding new feature flags!
*/
-#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT */
-#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
-#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
-#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
-#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
-#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
-#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */
+#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
+#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
+#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
+#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
+#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
-#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
-#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
+#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
+#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
-#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
-#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
-#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
-#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
+#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
-#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
/* Virtualization flags: Linux defined, word 8 */
-#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
-#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
-#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
-#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
-#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
+#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
+#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
+#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
-#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
-#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
+#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
+#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
-#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
-#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
-#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
-#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
-#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
-#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
-#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
-#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
-#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
-#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
-#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
-#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
-#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
-#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
-#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
-#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
-#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
-#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
-#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
-#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
-#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
-#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
-#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
-#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
-#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
-#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
+#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
+#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
+#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
+#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
+#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
+#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
+#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
+#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
+#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
+#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
+#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */
+#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
+#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
+#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
+#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
+#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
-/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
-#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
-#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
-#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
-#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
+/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
+#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */
+#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
+#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
+#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
-#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
+#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
-#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
-#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
-#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
+#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */
+#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
+#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
-/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
-#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
-#define X86_FEATURE_IRPERF (13*32+1) /* Instructions Retired Count */
+/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
+#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
+#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
-/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
-#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
-#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
-#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
-#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
-#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
-#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
-#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
-#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
-#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
+/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
+#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
+#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
+#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
+#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
+#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
+#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
+#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
-/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
-#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
-#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
-#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
-#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
-#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
-#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
-#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
-#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
-#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
-#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
-#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
-#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
-#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
+/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
+#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
+#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
+#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
+#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
+#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
+#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
+#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
-#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
-#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
-#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
-#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
-#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
-#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
+#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
+#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */
+#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
+#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
+#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
+#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */
+#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */
+#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
+#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
+#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
+#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
+#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
+#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
-/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
-#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
-#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */
-#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */
+/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
+#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
+#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
+#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
/*
* BUG word(s)
*/
-#define X86_BUG(x) (NCAPINTS*32 + (x))
+#define X86_BUG(x) (NCAPINTS*32 + (x))
-#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
-#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
-#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
-#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
-#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
-#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
-#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
-#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
-#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
+#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
+#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
+#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
+#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
+#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
+#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
+#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
#ifdef CONFIG_X86_32
/*
* 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
* to avoid confusion.
*/
-#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
+#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
#endif
-#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
-#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
-#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
+#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
+#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 0a3e808b9123..4011cb03ef08 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -393,7 +393,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
void update_intr_gate(unsigned int n, const void *addr);
void alloc_intr_gate(unsigned int n, const void *addr);
-extern unsigned long used_vectors[];
+extern unsigned long system_vectors[];
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u32, debug_idt_ctr);
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index c10c9128f54e..14d6d5007314 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -16,6 +16,12 @@
# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
#endif
+#ifdef CONFIG_X86_INTEL_UMIP
+# define DISABLE_UMIP 0
+#else
+# define DISABLE_UMIP (1<<(X86_FEATURE_UMIP & 31))
+#endif
+
#ifdef CONFIG_X86_64
# define DISABLE_VME (1<<(X86_FEATURE_VME & 31))
# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
@@ -63,7 +69,7 @@
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
#define DISABLED_MASK15 0
-#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57)
+#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
#define DISABLED_MASK17 0
#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
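Adding DISABLE_UMIP to DISABLED_MASK16 lets cpu_feature_enabled(X86_FEATURE_UMIP) collapse to a compile-time zero when CONFIG_X86_INTEL_UMIP is not set, so UMIP-only code paths are dropped by the compiler. A minimal sketch of that masking idiom, with simplified macros and stand-in names rather than the kernel's:

#include <stdio.h>

#define FEATURE_UMIP_BIT        2
#define DISABLED_MASK16         (1u << FEATURE_UMIP_BIT)  /* CONFIG off */

static int runtime_cpu_has(int bit) { (void)bit; return 1; }

/* The constant mask term folds to 0 at build time, so the runtime check
 * and any code guarded by it can be eliminated entirely. */
#define cpu_feature_enabled(bit) \
        (!(DISABLED_MASK16 & (1u << (bit))) && runtime_cpu_has(bit))

int main(void)
{
        if (cpu_feature_enabled(FEATURE_UMIP_BIT))
                puts("UMIP usable");
        else
                puts("UMIP compiled out");
        return 0;
}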
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 836ca1178a6a..0350d99bb8fd 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -7,7 +7,6 @@
* Documentation/DMA-API.txt for documentation.
*/
-#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <asm/io.h>
@@ -68,13 +67,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
}
#endif /* CONFIG_X86_DMA_REMAP */
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
-{
- flush_write_buffers();
-}
-
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
gfp_t gfp)
{
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index c1a125e47ff3..3a091cea36c5 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -253,7 +253,7 @@ extern int force_personality32;
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
- (TASK_SIZE / 3 * 2))
+ (DEFAULT_MAP_WINDOW / 3 * 2))
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index dcd9fb55e679..b0c505fe9a95 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -104,6 +104,12 @@ enum fixed_addresses {
FIX_GDT_REMAP_BEGIN,
FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1,
+#ifdef CONFIG_ACPI_APEI_GHES
+ /* Used for GHES mapping from assorted contexts */
+ FIX_APEI_GHES_IRQ,
+ FIX_APEI_GHES_NMI,
+#endif
+
__end_of_permanent_fixed_addresses,
/*
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 8ec99a55e6b9..b80e46733909 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -16,6 +16,8 @@
#include <asm/irq_vectors.h>
+#define IRQ_MATRIX_BITS NR_VECTORS
+
#ifndef __ASSEMBLY__
#include <linux/percpu.h>
@@ -123,15 +125,13 @@ struct irq_alloc_info {
struct irq_cfg {
unsigned int dest_apicid;
- u8 vector;
- u8 old_vector;
+ unsigned int vector;
};
extern struct irq_cfg *irq_cfg(unsigned int irq);
extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
extern void lock_vector_lock(void);
extern void unlock_vector_lock(void);
-extern void setup_vector_irq(int cpu);
#ifdef CONFIG_SMP
extern void send_cleanup_vector(struct irq_cfg *);
extern void irq_complete_move(struct irq_cfg *cfg);
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 0ead9dbb9130..1b0a5abcd8ae 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -23,11 +23,22 @@
#ifdef CONFIG_HYPERVISOR_GUEST
#include <asm/kvm_para.h>
+#include <asm/x86_init.h>
#include <asm/xen/hypervisor.h>
/*
* x86 hypervisor information
*/
+
+enum x86_hypervisor_type {
+ X86_HYPER_NATIVE = 0,
+ X86_HYPER_VMWARE,
+ X86_HYPER_MS_HYPERV,
+ X86_HYPER_XEN_PV,
+ X86_HYPER_XEN_HVM,
+ X86_HYPER_KVM,
+};
+
struct hypervisor_x86 {
/* Hypervisor name */
const char *name;
@@ -35,40 +46,19 @@ struct hypervisor_x86 {
/* Detection routine */
uint32_t (*detect)(void);
- /* Platform setup (run once per boot) */
- void (*init_platform)(void);
-
- /* X2APIC detection (run once per boot) */
- bool (*x2apic_available)(void);
+ /* Hypervisor type */
+ enum x86_hypervisor_type type;
- /* pin current vcpu to specified physical cpu (run rarely) */
- void (*pin_vcpu)(int);
+ /* init time callbacks */
+ struct x86_hyper_init init;
- /* called during init_mem_mapping() to setup early mappings. */
- void (*init_mem_mapping)(void);
+ /* runtime callbacks */
+ struct x86_hyper_runtime runtime;
};
-extern const struct hypervisor_x86 *x86_hyper;
-
-/* Recognized hypervisors */
-extern const struct hypervisor_x86 x86_hyper_vmware;
-extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
-extern const struct hypervisor_x86 x86_hyper_xen_pv;
-extern const struct hypervisor_x86 x86_hyper_xen_hvm;
-extern const struct hypervisor_x86 x86_hyper_kvm;
-
+extern enum x86_hypervisor_type x86_hyper_type;
extern void init_hypervisor_platform(void);
-extern bool hypervisor_x2apic_available(void);
-extern void hypervisor_pin_vcpu(int cpu);
-
-static inline void hypervisor_init_mem_mapping(void)
-{
- if (x86_hyper && x86_hyper->init_mem_mapping)
- x86_hyper->init_mem_mapping();
-}
#else
static inline void init_hypervisor_platform(void) { }
-static inline bool hypervisor_x2apic_available(void) { return false; }
-static inline void hypervisor_init_mem_mapping(void) { }
#endif /* CONFIG_HYPERVISOR_GUEST */
#endif /* _ASM_X86_HYPERVISOR_H */
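The per-hypervisor struct exports are replaced by a single x86_hyper_type enum plus init/runtime callback tables (struct x86_hyper_init / x86_hyper_runtime, defined in <asm/x86_init.h> further down). A hedged sketch of how a caller can now test the detected hypervisor (helper name is illustrative):

static bool running_on_kvm(void)
{
#ifdef CONFIG_HYPERVISOR_GUEST
	return x86_hyper_type == X86_HYPER_KVM;
#else
	return false;
#endif
}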
diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h
index 02aff0867211..1c78580e58be 100644
--- a/arch/x86/include/asm/inat.h
+++ b/arch/x86/include/asm/inat.h
@@ -97,6 +97,16 @@
#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
+/* Identifiers for segment registers */
+#define INAT_SEG_REG_IGNORE 0
+#define INAT_SEG_REG_DEFAULT 1
+#define INAT_SEG_REG_CS 2
+#define INAT_SEG_REG_SS 3
+#define INAT_SEG_REG_DS 4
+#define INAT_SEG_REG_ES 5
+#define INAT_SEG_REG_FS 6
+#define INAT_SEG_REG_GS 7
+
/* Attribute search APIs */
extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h
new file mode 100644
index 000000000000..e1d3b4ce8a92
--- /dev/null
+++ b/arch/x86/include/asm/insn-eval.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_X86_INSN_EVAL_H
+#define _ASM_X86_INSN_EVAL_H
+/*
+ * A collection of utility functions for x86 instruction analysis to be
+ * used in a kernel context. Useful when, for instance, making sense
+ * of the registers indicated by operands.
+ */
+
+#include <linux/compiler.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <asm/ptrace.h>
+
+#define INSN_CODE_SEG_ADDR_SZ(params) ((params >> 4) & 0xf)
+#define INSN_CODE_SEG_OPND_SZ(params) (params & 0xf)
+#define INSN_CODE_SEG_PARAMS(oper_sz, addr_sz) (oper_sz | (addr_sz << 4))
+
+void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs);
+int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs);
+unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx);
+char insn_get_code_seg_params(struct pt_regs *regs);
+
+#endif /* _ASM_X86_INSN_EVAL_H */
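The INSN_CODE_SEG_* macros pack the code segment's default operand and address sizes into one value: operand size in the low nibble, address size in the high nibble. Illustration (not from the patch):

	int params = INSN_CODE_SEG_PARAMS(8, 8);	/* 0x88: 64-bit code segment */
	int opnd   = INSN_CODE_SEG_OPND_SZ(params);	/* 8-byte operands */
	int addr   = INSN_CODE_SEG_ADDR_SZ(params);	/* 8-byte addresses */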
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 11398d55aefa..93ae8aee1780 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -266,6 +266,21 @@ static inline void slow_down_io(void)
#endif
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+#include <linux/jump_label.h>
+
+extern struct static_key_false sev_enable_key;
+static inline bool sev_key_active(void)
+{
+ return static_branch_unlikely(&sev_enable_key);
+}
+
+#else /* !CONFIG_AMD_MEM_ENCRYPT */
+
+static inline bool sev_key_active(void) { return false; }
+
+#endif /* CONFIG_AMD_MEM_ENCRYPT */
+
#define BUILDIO(bwl, bw, type) \
static inline void out##bwl(unsigned type value, int port) \
{ \
@@ -296,14 +311,34 @@ static inline unsigned type in##bwl##_p(int port) \
\
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
- asm volatile("rep; outs" #bwl \
- : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
+ if (sev_key_active()) { \
+ unsigned type *value = (unsigned type *)addr; \
+ while (count) { \
+ out##bwl(*value, port); \
+ value++; \
+ count--; \
+ } \
+ } else { \
+ asm volatile("rep; outs" #bwl \
+ : "+S"(addr), "+c"(count) \
+ : "d"(port) : "memory"); \
+ } \
} \
\
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
- asm volatile("rep; ins" #bwl \
- : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
+ if (sev_key_active()) { \
+ unsigned type *value = (unsigned type *)addr; \
+ while (count) { \
+ *value = in##bwl(port); \
+ value++; \
+ count--; \
+ } \
+ } else { \
+ asm volatile("rep; ins" #bwl \
+ : "+D"(addr), "+c"(count) \
+ : "d"(port) : "memory"); \
+ } \
}
BUILDIO(b, b, char)
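With SEV active the hypervisor cannot emulate the REP INS/OUTS string instructions because the guest buffer is encrypted, so the macro unrolls the transfer into single port accesses. A C-level sketch of the SEV path of outsb() (illustrative only):

static inline void sev_outsb_sketch(int port, const void *addr, unsigned long count)
{
	const unsigned char *p = addr;

	while (count--)
		outb(*p++, port);	/* one OUT instruction per byte */
}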
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 5c27e146a166..a8834dd546cd 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -193,7 +193,6 @@ static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
extern void setup_IO_APIC(void);
extern void enable_IO_APIC(void);
extern void disable_IO_APIC(void);
-extern void setup_ioapic_dest(void);
extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin);
extern void print_IO_APICs(void);
#else /* !CONFIG_X86_IO_APIC */
@@ -233,7 +232,6 @@ static inline void io_apic_init_mappings(void) { }
static inline void setup_IO_APIC(void) { }
static inline void enable_IO_APIC(void) { }
-static inline void setup_ioapic_dest(void) { }
#endif
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index d8632f8fa17d..2395bb794c7b 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -26,11 +26,7 @@ extern void irq_ctx_init(int cpu);
struct irq_desc;
-#ifdef CONFIG_HOTPLUG_CPU
-#include <linux/cpumask.h>
-extern int check_irq_vectors_for_cpu_disable(void);
extern void fixup_irqs(void);
-#endif
#ifdef CONFIG_HAVE_KVM
extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index c20ffca8fef1..67421f649cfa 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -102,12 +102,8 @@
#define POSTED_INTR_NESTED_VECTOR 0xf0
#endif
-/*
- * Local APIC timer IRQ vector is on a different priority level,
- * to work around the 'lost local interrupt if more than 2 IRQ
- * sources per level' errata.
- */
-#define LOCAL_TIMER_VECTOR 0xef
+#define MANAGED_IRQ_SHUTDOWN_VECTOR 0xef
+#define LOCAL_TIMER_VECTOR 0xee
#define NR_VECTORS 256
diff --git a/arch/x86/include/asm/irqdomain.h b/arch/x86/include/asm/irqdomain.h
index 423e112c1e8f..f695cc6b8e1f 100644
--- a/arch/x86/include/asm/irqdomain.h
+++ b/arch/x86/include/asm/irqdomain.h
@@ -9,6 +9,7 @@
enum {
/* Allocate contiguous CPU vectors */
X86_IRQ_ALLOC_CONTIGUOUS_VECTORS = 0x1,
+ X86_IRQ_ALLOC_LEGACY = 0x2,
};
extern struct irq_domain *x86_vector_domain;
@@ -42,8 +43,8 @@ extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg);
extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs);
-extern void mp_irqdomain_activate(struct irq_domain *domain,
- struct irq_data *irq_data);
+extern int mp_irqdomain_activate(struct irq_domain *domain,
+ struct irq_data *irq_data, bool early);
extern void mp_irqdomain_deactivate(struct irq_domain *domain,
struct irq_data *irq_data);
extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h
index 945a0337fbcf..ea32a7d3cf1b 100644
--- a/arch/x86/include/asm/kmemcheck.h
+++ b/arch/x86/include/asm/kmemcheck.h
@@ -1,43 +1 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_X86_KMEMCHECK_H
-#define ASM_X86_KMEMCHECK_H
-
-#include <linux/types.h>
-#include <asm/ptrace.h>
-
-#ifdef CONFIG_KMEMCHECK
-bool kmemcheck_active(struct pt_regs *regs);
-
-void kmemcheck_show(struct pt_regs *regs);
-void kmemcheck_hide(struct pt_regs *regs);
-
-bool kmemcheck_fault(struct pt_regs *regs,
- unsigned long address, unsigned long error_code);
-bool kmemcheck_trap(struct pt_regs *regs);
-#else
-static inline bool kmemcheck_active(struct pt_regs *regs)
-{
- return false;
-}
-
-static inline void kmemcheck_show(struct pt_regs *regs)
-{
-}
-
-static inline void kmemcheck_hide(struct pt_regs *regs)
-{
-}
-
-static inline bool kmemcheck_fault(struct pt_regs *regs,
- unsigned long address, unsigned long error_code)
-{
- return false;
-}
-
-static inline bool kmemcheck_trap(struct pt_regs *regs)
-{
- return false;
-}
-#endif /* CONFIG_KMEMCHECK */
-
-#endif
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 6cf65437b5e5..9f2e3102e0bb 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -58,8 +58,8 @@ extern __visible kprobe_opcode_t optprobe_template_call[];
extern __visible kprobe_opcode_t optprobe_template_end[];
#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
#define MAX_OPTINSN_SIZE \
- (((unsigned long)&optprobe_template_end - \
- (unsigned long)&optprobe_template_entry) + \
+ (((unsigned long)optprobe_template_end - \
+ (unsigned long)optprobe_template_entry) + \
MAX_OPTIMIZED_LENGTH + RELATIVEJUMP_SIZE)
extern const int kretprobe_blacklist_size;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c73e493adf07..9d7d856b2d89 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1419,7 +1419,7 @@ static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
- return __default_cpu_present_to_apicid(mps_cpu);
+ return default_cpu_present_to_apicid(mps_cpu);
#else
WARN_ON_ONCE(1);
return BAD_APICID;
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index c373e44049b1..7b407dda2bd7 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -88,7 +88,6 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
#ifdef CONFIG_KVM_GUEST
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
-void __init kvm_guest_init(void);
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
@@ -103,7 +102,6 @@ static inline void kvm_spinlock_init(void)
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
#else /* CONFIG_KVM_GUEST */
-#define kvm_guest_init() do {} while (0)
#define kvm_async_pf_task_wait(T, I) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 6a77c63540f7..c9459a4c3c68 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -42,11 +42,17 @@ void __init sme_early_init(void);
void __init sme_encrypt_kernel(void);
void __init sme_enable(struct boot_params *bp);
+int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
+int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
+
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void);
void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
+bool sme_active(void);
+bool sev_active(void);
+
#else /* !CONFIG_AMD_MEM_ENCRYPT */
#define sme_me_mask 0ULL
@@ -64,6 +70,14 @@ static inline void __init sme_early_init(void) { }
static inline void __init sme_encrypt_kernel(void) { }
static inline void __init sme_enable(struct boot_params *bp) { }
+static inline bool sme_active(void) { return false; }
+static inline bool sev_active(void) { return false; }
+
+static inline int __init
+early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
+static inline int __init
+early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
+
#endif /* CONFIG_AMD_MEM_ENCRYPT */
/*
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 6699fc441644..6d16d15d09a0 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -73,8 +73,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct ldt_struct *ldt;
- /* lockless_dereference synchronizes with smp_store_release */
- ldt = lockless_dereference(mm->context.ldt);
+ /* READ_ONCE synchronizes with smp_store_release */
+ ldt = READ_ONCE(mm->context.ldt);
/*
* Any change to mm->context.ldt is followed by an IPI to all
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 8546fafa21a9..7948a17febb4 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -6,7 +6,7 @@
#include <asm/orc_types.h>
struct mod_arch_specific {
-#ifdef CONFIG_ORC_UNWINDER
+#ifdef CONFIG_UNWINDER_ORC
unsigned int num_orcs;
int *orc_unwind_ip;
struct orc_entry *orc_unwind;
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index 9492893aec52..a6bec8028480 100644
--- a/arch/x86/include/asm/mpspec_def.h
+++ b/arch/x86/include/asm/mpspec_def.h
@@ -59,7 +59,7 @@ struct mpc_table {
#define MP_TRANSLATION 192
#define CPU_ENABLED 1 /* Processor is available */
-#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */
+#define CPU_BOOTPROCESSOR 2 /* Processor is the boot CPU */
#define CPU_STEPPING_MASK 0x000F
#define CPU_MODEL_MASK 0x00F0
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 581bb54dd464..5400add2885b 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -311,7 +311,7 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number)
void hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void hyper_alloc_mmu(void);
-void hyperv_report_panic(struct pt_regs *regs);
+void hyperv_report_panic(struct pt_regs *regs, long err);
bool hv_is_hypercall_page_setup(void);
void hyperv_cleanup(void);
#else /* CONFIG_HYPERV */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index ab022618a50a..34c4922bbc3f 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -324,6 +324,9 @@
#define MSR_AMD64_IBSBRTARGET 0xc001103b
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_SEV 0xc0010131
+#define MSR_AMD64_SEV_ENABLED_BIT 0
+#define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
/* Fam 17h MSRs */
#define MSR_F17H_IRPERF 0xc00000e9
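MSR_AMD64_SEV lets a guest discover whether SEV is active; bit 0 is the enable flag. A hedged sketch of such a probe (the real detection is done in the mem_encrypt code):

static bool sev_status_sketch(void)
{
	u64 status;

	if (rdmsrl_safe(MSR_AMD64_SEV, &status))
		return false;				/* MSR not implemented */

	return status & MSR_AMD64_SEV_ENABLED;		/* bit 0: SEV enabled */
}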
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index fd81228e8037..283efcaac8af 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -16,10 +16,9 @@
#include <linux/cpumask.h>
#include <asm/frame.h>
-static inline void load_sp0(struct tss_struct *tss,
- struct thread_struct *thread)
+static inline void load_sp0(unsigned long sp0)
{
- PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
+ PVOP_VCALL1(pv_cpu_ops.load_sp0, sp0);
}
/* The paravirtualized CPUID instruction. */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 10cc3b9709fe..6ec54d01972d 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -134,7 +134,7 @@ struct pv_cpu_ops {
void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
- void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
+ void (*load_sp0)(unsigned long sp0);
void (*set_iopl_mask)(unsigned mask);
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 09c06b0fb964..d32175e30259 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -89,10 +89,8 @@ extern unsigned long pci_mem_start;
#define PCIBIOS_MIN_CARDBUS_IO 0x4000
extern int pcibios_enabled;
-void pcibios_config_init(void);
void pcibios_scan_root(int bus);
-void pcibios_set_master(struct pci_dev *dev);
struct irq_routing_table *pcibios_get_irq_routing_table(void);
int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 377f1ffd18be..ba3c523aaf16 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -526,7 +526,7 @@ static inline bool x86_this_cpu_variable_test_bit(int nr,
{
bool oldbit;
- asm volatile("bt "__percpu_arg(2)",%1\n\t"
+ asm volatile("bt "__percpu_arg(2)",%1"
CC_SET(c)
: CC_OUT(c) (oldbit)
: "m" (*(unsigned long __percpu *)addr), "Ir" (nr));
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index f735c3016325..09f9e1e00e3b 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -667,11 +667,6 @@ static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
return false;
}
-static inline int pte_hidden(pte_t pte)
-{
- return pte_flags(pte) & _PAGE_HIDDEN;
-}
-
static inline int pmd_present(pmd_t pmd)
{
/*
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 59df7b47a434..3696398a9475 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -32,7 +32,6 @@
#define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1
-#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
@@ -79,18 +78,6 @@
#define _PAGE_KNL_ERRATUM_MASK 0
#endif
-#ifdef CONFIG_KMEMCHECK
-#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
-#else
-#define _PAGE_HIDDEN (_AT(pteval_t, 0))
-#endif
-
-/*
- * The same hidden bit is used by kmemcheck, but since kmemcheck
- * works on kernel pages while soft-dirty engine on user space,
- * they do not conflict with each other.
- */
-
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
@@ -200,10 +187,9 @@ enum page_cache_mode {
#define _PAGE_ENC (_AT(pteval_t, sme_me_mask))
-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
- _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_ENC)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
_PAGE_DIRTY | _PAGE_ENC)
+#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)
#define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _PAGE_ENC)
#define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _PAGE_ENC)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index bdac19ab2488..2db7cf720b04 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -431,7 +431,9 @@ typedef struct {
struct thread_struct {
/* Cached TLS descriptors: */
struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+#ifdef CONFIG_X86_32
unsigned long sp0;
+#endif
unsigned long sp;
#ifdef CONFIG_X86_32
unsigned long sysenter_cs;
@@ -518,16 +520,9 @@ static inline void native_set_iopl_mask(unsigned mask)
}
static inline void
-native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
+native_load_sp0(unsigned long sp0)
{
- tss->x86_tss.sp0 = thread->sp0;
-#ifdef CONFIG_X86_32
- /* Only happens when SEP is enabled, no need to test "SEP"arately: */
- if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
- tss->x86_tss.ss1 = thread->sysenter_cs;
- wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
- }
-#endif
+ this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
}
static inline void native_swapgs(void)
@@ -547,15 +542,20 @@ static inline unsigned long current_top_of_stack(void)
#endif
}
+static inline bool on_thread_stack(void)
+{
+ return (unsigned long)(current_top_of_stack() -
+ current_stack_pointer) < THREAD_SIZE;
+}
+
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid native_cpuid
-static inline void load_sp0(struct tss_struct *tss,
- struct thread_struct *thread)
+static inline void load_sp0(unsigned long sp0)
{
- native_load_sp0(tss, thread);
+ native_load_sp0(sp0);
}
#define set_iopl_mask native_set_iopl_mask
@@ -804,6 +804,15 @@ static inline void spin_lock_prefetch(const void *x)
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
TOP_OF_KERNEL_STACK_PADDING)
+#define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))
+
+#define task_pt_regs(task) \
+({ \
+ unsigned long __ptr = (unsigned long)task_stack_page(task); \
+ __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \
+ ((struct pt_regs *)__ptr) - 1; \
+})
+
#ifdef CONFIG_X86_32
/*
* User space process size: 3GB (default).
@@ -823,23 +832,6 @@ static inline void spin_lock_prefetch(const void *x)
.addr_limit = KERNEL_DS, \
}
-/*
- * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
- * This is necessary to guarantee that the entire "struct pt_regs"
- * is accessible even if the CPU haven't stored the SS/ESP registers
- * on the stack (interrupt gate does not save these registers
- * when switching to the same priv ring).
- * Therefore beware: accessing the ss/esp fields of the
- * "struct pt_regs" is possible, but they may contain the
- * completely wrong values.
- */
-#define task_pt_regs(task) \
-({ \
- unsigned long __ptr = (unsigned long)task_stack_page(task); \
- __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \
- ((struct pt_regs *)__ptr) - 1; \
-})
-
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
#else
@@ -873,11 +865,9 @@ static inline void spin_lock_prefetch(const void *x)
#define STACK_TOP_MAX TASK_SIZE_MAX
#define INIT_THREAD { \
- .sp0 = TOP_OF_INIT_STACK, \
.addr_limit = KERNEL_DS, \
}
-#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);
#endif /* CONFIG_X86_64 */
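Since sp0 is no longer kept in thread_struct on 64-bit, task_pt_regs() is now derived purely from the stack page: the user register frame sits just below the THREAD_SIZE stack top, minus TOP_OF_KERNEL_STACK_PADDING. The macro above is equivalent to this function-style sketch:

static inline struct pt_regs *task_pt_regs_sketch(struct task_struct *task)
{
	unsigned long top = (unsigned long)task_stack_page(task) +
			    THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;

	return (struct pt_regs *)top - 1;	/* frame stored below the padded top */
}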
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index c0e3c45cf6ab..14131dd06b29 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -136,9 +136,9 @@ static inline int v8086_mode(struct pt_regs *regs)
#endif
}
-#ifdef CONFIG_X86_64
static inline bool user_64bit_mode(struct pt_regs *regs)
{
+#ifdef CONFIG_X86_64
#ifndef CONFIG_PARAVIRT
/*
* On non-paravirt systems, this is the only long mode CPL 3
@@ -149,8 +149,12 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
/* Headers are too twisted for this to go in paravirt.h. */
return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
+#else /* !CONFIG_X86_64 */
+ return false;
+#endif
}
+#ifdef CONFIG_X86_64
#define current_user_stack_pointer() current_pt_regs()->sp
#define compat_user_stack_pointer() current_pt_regs()->sp
#endif
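user_64bit_mode() now exists on 32-bit builds too (always false), so callers can drop their #ifdef CONFIG_X86_64 guards. Illustrative helper (not from the patch):

static int user_pointer_bytes(struct pt_regs *regs)
{
	return user_64bit_mode(regs) ? 8 : 4;	/* size of a user-space pointer */
}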
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 9982dd96f093..5e16b5d40d32 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -2,6 +2,7 @@
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H
+#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
@@ -47,10 +48,14 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
#endif
#ifdef CONFIG_PARAVIRT
+DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
+
+void native_pv_lock_init(void) __init;
+
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
- if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+ if (!static_branch_likely(&virt_spin_lock_key))
return false;
/*
@@ -66,6 +71,10 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
return true;
}
+#else
+static inline void native_pv_lock_init(void)
+{
+}
#endif /* CONFIG_PARAVIRT */
#include <asm-generic/qspinlock.h>
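virt_spin_lock() is now gated by the virt_spin_lock_key static branch instead of X86_FEATURE_HYPERVISOR, so bare-metal boots can patch the test away entirely. native_pv_lock_init(), implemented later in this series, is expected to look roughly like:

void __init native_pv_lock_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}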
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
index ff871210b9f2..4e44250e7d0d 100644
--- a/arch/x86/include/asm/refcount.h
+++ b/arch/x86/include/asm/refcount.h
@@ -15,7 +15,7 @@
* back to the regular execution flow in .text.
*/
#define _REFCOUNT_EXCEPTION \
- ".pushsection .text.unlikely\n" \
+ ".pushsection .text..refcount\n" \
"111:\tlea %[counter], %%" _ASM_CX "\n" \
"112:\t" ASM_UD0 "\n" \
ASM_UNREACHABLE \
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index d8f3a6ae9f6c..f91c365e57c3 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -29,7 +29,7 @@ cc_label: \
#define __GEN_RMWcc(fullop, var, cc, clobbers, ...) \
do { \
bool c; \
- asm volatile (fullop ";" CC_SET(cc) \
+ asm volatile (fullop CC_SET(cc) \
: [counter] "+m" (var), CC_OUT(cc) (c) \
: __VA_ARGS__ : clobbers); \
return c; \
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 4d38d85a16ad..4c25cf6caefa 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -61,18 +61,33 @@
/*
* lock for reading
*/
+#define ____down_read(sem, slow_path) \
+({ \
+ struct rw_semaphore* ret; \
+ asm volatile("# beginning down_read\n\t" \
+ LOCK_PREFIX _ASM_INC "(%[sem])\n\t" \
+ /* adds 0x00000001 */ \
+ " jns 1f\n" \
+ " call " slow_path "\n" \
+ "1:\n\t" \
+ "# ending down_read\n\t" \
+ : "+m" (sem->count), "=a" (ret), \
+ ASM_CALL_CONSTRAINT \
+ : [sem] "a" (sem) \
+ : "memory", "cc"); \
+ ret; \
+})
+
static inline void __down_read(struct rw_semaphore *sem)
{
- asm volatile("# beginning down_read\n\t"
- LOCK_PREFIX _ASM_INC "(%1)\n\t"
- /* adds 0x00000001 */
- " jns 1f\n"
- " call call_rwsem_down_read_failed\n"
- "1:\n\t"
- "# ending down_read\n\t"
- : "+m" (sem->count)
- : "a" (sem)
- : "memory", "cc");
+ ____down_read(sem, "call_rwsem_down_read_failed");
+}
+
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+ if (IS_ERR(____down_read(sem, "call_rwsem_down_read_failed_killable")))
+ return -EINTR;
+ return 0;
}
/*
@@ -82,17 +97,18 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
{
long result, tmp;
asm volatile("# beginning __down_read_trylock\n\t"
- " mov %0,%1\n\t"
+ " mov %[count],%[result]\n\t"
"1:\n\t"
- " mov %1,%2\n\t"
- " add %3,%2\n\t"
+ " mov %[result],%[tmp]\n\t"
+ " add %[inc],%[tmp]\n\t"
" jle 2f\n\t"
- LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ LOCK_PREFIX " cmpxchg %[tmp],%[count]\n\t"
" jnz 1b\n\t"
"2:\n\t"
"# ending __down_read_trylock\n\t"
- : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
- : "i" (RWSEM_ACTIVE_READ_BIAS)
+ : [count] "+m" (sem->count), [result] "=&a" (result),
+ [tmp] "=&r" (tmp)
+ : [inc] "i" (RWSEM_ACTIVE_READ_BIAS)
: "memory", "cc");
return result >= 0;
}
@@ -106,7 +122,7 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
struct rw_semaphore* ret; \
\
asm volatile("# beginning down_write\n\t" \
- LOCK_PREFIX " xadd %1,(%4)\n\t" \
+ LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t" \
/* adds 0xffff0001, returns the old value */ \
" test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
/* was the active mask 0 before? */\
@@ -114,9 +130,9 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
" call " slow_path "\n" \
"1:\n" \
"# ending down_write" \
- : "+m" (sem->count), "=d" (tmp), \
+ : "+m" (sem->count), [tmp] "=d" (tmp), \
"=a" (ret), ASM_CALL_CONSTRAINT \
- : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
+ : [sem] "a" (sem), "[tmp]" (RWSEM_ACTIVE_WRITE_BIAS) \
: "memory", "cc"); \
ret; \
})
@@ -142,21 +158,21 @@ static inline bool __down_write_trylock(struct rw_semaphore *sem)
bool result;
long tmp0, tmp1;
asm volatile("# beginning __down_write_trylock\n\t"
- " mov %0,%1\n\t"
+ " mov %[count],%[tmp0]\n\t"
"1:\n\t"
" test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
/* was the active mask 0 before? */
" jnz 2f\n\t"
- " mov %1,%2\n\t"
- " add %4,%2\n\t"
- LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ " mov %[tmp0],%[tmp1]\n\t"
+ " add %[inc],%[tmp1]\n\t"
+ LOCK_PREFIX " cmpxchg %[tmp1],%[count]\n\t"
" jnz 1b\n\t"
"2:\n\t"
CC_SET(e)
"# ending __down_write_trylock\n\t"
- : "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1),
- CC_OUT(e) (result)
- : "er" (RWSEM_ACTIVE_WRITE_BIAS)
+ : [count] "+m" (sem->count), [tmp0] "=&a" (tmp0),
+ [tmp1] "=&r" (tmp1), CC_OUT(e) (result)
+ : [inc] "er" (RWSEM_ACTIVE_WRITE_BIAS)
: "memory");
return result;
}
@@ -168,14 +184,14 @@ static inline void __up_read(struct rw_semaphore *sem)
{
long tmp;
asm volatile("# beginning __up_read\n\t"
- LOCK_PREFIX " xadd %1,(%2)\n\t"
+ LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"
/* subtracts 1, returns the old value */
" jns 1f\n\t"
" call call_rwsem_wake\n" /* expects old value in %edx */
"1:\n"
"# ending __up_read\n"
- : "+m" (sem->count), "=d" (tmp)
- : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS)
+ : "+m" (sem->count), [tmp] "=d" (tmp)
+ : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_READ_BIAS)
: "memory", "cc");
}
@@ -186,14 +202,14 @@ static inline void __up_write(struct rw_semaphore *sem)
{
long tmp;
asm volatile("# beginning __up_write\n\t"
- LOCK_PREFIX " xadd %1,(%2)\n\t"
+ LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"
/* subtracts 0xffff0001, returns the old value */
" jns 1f\n\t"
" call call_rwsem_wake\n" /* expects old value in %edx */
"1:\n\t"
"# ending __up_write\n"
- : "+m" (sem->count), "=d" (tmp)
- : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
+ : "+m" (sem->count), [tmp] "=d" (tmp)
+ : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_WRITE_BIAS)
: "memory", "cc");
}
@@ -203,7 +219,7 @@ static inline void __up_write(struct rw_semaphore *sem)
static inline void __downgrade_write(struct rw_semaphore *sem)
{
asm volatile("# beginning __downgrade_write\n\t"
- LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
+ LOCK_PREFIX _ASM_ADD "%[inc],(%[sem])\n\t"
/*
* transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
* 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
@@ -213,7 +229,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
"1:\n\t"
"# ending __downgrade_write\n"
: "+m" (sem->count)
- : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
+ : [sem] "a" (sem), [inc] "er" (-RWSEM_WAITING_BIAS)
: "memory", "cc");
}
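Besides converting the inline asm to named operands, the rwsem fast path gains a killable down_read: when the slow path returns an ERR_PTR because the sleeper was killed, the wrapper maps it to -EINTR. A hypothetical caller of the generic wrapper:

static int read_state_killable(struct rw_semaphore *sem)
{
	if (down_read_killable(sem))
		return -EINTR;		/* killed while waiting for the lock */

	/* ... read-side critical section ... */

	up_read(sem);
	return 0;
}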
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index b34625796eb2..5b6bc7016c22 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -42,11 +42,4 @@
#include <asm/qrwlock.h>
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* _ASM_X86_SPINLOCK_H */
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
index 076502241eae..55d392c6bd29 100644
--- a/arch/x86/include/asm/string_32.h
+++ b/arch/x86/include/asm/string_32.h
@@ -179,8 +179,6 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
* No 3D Now!
*/
-#ifndef CONFIG_KMEMCHECK
-
#if (__GNUC__ >= 4)
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#else
@@ -189,13 +187,6 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
? __constant_memcpy((t), (f), (n)) \
: __memcpy((t), (f), (n)))
#endif
-#else
-/*
- * kmemcheck becomes very happy if we use the REP instructions unconditionally,
- * because it means that we know both memory operands in advance.
- */
-#define memcpy(t, f, n) __memcpy((t), (f), (n))
-#endif
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 0b1b4445f4c5..533f74c300c2 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -33,7 +33,6 @@ extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);
#ifndef CONFIG_FORTIFY_SOURCE
-#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len) \
({ \
@@ -46,13 +45,6 @@ extern void *__memcpy(void *to, const void *from, size_t len);
__ret; \
})
#endif
-#else
-/*
- * kmemcheck becomes very happy if we use the REP instructions unconditionally,
- * because it means that we know both memory operands in advance.
- */
-#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
-#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMSET
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 899084b70412..8c6bd6863db9 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H
+#include <linux/sched/task_stack.h>
+
struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to_asm(struct task_struct *prev,
@@ -73,4 +75,26 @@ do { \
((last) = __switch_to_asm((prev), (next))); \
} while (0)
+#ifdef CONFIG_X86_32
+static inline void refresh_sysenter_cs(struct thread_struct *thread)
+{
+ /* Only happens when SEP is enabled, no need to test "SEP"arately: */
+ if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
+ return;
+
+ this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
+ wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+}
+#endif
+
+/* This is used when switching tasks or entering/exiting vm86 mode. */
+static inline void update_sp0(struct task_struct *task)
+{
+#ifdef CONFIG_X86_32
+ load_sp0(task->thread.sp0);
+#else
+ load_sp0(task_top_of_stack(task));
+#endif
+}
+
#endif /* _ASM_X86_SWITCH_TO_H */
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 91dfcafe27a6..bad25bb80679 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -21,7 +21,7 @@ asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
asmlinkage long sys_iopl(unsigned int);
/* kernel/ldt.c */
-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
+asmlinkage long sys_modify_ldt(int, void __user *, unsigned long);
/* kernel/signal.c */
asmlinkage long sys_rt_sigreturn(void);
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 47457ab975fd..7365dd4acffb 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -9,7 +9,7 @@
#define TICK_SIZE (tick_nsec / 1000)
unsigned long long native_sched_clock(void);
-extern int recalibrate_cpu_khz(void);
+extern void recalibrate_cpu_khz(void);
extern int no_timer_check;
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index fa60398bbc3a..069c04be1507 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -34,11 +34,6 @@ DECLARE_EVENT_CLASS(x86_fpu,
)
);
-DEFINE_EVENT(x86_fpu, x86_fpu_state,
- TP_PROTO(struct fpu *fpu),
- TP_ARGS(fpu)
-);
-
DEFINE_EVENT(x86_fpu, x86_fpu_before_save,
TP_PROTO(struct fpu *fpu),
TP_ARGS(fpu)
@@ -74,11 +69,6 @@ DEFINE_EVENT(x86_fpu, x86_fpu_activate_state,
TP_ARGS(fpu)
);
-DEFINE_EVENT(x86_fpu, x86_fpu_deactivate_state,
- TP_PROTO(struct fpu *fpu),
- TP_ARGS(fpu)
-);
-
DEFINE_EVENT(x86_fpu, x86_fpu_init_state,
TP_PROTO(struct fpu *fpu),
TP_ARGS(fpu)
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 8eb139ed1a03..84b9ec0c1bc0 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -138,6 +138,254 @@ DEFINE_IRQ_VECTOR_EVENT(deferred_error_apic);
DEFINE_IRQ_VECTOR_EVENT(thermal_apic);
#endif
+TRACE_EVENT(vector_config,
+
+ TP_PROTO(unsigned int irq, unsigned int vector,
+ unsigned int cpu, unsigned int apicdest),
+
+ TP_ARGS(irq, vector, cpu, apicdest),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, irq )
+ __field( unsigned int, vector )
+ __field( unsigned int, cpu )
+ __field( unsigned int, apicdest )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __entry->vector = vector;
+ __entry->cpu = cpu;
+ __entry->apicdest = apicdest;
+ ),
+
+ TP_printk("irq=%u vector=%u cpu=%u apicdest=0x%08x",
+ __entry->irq, __entry->vector, __entry->cpu,
+ __entry->apicdest)
+);
+
+DECLARE_EVENT_CLASS(vector_mod,
+
+ TP_PROTO(unsigned int irq, unsigned int vector,
+ unsigned int cpu, unsigned int prev_vector,
+ unsigned int prev_cpu),
+
+ TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, irq )
+ __field( unsigned int, vector )
+ __field( unsigned int, cpu )
+ __field( unsigned int, prev_vector )
+ __field( unsigned int, prev_cpu )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __entry->vector = vector;
+ __entry->cpu = cpu;
+ __entry->prev_vector = prev_vector;
+ __entry->prev_cpu = prev_cpu;
+
+ ),
+
+ TP_printk("irq=%u vector=%u cpu=%u prev_vector=%u prev_cpu=%u",
+ __entry->irq, __entry->vector, __entry->cpu,
+ __entry->prev_vector, __entry->prev_cpu)
+);
+
+#define DEFINE_IRQ_VECTOR_MOD_EVENT(name) \
+DEFINE_EVENT_FN(vector_mod, name, \
+ TP_PROTO(unsigned int irq, unsigned int vector, \
+ unsigned int cpu, unsigned int prev_vector, \
+ unsigned int prev_cpu), \
+ TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu), NULL, NULL); \
+
+DEFINE_IRQ_VECTOR_MOD_EVENT(vector_update);
+DEFINE_IRQ_VECTOR_MOD_EVENT(vector_clear);
+
+DECLARE_EVENT_CLASS(vector_reserve,
+
+ TP_PROTO(unsigned int irq, int ret),
+
+ TP_ARGS(irq, ret),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, irq )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("irq=%u ret=%d", __entry->irq, __entry->ret)
+);
+
+#define DEFINE_IRQ_VECTOR_RESERVE_EVENT(name) \
+DEFINE_EVENT_FN(vector_reserve, name, \
+ TP_PROTO(unsigned int irq, int ret), \
+ TP_ARGS(irq, ret), NULL, NULL); \
+
+DEFINE_IRQ_VECTOR_RESERVE_EVENT(vector_reserve_managed);
+DEFINE_IRQ_VECTOR_RESERVE_EVENT(vector_reserve);
+
+TRACE_EVENT(vector_alloc,
+
+ TP_PROTO(unsigned int irq, unsigned int vector, bool reserved,
+ int ret),
+
+	TP_ARGS(irq, vector, reserved, ret),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, irq )
+ __field( unsigned int, vector )
+ __field( bool, reserved )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __entry->vector = ret < 0 ? 0 : vector;
+ __entry->reserved = reserved;
+ __entry->ret = ret > 0 ? 0 : ret;
+ ),
+
+ TP_printk("irq=%u vector=%u reserved=%d ret=%d",
+ __entry->irq, __entry->vector,
+ __entry->reserved, __entry->ret)
+);
+
+TRACE_EVENT(vector_alloc_managed,
+
+ TP_PROTO(unsigned int irq, unsigned int vector,
+ int ret),
+
+ TP_ARGS(irq, vector, ret),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, irq )
+ __field( unsigned int, vector )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __entry->vector = ret < 0 ? 0 : vector;
+ __entry->ret = ret > 0 ? 0 : ret;
+ ),
+
+ TP_printk("irq=%u vector=%u ret=%d",
+ __entry->irq, __entry->vector, __entry->ret)
+);
+
+DECLARE_EVENT_CLASS(vector_activate,
+
+ TP_PROTO(unsigned int irq, bool is_managed, bool can_reserve,
+ bool early),
+
+ TP_ARGS(irq, is_managed, can_reserve, early),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, irq )
+ __field( bool, is_managed )
+ __field( bool, can_reserve )
+ __field( bool, early )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __entry->is_managed = is_managed;
+ __entry->can_reserve = can_reserve;
+ __entry->early = early;
+ ),
+
+ TP_printk("irq=%u is_managed=%d can_reserve=%d early=%d",
+ __entry->irq, __entry->is_managed, __entry->can_reserve,
+ __entry->early)
+);
+
+#define DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(name) \
+DEFINE_EVENT_FN(vector_activate, name, \
+ TP_PROTO(unsigned int irq, bool is_managed, \
+ bool can_reserve, bool early), \
+ TP_ARGS(irq, is_managed, can_reserve, early), NULL, NULL); \
+
+DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_activate);
+DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_deactivate);
+
+TRACE_EVENT(vector_teardown,
+
+ TP_PROTO(unsigned int irq, bool is_managed, bool has_reserved),
+
+ TP_ARGS(irq, is_managed, has_reserved),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, irq )
+ __field( bool, is_managed )
+ __field( bool, has_reserved )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __entry->is_managed = is_managed;
+ __entry->has_reserved = has_reserved;
+ ),
+
+ TP_printk("irq=%u is_managed=%d has_reserved=%d",
+ __entry->irq, __entry->is_managed, __entry->has_reserved)
+);
+
+TRACE_EVENT(vector_setup,
+
+ TP_PROTO(unsigned int irq, bool is_legacy, int ret),
+
+ TP_ARGS(irq, is_legacy, ret),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, irq )
+ __field( bool, is_legacy )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __entry->is_legacy = is_legacy;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("irq=%u is_legacy=%d ret=%d",
+ __entry->irq, __entry->is_legacy, __entry->ret)
+);
+
+TRACE_EVENT(vector_free_moved,
+
+ TP_PROTO(unsigned int irq, unsigned int cpu, unsigned int vector,
+ bool is_managed),
+
+ TP_ARGS(irq, cpu, vector, is_managed),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, irq )
+ __field( unsigned int, cpu )
+ __field( unsigned int, vector )
+ __field( bool, is_managed )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __entry->cpu = cpu;
+ __entry->vector = vector;
+ __entry->is_managed = is_managed;
+ ),
+
+ TP_printk("irq=%u cpu=%u vector=%u is_managed=%d",
+ __entry->irq, __entry->cpu, __entry->vector,
+ __entry->is_managed)
+);
+
+
#endif /* CONFIG_X86_LOCAL_APIC */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index b0cced97a6ce..1fadd310ff68 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -38,9 +38,9 @@ asmlinkage void simd_coprocessor_error(void);
#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
asmlinkage void xen_divide_error(void);
+asmlinkage void xen_xennmi(void);
asmlinkage void xen_xendebug(void);
asmlinkage void xen_xenint3(void);
-asmlinkage void xen_nmi(void);
asmlinkage void xen_overflow(void);
asmlinkage void xen_bounds(void);
asmlinkage void xen_invalid_op(void);
@@ -145,4 +145,22 @@ enum {
X86_TRAP_IRET = 32, /* 32, IRET Exception */
};
+/*
+ * Page fault error code bits:
+ *
+ * bit 0 == 0: no page found 1: protection fault
+ * bit 1 == 0: read access 1: write access
+ * bit 2 == 0: kernel-mode access 1: user-mode access
+ * bit 3 == 1: use of reserved bit detected
+ * bit 4 == 1: fault was an instruction fetch
+ * bit 5 == 1: protection keys block access
+ */
+enum x86_pf_error_code {
+ X86_PF_PROT = 1 << 0,
+ X86_PF_WRITE = 1 << 1,
+ X86_PF_USER = 1 << 2,
+ X86_PF_RSVD = 1 << 3,
+ X86_PF_INSTR = 1 << 4,
+ X86_PF_PK = 1 << 5,
+};
#endif /* _ASM_X86_TRAPS_H */
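Exposing the X86_PF_* bits in <asm/traps.h> lets code outside mm/fault.c decode page-fault error codes. A small illustrative decoder (not part of the patch):

static void describe_pf(unsigned long error_code, unsigned long address)
{
	pr_debug("#PF at %#lx: %s-mode %s, page %spresent\n",
		 address,
		 (error_code & X86_PF_USER)  ? "user"  : "kernel",
		 (error_code & X86_PF_INSTR) ? "instruction fetch" :
		 (error_code & X86_PF_WRITE) ? "write" : "read",
		 (error_code & X86_PF_PROT)  ? ""      : "not ");
}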
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 8da0efb13544..cf5d53c3f9ea 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -32,15 +32,22 @@ static inline cycles_t get_cycles(void)
extern struct system_counterval_t convert_art_to_tsc(u64 art);
+extern void tsc_early_delay_calibrate(void);
extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
+extern void mark_tsc_async_resets(char *reason);
extern unsigned long native_calibrate_cpu(void);
extern unsigned long native_calibrate_tsc(void);
extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
extern int tsc_clocksource_reliable;
+#ifdef CONFIG_X86_TSC
+extern bool tsc_async_resets;
+#else
+# define tsc_async_resets false
+#endif
/*
* Boot-time check whether the TSCs are synchronized across
diff --git a/arch/x86/include/asm/umip.h b/arch/x86/include/asm/umip.h
new file mode 100644
index 000000000000..db43f2a0d92c
--- /dev/null
+++ b/arch/x86/include/asm/umip.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_UMIP_H
+#define _ASM_X86_UMIP_H
+
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_X86_INTEL_UMIP
+bool fixup_umip_exception(struct pt_regs *regs);
+#else
+static inline bool fixup_umip_exception(struct pt_regs *regs) { return false; }
+#endif /* CONFIG_X86_INTEL_UMIP */
+#endif /* _ASM_X86_UMIP_H */
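fixup_umip_exception() is intended to be called from the #GP handler: when user space executes one of the UMIP-protected instructions (SGDT, SIDT, SMSW, ...), it emulates the result and advances the instruction pointer. Simplified sketch of the call site (the real hook is added to do_general_protection() elsewhere in the series):

	if (user_mode(regs) && fixup_umip_exception(regs))
		return;		/* emulated successfully, resume the task */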
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index 87adc0d38c4a..e9cc6fe1fc6f 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -13,11 +13,11 @@ struct unwind_state {
struct task_struct *task;
int graph_idx;
bool error;
-#if defined(CONFIG_ORC_UNWINDER)
+#if defined(CONFIG_UNWINDER_ORC)
bool signal, full_regs;
unsigned long sp, bp, ip;
struct pt_regs *regs;
-#elif defined(CONFIG_FRAME_POINTER_UNWINDER)
+#elif defined(CONFIG_UNWINDER_FRAME_POINTER)
bool got_irq;
unsigned long *bp, *orig_sp, ip;
struct pt_regs *regs;
@@ -51,7 +51,7 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
__unwind_start(state, task, regs, first_frame);
}
-#if defined(CONFIG_ORC_UNWINDER) || defined(CONFIG_FRAME_POINTER_UNWINDER)
+#if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER)
static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
{
if (unwind_done(state))
@@ -66,7 +66,7 @@ static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
}
#endif
-#ifdef CONFIG_ORC_UNWINDER
+#ifdef CONFIG_UNWINDER_ORC
void unwind_init(void);
void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size,
void *orc, size_t orc_size);
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 9cffb44a3cf5..036e26d63d9a 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -776,23 +776,36 @@ static inline int uv_num_possible_blades(void)
extern void uv_nmi_setup(void);
extern void uv_nmi_setup_hubless(void);
+/* BIOS/Kernel flags exchange MMR */
+#define UVH_BIOS_KERNEL_MMR UVH_SCRATCH5
+#define UVH_BIOS_KERNEL_MMR_ALIAS UVH_SCRATCH5_ALIAS
+#define UVH_BIOS_KERNEL_MMR_ALIAS_2 UVH_SCRATCH5_ALIAS_2
+
+/* TSC sync valid, set by BIOS */
+#define UVH_TSC_SYNC_MMR UVH_BIOS_KERNEL_MMR
+#define UVH_TSC_SYNC_SHIFT 10
+#define UVH_TSC_SYNC_SHIFT_UV2K 16 /* UV2/3k have different bits */
+#define UVH_TSC_SYNC_MASK 3 /* 0011 */
+#define UVH_TSC_SYNC_VALID 3 /* 0011 */
+#define UVH_TSC_SYNC_INVALID 2 /* 0010 */
+
/* BMC sets a bit in this MMR before sending an NMI */
-#define UVH_NMI_MMR UVH_SCRATCH5
-#define UVH_NMI_MMR_CLEAR UVH_SCRATCH5_ALIAS
+#define UVH_NMI_MMR UVH_BIOS_KERNEL_MMR
+#define UVH_NMI_MMR_CLEAR UVH_BIOS_KERNEL_MMR_ALIAS
#define UVH_NMI_MMR_SHIFT 63
-#define UVH_NMI_MMR_TYPE "SCRATCH5"
+#define UVH_NMI_MMR_TYPE "SCRATCH5"
/* Newer SMM NMI handler, not present in all systems */
#define UVH_NMI_MMRX UVH_EVENT_OCCURRED0
#define UVH_NMI_MMRX_CLEAR UVH_EVENT_OCCURRED0_ALIAS
#define UVH_NMI_MMRX_SHIFT UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT
-#define UVH_NMI_MMRX_TYPE "EXTIO_INT0"
+#define UVH_NMI_MMRX_TYPE "EXTIO_INT0"
/* Non-zero indicates newer SMM NMI handler present */
#define UVH_NMI_MMRX_SUPPORTED UVH_EXTIO_INT0_BROADCAST
/* Indicates to BIOS that we want to use the newer SMM NMI handler */
-#define UVH_NMI_MMRX_REQ UVH_SCRATCH5_ALIAS_2
+#define UVH_NMI_MMRX_REQ UVH_BIOS_KERNEL_MMR_ALIAS_2
#define UVH_NMI_MMRX_REQ_SHIFT 62
struct uv_hub_nmi_s {
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 52250681f68c..fb856c9f0449 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -49,7 +49,7 @@ static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
unsigned ret;
repeat:
- ret = ACCESS_ONCE(s->seq);
+ ret = READ_ONCE(s->seq);
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
diff --git a/arch/x86/include/asm/x2apic.h b/arch/x86/include/asm/x2apic.h
deleted file mode 100644
index 78ccf28d17db..000000000000
--- a/arch/x86/include/asm/x2apic.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Common bits for X2APIC cluster/physical modes.
- */
-
-#ifndef _ASM_X86_X2APIC_H
-#define _ASM_X86_X2APIC_H
-
-#include <asm/apic.h>
-#include <asm/ipi.h>
-#include <linux/cpumask.h>
-
-static int x2apic_apic_id_valid(int apicid)
-{
- return 1;
-}
-
-static int x2apic_apic_id_registered(void)
-{
- return 1;
-}
-
-static void
-__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
-{
- unsigned long cfg = __prepare_ICR(0, vector, dest);
- native_x2apic_icr_write(cfg, apicid);
-}
-
-static unsigned int x2apic_get_apic_id(unsigned long id)
-{
- return id;
-}
-
-static unsigned long x2apic_set_apic_id(unsigned int id)
-{
- return id;
-}
-
-static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
-{
- return initial_apicid >> index_msb;
-}
-
-static void x2apic_send_IPI_self(int vector)
-{
- apic_write(APIC_SELF_IPI, vector);
-}
-
-#endif /* _ASM_X86_X2APIC_H */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 8a1ebf9540dd..aa4747569e23 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -51,11 +51,13 @@ struct x86_init_resources {
* are set up.
* @intr_init: interrupt init code
* @trap_init: platform specific trap setup
+ * @intr_mode_init: interrupt delivery mode setup
*/
struct x86_init_irqs {
void (*pre_vector_init)(void);
void (*intr_init)(void);
void (*trap_init)(void);
+ void (*intr_mode_init)(void);
};
/**
@@ -115,6 +117,20 @@ struct x86_init_pci {
};
/**
+ * struct x86_hyper_init - x86 hypervisor init functions
+ * @init_platform: platform setup
+ * @guest_late_init: guest late init
+ * @x2apic_available: X2APIC detection
+ * @init_mem_mapping: setup early mappings during init_mem_mapping()
+ */
+struct x86_hyper_init {
+ void (*init_platform)(void);
+ void (*guest_late_init)(void);
+ bool (*x2apic_available)(void);
+ void (*init_mem_mapping)(void);
+};
+
+/**
* struct x86_init_ops - functions for platform specific setup
*
*/
@@ -127,6 +143,7 @@ struct x86_init_ops {
struct x86_init_timers timers;
struct x86_init_iommu iommu;
struct x86_init_pci pci;
+ struct x86_hyper_init hyper;
};
/**
@@ -195,11 +212,21 @@ enum x86_legacy_i8042_state {
struct x86_legacy_features {
enum x86_legacy_i8042_state i8042;
int rtc;
+ int no_vga;
int reserve_bios_regions;
struct x86_legacy_devices devices;
};
/**
+ * struct x86_hyper_runtime - x86 hypervisor specific runtime callbacks
+ *
+ * @pin_vcpu: pin current vcpu to specified physical cpu (run rarely)
+ */
+struct x86_hyper_runtime {
+ void (*pin_vcpu)(int cpu);
+};
+
+/**
* struct x86_platform_ops - platform specific runtime functions
* @calibrate_cpu: calibrate CPU
* @calibrate_tsc: calibrate TSC, if different from CPU
@@ -218,6 +245,7 @@ struct x86_legacy_features {
* possible in x86_early_init_platform_quirks() by
* only using the current x86_hardware_subarch
* semantics.
+ * @hyper: x86 hypervisor specific runtime callbacks
*/
struct x86_platform_ops {
unsigned long (*calibrate_cpu)(void);
@@ -233,6 +261,7 @@ struct x86_platform_ops {
void (*apic_post_init)(void);
struct x86_legacy_features legacy;
void (*set_legacy_features)(void);
+ struct x86_hyper_runtime hyper;
};
struct pci_dev;
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h
index 1f5c5161ead6..45c8605467f1 100644
--- a/arch/x86/include/asm/xor.h
+++ b/arch/x86/include/asm/xor.h
@@ -1,7 +1,4 @@
-#ifdef CONFIG_KMEMCHECK
-/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
-# include <asm-generic/xor.h>
-#elif !defined(_ASM_X86_XOR_H)
+#ifndef _ASM_X86_XOR_H
#define _ASM_X86_XOR_H
/*
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 554aa8f24f91..09cc06483bed 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -110,5 +110,4 @@ struct kvm_vcpu_pv_apf_data {
#define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
#define KVM_PV_EOI_DISABLED 0x0
-
#endif /* _UAPI_ASM_X86_KVM_PARA_H */
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h
index 6f3355399665..7e1e730396ae 100644
--- a/arch/x86/include/uapi/asm/processor-flags.h
+++ b/arch/x86/include/uapi/asm/processor-flags.h
@@ -105,6 +105,8 @@
#define X86_CR4_OSFXSR _BITUL(X86_CR4_OSFXSR_BIT)
#define X86_CR4_OSXMMEXCPT_BIT 10 /* enable unmasked SSE exceptions */
#define X86_CR4_OSXMMEXCPT _BITUL(X86_CR4_OSXMMEXCPT_BIT)
+#define X86_CR4_UMIP_BIT 11 /* enable UMIP support */
+#define X86_CR4_UMIP _BITUL(X86_CR4_UMIP_BIT)
#define X86_CR4_LA57_BIT 12 /* enable 5-level page tables */
#define X86_CR4_LA57 _BITUL(X86_CR4_LA57_BIT)
#define X86_CR4_VMXE_BIT 13 /* enable VMX virtualization */
@@ -152,5 +154,8 @@
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc
+#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
+ X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
+ X86_CR0_PG)
#endif /* _UAPI_ASM_X86_PROCESSOR_FLAGS_H */
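CR0_STATE collects the CR0 bits the kernel always runs with; numerically the constant works out to:

/* CR0_STATE = PE | MP | ET | NE | WP | AM | PG
 *           = 0x00000001 | 0x00000002 | 0x00000010 | 0x00000020
 *           | 0x00010000 | 0x00040000 | 0x80000000
 *           = 0x80050033
 */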
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 5f70044340ff..81bb565f4497 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -25,9 +25,9 @@ endif
KASAN_SANITIZE_head$(BITS).o := n
KASAN_SANITIZE_dumpstack.o := n
KASAN_SANITIZE_dumpstack_$(BITS).o := n
-KASAN_SANITIZE_stacktrace.o := n
+KASAN_SANITIZE_stacktrace.o := n
+KASAN_SANITIZE_paravirt.o := n
-OBJECT_FILES_NON_STANDARD_head_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_test_nx.o := y
@@ -127,10 +127,11 @@ obj-$(CONFIG_EFI) += sysfb_efi.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
obj-$(CONFIG_TRACING) += tracepoint.o
obj-$(CONFIG_SCHED_MC_PRIO) += itmt.o
+obj-$(CONFIG_X86_INTEL_UMIP) += umip.o
-obj-$(CONFIG_ORC_UNWINDER) += unwind_orc.o
-obj-$(CONFIG_FRAME_POINTER_UNWINDER) += unwind_frame.o
-obj-$(CONFIG_GUESS_UNWINDER) += unwind_guess.o
+obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o
+obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o
+obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
###
# 64 bit specific files
diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c
index ea3046e0b0cf..bb8d300fecbd 100644
--- a/arch/x86/kernel/acpi/apei.c
+++ b/arch/x86/kernel/acpi/apei.c
@@ -52,8 +52,3 @@ void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
apei_mce_report_mem_error(sev, mem_err);
#endif
}
-
-void arch_apei_flush_tlb_one(unsigned long addr)
-{
- __flush_tlb_one(addr);
-}
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 079535e53e2a..ef9e02e614d0 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -961,6 +961,11 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
x86_platform.legacy.rtc = 0;
}
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_VGA) {
+ pr_debug("ACPI: probing for VGA not safe\n");
+ x86_platform.legacy.no_vga = 1;
+ }
+
#ifdef CONFIG_X86_PM_TIMER
/* detect the location of the ACPI PM Timer */
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 3344d3382e91..dbaf14d69ebd 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -442,7 +442,6 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
{
const s32 *poff;
- mutex_lock(&text_mutex);
for (poff = start; poff < end; poff++) {
u8 *ptr = (u8 *)poff + *poff;
@@ -452,7 +451,6 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
if (*ptr == 0x3e)
text_poke(ptr, ((unsigned char []){0xf0}), 1);
}
- mutex_unlock(&text_mutex);
}
static void alternatives_smp_unlock(const s32 *start, const s32 *end,
@@ -460,7 +458,6 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
{
const s32 *poff;
- mutex_lock(&text_mutex);
for (poff = start; poff < end; poff++) {
u8 *ptr = (u8 *)poff + *poff;
@@ -470,7 +467,6 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
if (*ptr == 0xf0)
text_poke(ptr, ((unsigned char []){0x3E}), 1);
}
- mutex_unlock(&text_mutex);
}
struct smp_alt_module {
@@ -489,8 +485,7 @@ struct smp_alt_module {
struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
-static DEFINE_MUTEX(smp_alt);
-static bool uniproc_patched = false; /* protected by smp_alt */
+static bool uniproc_patched = false; /* protected by text_mutex */
void __init_or_module alternatives_smp_module_add(struct module *mod,
char *name,
@@ -499,7 +494,7 @@ void __init_or_module alternatives_smp_module_add(struct module *mod,
{
struct smp_alt_module *smp;
- mutex_lock(&smp_alt);
+ mutex_lock(&text_mutex);
if (!uniproc_patched)
goto unlock;
@@ -526,14 +521,14 @@ void __init_or_module alternatives_smp_module_add(struct module *mod,
smp_unlock:
alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
- mutex_unlock(&smp_alt);
+ mutex_unlock(&text_mutex);
}
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
struct smp_alt_module *item;
- mutex_lock(&smp_alt);
+ mutex_lock(&text_mutex);
list_for_each_entry(item, &smp_alt_modules, next) {
if (mod != item->mod)
continue;
@@ -541,7 +536,7 @@ void __init_or_module alternatives_smp_module_del(struct module *mod)
kfree(item);
break;
}
- mutex_unlock(&smp_alt);
+ mutex_unlock(&text_mutex);
}
void alternatives_enable_smp(void)
@@ -551,7 +546,7 @@ void alternatives_enable_smp(void)
/* Why bother if there are no other CPUs? */
BUG_ON(num_possible_cpus() == 1);
- mutex_lock(&smp_alt);
+ mutex_lock(&text_mutex);
if (uniproc_patched) {
pr_info("switching to SMP code\n");
@@ -563,10 +558,13 @@ void alternatives_enable_smp(void)
mod->text, mod->text_end);
uniproc_patched = false;
}
- mutex_unlock(&smp_alt);
+ mutex_unlock(&text_mutex);
}
-/* Return 1 if the address range is reserved for smp-alternatives */
+/*
+ * Return 1 if the address range is reserved for SMP-alternatives.
+ * Must hold text_mutex.
+ */
int alternatives_text_reserved(void *start, void *end)
{
struct smp_alt_module *mod;
@@ -574,6 +572,8 @@ int alternatives_text_reserved(void *start, void *end)
u8 *text_start = start;
u8 *text_end = end;
+ lockdep_assert_held(&text_mutex);
+
list_for_each_entry(mod, &smp_alt_modules, next) {
if (mod->text > text_end || mod->text_end < text_start)
continue;
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
index 2fb7309c6900..a9e08924927e 100644
--- a/arch/x86/kernel/apic/Makefile
+++ b/arch/x86/kernel/apic/Makefile
@@ -7,7 +7,7 @@
# In particular, smp_apic_timer_interrupt() is called in random places.
KCOV_INSTRUMENT := n
-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_noop.o ipi.o vector.o
+obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_common.o apic_noop.o ipi.o vector.o
obj-y += hw_nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ff891772c9f8..6e272f3ea984 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -211,11 +211,7 @@ static inline int lapic_get_version(void)
*/
static inline int lapic_is_integrated(void)
{
-#ifdef CONFIG_X86_64
- return 1;
-#else
return APIC_INTEGRATED(lapic_get_version());
-#endif
}
/*
@@ -298,14 +294,11 @@ int get_physical_broadcast(void)
*/
int lapic_get_maxlvt(void)
{
- unsigned int v;
-
- v = apic_read(APIC_LVR);
/*
* - we always have APIC integrated on 64bit mode
* - 82489DXs do not report # of LVT entries
*/
- return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
+ return lapic_is_integrated() ? GET_APIC_MAXLVT(apic_read(APIC_LVR)) : 2;
}
/*
@@ -1229,53 +1222,100 @@ void __init sync_Arb_IDs(void)
APIC_INT_LEVELTRIG | APIC_DM_INIT);
}
-/*
- * An initial setup of the virtual wire mode.
- */
-void __init init_bsp_APIC(void)
+enum apic_intr_mode_id apic_intr_mode;
+
+static int __init apic_intr_mode_select(void)
{
- unsigned int value;
+ /* Check kernel option */
+ if (disable_apic) {
+ pr_info("APIC disabled via kernel command line\n");
+ return APIC_PIC;
+ }
- /*
- * Don't do the setup now if we have a SMP BIOS as the
- * through-I/O-APIC virtual wire mode might be active.
- */
- if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC))
- return;
+ /* Check BIOS */
+#ifdef CONFIG_X86_64
+ /* On 64-bit, the APIC must be integrated; check the local APIC only */
+ if (!boot_cpu_has(X86_FEATURE_APIC)) {
+ disable_apic = 1;
+ pr_info("APIC disabled by BIOS\n");
+ return APIC_PIC;
+ }
+#else
+ /* On 32-bit, the APIC may be an integrated APIC or an 82489DX */
- /*
- * Do not trust the local APIC being empty at bootup.
- */
- clear_local_APIC();
+ /* Neither 82489DX nor integrated APIC ? */
+ if (!boot_cpu_has(X86_FEATURE_APIC) && !smp_found_config) {
+ disable_apic = 1;
+ return APIC_PIC;
+ }
- /*
- * Enable APIC.
- */
- value = apic_read(APIC_SPIV);
- value &= ~APIC_VECTOR_MASK;
- value |= APIC_SPIV_APIC_ENABLED;
+ /* Does the BIOS pretend there is an integrated APIC? */
+ if (!boot_cpu_has(X86_FEATURE_APIC) &&
+ APIC_INTEGRATED(boot_cpu_apic_version)) {
+ disable_apic = 1;
+ pr_err(FW_BUG "Local APIC %d not detected, force emulation\n",
+ boot_cpu_physical_apicid);
+ return APIC_PIC;
+ }
+#endif
-#ifdef CONFIG_X86_32
- /* This bit is reserved on P4/Xeon and should be cleared */
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
- (boot_cpu_data.x86 == 15))
- value &= ~APIC_SPIV_FOCUS_DISABLED;
- else
+ /* Check MP table or ACPI MADT configuration */
+ if (!smp_found_config) {
+ disable_ioapic_support();
+ if (!acpi_lapic) {
+ pr_info("APIC: ACPI MADT or MP tables are not detected\n");
+ return APIC_VIRTUAL_WIRE_NO_CONFIG;
+ }
+ return APIC_VIRTUAL_WIRE;
+ }
+
+#ifdef CONFIG_SMP
+ /* If SMP should be disabled, then really disable it! */
+ if (!setup_max_cpus) {
+ pr_info("APIC: SMP mode deactivated\n");
+ return APIC_SYMMETRIC_IO_NO_ROUTING;
+ }
+
+ if (read_apic_id() != boot_cpu_physical_apicid) {
+ panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
+ read_apic_id(), boot_cpu_physical_apicid);
+ /* Or can we switch back to PIC here? */
+ }
#endif
- value |= APIC_SPIV_FOCUS_DISABLED;
- value |= SPURIOUS_APIC_VECTOR;
- apic_write(APIC_SPIV, value);
- /*
- * Set up the virtual wire mode.
- */
- apic_write(APIC_LVT0, APIC_DM_EXTINT);
- value = APIC_DM_NMI;
- if (!lapic_is_integrated()) /* 82489DX */
- value |= APIC_LVT_LEVEL_TRIGGER;
- if (apic_extnmi == APIC_EXTNMI_NONE)
- value |= APIC_LVT_MASKED;
- apic_write(APIC_LVT1, value);
+ return APIC_SYMMETRIC_IO;
+}
+
+/* Init the interrupt delivery mode for the BSP */
+void __init apic_intr_mode_init(void)
+{
+ bool upmode = IS_ENABLED(CONFIG_UP_LATE_INIT);
+
+ apic_intr_mode = apic_intr_mode_select();
+
+ switch (apic_intr_mode) {
+ case APIC_PIC:
+ pr_info("APIC: Keep in PIC mode(8259)\n");
+ return;
+ case APIC_VIRTUAL_WIRE:
+ pr_info("APIC: Switch to virtual wire mode setup\n");
+ default_setup_apic_routing();
+ break;
+ case APIC_VIRTUAL_WIRE_NO_CONFIG:
+ pr_info("APIC: Switch to virtual wire mode setup with no configuration\n");
+ upmode = true;
+ default_setup_apic_routing();
+ break;
+ case APIC_SYMMETRIC_IO:
+ pr_info("APIC: Switch to symmetric I/O mode setup\n");
+ default_setup_apic_routing();
+ break;
+ case APIC_SYMMETRIC_IO_NO_ROUTING:
+ pr_info("APIC: Switch to symmetric I/O mode setup in no SMP routine\n");
+ break;
+ }
+
+ apic_bsp_setup(upmode);
}
static void lapic_setup_esr(void)
@@ -1473,7 +1513,7 @@ void setup_local_APIC(void)
/*
* Set up LVT0, LVT1:
*
- * set up through-local-APIC on the BP's LINT0. This is not
+ * set up through-local-APIC on the boot CPU's LINT0. This is not
* strictly necessary in pure symmetric-IO mode, but sometimes
* we delegate interrupts to the 8259A.
*/
@@ -1499,7 +1539,9 @@ void setup_local_APIC(void)
value = APIC_DM_NMI;
else
value = APIC_DM_NMI | APIC_LVT_MASKED;
- if (!lapic_is_integrated()) /* 82489DX */
+
+ /* Is this an 82489DX? */
+ if (!lapic_is_integrated())
value |= APIC_LVT_LEVEL_TRIGGER;
apic_write(APIC_LVT1, value);
@@ -1645,7 +1687,7 @@ static __init void try_to_enable_x2apic(int remap_mode)
* under KVM
*/
if (max_physical_apicid > 255 ||
- !hypervisor_x2apic_available()) {
+ !x86_init.hyper.x2apic_available()) {
pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
x2apic_disable();
return;
@@ -1885,8 +1927,8 @@ void __init init_apic_mappings(void)
* yeah -- we lie about apic_version
* in case if apic was disabled via boot option
* but it's not a problem for SMP compiled kernel
- * since smp_sanity_check is prepared for such a case
- * and disable smp mode
+ * since apic_intr_mode_select is prepared for such
+ * a case and disables SMP mode
*/
boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
}
@@ -2242,44 +2284,6 @@ int hard_smp_processor_id(void)
return read_apic_id();
}
-void default_init_apic_ldr(void)
-{
- unsigned long val;
-
- apic_write(APIC_DFR, APIC_DFR_VALUE);
- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
- val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
- apic_write(APIC_LDR, val);
-}
-
-int default_cpu_mask_to_apicid(const struct cpumask *mask,
- struct irq_data *irqdata,
- unsigned int *apicid)
-{
- unsigned int cpu = cpumask_first(mask);
-
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
- *apicid = per_cpu(x86_cpu_to_apicid, cpu);
- irq_data_update_effective_affinity(irqdata, cpumask_of(cpu));
- return 0;
-}
-
-int flat_cpu_mask_to_apicid(const struct cpumask *mask,
- struct irq_data *irqdata,
- unsigned int *apicid)
-
-{
- struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqdata);
- unsigned long cpu_mask = cpumask_bits(mask)[0] & APIC_ALL_CPUS;
-
- if (!cpu_mask)
- return -EINVAL;
- *apicid = (unsigned int)cpu_mask;
- cpumask_bits(effmsk)[0] = cpu_mask;
- return 0;
-}
-
/*
* Override the generic EOI implementation with an optimized version.
* Only called during early boot when only one CPU is active and with
@@ -2322,72 +2326,27 @@ static void __init apic_bsp_up_setup(void)
* Returns:
* apic_id of BSP APIC
*/
-int __init apic_bsp_setup(bool upmode)
+void __init apic_bsp_setup(bool upmode)
{
- int id;
-
connect_bsp_APIC();
if (upmode)
apic_bsp_up_setup();
setup_local_APIC();
- if (x2apic_mode)
- id = apic_read(APIC_LDR);
- else
- id = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
-
enable_IO_APIC();
end_local_APIC_setup();
irq_remap_enable_fault_handling();
setup_IO_APIC();
- /* Setup local timer */
- x86_init.timers.setup_percpu_clockev();
- return id;
-}
-
-/*
- * This initializes the IO-APIC and APIC hardware if this is
- * a UP kernel.
- */
-int __init APIC_init_uniprocessor(void)
-{
- if (disable_apic) {
- pr_info("Apic disabled\n");
- return -1;
- }
-#ifdef CONFIG_X86_64
- if (!boot_cpu_has(X86_FEATURE_APIC)) {
- disable_apic = 1;
- pr_info("Apic disabled by BIOS\n");
- return -1;
- }
-#else
- if (!smp_found_config && !boot_cpu_has(X86_FEATURE_APIC))
- return -1;
-
- /*
- * Complain if the BIOS pretends there is one.
- */
- if (!boot_cpu_has(X86_FEATURE_APIC) &&
- APIC_INTEGRATED(boot_cpu_apic_version)) {
- pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
- boot_cpu_physical_apicid);
- return -1;
- }
-#endif
-
- if (!smp_found_config)
- disable_ioapic_support();
-
- default_setup_apic_routing();
- apic_bsp_setup(true);
- return 0;
}
#ifdef CONFIG_UP_LATE_INIT
void __init up_late_init(void)
{
- APIC_init_uniprocessor();
+ if (apic_intr_mode == APIC_PIC)
+ return;
+
+ /* Setup local timer */
+ x86_init.timers.setup_percpu_clockev();
}
#endif
diff --git a/arch/x86/kernel/apic/apic_common.c b/arch/x86/kernel/apic/apic_common.c
new file mode 100644
index 000000000000..a360801779ae
--- /dev/null
+++ b/arch/x86/kernel/apic/apic_common.c
@@ -0,0 +1,46 @@
+/*
+ * Common functions shared between the various APIC flavours
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#include <linux/irq.h>
+#include <asm/apic.h>
+
+u32 apic_default_calc_apicid(unsigned int cpu)
+{
+ return per_cpu(x86_cpu_to_apicid, cpu);
+}
+
+u32 apic_flat_calc_apicid(unsigned int cpu)
+{
+ return 1U << cpu;
+}
+
+bool default_check_apicid_used(physid_mask_t *map, int apicid)
+{
+ return physid_isset(apicid, *map);
+}
+
+void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
+{
+ *retmap = *phys_map;
+}
+
+int default_cpu_present_to_apicid(int mps_cpu)
+{
+ if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
+ return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
+ else
+ return BAD_APICID;
+}
+EXPORT_SYMBOL_GPL(default_cpu_present_to_apicid);
+
+int default_check_phys_apicid_present(int phys_apicid)
+{
+ return physid_isset(phys_apicid, phys_cpu_present_map);
+}
+
+int default_apic_id_valid(int apicid)
+{
+ return (apicid < 255);
+}
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index dedd5a41ba48..aa85690e9b64 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -119,7 +119,7 @@ static unsigned int flat_get_apic_id(unsigned long x)
return (x >> 24) & 0xFF;
}
-static unsigned long set_apic_id(unsigned int id)
+static u32 set_apic_id(unsigned int id)
{
return (id & 0xFF) << 24;
}
@@ -154,12 +154,10 @@ static struct apic apic_flat __ro_after_init = {
.irq_delivery_mode = dest_LowestPrio,
.irq_dest_mode = 1, /* logical */
- .target_cpus = online_target_cpus,
.disable_esr = 0,
.dest_logical = APIC_DEST_LOGICAL,
.check_apicid_used = NULL,
- .vector_allocation_domain = flat_vector_allocation_domain,
.init_apic_ldr = flat_init_apic_ldr,
.ioapic_phys_id_map = NULL,
@@ -172,7 +170,7 @@ static struct apic apic_flat __ro_after_init = {
.get_apic_id = flat_get_apic_id,
.set_apic_id = set_apic_id,
- .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_flat_calc_apicid,
.send_IPI = default_send_IPI_single,
.send_IPI_mask = flat_send_IPI_mask,
@@ -249,12 +247,10 @@ static struct apic apic_physflat __ro_after_init = {
.irq_delivery_mode = dest_Fixed,
.irq_dest_mode = 0, /* physical */
- .target_cpus = online_target_cpus,
.disable_esr = 0,
.dest_logical = 0,
.check_apicid_used = NULL,
- .vector_allocation_domain = default_vector_allocation_domain,
/* not needed, but shouldn't hurt: */
.init_apic_ldr = flat_init_apic_ldr,
@@ -268,7 +264,7 @@ static struct apic apic_physflat __ro_after_init = {
.get_apic_id = flat_get_apic_id,
.set_apic_id = set_apic_id,
- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_default_calc_apicid,
.send_IPI = default_send_IPI_single_phys,
.send_IPI_mask = default_send_IPI_mask_sequence_phys,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index c8d211277315..7b659c4480c9 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -84,20 +84,6 @@ static int noop_apic_id_registered(void)
return physid_isset(0, phys_cpu_present_map);
}
-static const struct cpumask *noop_target_cpus(void)
-{
- /* only BSP here */
- return cpumask_of(0);
-}
-
-static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask,
- const struct cpumask *mask)
-{
- if (cpu != 0)
- pr_warning("APIC: Vector allocated for non-BSP cpu\n");
- cpumask_copy(retmask, cpumask_of(cpu));
-}
-
static u32 noop_apic_read(u32 reg)
{
WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_APIC) && !disable_apic);
@@ -109,6 +95,13 @@ static void noop_apic_write(u32 reg, u32 v)
WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_APIC) && !disable_apic);
}
+#ifdef CONFIG_X86_32
+static int noop_x86_32_early_logical_apicid(int cpu)
+{
+ return BAD_APICID;
+}
+#endif
+
struct apic apic_noop __ro_after_init = {
.name = "noop",
.probe = noop_probe,
@@ -121,12 +114,10 @@ struct apic apic_noop __ro_after_init = {
/* logical delivery broadcast to all CPUs: */
.irq_dest_mode = 1,
- .target_cpus = noop_target_cpus,
.disable_esr = 0,
.dest_logical = APIC_DEST_LOGICAL,
.check_apicid_used = default_check_apicid_used,
- .vector_allocation_domain = noop_vector_allocation_domain,
.init_apic_ldr = noop_init_apic_ldr,
.ioapic_phys_id_map = default_ioapic_phys_id_map,
@@ -142,7 +133,7 @@ struct apic apic_noop __ro_after_init = {
.get_apic_id = noop_get_apic_id,
.set_apic_id = NULL,
- .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_flat_calc_apicid,
.send_IPI = noop_send_IPI,
.send_IPI_mask = noop_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 2fda912219a6..134e04506ab4 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -38,7 +38,7 @@ static unsigned int numachip1_get_apic_id(unsigned long x)
return id;
}
-static unsigned long numachip1_set_apic_id(unsigned int id)
+static u32 numachip1_set_apic_id(unsigned int id)
{
return (id & 0xff) << 24;
}
@@ -51,7 +51,7 @@ static unsigned int numachip2_get_apic_id(unsigned long x)
return ((mcfg >> (28 - 8)) & 0xfff00) | (x >> 24);
}
-static unsigned long numachip2_set_apic_id(unsigned int id)
+static u32 numachip2_set_apic_id(unsigned int id)
{
return id << 24;
}
@@ -249,12 +249,10 @@ static const struct apic apic_numachip1 __refconst = {
.irq_delivery_mode = dest_Fixed,
.irq_dest_mode = 0, /* physical */
- .target_cpus = online_target_cpus,
.disable_esr = 0,
.dest_logical = 0,
.check_apicid_used = NULL,
- .vector_allocation_domain = default_vector_allocation_domain,
.init_apic_ldr = flat_init_apic_ldr,
.ioapic_phys_id_map = NULL,
@@ -267,7 +265,7 @@ static const struct apic apic_numachip1 __refconst = {
.get_apic_id = numachip1_get_apic_id,
.set_apic_id = numachip1_set_apic_id,
- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_default_calc_apicid,
.send_IPI = numachip_send_IPI_one,
.send_IPI_mask = numachip_send_IPI_mask,
@@ -300,12 +298,10 @@ static const struct apic apic_numachip2 __refconst = {
.irq_delivery_mode = dest_Fixed,
.irq_dest_mode = 0, /* physical */
- .target_cpus = online_target_cpus,
.disable_esr = 0,
.dest_logical = 0,
.check_apicid_used = NULL,
- .vector_allocation_domain = default_vector_allocation_domain,
.init_apic_ldr = flat_init_apic_ldr,
.ioapic_phys_id_map = NULL,
@@ -318,7 +314,7 @@ static const struct apic apic_numachip2 __refconst = {
.get_apic_id = numachip2_get_apic_id,
.set_apic_id = numachip2_set_apic_id,
- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_default_calc_apicid,
.send_IPI = numachip_send_IPI_one,
.send_IPI_mask = numachip_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index e12fbcfc9571..afee386ff711 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -27,9 +27,9 @@ static int bigsmp_apic_id_registered(void)
return 1;
}
-static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
+static bool bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
{
- return 0;
+ return false;
}
static int bigsmp_early_logical_apicid(int cpu)
@@ -155,12 +155,10 @@ static struct apic apic_bigsmp __ro_after_init = {
/* phys delivery to target CPU: */
.irq_dest_mode = 0,
- .target_cpus = default_target_cpus,
.disable_esr = 1,
.dest_logical = 0,
.check_apicid_used = bigsmp_check_apicid_used,
- .vector_allocation_domain = default_vector_allocation_domain,
.init_apic_ldr = bigsmp_init_apic_ldr,
.ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
@@ -173,7 +171,7 @@ static struct apic apic_bigsmp __ro_after_init = {
.get_apic_id = bigsmp_get_apic_id,
.set_apic_id = NULL,
- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_default_calc_apicid,
.send_IPI = default_send_IPI_single_phys,
.send_IPI_mask = default_send_IPI_mask_sequence_phys,
diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c
index 56ccf9346b08..b07075dce8b7 100644
--- a/arch/x86/kernel/apic/htirq.c
+++ b/arch/x86/kernel/apic/htirq.c
@@ -112,8 +112,8 @@ static void htirq_domain_free(struct irq_domain *domain, unsigned int virq,
irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
-static void htirq_domain_activate(struct irq_domain *domain,
- struct irq_data *irq_data)
+static int htirq_domain_activate(struct irq_domain *domain,
+ struct irq_data *irq_data, bool early)
{
struct ht_irq_msg msg;
struct irq_cfg *cfg = irqd_cfg(irq_data);
@@ -132,6 +132,7 @@ static void htirq_domain_activate(struct irq_domain *domain,
HT_IRQ_LOW_MT_ARBITRATED) |
HT_IRQ_LOW_IRQ_MASKED;
write_ht_irq_msg(irq_data->irq, &msg);
+ return 0;
}
static void htirq_domain_deactivate(struct irq_domain *domain,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 3b89b27945ff..201579dc5242 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1014,6 +1014,7 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain,
info->ioapic_pin))
return -ENOMEM;
} else {
+ info->flags |= X86_IRQ_ALLOC_LEGACY;
irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true,
NULL);
if (irq >= 0) {
@@ -1586,6 +1587,43 @@ static int __init notimercheck(char *s)
}
__setup("no_timer_check", notimercheck);
+static void __init delay_with_tsc(void)
+{
+ unsigned long long start, now;
+ unsigned long end = jiffies + 4;
+
+ start = rdtsc();
+
+ /*
+ * We don't know the TSC frequency yet, but waiting for
+ * 40000000000/HZ TSC cycles is safe:
+ * 4 GHz == 10 jiffies
+ * 1 GHz == 40 jiffies
+ */
+ do {
+ rep_nop();
+ now = rdtsc();
+ } while ((now - start) < 40000000000UL / HZ &&
+ time_before_eq(jiffies, end));
+}
+
+static void __init delay_without_tsc(void)
+{
+ unsigned long end = jiffies + 4;
+ int band = 1;
+
+ /*
+ * We don't know any frequency yet, but waiting for
+ * 40940000000/HZ cycles is safe:
+ * 4 GHz == 10 jiffies
+ * 1 GHz == 40 jiffies
+ * 1 << 1 + 1 << 2 +...+ 1 << 11 = 4094
+ */
+ do {
+ __delay(((1U << band++) * 10000000UL) / HZ);
+ } while (band < 12 && time_before_eq(jiffies, end));
+}
+
/*
* There is a nasty bug in some older SMP boards, their mptable lies
* about the timer IRQ. We do the following to work around the situation:
@@ -1604,8 +1642,12 @@ static int __init timer_irq_works(void)
local_save_flags(flags);
local_irq_enable();
- /* Let ten ticks pass... */
- mdelay((10 * 1000) / HZ);
+
+ if (boot_cpu_has(X86_FEATURE_TSC))
+ delay_with_tsc();
+ else
+ delay_without_tsc();
+
local_irq_restore(flags);
/*
@@ -1821,26 +1863,36 @@ static void ioapic_ir_ack_level(struct irq_data *irq_data)
eoi_ioapic_pin(data->entry.vector, data);
}
+static void ioapic_configure_entry(struct irq_data *irqd)
+{
+ struct mp_chip_data *mpd = irqd->chip_data;
+ struct irq_cfg *cfg = irqd_cfg(irqd);
+ struct irq_pin_list *entry;
+
+ /*
+ * Only update when the parent is the vector domain, don't touch it
+ * if the parent is the remapping domain. Check the installed
+ * ioapic chip to verify that.
+ */
+ if (irqd->chip == &ioapic_chip) {
+ mpd->entry.dest = cfg->dest_apicid;
+ mpd->entry.vector = cfg->vector;
+ }
+ for_each_irq_pin(entry, mpd->irq_2_pin)
+ __ioapic_write_entry(entry->apic, entry->pin, mpd->entry);
+}
+
static int ioapic_set_affinity(struct irq_data *irq_data,
const struct cpumask *mask, bool force)
{
struct irq_data *parent = irq_data->parent_data;
- struct mp_chip_data *data = irq_data->chip_data;
- struct irq_pin_list *entry;
- struct irq_cfg *cfg;
unsigned long flags;
int ret;
ret = parent->chip->irq_set_affinity(parent, mask, force);
raw_spin_lock_irqsave(&ioapic_lock, flags);
- if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
- cfg = irqd_cfg(irq_data);
- data->entry.dest = cfg->dest_apicid;
- data->entry.vector = cfg->vector;
- for_each_irq_pin(entry, data->irq_2_pin)
- __ioapic_write_entry(entry->apic, entry->pin,
- data->entry);
- }
+ if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE)
+ ioapic_configure_entry(irq_data);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return ret;
@@ -2097,7 +2149,7 @@ static inline void __init check_timer(void)
unmask_ioapic_irq(irq_get_irq_data(0));
}
irq_domain_deactivate_irq(irq_data);
- irq_domain_activate_irq(irq_data);
+ irq_domain_activate_irq(irq_data, false);
if (timer_irq_works()) {
if (disable_timer_pin_1 > 0)
clear_IO_APIC_pin(0, pin1);
@@ -2119,7 +2171,7 @@ static inline void __init check_timer(void)
*/
replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
irq_domain_deactivate_irq(irq_data);
- irq_domain_activate_irq(irq_data);
+ irq_domain_activate_irq(irq_data, false);
legacy_pic->unmask(0);
if (timer_irq_works()) {
apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@ -2513,52 +2565,9 @@ int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
}
/*
- * This function currently is only a helper for the i386 smp boot process where
- * we need to reprogram the ioredtbls to cater for the cpus which have come online
- * so mask in all cases should simply be apic->target_cpus()
+ * This function updates target affinity of IOAPIC interrupts to include
+ * the CPUs which came online during SMP bringup.
*/
-#ifdef CONFIG_SMP
-void __init setup_ioapic_dest(void)
-{
- int pin, ioapic, irq, irq_entry;
- const struct cpumask *mask;
- struct irq_desc *desc;
- struct irq_data *idata;
- struct irq_chip *chip;
-
- if (skip_ioapic_setup == 1)
- return;
-
- for_each_ioapic_pin(ioapic, pin) {
- irq_entry = find_irq_entry(ioapic, pin, mp_INT);
- if (irq_entry == -1)
- continue;
-
- irq = pin_2_irq(irq_entry, ioapic, pin, 0);
- if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
- continue;
-
- desc = irq_to_desc(irq);
- raw_spin_lock_irq(&desc->lock);
- idata = irq_desc_get_irq_data(desc);
-
- /*
- * Honour affinities which have been set in early boot
- */
- if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata))
- mask = irq_data_get_affinity_mask(idata);
- else
- mask = apic->target_cpus();
-
- chip = irq_data_get_irq_chip(idata);
- /* Might be lapic_chip for irq 0 */
- if (chip->irq_set_affinity)
- chip->irq_set_affinity(idata, mask, false);
- raw_spin_unlock_irq(&desc->lock);
- }
-}
-#endif
-
#define IOAPIC_RESOURCE_NAME_SIZE 11
static struct resource *ioapic_resources;
@@ -2978,17 +2987,15 @@ void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
-void mp_irqdomain_activate(struct irq_domain *domain,
- struct irq_data *irq_data)
+int mp_irqdomain_activate(struct irq_domain *domain,
+ struct irq_data *irq_data, bool early)
{
unsigned long flags;
- struct irq_pin_list *entry;
- struct mp_chip_data *data = irq_data->chip_data;
raw_spin_lock_irqsave(&ioapic_lock, flags);
- for_each_irq_pin(entry, data->irq_2_pin)
- __ioapic_write_entry(entry->apic, entry->pin, data->entry);
+ ioapic_configure_entry(irq_data);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+ return 0;
}
void mp_irqdomain_deactivate(struct irq_domain *domain,
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 63287659adb6..fa22017de806 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -66,6 +66,31 @@ static void setup_apic_flat_routing(void)
#endif
}
+static int default_apic_id_registered(void)
+{
+ return physid_isset(read_apic_id(), phys_cpu_present_map);
+}
+
+/*
+ * Set up the logical destination ID. Intel recommends setting DFR, LDR and
+ * TPR before enabling an APIC. See e.g. "AP-388 82489DX User's Manual"
+ * (Intel document number 292116).
+ */
+static void default_init_apic_ldr(void)
+{
+ unsigned long val;
+
+ apic_write(APIC_DFR, APIC_DFR_VALUE);
+ val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+ val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
+ apic_write(APIC_LDR, val);
+}
+
+static int default_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+ return cpuid_apic >> index_msb;
+}
+
/* should be called last. */
static int probe_default(void)
{
@@ -84,12 +109,10 @@ static struct apic apic_default __ro_after_init = {
/* logical delivery broadcast to all CPUs: */
.irq_dest_mode = 1,
- .target_cpus = default_target_cpus,
.disable_esr = 0,
.dest_logical = APIC_DEST_LOGICAL,
.check_apicid_used = default_check_apicid_used,
- .vector_allocation_domain = flat_vector_allocation_domain,
.init_apic_ldr = default_init_apic_ldr,
.ioapic_phys_id_map = default_ioapic_phys_id_map,
@@ -102,7 +125,7 @@ static struct apic apic_default __ro_after_init = {
.get_apic_id = default_get_apic_id,
.set_apic_id = NULL,
- .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_flat_calc_apicid,
.send_IPI = default_send_IPI_single,
.send_IPI_mask = default_send_IPI_mask_logical,
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 88c214e75a6b..05c85e693a5d 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -11,6 +11,7 @@
* published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
+#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
@@ -21,20 +22,30 @@
#include <asm/desc.h>
#include <asm/irq_remapping.h>
+#include <asm/trace/irq_vectors.h>
+
struct apic_chip_data {
- struct irq_cfg cfg;
- cpumask_var_t domain;
- cpumask_var_t old_domain;
- u8 move_in_progress : 1;
+ struct irq_cfg hw_irq_cfg;
+ unsigned int vector;
+ unsigned int prev_vector;
+ unsigned int cpu;
+ unsigned int prev_cpu;
+ unsigned int irq;
+ struct hlist_node clist;
+ unsigned int move_in_progress : 1,
+ is_managed : 1,
+ can_reserve : 1,
+ has_reserved : 1;
};
struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
-static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
+static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
-#ifdef CONFIG_X86_IO_APIC
-static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
+static struct irq_matrix *vector_matrix;
+#ifdef CONFIG_SMP
+static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif
void lock_vector_lock(void)
@@ -50,22 +61,37 @@ void unlock_vector_lock(void)
raw_spin_unlock(&vector_lock);
}
-static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
+void init_irq_alloc_info(struct irq_alloc_info *info,
+ const struct cpumask *mask)
+{
+ memset(info, 0, sizeof(*info));
+ info->mask = mask;
+}
+
+void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
- if (!irq_data)
+ if (src)
+ *dst = *src;
+ else
+ memset(dst, 0, sizeof(*dst));
+}
+
+static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
+{
+ if (!irqd)
return NULL;
- while (irq_data->parent_data)
- irq_data = irq_data->parent_data;
+ while (irqd->parent_data)
+ irqd = irqd->parent_data;
- return irq_data->chip_data;
+ return irqd->chip_data;
}
-struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
+struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
- struct apic_chip_data *data = apic_chip_data(irq_data);
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
- return data ? &data->cfg : NULL;
+ return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);
@@ -76,270 +102,395 @@ struct irq_cfg *irq_cfg(unsigned int irq)
static struct apic_chip_data *alloc_apic_chip_data(int node)
{
- struct apic_chip_data *data;
+ struct apic_chip_data *apicd;
- data = kzalloc_node(sizeof(*data), GFP_KERNEL, node);
- if (!data)
- return NULL;
- if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node))
- goto out_data;
- if (!zalloc_cpumask_var_node(&data->old_domain, GFP_KERNEL, node))
- goto out_domain;
- return data;
-out_domain:
- free_cpumask_var(data->domain);
-out_data:
- kfree(data);
- return NULL;
-}
-
-static void free_apic_chip_data(struct apic_chip_data *data)
-{
- if (data) {
- free_cpumask_var(data->domain);
- free_cpumask_var(data->old_domain);
- kfree(data);
+ apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
+ if (apicd)
+ INIT_HLIST_NODE(&apicd->clist);
+ return apicd;
+}
+
+static void free_apic_chip_data(struct apic_chip_data *apicd)
+{
+ kfree(apicd);
+}
+
+static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
+ unsigned int cpu)
+{
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
+
+ lockdep_assert_held(&vector_lock);
+
+ apicd->hw_irq_cfg.vector = vector;
+ apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
+ irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
+ trace_vector_config(irqd->irq, vector, cpu,
+ apicd->hw_irq_cfg.dest_apicid);
+}
+
+static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
+ unsigned int newcpu)
+{
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
+ struct irq_desc *desc = irq_data_to_desc(irqd);
+
+ lockdep_assert_held(&vector_lock);
+
+ trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
+ apicd->cpu);
+
+ /* Setup the vector move, if required */
+ if (apicd->vector && cpu_online(apicd->cpu)) {
+ apicd->move_in_progress = true;
+ apicd->prev_vector = apicd->vector;
+ apicd->prev_cpu = apicd->cpu;
+ } else {
+ apicd->prev_vector = 0;
}
+
+ apicd->vector = newvec;
+ apicd->cpu = newcpu;
+ BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
+ per_cpu(vector_irq, newcpu)[newvec] = desc;
}
-static int __assign_irq_vector(int irq, struct apic_chip_data *d,
- const struct cpumask *mask,
- struct irq_data *irqdata)
+static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
- /*
- * NOTE! The local APIC isn't very good at handling
- * multiple interrupts at the same interrupt level.
- * As the interrupt level is determined by taking the
- * vector number and shifting that right by 4, we
- * want to spread these out a bit so that they don't
- * all fall in the same interrupt level.
- *
- * Also, we've got to be careful not to trash gate
- * 0x80, because int 0x80 is hm, kind of importantish. ;)
- */
- static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
- static int current_offset = VECTOR_OFFSET_START % 16;
- int cpu, vector;
+ unsigned int cpu = cpumask_first(cpu_online_mask);
- /*
- * If there is still a move in progress or the previous move has not
- * been cleaned up completely, tell the caller to come back later.
- */
- if (d->move_in_progress ||
- cpumask_intersects(d->old_domain, cpu_online_mask))
- return -EBUSY;
+ apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
+}
- /* Only try and allocate irqs on cpus that are present */
- cpumask_clear(d->old_domain);
- cpumask_clear(searched_cpumask);
- cpu = cpumask_first_and(mask, cpu_online_mask);
- while (cpu < nr_cpu_ids) {
- int new_cpu, offset;
+static int reserve_managed_vector(struct irq_data *irqd)
+{
+ const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
+ unsigned long flags;
+ int ret;
- /* Get the possible target cpus for @mask/@cpu from the apic */
- apic->vector_allocation_domain(cpu, vector_cpumask, mask);
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ apicd->is_managed = true;
+ ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+ trace_vector_reserve_managed(irqd->irq, ret);
+ return ret;
+}
- /*
- * Clear the offline cpus from @vector_cpumask for searching
- * and verify whether the result overlaps with @mask. If true,
- * then the call to apic->cpu_mask_to_apicid() will
- * succeed as well. If not, no point in trying to find a
- * vector in this mask.
- */
- cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
- if (!cpumask_intersects(vector_searchmask, mask))
- goto next_cpu;
-
- if (cpumask_subset(vector_cpumask, d->domain)) {
- if (cpumask_equal(vector_cpumask, d->domain))
- goto success;
- /*
- * Mark the cpus which are not longer in the mask for
- * cleanup.
- */
- cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
- vector = d->cfg.vector;
- goto update;
- }
+static void reserve_irq_vector_locked(struct irq_data *irqd)
+{
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
- vector = current_vector;
- offset = current_offset;
-next:
- vector += 16;
- if (vector >= FIRST_SYSTEM_VECTOR) {
- offset = (offset + 1) % 16;
- vector = FIRST_EXTERNAL_VECTOR + offset;
- }
+ irq_matrix_reserve(vector_matrix);
+ apicd->can_reserve = true;
+ apicd->has_reserved = true;
+ trace_vector_reserve(irqd->irq, 0);
+ vector_assign_managed_shutdown(irqd);
+}
- /* If the search wrapped around, try the next cpu */
- if (unlikely(current_vector == vector))
- goto next_cpu;
+static int reserve_irq_vector(struct irq_data *irqd)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ reserve_irq_vector_locked(irqd);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+ return 0;
+}
- if (test_bit(vector, used_vectors))
- goto next;
+static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
+{
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
+ bool resvd = apicd->has_reserved;
+ unsigned int cpu = apicd->cpu;
+ int vector = apicd->vector;
- for_each_cpu(new_cpu, vector_searchmask) {
- if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
- goto next;
- }
- /* Found one! */
- current_vector = vector;
- current_offset = offset;
- /* Schedule the old vector for cleanup on all cpus */
- if (d->cfg.vector)
- cpumask_copy(d->old_domain, d->domain);
- for_each_cpu(new_cpu, vector_searchmask)
- per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
- goto update;
-
-next_cpu:
- /*
- * We exclude the current @vector_cpumask from the requested
- * @mask and try again with the next online cpu in the
- * result. We cannot modify @mask, so we use @vector_cpumask
- * as a temporary buffer here as it will be reassigned when
- * calling apic->vector_allocation_domain() above.
- */
- cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
- cpumask_andnot(vector_cpumask, mask, searched_cpumask);
- cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
- continue;
- }
- return -ENOSPC;
+ lockdep_assert_held(&vector_lock);
-update:
/*
- * Exclude offline cpus from the cleanup mask and set the
- * move_in_progress flag when the result is not empty.
+ * If the current target CPU is online and in the new requested
+ * affinity mask, there is no point in moving the interrupt from
+ * one CPU to another.
*/
- cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
- d->move_in_progress = !cpumask_empty(d->old_domain);
- d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0;
- d->cfg.vector = vector;
- cpumask_copy(d->domain, vector_cpumask);
-success:
- /*
- * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
- * as we already established, that mask & d->domain & cpu_online_mask
- * is not empty.
- *
- * vector_searchmask is a subset of d->domain and has the offline
- * cpus masked out.
- */
- cpumask_and(vector_searchmask, vector_searchmask, mask);
- BUG_ON(apic->cpu_mask_to_apicid(vector_searchmask, irqdata,
- &d->cfg.dest_apicid));
+ if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
+ return 0;
+
+ vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
+ if (vector > 0)
+ apic_update_vector(irqd, vector, cpu);
+ trace_vector_alloc(irqd->irq, vector, resvd, vector);
+ return vector;
+}
+
+static int assign_vector_locked(struct irq_data *irqd,
+ const struct cpumask *dest)
+{
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
+ int vector = allocate_vector(irqd, dest);
+
+ if (vector < 0)
+ return vector;
+
+ apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
return 0;
}
-static int assign_irq_vector(int irq, struct apic_chip_data *data,
- const struct cpumask *mask,
- struct irq_data *irqdata)
+static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
- int err;
unsigned long flags;
+ int ret;
raw_spin_lock_irqsave(&vector_lock, flags);
- err = __assign_irq_vector(irq, data, mask, irqdata);
+ cpumask_and(vector_searchmask, dest, cpu_online_mask);
+ ret = assign_vector_locked(irqd, vector_searchmask);
raw_spin_unlock_irqrestore(&vector_lock, flags);
- return err;
+ return ret;
}
-static int assign_irq_vector_policy(int irq, int node,
- struct apic_chip_data *data,
- struct irq_alloc_info *info,
- struct irq_data *irqdata)
+static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
- if (info && info->mask)
- return assign_irq_vector(irq, data, info->mask, irqdata);
- if (node != NUMA_NO_NODE &&
- assign_irq_vector(irq, data, cpumask_of_node(node), irqdata) == 0)
+ /* Get the affinity mask - either irq_default_affinity or (user) set */
+ const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
+ int node = irq_data_get_node(irqd);
+
+ if (node == NUMA_NO_NODE)
+ goto all;
+ /* Try the intersection of @affmsk and node mask */
+ cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
+ if (!assign_vector_locked(irqd, vector_searchmask))
+ return 0;
+ /* Try the node mask */
+ if (!assign_vector_locked(irqd, cpumask_of_node(node)))
return 0;
- return assign_irq_vector(irq, data, apic->target_cpus(), irqdata);
+all:
+ /* Try the full affinity mask */
+ cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
+ if (!assign_vector_locked(irqd, vector_searchmask))
+ return 0;
+ /* Try the full online mask */
+ return assign_vector_locked(irqd, cpu_online_mask);
+}
+
+static int
+assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
+{
+ if (irqd_affinity_is_managed(irqd))
+ return reserve_managed_vector(irqd);
+ if (info->mask)
+ return assign_irq_vector(irqd, info->mask);
+ /*
+ * Make only a global reservation with no guarantee. A real vector
+ * is associated at activation time.
+ */
+ return reserve_irq_vector(irqd);
}
-static void clear_irq_vector(int irq, struct apic_chip_data *data)
+static int
+assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
- struct irq_desc *desc;
- int cpu, vector;
+ const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
+ int vector, cpu;
- if (!data->cfg.vector)
+ cpumask_and(vector_searchmask, vector_searchmask, affmsk);
+ cpu = cpumask_first(vector_searchmask);
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+ /* set_affinity might call here for nothing */
+ if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
+ return 0;
+ vector = irq_matrix_alloc_managed(vector_matrix, cpu);
+ trace_vector_alloc_managed(irqd->irq, vector, vector);
+ if (vector < 0)
+ return vector;
+ apic_update_vector(irqd, vector, cpu);
+ apic_update_irq_cfg(irqd, vector, cpu);
+ return 0;
+}
+
+static void clear_irq_vector(struct irq_data *irqd)
+{
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
+ bool managed = irqd_affinity_is_managed(irqd);
+ unsigned int vector = apicd->vector;
+
+ lockdep_assert_held(&vector_lock);
+
+ if (!vector)
return;
- vector = data->cfg.vector;
- for_each_cpu_and(cpu, data->domain, cpu_online_mask)
- per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
+ trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
+ apicd->prev_cpu);
- data->cfg.vector = 0;
- cpumask_clear(data->domain);
+ per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED;
+ irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
+ apicd->vector = 0;
- /*
- * If move is in progress or the old_domain mask is not empty,
- * i.e. the cleanup IPI has not been processed yet, we need to remove
- * the old references to desc from all cpus vector tables.
- */
- if (!data->move_in_progress && cpumask_empty(data->old_domain))
+ /* Clean up move in progress */
+ vector = apicd->prev_vector;
+ if (!vector)
return;
- desc = irq_to_desc(irq);
- for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
- for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
- vector++) {
- if (per_cpu(vector_irq, cpu)[vector] != desc)
- continue;
- per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
- break;
- }
+ per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED;
+ irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
+ apicd->prev_vector = 0;
+ apicd->move_in_progress = 0;
+ hlist_del_init(&apicd->clist);
+}
+
+static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
+{
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
+ unsigned long flags;
+
+ trace_vector_deactivate(irqd->irq, apicd->is_managed,
+ apicd->can_reserve, false);
+
+ /* Regular fixed assigned interrupt */
+ if (!apicd->is_managed && !apicd->can_reserve)
+ return;
+ /* If the interrupt has a global reservation, nothing to do */
+ if (apicd->has_reserved)
+ return;
+
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ clear_irq_vector(irqd);
+ if (apicd->can_reserve)
+ reserve_irq_vector_locked(irqd);
+ else
+ vector_assign_managed_shutdown(irqd);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+}
+
+static int activate_reserved(struct irq_data *irqd)
+{
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
+ int ret;
+
+ ret = assign_irq_vector_any_locked(irqd);
+ if (!ret)
+ apicd->has_reserved = false;
+ return ret;
+}
+
+static int activate_managed(struct irq_data *irqd)
+{
+ const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
+ int ret;
+
+ cpumask_and(vector_searchmask, dest, cpu_online_mask);
+ if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
+ /* Something in the core code broke! Survive gracefully */
+ pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
+ return -EINVAL;
+ }
+
+ ret = assign_managed_vector(irqd, vector_searchmask);
+ /*
+ * This should not happen. The vector reservation got buggered. Handle
+ * it gracefully.
+ */
+ if (WARN_ON_ONCE(ret < 0)) {
+ pr_err("Managed startup irq %u, no vector available\n",
+ irqd->irq);
}
- data->move_in_progress = 0;
+ return ret;
}
-void init_irq_alloc_info(struct irq_alloc_info *info,
- const struct cpumask *mask)
+static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
+ bool early)
{
- memset(info, 0, sizeof(*info));
- info->mask = mask;
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
+ unsigned long flags;
+ int ret = 0;
+
+ trace_vector_activate(irqd->irq, apicd->is_managed,
+ apicd->can_reserve, early);
+
+ /* Nothing to do for fixed assigned vectors */
+ if (!apicd->can_reserve && !apicd->is_managed)
+ return 0;
+
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ if (early || irqd_is_managed_and_shutdown(irqd))
+ vector_assign_managed_shutdown(irqd);
+ else if (apicd->is_managed)
+ ret = activate_managed(irqd);
+ else if (apicd->has_reserved)
+ ret = activate_reserved(irqd);
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+ return ret;
}
-void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
+static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
- if (src)
- *dst = *src;
- else
- memset(dst, 0, sizeof(*dst));
+ const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
+
+ trace_vector_teardown(irqd->irq, apicd->is_managed,
+ apicd->has_reserved);
+
+ if (apicd->has_reserved)
+ irq_matrix_remove_reserved(vector_matrix);
+ if (apicd->is_managed)
+ irq_matrix_remove_managed(vector_matrix, dest);
}
static void x86_vector_free_irqs(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
- struct apic_chip_data *apic_data;
- struct irq_data *irq_data;
+ struct apic_chip_data *apicd;
+ struct irq_data *irqd;
unsigned long flags;
int i;
for (i = 0; i < nr_irqs; i++) {
- irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
- if (irq_data && irq_data->chip_data) {
+ irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
+ if (irqd && irqd->chip_data) {
raw_spin_lock_irqsave(&vector_lock, flags);
- clear_irq_vector(virq + i, irq_data->chip_data);
- apic_data = irq_data->chip_data;
- irq_domain_reset_irq_data(irq_data);
+ clear_irq_vector(irqd);
+ vector_free_reserved_and_managed(irqd);
+ apicd = irqd->chip_data;
+ irq_domain_reset_irq_data(irqd);
raw_spin_unlock_irqrestore(&vector_lock, flags);
- free_apic_chip_data(apic_data);
-#ifdef CONFIG_X86_IO_APIC
- if (virq + i < nr_legacy_irqs())
- legacy_irq_data[virq + i] = NULL;
-#endif
+ free_apic_chip_data(apicd);
}
}
}
+static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
+ struct apic_chip_data *apicd)
+{
+ unsigned long flags;
+ bool realloc = false;
+
+ apicd->vector = ISA_IRQ_VECTOR(virq);
+ apicd->cpu = 0;
+
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ /*
+ * If the interrupt is activated, then it must stay at this vector
+ * position. That's usually the timer interrupt (0).
+ */
+ if (irqd_is_activated(irqd)) {
+ trace_vector_setup(virq, true, 0);
+ apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
+ } else {
+ /* Release the vector */
+ apicd->can_reserve = true;
+ clear_irq_vector(irqd);
+ realloc = true;
+ }
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+ return realloc;
+}
+
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct irq_alloc_info *info = arg;
- struct apic_chip_data *data;
- struct irq_data *irq_data;
+ struct apic_chip_data *apicd;
+ struct irq_data *irqd;
int i, err, node;
if (disable_apic)
@@ -350,34 +501,37 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
return -ENOSYS;
for (i = 0; i < nr_irqs; i++) {
- irq_data = irq_domain_get_irq_data(domain, virq + i);
- BUG_ON(!irq_data);
- node = irq_data_get_node(irq_data);
-#ifdef CONFIG_X86_IO_APIC
- if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
- data = legacy_irq_data[virq + i];
- else
-#endif
- data = alloc_apic_chip_data(node);
- if (!data) {
+ irqd = irq_domain_get_irq_data(domain, virq + i);
+ BUG_ON(!irqd);
+ node = irq_data_get_node(irqd);
+ WARN_ON_ONCE(irqd->chip_data);
+ apicd = alloc_apic_chip_data(node);
+ if (!apicd) {
err = -ENOMEM;
goto error;
}
- irq_data->chip = &lapic_controller;
- irq_data->chip_data = data;
- irq_data->hwirq = virq + i;
- err = assign_irq_vector_policy(virq + i, node, data, info,
- irq_data);
- if (err)
- goto error;
+ apicd->irq = virq + i;
+ irqd->chip = &lapic_controller;
+ irqd->chip_data = apicd;
+ irqd->hwirq = virq + i;
+ irqd_set_single_target(irqd);
/*
- * If the apic destination mode is physical, then the
- * effective affinity is restricted to a single target
- * CPU. Mark the interrupt accordingly.
+ * Legacy vectors are already assigned when the IOAPIC
+ * takes them over. They stay on the same vector. This is
+ * required for check_timer() to work correctly as it might
+ * switch back to legacy mode. Only update the hardware
+ * config.
*/
- if (!apic->irq_dest_mode)
- irqd_set_single_target(irq_data);
+ if (info->flags & X86_IRQ_ALLOC_LEGACY) {
+ if (!vector_configure_legacy(virq + i, irqd, apicd))
+ continue;
+ }
+
+ err = assign_irq_vector_policy(irqd, info);
+ trace_vector_setup(virq + i, false, err);
+ if (err)
+ goto error;
}
return 0;
@@ -387,9 +541,56 @@ error:
return err;
}
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
+ struct irq_data *irqd, int ind)
+{
+ unsigned int cpu, vector, prev_cpu, prev_vector;
+ struct apic_chip_data *apicd;
+ unsigned long flags;
+ int irq;
+
+ if (!irqd) {
+ irq_matrix_debug_show(m, vector_matrix, ind);
+ return;
+ }
+
+ irq = irqd->irq;
+ if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
+ seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
+ seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
+ return;
+ }
+
+ apicd = irqd->chip_data;
+ if (!apicd) {
+ seq_printf(m, "%*sVector: Not assigned\n", ind, "");
+ return;
+ }
+
+ raw_spin_lock_irqsave(&vector_lock, flags);
+ cpu = apicd->cpu;
+ vector = apicd->vector;
+ prev_cpu = apicd->prev_cpu;
+ prev_vector = apicd->prev_vector;
+ raw_spin_unlock_irqrestore(&vector_lock, flags);
+ seq_printf(m, "%*sVector: %5u\n", ind, "", vector);
+ seq_printf(m, "%*sTarget: %5u\n", ind, "", cpu);
+ if (prev_vector) {
+ seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", prev_vector);
+ seq_printf(m, "%*sPrevious target: %5u\n", ind, "", prev_cpu);
+ }
+}
+#endif
+
static const struct irq_domain_ops x86_vector_domain_ops = {
- .alloc = x86_vector_alloc_irqs,
- .free = x86_vector_free_irqs,
+ .alloc = x86_vector_alloc_irqs,
+ .free = x86_vector_free_irqs,
+ .activate = x86_vector_activate,
+ .deactivate = x86_vector_deactivate,
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+ .debug_show = x86_vector_debug_show,
+#endif
};
int __init arch_probe_nr_irqs(void)
@@ -419,35 +620,40 @@ int __init arch_probe_nr_irqs(void)
return legacy_pic->probe();
}
-#ifdef CONFIG_X86_IO_APIC
-static void __init init_legacy_irqs(void)
+void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
- int i, node = cpu_to_node(0);
- struct apic_chip_data *data;
-
/*
- * For legacy IRQ's, start with assigning irq0 to irq15 to
- * ISA_IRQ_VECTOR(i) for all cpu's.
+ * Use assign system here so it won't get accounted as allocated
+ * and moveable in the cpu hotplug check and it prevents managed
+ * irq reservation from touching it.
*/
- for (i = 0; i < nr_legacy_irqs(); i++) {
- data = legacy_irq_data[i] = alloc_apic_chip_data(node);
- BUG_ON(!data);
+ irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
+}
+
+void __init lapic_assign_system_vectors(void)
+{
+ unsigned int i, vector = 0;
- data->cfg.vector = ISA_IRQ_VECTOR(i);
- cpumask_setall(data->domain);
- irq_set_chip_data(i, data);
+ for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
+ irq_matrix_assign_system(vector_matrix, vector, false);
+
+ if (nr_legacy_irqs() > 1)
+ lapic_assign_legacy_vector(PIC_CASCADE_IR, false);
+
+ /* System vectors are reserved, online the matrix */
+ irq_matrix_online(vector_matrix);
+
+ /* Mark the preallocated legacy interrupts */
+ for (i = 0; i < nr_legacy_irqs(); i++) {
+ if (i != PIC_CASCADE_IR)
+ irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
}
}
-#else
-static inline void init_legacy_irqs(void) { }
-#endif
int __init arch_early_irq_init(void)
{
struct fwnode_handle *fn;
- init_legacy_irqs();
-
fn = irq_domain_alloc_named_fwnode("VECTOR");
BUG_ON(!fn);
x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
@@ -459,100 +665,115 @@ int __init arch_early_irq_init(void)
arch_init_msi_domain(x86_vector_domain);
arch_init_htirq_domain(x86_vector_domain);
- BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
- BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
+
+ /*
+ * Allocate the vector matrix allocator data structure and limit the
+ * search area.
+ */
+ vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
+ FIRST_SYSTEM_VECTOR);
+ BUG_ON(!vector_matrix);
return arch_early_ioapic_init();
}
-/* Initialize vector_irq on a new cpu */
-static void __setup_vector_irq(int cpu)
+#ifdef CONFIG_SMP
+
+static struct irq_desc *__setup_vector_irq(int vector)
{
- struct apic_chip_data *data;
- struct irq_desc *desc;
- int irq, vector;
+ int isairq = vector - ISA_IRQ_VECTOR(0);
+
+ /* Check whether the irq is in the legacy space */
+ if (isairq < 0 || isairq >= nr_legacy_irqs())
+ return VECTOR_UNUSED;
+ /* Check whether the irq is handled by the IOAPIC */
+ if (test_bit(isairq, &io_apic_irqs))
+ return VECTOR_UNUSED;
+ return irq_to_desc(isairq);
+}
- /* Mark the inuse vectors */
- for_each_irq_desc(irq, desc) {
- struct irq_data *idata = irq_desc_get_irq_data(desc);
+/* Online the local APIC infrastructure and initialize the vectors */
+void lapic_online(void)
+{
+ unsigned int vector;
- data = apic_chip_data(idata);
- if (!data || !cpumask_test_cpu(cpu, data->domain))
- continue;
- vector = data->cfg.vector;
- per_cpu(vector_irq, cpu)[vector] = desc;
- }
- /* Mark the free vectors */
- for (vector = 0; vector < NR_VECTORS; ++vector) {
- desc = per_cpu(vector_irq, cpu)[vector];
- if (IS_ERR_OR_NULL(desc))
- continue;
+ lockdep_assert_held(&vector_lock);
- data = apic_chip_data(irq_desc_get_irq_data(desc));
- if (!cpumask_test_cpu(cpu, data->domain))
- per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
- }
+ /* Online the vector matrix array for this CPU */
+ irq_matrix_online(vector_matrix);
+
+ /*
+ * The interrupt affinity logic never targets interrupts to offline
+ * CPUs. The exceptions are the legacy PIC interrupts. In general
+ * they are only targeted to CPU0, but depending on the platform
+ * they can be distributed to any online CPU in hardware. The
+ * kernel has no influence on that. So all active legacy vectors
+ * must be installed on all CPUs. All non legacy interrupts can be
+ * cleared.
+ */
+ for (vector = 0; vector < NR_VECTORS; vector++)
+ this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}
-/*
- * Setup the vector to irq mappings. Must be called with vector_lock held.
- */
-void setup_vector_irq(int cpu)
+void lapic_offline(void)
{
- int irq;
+ lock_vector_lock();
+ irq_matrix_offline(vector_matrix);
+ unlock_vector_lock();
+}
+
+static int apic_set_affinity(struct irq_data *irqd,
+ const struct cpumask *dest, bool force)
+{
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
+ int err;
- lockdep_assert_held(&vector_lock);
/*
- * On most of the platforms, legacy PIC delivers the interrupts on the
- * boot cpu. But there are certain platforms where PIC interrupts are
- * delivered to multiple cpu's. If the legacy IRQ is handled by the
- * legacy PIC, for the new cpu that is coming online, setup the static
- * legacy vector to irq mapping:
+ * Core code can call here for inactive interrupts. For inactive
+ * interrupts which use managed or reservation mode there is no
+ * point in going through the vector assignment right now as the
+ * activation will assign a vector which fits the destination
+ * cpumask. Let the core code store the destination mask and be
+ * done with it.
*/
- for (irq = 0; irq < nr_legacy_irqs(); irq++)
- per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);
+ if (!irqd_is_activated(irqd) &&
+ (apicd->is_managed || apicd->can_reserve))
+ return IRQ_SET_MASK_OK;
- __setup_vector_irq(cpu);
+ raw_spin_lock(&vector_lock);
+ cpumask_and(vector_searchmask, dest, cpu_online_mask);
+ if (irqd_affinity_is_managed(irqd))
+ err = assign_managed_vector(irqd, vector_searchmask);
+ else
+ err = assign_vector_locked(irqd, vector_searchmask);
+ raw_spin_unlock(&vector_lock);
+ return err ? err : IRQ_SET_MASK_OK;
}
-static int apic_retrigger_irq(struct irq_data *irq_data)
+#else
+# define apic_set_affinity NULL
+#endif
+
+static int apic_retrigger_irq(struct irq_data *irqd)
{
- struct apic_chip_data *data = apic_chip_data(irq_data);
+ struct apic_chip_data *apicd = apic_chip_data(irqd);
unsigned long flags;
- int cpu;
raw_spin_lock_irqsave(&vector_lock, flags);
- cpu = cpumask_first_and(data->domain, cpu_online_mask);
- apic->send_IPI_mask(cpumask_of(cpu), data->cfg.vector);
+ apic->send_IPI(apicd->cpu, apicd->vector);
raw_spin_unlock_irqrestore(&vector_lock, flags);
return 1;
}
-void apic_ack_edge(struct irq_data *data)
+void apic_ack_edge(struct irq_data *irqd)
{
- irq_complete_move(irqd_cfg(data));
- irq_move_irq(data);
+ irq_complete_move(irqd_cfg(irqd));
+ irq_move_irq(irqd);
ack_APIC_irq();
}
-static int apic_set_affinity(struct irq_data *irq_data,
- const struct cpumask *dest, bool force)
-{
- struct apic_chip_data *data = irq_data->chip_data;
- int err, irq = irq_data->irq;
-
- if (!IS_ENABLED(CONFIG_SMP))
- return -EPERM;
-
- if (!cpumask_intersects(dest, cpu_online_mask))
- return -EINVAL;
-
- err = assign_irq_vector(irq, data, dest, irq_data);
- return err ? err : IRQ_SET_MASK_OK;
-}
-
static struct irq_chip lapic_controller = {
.name = "APIC",
.irq_ack = apic_ack_edge,
@@ -561,115 +782,98 @@ static struct irq_chip lapic_controller = {
};
#ifdef CONFIG_SMP
-static void __send_cleanup_vector(struct apic_chip_data *data)
-{
- raw_spin_lock(&vector_lock);
- cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
- data->move_in_progress = 0;
- if (!cpumask_empty(data->old_domain))
- apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
- raw_spin_unlock(&vector_lock);
-}
-void send_cleanup_vector(struct irq_cfg *cfg)
+static void free_moved_vector(struct apic_chip_data *apicd)
{
- struct apic_chip_data *data;
+ unsigned int vector = apicd->prev_vector;
+ unsigned int cpu = apicd->prev_cpu;
+ bool managed = apicd->is_managed;
- data = container_of(cfg, struct apic_chip_data, cfg);
- if (data->move_in_progress)
- __send_cleanup_vector(data);
+ /*
+ * This should never happen. Managed interrupts are not
+ * migrated except on CPU down, which does not involve the
+ * cleanup vector. But try to keep the accounting correct
+ * nevertheless.
+ */
+ WARN_ON_ONCE(managed);
+
+ trace_vector_free_moved(apicd->irq, cpu, vector, managed);
+ irq_matrix_free(vector_matrix, cpu, vector, managed);
+ per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
+ hlist_del_init(&apicd->clist);
+ apicd->prev_vector = 0;
+ apicd->move_in_progress = 0;
}
asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
- unsigned vector, me;
+ struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
+ struct apic_chip_data *apicd;
+ struct hlist_node *tmp;
entering_ack_irq();
-
/* Prevent vectors vanishing under us */
raw_spin_lock(&vector_lock);
- me = smp_processor_id();
- for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
- struct apic_chip_data *data;
- struct irq_desc *desc;
- unsigned int irr;
-
- retry:
- desc = __this_cpu_read(vector_irq[vector]);
- if (IS_ERR_OR_NULL(desc))
- continue;
-
- if (!raw_spin_trylock(&desc->lock)) {
- raw_spin_unlock(&vector_lock);
- cpu_relax();
- raw_spin_lock(&vector_lock);
- goto retry;
- }
-
- data = apic_chip_data(irq_desc_get_irq_data(desc));
- if (!data)
- goto unlock;
+ hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
+ unsigned int irr, vector = apicd->prev_vector;
/*
- * Nothing to cleanup if irq migration is in progress
- * or this cpu is not set in the cleanup mask.
- */
- if (data->move_in_progress ||
- !cpumask_test_cpu(me, data->old_domain))
- goto unlock;
-
- /*
- * We have two cases to handle here:
- * 1) vector is unchanged but the target mask got reduced
- * 2) vector and the target mask has changed
- *
- * #1 is obvious, but in #2 we have two vectors with the same
- * irq descriptor: the old and the new vector. So we need to
- * make sure that we only cleanup the old vector. The new
- * vector has the current @vector number in the config and
- * this cpu is part of the target mask. We better leave that
- * one alone.
- */
- if (vector == data->cfg.vector &&
- cpumask_test_cpu(me, data->domain))
- goto unlock;
-
- irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
- /*
- * Check if the vector that needs to be cleanedup is
- * registered at the cpu's IRR. If so, then this is not
- * the best time to clean it up. Lets clean it up in the
+ * Paranoia: Check if the vector that needs to be cleaned
+ * up is registered at the APIC's IRR. If so, then this is
+ * not the best time to clean it up. Clean it up in the
* next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
- * to myself.
+ * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
+ * priority external vector, so on return from this
+ * interrupt the device interrupt will happen first.
*/
- if (irr & (1 << (vector % 32))) {
+ irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
+ if (irr & (1U << (vector % 32))) {
apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
- goto unlock;
+ continue;
}
- __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
- cpumask_clear_cpu(me, data->old_domain);
-unlock:
- raw_spin_unlock(&desc->lock);
+ free_moved_vector(apicd);
}
raw_spin_unlock(&vector_lock);
-
exiting_irq();
}
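The new cleanup handler replaces the full scan of the per-CPU vector table with a walk of a per-CPU list of pending moves: entries whose old vector is still pending in the IRR are left for a later retry, everything else is freed on the spot. A rough userspace analogue of that safe-iteration-with-requeue pattern is sketched below; the singly linked list and the still_pending flag are stand-ins for the kernel's hlist and the APIC IRR check.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct move {
	int irq;
	int old_vector;
	bool still_pending;   /* stand-in for "old vector set in the IRR" */
	struct move *next;
};

/* Free completed moves; keep pending ones on the list for the next pass. */
static struct move *run_cleanup(struct move *head, int *retries)
{
	struct move **pp = &head;

	while (*pp) {
		struct move *m = *pp;

		if (m->still_pending) {
			(*retries)++;          /* would re-send the cleanup IPI */
			pp = &m->next;
			continue;
		}
		*pp = m->next;                 /* unlink and release the vector */
		printf("freed irq %d (old vector %d)\n", m->irq, m->old_vector);
		free(m);
	}
	return head;
}

int main(void)
{
	struct move *head = NULL;
	int specs[][3] = { {10, 33, 0}, {11, 34, 1}, {12, 35, 0} };

	for (int i = 0; i < 3; i++) {
		struct move *m = malloc(sizeof(*m));
		m->irq = specs[i][0];
		m->old_vector = specs[i][1];
		m->still_pending = specs[i][2];
		m->next = head;
		head = m;
	}

	int retries = 0;
	head = run_cleanup(head, &retries);
	printf("entries left for retry: %d\n", retries);

	/* second pass, after the pending interrupt was serviced */
	for (struct move *m = head; m; m = m->next)
		m->still_pending = false;
	run_cleanup(head, &retries);
	return 0;
}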
+static void __send_cleanup_vector(struct apic_chip_data *apicd)
+{
+ unsigned int cpu;
+
+ raw_spin_lock(&vector_lock);
+ apicd->move_in_progress = 0;
+ cpu = apicd->prev_cpu;
+ if (cpu_online(cpu)) {
+ hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
+ apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
+ } else {
+ apicd->prev_vector = 0;
+ }
+ raw_spin_unlock(&vector_lock);
+}
+
+void send_cleanup_vector(struct irq_cfg *cfg)
+{
+ struct apic_chip_data *apicd;
+
+ apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
+ if (apicd->move_in_progress)
+ __send_cleanup_vector(apicd);
+}
+
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
- unsigned me;
- struct apic_chip_data *data;
+ struct apic_chip_data *apicd;
- data = container_of(cfg, struct apic_chip_data, cfg);
- if (likely(!data->move_in_progress))
+ apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
+ if (likely(!apicd->move_in_progress))
return;
- me = smp_processor_id();
- if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain))
- __send_cleanup_vector(data);
+ if (vector == apicd->vector && apicd->cpu == smp_processor_id())
+ __send_cleanup_vector(apicd);
}
void irq_complete_move(struct irq_cfg *cfg)
@@ -682,10 +886,9 @@ void irq_complete_move(struct irq_cfg *cfg)
*/
void irq_force_complete_move(struct irq_desc *desc)
{
- struct irq_data *irqdata;
- struct apic_chip_data *data;
- struct irq_cfg *cfg;
- unsigned int cpu;
+ struct apic_chip_data *apicd;
+ struct irq_data *irqd;
+ unsigned int vector;
/*
* The function is called for all descriptors regardless of which
@@ -696,43 +899,31 @@ void irq_force_complete_move(struct irq_desc *desc)
* Check first that the chip_data is what we expect
* (apic_chip_data) before touching it any further.
*/
- irqdata = irq_domain_get_irq_data(x86_vector_domain,
- irq_desc_get_irq(desc));
- if (!irqdata)
+ irqd = irq_domain_get_irq_data(x86_vector_domain,
+ irq_desc_get_irq(desc));
+ if (!irqd)
return;
- data = apic_chip_data(irqdata);
- cfg = data ? &data->cfg : NULL;
+ raw_spin_lock(&vector_lock);
+ apicd = apic_chip_data(irqd);
+ if (!apicd)
+ goto unlock;
- if (!cfg)
- return;
+ /*
+ * If prev_vector is empty, no action required.
+ */
+ vector = apicd->prev_vector;
+ if (!vector)
+ goto unlock;
/*
- * This is tricky. If the cleanup of @data->old_domain has not been
+ * This is tricky. If the cleanup of the old vector has not been
* done yet, then the following setaffinity call will fail with
* -EBUSY. This can leave the interrupt in a stale state.
*
* All CPUs are stuck in stop machine with interrupts disabled so
* calling __irq_complete_move() would be completely pointless.
- */
- raw_spin_lock(&vector_lock);
- /*
- * Clean out all offline cpus (including the outgoing one) from the
- * old_domain mask.
- */
- cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
-
- /*
- * If move_in_progress is cleared and the old_domain mask is empty,
- * then there is nothing to cleanup. fixup_irqs() will take care of
- * the stale vectors on the outgoing cpu.
- */
- if (!data->move_in_progress && cpumask_empty(data->old_domain)) {
- raw_spin_unlock(&vector_lock);
- return;
- }
-
- /*
+ *
* 1) The interrupt is in move_in_progress state. That means that we
* have not seen an interrupt since the io_apic was reprogrammed to
* the new vector.
@@ -740,7 +931,7 @@ void irq_force_complete_move(struct irq_desc *desc)
* 2) The interrupt has fired on the new vector, but the cleanup IPIs
* have not been processed yet.
*/
- if (data->move_in_progress) {
+ if (apicd->move_in_progress) {
/*
* In theory there is a race:
*
@@ -774,21 +965,43 @@ void irq_force_complete_move(struct irq_desc *desc)
* area arises.
*/
pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
- irqdata->irq, cfg->old_vector);
+ irqd->irq, vector);
}
- /*
- * If old_domain is not empty, then other cpus still have the irq
- * descriptor set in their vector array. Clean it up.
- */
- for_each_cpu(cpu, data->old_domain)
- per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED;
+ free_moved_vector(apicd);
+unlock:
+ raw_spin_unlock(&vector_lock);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Note, this is not accurate accounting, but at least good enough to
+ * prevent the actual interrupt move from running out of vectors.
+ */
+int lapic_can_unplug_cpu(void)
+{
+ unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
+ int ret = 0;
- /* Cleanup the left overs of the (half finished) move */
- cpumask_clear(data->old_domain);
- data->move_in_progress = 0;
+ raw_spin_lock(&vector_lock);
+ tomove = irq_matrix_allocated(vector_matrix);
+ avl = irq_matrix_available(vector_matrix, true);
+ if (avl < tomove) {
+ pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
+ cpu, tomove, avl);
+ ret = -ENOSPC;
+ goto out;
+ }
+ rsvd = irq_matrix_reserved(vector_matrix);
+ if (avl < rsvd) {
+ pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
+ rsvd, avl);
+ }
+out:
raw_spin_unlock(&vector_lock);
+ return ret;
}
-#endif
+#endif /* HOTPLUG_CPU */
+#endif /* SMP */
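lapic_can_unplug_cpu() above is a simple capacity check: the vectors currently allocated on the outgoing CPU must fit into what the remaining online CPUs can still absorb, while reserved-but-unallocated vectors only produce a warning. A small standalone sketch of the same arithmetic, with made-up numbers:

#include <stdio.h>

/* Returns 0 if the CPU may be unplugged, -1 if its vectors cannot be moved. */
static int can_unplug(unsigned int to_move, unsigned int available,
		      unsigned int reserved)
{
	if (available < to_move) {
		fprintf(stderr, "cannot unplug: %u vectors to move, only %u free\n",
			to_move, available);
		return -1;
	}
	if (available < reserved)
		fprintf(stderr, "warning: %u reserved > %u available, future requests may fail\n",
			reserved, available);
	return 0;
}

int main(void)
{
	printf("case 1: %d\n", can_unplug(12, 40, 8));   /* fits */
	printf("case 2: %d\n", can_unplug(50, 40, 8));   /* refused */
	printf("case 3: %d\n", can_unplug(10, 12, 20));  /* fits, but warns */
	return 0;
}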
static void __init print_APIC_field(int base)
{
diff --git a/arch/x86/kernel/apic/x2apic.h b/arch/x86/kernel/apic/x2apic.h
new file mode 100644
index 000000000000..b107de381cb5
--- /dev/null
+++ b/arch/x86/kernel/apic/x2apic.h
@@ -0,0 +1,9 @@
+/* Common bits for X2APIC cluster/physical modes. */
+
+int x2apic_apic_id_valid(int apicid);
+int x2apic_apic_id_registered(void);
+void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest);
+unsigned int x2apic_get_apic_id(unsigned long id);
+u32 x2apic_set_apic_id(unsigned int id);
+int x2apic_phys_pkg_id(int initial_apicid, int index_msb);
+void x2apic_send_IPI_self(int vector);
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index e216cf3d64d2..622f13ca8a94 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -9,22 +9,24 @@
#include <linux/cpu.h>
#include <asm/smp.h>
-#include <asm/x2apic.h>
+#include "x2apic.h"
+
+struct cluster_mask {
+ unsigned int clusterid;
+ int node;
+ struct cpumask mask;
+};
static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
-static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
+static DEFINE_PER_CPU(struct cluster_mask *, cluster_masks);
+static struct cluster_mask *cluster_hotplug_mask;
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
return x2apic_enabled();
}
-static inline u32 x2apic_cluster(int cpu)
-{
- return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
-}
-
static void x2apic_send_IPI(int cpu, int vector)
{
u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
@@ -36,49 +38,34 @@ static void x2apic_send_IPI(int cpu, int vector)
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
- struct cpumask *cpus_in_cluster_ptr;
- struct cpumask *ipi_mask_ptr;
- unsigned int cpu, this_cpu;
+ unsigned int cpu, clustercpu;
+ struct cpumask *tmpmsk;
unsigned long flags;
u32 dest;
x2apic_wrmsr_fence();
-
local_irq_save(flags);
- this_cpu = smp_processor_id();
+ tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
+ cpumask_copy(tmpmsk, mask);
+ /* If IPI should not be sent to self, clear current CPU */
+ if (apic_dest != APIC_DEST_ALLINC)
+ cpumask_clear_cpu(smp_processor_id(), tmpmsk);
- /*
- * We are to modify mask, so we need an own copy
- * and be sure it's manipulated with irq off.
- */
- ipi_mask_ptr = this_cpu_cpumask_var_ptr(ipi_mask);
- cpumask_copy(ipi_mask_ptr, mask);
-
- /*
- * The idea is to send one IPI per cluster.
- */
- for_each_cpu(cpu, ipi_mask_ptr) {
- unsigned long i;
+ /* Collapse cpus in a cluster so a single IPI per cluster is sent */
+ for_each_cpu(cpu, tmpmsk) {
+ struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);
- cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
dest = 0;
-
- /* Collect cpus in cluster. */
- for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
- if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
- dest |= per_cpu(x86_cpu_to_logical_apicid, i);
- }
+ for_each_cpu_and(clustercpu, tmpmsk, &cmsk->mask)
+ dest |= per_cpu(x86_cpu_to_logical_apicid, clustercpu);
if (!dest)
continue;
__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
- /*
- * Cluster sibling cpus should be discared now so
- * we would not send IPI them second time.
- */
- cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
+ /* Remove cluster CPUs from tmpmask */
+ cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
}
local_irq_restore(flags);
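The rewritten __x2apic_send_IPI_mask() collapses the target mask cluster by cluster: it ORs the logical APIC IDs of all targeted CPUs in one cluster into a single destination, sends one IPI, then strips that whole cluster from the working mask. The same collapsing loop over plain bitmasks, with invented cluster membership and logical IDs:

#include <stdio.h>

#define NR_CPUS 8

/* Per-CPU cluster id and logical APIC id (one bit per CPU in its cluster). */
static const int      cluster_of[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };
static const unsigned logical_id[NR_CPUS] = { 1, 2, 4, 8, 1, 2, 4, 8 };

static void send_ipi_mask(unsigned target_cpus)
{
	unsigned remaining = target_cpus;

	while (remaining) {
		/* Pick the first remaining CPU and its cluster. */
		int first = __builtin_ctz(remaining);
		int clu = cluster_of[first];
		unsigned dest = 0, in_cluster = 0;

		/* OR together the logical IDs of all targets in that cluster. */
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			if ((remaining & (1u << cpu)) && cluster_of[cpu] == clu) {
				dest |= logical_id[cpu];
				in_cluster |= 1u << cpu;
			}
		}
		printf("one IPI to cluster %d, dest bits 0x%x\n", clu, dest);

		/* Remove the whole cluster from the working mask. */
		remaining &= ~in_cluster;
	}
}

int main(void)
{
	send_ipi_mask(0x36);   /* CPUs 1,2,4,5 -> exactly two IPIs */
	return 0;
}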
@@ -105,125 +92,90 @@ static void x2apic_send_IPI_all(int vector)
__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}
-static int
-x2apic_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqdata,
- unsigned int *apicid)
+static u32 x2apic_calc_apicid(unsigned int cpu)
{
- struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqdata);
- unsigned int cpu;
- u32 dest = 0;
- u16 cluster;
-
- cpu = cpumask_first(mask);
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
-
- dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
- cluster = x2apic_cluster(cpu);
-
- cpumask_clear(effmsk);
- for_each_cpu(cpu, mask) {
- if (cluster != x2apic_cluster(cpu))
- continue;
- dest |= per_cpu(x86_cpu_to_logical_apicid, cpu);
- cpumask_set_cpu(cpu, effmsk);
- }
-
- *apicid = dest;
- return 0;
+ return per_cpu(x86_cpu_to_logical_apicid, cpu);
}
static void init_x2apic_ldr(void)
{
- unsigned int this_cpu = smp_processor_id();
+ struct cluster_mask *cmsk = this_cpu_read(cluster_masks);
+ u32 cluster, apicid = apic_read(APIC_LDR);
unsigned int cpu;
- per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);
+ this_cpu_write(x86_cpu_to_logical_apicid, apicid);
+
+ if (cmsk)
+ goto update;
- cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
+ cluster = apicid >> 16;
for_each_online_cpu(cpu) {
- if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
- continue;
- cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
- cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
+ cmsk = per_cpu(cluster_masks, cpu);
+ /* Matching cluster found. Link and update it. */
+ if (cmsk && cmsk->clusterid == cluster)
+ goto update;
}
+ cmsk = cluster_hotplug_mask;
+ cluster_hotplug_mask = NULL;
+update:
+ this_cpu_write(cluster_masks, cmsk);
+ cpumask_set_cpu(smp_processor_id(), &cmsk->mask);
}
-/*
- * At CPU state changes, update the x2apic cluster sibling info.
- */
-static int x2apic_prepare_cpu(unsigned int cpu)
+static int alloc_clustermask(unsigned int cpu, int node)
{
- if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
- return -ENOMEM;
+ if (per_cpu(cluster_masks, cpu))
+ return 0;
+ /*
+ * If a hotplug spare mask exists, check whether it's on the right
+ * node. If not, free it and allocate a new one.
+ */
+ if (cluster_hotplug_mask) {
+ if (cluster_hotplug_mask->node == node)
+ return 0;
+ kfree(cluster_hotplug_mask);
+ }
- if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) {
- free_cpumask_var(per_cpu(cpus_in_cluster, cpu));
+ cluster_hotplug_mask = kzalloc_node(sizeof(*cluster_hotplug_mask),
+ GFP_KERNEL, node);
+ if (!cluster_hotplug_mask)
return -ENOMEM;
- }
+ cluster_hotplug_mask->node = node;
+ return 0;
+}
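alloc_clustermask() keeps a single pre-allocated "hotplug spare" around so that a CPU coming up on the same NUMA node never has to allocate in the hotplug path; only a node mismatch forces a fresh allocation. A rough, simplified userspace sketch of that caching idea (the struct layout and node numbers are illustrative, and the spare is consumed directly here instead of being kept for the bringup path):

#include <stdio.h>
#include <stdlib.h>

struct cluster_mask {
	int node;
	/* the real structure also carries a cluster id and a cpumask */
};

static struct cluster_mask *spare;

/* Return a cluster_mask for @node, reusing the cached spare if it fits. */
static struct cluster_mask *get_clustermask(int node)
{
	if (spare && spare->node == node) {
		struct cluster_mask *m = spare;
		spare = NULL;
		printf("reused spare for node %d\n", node);
		return m;
	}
	free(spare);                       /* wrong node (or none): start over */
	spare = NULL;

	struct cluster_mask *m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;
	m->node = node;
	printf("allocated new mask for node %d\n", node);
	return m;
}

int main(void)
{
	spare = calloc(1, sizeof(*spare));
	spare->node = 0;

	struct cluster_mask *a = get_clustermask(0);   /* spare reused      */
	struct cluster_mask *b = get_clustermask(1);   /* fresh allocation  */
	free(a);
	free(b);
	return 0;
}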
+static int x2apic_prepare_cpu(unsigned int cpu)
+{
+ if (alloc_clustermask(cpu, cpu_to_node(cpu)) < 0)
+ return -ENOMEM;
+ if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
+ return -ENOMEM;
return 0;
}
-static int x2apic_dead_cpu(unsigned int this_cpu)
+static int x2apic_dead_cpu(unsigned int dead_cpu)
{
- int cpu;
+ struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
- for_each_online_cpu(cpu) {
- if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
- continue;
- cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
- cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
- }
- free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
- free_cpumask_var(per_cpu(ipi_mask, this_cpu));
+ cpumask_clear_cpu(dead_cpu, &cmsk->mask);
+ free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
return 0;
}
static int x2apic_cluster_probe(void)
{
- int cpu = smp_processor_id();
- int ret;
-
if (!x2apic_mode)
return 0;
- ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
- x2apic_prepare_cpu, x2apic_dead_cpu);
- if (ret < 0) {
+ if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
+ x2apic_prepare_cpu, x2apic_dead_cpu) < 0) {
pr_err("Failed to register X2APIC_PREPARE\n");
return 0;
}
- cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
+ init_x2apic_ldr();
return 1;
}
-static const struct cpumask *x2apic_cluster_target_cpus(void)
-{
- return cpu_all_mask;
-}
-
-/*
- * Each x2apic cluster is an allocation domain.
- */
-static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
- const struct cpumask *mask)
-{
- /*
- * To minimize vector pressure, default case of boot, device bringup
- * etc will use a single cpu for the interrupt destination.
- *
- * On explicit migration requests coming from irqbalance etc,
- * interrupts will be routed to the x2apic cluster (cluster-id
- * derived from the first cpu in the mask) members specified
- * in the mask.
- */
- if (mask == x2apic_cluster_target_cpus())
- cpumask_copy(retmask, cpumask_of(cpu));
- else
- cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
-}
-
static struct apic apic_x2apic_cluster __ro_after_init = {
.name = "cluster x2apic",
@@ -235,12 +187,10 @@ static struct apic apic_x2apic_cluster __ro_after_init = {
.irq_delivery_mode = dest_LowestPrio,
.irq_dest_mode = 1, /* logical */
- .target_cpus = x2apic_cluster_target_cpus,
.disable_esr = 0,
.dest_logical = APIC_DEST_LOGICAL,
.check_apicid_used = NULL,
- .vector_allocation_domain = cluster_vector_allocation_domain,
.init_apic_ldr = init_x2apic_ldr,
.ioapic_phys_id_map = NULL,
@@ -253,7 +203,7 @@ static struct apic apic_x2apic_cluster __ro_after_init = {
.get_apic_id = x2apic_get_apic_id,
.set_apic_id = x2apic_set_apic_id,
- .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
+ .calc_dest_apicid = x2apic_calc_apicid,
.send_IPI = x2apic_send_IPI,
.send_IPI_mask = x2apic_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index b94d35320f85..f8d9d69994e6 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -7,7 +7,8 @@
#include <linux/dmar.h>
#include <asm/smp.h>
-#include <asm/x2apic.h>
+#include <asm/ipi.h>
+#include "x2apic.h"
int x2apic_phys;
@@ -99,6 +100,43 @@ static int x2apic_phys_probe(void)
return apic == &apic_x2apic_phys;
}
+/* Common x2apic functions, also used by x2apic_cluster */
+int x2apic_apic_id_valid(int apicid)
+{
+ return 1;
+}
+
+int x2apic_apic_id_registered(void)
+{
+ return 1;
+}
+
+void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
+{
+ unsigned long cfg = __prepare_ICR(0, vector, dest);
+ native_x2apic_icr_write(cfg, apicid);
+}
+
+unsigned int x2apic_get_apic_id(unsigned long id)
+{
+ return id;
+}
+
+u32 x2apic_set_apic_id(unsigned int id)
+{
+ return id;
+}
+
+int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
+{
+ return initial_apicid >> index_msb;
+}
+
+void x2apic_send_IPI_self(int vector)
+{
+ apic_write(APIC_SELF_IPI, vector);
+}
+
static struct apic apic_x2apic_phys __ro_after_init = {
.name = "physical x2apic",
@@ -110,12 +148,10 @@ static struct apic apic_x2apic_phys __ro_after_init = {
.irq_delivery_mode = dest_Fixed,
.irq_dest_mode = 0, /* physical */
- .target_cpus = online_target_cpus,
.disable_esr = 0,
.dest_logical = 0,
.check_apicid_used = NULL,
- .vector_allocation_domain = default_vector_allocation_domain,
.init_apic_ldr = init_x2apic_ldr,
.ioapic_phys_id_map = NULL,
@@ -128,7 +164,7 @@ static struct apic apic_x2apic_phys __ro_after_init = {
.get_apic_id = x2apic_get_apic_id,
.set_apic_id = x2apic_set_apic_id,
- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_default_calc_apicid,
.send_IPI = x2apic_send_IPI,
.send_IPI_mask = x2apic_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 0d57bb9079c9..e1b8e8bf6b3c 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -154,6 +154,48 @@ static int __init early_get_pnodeid(void)
return pnode;
}
+static void __init uv_tsc_check_sync(void)
+{
+ u64 mmr;
+ int sync_state;
+ int mmr_shift;
+ char *state;
+ bool valid;
+
+ /* Accommodate different UV arch BIOSes */
+ mmr = uv_early_read_mmr(UVH_TSC_SYNC_MMR);
+ mmr_shift =
+ is_uv1_hub() ? 0 :
+ is_uv2_hub() ? UVH_TSC_SYNC_SHIFT_UV2K : UVH_TSC_SYNC_SHIFT;
+ if (mmr_shift)
+ sync_state = (mmr >> mmr_shift) & UVH_TSC_SYNC_MASK;
+ else
+ sync_state = 0;
+
+ switch (sync_state) {
+ case UVH_TSC_SYNC_VALID:
+ state = "in sync";
+ valid = true;
+ break;
+
+ case UVH_TSC_SYNC_INVALID:
+ state = "unstable";
+ valid = false;
+ break;
+ default:
+ state = "unknown: assuming valid";
+ valid = true;
+ break;
+ }
+ pr_info("UV: TSC sync state from BIOS:0%d(%s)\n", sync_state, state);
+
+ /* Mark flag that says TSC != 0 is valid for socket 0 */
+ if (valid)
+ mark_tsc_async_resets("UV BIOS");
+ else
+ mark_tsc_unstable("UV BIOS");
+}
+
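uv_tsc_check_sync() is essentially "read an MMR, shift out a small status field, and map it onto a tri-state decision". A compact sketch of that decode step; the shift, mask and state values are placeholders, not the real UVH_* constants:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define SYNC_SHIFT   10        /* placeholder for the real UVH_TSC_SYNC_SHIFT */
#define SYNC_MASK    0x3
#define SYNC_VALID   1
#define SYNC_INVALID 2

static bool tsc_sync_valid(uint64_t mmr, int shift)
{
	int state = shift ? (int)((mmr >> shift) & SYNC_MASK) : 0;

	switch (state) {
	case SYNC_VALID:
		printf("TSC state %d: in sync\n", state);
		return true;
	case SYNC_INVALID:
		printf("TSC state %d: unstable\n", state);
		return false;
	default:
		printf("TSC state %d: unknown, assuming valid\n", state);
		return true;
	}
}

int main(void)
{
	tsc_sync_valid((uint64_t)SYNC_VALID << SYNC_SHIFT, SYNC_SHIFT);
	tsc_sync_valid((uint64_t)SYNC_INVALID << SYNC_SHIFT, SYNC_SHIFT);
	tsc_sync_valid(0, 0);   /* BIOS without the field: treated as unknown */
	return 0;
}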
/* [Copied from arch/x86/kernel/cpu/topology.c:detect_extended_topology()] */
#define SMT_LEVEL 0 /* Leaf 0xb SMT level */
@@ -288,6 +330,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
}
pr_info("UV: OEM IDs %s/%s, System/HUB Types %d/%d, uv_apic %d\n", oem_id, oem_table_id, uv_system_type, uv_min_hub_revision_id, uv_apic);
+ uv_tsc_check_sync();
return uv_apic;
@@ -525,16 +568,9 @@ static void uv_init_apic_ldr(void)
{
}
-static int
-uv_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqdata,
- unsigned int *apicid)
+static u32 apic_uv_calc_apicid(unsigned int cpu)
{
- int ret = default_cpu_mask_to_apicid(mask, irqdata, apicid);
-
- if (!ret)
- *apicid |= uv_apicid_hibits;
-
- return ret;
+ return apic_default_calc_apicid(cpu) | uv_apicid_hibits;
}
static unsigned int x2apic_get_apic_id(unsigned long x)
@@ -547,7 +583,7 @@ static unsigned int x2apic_get_apic_id(unsigned long x)
return id;
}
-static unsigned long set_apic_id(unsigned int id)
+static u32 set_apic_id(unsigned int id)
{
/* CHECKME: Do we need to mask out the xapic extra bits? */
return id;
@@ -584,12 +620,10 @@ static struct apic apic_x2apic_uv_x __ro_after_init = {
.irq_delivery_mode = dest_Fixed,
.irq_dest_mode = 0, /* Physical */
- .target_cpus = online_target_cpus,
.disable_esr = 0,
.dest_logical = APIC_DEST_LOGICAL,
.check_apicid_used = NULL,
- .vector_allocation_domain = default_vector_allocation_domain,
.init_apic_ldr = uv_init_apic_ldr,
.ioapic_phys_id_map = NULL,
@@ -602,7 +636,7 @@ static struct apic apic_x2apic_uv_x __ro_after_init = {
.get_apic_id = x2apic_get_apic_id,
.set_apic_id = set_apic_id,
- .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_uv_calc_apicid,
.send_IPI = uv_send_IPI_one,
.send_IPI_mask = uv_send_IPI_mask,
@@ -920,9 +954,8 @@ static __init void uv_rtc_init(void)
/*
* percpu heartbeat timer
*/
-static void uv_heartbeat(unsigned long ignored)
+static void uv_heartbeat(struct timer_list *timer)
{
- struct timer_list *timer = &uv_scir_info->timer;
unsigned char bits = uv_scir_info->state;
/* Flip heartbeat bit: */
@@ -947,7 +980,7 @@ static int uv_heartbeat_enable(unsigned int cpu)
struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
- setup_pinned_timer(timer, uv_heartbeat, cpu);
+ timer_setup(timer, uv_heartbeat, TIMER_PINNED);
timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
add_timer_on(timer, cpu);
uv_cpu_scir_info(cpu)->enabled = 1;
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index c60922a66385..90cb82dbba57 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -23,6 +23,7 @@ obj-y += rdrand.o
obj-y += match.o
obj-y += bugs.o
obj-$(CONFIG_CPU_FREQ) += aperfmperf.o
+obj-y += cpuid-deps.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 0ee83321a313..957813e0180d 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -42,10 +42,6 @@ static void aperfmperf_snapshot_khz(void *dummy)
s64 time_delta = ktime_ms_delta(now, s->time);
unsigned long flags;
- /* Don't bother re-computing within the cache threshold time. */
- if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
- return;
-
local_irq_save(flags);
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
@@ -74,6 +70,7 @@ static void aperfmperf_snapshot_khz(void *dummy)
unsigned int arch_freq_get_on_cpu(int cpu)
{
+ s64 time_delta;
unsigned int khz;
if (!cpu_khz)
@@ -82,6 +79,12 @@ unsigned int arch_freq_get_on_cpu(int cpu)
if (!static_cpu_has(X86_FEATURE_APERFMPERF))
return 0;
+ /* Don't bother re-computing within the cache threshold time. */
+ time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu));
+ khz = per_cpu(samples.khz, cpu);
+ if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
+ return khz;
+
smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
khz = per_cpu(samples.khz, cpu);
if (khz)
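The aperfmperf change moves the "is the cached sample still fresh?" check out of the IPI handler and into arch_freq_get_on_cpu(), so the cross-CPU call is only made when the cached kHz value is stale. The pattern is ordinary timestamp-based caching; a standalone sketch under that assumption, where the threshold and the expensive resample are stand-ins:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>

#define CACHE_MS 10            /* stand-in for APERFMPERF_CACHE_THRESHOLD_MS */

static unsigned int cached_khz;
static long long cached_at_ms;

static long long now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

static unsigned int expensive_sample(void)
{
	/* stands in for smp_call_function_single() plus the MSR reads */
	return 2400000;
}

static unsigned int get_freq_khz(void)
{
	long long now = now_ms();

	/* Serve from the cache while the last sample is recent enough. */
	if (cached_khz && now - cached_at_ms < CACHE_MS)
		return cached_khz;

	cached_khz = expensive_sample();
	cached_at_ms = now;
	return cached_khz;
}

int main(void)
{
	printf("first call:  %u kHz (sampled)\n", get_freq_khz());
	printf("second call: %u kHz (cached)\n", get_freq_khz());
	return 0;
}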
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c9176bae7fd8..13ae9e5eec2f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -329,6 +329,28 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
}
}
+static __always_inline void setup_umip(struct cpuinfo_x86 *c)
+{
+ /* Check the boot processor, plus build option for UMIP. */
+ if (!cpu_feature_enabled(X86_FEATURE_UMIP))
+ goto out;
+
+ /* Check the current processor's cpuid bits. */
+ if (!cpu_has(c, X86_FEATURE_UMIP))
+ goto out;
+
+ cr4_set_bits(X86_CR4_UMIP);
+
+ return;
+
+out:
+ /*
+ * Make sure UMIP is disabled in case it was enabled in a
+ * previous boot (e.g., via kexec).
+ */
+ cr4_clear_bits(X86_CR4_UMIP);
+}
+
/*
* Protection Keys are not available in 32-bit mode.
*/
@@ -863,8 +885,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
* cache alignment.
* The others are not touched to avoid unwanted side effects.
*
- * WARNING: this function is only called on the BP. Don't add code here
- * that is supposed to run on all CPUs.
+ * WARNING: this function is only called on the boot CPU. Don't add code
+ * here that is supposed to run on all CPUs.
*/
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
@@ -1147,9 +1169,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
/* Disable the PN if appropriate */
squash_the_stupid_serial_number(c);
- /* Set up SMEP/SMAP */
+ /* Set up SMEP/SMAP/UMIP */
setup_smep(c);
setup_smap(c);
+ setup_umip(c);
/*
* The vendor-specific functions might have changed features.
@@ -1301,18 +1324,16 @@ void print_cpu_info(struct cpuinfo_x86 *c)
pr_cont(")\n");
}
-static __init int setup_disablecpuid(char *arg)
+/*
+ * clearcpuid= was already parsed in fpu__init_parse_early_param.
+ * But we need to keep a dummy __setup around otherwise it would
+ * show up as an environment variable for init.
+ */
+static __init int setup_clearcpuid(char *arg)
{
- int bit;
-
- if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
- setup_clear_cpu_cap(bit);
- else
- return 0;
-
return 1;
}
-__setup("clearcpuid=", setup_disablecpuid);
+__setup("clearcpuid=", setup_clearcpuid);
#ifdef CONFIG_X86_64
DEFINE_PER_CPU_FIRST(union irq_stack_union,
@@ -1572,9 +1593,13 @@ void cpu_init(void)
initialize_tlbstate_and_flush();
enter_lazy_tlb(&init_mm, me);
- load_sp0(t, &current->thread);
+ /*
+ * Initialize the TSS. Don't bother initializing sp0, as the initial
+ * task never enters user mode.
+ */
set_tss_desc(cpu, t);
load_TR_desc();
+
load_mm_ldt(&init_mm);
clear_all_debug_regs();
@@ -1596,7 +1621,6 @@ void cpu_init(void)
int cpu = smp_processor_id();
struct task_struct *curr = current;
struct tss_struct *t = &per_cpu(cpu_tss, cpu);
- struct thread_struct *thread = &curr->thread;
wait_for_master_cpu(cpu);
@@ -1627,9 +1651,13 @@ void cpu_init(void)
initialize_tlbstate_and_flush();
enter_lazy_tlb(&init_mm, curr);
- load_sp0(t, thread);
+ /*
+ * Initialize the TSS. Don't bother initializing sp0, as the initial
+ * task never enters user mode.
+ */
set_tss_desc(cpu, t);
load_TR_desc();
+
load_mm_ldt(&init_mm);
t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
new file mode 100644
index 000000000000..904b0a3c4e53
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -0,0 +1,121 @@
+/* Declare dependencies between CPUIDs */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <asm/cpufeature.h>
+
+struct cpuid_dep {
+ unsigned int feature;
+ unsigned int depends;
+};
+
+/*
+ * Table of CPUID features that depend on others.
+ *
+ * This only includes dependencies that can be usefully disabled, not
+ * features that are part of the base set (like FPU).
+ *
+ * Note this all is not __init / __initdata because it can be
+ * called from cpu hotplug. It shouldn't do anything in this case,
+ * but it's difficult to tell that to the init reference checker.
+ */
+const static struct cpuid_dep cpuid_deps[] = {
+ { X86_FEATURE_XSAVEOPT, X86_FEATURE_XSAVE },
+ { X86_FEATURE_XSAVEC, X86_FEATURE_XSAVE },
+ { X86_FEATURE_XSAVES, X86_FEATURE_XSAVE },
+ { X86_FEATURE_AVX, X86_FEATURE_XSAVE },
+ { X86_FEATURE_PKU, X86_FEATURE_XSAVE },
+ { X86_FEATURE_MPX, X86_FEATURE_XSAVE },
+ { X86_FEATURE_XGETBV1, X86_FEATURE_XSAVE },
+ { X86_FEATURE_FXSR_OPT, X86_FEATURE_FXSR },
+ { X86_FEATURE_XMM, X86_FEATURE_FXSR },
+ { X86_FEATURE_XMM2, X86_FEATURE_XMM },
+ { X86_FEATURE_XMM3, X86_FEATURE_XMM2 },
+ { X86_FEATURE_XMM4_1, X86_FEATURE_XMM2 },
+ { X86_FEATURE_XMM4_2, X86_FEATURE_XMM2 },
+ { X86_FEATURE_XMM3, X86_FEATURE_XMM2 },
+ { X86_FEATURE_PCLMULQDQ, X86_FEATURE_XMM2 },
+ { X86_FEATURE_SSSE3, X86_FEATURE_XMM2, },
+ { X86_FEATURE_F16C, X86_FEATURE_XMM2, },
+ { X86_FEATURE_AES, X86_FEATURE_XMM2 },
+ { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
+ { X86_FEATURE_FMA, X86_FEATURE_AVX },
+ { X86_FEATURE_AVX2, X86_FEATURE_AVX, },
+ { X86_FEATURE_AVX512F, X86_FEATURE_AVX, },
+ { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512PF, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512ER, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512CD, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512DQ, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512BW, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
+ { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL },
+ { X86_FEATURE_VAES, X86_FEATURE_AVX512VL },
+ { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL },
+ { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
+ { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
+ { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F },
+ {}
+};
+
+static inline void clear_feature(struct cpuinfo_x86 *c, unsigned int feature)
+{
+ /*
+ * Note: This could use the non-atomic __*_bit() variants, but the
+ * rest of the cpufeature code uses atomics as well, so keep it for
+ * consistency. Clean up all of it separately.
+ */
+ if (!c) {
+ clear_cpu_cap(&boot_cpu_data, feature);
+ set_bit(feature, (unsigned long *)cpu_caps_cleared);
+ } else {
+ clear_bit(feature, (unsigned long *)c->x86_capability);
+ }
+}
+
+/* Take the capabilities and the BUG bits into account */
+#define MAX_FEATURE_BITS ((NCAPINTS + NBUGINTS) * sizeof(u32) * 8)
+
+static void do_clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature)
+{
+ DECLARE_BITMAP(disable, MAX_FEATURE_BITS);
+ const struct cpuid_dep *d;
+ bool changed;
+
+ if (WARN_ON(feature >= MAX_FEATURE_BITS))
+ return;
+
+ clear_feature(c, feature);
+
+ /* Collect all features to disable, handling dependencies */
+ memset(disable, 0, sizeof(disable));
+ __set_bit(feature, disable);
+
+ /* Loop until we get a stable state. */
+ do {
+ changed = false;
+ for (d = cpuid_deps; d->feature; d++) {
+ if (!test_bit(d->depends, disable))
+ continue;
+ if (__test_and_set_bit(d->feature, disable))
+ continue;
+
+ changed = true;
+ clear_feature(c, d->feature);
+ }
+ } while (changed);
+}
+
+void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int feature)
+{
+ do_clear_cpu_cap(c, feature);
+}
+
+void setup_clear_cpu_cap(unsigned int feature)
+{
+ do_clear_cpu_cap(NULL, feature);
+}
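do_clear_cpu_cap() computes the transitive closure of the dependency table with a simple fixed-point loop: keep sweeping the table until no new feature gets added to the disable set. The same algorithm in a self-contained userspace form; the feature names and the toy table below are illustrative, not the X86_FEATURE_* bits:

#include <stdio.h>
#include <stdbool.h>

enum { FT_XSAVE, FT_AVX, FT_AVX2, FT_AVX512F, FT_FMA, FT_MAX };

static const char *name[FT_MAX] = {
	"xsave", "avx", "avx2", "avx512f", "fma",
};

struct dep { int feature, depends_on; };

static const struct dep deps[] = {
	{ FT_AVX,     FT_XSAVE },
	{ FT_AVX2,    FT_AVX   },
	{ FT_AVX512F, FT_AVX   },
	{ FT_FMA,     FT_AVX   },
	{ -1, -1 },
};

/* Disable @feature plus everything that (transitively) depends on it. */
static unsigned int clear_with_deps(int feature)
{
	unsigned int disable = 1u << feature;
	bool changed;

	do {
		changed = false;
		for (const struct dep *d = deps; d->feature >= 0; d++) {
			if (!(disable & (1u << d->depends_on)))
				continue;              /* dependency not disabled */
			if (disable & (1u << d->feature))
				continue;              /* already marked */
			disable |= 1u << d->feature;
			changed = true;
		}
	} while (changed);

	return disable;
}

int main(void)
{
	unsigned int off = clear_with_deps(FT_XSAVE);

	for (int f = 0; f < FT_MAX; f++)
		if (off & (1u << f))
			printf("disabled: %s\n", name[f]);
	return 0;
}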
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 4fa90006ac68..bea8d3e24f50 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -26,6 +26,12 @@
#include <asm/processor.h>
#include <asm/hypervisor.h>
+extern const struct hypervisor_x86 x86_hyper_vmware;
+extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
+extern const struct hypervisor_x86 x86_hyper_xen_pv;
+extern const struct hypervisor_x86 x86_hyper_xen_hvm;
+extern const struct hypervisor_x86 x86_hyper_kvm;
+
static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
#ifdef CONFIG_XEN_PV
@@ -41,54 +47,52 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] =
#endif
};
-const struct hypervisor_x86 *x86_hyper;
-EXPORT_SYMBOL(x86_hyper);
+enum x86_hypervisor_type x86_hyper_type;
+EXPORT_SYMBOL(x86_hyper_type);
-static inline void __init
+static inline const struct hypervisor_x86 * __init
detect_hypervisor_vendor(void)
{
- const struct hypervisor_x86 *h, * const *p;
+ const struct hypervisor_x86 *h = NULL, * const *p;
uint32_t pri, max_pri = 0;
for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) {
- h = *p;
- pri = h->detect();
- if (pri != 0 && pri > max_pri) {
+ pri = (*p)->detect();
+ if (pri > max_pri) {
max_pri = pri;
- x86_hyper = h;
+ h = *p;
}
}
- if (max_pri)
- pr_info("Hypervisor detected: %s\n", x86_hyper->name);
+ if (h)
+ pr_info("Hypervisor detected: %s\n", h->name);
+
+ return h;
}
-void __init init_hypervisor_platform(void)
+static void __init copy_array(const void *src, void *target, unsigned int size)
{
+ unsigned int i, n = size / sizeof(void *);
+ const void * const *from = (const void * const *)src;
+ const void **to = (const void **)target;
- detect_hypervisor_vendor();
-
- if (!x86_hyper)
- return;
-
- if (x86_hyper->init_platform)
- x86_hyper->init_platform();
+ for (i = 0; i < n; i++)
+ if (from[i])
+ to[i] = from[i];
}
-bool __init hypervisor_x2apic_available(void)
+void __init init_hypervisor_platform(void)
{
- return x86_hyper &&
- x86_hyper->x2apic_available &&
- x86_hyper->x2apic_available();
-}
+ const struct hypervisor_x86 *h;
-void hypervisor_pin_vcpu(int cpu)
-{
- if (!x86_hyper)
+ h = detect_hypervisor_vendor();
+
+ if (!h)
return;
- if (x86_hyper->pin_vcpu)
- x86_hyper->pin_vcpu(cpu);
- else
- WARN_ONCE(1, "vcpu pinning requested but not supported!\n");
+ copy_array(&h->init, &x86_init.hyper, sizeof(h->init));
+ copy_array(&h->runtime, &x86_platform.hyper, sizeof(h->runtime));
+
+ x86_hyper_type = h->type;
+ x86_init.hyper.init_platform();
}
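detect_hypervisor_vendor() now returns the table entry whose detect() callback reports the highest non-zero priority instead of stashing the winner in a global as it goes. The selection itself is a plain max-scan over a callback table; a userspace sketch with invented names and priorities:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct hyper {
	const char *name;
	uint32_t (*detect)(void);   /* 0 = not running on this hypervisor */
};

static uint32_t detect_none(void)   { return 0; }
static uint32_t detect_weak(void)   { return 1; }
static uint32_t detect_strong(void) { return 200; }

static const struct hyper hypervisors[] = {
	{ "Xen",    detect_none   },
	{ "KVM",    detect_weak   },
	{ "VMware", detect_strong },
};

static const struct hyper *detect_vendor(void)
{
	const struct hyper *best = NULL;
	uint32_t max_pri = 0;

	for (size_t i = 0; i < sizeof(hypervisors) / sizeof(hypervisors[0]); i++) {
		uint32_t pri = hypervisors[i].detect();

		if (pri > max_pri) {
			max_pri = pri;
			best = &hypervisors[i];
		}
	}
	return best;
}

int main(void)
{
	const struct hyper *h = detect_vendor();

	if (h)
		printf("Hypervisor detected: %s\n", h->name);
	else
		printf("Running on bare metal\n");
	return 0;
}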
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b720dacac051..b1af22073e28 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -187,21 +187,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
if (c->x86 == 6 && c->x86_model < 15)
clear_cpu_cap(c, X86_FEATURE_PAT);
-#ifdef CONFIG_KMEMCHECK
- /*
- * P4s have a "fast strings" feature which causes single-
- * stepping REP instructions to only generate a #DB on
- * cache-line boundaries.
- *
- * Ingo Molnar reported a Pentium D (model 6) and a Xeon
- * (model 2) with the same problem.
- */
- if (c->x86 == 15)
- if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
- MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
- pr_info("kmemcheck: Disabling fast string operations\n");
-#endif
-
/*
* If fast string is not enabled in IA32_MISC_ENABLE for any reason,
* clear the fast string and enhanced fast string CPU capabilities.
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index cd5fc61ba450..88dcf8479013 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -267,6 +267,7 @@ static void rdt_get_cdp_l3_config(int type)
r->num_closid = r_l3->num_closid / 2;
r->cache.cbm_len = r_l3->cache.cbm_len;
r->default_ctrl = r_l3->default_ctrl;
+ r->cache.shareable_bits = r_l3->cache.shareable_bits;
r->data_width = (r->cache.cbm_len + 3) / 4;
r->alloc_capable = true;
/*
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index a43a72d8e88e..3397244984f5 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -127,12 +127,15 @@ struct rdtgroup {
#define RFTYPE_BASE BIT(1)
#define RF_CTRLSHIFT 4
#define RF_MONSHIFT 5
+#define RF_TOPSHIFT 6
#define RFTYPE_CTRL BIT(RF_CTRLSHIFT)
#define RFTYPE_MON BIT(RF_MONSHIFT)
+#define RFTYPE_TOP BIT(RF_TOPSHIFT)
#define RFTYPE_RES_CACHE BIT(8)
#define RFTYPE_RES_MB BIT(9)
#define RF_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL)
#define RF_MON_INFO (RFTYPE_INFO | RFTYPE_MON)
+#define RF_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP)
#define RF_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL)
/* List of all resource groups */
@@ -409,6 +412,10 @@ union cpuid_0x10_x_edx {
unsigned int full;
};
+void rdt_last_cmd_clear(void);
+void rdt_last_cmd_puts(const char *s);
+void rdt_last_cmd_printf(const char *fmt, ...);
+
void rdt_ctrl_update(void *arg);
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
void rdtgroup_kn_unlock(struct kernfs_node *kn);
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
index f6ea94f8954a..23e1d5c249c6 100644
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
@@ -42,15 +42,22 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
/*
* Only linear delay values is supported for current Intel SKUs.
*/
- if (!r->membw.delay_linear)
+ if (!r->membw.delay_linear) {
+ rdt_last_cmd_puts("No support for non-linear MB domains\n");
return false;
+ }
ret = kstrtoul(buf, 10, &bw);
- if (ret)
+ if (ret) {
+ rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
return false;
+ }
- if (bw < r->membw.min_bw || bw > r->default_ctrl)
+ if (bw < r->membw.min_bw || bw > r->default_ctrl) {
+ rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
+ r->membw.min_bw, r->default_ctrl);
return false;
+ }
*data = roundup(bw, (unsigned long)r->membw.bw_gran);
return true;
@@ -60,8 +67,10 @@ int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d)
{
unsigned long data;
- if (d->have_new_ctrl)
+ if (d->have_new_ctrl) {
+ rdt_last_cmd_printf("duplicate domain %d\n", d->id);
return -EINVAL;
+ }
if (!bw_validate(buf, &data, r))
return -EINVAL;
@@ -84,20 +93,29 @@ static bool cbm_validate(char *buf, unsigned long *data, struct rdt_resource *r)
int ret;
ret = kstrtoul(buf, 16, &val);
- if (ret)
+ if (ret) {
+ rdt_last_cmd_printf("non-hex character in mask %s\n", buf);
return false;
+ }
- if (val == 0 || val > r->default_ctrl)
+ if (val == 0 || val > r->default_ctrl) {
+ rdt_last_cmd_puts("mask out of range\n");
return false;
+ }
first_bit = find_first_bit(&val, cbm_len);
zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
- if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)
+ if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) {
+ rdt_last_cmd_printf("mask %lx has non-consecutive 1-bits\n", val);
return false;
+ }
- if ((zero_bit - first_bit) < r->cache.min_cbm_bits)
+ if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
+ rdt_last_cmd_printf("Need at least %d bits in mask\n",
+ r->cache.min_cbm_bits);
return false;
+ }
*data = val;
return true;
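cbm_validate() checks that a cache bit mask is non-zero, within range, made of one contiguous run of 1-bits, and at least min_cbm_bits wide. The contiguity test (find the first set bit, find the next zero, make sure no further set bit follows) can be reproduced with compiler builtins; a standalone sketch under those assumptions:

#include <stdio.h>
#include <stdbool.h>

static bool cbm_ok(unsigned long val, int cbm_len, int min_bits,
		   unsigned long max_val)
{
	if (val == 0 || val > max_val) {
		printf("0x%lx: out of range\n", val);
		return false;
	}

	int first_bit = __builtin_ctzl(val);      /* lowest set bit */
	int zero_bit  = first_bit;

	while (zero_bit < cbm_len && (val & (1ul << zero_bit)))
		zero_bit++;                        /* first zero after the run */

	if (zero_bit < cbm_len && (val >> zero_bit)) {
		printf("0x%lx: non-consecutive 1-bits\n", val);
		return false;
	}
	if (zero_bit - first_bit < min_bits) {
		printf("0x%lx: needs at least %d bits\n", val, min_bits);
		return false;
	}
	printf("0x%lx: ok\n", val);
	return true;
}

int main(void)
{
	cbm_ok(0x3c, 20, 2, 0xfffff);   /* contiguous, wide enough   */
	cbm_ok(0x05, 20, 2, 0xfffff);   /* hole in the mask          */
	cbm_ok(0x01, 20, 2, 0xfffff);   /* contiguous but too narrow */
	cbm_ok(0x0,  20, 2, 0xfffff);   /* zero: out of range        */
	return 0;
}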
@@ -111,8 +129,10 @@ int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d)
{
unsigned long data;
- if (d->have_new_ctrl)
+ if (d->have_new_ctrl) {
+ rdt_last_cmd_printf("duplicate domain %d\n", d->id);
return -EINVAL;
+ }
if(!cbm_validate(buf, &data, r))
return -EINVAL;
@@ -139,8 +159,10 @@ next:
return 0;
dom = strsep(&line, ";");
id = strsep(&dom, "=");
- if (!dom || kstrtoul(id, 10, &dom_id))
+ if (!dom || kstrtoul(id, 10, &dom_id)) {
+ rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
return -EINVAL;
+ }
dom = strim(dom);
list_for_each_entry(d, &r->domains, list) {
if (d->id == dom_id) {
@@ -196,6 +218,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok, int closid)
if (!strcmp(resname, r->name) && closid < r->num_closid)
return parse_line(tok, r);
}
+ rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname);
return -EINVAL;
}
@@ -218,6 +241,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
rdtgroup_kn_unlock(of->kn);
return -ENOENT;
}
+ rdt_last_cmd_clear();
closid = rdtgrp->closid;
@@ -229,6 +253,12 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
while ((tok = strsep(&buf, "\n")) != NULL) {
resname = strim(strsep(&tok, ":"));
if (!tok) {
+ rdt_last_cmd_puts("Missing ':'\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (tok[0] == '\0') {
+ rdt_last_cmd_printf("Missing '%s' value\n", resname);
ret = -EINVAL;
goto out;
}
diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c
index 30827510094b..681450eee428 100644
--- a/arch/x86/kernel/cpu/intel_rdt_monitor.c
+++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c
@@ -51,7 +51,7 @@ static LIST_HEAD(rmid_free_lru);
* may have a occupancy value > intel_cqm_threshold. User can change
* the threshold occupancy value.
*/
-unsigned int rmid_limbo_count;
+static unsigned int rmid_limbo_count;
/**
* @rmid_entry - The entry in the limbo and free lists.
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index a869d4a073c5..64c5ff97ee0d 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -24,6 +24,7 @@
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
+#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
@@ -51,6 +52,31 @@ static struct kernfs_node *kn_mongrp;
/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;
+static struct seq_buf last_cmd_status;
+static char last_cmd_status_buf[512];
+
+void rdt_last_cmd_clear(void)
+{
+ lockdep_assert_held(&rdtgroup_mutex);
+ seq_buf_clear(&last_cmd_status);
+}
+
+void rdt_last_cmd_puts(const char *s)
+{
+ lockdep_assert_held(&rdtgroup_mutex);
+ seq_buf_puts(&last_cmd_status, s);
+}
+
+void rdt_last_cmd_printf(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ lockdep_assert_held(&rdtgroup_mutex);
+ seq_buf_vprintf(&last_cmd_status, fmt, ap);
+ va_end(ap);
+}
+
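The three helpers above funnel every mount/mkdir/schemata error into one seq_buf so userspace can read a human-readable reason from info/last_cmd_status instead of getting a bare -EINVAL. A minimal userspace analogue of that "record the last error into a fixed buffer" idea, built on vsnprintf rather than the kernel's seq_buf:

#include <stdio.h>
#include <stdarg.h>
#include <string.h>

static char last_cmd_buf[512];
static size_t last_cmd_len;

static void last_cmd_clear(void)
{
	last_cmd_len = 0;
	last_cmd_buf[0] = '\0';
}

static void last_cmd_printf(const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(last_cmd_buf + last_cmd_len,
		      sizeof(last_cmd_buf) - last_cmd_len, fmt, ap);
	va_end(ap);

	if (n > 0)
		last_cmd_len = (last_cmd_len + (size_t)n < sizeof(last_cmd_buf))
			? last_cmd_len + (size_t)n
			: sizeof(last_cmd_buf) - 1;
}

static void last_cmd_show(void)
{
	/* Mirrors last_cmd_status_show(): prints "ok" when nothing was recorded. */
	fputs(last_cmd_len ? last_cmd_buf : "ok\n", stdout);
}

int main(void)
{
	last_cmd_clear();
	last_cmd_show();                      /* prints "ok" */

	last_cmd_clear();
	last_cmd_printf("unknown resource '%s'\n", "L4");
	last_cmd_printf("mask %#lx out of range\n", 0xffffffUL);
	last_cmd_show();
	return 0;
}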
/*
* Trivial allocator for CLOSIDs. Since h/w only supports a small number,
* we can keep a bitmap of free CLOSIDs in a single integer.
@@ -238,8 +264,10 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
/* Check whether cpus belong to parent ctrl group */
cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
- if (cpumask_weight(tmpmask))
+ if (cpumask_weight(tmpmask)) {
+ rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n");
return -EINVAL;
+ }
/* Check whether cpus are dropped from this group */
cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
@@ -291,8 +319,10 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
if (cpumask_weight(tmpmask)) {
/* Can't drop from default group */
- if (rdtgrp == &rdtgroup_default)
+ if (rdtgrp == &rdtgroup_default) {
+ rdt_last_cmd_puts("Can't drop CPUs from default group\n");
return -EINVAL;
+ }
/* Give any dropped cpus to rdtgroup_default */
cpumask_or(&rdtgroup_default.cpu_mask,
@@ -357,8 +387,10 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
}
rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ rdt_last_cmd_clear();
if (!rdtgrp) {
ret = -ENOENT;
+ rdt_last_cmd_puts("directory was removed\n");
goto unlock;
}
@@ -367,13 +399,16 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
else
ret = cpumask_parse(buf, newmask);
- if (ret)
+ if (ret) {
+ rdt_last_cmd_puts("bad cpu list/mask\n");
goto unlock;
+ }
/* check that user didn't specify any offline cpus */
cpumask_andnot(tmpmask, newmask, cpu_online_mask);
if (cpumask_weight(tmpmask)) {
ret = -EINVAL;
+ rdt_last_cmd_puts("can only assign online cpus\n");
goto unlock;
}
@@ -452,6 +487,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
*/
atomic_dec(&rdtgrp->waitcount);
kfree(callback);
+ rdt_last_cmd_puts("task exited\n");
} else {
/*
* For ctrl_mon groups move both closid and rmid.
@@ -462,10 +498,12 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
tsk->closid = rdtgrp->closid;
tsk->rmid = rdtgrp->mon.rmid;
} else if (rdtgrp->type == RDTMON_GROUP) {
- if (rdtgrp->mon.parent->closid == tsk->closid)
+ if (rdtgrp->mon.parent->closid == tsk->closid) {
tsk->rmid = rdtgrp->mon.rmid;
- else
+ } else {
+ rdt_last_cmd_puts("Can't move task to different control group\n");
ret = -EINVAL;
+ }
}
}
return ret;
@@ -484,8 +522,10 @@ static int rdtgroup_task_write_permission(struct task_struct *task,
*/
if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
!uid_eq(cred->euid, tcred->uid) &&
- !uid_eq(cred->euid, tcred->suid))
+ !uid_eq(cred->euid, tcred->suid)) {
+ rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
ret = -EPERM;
+ }
put_cred(tcred);
return ret;
@@ -502,6 +542,7 @@ static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
tsk = find_task_by_vpid(pid);
if (!tsk) {
rcu_read_unlock();
+ rdt_last_cmd_printf("No task %d\n", pid);
return -ESRCH;
}
} else {
@@ -529,6 +570,7 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return -EINVAL;
rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ rdt_last_cmd_clear();
if (rdtgrp)
ret = rdtgroup_move_task(pid, rdtgrp, of);
@@ -569,6 +611,21 @@ static int rdtgroup_tasks_show(struct kernfs_open_file *of,
return ret;
}
+static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ int len;
+
+ mutex_lock(&rdtgroup_mutex);
+ len = seq_buf_used(&last_cmd_status);
+ if (len)
+ seq_printf(seq, "%.*s", len, last_cmd_status_buf);
+ else
+ seq_puts(seq, "ok\n");
+ mutex_unlock(&rdtgroup_mutex);
+ return 0;
+}
+
static int rdt_num_closids_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
@@ -686,6 +743,13 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
{
+ .name = "last_cmd_status",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdt_last_cmd_status_show,
+ .fflags = RF_TOP_INFO,
+ },
+ {
.name = "num_closids",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
@@ -855,6 +919,10 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
return PTR_ERR(kn_info);
kernfs_get(kn_info);
+ ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
+ if (ret)
+ goto out_destroy;
+
for_each_alloc_enabled_rdt_resource(r) {
fflags = r->fflags | RF_CTRL_INFO;
ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
@@ -1081,6 +1149,7 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
struct dentry *dentry;
int ret;
+ cpus_read_lock();
mutex_lock(&rdtgroup_mutex);
/*
* resctrl file system can only be mounted once.
@@ -1130,12 +1199,12 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
goto out_mondata;
if (rdt_alloc_capable)
- static_branch_enable(&rdt_alloc_enable_key);
+ static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
if (rdt_mon_capable)
- static_branch_enable(&rdt_mon_enable_key);
+ static_branch_enable_cpuslocked(&rdt_mon_enable_key);
if (rdt_alloc_capable || rdt_mon_capable)
- static_branch_enable(&rdt_enable_key);
+ static_branch_enable_cpuslocked(&rdt_enable_key);
if (is_mbm_enabled()) {
r = &rdt_resources_all[RDT_RESOURCE_L3];
@@ -1156,7 +1225,9 @@ out_info:
out_cdp:
cdp_disable();
out:
+ rdt_last_cmd_clear();
mutex_unlock(&rdtgroup_mutex);
+ cpus_read_unlock();
return dentry;
}
@@ -1295,9 +1366,7 @@ static void rmdir_all_sub(void)
kfree(rdtgrp);
}
/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
- get_online_cpus();
update_closid_rmid(cpu_online_mask, &rdtgroup_default);
- put_online_cpus();
kernfs_remove(kn_info);
kernfs_remove(kn_mongrp);
@@ -1308,6 +1377,7 @@ static void rdt_kill_sb(struct super_block *sb)
{
struct rdt_resource *r;
+ cpus_read_lock();
mutex_lock(&rdtgroup_mutex);
/*Put everything back to default values. */
@@ -1315,11 +1385,12 @@ static void rdt_kill_sb(struct super_block *sb)
reset_all_ctrls(r);
cdp_disable();
rmdir_all_sub();
- static_branch_disable(&rdt_alloc_enable_key);
- static_branch_disable(&rdt_mon_enable_key);
- static_branch_disable(&rdt_enable_key);
+ static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
+ static_branch_disable_cpuslocked(&rdt_mon_enable_key);
+ static_branch_disable_cpuslocked(&rdt_enable_key);
kernfs_kill_sb(sb);
mutex_unlock(&rdtgroup_mutex);
+ cpus_read_unlock();
}
static struct file_system_type rdt_fs_type = {
@@ -1524,8 +1595,10 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
int ret;
prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
+ rdt_last_cmd_clear();
if (!prdtgrp) {
ret = -ENODEV;
+ rdt_last_cmd_puts("directory was removed\n");
goto out_unlock;
}
@@ -1533,6 +1606,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
if (!rdtgrp) {
ret = -ENOSPC;
+ rdt_last_cmd_puts("kernel out of memory\n");
goto out_unlock;
}
*r = rdtgrp;
@@ -1544,6 +1618,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
if (IS_ERR(kn)) {
ret = PTR_ERR(kn);
+ rdt_last_cmd_puts("kernfs create error\n");
goto out_free_rgrp;
}
rdtgrp->kn = kn;
@@ -1557,24 +1632,31 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
kernfs_get(kn);
ret = rdtgroup_kn_set_ugid(kn);
- if (ret)
+ if (ret) {
+ rdt_last_cmd_puts("kernfs perm error\n");
goto out_destroy;
+ }
- files = RFTYPE_BASE | RFTYPE_CTRL;
files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
ret = rdtgroup_add_files(kn, files);
- if (ret)
+ if (ret) {
+ rdt_last_cmd_puts("kernfs fill error\n");
goto out_destroy;
+ }
if (rdt_mon_capable) {
ret = alloc_rmid();
- if (ret < 0)
+ if (ret < 0) {
+ rdt_last_cmd_puts("out of RMIDs\n");
goto out_destroy;
+ }
rdtgrp->mon.rmid = ret;
ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
- if (ret)
+ if (ret) {
+ rdt_last_cmd_puts("kernfs subdir error\n");
goto out_idfree;
+ }
}
kernfs_activate(kn);
@@ -1652,8 +1734,10 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
kn = rdtgrp->kn;
ret = closid_alloc();
- if (ret < 0)
+ if (ret < 0) {
+ rdt_last_cmd_puts("out of CLOSIDs\n");
goto out_common_fail;
+ }
closid = ret;
rdtgrp->closid = closid;
@@ -1665,8 +1749,10 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
* of tasks and cpus to monitor.
*/
ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
- if (ret)
+ if (ret) {
+ rdt_last_cmd_puts("kernfs subdir error\n");
goto out_id_free;
+ }
}
goto out_unlock;
@@ -1902,6 +1988,9 @@ int __init rdtgroup_init(void)
{
int ret = 0;
+ seq_buf_init(&last_cmd_status, last_cmd_status_buf,
+ sizeof(last_cmd_status_buf));
+
ret = rdtgroup_setup_root();
if (ret)
return ret;
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 87cc9ab7a13c..4ca632a06e0b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -204,7 +204,7 @@ static int error_context(struct mce *m)
return IN_KERNEL;
}
-static int mce_severity_amd_smca(struct mce *m, int err_ctx)
+static int mce_severity_amd_smca(struct mce *m, enum context err_ctx)
{
u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank);
u32 low, high;
@@ -245,6 +245,9 @@ static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_exc
if (m->status & MCI_STATUS_UC) {
+ if (ctx == IN_KERNEL)
+ return MCE_PANIC_SEVERITY;
+
/*
* On older systems where overflow_recov flag is not present, we
* should simply panic if an error overflow occurs. If
@@ -255,10 +258,6 @@ static int mce_severity_amd(struct mce *m, int tolerant, char **msg, bool is_exc
if (mce_flags.smca)
return mce_severity_amd_smca(m, ctx);
- /* software can try to contain */
- if (!(m->mcgstatus & MCG_STATUS_RIPV) && (ctx == IN_KERNEL))
- return MCE_PANIC_SEVERITY;
-
/* kill current process */
return MCE_AR_SEVERITY;
} else {
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 3b413065c613..b1d616d08eee 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1367,13 +1367,12 @@ static void __start_timer(struct timer_list *t, unsigned long interval)
local_irq_restore(flags);
}
-static void mce_timer_fn(unsigned long data)
+static void mce_timer_fn(struct timer_list *t)
{
- struct timer_list *t = this_cpu_ptr(&mce_timer);
- int cpu = smp_processor_id();
+ struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
unsigned long iv;
- WARN_ON(cpu != data);
+ WARN_ON(cpu_t != t);
iv = __this_cpu_read(mce_next_interval);
@@ -1763,17 +1762,15 @@ static void mce_start_timer(struct timer_list *t)
static void __mcheck_cpu_setup_timer(void)
{
struct timer_list *t = this_cpu_ptr(&mce_timer);
- unsigned int cpu = smp_processor_id();
- setup_pinned_timer(t, mce_timer_fn, cpu);
+ timer_setup(t, mce_timer_fn, TIMER_PINNED);
}
static void __mcheck_cpu_init_timer(void)
{
struct timer_list *t = this_cpu_ptr(&mce_timer);
- unsigned int cpu = smp_processor_id();
- setup_pinned_timer(t, mce_timer_fn, cpu);
+ timer_setup(t, mce_timer_fn, TIMER_PINNED);
mce_start_timer(t);
}
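
A minimal sketch of the timer_setup()/from_timer() pattern this hunk converts to (generic illustration with assumed names, not taken from this patch): the callback now receives the timer_list pointer itself and recovers its containing object instead of getting an opaque 'data' cookie.

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_poller {
	struct timer_list timer;
	unsigned long interval;	/* in jiffies */
};

static void example_timer_fn(struct timer_list *t)
{
	/* Recover the containing structure from the timer pointer */
	struct example_poller *ep = from_timer(ep, t, timer);

	/* ... do the periodic work ... */
	mod_timer(&ep->timer, jiffies + ep->interval);
}

static void example_poller_start(struct example_poller *ep)
{
	timer_setup(&ep->timer, example_timer_fn, TIMER_PINNED);
	mod_timer(&ep->timer, jiffies + ep->interval);
}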
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 236324e83a3a..85eb5fc180c8 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -254,9 +254,9 @@ static void __init ms_hyperv_init_platform(void)
#endif
}
-const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
+const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
.name = "Microsoft Hyper-V",
.detect = ms_hyperv_platform,
- .init_platform = ms_hyperv_init_platform,
+ .type = X86_HYPER_MS_HYPERV,
+ .init.init_platform = ms_hyperv_init_platform,
};
-EXPORT_SYMBOL(x86_hyper_ms_hyperv);
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 40ed26852ebd..8e005329648b 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -205,10 +205,10 @@ static bool __init vmware_legacy_x2apic_available(void)
(eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0;
}
-const __refconst struct hypervisor_x86 x86_hyper_vmware = {
+const __initconst struct hypervisor_x86 x86_hyper_vmware = {
.name = "VMware",
.detect = vmware_platform,
- .init_platform = vmware_platform_setup,
- .x2apic_available = vmware_legacy_x2apic_available,
+ .type = X86_HYPER_VMWARE,
+ .init.init_platform = vmware_platform_setup,
+ .init.x2apic_available = vmware_legacy_x2apic_available,
};
-EXPORT_SYMBOL(x86_hyper_vmware);
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 44404e2307bb..10e74d4778a1 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -209,7 +209,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
}
#ifdef CONFIG_KEXEC_FILE
-static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
+static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
unsigned int *nr_ranges = arg;
@@ -342,7 +342,7 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced,
return ret;
}
-static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
+static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
struct crash_elf_data *ced = arg;
Elf64_Ehdr *ehdr;
@@ -355,7 +355,7 @@ static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
ehdr = ced->ehdr;
/* Exclude unwanted mem ranges */
- ret = elf_header_exclude_ranges(ced, start, end);
+ ret = elf_header_exclude_ranges(ced, res->start, res->end);
if (ret)
return ret;
@@ -518,14 +518,14 @@ static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
return 0;
}
-static int memmap_entry_callback(u64 start, u64 end, void *arg)
+static int memmap_entry_callback(struct resource *res, void *arg)
{
struct crash_memmap_data *cmd = arg;
struct boot_params *params = cmd->params;
struct e820_entry ei;
- ei.addr = start;
- ei.size = end - start + 1;
+ ei.addr = res->start;
+ ei.size = resource_size(res);
ei.type = cmd->type;
add_e820_entry(params, &ei);
@@ -619,12 +619,12 @@ out:
return ret;
}
-static int determine_backup_region(u64 start, u64 end, void *arg)
+static int determine_backup_region(struct resource *res, void *arg)
{
struct kimage *image = arg;
- image->arch.backup_src_start = start;
- image->arch.backup_src_sz = end - start + 1;
+ image->arch.backup_src_start = res->start;
+ image->arch.backup_src_sz = resource_size(res);
/* Expecting only one range for backup region */
return 1;
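
A small illustration of why the callback conversion above is mechanical (assumed helper, not part of the patch): struct resource describes an inclusive [start, end] range, so the old "end - start + 1" arithmetic is exactly what resource_size() computes.

#include <linux/ioport.h>
#include <linux/types.h>

static u64 example_range_bytes(const struct resource *res)
{
	/* Inclusive range length; equivalent to resource_size(res) */
	return res->end - res->start + 1;
}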
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 9c4e7ba6870c..e5ec3cafa72e 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -57,7 +57,7 @@
# error "Need more virtual address space for the ESPFIX hack"
#endif
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
/* This contains the *bottom* address of the espfix stack */
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
@@ -155,14 +155,14 @@ void init_espfix_ap(int cpu)
page = cpu/ESPFIX_STACKS_PER_PAGE;
/* Did another CPU already set this up? */
- stack_page = ACCESS_ONCE(espfix_pages[page]);
+ stack_page = READ_ONCE(espfix_pages[page]);
if (likely(stack_page))
goto done;
mutex_lock(&espfix_init_mutex);
/* Did we race on the lock? */
- stack_page = ACCESS_ONCE(espfix_pages[page]);
+ stack_page = READ_ONCE(espfix_pages[page]);
if (stack_page)
goto unlock_done;
@@ -200,7 +200,7 @@ void init_espfix_ap(int cpu)
set_pte(&pte_p[n*PTE_STRIDE], pte);
/* Job is done for this CPU and any CPU which shares this page */
- ACCESS_ONCE(espfix_pages[page]) = stack_page;
+ WRITE_ONCE(espfix_pages[page], stack_page);
unlock_done:
mutex_unlock(&espfix_init_mutex);
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 7affb7e3d9a5..6abd83572b01 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -249,6 +249,10 @@ static void __init fpu__init_system_ctx_switch(void)
*/
static void __init fpu__init_parse_early_param(void)
{
+ char arg[32];
+ char *argptr = arg;
+ int bit;
+
if (cmdline_find_option_bool(boot_command_line, "no387"))
setup_clear_cpu_cap(X86_FEATURE_FPU);
@@ -266,6 +270,13 @@ static void __init fpu__init_parse_early_param(void)
if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+
+ if (cmdline_find_option(boot_command_line, "clearcpuid", arg,
+ sizeof(arg)) &&
+ get_option(&argptr, &bit) &&
+ bit >= 0 &&
+ bit < NCAPINTS * 32)
+ setup_clear_cpu_cap(bit);
}
/*
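
The parse path above accepts a raw capability bit number; a brief illustration of the numbering follows (assumed example, not part of the patch).

/*
 * Illustration only: clearcpuid= takes the raw X86_FEATURE_* bit number,
 * i.e. 32 * cpuid-word + bit-within-word. X86_FEATURE_XSAVE is defined as
 * (4*32 + 26), so booting with "clearcpuid=154" would end up calling
 * setup_clear_cpu_cap(154).
 */
#define EXAMPLE_FEATURE_BIT(word, bit)	((word) * 32 + (bit))
/* EXAMPLE_FEATURE_BIT(4, 26) == 154 == X86_FEATURE_XSAVE */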
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index f1d5476c9022..87a57b7642d3 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -15,6 +15,7 @@
#include <asm/fpu/xstate.h>
#include <asm/tlbflush.h>
+#include <asm/cpufeature.h>
/*
* Although we spell it out in here, the Processor Trace
@@ -36,6 +37,19 @@ static const char *xfeature_names[] =
"unknown xstate feature" ,
};
+static short xsave_cpuid_features[] __initdata = {
+ X86_FEATURE_FPU,
+ X86_FEATURE_XMM,
+ X86_FEATURE_AVX,
+ X86_FEATURE_MPX,
+ X86_FEATURE_MPX,
+ X86_FEATURE_AVX512F,
+ X86_FEATURE_AVX512F,
+ X86_FEATURE_AVX512F,
+ X86_FEATURE_INTEL_PT,
+ X86_FEATURE_PKU,
+};
+
/*
* Mask of xstate features supported by the CPU and the kernel:
*/
@@ -59,26 +73,6 @@ unsigned int fpu_user_xstate_size;
void fpu__xstate_clear_all_cpu_caps(void)
{
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
- setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
- setup_clear_cpu_cap(X86_FEATURE_XSAVEC);
- setup_clear_cpu_cap(X86_FEATURE_XSAVES);
- setup_clear_cpu_cap(X86_FEATURE_AVX);
- setup_clear_cpu_cap(X86_FEATURE_AVX2);
- setup_clear_cpu_cap(X86_FEATURE_AVX512F);
- setup_clear_cpu_cap(X86_FEATURE_AVX512IFMA);
- setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
- setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
- setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
- setup_clear_cpu_cap(X86_FEATURE_AVX512DQ);
- setup_clear_cpu_cap(X86_FEATURE_AVX512BW);
- setup_clear_cpu_cap(X86_FEATURE_AVX512VL);
- setup_clear_cpu_cap(X86_FEATURE_MPX);
- setup_clear_cpu_cap(X86_FEATURE_XGETBV1);
- setup_clear_cpu_cap(X86_FEATURE_AVX512VBMI);
- setup_clear_cpu_cap(X86_FEATURE_PKU);
- setup_clear_cpu_cap(X86_FEATURE_AVX512_4VNNIW);
- setup_clear_cpu_cap(X86_FEATURE_AVX512_4FMAPS);
- setup_clear_cpu_cap(X86_FEATURE_AVX512_VPOPCNTDQ);
}
/*
@@ -726,6 +720,7 @@ void __init fpu__init_system_xstate(void)
unsigned int eax, ebx, ecx, edx;
static int on_boot_cpu __initdata = 1;
int err;
+ int i;
WARN_ON_FPU(!on_boot_cpu);
on_boot_cpu = 0;
@@ -759,6 +754,14 @@ void __init fpu__init_system_xstate(void)
goto out_disable;
}
+ /*
+ * Clear XSAVE features that are disabled in the normal CPUID.
+ */
+ for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) {
+ if (!boot_cpu_has(xsave_cpuid_features[i]))
+ xfeatures_mask &= ~BIT(i);
+ }
+
xfeatures_mask &= fpu__get_supported_xfeatures_mask();
/* Enable xstate instructions to be able to continue with initialization: */
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index f1d528bb66a6..c29020907886 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -212,9 +212,6 @@ ENTRY(startup_32_smp)
#endif
.Ldefault_entry:
-#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
- X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
- X86_CR0_PG)
movl $(CR0_STATE & ~X86_CR0_PG),%eax
movl %eax,%cr0
@@ -402,7 +399,7 @@ ENTRY(early_idt_handler_array)
# 24(%rsp) error code
i = 0
.rept NUM_EXCEPTION_VECTORS
- .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
+ .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
pushl $0 # Dummy error code, to make stack frame uniform
.endif
pushl $i # 20(%esp) Vector number
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 6dde3f3fc1f8..7dca675fe78d 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -38,11 +38,12 @@
*
*/
-#define p4d_index(x) (((x) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
PGD_START_KERNEL = pgd_index(__START_KERNEL_map)
+#endif
L3_START_KERNEL = pud_index(__START_KERNEL_map)
.text
@@ -50,6 +51,7 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map)
.code64
.globl startup_64
startup_64:
+ UNWIND_HINT_EMPTY
/*
* At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
* and someone has loaded an identity mapped page table
@@ -89,6 +91,7 @@ startup_64:
addq $(early_top_pgt - __START_KERNEL_map), %rax
jmp 1f
ENTRY(secondary_startup_64)
+ UNWIND_HINT_EMPTY
/*
* At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
* and someone has loaded a mapped page table.
@@ -133,6 +136,7 @@ ENTRY(secondary_startup_64)
movq $1f, %rax
jmp *%rax
1:
+ UNWIND_HINT_EMPTY
/* Check if nx is implemented */
movl $0x80000001, %eax
@@ -150,9 +154,6 @@ ENTRY(secondary_startup_64)
1: wrmsr /* Make changes effective */
/* Setup cr0 */
-#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
- X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
- X86_CR0_PG)
movl $CR0_STATE, %eax
/* Make changes effective */
movq %rax, %cr0
@@ -235,7 +236,7 @@ ENTRY(secondary_startup_64)
pushq %rax # target address in negative space
lretq
.Lafter_lret:
-ENDPROC(secondary_startup_64)
+END(secondary_startup_64)
#include "verify_cpu.S"
@@ -247,6 +248,7 @@ ENDPROC(secondary_startup_64)
*/
ENTRY(start_cpu0)
movq initial_stack(%rip), %rsp
+ UNWIND_HINT_EMPTY
jmp .Ljump_to_C_code
ENDPROC(start_cpu0)
#endif
@@ -266,26 +268,24 @@ ENDPROC(start_cpu0)
.quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS
__FINITDATA
-bad_address:
- jmp bad_address
-
__INIT
ENTRY(early_idt_handler_array)
- # 104(%rsp) %rflags
- # 96(%rsp) %cs
- # 88(%rsp) %rip
- # 80(%rsp) error code
i = 0
.rept NUM_EXCEPTION_VECTORS
- .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
- pushq $0 # Dummy error code, to make stack frame uniform
+ .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
+ UNWIND_HINT_IRET_REGS
+ pushq $0 # Dummy error code, to make stack frame uniform
+ .else
+ UNWIND_HINT_IRET_REGS offset=8
.endif
pushq $i # 72(%rsp) Vector number
jmp early_idt_handler_common
+ UNWIND_HINT_IRET_REGS
i = i + 1
.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
.endr
-ENDPROC(early_idt_handler_array)
+ UNWIND_HINT_IRET_REGS offset=16
+END(early_idt_handler_array)
early_idt_handler_common:
/*
@@ -313,6 +313,7 @@ early_idt_handler_common:
pushq %r13 /* pt_regs->r13 */
pushq %r14 /* pt_regs->r14 */
pushq %r15 /* pt_regs->r15 */
+ UNWIND_HINT_REGS
cmpq $14,%rsi /* Page fault? */
jnz 10f
@@ -327,8 +328,8 @@ early_idt_handler_common:
20:
decl early_recursion_flag(%rip)
- jmp restore_regs_and_iret
-ENDPROC(early_idt_handler_common)
+ jmp restore_regs_and_return_to_kernel
+END(early_idt_handler_common)
__INITDATA
@@ -362,10 +363,7 @@ NEXT_PAGE(early_dynamic_pgts)
.data
-#ifndef CONFIG_XEN
-NEXT_PAGE(init_top_pgt)
- .fill 512,8,0
-#else
+#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
NEXT_PAGE(init_top_pgt)
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.org init_top_pgt + PGD_PAGE_OFFSET*8, 0
@@ -382,6 +380,9 @@ NEXT_PAGE(level2_ident_pgt)
* Don't set NX because code runs from these pages.
*/
PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
+#else
+NEXT_PAGE(init_top_pgt)
+ .fill 512,8,0
#endif
#ifdef CONFIG_X86_5LEVEL
@@ -435,7 +436,7 @@ ENTRY(phys_base)
EXPORT_SYMBOL(phys_base)
#include "../../x86/xen/xen-head.S"
-
+
__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
.skip PAGE_SIZE
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 8f5cb2c7060c..86c4439f9d74 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -114,6 +114,7 @@ static void make_8259A_irq(unsigned int irq)
io_apic_irqs &= ~(1<<irq);
irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
enable_irq(irq);
+ lapic_assign_legacy_vector(irq, true);
}
/*
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 6107ee1cb8d5..d985cef3984f 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -92,8 +92,6 @@ static const __initdata struct idt_data def_idts[] = {
INTG(X86_TRAP_DF, double_fault),
#endif
INTG(X86_TRAP_DB, debug),
- INTG(X86_TRAP_NMI, nmi),
- INTG(X86_TRAP_BP, int3),
#ifdef CONFIG_X86_MCE
INTG(X86_TRAP_MC, &machine_check),
@@ -225,7 +223,7 @@ idt_setup_from_table(gate_desc *idt, const struct idt_data *t, int size, bool sy
idt_init_desc(&desc, t);
write_idt_entry(idt, t->vector, &desc);
if (sys)
- set_bit(t->vector, used_vectors);
+ set_bit(t->vector, system_vectors);
}
}
@@ -313,14 +311,14 @@ void __init idt_setup_apic_and_irq_gates(void)
idt_setup_from_table(idt_table, apic_idts, ARRAY_SIZE(apic_idts), true);
- for_each_clear_bit_from(i, used_vectors, FIRST_SYSTEM_VECTOR) {
+ for_each_clear_bit_from(i, system_vectors, FIRST_SYSTEM_VECTOR) {
entry = irq_entries_start + 8 * (i - FIRST_EXTERNAL_VECTOR);
set_intr_gate(i, entry);
}
- for_each_clear_bit_from(i, used_vectors, NR_VECTORS) {
+ for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
#ifdef CONFIG_X86_LOCAL_APIC
- set_bit(i, used_vectors);
+ set_bit(i, system_vectors);
set_intr_gate(i, spurious_interrupt);
#else
entry = irq_entries_start + 8 * (i - FIRST_EXTERNAL_VECTOR);
@@ -358,7 +356,7 @@ void idt_invalidate(void *addr)
void __init update_intr_gate(unsigned int n, const void *addr)
{
- if (WARN_ON_ONCE(!test_bit(n, used_vectors)))
+ if (WARN_ON_ONCE(!test_bit(n, system_vectors)))
return;
set_intr_gate(n, addr);
}
@@ -366,6 +364,6 @@ void __init update_intr_gate(unsigned int n, const void *addr)
void alloc_intr_gate(unsigned int n, const void *addr)
{
BUG_ON(n < FIRST_SYSTEM_VECTOR);
- if (!test_and_set_bit(n, used_vectors))
+ if (!test_and_set_bit(n, system_vectors))
set_intr_gate(n, addr);
}
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 52089c043160..49cfd9fe7589 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -134,7 +134,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_puts(p, " Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
- if (test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors)) {
+ if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) {
seq_printf(p, "%*s: ", prec, "HYP");
for_each_online_cpu(j)
seq_printf(p, "%10u ",
@@ -333,105 +333,6 @@ __visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
#ifdef CONFIG_HOTPLUG_CPU
-
-/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
- * below, which is protected by stop_machine(). Putting them on the stack
- * results in a stack frame overflow. Dynamically allocating could result in a
- * failure so declare these two cpumasks as global.
- */
-static struct cpumask affinity_new, online_new;
-
-/*
- * This cpu is going to be removed and its vectors migrated to the remaining
- * online cpus. Check to see if there are enough vectors in the remaining cpus.
- * This function is protected by stop_machine().
- */
-int check_irq_vectors_for_cpu_disable(void)
-{
- unsigned int this_cpu, vector, this_count, count;
- struct irq_desc *desc;
- struct irq_data *data;
- int cpu;
-
- this_cpu = smp_processor_id();
- cpumask_copy(&online_new, cpu_online_mask);
- cpumask_clear_cpu(this_cpu, &online_new);
-
- this_count = 0;
- for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
- desc = __this_cpu_read(vector_irq[vector]);
- if (IS_ERR_OR_NULL(desc))
- continue;
- /*
- * Protect against concurrent action removal, affinity
- * changes etc.
- */
- raw_spin_lock(&desc->lock);
- data = irq_desc_get_irq_data(desc);
- cpumask_copy(&affinity_new,
- irq_data_get_affinity_mask(data));
- cpumask_clear_cpu(this_cpu, &affinity_new);
-
- /* Do not count inactive or per-cpu irqs. */
- if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
- raw_spin_unlock(&desc->lock);
- continue;
- }
-
- raw_spin_unlock(&desc->lock);
- /*
- * A single irq may be mapped to multiple cpu's
- * vector_irq[] (for example IOAPIC cluster mode). In
- * this case we have two possibilities:
- *
- * 1) the resulting affinity mask is empty; that is
- * this the down'd cpu is the last cpu in the irq's
- * affinity mask, or
- *
- * 2) the resulting affinity mask is no longer a
- * subset of the online cpus but the affinity mask is
- * not zero; that is the down'd cpu is the last online
- * cpu in a user set affinity mask.
- */
- if (cpumask_empty(&affinity_new) ||
- !cpumask_subset(&affinity_new, &online_new))
- this_count++;
- }
- /* No need to check any further. */
- if (!this_count)
- return 0;
-
- count = 0;
- for_each_online_cpu(cpu) {
- if (cpu == this_cpu)
- continue;
- /*
- * We scan from FIRST_EXTERNAL_VECTOR to first system
- * vector. If the vector is marked in the used vectors
- * bitmap or an irq is assigned to it, we don't count
- * it as available.
- *
- * As this is an inaccurate snapshot anyway, we can do
- * this w/o holding vector_lock.
- */
- for (vector = FIRST_EXTERNAL_VECTOR;
- vector < FIRST_SYSTEM_VECTOR; vector++) {
- if (!test_bit(vector, used_vectors) &&
- IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) {
- if (++count == this_count)
- return 0;
- }
- }
- }
-
- if (count < this_count) {
- pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
- this_cpu, this_count, count);
- return -ERANGE;
- }
- return 0;
-}
-
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 1e4094eba15e..8da3e909e967 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -61,9 +61,6 @@ void __init init_ISA_irqs(void)
struct irq_chip *chip = legacy_pic->chip;
int i;
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
- init_bsp_APIC();
-#endif
legacy_pic->init(0);
for (i = 0; i < nr_legacy_irqs(); i++)
@@ -94,6 +91,7 @@ void __init native_init_IRQ(void)
x86_init.irqs.pre_vector_init();
idt_setup_apic_and_irq_gates();
+ lapic_assign_system_vectors();
if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs())
setup_irq(2, &irq2);
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index 615105cf7d58..ae38dccf0c8f 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -85,11 +85,11 @@ extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
* Copy an instruction and adjust the displacement if the instruction
* uses the %rip-relative addressing mode.
*/
-extern int __copy_instruction(u8 *dest, u8 *src, struct insn *insn);
+extern int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn);
/* Generate a relative-jump/call instruction */
-extern void synthesize_reljump(void *from, void *to);
-extern void synthesize_relcall(void *from, void *to);
+extern void synthesize_reljump(void *dest, void *from, void *to);
+extern void synthesize_relcall(void *dest, void *from, void *to);
#ifdef CONFIG_OPTPROBES
extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 0742491cbb73..bd36f3c33cd0 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -119,29 +119,29 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
static nokprobe_inline void
-__synthesize_relative_insn(void *from, void *to, u8 op)
+__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
{
struct __arch_relative_insn {
u8 op;
s32 raddr;
} __packed *insn;
- insn = (struct __arch_relative_insn *)from;
+ insn = (struct __arch_relative_insn *)dest;
insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
insn->op = op;
}
/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
-void synthesize_reljump(void *from, void *to)
+void synthesize_reljump(void *dest, void *from, void *to)
{
- __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
+ __synthesize_relative_insn(dest, from, to, RELATIVEJUMP_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);
/* Insert a call instruction at address 'from', which calls address 'to'.*/
-void synthesize_relcall(void *from, void *to)
+void synthesize_relcall(void *dest, void *from, void *to)
{
- __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
+ __synthesize_relative_insn(dest, from, to, RELATIVECALL_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);
@@ -346,10 +346,11 @@ static int is_IF_modifier(kprobe_opcode_t *insn)
/*
* Copy an instruction with recovering modified instruction by kprobes
* and adjust the displacement if the instruction uses the %rip-relative
- * addressing mode.
+ * addressing mode. Note that since @real will be the final place of the
+ * copied instruction, the displacement must be adjusted against @real, not @dest.
* This returns the length of copied instruction, or 0 if it has an error.
*/
-int __copy_instruction(u8 *dest, u8 *src, struct insn *insn)
+int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
kprobe_opcode_t buf[MAX_INSN_SIZE];
unsigned long recovered_insn =
@@ -387,11 +388,11 @@ int __copy_instruction(u8 *dest, u8 *src, struct insn *insn)
* have given.
*/
newdisp = (u8 *) src + (s64) insn->displacement.value
- - (u8 *) dest;
+ - (u8 *) real;
if ((s64) (s32) newdisp != newdisp) {
pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
- src, dest, insn->displacement.value);
+ src, real, insn->displacement.value);
return 0;
}
disp = (u8 *) dest + insn_offset_displacement(insn);
@@ -402,20 +403,38 @@ int __copy_instruction(u8 *dest, u8 *src, struct insn *insn)
}
/* Prepare reljump right after instruction to boost */
-static void prepare_boost(struct kprobe *p, struct insn *insn)
+static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
+ struct insn *insn)
{
+ int len = insn->length;
+
if (can_boost(insn, p->addr) &&
- MAX_INSN_SIZE - insn->length >= RELATIVEJUMP_SIZE) {
+ MAX_INSN_SIZE - len >= RELATIVEJUMP_SIZE) {
/*
* These instructions can be executed directly if it
* jumps back to correct address.
*/
- synthesize_reljump(p->ainsn.insn + insn->length,
+ synthesize_reljump(buf + len, p->ainsn.insn + len,
p->addr + insn->length);
+ len += RELATIVEJUMP_SIZE;
p->ainsn.boostable = true;
} else {
p->ainsn.boostable = false;
}
+
+ return len;
+}
+
+/* Make the page RO when allocating it */
+void *alloc_insn_page(void)
+{
+ void *page;
+
+ page = module_alloc(PAGE_SIZE);
+ if (page)
+ set_memory_ro((unsigned long)page & PAGE_MASK, 1);
+
+ return page;
}
/* Recover page to RW mode before releasing it */
@@ -429,12 +448,11 @@ void free_insn_page(void *page)
static int arch_copy_kprobe(struct kprobe *p)
{
struct insn insn;
+ kprobe_opcode_t buf[MAX_INSN_SIZE];
int len;
- set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
-
/* Copy an instruction with recovering if other optprobe modifies it.*/
- len = __copy_instruction(p->ainsn.insn, p->addr, &insn);
+ len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
if (!len)
return -EINVAL;
@@ -442,15 +460,16 @@ static int arch_copy_kprobe(struct kprobe *p)
* __copy_instruction can modify the displacement of the instruction,
* but it doesn't affect boostable check.
*/
- prepare_boost(p, &insn);
-
- set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
+ len = prepare_boost(buf, p, &insn);
/* Check whether the instruction modifies Interrupt Flag or not */
- p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
+ p->ainsn.if_modifier = is_IF_modifier(buf);
/* Also, displacement change doesn't affect the first byte */
- p->opcode = p->ainsn.insn[0];
+ p->opcode = buf[0];
+
+ /* OK, write back the instruction(s) into ROX insn buffer */
+ text_poke(p->ainsn.insn, buf, len);
return 0;
}
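
The displacement handling above can be summarized with a small sketch (assumed names, not part of the patch): a %rip-relative operand encodes "target minus next-instruction address", so a copied instruction must have its displacement re-based on the address it will finally execute from.

static long example_rebase_disp(unsigned long src, unsigned long real,
				long old_disp)
{
	/*
	 * old target = src  + insn_len + old_disp
	 * new target = real + insn_len + new_disp   (must be the same address)
	 * => new_disp = src + old_disp - real        (insn_len cancels out)
	 */
	return (long)(src + old_disp - real);
}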
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 041f7b6dfa0f..8dc0161cec8f 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -26,7 +26,7 @@
#include "common.h"
static nokprobe_inline
-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb, unsigned long orig_ip)
{
/*
@@ -41,33 +41,31 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
__this_cpu_write(current_kprobe, NULL);
if (orig_ip)
regs->ip = orig_ip;
- return 1;
}
int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
- if (kprobe_ftrace(p))
- return __skip_singlestep(p, regs, kcb, 0);
- else
- return 0;
+ if (kprobe_ftrace(p)) {
+ __skip_singlestep(p, regs, kcb, 0);
+ preempt_enable_no_resched();
+ return 1;
+ }
+ return 0;
}
NOKPROBE_SYMBOL(skip_singlestep);
-/* Ftrace callback handler for kprobes */
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
- unsigned long flags;
-
- /* Disable irq for emulating a breakpoint and avoiding preempt */
- local_irq_save(flags);
+ /* Preemption is disabled by ftrace */
p = get_kprobe((kprobe_opcode_t *)ip);
if (unlikely(!p) || kprobe_disabled(p))
- goto end;
+ return;
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
@@ -77,17 +75,19 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
regs->ip = ip + sizeof(kprobe_opcode_t);
+ /* To emulate trap-based kprobes, disable preemption here */
+ preempt_disable();
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (!p->pre_handler || !p->pre_handler(p, regs))
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
__skip_singlestep(p, regs, kcb, orig_ip);
+ preempt_enable_no_resched();
+ }
/*
* If pre_handler returns !0, it sets regs->ip and
- * resets current kprobe.
+ * resets the current kprobe, and keeps the preempt count at +1.
*/
}
-end:
- local_irq_restore(flags);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 4f98aad38237..e941136e24d8 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -142,11 +142,11 @@ void optprobe_template_func(void);
STACK_FRAME_NON_STANDARD(optprobe_template_func);
#define TMPL_MOVE_IDX \
- ((long)&optprobe_template_val - (long)&optprobe_template_entry)
+ ((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
- ((long)&optprobe_template_call - (long)&optprobe_template_entry)
+ ((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
- ((long)&optprobe_template_end - (long)&optprobe_template_entry)
+ ((long)optprobe_template_end - (long)optprobe_template_entry)
#define INT3_SIZE sizeof(kprobe_opcode_t)
@@ -154,17 +154,15 @@ STACK_FRAME_NON_STANDARD(optprobe_template_func);
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long flags;
-
/* This is possible if op is under delayed unoptimizing */
if (kprobe_disabled(&op->kp))
return;
- local_irq_save(flags);
+ preempt_disable();
if (kprobe_running()) {
kprobes_inc_nmissed_count(&op->kp);
} else {
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
/* Save skipped registers */
#ifdef CONFIG_X86_64
regs->cs = __KERNEL_CS;
@@ -180,17 +178,17 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
opt_pre_handler(&op->kp, regs);
__this_cpu_write(current_kprobe, NULL);
}
- local_irq_restore(flags);
+ preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(optimized_callback);
-static int copy_optimized_instructions(u8 *dest, u8 *src)
+static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
struct insn insn;
int len = 0, ret;
while (len < RELATIVEJUMP_SIZE) {
- ret = __copy_instruction(dest + len, src + len, &insn);
+ ret = __copy_instruction(dest + len, src + len, real, &insn);
if (!ret || !can_boost(&insn, src + len))
return -EINVAL;
len += ret;
@@ -343,57 +341,66 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
struct kprobe *__unused)
{
- u8 *buf;
- int ret;
+ u8 *buf = NULL, *slot;
+ int ret, len;
long rel;
if (!can_optimize((unsigned long)op->kp.addr))
return -EILSEQ;
- op->optinsn.insn = get_optinsn_slot();
- if (!op->optinsn.insn)
+ buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
+ if (!buf)
return -ENOMEM;
+ op->optinsn.insn = slot = get_optinsn_slot();
+ if (!slot) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
/*
* Verify if the address gap is in 2GB range, because this uses
* a relative jump.
*/
- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
+ rel = (long)slot - (long)op->kp.addr + RELATIVEJUMP_SIZE;
if (abs(rel) > 0x7fffffff) {
- __arch_remove_optimized_kprobe(op, 0);
- return -ERANGE;
+ ret = -ERANGE;
+ goto err;
}
- buf = (u8 *)op->optinsn.insn;
- set_memory_rw((unsigned long)buf & PAGE_MASK, 1);
+ /* Copy arch-dep-instance from template */
+ memcpy(buf, optprobe_template_entry, TMPL_END_IDX);
/* Copy instructions into the out-of-line buffer */
- ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
- if (ret < 0) {
- __arch_remove_optimized_kprobe(op, 0);
- return ret;
- }
+ ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
+ slot + TMPL_END_IDX);
+ if (ret < 0)
+ goto err;
op->optinsn.size = ret;
-
- /* Copy arch-dep-instance from template */
- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
+ len = TMPL_END_IDX + op->optinsn.size;
/* Set probe information */
synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
/* Set probe function call */
- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
+ synthesize_relcall(buf + TMPL_CALL_IDX,
+ slot + TMPL_CALL_IDX, optimized_callback);
/* Set returning jmp instruction at the tail of out-of-line buffer */
- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
+ synthesize_reljump(buf + len, slot + len,
(u8 *)op->kp.addr + op->optinsn.size);
-
- set_memory_ro((unsigned long)buf & PAGE_MASK, 1);
-
- flush_icache_range((unsigned long) buf,
- (unsigned long) buf + TMPL_END_IDX +
- op->optinsn.size + RELATIVEJUMP_SIZE);
- return 0;
+ len += RELATIVEJUMP_SIZE;
+
+ /* We have to use text_poke() for the instruction buffer because it is RO */
+ text_poke(slot, buf, len);
+ ret = 0;
+out:
+ kfree(buf);
+ return ret;
+
+err:
+ __arch_remove_optimized_kprobe(op, 0);
+ goto out;
}
/*
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 8bb9594d0761..b40ffbf156c1 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -75,8 +75,8 @@ static int parse_no_kvmclock_vsyscall(char *arg)
early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
-static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
-static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
+static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
+static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;
/*
@@ -312,7 +312,7 @@ static void kvm_register_steal_time(void)
cpu, (unsigned long long) slow_virt_to_phys(st));
}
-static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
+static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
@@ -426,9 +426,42 @@ void kvm_disable_steal_time(void)
wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}
+static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
+{
+ early_set_memory_decrypted((unsigned long) ptr, size);
+}
+
+/*
+ * Iterate through all possible CPUs and map the memory regions pointed
+ * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
+ *
+ * Note: we iterate through all possible CPUs to ensure that CPUs
+ * hotplugged will have their per-cpu variable already mapped as
+ * decrypted.
+ */
+static void __init sev_map_percpu_data(void)
+{
+ int cpu;
+
+ if (!sev_active())
+ return;
+
+ for_each_possible_cpu(cpu) {
+ __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
+ __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
+ __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
+ }
+}
+
#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
+ /*
+ * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
+ * shares the guest physical address with the hypervisor.
+ */
+ sev_map_percpu_data();
+
kvm_guest_cpu_init();
native_smp_prepare_boot_cpu();
kvm_spinlock_init();
@@ -465,7 +498,7 @@ static void __init kvm_apf_trap_init(void)
update_intr_gate(X86_TRAP_PF, async_page_fault);
}
-void __init kvm_guest_init(void)
+static void __init kvm_guest_init(void)
{
int i;
@@ -496,6 +529,7 @@ void __init kvm_guest_init(void)
kvm_cpu_online, kvm_cpu_down_prepare) < 0)
pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
+ sev_map_percpu_data();
kvm_guest_cpu_init();
#endif
@@ -544,12 +578,13 @@ static uint32_t __init kvm_detect(void)
return kvm_cpuid_base();
}
-const struct hypervisor_x86 x86_hyper_kvm __refconst = {
+const __initconst struct hypervisor_x86 x86_hyper_kvm = {
.name = "KVM",
.detect = kvm_detect,
- .x2apic_available = kvm_para_available,
+ .type = X86_HYPER_KVM,
+ .init.guest_late_init = kvm_guest_init,
+ .init.x2apic_available = kvm_para_available,
};
-EXPORT_SYMBOL_GPL(x86_hyper_kvm);
static __init int activate_jump_labels(void)
{
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 5b609e28ce3f..77b492c2d658 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <asm/mem_encrypt.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/kvmclock.h>
@@ -45,7 +46,7 @@ early_param("no-kvmclock", parse_no_kvmclock);
/* The hypervisor will put information about time periodically here */
static struct pvclock_vsyscall_time_info *hv_clock;
-static struct pvclock_wall_clock wall_clock;
+static struct pvclock_wall_clock *wall_clock;
struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
{
@@ -64,15 +65,15 @@ static void kvm_get_wallclock(struct timespec *now)
int low, high;
int cpu;
- low = (int)__pa_symbol(&wall_clock);
- high = ((u64)__pa_symbol(&wall_clock) >> 32);
+ low = (int)slow_virt_to_phys(wall_clock);
+ high = ((u64)slow_virt_to_phys(wall_clock) >> 32);
native_write_msr(msr_kvm_wall_clock, low, high);
cpu = get_cpu();
vcpu_time = &hv_clock[cpu].pvti;
- pvclock_read_wallclock(&wall_clock, vcpu_time, now);
+ pvclock_read_wallclock(wall_clock, vcpu_time, now);
put_cpu();
}
@@ -249,11 +250,39 @@ static void kvm_shutdown(void)
native_machine_shutdown();
}
+static phys_addr_t __init kvm_memblock_alloc(phys_addr_t size,
+ phys_addr_t align)
+{
+ phys_addr_t mem;
+
+ mem = memblock_alloc(size, align);
+ if (!mem)
+ return 0;
+
+ if (sev_active()) {
+ if (early_set_memory_decrypted((unsigned long)__va(mem), size))
+ goto e_free;
+ }
+
+ return mem;
+e_free:
+ memblock_free(mem, size);
+ return 0;
+}
+
+static void __init kvm_memblock_free(phys_addr_t addr, phys_addr_t size)
+{
+ if (sev_active())
+ early_set_memory_encrypted((unsigned long)__va(addr), size);
+
+ memblock_free(addr, size);
+}
+
void __init kvmclock_init(void)
{
struct pvclock_vcpu_time_info *vcpu_time;
- unsigned long mem;
- int size, cpu;
+ unsigned long mem, mem_wall_clock;
+ int size, cpu, wall_clock_size;
u8 flags;
size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
@@ -267,21 +296,35 @@ void __init kvmclock_init(void)
} else if (!(kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)))
return;
- printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
- msr_kvm_system_time, msr_kvm_wall_clock);
+ wall_clock_size = PAGE_ALIGN(sizeof(struct pvclock_wall_clock));
+ mem_wall_clock = kvm_memblock_alloc(wall_clock_size, PAGE_SIZE);
+ if (!mem_wall_clock)
+ return;
- mem = memblock_alloc(size, PAGE_SIZE);
- if (!mem)
+ wall_clock = __va(mem_wall_clock);
+ memset(wall_clock, 0, wall_clock_size);
+
+ mem = kvm_memblock_alloc(size, PAGE_SIZE);
+ if (!mem) {
+ kvm_memblock_free(mem_wall_clock, wall_clock_size);
+ wall_clock = NULL;
return;
+ }
+
hv_clock = __va(mem);
memset(hv_clock, 0, size);
if (kvm_register_clock("primary cpu clock")) {
hv_clock = NULL;
- memblock_free(mem, size);
+ kvm_memblock_free(mem, size);
+ kvm_memblock_free(mem_wall_clock, wall_clock_size);
+ wall_clock = NULL;
return;
}
+ printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
+ msr_kvm_system_time, msr_kvm_wall_clock);
+
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 4d17bacf4030..1c1eae961340 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -13,6 +13,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
+#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
@@ -102,7 +103,7 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
static void install_ldt(struct mm_struct *current_mm,
struct ldt_struct *ldt)
{
- /* Synchronizes with lockless_dereference in load_mm_ldt. */
+ /* Synchronizes with READ_ONCE in load_mm_ldt. */
smp_store_release(&current_mm->context.ldt, ldt);
/* Activate the LDT for all CPUs using current_mm. */
@@ -295,8 +296,8 @@ out:
return error;
}
-asmlinkage int sys_modify_ldt(int func, void __user *ptr,
- unsigned long bytecount)
+SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
+ unsigned long , bytecount)
{
int ret = -ENOSYS;
@@ -314,5 +315,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
ret = write_ldt(ptr, bytecount, 0);
break;
}
- return ret;
+ /*
+ * The SYSCALL_DEFINE() macros give us an 'unsigned long'
+ * return type, but the ABI for sys_modify_ldt() expects
+ * 'int'. This cast gives us an int-sized value in %rax
+ * for the return code. The 'unsigned' is necessary so
+ * the compiler does not try to sign-extend the negative
+ * return codes into the high half of the register when
+ * taking the value from int->long.
+ */
+ return (unsigned int)ret;
}
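
A short userspace illustration of the sign-extension issue described in the comment above (assumed example, not part of the patch): widening a negative int through 'long' smears 1-bits into the upper half of a 64-bit return register, while going through 'unsigned int' keeps only the low 32 bits set.

#include <stdio.h>

int main(void)
{
	int err = -38;	/* -ENOSYS */

	printf("%lx\n", (unsigned long)(long)err);	   /* ffffffffffffffda */
	printf("%lx\n", (unsigned long)(unsigned int)err); /* ffffffda */
	return 0;
}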
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 35aafc95e4b8..18bc9b51ac9b 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -105,7 +105,7 @@ static void nmi_max_handler(struct irq_work *w)
{
struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
int remainder_ns, decimal_msecs;
- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
+ u64 whole_msecs = READ_ONCE(a->max_duration);
remainder_ns = do_div(whole_msecs, (1000 * 1000));
decimal_msecs = remainder_ns / 1000;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 19a3e8f961c7..041096bdef86 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -115,8 +115,18 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
return 5;
}
-/* Neat trick to map patch type back to the call within the
- * corresponding structure. */
+DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
+
+void __init native_pv_lock_init(void)
+{
+ if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+ static_branch_disable(&virt_spin_lock_key);
+}
+
+/*
+ * Neat trick to map patch type back to the call within the
+ * corresponding structure.
+ */
static void *get_call_destination(u8 type)
{
struct paravirt_patch_template tmpl = {
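
A hedged sketch of the consumer side of the new key (assumed usage; the actual qspinlock wiring is not shown in this hunk): callers can branch on the key, so bare-metal boots that went through native_pv_lock_init() take the plain qspinlock path with no runtime overhead.

static inline bool example_virt_spin_lock(void)
{
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;	/* bare metal: fall back to native qspinlock */

	/* hypervisor guest: spin politely (e.g. a cpu_relax() loop) ... */
	return true;
}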
diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c
index 3fe690067802..6b07faaa1579 100644
--- a/arch/x86/kernel/pmem.c
+++ b/arch/x86/kernel/pmem.c
@@ -7,7 +7,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
-static int found(u64 start, u64 end, void *data)
+static int found(struct resource *res, void *data)
{
return 1;
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c67685337c5a..97fb3e5737f5 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -49,7 +49,13 @@
*/
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
.x86_tss = {
- .sp0 = TOP_OF_INIT_STACK,
+ /*
+ * .sp0 is only used when entering ring 0 from a lower
+ * privilege level. Since the init task never runs anything
+ * but ring 0 code, there is no need for a valid value here.
+ * Poison it.
+ */
+ .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
#ifdef CONFIG_X86_32
.ss0 = __KERNEL_DS,
.ss1 = __KERNEL_CS,
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 11966251cd42..45bf0c5f93e1 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -284,9 +284,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/*
* Reload esp0 and cpu_current_top_of_stack. This changes
- * current_thread_info().
+ * current_thread_info(). Refresh the SYSENTER configuration in
+ * case prev or next is vm86.
*/
- load_sp0(tss, next);
+ update_sp0(next_p);
+ refresh_sysenter_cs(next);
this_cpu_write(cpu_current_top_of_stack,
(unsigned long)task_stack_page(next_p) +
THREAD_SIZE);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 302e7b2572d1..eeeb34f85c25 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -274,7 +274,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
struct inactive_task_frame *frame;
struct task_struct *me = current;
- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
childregs = task_pt_regs(p);
fork_frame = container_of(childregs, struct fork_frame, regs);
frame = &fork_frame->frame;
@@ -464,8 +463,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
this_cpu_write(current_task, next_p);
- /* Reload esp0 and ss1. This changes current_thread_info(). */
- load_sp0(tss, next);
+ /* Reload sp0. */
+ update_sp0(next_p);
/*
* Now maybe reload the debug registers and handle I/O bitmaps
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0957dd73d127..8af2e8d0c0a1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -136,18 +136,6 @@ RESERVE_BRK(dmi_alloc, 65536);
static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;
-#ifdef CONFIG_X86_64
-int default_cpu_present_to_apicid(int mps_cpu)
-{
- return __default_cpu_present_to_apicid(mps_cpu);
-}
-
-int default_check_phys_apicid_present(int phys_apicid)
-{
- return __default_check_phys_apicid_present(phys_apicid);
-}
-#endif
-
struct boot_params boot_params;
/*
@@ -380,9 +368,11 @@ static void __init reserve_initrd(void)
* If SME is active, this memory will be marked encrypted by the
* kernel when it is accessed (including relocation). However, the
* ramdisk image was loaded decrypted by the bootloader, so make
- * sure that it is encrypted before accessing it.
+ * sure that it is encrypted before accessing it. For SEV the
+ * ramdisk will already be encrypted, so only do this for SME.
*/
- sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
+ if (sme_active())
+ sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
initrd_start = 0;
@@ -822,26 +812,6 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
return 0;
}
-static void __init simple_udelay_calibration(void)
-{
- unsigned int tsc_khz, cpu_khz;
- unsigned long lpj;
-
- if (!boot_cpu_has(X86_FEATURE_TSC))
- return;
-
- cpu_khz = x86_platform.calibrate_cpu();
- tsc_khz = x86_platform.calibrate_tsc();
-
- tsc_khz = tsc_khz ? : cpu_khz;
- if (!tsc_khz)
- return;
-
- lpj = tsc_khz * 1000;
- do_div(lpj, HZ);
- loops_per_jiffy = lpj;
-}
-
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
@@ -1045,12 +1015,10 @@ void __init setup_arch(char **cmdline_p)
/*
* VMware detection requires dmi to be available, so this
- * needs to be done after dmi_scan_machine, for the BP.
+ * needs to be done after dmi_scan_machine(), for the boot CPU.
*/
init_hypervisor_platform();
- simple_udelay_calibration();
-
x86_init.resources.probe_roms();
/* after parse_early_param, so could debug it */
@@ -1135,9 +1103,6 @@ void __init setup_arch(char **cmdline_p)
memblock_set_current_limit(ISA_END_ADDRESS);
e820__memblock_setup();
- if (!early_xdbc_setup_hardware())
- early_xdbc_register_console();
-
reserve_bios_regions();
if (efi_enabled(EFI_MEMMAP)) {
@@ -1243,6 +1208,10 @@ void __init setup_arch(char **cmdline_p)
kvmclock_init();
#endif
+ tsc_early_delay_calibrate();
+ if (!early_xdbc_setup_hardware())
+ early_xdbc_register_console();
+
x86_init.paging.pagetable_init();
kasan_init();
@@ -1294,7 +1263,7 @@ void __init setup_arch(char **cmdline_p)
io_apic_init_mappings();
- kvm_guest_init();
+ x86_init.hyper.guest_late_init();
e820__reserve_resources();
e820__register_nosave_regions(max_low_pfn);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ad59edd84de7..5f59e6bee123 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -77,6 +77,7 @@
#include <asm/i8259.h>
#include <asm/realmode.h>
#include <asm/misc.h>
+#include <asm/qspinlock.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
@@ -194,6 +195,12 @@ static void smp_callin(void)
smp_store_cpu_info(cpuid);
/*
+ * The topology information must be up to date before
+ * calibrate_delay() and notify_cpu_starting().
+ */
+ set_cpu_sibling_map(raw_smp_processor_id());
+
+ /*
* Get our bogomips.
* Update loops_per_jiffy in cpu_data. Previous call to
* smp_store_cpu_info() stored a value that is close but not as
@@ -203,11 +210,6 @@ static void smp_callin(void)
cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
pr_debug("Stack at about %p\n", &cpuid);
- /*
- * This must be done before setting cpu_online_mask
- * or calling notify_cpu_starting.
- */
- set_cpu_sibling_map(raw_smp_processor_id());
wmb();
notify_cpu_starting(cpuid);
@@ -249,19 +251,19 @@ static void notrace start_secondary(void *unused)
/* otherwise gcc will move up smp_processor_id before the cpu_init */
barrier();
/*
- * Check TSC synchronization with the BP:
+ * Check TSC synchronization with the boot CPU:
*/
check_tsc_sync_target();
/*
- * Lock vector_lock and initialize the vectors on this cpu
- * before setting the cpu online. We must set it online with
- * vector_lock held to prevent a concurrent setup/teardown
- * from seeing a half valid vector space.
+ * Lock vector_lock, set CPU online and bring the vector
+ * allocator online. Online must be set with vector_lock held
+ * to prevent a concurrent irq setup/teardown from seeing a
+ * half valid vector space.
*/
lock_vector_lock();
- setup_vector_irq(smp_processor_id());
set_cpu_online(smp_processor_id(), true);
+ lapic_online();
unlock_vector_lock();
cpu_set_state_online(smp_processor_id());
x86_platform.nmi_init();
@@ -961,8 +963,7 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle)
#ifdef CONFIG_X86_32
/* Stack for startup_32 can be just as for start_secondary onwards */
irq_ctx_init(cpu);
- per_cpu(cpu_current_top_of_stack, cpu) =
- (unsigned long)task_stack_page(idle) + THREAD_SIZE;
+ per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
#else
initial_gs = per_cpu_offset(cpu);
#endif
@@ -1094,7 +1095,7 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
unsigned long flags;
int err, ret = 0;
- WARN_ON(irqs_disabled());
+ lockdep_assert_irqs_enabled();
pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);
@@ -1190,17 +1191,10 @@ static __init void disable_smp(void)
cpumask_set_cpu(0, topology_core_cpumask(0));
}
-enum {
- SMP_OK,
- SMP_NO_CONFIG,
- SMP_NO_APIC,
- SMP_FORCE_UP,
-};
-
/*
* Various sanity checks.
*/
-static int __init smp_sanity_check(unsigned max_cpus)
+static void __init smp_sanity_check(void)
{
preempt_disable();
@@ -1238,16 +1232,6 @@ static int __init smp_sanity_check(unsigned max_cpus)
}
/*
- * If we couldn't find an SMP configuration at boot time,
- * get out of here now!
- */
- if (!smp_found_config && !acpi_lapic) {
- preempt_enable();
- pr_notice("SMP motherboard not detected\n");
- return SMP_NO_CONFIG;
- }
-
- /*
* Should not be necessary because the MP table should list the boot
* CPU too, but we do it for the sake of robustness anyway.
*/
@@ -1257,29 +1241,6 @@ static int __init smp_sanity_check(unsigned max_cpus)
physid_set(hard_smp_processor_id(), phys_cpu_present_map);
}
preempt_enable();
-
- /*
- * If we couldn't find a local APIC, then get out of here now!
- */
- if (APIC_INTEGRATED(boot_cpu_apic_version) &&
- !boot_cpu_has(X86_FEATURE_APIC)) {
- if (!disable_apic) {
- pr_err("BIOS bug, local APIC #%d not detected!...\n",
- boot_cpu_physical_apicid);
- pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n");
- }
- return SMP_NO_APIC;
- }
-
- /*
- * If SMP should be disabled, then really disable it!
- */
- if (!max_cpus) {
- pr_info("SMP mode deactivated\n");
- return SMP_FORCE_UP;
- }
-
- return SMP_OK;
}
static void __init smp_cpu_index_default(void)
@@ -1294,9 +1255,18 @@ static void __init smp_cpu_index_default(void)
}
}
+static void __init smp_get_logical_apicid(void)
+{
+ if (x2apic_mode)
+ cpu0_logical_apicid = apic_read(APIC_LDR);
+ else
+ cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
+}
+
/*
- * Prepare for SMP bootup. The MP table or ACPI has been read
- * earlier. Just do some sanity checking here and enable APIC mode.
+ * Prepare for SMP bootup.
+ * @max_cpus: configured maximum number of CPUs. It is a legacy parameter
+ * for common interface support.
*/
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
@@ -1328,35 +1298,33 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
set_cpu_sibling_map(0);
- switch (smp_sanity_check(max_cpus)) {
- case SMP_NO_CONFIG:
- disable_smp();
- if (APIC_init_uniprocessor())
- pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
- return;
- case SMP_NO_APIC:
+ smp_sanity_check();
+
+ switch (apic_intr_mode) {
+ case APIC_PIC:
+ case APIC_VIRTUAL_WIRE_NO_CONFIG:
disable_smp();
return;
- case SMP_FORCE_UP:
+ case APIC_SYMMETRIC_IO_NO_ROUTING:
disable_smp();
- apic_bsp_setup(false);
+ /* Setup local timer */
+ x86_init.timers.setup_percpu_clockev();
return;
- case SMP_OK:
+ case APIC_VIRTUAL_WIRE:
+ case APIC_SYMMETRIC_IO:
break;
}
- if (read_apic_id() != boot_cpu_physical_apicid) {
- panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
- read_apic_id(), boot_cpu_physical_apicid);
- /* Or can we switch back to PIC here? */
- }
+ /* Setup local timer */
+ x86_init.timers.setup_percpu_clockev();
- default_setup_apic_routing();
- cpu0_logical_apicid = apic_bsp_setup(false);
+ smp_get_logical_apicid();
pr_info("CPU0: ");
print_cpu_info(&cpu_data(0));
+ native_pv_lock_init();
+
uv_system_init();
set_mtrr_aps_delayed_init();
@@ -1395,7 +1363,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
nmi_selftest();
impress_friends();
- setup_ioapic_dest();
mtrr_aps_init();
}
@@ -1554,13 +1521,14 @@ void cpu_disable_common(void)
remove_cpu_from_maps(cpu);
unlock_vector_lock();
fixup_irqs();
+ lapic_offline();
}
int native_cpu_disable(void)
{
int ret;
- ret = check_irq_vectors_for_cpu_disable();
+ ret = lapic_can_unplug_cpu();
if (ret)
return ret;
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 8dabd7bf1673..77835bc021c7 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -30,7 +30,7 @@ static int save_stack_address(struct stack_trace *trace, unsigned long addr,
return 0;
}
-static void __save_stack_trace(struct stack_trace *trace,
+static void noinline __save_stack_trace(struct stack_trace *trace,
struct task_struct *task, struct pt_regs *regs,
bool nosched)
{
@@ -56,6 +56,7 @@ static void __save_stack_trace(struct stack_trace *trace,
*/
void save_stack_trace(struct stack_trace *trace)
{
+ trace->skip++;
__save_stack_trace(trace, current, NULL, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
@@ -70,6 +71,8 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
if (!try_get_task_stack(tsk))
return;
+ if (tsk == current)
+ trace->skip++;
__save_stack_trace(trace, tsk, NULL, true);
put_task_stack(tsk);
@@ -88,8 +91,9 @@ EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
} \
})
-static int __save_stack_trace_reliable(struct stack_trace *trace,
- struct task_struct *task)
+static int __always_inline
+__save_stack_trace_reliable(struct stack_trace *trace,
+ struct task_struct *task)
{
struct unwind_state state;
struct pt_regs *regs;
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 879af864d99a..749d189f8cd4 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -85,6 +85,11 @@ void __init hpet_time_init(void)
static __init void x86_late_time_init(void)
{
x86_init.timers.timer_init();
+ /*
+ * After PIT/HPET timer init, select and set up
+ * the final interrupt mode for delivering IRQs.
+ */
+ x86_init.irqs.intr_mode_init();
tsc_init();
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 67db4f43309e..989514c94a55 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -42,7 +42,6 @@
#include <linux/edac.h>
#endif
-#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
@@ -60,6 +59,7 @@
#include <asm/trace/mpx.h>
#include <asm/mpx.h>
#include <asm/vm86.h>
+#include <asm/umip.h>
#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
@@ -71,7 +71,7 @@
#include <asm/proto.h>
#endif
-DECLARE_BITMAP(used_vectors, NR_VECTORS);
+DECLARE_BITMAP(system_vectors, NR_VECTORS);
static inline void cond_local_irq_enable(struct pt_regs *regs)
{
@@ -141,8 +141,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
* will catch asm bugs and any attempt to use ist_preempt_enable
* from double_fault.
*/
- BUG_ON((unsigned long)(current_top_of_stack() -
- current_stack_pointer) >= THREAD_SIZE);
+ BUG_ON(!on_thread_stack());
preempt_enable_no_resched();
}
@@ -209,9 +208,6 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
if (fixup_exception(regs, trapnr))
return 0;
- if (fixup_bug(regs, trapnr))
- return 0;
-
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = trapnr;
die(str, regs, error_code);
@@ -292,6 +288,13 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+ /*
+ * WARN*()s end up here; fix them up before we call the
+ * notifier chain.
+ */
+ if (!user_mode(regs) && fixup_bug(regs, trapnr))
+ return;
+
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
NOTIFY_STOP) {
cond_local_irq_enable(regs);
@@ -514,6 +517,11 @@ do_general_protection(struct pt_regs *regs, long error_code)
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
cond_local_irq_enable(regs);
+ if (static_cpu_has(X86_FEATURE_UMIP)) {
+ if (user_mode(regs) && fixup_umip_exception(regs))
+ return;
+ }
+
if (v8086_mode(regs)) {
local_irq_enable();
handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
@@ -740,10 +748,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
if (!dr6 && user_mode(regs))
user_icebp = 1;
- /* Catch kmemcheck conditions! */
- if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
- goto exit;
-
/* Store the virtualized DR6 value */
tsk->thread.debugreg6 = dr6;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 796d96bb0821..8ea117f8142e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -112,7 +112,7 @@ static void cyc2ns_data_init(struct cyc2ns_data *data)
data->cyc2ns_offset = 0;
}
-static void cyc2ns_init(int cpu)
+static void __init cyc2ns_init(int cpu)
{
struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
@@ -812,13 +812,13 @@ unsigned long native_calibrate_cpu(void)
return tsc_pit_min;
}
-int recalibrate_cpu_khz(void)
+void recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
unsigned long cpu_khz_old = cpu_khz;
if (!boot_cpu_has(X86_FEATURE_TSC))
- return -ENODEV;
+ return;
cpu_khz = x86_platform.calibrate_cpu();
tsc_khz = x86_platform.calibrate_tsc();
@@ -828,10 +828,6 @@ int recalibrate_cpu_khz(void)
cpu_khz = tsc_khz;
cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
cpu_khz_old, cpu_khz);
-
- return 0;
-#else
- return -ENODEV;
#endif
}
@@ -959,17 +955,21 @@ core_initcall(cpufreq_register_tsc_scaling);
/*
* If ART is present detect the numerator:denominator to convert to TSC
*/
-static void detect_art(void)
+static void __init detect_art(void)
{
unsigned int unused[2];
if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
return;
- /* Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required */
+ /*
+ * Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required,
+ * and the TSC counter resets must not occur asynchronously.
+ */
if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
- !boot_cpu_has(X86_FEATURE_TSC_ADJUST))
+ !boot_cpu_has(X86_FEATURE_TSC_ADJUST) ||
+ tsc_async_resets)
return;
cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
@@ -1263,6 +1263,25 @@ static int __init init_tsc_clocksource(void)
*/
device_initcall(init_tsc_clocksource);
+void __init tsc_early_delay_calibrate(void)
+{
+ unsigned long lpj;
+
+ if (!boot_cpu_has(X86_FEATURE_TSC))
+ return;
+
+ cpu_khz = x86_platform.calibrate_cpu();
+ tsc_khz = x86_platform.calibrate_tsc();
+
+ tsc_khz = tsc_khz ? : cpu_khz;
+ if (!tsc_khz)
+ return;
+
+ lpj = tsc_khz * 1000;
+ do_div(lpj, HZ);
+ loops_per_jiffy = lpj;
+}
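As a rough worked example of the loops_per_jiffy computation above (the numbers are illustrative, not taken from this patch): with tsc_khz = 2,000,000 (a 2 GHz TSC) and HZ = 1000, lpj = 2,000,000 * 1000 / 1000 = 2,000,000 TSC cycles per jiffy, presumably giving early delay loops a sane value before the full calibration in tsc_init() runs.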
+
void __init tsc_init(void)
{
u64 lpj, cyc;
@@ -1346,12 +1365,10 @@ void __init tsc_init(void)
unsigned long calibrate_delay_is_known(void)
{
int sibling, cpu = smp_processor_id();
- struct cpumask *mask = topology_core_cpumask(cpu);
-
- if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
- return 0;
+ int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
+ const struct cpumask *mask = topology_core_cpumask(cpu);
- if (!mask)
+ if (tsc_disabled || !constant_tsc || !mask)
return 0;
sibling = cpumask_any_but(mask, cpu);
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index e76a9881306b..ec534f978867 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -31,6 +31,20 @@ struct tsc_adjust {
static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+/*
+ * TSC's on different sockets may be reset asynchronously.
+ * This may cause the TSC ADJUST value on socket 0 to be NOT 0.
+ */
+bool __read_mostly tsc_async_resets;
+
+void mark_tsc_async_resets(char *reason)
+{
+ if (tsc_async_resets)
+ return;
+ tsc_async_resets = true;
+ pr_info("tsc: Marking TSC async resets true due to %s\n", reason);
+}
+
void tsc_verify_tsc_adjust(bool resume)
{
struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
@@ -39,6 +53,10 @@ void tsc_verify_tsc_adjust(bool resume)
if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
return;
+ /* Skip unnecessary error messages if TSC already unstable */
+ if (check_tsc_unstable())
+ return;
+
/* Rate limit the MSR check */
if (!resume && time_before(jiffies, adj->nextcheck))
return;
@@ -72,12 +90,22 @@ static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
* non zero. We don't do that on non boot cpus because physical
* hotplug should have set the ADJUST register to a value > 0 so
* the TSC is in sync with the already running cpus.
+ *
+ * Also don't force the ADJUST value to zero if that is a valid value
+ * for socket 0 as determined by the system arch. This is required
+ * when multiple sockets are reset asynchronously with each other
+ * and socket 0 may not have a TSC ADJUST value of 0.
*/
if (bootcpu && bootval != 0) {
- pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n", cpu,
- bootval);
- wrmsrl(MSR_IA32_TSC_ADJUST, 0);
- bootval = 0;
+ if (likely(!tsc_async_resets)) {
+ pr_warn(FW_BUG "TSC ADJUST: CPU%u: %lld force to 0\n",
+ cpu, bootval);
+ wrmsrl(MSR_IA32_TSC_ADJUST, 0);
+ bootval = 0;
+ } else {
+ pr_info("TSC ADJUST: CPU%u: %lld NOT forced to 0\n",
+ cpu, bootval);
+ }
}
cur->adjusted = bootval;
}
@@ -91,6 +119,10 @@ bool __init tsc_store_and_check_tsc_adjust(bool bootcpu)
if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
return false;
+ /* Skip unnecessary error messages if TSC already unstable */
+ if (check_tsc_unstable())
+ return false;
+
rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
cur->bootval = bootval;
cur->nextcheck = jiffies + HZ;
@@ -119,6 +151,13 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
cur->warned = false;
/*
+ * If a non-zero TSC value for socket 0 may be valid then the default
+ * adjusted value cannot be assumed to be zero either.
+ */
+ if (tsc_async_resets)
+ cur->adjusted = bootval;
+
+ /*
* Check whether this CPU is the first in a package to come up. In
* this case do not check the boot value against another package
* because the new package might have been physically hotplugged,
@@ -139,10 +178,9 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
* Compare the boot value and complain if it differs in the
* package.
*/
- if (bootval != ref->bootval) {
- pr_warn(FW_BUG "TSC ADJUST differs: Reference CPU%u: %lld CPU%u: %lld\n",
- refcpu, ref->bootval, cpu, bootval);
- }
+ if (bootval != ref->bootval)
+ printk_once(FW_BUG "TSC ADJUST differs within socket(s), fixing all errors\n");
+
/*
* The TSC_ADJUST values in a package must be the same. If the boot
* value on this newly upcoming CPU differs from the adjustment
@@ -150,8 +188,6 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
* adjusted value.
*/
if (bootval != ref->adjusted) {
- pr_warn("TSC ADJUST synchronize: Reference CPU%u: %lld CPU%u: %lld\n",
- refcpu, ref->adjusted, cpu, bootval);
cur->adjusted = ref->adjusted;
wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
}
diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
new file mode 100644
index 000000000000..6ba82be68cff
--- /dev/null
+++ b/arch/x86/kernel/umip.c
@@ -0,0 +1,366 @@
+/*
+ * umip.c Emulation for instructions protected by the Intel User-Mode
+ * Instruction Prevention feature
+ *
+ * Copyright (c) 2017, Intel Corporation.
+ * Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+ */
+
+#include <linux/uaccess.h>
+#include <asm/umip.h>
+#include <asm/traps.h>
+#include <asm/insn.h>
+#include <asm/insn-eval.h>
+#include <linux/ratelimit.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "umip: " fmt
+
+/** DOC: Emulation for User-Mode Instruction Prevention (UMIP)
+ *
+ * The feature User-Mode Instruction Prevention present in recent Intel
+ * processors prevents a group of instructions (sgdt, sidt, sldt, smsw, and str)
+ * from being executed with CPL > 0. Otherwise, a general protection fault is
+ * issued.
+ *
+ * Rather than relaying the general protection fault caused by the UMIP-protected
+ * instructions to user space (in the form of a SIGSEGV signal), the fault can be
+ * trapped and the result of such instructions emulated to provide dummy values.
+ * This allows us to both preserve the current kernel behavior and not reveal the
+ * system resources that UMIP intends to protect (i.e., the locations of the
+ * global descriptor and interrupt descriptor tables, the segment selectors of
+ * the local descriptor table, the value of the task state register and the
+ * contents of the CR0 register).
+ *
+ * This emulation is needed because certain applications (e.g., WineHQ and
+ * DOSEMU2) rely on this subset of instructions to function.
+ *
+ * The instructions protected by UMIP can be split in two groups. Those which
+ * return a kernel memory address (sgdt and sidt) and those which return a
+ * value (sldt, str and smsw).
+ *
+ * For the instructions that return a kernel memory address, applications
+ * such as WineHQ rely on the result being located in the kernel memory space,
+ * not the actual location of the table. The result is emulated as a hard-coded
+ * value that lies close to the top of the kernel memory. The limits for the GDT
+ * and the IDT are set to zero.
+ *
+ * Given that sldt and str are not commonly used in programs that run on WineHQ
+ * or DOSEMU2, they are not emulated.
+ *
+ * The instruction smsw is emulated to return the value that the register CR0
+ * has at boot time, as set in head_32.
+ *
+ * Also, emulation is provided only for 32-bit processes; 64-bit processes
+ * that attempt to use the instructions that UMIP protects will receive the
+ * SIGSEGV signal issued as a consequence of the general protection fault.
+ *
+ * Care is taken to appropriately emulate the results when segmentation is
+ * used. That is, rather than relying on USER_DS and USER_CS, the function
+ * insn_get_addr_ref() inspects the segment descriptor pointed by the
+ * registers in pt_regs. This ensures that we correctly obtain the segment
+ * base address and the address and operand sizes even if the user space
+ * application uses a local descriptor table.
+ */
+
+#define UMIP_DUMMY_GDT_BASE 0xfffe0000
+#define UMIP_DUMMY_IDT_BASE 0xffff0000
+
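As a minimal illustration of the dummy values defined above (a user-space sketch for a 32-bit binary, not part of this patch; 64-bit processes are not emulated), a process executing SGDT on a UMIP-enabled CPU would observe the hard-coded base and a zero limit:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* SGDT stores a 16-bit limit followed by a 32-bit base (2 + 4 bytes). */
		struct __attribute__((packed)) {
			uint16_t limit;
			uint32_t base;
		} gdtr;

		asm volatile ("sgdt %0" : "=m" (gdtr));

		/* Under the emulation described above: base=0xfffe0000, limit=0x0. */
		printf("GDT base=0x%08x limit=0x%04x\n", gdtr.base, gdtr.limit);
		return 0;
	}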
+/*
+ * The SGDT and SIDT instructions store the contents of the global descriptor
+ * table and interrupt table registers, respectively. The destination is a
+ * memory operand of X+2 bytes. X bytes are used to store the base address of
+ * the table and 2 bytes are used to store the limit. In 32-bit processes, the
+ * only processes for which emulation is provided, X has a value of 4.
+ */
+#define UMIP_GDT_IDT_BASE_SIZE 4
+#define UMIP_GDT_IDT_LIMIT_SIZE 2
+
+#define UMIP_INST_SGDT 0 /* 0F 01 /0 */
+#define UMIP_INST_SIDT 1 /* 0F 01 /1 */
+#define UMIP_INST_SMSW 3 /* 0F 01 /4 */
+
+/**
+ * identify_insn() - Identify a UMIP-protected instruction
+ * @insn: Instruction structure with opcode and ModRM byte.
+ *
+ * From the opcode and ModRM.reg in @insn identify, if any, a UMIP-protected
+ * instruction that can be emulated.
+ *
+ * Returns:
+ *
+ * On success, a constant identifying a specific UMIP-protected instruction that
+ * can be emulated.
+ *
+ * -EINVAL on error or when not a UMIP-protected instruction that can be
+ * emulated.
+ */
+static int identify_insn(struct insn *insn)
+{
+ /* By getting modrm we also get the opcode. */
+ insn_get_modrm(insn);
+
+ if (!insn->modrm.nbytes)
+ return -EINVAL;
+
+ /* All the instructions of interest start with 0x0f. */
+ if (insn->opcode.bytes[0] != 0xf)
+ return -EINVAL;
+
+ if (insn->opcode.bytes[1] == 0x1) {
+ switch (X86_MODRM_REG(insn->modrm.value)) {
+ case 0:
+ return UMIP_INST_SGDT;
+ case 1:
+ return UMIP_INST_SIDT;
+ case 4:
+ return UMIP_INST_SMSW;
+ default:
+ return -EINVAL;
+ }
+ }
+
+	/* SLDT and STR are not emulated */
+ return -EINVAL;
+}
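For instance (an illustrative encoding, not taken from the patch): the byte sequence 0f 01 e0 is smsw %eax — opcode 0f 01 with ModRM 0xe0, i.e. mod=3, reg=4, rm=0 — so identify_insn() returns UMIP_INST_SMSW, whereas an encoding with ModRM.reg=2 (lgdt, 0f 01 /2) hits the default case and yields -EINVAL.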
+
+/**
+ * emulate_umip_insn() - Emulate UMIP instructions and return dummy values
+ * @insn: Instruction structure with operands
+ * @umip_inst: A constant indicating the instruction to emulate
+ * @data: Buffer into which the dummy result is stored
+ * @data_size: Size of the emulated result
+ *
+ * Emulate an instruction protected by UMIP and provide a dummy result. The
+ * result of the emulation is saved in @data. The size of the results depends
+ * on both the instruction and type of operand (register vs memory address).
+ * The size of the result is updated in @data_size. The caller is responsible
+ * for providing a @data buffer of at least UMIP_GDT_IDT_BASE_SIZE +
+ * UMIP_GDT_IDT_LIMIT_SIZE bytes.
+ *
+ * Returns:
+ *
+ * 0 on success, -EINVAL on error while emulating.
+ */
+static int emulate_umip_insn(struct insn *insn, int umip_inst,
+ unsigned char *data, int *data_size)
+{
+ unsigned long dummy_base_addr, dummy_value;
+ unsigned short dummy_limit = 0;
+
+ if (!data || !data_size || !insn)
+ return -EINVAL;
+ /*
+ * These two instructions return the base address and limit of the
+ * global and interrupt descriptor table, respectively. According to the
+ * Intel Software Development manual, the base address can be 24-bit,
+ * 32-bit or 64-bit. Limit is always 16-bit. If the operand size is
+ * 16-bit, the returned value of the base address is supposed to be a
+	 * zero-extended 24-bit number. However, it seems that a 32-bit number
+	 * is always returned irrespective of the operand size.
+ */
+ if (umip_inst == UMIP_INST_SGDT || umip_inst == UMIP_INST_SIDT) {
+		/* SGDT and SIDT do not use register operands. */
+ if (X86_MODRM_MOD(insn->modrm.value) == 3)
+ return -EINVAL;
+
+ if (umip_inst == UMIP_INST_SGDT)
+ dummy_base_addr = UMIP_DUMMY_GDT_BASE;
+ else
+ dummy_base_addr = UMIP_DUMMY_IDT_BASE;
+
+ *data_size = UMIP_GDT_IDT_LIMIT_SIZE + UMIP_GDT_IDT_BASE_SIZE;
+
+ memcpy(data + 2, &dummy_base_addr, UMIP_GDT_IDT_BASE_SIZE);
+ memcpy(data, &dummy_limit, UMIP_GDT_IDT_LIMIT_SIZE);
+
+ } else if (umip_inst == UMIP_INST_SMSW) {
+ dummy_value = CR0_STATE;
+
+ /*
+ * Even though the CR0 register has 4 bytes, the number
+ * of bytes to be copied in the result buffer is determined
+ * by whether the operand is a register or a memory location.
+ * If operand is a register, return as many bytes as the operand
+ * size. If operand is memory, return only the two least
+		 * significant bytes of CR0.
+ */
+ if (X86_MODRM_MOD(insn->modrm.value) == 3)
+ *data_size = insn->opnd_bytes;
+ else
+ *data_size = 2;
+
+ memcpy(data, &dummy_value, *data_size);
+ /* STR and SLDT are not emulated */
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * force_sig_info_umip_fault() - Force a SIGSEGV with SEGV_MAPERR
+ * @addr: Address that caused the signal
+ * @regs: Register set containing the instruction pointer
+ *
+ * Force a SIGSEGV signal with SEGV_MAPERR as the error code. This function is
+ * intended to be used to provide a segmentation fault when the result of the
+ * UMIP emulation could not be copied to the user space memory.
+ *
+ * Returns: none
+ */
+static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs)
+{
+ siginfo_t info;
+ struct task_struct *tsk = current;
+
+ tsk->thread.cr2 = (unsigned long)addr;
+ tsk->thread.error_code = X86_PF_USER | X86_PF_WRITE;
+ tsk->thread.trap_nr = X86_TRAP_PF;
+
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = SEGV_MAPERR;
+ info.si_addr = addr;
+ force_sig_info(SIGSEGV, &info, tsk);
+
+ if (!(show_unhandled_signals && unhandled_signal(tsk, SIGSEGV)))
+ return;
+
+ pr_err_ratelimited("%s[%d] umip emulation segfault ip:%lx sp:%lx error:%x in %lx\n",
+ tsk->comm, task_pid_nr(tsk), regs->ip,
+ regs->sp, X86_PF_USER | X86_PF_WRITE,
+ regs->ip);
+}
+
+/**
+ * fixup_umip_exception() - Fixup a general protection fault caused by UMIP
+ * @regs: Registers as saved when entering the #GP handler
+ *
+ * The instructions sgdt, sidt, str, smsw, sldt cause a general protection
+ * fault if executed with CPL > 0 (i.e., from user space). If the offending
+ * user-space process is not in long mode, this function fixes the exception
+ * up and provides dummy results for sgdt, sidt and smsw; str and sldt are not
+ * fixed up. Also long mode user-space processes are not fixed up.
+ *
+ * If operands are memory addresses, results are copied to user-space memory as
+ * indicated by the instruction pointed to by eIP using the registers indicated in
+ * the instruction operands. If operands are registers, results are copied into
+ * the context that was saved when entering kernel mode.
+ *
+ * Returns:
+ *
+ * True if emulation was successful; false if not.
+ */
+bool fixup_umip_exception(struct pt_regs *regs)
+{
+ int not_copied, nr_copied, reg_offset, dummy_data_size, umip_inst;
+ unsigned long seg_base = 0, *reg_addr;
+ /* 10 bytes is the maximum size of the result of UMIP instructions */
+ unsigned char dummy_data[10] = { 0 };
+ unsigned char buf[MAX_INSN_SIZE];
+ void __user *uaddr;
+ struct insn insn;
+ char seg_defs;
+
+ if (!regs)
+ return false;
+
+ /* Do not emulate 64-bit processes. */
+ if (user_64bit_mode(regs))
+ return false;
+
+ /*
+ * If not in user-space long mode, a custom code segment could be in
+ * use. This is true in protected mode (if the process defined a local
+ * descriptor table), or virtual-8086 mode. In most of the cases
+ * seg_base will be zero as in USER_CS.
+ */
+ if (!user_64bit_mode(regs))
+ seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
+
+ if (seg_base == -1L)
+ return false;
+
+ not_copied = copy_from_user(buf, (void __user *)(seg_base + regs->ip),
+ sizeof(buf));
+ nr_copied = sizeof(buf) - not_copied;
+
+ /*
+ * The copy_from_user above could have failed if user code is protected
+ * by a memory protection key. Give up on emulation in such a case.
+ * Should we issue a page fault?
+ */
+ if (!nr_copied)
+ return false;
+
+ insn_init(&insn, buf, nr_copied, user_64bit_mode(regs));
+
+ /*
+ * Override the default operand and address sizes with what is specified
+ * in the code segment descriptor. The instruction decoder only sets
+	 * the address size to either 4 or 8 address bytes and does nothing
+	 * for the operand bytes. This is OK for most of the cases, but we could
+ * have special cases where, for instance, a 16-bit code segment
+ * descriptor is used.
+ * If there is an address override prefix, the instruction decoder
+ * correctly updates these values, even for 16-bit defaults.
+ */
+ seg_defs = insn_get_code_seg_params(regs);
+ if (seg_defs == -EINVAL)
+ return false;
+
+ insn.addr_bytes = INSN_CODE_SEG_ADDR_SZ(seg_defs);
+ insn.opnd_bytes = INSN_CODE_SEG_OPND_SZ(seg_defs);
+
+ insn_get_length(&insn);
+ if (nr_copied < insn.length)
+ return false;
+
+ umip_inst = identify_insn(&insn);
+ if (umip_inst < 0)
+ return false;
+
+ if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size))
+ return false;
+
+ /*
+ * If operand is a register, write result to the copy of the register
+ * value that was pushed to the stack when entering into kernel mode.
+ * Upon exit, the value we write will be restored to the actual hardware
+ * register.
+ */
+ if (X86_MODRM_MOD(insn.modrm.value) == 3) {
+ reg_offset = insn_get_modrm_rm_off(&insn, regs);
+
+ /*
+ * Negative values are usually errors. In memory addressing,
+ * the exception is -EDOM. Since we expect a register operand,
+ * all negative values are errors.
+ */
+ if (reg_offset < 0)
+ return false;
+
+ reg_addr = (unsigned long *)((unsigned long)regs + reg_offset);
+ memcpy(reg_addr, dummy_data, dummy_data_size);
+ } else {
+ uaddr = insn_get_addr_ref(&insn, regs);
+ if ((unsigned long)uaddr == -1L)
+ return false;
+
+ nr_copied = copy_to_user(uaddr, dummy_data, dummy_data_size);
+ if (nr_copied > 0) {
+ /*
+ * If copy fails, send a signal and tell caller that
+ * fault was fixed up.
+ */
+ force_sig_info_umip_fault(uaddr, regs);
+ return true;
+ }
+ }
+
+ /* increase IP to let the program keep going */
+ regs->ip += insn.length;
+ return true;
+}
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index b95007e7c1b3..a3f973b2c97a 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -279,7 +279,7 @@ static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
if (!stack_access_ok(state, addr, sizeof(long)))
return false;
- *val = READ_ONCE_TASK_STACK(state->task, *(unsigned long *)addr);
+ *val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
return true;
}
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 495c776de4b4..a3755d293a48 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -271,12 +271,15 @@ static bool is_prefix_bad(struct insn *insn)
int i;
for (i = 0; i < insn->prefixes.nbytes; i++) {
- switch (insn->prefixes.bytes[i]) {
- case 0x26: /* INAT_PFX_ES */
- case 0x2E: /* INAT_PFX_CS */
- case 0x36: /* INAT_PFX_DS */
- case 0x3E: /* INAT_PFX_SS */
- case 0xF0: /* INAT_PFX_LOCK */
+ insn_attr_t attr;
+
+ attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+ switch (attr) {
+ case INAT_MAKE_PREFIX(INAT_PFX_ES):
+ case INAT_MAKE_PREFIX(INAT_PFX_CS):
+ case INAT_MAKE_PREFIX(INAT_PFX_DS):
+ case INAT_MAKE_PREFIX(INAT_PFX_SS):
+ case INAT_MAKE_PREFIX(INAT_PFX_LOCK):
return true;
}
}
diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
index 014ea59aa153..3d3c2f71f617 100644
--- a/arch/x86/kernel/verify_cpu.S
+++ b/arch/x86/kernel/verify_cpu.S
@@ -33,7 +33,7 @@
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
-verify_cpu:
+ENTRY(verify_cpu)
pushf # Save caller passed flags
push $0 # Kill any dangerous flags
popf
@@ -139,3 +139,4 @@ verify_cpu:
popf # Restore caller passed flags
xorl %eax, %eax
ret
+ENDPROC(verify_cpu)
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 68244742ecb0..5edb27f1a2c4 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -55,6 +55,7 @@
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/vm86.h>
+#include <asm/switch_to.h>
/*
* Known problems:
@@ -94,7 +95,6 @@
void save_v86_state(struct kernel_vm86_regs *regs, int retval)
{
- struct tss_struct *tss;
struct task_struct *tsk = current;
struct vm86plus_struct __user *user;
struct vm86 *vm86 = current->thread.vm86;
@@ -146,12 +146,13 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
do_exit(SIGSEGV);
}
- tss = &per_cpu(cpu_tss, get_cpu());
+ preempt_disable();
tsk->thread.sp0 = vm86->saved_sp0;
tsk->thread.sysenter_cs = __KERNEL_CS;
- load_sp0(tss, &tsk->thread);
+ update_sp0(tsk);
+ refresh_sysenter_cs(&tsk->thread);
vm86->saved_sp0 = 0;
- put_cpu();
+ preempt_enable();
memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));
@@ -237,7 +238,6 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
{
- struct tss_struct *tss;
struct task_struct *tsk = current;
struct vm86 *vm86 = tsk->thread.vm86;
struct kernel_vm86_regs vm86regs;
@@ -365,15 +365,17 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
vm86->saved_sp0 = tsk->thread.sp0;
lazy_save_gs(vm86->regs32.gs);
- tss = &per_cpu(cpu_tss, get_cpu());
/* make room for real-mode segments */
+ preempt_disable();
tsk->thread.sp0 += 16;
- if (static_cpu_has(X86_FEATURE_SEP))
+ if (static_cpu_has(X86_FEATURE_SEP)) {
tsk->thread.sysenter_cs = 0;
+ refresh_sysenter_cs(&tsk->thread);
+ }
- load_sp0(tss, &tsk->thread);
- put_cpu();
+ update_sp0(tsk);
+ preempt_enable();
if (vm86->flags & VM86_SCREEN_BITMAP)
mark_screen_rdonly(tsk->mm);
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index b034b1b14b9c..44685fb2a192 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -26,9 +26,6 @@
#define TOPOLOGY_REGISTER_OFFSET 0x10
-/* Flag below is initialized once during vSMP PCI initialization. */
-static int irq_routing_comply = 1;
-
#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
/*
* Interrupt control on vSMPowered systems:
@@ -105,9 +102,6 @@ static void __init set_vsmp_pv_ops(void)
if (cap & ctl & BIT(8)) {
ctl &= ~BIT(8);
- /* Interrupt routing set to ignore */
- irq_routing_comply = 0;
-
#ifdef CONFIG_PROC_FS
/* Don't let users change irq affinity via procfs */
no_irq_affinity = 1;
@@ -211,23 +205,10 @@ static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
return hard_smp_processor_id() >> index_msb;
}
-/*
- * In vSMP, all cpus should be capable of handling interrupts, regardless of
- * the APIC used.
- */
-static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
- const struct cpumask *mask)
-{
- cpumask_setall(retmask);
-}
-
static void vsmp_apic_post_init(void)
{
/* need to update phys_pkg_id */
apic->phys_pkg_id = apicid_phys_pkg_id;
-
- if (!irq_routing_comply)
- apic->vector_allocation_domain = fill_vector_allocation_domain;
}
void __init vsmp_init(void)
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index a088b2c47f73..1151ccd72ce9 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -28,6 +28,8 @@ void x86_init_noop(void) { }
void __init x86_init_uint_noop(unsigned int unused) { }
int __init iommu_init_noop(void) { return 0; }
void iommu_shutdown_noop(void) { }
+bool __init bool_x86_init_noop(void) { return false; }
+void x86_op_int_noop(int cpu) { }
/*
* The platform setup functions are preset with the default functions
@@ -55,6 +57,7 @@ struct x86_init_ops x86_init __initdata = {
.pre_vector_init = init_ISA_irqs,
.intr_init = native_init_IRQ,
.trap_init = x86_init_noop,
+ .intr_mode_init = apic_intr_mode_init
},
.oem = {
@@ -81,6 +84,13 @@ struct x86_init_ops x86_init __initdata = {
.init_irq = x86_default_pci_init_irq,
.fixup_irqs = x86_default_pci_fixup_irqs,
},
+
+ .hyper = {
+ .init_platform = x86_init_noop,
+ .guest_late_init = x86_init_noop,
+ .x2apic_available = bool_x86_init_noop,
+ .init_mem_mapping = x86_init_noop,
+ },
};
struct x86_cpuinit_ops x86_cpuinit = {
@@ -101,6 +111,7 @@ struct x86_platform_ops x86_platform __ro_after_init = {
.get_nmi_reason = default_get_nmi_reason,
.save_sched_clock_state = tsc_save_sched_clock_state,
.restore_sched_clock_state = tsc_restore_sched_clock_state,
+ .hyper.pin_vcpu = x86_op_int_noop,
};
EXPORT_SYMBOL_GPL(x86_platform);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7a69cf053711..a119b361b8b7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -443,7 +443,7 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
static u64 __get_spte_lockless(u64 *sptep)
{
- return ACCESS_ONCE(*sptep);
+ return READ_ONCE(*sptep);
}
#else
union split_spte {
@@ -4819,7 +4819,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
* If we don't have indirect shadow pages, it means no page is
* write-protected, so we can exit simply.
*/
- if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+ if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
return;
remote_flush = local_flush = false;
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index ea67dc876316..01c1371f39f8 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -157,7 +157,7 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
return false;
index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
- return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
+ return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
}
void kvm_page_track_cleanup(struct kvm *kvm)
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 457f681ef379..7b181b61170e 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -24,7 +24,7 @@ lib-y := delay.o misc.o cmdline.o cpu.o
lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
+lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
new file mode 100644
index 000000000000..35625d279458
--- /dev/null
+++ b/arch/x86/lib/insn-eval.c
@@ -0,0 +1,1364 @@
+/*
+ * Utility functions for x86 operand and address decoding
+ *
+ * Copyright (C) Intel Corporation 2017
+ */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ratelimit.h>
+#include <linux/mmu_context.h>
+#include <asm/desc_defs.h>
+#include <asm/desc.h>
+#include <asm/inat.h>
+#include <asm/insn.h>
+#include <asm/insn-eval.h>
+#include <asm/ldt.h>
+#include <asm/vm86.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "insn: " fmt
+
+enum reg_type {
+ REG_TYPE_RM = 0,
+ REG_TYPE_INDEX,
+ REG_TYPE_BASE,
+};
+
+/**
+ * is_string_insn() - Determine if instruction is a string instruction
+ * @insn: Instruction containing the opcode to inspect
+ *
+ * Returns:
+ *
+ * true if the instruction, determined by the opcode, is any of the
+ * string instructions as defined in the Intel Software Development manual.
+ * False otherwise.
+ */
+static bool is_string_insn(struct insn *insn)
+{
+ insn_get_opcode(insn);
+
+ /* All string instructions have a 1-byte opcode. */
+ if (insn->opcode.nbytes != 1)
+ return false;
+
+ switch (insn->opcode.bytes[0]) {
+ case 0x6c ... 0x6f: /* INS, OUTS */
+ case 0xa4 ... 0xa7: /* MOVS, CMPS */
+ case 0xaa ... 0xaf: /* STOS, LODS, SCAS */
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * get_seg_reg_override_idx() - obtain segment register override index
+ * @insn: Valid instruction with segment override prefixes
+ *
+ * Inspect the instruction prefixes in @insn and find segment overrides, if any.
+ *
+ * Returns:
+ *
+ * A constant identifying the segment register to use, among CS, SS, DS,
+ * ES, FS, or GS. INAT_SEG_REG_DEFAULT is returned if no segment override
+ * prefixes were found.
+ *
+ * -EINVAL in case of error.
+ */
+static int get_seg_reg_override_idx(struct insn *insn)
+{
+ int idx = INAT_SEG_REG_DEFAULT;
+ int num_overrides = 0, i;
+
+ insn_get_prefixes(insn);
+
+ /* Look for any segment override prefixes. */
+ for (i = 0; i < insn->prefixes.nbytes; i++) {
+ insn_attr_t attr;
+
+ attr = inat_get_opcode_attribute(insn->prefixes.bytes[i]);
+ switch (attr) {
+ case INAT_MAKE_PREFIX(INAT_PFX_CS):
+ idx = INAT_SEG_REG_CS;
+ num_overrides++;
+ break;
+ case INAT_MAKE_PREFIX(INAT_PFX_SS):
+ idx = INAT_SEG_REG_SS;
+ num_overrides++;
+ break;
+ case INAT_MAKE_PREFIX(INAT_PFX_DS):
+ idx = INAT_SEG_REG_DS;
+ num_overrides++;
+ break;
+ case INAT_MAKE_PREFIX(INAT_PFX_ES):
+ idx = INAT_SEG_REG_ES;
+ num_overrides++;
+ break;
+ case INAT_MAKE_PREFIX(INAT_PFX_FS):
+ idx = INAT_SEG_REG_FS;
+ num_overrides++;
+ break;
+ case INAT_MAKE_PREFIX(INAT_PFX_GS):
+ idx = INAT_SEG_REG_GS;
+ num_overrides++;
+ break;
+ /* No default action needed. */
+ }
+ }
+
+ /* More than one segment override prefix leads to undefined behavior. */
+ if (num_overrides > 1)
+ return -EINVAL;
+
+ return idx;
+}
+
+/**
+ * check_seg_overrides() - check if segment override prefixes are allowed
+ * @insn: Valid instruction with segment override prefixes
+ * @regoff: Operand offset, in pt_regs, for which the check is performed
+ *
+ * For a particular register used in register-indirect addressing, determine if
+ * segment override prefixes can be used. Specifically, no overrides are allowed
+ * for rDI if used with a string instruction.
+ *
+ * Returns:
+ *
+ * True if segment override prefixes can be used with the register indicated
+ * in @regoff. False otherwise.
+ */
+static bool check_seg_overrides(struct insn *insn, int regoff)
+{
+ if (regoff == offsetof(struct pt_regs, di) && is_string_insn(insn))
+ return false;
+
+ return true;
+}
+
+/**
+ * resolve_default_seg() - resolve default segment register index for an operand
+ * @insn: Instruction with opcode and address size. Must be valid.
+ * @regs: Register values as seen when entering kernel mode
+ * @off: Operand offset, in pt_regs, for which resolution is needed
+ *
+ * Resolve the default segment register index associated with the instruction
+ * operand register indicated by @off. Such index is resolved based on defaults
+ * described in the Intel Software Development Manual.
+ *
+ * Returns:
+ *
+ * If in protected mode, a constant identifying the segment register to use,
+ * among CS, SS, ES or DS. If in long mode, INAT_SEG_REG_IGNORE.
+ *
+ * -EINVAL in case of error.
+ */
+static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off)
+{
+ if (user_64bit_mode(regs))
+ return INAT_SEG_REG_IGNORE;
+ /*
+ * Resolve the default segment register as described in Section 3.7.4
+ * of the Intel Software Development Manual Vol. 1:
+ *
+ * + DS for all references involving r[ABCD]X, and rSI.
+ * + If used in a string instruction, ES for rDI. Otherwise, DS.
+ * + AX, CX and DX are not valid register operands in 16-bit address
+ * encodings but are valid for 32-bit and 64-bit encodings.
+	 *  + -EDOM is reserved to identify cases in which no register
+ * is used (i.e., displacement-only addressing). Use DS.
+ * + SS for rSP or rBP.
+ * + CS for rIP.
+ */
+
+ switch (off) {
+ case offsetof(struct pt_regs, ax):
+ case offsetof(struct pt_regs, cx):
+ case offsetof(struct pt_regs, dx):
+ /* Need insn to verify address size. */
+ if (insn->addr_bytes == 2)
+ return -EINVAL;
+
+ case -EDOM:
+ case offsetof(struct pt_regs, bx):
+ case offsetof(struct pt_regs, si):
+ return INAT_SEG_REG_DS;
+
+ case offsetof(struct pt_regs, di):
+ if (is_string_insn(insn))
+ return INAT_SEG_REG_ES;
+ return INAT_SEG_REG_DS;
+
+ case offsetof(struct pt_regs, bp):
+ case offsetof(struct pt_regs, sp):
+ return INAT_SEG_REG_SS;
+
+ case offsetof(struct pt_regs, ip):
+ return INAT_SEG_REG_CS;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * resolve_seg_reg() - obtain segment register index
+ * @insn: Instruction with operands
+ * @regs: Register values as seen when entering kernel mode
+ * @regoff:	Operand offset, in pt_regs, used to determine segment register
+ *
+ * Determine the segment register associated with the operands and, if
+ * applicable, prefixes and the instruction pointed by @insn.
+ *
+ * The segment register associated to an operand used in register-indirect
+ * addressing depends on:
+ *
+ * a) Whether running in long mode (in such a case segments are ignored, except
+ * if FS or GS are used).
+ *
+ * b) Whether segment override prefixes can be used. Certain instructions and
+ * registers do not allow override prefixes.
+ *
+ * c) Whether segment override prefixes are found in the instruction prefixes.
+ *
+ * d) If there are no segment override prefixes or they cannot be used, the
+ * default segment register associated with the operand register is used.
+ *
+ * The function checks first if segment override prefixes can be used with the
+ * operand indicated by @regoff. If allowed, obtain such overridden segment
+ * register index. Lastly, if no prefixes were found or they cannot be used, resolve
+ * the segment register index to use based on the defaults described in the
+ * Intel documentation. In long mode, all segment register indexes will be
+ * ignored, except if overrides were found for FS or GS. All these operations
+ * are done using helper functions.
+ *
+ * The operand register, @regoff, is represented as the offset from the base of
+ * pt_regs.
+ *
+ * As stated, the main use of this function is to determine the segment register
+ * index based on the instruction, its operands and prefixes. Hence, @insn
+ * must be valid. However, if @regoff indicates rIP, we don't need to inspect
+ * @insn at all, as CS is always used in that case. This case is checked
+ * before proceeding further.
+ *
+ * Please note that this function does not return the value in the segment
+ * register (i.e., the segment selector) but our defined index. The segment
+ * selector needs to be obtained using get_segment_selector() and passing the
+ * segment register index resolved by this function.
+ *
+ * Returns:
+ *
+ * An index identifying the segment register to use, among CS, SS, DS,
+ * ES, FS, or GS. INAT_SEG_REG_IGNORE is returned if running in long mode.
+ *
+ * -EINVAL in case of error.
+ */
+static int resolve_seg_reg(struct insn *insn, struct pt_regs *regs, int regoff)
+{
+ int idx;
+
+ /*
+ * In the unlikely event of having to resolve the segment register
+ * index for rIP, do it first. Segment override prefixes should not
+ * be used. Hence, it is not necessary to inspect the instruction,
+ * which may be invalid at this point.
+ */
+ if (regoff == offsetof(struct pt_regs, ip)) {
+ if (user_64bit_mode(regs))
+ return INAT_SEG_REG_IGNORE;
+ else
+ return INAT_SEG_REG_CS;
+ }
+
+ if (!insn)
+ return -EINVAL;
+
+ if (!check_seg_overrides(insn, regoff))
+ return resolve_default_seg(insn, regs, regoff);
+
+ idx = get_seg_reg_override_idx(insn);
+ if (idx < 0)
+ return idx;
+
+ if (idx == INAT_SEG_REG_DEFAULT)
+ return resolve_default_seg(insn, regs, regoff);
+
+ /*
+ * In long mode, segment override prefixes are ignored, except for
+ * overrides for FS and GS.
+ */
+ if (user_64bit_mode(regs)) {
+ if (idx != INAT_SEG_REG_FS &&
+ idx != INAT_SEG_REG_GS)
+ idx = INAT_SEG_REG_IGNORE;
+ }
+
+ return idx;
+}
+
+/**
+ * get_segment_selector() - obtain segment selector
+ * @regs: Register values as seen when entering kernel mode
+ * @seg_reg_idx: Segment register index to use
+ *
+ * Obtain the segment selector from any of the CS, SS, DS, ES, FS, GS segment
+ * registers. In CONFIG_X86_32, the segment is obtained from either pt_regs or
+ * kernel_vm86_regs as applicable. In CONFIG_X86_64, CS and SS are obtained
+ * from pt_regs. DS, ES, FS and GS are obtained by reading the actual CPU
+ * registers. This is done only for completeness, as in CONFIG_X86_64 segment
+ * registers are ignored.
+ *
+ * Returns:
+ *
+ * Value of the segment selector, including null when running in
+ * long mode.
+ *
+ * -EINVAL on error.
+ */
+static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
+{
+#ifdef CONFIG_X86_64
+ unsigned short sel;
+
+ switch (seg_reg_idx) {
+ case INAT_SEG_REG_IGNORE:
+ return 0;
+ case INAT_SEG_REG_CS:
+ return (unsigned short)(regs->cs & 0xffff);
+ case INAT_SEG_REG_SS:
+ return (unsigned short)(regs->ss & 0xffff);
+ case INAT_SEG_REG_DS:
+ savesegment(ds, sel);
+ return sel;
+ case INAT_SEG_REG_ES:
+ savesegment(es, sel);
+ return sel;
+ case INAT_SEG_REG_FS:
+ savesegment(fs, sel);
+ return sel;
+ case INAT_SEG_REG_GS:
+ savesegment(gs, sel);
+ return sel;
+ default:
+ return -EINVAL;
+ }
+#else /* CONFIG_X86_32 */
+ struct kernel_vm86_regs *vm86regs = (struct kernel_vm86_regs *)regs;
+
+ if (v8086_mode(regs)) {
+ switch (seg_reg_idx) {
+ case INAT_SEG_REG_CS:
+ return (unsigned short)(regs->cs & 0xffff);
+ case INAT_SEG_REG_SS:
+ return (unsigned short)(regs->ss & 0xffff);
+ case INAT_SEG_REG_DS:
+ return vm86regs->ds;
+ case INAT_SEG_REG_ES:
+ return vm86regs->es;
+ case INAT_SEG_REG_FS:
+ return vm86regs->fs;
+ case INAT_SEG_REG_GS:
+ return vm86regs->gs;
+ case INAT_SEG_REG_IGNORE:
+ /* fall through */
+ default:
+ return -EINVAL;
+ }
+ }
+
+ switch (seg_reg_idx) {
+ case INAT_SEG_REG_CS:
+ return (unsigned short)(regs->cs & 0xffff);
+ case INAT_SEG_REG_SS:
+ return (unsigned short)(regs->ss & 0xffff);
+ case INAT_SEG_REG_DS:
+ return (unsigned short)(regs->ds & 0xffff);
+ case INAT_SEG_REG_ES:
+ return (unsigned short)(regs->es & 0xffff);
+ case INAT_SEG_REG_FS:
+ return (unsigned short)(regs->fs & 0xffff);
+ case INAT_SEG_REG_GS:
+ /*
+ * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS.
+ * The macro below takes care of both cases.
+ */
+ return get_user_gs(regs);
+ case INAT_SEG_REG_IGNORE:
+ /* fall through */
+ default:
+ return -EINVAL;
+ }
+#endif /* CONFIG_X86_64 */
+}
+
+static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
+ enum reg_type type)
+{
+ int regno = 0;
+
+ static const int regoff[] = {
+ offsetof(struct pt_regs, ax),
+ offsetof(struct pt_regs, cx),
+ offsetof(struct pt_regs, dx),
+ offsetof(struct pt_regs, bx),
+ offsetof(struct pt_regs, sp),
+ offsetof(struct pt_regs, bp),
+ offsetof(struct pt_regs, si),
+ offsetof(struct pt_regs, di),
+#ifdef CONFIG_X86_64
+ offsetof(struct pt_regs, r8),
+ offsetof(struct pt_regs, r9),
+ offsetof(struct pt_regs, r10),
+ offsetof(struct pt_regs, r11),
+ offsetof(struct pt_regs, r12),
+ offsetof(struct pt_regs, r13),
+ offsetof(struct pt_regs, r14),
+ offsetof(struct pt_regs, r15),
+#endif
+ };
+ int nr_registers = ARRAY_SIZE(regoff);
+ /*
+	 * Don't possibly decode a 32-bit instruction as
+ * reading a 64-bit-only register.
+ */
+ if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64)
+ nr_registers -= 8;
+
+ switch (type) {
+ case REG_TYPE_RM:
+ regno = X86_MODRM_RM(insn->modrm.value);
+
+ /*
+ * ModRM.mod == 0 and ModRM.rm == 5 means a 32-bit displacement
+ * follows the ModRM byte.
+ */
+ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5)
+ return -EDOM;
+
+ if (X86_REX_B(insn->rex_prefix.value))
+ regno += 8;
+ break;
+
+ case REG_TYPE_INDEX:
+ regno = X86_SIB_INDEX(insn->sib.value);
+ if (X86_REX_X(insn->rex_prefix.value))
+ regno += 8;
+
+ /*
+ * If ModRM.mod != 3 and SIB.index = 4 the scale*index
+ * portion of the address computation is null. This is
+ * true only if REX.X is 0. In such a case, the SIB index
+		 * is not used in the address computation.
+ */
+ if (X86_MODRM_MOD(insn->modrm.value) != 3 && regno == 4)
+ return -EDOM;
+ break;
+
+ case REG_TYPE_BASE:
+ regno = X86_SIB_BASE(insn->sib.value);
+ /*
+ * If ModRM.mod is 0 and SIB.base == 5, the base of the
+ * register-indirect addressing is 0. In this case, a
+ * 32-bit displacement follows the SIB byte.
+ */
+ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5)
+ return -EDOM;
+
+ if (X86_REX_B(insn->rex_prefix.value))
+ regno += 8;
+ break;
+
+ default:
+ pr_err_ratelimited("invalid register type: %d\n", type);
+ return -EINVAL;
+ }
+
+ if (regno >= nr_registers) {
+ WARN_ONCE(1, "decoded an instruction with an invalid register");
+ return -EINVAL;
+ }
+ return regoff[regno];
+}
+
+/**
+ * get_reg_offset_16() - Obtain offset of register indicated by instruction
+ * @insn: Instruction containing ModRM byte
+ * @regs: Register values as seen when entering kernel mode
+ * @offs1: Offset of the first operand register
+ * @offs2:	Offset of the second operand register, if applicable
+ *
+ * Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte
+ * in @insn. This function is to be used with 16-bit address encodings. The
+ * @offs1 and @offs2 will be written with the offsets of the two registers
+ * indicated by the instruction. In cases where any of the registers is not
+ * referenced by the instruction, the value will be set to -EDOM.
+ *
+ * Returns:
+ *
+ * 0 on success, -EINVAL on error.
+ */
+static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
+ int *offs1, int *offs2)
+{
+ /*
+ * 16-bit addressing can use one or two registers. Specifics of
+ * encodings are given in Table 2-1. "16-Bit Addressing Forms with the
+ * ModR/M Byte" of the Intel Software Development Manual.
+ */
+ static const int regoff1[] = {
+ offsetof(struct pt_regs, bx),
+ offsetof(struct pt_regs, bx),
+ offsetof(struct pt_regs, bp),
+ offsetof(struct pt_regs, bp),
+ offsetof(struct pt_regs, si),
+ offsetof(struct pt_regs, di),
+ offsetof(struct pt_regs, bp),
+ offsetof(struct pt_regs, bx),
+ };
+
+ static const int regoff2[] = {
+ offsetof(struct pt_regs, si),
+ offsetof(struct pt_regs, di),
+ offsetof(struct pt_regs, si),
+ offsetof(struct pt_regs, di),
+ -EDOM,
+ -EDOM,
+ -EDOM,
+ -EDOM,
+ };
+
+ if (!offs1 || !offs2)
+ return -EINVAL;
+
+ /* Operand is a register, use the generic function. */
+ if (X86_MODRM_MOD(insn->modrm.value) == 3) {
+ *offs1 = insn_get_modrm_rm_off(insn, regs);
+ *offs2 = -EDOM;
+ return 0;
+ }
+
+ *offs1 = regoff1[X86_MODRM_RM(insn->modrm.value)];
+ *offs2 = regoff2[X86_MODRM_RM(insn->modrm.value)];
+
+ /*
+ * If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement-
+ * only addressing. This means that no registers are involved in
+ * computing the effective address. Thus, ensure that the first
+	 * register offset is invalid. The second register offset is already
+ * invalid under the aforementioned conditions.
+ */
+ if ((X86_MODRM_MOD(insn->modrm.value) == 0) &&
+ (X86_MODRM_RM(insn->modrm.value) == 6))
+ *offs1 = -EDOM;
+
+ return 0;
+}
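A couple of illustrative decodes of the tables above (values hypothetical): ModRM = 0x42 (mod=01, rm=010) yields *offs1 = offsetof(struct pt_regs, bp) and *offs2 = offsetof(struct pt_regs, si), i.e. the BP+SI+disp8 form of Table 2-1; ModRM = 0x06 (mod=00, rm=110) is displacement-only addressing, so both offsets end up as -EDOM.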
+
+/**
+ * get_desc() - Obtain pointer to a segment descriptor
+ * @sel: Segment selector
+ *
+ * Given a segment selector, obtain a pointer to the segment descriptor.
+ * Both global and local descriptor tables are supported.
+ *
+ * Returns:
+ *
+ * Pointer to segment descriptor on success.
+ *
+ * NULL on error.
+ */
+static struct desc_struct *get_desc(unsigned short sel)
+{
+ struct desc_ptr gdt_desc = {0, 0};
+ unsigned long desc_base;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+ struct desc_struct *desc = NULL;
+ struct ldt_struct *ldt;
+
+ /* Bits [15:3] contain the index of the desired entry. */
+ sel >>= 3;
+
+ mutex_lock(&current->active_mm->context.lock);
+ ldt = current->active_mm->context.ldt;
+ if (ldt && sel < ldt->nr_entries)
+ desc = &ldt->entries[sel];
+
+ mutex_unlock(&current->active_mm->context.lock);
+
+ return desc;
+ }
+#endif
+ native_store_gdt(&gdt_desc);
+
+ /*
+ * Segment descriptors have a size of 8 bytes. Thus, the index is
+ * multiplied by 8 to obtain the memory offset of the desired descriptor
+ * from the base of the GDT. As bits [15:3] of the segment selector
+ * contain the index, it can be regarded as multiplied by 8 already.
+ * All that remains is to clear bits [2:0].
+ */
+ desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK);
+
+ if (desc_base > gdt_desc.size)
+ return NULL;
+
+ return (struct desc_struct *)(gdt_desc.address + desc_base);
+}
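As a worked example of the selector arithmetic above (selector value illustrative): sel = 0x2b has RPL=3 and TI=0 (GDT) in bits [2:0] and index 5 in bits [15:3]; masking out the low three bits gives desc_base = 0x28 = 5 * 8, the offset of the sixth descriptor from gdt_desc.address.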
+
+/**
+ * insn_get_seg_base() - Obtain base address of segment descriptor.
+ * @regs: Register values as seen when entering kernel mode
+ * @seg_reg_idx: Index of the segment register pointing to seg descriptor
+ *
+ * Obtain the base address of the segment as indicated by the segment descriptor
+ * pointed to by the segment selector. The segment selector is obtained from the
+ * input segment register index @seg_reg_idx.
+ *
+ * Returns:
+ *
+ * In protected mode, base address of the segment. Zero in long mode,
+ * except when FS or GS are used. In virtual-8086 mode, the segment
+ * selector shifted 4 bits to the right.
+ *
+ * -1L in case of error.
+ */
+unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
+{
+ struct desc_struct *desc;
+ short sel;
+
+ sel = get_segment_selector(regs, seg_reg_idx);
+ if (sel < 0)
+ return -1L;
+
+ if (v8086_mode(regs))
+ /*
+ * Base is simply the segment selector shifted 4
+ * bits to the right.
+ */
+ return (unsigned long)(sel << 4);
+
+ if (user_64bit_mode(regs)) {
+ /*
+ * Only FS or GS will have a base address, the rest of
+ * the segments' bases are forced to 0.
+ */
+ unsigned long base;
+
+ if (seg_reg_idx == INAT_SEG_REG_FS)
+ rdmsrl(MSR_FS_BASE, base);
+ else if (seg_reg_idx == INAT_SEG_REG_GS)
+ /*
+ * swapgs was called at the kernel entry point. Thus,
+ * MSR_KERNEL_GS_BASE will have the user-space GS base.
+ */
+ rdmsrl(MSR_KERNEL_GS_BASE, base);
+ else
+ base = 0;
+ return base;
+ }
+
+ /* In protected mode the segment selector cannot be null. */
+ if (!sel)
+ return -1L;
+
+ desc = get_desc(sel);
+ if (!desc)
+ return -1L;
+
+ return get_desc_base(desc);
+}
+
+/**
+ * get_seg_limit() - Obtain the limit of a segment descriptor
+ * @regs: Register values as seen when entering kernel mode
+ * @seg_reg_idx: Index of the segment register pointing to seg descriptor
+ *
+ * Obtain the limit of the segment as indicated by the segment descriptor
+ * pointed to by the segment selector. The segment selector is obtained from the
+ * input segment register index @seg_reg_idx.
+ *
+ * Returns:
+ *
+ * In protected mode, the limit of the segment descriptor in bytes.
+ * In long mode and virtual-8086 mode, segment limits are not enforced. Thus,
+ * limit is returned as -1L to imply a limit-less segment.
+ *
+ * Zero is returned on error.
+ */
+static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
+{
+ struct desc_struct *desc;
+ unsigned long limit;
+ short sel;
+
+ sel = get_segment_selector(regs, seg_reg_idx);
+ if (sel < 0)
+ return 0;
+
+ if (user_64bit_mode(regs) || v8086_mode(regs))
+ return -1L;
+
+ if (!sel)
+ return 0;
+
+ desc = get_desc(sel);
+ if (!desc)
+ return 0;
+
+ /*
+ * If the granularity bit is set, the limit is given in multiples
+ * of 4096. This also means that the 12 least significant bits are
+ * not tested when checking the segment limits. In practice,
+ * this means that the segment ends in (limit << 12) + 0xfff.
+ */
+ limit = get_desc_limit(desc);
+ if (desc->g)
+ limit = (limit << 12) + 0xfff;
+
+ return limit;
+}
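For example, a flat 32-bit segment with a raw limit of 0xfffff and the granularity bit set expands, per the computation above, to (0xfffff << 12) + 0xfff = 0xffffffff; with the granularity bit clear the same raw limit would describe a segment ending at offset 0xfffff.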
+
+/**
+ * insn_get_code_seg_params() - Obtain code segment parameters
+ * @regs: Structure with register values as seen when entering kernel mode
+ *
+ * Obtain address and operand sizes of the code segment. It is obtained from the
+ * selector contained in the CS register in regs. In protected mode, the default
+ * address is determined by inspecting the L and D bits of the segment
+ * descriptor. In virtual-8086 mode, the default is always two bytes for both
+ * address and operand sizes.
+ *
+ * Returns:
+ *
+ * A signed 8-bit value containing the default parameters on success.
+ *
+ * -EINVAL on error.
+ */
+char insn_get_code_seg_params(struct pt_regs *regs)
+{
+ struct desc_struct *desc;
+ short sel;
+
+ if (v8086_mode(regs))
+ /* Address and operand size are both 16-bit. */
+ return INSN_CODE_SEG_PARAMS(2, 2);
+
+ sel = get_segment_selector(regs, INAT_SEG_REG_CS);
+ if (sel < 0)
+ return sel;
+
+ desc = get_desc(sel);
+ if (!desc)
+ return -EINVAL;
+
+ /*
+	 * The most significant bit of the Type field of the segment descriptor
+ * determines whether a segment contains data or code. If this is a data
+ * segment, return error.
+ */
+ if (!(desc->type & BIT(3)))
+ return -EINVAL;
+
+ switch ((desc->l << 1) | desc->d) {
+ case 0: /*
+ * Legacy mode. CS.L=0, CS.D=0. Address and operand size are
+ * both 16-bit.
+ */
+ return INSN_CODE_SEG_PARAMS(2, 2);
+ case 1: /*
+ * Legacy mode. CS.L=0, CS.D=1. Address and operand size are
+ * both 32-bit.
+ */
+ return INSN_CODE_SEG_PARAMS(4, 4);
+ case 2: /*
+ * IA-32e 64-bit mode. CS.L=1, CS.D=0. Address size is 64-bit;
+ * operand size is 32-bit.
+ */
+ return INSN_CODE_SEG_PARAMS(4, 8);
+ case 3: /* Invalid setting. CS.L=1, CS.D=1 */
+ /* fall through */
+ default:
+ return -EINVAL;
+ }
+}
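To make the switch above concrete: a standard flat 32-bit user code segment has CS.L=0 and CS.D=1, so case 1 is taken and both address and operand sizes default to 4 bytes; a 16-bit protected-mode code segment (CS.L=0, CS.D=0) yields 2 and 2, matching the virtual-8086 shortcut at the top of the function.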
+
+/**
+ * insn_get_modrm_rm_off() - Obtain register in r/m part of the ModRM byte
+ * @insn: Instruction containing the ModRM byte
+ * @regs: Register values as seen when entering kernel mode
+ *
+ * Returns:
+ *
+ * The register indicated by the r/m part of the ModRM byte. The
+ * register is obtained as an offset from the base of pt_regs. In specific
+ * cases, the returned value can be -EDOM to indicate that the particular value
+ * of ModRM does not refer to a register and shall be ignored.
+ */
+int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs)
+{
+ return get_reg_offset(insn, regs, REG_TYPE_RM);
+}
+
+/**
+ * get_seg_base_limit() - obtain base address and limit of a segment
+ * @insn: Instruction. Must be valid.
+ * @regs: Register values as seen when entering kernel mode
+ * @regoff: Operand offset, in pt_regs, used to resolve segment descriptor
+ * @base: Obtained segment base
+ * @limit: Obtained segment limit
+ *
+ * Obtain the base address and limit of the segment associated with the operand
+ * @regoff and, if any or allowed, override prefixes in @insn. This function is
+ * different from insn_get_seg_base() as the latter does not resolve the segment
+ * associated with the instruction operand. If a limit is not needed (e.g.,
+ * when running in long mode), @limit can be NULL.
+ *
+ * Returns:
+ *
+ * 0 on success. @base and @limit will contain the base address and limit of the
+ * resolved segment, respectively.
+ *
+ * -EINVAL on error.
+ */
+static int get_seg_base_limit(struct insn *insn, struct pt_regs *regs,
+ int regoff, unsigned long *base,
+ unsigned long *limit)
+{
+ int seg_reg_idx;
+
+ if (!base)
+ return -EINVAL;
+
+ seg_reg_idx = resolve_seg_reg(insn, regs, regoff);
+ if (seg_reg_idx < 0)
+ return seg_reg_idx;
+
+ *base = insn_get_seg_base(regs, seg_reg_idx);
+ if (*base == -1L)
+ return -EINVAL;
+
+ if (!limit)
+ return 0;
+
+ *limit = get_seg_limit(regs, seg_reg_idx);
+ if (!(*limit))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * get_eff_addr_reg() - Obtain effective address from register operand
+ * @insn: Instruction. Must be valid.
+ * @regs: Register values as seen when entering kernel mode
+ * @regoff: Obtained operand offset, in pt_regs, with the effective address
+ * @eff_addr: Obtained effective address
+ *
+ * Obtain the effective address stored in the register operand as indicated by
+ * the ModRM byte. This function is to be used only with register addressing
+ * (i.e., ModRM.mod is 3). The effective address is saved in @eff_addr. The
+ * register operand, as an offset from the base of pt_regs, is saved in @regoff;
+ * such offset can then be used to resolve the segment associated with the
+ * operand. This function can be used with any of the supported address sizes
+ * in x86.
+ *
+ * Returns:
+ *
+ * 0 on success. @eff_addr will have the effective address stored in the
+ * operand indicated by ModRM. @regoff will have such operand as an offset from
+ * the base of pt_regs.
+ *
+ * -EINVAL on error.
+ */
+static int get_eff_addr_reg(struct insn *insn, struct pt_regs *regs,
+ int *regoff, long *eff_addr)
+{
+ insn_get_modrm(insn);
+
+ if (!insn->modrm.nbytes)
+ return -EINVAL;
+
+ if (X86_MODRM_MOD(insn->modrm.value) != 3)
+ return -EINVAL;
+
+ *regoff = get_reg_offset(insn, regs, REG_TYPE_RM);
+ if (*regoff < 0)
+ return -EINVAL;
+
+ /* Ignore bytes that are outside the address size. */
+ if (insn->addr_bytes == 2)
+ *eff_addr = regs_get_register(regs, *regoff) & 0xffff;
+ else if (insn->addr_bytes == 4)
+ *eff_addr = regs_get_register(regs, *regoff) & 0xffffffff;
+ else /* 64-bit address */
+ *eff_addr = regs_get_register(regs, *regoff);
+
+ return 0;
+}
+
+/**
+ * get_eff_addr_modrm() - Obtain referenced effective address via ModRM
+ * @insn: Instruction. Must be valid.
+ * @regs: Register values as seen when entering kernel mode
+ * @regoff: Obtained operand offset, in pt_regs, associated with segment
+ * @eff_addr: Obtained effective address
+ *
+ * Obtain the effective address referenced by the ModRM byte of @insn. After
+ * identifying the registers involved in the register-indirect memory reference,
+ * its value is obtained from the operands in @regs. The computed address is
+ * stored in @eff_addr. Also, the register operand that indicates the associated
+ * segment is stored in @regoff; this parameter can later be used to determine
+ * such segment.
+ *
+ * Returns:
+ *
+ * 0 on success. @eff_addr will have the referenced effective address. @regoff
+ * will have a register, as an offset from the base of pt_regs, that can be used
+ * to resolve the associated segment.
+ *
+ * -EINVAL on error.
+ */
+static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs,
+ int *regoff, long *eff_addr)
+{
+ long tmp;
+
+ if (insn->addr_bytes != 8 && insn->addr_bytes != 4)
+ return -EINVAL;
+
+ insn_get_modrm(insn);
+
+ if (!insn->modrm.nbytes)
+ return -EINVAL;
+
+ if (X86_MODRM_MOD(insn->modrm.value) > 2)
+ return -EINVAL;
+
+ *regoff = get_reg_offset(insn, regs, REG_TYPE_RM);
+
+ /*
+ * -EDOM means that we must ignore the address_offset. In such a case,
+	 * in 64-bit mode the effective address is relative to the rIP of the
+ * following instruction.
+ */
+ if (*regoff == -EDOM) {
+ if (user_64bit_mode(regs))
+ tmp = regs->ip + insn->length;
+ else
+ tmp = 0;
+ } else if (*regoff < 0) {
+ return -EINVAL;
+ } else {
+ tmp = regs_get_register(regs, *regoff);
+ }
+
+ if (insn->addr_bytes == 4) {
+ int addr32 = (int)(tmp & 0xffffffff) + insn->displacement.value;
+
+ *eff_addr = addr32 & 0xffffffff;
+ } else {
+ *eff_addr = tmp + insn->displacement.value;
+ }
+
+ return 0;
+}
+
+/**
+ * get_eff_addr_modrm_16() - Obtain referenced effective address via ModRM
+ * @insn: Instruction. Must be valid.
+ * @regs: Register values as seen when entering kernel mode
+ * @regoff: Obtained operand offset, in pt_regs, associated with segment
+ * @eff_addr: Obtained effective address
+ *
+ * Obtain the 16-bit effective address referenced by the ModRM byte of @insn.
+ * After identifying the registers involved in the register-indirect memory
+ * reference, its value is obtained from the operands in @regs. The computed
+ * address is stored in @eff_addr. Also, the register operand that indicates
+ * the associated segment is stored in @regoff; this parameter can later be used
+ * to determine such segment.
+ *
+ * Returns:
+ *
+ * 0 on success. @eff_addr will have the referenced effective address. @regoff
+ * will have a register, as an offset from the base of pt_regs, that can be used
+ * to resolve the associated segment.
+ *
+ * -EINVAL on error.
+ */
+static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs,
+ int *regoff, short *eff_addr)
+{
+ int addr_offset1, addr_offset2, ret;
+ short addr1 = 0, addr2 = 0, displacement;
+
+ if (insn->addr_bytes != 2)
+ return -EINVAL;
+
+ insn_get_modrm(insn);
+
+ if (!insn->modrm.nbytes)
+ return -EINVAL;
+
+ if (X86_MODRM_MOD(insn->modrm.value) > 2)
+ return -EINVAL;
+
+ ret = get_reg_offset_16(insn, regs, &addr_offset1, &addr_offset2);
+ if (ret < 0)
+ return -EINVAL;
+
+ /*
+ * Don't fail on invalid offset values. They might be invalid because
+ * they cannot be used for this particular value of ModRM. Instead, use
+ * them in the computation only if they contain a valid value.
+ */
+ if (addr_offset1 != -EDOM)
+ addr1 = regs_get_register(regs, addr_offset1) & 0xffff;
+
+ if (addr_offset2 != -EDOM)
+ addr2 = regs_get_register(regs, addr_offset2) & 0xffff;
+
+ displacement = insn->displacement.value & 0xffff;
+ *eff_addr = addr1 + addr2 + displacement;
+
+ /*
+ * The first operand register could indicate the use of either the SS or DS
+ * registers to obtain the segment selector. The second operand
+ * register can only indicate the use of DS. Thus, the first operand
+ * will be used to obtain the segment selector.
+ */
+ *regoff = addr_offset1;
+
+ return 0;
+}
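+
+/*
+ * For illustration (hypothetical instruction): with the 16-bit encoding
+ * "mov (%bx,%si), %ax", get_reg_offset_16() selects BX and SI, so
+ * get_eff_addr_modrm_16() computes
+ * (regs->bx & 0xffff) + (regs->si & 0xffff) + displacement, truncated to
+ * 16 bits, and *regoff identifies BX so that its default segment (DS) can be
+ * resolved later.
+ */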
+
+/**
+ * get_eff_addr_sib() - Obtain referenced effective address via SIB
+ * @insn: Instruction. Must be valid.
+ * @regs: Register values as seen when entering kernel mode
+ * @base_offset: Obtained operand offset, in pt_regs, associated with segment
+ * @eff_addr: Obtained effective address
+ *
+ * Obtain the effective address referenced by the SIB byte of @insn. After
+ * identifying the registers involved in the indexed, register-indirect memory
+ * reference, their values are obtained from the operands in @regs. The computed
+ * address is stored in @eff_addr. Also, the register operand that indicates the
+ * associated segment is stored in @base_offset; this parameter can later be
+ * used to determine such segment.
+ *
+ * Returns:
+ *
+ * 0 on success. @eff_addr will have the referenced effective address.
+ * @base_offset will have a register, as an offset from the base of pt_regs,
+ * that can be used to resolve the associated segment.
+ *
+ * -EINVAL on error.
+ */
+static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs,
+ int *base_offset, long *eff_addr)
+{
+ long base, indx;
+ int indx_offset;
+
+ if (insn->addr_bytes != 8 && insn->addr_bytes != 4)
+ return -EINVAL;
+
+ insn_get_modrm(insn);
+
+ if (!insn->modrm.nbytes)
+ return -EINVAL;
+
+ if (X86_MODRM_MOD(insn->modrm.value) > 2)
+ return -EINVAL;
+
+ insn_get_sib(insn);
+
+ if (!insn->sib.nbytes)
+ return -EINVAL;
+
+ *base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
+ indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
+
+ /*
+ * Negative values in the base and index offsets mean an error when
+ * decoding the SIB byte. The exception is -EDOM, which means that the
+ * registers should not be used in the address computation.
+ */
+ if (*base_offset == -EDOM)
+ base = 0;
+ else if (*base_offset < 0)
+ return -EINVAL;
+ else
+ base = regs_get_register(regs, *base_offset);
+
+ if (indx_offset == -EDOM)
+ indx = 0;
+ else if (indx_offset < 0)
+ return -EINVAL;
+ else
+ indx = regs_get_register(regs, indx_offset);
+
+ if (insn->addr_bytes == 4) {
+ int addr32, base32, idx32;
+
+ base32 = base & 0xffffffff;
+ idx32 = indx & 0xffffffff;
+
+ addr32 = base32 + idx32 * (1 << X86_SIB_SCALE(insn->sib.value));
+ addr32 += insn->displacement.value;
+
+ *eff_addr = addr32 & 0xffffffff;
+ } else {
+ *eff_addr = base + indx * (1 << X86_SIB_SCALE(insn->sib.value));
+ *eff_addr += insn->displacement.value;
+ }
+
+ return 0;
+}
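+
+/*
+ * For illustration (hypothetical instruction): for
+ * "mov 0x8(%rbx,%rcx,4), %rax" the SIB byte encodes base == RBX,
+ * index == RCX and scale == 2, so get_eff_addr_sib() computes
+ * *eff_addr == regs->bx + regs->cx * 4 + 0x8, with *base_offset identifying
+ * RBX for later segment resolution.
+ */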
+
+/**
+ * get_addr_ref_16() - Obtain the 16-bit address referred by instruction
+ * @insn: Instruction containing ModRM byte and displacement
+ * @regs: Register values as seen when entering kernel mode
+ *
+ * This function is to be used with 16-bit address encodings. Obtain the memory
+ * address referred by the instruction's ModRM and displacement bytes. Also, the
+ * segment used as base is determined by any segment override prefixes in
+ * @insn or by the default segment of the registers involved in the address
+ * computation. In protected mode, segment limits are enforced.
+ *
+ * Returns:
+ *
+ * Linear address referenced by the instruction operands on success.
+ *
+ * -1L on error.
+ */
+static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs)
+{
+ unsigned long linear_addr = -1L, seg_base, seg_limit;
+ int ret, regoff;
+ short eff_addr;
+ long tmp;
+
+ insn_get_modrm(insn);
+ insn_get_displacement(insn);
+
+ if (insn->addr_bytes != 2)
+ goto out;
+
+ if (X86_MODRM_MOD(insn->modrm.value) == 3) {
+ ret = get_eff_addr_reg(insn, regs, &regoff, &tmp);
+ if (ret)
+ goto out;
+
+ eff_addr = tmp;
+ } else {
+ ret = get_eff_addr_modrm_16(insn, regs, &regoff, &eff_addr);
+ if (ret)
+ goto out;
+ }
+
+ ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit);
+ if (ret)
+ goto out;
+
+ /*
+ * Before computing the linear address, make sure the effective address
+ * is within the limits of the segment. In virtual-8086 mode, segment
+ * limits are not enforced. In such a case, the segment limit is -1L to
+ * reflect this fact.
+ */
+ if ((unsigned long)(eff_addr & 0xffff) > seg_limit)
+ goto out;
+
+ linear_addr = (unsigned long)(eff_addr & 0xffff) + seg_base;
+
+ /* Limit linear address to 20 bits */
+ if (v8086_mode(regs))
+ linear_addr &= 0xfffff;
+
+out:
+ return (void __user *)linear_addr;
+}
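+
+/*
+ * For illustration (hypothetical register state): in virtual-8086 mode with
+ * DS == 0x1000 and an effective address of 0x0234, the segment base is
+ * 0x1000 << 4 == 0x10000 and the returned linear address is 0x10234, well
+ * within the 20-bit mask applied above.
+ */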
+
+/**
+ * get_addr_ref_32() - Obtain a 32-bit linear address
+ * @insn: Instruction with ModRM, SIB bytes and displacement
+ * @regs: Register values as seen when entering kernel mode
+ *
+ * This function is to be used with 32-bit address encodings to obtain the
+ * linear memory address referred by the instruction's ModRM, SIB,
+ * displacement bytes and segment base address, as applicable. If in protected
+ * mode, segment limits are enforced.
+ *
+ * Returns:
+ *
+ * Linear address referenced by instruction and registers on success.
+ *
+ * -1L on error.
+ */
+static void __user *get_addr_ref_32(struct insn *insn, struct pt_regs *regs)
+{
+ unsigned long linear_addr = -1L, seg_base, seg_limit;
+ int eff_addr, regoff;
+ long tmp;
+ int ret;
+
+ if (insn->addr_bytes != 4)
+ goto out;
+
+ if (X86_MODRM_MOD(insn->modrm.value) == 3) {
+ ret = get_eff_addr_reg(insn, regs, &regoff, &tmp);
+ if (ret)
+ goto out;
+
+ eff_addr = tmp;
+
+ } else {
+ if (insn->sib.nbytes) {
+ ret = get_eff_addr_sib(insn, regs, &regoff, &tmp);
+ if (ret)
+ goto out;
+
+ eff_addr = tmp;
+ } else {
+ ret = get_eff_addr_modrm(insn, regs, &regoff, &tmp);
+ if (ret)
+ goto out;
+
+ eff_addr = tmp;
+ }
+ }
+
+ ret = get_seg_base_limit(insn, regs, regoff, &seg_base, &seg_limit);
+ if (ret)
+ goto out;
+
+ /*
+ * In protected mode, before computing the linear address, make sure
+ * the effective address is within the limits of the segment.
+ * 32-bit addresses can be used in long and virtual-8086 modes if an
+ * address override prefix is used. In such cases, segment limits are
+ * not enforced. When in virtual-8086 mode, the segment limit is -1L
+ * to reflect this situation.
+ *
+ * Once computed, the effective address is treated as an unsigned
+ * quantity.
+ */
+ if (!user_64bit_mode(regs) && ((unsigned int)eff_addr > seg_limit))
+ goto out;
+
+ /*
+ * Even though 32-bit address encodings are allowed in virtual-8086
+ * mode, the address range is still limited to [0x0000-0xffff].
+ */
+ if (v8086_mode(regs) && (eff_addr & ~0xffff))
+ goto out;
+
+ /*
+ * Data type long could be 64 bits in size. Ensure that our 32-bit
+ * effective address is not sign-extended when computing the linear
+ * address.
+ */
+ linear_addr = (unsigned long)(eff_addr & 0xffffffff) + seg_base;
+
+ /* Limit linear address to 20 bits */
+ if (v8086_mode(regs))
+ linear_addr &= 0xfffff;
+
+out:
+ return (void __user *)linear_addr;
+}
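+
+/*
+ * For illustration (hypothetical segment state): with a flat 32-bit data
+ * segment (base 0, limit 0xffffffff) every effective address passes the
+ * limit check above and the linear address equals the effective address;
+ * a segment with a smaller limit, e.g. 0x000fffff, makes an access at
+ * 0x00200000 fail and return -1L.
+ */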
+
+/**
+ * get_addr_ref_64() - Obtain a 64-bit linear address
+ * @insn: Instruction struct with ModRM and SIB bytes and displacement
+ * @regs: Structure with register values as seen when entering kernel mode
+ *
+ * This function is to be used with 64-bit address encodings to obtain the
+ * linear memory address referred by the instruction's ModRM, SIB,
+ * displacement bytes and segment base address, as applicable.
+ *
+ * Returns:
+ *
+ * Linear address referenced by instruction and registers on success.
+ *
+ * -1L on error.
+ */
+#ifndef CONFIG_X86_64
+static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs)
+{
+ return (void __user *)-1L;
+}
+#else
+static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs)
+{
+ unsigned long linear_addr = -1L, seg_base;
+ int regoff, ret;
+ long eff_addr;
+
+ if (insn->addr_bytes != 8)
+ goto out;
+
+ if (X86_MODRM_MOD(insn->modrm.value) == 3) {
+ ret = get_eff_addr_reg(insn, regs, &regoff, &eff_addr);
+ if (ret)
+ goto out;
+
+ } else {
+ if (insn->sib.nbytes) {
+ ret = get_eff_addr_sib(insn, regs, &regoff, &eff_addr);
+ if (ret)
+ goto out;
+ } else {
+ ret = get_eff_addr_modrm(insn, regs, &regoff, &eff_addr);
+ if (ret)
+ goto out;
+ }
+
+ }
+
+ ret = get_seg_base_limit(insn, regs, regoff, &seg_base, NULL);
+ if (ret)
+ goto out;
+
+ linear_addr = (unsigned long)eff_addr + seg_base;
+
+out:
+ return (void __user *)linear_addr;
+}
+#endif /* CONFIG_X86_64 */
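+
+/*
+ * Note that get_addr_ref_64() performs no limit check: in 64-bit mode
+ * segment limits are not enforced, and the bases of CS, DS, ES and SS are
+ * treated as zero, so a non-zero seg_base is normally seen only for FS- or
+ * GS-relative accesses.
+ */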
+
+/**
+ * insn_get_addr_ref() - Obtain the linear address referred by instruction
+ * @insn: Instruction structure containing ModRM byte and displacement
+ * @regs: Structure with register values as seen when entering kernel mode
+ *
+ * Obtain the linear address referred by the instruction's ModRM, SIB and
+ * displacement bytes, and segment base, as applicable. In protected mode,
+ * segment limits are enforced.
+ *
+ * Returns:
+ *
+ * Linear address referenced by instruction and registers on success.
+ *
+ * -1L on error.
+ */
+void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
+{
+ if (!insn || !regs)
+ return (void __user *)-1L;
+
+ switch (insn->addr_bytes) {
+ case 2:
+ return get_addr_ref_16(insn, regs);
+ case 4:
+ return get_addr_ref_32(insn, regs);
+ case 8:
+ return get_addr_ref_64(insn, regs);
+ default:
+ return (void __user *)-1L;
+ }
+}
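+
+/*
+ * A minimal usage sketch (hypothetical caller; assumes the instruction bytes
+ * were already copied from user space into buf):
+ *
+ *     struct insn insn;
+ *     void __user *addr;
+ *
+ *     insn_init(&insn, buf, buf_size, user_64bit_mode(regs));
+ *     insn_get_length(&insn);
+ *     addr = insn_get_addr_ref(&insn, regs);
+ *     if (addr == (void __user *)-1L)
+ *             return false;
+ *
+ * insn_init() and insn_get_length() are the existing instruction decoder
+ * helpers; insn_get_addr_ref() then resolves the memory operand to a linear
+ * address using the register state in regs.
+ */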
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
index bf2c6074efd2..dc2ab6ea6768 100644
--- a/arch/x86/lib/rwsem.S
+++ b/arch/x86/lib/rwsem.S
@@ -98,6 +98,18 @@ ENTRY(call_rwsem_down_read_failed)
ret
ENDPROC(call_rwsem_down_read_failed)
+ENTRY(call_rwsem_down_read_failed_killable)
+ FRAME_BEGIN
+ save_common_regs
+ __ASM_SIZE(push,) %__ASM_REG(dx)
+ movq %rax,%rdi
+ call rwsem_down_read_failed_killable
+ __ASM_SIZE(pop,) %__ASM_REG(dx)
+ restore_common_regs
+ FRAME_END
+ ret
+ENDPROC(call_rwsem_down_read_failed_killable)
+
ENTRY(call_rwsem_down_write_failed)
FRAME_BEGIN
save_common_regs
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 7ba7f3d7f477..8e13b8cc6bed 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -29,8 +29,6 @@ obj-$(CONFIG_X86_PTDUMP) += debug_pagetables.o
obj-$(CONFIG_HIGHMEM) += highmem_32.o
-obj-$(CONFIG_KMEMCHECK) += kmemcheck/
-
KASAN_SANITIZE_kasan_init_$(BITS).o := n
obj-$(CONFIG_KASAN) += kasan_init_$(BITS).o
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index c3521e2be396..3321b446b66c 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -67,12 +67,17 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup,
* wrapped around) will be set. Additionally, seeing the refcount
* reach 0 will set ZF (Zero Flag: result was zero). In each of
* these cases we want a report, since it's a boundary condition.
- *
+ * The SF case is not reported since it indicates post-boundary
+ * manipulations below zero or above INT_MAX. And if none of the
+ * flags are set, something has gone very wrong, so report it.
*/
if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) {
bool zero = regs->flags & X86_EFLAGS_ZF;
refcount_error_report(regs, zero ? "hit zero" : "overflow");
+ } else if ((regs->flags & X86_EFLAGS_SF) == 0) {
+ /* Report if none of OF, ZF, nor SF are set. */
+ refcount_error_report(regs, "unexpected saturation");
}
return true;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b0ff378650a9..78ca9a8ee454 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -20,7 +20,6 @@
#include <asm/cpufeature.h> /* boot_cpu_has, ... */
#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
-#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
#include <asm/fixmap.h> /* VSYSCALL_ADDR */
#include <asm/vsyscall.h> /* emulate_vsyscall */
#include <asm/vm86.h> /* struct vm86 */
@@ -30,26 +29,6 @@
#include <asm/trace/exceptions.h>
/*
- * Page fault error code bits:
- *
- * bit 0 == 0: no page found 1: protection fault
- * bit 1 == 0: read access 1: write access
- * bit 2 == 0: kernel-mode access 1: user-mode access
- * bit 3 == 1: use of reserved bit detected
- * bit 4 == 1: fault was an instruction fetch
- * bit 5 == 1: protection keys block access
- */
-enum x86_pf_error_code {
-
- PF_PROT = 1 << 0,
- PF_WRITE = 1 << 1,
- PF_USER = 1 << 2,
- PF_RSVD = 1 << 3,
- PF_INSTR = 1 << 4,
- PF_PK = 1 << 5,
-};
-
-/*
* Returns 0 if mmiotrace is disabled, or if the fault is not
* handled by mmiotrace:
*/
@@ -150,7 +129,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
* If it was a exec (instruction fetch) fault on NX page, then
* do not ignore the fault:
*/
- if (error_code & PF_INSTR)
+ if (error_code & X86_PF_INSTR)
return 0;
instr = (void *)convert_ip_to_linear(current, regs);
@@ -180,7 +159,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
* siginfo so userspace can discover which protection key was set
* on the PTE.
*
- * If we get here, we know that the hardware signaled a PF_PK
+ * If we get here, we know that the hardware signaled a X86_PF_PK
* fault and that there was a VMA once we got in the fault
* handler. It does *not* guarantee that the VMA we find here
* was the one that we faulted on.
@@ -205,7 +184,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
/*
* force_sig_info_fault() is called from a number of
* contexts, some of which have a VMA and some of which
- * do not. The PF_PK handing happens after we have a
+ * do not. The X86_PF_PK handling happens after we have a
* valid VMA, so we should never reach this without a
* valid VMA.
*/
@@ -698,7 +677,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
if (!oops_may_print())
return;
- if (error_code & PF_INSTR) {
+ if (error_code & X86_PF_INSTR) {
unsigned int level;
pgd_t *pgd;
pte_t *pte;
@@ -780,7 +759,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
*/
if (current->thread.sig_on_uaccess_err && signal) {
tsk->thread.trap_nr = X86_TRAP_PF;
- tsk->thread.error_code = error_code | PF_USER;
+ tsk->thread.error_code = error_code | X86_PF_USER;
tsk->thread.cr2 = address;
/* XXX: hwpoison faults will set the wrong code. */
@@ -898,7 +877,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
struct task_struct *tsk = current;
/* User mode accesses just cause a SIGSEGV */
- if (error_code & PF_USER) {
+ if (error_code & X86_PF_USER) {
/*
* It's possible to have interrupts off here:
*/
@@ -919,7 +898,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
* Instruction fetch faults in the vsyscall page might need
* emulation.
*/
- if (unlikely((error_code & PF_INSTR) &&
+ if (unlikely((error_code & X86_PF_INSTR) &&
((address & ~0xfff) == VSYSCALL_ADDR))) {
if (emulate_vsyscall(regs, address))
return;
@@ -932,7 +911,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
* are always protection faults.
*/
if (address >= TASK_SIZE_MAX)
- error_code |= PF_PROT;
+ error_code |= X86_PF_PROT;
if (likely(show_unhandled_signals))
show_signal_msg(regs, error_code, address, tsk);
@@ -993,11 +972,11 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code,
if (!boot_cpu_has(X86_FEATURE_OSPKE))
return false;
- if (error_code & PF_PK)
+ if (error_code & X86_PF_PK)
return true;
/* this checks permission keys on the VMA: */
- if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
- (error_code & PF_INSTR), foreign))
+ if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+ (error_code & X86_PF_INSTR), foreign))
return true;
return false;
}
@@ -1025,7 +1004,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
int code = BUS_ADRERR;
/* Kernel mode? Handle exceptions or die: */
- if (!(error_code & PF_USER)) {
+ if (!(error_code & X86_PF_USER)) {
no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
return;
}
@@ -1053,14 +1032,14 @@ static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address, u32 *pkey, unsigned int fault)
{
- if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+ if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
no_context(regs, error_code, address, 0, 0);
return;
}
if (fault & VM_FAULT_OOM) {
/* Kernel mode? Handle exceptions or die: */
- if (!(error_code & PF_USER)) {
+ if (!(error_code & X86_PF_USER)) {
no_context(regs, error_code, address,
SIGSEGV, SEGV_MAPERR);
return;
@@ -1085,16 +1064,16 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
- if ((error_code & PF_WRITE) && !pte_write(*pte))
+ if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
return 0;
- if ((error_code & PF_INSTR) && !pte_exec(*pte))
+ if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
return 0;
/*
* Note: We do not do lazy flushing on protection key
- * changes, so no spurious fault will ever set PF_PK.
+ * changes, so no spurious fault will ever set X86_PF_PK.
*/
- if ((error_code & PF_PK))
+ if ((error_code & X86_PF_PK))
return 1;
return 1;
@@ -1140,8 +1119,8 @@ spurious_fault(unsigned long error_code, unsigned long address)
* change, so user accesses are not expected to cause spurious
* faults.
*/
- if (error_code != (PF_WRITE | PF_PROT)
- && error_code != (PF_INSTR | PF_PROT))
+ if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
+ error_code != (X86_PF_INSTR | X86_PF_PROT))
return 0;
pgd = init_mm.pgd + pgd_index(address);
@@ -1201,19 +1180,19 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
* always an unconditional error and can never result in
* a follow-up action to resolve the fault, like a COW.
*/
- if (error_code & PF_PK)
+ if (error_code & X86_PF_PK)
return 1;
/*
* Make sure to check the VMA so that we do not perform
- * faults just to hit a PF_PK as soon as we fill in a
+ * faults just to hit a X86_PF_PK as soon as we fill in a
* page.
*/
- if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
- (error_code & PF_INSTR), foreign))
+ if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+ (error_code & X86_PF_INSTR), foreign))
return 1;
- if (error_code & PF_WRITE) {
+ if (error_code & X86_PF_WRITE) {
/* write, present and write, not present: */
if (unlikely(!(vma->vm_flags & VM_WRITE)))
return 1;
@@ -1221,7 +1200,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
}
/* read, present: */
- if (unlikely(error_code & PF_PROT))
+ if (unlikely(error_code & X86_PF_PROT))
return 1;
/* read, not present: */
@@ -1244,7 +1223,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
if (!static_cpu_has(X86_FEATURE_SMAP))
return false;
- if (error_code & PF_USER)
+ if (error_code & X86_PF_USER)
return false;
if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
@@ -1276,8 +1255,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
* Detect and handle instructions that would cause a page fault for
* both a tracked kernel page and a userspace page.
*/
- if (kmemcheck_active(regs))
- kmemcheck_hide(regs);
prefetchw(&mm->mmap_sem);
if (unlikely(kmmio_fault(regs, address)))
@@ -1297,12 +1274,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
* protection error (error_code & 9) == 0.
*/
if (unlikely(fault_in_kernel_space(address))) {
- if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
+ if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
if (vmalloc_fault(address) >= 0)
return;
-
- if (kmemcheck_fault(regs, address, error_code))
- return;
}
/* Can handle a stale RO->RW TLB: */
@@ -1325,7 +1299,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
if (unlikely(kprobes_fault(regs)))
return;
- if (unlikely(error_code & PF_RSVD))
+ if (unlikely(error_code & X86_PF_RSVD))
pgtable_bad(regs, error_code, address);
if (unlikely(smap_violation(error_code, regs))) {
@@ -1351,7 +1325,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
*/
if (user_mode(regs)) {
local_irq_enable();
- error_code |= PF_USER;
+ error_code |= X86_PF_USER;
flags |= FAULT_FLAG_USER;
} else {
if (regs->flags & X86_EFLAGS_IF)
@@ -1360,9 +1334,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (error_code & PF_WRITE)
+ if (error_code & X86_PF_WRITE)
flags |= FAULT_FLAG_WRITE;
- if (error_code & PF_INSTR)
+ if (error_code & X86_PF_INSTR)
flags |= FAULT_FLAG_INSTRUCTION;
/*
@@ -1382,7 +1356,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
* space check, thus avoiding the deadlock:
*/
if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
- if ((error_code & PF_USER) == 0 &&
+ if (!(error_code & X86_PF_USER) &&
!search_exception_tables(regs->ip)) {
bad_area_nosemaphore(regs, error_code, address, NULL);
return;
@@ -1409,7 +1383,7 @@ retry:
bad_area(regs, error_code, address);
return;
}
- if (error_code & PF_USER) {
+ if (error_code & X86_PF_USER) {
/*
* Accessing the stack below %sp is always a bug.
* The large cushion allows instructions like enter
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index af5c1ed21d43..6fdf91ef130a 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -92,8 +92,7 @@ __ref void *alloc_low_pages(unsigned int num)
unsigned int order;
order = get_order((unsigned long)num << PAGE_SHIFT);
- return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
- __GFP_ZERO, order);
+ return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
}
if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
@@ -164,12 +163,11 @@ static int page_size_mask;
static void __init probe_page_size_mask(void)
{
/*
- * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
- * use small pages.
+ * For pagealloc debugging, identity mapping will use small pages.
* This will simplify cpa(), which otherwise needs to support splitting
* large pages into small in interrupt context, etc.
*/
- if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK))
+ if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
page_size_mask |= 1 << PG_LEVEL_2M;
else
direct_gbpages = 0;
@@ -671,7 +669,7 @@ void __init init_mem_mapping(void)
load_cr3(swapper_pg_dir);
__flush_tlb_all();
- hypervisor_init_mem_mapping();
+ x86_init.hyper.init_mem_mapping();
early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 048fbe8fc274..4a837289f2ad 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -184,7 +184,7 @@ static __ref void *spp_getpage(void)
void *ptr;
if (after_bootmem)
- ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
+ ptr = (void *) get_zeroed_page(GFP_ATOMIC);
else
ptr = alloc_bootmem_pages(PAGE_SIZE);
@@ -1173,12 +1173,18 @@ void __init mem_init(void)
/* clear_bss() already clear the empty_zero_page */
- register_page_bootmem_info();
-
/* this will put all memory onto the freelists */
free_all_bootmem();
after_bootmem = 1;
+ /*
+ * Must be done after boot memory is put on freelist, because here we
+ * might set fields in deferred struct pages that have not yet been
+ * initialized, and free_all_bootmem() initializes all the reserved
+ * deferred pages for us.
+ */
+ register_page_bootmem_info();
+
/* Register memory areas for /proc/kcore */
kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
PAGE_SIZE, KCORE_OTHER);
@@ -1399,7 +1405,6 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
vmemmap_verify((pte_t *)pmd, node, addr, next);
continue;
}
- pr_warn_once("vmemmap: falling back to regular page backing\n");
if (vmemmap_populate_basepages(addr, next, node))
return -ENOMEM;
}
@@ -1426,16 +1431,16 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
- struct page *start_page, unsigned long size)
+ struct page *start_page, unsigned long nr_pages)
{
unsigned long addr = (unsigned long)start_page;
- unsigned long end = (unsigned long)(start_page + size);
+ unsigned long end = (unsigned long)(start_page + nr_pages);
unsigned long next;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
- unsigned int nr_pages;
+ unsigned int nr_pmd_pages;
struct page *page;
for (; addr < end; addr = next) {
@@ -1482,9 +1487,9 @@ void register_page_bootmem_memmap(unsigned long section_nr,
if (pmd_none(*pmd))
continue;
- nr_pages = 1 << (get_order(PMD_SIZE));
+ nr_pmd_pages = 1 << get_order(PMD_SIZE);
page = pmd_page(*pmd);
- while (nr_pages--)
+ while (nr_pmd_pages--)
get_page_bootmem(section_nr, page++,
SECTION_INFO);
}
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 34f0e1847dd6..6e4573b1da34 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -27,6 +27,11 @@
#include "physaddr.h"
+struct ioremap_mem_flags {
+ bool system_ram;
+ bool desc_other;
+};
+
/*
* Fix up the linear direct mapping of the kernel to avoid cache attribute
* conflicts.
@@ -56,17 +61,59 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
return err;
}
-static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
- void *arg)
+static bool __ioremap_check_ram(struct resource *res)
{
+ unsigned long start_pfn, stop_pfn;
unsigned long i;
- for (i = 0; i < nr_pages; ++i)
- if (pfn_valid(start_pfn + i) &&
- !PageReserved(pfn_to_page(start_pfn + i)))
- return 1;
+ if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
+ return false;
- return 0;
+ start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ stop_pfn = (res->end + 1) >> PAGE_SHIFT;
+ if (stop_pfn > start_pfn) {
+ for (i = 0; i < (stop_pfn - start_pfn); ++i)
+ if (pfn_valid(start_pfn + i) &&
+ !PageReserved(pfn_to_page(start_pfn + i)))
+ return true;
+ }
+
+ return false;
+}
+
+static int __ioremap_check_desc_other(struct resource *res)
+{
+ return (res->desc != IORES_DESC_NONE);
+}
+
+static int __ioremap_res_check(struct resource *res, void *arg)
+{
+ struct ioremap_mem_flags *flags = arg;
+
+ if (!flags->system_ram)
+ flags->system_ram = __ioremap_check_ram(res);
+
+ if (!flags->desc_other)
+ flags->desc_other = __ioremap_check_desc_other(res);
+
+ return flags->system_ram && flags->desc_other;
+}
+
+/*
+ * To avoid multiple resource walks, this function walks resources marked as
+ * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
+ * resource not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
+ */
+static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
+ struct ioremap_mem_flags *flags)
+{
+ u64 start, end;
+
+ start = (u64)addr;
+ end = start + size - 1;
+ memset(flags, 0, sizeof(*flags));
+
+ walk_mem_res(start, end, flags, __ioremap_res_check);
}
/*
@@ -87,9 +134,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
unsigned long size, enum page_cache_mode pcm, void *caller)
{
unsigned long offset, vaddr;
- resource_size_t pfn, last_pfn, last_addr;
+ resource_size_t last_addr;
const resource_size_t unaligned_phys_addr = phys_addr;
const unsigned long unaligned_size = size;
+ struct ioremap_mem_flags mem_flags;
struct vm_struct *area;
enum page_cache_mode new_pcm;
pgprot_t prot;
@@ -108,13 +156,12 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
return NULL;
}
+ __ioremap_check_mem(phys_addr, size, &mem_flags);
+
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
- pfn = phys_addr >> PAGE_SHIFT;
- last_pfn = last_addr >> PAGE_SHIFT;
- if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
- __ioremap_check_ram) == 1) {
+ if (mem_flags.system_ram) {
WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
&phys_addr, &last_addr);
return NULL;
@@ -146,7 +193,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
pcm = new_pcm;
}
+ /*
+ * If the page being mapped is in memory and SEV is active then
+ * make sure the memory encryption attribute is enabled in the
+ * resulting mapping.
+ */
prot = PAGE_KERNEL_IO;
+ if (sev_active() && mem_flags.desc_other)
+ prot = pgprot_encrypted(prot);
+
switch (pcm) {
case _PAGE_CACHE_MODE_UC:
default:
@@ -422,6 +477,9 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
* areas should be mapped decrypted. And since the encryption key can
* change across reboots, persistent memory should also be mapped
* decrypted.
+ *
+ * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
+ * only persistent memory should be mapped decrypted.
*/
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
unsigned long size)
@@ -458,6 +516,11 @@ static bool memremap_should_map_decrypted(resource_size_t phys_addr,
case E820_TYPE_ACPI:
case E820_TYPE_NVS:
case E820_TYPE_UNUSABLE:
+ /* For SEV, these areas are encrypted */
+ if (sev_active())
+ break;
+ /* Fallthrough */
+
case E820_TYPE_PRAM:
return true;
default:
@@ -581,7 +644,7 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
unsigned long flags)
{
- if (!sme_active())
+ if (!mem_encrypt_active())
return true;
if (flags & MEMREMAP_ENC)
@@ -590,12 +653,13 @@ bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
if (flags & MEMREMAP_DEC)
return false;
- if (memremap_is_setup_data(phys_addr, size) ||
- memremap_is_efi_data(phys_addr, size) ||
- memremap_should_map_decrypted(phys_addr, size))
- return false;
+ if (sme_active()) {
+ if (memremap_is_setup_data(phys_addr, size) ||
+ memremap_is_efi_data(phys_addr, size))
+ return false;
+ }
- return true;
+ return !memremap_should_map_decrypted(phys_addr, size);
}
/*
@@ -608,17 +672,24 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
unsigned long size,
pgprot_t prot)
{
- if (!sme_active())
+ bool encrypted_prot;
+
+ if (!mem_encrypt_active())
return prot;
- if (early_memremap_is_setup_data(phys_addr, size) ||
- memremap_is_efi_data(phys_addr, size) ||
- memremap_should_map_decrypted(phys_addr, size))
- prot = pgprot_decrypted(prot);
- else
- prot = pgprot_encrypted(prot);
+ encrypted_prot = true;
+
+ if (sme_active()) {
+ if (early_memremap_is_setup_data(phys_addr, size) ||
+ memremap_is_efi_data(phys_addr, size))
+ encrypted_prot = false;
+ }
+
+ if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
+ encrypted_prot = false;
- return prot;
+ return encrypted_prot ? pgprot_encrypted(prot)
+ : pgprot_decrypted(prot);
}
bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 8f5be3eb40dd..99dfed6dfef8 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -4,19 +4,150 @@
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>
#include <asm/e820/types.h>
+#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
extern struct range pfn_mapped[E820_MAX_ENTRIES];
-static int __init map_range(struct range *range)
+static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
+
+static __init void *early_alloc(size_t size, int nid)
+{
+ return memblock_virt_alloc_try_nid_nopanic(size, size,
+ __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+}
+
+static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
+ unsigned long end, int nid)
+{
+ pte_t *pte;
+
+ if (pmd_none(*pmd)) {
+ void *p;
+
+ if (boot_cpu_has(X86_FEATURE_PSE) &&
+ ((end - addr) == PMD_SIZE) &&
+ IS_ALIGNED(addr, PMD_SIZE)) {
+ p = early_alloc(PMD_SIZE, nid);
+ if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
+ return;
+ else if (p)
+ memblock_free(__pa(p), PMD_SIZE);
+ }
+
+ p = early_alloc(PAGE_SIZE, nid);
+ pmd_populate_kernel(&init_mm, pmd, p);
+ }
+
+ pte = pte_offset_kernel(pmd, addr);
+ do {
+ pte_t entry;
+ void *p;
+
+ if (!pte_none(*pte))
+ continue;
+
+ p = early_alloc(PAGE_SIZE, nid);
+ entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
+ set_pte_at(&init_mm, addr, pte, entry);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
+ unsigned long end, int nid)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ if (pud_none(*pud)) {
+ void *p;
+
+ if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
+ ((end - addr) == PUD_SIZE) &&
+ IS_ALIGNED(addr, PUD_SIZE)) {
+ p = early_alloc(PUD_SIZE, nid);
+ if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
+ return;
+ else if (p)
+ memblock_free(__pa(p), PUD_SIZE);
+ }
+
+ p = early_alloc(PAGE_SIZE, nid);
+ pud_populate(&init_mm, pud, p);
+ }
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ if (!pmd_large(*pmd))
+ kasan_populate_pmd(pmd, addr, next, nid);
+ } while (pmd++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
+ unsigned long end, int nid)
+{
+ pud_t *pud;
+ unsigned long next;
+
+ if (p4d_none(*p4d)) {
+ void *p = early_alloc(PAGE_SIZE, nid);
+
+ p4d_populate(&init_mm, p4d, p);
+ }
+
+ pud = pud_offset(p4d, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (!pud_large(*pud))
+ kasan_populate_pud(pud, addr, next, nid);
+ } while (pud++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
+ unsigned long end, int nid)
+{
+ void *p;
+ p4d_t *p4d;
+ unsigned long next;
+
+ if (pgd_none(*pgd)) {
+ p = early_alloc(PAGE_SIZE, nid);
+ pgd_populate(&init_mm, pgd, p);
+ }
+
+ p4d = p4d_offset(pgd, addr);
+ do {
+ next = p4d_addr_end(addr, end);
+ kasan_populate_p4d(p4d, addr, next, nid);
+ } while (p4d++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
+ int nid)
+{
+ pgd_t *pgd;
+ unsigned long next;
+
+ addr = addr & PAGE_MASK;
+ end = round_up(end, PAGE_SIZE);
+ pgd = pgd_offset_k(addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ kasan_populate_pgd(pgd, addr, next, nid);
+ } while (pgd++, addr = next, addr != end);
+}
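+
+/*
+ * For reference: generic KASAN uses one shadow byte per eight bytes of
+ * address space, so callers pass shadow addresses here, i.e. the result of
+ * kasan_mem_to_shadow(x), which maps x to (x >> 3) + KASAN_SHADOW_OFFSET.
+ */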
+
+static void __init map_range(struct range *range)
{
unsigned long start;
unsigned long end;
@@ -24,15 +155,17 @@ static int __init map_range(struct range *range)
start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
- return vmemmap_populate(start, end, NUMA_NO_NODE);
+ kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}
static void __init clear_pgds(unsigned long start,
unsigned long end)
{
pgd_t *pgd;
+ /* See comment in kasan_init() */
+ unsigned long pgd_end = end & PGDIR_MASK;
- for (; start < end; start += PGDIR_SIZE) {
+ for (; start < pgd_end; start += PGDIR_SIZE) {
pgd = pgd_offset_k(start);
/*
* With folded p4d, pgd_clear() is nop, use p4d_clear()
@@ -43,29 +176,61 @@ static void __init clear_pgds(unsigned long start,
else
pgd_clear(pgd);
}
+
+ pgd = pgd_offset_k(start);
+ for (; start < end; start += P4D_SIZE)
+ p4d_clear(p4d_offset(pgd, start));
+}
+
+static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
+{
+ unsigned long p4d;
+
+ if (!IS_ENABLED(CONFIG_X86_5LEVEL))
+ return (p4d_t *)pgd;
+
+ p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
+ p4d += __START_KERNEL_map - phys_base;
+ return (p4d_t *)p4d + p4d_index(addr);
+}
+
+static void __init kasan_early_p4d_populate(pgd_t *pgd,
+ unsigned long addr,
+ unsigned long end)
+{
+ pgd_t pgd_entry;
+ p4d_t *p4d, p4d_entry;
+ unsigned long next;
+
+ if (pgd_none(*pgd)) {
+ pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
+ set_pgd(pgd, pgd_entry);
+ }
+
+ p4d = early_p4d_offset(pgd, addr);
+ do {
+ next = p4d_addr_end(addr, end);
+
+ if (!p4d_none(*p4d))
+ continue;
+
+ p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
+ set_p4d(p4d, p4d_entry);
+ } while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
- int i;
- unsigned long start = KASAN_SHADOW_START;
+ /* See comment in kasan_init() */
+ unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
unsigned long end = KASAN_SHADOW_END;
+ unsigned long next;
- for (i = pgd_index(start); start < end; i++) {
- switch (CONFIG_PGTABLE_LEVELS) {
- case 4:
- pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
- _KERNPG_TABLE);
- break;
- case 5:
- pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
- _KERNPG_TABLE);
- break;
- default:
- BUILD_BUG();
- }
- start += PGDIR_SIZE;
- }
+ pgd += pgd_index(addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ kasan_early_p4d_populate(pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
}
#ifdef CONFIG_KASAN_INLINE
@@ -102,7 +267,7 @@ void __init kasan_early_init(void)
for (i = 0; i < PTRS_PER_PUD; i++)
kasan_zero_pud[i] = __pud(pud_val);
- for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
+ for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
kasan_zero_p4d[i] = __p4d(p4d_val);
kasan_map_early_shadow(early_top_pgt);
@@ -118,28 +283,51 @@ void __init kasan_init(void)
#endif
memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
+
+ /*
+ * We use the same shadow offset for 4- and 5-level paging to
+ * facilitate boot-time switching between paging modes.
+ * As a result, in 5-level paging mode KASAN_SHADOW_START and
+ * KASAN_SHADOW_END are not aligned to the PGD boundary.
+ *
+ * KASAN_SHADOW_START doesn't share a PGD with anything else.
+ * We claim the whole PGD entry to make things easier.
+ *
+ * KASAN_SHADOW_END lands in the last PGD entry and it collides with a
+ * bunch of things like kernel code, modules, EFI mapping, etc.
+ * We need to take extra steps to not overwrite them.
+ */
+ if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+ void *ptr;
+
+ ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
+ memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
+ set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
+ __pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
+ }
+
load_cr3(early_top_pgt);
__flush_tlb_all();
- clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+ clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);
- kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
+ kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
kasan_mem_to_shadow((void *)PAGE_OFFSET));
for (i = 0; i < E820_MAX_ENTRIES; i++) {
if (pfn_mapped[i].end == 0)
break;
- if (map_range(&pfn_mapped[i]))
- panic("kasan: unable to allocate shadow!");
+ map_range(&pfn_mapped[i]);
}
+
kasan_populate_zero_shadow(
kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
kasan_mem_to_shadow((void *)__START_KERNEL_map));
- vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
- (unsigned long)kasan_mem_to_shadow(_end),
- NUMA_NO_NODE);
+ kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
+ (unsigned long)kasan_mem_to_shadow(_end),
+ early_pfn_to_nid(__pa(_stext)));
kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
(void *)KASAN_SHADOW_END);
diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile
deleted file mode 100644
index 520b3bce4095..000000000000
--- a/arch/x86/mm/kmemcheck/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-y := error.o kmemcheck.o opcode.o pte.o selftest.o shadow.o
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
index 872ec4159a68..cec594032515 100644
--- a/arch/x86/mm/kmemcheck/error.c
+++ b/arch/x86/mm/kmemcheck/error.c
@@ -1,228 +1 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/interrupt.h>
-#include <linux/kdebug.h>
-#include <linux/kmemcheck.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/stacktrace.h>
-#include <linux/string.h>
-
-#include "error.h"
-#include "shadow.h"
-
-enum kmemcheck_error_type {
- KMEMCHECK_ERROR_INVALID_ACCESS,
- KMEMCHECK_ERROR_BUG,
-};
-
-#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT)
-
-struct kmemcheck_error {
- enum kmemcheck_error_type type;
-
- union {
- /* KMEMCHECK_ERROR_INVALID_ACCESS */
- struct {
- /* Kind of access that caused the error */
- enum kmemcheck_shadow state;
- /* Address and size of the erroneous read */
- unsigned long address;
- unsigned int size;
- };
- };
-
- struct pt_regs regs;
- struct stack_trace trace;
- unsigned long trace_entries[32];
-
- /* We compress it to a char. */
- unsigned char shadow_copy[SHADOW_COPY_SIZE];
- unsigned char memory_copy[SHADOW_COPY_SIZE];
-};
-
-/*
- * Create a ring queue of errors to output. We can't call printk() directly
- * from the kmemcheck traps, since this may call the console drivers and
- * result in a recursive fault.
- */
-static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE];
-static unsigned int error_count;
-static unsigned int error_rd;
-static unsigned int error_wr;
-static unsigned int error_missed_count;
-
-static struct kmemcheck_error *error_next_wr(void)
-{
- struct kmemcheck_error *e;
-
- if (error_count == ARRAY_SIZE(error_fifo)) {
- ++error_missed_count;
- return NULL;
- }
-
- e = &error_fifo[error_wr];
- if (++error_wr == ARRAY_SIZE(error_fifo))
- error_wr = 0;
- ++error_count;
- return e;
-}
-
-static struct kmemcheck_error *error_next_rd(void)
-{
- struct kmemcheck_error *e;
-
- if (error_count == 0)
- return NULL;
-
- e = &error_fifo[error_rd];
- if (++error_rd == ARRAY_SIZE(error_fifo))
- error_rd = 0;
- --error_count;
- return e;
-}
-
-void kmemcheck_error_recall(void)
-{
- static const char *desc[] = {
- [KMEMCHECK_SHADOW_UNALLOCATED] = "unallocated",
- [KMEMCHECK_SHADOW_UNINITIALIZED] = "uninitialized",
- [KMEMCHECK_SHADOW_INITIALIZED] = "initialized",
- [KMEMCHECK_SHADOW_FREED] = "freed",
- };
-
- static const char short_desc[] = {
- [KMEMCHECK_SHADOW_UNALLOCATED] = 'a',
- [KMEMCHECK_SHADOW_UNINITIALIZED] = 'u',
- [KMEMCHECK_SHADOW_INITIALIZED] = 'i',
- [KMEMCHECK_SHADOW_FREED] = 'f',
- };
-
- struct kmemcheck_error *e;
- unsigned int i;
-
- e = error_next_rd();
- if (!e)
- return;
-
- switch (e->type) {
- case KMEMCHECK_ERROR_INVALID_ACCESS:
- printk(KERN_WARNING "WARNING: kmemcheck: Caught %d-bit read from %s memory (%p)\n",
- 8 * e->size, e->state < ARRAY_SIZE(desc) ?
- desc[e->state] : "(invalid shadow state)",
- (void *) e->address);
-
- printk(KERN_WARNING);
- for (i = 0; i < SHADOW_COPY_SIZE; ++i)
- printk(KERN_CONT "%02x", e->memory_copy[i]);
- printk(KERN_CONT "\n");
-
- printk(KERN_WARNING);
- for (i = 0; i < SHADOW_COPY_SIZE; ++i) {
- if (e->shadow_copy[i] < ARRAY_SIZE(short_desc))
- printk(KERN_CONT " %c", short_desc[e->shadow_copy[i]]);
- else
- printk(KERN_CONT " ?");
- }
- printk(KERN_CONT "\n");
- printk(KERN_WARNING "%*c\n", 2 + 2
- * (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^');
- break;
- case KMEMCHECK_ERROR_BUG:
- printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n");
- break;
- }
-
- __show_regs(&e->regs, 1);
- print_stack_trace(&e->trace, 0);
-}
-
-static void do_wakeup(unsigned long data)
-{
- while (error_count > 0)
- kmemcheck_error_recall();
-
- if (error_missed_count > 0) {
- printk(KERN_WARNING "kmemcheck: Lost %d error reports because "
- "the queue was too small\n", error_missed_count);
- error_missed_count = 0;
- }
-}
-
-static DECLARE_TASKLET(kmemcheck_tasklet, &do_wakeup, 0);
-
-/*
- * Save the context of an error report.
- */
-void kmemcheck_error_save(enum kmemcheck_shadow state,
- unsigned long address, unsigned int size, struct pt_regs *regs)
-{
- static unsigned long prev_ip;
-
- struct kmemcheck_error *e;
- void *shadow_copy;
- void *memory_copy;
-
- /* Don't report several adjacent errors from the same EIP. */
- if (regs->ip == prev_ip)
- return;
- prev_ip = regs->ip;
-
- e = error_next_wr();
- if (!e)
- return;
-
- e->type = KMEMCHECK_ERROR_INVALID_ACCESS;
-
- e->state = state;
- e->address = address;
- e->size = size;
-
- /* Save regs */
- memcpy(&e->regs, regs, sizeof(*regs));
-
- /* Save stack trace */
- e->trace.nr_entries = 0;
- e->trace.entries = e->trace_entries;
- e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
- e->trace.skip = 0;
- save_stack_trace_regs(regs, &e->trace);
-
- /* Round address down to nearest 16 bytes */
- shadow_copy = kmemcheck_shadow_lookup(address
- & ~(SHADOW_COPY_SIZE - 1));
- BUG_ON(!shadow_copy);
-
- memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE);
-
- kmemcheck_show_addr(address);
- memory_copy = (void *) (address & ~(SHADOW_COPY_SIZE - 1));
- memcpy(e->memory_copy, memory_copy, SHADOW_COPY_SIZE);
- kmemcheck_hide_addr(address);
-
- tasklet_hi_schedule_first(&kmemcheck_tasklet);
-}
-
-/*
- * Save the context of a kmemcheck bug.
- */
-void kmemcheck_error_save_bug(struct pt_regs *regs)
-{
- struct kmemcheck_error *e;
-
- e = error_next_wr();
- if (!e)
- return;
-
- e->type = KMEMCHECK_ERROR_BUG;
-
- memcpy(&e->regs, regs, sizeof(*regs));
-
- e->trace.nr_entries = 0;
- e->trace.entries = e->trace_entries;
- e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
- e->trace.skip = 1;
- save_stack_trace(&e->trace);
-
- tasklet_hi_schedule_first(&kmemcheck_tasklet);
-}
diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h
index 39f80d7a874d..ea32a7d3cf1b 100644
--- a/arch/x86/mm/kmemcheck/error.h
+++ b/arch/x86/mm/kmemcheck/error.h
@@ -1,16 +1 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ARCH__X86__MM__KMEMCHECK__ERROR_H
-#define ARCH__X86__MM__KMEMCHECK__ERROR_H
-
-#include <linux/ptrace.h>
-
-#include "shadow.h"
-
-void kmemcheck_error_save(enum kmemcheck_shadow state,
- unsigned long address, unsigned int size, struct pt_regs *regs);
-
-void kmemcheck_error_save_bug(struct pt_regs *regs);
-
-void kmemcheck_error_recall(void);
-
-#endif
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
deleted file mode 100644
index 4515bae36bbe..000000000000
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ /dev/null
@@ -1,658 +0,0 @@
-/**
- * kmemcheck - a heavyweight memory checker for the linux kernel
- * Copyright (C) 2007, 2008 Vegard Nossum <vegardno@ifi.uio.no>
- * (With a lot of help from Ingo Molnar and Pekka Enberg.)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2) as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kallsyms.h>
-#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
-#include <linux/mm.h>
-#include <linux/page-flags.h>
-#include <linux/percpu.h>
-#include <linux/ptrace.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <asm/cacheflush.h>
-#include <asm/kmemcheck.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-
-#include "error.h"
-#include "opcode.h"
-#include "pte.h"
-#include "selftest.h"
-#include "shadow.h"
-
-
-#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT
-# define KMEMCHECK_ENABLED 0
-#endif
-
-#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT
-# define KMEMCHECK_ENABLED 1
-#endif
-
-#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT
-# define KMEMCHECK_ENABLED 2
-#endif
-
-int kmemcheck_enabled = KMEMCHECK_ENABLED;
-
-int __init kmemcheck_init(void)
-{
-#ifdef CONFIG_SMP
- /*
- * Limit SMP to use a single CPU. We rely on the fact that this code
- * runs before SMP is set up.
- */
- if (setup_max_cpus > 1) {
- printk(KERN_INFO
- "kmemcheck: Limiting number of CPUs to 1.\n");
- setup_max_cpus = 1;
- }
-#endif
-
- if (!kmemcheck_selftest()) {
- printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n");
- kmemcheck_enabled = 0;
- return -EINVAL;
- }
-
- printk(KERN_INFO "kmemcheck: Initialized\n");
- return 0;
-}
-
-early_initcall(kmemcheck_init);
-
-/*
- * We need to parse the kmemcheck= option before any memory is allocated.
- */
-static int __init param_kmemcheck(char *str)
-{
- int val;
- int ret;
-
- if (!str)
- return -EINVAL;
-
- ret = kstrtoint(str, 0, &val);
- if (ret)
- return ret;
- kmemcheck_enabled = val;
- return 0;
-}
-
-early_param("kmemcheck", param_kmemcheck);
-
-int kmemcheck_show_addr(unsigned long address)
-{
- pte_t *pte;
-
- pte = kmemcheck_pte_lookup(address);
- if (!pte)
- return 0;
-
- set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
- __flush_tlb_one(address);
- return 1;
-}
-
-int kmemcheck_hide_addr(unsigned long address)
-{
- pte_t *pte;
-
- pte = kmemcheck_pte_lookup(address);
- if (!pte)
- return 0;
-
- set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
- __flush_tlb_one(address);
- return 1;
-}
-
-struct kmemcheck_context {
- bool busy;
- int balance;
-
- /*
- * There can be at most two memory operands to an instruction, but
- * each address can cross a page boundary -- so we may need up to
- * four addresses that must be hidden/revealed for each fault.
- */
- unsigned long addr[4];
- unsigned long n_addrs;
- unsigned long flags;
-
- /* Data size of the instruction that caused a fault. */
- unsigned int size;
-};
-
-static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context);
-
-bool kmemcheck_active(struct pt_regs *regs)
-{
- struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
-
- return data->balance > 0;
-}
-
-/* Save an address that needs to be shown/hidden */
-static void kmemcheck_save_addr(unsigned long addr)
-{
- struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
-
- BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr));
- data->addr[data->n_addrs++] = addr;
-}
-
-static unsigned int kmemcheck_show_all(void)
-{
- struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
- unsigned int i;
- unsigned int n;
-
- n = 0;
- for (i = 0; i < data->n_addrs; ++i)
- n += kmemcheck_show_addr(data->addr[i]);
-
- return n;
-}
-
-static unsigned int kmemcheck_hide_all(void)
-{
- struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
- unsigned int i;
- unsigned int n;
-
- n = 0;
- for (i = 0; i < data->n_addrs; ++i)
- n += kmemcheck_hide_addr(data->addr[i]);
-
- return n;
-}
-
-/*
- * Called from the #PF handler.
- */
-void kmemcheck_show(struct pt_regs *regs)
-{
- struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
-
- BUG_ON(!irqs_disabled());
-
- if (unlikely(data->balance != 0)) {
- kmemcheck_show_all();
- kmemcheck_error_save_bug(regs);
- data->balance = 0;
- return;
- }
-
- /*
- * None of the addresses actually belonged to kmemcheck. Note that
- * this is not an error.
- */
- if (kmemcheck_show_all() == 0)
- return;
-
- ++data->balance;
-
- /*
- * The IF needs to be cleared as well, so that the faulting
- * instruction can run "uninterrupted". Otherwise, we might take
- * an interrupt and start executing that before we've had a chance
- * to hide the page again.
- *
- * NOTE: In the rare case of multiple faults, we must not override
- * the original flags:
- */
- if (!(regs->flags & X86_EFLAGS_TF))
- data->flags = regs->flags;
-
- regs->flags |= X86_EFLAGS_TF;
- regs->flags &= ~X86_EFLAGS_IF;
-}
-
-/*
- * Called from the #DB handler.
- */
-void kmemcheck_hide(struct pt_regs *regs)
-{
- struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
- int n;
-
- BUG_ON(!irqs_disabled());
-
- if (unlikely(data->balance != 1)) {
- kmemcheck_show_all();
- kmemcheck_error_save_bug(regs);
- data->n_addrs = 0;
- data->balance = 0;
-
- if (!(data->flags & X86_EFLAGS_TF))
- regs->flags &= ~X86_EFLAGS_TF;
- if (data->flags & X86_EFLAGS_IF)
- regs->flags |= X86_EFLAGS_IF;
- return;
- }
-
- if (kmemcheck_enabled)
- n = kmemcheck_hide_all();
- else
- n = kmemcheck_show_all();
-
- if (n == 0)
- return;
-
- --data->balance;
-
- data->n_addrs = 0;
-
- if (!(data->flags & X86_EFLAGS_TF))
- regs->flags &= ~X86_EFLAGS_TF;
- if (data->flags & X86_EFLAGS_IF)
- regs->flags |= X86_EFLAGS_IF;
-}
-
-void kmemcheck_show_pages(struct page *p, unsigned int n)
-{
- unsigned int i;
-
- for (i = 0; i < n; ++i) {
- unsigned long address;
- pte_t *pte;
- unsigned int level;
-
- address = (unsigned long) page_address(&p[i]);
- pte = lookup_address(address, &level);
- BUG_ON(!pte);
- BUG_ON(level != PG_LEVEL_4K);
-
- set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
- set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN));
- __flush_tlb_one(address);
- }
-}
-
-bool kmemcheck_page_is_tracked(struct page *p)
-{
- /* This will also check the "hidden" flag of the PTE. */
- return kmemcheck_pte_lookup((unsigned long) page_address(p));
-}
-
-void kmemcheck_hide_pages(struct page *p, unsigned int n)
-{
- unsigned int i;
-
- for (i = 0; i < n; ++i) {
- unsigned long address;
- pte_t *pte;
- unsigned int level;
-
- address = (unsigned long) page_address(&p[i]);
- pte = lookup_address(address, &level);
- BUG_ON(!pte);
- BUG_ON(level != PG_LEVEL_4K);
-
- set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
- set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN));
- __flush_tlb_one(address);
- }
-}
-
-/* Access may NOT cross page boundary */
-static void kmemcheck_read_strict(struct pt_regs *regs,
- unsigned long addr, unsigned int size)
-{
- void *shadow;
- enum kmemcheck_shadow status;
-
- shadow = kmemcheck_shadow_lookup(addr);
- if (!shadow)
- return;
-
- kmemcheck_save_addr(addr);
- status = kmemcheck_shadow_test(shadow, size);
- if (status == KMEMCHECK_SHADOW_INITIALIZED)
- return;
-
- if (kmemcheck_enabled)
- kmemcheck_error_save(status, addr, size, regs);
-
- if (kmemcheck_enabled == 2)
- kmemcheck_enabled = 0;
-
- /* Don't warn about it again. */
- kmemcheck_shadow_set(shadow, size);
-}
-
-bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
-{
- enum kmemcheck_shadow status;
- void *shadow;
-
- shadow = kmemcheck_shadow_lookup(addr);
- if (!shadow)
- return true;
-
- status = kmemcheck_shadow_test_all(shadow, size);
-
- return status == KMEMCHECK_SHADOW_INITIALIZED;
-}
-
-/* Access may cross page boundary */
-static void kmemcheck_read(struct pt_regs *regs,
- unsigned long addr, unsigned int size)
-{
- unsigned long page = addr & PAGE_MASK;
- unsigned long next_addr = addr + size - 1;
- unsigned long next_page = next_addr & PAGE_MASK;
-
- if (likely(page == next_page)) {
- kmemcheck_read_strict(regs, addr, size);
- return;
- }
-
- /*
- * What we do is basically to split the access across the
- * two pages and handle each part separately. Yes, this means
- * that we may now see reads that are 3 + 5 bytes, for
- * example (and if both are uninitialized, there will be two
- * reports), but it makes the code a lot simpler.
- */
- kmemcheck_read_strict(regs, addr, next_page - addr);
- kmemcheck_read_strict(regs, next_page, next_addr - next_page);
-}
-
-static void kmemcheck_write_strict(struct pt_regs *regs,
- unsigned long addr, unsigned int size)
-{
- void *shadow;
-
- shadow = kmemcheck_shadow_lookup(addr);
- if (!shadow)
- return;
-
- kmemcheck_save_addr(addr);
- kmemcheck_shadow_set(shadow, size);
-}
-
-static void kmemcheck_write(struct pt_regs *regs,
- unsigned long addr, unsigned int size)
-{
- unsigned long page = addr & PAGE_MASK;
- unsigned long next_addr = addr + size - 1;
- unsigned long next_page = next_addr & PAGE_MASK;
-
- if (likely(page == next_page)) {
- kmemcheck_write_strict(regs, addr, size);
- return;
- }
-
- /* See comment in kmemcheck_read(). */
- kmemcheck_write_strict(regs, addr, next_page - addr);
- kmemcheck_write_strict(regs, next_page, next_addr - next_page);
-}
-
-/*
- * Copying is hard. We have two addresses, each of which may be split across
- * a page (and each page will have different shadow addresses).
- */
-static void kmemcheck_copy(struct pt_regs *regs,
- unsigned long src_addr, unsigned long dst_addr, unsigned int size)
-{
- uint8_t shadow[8];
- enum kmemcheck_shadow status;
-
- unsigned long page;
- unsigned long next_addr;
- unsigned long next_page;
-
- uint8_t *x;
- unsigned int i;
- unsigned int n;
-
- BUG_ON(size > sizeof(shadow));
-
- page = src_addr & PAGE_MASK;
- next_addr = src_addr + size - 1;
- next_page = next_addr & PAGE_MASK;
-
- if (likely(page == next_page)) {
- /* Same page */
- x = kmemcheck_shadow_lookup(src_addr);
- if (x) {
- kmemcheck_save_addr(src_addr);
- for (i = 0; i < size; ++i)
- shadow[i] = x[i];
- } else {
- for (i = 0; i < size; ++i)
- shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
- }
- } else {
- n = next_page - src_addr;
- BUG_ON(n > sizeof(shadow));
-
- /* First page */
- x = kmemcheck_shadow_lookup(src_addr);
- if (x) {
- kmemcheck_save_addr(src_addr);
- for (i = 0; i < n; ++i)
- shadow[i] = x[i];
- } else {
- /* Not tracked */
- for (i = 0; i < n; ++i)
- shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
- }
-
- /* Second page */
- x = kmemcheck_shadow_lookup(next_page);
- if (x) {
- kmemcheck_save_addr(next_page);
- for (i = n; i < size; ++i)
- shadow[i] = x[i - n];
- } else {
- /* Not tracked */
- for (i = n; i < size; ++i)
- shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
- }
- }
-
- page = dst_addr & PAGE_MASK;
- next_addr = dst_addr + size - 1;
- next_page = next_addr & PAGE_MASK;
-
- if (likely(page == next_page)) {
- /* Same page */
- x = kmemcheck_shadow_lookup(dst_addr);
- if (x) {
- kmemcheck_save_addr(dst_addr);
- for (i = 0; i < size; ++i) {
- x[i] = shadow[i];
- shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
- }
- }
- } else {
- n = next_page - dst_addr;
- BUG_ON(n > sizeof(shadow));
-
- /* First page */
- x = kmemcheck_shadow_lookup(dst_addr);
- if (x) {
- kmemcheck_save_addr(dst_addr);
- for (i = 0; i < n; ++i) {
- x[i] = shadow[i];
- shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
- }
- }
-
- /* Second page */
- x = kmemcheck_shadow_lookup(next_page);
- if (x) {
- kmemcheck_save_addr(next_page);
- for (i = n; i < size; ++i) {
- x[i - n] = shadow[i];
- shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
- }
- }
- }
-
- status = kmemcheck_shadow_test(shadow, size);
- if (status == KMEMCHECK_SHADOW_INITIALIZED)
- return;
-
- if (kmemcheck_enabled)
- kmemcheck_error_save(status, src_addr, size, regs);
-
- if (kmemcheck_enabled == 2)
- kmemcheck_enabled = 0;
-}
-
-enum kmemcheck_method {
- KMEMCHECK_READ,
- KMEMCHECK_WRITE,
-};
-
-static void kmemcheck_access(struct pt_regs *regs,
- unsigned long fallback_address, enum kmemcheck_method fallback_method)
-{
- const uint8_t *insn;
- const uint8_t *insn_primary;
- unsigned int size;
-
- struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
-
- /* Recursive fault -- ouch. */
- if (data->busy) {
- kmemcheck_show_addr(fallback_address);
- kmemcheck_error_save_bug(regs);
- return;
- }
-
- data->busy = true;
-
- insn = (const uint8_t *) regs->ip;
- insn_primary = kmemcheck_opcode_get_primary(insn);
-
- kmemcheck_opcode_decode(insn, &size);
-
- switch (insn_primary[0]) {
-#ifdef CONFIG_KMEMCHECK_BITOPS_OK
- /* AND, OR, XOR */
- /*
- * Unfortunately, these instructions have to be excluded from
- * our regular checking since they access only some (and not
- * all) bits. This clears out "bogus" bitfield-access warnings.
- */
- case 0x80:
- case 0x81:
- case 0x82:
- case 0x83:
- switch ((insn_primary[1] >> 3) & 7) {
- /* OR */
- case 1:
- /* AND */
- case 4:
- /* XOR */
- case 6:
- kmemcheck_write(regs, fallback_address, size);
- goto out;
-
- /* ADD */
- case 0:
- /* ADC */
- case 2:
- /* SBB */
- case 3:
- /* SUB */
- case 5:
- /* CMP */
- case 7:
- break;
- }
- break;
-#endif
-
- /* MOVS, MOVSB, MOVSW, MOVSD */
- case 0xa4:
- case 0xa5:
- /*
- * These instructions are special because they take two
- * addresses, but we only get one page fault.
- */
- kmemcheck_copy(regs, regs->si, regs->di, size);
- goto out;
-
- /* CMPS, CMPSB, CMPSW, CMPSD */
- case 0xa6:
- case 0xa7:
- kmemcheck_read(regs, regs->si, size);
- kmemcheck_read(regs, regs->di, size);
- goto out;
- }
-
- /*
- * If the opcode isn't special in any way, we use the data from the
- * page fault handler to determine the address and type of memory
- * access.
- */
- switch (fallback_method) {
- case KMEMCHECK_READ:
- kmemcheck_read(regs, fallback_address, size);
- goto out;
- case KMEMCHECK_WRITE:
- kmemcheck_write(regs, fallback_address, size);
- goto out;
- }
-
-out:
- data->busy = false;
-}
-
-bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
- unsigned long error_code)
-{
- pte_t *pte;
-
- /*
- * XXX: Is it safe to assume that memory accesses from virtual 86
- * mode or non-kernel code segments will _never_ access kernel
- * memory (e.g. tracked pages)? For now, we need this to avoid
- * invoking kmemcheck for PnP BIOS calls.
- */
- if (regs->flags & X86_VM_MASK)
- return false;
- if (regs->cs != __KERNEL_CS)
- return false;
-
- pte = kmemcheck_pte_lookup(address);
- if (!pte)
- return false;
-
- WARN_ON_ONCE(in_nmi());
-
- if (error_code & 2)
- kmemcheck_access(regs, address, KMEMCHECK_WRITE);
- else
- kmemcheck_access(regs, address, KMEMCHECK_READ);
-
- kmemcheck_show(regs);
- return true;
-}
-
-bool kmemcheck_trap(struct pt_regs *regs)
-{
- if (!kmemcheck_active(regs))
- return false;
-
- /* We're done. */
- kmemcheck_hide(regs);
- return true;
-}
diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c
index df8109ddf7fe..cec594032515 100644
--- a/arch/x86/mm/kmemcheck/opcode.c
+++ b/arch/x86/mm/kmemcheck/opcode.c
@@ -1,107 +1 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/types.h>
-
-#include "opcode.h"
-
-static bool opcode_is_prefix(uint8_t b)
-{
- return
- /* Group 1 */
- b == 0xf0 || b == 0xf2 || b == 0xf3
- /* Group 2 */
- || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26
- || b == 0x64 || b == 0x65
- /* Group 3 */
- || b == 0x66
- /* Group 4 */
- || b == 0x67;
-}
-
-#ifdef CONFIG_X86_64
-static bool opcode_is_rex_prefix(uint8_t b)
-{
- return (b & 0xf0) == 0x40;
-}
-#else
-static bool opcode_is_rex_prefix(uint8_t b)
-{
- return false;
-}
-#endif
-
-#define REX_W (1 << 3)
-
-/*
- * This is a VERY crude opcode decoder. We only need to find the size of the
- * load/store that caused our #PF and this should work for all the opcodes
- * that we care about. Moreover, the ones who invented this instruction set
- * should be shot.
- */
-void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size)
-{
- /* Default operand size */
- int operand_size_override = 4;
-
- /* prefixes */
- for (; opcode_is_prefix(*op); ++op) {
- if (*op == 0x66)
- operand_size_override = 2;
- }
-
- /* REX prefix */
- if (opcode_is_rex_prefix(*op)) {
- uint8_t rex = *op;
-
- ++op;
- if (rex & REX_W) {
- switch (*op) {
- case 0x63:
- *size = 4;
- return;
- case 0x0f:
- ++op;
-
- switch (*op) {
- case 0xb6:
- case 0xbe:
- *size = 1;
- return;
- case 0xb7:
- case 0xbf:
- *size = 2;
- return;
- }
-
- break;
- }
-
- *size = 8;
- return;
- }
- }
-
- /* escape opcode */
- if (*op == 0x0f) {
- ++op;
-
- /*
- * This is move with zero-extend and sign-extend, respectively;
- * we don't have to think about 0xb6/0xbe, because this is
- * already handled in the conditional below.
- */
- if (*op == 0xb7 || *op == 0xbf)
- operand_size_override = 2;
- }
-
- *size = (*op & 1) ? operand_size_override : 1;
-}
-
-const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op)
-{
- /* skip prefixes */
- while (opcode_is_prefix(*op))
- ++op;
- if (opcode_is_rex_prefix(*op))
- ++op;
- return op;
-}
diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h
index 51a1ce94c24a..ea32a7d3cf1b 100644
--- a/arch/x86/mm/kmemcheck/opcode.h
+++ b/arch/x86/mm/kmemcheck/opcode.h
@@ -1,10 +1 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H
-#define ARCH__X86__MM__KMEMCHECK__OPCODE_H
-
-#include <linux/types.h>
-
-void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size);
-const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op);
-
-#endif
diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c
index 8a03be90272a..cec594032515 100644
--- a/arch/x86/mm/kmemcheck/pte.c
+++ b/arch/x86/mm/kmemcheck/pte.c
@@ -1,23 +1 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/mm.h>
-
-#include <asm/pgtable.h>
-
-#include "pte.h"
-
-pte_t *kmemcheck_pte_lookup(unsigned long address)
-{
- pte_t *pte;
- unsigned int level;
-
- pte = lookup_address(address, &level);
- if (!pte)
- return NULL;
- if (level != PG_LEVEL_4K)
- return NULL;
- if (!pte_hidden(*pte))
- return NULL;
-
- return pte;
-}
-
diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h
index b595612382c2..ea32a7d3cf1b 100644
--- a/arch/x86/mm/kmemcheck/pte.h
+++ b/arch/x86/mm/kmemcheck/pte.h
@@ -1,11 +1 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ARCH__X86__MM__KMEMCHECK__PTE_H
-#define ARCH__X86__MM__KMEMCHECK__PTE_H
-
-#include <linux/mm.h>
-
-#include <asm/pgtable.h>
-
-pte_t *kmemcheck_pte_lookup(unsigned long address);
-
-#endif
diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c
index 7ce0be1f99eb..cec594032515 100644
--- a/arch/x86/mm/kmemcheck/selftest.c
+++ b/arch/x86/mm/kmemcheck/selftest.c
@@ -1,71 +1 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/bug.h>
-#include <linux/kernel.h>
-
-#include "opcode.h"
-#include "selftest.h"
-
-struct selftest_opcode {
- unsigned int expected_size;
- const uint8_t *insn;
- const char *desc;
-};
-
-static const struct selftest_opcode selftest_opcodes[] = {
- /* REP MOVS */
- {1, "\xf3\xa4", "rep movsb <mem8>, <mem8>"},
- {4, "\xf3\xa5", "rep movsl <mem32>, <mem32>"},
-
- /* MOVZX / MOVZXD */
- {1, "\x66\x0f\xb6\x51\xf8", "movzwq <mem8>, <reg16>"},
- {1, "\x0f\xb6\x51\xf8", "movzwq <mem8>, <reg32>"},
-
- /* MOVSX / MOVSXD */
- {1, "\x66\x0f\xbe\x51\xf8", "movswq <mem8>, <reg16>"},
- {1, "\x0f\xbe\x51\xf8", "movswq <mem8>, <reg32>"},
-
-#ifdef CONFIG_X86_64
- /* MOVZX / MOVZXD */
- {1, "\x49\x0f\xb6\x51\xf8", "movzbq <mem8>, <reg64>"},
- {2, "\x49\x0f\xb7\x51\xf8", "movzbq <mem16>, <reg64>"},
-
- /* MOVSX / MOVSXD */
- {1, "\x49\x0f\xbe\x51\xf8", "movsbq <mem8>, <reg64>"},
- {2, "\x49\x0f\xbf\x51\xf8", "movsbq <mem16>, <reg64>"},
- {4, "\x49\x63\x51\xf8", "movslq <mem32>, <reg64>"},
-#endif
-};
-
-static bool selftest_opcode_one(const struct selftest_opcode *op)
-{
- unsigned size;
-
- kmemcheck_opcode_decode(op->insn, &size);
-
- if (size == op->expected_size)
- return true;
-
- printk(KERN_WARNING "kmemcheck: opcode %s: expected size %d, got %d\n",
- op->desc, op->expected_size, size);
- return false;
-}
-
-static bool selftest_opcodes_all(void)
-{
- bool pass = true;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(selftest_opcodes); ++i)
- pass = pass && selftest_opcode_one(&selftest_opcodes[i]);
-
- return pass;
-}
-
-bool kmemcheck_selftest(void)
-{
- bool pass = true;
-
- pass = pass && selftest_opcodes_all();
-
- return pass;
-}
diff --git a/arch/x86/mm/kmemcheck/selftest.h b/arch/x86/mm/kmemcheck/selftest.h
index 8d759aae453d..ea32a7d3cf1b 100644
--- a/arch/x86/mm/kmemcheck/selftest.h
+++ b/arch/x86/mm/kmemcheck/selftest.h
@@ -1,7 +1 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ARCH_X86_MM_KMEMCHECK_SELFTEST_H
-#define ARCH_X86_MM_KMEMCHECK_SELFTEST_H
-
-bool kmemcheck_selftest(void);
-
-#endif
diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c
deleted file mode 100644
index c2638a7d2c10..000000000000
--- a/arch/x86/mm/kmemcheck/shadow.c
+++ /dev/null
@@ -1,173 +0,0 @@
-#include <linux/kmemcheck.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-
-#include "pte.h"
-#include "shadow.h"
-
-/*
- * Return the shadow address for the given address. Returns NULL if the
- * address is not tracked.
- *
- * We need to be extremely careful not to follow any invalid pointers,
- * because this function can be called for *any* possible address.
- */
-void *kmemcheck_shadow_lookup(unsigned long address)
-{
- pte_t *pte;
- struct page *page;
-
- if (!virt_addr_valid(address))
- return NULL;
-
- pte = kmemcheck_pte_lookup(address);
- if (!pte)
- return NULL;
-
- page = virt_to_page(address);
- if (!page->shadow)
- return NULL;
- return page->shadow + (address & (PAGE_SIZE - 1));
-}
-
-static void mark_shadow(void *address, unsigned int n,
- enum kmemcheck_shadow status)
-{
- unsigned long addr = (unsigned long) address;
- unsigned long last_addr = addr + n - 1;
- unsigned long page = addr & PAGE_MASK;
- unsigned long last_page = last_addr & PAGE_MASK;
- unsigned int first_n;
- void *shadow;
-
- /* If the memory range crosses a page boundary, stop there. */
- if (page == last_page)
- first_n = n;
- else
- first_n = page + PAGE_SIZE - addr;
-
- shadow = kmemcheck_shadow_lookup(addr);
- if (shadow)
- memset(shadow, status, first_n);
-
- addr += first_n;
- n -= first_n;
-
- /* Do full-page memset()s. */
- while (n >= PAGE_SIZE) {
- shadow = kmemcheck_shadow_lookup(addr);
- if (shadow)
- memset(shadow, status, PAGE_SIZE);
-
- addr += PAGE_SIZE;
- n -= PAGE_SIZE;
- }
-
- /* Do the remaining page, if any. */
- if (n > 0) {
- shadow = kmemcheck_shadow_lookup(addr);
- if (shadow)
- memset(shadow, status, n);
- }
-}
-
-void kmemcheck_mark_unallocated(void *address, unsigned int n)
-{
- mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED);
-}
-
-void kmemcheck_mark_uninitialized(void *address, unsigned int n)
-{
- mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED);
-}
-
-/*
- * Fill the shadow memory of the given address such that the memory at that
- * address is marked as being initialized.
- */
-void kmemcheck_mark_initialized(void *address, unsigned int n)
-{
- mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED);
-}
-EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized);
-
-void kmemcheck_mark_freed(void *address, unsigned int n)
-{
- mark_shadow(address, n, KMEMCHECK_SHADOW_FREED);
-}
-
-void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n)
-{
- unsigned int i;
-
- for (i = 0; i < n; ++i)
- kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE);
-}
-
-void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
-{
- unsigned int i;
-
- for (i = 0; i < n; ++i)
- kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
-}
-
-void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n)
-{
- unsigned int i;
-
- for (i = 0; i < n; ++i)
- kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE);
-}
-
-enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
-{
-#ifdef CONFIG_KMEMCHECK_PARTIAL_OK
- uint8_t *x;
- unsigned int i;
-
- x = shadow;
-
- /*
- * Make sure _some_ bytes are initialized. Gcc frequently generates
- * code to access neighboring bytes.
- */
- for (i = 0; i < size; ++i) {
- if (x[i] == KMEMCHECK_SHADOW_INITIALIZED)
- return x[i];
- }
-
- return x[0];
-#else
- return kmemcheck_shadow_test_all(shadow, size);
-#endif
-}
-
-enum kmemcheck_shadow kmemcheck_shadow_test_all(void *shadow, unsigned int size)
-{
- uint8_t *x;
- unsigned int i;
-
- x = shadow;
-
- /* All bytes must be initialized. */
- for (i = 0; i < size; ++i) {
- if (x[i] != KMEMCHECK_SHADOW_INITIALIZED)
- return x[i];
- }
-
- return x[0];
-}
-
-void kmemcheck_shadow_set(void *shadow, unsigned int size)
-{
- uint8_t *x;
- unsigned int i;
-
- x = shadow;
- for (i = 0; i < size; ++i)
- x[i] = KMEMCHECK_SHADOW_INITIALIZED;
-}
diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h
index 49768dc18664..ea32a7d3cf1b 100644
--- a/arch/x86/mm/kmemcheck/shadow.h
+++ b/arch/x86/mm/kmemcheck/shadow.h
@@ -1,19 +1 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ARCH__X86__MM__KMEMCHECK__SHADOW_H
-#define ARCH__X86__MM__KMEMCHECK__SHADOW_H
-
-enum kmemcheck_shadow {
- KMEMCHECK_SHADOW_UNALLOCATED,
- KMEMCHECK_SHADOW_UNINITIALIZED,
- KMEMCHECK_SHADOW_INITIALIZED,
- KMEMCHECK_SHADOW_FREED,
-};
-
-void *kmemcheck_shadow_lookup(unsigned long address);
-
-enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size);
-enum kmemcheck_shadow kmemcheck_shadow_test_all(void *shadow,
- unsigned int size);
-void kmemcheck_shadow_set(void *shadow, unsigned int size);
-
-#endif
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 0286327e65fa..d9a9e9fc75dd 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -30,6 +30,8 @@
#include <asm/msr.h>
#include <asm/cmdline.h>
+#include "mm_internal.h"
+
static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[] __initdata = "on";
static char sme_cmdline_off[] __initdata = "off";
@@ -41,6 +43,10 @@ static char sme_cmdline_off[] __initdata = "off";
*/
u64 sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL(sme_me_mask);
+DEFINE_STATIC_KEY_FALSE(sev_enable_key);
+EXPORT_SYMBOL_GPL(sev_enable_key);
+
+static bool sev_enabled __section(.data);
/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
@@ -63,7 +69,6 @@ static void __init __sme_early_enc_dec(resource_size_t paddr,
if (!sme_me_mask)
return;
- local_flush_tlb();
wbinvd();
/*
@@ -190,8 +195,238 @@ void __init sme_early_init(void)
/* Update the protection map with memory encryption mask */
for (i = 0; i < ARRAY_SIZE(protection_map); i++)
protection_map[i] = pgprot_encrypted(protection_map[i]);
+
+ if (sev_active())
+ swiotlb_force = SWIOTLB_FORCE;
+}
+
+static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
+{
+ unsigned long dma_mask;
+ unsigned int order;
+ struct page *page;
+ void *vaddr = NULL;
+
+ dma_mask = dma_alloc_coherent_mask(dev, gfp);
+ order = get_order(size);
+
+ /*
+ * Memory will be memset to zero after marking decrypted, so don't
+ * bother clearing it before.
+ */
+ gfp &= ~__GFP_ZERO;
+
+ page = alloc_pages_node(dev_to_node(dev), gfp, order);
+ if (page) {
+ dma_addr_t addr;
+
+ /*
+ * Since we will be clearing the encryption bit, check the
+ * mask with it already cleared.
+ */
+ addr = __sme_clr(phys_to_dma(dev, page_to_phys(page)));
+ if ((addr + size) > dma_mask) {
+ __free_pages(page, get_order(size));
+ } else {
+ vaddr = page_address(page);
+ *dma_handle = addr;
+ }
+ }
+
+ if (!vaddr)
+ vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+
+ if (!vaddr)
+ return NULL;
+
+ /* Clear the SME encryption bit for DMA use if not swiotlb area */
+ if (!is_swiotlb_buffer(dma_to_phys(dev, *dma_handle))) {
+ set_memory_decrypted((unsigned long)vaddr, 1 << order);
+ memset(vaddr, 0, PAGE_SIZE << order);
+ *dma_handle = __sme_clr(*dma_handle);
+ }
+
+ return vaddr;
}
+static void sev_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ /* Set the SME encryption bit for re-use if not swiotlb area */
+ if (!is_swiotlb_buffer(dma_to_phys(dev, dma_handle)))
+ set_memory_encrypted((unsigned long)vaddr,
+ 1 << get_order(size));
+
+ swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+}
+
+static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+{
+ pgprot_t old_prot, new_prot;
+ unsigned long pfn, pa, size;
+ pte_t new_pte;
+
+ switch (level) {
+ case PG_LEVEL_4K:
+ pfn = pte_pfn(*kpte);
+ old_prot = pte_pgprot(*kpte);
+ break;
+ case PG_LEVEL_2M:
+ pfn = pmd_pfn(*(pmd_t *)kpte);
+ old_prot = pmd_pgprot(*(pmd_t *)kpte);
+ break;
+ case PG_LEVEL_1G:
+ pfn = pud_pfn(*(pud_t *)kpte);
+ old_prot = pud_pgprot(*(pud_t *)kpte);
+ break;
+ default:
+ return;
+ }
+
+ new_prot = old_prot;
+ if (enc)
+ pgprot_val(new_prot) |= _PAGE_ENC;
+ else
+ pgprot_val(new_prot) &= ~_PAGE_ENC;
+
+ /* If prot is same then do nothing. */
+ if (pgprot_val(old_prot) == pgprot_val(new_prot))
+ return;
+
+ pa = pfn << page_level_shift(level);
+ size = page_level_size(level);
+
+ /*
+ * We are going to perform in-place en-/decryption and change the
+ * physical page attribute from C=1 to C=0 or vice versa. Flush the
+ * caches to ensure that data gets accessed with the correct C-bit.
+ */
+ clflush_cache_range(__va(pa), size);
+
+ /* Encrypt/decrypt the contents in-place */
+ if (enc)
+ sme_early_encrypt(pa, size);
+ else
+ sme_early_decrypt(pa, size);
+
+ /* Change the page encryption mask. */
+ new_pte = pfn_pte(pfn, new_prot);
+ set_pte_atomic(kpte, new_pte);
+}
+
+static int __init early_set_memory_enc_dec(unsigned long vaddr,
+ unsigned long size, bool enc)
+{
+ unsigned long vaddr_end, vaddr_next;
+ unsigned long psize, pmask;
+ int split_page_size_mask;
+ int level, ret;
+ pte_t *kpte;
+
+ vaddr_next = vaddr;
+ vaddr_end = vaddr + size;
+
+ for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+ kpte = lookup_address(vaddr, &level);
+ if (!kpte || pte_none(*kpte)) {
+ ret = 1;
+ goto out;
+ }
+
+ if (level == PG_LEVEL_4K) {
+ __set_clr_pte_enc(kpte, level, enc);
+ vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
+ continue;
+ }
+
+ psize = page_level_size(level);
+ pmask = page_level_mask(level);
+
+ /*
+ * Check whether we can change the large page in one go.
+ * We request a split when the address is not aligned and
+ * the number of pages to set/clear encryption bit is smaller
+ * than the number of pages in the large page.
+ */
+ if (vaddr == (vaddr & pmask) &&
+ ((vaddr_end - vaddr) >= psize)) {
+ __set_clr_pte_enc(kpte, level, enc);
+ vaddr_next = (vaddr & pmask) + psize;
+ continue;
+ }
+
+ /*
+ * The virtual address is part of a larger page, create the next
+ * level page table mapping (4K or 2M). If it is part of a 2M
+ * page then we request a split of the large page into 4K
+ * chunks. A 1GB large page is split into 2M pages, resp.
+ */
+ if (level == PG_LEVEL_2M)
+ split_page_size_mask = 0;
+ else
+ split_page_size_mask = 1 << PG_LEVEL_2M;
+
+ kernel_physical_mapping_init(__pa(vaddr & pmask),
+ __pa((vaddr_end & pmask) + psize),
+ split_page_size_mask);
+ }
+
+ ret = 0;
+
+out:
+ __flush_tlb_all();
+ return ret;
+}
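
Editor's aside, not part of the patch: a stand-alone sketch of the "change the large page in one go" test a few lines up (vaddr == (vaddr & pmask) && (vaddr_end - vaddr) >= psize), assuming a 2 MiB large page.

#include <stdbool.h>
#include <stdio.h>

#define PSIZE   (2ul << 20)        /* 2 MiB large page, for illustration */
#define PMASK   (~(PSIZE - 1))

static bool whole_page(unsigned long vaddr, unsigned long vaddr_end)
{
        /* Aligned start and at least one full large page left to cover. */
        return vaddr == (vaddr & PMASK) && (vaddr_end - vaddr) >= PSIZE;
}

int main(void)
{
        /* Aligned and long enough: the 2 MiB mapping can be flipped directly. */
        printf("%d\n", whole_page(0x400000, 0x400000 + PSIZE));
        /* Unaligned start: the large page would have to be split first. */
        printf("%d\n", whole_page(0x401000, 0x401000 + PSIZE));
        return 0;
}
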
+
+int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
+{
+ return early_set_memory_enc_dec(vaddr, size, false);
+}
+
+int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
+{
+ return early_set_memory_enc_dec(vaddr, size, true);
+}
+
+/*
+ * SME and SEV are very similar but they are not the same, so there are
+ * times that the kernel will need to distinguish between SME and SEV. The
+ * sme_active() and sev_active() functions are used for this. When a
+ * distinction isn't needed, the mem_encrypt_active() function can be used.
+ *
+ * The trampoline code is a good example for this requirement. Before
+ * paging is activated, SME will access all memory as decrypted, but SEV
+ * will access all memory as encrypted. So, when APs are being brought
+ * up under SME the trampoline area cannot be encrypted, whereas under SEV
+ * the trampoline area must be encrypted.
+ */
+bool sme_active(void)
+{
+ return sme_me_mask && !sev_enabled;
+}
+EXPORT_SYMBOL_GPL(sme_active);
+
+bool sev_active(void)
+{
+ return sme_me_mask && sev_enabled;
+}
+EXPORT_SYMBOL_GPL(sev_active);
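
Editor's aside, not part of the patch: a stand-alone model of how the three predicates relate. mem_encrypt_active() is defined in a header elsewhere in the series and is assumed here to simply test sme_me_mask; the C-bit position is hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel state used by the patch. */
static unsigned long long sme_me_mask;  /* 0 when no memory encryption */
static bool sev_enabled;                /* true only in an SEV guest */

static bool mem_encrypt_active(void) { return sme_me_mask != 0; }
static bool sme_active(void)         { return sme_me_mask && !sev_enabled; }
static bool sev_active(void)         { return sme_me_mask && sev_enabled; }

int main(void)
{
        sme_me_mask = 1ULL << 47;       /* hypothetical C-bit position */
        sev_enabled = true;             /* pretend we are an SEV guest */

        /* An SEV guest reports "memory encryption active" but not SME. */
        printf("mem_encrypt_active=%d sme_active=%d sev_active=%d\n",
               mem_encrypt_active(), sme_active(), sev_active());
        return 0;
}

This mirrors the trampoline example in the comment above: code that only cares whether encryption is in effect checks mem_encrypt_active(), while code that must behave differently for host-side SME versus an SEV guest uses the two specific helpers.
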
+
+static const struct dma_map_ops sev_dma_ops = {
+ .alloc = sev_alloc,
+ .free = sev_free,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
+ .mapping_error = swiotlb_dma_mapping_error,
+};
+
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
@@ -201,7 +436,23 @@ void __init mem_encrypt_init(void)
/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
swiotlb_update_mem_attributes();
- pr_info("AMD Secure Memory Encryption (SME) active\n");
+ /*
+ * With SEV, DMA operations cannot use encryption. New DMA ops
+ * are required in order to mark the DMA areas as decrypted or
+ * to use bounce buffers.
+ */
+ if (sev_active())
+ dma_ops = &sev_dma_ops;
+
+ /*
+ * With SEV, we need to unroll the rep string I/O instructions.
+ */
+ if (sev_active())
+ static_branch_enable(&sev_enable_key);
+
+ pr_info("AMD %s active\n",
+ sev_active() ? "Secure Encrypted Virtualization (SEV)"
+ : "Secure Memory Encryption (SME)");
}
void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
@@ -529,37 +780,63 @@ void __init __nostackprotector sme_enable(struct boot_params *bp)
{
const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
unsigned int eax, ebx, ecx, edx;
+ unsigned long feature_mask;
bool active_by_default;
unsigned long me_mask;
char buffer[16];
u64 msr;
- /* Check for the SME support leaf */
+ /* Check for the SME/SEV support leaf */
eax = 0x80000000;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
if (eax < 0x8000001f)
return;
+#define AMD_SME_BIT BIT(0)
+#define AMD_SEV_BIT BIT(1)
/*
- * Check for the SME feature:
- * CPUID Fn8000_001F[EAX] - Bit 0
- * Secure Memory Encryption support
- * CPUID Fn8000_001F[EBX] - Bits 5:0
- * Pagetable bit position used to indicate encryption
+ * Set the feature mask (SME or SEV) based on whether we are
+ * running under a hypervisor.
+ */
+ eax = 1;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+ feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
+
+ /*
+ * Check for the SME/SEV feature:
+ * CPUID Fn8000_001F[EAX]
+ * - Bit 0 - Secure Memory Encryption support
+ * - Bit 1 - Secure Encrypted Virtualization support
+ * CPUID Fn8000_001F[EBX]
+ * - Bits 5:0 - Pagetable bit position used to indicate encryption
*/
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
- if (!(eax & 1))
+ if (!(eax & feature_mask))
return;
me_mask = 1UL << (ebx & 0x3f);
- /* Check if SME is enabled */
- msr = __rdmsr(MSR_K8_SYSCFG);
- if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ /* Check if memory encryption is enabled */
+ if (feature_mask == AMD_SME_BIT) {
+ /* For SME, check the SYSCFG MSR */
+ msr = __rdmsr(MSR_K8_SYSCFG);
+ if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ return;
+ } else {
+ /* For SEV, check the SEV MSR */
+ msr = __rdmsr(MSR_AMD64_SEV);
+ if (!(msr & MSR_AMD64_SEV_ENABLED))
+ return;
+
+ /* SEV state cannot be controlled by a command line option */
+ sme_me_mask = me_mask;
+ sev_enabled = true;
return;
+ }
/*
* Fixups have not been applied to phys_base yet and we're running
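
Editor's aside, not part of the patch: the CPUID leaf probed by sme_enable() above can also be inspected from user space. A minimal sketch using GCC/Clang's <cpuid.h>, assuming only the bit layout documented in the hunk above (EAX bit 0 = SME, bit 1 = SEV, EBX[5:0] = C-bit position).

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Is the 0x8000001f leaf implemented at all? */
        if (__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) == 0 ||
            eax < 0x8000001f) {
                puts("no SME/SEV support leaf");
                return 0;
        }

        __get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
        printf("SME supported: %u\n", eax & 1);
        printf("SEV supported: %u\n", (eax >> 1) & 1);
        printf("C-bit position: %u\n", ebx & 0x3f);
        return 0;
}

Whether encryption is actually enabled is a separate question, which the kernel answers via MSRs (SYSCFG for SME, MSR_AMD64_SEV for SEV) as the hunk above shows.
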
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 7eb06701a935..e500949bae24 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -13,6 +13,7 @@
#include <linux/sched/sysctl.h>
#include <asm/insn.h>
+#include <asm/insn-eval.h>
#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mpx.h>
@@ -61,123 +62,6 @@ static unsigned long mpx_mmap(unsigned long len)
return addr;
}
-enum reg_type {
- REG_TYPE_RM = 0,
- REG_TYPE_INDEX,
- REG_TYPE_BASE,
-};
-
-static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
- enum reg_type type)
-{
- int regno = 0;
-
- static const int regoff[] = {
- offsetof(struct pt_regs, ax),
- offsetof(struct pt_regs, cx),
- offsetof(struct pt_regs, dx),
- offsetof(struct pt_regs, bx),
- offsetof(struct pt_regs, sp),
- offsetof(struct pt_regs, bp),
- offsetof(struct pt_regs, si),
- offsetof(struct pt_regs, di),
-#ifdef CONFIG_X86_64
- offsetof(struct pt_regs, r8),
- offsetof(struct pt_regs, r9),
- offsetof(struct pt_regs, r10),
- offsetof(struct pt_regs, r11),
- offsetof(struct pt_regs, r12),
- offsetof(struct pt_regs, r13),
- offsetof(struct pt_regs, r14),
- offsetof(struct pt_regs, r15),
-#endif
- };
- int nr_registers = ARRAY_SIZE(regoff);
- /*
- * Don't possibly decode a 32-bit instructions as
- * reading a 64-bit-only register.
- */
- if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64)
- nr_registers -= 8;
-
- switch (type) {
- case REG_TYPE_RM:
- regno = X86_MODRM_RM(insn->modrm.value);
- if (X86_REX_B(insn->rex_prefix.value))
- regno += 8;
- break;
-
- case REG_TYPE_INDEX:
- regno = X86_SIB_INDEX(insn->sib.value);
- if (X86_REX_X(insn->rex_prefix.value))
- regno += 8;
- break;
-
- case REG_TYPE_BASE:
- regno = X86_SIB_BASE(insn->sib.value);
- if (X86_REX_B(insn->rex_prefix.value))
- regno += 8;
- break;
-
- default:
- pr_err("invalid register type");
- BUG();
- break;
- }
-
- if (regno >= nr_registers) {
- WARN_ONCE(1, "decoded an instruction with an invalid register");
- return -EINVAL;
- }
- return regoff[regno];
-}
-
-/*
- * return the address being referenced be instruction
- * for rm=3 returning the content of the rm reg
- * for rm!=3 calculates the address using SIB and Disp
- */
-static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs)
-{
- unsigned long addr, base, indx;
- int addr_offset, base_offset, indx_offset;
- insn_byte_t sib;
-
- insn_get_modrm(insn);
- insn_get_sib(insn);
- sib = insn->sib.value;
-
- if (X86_MODRM_MOD(insn->modrm.value) == 3) {
- addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
- if (addr_offset < 0)
- goto out_err;
- addr = regs_get_register(regs, addr_offset);
- } else {
- if (insn->sib.nbytes) {
- base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
- if (base_offset < 0)
- goto out_err;
-
- indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
- if (indx_offset < 0)
- goto out_err;
-
- base = regs_get_register(regs, base_offset);
- indx = regs_get_register(regs, indx_offset);
- addr = base + indx * (1 << X86_SIB_SCALE(sib));
- } else {
- addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
- if (addr_offset < 0)
- goto out_err;
- addr = regs_get_register(regs, addr_offset);
- }
- addr += insn->displacement.value;
- }
- return (void __user *)addr;
-out_err:
- return (void __user *)-1;
-}
-
static int mpx_insn_decode(struct insn *insn,
struct pt_regs *regs)
{
@@ -290,7 +174,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
info->si_signo = SIGSEGV;
info->si_errno = 0;
info->si_code = SEGV_BNDERR;
- info->si_addr = mpx_get_addr_ref(&insn, regs);
+ info->si_addr = insn_get_addr_ref(&insn, regs);
/*
* We were not able to extract an address from the instruction,
* probably because there was something invalid in it.
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index dfb7d657cf43..85cf12219dea 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -753,7 +753,7 @@ static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
if (!debug_pagealloc_enabled())
spin_unlock(&cpa_lock);
- base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
+ base = alloc_pages(GFP_KERNEL, 0);
if (!debug_pagealloc_enabled())
spin_lock(&cpa_lock);
if (!base)
@@ -904,7 +904,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
static int alloc_pte_page(pmd_t *pmd)
{
- pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+ pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
if (!pte)
return -1;
@@ -914,7 +914,7 @@ static int alloc_pte_page(pmd_t *pmd)
static int alloc_pmd_page(pud_t *pud)
{
- pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+ pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
if (!pmd)
return -1;
@@ -1120,7 +1120,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
pgd_entry = cpa->pgd + pgd_index(addr);
if (pgd_none(*pgd_entry)) {
- p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+ p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
if (!p4d)
return -1;
@@ -1132,7 +1132,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
*/
p4d = p4d_offset(pgd_entry, addr);
if (p4d_none(*p4d)) {
- pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+ pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
if (!pud)
return -1;
@@ -1781,8 +1781,8 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
unsigned long start;
int ret;
- /* Nothing to do if the SME is not active */
- if (!sme_active())
+ /* Nothing to do if memory encryption is not active */
+ if (!mem_encrypt_active())
return 0;
/* Should not be working on unaligned addresses */
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 17ebc5a978cc..96d456a94b03 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -7,7 +7,7 @@
#include <asm/fixmap.h>
#include <asm/mtrr.h>
-#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index ffdbc4836b4f..174c59774cc9 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -592,7 +592,7 @@ enum __force_cpu_type {
static int force_cpu_type;
-static int set_cpu_type(const char *str, struct kernel_param *kp)
+static int set_cpu_type(const char *str, const struct kernel_param *kp)
{
if (!strcmp(str, "timer")) {
force_cpu_type = timer;
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 350f7096baac..7913b6921959 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -212,8 +212,8 @@ static void arch_perfmon_setup_counters(void)
eax.full = cpuid_eax(0xa);
/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
- if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
- __this_cpu_read(cpu_info.x86_model) == 15) {
+ if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model == 15) {
eax.split.version_id = 2;
eax.split.num_counters = 2;
eax.split.bit_width = 40;
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 4210da7b44de..1e996df687a3 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -636,3 +636,88 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+
+#define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)
+#define AMD_141b_MMIO_BASE_RE_MASK BIT(0)
+#define AMD_141b_MMIO_BASE_WE_MASK BIT(1)
+#define AMD_141b_MMIO_BASE_MMIOBASE_MASK GENMASK(31,8)
+
+#define AMD_141b_MMIO_LIMIT(x) (0x84 + (x) * 0x8)
+#define AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK GENMASK(31,8)
+
+#define AMD_141b_MMIO_HIGH(x) (0x180 + (x) * 0x4)
+#define AMD_141b_MMIO_HIGH_MMIOBASE_MASK GENMASK(7,0)
+#define AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT 16
+#define AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK GENMASK(23,16)
+
+/*
+ * The PCI Firmware Spec, rev 3.2, notes that ACPI should optionally allow
+ * configuring host bridge windows using the _PRS and _SRS methods.
+ *
+ * But this is rarely implemented, so we manually enable a large 64bit BAR for
+ * PCIe devices on AMD Family 15h (Models 00h-1fh, 30h-3fh, 60h-7fh) processors

+ * here.
+ */
+static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
+{
+ unsigned i;
+ u32 base, limit, high;
+ struct resource *res, *conflict;
+
+ for (i = 0; i < 8; i++) {
+ pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
+ pci_read_config_dword(dev, AMD_141b_MMIO_HIGH(i), &high);
+
+ /* Is this slot free? */
+ if (!(base & (AMD_141b_MMIO_BASE_RE_MASK |
+ AMD_141b_MMIO_BASE_WE_MASK)))
+ break;
+
+ base >>= 8;
+ base |= high << 24;
+
+ /* Abort if a slot already configures a 64bit BAR. */
+ if (base > 0x10000)
+ return;
+ }
+ if (i == 8)
+ return;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return;
+
+ res->name = "PCI Bus 0000:00";
+ res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
+ IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
+ res->start = 0x100000000ull;
+ res->end = 0xfd00000000ull - 1;
+
+ /* Just grab the free area behind system memory for this */
+ while ((conflict = request_resource_conflict(&iomem_resource, res)))
+ res->start = conflict->end + 1;
+
+ dev_info(&dev->dev, "adding root bus resource %pR\n", res);
+
+ base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
+ AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
+ limit = ((res->end + 1) >> 8) & AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK;
+ high = ((res->start >> 40) & AMD_141b_MMIO_HIGH_MMIOBASE_MASK) |
+ ((((res->end + 1) >> 40) << AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT)
+ & AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK);
+
+ pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
+ pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
+ pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
+
+ pci_bus_add_resource(dev->bus, res, 0);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
+
+#endif
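
Editor's aside, not part of the patch: a stand-alone computation of the three register values the fixup above would program for its default window [0x1_0000_0000, 0xfd_0000_0000), using the same mask and shift expressions; the numbers are illustrative only.

#include <stdio.h>

/* Same encodings as the fixup above. */
#define RE_MASK                 0x1u
#define WE_MASK                 0x2u
#define MMIOBASE_MASK           0xffffff00u     /* GENMASK(31, 8) */
#define MMIOLIMIT_MASK          0xffffff00u     /* GENMASK(31, 8) */
#define HIGH_BASE_MASK          0x000000ffu     /* GENMASK(7, 0)  */
#define HIGH_LIMIT_SHIFT        16
#define HIGH_LIMIT_MASK         0x00ff0000u     /* GENMASK(23, 16) */

int main(void)
{
        unsigned long long start = 0x100000000ull;      /* res->start */
        unsigned long long end = 0xfd00000000ull - 1;   /* res->end */

        unsigned int base = (unsigned int)((start >> 8) & MMIOBASE_MASK) |
                            RE_MASK | WE_MASK;
        unsigned int limit = (unsigned int)(((end + 1) >> 8) & MMIOLIMIT_MASK);
        unsigned int high = (unsigned int)((start >> 40) & HIGH_BASE_MASK) |
                            (unsigned int)((((end + 1) >> 40) << HIGH_LIMIT_SHIFT) &
                                           HIGH_LIMIT_MASK);

        /* Prints base=0x01000003 limit=0xfd000000 high=0x00000000. */
        printf("base=0x%08x limit=0x%08x high=0x%08x\n", base, limit, high);
        return 0;
}

With both window ends below 2^40, the high register stays zero; per the shifts above it only carries address bits [47:40].
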
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 1012a5f0f98d..511921045312 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -280,7 +280,7 @@ static void intel_mid_pci_irq_disable(struct pci_dev *dev)
}
}
-static struct pci_ops intel_mid_pci_ops = {
+static const struct pci_ops intel_mid_pci_ops __initconst = {
.read = pci_read,
.write = pci_write,
};
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 20fb31579b69..6a151ce70e86 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -33,6 +33,7 @@
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
+#include <linux/mem_encrypt.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -206,7 +207,7 @@ int __init efi_alloc_page_tables(void)
if (efi_enabled(EFI_OLD_MEMMAP))
return 0;
- gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
+ gfp_mask = GFP_KERNEL | __GFP_ZERO;
efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
if (!efi_pgd)
return -ENOMEM;
@@ -370,7 +371,11 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
* as trim_bios_range() will reserve the first page and isolate it away
* from memory allocators anyway.
*/
- if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
+ pf = _PAGE_RW;
+ if (sev_active())
+ pf |= _PAGE_ENC;
+
+ if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
pr_err("Failed to create 1:1 mapping for the first page!\n");
return 1;
}
@@ -413,6 +418,9 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va)
if (!(md->attribute & EFI_MEMORY_WB))
flags |= _PAGE_PCD;
+ if (sev_active())
+ flags |= _PAGE_ENC;
+
pfn = md->phys_addr >> PAGE_SHIFT;
if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
@@ -539,6 +547,9 @@ static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *m
if (!(md->attribute & EFI_MEMORY_RO))
pf |= _PAGE_RW;
+ if (sev_active())
+ pf |= _PAGE_ENC;
+
return efi_update_mappings(md, pf);
}
@@ -590,6 +601,9 @@ void __init efi_runtime_update_mappings(void)
(md->type != EFI_RUNTIME_SERVICES_CODE))
pf |= _PAGE_RW;
+ if (sev_active())
+ pf |= _PAGE_ENC;
+
efi_update_mappings(md, pf);
}
}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
index 74283875c7e8..e639e3116acf 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
@@ -62,10 +62,9 @@ static struct platform_device pb_device = {
static int __init pb_keys_init(void)
{
struct gpio_keys_button *gb = gpio_button;
- int i, num, good = 0;
+ int i, good = 0;
- num = sizeof(gpio_button) / sizeof(struct gpio_keys_button);
- for (i = 0; i < num; i++) {
+ for (i = 0; i < ARRAY_SIZE(gpio_button); i++) {
gb[i].gpio = get_gpio_by_name(gb[i].desc);
pr_debug("info[%2d]: name = %s, gpio = %d\n", i, gb[i].desc,
gb[i].gpio);
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index 03fc397335b7..5f6fd860820a 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -127,10 +127,11 @@ static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
* Re-target the irq to the specified CPU and enable the specified MMR located
* on the specified blade to allow the sending of MSIs to the specified CPU.
*/
-static void uv_domain_activate(struct irq_domain *domain,
- struct irq_data *irq_data)
+static int uv_domain_activate(struct irq_domain *domain,
+ struct irq_data *irq_data, bool early)
{
uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
+ return 0;
}
/*
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index ed84d3917a59..d10105825d57 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -64,9 +64,10 @@ static void __init setup_real_mode(void)
/*
* If SME is active, the trampoline area will need to be in
* decrypted memory in order to bring up other processors
- * successfully.
+ * successfully. This is not needed for SEV.
*/
- set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);
+ if (sme_active())
+ set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);
memcpy(base, real_mode_blob, size);
diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c
index 836a1eb5df43..3ee234b6234d 100644
--- a/arch/x86/um/ldt.c
+++ b/arch/x86/um/ldt.c
@@ -6,6 +6,7 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <os.h>
@@ -369,7 +370,9 @@ void free_ldt(struct mm_context *mm)
mm->arch.ldt.entry_count = 0;
}
-int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
+ unsigned long , bytecount)
{
- return do_modify_ldt_skas(func, ptr, bytecount);
+ /* See non-um modify_ldt() for why we do this cast */
+ return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
}
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index 30434b8708f2..6b830d4cb4c8 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -31,7 +31,7 @@ static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
return 0xfd;
}
-static unsigned long xen_set_apic_id(unsigned int x)
+static u32 xen_set_apic_id(unsigned int x)
{
WARN_ON(1);
return x;
@@ -161,12 +161,10 @@ static struct apic xen_pv_apic = {
/* .irq_delivery_mode - used in native_compose_msi_msg only */
/* .irq_dest_mode - used in native_compose_msi_msg only */
- .target_cpus = default_target_cpus,
.disable_esr = 0,
/* .dest_logical - default_send_IPI_ use it but we use our own. */
.check_apicid_used = default_check_apicid_used, /* Used on 32-bit */
- .vector_allocation_domain = flat_vector_allocation_domain,
.init_apic_ldr = xen_noop, /* setup_local_APIC calls it */
.ioapic_phys_id_map = default_ioapic_phys_id_map, /* Used on 32-bit */
@@ -179,7 +177,7 @@ static struct apic xen_pv_apic = {
.get_apic_id = xen_get_apic_id,
.set_apic_id = xen_set_apic_id, /* Can be NULL on 32-bit. */
- .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
+ .calc_dest_apicid = apic_flat_calc_apicid,
#ifdef CONFIG_SMP
.send_IPI_mask = xen_send_IPI_mask,
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index de503c225ae1..826898701045 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -1,3 +1,4 @@
+#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/memblock.h>
@@ -188,8 +189,6 @@ static void __init xen_hvm_guest_init(void)
xen_hvm_init_time_ops();
xen_hvm_init_mmu_ops();
- if (xen_pvh_domain())
- machine_ops.emergency_restart = xen_emergency_restart;
#ifdef CONFIG_KEXEC_CORE
machine_ops.shutdown = xen_hvm_shutdown;
machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
@@ -226,12 +225,33 @@ static uint32_t __init xen_platform_hvm(void)
return xen_cpuid_base();
}
-const struct hypervisor_x86 x86_hyper_xen_hvm = {
+static __init void xen_hvm_guest_late_init(void)
+{
+#ifdef CONFIG_XEN_PVH
+ /* Test for PVH domain (PVH boot path taken overrides ACPI flags). */
+ if (!xen_pvh &&
+ (x86_platform.legacy.rtc || !x86_platform.legacy.no_vga))
+ return;
+
+ /* PVH detected. */
+ xen_pvh = true;
+
+ /* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */
+ if (!nr_ioapics && acpi_irq_model == ACPI_IRQ_MODEL_PIC)
+ acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;
+
+ machine_ops.emergency_restart = xen_emergency_restart;
+ pv_info.name = "Xen PVH";
+#endif
+}
+
+const __initconst struct hypervisor_x86 x86_hyper_xen_hvm = {
.name = "Xen HVM",
.detect = xen_platform_hvm,
- .init_platform = xen_hvm_guest_init,
- .pin_vcpu = xen_pin_vcpu,
- .x2apic_available = xen_x2apic_para_available,
- .init_mem_mapping = xen_hvm_init_mem_mapping,
+ .type = X86_HYPER_XEN_HVM,
+ .init.init_platform = xen_hvm_guest_init,
+ .init.x2apic_available = xen_x2apic_para_available,
+ .init.init_mem_mapping = xen_hvm_init_mem_mapping,
+ .init.guest_late_init = xen_hvm_guest_late_init,
+ .runtime.pin_vcpu = xen_pin_vcpu,
};
-EXPORT_SYMBOL(x86_hyper_xen_hvm);
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index d4396e27b1fb..5b2b3f3f6531 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -601,7 +601,7 @@ static struct trap_array_entry trap_array[] = {
#ifdef CONFIG_X86_MCE
{ machine_check, xen_machine_check, true },
#endif
- { nmi, xen_nmi, true },
+ { nmi, xen_xennmi, true },
{ overflow, xen_overflow, false },
#ifdef CONFIG_IA32_EMULATION
{ entry_INT80_compat, xen_entry_INT80_compat, false },
@@ -811,15 +811,14 @@ static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
}
}
-static void xen_load_sp0(struct tss_struct *tss,
- struct thread_struct *thread)
+static void xen_load_sp0(unsigned long sp0)
{
struct multicall_space mcs;
mcs = xen_mc_entry(0);
- MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
+ MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
xen_mc_issue(PARAVIRT_LAZY_CPU);
- tss->x86_tss.sp0 = thread->sp0;
+ this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
}
void xen_set_iopl_mask(unsigned mask)
@@ -1231,6 +1230,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
x86_platform.get_nmi_reason = xen_get_nmi_reason;
x86_init.resources.memory_setup = xen_memory_setup;
+ x86_init.irqs.intr_mode_init = x86_init_noop;
x86_init.oem.arch_setup = xen_arch_setup;
x86_init.oem.banner = xen_banner;
@@ -1460,9 +1460,9 @@ static uint32_t __init xen_platform_pv(void)
return 0;
}
-const struct hypervisor_x86 x86_hyper_xen_pv = {
+const __initconst struct hypervisor_x86 x86_hyper_xen_pv = {
.name = "Xen PV",
.detect = xen_platform_pv,
- .pin_vcpu = xen_pin_vcpu,
+ .type = X86_HYPER_XEN_PV,
+ .runtime.pin_vcpu = xen_pin_vcpu,
};
-EXPORT_SYMBOL(x86_hyper_xen_pv);
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index 7bd3ee08393e..436c4f003e17 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -25,13 +25,6 @@ struct boot_params pvh_bootparams __attribute__((section(".data")));
struct hvm_start_info pvh_start_info;
unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
-static void xen_pvh_arch_setup(void)
-{
- /* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */
- if (nr_ioapics == 0)
- acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;
-}
-
static void __init init_pvh_bootparams(void)
{
struct xen_memory_map memmap;
@@ -102,6 +95,4 @@ void __init xen_prepare_pvh(void)
wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
init_pvh_bootparams();
-
- x86_init.oem.arch_setup = xen_pvh_arch_setup;
}
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 71495f1a86d7..2ccdaba31a07 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -449,7 +449,7 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
-#if CONFIG_PGTABLE_LEVELS == 4
+#ifdef CONFIG_X86_64
__visible pudval_t xen_pud_val(pud_t pud)
{
return pte_mfn_to_pfn(pud.pud);
@@ -538,7 +538,7 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val)
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
-#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+#endif /* CONFIG_X86_64 */
static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
@@ -580,21 +580,17 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
bool last, unsigned long limit)
{
- int i, nr, flush = 0;
+ int flush = 0;
+ pud_t *pud;
- nr = last ? p4d_index(limit) + 1 : PTRS_PER_P4D;
- for (i = 0; i < nr; i++) {
- pud_t *pud;
- if (p4d_none(p4d[i]))
- continue;
+ if (p4d_none(*p4d))
+ return flush;
- pud = pud_offset(&p4d[i], 0);
- if (PTRS_PER_PUD > 1)
- flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
- flush |= xen_pud_walk(mm, pud, func,
- last && i == nr - 1, limit);
- }
+ pud = pud_offset(p4d, 0);
+ if (PTRS_PER_PUD > 1)
+ flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
+ flush |= xen_pud_walk(mm, pud, func, last, limit);
return flush;
}
@@ -644,8 +640,6 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
continue;
p4d = p4d_offset(&pgd[i], 0);
- if (PTRS_PER_P4D > 1)
- flush |= (*func)(mm, virt_to_page(p4d), PT_P4D);
flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
}
@@ -1176,22 +1170,14 @@ static void __init xen_cleanmfnmap(unsigned long vaddr)
{
pgd_t *pgd;
p4d_t *p4d;
- unsigned int i;
bool unpin;
unpin = (vaddr == 2 * PGDIR_SIZE);
vaddr &= PMD_MASK;
pgd = pgd_offset_k(vaddr);
p4d = p4d_offset(pgd, 0);
- for (i = 0; i < PTRS_PER_P4D; i++) {
- if (p4d_none(p4d[i]))
- continue;
- xen_cleanmfnmap_p4d(p4d + i, unpin);
- }
- if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
- set_pgd(pgd, __pgd(0));
- xen_cleanmfnmap_free_pgtbl(p4d, unpin);
- }
+ if (!p4d_none(*p4d))
+ xen_cleanmfnmap_p4d(p4d, unpin);
}
static void __init xen_pagetable_p2m_free(void)
@@ -1692,7 +1678,7 @@ static void xen_release_pmd(unsigned long pfn)
xen_release_ptpage(pfn, PT_PMD);
}
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
xen_alloc_ptpage(mm, pfn, PT_PUD);
@@ -2029,13 +2015,12 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
*/
void __init xen_relocate_p2m(void)
{
- phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys;
+ phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
- int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d;
+ int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
pte_t *pt;
pmd_t *pmd;
pud_t *pud;
- p4d_t *p4d = NULL;
pgd_t *pgd;
unsigned long *new_p2m;
int save_pud;
@@ -2045,11 +2030,7 @@ void __init xen_relocate_p2m(void)
n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
- if (PTRS_PER_P4D > 1)
- n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
- else
- n_p4d = 0;
- n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d;
+ n_frames = n_pte + n_pt + n_pmd + n_pud;
new_area = xen_find_free_area(PFN_PHYS(n_frames));
if (!new_area) {
@@ -2065,76 +2046,56 @@ void __init xen_relocate_p2m(void)
* To avoid any possible virtual address collision, just use
* 2 * PUD_SIZE for the new area.
*/
- p4d_phys = new_area;
- pud_phys = p4d_phys + PFN_PHYS(n_p4d);
+ pud_phys = new_area;
pmd_phys = pud_phys + PFN_PHYS(n_pud);
pt_phys = pmd_phys + PFN_PHYS(n_pmd);
p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
pgd = __va(read_cr3_pa());
new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
- idx_p4d = 0;
save_pud = n_pud;
- do {
- if (n_p4d > 0) {
- p4d = early_memremap(p4d_phys, PAGE_SIZE);
- clear_page(p4d);
- n_pud = min(save_pud, PTRS_PER_P4D);
- }
- for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
- pud = early_memremap(pud_phys, PAGE_SIZE);
- clear_page(pud);
- for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
- idx_pmd++) {
- pmd = early_memremap(pmd_phys, PAGE_SIZE);
- clear_page(pmd);
- for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
- idx_pt++) {
- pt = early_memremap(pt_phys, PAGE_SIZE);
- clear_page(pt);
- for (idx_pte = 0;
- idx_pte < min(n_pte, PTRS_PER_PTE);
- idx_pte++) {
- set_pte(pt + idx_pte,
- pfn_pte(p2m_pfn, PAGE_KERNEL));
- p2m_pfn++;
- }
- n_pte -= PTRS_PER_PTE;
- early_memunmap(pt, PAGE_SIZE);
- make_lowmem_page_readonly(__va(pt_phys));
- pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
- PFN_DOWN(pt_phys));
- set_pmd(pmd + idx_pt,
- __pmd(_PAGE_TABLE | pt_phys));
- pt_phys += PAGE_SIZE;
+ for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
+ pud = early_memremap(pud_phys, PAGE_SIZE);
+ clear_page(pud);
+ for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
+ idx_pmd++) {
+ pmd = early_memremap(pmd_phys, PAGE_SIZE);
+ clear_page(pmd);
+ for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
+ idx_pt++) {
+ pt = early_memremap(pt_phys, PAGE_SIZE);
+ clear_page(pt);
+ for (idx_pte = 0;
+ idx_pte < min(n_pte, PTRS_PER_PTE);
+ idx_pte++) {
+ set_pte(pt + idx_pte,
+ pfn_pte(p2m_pfn, PAGE_KERNEL));
+ p2m_pfn++;
}
- n_pt -= PTRS_PER_PMD;
- early_memunmap(pmd, PAGE_SIZE);
- make_lowmem_page_readonly(__va(pmd_phys));
- pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
- PFN_DOWN(pmd_phys));
- set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
- pmd_phys += PAGE_SIZE;
+ n_pte -= PTRS_PER_PTE;
+ early_memunmap(pt, PAGE_SIZE);
+ make_lowmem_page_readonly(__va(pt_phys));
+ pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
+ PFN_DOWN(pt_phys));
+ set_pmd(pmd + idx_pt,
+ __pmd(_PAGE_TABLE | pt_phys));
+ pt_phys += PAGE_SIZE;
}
- n_pmd -= PTRS_PER_PUD;
- early_memunmap(pud, PAGE_SIZE);
- make_lowmem_page_readonly(__va(pud_phys));
- pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
- if (n_p4d > 0)
- set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys));
- else
- set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
- pud_phys += PAGE_SIZE;
- }
- if (n_p4d > 0) {
- save_pud -= PTRS_PER_P4D;
- early_memunmap(p4d, PAGE_SIZE);
- make_lowmem_page_readonly(__va(p4d_phys));
- pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys));
- set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys));
- p4d_phys += PAGE_SIZE;
+ n_pt -= PTRS_PER_PMD;
+ early_memunmap(pmd, PAGE_SIZE);
+ make_lowmem_page_readonly(__va(pmd_phys));
+ pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
+ PFN_DOWN(pmd_phys));
+ set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
+ pmd_phys += PAGE_SIZE;
}
- } while (++idx_p4d < n_p4d);
+ n_pmd -= PTRS_PER_PUD;
+ early_memunmap(pud, PAGE_SIZE);
+ make_lowmem_page_readonly(__va(pud_phys));
+ pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
+ set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
+ pud_phys += PAGE_SIZE;
+ }
/* Now copy the old p2m info to the new area. */
memcpy(new_p2m, xen_p2m_addr, size);
@@ -2361,7 +2322,7 @@ static void __init xen_post_allocator_init(void)
pv_mmu_ops.set_pte = xen_set_pte;
pv_mmu_ops.set_pmd = xen_set_pmd;
pv_mmu_ops.set_pud = xen_set_pud;
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
pv_mmu_ops.set_p4d = xen_set_p4d;
#endif
@@ -2371,7 +2332,7 @@ static void __init xen_post_allocator_init(void)
pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
pv_mmu_ops.release_pte = xen_release_pte;
pv_mmu_ops.release_pmd = xen_release_pmd;
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
pv_mmu_ops.alloc_pud = xen_alloc_pud;
pv_mmu_ops.release_pud = xen_release_pud;
#endif
@@ -2435,14 +2396,14 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
.pud_val = PV_CALLEE_SAVE(xen_pud_val),
.make_pud = PV_CALLEE_SAVE(xen_make_pud),
.set_p4d = xen_set_p4d_hyper,
.alloc_pud = xen_alloc_pmd_init,
.release_pud = xen_release_pmd_init,
-#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+#endif /* CONFIG_X86_64 */
.activate_mm = xen_activate_mm,
.dup_mmap = xen_dup_mmap,
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 6083ba462f35..13b4f19b9131 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -547,7 +547,7 @@ int xen_alloc_p2m_entry(unsigned long pfn)
if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
topidx = p2m_top_index(pfn);
top_mfn_p = &p2m_top_mfn[topidx];
- mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);
+ mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]);
BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 05f91ce9b55e..c0c756c76afe 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -14,6 +14,7 @@
* single-threaded.
*/
#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
@@ -294,12 +295,19 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
#endif
memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
+ /*
+ * Bring up the CPU in cpu_bringup_and_idle() with the stack
+ * pointing just below where pt_regs would be if it were a normal
+ * kernel entry.
+ */
ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
ctxt->flags = VGCF_IN_KERNEL;
ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
ctxt->user_regs.ds = __USER_DS;
ctxt->user_regs.es = __USER_DS;
ctxt->user_regs.ss = __KERNEL_DS;
+ ctxt->user_regs.cs = __KERNEL_CS;
+ ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle);
xen_copy_trap_info(ctxt->trap_ctxt);
@@ -314,8 +322,13 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
ctxt->gdt_frames[0] = gdt_mfn;
ctxt->gdt_ents = GDT_ENTRIES;
+ /*
+ * Set SS:SP that Xen will use when entering guest kernel mode
+ * from guest user mode. Subsequent calls to load_sp0() can
+ * change this value.
+ */
ctxt->kernel_ss = __KERNEL_DS;
- ctxt->kernel_sp = idle->thread.sp0;
+ ctxt->kernel_sp = task_top_of_stack(idle);
#ifdef CONFIG_X86_32
ctxt->event_callback_cs = __KERNEL_CS;
@@ -327,10 +340,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
(unsigned long)xen_hypervisor_callback;
ctxt->failsafe_callback_eip =
(unsigned long)xen_failsafe_callback;
- ctxt->user_regs.cs = __KERNEL_CS;
per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
- ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
BUG();
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 08324c64005d..02f3445a2b5f 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <asm/paravirt.h>
+#include <asm/qspinlock.h>
#include <xen/interface/xen.h>
#include <xen/events.h>
@@ -81,8 +82,11 @@ void xen_init_lock_cpu(int cpu)
int irq;
char *name;
- if (!xen_pvspin)
+ if (!xen_pvspin) {
+ if (cpu == 0)
+ static_branch_disable(&virt_spin_lock_key);
return;
+ }
WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
cpu, per_cpu(lock_kicker_irq, cpu));
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index c98a48c861fd..8a10c9a9e2b5 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -30,7 +30,7 @@ xen_pv_trap debug
xen_pv_trap xendebug
xen_pv_trap int3
xen_pv_trap xenint3
-xen_pv_trap nmi
+xen_pv_trap xennmi
xen_pv_trap overflow
xen_pv_trap bounds
xen_pv_trap invalid_op
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index b5b8d7f43557..497cc55a0c16 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -10,6 +10,7 @@
#include <asm/boot.h>
#include <asm/asm.h>
#include <asm/page_types.h>
+#include <asm/unwind_hints.h>
#include <xen/interface/elfnote.h>
#include <xen/interface/features.h>
@@ -20,6 +21,7 @@
#ifdef CONFIG_XEN_PV
__INIT
ENTRY(startup_xen)
+ UNWIND_HINT_EMPTY
cld
/* Clear .bss */
@@ -34,21 +36,24 @@ ENTRY(startup_xen)
mov $init_thread_union+THREAD_SIZE, %_ASM_SP
jmp xen_start_kernel
-
+END(startup_xen)
__FINIT
#endif
.pushsection .text
.balign PAGE_SIZE
ENTRY(hypercall_page)
- .skip PAGE_SIZE
+ .rept (PAGE_SIZE / 32)
+ UNWIND_HINT_EMPTY
+ .skip 32
+ .endr
#define HYPERCALL(n) \
.equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \
.type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32
#include <asm/xen-hypercalls.h>
#undef HYPERCALL
-
+END(hypercall_page)
.popsection
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index eb1f196c3f6e..8bc52f749f20 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -224,7 +224,7 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
then enter your normal kernel breakpoints once the MMU was mapped
to the kernel mappings (0XC0000000).
- This unfortunately doesn't work for U-Boot and likley also wont
+ This unfortunately won't work for U-Boot and likely also won't
work for using KEXEC to have a hot kernel ready for doing a
KDUMP.
diff --git a/arch/xtensa/boot/.gitignore b/arch/xtensa/boot/.gitignore
index be7655998b26..38177c7ebcab 100644
--- a/arch/xtensa/boot/.gitignore
+++ b/arch/xtensa/boot/.gitignore
@@ -1,3 +1,2 @@
uImage
zImage.redboot
-*.dtb
diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile
index a15e241c9153..f8052ba5aea8 100644
--- a/arch/xtensa/boot/dts/Makefile
+++ b/arch/xtensa/boot/dts/Makefile
@@ -12,9 +12,6 @@ ifneq ($(CONFIG_BUILTIN_DTB),"")
obj-$(CONFIG_OF) += $(BUILTIN_DTB)
endif
-dtstree := $(srctree)/$(src)
-dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
-
-always += $(dtb-y)
-clean-files += *.dtb *.dtb.S
-
+# for CONFIG_OF_ALL_DTBS test
+dtstree := $(srctree)/$(src)
+dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 269738dc9d1d..153bf2370988 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -23,9 +23,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &xtensa_dma_map_ops;
}
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction);
-
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return (dma_addr_t)paddr;
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
index e4f366a488d3..5c83798e3b2e 100644
--- a/arch/xtensa/include/asm/pci.h
+++ b/arch/xtensa/include/asm/pci.h
@@ -37,8 +37,6 @@ extern struct pci_controller* pcibios_alloc_controller(void);
#include <linux/string.h>
#include <asm/io.h>
-struct pci_dev;
-
/* The PCI address space does equal the physical memory address space.
* The networking and block device layers use this boolean for bounce buffer
* decisions.
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
index 3bb49681ee24..c6e1290dcbb7 100644
--- a/arch/xtensa/include/asm/spinlock.h
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -33,8 +33,6 @@
#define arch_spin_is_locked(x) ((x)->slock != 0)
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -97,8 +95,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
* 0x80000000 one writer owns the rwlock, no other writers, no readers
*/
-#define arch_write_can_lock(x) ((x)->lock == 0)
-
static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -200,7 +196,4 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
: "memory");
}
-#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-
#endif /* _XTENSA_SPINLOCK_H */
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index cec86a1c2acc..623720a11143 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -26,29 +26,6 @@
#include <asm/cacheflush.h>
#include <asm/io.h>
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
-{
- switch (dir) {
- case DMA_BIDIRECTIONAL:
- __flush_invalidate_dcache_range((unsigned long)vaddr, size);
- break;
-
- case DMA_FROM_DEVICE:
- __invalidate_dcache_range((unsigned long)vaddr, size);
- break;
-
- case DMA_TO_DEVICE:
- __flush_dcache_range((unsigned long)vaddr, size);
- break;
-
- case DMA_NONE:
- BUG();
- break;
- }
-}
-EXPORT_SYMBOL(dma_cache_sync);
-
static void do_cache_op(dma_addr_t dma_handle, size_t size,
void (*fn)(unsigned long, unsigned long))
{
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index c45b90bb9339..1b6418407467 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -110,13 +110,13 @@ static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio)
sector_t sector = bio->bi_iter.bi_sector;
bio_for_each_segment(bvec, bio, iter) {
- char *buffer = __bio_kmap_atomic(bio, iter);
+ char *buffer = kmap_atomic(bvec.bv_page) + bvec.bv_offset;
unsigned len = bvec.bv_len >> SECTOR_SHIFT;
simdisk_transfer(dev, sector, len, buffer,
bio_data_dir(bio) == WRITE);
sector += len;
- __bio_kunmap_atomic(buffer);
+ kunmap_atomic(buffer);
}
bio_endio(bio);
diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
index 4dc0c1b43f4b..2f7eb66c23ec 100644
--- a/arch/xtensa/platforms/xtfpga/lcd.c
+++ b/arch/xtensa/platforms/xtfpga/lcd.c
@@ -34,23 +34,23 @@
static void lcd_put_byte(u8 *addr, u8 data)
{
#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
- ACCESS_ONCE(*addr) = data;
+ WRITE_ONCE(*addr, data);
#else
- ACCESS_ONCE(*addr) = data & 0xf0;
- ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
+ WRITE_ONCE(*addr, data & 0xf0);
+ WRITE_ONCE(*addr, (data << 4) & 0xf0);
#endif
}
static int __init lcd_init(void)
{
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
mdelay(5);
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
udelay(200);
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
udelay(50);
#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
udelay(50);
lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
udelay(50);
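
The p2m.c and lcd.c hunks above are part of the tree-wide retirement of ACCESS_ONCE() in favour of READ_ONCE()/WRITE_ONCE(), which also work on non-scalar types. A minimal sketch of the conversion pattern; the register pointer and the read-back are only illustrative, not part of this driver:

        static void lcd_write_example(volatile u8 *reg)
        {
                u8 v;

                WRITE_ONCE(*reg, 0x30);   /* was: ACCESS_ONCE(*reg) = 0x30;  */
                v = READ_ONCE(*reg);      /* was: v = ACCESS_ONCE(*reg);     */
                (void)v;                  /* both stay single, non-torn accesses */
        }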
diff --git a/block/badblocks.c b/block/badblocks.c
index 43c71166e1e2..91f7bcf979d3 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -178,7 +178,7 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
if (bb->shift < 0)
/* badblocks are disabled */
- return 0;
+ return 1;
if (bb->shift) {
/* round the start down, and the end up */
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index a4783da90ba8..889a8549d97f 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -108,6 +108,7 @@
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#include "bfq-iosched.h"
+#include "blk-wbt.h"
#define BFQ_BFQQ_FNS(name) \
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
@@ -724,6 +725,44 @@ static void bfq_updated_next_req(struct bfq_data *bfqd,
}
}
+static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
+{
+ u64 dur;
+
+ if (bfqd->bfq_wr_max_time > 0)
+ return bfqd->bfq_wr_max_time;
+
+ dur = bfqd->RT_prod;
+ do_div(dur, bfqd->peak_rate);
+
+ /*
+ * Limit duration between 3 and 13 seconds. Tests show that
+ * higher values than 13 seconds often yield the opposite of
+ * the desired result, i.e., worsen responsiveness by letting
+ * non-interactive and non-soft-real-time applications
+ * preserve weight raising for too long a time interval.
+ *
+ * On the other hand, lower values than 3 seconds make it
+ * difficult for most interactive tasks to complete their jobs
+ * before weight-raising finishes.
+ */
+ if (dur > msecs_to_jiffies(13000))
+ dur = msecs_to_jiffies(13000);
+ else if (dur < msecs_to_jiffies(3000))
+ dur = msecs_to_jiffies(3000);
+
+ return dur;
+}
+
+/* switch back from soft real-time to interactive weight raising */
+static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
+ struct bfq_data *bfqd)
+{
+ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+ bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
+}
+
static void
bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
struct bfq_io_cq *bic, bool bfq_already_existing)
@@ -750,10 +789,16 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
time_is_before_jiffies(bfqq->last_wr_start_finish +
bfqq->wr_cur_max_time))) {
- bfq_log_bfqq(bfqq->bfqd, bfqq,
- "resume state: switching off wr");
-
- bfqq->wr_coeff = 1;
+ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
+ !bfq_bfqq_in_large_burst(bfqq) &&
+ time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
+ bfq_wr_duration(bfqd))) {
+ switch_back_to_interactive_wr(bfqq, bfqd);
+ } else {
+ bfqq->wr_coeff = 1;
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "resume state: switching off wr");
+ }
}
/* make sure weight will be updated, however we got here */
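
The expiry test above leans on the jiffies comparison helpers: time_is_before_jiffies(t) is true when t already lies in the past, while time_is_after_eq_jiffies(t) is true when t is now or still ahead. A small sketch with an illustrative expiry variable (the names are not from the patch):

        unsigned long wr_expiry = last_wr_start_finish + wr_cur_max_time;

        if (time_is_before_jiffies(wr_expiry))          /* deadline already passed */
                pr_debug("bfq: weight raising expired\n");
        else                                            /* still inside the period */
                pr_debug("bfq: weight raising active\n");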
@@ -1173,33 +1218,22 @@ static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
return wr_or_deserves_wr;
}
-static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
+/*
+ * Return the farthest future time instant according to jiffies
+ * macros.
+ */
+static unsigned long bfq_greatest_from_now(void)
{
- u64 dur;
-
- if (bfqd->bfq_wr_max_time > 0)
- return bfqd->bfq_wr_max_time;
-
- dur = bfqd->RT_prod;
- do_div(dur, bfqd->peak_rate);
-
- /*
- * Limit duration between 3 and 13 seconds. Tests show that
- * higher values than 13 seconds often yield the opposite of
- * the desired result, i.e., worsen responsiveness by letting
- * non-interactive and non-soft-real-time applications
- * preserve weight raising for a too long time interval.
- *
- * On the other end, lower values than 3 seconds make it
- * difficult for most interactive tasks to complete their jobs
- * before weight-raising finishes.
- */
- if (dur > msecs_to_jiffies(13000))
- dur = msecs_to_jiffies(13000);
- else if (dur < msecs_to_jiffies(3000))
- dur = msecs_to_jiffies(3000);
+ return jiffies + MAX_JIFFY_OFFSET;
+}
- return dur;
+/*
+ * Return the farthest past time instant according to jiffies
+ * macros.
+ */
+static unsigned long bfq_smallest_from_now(void)
+{
+ return jiffies - MAX_JIFFY_OFFSET;
}
static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
@@ -1216,7 +1250,19 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
bfqq->wr_coeff = bfqd->bfq_wr_coeff;
bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
} else {
- bfqq->wr_start_at_switch_to_srt = jiffies;
+ /*
+ * No interactive weight raising in progress
+ * here: assign minus infinity to
+ * wr_start_at_switch_to_srt, to make sure
+ * that, at the end of the soft-real-time
+ * weight-raising period that is starting
+ * now, no interactive weight-raising period
+ * may be wrongly considered as still in
+ * progress (and thus actually started by
+ * mistake).
+ */
+ bfqq->wr_start_at_switch_to_srt =
+ bfq_smallest_from_now();
bfqq->wr_coeff = bfqd->bfq_wr_coeff *
BFQ_SOFTRT_WEIGHT_FACTOR;
bfqq->wr_cur_max_time =
@@ -2016,10 +2062,27 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
- bic->saved_wr_coeff = bfqq->wr_coeff;
- bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
- bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
- bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
+ if (unlikely(bfq_bfqq_just_created(bfqq) &&
+ !bfq_bfqq_in_large_burst(bfqq))) {
+ /*
+ * bfqq being merged right after being created: bfqq
+ * would have deserved interactive weight raising, but
+ * did not make it to be set in a weight-raised state,
+ * because of this early merge. Store directly the
+ * weight-raising state that would have been assigned
+ * to bfqq, so as to avoid that bfqq unjustly fails
+ * to enjoy weight raising if split soon.
+ */
+ bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
+ bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
+ bic->saved_last_wr_start_finish = jiffies;
+ } else {
+ bic->saved_wr_coeff = bfqq->wr_coeff;
+ bic->saved_wr_start_at_switch_to_srt =
+ bfqq->wr_start_at_switch_to_srt;
+ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
+ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
+ }
}
static void
@@ -2897,24 +2960,6 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
}
-/*
- * Return the farthest future time instant according to jiffies
- * macros.
- */
-static unsigned long bfq_greatest_from_now(void)
-{
- return jiffies + MAX_JIFFY_OFFSET;
-}
-
-/*
- * Return the farthest past time instant according to jiffies
- * macros.
- */
-static unsigned long bfq_smallest_from_now(void)
-{
- return jiffies - MAX_JIFFY_OFFSET;
-}
-
/**
* bfq_bfqq_expire - expire a queue.
* @bfqd: device owning the queue.
@@ -3489,11 +3534,7 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_wr_duration(bfqd)))
bfq_bfqq_end_wr(bfqq);
else {
- /* switch back to interactive wr */
- bfqq->wr_coeff = bfqd->bfq_wr_coeff;
- bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
- bfqq->last_wr_start_finish =
- bfqq->wr_start_at_switch_to_srt;
+ switch_back_to_interactive_wr(bfqq, bfqd);
bfqq->entity.prio_changed = 1;
}
}
@@ -3685,16 +3726,37 @@ void bfq_put_queue(struct bfq_queue *bfqq)
if (bfqq->ref)
return;
- if (bfq_bfqq_sync(bfqq))
+ if (!hlist_unhashed(&bfqq->burst_list_node)) {
+ hlist_del_init(&bfqq->burst_list_node);
/*
- * The fact that this queue is being destroyed does not
- * invalidate the fact that this queue may have been
- * activated during the current burst. As a consequence,
- * although the queue does not exist anymore, and hence
- * needs to be removed from the burst list if there,
- * the burst size has not to be decremented.
+ * Decrement also burst size after the removal, if the
+ * process associated with bfqq is exiting, and thus
+ * does not contribute to the burst any longer. This
+ * decrement helps filter out false positives of large
+ * bursts, when some short-lived process (often due to
+ * the execution of commands by some service) happens
+ * to start and exit while a complex application is
+ * starting, and thus spawning several processes that
+ * do I/O (and that *must not* be treated as a large
+ * burst, see comments on bfq_handle_burst).
+ *
+ * In particular, the decrement is performed only if:
+ * 1) bfqq is not a merged queue, because, if it is,
+ * then this free of bfqq is not triggered by the exit
+ * of the process bfqq is associated with, but exactly
+ * by the fact that bfqq has just been merged.
+ * 2) burst_size is greater than 0, to handle
+ * unbalanced decrements. Unbalanced decrements may
+ * happen in the following case: bfqq is inserted into
+ * the current burst list--without incrementing
+ * burst_size--because of a split, but the current
+ * burst list is not the burst list bfqq belonged to
+ * (see comments on the case of a split in
+ * bfq_set_request).
*/
- hlist_del_init(&bfqq->burst_list_node);
+ if (bfqq->bic && bfqq->bfqd->burst_size > 0)
+ bfqq->bfqd->burst_size--;
+ }
kmem_cache_free(bfq_pool, bfqq);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
@@ -4127,7 +4189,6 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
new_bfqq->allocated++;
bfqq->allocated--;
new_bfqq->ref++;
- bfq_clear_bfqq_just_created(bfqq);
/*
* If the bic associated with the process
* issuing this request still points to bfqq
@@ -4139,6 +4200,8 @@ static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
bfqq, new_bfqq);
+
+ bfq_clear_bfqq_just_created(bfqq);
/*
* rq is about to be enqueued into new_bfqq,
* release rq reference on bfqq
@@ -4424,6 +4487,34 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
else {
bfq_clear_bfqq_in_large_burst(bfqq);
if (bic->was_in_burst_list)
+ /*
+ * If bfqq was in the current
+ * burst list before being
+ * merged, then we have to add
+ * it back. And we do not need
+ * to increase burst_size, as
+ * we did not decrement
+ * burst_size when we removed
+ * bfqq from the burst list as
+ * a consequence of a merge
+ * (see comments in
+ * bfq_put_queue). In this
+ * respect, it would be rather
+ * costly to know whether the
+ * current burst list is still
+ * the same burst list from
+ * which bfqq was removed on
+ * the merge. To avoid this
+ * cost, if bfqq was in a
+ * burst list, then we add
+ * bfqq to the current burst
+ * list without any further
+ * check. This can cause
+ * inappropriate insertions,
+ * but rarely enough to not
+ * harm the detection of large
+ * bursts significantly.
+ */
hlist_add_head(&bfqq->burst_list_node,
&bfqd->burst_list);
}
@@ -4775,7 +4866,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
bfq_init_root_group(bfqd->root_group, bfqd);
bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
-
+ wbt_disable_default(q);
return 0;
out_free:
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 5df32907ff3b..23b42e8aa03e 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -485,11 +485,8 @@ EXPORT_SYMBOL(bioset_integrity_create);
void bioset_integrity_free(struct bio_set *bs)
{
- if (bs->bio_integrity_pool)
- mempool_destroy(bs->bio_integrity_pool);
-
- if (bs->bvec_integrity_pool)
- mempool_destroy(bs->bvec_integrity_pool);
+ mempool_destroy(bs->bio_integrity_pool);
+ mempool_destroy(bs->bvec_integrity_pool);
}
EXPORT_SYMBOL(bioset_integrity_free);
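
The NULL checks can be dropped because mempool_destroy(), like kfree(), is a no-op when handed a NULL pointer, so the call is safe unconditionally:

        mempool_t *pool = NULL;

        mempool_destroy(pool);      /* no-op, exactly like kfree(NULL) */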
diff --git a/block/bio.c b/block/bio.c
index 101c2a9b5481..b94a802f8ba3 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -400,7 +400,7 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
/**
* bio_alloc_bioset - allocate a bio for I/O
- * @gfp_mask: the GFP_ mask given to the slab allocator
+ * @gfp_mask: the GFP_* mask given to the slab allocator
* @nr_iovecs: number of iovecs to pre-allocate
* @bs: the bio_set to allocate from.
*
@@ -917,17 +917,9 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
-struct submit_bio_ret {
- struct completion event;
- int error;
-};
-
static void submit_bio_wait_endio(struct bio *bio)
{
- struct submit_bio_ret *ret = bio->bi_private;
-
- ret->error = blk_status_to_errno(bio->bi_status);
- complete(&ret->event);
+ complete(bio->bi_private);
}
/**
@@ -943,16 +935,15 @@ static void submit_bio_wait_endio(struct bio *bio)
*/
int submit_bio_wait(struct bio *bio)
{
- struct submit_bio_ret ret;
+ DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
- init_completion(&ret.event);
- bio->bi_private = &ret;
+ bio->bi_private = &done;
bio->bi_end_io = submit_bio_wait_endio;
bio->bi_opf |= REQ_SYNC;
submit_bio(bio);
- wait_for_completion_io(&ret.event);
+ wait_for_completion_io(&done);
- return ret.error;
+ return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);
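
With the on-stack completion the synchronous helper now reports the bio's own status directly. A sketch of a typical caller, assuming 'bio' has already been fully set up (device, sector, pages and op); the error message is only illustrative:

        int ret;

        ret = submit_bio_wait(bio);      /* sleeps until ->bi_end_io completes us      */
        if (ret)                         /* negative errno via blk_status_to_errno()   */
                pr_err("sync I/O failed: %d\n", ret);
        bio_put(bio);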
@@ -1940,11 +1931,8 @@ void bioset_free(struct bio_set *bs)
if (bs->rescue_workqueue)
destroy_workqueue(bs->rescue_workqueue);
- if (bs->bio_pool)
- mempool_destroy(bs->bio_pool);
-
- if (bs->bvec_pool)
- mempool_destroy(bs->bvec_pool);
+ mempool_destroy(bs->bio_pool);
+ mempool_destroy(bs->bvec_pool);
bioset_integrity_free(bs);
bio_put_slab(bs);
@@ -2045,37 +2033,6 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
EXPORT_SYMBOL_GPL(bio_associate_blkcg);
/**
- * bio_associate_current - associate a bio with %current
- * @bio: target bio
- *
- * Associate @bio with %current if it hasn't been associated yet. Block
- * layer will treat @bio as if it were issued by %current no matter which
- * task actually issues it.
- *
- * This function takes an extra reference of @task's io_context and blkcg
- * which will be put when @bio is released. The caller must own @bio,
- * ensure %current->io_context exists, and is responsible for synchronizing
- * calls to this function.
- */
-int bio_associate_current(struct bio *bio)
-{
- struct io_context *ioc;
-
- if (bio->bi_css)
- return -EBUSY;
-
- ioc = current->io_context;
- if (!ioc)
- return -ENOENT;
-
- get_io_context_active(ioc);
- bio->bi_ioc = ioc;
- bio->bi_css = task_get_css(current, io_cgrp_id);
- return 0;
-}
-EXPORT_SYMBOL_GPL(bio_associate_current);
-
-/**
* bio_disassociate_task - undo bio_associate_current()
* @bio: target bio
*/
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d3f56baee936..4117524ca45b 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1419,6 +1419,11 @@ int blkcg_policy_register(struct blkcg_policy *pol)
if (i >= BLKCG_MAX_POLS)
goto err_unlock;
+ /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
+ if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
+ (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
+ goto err_unlock;
+
/* register @pol */
pol->plid = i;
blkcg_policy[pol->plid] = pol;
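
The new sanity check rejects a policy that supplies only one half of an alloc/free pair; '!a ^ !b' is true exactly when the two pointers disagree about being set. A tiny truth-table sketch (variable names are illustrative):

        bool mismatched = !cpd_alloc_fn ^ !cpd_free_fn;

        /* alloc set,  free set   -> 0 ^ 0 -> false, accepted        */
        /* alloc NULL, free NULL  -> 1 ^ 1 -> false, accepted        */
        /* only one of them set   -> 1 ^ 0 -> true,  goto err_unlock */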
@@ -1452,7 +1457,7 @@ int blkcg_policy_register(struct blkcg_policy *pol)
return 0;
err_free_cpds:
- if (pol->cpd_alloc_fn) {
+ if (pol->cpd_free_fn) {
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
if (blkcg->cpd[pol->plid]) {
pol->cpd_free_fn(blkcg->cpd[pol->plid]);
@@ -1492,7 +1497,7 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
/* remove cpds and unregister */
mutex_lock(&blkcg_pol_mutex);
- if (pol->cpd_alloc_fn) {
+ if (pol->cpd_free_fn) {
list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
if (blkcg->cpd[pol->plid]) {
pol->cpd_free_fn(blkcg->cpd[pol->plid]);
diff --git a/block/blk-core.c b/block/blk-core.c
index 048be4aa6024..7c54c195e79e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -333,11 +333,13 @@ EXPORT_SYMBOL(blk_stop_queue);
void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->timeout);
+ cancel_work_sync(&q->timeout_work);
if (q->mq_ops) {
struct blk_mq_hw_ctx *hctx;
int i;
+ cancel_delayed_work_sync(&q->requeue_work);
queue_for_each_hw_ctx(q, hctx, i)
cancel_delayed_work_sync(&hctx->run_work);
} else {
@@ -347,6 +349,37 @@ void blk_sync_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_sync_queue);
/**
+ * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
+ * @q: request queue pointer
+ *
+ * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
+ * set and 1 if the flag was already set.
+ */
+int blk_set_preempt_only(struct request_queue *q)
+{
+ unsigned long flags;
+ int res;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ res = queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(blk_set_preempt_only);
+
+void blk_clear_preempt_only(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
+ wake_up_all(&q->mq_freeze_wq);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
+
+/**
* __blk_run_queue_uncond - run a queue whether or not it has been stopped
* @q: The queue to run
*
@@ -610,6 +643,9 @@ void blk_set_queue_dying(struct request_queue *q)
}
spin_unlock_irq(q->queue_lock);
}
+
+ /* Make blk_queue_enter() reexamine the DYING flag. */
+ wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
@@ -718,7 +754,7 @@ static void free_request_size(void *element, void *data)
int blk_init_rl(struct request_list *rl, struct request_queue *q,
gfp_t gfp_mask)
{
- if (unlikely(rl->rq_pool))
+ if (unlikely(rl->rq_pool) || q->mq_ops)
return 0;
rl->q = q;
@@ -760,15 +796,38 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
}
EXPORT_SYMBOL(blk_alloc_queue);
-int blk_queue_enter(struct request_queue *q, bool nowait)
+/**
+ * blk_queue_enter() - try to increase q->q_usage_counter
+ * @q: request queue pointer
+ * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
+ */
+int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
+ const bool preempt = flags & BLK_MQ_REQ_PREEMPT;
+
while (true) {
+ bool success = false;
int ret;
- if (percpu_ref_tryget_live(&q->q_usage_counter))
+ rcu_read_lock_sched();
+ if (percpu_ref_tryget_live(&q->q_usage_counter)) {
+ /*
+ * The code that sets the PREEMPT_ONLY flag is
+ * responsible for ensuring that that flag is globally
+ * visible before the queue is unfrozen.
+ */
+ if (preempt || !blk_queue_preempt_only(q)) {
+ success = true;
+ } else {
+ percpu_ref_put(&q->q_usage_counter);
+ }
+ }
+ rcu_read_unlock_sched();
+
+ if (success)
return 0;
- if (nowait)
+ if (flags & BLK_MQ_REQ_NOWAIT)
return -EBUSY;
/*
@@ -781,7 +840,8 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
smp_rmb();
ret = wait_event_interruptible(q->mq_freeze_wq,
- !atomic_read(&q->mq_freeze_depth) ||
+ (atomic_read(&q->mq_freeze_depth) == 0 &&
+ (preempt || !blk_queue_preempt_only(q))) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
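
A sketch of how a driver might drive the new PREEMPT_ONLY machinery, loosely modelled on the SCSI quiesce path this series targets; error handling is omitted, the request op is only an example, and blk_get_request_flags() is the helper introduced further down in this patch:

        struct request *rq;

        blk_set_preempt_only(q);
        blk_mq_freeze_queue(q);         /* make the flag visible to blk_queue_enter() */
        blk_mq_unfreeze_queue(q);

        /* ordinary allocations now sleep in blk_queue_enter(); PM requests pass */
        rq = blk_get_request_flags(q, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);

        /* ... issue the power-management command ... */

        blk_clear_preempt_only(q);      /* wakes everyone parked on mq_freeze_wq */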
@@ -844,6 +904,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+ INIT_WORK(&q->timeout_work, NULL);
INIT_LIST_HEAD(&q->queue_head);
INIT_LIST_HEAD(&q->timeout_list);
INIT_LIST_HEAD(&q->icq_list);
@@ -1154,7 +1215,7 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
* @rl: request list to allocate from
* @op: operation and flags
* @bio: bio to allocate request for (can be %NULL)
- * @gfp_mask: allocation mask
+ * @flags: BLK_MQ_REQ_* flags
*
* Get a free request from @q. This function may fail under memory
* pressure or if @q is dead.
@@ -1164,7 +1225,7 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
* Returns request pointer on success, with @q->queue_lock *not held*.
*/
static struct request *__get_request(struct request_list *rl, unsigned int op,
- struct bio *bio, gfp_t gfp_mask)
+ struct bio *bio, blk_mq_req_flags_t flags)
{
struct request_queue *q = rl->q;
struct request *rq;
@@ -1173,6 +1234,8 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
struct io_cq *icq = NULL;
const bool is_sync = op_is_sync(op);
int may_queue;
+ gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
+ __GFP_DIRECT_RECLAIM;
req_flags_t rq_flags = RQF_ALLOCED;
lockdep_assert_held(q->queue_lock);
@@ -1255,6 +1318,8 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
blk_rq_set_rl(rq, rl);
rq->cmd_flags = op;
rq->rq_flags = rq_flags;
+ if (flags & BLK_MQ_REQ_PREEMPT)
+ rq->rq_flags |= RQF_PREEMPT;
/* init elvpriv */
if (rq_flags & RQF_ELVPRIV) {
@@ -1333,7 +1398,7 @@ rq_starved:
* @q: request_queue to allocate request from
* @op: operation and flags
* @bio: bio to allocate request for (can be %NULL)
- * @gfp_mask: allocation mask
+ * @flags: BLK_MQ_REQ_* flags.
*
* Get a free request from @q. If %__GFP_DIRECT_RECLAIM is set in @gfp_mask,
* this function keeps retrying under memory pressure and fails iff @q is dead.
@@ -1343,7 +1408,7 @@ rq_starved:
* Returns request pointer on success, with @q->queue_lock *not held*.
*/
static struct request *get_request(struct request_queue *q, unsigned int op,
- struct bio *bio, gfp_t gfp_mask)
+ struct bio *bio, blk_mq_req_flags_t flags)
{
const bool is_sync = op_is_sync(op);
DEFINE_WAIT(wait);
@@ -1355,7 +1420,7 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
rl = blk_get_rl(q, bio); /* transferred to @rq on success */
retry:
- rq = __get_request(rl, op, bio, gfp_mask);
+ rq = __get_request(rl, op, bio, flags);
if (!IS_ERR(rq))
return rq;
@@ -1364,7 +1429,7 @@ retry:
return ERR_PTR(-EAGAIN);
}
- if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
+ if ((flags & BLK_MQ_REQ_NOWAIT) || unlikely(blk_queue_dying(q))) {
blk_put_rl(rl);
return rq;
}
@@ -1391,20 +1456,28 @@ retry:
goto retry;
}
+/* flags: BLK_MQ_REQ_PREEMPT and/or BLK_MQ_REQ_NOWAIT. */
static struct request *blk_old_get_request(struct request_queue *q,
- unsigned int op, gfp_t gfp_mask)
+ unsigned int op, blk_mq_req_flags_t flags)
{
struct request *rq;
+ gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
+ __GFP_DIRECT_RECLAIM;
+ int ret = 0;
WARN_ON_ONCE(q->mq_ops);
/* create ioc upfront */
create_io_context(gfp_mask, q->node);
+ ret = blk_queue_enter(q, flags);
+ if (ret)
+ return ERR_PTR(ret);
spin_lock_irq(q->queue_lock);
- rq = get_request(q, op, NULL, gfp_mask);
+ rq = get_request(q, op, NULL, flags);
if (IS_ERR(rq)) {
spin_unlock_irq(q->queue_lock);
+ blk_queue_exit(q);
return rq;
}
@@ -1415,25 +1488,40 @@ static struct request *blk_old_get_request(struct request_queue *q,
return rq;
}
-struct request *blk_get_request(struct request_queue *q, unsigned int op,
- gfp_t gfp_mask)
+/**
+ * blk_get_request_flags - allocate a request
+ * @q: request queue to allocate a request for
+ * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
+ * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
+ */
+struct request *blk_get_request_flags(struct request_queue *q, unsigned int op,
+ blk_mq_req_flags_t flags)
{
struct request *req;
+ WARN_ON_ONCE(op & REQ_NOWAIT);
+ WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
+
if (q->mq_ops) {
- req = blk_mq_alloc_request(q, op,
- (gfp_mask & __GFP_DIRECT_RECLAIM) ?
- 0 : BLK_MQ_REQ_NOWAIT);
+ req = blk_mq_alloc_request(q, op, flags);
if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
q->mq_ops->initialize_rq_fn(req);
} else {
- req = blk_old_get_request(q, op, gfp_mask);
+ req = blk_old_get_request(q, op, flags);
if (!IS_ERR(req) && q->initialize_rq_fn)
q->initialize_rq_fn(req);
}
return req;
}
+EXPORT_SYMBOL(blk_get_request_flags);
+
+struct request *blk_get_request(struct request_queue *q, unsigned int op,
+ gfp_t gfp_mask)
+{
+ return blk_get_request_flags(q, op, gfp_mask & __GFP_DIRECT_RECLAIM ?
+ 0 : BLK_MQ_REQ_NOWAIT);
+}
EXPORT_SYMBOL(blk_get_request);
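
For existing callers the behaviour of blk_get_request() is unchanged; the gfp mask is simply folded into the new flags argument. A sketch of the two call shapes (the op is chosen arbitrarily):

        struct request *rq;

        /* may sleep: __GFP_DIRECT_RECLAIM present -> flags == 0 */
        rq = blk_get_request(q, REQ_OP_DRV_OUT, GFP_KERNEL);

        /* must not sleep: no direct reclaim -> BLK_MQ_REQ_NOWAIT */
        rq = blk_get_request(q, REQ_OP_DRV_OUT, GFP_ATOMIC);
        if (IS_ERR(rq))
                return PTR_ERR(rq);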
/**
@@ -1576,6 +1664,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
blk_free_request(rl, req);
freed_request(rl, sync, rq_flags);
blk_put_rl(rl);
+ blk_queue_exit(q);
}
}
EXPORT_SYMBOL_GPL(__blk_put_request);
@@ -1857,8 +1946,10 @@ get_rq:
* Grab a free request. This is might sleep but can not fail.
* Returns with the queue unlocked.
*/
- req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
+ blk_queue_enter_live(q);
+ req = get_request(q, bio->bi_opf, bio, 0);
if (IS_ERR(req)) {
+ blk_queue_exit(q);
__wbt_done(q->rq_wb, wb_acct);
if (PTR_ERR(req) == -ENOMEM)
bio->bi_status = BLK_STS_RESOURCE;
@@ -2200,8 +2291,10 @@ blk_qc_t generic_make_request(struct bio *bio)
current->bio_list = bio_list_on_stack;
do {
struct request_queue *q = bio->bi_disk->queue;
+ blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
+ BLK_MQ_REQ_NOWAIT : 0;
- if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
+ if (likely(blk_queue_enter(q, flags) == 0)) {
struct bio_list lower, same;
/* Create a fresh bio_list for all subordinate requests */
@@ -2242,6 +2335,40 @@ out:
EXPORT_SYMBOL(generic_make_request);
/**
+ * direct_make_request - hand a buffer directly to its device driver for I/O
+ * @bio: The bio describing the location in memory and on the device.
+ *
+ * This function behaves like generic_make_request(), but does not protect
+ * against recursion. Must only be used if the called driver is known
+ * to not call generic_make_request (or direct_make_request) again from
+ * its make_request function. (Calling direct_make_request again from
+ * a workqueue is perfectly fine as that doesn't recurse).
+ */
+blk_qc_t direct_make_request(struct bio *bio)
+{
+ struct request_queue *q = bio->bi_disk->queue;
+ bool nowait = bio->bi_opf & REQ_NOWAIT;
+ blk_qc_t ret;
+
+ if (!generic_make_request_checks(bio))
+ return BLK_QC_T_NONE;
+
+ if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) {
+ if (nowait && !blk_queue_dying(q))
+ bio->bi_status = BLK_STS_AGAIN;
+ else
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+ return BLK_QC_T_NONE;
+ }
+
+ ret = q->make_request_fn(q, bio);
+ blk_queue_exit(q);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(direct_make_request);
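
A sketch of the intended caller: a stacking driver that remaps a bio and hands it straight to the lower queue, relying on the lower driver not to recurse. 'lower_bdev' and 'lower_offset' are hypothetical:

        static blk_qc_t stack_make_request(struct request_queue *q, struct bio *bio)
        {
                bio_set_dev(bio, lower_bdev);                   /* hypothetical lower device */
                bio->bi_iter.bi_sector += lower_offset;         /* hypothetical remapping    */
                return direct_make_request(bio);                /* no recursion protection   */
        }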
+
+/**
* submit_bio - submit a bio to the block device layer for I/O
* @bio: The &struct bio which describes the I/O
*
@@ -2285,6 +2412,17 @@ blk_qc_t submit_bio(struct bio *bio)
}
EXPORT_SYMBOL(submit_bio);
+bool blk_poll(struct request_queue *q, blk_qc_t cookie)
+{
+ if (!q->poll_fn || !blk_qc_t_valid(cookie))
+ return false;
+
+ if (current->plug)
+ blk_flush_plug_list(current->plug, false);
+ return q->poll_fn(q, cookie);
+}
+EXPORT_SYMBOL_GPL(blk_poll);
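
blk_poll() is only a thin wrapper: it flushes the caller's plug and defers to the queue's ->poll_fn. A sketch of a polling wait, loosely modelled on the direct-I/O path; 'bio_done' is a hypothetical flag set by the bio's ->bi_end_io, and real callers also manage the task state around the loop:

        blk_qc_t qc = submit_bio(bio);

        while (!READ_ONCE(bio_done)) {
                if (!blk_poll(bdev_get_queue(bdev), qc))
                        io_schedule();          /* queue cannot poll; just sleep */
        }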
+
/**
* blk_cloned_rq_check_limits - Helper function to check a cloned request
* for new the queue limits
@@ -2350,7 +2488,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
* bypass a potential scheduler on the bottom device for
* insert.
*/
- blk_mq_request_bypass_insert(rq);
+ blk_mq_request_bypass_insert(rq, true);
return BLK_STS_OK;
}
@@ -2464,20 +2602,22 @@ void blk_account_io_done(struct request *req)
* Don't process normal requests when queue is suspended
* or in the process of suspending/resuming
*/
-static struct request *blk_pm_peek_request(struct request_queue *q,
- struct request *rq)
+static bool blk_pm_allow_request(struct request *rq)
{
- if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
- (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM))))
- return NULL;
- else
- return rq;
+ switch (rq->q->rpm_status) {
+ case RPM_RESUMING:
+ case RPM_SUSPENDING:
+ return rq->rq_flags & RQF_PM;
+ case RPM_SUSPENDED:
+ return false;
+ }
+
+ return true;
}
#else
-static inline struct request *blk_pm_peek_request(struct request_queue *q,
- struct request *rq)
+static bool blk_pm_allow_request(struct request *rq)
{
- return rq;
+ return true;
}
#endif
@@ -2517,6 +2657,48 @@ void blk_account_io_start(struct request *rq, bool new_io)
part_stat_unlock();
}
+static struct request *elv_next_request(struct request_queue *q)
+{
+ struct request *rq;
+ struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
+
+ WARN_ON_ONCE(q->mq_ops);
+
+ while (1) {
+ list_for_each_entry(rq, &q->queue_head, queuelist) {
+ if (blk_pm_allow_request(rq))
+ return rq;
+
+ if (rq->rq_flags & RQF_SOFTBARRIER)
+ break;
+ }
+
+ /*
+ * If a flush request is running and the flush request isn't
+ * queueable in the drive, we can hold the queue till the flush
+ * request is finished. Even if we don't do this, the driver can't
+ * dispatch the next requests and will requeue them. And this can
+ * improve throughput too. For example, we have requests flush1,
+ * write1, flush2. flush1 is dispatched, then the queue is held,
+ * write1 isn't inserted into the queue. After flush1 is finished,
+ * flush2 will be dispatched. Since the disk cache is already clean,
+ * flush2 will be finished very soon, so it looks like flush2 is
+ * folded into flush1.
+ * Since the queue is held, a flag is set to indicate the queue
+ * should be restarted later. Please see flush_end_io() for
+ * details.
+ */
+ if (fq->flush_pending_idx != fq->flush_running_idx &&
+ !queue_flush_queueable(q)) {
+ fq->flush_queue_delayed = 1;
+ return NULL;
+ }
+ if (unlikely(blk_queue_bypass(q)) ||
+ !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
+ return NULL;
+ }
+}
+
/**
* blk_peek_request - peek at the top of a request queue
* @q: request queue to peek at
@@ -2538,12 +2720,7 @@ struct request *blk_peek_request(struct request_queue *q)
lockdep_assert_held(q->queue_lock);
WARN_ON_ONCE(q->mq_ops);
- while ((rq = __elv_next_request(q)) != NULL) {
-
- rq = blk_pm_peek_request(q, rq);
- if (!rq)
- break;
-
+ while ((rq = elv_next_request(q)) != NULL) {
if (!(rq->rq_flags & RQF_STARTED)) {
/*
* This is the first time the device driver
@@ -2695,6 +2872,27 @@ struct request *blk_fetch_request(struct request_queue *q)
}
EXPORT_SYMBOL(blk_fetch_request);
+/*
+ * Steal bios from a request and add them to a bio list.
+ * The request must not have been partially completed before.
+ */
+void blk_steal_bios(struct bio_list *list, struct request *rq)
+{
+ if (rq->bio) {
+ if (list->tail)
+ list->tail->bi_next = rq->bio;
+ else
+ list->head = rq->bio;
+ list->tail = rq->biotail;
+
+ rq->bio = NULL;
+ rq->biotail = NULL;
+ }
+
+ rq->__data_len = 0;
+}
+EXPORT_SYMBOL_GPL(blk_steal_bios);
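
A sketch of the intended use, presumably the NVMe multipath failover path added in the same cycle: salvage the bios of a failed request, complete the now-empty request, and resubmit the bios elsewhere:

        struct bio_list requeue = BIO_EMPTY_LIST;
        struct bio *bio;

        blk_steal_bios(&requeue, rq);           /* rq->bio/biotail now NULL, __data_len 0 */
        blk_mq_end_request(rq, BLK_STS_OK);     /* complete the emptied request           */

        while ((bio = bio_list_pop(&requeue)))
                generic_make_request(bio);      /* retry on another path/queue            */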
+
/**
* blk_update_request - Special helper function for request stacking drivers
* @req: the request being processed
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 4938bec8cfef..f17170675917 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -231,8 +231,13 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
- blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
- flush_rq->tag = -1;
+ if (!q->elevator) {
+ blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
+ flush_rq->tag = -1;
+ } else {
+ blk_mq_put_driver_tag_hctx(hctx, flush_rq);
+ flush_rq->internal_tag = -1;
+ }
}
running = &fq->flush_queue[fq->flush_running_idx];
@@ -318,19 +323,26 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
blk_rq_init(q, flush_rq);
/*
- * Borrow tag from the first request since they can't
- * be in flight at the same time. And acquire the tag's
- * ownership for flush req.
+ * When no I/O scheduler ("none") is in use, borrow the tag from the
+ * first request since they can't be in flight at the same time. And
+ * acquire the tag's ownership for the flush req.
+ *
+ * When an I/O scheduler is in use, the flush rq needs to borrow a
+ * scheduler tag just for cheating the put/get driver tag accounting.
*/
if (q->mq_ops) {
struct blk_mq_hw_ctx *hctx;
flush_rq->mq_ctx = first_rq->mq_ctx;
- flush_rq->tag = first_rq->tag;
- fq->orig_rq = first_rq;
- hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
- blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
+ if (!q->elevator) {
+ fq->orig_rq = first_rq;
+ flush_rq->tag = first_rq->tag;
+ hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
+ blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
+ } else {
+ flush_rq->internal_tag = first_rq->internal_tag;
+ }
}
flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
@@ -394,6 +406,11 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
hctx = blk_mq_map_queue(q, ctx->cpu);
+ if (q->elevator) {
+ WARN_ON(rq->tag < 0);
+ blk_mq_put_driver_tag_hctx(hctx, rq);
+ }
+
/*
* After populating an empty queue, kick it to avoid stall. Read
* the comment in flush_end_io().
@@ -463,7 +480,7 @@ void blk_insert_flush(struct request *rq)
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
if (q->mq_ops)
- blk_mq_sched_insert_request(rq, false, true, false, false);
+ blk_mq_request_bypass_insert(rq, false);
else
list_add_tail(&rq->queuelist, &q->queue_head);
return;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 63fb971d6574..2bc544ce3d2e 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -275,6 +275,40 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
return min(pages, (sector_t)BIO_MAX_PAGES);
}
+static int __blkdev_issue_zero_pages(struct block_device *bdev,
+ sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
+ struct bio **biop)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ struct bio *bio = *biop;
+ int bi_size = 0;
+ unsigned int sz;
+
+ if (!q)
+ return -ENXIO;
+
+ while (nr_sects != 0) {
+ bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
+ gfp_mask);
+ bio->bi_iter.bi_sector = sector;
+ bio_set_dev(bio, bdev);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+ while (nr_sects != 0) {
+ sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
+ bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
+ nr_sects -= bi_size >> 9;
+ sector += bi_size >> 9;
+ if (bi_size < sz)
+ break;
+ }
+ cond_resched();
+ }
+
+ *biop = bio;
+ return 0;
+}
+
/**
* __blkdev_issue_zeroout - generate number of zero filed write bios
* @bdev: blockdev to issue
@@ -288,12 +322,6 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
* Zero-fill a block range, either using hardware offload or by explicitly
* writing zeroes to the device.
*
- * Note that this function may fail with -EOPNOTSUPP if the driver signals
- * zeroing offload support, but the device fails to process the command (for
- * some devices there is no non-destructive way to verify whether this
- * operation is actually supported). In this case the caller should call
- * retry the call to blkdev_issue_zeroout() and the fallback path will be used.
- *
* If a device is using logical block provisioning, the underlying space will
* not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
*
@@ -305,9 +333,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
unsigned flags)
{
int ret;
- int bi_size = 0;
- struct bio *bio = *biop;
- unsigned int sz;
sector_t bs_mask;
bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
@@ -317,30 +342,10 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
biop, flags);
if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
- goto out;
-
- ret = 0;
- while (nr_sects != 0) {
- bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
- gfp_mask);
- bio->bi_iter.bi_sector = sector;
- bio_set_dev(bio, bdev);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
- while (nr_sects != 0) {
- sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
- bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
- nr_sects -= bi_size >> 9;
- sector += bi_size >> 9;
- if (bi_size < sz)
- break;
- }
- cond_resched();
- }
+ return ret;
- *biop = bio;
-out:
- return ret;
+ return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
+ biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
@@ -360,18 +365,49 @@ EXPORT_SYMBOL(__blkdev_issue_zeroout);
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
- int ret;
- struct bio *bio = NULL;
+ int ret = 0;
+ sector_t bs_mask;
+ struct bio *bio;
struct blk_plug plug;
+ bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);
+ bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+ if ((sector | nr_sects) & bs_mask)
+ return -EINVAL;
+
+retry:
+ bio = NULL;
blk_start_plug(&plug);
- ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
- &bio, flags);
+ if (try_write_zeroes) {
+ ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
+ gfp_mask, &bio, flags);
+ } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
+ ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
+ gfp_mask, &bio);
+ } else {
+ /* No zeroing offload support */
+ ret = -EOPNOTSUPP;
+ }
if (ret == 0 && bio) {
ret = submit_bio_wait(bio);
bio_put(bio);
}
blk_finish_plug(&plug);
+ if (ret && try_write_zeroes) {
+ if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
+ try_write_zeroes = false;
+ goto retry;
+ }
+ if (!bdev_write_zeroes_sectors(bdev)) {
+ /*
+ * Zeroing offload support was indicated, but the
+ * device reported ILLEGAL REQUEST (for some devices
+ * there is no non-destructive way to verify whether
+ * WRITE ZEROES is actually supported).
+ */
+ ret = -EOPNOTSUPP;
+ }
+ }
return ret;
}
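
After this change a plain call keeps its old behaviour, but the ZERO_PAGE fallback now also kicks in when the device advertises WRITE ZEROES and then fails the command at runtime. A sketch of zeroing one megabyte (2048 512-byte sectors); 'start_sector' is illustrative:

        int err = blkdev_issue_zeroout(bdev, start_sector, 2048, GFP_KERNEL, 0);
        if (err)
                pr_err("zeroout failed: %d\n", err);

        /* with BLKDEV_ZERO_NOFALLBACK the ZERO_PAGE path is never used and
         * -EOPNOTSUPP is returned instead when the offload is unavailable */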
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index de294d775acf..b56a4f35720d 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -54,7 +54,6 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(NOMERGES),
QUEUE_FLAG_NAME(SAME_COMP),
QUEUE_FLAG_NAME(FAIL_IO),
- QUEUE_FLAG_NAME(STACKABLE),
QUEUE_FLAG_NAME(NONROT),
QUEUE_FLAG_NAME(IO_STAT),
QUEUE_FLAG_NAME(DISCARD),
@@ -75,6 +74,7 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
QUEUE_FLAG_NAME(QUIESCED),
+ QUEUE_FLAG_NAME(PREEMPT_ONLY),
};
#undef QUEUE_FLAG_NAME
@@ -180,7 +180,6 @@ static const char *const hctx_state_name[] = {
HCTX_STATE_NAME(STOPPED),
HCTX_STATE_NAME(TAG_ACTIVE),
HCTX_STATE_NAME(SCHED_RESTART),
- HCTX_STATE_NAME(TAG_WAITING),
HCTX_STATE_NAME(START_ON_RUN),
};
#undef HCTX_STATE_NAME
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 4ab69435708c..c117bd8fd1f6 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -81,20 +81,103 @@ static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
} else
clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
- if (blk_mq_hctx_has_pending(hctx)) {
- blk_mq_run_hw_queue(hctx, true);
- return true;
- }
+ return blk_mq_run_hw_queue(hctx, true);
+}
- return false;
+/*
+ * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
+ * its queue by itself in its completion handler, so we don't need to
+ * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
+ */
+static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct elevator_queue *e = q->elevator;
+ LIST_HEAD(rq_list);
+
+ do {
+ struct request *rq;
+
+ if (e->type->ops.mq.has_work &&
+ !e->type->ops.mq.has_work(hctx))
+ break;
+
+ if (!blk_mq_get_dispatch_budget(hctx))
+ break;
+
+ rq = e->type->ops.mq.dispatch_request(hctx);
+ if (!rq) {
+ blk_mq_put_dispatch_budget(hctx);
+ break;
+ }
+
+ /*
+ * Now this rq owns the budget which has to be released
+ * if this rq won't be queued to driver via .queue_rq()
+ * in blk_mq_dispatch_rq_list().
+ */
+ list_add(&rq->queuelist, &rq_list);
+ } while (blk_mq_dispatch_rq_list(q, &rq_list, true));
}
+static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx)
+{
+ unsigned idx = ctx->index_hw;
+
+ if (++idx == hctx->nr_ctx)
+ idx = 0;
+
+ return hctx->ctxs[idx];
+}
+
+/*
+ * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
+ * its queue by itself in its completion handler, so we don't need to
+ * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
+ */
+static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ LIST_HEAD(rq_list);
+ struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
+
+ do {
+ struct request *rq;
+
+ if (!sbitmap_any_bit_set(&hctx->ctx_map))
+ break;
+
+ if (!blk_mq_get_dispatch_budget(hctx))
+ break;
+
+ rq = blk_mq_dequeue_from_ctx(hctx, ctx);
+ if (!rq) {
+ blk_mq_put_dispatch_budget(hctx);
+ break;
+ }
+
+ /*
+ * Now this rq owns the budget which has to be released
+ * if this rq won't be queued to driver via .queue_rq()
+ * in blk_mq_dispatch_rq_list().
+ */
+ list_add(&rq->queuelist, &rq_list);
+
+ /* round robin for fair dispatch */
+ ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
+
+ } while (blk_mq_dispatch_rq_list(q, &rq_list, true));
+
+ WRITE_ONCE(hctx->dispatch_from, ctx);
+}
+
+/* return true if hw queue needs to be run again */
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
struct elevator_queue *e = q->elevator;
const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
- bool did_work = false;
LIST_HEAD(rq_list);
/* RCU or SRCU read lock is needed before checking quiesced flag */
@@ -122,29 +205,34 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
* scheduler, we can no longer merge or sort them. So it's best to
* leave them there for as long as we can. Mark the hw queue as
* needing a restart in that case.
+ *
+ * We want to dispatch from the scheduler if there was nothing
+ * on the dispatch list or we were able to dispatch from the
+ * dispatch list.
*/
if (!list_empty(&rq_list)) {
blk_mq_sched_mark_restart_hctx(hctx);
- did_work = blk_mq_dispatch_rq_list(q, &rq_list);
- } else if (!has_sched_dispatch) {
+ if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
+ if (has_sched_dispatch)
+ blk_mq_do_dispatch_sched(hctx);
+ else
+ blk_mq_do_dispatch_ctx(hctx);
+ }
+ } else if (has_sched_dispatch) {
+ blk_mq_do_dispatch_sched(hctx);
+ } else if (q->mq_ops->get_budget) {
+ /*
+ * If we need to get a budget before queueing the request, we
+ * dequeue requests one by one from the sw queue to avoid
+ * messing up I/O merging when dispatch runs out of resources.
+ *
+ * TODO: get more budgets, and dequeue more requests at
+ * a time.
+ */
+ blk_mq_do_dispatch_ctx(hctx);
+ } else {
blk_mq_flush_busy_ctxs(hctx, &rq_list);
- blk_mq_dispatch_rq_list(q, &rq_list);
- }
-
- /*
- * We want to dispatch from the scheduler if we had no work left
- * on the dispatch list, OR if we did have work but weren't able
- * to make progress.
- */
- if (!did_work && has_sched_dispatch) {
- do {
- struct request *rq;
-
- rq = e->type->ops.mq.dispatch_request(hctx);
- if (!rq)
- break;
- list_add(&rq->queuelist, &rq_list);
- } while (blk_mq_dispatch_rq_list(q, &rq_list));
+ blk_mq_dispatch_rq_list(q, &rq_list, false);
}
}
@@ -260,21 +348,21 @@ void blk_mq_sched_request_inserted(struct request *rq)
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
+ bool has_sched,
struct request *rq)
{
- if (rq->tag == -1) {
- rq->rq_flags |= RQF_SORTED;
- return false;
+ /* dispatch flush rq directly */
+ if (rq->rq_flags & RQF_FLUSH_SEQ) {
+ spin_lock(&hctx->lock);
+ list_add(&rq->queuelist, &hctx->dispatch);
+ spin_unlock(&hctx->lock);
+ return true;
}
- /*
- * If we already have a real request tag, send directly to
- * the dispatch list.
- */
- spin_lock(&hctx->lock);
- list_add(&rq->queuelist, &hctx->dispatch);
- spin_unlock(&hctx->lock);
- return true;
+ if (has_sched)
+ rq->rq_flags |= RQF_SORTED;
+
+ return false;
}
/**
@@ -339,21 +427,6 @@ done:
}
}
-/*
- * Add flush/fua to the queue. If we fail getting a driver tag, then
- * punt to the requeue list. Requeue will re-invoke us from a context
- * that's safe to block from.
- */
-static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
- struct request *rq, bool can_block)
-{
- if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
- blk_insert_flush(rq);
- blk_mq_run_hw_queue(hctx, true);
- } else
- blk_mq_add_to_requeue_list(rq, false, true);
-}
-
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
bool run_queue, bool async, bool can_block)
{
@@ -362,12 +435,15 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
struct blk_mq_ctx *ctx = rq->mq_ctx;
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
- if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
- blk_mq_sched_insert_flush(hctx, rq, can_block);
- return;
+ /* flush rq in flush machinery need to be dispatched directly */
+ if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
+ blk_insert_flush(rq);
+ goto run;
}
- if (e && blk_mq_sched_bypass_insert(hctx, rq))
+ WARN_ON(e && (rq->tag != -1));
+
+ if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
goto run;
if (e && e->type->ops.mq.insert_requests) {
@@ -393,23 +469,6 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
struct elevator_queue *e = hctx->queue->elevator;
- if (e) {
- struct request *rq, *next;
-
- /*
- * We bypass requests that already have a driver tag assigned,
- * which should only be flushes. Flushes are only ever inserted
- * as single requests, so we shouldn't ever hit the
- * WARN_ON_ONCE() below (but let's handle it just in case).
- */
- list_for_each_entry_safe(rq, next, list, queuelist) {
- if (WARN_ON_ONCE(rq->tag != -1)) {
- list_del_init(&rq->queuelist);
- blk_mq_sched_bypass_insert(hctx, rq);
- }
- }
- }
-
if (e && e->type->ops.mq.insert_requests)
e->type->ops.mq.insert_requests(hctx, list, false);
else
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 6714507aa6c7..c81b40ecd3f1 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -298,12 +298,12 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
-int blk_mq_reinit_tagset(struct blk_mq_tag_set *set,
- int (reinit_request)(void *, struct request *))
+int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
+ int (fn)(void *, struct request *))
{
int i, j, ret = 0;
- if (WARN_ON_ONCE(!reinit_request))
+ if (WARN_ON_ONCE(!fn))
goto out;
for (i = 0; i < set->nr_hw_queues; i++) {
@@ -316,8 +316,7 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set,
if (!tags->static_rqs[j])
continue;
- ret = reinit_request(set->driver_data,
- tags->static_rqs[j]);
+ ret = fn(data, tags->static_rqs[j]);
if (ret)
goto out;
}
@@ -326,7 +325,7 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set,
out:
return ret;
}
-EXPORT_SYMBOL_GPL(blk_mq_reinit_tagset);
+EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
void *priv)
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index c190165d92ea..61deab0b5a5a 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -45,13 +45,8 @@ static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
}
enum {
- BLK_MQ_TAG_CACHE_MIN = 1,
- BLK_MQ_TAG_CACHE_MAX = 64,
-};
-
-enum {
BLK_MQ_TAG_FAIL = -1U,
- BLK_MQ_TAG_MIN = BLK_MQ_TAG_CACHE_MIN,
+ BLK_MQ_TAG_MIN = 1,
BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1,
};
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 98a18609755e..11097477eeab 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -37,6 +37,7 @@
#include "blk-wbt.h"
#include "blk-mq-sched.h"
+static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -60,10 +61,10 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
/*
* Check if any of the ctx's have pending work in this hardware queue
*/
-bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
+static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
- return sbitmap_any_bit_set(&hctx->ctx_map) ||
- !list_empty_careful(&hctx->dispatch) ||
+ return !list_empty_careful(&hctx->dispatch) ||
+ sbitmap_any_bit_set(&hctx->ctx_map) ||
blk_mq_sched_has_work(hctx);
}
@@ -125,7 +126,8 @@ void blk_freeze_queue_start(struct request_queue *q)
freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
if (freeze_depth == 1) {
percpu_ref_kill(&q->q_usage_counter);
- blk_mq_run_hw_queues(q, false);
+ if (q->mq_ops)
+ blk_mq_run_hw_queues(q, false);
}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
@@ -255,13 +257,6 @@ void blk_mq_wake_waiters(struct request_queue *q)
queue_for_each_hw_ctx(q, hctx, i)
if (blk_mq_hw_queue_mapped(hctx))
blk_mq_tag_wakeup_all(hctx->tags, true);
-
- /*
- * If we are called because the queue has now been marked as
- * dying, we need to ensure that processes currently waiting on
- * the queue are notified as well.
- */
- wake_up_all(&q->mq_freeze_wq);
}
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
@@ -296,6 +291,8 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->q = data->q;
rq->mq_ctx = data->ctx;
rq->cmd_flags = op;
+ if (data->flags & BLK_MQ_REQ_PREEMPT)
+ rq->rq_flags |= RQF_PREEMPT;
if (blk_queue_io_stat(data->q))
rq->rq_flags |= RQF_IO_STAT;
/* do not touch atomic flags, it needs atomic ops against the timer */
@@ -336,12 +333,14 @@ static struct request *blk_mq_get_request(struct request_queue *q,
struct elevator_queue *e = q->elevator;
struct request *rq;
unsigned int tag;
- struct blk_mq_ctx *local_ctx = NULL;
+ bool put_ctx_on_error = false;
blk_queue_enter_live(q);
data->q = q;
- if (likely(!data->ctx))
- data->ctx = local_ctx = blk_mq_get_ctx(q);
+ if (likely(!data->ctx)) {
+ data->ctx = blk_mq_get_ctx(q);
+ put_ctx_on_error = true;
+ }
if (likely(!data->hctx))
data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
if (op & REQ_NOWAIT)
@@ -360,8 +359,8 @@ static struct request *blk_mq_get_request(struct request_queue *q,
tag = blk_mq_get_tag(data);
if (tag == BLK_MQ_TAG_FAIL) {
- if (local_ctx) {
- blk_mq_put_ctx(local_ctx);
+ if (put_ctx_on_error) {
+ blk_mq_put_ctx(data->ctx);
data->ctx = NULL;
}
blk_queue_exit(q);
@@ -384,13 +383,13 @@ static struct request *blk_mq_get_request(struct request_queue *q,
}
struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
- unsigned int flags)
+ blk_mq_req_flags_t flags)
{
struct blk_mq_alloc_data alloc_data = { .flags = flags };
struct request *rq;
int ret;
- ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
+ ret = blk_queue_enter(q, flags);
if (ret)
return ERR_PTR(ret);
@@ -410,7 +409,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
EXPORT_SYMBOL(blk_mq_alloc_request);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
- unsigned int op, unsigned int flags, unsigned int hctx_idx)
+ unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
struct blk_mq_alloc_data alloc_data = { .flags = flags };
struct request *rq;
@@ -429,7 +428,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
if (hctx_idx >= q->nr_hw_queues)
return ERR_PTR(-EIO);
- ret = blk_queue_enter(q, true);
+ ret = blk_queue_enter(q, flags);
if (ret)
return ERR_PTR(ret);
@@ -476,8 +475,14 @@ void blk_mq_free_request(struct request *rq)
if (rq->rq_flags & RQF_MQ_INFLIGHT)
atomic_dec(&hctx->nr_active);
+ if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
+ laptop_io_completion(q->backing_dev_info);
+
wbt_done(q->rq_wb, &rq->issue_stat);
+ if (blk_rq_rl(rq))
+ blk_put_rl(blk_rq_rl(rq));
+
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
if (rq->tag != -1)
@@ -593,22 +598,32 @@ void blk_mq_start_request(struct request *rq)
blk_add_timer(rq);
- /*
- * Ensure that ->deadline is visible before set the started
- * flag and clear the completed flag.
- */
- smp_mb__before_atomic();
+ WARN_ON_ONCE(test_bit(REQ_ATOM_STARTED, &rq->atomic_flags));
/*
* Mark us as started and clear complete. Complete might have been
* set if requeue raced with timeout, which then marked it as
* complete. So be sure to clear complete again when we start
* the request, otherwise we'll ignore the completion event.
+ *
+ * Ensure that ->deadline is visible before we set STARTED, such that
+ * blk_mq_check_expired() is guaranteed to observe our ->deadline when
+ * it observes STARTED.
*/
- if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
- set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
- if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
+ smp_wmb();
+ set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+ if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
+ /*
+ * Coherence order guarantees these consecutive stores to a
+ * single variable propagate in the specified order. Thus the
+ * clear_bit() is ordered _after_ the set bit. See
+ * blk_mq_check_expired().
+ *
+ * (the bits must be part of the same byte for this to be
+ * true).
+ */
clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+ }
if (q->dma_drain_size && blk_rq_bytes(rq)) {
/*
@@ -634,6 +649,8 @@ static void __blk_mq_requeue_request(struct request *rq)
{
struct request_queue *q = rq->q;
+ blk_mq_put_driver_tag(rq);
+
trace_block_rq_requeue(q, rq);
wbt_requeue(q->rq_wb, &rq->issue_stat);
blk_mq_sched_requeue_request(rq);
@@ -690,7 +707,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
/*
* We abuse this flag that is otherwise used by the I/O scheduler to
- * request head insertation from the workqueue.
+ * request head insertion from the workqueue.
*/
BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
@@ -778,11 +795,20 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv, bool reserved)
{
struct blk_mq_timeout_data *data = priv;
+ unsigned long deadline;
if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
return;
/*
+ * Ensures that if we see STARTED we must also see our
+ * up-to-date deadline, see blk_mq_start_request().
+ */
+ smp_rmb();
+
+ deadline = READ_ONCE(rq->deadline);
+
+ /*
* The rq being checked may have been freed and reallocated
* out already here, we avoid this race by checking rq->deadline
* and REQ_ATOM_COMPLETE flag together:
@@ -795,11 +821,20 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
* and clearing the flag in blk_mq_start_request(), so
* this rq won't be timed out too.
*/
- if (time_after_eq(jiffies, rq->deadline)) {
- if (!blk_mark_rq_complete(rq))
+ if (time_after_eq(jiffies, deadline)) {
+ if (!blk_mark_rq_complete(rq)) {
+ /*
+ * Again coherence order ensures that consecutive reads
+ * from the same variable must be in that order. This
+ * ensures that if we see COMPLETE clear, we must then
+ * see STARTED set and we'll ignore this timeout.
+ *
+ * (There's also the MB implied by the test_and_clear())
+ */
blk_mq_rq_timed_out(rq, reserved);
- } else if (!data->next_set || time_after(data->next, rq->deadline)) {
- data->next = rq->deadline;
+ }
+ } else if (!data->next_set || time_after(data->next, deadline)) {
+ data->next = deadline;
data->next_set = 1;
}
}
@@ -880,6 +915,45 @@ void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
+struct dispatch_rq_data {
+ struct blk_mq_hw_ctx *hctx;
+ struct request *rq;
+};
+
+static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
+ void *data)
+{
+ struct dispatch_rq_data *dispatch_data = data;
+ struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
+ struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
+
+ spin_lock(&ctx->lock);
+ if (unlikely(!list_empty(&ctx->rq_list))) {
+ dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
+ list_del_init(&dispatch_data->rq->queuelist);
+ if (list_empty(&ctx->rq_list))
+ sbitmap_clear_bit(sb, bitnr);
+ }
+ spin_unlock(&ctx->lock);
+
+ return !dispatch_data->rq;
+}
+
+struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *start)
+{
+ unsigned off = start ? start->index_hw : 0;
+ struct dispatch_rq_data data = {
+ .hctx = hctx,
+ .rq = NULL,
+ };
+
+ __sbitmap_for_each_set(&hctx->ctx_map, off,
+ dispatch_rq_from_ctx, &data);
+
+ return data.rq;
+}
+
static inline unsigned int queued_to_index(unsigned int queued)
{
if (!queued)
@@ -920,109 +994,95 @@ done:
return rq->tag != -1;
}
-static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
-{
- blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
- rq->tag = -1;
-
- if (rq->rq_flags & RQF_MQ_INFLIGHT) {
- rq->rq_flags &= ~RQF_MQ_INFLIGHT;
- atomic_dec(&hctx->nr_active);
- }
-}
-
-static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
-{
- if (rq->tag == -1 || rq->internal_tag == -1)
- return;
-
- __blk_mq_put_driver_tag(hctx, rq);
-}
-
-static void blk_mq_put_driver_tag(struct request *rq)
+static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
+ int flags, void *key)
{
struct blk_mq_hw_ctx *hctx;
- if (rq->tag == -1 || rq->internal_tag == -1)
- return;
+ hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
- hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
- __blk_mq_put_driver_tag(hctx, rq);
+ list_del_init(&wait->entry);
+ blk_mq_run_hw_queue(hctx, true);
+ return 1;
}
/*
- * If we fail getting a driver tag because all the driver tags are already
- * assigned and on the dispatch list, BUT the first entry does not have a
- * tag, then we could deadlock. For that case, move entries with assigned
- * driver tags to the front, leaving the set of tagged requests in the
- * same order, and the untagged set in the same order.
+ * Mark us waiting for a tag. For shared tags, this involves hooking us into
+ * the tag wakeups. For non-shared tags, we can simply mark us needing a
+ * restart. For both cases, take care to check the condition again after
+ * marking us as waiting.
*/
-static bool reorder_tags_to_front(struct list_head *list)
-{
- struct request *rq, *tmp, *first = NULL;
-
- list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
- if (rq == first)
- break;
- if (rq->tag != -1) {
- list_move(&rq->queuelist, list);
- if (!first)
- first = rq;
- }
- }
-
- return first != NULL;
-}
-
-static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
- void *key)
+static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
+ struct request *rq)
{
- struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_hw_ctx *this_hctx = *hctx;
+ bool shared_tags = (this_hctx->flags & BLK_MQ_F_TAG_SHARED) != 0;
+ struct sbq_wait_state *ws;
+ wait_queue_entry_t *wait;
+ bool ret;
- hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
+ if (!shared_tags) {
+ if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
+ set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);
+ } else {
+ wait = &this_hctx->dispatch_wait;
+ if (!list_empty_careful(&wait->entry))
+ return false;
- list_del(&wait->entry);
- clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
- blk_mq_run_hw_queue(hctx, true);
- return 1;
-}
+ spin_lock(&this_hctx->lock);
+ if (!list_empty(&wait->entry)) {
+ spin_unlock(&this_hctx->lock);
+ return false;
+ }
-static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
-{
- struct sbq_wait_state *ws;
+ ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
+ add_wait_queue(&ws->wait, wait);
+ }
/*
- * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
- * The thread which wins the race to grab this bit adds the hardware
- * queue to the wait queue.
+ * It's possible that a tag was freed in the window between the
+ * allocation failure and adding the hardware queue to the wait
+ * queue.
*/
- if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
- test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
- return false;
+ ret = blk_mq_get_driver_tag(rq, hctx, false);
- init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
- ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);
+ if (!shared_tags) {
+ /*
+ * Don't clear RESTART here, someone else could have set it.
+ * At most this will cost an extra queue run.
+ */
+ return ret;
+ } else {
+ if (!ret) {
+ spin_unlock(&this_hctx->lock);
+ return false;
+ }
- /*
- * As soon as this returns, it's no longer safe to fiddle with
- * hctx->dispatch_wait, since a completion can wake up the wait queue
- * and unlock the bit.
- */
- add_wait_queue(&ws->wait, &hctx->dispatch_wait);
- return true;
+ /*
+ * We got a tag, remove ourselves from the wait queue to ensure
+ * someone else gets the wakeup.
+ */
+ spin_lock_irq(&ws->wait.lock);
+ list_del_init(&wait->entry);
+ spin_unlock_irq(&ws->wait.lock);
+ spin_unlock(&this_hctx->lock);
+ return true;
+ }
}
-bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
+bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
+ bool got_budget)
{
struct blk_mq_hw_ctx *hctx;
- struct request *rq;
+ struct request *rq, *nxt;
+ bool no_tag = false;
int errors, queued;
if (list_empty(list))
return false;
+ WARN_ON(!list_is_singular(list) && got_budget);
+
/*
* Now process all the entries, sending them to the driver.
*/
@@ -1033,23 +1093,29 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
rq = list_first_entry(list, struct request, queuelist);
if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
- if (!queued && reorder_tags_to_front(list))
- continue;
-
/*
* The initial allocation attempt failed, so we need to
- * rerun the hardware queue when a tag is freed.
+ * rerun the hardware queue when a tag is freed. The
+ * waitqueue takes care of that. If the queue is run
+ * before we add this entry back on the dispatch list,
+ * we'll re-run it below.
*/
- if (!blk_mq_dispatch_wait_add(hctx))
+ if (!blk_mq_mark_tag_wait(&hctx, rq)) {
+ if (got_budget)
+ blk_mq_put_dispatch_budget(hctx);
+ /*
+ * For non-shared tags, the RESTART check
+ * will suffice.
+ */
+ if (hctx->flags & BLK_MQ_F_TAG_SHARED)
+ no_tag = true;
break;
+ }
+ }
- /*
- * It's possible that a tag was freed in the window
- * between the allocation failure and adding the
- * hardware queue to the wait queue.
- */
- if (!blk_mq_get_driver_tag(rq, &hctx, false))
- break;
+ if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
+ blk_mq_put_driver_tag(rq);
+ break;
}
list_del_init(&rq->queuelist);
@@ -1063,15 +1129,21 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
if (list_empty(list))
bd.last = true;
else {
- struct request *nxt;
-
nxt = list_first_entry(list, struct request, queuelist);
bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
}
ret = q->mq_ops->queue_rq(hctx, &bd);
if (ret == BLK_STS_RESOURCE) {
- blk_mq_put_driver_tag_hctx(hctx, rq);
+ /*
+ * If an I/O scheduler has been configured and we got a
+ * driver tag for the next request already, free it
+ * again.
+ */
+ if (!list_empty(list)) {
+ nxt = list_first_entry(list, struct request, queuelist);
+ blk_mq_put_driver_tag(nxt);
+ }
list_add(&rq->queuelist, list);
__blk_mq_requeue_request(rq);
break;
@@ -1093,13 +1165,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
* that is where we will continue on next queue run.
*/
if (!list_empty(list)) {
- /*
- * If an I/O scheduler has been configured and we got a driver
- * tag for the next request already, free it again.
- */
- rq = list_first_entry(list, struct request, queuelist);
- blk_mq_put_driver_tag(rq);
-
spin_lock(&hctx->lock);
list_splice_init(list, &hctx->dispatch);
spin_unlock(&hctx->lock);
@@ -1109,10 +1174,10 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
* it is no longer set that means that it was cleared by another
* thread and hence that a queue rerun is needed.
*
- * If TAG_WAITING is set that means that an I/O scheduler has
- * been configured and another thread is waiting for a driver
- * tag. To guarantee fairness, do not rerun this hardware queue
- * but let the other thread grab the driver tag.
+ * If 'no_tag' is set, that means that we failed getting
+ * a driver tag with an I/O scheduler attached. If our dispatch
+ * waitqueue is no longer active, ensure that we run the queue
+ * AFTER adding our entries back to the list.
*
* If no I/O scheduler has been configured it is possible that
* the hardware queue got stopped and restarted before requests
@@ -1124,8 +1189,8 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
* returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
* and dm-rq.
*/
- if (!blk_mq_sched_needs_restart(hctx) &&
- !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
+ if (!blk_mq_sched_needs_restart(hctx) ||
+ (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
blk_mq_run_hw_queue(hctx, true);
}
@@ -1218,9 +1283,14 @@ void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
-void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
- __blk_mq_delay_run_hw_queue(hctx, async, 0);
+ if (blk_mq_hctx_has_pending(hctx)) {
+ __blk_mq_delay_run_hw_queue(hctx, async, 0);
+ return true;
+ }
+
+ return false;
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);
@@ -1230,8 +1300,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
int i;
queue_for_each_hw_ctx(q, hctx, i) {
- if (!blk_mq_hctx_has_pending(hctx) ||
- blk_mq_hctx_stopped(hctx))
+ if (blk_mq_hctx_stopped(hctx))
continue;
blk_mq_run_hw_queue(hctx, async);
@@ -1405,7 +1474,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
* Should only be used carefully, when the caller knows we want to
* bypass a potential IO scheduler on the target device.
*/
-void blk_mq_request_bypass_insert(struct request *rq)
+void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
@@ -1414,7 +1483,8 @@ void blk_mq_request_bypass_insert(struct request *rq)
list_add_tail(&rq->queuelist, &hctx->dispatch);
spin_unlock(&hctx->lock);
- blk_mq_run_hw_queue(hctx, false);
+ if (run_queue)
+ blk_mq_run_hw_queue(hctx, false);
}
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
@@ -1501,13 +1571,9 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
blk_init_request_from_bio(rq, bio);
- blk_account_io_start(rq, true);
-}
+ blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));
-static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
-{
- return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
- !blk_queue_nomerges(hctx->queue);
+ blk_account_io_start(rq, true);
}
static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
@@ -1552,6 +1618,11 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
if (!blk_mq_get_driver_tag(rq, NULL, false))
goto insert;
+ if (!blk_mq_get_dispatch_budget(hctx)) {
+ blk_mq_put_driver_tag(rq);
+ goto insert;
+ }
+
new_cookie = request_to_qc_t(hctx, rq);
/*
@@ -1641,13 +1712,10 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
if (unlikely(is_flush_fua)) {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
- if (q->elevator) {
- blk_mq_sched_insert_request(rq, false, true, true,
- true);
- } else {
- blk_insert_flush(rq);
- blk_mq_run_hw_queue(data.hctx, true);
- }
+
+ /* bypass scheduler for flush rq */
+ blk_insert_flush(rq);
+ blk_mq_run_hw_queue(data.hctx, true);
} else if (plug && q->nr_hw_queues == 1) {
struct request *last = NULL;
@@ -1979,7 +2047,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
* Allocate space for all possible cpus to avoid allocation at
* runtime
*/
- hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
+ hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
GFP_KERNEL, node);
if (!hctx->ctxs)
goto unregister_cpu_notifier;
@@ -1990,6 +2058,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
hctx->nr_ctx = 0;
+ init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
+ INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
+
if (set->ops->init_hctx &&
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
goto free_bitmap;
@@ -2229,8 +2300,11 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
mutex_lock(&set->tag_list_lock);
- /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
- if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
+ /*
+ * Check to see if we're transitioning to shared (from 1 to 2 queues).
+ */
+ if (!list_empty(&set->tag_list) &&
+ !(set->flags & BLK_MQ_F_TAG_SHARED)) {
set->flags |= BLK_MQ_F_TAG_SHARED;
/* update existing queue */
blk_mq_update_tag_set_depth(set, true);
@@ -2404,6 +2478,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
spin_lock_init(&q->requeue_lock);
blk_queue_make_request(q, blk_mq_make_request);
+ if (q->mq_ops->poll)
+ q->poll_fn = blk_mq_poll;
/*
* Do this after blk_queue_make_request() overrides it...
@@ -2460,10 +2536,9 @@ static void blk_mq_queue_reinit(struct request_queue *q)
/*
* redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
- * we should change hctx numa_node according to new topology (this
- * involves free and re-allocate memory, worthy doing?)
+ * we should change hctx numa_node according to the new topology (this
+ * involves freeing and re-allocating memory, worth doing?)
*/
-
blk_mq_map_swqueue(q);
blk_mq_sysfs_register(q);
@@ -2552,6 +2627,9 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (!set->ops->queue_rq)
return -EINVAL;
+ if (!set->ops->get_budget ^ !set->ops->put_budget)
+ return -EINVAL;
+
if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
pr_info("blk-mq: reduced tag depth to %u\n",
BLK_MQ_MAX_DEPTH);
@@ -2642,8 +2720,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
* queue depth. This is similar to what the old code would do.
*/
if (!hctx->sched_tags) {
- ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
- min(nr, set->queue_depth),
+ ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
false);
} else {
ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
@@ -2863,20 +2940,14 @@ static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
return false;
}
-bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
{
struct blk_mq_hw_ctx *hctx;
- struct blk_plug *plug;
struct request *rq;
- if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
- !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return false;
- plug = current->plug;
- if (plug)
- blk_flush_plug_list(plug, false);
-
hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
if (!blk_qc_t_is_internal(cookie))
rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
@@ -2894,10 +2965,15 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
return __blk_mq_poll(hctx, rq);
}
-EXPORT_SYMBOL_GPL(blk_mq_poll);
static int __init blk_mq_init(void)
{
+ /*
+ * See comment in block/blk.h rq_atomic_flags enum
+ */
+ BUILD_BUG_ON((REQ_ATOM_STARTED / BITS_PER_BYTE) !=
+ (REQ_ATOM_COMPLETE / BITS_PER_BYTE));
+
cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
blk_mq_hctx_notify_dead);
return 0;
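The ordering changes to blk_mq_start_request() and blk_mq_check_expired() above follow a publish/observe pattern: the writer stores ->deadline, issues smp_wmb(), then sets STARTED; the reader tests STARTED, issues smp_rmb(), and only then trusts the deadline, while COMPLETE is kept in the same byte so the store order of the two bits is preserved. The following is a minimal userspace analogue of that pattern using C11 atomics; the fake_* names and types are invented for illustration only and this is not kernel code.

#include <stdatomic.h>
#include <stdbool.h>

struct fake_rq {
	unsigned long deadline;
	atomic_uchar flags;          /* STARTED and COMPLETE share one byte */
};

#define FAKE_STARTED  0x01
#define FAKE_COMPLETE 0x02

static void fake_start_request(struct fake_rq *rq, unsigned long deadline)
{
	rq->deadline = deadline;
	/* release pairs with the acquire in fake_check_expired(): the
	 * deadline store is visible before STARTED can be observed */
	atomic_fetch_or_explicit(&rq->flags, FAKE_STARTED,
				 memory_order_release);
	atomic_fetch_and_explicit(&rq->flags, (unsigned char)~FAKE_COMPLETE,
				  memory_order_relaxed);
}

static bool fake_check_expired(const struct fake_rq *rq, unsigned long now)
{
	if (!(atomic_load_explicit(&rq->flags, memory_order_acquire) &
	      FAKE_STARTED))
		return false;           /* not started, nothing to time out */
	/* safe: the deadline was published before STARTED became visible */
	return now >= rq->deadline;
}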
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 4933af9d61f7..6c7c3ff5bf62 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -3,6 +3,7 @@
#define INT_BLK_MQ_H
#include "blk-stat.h"
+#include "blk-mq-tag.h"
struct blk_mq_tag_set;
@@ -26,16 +27,16 @@ struct blk_mq_ctx {
struct kobject kobj;
} ____cacheline_aligned_in_smp;
-void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
-bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
+bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
-bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
bool wait);
+struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *start);
/*
* Internal helpers for allocating/freeing the request map
@@ -55,7 +56,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
*/
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
bool at_head);
-void blk_mq_request_bypass_insert(struct request *rq);
+void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list);
@@ -109,7 +110,7 @@ static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
struct blk_mq_alloc_data {
/* input parameter */
struct request_queue *q;
- unsigned int flags;
+ blk_mq_req_flags_t flags;
unsigned int shallow_depth;
/* input & output parameter */
@@ -138,4 +139,53 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
unsigned int inflight[2]);
+static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+
+ if (q->mq_ops->put_budget)
+ q->mq_ops->put_budget(hctx);
+}
+
+static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+
+ if (q->mq_ops->get_budget)
+ return q->mq_ops->get_budget(hctx);
+ return true;
+}
+
+static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
+{
+ blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
+ rq->tag = -1;
+
+ if (rq->rq_flags & RQF_MQ_INFLIGHT) {
+ rq->rq_flags &= ~RQF_MQ_INFLIGHT;
+ atomic_dec(&hctx->nr_active);
+ }
+}
+
+static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
+{
+ if (rq->tag == -1 || rq->internal_tag == -1)
+ return;
+
+ __blk_mq_put_driver_tag(hctx, rq);
+}
+
+static inline void blk_mq_put_driver_tag(struct request *rq)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ if (rq->tag == -1 || rq->internal_tag == -1)
+ return;
+
+ hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+ __blk_mq_put_driver_tag(hctx, rq);
+}
+
#endif
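The new dispatch-budget helpers above simply forward to the optional ->get_budget/->put_budget ops, and blk_mq_alloc_tag_set() earlier in this diff rejects a tag set that defines only one of the pair via the "!a ^ !b" test. A plain C illustration of that validation, using a made-up ops structure, might look like this:

#include <stdbool.h>
#include <stddef.h>

struct fake_mq_ops {
	bool (*get_budget)(void *hctx);
	void (*put_budget)(void *hctx);
};

/* true when both budget hooks are set or both are NULL */
static bool budget_ops_valid(const struct fake_mq_ops *ops)
{
	return !(!ops->get_budget ^ !ops->put_budget);
}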
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 8559e9563c52..48ebe6be07b7 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(blk_set_stacking_limits);
* Caveat:
* The driver that does this *must* be able to deal appropriately
* with buffers in "highmemory". This can be accomplished by either calling
- * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
+ * kmap_atomic() to get a temporary kernel mapping, or by calling
* blk_queue_bounce() to create a buffer in normal memory.
**/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
diff --git a/block/blk-stat.c b/block/blk-stat.c
index c52356d90fe3..3a2f3c96f367 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -11,8 +11,6 @@
#include "blk-mq.h"
#include "blk.h"
-#define BLK_RQ_STAT_BATCH 64
-
struct blk_queue_stats {
struct list_head callbacks;
spinlock_t lock;
@@ -23,45 +21,21 @@ static void blk_stat_init(struct blk_rq_stat *stat)
{
stat->min = -1ULL;
stat->max = stat->nr_samples = stat->mean = 0;
- stat->batch = stat->nr_batch = 0;
-}
-
-static void blk_stat_flush_batch(struct blk_rq_stat *stat)
-{
- const s32 nr_batch = READ_ONCE(stat->nr_batch);
- const s32 nr_samples = READ_ONCE(stat->nr_samples);
-
- if (!nr_batch)
- return;
- if (!nr_samples)
- stat->mean = div64_s64(stat->batch, nr_batch);
- else {
- stat->mean = div64_s64((stat->mean * nr_samples) +
- stat->batch,
- nr_batch + nr_samples);
- }
-
- stat->nr_samples += nr_batch;
- stat->nr_batch = stat->batch = 0;
+ stat->batch = 0;
}
+/* src is a per-cpu stat, mean isn't initialized */
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
- blk_stat_flush_batch(src);
-
if (!src->nr_samples)
return;
dst->min = min(dst->min, src->min);
dst->max = max(dst->max, src->max);
- if (!dst->nr_samples)
- dst->mean = src->mean;
- else {
- dst->mean = div64_s64((src->mean * src->nr_samples) +
- (dst->mean * dst->nr_samples),
- dst->nr_samples + src->nr_samples);
- }
+ dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
+ dst->nr_samples + src->nr_samples);
+
dst->nr_samples += src->nr_samples;
}
@@ -69,13 +43,8 @@ static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
stat->min = min(stat->min, value);
stat->max = max(stat->max, value);
-
- if (stat->batch + value < stat->batch ||
- stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
- blk_stat_flush_batch(stat);
-
stat->batch += value;
- stat->nr_batch++;
+ stat->nr_samples++;
}
void blk_stat_add(struct request *rq)
@@ -84,7 +53,7 @@ void blk_stat_add(struct request *rq)
struct blk_stat_callback *cb;
struct blk_rq_stat *stat;
int bucket;
- s64 now, value;
+ u64 now, value;
now = __blk_stat_time(ktime_to_ns(ktime_get()));
if (now < blk_stat_time(&rq->issue_stat))
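With the batching removed above, a per-cpu blk_rq_stat keeps only min/max, a sample count and the raw sum in ->batch; its ->mean is never computed. blk_stat_sum() therefore folds it into the aggregate in one weighted step, roughly mean' = (src_sum + dst_mean * dst_n) / (dst_n + src_n). A standalone sketch of that step, with hypothetical names, assuming the per-cpu side only carries a sum:

/* Combine an aggregate mean with a per-cpu raw sum; illustrative only. */
static unsigned long long combine_mean(unsigned long long dst_mean,
				       unsigned long long dst_n,
				       unsigned long long src_sum,
				       unsigned long long src_n)
{
	if (!src_n)
		return dst_mean;
	return (src_sum + dst_mean * dst_n) / (dst_n + src_n);
}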
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 8631763866c6..96ad32623427 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2113,8 +2113,12 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
{
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- if (bio->bi_css)
+ if (bio->bi_css) {
+ if (bio->bi_cg_private)
+ blkg_put(tg_to_blkg(bio->bi_cg_private));
bio->bi_cg_private = tg;
+ blkg_get(tg_to_blkg(tg));
+ }
blk_stat_set_issue(&bio->bi_issue_stat, bio_sectors(bio));
#endif
}
@@ -2284,8 +2288,10 @@ void blk_throtl_bio_endio(struct bio *bio)
start_time = blk_stat_time(&bio->bi_issue_stat) >> 10;
finish_time = __blk_stat_time(finish_time_ns) >> 10;
- if (!start_time || finish_time <= start_time)
+ if (!start_time || finish_time <= start_time) {
+ blkg_put(tg_to_blkg(tg));
return;
+ }
lat = finish_time - start_time;
/* this is only for bio based driver */
@@ -2315,6 +2321,8 @@ void blk_throtl_bio_endio(struct bio *bio)
tg->bio_cnt /= 2;
tg->bad_bio_cnt /= 2;
}
+
+ blkg_put(tg_to_blkg(tg));
}
#endif
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 17ec83bb0900..764ecf9aeb30 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -134,8 +134,6 @@ void blk_timeout_work(struct work_struct *work)
struct request *rq, *tmp;
int next_set = 0;
- if (blk_queue_enter(q, true))
- return;
spin_lock_irqsave(q->queue_lock, flags);
list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
@@ -145,7 +143,6 @@ void blk_timeout_work(struct work_struct *work)
mod_timer(&q->timeout, round_jiffies_up(next));
spin_unlock_irqrestore(q->queue_lock, flags);
- blk_queue_exit(q);
}
/**
@@ -211,7 +208,7 @@ void blk_add_timer(struct request *req)
if (!req->timeout)
req->timeout = q->rq_timeout;
- req->deadline = jiffies + req->timeout;
+ WRITE_ONCE(req->deadline, jiffies + req->timeout);
/*
* Only the non-mq case needs to add the request to a protected list.
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 6a9a0f03a67b..b252da0e4c11 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -261,7 +261,7 @@ static inline bool stat_sample_valid(struct blk_rq_stat *stat)
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
- u64 now, issue = ACCESS_ONCE(rwb->sync_issue);
+ u64 now, issue = READ_ONCE(rwb->sync_issue);
if (!issue || !rwb->sync_cookie)
return 0;
@@ -654,7 +654,7 @@ void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on)
}
/*
- * Disable wbt, if enabled by default. Only called from CFQ.
+ * Disable wbt, if enabled by default.
*/
void wbt_disable_default(struct request_queue *q)
{
diff --git a/block/blk.h b/block/blk.h
index 85be8b232b37..3f1446937aec 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -123,8 +123,15 @@ void blk_account_io_done(struct request *req);
* Internal atomic flags for request handling
*/
enum rq_atomic_flags {
+ /*
+ * Keep these two bits first - not because we depend on the
+ * value of them, but we do depend on them being in the same
+ * byte of storage to ensure ordering on writes. Keeping them
+ * first will achieve that nicely.
+ */
REQ_ATOM_COMPLETE = 0,
REQ_ATOM_STARTED,
+
REQ_ATOM_POLL_SLEPT,
};
@@ -149,45 +156,6 @@ static inline void blk_clear_rq_complete(struct request *rq)
void blk_insert_flush(struct request *rq);
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
- struct request *rq;
- struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
-
- WARN_ON_ONCE(q->mq_ops);
-
- while (1) {
- if (!list_empty(&q->queue_head)) {
- rq = list_entry_rq(q->queue_head.next);
- return rq;
- }
-
- /*
- * Flush request is running and flush request isn't queueable
- * in the drive, we can hold the queue till flush request is
- * finished. Even we don't do this, driver can't dispatch next
- * requests and will requeue them. And this can improve
- * throughput too. For example, we have request flush1, write1,
- * flush 2. flush1 is dispatched, then queue is hold, write1
- * isn't inserted to queue. After flush1 is finished, flush2
- * will be dispatched. Since disk cache is already clean,
- * flush2 will be finished very soon, so looks like flush2 is
- * folded to flush1.
- * Since the queue is hold, a flag is set to indicate the queue
- * should be restarted later. Please see flush_end_io() for
- * details.
- */
- if (fq->flush_pending_idx != fq->flush_running_idx &&
- !queue_flush_queueable(q)) {
- fq->flush_queue_delayed = 1;
- return NULL;
- }
- if (unlikely(blk_queue_bypass(q)) ||
- !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
- return NULL;
- }
-}
-
static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
diff --git a/block/bsg.c b/block/bsg.c
index ee1335c68de7..452f94f1c5d4 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -137,7 +137,7 @@ static inline struct hlist_head *bsg_dev_idx_hash(int index)
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_v4 *hdr, struct bsg_device *bd,
- fmode_t has_write_perm)
+ fmode_t mode)
{
struct scsi_request *req = scsi_req(rq);
@@ -152,7 +152,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
return -EFAULT;
if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
- if (blk_verify_command(req->cmd, has_write_perm))
+ if (blk_verify_command(req->cmd, mode))
return -EPERM;
} else if (!capable(CAP_SYS_RAWIO))
return -EPERM;
@@ -206,7 +206,7 @@ bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *op)
* map sg_io_v4 to a request.
*/
static struct request *
-bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
+bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t mode)
{
struct request_queue *q = bd->queue;
struct request *rq, *next_rq = NULL;
@@ -237,7 +237,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
if (IS_ERR(rq))
return rq;
- ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
+ ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, mode);
if (ret)
goto out;
@@ -587,8 +587,7 @@ bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
}
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
- size_t count, ssize_t *bytes_written,
- fmode_t has_write_perm)
+ size_t count, ssize_t *bytes_written, fmode_t mode)
{
struct bsg_command *bc;
struct request *rq;
@@ -619,7 +618,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
/*
* get a request, fill in the blanks, and add to request queue
*/
- rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm);
+ rq = bsg_map_hdr(bd, &bc->hdr, mode);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
rq = NULL;
@@ -655,8 +654,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
bsg_set_block(bd, file);
bytes_written = 0;
- ret = __bsg_write(bd, buf, count, &bytes_written,
- file->f_mode & FMODE_WRITE);
+ ret = __bsg_write(bd, buf, count, &bytes_written, file->f_mode);
*ppos = bytes_written;
@@ -915,7 +913,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(&hdr, uarg, sizeof(hdr)))
return -EFAULT;
- rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE);
+ rq = bsg_map_hdr(bd, &hdr, file->f_mode);
if (IS_ERR(rq))
return PTR_ERR(rq);
diff --git a/block/elevator.c b/block/elevator.c
index 153926a90901..7bda083d5968 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -83,12 +83,25 @@ bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
}
EXPORT_SYMBOL(elv_bio_merge_ok);
-static struct elevator_type *elevator_find(const char *name)
+static bool elevator_match(const struct elevator_type *e, const char *name)
+{
+ if (!strcmp(e->elevator_name, name))
+ return true;
+ if (e->elevator_alias && !strcmp(e->elevator_alias, name))
+ return true;
+
+ return false;
+}
+
+/*
+ * Return the scheduler with name 'name' and with a matching 'mq' capability.
+ */
+static struct elevator_type *elevator_find(const char *name, bool mq)
{
struct elevator_type *e;
list_for_each_entry(e, &elv_list, list) {
- if (!strcmp(e->elevator_name, name))
+ if (elevator_match(e, name) && (mq == e->uses_mq))
return e;
}
@@ -100,25 +113,25 @@ static void elevator_put(struct elevator_type *e)
module_put(e->elevator_owner);
}
-static struct elevator_type *elevator_get(const char *name, bool try_loading)
+static struct elevator_type *elevator_get(struct request_queue *q,
+ const char *name, bool try_loading)
{
struct elevator_type *e;
spin_lock(&elv_list_lock);
- e = elevator_find(name);
+ e = elevator_find(name, q->mq_ops != NULL);
if (!e && try_loading) {
spin_unlock(&elv_list_lock);
request_module("%s-iosched", name);
spin_lock(&elv_list_lock);
- e = elevator_find(name);
+ e = elevator_find(name, q->mq_ops != NULL);
}
if (e && !try_module_get(e->elevator_owner))
e = NULL;
spin_unlock(&elv_list_lock);
-
return e;
}
@@ -144,8 +157,12 @@ void __init load_default_elevator_module(void)
if (!chosen_elevator[0])
return;
+ /*
+ * The boot parameter is deprecated and has never been supported for MQ.
+ * Only look for non-mq schedulers from here.
+ */
spin_lock(&elv_list_lock);
- e = elevator_find(chosen_elevator);
+ e = elevator_find(chosen_elevator, false);
spin_unlock(&elv_list_lock);
if (!e)
@@ -202,7 +219,7 @@ int elevator_init(struct request_queue *q, char *name)
q->boundary_rq = NULL;
if (name) {
- e = elevator_get(name, true);
+ e = elevator_get(q, name, true);
if (!e)
return -EINVAL;
}
@@ -214,7 +231,7 @@ int elevator_init(struct request_queue *q, char *name)
* allowed from async.
*/
if (!e && !q->mq_ops && *chosen_elevator) {
- e = elevator_get(chosen_elevator, false);
+ e = elevator_get(q, chosen_elevator, false);
if (!e)
printk(KERN_ERR "I/O scheduler %s not found\n",
chosen_elevator);
@@ -229,17 +246,17 @@ int elevator_init(struct request_queue *q, char *name)
*/
if (q->mq_ops) {
if (q->nr_hw_queues == 1)
- e = elevator_get("mq-deadline", false);
+ e = elevator_get(q, "mq-deadline", false);
if (!e)
return 0;
} else
- e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
+ e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
if (!e) {
printk(KERN_ERR
"Default I/O scheduler not found. " \
"Using noop.\n");
- e = elevator_get("noop", false);
+ e = elevator_get(q, "noop", false);
}
}
@@ -905,7 +922,7 @@ int elv_register(struct elevator_type *e)
/* register, don't allow duplicate names */
spin_lock(&elv_list_lock);
- if (elevator_find(e->elevator_name)) {
+ if (elevator_find(e->elevator_name, e->uses_mq)) {
spin_unlock(&elv_list_lock);
if (e->icq_cache)
kmem_cache_destroy(e->icq_cache);
@@ -915,9 +932,9 @@ int elv_register(struct elevator_type *e)
spin_unlock(&elv_list_lock);
/* print pretty message */
- if (!strcmp(e->elevator_name, chosen_elevator) ||
+ if (elevator_match(e, chosen_elevator) ||
(!*chosen_elevator &&
- !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
+ elevator_match(e, CONFIG_DEFAULT_IOSCHED)))
def = " (default)";
printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
@@ -1066,25 +1083,15 @@ static int __elevator_change(struct request_queue *q, const char *name)
return elevator_switch(q, NULL);
strlcpy(elevator_name, name, sizeof(elevator_name));
- e = elevator_get(strstrip(elevator_name), true);
+ e = elevator_get(q, strstrip(elevator_name), true);
if (!e)
return -EINVAL;
- if (q->elevator &&
- !strcmp(elevator_name, q->elevator->type->elevator_name)) {
+ if (q->elevator && elevator_match(q->elevator->type, elevator_name)) {
elevator_put(e);
return 0;
}
- if (!e->uses_mq && q->mq_ops) {
- elevator_put(e);
- return -EINVAL;
- }
- if (e->uses_mq && !q->mq_ops) {
- elevator_put(e);
- return -EINVAL;
- }
-
return elevator_switch(q, e);
}
@@ -1116,9 +1123,10 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
struct elevator_queue *e = q->elevator;
struct elevator_type *elv = NULL;
struct elevator_type *__e;
+ bool uses_mq = q->mq_ops != NULL;
int len = 0;
- if (!blk_queue_stackable(q))
+ if (!queue_is_rq_based(q))
return sprintf(name, "none\n");
if (!q->elevator)
@@ -1128,7 +1136,8 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
spin_lock(&elv_list_lock);
list_for_each_entry(__e, &elv_list, list) {
- if (elv && !strcmp(elv->elevator_name, __e->elevator_name)) {
+ if (elv && elevator_match(elv, __e->elevator_name) &&
+ (__e->uses_mq == uses_mq)) {
len += sprintf(name+len, "[%s] ", elv->elevator_name);
continue;
}
diff --git a/block/genhd.c b/block/genhd.c
index dd305c65ffb0..c2223f12a805 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -588,6 +588,11 @@ static void register_disk(struct device *parent, struct gendisk *disk)
disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
+ if (disk->flags & GENHD_FL_HIDDEN) {
+ dev_set_uevent_suppress(ddev, 0);
+ return;
+ }
+
/* No minors to use for partitions */
if (!disk_part_scan_enabled(disk))
goto exit;
@@ -616,6 +621,11 @@ exit:
while ((part = disk_part_iter_next(&piter)))
kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
disk_part_iter_exit(&piter);
+
+ err = sysfs_create_link(&ddev->kobj,
+ &disk->queue->backing_dev_info->dev->kobj,
+ "bdi");
+ WARN_ON(err);
}
/**
@@ -630,7 +640,6 @@ exit:
*/
void device_add_disk(struct device *parent, struct gendisk *disk)
{
- struct backing_dev_info *bdi;
dev_t devt;
int retval;
@@ -639,7 +648,8 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
* parameters make sense.
*/
WARN_ON(disk->minors && !(disk->major || disk->first_minor));
- WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT));
+ WARN_ON(!disk->minors &&
+ !(disk->flags & (GENHD_FL_EXT_DEVT | GENHD_FL_HIDDEN)));
disk->flags |= GENHD_FL_UP;
@@ -648,22 +658,26 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
WARN_ON(1);
return;
}
- disk_to_dev(disk)->devt = devt;
-
- /* ->major and ->first_minor aren't supposed to be
- * dereferenced from here on, but set them just in case.
- */
disk->major = MAJOR(devt);
disk->first_minor = MINOR(devt);
disk_alloc_events(disk);
- /* Register BDI before referencing it from bdev */
- bdi = disk->queue->backing_dev_info;
- bdi_register_owner(bdi, disk_to_dev(disk));
-
- blk_register_region(disk_devt(disk), disk->minors, NULL,
- exact_match, exact_lock, disk);
+ if (disk->flags & GENHD_FL_HIDDEN) {
+ /*
+ * Don't let hidden disks show up in /proc/partitions,
+ * and don't bother scanning for partitions either.
+ */
+ disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
+ disk->flags |= GENHD_FL_NO_PART_SCAN;
+ } else {
+ /* Register BDI before referencing it from bdev */
+ disk_to_dev(disk)->devt = devt;
+ bdi_register_owner(disk->queue->backing_dev_info,
+ disk_to_dev(disk));
+ blk_register_region(disk_devt(disk), disk->minors, NULL,
+ exact_match, exact_lock, disk);
+ }
register_disk(parent, disk);
blk_register_queue(disk);
@@ -673,10 +687,6 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
*/
WARN_ON_ONCE(!blk_get_queue(disk->queue));
- retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
- "bdi");
- WARN_ON(retval);
-
disk_add_events(disk);
blk_integrity_add(disk);
}
@@ -705,7 +715,8 @@ void del_gendisk(struct gendisk *disk)
set_capacity(disk, 0);
disk->flags &= ~GENHD_FL_UP;
- sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
+ if (!(disk->flags & GENHD_FL_HIDDEN))
+ sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
if (disk->queue) {
/*
* Unregister bdi before releasing device numbers (as they can
@@ -716,13 +727,15 @@ void del_gendisk(struct gendisk *disk)
} else {
WARN_ON(1);
}
- blk_unregister_region(disk_devt(disk), disk->minors);
- part_stat_set_all(&disk->part0, 0);
- disk->part0.stamp = 0;
+ if (!(disk->flags & GENHD_FL_HIDDEN))
+ blk_unregister_region(disk_devt(disk), disk->minors);
kobject_put(disk->part0.holder_dir);
kobject_put(disk->slave_dir);
+
+ part_stat_set_all(&disk->part0, 0);
+ disk->part0.stamp = 0;
if (!sysfs_deprecated)
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
@@ -785,6 +798,10 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
spin_unlock_bh(&ext_devt_lock);
}
+ if (disk && unlikely(disk->flags & GENHD_FL_HIDDEN)) {
+ put_disk(disk);
+ disk = NULL;
+ }
return disk;
}
EXPORT_SYMBOL(get_gendisk);
@@ -1028,6 +1045,15 @@ static ssize_t disk_removable_show(struct device *dev,
(disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
}
+static ssize_t disk_hidden_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ return sprintf(buf, "%d\n",
+ (disk->flags & GENHD_FL_HIDDEN ? 1 : 0));
+}
+
static ssize_t disk_ro_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1065,6 +1091,7 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
+static DEVICE_ATTR(hidden, S_IRUGO, disk_hidden_show, NULL);
static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
@@ -1089,6 +1116,7 @@ static struct attribute *disk_attrs[] = {
&dev_attr_range.attr,
&dev_attr_ext_range.attr,
&dev_attr_removable.attr,
+ &dev_attr_hidden.attr,
&dev_attr_ro.attr,
&dev_attr_size.attr,
&dev_attr_alignment_offset.attr,
@@ -1354,13 +1382,7 @@ dev_t blk_lookup_devt(const char *name, int partno)
}
EXPORT_SYMBOL(blk_lookup_devt);
-struct gendisk *alloc_disk(int minors)
-{
- return alloc_disk_node(minors, NUMA_NO_NODE);
-}
-EXPORT_SYMBOL(alloc_disk);
-
-struct gendisk *alloc_disk_node(int minors, int node_id)
+struct gendisk *__alloc_disk_node(int minors, int node_id)
{
struct gendisk *disk;
struct disk_part_tbl *ptbl;
@@ -1411,7 +1433,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
}
return disk;
}
-EXPORT_SYMBOL(alloc_disk_node);
+EXPORT_SYMBOL(__alloc_disk_node);
struct kobject *get_disk(struct gendisk *disk)
{
diff --git a/block/ioctl.c b/block/ioctl.c
index 0de02ee67eed..1668506d8ed8 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -202,10 +202,16 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
{
uint64_t range[2];
uint64_t start, len;
+ struct request_queue *q = bdev_get_queue(bdev);
+ struct address_space *mapping = bdev->bd_inode->i_mapping;
+
if (!(mode & FMODE_WRITE))
return -EBADF;
+ if (!blk_queue_discard(q))
+ return -EOPNOTSUPP;
+
if (copy_from_user(range, (void __user *)arg, sizeof(range)))
return -EFAULT;
@@ -216,12 +222,12 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
return -EINVAL;
if (len & 511)
return -EINVAL;
- start >>= 9;
- len >>= 9;
- if (start + len > (i_size_read(bdev->bd_inode) >> 9))
+ if (start + len > i_size_read(bdev->bd_inode))
return -EINVAL;
- return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
+ truncate_inode_pages_range(mapping, start, start + len);
+ return blkdev_issue_discard(bdev, start >> 9, len >> 9,
+ GFP_KERNEL, flags);
}
static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
@@ -437,11 +443,12 @@ static int blkdev_roset(struct block_device *bdev, fmode_t mode,
{
int ret, n;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
if (!is_unrecognized_ioctl(ret))
return ret;
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
if (get_user(n, (int __user *)arg))
return -EFAULT;
set_device_ro(bdev, n);
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index f58cab82105b..b4df317c2916 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -541,9 +541,17 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
/*
* Try again in case a token was freed before we got on the wait
- * queue.
+ * queue. The waker may have already removed the entry from the
+ * wait queue, but list_del_init() is okay with that.
*/
nr = __sbitmap_queue_get(domain_tokens);
+ if (nr >= 0) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ws->wait.lock, flags);
+ list_del_init(&wait->entry);
+ spin_unlock_irqrestore(&ws->wait.lock, flags);
+ }
}
return nr;
}
@@ -641,7 +649,7 @@ static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
if (!list_empty_careful(&khd->rqs[i]))
return true;
}
- return false;
+ return sbitmap_any_bit_set(&hctx->ctx_map);
}
#define KYBER_LAT_SHOW_STORE(op) \
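The kyber change above and blk_mq_mark_tag_wait() earlier in this diff follow the same lost-wakeup recipe: register on the wait queue first, retry the allocation, and if the retry succeeds remove the entry again so the next wakeup reaches another waiter. Below is a compressed, non-kernel toy of that control flow using C11 atomics; the token counter and the "waiting" flag stand in for the real sbitmap and wait-queue entry, and all names are hypothetical.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int tokens;       /* the shared resource */
static atomic_bool waiting;     /* "our entry is on the wait queue" */

static bool try_get_token(void)
{
	int t = atomic_load(&tokens);

	while (t > 0) {
		if (atomic_compare_exchange_weak(&tokens, &t, t - 1))
			return true;
	}
	return false;
}

static bool get_token_or_wait(void)
{
	if (try_get_token())
		return true;

	/* 1) register as a waiter first ... */
	atomic_store(&waiting, true);

	/* 2) ... then retry, closing the window where a token was freed
	 *    between the failed attempt and the registration */
	if (try_get_token()) {
		/* 3) got one after all: deregister so a future wakeup is
		 *    not wasted on us */
		atomic_store(&waiting, false);
		return true;
	}
	return false;   /* stay registered; the freer will wake us */
}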
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index a1cad4331edd..0179e484ec98 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -657,6 +657,7 @@ static struct elevator_type mq_deadline = {
#endif
.elevator_attrs = deadline_attrs,
.elevator_name = "mq-deadline",
+ .elevator_alias = "deadline",
.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 7440de44dd85..edcfff974527 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -207,7 +207,7 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
}
-int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm)
+int blk_verify_command(unsigned char *cmd, fmode_t mode)
{
struct blk_cmd_filter *filter = &blk_default_cmd_filter;
@@ -220,7 +220,7 @@ int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm)
return 0;
/* Write-safe commands require a writable open */
- if (test_bit(cmd[0], filter->write_ok) && has_write_perm)
+ if (test_bit(cmd[0], filter->write_ok) && (mode & FMODE_WRITE))
return 0;
return -EPERM;
@@ -234,7 +234,7 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
- if (blk_verify_command(req->cmd, mode & FMODE_WRITE))
+ if (blk_verify_command(req->cmd, mode))
return -EPERM;
/*
@@ -469,7 +469,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
goto error;
- err = blk_verify_command(req->cmd, mode & FMODE_WRITE);
+ err = blk_verify_command(req->cmd, mode);
if (err)
goto error;
diff --git a/crypto/Kconfig b/crypto/Kconfig
index ac5fb37e6f4b..f7911963bb79 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -860,6 +860,17 @@ config CRYPTO_SHA3
References:
http://keccak.noekeon.org/
+config CRYPTO_SM3
+ tristate "SM3 digest algorithm"
+ select CRYPTO_HASH
+ help
+ SM3 secure hash function as defined by OSCCA GM/T 0004-2012 (SM3).
+ It is part of the Chinese Commercial Cryptography suite.
+
+ References:
+ http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
+ https://datatracker.ietf.org/doc/html/draft-shen-sm3-hash
+
config CRYPTO_TGR192
tristate "Tiger digest algorithms"
select CRYPTO_HASH
diff --git a/crypto/Makefile b/crypto/Makefile
index da190be60ce2..d674884b2d51 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
+obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 337cf382718e..85cea9de324a 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -481,33 +481,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
}
EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
-int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)
-{
- switch (err) {
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&completion->completion);
- reinit_completion(&completion->completion);
- err = completion->err;
- break;
- };
-
- return err;
-}
-EXPORT_SYMBOL_GPL(af_alg_wait_for_completion);
-
-void af_alg_complete(struct crypto_async_request *req, int err)
-{
- struct af_alg_completion *completion = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- completion->err = err;
- complete(&completion->completion);
-}
-EXPORT_SYMBOL_GPL(af_alg_complete);
-
/**
* af_alg_alloc_tsgl - allocate the TX SGL
*
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 5e8666e6ccae..3a35d67de7d9 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -334,9 +334,7 @@ static int ahash_op_unaligned(struct ahash_request *req,
return err;
err = op(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && (ahash_request_flags(req) &
- CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
ahash_restore_req(req, err);
@@ -394,9 +392,7 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
req->base.complete = ahash_def_finup_done2;
err = crypto_ahash_reqtfm(req)->final(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && (ahash_request_flags(req) &
- CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
out:
@@ -432,9 +428,7 @@ static int ahash_def_finup(struct ahash_request *req)
return err;
err = tfm->update(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && (ahash_request_flags(req) &
- CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
return ahash_def_finup_finish1(req, err);
diff --git a/crypto/algapi.c b/crypto/algapi.c
index aa699ff6c876..60d7366ed343 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -897,9 +897,11 @@ int crypto_enqueue_request(struct crypto_queue *queue,
int err = -EINPROGRESS;
if (unlikely(queue->qlen >= queue->max_qlen)) {
- err = -EBUSY;
- if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ err = -ENOSPC;
goto out;
+ }
+ err = -EBUSY;
if (queue->backlog == &queue->list)
queue->backlog = &request->list;
}
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 960d8548171b..5e6df2a087fa 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -122,7 +122,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
int notnum = 0;
name = ++p;
- len = 0;
for (; isalnum(*p) || *p == '-' || *p == '_'; p++)
notnum |= !isdigit(*p);
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 516b38c3a169..aacae0837aff 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -278,11 +278,11 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* Synchronous operation */
aead_request_set_callback(&areq->cra_u.aead_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete, &ctx->completion);
- err = af_alg_wait_for_completion(ctx->enc ?
+ crypto_req_done, &ctx->wait);
+ err = crypto_wait_req(ctx->enc ?
crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req),
- &ctx->completion);
+ &ctx->wait);
}
/* AIO operation in progress */
@@ -554,7 +554,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
ctx->merge = 0;
ctx->enc = 0;
ctx->aead_assoclen = 0;
- af_alg_init_completion(&ctx->completion);
+ crypto_init_wait(&ctx->wait);
ask->private = ctx;
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 5e92bd275ef3..76d2e716c792 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -26,7 +26,7 @@ struct hash_ctx {
u8 *result;
- struct af_alg_completion completion;
+ struct crypto_wait wait;
unsigned int len;
bool more;
@@ -88,8 +88,7 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
if ((msg->msg_flags & MSG_MORE))
hash_free_result(sk, ctx);
- err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait);
if (err)
goto unlock;
}
@@ -110,8 +109,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
- err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_update(&ctx->req),
+ &ctx->wait);
af_alg_free_sg(&ctx->sgl);
if (err)
goto unlock;
@@ -129,8 +128,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
goto unlock;
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
- err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+ &ctx->wait);
}
unlock:
@@ -171,7 +170,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
} else {
if (!ctx->more) {
err = crypto_ahash_init(&ctx->req);
- err = af_alg_wait_for_completion(err, &ctx->completion);
+ err = crypto_wait_req(err, &ctx->wait);
if (err)
goto unlock;
}
@@ -179,7 +178,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
err = crypto_ahash_update(&ctx->req);
}
- err = af_alg_wait_for_completion(err, &ctx->completion);
+ err = crypto_wait_req(err, &ctx->wait);
if (err)
goto unlock;
@@ -215,17 +214,16 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
if (!result && !ctx->more) {
- err = af_alg_wait_for_completion(
- crypto_ahash_init(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_init(&ctx->req),
+ &ctx->wait);
if (err)
goto unlock;
}
if (!result || ctx->more) {
ctx->more = 0;
- err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
- &ctx->completion);
+ err = crypto_wait_req(crypto_ahash_final(&ctx->req),
+ &ctx->wait);
if (err)
goto unlock;
}
@@ -476,13 +474,13 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
ctx->result = NULL;
ctx->len = len;
ctx->more = 0;
- af_alg_init_completion(&ctx->completion);
+ crypto_init_wait(&ctx->wait);
ask->private = ctx;
ahash_request_set_tfm(&ctx->req, hash);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete, &ctx->completion);
+ crypto_req_done, &ctx->wait);
sk->sk_destruct = hash_sock_destruct;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 8ae4170aaeb4..9954b078f0b9 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -129,12 +129,11 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
skcipher_request_set_callback(&areq->cra_u.skcipher_req,
CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete,
- &ctx->completion);
- err = af_alg_wait_for_completion(ctx->enc ?
+ crypto_req_done, &ctx->wait);
+ err = crypto_wait_req(ctx->enc ?
crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
- &ctx->completion);
+ &ctx->wait);
}
/* AIO operation in progress */
@@ -388,7 +387,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
- af_alg_init_completion(&ctx->completion);
+ crypto_init_wait(&ctx->wait);
ask->private = ctx;
diff --git a/crypto/api.c b/crypto/api.c
index 941cd4c6c7ec..2a2479d168aa 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -24,6 +24,7 @@
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/completion.h>
#include "internal.h"
LIST_HEAD(crypto_alg_list);
@@ -595,5 +596,17 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
+void crypto_req_done(struct crypto_async_request *req, int err)
+{
+ struct crypto_wait *wait = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ wait->err = err;
+ complete(&wait->completion);
+}
+EXPORT_SYMBOL_GPL(crypto_req_done);
+
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
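
crypto_req_done() is the callback half of the new helper pair; its counterparts crypto_init_wait() and crypto_wait_req(), added to <linux/crypto.h> by this series, sleep on wait.completion and return the status recorded in wait.err. A minimal sketch of the pattern the remaining hunks convert callers to -- the surrounding function, tfm, src, dst, len and iv are assumptions, not part of the patch:

    static int example_encrypt(struct crypto_skcipher *tfm,
                               struct scatterlist *src, struct scatterlist *dst,
                               unsigned int len, u8 *iv)
    {
            struct crypto_wait wait;
            struct skcipher_request *req;
            int err;

            crypto_init_wait(&wait);

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req)
                    return -ENOMEM;

            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                               CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &wait);
            skcipher_request_set_crypt(req, src, dst, len, iv);

            /* a synchronous return value passes straight through; for
             * -EINPROGRESS/-EBUSY this sleeps until crypto_req_done() fires
             * and then returns the status it stored in wait.err */
            err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

            skcipher_request_free(req);
            return err;
    }
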
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 3cd6e12cfc46..d916235d6cf5 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -57,29 +57,13 @@ static void public_key_destroy(void *payload0, void *payload3)
public_key_signature_free(payload3);
}
-struct public_key_completion {
- struct completion completion;
- int err;
-};
-
-static void public_key_verify_done(struct crypto_async_request *req, int err)
-{
- struct public_key_completion *compl = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- compl->err = err;
- complete(&compl->completion);
-}
-
/*
* Verify a signature using a public key.
*/
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig)
{
- struct public_key_completion compl;
+ struct crypto_wait cwait;
struct crypto_akcipher *tfm;
struct akcipher_request *req;
struct scatterlist sig_sg, digest_sg;
@@ -131,20 +115,16 @@ int public_key_verify_signature(const struct public_key *pkey,
sg_init_one(&digest_sg, output, outlen);
akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
outlen);
- init_completion(&compl.completion);
+ crypto_init_wait(&cwait);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
- public_key_verify_done, &compl);
+ crypto_req_done, &cwait);
/* Perform the verification calculation. This doesn't actually do the
* verification, but rather calculates the hash expected by the
* signature and returns that to us.
*/
- ret = crypto_akcipher_verify(req);
- if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
- wait_for_completion(&compl.completion);
- ret = compl.err;
- }
+ ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
if (ret < 0)
goto out_free_output;
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 0508c48a45c4..bd43cf5be14c 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -137,16 +137,14 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
int cpu, err;
struct cryptd_cpu_queue *cpu_queue;
atomic_t *refcnt;
- bool may_backlog;
cpu = get_cpu();
cpu_queue = this_cpu_ptr(queue->cpu_queue);
err = crypto_enqueue_request(&cpu_queue->queue, request);
refcnt = crypto_tfm_ctx(request->tfm);
- may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
- if (err == -EBUSY && !may_backlog)
+ if (err == -ENOSPC)
goto out_put_cpu;
queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
diff --git a/crypto/cts.c b/crypto/cts.c
index 243f591dc409..4773c188e6d9 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -136,8 +136,7 @@ static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err)
goto out;
err = cts_cbc_encrypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
out:
@@ -229,8 +228,7 @@ static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err)
goto out;
err = cts_cbc_decrypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
out:
diff --git a/crypto/dh.c b/crypto/dh.c
index b1032a5c1bfa..5659fe7f446d 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -21,19 +21,12 @@ struct dh_ctx {
MPI xa;
};
-static inline void dh_clear_params(struct dh_ctx *ctx)
+static void dh_clear_ctx(struct dh_ctx *ctx)
{
mpi_free(ctx->p);
mpi_free(ctx->g);
- ctx->p = NULL;
- ctx->g = NULL;
-}
-
-static void dh_free_ctx(struct dh_ctx *ctx)
-{
- dh_clear_params(ctx);
mpi_free(ctx->xa);
- ctx->xa = NULL;
+ memset(ctx, 0, sizeof(*ctx));
}
/*
@@ -60,9 +53,6 @@ static int dh_check_params_length(unsigned int p_len)
static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
{
- if (unlikely(!params->p || !params->g))
- return -EINVAL;
-
if (dh_check_params_length(params->p_size << 3))
return -EINVAL;
@@ -71,10 +61,8 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
return -EINVAL;
ctx->g = mpi_read_raw_data(params->g, params->g_size);
- if (!ctx->g) {
- mpi_free(ctx->p);
+ if (!ctx->g)
return -EINVAL;
- }
return 0;
}
@@ -86,21 +74,23 @@ static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
struct dh params;
/* Free the old MPI key if any */
- dh_free_ctx(ctx);
+ dh_clear_ctx(ctx);
if (crypto_dh_decode_key(buf, len, &params) < 0)
- return -EINVAL;
+ goto err_clear_ctx;
if (dh_set_params(ctx, &params) < 0)
- return -EINVAL;
+ goto err_clear_ctx;
ctx->xa = mpi_read_raw_data(params.key, params.key_size);
- if (!ctx->xa) {
- dh_clear_params(ctx);
- return -EINVAL;
- }
+ if (!ctx->xa)
+ goto err_clear_ctx;
return 0;
+
+err_clear_ctx:
+ dh_clear_ctx(ctx);
+ return -EINVAL;
}
static int dh_compute_value(struct kpp_request *req)
@@ -158,7 +148,7 @@ static void dh_exit_tfm(struct crypto_kpp *tfm)
{
struct dh_ctx *ctx = dh_get_ctx(tfm);
- dh_free_ctx(ctx);
+ dh_clear_ctx(ctx);
}
static struct kpp_alg dh = {
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
index 8ba8a3f82620..24fdb2ecaa85 100644
--- a/crypto/dh_helper.c
+++ b/crypto/dh_helper.c
@@ -28,12 +28,12 @@ static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
return src + size;
}
-static inline int dh_data_size(const struct dh *p)
+static inline unsigned int dh_data_size(const struct dh *p)
{
return p->key_size + p->p_size + p->g_size;
}
-int crypto_dh_key_len(const struct dh *p)
+unsigned int crypto_dh_key_len(const struct dh *p)
{
return DH_KPP_SECRET_MIN_SIZE + dh_data_size(p);
}
@@ -83,6 +83,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
if (secret.len != crypto_dh_key_len(params))
return -EINVAL;
+ /*
+ * Don't permit the buffer for 'key' or 'g' to be larger than 'p', since
+ * some drivers assume otherwise.
+ */
+ if (params->key_size > params->p_size ||
+ params->g_size > params->p_size)
+ return -EINVAL;
+
/* Don't allocate memory. Set pointers to data within
* the given buffer
*/
@@ -90,6 +98,14 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
params->p = (void *)(ptr + params->key_size);
params->g = (void *)(ptr + params->key_size + params->p_size);
+ /*
+ * Don't permit 'p' to be 0. It's not a prime number, and it's subject
+ * to corner cases such as 'mod 0' being undefined or
+ * crypto_kpp_maxsize() returning 0.
+ */
+ if (memchr_inv(params->p, 0, params->p_size) == NULL)
+ return -EINVAL;
+
return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
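
For context, the payload decoded above is laid out back to back as [key | p | g] right after the KPP secret header, which is why bounding key_size and g_size by p_size and rejecting an all-zero 'p' is sufficient validation here. A hypothetical standalone check mirroring the two new tests:

    static bool dh_params_look_sane(const struct dh *params)
    {
            /* neither the key nor the generator may outgrow the modulus */
            if (params->key_size > params->p_size ||
                params->g_size > params->p_size)
                    return false;

            /* an all-zero modulus would make every "mod p" ill-defined */
            return memchr_inv(params->p, 0, params->p_size) != NULL;
    }
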
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 70018397e59a..4faa2781c964 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1651,16 +1651,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
return 0;
}
-static void drbg_skcipher_cb(struct crypto_async_request *req, int error)
-{
- struct drbg_state *drbg = req->data;
-
- if (error == -EINPROGRESS)
- return;
- drbg->ctr_async_err = error;
- complete(&drbg->ctr_completion);
-}
-
static int drbg_init_sym_kernel(struct drbg_state *drbg)
{
struct crypto_cipher *tfm;
@@ -1691,7 +1681,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
return PTR_ERR(sk_tfm);
}
drbg->ctr_handle = sk_tfm;
- init_completion(&drbg->ctr_completion);
+ crypto_init_wait(&drbg->ctr_wait);
req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
if (!req) {
@@ -1700,8 +1690,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
return -ENOMEM;
}
drbg->ctr_req = req;
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- drbg_skcipher_cb, drbg);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &drbg->ctr_wait);
alignmask = crypto_skcipher_alignmask(sk_tfm);
drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
@@ -1762,21 +1753,12 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
/* Output buffer may not be valid for SGL, use scratchpad */
skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
cryptlen, drbg->V);
- ret = crypto_skcipher_encrypt(drbg->ctr_req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&drbg->ctr_completion);
- if (!drbg->ctr_async_err) {
- reinit_completion(&drbg->ctr_completion);
- break;
- }
- default:
+ ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
+ &drbg->ctr_wait);
+ if (ret)
goto out;
- }
- init_completion(&drbg->ctr_completion);
+
+ crypto_init_wait(&drbg->ctr_wait);
memcpy(outbuf, drbg->outscratchpad, cryptlen);
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 4271fc77d261..3aca0933ec44 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -131,17 +131,11 @@ static unsigned int ecdh_max_size(struct crypto_kpp *tfm)
return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1);
}
-static void no_exit_tfm(struct crypto_kpp *tfm)
-{
- return;
-}
-
static struct kpp_alg ecdh = {
.set_secret = ecdh_set_secret,
.generate_public_key = ecdh_compute_value,
.compute_shared_secret = ecdh_compute_value,
.max_size = ecdh_max_size,
- .exit = no_exit_tfm,
.base = {
.cra_name = "ecdh",
.cra_driver_name = "ecdh-generic",
diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
index f05bea5fd257..d3af8e8b0b5e 100644
--- a/crypto/ecdh_helper.c
+++ b/crypto/ecdh_helper.c
@@ -28,7 +28,7 @@ static inline const u8 *ecdh_unpack_data(void *dst, const void *src, size_t sz)
return src + sz;
}
-int crypto_ecdh_key_len(const struct ecdh *params)
+unsigned int crypto_ecdh_key_len(const struct ecdh *params)
{
return ECDH_KPP_SECRET_MIN_SIZE + params->key_size;
}
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 3841b5eafa7e..8589681fb9f6 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -14,9 +14,9 @@
#include <crypto/internal/hash.h>
#include <crypto/null.h>
#include <crypto/scatterwalk.h>
+#include <crypto/gcm.h>
#include <crypto/hash.h>
#include "internal.h"
-#include <linux/completion.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -78,11 +78,6 @@ struct crypto_gcm_req_priv_ctx {
} u;
};
-struct crypto_gcm_setkey_result {
- int err;
- struct completion completion;
-};
-
static struct {
u8 buf[16];
struct scatterlist sg;
@@ -98,17 +93,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
-static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
-{
- struct crypto_gcm_setkey_result *result = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- result->err = err;
- complete(&result->completion);
-}
-
static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
@@ -119,7 +103,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
be128 hash;
u8 iv[16];
- struct crypto_gcm_setkey_result result;
+ struct crypto_wait wait;
struct scatterlist sg[1];
struct skcipher_request req;
@@ -140,21 +124,18 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (!data)
return -ENOMEM;
- init_completion(&data->result.completion);
+ crypto_init_wait(&data->wait);
sg_init_one(data->sg, &data->hash, sizeof(data->hash));
skcipher_request_set_tfm(&data->req, ctr);
skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
- crypto_gcm_setkey_done,
- &data->result);
+ crypto_req_done,
+ &data->wait);
skcipher_request_set_crypt(&data->req, data->sg, data->sg,
sizeof(data->hash), data->iv);
- err = crypto_skcipher_encrypt(&data->req);
- if (err == -EINPROGRESS || err == -EBUSY) {
- wait_for_completion(&data->result.completion);
- err = data->result.err;
- }
+ err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
+ &data->wait);
if (err)
goto out;
@@ -197,8 +178,8 @@ static void crypto_gcm_init_common(struct aead_request *req)
struct scatterlist *sg;
memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
- memcpy(pctx->iv, req->iv, 12);
- memcpy(pctx->iv + 12, &counter, 4);
+ memcpy(pctx->iv, req->iv, GCM_AES_IV_SIZE);
+ memcpy(pctx->iv + GCM_AES_IV_SIZE, &counter, 4);
sg_init_table(pctx->src, 3);
sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
@@ -695,7 +676,7 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
inst->alg.base.cra_alignmask = ghash->base.cra_alignmask |
ctr->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
- inst->alg.ivsize = 12;
+ inst->alg.ivsize = GCM_AES_IV_SIZE;
inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
inst->alg.maxauthsize = 16;
inst->alg.init = crypto_gcm_init_tfm;
@@ -832,20 +813,20 @@ static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
crypto_aead_alignmask(child) + 1);
- scatterwalk_map_and_copy(iv + 12, req->src, 0, req->assoclen - 8, 0);
+ scatterwalk_map_and_copy(iv + GCM_AES_IV_SIZE, req->src, 0, req->assoclen - 8, 0);
memcpy(iv, ctx->nonce, 4);
memcpy(iv + 4, req->iv, 8);
sg_init_table(rctx->src, 3);
- sg_set_buf(rctx->src, iv + 12, req->assoclen - 8);
+ sg_set_buf(rctx->src, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
if (sg != rctx->src + 1)
sg_chain(rctx->src, 2, sg);
if (req->src != req->dst) {
sg_init_table(rctx->dst, 3);
- sg_set_buf(rctx->dst, iv + 12, req->assoclen - 8);
+ sg_set_buf(rctx->dst, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
if (sg != rctx->dst + 1)
sg_chain(rctx->dst, 2, sg);
@@ -957,7 +938,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
err = -EINVAL;
/* Underlying IV size must be 12. */
- if (crypto_aead_alg_ivsize(alg) != 12)
+ if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
goto out_drop_alg;
/* Not a stream cipher? */
@@ -980,7 +961,7 @@ static int crypto_rfc4106_create(struct crypto_template *tmpl,
inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);
- inst->alg.ivsize = 8;
+ inst->alg.ivsize = GCM_RFC4106_IV_SIZE;
inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
@@ -1134,7 +1115,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
tfm,
sizeof(struct crypto_rfc4543_req_ctx) +
ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
- align + 12);
+ align + GCM_AES_IV_SIZE);
return 0;
@@ -1199,7 +1180,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
err = -EINVAL;
/* Underlying IV size must be 12. */
- if (crypto_aead_alg_ivsize(alg) != 12)
+ if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
goto out_drop_alg;
/* Not a stream cipher? */
@@ -1222,7 +1203,7 @@ static int crypto_rfc4543_create(struct crypto_template *tmpl,
inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
- inst->alg.ivsize = 8;
+ inst->alg.ivsize = GCM_RFC4543_IV_SIZE;
inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
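
The named sizes substituted throughout this file are expected to come from the new <crypto/gcm.h> header; judging by the magic numbers they replace, roughly:

    #define GCM_AES_IV_SIZE         12      /* full 96-bit GCM nonce */
    #define GCM_RFC4106_IV_SIZE     8       /* per-packet explicit IV */
    #define GCM_RFC4543_IV_SIZE     8
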
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index dc012129c063..24e601954c7a 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -156,6 +156,19 @@ static void gf128mul_x8_bbe(be128 *x)
x->b = cpu_to_be64((b << 8) ^ _tt);
}
+void gf128mul_x8_ble(le128 *r, const le128 *x)
+{
+ u64 a = le64_to_cpu(x->a);
+ u64 b = le64_to_cpu(x->b);
+
+ /* reduction term selected by the byte shifted out of 'a' below */
+ u64 _tt = gf128mul_table_be[a >> 56];
+
+ r->a = cpu_to_le64((a << 8) | (b >> 56));
+ r->b = cpu_to_le64((b << 8) ^ _tt);
+}
+EXPORT_SYMBOL(gf128mul_x8_ble);
+
void gf128mul_lle(be128 *r, const be128 *b)
{
be128 p[8];
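
gf128mul_x8_ble() multiplies a ble-format field element by x^8 in one step, i.e. it batches eight single doublings; that is why a tweak walker (for example an XTS-style offload driver) might prefer it. Illustrative only, assuming the existing single-step helper gf128mul_x_ble() from <crypto/gf128mul.h>:

    static void advance_tweak(le128 *t, bool batched)
    {
            if (batched) {
                    gf128mul_x8_ble(t, t);          /* t *= x^8 in one call */
            } else {
                    int i;

                    for (i = 0; i < 8; i++)         /* same result */
                            gf128mul_x_ble(t, t);
            }
    }
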
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index 72014f963ba7..744e35134c45 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -93,18 +93,10 @@ struct crypto_kw_ctx {
struct crypto_kw_block {
#define SEMIBSIZE 8
- u8 A[SEMIBSIZE];
- u8 R[SEMIBSIZE];
+ __be64 A;
+ __be64 R;
};
-/* convert 64 bit integer into its string representation */
-static inline void crypto_kw_cpu_to_be64(u64 val, u8 *buf)
-{
- __be64 *a = (__be64 *)buf;
-
- *a = cpu_to_be64(val);
-}
-
/*
* Fast forward the SGL to the "end" length minus SEMIBSIZE.
* The start in the SGL defined by the fast-forward is returned with
@@ -139,17 +131,10 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
-
- unsigned long alignmask = max_t(unsigned long, SEMIBSIZE,
- crypto_cipher_alignmask(child));
- unsigned int i;
-
- u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask];
- struct crypto_kw_block *block = (struct crypto_kw_block *)
- PTR_ALIGN(blockbuf + 0, alignmask + 1);
-
- u64 t = 6 * ((nbytes) >> 3);
+ struct crypto_kw_block block;
struct scatterlist *lsrc, *ldst;
+ u64 t = 6 * ((nbytes) >> 3);
+ unsigned int i;
int ret = 0;
/*
@@ -160,7 +145,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
return -EINVAL;
/* Place the IV into block A */
- memcpy(block->A, desc->info, SEMIBSIZE);
+ memcpy(&block.A, desc->info, SEMIBSIZE);
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
@@ -171,32 +156,27 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
ldst = dst;
for (i = 0; i < 6; i++) {
- u8 tbe_buffer[SEMIBSIZE + alignmask];
- /* alignment for the crypto_xor and the _to_be64 operation */
- u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1);
- unsigned int tmp_nbytes = nbytes;
struct scatter_walk src_walk, dst_walk;
+ unsigned int tmp_nbytes = nbytes;
while (tmp_nbytes) {
/* move pointer by tmp_nbytes in the SGL */
crypto_kw_scatterlist_ff(&src_walk, lsrc, tmp_nbytes);
/* get the source block */
- scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE,
+ scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
false);
- /* perform KW operation: get counter as byte string */
- crypto_kw_cpu_to_be64(t, tbe);
/* perform KW operation: modify IV with counter */
- crypto_xor(block->A, tbe, SEMIBSIZE);
+ block.A ^= cpu_to_be64(t);
t--;
/* perform KW operation: decrypt block */
- crypto_cipher_decrypt_one(child, (u8*)block,
- (u8*)block);
+ crypto_cipher_decrypt_one(child, (u8*)&block,
+ (u8*)&block);
/* move pointer by tmp_nbytes in the SGL */
crypto_kw_scatterlist_ff(&dst_walk, ldst, tmp_nbytes);
/* Copy block->R into place */
- scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE,
+ scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
true);
tmp_nbytes -= SEMIBSIZE;
@@ -208,11 +188,10 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
}
/* Perform authentication check */
- if (crypto_memneq("\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", block->A,
- SEMIBSIZE))
+ if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6))
ret = -EBADMSG;
- memzero_explicit(block, sizeof(struct crypto_kw_block));
+ memzero_explicit(&block, sizeof(struct crypto_kw_block));
return ret;
}
@@ -224,17 +203,10 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
-
- unsigned long alignmask = max_t(unsigned long, SEMIBSIZE,
- crypto_cipher_alignmask(child));
- unsigned int i;
-
- u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask];
- struct crypto_kw_block *block = (struct crypto_kw_block *)
- PTR_ALIGN(blockbuf + 0, alignmask + 1);
-
- u64 t = 1;
+ struct crypto_kw_block block;
struct scatterlist *lsrc, *ldst;
+ u64 t = 1;
+ unsigned int i;
/*
* Require at least 2 semiblocks (note, the 3rd semiblock that is
@@ -249,7 +221,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
* Place the predefined IV into block A -- for encrypt, the caller
* does not need to provide an IV, but he needs to fetch the final IV.
*/
- memcpy(block->A, "\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", SEMIBSIZE);
+ block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6);
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
@@ -260,30 +232,26 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
ldst = dst;
for (i = 0; i < 6; i++) {
- u8 tbe_buffer[SEMIBSIZE + alignmask];
- u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1);
- unsigned int tmp_nbytes = nbytes;
struct scatter_walk src_walk, dst_walk;
+ unsigned int tmp_nbytes = nbytes;
scatterwalk_start(&src_walk, lsrc);
scatterwalk_start(&dst_walk, ldst);
while (tmp_nbytes) {
/* get the source block */
- scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE,
+ scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
false);
/* perform KW operation: encrypt block */
- crypto_cipher_encrypt_one(child, (u8 *)block,
- (u8 *)block);
- /* perform KW operation: get counter as byte string */
- crypto_kw_cpu_to_be64(t, tbe);
+ crypto_cipher_encrypt_one(child, (u8 *)&block,
+ (u8 *)&block);
/* perform KW operation: modify IV with counter */
- crypto_xor(block->A, tbe, SEMIBSIZE);
+ block.A ^= cpu_to_be64(t);
t++;
/* Copy block->R into place */
- scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE,
+ scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
true);
tmp_nbytes -= SEMIBSIZE;
@@ -295,9 +263,9 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
}
/* establish the IV for the caller to pick up */
- memcpy(desc->info, block->A, SEMIBSIZE);
+ memcpy(desc->info, &block.A, SEMIBSIZE);
- memzero_explicit(block, sizeof(struct crypto_kw_block));
+ memzero_explicit(&block, sizeof(struct crypto_kw_block));
return 0;
}
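
Treating the semiblocks as __be64 lets the round counter be folded in with one XOR; the removed crypto_kw_cpu_to_be64() plus crypto_xor() sequence computed the same value byte by byte. Illustrative helper, not part of the patch:

    static __be64 kw_mix_counter(__be64 A, u64 t)
    {
            /* equivalent to: convert t to a big-endian byte string, then
             * crypto_xor() it into A over SEMIBSIZE bytes */
            return A ^ cpu_to_be64(t);
    }
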
diff --git a/crypto/lrw.c b/crypto/lrw.c
index a8bfae4451bf..cbbd7c50ad19 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -328,9 +328,7 @@ static int do_encrypt(struct skcipher_request *req, int err)
crypto_skcipher_encrypt(subreq) ?:
post_crypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY &&
- req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
}
@@ -380,9 +378,7 @@ static int do_decrypt(struct skcipher_request *req, int err)
crypto_skcipher_decrypt(subreq) ?:
post_crypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY &&
- req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
}
@@ -610,9 +606,12 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
ecb_name[len - 1] = 0;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
- }
+ "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
+ err = -ENAMETOOLONG;
+ goto err_drop_spawn;
+ }
+ } else
+ goto err_drop_spawn;
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = alg->base.cra_priority;
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 049486ede938..40e053b97b69 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -213,8 +213,6 @@ static void rmd128_transform(u32 *state, const __le32 *in)
state[2] = state[3] + aa + bbb;
state[3] = state[0] + bb + ccc;
state[0] = ddd;
-
- return;
}
static int rmd128_init(struct shash_desc *desc)
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index de585e51d455..5f3e6ea35268 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -256,8 +256,6 @@ static void rmd160_transform(u32 *state, const __le32 *in)
state[3] = state[4] + aa + bbb;
state[4] = state[0] + bb + ccc;
state[0] = ddd;
-
- return;
}
static int rmd160_init(struct shash_desc *desc)
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index 4ec02a754e09..f50c025cc962 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -228,8 +228,6 @@ static void rmd256_transform(u32 *state, const __le32 *in)
state[5] += bbb;
state[6] += ccc;
state[7] += ddd;
-
- return;
}
static int rmd256_init(struct shash_desc *desc)
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index 770f2cb369f8..e1315e4869e8 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -275,8 +275,6 @@ static void rmd320_transform(u32 *state, const __le32 *in)
state[7] += ccc;
state[8] += ddd;
state[9] += eee;
-
- return;
}
static int rmd320_init(struct shash_desc *desc)
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 407c64bdcdd9..2908f93c3e55 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -279,9 +279,7 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
req->dst, ctx->key_size - 1, req->dst_len);
err = crypto_akcipher_encrypt(&req_ctx->child_req);
- if (err != -EINPROGRESS &&
- (err != -EBUSY ||
- !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_encrypt_sign_complete(req, err);
return err;
@@ -383,9 +381,7 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
ctx->key_size);
err = crypto_akcipher_decrypt(&req_ctx->child_req);
- if (err != -EINPROGRESS &&
- (err != -EBUSY ||
- !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_decrypt_complete(req, err);
return err;
@@ -440,9 +436,7 @@ static int pkcs1pad_sign(struct akcipher_request *req)
req->dst, ctx->key_size - 1, req->dst_len);
err = crypto_akcipher_sign(&req_ctx->child_req);
- if (err != -EINPROGRESS &&
- (err != -EBUSY ||
- !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_encrypt_sign_complete(req, err);
return err;
@@ -561,9 +555,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
ctx->key_size);
err = crypto_akcipher_verify(&req_ctx->child_req);
- if (err != -EINPROGRESS &&
- (err != -EBUSY ||
- !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_verify_complete(req, err);
return err;
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
new file mode 100644
index 000000000000..9e823d99f095
--- /dev/null
+++ b/crypto/sm3_generic.c
@@ -0,0 +1,210 @@
+/*
+ * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and
+ * described at https://tools.ietf.org/html/draft-shen-sm3-hash-01
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Written by Gilad Ben-Yossef <gilad@benyossef.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <crypto/sm3.h>
+#include <crypto/sm3_base.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
+ 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
+ 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
+ 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
+ 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B
+};
+EXPORT_SYMBOL_GPL(sm3_zero_message_hash);
+
+static inline u32 p0(u32 x)
+{
+ return x ^ rol32(x, 9) ^ rol32(x, 17);
+}
+
+static inline u32 p1(u32 x)
+{
+ return x ^ rol32(x, 15) ^ rol32(x, 23);
+}
+
+static inline u32 ff(unsigned int n, u32 a, u32 b, u32 c)
+{
+ return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c));
+}
+
+static inline u32 gg(unsigned int n, u32 e, u32 f, u32 g)
+{
+ return (n < 16) ? (e ^ f ^ g) : ((e & f) | ((~e) & g));
+}
+
+static inline u32 t(unsigned int n)
+{
+ return (n < 16) ? SM3_T1 : SM3_T2;
+}
+
+static void sm3_expand(u32 *t, u32 *w, u32 *wt)
+{
+ int i;
+ unsigned int tmp;
+
+ /* load the input */
+ for (i = 0; i <= 15; i++)
+ w[i] = get_unaligned_be32((__u32 *)t + i);
+
+ for (i = 16; i <= 67; i++) {
+ tmp = w[i - 16] ^ w[i - 9] ^ rol32(w[i - 3], 15);
+ w[i] = p1(tmp) ^ (rol32(w[i - 13], 7)) ^ w[i - 6];
+ }
+
+ for (i = 0; i <= 63; i++)
+ wt[i] = w[i] ^ w[i + 4];
+}
+
+static void sm3_compress(u32 *w, u32 *wt, u32 *m)
+{
+ u32 ss1;
+ u32 ss2;
+ u32 tt1;
+ u32 tt2;
+ u32 a, b, c, d, e, f, g, h;
+ int i;
+
+ a = m[0];
+ b = m[1];
+ c = m[2];
+ d = m[3];
+ e = m[4];
+ f = m[5];
+ g = m[6];
+ h = m[7];
+
+ for (i = 0; i <= 63; i++) {
+
+ ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
+
+ ss2 = ss1 ^ rol32(a, 12);
+
+ tt1 = ff(i, a, b, c) + d + ss2 + *wt;
+ wt++;
+
+ tt2 = gg(i, e, f, g) + h + ss1 + *w;
+ w++;
+
+ d = c;
+ c = rol32(b, 9);
+ b = a;
+ a = tt1;
+ h = g;
+ g = rol32(f, 19);
+ f = e;
+ e = p0(tt2);
+ }
+
+ m[0] = a ^ m[0];
+ m[1] = b ^ m[1];
+ m[2] = c ^ m[2];
+ m[3] = d ^ m[3];
+ m[4] = e ^ m[4];
+ m[5] = f ^ m[5];
+ m[6] = g ^ m[6];
+ m[7] = h ^ m[7];
+
+ a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0;
+}
+
+static void sm3_transform(struct sm3_state *sst, u8 const *src)
+{
+ unsigned int w[68];
+ unsigned int wt[64];
+
+ sm3_expand((u32 *)src, w, wt);
+ sm3_compress(w, wt, sst->state);
+
+ memzero_explicit(w, sizeof(w));
+ memzero_explicit(wt, sizeof(wt));
+}
+
+static void sm3_generic_block_fn(struct sm3_state *sst, u8 const *src,
+ int blocks)
+{
+ while (blocks--) {
+ sm3_transform(sst, src);
+ src += SM3_BLOCK_SIZE;
+ }
+}
+
+int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
+}
+EXPORT_SYMBOL(crypto_sm3_update);
+
+static int sm3_final(struct shash_desc *desc, u8 *out)
+{
+ sm3_base_do_finalize(desc, sm3_generic_block_fn);
+ return sm3_base_finish(desc, out);
+}
+
+int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash)
+{
+ sm3_base_do_update(desc, data, len, sm3_generic_block_fn);
+ return sm3_final(desc, hash);
+}
+EXPORT_SYMBOL(crypto_sm3_finup);
+
+static struct shash_alg sm3_alg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .init = sm3_base_init,
+ .update = crypto_sm3_update,
+ .final = sm3_final,
+ .finup = crypto_sm3_finup,
+ .descsize = sizeof(struct sm3_state),
+ .base = {
+ .cra_name = "sm3",
+ .cra_driver_name = "sm3-generic",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = SM3_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+static int __init sm3_generic_mod_init(void)
+{
+ return crypto_register_shash(&sm3_alg);
+}
+
+static void __exit sm3_generic_mod_fini(void)
+{
+ crypto_unregister_shash(&sm3_alg);
+}
+
+module_init(sm3_generic_mod_init);
+module_exit(sm3_generic_mod_fini);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SM3 Secure Hash Algorithm");
+
+MODULE_ALIAS_CRYPTO("sm3");
+MODULE_ALIAS_CRYPTO("sm3-generic");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 0022a18d36ee..9267cbdb14d2 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -70,7 +70,7 @@ static int mode;
static char *tvmem[TVMEMSIZE];
static char *check[] = {
- "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256",
+ "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3",
"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
@@ -79,34 +79,11 @@ static char *check[] = {
NULL
};
-struct tcrypt_result {
- struct completion completion;
- int err;
-};
-
-static void tcrypt_complete(struct crypto_async_request *req, int err)
-{
- struct tcrypt_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static inline int do_one_aead_op(struct aead_request *req, int ret)
{
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- struct tcrypt_result *tr = req->base.data;
+ struct crypto_wait *wait = req->base.data;
- ret = wait_for_completion_interruptible(&tr->completion);
- if (!ret)
- ret = tr->err;
- reinit_completion(&tr->completion);
- }
-
- return ret;
+ return crypto_wait_req(ret, wait);
}
static int test_aead_jiffies(struct aead_request *req, int enc,
@@ -248,7 +225,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
char *axbuf[XBUFSIZE];
unsigned int *b_size;
unsigned int iv_len;
- struct tcrypt_result result;
+ struct crypto_wait wait;
iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
if (!iv)
@@ -284,7 +261,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
goto out_notfm;
}
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
get_driver_name(crypto_aead, tfm), e);
@@ -296,7 +273,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
}
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
i = 0;
do {
@@ -340,7 +317,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
}
sg_init_aead(sg, xbuf,
- *b_size + (enc ? authsize : 0));
+ *b_size + (enc ? 0 : authsize));
sg_init_aead(sgout, xoutbuf,
*b_size + (enc ? authsize : 0));
@@ -348,7 +325,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
sg_set_buf(&sg[0], assoc, aad_size);
sg_set_buf(&sgout[0], assoc, aad_size);
- aead_request_set_crypt(req, sg, sgout, *b_size, iv);
+ aead_request_set_crypt(req, sg, sgout,
+ *b_size + (enc ? 0 : authsize),
+ iv);
aead_request_set_ad(req, aad_size);
if (secs)
@@ -381,7 +360,6 @@ out_noaxbuf:
testmgr_free_buf(xbuf);
out_noxbuf:
kfree(iv);
- return;
}
static void test_hash_sg_init(struct scatterlist *sg)
@@ -397,21 +375,16 @@ static void test_hash_sg_init(struct scatterlist *sg)
static inline int do_one_ahash_op(struct ahash_request *req, int ret)
{
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- struct tcrypt_result *tr = req->base.data;
+ struct crypto_wait *wait = req->base.data;
- wait_for_completion(&tr->completion);
- reinit_completion(&tr->completion);
- ret = tr->err;
- }
- return ret;
+ return crypto_wait_req(ret, wait);
}
struct test_mb_ahash_data {
struct scatterlist sg[TVMEMSIZE];
char result[64];
struct ahash_request *req;
- struct tcrypt_result tresult;
+ struct crypto_wait wait;
char *xbuf[XBUFSIZE];
};
@@ -440,7 +413,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
if (testmgr_alloc_buf(data[i].xbuf))
goto out;
- init_completion(&data[i].tresult.completion);
+ crypto_init_wait(&data[i].wait);
data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!data[i].req) {
@@ -449,8 +422,8 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
goto out;
}
- ahash_request_set_callback(data[i].req, 0,
- tcrypt_complete, &data[i].tresult);
+ ahash_request_set_callback(data[i].req, 0, crypto_req_done,
+ &data[i].wait);
test_hash_sg_init(data[i].sg);
}
@@ -492,16 +465,16 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
if (ret)
break;
- complete(&data[k].tresult.completion);
- data[k].tresult.err = 0;
+ crypto_req_done(&data[k].req->base, 0);
}
for (j = 0; j < k; j++) {
- struct tcrypt_result *tr = &data[j].tresult;
+ struct crypto_wait *wait = &data[j].wait;
+ int wait_ret;
- wait_for_completion(&tr->completion);
- if (tr->err)
- ret = tr->err;
+ wait_ret = crypto_wait_req(-EINPROGRESS, wait);
+ if (wait_ret)
+ ret = wait_ret;
}
end = get_cycles();
@@ -679,7 +652,7 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
struct hash_speed *speed, unsigned mask)
{
struct scatterlist sg[TVMEMSIZE];
- struct tcrypt_result tresult;
+ struct crypto_wait wait;
struct ahash_request *req;
struct crypto_ahash *tfm;
char *output;
@@ -708,9 +681,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
goto out;
}
- init_completion(&tresult.completion);
+ crypto_init_wait(&wait);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &tresult);
+ crypto_req_done, &wait);
output = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
if (!output)
@@ -765,15 +738,9 @@ static void test_hash_speed(const char *algo, unsigned int secs,
static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
{
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- struct tcrypt_result *tr = req->base.data;
-
- wait_for_completion(&tr->completion);
- reinit_completion(&tr->completion);
- ret = tr->err;
- }
+ struct crypto_wait *wait = req->base.data;
- return ret;
+ return crypto_wait_req(ret, wait);
}
static int test_acipher_jiffies(struct skcipher_request *req, int enc,
@@ -853,7 +820,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
unsigned int tcount, u8 *keysize, bool async)
{
unsigned int ret, i, j, k, iv_len;
- struct tcrypt_result tresult;
+ struct crypto_wait wait;
const char *key;
char iv[128];
struct skcipher_request *req;
@@ -866,7 +833,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
else
e = "decryption";
- init_completion(&tresult.completion);
+ crypto_init_wait(&wait);
tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC);
@@ -887,7 +854,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
}
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &tresult);
+ crypto_req_done, &wait);
i = 0;
do {
@@ -1269,6 +1236,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
ret += tcrypt_test("sha3-512");
break;
+ case 52:
+ ret += tcrypt_test("sm3");
+ break;
+
case 100:
ret += tcrypt_test("hmac(md5)");
break;
@@ -1603,115 +1574,116 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
speed_template_32);
break;
-
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
break;
}
-
/* fall through */
-
case 301:
test_hash_speed("md4", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 302:
test_hash_speed("md5", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 303:
test_hash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 304:
test_hash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 305:
test_hash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 306:
test_hash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 307:
test_hash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 308:
test_hash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 309:
test_hash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 310:
test_hash_speed("tgr128", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 311:
test_hash_speed("tgr160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 312:
test_hash_speed("tgr192", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 313:
test_hash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 314:
test_hash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 315:
test_hash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 316:
test_hash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 317:
test_hash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 318:
test_hash_speed("ghash-generic", sec, hash_speed_template_16);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 319:
test_hash_speed("crc32c", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 320:
test_hash_speed("crct10dif", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 321:
test_hash_speed("poly1305", sec, poly1305_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 322:
test_hash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 323:
test_hash_speed("sha3-256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 324:
test_hash_speed("sha3-384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
case 325:
test_hash_speed("sha3-512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
-
+ /* fall through */
+ case 326:
+ test_hash_speed("sm3", sec, generic_hash_speed_template);
+ if (mode > 300 && mode < 400) break;
+ /* fall through */
case 399:
break;
@@ -1720,106 +1692,107 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
test_ahash_speed(alg, sec, generic_hash_speed_template);
break;
}
-
/* fall through */
-
case 401:
test_ahash_speed("md4", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 402:
test_ahash_speed("md5", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 403:
test_ahash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 404:
test_ahash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 405:
test_ahash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 406:
test_ahash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 407:
test_ahash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 408:
test_ahash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 409:
test_ahash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 410:
test_ahash_speed("tgr128", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 411:
test_ahash_speed("tgr160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 412:
test_ahash_speed("tgr192", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 413:
test_ahash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 414:
test_ahash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 415:
test_ahash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 416:
test_ahash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 417:
test_ahash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 418:
test_ahash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 419:
test_ahash_speed("sha3-256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 420:
test_ahash_speed("sha3-384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
-
+ /* fall through */
case 421:
test_ahash_speed("sha3-512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 422:
test_mb_ahash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 423:
test_mb_ahash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
case 424:
test_mb_ahash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
-
+ /* fall through */
+ case 425:
+ test_mb_ahash_speed("sm3", sec, generic_hash_speed_template);
+ if (mode > 400 && mode < 500) break;
+ /* fall through */
case 499:
break;
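
The multibuffer hash loop above relies on two properties of the new helpers: crypto_req_done(&req->base, 0) records status 0 and completes the wait object bound to that request, and crypto_wait_req(-EINPROGRESS, &wait) always sleeps and then hands back the recorded status. The helper behaves like the open-coded wait_async_op()/tcrypt_complete() pairs this patch removes; as a sketch:

    static inline int crypto_wait_req_sketch(int err, struct crypto_wait *wait)
    {
            if (err == -EINPROGRESS || err == -EBUSY) {
                    wait_for_completion(&wait->completion);
                    reinit_completion(&wait->completion);
                    err = wait->err;
            }
            return err;
    }
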
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 7125ba3880af..29d7020b8826 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -76,11 +76,6 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
#define ENCRYPT 1
#define DECRYPT 0
-struct tcrypt_result {
- struct completion completion;
- int err;
-};
-
struct aead_test_suite {
struct {
const struct aead_testvec *vecs;
@@ -155,17 +150,6 @@ static void hexdump(unsigned char *buf, unsigned int len)
buf, len, false);
}
-static void tcrypt_complete(struct crypto_async_request *req, int err)
-{
- struct tcrypt_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
int i;
@@ -193,20 +177,10 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
free_page((unsigned long)buf[i]);
}
-static int wait_async_op(struct tcrypt_result *tr, int ret)
-{
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- wait_for_completion(&tr->completion);
- reinit_completion(&tr->completion);
- ret = tr->err;
- }
- return ret;
-}
-
static int ahash_partial_update(struct ahash_request **preq,
struct crypto_ahash *tfm, const struct hash_testvec *template,
void *hash_buff, int k, int temp, struct scatterlist *sg,
- const char *algo, char *result, struct tcrypt_result *tresult)
+ const char *algo, char *result, struct crypto_wait *wait)
{
char *state;
struct ahash_request *req;
@@ -236,7 +210,7 @@ static int ahash_partial_update(struct ahash_request **preq,
}
ahash_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, tresult);
+ crypto_req_done, wait);
memcpy(hash_buff, template->plaintext + temp,
template->tap[k]);
@@ -247,7 +221,7 @@ static int ahash_partial_update(struct ahash_request **preq,
pr_err("alg: hash: Failed to import() for %s\n", algo);
goto out;
}
- ret = wait_async_op(tresult, crypto_ahash_update(req));
+ ret = crypto_wait_req(crypto_ahash_update(req), wait);
if (ret)
goto out;
*preq = req;
@@ -272,7 +246,7 @@ static int __test_hash(struct crypto_ahash *tfm,
char *result;
char *key;
struct ahash_request *req;
- struct tcrypt_result tresult;
+ struct crypto_wait wait;
void *hash_buff;
char *xbuf[XBUFSIZE];
int ret = -ENOMEM;
@@ -286,7 +260,7 @@ static int __test_hash(struct crypto_ahash *tfm,
if (testmgr_alloc_buf(xbuf))
goto out_nobuf;
- init_completion(&tresult.completion);
+ crypto_init_wait(&wait);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -295,7 +269,7 @@ static int __test_hash(struct crypto_ahash *tfm,
goto out_noreq;
}
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &tresult);
+ crypto_req_done, &wait);
j = 0;
for (i = 0; i < tcount; i++) {
@@ -335,26 +309,26 @@ static int __test_hash(struct crypto_ahash *tfm,
ahash_request_set_crypt(req, sg, result, template[i].psize);
if (use_digest) {
- ret = wait_async_op(&tresult, crypto_ahash_digest(req));
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
if (ret) {
pr_err("alg: hash: digest failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
} else {
- ret = wait_async_op(&tresult, crypto_ahash_init(req));
+ ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
pr_err("alg: hash: init failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
- ret = wait_async_op(&tresult, crypto_ahash_update(req));
+ ret = crypto_wait_req(crypto_ahash_update(req), &wait);
if (ret) {
pr_err("alg: hash: update failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
- ret = wait_async_op(&tresult, crypto_ahash_final(req));
+ ret = crypto_wait_req(crypto_ahash_final(req), &wait);
if (ret) {
pr_err("alg: hash: final failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
@@ -420,22 +394,10 @@ static int __test_hash(struct crypto_ahash *tfm,
}
ahash_request_set_crypt(req, sg, result, template[i].psize);
- ret = crypto_ahash_digest(req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&tresult.completion);
- reinit_completion(&tresult.completion);
- ret = tresult.err;
- if (!ret)
- break;
- /* fall through */
- default:
- printk(KERN_ERR "alg: hash: digest failed "
- "on chunking test %d for %s: "
- "ret=%d\n", j, algo, -ret);
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ if (ret) {
+ pr_err("alg: hash: digest failed on chunking test %d for %s: ret=%d\n",
+ j, algo, -ret);
goto out;
}
@@ -486,13 +448,13 @@ static int __test_hash(struct crypto_ahash *tfm,
}
ahash_request_set_crypt(req, sg, result, template[i].tap[0]);
- ret = wait_async_op(&tresult, crypto_ahash_init(req));
+ ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
pr_err("alg: hash: init failed on test %d for %s: ret=%d\n",
j, algo, -ret);
goto out;
}
- ret = wait_async_op(&tresult, crypto_ahash_update(req));
+ ret = crypto_wait_req(crypto_ahash_update(req), &wait);
if (ret) {
pr_err("alg: hash: update failed on test %d for %s: ret=%d\n",
j, algo, -ret);
@@ -503,7 +465,7 @@ static int __test_hash(struct crypto_ahash *tfm,
for (k = 1; k < template[i].np; k++) {
ret = ahash_partial_update(&req, tfm, &template[i],
hash_buff, k, temp, &sg[0], algo, result,
- &tresult);
+ &wait);
if (ret) {
pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n",
j, algo, -ret);
@@ -511,7 +473,7 @@ static int __test_hash(struct crypto_ahash *tfm,
}
temp += template[i].tap[k];
}
- ret = wait_async_op(&tresult, crypto_ahash_final(req));
+ ret = crypto_wait_req(crypto_ahash_final(req), &wait);
if (ret) {
pr_err("alg: hash: final failed on test %d for %s: ret=%d\n",
j, algo, -ret);
@@ -580,7 +542,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
struct scatterlist *sg;
struct scatterlist *sgout;
const char *e, *d;
- struct tcrypt_result result;
+ struct crypto_wait wait;
unsigned int authsize, iv_len;
void *input;
void *output;
@@ -619,7 +581,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
else
e = "decryption";
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -629,7 +591,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
}
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
iv_len = crypto_aead_ivsize(tfm);
@@ -709,7 +671,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
aead_request_set_ad(req, template[i].alen);
- ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+ : crypto_aead_decrypt(req), &wait);
switch (ret) {
case 0:
@@ -722,13 +685,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out;
}
break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
case -EBADMSG:
if (template[i].novrfy)
/* verification failure was expected */
@@ -866,7 +822,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
aead_request_set_ad(req, template[i].alen);
- ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+ : crypto_aead_decrypt(req), &wait);
switch (ret) {
case 0:
@@ -879,13 +836,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
goto out;
}
break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
case -EBADMSG:
if (template[i].novrfy)
/* verification failure was expected */
@@ -1083,7 +1033,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
struct scatterlist sg[8];
struct scatterlist sgout[8];
const char *e, *d;
- struct tcrypt_result result;
+ struct crypto_wait wait;
void *data;
char iv[MAX_IVLEN];
char *xbuf[XBUFSIZE];
@@ -1107,7 +1057,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
else
e = "decryption";
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -1117,7 +1067,7 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
}
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
j = 0;
for (i = 0; i < tcount; i++) {
@@ -1164,21 +1114,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
template[i].ilen, iv);
- ret = enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
+ crypto_skcipher_decrypt(req), &wait);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
- /* fall through */
- default:
+ if (ret) {
pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
d, e, j, algo, -ret);
goto out;
@@ -1272,21 +1211,10 @@ static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
template[i].ilen, iv);
- ret = enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req);
+ ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
+ crypto_skcipher_decrypt(req), &wait);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&result.completion);
- reinit_completion(&result.completion);
- ret = result.err;
- if (!ret)
- break;
- /* fall through */
- default:
+ if (ret) {
pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
d, e, j, algo, -ret);
goto out;
@@ -1462,7 +1390,7 @@ static int test_acomp(struct crypto_acomp *tfm,
int ret;
struct scatterlist src, dst;
struct acomp_req *req;
- struct tcrypt_result result;
+ struct crypto_wait wait;
output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
if (!output)
@@ -1486,7 +1414,7 @@ static int test_acomp(struct crypto_acomp *tfm,
}
memset(output, 0, dlen);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen);
@@ -1501,9 +1429,9 @@ static int test_acomp(struct crypto_acomp *tfm,
acomp_request_set_params(req, &src, &dst, ilen, dlen);
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
- ret = wait_async_op(&result, crypto_acomp_compress(req));
+ ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
@@ -1516,10 +1444,10 @@ static int test_acomp(struct crypto_acomp *tfm,
dlen = COMP_BUF_SIZE;
sg_init_one(&src, output, ilen);
sg_init_one(&dst, decomp_out, dlen);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
acomp_request_set_params(req, &src, &dst, ilen, dlen);
- ret = wait_async_op(&result, crypto_acomp_decompress(req));
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
@@ -1563,7 +1491,7 @@ static int test_acomp(struct crypto_acomp *tfm,
}
memset(output, 0, dlen);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen);
@@ -1578,9 +1506,9 @@ static int test_acomp(struct crypto_acomp *tfm,
acomp_request_set_params(req, &src, &dst, ilen, dlen);
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
- ret = wait_async_op(&result, crypto_acomp_decompress(req));
+ ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
if (ret) {
pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
@@ -2000,7 +1928,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
void *a_public = NULL;
void *a_ss = NULL;
void *shared_secret = NULL;
- struct tcrypt_result result;
+ struct crypto_wait wait;
unsigned int out_len_max;
int err = -ENOMEM;
struct scatterlist src, dst;
@@ -2009,7 +1937,7 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
if (!req)
return err;
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
if (err < 0)
@@ -2027,10 +1955,10 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
sg_init_one(&dst, output_buf, out_len_max);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
/* Compute party A's public key */
- err = wait_async_op(&result, crypto_kpp_generate_public_key(req));
+ err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
if (err) {
pr_err("alg: %s: Party A: generate public key test failed. err %d\n",
alg, err);
@@ -2069,8 +1997,8 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
kpp_request_set_input(req, &src, vec->b_public_size);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
- err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req));
+ crypto_req_done, &wait);
+ err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
if (err) {
pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n",
alg, err);
@@ -2100,9 +2028,9 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
kpp_request_set_input(req, &src, vec->expected_a_public_size);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
- err = wait_async_op(&result,
- crypto_kpp_compute_shared_secret(req));
+ crypto_req_done, &wait);
+ err = crypto_wait_req(crypto_kpp_compute_shared_secret(req),
+ &wait);
if (err) {
pr_err("alg: %s: Party B: compute shared secret failed. err %d\n",
alg, err);
@@ -2179,7 +2107,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
struct akcipher_request *req;
void *outbuf_enc = NULL;
void *outbuf_dec = NULL;
- struct tcrypt_result result;
+ struct crypto_wait wait;
unsigned int out_len_max, out_len = 0;
int err = -ENOMEM;
struct scatterlist src, dst, src_tab[2];
@@ -2191,7 +2119,7 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
if (!req)
goto free_xbuf;
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
if (vecs->public_key_vec)
err = crypto_akcipher_set_pub_key(tfm, vecs->key,
@@ -2220,13 +2148,13 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
out_len_max);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- tcrypt_complete, &result);
+ crypto_req_done, &wait);
- err = wait_async_op(&result, vecs->siggen_sigver_test ?
- /* Run asymmetric signature generation */
- crypto_akcipher_sign(req) :
- /* Run asymmetric encrypt */
- crypto_akcipher_encrypt(req));
+ err = crypto_wait_req(vecs->siggen_sigver_test ?
+ /* Run asymmetric signature generation */
+ crypto_akcipher_sign(req) :
+ /* Run asymmetric encrypt */
+ crypto_akcipher_encrypt(req), &wait);
if (err) {
pr_err("alg: akcipher: encrypt test failed. err %d\n", err);
goto free_all;
@@ -2261,14 +2189,14 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
sg_init_one(&src, xbuf[0], vecs->c_size);
sg_init_one(&dst, outbuf_dec, out_len_max);
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
- err = wait_async_op(&result, vecs->siggen_sigver_test ?
- /* Run asymmetric signature verification */
- crypto_akcipher_verify(req) :
- /* Run asymmetric decrypt */
- crypto_akcipher_decrypt(req));
+ err = crypto_wait_req(vecs->siggen_sigver_test ?
+ /* Run asymmetric signature verification */
+ crypto_akcipher_verify(req) :
+ /* Run asymmetric decrypt */
+ crypto_akcipher_decrypt(req), &wait);
if (err) {
pr_err("alg: akcipher: decrypt test failed. err %d\n", err);
goto free_all;
@@ -3500,6 +3428,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(sha512_tv_template)
}
}, {
+ .alg = "sm3",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(sm3_tv_template)
+ }
+ }, {
.alg = "tgr128",
.test = alg_test_hash,
.suite = {
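
The testmgr.c hunks above replace the driver-local tcrypt_result/wait_async_op plumbing with the generic crypto_wait helpers. A minimal sketch of the resulting calling convention, assuming a kmalloc'ed buffer and the illustrative "cbc(aes)" transform (function name, key handling and error paths are illustrative, not taken from the patch):

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Sketch only: encrypt one buffer synchronously over the async API. */
static int example_encrypt(u8 *buf, unsigned int len,
			   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);	/* illustrative algorithm */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	crypto_init_wait(&wait);
	sg_init_one(&sg, buf, len);	/* buf must be DMA-able, not stack memory */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Returns only the final status; waits out -EINPROGRESS/-EBUSY. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}

crypto_wait_req() absorbs the 0 / -EINPROGRESS / -EBUSY cases that the removed switch statements handled by hand, so callers only ever see the final status.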
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index d54971d2d1c8..a714b6293959 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1497,6 +1497,73 @@ static const struct hash_testvec crct10dif_tv_template[] = {
}
};
+/* Example vectors below taken from
+ * http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
+ *
+ * The rest taken from
+ * https://github.com/adamws/oscca-sm3
+ */
+static const struct hash_testvec sm3_tv_template[] = {
+ {
+ .plaintext = "",
+ .psize = 0,
+ .digest = (u8 *)(u8 []) {
+ 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
+ 0x8E, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
+ 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
+ 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B }
+ }, {
+ .plaintext = "a",
+ .psize = 1,
+ .digest = (u8 *)(u8 []) {
+ 0x62, 0x34, 0x76, 0xAC, 0x18, 0xF6, 0x5A, 0x29,
+ 0x09, 0xE4, 0x3C, 0x7F, 0xEC, 0x61, 0xB4, 0x9C,
+ 0x7E, 0x76, 0x4A, 0x91, 0xA1, 0x8C, 0xCB, 0x82,
+ 0xF1, 0x91, 0x7A, 0x29, 0xC8, 0x6C, 0x5E, 0x88 }
+ }, {
+ /* A.1. Example 1 */
+ .plaintext = "abc",
+ .psize = 3,
+ .digest = (u8 *)(u8 []) {
+ 0x66, 0xC7, 0xF0, 0xF4, 0x62, 0xEE, 0xED, 0xD9,
+ 0xD1, 0xF2, 0xD4, 0x6B, 0xDC, 0x10, 0xE4, 0xE2,
+ 0x41, 0x67, 0xC4, 0x87, 0x5C, 0xF2, 0xF7, 0xA2,
+ 0x29, 0x7D, 0xA0, 0x2B, 0x8F, 0x4B, 0xA8, 0xE0 }
+ }, {
+ .plaintext = "abcdefghijklmnopqrstuvwxyz",
+ .psize = 26,
+ .digest = (u8 *)(u8 []) {
+ 0xB8, 0x0F, 0xE9, 0x7A, 0x4D, 0xA2, 0x4A, 0xFC,
+ 0x27, 0x75, 0x64, 0xF6, 0x6A, 0x35, 0x9E, 0xF4,
+ 0x40, 0x46, 0x2A, 0xD2, 0x8D, 0xCC, 0x6D, 0x63,
+ 0xAD, 0xB2, 0x4D, 0x5C, 0x20, 0xA6, 0x15, 0x95 }
+ }, {
+ /* A.1. Example 2 */
+ .plaintext = "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab"
+ "cdabcdabcdabcdabcd",
+ .psize = 64,
+ .digest = (u8 *)(u8 []) {
+ 0xDE, 0xBE, 0x9F, 0xF9, 0x22, 0x75, 0xB8, 0xA1,
+ 0x38, 0x60, 0x48, 0x89, 0xC1, 0x8E, 0x5A, 0x4D,
+ 0x6F, 0xDB, 0x70, 0xE5, 0x38, 0x7E, 0x57, 0x65,
+ 0x29, 0x3D, 0xCB, 0xA3, 0x9C, 0x0C, 0x57, 0x32 }
+ }, {
+ .plaintext = "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+ "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+ "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+ "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+ "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+ "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"
+ "abcdabcdabcdabcdabcdabcdabcdabcd",
+ .psize = 256,
+ .digest = (u8 *)(u8 []) {
+ 0xB9, 0x65, 0x76, 0x4C, 0x8B, 0xEB, 0xB0, 0x91,
+ 0xC7, 0x60, 0x2B, 0x74, 0xAF, 0xD3, 0x4E, 0xEF,
+ 0xB5, 0x31, 0xDC, 0xCB, 0x4E, 0x00, 0x76, 0xD9,
+ 0xB7, 0xCD, 0x81, 0x31, 0x99, 0xB4, 0x59, 0x71 }
+ }
+};
+
/*
* SHA1 test vectors from from FIPS PUB 180-1
* Long vector from CAVS 5.0
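
The sm3_tv_template vectors above are exercised through the generic alg_test_hash path. For a caller, computing an SM3 digest looks like any other shash; a minimal sketch, assuming the "sm3" generic implementation registered elsewhere in this series is available:

#include <crypto/hash.h>
#include <linux/err.h>

static int example_sm3_digest(const u8 *data, unsigned int len, u8 out[32])
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("sm3", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		ret = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return ret;
}

Feeding it the "abc" vector above should reproduce the 0x66, 0xC7, 0xF0, 0xF4, ... digest from A.1 Example 1.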
diff --git a/crypto/xor.c b/crypto/xor.c
index 263af9fb45ea..bce9fe7af40a 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -122,12 +122,7 @@ calibrate_xor_blocks(void)
goto out;
}
- /*
- * Note: Since the memory is not actually used for _anything_ but to
- * test the XOR speed, we don't really want kmemcheck to warn about
- * reading uninitialized bytes here.
- */
- b1 = (void *) __get_free_pages(GFP_KERNEL | __GFP_NOTRACK, 2);
+ b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
if (!b1) {
printk(KERN_WARNING "xor: Yikes! No memory available.\n");
return -ENOMEM;
diff --git a/crypto/xts.c b/crypto/xts.c
index e31828ed0046..f317c48b5e43 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -269,9 +269,7 @@ static int do_encrypt(struct skcipher_request *req, int err)
crypto_skcipher_encrypt(subreq) ?:
post_crypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY &&
- req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
}
@@ -321,9 +319,7 @@ static int do_decrypt(struct skcipher_request *req, int err)
crypto_skcipher_decrypt(subreq) ?:
post_crypt(req);
- if (err == -EINPROGRESS ||
- (err == -EBUSY &&
- req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (err == -EINPROGRESS || err == -EBUSY)
return err;
}
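
The xts.c hunks drop the CRYPTO_TFM_REQ_MAY_BACKLOG test because, under the convention this series settles on, -EBUSY (like -EINPROGRESS) means the request is still in flight and will finish through its completion callback; a backlogged request first signals -EINPROGRESS when it leaves the backlog and then reports its final status. A hedged sketch of a caller-side callback following that convention (struct example_ctx is hypothetical):

#include <linux/completion.h>
#include <linux/crypto.h>

struct example_ctx {		/* hypothetical per-request bookkeeping */
	struct completion done;
	int err;
};

static void example_done(struct crypto_async_request *areq, int err)
{
	struct example_ctx *ctx = areq->data;

	if (err == -EINPROGRESS)
		return;		/* left the backlog; final completion still pending */

	ctx->err = err;
	complete(&ctx->done);
}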
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 1d7af3c2ff27..152744c5ef0f 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -209,4 +209,6 @@ source "drivers/tee/Kconfig"
source "drivers/mux/Kconfig"
+source "drivers/opp/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index d242d3514d30..1d034b680431 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -126,6 +126,7 @@ obj-$(CONFIG_ACCESSIBILITY) += accessibility/
obj-$(CONFIG_ISDN) += isdn/
obj-$(CONFIG_EDAC) += edac/
obj-$(CONFIG_EISA) += eisa/
+obj-$(CONFIG_PM_OPP) += opp/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 5b1938f4b626..46505396869e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -81,6 +81,11 @@ endif
config ACPI_SPCR_TABLE
bool
+config ACPI_LPIT
+ bool
+ depends on X86_64
+ default y
+
config ACPI_SLEEP
bool
depends on SUSPEND || HIBERNATION
@@ -522,6 +527,12 @@ config CHT_WC_PMIC_OPREGION
help
This config adds ACPI operation region support for CHT Whiskey Cove PMIC.
+config CHT_DC_TI_PMIC_OPREGION
+ bool "ACPI operation region support for Dollar Cove TI PMIC"
+ depends on INTEL_SOC_PMIC_CHTDC_TI
+ help
+ This config adds ACPI operation region support for Dollar Cove TI PMIC.
+
endif
config ACPI_CONFIGFS
@@ -536,4 +547,20 @@ if ARM64
source "drivers/acpi/arm64/Kconfig"
endif
+config TPS68470_PMIC_OPREGION
+ bool "ACPI operation region support for TPS68470 PMIC"
+ depends on MFD_TPS68470
+ help
+ This config adds ACPI operation region support for TI TPS68470 PMIC.
+ TPS68470 device is an advanced power management unit that powers
+ a Compact Camera Module (CCM), generates clocks for image sensors,
+ drives a dual LED for flash and incorporates two LED drivers for
+ general purpose indicators.
+ This driver enables ACPI operation region support to control voltage
+ regulators and clocks.
+
+ This option is a bool as it provides an ACPI operation
+ region, which must be available before any of the devices
+ using this are probed.
+
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index cd1abc9bc325..41954a601989 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -57,6 +57,7 @@ acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
acpi-y += acpi_lpat.o
+acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
@@ -105,9 +106,12 @@ obj-$(CONFIG_CRC_PMIC_OPREGION) += pmic/intel_pmic_crc.o
obj-$(CONFIG_XPOWER_PMIC_OPREGION) += pmic/intel_pmic_xpower.o
obj-$(CONFIG_BXT_WC_PMIC_OPREGION) += pmic/intel_pmic_bxtwc.o
obj-$(CONFIG_CHT_WC_PMIC_OPREGION) += pmic/intel_pmic_chtwc.o
+obj-$(CONFIG_CHT_DC_TI_PMIC_OPREGION) += pmic/intel_pmic_chtdc_ti.o
obj-$(CONFIG_ACPI_CONFIGFS) += acpi_configfs.o
+obj-$(CONFIG_TPS68470_PMIC_OPREGION) += pmic/tps68470_pmic.o
+
video-objs += acpi_video.o video_detect.o
obj-y += dptf/
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 8f52483219ba..47a7ed557bd6 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -265,6 +265,7 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
+ /* fall through */
case ACPI_AC_NOTIFY_STATUS:
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index d5999eb41c00..d553b0087947 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -116,6 +116,10 @@ static const struct apd_device_desc hip08_i2c_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 250000000,
};
+static const struct apd_device_desc thunderx2_i2c_desc = {
+ .setup = acpi_apd_setup,
+ .fixed_clk_rate = 125000000,
+};
#endif
#else
@@ -180,6 +184,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
{ "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
{ "BRCM900D", APD_ADDR(vulcan_spi_desc) },
{ "CAV900D", APD_ADDR(vulcan_spi_desc) },
+ { "CAV9007", APD_ADDR(thunderx2_i2c_desc) },
{ "HISI02A1", APD_ADDR(hip07_i2c_desc) },
{ "HISI02A2", APD_ADDR(hip08_i2c_desc) },
#endif
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index 853bc7fc673f..b58850389094 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -204,7 +204,7 @@ struct configfs_attribute *acpi_table_attrs[] = {
NULL,
};
-static struct config_item_type acpi_table_type = {
+static const struct config_item_type acpi_table_type = {
.ct_owner = THIS_MODULE,
.ct_bin_attrs = acpi_table_bin_attrs,
.ct_attrs = acpi_table_attrs,
@@ -237,12 +237,12 @@ struct configfs_group_operations acpi_table_group_ops = {
.drop_item = acpi_table_drop_item,
};
-static struct config_item_type acpi_tables_type = {
+static const struct config_item_type acpi_tables_type = {
.ct_owner = THIS_MODULE,
.ct_group_ops = &acpi_table_group_ops,
};
-static struct config_item_type acpi_root_group_type = {
+static const struct config_item_type acpi_root_group_type = {
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
new file mode 100644
index 000000000000..e94e478dd18b
--- /dev/null
+++ b/drivers/acpi/acpi_lpit.c
@@ -0,0 +1,162 @@
+
+/*
+ * acpi_lpit.c - LPIT table processing functions
+ *
+ * Copyright (C) 2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/acpi.h>
+#include <asm/msr.h>
+#include <asm/tsc.h>
+
+struct lpit_residency_info {
+ struct acpi_generic_address gaddr;
+ u64 frequency;
+ void __iomem *iomem_addr;
+};
+
+/* Storage for memory mapped and FFH based entries */
+static struct lpit_residency_info residency_info_mem;
+static struct lpit_residency_info residency_info_ffh;
+
+static int lpit_read_residency_counter_us(u64 *counter, bool io_mem)
+{
+ int err;
+
+ if (io_mem) {
+ u64 count = 0;
+ int error;
+
+ error = acpi_os_read_iomem(residency_info_mem.iomem_addr, &count,
+ residency_info_mem.gaddr.bit_width);
+ if (error)
+ return error;
+
+ *counter = div64_u64(count * 1000000ULL, residency_info_mem.frequency);
+ return 0;
+ }
+
+ err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter);
+ if (!err) {
+ u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset +
+ residency_info_ffh.gaddr.bit_width - 1,
+ residency_info_ffh.gaddr.bit_offset);
+
+ *counter &= mask;
+ *counter >>= residency_info_ffh.gaddr.bit_offset;
+ *counter = div64_u64(*counter * 1000000ULL, residency_info_ffh.frequency);
+ return 0;
+ }
+
+ return -ENODATA;
+}
+
+static ssize_t low_power_idle_system_residency_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 counter;
+ int ret;
+
+ ret = lpit_read_residency_counter_us(&counter, true);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%llu\n", counter);
+}
+static DEVICE_ATTR_RO(low_power_idle_system_residency_us);
+
+static ssize_t low_power_idle_cpu_residency_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 counter;
+ int ret;
+
+ ret = lpit_read_residency_counter_us(&counter, false);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%llu\n", counter);
+}
+static DEVICE_ATTR_RO(low_power_idle_cpu_residency_us);
+
+int lpit_read_residency_count_address(u64 *address)
+{
+ if (!residency_info_mem.gaddr.address)
+ return -EINVAL;
+
+ *address = residency_info_mem.gaddr.address;
+
+ return 0;
+}
+
+static void lpit_update_residency(struct lpit_residency_info *info,
+ struct acpi_lpit_native *lpit_native)
+{
+ info->frequency = lpit_native->counter_frequency ?
+ lpit_native->counter_frequency : tsc_khz * 1000;
+ if (!info->frequency)
+ info->frequency = 1;
+
+ info->gaddr = lpit_native->residency_counter;
+ if (info->gaddr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ info->iomem_addr = ioremap_nocache(info->gaddr.address,
+ info->gaddr.bit_width / 8);
+ if (!info->iomem_addr)
+ return;
+
+ /* Silently fail, if cpuidle attribute group is not present */
+ sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
+ &dev_attr_low_power_idle_system_residency_us.attr,
+ "cpuidle");
+ } else if (info->gaddr.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+ /* Silently fail, if cpuidle attribute group is not present */
+ sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
+ &dev_attr_low_power_idle_cpu_residency_us.attr,
+ "cpuidle");
+ }
+}
+
+static void lpit_process(u64 begin, u64 end)
+{
+ while (begin + sizeof(struct acpi_lpit_native) < end) {
+ struct acpi_lpit_native *lpit_native = (struct acpi_lpit_native *)begin;
+
+ if (!lpit_native->header.type && !lpit_native->header.flags) {
+ if (lpit_native->residency_counter.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ !residency_info_mem.gaddr.address) {
+ lpit_update_residency(&residency_info_mem, lpit_native);
+ } else if (lpit_native->residency_counter.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
+ !residency_info_ffh.gaddr.address) {
+ lpit_update_residency(&residency_info_ffh, lpit_native);
+ }
+ }
+ begin += lpit_native->header.length;
+ }
+}
+
+void acpi_init_lpit(void)
+{
+ acpi_status status;
+ u64 lpit_begin;
+ struct acpi_table_lpit *lpit;
+
+ status = acpi_get_table(ACPI_SIG_LPIT, 0, (struct acpi_table_header **)&lpit);
+
+ if (ACPI_FAILURE(status))
+ return;
+
+ lpit_begin = (u64)lpit + sizeof(*lpit);
+ lpit_process(lpit_begin, lpit_begin + lpit->header.length);
+}
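
The attributes created by acpi_lpit.c expose the LPIT residency counters in microseconds (counter ticks scaled by the counter frequency, falling back to the TSC rate for the FFH/MSR case). A small userspace sketch for reading them; the sysfs paths follow from the cpu subsystem root plus the "cpuidle" group, and the files only exist when the firmware provides a matching LPIT entry:

#include <stdio.h>

static long long read_residency_us(const char *path)
{
	long long us = -1;
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (fscanf(f, "%lld", &us) != 1)
		us = -1;
	fclose(f);
	return us;
}

/*
 * Example:
 *   read_residency_us("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us");
 *   read_residency_us("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us");
 */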
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 032ae44710e5..7f2b02cc8ea1 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -362,7 +362,7 @@ static int register_device_clock(struct acpi_device *adev,
{
const struct lpss_device_desc *dev_desc = pdata->dev_desc;
const char *devname = dev_name(&adev->dev);
- struct clk *clk = ERR_PTR(-ENODEV);
+ struct clk *clk;
struct lpss_clk_data *clk_data;
const char *parent, *clk_name;
void __iomem *prv_base;
@@ -693,7 +693,7 @@ static int acpi_lpss_activate(struct device *dev)
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
- ret = acpi_dev_runtime_resume(dev);
+ ret = acpi_dev_resume(dev);
if (ret)
return ret;
@@ -713,42 +713,8 @@ static int acpi_lpss_activate(struct device *dev)
static void acpi_lpss_dismiss(struct device *dev)
{
- acpi_dev_runtime_suspend(dev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int acpi_lpss_suspend_late(struct device *dev)
-{
- struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
- int ret;
-
- ret = pm_generic_suspend_late(dev);
- if (ret)
- return ret;
-
- if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
- acpi_lpss_save_ctx(dev, pdata);
-
- return acpi_dev_suspend_late(dev);
-}
-
-static int acpi_lpss_resume_early(struct device *dev)
-{
- struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
- int ret;
-
- ret = acpi_dev_resume_early(dev);
- if (ret)
- return ret;
-
- acpi_lpss_d3_to_d0_delay(pdata);
-
- if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
- acpi_lpss_restore_ctx(dev, pdata);
-
- return pm_generic_resume_early(dev);
+ acpi_dev_suspend(dev, false);
}
-#endif /* CONFIG_PM_SLEEP */
/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP 0xA0
@@ -835,19 +801,15 @@ static void lpss_iosf_exit_d3_state(void)
mutex_unlock(&lpss_iosf_mutex);
}
-static int acpi_lpss_runtime_suspend(struct device *dev)
+static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
- ret = pm_generic_runtime_suspend(dev);
- if (ret)
- return ret;
-
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
acpi_lpss_save_ctx(dev, pdata);
- ret = acpi_dev_runtime_suspend(dev);
+ ret = acpi_dev_suspend(dev, wakeup);
/*
* This call must be last in the sequence, otherwise PMC will return
@@ -860,7 +822,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
return ret;
}
-static int acpi_lpss_runtime_resume(struct device *dev)
+static int acpi_lpss_resume(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
@@ -872,7 +834,7 @@ static int acpi_lpss_runtime_resume(struct device *dev)
if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_exit_d3_state();
- ret = acpi_dev_runtime_resume(dev);
+ ret = acpi_dev_resume(dev);
if (ret)
return ret;
@@ -881,7 +843,41 @@ static int acpi_lpss_runtime_resume(struct device *dev)
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
acpi_lpss_restore_ctx(dev, pdata);
- return pm_generic_runtime_resume(dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int acpi_lpss_suspend_late(struct device *dev)
+{
+ int ret;
+
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ ret = pm_generic_suspend_late(dev);
+ return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
+}
+
+static int acpi_lpss_resume_early(struct device *dev)
+{
+ int ret = acpi_lpss_resume(dev);
+
+ return ret ? ret : pm_generic_resume_early(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int acpi_lpss_runtime_suspend(struct device *dev)
+{
+ int ret = pm_generic_runtime_suspend(dev);
+
+ return ret ? ret : acpi_lpss_suspend(dev, true);
+}
+
+static int acpi_lpss_runtime_resume(struct device *dev)
+{
+ int ret = acpi_lpss_resume(dev);
+
+ return ret ? ret : pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */
@@ -894,13 +890,20 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
.prepare = acpi_subsys_prepare,
- .complete = pm_complete_with_resume_check,
+ .complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
.suspend_late = acpi_lpss_suspend_late,
+ .suspend_noirq = acpi_subsys_suspend_noirq,
+ .resume_noirq = acpi_subsys_resume_noirq,
.resume_early = acpi_lpss_resume_early,
.freeze = acpi_subsys_freeze,
+ .freeze_late = acpi_subsys_freeze_late,
+ .freeze_noirq = acpi_subsys_freeze_noirq,
+ .thaw_noirq = acpi_subsys_thaw_noirq,
.poweroff = acpi_subsys_suspend,
.poweroff_late = acpi_lpss_suspend_late,
+ .poweroff_noirq = acpi_subsys_suspend_noirq,
+ .restore_noirq = acpi_subsys_resume_noirq,
.restore_early = acpi_lpss_resume_early,
#endif
.runtime_suspend = acpi_lpss_runtime_suspend,
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 86c10599d9f8..449d86d39965 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -82,6 +82,7 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
* PIIX4 models.
*/
errata.piix4.throttle = 1;
+ /* fall through */
case 2: /* PIIX4E */
case 3: /* PIIX4M */
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index e05232da0588..71f6f2624deb 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -178,6 +178,7 @@ acpi-y += \
utresrc.o \
utstate.o \
utstring.o \
+ utstrsuppt.o \
utstrtoul64.o \
utxface.o \
utxfinit.o \
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index fd4f3cacb356..cd722d8edacb 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -66,9 +66,9 @@ acpi_status
acpi_hw_validate_register(struct acpi_generic_address *reg,
u8 max_bit_width, u64 *address);
-acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg);
+acpi_status acpi_hw_read(u64 *value, struct acpi_generic_address *reg);
-acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg);
+acpi_status acpi_hw_write(u64 value, struct acpi_generic_address *reg);
struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id);
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 29a863c85318..29555c8789a3 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -101,7 +101,8 @@ typedef const struct acpi_exdump_info {
*/
acpi_status
acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
- union acpi_operand_object **result_desc, u32 flags);
+ union acpi_operand_object **result_desc,
+ u32 implicit_conversion);
acpi_status
acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
@@ -424,9 +425,6 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
struct acpi_walk_state *walk_state,
u8 implicit_conversion);
-#define ACPI_IMPLICIT_CONVERSION TRUE
-#define ACPI_NO_IMPLICIT_CONVERSION FALSE
-
/*
* exstoren - resolve/store object
*/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 745134ade35f..83b75e9db7ef 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -141,6 +141,11 @@ extern const char *acpi_gbl_ptyp_decode[];
#define ACPI_MSG_SUFFIX \
acpi_os_printf (" (%8.8X/%s-%u)\n", ACPI_CA_VERSION, module_name, line_number)
+/* Flags to indicate implicit or explicit string-to-integer conversion */
+
+#define ACPI_IMPLICIT_CONVERSION TRUE
+#define ACPI_NO_IMPLICIT_CONVERSION FALSE
+
/* Types for Resource descriptor entries */
#define ACPI_INVALID_RESOURCE 0
@@ -197,15 +202,31 @@ void acpi_ut_strlwr(char *src_string);
int acpi_ut_stricmp(char *string1, char *string2);
-acpi_status acpi_ut_strtoul64(char *string, u32 flags, u64 *ret_integer);
+/*
+ * utstrsuppt - string-to-integer conversion support functions
+ */
+acpi_status acpi_ut_convert_octal_string(char *string, u64 *return_value);
+
+acpi_status acpi_ut_convert_decimal_string(char *string, u64 *return_value_ptr);
+
+acpi_status acpi_ut_convert_hex_string(char *string, u64 *return_value_ptr);
+
+char acpi_ut_remove_whitespace(char **string);
+
+char acpi_ut_remove_leading_zeros(char **string);
+
+u8 acpi_ut_detect_hex_prefix(char **string);
+
+u8 acpi_ut_detect_octal_prefix(char **string);
/*
- * Values for Flags above
- * Note: LIMIT values correspond to acpi_gbl_integer_byte_width values (4/8)
+ * utstrtoul64 - string-to-integer conversion functions
*/
-#define ACPI_STRTOUL_32BIT 0x04 /* 4 bytes */
-#define ACPI_STRTOUL_64BIT 0x08 /* 8 bytes */
-#define ACPI_STRTOUL_BASE16 0x10 /* Default: Base10/16 */
+acpi_status acpi_ut_strtoul64(char *string, u64 *ret_integer);
+
+u64 acpi_ut_explicit_strtoul64(char *string);
+
+u64 acpi_ut_implicit_strtoul64(char *string);
/*
* utglobal - Global data structures and procedures
diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
index 857dbc43a9b1..32d546f0db2f 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -277,10 +277,7 @@ acpi_db_convert_to_object(acpi_object_type type,
default:
object->type = ACPI_TYPE_INTEGER;
- status = acpi_ut_strtoul64(string,
- (acpi_gbl_integer_byte_width |
- ACPI_STRTOUL_BASE16),
- &object->integer.value);
+ status = acpi_ut_strtoul64(string, &object->integer.value);
break;
}
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 20d7744b06ae..22f45d090733 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -134,7 +134,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
* object. Implicitly convert the argument if necessary.
*/
status = acpi_ex_convert_to_integer(obj_desc, &local_obj_desc,
- ACPI_STRTOUL_BASE16);
+ ACPI_IMPLICIT_CONVERSION);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 229382035550..263d8fc4a9e2 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -390,8 +390,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
struct acpi_gpe_handler_info *gpe_handler_info;
u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
u8 enabled_status_byte;
- u32 status_reg;
- u32 enable_reg;
+ u64 status_reg;
+ u64 enable_reg;
acpi_cpu_flags flags;
u32 i;
u32 j;
@@ -472,7 +472,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
gpe_register_info->base_gpe_number,
gpe_register_info->base_gpe_number +
(ACPI_GPE_REGISTER_WIDTH - 1),
- status_reg, enable_reg,
+ (u32)status_reg, (u32)enable_reg,
gpe_register_info->enable_for_run,
gpe_register_info->enable_for_wake));
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index 76bfb7dcae2f..59b8de2f07d3 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -156,7 +156,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
status =
acpi_ex_convert_to_integer(local_operand1, &temp_operand1,
- ACPI_STRTOUL_BASE16);
+ ACPI_IMPLICIT_CONVERSION);
break;
case ACPI_TYPE_BUFFER:
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index f71028e334ee..23ebadb06a95 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -57,10 +57,10 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length);
*
* FUNCTION: acpi_ex_convert_to_integer
*
- * PARAMETERS: obj_desc - Object to be converted. Must be an
- * Integer, Buffer, or String
- * result_desc - Where the new Integer object is returned
- * flags - Used for string conversion
+ * PARAMETERS: obj_desc - Object to be converted. Must be an
+ * Integer, Buffer, or String
+ * result_desc - Where the new Integer object is returned
+ * implicit_conversion - Used for string conversion
*
* RETURN: Status
*
@@ -70,14 +70,14 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length);
acpi_status
acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
- union acpi_operand_object **result_desc, u32 flags)
+ union acpi_operand_object **result_desc,
+ u32 implicit_conversion)
{
union acpi_operand_object *return_desc;
u8 *pointer;
u64 result;
u32 i;
u32 count;
- acpi_status status;
ACPI_FUNCTION_TRACE_PTR(ex_convert_to_integer, obj_desc);
@@ -123,12 +123,18 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
* hexadecimal as per the ACPI specification. The only exception (as
* of ACPI 3.0) is that the to_integer() operator allows both decimal
* and hexadecimal strings (hex prefixed with "0x").
+ *
+ * Explicit conversion is used only by to_integer.
+ * All other string-to-integer conversions are implicit conversions.
*/
- status = acpi_ut_strtoul64(ACPI_CAST_PTR(char, pointer),
- (acpi_gbl_integer_byte_width |
- flags), &result);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ if (implicit_conversion) {
+ result =
+ acpi_ut_implicit_strtoul64(ACPI_CAST_PTR
+ (char, pointer));
+ } else {
+ result =
+ acpi_ut_explicit_strtoul64(ACPI_CAST_PTR
+ (char, pointer));
}
break;
@@ -631,7 +637,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
*/
status =
acpi_ex_convert_to_integer(source_desc, result_desc,
- ACPI_STRTOUL_BASE16);
+ ACPI_IMPLICIT_CONVERSION);
break;
case ACPI_TYPE_STRING:
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 1e7649ce0a7b..dbad3ebd7df5 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -330,7 +330,7 @@ acpi_ex_do_logical_op(u16 opcode,
case ACPI_TYPE_INTEGER:
status = acpi_ex_convert_to_integer(operand1, &local_operand1,
- ACPI_STRTOUL_BASE16);
+ ACPI_IMPLICIT_CONVERSION);
break;
case ACPI_TYPE_STRING:
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index c4852429e2ff..1c7c9962b0de 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -415,7 +415,7 @@ acpi_ex_resolve_operands(u16 opcode,
* Known as "Implicit Source Operand Conversion"
*/
status = acpi_ex_convert_to_integer(obj_desc, stack_ptr,
- ACPI_STRTOUL_BASE16);
+ ACPI_IMPLICIT_CONVERSION);
if (ACPI_FAILURE(status)) {
if (status == AE_TYPE) {
ACPI_ERROR((AE_INFO,
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 5eb11b30a79e..09b6822aa5cc 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -99,7 +99,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
{
struct acpi_gpe_register_info *gpe_register_info;
acpi_status status = AE_OK;
- u32 enable_mask;
+ u64 enable_mask;
u32 register_bit;
ACPI_FUNCTION_ENTRY();
@@ -214,7 +214,7 @@ acpi_status
acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
acpi_event_status *event_status)
{
- u32 in_byte;
+ u64 in_byte;
u32 register_bit;
struct acpi_gpe_register_info *gpe_register_info;
acpi_event_status local_event_status = 0;
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index acb417b58bbb..aa6e00081915 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -220,16 +220,15 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
*
* RETURN: Status
*
- * DESCRIPTION: Read from either memory or IO space. This is a 32-bit max
- * version of acpi_read, used internally since the overhead of
- * 64-bit values is not needed.
+ * DESCRIPTION: Read from either memory or IO space. This is a 64-bit max
+ * version of acpi_read.
*
* LIMITATIONS: <These limitations also apply to acpi_hw_write>
* space_ID must be system_memory or system_IO.
*
******************************************************************************/
-acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
+acpi_status acpi_hw_read(u64 *value, struct acpi_generic_address *reg)
{
u64 address;
u8 access_width;
@@ -244,17 +243,17 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
/* Validate contents of the GAS register */
- status = acpi_hw_validate_register(reg, 32, &address);
+ status = acpi_hw_validate_register(reg, 64, &address);
if (ACPI_FAILURE(status)) {
return (status);
}
/*
- * Initialize entire 32-bit return value to zero, convert access_width
+ * Initialize entire 64-bit return value to zero, convert access_width
* into number of bits based
*/
*value = 0;
- access_width = acpi_hw_get_access_bit_width(address, reg, 32);
+ access_width = acpi_hw_get_access_bit_width(address, reg, 64);
bit_width = reg->bit_offset + reg->bit_width;
bit_offset = reg->bit_offset;
@@ -265,7 +264,7 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
index = 0;
while (bit_width) {
if (bit_offset >= access_width) {
- value32 = 0;
+ value64 = 0;
bit_offset -= access_width;
} else {
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
@@ -276,7 +275,6 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
ACPI_DIV_8
(access_width),
&value64, access_width);
- value32 = (u32)value64;
} else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
status = acpi_hw_read_port((acpi_io_address)
@@ -286,15 +284,16 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
(access_width),
&value32,
access_width);
+ value64 = (u64)value32;
}
}
/*
* Use offset style bit writes because "Index * AccessWidth" is
- * ensured to be less than 32-bits by acpi_hw_validate_register().
+ * ensured to be less than 64-bits by acpi_hw_validate_register().
*/
ACPI_SET_BITS(value, index * access_width,
- ACPI_MASK_BITS_ABOVE_32(access_width), value32);
+ ACPI_MASK_BITS_ABOVE_64(access_width), value64);
bit_width -=
bit_width > access_width ? access_width : bit_width;
@@ -302,8 +301,9 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n",
- *value, access_width, ACPI_FORMAT_UINT64(address),
+ "Read: %8.8X%8.8X width %2d from %8.8X%8.8X (%s)\n",
+ ACPI_FORMAT_UINT64(*value), access_width,
+ ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
return (status);
@@ -318,20 +318,18 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
*
* RETURN: Status
*
- * DESCRIPTION: Write to either memory or IO space. This is a 32-bit max
- * version of acpi_write, used internally since the overhead of
- * 64-bit values is not needed.
+ * DESCRIPTION: Write to either memory or IO space. This is a 64-bit max
+ * version of acpi_write.
*
******************************************************************************/
-acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
+acpi_status acpi_hw_write(u64 value, struct acpi_generic_address *reg)
{
u64 address;
u8 access_width;
u32 bit_width;
u8 bit_offset;
u64 value64;
- u32 value32;
u8 index;
acpi_status status;
@@ -339,14 +337,14 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
/* Validate contents of the GAS register */
- status = acpi_hw_validate_register(reg, 32, &address);
+ status = acpi_hw_validate_register(reg, 64, &address);
if (ACPI_FAILURE(status)) {
return (status);
}
/* Convert access_width into number of bits based */
- access_width = acpi_hw_get_access_bit_width(address, reg, 32);
+ access_width = acpi_hw_get_access_bit_width(address, reg, 64);
bit_width = reg->bit_offset + reg->bit_width;
bit_offset = reg->bit_offset;
@@ -358,16 +356,15 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
while (bit_width) {
/*
* Use offset style bit reads because "Index * AccessWidth" is
- * ensured to be less than 32-bits by acpi_hw_validate_register().
+ * ensured to be less than 64-bits by acpi_hw_validate_register().
*/
- value32 = ACPI_GET_BITS(&value, index * access_width,
- ACPI_MASK_BITS_ABOVE_32(access_width));
+ value64 = ACPI_GET_BITS(&value, index * access_width,
+ ACPI_MASK_BITS_ABOVE_64(access_width));
if (bit_offset >= access_width) {
bit_offset -= access_width;
} else {
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- value64 = (u64)value32;
status =
acpi_os_write_memory((acpi_physical_address)
address +
@@ -382,7 +379,7 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
index *
ACPI_DIV_8
(access_width),
- value32,
+ (u32)value64,
access_width);
}
}
@@ -397,8 +394,9 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
- value, access_width, ACPI_FORMAT_UINT64(address),
+ "Wrote: %8.8X%8.8X width %2d to %8.8X%8.8X (%s)\n",
+ ACPI_FORMAT_UINT64(value), access_width,
+ ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
return (status);
@@ -526,6 +524,7 @@ acpi_status acpi_hw_write_pm1_control(u32 pm1a_control, u32 pm1b_control)
acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
{
u32 value = 0;
+ u64 value64;
acpi_status status;
ACPI_FUNCTION_TRACE(hw_register_read);
@@ -564,12 +563,14 @@ acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */
status =
- acpi_hw_read(&value, &acpi_gbl_FADT.xpm2_control_block);
+ acpi_hw_read(&value64, &acpi_gbl_FADT.xpm2_control_block);
+ value = (u32)value64;
break;
case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
- status = acpi_hw_read(&value, &acpi_gbl_FADT.xpm_timer_block);
+ status = acpi_hw_read(&value64, &acpi_gbl_FADT.xpm_timer_block);
+ value = (u32)value64;
break;
case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
@@ -586,7 +587,7 @@ acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
}
if (ACPI_SUCCESS(status)) {
- *return_value = value;
+ *return_value = (u32)value;
}
return_ACPI_STATUS(status);
@@ -622,6 +623,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
{
acpi_status status;
u32 read_value;
+ u64 read_value64;
ACPI_FUNCTION_TRACE(hw_register_write);
@@ -685,11 +687,12 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
* as per the ACPI spec.
*/
status =
- acpi_hw_read(&read_value,
+ acpi_hw_read(&read_value64,
&acpi_gbl_FADT.xpm2_control_block);
if (ACPI_FAILURE(status)) {
goto exit;
}
+ read_value = (u32)read_value64;
/* Insert the bits to be preserved */
@@ -745,22 +748,25 @@ acpi_hw_read_multiple(u32 *value,
{
u32 value_a = 0;
u32 value_b = 0;
+ u64 value64;
acpi_status status;
/* The first register is always required */
- status = acpi_hw_read(&value_a, register_a);
+ status = acpi_hw_read(&value64, register_a);
if (ACPI_FAILURE(status)) {
return (status);
}
+ value_a = (u32)value64;
/* Second register is optional */
if (register_b->address) {
- status = acpi_hw_read(&value_b, register_b);
+ status = acpi_hw_read(&value64, register_b);
if (ACPI_FAILURE(status)) {
return (status);
}
+ value_b = (u32)value64;
}
/*
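
The hwregs.c changes widen acpi_hw_read()/acpi_hw_write() so that GAS registers up to 64 bits are assembled from (or split into) accesses of access_width bits each. A standalone sketch of the read-side assembly using plain shifts; the real code uses the ACPI_SET_BITS()/ACPI_MASK_BITS_ABOVE_64() macros and also honours the GAS bit_offset, which is omitted here:

#include <linux/types.h>

static u64 example_assemble_register(const u32 *chunks, unsigned int nchunks,
				     unsigned int access_width)
{
	u64 value = 0;
	unsigned int i;

	for (i = 0; i < nchunks && i * access_width < 64; i++) {
		u64 mask = access_width >= 64 ? ~0ULL :
			   (1ULL << access_width) - 1;

		/* Offset-style insert: chunk i lands at bit i * access_width */
		value |= ((u64)chunks[i] & mask) << (i * access_width);
	}

	return value;
}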
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index b3c5d8c754bb..a2f4e25d45b1 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -94,6 +94,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer_resolution)
acpi_status acpi_get_timer(u32 * ticks)
{
acpi_status status;
+ u64 timer_value;
ACPI_FUNCTION_TRACE(acpi_get_timer);
@@ -107,7 +108,14 @@ acpi_status acpi_get_timer(u32 * ticks)
return_ACPI_STATUS(AE_SUPPORT);
}
- status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block);
+ status = acpi_hw_read(&timer_value, &acpi_gbl_FADT.xpm_timer_block);
+ if (ACPI_SUCCESS(status)) {
+
+ /* ACPI PM Timer is defined to be 32 bits (PM_TMR_LEN) */
+
+ *ticks = (u32)timer_value;
+ }
+
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 34684ae89981..b3c6e439933c 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -125,76 +125,12 @@ ACPI_EXPORT_SYMBOL(acpi_reset)
******************************************************************************/
acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
{
- u32 value_lo;
- u32 value_hi;
- u32 width;
- u64 address;
acpi_status status;
ACPI_FUNCTION_NAME(acpi_read);
- if (!return_value) {
- return (AE_BAD_PARAMETER);
- }
-
- /* Validate contents of the GAS register. Allow 64-bit transfers */
-
- status = acpi_hw_validate_register(reg, 64, &address);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /*
- * Two address spaces supported: Memory or I/O. PCI_Config is
- * not supported here because the GAS structure is insufficient
- */
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- status = acpi_os_read_memory((acpi_physical_address)
- address, return_value,
- reg->bit_width);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
-
- value_lo = 0;
- value_hi = 0;
-
- width = reg->bit_width;
- if (width == 64) {
- width = 32; /* Break into two 32-bit transfers */
- }
-
- status = acpi_hw_read_port((acpi_io_address)
- address, &value_lo, width);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- if (reg->bit_width == 64) {
-
- /* Read the top 32 bits */
-
- status = acpi_hw_read_port((acpi_io_address)
- (address + 4), &value_hi,
- 32);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- }
-
- /* Set the return value only if status is AE_OK */
-
- *return_value = (value_lo | ((u64)value_hi << 32));
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Read: %8.8X%8.8X width %2d from %8.8X%8.8X (%s)\n",
- ACPI_FORMAT_UINT64(*return_value), reg->bit_width,
- ACPI_FORMAT_UINT64(address),
- acpi_ut_get_region_name(reg->space_id)));
-
- return (AE_OK);
+ status = acpi_hw_read(return_value, reg);
+ return (status);
}
ACPI_EXPORT_SYMBOL(acpi_read)
@@ -213,59 +149,11 @@ ACPI_EXPORT_SYMBOL(acpi_read)
******************************************************************************/
acpi_status acpi_write(u64 value, struct acpi_generic_address *reg)
{
- u32 width;
- u64 address;
acpi_status status;
ACPI_FUNCTION_NAME(acpi_write);
- /* Validate contents of the GAS register. Allow 64-bit transfers */
-
- status = acpi_hw_validate_register(reg, 64, &address);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /*
- * Two address spaces supported: Memory or IO. PCI_Config is
- * not supported here because the GAS structure is insufficient
- */
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- status = acpi_os_write_memory((acpi_physical_address)
- address, value, reg->bit_width);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
-
- width = reg->bit_width;
- if (width == 64) {
- width = 32; /* Break into two 32-bit transfers */
- }
-
- status = acpi_hw_write_port((acpi_io_address)
- address, ACPI_LODWORD(value),
- width);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- if (reg->bit_width == 64) {
- status = acpi_hw_write_port((acpi_io_address)
- (address + 4),
- ACPI_HIDWORD(value), 32);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- }
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Wrote: %8.8X%8.8X width %2d to %8.8X%8.8X (%s)\n",
- ACPI_FORMAT_UINT64(value), reg->bit_width,
- ACPI_FORMAT_UINT64(address),
- acpi_ut_get_region_name(reg->space_id)));
-
+ status = acpi_hw_write(value, reg);
return (status);
}
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index e4a7da8a11f0..539d775bbc92 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -78,8 +78,8 @@ acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
/* String-to-Integer conversion */
- status = acpi_ut_strtoul64(original_object->string.pointer,
- acpi_gbl_integer_byte_width, &value);
+ status =
+ acpi_ut_strtoul64(original_object->string.pointer, &value);
if (ACPI_FAILURE(status)) {
return (status);
}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 26ad596c973e..5ecb8d2e6834 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -173,10 +173,13 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
/*
- * Only reallocate the root table if the host provided a static buffer
- * for the table array in the call to acpi_initialize_tables.
+ * If there are still unverified tables, the root table list must be
+ * reallocated to clean up invalid table entries. Otherwise, only
+ * reallocate the root table list if the host provided a static buffer
+ * for the table array in the call to acpi_initialize_tables().
*/
- if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
+ if ((acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) &&
+ acpi_gbl_enable_table_validation) {
return_ACPI_STATUS(AE_SUPPORT);
}
diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c
new file mode 100644
index 000000000000..965fb5cec94f
--- /dev/null
+++ b/drivers/acpi/acpica/utstrsuppt.c
@@ -0,0 +1,438 @@
+/*******************************************************************************
+ *
+ * Module Name: utstrsuppt - Support functions for string-to-integer conversion
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2017, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utstrsuppt")
+
+/* Local prototypes */
+static acpi_status
+acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit);
+
+static acpi_status
+acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product);
+
+static acpi_status
+acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_convert_octal_string
+ *
+ * PARAMETERS: string - Null terminated input string
+ * return_value_ptr - Where the converted value is returned
+ *
+ * RETURN: Status and 64-bit converted integer
+ *
+ * DESCRIPTION: Performs a base 8 conversion of the input string to an
+ * integer value, either 32 or 64 bits.
+ *
+ * NOTE: Maximum 64-bit unsigned octal value is 01777777777777777777777
+ * Maximum 32-bit unsigned octal value is 037777777777
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_convert_octal_string(char *string, u64 *return_value_ptr)
+{
+ u64 accumulated_value = 0;
+ acpi_status status = AE_OK;
+
+ /* Convert each ASCII byte in the input string */
+
+ while (*string) {
+
+ /* Character must be ASCII 0-7, otherwise terminate with no error */
+
+ if (!(ACPI_IS_OCTAL_DIGIT(*string))) {
+ break;
+ }
+
+ /* Convert and insert this octal digit into the accumulator */
+
+ status = acpi_ut_insert_digit(&accumulated_value, 8, *string);
+ if (ACPI_FAILURE(status)) {
+ status = AE_OCTAL_OVERFLOW;
+ break;
+ }
+
+ string++;
+ }
+
+ /* Always return the value that has been accumulated */
+
+ *return_value_ptr = accumulated_value;
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_convert_decimal_string
+ *
+ * PARAMETERS: string - Null terminated input string
+ * return_value_ptr - Where the converted value is returned
+ *
+ * RETURN: Status and 64-bit converted integer
+ *
+ * DESCRIPTION: Performs a base 10 conversion of the input string to an
+ * integer value, either 32 or 64 bits.
+ *
+ * NOTE: Maximum 64-bit unsigned decimal value is 18446744073709551615
+ * Maximum 32-bit unsigned decimal value is 4294967295
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_convert_decimal_string(char *string, u64 *return_value_ptr)
+{
+ u64 accumulated_value = 0;
+ acpi_status status = AE_OK;
+
+ /* Convert each ASCII byte in the input string */
+
+ while (*string) {
+
+ /* Character must be ASCII 0-9, otherwise terminate with no error */
+
+ if (!isdigit(*string)) {
+ break;
+ }
+
+ /* Convert and insert this decimal digit into the accumulator */
+
+ status = acpi_ut_insert_digit(&accumulated_value, 10, *string);
+ if (ACPI_FAILURE(status)) {
+ status = AE_DECIMAL_OVERFLOW;
+ break;
+ }
+
+ string++;
+ }
+
+ /* Always return the value that has been accumulated */
+
+ *return_value_ptr = accumulated_value;
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_convert_hex_string
+ *
+ * PARAMETERS: string - Null terminated input string
+ * return_value_ptr - Where the converted value is returned
+ *
+ * RETURN: Status and 64-bit converted integer
+ *
+ * DESCRIPTION: Performs a base 16 conversion of the input string to an
+ * integer value, either 32 or 64 bits.
+ *
+ * NOTE: Maximum 64-bit unsigned hex value is 0xFFFFFFFFFFFFFFFF
+ * Maximum 32-bit unsigned hex value is 0xFFFFFFFF
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_convert_hex_string(char *string, u64 *return_value_ptr)
+{
+ u64 accumulated_value = 0;
+ acpi_status status = AE_OK;
+
+ /* Convert each ASCII byte in the input string */
+
+ while (*string) {
+
+ /* Must be ASCII A-F, a-f, or 0-9, otherwise terminate with no error */
+
+ if (!isxdigit(*string)) {
+ break;
+ }
+
+ /* Convert and insert this hex digit into the accumulator */
+
+ status = acpi_ut_insert_digit(&accumulated_value, 16, *string);
+ if (ACPI_FAILURE(status)) {
+ status = AE_HEX_OVERFLOW;
+ break;
+ }
+
+ string++;
+ }
+
+ /* Always return the value that has been accumulated */
+
+ *return_value_ptr = accumulated_value;
+ return (status);
+}
+
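
Each of the converters above feeds acpi_ut_insert_digit(), which makes room for the next character by multiplying the accumulator by the radix and then adds the digit, failing only on 64-bit overflow. A rough equivalent with explicit overflow checks (sketch only; the real helpers are acpi_ut_insert_digit() and the strtoul multiply/add routines later in this file):

#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/math64.h>

/* Assumes the caller has already validated ascii_digit for the base. */
static int example_insert_digit(u64 *acc, u32 base, int ascii_digit)
{
	u64 digit = isdigit(ascii_digit) ? ascii_digit - '0'
					 : tolower(ascii_digit) - 'a' + 10;
	u64 product;

	if (*acc > div64_u64(U64_MAX, base))	/* would overflow the multiply */
		return -ERANGE;
	product = *acc * base;

	if (product > U64_MAX - digit)		/* would overflow the add */
		return -ERANGE;

	*acc = product + digit;
	return 0;
}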
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_remove_leading_zeros
+ *
+ * PARAMETERS: string - Pointer to input ASCII string
+ *
+ * RETURN: Next character after any leading zeros. This character may be
+ * used by the caller to detect end-of-string.
+ *
+ * DESCRIPTION: Remove any leading zeros in the input string. Return the
+ * next character after the final ASCII zero to enable the caller
+ * to check for the end of the string (NULL terminator).
+ *
+ ******************************************************************************/
+
+char acpi_ut_remove_leading_zeros(char **string)
+{
+
+ while (**string == ACPI_ASCII_ZERO) {
+ *string += 1;
+ }
+
+ return (**string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_remove_whitespace
+ *
+ * PARAMETERS: string - Pointer to input ASCII string
+ *
+ * RETURN: Next character after any whitespace. This character may be
+ * used by the caller to detect end-of-string.
+ *
+ * DESCRIPTION: Remove any leading whitespace in the input string. Return the
+ * next character after the whitespace to enable the caller
+ * to check for the end of the string (NULL terminator).
+ *
+ ******************************************************************************/
+
+char acpi_ut_remove_whitespace(char **string)
+{
+
+ while (isspace((u8)**string)) {
+ *string += 1;
+ }
+
+ return (**string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_detect_hex_prefix
+ *
+ * PARAMETERS: string - Pointer to input ASCII string
+ *
+ * RETURN: TRUE if a "0x" prefix was found at the start of the string
+ *
+ * DESCRIPTION: Detect and remove a hex "0x" prefix
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_detect_hex_prefix(char **string)
+{
+
+ if ((**string == ACPI_ASCII_ZERO) &&
+ (tolower((int)*(*string + 1)) == 'x')) {
+ *string += 2; /* Go past the leading 0x */
+ return (TRUE);
+ }
+
+ return (FALSE); /* Not a hex string */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_detect_octal_prefix
+ *
+ * PARAMETERS: string - Pointer to input ASCII string
+ *
+ * RETURN: True if an octal "0" prefix was found at the start of the
+ * string
+ *
+ * DESCRIPTION: Detect and remove an octal prefix (zero)
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_detect_octal_prefix(char **string)
+{
+
+ if (**string == ACPI_ASCII_ZERO) {
+ *string += 1; /* Go past the leading 0 */
+ return (TRUE);
+ }
+
+ return (FALSE); /* Not an octal string */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_insert_digit
+ *
+ * PARAMETERS: accumulated_value - Current value of the integer value
+ * accumulator. The new value is
+ * returned here.
+ * base - Radix, either 8/10/16
+ * ascii_digit - ASCII single digit to be inserted
+ *
+ * RETURN: Status and result of the convert/insert operation. The only
+ * possible returned exception code is numeric overflow of
+ * either the multiply or add conversion operations.
+ *
+ * DESCRIPTION: Generic conversion and insertion function for all bases:
+ *
+ * 1) Multiply the current accumulated/converted value by the
+ * base in order to make room for the new character.
+ *
+ * 2) Convert the new character to binary and add it to the
+ * current accumulated value.
+ *
+ * Note: The only possible exception indicates an integer
+ * overflow (AE_NUMERIC_OVERFLOW)
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
+{
+ acpi_status status;
+ u64 product;
+
+ /* Make room in the accumulated value for the incoming digit */
+
+ status = acpi_ut_strtoul_multiply64(*accumulated_value, base, &product);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Add in the new digit, and store the sum to the accumulated value */
+
+ status =
+ acpi_ut_strtoul_add64(product,
+ acpi_ut_ascii_char_to_hex(ascii_digit),
+ accumulated_value);
+
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strtoul_multiply64
+ *
+ * PARAMETERS: multiplicand - Current accumulated converted integer
+ * multiplier - Base/Radix
+ * out_product - Where the product is returned
+ *
+ * RETURN: Status and 64-bit product
+ *
+ * DESCRIPTION: Multiply two 64-bit values, with checking for 64-bit overflow as
+ * well as 32-bit overflow if necessary (if the current global
+ * integer width is 32).
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
+{
+ u64 val;
+
+ /* Exit if either operand is zero */
+
+ *out_product = 0;
+ if (!multiplicand || !multiplier) {
+ return (AE_OK);
+ }
+
+ /* Check for 64-bit overflow before the actual multiplication */
+
+ acpi_ut_short_divide(ACPI_UINT64_MAX, (u32)multiplier, &val, NULL);
+ if (multiplicand > val) {
+ return (AE_NUMERIC_OVERFLOW);
+ }
+
+ val = multiplicand * multiplier;
+
+ /* Check for 32-bit overflow if necessary */
+
+ if ((acpi_gbl_integer_bit_width == 32) && (val > ACPI_UINT32_MAX)) {
+ return (AE_NUMERIC_OVERFLOW);
+ }
+
+ *out_product = val;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strtoul_add64
+ *
+ * PARAMETERS: addend1 - Current accumulated converted integer
+ * addend2 - New hex value/char
+ * out_sum - Where sum is returned (Accumulator)
+ *
+ * RETURN: Status and 64-bit sum
+ *
+ * DESCRIPTION: Add two 64-bit values, with checking for 64-bit overflow as
+ * well as 32-bit overflow if necessary (if the current global
+ * integer width is 32).
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum)
+{
+ u64 sum;
+
+ /* Check for 64-bit overflow before the actual addition */
+
+ if ((addend1 > 0) && (addend2 > (ACPI_UINT64_MAX - addend1))) {
+ return (AE_NUMERIC_OVERFLOW);
+ }
+
+ sum = addend1 + addend2;
+
+ /* Check for 32-bit overflow if necessary */
+
+ if ((acpi_gbl_integer_bit_width == 32) && (sum > ACPI_UINT32_MAX)) {
+ return (AE_NUMERIC_OVERFLOW);
+ }
+
+ *out_sum = sum;
+ return (AE_OK);
+}
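
The helpers above implement the classic overflow-checked multiply-then-add accumulation. The following stand-alone sketch (hypothetical names, plain C, not ACPICA API) shows the same technique in isolation; the division-based pre-check is equivalent to the acpi_ut_short_divide() check used above:

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustrative only: accumulate one digit into *value in the given base,
     * returning false on 64-bit overflow (mirrors acpi_ut_insert_digit). */
    static bool accumulate_digit(uint64_t *value, uint32_t base, uint32_t digit)
    {
            /* The multiply overflows when *value > UINT64_MAX / base */
            if (*value > UINT64_MAX / base)
                    return false;
            *value *= base;

            /* The add overflows when digit > UINT64_MAX - *value */
            if (digit > UINT64_MAX - *value)
                    return false;
            *value += digit;
            return true;
    }
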
diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c
index 9633ee142855..e2067dcb9389 100644
--- a/drivers/acpi/acpica/utstrtoul64.c
+++ b/drivers/acpi/acpica/utstrtoul64.c
@@ -1,6 +1,7 @@
/*******************************************************************************
*
- * Module Name: utstrtoul64 - string to 64-bit integer support
+ * Module Name: utstrtoul64 - String-to-integer conversion support for both
+ * 64-bit and 32-bit integers
*
******************************************************************************/
@@ -44,304 +45,319 @@
#include <acpi/acpi.h>
#include "accommon.h"
-/*******************************************************************************
- *
- * The functions in this module satisfy the need for 64-bit string-to-integer
- * conversions on both 32-bit and 64-bit platforms.
- *
- ******************************************************************************/
-
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utstrtoul64")
-/* Local prototypes */
-static u64 acpi_ut_strtoul_base10(char *string, u32 flags);
-
-static u64 acpi_ut_strtoul_base16(char *string, u32 flags);
-
/*******************************************************************************
*
- * String conversion rules as written in the ACPI specification. The error
- * conditions and behavior are different depending on the type of conversion.
- *
- *
- * Implicit data type conversion: string-to-integer
- * --------------------------------------------------
- *
- * Base is always 16. This is the ACPI_STRTOUL_BASE16 case.
- *
- * Example:
- * Add ("BA98", Arg0, Local0)
- *
- * The integer is initialized to the value zero.
- * The ASCII string is interpreted as a hexadecimal constant.
+ * This module contains the top-level string to 64/32-bit unsigned integer
+ * conversion functions:
*
- * 1) A "0x" prefix is not allowed. However, ACPICA allows this for
- * compatibility with previous ACPICA. (NO ERROR)
+ * 1) A standard strtoul() function that supports 64-bit integers, base
+ * 8/10/16, with integer overflow support. This is used mainly by the
+ * iASL compiler, which implements tighter constraints on integer
+ * constants than the runtime (interpreter) string-to-integer conversions.
+ * 2) Runtime "Explicit conversion" as defined in the ACPI specification.
+ * 3) Runtime "Implicit conversion" as defined in the ACPI specification.
*
- * 2) Terminates when the size of an integer is reached (32 or 64 bits).
- * (NO ERROR)
+ * Current users of this module:
*
- * 3) The first non-hex character terminates the conversion without error.
- * (NO ERROR)
- *
- * 4) Conversion of a null (zero-length) string to an integer is not
- * allowed. However, ACPICA allows this for compatibility with previous
- * ACPICA. This conversion returns the value 0. (NO ERROR)
- *
- *
- * Explicit data type conversion: to_integer() with string operand
- * ---------------------------------------------------------------
- *
- * Base is either 10 (default) or 16 (with 0x prefix)
- *
- * Examples:
- * to_integer ("1000")
- * to_integer ("0xABCD")
- *
- * 1) Can be (must be) either a decimal or hexadecimal numeric string.
- * A hex value must be prefixed by "0x" or it is interpreted as a decimal.
+ * iASL - Preprocessor (constants and math expressions)
+ * iASL - Main parser, conversion of constants to integers
+ * iASL - Data Table Compiler parser (constants and math expressions)
+ * interpreter - Implicit and explicit conversions, GPE method names
+ * interpreter - Repair code for return values from predefined names
+ * debugger - Command line input string conversion
+ * acpi_dump - ACPI table physical addresses
+ * acpi_exec - Support for namespace overrides
*
- * 2) The value must not exceed the maximum of an integer value. ACPI spec
- * states the behavior is "unpredictable", so ACPICA matches the behavior
- * of the implicit conversion case.(NO ERROR)
+ * Notes concerning users of these interfaces:
*
- * 3) Behavior on the first non-hex character is not specified by the ACPI
- * spec, so ACPICA matches the behavior of the implicit conversion case
- * and terminates. (NO ERROR)
+ * acpi_gbl_integer_byte_width is used to set the 32/64 bit limit for explicit
+ * and implicit conversions. This global must be set to the proper width.
+ * For the core ACPICA code, the width depends on the DSDT version. For the
+ * acpi_ut_strtoul64 interface, all conversions are 64 bits. This interface is
+ * used primarily for iASL, where the default width is 64 bits for all parsers,
+ * but error checking is performed later to flag cases where a 64-bit constant
+ * is wrongly defined in a 32-bit DSDT/SSDT.
*
- * 4) A null (zero-length) string is illegal.
- * However, ACPICA allows this for compatibility with previous ACPICA.
- * This conversion returns the value 0. (NO ERROR)
+ * In ACPI, the only place where octal numbers are supported is within
+ * the ASL language itself. This is implemented via the main acpi_ut_strtoul64
+ * interface. According to the ACPI specification, there is no ACPI runtime
+ * support (explicit/implicit) for octal string conversions.
*
******************************************************************************/
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_strtoul64
*
- * PARAMETERS: string - Null terminated input string
- * flags - Conversion info, see below
+ * PARAMETERS: string - Null terminated input string,
+ * must be a valid pointer
* return_value - Where the converted integer is
- * returned
- *
- * RETURN: Status and Converted value
+ * returned. Must be a valid pointer
*
- * DESCRIPTION: Convert a string into an unsigned value. Performs either a
- * 32-bit or 64-bit conversion, depending on the input integer
- * size in Flags (often the current mode of the interpreter).
+ * RETURN: Status and converted integer. Returns an exception on a
+ * 64-bit numeric overflow
*
- * Values for Flags:
- * ACPI_STRTOUL_32BIT - Max integer value is 32 bits
- * ACPI_STRTOUL_64BIT - Max integer value is 64 bits
- * ACPI_STRTOUL_BASE16 - Input string is hexadecimal. Default
- * is 10/16 based on string prefix (0x).
+ * DESCRIPTION: Convert a string into an unsigned integer. Always performs a
+ * full 64-bit conversion, regardless of the current global
+ * integer width. Supports Decimal, Hex, and Octal strings.
*
- * NOTES:
- * Negative numbers are not supported, as they are not supported by ACPI.
+ * Current users of this function:
*
- * Supports only base 16 or base 10 strings/values. Does not
- * support Octal strings, as these are not supported by ACPI.
- *
- * Current users of this support:
- *
- * interpreter - Implicit and explicit conversions, GPE method names
- * debugger - Command line input string conversion
- * iASL - Main parser, conversion of constants to integers
- * iASL - Data Table Compiler parser (constant math expressions)
- * iASL - Preprocessor (constant math expressions)
- * acpi_dump - Input table addresses
- * acpi_exec - Testing of the acpi_ut_strtoul64 function
- *
- * Note concerning callers:
- * acpi_gbl_integer_byte_width can be used to set the 32/64 limit. If used,
- * this global should be set to the proper width. For the core ACPICA code,
- * this width depends on the DSDT version. For iASL, the default byte
- * width is always 8 for the parser, but error checking is performed later
- * to flag cases where a 64-bit constant is defined in a 32-bit DSDT/SSDT.
+ * iASL - Preprocessor (constants and math expressions)
+ * iASL - Main ASL parser, conversion of ASL constants to integers
+ * iASL - Data Table Compiler parser (constants and math expressions)
+ * interpreter - Repair code for return values from predefined names
+ * acpi_dump - ACPI table physical addresses
+ * acpi_exec - Support for namespace overrides
*
******************************************************************************/
-
-acpi_status acpi_ut_strtoul64(char *string, u32 flags, u64 *return_value)
+acpi_status acpi_ut_strtoul64(char *string, u64 *return_value)
{
acpi_status status = AE_OK;
- u32 base;
+ u8 original_bit_width;
+ u32 base = 10; /* Default is decimal */
ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string);
- /* Parameter validation */
-
- if (!string || !return_value) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
*return_value = 0;
- /* Check for zero-length string, returns 0 */
+ /* A NULL (zero-length) input string returns a value of zero */
if (*string == 0) {
return_ACPI_STATUS(AE_OK);
}
- /* Skip over any white space at start of string */
-
- while (isspace((int)*string)) {
- string++;
- }
-
- /* End of string? return 0 */
-
- if (*string == 0) {
+ if (!acpi_ut_remove_whitespace(&string)) {
return_ACPI_STATUS(AE_OK);
}
/*
- * 1) The "0x" prefix indicates base 16. Per the ACPI specification,
- * the "0x" prefix is only allowed for implicit (non-strict) conversions.
- * However, we always allow it for compatibility with older ACPICA.
+ * 1) Check for a hex constant. A "0x" prefix indicates base 16.
*/
- if ((*string == ACPI_ASCII_ZERO) &&
- (tolower((int)*(string + 1)) == 'x')) {
- string += 2; /* Go past the 0x */
- if (*string == 0) {
- return_ACPI_STATUS(AE_OK); /* Return value 0 */
- }
-
+ if (acpi_ut_detect_hex_prefix(&string)) {
base = 16;
}
- /* 2) Force to base 16 (implicit conversion case) */
-
- else if (flags & ACPI_STRTOUL_BASE16) {
- base = 16;
+ /*
+ * 2) Check for an octal constant, defined to be a leading zero
+ * followed by sequence of octal digits (0-7)
+ */
+ else if (acpi_ut_detect_octal_prefix(&string)) {
+ base = 8;
}
- /* 3) Default fallback is to Base 10 */
-
- else {
- base = 10;
+ if (!acpi_ut_remove_leading_zeros(&string)) {
+ return_ACPI_STATUS(AE_OK); /* Return value 0 */
}
- /* Skip all leading zeros */
+ /*
+ * Force a full 64-bit conversion. The caller (usually iASL) must
+ * check for a 32-bit overflow later as necessary (if the current mode
+ * is 32-bit, meaning a 32-bit DSDT).
+ */
+ original_bit_width = acpi_gbl_integer_bit_width;
+ acpi_gbl_integer_bit_width = 64;
- while (*string == ACPI_ASCII_ZERO) {
- string++;
- if (*string == 0) {
- return_ACPI_STATUS(AE_OK); /* Return value 0 */
- }
+ /*
+ * Perform the base 8, 10, or 16 conversion. A 64-bit numeric overflow
+ * will return an exception (to allow iASL to flag the statement).
+ */
+ switch (base) {
+ case 8:
+ status = acpi_ut_convert_octal_string(string, return_value);
+ break;
+
+ case 10:
+ status = acpi_ut_convert_decimal_string(string, return_value);
+ break;
+
+ case 16:
+ default:
+ status = acpi_ut_convert_hex_string(string, return_value);
+ break;
}
- /* Perform the base 16 or 10 conversion */
-
- if (base == 16) {
- *return_value = acpi_ut_strtoul_base16(string, flags);
- } else {
- *return_value = acpi_ut_strtoul_base10(string, flags);
- }
+ /* Only possible exception from above is a 64-bit overflow */
+ acpi_gbl_integer_bit_width = original_bit_width;
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
- * FUNCTION: acpi_ut_strtoul_base10
+ * FUNCTION: acpi_ut_implicit_strtoul64
+ *
+ * PARAMETERS: string - Null terminated input string,
+ * must be a valid pointer
+ *
+ * RETURN: Converted integer
+ *
+ * DESCRIPTION: Perform a 64-bit conversion with restrictions placed upon
+ * an "implicit conversion" by the ACPI specification. Used by
+ * many ASL operators that require an integer operand, and support
+ * an automatic (implicit) conversion from a string operand
+ * to the final integer operand. The major restriction is that
+ * only hex strings are supported.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Base is always 16, either with or without the 0x prefix. Decimal and
+ * Octal strings are not supported, as per the ACPI specification.
+ *
+ * Examples (both are hex values):
+ * Add ("BA98", Arg0, Local0)
+ * Subtract ("0x12345678", Arg1, Local1)
+ *
+ * Conversion rules as extracted from the ACPI specification:
+ *
+ * The converted integer is initialized to the value zero.
+ * The ASCII string is always interpreted as a hexadecimal constant.
+ *
+ * 1) According to the ACPI specification, a "0x" prefix is not allowed.
+ * However, ACPICA allows this as an ACPI extension on general
+ * principle. (NO ERROR)
+ *
+ * 2) The conversion terminates when the size of an integer is reached
+ * (32 or 64 bits). There are no numeric overflow conditions. (NO ERROR)
+ *
+ * 3) The first non-hex character terminates the conversion and returns
+ * the current accumulated value of the converted integer (NO ERROR).
*
- * PARAMETERS: string - Null terminated input string
- * flags - Conversion info
+ * 4) Conversion of a null (zero-length) string to an integer is
+ * technically not allowed. However, ACPICA allows this as an ACPI
+ * extension. The conversion returns the value 0. (NO ERROR)
*
- * RETURN: 64-bit converted integer
+ * NOTE: There are no error conditions returned by this function. At
+ * the minimum, a value of zero is returned.
*
- * DESCRIPTION: Performs a base 10 conversion of the input string to an
- * integer value, either 32 or 64 bits.
- * Note: String must be valid and non-null.
+ * Current users of this function:
+ *
+ * interpreter - All runtime implicit conversions, as per ACPI specification
+ * iASL - Data Table Compiler parser (constants and math expressions)
*
******************************************************************************/
-static u64 acpi_ut_strtoul_base10(char *string, u32 flags)
+u64 acpi_ut_implicit_strtoul64(char *string)
{
- int ascii_digit;
- u64 next_value;
- u64 return_value = 0;
-
- /* Main loop: convert each ASCII byte in the input string */
-
- while (*string) {
- ascii_digit = *string;
- if (!isdigit(ascii_digit)) {
-
- /* Not ASCII 0-9, terminate */
-
- goto exit;
- }
-
- /* Convert and insert (add) the decimal digit */
+ u64 converted_integer = 0;
- acpi_ut_short_multiply(return_value, 10, &next_value);
- next_value += (ascii_digit - ACPI_ASCII_ZERO);
+ ACPI_FUNCTION_TRACE_STR(ut_implicit_strtoul64, string);
- /* Check for overflow (32 or 64 bit) - return current converted value */
+ if (!acpi_ut_remove_whitespace(&string)) {
+ return_VALUE(0);
+ }
- if (((flags & ACPI_STRTOUL_32BIT) && (next_value > ACPI_UINT32_MAX)) || (next_value < return_value)) { /* 64-bit overflow case */
- goto exit;
- }
+ /*
+ * Per the ACPI specification, only hexadecimal is supported for
+ * implicit conversions, and the "0x" prefix is "not allowed".
+ * However, allow a "0x" prefix as an ACPI extension.
+ */
+ acpi_ut_detect_hex_prefix(&string);
- return_value = next_value;
- string++;
+ if (!acpi_ut_remove_leading_zeros(&string)) {
+ return_VALUE(0);
}
-exit:
- return (return_value);
+ /*
+ * Ignore overflow as per the ACPI specification. This is implemented by
+ * ignoring the return status from the conversion function called below.
+ * On overflow, the input string is simply truncated.
+ */
+ acpi_ut_convert_hex_string(string, &converted_integer);
+ return_VALUE(converted_integer);
}
/*******************************************************************************
*
- * FUNCTION: acpi_ut_strtoul_base16
+ * FUNCTION: acpi_ut_explicit_strtoul64
+ *
+ * PARAMETERS: string - Null terminated input string,
+ * must be a valid pointer
*
- * PARAMETERS: string - Null terminated input string
- * flags - conversion info
+ * RETURN: Converted integer
*
- * RETURN: 64-bit converted integer
+ * DESCRIPTION: Perform a 64-bit conversion with the restrictions placed upon
+ * an "explicit conversion" by the ACPI specification. The
+ * main restriction is that only hex and decimal are supported.
*
- * DESCRIPTION: Performs a base 16 conversion of the input string to an
- * integer value, either 32 or 64 bits.
- * Note: String must be valid and non-null.
+ * -----------------------------------------------------------------------------
+ *
+ * Base is either 10 (default) or 16 (with 0x prefix). Octal (base 8) strings
+ * are not supported, as per the ACPI specification.
+ *
+ * Examples:
+ * to_integer ("1000") Decimal
+ * to_integer ("0xABCD") Hex
+ *
+ * Conversion rules as extracted from the ACPI specification:
+ *
+ * 1) The input string is either a decimal or hexadecimal numeric string.
+ * A hex value must be prefixed by "0x" or it is interpreted as decimal.
+ *
+ * 2) The value must not exceed the maximum of an integer value
+ * (32 or 64 bits). The ACPI specification states the behavior is
+ * "unpredictable", so ACPICA matches the behavior of the implicit
+ * conversion case. There are no numeric overflow conditions. (NO ERROR)
+ *
+ * 3) Behavior on the first non-hex character is not defined by the ACPI
+ * specification (for the to_integer operator), so ACPICA matches the
+ * behavior of the implicit conversion case. It terminates the
+ * conversion and returns the current accumulated value of the converted
+ * integer. (NO ERROR)
+ *
+ * 4) Conversion of a null (zero-length) string to an integer is
+ * technically not allowed. However, ACPICA allows this as an ACPI
+ * extension. The conversion returns the value 0. (NO ERROR)
+ *
+ * NOTE: There are no error conditions returned by this function. At the
+ * minimum, a value of zero is returned.
+ *
+ * Current users of this function:
+ *
+ * interpreter - Runtime ASL to_integer operator, as per the ACPI specification
*
******************************************************************************/
-static u64 acpi_ut_strtoul_base16(char *string, u32 flags)
+u64 acpi_ut_explicit_strtoul64(char *string)
{
- int ascii_digit;
- u32 valid_digits = 1;
- u64 return_value = 0;
-
- /* Main loop: convert each ASCII byte in the input string */
+ u64 converted_integer = 0;
+ u32 base = 10; /* Default is decimal */
- while (*string) {
+ ACPI_FUNCTION_TRACE_STR(ut_explicit_strtoul64, string);
- /* Check for overflow (32 or 64 bit) - return current converted value */
-
- if ((valid_digits > 16) ||
- ((valid_digits > 8) && (flags & ACPI_STRTOUL_32BIT))) {
- goto exit;
- }
-
- ascii_digit = *string;
- if (!isxdigit(ascii_digit)) {
-
- /* Not Hex ASCII A-F, a-f, or 0-9, terminate */
-
- goto exit;
- }
+ if (!acpi_ut_remove_whitespace(&string)) {
+ return_VALUE(0);
+ }
- /* Convert and insert the hex digit */
+ /*
+ * Only Hex and Decimal are supported, as per the ACPI specification.
+ * A "0x" prefix indicates hex; otherwise decimal is assumed.
+ */
+ if (acpi_ut_detect_hex_prefix(&string)) {
+ base = 16;
+ }
- acpi_ut_short_shift_left(return_value, 4, &return_value);
- return_value |= acpi_ut_ascii_char_to_hex(ascii_digit);
+ if (!acpi_ut_remove_leading_zeros(&string)) {
+ return_VALUE(0);
+ }
- string++;
- valid_digits++;
+ /*
+ * Ignore overflow as per the ACPI specification. This is implemented by
+ * ignoring the return status from the conversion functions called below.
+ * On overflow, the input string is simply truncated.
+ */
+ switch (base) {
+ case 10:
+ default:
+ acpi_ut_convert_decimal_string(string, &converted_integer);
+ break;
+
+ case 16:
+ acpi_ut_convert_hex_string(string, &converted_integer);
+ break;
}
-exit:
- return (return_value);
+ return_VALUE(converted_integer);
}
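
Taken together, the three entry points above behave differently on the same inputs. A minimal usage sketch; the expected values follow from the conversion rules documented in the function headers above, not from a test run, and the returned acpi_status (checked for AE_*_OVERFLOW in real callers) is ignored here:

    u64 value;

    /* General-purpose converter: detects "0x" (hex) and a leading zero (octal) */
    acpi_ut_strtoul64("0x1234", &value);    /* value = 0x1234 (base 16) */
    acpi_ut_strtoul64("0777", &value);      /* value = 511    (base 8)  */
    acpi_ut_strtoul64("1234", &value);      /* value = 1234   (base 10) */

    /* Implicit conversion: always hex, "0x" prefix optional, never errors */
    value = acpi_ut_implicit_strtoul64("BA98");     /* value = 0xBA98 */

    /* Explicit conversion (to_integer): decimal unless prefixed with "0x" */
    value = acpi_ut_explicit_strtoul64("1000");     /* value = 1000   */
    value = acpi_ut_explicit_strtoul64("0xABCD");   /* value = 0xABCD */
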
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 2c462beee551..6742f6c68034 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1061,7 +1061,7 @@ static int erst_writer(struct pstore_record *record)
rcd->hdr.error_severity = CPER_SEV_FATAL;
/* timestamp valid. platform_id, partition_id are invalid */
rcd->hdr.validation_bits = CPER_VALID_TIMESTAMP;
- rcd->hdr.timestamp = get_seconds();
+ rcd->hdr.timestamp = ktime_get_real_seconds();
rcd->hdr.record_length = sizeof(*rcd) + record->size;
rcd->hdr.creator_id = CPER_CREATOR_PSTORE;
rcd->hdr.notification_type = CPER_NOTIFY_MCE;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index ebaa51ba8a22..6402f7fad3bb 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -51,6 +51,7 @@
#include <acpi/actbl1.h>
#include <acpi/ghes.h>
#include <acpi/apei.h>
+#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <ras/ras_event.h>
@@ -112,22 +113,10 @@ static DEFINE_MUTEX(ghes_list_mutex);
* Because the memory area used to transfer hardware error information
* from BIOS to Linux can be determined only in NMI, IRQ or timer
* handler, but general ioremap can not be used in atomic context, so
- * a special version of atomic ioremap is implemented for that.
- */
-
-/*
- * Two virtual pages are used, one for IRQ/PROCESS context, the other for
- * NMI context (optionally).
- */
-#define GHES_IOREMAP_PAGES 2
-#define GHES_IOREMAP_IRQ_PAGE(base) (base)
-#define GHES_IOREMAP_NMI_PAGE(base) ((base) + PAGE_SIZE)
-
-/* virtual memory area for atomic ioremap */
-static struct vm_struct *ghes_ioremap_area;
-/*
- * These 2 spinlock is used to prevent atomic ioremap virtual memory
- * area from being mapped simultaneously.
+ * the fixmap is used instead.
+ *
+ * These 2 spinlocks are used to prevent the fixmap entries from being used
+ * simultaneously.
*/
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
@@ -140,71 +129,38 @@ static atomic_t ghes_estatus_cache_alloced;
static int ghes_panic_timeout __read_mostly = 30;
-static int ghes_ioremap_init(void)
-{
- ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
- VM_IOREMAP, VMALLOC_START, VMALLOC_END);
- if (!ghes_ioremap_area) {
- pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void ghes_ioremap_exit(void)
-{
- free_vm_area(ghes_ioremap_area);
-}
-
static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
{
- unsigned long vaddr;
phys_addr_t paddr;
pgprot_t prot;
- vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
-
paddr = pfn << PAGE_SHIFT;
prot = arch_apei_get_mem_attribute(paddr);
- ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot);
+ __set_fixmap(FIX_APEI_GHES_NMI, paddr, prot);
- return (void __iomem *)vaddr;
+ return (void __iomem *) fix_to_virt(FIX_APEI_GHES_NMI);
}
static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
{
- unsigned long vaddr, paddr;
+ phys_addr_t paddr;
pgprot_t prot;
- vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
-
paddr = pfn << PAGE_SHIFT;
prot = arch_apei_get_mem_attribute(paddr);
+ __set_fixmap(FIX_APEI_GHES_IRQ, paddr, prot);
- ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot);
-
- return (void __iomem *)vaddr;
+ return (void __iomem *) fix_to_virt(FIX_APEI_GHES_IRQ);
}
-static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
+static void ghes_iounmap_nmi(void)
{
- unsigned long vaddr = (unsigned long __force)vaddr_ptr;
- void *base = ghes_ioremap_area->addr;
-
- BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
- unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
- arch_apei_flush_tlb_one(vaddr);
+ clear_fixmap(FIX_APEI_GHES_NMI);
}
-static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
+static void ghes_iounmap_irq(void)
{
- unsigned long vaddr = (unsigned long __force)vaddr_ptr;
- void *base = ghes_ioremap_area->addr;
-
- BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
- unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
- arch_apei_flush_tlb_one(vaddr);
+ clear_fixmap(FIX_APEI_GHES_IRQ);
}
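
The calling pattern that pairs these helpers with the two spinlocks is sketched below, condensed from ghes_copy_tofrom_phys() (the unmap half appears in the next hunk); declarations and error handling are omitted:

    void __iomem *vaddr;

    raw_spin_lock(&ghes_ioremap_lock_nmi);
    vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
    memcpy_fromio(buffer, vaddr + offset, trunk);
    ghes_iounmap_nmi();
    raw_spin_unlock(&ghes_ioremap_lock_nmi);
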
static int ghes_estatus_pool_init(void)
@@ -360,10 +316,10 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
paddr += trunk;
buffer += trunk;
if (in_nmi) {
- ghes_iounmap_nmi(vaddr);
+ ghes_iounmap_nmi();
raw_spin_unlock(&ghes_ioremap_lock_nmi);
} else {
- ghes_iounmap_irq(vaddr);
+ ghes_iounmap_irq();
spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
}
}
@@ -851,17 +807,8 @@ static void ghes_sea_remove(struct ghes *ghes)
synchronize_rcu();
}
#else /* CONFIG_ACPI_APEI_SEA */
-static inline void ghes_sea_add(struct ghes *ghes)
-{
- pr_err(GHES_PFX "ID: %d, trying to add SEA notification which is not supported\n",
- ghes->generic->header.source_id);
-}
-
-static inline void ghes_sea_remove(struct ghes *ghes)
-{
- pr_err(GHES_PFX "ID: %d, trying to remove SEA notification which is not supported\n",
- ghes->generic->header.source_id);
-}
+static inline void ghes_sea_add(struct ghes *ghes) { }
+static inline void ghes_sea_remove(struct ghes *ghes) { }
#endif /* CONFIG_ACPI_APEI_SEA */
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
@@ -1063,23 +1010,9 @@ static void ghes_nmi_init_cxt(void)
init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
}
#else /* CONFIG_HAVE_ACPI_APEI_NMI */
-static inline void ghes_nmi_add(struct ghes *ghes)
-{
- pr_err(GHES_PFX "ID: %d, trying to add NMI notification which is not supported!\n",
- ghes->generic->header.source_id);
- BUG();
-}
-
-static inline void ghes_nmi_remove(struct ghes *ghes)
-{
- pr_err(GHES_PFX "ID: %d, trying to remove NMI notification which is not supported!\n",
- ghes->generic->header.source_id);
- BUG();
-}
-
-static inline void ghes_nmi_init_cxt(void)
-{
-}
+static inline void ghes_nmi_add(struct ghes *ghes) { }
+static inline void ghes_nmi_remove(struct ghes *ghes) { }
+static inline void ghes_nmi_init_cxt(void) { }
#endif /* CONFIG_HAVE_ACPI_APEI_NMI */
static int ghes_probe(struct platform_device *ghes_dev)
@@ -1284,13 +1217,9 @@ static int __init ghes_init(void)
ghes_nmi_init_cxt();
- rc = ghes_ioremap_init();
- if (rc)
- goto err;
-
rc = ghes_estatus_pool_init();
if (rc)
- goto err_ioremap_exit;
+ goto err;
rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
GHES_ESTATUS_CACHE_ALLOCED_MAX);
@@ -1314,8 +1243,6 @@ static int __init ghes_init(void)
return 0;
err_pool_exit:
ghes_estatus_pool_exit();
-err_ioremap_exit:
- ghes_ioremap_exit();
err:
return rc;
}
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index 597a737d538f..92f9edf9d11e 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -199,7 +199,7 @@ static int __init gtdt_parse_timer_block(struct acpi_gtdt_timer_block *block,
struct acpi_gtdt_timer_entry *gtdt_frame;
if (!block->timer_count) {
- pr_err(FW_BUG "GT block present, but frame count is zero.");
+ pr_err(FW_BUG "GT block present, but frame count is zero.\n");
return -ENODEV;
}
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index de56394dd161..95255ecfae7c 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -88,8 +88,8 @@ static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
*
* Returns: fwnode_handle pointer on success, NULL on failure
*/
-static inline
-struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node)
+static inline struct fwnode_handle *iort_get_fwnode(
+ struct acpi_iort_node *node)
{
struct iort_fwnode *curr;
struct fwnode_handle *fwnode = NULL;
@@ -126,6 +126,31 @@ static inline void iort_delete_fwnode(struct acpi_iort_node *node)
spin_unlock(&iort_fwnode_lock);
}
+/**
+ * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
+ *
+ * @fwnode: fwnode associated with device to be looked-up
+ *
+ * Returns: iort_node pointer on success, NULL on failure
+ */
+static inline struct acpi_iort_node *iort_get_iort_node(
+ struct fwnode_handle *fwnode)
+{
+ struct iort_fwnode *curr;
+ struct acpi_iort_node *iort_node = NULL;
+
+ spin_lock(&iort_fwnode_lock);
+ list_for_each_entry(curr, &iort_fwnode_list, list) {
+ if (curr->fwnode == fwnode) {
+ iort_node = curr->iort_node;
+ break;
+ }
+ }
+ spin_unlock(&iort_fwnode_lock);
+
+ return iort_node;
+}
+
typedef acpi_status (*iort_find_node_callback)
(struct acpi_iort_node *node, void *context);
@@ -306,9 +331,8 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
return 0;
}
-static
-struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
- u32 *id_out, int index)
+static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
+ u32 *id_out, int index)
{
struct acpi_iort_node *parent;
struct acpi_iort_id_mapping *map;
@@ -332,7 +356,8 @@ struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
- node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
+ node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
+ node->type == ACPI_IORT_NODE_SMMU_V3) {
*id_out = map->output_base;
return parent;
}
@@ -341,6 +366,47 @@ struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
return NULL;
}
+#if (ACPI_CA_VERSION > 0x20170929)
+static int iort_get_id_mapping_index(struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu_v3 *smmu;
+
+ switch (node->type) {
+ case ACPI_IORT_NODE_SMMU_V3:
+ /*
+ * SMMUv3 dev ID mapping index was introduced in revision 1
+ * table, not available in revision 0
+ */
+ if (node->revision < 1)
+ return -EINVAL;
+
+ smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+ /*
+ * ID mapping index is only ignored if all interrupts are
+ * GSIV based
+ */
+ if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
+ && smmu->sync_gsiv)
+ return -EINVAL;
+
+ if (smmu->id_mapping_index >= node->mapping_count) {
+ pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
+ node, node->type);
+ return -EINVAL;
+ }
+
+ return smmu->id_mapping_index;
+ default:
+ return -EINVAL;
+ }
+}
+#else
+static inline int iort_get_id_mapping_index(struct acpi_iort_node *node)
+{
+ return -EINVAL;
+}
+#endif
+
static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
u32 id_in, u32 *id_out,
u8 type_mask)
@@ -350,7 +416,7 @@ static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
/* Parse the ID mapping tree to find specified node type */
while (node) {
struct acpi_iort_id_mapping *map;
- int i;
+ int i, index;
if (IORT_TYPE_MASK(node->type) & type_mask) {
if (id_out)
@@ -371,8 +437,19 @@ static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
goto fail_map;
}
+ /*
+ * Get the special ID mapping index (if any) and skip its
+ * associated ID map to prevent erroneous multi-stage
+ * IORT ID translations.
+ */
+ index = iort_get_id_mapping_index(node);
+
/* Do the ID translation */
for (i = 0; i < node->mapping_count; i++, map++) {
+ /* if it is the special mapping index, skip it */
+ if (i == index)
+ continue;
+
if (!iort_id_map(map, node->type, id, &id))
break;
}
@@ -392,10 +469,9 @@ fail_map:
return NULL;
}
-static
-struct acpi_iort_node *iort_node_map_platform_id(struct acpi_iort_node *node,
- u32 *id_out, u8 type_mask,
- int index)
+static struct acpi_iort_node *iort_node_map_platform_id(
+ struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
+ int index)
{
struct acpi_iort_node *parent;
u32 id;
@@ -424,9 +500,25 @@ static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
struct pci_bus *pbus;
- if (!dev_is_pci(dev))
+ if (!dev_is_pci(dev)) {
+ struct acpi_iort_node *node;
+ /*
+ * Scan iort_fwnode_list to see if the device is an IORT platform
+ * device (such as an SMMU or PMCG); its iort_node was already cached
+ * and associated with the fwnode when the IORT platform devices
+ * were initialized.
+ */
+ node = iort_get_iort_node(dev->fwnode);
+ if (node)
+ return node;
+
+ /*
+ * If not, it should be a platform device defined in the
+ * DSDT/SSDT (with a Named Component node in the IORT)
+ */
return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
+ }
/* Find a PCI root bus */
pbus = to_pci_dev(dev)->bus;
@@ -466,16 +558,24 @@ u32 iort_msi_map_rid(struct device *dev, u32 req_id)
*/
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
- int i;
+ int i, index;
struct acpi_iort_node *node;
node = iort_find_dev_node(dev);
if (!node)
return -ENODEV;
- for (i = 0; i < node->mapping_count; i++) {
- if (iort_node_map_platform_id(node, dev_id, IORT_MSI_TYPE, i))
+ index = iort_get_id_mapping_index(node);
+ /* if there is a valid index, go get the dev_id directly */
+ if (index >= 0) {
+ if (iort_node_get_id(node, dev_id, index))
return 0;
+ } else {
+ for (i = 0; i < node->mapping_count; i++) {
+ if (iort_node_map_platform_id(node, dev_id,
+ IORT_MSI_TYPE, i))
+ return 0;
+ }
}
return -ENODEV;
@@ -538,6 +638,49 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}
+static void iort_set_device_domain(struct device *dev,
+ struct acpi_iort_node *node)
+{
+ struct acpi_iort_its_group *its;
+ struct acpi_iort_node *msi_parent;
+ struct acpi_iort_id_mapping *map;
+ struct fwnode_handle *iort_fwnode;
+ struct irq_domain *domain;
+ int index;
+
+ index = iort_get_id_mapping_index(node);
+ if (index < 0)
+ return;
+
+ map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
+ node->mapping_offset + index * sizeof(*map));
+
+ /* Firmware bug! */
+ if (!map->output_reference ||
+ !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
+ pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
+ node, node->type);
+ return;
+ }
+
+ msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
+ map->output_reference);
+
+ if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
+ return;
+
+ /* Move to ITS specific data */
+ its = (struct acpi_iort_its_group *)msi_parent->node_data;
+
+ iort_fwnode = iort_find_domain_token(its->identifiers[0]);
+ if (!iort_fwnode)
+ return;
+
+ domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
+ if (domain)
+ dev_set_msi_domain(dev, domain);
+}
+
/**
* iort_get_platform_device_domain() - Find MSI domain related to a
* platform device
@@ -623,14 +766,14 @@ static inline bool iort_iommu_driver_enabled(u8 type)
}
#ifdef CONFIG_IOMMU_API
-static inline
-const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
+static inline const struct iommu_ops *iort_fwspec_iommu_ops(
+ struct iommu_fwspec *fwspec)
{
return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}
-static inline
-int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
+static inline int iort_add_device_replay(const struct iommu_ops *ops,
+ struct device *dev)
{
int err = 0;
@@ -640,11 +783,11 @@ int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
return err;
}
#else
-static inline
-const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
+static inline const struct iommu_ops *iort_fwspec_iommu_ops(
+ struct iommu_fwspec *fwspec)
{ return NULL; }
-static inline
-int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
+static inline int iort_add_device_replay(const struct iommu_ops *ops,
+ struct device *dev)
{ return 0; }
#endif
@@ -968,7 +1111,7 @@ static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}
-#if defined(CONFIG_ACPI_NUMA) && defined(ACPI_IORT_SMMU_V3_PXM_VALID)
+#if defined(CONFIG_ACPI_NUMA)
/*
* set numa proximity domain for smmuv3 device
*/
@@ -1051,34 +1194,34 @@ static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}
-struct iort_iommu_config {
+struct iort_dev_config {
const char *name;
- int (*iommu_init)(struct acpi_iort_node *node);
- bool (*iommu_is_coherent)(struct acpi_iort_node *node);
- int (*iommu_count_resources)(struct acpi_iort_node *node);
- void (*iommu_init_resources)(struct resource *res,
+ int (*dev_init)(struct acpi_iort_node *node);
+ bool (*dev_is_coherent)(struct acpi_iort_node *node);
+ int (*dev_count_resources)(struct acpi_iort_node *node);
+ void (*dev_init_resources)(struct resource *res,
struct acpi_iort_node *node);
- void (*iommu_set_proximity)(struct device *dev,
+ void (*dev_set_proximity)(struct device *dev,
struct acpi_iort_node *node);
};
-static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
+static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
.name = "arm-smmu-v3",
- .iommu_is_coherent = arm_smmu_v3_is_coherent,
- .iommu_count_resources = arm_smmu_v3_count_resources,
- .iommu_init_resources = arm_smmu_v3_init_resources,
- .iommu_set_proximity = arm_smmu_v3_set_proximity,
+ .dev_is_coherent = arm_smmu_v3_is_coherent,
+ .dev_count_resources = arm_smmu_v3_count_resources,
+ .dev_init_resources = arm_smmu_v3_init_resources,
+ .dev_set_proximity = arm_smmu_v3_set_proximity,
};
-static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
+static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
.name = "arm-smmu",
- .iommu_is_coherent = arm_smmu_is_coherent,
- .iommu_count_resources = arm_smmu_count_resources,
- .iommu_init_resources = arm_smmu_init_resources
+ .dev_is_coherent = arm_smmu_is_coherent,
+ .dev_count_resources = arm_smmu_count_resources,
+ .dev_init_resources = arm_smmu_init_resources
};
-static __init
-const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
+static __init const struct iort_dev_config *iort_get_dev_cfg(
+ struct acpi_iort_node *node)
{
switch (node->type) {
case ACPI_IORT_NODE_SMMU_V3:
@@ -1091,31 +1234,28 @@ const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
}
/**
- * iort_add_smmu_platform_device() - Allocate a platform device for SMMU
- * @node: Pointer to SMMU ACPI IORT node
+ * iort_add_platform_device() - Allocate a platform device for IORT node
+ * @node: Pointer to device ACPI IORT node
*
* Returns: 0 on success, <0 failure
*/
-static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
+static int __init iort_add_platform_device(struct acpi_iort_node *node,
+ const struct iort_dev_config *ops)
{
struct fwnode_handle *fwnode;
struct platform_device *pdev;
struct resource *r;
enum dev_dma_attr attr;
int ret, count;
- const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);
-
- if (!ops)
- return -ENODEV;
pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
if (!pdev)
return -ENOMEM;
- if (ops->iommu_set_proximity)
- ops->iommu_set_proximity(&pdev->dev, node);
+ if (ops->dev_set_proximity)
+ ops->dev_set_proximity(&pdev->dev, node);
- count = ops->iommu_count_resources(node);
+ count = ops->dev_count_resources(node);
r = kcalloc(count, sizeof(*r), GFP_KERNEL);
if (!r) {
@@ -1123,7 +1263,7 @@ static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
goto dev_put;
}
- ops->iommu_init_resources(r, node);
+ ops->dev_init_resources(r, node);
ret = platform_device_add_resources(pdev, r, count);
/*
@@ -1158,12 +1298,14 @@ static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
pdev->dev.fwnode = fwnode;
- attr = ops->iommu_is_coherent(node) ?
- DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
+ attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ?
+ DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
/* Configure DMA for the page table walker */
acpi_dma_configure(&pdev->dev, attr);
+ iort_set_device_domain(&pdev->dev, node);
+
ret = platform_device_add(pdev);
if (ret)
goto dma_deconfigure;
@@ -1216,6 +1358,7 @@ static void __init iort_init_platform_devices(void)
struct fwnode_handle *fwnode;
int i, ret;
bool acs_enabled = false;
+ const struct iort_dev_config *ops;
/*
* iort_table and iort both point to the start of IORT table, but
@@ -1238,16 +1381,15 @@ static void __init iort_init_platform_devices(void)
if (!acs_enabled)
acs_enabled = iort_enable_acs(iort_node);
- if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
- (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
-
+ ops = iort_get_dev_cfg(iort_node);
+ if (ops) {
fwnode = acpi_alloc_fwnode_static();
if (!fwnode)
return;
iort_set_fwnode(iort_node, fwnode);
- ret = iort_add_smmu_platform_device(iort_node);
+ ret = iort_add_platform_device(iort_node, ops);
if (ret) {
iort_delete_fwnode(iort_node);
acpi_free_fwnode_static(fwnode);
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index ef1856b15488..bf8e4d371fa7 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -390,6 +390,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
{
struct acpi_button *button = acpi_driver_data(device);
struct input_dev *input;
+ int users;
switch (event) {
case ACPI_FIXED_HARDWARE_EVENT:
@@ -398,7 +399,11 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
case ACPI_BUTTON_NOTIFY_STATUS:
input = button->input;
if (button->type == ACPI_BUTTON_TYPE_LID) {
- acpi_lid_update_state(device);
+ mutex_lock(&button->input->mutex);
+ users = button->input->users;
+ mutex_unlock(&button->input->mutex);
+ if (users)
+ acpi_lid_update_state(device);
} else {
int keycode;
@@ -442,12 +447,24 @@ static int acpi_button_resume(struct device *dev)
struct acpi_button *button = acpi_driver_data(device);
button->suspended = false;
- if (button->type == ACPI_BUTTON_TYPE_LID)
+ if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users)
acpi_lid_initialize_state(device);
return 0;
}
#endif
+static int acpi_lid_input_open(struct input_dev *input)
+{
+ struct acpi_device *device = input_get_drvdata(input);
+ struct acpi_button *button = acpi_driver_data(device);
+
+ button->last_state = !!acpi_lid_evaluate_state(device);
+ button->last_time = ktime_get();
+ acpi_lid_initialize_state(device);
+
+ return 0;
+}
+
static int acpi_button_add(struct acpi_device *device)
{
struct acpi_button *button;
@@ -488,8 +505,7 @@ static int acpi_button_add(struct acpi_device *device)
strcpy(name, ACPI_BUTTON_DEVICE_NAME_LID);
sprintf(class, "%s/%s",
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID);
- button->last_state = !!acpi_lid_evaluate_state(device);
- button->last_time = ktime_get();
+ input->open = acpi_lid_input_open;
} else {
printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid);
error = -ENODEV;
@@ -522,11 +538,11 @@ static int acpi_button_add(struct acpi_device *device)
break;
}
+ input_set_drvdata(input, device);
error = input_register_device(input);
if (error)
goto err_remove_fs;
if (button->type == ACPI_BUTTON_TYPE_LID) {
- acpi_lid_initialize_state(device);
/*
* This assumes there's only one lid device, or if there are
* more we only care about the last one...
@@ -557,7 +573,8 @@ static int acpi_button_remove(struct acpi_device *device)
return 0;
}
-static int param_set_lid_init_state(const char *val, struct kernel_param *kp)
+static int param_set_lid_init_state(const char *val,
+ const struct kernel_param *kp)
{
int result = 0;
@@ -575,7 +592,8 @@ static int param_set_lid_init_state(const char *val, struct kernel_param *kp)
return result;
}
-static int param_get_lid_init_state(char *buffer, struct kernel_param *kp)
+static int param_get_lid_init_state(char *buffer,
+ const struct kernel_param *kp)
{
switch (lid_init_state) {
case ACPI_BUTTON_LID_INIT_OPEN:
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index e5b47f032d9a..21c28433c590 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -48,7 +48,6 @@
struct cppc_pcc_data {
struct mbox_chan *pcc_channel;
void __iomem *pcc_comm_addr;
- int pcc_subspace_idx;
bool pcc_channel_acquired;
ktime_t deadline;
unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
@@ -75,13 +74,16 @@ struct cppc_pcc_data {
/* Wait queue for CPUs whose requests were batched */
wait_queue_head_t pcc_write_wait_q;
+ ktime_t last_cmd_cmpl_time;
+ ktime_t last_mpar_reset;
+ int mpar_count;
+ int refcount;
};
-/* Structure to represent the single PCC channel */
-static struct cppc_pcc_data pcc_data = {
- .pcc_subspace_idx = -1,
- .platform_owns_pcc = true,
-};
+/* Array to represent the PCC channel per subspace id */
+static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
+/* The cpu_pcc_subspace_idx contains the per-CPU PCC subspace id */
+static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
/*
* The cpc_desc structure contains the ACPI register details
@@ -93,7 +95,8 @@ static struct cppc_pcc_data pcc_data = {
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
/* pcc mapped address + header size + offset within PCC subspace */
-#define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs))
+#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
+ 0x8 + (offs))
/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
@@ -188,13 +191,16 @@ static struct kobj_type cppc_ktype = {
.default_attrs = cppc_attrs,
};
-static int check_pcc_chan(bool chk_err_bit)
+static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
int ret = -EIO, status = 0;
- struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_data.pcc_comm_addr;
- ktime_t next_deadline = ktime_add(ktime_get(), pcc_data.deadline);
+ struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ pcc_ss_data->pcc_comm_addr;
+ ktime_t next_deadline = ktime_add(ktime_get(),
+ pcc_ss_data->deadline);
- if (!pcc_data.platform_owns_pcc)
+ if (!pcc_ss_data->platform_owns_pcc)
return 0;
/* Retry in case the remote processor was too slow to catch up. */
@@ -219,7 +225,7 @@ static int check_pcc_chan(bool chk_err_bit)
}
if (likely(!ret))
- pcc_data.platform_owns_pcc = false;
+ pcc_ss_data->platform_owns_pcc = false;
else
pr_err("PCC check channel failed. Status=%x\n", status);
@@ -230,13 +236,12 @@ static int check_pcc_chan(bool chk_err_bit)
* This function transfers the ownership of the PCC to the platform
* So it must be called while holding write_lock(pcc_lock)
*/
-static int send_pcc_cmd(u16 cmd)
+static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
int ret = -EIO, i;
+ struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
struct acpi_pcct_shared_memory *generic_comm_base =
- (struct acpi_pcct_shared_memory *) pcc_data.pcc_comm_addr;
- static ktime_t last_cmd_cmpl_time, last_mpar_reset;
- static int mpar_count;
+ (struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
unsigned int time_delta;
/*
@@ -249,24 +254,25 @@ static int send_pcc_cmd(u16 cmd)
* before write completion, so first send a WRITE command to
* platform
*/
- if (pcc_data.pending_pcc_write_cmd)
- send_pcc_cmd(CMD_WRITE);
+ if (pcc_ss_data->pending_pcc_write_cmd)
+ send_pcc_cmd(pcc_ss_id, CMD_WRITE);
- ret = check_pcc_chan(false);
+ ret = check_pcc_chan(pcc_ss_id, false);
if (ret)
goto end;
} else /* CMD_WRITE */
- pcc_data.pending_pcc_write_cmd = FALSE;
+ pcc_ss_data->pending_pcc_write_cmd = FALSE;
/*
* Handle the Minimum Request Turnaround Time(MRTT)
* "The minimum amount of time that OSPM must wait after the completion
* of a command before issuing the next command, in microseconds"
*/
- if (pcc_data.pcc_mrtt) {
- time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
- if (pcc_data.pcc_mrtt > time_delta)
- udelay(pcc_data.pcc_mrtt - time_delta);
+ if (pcc_ss_data->pcc_mrtt) {
+ time_delta = ktime_us_delta(ktime_get(),
+ pcc_ss_data->last_cmd_cmpl_time);
+ if (pcc_ss_data->pcc_mrtt > time_delta)
+ udelay(pcc_ss_data->pcc_mrtt - time_delta);
}
/*
@@ -280,18 +286,19 @@ static int send_pcc_cmd(u16 cmd)
* not send the request to the platform after hitting the MPAR limit in
* any 60s window
*/
- if (pcc_data.pcc_mpar) {
- if (mpar_count == 0) {
- time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
- if (time_delta < 60 * MSEC_PER_SEC) {
+ if (pcc_ss_data->pcc_mpar) {
+ if (pcc_ss_data->mpar_count == 0) {
+ time_delta = ktime_ms_delta(ktime_get(),
+ pcc_ss_data->last_mpar_reset);
+ if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
pr_debug("PCC cmd not sent due to MPAR limit");
ret = -EIO;
goto end;
}
- last_mpar_reset = ktime_get();
- mpar_count = pcc_data.pcc_mpar;
+ pcc_ss_data->last_mpar_reset = ktime_get();
+ pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
}
- mpar_count--;
+ pcc_ss_data->mpar_count--;
}
/* Write to the shared comm region. */
@@ -300,10 +307,10 @@ static int send_pcc_cmd(u16 cmd)
/* Flip CMD COMPLETE bit */
writew_relaxed(0, &generic_comm_base->status);
- pcc_data.platform_owns_pcc = true;
+ pcc_ss_data->platform_owns_pcc = true;
/* Ring doorbell */
- ret = mbox_send_message(pcc_data.pcc_channel, &cmd);
+ ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
if (ret < 0) {
pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
cmd, ret);
@@ -311,15 +318,15 @@ static int send_pcc_cmd(u16 cmd)
}
/* wait for completion and check for PCC error bit */
- ret = check_pcc_chan(true);
+ ret = check_pcc_chan(pcc_ss_id, true);
- if (pcc_data.pcc_mrtt)
- last_cmd_cmpl_time = ktime_get();
+ if (pcc_ss_data->pcc_mrtt)
+ pcc_ss_data->last_cmd_cmpl_time = ktime_get();
- if (pcc_data.pcc_channel->mbox->txdone_irq)
- mbox_chan_txdone(pcc_data.pcc_channel, ret);
+ if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
+ mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
else
- mbox_client_txdone(pcc_data.pcc_channel, ret);
+ mbox_client_txdone(pcc_ss_data->pcc_channel, ret);
end:
if (cmd == CMD_WRITE) {
@@ -329,12 +336,12 @@ end:
if (!desc)
continue;
- if (desc->write_cmd_id == pcc_data.pcc_write_cnt)
+ if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
desc->write_cmd_status = ret;
}
}
- pcc_data.pcc_write_cnt++;
- wake_up_all(&pcc_data.pcc_write_wait_q);
+ pcc_ss_data->pcc_write_cnt++;
+ wake_up_all(&pcc_ss_data->pcc_write_wait_q);
}
return ret;
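
For clarity, the MPAR handling above amounts to a per-subspace budget of pcc_mpar commands per 60-second window. A condensed, hypothetical restatement of that check (illustrative helper, not part of this patch, using the cppc_pcc_data fields introduced above):

    static bool mpar_budget_available(struct cppc_pcc_data *d)
    {
            if (!d->pcc_mpar)
                    return true;    /* platform advertised no rate limit */

            if (d->mpar_count == 0) {
                    /* Budget exhausted: refuse if the 60s window is still open */
                    if (d->last_mpar_reset &&
                        ktime_ms_delta(ktime_get(), d->last_mpar_reset) <
                                        60 * MSEC_PER_SEC)
                            return false;
                    /* Otherwise start a new window with a fresh budget */
                    d->last_mpar_reset = ktime_get();
                    d->mpar_count = d->pcc_mpar;
            }
            d->mpar_count--;
            return true;
    }
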
@@ -536,16 +543,16 @@ err_ret:
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
-static int register_pcc_channel(int pcc_subspace_idx)
+static int register_pcc_channel(int pcc_ss_idx)
{
struct acpi_pcct_hw_reduced *cppc_ss;
u64 usecs_lat;
- if (pcc_subspace_idx >= 0) {
- pcc_data.pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
- pcc_subspace_idx);
+ if (pcc_ss_idx >= 0) {
+ pcc_data[pcc_ss_idx]->pcc_channel =
+ pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
- if (IS_ERR(pcc_data.pcc_channel)) {
+ if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
pr_err("Failed to find PCC communication channel\n");
return -ENODEV;
}
@@ -556,7 +563,7 @@ static int register_pcc_channel(int pcc_subspace_idx)
* PCC channels) and stored pointers to the
* subspace communication region in con_priv.
*/
- cppc_ss = (pcc_data.pcc_channel)->con_priv;
+ cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
if (!cppc_ss) {
pr_err("No PCC subspace found for CPPC\n");
@@ -569,19 +576,20 @@ static int register_pcc_channel(int pcc_subspace_idx)
* So add an arbitrary amount of wait on top of Nominal.
*/
usecs_lat = NUM_RETRIES * cppc_ss->latency;
- pcc_data.deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
- pcc_data.pcc_mrtt = cppc_ss->min_turnaround_time;
- pcc_data.pcc_mpar = cppc_ss->max_access_rate;
- pcc_data.pcc_nominal = cppc_ss->latency;
-
- pcc_data.pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
- if (!pcc_data.pcc_comm_addr) {
+ pcc_data[pcc_ss_idx]->deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
+ pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
+ pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
+ pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
+
+ pcc_data[pcc_ss_idx]->pcc_comm_addr =
+ acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
+ if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
pr_err("Failed to ioremap PCC comm region mem\n");
return -ENOMEM;
}
/* Set flag so that we don't come here for each CPU. */
- pcc_data.pcc_channel_acquired = true;
+ pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
}
return 0;
@@ -600,6 +608,34 @@ bool __weak cpc_ffh_supported(void)
return false;
}
+
+/**
+ * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
+ *
+ * Check and allocate the cppc_pcc_data memory.
+ * In some processor configurations it is possible that the same subspace
+ * is shared between multiple CPUs. This is seen especially in CPUs
+ * with hardware multi-threading support.
+ *
+ * Return: 0 for success, errno for failure
+ */
+int pcc_data_alloc(int pcc_ss_id)
+{
+ if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
+ return -EINVAL;
+
+ if (pcc_data[pcc_ss_id]) {
+ pcc_data[pcc_ss_id]->refcount++;
+ } else {
+ pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
+ GFP_KERNEL);
+ if (!pcc_data[pcc_ss_id])
+ return -ENOMEM;
+ pcc_data[pcc_ss_id]->refcount++;
+ }
+
+ return 0;
+}
/*
* An example CPC table looks like the following.
*
@@ -661,6 +697,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
struct device *cpu_dev;
acpi_handle handle = pr->handle;
unsigned int num_ent, i, cpc_rev;
+ int pcc_subspace_id = -1;
acpi_status status;
int ret = -EFAULT;
@@ -733,9 +770,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
* so extract it only once.
*/
if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
- if (pcc_data.pcc_subspace_idx < 0)
- pcc_data.pcc_subspace_idx = gas_t->access_width;
- else if (pcc_data.pcc_subspace_idx != gas_t->access_width) {
+ if (pcc_subspace_id < 0) {
+ pcc_subspace_id = gas_t->access_width;
+ if (pcc_data_alloc(pcc_subspace_id))
+ goto out_free;
+ } else if (pcc_subspace_id != gas_t->access_width) {
pr_debug("Mismatched PCC ids.\n");
goto out_free;
}
@@ -763,6 +802,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
}
+ per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
/* Store CPU Logical ID */
cpc_ptr->cpu_id = pr->id;
@@ -771,14 +811,14 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
if (ret)
goto out_free;
- /* Register PCC channel once for all CPUs. */
- if (!pcc_data.pcc_channel_acquired) {
- ret = register_pcc_channel(pcc_data.pcc_subspace_idx);
+ /* Register PCC channel once per PCC subspace id. */
+ if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
+ ret = register_pcc_channel(pcc_subspace_id);
if (ret)
goto out_free;
- init_rwsem(&pcc_data.pcc_lock);
- init_waitqueue_head(&pcc_data.pcc_write_wait_q);
+ init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
+ init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
}
/* Everything looks okay */
@@ -831,6 +871,18 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
struct cpc_desc *cpc_ptr;
unsigned int i;
void __iomem *addr;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
+
+ if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
+ if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
+ pcc_data[pcc_ss_id]->refcount--;
+ if (!pcc_data[pcc_ss_id]->refcount) {
+ pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
+ pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
+ kfree(pcc_data[pcc_ss_id]);
+ }
+ }
+ }
cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
if (!cpc_ptr)
@@ -888,6 +940,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
int ret_val = 0;
void __iomem *vaddr = 0;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
if (reg_res->type == ACPI_TYPE_INTEGER) {
@@ -897,7 +950,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = 0;
if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
- vaddr = GET_PCC_VADDR(reg->address);
+ vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
@@ -932,10 +985,11 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
int ret_val = 0;
void __iomem *vaddr = 0;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
- vaddr = GET_PCC_VADDR(reg->address);
+ vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
@@ -980,6 +1034,8 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
struct cpc_register_resource *highest_reg, *lowest_reg,
*lowest_non_linear_reg, *nominal_reg;
u64 high, low, nom, min_nonlinear;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+ struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
int ret = 0, regs_in_pcc = 0;
if (!cpc_desc) {
@@ -996,9 +1052,9 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg)) {
regs_in_pcc = 1;
- down_write(&pcc_data.pcc_lock);
+ down_write(&pcc_ss_data->pcc_lock);
/* Ring doorbell once to update PCC subspace */
- if (send_pcc_cmd(CMD_READ) < 0) {
+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
ret = -EIO;
goto out_err;
}
@@ -1021,7 +1077,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
out_err:
if (regs_in_pcc)
- up_write(&pcc_data.pcc_lock);
+ up_write(&pcc_ss_data->pcc_lock);
return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
@@ -1038,6 +1094,8 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
struct cpc_register_resource *delivered_reg, *reference_reg,
*ref_perf_reg, *ctr_wrap_reg;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+ struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
u64 delivered, reference, ref_perf, ctr_wrap_time;
int ret = 0, regs_in_pcc = 0;
@@ -1061,10 +1119,10 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
/* Are any of the regs PCC? */
if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
- down_write(&pcc_data.pcc_lock);
+ down_write(&pcc_ss_data->pcc_lock);
regs_in_pcc = 1;
/* Ring doorbell once to update PCC subspace */
- if (send_pcc_cmd(CMD_READ) < 0) {
+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
ret = -EIO;
goto out_err;
}
@@ -1094,7 +1152,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
if (regs_in_pcc)
- up_write(&pcc_data.pcc_lock);
+ up_write(&pcc_ss_data->pcc_lock);
return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
@@ -1110,6 +1168,8 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cpc_register_resource *desired_reg;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
int ret = 0;
if (!cpc_desc) {
@@ -1127,11 +1187,11 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
* achieve that goal here
*/
if (CPC_IN_PCC(desired_reg)) {
- down_read(&pcc_data.pcc_lock); /* BEGIN Phase-I */
- if (pcc_data.platform_owns_pcc) {
- ret = check_pcc_chan(false);
+ down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
+ if (pcc_ss_data->platform_owns_pcc) {
+ ret = check_pcc_chan(pcc_ss_id, false);
if (ret) {
- up_read(&pcc_data.pcc_lock);
+ up_read(&pcc_ss_data->pcc_lock);
return ret;
}
}
@@ -1139,8 +1199,8 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
* Update the pending_write to make sure a PCC CMD_READ will not
* arrive and steal the channel during the switch to write lock
*/
- pcc_data.pending_pcc_write_cmd = true;
- cpc_desc->write_cmd_id = pcc_data.pcc_write_cnt;
+ pcc_ss_data->pending_pcc_write_cmd = true;
+ cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
cpc_desc->write_cmd_status = 0;
}
@@ -1151,7 +1211,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
if (CPC_IN_PCC(desired_reg))
- up_read(&pcc_data.pcc_lock); /* END Phase-I */
+ up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */
/*
* This is Phase-II where we transfer the ownership of PCC to Platform
*
@@ -1199,15 +1259,15 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
* the write command before servicing the read command
*/
if (CPC_IN_PCC(desired_reg)) {
- if (down_write_trylock(&pcc_data.pcc_lock)) { /* BEGIN Phase-II */
+ if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
/* Update only if there are pending write commands */
- if (pcc_data.pending_pcc_write_cmd)
- send_pcc_cmd(CMD_WRITE);
- up_write(&pcc_data.pcc_lock); /* END Phase-II */
+ if (pcc_ss_data->pending_pcc_write_cmd)
+ send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+ up_write(&pcc_ss_data->pcc_lock); /* END Phase-II */
} else
/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
- wait_event(pcc_data.pcc_write_wait_q,
- cpc_desc->write_cmd_id != pcc_data.pcc_write_cnt);
+ wait_event(pcc_ss_data->pcc_write_wait_q,
+ cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
/* send_pcc_cmd updates the status in case of failure */
ret = cpc_desc->write_cmd_status;
@@ -1240,6 +1300,8 @@ unsigned int cppc_get_transition_latency(int cpu_num)
unsigned int latency_ns = 0;
struct cpc_desc *cpc_desc;
struct cpc_register_resource *desired_reg;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
+ struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
if (!cpc_desc)
@@ -1249,11 +1311,11 @@ unsigned int cppc_get_transition_latency(int cpu_num)
if (!CPC_IN_PCC(desired_reg))
return CPUFREQ_ETERNAL;
- if (pcc_data.pcc_mpar)
- latency_ns = 60 * (1000 * 1000 * 1000 / pcc_data.pcc_mpar);
+ if (pcc_ss_data->pcc_mpar)
+ latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
- latency_ns = max(latency_ns, pcc_data.pcc_nominal * 1000);
- latency_ns = max(latency_ns, pcc_data.pcc_mrtt * 1000);
+ latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
+ latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
return latency_ns;
}
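
The cppc_acpi.c hunks above replace the single global pcc_data with a per-subspace-id array guarded by a refcount, so CPUs that share one PCC subspace allocate the state once and free it only when the last user goes away. A minimal standalone sketch of that allocate/release pattern, with hypothetical names and limits rather than the kernel's:

#include <stdlib.h>

#define MAX_SUBSPACES 256			/* hypothetical limit */

struct ss_data {
	int refcount;
	/* per-subspace channel state would live here */
};

static struct ss_data *ss_table[MAX_SUBSPACES];

/* First user of a subspace id allocates; later users only bump the count. */
static int ss_get(int id)
{
	if (id < 0 || id >= MAX_SUBSPACES)
		return -1;
	if (!ss_table[id]) {
		ss_table[id] = calloc(1, sizeof(*ss_table[id]));
		if (!ss_table[id])
			return -1;
	}
	ss_table[id]->refcount++;
	return 0;
}

/* Last user of a subspace id releases the shared state. */
static void ss_put(int id)
{
	if (id < 0 || id >= MAX_SUBSPACES || !ss_table[id])
		return;
	if (--ss_table[id]->refcount == 0) {
		free(ss_table[id]);
		ss_table[id] = NULL;
	}
}
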
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index fbcc73f7a099..e4ffaeec9ec2 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -387,6 +387,7 @@ EXPORT_SYMBOL(acpi_bus_power_manageable);
#ifdef CONFIG_PM
static DEFINE_MUTEX(acpi_pm_notifier_lock);
+static DEFINE_MUTEX(acpi_pm_notifier_install_lock);
void acpi_pm_wakeup_event(struct device *dev)
{
@@ -443,24 +444,25 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
if (!dev && !func)
return AE_BAD_PARAMETER;
- mutex_lock(&acpi_pm_notifier_lock);
+ mutex_lock(&acpi_pm_notifier_install_lock);
if (adev->wakeup.flags.notifier_present)
goto out;
- adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
- adev->wakeup.context.dev = dev;
- adev->wakeup.context.func = func;
-
status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
acpi_pm_notify_handler, NULL);
if (ACPI_FAILURE(status))
goto out;
+ mutex_lock(&acpi_pm_notifier_lock);
+ adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
+ adev->wakeup.context.dev = dev;
+ adev->wakeup.context.func = func;
adev->wakeup.flags.notifier_present = true;
+ mutex_unlock(&acpi_pm_notifier_lock);
out:
- mutex_unlock(&acpi_pm_notifier_lock);
+ mutex_unlock(&acpi_pm_notifier_install_lock);
return status;
}
@@ -472,7 +474,7 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
{
acpi_status status = AE_BAD_PARAMETER;
- mutex_lock(&acpi_pm_notifier_lock);
+ mutex_lock(&acpi_pm_notifier_install_lock);
if (!adev->wakeup.flags.notifier_present)
goto out;
@@ -483,14 +485,15 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
if (ACPI_FAILURE(status))
goto out;
+ mutex_lock(&acpi_pm_notifier_lock);
adev->wakeup.context.func = NULL;
adev->wakeup.context.dev = NULL;
wakeup_source_unregister(adev->wakeup.ws);
-
adev->wakeup.flags.notifier_present = false;
+ mutex_unlock(&acpi_pm_notifier_lock);
out:
- mutex_unlock(&acpi_pm_notifier_lock);
+ mutex_unlock(&acpi_pm_notifier_install_lock);
return status;
}
@@ -581,8 +584,7 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
d_min = ret;
wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid
&& adev->wakeup.sleep_state >= target_state;
- } else if (dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) !=
- PM_QOS_FLAGS_NONE) {
+ } else {
wakeup = adev->wakeup.flags.valid;
}
@@ -848,48 +850,48 @@ static int acpi_dev_pm_full_power(struct acpi_device *adev)
}
/**
- * acpi_dev_runtime_suspend - Put device into a low-power state using ACPI.
+ * acpi_dev_suspend - Put device into a low-power state using ACPI.
* @dev: Device to put into a low-power state.
+ * @wakeup: Whether or not to enable wakeup for the device.
*
- * Put the given device into a runtime low-power state using the standard ACPI
+ * Put the given device into a low-power state using the standard ACPI
* mechanism. Set up remote wakeup if desired, choose the state to put the
* device into (this checks if remote wakeup is expected to work too), and set
* the power state of the device.
*/
-int acpi_dev_runtime_suspend(struct device *dev)
+int acpi_dev_suspend(struct device *dev, bool wakeup)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
- bool remote_wakeup;
+ u32 target_state = acpi_target_system_state();
int error;
if (!adev)
return 0;
- remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) >
- PM_QOS_FLAGS_NONE;
- if (remote_wakeup) {
- error = acpi_device_wakeup_enable(adev, ACPI_STATE_S0);
+ if (wakeup && acpi_device_can_wakeup(adev)) {
+ error = acpi_device_wakeup_enable(adev, target_state);
if (error)
return -EAGAIN;
+ } else {
+ wakeup = false;
}
- error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
- if (error && remote_wakeup)
+ error = acpi_dev_pm_low_power(dev, adev, target_state);
+ if (error && wakeup)
acpi_device_wakeup_disable(adev);
return error;
}
-EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend);
+EXPORT_SYMBOL_GPL(acpi_dev_suspend);
/**
- * acpi_dev_runtime_resume - Put device into the full-power state using ACPI.
+ * acpi_dev_resume - Put device into the full-power state using ACPI.
* @dev: Device to put into the full-power state.
*
* Put the given device into the full-power state using the standard ACPI
- * mechanism at run time. Set the power state of the device to ACPI D0 and
- * disable remote wakeup.
+ * mechanism. Set the power state of the device to ACPI D0 and disable wakeup.
*/
-int acpi_dev_runtime_resume(struct device *dev)
+int acpi_dev_resume(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
int error;
@@ -901,7 +903,7 @@ int acpi_dev_runtime_resume(struct device *dev)
acpi_device_wakeup_disable(adev);
return error;
}
-EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
+EXPORT_SYMBOL_GPL(acpi_dev_resume);
/**
* acpi_subsys_runtime_suspend - Suspend device using ACPI.
@@ -913,7 +915,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
int acpi_subsys_runtime_suspend(struct device *dev)
{
int ret = pm_generic_runtime_suspend(dev);
- return ret ? ret : acpi_dev_runtime_suspend(dev);
+ return ret ? ret : acpi_dev_suspend(dev, true);
}
EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend);
@@ -926,68 +928,33 @@ EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend);
*/
int acpi_subsys_runtime_resume(struct device *dev)
{
- int ret = acpi_dev_runtime_resume(dev);
+ int ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_runtime_resume(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume);
#ifdef CONFIG_PM_SLEEP
-/**
- * acpi_dev_suspend_late - Put device into a low-power state using ACPI.
- * @dev: Device to put into a low-power state.
- *
- * Put the given device into a low-power state during system transition to a
- * sleep state using the standard ACPI mechanism. Set up system wakeup if
- * desired, choose the state to put the device into (this checks if system
- * wakeup is expected to work too), and set the power state of the device.
- */
-int acpi_dev_suspend_late(struct device *dev)
+static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev)
{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- u32 target_state;
- bool wakeup;
- int error;
-
- if (!adev)
- return 0;
-
- target_state = acpi_target_system_state();
- wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev);
- if (wakeup) {
- error = acpi_device_wakeup_enable(adev, target_state);
- if (error)
- return error;
- }
+ u32 sys_target = acpi_target_system_state();
+ int ret, state;
- error = acpi_dev_pm_low_power(dev, adev, target_state);
- if (error && wakeup)
- acpi_device_wakeup_disable(adev);
+ if (!pm_runtime_suspended(dev) || !adev ||
+ device_may_wakeup(dev) != !!adev->wakeup.prepare_count)
+ return true;
- return error;
-}
-EXPORT_SYMBOL_GPL(acpi_dev_suspend_late);
+ if (sys_target == ACPI_STATE_S0)
+ return false;
-/**
- * acpi_dev_resume_early - Put device into the full-power state using ACPI.
- * @dev: Device to put into the full-power state.
- *
- * Put the given device into the full-power state using the standard ACPI
- * mechanism during system transition to the working state. Set the power
- * state of the device to ACPI D0 and disable remote wakeup.
- */
-int acpi_dev_resume_early(struct device *dev)
-{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- int error;
+ if (adev->power.flags.dsw_present)
+ return true;
- if (!adev)
- return 0;
+ ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state);
+ if (ret)
+ return true;
- error = acpi_dev_pm_full_power(adev);
- acpi_device_wakeup_disable(adev);
- return error;
+ return state != adev->power.state;
}
-EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
/**
* acpi_subsys_prepare - Prepare device for system transition to a sleep state.
@@ -996,39 +963,53 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
int acpi_subsys_prepare(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
- u32 sys_target;
- int ret, state;
- ret = pm_generic_prepare(dev);
- if (ret < 0)
- return ret;
-
- if (!adev || !pm_runtime_suspended(dev)
- || device_may_wakeup(dev) != !!adev->wakeup.prepare_count)
- return 0;
+ if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) {
+ int ret = dev->driver->pm->prepare(dev);
- sys_target = acpi_target_system_state();
- if (sys_target == ACPI_STATE_S0)
- return 1;
+ if (ret < 0)
+ return ret;
- if (adev->power.flags.dsw_present)
- return 0;
+ if (!ret && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
+ return 0;
+ }
- ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state);
- return !ret && state == adev->power.state;
+ return !acpi_dev_needs_resume(dev, adev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
/**
+ * acpi_subsys_complete - Finalize device's resume during system resume.
+ * @dev: Device to handle.
+ */
+void acpi_subsys_complete(struct device *dev)
+{
+ pm_generic_complete(dev);
+ /*
+ * If the device had been runtime-suspended before the system went into
+ * the sleep state it is going out of and it has never been resumed till
+ * now, resume it in case the firmware powered it up.
+ */
+ if (dev->power.direct_complete && pm_resume_via_firmware())
+ pm_request_resume(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_complete);
+
+/**
* acpi_subsys_suspend - Run the device driver's suspend callback.
* @dev: Device to handle.
*
- * Follow PCI and resume devices suspended at run time before running their
- * system suspend callbacks.
+ * Follow PCI and resume devices from runtime suspend before running their
+ * system suspend callbacks, unless the driver can cope with runtime-suspended
+ * devices during system suspend and there are no ACPI-specific reasons for
+ * resuming them.
*/
int acpi_subsys_suspend(struct device *dev)
{
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
+ pm_runtime_resume(dev);
+
return pm_generic_suspend(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend);
@@ -1042,12 +1023,48 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend);
*/
int acpi_subsys_suspend_late(struct device *dev)
{
- int ret = pm_generic_suspend_late(dev);
- return ret ? ret : acpi_dev_suspend_late(dev);
+ int ret;
+
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ ret = pm_generic_suspend_late(dev);
+ return ret ? ret : acpi_dev_suspend(dev, device_may_wakeup(dev));
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
/**
+ * acpi_subsys_suspend_noirq - Run the device driver's "noirq" suspend callback.
+ * @dev: Device to suspend.
+ */
+int acpi_subsys_suspend_noirq(struct device *dev)
+{
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ return pm_generic_suspend_noirq(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
+
+/**
+ * acpi_subsys_resume_noirq - Run the device driver's "noirq" resume callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_resume_noirq(struct device *dev)
+{
+ /*
+ * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
+ * during system suspend, so update their runtime PM status to "active"
+ * as they will be put into D0 going forward.
+ */
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ pm_runtime_set_active(dev);
+
+ return pm_generic_resume_noirq(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_resume_noirq);
+
+/**
* acpi_subsys_resume_early - Resume device using ACPI.
* @dev: Device to Resume.
*
@@ -1057,7 +1074,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
*/
int acpi_subsys_resume_early(struct device *dev)
{
- int ret = acpi_dev_resume_early(dev);
+ int ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
@@ -1074,11 +1091,60 @@ int acpi_subsys_freeze(struct device *dev)
* runtime-suspended devices should not be touched during freeze/thaw
* transitions.
*/
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ pm_runtime_resume(dev);
+
return pm_generic_freeze(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
+/**
+ * acpi_subsys_freeze_late - Run the device driver's "late" freeze callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_freeze_late(struct device *dev)
+{
+
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ return pm_generic_freeze_late(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_freeze_late);
+
+/**
+ * acpi_subsys_freeze_noirq - Run the device driver's "noirq" freeze callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_freeze_noirq(struct device *dev)
+{
+
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ return pm_generic_freeze_noirq(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_freeze_noirq);
+
+/**
+ * acpi_subsys_thaw_noirq - Run the device driver's "noirq" thaw callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_thaw_noirq(struct device *dev)
+{
+ /*
+ * If the device is in runtime suspend, the "thaw" code may not work
+ * correctly with it, so skip the driver callback and make the PM core
+ * skip all of the subsequent "thaw" callbacks for the device.
+ */
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.direct_complete = true;
+ return 0;
+ }
+
+ return pm_generic_thaw_noirq(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_thaw_noirq);
#endif /* CONFIG_PM_SLEEP */
static struct dev_pm_domain acpi_general_pm_domain = {
@@ -1087,13 +1153,20 @@ static struct dev_pm_domain acpi_general_pm_domain = {
.runtime_resume = acpi_subsys_runtime_resume,
#ifdef CONFIG_PM_SLEEP
.prepare = acpi_subsys_prepare,
- .complete = pm_complete_with_resume_check,
+ .complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
.suspend_late = acpi_subsys_suspend_late,
+ .suspend_noirq = acpi_subsys_suspend_noirq,
+ .resume_noirq = acpi_subsys_resume_noirq,
.resume_early = acpi_subsys_resume_early,
.freeze = acpi_subsys_freeze,
+ .freeze_late = acpi_subsys_freeze_late,
+ .freeze_noirq = acpi_subsys_freeze_noirq,
+ .thaw_noirq = acpi_subsys_thaw_noirq,
.poweroff = acpi_subsys_suspend,
.poweroff_late = acpi_subsys_suspend_late,
+ .poweroff_noirq = acpi_subsys_suspend_noirq,
+ .restore_noirq = acpi_subsys_resume_noirq,
.restore_early = acpi_subsys_resume_early,
#endif
},
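
The device_pm.c change splits the single acpi_pm_notifier_lock into an outer lock that serializes handler install/remove and an inner lock that only guards the wakeup context the notify handler reads. A rough user-space sketch of that split-lock pattern, with hypothetical names and pthread mutexes standing in for kernel mutexes:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_mutex_t install_lock = PTHREAD_MUTEX_INITIALIZER;	/* register/unregister */
static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;		/* handler-visible state */

static struct {
	void (*func)(void *ctx);
	void *ctx;
	bool present;
} notifier;

static int notifier_register(void (*func)(void *ctx), void *ctx)
{
	pthread_mutex_lock(&install_lock);
	if (notifier.present)
		goto out;
	/* install the low-level handler here; this step may block */
	pthread_mutex_lock(&data_lock);
	notifier.func = func;
	notifier.ctx = ctx;
	notifier.present = true;
	pthread_mutex_unlock(&data_lock);
out:
	pthread_mutex_unlock(&install_lock);
	return 0;
}

/* The notification path takes only data_lock, so it never waits on a
 * registration that is blocked while installing the handler. */
static void notifier_fire(void)
{
	pthread_mutex_lock(&data_lock);
	if (notifier.present)
		notifier.func(notifier.ctx);
	pthread_mutex_unlock(&data_lock);
}
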
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 2305e1ab978e..e3fc1f045e1c 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -482,6 +482,7 @@ int dock_notify(struct acpi_device *adev, u32 event)
surprise_removal = 1;
event = ACPI_NOTIFY_EJECT_REQUEST;
/* Fall back */
+ /* fall through */
case ACPI_NOTIFY_EJECT_REQUEST:
begin_undock(ds);
if ((immediate_undock && !(ds->flags & DOCK_IS_ATA))
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 236b14324780..da176c95aa2c 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -486,8 +486,11 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
{
if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
ec_log_drv("event unblocked");
- if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
- advance_transaction(ec);
+ /*
+ * Unconditionally invoke this once after enabling the event
+ * handling mechanism to detect the pending events.
+ */
+ advance_transaction(ec);
}
static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
@@ -1456,11 +1459,10 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
ec->reference_count >= 1)
acpi_ec_enable_gpe(ec, true);
-
- /* EC is fully operational, allow queries */
- acpi_ec_enable_event(ec);
}
}
+ /* EC is fully operational, allow queries */
+ acpi_ec_enable_event(ec);
return 0;
}
@@ -1939,7 +1941,8 @@ static const struct dev_pm_ops acpi_ec_pm = {
SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
-static int param_set_event_clearing(const char *val, struct kernel_param *kp)
+static int param_set_event_clearing(const char *val,
+ const struct kernel_param *kp)
{
int result = 0;
@@ -1957,7 +1960,8 @@ static int param_set_event_clearing(const char *val, struct kernel_param *kp)
return result;
}
-static int param_get_event_clearing(char *buffer, struct kernel_param *kp)
+static int param_get_event_clearing(char *buffer,
+ const struct kernel_param *kp)
{
switch (ec_event_clearing) {
case ACPI_EC_EVT_TIMING_STATUS:
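
The ec.c hunks above (and the later sysfs.c and binder.c ones) move module parameter callbacks to the const-qualified struct kernel_param prototype. A minimal sketch of a parameter using that prototype, with hypothetical demo_* names:

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static int demo_value;

static int demo_set(const char *val, const struct kernel_param *kp)
{
	/* parse the value written to /sys/module/<mod>/parameters/demo_param */
	return kstrtoint(val, 0, &demo_value);
}

static int demo_get(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%d", demo_value);
}

static const struct kernel_param_ops demo_ops = {
	.set = demo_set,
	.get = demo_get,
};

module_param_cb(demo_param, &demo_ops, &demo_value, 0644);
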
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 4361c4415b4f..fc8c43e76707 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -248,4 +248,10 @@ void acpi_watchdog_init(void);
static inline void acpi_watchdog_init(void) {}
#endif
+#ifdef CONFIG_ACPI_LPIT
+void acpi_init_lpit(void);
+#else
+static inline void acpi_init_lpit(void) { }
+#endif
+
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index db78d353bab1..3bb46cb24a99 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -663,6 +663,29 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
EXPORT_SYMBOL(acpi_os_write_port);
+int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
+{
+
+ switch (width) {
+ case 8:
+ *(u8 *) value = readb(virt_addr);
+ break;
+ case 16:
+ *(u16 *) value = readw(virt_addr);
+ break;
+ case 32:
+ *(u32 *) value = readl(virt_addr);
+ break;
+ case 64:
+ *(u64 *) value = readq(virt_addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
@@ -670,6 +693,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
unsigned int size = width / 8;
bool unmap = false;
u64 dummy;
+ int error;
rcu_read_lock();
virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
@@ -684,22 +708,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
if (!value)
value = &dummy;
- switch (width) {
- case 8:
- *(u8 *) value = readb(virt_addr);
- break;
- case 16:
- *(u16 *) value = readw(virt_addr);
- break;
- case 32:
- *(u32 *) value = readl(virt_addr);
- break;
- case 64:
- *(u64 *) value = readq(virt_addr);
- break;
- default:
- BUG();
- }
+ error = acpi_os_read_iomem(virt_addr, value, width);
+ BUG_ON(error);
if (unmap)
iounmap(virt_addr);
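
osl.c factors the width switch out into acpi_os_read_iomem(), which returns -EINVAL for unsupported widths so callers can handle the error instead of hitting BUG(). A hedged sketch of such a caller, assuming the helper's declaration is reachable from linux/acpi.h and using a hypothetical wrapper name:

#include <linux/acpi.h>
#include <linux/io.h>

/* Hypothetical wrapper: read a 32-bit register through the new helper and
 * propagate the error instead of crashing on a bad width. */
static int read_reg32(void __iomem *vaddr, u32 *out)
{
	u64 value;
	int err = acpi_os_read_iomem(vaddr, &value, 32);

	if (err)
		return err;	/* only 8/16/32/64-bit accesses are accepted */
	*out = (u32)value;
	return 0;
}
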
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
new file mode 100644
index 000000000000..109c1e9c9c7a
--- /dev/null
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -0,0 +1,137 @@
+/*
+ * Dollar Cove TI PMIC operation region driver
+ * Copyright (C) 2014 Intel Corporation. All rights reserved.
+ *
+ * Rewritten and cleaned up
+ * Copyright (C) 2017 Takashi Iwai <tiwai@suse.de>
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/platform_device.h>
+#include "intel_pmic.h"
+
+/* registers stored in 16-bit BE (high:low, total 10 bits) */
+#define CHTDC_TI_VBAT 0x54
+#define CHTDC_TI_DIETEMP 0x56
+#define CHTDC_TI_BPTHERM 0x58
+#define CHTDC_TI_GPADC 0x5a
+
+static struct pmic_table chtdc_ti_power_table[] = {
+ { .address = 0x00, .reg = 0x41 },
+ { .address = 0x04, .reg = 0x42 },
+ { .address = 0x08, .reg = 0x43 },
+ { .address = 0x0c, .reg = 0x45 },
+ { .address = 0x10, .reg = 0x46 },
+ { .address = 0x14, .reg = 0x47 },
+ { .address = 0x18, .reg = 0x48 },
+ { .address = 0x1c, .reg = 0x49 },
+ { .address = 0x20, .reg = 0x4a },
+ { .address = 0x24, .reg = 0x4b },
+ { .address = 0x28, .reg = 0x4c },
+ { .address = 0x2c, .reg = 0x4d },
+ { .address = 0x30, .reg = 0x4e },
+};
+
+static struct pmic_table chtdc_ti_thermal_table[] = {
+ {
+ .address = 0x00,
+ .reg = CHTDC_TI_GPADC
+ },
+ {
+ .address = 0x0c,
+ .reg = CHTDC_TI_GPADC
+ },
+ /* TMP2 -> SYSTEMP */
+ {
+ .address = 0x18,
+ .reg = CHTDC_TI_GPADC
+ },
+ /* TMP3 -> BPTHERM */
+ {
+ .address = 0x24,
+ .reg = CHTDC_TI_BPTHERM
+ },
+ {
+ .address = 0x30,
+ .reg = CHTDC_TI_GPADC
+ },
+ /* TMP5 -> DIETEMP */
+ {
+ .address = 0x3c,
+ .reg = CHTDC_TI_DIETEMP
+ },
+};
+
+static int chtdc_ti_pmic_get_power(struct regmap *regmap, int reg, int bit,
+ u64 *value)
+{
+ int data;
+
+ if (regmap_read(regmap, reg, &data))
+ return -EIO;
+
+ *value = data & 1;
+ return 0;
+}
+
+static int chtdc_ti_pmic_update_power(struct regmap *regmap, int reg, int bit,
+ bool on)
+{
+ return regmap_update_bits(regmap, reg, 1, on);
+}
+
+static int chtdc_ti_pmic_get_raw_temp(struct regmap *regmap, int reg)
+{
+ u8 buf[2];
+
+ if (regmap_bulk_read(regmap, reg, buf, 2))
+ return -EIO;
+
+ /* stored in big-endian */
+ return ((buf[0] & 0x03) << 8) | buf[1];
+}
+
+static struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = {
+ .get_power = chtdc_ti_pmic_get_power,
+ .update_power = chtdc_ti_pmic_update_power,
+ .get_raw_temp = chtdc_ti_pmic_get_raw_temp,
+ .power_table = chtdc_ti_power_table,
+ .power_table_count = ARRAY_SIZE(chtdc_ti_power_table),
+ .thermal_table = chtdc_ti_thermal_table,
+ .thermal_table_count = ARRAY_SIZE(chtdc_ti_thermal_table),
+};
+
+static int chtdc_ti_pmic_opregion_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ int err;
+
+ err = intel_pmic_install_opregion_handler(&pdev->dev,
+ ACPI_HANDLE(pdev->dev.parent), pmic->regmap,
+ &chtdc_ti_pmic_opregion_data);
+ if (err < 0)
+ return err;
+
+ /* Re-enumerate devices depending on PMIC */
+ acpi_walk_dep_device_list(ACPI_HANDLE(pdev->dev.parent));
+ return 0;
+}
+
+static const struct platform_device_id chtdc_ti_pmic_opregion_id_table[] = {
+ { .name = "chtdc_ti_region" },
+ {},
+};
+
+static struct platform_driver chtdc_ti_pmic_opregion_driver = {
+ .probe = chtdc_ti_pmic_opregion_probe,
+ .driver = {
+ .name = "cht_dollar_cove_ti_pmic",
+ },
+ .id_table = chtdc_ti_pmic_opregion_id_table,
+};
+module_platform_driver(chtdc_ti_pmic_opregion_driver);
+
+MODULE_DESCRIPTION("Dollar Cove TI PMIC opregion driver");
+MODULE_LICENSE("GPL v2");
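
The raw ADC registers above hold a 10-bit value spread big-endian across two bytes, and chtdc_ti_pmic_get_raw_temp() assembles it as ((buf[0] & 0x03) << 8) | buf[1]. A tiny worked example with made-up register contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t buf[2] = { 0x02, 0x9a };	/* hypothetical bulk-read result */
	int raw = ((buf[0] & 0x03) << 8) | buf[1];

	/* the high byte contributes only its low two bits: 0x02 -> 512, plus 0x9a -> 666 */
	printf("raw = %d (0x%03x)\n", raw, raw);
	return 0;
}
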
diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c
new file mode 100644
index 000000000000..7f3c567e8168
--- /dev/null
+++ b/drivers/acpi/pmic/tps68470_pmic.c
@@ -0,0 +1,455 @@
+/*
+ * TI TPS68470 PMIC operation region driver
+ *
+ * Copyright (C) 2017 Intel Corporation. All rights reserved.
+ *
+ * Author: Rajmohan Mani <rajmohan.mani@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Based on drivers/acpi/pmic/intel_pmic* drivers
+ */
+
+#include <linux/acpi.h>
+#include <linux/mfd/tps68470.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+struct tps68470_pmic_table {
+ u32 address; /* operation region address */
+ u32 reg; /* corresponding register */
+ u32 bitmask; /* bit mask for power, clock */
+};
+
+#define TI_PMIC_POWER_OPREGION_ID 0xB0
+#define TI_PMIC_VR_VAL_OPREGION_ID 0xB1
+#define TI_PMIC_CLOCK_OPREGION_ID 0xB2
+#define TI_PMIC_CLKFREQ_OPREGION_ID 0xB3
+
+struct tps68470_pmic_opregion {
+ struct mutex lock;
+ struct regmap *regmap;
+};
+
+#define S_IO_I2C_EN (BIT(0) | BIT(1))
+
+static const struct tps68470_pmic_table power_table[] = {
+ {
+ .address = 0x00,
+ .reg = TPS68470_REG_S_I2C_CTL,
+ .bitmask = S_IO_I2C_EN,
+ /* S_I2C_CTL */
+ },
+ {
+ .address = 0x04,
+ .reg = TPS68470_REG_VCMCTL,
+ .bitmask = BIT(0),
+ /* VCMCTL */
+ },
+ {
+ .address = 0x08,
+ .reg = TPS68470_REG_VAUX1CTL,
+ .bitmask = BIT(0),
+ /* VAUX1_CTL */
+ },
+ {
+ .address = 0x0C,
+ .reg = TPS68470_REG_VAUX2CTL,
+ .bitmask = BIT(0),
+ /* VAUX2CTL */
+ },
+ {
+ .address = 0x10,
+ .reg = TPS68470_REG_VACTL,
+ .bitmask = BIT(0),
+ /* VACTL */
+ },
+ {
+ .address = 0x14,
+ .reg = TPS68470_REG_VDCTL,
+ .bitmask = BIT(0),
+ /* VDCTL */
+ },
+};
+
+/* Table to set voltage regulator value */
+static const struct tps68470_pmic_table vr_val_table[] = {
+ {
+ .address = 0x00,
+ .reg = TPS68470_REG_VSIOVAL,
+ .bitmask = TPS68470_VSIOVAL_IOVOLT_MASK,
+ /* TPS68470_REG_VSIOVAL */
+ },
+ {
+ .address = 0x04,
+ .reg = TPS68470_REG_VIOVAL,
+ .bitmask = TPS68470_VIOVAL_IOVOLT_MASK,
+ /* TPS68470_REG_VIOVAL */
+ },
+ {
+ .address = 0x08,
+ .reg = TPS68470_REG_VCMVAL,
+ .bitmask = TPS68470_VCMVAL_VCVOLT_MASK,
+ /* TPS68470_REG_VCMVAL */
+ },
+ {
+ .address = 0x0C,
+ .reg = TPS68470_REG_VAUX1VAL,
+ .bitmask = TPS68470_VAUX1VAL_AUX1VOLT_MASK,
+ /* TPS68470_REG_VAUX1VAL */
+ },
+ {
+ .address = 0x10,
+ .reg = TPS68470_REG_VAUX2VAL,
+ .bitmask = TPS68470_VAUX2VAL_AUX2VOLT_MASK,
+ /* TPS68470_REG_VAUX2VAL */
+ },
+ {
+ .address = 0x14,
+ .reg = TPS68470_REG_VAVAL,
+ .bitmask = TPS68470_VAVAL_AVOLT_MASK,
+ /* TPS68470_REG_VAVAL */
+ },
+ {
+ .address = 0x18,
+ .reg = TPS68470_REG_VDVAL,
+ .bitmask = TPS68470_VDVAL_DVOLT_MASK,
+ /* TPS68470_REG_VDVAL */
+ },
+};
+
+/* Table to configure clock frequency */
+static const struct tps68470_pmic_table clk_freq_table[] = {
+ {
+ .address = 0x00,
+ .reg = TPS68470_REG_POSTDIV2,
+ .bitmask = BIT(0) | BIT(1),
+ /* TPS68470_REG_POSTDIV2 */
+ },
+ {
+ .address = 0x04,
+ .reg = TPS68470_REG_BOOSTDIV,
+ .bitmask = 0x1F,
+ /* TPS68470_REG_BOOSTDIV */
+ },
+ {
+ .address = 0x08,
+ .reg = TPS68470_REG_BUCKDIV,
+ .bitmask = 0x0F,
+ /* TPS68470_REG_BUCKDIV */
+ },
+ {
+ .address = 0x0C,
+ .reg = TPS68470_REG_PLLSWR,
+ .bitmask = 0x13,
+ /* TPS68470_REG_PLLSWR */
+ },
+ {
+ .address = 0x10,
+ .reg = TPS68470_REG_XTALDIV,
+ .bitmask = 0xFF,
+ /* TPS68470_REG_XTALDIV */
+ },
+ {
+ .address = 0x14,
+ .reg = TPS68470_REG_PLLDIV,
+ .bitmask = 0xFF,
+ /* TPS68470_REG_PLLDIV */
+ },
+ {
+ .address = 0x18,
+ .reg = TPS68470_REG_POSTDIV,
+ .bitmask = 0x83,
+ /* TPS68470_REG_POSTDIV */
+ },
+};
+
+/* Table to configure and enable clocks */
+static const struct tps68470_pmic_table clk_table[] = {
+ {
+ .address = 0x00,
+ .reg = TPS68470_REG_PLLCTL,
+ .bitmask = 0xF5,
+ /* TPS68470_REG_PLLCTL */
+ },
+ {
+ .address = 0x04,
+ .reg = TPS68470_REG_PLLCTL2,
+ .bitmask = BIT(0),
+ /* TPS68470_REG_PLLCTL2 */
+ },
+ {
+ .address = 0x08,
+ .reg = TPS68470_REG_CLKCFG1,
+ .bitmask = TPS68470_CLKCFG1_MODE_A_MASK |
+ TPS68470_CLKCFG1_MODE_B_MASK,
+ /* TPS68470_REG_CLKCFG1 */
+ },
+ {
+ .address = 0x0C,
+ .reg = TPS68470_REG_CLKCFG2,
+ .bitmask = TPS68470_CLKCFG1_MODE_A_MASK |
+ TPS68470_CLKCFG1_MODE_B_MASK,
+ /* TPS68470_REG_CLKCFG2 */
+ },
+};
+
+static int pmic_get_reg_bit(u64 address,
+ const struct tps68470_pmic_table *table,
+ const unsigned int table_size, int *reg,
+ int *bitmask)
+{
+ u64 i;
+
+ i = address / 4;
+ if (i >= table_size)
+ return -ENOENT;
+
+ if (!reg || !bitmask)
+ return -EINVAL;
+
+ *reg = table[i].reg;
+ *bitmask = table[i].bitmask;
+
+ return 0;
+}
+
+static int tps68470_pmic_get_power(struct regmap *regmap, int reg,
+ int bitmask, u64 *value)
+{
+ unsigned int data;
+
+ if (regmap_read(regmap, reg, &data))
+ return -EIO;
+
+ *value = (data & bitmask) ? 1 : 0;
+ return 0;
+}
+
+static int tps68470_pmic_get_vr_val(struct regmap *regmap, int reg,
+ int bitmask, u64 *value)
+{
+ unsigned int data;
+
+ if (regmap_read(regmap, reg, &data))
+ return -EIO;
+
+ *value = data & bitmask;
+ return 0;
+}
+
+static int tps68470_pmic_get_clk(struct regmap *regmap, int reg,
+ int bitmask, u64 *value)
+{
+ unsigned int data;
+
+ if (regmap_read(regmap, reg, &data))
+ return -EIO;
+
+ *value = (data & bitmask) ? 1 : 0;
+ return 0;
+}
+
+static int tps68470_pmic_get_clk_freq(struct regmap *regmap, int reg,
+ int bitmask, u64 *value)
+{
+ unsigned int data;
+
+ if (regmap_read(regmap, reg, &data))
+ return -EIO;
+
+ *value = data & bitmask;
+ return 0;
+}
+
+static int ti_tps68470_regmap_update_bits(struct regmap *regmap, int reg,
+ int bitmask, u64 value)
+{
+ return regmap_update_bits(regmap, reg, bitmask, value);
+}
+
+static acpi_status tps68470_pmic_common_handler(u32 function,
+ acpi_physical_address address,
+ u32 bits, u64 *value,
+ void *region_context,
+ int (*get)(struct regmap *,
+ int, int, u64 *),
+ int (*update)(struct regmap *,
+ int, int, u64),
+ const struct tps68470_pmic_table *tbl,
+ unsigned int tbl_size)
+{
+ struct tps68470_pmic_opregion *opregion = region_context;
+ struct regmap *regmap = opregion->regmap;
+ int reg, ret, bitmask;
+
+ if (bits != 32)
+ return AE_BAD_PARAMETER;
+
+ ret = pmic_get_reg_bit(address, tbl, tbl_size, &reg, &bitmask);
+ if (ret < 0)
+ return AE_BAD_PARAMETER;
+
+ if (function == ACPI_WRITE && *value > bitmask)
+ return AE_BAD_PARAMETER;
+
+ mutex_lock(&opregion->lock);
+
+ ret = (function == ACPI_READ) ?
+ get(regmap, reg, bitmask, value) :
+ update(regmap, reg, bitmask, *value);
+
+ mutex_unlock(&opregion->lock);
+
+ return ret ? AE_ERROR : AE_OK;
+}
+
+static acpi_status tps68470_pmic_cfreq_handler(u32 function,
+ acpi_physical_address address,
+ u32 bits, u64 *value,
+ void *handler_context,
+ void *region_context)
+{
+ return tps68470_pmic_common_handler(function, address, bits, value,
+ region_context,
+ tps68470_pmic_get_clk_freq,
+ ti_tps68470_regmap_update_bits,
+ clk_freq_table,
+ ARRAY_SIZE(clk_freq_table));
+}
+
+static acpi_status tps68470_pmic_clk_handler(u32 function,
+ acpi_physical_address address, u32 bits,
+ u64 *value, void *handler_context,
+ void *region_context)
+{
+ return tps68470_pmic_common_handler(function, address, bits, value,
+ region_context,
+ tps68470_pmic_get_clk,
+ ti_tps68470_regmap_update_bits,
+ clk_table,
+ ARRAY_SIZE(clk_table));
+}
+
+static acpi_status tps68470_pmic_vrval_handler(u32 function,
+ acpi_physical_address address,
+ u32 bits, u64 *value,
+ void *handler_context,
+ void *region_context)
+{
+ return tps68470_pmic_common_handler(function, address, bits, value,
+ region_context,
+ tps68470_pmic_get_vr_val,
+ ti_tps68470_regmap_update_bits,
+ vr_val_table,
+ ARRAY_SIZE(vr_val_table));
+}
+
+static acpi_status tps68470_pmic_pwr_handler(u32 function,
+ acpi_physical_address address,
+ u32 bits, u64 *value,
+ void *handler_context,
+ void *region_context)
+{
+ if (bits != 32)
+ return AE_BAD_PARAMETER;
+
+ /* set/clear for bit 0, bits 0 and 1 together */
+ if (function == ACPI_WRITE &&
+ !(*value == 0 || *value == 1 || *value == 3)) {
+ return AE_BAD_PARAMETER;
+ }
+
+ return tps68470_pmic_common_handler(function, address, bits, value,
+ region_context,
+ tps68470_pmic_get_power,
+ ti_tps68470_regmap_update_bits,
+ power_table,
+ ARRAY_SIZE(power_table));
+}
+
+static int tps68470_pmic_opregion_probe(struct platform_device *pdev)
+{
+ struct regmap *tps68470_regmap = dev_get_drvdata(pdev->dev.parent);
+ acpi_handle handle = ACPI_HANDLE(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct tps68470_pmic_opregion *opregion;
+ acpi_status status;
+
+ if (!dev || !tps68470_regmap) {
+ dev_warn(dev, "dev or regmap is NULL\n");
+ return -EINVAL;
+ }
+
+ if (!handle) {
+ dev_warn(dev, "acpi handle is NULL\n");
+ return -ENODEV;
+ }
+
+ opregion = devm_kzalloc(dev, sizeof(*opregion), GFP_KERNEL);
+ if (!opregion)
+ return -ENOMEM;
+
+ mutex_init(&opregion->lock);
+ opregion->regmap = tps68470_regmap;
+
+ status = acpi_install_address_space_handler(handle,
+ TI_PMIC_POWER_OPREGION_ID,
+ tps68470_pmic_pwr_handler,
+ NULL, opregion);
+ if (ACPI_FAILURE(status))
+ goto out_mutex_destroy;
+
+ status = acpi_install_address_space_handler(handle,
+ TI_PMIC_VR_VAL_OPREGION_ID,
+ tps68470_pmic_vrval_handler,
+ NULL, opregion);
+ if (ACPI_FAILURE(status))
+ goto out_remove_power_handler;
+
+ status = acpi_install_address_space_handler(handle,
+ TI_PMIC_CLOCK_OPREGION_ID,
+ tps68470_pmic_clk_handler,
+ NULL, opregion);
+ if (ACPI_FAILURE(status))
+ goto out_remove_vr_val_handler;
+
+ status = acpi_install_address_space_handler(handle,
+ TI_PMIC_CLKFREQ_OPREGION_ID,
+ tps68470_pmic_cfreq_handler,
+ NULL, opregion);
+ if (ACPI_FAILURE(status))
+ goto out_remove_clk_handler;
+
+ return 0;
+
+out_remove_clk_handler:
+ acpi_remove_address_space_handler(handle, TI_PMIC_CLOCK_OPREGION_ID,
+ tps68470_pmic_clk_handler);
+out_remove_vr_val_handler:
+ acpi_remove_address_space_handler(handle, TI_PMIC_VR_VAL_OPREGION_ID,
+ tps68470_pmic_vrval_handler);
+out_remove_power_handler:
+ acpi_remove_address_space_handler(handle, TI_PMIC_POWER_OPREGION_ID,
+ tps68470_pmic_pwr_handler);
+out_mutex_destroy:
+ mutex_destroy(&opregion->lock);
+ return -ENODEV;
+}
+
+static struct platform_driver tps68470_pmic_opregion_driver = {
+ .probe = tps68470_pmic_opregion_probe,
+ .driver = {
+ .name = "tps68470_pmic_opregion",
+ },
+};
+
+builtin_platform_driver(tps68470_pmic_opregion_driver)
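
In tps68470_pmic.c each opregion field occupies four bytes of address space, so pmic_get_reg_bit() maps an address to a table row with address / 4 plus a bounds check. A standalone sketch of the same lookup, using a hypothetical three-entry table:

#include <errno.h>
#include <stddef.h>

struct demo_entry {
	unsigned int address;
	unsigned int reg;
	unsigned int bitmask;
};

static const struct demo_entry demo_table[] = {
	{ 0x00, 0x41, 0x01 },
	{ 0x04, 0x42, 0x01 },
	{ 0x08, 0x43, 0x01 },
};

static int demo_lookup(unsigned long long address,
		       unsigned int *reg, unsigned int *bitmask)
{
	unsigned long long i = address / 4;	/* one row per 4-byte field */

	if (i >= sizeof(demo_table) / sizeof(demo_table[0]))
		return -ENOENT;
	if (!reg || !bitmask)
		return -EINVAL;
	*reg = demo_table[i].reg;
	*bitmask = demo_table[i].bitmask;
	return 0;
}
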
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index d85e010ee2cc..316a0fc785e3 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -381,6 +381,7 @@ unsigned int acpi_dev_get_irq_type(int triggering, int polarity)
case ACPI_ACTIVE_BOTH:
if (triggering == ACPI_EDGE_SENSITIVE)
return IRQ_TYPE_EDGE_BOTH;
+ /* fall through */
default:
return IRQ_TYPE_NONE;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 602f8ff212f2..e14e964bfe6d 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1505,41 +1505,38 @@ static void acpi_init_coherency(struct acpi_device *adev)
adev->flags.coherent_dma = cca;
}
-static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
+static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
{
- bool *is_spi_i2c_slave_p = data;
+ bool *is_serial_bus_slave_p = data;
if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
return 1;
- /*
- * devices that are connected to UART still need to be enumerated to
- * platform bus
- */
- if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
- *is_spi_i2c_slave_p = true;
+ *is_serial_bus_slave_p = true;
/* no need to do more checking */
return -1;
}
-static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
+static bool acpi_is_serial_bus_slave(struct acpi_device *device)
{
struct list_head resource_list;
- bool is_spi_i2c_slave = false;
+ bool is_serial_bus_slave = false;
/* Macs use device properties in lieu of _CRS resources */
if (x86_apple_machine &&
(fwnode_property_present(&device->fwnode, "spiSclkPeriod") ||
- fwnode_property_present(&device->fwnode, "i2cAddress")))
+ fwnode_property_present(&device->fwnode, "i2cAddress") ||
+ fwnode_property_present(&device->fwnode, "baud")))
return true;
INIT_LIST_HEAD(&resource_list);
- acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
- &is_spi_i2c_slave);
+ acpi_dev_get_resources(device, &resource_list,
+ acpi_check_serial_bus_slave,
+ &is_serial_bus_slave);
acpi_dev_free_resource_list(&resource_list);
- return is_spi_i2c_slave;
+ return is_serial_bus_slave;
}
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
@@ -1557,7 +1554,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
acpi_bus_get_flags(device);
device->flags.match_driver = false;
device->flags.initialized = true;
- device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
+ device->flags.serial_bus_slave = acpi_is_serial_bus_slave(device);
acpi_device_clear_enumerated(device);
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
@@ -1841,10 +1838,10 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
static void acpi_default_enumeration(struct acpi_device *device)
{
/*
- * Do not enumerate SPI/I2C slaves as they will be enumerated by their
- * respective parents.
+ * Do not enumerate SPI/I2C/UART slaves as they will be enumerated by
+ * their respective parents.
*/
- if (!device->flags.spi_i2c_slave) {
+ if (!device->flags.serial_bus_slave) {
acpi_create_platform_device(device, NULL);
acpi_device_set_enumerated(device);
} else {
@@ -1941,7 +1938,7 @@ static void acpi_bus_attach(struct acpi_device *device)
return;
device->flags.match_driver = true;
- if (ret > 0 && !device->flags.spi_i2c_slave) {
+ if (ret > 0 && !device->flags.serial_bus_slave) {
acpi_device_set_enumerated(device);
goto ok;
}
@@ -1950,7 +1947,7 @@ static void acpi_bus_attach(struct acpi_device *device)
if (ret < 0)
return;
- if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
+ if (!device->pnp.type.platform_id && !device->flags.serial_bus_slave)
acpi_device_set_enumerated(device);
else
acpi_default_enumeration(device);
@@ -2122,6 +2119,7 @@ int __init acpi_scan_init(void)
acpi_int340x_thermal_init();
acpi_amba_init();
acpi_watchdog_init();
+ acpi_init_lpit();
acpi_scan_add_handler(&generic_device_handler);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 0fd57bf33524..06a150bb35bf 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -169,7 +169,8 @@ module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
static char trace_method_name[1024];
-int param_set_trace_method_name(const char *val, const struct kernel_param *kp)
+static int param_set_trace_method_name(const char *val,
+ const struct kernel_param *kp)
{
u32 saved_flags = 0;
bool is_abs_path = true;
@@ -230,7 +231,8 @@ module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name,
module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
-static int param_set_trace_state(const char *val, struct kernel_param *kp)
+static int param_set_trace_state(const char *val,
+ const struct kernel_param *kp)
{
acpi_status status;
const char *method = trace_method_name;
@@ -266,7 +268,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
return 0;
}
-static int param_get_trace_state(char *buffer, struct kernel_param *kp)
+static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
{
if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
return sprintf(buffer, "disable");
@@ -295,7 +297,8 @@ MODULE_PARM_DESC(aml_debug_output,
"To enable/disable the ACPI Debug Object output.");
/* /sys/module/acpi/parameters/acpica_version */
-static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
+static int param_get_acpica_version(char *buffer,
+ const struct kernel_param *kp)
{
int result;
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index b4fbb9929482..ec5b0f190231 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -71,18 +71,34 @@ static const struct always_present_id always_present_ids[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
}),
/*
- * The GPD win BIOS dated 20170320 has disabled the accelerometer, the
+ * The GPD win BIOS dated 20170221 has disabled the accelerometer, the
* drivers sometimes cause crashes under Windows and this is how the
* manufacturer has solved this :| Note that the DMI data is less
* generic than it seems, a board_vendor of "AMI Corporation" is quite
* rare and a board_name of "Default String" also is rare.
+ *
+ * Unfortunately the GPD pocket also uses these strings and its BIOS
+ * was copy-pasted from the GPD win, so it has a disabled KIOX000A
+ * node which we should not enable, thus we also check the BIOS date.
*/
ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_MATCH(DMI_BIOS_DATE, "02/21/2017")
+ }),
+ ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BIOS_DATE, "03/20/2017")
}),
+ ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_MATCH(DMI_BIOS_DATE, "05/25/2017")
+ }),
};
bool acpi_device_always_present(struct acpi_device *adev)
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index e0f74ddc22b7..594c228d2f02 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -195,6 +195,7 @@ struct bus_type amba_bustype = {
.match = amba_match,
.uevent = amba_uevent,
.pm = &amba_pm,
+ .force_dma = true,
};
static int __init amba_init(void)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index fddf76ef5bd6..a73596a4f804 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -150,7 +150,7 @@ static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
static int binder_set_stop_on_user_error(const char *val,
- struct kernel_param *kp)
+ const struct kernel_param *kp)
{
int ret;
@@ -2192,7 +2192,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
off_start,
offp - off_start);
if (!parent) {
- pr_err("transaction release %d bad parent offset",
+ pr_err("transaction release %d bad parent offset\n",
debug_id);
continue;
}
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index c2819a3d58a6..6f6f745605af 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -186,12 +186,12 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
}
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+ void *start, void *end)
{
void *page_addr;
unsigned long user_page_addr;
struct binder_lru_page *page;
+ struct vm_area_struct *vma = NULL;
struct mm_struct *mm = NULL;
bool need_mm = false;
@@ -215,7 +215,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
}
- if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm))
+ if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
mm = alloc->vma_vm_mm;
if (mm) {
@@ -437,7 +437,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
ret = binder_update_page_range(alloc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
if (ret)
return ERR_PTR(ret);
@@ -478,7 +478,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
err_alloc_buf_struct_failed:
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
- end_page_addr, NULL);
+ end_page_addr);
return ERR_PTR(-ENOMEM);
}
@@ -562,8 +562,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
alloc->pid, buffer->data,
prev->data, next ? next->data : NULL);
binder_update_page_range(alloc, 0, buffer_start_page(buffer),
- buffer_start_page(buffer) + PAGE_SIZE,
- NULL);
+ buffer_start_page(buffer) + PAGE_SIZE);
}
list_del(&buffer->entry);
kfree(buffer);
@@ -600,8 +599,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
@@ -984,7 +982,7 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
return ret;
}
-struct shrinker binder_shrinker = {
+static struct shrinker binder_shrinker = {
.count_objects = binder_shrink_count,
.scan_objects = binder_shrink_scan,
.seeks = DEFAULT_SEEKS,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 9f78bb03bb76..5443cb71d7ba 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -57,6 +57,7 @@ enum {
AHCI_PCI_BAR_STA2X11 = 0,
AHCI_PCI_BAR_CAVIUM = 0,
AHCI_PCI_BAR_ENMOTUS = 2,
+ AHCI_PCI_BAR_CAVIUM_GEN5 = 4,
AHCI_PCI_BAR_STANDARD = 5,
};
@@ -1570,8 +1571,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
- else if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
- ahci_pci_bar = AHCI_PCI_BAR_CAVIUM;
+ else if (pdev->vendor == PCI_VENDOR_ID_CAVIUM) {
+ if (pdev->device == 0xa01c)
+ ahci_pci_bar = AHCI_PCI_BAR_CAVIUM;
+ if (pdev->device == 0xa084)
+ ahci_pci_bar = AHCI_PCI_BAR_CAVIUM_GEN5;
+ }
/* acquire resources */
rc = pcim_enable_device(pdev);
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 207649d323c5..5ecc9d46cb54 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -32,15 +32,27 @@
#define AHCI_VEND_PP3C 0xB0
#define AHCI_VEND_PP4C 0xB4
#define AHCI_VEND_PP5C 0xB8
+#define AHCI_VEND_AXICC 0xBC
#define AHCI_VEND_PAXIC 0xC0
#define AHCI_VEND_PTC 0xC8
/* Vendor Specific Register bit definitions */
#define PAXIC_ADBW_BW64 0x1
-#define PAXIC_MAWIDD (1 << 8)
-#define PAXIC_MARIDD (1 << 16)
+#define PAXIC_MAWID(i) (((i) * 2) << 4)
+#define PAXIC_MARID(i) (((i) * 2) << 12)
+#define PAXIC_MARIDD(i) ((((i) * 2) + 1) << 16)
+#define PAXIC_MAWIDD(i) ((((i) * 2) + 1) << 8)
#define PAXIC_OTL (0x4 << 20)
+/* Register bit definitions for cache control */
+#define AXICC_ARCA_VAL (0xF << 0)
+#define AXICC_ARCF_VAL (0xF << 4)
+#define AXICC_ARCH_VAL (0xF << 8)
+#define AXICC_ARCP_VAL (0xF << 12)
+#define AXICC_AWCFD_VAL (0xF << 16)
+#define AXICC_AWCD_VAL (0xF << 20)
+#define AXICC_AWCF_VAL (0xF << 24)
+
#define PCFG_TPSS_VAL (0x32 << 16)
#define PCFG_TPRS_VAL (0x2 << 12)
#define PCFG_PAD_VAL 0x2
@@ -50,21 +62,6 @@
#define PPCFG_PSS_EN (1 << 29)
#define PPCFG_ESDF_EN (1 << 31)
-#define PP2C_CIBGMN 0x0F
-#define PP2C_CIBGMX (0x25 << 8)
-#define PP2C_CIBGN (0x18 << 16)
-#define PP2C_CINMP (0x29 << 24)
-
-#define PP3C_CWBGMN 0x04
-#define PP3C_CWBGMX (0x0B << 8)
-#define PP3C_CWBGN (0x08 << 16)
-#define PP3C_CWNMP (0x0F << 24)
-
-#define PP4C_BMX 0x0a
-#define PP4C_BNM (0x08 << 8)
-#define PP4C_SFD (0x4a << 16)
-#define PP4C_PTST (0x06 << 24)
-
#define PP5C_RIT 0x60216
#define PP5C_RCT (0x7f0 << 20)
@@ -75,6 +72,7 @@
#define PORT1_BASE 0x180
/* Port Control Register Bit Definitions */
+#define PORT_SCTL_SPD_GEN3 (0x3 << 4)
#define PORT_SCTL_SPD_GEN2 (0x2 << 4)
#define PORT_SCTL_SPD_GEN1 (0x1 << 4)
#define PORT_SCTL_IPM (0x3 << 8)
@@ -85,13 +83,43 @@
#define DRV_NAME "ahci-ceva"
#define CEVA_FLAG_BROKEN_GEN2 1
+static unsigned int rx_watermark = PTC_RX_WM_VAL;
+module_param(rx_watermark, uint, 0644);
+MODULE_PARM_DESC(rx_watermark, "RxWaterMark value (0 - 0x80)");
+
struct ceva_ahci_priv {
struct platform_device *ahci_pdev;
+ /* Port Phy2Cfg Register */
+ u32 pp2c[NR_PORTS];
+ u32 pp3c[NR_PORTS];
+ u32 pp4c[NR_PORTS];
+ u32 pp5c[NR_PORTS];
+ /* Axi Cache Control Register */
+ u32 axicc;
+ bool is_cci_enabled;
int flags;
};
+static unsigned int ceva_ahci_read_id(struct ata_device *dev,
+ struct ata_taskfile *tf, u16 *id)
+{
+ u32 err_mask;
+
+ err_mask = ata_do_dev_read_id(dev, tf, id);
+ if (err_mask)
+ return err_mask;
+ /*
+ * Since CEVA controller does not support device sleep feature, we
+ * need to clear DEVSLP (bit 8) in word78 of the IDENTIFY DEVICE data.
+ */
+ id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
+
+ return 0;
+}
+
static struct ata_port_operations ahci_ceva_ops = {
.inherits = &ahci_platform_ops,
+ .read_id = ceva_ahci_read_id,
};
static const struct ata_port_info ahci_ceva_port_info = {
@@ -108,14 +136,6 @@ static void ahci_ceva_setup(struct ahci_host_priv *hpriv)
u32 tmp;
int i;
- /*
- * AXI Data bus width to 64
- * Set Mem Addr Read, Write ID for data transfers
- * Transfer limit to 72 DWord
- */
- tmp = PAXIC_ADBW_BW64 | PAXIC_MAWIDD | PAXIC_MARIDD | PAXIC_OTL;
- writel(tmp, mmio + AHCI_VEND_PAXIC);
-
/* Set AHCI Enable */
tmp = readl(mmio + HOST_CTL);
tmp |= HOST_AHCI_EN;
@@ -126,32 +146,48 @@ static void ahci_ceva_setup(struct ahci_host_priv *hpriv)
tmp = PCFG_TPSS_VAL | PCFG_TPRS_VAL | (PCFG_PAD_VAL + i);
writel(tmp, mmio + AHCI_VEND_PCFG);
+ /*
+ * AXI Data bus width to 64
+ * Set Mem Addr Read, Write ID for data transfers
+ * Set Mem Addr Read ID, Write ID for non-data transfers
+ * Transfer limit to 72 DWord
+ */
+ tmp = PAXIC_ADBW_BW64 | PAXIC_MAWIDD(i) | PAXIC_MARIDD(i) |
+ PAXIC_MAWID(i) | PAXIC_MARID(i) | PAXIC_OTL;
+ writel(tmp, mmio + AHCI_VEND_PAXIC);
+
+ /* Set AXI cache control register if CCi is enabled */
+ if (cevapriv->is_cci_enabled) {
+ tmp = readl(mmio + AHCI_VEND_AXICC);
+ tmp |= AXICC_ARCA_VAL | AXICC_ARCF_VAL |
+ AXICC_ARCH_VAL | AXICC_ARCP_VAL |
+ AXICC_AWCFD_VAL | AXICC_AWCD_VAL |
+ AXICC_AWCF_VAL;
+ writel(tmp, mmio + AHCI_VEND_AXICC);
+ }
+
/* Port Phy Cfg register enables */
tmp = PPCFG_TTA | PPCFG_PSS_EN | PPCFG_ESDF_EN;
writel(tmp, mmio + AHCI_VEND_PPCFG);
/* Phy Control OOB timing parameters COMINIT */
- tmp = PP2C_CIBGMN | PP2C_CIBGMX | PP2C_CIBGN | PP2C_CINMP;
- writel(tmp, mmio + AHCI_VEND_PP2C);
+ writel(cevapriv->pp2c[i], mmio + AHCI_VEND_PP2C);
/* Phy Control OOB timing parameters COMWAKE */
- tmp = PP3C_CWBGMN | PP3C_CWBGMX | PP3C_CWBGN | PP3C_CWNMP;
- writel(tmp, mmio + AHCI_VEND_PP3C);
+ writel(cevapriv->pp3c[i], mmio + AHCI_VEND_PP3C);
/* Phy Control Burst timing setting */
- tmp = PP4C_BMX | PP4C_BNM | PP4C_SFD | PP4C_PTST;
- writel(tmp, mmio + AHCI_VEND_PP4C);
+ writel(cevapriv->pp4c[i], mmio + AHCI_VEND_PP4C);
/* Rate Change Timer and Retry Interval Timer setting */
- tmp = PP5C_RIT | PP5C_RCT;
- writel(tmp, mmio + AHCI_VEND_PP5C);
+ writel(cevapriv->pp5c[i], mmio + AHCI_VEND_PP5C);
/* Rx Watermark setting */
- tmp = PTC_RX_WM_VAL | PTC_RSVD;
+ tmp = rx_watermark | PTC_RSVD;
writel(tmp, mmio + AHCI_VEND_PTC);
- /* Default to Gen 2 Speed and Gen 1 if Gen2 is broken */
- tmp = PORT_SCTL_SPD_GEN2 | PORT_SCTL_IPM;
+ /* Default to Gen 3 Speed and Gen 1 if Gen2 is broken */
+ tmp = PORT_SCTL_SPD_GEN3 | PORT_SCTL_IPM;
if (cevapriv->flags & CEVA_FLAG_BROKEN_GEN2)
tmp = PORT_SCTL_SPD_GEN1 | PORT_SCTL_IPM;
writel(tmp, mmio + PORT_SCR_CTL + PORT_BASE + PORT_OFFSET * i);
@@ -168,6 +204,7 @@ static int ceva_ahci_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ceva_ahci_priv *cevapriv;
+ enum dev_dma_attr attr;
int rc;
cevapriv = devm_kzalloc(dev, sizeof(*cevapriv), GFP_KERNEL);
@@ -187,6 +224,65 @@ static int ceva_ahci_probe(struct platform_device *pdev)
if (of_property_read_bool(np, "ceva,broken-gen2"))
cevapriv->flags = CEVA_FLAG_BROKEN_GEN2;
+ /* Read OOB timing value for COMINIT from device-tree */
+ if (of_property_read_u8_array(np, "ceva,p0-cominit-params",
+ (u8 *)&cevapriv->pp2c[0], 4) < 0) {
+ dev_warn(dev, "ceva,p0-cominit-params property not defined\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u8_array(np, "ceva,p1-cominit-params",
+ (u8 *)&cevapriv->pp2c[1], 4) < 0) {
+ dev_warn(dev, "ceva,p1-cominit-params property not defined\n");
+ return -EINVAL;
+ }
+
+ /* Read OOB timing value for COMWAKE from device-tree */
+ if (of_property_read_u8_array(np, "ceva,p0-comwake-params",
+ (u8 *)&cevapriv->pp3c[0], 4) < 0) {
+ dev_warn(dev, "ceva,p0-comwake-params property not defined\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u8_array(np, "ceva,p1-comwake-params",
+ (u8 *)&cevapriv->pp3c[1], 4) < 0) {
+ dev_warn(dev, "ceva,p1-comwake-params property not defined\n");
+ return -EINVAL;
+ }
+
+ /* Read phy BURST timing value from device-tree */
+ if (of_property_read_u8_array(np, "ceva,p0-burst-params",
+ (u8 *)&cevapriv->pp4c[0], 4) < 0) {
+ dev_warn(dev, "ceva,p0-burst-params property not defined\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u8_array(np, "ceva,p1-burst-params",
+ (u8 *)&cevapriv->pp4c[1], 4) < 0) {
+ dev_warn(dev, "ceva,p1-burst-params property not defined\n");
+ return -EINVAL;
+ }
+
+ /* Read phy RETRY interval timing value from device-tree */
+ if (of_property_read_u16_array(np, "ceva,p0-retry-params",
+ (u16 *)&cevapriv->pp5c[0], 2) < 0) {
+ dev_warn(dev, "ceva,p0-retry-params property not defined\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u16_array(np, "ceva,p1-retry-params",
+ (u16 *)&cevapriv->pp5c[1], 2) < 0) {
+ dev_warn(dev, "ceva,p1-retry-params property not defined\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Check if CCI is enabled for SATA. The DEV_DMA_COHERENT is returned
+ * if CCI is enabled, so check for DEV_DMA_COHERENT.
+ */
+ attr = device_get_dma_attr(dev);
+ cevapriv->is_cci_enabled = (attr == DEV_DMA_COHERENT);
+
hpriv->plat_data = cevapriv;
/* CEVA specific initialization */
@@ -206,12 +302,37 @@ disable_resources:
static int __maybe_unused ceva_ahci_suspend(struct device *dev)
{
- return ahci_platform_suspend_host(dev);
+ return ahci_platform_suspend(dev);
}
static int __maybe_unused ceva_ahci_resume(struct device *dev)
{
- return ahci_platform_resume_host(dev);
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ /* Configure CEVA specific config before resuming HBA */
+ ahci_ceva_setup(hpriv);
+
+ rc = ahci_platform_resume_host(dev);
+ if (rc)
+ goto disable_resources;
+
+ /* We resumed so update PM runtime state */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+
+ return rc;
}
static SIMPLE_DEV_PM_OPS(ahci_ceva_pm_ops, ceva_ahci_suspend, ceva_ahci_resume);
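Note (not part of the patch): the probe changes above read each port's OOB, burst and retry timings from the device tree as raw u8/u16 arrays written straight over the 32-bit pp2c..pp5c fields, which ahci_ceva_setup() then programs verbatim into the vendor registers. A sketch of what that packing amounts to, with placeholder timing values (the real values belong in the board's device tree; see the ahci-ceva binding):

    /* Hypothetical DT fragment, values are illustrative only:
     *     ceva,p0-cominit-params = /bits/ 8 <0x18 0x40 0x18 0x28>;
     */
    u8 raw[4];
    u32 pp2c;

    if (of_property_read_u8_array(np, "ceva,p0-cominit-params", raw, 4))
            return -EINVAL;

    /* The patch reads directly into (u8 *)&pp2c; on a little-endian CPU that
     * is equivalent to packing the bytes lowest-first:
     */
    pp2c = raw[0] | (raw[1] << 8) | (raw[2] << 16) | ((u32)raw[3] << 24);

The new ceva_ahci_read_id() hook is independent of this: it clears bit 8 of IDENTIFY DEVICE word 78 so that DEVSLP is never advertised to libata, since the controller cannot honour it.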
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 787567e840bd..a58bcc069c54 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -230,7 +230,7 @@ static int read_adc_sum(void *dev, u16 rtune_ctl_reg, void __iomem * mmio)
{
u16 adc_out_reg, read_sum;
u32 index, read_attempt;
- const u32 attempt_limit = 100;
+ const u32 attempt_limit = 200;
imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_RTUNE_CTL, mmio);
imx_phy_reg_write(rtune_ctl_reg, mmio);
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index a270a1173c8c..341d0ef82cbd 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -295,6 +295,7 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
node->name);
break;
}
+ /* fall through */
case -ENODEV:
/* continue normally */
hpriv->phys[port] = NULL;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b8ac4902d312..2a882929de4a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1879,6 +1879,7 @@ retry:
switch (class) {
case ATA_DEV_SEMB:
class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
+ /* fall through */
case ATA_DEV_ATA:
case ATA_DEV_ZAC:
tf.command = ATA_CMD_ID_ATA;
@@ -2975,6 +2976,7 @@ int ata_bus_probe(struct ata_port *ap)
case -ENODEV:
/* give it just one more chance */
tries[dev->devno] = min(tries[dev->devno], 1);
+ /* fall through */
case -EIO:
if (tries[dev->devno] == 1) {
/* This is the last chance, better to slow
@@ -3462,6 +3464,7 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
case ATA_DNXFER_FORCE_PIO0:
pio_mask &= 1;
+ /* fall through */
case ATA_DNXFER_FORCE_PIO:
mwdma_mask = 0;
udma_mask = 0;
@@ -3964,6 +3967,7 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
scontrol &= ~(0x1 << 8);
scontrol |= (0x6 << 8);
break;
+ case ATA_LPM_MED_POWER_WITH_DIPM:
case ATA_LPM_MIN_POWER:
if (ata_link_nr_enabled(link) > 0)
/* no restrictions on LPM transitions */
@@ -5823,7 +5827,7 @@ void ata_host_resume(struct ata_host *host)
}
#endif
-struct device_type ata_port_type = {
+const struct device_type ata_port_type = {
.name = "ata_port",
#ifdef CONFIG_PM
.pm = &ata_port_pm_ops,
@@ -6903,7 +6907,7 @@ static int __init ata_parse_force_one(char **cur,
return -EINVAL;
}
if (nr_matches > 1) {
- *reason = "ambigious value";
+ *reason = "ambiguous value";
return -EINVAL;
}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index ece6fd91a947..11c3137d7b0a 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2264,8 +2264,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
eflags |= ATA_EFLAG_DUBIOUS_XFER;
ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
+ trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
}
- trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
DPRINTK("EXIT\n");
}
@@ -3454,9 +3454,9 @@ static int ata_eh_maybe_retry_flush(struct ata_device *dev)
* @r_failed_dev: out parameter for failed device
*
* Enable SATA Interface power management. This will enable
- * Device Interface Power Management (DIPM) for min_power
- * policy, and then call driver specific callbacks for
- * enabling Host Initiated Power management.
+ * Device Interface Power Management (DIPM) for min_power and
+ * medium_power_with_dipm policies, and then call driver specific
+ * callbacks for enabling Host Initiated Power management.
*
* LOCKING:
* EH context.
@@ -3502,7 +3502,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
hints &= ~ATA_LPM_HIPM;
/* disable DIPM before changing link config */
- if (policy != ATA_LPM_MIN_POWER && dipm) {
+ if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_DISABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
@@ -3545,7 +3545,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
/* host config updated, enable DIPM if transitioning to MIN_POWER */
ata_for_each_dev(dev, link, ENABLED) {
- if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
+ if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
ata_id_has_dipm(dev->id)) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, SATA_DIPM);
@@ -3711,9 +3711,11 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
case -ENODEV:
/* device missing or wrong IDENTIFY data, schedule probing */
ehc->i.probe_mask |= (1 << dev->devno);
+ /* fall through */
case -EINVAL:
/* give it just one more chance */
ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
+ /* fall through */
case -EIO:
if (ehc->tries[dev->devno] == 1) {
/* This is the last chance, better to slow
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 44ba292f2cd7..66be961c93a4 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -106,10 +106,11 @@ static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
};
static const char *ata_lpm_policy_names[] = {
- [ATA_LPM_UNKNOWN] = "max_performance",
- [ATA_LPM_MAX_POWER] = "max_performance",
- [ATA_LPM_MED_POWER] = "medium_power",
- [ATA_LPM_MIN_POWER] = "min_power",
+ [ATA_LPM_UNKNOWN] = "max_performance",
+ [ATA_LPM_MAX_POWER] = "max_performance",
+ [ATA_LPM_MED_POWER] = "medium_power",
+ [ATA_LPM_MED_POWER_WITH_DIPM] = "med_power_with_dipm",
+ [ATA_LPM_MIN_POWER] = "min_power",
};
static ssize_t ata_scsi_lpm_store(struct device *device,
@@ -2145,7 +2146,7 @@ static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
*/
static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
{
- const u8 versions[] = {
+ static const u8 versions[] = {
0x00,
0x60, /* SAM-3 (no version claimed) */
@@ -2155,7 +2156,7 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
0x03,
0x00 /* SPC-3 (no version claimed) */
};
- const u8 versions_zbc[] = {
+ static const u8 versions_zbc[] = {
0x00,
0xA0, /* SAM-5 (no version claimed) */
@@ -2227,7 +2228,7 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
{
int num_pages;
- const u8 pages[] = {
+ static const u8 pages[] = {
0x00, /* page 0x00, this page */
0x80, /* page 0x80, unit serial no page */
0x83, /* page 0x83, device ident page */
@@ -2258,7 +2259,7 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
*/
static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
{
- const u8 hdr[] = {
+ static const u8 hdr[] = {
0,
0x80, /* this page code */
0,
@@ -2580,7 +2581,7 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
{
struct ata_device *dev = args->dev;
u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
- const u8 sat_blk_desc[] = {
+ static const u8 sat_blk_desc[] = {
0, 0, 0, 0, /* number of blocks: sat unspecified */
0,
0, 0x2, 0x0 /* block length: 512 bytes */
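Note (not part of the patch): the libata-core.c, libata-eh.c and libata-scsi.c hunks together introduce one new link power management policy, med_power_with_dipm, slotted between medium_power and min_power. The range checks in ata_eh_set_lpm() ("policy < ATA_LPM_MED_POWER_WITH_DIPM" disables DIPM, "policy >= ATA_LPM_MED_POWER_WITH_DIPM" enables it) rely on the enum ordering, which the sysfs name table above mirrors. A sketch of that ordering and the check it enables (paraphrased, not a verbatim copy of <linux/libata.h>):

    enum ata_lpm_policy {
            ATA_LPM_UNKNOWN,                /* keep firmware/BIOS settings    */
            ATA_LPM_MAX_POWER,              /* no link power saving           */
            ATA_LPM_MED_POWER,              /* host-initiated PM only         */
            ATA_LPM_MED_POWER_WITH_DIPM,    /* new: also enable device DIPM   */
            ATA_LPM_MIN_POWER,              /* most aggressive                */
    };

    static inline bool lpm_policy_enables_dipm(enum ata_lpm_policy policy)
    {
            return policy >= ATA_LPM_MED_POWER_WITH_DIPM;
    }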
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 08a245b76417..f953cb4bb1ba 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -51,7 +51,7 @@ extern int atapi_passthru16;
extern int libata_fua;
extern int libata_noacpi;
extern int libata_allow_tpm;
-extern struct device_type ata_port_type;
+extern const struct device_type ata_port_type;
extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
extern void ata_force_cbl(struct ata_port *ap);
extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 96c05c9494fa..6b3355343542 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -242,7 +242,7 @@ static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev)
static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
- unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ unsigned int pio;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 ultra;
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index 3ea50dc5ea47..3729e2448eb6 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -171,6 +171,7 @@ static int atp867x_get_active_clocks_shifted(struct ata_port *ap,
default:
printk(KERN_WARNING "ATP867X: active %dclk is invalid. "
"Using 12clk.\n", clk);
+ /* fall through */
case 9 ... 12:
clocks = 7; /* 12 clk */
break;
@@ -203,6 +204,7 @@ static int atp867x_get_recover_clocks_shifted(unsigned int clk)
default:
printk(KERN_WARNING "ATP867X: recover %dclk is invalid. "
"Using default 12clk.\n", clk);
+ /* fall through */
case 12: /* default 12 clk */
clocks = 0;
break;
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 82bfd51692f3..ffd8d33c6e0f 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -84,7 +84,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
*/
static struct pdc2027x_pio_timing {
u8 value0, value1, value2;
-} pdc2027x_pio_timing_tbl [] = {
+} pdc2027x_pio_timing_tbl[] = {
{ 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
{ 0x46, 0x29, 0xa4 }, /* PIO mode 1 */
{ 0x23, 0x26, 0x64 }, /* PIO mode 2 */
@@ -94,7 +94,7 @@ static struct pdc2027x_pio_timing {
static struct pdc2027x_mdma_timing {
u8 value0, value1;
-} pdc2027x_mdma_timing_tbl [] = {
+} pdc2027x_mdma_timing_tbl[] = {
{ 0xdf, 0x5f }, /* MDMA mode 0 */
{ 0x6b, 0x27 }, /* MDMA mode 1 */
{ 0x69, 0x25 }, /* MDMA mode 2 */
@@ -102,7 +102,7 @@ static struct pdc2027x_mdma_timing {
static struct pdc2027x_udma_timing {
u8 value0, value1, value2;
-} pdc2027x_udma_timing_tbl [] = {
+} pdc2027x_udma_timing_tbl[] = {
{ 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
{ 0x3a, 0x0a, 0xd0 }, /* UDMA mode 1 */
{ 0x2a, 0x07, 0xcd }, /* UDMA mode 2 */
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index ce128d5a6ded..6af4ec3c88c3 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -248,6 +248,7 @@ static int sata_dwc_dma_init_old(struct platform_device *pdev,
return -ENOMEM;
hsdev->dma->dev = &pdev->dev;
+ hsdev->dma->id = pdev->id;
/* Get SATA DMA interrupt number */
hsdev->dma->irq = irq_of_parse_and_map(np, 1);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 3b2246dded74..cc208b72b199 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2387,7 +2387,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
": attempting PIO w/multiple DRQ: "
"this may fail due to h/w errata\n");
}
- /* drop through */
+ /* fall through */
case ATA_PROT_NODATA:
case ATAPI_PROT_PIO:
case ATAPI_PROT_NODATA:
@@ -2478,20 +2478,18 @@ static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
- struct ata_eh_info *ehi;
unsigned int pmp;
/*
* Initialize EH info for PMPs which saw device errors
*/
- ehi = &ap->link.eh_info;
for (pmp = 0; pmp_map != 0; pmp++) {
unsigned int this_pmp = (1 << pmp);
if (pmp_map & this_pmp) {
struct ata_link *link = &ap->pmp_link[pmp];
+ struct ata_eh_info *ehi = &link->eh_info;
pmp_map &= ~this_pmp;
- ehi = &link->eh_info;
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "dev err");
ehi->err_mask |= AC_ERR_DEV;
@@ -3877,7 +3875,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
" and avoid the final two gigabytes on"
" all RocketRAID BIOS initialized drives.\n");
}
- /* drop through */
+ /* fall through */
case chip_6042:
hpriv->ops = &mv6xxx_ops;
hp_flags |= MV_HP_GEN_IIE;
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 537d11869069..80ee2f2a50d0 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -872,7 +872,6 @@ MODULE_DEVICE_TABLE(of, sata_rcar_match);
static int sata_rcar_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id;
struct ata_host *host;
struct sata_rcar_priv *priv;
struct resource *mem;
@@ -888,11 +887,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- of_id = of_match_device(sata_rcar_match, &pdev->dev);
- if (!of_id)
- return -ENODEV;
-
- priv->type = (enum sata_rcar_type)of_id->data;
+ priv->type = (enum sata_rcar_type)of_device_get_match_data(&pdev->dev);
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "failed to get access to sata clock\n");
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 7e76b35f422c..e121b8485731 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2803,7 +2803,7 @@ out:
return err;
out_free_irq:
- free_irq(dev->irq, dev);
+ free_irq(irq, dev);
out_free:
kfree(dev);
out_release:
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 6df7d6676a48..4de87b0b53c8 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -22,14 +22,23 @@
#include <linux/string.h>
#include <linux/sched/topology.h>
-static DEFINE_MUTEX(cpu_scale_mutex);
-static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
-unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
+void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
+ unsigned long max_freq)
{
- return per_cpu(cpu_scale, cpu);
+ unsigned long scale;
+ int i;
+
+ scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
+
+ for_each_cpu(i, cpus)
+ per_cpu(freq_scale, i) = scale;
}
+static DEFINE_MUTEX(cpu_scale_mutex);
+DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
per_cpu(cpu_scale, cpu) = capacity;
@@ -96,7 +105,7 @@ subsys_initcall(register_cpu_capacity_sysctl);
static u32 capacity_scale;
static u32 *raw_capacity;
-static int __init free_raw_capacity(void)
+static int free_raw_capacity(void)
{
kfree(raw_capacity);
raw_capacity = NULL;
@@ -212,6 +221,8 @@ static struct notifier_block init_cpu_capacity_notifier __initdata = {
static int __init register_cpufreq_notifier(void)
{
+ int ret;
+
/*
* on ACPI-based systems we need to use the default cpu capacity
* until we have the necessary code to parse the cpu capacity, so
@@ -227,8 +238,13 @@ static int __init register_cpufreq_notifier(void)
cpumask_copy(cpus_to_visit, cpu_possible_mask);
- return cpufreq_register_notifier(&init_cpu_capacity_notifier,
- CPUFREQ_POLICY_NOTIFIER);
+ ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
+ CPUFREQ_POLICY_NOTIFIER);
+
+ if (ret)
+ free_cpumask_var(cpus_to_visit);
+
+ return ret;
}
core_initcall(register_cpufreq_notifier);
@@ -236,6 +252,7 @@ static void __init parsing_done_workfn(struct work_struct *work)
{
cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
+ free_cpumask_var(cpus_to_visit);
}
#else
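Note (not part of the patch): arch_set_freq_scale() stores, per CPU, the ratio of the current to the maximum frequency expressed on the scheduler's fixed-point capacity scale (SCHED_CAPACITY_SHIFT is 10, so full scale is 1024). The scheduler can then scale utilisation by freq_scale to stay frequency-invariant when a CPU is running below its maximum clock. A worked example with made-up frequencies:

    /* Illustrative only: current 1.2 GHz, maximum 2.0 GHz, both in kHz. */
    unsigned long cur_freq = 1200000;
    unsigned long max_freq = 2000000;

    unsigned long scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
    /* scale = (1200000 * 1024) / 2000000 = 614, i.e. ~60% of SCHED_CAPACITY_SCALE */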
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 12ebd055724c..110230d86527 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -668,7 +668,7 @@ const char *dev_driver_string(const struct device *dev)
* so be careful about accessing it. dev->bus and dev->class should
* never change once they are set, so they don't need special care.
*/
- drv = ACCESS_ONCE(dev->driver);
+ drv = READ_ONCE(dev->driver);
return drv ? drv->name :
(dev->bus ? dev->bus->name :
(dev->class ? dev->class->name : ""));
@@ -1571,7 +1571,7 @@ static int device_add_class_symlinks(struct device *dev)
int error;
if (of_node) {
- error = sysfs_create_link(&dev->kobj, &of_node->kobj,"of_node");
+ error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
if (error)
dev_warn(dev, "Error %d creating of_node link\n",error);
/* An error here doesn't warrant bringing down the device */
@@ -1958,7 +1958,6 @@ void device_del(struct device *dev)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DEL_DEVICE, dev);
- device_links_purge(dev);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);
@@ -1986,6 +1985,7 @@ void device_del(struct device *dev)
device_pm_remove(dev);
driver_deferred_probe_del(dev);
device_remove_properties(dev);
+ device_links_purge(dev);
/* Notify the platform of the removal, in case they
* need to do anything...
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 321cd7b4d817..58a9b608d821 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -18,6 +18,7 @@
#include <linux/cpufeature.h>
#include <linux/tick.h>
#include <linux/pm_qos.h>
+#include <linux/sched/isolation.h>
#include "base.h"
@@ -271,8 +272,16 @@ static ssize_t print_cpus_isolated(struct device *dev,
struct device_attribute *attr, char *buf)
{
int n = 0, len = PAGE_SIZE-2;
+ cpumask_var_t isolated;
- n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(cpu_isolated_map));
+ if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_andnot(isolated, cpu_possible_mask,
+ housekeeping_cpumask(HK_FLAG_DOMAIN));
+ n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(isolated));
+
+ free_cpumask_var(isolated);
return n;
}
@@ -377,7 +386,8 @@ int register_cpu(struct cpu *cpu, int num)
per_cpu(cpu_sys_devices, num) = &cpu->dev;
register_cpu_under_node(num, cpu_to_node(num));
- dev_pm_qos_expose_latency_limit(&cpu->dev, 0);
+ dev_pm_qos_expose_latency_limit(&cpu->dev,
+ PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
return 0;
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index ad44b40fe284..2c964f56dafe 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -350,6 +350,15 @@ EXPORT_SYMBOL_GPL(device_bind_driver);
static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
+static void driver_deferred_probe_add_trigger(struct device *dev,
+ int local_trigger_count)
+{
+ driver_deferred_probe_add(dev);
+ /* Did a trigger occur while probing? Need to re-trigger if yes */
+ if (local_trigger_count != atomic_read(&deferred_trigger_count))
+ driver_deferred_probe_trigger();
+}
+
static int really_probe(struct device *dev, struct device_driver *drv)
{
int ret = -EPROBE_DEFER;
@@ -369,6 +378,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
}
ret = device_links_check_suppliers(dev);
+ if (ret == -EPROBE_DEFER)
+ driver_deferred_probe_add_trigger(dev, local_trigger_count);
if (ret)
return ret;
@@ -464,15 +475,13 @@ pinctrl_bind_failed:
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
pm_runtime_reinit(dev);
+ dev_pm_set_driver_flags(dev, 0);
switch (ret) {
case -EPROBE_DEFER:
/* Driver requested deferred probing */
dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
- driver_deferred_probe_add(dev);
- /* Did a trigger occur while probing? Need to re-trigger if yes */
- if (local_trigger_count != atomic_read(&deferred_trigger_count))
- driver_deferred_probe_trigger();
+ driver_deferred_probe_add_trigger(dev, local_trigger_count);
break;
case -ENODEV:
case -ENXIO:
@@ -869,6 +878,7 @@ static void __device_release_driver(struct device *dev, struct device *parent)
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
pm_runtime_reinit(dev);
+ dev_pm_set_driver_flags(dev, 0);
klist_remove(&dev->p->knode_driver);
device_pm_check_callbacks(dev);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9045c5f3734e..c203fb90c1a0 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1143,6 +1143,7 @@ struct bus_type platform_bus_type = {
.match = platform_match,
.uevent = platform_uevent,
.pm = &platform_dev_pm_ops,
+ .force_dma = true,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 29cd71d8b360..e1bb691cf8f1 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -2,7 +2,6 @@
obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
-obj-$(CONFIG_PM_OPP) += opp/
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e8ca5e2cf1e5..0c80bea05bcb 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -124,6 +124,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
#define genpd_status_on(genpd) (genpd->status == GPD_STATE_ACTIVE)
#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
+#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -237,6 +238,95 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif
+/**
+ * dev_pm_genpd_set_performance_state - Set performance state of device's power
+ * domain.
+ *
+ * @dev: Device for which the performance-state needs to be set.
+ * @state: Target performance state of the device. This can be set as 0 when the
+ * device doesn't have any performance state constraints left (And so
+ * the device wouldn't participate anymore to find the target
+ * performance state of the genpd).
+ *
+ * It is assumed that the users guarantee that the genpd wouldn't be detached
+ * while this routine is getting called.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
+{
+ struct generic_pm_domain *genpd;
+ struct generic_pm_domain_data *gpd_data, *pd_data;
+ struct pm_domain_data *pdd;
+ unsigned int prev;
+ int ret = 0;
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -ENODEV;
+
+ if (unlikely(!genpd->set_performance_state))
+ return -EINVAL;
+
+ if (unlikely(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ genpd_lock(genpd);
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ prev = gpd_data->performance_state;
+ gpd_data->performance_state = state;
+
+ /* New requested state is same as Max requested state */
+ if (state == genpd->performance_state)
+ goto unlock;
+
+ /* New requested state is higher than Max requested state */
+ if (state > genpd->performance_state)
+ goto update_state;
+
+ /* Traverse all devices within the domain */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ pd_data = to_gpd_data(pdd);
+
+ if (pd_data->performance_state > state)
+ state = pd_data->performance_state;
+ }
+
+ if (state == genpd->performance_state)
+ goto unlock;
+
+ /*
+ * We aren't propagating performance state changes of a subdomain to its
+ * masters as we don't have hardware that needs it. Over that, the
+ * performance states of subdomain and its masters may not have
+ * one-to-one mapping and would require additional information. We can
+ * get back to this once we have hardware that needs it. For that
+ * reason, we don't have to consider performance state of the subdomains
+ * of genpd here.
+ */
+
+update_state:
+ if (genpd_status_on(genpd)) {
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret) {
+ gpd_data->performance_state = prev;
+ goto unlock;
+ }
+ }
+
+ genpd->performance_state = state;
+
+unlock:
+ genpd_unlock(genpd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
@@ -256,6 +346,15 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+
+ if (unlikely(genpd->set_performance_state)) {
+ ret = genpd->set_performance_state(genpd, genpd->performance_state);
+ if (ret) {
+ pr_warn("%s: Failed to set performance state %d (%d)\n",
+ genpd->name, genpd->performance_state, ret);
+ }
+ }
+
if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
return ret;
@@ -346,9 +445,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
enum pm_qos_flags_status stat;
- stat = dev_pm_qos_flags(pdd->dev,
- PM_QOS_FLAG_NO_POWER_OFF
- | PM_QOS_FLAG_REMOTE_WAKEUP);
+ stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
if (stat > PM_QOS_FLAGS_NONE)
return -EBUSY;
@@ -749,11 +846,7 @@ late_initcall(genpd_power_off_unused);
#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
-/**
- * pm_genpd_present - Check if the given PM domain has been initialized.
- * @genpd: PM domain to check.
- */
-static bool pm_genpd_present(const struct generic_pm_domain *genpd)
+static bool genpd_present(const struct generic_pm_domain *genpd)
{
const struct generic_pm_domain *gpd;
@@ -771,12 +864,6 @@ static bool pm_genpd_present(const struct generic_pm_domain *genpd)
#ifdef CONFIG_PM_SLEEP
-static bool genpd_dev_active_wakeup(const struct generic_pm_domain *genpd,
- struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
-}
-
/**
* genpd_sync_power_off - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
@@ -863,7 +950,7 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
* @genpd: PM domain the device belongs to.
*
* There are two cases in which a device that can wake up the system from sleep
- * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
+ * states should be resumed by genpd_prepare(): (1) if the device is enabled
* to wake up the system and it has to remain active for this purpose while the
* system is in the sleep state and (2) if the device is not enabled to wake up
* the system from sleep states and it generally doesn't generate wakeup signals
@@ -881,12 +968,12 @@ static bool resume_needed(struct device *dev,
if (!device_can_wakeup(dev))
return false;
- active_wakeup = genpd_dev_active_wakeup(genpd, dev);
+ active_wakeup = genpd_is_active_wakeup(genpd);
return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
/**
- * pm_genpd_prepare - Start power transition of a device in a PM domain.
+ * genpd_prepare - Start power transition of a device in a PM domain.
* @dev: Device to start the transition of.
*
* Start a power transition of a device (during a system-wide power transition)
@@ -894,7 +981,7 @@ static bool resume_needed(struct device *dev,
* an object of type struct generic_pm_domain representing a PM domain
* consisting of I/O devices.
*/
-static int pm_genpd_prepare(struct device *dev)
+static int genpd_prepare(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret;
@@ -921,7 +1008,7 @@ static int pm_genpd_prepare(struct device *dev)
genpd_unlock(genpd);
ret = pm_generic_prepare(dev);
- if (ret) {
+ if (ret < 0) {
genpd_lock(genpd);
genpd->prepared_count--;
@@ -929,7 +1016,8 @@ static int pm_genpd_prepare(struct device *dev)
genpd_unlock(genpd);
}
- return ret;
+ /* Never return 1, as genpd don't cope with the direct_complete path. */
+ return ret >= 0 ? 0 : ret;
}
/**
@@ -950,7 +1038,7 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
if (IS_ERR(genpd))
return -EINVAL;
- if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
return 0;
if (poweroff)
@@ -975,13 +1063,13 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
}
/**
- * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
+ * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
* @dev: Device to suspend.
*
* Stop the device and remove power from the domain if all devices in it have
* been stopped.
*/
-static int pm_genpd_suspend_noirq(struct device *dev)
+static int genpd_suspend_noirq(struct device *dev)
{
dev_dbg(dev, "%s()\n", __func__);
@@ -989,12 +1077,12 @@ static int pm_genpd_suspend_noirq(struct device *dev)
}
/**
- * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
+ * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
* @dev: Device to resume.
*
* Restore power to the device's PM domain, if necessary, and start the device.
*/
-static int pm_genpd_resume_noirq(struct device *dev)
+static int genpd_resume_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -1005,7 +1093,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
return 0;
genpd_lock(genpd);
@@ -1024,7 +1112,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
}
/**
- * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
+ * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
* @dev: Device to freeze.
*
* Carry out a late freeze of a device under the assumption that its
@@ -1032,7 +1120,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
* struct generic_pm_domain representing a power domain consisting of I/O
* devices.
*/
-static int pm_genpd_freeze_noirq(struct device *dev)
+static int genpd_freeze_noirq(struct device *dev)
{
const struct generic_pm_domain *genpd;
int ret = 0;
@@ -1054,13 +1142,13 @@ static int pm_genpd_freeze_noirq(struct device *dev)
}
/**
- * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
+ * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
* @dev: Device to thaw.
*
* Start the device, unless power has been removed from the domain already
* before the system transition.
*/
-static int pm_genpd_thaw_noirq(struct device *dev)
+static int genpd_thaw_noirq(struct device *dev)
{
const struct generic_pm_domain *genpd;
int ret = 0;
@@ -1081,14 +1169,14 @@ static int pm_genpd_thaw_noirq(struct device *dev)
}
/**
- * pm_genpd_poweroff_noirq - Completion of hibernation of device in an
+ * genpd_poweroff_noirq - Completion of hibernation of device in an
* I/O PM domain.
* @dev: Device to poweroff.
*
* Stop the device and remove power from the domain if all devices in it have
* been stopped.
*/
-static int pm_genpd_poweroff_noirq(struct device *dev)
+static int genpd_poweroff_noirq(struct device *dev)
{
dev_dbg(dev, "%s()\n", __func__);
@@ -1096,13 +1184,13 @@ static int pm_genpd_poweroff_noirq(struct device *dev)
}
/**
- * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
+ * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
* @dev: Device to resume.
*
* Make sure the domain will be in the same power state as before the
* hibernation the system is resuming from and start the device if necessary.
*/
-static int pm_genpd_restore_noirq(struct device *dev)
+static int genpd_restore_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -1139,7 +1227,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
}
/**
- * pm_genpd_complete - Complete power transition of a device in a power domain.
+ * genpd_complete - Complete power transition of a device in a power domain.
* @dev: Device to complete the transition of.
*
* Complete a power transition of a device (during a system-wide power
@@ -1147,7 +1235,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
* domain member of an object of type struct generic_pm_domain representing
* a power domain consisting of I/O devices.
*/
-static void pm_genpd_complete(struct device *dev)
+static void genpd_complete(struct device *dev)
{
struct generic_pm_domain *genpd;
@@ -1180,7 +1268,7 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
struct generic_pm_domain *genpd;
genpd = dev_to_genpd(dev);
- if (!pm_genpd_present(genpd))
+ if (!genpd_present(genpd))
return;
if (suspend) {
@@ -1206,14 +1294,14 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
#else /* !CONFIG_PM_SLEEP */
-#define pm_genpd_prepare NULL
-#define pm_genpd_suspend_noirq NULL
-#define pm_genpd_resume_noirq NULL
-#define pm_genpd_freeze_noirq NULL
-#define pm_genpd_thaw_noirq NULL
-#define pm_genpd_poweroff_noirq NULL
-#define pm_genpd_restore_noirq NULL
-#define pm_genpd_complete NULL
+#define genpd_prepare NULL
+#define genpd_suspend_noirq NULL
+#define genpd_resume_noirq NULL
+#define genpd_freeze_noirq NULL
+#define genpd_thaw_noirq NULL
+#define genpd_poweroff_noirq NULL
+#define genpd_restore_noirq NULL
+#define genpd_complete NULL
#endif /* CONFIG_PM_SLEEP */
@@ -1239,7 +1327,7 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
gpd_data->base.dev = dev;
gpd_data->td.constraint_changed = true;
- gpd_data->td.effective_constraint_ns = -1;
+ gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
spin_lock_irq(&dev->power.lock);
@@ -1574,14 +1662,14 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->accounting_time = ktime_get();
genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = genpd_runtime_resume;
- genpd->domain.ops.prepare = pm_genpd_prepare;
- genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
- genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
- genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
- genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
- genpd->domain.ops.poweroff_noirq = pm_genpd_poweroff_noirq;
- genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
- genpd->domain.ops.complete = pm_genpd_complete;
+ genpd->domain.ops.prepare = genpd_prepare;
+ genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
+ genpd->domain.ops.resume_noirq = genpd_resume_noirq;
+ genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
+ genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
+ genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
+ genpd->domain.ops.restore_noirq = genpd_restore_noirq;
+ genpd->domain.ops.complete = genpd_complete;
if (genpd->flags & GENPD_FLAG_PM_CLK) {
genpd->dev_ops.stop = pm_clk_suspend;
@@ -1795,7 +1883,7 @@ int of_genpd_add_provider_simple(struct device_node *np,
mutex_lock(&gpd_list_lock);
- if (pm_genpd_present(genpd)) {
+ if (genpd_present(genpd)) {
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
if (!ret) {
genpd->provider = &np->fwnode;
@@ -1831,7 +1919,7 @@ int of_genpd_add_provider_onecell(struct device_node *np,
for (i = 0; i < data->num_domains; i++) {
if (!data->domains[i])
continue;
- if (!pm_genpd_present(data->domains[i]))
+ if (!genpd_present(data->domains[i]))
goto error;
data->domains[i]->provider = &np->fwnode;
@@ -2274,7 +2362,7 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
-static struct dentry *pm_genpd_debugfs_dir;
+static struct dentry *genpd_debugfs_dir;
/*
* TODO: This function is a slightly modified version of rtpm_status_show
@@ -2302,8 +2390,8 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
seq_puts(s, p);
}
-static int pm_genpd_summary_one(struct seq_file *s,
- struct generic_pm_domain *genpd)
+static int genpd_summary_one(struct seq_file *s,
+ struct generic_pm_domain *genpd)
{
static const char * const status_lookup[] = {
[GPD_STATE_ACTIVE] = "on",
@@ -2373,7 +2461,7 @@ static int genpd_summary_show(struct seq_file *s, void *data)
return -ERESTARTSYS;
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
- ret = pm_genpd_summary_one(s, genpd);
+ ret = genpd_summary_one(s, genpd);
if (ret)
break;
}
@@ -2559,23 +2647,23 @@ define_genpd_debugfs_fops(active_time);
define_genpd_debugfs_fops(total_idle_time);
define_genpd_debugfs_fops(devices);
-static int __init pm_genpd_debug_init(void)
+static int __init genpd_debug_init(void)
{
struct dentry *d;
struct generic_pm_domain *genpd;
- pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
+ genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
- if (!pm_genpd_debugfs_dir)
+ if (!genpd_debugfs_dir)
return -ENOMEM;
d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
- pm_genpd_debugfs_dir, NULL, &genpd_summary_fops);
+ genpd_debugfs_dir, NULL, &genpd_summary_fops);
if (!d)
return -ENOMEM;
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
- d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir);
+ d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
if (!d)
return -ENOMEM;
@@ -2595,11 +2683,11 @@ static int __init pm_genpd_debug_init(void)
return 0;
}
-late_initcall(pm_genpd_debug_init);
+late_initcall(genpd_debug_init);
-static void __exit pm_genpd_debug_exit(void)
+static void __exit genpd_debug_exit(void)
{
- debugfs_remove_recursive(pm_genpd_debugfs_dir);
+ debugfs_remove_recursive(genpd_debugfs_dir);
}
-__exitcall(pm_genpd_debug_exit);
+__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */
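Note (not part of the patch): dev_pm_genpd_set_performance_state(), added near the top of this file, keeps genpd->performance_state equal to the maximum of all per-device requests in the domain. Raising a request is O(1), since a higher value can only raise the maximum; lowering one forces a walk of genpd->dev_list to find the new maximum, which is the loop in the middle of the function. That aggregation step in isolation (a sketch reusing the same structures as the patch, locking and error handling omitted):

    static unsigned int genpd_aggregate_performance_state(struct generic_pm_domain *genpd,
                                                          unsigned int state)
    {
            struct pm_domain_data *pdd;

            /* The caller's own (already updated) request is passed in as 'state'. */
            list_for_each_entry(pdd, &genpd->dev_list, list_node) {
                    struct generic_pm_domain_data *pd_data = to_gpd_data(pdd);

                    if (pd_data->performance_state > state)
                            state = pd_data->performance_state;
            }

            return state;
    }

The hardware callback genpd->set_performance_state() is only invoked while the domain is powered on; _genpd_power_on() replays the cached value otherwise, as the second hunk shows.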
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 281f949c5ffe..99896fbf18e4 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -14,23 +14,29 @@
static int dev_update_qos_constraint(struct device *dev, void *data)
{
s64 *constraint_ns_p = data;
- s32 constraint_ns = -1;
+ s64 constraint_ns;
- if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
+ if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
+ /*
+ * Only take suspend-time QoS constraints of devices into
+ * account, because constraints updated after the device has
+ * been suspended are not guaranteed to be taken into account
+ * anyway. In order for them to take effect, the device has to
+ * be resumed and suspended again.
+ */
constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
-
- if (constraint_ns < 0) {
+ } else {
+ /*
+ * The child is not in a domain and there's no info on its
+ * suspend/resume latencies, so assume them to be negligible and
+ * take its current PM QoS constraint (that's the only thing
+ * known at this point anyway).
+ */
constraint_ns = dev_pm_qos_read_value(dev);
constraint_ns *= NSEC_PER_USEC;
}
- if (constraint_ns == 0)
- return 0;
- /*
- * constraint_ns cannot be negative here, because the device has been
- * suspended.
- */
- if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0)
+ if (constraint_ns < *constraint_ns_p)
*constraint_ns_p = constraint_ns;
return 0;
@@ -58,12 +64,12 @@ static bool default_suspend_ok(struct device *dev)
}
td->constraint_changed = false;
td->cached_suspend_ok = false;
- td->effective_constraint_ns = -1;
+ td->effective_constraint_ns = 0;
constraint_ns = __dev_pm_qos_read_value(dev);
spin_unlock_irqrestore(&dev->power.lock, flags);
- if (constraint_ns < 0)
+ if (constraint_ns == 0)
return false;
constraint_ns *= NSEC_PER_USEC;
@@ -76,14 +82,32 @@ static bool default_suspend_ok(struct device *dev)
device_for_each_child(dev, &constraint_ns,
dev_update_qos_constraint);
- if (constraint_ns > 0) {
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) {
+ /* "No restriction", so the device is allowed to suspend. */
+ td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
+ td->cached_suspend_ok = true;
+ } else if (constraint_ns == 0) {
+ /*
+ * This triggers if one of the children that don't belong to a
+ * domain has a zero PM QoS constraint and it's better not to
+ * suspend then. effective_constraint_ns is zero already and
+ * cached_suspend_ok is false, so bail out.
+ */
+ return false;
+ } else {
constraint_ns -= td->suspend_latency_ns +
td->resume_latency_ns;
- if (constraint_ns == 0)
+ /*
+ * effective_constraint_ns is zero already and cached_suspend_ok
+ * is false, so if the computed value is not positive, return
+ * right away.
+ */
+ if (constraint_ns <= 0)
return false;
+
+ td->effective_constraint_ns = constraint_ns;
+ td->cached_suspend_ok = true;
}
- td->effective_constraint_ns = constraint_ns;
- td->cached_suspend_ok = constraint_ns >= 0;
/*
* The children have been suspended already, so we don't need to take
@@ -144,18 +168,13 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
*/
td = &to_gpd_data(pdd)->td;
constraint_ns = td->effective_constraint_ns;
- /* default_suspend_ok() need not be called before us. */
- if (constraint_ns < 0) {
- constraint_ns = dev_pm_qos_read_value(pdd->dev);
- constraint_ns *= NSEC_PER_USEC;
- }
- if (constraint_ns == 0)
- continue;
-
/*
- * constraint_ns cannot be negative here, because the device has
- * been suspended.
+ * Zero means "no suspend at all" and this runs only when all
+ * devices in the domain are suspended, so it must be positive.
*/
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS)
+ continue;
+
if (constraint_ns <= off_on_time_ns)
return false;
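Note (not part of the patch): this file, together with the qos.c and sysfs.c changes below, retires -1 as the "no constraint" sentinel for resume-latency QoS. After default_suspend_ok() runs, effective_constraint_ns can only take one of three shapes, summarised here (a restatement of the hunk, not extra code paths):

    /*
     * effective_constraint_ns after default_suspend_ok():
     *
     *   PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS  no user limit, suspend allowed
     *   0                                       a zero constraint was seen,
     *                                           suspend is not allowed at all
     *   > 0                                     remaining latency budget after
     *                                           subtracting the device's own
     *                                           suspend + resume latencies
     */

__default_power_down_ok() can therefore treat NO_CONSTRAINT_NS as "skip this device" and compare every other value directly against the domain's off/on time.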
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 07c3c4a9522d..b2ed606265a8 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -9,7 +9,6 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
-#include <linux/suspend.h>
#ifdef CONFIG_PM
/**
@@ -298,26 +297,4 @@ void pm_generic_complete(struct device *dev)
if (drv && drv->pm && drv->pm->complete)
drv->pm->complete(dev);
}
-
-/**
- * pm_complete_with_resume_check - Complete a device power transition.
- * @dev: Device to handle.
- *
- * Complete a device power transition during a system-wide power transition and
- * optionally schedule a runtime resume of the device if the system resume in
- * progress has been initated by the platform firmware and the device had its
- * power.direct_complete flag set.
- */
-void pm_complete_with_resume_check(struct device *dev)
-{
- pm_generic_complete(dev);
- /*
- * If the device had been runtime-suspended before the system went into
- * the sleep state it is going out of and it has never been resumed till
- * now, resume it in case the firmware powered it up.
- */
- if (dev->power.direct_complete && pm_resume_via_firmware())
- pm_request_resume(dev);
-}
-EXPORT_SYMBOL_GPL(pm_complete_with_resume_check);
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index ae47b2ec84b4..db2f04415927 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -526,7 +526,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
/*------------------------- Resume routines -------------------------*/
/**
- * device_resume_noirq - Execute an "early resume" callback for given device.
+ * device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
@@ -846,16 +846,10 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Driver;
}
- if (dev->class) {
- if (dev->class->pm) {
- info = "class ";
- callback = pm_op(dev->class->pm, state);
- goto Driver;
- } else if (dev->class->resume) {
- info = "legacy class ";
- callback = dev->class->resume;
- goto End;
- }
+ if (dev->class && dev->class->pm) {
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Driver;
}
if (dev->bus) {
@@ -1081,7 +1075,7 @@ static pm_message_t resume_event(pm_message_t sleep_state)
}
/**
- * device_suspend_noirq - Execute a "late suspend" callback for given device.
+ * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1241,7 +1235,7 @@ int dpm_suspend_noirq(pm_message_t state)
}
/**
- * device_suspend_late - Execute a "late suspend" callback for given device.
+ * __device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1443,7 +1437,7 @@ static void dpm_clear_suppliers_direct_complete(struct device *dev)
}
/**
- * device_suspend - Execute "suspend" callbacks for given device.
+ * __device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1506,17 +1500,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
goto Run;
}
- if (dev->class) {
- if (dev->class->pm) {
- info = "class ";
- callback = pm_op(dev->class->pm, state);
- goto Run;
- } else if (dev->class->suspend) {
- pm_dev_dbg(dev, state, "legacy class ");
- error = legacy_suspend(dev, state, dev->class->suspend,
- "legacy class ");
- goto End;
- }
+ if (dev->class && dev->class->pm) {
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Run;
}
if (dev->bus) {
@@ -1663,6 +1650,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
if (dev->power.syscore)
return 0;
+ WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
+ !pm_runtime_enabled(dev));
+
/*
* If a device's parent goes into runtime suspend at the wrong time,
* it won't be possible to resume the device. To prevent this we
@@ -1711,7 +1701,9 @@ unlock:
* applies to suspend transitions, however.
*/
spin_lock_irq(&dev->power.lock);
- dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
+ dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
+ pm_runtime_suspended(dev) && ret > 0 &&
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
spin_unlock_irq(&dev->power.lock);
return 0;
}
@@ -1860,11 +1852,16 @@ void device_pm_check_callbacks(struct device *dev)
dev->power.no_pm_callbacks =
(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
!dev->bus->suspend && !dev->bus->resume)) &&
- (!dev->class || (pm_ops_is_empty(dev->class->pm) &&
- !dev->class->suspend && !dev->class->resume)) &&
+ (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
!dev->driver->suspend && !dev->driver->resume));
spin_unlock_irq(&dev->power.lock);
}
+
+bool dev_pm_smart_suspend_and_suspended(struct device *dev)
+{
+ return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
+ pm_runtime_status_suspended(dev);
+}
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 277d43a83f53..3382542b39b7 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -139,6 +139,9 @@ static int apply_constraint(struct dev_pm_qos_request *req,
switch(req->type) {
case DEV_PM_QOS_RESUME_LATENCY:
+ if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
+ value = 0;
+
ret = pm_qos_update_target(&qos->resume_latency,
&req->data.pnode, action, value);
break;
@@ -189,7 +192,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
plist_head_init(&c->list);
c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
- c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
c->notifiers = n;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 7bcf80fa9ada..2362b9e9701e 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -134,11 +134,11 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
if (!dev->power.use_autosuspend)
goto out;
- autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
+ autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
if (autosuspend_delay < 0)
goto out;
- last_busy = ACCESS_ONCE(dev->power.last_busy);
+ last_busy = READ_ONCE(dev->power.last_busy);
elapsed = jiffies - last_busy;
if (elapsed < 0)
goto out; /* jiffies has wrapped around. */
@@ -253,7 +253,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
|| (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN;
- else if (__dev_pm_qos_read_value(dev) < 0)
+ else if (__dev_pm_qos_read_value(dev) == 0)
retval = -EPERM;
else if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
@@ -894,9 +894,9 @@ static void pm_runtime_work(struct work_struct *work)
*
* Check if the time is right and queue a suspend request.
*/
-static void pm_suspend_timer_fn(unsigned long data)
+static void pm_suspend_timer_fn(struct timer_list *t)
{
- struct device *dev = (struct device *)data;
+ struct device *dev = from_timer(dev, t, power.suspend_timer);
unsigned long flags;
unsigned long expires;
@@ -1499,8 +1499,7 @@ void pm_runtime_init(struct device *dev)
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
- (unsigned long)dev);
+ timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);
init_waitqueue_head(&dev->power.wait_queue);
}
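Note (not part of the patch): the pm_suspend_timer_fn() change is part of the tree-wide timer conversion: callbacks now receive the struct timer_list itself and recover their container with from_timer() instead of casting timer->data. The general idiom, as a self-contained sketch (the demo_* names are made up):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct demo_device {
            struct timer_list suspend_timer;
            /* ... */
    };

    static void demo_timer_fn(struct timer_list *t)
    {
            /* from_timer() is container_of() specialised for timer_list members. */
            struct demo_device *dd = from_timer(dd, t, suspend_timer);

            /* ... act on dd ... */
    }

    static void demo_init(struct demo_device *dd)
    {
            timer_setup(&dd->suspend_timer, demo_timer_fn, 0);
            mod_timer(&dd->suspend_timer, jiffies + HZ);
    }

The same conversion is applied to pm_wakeup_timer_fn() in wakeup.c below, where the transitional TIMER_FUNC_TYPE cast is still needed for the function-pointer comparison.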
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 156ab57bca77..e153e28b1857 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -218,7 +218,14 @@ static ssize_t pm_qos_resume_latency_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
+ s32 value = dev_pm_qos_requested_resume_latency(dev);
+
+ if (value == 0)
+ return sprintf(buf, "n/a\n");
+ else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ value = 0;
+
+ return sprintf(buf, "%d\n", value);
}
static ssize_t pm_qos_resume_latency_store(struct device *dev,
@@ -228,11 +235,21 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
s32 value;
int ret;
- if (kstrtos32(buf, 0, &value))
- return -EINVAL;
+ if (!kstrtos32(buf, 0, &value)) {
+ /*
+ * Prevent users from writing negative or "no constraint" values
+ * directly.
+ */
+ if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ return -EINVAL;
- if (value < 0)
+ if (value == 0)
+ value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
+ value = 0;
+ } else {
return -EINVAL;
+ }
ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
value);
@@ -309,33 +326,6 @@ static ssize_t pm_qos_no_power_off_store(struct device *dev,
static DEVICE_ATTR(pm_qos_no_power_off, 0644,
pm_qos_no_power_off_show, pm_qos_no_power_off_store);
-static ssize_t pm_qos_remote_wakeup_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
- & PM_QOS_FLAG_REMOTE_WAKEUP));
-}
-
-static ssize_t pm_qos_remote_wakeup_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
-{
- int ret;
-
- if (kstrtoint(buf, 0, &ret))
- return -EINVAL;
-
- if (ret != 0 && ret != 1)
- return -EINVAL;
-
- ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, ret);
- return ret < 0 ? ret : n;
-}
-
-static DEVICE_ATTR(pm_qos_remote_wakeup, 0644,
- pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store);
-
#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";
@@ -671,7 +661,6 @@ static const struct attribute_group pm_qos_latency_tolerance_attr_group = {
static struct attribute *pm_qos_flags_attrs[] = {
&dev_attr_pm_qos_no_power_off.attr,
- &dev_attr_pm_qos_remote_wakeup.attr,
NULL,
};
static const struct attribute_group pm_qos_flags_attr_group = {
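
Taken together, the qos.c, runtime.c and sysfs.c hunks change the encoding of the device resume-latency constraint: 0 now means "no suspend allowed" (shown as "n/a" in sysfs), while PM_QOS_RESUME_LATENCY_NO_CONSTRAINT means "no limit". A hedged sketch of how a PM-domain governor might consume the new encoding (the helper name and latency parameter are invented; dev_pm_qos_read_value() is the existing accessor):

#include <linux/device.h>
#include <linux/pm_qos.h>

static bool sketch_may_runtime_suspend(struct device *dev, s32 wakeup_latency_us)
{
	s32 constraint = dev_pm_qos_read_value(dev);

	if (constraint == 0)		/* "n/a": runtime suspend forbidden */
		return false;
	if (constraint == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
		return true;		/* no latency limit requested */
	return wakeup_latency_us <= constraint;	/* honour the requested bound */
}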
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index cdd6f256da59..680ee1d36ac9 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -54,7 +54,7 @@ static unsigned int saved_count;
static DEFINE_SPINLOCK(events_lock);
-static void pm_wakeup_timer_fn(unsigned long data);
+static void pm_wakeup_timer_fn(struct timer_list *t);
static LIST_HEAD(wakeup_sources);
@@ -176,7 +176,7 @@ void wakeup_source_add(struct wakeup_source *ws)
return;
spin_lock_init(&ws->lock);
- setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
+ timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
ws->active = false;
ws->last_time = ktime_get();
@@ -481,8 +481,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
* Use timer struct to check if the given source is initialized
* by wakeup_source_add.
*/
- return ws->timer.function != pm_wakeup_timer_fn ||
- ws->timer.data != (unsigned long)ws;
+ return ws->timer.function != (TIMER_FUNC_TYPE)pm_wakeup_timer_fn;
}
/*
@@ -724,9 +723,9 @@ EXPORT_SYMBOL_GPL(pm_relax);
* in @data if it is currently active and its timer has not been canceled and
* the expiration time of the timer is not in future.
*/
-static void pm_wakeup_timer_fn(unsigned long data)
+static void pm_wakeup_timer_fn(struct timer_list *t)
{
- struct wakeup_source *ws = (struct wakeup_source *)data;
+ struct wakeup_source *ws = from_timer(ws, t, timer);
unsigned long flags;
spin_lock_irqsave(&ws->lock, flags);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 7ed99c1b2a8b..851b1b6596a4 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -1044,10 +1044,15 @@ EXPORT_SYMBOL_GPL(device_get_named_child_node);
/**
* fwnode_handle_get - Obtain a reference to a device node
* @fwnode: Pointer to the device node to obtain the reference to.
+ *
+ * Returns the fwnode handle.
*/
-void fwnode_handle_get(struct fwnode_handle *fwnode)
+struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode)
{
- fwnode_call_void_op(fwnode, get);
+ if (!fwnode_has_op(fwnode, get))
+ return fwnode;
+
+ return fwnode_call_ptr_op(fwnode, get);
}
EXPORT_SYMBOL_GPL(fwnode_handle_get);
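
With fwnode_handle_get() now returning the handle, callers can take a reference and store the pointer in a single expression. A small sketch (the function name is invented for illustration):

#include <linux/property.h>

static struct fwnode_handle *sketch_cache_fwnode(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *ref = fwnode_handle_get(fwnode);	/* counted ref, or fwnode itself */

	/* ... keep ref around; release it later with fwnode_handle_put(ref) ... */
	return ref;
}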
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 0368fd7b3a41..3a1535d812d8 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,6 +6,7 @@
config REGMAP
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
select IRQ_DOMAIN if REGMAP_IRQ
+ select REGMAP_HWSPINLOCK if HWSPINLOCK=y
bool
config REGCACHE_COMPRESSED
@@ -37,3 +38,6 @@ config REGMAP_MMIO
config REGMAP_IRQ
bool
+
+config REGMAP_HWSPINLOCK
+ bool
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 2a4435d76028..8641183cac2f 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -157,6 +157,8 @@ struct regmap {
struct rb_root range_tree;
void *selector_work_buf; /* Scratch buffer used for selector */
+
+ struct hwspinlock *hwlock;
};
struct regcache_ops {
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index edd9a839d004..c7150dd264d5 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -102,7 +102,7 @@ static int regmap_spi_read(void *context,
return spi_write_then_read(spi, reg, reg_size, val, val_size);
}
-static struct regmap_bus regmap_spi = {
+static const struct regmap_bus regmap_spi = {
.write = regmap_spi_write,
.gather_write = regmap_spi_gather_write,
.async_write = regmap_spi_async_write,
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index 4a36e415e938..0bfb8ed244d5 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -83,7 +83,7 @@ static int regmap_spmi_base_write(void *context, const void *data,
count - 1);
}
-static struct regmap_bus regmap_spmi_base = {
+static const struct regmap_bus regmap_spmi_base = {
.read = regmap_spmi_base_read,
.write = regmap_spmi_base_write,
.gather_write = regmap_spmi_base_gather_write,
@@ -203,7 +203,7 @@ static int regmap_spmi_ext_write(void *context, const void *data,
count - 2);
}
-static struct regmap_bus regmap_spmi_ext = {
+static const struct regmap_bus regmap_spmi_ext = {
.read = regmap_spmi_ext_read,
.write = regmap_spmi_ext_write,
.gather_write = regmap_spmi_ext_gather_write,
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index b9a779a4a739..8d516a9bfc01 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
+#include <linux/hwspinlock.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -413,6 +414,51 @@ static unsigned int regmap_parse_64_native(const void *buf)
}
#endif
+#ifdef CONFIG_REGMAP_HWSPINLOCK
+static void regmap_lock_hwlock(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout(map->hwlock, UINT_MAX);
+}
+
+static void regmap_lock_hwlock_irq(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
+}
+
+static void regmap_lock_hwlock_irqsave(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
+ &map->spinlock_flags);
+}
+
+static void regmap_unlock_hwlock(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock(map->hwlock);
+}
+
+static void regmap_unlock_hwlock_irq(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock_irq(map->hwlock);
+}
+
+static void regmap_unlock_hwlock_irqrestore(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
+}
+#endif
+
static void regmap_lock_mutex(void *__map)
{
struct regmap *map = __map;
@@ -627,6 +673,34 @@ struct regmap *__regmap_init(struct device *dev,
map->lock = config->lock;
map->unlock = config->unlock;
map->lock_arg = config->lock_arg;
+ } else if (config->hwlock_id) {
+#ifdef CONFIG_REGMAP_HWSPINLOCK
+ map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
+ if (!map->hwlock) {
+ ret = -ENXIO;
+ goto err_map;
+ }
+
+ switch (config->hwlock_mode) {
+ case HWLOCK_IRQSTATE:
+ map->lock = regmap_lock_hwlock_irqsave;
+ map->unlock = regmap_unlock_hwlock_irqrestore;
+ break;
+ case HWLOCK_IRQ:
+ map->lock = regmap_lock_hwlock_irq;
+ map->unlock = regmap_unlock_hwlock_irq;
+ break;
+ default:
+ map->lock = regmap_lock_hwlock;
+ map->unlock = regmap_unlock_hwlock;
+ break;
+ }
+
+ map->lock_arg = map;
+#else
+ ret = -EINVAL;
+ goto err_map;
+#endif
} else {
if ((bus && bus->fast_io) ||
config->fast_io) {
@@ -729,7 +803,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_2_6_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -739,7 +813,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_4_12_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -749,7 +823,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_7_9_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -759,7 +833,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_10_14_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -779,13 +853,13 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_reg = regmap_format_16_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
case 24:
if (reg_endian != REGMAP_ENDIAN_BIG)
- goto err_map;
+ goto err_hwlock;
map->format.format_reg = regmap_format_24;
break;
@@ -801,7 +875,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_reg = regmap_format_32_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -818,13 +892,13 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_reg = regmap_format_64_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
#endif
default:
- goto err_map;
+ goto err_hwlock;
}
if (val_endian == REGMAP_ENDIAN_NATIVE)
@@ -853,12 +927,12 @@ struct regmap *__regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_16_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
case 24:
if (val_endian != REGMAP_ENDIAN_BIG)
- goto err_map;
+ goto err_hwlock;
map->format.format_val = regmap_format_24;
map->format.parse_val = regmap_parse_24;
break;
@@ -879,7 +953,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_32_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
#ifdef CONFIG_64BIT
@@ -900,7 +974,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_64_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
#endif
@@ -909,18 +983,18 @@ struct regmap *__regmap_init(struct device *dev,
if (map->format.format_write) {
if ((reg_endian != REGMAP_ENDIAN_BIG) ||
(val_endian != REGMAP_ENDIAN_BIG))
- goto err_map;
+ goto err_hwlock;
map->use_single_write = true;
}
if (!map->format.format_write &&
!(map->format.format_reg && map->format.format_val))
- goto err_map;
+ goto err_hwlock;
map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
if (map->work_buf == NULL) {
ret = -ENOMEM;
- goto err_map;
+ goto err_hwlock;
}
if (map->format.format_write) {
@@ -1041,6 +1115,9 @@ err_regcache:
err_range:
regmap_range_exit(map);
kfree(map->work_buf);
+err_hwlock:
+ if (IS_ENABLED(CONFIG_REGMAP_HWSPINLOCK) && map->hwlock)
+ hwspin_lock_free(map->hwlock);
err_map:
kfree(map);
err:
@@ -1228,6 +1305,8 @@ void regmap_exit(struct regmap *map)
kfree(async->work_buf);
kfree(async);
}
+ if (IS_ENABLED(CONFIG_REGMAP_HWSPINLOCK) && map->hwlock)
+ hwspin_lock_free(map->hwlock);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
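
The regmap hunks above let a map be serialized with a hardware spinlock instead of a mutex or spinlock, which matters when the registers are shared with a coprocessor. A hedged sketch of a client configuration using the hwlock fields introduced elsewhere in this series (all values below are invented for illustration):

#include <linux/regmap.h>
#include <linux/hwspinlock.h>

static const struct regmap_config sketch_shared_regs_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.hwlock_id	= 3,			/* hwspinlock index owned by this block */
	.hwlock_mode	= HWLOCK_IRQSTATE,	/* save/restore IRQ state around the lock */
};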
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index 304d5c2bd5e9..a3355d66bc12 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -64,7 +64,7 @@ static int __init test_async_probe_init(void)
NULL, 0);
if (IS_ERR(async_dev_1)) {
error = PTR_ERR(async_dev_1);
- pr_err("failed to create async_dev_1: %d", error);
+ pr_err("failed to create async_dev_1: %d\n", error);
return error;
}
@@ -91,7 +91,7 @@ static int __init test_async_probe_init(void)
NULL, 0);
if (IS_ERR(async_dev_2)) {
error = PTR_ERR(async_dev_2);
- pr_err("failed to create async_dev_2: %d", error);
+ pr_err("failed to create async_dev_2: %d\n", error);
goto err_unregister_async_driver;
}
@@ -118,7 +118,7 @@ static int __init test_async_probe_init(void)
NULL, 0);
if (IS_ERR(sync_dev_1)) {
error = PTR_ERR(sync_dev_1);
- pr_err("failed to create sync_dev_1: %d", error);
+ pr_err("failed to create sync_dev_1: %d\n", error);
goto err_unregister_sync_driver;
}
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 982d5781d3ce..2c0ffb77d738 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -113,7 +113,7 @@ static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
for_each_set_bit(gpio, &irqs, gc->ngpio)
- generic_handle_irq(irq_find_mapping(gc->irqdomain, gpio));
+ generic_handle_irq(irq_find_mapping(gc->irq.domain, gpio));
bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
return IRQ_HANDLED;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 7b2df7a54d87..923b417eaf4c 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -68,9 +68,13 @@ config AMIGA_Z2RAM
To compile this driver as a module, choose M here: the
module will be called z2ram.
+config CDROM
+ tristate
+
config GDROM
tristate "SEGA Dreamcast GD-ROM drive"
depends on SH_DREAMCAST
+ select CDROM
select BLK_SCSI_REQUEST # only for the generic cdrom code
help
A standard SEGA Dreamcast comes with a modified CD ROM drive called a
@@ -348,6 +352,7 @@ config BLK_DEV_RAM_DAX
config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media (DEPRECATED)"
depends on !UML
+ select CDROM
select BLK_SCSI_REQUEST
help
Note: This driver is deprecated and will be removed from the
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 2d7178f7754e..588360d79fca 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -20,6 +20,7 @@
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/backing-dev.h>
#ifdef CONFIG_BLK_DEV_RAM_DAX
#include <linux/pfn_t.h>
#include <linux/dax.h>
@@ -60,7 +61,6 @@ struct brd_device {
/*
* Look up and return a brd's page for a given sector.
*/
-static DEFINE_MUTEX(brd_mutex);
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
pgoff_t idx;
@@ -449,6 +449,7 @@ static struct brd_device *brd_alloc(int i)
disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "ram%d", i);
set_capacity(disk, rd_size * 2);
+ disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
#ifdef CONFIG_BLK_DEV_RAM_DAX
queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 74e03aa537ad..7033a4beda66 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -43,7 +43,6 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
int cipher_len;
int mode_len;
char cms[LO_NAME_SIZE]; /* cipher-mode string */
- char *cipher;
char *mode;
char *cmsp = cms; /* c-m string pointer */
struct crypto_skcipher *tfm;
@@ -56,7 +55,6 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE);
cms[LO_NAME_SIZE - 1] = 0;
- cipher = cmsp;
cipher_len = strcspn(cmsp, "-");
mode = cmsp + cipher_len;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 60c086a53609..a54183935aa1 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -275,6 +275,10 @@ static int set_next_request(void);
#define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL, get_order(size))
#endif
+#ifndef fd_cacheflush
+#define fd_cacheflush(addr, size) /* nothing... */
+#endif
+
static inline void fallback_on_nodma_alloc(char **addr, size_t l)
{
#ifdef FLOPPY_CAN_FALLBACK_ON_NODMA
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 85de67334695..bc8e61506968 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -476,6 +476,8 @@ static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
{
struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
+ if (cmd->css)
+ css_put(cmd->css);
cmd->ret = ret;
lo_rw_aio_do_completion(cmd);
}
@@ -535,6 +537,8 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
cmd->iocb.ki_filp = file;
cmd->iocb.ki_complete = lo_rw_aio_complete;
cmd->iocb.ki_flags = IOCB_DIRECT;
+ if (cmd->css)
+ kthread_associate_blkcg(cmd->css);
if (rw == WRITE)
ret = call_write_iter(file, &cmd->iocb, &iter);
@@ -542,6 +546,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
ret = call_read_iter(file, &cmd->iocb, &iter);
lo_rw_aio_do_completion(cmd);
+ kthread_associate_blkcg(NULL);
if (ret != -EIOCBQUEUED)
cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
@@ -1686,6 +1691,14 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
break;
}
+ /* always use the first bio's css */
+#ifdef CONFIG_BLK_CGROUP
+ if (cmd->use_aio && cmd->rq->bio && cmd->rq->bio->bi_css) {
+ cmd->css = cmd->rq->bio->bi_css;
+ css_get(cmd->css);
+ } else
+#endif
+ cmd->css = NULL;
kthread_queue_work(&lo->worker, &cmd->work);
return BLK_STS_OK;
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 1f3956702993..0f45416e4fcf 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -72,6 +72,7 @@ struct loop_cmd {
long ret;
struct kiocb iocb;
struct bio_vec *bvec;
+ struct cgroup_subsys_state *css;
};
/* Support for loadable transfer modules */
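
The loop hunks charge asynchronous I/O issued by the worker kthread to the cgroup of the bio that originated it, via kthread_associate_blkcg(). A hedged sketch of that pattern (the helper below is invented; css is assumed to be a referenced cgroup_subsys_state, as in loop_queue_rq() above):

#include <linux/kthread.h>
#include <linux/cgroup.h>

static void sketch_do_io_for(struct cgroup_subsys_state *css)
{
	if (css)
		kthread_associate_blkcg(css);	/* charge the following I/O to css */

	/* ... build and submit the I/O here ... */

	kthread_associate_blkcg(NULL);		/* revert to the kthread's own blkcg */
}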
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 4a3cfc7940de..b8af7352a18f 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -887,12 +887,9 @@ static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
static bool mtip_pause_ncq(struct mtip_port *port,
struct host_to_dev_fis *fis)
{
- struct host_to_dev_fis *reply;
unsigned long task_file_data;
- reply = port->rxfis + RX_FIS_D2H_REG;
task_file_data = readl(port->mmio+PORT_TFDATA);
-
if ((task_file_data & 1))
return false;
@@ -1020,7 +1017,6 @@ static int mtip_exec_internal_command(struct mtip_port *port,
.opts = opts
};
int rv = 0;
- unsigned long start;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
@@ -1057,7 +1053,6 @@ static int mtip_exec_internal_command(struct mtip_port *port,
/* Copy the command to the command table */
memcpy(int_cmd->command, fis, fis_len*4);
- start = jiffies;
rq->timeout = timeout;
/* insert request and run queue */
@@ -3015,7 +3010,6 @@ static int mtip_hw_init(struct driver_data *dd)
{
int i;
int rv;
- unsigned int num_command_slots;
unsigned long timeout, timetaken;
dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
@@ -3025,7 +3019,6 @@ static int mtip_hw_init(struct driver_data *dd)
rv = -EIO;
goto out1;
}
- num_command_slots = dd->slot_groups * 32;
hba_setup(dd);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9adfb5445f8d..5f2a4240a204 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -288,15 +288,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
cmd->status = BLK_STS_TIMEOUT;
return BLK_EH_HANDLED;
}
-
- /* If we are waiting on our dead timer then we could get timeout
- * callbacks for our request. For this we just want to reset the timer
- * and let the queue side take care of everything.
- */
- if (!completion_done(&cmd->send_complete)) {
- nbd_config_put(nbd);
- return BLK_EH_RESET_TIMER;
- }
config = nbd->config;
if (config->num_connections > 1) {
@@ -723,9 +714,9 @@ static int wait_for_reconnect(struct nbd_device *nbd)
return 0;
if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
return 0;
- wait_event_interruptible_timeout(config->conn_wait,
- atomic_read(&config->live_connections),
- config->dead_conn_timeout);
+ wait_event_timeout(config->conn_wait,
+ atomic_read(&config->live_connections),
+ config->dead_conn_timeout);
return atomic_read(&config->live_connections);
}
@@ -740,6 +731,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
if (!refcount_inc_not_zero(&nbd->config_refs)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n");
+ blk_mq_start_request(req);
return -EINVAL;
}
config = nbd->config;
@@ -748,6 +740,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted send on invalid socket\n");
nbd_config_put(nbd);
+ blk_mq_start_request(req);
return -EINVAL;
}
cmd->status = BLK_STS_OK;
@@ -771,6 +764,7 @@ again:
*/
sock_shutdown(nbd);
nbd_config_put(nbd);
+ blk_mq_start_request(req);
return -EIO;
}
goto again;
@@ -781,6 +775,7 @@ again:
* here so that it gets put _after_ the request that is already on the
* dispatch list.
*/
+ blk_mq_start_request(req);
if (unlikely(nsock->pending && nsock->pending != req)) {
blk_mq_requeue_request(req, true);
ret = 0;
@@ -793,10 +788,10 @@ again:
ret = nbd_send_cmd(nbd, cmd, index);
if (ret == -EAGAIN) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
- "Request send failed trying another connection\n");
+ "Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1);
- mutex_unlock(&nsock->tx_lock);
- goto again;
+ blk_mq_requeue_request(req, true);
+ ret = 0;
}
out:
mutex_unlock(&nsock->tx_lock);
@@ -820,7 +815,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* done sending everything over the wire.
*/
init_completion(&cmd->send_complete);
- blk_mq_start_request(bd->rq);
/* We can be called directly from the user space process, which means we
* could possibly have signals pending so our sendmsg will fail. In
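
The nbd hunks move blk_mq_start_request() out of nbd_queue_rq() and into nbd_handle_cmd(), so the request is always started before any of the error paths complete, requeue or time it out. A hedged sketch of that ordering rule (driver and helper names are invented for illustration):

#include <linux/blk-mq.h>

static bool sketch_try_send(struct request *req)
{
	return true;	/* stand-in for the real transmit attempt */
}

static blk_status_t sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;

	blk_mq_start_request(req);		/* mark in flight, arm the timeout */

	if (!sketch_try_send(req)) {
		/* Safe only because the request was started above. */
		blk_mq_requeue_request(req, true);
		return BLK_STS_OK;
	}
	return BLK_STS_OK;
}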
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8042c26ea9e6..c61960deb74a 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -154,6 +154,10 @@ enum {
NULL_Q_MQ = 2,
};
+static int g_no_sched;
+module_param_named(no_sched, g_no_sched, int, S_IRUGO);
+MODULE_PARM_DESC(no_sched, "No io scheduler");
+
static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
@@ -476,7 +480,7 @@ static struct configfs_item_operations nullb_device_ops = {
.release = nullb_device_release,
};
-static struct config_item_type nullb_device_type = {
+static const struct config_item_type nullb_device_type = {
.ct_item_ops = &nullb_device_ops,
.ct_attrs = nullb_device_attrs,
.ct_owner = THIS_MODULE,
@@ -528,7 +532,7 @@ static struct configfs_group_operations nullb_group_ops = {
.drop_item = nullb_group_drop_item,
};
-static struct config_item_type nullb_group_type = {
+static const struct config_item_type nullb_group_type = {
.ct_group_ops = &nullb_group_ops,
.ct_attrs = nullb_group_attrs,
.ct_owner = THIS_MODULE,
@@ -1754,6 +1758,8 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
set->cmd_size = sizeof(struct nullb_cmd);
set->flags = BLK_MQ_F_SHOULD_MERGE;
+ if (g_no_sched)
+ set->flags |= BLK_MQ_F_NO_SCHED;
set->driver_data = NULL;
if ((nullb && nullb->dev->blocking) || g_blocking)
@@ -1985,8 +1991,10 @@ static int __init null_init(void)
for (i = 0; i < nr_devices; i++) {
dev = null_alloc_dev();
- if (!dev)
+ if (!dev) {
+ ret = -ENOMEM;
goto err_dev;
+ }
ret = null_add_dev(dev);
if (ret) {
null_free_dev(dev);
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
index b226835a909a..f8bd6ef3605a 100644
--- a/drivers/block/paride/Kconfig
+++ b/drivers/block/paride/Kconfig
@@ -26,6 +26,7 @@ config PARIDE_PD
config PARIDE_PCD
tristate "Parallel port ATAPI CD-ROMs"
depends on PARIDE
+ select CDROM
select BLK_SCSI_REQUEST # only for the generic cdrom code
---help---
This option enables the high-level driver for ATAPI CD-ROM devices
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 64d0fc17c174..2819f23e8bf2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -1967,7 +1967,8 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev)
break;
case FIT_MTD_CMD_LOG_HOST_ID:
- skdev->connect_time_stamp = get_seconds();
+ /* hardware interface overflows in y2106 */
+ skdev->connect_time_stamp = (u32)ktime_get_real_seconds();
data = skdev->connect_time_stamp & 0xFFFF;
mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 5b8992beffec..4ed0a78fdc09 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -23,15 +23,15 @@ static const char * const backends[] = {
#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
"lz4",
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_DEFLATE)
- "deflate",
-#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
"lz4hc",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_842)
"842",
#endif
+#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
+ "zstd",
+#endif
NULL
};
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index f149d3e61234..d70eba30003a 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -122,14 +122,6 @@ static inline bool is_partial_io(struct bio_vec *bvec)
}
#endif
-static void zram_revalidate_disk(struct zram *zram)
-{
- revalidate_disk(zram->disk);
- /* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */
- zram->disk->queue->backing_dev_info->capabilities |=
- BDI_CAP_STABLE_WRITES;
-}
-
/*
* Check if request is within bounds and aligned on zram logical blocks.
*/
@@ -436,7 +428,7 @@ static void put_entry_bdev(struct zram *zram, unsigned long entry)
WARN_ON_ONCE(!was_set);
}
-void zram_page_end_io(struct bio *bio)
+static void zram_page_end_io(struct bio *bio)
{
struct page *page = bio->bi_io_vec[0].bv_page;
@@ -1373,7 +1365,8 @@ static ssize_t disksize_store(struct device *dev,
zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
- zram_revalidate_disk(zram);
+
+ revalidate_disk(zram->disk);
up_write(&zram->init_lock);
return len;
@@ -1420,7 +1413,7 @@ static ssize_t reset_store(struct device *dev,
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
zram_reset_device(zram);
- zram_revalidate_disk(zram);
+ revalidate_disk(zram->disk);
bdput(bdev);
mutex_lock(&bdev->bd_mutex);
@@ -1539,6 +1532,7 @@ static int zram_add(void)
/* zram devices sort of resembles non-rotational disks */
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
+
/*
* To ensure that we always get PAGE_SIZE aligned
* and n*PAGE_SIZED sized I/O requests.
@@ -1563,6 +1557,8 @@ static int zram_add(void)
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
+ zram->disk->queue->backing_dev_info->capabilities |=
+ (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
add_disk(zram->disk);
ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index e8c6946fed9d..3063f5312397 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -1276,6 +1276,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* Perf driver registration */
ccn->dt.pmu = (struct pmu) {
+ .module = THIS_MODULE,
.attr_groups = arm_ccn_pmu_attr_groups,
.task_ctx_nr = perf_invalid_context,
.event_init = arm_ccn_pmu_event_init,
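
Setting .module in the struct pmu above lets the perf core pin the owning module while events on this PMU are live. A minimal sketch of the same field in an invented driver:

#include <linux/perf_event.h>
#include <linux/module.h>

static struct pmu sketch_pmu = {
	.module		= THIS_MODULE,		/* perf core takes a module ref per event */
	.task_ctx_nr	= perf_invalid_context,	/* uncore-style PMU, no task context */
};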
diff --git a/drivers/cdrom/Makefile b/drivers/cdrom/Makefile
index a95566ff47d3..0f3664b45f48 100644
--- a/drivers/cdrom/Makefile
+++ b/drivers/cdrom/Makefile
@@ -1,14 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-# Makefile for the kernel cdrom device drivers.
-#
-# 30 Jan 1998, Michael Elizabeth Chastain, <mailto:mec@shout.net>
-# Rewritten to use lists instead of if-statements.
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_BLK_DEV_IDECD) += cdrom.o
-obj-$(CONFIG_BLK_DEV_SR) += cdrom.o
-obj-$(CONFIG_PARIDE_PCD) += cdrom.o
-obj-$(CONFIG_CDROM_PKTCDVD) += cdrom.o
-
-obj-$(CONFIG_GDROM) += gdrom.o cdrom.o
+obj-$(CONFIG_CDROM) += cdrom.o
+obj-$(CONFIG_GDROM) += gdrom.o
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 95a031e9eced..f6e3e5abc117 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -100,12 +100,12 @@ config HW_RANDOM_BCM2835
If unsure, say Y.
config HW_RANDOM_IPROC_RNG200
- tristate "Broadcom iProc RNG200 support"
- depends on ARCH_BCM_IPROC
+ tristate "Broadcom iProc/STB RNG200 support"
+ depends on ARCH_BCM_IPROC || ARCH_BRCMSTB
default HW_RANDOM
---help---
This driver provides kernel-side support for the RNG200
- hardware found on the Broadcom iProc SoCs.
+ hardware found on the Broadcom iProc and STB SoCs.
To compile this driver as a module, choose M here: the
module will be called iproc-rng200
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 9701ac7d8b47..657b8770b6b9 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -292,26 +292,48 @@ static struct miscdevice rng_miscdev = {
.groups = rng_dev_groups,
};
+static int enable_best_rng(void)
+{
+ int ret = -ENODEV;
+
+ BUG_ON(!mutex_is_locked(&rng_mutex));
+
+ /* rng_list is sorted by quality, use the best (=first) one */
+ if (!list_empty(&rng_list)) {
+ struct hwrng *new_rng;
+
+ new_rng = list_entry(rng_list.next, struct hwrng, list);
+ ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
+ if (!ret)
+ cur_rng_set_by_user = 0;
+ }
+
+ return ret;
+}
+
static ssize_t hwrng_attr_current_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- int err;
+ int err = -ENODEV;
struct hwrng *rng;
err = mutex_lock_interruptible(&rng_mutex);
if (err)
return -ERESTARTSYS;
- err = -ENODEV;
- list_for_each_entry(rng, &rng_list, list) {
- if (sysfs_streq(rng->name, buf)) {
- err = 0;
- cur_rng_set_by_user = 1;
- if (rng != current_rng)
+
+ if (sysfs_streq(buf, "")) {
+ err = enable_best_rng();
+ } else {
+ list_for_each_entry(rng, &rng_list, list) {
+ if (sysfs_streq(rng->name, buf)) {
+ cur_rng_set_by_user = 1;
err = set_current_rng(rng);
- break;
+ break;
+ }
}
}
+
mutex_unlock(&rng_mutex);
return err ? : len;
@@ -423,7 +445,7 @@ static void start_khwrngd(void)
{
hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
if (IS_ERR(hwrng_fill)) {
- pr_err("hwrng_fill thread creation failed");
+ pr_err("hwrng_fill thread creation failed\n");
hwrng_fill = NULL;
}
}
@@ -493,17 +515,8 @@ void hwrng_unregister(struct hwrng *rng)
mutex_lock(&rng_mutex);
list_del(&rng->list);
- if (current_rng == rng) {
- drop_current_rng();
- cur_rng_set_by_user = 0;
- /* rng_list is sorted by quality, use the best (=first) one */
- if (!list_empty(&rng_list)) {
- struct hwrng *new_rng;
-
- new_rng = list_entry(rng_list.next, struct hwrng, list);
- set_current_rng(new_rng);
- }
- }
+ if (current_rng == rng)
+ enable_best_rng();
if (list_empty(&rng_list)) {
mutex_unlock(&rng_mutex);
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 3eaf7cb96d36..8b5a20b35293 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -220,6 +220,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
}
static const struct of_device_id iproc_rng200_of_match[] = {
+ { .compatible = "brcm,bcm7278-rng200", },
{ .compatible = "brcm,iproc-rng200", },
{},
};
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index d9f46b437cc2..4e2a3f635277 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -72,7 +72,7 @@ static int pseries_rng_remove(struct vio_dev *dev)
return 0;
}
-static struct vio_device_id pseries_rng_driver_ids[] = {
+static const struct vio_device_id pseries_rng_driver_ids[] = {
{ "ibm,random-v1", "ibm,random"},
{ "", "" }
};
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 03ff5483d865..f615684028af 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -53,13 +53,6 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
int period_us = ktime_to_us(priv->period);
/*
- * The RNG provides 32-bits per read. Ensure there is enough space for
- * at minimum one read.
- */
- if (max < sizeof(u32))
- return 0;
-
- /*
* There may not have been enough time for new data to be generated
* since the last request. If the caller doesn't want to wait, let them
* bail out. Otherwise, wait for the completion. If the new data has
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 3fa2f8a009b3..b89df66ea1ae 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -184,7 +184,26 @@ static int virtrng_freeze(struct virtio_device *vdev)
static int virtrng_restore(struct virtio_device *vdev)
{
- return probe_common(vdev);
+ int err;
+
+ err = probe_common(vdev);
+ if (!err) {
+ struct virtrng_info *vi = vdev->priv;
+
+ /*
+ * Set hwrng_removed to ensure that virtio_read()
+ * does not block waiting for data before the
+ * registration is complete.
+ */
+ vi->hwrng_removed = true;
+ err = hwrng_register(&vi->hwrng);
+ if (!err) {
+ vi->hwrng_register_done = true;
+ vi->hwrng_removed = false;
+ }
+ }
+
+ return err;
}
#endif
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index f6fa056a52fc..3544abc0f9f9 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -22,24 +22,39 @@ config IPMI_DMI_DECODE
if IPMI_HANDLER
+config IPMI_PROC_INTERFACE
+ bool 'Provide an interface for IPMI stats in /proc (deprecated)'
+ depends on PROC_FS
+ default y
+ help
+ Do not use this any more; use sysfs for this info. It will be
+ removed in future kernel versions.
+
config IPMI_PANIC_EVENT
bool 'Generate a panic event to all BMCs on a panic'
help
- When a panic occurs, this will cause the IPMI message handler to
- generate an IPMI event describing the panic to each interface
- registered with the message handler.
+ When a panic occurs, this will cause the IPMI message handler to,
+ by default, generate an IPMI event describing the panic to each
+ interface registered with the message handler. This is always
+ available; the module parameter for ipmi_msghandler named
+ panic_op can be set to "event" to choose this value, this config
+ simply causes the default value to be set to "event".
config IPMI_PANIC_STRING
bool 'Generate OEM events containing the panic string'
depends on IPMI_PANIC_EVENT
help
- When a panic occurs, this will cause the IPMI message handler to
- generate IPMI OEM type f0 events holding the IPMB address of the
- panic generator (byte 4 of the event), a sequence number for the
- string (byte 5 of the event) and part of the string (the rest of the
- event). Bytes 1, 2, and 3 are the normal usage for an OEM event.
- You can fetch these events and use the sequence numbers to piece the
- string together.
+ When a panic occurs, this will cause the IPMI message handler to,
+ by default, generate IPMI OEM type f0 events holding the IPMB
+ address of the panic generator (byte 4 of the event), a sequence
+ number for the string (byte 5 of the event) and part of the
+ string (the rest of the event). Bytes 1, 2, and 3 are the normal
+ usage for an OEM event. You can fetch these events and use the
+ sequence numbers to piece the string together. This config
+ parameter sets the default value to generate these events,
+ the module parameter for ipmi_msghandler named panic_op can
+ be set to "string" to choose this value, this config simply
+ causes the default value to be set to "string".
config IPMI_DEVICE_INTERFACE
tristate 'Device interface for IPMI'
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index 43b7d86cc5f2..33b899fcf14a 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -3,7 +3,15 @@
# Makefile for the ipmi drivers.
#
-ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
+ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \
+ ipmi_si_hotmod.o ipmi_si_hardcode.o ipmi_si_platform.o \
+ ipmi_si_port_io.o ipmi_si_mem_io.o
+ifdef CONFIG_PCI
+ipmi_si-y += ipmi_si_pci.o
+endif
+ifdef CONFIG_PARISC
+ipmi_si-y += ipmi_si_parisc.o
+endif
obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
index 2059f79d669a..ab78b3be7e33 100644
--- a/drivers/char/ipmi/ipmi_dmi.c
+++ b/drivers/char/ipmi/ipmi_dmi.c
@@ -9,10 +9,16 @@
#include <linux/dmi.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include "ipmi_si_sm.h"
#include "ipmi_dmi.h"
+#define IPMI_DMI_TYPE_KCS 0x01
+#define IPMI_DMI_TYPE_SMIC 0x02
+#define IPMI_DMI_TYPE_BT 0x03
+#define IPMI_DMI_TYPE_SSIF 0x04
+
struct ipmi_dmi_info {
- int type;
+ enum si_type si_type;
u32 flags;
unsigned long addr;
u8 slave_addr;
@@ -23,6 +29,15 @@ static struct ipmi_dmi_info *ipmi_dmi_infos;
static int ipmi_dmi_nr __initdata;
+#define set_prop_entry(_p_, _name_, type, val) \
+do { \
+ struct property_entry *_p = &_p_; \
+ _p->name = _name_; \
+ _p->length = sizeof(type); \
+ _p->is_string = false; \
+ _p->value.type##_data = val; \
+} while(0)
+
static void __init dmi_add_platform_ipmi(unsigned long base_addr,
u32 flags,
u8 slave_addr,
@@ -33,27 +48,14 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
struct platform_device *pdev;
struct resource r[4];
unsigned int num_r = 1, size;
- struct property_entry p[4] = {
- PROPERTY_ENTRY_U8("slave-addr", slave_addr),
- PROPERTY_ENTRY_U8("ipmi-type", type),
- PROPERTY_ENTRY_U16("i2c-addr", base_addr),
- { }
- };
+ struct property_entry p[5];
+ unsigned int pidx = 0;
char *name, *override;
int rv;
+ enum si_type si_type;
struct ipmi_dmi_info *info;
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- pr_warn("ipmi:dmi: Could not allocate dmi info\n");
- } else {
- info->type = type;
- info->flags = flags;
- info->addr = base_addr;
- info->slave_addr = slave_addr;
- info->next = ipmi_dmi_infos;
- ipmi_dmi_infos = info;
- }
+ memset(p, 0, sizeof(p));
name = "dmi-ipmi-si";
override = "ipmi_si";
@@ -63,28 +65,53 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
override = "ipmi_ssif";
offset = 1;
size = 1;
+ si_type = SI_TYPE_INVALID;
break;
case IPMI_DMI_TYPE_BT:
size = 3;
+ si_type = SI_BT;
break;
case IPMI_DMI_TYPE_KCS:
+ size = 2;
+ si_type = SI_KCS;
+ break;
case IPMI_DMI_TYPE_SMIC:
size = 2;
+ si_type = SI_SMIC;
break;
default:
- pr_err("ipmi:dmi: Invalid IPMI type: %d", type);
+ pr_err("ipmi:dmi: Invalid IPMI type: %d\n", type);
return;
}
+ if (si_type != SI_TYPE_INVALID)
+ set_prop_entry(p[pidx++], "ipmi-type", u8, si_type);
+ set_prop_entry(p[pidx++], "slave-addr", u8, slave_addr);
+ set_prop_entry(p[pidx++], "addr-source", u8, SI_SMBIOS);
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ pr_warn("ipmi:dmi: Could not allocate dmi info\n");
+ } else {
+ info->si_type = si_type;
+ info->flags = flags;
+ info->addr = base_addr;
+ info->slave_addr = slave_addr;
+ info->next = ipmi_dmi_infos;
+ ipmi_dmi_infos = info;
+ }
+
pdev = platform_device_alloc(name, ipmi_dmi_nr);
if (!pdev) {
- pr_err("ipmi:dmi: Error allocation IPMI platform device");
+ pr_err("ipmi:dmi: Error allocating IPMI platform device\n");
return;
}
pdev->driver_override = override;
- if (type == IPMI_DMI_TYPE_SSIF)
+ if (type == IPMI_DMI_TYPE_SSIF) {
+ set_prop_entry(p[pidx++], "i2c-addr", u16, base_addr);
goto add_properties;
+ }
memset(r, 0, sizeof(r));
@@ -152,12 +179,13 @@ err:
* This function allows an ACPI-specified IPMI device to look up the
* slave address from the DMI table.
*/
-int ipmi_dmi_get_slave_addr(int type, u32 flags, unsigned long base_addr)
+int ipmi_dmi_get_slave_addr(enum si_type si_type, u32 flags,
+ unsigned long base_addr)
{
struct ipmi_dmi_info *info = ipmi_dmi_infos;
while (info) {
- if (info->type == type &&
+ if (info->si_type == si_type &&
info->flags == flags &&
info->addr == base_addr)
return info->slave_addr;
@@ -240,7 +268,7 @@ static void __init dmi_decode_ipmi(const struct dmi_header *dm)
offset = 16;
break;
default:
- pr_err("ipmi:dmi: Invalid offset: 0");
+ pr_err("ipmi:dmi: Invalid offset: 0\n");
return;
}
}
diff --git a/drivers/char/ipmi/ipmi_dmi.h b/drivers/char/ipmi/ipmi_dmi.h
index ea990a8e3b09..6c21018e3668 100644
--- a/drivers/char/ipmi/ipmi_dmi.h
+++ b/drivers/char/ipmi/ipmi_dmi.h
@@ -3,11 +3,7 @@
* DMI defines for use by IPMI
*/
-#define IPMI_DMI_TYPE_KCS 0x01
-#define IPMI_DMI_TYPE_SMIC 0x02
-#define IPMI_DMI_TYPE_BT 0x03
-#define IPMI_DMI_TYPE_SSIF 0x04
-
#ifdef CONFIG_IPMI_DMI_DECODE
-int ipmi_dmi_get_slave_addr(int type, u32 flags, unsigned long base_addr);
+int ipmi_dmi_get_slave_addr(enum si_type si_type, u32 flags,
+ unsigned long base_addr);
#endif
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 810b138f5897..9de189db2cc3 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -46,6 +46,9 @@
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/uuid.h>
#define PFX "IPMI message handler: "
@@ -61,9 +64,77 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
static int initialized;
-#ifdef CONFIG_PROC_FS
+enum ipmi_panic_event_op {
+ IPMI_SEND_PANIC_EVENT_NONE,
+ IPMI_SEND_PANIC_EVENT,
+ IPMI_SEND_PANIC_EVENT_STRING
+};
+#ifdef CONFIG_IPMI_PANIC_STRING
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
+#elif defined(CONFIG_IPMI_PANIC_EVENT)
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
+#else
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
+#endif
+static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
+
+static int panic_op_write_handler(const char *val,
+ const struct kernel_param *kp)
+{
+ char valcp[16];
+ char *s;
+
+ strncpy(valcp, val, 16);
+ valcp[15] = '\0';
+
+ s = strstrip(valcp);
+
+ if (strcmp(s, "none") == 0)
+ ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
+ else if (strcmp(s, "event") == 0)
+ ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
+ else if (strcmp(s, "string") == 0)
+ ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
+{
+ switch (ipmi_send_panic_event) {
+ case IPMI_SEND_PANIC_EVENT_NONE:
+ strcpy(buffer, "none");
+ break;
+
+ case IPMI_SEND_PANIC_EVENT:
+ strcpy(buffer, "event");
+ break;
+
+ case IPMI_SEND_PANIC_EVENT_STRING:
+ strcpy(buffer, "string");
+ break;
+
+ default:
+ strcpy(buffer, "???");
+ break;
+ }
+
+ return strlen(buffer);
+}
+
+static const struct kernel_param_ops panic_op_ops = {
+ .set = panic_op_write_handler,
+ .get = panic_op_read_handler
+};
+module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
+MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
+
+
+#ifdef CONFIG_IPMI_PROC_INTERFACE
static struct proc_dir_entry *proc_ipmi_root;
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_IPMI_PROC_INTERFACE */
/* Remain in auto-maintenance mode for this amount of time (in ms). */
#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
@@ -90,6 +161,9 @@ static struct proc_dir_entry *proc_ipmi_root;
*/
#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
+/* How long should we cache dynamic device IDs? */
+#define IPMI_DYN_DEV_ID_EXPIRY (10 * HZ)
+
/*
* The main "user" data structure.
*/
@@ -169,10 +243,17 @@ struct seq_table {
#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
+#define IPMI_MAX_CHANNELS 16
struct ipmi_channel {
unsigned char medium;
unsigned char protocol;
+};
+struct ipmi_channel_set {
+ struct ipmi_channel c[IPMI_MAX_CHANNELS];
+};
+
+struct ipmi_my_addrinfo {
/*
* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
* but may be changed by the user.
@@ -186,23 +267,38 @@ struct ipmi_channel {
unsigned char lun;
};
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_IPMI_PROC_INTERFACE
struct ipmi_proc_entry {
char *name;
struct ipmi_proc_entry *next;
};
#endif
+/*
+ * Note that the product id, manufacturer id, guid, and device id are
+ * immutable in this structure, so dyn_mutex is not required for
+ * accessing those. If those change on a BMC, a new BMC is allocated.
+ */
struct bmc_device {
struct platform_device pdev;
+ struct list_head intfs; /* Interfaces on this BMC. */
struct ipmi_device_id id;
- unsigned char guid[16];
- int guid_set;
- char name[16];
+ struct ipmi_device_id fetch_id;
+ int dyn_id_set;
+ unsigned long dyn_id_expiry;
+ struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
+ guid_t guid;
+ guid_t fetch_guid;
+ int dyn_guid_set;
struct kref usecount;
+ struct work_struct remove_work;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
+static int bmc_get_device_id(ipmi_smi_t intf, struct bmc_device *bmc,
+ struct ipmi_device_id *id,
+ bool *guid_set, guid_t *guid);
+
/*
* Various statistics for IPMI, these index stats[] in the ipmi_smi
* structure.
@@ -308,7 +404,6 @@ enum ipmi_stat_indexes {
#define IPMI_IPMB_NUM_SEQ 64
-#define IPMI_MAX_CHANNELS 16
struct ipmi_smi {
/* What interface number are we? */
int intf_num;
@@ -327,15 +422,23 @@ struct ipmi_smi {
*/
struct list_head users;
- /* Information to supply to users. */
- unsigned char ipmi_version_major;
- unsigned char ipmi_version_minor;
-
/* Used for wake ups at startup. */
wait_queue_head_t waitq;
+ /*
+ * Prevents the interface from being unregistered when the
+ * interface is used by being looked up through the BMC
+ * structure.
+ */
+ struct mutex bmc_reg_mutex;
+
+ struct bmc_device tmp_bmc;
struct bmc_device *bmc;
+ bool bmc_registered;
+ struct list_head bmc_link;
char *my_dev_name;
+ bool in_bmc_register; /* Handle recursive situations. Yuck. */
+ struct work_struct bmc_reg_work;
/*
* This is the lower-layer's sender routine. Note that you
@@ -346,10 +449,13 @@ struct ipmi_smi {
const struct ipmi_smi_handlers *handlers;
void *send_info;
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_IPMI_PROC_INTERFACE
/* A list of proc entries for this interface. */
struct mutex proc_entry_lock;
struct ipmi_proc_entry *proc_entries;
+
+ struct proc_dir_entry *proc_dir;
+ char proc_dir_name[10];
#endif
/* Driver-model device for the system interface. */
@@ -421,6 +527,8 @@ struct ipmi_smi {
* interface comes in with a NULL user, call this routine with
* it. Note that the message will still be freed by the
* caller. This only works on the system interface.
+ *
+ * Protected by bmc_reg_mutex.
*/
void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
@@ -431,11 +539,11 @@ struct ipmi_smi {
int curr_channel;
/* Channel information */
- struct ipmi_channel channels[IPMI_MAX_CHANNELS];
-
- /* Proc FS stuff. */
- struct proc_dir_entry *proc_dir;
- char proc_dir_name[10];
+ struct ipmi_channel_set *channel_list;
+ unsigned int curr_working_cset; /* First index into the following. */
+ struct ipmi_channel_set wchannels[2];
+ struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
+ bool channels_ready;
atomic_t stats[IPMI_NUM_STATS];
@@ -448,6 +556,14 @@ struct ipmi_smi {
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
+static void __get_guid(ipmi_smi_t intf);
+static void __ipmi_bmc_unregister(ipmi_smi_t intf);
+static int __ipmi_bmc_register(ipmi_smi_t intf,
+ struct ipmi_device_id *id,
+ bool guid_set, guid_t *guid, int intf_num);
+static int __scan_channels(ipmi_smi_t intf, struct ipmi_device_id *id);
+
+
/**
* The driver model view of the IPMI messaging driver.
*/
@@ -457,6 +573,9 @@ static struct platform_driver ipmidriver = {
.bus = &platform_bus_type
}
};
+/*
+ * This mutex keeps us from adding the same BMC twice.
+ */
static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
@@ -475,7 +594,7 @@ static DEFINE_MUTEX(smi_watchers_mutex);
static const char * const addr_src_to_str[] = {
"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
- "device-tree"
+ "device-tree", "platform"
};
const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
@@ -1119,12 +1238,21 @@ int ipmi_destroy_user(ipmi_user_t user)
}
EXPORT_SYMBOL(ipmi_destroy_user);
-void ipmi_get_version(ipmi_user_t user,
- unsigned char *major,
- unsigned char *minor)
+int ipmi_get_version(ipmi_user_t user,
+ unsigned char *major,
+ unsigned char *minor)
{
- *major = user->intf->ipmi_version_major;
- *minor = user->intf->ipmi_version_minor;
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ *major = ipmi_version_major(&id);
+ *minor = ipmi_version_minor(&id);
+
+ return 0;
}
EXPORT_SYMBOL(ipmi_get_version);
@@ -1134,7 +1262,7 @@ int ipmi_set_my_address(ipmi_user_t user,
{
if (channel >= IPMI_MAX_CHANNELS)
return -EINVAL;
- user->intf->channels[channel].address = address;
+ user->intf->addrinfo[channel].address = address;
return 0;
}
EXPORT_SYMBOL(ipmi_set_my_address);
@@ -1145,7 +1273,7 @@ int ipmi_get_my_address(ipmi_user_t user,
{
if (channel >= IPMI_MAX_CHANNELS)
return -EINVAL;
- *address = user->intf->channels[channel].address;
+ *address = user->intf->addrinfo[channel].address;
return 0;
}
EXPORT_SYMBOL(ipmi_get_my_address);
@@ -1156,7 +1284,7 @@ int ipmi_set_my_LUN(ipmi_user_t user,
{
if (channel >= IPMI_MAX_CHANNELS)
return -EINVAL;
- user->intf->channels[channel].lun = LUN & 0x3;
+ user->intf->addrinfo[channel].lun = LUN & 0x3;
return 0;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);
@@ -1167,7 +1295,7 @@ int ipmi_get_my_LUN(ipmi_user_t user,
{
if (channel >= IPMI_MAX_CHANNELS)
return -EINVAL;
- *address = user->intf->channels[channel].lun;
+ *address = user->intf->addrinfo[channel].lun;
return 0;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);
@@ -1264,8 +1392,8 @@ int ipmi_set_gets_events(ipmi_user_t user, bool val)
list_move_tail(&msg->link, &msgs);
intf->waiting_events_count = 0;
if (intf->event_msg_printed) {
- printk(KERN_WARNING PFX "Event queue no longer"
- " full\n");
+ dev_warn(intf->si_dev,
+ PFX "Event queue no longer full\n");
intf->event_msg_printed = 0;
}
@@ -1655,6 +1783,7 @@ static int i_ipmi_request(ipmi_user_t user,
unsigned char ipmb_seq;
long seqid;
int broadcast = 0;
+ struct ipmi_channel *chans;
if (addr->channel >= IPMI_MAX_CHANNELS) {
ipmi_inc_stat(intf, sent_invalid_commands);
@@ -1662,8 +1791,9 @@ static int i_ipmi_request(ipmi_user_t user,
goto out_err;
}
- if (intf->channels[addr->channel].medium
- != IPMI_CHANNEL_MEDIUM_IPMB) {
+ chans = READ_ONCE(intf->channel_list)->c;
+
+ if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EINVAL;
goto out_err;
@@ -1785,6 +1915,7 @@ static int i_ipmi_request(ipmi_user_t user,
struct ipmi_lan_addr *lan_addr;
unsigned char ipmb_seq;
long seqid;
+ struct ipmi_channel *chans;
if (addr->channel >= IPMI_MAX_CHANNELS) {
ipmi_inc_stat(intf, sent_invalid_commands);
@@ -1792,9 +1923,11 @@ static int i_ipmi_request(ipmi_user_t user,
goto out_err;
}
- if ((intf->channels[addr->channel].medium
+ chans = READ_ONCE(intf->channel_list)->c;
+
+ if ((chans[addr->channel].medium
!= IPMI_CHANNEL_MEDIUM_8023LAN)
- && (intf->channels[addr->channel].medium
+ && (chans[addr->channel].medium
!= IPMI_CHANNEL_MEDIUM_ASYNC)) {
ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EINVAL;
@@ -1928,8 +2061,8 @@ static int check_addr(ipmi_smi_t intf,
{
if (addr->channel >= IPMI_MAX_CHANNELS)
return -EINVAL;
- *lun = intf->channels[addr->channel].lun;
- *saddr = intf->channels[addr->channel].address;
+ *lun = intf->addrinfo[addr->channel].lun;
+ *saddr = intf->addrinfo[addr->channel].address;
return 0;
}
@@ -1997,15 +2130,249 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);
-#ifdef CONFIG_PROC_FS
+static void bmc_device_id_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
+{
+ int rv;
+
+ if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
+ || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
+ dev_warn(intf->si_dev,
+ PFX "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
+ msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
+ return;
+ }
+
+ rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
+ msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
+ if (rv) {
+ dev_warn(intf->si_dev,
+ PFX "device id demangle failed: %d\n", rv);
+ intf->bmc->dyn_id_set = 0;
+ } else {
+ /*
+ * Make sure the id data is available before setting
+ * dyn_id_set.
+ */
+ smp_wmb();
+ intf->bmc->dyn_id_set = 1;
+ }
+
+ wake_up(&intf->waitq);
+}
+
+static int
+send_get_device_id_cmd(ipmi_smi_t intf)
+{
+ struct ipmi_system_interface_addr si;
+ struct kernel_ipmi_msg msg;
+
+ si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si.channel = IPMI_BMC_CHANNEL;
+ si.lun = 0;
+
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+
+ return i_ipmi_request(NULL,
+ intf,
+ (struct ipmi_addr *) &si,
+ 0,
+ &msg,
+ intf,
+ NULL,
+ NULL,
+ 0,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
+ -1, 0);
+}
+
+static int __get_device_id(ipmi_smi_t intf, struct bmc_device *bmc)
+{
+ int rv;
+
+ bmc->dyn_id_set = 2;
+
+ intf->null_user_handler = bmc_device_id_handler;
+
+ rv = send_get_device_id_cmd(intf);
+ if (rv)
+ return rv;
+
+ wait_event(intf->waitq, bmc->dyn_id_set != 2);
+
+ if (!bmc->dyn_id_set)
+ rv = -EIO; /* Something went wrong in the fetch. */
+
+ /* dyn_id_set makes the id data available. */
+ smp_rmb();
+
+ intf->null_user_handler = NULL;
+
+ return rv;
+}
+
+/*
+ * Fetch the device id for the bmc/interface. You must pass in either
+ * bmc or intf; this code will get the other one. If the data has
+ * been recently fetched, this will just use the cached data. Otherwise
+ * it will run a new fetch.
+ *
+ * Except for the first time this is called (in ipmi_register_smi()),
+ * this will always return good data.
+ */
+static int __bmc_get_device_id(ipmi_smi_t intf, struct bmc_device *bmc,
+ struct ipmi_device_id *id,
+ bool *guid_set, guid_t *guid, int intf_num)
+{
+ int rv = 0;
+ int prev_dyn_id_set, prev_guid_set;
+ bool intf_set = intf != NULL;
+
+ if (!intf) {
+ mutex_lock(&bmc->dyn_mutex);
+retry_bmc_lock:
+ if (list_empty(&bmc->intfs)) {
+ mutex_unlock(&bmc->dyn_mutex);
+ return -ENOENT;
+ }
+ intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
+ bmc_link);
+ kref_get(&intf->refcount);
+ mutex_unlock(&bmc->dyn_mutex);
+ mutex_lock(&intf->bmc_reg_mutex);
+ mutex_lock(&bmc->dyn_mutex);
+ if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
+ bmc_link)) {
+ mutex_unlock(&intf->bmc_reg_mutex);
+ kref_put(&intf->refcount, intf_free);
+ goto retry_bmc_lock;
+ }
+ } else {
+ mutex_lock(&intf->bmc_reg_mutex);
+ bmc = intf->bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ kref_get(&intf->refcount);
+ }
+
+ /* If we have a valid and current ID, just return that. */
+ if (intf->in_bmc_register ||
+ (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
+ goto out_noprocessing;
+
+ prev_guid_set = bmc->dyn_guid_set;
+ __get_guid(intf);
+
+ prev_dyn_id_set = bmc->dyn_id_set;
+ rv = __get_device_id(intf, bmc);
+ if (rv)
+ goto out;
+
+ /*
+ * The guid, device id, manufacturer id, and product id should
+ * not change on a BMC. If it does we have to do some dancing.
+ */
+ if (!intf->bmc_registered
+ || (!prev_guid_set && bmc->dyn_guid_set)
+ || (!prev_dyn_id_set && bmc->dyn_id_set)
+ || (prev_guid_set && bmc->dyn_guid_set
+ && !guid_equal(&bmc->guid, &bmc->fetch_guid))
+ || bmc->id.device_id != bmc->fetch_id.device_id
+ || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
+ || bmc->id.product_id != bmc->fetch_id.product_id) {
+ struct ipmi_device_id id = bmc->fetch_id;
+ int guid_set = bmc->dyn_guid_set;
+ guid_t guid;
+
+ guid = bmc->fetch_guid;
+ mutex_unlock(&bmc->dyn_mutex);
+
+ __ipmi_bmc_unregister(intf);
+ /* Fill in the temporary BMC for good measure. */
+ intf->bmc->id = id;
+ intf->bmc->dyn_guid_set = guid_set;
+ intf->bmc->guid = guid;
+ if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
+ need_waiter(intf); /* Retry later on an error. */
+ else
+ __scan_channels(intf, &id);
+
+
+ if (!intf_set) {
+ /*
+ * We weren't given the interface on the
+ * command line, so restart the operation on
+ * the next interface for the BMC.
+ */
+ mutex_unlock(&intf->bmc_reg_mutex);
+ mutex_lock(&bmc->dyn_mutex);
+ goto retry_bmc_lock;
+ }
+
+ /* We have a new BMC, set it up. */
+ bmc = intf->bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ goto out_noprocessing;
+ } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
+ /* Version info changed, scan the channels again. */
+ __scan_channels(intf, &bmc->fetch_id);
+
+ bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
+
+out:
+ if (rv && prev_dyn_id_set) {
+ rv = 0; /* Ignore failures if we have previous data. */
+ bmc->dyn_id_set = prev_dyn_id_set;
+ }
+ if (!rv) {
+ bmc->id = bmc->fetch_id;
+ if (bmc->dyn_guid_set)
+ bmc->guid = bmc->fetch_guid;
+ else if (prev_guid_set)
+ /*
+ * The guid used to be valid and it failed to fetch,
+ * just use the cached value.
+ */
+ bmc->dyn_guid_set = prev_guid_set;
+ }
+out_noprocessing:
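+ /* Hand back whatever the caller asked for. */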
+ if (!rv) {
+ if (id)
+ *id = bmc->id;
+
+ if (guid_set)
+ *guid_set = bmc->dyn_guid_set;
+
+ if (guid && bmc->dyn_guid_set)
+ *guid = bmc->guid;
+ }
+
+ mutex_unlock(&bmc->dyn_mutex);
+ mutex_unlock(&intf->bmc_reg_mutex);
+
+ kref_put(&intf->refcount, intf_free);
+ return rv;
+}
+
+static int bmc_get_device_id(ipmi_smi_t intf, struct bmc_device *bmc,
+ struct ipmi_device_id *id,
+ bool *guid_set, guid_t *guid)
+{
+ return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
+}
+
+#ifdef CONFIG_IPMI_PROC_INTERFACE
static int smi_ipmb_proc_show(struct seq_file *m, void *v)
{
ipmi_smi_t intf = m->private;
int i;
- seq_printf(m, "%x", intf->channels[0].address);
+ seq_printf(m, "%x", intf->addrinfo[0].address);
for (i = 1; i < IPMI_MAX_CHANNELS; i++)
- seq_printf(m, " %x", intf->channels[i].address);
+ seq_printf(m, " %x", intf->addrinfo[i].address);
seq_putc(m, '\n');
return 0;
@@ -2026,10 +2393,16 @@ static const struct file_operations smi_ipmb_proc_ops = {
static int smi_version_proc_show(struct seq_file *m, void *v)
{
ipmi_smi_t intf = m->private;
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(intf, NULL, &id, NULL, NULL);
+ if (rv)
+ return rv;
seq_printf(m, "%u.%u\n",
- ipmi_version_major(&intf->bmc->id),
- ipmi_version_minor(&intf->bmc->id));
+ ipmi_version_major(&id),
+ ipmi_version_minor(&id));
return 0;
}
@@ -2120,14 +2493,12 @@ static const struct file_operations smi_stats_proc_ops = {
.llseek = seq_lseek,
.release = single_release,
};
-#endif /* CONFIG_PROC_FS */
int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
const struct file_operations *proc_ops,
void *data)
{
int rv = 0;
-#ifdef CONFIG_PROC_FS
struct proc_dir_entry *file;
struct ipmi_proc_entry *entry;
@@ -2153,7 +2524,6 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
smi->proc_entries = entry;
mutex_unlock(&smi->proc_entry_lock);
}
-#endif /* CONFIG_PROC_FS */
return rv;
}
@@ -2163,7 +2533,6 @@ static int add_proc_entries(ipmi_smi_t smi, int num)
{
int rv = 0;
-#ifdef CONFIG_PROC_FS
sprintf(smi->proc_dir_name, "%d", num);
smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
if (!smi->proc_dir)
@@ -2183,14 +2552,12 @@ static int add_proc_entries(ipmi_smi_t smi, int num)
rv = ipmi_smi_add_proc_entry(smi, "version",
&smi_version_proc_ops,
smi);
-#endif /* CONFIG_PROC_FS */
return rv;
}
static void remove_proc_entries(ipmi_smi_t smi)
{
-#ifdef CONFIG_PROC_FS
struct ipmi_proc_entry *entry;
mutex_lock(&smi->proc_entry_lock);
@@ -2204,66 +2571,22 @@ static void remove_proc_entries(ipmi_smi_t smi)
}
mutex_unlock(&smi->proc_entry_lock);
remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
-#endif /* CONFIG_PROC_FS */
-}
-
-static int __find_bmc_guid(struct device *dev, void *data)
-{
- unsigned char *id = data;
- struct bmc_device *bmc = to_bmc_device(dev);
- return memcmp(bmc->guid, id, 16) == 0;
-}
-
-static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
- unsigned char *guid)
-{
- struct device *dev;
-
- dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
- if (dev)
- return to_bmc_device(dev);
- else
- return NULL;
-}
-
-struct prod_dev_id {
- unsigned int product_id;
- unsigned char device_id;
-};
-
-static int __find_bmc_prod_dev_id(struct device *dev, void *data)
-{
- struct prod_dev_id *id = data;
- struct bmc_device *bmc = to_bmc_device(dev);
-
- return (bmc->id.product_id == id->product_id
- && bmc->id.device_id == id->device_id);
-}
-
-static struct bmc_device *ipmi_find_bmc_prod_dev_id(
- struct device_driver *drv,
- unsigned int product_id, unsigned char device_id)
-{
- struct prod_dev_id id = {
- .product_id = product_id,
- .device_id = device_id,
- };
- struct device *dev;
-
- dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
- if (dev)
- return to_bmc_device(dev);
- else
- return NULL;
}
+#endif /* CONFIG_IPMI_PROC_INTERFACE */
static ssize_t device_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
- return snprintf(buf, 10, "%u\n", bmc->id.device_id);
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return snprintf(buf, 10, "%u\n", id.device_id);
}
static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
@@ -2272,9 +2595,14 @@ static ssize_t provides_device_sdrs_show(struct device *dev,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
- return snprintf(buf, 10, "%u\n",
- (bmc->id.device_revision & 0x80) >> 7);
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
}
static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show,
NULL);
@@ -2283,9 +2611,14 @@ static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
- return snprintf(buf, 20, "%u\n",
- bmc->id.device_revision & 0x0F);
+ return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
}
static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
@@ -2294,9 +2627,15 @@ static ssize_t firmware_revision_show(struct device *dev,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
- return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
- bmc->id.firmware_revision_2);
+ return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
+ id.firmware_revision_2);
}
static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
@@ -2305,10 +2644,16 @@ static ssize_t ipmi_version_show(struct device *dev,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
return snprintf(buf, 20, "%u.%u\n",
- ipmi_version_major(&bmc->id),
- ipmi_version_minor(&bmc->id));
+ ipmi_version_major(&id),
+ ipmi_version_minor(&id));
}
static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
@@ -2317,9 +2662,14 @@ static ssize_t add_dev_support_show(struct device *dev,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
- return snprintf(buf, 10, "0x%02x\n",
- bmc->id.additional_device_support);
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
}
static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
NULL);
@@ -2329,8 +2679,14 @@ static ssize_t manufacturer_id_show(struct device *dev,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
- return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
+ return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
}
static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
@@ -2339,8 +2695,14 @@ static ssize_t product_id_show(struct device *dev,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
- return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
+ return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
}
static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
@@ -2349,12 +2711,18 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
- bmc->id.aux_firmware_revision[3],
- bmc->id.aux_firmware_revision[2],
- bmc->id.aux_firmware_revision[1],
- bmc->id.aux_firmware_revision[0]);
+ id.aux_firmware_revision[3],
+ id.aux_firmware_revision[2],
+ id.aux_firmware_revision[1],
+ id.aux_firmware_revision[0]);
}
static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
@@ -2362,10 +2730,17 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ bool guid_set;
+ guid_t guid;
+ int rv;
- return snprintf(buf, 100, "%Lx%Lx\n",
- (long long) bmc->guid[0],
- (long long) bmc->guid[8]);
+ rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
+ if (rv)
+ return rv;
+ if (!guid_set)
+ return -ENOENT;
+
+ return snprintf(buf, 38, "%pUl\n", guid.b);
}
static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
@@ -2389,11 +2764,20 @@ static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct bmc_device *bmc = to_bmc_device(dev);
umode_t mode = attr->mode;
+ int rv;
- if (attr == &dev_attr_aux_firmware_revision.attr)
- return bmc->id.aux_firmware_revision_set ? mode : 0;
- if (attr == &dev_attr_guid.attr)
- return bmc->guid_set ? mode : 0;
+ if (attr == &dev_attr_aux_firmware_revision.attr) {
+ struct ipmi_device_id id;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ return (!rv && id.aux_firmware_revision_set) ? mode : 0;
+ }
+ if (attr == &dev_attr_guid.attr) {
+ bool guid_set;
+
+ rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
+ return (!rv && guid_set) ? mode : 0;
+ }
return mode;
}
@@ -2411,127 +2795,239 @@ static const struct device_type bmc_device_type = {
.groups = bmc_dev_attr_groups,
};
+static int __find_bmc_guid(struct device *dev, void *data)
+{
+ guid_t *guid = data;
+ struct bmc_device *bmc;
+ int rv;
+
+ if (dev->type != &bmc_device_type)
+ return 0;
+
+ bmc = to_bmc_device(dev);
+ rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
+ if (rv)
+ rv = kref_get_unless_zero(&bmc->usecount);
+ return rv;
+}
+
+/*
+ * Returns with the bmc's usecount incremented, if it is non-NULL.
+ */
+static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
+ guid_t *guid)
+{
+ struct device *dev;
+ struct bmc_device *bmc = NULL;
+
+ dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
+ if (dev) {
+ bmc = to_bmc_device(dev);
+ put_device(dev);
+ }
+ return bmc;
+}
+
+struct prod_dev_id {
+ unsigned int product_id;
+ unsigned char device_id;
+};
+
+static int __find_bmc_prod_dev_id(struct device *dev, void *data)
+{
+ struct prod_dev_id *cid = data;
+ struct bmc_device *bmc;
+ int rv;
+
+ if (dev->type != &bmc_device_type)
+ return 0;
+
+ bmc = to_bmc_device(dev);
+ rv = (bmc->id.product_id == cid->product_id
+ && bmc->id.device_id == cid->device_id);
+ if (rv)
+ rv = kref_get_unless_zero(&bmc->usecount);
+ return rv;
+}
+
+/*
+ * Returns with the bmc's usecount incremented, if it is non-NULL.
+ */
+static struct bmc_device *ipmi_find_bmc_prod_dev_id(
+ struct device_driver *drv,
+ unsigned int product_id, unsigned char device_id)
+{
+ struct prod_dev_id id = {
+ .product_id = product_id,
+ .device_id = device_id,
+ };
+ struct device *dev;
+ struct bmc_device *bmc = NULL;
+
+ dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
+ if (dev) {
+ bmc = to_bmc_device(dev);
+ put_device(dev);
+ }
+ return bmc;
+}
+
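+/* Allocator for unique platform device ids for registered BMCs. */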
+static DEFINE_IDA(ipmi_bmc_ida);
+
static void
release_bmc_device(struct device *dev)
{
kfree(to_bmc_device(dev));
}
+static void cleanup_bmc_work(struct work_struct *work)
+{
+ struct bmc_device *bmc = container_of(work, struct bmc_device,
+ remove_work);
+ int id = bmc->pdev.id; /* Unregister overwrites id */
+
+ platform_device_unregister(&bmc->pdev);
+ ida_simple_remove(&ipmi_bmc_ida, id);
+}
+
static void
cleanup_bmc_device(struct kref *ref)
{
struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
- platform_device_unregister(&bmc->pdev);
+ /*
+ * Remove the platform device in a work queue to avoid issues
+ * with removing the device attributes while reading a device
+ * attribute.
+ */
+ schedule_work(&bmc->remove_work);
}
-static void ipmi_bmc_unregister(ipmi_smi_t intf)
+/*
+ * Must be called with intf->bmc_reg_mutex held.
+ */
+static void __ipmi_bmc_unregister(ipmi_smi_t intf)
{
struct bmc_device *bmc = intf->bmc;
- sysfs_remove_link(&intf->si_dev->kobj, "bmc");
- if (intf->my_dev_name) {
- sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
- kfree(intf->my_dev_name);
- intf->my_dev_name = NULL;
- }
+ if (!intf->bmc_registered)
+ return;
- mutex_lock(&ipmidriver_mutex);
+ sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+ sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
+ kfree(intf->my_dev_name);
+ intf->my_dev_name = NULL;
+
+ mutex_lock(&bmc->dyn_mutex);
+ list_del(&intf->bmc_link);
+ mutex_unlock(&bmc->dyn_mutex);
+ intf->bmc = &intf->tmp_bmc;
kref_put(&bmc->usecount, cleanup_bmc_device);
- intf->bmc = NULL;
- mutex_unlock(&ipmidriver_mutex);
+ intf->bmc_registered = false;
}
-static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
+static void ipmi_bmc_unregister(ipmi_smi_t intf)
+{
+ mutex_lock(&intf->bmc_reg_mutex);
+ __ipmi_bmc_unregister(intf);
+ mutex_unlock(&intf->bmc_reg_mutex);
+}
+
+/*
+ * Must be called with intf->bmc_reg_mutex held.
+ */
+static int __ipmi_bmc_register(ipmi_smi_t intf,
+ struct ipmi_device_id *id,
+ bool guid_set, guid_t *guid, int intf_num)
{
int rv;
- struct bmc_device *bmc = intf->bmc;
+ struct bmc_device *bmc;
struct bmc_device *old_bmc;
- mutex_lock(&ipmidriver_mutex);
+ /*
+ * platform_device_register() can cause bmc_reg_mutex to
+ * be claimed because of the is_visible functions of
+ * the attributes. Eliminate possible recursion and
+ * release the lock.
+ */
+ intf->in_bmc_register = true;
+ mutex_unlock(&intf->bmc_reg_mutex);
/*
* Try to find if there is an bmc_device struct
* representing the interfaced BMC already
*/
- if (bmc->guid_set)
- old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, bmc->guid);
+ mutex_lock(&ipmidriver_mutex);
+ if (guid_set)
+ old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
else
old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
- bmc->id.product_id,
- bmc->id.device_id);
+ id->product_id,
+ id->device_id);
/*
* If there is already an bmc_device, free the new one,
* otherwise register the new BMC device
*/
if (old_bmc) {
- kfree(bmc);
- intf->bmc = old_bmc;
bmc = old_bmc;
+ /*
+ * Note: old_bmc already has usecount incremented by
+ * the BMC find functions.
+ */
+ intf->bmc = old_bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ list_add_tail(&intf->bmc_link, &bmc->intfs);
+ mutex_unlock(&bmc->dyn_mutex);
- kref_get(&bmc->usecount);
- mutex_unlock(&ipmidriver_mutex);
-
- printk(KERN_INFO
- "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
- " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
- bmc->id.manufacturer_id,
- bmc->id.product_id,
- bmc->id.device_id);
+ dev_info(intf->si_dev,
+ "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
+ " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ bmc->id.manufacturer_id,
+ bmc->id.product_id,
+ bmc->id.device_id);
} else {
- unsigned char orig_dev_id = bmc->id.device_id;
- int warn_printed = 0;
-
- snprintf(bmc->name, sizeof(bmc->name),
- "ipmi_bmc.%4.4x", bmc->id.product_id);
- bmc->pdev.name = bmc->name;
-
- while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
- bmc->id.product_id,
- bmc->id.device_id)) {
- if (!warn_printed) {
- printk(KERN_WARNING PFX
- "This machine has two different BMCs"
- " with the same product id and device"
- " id. This is an error in the"
- " firmware, but incrementing the"
- " device id to work around the problem."
- " Prod ID = 0x%x, Dev ID = 0x%x\n",
- bmc->id.product_id, bmc->id.device_id);
- warn_printed = 1;
- }
- bmc->id.device_id++; /* Wraps at 255 */
- if (bmc->id.device_id == orig_dev_id) {
- printk(KERN_ERR PFX
- "Out of device ids!\n");
- break;
- }
+ bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
+ if (!bmc) {
+ rv = -ENOMEM;
+ goto out;
}
+ INIT_LIST_HEAD(&bmc->intfs);
+ mutex_init(&bmc->dyn_mutex);
+ INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
+
+ bmc->id = *id;
+ bmc->dyn_id_set = 1;
+ bmc->dyn_guid_set = guid_set;
+ bmc->guid = *guid;
+ bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
+
+ bmc->pdev.name = "ipmi_bmc";
+ rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
+ if (rv < 0)
+ goto out;
bmc->pdev.dev.driver = &ipmidriver.driver;
- bmc->pdev.id = bmc->id.device_id;
+ bmc->pdev.id = rv;
bmc->pdev.dev.release = release_bmc_device;
bmc->pdev.dev.type = &bmc_device_type;
kref_init(&bmc->usecount);
+ intf->bmc = bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ list_add_tail(&intf->bmc_link, &bmc->intfs);
+ mutex_unlock(&bmc->dyn_mutex);
+
rv = platform_device_register(&bmc->pdev);
- mutex_unlock(&ipmidriver_mutex);
if (rv) {
- put_device(&bmc->pdev.dev);
- printk(KERN_ERR
- "ipmi_msghandler:"
- " Unable to register bmc device: %d\n",
- rv);
- /*
- * Don't go to out_err, you can only do that if
- * the device is registered already.
- */
- return rv;
+ dev_err(intf->si_dev,
+ PFX " Unable to register bmc device: %d\n",
+ rv);
+ goto out_list_del;
}
- dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
- "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ dev_info(intf->si_dev,
+ "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
bmc->id.manufacturer_id,
bmc->id.product_id,
bmc->id.device_id);
@@ -2543,19 +3039,19 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
*/
rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
if (rv) {
- printk(KERN_ERR
- "ipmi_msghandler: Unable to create bmc symlink: %d\n",
- rv);
- goto out_err;
+ dev_err(intf->si_dev,
+ PFX "Unable to create bmc symlink: %d\n", rv);
+ goto out_put_bmc;
}
- intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", ifnum);
+ if (intf_num == -1)
+ intf_num = intf->intf_num;
+ intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
if (!intf->my_dev_name) {
rv = -ENOMEM;
- printk(KERN_ERR
- "ipmi_msghandler: allocate link from BMC: %d\n",
- rv);
- goto out_err;
+ dev_err(intf->si_dev,
+ PFX "Unable to allocate link from BMC: %d\n", rv);
+ goto out_unlink1;
}
rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
@@ -2563,18 +3059,42 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
if (rv) {
kfree(intf->my_dev_name);
intf->my_dev_name = NULL;
- printk(KERN_ERR
- "ipmi_msghandler:"
- " Unable to create symlink to bmc: %d\n",
- rv);
- goto out_err;
+ dev_err(intf->si_dev,
+ PFX "Unable to create symlink to bmc: %d\n", rv);
+ goto out_free_my_dev_name;
}
- return 0;
+ intf->bmc_registered = true;
-out_err:
- ipmi_bmc_unregister(intf);
+out:
+ mutex_unlock(&ipmidriver_mutex);
+ mutex_lock(&intf->bmc_reg_mutex);
+ intf->in_bmc_register = false;
return rv;
+
+out_free_my_dev_name:
+ kfree(intf->my_dev_name);
+ intf->my_dev_name = NULL;
+
+out_unlink1:
+ sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+
+out_put_bmc:
+ mutex_lock(&bmc->dyn_mutex);
+ list_del(&intf->bmc_link);
+ mutex_unlock(&bmc->dyn_mutex);
+ intf->bmc = &intf->tmp_bmc;
+ kref_put(&bmc->usecount, cleanup_bmc_device);
+ goto out;
+
+out_list_del:
+ mutex_lock(&bmc->dyn_mutex);
+ list_del(&intf->bmc_link);
+ mutex_unlock(&bmc->dyn_mutex);
+ intf->bmc = &intf->tmp_bmc;
+ put_device(&bmc->pdev.dev);
+ goto out;
}
static int
@@ -2600,14 +3120,15 @@ send_guid_cmd(ipmi_smi_t intf, int chan)
NULL,
NULL,
0,
- intf->channels[0].address,
- intf->channels[0].lun,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
-1, 0);
}
-static void
-guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
+static void guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
+ struct bmc_device *bmc = intf->bmc;
+
if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
|| (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
|| (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
@@ -2616,38 +3137,46 @@ guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
if (msg->msg.data[0] != 0) {
/* Error from getting the GUID, the BMC doesn't have one. */
- intf->bmc->guid_set = 0;
+ bmc->dyn_guid_set = 0;
goto out;
}
if (msg->msg.data_len < 17) {
- intf->bmc->guid_set = 0;
- printk(KERN_WARNING PFX
- "guid_handler: The GUID response from the BMC was too"
- " short, it was %d but should have been 17. Assuming"
- " GUID is not available.\n",
- msg->msg.data_len);
+ bmc->dyn_guid_set = 0;
+ dev_warn(intf->si_dev,
+ PFX "The GUID response from the BMC was too short, it was %d but should have been 17. Assuming GUID is not available.\n",
+ msg->msg.data_len);
goto out;
}
- memcpy(intf->bmc->guid, msg->msg.data, 16);
- intf->bmc->guid_set = 1;
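+ /* data[0] is the completion code; the GUID starts at data[1]. */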
+ memcpy(bmc->fetch_guid.b, msg->msg.data + 1, 16);
+ /*
+ * Make sure the guid data is available before setting
+ * dyn_guid_set.
+ */
+ smp_wmb();
+ bmc->dyn_guid_set = 1;
out:
wake_up(&intf->waitq);
}
-static void
-get_guid(ipmi_smi_t intf)
+static void __get_guid(ipmi_smi_t intf)
{
int rv;
+ struct bmc_device *bmc = intf->bmc;
- intf->bmc->guid_set = 0x2;
+ bmc->dyn_guid_set = 2;
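+ /* 2 marks the GUID fetch in progress until guid_handler finishes. */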
intf->null_user_handler = guid_handler;
rv = send_guid_cmd(intf, 0);
if (rv)
/* Send failed, no GUID available. */
- intf->bmc->guid_set = 0;
- wait_event(intf->waitq, intf->bmc->guid_set != 2);
+ bmc->dyn_guid_set = 0;
+
+ wait_event(intf->waitq, bmc->dyn_guid_set != 2);
+
+ /* dyn_guid_set makes the guid data available. */
+ smp_rmb();
+
intf->null_user_handler = NULL;
}
@@ -2676,8 +3205,8 @@ send_channel_info_cmd(ipmi_smi_t intf, int chan)
NULL,
NULL,
0,
- intf->channels[0].address,
- intf->channels[0].lun,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
-1, 0);
}
@@ -2685,7 +3214,9 @@ static void
channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
int rv = 0;
- int chan;
+ int ch;
+ unsigned int set = intf->curr_working_cset;
+ struct ipmi_channel *chans;
if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
&& (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
@@ -2701,12 +3232,13 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
* assume it has one IPMB at channel
* zero.
*/
- intf->channels[0].medium
+ intf->wchannels[set].c[0].medium
= IPMI_CHANNEL_MEDIUM_IPMB;
- intf->channels[0].protocol
+ intf->wchannels[set].c[0].protocol
= IPMI_CHANNEL_PROTOCOL_IPMB;
- intf->curr_channel = IPMI_MAX_CHANNELS;
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
wake_up(&intf->waitq);
goto out;
}
@@ -2716,24 +3248,31 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
/* Message not big enough, just go on. */
goto next_channel;
}
- chan = intf->curr_channel;
- intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
- intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
+ ch = intf->curr_channel;
+ chans = intf->wchannels[set].c;
+ chans[ch].medium = msg->msg.data[2] & 0x7f;
+ chans[ch].protocol = msg->msg.data[3] & 0x1f;
next_channel:
intf->curr_channel++;
- if (intf->curr_channel >= IPMI_MAX_CHANNELS)
+ if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
wake_up(&intf->waitq);
- else
+ } else {
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
rv = send_channel_info_cmd(intf, intf->curr_channel);
+ }
if (rv) {
/* Got an error somehow, just give up. */
- printk(KERN_WARNING PFX
- "Error sending channel information for channel"
- " %d: %d\n", intf->curr_channel, rv);
+ dev_warn(intf->si_dev,
+ PFX "Error sending channel information for channel %d: %d\n",
+ intf->curr_channel, rv);
- intf->curr_channel = IPMI_MAX_CHANNELS;
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
wake_up(&intf->waitq);
}
}
@@ -2741,6 +3280,53 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
return;
}
+/*
+ * Must be holding intf->bmc_reg_mutex to call this.
+ */
+static int __scan_channels(ipmi_smi_t intf, struct ipmi_device_id *id)
+{
+ int rv;
+
+ if (ipmi_version_major(id) > 1
+ || (ipmi_version_major(id) == 1
+ && ipmi_version_minor(id) >= 5)) {
+ unsigned int set;
+
+ /*
+ * Start scanning the channels to see what is
+ * available.
+ */
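+ /*
+ * Build the scan in the inactive channel set; readers keep
+ * using the old set through channel_list until the handler
+ * publishes the new one.
+ */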
+ set = !intf->curr_working_cset;
+ intf->curr_working_cset = set;
+ memset(&intf->wchannels[set], 0,
+ sizeof(struct ipmi_channel_set));
+
+ intf->null_user_handler = channel_handler;
+ intf->curr_channel = 0;
+ rv = send_channel_info_cmd(intf, 0);
+ if (rv) {
+ dev_warn(intf->si_dev,
+ "Error sending channel information for channel 0, %d\n",
+ rv);
+ return -EIO;
+ }
+
+ /* Wait for the channel info to be read. */
+ wait_event(intf->waitq, intf->channels_ready);
+ intf->null_user_handler = NULL;
+ } else {
+ unsigned int set = intf->curr_working_cset;
+
+ /* Assume a single IPMB channel at zero. */
+ intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
+ intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
+ }
+
+ return 0;
+}
+
static void ipmi_poll(ipmi_smi_t intf)
{
if (intf->handlers->poll)
@@ -2755,9 +3341,18 @@ void ipmi_poll_interface(ipmi_user_t user)
}
EXPORT_SYMBOL(ipmi_poll_interface);
+static void redo_bmc_reg(struct work_struct *work)
+{
+ ipmi_smi_t intf = container_of(work, struct ipmi_smi, bmc_reg_work);
+
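+ /*
+ * Refetching the device id re-registers the BMC if needed; the
+ * reference taken when this work was scheduled is dropped below.
+ */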
+ if (!intf->in_shutdown)
+ bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
+
+ kref_put(&intf->refcount, intf_free);
+}
+
int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
void *send_info,
- struct ipmi_device_id *device_id,
struct device *si_dev,
unsigned char slave_addr)
{
@@ -2766,6 +3361,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
ipmi_smi_t intf;
ipmi_smi_t tintf;
struct list_head *link;
+ struct ipmi_device_id id;
/*
* Make sure the driver is actually initialized, this handles
@@ -2787,24 +3383,21 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
if (!intf)
return -ENOMEM;
- intf->ipmi_version_major = ipmi_version_major(device_id);
- intf->ipmi_version_minor = ipmi_version_minor(device_id);
-
- intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
- if (!intf->bmc) {
- kfree(intf);
- return -ENOMEM;
- }
+ intf->bmc = &intf->tmp_bmc;
+ INIT_LIST_HEAD(&intf->bmc->intfs);
+ mutex_init(&intf->bmc->dyn_mutex);
+ INIT_LIST_HEAD(&intf->bmc_link);
+ mutex_init(&intf->bmc_reg_mutex);
intf->intf_num = -1; /* Mark it invalid for now. */
kref_init(&intf->refcount);
- intf->bmc->id = *device_id;
+ INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
intf->si_dev = si_dev;
for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
- intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
- intf->channels[j].lun = 2;
+ intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
+ intf->addrinfo[j].lun = 2;
}
if (slave_addr != 0)
- intf->channels[0].address = slave_addr;
+ intf->addrinfo[0].address = slave_addr;
INIT_LIST_HEAD(&intf->users);
intf->handlers = handlers;
intf->send_info = send_info;
@@ -2814,7 +3407,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
intf->seq_table[j].seqid = 0;
}
intf->curr_seq = 0;
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_IPMI_PROC_INTERFACE
mutex_init(&intf->proc_entry_lock);
#endif
spin_lock_init(&intf->waiting_rcv_msgs_lock);
@@ -2838,7 +3431,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
for (i = 0; i < IPMI_NUM_STATS; i++)
atomic_set(&intf->stats[i], 0);
+#ifdef CONFIG_IPMI_PROC_INTERFACE
intf->proc_dir = NULL;
+#endif
mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
@@ -2862,45 +3457,29 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
if (rv)
goto out;
- get_guid(intf);
-
- if ((intf->ipmi_version_major > 1)
- || ((intf->ipmi_version_major == 1)
- && (intf->ipmi_version_minor >= 5))) {
- /*
- * Start scanning the channels to see what is
- * available.
- */
- intf->null_user_handler = channel_handler;
- intf->curr_channel = 0;
- rv = send_channel_info_cmd(intf, 0);
- if (rv) {
- printk(KERN_WARNING PFX
- "Error sending channel information for channel"
- " 0, %d\n", rv);
- goto out;
- }
-
- /* Wait for the channel info to be read. */
- wait_event(intf->waitq,
- intf->curr_channel >= IPMI_MAX_CHANNELS);
- intf->null_user_handler = NULL;
- } else {
- /* Assume a single IPMB channel at zero. */
- intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
- intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
- intf->curr_channel = IPMI_MAX_CHANNELS;
+ rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
+ if (rv) {
+ dev_err(si_dev, "Unable to get the device id: %d\n", rv);
+ goto out;
}
- rv = ipmi_bmc_register(intf, i);
+ mutex_lock(&intf->bmc_reg_mutex);
+ rv = __scan_channels(intf, &id);
+ mutex_unlock(&intf->bmc_reg_mutex);
+ if (rv)
+ goto out;
- if (rv == 0)
- rv = add_proc_entries(intf, i);
+#ifdef CONFIG_IPMI_PROC_INTERFACE
+ rv = add_proc_entries(intf, i);
+#endif
out:
if (rv) {
+ ipmi_bmc_unregister(intf);
+#ifdef CONFIG_IPMI_PROC_INTERFACE
if (intf->proc_dir)
remove_proc_entries(intf);
+#endif
intf->handlers = NULL;
list_del_rcu(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
@@ -3005,7 +3584,9 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
intf->handlers = NULL;
mutex_unlock(&ipmi_interfaces_mutex);
+#ifdef CONFIG_IPMI_PROC_INTERFACE
remove_proc_entries(intf);
+#endif
ipmi_bmc_unregister(intf);
/*
@@ -3130,7 +3711,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
msg->data[3] = msg->rsp[6];
msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
- msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
+ msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
/* rqseq/lun */
msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
msg->data[8] = msg->rsp[8]; /* cmd */
@@ -3584,8 +4165,8 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
* There's too many things in the queue, discard this
* message.
*/
- printk(KERN_WARNING PFX "Event queue full, discarding"
- " incoming events\n");
+ dev_warn(intf->si_dev,
+ PFX "Event queue full, discarding incoming events\n");
intf->event_msg_printed = 1;
}
@@ -3603,11 +4184,8 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
recv_msg = (struct ipmi_recv_msg *) msg->user_data;
if (recv_msg == NULL) {
- printk(KERN_WARNING
- "IPMI message received with no owner. This\n"
- "could be because of a malformed message, or\n"
- "because of a hardware error. Contact your\n"
- "hardware vender for assistance\n");
+ dev_warn(intf->si_dev,
+ "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vender for assistance\n");
return 0;
}
@@ -3661,9 +4239,9 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
#endif
if (msg->rsp_size < 2) {
/* Message is too small to be correct. */
- printk(KERN_WARNING PFX "BMC returned to small a message"
- " for netfn %x cmd %x, got %d bytes\n",
- (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+ dev_warn(intf->si_dev,
+ PFX "BMC returned to small a message for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
/* Generate an error response for the message. */
msg->rsp[0] = msg->data[0] | (1 << 2);
@@ -3676,10 +4254,10 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
* The NetFN and Command in the response is not even
* marginally correct.
*/
- printk(KERN_WARNING PFX "BMC returned incorrect response,"
- " expected netfn %x cmd %x, got netfn %x cmd %x\n",
- (msg->data[0] >> 2) | 1, msg->data[1],
- msg->rsp[0] >> 2, msg->rsp[1]);
+ dev_warn(intf->si_dev,
+ PFX "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
+ (msg->data[0] >> 2) | 1, msg->data[1],
+ msg->rsp[0] >> 2, msg->rsp[1]);
/* Generate an error response for the message. */
msg->rsp[0] = msg->data[0] | (1 << 2);
@@ -3721,6 +4299,8 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
deliver_response(recv_msg);
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
+ struct ipmi_channel *chans;
+
/* It's from the receive queue. */
chan = msg->rsp[3] & 0xf;
if (chan >= IPMI_MAX_CHANNELS) {
@@ -3735,12 +4315,14 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
* equal to or greater than IPMI_MAX_CHANNELS when all the
* channels for this interface have been initialized.
*/
- if (intf->curr_channel < IPMI_MAX_CHANNELS) {
+ if (!intf->channels_ready) {
requeue = 0; /* Throw the message away */
goto out;
}
- switch (intf->channels[chan].medium) {
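+ /* channel_list can be swapped by a rescan, so take a snapshot. */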
+ chans = READ_ONCE(intf->channel_list)->c;
+
+ switch (chans[chan].medium) {
case IPMI_CHANNEL_MEDIUM_IPMB:
if (msg->rsp[4] & 0x04) {
/*
@@ -3777,9 +4359,8 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
default:
/* Check for OEM Channels. Clients had better
register for these commands. */
- if ((intf->channels[chan].medium
- >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
- && (intf->channels[chan].medium
+ if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
+ && (chans[chan].medium
<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
requeue = handle_oem_get_msg_cmd(intf, msg);
} else {
@@ -3941,15 +4522,14 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
&& (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
&& (msg->rsp[2] != IPMI_BUS_ERR)
&& (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
- int chan = msg->rsp[3] & 0xf;
+ int ch = msg->rsp[3] & 0xf;
+ struct ipmi_channel *chans;
/* Got an error sending the message, handle it. */
- if (chan >= IPMI_MAX_CHANNELS)
- ; /* This shouldn't happen */
- else if ((intf->channels[chan].medium
- == IPMI_CHANNEL_MEDIUM_8023LAN)
- || (intf->channels[chan].medium
- == IPMI_CHANNEL_MEDIUM_ASYNC))
+
+ chans = READ_ONCE(intf->channel_list)->c;
+ if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
+ || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
ipmi_inc_stat(intf, sent_lan_command_errs);
else
ipmi_inc_stat(intf, sent_ipmb_command_errs);
@@ -4030,7 +4610,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
}
static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
- struct list_head *timeouts, long timeout_period,
+ struct list_head *timeouts,
+ unsigned long timeout_period,
int slot, unsigned long *flags,
unsigned int *waiting_msgs)
{
@@ -4043,8 +4624,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
if (!ent->inuse)
return;
- ent->timeout -= timeout_period;
- if (ent->timeout > 0) {
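+ /* Compare before subtracting so the unsigned timeout cannot wrap. */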
+ if (timeout_period < ent->timeout) {
+ ent->timeout -= timeout_period;
(*waiting_msgs)++;
return;
}
@@ -4110,7 +4691,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
}
}
-static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
+static unsigned int ipmi_timeout_handler(ipmi_smi_t intf,
+ unsigned long timeout_period)
{
struct list_head timeouts;
struct ipmi_recv_msg *msg, *msg2;
@@ -4118,6 +4700,14 @@ static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
int i;
unsigned int waiting_msgs = 0;
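+ /* If the BMC is not registered yet, kick the registration work. */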
+ if (!intf->bmc_registered) {
+ kref_get(&intf->refcount);
+ if (!schedule_work(&intf->bmc_reg_work)) {
+ kref_put(&intf->refcount, intf_free);
+ waiting_msgs++;
+ }
+ }
+
/*
* Go through the seq table and find any messages that
* have timed out, putting them in the timeouts
@@ -4269,8 +4859,6 @@ void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
-#ifdef CONFIG_IPMI_PANIC_EVENT
-
static atomic_t panic_done_count = ATOMIC_INIT(0);
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
@@ -4306,8 +4894,8 @@ static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
&smi_msg,
&recv_msg,
0,
- intf->channels[0].address,
- intf->channels[0].lun,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
0, 1); /* Don't retry, and don't wait. */
if (rv)
atomic_sub(2, &panic_done_count);
@@ -4318,7 +4906,6 @@ static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
ipmi_poll(intf);
}
-#ifdef CONFIG_IPMI_PANIC_STRING
static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
@@ -4345,7 +4932,6 @@ static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
}
}
-#endif
static void send_panic_events(char *str)
{
@@ -4355,6 +4941,9 @@ static void send_panic_events(char *str)
struct ipmi_system_interface_addr *si;
struct ipmi_addr addr;
+ if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
+ return;
+
si = (struct ipmi_system_interface_addr *) &addr;
si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
si->channel = IPMI_BMC_CHANNEL;
@@ -4383,20 +4972,19 @@ static void send_panic_events(char *str)
/* For every registered interface, send the event. */
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
- if (!intf->handlers)
- /* Interface is not ready. */
+ if (!intf->handlers || !intf->handlers->poll)
+ /* Interface is not ready or can't run at panic time. */
continue;
/* Send the event announcing the panic. */
ipmi_panic_request_and_wait(intf, &addr, &msg);
}
-#ifdef CONFIG_IPMI_PANIC_STRING
/*
* On every interface, dump a bunch of OEM event holding the
* string.
*/
- if (!str)
+ if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
return;
/* For every registered interface, send the event. */
@@ -4456,7 +5044,7 @@ static void send_panic_events(char *str)
*/
if (((intf->event_receiver & 1) == 0)
&& (intf->event_receiver != 0)
- && (intf->event_receiver != intf->channels[0].address)) {
+ && (intf->event_receiver != intf->addrinfo[0].address)) {
/*
* The event receiver is valid, send an IPMB
* message.
@@ -4493,7 +5081,7 @@ static void send_panic_events(char *str)
data[0] = 0;
data[1] = 0;
data[2] = 0xf0; /* OEM event without timestamp. */
- data[3] = intf->channels[0].address;
+ data[3] = intf->addrinfo[0].address;
data[4] = j++; /* sequence # */
/*
* Always give 11 bytes, so strncpy will fill
@@ -4505,9 +5093,7 @@ static void send_panic_events(char *str)
ipmi_panic_request_and_wait(intf, &addr, &msg);
}
}
-#endif /* CONFIG_IPMI_PANIC_STRING */
}
-#endif /* CONFIG_IPMI_PANIC_EVENT */
static int has_panicked;
@@ -4545,12 +5131,12 @@ static int panic_event(struct notifier_block *this,
spin_unlock(&intf->waiting_rcv_msgs_lock);
intf->run_to_completion = 1;
- intf->handlers->set_run_to_completion(intf->send_info, 1);
+ if (intf->handlers->set_run_to_completion)
+ intf->handlers->set_run_to_completion(intf->send_info,
+ 1);
}
-#ifdef CONFIG_IPMI_PANIC_EVENT
send_panic_events(ptr);
-#endif
return NOTIFY_DONE;
}
@@ -4570,22 +5156,21 @@ static int ipmi_init_msghandler(void)
rv = driver_register(&ipmidriver.driver);
if (rv) {
- printk(KERN_ERR PFX "Could not register IPMI driver\n");
+ pr_err(PFX "Could not register IPMI driver\n");
return rv;
}
- printk(KERN_INFO "ipmi message handler version "
- IPMI_DRIVER_VERSION "\n");
+ pr_info("ipmi message handler version " IPMI_DRIVER_VERSION "\n");
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_IPMI_PROC_INTERFACE
proc_ipmi_root = proc_mkdir("ipmi", NULL);
if (!proc_ipmi_root) {
- printk(KERN_ERR PFX "Unable to create IPMI proc dir");
+ pr_err(PFX "Unable to create IPMI proc dir");
driver_unregister(&ipmidriver.driver);
return -ENOMEM;
}
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_IPMI_PROC_INTERFACE */
setup_timer(&ipmi_timer, ipmi_timeout, 0);
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
@@ -4625,9 +5210,9 @@ static void __exit cleanup_ipmi(void)
atomic_inc(&stop_operation);
del_timer_sync(&ipmi_timer);
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_IPMI_PROC_INTERFACE
proc_remove(proc_ipmi_root);
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_IPMI_PROC_INTERFACE */
driver_unregister(&ipmidriver.driver);
@@ -4636,12 +5221,10 @@ static void __exit cleanup_ipmi(void)
/* Check for buffer leaks. */
count = atomic_read(&smi_msg_inuse_count);
if (count != 0)
- printk(KERN_WARNING PFX "SMI message count %d at exit\n",
- count);
+ pr_warn(PFX "SMI message count %d at exit\n", count);
count = atomic_read(&recv_msg_inuse_count);
if (count != 0)
- printk(KERN_WARNING PFX "recv message count %d at exit\n",
- count);
+ pr_warn(PFX "recv message count %d at exit\n", count);
}
module_exit(cleanup_ipmi);
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index b338a4becbf8..07fddbefefe4 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -23,7 +23,6 @@
struct ipmi_smi_powernv {
u64 interface_id;
- struct ipmi_device_id ipmi_id;
ipmi_smi_t intf;
unsigned int irq;
@@ -266,8 +265,7 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
}
/* todo: query actual ipmi_device_id */
- rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi,
- &ipmi->ipmi_id, dev, 0);
+ rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi, dev, 0);
if (rc) {
dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc);
goto err_free_msg;
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 9f2e3be2c5b8..38e6af1c8e38 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -66,7 +66,7 @@ static void (*specific_poweroff_func)(ipmi_user_t user);
/* Holds the old poweroff function so we can restore it on removal. */
static void (*old_poweroff_func)(void);
-static int set_param_ifnum(const char *val, struct kernel_param *kp)
+static int set_param_ifnum(const char *val, const struct kernel_param *kp)
{
int rv = param_set_int(val, kp);
if (rv)
@@ -133,7 +133,7 @@ static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
complete(comp);
}
-static struct ipmi_user_hndl ipmi_poweroff_handler = {
+static const struct ipmi_user_hndl ipmi_poweroff_handler = {
.ipmi_recv_hndl = receive_handler
};
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
new file mode 100644
index 000000000000..17ce5f7b89ab
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -0,0 +1,49 @@
+/*
+ * ipmi_si.h
+ *
+ * Interface from the device-specific interfaces (OF, DMI, ACPI, PCI,
+ * etc) to the base ipmi system interface code.
+ */
+
+#include <linux/interrupt.h>
+#include "ipmi_si_sm.h"
+
+#define IPMI_IO_ADDR_SPACE 0
+#define IPMI_MEM_ADDR_SPACE 1
+
+#define DEFAULT_REGSPACING 1
+#define DEFAULT_REGSIZE 1
+
+#define DEVICE_NAME "ipmi_si"
+
+int ipmi_si_add_smi(struct si_sm_io *io);
+irqreturn_t ipmi_si_irq_handler(int irq, void *data);
+void ipmi_irq_start_cleanup(struct si_sm_io *io);
+int ipmi_std_irq_setup(struct si_sm_io *io);
+void ipmi_irq_finish_setup(struct si_sm_io *io);
+int ipmi_si_remove_by_dev(struct device *dev);
+void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
+ unsigned long addr);
+int ipmi_si_hardcode_find_bmc(void);
+void ipmi_si_platform_init(void);
+void ipmi_si_platform_shutdown(void);
+
+extern struct platform_driver ipmi_platform_driver;
+
+#ifdef CONFIG_PCI
+void ipmi_si_pci_init(void);
+void ipmi_si_pci_shutdown(void);
+#else
+static inline void ipmi_si_pci_init(void) { }
+static inline void ipmi_si_pci_shutdown(void) { }
+#endif
+#ifdef CONFIG_PARISC
+void ipmi_si_parisc_init(void);
+void ipmi_si_parisc_shutdown(void);
+#else
+static inline void ipmi_si_parisc_init(void) { }
+static inline void ipmi_si_parisc_shutdown(void) { }
+#endif
+
+int ipmi_si_port_setup(struct si_sm_io *io);
+int ipmi_si_mem_setup(struct si_sm_io *io);
diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
new file mode 100644
index 000000000000..fa9a4780de36
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_hardcode.c
@@ -0,0 +1,146 @@
+
+#include <linux/moduleparam.h>
+#include "ipmi_si.h"
+
+#define PFX "ipmi_hardcode: "
+/*
+ * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
+ * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_PARMS.
+ */
+
+#define SI_MAX_PARMS 4
+
+static char *si_type[SI_MAX_PARMS];
+#define MAX_SI_TYPE_STR 30
+static char si_type_str[MAX_SI_TYPE_STR];
+static unsigned long addrs[SI_MAX_PARMS];
+static unsigned int num_addrs;
+static unsigned int ports[SI_MAX_PARMS];
+static unsigned int num_ports;
+static int irqs[SI_MAX_PARMS];
+static unsigned int num_irqs;
+static int regspacings[SI_MAX_PARMS];
+static unsigned int num_regspacings;
+static int regsizes[SI_MAX_PARMS];
+static unsigned int num_regsizes;
+static int regshifts[SI_MAX_PARMS];
+static unsigned int num_regshifts;
+static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
+static unsigned int num_slave_addrs;
+
+module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
+MODULE_PARM_DESC(type, "Defines the type of each interface, each"
+ " interface separated by commas. The types are 'kcs',"
+ " 'smic', and 'bt'. For example si_type=kcs,bt will set"
+ " the first interface to kcs and the second to bt");
+module_param_hw_array(addrs, ulong, iomem, &num_addrs, 0);
+MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
+ " addresses separated by commas. Only use if an interface"
+ " is in memory. Otherwise, set it to zero or leave"
+ " it blank.");
+module_param_hw_array(ports, uint, ioport, &num_ports, 0);
+MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
+ " addresses separated by commas. Only use if an interface"
+ " is a port. Otherwise, set it to zero or leave"
+ " it blank.");
+module_param_hw_array(irqs, int, irq, &num_irqs, 0);
+MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
+ " addresses separated by commas. Only use if an interface"
+ " has an interrupt. Otherwise, set it to zero or leave"
+ " it blank.");
+module_param_hw_array(regspacings, int, other, &num_regspacings, 0);
+MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
+ " and each successive register used by the interface. For"
+ " instance, if the start address is 0xca2 and the spacing"
+ " is 2, then the second address is at 0xca4. Defaults"
+ " to 1.");
+module_param_hw_array(regsizes, int, other, &num_regsizes, 0);
+MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
+ " This should generally be 1, 2, 4, or 8 for an 8-bit,"
+ " 16-bit, 32-bit, or 64-bit register. Use this if you"
+ " the 8-bit IPMI register has to be read from a larger"
+ " register.");
+module_param_hw_array(regshifts, int, other, &num_regshifts, 0);
+MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
+ " IPMI register, in bits. For instance, if the data"
+ " is read from a 32-bit word and the IPMI data is in"
+ " bit 8-15, then the shift would be 8");
+module_param_hw_array(slave_addrs, int, other, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
+ " the controller. Normally this is 0x20, but can be"
+ " overridden by this parm. This is an array indexed"
+ " by interface number.");
+
+int ipmi_si_hardcode_find_bmc(void)
+{
+ int ret = -ENODEV;
+ int i;
+ struct si_sm_io io;
+ char *str;
+
+ /* Parse out the si_type string into its components. */
+ str = si_type_str;
+ if (*str != '\0') {
+ for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
+ si_type[i] = str;
+ str = strchr(str, ',');
+ if (str) {
+ *str = '\0';
+ str++;
+ } else {
+ break;
+ }
+ }
+ }
+
+ memset(&io, 0, sizeof(io));
+ for (i = 0; i < SI_MAX_PARMS; i++) {
+ if (!ports[i] && !addrs[i])
+ continue;
+
+ io.addr_source = SI_HARDCODED;
+ pr_info(PFX "probing via hardcoded address\n");
+
+ if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
+ io.si_type = SI_KCS;
+ } else if (strcmp(si_type[i], "smic") == 0) {
+ io.si_type = SI_SMIC;
+ } else if (strcmp(si_type[i], "bt") == 0) {
+ io.si_type = SI_BT;
+ } else {
+ pr_warn(PFX "Interface type specified for interface %d, was invalid: %s\n",
+ i, si_type[i]);
+ continue;
+ }
+
+ if (ports[i]) {
+ /* An I/O port */
+ io.addr_data = ports[i];
+ io.addr_type = IPMI_IO_ADDR_SPACE;
+ } else if (addrs[i]) {
+ /* A memory port */
+ io.addr_data = addrs[i];
+ io.addr_type = IPMI_MEM_ADDR_SPACE;
+ } else {
+ pr_warn(PFX "Interface type specified for interface %d, but port and address were not set or set to zero.\n",
+ i);
+ continue;
+ }
+
+ io.addr = NULL;
+ io.regspacing = regspacings[i];
+ if (!io.regspacing)
+ io.regspacing = DEFAULT_REGSPACING;
+ io.regsize = regsizes[i];
+ if (!io.regsize)
+ io.regsize = DEFAULT_REGSIZE;
+ io.regshift = regshifts[i];
+ io.irq = irqs[i];
+ if (io.irq)
+ io.irq_setup = ipmi_std_irq_setup;
+ io.slave_addr = slave_addrs[i];
+
+ ret = ipmi_si_add_smi(&io);
+ }
+ return ret;
+}
diff --git a/drivers/char/ipmi/ipmi_si_hotmod.c b/drivers/char/ipmi/ipmi_si_hotmod.c
new file mode 100644
index 000000000000..fc03b9be2f3d
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_hotmod.c
@@ -0,0 +1,242 @@
+/*
+ * ipmi_si_hotmod.c
+ *
+ * Handling for dynamically adding/removing IPMI devices through
+ * a module parameter (and thus sysfs).
+ */
+#include <linux/moduleparam.h>
+#include <linux/ipmi.h>
+#include "ipmi_si.h"
+
+#define PFX "ipmi_hotmod: "
+
+static int hotmod_handler(const char *val, const struct kernel_param *kp);
+
+module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
+MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
+ " Documentation/IPMI.txt in the kernel sources for the"
+ " gory details.");
+
+/*
+ * Parms come in as <op1>[:op2[:op3...]]. ops are:
+ * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
+ * Options are:
+ * rsp=<regspacing>
+ * rsi=<regsize>
+ * rsh=<regshift>
+ * irq=<irq>
+ * ipmb=<ipmb addr>
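+ *
+ * e.g.: echo add,kcs,i/o,0xca2 > /sys/module/ipmi_si/parameters/hotmod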
+ */
+enum hotmod_op { HM_ADD, HM_REMOVE };
+struct hotmod_vals {
+ const char *name;
+ const int val;
+};
+
+static const struct hotmod_vals hotmod_ops[] = {
+ { "add", HM_ADD },
+ { "remove", HM_REMOVE },
+ { NULL }
+};
+
+static const struct hotmod_vals hotmod_si[] = {
+ { "kcs", SI_KCS },
+ { "smic", SI_SMIC },
+ { "bt", SI_BT },
+ { NULL }
+};
+
+static const struct hotmod_vals hotmod_as[] = {
+ { "mem", IPMI_MEM_ADDR_SPACE },
+ { "i/o", IPMI_IO_ADDR_SPACE },
+ { NULL }
+};
+
+static int parse_str(const struct hotmod_vals *v, int *val, char *name,
+ char **curr)
+{
+ char *s;
+ int i;
+
+ s = strchr(*curr, ',');
+ if (!s) {
+ pr_warn(PFX "No hotmod %s given.\n", name);
+ return -EINVAL;
+ }
+ *s = '\0';
+ s++;
+ for (i = 0; v[i].name; i++) {
+ if (strcmp(*curr, v[i].name) == 0) {
+ *val = v[i].val;
+ *curr = s;
+ return 0;
+ }
+ }
+
+ pr_warn(PFX "Invalid hotmod %s '%s'\n", name, *curr);
+ return -EINVAL;
+}
+
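+/*
+ * Returns 1 if the option name matched and *val was set, 0 if the name
+ * did not match, or -EINVAL on a malformed value.
+ */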
+static int check_hotmod_int_op(const char *curr, const char *option,
+ const char *name, int *val)
+{
+ char *n;
+
+ if (strcmp(curr, name) == 0) {
+ if (!option) {
+ pr_warn(PFX "No option given for '%s'\n", curr);
+ return -EINVAL;
+ }
+ *val = simple_strtoul(option, &n, 0);
+ if ((*n != '\0') || (*option == '\0')) {
+ pr_warn(PFX "Bad option given for '%s'\n", curr);
+ return -EINVAL;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+static int hotmod_handler(const char *val, const struct kernel_param *kp)
+{
+ char *str = kstrdup(val, GFP_KERNEL);
+ int rv;
+ char *next, *curr, *s, *n, *o;
+ enum hotmod_op op;
+ enum si_type si_type;
+ int addr_space;
+ unsigned long addr;
+ int regspacing;
+ int regsize;
+ int regshift;
+ int irq;
+ int ipmb;
+ int ival;
+ int len;
+
+ if (!str)
+ return -ENOMEM;
+
+ /* Kill any trailing spaces, as we can get a "\n" from echo. */
+ len = strlen(str);
+ ival = len - 1;
+ while ((ival >= 0) && isspace(str[ival])) {
+ str[ival] = '\0';
+ ival--;
+ }
+
+ for (curr = str; curr; curr = next) {
+ regspacing = 1;
+ regsize = 1;
+ regshift = 0;
+ irq = 0;
+ ipmb = 0; /* Choose the default if not specified */
+
+ next = strchr(curr, ':');
+ if (next) {
+ *next = '\0';
+ next++;
+ }
+
+ rv = parse_str(hotmod_ops, &ival, "operation", &curr);
+ if (rv)
+ break;
+ op = ival;
+
+ rv = parse_str(hotmod_si, &ival, "interface type", &curr);
+ if (rv)
+ break;
+ si_type = ival;
+
+ rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
+ if (rv)
+ break;
+
+ s = strchr(curr, ',');
+ if (s) {
+ *s = '\0';
+ s++;
+ }
+ addr = simple_strtoul(curr, &n, 0);
+ if ((*n != '\0') || (*curr == '\0')) {
+ pr_warn(PFX "Invalid hotmod address '%s'\n", curr);
+ break;
+ }
+
+ while (s) {
+ curr = s;
+ s = strchr(curr, ',');
+ if (s) {
+ *s = '\0';
+ s++;
+ }
+ o = strchr(curr, '=');
+ if (o) {
+ *o = '\0';
+ o++;
+ }
+ rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "irq", &irq);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+
+ rv = -EINVAL;
+ pr_warn(PFX "Invalid hotmod option '%s'\n", curr);
+ goto out;
+ }
+
+ if (op == HM_ADD) {
+ struct si_sm_io io;
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = SI_HOTMOD;
+ io.si_type = si_type;
+ io.addr_data = addr;
+ io.addr_type = addr_space;
+
+ io.addr = NULL;
+ io.regspacing = regspacing;
+ if (!io.regspacing)
+ io.regspacing = DEFAULT_REGSPACING;
+ io.regsize = regsize;
+ if (!io.regsize)
+ io.regsize = DEFAULT_REGSIZE;
+ io.regshift = regshift;
+ io.irq = irq;
+ if (io.irq)
+ io.irq_setup = ipmi_std_irq_setup;
+ io.slave_addr = ipmb;
+
+ rv = ipmi_si_add_smi(&io);
+ if (rv)
+ goto out;
+ } else {
+ ipmi_si_remove_by_data(addr_space, si_type, addr);
+ }
+ }
+ rv = len;
+out:
+ kfree(str);
+ return rv;
+}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 36f47e8d06a3..71d33a1807e4 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -49,8 +49,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
-#include <linux/pci.h>
-#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
@@ -59,22 +57,9 @@
#include <linux/rcupdate.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
-#include <asm/io.h>
-#include "ipmi_si_sm.h"
-#include "ipmi_dmi.h"
-#include <linux/dmi.h>
+#include "ipmi_si.h"
#include <linux/string.h>
#include <linux/ctype.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/acpi.h>
-
-#ifdef CONFIG_PARISC
-#include <asm/hardware.h> /* for register_parisc_driver() stuff */
-#include <asm/parisc-device.h>
-#endif
#define PFX "ipmi_si: "
@@ -104,15 +89,9 @@ enum si_intf_state {
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
-enum si_type {
- SI_KCS, SI_SMIC, SI_BT
-};
-
-static const char * const si_to_str[] = { "kcs", "smic", "bt" };
+static const char * const si_to_str[] = { "invalid", "kcs", "smic", "bt" };
-#define DEVICE_NAME "ipmi_si"
-
-static struct platform_driver ipmi_driver;
+static int initialized;
/*
* Indexes into stats[] in smi_info below.
@@ -167,7 +146,6 @@ struct smi_info {
ipmi_smi_t intf;
struct si_sm_data *si_sm;
const struct si_sm_handlers *handlers;
- enum si_type si_type;
spinlock_t si_lock;
struct ipmi_smi_msg *waiting_msg;
struct ipmi_smi_msg *curr_msg;
@@ -178,14 +156,6 @@ struct smi_info {
* IPMI
*/
struct si_sm_io io;
- int (*io_setup)(struct smi_info *info);
- void (*io_cleanup)(struct smi_info *info);
- int (*irq_setup)(struct smi_info *info);
- void (*irq_cleanup)(struct smi_info *info);
- unsigned int io_size;
- enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
- void (*addr_source_cleanup)(struct smi_info *info);
- void *addr_source_data;
/*
* Per-OEM handler, called from handle_flags(). Returns 1
@@ -226,19 +196,6 @@ struct smi_info {
*/
bool run_to_completion;
- /* The I/O port of an SI interface. */
- int port;
-
- /*
- * The space between start addresses of the two ports. For
- * instance, if the first port is 0xca2 and the spacing is 4, then
- * the second port is 0xca6.
- */
- unsigned int spacing;
-
- /* zero if no irq; */
- int irq;
-
/* The timer for this si. */
struct timer_list si_timer;
@@ -289,26 +246,15 @@ struct smi_info {
/* From the get device id response... */
struct ipmi_device_id device_id;
- /* Driver model stuff. */
- struct device *dev;
+ /* Default driver model device. */
struct platform_device *pdev;
- /*
- * True if we allocated the device, false if it came from
- * someplace else (like PCI).
- */
- bool dev_registered;
-
- /* Slave address, could be reported from DMI. */
- unsigned char slave_addr;
-
/* Counters and things for the proc filesystem. */
atomic_t stats[SI_NUM_STATS];
struct task_struct *thread;
struct list_head link;
- union ipmi_smi_info_union addr_info;
};
#define smi_inc_stat(smi, stat) \
@@ -316,23 +262,15 @@ struct smi_info {
#define smi_get_stat(smi, stat) \
((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
-#define SI_MAX_PARMS 4
-
-static int force_kipmid[SI_MAX_PARMS];
+#define IPMI_MAX_INTFS 4
+static int force_kipmid[IPMI_MAX_INTFS];
static int num_force_kipmid;
-#ifdef CONFIG_PCI
-static bool pci_registered;
-#endif
-#ifdef CONFIG_PARISC
-static bool parisc_registered;
-#endif
-static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
+static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
static int num_max_busy_us;
static bool unload_when_empty = true;
-static int add_smi(struct smi_info *smi);
static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *to_clean);
static void cleanup_ipmi_si(void);
@@ -499,7 +437,7 @@ static void start_getting_events(struct smi_info *smi_info)
*/
static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
{
- if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+ if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
smi_info->interrupt_disabled = true;
start_check_enables(smi_info, start_timer);
return true;
@@ -509,7 +447,7 @@ static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
static inline bool enable_si_irq(struct smi_info *smi_info)
{
- if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
+ if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
smi_info->interrupt_disabled = false;
start_check_enables(smi_info, true);
return true;
@@ -585,13 +523,13 @@ static u8 current_global_enables(struct smi_info *smi_info, u8 base,
if (smi_info->supports_event_msg_buff)
enables |= IPMI_BMC_EVT_MSG_BUFF;
- if (((smi_info->irq && !smi_info->interrupt_disabled) ||
+ if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
smi_info->cannot_disable_irq) &&
!smi_info->irq_enable_broken)
enables |= IPMI_BMC_RCV_MSG_INTR;
if (smi_info->supports_event_msg_buff &&
- smi_info->irq && !smi_info->interrupt_disabled &&
+ smi_info->io.irq && !smi_info->interrupt_disabled &&
!smi_info->irq_enable_broken)
enables |= IPMI_BMC_EVT_MSG_INTR;
@@ -673,7 +611,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
if (msg[2] != 0) {
/* Error clearing flags */
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Error clearing flags: %2.2x\n", msg[2]);
}
smi_info->si_state = SI_NORMAL;
@@ -765,15 +703,15 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Couldn't get irq info: %x.\n", msg[2]);
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Maybe ok, but ipmi might run very slowly.\n");
smi_info->si_state = SI_NORMAL;
break;
}
enables = current_global_enables(smi_info, 0, &irq_on);
- if (smi_info->si_type == SI_BT)
+ if (smi_info->io.si_type == SI_BT)
/* BT has its own interrupt enable bit. */
check_bt_irq(smi_info, irq_on);
if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
@@ -803,7 +741,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0)
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Could not set the global enables: 0x%x.\n",
msg[2]);
@@ -927,7 +865,7 @@ restart:
* asynchronously reset, and may thus get interrupts
* disable and messages disabled.
*/
- if (smi_info->supports_event_msg_buff || smi_info->irq) {
+ if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
start_check_enables(smi_info, true);
} else {
smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
@@ -1153,8 +1091,6 @@ static void set_need_watch(void *send_info, bool enable)
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
-static int initialized;
-
static void smi_timeout(unsigned long data)
{
struct smi_info *smi_info = (struct smi_info *) data;
@@ -1172,7 +1108,7 @@ static void smi_timeout(unsigned long data)
* SI_USEC_PER_JIFFY);
smi_result = smi_event_handler(smi_info, time_diff);
- if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+ if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
/* Running with interrupts, only do long timeouts. */
timeout = jiffies + SI_TIMEOUT_JIFFIES;
smi_inc_stat(smi_info, long_timeouts);
@@ -1199,11 +1135,17 @@ do_mod_timer:
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
-static irqreturn_t si_irq_handler(int irq, void *data)
+irqreturn_t ipmi_si_irq_handler(int irq, void *data)
{
struct smi_info *smi_info = data;
unsigned long flags;
+ if (smi_info->io.si_type == SI_BT)
+ /* We need to clear the IRQ flag for the BT interface. */
+ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
+ IPMI_BT_INTMASK_CLEAR_IRQ_BIT
+ | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+
spin_lock_irqsave(&(smi_info->si_lock), flags);
smi_inc_stat(smi_info, interrupts);
@@ -1215,16 +1157,6 @@ static irqreturn_t si_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t si_bt_irq_handler(int irq, void *data)
-{
- struct smi_info *smi_info = data;
- /* We need to clear the IRQ flag for the BT interface. */
- smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
- IPMI_BT_INTMASK_CLEAR_IRQ_BIT
- | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
- return si_irq_handler(irq, data);
-}
-
static int smi_start_processing(void *send_info,
ipmi_smi_t intf)
{
@@ -1238,8 +1170,10 @@ static int smi_start_processing(void *send_info,
smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
/* Try to claim any interrupts. */
- if (new_smi->irq_setup)
- new_smi->irq_setup(new_smi);
+ if (new_smi->io.irq_setup) {
+ new_smi->io.irq_handler_data = new_smi;
+ new_smi->io.irq_setup(&new_smi->io);
+ }
/*
* Check if the user forcefully enabled the daemon.
@@ -1250,14 +1184,14 @@ static int smi_start_processing(void *send_info,
* The BT interface is efficient enough to not need a thread,
* and there is no need for a thread if we have interrupts.
*/
- else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
+ else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
enable = 1;
if (enable) {
new_smi->thread = kthread_run(ipmi_thread, new_smi,
"kipmi%d", new_smi->intf_num);
if (IS_ERR(new_smi->thread)) {
- dev_notice(new_smi->dev, "Could not start"
+ dev_notice(new_smi->io.dev, "Could not start"
" kernel thread due to error %ld, only using"
" timers to drive the interface\n",
PTR_ERR(new_smi->thread));
@@ -1272,10 +1206,10 @@ static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
{
struct smi_info *smi = send_info;
- data->addr_src = smi->addr_source;
- data->dev = smi->dev;
- data->addr_info = smi->addr_info;
- get_device(smi->dev);
+ data->addr_src = smi->io.addr_source;
+ data->dev = smi->io.dev;
+ data->addr_info = smi->io.addr_info;
+ get_device(smi->io.dev);
return 0;
}
@@ -1301,118 +1235,12 @@ static const struct ipmi_smi_handlers handlers = {
.poll = poll,
};
-/*
- * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
- * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS.
- */
-
static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */
-#define DEFAULT_REGSPACING 1
-#define DEFAULT_REGSIZE 1
-
-#ifdef CONFIG_ACPI
-static bool si_tryacpi = true;
-#endif
-#ifdef CONFIG_DMI
-static bool si_trydmi = true;
-#endif
-static bool si_tryplatform = true;
-#ifdef CONFIG_PCI
-static bool si_trypci = true;
-#endif
-static char *si_type[SI_MAX_PARMS];
-#define MAX_SI_TYPE_STR 30
-static char si_type_str[MAX_SI_TYPE_STR];
-static unsigned long addrs[SI_MAX_PARMS];
-static unsigned int num_addrs;
-static unsigned int ports[SI_MAX_PARMS];
-static unsigned int num_ports;
-static int irqs[SI_MAX_PARMS];
-static unsigned int num_irqs;
-static int regspacings[SI_MAX_PARMS];
-static unsigned int num_regspacings;
-static int regsizes[SI_MAX_PARMS];
-static unsigned int num_regsizes;
-static int regshifts[SI_MAX_PARMS];
-static unsigned int num_regshifts;
-static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
-static unsigned int num_slave_addrs;
-
-#define IPMI_IO_ADDR_SPACE 0
-#define IPMI_MEM_ADDR_SPACE 1
static const char * const addr_space_to_str[] = { "i/o", "mem" };
-static int hotmod_handler(const char *val, struct kernel_param *kp);
-
-module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
-MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
- " Documentation/IPMI.txt in the kernel sources for the"
- " gory details.");
-
-#ifdef CONFIG_ACPI
-module_param_named(tryacpi, si_tryacpi, bool, 0);
-MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
- " default scan of the interfaces identified via ACPI");
-#endif
-#ifdef CONFIG_DMI
-module_param_named(trydmi, si_trydmi, bool, 0);
-MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
- " default scan of the interfaces identified via DMI");
-#endif
-module_param_named(tryplatform, si_tryplatform, bool, 0);
-MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
- " default scan of the interfaces identified via platform"
- " interfaces like openfirmware");
-#ifdef CONFIG_PCI
-module_param_named(trypci, si_trypci, bool, 0);
-MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
- " default scan of the interfaces identified via pci");
-#endif
-module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
-MODULE_PARM_DESC(type, "Defines the type of each interface, each"
- " interface separated by commas. The types are 'kcs',"
- " 'smic', and 'bt'. For example si_type=kcs,bt will set"
- " the first interface to kcs and the second to bt");
-module_param_hw_array(addrs, ulong, iomem, &num_addrs, 0);
-MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
- " addresses separated by commas. Only use if an interface"
- " is in memory. Otherwise, set it to zero or leave"
- " it blank.");
-module_param_hw_array(ports, uint, ioport, &num_ports, 0);
-MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
- " addresses separated by commas. Only use if an interface"
- " is a port. Otherwise, set it to zero or leave"
- " it blank.");
-module_param_hw_array(irqs, int, irq, &num_irqs, 0);
-MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
- " addresses separated by commas. Only use if an interface"
- " has an interrupt. Otherwise, set it to zero or leave"
- " it blank.");
-module_param_hw_array(regspacings, int, other, &num_regspacings, 0);
-MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
- " and each successive register used by the interface. For"
- " instance, if the start address is 0xca2 and the spacing"
- " is 2, then the second address is at 0xca4. Defaults"
- " to 1.");
-module_param_hw_array(regsizes, int, other, &num_regsizes, 0);
-MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
- " This should generally be 1, 2, 4, or 8 for an 8-bit,"
- " 16-bit, 32-bit, or 64-bit register. Use this if you"
- " the 8-bit IPMI register has to be read from a larger"
- " register.");
-module_param_hw_array(regshifts, int, other, &num_regshifts, 0);
-MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the."
- " IPMI register, in bits. For instance, if the data"
- " is read from a 32-bit word and the IPMI data is in"
- " bit 8-15, then the shift would be 8");
-module_param_hw_array(slave_addrs, int, other, &num_slave_addrs, 0);
-MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
- " the controller. Normally this is 0x20, but can be"
- " overridden by this parm. This is an array indexed"
- " by interface number.");
module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
" disabled(0). Normally the IPMI driver auto-detects"
@@ -1427,1450 +1255,53 @@ MODULE_PARM_DESC(kipmid_max_busy_us,
" sleeping. 0 (default) means to wait forever. Set to 100-500"
" if kipmid is using up a lot of CPU time.");
-
-static void std_irq_cleanup(struct smi_info *info)
-{
- if (info->si_type == SI_BT)
- /* Disable the interrupt in the BT interface. */
- info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
- free_irq(info->irq, info);
-}
-
-static int std_irq_setup(struct smi_info *info)
-{
- int rv;
-
- if (!info->irq)
- return 0;
-
- if (info->si_type == SI_BT) {
- rv = request_irq(info->irq,
- si_bt_irq_handler,
- IRQF_SHARED,
- DEVICE_NAME,
- info);
- if (!rv)
- /* Enable the interrupt in the BT interface. */
- info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
- IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
- } else
- rv = request_irq(info->irq,
- si_irq_handler,
- IRQF_SHARED,
- DEVICE_NAME,
- info);
- if (rv) {
- dev_warn(info->dev, "%s unable to claim interrupt %d,"
- " running polled\n",
- DEVICE_NAME, info->irq);
- info->irq = 0;
- } else {
- info->irq_cleanup = std_irq_cleanup;
- dev_info(info->dev, "Using irq %d\n", info->irq);
- }
-
- return rv;
-}
-
-static unsigned char port_inb(const struct si_sm_io *io, unsigned int offset)
-{
- unsigned int addr = io->addr_data;
-
- return inb(addr + (offset * io->regspacing));
-}
-
-static void port_outb(const struct si_sm_io *io, unsigned int offset,
- unsigned char b)
-{
- unsigned int addr = io->addr_data;
-
- outb(b, addr + (offset * io->regspacing));
-}
-
-static unsigned char port_inw(const struct si_sm_io *io, unsigned int offset)
-{
- unsigned int addr = io->addr_data;
-
- return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
-}
-
-static void port_outw(const struct si_sm_io *io, unsigned int offset,
- unsigned char b)
-{
- unsigned int addr = io->addr_data;
-
- outw(b << io->regshift, addr + (offset * io->regspacing));
-}
-
-static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset)
-{
- unsigned int addr = io->addr_data;
-
- return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
-}
-
-static void port_outl(const struct si_sm_io *io, unsigned int offset,
- unsigned char b)
-{
- unsigned int addr = io->addr_data;
-
- outl(b << io->regshift, addr+(offset * io->regspacing));
-}
-
-static void port_cleanup(struct smi_info *info)
-{
- unsigned int addr = info->io.addr_data;
- int idx;
-
- if (addr) {
- for (idx = 0; idx < info->io_size; idx++)
- release_region(addr + idx * info->io.regspacing,
- info->io.regsize);
- }
-}
-
-static int port_setup(struct smi_info *info)
-{
- unsigned int addr = info->io.addr_data;
- int idx;
-
- if (!addr)
- return -ENODEV;
-
- info->io_cleanup = port_cleanup;
-
- /*
- * Figure out the actual inb/inw/inl/etc routine to use based
- * upon the register size.
- */
- switch (info->io.regsize) {
- case 1:
- info->io.inputb = port_inb;
- info->io.outputb = port_outb;
- break;
- case 2:
- info->io.inputb = port_inw;
- info->io.outputb = port_outw;
- break;
- case 4:
- info->io.inputb = port_inl;
- info->io.outputb = port_outl;
- break;
- default:
- dev_warn(info->dev, "Invalid register size: %d\n",
- info->io.regsize);
- return -EINVAL;
- }
-
- /*
- * Some BIOSes reserve disjoint I/O regions in their ACPI
- * tables. This causes problems when trying to register the
- * entire I/O region. Therefore we must register each I/O
- * port separately.
- */
- for (idx = 0; idx < info->io_size; idx++) {
- if (request_region(addr + idx * info->io.regspacing,
- info->io.regsize, DEVICE_NAME) == NULL) {
- /* Undo allocations */
- while (idx--)
- release_region(addr + idx * info->io.regspacing,
- info->io.regsize);
- return -EIO;
- }
- }
- return 0;
-}
-
-static unsigned char intf_mem_inb(const struct si_sm_io *io,
- unsigned int offset)
-{
- return readb((io->addr)+(offset * io->regspacing));
-}
-
-static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset,
- unsigned char b)
-{
- writeb(b, (io->addr)+(offset * io->regspacing));
-}
-
-static unsigned char intf_mem_inw(const struct si_sm_io *io,
- unsigned int offset)
-{
- return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
- & 0xff;
-}
-
-static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset,
- unsigned char b)
-{
- writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
-}
-
-static unsigned char intf_mem_inl(const struct si_sm_io *io,
- unsigned int offset)
-{
- return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
- & 0xff;
-}
-
-static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset,
- unsigned char b)
-{
- writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
-}
-
-#ifdef readq
-static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset)
-{
- return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
- & 0xff;
-}
-
-static void mem_outq(const struct si_sm_io *io, unsigned int offset,
- unsigned char b)
-{
- writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
-}
-#endif
-
-static void mem_region_cleanup(struct smi_info *info, int num)
-{
- unsigned long addr = info->io.addr_data;
- int idx;
-
- for (idx = 0; idx < num; idx++)
- release_mem_region(addr + idx * info->io.regspacing,
- info->io.regsize);
-}
-
-static void mem_cleanup(struct smi_info *info)
-{
- if (info->io.addr) {
- iounmap(info->io.addr);
- mem_region_cleanup(info, info->io_size);
- }
-}
-
-static int mem_setup(struct smi_info *info)
-{
- unsigned long addr = info->io.addr_data;
- int mapsize, idx;
-
- if (!addr)
- return -ENODEV;
-
- info->io_cleanup = mem_cleanup;
-
- /*
- * Figure out the actual readb/readw/readl/etc routine to use based
- * upon the register size.
- */
- switch (info->io.regsize) {
- case 1:
- info->io.inputb = intf_mem_inb;
- info->io.outputb = intf_mem_outb;
- break;
- case 2:
- info->io.inputb = intf_mem_inw;
- info->io.outputb = intf_mem_outw;
- break;
- case 4:
- info->io.inputb = intf_mem_inl;
- info->io.outputb = intf_mem_outl;
- break;
-#ifdef readq
- case 8:
- info->io.inputb = mem_inq;
- info->io.outputb = mem_outq;
- break;
-#endif
- default:
- dev_warn(info->dev, "Invalid register size: %d\n",
- info->io.regsize);
- return -EINVAL;
- }
-
- /*
- * Some BIOSes reserve disjoint memory regions in their ACPI
- * tables. This causes problems when trying to request the
- * entire region. Therefore we must request each register
- * separately.
- */
- for (idx = 0; idx < info->io_size; idx++) {
- if (request_mem_region(addr + idx * info->io.regspacing,
- info->io.regsize, DEVICE_NAME) == NULL) {
- /* Undo allocations */
- mem_region_cleanup(info, idx);
- return -EIO;
- }
- }
-
- /*
- * Calculate the total amount of memory to claim. This is an
- * unusual looking calculation, but it avoids claiming any
- * more memory than it has to. It will claim everything
- * between the first address to the end of the last full
- * register.
- */
- mapsize = ((info->io_size * info->io.regspacing)
- - (info->io.regspacing - info->io.regsize));
- info->io.addr = ioremap(addr, mapsize);
- if (info->io.addr == NULL) {
- mem_region_cleanup(info, info->io_size);
- return -EIO;
- }
- return 0;
-}
-
-/*
- * Parms come in as <op1>[:op2[:op3...]]. ops are:
- * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
- * Options are:
- * rsp=<regspacing>
- * rsi=<regsize>
- * rsh=<regshift>
- * irq=<irq>
- * ipmb=<ipmb addr>
- */
-enum hotmod_op { HM_ADD, HM_REMOVE };
-struct hotmod_vals {
- const char *name;
- const int val;
-};
-
-static const struct hotmod_vals hotmod_ops[] = {
- { "add", HM_ADD },
- { "remove", HM_REMOVE },
- { NULL }
-};
-
-static const struct hotmod_vals hotmod_si[] = {
- { "kcs", SI_KCS },
- { "smic", SI_SMIC },
- { "bt", SI_BT },
- { NULL }
-};
-
-static const struct hotmod_vals hotmod_as[] = {
- { "mem", IPMI_MEM_ADDR_SPACE },
- { "i/o", IPMI_IO_ADDR_SPACE },
- { NULL }
-};
-
-static int parse_str(const struct hotmod_vals *v, int *val, char *name,
- char **curr)
-{
- char *s;
- int i;
-
- s = strchr(*curr, ',');
- if (!s) {
- pr_warn(PFX "No hotmod %s given.\n", name);
- return -EINVAL;
- }
- *s = '\0';
- s++;
- for (i = 0; v[i].name; i++) {
- if (strcmp(*curr, v[i].name) == 0) {
- *val = v[i].val;
- *curr = s;
- return 0;
- }
- }
-
- pr_warn(PFX "Invalid hotmod %s '%s'\n", name, *curr);
- return -EINVAL;
-}
-
-static int check_hotmod_int_op(const char *curr, const char *option,
- const char *name, int *val)
-{
- char *n;
-
- if (strcmp(curr, name) == 0) {
- if (!option) {
- pr_warn(PFX "No option given for '%s'\n", curr);
- return -EINVAL;
- }
- *val = simple_strtoul(option, &n, 0);
- if ((*n != '\0') || (*option == '\0')) {
- pr_warn(PFX "Bad option given for '%s'\n", curr);
- return -EINVAL;
- }
- return 1;
- }
- return 0;
-}
-
-static struct smi_info *smi_info_alloc(void)
-{
- struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
-
- if (info)
- spin_lock_init(&info->si_lock);
- return info;
-}
-
-static int hotmod_handler(const char *val, struct kernel_param *kp)
-{
- char *str = kstrdup(val, GFP_KERNEL);
- int rv;
- char *next, *curr, *s, *n, *o;
- enum hotmod_op op;
- enum si_type si_type;
- int addr_space;
- unsigned long addr;
- int regspacing;
- int regsize;
- int regshift;
- int irq;
- int ipmb;
- int ival;
- int len;
- struct smi_info *info;
-
- if (!str)
- return -ENOMEM;
-
- /* Kill any trailing spaces, as we can get a "\n" from echo. */
- len = strlen(str);
- ival = len - 1;
- while ((ival >= 0) && isspace(str[ival])) {
- str[ival] = '\0';
- ival--;
- }
-
- for (curr = str; curr; curr = next) {
- regspacing = 1;
- regsize = 1;
- regshift = 0;
- irq = 0;
- ipmb = 0; /* Choose the default if not specified */
-
- next = strchr(curr, ':');
- if (next) {
- *next = '\0';
- next++;
- }
-
- rv = parse_str(hotmod_ops, &ival, "operation", &curr);
- if (rv)
- break;
- op = ival;
-
- rv = parse_str(hotmod_si, &ival, "interface type", &curr);
- if (rv)
- break;
- si_type = ival;
-
- rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
- if (rv)
- break;
-
- s = strchr(curr, ',');
- if (s) {
- *s = '\0';
- s++;
- }
- addr = simple_strtoul(curr, &n, 0);
- if ((*n != '\0') || (*curr == '\0')) {
- pr_warn(PFX "Invalid hotmod address '%s'\n", curr);
- break;
- }
-
- while (s) {
- curr = s;
- s = strchr(curr, ',');
- if (s) {
- *s = '\0';
- s++;
- }
- o = strchr(curr, '=');
- if (o) {
- *o = '\0';
- o++;
- }
- rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
- if (rv < 0)
- goto out;
- else if (rv)
- continue;
- rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
- if (rv < 0)
- goto out;
- else if (rv)
- continue;
- rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
- if (rv < 0)
- goto out;
- else if (rv)
- continue;
- rv = check_hotmod_int_op(curr, o, "irq", &irq);
- if (rv < 0)
- goto out;
- else if (rv)
- continue;
- rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
- if (rv < 0)
- goto out;
- else if (rv)
- continue;
-
- rv = -EINVAL;
- pr_warn(PFX "Invalid hotmod option '%s'\n", curr);
- goto out;
- }
-
- if (op == HM_ADD) {
- info = smi_info_alloc();
- if (!info) {
- rv = -ENOMEM;
- goto out;
- }
-
- info->addr_source = SI_HOTMOD;
- info->si_type = si_type;
- info->io.addr_data = addr;
- info->io.addr_type = addr_space;
- if (addr_space == IPMI_MEM_ADDR_SPACE)
- info->io_setup = mem_setup;
- else
- info->io_setup = port_setup;
-
- info->io.addr = NULL;
- info->io.regspacing = regspacing;
- if (!info->io.regspacing)
- info->io.regspacing = DEFAULT_REGSPACING;
- info->io.regsize = regsize;
- if (!info->io.regsize)
- info->io.regsize = DEFAULT_REGSIZE;
- info->io.regshift = regshift;
- info->irq = irq;
- if (info->irq)
- info->irq_setup = std_irq_setup;
- info->slave_addr = ipmb;
-
- rv = add_smi(info);
- if (rv) {
- kfree(info);
- goto out;
- }
- mutex_lock(&smi_infos_lock);
- rv = try_smi_init(info);
- mutex_unlock(&smi_infos_lock);
- if (rv) {
- cleanup_one_si(info);
- goto out;
- }
- } else {
- /* remove */
- struct smi_info *e, *tmp_e;
-
- mutex_lock(&smi_infos_lock);
- list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
- if (e->io.addr_type != addr_space)
- continue;
- if (e->si_type != si_type)
- continue;
- if (e->io.addr_data == addr)
- cleanup_one_si(e);
- }
- mutex_unlock(&smi_infos_lock);
- }
- }
- rv = len;
-out:
- kfree(str);
- return rv;
-}
-
-static int hardcode_find_bmc(void)
-{
- int ret = -ENODEV;
- int i;
- struct smi_info *info;
-
- for (i = 0; i < SI_MAX_PARMS; i++) {
- if (!ports[i] && !addrs[i])
- continue;
-
- info = smi_info_alloc();
- if (!info)
- return -ENOMEM;
-
- info->addr_source = SI_HARDCODED;
- pr_info(PFX "probing via hardcoded address\n");
-
- if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
- info->si_type = SI_KCS;
- } else if (strcmp(si_type[i], "smic") == 0) {
- info->si_type = SI_SMIC;
- } else if (strcmp(si_type[i], "bt") == 0) {
- info->si_type = SI_BT;
- } else {
- pr_warn(PFX "Interface type specified for interface %d, was invalid: %s\n",
- i, si_type[i]);
- kfree(info);
- continue;
- }
-
- if (ports[i]) {
- /* An I/O port */
- info->io_setup = port_setup;
- info->io.addr_data = ports[i];
- info->io.addr_type = IPMI_IO_ADDR_SPACE;
- } else if (addrs[i]) {
- /* A memory port */
- info->io_setup = mem_setup;
- info->io.addr_data = addrs[i];
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- } else {
- pr_warn(PFX "Interface type specified for interface %d, but port and address were not set or set to zero.\n",
- i);
- kfree(info);
- continue;
- }
-
- info->io.addr = NULL;
- info->io.regspacing = regspacings[i];
- if (!info->io.regspacing)
- info->io.regspacing = DEFAULT_REGSPACING;
- info->io.regsize = regsizes[i];
- if (!info->io.regsize)
- info->io.regsize = DEFAULT_REGSIZE;
- info->io.regshift = regshifts[i];
- info->irq = irqs[i];
- if (info->irq)
- info->irq_setup = std_irq_setup;
- info->slave_addr = slave_addrs[i];
-
- if (!add_smi(info)) {
- mutex_lock(&smi_infos_lock);
- if (try_smi_init(info))
- cleanup_one_si(info);
- mutex_unlock(&smi_infos_lock);
- ret = 0;
- } else {
- kfree(info);
- }
- }
- return ret;
-}
-
-#ifdef CONFIG_ACPI
-
-/*
- * Once we get an ACPI failure, we don't try any more, because we go
- * through the tables sequentially. Once we don't find a table, there
- * are no more.
- */
-static int acpi_failure;
-
-/* For GPE-type interrupts. */
-static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
- u32 gpe_number, void *context)
-{
- struct smi_info *smi_info = context;
- unsigned long flags;
-
- spin_lock_irqsave(&(smi_info->si_lock), flags);
-
- smi_inc_stat(smi_info, interrupts);
-
- debug_timestamp("ACPI_GPE");
-
- smi_event_handler(smi_info, 0);
- spin_unlock_irqrestore(&(smi_info->si_lock), flags);
-
- return ACPI_INTERRUPT_HANDLED;
-}
-
-static void acpi_gpe_irq_cleanup(struct smi_info *info)
-{
- if (!info->irq)
- return;
-
- acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
-}
-
-static int acpi_gpe_irq_setup(struct smi_info *info)
-{
- acpi_status status;
-
- if (!info->irq)
- return 0;
-
- status = acpi_install_gpe_handler(NULL,
- info->irq,
- ACPI_GPE_LEVEL_TRIGGERED,
- &ipmi_acpi_gpe,
- info);
- if (status != AE_OK) {
- dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
- " running polled\n", DEVICE_NAME, info->irq);
- info->irq = 0;
- return -EINVAL;
- } else {
- info->irq_cleanup = acpi_gpe_irq_cleanup;
- dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
- return 0;
- }
-}
-
-/*
- * Defined at
- * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf
- */
-struct SPMITable {
- s8 Signature[4];
- u32 Length;
- u8 Revision;
- u8 Checksum;
- s8 OEMID[6];
- s8 OEMTableID[8];
- s8 OEMRevision[4];
- s8 CreatorID[4];
- s8 CreatorRevision[4];
- u8 InterfaceType;
- u8 IPMIlegacy;
- s16 SpecificationRevision;
-
- /*
- * Bit 0 - SCI interrupt supported
- * Bit 1 - I/O APIC/SAPIC
- */
- u8 InterruptType;
-
- /*
- * If bit 0 of InterruptType is set, then this is the SCI
- * interrupt in the GPEx_STS register.
- */
- u8 GPE;
-
- s16 Reserved;
-
- /*
- * If bit 1 of InterruptType is set, then this is the I/O
- * APIC/SAPIC interrupt.
- */
- u32 GlobalSystemInterrupt;
-
- /* The actual register address. */
- struct acpi_generic_address addr;
-
- u8 UID[4];
-
- s8 spmi_id[1]; /* A '\0' terminated array starts here. */
-};
-
-static int try_init_spmi(struct SPMITable *spmi)
-{
- struct smi_info *info;
- int rv;
-
- if (spmi->IPMIlegacy != 1) {
- pr_info(PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
- return -ENODEV;
- }
-
- info = smi_info_alloc();
- if (!info) {
- pr_err(PFX "Could not allocate SI data (3)\n");
- return -ENOMEM;
- }
-
- info->addr_source = SI_SPMI;
- pr_info(PFX "probing via SPMI\n");
-
- /* Figure out the interface type. */
- switch (spmi->InterfaceType) {
- case 1: /* KCS */
- info->si_type = SI_KCS;
- break;
- case 2: /* SMIC */
- info->si_type = SI_SMIC;
- break;
- case 3: /* BT */
- info->si_type = SI_BT;
- break;
- case 4: /* SSIF, just ignore */
- kfree(info);
- return -EIO;
- default:
- pr_info(PFX "Unknown ACPI/SPMI SI type %d\n",
- spmi->InterfaceType);
- kfree(info);
- return -EIO;
- }
-
- if (spmi->InterruptType & 1) {
- /* We've got a GPE interrupt. */
- info->irq = spmi->GPE;
- info->irq_setup = acpi_gpe_irq_setup;
- } else if (spmi->InterruptType & 2) {
- /* We've got an APIC/SAPIC interrupt. */
- info->irq = spmi->GlobalSystemInterrupt;
- info->irq_setup = std_irq_setup;
- } else {
- /* Use the default interrupt setting. */
- info->irq = 0;
- info->irq_setup = NULL;
- }
-
- if (spmi->addr.bit_width) {
- /* A (hopefully) properly formed register bit width. */
- info->io.regspacing = spmi->addr.bit_width / 8;
- } else {
- info->io.regspacing = DEFAULT_REGSPACING;
- }
- info->io.regsize = info->io.regspacing;
- info->io.regshift = spmi->addr.bit_offset;
-
- if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- info->io_setup = mem_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- info->io_setup = port_setup;
- info->io.addr_type = IPMI_IO_ADDR_SPACE;
- } else {
- kfree(info);
- pr_warn(PFX "Unknown ACPI I/O Address type\n");
- return -EIO;
- }
- info->io.addr_data = spmi->addr.address;
-
- pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
- (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
- info->io.addr_data, info->io.regsize, info->io.regspacing,
- info->irq);
-
- rv = add_smi(info);
- if (rv)
- kfree(info);
-
- return rv;
-}
-
-static void spmi_find_bmc(void)
-{
- acpi_status status;
- struct SPMITable *spmi;
- int i;
-
- if (acpi_disabled)
- return;
-
- if (acpi_failure)
- return;
-
- for (i = 0; ; i++) {
- status = acpi_get_table(ACPI_SIG_SPMI, i+1,
- (struct acpi_table_header **)&spmi);
- if (status != AE_OK)
- return;
-
- try_init_spmi(spmi);
- }
-}
-#endif
-
-#if defined(CONFIG_DMI) || defined(CONFIG_ACPI)
-struct resource *ipmi_get_info_from_resources(struct platform_device *pdev,
- struct smi_info *info)
-{
- struct resource *res, *res_second;
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (res) {
- info->io_setup = port_setup;
- info->io.addr_type = IPMI_IO_ADDR_SPACE;
- } else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res) {
- info->io_setup = mem_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- }
- }
- if (!res) {
- dev_err(&pdev->dev, "no I/O or memory address\n");
- return NULL;
- }
- info->io.addr_data = res->start;
-
- info->io.regspacing = DEFAULT_REGSPACING;
- res_second = platform_get_resource(pdev,
- (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
- IORESOURCE_IO : IORESOURCE_MEM,
- 1);
- if (res_second) {
- if (res_second->start > info->io.addr_data)
- info->io.regspacing =
- res_second->start - info->io.addr_data;
- }
- info->io.regsize = DEFAULT_REGSIZE;
- info->io.regshift = 0;
-
- return res;
-}
-
-#endif
-
-#ifdef CONFIG_DMI
-static int dmi_ipmi_probe(struct platform_device *pdev)
-{
- struct smi_info *info;
- u8 type, slave_addr;
- int rv;
-
- if (!si_trydmi)
- return -ENODEV;
-
- rv = device_property_read_u8(&pdev->dev, "ipmi-type", &type);
- if (rv)
- return -ENODEV;
-
- info = smi_info_alloc();
- if (!info) {
- pr_err(PFX "Could not allocate SI data\n");
- return -ENOMEM;
- }
-
- info->addr_source = SI_SMBIOS;
- pr_info(PFX "probing via SMBIOS\n");
-
- switch (type) {
- case IPMI_DMI_TYPE_KCS:
- info->si_type = SI_KCS;
- break;
- case IPMI_DMI_TYPE_SMIC:
- info->si_type = SI_SMIC;
- break;
- case IPMI_DMI_TYPE_BT:
- info->si_type = SI_BT;
- break;
- default:
- kfree(info);
- return -EINVAL;
- }
-
- if (!ipmi_get_info_from_resources(pdev, info)) {
- rv = -EINVAL;
- goto err_free;
- }
-
- rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
- if (rv) {
- dev_warn(&pdev->dev, "device has no slave-addr property");
- info->slave_addr = 0x20;
- } else {
- info->slave_addr = slave_addr;
- }
-
- info->irq = platform_get_irq(pdev, 0);
- if (info->irq > 0)
- info->irq_setup = std_irq_setup;
- else
- info->irq = 0;
-
- info->dev = &pdev->dev;
-
- pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
- (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
- info->io.addr_data, info->io.regsize, info->io.regspacing,
- info->irq);
-
- if (add_smi(info))
- kfree(info);
-
- return 0;
-
-err_free:
- kfree(info);
- return rv;
-}
-#else
-static int dmi_ipmi_probe(struct platform_device *pdev)
+void ipmi_irq_finish_setup(struct si_sm_io *io)
{
- return -ENODEV;
+ if (io->si_type == SI_BT)
+ /* Enable the interrupt in the BT interface. */
+ io->outputb(io, IPMI_BT_INTMASK_REG,
+ IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
}
-#endif /* CONFIG_DMI */
-
-#ifdef CONFIG_PCI
-#define PCI_ERMC_CLASSCODE 0x0C0700
-#define PCI_ERMC_CLASSCODE_MASK 0xffffff00
-#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
-#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
-#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
-#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
-
-#define PCI_HP_VENDOR_ID 0x103C
-#define PCI_MMC_DEVICE_ID 0x121A
-#define PCI_MMC_ADDR_CW 0x10
-
-static void ipmi_pci_cleanup(struct smi_info *info)
+void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
- struct pci_dev *pdev = info->addr_source_data;
-
- pci_disable_device(pdev);
+ if (io->si_type == SI_BT)
+ /* Disable the interrupt in the BT interface. */
+ io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}
-static int ipmi_pci_probe_regspacing(struct smi_info *info)
+static void std_irq_cleanup(struct si_sm_io *io)
{
- if (info->si_type == SI_KCS) {
- unsigned char status;
- int regspacing;
-
- info->io.regsize = DEFAULT_REGSIZE;
- info->io.regshift = 0;
- info->io_size = 2;
- info->handlers = &kcs_smi_handlers;
-
- /* detect 1, 4, 16byte spacing */
- for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
- info->io.regspacing = regspacing;
- if (info->io_setup(info)) {
- dev_err(info->dev,
- "Could not setup I/O space\n");
- return DEFAULT_REGSPACING;
- }
- /* write invalid cmd */
- info->io.outputb(&info->io, 1, 0x10);
- /* read status back */
- status = info->io.inputb(&info->io, 1);
- info->io_cleanup(info);
- if (status)
- return regspacing;
- regspacing *= 4;
- }
- }
- return DEFAULT_REGSPACING;
+ ipmi_irq_start_cleanup(io);
+ free_irq(io->irq, io->irq_handler_data);
}
-static int ipmi_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+int ipmi_std_irq_setup(struct si_sm_io *io)
{
int rv;
- int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
- struct smi_info *info;
-
- info = smi_info_alloc();
- if (!info)
- return -ENOMEM;
-
- info->addr_source = SI_PCI;
- dev_info(&pdev->dev, "probing via PCI");
-
- switch (class_type) {
- case PCI_ERMC_CLASSCODE_TYPE_SMIC:
- info->si_type = SI_SMIC;
- break;
-
- case PCI_ERMC_CLASSCODE_TYPE_KCS:
- info->si_type = SI_KCS;
- break;
-
- case PCI_ERMC_CLASSCODE_TYPE_BT:
- info->si_type = SI_BT;
- break;
-
- default:
- kfree(info);
- dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
- return -ENOMEM;
- }
-
- rv = pci_enable_device(pdev);
- if (rv) {
- dev_err(&pdev->dev, "couldn't enable PCI device\n");
- kfree(info);
- return rv;
- }
-
- info->addr_source_cleanup = ipmi_pci_cleanup;
- info->addr_source_data = pdev;
-
- if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
- info->io_setup = port_setup;
- info->io.addr_type = IPMI_IO_ADDR_SPACE;
- } else {
- info->io_setup = mem_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- }
- info->io.addr_data = pci_resource_start(pdev, 0);
-
- info->io.regspacing = ipmi_pci_probe_regspacing(info);
- info->io.regsize = DEFAULT_REGSIZE;
- info->io.regshift = 0;
-
- info->irq = pdev->irq;
- if (info->irq)
- info->irq_setup = std_irq_setup;
- info->dev = &pdev->dev;
- pci_set_drvdata(pdev, info);
-
- dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
- &pdev->resource[0], info->io.regsize, info->io.regspacing,
- info->irq);
+ if (!io->irq)
+ return 0;
- rv = add_smi(info);
+ rv = request_irq(io->irq,
+ ipmi_si_irq_handler,
+ IRQF_SHARED,
+ DEVICE_NAME,
+ io->irq_handler_data);
if (rv) {
- kfree(info);
- pci_disable_device(pdev);
- }
-
- return rv;
-}
-
-static void ipmi_pci_remove(struct pci_dev *pdev)
-{
- struct smi_info *info = pci_get_drvdata(pdev);
- cleanup_one_si(info);
-}
-
-static const struct pci_device_id ipmi_pci_devices[] = {
- { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
- { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
-
-static struct pci_driver ipmi_pci_driver = {
- .name = DEVICE_NAME,
- .id_table = ipmi_pci_devices,
- .probe = ipmi_pci_probe,
- .remove = ipmi_pci_remove,
-};
-#endif /* CONFIG_PCI */
-
-#ifdef CONFIG_OF
-static const struct of_device_id of_ipmi_match[] = {
- { .type = "ipmi", .compatible = "ipmi-kcs",
- .data = (void *)(unsigned long) SI_KCS },
- { .type = "ipmi", .compatible = "ipmi-smic",
- .data = (void *)(unsigned long) SI_SMIC },
- { .type = "ipmi", .compatible = "ipmi-bt",
- .data = (void *)(unsigned long) SI_BT },
- {},
-};
-MODULE_DEVICE_TABLE(of, of_ipmi_match);
-
-static int of_ipmi_probe(struct platform_device *dev)
-{
- const struct of_device_id *match;
- struct smi_info *info;
- struct resource resource;
- const __be32 *regsize, *regspacing, *regshift;
- struct device_node *np = dev->dev.of_node;
- int ret;
- int proplen;
-
- dev_info(&dev->dev, "probing via device tree\n");
-
- match = of_match_device(of_ipmi_match, &dev->dev);
- if (!match)
- return -ENODEV;
-
- if (!of_device_is_available(np))
- return -EINVAL;
-
- ret = of_address_to_resource(np, 0, &resource);
- if (ret) {
- dev_warn(&dev->dev, PFX "invalid address from OF\n");
- return ret;
- }
-
- regsize = of_get_property(np, "reg-size", &proplen);
- if (regsize && proplen != 4) {
- dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
- return -EINVAL;
- }
-
- regspacing = of_get_property(np, "reg-spacing", &proplen);
- if (regspacing && proplen != 4) {
- dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
- return -EINVAL;
- }
-
- regshift = of_get_property(np, "reg-shift", &proplen);
- if (regshift && proplen != 4) {
- dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
- return -EINVAL;
- }
-
- info = smi_info_alloc();
-
- if (!info) {
- dev_err(&dev->dev,
- "could not allocate memory for OF probe\n");
- return -ENOMEM;
- }
-
- info->si_type = (enum si_type) match->data;
- info->addr_source = SI_DEVICETREE;
- info->irq_setup = std_irq_setup;
-
- if (resource.flags & IORESOURCE_IO) {
- info->io_setup = port_setup;
- info->io.addr_type = IPMI_IO_ADDR_SPACE;
- } else {
- info->io_setup = mem_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- }
-
- info->io.addr_data = resource.start;
-
- info->io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
- info->io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
- info->io.regshift = regshift ? be32_to_cpup(regshift) : 0;
-
- info->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
- info->dev = &dev->dev;
-
- dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
- info->io.addr_data, info->io.regsize, info->io.regspacing,
- info->irq);
-
- dev_set_drvdata(&dev->dev, info);
-
- ret = add_smi(info);
- if (ret) {
- kfree(info);
- return ret;
- }
- return 0;
-}
-#else
-#define of_ipmi_match NULL
-static int of_ipmi_probe(struct platform_device *dev)
-{
- return -ENODEV;
-}
-#endif
-
-#ifdef CONFIG_ACPI
-static int find_slave_address(struct smi_info *info, int slave_addr)
-{
-#ifdef CONFIG_IPMI_DMI_DECODE
- if (!slave_addr) {
- int type = -1;
- u32 flags = IORESOURCE_IO;
-
- switch (info->si_type) {
- case SI_KCS:
- type = IPMI_DMI_TYPE_KCS;
- break;
- case SI_BT:
- type = IPMI_DMI_TYPE_BT;
- break;
- case SI_SMIC:
- type = IPMI_DMI_TYPE_SMIC;
- break;
- }
-
- if (info->io.addr_type == IPMI_MEM_ADDR_SPACE)
- flags = IORESOURCE_MEM;
-
- slave_addr = ipmi_dmi_get_slave_addr(type, flags,
- info->io.addr_data);
- }
-#endif
-
- return slave_addr;
-}
-
-static int acpi_ipmi_probe(struct platform_device *dev)
-{
- struct smi_info *info;
- acpi_handle handle;
- acpi_status status;
- unsigned long long tmp;
- struct resource *res;
- int rv = -EINVAL;
-
- if (!si_tryacpi)
- return -ENODEV;
-
- handle = ACPI_HANDLE(&dev->dev);
- if (!handle)
- return -ENODEV;
-
- info = smi_info_alloc();
- if (!info)
- return -ENOMEM;
-
- info->addr_source = SI_ACPI;
- dev_info(&dev->dev, PFX "probing via ACPI\n");
-
- info->addr_info.acpi_info.acpi_handle = handle;
-
- /* _IFT tells us the interface type: KCS, BT, etc */
- status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
- if (ACPI_FAILURE(status)) {
- dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n");
- goto err_free;
- }
-
- switch (tmp) {
- case 1:
- info->si_type = SI_KCS;
- break;
- case 2:
- info->si_type = SI_SMIC;
- break;
- case 3:
- info->si_type = SI_BT;
- break;
- case 4: /* SSIF, just ignore */
- rv = -ENODEV;
- goto err_free;
- default:
- dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
- goto err_free;
- }
-
- res = ipmi_get_info_from_resources(dev, info);
- if (!res) {
- rv = -EINVAL;
- goto err_free;
- }
-
- /* If _GPE exists, use it; otherwise use standard interrupts */
- status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
- if (ACPI_SUCCESS(status)) {
- info->irq = tmp;
- info->irq_setup = acpi_gpe_irq_setup;
+ dev_warn(io->dev, "%s unable to claim interrupt %d,"
+ " running polled\n",
+ DEVICE_NAME, io->irq);
+ io->irq = 0;
} else {
- int irq = platform_get_irq(dev, 0);
-
- if (irq > 0) {
- info->irq = irq;
- info->irq_setup = std_irq_setup;
- }
+ io->irq_cleanup = std_irq_cleanup;
+ ipmi_irq_finish_setup(io);
+ dev_info(io->dev, "Using irq %d\n", io->irq);
}
- info->slave_addr = find_slave_address(info, info->slave_addr);
-
- info->dev = &dev->dev;
- platform_set_drvdata(dev, info);
-
- dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
- res, info->io.regsize, info->io.regspacing,
- info->irq);
-
- rv = add_smi(info);
- if (rv)
- kfree(info);
-
- return rv;
-
-err_free:
- kfree(info);
return rv;
}
-static const struct acpi_device_id acpi_ipmi_match[] = {
- { "IPI0001", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, acpi_ipmi_match);
-#else
-static int acpi_ipmi_probe(struct platform_device *dev)
-{
- return -ENODEV;
-}
-#endif
-
-static int ipmi_probe(struct platform_device *dev)
-{
- if (of_ipmi_probe(dev) == 0)
- return 0;
-
- if (acpi_ipmi_probe(dev) == 0)
- return 0;
-
- return dmi_ipmi_probe(dev);
-}
-
-static int ipmi_remove(struct platform_device *dev)
-{
- struct smi_info *info = dev_get_drvdata(&dev->dev);
-
- cleanup_one_si(info);
- return 0;
-}
-
-static struct platform_driver ipmi_driver = {
- .driver = {
- .name = DEVICE_NAME,
- .of_match_table = of_ipmi_match,
- .acpi_match_table = ACPI_PTR(acpi_ipmi_match),
- },
- .probe = ipmi_probe,
- .remove = ipmi_remove,
-};
-
-#ifdef CONFIG_PARISC
-static int __init ipmi_parisc_probe(struct parisc_device *dev)
-{
- struct smi_info *info;
- int rv;
-
- info = smi_info_alloc();
-
- if (!info) {
- dev_err(&dev->dev,
- "could not allocate memory for PARISC probe\n");
- return -ENOMEM;
- }
-
- info->si_type = SI_KCS;
- info->addr_source = SI_DEVICETREE;
- info->io_setup = mem_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- info->io.addr_data = dev->hpa.start;
- info->io.regsize = 1;
- info->io.regspacing = 1;
- info->io.regshift = 0;
- info->irq = 0; /* no interrupt */
- info->irq_setup = NULL;
- info->dev = &dev->dev;
-
- dev_dbg(&dev->dev, "addr 0x%lx\n", info->io.addr_data);
-
- dev_set_drvdata(&dev->dev, info);
-
- rv = add_smi(info);
- if (rv) {
- kfree(info);
- return rv;
- }
-
- return 0;
-}
-
-static int __exit ipmi_parisc_remove(struct parisc_device *dev)
-{
- cleanup_one_si(dev_get_drvdata(&dev->dev));
- return 0;
-}
-
-static const struct parisc_device_id ipmi_parisc_tbl[] __initconst = {
- { HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(parisc, ipmi_parisc_tbl);
-
-static struct parisc_driver ipmi_parisc_driver __refdata = {
- .name = "ipmi",
- .id_table = ipmi_parisc_tbl,
- .probe = ipmi_parisc_probe,
- .remove = __exit_p(ipmi_parisc_remove),
-};
-#endif /* CONFIG_PARISC */
-
static int wait_for_msg_done(struct smi_info *smi_info)
{
enum si_sm_result smi_result;
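/*
 * Illustrative sketch only (not from this patch): with the bus-specific
 * discovery code (PCI, ACPI/SPMI, DMI, OF, PARISC) moved out of
 * ipmi_si_intf.c, a discovery module now fills in a struct si_sm_io and
 * hands it to ipmi_si_add_smi(), along the same lines as the hotmod hunk
 * above.  Field names are taken from that hunk; error handling is elided:
 *
 *	struct si_sm_io io;
 *
 *	memset(&io, 0, sizeof(io));
 *	io.addr_source = SI_HARDCODED;		// or SI_ACPI, SI_SMBIOS, ...
 *	io.si_type = SI_KCS;
 *	io.addr_type = IPMI_IO_ADDR_SPACE;
 *	io.addr_data = 0xca2;
 *	io.regspacing = DEFAULT_REGSPACING;
 *	io.regsize = DEFAULT_REGSIZE;
 *	io.irq = 0;				// 0 means polled
 *	return ipmi_si_add_smi(&io);
 *
 * When io.io_setup is left unset, ipmi_si_add_smi() picks
 * ipmi_si_port_setup() or ipmi_si_mem_setup() from io.addr_type (see the
 * hunk further below); an interrupt is requested by setting io.irq and
 * io.irq_setup = ipmi_std_irq_setup, as the hotmod code does.
 */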
@@ -2925,7 +1356,8 @@ static int try_get_dev_id(struct smi_info *smi_info)
resp, IPMI_MAX_MSG_LENGTH);
/* Check and record info from the get device id, in case we need it. */
- rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
+ rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
+ resp + 2, resp_len - 2, &smi_info->device_id);
out:
kfree(resp);
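/*
 * Note (an assumption about the companion ipmi_msghandler change, which
 * is not shown in this hunk): ipmi_demangle_device_id() now takes the
 * response netfn and command separately (resp[0] >> 2 is the netfn,
 * resp[1] the command) plus only the data bytes that follow, hence the
 * resp + 2 / resp_len - 2 adjustment above.
 */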
@@ -2949,7 +1381,7 @@ static int get_global_enables(struct smi_info *smi_info, u8 *enables)
rv = wait_for_msg_done(smi_info);
if (rv) {
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Error getting response from get global enables command: %d\n",
rv);
goto out;
@@ -2962,7 +1394,7 @@ static int get_global_enables(struct smi_info *smi_info, u8 *enables)
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Invalid return from get global enables command: %ld %x %x %x\n",
resp_len, resp[0], resp[1], resp[2]);
rv = -EINVAL;
@@ -2997,7 +1429,7 @@ static int set_global_enables(struct smi_info *smi_info, u8 enables)
rv = wait_for_msg_done(smi_info);
if (rv) {
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Error getting response from set global enables command: %d\n",
rv);
goto out;
@@ -3009,7 +1441,7 @@ static int set_global_enables(struct smi_info *smi_info, u8 enables)
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Invalid return from set global enables command: %ld %x %x\n",
resp_len, resp[0], resp[1]);
rv = -EINVAL;
@@ -3045,7 +1477,7 @@ static void check_clr_rcv_irq(struct smi_info *smi_info)
}
if (rv < 0) {
- dev_err(smi_info->dev,
+ dev_err(smi_info->io.dev,
"Cannot check clearing the rcv irq: %d\n", rv);
return;
}
@@ -3055,7 +1487,7 @@ static void check_clr_rcv_irq(struct smi_info *smi_info)
* An error when setting the event buffer bit means
* clearing the bit is not supported.
*/
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
smi_info->cannot_disable_irq = true;
}
@@ -3071,7 +1503,7 @@ static void check_set_rcv_irq(struct smi_info *smi_info)
u8 enables = 0;
int rv;
- if (!smi_info->irq)
+ if (!smi_info->io.irq)
return;
rv = get_global_enables(smi_info, &enables);
@@ -3081,7 +1513,7 @@ static void check_set_rcv_irq(struct smi_info *smi_info)
}
if (rv < 0) {
- dev_err(smi_info->dev,
+ dev_err(smi_info->io.dev,
"Cannot check setting the rcv irq: %d\n", rv);
return;
}
@@ -3091,7 +1523,7 @@ static void check_set_rcv_irq(struct smi_info *smi_info)
* An error when setting the event buffer bit means
* setting the bit is not supported.
*/
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
smi_info->cannot_disable_irq = true;
smi_info->irq_enable_broken = true;
@@ -3173,11 +1605,12 @@ out:
return rv;
}
+#ifdef CONFIG_IPMI_PROC_INTERFACE
static int smi_type_proc_show(struct seq_file *m, void *v)
{
struct smi_info *smi = m->private;
- seq_printf(m, "%s\n", si_to_str[smi->si_type]);
+ seq_printf(m, "%s\n", si_to_str[smi->io.si_type]);
return 0;
}
@@ -3199,7 +1632,7 @@ static int smi_si_stats_proc_show(struct seq_file *m, void *v)
struct smi_info *smi = m->private;
seq_printf(m, "interrupts_enabled: %d\n",
- smi->irq && !smi->interrupt_disabled);
+ smi->io.irq && !smi->interrupt_disabled);
seq_printf(m, "short_timeouts: %u\n",
smi_get_stat(smi, short_timeouts));
seq_printf(m, "long_timeouts: %u\n",
@@ -3243,14 +1676,14 @@ static int smi_params_proc_show(struct seq_file *m, void *v)
seq_printf(m,
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
- si_to_str[smi->si_type],
+ si_to_str[smi->io.si_type],
addr_space_to_str[smi->io.addr_type],
smi->io.addr_data,
smi->io.regspacing,
smi->io.regsize,
smi->io.regshift,
- smi->irq,
- smi->slave_addr);
+ smi->io.irq,
+ smi->io.slave_addr);
return 0;
}
@@ -3266,6 +1699,93 @@ static const struct file_operations smi_params_proc_ops = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif
+
+#define IPMI_SI_ATTR(name) \
+static ssize_t ipmi_##name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct smi_info *smi_info = dev_get_drvdata(dev); \
+ \
+ return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
+
+static ssize_t ipmi_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smi_info *smi_info = dev_get_drvdata(dev);
+
+ return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
+}
+static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
+
+static ssize_t ipmi_interrupts_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smi_info *smi_info = dev_get_drvdata(dev);
+ int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
+
+ return snprintf(buf, 10, "%d\n", enabled);
+}
+static DEVICE_ATTR(interrupts_enabled, S_IRUGO,
+ ipmi_interrupts_enabled_show, NULL);
+
+IPMI_SI_ATTR(short_timeouts);
+IPMI_SI_ATTR(long_timeouts);
+IPMI_SI_ATTR(idles);
+IPMI_SI_ATTR(interrupts);
+IPMI_SI_ATTR(attentions);
+IPMI_SI_ATTR(flag_fetches);
+IPMI_SI_ATTR(hosed_count);
+IPMI_SI_ATTR(complete_transactions);
+IPMI_SI_ATTR(events);
+IPMI_SI_ATTR(watchdog_pretimeouts);
+IPMI_SI_ATTR(incoming_messages);
+
+static ssize_t ipmi_params_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smi_info *smi_info = dev_get_drvdata(dev);
+
+ return snprintf(buf, 200,
+ "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+ si_to_str[smi_info->io.si_type],
+ addr_space_to_str[smi_info->io.addr_type],
+ smi_info->io.addr_data,
+ smi_info->io.regspacing,
+ smi_info->io.regsize,
+ smi_info->io.regshift,
+ smi_info->io.irq,
+ smi_info->io.slave_addr);
+}
+static DEVICE_ATTR(params, S_IRUGO, ipmi_params_show, NULL);
+
+static struct attribute *ipmi_si_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_interrupts_enabled.attr,
+ &dev_attr_short_timeouts.attr,
+ &dev_attr_long_timeouts.attr,
+ &dev_attr_idles.attr,
+ &dev_attr_interrupts.attr,
+ &dev_attr_attentions.attr,
+ &dev_attr_flag_fetches.attr,
+ &dev_attr_hosed_count.attr,
+ &dev_attr_complete_transactions.attr,
+ &dev_attr_events.attr,
+ &dev_attr_watchdog_pretimeouts.attr,
+ &dev_attr_incoming_messages.attr,
+ &dev_attr_params.attr,
+ NULL
+};
+
+static const struct attribute_group ipmi_si_dev_attr_group = {
+ .attrs = ipmi_si_dev_attrs,
+};
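/*
 * Usage sketch (an assumption about the resulting sysfs layout, not part
 * of the patch): for an interface backed by the default platform device
 * created in try_smi_init() below, the attributes registered through
 * ipmi_si_dev_attr_group mirror the old proc files, e.g.:
 *
 *   cat /sys/devices/platform/ipmi_si.0/type
 *   cat /sys/devices/platform/ipmi_si.0/params
 */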
/*
* oem_data_avail_to_receive_msg_avail
@@ -3388,7 +1908,7 @@ setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
if (id->manufacturer_id == DELL_IANA_MFR_ID &&
- smi_info->si_type == SI_BT)
+ smi_info->io.si_type == SI_BT)
register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
@@ -3424,7 +1944,7 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
del_timer_sync(&smi_info->si_timer);
}
-static int is_new_interface(struct smi_info *info)
+static struct smi_info *find_dup_si(struct smi_info *info)
{
struct smi_info *e;
@@ -3437,31 +1957,61 @@ static int is_new_interface(struct smi_info *info)
* slave address but SMBIOS does. Pick it up from
* any source that has it available.
*/
- if (info->slave_addr && !e->slave_addr)
- e->slave_addr = info->slave_addr;
- return 0;
+ if (info->io.slave_addr && !e->io.slave_addr)
+ e->io.slave_addr = info->io.slave_addr;
+ return e;
}
}
- return 1;
+ return NULL;
}
-static int add_smi(struct smi_info *new_smi)
+int ipmi_si_add_smi(struct si_sm_io *io)
{
int rv = 0;
+ struct smi_info *new_smi, *dup;
+
+ if (!io->io_setup) {
+ if (io->addr_type == IPMI_IO_ADDR_SPACE) {
+ io->io_setup = ipmi_si_port_setup;
+ } else if (io->addr_type == IPMI_MEM_ADDR_SPACE) {
+ io->io_setup = ipmi_si_mem_setup;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
+ if (!new_smi)
+ return -ENOMEM;
+ spin_lock_init(&new_smi->si_lock);
+
+ new_smi->io = *io;
mutex_lock(&smi_infos_lock);
- if (!is_new_interface(new_smi)) {
- pr_info(PFX "%s-specified %s state machine: duplicate\n",
- ipmi_addr_src_to_str(new_smi->addr_source),
- si_to_str[new_smi->si_type]);
- rv = -EBUSY;
- goto out_err;
+ dup = find_dup_si(new_smi);
+ if (dup) {
+ if (new_smi->io.addr_source == SI_ACPI &&
+ dup->io.addr_source == SI_SMBIOS) {
+ /* We prefer ACPI over SMBIOS. */
+ dev_info(dup->io.dev,
+ "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
+ si_to_str[new_smi->io.si_type]);
+ cleanup_one_si(dup);
+ } else {
+ dev_info(new_smi->io.dev,
+ "%s-specified %s state machine: duplicate\n",
+ ipmi_addr_src_to_str(new_smi->io.addr_source),
+ si_to_str[new_smi->io.si_type]);
+ rv = -EBUSY;
+ kfree(new_smi);
+ goto out_err;
+ }
}
pr_info(PFX "Adding %s-specified %s state machine\n",
- ipmi_addr_src_to_str(new_smi->addr_source),
- si_to_str[new_smi->si_type]);
+ ipmi_addr_src_to_str(new_smi->io.addr_source),
+ si_to_str[new_smi->io.si_type]);
/* So we know not to free it unless we have allocated one. */
new_smi->intf = NULL;
@@ -3470,6 +2020,14 @@ static int add_smi(struct smi_info *new_smi)
list_add_tail(&new_smi->link, &smi_infos);
+ if (initialized) {
+ rv = try_smi_init(new_smi);
+ if (rv) {
+ mutex_unlock(&smi_infos_lock);
+ cleanup_one_si(new_smi);
+ return rv;
+ }
+ }
out_err:
mutex_unlock(&smi_infos_lock);
return rv;
@@ -3487,13 +2045,13 @@ static int try_smi_init(struct smi_info *new_smi)
char *init_name = NULL;
pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
- ipmi_addr_src_to_str(new_smi->addr_source),
- si_to_str[new_smi->si_type],
+ ipmi_addr_src_to_str(new_smi->io.addr_source),
+ si_to_str[new_smi->io.si_type],
addr_space_to_str[new_smi->io.addr_type],
new_smi->io.addr_data,
- new_smi->slave_addr, new_smi->irq);
+ new_smi->io.slave_addr, new_smi->io.irq);
- switch (new_smi->si_type) {
+ switch (new_smi->io.si_type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
break;
@@ -3515,7 +2073,7 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->intf_num = smi_num;
/* Do this early so it's available for logs. */
- if (!new_smi->dev) {
+ if (!new_smi->io.dev) {
init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d",
new_smi->intf_num);
@@ -3529,33 +2087,33 @@ static int try_smi_init(struct smi_info *new_smi)
pr_err(PFX "Unable to allocate platform device\n");
goto out_err;
}
- new_smi->dev = &new_smi->pdev->dev;
- new_smi->dev->driver = &ipmi_driver.driver;
+ new_smi->io.dev = &new_smi->pdev->dev;
+ new_smi->io.dev->driver = &ipmi_platform_driver.driver;
/* Nulled by device_add() */
- new_smi->dev->init_name = init_name;
+ new_smi->io.dev->init_name = init_name;
}
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
if (!new_smi->si_sm) {
- pr_err(PFX "Could not allocate state machine memory\n");
rv = -ENOMEM;
goto out_err;
}
- new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
- &new_smi->io);
+ new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
+ &new_smi->io);
/* Now that we know the I/O size, we can set up the I/O. */
- rv = new_smi->io_setup(new_smi);
+ rv = new_smi->io.io_setup(&new_smi->io);
if (rv) {
- dev_err(new_smi->dev, "Could not set up I/O space\n");
+ dev_err(new_smi->io.dev, "Could not set up I/O space\n");
goto out_err;
}
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
- if (new_smi->addr_source)
- dev_err(new_smi->dev, "Interface detection failed\n");
+ if (new_smi->io.addr_source)
+ dev_err(new_smi->io.dev,
+ "Interface detection failed\n");
rv = -ENODEV;
goto out_err;
}
@@ -3566,8 +2124,9 @@ static int try_smi_init(struct smi_info *new_smi)
*/
rv = try_get_dev_id(new_smi);
if (rv) {
- if (new_smi->addr_source)
- dev_err(new_smi->dev, "There appears to be no BMC at this location\n");
+ if (new_smi->io.addr_source)
+ dev_err(new_smi->io.dev,
+ "There appears to be no BMC at this location\n");
goto out_err;
}
@@ -3599,7 +2158,7 @@ static int try_smi_init(struct smi_info *new_smi)
* IRQ is defined to be set when non-zero. req_events will
* cause a global flags check that will enable interrupts.
*/
- if (new_smi->irq) {
+ if (new_smi->io.irq) {
new_smi->interrupt_disabled = false;
atomic_set(&new_smi->req_events, 1);
}
@@ -3607,30 +2166,40 @@ static int try_smi_init(struct smi_info *new_smi)
if (new_smi->pdev) {
rv = platform_device_add(new_smi->pdev);
if (rv) {
- dev_err(new_smi->dev,
+ dev_err(new_smi->io.dev,
"Unable to register system interface device: %d\n",
rv);
goto out_err;
}
- new_smi->dev_registered = true;
+ }
+
+ dev_set_drvdata(new_smi->io.dev, new_smi);
+ rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
+ if (rv) {
+ dev_err(new_smi->io.dev,
+ "Unable to add device attributes: error %d\n",
+ rv);
+ goto out_err_stop_timer;
}
rv = ipmi_register_smi(&handlers,
new_smi,
- &new_smi->device_id,
- new_smi->dev,
- new_smi->slave_addr);
+ new_smi->io.dev,
+ new_smi->io.slave_addr);
if (rv) {
- dev_err(new_smi->dev, "Unable to register device: error %d\n",
+ dev_err(new_smi->io.dev,
+ "Unable to register device: error %d\n",
rv);
- goto out_err_stop_timer;
+ goto out_err_remove_attrs;
}
+#ifdef CONFIG_IPMI_PROC_INTERFACE
rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
&smi_type_proc_ops,
new_smi);
if (rv) {
- dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
+ dev_err(new_smi->io.dev,
+ "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
@@ -3638,7 +2207,8 @@ static int try_smi_init(struct smi_info *new_smi)
&smi_si_stats_proc_ops,
new_smi);
if (rv) {
- dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
+ dev_err(new_smi->io.dev,
+ "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
@@ -3646,21 +2216,27 @@ static int try_smi_init(struct smi_info *new_smi)
&smi_params_proc_ops,
new_smi);
if (rv) {
- dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
+ dev_err(new_smi->io.dev,
+ "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
+#endif
/* Don't increment till we know we have succeeded. */
smi_num++;
- dev_info(new_smi->dev, "IPMI %s interface initialized\n",
- si_to_str[new_smi->si_type]);
+ dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
+ si_to_str[new_smi->io.si_type]);
- WARN_ON(new_smi->dev->init_name != NULL);
+ WARN_ON(new_smi->io.dev->init_name != NULL);
kfree(init_name);
return 0;
+out_err_remove_attrs:
+ device_remove_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
+ dev_set_drvdata(new_smi->io.dev, NULL);
+
out_err_stop_timer:
wait_for_timer_and_thread(new_smi);
@@ -3673,9 +2249,9 @@ out_err:
ipmi_unregister_smi(intf);
}
- if (new_smi->irq_cleanup) {
- new_smi->irq_cleanup(new_smi);
- new_smi->irq_cleanup = NULL;
+ if (new_smi->io.irq_cleanup) {
+ new_smi->io.irq_cleanup(&new_smi->io);
+ new_smi->io.irq_cleanup = NULL;
}
/*
@@ -3691,22 +2267,20 @@ out_err:
kfree(new_smi->si_sm);
new_smi->si_sm = NULL;
}
- if (new_smi->addr_source_cleanup) {
- new_smi->addr_source_cleanup(new_smi);
- new_smi->addr_source_cleanup = NULL;
+ if (new_smi->io.addr_source_cleanup) {
+ new_smi->io.addr_source_cleanup(&new_smi->io);
+ new_smi->io.addr_source_cleanup = NULL;
}
- if (new_smi->io_cleanup) {
- new_smi->io_cleanup(new_smi);
- new_smi->io_cleanup = NULL;
+ if (new_smi->io.io_cleanup) {
+ new_smi->io.io_cleanup(&new_smi->io);
+ new_smi->io.io_cleanup = NULL;
}
- if (new_smi->dev_registered) {
+ if (new_smi->pdev) {
platform_device_unregister(new_smi->pdev);
- new_smi->dev_registered = false;
new_smi->pdev = NULL;
} else if (new_smi->pdev) {
platform_device_put(new_smi->pdev);
- new_smi->pdev = NULL;
}
kfree(init_name);
@@ -3716,97 +2290,57 @@ out_err:
static int init_ipmi_si(void)
{
- int i;
- char *str;
- int rv;
struct smi_info *e;
enum ipmi_addr_src type = SI_INVALID;
if (initialized)
return 0;
- initialized = 1;
-
- if (si_tryplatform) {
- rv = platform_driver_register(&ipmi_driver);
- if (rv) {
- pr_err(PFX "Unable to register driver: %d\n", rv);
- return rv;
- }
- }
-
- /* Parse out the si_type string into its components. */
- str = si_type_str;
- if (*str != '\0') {
- for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
- si_type[i] = str;
- str = strchr(str, ',');
- if (str) {
- *str = '\0';
- str++;
- } else {
- break;
- }
- }
- }
pr_info("IPMI System Interface driver.\n");
/* If the user gave us a device, they presumably want us to use it */
- if (!hardcode_find_bmc())
- return 0;
+ if (!ipmi_si_hardcode_find_bmc())
+ goto do_scan;
-#ifdef CONFIG_PCI
- if (si_trypci) {
- rv = pci_register_driver(&ipmi_pci_driver);
- if (rv)
- pr_err(PFX "Unable to register PCI driver: %d\n", rv);
- else
- pci_registered = true;
- }
-#endif
+ ipmi_si_platform_init();
-#ifdef CONFIG_ACPI
- if (si_tryacpi)
- spmi_find_bmc();
-#endif
+ ipmi_si_pci_init();
-#ifdef CONFIG_PARISC
- register_parisc_driver(&ipmi_parisc_driver);
- parisc_registered = true;
-#endif
+ ipmi_si_parisc_init();
/* We prefer devices with interrupts, but in the case of a machine
with multiple BMCs we assume that there will be several instances
of a given type so if we succeed in registering a type then also
try to register everything else of the same type */
-
+do_scan:
mutex_lock(&smi_infos_lock);
list_for_each_entry(e, &smi_infos, link) {
/* Try to register a device if it has an IRQ and we either
haven't successfully registered a device yet or this
device has the same type as one we successfully registered */
- if (e->irq && (!type || e->addr_source == type)) {
+ if (e->io.irq && (!type || e->io.addr_source == type)) {
if (!try_smi_init(e)) {
- type = e->addr_source;
+ type = e->io.addr_source;
}
}
}
/* type will only have been set if we successfully registered an si */
- if (type) {
- mutex_unlock(&smi_infos_lock);
- return 0;
- }
+ if (type)
+ goto skip_fallback_noirq;
/* Fall back to the preferred device */
list_for_each_entry(e, &smi_infos, link) {
- if (!e->irq && (!type || e->addr_source == type)) {
+ if (!e->io.irq && (!type || e->io.addr_source == type)) {
if (!try_smi_init(e)) {
- type = e->addr_source;
+ type = e->io.addr_source;
}
}
}
+
+skip_fallback_noirq:
+ initialized = 1;
mutex_unlock(&smi_infos_lock);
if (type)
@@ -3843,8 +2377,8 @@ static void cleanup_one_si(struct smi_info *to_clean)
}
}
- if (to_clean->dev)
- dev_set_drvdata(to_clean->dev, NULL);
+ device_remove_group(to_clean->io.dev, &ipmi_si_dev_attr_group);
+ dev_set_drvdata(to_clean->io.dev, NULL);
list_del(&to_clean->link);
@@ -3852,8 +2386,8 @@ static void cleanup_one_si(struct smi_info *to_clean)
* Make sure that interrupts, the timer and the thread are
* stopped and will not run again.
*/
- if (to_clean->irq_cleanup)
- to_clean->irq_cleanup(to_clean);
+ if (to_clean->io.irq_cleanup)
+ to_clean->io.irq_cleanup(&to_clean->io);
wait_for_timer_and_thread(to_clean);
/*
@@ -3865,7 +2399,8 @@ static void cleanup_one_si(struct smi_info *to_clean)
poll(to_clean);
schedule_timeout_uninterruptible(1);
}
- disable_si_irq(to_clean, false);
+ if (to_clean->handlers)
+ disable_si_irq(to_clean, false);
while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
poll(to_clean);
schedule_timeout_uninterruptible(1);
@@ -3876,17 +2411,53 @@ static void cleanup_one_si(struct smi_info *to_clean)
kfree(to_clean->si_sm);
- if (to_clean->addr_source_cleanup)
- to_clean->addr_source_cleanup(to_clean);
- if (to_clean->io_cleanup)
- to_clean->io_cleanup(to_clean);
+ if (to_clean->io.addr_source_cleanup)
+ to_clean->io.addr_source_cleanup(&to_clean->io);
+ if (to_clean->io.io_cleanup)
+ to_clean->io.io_cleanup(&to_clean->io);
- if (to_clean->dev_registered)
+ if (to_clean->pdev)
platform_device_unregister(to_clean->pdev);
kfree(to_clean);
}
+int ipmi_si_remove_by_dev(struct device *dev)
+{
+ struct smi_info *e;
+ int rv = -ENOENT;
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry(e, &smi_infos, link) {
+ if (e->io.dev == dev) {
+ cleanup_one_si(e);
+ rv = 0;
+ break;
+ }
+ }
+ mutex_unlock(&smi_infos_lock);
+
+ return rv;
+}
+
+void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
+ unsigned long addr)
+{
+ /* remove */
+ struct smi_info *e, *tmp_e;
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
+ if (e->io.addr_type != addr_space)
+ continue;
+ if (e->io.si_type != si_type)
+ continue;
+ if (e->io.addr_data == addr)
+ cleanup_one_si(e);
+ }
+ mutex_unlock(&smi_infos_lock);
+}
+
static void cleanup_ipmi_si(void)
{
struct smi_info *e, *tmp_e;
@@ -3894,16 +2465,11 @@ static void cleanup_ipmi_si(void)
if (!initialized)
return;
-#ifdef CONFIG_PCI
- if (pci_registered)
- pci_unregister_driver(&ipmi_pci_driver);
-#endif
-#ifdef CONFIG_PARISC
- if (parisc_registered)
- unregister_parisc_driver(&ipmi_parisc_driver);
-#endif
+ ipmi_si_pci_shutdown();
+
+ ipmi_si_parisc_shutdown();
- platform_driver_unregister(&ipmi_driver);
+ ipmi_si_platform_shutdown();
mutex_lock(&smi_infos_lock);
list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c
new file mode 100644
index 000000000000..8796396ecd0f
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_mem_io.c
@@ -0,0 +1,144 @@
+
+#include <linux/io.h>
+#include "ipmi_si.h"
+
+static unsigned char intf_mem_inb(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ return readb((io->addr)+(offset * io->regspacing));
+}
+
+static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeb(b, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inw(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inl(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+#ifdef readq
+static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset)
+{
+ return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void mem_outq(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+#endif
+
+static void mem_region_cleanup(struct si_sm_io *io, int num)
+{
+ unsigned long addr = io->addr_data;
+ int idx;
+
+ for (idx = 0; idx < num; idx++)
+ release_mem_region(addr + idx * io->regspacing,
+ io->regsize);
+}
+
+static void mem_cleanup(struct si_sm_io *io)
+{
+ if (io->addr) {
+ iounmap(io->addr);
+ mem_region_cleanup(io, io->io_size);
+ }
+}
+
+int ipmi_si_mem_setup(struct si_sm_io *io)
+{
+ unsigned long addr = io->addr_data;
+ int mapsize, idx;
+
+ if (!addr)
+ return -ENODEV;
+
+ io->io_cleanup = mem_cleanup;
+
+ /*
+ * Figure out the actual readb/readw/readl/etc routine to use based
+ * upon the register size.
+ */
+ switch (io->regsize) {
+ case 1:
+ io->inputb = intf_mem_inb;
+ io->outputb = intf_mem_outb;
+ break;
+ case 2:
+ io->inputb = intf_mem_inw;
+ io->outputb = intf_mem_outw;
+ break;
+ case 4:
+ io->inputb = intf_mem_inl;
+ io->outputb = intf_mem_outl;
+ break;
+#ifdef readq
+ case 8:
+ io->inputb = mem_inq;
+ io->outputb = mem_outq;
+ break;
+#endif
+ default:
+ dev_warn(io->dev, "Invalid register size: %d\n",
+ io->regsize);
+ return -EINVAL;
+ }
+
+ /*
+ * Some BIOSes reserve disjoint memory regions in their ACPI
+ * tables. This causes problems when trying to request the
+ * entire region. Therefore we must request each register
+ * separately.
+ */
+ for (idx = 0; idx < io->io_size; idx++) {
+ if (request_mem_region(addr + idx * io->regspacing,
+ io->regsize, DEVICE_NAME) == NULL) {
+ /* Undo allocations */
+ mem_region_cleanup(io, idx);
+ return -EIO;
+ }
+ }
+
+ /*
+ * Calculate the total amount of memory to claim. This is an
+ * unusual looking calculation, but it avoids claiming any
+ * more memory than it has to. It will claim everything
+ * from the first address to the end of the last full
+ * register.
+ */
+ mapsize = ((io->io_size * io->regspacing)
+ - (io->regspacing - io->regsize));
+ io->addr = ioremap(addr, mapsize);
+ if (io->addr == NULL) {
+ mem_region_cleanup(io, io->io_size);
+ return -EIO;
+ }
+ return 0;
+}
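A quick numeric check of the mapsize arithmetic in ipmi_si_mem_setup() above, as a stand-alone userspace sketch (the register geometry values are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int io_size = 2;	/* number of registers */
	unsigned int regspacing = 4;	/* bytes between register addresses */
	unsigned int regsize = 1;	/* bytes actually used per register */

	/*
	 * Claim from the first register through the end of the last one:
	 * the registers sit at offsets 0 and 4 and are 1 byte wide, so
	 * only 5 bytes need to be mapped rather than the full 8.
	 */
	unsigned int mapsize = (io_size * regspacing) - (regspacing - regsize);

	printf("mapsize = %u\n", mapsize);	/* prints 5 */
	return 0;
}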
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
new file mode 100644
index 000000000000..090b073ab441
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -0,0 +1,58 @@
+
+#include <linux/module.h>
+#include <asm/hardware.h> /* for register_parisc_driver() stuff */
+#include <asm/parisc-device.h>
+#include "ipmi_si.h"
+
+static bool parisc_registered;
+
+static int __init ipmi_parisc_probe(struct parisc_device *dev)
+{
+ struct si_sm_io io;
+
+ memset(&io, 0, sizeof(io));
+ io.si_type = SI_KCS;
+ io.addr_source = SI_DEVICETREE;
+ io.addr_type = IPMI_MEM_ADDR_SPACE;
+ io.addr_data = dev->hpa.start;
+ io.regsize = 1;
+ io.regspacing = 1;
+ io.regshift = 0;
+ io.irq = 0; /* no interrupt */
+ io.irq_setup = NULL;
+ io.dev = &dev->dev;
+
+ dev_dbg(&dev->dev, "addr 0x%lx\n", io.addr_data);
+
+ return ipmi_si_add_smi(&io);
+}
+
+static int __exit ipmi_parisc_remove(struct parisc_device *dev)
+{
+ return ipmi_si_remove_by_dev(&dev->dev);
+}
+
+static const struct parisc_device_id ipmi_parisc_tbl[] __initconst = {
+ { HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(parisc, ipmi_parisc_tbl);
+
+static struct parisc_driver ipmi_parisc_driver __refdata = {
+ .name = "ipmi",
+ .id_table = ipmi_parisc_tbl,
+ .probe = ipmi_parisc_probe,
+ .remove = __exit_p(ipmi_parisc_remove),
+};
+
+void ipmi_si_parisc_init(void)
+{
+ register_parisc_driver(&ipmi_parisc_driver);
+ parisc_registered = true;
+}
+
+void ipmi_si_parisc_shutdown(void)
+{
+ if (parisc_registered)
+ unregister_parisc_driver(&ipmi_parisc_driver);
+}
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
new file mode 100644
index 000000000000..99771f5cad07
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -0,0 +1,166 @@
+/*
+ * ipmi_si_pci.c
+ *
+ * Handling for IPMI devices on the PCI bus.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "ipmi_si.h"
+
+#define PFX "ipmi_pci: "
+
+static bool pci_registered;
+
+static bool si_trypci = true;
+
+module_param_named(trypci, si_trypci, bool, 0);
+MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via pci");
+
+#define PCI_ERMC_CLASSCODE 0x0C0700
+#define PCI_ERMC_CLASSCODE_MASK 0xffffff00
+#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
+#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
+#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
+#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
+
+#define PCI_HP_VENDOR_ID 0x103C
+#define PCI_MMC_DEVICE_ID 0x121A
+#define PCI_MMC_ADDR_CW 0x10
+
+static void ipmi_pci_cleanup(struct si_sm_io *io)
+{
+ struct pci_dev *pdev = io->addr_source_data;
+
+ pci_disable_device(pdev);
+}
+
+static int ipmi_pci_probe_regspacing(struct si_sm_io *io)
+{
+ if (io->si_type == SI_KCS) {
+ unsigned char status;
+ int regspacing;
+
+ io->regsize = DEFAULT_REGSIZE;
+ io->regshift = 0;
+
+ /* detect 1, 4, 16byte spacing */
+ for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
+ io->regspacing = regspacing;
+ if (io->io_setup(io)) {
+ dev_err(io->dev,
+ "Could not setup I/O space\n");
+ return DEFAULT_REGSPACING;
+ }
+ /* write invalid cmd */
+ io->outputb(io, 1, 0x10);
+ /* read status back */
+ status = io->inputb(io, 1);
+ io->io_cleanup(io);
+ if (status)
+ return regspacing;
+ regspacing *= 4;
+ }
+ }
+ return DEFAULT_REGSPACING;
+}
+
+static int ipmi_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rv;
+ int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
+ struct si_sm_io io;
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = SI_PCI;
+ dev_info(&pdev->dev, "probing via PCI");
+
+ switch (class_type) {
+ case PCI_ERMC_CLASSCODE_TYPE_SMIC:
+ io.si_type = SI_SMIC;
+ break;
+
+ case PCI_ERMC_CLASSCODE_TYPE_KCS:
+ io.si_type = SI_KCS;
+ break;
+
+ case PCI_ERMC_CLASSCODE_TYPE_BT:
+ io.si_type = SI_BT;
+ break;
+
+ default:
+ dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
+ return -ENOMEM;
+ }
+
+ rv = pci_enable_device(pdev);
+ if (rv) {
+ dev_err(&pdev->dev, "couldn't enable PCI device\n");
+ return rv;
+ }
+
+ io.addr_source_cleanup = ipmi_pci_cleanup;
+ io.addr_source_data = pdev;
+
+ if (pci_resource_flags(pdev, 0) & IORESOURCE_IO)
+ io.addr_type = IPMI_IO_ADDR_SPACE;
+ else
+ io.addr_type = IPMI_MEM_ADDR_SPACE;
+ io.addr_data = pci_resource_start(pdev, 0);
+
+ io.regspacing = ipmi_pci_probe_regspacing(&io);
+ io.regsize = DEFAULT_REGSIZE;
+ io.regshift = 0;
+
+ io.irq = pdev->irq;
+ if (io.irq)
+ io.irq_setup = ipmi_std_irq_setup;
+
+ io.dev = &pdev->dev;
+
+ dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+ &pdev->resource[0], io.regsize, io.regspacing, io.irq);
+
+ rv = ipmi_si_add_smi(&io);
+ if (rv)
+ pci_disable_device(pdev);
+
+ return rv;
+}
+
+static void ipmi_pci_remove(struct pci_dev *pdev)
+{
+ ipmi_si_remove_by_dev(&pdev->dev);
+}
+
+static const struct pci_device_id ipmi_pci_devices[] = {
+ { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
+ { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
+
+static struct pci_driver ipmi_pci_driver = {
+ .name = DEVICE_NAME,
+ .id_table = ipmi_pci_devices,
+ .probe = ipmi_pci_probe,
+ .remove = ipmi_pci_remove,
+};
+
+void ipmi_si_pci_init(void)
+{
+ if (si_trypci) {
+ int rv = pci_register_driver(&ipmi_pci_driver);
+ if (rv)
+ pr_err(PFX "Unable to register PCI driver: %d\n", rv);
+ else
+ pci_registered = true;
+ }
+}
+
+void ipmi_si_pci_shutdown(void)
+{
+ if (pci_registered)
+ pci_unregister_driver(&ipmi_pci_driver);
+}
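ipmi_pci_probe_regspacing() above detects the register spacing by writing an invalid KCS command and reading the status register back at spacings of 1, 4 and 16. A minimal sketch of that control flow, with probe_read()/probe_write() as hypothetical stand-ins for the io->inputb/io->outputb accessors:

#include <stdio.h>

static unsigned char probe_read(unsigned int spacing)
{
	/* Pretend the device only responds when registers are 4 bytes apart. */
	return (spacing == 4) ? 0xc0 : 0x00;
}

static void probe_write(unsigned int spacing, unsigned char b)
{
	(void)spacing;
	(void)b;
}

int main(void)
{
	unsigned int spacing;

	for (spacing = 1; spacing <= 16; spacing *= 4) {
		probe_write(spacing, 0x10);	/* invalid KCS command */
		if (probe_read(spacing)) {	/* any status means this spacing works */
			printf("detected spacing %u\n", spacing);
			return 0;
		}
	}
	printf("falling back to default spacing 1\n");
	return 0;
}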
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
new file mode 100644
index 000000000000..9573f1116450
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -0,0 +1,593 @@
+/*
+ * ipmi_si_platform.c
+ *
+ * Handling for platform devices in IPMI (ACPI, OF, and things
+ * coming from the platform).
+ */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/acpi.h>
+#include "ipmi_si.h"
+#include "ipmi_dmi.h"
+
+#define PFX "ipmi_platform: "
+
+static bool si_tryplatform = true;
+#ifdef CONFIG_ACPI
+static bool si_tryacpi = true;
+#endif
+#ifdef CONFIG_OF
+static bool si_tryopenfirmware = true;
+#endif
+#ifdef CONFIG_DMI
+static bool si_trydmi = true;
+#else
+static bool si_trydmi = false;
+#endif
+
+module_param_named(tryplatform, si_tryplatform, bool, 0);
+MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via platform"
+ " interfaces besides ACPI, OpenFirmware, and DMI");
+#ifdef CONFIG_ACPI
+module_param_named(tryacpi, si_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via ACPI");
+#endif
+#ifdef CONFIG_OF
+module_param_named(tryopenfirmware, si_tryopenfirmware, bool, 0);
+MODULE_PARM_DESC(tryopenfirmware, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via OpenFirmware");
+#endif
+#ifdef CONFIG_DMI
+module_param_named(trydmi, si_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via DMI");
+#endif
+
+#ifdef CONFIG_ACPI
+
+/*
+ * Once we get an ACPI failure, we don't try any more, because we go
+ * through the tables sequentially. Once we don't find a table, there
+ * are no more.
+ */
+static int acpi_failure;
+
+/* For GPE-type interrupts. */
+static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
+ u32 gpe_number, void *context)
+{
+ struct si_sm_io *io = context;
+
+ ipmi_si_irq_handler(io->irq, io->irq_handler_data);
+ return ACPI_INTERRUPT_HANDLED;
+}
+
+static void acpi_gpe_irq_cleanup(struct si_sm_io *io)
+{
+ if (!io->irq)
+ return;
+
+ ipmi_irq_start_cleanup(io);
+ acpi_remove_gpe_handler(NULL, io->irq, &ipmi_acpi_gpe);
+}
+
+static int acpi_gpe_irq_setup(struct si_sm_io *io)
+{
+ acpi_status status;
+
+ if (!io->irq)
+ return 0;
+
+ status = acpi_install_gpe_handler(NULL,
+ io->irq,
+ ACPI_GPE_LEVEL_TRIGGERED,
+ &ipmi_acpi_gpe,
+ io);
+ if (status != AE_OK) {
+ dev_warn(io->dev,
+ "Unable to claim ACPI GPE %d, running polled\n",
+ io->irq);
+ io->irq = 0;
+ return -EINVAL;
+ } else {
+ io->irq_cleanup = acpi_gpe_irq_cleanup;
+ ipmi_irq_finish_setup(io);
+ dev_info(io->dev, "Using ACPI GPE %d\n", io->irq);
+ return 0;
+ }
+}
+
+/*
+ * Defined at
+ * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf
+ */
+struct SPMITable {
+ s8 Signature[4];
+ u32 Length;
+ u8 Revision;
+ u8 Checksum;
+ s8 OEMID[6];
+ s8 OEMTableID[8];
+ s8 OEMRevision[4];
+ s8 CreatorID[4];
+ s8 CreatorRevision[4];
+ u8 InterfaceType;
+ u8 IPMIlegacy;
+ s16 SpecificationRevision;
+
+ /*
+ * Bit 0 - SCI interrupt supported
+ * Bit 1 - I/O APIC/SAPIC
+ */
+ u8 InterruptType;
+
+ /*
+ * If bit 0 of InterruptType is set, then this is the SCI
+ * interrupt in the GPEx_STS register.
+ */
+ u8 GPE;
+
+ s16 Reserved;
+
+ /*
+ * If bit 1 of InterruptType is set, then this is the I/O
+ * APIC/SAPIC interrupt.
+ */
+ u32 GlobalSystemInterrupt;
+
+ /* The actual register address. */
+ struct acpi_generic_address addr;
+
+ u8 UID[4];
+
+ s8 spmi_id[1]; /* A '\0' terminated array starts here. */
+};
+
+static int try_init_spmi(struct SPMITable *spmi)
+{
+ struct si_sm_io io;
+
+ if (spmi->IPMIlegacy != 1) {
+ pr_info(PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+ return -ENODEV;
+ }
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = SI_SPMI;
+ pr_info(PFX "probing via SPMI\n");
+
+ /* Figure out the interface type. */
+ switch (spmi->InterfaceType) {
+ case 1: /* KCS */
+ io.si_type = SI_KCS;
+ break;
+ case 2: /* SMIC */
+ io.si_type = SI_SMIC;
+ break;
+ case 3: /* BT */
+ io.si_type = SI_BT;
+ break;
+ case 4: /* SSIF, just ignore */
+ return -EIO;
+ default:
+ pr_info(PFX "Unknown ACPI/SPMI SI type %d\n",
+ spmi->InterfaceType);
+ return -EIO;
+ }
+
+ if (spmi->InterruptType & 1) {
+ /* We've got a GPE interrupt. */
+ io.irq = spmi->GPE;
+ io.irq_setup = acpi_gpe_irq_setup;
+ } else if (spmi->InterruptType & 2) {
+ /* We've got an APIC/SAPIC interrupt. */
+ io.irq = spmi->GlobalSystemInterrupt;
+ io.irq_setup = ipmi_std_irq_setup;
+ } else {
+ /* Use the default interrupt setting. */
+ io.irq = 0;
+ io.irq_setup = NULL;
+ }
+
+ if (spmi->addr.bit_width) {
+ /* A (hopefully) properly formed register bit width. */
+ io.regspacing = spmi->addr.bit_width / 8;
+ } else {
+ io.regspacing = DEFAULT_REGSPACING;
+ }
+ io.regsize = io.regspacing;
+ io.regshift = spmi->addr.bit_offset;
+
+ if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ io.addr_type = IPMI_MEM_ADDR_SPACE;
+ } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ io.addr_type = IPMI_IO_ADDR_SPACE;
+ } else {
+ pr_warn(PFX "Unknown ACPI I/O Address type\n");
+ return -EIO;
+ }
+ io.addr_data = spmi->addr.address;
+
+ pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
+ (io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ io.addr_data, io.regsize, io.regspacing, io.irq);
+
+ return ipmi_si_add_smi(&io);
+}
+
+static void spmi_find_bmc(void)
+{
+ acpi_status status;
+ struct SPMITable *spmi;
+ int i;
+
+ if (acpi_disabled)
+ return;
+
+ if (acpi_failure)
+ return;
+
+ for (i = 0; ; i++) {
+ status = acpi_get_table(ACPI_SIG_SPMI, i+1,
+ (struct acpi_table_header **)&spmi);
+ if (status != AE_OK)
+ return;
+
+ try_init_spmi(spmi);
+ }
+}
+#endif
+
+static struct resource *
+ipmi_get_info_from_resources(struct platform_device *pdev,
+ struct si_sm_io *io)
+{
+ struct resource *res, *res_second;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (res) {
+ io->addr_type = IPMI_IO_ADDR_SPACE;
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ io->addr_type = IPMI_MEM_ADDR_SPACE;
+ }
+ if (!res) {
+ dev_err(&pdev->dev, "no I/O or memory address\n");
+ return NULL;
+ }
+ io->addr_data = res->start;
+
+ io->regspacing = DEFAULT_REGSPACING;
+ res_second = platform_get_resource(pdev,
+ (io->addr_type == IPMI_IO_ADDR_SPACE) ?
+ IORESOURCE_IO : IORESOURCE_MEM,
+ 1);
+ if (res_second) {
+ if (res_second->start > io->addr_data)
+ io->regspacing = res_second->start - io->addr_data;
+ }
+ io->regsize = DEFAULT_REGSIZE;
+ io->regshift = 0;
+
+ return res;
+}
+
+static int platform_ipmi_probe(struct platform_device *pdev)
+{
+ struct si_sm_io io;
+ u8 type, slave_addr, addr_source;
+ int rv;
+
+ rv = device_property_read_u8(&pdev->dev, "addr-source", &addr_source);
+ if (rv)
+ addr_source = SI_PLATFORM;
+ if (addr_source >= SI_LAST)
+ return -EINVAL;
+
+ if (addr_source == SI_SMBIOS) {
+ if (!si_trydmi)
+ return -ENODEV;
+ } else {
+ if (!si_tryplatform)
+ return -ENODEV;
+ }
+
+ rv = device_property_read_u8(&pdev->dev, "ipmi-type", &type);
+ if (rv)
+ return -ENODEV;
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = addr_source;
+ dev_info(&pdev->dev, PFX "probing via %s\n",
+ ipmi_addr_src_to_str(addr_source));
+
+ switch (type) {
+ case SI_KCS:
+ case SI_SMIC:
+ case SI_BT:
+ io.si_type = type;
+ break;
+ default:
+ dev_err(&pdev->dev, "ipmi-type property is invalid\n");
+ return -EINVAL;
+ }
+
+ if (!ipmi_get_info_from_resources(pdev, &io))
+ return -EINVAL;
+
+ rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
+ if (rv) {
+ dev_warn(&pdev->dev, "device has no slave-addr property\n");
+ io.slave_addr = 0x20;
+ } else {
+ io.slave_addr = slave_addr;
+ }
+
+ io.irq = platform_get_irq(pdev, 0);
+ if (io.irq > 0)
+ io.irq_setup = ipmi_std_irq_setup;
+ else
+ io.irq = 0;
+
+ io.dev = &pdev->dev;
+
+ pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
+ (io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ io.addr_data, io.regsize, io.regspacing, io.irq);
+
+ ipmi_si_add_smi(&io);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_ipmi_match[] = {
+ { .type = "ipmi", .compatible = "ipmi-kcs",
+ .data = (void *)(unsigned long) SI_KCS },
+ { .type = "ipmi", .compatible = "ipmi-smic",
+ .data = (void *)(unsigned long) SI_SMIC },
+ { .type = "ipmi", .compatible = "ipmi-bt",
+ .data = (void *)(unsigned long) SI_BT },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_ipmi_match);
+
+static int of_ipmi_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct si_sm_io io;
+ struct resource resource;
+ const __be32 *regsize, *regspacing, *regshift;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+ int proplen;
+
+ if (!si_tryopenfirmware)
+ return -ENODEV;
+
+ dev_info(&pdev->dev, "probing via device tree\n");
+
+ match = of_match_device(of_ipmi_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ if (!of_device_is_available(np))
+ return -EINVAL;
+
+ ret = of_address_to_resource(np, 0, &resource);
+ if (ret) {
+ dev_warn(&pdev->dev, PFX "invalid address from OF\n");
+ return ret;
+ }
+
+ regsize = of_get_property(np, "reg-size", &proplen);
+ if (regsize && proplen != 4) {
+ dev_warn(&pdev->dev, PFX "invalid regsize from OF\n");
+ return -EINVAL;
+ }
+
+ regspacing = of_get_property(np, "reg-spacing", &proplen);
+ if (regspacing && proplen != 4) {
+ dev_warn(&pdev->dev, PFX "invalid regspacing from OF\n");
+ return -EINVAL;
+ }
+
+ regshift = of_get_property(np, "reg-shift", &proplen);
+ if (regshift && proplen != 4) {
+ dev_warn(&pdev->dev, PFX "invalid regshift from OF\n");
+ return -EINVAL;
+ }
+
+ memset(&io, 0, sizeof(io));
+ io.si_type = (enum si_type) match->data;
+ io.addr_source = SI_DEVICETREE;
+ io.irq_setup = ipmi_std_irq_setup;
+
+ if (resource.flags & IORESOURCE_IO)
+ io.addr_type = IPMI_IO_ADDR_SPACE;
+ else
+ io.addr_type = IPMI_MEM_ADDR_SPACE;
+
+ io.addr_data = resource.start;
+
+ io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
+ io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
+ io.regshift = regshift ? be32_to_cpup(regshift) : 0;
+
+ io.irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ io.dev = &pdev->dev;
+
+ dev_dbg(&pdev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
+ io.addr_data, io.regsize, io.regspacing, io.irq);
+
+ return ipmi_si_add_smi(&io);
+}
+#else
+#define of_ipmi_match NULL
+static int of_ipmi_probe(struct platform_device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static int find_slave_address(struct si_sm_io *io, int slave_addr)
+{
+#ifdef CONFIG_IPMI_DMI_DECODE
+ if (!slave_addr) {
+ u32 flags = IORESOURCE_IO;
+
+ if (io->addr_type == IPMI_MEM_ADDR_SPACE)
+ flags = IORESOURCE_MEM;
+
+ slave_addr = ipmi_dmi_get_slave_addr(io->si_type, flags,
+ io->addr_data);
+ }
+#endif
+
+ return slave_addr;
+}
+
+static int acpi_ipmi_probe(struct platform_device *pdev)
+{
+ struct si_sm_io io;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long tmp;
+ struct resource *res;
+ int rv = -EINVAL;
+
+ if (!si_tryacpi)
+ return -ENODEV;
+
+ handle = ACPI_HANDLE(&pdev->dev);
+ if (!handle)
+ return -ENODEV;
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = SI_ACPI;
+ dev_info(&pdev->dev, PFX "probing via ACPI\n");
+
+ io.addr_info.acpi_info.acpi_handle = handle;
+
+ /* _IFT tells us the interface type: KCS, BT, etc */
+ status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&pdev->dev,
+ "Could not find ACPI IPMI interface type\n");
+ goto err_free;
+ }
+
+ switch (tmp) {
+ case 1:
+ io.si_type = SI_KCS;
+ break;
+ case 2:
+ io.si_type = SI_SMIC;
+ break;
+ case 3:
+ io.si_type = SI_BT;
+ break;
+ case 4: /* SSIF, just ignore */
+ rv = -ENODEV;
+ goto err_free;
+ default:
+ dev_info(&pdev->dev, "unknown IPMI type %lld\n", tmp);
+ goto err_free;
+ }
+
+ res = ipmi_get_info_from_resources(pdev, &io);
+ if (!res) {
+ rv = -EINVAL;
+ goto err_free;
+ }
+
+ /* If _GPE exists, use it; otherwise use standard interrupts */
+ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+ if (ACPI_SUCCESS(status)) {
+ io.irq = tmp;
+ io.irq_setup = acpi_gpe_irq_setup;
+ } else {
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq > 0) {
+ io.irq = irq;
+ io.irq_setup = ipmi_std_irq_setup;
+ }
+ }
+
+ io.slave_addr = find_slave_address(&io, io.slave_addr);
+
+ io.dev = &pdev->dev;
+
+ dev_info(io.dev, "%pR regsize %d spacing %d irq %d\n",
+ res, io.regsize, io.regspacing, io.irq);
+
+ return ipmi_si_add_smi(&io);
+
+err_free:
+ return rv;
+}
+
+static const struct acpi_device_id acpi_ipmi_match[] = {
+ { "IPI0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ipmi_match);
+#else
+static int acpi_ipmi_probe(struct platform_device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+static int ipmi_probe(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node && of_ipmi_probe(pdev) == 0)
+ return 0;
+
+ if (acpi_ipmi_probe(pdev) == 0)
+ return 0;
+
+ return platform_ipmi_probe(pdev);
+}
+
+static int ipmi_remove(struct platform_device *pdev)
+{
+ return ipmi_si_remove_by_dev(&pdev->dev);
+}
+
+struct platform_driver ipmi_platform_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = of_ipmi_match,
+ .acpi_match_table = ACPI_PTR(acpi_ipmi_match),
+ },
+ .probe = ipmi_probe,
+ .remove = ipmi_remove,
+};
+
+void ipmi_si_platform_init(void)
+{
+ int rv = platform_driver_register(&ipmi_platform_driver);
+ if (rv)
+ pr_err(PFX "Unable to register driver: %d\n", rv);
+
+#ifdef CONFIG_ACPI
+ if (si_tryacpi)
+ spmi_find_bmc();
+#endif
+
+}
+
+void ipmi_si_platform_shutdown(void)
+{
+ platform_driver_unregister(&ipmi_platform_driver);
+}
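try_init_spmi() above derives the register layout from the SPMI table's generic address structure; a small userspace sketch of that mapping with made-up field values (DEFAULT_REGSPACING assumed to be 1):

#include <stdio.h>

int main(void)
{
	unsigned int bit_width = 8;	/* from spmi->addr.bit_width */
	unsigned int bit_offset = 0;	/* from spmi->addr.bit_offset */

	unsigned int regspacing = bit_width ? bit_width / 8 : 1;
	unsigned int regsize = regspacing;
	unsigned int regshift = bit_offset;

	printf("regspacing=%u regsize=%u regshift=%u\n",
	       regspacing, regsize, regshift);
	return 0;
}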
diff --git a/drivers/char/ipmi/ipmi_si_port_io.c b/drivers/char/ipmi/ipmi_si_port_io.c
new file mode 100644
index 000000000000..e5ce174fbeeb
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_port_io.c
@@ -0,0 +1,112 @@
+
+#include <linux/io.h>
+#include "ipmi_si.h"
+
+static unsigned char port_inb(const struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return inb(addr + (offset * io->regspacing));
+}
+
+static void port_outb(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outb(b, addr + (offset * io->regspacing));
+}
+
+static unsigned char port_inw(const struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outw(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outw(b << io->regshift, addr + (offset * io->regspacing));
+}
+
+static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outl(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outl(b << io->regshift, addr+(offset * io->regspacing));
+}
+
+static void port_cleanup(struct si_sm_io *io)
+{
+ unsigned int addr = io->addr_data;
+ int idx;
+
+ if (addr) {
+ for (idx = 0; idx < io->io_size; idx++)
+ release_region(addr + idx * io->regspacing,
+ io->regsize);
+ }
+}
+
+int ipmi_si_port_setup(struct si_sm_io *io)
+{
+ unsigned int addr = io->addr_data;
+ int idx;
+
+ if (!addr)
+ return -ENODEV;
+
+ io->io_cleanup = port_cleanup;
+
+ /*
+ * Figure out the actual inb/inw/inl/etc routine to use based
+ * upon the register size.
+ */
+ switch (io->regsize) {
+ case 1:
+ io->inputb = port_inb;
+ io->outputb = port_outb;
+ break;
+ case 2:
+ io->inputb = port_inw;
+ io->outputb = port_outw;
+ break;
+ case 4:
+ io->inputb = port_inl;
+ io->outputb = port_outl;
+ break;
+ default:
+ dev_warn(io->dev, "Invalid register size: %d\n",
+ io->regsize);
+ return -EINVAL;
+ }
+
+ /*
+ * Some BIOSes reserve disjoint I/O regions in their ACPI
+ * tables. This causes problems when trying to register the
+ * entire I/O region. Therefore we must register each I/O
+ * port separately.
+ */
+ for (idx = 0; idx < io->io_size; idx++) {
+ if (request_region(addr + idx * io->regspacing,
+ io->regsize, DEVICE_NAME) == NULL) {
+ /* Undo allocations */
+ while (idx--)
+ release_region(addr + idx * io->regspacing,
+ io->regsize);
+ return -EIO;
+ }
+ }
+ return 0;
+}
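The port accessors above and the memory-mapped accessors in ipmi_si_mem_io.c apply the same addressing rule: the register lives offset * regspacing bytes past the base, and the byte of interest sits regshift bits into the wider register. A self-contained sketch of that rule against a fake register file:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[64];

static unsigned char reg_read(unsigned int base, unsigned int offset,
			      unsigned int regspacing, unsigned int regshift)
{
	/* Registers are regspacing slots apart; shift and mask out one byte. */
	uint32_t raw = fake_regs[base + offset * regspacing];

	return (raw >> regshift) & 0xff;
}

int main(void)
{
	fake_regs[8] = 0xab00;	/* register 2 of a base-0, spacing-4 layout */

	printf("0x%02x\n", reg_read(0, 2, 4, 8));	/* prints 0xab */
	return 0;
}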
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
index a705027c0493..aa8d88ab4433 100644
--- a/drivers/char/ipmi/ipmi_si_sm.h
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -34,12 +34,18 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/ipmi.h>
+
/*
* This is defined by the state machines themselves, it is an opaque
* data type for them to use.
*/
struct si_sm_data;
+enum si_type {
+ SI_TYPE_INVALID, SI_KCS, SI_SMIC, SI_BT
+};
+
/*
* The structure for doing I/O in the state machine. The state
* machine doesn't have the actual I/O routines, they are done through
@@ -61,6 +67,23 @@ struct si_sm_io {
int regshift;
int addr_type;
long addr_data;
+ enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
+ void (*addr_source_cleanup)(struct si_sm_io *io);
+ void *addr_source_data;
+ union ipmi_smi_info_union addr_info;
+
+ int (*io_setup)(struct si_sm_io *info);
+ void (*io_cleanup)(struct si_sm_io *info);
+ unsigned int io_size;
+
+ int irq;
+ int (*irq_setup)(struct si_sm_io *io);
+ void *irq_handler_data;
+ void (*irq_cleanup)(struct si_sm_io *io);
+
+ u8 slave_addr;
+ enum si_type si_type;
+ struct device *dev;
};
/* Results of SMI events. */
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 0aea3bcb6158..466b3a1c0adf 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -53,6 +53,7 @@
#include <linux/acpi.h>
#include <linux/ctype.h>
#include <linux/time64.h>
+#include "ipmi_si_sm.h"
#include "ipmi_dmi.h"
#define PFX "ipmi_ssif: "
@@ -267,9 +268,6 @@ struct ssif_info {
unsigned char *i2c_data;
unsigned int i2c_size;
- /* From the device id response. */
- struct ipmi_device_id device_id;
-
struct timer_list retry_timer;
int retries_left;
@@ -1176,6 +1174,61 @@ MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of
static DEFINE_MUTEX(ssif_infos_mutex);
static LIST_HEAD(ssif_infos);
+#define IPMI_SSIF_ATTR(name) \
+static ssize_t ipmi_##name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct ssif_info *ssif_info = dev_get_drvdata(dev); \
+ \
+ return snprintf(buf, 10, "%u\n", ssif_get_stat(ssif_info, name));\
+} \
+static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
+
+static ssize_t ipmi_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, 10, "ssif\n");
+}
+static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
+
+IPMI_SSIF_ATTR(sent_messages);
+IPMI_SSIF_ATTR(sent_messages_parts);
+IPMI_SSIF_ATTR(send_retries);
+IPMI_SSIF_ATTR(send_errors);
+IPMI_SSIF_ATTR(received_messages);
+IPMI_SSIF_ATTR(received_message_parts);
+IPMI_SSIF_ATTR(receive_retries);
+IPMI_SSIF_ATTR(receive_errors);
+IPMI_SSIF_ATTR(flag_fetches);
+IPMI_SSIF_ATTR(hosed);
+IPMI_SSIF_ATTR(events);
+IPMI_SSIF_ATTR(watchdog_pretimeouts);
+IPMI_SSIF_ATTR(alerts);
+
+static struct attribute *ipmi_ssif_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_sent_messages.attr,
+ &dev_attr_sent_messages_parts.attr,
+ &dev_attr_send_retries.attr,
+ &dev_attr_send_errors.attr,
+ &dev_attr_received_messages.attr,
+ &dev_attr_received_message_parts.attr,
+ &dev_attr_receive_retries.attr,
+ &dev_attr_receive_errors.attr,
+ &dev_attr_flag_fetches.attr,
+ &dev_attr_hosed.attr,
+ &dev_attr_events.attr,
+ &dev_attr_watchdog_pretimeouts.attr,
+ &dev_attr_alerts.attr,
+ NULL
+};
+
+static const struct attribute_group ipmi_ssif_dev_attr_group = {
+ .attrs = ipmi_ssif_dev_attrs,
+};
+
static int ssif_remove(struct i2c_client *client)
{
struct ssif_info *ssif_info = i2c_get_clientdata(client);
@@ -1196,6 +1249,9 @@ static int ssif_remove(struct i2c_client *client)
}
ssif_info->intf = NULL;
+ device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group);
+ dev_set_drvdata(&ssif_info->client->dev, NULL);
+
/* make sure the driver is not looking for flags any more. */
while (ssif_info->ssif_state != SSIF_NORMAL)
schedule_timeout(1);
@@ -1289,6 +1345,7 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
return rv;
}
+#ifdef CONFIG_IPMI_PROC_INTERFACE
static int smi_type_proc_show(struct seq_file *m, void *v)
{
seq_puts(m, "ssif\n");
@@ -1352,6 +1409,7 @@ static const struct file_operations smi_stats_proc_ops = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif
static int strcmp_nospace(char *s1, char *s2)
{
@@ -1425,7 +1483,7 @@ static int find_slave_address(struct i2c_client *client, int slave_addr)
#ifdef CONFIG_IPMI_DMI_DECODE
if (!slave_addr)
slave_addr = ipmi_dmi_get_slave_addr(
- IPMI_DMI_TYPE_SSIF,
+ SI_TYPE_INVALID,
i2c_adapter_id(client->adapter),
client->addr);
#endif
@@ -1481,20 +1539,6 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
ipmi_addr_src_to_str(ssif_info->addr_source),
client->addr, client->adapter->name, slave_addr);
- /*
- * Do a Get Device ID command, since it comes back with some
- * useful info.
- */
- msg[0] = IPMI_NETFN_APP_REQUEST << 2;
- msg[1] = IPMI_GET_DEVICE_ID_CMD;
- rv = do_cmd(client, 2, msg, &len, resp);
- if (rv)
- goto out;
-
- rv = ipmi_demangle_device_id(resp, len, &ssif_info->device_id);
- if (rv)
- goto out;
-
ssif_info->client = client;
i2c_set_clientdata(client, ssif_info);
@@ -1682,16 +1726,26 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
+ dev_set_drvdata(&ssif_info->client->dev, ssif_info);
+ rv = device_add_group(&ssif_info->client->dev,
+ &ipmi_ssif_dev_attr_group);
+ if (rv) {
+ dev_err(&ssif_info->client->dev,
+ "Unable to add device attributes: error %d\n",
+ rv);
+ goto out;
+ }
+
rv = ipmi_register_smi(&ssif_info->handlers,
ssif_info,
- &ssif_info->device_id,
&ssif_info->client->dev,
slave_addr);
if (rv) {
pr_err(PFX "Unable to register device: error %d\n", rv);
- goto out;
+ goto out_remove_attr;
}
+#ifdef CONFIG_IPMI_PROC_INTERFACE
rv = ipmi_smi_add_proc_entry(ssif_info->intf, "type",
&smi_type_proc_ops,
ssif_info);
@@ -1707,6 +1761,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
pr_err(PFX "Unable to create proc entry: %d\n", rv);
goto out_err_unreg;
}
+#endif
out:
if (rv) {
@@ -1725,8 +1780,14 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
kfree(resp);
return rv;
- out_err_unreg:
+#ifdef CONFIG_IPMI_PROC_INTERFACE
+out_err_unreg:
ipmi_unregister_smi(ssif_info->intf);
+#endif
+
+out_remove_attr:
+ device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group);
+ dev_set_drvdata(&ssif_info->client->dev, NULL);
goto out;
}
@@ -1953,20 +2014,13 @@ static void spmi_find_bmc(void) { }
#ifdef CONFIG_DMI
static int dmi_ipmi_probe(struct platform_device *pdev)
{
- u8 type, slave_addr = 0;
+ u8 slave_addr = 0;
u16 i2c_addr;
int rv;
if (!ssif_trydmi)
return -ENODEV;
- rv = device_property_read_u8(&pdev->dev, "ipmi-type", &type);
- if (rv)
- return -ENODEV;
-
- if (type != IPMI_DMI_TYPE_SSIF)
- return -ENODEV;
-
rv = device_property_read_u16(&pdev->dev, "i2c-addr", &i2c_addr);
if (rv) {
dev_warn(&pdev->dev, PFX "No i2c-addr property\n");
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 3d832d0362a4..76b270678b50 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1009,9 +1009,14 @@ static void ipmi_register_watchdog(int ipmi_intf)
goto out;
}
- ipmi_get_version(watchdog_user,
- &ipmi_version_major,
- &ipmi_version_minor);
+ rv = ipmi_get_version(watchdog_user,
+ &ipmi_version_major,
+ &ipmi_version_minor);
+ if (rv) {
+ pr_warn(PFX "Unable to get IPMI version, assuming 1.0\n");
+ ipmi_version_major = 1;
+ ipmi_version_minor = 0;
+ }
rv = misc_register(&ipmi_wdog_miscdev);
if (rv < 0) {
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index cd53771b9ae7..370e0a64ead1 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -659,9 +659,9 @@ static void terminate_monitor(struct cm4000_dev *dev)
* is already doing that for you.
*/
-static void monitor_card(unsigned long p)
+static void monitor_card(struct timer_list *t)
{
- struct cm4000_dev *dev = (struct cm4000_dev *) p;
+ struct cm4000_dev *dev = from_timer(dev, t, timer);
unsigned int iobase = dev->p_dev->resource[0]->start;
unsigned short s;
struct ptsreq ptsreq;
@@ -1374,7 +1374,7 @@ static void start_monitor(struct cm4000_dev *dev)
DEBUGP(3, dev, "-> start_monitor\n");
if (!dev->monitor_running) {
DEBUGP(5, dev, "create, init and add timer\n");
- setup_timer(&dev->timer, monitor_card, (unsigned long)dev);
+ timer_setup(&dev->timer, monitor_card, 0);
dev->monitor_running = 1;
mod_timer(&dev->timer, jiffies);
} else
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 382c864814d9..9a1aaf538758 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -104,9 +104,9 @@ static inline unsigned char xinb(unsigned short port)
/* poll the device fifo status register. not to be confused with
* the poll syscall. */
-static void cm4040_do_poll(unsigned long dummy)
+static void cm4040_do_poll(struct timer_list *t)
{
- struct reader_dev *dev = (struct reader_dev *) dummy;
+ struct reader_dev *dev = from_timer(dev, t, poll_timer);
unsigned int obs = xinb(dev->p_dev->resource[0]->start
+ REG_OFFSET_BUFFER_STATUS);
@@ -465,7 +465,6 @@ static int cm4040_open(struct inode *inode, struct file *filp)
link->open = 1;
- dev->poll_timer.data = (unsigned long) dev;
mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD);
DEBUGP(2, dev, "<- cm4040_open (successfully)\n");
@@ -585,7 +584,7 @@ static int reader_probe(struct pcmcia_device *link)
init_waitqueue_head(&dev->poll_wait);
init_waitqueue_head(&dev->read_wait);
init_waitqueue_head(&dev->write_wait);
- setup_timer(&dev->poll_timer, cm4040_do_poll, 0);
+ timer_setup(&dev->poll_timer, cm4040_do_poll, 0);
ret = reader_config(link, i);
if (ret) {
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 62be953e5fb0..aa502e9fb7fa 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -375,7 +375,7 @@ static void reset_device(MGSLPC_INFO *info);
static void hdlc_mode(MGSLPC_INFO *info);
static void async_mode(MGSLPC_INFO *info);
-static void tx_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
static int carrier_raised(struct tty_port *port);
static void dtr_rts(struct tty_port *port, int onoff);
@@ -1289,7 +1289,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
memset(&info->icount, 0, sizeof(info->icount));
- setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
+ timer_setup(&info->tx_timer, tx_timeout, 0);
/* Allocate and claim adapter resources */
retval = claim_resources(info);
@@ -3846,9 +3846,9 @@ static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
/* HDLC frame time out
* update stats and do tx completion processing
*/
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
{
- MGSLPC_INFO *info = (MGSLPC_INFO*)context;
+ MGSLPC_INFO *info = from_timer(info, t, tx_timer);
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
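The pcmcia hunks above all follow the same timer-API conversion: the callback now receives a struct timer_list pointer and recovers its container with from_timer(), and timer_setup() replaces setup_timer() with its unsigned long cookie. A kernel-style sketch of the pattern (my_dev and its fields are illustrative only):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list poll_timer;
	int polls;
};

static void my_dev_poll(struct timer_list *t)
{
	/* Recover the containing structure from the embedded timer. */
	struct my_dev *dev = from_timer(dev, t, poll_timer);

	dev->polls++;
	mod_timer(&dev->poll_timer, jiffies + HZ);
}

static void my_dev_start(struct my_dev *dev)
{
	/* No more passing the device around as an unsigned long cookie. */
	timer_setup(&dev->poll_timer, my_dev_poll, 0);
	mod_timer(&dev->poll_timer, jiffies + HZ);
}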
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8ad92707e45f..ec42c8bb9b0d 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -259,7 +259,6 @@
#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
-#include <linux/kmemcheck.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/syscalls.h>
@@ -641,7 +640,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
return;
retry:
- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count = orig = READ_ONCE(r->entropy_count);
if (nfrac < 0) {
/* Debit */
entropy_count += nfrac;
@@ -1265,7 +1264,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
/* Can we pull enough? */
retry:
- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count = orig = READ_ONCE(r->entropy_count);
ibytes = nbytes;
/* never pull more than available */
have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 610638a80383..461bf0b8a094 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -110,6 +110,12 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
return -EFAULT;
}
+ if (in_size < 6 ||
+ in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
+ mutex_unlock(&priv->buffer_mutex);
+ return -EINVAL;
+ }
+
/* atomic tpm command send and result receive. We only hold the ops
* lock during this period so that the tpm can be unregistered even if
* the char dev is held open.
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 86f38d239476..83a77a445538 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -20,44 +20,48 @@
#include <linux/device.h>
#include "tpm.h"
-#define READ_PUBEK_RESULT_SIZE 314
+struct tpm_readpubek_out {
+ u8 algorithm[4];
+ u8 encscheme[2];
+ u8 sigscheme[2];
+ __be32 paramsize;
+ u8 parameters[12];
+ __be32 keysize;
+ u8 modulus[256];
+ u8 checksum[20];
+} __packed;
+
#define READ_PUBEK_RESULT_MIN_BODY_SIZE (28 + 256)
#define TPM_ORD_READPUBEK 124
-static const struct tpm_input_header tpm_readpubek_header = {
- .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
- .length = cpu_to_be32(30),
- .ordinal = cpu_to_be32(TPM_ORD_READPUBEK)
-};
+
static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- u8 *data;
- struct tpm_cmd_t tpm_cmd;
- ssize_t err;
- int i, rc;
+ struct tpm_buf tpm_buf;
+ struct tpm_readpubek_out *out;
+ ssize_t rc;
+ int i;
char *str = buf;
struct tpm_chip *chip = to_tpm_chip(dev);
+ char anti_replay[20];
- memset(&tpm_cmd, 0, sizeof(tpm_cmd));
-
- tpm_cmd.header.in = tpm_readpubek_header;
- err = tpm_transmit_cmd(chip, NULL, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
- READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
- "attempting to read the PUBEK");
- if (err)
- goto out;
-
- /*
- ignore header 10 bytes
- algorithm 32 bits (1 == RSA )
- encscheme 16 bits
- sigscheme 16 bits
- parameters (RSA 12->bytes: keybit, #primes, expbit)
- keylenbytes 32 bits
- 256 byte modulus
- ignore checksum 20 bytes
- */
- data = tpm_cmd.params.readpubek_out_buffer;
+ memset(&anti_replay, 0, sizeof(anti_replay));
+
+ rc = tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK);
+ if (rc)
+ return rc;
+
+ tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay));
+
+ rc = tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
+ READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
+ "attempting to read the PUBEK");
+ if (rc) {
+ tpm_buf_destroy(&tpm_buf);
+ return 0;
+ }
+
+ out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
str +=
sprintf(str,
"Algorithm: %02X %02X %02X %02X\n"
@@ -68,21 +72,26 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
"%02X %02X %02X %02X\n"
"Modulus length: %d\n"
"Modulus:\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5],
- data[6], data[7],
- data[12], data[13], data[14], data[15],
- data[16], data[17], data[18], data[19],
- data[20], data[21], data[22], data[23],
- be32_to_cpu(*((__be32 *) (data + 24))));
+ out->algorithm[0], out->algorithm[1], out->algorithm[2],
+ out->algorithm[3],
+ out->encscheme[0], out->encscheme[1],
+ out->sigscheme[0], out->sigscheme[1],
+ out->parameters[0], out->parameters[1],
+ out->parameters[2], out->parameters[3],
+ out->parameters[4], out->parameters[5],
+ out->parameters[6], out->parameters[7],
+ out->parameters[8], out->parameters[9],
+ out->parameters[10], out->parameters[11],
+ be32_to_cpu(out->keysize));
for (i = 0; i < 256; i++) {
- str += sprintf(str, "%02X ", data[i + 28]);
+ str += sprintf(str, "%02X ", out->modulus[i]);
if ((i + 1) % 16 == 0)
str += sprintf(str, "\n");
}
-out:
+
rc = str - buf;
+ tpm_buf_destroy(&tpm_buf);
return rc;
}
static DEVICE_ATTR_RO(pubek);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 2d5466a72e40..528cffbd49d3 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -345,17 +345,6 @@ enum tpm_sub_capabilities {
TPM_CAP_PROP_TIS_DURATION = 0x120,
};
-struct tpm_readpubek_params_out {
- u8 algorithm[4];
- u8 encscheme[2];
- u8 sigscheme[2];
- __be32 paramsize;
- u8 parameters[12]; /*assuming RSA*/
- __be32 keysize;
- u8 modulus[256];
- u8 checksum[20];
-} __packed;
-
typedef union {
struct tpm_input_header in;
struct tpm_output_header out;
@@ -385,8 +374,6 @@ struct tpm_getrandom_in {
} __packed;
typedef union {
- struct tpm_readpubek_params_out readpubek_out;
- u8 readpubek_out_buffer[sizeof(struct tpm_readpubek_params_out)];
struct tpm_pcrread_in pcrread_in;
struct tpm_pcrread_out pcrread_out;
struct tpm_getrandom_in getrandom_in;
@@ -557,7 +544,7 @@ static inline void tpm_add_ppi(struct tpm_chip *chip)
}
#endif
-static inline inline u32 tpm2_rc_value(u32 rc)
+static inline u32 tpm2_rc_value(u32 rc)
{
return (rc & BIT(7)) ? rc & 0xff : rc;
}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index e1a41b788f08..f40d20671a78 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -834,72 +834,43 @@ static const struct tpm_input_header tpm2_selftest_header = {
};
/**
- * tpm2_continue_selftest() - start a self test
- *
- * @chip: TPM chip to use
- * @full: test all commands instead of testing only those that were not
- * previously tested.
- *
- * Return: Same as with tpm_transmit_cmd with exception of RC_TESTING.
- */
-static int tpm2_start_selftest(struct tpm_chip *chip, bool full)
-{
- int rc;
- struct tpm2_cmd cmd;
-
- cmd.header.in = tpm2_selftest_header;
- cmd.params.selftest_in.full_test = full;
-
- rc = tpm_transmit_cmd(chip, NULL, &cmd, TPM2_SELF_TEST_IN_SIZE, 0, 0,
- "continue selftest");
-
- /* At least some prototype chips seem to give RC_TESTING error
- * immediately. This is a workaround for that.
- */
- if (rc == TPM2_RC_TESTING) {
- dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n");
- rc = 0;
- }
-
- return rc;
-}
-
-/**
- * tpm2_do_selftest() - run a full self test
+ * tpm2_do_selftest() - ensure that all self tests have passed
*
* @chip: TPM chip to use
*
* Return: Same as with tpm_transmit_cmd.
*
- * During the self test TPM2 commands return with the error code RC_TESTING.
- * Waiting is done by issuing PCR read until it executes successfully.
+ * The TPM can either run all self tests synchronously and then return
+ * RC_SUCCESS once all tests were successful. Or it can choose to run the tests
+ * asynchronously and return RC_TESTING immediately while the self tests still
+ * execute in the background. This function handles both cases and waits until
+ * all tests have completed.
*/
static int tpm2_do_selftest(struct tpm_chip *chip)
{
int rc;
- unsigned int loops;
- unsigned int delay_msec = 100;
- unsigned long duration;
- int i;
-
- duration = tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST);
+ unsigned int delay_msec = 20;
+ long duration;
+ struct tpm2_cmd cmd;
- loops = jiffies_to_msecs(duration) / delay_msec;
+ duration = jiffies_to_msecs(
+ tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST));
- rc = tpm2_start_selftest(chip, true);
- if (rc)
- return rc;
+ while (duration > 0) {
+ cmd.header.in = tpm2_selftest_header;
+ cmd.params.selftest_in.full_test = 0;
- for (i = 0; i < loops; i++) {
- /* Attempt to read a PCR value */
- rc = tpm2_pcr_read(chip, 0, NULL);
- if (rc < 0)
- break;
+ rc = tpm_transmit_cmd(chip, NULL, &cmd, TPM2_SELF_TEST_IN_SIZE,
+ 0, 0, "continue selftest");
if (rc != TPM2_RC_TESTING)
break;
tpm_msleep(delay_msec);
+ duration -= delay_msec;
+
+ /* wait longer the next round */
+ delay_msec *= 2;
}
return rc;
@@ -1009,7 +980,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
{
struct tpm_buf buf;
u32 nr_commands;
- u32 *attrs;
+ __be32 *attrs;
u32 cc;
int i;
int rc;
@@ -1049,7 +1020,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
chip->nr_commands = nr_commands;
- attrs = (u32 *)&buf.data[TPM_HEADER_SIZE + 9];
+ attrs = (__be32 *)&buf.data[TPM_HEADER_SIZE + 9];
for (i = 0; i < nr_commands; i++, attrs++) {
chip->cc_attrs_tbl[i] = be32_to_cpup(attrs);
cc = chip->cc_attrs_tbl[i] & 0xFFFF;
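The tpm2_do_selftest() rework earlier in this file polls the TPM with an exponentially growing delay instead of a fixed 100 ms step. A userspace sketch of that backoff loop, with tpm_poll_once() as a hypothetical stand-in for the transmit call (the actual sleep is elided):

#include <stdio.h>

static int tpm_poll_once(int calls)
{
	/* Pretend the TPM keeps answering "still testing" for three polls. */
	return (calls < 3) ? 0x90a /* RC_TESTING */ : 0;
}

int main(void)
{
	long budget_ms = 2000;	/* ordinal duration for TPM2_CC_SELF_TEST */
	long delay_ms = 20;
	int calls = 0;
	int rc = 0;

	while (budget_ms > 0) {
		rc = tpm_poll_once(calls++);
		if (rc != 0x90a)
			break;
		budget_ms -= delay_ms;
		delay_ms *= 2;	/* wait longer the next round */
	}
	printf("rc=%d after %d polls\n", rc, calls);
	return 0;
}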
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index e2e059d8ffec..4e4014eabdb9 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -242,7 +242,7 @@ static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd)
struct tpm_space *space = &chip->work_space;
unsigned int nr_handles;
u32 attrs;
- u32 *handle;
+ __be32 *handle;
int i;
i = tpm2_find_cc(chip, cc);
@@ -252,7 +252,7 @@ static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd)
attrs = chip->cc_attrs_tbl[i];
nr_handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
- handle = (u32 *)&cmd[TPM_HEADER_SIZE];
+ handle = (__be32 *)&cmd[TPM_HEADER_SIZE];
for (i = 0; i < nr_handles; i++, handle++) {
if ((be32_to_cpu(*handle) & 0xFF000000) == TPM2_HT_TRANSIENT) {
if (!tpm2_map_to_phandle(space, handle))
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 8f0a98dea327..7b3c2a8aa9de 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -92,14 +92,9 @@ enum crb_status {
CRB_DRV_STS_COMPLETE = BIT(0),
};
-enum crb_flags {
- CRB_FL_ACPI_START = BIT(0),
- CRB_FL_CRB_START = BIT(1),
- CRB_FL_CRB_SMC_START = BIT(2),
-};
-
struct crb_priv {
- unsigned int flags;
+ u32 sm;
+ const char *hid;
void __iomem *iobase;
struct crb_regs_head __iomem *regs_h;
struct crb_regs_tail __iomem *regs_t;
@@ -128,14 +123,16 @@ struct tpm2_crb_smc {
 * Anyhow, we do not wait here as a subsequent CMD_READY request
* will be handled correctly even if idle was not completed.
*
- * The function does nothing for devices with ACPI-start method.
+ * The function does nothing for devices with ACPI-start method
+ * or SMC-start method.
*
* Return: 0 always
*/
static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv)
{
- if ((priv->flags & CRB_FL_ACPI_START) ||
- (priv->flags & CRB_FL_CRB_SMC_START))
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
return 0;
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
@@ -174,14 +171,16 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
* The device should respond within TIMEOUT_C.
*
* The function does nothing for devices with ACPI-start method
+ * or SMC-start method.
*
 * Return: 0 on success, -ETIME on timeout
*/
static int __maybe_unused crb_cmd_ready(struct device *dev,
struct crb_priv *priv)
{
- if ((priv->flags & CRB_FL_ACPI_START) ||
- (priv->flags & CRB_FL_CRB_SMC_START))
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
return 0;
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
@@ -325,13 +324,20 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
/* Make sure that cmd is populated before issuing start. */
wmb();
- if (priv->flags & CRB_FL_CRB_START)
+ /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
+ * reports only ACPI start but in practice seems to require both
+ * ACPI start and CRB start, hence the CRB start method is also
+ * invoked when hid == MSFT0101.
+ */
+ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ (priv->sm == ACPI_TPM2_MEMORY_MAPPED) ||
+ (!strcmp(priv->hid, "MSFT0101")))
iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
- if (priv->flags & CRB_FL_ACPI_START)
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD))
rc = crb_do_acpi_start(chip);
- if (priv->flags & CRB_FL_CRB_SMC_START) {
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
}
@@ -345,7 +351,9 @@ static void crb_cancel(struct tpm_chip *chip)
iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel);
- if ((priv->flags & CRB_FL_ACPI_START) && crb_do_acpi_start(chip))
+ if (((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) &&
+ crb_do_acpi_start(chip))
dev_err(&chip->dev, "ACPI Start failed\n");
}
@@ -458,7 +466,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* the control area, as one nice sane region except for some older
* stuff that puts the control area outside the ACPI IO region.
*/
- if (!(priv->flags & CRB_FL_ACPI_START)) {
+ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
if (buf->control_address == io_res.start +
sizeof(*priv->regs_h))
priv->regs_h = priv->iobase;
@@ -552,18 +561,6 @@ static int crb_acpi_add(struct acpi_device *device)
if (!priv)
return -ENOMEM;
- /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
- * report only ACPI start but in practice seems to require both
- * ACPI start and CRB start.
- */
- if (sm == ACPI_TPM2_COMMAND_BUFFER || sm == ACPI_TPM2_MEMORY_MAPPED ||
- !strcmp(acpi_device_hid(device), "MSFT0101"))
- priv->flags |= CRB_FL_CRB_START;
-
- if (sm == ACPI_TPM2_START_METHOD ||
- sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)
- priv->flags |= CRB_FL_ACPI_START;
-
if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
dev_err(dev,
@@ -574,9 +571,11 @@ static int crb_acpi_add(struct acpi_device *device)
}
crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
priv->smc_func_id = crb_smc->smc_func_id;
- priv->flags |= CRB_FL_CRB_SMC_START;
}
+ priv->sm = sm;
+ priv->hid = acpi_device_hid(device);
+
rc = crb_map_io(device, priv, buf);
if (rc)
return rc;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 7e55aa9ce680..e2d1055fb814 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -30,6 +30,7 @@
#include <linux/freezer.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/kernel.h>
#include "tpm.h"
#include "tpm_tis_core.h"
@@ -223,7 +224,7 @@ static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
}
static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *value)
+ const u8 *value)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
@@ -365,7 +366,7 @@ static struct pnp_driver tis_pnp_driver = {
},
};
-#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
+#define TIS_HID_USR_IDX (ARRAY_SIZE(tpm_pnp_tbl) - 2)
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 63bc6c3b949e..fdde971bc810 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -252,7 +252,7 @@ out:
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc, status, burstcnt;
@@ -343,7 +343,7 @@ static void disable_interrupts(struct tpm_chip *chip)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc;
@@ -445,7 +445,7 @@ static int probe_itpm(struct tpm_chip *chip)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc = 0;
- u8 cmd_getticks[] = {
+ static const u8 cmd_getticks[] = {
0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
0x00, 0x00, 0x00, 0xf1
};
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index e2212f021a02..6bbac319ff3b 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -98,7 +98,7 @@ struct tpm_tis_phy_ops {
int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *result);
int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *value);
+ const u8 *value);
int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result);
int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result);
int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src);
@@ -128,7 +128,7 @@ static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
}
static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *value)
+ u16 len, const u8 *value)
{
return data->phy_ops->write_bytes(data, addr, len, value);
}
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 88fe72ae967f..424ff2fde1f2 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -46,9 +46,7 @@
struct tpm_tis_spi_phy {
struct tpm_tis_data priv;
struct spi_device *spi_device;
-
- u8 tx_buf[4];
- u8 rx_buf[4];
+ u8 *iobuf;
};
static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
@@ -57,7 +55,7 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da
}
static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *buffer, u8 direction)
+ u8 *in, const u8 *out)
{
struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
int ret = 0;
@@ -71,14 +69,14 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
while (len) {
transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
- phy->tx_buf[0] = direction | (transfer_len - 1);
- phy->tx_buf[1] = 0xd4;
- phy->tx_buf[2] = addr >> 8;
- phy->tx_buf[3] = addr;
+ phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+ phy->iobuf[1] = 0xd4;
+ phy->iobuf[2] = addr >> 8;
+ phy->iobuf[3] = addr;
memset(&spi_xfer, 0, sizeof(spi_xfer));
- spi_xfer.tx_buf = phy->tx_buf;
- spi_xfer.rx_buf = phy->rx_buf;
+ spi_xfer.tx_buf = phy->iobuf;
+ spi_xfer.rx_buf = phy->iobuf;
spi_xfer.len = 4;
spi_xfer.cs_change = 1;
@@ -88,9 +86,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
- if ((phy->rx_buf[3] & 0x01) == 0) {
+ if ((phy->iobuf[3] & 0x01) == 0) {
// handle SPI wait states
- phy->tx_buf[0] = 0;
+ phy->iobuf[0] = 0;
for (i = 0; i < TPM_RETRY; i++) {
spi_xfer.len = 1;
@@ -99,7 +97,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
ret = spi_sync_locked(phy->spi_device, &m);
if (ret < 0)
goto exit;
- if (phy->rx_buf[0] & 0x01)
+ if (phy->iobuf[0] & 0x01)
break;
}
@@ -113,12 +111,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
spi_xfer.len = transfer_len;
spi_xfer.delay_usecs = 5;
- if (direction) {
+ if (in) {
spi_xfer.tx_buf = NULL;
- spi_xfer.rx_buf = buffer;
- } else {
- spi_xfer.tx_buf = buffer;
+ } else if (out) {
spi_xfer.rx_buf = NULL;
+ memcpy(phy->iobuf, out, transfer_len);
+ out += transfer_len;
}
spi_message_init(&m);
@@ -127,8 +125,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
+ if (in) {
+ memcpy(in, phy->iobuf, transfer_len);
+ in += transfer_len;
+ }
+
len -= transfer_len;
- buffer += transfer_len;
}
exit:
@@ -139,40 +141,51 @@ exit:
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
u16 len, u8 *result)
{
- return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
+ return tpm_tis_spi_transfer(data, addr, len, result, NULL);
}
static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *value)
+ u16 len, const u8 *value)
{
- return tpm_tis_spi_transfer(data, addr, len, value, 0);
+ return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}
static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
+ __le16 result_le;
int rc;
- rc = data->phy_ops->read_bytes(data, addr, sizeof(u16), (u8 *)result);
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u16),
+ (u8 *)&result_le);
if (!rc)
- *result = le16_to_cpu(*result);
+ *result = le16_to_cpu(result_le);
+
return rc;
}
static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
+ __le32 result_le;
int rc;
- rc = data->phy_ops->read_bytes(data, addr, sizeof(u32), (u8 *)result);
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u32),
+ (u8 *)&result_le);
if (!rc)
- *result = le32_to_cpu(*result);
+ *result = le32_to_cpu(result_le);
+
return rc;
}
static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
- value = cpu_to_le32(value);
- return data->phy_ops->write_bytes(data, addr, sizeof(u32),
- (u8 *)&value);
+ __le32 value_le;
+ int rc;
+
+ value_le = cpu_to_le32(value);
+ rc = data->phy_ops->write_bytes(data, addr, sizeof(u32),
+ (u8 *)&value_le);
+
+ return rc;
}
static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
@@ -194,6 +207,10 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
phy->spi_device = dev;
+ phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ if (!phy->iobuf)
+ return -ENOMEM;
+
return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
NULL);
}
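
For readers unfamiliar with the transfer format, the 4-byte header written into the shared iobuf above encodes a direction bit, the transfer size minus one, and the 16 low bits of the register address behind the fixed 0xd4 prefix. A standalone sketch of just that header construction (the helper name and the explicit masking are illustrative, not driver API):

static void build_tis_spi_header(u8 *hdr, u32 addr, u16 len, bool is_read)
{
	hdr[0] = (is_read ? 0x80 : 0x00) | (len - 1); /* direction + size - 1 */
	hdr[1] = 0xd4;                                /* fixed TPM address prefix */
	hdr[2] = (addr >> 8) & 0xff;                  /* register address, high byte */
	hdr[3] = addr & 0xff;                         /* register address, low byte */
}
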
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 0ecf5beb56ec..538bfa8ba9b4 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -77,6 +77,7 @@ static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
static bool vdso_default = true;
+static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
static int __init early_evtstrm_cfg(char *buf)
@@ -739,6 +740,7 @@ static void arch_timer_evtstrm_enable(int divider)
#ifdef CONFIG_COMPAT
compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
+ cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
static void arch_timer_configure_evtstream(void)
@@ -863,6 +865,16 @@ u32 arch_timer_get_rate(void)
return arch_timer_rate;
}
+bool arch_timer_evtstrm_available(void)
+{
+ /*
+ * We might get called from a preemptible context. This is fine
+ * because the availability of the event stream should always be the
+ * same for a preemptible context and the context where we might
+ * resume a task.
+ */
+ return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
+}
+
static u64 arch_counter_get_cntvct_mem(void)
{
u32 vct_lo, vct_hi, tmp_hi;
@@ -928,6 +940,8 @@ static int arch_timer_dying_cpu(unsigned int cpu)
{
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
+ cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
+
arch_timer_stop(clk);
return 0;
}
@@ -937,10 +951,16 @@ static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
- if (action == CPU_PM_ENTER)
+ if (action == CPU_PM_ENTER) {
__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
- else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
+
+ cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
+ } else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
+
+ if (elf_hwcap & HWCAP_EVTSTRM)
+ cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
+ }
return NOTIFY_OK;
}
@@ -1016,7 +1036,6 @@ static int __init arch_timer_register(void)
if (err)
goto out_unreg_notify;
-
/* Register and immediately configure the timer on the boot CPU */
err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
"clockevents/arm/arch_timer:starting",
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 39e489a96ad7..60da2537bef9 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -71,7 +71,7 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
if (readl_relaxed(timer->control) & timer->match_mask) {
writel_relaxed(timer->match_mask, timer->control);
- event_handler = ACCESS_ONCE(timer->evt.event_handler);
+ event_handler = READ_ONCE(timer->evt.event_handler);
if (event_handler)
event_handler(&timer->evt);
return IRQ_HANDLED;
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 17504129fd77..65ec5f01aa8d 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -57,7 +57,7 @@ static bool bL_switching_enabled;
#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
-static struct cpufreq_arm_bL_ops *arm_bL_ops;
+static const struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];
@@ -213,6 +213,7 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
{
u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
unsigned int freqs_new;
+ int ret;
cur_cluster = cpu_to_cluster(cpu);
new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
@@ -229,7 +230,14 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
}
}
- return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
+ ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
+
+ if (!ret) {
+ arch_set_freq_scale(policy->related_cpus, freqs_new,
+ policy->cpuinfo.max_freq);
+ }
+
+ return ret;
}
static inline u32 get_table_count(struct cpufreq_frequency_table *table)
@@ -609,7 +617,7 @@ static int __bLs_register_notifier(void) { return 0; }
static int __bLs_unregister_notifier(void) { return 0; }
#endif
-int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
+int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops)
{
int ret, i;
@@ -653,7 +661,7 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);
-void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
+void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops)
{
if (arm_bL_ops != ops) {
pr_err("%s: Registered with: %s, can't unregister, exiting\n",
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 184d7c3a112a..88a176e466c8 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -37,7 +37,7 @@ struct cpufreq_arm_bL_ops {
void (*free_opp_table)(const struct cpumask *cpumask);
};
-int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
-void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
+int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops);
+void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops);
#endif /* CPUFREQ_ARM_BIG_LITTLE_H */
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 39b3f51d9a30..b944f290c8a4 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -61,7 +61,7 @@ static int dt_get_transition_latency(struct device *cpu_dev)
return transition_latency;
}
-static struct cpufreq_arm_bL_ops dt_bL_ops = {
+static const struct cpufreq_arm_bL_ops dt_bL_ops = {
.name = "dt-bl",
.get_transition_latency = dt_get_transition_latency,
.init_opp_table = dev_pm_opp_of_cpumask_add_table,
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index a753c50e9e41..ecc56e26f8f6 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -48,7 +48,6 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "samsung,exynos3250", },
{ .compatible = "samsung,exynos4210", },
- { .compatible = "samsung,exynos4212", },
{ .compatible = "samsung,exynos5250", },
#ifndef CONFIG_BL_SWITCHER
{ .compatible = "samsung,exynos5800", },
@@ -83,8 +82,6 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "rockchip,rk3368", },
{ .compatible = "rockchip,rk3399", },
- { .compatible = "socionext,uniphier-ld6b", },
-
{ .compatible = "st-ericsson,u8500", },
{ .compatible = "st-ericsson,u8540", },
{ .compatible = "st-ericsson,u9500", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index d83ab94d041a..545946ad0752 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -43,9 +43,17 @@ static struct freq_attr *cpufreq_dt_attr[] = {
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct private_data *priv = policy->driver_data;
+ unsigned long freq = policy->freq_table[index].frequency;
+ int ret;
+
+ ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
- return dev_pm_opp_set_rate(priv->cpu_dev,
- policy->freq_table[index].frequency * 1000);
+ if (!ret) {
+ arch_set_freq_scale(policy->related_cpus, freq,
+ policy->cpuinfo.max_freq);
+ }
+
+ return ret;
}
/*
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ea43b147a7fe..41d148af7748 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -161,6 +161,12 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
+__weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
+ unsigned long max_freq)
+{
+}
+EXPORT_SYMBOL_GPL(arch_set_freq_scale);
+
/*
* This is a generic cpufreq init() routine which can be used by cpufreq
* drivers of SMP systems. It will do following:
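
arch_set_freq_scale() is added here as an empty __weak stub so that architectures can override it and track frequency invariance. A rough sketch of what such an override might compute, assuming a per-CPU scale where 1024 means running at maximum frequency (the names and the shift are assumptions, not the actual arm implementation):

static DEFINE_PER_CPU(unsigned long, demo_freq_scale) = 1024;

void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
			 unsigned long max_freq)
{
	unsigned long scale = (cur_freq << 10) / max_freq; /* 1024 == full speed */
	int cpu;

	for_each_cpu(cpu, cpus)
		per_cpu(demo_freq_scale, cpu) = scale;
}
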
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index e75880eb037d..1e55b5790853 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -118,8 +118,11 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
break;
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
}
- if (len >= PAGE_SIZE)
- return PAGE_SIZE;
+
+ if (len >= PAGE_SIZE) {
+ pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
+ return -EFBIG;
+ }
return len;
}
cpufreq_freq_attr_ro(trans_table);
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 14466a9b01c0..628fe899cb48 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -191,6 +192,57 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.suspend = cpufreq_generic_suspend,
};
+#define OCOTP_CFG3 0x440
+#define OCOTP_CFG3_SPEED_SHIFT 16
+#define OCOTP_CFG3_SPEED_1P2GHZ 0x3
+#define OCOTP_CFG3_SPEED_996MHZ 0x2
+#define OCOTP_CFG3_SPEED_852MHZ 0x1
+
+static void imx6q_opp_check_speed_grading(struct device *dev)
+{
+ struct device_node *np;
+ void __iomem *base;
+ u32 val;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
+ if (!np)
+ return;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ dev_err(dev, "failed to map ocotp\n");
+ goto put_node;
+ }
+
+ /*
+ * SPEED_GRADING[1:0] defines the max speed of ARM:
+ * 2b'11: 1200000000Hz;
+ * 2b'10: 996000000Hz;
+ * 2b'01: 852000000Hz; -- i.MX6Q only, exclusive with 996MHz.
+ * 2b'00: 792000000Hz;
+ * We need to set the max speed of ARM according to the fuse map.
+ */
+ val = readl_relaxed(base + OCOTP_CFG3);
+ val >>= OCOTP_CFG3_SPEED_SHIFT;
+ val &= 0x3;
+
+ if ((val != OCOTP_CFG3_SPEED_1P2GHZ) &&
+ of_machine_is_compatible("fsl,imx6q"))
+ if (dev_pm_opp_disable(dev, 1200000000))
+ dev_warn(dev, "failed to disable 1.2GHz OPP\n");
+ if (val < OCOTP_CFG3_SPEED_996MHZ)
+ if (dev_pm_opp_disable(dev, 996000000))
+ dev_warn(dev, "failed to disable 996MHz OPP\n");
+ if (of_machine_is_compatible("fsl,imx6q")) {
+ if (val != OCOTP_CFG3_SPEED_852MHZ)
+ if (dev_pm_opp_disable(dev, 852000000))
+ dev_warn(dev, "failed to disable 852MHz OPP\n");
+ }
+ iounmap(base);
+put_node:
+ of_node_put(np);
+}
+
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -252,28 +304,21 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_reg;
}
- /*
- * We expect an OPP table supplied by platform.
- * Just, incase the platform did not supply the OPP
- * table, it will try to get it.
- */
- num = dev_pm_opp_get_opp_count(cpu_dev);
- if (num < 0) {
- ret = dev_pm_opp_of_add_table(cpu_dev);
- if (ret < 0) {
- dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
- goto put_reg;
- }
+ ret = dev_pm_opp_of_add_table(cpu_dev);
+ if (ret < 0) {
+ dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
+ goto put_reg;
+ }
- /* Because we have added the OPPs here, we must free them */
- free_opp = true;
+ imx6q_opp_check_speed_grading(cpu_dev);
- num = dev_pm_opp_get_opp_count(cpu_dev);
- if (num < 0) {
- ret = num;
- dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
- goto out_free_opp;
- }
+ /* Because we have added the OPPs here, we must free them */
+ free_opp = true;
+ num = dev_pm_opp_get_opp_count(cpu_dev);
+ if (num < 0) {
+ ret = num;
+ dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
+ goto out_free_opp;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
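
The speed-grading helper added above reads the OCOTP_CFG3 fuse and disables the OPPs the part is not fused for. A compact illustration of the same fuse decoding (the helper name is made up; the bit values follow the comment in the function):

static unsigned long imx6q_fuse_to_max_hz(u32 cfg3)
{
	switch ((cfg3 >> OCOTP_CFG3_SPEED_SHIFT) & 0x3) {
	case OCOTP_CFG3_SPEED_1P2GHZ:
		return 1200000000UL;
	case OCOTP_CFG3_SPEED_996MHZ:
		return 996000000UL;
	case OCOTP_CFG3_SPEED_852MHZ:	/* i.MX6Q only */
		return 852000000UL;
	default:
		return 792000000UL;
	}
}
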
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 062d71434e47..b01e31db5f83 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1043,7 +1043,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
- pr_err("unable to alloc powernow_k8_data");
+ pr_err("unable to alloc powernow_k8_data\n");
return -ENOMEM;
}
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index ce345bf34d5d..06b024a3e474 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -58,56 +58,40 @@ module_param(pxa27x_maxfreq, uint, 0);
MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz"
"(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)");
+struct pxa_cpufreq_data {
+ struct clk *clk_core;
+};
+static struct pxa_cpufreq_data pxa_cpufreq_data;
+
struct pxa_freqs {
unsigned int khz;
- unsigned int membus;
- unsigned int cccr;
- unsigned int div2;
- unsigned int cclkcfg;
int vmin;
int vmax;
};
-/* Define the refresh period in mSec for the SDRAM and the number of rows */
-#define SDRAM_TREF 64 /* standard 64ms SDRAM */
-static unsigned int sdram_rows;
-
-#define CCLKCFG_TURBO 0x1
-#define CCLKCFG_FCS 0x2
-#define CCLKCFG_HALFTURBO 0x4
-#define CCLKCFG_FASTBUS 0x8
-#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2)
-#define MDREFR_DRI_MASK 0xFFF
-
-#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
-#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
-
/*
* PXA255 definitions
*/
-/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */
-#define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS
-
static const struct pxa_freqs pxa255_run_freqs[] =
{
- /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
- { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
- {132700, 132700, 0x123, 1, CCLKCFG, -1, -1}, /* 133, 133, 66, 66 */
- {199100, 99500, 0x141, 0, CCLKCFG, -1, -1}, /* 199, 199, 99, 99 */
- {265400, 132700, 0x143, 1, CCLKCFG, -1, -1}, /* 265, 265, 133, 66 */
- {331800, 165900, 0x145, 1, CCLKCFG, -1, -1}, /* 331, 331, 166, 83 */
- {398100, 99500, 0x161, 0, CCLKCFG, -1, -1}, /* 398, 398, 196, 99 */
+ /* CPU MEMBUS run turbo PXbus SDRAM */
+ { 99500, -1, -1}, /* 99, 99, 50, 50 */
+ {132700, -1, -1}, /* 133, 133, 66, 66 */
+ {199100, -1, -1}, /* 199, 199, 99, 99 */
+ {265400, -1, -1}, /* 265, 265, 133, 66 */
+ {331800, -1, -1}, /* 331, 331, 166, 83 */
+ {398100, -1, -1}, /* 398, 398, 196, 99 */
};
/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */
static const struct pxa_freqs pxa255_turbo_freqs[] =
{
- /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
- { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
- {199100, 99500, 0x221, 0, CCLKCFG, -1, -1}, /* 99, 199, 50, 99 */
- {298500, 99500, 0x321, 0, CCLKCFG, -1, -1}, /* 99, 287, 50, 99 */
- {298600, 99500, 0x1c1, 0, CCLKCFG, -1, -1}, /* 199, 287, 99, 99 */
- {398100, 99500, 0x241, 0, CCLKCFG, -1, -1}, /* 199, 398, 99, 99 */
+ /* CPU run turbo PXbus SDRAM */
+ { 99500, -1, -1}, /* 99, 99, 50, 50 */
+ {199100, -1, -1}, /* 99, 199, 50, 99 */
+ {298500, -1, -1}, /* 99, 287, 50, 99 */
+ {298600, -1, -1}, /* 199, 287, 99, 99 */
+ {398100, -1, -1}, /* 199, 398, 99, 99 */
};
#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs)
@@ -122,47 +106,14 @@ static unsigned int pxa255_turbo_table;
module_param(pxa255_turbo_table, uint, 0);
MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)");
-/*
- * PXA270 definitions
- *
- * For the PXA27x:
- * Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG.
- *
- * A = 0 => memory controller clock from table 3-7,
- * A = 1 => memory controller clock = system bus clock
- * Run mode frequency = 13 MHz * L
- * Turbo mode frequency = 13 MHz * L * N
- * System bus frequency = 13 MHz * L / (B + 1)
- *
- * In CCCR:
- * A = 1
- * L = 16 oscillator to run mode ratio
- * 2N = 6 2 * (turbo mode to run mode ratio)
- *
- * In CCLKCFG:
- * B = 1 Fast bus mode
- * HT = 0 Half-Turbo mode
- * T = 1 Turbo mode
- *
- * For now, just support some of the combinations in table 3-7 of
- * PXA27x Processor Family Developer's Manual to simplify frequency
- * change sequences.
- */
-#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L)
-#define CCLKCFG2(B, HT, T) \
- (CCLKCFG_FCS | \
- ((B) ? CCLKCFG_FASTBUS : 0) | \
- ((HT) ? CCLKCFG_HALFTURBO : 0) | \
- ((T) ? CCLKCFG_TURBO : 0))
-
static struct pxa_freqs pxa27x_freqs[] = {
- {104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 },
- {156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
- {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
- {312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 },
- {416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 },
- {520000, 208000, PXA27x_CCCR(1, 16, 5), 1, CCLKCFG2(1, 0, 1), 1450000, 1705000 },
- {624000, 208000, PXA27x_CCCR(1, 16, 6), 1, CCLKCFG2(1, 0, 1), 1550000, 1705000 }
+ {104000, 900000, 1705000 },
+ {156000, 1000000, 1705000 },
+ {208000, 1180000, 1705000 },
+ {312000, 1250000, 1705000 },
+ {416000, 1350000, 1705000 },
+ {520000, 1450000, 1705000 },
+ {624000, 1550000, 1705000 }
};
#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs)
@@ -241,51 +192,29 @@ static void pxa27x_guess_max_freq(void)
}
}
-static void init_sdram_rows(void)
-{
- uint32_t mdcnfg = __raw_readl(MDCNFG);
- unsigned int drac2 = 0, drac0 = 0;
-
- if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
- drac2 = MDCNFG_DRAC2(mdcnfg);
-
- if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
- drac0 = MDCNFG_DRAC0(mdcnfg);
-
- sdram_rows = 1 << (11 + max(drac0, drac2));
-}
-
-static u32 mdrefr_dri(unsigned int freq)
-{
- u32 interval = freq * SDRAM_TREF / sdram_rows;
-
- return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
-}
-
static unsigned int pxa_cpufreq_get(unsigned int cpu)
{
- return get_clk_frequency_khz(0);
+ struct pxa_cpufreq_data *data = cpufreq_get_driver_data();
+
+ return (unsigned int) clk_get_rate(data->clk_core) / 1000;
}
static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
{
struct cpufreq_frequency_table *pxa_freqs_table;
const struct pxa_freqs *pxa_freq_settings;
- unsigned long flags;
- unsigned int new_freq_cpu, new_freq_mem;
- unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
+ struct pxa_cpufreq_data *data = cpufreq_get_driver_data();
+ unsigned int new_freq_cpu;
int ret = 0;
/* Get the current policy */
find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);
new_freq_cpu = pxa_freq_settings[idx].khz;
- new_freq_mem = pxa_freq_settings[idx].membus;
if (freq_debug)
- pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
- new_freq_cpu / 1000, (pxa_freq_settings[idx].div2) ?
- (new_freq_mem / 2000) : (new_freq_mem / 1000));
+ pr_debug("Changing CPU frequency from %d Mhz to %d Mhz\n",
+ policy->cur / 1000, new_freq_cpu / 1000);
if (vcc_core && new_freq_cpu > policy->cur) {
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
@@ -293,53 +222,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
return ret;
}
- /* Calculate the next MDREFR. If we're slowing down the SDRAM clock
- * we need to preset the smaller DRI before the change. If we're
- * speeding up we need to set the larger DRI value after the change.
- */
- preset_mdrefr = postset_mdrefr = __raw_readl(MDREFR);
- if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(new_freq_mem)) {
- preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
- preset_mdrefr |= mdrefr_dri(new_freq_mem);
- }
- postset_mdrefr =
- (postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(new_freq_mem);
-
- /* If we're dividing the memory clock by two for the SDRAM clock, this
- * must be set prior to the change. Clearing the divide must be done
- * after the change.
- */
- if (pxa_freq_settings[idx].div2) {
- preset_mdrefr |= MDREFR_DB2_MASK;
- postset_mdrefr |= MDREFR_DB2_MASK;
- } else {
- postset_mdrefr &= ~MDREFR_DB2_MASK;
- }
-
- local_irq_save(flags);
-
- /* Set new the CCCR and prepare CCLKCFG */
- writel(pxa_freq_settings[idx].cccr, CCCR);
- cclkcfg = pxa_freq_settings[idx].cclkcfg;
-
- asm volatile(" \n\
- ldr r4, [%1] /* load MDREFR */ \n\
- b 2f \n\
- .align 5 \n\
-1: \n\
- str %3, [%1] /* preset the MDREFR */ \n\
- mcr p14, 0, %2, c6, c0, 0 /* set CCLKCFG[FCS] */ \n\
- str %4, [%1] /* postset the MDREFR */ \n\
- \n\
- b 3f \n\
-2: b 1b \n\
-3: nop \n\
- "
- : "=&r" (unused)
- : "r" (MDREFR), "r" (cclkcfg),
- "r" (preset_mdrefr), "r" (postset_mdrefr)
- : "r4", "r5");
- local_irq_restore(flags);
+ clk_set_rate(data->clk_core, new_freq_cpu * 1000);
/*
* Even if voltage setting fails, we don't report it, as the frequency
@@ -369,8 +252,6 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
pxa_cpufreq_init_voltages();
- init_sdram_rows();
-
/* set default policy and cpuinfo */
policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
@@ -429,11 +310,17 @@ static struct cpufreq_driver pxa_cpufreq_driver = {
.init = pxa_cpufreq_init,
.get = pxa_cpufreq_get,
.name = "PXA2xx",
+ .driver_data = &pxa_cpufreq_data,
};
static int __init pxa_cpu_init(void)
{
int ret = -ENODEV;
+
+ pxa_cpufreq_data.clk_core = clk_get_sys(NULL, "core");
+ if (IS_ERR(pxa_cpufreq_data.clk_core))
+ return PTR_ERR(pxa_cpufreq_data.clk_core);
+
if (cpu_is_pxa25x() || cpu_is_pxa27x())
ret = cpufreq_register_driver(&pxa_cpufreq_driver);
return ret;
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 8de2364b5995..05d299052c5c 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -53,7 +53,7 @@ static int scpi_init_opp_table(const struct cpumask *cpumask)
return ret;
}
-static struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
+static const struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
.name = "scpi",
.get_transition_latency = scpi_get_transition_latency,
.init_opp_table = scpi_init_opp_table,
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 4894924a3ca2..195f27f9c1cb 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -177,7 +177,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
np = of_cpu_device_node_get(0);
if (!np) {
- pr_err("No cpu node found");
+ pr_err("No cpu node found\n");
return -ENODEV;
}
@@ -187,7 +187,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
prop = of_find_property(np, "cpufreq_tbl", NULL);
if (!prop || !prop->value) {
- pr_err("Invalid cpufreq_tbl");
+ pr_err("Invalid cpufreq_tbl\n");
ret = -ENODEV;
goto out_put_node;
}
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index ccab452a4ef5..8085ec9000d1 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -367,7 +367,7 @@ unsigned int speedstep_detect_processor(void)
} else
return SPEEDSTEP_CPU_PIII_C;
}
-
+ /* fall through */
default:
return 0;
}
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 4bf47de6101f..923317f03b4b 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -205,6 +205,7 @@ static int ti_cpufreq_init(void)
np = of_find_node_by_path("/");
match = of_match_node(ti_cpufreq_of_match, np);
+ of_node_put(np);
if (!match)
return -ENODEV;
@@ -217,7 +218,8 @@ static int ti_cpufreq_init(void)
opp_data->cpu_dev = get_cpu_device(0);
if (!opp_data->cpu_dev) {
pr_err("%s: Failed to get device for CPU0\n", __func__);
- return -ENODEV;
+ ret = -ENODEV;
+ goto free_opp_data;
}
opp_data->opp_node = dev_pm_opp_of_get_opp_desc_node(opp_data->cpu_dev);
@@ -262,6 +264,8 @@ register_cpufreq_dt:
fail_put_node:
of_node_put(opp_data->opp_node);
+free_opp_data:
+ kfree(opp_data);
return ret;
}
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 87e5bdc5ec74..53237289e606 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -42,7 +42,7 @@ static int ve_spc_get_transition_latency(struct device *cpu_dev)
return 1000000; /* 1 ms */
}
-static struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
+static const struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
.name = "vexpress-spc",
.get_transition_latency = ve_spc_get_transition_latency,
.init_opp_table = ve_spc_init_opp_table,
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 52a75053ee03..ddee1b601b89 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -72,12 +72,94 @@ static const struct of_device_id arm_idle_state_match[] __initconst = {
};
/*
- * arm_idle_init
+ * arm_idle_init_cpu
*
* Registers the arm specific cpuidle driver with the cpuidle
* framework. It relies on core code to parse the idle states
* and initialize them using driver data structures accordingly.
*/
+static int __init arm_idle_init_cpu(int cpu)
+{
+ int ret;
+ struct cpuidle_driver *drv;
+ struct cpuidle_device *dev;
+
+ drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+ /*
+ * Initialize idle states data, starting at index 1. This
+ * driver is DT only, if no DT idle states are detected (ret
+ * == 0) let the driver initialization fail accordingly since
+ * there is no reason to initialize the idle driver if only
+ * wfi is supported.
+ */
+ ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
+ if (ret <= 0) {
+ ret = ret ? : -ENODEV;
+ goto out_kfree_drv;
+ }
+
+ ret = cpuidle_register_driver(drv);
+ if (ret) {
+ pr_err("Failed to register cpuidle driver\n");
+ goto out_kfree_drv;
+ }
+
+ /*
+ * Call arch CPU operations in order to initialize
+ * idle states suspend back-end specific data
+ */
+ ret = arm_cpuidle_init(cpu);
+
+ /*
+ * Skip the cpuidle device initialization if the reported
+ * failure is a HW misconfiguration/breakage (-ENXIO).
+ */
+ if (ret == -ENXIO)
+ return 0;
+
+ if (ret) {
+ pr_err("CPU %d failed to init idle CPU ops\n", cpu);
+ goto out_unregister_drv;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ pr_err("Failed to allocate cpuidle device\n");
+ ret = -ENOMEM;
+ goto out_unregister_drv;
+ }
+ dev->cpu = cpu;
+
+ ret = cpuidle_register_device(dev);
+ if (ret) {
+ pr_err("Failed to register cpuidle device for CPU %d\n",
+ cpu);
+ goto out_kfree_dev;
+ }
+
+ return 0;
+
+out_kfree_dev:
+ kfree(dev);
+out_unregister_drv:
+ cpuidle_unregister_driver(drv);
+out_kfree_drv:
+ kfree(drv);
+ return ret;
+}
+
+/*
+ * arm_idle_init - Initializes arm cpuidle driver
+ *
+ * Initializes the arm cpuidle driver for all CPUs. If any CPU fails
+ * to register the cpuidle driver, roll back and cancel the
+ * registration of all CPUs.
+ */
static int __init arm_idle_init(void)
{
int cpu, ret;
@@ -85,79 +167,20 @@ static int __init arm_idle_init(void)
struct cpuidle_device *dev;
for_each_possible_cpu(cpu) {
-
- drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
- if (!drv) {
- ret = -ENOMEM;
- goto out_fail;
- }
-
- drv->cpumask = (struct cpumask *)cpumask_of(cpu);
-
- /*
- * Initialize idle states data, starting at index 1. This
- * driver is DT only, if no DT idle states are detected (ret
- * == 0) let the driver initialization fail accordingly since
- * there is no reason to initialize the idle driver if only
- * wfi is supported.
- */
- ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
- if (ret <= 0) {
- ret = ret ? : -ENODEV;
- goto init_fail;
- }
-
- ret = cpuidle_register_driver(drv);
- if (ret) {
- pr_err("Failed to register cpuidle driver\n");
- goto init_fail;
- }
-
- /*
- * Call arch CPU operations in order to initialize
- * idle states suspend back-end specific data
- */
- ret = arm_cpuidle_init(cpu);
-
- /*
- * Skip the cpuidle device initialization if the reported
- * failure is a HW misconfiguration/breakage (-ENXIO).
- */
- if (ret == -ENXIO)
- continue;
-
- if (ret) {
- pr_err("CPU %d failed to init idle CPU ops\n", cpu);
- goto out_fail;
- }
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- pr_err("Failed to allocate cpuidle device\n");
- ret = -ENOMEM;
+ ret = arm_idle_init_cpu(cpu);
+ if (ret)
goto out_fail;
- }
- dev->cpu = cpu;
-
- ret = cpuidle_register_device(dev);
- if (ret) {
- pr_err("Failed to register cpuidle device for CPU %d\n",
- cpu);
- kfree(dev);
- goto out_fail;
- }
}
return 0;
-init_fail:
- kfree(drv);
+
out_fail:
while (--cpu >= 0) {
dev = per_cpu(cpuidle_devices, cpu);
+ drv = cpuidle_get_cpu_driver(dev);
cpuidle_unregister_device(dev);
- kfree(dev);
- drv = cpuidle_get_driver();
cpuidle_unregister_driver(drv);
+ kfree(dev);
kfree(drv);
}
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 484cc8909d5c..68a16827f45f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -208,6 +208,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
return -EBUSY;
}
target_state = &drv->states[index];
+ broadcast = false;
}
/* Take note of the planned idle state. */
@@ -387,9 +388,12 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (dev->enabled)
return 0;
+ if (!cpuidle_curr_governor)
+ return -EIO;
+
drv = cpuidle_get_cpu_driver(dev);
- if (!drv || !cpuidle_curr_governor)
+ if (!drv)
return -EIO;
if (!dev->registered)
@@ -399,9 +403,11 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (ret)
return ret;
- if (cpuidle_curr_governor->enable &&
- (ret = cpuidle_curr_governor->enable(drv, dev)))
- goto fail_sysfs;
+ if (cpuidle_curr_governor->enable) {
+ ret = cpuidle_curr_governor->enable(drv, dev);
+ if (ret)
+ goto fail_sysfs;
+ }
smp_wmb();
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index ce1a2ffffb2a..1ad8745fd6d6 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -17,6 +17,7 @@
#include <linux/pm_qos.h>
#include <linux/jiffies.h>
#include <linux/tick.h>
+#include <linux/cpu.h>
#include <asm/io.h>
#include <linux/uaccess.h>
@@ -67,10 +68,16 @@ static int ladder_select_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
+ struct device *device = get_cpu_device(dev->cpu);
struct ladder_device_state *last_state;
int last_residency, last_idx = ldev->last_state_idx;
int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
+ int resume_latency = dev_pm_qos_raw_read_value(device);
+
+ if (resume_latency < latency_req &&
+ resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ latency_req = resume_latency;
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) {
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 48eaf2879228..aa390404e85f 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -298,8 +298,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->needs_update = 0;
}
- /* resume_latency is 0 means no restriction */
- if (resume_latency && resume_latency < latency_req)
+ if (resume_latency < latency_req &&
+ resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
latency_req = resume_latency;
/* Special case when user has set very strict latency requirement */
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index fe33c199fc1a..47ec920d5b71 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -199,22 +199,8 @@ config CRYPTO_CRC32_S390
It is available with IBM z13 or later.
-config CRYPTO_DEV_MV_CESA
- tristate "Marvell's Cryptographic Engine"
- depends on PLAT_ORION
- select CRYPTO_AES
- select CRYPTO_BLKCIPHER
- select CRYPTO_HASH
- select SRAM
- help
- This driver allows you to utilize the Cryptographic Engines and
- Security Accelerator (CESA) which can be found on the Marvell Orion
- and Kirkwood SoCs, such as QNAP's TS-209.
-
- Currently the driver supports AES in ECB and CBC mode without DMA.
-
config CRYPTO_DEV_MARVELL_CESA
- tristate "New Marvell's Cryptographic Engine driver"
+ tristate "Marvell's Cryptographic Engine driver"
depends on PLAT_ORION || ARCH_MVEBU
select CRYPTO_AES
select CRYPTO_DES
@@ -223,12 +209,10 @@ config CRYPTO_DEV_MARVELL_CESA
select SRAM
help
This driver allows you to utilize the Cryptographic Engines and
- Security Accelerator (CESA) which can be found on the Armada 370.
+ Security Accelerator (CESA) which can be found on MVEBU and ORION
+ platforms.
This driver supports CPU offload through DMA transfers.
- This driver is aimed at replacing the mv_cesa driver. This will only
- happen once it has received proper testing.
-
config CRYPTO_DEV_NIAGARA2
tristate "Niagara2 Stream Processing Unit driver"
select CRYPTO_DES
@@ -315,6 +299,10 @@ config CRYPTO_DEV_PPC4XX
tristate "Driver AMCC PPC4xx crypto accelerator"
depends on PPC && 4xx
select CRYPTO_HASH
+ select CRYPTO_AEAD
+ select CRYPTO_AES
+ select CRYPTO_CCM
+ select CRYPTO_GCM
select CRYPTO_BLKCIPHER
help
This option allows you to have support for AMCC crypto acceleration.
@@ -439,6 +427,20 @@ config CRYPTO_DEV_S5P
Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
algorithms execution.
+config CRYPTO_DEV_EXYNOS_HASH
+ bool "Support for Samsung Exynos HASH accelerator"
+ depends on CRYPTO_DEV_S5P
+ depends on !CRYPTO_DEV_EXYNOS_RNG && CRYPTO_DEV_EXYNOS_RNG!=m
+ select CRYPTO_SHA1
+ select CRYPTO_MD5
+ select CRYPTO_SHA256
+ help
+ Select this to offload MD5/SHA1/SHA256 hashing to the Samsung Exynos
+ HASH accelerator.
+ This will also select software SHA1, MD5 and SHA256, as they are
+ needed for small and zero-size messages.
+ The HASH algorithms will be disabled if EXYNOS_RNG
+ is enabled, due to a hardware conflict.
+
config CRYPTO_DEV_NX
bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration"
depends on PPC64
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c00708d04be6..2513d13ea2c4 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
index b95539928fdf..e33c185fc163 100644
--- a/drivers/crypto/amcc/Makefile
+++ b/drivers/crypto/amcc/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
-crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
+crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o
crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 4afca3968773..eeaf27859d80 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -26,11 +26,14 @@
#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
+#include <crypto/aead.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
+#include <crypto/ctr.h>
#include "crypto4xx_reg_def.h"
-#include "crypto4xx_sa.h"
#include "crypto4xx_core.h"
+#include "crypto4xx_sa.h"
static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
u32 save_iv, u32 ld_h, u32 ld_iv,
@@ -62,6 +65,7 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
sa->sa_command_1.bf.feedback_mode = cfb,
sa->sa_command_1.bf.sa_rev = 1;
+ sa->sa_command_1.bf.hmac_muting = hmac_mc;
sa->sa_command_1.bf.extended_seq_num = esn;
sa->sa_command_1.bf.seq_num_mask = sn_mask;
sa->sa_command_1.bf.mutable_bit_proc = mute;
@@ -73,29 +77,29 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
int crypto4xx_encrypt(struct ablkcipher_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int ivlen = crypto_ablkcipher_ivsize(
+ crypto_ablkcipher_reqtfm(req));
+ __le32 iv[ivlen];
- ctx->direction = DIR_OUTBOUND;
- ctx->hash_final = 0;
- ctx->is_hash = 0;
- ctx->pd_ctl = 0x1;
+ if (ivlen)
+ crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, req->info,
- get_dynamic_sa_iv_size(ctx));
+ req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len, 0);
}
int crypto4xx_decrypt(struct ablkcipher_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int ivlen = crypto_ablkcipher_ivsize(
+ crypto_ablkcipher_reqtfm(req));
+ __le32 iv[ivlen];
- ctx->direction = DIR_INBOUND;
- ctx->hash_final = 0;
- ctx->is_hash = 0;
- ctx->pd_ctl = 1;
+ if (ivlen)
+ crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, req->info,
- get_dynamic_sa_iv_size(ctx));
+ req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0);
}
/**
@@ -120,23 +124,15 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
}
/* Create SA */
- if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ if (ctx->sa_in || ctx->sa_out)
crypto4xx_free_sa(ctx);
rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
if (rc)
return rc;
- if (ctx->state_record_dma_addr == 0) {
- rc = crypto4xx_alloc_state_record(ctx);
- if (rc) {
- crypto4xx_free_sa(ctx);
- return rc;
- }
- }
/* Setup SA */
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- ctx->hash_final = 0;
+ sa = ctx->sa_in;
set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
@@ -150,18 +146,13 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
- crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
- key, keylen);
- sa->sa_contents = SA_AES_CONTENTS | (keylen << 2);
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
+ key, keylen);
+ sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
sa->sa_command_1.bf.key_len = keylen >> 3;
- ctx->is_hash = 0;
- ctx->direction = DIR_INBOUND;
- memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
- (void *)&ctx->state_record_dma_addr, 4);
- ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
- sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa = ctx->sa_out;
sa->sa_command_0.bf.dir = DIR_OUTBOUND;
return 0;
@@ -174,6 +165,396 @@ int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
+ CRYPTO_FEEDBACK_MODE_128BIT_CFB);
+}
+
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
+ CRYPTO_FEEDBACK_MODE_64BIT_OFB);
+}
+
+int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ int rc;
+
+ rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE,
+ CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
+ if (rc)
+ return rc;
+
+ ctx->iv_nonce = cpu_to_le32p((u32 *)&key[keylen -
+ CTR_RFC3686_NONCE_SIZE]);
+
+ return 0;
+}
+
+int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ __le32 iv[AES_IV_SIZE / 4] = {
+ ctx->iv_nonce,
+ cpu_to_le32p((u32 *) req->info),
+ cpu_to_le32p((u32 *) (req->info + 4)),
+ cpu_to_le32(1) };
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, iv, AES_IV_SIZE,
+ ctx->sa_out, ctx->sa_len, 0);
+}
+
+int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ __le32 iv[AES_IV_SIZE / 4] = {
+ ctx->iv_nonce,
+ cpu_to_le32p((u32 *) req->info),
+ cpu_to_le32p((u32 *) (req->info + 4)),
+ cpu_to_le32(1) };
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, iv, AES_IV_SIZE,
+ ctx->sa_out, ctx->sa_len, 0);
+}
+
+static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
+ bool is_ccm, bool decrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+ /* authsize has to be a multiple of 4 */
+ if (aead->authsize & 3)
+ return true;
+
+ /*
+ * hardware does not handle cases where cryptlen
+ * is less than a block
+ */
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return true;
+
+ /* assoc len needs to be a multiple of 4 */
+ if (req->assoclen & 0x3)
+ return true;
+
+ /* CCM supports only counter field length of 2 and 4 bytes */
+ if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
+ return true;
+
+ /* CCM - fix CBC MAC mismatch in special case */
+ if (is_ccm && decrypt && !req->assoclen)
+ return true;
+
+ return false;
+}
+
+static int crypto4xx_aead_fallback(struct aead_request *req,
+ struct crypto4xx_ctx *ctx, bool do_decrypt)
+{
+ char aead_req_data[sizeof(struct aead_request) +
+ crypto_aead_reqsize(ctx->sw_cipher.aead)]
+ __aligned(__alignof__(struct aead_request));
+
+ struct aead_request *subreq = (void *) aead_req_data;
+
+ memset(subreq, 0, sizeof(aead_req_data));
+
+ aead_request_set_tfm(subreq, ctx->sw_cipher.aead);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+ return do_decrypt ? crypto_aead_decrypt(subreq) :
+ crypto_aead_encrypt(subreq);
+}
+
+static int crypto4xx_setup_fallback(struct crypto4xx_ctx *ctx,
+ struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ int rc;
+
+ crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(ctx->sw_cipher.aead,
+ crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
+ rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
+ crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
+ crypto_aead_set_flags(cipher,
+ crypto_aead_get_flags(ctx->sw_cipher.aead) &
+ CRYPTO_TFM_RES_MASK);
+
+ return rc;
+}
+
+/**
+ * AES-CCM Functions
+ */
+
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
+ if (rc)
+ return rc;
+
+ if (ctx->sa_in || ctx->sa_out)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_CCM_LEN + (keylen - 16) / 4);
+ if (rc)
+ return rc;
+
+ /* Setup SA */
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
+
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_HASH_DECRYPT, DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen);
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_ENCRYPT_HASH, DIR_OUTBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+ return 0;
+}
+
+static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ unsigned int len = req->cryptlen;
+ __le32 iv[16];
+ u32 tmp_sa[ctx->sa_len * 4];
+ struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
+
+ if (crypto4xx_aead_need_fallback(req, true, decrypt))
+ return crypto4xx_aead_fallback(req, ctx, decrypt);
+
+ if (decrypt)
+ len -= crypto_aead_authsize(aead);
+
+ memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, sizeof(tmp_sa));
+ sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2;
+
+ if (req->iv[0] == 1) {
+ /* CRYPTO_MODE_AES_ICM */
+ sa->sa_command_1.bf.crypto_mode9_8 = 1;
+ }
+
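+ /* Build the CTR IV: flags byte and nonce taken from req->iv, counter bytes cleared (req->iv[0] holds L - 1) */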
+ iv[3] = cpu_to_le32(0);
+ crypto4xx_memcpy_to_le32(iv, req->iv, 16 - (req->iv[0] + 1));
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ len, iv, sizeof(iv),
+ sa, ctx->sa_len, req->assoclen);
+}
+
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_ccm(req, false);
+}
+
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_ccm(req, true);
+}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
+ unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
+}
+
+/**
+ * AES-GCM Functions
+ */
+
+static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
+{
+ switch (keylen) {
+ case 16:
+ case 24:
+ case 32:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_cipher *aes_tfm = NULL;
+ uint8_t src[16] = { 0 };
+ int rc = 0;
+
+ aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(aes_tfm)) {
+ rc = PTR_ERR(aes_tfm);
+ pr_warn("could not load aes cipher driver: %d\n", rc);
+ return rc;
+ }
+
+ rc = crypto_cipher_setkey(aes_tfm, key, keylen);
+ if (rc) {
+ pr_err("setkey() failed: %d\n", rc);
+ goto out;
+ }
+
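+ /* The GHASH hash key H is the AES encryption of the all-zero block: H = E_K(0^128) */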
+ crypto_cipher_encrypt_one(aes_tfm, src, src);
+ crypto4xx_memcpy_to_le32(hash_start, src, 16);
+out:
+ crypto_free_cipher(aes_tfm);
+ return rc;
+}
+
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
+ crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
+ if (rc)
+ return rc;
+
+ if (ctx->sa_in || ctx->sa_out)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_GCM_LEN + (keylen - 16) / 4);
+ if (rc)
+ return rc;
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ sa->sa_contents.w = SA_AES_GCM_CONTENTS | (keylen << 2);
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_HASH_DECRYPT,
+ DIR_INBOUND);
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON, SA_MC_DISABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
+ key, keylen);
+
+ rc = crypto4xx_compute_gcm_hash_key_sw(get_dynamic_sa_inner_digest(sa),
+ key, keylen);
+ if (rc) {
+ pr_err("GCM hash key setting failed = %d\n", rc);
+ goto err;
+ }
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT_HASH;
+
+ return 0;
+err:
+ crypto4xx_free_sa(ctx);
+ return rc;
+}
+
+static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req,
+ bool decrypt)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int len = req->cryptlen;
+ __le32 iv[4];
+
+ if (crypto4xx_aead_need_fallback(req, false, decrypt))
+ return crypto4xx_aead_fallback(req, ctx, decrypt);
+
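+ /* With a 96-bit IV, GCM's initial counter block is IV || 0^31 || 1 */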
+ crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);
+ iv[3] = cpu_to_le32(1);
+
+ if (decrypt)
+ len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ len, iv, sizeof(iv),
+ decrypt ? ctx->sa_in : ctx->sa_out,
+ ctx->sa_len, req->assoclen);
+}
+
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_gcm(req, false);
+}
+
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_gcm(req, true);
+}
+
/**
* HASH SHA1 Functions
*/
@@ -183,53 +564,39 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
unsigned char hm)
{
struct crypto_alg *alg = tfm->__crt_alg;
- struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct crypto4xx_alg *my_alg;
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
- struct dynamic_sa_ctl *sa;
- struct dynamic_sa_hash160 *sa_in;
+ struct dynamic_sa_hash160 *sa;
int rc;
+ my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg,
+ alg.u.hash);
ctx->dev = my_alg->dev;
- ctx->is_hash = 1;
- ctx->hash_final = 0;
/* Create SA */
- if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ if (ctx->sa_in || ctx->sa_out)
crypto4xx_free_sa(ctx);
rc = crypto4xx_alloc_sa(ctx, sa_len);
if (rc)
return rc;
- if (ctx->state_record_dma_addr == 0) {
- crypto4xx_alloc_state_record(ctx);
- if (!ctx->state_record_dma_addr) {
- crypto4xx_free_sa(ctx);
- return -ENOMEM;
- }
- }
-
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct crypto4xx_ctx));
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
+ set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
SA_OPCODE_HASH, DIR_INBOUND);
- set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
+ set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH,
CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
- ctx->direction = DIR_INBOUND;
- sa->sa_contents = SA_HASH160_CONTENTS;
- sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in;
/* Need to zero hash digest in SA */
- memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
- memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest));
- sa_in->state_ptr = ctx->state_record_dma_addr;
- ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
+ memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
return 0;
}
@@ -240,29 +607,27 @@ int crypto4xx_hash_init(struct ahash_request *req)
int ds;
struct dynamic_sa_ctl *sa;
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa = ctx->sa_in;
ds = crypto_ahash_digestsize(
__crypto_ahash_cast(req->base.tfm));
sa->sa_command_0.bf.digest_len = ds >> 2;
sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
- ctx->is_hash = 1;
- ctx->direction = DIR_INBOUND;
return 0;
}
int crypto4xx_hash_update(struct ahash_request *req)
{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct scatterlist dst;
+ unsigned int ds = crypto_ahash_digestsize(ahash);
- ctx->is_hash = 1;
- ctx->hash_final = 0;
- ctx->pd_ctl = 0x11;
- ctx->direction = DIR_INBOUND;
+ sg_init_one(&dst, req->result, ds);
- return crypto4xx_build_pd(&req->base, ctx, req->src,
- (struct scatterlist *) req->result,
- req->nbytes, NULL, 0);
+ return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+ req->nbytes, NULL, 0, ctx->sa_in,
+ ctx->sa_len, 0);
}
int crypto4xx_hash_final(struct ahash_request *req)
@@ -272,15 +637,16 @@ int crypto4xx_hash_final(struct ahash_request *req)
int crypto4xx_hash_digest(struct ahash_request *req)
{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct scatterlist dst;
+ unsigned int ds = crypto_ahash_digestsize(ahash);
- ctx->hash_final = 1;
- ctx->pd_ctl = 0x11;
- ctx->direction = DIR_INBOUND;
+ sg_init_one(&dst, req->result, ds);
- return crypto4xx_build_pd(&req->base, ctx, req->src,
- (struct scatterlist *) req->result,
- req->nbytes, NULL, 0);
+ return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+ req->nbytes, NULL, 0, ctx->sa_in,
+ ctx->sa_len, 0);
}
/**
@@ -291,5 +657,3 @@ int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
SA_HASH_MODE_HASH);
}
-
-
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 65dc78b91dea..c44954e274bc 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -35,8 +35,14 @@
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
+#include <crypto/aead.h>
#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
@@ -127,21 +133,17 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
- ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
- &ctx->sa_in_dma_addr, GFP_ATOMIC);
+ ctx->sa_in = kzalloc(size * 4, GFP_ATOMIC);
if (ctx->sa_in == NULL)
return -ENOMEM;
- ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
- &ctx->sa_out_dma_addr, GFP_ATOMIC);
+ ctx->sa_out = kzalloc(size * 4, GFP_ATOMIC);
if (ctx->sa_out == NULL) {
- dma_free_coherent(ctx->dev->core_dev->device, size * 4,
- ctx->sa_in, ctx->sa_in_dma_addr);
+ kfree(ctx->sa_in);
+ ctx->sa_in = NULL;
return -ENOMEM;
}
- memset(ctx->sa_in, 0, size * 4);
- memset(ctx->sa_out, 0, size * 4);
ctx->sa_len = size;
return 0;
@@ -149,40 +151,13 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
- if (ctx->sa_in != NULL)
- dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
- ctx->sa_in, ctx->sa_in_dma_addr);
- if (ctx->sa_out != NULL)
- dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
- ctx->sa_out, ctx->sa_out_dma_addr);
-
- ctx->sa_in_dma_addr = 0;
- ctx->sa_out_dma_addr = 0;
+ kfree(ctx->sa_in);
+ ctx->sa_in = NULL;
+ kfree(ctx->sa_out);
+ ctx->sa_out = NULL;
ctx->sa_len = 0;
}
-u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
-{
- ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
- sizeof(struct sa_state_record),
- &ctx->state_record_dma_addr, GFP_ATOMIC);
- if (!ctx->state_record_dma_addr)
- return -ENOMEM;
- memset(ctx->state_record, 0, sizeof(struct sa_state_record));
-
- return 0;
-}
-
-void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
-{
- if (ctx->state_record != NULL)
- dma_free_coherent(ctx->dev->core_dev->device,
- sizeof(struct sa_state_record),
- ctx->state_record,
- ctx->state_record_dma_addr);
- ctx->state_record_dma_addr = 0;
-}
-
/**
* alloc memory for the gather ring
* no need to alloc buf for the ring
@@ -191,7 +166,6 @@ void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
int i;
- struct pd_uinfo *pd_uinfo;
dev->pdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
&dev->pdr_pa, GFP_ATOMIC);
@@ -207,9 +181,9 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
dev->pdr_pa);
return -ENOMEM;
}
- memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
+ memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
- 256 * PPC4XX_NUM_PD,
+ sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
&dev->shadow_sa_pool_pa,
GFP_ATOMIC);
if (!dev->shadow_sa_pool)
@@ -221,16 +195,17 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
if (!dev->shadow_sr_pool)
return -ENOMEM;
for (i = 0; i < PPC4XX_NUM_PD; i++) {
- pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
- sizeof(struct pd_uinfo) * i);
+ struct ce_pd *pd = &dev->pdr[i];
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];
+
+ pd->sa = dev->shadow_sa_pool_pa +
+ sizeof(union shadow_sa_buf) * i;
/* alloc 256 bytes which is enough for any kind of dynamic sa */
- pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
- pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
+ pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
/* alloc state record */
- pd_uinfo->sr_va = dev->shadow_sr_pool +
- sizeof(struct sa_state_record) * i;
+ pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
sizeof(struct sa_state_record) * i;
}
@@ -240,13 +215,16 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
- if (dev->pdr != NULL)
+ if (dev->pdr)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
dev->pdr, dev->pdr_pa);
+
if (dev->shadow_sa_pool)
- dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
- dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
+ dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+
if (dev->shadow_sr_pool)
dma_free_coherent(dev->core_dev->device,
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
@@ -273,28 +251,21 @@ static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
- struct pd_uinfo *pd_uinfo;
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+ u32 tail;
unsigned long flags;
- pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
- sizeof(struct pd_uinfo) * idx);
spin_lock_irqsave(&dev->core_dev->lock, flags);
+ pd_uinfo->state = PD_ENTRY_FREE;
+
if (dev->pdr_tail != PPC4XX_LAST_PD)
dev->pdr_tail++;
else
dev->pdr_tail = 0;
- pd_uinfo->state = PD_ENTRY_FREE;
+ tail = dev->pdr_tail;
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
- return 0;
-}
-
-static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
- dma_addr_t *pd_dma, u32 idx)
-{
- *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
-
- return dev->pdr + sizeof(struct ce_pd) * idx;
+ return tail;
}
/**
@@ -326,10 +297,11 @@ static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
* when this function is called.
* preemption or interrupt must be disabled
*/
-u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
+static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
u32 retval;
u32 tmp;
+
if (n >= PPC4XX_NUM_GD)
return ERING_WAS_FULL;
@@ -372,7 +344,7 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
{
*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
- return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
+ return &dev->gdr[idx];
}
/**
@@ -383,7 +355,6 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
int i;
- struct ce_sd *sd_array;
/* alloc memory for scatter descriptor ring */
dev->sdr = dma_alloc_coherent(dev->core_dev->device,
@@ -392,10 +363,9 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
if (!dev->sdr)
return -ENOMEM;
- dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
dev->scatter_buffer_va =
dma_alloc_coherent(dev->core_dev->device,
- dev->scatter_buffer_size * PPC4XX_NUM_SD,
+ PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
&dev->scatter_buffer_pa, GFP_ATOMIC);
if (!dev->scatter_buffer_va) {
dma_free_coherent(dev->core_dev->device,
@@ -404,11 +374,9 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
return -ENOMEM;
}
- sd_array = dev->sdr;
-
for (i = 0; i < PPC4XX_NUM_SD; i++) {
- sd_array[i].ptr = dev->scatter_buffer_pa +
- dev->scatter_buffer_size * i;
+ dev->sdr[i].ptr = dev->scatter_buffer_pa +
+ PPC4XX_SD_BUFFER_SIZE * i;
}
return 0;
@@ -416,14 +384,14 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
- if (dev->sdr != NULL)
+ if (dev->sdr)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_sd) * PPC4XX_NUM_SD,
dev->sdr, dev->sdr_pa);
- if (dev->scatter_buffer_va != NULL)
+ if (dev->scatter_buffer_va)
dma_free_coherent(dev->core_dev->device,
- dev->scatter_buffer_size * PPC4XX_NUM_SD,
+ PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
dev->scatter_buffer_va,
dev->scatter_buffer_pa);
}
@@ -477,63 +445,7 @@ static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
{
*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
- return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
-}
-
-static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
- dma_addr_t *addr, u32 *length,
- u32 *idx, u32 *offset, u32 *nbytes)
-{
- u32 len;
-
- if (*length > dev->scatter_buffer_size) {
- memcpy(phys_to_virt(*addr),
- dev->scatter_buffer_va +
- *idx * dev->scatter_buffer_size + *offset,
- dev->scatter_buffer_size);
- *offset = 0;
- *length -= dev->scatter_buffer_size;
- *nbytes -= dev->scatter_buffer_size;
- if (*idx == PPC4XX_LAST_SD)
- *idx = 0;
- else
- (*idx)++;
- *addr = *addr + dev->scatter_buffer_size;
- return 1;
- } else if (*length < dev->scatter_buffer_size) {
- memcpy(phys_to_virt(*addr),
- dev->scatter_buffer_va +
- *idx * dev->scatter_buffer_size + *offset, *length);
- if ((*offset + *length) == dev->scatter_buffer_size) {
- if (*idx == PPC4XX_LAST_SD)
- *idx = 0;
- else
- (*idx)++;
- *nbytes -= *length;
- *offset = 0;
- } else {
- *nbytes -= *length;
- *offset += *length;
- }
-
- return 0;
- } else {
- len = (*nbytes <= dev->scatter_buffer_size) ?
- (*nbytes) : dev->scatter_buffer_size;
- memcpy(phys_to_virt(*addr),
- dev->scatter_buffer_va +
- *idx * dev->scatter_buffer_size + *offset,
- len);
- *offset = 0;
- *nbytes -= len;
-
- if (*idx == PPC4XX_LAST_SD)
- *idx = 0;
- else
- (*idx)++;
-
- return 0;
- }
+ return &dev->sdr[idx];
}
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
@@ -542,66 +454,52 @@ static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
u32 nbytes,
struct scatterlist *dst)
{
- dma_addr_t addr;
- u32 this_sd;
- u32 offset;
- u32 len;
- u32 i;
- u32 sg_len;
- struct scatterlist *sg;
+ unsigned int first_sd = pd_uinfo->first_sd;
+ unsigned int last_sd;
+ unsigned int overflow = 0;
+ unsigned int to_copy;
+ unsigned int dst_start = 0;
- this_sd = pd_uinfo->first_sd;
- offset = 0;
- i = 0;
+ /*
+ * Because the scatter buffers are all neatly organized in one
+ * big contiguous ringbuffer, scatterwalk_map_and_copy() can
+ * be instructed to copy a range of buffers in one go.
+ */
+
+ last_sd = (first_sd + pd_uinfo->num_sd);
+ if (last_sd > PPC4XX_LAST_SD) {
+ last_sd = PPC4XX_LAST_SD;
+ overflow = last_sd % PPC4XX_NUM_SD;
+ }
while (nbytes) {
- sg = &dst[i];
- sg_len = sg->length;
- addr = dma_map_page(dev->core_dev->device, sg_page(sg),
- sg->offset, sg->length, DMA_TO_DEVICE);
-
- if (offset == 0) {
- len = (nbytes <= sg->length) ? nbytes : sg->length;
- while (crypto4xx_fill_one_page(dev, &addr, &len,
- &this_sd, &offset, &nbytes))
- ;
- if (!nbytes)
- return;
- i++;
- } else {
- len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
- nbytes : (dev->scatter_buffer_size - offset);
- len = (sg->length < len) ? sg->length : len;
- while (crypto4xx_fill_one_page(dev, &addr, &len,
- &this_sd, &offset, &nbytes))
- ;
- if (!nbytes)
- return;
- sg_len -= len;
- if (sg_len) {
- addr += len;
- while (crypto4xx_fill_one_page(dev, &addr,
- &sg_len, &this_sd, &offset, &nbytes))
- ;
- }
- i++;
+ void *buf = dev->scatter_buffer_va +
+ first_sd * PPC4XX_SD_BUFFER_SIZE;
+
+ to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
+ (1 + last_sd - first_sd));
+ scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
+ nbytes -= to_copy;
+
+ if (overflow) {
+ first_sd = 0;
+ last_sd = overflow;
+ dst_start += to_copy;
+ overflow = 0;
}
}
}
-static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
+static void crypto4xx_copy_digest_to_dst(void *dst,
+ struct pd_uinfo *pd_uinfo,
struct crypto4xx_ctx *ctx)
{
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- struct sa_state_record *state_record =
- (struct sa_state_record *) pd_uinfo->sr_va;
if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
- memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
+ memcpy(dst, pd_uinfo->sr_va->save_digest,
SA_HASH_ALG_SHA1_DIGEST_SIZE);
}
-
- return 0;
}
static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
@@ -623,7 +521,7 @@ static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
}
}
-static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
+static void crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
{
@@ -644,13 +542,13 @@ static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
dst->offset, dst->length, DMA_FROM_DEVICE);
}
crypto4xx_ret_sg_desc(dev, pd_uinfo);
- if (ablk_req->base.complete != NULL)
- ablk_req->base.complete(&ablk_req->base, 0);
- return 0;
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ ablkcipher_request_complete(ablk_req, -EINPROGRESS);
+ ablkcipher_request_complete(ablk_req, 0);
}
-static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
+static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo)
{
struct crypto4xx_ctx *ctx;
@@ -659,62 +557,93 @@ static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
ahash_req = ahash_request_cast(pd_uinfo->async_req);
ctx = crypto_tfm_ctx(ahash_req->base.tfm);
- crypto4xx_copy_digest_to_dst(pd_uinfo,
+ crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
crypto_tfm_ctx(ahash_req->base.tfm));
crypto4xx_ret_sg_desc(dev, pd_uinfo);
- /* call user provided callback function x */
- if (ahash_req->base.complete != NULL)
- ahash_req->base.complete(&ahash_req->base, 0);
- return 0;
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ ahash_request_complete(ahash_req, -EINPROGRESS);
+ ahash_request_complete(ahash_req, 0);
}
-static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
+static void crypto4xx_aead_done(struct crypto4xx_device *dev,
+ struct pd_uinfo *pd_uinfo,
+ struct ce_pd *pd)
{
- struct ce_pd *pd;
- struct pd_uinfo *pd_uinfo;
+ struct aead_request *aead_req;
+ struct crypto4xx_ctx *ctx;
+ struct scatterlist *dst = pd_uinfo->dest_va;
+ int err = 0;
- pd = dev->pdr + sizeof(struct ce_pd)*idx;
- pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
- if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
- CRYPTO_ALG_TYPE_ABLKCIPHER)
- return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
- else
- return crypto4xx_ahash_done(dev, pd_uinfo);
+ aead_req = container_of(pd_uinfo->async_req, struct aead_request,
+ base);
+ ctx = crypto_tfm_ctx(aead_req->base.tfm);
+
+ if (pd_uinfo->using_sd) {
+ crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+ pd->pd_ctl_len.bf.pkt_len,
+ dst);
+ } else {
+ __dma_sync_page(sg_page(dst), dst->offset, dst->length,
+ DMA_FROM_DEVICE);
+ }
+
+ if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
+ /* append icv at the end */
+ size_t cp_len = crypto_aead_authsize(
+ crypto_aead_reqtfm(aead_req));
+ u32 icv[cp_len];
+
+ crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
+ cp_len);
+
+ scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
+ cp_len, 1);
+ }
+
+ crypto4xx_ret_sg_desc(dev, pd_uinfo);
+
+ if (pd->pd_ctl.bf.status & 0xff) {
+ if (pd->pd_ctl.bf.status & 0x1) {
+ /* authentication error */
+ err = -EBADMSG;
+ } else {
+ if (!__ratelimit(&dev->aead_ratelimit)) {
+ if (pd->pd_ctl.bf.status & 2)
+ pr_err("pad fail error\n");
+ if (pd->pd_ctl.bf.status & 4)
+ pr_err("seqnum fail\n");
+ if (pd->pd_ctl.bf.status & 8)
+ pr_err("error _notify\n");
+ pr_err("aead return err status = 0x%02x\n",
+ pd->pd_ctl.bf.status & 0xff);
+ pr_err("pd pad_ctl = 0x%08x\n",
+ pd->pd_ctl.bf.pd_pad_ctl);
+ }
+ err = -EINVAL;
+ }
+ }
+
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ aead_request_complete(aead_req, -EINPROGRESS);
+
+ aead_request_complete(aead_req, err);
}
-/**
- * Note: Only use this function to copy items that is word aligned.
- */
-void crypto4xx_memcpy_le(unsigned int *dst,
- const unsigned char *buf,
- int len)
+static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
- u8 *tmp;
- for (; len >= 4; buf += 4, len -= 4)
- *dst++ = cpu_to_le32(*(unsigned int *) buf);
-
- tmp = (u8 *)dst;
- switch (len) {
- case 3:
- *tmp++ = 0;
- *tmp++ = *(buf+2);
- *tmp++ = *(buf+1);
- *tmp++ = *buf;
- break;
- case 2:
- *tmp++ = 0;
- *tmp++ = 0;
- *tmp++ = *(buf+1);
- *tmp++ = *buf;
+ struct ce_pd *pd = &dev->pdr[idx];
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+
+ switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
break;
- case 1:
- *tmp++ = 0;
- *tmp++ = 0;
- *tmp++ = 0;
- *tmp++ = *buf;
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto4xx_aead_done(dev, pd_uinfo, pd);
break;
- default:
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto4xx_ahash_done(dev, pd_uinfo);
break;
}
}
@@ -729,17 +658,6 @@ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
kfree(core_dev);
}
-void crypto4xx_return_pd(struct crypto4xx_device *dev,
- u32 pd_entry, struct ce_pd *pd,
- struct pd_uinfo *pd_uinfo)
-{
- /* irq should be already disabled */
- dev->pdr_head = pd_entry;
- pd->pd_ctl.w = 0;
- pd->pd_ctl_len.w = 0;
- pd_uinfo->state = PD_ENTRY_FREE;
-}
-
static u32 get_next_gd(u32 current)
{
if (current != PPC4XX_LAST_GD)
@@ -756,17 +674,19 @@ static u32 get_next_sd(u32 current)
return 0;
}
-u32 crypto4xx_build_pd(struct crypto_async_request *req,
+int crypto4xx_build_pd(struct crypto_async_request *req,
struct crypto4xx_ctx *ctx,
struct scatterlist *src,
struct scatterlist *dst,
- unsigned int datalen,
- void *iv, u32 iv_len)
+ const unsigned int datalen,
+ const __le32 *iv, const u32 iv_len,
+ const struct dynamic_sa_ctl *req_sa,
+ const unsigned int sa_len,
+ const unsigned int assoclen)
{
+ struct scatterlist _dst[2];
struct crypto4xx_device *dev = ctx->dev;
- dma_addr_t addr, pd_dma, sd_dma, gd_dma;
struct dynamic_sa_ctl *sa;
- struct scatterlist *sg;
struct ce_gd *gd;
struct ce_pd *pd;
u32 num_gd, num_sd;
@@ -774,22 +694,30 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
u32 fst_sd = 0xffffffff;
u32 pd_entry;
unsigned long flags;
- struct pd_uinfo *pd_uinfo = NULL;
- unsigned int nbytes = datalen, idx;
- unsigned int ivlen = 0;
+ struct pd_uinfo *pd_uinfo;
+ unsigned int nbytes = datalen;
+ size_t offset_to_sr_ptr;
u32 gd_idx = 0;
+ int tmp;
+ bool is_busy;
- /* figure how many gd is needed */
- num_gd = sg_nents_for_len(src, datalen);
- if ((int)num_gd < 0) {
+ /* figure how many gd are needed */
+ tmp = sg_nents_for_len(src, assoclen + datalen);
+ if (tmp < 0) {
dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
- return -EINVAL;
+ return tmp;
}
- if (num_gd == 1)
- num_gd = 0;
+ if (tmp == 1)
+ tmp = 0;
+ num_gd = tmp;
- /* figure how many sd is needed */
- if (sg_is_last(dst) || ctx->is_hash) {
+ if (assoclen) {
+ nbytes += assoclen;
+ dst = scatterwalk_ffwd(_dst, dst, assoclen);
+ }
+
+ /* figure how many sd are needed */
+ if (sg_is_last(dst)) {
num_sd = 0;
} else {
if (datalen > PPC4XX_SD_BUFFER_SIZE) {
@@ -808,6 +736,31 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
* already got must be return the original place.
*/
spin_lock_irqsave(&dev->core_dev->lock, flags);
+ /*
+ * Let the caller know to slow down once more than 13/16ths = 81%
+ * of the available data contexts are being used simultaneously.
+ *
+ * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" of
+ * 31 more contexts before new requests have to be rejected.
+ */
+ if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
+ is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+ ((PPC4XX_NUM_PD * 13) / 16);
+ } else {
+ /*
+ * To fix contention issues between ipsec (no backlog) and
+ * dm-crypt (backlog), reserve 32 entries for "no backlog"
+ * data contexts.
+ */
+ is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+ ((PPC4XX_NUM_PD * 15) / 16);
+
+ if (is_busy) {
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return -EBUSY;
+ }
+ }
+
if (num_gd) {
fst_gd = crypto4xx_get_n_gd(dev, num_gd);
if (fst_gd == ERING_WAS_FULL) {
@@ -835,38 +788,28 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
}
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
- pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
- sizeof(struct pd_uinfo) * pd_entry);
- pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
+ pd = &dev->pdr[pd_entry];
+ pd->sa_len = sa_len;
+
+ pd_uinfo = &dev->pdr_uinfo[pd_entry];
pd_uinfo->async_req = req;
pd_uinfo->num_gd = num_gd;
pd_uinfo->num_sd = num_sd;
- if (iv_len || ctx->is_hash) {
- ivlen = iv_len;
- pd->sa = pd_uinfo->sa_pa;
- sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
- if (ctx->direction == DIR_INBOUND)
- memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
- else
- memcpy(sa, ctx->sa_out, ctx->sa_len * 4);
+ if (iv_len)
+ memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
- memcpy((void *) sa + ctx->offset_to_sr_ptr,
- &pd_uinfo->sr_pa, 4);
+ sa = pd_uinfo->sa_va;
+ memcpy(sa, req_sa, sa_len * 4);
+
+ sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
+ offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
+ *(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
- if (iv_len)
- crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
- } else {
- if (ctx->direction == DIR_INBOUND) {
- pd->sa = ctx->sa_in_dma_addr;
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- } else {
- pd->sa = ctx->sa_out_dma_addr;
- sa = (struct dynamic_sa_ctl *) ctx->sa_out;
- }
- }
- pd->sa_len = ctx->sa_len;
if (num_gd) {
+ dma_addr_t gd_dma;
+ struct scatterlist *sg;
+
/* get first gd we are going to use */
gd_idx = fst_gd;
pd_uinfo->first_gd = fst_gd;
@@ -875,27 +818,30 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
pd->src = gd_dma;
/* enable gather */
sa->sa_command_0.bf.gather = 1;
- idx = 0;
- src = &src[0];
/* walk the sg, and setup gather array */
+
+ sg = src;
while (nbytes) {
- sg = &src[idx];
- addr = dma_map_page(dev->core_dev->device, sg_page(sg),
- sg->offset, sg->length, DMA_TO_DEVICE);
- gd->ptr = addr;
- gd->ctl_len.len = sg->length;
+ size_t len;
+
+ len = min(sg->length, nbytes);
+ gd->ptr = dma_map_page(dev->core_dev->device,
+ sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
+ gd->ctl_len.len = len;
gd->ctl_len.done = 0;
gd->ctl_len.ready = 1;
- if (sg->length >= nbytes)
+ if (len >= nbytes)
break;
+
nbytes -= sg->length;
gd_idx = get_next_gd(gd_idx);
gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
- idx++;
+ sg = sg_next(sg);
}
} else {
pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
- src->offset, src->length, DMA_TO_DEVICE);
+ src->offset, min(nbytes, src->length),
+ DMA_TO_DEVICE);
/*
* Disable gather in sa command
*/
@@ -906,25 +852,24 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
pd_uinfo->first_gd = 0xffffffff;
pd_uinfo->num_gd = 0;
}
- if (ctx->is_hash || sg_is_last(dst)) {
+ if (sg_is_last(dst)) {
/*
* we know application give us dst a whole piece of memory
* no need to use scatter ring.
- * In case of is_hash, the icv is always at end of src data.
*/
pd_uinfo->using_sd = 0;
pd_uinfo->first_sd = 0xffffffff;
pd_uinfo->num_sd = 0;
pd_uinfo->dest_va = dst;
sa->sa_command_0.bf.scatter = 0;
- if (ctx->is_hash)
- pd->dest = virt_to_phys((void *)dst);
- else
- pd->dest = (u32)dma_map_page(dev->core_dev->device,
- sg_page(dst), dst->offset,
- dst->length, DMA_TO_DEVICE);
+ pd->dest = (u32)dma_map_page(dev->core_dev->device,
+ sg_page(dst), dst->offset,
+ min(datalen, dst->length),
+ DMA_TO_DEVICE);
} else {
+ dma_addr_t sd_dma;
struct ce_sd *sd = NULL;
+
u32 sd_idx = fst_sd;
nbytes = datalen;
sa->sa_command_0.bf.scatter = 1;
@@ -938,7 +883,6 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
sd->ctl.done = 0;
sd->ctl.rdy = 1;
/* sd->ptr should be setup by sd_init routine*/
- idx = 0;
if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
nbytes -= PPC4XX_SD_BUFFER_SIZE;
else
@@ -949,67 +893,97 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
/* setup scatter descriptor */
sd->ctl.done = 0;
sd->ctl.rdy = 1;
- if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
+ if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
nbytes -= PPC4XX_SD_BUFFER_SIZE;
- else
+ } else {
/*
* SD entry can hold PPC4XX_SD_BUFFER_SIZE,
* which is more than nbytes, so done.
*/
nbytes = 0;
+ }
}
}
- sa->sa_command_1.bf.hash_crypto_offset = 0;
- pd->pd_ctl.w = ctx->pd_ctl;
- pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
- pd_uinfo->state = PD_ENTRY_INUSE;
+ pd->pd_ctl.w = PD_CTL_HOST_READY |
+ ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
+ (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
+ PD_CTL_HASH_FINAL : 0);
+ pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
+ pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
+
wmb();
/* write any value to push engine to read a pd */
+ writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
- return -EINPROGRESS;
+ return is_busy ? -EBUSY : -EINPROGRESS;
}
/**
* Algorithm Registration Functions
*/
-static int crypto4xx_alg_init(struct crypto_tfm *tfm)
+static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
+ struct crypto4xx_ctx *ctx)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
-
ctx->dev = amcc_alg->dev;
ctx->sa_in = NULL;
ctx->sa_out = NULL;
- ctx->sa_in_dma_addr = 0;
- ctx->sa_out_dma_addr = 0;
ctx->sa_len = 0;
+}
- switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
- default:
- tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
- break;
- case CRYPTO_ALG_TYPE_AHASH:
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct crypto4xx_ctx));
- break;
- }
+static int crypto4xx_ablk_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *amcc_alg;
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
+ crypto4xx_ctx_init(amcc_alg, ctx);
+ tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
return 0;
}
-static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
+static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
-
crypto4xx_free_sa(ctx);
- crypto4xx_free_state_record(ctx);
}
-int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
- struct crypto4xx_alg_common *crypto_alg,
- int array_size)
+static void crypto4xx_ablk_exit(struct crypto_tfm *tfm)
+{
+ crypto4xx_common_exit(crypto_tfm_ctx(tfm));
+}
+
+static int crypto4xx_aead_init(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto4xx_alg *amcc_alg;
+
+ ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->sw_cipher.aead))
+ return PTR_ERR(ctx->sw_cipher.aead);
+
+ amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
+ crypto4xx_ctx_init(amcc_alg, ctx);
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+ max(sizeof(struct crypto4xx_ctx), 32 +
+ crypto_aead_reqsize(ctx->sw_cipher.aead)));
+ return 0;
+}
+
+static void crypto4xx_aead_exit(struct crypto_aead *tfm)
+{
+ struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto4xx_common_exit(ctx);
+ crypto_free_aead(ctx->sw_cipher.aead);
+}
+
+static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
+ struct crypto4xx_alg_common *crypto_alg,
+ int array_size)
{
struct crypto4xx_alg *alg;
int i;
@@ -1024,6 +998,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
alg->dev = sec_dev;
switch (alg->alg.type) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ rc = crypto_register_aead(&alg->alg.u.aead);
+ break;
+
case CRYPTO_ALG_TYPE_AHASH:
rc = crypto_register_ahash(&alg->alg.u.hash);
break;
@@ -1033,12 +1011,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
break;
}
- if (rc) {
- list_del(&alg->entry);
+ if (rc)
kfree(alg);
- } else {
+ else
list_add_tail(&alg->entry, &sec_dev->alg_list);
- }
}
return 0;
@@ -1055,6 +1031,10 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
crypto_unregister_ahash(&alg->alg.u.hash);
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto_unregister_aead(&alg->alg.u.aead);
+ break;
+
default:
crypto_unregister_alg(&alg->alg.u.cipher);
}
@@ -1068,25 +1048,23 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
struct pd_uinfo *pd_uinfo;
struct ce_pd *pd;
- u32 tail;
-
- while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
- tail = core_dev->dev->pdr_tail;
- pd_uinfo = core_dev->dev->pdr_uinfo +
- sizeof(struct pd_uinfo)*tail;
- pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
- if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
- pd->pd_ctl.bf.pe_done &&
- !pd->pd_ctl.bf.host_ready) {
- pd->pd_ctl.bf.pe_done = 0;
+ u32 tail = core_dev->dev->pdr_tail;
+ u32 head = core_dev->dev->pdr_head;
+
+ do {
+ pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
+ pd = &core_dev->dev->pdr[tail];
+ if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
+ ((READ_ONCE(pd->pd_ctl.w) &
+ (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
+ PD_CTL_PE_DONE)) {
crypto4xx_pd_done(core_dev->dev, tail);
- crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
- pd_uinfo->state = PD_ENTRY_FREE;
+ tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
} else {
/* if tail not done, break */
break;
}
- }
+ } while (head != tail);
}
/**
@@ -1110,18 +1088,20 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
/**
* Supported Crypto Algorithms
*/
-struct crypto4xx_alg_common crypto4xx_alg[] = {
+static struct crypto4xx_alg_common crypto4xx_alg[] = {
/* Crypto AES modes */
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_alg_init,
- .cra_exit = crypto4xx_alg_exit,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
.cra_module = THIS_MODULE,
.cra_u = {
.ablkcipher = {
@@ -1134,6 +1114,147 @@ struct crypto4xx_alg_common crypto4xx_alg[] = {
}
}
}},
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "cfb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cfb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ } },
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = crypto4xx_setkey_rfc3686,
+ .encrypt = crypto4xx_rfc3686_encrypt,
+ .decrypt = crypto4xx_rfc3686_decrypt,
+ }
+ }
+ } },
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = crypto4xx_setkey_aes_ecb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ } },
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "ofb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_ofb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ } },
+
+ /* AEAD */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
+ .setkey = crypto4xx_setkey_aes_ccm,
+ .setauthsize = crypto4xx_setauthsize_aead,
+ .encrypt = crypto4xx_encrypt_aes_ccm,
+ .decrypt = crypto4xx_decrypt_aes_ccm,
+ .init = crypto4xx_aead_init,
+ .exit = crypto4xx_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ } },
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
+ .setkey = crypto4xx_setkey_aes_gcm,
+ .setauthsize = crypto4xx_setauthsize_aead,
+ .encrypt = crypto4xx_encrypt_aes_gcm,
+ .decrypt = crypto4xx_decrypt_aes_gcm,
+ .init = crypto4xx_aead_init,
+ .exit = crypto4xx_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ } },
};
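The two entries above only advertise ccm(aes) and gcm(aes); the calling convention is the generic kernel AEAD API. As a rough, hypothetical sketch (the function name, key/buffer sizes and the synchronous crypto_wait_req() style are illustrative only, not part of this patch), a kernel user of the new gcm-aes-ppc4xx transform could look like this:

#include <crypto/aead.h>
#include <crypto/gcm.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_gcm_aes_encrypt(void)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	static const u8 key[16];		/* all-zero demo key */
	u8 iv[GCM_AES_IV_SIZE] = { 0 };		/* 12-byte nonce */
	u8 *buf;
	int err;

	/* Picks the highest-priority gcm(aes) provider, e.g. gcm-aes-ppc4xx */
	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* 16 bytes of plaintext followed by room for the 16-byte tag */
	buf = kzalloc(32, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out_tfm;
	}

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_buf;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_buf;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_buf;
	}

	sg_init_one(&sg, buf, 32);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_crypt(req, &sg, &sg, 16, iv);	/* cryptlen = 16 */
	aead_request_set_ad(req, 0);			/* no associated data */

	/* The driver returns -EINPROGRESS/-EBUSY; wait for completion */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_buf:
	kfree(buf);
out_tfm:
	crypto_free_aead(tfm);
	return err;
}

crypto_wait_req() merely turns the -EINPROGRESS/-EBUSY codes that crypto4xx_build_pd() returns into a blocking wait, which keeps the demonstration short; in-kernel users such as IPsec instead supply a real completion callback and keep the request asynchronous.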
/**
@@ -1187,13 +1308,14 @@ static int crypto4xx_probe(struct platform_device *ofdev)
core_dev->device = dev;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
+ ratelimit_default_init(&core_dev->dev->aead_ratelimit);
rc = crypto4xx_build_pdr(core_dev->dev);
if (rc)
goto err_build_pdr;
rc = crypto4xx_build_gdr(core_dev->dev);
if (rc)
- goto err_build_gdr;
+ goto err_build_pdr;
rc = crypto4xx_build_sdr(core_dev->dev);
if (rc)
@@ -1236,12 +1358,11 @@ err_iomap:
err_request_irq:
irq_dispose_mapping(core_dev->irq);
tasklet_kill(&core_dev->tasklet);
- crypto4xx_destroy_sdr(core_dev->dev);
err_build_sdr:
+ crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
-err_build_gdr:
- crypto4xx_destroy_pdr(core_dev->dev);
err_build_pdr:
+ crypto4xx_destroy_pdr(core_dev->dev);
kfree(core_dev->dev);
err_alloc_dev:
kfree(core_dev);
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index ecfdcfe3698d..8ac3bd37203b 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -22,7 +22,11 @@
#ifndef __CRYPTO4XX_CORE_H__
#define __CRYPTO4XX_CORE_H__
+#include <linux/ratelimit.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_sa.h"
#define MODULE_NAME "crypto4xx"
@@ -34,20 +38,28 @@
#define PPC405EX_CE_RESET 0x00000008
#define CRYPTO4XX_CRYPTO_PRIORITY 300
-#define PPC4XX_LAST_PD 63
-#define PPC4XX_NUM_PD 64
-#define PPC4XX_LAST_GD 1023
+#define PPC4XX_NUM_PD 256
+#define PPC4XX_LAST_PD (PPC4XX_NUM_PD - 1)
#define PPC4XX_NUM_GD 1024
-#define PPC4XX_LAST_SD 63
-#define PPC4XX_NUM_SD 64
+#define PPC4XX_LAST_GD (PPC4XX_NUM_GD - 1)
+#define PPC4XX_NUM_SD 256
+#define PPC4XX_LAST_SD (PPC4XX_NUM_SD - 1)
#define PPC4XX_SD_BUFFER_SIZE 2048
-#define PD_ENTRY_INUSE 1
+#define PD_ENTRY_BUSY BIT(1)
+#define PD_ENTRY_INUSE BIT(0)
#define PD_ENTRY_FREE 0
#define ERING_WAS_FULL 0xffffffff
struct crypto4xx_device;
+union shadow_sa_buf {
+ struct dynamic_sa_ctl sa;
+
+ /* alloc 256 bytes which is enough for any kind of dynamic sa */
+ u8 buf[256];
+} __packed;
+
struct pd_uinfo {
struct crypto4xx_device *dev;
u32 state;
@@ -60,9 +72,8 @@ struct pd_uinfo {
used by this packet */
u32 num_sd; /* number of scatter discriptors
used by this packet */
- void *sa_va; /* shadow sa, when using cp from ctx->sa */
- u32 sa_pa;
- void *sr_va; /* state record for shadow sa */
+ struct dynamic_sa_ctl *sa_va; /* shadow sa */
+ struct sa_state_record *sr_va; /* state record for shadow sa */
u32 sr_pa;
struct scatterlist *dest_va;
struct crypto_async_request *async_req; /* base crypto request
@@ -72,27 +83,21 @@ struct pd_uinfo {
struct crypto4xx_device {
struct crypto4xx_core_device *core_dev;
char *name;
- u64 ce_phy_address;
void __iomem *ce_base;
void __iomem *trng_base;
- void *pdr; /* base address of packet
- descriptor ring */
- dma_addr_t pdr_pa; /* physical address used to
- program ce pdr_base_register */
- void *gdr; /* gather descriptor ring */
- dma_addr_t gdr_pa; /* physical address used to
- program ce gdr_base_register */
- void *sdr; /* scatter descriptor ring */
- dma_addr_t sdr_pa; /* physical address used to
- program ce sdr_base_register */
+ struct ce_pd *pdr; /* base address of packet descriptor ring */
+ dma_addr_t pdr_pa; /* physical address of pdr_base_register */
+ struct ce_gd *gdr; /* gather descriptor ring */
+ dma_addr_t gdr_pa; /* physical address of gdr_base_register */
+ struct ce_sd *sdr; /* scatter descriptor ring */
+ dma_addr_t sdr_pa; /* physical address of sdr_base_register */
void *scatter_buffer_va;
dma_addr_t scatter_buffer_pa;
- u32 scatter_buffer_size;
- void *shadow_sa_pool; /* pool of memory for sa in pd_uinfo */
+ union shadow_sa_buf *shadow_sa_pool;
dma_addr_t shadow_sa_pool_pa;
- void *shadow_sr_pool; /* pool of memory for sr in pd_uinfo */
+ struct sa_state_record *shadow_sr_pool;
dma_addr_t shadow_sr_pool_pa;
u32 pdr_tail;
u32 pdr_head;
@@ -100,9 +105,10 @@ struct crypto4xx_device {
u32 gdr_head;
u32 sdr_tail;
u32 sdr_head;
- void *pdr_uinfo;
+ struct pd_uinfo *pdr_uinfo;
struct list_head alg_list; /* List of algorithm supported
by this device */
+ struct ratelimit_state aead_ratelimit;
};
struct crypto4xx_core_device {
@@ -118,30 +124,13 @@ struct crypto4xx_core_device {
struct crypto4xx_ctx {
struct crypto4xx_device *dev;
- void *sa_in;
- dma_addr_t sa_in_dma_addr;
- void *sa_out;
- dma_addr_t sa_out_dma_addr;
- void *state_record;
- dma_addr_t state_record_dma_addr;
+ struct dynamic_sa_ctl *sa_in;
+ struct dynamic_sa_ctl *sa_out;
+ __le32 iv_nonce;
u32 sa_len;
- u32 offset_to_sr_ptr; /* offset to state ptr, in dynamic sa */
- u32 direction;
- u32 next_hdr;
- u32 save_iv;
- u32 pd_ctl_len;
- u32 pd_ctl;
- u32 bypass;
- u32 is_hash;
- u32 hash_final;
-};
-
-struct crypto4xx_req_ctx {
- struct crypto4xx_device *dev; /* Device in which
- operation to send to */
- void *sa;
- u32 sa_dma_addr;
- u16 sa_len;
+ union {
+ struct crypto_aead *aead;
+ } sw_cipher;
};
struct crypto4xx_alg_common {
@@ -149,6 +138,7 @@ struct crypto4xx_alg_common {
union {
struct crypto_alg cipher;
struct ahash_alg hash;
+ struct aead_alg aead;
} u;
};
@@ -158,43 +148,90 @@ struct crypto4xx_alg {
struct crypto4xx_device *dev;
};
-static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
- struct crypto_alg *x)
+int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
+void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+int crypto4xx_build_pd(struct crypto_async_request *req,
+ struct crypto4xx_ctx *ctx,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ const unsigned int datalen,
+ const __le32 *iv, const u32 iv_len,
+ const struct dynamic_sa_ctl *sa,
+ const unsigned int sa_len,
+ const unsigned int assoclen);
+int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt(struct ablkcipher_request *req);
+int crypto4xx_decrypt(struct ablkcipher_request *req);
+int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req);
+int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req);
+int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+int crypto4xx_hash_digest(struct ahash_request *req);
+int crypto4xx_hash_final(struct ahash_request *req);
+int crypto4xx_hash_update(struct ahash_request *req);
+int crypto4xx_hash_init(struct ahash_request *req);
+
+/**
+ * Note: Only use this function to copy items that are word-aligned;
+ * it byte-swaps each 32-bit word as it copies.
+ */
+static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
+ size_t len)
{
- switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_AHASH:
- return container_of(__crypto_ahash_alg(x),
- struct crypto4xx_alg, alg.u.hash);
+ for (; len >= 4; buf += 4, len -= 4)
+ *dst++ = __swab32p((u32 *) buf);
+
+ if (len) {
+ const u8 *tmp = (u8 *)buf;
+
+ switch (len) {
+ case 3:
+ *dst = (tmp[2] << 16) |
+ (tmp[1] << 8) |
+ tmp[0];
+ break;
+ case 2:
+ *dst = (tmp[1] << 8) |
+ tmp[0];
+ break;
+ case 1:
+ *dst = tmp[0];
+ break;
+ default:
+ break;
+ }
}
+}
- return container_of(x, struct crypto4xx_alg, alg.u.cipher);
+static inline void crypto4xx_memcpy_from_le32(u32 *dst, const void *buf,
+ size_t len)
+{
+ crypto4xx_memcpy_swab32(dst, buf, len);
}
-extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
-extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
-extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
- struct crypto4xx_ctx *rctx);
-extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx);
-extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
-extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
-extern void crypto4xx_memcpy_le(unsigned int *dst,
- const unsigned char *buf, int len);
-extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
- struct crypto4xx_ctx *ctx,
- struct scatterlist *src,
- struct scatterlist *dst,
- unsigned int datalen,
- void *iv, u32 iv_len);
-extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen);
-extern int crypto4xx_encrypt(struct ablkcipher_request *req);
-extern int crypto4xx_decrypt(struct ablkcipher_request *req);
-extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
-extern int crypto4xx_hash_digest(struct ahash_request *req);
-extern int crypto4xx_hash_final(struct ahash_request *req);
-extern int crypto4xx_hash_update(struct ahash_request *req);
-extern int crypto4xx_hash_init(struct ahash_request *req);
+static inline void crypto4xx_memcpy_to_le32(__le32 *dst, const void *buf,
+ size_t len)
+{
+ crypto4xx_memcpy_swab32((u32 *)dst, buf, len);
+}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
+ unsigned int authsize);
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req);
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req);
+
#endif
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 279b8725559f..0a22ec5d1a96 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -261,6 +261,9 @@ union ce_pd_ctl {
} bf;
u32 w;
} __attribute__((packed));
+#define PD_CTL_HASH_FINAL BIT(4)
+#define PD_CTL_PE_DONE BIT(1)
+#define PD_CTL_HOST_READY BIT(0)
union ce_pd_ctl_len {
struct {
diff --git a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c
deleted file mode 100644
index 69182e2cc3ea..000000000000
--- a/drivers/crypto/amcc/crypto4xx_sa.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * AMCC SoC PPC4xx Crypto Driver
- *
- * Copyright (c) 2008 Applied Micro Circuits Corporation.
- * All rights reserved. James Hsiao <jhsiao@amcc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * @file crypto4xx_sa.c
- *
- * This file implements the security context
- * associate format.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mod_devicetable.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock_types.h>
-#include <linux/highmem.h>
-#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <crypto/des.h>
-#include "crypto4xx_reg_def.h"
-#include "crypto4xx_sa.h"
-#include "crypto4xx_core.h"
-
-u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
-{
- u32 offset;
- union dynamic_sa_contents cts;
-
- if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
- else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
- offset = cts.bf.key_size
- + cts.bf.inner_size
- + cts.bf.outer_size
- + cts.bf.spi
- + cts.bf.seq_num0
- + cts.bf.seq_num1
- + cts.bf.seq_num_mask0
- + cts.bf.seq_num_mask1
- + cts.bf.seq_num_mask2
- + cts.bf.seq_num_mask3
- + cts.bf.iv0
- + cts.bf.iv1
- + cts.bf.iv2
- + cts.bf.iv3;
-
- return sizeof(struct dynamic_sa_ctl) + offset * 4;
-}
-
-u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
-{
- union dynamic_sa_contents cts;
-
- if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
- else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
- return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
-}
-
-u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx)
-{
- union dynamic_sa_contents cts;
-
- if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
- else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
-
- return sizeof(struct dynamic_sa_ctl);
-}
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h
index 1352d58d4e34..a4d403528db5 100644
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -55,6 +55,8 @@ union dynamic_sa_contents {
#define SA_OP_GROUP_BASIC 0
#define SA_OPCODE_ENCRYPT 0
#define SA_OPCODE_DECRYPT 0
+#define SA_OPCODE_ENCRYPT_HASH 1
+#define SA_OPCODE_HASH_DECRYPT 1
#define SA_OPCODE_HASH 3
#define SA_CIPHER_ALG_DES 0
#define SA_CIPHER_ALG_3DES 1
@@ -65,6 +67,8 @@ union dynamic_sa_contents {
#define SA_HASH_ALG_MD5 0
#define SA_HASH_ALG_SHA1 1
+#define SA_HASH_ALG_GHASH 12
+#define SA_HASH_ALG_CBC_MAC 14
#define SA_HASH_ALG_NULL 15
#define SA_HASH_ALG_SHA1_DIGEST_SIZE 20
@@ -112,6 +116,9 @@ union sa_command_0 {
#define CRYPTO_MODE_ECB 0
#define CRYPTO_MODE_CBC 1
+#define CRYPTO_MODE_OFB 2
+#define CRYPTO_MODE_CFB 3
+#define CRYPTO_MODE_CTR 4
#define CRYPTO_FEEDBACK_MODE_NO_FB 0
#define CRYPTO_FEEDBACK_MODE_64BIT_OFB 0
@@ -169,7 +176,7 @@ union sa_command_1 {
} __attribute__((packed));
struct dynamic_sa_ctl {
- u32 sa_contents;
+ union dynamic_sa_contents sa_contents;
union sa_command_0 sa_command_0;
union sa_command_1 sa_command_1;
} __attribute__((packed));
@@ -178,9 +185,12 @@ struct dynamic_sa_ctl {
* State Record for Security Association (SA)
*/
struct sa_state_record {
- u32 save_iv[4];
- u32 save_hash_byte_cnt[2];
- u32 save_digest[16];
+ __le32 save_iv[4];
+ __le32 save_hash_byte_cnt[2];
+ union {
+ u32 save_digest[16]; /* for MD5/SHA */
+ __le32 save_digest_le32[16]; /* GHASH / CBC */
+ };
} __attribute__((packed));
/**
@@ -189,8 +199,8 @@ struct sa_state_record {
*/
struct dynamic_sa_aes128 {
struct dynamic_sa_ctl ctrl;
- u32 key[4];
- u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ __le32 key[4];
+ __le32 iv[4]; /* for CBC, OFC, and CFB mode */
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
@@ -203,8 +213,8 @@ struct dynamic_sa_aes128 {
*/
struct dynamic_sa_aes192 {
struct dynamic_sa_ctl ctrl;
- u32 key[6];
- u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ __le32 key[6];
+ __le32 iv[4]; /* for CBC, OFC, and CFB mode */
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
@@ -217,8 +227,8 @@ struct dynamic_sa_aes192 {
*/
struct dynamic_sa_aes256 {
struct dynamic_sa_ctl ctrl;
- u32 key[8];
- u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ __le32 key[8];
+ __le32 iv[4]; /* for CBC, OFC, and CFB mode */
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
@@ -228,16 +238,81 @@ struct dynamic_sa_aes256 {
#define SA_AES_CONTENTS 0x3e000002
/**
+ * Security Association (SA) for AES128 CCM
+ */
+struct dynamic_sa_aes128_ccm {
+ struct dynamic_sa_ctl ctrl;
+ __le32 key[4];
+ __le32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __packed;
+#define SA_AES128_CCM_LEN (sizeof(struct dynamic_sa_aes128_ccm)/4)
+#define SA_AES128_CCM_CONTENTS 0x3e000042
+#define SA_AES_CCM_CONTENTS 0x3e000002
+
+/**
+ * Security Association (SA) for AES128_GCM
+ */
+struct dynamic_sa_aes128_gcm {
+ struct dynamic_sa_ctl ctrl;
+ __le32 key[4];
+ __le32 inner_digest[4];
+ __le32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __packed;
+
+#define SA_AES128_GCM_LEN (sizeof(struct dynamic_sa_aes128_gcm)/4)
+#define SA_AES128_GCM_CONTENTS 0x3e000442
+#define SA_AES_GCM_CONTENTS 0x3e000402
+
+/**
* Security Association (SA) for HASH160: HMAC-SHA1
*/
struct dynamic_sa_hash160 {
struct dynamic_sa_ctl ctrl;
- u32 inner_digest[5];
- u32 outer_digest[5];
+ __le32 inner_digest[5];
+ __le32 outer_digest[5];
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
#define SA_HASH160_LEN (sizeof(struct dynamic_sa_hash160)/4)
#define SA_HASH160_CONTENTS 0x2000a502
+static inline u32
+get_dynamic_sa_offset_state_ptr_field(struct dynamic_sa_ctl *cts)
+{
+ u32 offset;
+
+ offset = cts->sa_contents.bf.key_size
+ + cts->sa_contents.bf.inner_size
+ + cts->sa_contents.bf.outer_size
+ + cts->sa_contents.bf.spi
+ + cts->sa_contents.bf.seq_num0
+ + cts->sa_contents.bf.seq_num1
+ + cts->sa_contents.bf.seq_num_mask0
+ + cts->sa_contents.bf.seq_num_mask1
+ + cts->sa_contents.bf.seq_num_mask2
+ + cts->sa_contents.bf.seq_num_mask3
+ + cts->sa_contents.bf.iv0
+ + cts->sa_contents.bf.iv1
+ + cts->sa_contents.bf.iv2
+ + cts->sa_contents.bf.iv3;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+static inline __le32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
+{
+ return (__le32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
+}
+
+static inline __le32 *get_dynamic_sa_inner_digest(struct dynamic_sa_ctl *cts)
+{
+ return (__le32 *) ((unsigned long)cts +
+ sizeof(struct dynamic_sa_ctl) +
+ cts->sa_contents.bf.key_size * 4);
+}
+
#endif
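The new inline helpers above compute field offsets directly from the sa_contents word counts instead of going through a crypto4xx_ctx. A minimal standalone sketch of the same offset arithmetic follows; the types and field names are simplified stand-ins, not the driver's structures.

/* Sketch: every sa_contents field counts 32-bit words, so an offset is
 * (sum of preceding word counts) * 4 past the control header. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct sa_word_counts {		/* stand-in for the sa_contents bitfields */
	unsigned int key_size, inner_size, outer_size, iv_words;
};

static uint32_t state_ptr_offset(const struct sa_word_counts *c,
				 size_t ctl_hdr_len)
{
	uint32_t words = c->key_size + c->inner_size +
			 c->outer_size + c->iv_words;

	return ctl_hdr_len + words * 4;
}

int main(void)
{
	/* e.g. AES-128 CBC: 4 key words, 4 IV words, no digests */
	struct sa_word_counts c = { 4, 0, 0, 4 };

	printf("state pointer at byte offset %u\n",
	       (unsigned int)state_ptr_offset(&c, 12));
	return 0;
}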
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 29e20c37f3a6..691c6465b71e 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -36,6 +36,7 @@
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <linux/platform_data/crypto-atmel.h>
@@ -76,12 +77,11 @@
AES_FLAGS_ENCRYPT | \
AES_FLAGS_GTAGEN)
-#define AES_FLAGS_INIT BIT(2)
#define AES_FLAGS_BUSY BIT(3)
#define AES_FLAGS_DUMP_REG BIT(4)
#define AES_FLAGS_OWN_SHA BIT(5)
-#define AES_FLAGS_PERSISTENT (AES_FLAGS_INIT | AES_FLAGS_BUSY)
+#define AES_FLAGS_PERSISTENT AES_FLAGS_BUSY
#define ATMEL_AES_QUEUE_LENGTH 50
@@ -110,6 +110,7 @@ struct atmel_aes_base_ctx {
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u16 block_size;
+ bool is_aead;
};
struct atmel_aes_ctx {
@@ -156,6 +157,7 @@ struct atmel_aes_authenc_ctx {
struct atmel_aes_reqctx {
unsigned long mode;
+ u32 lastc[AES_BLOCK_SIZE / sizeof(u32)];
};
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
@@ -448,11 +450,8 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
if (err)
return err;
- if (!(dd->flags & AES_FLAGS_INIT)) {
- atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
- atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
- dd->flags |= AES_FLAGS_INIT;
- }
+ atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
+ atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
return 0;
}
@@ -497,12 +496,34 @@ static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
- atmel_aes_authenc_complete(dd, err);
+ if (dd->ctx->is_aead)
+ atmel_aes_authenc_complete(dd, err);
#endif
clk_disable(dd->iclk);
dd->flags &= ~AES_FLAGS_BUSY;
+ if (!dd->ctx->is_aead) {
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_ablkcipher *ablkcipher =
+ crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ if (rctx->mode & AES_FLAGS_ENCRYPT) {
+ scatterwalk_map_and_copy(req->info, req->dst,
+ req->nbytes - ivsize, ivsize, 0);
+ } else {
+ if (req->src == req->dst) {
+ memcpy(req->info, rctx->lastc, ivsize);
+ } else {
+ scatterwalk_map_and_copy(req->info, req->src,
+ req->nbytes - ivsize, ivsize, 0);
+ }
+ }
+ }
+
if (dd->is_async)
dd->areq->complete(dd->areq, err);
@@ -1071,11 +1092,11 @@ static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
- struct atmel_aes_base_ctx *ctx;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct atmel_aes_reqctx *rctx;
struct atmel_aes_dev *dd;
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
switch (mode & AES_FLAGS_OPMODE_MASK) {
case AES_FLAGS_CFB8:
ctx->block_size = CFB8_BLOCK_SIZE;
@@ -1097,6 +1118,7 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
ctx->block_size = AES_BLOCK_SIZE;
break;
}
+ ctx->is_aead = false;
dd = atmel_aes_find_dev(ctx);
if (!dd)
@@ -1105,6 +1127,13 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx = ablkcipher_request_ctx(req);
rctx->mode = mode;
+ if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ scatterwalk_map_and_copy(rctx->lastc, req->src,
+ (req->nbytes - ivsize), ivsize, 0);
+ }
+
return atmel_aes_handle_queue(dd, &req->base);
}
@@ -1236,10 +1265,6 @@ static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
static struct crypto_alg aes_algs[] = {
{
.cra_name = "ecb(aes)",
@@ -1252,7 +1277,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1272,7 +1296,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1293,7 +1316,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1314,7 +1336,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1335,7 +1356,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1356,7 +1376,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1377,7 +1396,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1398,7 +1416,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_ctr_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1421,7 +1438,6 @@ static struct crypto_alg aes_cfb64_alg = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1532,7 +1548,7 @@ static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
if (err)
return atmel_aes_complete(dd, err);
- if (likely(ivsize == 12)) {
+ if (likely(ivsize == GCM_AES_IV_SIZE)) {
memcpy(ctx->j0, iv, ivsize);
ctx->j0[3] = cpu_to_be32(1);
return atmel_aes_gcm_process(dd);
@@ -1739,6 +1755,7 @@ static int atmel_aes_gcm_crypt(struct aead_request *req,
ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
ctx->block_size = AES_BLOCK_SIZE;
+ ctx->is_aead = true;
dd = atmel_aes_find_dev(ctx);
if (!dd)
@@ -1808,19 +1825,13 @@ static int atmel_aes_gcm_init(struct crypto_aead *tfm)
return 0;
}
-static void atmel_aes_gcm_exit(struct crypto_aead *tfm)
-{
-
-}
-
static struct aead_alg aes_gcm_alg = {
.setkey = atmel_aes_gcm_setkey,
.setauthsize = atmel_aes_gcm_setauthsize,
.encrypt = atmel_aes_gcm_encrypt,
.decrypt = atmel_aes_gcm_decrypt,
.init = atmel_aes_gcm_init,
- .exit = atmel_aes_gcm_exit,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
@@ -1955,7 +1966,6 @@ static struct crypto_alg aes_xts_alg = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_xts_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -2223,6 +2233,7 @@ static int atmel_aes_authenc_crypt(struct aead_request *req,
rctx->base.mode = mode;
ctx->block_size = AES_BLOCK_SIZE;
+ ctx->is_aead = true;
dd = atmel_aes_find_dev(ctx);
if (!dd)
@@ -2382,7 +2393,6 @@ static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
struct crypto_platform_data *pdata)
{
struct at_dma_slave *slave;
- int err = -ENOMEM;
dma_cap_mask_t mask;
dma_cap_zero(mask);
@@ -2407,7 +2417,7 @@ err_dma_out:
dma_release_channel(dd->src.chan);
err_dma_in:
dev_warn(dd->dev, "no DMA channel available\n");
- return err;
+ return -ENODEV;
}
static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
@@ -2658,8 +2668,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
- aes_dd->irq = -1;
-
/* Get the base address */
aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!aes_res) {
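The atmel-aes hunks above make the driver report the chaining IV in req->info on completion: the last ciphertext block, taken from dst after encryption, from a copy saved in rctx->lastc before an in-place decryption, or from src otherwise. A minimal sketch of that selection rule over plain buffers (stand-ins for the driver's scatterlists):

#include <string.h>
#include <stdint.h>
#include <stddef.h>

#define AES_BLK 16

/* iv: output buffer for the next request's IV; saved_last: copy of the
 * last ciphertext block taken before an in-place decryption
 * (the role rctx->lastc plays in the patch). */
static void next_cbc_iv(uint8_t iv[AES_BLK], int encrypt, int in_place,
			const uint8_t *src, const uint8_t *dst,
			size_t nbytes, const uint8_t *saved_last)
{
	if (encrypt)			/* ciphertext just written to dst */
		memcpy(iv, dst + nbytes - AES_BLK, AES_BLK);
	else if (in_place)		/* src overwritten; use the saved copy */
		memcpy(iv, saved_last, AES_BLK);
	else				/* ciphertext still intact in src */
		memcpy(iv, src + nbytes - AES_BLK, AES_BLK);
}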
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 3e2f41b3eaf3..8874aa5ca0f7 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2628,7 +2628,6 @@ static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
struct crypto_platform_data *pdata)
{
- int err = -ENOMEM;
dma_cap_mask_t mask_in;
/* Try to grab DMA channel */
@@ -2639,7 +2638,7 @@ static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
if (!dd->dma_lch_in.chan) {
dev_warn(dd->dev, "no DMA channel available\n");
- return err;
+ return -ENODEV;
}
dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
@@ -2778,8 +2777,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
- sha_dd->irq = -1;
-
/* Get the base address */
sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!sha_res) {
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index f4b335dda568..592124f8382b 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -720,7 +720,6 @@ static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
struct crypto_platform_data *pdata)
{
- int err = -ENOMEM;
dma_cap_mask_t mask;
dma_cap_zero(mask);
@@ -765,7 +764,7 @@ err_dma_out:
dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
dev_warn(dd->dev, "no DMA channel available\n");
- return err;
+ return -ENODEV;
}
static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
@@ -912,10 +911,6 @@ static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static void atmel_tdes_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
static struct crypto_alg tdes_algs[] = {
{
.cra_name = "ecb(des)",
@@ -928,7 +923,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -948,7 +942,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -969,7 +962,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -990,7 +982,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1011,7 +1002,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1032,7 +1022,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1053,7 +1042,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1074,7 +1062,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
@@ -1094,7 +1081,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
@@ -1115,7 +1101,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 2*DES_KEY_SIZE,
@@ -1136,7 +1121,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 2*DES_KEY_SIZE,
@@ -1157,7 +1141,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 2*DES_KEY_SIZE,
@@ -1178,7 +1161,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 2*DES_KEY_SIZE,
@@ -1199,7 +1181,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
@@ -1382,8 +1363,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
- tdes_dd->irq = -1;
-
/* Get the base address */
tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!tdes_res) {
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 8685c7e4debd..ce70b44d0fb6 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -256,6 +256,44 @@ spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
return 0;
}
+static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
+ u8 chan_idx)
+{
+ int err;
+ int retry_cnt = 0;
+ struct device *dev = &(iproc_priv.pdev->dev);
+
+ err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
+ if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
+ while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
+ /*
+ * Mailbox queue is full. Since MAY_SLEEP is set, assume
+ * not in atomic context and we can wait and try again.
+ */
+ retry_cnt++;
+ usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
+ err = mbox_send_message(iproc_priv.mbox[chan_idx],
+ mssg);
+ atomic_inc(&iproc_priv.mb_no_spc);
+ }
+ }
+ if (err < 0) {
+ atomic_inc(&iproc_priv.mb_send_fail);
+ return err;
+ }
+
+ /* Check error returned by mailbox controller */
+ err = mssg->error;
+	if (unlikely(err < 0))
+		dev_err(dev, "message error %d", err);
+
+ /* Signal txdone for mailbox channel */
+ mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
+ return err;
+}
+
/**
* handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
* a single SPU request message, starting at the current position in the request
@@ -293,7 +331,6 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
u32 pad_len; /* total length of all padding */
bool update_key = false;
struct brcm_message *mssg; /* mailbox message */
- int retry_cnt = 0;
/* number of entries in src and dst sg in mailbox message. */
u8 rx_frag_num = 2; /* response header and STATUS */
@@ -462,24 +499,9 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
if (err)
return err;
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
- if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
- while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
- /*
- * Mailbox queue is full. Since MAY_SLEEP is set, assume
- * not in atomic context and we can wait and try again.
- */
- retry_cnt++;
- usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
- mssg);
- atomic_inc(&iproc_priv.mb_no_spc);
- }
- }
- if (unlikely(err < 0)) {
- atomic_inc(&iproc_priv.mb_send_fail);
+ err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+ if (unlikely(err < 0))
return err;
- }
return -EINPROGRESS;
}
@@ -710,7 +732,6 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
u32 spu_hdr_len;
unsigned int digestsize;
u16 rem = 0;
- int retry_cnt = 0;
/*
* number of entries in src and dst sg. Always includes SPU msg header.
@@ -904,24 +925,10 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
if (err)
return err;
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
- if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
- while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
- /*
- * Mailbox queue is full. Since MAY_SLEEP is set, assume
- * not in atomic context and we can wait and try again.
- */
- retry_cnt++;
- usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
- mssg);
- atomic_inc(&iproc_priv.mb_no_spc);
- }
- }
- if (err < 0) {
- atomic_inc(&iproc_priv.mb_send_fail);
+ err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+ if (unlikely(err < 0))
return err;
- }
+
return -EINPROGRESS;
}
@@ -1320,7 +1327,6 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
int assoc_nents = 0;
bool incl_icv = false;
unsigned int digestsize = ctx->digestsize;
- int retry_cnt = 0;
/* number of entries in src and dst sg. Always includes SPU msg header.
*/
@@ -1367,11 +1373,11 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
* expects AAD to include just SPI and seqno. So
* subtract off the IV len.
*/
- aead_parms.assoc_size -= GCM_ESP_IV_SIZE;
+ aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
if (rctx->is_encrypt) {
aead_parms.return_iv = true;
- aead_parms.ret_iv_len = GCM_ESP_IV_SIZE;
+ aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
}
} else {
@@ -1558,24 +1564,9 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
if (err)
return err;
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
- if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
- while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
- /*
- * Mailbox queue is full. Since MAY_SLEEP is set, assume
- * not in atomic context and we can wait and try again.
- */
- retry_cnt++;
- usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
- mssg);
- atomic_inc(&iproc_priv.mb_no_spc);
- }
- }
- if (err < 0) {
- atomic_inc(&iproc_priv.mb_send_fail);
+ err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+ if (unlikely(err < 0))
return err;
- }
return -EINPROGRESS;
}
@@ -3255,7 +3246,7 @@ static struct iproc_alg_s driver_algs[] = {
.cra_flags = CRYPTO_ALG_NEED_FALLBACK
},
.setkey = aead_gcm_esp_setkey,
- .ivsize = GCM_ESP_IV_SIZE,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.cipher_info = {
@@ -3301,7 +3292,7 @@ static struct iproc_alg_s driver_algs[] = {
.cra_flags = CRYPTO_ALG_NEED_FALLBACK
},
.setkey = rfc4543_gcm_esp_setkey,
- .ivsize = GCM_ESP_IV_SIZE,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.cipher_info = {
@@ -4537,7 +4528,7 @@ static int spu_mb_init(struct device *dev)
mcl->dev = dev;
mcl->tx_block = false;
mcl->tx_tout = 0;
- mcl->knows_txdone = false;
+ mcl->knows_txdone = true;
mcl->rx_callback = spu_rx_callback;
mcl->tx_done = NULL;
@@ -4818,7 +4809,6 @@ static int spu_dt_read(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct spu_hw *spu = &iproc_priv.spu;
struct resource *spu_ctrl_regs;
- const struct of_device_id *match;
const struct spu_type_subtype *matched_spu_type;
struct device_node *dn = pdev->dev.of_node;
int err, i;
@@ -4826,14 +4816,12 @@ static int spu_dt_read(struct platform_device *pdev)
/* Count number of mailbox channels */
spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
- match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev);
- if (!match) {
+ matched_spu_type = of_device_get_match_data(dev);
+ if (!matched_spu_type) {
dev_err(&pdev->dev, "Failed to match device\n");
return -ENODEV;
}
- matched_spu_type = match->data;
-
spu->spu_type = matched_spu_type->type;
spu->spu_subtype = matched_spu_type->subtype;
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 57a55eb2a255..763c425c41ca 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -23,6 +23,7 @@
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/aead.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/sha3.h>
@@ -39,8 +40,6 @@
#define ARC4_STATE_SIZE 4
#define CCM_AES_IV_SIZE 16
-#define GCM_AES_IV_SIZE 12
-#define GCM_ESP_IV_SIZE 8
#define CCM_ESP_IV_SIZE 8
#define RFC4543_ICV_SIZE 16
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index 430c5570ea87..d543c010ccd9 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -271,7 +271,7 @@ int do_shash(unsigned char *name, unsigned char *result,
hash = crypto_alloc_shash(name, 0, 0);
if (IS_ERR(hash)) {
rc = PTR_ERR(hash);
- pr_err("%s: Crypto %s allocation error %d", __func__, name, rc);
+ pr_err("%s: Crypto %s allocation error %d\n", __func__, name, rc);
return rc;
}
@@ -279,7 +279,7 @@ int do_shash(unsigned char *name, unsigned char *result,
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc) {
rc = -ENOMEM;
- pr_err("%s: Memory allocation failure", __func__);
+ pr_err("%s: Memory allocation failure\n", __func__);
goto do_shash_err;
}
sdesc->shash.tfm = hash;
@@ -288,31 +288,31 @@ int do_shash(unsigned char *name, unsigned char *result,
if (key_len > 0) {
rc = crypto_shash_setkey(hash, key, key_len);
if (rc) {
- pr_err("%s: Could not setkey %s shash", __func__, name);
+ pr_err("%s: Could not setkey %s shash\n", __func__, name);
goto do_shash_err;
}
}
rc = crypto_shash_init(&sdesc->shash);
if (rc) {
- pr_err("%s: Could not init %s shash", __func__, name);
+ pr_err("%s: Could not init %s shash\n", __func__, name);
goto do_shash_err;
}
rc = crypto_shash_update(&sdesc->shash, data1, data1_len);
if (rc) {
- pr_err("%s: Could not update1", __func__);
+ pr_err("%s: Could not update1\n", __func__);
goto do_shash_err;
}
if (data2 && data2_len) {
rc = crypto_shash_update(&sdesc->shash, data2, data2_len);
if (rc) {
- pr_err("%s: Could not update2", __func__);
+ pr_err("%s: Could not update2\n", __func__);
goto do_shash_err;
}
}
rc = crypto_shash_final(&sdesc->shash, result);
if (rc)
- pr_err("%s: Could not generate %s hash", __func__, name);
+ pr_err("%s: Could not generate %s hash\n", __func__, name);
do_shash_err:
crypto_free_shash(hash);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 54f3b375a453..baa8dd52472d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -992,7 +992,7 @@ static void init_gcm_job(struct aead_request *req,
struct caam_ctx *ctx = crypto_aead_ctx(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc = edesc->hw_desc;
- bool generic_gcm = (ivsize == 12);
+ bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
unsigned int last;
init_aead_job(req, edesc, all_contig, encrypt);
@@ -1004,7 +1004,7 @@ static void init_gcm_job(struct aead_request *req,
/* Read GCM IV */
append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last);
/* Append Salt */
if (!generic_gcm)
append_data(desc, ctx->key + ctx->cdata.keylen, 4);
@@ -1953,7 +1953,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setauthsize = rfc4106_setauthsize,
.encrypt = ipsec_gcm_encrypt,
.decrypt = ipsec_gcm_decrypt,
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.caam = {
@@ -1971,7 +1971,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setauthsize = rfc4543_setauthsize,
.encrypt = ipsec_gcm_encrypt,
.decrypt = ipsec_gcm_decrypt,
- .ivsize = 8,
+ .ivsize = GCM_RFC4543_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.caam = {
@@ -1990,7 +1990,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setauthsize = gcm_setauthsize,
.encrypt = gcm_encrypt,
.decrypt = gcm_decrypt,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.caam = {
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 2eefc4a26bc2..f9f08fce4356 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -7,7 +7,7 @@
*/
#include "compat.h"
-
+#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
@@ -2312,6 +2312,11 @@ static int __init caam_qi_algapi_init(void)
if (!priv || !priv->qi_present)
return -ENODEV;
+ if (caam_dpaa2) {
+ dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
+ return -ENODEV;
+ }
+
INIT_LIST_HEAD(&alg_list);
/*
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 698580b60b2f..616720a04e7a 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -218,7 +218,7 @@ static inline int buf_map_to_sec4_sg(struct device *jrdev,
}
/* Map state->caam_ctx, and add it to link table */
-static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+static inline int ctx_map_to_sec4_sg(struct device *jrdev,
struct caam_hash_state *state, int ctx_len,
struct sec4_sg_entry *sec4_sg, u32 flag)
{
@@ -773,7 +773,7 @@ static int ahash_update_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -871,9 +871,8 @@ static int ahash_final_ctx(struct ahash_request *req)
desc = edesc->hw_desc;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->src_nents = 0;
- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
@@ -967,7 +966,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
@@ -1123,7 +1122,6 @@ static int ahash_final_no_ctx(struct ahash_request *req)
dev_err(jrdev, "unable to map dst\n");
goto unmap;
}
- edesc->src_nents = 0;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1205,7 +1203,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->dst_dma = 0;
ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
if (ret)
@@ -1417,7 +1414,6 @@ static int ahash_update_first(struct ahash_request *req)
}
edesc->src_nents = src_nents;
- edesc->dst_dma = 0;
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
to_hash);
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 5f2f1b288d37..1c71e0cd5098 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -32,6 +32,7 @@
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 6633fbb80e74..8142de7ba050 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1440,7 +1440,7 @@
#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
+#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d258953ff488..f4f258075b89 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -172,7 +172,7 @@ static void caam_jr_dequeue(unsigned long devarg)
while (rd_reg32(&jrp->rregs->outring_used)) {
- head = ACCESS_ONCE(jrp->head);
+ head = READ_ONCE(jrp->head);
spin_lock(&jrp->outlock);
@@ -341,7 +341,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
spin_lock_bh(&jrp->inplock);
head = jrp->head;
- tail = ACCESS_ONCE(jrp->tail);
+ tail = READ_ONCE(jrp->tail);
if (!rd_reg32(&jrp->rregs->inpring_avail) ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index bf9900bc4804..ab4ccf2f9e77 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -127,7 +127,7 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
* size and interrupt threshold.
*/
offset = NPS_PKT_IN_INSTR_BADDRX(i);
- nitrox_write_csr(ndev, NPS_PKT_IN_INSTR_BADDRX(i), cmdq->dma);
+ nitrox_write_csr(ndev, offset, cmdq->dma);
/* configure ring size */
offset = NPS_PKT_IN_INSTR_RSIZEX(i);
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index 52313524a4dd..ff02b713c6f6 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -19,13 +19,12 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
+#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/delay.h>
#include "ccp-crypto.h"
-#define AES_GCM_IVSIZE 12
-
static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
{
return ret;
@@ -95,9 +94,9 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
*/
/* Prepare the IV: 12 bytes + an integer (counter) */
- memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE);
+ memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
for (i = 0; i < 3; i++)
- rctx->iv[i + AES_GCM_IVSIZE] = 0;
+ rctx->iv[i + GCM_AES_IV_SIZE] = 0;
rctx->iv[AES_BLOCK_SIZE - 1] = 1;
/* Set up a scatterlist for the IV */
@@ -160,7 +159,7 @@ static struct aead_alg ccp_aes_gcm_defaults = {
.encrypt = ccp_aes_gcm_encrypt,
.decrypt = ccp_aes_gcm_decrypt,
.init = ccp_aes_gcm_cra_init,
- .ivsize = AES_GCM_IVSIZE,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
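Both the atmel and ccp GCM paths above special-case a 12-byte (GCM_AES_IV_SIZE) nonce, for which the counter block J0 is simply the nonce followed by a big-endian 32-bit 1. A minimal standalone sketch of that construction:

#include <stdint.h>
#include <string.h>

#define GCM_AES_IV_SIZE 12

/* J0 = IV || 0x00000001 (big endian) when the nonce is exactly 12 bytes;
 * other nonce lengths need the GHASH-based derivation instead. */
static void gcm_build_j0(uint8_t j0[16], const uint8_t iv[GCM_AES_IV_SIZE])
{
	memcpy(j0, iv, GCM_AES_IV_SIZE);
	j0[12] = 0;
	j0[13] = 0;
	j0[14] = 0;
	j0[15] = 1;	/* 32-bit block counter starts at 1 */
}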
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 35a9de7fd475..b95d19974aa6 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -222,9 +222,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
/* Check if the cmd can/should be queued */
if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
- ret = -EBUSY;
- if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+ if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
+ ret = -ENOSPC;
goto e_lock;
+ }
}
/* Look for an entry with the same tfm. If there is a cmd
@@ -243,9 +244,6 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
ret = ccp_enqueue_cmd(crypto_cmd->cmd);
if (!ccp_crypto_success(ret))
goto e_lock; /* Error, don't queue it */
- if ((ret == -EBUSY) &&
- !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
- goto e_lock; /* Not backlogging, don't queue it */
}
if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 65604fc65e8f..44a4d2779b15 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -788,13 +788,12 @@ static int ccp5_init(struct ccp_device *ccp)
struct ccp_cmd_queue *cmd_q;
struct dma_pool *dma_pool;
char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
- unsigned int qmr, qim, i;
+ unsigned int qmr, i;
u64 status;
u32 status_lo, status_hi;
int ret;
/* Find available queues */
- qim = 0;
qmr = ioread32(ccp->io_regs + Q_MASK_REG);
for (i = 0; i < MAX_HW_QUEUES; i++) {
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 4e029b176641..1b5035d56288 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -292,9 +292,12 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd)
i = ccp->cmd_q_count;
if (ccp->cmd_count >= MAX_CMD_QLEN) {
- ret = -EBUSY;
- if (cmd->flags & CCP_CMD_MAY_BACKLOG)
+ if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
+ ret = -EBUSY;
list_add_tail(&cmd->entry, &ccp->backlog);
+ } else {
+ ret = -ENOSPC;
+ }
} else {
ret = -EINPROGRESS;
ccp->cmd_count++;
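The ccp hunks above tighten the queue-full convention: a command that may backlog is accepted with -EBUSY and placed on the backlog list, one that may not is rejected with -ENOSPC, and -EINPROGRESS remains the normal acceptance code. A minimal sketch of that convention, with stand-in constants rather than the driver's:

#include <errno.h>

#define MAY_BACKLOG	0x1	/* stand-in for CCP_CMD_MAY_BACKLOG */
#define MAX_QLEN	50	/* stand-in queue limit */

/* Returns -EINPROGRESS when accepted, -EBUSY when accepted onto the
 * backlog, -ENOSPC when the queue is full and backlogging is not allowed. */
static int enqueue_cmd(int *cmd_count, int *backlog_count, unsigned int flags)
{
	if (*cmd_count >= MAX_QLEN) {
		if (!(flags & MAY_BACKLOG))
			return -ENOSPC;
		(*backlog_count)++;
		return -EBUSY;
	}

	(*cmd_count)++;
	return -EINPROGRESS;
}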
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index d608043c0280..8b9da58459df 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -223,6 +223,7 @@ static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
desc->tx_desc.cookie, desc->status);
dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
}
desc = __ccp_next_dma_desc(chan, desc);
@@ -230,9 +231,7 @@ static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
spin_unlock_irqrestore(&chan->lock, flags);
if (tx_desc) {
- if (tx_desc->callback &&
- (tx_desc->flags & DMA_PREP_INTERRUPT))
- tx_desc->callback(tx_desc->callback_param);
+ dmaengine_desc_get_callback_invoke(tx_desc, NULL);
dma_run_dependencies(tx_desc);
}
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 0e8160701833..4eed7171e2ae 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -53,6 +53,7 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
@@ -70,6 +71,8 @@
#include "chcr_algo.h"
#include "chcr_crypto.h"
+#define IV AES_BLOCK_SIZE
+
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->aeadctx;
@@ -102,7 +105,7 @@ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
static inline int is_ofld_imm(const struct sk_buff *skb)
{
- return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
+ return (skb->len <= SGE_MAX_WR_LEN);
}
/*
@@ -117,6 +120,92 @@ static inline unsigned int sgl_len(unsigned int n)
return (3 * n) / 2 + (n & 1) + 2;
}
+static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
+ unsigned int entlen,
+ unsigned int skip)
+{
+ int nents = 0;
+ unsigned int less;
+ unsigned int skip_len = 0;
+
+ while (sg && skip) {
+ if (sg_dma_len(sg) <= skip) {
+ skip -= sg_dma_len(sg);
+ skip_len = 0;
+ sg = sg_next(sg);
+ } else {
+ skip_len = skip;
+ skip = 0;
+ }
+ }
+
+ while (sg && reqlen) {
+ less = min(reqlen, sg_dma_len(sg) - skip_len);
+ nents += DIV_ROUND_UP(less, entlen);
+ reqlen -= less;
+ skip_len = 0;
+ sg = sg_next(sg);
+ }
+ return nents;
+}
+
+static inline void chcr_handle_ahash_resp(struct ahash_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+ int digestsize, updated_digestsize;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+
+ if (input == NULL)
+ goto out;
+ reqctx = ahash_request_ctx(req);
+ digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+ if (reqctx->is_sg_map)
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+ if (reqctx->dma_addr)
+ dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr,
+ reqctx->dma_len, DMA_TO_DEVICE);
+ reqctx->dma_addr = 0;
+ updated_digestsize = digestsize;
+ if (digestsize == SHA224_DIGEST_SIZE)
+ updated_digestsize = SHA256_DIGEST_SIZE;
+ else if (digestsize == SHA384_DIGEST_SIZE)
+ updated_digestsize = SHA512_DIGEST_SIZE;
+ if (reqctx->result == 1) {
+ reqctx->result = 0;
+ memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
+ digestsize);
+ } else {
+ memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
+ updated_digestsize);
+ }
+out:
+ req->base.complete(&req->base, err);
+
+ }
+
+static inline void chcr_handle_aead_resp(struct aead_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
+
+
+ chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
+ if (reqctx->b0_dma)
+ dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
+ reqctx->b0_len, DMA_BIDIRECTIONAL);
+ if (reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(req, input, &err);
+ reqctx->verify = VERIFY_HW;
+}
+ req->base.complete(&req->base, err);
+
+}
static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
u8 temp[SHA512_DIGEST_SIZE];
@@ -151,29 +240,11 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
{
struct crypto_tfm *tfm = req->tfm;
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct chcr_req_ctx ctx_req;
- unsigned int digestsize, updated_digestsize;
struct adapter *adap = padap(ctx->dev);
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
- ctx_req.req.aead_req = aead_request_cast(req);
- ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
- dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
- ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
- if (ctx_req.ctx.reqctx->skb) {
- kfree_skb(ctx_req.ctx.reqctx->skb);
- ctx_req.ctx.reqctx->skb = NULL;
- }
- free_new_sg(ctx_req.ctx.reqctx->newdstsg);
- ctx_req.ctx.reqctx->newdstsg = NULL;
- if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
- chcr_verify_tag(ctx_req.req.aead_req, input,
- &err);
- ctx_req.ctx.reqctx->verify = VERIFY_HW;
- }
- ctx_req.req.aead_req->base.complete(req, err);
+ chcr_handle_aead_resp(aead_request_cast(req), input, err);
break;
case CRYPTO_ALG_TYPE_ABLKCIPHER:
@@ -182,60 +253,13 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
break;
case CRYPTO_ALG_TYPE_AHASH:
- ctx_req.req.ahash_req = ahash_request_cast(req);
- ctx_req.ctx.ahash_ctx =
- ahash_request_ctx(ctx_req.req.ahash_req);
- digestsize =
- crypto_ahash_digestsize(crypto_ahash_reqtfm(
- ctx_req.req.ahash_req));
- updated_digestsize = digestsize;
- if (digestsize == SHA224_DIGEST_SIZE)
- updated_digestsize = SHA256_DIGEST_SIZE;
- else if (digestsize == SHA384_DIGEST_SIZE)
- updated_digestsize = SHA512_DIGEST_SIZE;
- if (ctx_req.ctx.ahash_ctx->skb) {
- kfree_skb(ctx_req.ctx.ahash_ctx->skb);
- ctx_req.ctx.ahash_ctx->skb = NULL;
- }
- if (ctx_req.ctx.ahash_ctx->result == 1) {
- ctx_req.ctx.ahash_ctx->result = 0;
- memcpy(ctx_req.req.ahash_req->result, input +
- sizeof(struct cpl_fw6_pld),
- digestsize);
- } else {
- memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
- sizeof(struct cpl_fw6_pld),
- updated_digestsize);
+ chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
}
- ctx_req.req.ahash_req->base.complete(req, err);
- break;
- }
atomic_inc(&adap->chcr_stats.complete);
return err;
}
-/*
- * calc_tx_flits_ofld - calculate # of flits for an offload packet
- * @skb: the packet
- * Returns the number of flits needed for the given offload packet.
- * These packets are already fully constructed and no additional headers
- * will be added.
- */
-static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
-{
- unsigned int flits, cnt;
-
- if (is_ofld_imm(skb))
- return DIV_ROUND_UP(skb->len, 8);
-
- flits = skb_transport_offset(skb) / 8; /* headers */
- cnt = skb_shinfo(skb)->nr_frags;
- if (skb_tail_pointer(skb) != skb_transport_header(skb))
- cnt++;
- return flits + sgl_len(cnt);
-}
-
-static inline void get_aes_decrypt_key(unsigned char *dec_key,
+static void get_aes_decrypt_key(unsigned char *dec_key,
const unsigned char *key,
unsigned int keylength)
{
@@ -382,13 +406,19 @@ static inline int is_hmac(struct crypto_tfm *tfm)
return 0;
}
-static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
- struct scatterlist *sg,
- struct phys_sge_parm *sg_param)
+static inline void dsgl_walk_init(struct dsgl_walk *walk,
+ struct cpl_rx_phys_dsgl *dsgl)
{
- struct phys_sge_pairs *to;
- unsigned int len = 0, left_size = sg_param->obsize;
- unsigned int nents = sg_param->nents, i, j = 0;
+ walk->dsgl = dsgl;
+ walk->nents = 0;
+ walk->to = (struct phys_sge_pairs *)(dsgl + 1);
+}
+
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+{
+ struct cpl_rx_phys_dsgl *phys_cpl;
+
+ phys_cpl = walk->dsgl;
phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
| CPL_RX_PHYS_DSGL_ISRDMA_V(0));
@@ -398,38 +428,171 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
CPL_RX_PHYS_DSGL_DCAID_V(0) |
- CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
+ CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
- phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
+ phys_cpl->rss_hdr_int.qid = htons(qid);
phys_cpl->rss_hdr_int.hash_val = 0;
- to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
- sizeof(struct cpl_rx_phys_dsgl));
- for (i = 0; nents && left_size; to++) {
- for (j = 0; j < 8 && nents && left_size; j++, nents--) {
- len = min(left_size, sg_dma_len(sg));
- to->len[j] = htons(len);
- to->addr[j] = cpu_to_be64(sg_dma_address(sg));
- left_size -= len;
+}
+
+static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
+ size_t size,
+ dma_addr_t *addr)
+{
+ int j;
+
+ if (!size)
+ return;
+ j = walk->nents;
+ walk->to->len[j % 8] = htons(size);
+ walk->to->addr[j % 8] = cpu_to_be64(*addr);
+ j++;
+ if ((j % 8) == 0)
+ walk->to++;
+ walk->nents = j;
+}
+
+static void dsgl_walk_add_sg(struct dsgl_walk *walk,
+ struct scatterlist *sg,
+ unsigned int slen,
+ unsigned int skip)
+{
+ int skip_len = 0;
+ unsigned int left_size = slen, len = 0;
+ unsigned int j = walk->nents;
+ int offset, ent_len;
+
+ if (!slen)
+ return;
+ while (sg && skip) {
+ if (sg_dma_len(sg) <= skip) {
+ skip -= sg_dma_len(sg);
+ skip_len = 0;
sg = sg_next(sg);
+ } else {
+ skip_len = skip;
+ skip = 0;
}
}
+
+ while (left_size && sg) {
+ len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
+ offset = 0;
+ while (len) {
+ ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
+ walk->to->len[j % 8] = htons(ent_len);
+ walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
+ offset + skip_len);
+ offset += ent_len;
+ len -= ent_len;
+ j++;
+ if ((j % 8) == 0)
+ walk->to++;
+ }
+ walk->last_sg = sg;
+ walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
+ skip_len) + skip_len;
+ left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
+ skip_len = 0;
+ sg = sg_next(sg);
+ }
+ walk->nents = j;
+}
+
+static inline void ulptx_walk_init(struct ulptx_walk *walk,
+ struct ulptx_sgl *ulp)
+{
+ walk->sgl = ulp;
+ walk->nents = 0;
+ walk->pair_idx = 0;
+ walk->pair = ulp->sge;
+ walk->last_sg = NULL;
+ walk->last_sg_len = 0;
}
-static inline int map_writesg_phys_cpl(struct device *dev,
- struct cpl_rx_phys_dsgl *phys_cpl,
+static inline void ulptx_walk_end(struct ulptx_walk *walk)
+{
+ walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE_V(walk->nents));
+}
+
+
+static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
+ size_t size,
+ dma_addr_t *addr)
+{
+ if (!size)
+ return;
+
+ if (walk->nents == 0) {
+ walk->sgl->len0 = cpu_to_be32(size);
+ walk->sgl->addr0 = cpu_to_be64(*addr);
+ } else {
+ walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
+ walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
+ walk->pair_idx = !walk->pair_idx;
+ if (!walk->pair_idx)
+ walk->pair++;
+ }
+ walk->nents++;
+}
+
+static void ulptx_walk_add_sg(struct ulptx_walk *walk,
struct scatterlist *sg,
- struct phys_sge_parm *sg_param)
+ unsigned int len,
+ unsigned int skip)
{
- if (!sg || !sg_param->nents)
- return -EINVAL;
+ int small;
+ int skip_len = 0;
+ unsigned int sgmin;
- sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
- if (sg_param->nents == 0) {
- pr_err("CHCR : DMA mapping failed\n");
- return -EINVAL;
+ if (!len)
+ return;
+
+ while (sg && skip) {
+ if (sg_dma_len(sg) <= skip) {
+ skip -= sg_dma_len(sg);
+ skip_len = 0;
+ sg = sg_next(sg);
+ } else {
+ skip_len = skip;
+ skip = 0;
+ }
+ }
+ if (walk->nents == 0) {
+ small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
+ sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
+ walk->sgl->len0 = cpu_to_be32(sgmin);
+ walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
+ walk->nents++;
+ len -= sgmin;
+ walk->last_sg = sg;
+ walk->last_sg_len = sgmin + skip_len;
+ skip_len += sgmin;
+ if (sg_dma_len(sg) == skip_len) {
+ sg = sg_next(sg);
+ skip_len = 0;
+ }
+ }
+
+ while (sg && len) {
+ small = min(sg_dma_len(sg) - skip_len, len);
+ sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
+ walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
+ walk->pair->addr[walk->pair_idx] =
+ cpu_to_be64(sg_dma_address(sg) + skip_len);
+ walk->pair_idx = !walk->pair_idx;
+ walk->nents++;
+ if (!walk->pair_idx)
+ walk->pair++;
+ len -= sgmin;
+ skip_len += sgmin;
+ walk->last_sg = sg;
+ walk->last_sg_len = skip_len;
+ if (sg_dma_len(sg) == skip_len) {
+ sg = sg_next(sg);
+ skip_len = 0;
+ }
}
- write_phys_cpl(phys_cpl, sg, sg_param);
- return 0;
}
static inline int get_aead_subtype(struct crypto_aead *aead)
@@ -449,45 +612,6 @@ static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
-static inline void write_buffer_to_skb(struct sk_buff *skb,
- unsigned int *frags,
- char *bfr,
- u8 bfr_len)
-{
- skb->len += bfr_len;
- skb->data_len += bfr_len;
- skb->truesize += bfr_len;
- get_page(virt_to_page(bfr));
- skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
- offset_in_page(bfr), bfr_len);
- (*frags)++;
-}
-
-
-static inline void
-write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
- struct scatterlist *sg, unsigned int count)
-{
- struct page *spage;
- unsigned int page_len;
-
- skb->len += count;
- skb->data_len += count;
- skb->truesize += count;
-
- while (count > 0) {
- if (!sg || (!(sg->length)))
- break;
- spage = sg_page(sg);
- get_page(spage);
- page_len = min(sg->length, count);
- skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
- (*frags)++;
- count -= page_len;
- sg = sg_next(sg);
- }
-}
-
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
struct adapter *adap = netdev2adap(dev);
@@ -524,30 +648,46 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
struct scatterlist *dst,
unsigned int minsg,
unsigned int space,
- short int *sent,
- short int *dent)
+ unsigned int srcskip,
+ unsigned int dstskip)
{
int srclen = 0, dstlen = 0;
- int srcsg = minsg, dstsg = 0;
+ int srcsg = minsg, dstsg = minsg;
+ int offset = 0, less;
+
+ if (sg_dma_len(src) == srcskip) {
+ src = sg_next(src);
+ srcskip = 0;
+ }
- *sent = 0;
- *dent = 0;
- while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) &&
+ if (sg_dma_len(dst) == dstskip) {
+ dst = sg_next(dst);
+ dstskip = 0;
+ }
+
+ while (src && dst &&
space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
- srclen += src->length;
+ srclen += (sg_dma_len(src) - srcskip);
srcsg++;
+ offset = 0;
while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
if (srclen <= dstlen)
break;
- dstlen += dst->length;
- dst = sg_next(dst);
+ less = min_t(unsigned int, sg_dma_len(dst) - offset -
+ dstskip, CHCR_DST_SG_SIZE);
+ dstlen += less;
+ offset += less;
+ if (offset == sg_dma_len(dst)) {
+ dst = sg_next(dst);
+ offset = 0;
+ }
dstsg++;
+ dstskip = 0;
}
src = sg_next(src);
+ srcskip = 0;
}
- *sent = srcsg - minsg;
- *dent = dstsg;
return min(srclen, dstlen);
}
@@ -576,47 +716,35 @@ static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
}
static inline void create_wreq(struct chcr_context *ctx,
struct chcr_wr *chcr_req,
- void *req, struct sk_buff *skb,
- int kctx_len, int hash_sz,
- int is_iv,
+ struct crypto_async_request *req,
+ unsigned int imm,
+ int hash_sz,
+ unsigned int len16,
unsigned int sc_len,
unsigned int lcb)
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
- int iv_loc = IV_DSGL;
int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
- unsigned int immdatalen = 0, nr_frags = 0;
- if (is_ofld_imm(skb)) {
- immdatalen = skb->data_len;
- iv_loc = IV_IMMEDIATE;
- } else {
- nr_frags = skb_shinfo(skb)->nr_frags;
- }
- chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
- ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
+ chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
chcr_req->wreq.pld_size_hash_size =
- htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
- FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
+ htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
chcr_req->wreq.len16_pkd =
- htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
- (calc_tx_flits_ofld(skb) * 8), 16)));
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
chcr_req->wreq.rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
- is_iv ? iv_loc : IV_NOP, !!lcb,
- ctx->tx_qidx);
+ !!lcb, ctx->tx_qidx);
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
qid);
- chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
- 16) - ((sizeof(chcr_req->wreq)) >> 4)));
+ chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
+ ((sizeof(chcr_req->wreq)) >> 4)));
- chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
- sizeof(chcr_req->key_ctx) +
- kctx_len + sc_len + immdatalen);
+ sizeof(chcr_req->key_ctx) + sc_len);
}
/**
@@ -629,47 +757,52 @@ static inline void create_wreq(struct chcr_context *ctx,
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
+ struct ulptx_sgl *ulptx;
struct chcr_blkcipher_req_ctx *reqctx =
ablkcipher_request_ctx(wrparam->req);
- struct phys_sge_parm sg_param;
- unsigned int frags = 0, transhdr_len, phys_dsgl;
+ unsigned int temp = 0, transhdr_len, dst_size;
int error;
- unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
+ int nents;
+ unsigned int kctx_len;
gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
-
- phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
+ struct adapter *adap = padap(c_ctx(tfm)->dev);
+ nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
+ reqctx->dst_ofst);
+ dst_size = get_space_for_phys_dsgl(nents + 1);
kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
- transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
+ CHCR_SRC_SG_SIZE, reqctx->src_ofst);
+ temp = reqctx->imm ? (DIV_ROUND_UP((IV + wrparam->req->nbytes), 16)
+ * 16) : (sgl_len(nents + MIN_CIPHER_SG) * 8);
+ transhdr_len += temp;
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
+ FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
- chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes);
+ chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
- FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
+ FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
ablkctx->ciph_mode,
- 0, 0, ivsize >> 1);
+ 0, 0, IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
- 0, 1, phys_dsgl);
+ 0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
if ((reqctx->op == CHCR_DECRYPT_OP) &&
@@ -694,26 +827,18 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
}
}
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- sg_param.nents = reqctx->dst_nents;
- sg_param.obsize = wrparam->bytes;
- sg_param.qid = wrparam->qid;
- error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
- reqctx->dst, &sg_param);
- if (error)
- goto map_fail1;
+ ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+ chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
+ chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
- skb_set_transport_header(skb, transhdr_len);
- write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
- write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes);
atomic_inc(&adap->chcr_stats.cipher_rqst);
- create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1,
- sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len
+ +(reqctx->imm ? (IV + wrparam->bytes) : 0);
+ create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
+ transhdr_len, temp,
ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
reqctx->skb = skb;
- skb_get(skb);
return skb;
-map_fail1:
- kfree_skb(skb);
err:
return ERR_PTR(error);
}
@@ -738,8 +863,7 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
int err = 0;
crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
@@ -757,8 +881,7 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned int ck_size, context_size;
u16 alignment = 0;
int err;
@@ -790,8 +913,7 @@ static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned int ck_size, context_size;
u16 alignment = 0;
int err;
@@ -822,8 +944,7 @@ static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned int ck_size, context_size;
u16 alignment = 0;
int err;
@@ -890,25 +1011,28 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct crypto_cipher *cipher;
int ret, i;
u8 *key;
unsigned int keylen;
+ int round = reqctx->last_req_len / AES_BLOCK_SIZE;
+ int round8 = round / 8;
cipher = ablkctx->aes_generic;
- memcpy(iv, req->info, AES_BLOCK_SIZE);
+ memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
keylen = ablkctx->enckey_len / 2;
key = ablkctx->key + keylen;
ret = crypto_cipher_setkey(cipher, key, keylen);
if (ret)
goto out;
+	/* H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0 */
+ for (i = 0; i < round8; i++)
+ gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
- crypto_cipher_encrypt_one(cipher, iv, iv);
- for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++)
+ for (i = 0; i < (round % 8); i++)
gf128mul_x_ble((le128 *)iv, (le128 *)iv);
crypto_cipher_decrypt_one(cipher, iv, iv);
@@ -982,65 +1106,60 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unsigned char *input, int err)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb;
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct cipher_wr_param wrparam;
int bytes;
- dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents,
- DMA_FROM_DEVICE);
-
- if (reqctx->skb) {
- kfree_skb(reqctx->skb);
- reqctx->skb = NULL;
- }
if (err)
- goto complete;
-
+ goto unmap;
if (req->nbytes == reqctx->processed) {
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ req);
err = chcr_final_cipher_iv(req, fw6_pld, req->info);
goto complete;
}
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ c_ctx(tfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
err = -EBUSY;
- goto complete;
+ goto unmap;
}
}
- wrparam.srcsg = scatterwalk_ffwd(reqctx->srcffwd, req->src,
- reqctx->processed);
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg,
- reqctx->processed);
- if (!wrparam.srcsg || !reqctx->dst) {
- pr_err("Input sg list length less that nbytes\n");
- err = -EINVAL;
- goto complete;
- }
- bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1,
- SPACE_LEFT(ablkctx->enckey_len),
- &wrparam.snent, &reqctx->dst_nents);
+ if (!reqctx->imm) {
+ bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
+ SPACE_LEFT(ablkctx->enckey_len),
+ reqctx->src_ofst, reqctx->dst_ofst);
if ((bytes + reqctx->processed) >= req->nbytes)
bytes = req->nbytes - reqctx->processed;
else
bytes = ROUND_16(bytes);
+ } else {
+		/* CTR mode counter overflow */
+ bytes = req->nbytes - reqctx->processed;
+ }
+ dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
+ dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
if (err)
- goto complete;
+ goto unmap;
if (unlikely(bytes == 0)) {
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ req);
err = chcr_cipher_fallback(ablkctx->sw_cipher,
req->base.flags,
- wrparam.srcsg,
- reqctx->dst,
- req->nbytes - reqctx->processed,
- reqctx->iv,
+ req->src,
+ req->dst,
+ req->nbytes,
+ req->info,
reqctx->op);
goto complete;
}
@@ -1048,23 +1167,24 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
CRYPTO_ALG_SUB_TYPE_CTR)
bytes = adjust_ctr_overflow(reqctx->iv, bytes);
- reqctx->processed += bytes;
- wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
+ wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
wrparam.req = req;
wrparam.bytes = bytes;
skb = create_cipher_wr(&wrparam);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
err = PTR_ERR(skb);
- goto complete;
+ goto unmap;
}
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
+ reqctx->last_req_len = bytes;
+ reqctx->processed += bytes;
return 0;
+unmap:
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
req->base.complete(&req->base, err);
return err;
}
@@ -1077,12 +1197,10 @@ static int process_cipher(struct ablkcipher_request *req,
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct cipher_wr_param wrparam;
- int bytes, nents, err = -EINVAL;
+ int bytes, err = -EINVAL;
- reqctx->newdstsg = NULL;
reqctx->processed = 0;
if (!req->info)
goto error;
@@ -1093,25 +1211,41 @@ static int process_cipher(struct ablkcipher_request *req,
ablkctx->enckey_len, req->nbytes, ivsize);
goto error;
}
- wrparam.srcsg = req->src;
- if (is_newsg(req->dst, &nents)) {
- reqctx->newdstsg = alloc_new_sg(req->dst, nents);
- if (IS_ERR(reqctx->newdstsg))
- return PTR_ERR(reqctx->newdstsg);
- reqctx->dstsg = reqctx->newdstsg;
+ chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+ if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
+ AES_MIN_KEY_SIZE +
+ sizeof(struct cpl_rx_phys_dsgl) +
+			      /* Min dsgl size */
+ 32))) {
+		/* Can be sent as Imm */
+ unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
+
+ dnents = sg_nents_xlen(req->dst, req->nbytes,
+ CHCR_DST_SG_SIZE, 0);
+ dnents += 1; // IV
+ phys_dsgl = get_space_for_phys_dsgl(dnents);
+ kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
+ reqctx->imm = (transhdr_len + IV + req->nbytes) <=
+ SGE_MAX_WR_LEN;
+ bytes = IV + req->nbytes;
+
} else {
- reqctx->dstsg = req->dst;
+ reqctx->imm = 0;
}
- bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
- SPACE_LEFT(ablkctx->enckey_len),
- &wrparam.snent,
- &reqctx->dst_nents);
+
+ if (!reqctx->imm) {
+ bytes = chcr_sg_ent_in_wr(req->src, req->dst,
+ MIN_CIPHER_SG,
+ SPACE_LEFT(ablkctx->enckey_len),
+ 0, 0);
if ((bytes + reqctx->processed) >= req->nbytes)
bytes = req->nbytes - reqctx->processed;
else
bytes = ROUND_16(bytes);
- if (unlikely(bytes > req->nbytes))
+ } else {
bytes = req->nbytes;
+ }
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
CRYPTO_ALG_SUB_TYPE_CTR) {
bytes = adjust_ctr_overflow(req->info, bytes);
@@ -1128,9 +1262,11 @@ static int process_cipher(struct ablkcipher_request *req,
} else {
- memcpy(reqctx->iv, req->info, ivsize);
+ memcpy(reqctx->iv, req->info, IV);
}
if (unlikely(bytes == 0)) {
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ req);
err = chcr_cipher_fallback(ablkctx->sw_cipher,
req->base.flags,
req->src,
@@ -1140,45 +1276,48 @@ static int process_cipher(struct ablkcipher_request *req,
op_type);
goto error;
}
- reqctx->processed = bytes;
- reqctx->dst = reqctx->dstsg;
reqctx->op = op_type;
+ reqctx->srcsg = req->src;
+ reqctx->dstsg = req->dst;
+ reqctx->src_ofst = 0;
+ reqctx->dst_ofst = 0;
wrparam.qid = qid;
wrparam.req = req;
wrparam.bytes = bytes;
*skb = create_cipher_wr(&wrparam);
if (IS_ERR(*skb)) {
err = PTR_ERR(*skb);
- goto error;
+ goto unmap;
}
+ reqctx->processed = bytes;
+ reqctx->last_req_len = bytes;
return 0;
+unmap:
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
return err;
}
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct sk_buff *skb = NULL;
int err;
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ c_ctx(tfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
- err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
- CHCR_ENCRYPT_OP);
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ &skb, CHCR_ENCRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -1186,23 +1325,22 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct sk_buff *skb = NULL;
int err;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ c_ctx(tfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
- err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
- CHCR_DECRYPT_OP);
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ &skb, CHCR_DECRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -1350,17 +1488,19 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
struct sk_buff *skb = NULL;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
struct chcr_wr *chcr_req;
- unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
+ struct ulptx_sgl *ulptx;
+ unsigned int nents = 0, transhdr_len, iopad_alignment = 0;
unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int kctx_len = 0;
+ unsigned int kctx_len = 0, temp = 0;
u8 hash_size_in_response = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
+ struct adapter *adap = padap(h_ctx(tfm)->dev);
+ int error = 0;
iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
kctx_len = param->alg_prm.result_size + iopad_alignment;
@@ -1372,15 +1512,22 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
else
hash_size_in_response = param->alg_prm.result_size;
transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <=
+ SGE_MAX_WR_LEN;
+ nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0);
+ nents += param->bfr_len ? 1 : 0;
+ transhdr_len += req_ctx->imm ? (DIV_ROUND_UP((param->bfr_len +
+ param->sg_len), 16) * 16) :
+ (sgl_len(nents) * 8);
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb)
- return skb;
-
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+ return ERR_PTR(-ENOMEM);
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
+ FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1409,37 +1556,52 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
((kctx_len +
sizeof(chcr_req->key_ctx)) >> 4));
chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
-
- skb_set_transport_header(skb, transhdr_len);
- if (param->bfr_len != 0)
- write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
- param->bfr_len);
- if (param->sg_len != 0)
- write_sg_to_skb(skb, &frags, req->src, param->sg_len);
+ ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len +
+ DUMMY_BYTES);
+ if (param->bfr_len != 0) {
+ req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev,
+ req_ctx->reqbfr, param->bfr_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
+ req_ctx->dma_addr)) {
+ error = -ENOMEM;
+ goto err;
+ }
+ req_ctx->dma_len = param->bfr_len;
+ } else {
+ req_ctx->dma_addr = 0;
+ }
+ chcr_add_hash_src_ent(req, ulptx, param);
+	/* Request up to max wr size */
+ temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len
+ + param->bfr_len) : 0);
atomic_inc(&adap->chcr_stats.digest_rqst);
- create_wreq(ctx, chcr_req, &req->base, skb, kctx_len,
- hash_size_in_response, 0, DUMMY_BYTES, 0);
+ create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm,
+ hash_size_in_response, transhdr_len,
+ temp, 0);
req_ctx->skb = skb;
- skb_get(skb);
return skb;
+err:
+ kfree_skb(skb);
+ return ERR_PTR(error);
}
static int chcr_ahash_update(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
u8 remainder = 0, bs;
unsigned int nbytes = req->nbytes;
struct hash_wr_param params;
+ int error;
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ h_ctx(rtfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -1453,7 +1615,9 @@ static int chcr_ahash_update(struct ahash_request *req)
req_ctx->reqlen += nbytes;
return 0;
}
-
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+ if (error)
+ return -ENOMEM;
params.opad_needed = 0;
params.more = 1;
params.last = 0;
@@ -1464,25 +1628,27 @@ static int chcr_ahash_update(struct ahash_request *req)
req_ctx->result = 0;
req_ctx->data_len += params.sg_len + params.bfr_len;
skb = create_hash_wr(req, &params);
- if (!skb)
- return -ENOMEM;
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto unmap;
+ }
if (remainder) {
- u8 *temp;
/* Swap buffers */
- temp = req_ctx->reqbfr;
- req_ctx->reqbfr = req_ctx->skbfr;
- req_ctx->skbfr = temp;
+ swap(req_ctx->reqbfr, req_ctx->skbfr);
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
req_ctx->reqbfr, remainder, req->nbytes -
remainder);
}
req_ctx->reqlen = remainder;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
+unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+ return error;
}
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
@@ -1499,13 +1665,12 @@ static int chcr_ahash_final(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct hash_wr_param params;
struct sk_buff *skb;
struct uld_ctx *u_ctx = NULL;
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (is_hmac(crypto_ahash_tfm(rtfm)))
params.opad_needed = 1;
else
@@ -1528,11 +1693,11 @@ static int chcr_ahash_final(struct ahash_request *req)
params.more = 0;
}
skb = create_hash_wr(req, &params);
- if (!skb)
- return -ENOMEM;
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -1541,17 +1706,17 @@ static int chcr_ahash_finup(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
+ int error;
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ h_ctx(rtfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -1577,34 +1742,41 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.last = 1;
params.more = 0;
}
-
- skb = create_hash_wr(req, &params);
- if (!skb)
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+ if (error)
return -ENOMEM;
+ skb = create_hash_wr(req, &params);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto unmap;
+ }
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
+unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+ return error;
}
static int chcr_ahash_digest(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
+ int error;
rtfm->init(req);
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ h_ctx(rtfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -1613,6 +1785,9 @@ static int chcr_ahash_digest(struct ahash_request *req)
params.opad_needed = 1;
else
params.opad_needed = 0;
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+ if (error)
+ return -ENOMEM;
params.last = 0;
params.more = 0;
@@ -1630,13 +1805,17 @@ static int chcr_ahash_digest(struct ahash_request *req)
}
skb = create_hash_wr(req, &params);
- if (!skb)
- return -ENOMEM;
-
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto unmap;
+ }
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
+unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+ return error;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
@@ -1646,6 +1825,8 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
state->reqlen = req_ctx->reqlen;
state->data_len = req_ctx->data_len;
+ state->is_sg_map = 0;
+ state->result = 0;
memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
@@ -1661,6 +1842,8 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
req_ctx->data_len = state->data_len;
req_ctx->reqbfr = req_ctx->bfr1;
req_ctx->skbfr = req_ctx->bfr2;
+ req_ctx->is_sg_map = 0;
+ req_ctx->result = 0;
memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
memcpy(req_ctx->partial_hash, state->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
@@ -1670,8 +1853,7 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
unsigned int digestsize = crypto_ahash_digestsize(tfm);
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int i, err = 0, updated_digestsize;
@@ -1724,8 +1906,7 @@ out:
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
unsigned int key_len)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned short context_size = 0;
int err;
@@ -1764,6 +1945,7 @@ static int chcr_sha_init(struct ahash_request *areq)
req_ctx->skbfr = req_ctx->bfr2;
req_ctx->skb = NULL;
req_ctx->result = 0;
+ req_ctx->is_sg_map = 0;
copy_hash_init_values(req_ctx->partial_hash, digestsize);
return 0;
}
@@ -1779,8 +1961,7 @@ static int chcr_hmac_init(struct ahash_request *areq)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
unsigned int digestsize = crypto_ahash_digestsize(rtfm);
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
@@ -1826,86 +2007,48 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
}
}
-static int is_newsg(struct scatterlist *sgl, unsigned int *newents)
-{
- int nents = 0;
- int ret = 0;
-
- while (sgl) {
- if (sgl->length > CHCR_SG_SIZE)
- ret = 1;
- nents += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE);
- sgl = sg_next(sgl);
- }
- *newents = nents;
- return ret;
-}
-
-static inline void free_new_sg(struct scatterlist *sgl)
+static int chcr_aead_common_init(struct aead_request *req,
+ unsigned short op_type)
{
- kfree(sgl);
-}
-
-static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
- unsigned int nents)
-{
- struct scatterlist *newsg, *sg;
- int i, len, processed = 0;
- struct page *spage;
- int offset;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int error = -EINVAL;
+ unsigned int dst_size;
+ unsigned int authsize = crypto_aead_authsize(tfm);
- newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL);
- if (!newsg)
- return ERR_PTR(-ENOMEM);
- sg = newsg;
- sg_init_table(sg, nents);
- offset = sgl->offset;
- spage = sg_page(sgl);
- for (i = 0; i < nents; i++) {
- len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE);
- sg_set_page(sg, spage, len, offset);
- processed += len;
- offset += len;
- if (offset >= PAGE_SIZE) {
- offset = offset % PAGE_SIZE;
- spage++;
- }
- if (processed == sgl->length) {
- processed = 0;
- sgl = sg_next(sgl);
- if (!sgl)
- break;
- spage = sg_page(sgl);
- offset = sgl->offset;
- }
- sg = sg_next(sg);
+ dst_size = req->assoclen + req->cryptlen + (op_type ?
+ -authsize : authsize);
+ /* validate key size */
+ if (aeadctx->enckey_len == 0)
+ goto err;
+ if (op_type && req->cryptlen < authsize)
+ goto err;
+ error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
+ if (error) {
+ error = -ENOMEM;
+ goto err;
}
- return newsg;
+ reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
+ CHCR_SRC_SG_SIZE, 0);
+ reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
+ CHCR_SRC_SG_SIZE, req->assoclen);
+ return 0;
+err:
+ return error;
}
-static int chcr_copy_assoc(struct aead_request *req,
- struct chcr_aead_ctx *ctx)
-{
- SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
-
- skcipher_request_set_tfm(skreq, ctx->null);
- skcipher_request_set_callback(skreq, aead_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
- NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
+static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
int aadmax, int wrlen,
unsigned short op_type)
{
unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
+ dst_nents > MAX_DSGL_ENT ||
(req->assoclen > aadmax) ||
- (src_nent > MAX_SKB_FRAGS) ||
- (wrlen > MAX_WR_SIZE))
+ (wrlen > SGE_MAX_WR_LEN))
return 1;
return 0;
}
@@ -1913,8 +2056,7 @@ static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct aead_request *subreq = aead_request_ctx(req);
aead_request_set_tfm(subreq, aeadctx->sw_cipher);
@@ -1933,96 +2075,75 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct phys_sge_parm sg_param;
- struct scatterlist *src;
- unsigned int frags = 0, transhdr_len;
- unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
- unsigned int kctx_len = 0, nents;
- unsigned short stop_offset = 0;
+ struct ulptx_sgl *ulptx;
+ unsigned int transhdr_len;
+ unsigned int dst_size = 0, temp;
+ unsigned int kctx_len = 0, dnents;
unsigned int assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
- int error = -EINVAL, src_nent;
+ int error = -EINVAL;
int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
+ struct adapter *adap = padap(a_ctx(tfm)->dev);
- reqctx->newdstsg = NULL;
- dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
- if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
- goto err;
-
- if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
- goto err;
- src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
- if (src_nent < 0)
- goto err;
- src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+ if (req->cryptlen == 0)
+ return NULL;
- if (req->src != req->dst) {
- error = chcr_copy_assoc(req, aeadctx);
- if (error)
- return ERR_PTR(error);
- }
- if (dst_size && is_newsg(req->dst, &nents)) {
- reqctx->newdstsg = alloc_new_sg(req->dst, nents);
- if (IS_ERR(reqctx->newdstsg))
- return ERR_CAST(reqctx->newdstsg);
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- reqctx->newdstsg, req->assoclen);
- } else {
- if (req->src == req->dst)
- reqctx->dst = src;
- else
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- req->dst, req->assoclen);
- }
+ reqctx->b0_dma = 0;
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
null = 1;
assoclen = 0;
}
- reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
- (op_type ? -authsize : authsize));
- if (reqctx->dst_nents < 0) {
- pr_err("AUTHENC:Invalid Destination sg entries\n");
- error = -EINVAL;
- goto err;
+ dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
+ authsize);
+ error = chcr_aead_common_init(req, op_type);
+ if (error)
+ return ERR_PTR(error);
+ if (dst_size) {
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
+ req->assoclen);
+ dnents += MIN_AUTH_SG; // For IV
+ } else {
+ dnents = 0;
}
- dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+
+ dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
- sizeof(chcr_req->key_ctx);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
- T6_MAX_AAD_SIZE,
- transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
- op_type)) {
+ reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
+ SGE_MAX_WR_LEN;
+ temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen), 16)
+ * 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents
+ + MIN_GCM_SG) * 8);
+ transhdr_len += temp;
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+
+ if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
+ transhdr_len, op_type)) {
atomic_inc(&adap->chcr_stats.fallback);
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- /* LLD is going to write the sge hdr. */
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-
- /* Write WR */
chcr_req = __skb_put_zero(skb, transhdr_len);
- stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
/*
* Input order is AAD,IV and Payload. where IV should be included as
@@ -2030,24 +2151,24 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
* to the hardware spec
*/
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2,
- (ivsize ? (assoclen + 1) : 0));
- chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
+ FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
+ assoclen + 1);
+ chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
assoclen ? 1 : 0, assoclen,
- assoclen + ivsize + 1,
- (stop_offset & 0x1F0) >> 4);
+ assoclen + IV + 1,
+ (temp & 0x1F0) >> 4);
chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
- stop_offset & 0xF,
- null ? 0 : assoclen + ivsize + 1,
- stop_offset, stop_offset);
+ temp & 0xF,
+ null ? 0 : assoclen + IV + 1,
+ temp, temp);
chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_CBC,
actx->auth_mode, aeadctx->hmac_ctrl,
- ivsize >> 1);
+ IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
- 0, 1, dst_size);
+ 0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
if (op_type == CHCR_ENCRYPT_OP)
@@ -2060,41 +2181,312 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
4), actx->h_iopad, kctx_len -
(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
-
+ memcpy(reqctx->iv, req->iv, IV);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- sg_param.nents = reqctx->dst_nents;
- sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
- sg_param.qid = qid;
- error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
- reqctx->dst, &sg_param);
- if (error)
- goto dstmap_fail;
-
- skb_set_transport_header(skb, transhdr_len);
-
- if (assoclen) {
- /* AAD buffer in */
- write_sg_to_skb(skb, &frags, req->src, assoclen);
-
- }
- write_buffer_to_skb(skb, &frags, req->iv, ivsize);
- write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+ chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
+ chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
atomic_inc(&adap->chcr_stats.cipher_rqst);
- create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
- sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+ kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
+ transhdr_len, temp, 0);
reqctx->skb = skb;
- skb_get(skb);
+ reqctx->op = op_type;
return skb;
-dstmap_fail:
- /* ivmap_fail: */
- kfree_skb(skb);
err:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
+
return ERR_PTR(error);
}
+static int chcr_aead_dma_map(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
+{
+ int error;
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int dst_size;
+
+ dst_size = req->assoclen + req->cryptlen + (op_type ?
+ -authsize : authsize);
+ if (!req->cryptlen || !dst_size)
+ return 0;
+ reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, reqctx->iv_dma))
+ return -ENOMEM;
+
+ if (req->src == req->dst) {
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ if (!error)
+ goto err;
+ } else {
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ if (!error)
+ goto err;
+ error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_FROM_DEVICE);
+ if (!error) {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+}
+
+static void chcr_aead_dma_unmap(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int dst_size;
+
+ dst_size = req->assoclen + req->cryptlen + (op_type ?
+ -authsize : authsize);
+ if (!req->cryptlen || !dst_size)
+ return;
+
+ dma_unmap_single(dev, reqctx->iv_dma, IV,
+ DMA_BIDIRECTIONAL);
+ if (req->src == req->dst) {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_FROM_DEVICE);
+ }
+}
+
+static inline void chcr_add_aead_src_ent(struct aead_request *req,
+ struct ulptx_sgl *ulptx,
+ unsigned int assoclen,
+ unsigned short op_type)
+{
+ struct ulptx_walk ulp_walk;
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
+ if (reqctx->imm) {
+ u8 *buf = (u8 *)ulptx;
+
+ if (reqctx->b0_dma) {
+ memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
+ buf += reqctx->b0_len;
+ }
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ buf, assoclen, 0);
+ buf += assoclen;
+ memcpy(buf, reqctx->iv, IV);
+ buf += IV;
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ buf, req->cryptlen, req->assoclen);
+ } else {
+ ulptx_walk_init(&ulp_walk, ulptx);
+ if (reqctx->b0_dma)
+ ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
+ &reqctx->b0_dma);
+ ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
+ ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
+ ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
+ req->assoclen);
+ ulptx_walk_end(&ulp_walk);
+ }
+}
+
+static inline void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen,
+ unsigned short op_type,
+ unsigned short qid)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct dsgl_walk dsgl_walk;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ u32 temp;
+
+ dsgl_walk_init(&dsgl_walk, phys_cpl);
+ if (reqctx->b0_dma)
+ dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
+ dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
+ dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
+ temp = req->cryptlen + (op_type ? -authsize : authsize);
+ dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
+ dsgl_walk_end(&dsgl_walk, qid);
+}
+
+static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam)
+{
+ struct ulptx_walk ulp_walk;
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+
+ if (reqctx->imm) {
+ u8 *buf = (u8 *)ulptx;
+
+ memcpy(buf, reqctx->iv, IV);
+ buf += IV;
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ buf, wrparam->bytes, reqctx->processed);
+ } else {
+ ulptx_walk_init(&ulp_walk, ulptx);
+ ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
+ ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
+ reqctx->src_ofst);
+ reqctx->srcsg = ulp_walk.last_sg;
+ reqctx->src_ofst = ulp_walk.last_sg_len;
+ ulptx_walk_end(&ulp_walk);
+ }
+}
+
+static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid)
+{
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct dsgl_walk dsgl_walk;
+
+ dsgl_walk_init(&dsgl_walk, phys_cpl);
+ dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
+ dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
+ reqctx->dst_ofst);
+ reqctx->dstsg = dsgl_walk.last_sg;
+ reqctx->dst_ofst = dsgl_walk.last_sg_len;
+
+ dsgl_walk_end(&dsgl_walk, qid);
+}
+
+static inline void chcr_add_hash_src_ent(struct ahash_request *req,
+ struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param)
+{
+ struct ulptx_walk ulp_walk;
+ struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+
+ if (reqctx->imm) {
+ u8 *buf = (u8 *)ulptx;
+
+ if (param->bfr_len) {
+ memcpy(buf, reqctx->reqbfr, param->bfr_len);
+ buf += param->bfr_len;
+ }
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ buf, param->sg_len, 0);
+ } else {
+ ulptx_walk_init(&ulp_walk, ulptx);
+ if (param->bfr_len)
+ ulptx_walk_add_page(&ulp_walk, param->bfr_len,
+ &reqctx->dma_addr);
+ ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
+ 0);
+// reqctx->srcsg = ulp_walk.last_sg;
+// reqctx->src_ofst = ulp_walk.last_sg_len;
+ ulptx_walk_end(&ulp_walk);
+ }
+}
+
+
+static inline int chcr_hash_dma_map(struct device *dev,
+ struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ int error = 0;
+
+ if (!req->nbytes)
+ return 0;
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ if (!error)
+ return error;
+ req_ctx->is_sg_map = 1;
+ return 0;
+}
+
+static inline void chcr_hash_dma_unmap(struct device *dev,
+ struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+
+ if (!req->nbytes)
+ return;
+
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ req_ctx->is_sg_map = 0;
+
+}
+
+
+static int chcr_cipher_dma_map(struct device *dev,
+ struct ablkcipher_request *req)
+{
+ int error;
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+
+ reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, reqctx->iv_dma))
+ return -ENOMEM;
+
+ if (req->src == req->dst) {
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ if (!error)
+ goto err;
+ } else {
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ if (!error)
+ goto err;
+ error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_FROM_DEVICE);
+ if (!error) {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+}
+static void chcr_cipher_dma_unmap(struct device *dev,
+ struct ablkcipher_request *req)
+{
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+
+ dma_unmap_single(dev, reqctx->iv_dma, IV,
+ DMA_BIDIRECTIONAL);
+ if (req->src == req->dst) {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_FROM_DEVICE);
+ }
+}
+
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
__be32 data;
@@ -2179,15 +2571,13 @@ static int ccm_format_packet(struct aead_request *req,
static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned int dst_size,
struct aead_request *req,
- unsigned short op_type,
- struct chcr_context *chcrctx)
+ unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
- unsigned int ivsize = AES_BLOCK_SIZE;
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
- unsigned int c_id = chcrctx->dev->rx_channel_id;
+ unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
unsigned int ccm_xtra;
unsigned char tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
@@ -2200,7 +2590,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
auth_offset = req->cryptlen ?
- (assoclen + ivsize + 1 + ccm_xtra) : 0;
+ (assoclen + IV + 1 + ccm_xtra) : 0;
if (op_type == CHCR_DECRYPT_OP) {
if (crypto_aead_authsize(tfm) != req->cryptlen)
tag_offset = crypto_aead_authsize(tfm);
@@ -2210,14 +2600,13 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
- 2, (ivsize ? (assoclen + 1) : 0) +
- ccm_xtra);
+ 2, assoclen + 1 + ccm_xtra);
sec_cpl->pldlen =
- htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
+ htonl(assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will be b0 always. So AAD start will be 1 always */
sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1, assoclen + ccm_xtra, assoclen
- + ivsize + 1 + ccm_xtra, 0);
+ + IV + 1 + ccm_xtra, 0);
sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
auth_offset, tag_offset,
@@ -2226,10 +2615,10 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
cipher_mode, mac_mode,
- aeadctx->hmac_ctrl, ivsize >> 1);
+ aeadctx->hmac_ctrl, IV >> 1);
sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
- 1, dst_size);
+ 0, dst_size);
}
int aead_ccm_validate_input(unsigned short op_type,
@@ -2249,131 +2638,83 @@ int aead_ccm_validate_input(unsigned short op_type,
return -EINVAL;
}
}
- if (aeadctx->enckey_len == 0) {
- pr_err("CCM: Encryption key not set\n");
- return -EINVAL;
- }
return 0;
}
-unsigned int fill_aead_req_fields(struct sk_buff *skb,
- struct aead_request *req,
- struct scatterlist *src,
- unsigned int ivsize,
- struct chcr_aead_ctx *aeadctx)
-{
- unsigned int frags = 0;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- /* b0 and aad length(if available) */
-
- write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
- (req->assoclen ? CCM_AAD_FIELD_SIZE : 0));
- if (req->assoclen) {
- if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
- write_sg_to_skb(skb, &frags, req->src,
- req->assoclen - 8);
- else
- write_sg_to_skb(skb, &frags, req->src, req->assoclen);
- }
- write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
- if (req->cryptlen)
- write_sg_to_skb(skb, &frags, src, req->cryptlen);
-
- return frags;
-}
-
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
unsigned short qid,
int size,
unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct phys_sge_parm sg_param;
- struct scatterlist *src;
- unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
- unsigned int dst_size = 0, kctx_len, nents;
- unsigned int sub_type;
+ struct ulptx_sgl *ulptx;
+ unsigned int transhdr_len;
+ unsigned int dst_size = 0, kctx_len, dnents, temp;
+ unsigned int sub_type, assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
- int error = -EINVAL, src_nent;
+ int error = -EINVAL;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
+ struct adapter *adap = padap(a_ctx(tfm)->dev);
- dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
+ reqctx->b0_dma = 0;
+ sub_type = get_aead_subtype(tfm);
+ if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ assoclen -= 8;
+ dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
authsize);
- reqctx->newdstsg = NULL;
- if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
- goto err;
- src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
- if (src_nent < 0)
- goto err;
+ error = chcr_aead_common_init(req, op_type);
+ if (error)
+ return ERR_PTR(error);
- sub_type = get_aead_subtype(tfm);
- src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
- if (req->src != req->dst) {
- error = chcr_copy_assoc(req, aeadctx);
- if (error) {
- pr_err("AAD copy to destination buffer fails\n");
- return ERR_PTR(error);
- }
- }
- if (dst_size && is_newsg(req->dst, &nents)) {
- reqctx->newdstsg = alloc_new_sg(req->dst, nents);
- if (IS_ERR(reqctx->newdstsg))
- return ERR_CAST(reqctx->newdstsg);
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- reqctx->newdstsg, req->assoclen);
- } else {
- if (req->src == req->dst)
- reqctx->dst = src;
- else
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- req->dst, req->assoclen);
- }
- reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
- (op_type ? -authsize : authsize));
- if (reqctx->dst_nents < 0) {
- pr_err("CCM:Invalid Destination sg entries\n");
- error = -EINVAL;
- goto err;
- }
+
+ reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
if (error)
goto err;
-
- dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ if (dst_size) {
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen
+ + (op_type ? -authsize : authsize),
+ CHCR_DST_SG_SIZE, req->assoclen);
+ dnents += MIN_CCM_SG; // For IV and B0
+ } else {
+ dnents = 0;
+ }
+ dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
- T6_MAX_AAD_SIZE - 18,
- transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
- op_type)) {
+ reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
+ reqctx->b0_len) <= SGE_MAX_WR_LEN;
+ temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen +
+ reqctx->b0_len), 16) * 16) :
+ (sgl_len(reqctx->src_nents + reqctx->aad_nents +
+ MIN_CCM_SG) * 8);
+ transhdr_len += temp;
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+
+ if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
+ reqctx->b0_len, transhdr_len, op_type)) {
atomic_inc(&adap->chcr_stats.fallback);
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
-
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-
- chcr_req = __skb_put_zero(skb, transhdr_len);
+ chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
- fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
+ fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
@@ -2381,31 +2722,37 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
16), aeadctx->key, aeadctx->enckey_len);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
error = ccm_format_packet(req, aeadctx, sub_type, op_type);
if (error)
goto dstmap_fail;
- sg_param.nents = reqctx->dst_nents;
- sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
- sg_param.qid = qid;
- error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
- reqctx->dst, &sg_param);
- if (error)
+ reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
+ &reqctx->scratch_pad, reqctx->b0_len,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
+ reqctx->b0_dma)) {
+ error = -ENOMEM;
goto dstmap_fail;
+ }
+
+ chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
+ chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
- skb_set_transport_header(skb, transhdr_len);
- frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
atomic_inc(&adap->chcr_stats.aead_rqst);
- create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1,
- sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+ kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
+ reqctx->b0_len) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
+ transhdr_len, temp, 0);
reqctx->skb = skb;
- skb_get(skb);
+ reqctx->op = op_type;
+
return skb;
dstmap_fail:
kfree_skb(skb);
err:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
return ERR_PTR(error);
}
@@ -2415,115 +2762,84 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct phys_sge_parm sg_param;
- struct scatterlist *src;
- unsigned int frags = 0, transhdr_len;
- unsigned int ivsize = AES_BLOCK_SIZE;
- unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen;
- unsigned char tag_offset = 0;
+ struct ulptx_sgl *ulptx;
+ unsigned int transhdr_len, dnents = 0;
+ unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
- int error = -EINVAL, src_nent;
+ int error = -EINVAL;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
-
- reqctx->newdstsg = NULL;
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
- /* validate key size */
- if (aeadctx->enckey_len == 0)
- goto err;
+ struct adapter *adap = padap(a_ctx(tfm)->dev);
- if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
- goto err;
- src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen);
- if (src_nent < 0)
- goto err;
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+ assoclen = req->assoclen - 8;
- src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen);
- if (req->src != req->dst) {
- error = chcr_copy_assoc(req, aeadctx);
+ reqctx->b0_dma = 0;
+ dst_size = assoclen + req->cryptlen + (op_type ? -authsize : authsize);
+ error = chcr_aead_common_init(req, op_type);
if (error)
return ERR_PTR(error);
- }
-
- if (dst_size && is_newsg(req->dst, &nents)) {
- reqctx->newdstsg = alloc_new_sg(req->dst, nents);
- if (IS_ERR(reqctx->newdstsg))
- return ERR_CAST(reqctx->newdstsg);
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- reqctx->newdstsg, assoclen);
+ if (dst_size) {
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst,
+ req->cryptlen + (op_type ? -authsize : authsize),
+ CHCR_DST_SG_SIZE, req->assoclen);
+ dnents += MIN_GCM_SG; // For IV
} else {
- if (req->src == req->dst)
- reqctx->dst = src;
- else
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- req->dst, assoclen);
- }
-
- reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
- (op_type ? -authsize : authsize));
- if (reqctx->dst_nents < 0) {
- pr_err("GCM:Invalid Destination sg entries\n");
- error = -EINVAL;
- goto err;
+ dnents = 0;
}
-
-
- dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
AEAD_H_SIZE;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
- T6_MAX_AAD_SIZE,
- transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
- op_type)) {
+ reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
+ SGE_MAX_WR_LEN;
+ temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV +
+ req->cryptlen), 16) * 16) : (sgl_len(reqctx->src_nents +
+ reqctx->aad_nents + MIN_GCM_SG) * 8);
+ transhdr_len += temp;
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
+ transhdr_len, op_type)) {
atomic_inc(&adap->chcr_stats.fallback);
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- /* NIC driver is going to write the sge hdr. */
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-
chcr_req = __skb_put_zero(skb, transhdr_len);
- if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
- assoclen = req->assoclen - 8;
-
- tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ //Offset of tag from end
+ temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
- ctx->dev->rx_channel_id, 2, (ivsize ?
- (assoclen + 1) : 0));
+ a_ctx(tfm)->dev->rx_channel_id, 2,
+ (assoclen + 1));
chcr_req->sec_cpl.pldlen =
- htonl(assoclen + ivsize + req->cryptlen);
+ htonl(assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
assoclen ? 1 : 0, assoclen,
- assoclen + ivsize + 1, 0);
+ assoclen + IV + 1, 0);
chcr_req->sec_cpl.cipherstop_lo_authinsert =
- FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1,
- tag_offset, tag_offset);
+ FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
+ temp, temp);
chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
CHCR_SCMD_AUTH_MODE_GHASH,
- aeadctx->hmac_ctrl, ivsize >> 1);
+ aeadctx->hmac_ctrl, IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
- 0, 1, dst_size);
+ 0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
@@ -2534,39 +2850,28 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
if (get_aead_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
memcpy(reqctx->iv, aeadctx->salt, 4);
- memcpy(reqctx->iv + 4, req->iv, 8);
+ memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
} else {
- memcpy(reqctx->iv, req->iv, 12);
+ memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
}
*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- sg_param.nents = reqctx->dst_nents;
- sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
- sg_param.qid = qid;
- error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
- reqctx->dst, &sg_param);
- if (error)
- goto dstmap_fail;
+ ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- skb_set_transport_header(skb, transhdr_len);
- write_sg_to_skb(skb, &frags, req->src, assoclen);
- write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
- write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
+ chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
atomic_inc(&adap->chcr_stats.aead_rqst);
- create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
- sizeof(struct cpl_rx_phys_dsgl) + dst_size,
- reqctx->verify);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+ kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
+ transhdr_len, temp, reqctx->verify);
reqctx->skb = skb;
- skb_get(skb);
+ reqctx->op = op_type;
return skb;
-dstmap_fail:
- /* ivmap_fail: */
- kfree_skb(skb);
err:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
return ERR_PTR(error);
}
@@ -2574,8 +2879,7 @@ err:
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct aead_alg *alg = crypto_aead_alg(tfm);
aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
@@ -2586,25 +2890,20 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm)
crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
sizeof(struct aead_request) +
crypto_aead_reqsize(aeadctx->sw_cipher)));
- aeadctx->null = crypto_get_default_null_skcipher();
- if (IS_ERR(aeadctx->null))
- return PTR_ERR(aeadctx->null);
- return chcr_device_init(ctx);
+ return chcr_device_init(a_ctx(tfm));
}
static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- crypto_put_default_null_skcipher();
crypto_free_aead(aeadctx->sw_cipher);
}
static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
aeadctx->mayverify = VERIFY_HW;
@@ -2613,7 +2912,7 @@ static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
u32 maxauth = crypto_aead_maxauthsize(tfm);
/*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not
@@ -2651,7 +2950,7 @@ static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
switch (authsize) {
case ICV_4:
@@ -2691,7 +2990,7 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
switch (authsize) {
case ICV_8:
@@ -2717,7 +3016,7 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
switch (authsize) {
case ICV_4:
@@ -2760,8 +3059,7 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(aead);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
unsigned char ck_size, mk_size;
int key_ctx_size = 0;
@@ -2794,8 +3092,7 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(aead);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
int error;
crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
@@ -2813,8 +3110,7 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(aead);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
int error;
if (keylen < 3) {
@@ -2840,8 +3136,7 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(aead);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
struct crypto_cipher *cipher;
unsigned int ck_size;
@@ -2913,8 +3208,7 @@ out:
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(authenc);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
/* it contains auth and cipher key both*/
struct crypto_authenc_keys keys;
@@ -3034,8 +3328,7 @@ out:
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(authenc);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct crypto_authenc_keys keys;
int err;
@@ -3107,7 +3400,7 @@ static int chcr_aead_encrypt(struct aead_request *req)
static int chcr_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
int size;
@@ -3140,30 +3433,29 @@ static int chcr_aead_op(struct aead_request *req,
create_wr_t create_wr_fn)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
struct uld_ctx *u_ctx;
struct sk_buff *skb;
- if (!ctx->dev) {
+ if (!a_ctx(tfm)->dev) {
pr_err("chcr : %s : No crypto device.\n", __func__);
return -ENXIO;
}
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(a_ctx(tfm));
if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx)) {
+ a_ctx(tfm)->tx_qidx)) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
/* Form a WR from req */
- skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
op_type);
if (IS_ERR(skb) || !skb)
return PTR_ERR(skb);
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -3385,7 +3677,7 @@ static struct chcr_alg_template driver_algs[] = {
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_gcm_ctx),
},
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = GHASH_DIGEST_SIZE,
.setkey = chcr_gcm_setkey,
.setauthsize = chcr_gcm_setauthsize,
@@ -3405,7 +3697,7 @@ static struct chcr_alg_template driver_algs[] = {
sizeof(struct chcr_gcm_ctx),
},
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = GHASH_DIGEST_SIZE,
.setkey = chcr_gcm_setkey,
.setauthsize = chcr_4106_4309_setauthsize,
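
Note: the ivsize changes above (and the matching ones in the mediatek driver further down) replace the magic numbers 12 and 8 with the shared constants from include/crypto/gcm.h. A sketch of those definitions for reference — the values are the standard GCM and RFC 4106 IV lengths, though the exact header text may differ:

	#define GCM_AES_IV_SIZE		12	/* plain GCM nonce */
	#define GCM_RFC4106_IV_SIZE	8	/* explicit IV carried in ESP */
	#define GCM_RFC4543_IV_SIZE	8
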
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 583008de51a3..96c9335ee728 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -176,21 +176,21 @@
KEY_CONTEXT_SALT_PRESENT_V(1) | \
KEY_CONTEXT_CTX_LEN_V((ctx_len)))
-#define FILL_WR_OP_CCTX_SIZE(len, ctx_len) \
+#define FILL_WR_OP_CCTX_SIZE \
htonl( \
FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \
FW_CRYPTO_LOOKASIDE_WR) | \
FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \
- FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((len)) | \
- FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \
- FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len)))
+ FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((0)) | \
+ FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(0) | \
+ FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(0))
-#define FILL_WR_RX_Q_ID(cid, qid, wr_iv, lcb, fid) \
+#define FILL_WR_RX_Q_ID(cid, qid, lcb, fid) \
htonl( \
FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \
FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \
FW_CRYPTO_LOOKASIDE_WR_LCB_V((lcb)) | \
- FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv)) | \
+ FW_CRYPTO_LOOKASIDE_WR_IV_V((IV_NOP)) | \
FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(fid))
#define FILL_ULPTX_CMD_DEST(cid, qid) \
@@ -214,27 +214,22 @@
calc_tx_flits_ofld(skb) * 8), 16)))
#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\
- ULP_TX_SC_MORE_V((immdatalen) ? 0 : 1))
-
+ ULP_TX_SC_MORE_V((immdatalen)))
#define MAX_NK 8
-#define CRYPTO_MAX_IMM_TX_PKT_LEN 256
-#define MAX_WR_SIZE 512
#define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0)
#define MAX_DSGL_ENT 32
-#define MAX_DIGEST_SKB_SGE (MAX_SKB_FRAGS - 2)
#define MIN_CIPHER_SG 1 /* IV */
-#define MIN_AUTH_SG 2 /*IV + AAD*/
-#define MIN_GCM_SG 2 /* IV + AAD*/
+#define MIN_AUTH_SG 1 /* IV */
+#define MIN_GCM_SG 1 /* IV */
#define MIN_DIGEST_SG 1 /*Partial Buffer*/
-#define MIN_CCM_SG 3 /*IV+AAD+B0*/
+#define MIN_CCM_SG 2 /*IV+B0*/
#define SPACE_LEFT(len) \
- ((MAX_WR_SIZE - WR_MIN_LEN - (len)))
+ ((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))
-unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40,
- 48, 64, 72, 88,
- 96, 112, 120, 136,
- 144, 160, 168, 184,
- 192};
+unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88,
+ 96, 112, 120, 136, 144, 160, 168, 184,
+ 192, 208, 216, 232, 240, 256, 264, 280,
+ 288, 304, 312, 328, 336, 352, 360, 376};
unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80,
112, 112, 128, 128, 144, 144, 160, 160,
192, 192, 208, 208, 224, 224, 240, 240,
@@ -258,10 +253,8 @@ struct hash_wr_param {
struct cipher_wr_param {
struct ablkcipher_request *req;
- struct scatterlist *srcsg;
char *iv;
int bytes;
- short int snent;
unsigned short qid;
};
enum {
@@ -299,31 +292,11 @@ enum {
ICV_16 = 16
};
-struct hash_op_params {
- unsigned char mk_size;
- unsigned char pad_align;
- unsigned char auth_mode;
- char hash_name[MAX_HASH_NAME];
- unsigned short block_size;
- unsigned short word_size;
- unsigned short ipad_size;
-};
-
struct phys_sge_pairs {
__be16 len[8];
__be64 addr[8];
};
-struct phys_sge_parm {
- unsigned int nents;
- unsigned int obsize;
- unsigned short qid;
-};
-
-struct crypto_result {
- struct completion completion;
- int err;
-};
static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index b6dd9cbe815f..f5a2624081dc 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -154,15 +154,15 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
struct uld_ctx *u_ctx;
/* Create the device and add it in the device list */
+ if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* Create the device and add it in the device list */
u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
if (!u_ctx) {
u_ctx = ERR_PTR(-ENOMEM);
goto out;
}
- if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) {
- u_ctx = ERR_PTR(-ENOMEM);
- goto out;
- }
u_ctx->lldi = *lld;
out:
return u_ctx;
@@ -224,7 +224,7 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
static int __init chcr_crypto_init(void)
{
if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
- pr_err("ULD register fail: No chcr crypto support in cxgb4");
+ pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
return 0;
}
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index c9a19b2a1e9f..94e7412f6164 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -89,7 +89,7 @@ struct uld_ctx {
struct chcr_dev *dev;
};
-struct uld_ctx * assign_chcr_device(void);
+struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
int stop_crypto(void);
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 30af1ee17b87..94a87e3ad9bc 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -149,9 +149,23 @@
#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
-#define CHCR_SG_SIZE 2048
+#define CHCR_SRC_SG_SIZE (0x10000 - sizeof(int))
+#define CHCR_DST_SG_SIZE 2048
-/* Aligned to 128 bit boundary */
+static inline struct chcr_context *a_ctx(struct crypto_aead *tfm)
+{
+ return crypto_aead_ctx(tfm);
+}
+
+static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm)
+{
+ return crypto_ablkcipher_ctx(tfm);
+}
+
+static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+}
struct ablk_ctx {
struct crypto_skcipher *sw_cipher;
@@ -165,16 +179,39 @@ struct ablk_ctx {
};
struct chcr_aead_reqctx {
struct sk_buff *skb;
- struct scatterlist *dst;
- struct scatterlist *newdstsg;
- struct scatterlist srcffwd[2];
- struct scatterlist dstffwd[2];
+ dma_addr_t iv_dma;
+ dma_addr_t b0_dma;
+ unsigned int b0_len;
+ unsigned int op;
+ short int aad_nents;
+ short int src_nents;
short int dst_nents;
+ u16 imm;
u16 verify;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
};
+struct ulptx_walk {
+ struct ulptx_sgl *sgl;
+ unsigned int nents;
+ unsigned int pair_idx;
+ unsigned int last_sg_len;
+ struct scatterlist *last_sg;
+ struct ulptx_sge_pair *pair;
+
+};
+
+struct dsgl_walk {
+ unsigned int nents;
+ unsigned int last_sg_len;
+ struct scatterlist *last_sg;
+ struct cpl_rx_phys_dsgl *dsgl;
+ struct phys_sge_pairs *to;
+};
+
+
+
struct chcr_gcm_ctx {
u8 ghash_h[AEAD_H_SIZE];
};
@@ -195,7 +232,6 @@ struct __aead_ctx {
struct chcr_aead_ctx {
__be32 key_ctx_hdr;
unsigned int enckey_len;
- struct crypto_skcipher *null;
struct crypto_aead *sw_cipher;
u8 salt[MAX_SALT];
u8 key[CHCR_AES_MAX_KEY_LEN];
@@ -231,8 +267,11 @@ struct chcr_ahash_req_ctx {
u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 *reqbfr;
u8 *skbfr;
+ dma_addr_t dma_addr;
+ u32 dma_len;
u8 reqlen;
- /* DMA the partial hash in it */
+ u8 imm;
+ u8 is_sg_map;
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
u64 data_len; /* Data len till time */
/* SKB which is being sent to the hardware for processing */
@@ -241,14 +280,15 @@ struct chcr_ahash_req_ctx {
struct chcr_blkcipher_req_ctx {
struct sk_buff *skb;
- struct scatterlist srcffwd[2];
- struct scatterlist dstffwd[2];
struct scatterlist *dstsg;
- struct scatterlist *dst;
- struct scatterlist *newdstsg;
unsigned int processed;
+ unsigned int last_req_len;
+ struct scatterlist *srcsg;
+ unsigned int src_ofst;
+ unsigned int dst_ofst;
unsigned int op;
- short int dst_nents;
+ dma_addr_t iv_dma;
+ u16 imm;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
};
@@ -262,24 +302,6 @@ struct chcr_alg_template {
} alg;
};
-struct chcr_req_ctx {
- union {
- struct ahash_request *ahash_req;
- struct aead_request *aead_req;
- struct ablkcipher_request *ablk_req;
- } req;
- union {
- struct chcr_ahash_req_ctx *ahash_ctx;
- struct chcr_aead_reqctx *reqctx;
- struct chcr_blkcipher_req_ctx *ablk_ctx;
- } ctx;
-};
-
-struct sge_opaque_hdr {
- void *dev;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
-};
-
typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
unsigned short qid,
int size,
@@ -290,10 +312,39 @@ static int chcr_aead_op(struct aead_request *req_base,
int size,
create_wr_t create_wr_fn);
static inline int get_aead_subtype(struct crypto_aead *aead);
-static int is_newsg(struct scatterlist *sgl, unsigned int *newents);
-static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
- unsigned int nents);
-static inline void free_new_sg(struct scatterlist *sgl);
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unsigned char *input, int err);
+static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
+static int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+static void chcr_aead_dma_unmap(struct device *dev, struct aead_request
+ *req, unsigned short op_type);
+static inline void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen,
+ unsigned short op_type,
+ unsigned short qid);
+static inline void chcr_add_aead_src_ent(struct aead_request *req,
+ struct ulptx_sgl *ulptx,
+ unsigned int assoclen,
+ unsigned short op_type);
+static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam);
+static int chcr_cipher_dma_map(struct device *dev,
+ struct ablkcipher_request *req);
+static void chcr_cipher_dma_unmap(struct device *dev,
+ struct ablkcipher_request *req);
+static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid);
+int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
+static inline void chcr_add_hash_src_ent(struct ahash_request *req,
+ struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param);
+static inline int chcr_hash_dma_map(struct device *dev,
+ struct ahash_request *req);
+static inline void chcr_hash_dma_unmap(struct device *dev,
+ struct ahash_request *req);
#endif /* __CHCR_CRYPTO_H__ */
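
Note: taken together, the chelsio hunks above drop the old copy-into-skb path. Requests are DMA-mapped up front (chcr_aead_dma_map()/chcr_aead_dma_unmap() and the cipher/hash equivalents), destinations are described with PHYS_DSGL entries sized via sg_nents_xlen(), and the source is either inlined in the work request ("immediate" data) or referenced through an ULPTX SGL, chosen per request and cached in reqctx->imm. A minimal sketch of that decision, using a hypothetical helper name purely for illustration — in the driver the comparison is written inline against SGE_MAX_WR_LEN:

	/* Sketch only: inline the payload when header + AAD + IV + data
	 * fit into a single work request; otherwise fall back to an
	 * ULPTX scatter/gather list over the DMA-mapped buffers.
	 */
	static bool chcr_payload_is_immediate(unsigned int transhdr_len,
					      unsigned int assoclen,
					      unsigned int cryptlen)
	{
		return transhdr_len + assoclen + IV + cryptlen <= SGE_MAX_WR_LEN;
	}
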
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 3980f946874f..74feb6227101 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -308,10 +308,8 @@ unmap_cache:
ctx->base.cache_sz = 0;
}
free_cache:
- if (ctx->base.cache) {
- kfree(ctx->base.cache);
- ctx->base.cache = NULL;
- }
+ kfree(ctx->base.cache);
+ ctx->base.cache = NULL;
unlock:
spin_unlock_bh(&priv->ring[ring].egress_lock);
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index dadc4a808df5..8705b28eb02c 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -534,7 +534,6 @@ static void release_ixp_crypto(struct device *dev)
NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
crypt_virt, crypt_phys);
}
- return;
}
static void reset_sa_dir(struct ix_sa_dir *dir)
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 6e7a5c77a00a..293832488cc9 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -34,10 +34,6 @@
/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128
-static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
-module_param_named(allhwsupport, allhwsupport, int, 0444);
-MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if overlaps with the mv_cesa driver)");
-
struct mv_cesa_dev *cesa_dev;
struct crypto_async_request *
@@ -76,8 +72,6 @@ static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
ctx = crypto_tfm_ctx(req->tfm);
ctx->ops->step(req);
-
- return;
}
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
@@ -183,8 +177,7 @@ int mv_cesa_queue_req(struct crypto_async_request *req,
spin_lock_bh(&engine->lock);
ret = crypto_enqueue_request(&engine->queue, req);
if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
- (ret == -EINPROGRESS ||
- (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ (ret == -EINPROGRESS || ret == -EBUSY))
mv_cesa_tdma_chain(engine, creq);
spin_unlock_bh(&engine->lock);
@@ -202,7 +195,7 @@ static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
int i, j;
for (i = 0; i < cesa->caps->ncipher_algs; i++) {
- ret = crypto_register_alg(cesa->caps->cipher_algs[i]);
+ ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
if (ret)
goto err_unregister_crypto;
}
@@ -222,7 +215,7 @@ err_unregister_ahash:
err_unregister_crypto:
for (j = 0; j < i; j++)
- crypto_unregister_alg(cesa->caps->cipher_algs[j]);
+ crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);
return ret;
}
@@ -235,10 +228,10 @@ static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
crypto_unregister_ahash(cesa->caps->ahash_algs[i]);
for (i = 0; i < cesa->caps->ncipher_algs; i++)
- crypto_unregister_alg(cesa->caps->cipher_algs[i]);
+ crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}
-static struct crypto_alg *orion_cipher_algs[] = {
+static struct skcipher_alg *orion_cipher_algs[] = {
&mv_cesa_ecb_des_alg,
&mv_cesa_cbc_des_alg,
&mv_cesa_ecb_des3_ede_alg,
@@ -254,7 +247,7 @@ static struct ahash_alg *orion_ahash_algs[] = {
&mv_ahmac_sha1_alg,
};
-static struct crypto_alg *armada_370_cipher_algs[] = {
+static struct skcipher_alg *armada_370_cipher_algs[] = {
&mv_cesa_ecb_des_alg,
&mv_cesa_cbc_des_alg,
&mv_cesa_ecb_des3_ede_alg,
@@ -459,9 +452,6 @@ static int mv_cesa_probe(struct platform_device *pdev)
caps = match->data;
}
- if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
- return -ENOTSUPP;
-
cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
if (!cesa)
return -ENOMEM;
@@ -599,9 +589,16 @@ static int mv_cesa_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id mv_cesa_plat_id_table[] = {
+ { .name = "mv_crypto" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);
+
static struct platform_driver marvell_cesa = {
.probe = mv_cesa_probe,
.remove = mv_cesa_remove,
+ .id_table = mv_cesa_plat_id_table,
.driver = {
.name = "marvell-cesa",
.of_match_table = mv_cesa_of_match_table,
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index 0032e3bf46ee..d63a6ee905c9 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -5,6 +5,7 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <linux/crypto.h>
#include <linux/dmapool.h>
@@ -373,7 +374,7 @@ struct mv_cesa_engine;
struct mv_cesa_caps {
int nengines;
bool has_tdma;
- struct crypto_alg **cipher_algs;
+ struct skcipher_alg **cipher_algs;
int ncipher_algs;
struct ahash_alg **ahash_algs;
int nahash_algs;
@@ -539,12 +540,12 @@ struct mv_cesa_sg_std_iter {
};
/**
- * struct mv_cesa_ablkcipher_std_req - cipher standard request
+ * struct mv_cesa_skcipher_std_req - cipher standard request
* @op: operation context
* @offset: current operation offset
* @size: size of the crypto operation
*/
-struct mv_cesa_ablkcipher_std_req {
+struct mv_cesa_skcipher_std_req {
struct mv_cesa_op_ctx op;
unsigned int offset;
unsigned int size;
@@ -552,14 +553,14 @@ struct mv_cesa_ablkcipher_std_req {
};
/**
- * struct mv_cesa_ablkcipher_req - cipher request
+ * struct mv_cesa_skcipher_req - cipher request
* @req: type specific request information
* @src_nents: number of entries in the src sg list
* @dst_nents: number of entries in the dest sg list
*/
-struct mv_cesa_ablkcipher_req {
+struct mv_cesa_skcipher_req {
struct mv_cesa_req base;
- struct mv_cesa_ablkcipher_std_req std;
+ struct mv_cesa_skcipher_std_req std;
int src_nents;
int dst_nents;
};
@@ -764,7 +765,7 @@ static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
* the backlog and will be processed later. There's no need to
* clean it up.
*/
- if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ if (ret == -EBUSY)
return false;
/* Request wasn't queued, we need to clean it up */
@@ -869,11 +870,11 @@ extern struct ahash_alg mv_ahmac_md5_alg;
extern struct ahash_alg mv_ahmac_sha1_alg;
extern struct ahash_alg mv_ahmac_sha256_alg;
-extern struct crypto_alg mv_cesa_ecb_des_alg;
-extern struct crypto_alg mv_cesa_cbc_des_alg;
-extern struct crypto_alg mv_cesa_ecb_des3_ede_alg;
-extern struct crypto_alg mv_cesa_cbc_des3_ede_alg;
-extern struct crypto_alg mv_cesa_ecb_aes_alg;
-extern struct crypto_alg mv_cesa_cbc_aes_alg;
+extern struct skcipher_alg mv_cesa_ecb_des_alg;
+extern struct skcipher_alg mv_cesa_cbc_des_alg;
+extern struct skcipher_alg mv_cesa_ecb_des3_ede_alg;
+extern struct skcipher_alg mv_cesa_cbc_des3_ede_alg;
+extern struct skcipher_alg mv_cesa_ecb_aes_alg;
+extern struct skcipher_alg mv_cesa_cbc_aes_alg;
#endif /* __MARVELL_CESA_H__ */
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 098871a22a54..0ae84ec9e21c 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -32,23 +32,23 @@ struct mv_cesa_aes_ctx {
struct crypto_aes_ctx aes;
};
-struct mv_cesa_ablkcipher_dma_iter {
+struct mv_cesa_skcipher_dma_iter {
struct mv_cesa_dma_iter base;
struct mv_cesa_sg_dma_iter src;
struct mv_cesa_sg_dma_iter dst;
};
static inline void
-mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
- struct ablkcipher_request *req)
+mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
+ struct skcipher_request *req)
{
- mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
+ mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}
static inline bool
-mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
+mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
iter->src.op_offset = 0;
iter->dst.op_offset = 0;
@@ -57,9 +57,9 @@ mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
}
static inline void
-mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
+mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
if (req->dst != req->src) {
dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
@@ -73,20 +73,20 @@ mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
mv_cesa_dma_cleanup(&creq->base);
}
-static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
+static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
- mv_cesa_ablkcipher_dma_cleanup(req);
+ mv_cesa_skcipher_dma_cleanup(req);
}
-static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
+static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_engine *engine = creq->base.engine;
- size_t len = min_t(size_t, req->nbytes - sreq->offset,
+ size_t len = min_t(size_t, req->cryptlen - sreq->offset,
CESA_SA_SRAM_PAYLOAD_SIZE);
mv_cesa_adjust_op(engine, &sreq->op);
@@ -114,11 +114,11 @@ static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
-static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
- u32 status)
+static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
+ u32 status)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_engine *engine = creq->base.engine;
size_t len;
@@ -127,122 +127,130 @@ static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
sreq->size, sreq->offset);
sreq->offset += len;
- if (sreq->offset < req->nbytes)
+ if (sreq->offset < req->cryptlen)
return -EINPROGRESS;
return 0;
}
-static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
- u32 status)
+static int mv_cesa_skcipher_process(struct crypto_async_request *req,
+ u32 status)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
struct mv_cesa_req *basereq = &creq->base;
if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
- return mv_cesa_ablkcipher_std_process(ablkreq, status);
+ return mv_cesa_skcipher_std_process(skreq, status);
return mv_cesa_dma_process(basereq, status);
}
-static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
+static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_dma_step(&creq->base);
else
- mv_cesa_ablkcipher_std_step(ablkreq);
+ mv_cesa_skcipher_std_step(skreq);
}
static inline void
-mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
+mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_req *basereq = &creq->base;
mv_cesa_dma_prepare(basereq, basereq->engine);
}
static inline void
-mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
+mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_skcipher_std_req *sreq = &creq->std;
sreq->size = 0;
sreq->offset = 0;
}
-static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
- struct mv_cesa_engine *engine)
+static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
+ struct mv_cesa_engine *engine)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
creq->base.engine = engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
- mv_cesa_ablkcipher_dma_prepare(ablkreq);
+ mv_cesa_skcipher_dma_prepare(skreq);
else
- mv_cesa_ablkcipher_std_prepare(ablkreq);
+ mv_cesa_skcipher_std_prepare(skreq);
}
static inline void
-mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
+mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
- mv_cesa_ablkcipher_cleanup(ablkreq);
+ mv_cesa_skcipher_cleanup(skreq);
}
static void
-mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
+mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
struct mv_cesa_engine *engine = creq->base.engine;
unsigned int ivsize;
- atomic_sub(ablkreq->nbytes, &engine->load);
- ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
+ atomic_sub(skreq->cryptlen, &engine->load);
+ ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
struct mv_cesa_req *basereq;
basereq = &creq->base;
- memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
+ memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
ivsize);
} else {
- memcpy_fromio(ablkreq->info,
+ memcpy_fromio(skreq->iv,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
ivsize);
}
}
-static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
- .step = mv_cesa_ablkcipher_step,
- .process = mv_cesa_ablkcipher_process,
- .cleanup = mv_cesa_ablkcipher_req_cleanup,
- .complete = mv_cesa_ablkcipher_complete,
+static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
+ .step = mv_cesa_skcipher_step,
+ .process = mv_cesa_skcipher_process,
+ .cleanup = mv_cesa_skcipher_req_cleanup,
+ .complete = mv_cesa_skcipher_complete,
};
-static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
+static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
- struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ void *ctx = crypto_tfm_ctx(tfm);
+
+ memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
+}
+
+static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
+{
+ struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->base.ops = &mv_cesa_ablkcipher_req_ops;
+ ctx->ops = &mv_cesa_skcipher_req_ops;
- tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct mv_cesa_skcipher_req));
return 0;
}
-static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
int remaining;
int offset;
@@ -251,7 +259,7 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
ret = crypto_aes_expand_key(&ctx->aes, key, len);
if (ret) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return ret;
}
@@ -264,16 +272,16 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
u32 tmp[DES_EXPKEY_WORDS];
int ret;
if (len != DES_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -288,14 +296,14 @@ static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
+static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
if (len != DES3_EDE_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -304,14 +312,14 @@ static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
- const struct mv_cesa_op_ctx *op_templ)
+static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
+ const struct mv_cesa_op_ctx *op_templ)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
struct mv_cesa_req *basereq = &creq->base;
- struct mv_cesa_ablkcipher_dma_iter iter;
+ struct mv_cesa_skcipher_dma_iter iter;
bool skip_ctx = false;
int ret;
unsigned int ivsize;
@@ -339,7 +347,7 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
}
mv_cesa_tdma_desc_iter_init(&basereq->chain);
- mv_cesa_ablkcipher_req_iter_init(&iter, req);
+ mv_cesa_skcipher_req_iter_init(&iter, req);
do {
struct mv_cesa_op_ctx *op;
@@ -370,10 +378,10 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
if (ret)
goto err_free_tdma;
- } while (mv_cesa_ablkcipher_req_iter_next_op(&iter));
+ } while (mv_cesa_skcipher_req_iter_next_op(&iter));
/* Add output data for IV */
- ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
+ ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
CESA_SA_DATA_SRAM_OFFSET,
CESA_TDMA_SRC_IN_SRAM, flags);
@@ -399,11 +407,11 @@ err_unmap_src:
}
static inline int
-mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
- const struct mv_cesa_op_ctx *op_templ)
+mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
+ const struct mv_cesa_op_ctx *op_templ)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_req *basereq = &creq->base;
sreq->op = *op_templ;
@@ -414,23 +422,23 @@ mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
return 0;
}
-static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
- struct mv_cesa_op_ctx *tmpl)
+static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
+ struct mv_cesa_op_ctx *tmpl)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int blksize = crypto_skcipher_blocksize(tfm);
int ret;
- if (!IS_ALIGNED(req->nbytes, blksize))
+ if (!IS_ALIGNED(req->cryptlen, blksize))
return -EINVAL;
- creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
+ creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (creq->src_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of src SG");
return creq->src_nents;
}
- creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (creq->dst_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of dst SG");
return creq->dst_nents;
@@ -440,36 +448,36 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
CESA_SA_DESC_CFG_OP_MSK);
if (cesa_dev->caps->has_tdma)
- ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
+ ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
else
- ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);
+ ret = mv_cesa_skcipher_std_req_init(req, tmpl);
return ret;
}
-static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req,
- struct mv_cesa_op_ctx *tmpl)
+static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
+ struct mv_cesa_op_ctx *tmpl)
{
int ret;
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_engine *engine;
- ret = mv_cesa_ablkcipher_req_init(req, tmpl);
+ ret = mv_cesa_skcipher_req_init(req, tmpl);
if (ret)
return ret;
- engine = mv_cesa_select_engine(req->nbytes);
- mv_cesa_ablkcipher_prepare(&req->base, engine);
+ engine = mv_cesa_select_engine(req->cryptlen);
+ mv_cesa_skcipher_prepare(&req->base, engine);
ret = mv_cesa_queue_req(&req->base, &creq->base);
if (mv_cesa_req_needs_cleanup(&req->base, ret))
- mv_cesa_ablkcipher_cleanup(req);
+ mv_cesa_skcipher_cleanup(req);
return ret;
}
-static int mv_cesa_des_op(struct ablkcipher_request *req,
+static int mv_cesa_des_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -479,10 +487,10 @@ static int mv_cesa_des_op(struct ablkcipher_request *req,
memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);
- return mv_cesa_ablkcipher_queue_req(req, tmpl);
+ return mv_cesa_skcipher_queue_req(req, tmpl);
}
-static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -493,7 +501,7 @@ static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
return mv_cesa_des_op(req, &tmpl);
}
-static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -504,41 +512,38 @@ static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
return mv_cesa_des_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_ecb_des_alg = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "mv-ecb-des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = mv_cesa_des_setkey,
- .encrypt = mv_cesa_ecb_des_encrypt,
- .decrypt = mv_cesa_ecb_des_decrypt,
- },
+struct skcipher_alg mv_cesa_ecb_des_alg = {
+ .setkey = mv_cesa_des_setkey,
+ .encrypt = mv_cesa_ecb_des_encrypt,
+ .decrypt = mv_cesa_ecb_des_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "mv-ecb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_cbc_des_op(struct ablkcipher_request *req,
+static int mv_cesa_cbc_des_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);
- memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE);
+ memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);
return mv_cesa_des_op(req, tmpl);
}
-static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -547,7 +552,7 @@ static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des_op(req, &tmpl);
}
-static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -556,31 +561,28 @@ static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_cbc_des_alg = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "mv-cbc-des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = mv_cesa_des_setkey,
- .encrypt = mv_cesa_cbc_des_encrypt,
- .decrypt = mv_cesa_cbc_des_decrypt,
- },
+struct skcipher_alg mv_cesa_cbc_des_alg = {
+ .setkey = mv_cesa_des_setkey,
+ .encrypt = mv_cesa_cbc_des_encrypt,
+ .decrypt = mv_cesa_cbc_des_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "mv-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_des3_op(struct ablkcipher_request *req,
+static int mv_cesa_des3_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -590,10 +592,10 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req,
memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
- return mv_cesa_ablkcipher_queue_req(req, tmpl);
+ return mv_cesa_skcipher_queue_req(req, tmpl);
}
-static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -605,7 +607,7 @@ static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
return mv_cesa_des3_op(req, &tmpl);
}
-static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -617,39 +619,36 @@ static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
return mv_cesa_des3_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_ecb_des3_ede_alg = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "mv-ecb-des3-ede",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = mv_cesa_des3_ede_setkey,
- .encrypt = mv_cesa_ecb_des3_ede_encrypt,
- .decrypt = mv_cesa_ecb_des3_ede_decrypt,
- },
+struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
+ .setkey = mv_cesa_des3_ede_setkey,
+ .encrypt = mv_cesa_ecb_des3_ede_encrypt,
+ .decrypt = mv_cesa_ecb_des3_ede_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "mv-ecb-des3-ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req,
+static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
- memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE);
+ memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);
return mv_cesa_des3_op(req, tmpl);
}
-static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -661,7 +660,7 @@ static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des3_op(req, &tmpl);
}
-static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -673,31 +672,28 @@ static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des3_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_cbc_des3_ede_alg = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "mv-cbc-des3-ede",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = mv_cesa_des3_ede_setkey,
- .encrypt = mv_cesa_cbc_des3_ede_encrypt,
- .decrypt = mv_cesa_cbc_des3_ede_decrypt,
- },
+struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
+ .setkey = mv_cesa_des3_ede_setkey,
+ .encrypt = mv_cesa_cbc_des3_ede_encrypt,
+ .decrypt = mv_cesa_cbc_des3_ede_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "mv-cbc-des3-ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_aes_op(struct ablkcipher_request *req,
+static int mv_cesa_aes_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -724,10 +720,10 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
CESA_SA_DESC_CFG_CRYPTM_MSK |
CESA_SA_DESC_CFG_AES_LEN_MSK);
- return mv_cesa_ablkcipher_queue_req(req, tmpl);
+ return mv_cesa_skcipher_queue_req(req, tmpl);
}
-static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -738,7 +734,7 @@ static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
return mv_cesa_aes_op(req, &tmpl);
}
-static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -749,40 +745,37 @@ static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
return mv_cesa_aes_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_ecb_aes_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "mv-ecb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mv_cesa_aes_setkey,
- .encrypt = mv_cesa_ecb_aes_encrypt,
- .decrypt = mv_cesa_ecb_aes_decrypt,
- },
+struct skcipher_alg mv_cesa_ecb_aes_alg = {
+ .setkey = mv_cesa_aes_setkey,
+ .encrypt = mv_cesa_ecb_aes_encrypt,
+ .decrypt = mv_cesa_ecb_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "mv-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
+static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);
- memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);
+ memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);
return mv_cesa_aes_op(req, tmpl);
}
-static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -791,7 +784,7 @@ static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_aes_op(req, &tmpl);
}
-static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -800,26 +793,23 @@ static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_aes_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_cbc_aes_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "mv-cbc-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mv_cesa_aes_setkey,
- .encrypt = mv_cesa_cbc_aes_encrypt,
- .decrypt = mv_cesa_cbc_aes_decrypt,
- },
+struct skcipher_alg mv_cesa_cbc_aes_alg = {
+ .setkey = mv_cesa_aes_setkey,
+ .encrypt = mv_cesa_cbc_aes_encrypt,
+ .decrypt = mv_cesa_cbc_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "mv-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index c76375ff376d..d0ef171c18df 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -304,10 +304,7 @@ int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
struct mv_cesa_tdma_desc *tdma;
tdma = mv_cesa_dma_add_desc(chain, flags);
- if (IS_ERR(tdma))
- return PTR_ERR(tdma);
-
- return 0;
+ return PTR_ERR_OR_ZERO(tdma);
}
int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 9e845e866dec..c2058cf59f57 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -13,6 +13,7 @@
*/
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include "mtk-platform.h"
#define AES_QUEUE_SIZE 512
@@ -137,11 +138,6 @@ struct mtk_aes_gcm_ctx {
struct crypto_skcipher *ctr;
};
-struct mtk_aes_gcm_setkey_result {
- int err;
- struct completion completion;
-};
-
struct mtk_aes_drv {
struct list_head dev_list;
/* Device list lock */
@@ -928,25 +924,19 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
+ /* Empty messages are not supported yet */
+ if (!gctx->textlen && !req->assoclen)
+ return -EINVAL;
+
rctx->mode = AES_FLAGS_GCM | mode;
return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
&req->base);
}
-static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err)
-{
- struct mtk_aes_gcm_setkey_result *result = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- result->err = err;
- complete(&result->completion);
-}
-
/*
* Because of the hardware limitation, we need to pre-calculate key(H)
* for the GHASH operation. The result of the encryption operation
@@ -962,7 +952,7 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
u32 hash[4];
u8 iv[8];
- struct mtk_aes_gcm_setkey_result result;
+ struct crypto_wait wait;
struct scatterlist sg[1];
struct skcipher_request req;
@@ -1002,22 +992,17 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (!data)
return -ENOMEM;
- init_completion(&data->result.completion);
+ crypto_init_wait(&data->wait);
sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
skcipher_request_set_tfm(&data->req, ctr);
skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
- mtk_gcm_setkey_done, &data->result);
+ crypto_req_done, &data->wait);
skcipher_request_set_crypt(&data->req, data->sg, data->sg,
AES_BLOCK_SIZE, data->iv);
- err = crypto_skcipher_encrypt(&data->req);
- if (err == -EINPROGRESS || err == -EBUSY) {
- err = wait_for_completion_interruptible(
- &data->result.completion);
- if (!err)
- err = data->result.err;
- }
+ err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
+ &data->wait);
if (err)
goto out;
@@ -1098,7 +1083,7 @@ static struct aead_alg aes_gcm_alg = {
.decrypt = mtk_aes_gcm_decrypt,
.init = mtk_aes_gcm_init,
.exit = mtk_aes_gcm_exit,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
deleted file mode 100644
index 0eb2706f23c8..000000000000
--- a/drivers/crypto/mv_cesa.c
+++ /dev/null
@@ -1,1216 +0,0 @@
-/*
- * Support for Marvell's crypto engine which can be found on some Orion5X
- * boards.
- *
- * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
- * License: GPLv2
- *
- */
-#include <crypto/aes.h>
-#include <crypto/algapi.h>
-#include <linux/crypto.h>
-#include <linux/genalloc.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kthread.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <crypto/hmac.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-
-#include "mv_cesa.h"
-
-#define MV_CESA "MV-CESA:"
-#define MAX_HW_HASH_SIZE 0xFFFF
-#define MV_CESA_EXPIRE 500 /* msec */
-
-#define MV_CESA_DEFAULT_SRAM_SIZE 2048
-
-/*
- * STM:
- * /---------------------------------------\
- * | | request complete
- * \./ |
- * IDLE -> new request -> BUSY -> done -> DEQUEUE
- * /°\ |
- * | | more scatter entries
- * \________________/
- */
-enum engine_status {
- ENGINE_IDLE,
- ENGINE_BUSY,
- ENGINE_W_DEQUEUE,
-};
-
-/**
- * struct req_progress - used for every crypt request
- * @src_sg_it: sg iterator for src
- * @dst_sg_it: sg iterator for dst
- * @sg_src_left: bytes left in src to process (scatter list)
- * @src_start: offset to add to src start position (scatter list)
- * @crypt_len: length of current hw crypt/hash process
- * @hw_nbytes: total bytes to process in hw for this request
- * @copy_back: whether to copy data back (crypt) or not (hash)
- * @sg_dst_left: bytes left in dst to process in this scatter list
- * @dst_start: offset to add to dst start position (scatter list)
- * @hw_processed_bytes: number of bytes processed by hw (request).
- *
- * sg helpers are used to iterate over the scatterlist. Since the size of the
- * SRAM may be less than the scatter size, this struct is used to keep
- * track of progress within the current scatterlist.
- */
-struct req_progress {
- struct sg_mapping_iter src_sg_it;
- struct sg_mapping_iter dst_sg_it;
- void (*complete) (void);
- void (*process) (int is_first);
-
- /* src mostly */
- int sg_src_left;
- int src_start;
- int crypt_len;
- int hw_nbytes;
- /* dst mostly */
- int copy_back;
- int sg_dst_left;
- int dst_start;
- int hw_processed_bytes;
-};
-
-struct crypto_priv {
- void __iomem *reg;
- void __iomem *sram;
- struct gen_pool *sram_pool;
- dma_addr_t sram_dma;
- int irq;
- struct clk *clk;
- struct task_struct *queue_th;
-
- /* the lock protects queue and eng_st */
- spinlock_t lock;
- struct crypto_queue queue;
- enum engine_status eng_st;
- struct timer_list completion_timer;
- struct crypto_async_request *cur_req;
- struct req_progress p;
- int max_req_size;
- int sram_size;
- int has_sha1;
- int has_hmac_sha1;
-};
-
-static struct crypto_priv *cpg;
-
-struct mv_ctx {
- u8 aes_enc_key[AES_KEY_LEN];
- u32 aes_dec_key[8];
- int key_len;
- u32 need_calc_aes_dkey;
-};
-
-enum crypto_op {
- COP_AES_ECB,
- COP_AES_CBC,
-};
-
-struct mv_req_ctx {
- enum crypto_op op;
- int decrypt;
-};
-
-enum hash_op {
- COP_SHA1,
- COP_HMAC_SHA1
-};
-
-struct mv_tfm_hash_ctx {
- struct crypto_shash *fallback;
- struct crypto_shash *base_hash;
- u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
- int count_add;
- enum hash_op op;
-};
-
-struct mv_req_hash_ctx {
- u64 count;
- u32 state[SHA1_DIGEST_SIZE / 4];
- u8 buffer[SHA1_BLOCK_SIZE];
- int first_hash; /* marks that we don't have previous state */
- int last_chunk; /* marks that this is the 'final' request */
- int extra_bytes; /* unprocessed bytes in buffer */
- enum hash_op op;
- int count_add;
-};
-
-static void mv_completion_timer_callback(struct timer_list *unused)
-{
- int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;
-
- printk(KERN_ERR MV_CESA
- "completion timer expired (CESA %sactive), cleaning up.\n",
- active ? "" : "in");
-
- del_timer(&cpg->completion_timer);
- writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
- while(readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
- printk(KERN_INFO MV_CESA "%s: waiting for engine finishing\n", __func__);
- cpg->eng_st = ENGINE_W_DEQUEUE;
- wake_up_process(cpg->queue_th);
-}
-
-static void mv_setup_timer(void)
-{
- timer_setup(&cpg->completion_timer, mv_completion_timer_callback, 0);
- mod_timer(&cpg->completion_timer,
- jiffies + msecs_to_jiffies(MV_CESA_EXPIRE));
-}
-
-static void compute_aes_dec_key(struct mv_ctx *ctx)
-{
- struct crypto_aes_ctx gen_aes_key;
- int key_pos;
-
- if (!ctx->need_calc_aes_dkey)
- return;
-
- crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
-
- key_pos = ctx->key_len + 24;
- memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
- switch (ctx->key_len) {
- case AES_KEYSIZE_256:
- key_pos -= 2;
- /* fall */
- case AES_KEYSIZE_192:
- key_pos -= 2;
- memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
- 4 * 4);
- break;
- }
- ctx->need_calc_aes_dkey = 0;
-}
-
-static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
- unsigned int len)
-{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
-
- switch (len) {
- case AES_KEYSIZE_128:
- case AES_KEYSIZE_192:
- case AES_KEYSIZE_256:
- break;
- default:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- ctx->key_len = len;
- ctx->need_calc_aes_dkey = 1;
-
- memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
- return 0;
-}
-
-static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
-{
- int ret;
- void *sbuf;
- int copy_len;
-
- while (len) {
- if (!p->sg_src_left) {
- ret = sg_miter_next(&p->src_sg_it);
- BUG_ON(!ret);
- p->sg_src_left = p->src_sg_it.length;
- p->src_start = 0;
- }
-
- sbuf = p->src_sg_it.addr + p->src_start;
-
- copy_len = min(p->sg_src_left, len);
- memcpy(dbuf, sbuf, copy_len);
-
- p->src_start += copy_len;
- p->sg_src_left -= copy_len;
-
- len -= copy_len;
- dbuf += copy_len;
- }
-}
-
-static void setup_data_in(void)
-{
- struct req_progress *p = &cpg->p;
- int data_in_sram =
- min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
- copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
- data_in_sram - p->crypt_len);
- p->crypt_len = data_in_sram;
-}
-
-static void mv_process_current_q(int first_block)
-{
- struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
- struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
- struct sec_accel_config op;
-
- switch (req_ctx->op) {
- case COP_AES_ECB:
- op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
- break;
- case COP_AES_CBC:
- default:
- op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
- op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
- ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
- if (first_block)
- memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
- break;
- }
- if (req_ctx->decrypt) {
- op.config |= CFG_DIR_DEC;
- memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
- AES_KEY_LEN);
- } else {
- op.config |= CFG_DIR_ENC;
- memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
- AES_KEY_LEN);
- }
-
- switch (ctx->key_len) {
- case AES_KEYSIZE_128:
- op.config |= CFG_AES_LEN_128;
- break;
- case AES_KEYSIZE_192:
- op.config |= CFG_AES_LEN_192;
- break;
- case AES_KEYSIZE_256:
- op.config |= CFG_AES_LEN_256;
- break;
- }
- op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
- ENC_P_DST(SRAM_DATA_OUT_START);
- op.enc_key_p = SRAM_DATA_KEY_P;
-
- setup_data_in();
- op.enc_len = cpg->p.crypt_len;
- memcpy(cpg->sram + SRAM_CONFIG, &op,
- sizeof(struct sec_accel_config));
-
- /* GO */
- mv_setup_timer();
- writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
-}
-
-static void mv_crypto_algo_completion(void)
-{
- struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- sg_miter_stop(&cpg->p.src_sg_it);
- sg_miter_stop(&cpg->p.dst_sg_it);
-
- if (req_ctx->op != COP_AES_CBC)
- return ;
-
- memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
-}
-
-static void mv_process_hash_current(int first_block)
-{
- struct ahash_request *req = ahash_request_cast(cpg->cur_req);
- const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
- struct req_progress *p = &cpg->p;
- struct sec_accel_config op = { 0 };
- int is_last;
-
- switch (req_ctx->op) {
- case COP_SHA1:
- default:
- op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
- break;
- case COP_HMAC_SHA1:
- op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
- memcpy(cpg->sram + SRAM_HMAC_IV_IN,
- tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
- break;
- }
-
- op.mac_src_p =
- MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32)
- req_ctx->
- count);
-
- setup_data_in();
-
- op.mac_digest =
- MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
- op.mac_iv =
- MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
- MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
-
- is_last = req_ctx->last_chunk
- && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
- && (req_ctx->count <= MAX_HW_HASH_SIZE);
- if (req_ctx->first_hash) {
- if (is_last)
- op.config |= CFG_NOT_FRAG;
- else
- op.config |= CFG_FIRST_FRAG;
-
- req_ctx->first_hash = 0;
- } else {
- if (is_last)
- op.config |= CFG_LAST_FRAG;
- else
- op.config |= CFG_MID_FRAG;
-
- if (first_block) {
- writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
- writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
- writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
- writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
- writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
- }
- }
-
- memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
-
- /* GO */
- mv_setup_timer();
- writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
-}
-
-static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
- struct shash_desc *desc)
-{
- int i;
- struct sha1_state shash_state;
-
- shash_state.count = ctx->count + ctx->count_add;
- for (i = 0; i < 5; i++)
- shash_state.state[i] = ctx->state[i];
- memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
- return crypto_shash_import(desc, &shash_state);
-}
-
-static int mv_hash_final_fallback(struct ahash_request *req)
-{
- const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
- SHASH_DESC_ON_STACK(shash, tfm_ctx->fallback);
- int rc;
-
- shash->tfm = tfm_ctx->fallback;
- shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- if (unlikely(req_ctx->first_hash)) {
- crypto_shash_init(shash);
- crypto_shash_update(shash, req_ctx->buffer,
- req_ctx->extra_bytes);
- } else {
- /* only SHA1 for now....
- */
- rc = mv_hash_import_sha1_ctx(req_ctx, shash);
- if (rc)
- goto out;
- }
- rc = crypto_shash_final(shash, req->result);
-out:
- return rc;
-}
-
-static void mv_save_digest_state(struct mv_req_hash_ctx *ctx)
-{
- ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
- ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
- ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
- ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
- ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
-}
-
-static void mv_hash_algo_completion(void)
-{
- struct ahash_request *req = ahash_request_cast(cpg->cur_req);
- struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-
- if (ctx->extra_bytes)
- copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
- sg_miter_stop(&cpg->p.src_sg_it);
-
- if (likely(ctx->last_chunk)) {
- if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
- memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
- crypto_ahash_digestsize(crypto_ahash_reqtfm
- (req)));
- } else {
- mv_save_digest_state(ctx);
- mv_hash_final_fallback(req);
- }
- } else {
- mv_save_digest_state(ctx);
- }
-}
-
-static void dequeue_complete_req(void)
-{
- struct crypto_async_request *req = cpg->cur_req;
- void *buf;
- int ret;
- cpg->p.hw_processed_bytes += cpg->p.crypt_len;
- if (cpg->p.copy_back) {
- int need_copy_len = cpg->p.crypt_len;
- int sram_offset = 0;
- do {
- int dst_copy;
-
- if (!cpg->p.sg_dst_left) {
- ret = sg_miter_next(&cpg->p.dst_sg_it);
- BUG_ON(!ret);
- cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
- cpg->p.dst_start = 0;
- }
-
- buf = cpg->p.dst_sg_it.addr;
- buf += cpg->p.dst_start;
-
- dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
-
- memcpy(buf,
- cpg->sram + SRAM_DATA_OUT_START + sram_offset,
- dst_copy);
- sram_offset += dst_copy;
- cpg->p.sg_dst_left -= dst_copy;
- need_copy_len -= dst_copy;
- cpg->p.dst_start += dst_copy;
- } while (need_copy_len > 0);
- }
-
- cpg->p.crypt_len = 0;
-
- BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
- if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
- /* process next scatter list entry */
- cpg->eng_st = ENGINE_BUSY;
- cpg->p.process(0);
- } else {
- cpg->p.complete();
- cpg->eng_st = ENGINE_IDLE;
- local_bh_disable();
- req->complete(req, 0);
- local_bh_enable();
- }
-}
-
-static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
-{
- int i = 0;
- size_t cur_len;
-
- while (sl) {
- cur_len = sl[i].length;
- ++i;
- if (total_bytes > cur_len)
- total_bytes -= cur_len;
- else
- break;
- }
-
- return i;
-}
-
-static void mv_start_new_crypt_req(struct ablkcipher_request *req)
-{
- struct req_progress *p = &cpg->p;
- int num_sgs;
-
- cpg->cur_req = &req->base;
- memset(p, 0, sizeof(struct req_progress));
- p->hw_nbytes = req->nbytes;
- p->complete = mv_crypto_algo_completion;
- p->process = mv_process_current_q;
- p->copy_back = 1;
-
- num_sgs = count_sgs(req->src, req->nbytes);
- sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
-
- num_sgs = count_sgs(req->dst, req->nbytes);
- sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
-
- mv_process_current_q(1);
-}
-
-static void mv_start_new_hash_req(struct ahash_request *req)
-{
- struct req_progress *p = &cpg->p;
- struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
- int num_sgs, hw_bytes, old_extra_bytes, rc;
- cpg->cur_req = &req->base;
- memset(p, 0, sizeof(struct req_progress));
- hw_bytes = req->nbytes + ctx->extra_bytes;
- old_extra_bytes = ctx->extra_bytes;
-
- ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
- if (ctx->extra_bytes != 0
- && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
- hw_bytes -= ctx->extra_bytes;
- else
- ctx->extra_bytes = 0;
-
- num_sgs = count_sgs(req->src, req->nbytes);
- sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
-
- if (hw_bytes) {
- p->hw_nbytes = hw_bytes;
- p->complete = mv_hash_algo_completion;
- p->process = mv_process_hash_current;
-
- if (unlikely(old_extra_bytes)) {
- memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
- old_extra_bytes);
- p->crypt_len = old_extra_bytes;
- }
-
- mv_process_hash_current(1);
- } else {
- copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
- ctx->extra_bytes - old_extra_bytes);
- sg_miter_stop(&p->src_sg_it);
- if (ctx->last_chunk)
- rc = mv_hash_final_fallback(req);
- else
- rc = 0;
- cpg->eng_st = ENGINE_IDLE;
- local_bh_disable();
- req->base.complete(&req->base, rc);
- local_bh_enable();
- }
-}
-
-static int queue_manag(void *data)
-{
- cpg->eng_st = ENGINE_IDLE;
- do {
- struct crypto_async_request *async_req = NULL;
- struct crypto_async_request *backlog = NULL;
-
- __set_current_state(TASK_INTERRUPTIBLE);
-
- if (cpg->eng_st == ENGINE_W_DEQUEUE)
- dequeue_complete_req();
-
- spin_lock_irq(&cpg->lock);
- if (cpg->eng_st == ENGINE_IDLE) {
- backlog = crypto_get_backlog(&cpg->queue);
- async_req = crypto_dequeue_request(&cpg->queue);
- if (async_req) {
- BUG_ON(cpg->eng_st != ENGINE_IDLE);
- cpg->eng_st = ENGINE_BUSY;
- }
- }
- spin_unlock_irq(&cpg->lock);
-
- if (backlog) {
- backlog->complete(backlog, -EINPROGRESS);
- backlog = NULL;
- }
-
- if (async_req) {
- if (crypto_tfm_alg_type(async_req->tfm) !=
- CRYPTO_ALG_TYPE_AHASH) {
- struct ablkcipher_request *req =
- ablkcipher_request_cast(async_req);
- mv_start_new_crypt_req(req);
- } else {
- struct ahash_request *req =
- ahash_request_cast(async_req);
- mv_start_new_hash_req(req);
- }
- async_req = NULL;
- }
-
- schedule();
-
- } while (!kthread_should_stop());
- return 0;
-}
-
-static int mv_handle_req(struct crypto_async_request *req)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&cpg->lock, flags);
- ret = crypto_enqueue_request(&cpg->queue, req);
- spin_unlock_irqrestore(&cpg->lock, flags);
- wake_up_process(cpg->queue_th);
- return ret;
-}
-
-static int mv_enc_aes_ecb(struct ablkcipher_request *req)
-{
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- req_ctx->op = COP_AES_ECB;
- req_ctx->decrypt = 0;
-
- return mv_handle_req(&req->base);
-}
-
-static int mv_dec_aes_ecb(struct ablkcipher_request *req)
-{
- struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- req_ctx->op = COP_AES_ECB;
- req_ctx->decrypt = 1;
-
- compute_aes_dec_key(ctx);
- return mv_handle_req(&req->base);
-}
-
-static int mv_enc_aes_cbc(struct ablkcipher_request *req)
-{
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- req_ctx->op = COP_AES_CBC;
- req_ctx->decrypt = 0;
-
- return mv_handle_req(&req->base);
-}
-
-static int mv_dec_aes_cbc(struct ablkcipher_request *req)
-{
- struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- req_ctx->op = COP_AES_CBC;
- req_ctx->decrypt = 1;
-
- compute_aes_dec_key(ctx);
- return mv_handle_req(&req->base);
-}
-
-static int mv_cra_init(struct crypto_tfm *tfm)
-{
- tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
- return 0;
-}
-
-static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
- int is_last, unsigned int req_len,
- int count_add)
-{
- memset(ctx, 0, sizeof(*ctx));
- ctx->op = op;
- ctx->count = req_len;
- ctx->first_hash = 1;
- ctx->last_chunk = is_last;
- ctx->count_add = count_add;
-}
-
-static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
- unsigned req_len)
-{
- ctx->last_chunk = is_last;
- ctx->count += req_len;
-}
-
-static int mv_hash_init(struct ahash_request *req)
-{
- const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
- mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
- tfm_ctx->count_add);
- return 0;
-}
-
-static int mv_hash_update(struct ahash_request *req)
-{
- if (!req->nbytes)
- return 0;
-
- mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
- return mv_handle_req(&req->base);
-}
-
-static int mv_hash_final(struct ahash_request *req)
-{
- struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-
- ahash_request_set_crypt(req, NULL, req->result, 0);
- mv_update_hash_req_ctx(ctx, 1, 0);
- return mv_handle_req(&req->base);
-}
-
-static int mv_hash_finup(struct ahash_request *req)
-{
- mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
- return mv_handle_req(&req->base);
-}
-
-static int mv_hash_digest(struct ahash_request *req)
-{
- const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
- mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
- req->nbytes, tfm_ctx->count_add);
- return mv_handle_req(&req->base);
-}
-
-static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
- const void *ostate)
-{
- const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
- int i;
- for (i = 0; i < 5; i++) {
- ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
- ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
- }
-}
-
-static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
- unsigned int keylen)
-{
- int rc;
- struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
- int bs, ds, ss;
-
- if (!ctx->base_hash)
- return 0;
-
- rc = crypto_shash_setkey(ctx->fallback, key, keylen);
- if (rc)
- return rc;
-
- /* Can't see a way to extract the ipad/opad from the fallback tfm
- so I'm basically copying code from the hmac module */
- bs = crypto_shash_blocksize(ctx->base_hash);
- ds = crypto_shash_digestsize(ctx->base_hash);
- ss = crypto_shash_statesize(ctx->base_hash);
-
- {
- SHASH_DESC_ON_STACK(shash, ctx->base_hash);
-
- unsigned int i;
- char ipad[ss];
- char opad[ss];
-
- shash->tfm = ctx->base_hash;
- shash->flags = crypto_shash_get_flags(ctx->base_hash) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- if (keylen > bs) {
- int err;
-
- err =
- crypto_shash_digest(shash, key, keylen, ipad);
- if (err)
- return err;
-
- keylen = ds;
- } else
- memcpy(ipad, key, keylen);
-
- memset(ipad + keylen, 0, bs - keylen);
- memcpy(opad, ipad, bs);
-
- for (i = 0; i < bs; i++) {
- ipad[i] ^= HMAC_IPAD_VALUE;
- opad[i] ^= HMAC_OPAD_VALUE;
- }
-
- rc = crypto_shash_init(shash) ? :
- crypto_shash_update(shash, ipad, bs) ? :
- crypto_shash_export(shash, ipad) ? :
- crypto_shash_init(shash) ? :
- crypto_shash_update(shash, opad, bs) ? :
- crypto_shash_export(shash, opad);
-
- if (rc == 0)
- mv_hash_init_ivs(ctx, ipad, opad);
-
- return rc;
- }
-}
-
-static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
- enum hash_op op, int count_add)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_shash *fallback_tfm = NULL;
- struct crypto_shash *base_hash = NULL;
- int err = -ENOMEM;
-
- ctx->op = op;
- ctx->count_add = count_add;
-
- /* Allocate a fallback and abort if it failed. */
- fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- printk(KERN_WARNING MV_CESA
- "Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
- ctx->fallback = fallback_tfm;
-
- if (base_hash_name) {
- /* Allocate a hash to compute the ipad/opad of hmac. */
- base_hash = crypto_alloc_shash(base_hash_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(base_hash)) {
- printk(KERN_WARNING MV_CESA
- "Base driver '%s' could not be loaded!\n",
- base_hash_name);
- err = PTR_ERR(base_hash);
- goto err_bad_base;
- }
- }
- ctx->base_hash = base_hash;
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct mv_req_hash_ctx) +
- crypto_shash_descsize(ctx->fallback));
- return 0;
-err_bad_base:
- crypto_free_shash(fallback_tfm);
-out:
- return err;
-}
-
-static void mv_cra_hash_exit(struct crypto_tfm *tfm)
-{
- struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_shash(ctx->fallback);
- if (ctx->base_hash)
- crypto_free_shash(ctx->base_hash);
-}
-
-static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
-{
- return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
-}
-
-static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
-{
- return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
-}
-
-static irqreturn_t crypto_int(int irq, void *priv)
-{
- u32 val;
-
- val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
- if (!(val & SEC_INT_ACCEL0_DONE))
- return IRQ_NONE;
-
- if (!del_timer(&cpg->completion_timer)) {
- printk(KERN_WARNING MV_CESA
- "got an interrupt but no pending timer?\n");
- }
- val &= ~SEC_INT_ACCEL0_DONE;
- writel(val, cpg->reg + FPGA_INT_STATUS);
- writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
- BUG_ON(cpg->eng_st != ENGINE_BUSY);
- cpg->eng_st = ENGINE_W_DEQUEUE;
- wake_up_process(cpg->queue_th);
- return IRQ_HANDLED;
-}
-
-static struct crypto_alg mv_aes_alg_ecb = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "mv-ecb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 16,
- .cra_ctxsize = sizeof(struct mv_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mv_setkey_aes,
- .encrypt = mv_enc_aes_ecb,
- .decrypt = mv_dec_aes_ecb,
- },
- },
-};
-
-static struct crypto_alg mv_aes_alg_cbc = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "mv-cbc-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cra_init,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mv_setkey_aes,
- .encrypt = mv_enc_aes_cbc,
- .decrypt = mv_dec_aes_cbc,
- },
- },
-};
-
-static struct ahash_alg mv_sha1_alg = {
- .init = mv_hash_init,
- .update = mv_hash_update,
- .final = mv_hash_final,
- .finup = mv_hash_finup,
- .digest = mv_hash_digest,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .base = {
- .cra_name = "sha1",
- .cra_driver_name = "mv-sha1",
- .cra_priority = 300,
- .cra_flags =
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
- .cra_init = mv_cra_hash_sha1_init,
- .cra_exit = mv_cra_hash_exit,
- .cra_module = THIS_MODULE,
- }
- }
-};
-
-static struct ahash_alg mv_hmac_sha1_alg = {
- .init = mv_hash_init,
- .update = mv_hash_update,
- .final = mv_hash_final,
- .finup = mv_hash_finup,
- .digest = mv_hash_digest,
- .setkey = mv_hash_setkey,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "mv-hmac-sha1",
- .cra_priority = 300,
- .cra_flags =
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
- .cra_init = mv_cra_hash_hmac_sha1_init,
- .cra_exit = mv_cra_hash_exit,
- .cra_module = THIS_MODULE,
- }
- }
-};
-
-static int mv_cesa_get_sram(struct platform_device *pdev,
- struct crypto_priv *cp)
-{
- struct resource *res;
- u32 sram_size = MV_CESA_DEFAULT_SRAM_SIZE;
-
- of_property_read_u32(pdev->dev.of_node, "marvell,crypto-sram-size",
- &sram_size);
-
- cp->sram_size = sram_size;
- cp->sram_pool = of_gen_pool_get(pdev->dev.of_node,
- "marvell,crypto-srams", 0);
- if (cp->sram_pool) {
- cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size,
- &cp->sram_dma);
- if (cp->sram)
- return 0;
-
- return -ENOMEM;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "sram");
- if (!res || resource_size(res) < cp->sram_size)
- return -EINVAL;
-
- cp->sram = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(cp->sram))
- return PTR_ERR(cp->sram);
-
- return 0;
-}
-
-static int mv_probe(struct platform_device *pdev)
-{
- struct crypto_priv *cp;
- struct resource *res;
- int irq;
- int ret;
-
- if (cpg) {
- printk(KERN_ERR MV_CESA "Second crypto dev?\n");
- return -EEXIST;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- if (!res)
- return -ENXIO;
-
- cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL);
- if (!cp)
- return -ENOMEM;
-
- spin_lock_init(&cp->lock);
- crypto_init_queue(&cp->queue, 50);
- cp->reg = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(cp->reg)) {
- ret = PTR_ERR(cp->reg);
- goto err;
- }
-
- ret = mv_cesa_get_sram(pdev, cp);
- if (ret)
- goto err;
-
- cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err;
- }
- cp->irq = irq;
-
- platform_set_drvdata(pdev, cp);
- cpg = cp;
-
- cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
- if (IS_ERR(cp->queue_th)) {
- ret = PTR_ERR(cp->queue_th);
- goto err;
- }
-
- ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
- cp);
- if (ret)
- goto err_thread;
-
- /* Not all platforms can gate the clock, so it is not
- an error if the clock does not exist. */
- cp->clk = clk_get(&pdev->dev, NULL);
- if (!IS_ERR(cp->clk))
- clk_prepare_enable(cp->clk);
-
- writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
- writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
- writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
- writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
-
- ret = crypto_register_alg(&mv_aes_alg_ecb);
- if (ret) {
- printk(KERN_WARNING MV_CESA
- "Could not register aes-ecb driver\n");
- goto err_irq;
- }
-
- ret = crypto_register_alg(&mv_aes_alg_cbc);
- if (ret) {
- printk(KERN_WARNING MV_CESA
- "Could not register aes-cbc driver\n");
- goto err_unreg_ecb;
- }
-
- ret = crypto_register_ahash(&mv_sha1_alg);
- if (ret == 0)
- cpg->has_sha1 = 1;
- else
- printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
-
- ret = crypto_register_ahash(&mv_hmac_sha1_alg);
- if (ret == 0) {
- cpg->has_hmac_sha1 = 1;
- } else {
- printk(KERN_WARNING MV_CESA
- "Could not register hmac-sha1 driver\n");
- }
-
- return 0;
-err_unreg_ecb:
- crypto_unregister_alg(&mv_aes_alg_ecb);
-err_irq:
- free_irq(irq, cp);
- if (!IS_ERR(cp->clk)) {
- clk_disable_unprepare(cp->clk);
- clk_put(cp->clk);
- }
-err_thread:
- kthread_stop(cp->queue_th);
-err:
- cpg = NULL;
- return ret;
-}
-
-static int mv_remove(struct platform_device *pdev)
-{
- struct crypto_priv *cp = platform_get_drvdata(pdev);
-
- crypto_unregister_alg(&mv_aes_alg_ecb);
- crypto_unregister_alg(&mv_aes_alg_cbc);
- if (cp->has_sha1)
- crypto_unregister_ahash(&mv_sha1_alg);
- if (cp->has_hmac_sha1)
- crypto_unregister_ahash(&mv_hmac_sha1_alg);
- kthread_stop(cp->queue_th);
- free_irq(cp->irq, cp);
- memset(cp->sram, 0, cp->sram_size);
-
- if (!IS_ERR(cp->clk)) {
- clk_disable_unprepare(cp->clk);
- clk_put(cp->clk);
- }
-
- cpg = NULL;
- return 0;
-}
-
-static const struct of_device_id mv_cesa_of_match_table[] = {
- { .compatible = "marvell,orion-crypto", },
- { .compatible = "marvell,kirkwood-crypto", },
- { .compatible = "marvell,dove-crypto", },
- {}
-};
-MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
-
-static struct platform_driver marvell_crypto = {
- .probe = mv_probe,
- .remove = mv_remove,
- .driver = {
- .name = "mv_crypto",
- .of_match_table = mv_cesa_of_match_table,
- },
-};
-MODULE_ALIAS("platform:mv_crypto");
-
-module_platform_driver(marvell_crypto);
-
-MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
-MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
-MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
deleted file mode 100644
index 0be3f0aa4afd..000000000000
--- a/drivers/crypto/mv_cesa.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __MV_CRYPTO_H__
-#define __MV_CRYPTO_H__
-
-#define DIGEST_INITIAL_VAL_A 0xdd00
-#define DIGEST_INITIAL_VAL_B 0xdd04
-#define DIGEST_INITIAL_VAL_C 0xdd08
-#define DIGEST_INITIAL_VAL_D 0xdd0c
-#define DIGEST_INITIAL_VAL_E 0xdd10
-#define DES_CMD_REG 0xdd58
-
-#define SEC_ACCEL_CMD 0xde00
-#define SEC_CMD_EN_SEC_ACCL0 (1 << 0)
-#define SEC_CMD_EN_SEC_ACCL1 (1 << 1)
-#define SEC_CMD_DISABLE_SEC (1 << 2)
-
-#define SEC_ACCEL_DESC_P0 0xde04
-#define SEC_DESC_P0_PTR(x) (x)
-
-#define SEC_ACCEL_DESC_P1 0xde14
-#define SEC_DESC_P1_PTR(x) (x)
-
-#define SEC_ACCEL_CFG 0xde08
-#define SEC_CFG_STOP_DIG_ERR (1 << 0)
-#define SEC_CFG_CH0_W_IDMA (1 << 7)
-#define SEC_CFG_CH1_W_IDMA (1 << 8)
-#define SEC_CFG_ACT_CH0_IDMA (1 << 9)
-#define SEC_CFG_ACT_CH1_IDMA (1 << 10)
-
-#define SEC_ACCEL_STATUS 0xde0c
-#define SEC_ST_ACT_0 (1 << 0)
-#define SEC_ST_ACT_1 (1 << 1)
-
-/*
- * FPGA_INT_STATUS looks like an FPGA leftover and is documented only in Errata
- * 4.12. It looks like it was part of an IRQ controller in the FPGA and
- * someone forgot to remove it while switching to the core and moving to
- * SEC_ACCEL_INT_STATUS.
- */
-#define FPGA_INT_STATUS 0xdd68
-#define SEC_ACCEL_INT_STATUS 0xde20
-#define SEC_INT_AUTH_DONE (1 << 0)
-#define SEC_INT_DES_E_DONE (1 << 1)
-#define SEC_INT_AES_E_DONE (1 << 2)
-#define SEC_INT_AES_D_DONE (1 << 3)
-#define SEC_INT_ENC_DONE (1 << 4)
-#define SEC_INT_ACCEL0_DONE (1 << 5)
-#define SEC_INT_ACCEL1_DONE (1 << 6)
-#define SEC_INT_ACC0_IDMA_DONE (1 << 7)
-#define SEC_INT_ACC1_IDMA_DONE (1 << 8)
-
-#define SEC_ACCEL_INT_MASK 0xde24
-
-#define AES_KEY_LEN (8 * 4)
-
-struct sec_accel_config {
-
- u32 config;
-#define CFG_OP_MAC_ONLY 0
-#define CFG_OP_CRYPT_ONLY 1
-#define CFG_OP_MAC_CRYPT 2
-#define CFG_OP_CRYPT_MAC 3
-#define CFG_MACM_MD5 (4 << 4)
-#define CFG_MACM_SHA1 (5 << 4)
-#define CFG_MACM_HMAC_MD5 (6 << 4)
-#define CFG_MACM_HMAC_SHA1 (7 << 4)
-#define CFG_ENCM_DES (1 << 8)
-#define CFG_ENCM_3DES (2 << 8)
-#define CFG_ENCM_AES (3 << 8)
-#define CFG_DIR_ENC (0 << 12)
-#define CFG_DIR_DEC (1 << 12)
-#define CFG_ENC_MODE_ECB (0 << 16)
-#define CFG_ENC_MODE_CBC (1 << 16)
-#define CFG_3DES_EEE (0 << 20)
-#define CFG_3DES_EDE (1 << 20)
-#define CFG_AES_LEN_128 (0 << 24)
-#define CFG_AES_LEN_192 (1 << 24)
-#define CFG_AES_LEN_256 (2 << 24)
-#define CFG_NOT_FRAG (0 << 30)
-#define CFG_FIRST_FRAG (1 << 30)
-#define CFG_LAST_FRAG (2 << 30)
-#define CFG_MID_FRAG (3 << 30)
-
- u32 enc_p;
-#define ENC_P_SRC(x) (x)
-#define ENC_P_DST(x) ((x) << 16)
-
- u32 enc_len;
-#define ENC_LEN(x) (x)
-
- u32 enc_key_p;
-#define ENC_KEY_P(x) (x)
-
- u32 enc_iv;
-#define ENC_IV_POINT(x) ((x) << 0)
-#define ENC_IV_BUF_POINT(x) ((x) << 16)
-
- u32 mac_src_p;
-#define MAC_SRC_DATA_P(x) (x)
-#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
-
- u32 mac_digest;
-#define MAC_DIGEST_P(x) (x)
-#define MAC_FRAG_LEN(x) ((x) << 16)
- u32 mac_iv;
-#define MAC_INNER_IV_P(x) (x)
-#define MAC_OUTER_IV_P(x) ((x) << 16)
-}__attribute__ ((packed));
- /*
- * /-----------\ 0
- * | ACCEL CFG | 4 * 8
- * |-----------| 0x20
- * | CRYPT KEY | 8 * 4
- * |-----------| 0x40
- * | IV IN | 4 * 4
- * |-----------| 0x40 (inplace)
- * | IV BUF | 4 * 4
- * |-----------| 0x80
- * | DATA IN | 16 * x (max ->max_req_size)
- * |-----------| 0x80 (inplace operation)
- * | DATA OUT | 16 * x (max ->max_req_size)
- * \-----------/ SRAM size
- */
-
- /* Hashing memory map:
- * /-----------\ 0
- * | ACCEL CFG | 4 * 8
- * |-----------| 0x20
- * | Inner IV | 5 * 4
- * |-----------| 0x34
- * | Outer IV | 5 * 4
- * |-----------| 0x48
- * | Output BUF| 5 * 4
- * |-----------| 0x80
- * | DATA IN | 64 * x (max ->max_req_size)
- * \-----------/ SRAM size
- */
-#define SRAM_CONFIG 0x00
-#define SRAM_DATA_KEY_P 0x20
-#define SRAM_DATA_IV 0x40
-#define SRAM_DATA_IV_BUF 0x40
-#define SRAM_DATA_IN_START 0x80
-#define SRAM_DATA_OUT_START 0x80
-
-#define SRAM_HMAC_IV_IN 0x20
-#define SRAM_HMAC_IV_OUT 0x34
-#define SRAM_DIGEST_BUF 0x48
-
-#define SRAM_CFG_SPACE 0x80
-
-#endif
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index a9fd8b9e86cd..48de52cf2ecc 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1962,10 +1962,8 @@ static struct n2_crypto *alloc_n2cp(void)
static void free_n2cp(struct n2_crypto *np)
{
- if (np->cwq_info.ino_table) {
- kfree(np->cwq_info.ino_table);
- np->cwq_info.ino_table = NULL;
- }
+ kfree(np->cwq_info.ino_table);
+ np->cwq_info.ino_table = NULL;
kfree(np);
}
@@ -2079,10 +2077,8 @@ static struct n2_mau *alloc_ncp(void)
static void free_ncp(struct n2_mau *mp)
{
- if (mp->mau_info.ino_table) {
- kfree(mp->mau_info.ino_table);
- mp->mau_info.ino_table = NULL;
- }
+ kfree(mp->mau_info.ino_table);
+ mp->mau_info.ino_table = NULL;
kfree(mp);
}
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 874ddf5e9087..0f20f5ec9617 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -193,7 +193,7 @@ static int wait_for_csb(struct nx842_workmem *wmem,
ktime_t start = wmem->start, now = ktime_get();
ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX);
- while (!(ACCESS_ONCE(csb->flags) & CSB_V)) {
+ while (!(READ_ONCE(csb->flags) & CSB_V)) {
cpu_relax();
now = ktime_get();
if (ktime_after(now, timeout))
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index cddc6d8b55d9..bf52cd1d7fca 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -1082,7 +1082,7 @@ static int nx842_remove(struct vio_dev *viodev)
return 0;
}
-static struct vio_device_id nx842_vio_driver_ids[] = {
+static const struct vio_device_id nx842_vio_driver_ids[] = {
{"ibm,compression-v1", "ibm,compression"},
{"", ""},
};
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index abd465f479c4..a810596b97c2 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -22,6 +22,7 @@
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
+#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -433,7 +434,7 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
char *iv = rctx->iv;
- memcpy(iv, req->iv, 12);
+ memcpy(iv, req->iv, GCM_AES_IV_SIZE);
return gcm_aes_nx_crypt(req, 1, req->assoclen);
}
@@ -443,7 +444,7 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
char *iv = rctx->iv;
- memcpy(iv, req->iv, 12);
+ memcpy(iv, req->iv, GCM_AES_IV_SIZE);
return gcm_aes_nx_crypt(req, 0, req->assoclen);
}
@@ -498,7 +499,7 @@ struct aead_alg nx_gcm_aes_alg = {
},
.init = nx_crypto_ctx_aes_gcm_init,
.exit = nx_crypto_ctx_aead_exit,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = gcm_aes_nx_set_key,
.encrypt = gcm_aes_nx_encrypt,
@@ -516,7 +517,7 @@ struct aead_alg nx_gcm4106_aes_alg = {
},
.init = nx_crypto_ctx_aes_gcm_init,
.exit = nx_crypto_ctx_aead_exit,
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = gcm4106_aes_nx_set_key,
.setauthsize = gcm4106_aes_nx_setauthsize,
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 036057abb257..3a5e31be4764 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -833,7 +833,7 @@ static void __exit nx_fini(void)
vio_unregister_driver(&nx_driver.viodriver);
}
-static struct vio_device_id nx_crypto_driver_ids[] = {
+static const struct vio_device_id nx_crypto_driver_ids[] = {
{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
{ "", "" }
};
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 7d4f8a4be6d8..0cc3b65d7162 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -18,6 +18,7 @@
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
@@ -186,7 +187,7 @@ static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
if (!sk_req) {
pr_err("skcipher: Failed to allocate request\n");
- return -1;
+ return -ENOMEM;
}
init_completion(&result.completion);
@@ -214,7 +215,7 @@ static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
}
/* fall through */
default:
- pr_err("Encryption of IV failed for GCM mode");
+ pr_err("Encryption of IV failed for GCM mode\n");
break;
}
@@ -311,7 +312,7 @@ static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
int err, assoclen;
memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
- memcpy(rctx->iv + 12, &counter, 4);
+ memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);
err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
if (err)
@@ -339,7 +340,7 @@ int omap_aes_gcm_encrypt(struct aead_request *req)
{
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, req->iv, 12);
+ memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}
@@ -347,7 +348,7 @@ int omap_aes_gcm_decrypt(struct aead_request *req)
{
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, req->iv, 12);
+ memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
return omap_aes_gcm_crypt(req, FLAGS_GCM);
}
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index c376a3ee7c2c..fbec0a2e76dd 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -35,6 +35,7 @@
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/aead.h>
@@ -767,7 +768,7 @@ static struct aead_alg algs_aead_gcm[] = {
},
.init = omap_aes_gcm_cra_init,
.exit = omap_aes_gcm_cra_exit,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = omap_aes_gcm_setkey,
.encrypt = omap_aes_gcm_encrypt,
@@ -788,7 +789,7 @@ static struct aead_alg algs_aead_gcm[] = {
.init = omap_aes_gcm_cra_init,
.exit = omap_aes_gcm_cra_exit,
.maxauthsize = AES_BLOCK_SIZE,
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.setkey = omap_aes_4106gcm_setkey,
.encrypt = omap_aes_4106gcm_encrypt,
.decrypt = omap_aes_4106gcm_decrypt,
@@ -974,11 +975,10 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd,
struct device *dev, struct resource *res)
{
struct device_node *node = dev->of_node;
- const struct of_device_id *match;
int err = 0;
- match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
- if (!match) {
+ dd->pdata = of_device_get_match_data(dev);
+ if (!dd->pdata) {
dev_err(dev, "no compatible OF match\n");
err = -EINVAL;
goto err;
@@ -991,8 +991,6 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd,
goto err;
}
- dd->pdata = match->data;
-
err:
return err;
}
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index d37c9506c36c..ebc5c0f11f03 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -928,16 +928,13 @@ MODULE_DEVICE_TABLE(of, omap_des_of_match);
static int omap_des_get_of(struct omap_des_dev *dd,
struct platform_device *pdev)
{
- const struct of_device_id *match;
- match = of_match_device(of_match_ptr(omap_des_of_match), &pdev->dev);
- if (!match) {
+ dd->pdata = of_device_get_match_data(&pdev->dev);
+ if (!dd->pdata) {
dev_err(&pdev->dev, "no compatible OF match\n");
return -EINVAL;
}
- dd->pdata = match->data;
-
return 0;
}
#else
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index c40ac30ec002..86b89ace836f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1944,11 +1944,10 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
struct device *dev, struct resource *res)
{
struct device_node *node = dev->of_node;
- const struct of_device_id *match;
int err = 0;
- match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
- if (!match) {
+ dd->pdata = of_device_get_match_data(dev);
+ if (!dd->pdata) {
dev_err(dev, "no compatible OF match\n");
err = -EINVAL;
goto err;
@@ -1968,8 +1967,6 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
goto err;
}
- dd->pdata = match->data;
-
err:
return err;
}
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index b3869748cc6b..4b6642a25df5 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -482,7 +482,7 @@ static struct crypto_alg cbc_aes_alg = {
}
};
-static struct x86_cpu_id padlock_cpu_id[] = {
+static const struct x86_cpu_id padlock_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
{}
};
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index bc72d20c32c3..d32c79328876 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -509,7 +509,7 @@ static struct shash_alg sha256_alg_nano = {
}
};
-static struct x86_cpu_id padlock_sha_ids[] = {
+static const struct x86_cpu_id padlock_sha_ids[] = {
X86_FEATURE_MATCH(X86_FEATURE_PHE),
{}
};
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index 8afac52677a6..2d06409bd3c4 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -228,11 +228,8 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
list_add_tail(&map->list, &vfs_table);
} else if (accel_dev->is_vf && pf) {
/* VF on host */
- struct adf_accel_vf_info *vf_info;
struct vf_id_map *map;
- vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
-
map = adf_find_vf(adf_get_vf_num(accel_dev));
if (map) {
struct vf_id_map *next;
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 6f5dd68449c6..13c52d6bf630 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -443,9 +443,6 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
- if (unlikely(!params->p || !params->g))
- return -EINVAL;
-
if (qat_dh_check_params_length(params->p_size << 3))
return -EINVAL;
@@ -462,11 +459,8 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
}
ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
- if (!ctx->g) {
- dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
- ctx->p = NULL;
+ if (!ctx->g)
return -ENOMEM;
- }
memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
params->g_size);
@@ -507,18 +501,22 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
ret = qat_dh_set_params(ctx, &params);
if (ret < 0)
- return ret;
+ goto err_clear_ctx;
ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
GFP_KERNEL);
if (!ctx->xa) {
- qat_dh_clear_ctx(dev, ctx);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_clear_ctx;
}
memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
params.key_size);
return 0;
+
+err_clear_ctx:
+ qat_dh_clear_ctx(dev, ctx);
+ return ret;
}
static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index e2454d90d949..98d22c2096e3 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -567,26 +567,26 @@ qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
code_page->imp_expr_tab_offset);
if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
imp_expr_tab->entry_num) {
- pr_err("QAT: UOF can't contain imported variable to be parsed");
+ pr_err("QAT: UOF can't contain imported variable to be parsed\n");
return -EINVAL;
}
neigh_reg_tab = (struct icp_qat_uof_objtable *)
(encap_uof_obj->beg_uof +
code_page->neigh_reg_tab_offset);
if (neigh_reg_tab->entry_num) {
- pr_err("QAT: UOF can't contain shared control store feature");
+ pr_err("QAT: UOF can't contain shared control store feature\n");
return -EINVAL;
}
if (image->numpages > 1) {
- pr_err("QAT: UOF can't contain multiple pages");
+ pr_err("QAT: UOF can't contain multiple pages\n");
return -EINVAL;
}
if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
- pr_err("QAT: UOF can't use shared control store feature");
+ pr_err("QAT: UOF can't use shared control store feature\n");
return -EFAULT;
}
if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
- pr_err("QAT: UOF can't use reloadable feature");
+ pr_err("QAT: UOF can't use reloadable feature\n");
return -EFAULT;
}
return 0;
@@ -702,7 +702,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
}
}
if (!mflag) {
- pr_err("QAT: uimage uses AE not set");
+ pr_err("QAT: uimage uses AE not set\n");
return -EINVAL;
}
return 0;
@@ -791,6 +791,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_GPA_ABS:
case ICP_GPB_ABS:
ctx_mask = 0;
+ /* fall through */
case ICP_GPA_REL:
case ICP_GPB_REL:
return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
@@ -800,6 +801,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_SR_RD_ABS:
case ICP_DR_RD_ABS:
ctx_mask = 0;
+ /* fall through */
case ICP_SR_REL:
case ICP_DR_REL:
case ICP_SR_RD_REL:
@@ -809,6 +811,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_SR_WR_ABS:
case ICP_DR_WR_ABS:
ctx_mask = 0;
+ /* fall through */
case ICP_SR_WR_REL:
case ICP_DR_WR_REL:
return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index b04b42f48366..ea4d96bf47e8 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -248,10 +248,7 @@ static int qce_ablkcipher_init(struct crypto_tfm *tfm)
ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->fallback))
- return PTR_ERR(ctx->fallback);
-
- return 0;
+ return PTR_ERR_OR_ZERO(ctx->fallback);
}
static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 47e114ac09d0..53227d70d397 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -349,28 +349,12 @@ static int qce_ahash_digest(struct ahash_request *req)
return qce->async_req_enqueue(tmpl->qce, &req->base);
}
-struct qce_ahash_result {
- struct completion completion;
- int error;
-};
-
-static void qce_digest_complete(struct crypto_async_request *req, int error)
-{
- struct qce_ahash_result *result = req->data;
-
- if (error == -EINPROGRESS)
- return;
-
- result->error = error;
- complete(&result->completion);
-}
-
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
unsigned int digestsize = crypto_ahash_digestsize(tfm);
struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
- struct qce_ahash_result result;
+ struct crypto_wait wait;
struct ahash_request *req;
struct scatterlist sg;
unsigned int blocksize;
@@ -405,9 +389,9 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
goto err_free_ahash;
}
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- qce_digest_complete, &result);
+ crypto_req_done, &wait);
crypto_ahash_clear_flags(ahash_tfm, ~0);
buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
@@ -420,13 +404,7 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
sg_init_one(&sg, buf, keylen);
ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
- ret = crypto_ahash_digest(req);
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- ret = wait_for_completion_interruptible(&result.completion);
- if (!ret)
- ret = result.error;
- }
-
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
if (ret)
crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 7ac657f46d15..142c6020cec7 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1,14 +1,16 @@
/*
* Cryptographic API.
*
- * Support for Samsung S5PV210 HW acceleration.
+ * Support for Samsung S5PV210 and Exynos HW acceleration.
*
* Copyright (C) 2011 NetUP Inc. All rights reserved.
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
+ * Hash part based on omap-sham.c driver.
*/
#include <linux/clk.h>
@@ -30,98 +32,112 @@
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
-#define _SBF(s, v) ((v) << (s))
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/internal/hash.h>
+
+#define _SBF(s, v) ((v) << (s))
/* Feed control registers */
-#define SSS_REG_FCINTSTAT 0x0000
-#define SSS_FCINTSTAT_BRDMAINT BIT(3)
-#define SSS_FCINTSTAT_BTDMAINT BIT(2)
-#define SSS_FCINTSTAT_HRDMAINT BIT(1)
-#define SSS_FCINTSTAT_PKDMAINT BIT(0)
-
-#define SSS_REG_FCINTENSET 0x0004
-#define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
-#define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
-#define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
-#define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
-
-#define SSS_REG_FCINTENCLR 0x0008
-#define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
-#define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
-#define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
-#define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
-
-#define SSS_REG_FCINTPEND 0x000C
-#define SSS_FCINTPEND_BRDMAINTP BIT(3)
-#define SSS_FCINTPEND_BTDMAINTP BIT(2)
-#define SSS_FCINTPEND_HRDMAINTP BIT(1)
-#define SSS_FCINTPEND_PKDMAINTP BIT(0)
-
-#define SSS_REG_FCFIFOSTAT 0x0010
-#define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
-#define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
-#define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
-#define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
-#define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
-#define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
-#define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
-#define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
-
-#define SSS_REG_FCFIFOCTRL 0x0014
-#define SSS_FCFIFOCTRL_DESSEL BIT(2)
-#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
-#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
-#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
-
-#define SSS_REG_FCBRDMAS 0x0020
-#define SSS_REG_FCBRDMAL 0x0024
-#define SSS_REG_FCBRDMAC 0x0028
-#define SSS_FCBRDMAC_BYTESWAP BIT(1)
-#define SSS_FCBRDMAC_FLUSH BIT(0)
-
-#define SSS_REG_FCBTDMAS 0x0030
-#define SSS_REG_FCBTDMAL 0x0034
-#define SSS_REG_FCBTDMAC 0x0038
-#define SSS_FCBTDMAC_BYTESWAP BIT(1)
-#define SSS_FCBTDMAC_FLUSH BIT(0)
-
-#define SSS_REG_FCHRDMAS 0x0040
-#define SSS_REG_FCHRDMAL 0x0044
-#define SSS_REG_FCHRDMAC 0x0048
-#define SSS_FCHRDMAC_BYTESWAP BIT(1)
-#define SSS_FCHRDMAC_FLUSH BIT(0)
-
-#define SSS_REG_FCPKDMAS 0x0050
-#define SSS_REG_FCPKDMAL 0x0054
-#define SSS_REG_FCPKDMAC 0x0058
-#define SSS_FCPKDMAC_BYTESWAP BIT(3)
-#define SSS_FCPKDMAC_DESCEND BIT(2)
-#define SSS_FCPKDMAC_TRANSMIT BIT(1)
-#define SSS_FCPKDMAC_FLUSH BIT(0)
-
-#define SSS_REG_FCPKDMAO 0x005C
+#define SSS_REG_FCINTSTAT 0x0000
+#define SSS_FCINTSTAT_HPARTINT BIT(7)
+#define SSS_FCINTSTAT_HDONEINT BIT(5)
+#define SSS_FCINTSTAT_BRDMAINT BIT(3)
+#define SSS_FCINTSTAT_BTDMAINT BIT(2)
+#define SSS_FCINTSTAT_HRDMAINT BIT(1)
+#define SSS_FCINTSTAT_PKDMAINT BIT(0)
+
+#define SSS_REG_FCINTENSET 0x0004
+#define SSS_FCINTENSET_HPARTINTENSET BIT(7)
+#define SSS_FCINTENSET_HDONEINTENSET BIT(5)
+#define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
+#define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
+#define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
+#define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
+
+#define SSS_REG_FCINTENCLR 0x0008
+#define SSS_FCINTENCLR_HPARTINTENCLR BIT(7)
+#define SSS_FCINTENCLR_HDONEINTENCLR BIT(5)
+#define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
+#define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
+#define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
+#define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
+
+#define SSS_REG_FCINTPEND 0x000C
+#define SSS_FCINTPEND_HPARTINTP BIT(7)
+#define SSS_FCINTPEND_HDONEINTP BIT(5)
+#define SSS_FCINTPEND_BRDMAINTP BIT(3)
+#define SSS_FCINTPEND_BTDMAINTP BIT(2)
+#define SSS_FCINTPEND_HRDMAINTP BIT(1)
+#define SSS_FCINTPEND_PKDMAINTP BIT(0)
+
+#define SSS_REG_FCFIFOSTAT 0x0010
+#define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
+#define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
+#define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
+#define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
+#define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
+#define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
+#define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
+#define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
+
+#define SSS_REG_FCFIFOCTRL 0x0014
+#define SSS_FCFIFOCTRL_DESSEL BIT(2)
+#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
+#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
+#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
+#define SSS_HASHIN_MASK _SBF(0, 0x03)
+
+#define SSS_REG_FCBRDMAS 0x0020
+#define SSS_REG_FCBRDMAL 0x0024
+#define SSS_REG_FCBRDMAC 0x0028
+#define SSS_FCBRDMAC_BYTESWAP BIT(1)
+#define SSS_FCBRDMAC_FLUSH BIT(0)
+
+#define SSS_REG_FCBTDMAS 0x0030
+#define SSS_REG_FCBTDMAL 0x0034
+#define SSS_REG_FCBTDMAC 0x0038
+#define SSS_FCBTDMAC_BYTESWAP BIT(1)
+#define SSS_FCBTDMAC_FLUSH BIT(0)
+
+#define SSS_REG_FCHRDMAS 0x0040
+#define SSS_REG_FCHRDMAL 0x0044
+#define SSS_REG_FCHRDMAC 0x0048
+#define SSS_FCHRDMAC_BYTESWAP BIT(1)
+#define SSS_FCHRDMAC_FLUSH BIT(0)
+
+#define SSS_REG_FCPKDMAS 0x0050
+#define SSS_REG_FCPKDMAL 0x0054
+#define SSS_REG_FCPKDMAC 0x0058
+#define SSS_FCPKDMAC_BYTESWAP BIT(3)
+#define SSS_FCPKDMAC_DESCEND BIT(2)
+#define SSS_FCPKDMAC_TRANSMIT BIT(1)
+#define SSS_FCPKDMAC_FLUSH BIT(0)
+
+#define SSS_REG_FCPKDMAO 0x005C
/* AES registers */
#define SSS_REG_AES_CONTROL 0x00
-#define SSS_AES_BYTESWAP_DI BIT(11)
-#define SSS_AES_BYTESWAP_DO BIT(10)
-#define SSS_AES_BYTESWAP_IV BIT(9)
-#define SSS_AES_BYTESWAP_CNT BIT(8)
-#define SSS_AES_BYTESWAP_KEY BIT(7)
-#define SSS_AES_KEY_CHANGE_MODE BIT(6)
-#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
-#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
-#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
-#define SSS_AES_FIFO_MODE BIT(3)
-#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
-#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
-#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
-#define SSS_AES_MODE_DECRYPT BIT(0)
+#define SSS_AES_BYTESWAP_DI BIT(11)
+#define SSS_AES_BYTESWAP_DO BIT(10)
+#define SSS_AES_BYTESWAP_IV BIT(9)
+#define SSS_AES_BYTESWAP_CNT BIT(8)
+#define SSS_AES_BYTESWAP_KEY BIT(7)
+#define SSS_AES_KEY_CHANGE_MODE BIT(6)
+#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
+#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
+#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
+#define SSS_AES_FIFO_MODE BIT(3)
+#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
+#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
+#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
+#define SSS_AES_MODE_DECRYPT BIT(0)
#define SSS_REG_AES_STATUS 0x04
-#define SSS_AES_BUSY BIT(2)
-#define SSS_AES_INPUT_READY BIT(1)
-#define SSS_AES_OUTPUT_READY BIT(0)
+#define SSS_AES_BUSY BIT(2)
+#define SSS_AES_INPUT_READY BIT(1)
+#define SSS_AES_OUTPUT_READY BIT(0)
#define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2))
#define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2))
@@ -129,26 +145,97 @@
#define SSS_REG_AES_CNT_DATA(s) (0x40 + (s << 2))
#define SSS_REG_AES_KEY_DATA(s) (0x80 + (s << 2))
-#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
-#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
-#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
+#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
+#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
+#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
-#define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
+#define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
SSS_AES_REG(dev, reg))
/* HW engine modes */
-#define FLAGS_AES_DECRYPT BIT(0)
-#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
-#define FLAGS_AES_CBC _SBF(1, 0x01)
-#define FLAGS_AES_CTR _SBF(1, 0x02)
+#define FLAGS_AES_DECRYPT BIT(0)
+#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
+#define FLAGS_AES_CBC _SBF(1, 0x01)
+#define FLAGS_AES_CTR _SBF(1, 0x02)
+
+#define AES_KEY_LEN 16
+#define CRYPTO_QUEUE_LEN 1
+
+/* HASH registers */
+#define SSS_REG_HASH_CTRL 0x00
+
+#define SSS_HASH_USER_IV_EN BIT(5)
+#define SSS_HASH_INIT_BIT BIT(4)
+#define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00)
+#define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01)
+#define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02)
+
+#define SSS_HASH_ENGINE_MASK _SBF(1, 0x03)
+
+#define SSS_REG_HASH_CTRL_PAUSE 0x04
+
+#define SSS_HASH_PAUSE BIT(0)
+
+#define SSS_REG_HASH_CTRL_FIFO 0x08
+
+#define SSS_HASH_FIFO_MODE_DMA BIT(0)
+#define SSS_HASH_FIFO_MODE_CPU 0
+
+#define SSS_REG_HASH_CTRL_SWAP 0x0C
+
+#define SSS_HASH_BYTESWAP_DI BIT(3)
+#define SSS_HASH_BYTESWAP_DO BIT(2)
+#define SSS_HASH_BYTESWAP_IV BIT(1)
+#define SSS_HASH_BYTESWAP_KEY BIT(0)
+
+#define SSS_REG_HASH_STATUS 0x10
+
+#define SSS_HASH_STATUS_MSG_DONE BIT(6)
+#define SSS_HASH_STATUS_PARTIAL_DONE BIT(4)
+#define SSS_HASH_STATUS_BUFFER_READY BIT(0)
+
+#define SSS_REG_HASH_MSG_SIZE_LOW 0x20
+#define SSS_REG_HASH_MSG_SIZE_HIGH 0x24
+
+#define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28
+#define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C
-#define AES_KEY_LEN 16
-#define CRYPTO_QUEUE_LEN 1
+#define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2))
+#define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2))
+
+#define HASH_BLOCK_SIZE 64
+#define HASH_REG_SIZEOF 4
+#define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
+#define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
+#define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)
+
+/*
+ * HASH bit numbers, set in dev->hash_flags with set_bit() or clear_bit(),
+ * tested with test_bit() or used via BIT(), to keep the HASH state BUSY or
+ * FREE, or to signal state from the irq handler to the hash tasklet. The SGS
+ * bits keep track of memory allocated for the scatterlist.
+ */
+#define HASH_FLAGS_BUSY 0
+#define HASH_FLAGS_FINAL 1
+#define HASH_FLAGS_DMA_ACTIVE 2
+#define HASH_FLAGS_OUTPUT_READY 3
+#define HASH_FLAGS_DMA_READY 4
+#define HASH_FLAGS_SGS_COPIED 5
+#define HASH_FLAGS_SGS_ALLOCED 6
+
+/* HASH HW constants */
+#define BUFLEN HASH_BLOCK_SIZE
+
+#define SSS_HASH_DMA_LEN_ALIGN 8
+#define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1)
+
+#define SSS_HASH_QUEUE_LENGTH 10
/**
* struct samsung_aes_variant - platform specific SSS driver data
* @aes_offset: AES register offset from SSS module's base.
+ * @hash_offset: HASH register offset from SSS module's base.
*
* Specifies platform specific configuration of SSS module.
* Note: A structure for driver specific platform data is used for future
@@ -156,6 +243,7 @@
*/
struct samsung_aes_variant {
unsigned int aes_offset;
+ unsigned int hash_offset;
};
struct s5p_aes_reqctx {
@@ -195,6 +283,19 @@ struct s5p_aes_ctx {
* protects against concurrent access to these fields.
* @lock: Lock for protecting both access to device hardware registers
* and fields related to current request (including the busy field).
+ * @res: Resources for hash.
+ * @io_hash_base: Per-variant offset for HASH block IO memory.
+ * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags
+ * variable.
+ * @hash_flags: Flags for current HASH op.
+ * @hash_queue: Async hash queue.
+ * @hash_tasklet: New HASH request scheduling job.
+ * @xmit_buf: Buffer for current HASH request transfer into SSS block.
+ * @hash_req: Current request being sent to the SSS HASH block.
+ * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
+ * @hash_sg_cnt: Counter for hash_sg_iter.
+ *
+ * @use_hash: true if HASH algs enabled
*/
struct s5p_aes_dev {
struct device *dev;
@@ -215,16 +316,83 @@ struct s5p_aes_dev {
struct crypto_queue queue;
bool busy;
spinlock_t lock;
+
+ struct resource *res;
+ void __iomem *io_hash_base;
+
+ spinlock_t hash_lock; /* protect hash_ vars */
+ unsigned long hash_flags;
+ struct crypto_queue hash_queue;
+ struct tasklet_struct hash_tasklet;
+
+ u8 xmit_buf[BUFLEN];
+ struct ahash_request *hash_req;
+ struct scatterlist *hash_sg_iter;
+ unsigned int hash_sg_cnt;
+
+ bool use_hash;
};
-static struct s5p_aes_dev *s5p_dev;
+/**
+ * struct s5p_hash_reqctx - HASH request context
+ * @dd: Associated device
+ * @op_update: Current request operation (OP_UPDATE or OP_FINAL)
+ * @digcnt: Number of bytes processed by HW (without buffer[] ones)
+ * @digest: Digest message or IV for partial result
+ * @nregs: Number of HW registers for digest or IV read/write
+ * @engine: Bits for selecting type of HASH in SSS block
+ * @sg: sg for DMA transfer
+ * @sg_len: Length of sg for DMA transfer
+ * @sgl[]: sg for joining buffer and req->src scatterlist
+ * @skip: Skip offset in req->src for current op
+ * @total: Total number of bytes for current request
+ * @finup: Keep state for finup or final.
+ * @error: Keep track of error.
+ * @bufcnt: Number of bytes held in buffer[]
+ * @buffer[]: For byte(s) from end of req->src in UPDATE op
+ */
+struct s5p_hash_reqctx {
+ struct s5p_aes_dev *dd;
+ bool op_update;
+
+ u64 digcnt;
+ u8 digest[SHA256_DIGEST_SIZE];
+
+ unsigned int nregs; /* digest_size / sizeof(reg) */
+ u32 engine;
+
+ struct scatterlist *sg;
+ unsigned int sg_len;
+ struct scatterlist sgl[2];
+ unsigned int skip;
+ unsigned int total;
+ bool finup;
+ bool error;
+
+ u32 bufcnt;
+ u8 buffer[0];
+};
+
+/**
+ * struct s5p_hash_ctx - HASH transformation context
+ * @dd: Associated device
+ * @flags: Bits for algorithm HASH.
+ * @fallback: Software transformation for zero message or size < BUFLEN.
+ */
+struct s5p_hash_ctx {
+ struct s5p_aes_dev *dd;
+ unsigned long flags;
+ struct crypto_shash *fallback;
+};
static const struct samsung_aes_variant s5p_aes_data = {
.aes_offset = 0x4000,
+ .hash_offset = 0x6000,
};
static const struct samsung_aes_variant exynos_aes_data = {
.aes_offset = 0x200,
+ .hash_offset = 0x400,
};
static const struct of_device_id s5p_sss_dt_match[] = {
@@ -254,6 +422,8 @@ static inline struct samsung_aes_variant *find_s5p_sss_version
platform_get_device_id(pdev)->driver_data;
}
+static struct s5p_aes_dev *s5p_dev;
+
static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
@@ -436,15 +606,65 @@ static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
return ret;
}
+static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
+{
+ return __raw_readl(dd->io_hash_base + offset);
+}
+
+static inline void s5p_hash_write(struct s5p_aes_dev *dd,
+ u32 offset, u32 value)
+{
+ __raw_writel(value, dd->io_hash_base + offset);
+}
+
+/**
+ * s5p_set_dma_hashdata() - start DMA with sg
+ * @dev: device
+ * @sg: scatterlist ready to DMA transmit
+ */
+static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
+ struct scatterlist *sg)
+{
+ dev->hash_sg_cnt--;
+ SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
+ SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
+}
+
+/**
+ * s5p_hash_rx() - get next hash_sg_iter
+ * @dev: device
+ *
+ * Return:
+ * 2 if there is no more data and it is UPDATE op
+ * 1 if new receiving (input) data is ready and can be written to device
+ * 0 if there is no more data and it is FINAL op
+ */
+static int s5p_hash_rx(struct s5p_aes_dev *dev)
+{
+ if (dev->hash_sg_cnt > 0) {
+ dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
+ return 1;
+ }
+
+ set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
+ if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
+ return 0;
+
+ return 2;
+}
+
static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
int err_dma_tx = 0;
int err_dma_rx = 0;
+ int err_dma_hx = 0;
bool tx_end = false;
+ bool hx_end = false;
unsigned long flags;
uint32_t status;
+ u32 st_bits;
int err;
spin_lock_irqsave(&dev->lock, flags);
@@ -456,6 +676,8 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
*
* If there is no more data in tx scatter list, call s5p_aes_complete()
* and schedule new tasklet.
+ *
+ * Handle the hx interrupt. If there is still data, map the next entry.
*/
status = SSS_READ(dev, FCINTSTAT);
if (status & SSS_FCINTSTAT_BRDMAINT)
@@ -467,7 +689,29 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
err_dma_tx = s5p_aes_tx(dev);
}
- SSS_WRITE(dev, FCINTPEND, status);
+ if (status & SSS_FCINTSTAT_HRDMAINT)
+ err_dma_hx = s5p_hash_rx(dev);
+
+ st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
+ SSS_FCINTSTAT_HRDMAINT);
+ /* clear DMA bits */
+ SSS_WRITE(dev, FCINTPEND, st_bits);
+
+ /* clear HASH irq bits */
+ if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
+ /* cannot have both HPART and HDONE */
+ if (status & SSS_FCINTSTAT_HPARTINT)
+ st_bits = SSS_HASH_STATUS_PARTIAL_DONE;
+
+ if (status & SSS_FCINTSTAT_HDONEINT)
+ st_bits = SSS_HASH_STATUS_MSG_DONE;
+
+ set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
+ s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
+ hx_end = true;
+ /* when DONE or PART, do not handle HASH DMA */
+ err_dma_hx = 0;
+ }
if (err_dma_rx < 0) {
err = err_dma_rx;
@@ -480,6 +724,8 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
if (tx_end) {
s5p_sg_done(dev);
+ if (err_dma_hx == 1)
+ s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
spin_unlock_irqrestore(&dev->lock, flags);
@@ -497,21 +743,1100 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
s5p_set_dma_outdata(dev, dev->sg_dst);
if (err_dma_rx == 1)
s5p_set_dma_indata(dev, dev->sg_src);
+ if (err_dma_hx == 1)
+ s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
spin_unlock_irqrestore(&dev->lock, flags);
}
- return IRQ_HANDLED;
+ goto hash_irq_end;
error:
s5p_sg_done(dev);
dev->busy = false;
+ if (err_dma_hx == 1)
+ s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
+
spin_unlock_irqrestore(&dev->lock, flags);
s5p_aes_complete(dev, err);
+hash_irq_end:
+ /*
+	 * Note about the else if below:
+	 * when hash_sg_iter reaches the end and it is an UPDATE op,
+	 * issue SSS_HASH_PAUSE and wait for the HPART irq
+ */
+ if (hx_end)
+ tasklet_schedule(&dev->hash_tasklet);
+ else if (err_dma_hx == 2)
+ s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
+ SSS_HASH_PAUSE);
+
return IRQ_HANDLED;
}
+/**
+ * s5p_hash_read_msg() - read message or IV from HW
+ * @req: AHASH request
+ */
+static void s5p_hash_read_msg(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct s5p_aes_dev *dd = ctx->dd;
+ u32 *hash = (u32 *)ctx->digest;
+ unsigned int i;
+
+ for (i = 0; i < ctx->nregs; i++)
+ hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
+}
+
+/**
+ * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
+ * @dd: device
+ * @ctx: request context
+ */
+static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
+ struct s5p_hash_reqctx *ctx)
+{
+ u32 *hash = (u32 *)ctx->digest;
+ unsigned int i;
+
+ for (i = 0; i < ctx->nregs; i++)
+ s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
+}
+
+/**
+ * s5p_hash_write_iv() - write IV for next partial/finup op.
+ * @req: AHASH request
+ */
+static void s5p_hash_write_iv(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ s5p_hash_write_ctx_iv(ctx->dd, ctx);
+}
+
+/**
+ * s5p_hash_copy_result() - copy digest into req->result
+ * @req: AHASH request
+ */
+static void s5p_hash_copy_result(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!req->result)
+ return;
+
+ memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
+}
+
+/**
+ * s5p_hash_dma_flush() - flush HASH DMA
+ * @dev: secss device
+ */
+static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
+{
+ SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
+}
+
+/**
+ * s5p_hash_dma_enable() - enable DMA mode for HASH
+ * @dev: secss device
+ *
+ * enable DMA mode for HASH
+ */
+static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
+{
+ s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
+}
+
+/**
+ * s5p_hash_irq_disable() - disable HASH irq signals
+ * @dev: secss device
+ * @flags: bitfield with irqs to be disabled
+ */
+static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
+{
+ SSS_WRITE(dev, FCINTENCLR, flags);
+}
+
+/**
+ * s5p_hash_irq_enable() - enable irq signals
+ * @dev: secss device
+ * @flags: bitfield with irqs to be enabled
+ */
+static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
+{
+ SSS_WRITE(dev, FCINTENSET, flags);
+}
+
+/**
+ * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
+ * @dev: secss device
+ * @hashflow: HASH stream flow with/without crypto AES/DES
+ */
+static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
+{
+ unsigned long flags;
+ u32 flow;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ flow = SSS_READ(dev, FCFIFOCTRL);
+ flow &= ~SSS_HASHIN_MASK;
+ flow |= hashflow;
+ SSS_WRITE(dev, FCFIFOCTRL, flow);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/**
+ * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
+ * @dev: secss device
+ * @hashflow: HASH stream flow with/without AES/DES
+ *
+ * Flush the HASH DMA and enable it, set the HASH stream flow inside the
+ * SecSS HW, and enable the HASH irqs HRDMA, HDONE and HPART.
+ */
+static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
+{
+ s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
+ SSS_FCINTENCLR_HDONEINTENCLR |
+ SSS_FCINTENCLR_HPARTINTENCLR);
+ s5p_hash_dma_flush(dev);
+
+ s5p_hash_dma_enable(dev);
+ s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
+ s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
+ SSS_FCINTENSET_HDONEINTENSET |
+ SSS_FCINTENSET_HPARTINTENSET);
+}
+
+/**
+ * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
+ * @dd: secss device
+ * @length: length for request
+ * @final: true if final op
+ *
+ * Prepare the SSS HASH block for processing bytes in DMA mode. If it is
+ * called after previous updates, fill up the IV words. For a final op,
+ * calculate and set the lengths for HASH so SecSS can finalize the hash.
+ * For a partial op, set the SSS HASH length to 2^63 so it will never be
+ * reached, and set prelow and prehigh to zero.
+ *
+ * This function does not start DMA transfer.
+ */
+static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
+ bool final)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
+ u32 prelow, prehigh, low, high;
+ u32 configflags, swapflags;
+ u64 tmplen;
+
+ configflags = ctx->engine | SSS_HASH_INIT_BIT;
+
+ if (likely(ctx->digcnt)) {
+ s5p_hash_write_ctx_iv(dd, ctx);
+ configflags |= SSS_HASH_USER_IV_EN;
+ }
+
+ if (final) {
+ /* number of bytes for last part */
+ low = length;
+ high = 0;
+ /* total number of bits prev hashed */
+ tmplen = ctx->digcnt * 8;
+ prelow = (u32)tmplen;
+ prehigh = (u32)(tmplen >> 32);
+ } else {
+ prelow = 0;
+ prehigh = 0;
+ low = 0;
+ high = BIT(31);
+ }
+
+ swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
+ SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;
+
+ s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
+ s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
+ s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
+ s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);
+
+ s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
+ s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
+}
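/*
 * A worked example of the length programming above (illustrative numbers
 * only): for a final op with ctx->digcnt == 192 bytes already hashed and
 * length == 20 bytes left in this request:
 *
 *	low     = 20;			number of bytes in the last part
 *	high    = 0;
 *	tmplen  = 192 * 8 = 1536;	bits hashed so far
 *	prelow  = 1536; prehigh = 0;	low/high halves of that bit count
 *
 * For a partial op, high = BIT(31) keeps the programmed message length
 * unreachable, so the engine never finalizes on its own.
 */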
+
+/**
+ * s5p_hash_xmit_dma() - start DMA hash processing
+ * @dd: secss device
+ * @length: length for request
+ * @final: true if final op
+ *
+ * Update digcnt here, as it is needed for finup/final op.
+ */
+static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
+ bool final)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
+ unsigned int cnt;
+
+ cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
+ if (!cnt) {
+ dev_err(dd->dev, "dma_map_sg error\n");
+ ctx->error = true;
+ return -EINVAL;
+ }
+
+ set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
+ dd->hash_sg_iter = ctx->sg;
+ dd->hash_sg_cnt = cnt;
+ s5p_hash_write_ctrl(dd, length, final);
+ ctx->digcnt += length;
+ ctx->total -= length;
+
+ /* catch last interrupt */
+ if (final)
+ set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);
+
+ s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */
+
+ return -EINPROGRESS;
+}
+
+/**
+ * s5p_hash_copy_sgs() - copy request's bytes into new buffer
+ * @ctx: request context
+ * @sg: source scatterlist request
+ * @new_len: number of bytes to process from sg
+ *
+ * Allocate a new buffer and copy the HASH data into it. If xmit_buf was
+ * filled, copy it first, then copy the data from sg after it. Prepare one
+ * sgl[0] entry with the allocated buffer.
+ *
+ * Set a bit in dd->hash_flags so the buffer can be freed after the irq
+ * finishes processing.
+ */
+static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
+ struct scatterlist *sg, unsigned int new_len)
+{
+ unsigned int pages, len;
+ void *buf;
+
+ len = new_len + ctx->bufcnt;
+ pages = get_order(len);
+
+ buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
+ if (!buf) {
+ dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
+ ctx->error = true;
+ return -ENOMEM;
+ }
+
+ if (ctx->bufcnt)
+ memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
+
+ scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
+ new_len, 0);
+ sg_init_table(ctx->sgl, 1);
+ sg_set_buf(ctx->sgl, buf, len);
+ ctx->sg = ctx->sgl;
+ ctx->sg_len = 1;
+ ctx->bufcnt = 0;
+ ctx->skip = 0;
+ set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
+
+ return 0;
+}
+
+/**
+ * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
+ * @ctx: request context
+ * @sg: source scatterlist request
+ * @new_len: number of bytes to process from sg
+ *
+ * Allocate a new scatterlist table and copy the HASH data into it. If
+ * xmit_buf was filled, prepare it first, then copy page, length and offset
+ * from the source sg into it, adjusting begin and/or end for the skip
+ * offset and the hash_later value.
+ *
+ * The resulting sg table is assigned to ctx->sg. Set a flag so it can be
+ * freed after the irq finishes processing.
+ */
+static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
+ struct scatterlist *sg, unsigned int new_len)
+{
+ unsigned int skip = ctx->skip, n = sg_nents(sg);
+ struct scatterlist *tmp;
+ unsigned int len;
+
+ if (ctx->bufcnt)
+ n++;
+
+ ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
+ if (!ctx->sg) {
+ ctx->error = true;
+ return -ENOMEM;
+ }
+
+ sg_init_table(ctx->sg, n);
+
+ tmp = ctx->sg;
+
+ ctx->sg_len = 0;
+
+ if (ctx->bufcnt) {
+ sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
+ tmp = sg_next(tmp);
+ ctx->sg_len++;
+ }
+
+ while (sg && skip >= sg->length) {
+ skip -= sg->length;
+ sg = sg_next(sg);
+ }
+
+ while (sg && new_len) {
+ len = sg->length - skip;
+ if (new_len < len)
+ len = new_len;
+
+ new_len -= len;
+ sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
+ skip = 0;
+ if (new_len <= 0)
+ sg_mark_end(tmp);
+
+ tmp = sg_next(tmp);
+ ctx->sg_len++;
+ sg = sg_next(sg);
+ }
+
+ set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
+
+ return 0;
+}
+
+/**
+ * s5p_hash_prepare_sgs() - prepare sg for processing
+ * @ctx: request context
+ * @sg: source scatterlist request
+ * @nbytes: number of bytes to process from sg
+ * @final: final flag
+ *
+ * Check two conditions: (1) the buffers in sg contain length-aligned data,
+ * and (2) the sg table has well-aligned elements (list_ok). If one of these
+ * checks fails, then either (1) allocate a new buffer with
+ * s5p_hash_copy_sgs(), copy the data into it and prepare the request in
+ * sgl, or (2) allocate a new sg table and prepare the sg elements.
+ *
+ * For digest or finup, all conditions may already hold, in which case no
+ * fix-up is needed.
+ */
+static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
+ struct scatterlist *sg,
+ unsigned int new_len, bool final)
+{
+ unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
+ bool aligned = true, list_ok = true;
+ struct scatterlist *sg_tmp = sg;
+
+ if (!sg || !sg->length || !new_len)
+ return 0;
+
+ if (skip || !final)
+ list_ok = false;
+
+ while (nbytes > 0 && sg_tmp) {
+ n++;
+ if (skip >= sg_tmp->length) {
+ skip -= sg_tmp->length;
+ if (!sg_tmp->length) {
+ aligned = false;
+ break;
+ }
+ } else {
+ if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
+ aligned = false;
+ break;
+ }
+
+ if (nbytes < sg_tmp->length - skip) {
+ list_ok = false;
+ break;
+ }
+
+ nbytes -= sg_tmp->length - skip;
+ skip = 0;
+ }
+
+ sg_tmp = sg_next(sg_tmp);
+ }
+
+ if (!aligned)
+ return s5p_hash_copy_sgs(ctx, sg, new_len);
+ else if (!list_ok)
+ return s5p_hash_copy_sg_lists(ctx, sg, new_len);
+
+ /*
+	 * Data from the previous and/or current operation is aligned.
+	 * Note: we get here only for (digest or finup) with aligned data.
+ */
+ if (ctx->bufcnt) {
+ ctx->sg_len = n;
+ sg_init_table(ctx->sgl, 2);
+ sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
+ sg_chain(ctx->sgl, 2, sg);
+ ctx->sg = ctx->sgl;
+ ctx->sg_len++;
+ } else {
+ ctx->sg = sg;
+ ctx->sg_len = n;
+ }
+
+ return 0;
+}
+
+/**
+ * s5p_hash_prepare_request() - prepare request for processing
+ * @req: AHASH request
+ * @update: true if UPDATE op
+ *
+ * Note 1: we can have update flag _and_ final flag at the same time.
+ * Note 2: we enter here when digcnt > BUFLEN (= HASH_BLOCK_SIZE), when
+ * req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN, or when it is a
+ * final op.
+ */
+static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ bool final = ctx->finup;
+ int xmit_len, hash_later, nbytes;
+ int ret;
+
+ if (!req)
+ return 0;
+
+ if (update)
+ nbytes = req->nbytes;
+ else
+ nbytes = 0;
+
+ ctx->total = nbytes + ctx->bufcnt;
+ if (!ctx->total)
+ return 0;
+
+ if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
+ /* bytes left from previous request, so fill up to BUFLEN */
+ int len = BUFLEN - ctx->bufcnt % BUFLEN;
+
+ if (len > nbytes)
+ len = nbytes;
+
+ scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
+ 0, len, 0);
+ ctx->bufcnt += len;
+ nbytes -= len;
+ ctx->skip = len;
+ } else {
+ ctx->skip = 0;
+ }
+
+ if (ctx->bufcnt)
+ memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
+
+ xmit_len = ctx->total;
+ if (final) {
+ hash_later = 0;
+ } else {
+ if (IS_ALIGNED(xmit_len, BUFLEN))
+ xmit_len -= BUFLEN;
+ else
+ xmit_len -= xmit_len & (BUFLEN - 1);
+
+ hash_later = ctx->total - xmit_len;
+ /* copy hash_later bytes from end of req->src */
+ /* previous bytes are in xmit_buf, so no overwrite */
+ scatterwalk_map_and_copy(ctx->buffer, req->src,
+ req->nbytes - hash_later,
+ hash_later, 0);
+ }
+
+ if (xmit_len > BUFLEN) {
+ ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
+ final);
+ if (ret)
+ return ret;
+ } else {
+ /* have buffered data only */
+ if (unlikely(!ctx->bufcnt)) {
+ /* first update didn't fill up buffer */
+ scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
+ 0, xmit_len, 0);
+ }
+
+ sg_init_table(ctx->sgl, 1);
+ sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
+
+ ctx->sg = ctx->sgl;
+ ctx->sg_len = 1;
+ }
+
+ ctx->bufcnt = hash_later;
+ if (!final)
+ ctx->total = xmit_len;
+
+ return 0;
+}
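/*
 * A worked example of the buffering above (illustrative numbers only), with
 * BUFLEN == 64, ctx->bufcnt == 10 and an update of req->nbytes == 100:
 *
 *	total      = 10 + 100 = 110
 *	fill-up    : 54 bytes copied, so ctx->bufcnt == 64, ctx->skip == 54
 *	xmit_len   = 110 rounded down to a BUFLEN multiple = 64
 *	hash_later = 110 - 64 = 46	(stashed in ctx->buffer)
 *
 * Only the 64 bytes staged in xmit_buf go to the engine; the trailing 46
 * bytes wait for the next update/final.
 */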
+
+/**
+ * s5p_hash_update_dma_stop() - unmap DMA
+ * @dd: secss device
+ *
+ * Unmap scatterlist ctx->sg.
+ */
+static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
+
+ dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
+ clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
+}
+
+/**
+ * s5p_hash_finish() - copy calculated digest to crypto layer
+ * @req: AHASH request
+ */
+static void s5p_hash_finish(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct s5p_aes_dev *dd = ctx->dd;
+
+ if (ctx->digcnt)
+ s5p_hash_copy_result(req);
+
+ dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
+}
+
+/**
+ * s5p_hash_finish_req() - finish request
+ * @req: AHASH request
+ * @err: error
+ */
+static void s5p_hash_finish_req(struct ahash_request *req, int err)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct s5p_aes_dev *dd = ctx->dd;
+ unsigned long flags;
+
+ if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
+ free_pages((unsigned long)sg_virt(ctx->sg),
+ get_order(ctx->sg->length));
+
+ if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
+ kfree(ctx->sg);
+
+ ctx->sg = NULL;
+ dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
+ BIT(HASH_FLAGS_SGS_COPIED));
+
+ if (!err && !ctx->error) {
+ s5p_hash_read_msg(req);
+ if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
+ s5p_hash_finish(req);
+ } else {
+ ctx->error = true;
+ }
+
+ spin_lock_irqsave(&dd->hash_lock, flags);
+ dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
+ BIT(HASH_FLAGS_DMA_READY) |
+ BIT(HASH_FLAGS_OUTPUT_READY));
+ spin_unlock_irqrestore(&dd->hash_lock, flags);
+
+ if (req->base.complete)
+ req->base.complete(&req->base, err);
+}
+
+/**
+ * s5p_hash_handle_queue() - handle hash queue
+ * @dd: device s5p_aes_dev
+ * @req: AHASH request
+ *
+ * If req != NULL, enqueue it on dd->hash_queue; if HASH_FLAGS_BUSY is not
+ * set on the device, then process the first request from dd->hash_queue.
+ *
+ * Returns: see s5p_hash_final below.
+ */
+static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
+ struct ahash_request *req)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct s5p_hash_reqctx *ctx;
+ unsigned long flags;
+ int err = 0, ret = 0;
+
+retry:
+ spin_lock_irqsave(&dd->hash_lock, flags);
+ if (req)
+ ret = ahash_enqueue_request(&dd->hash_queue, req);
+
+ if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
+ spin_unlock_irqrestore(&dd->hash_lock, flags);
+ return ret;
+ }
+
+ backlog = crypto_get_backlog(&dd->hash_queue);
+ async_req = crypto_dequeue_request(&dd->hash_queue);
+ if (async_req)
+ set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);
+
+ spin_unlock_irqrestore(&dd->hash_lock, flags);
+
+ if (!async_req)
+ return ret;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ahash_request_cast(async_req);
+ dd->hash_req = req;
+ ctx = ahash_request_ctx(req);
+
+ err = s5p_hash_prepare_request(req, ctx->op_update);
+ if (err || !ctx->total)
+ goto out;
+
+ dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
+ ctx->op_update, req->nbytes);
+
+ s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
+ if (ctx->digcnt)
+ s5p_hash_write_iv(req); /* restore hash IV */
+
+ if (ctx->op_update) { /* HASH_OP_UPDATE */
+ err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
+ if (err != -EINPROGRESS && ctx->finup && !ctx->error)
+ /* no final() after finup() */
+ err = s5p_hash_xmit_dma(dd, ctx->total, true);
+ } else { /* HASH_OP_FINAL */
+ err = s5p_hash_xmit_dma(dd, ctx->total, true);
+ }
+out:
+ if (err != -EINPROGRESS) {
+ /* hash_tasklet_cb will not finish it, so do it here */
+ s5p_hash_finish_req(req, err);
+ req = NULL;
+
+ /*
+ * Execute next request immediately if there is anything
+ * in queue.
+ */
+ goto retry;
+ }
+
+ return ret;
+}
+
+/**
+ * s5p_hash_tasklet_cb() - hash tasklet
+ * @data: ptr to s5p_aes_dev
+ */
+static void s5p_hash_tasklet_cb(unsigned long data)
+{
+ struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;
+
+ if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
+ s5p_hash_handle_queue(dd, NULL);
+ return;
+ }
+
+ if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
+ if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
+ &dd->hash_flags)) {
+ s5p_hash_update_dma_stop(dd);
+ }
+
+ if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
+ &dd->hash_flags)) {
+ /* hash or semi-hash ready */
+ clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
+ goto finish;
+ }
+ }
+
+ return;
+
+finish:
+	/* finish current request */
+ s5p_hash_finish_req(dd->hash_req, 0);
+
+ /* If we are not busy, process next req */
+ if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
+ s5p_hash_handle_queue(dd, NULL);
+}
+
+/**
+ * s5p_hash_enqueue() - enqueue request
+ * @req: AHASH request
+ * @op: operation UPDATE (true) or FINAL (false)
+ *
+ * Returns: see s5p_hash_final below.
+ */
+static int s5p_hash_enqueue(struct ahash_request *req, bool op)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->op_update = op;
+
+ return s5p_hash_handle_queue(tctx->dd, req);
+}
+
+/**
+ * s5p_hash_update() - process the hash input data
+ * @req: AHASH request
+ *
+ * If the request fits in the buffer, copy it and return immediately;
+ * otherwise enqueue it with OP_UPDATE.
+ *
+ * Returns: see s5p_hash_final below.
+ */
+static int s5p_hash_update(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!req->nbytes)
+ return 0;
+
+ if (ctx->bufcnt + req->nbytes <= BUFLEN) {
+ scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
+ 0, req->nbytes, 0);
+ ctx->bufcnt += req->nbytes;
+ return 0;
+ }
+
+ return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
+}
+
+/**
+ * s5p_hash_shash_digest() - calculate shash digest
+ * @tfm: crypto transformation
+ * @flags: tfm flags
+ * @data: input data
+ * @len: length of data
+ * @out: output buffer
+ */
+static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+ shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_digest(shash, data, len, out);
+}
+
+/**
+ * s5p_hash_final_shash() - calculate shash digest
+ * @req: AHASH request
+ */
+static int s5p_hash_final_shash(struct ahash_request *req)
+{
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
+ ctx->buffer, ctx->bufcnt, req->result);
+}
+
+/**
+ * s5p_hash_final() - close up hash and calculate digest
+ * @req: AHASH request
+ *
+ * Note: in final, req->src does not contain any data and req->nbytes can
+ * be non-zero.
+ *
+ * If no input data has been processed yet and the buffered hash data is
+ * less than BUFLEN (64), then calculate the final hash immediately using
+ * the SW algorithm fallback.
+ *
+ * Otherwise enqueue the current AHASH request with the OP_FINAL operation
+ * and finalize the hash message in HW. Note that if digcnt != 0 then there
+ * was a previous update op, so there are always some buffered bytes in
+ * ctx->buffer, which means that ctx->bufcnt != 0.
+ *
+ * Returns:
+ * 0 if the request has been processed immediately,
+ * -EINPROGRESS if the operation has been queued for later execution or is
+ * being processed by HW,
+ * -EBUSY if the queue is full and the request should be resubmitted later,
+ * other negative values denote an error.
+ */
+static int s5p_hash_final(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ ctx->finup = true;
+ if (ctx->error)
+ return -EINVAL; /* uncompleted hash is not needed */
+
+ if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
+ return s5p_hash_final_shash(req);
+
+ return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
+}
+
+/**
+ * s5p_hash_finup() - process last req->src and calculate digest
+ * @req: AHASH request containing the last update data
+ *
+ * Return values: see s5p_hash_final above.
+ */
+static int s5p_hash_finup(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ int err1, err2;
+
+ ctx->finup = true;
+
+ err1 = s5p_hash_update(req);
+ if (err1 == -EINPROGRESS || err1 == -EBUSY)
+ return err1;
+
+ /*
+	 * final() always has to be called to clean up resources, even if
+	 * update() failed (except for EINPROGRESS), or to calculate the
+	 * digest for a small size.
+ */
+ err2 = s5p_hash_final(req);
+
+ return err1 ?: err2;
+}
+
+/**
+ * s5p_hash_init() - initialize AHASH request context
+ * @req: AHASH request
+ *
+ * Init async hash request context.
+ */
+static int s5p_hash_init(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ctx->dd = tctx->dd;
+ ctx->error = false;
+ ctx->finup = false;
+ ctx->bufcnt = 0;
+ ctx->digcnt = 0;
+ ctx->total = 0;
+ ctx->skip = 0;
+
+ dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
+ crypto_ahash_digestsize(tfm));
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case MD5_DIGEST_SIZE:
+ ctx->engine = SSS_HASH_ENGINE_MD5;
+ ctx->nregs = HASH_MD5_MAX_REG;
+ break;
+ case SHA1_DIGEST_SIZE:
+ ctx->engine = SSS_HASH_ENGINE_SHA1;
+ ctx->nregs = HASH_SHA1_MAX_REG;
+ break;
+ case SHA256_DIGEST_SIZE:
+ ctx->engine = SSS_HASH_ENGINE_SHA256;
+ ctx->nregs = HASH_SHA256_MAX_REG;
+ break;
+ default:
+ ctx->error = true;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * s5p_hash_digest - calculate digest from req->src
+ * @req: AHASH request
+ *
+ * Return values: see s5p_hash_final above.
+ */
+static int s5p_hash_digest(struct ahash_request *req)
+{
+ return s5p_hash_init(req) ?: s5p_hash_finup(req);
+}
+
+/**
+ * s5p_hash_cra_init_alg - init crypto alg transformation
+ * @tfm: crypto transformation
+ */
+static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
+{
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ tctx->dd = s5p_dev;
+ /* Allocate a fallback and abort if it failed. */
+ tctx->fallback = crypto_alloc_shash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(tctx->fallback)) {
+ pr_err("fallback alloc fails for '%s'\n", alg_name);
+ return PTR_ERR(tctx->fallback);
+ }
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct s5p_hash_reqctx) + BUFLEN);
+
+ return 0;
+}
+
+/**
+ * s5p_hash_cra_init - init crypto tfm
+ * @tfm: crypto transformation
+ */
+static int s5p_hash_cra_init(struct crypto_tfm *tfm)
+{
+ return s5p_hash_cra_init_alg(tfm);
+}
+
+/**
+ * s5p_hash_cra_exit - exit crypto tfm
+ * @tfm: crypto transformation
+ *
+ * free allocated fallback
+ */
+static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(tctx->fallback);
+ tctx->fallback = NULL;
+}
+
+/**
+ * s5p_hash_export - export hash state
+ * @req: AHASH request
+ * @out: buffer for exported state
+ */
+static int s5p_hash_export(struct ahash_request *req, void *out)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
+
+ return 0;
+}
+
+/**
+ * s5p_hash_import - import hash state
+ * @req: AHASH request
+ * @in: buffer with state to be imported from
+ */
+static int s5p_hash_import(struct ahash_request *req, const void *in)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
+ const struct s5p_hash_reqctx *ctx_in = in;
+
+ memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
+ if (ctx_in->bufcnt > BUFLEN) {
+ ctx->error = true;
+ return -EINVAL;
+ }
+
+ ctx->dd = tctx->dd;
+ ctx->error = false;
+
+ return 0;
+}
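/*
 * A minimal, hypothetical sketch (not from the patch) of how the generic
 * crypto API reaches the export/import handlers above; the state buffer is
 * sized with crypto_ahash_statesize(), which reports halg.statesize.
 */
static int example_save_restore(struct crypto_ahash *tfm,
				struct ahash_request *req,
				struct ahash_request *req2)
{
	void *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	int ret;

	if (!state)
		return -ENOMEM;

	ret = crypto_ahash_export(req, state);		/* -> s5p_hash_export() */
	if (!ret)
		ret = crypto_ahash_import(req2, state);	/* -> s5p_hash_import() */

	kfree(state);
	return ret;
}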
+
+static struct ahash_alg algs_sha1_md5_sha256[] = {
+{
+ .init = s5p_hash_init,
+ .update = s5p_hash_update,
+ .final = s5p_hash_final,
+ .finup = s5p_hash_finup,
+ .digest = s5p_hash_digest,
+ .export = s5p_hash_export,
+ .import = s5p_hash_import,
+ .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "exynos-sha1",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = HASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s5p_hash_ctx),
+ .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
+ .cra_module = THIS_MODULE,
+ .cra_init = s5p_hash_cra_init,
+ .cra_exit = s5p_hash_cra_exit,
+ }
+},
+{
+ .init = s5p_hash_init,
+ .update = s5p_hash_update,
+ .final = s5p_hash_final,
+ .finup = s5p_hash_finup,
+ .digest = s5p_hash_digest,
+ .export = s5p_hash_export,
+ .import = s5p_hash_import,
+ .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "md5",
+ .cra_driver_name = "exynos-md5",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = HASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s5p_hash_ctx),
+ .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
+ .cra_module = THIS_MODULE,
+ .cra_init = s5p_hash_cra_init,
+ .cra_exit = s5p_hash_cra_exit,
+ }
+},
+{
+ .init = s5p_hash_init,
+ .update = s5p_hash_update,
+ .final = s5p_hash_final,
+ .finup = s5p_hash_finup,
+ .digest = s5p_hash_digest,
+ .export = s5p_hash_export,
+ .import = s5p_hash_import,
+ .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "exynos-sha256",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = HASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s5p_hash_ctx),
+ .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
+ .cra_module = THIS_MODULE,
+ .cra_init = s5p_hash_cra_init,
+ .cra_exit = s5p_hash_cra_exit,
+ }
+}
+
+};
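/*
 * A minimal, hypothetical user-side sketch (not part of the patch) of
 * driving one of the ahash algorithms registered above through the generic
 * crypto API; "sha256" may resolve to exynos-sha256 once this driver is
 * loaded, depending on priorities. Error handling is trimmed.
 */
static int example_sha256_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);	/* data must be DMA-able (not on stack) */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}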
+
static void s5p_set_aes(struct s5p_aes_dev *dev,
uint8_t *key, uint8_t *iv, unsigned int keylen)
{
@@ -829,6 +2154,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
struct samsung_aes_variant *variant;
struct s5p_aes_dev *pdata;
struct resource *res;
+ unsigned int hash_i;
if (s5p_dev)
return -EEXIST;
@@ -837,12 +2163,34 @@ static int s5p_aes_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
+ variant = find_s5p_sss_version(pdev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pdata->ioaddr))
- return PTR_ERR(pdata->ioaddr);
- variant = find_s5p_sss_version(pdev);
+ /*
+	 * Note: HASH and PRNG use the same registers in secss; avoid
+	 * overwriting each other. HASH is dropped here when CONFIG_EXYNOS_RNG
+	 * is enabled in the config. We need a larger region for the HASH
+	 * registers in secss; the current resource describes only AES/DES.
+ */
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
+ if (variant == &exynos_aes_data) {
+ res->end += 0x300;
+ pdata->use_hash = true;
+ }
+ }
+
+ pdata->res = res;
+ pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->ioaddr)) {
+ if (!pdata->use_hash)
+ return PTR_ERR(pdata->ioaddr);
+ /* try AES without HASH */
+ res->end -= 0x300;
+ pdata->use_hash = false;
+ pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->ioaddr))
+ return PTR_ERR(pdata->ioaddr);
+ }
pdata->clk = devm_clk_get(dev, "secss");
if (IS_ERR(pdata->clk)) {
@@ -857,8 +2205,10 @@ static int s5p_aes_probe(struct platform_device *pdev)
}
spin_lock_init(&pdata->lock);
+ spin_lock_init(&pdata->hash_lock);
pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
+ pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;
pdata->irq_fc = platform_get_irq(pdev, 0);
if (pdata->irq_fc < 0) {
@@ -888,12 +2238,40 @@ static int s5p_aes_probe(struct platform_device *pdev)
goto err_algs;
}
+ if (pdata->use_hash) {
+ tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
+ (unsigned long)pdata);
+ crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);
+
+ for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
+ hash_i++) {
+ struct ahash_alg *alg;
+
+ alg = &algs_sha1_md5_sha256[hash_i];
+ err = crypto_register_ahash(alg);
+ if (err) {
+ dev_err(dev, "can't register '%s': %d\n",
+ alg->halg.base.cra_driver_name, err);
+ goto err_hash;
+ }
+ }
+ }
+
dev_info(dev, "s5p-sss driver registered\n");
return 0;
+err_hash:
+ for (j = hash_i - 1; j >= 0; j--)
+ crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);
+
+ tasklet_kill(&pdata->hash_tasklet);
+ res->end -= 0x300;
+
err_algs:
- dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);
+ if (i < ARRAY_SIZE(algs))
+ dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
+ err);
for (j = 0; j < i; j++)
crypto_unregister_alg(&algs[j]);
@@ -920,9 +2298,16 @@ static int s5p_aes_remove(struct platform_device *pdev)
crypto_unregister_alg(&algs[i]);
tasklet_kill(&pdata->tasklet);
+ if (pdata->use_hash) {
+ for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
+ crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);
- clk_disable_unprepare(pdata->clk);
+ pdata->res->end -= 0x300;
+ tasklet_kill(&pdata->hash_tasklet);
+ pdata->use_hash = false;
+ }
+ clk_disable_unprepare(pdata->clk);
s5p_dev = NULL;
return 0;
@@ -942,3 +2327,4 @@ module_platform_driver(s5p_aes_crypto);
MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
+MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 4835dd4a9e50..4ca4a264a833 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -895,7 +895,6 @@ static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
static int stm32_hash_update(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- int ret;
if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
return 0;
@@ -909,12 +908,7 @@ static int stm32_hash_update(struct ahash_request *req)
return 0;
}
- ret = stm32_hash_enqueue(req, HASH_OP_UPDATE);
-
- if (rctx->flags & HASH_FLAGS_FINUP)
- return ret;
-
- return 0;
+ return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}
static int stm32_hash_final(struct ahash_request *req)
@@ -1070,7 +1064,6 @@ static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
struct stm32_hash_dev *hdev = dev_id;
- int err;
if (HASH_FLAGS_CPU & hdev->flags) {
if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
@@ -1087,8 +1080,8 @@ static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
return IRQ_HANDLED;
finish:
- /*Finish current request */
- stm32_hash_finish_req(hdev->req, err);
+ /* Finish current request */
+ stm32_hash_finish_req(hdev->req, 0);
return IRQ_HANDLED;
}
@@ -1411,11 +1404,10 @@ MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
struct device *dev)
{
- const struct of_device_id *match;
int err;
- match = of_match_device(stm32_hash_of_match, dev);
- if (!match) {
+ hdev->pdata = of_device_get_match_data(dev);
+ if (!hdev->pdata) {
dev_err(dev, "no compatible OF match\n");
return -EINVAL;
}
@@ -1423,8 +1415,6 @@ static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
err = of_property_read_u32(dev->of_node, "dma-maxburst",
&hdev->dma_maxburst);
- hdev->pdata = match->data;
-
return err;
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index dff88838dce7..9c80e0cb1664 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -56,29 +56,26 @@
#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
- bool is_sec1)
+ unsigned int len, bool is_sec1)
{
ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
- if (!is_sec1)
+ if (is_sec1) {
+ ptr->len1 = cpu_to_be16(len);
+ } else {
+ ptr->len = cpu_to_be16(len);
ptr->eptr = upper_32_bits(dma_addr);
+ }
}
static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
struct talitos_ptr *src_ptr, bool is_sec1)
{
dst_ptr->ptr = src_ptr->ptr;
- if (!is_sec1)
- dst_ptr->eptr = src_ptr->eptr;
-}
-
-static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
- bool is_sec1)
-{
if (is_sec1) {
- ptr->res = 0;
- ptr->len1 = cpu_to_be16(len);
+ dst_ptr->len1 = src_ptr->len1;
} else {
- ptr->len = cpu_to_be16(len);
+ dst_ptr->len = src_ptr->len;
+ dst_ptr->eptr = src_ptr->eptr;
}
}
@@ -116,9 +113,7 @@ static void map_single_talitos_ptr(struct device *dev,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
- to_talitos_ptr_len(ptr, len, is_sec1);
- to_talitos_ptr(ptr, dma_addr, is_sec1);
- to_talitos_ptr_ext_set(ptr, 0, is_sec1);
+ to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}
/*
@@ -165,6 +160,10 @@ static int reset_channel(struct device *dev, int ch)
/* set 36-bit addressing, done writeback enable and done IRQ enable */
setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
+ /* enable chaining descriptors */
+ if (is_sec1)
+ setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
+ TALITOS_CCCR_LO_NE);
/* and ICCR writeback, if available */
if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
@@ -287,7 +286,6 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
/* map descriptor and save caller data */
if (is_sec1) {
desc->hdr1 = desc->hdr;
- desc->next_desc = 0;
request->dma_desc = dma_map_single(dev, &desc->hdr1,
TALITOS_DESC_SIZE,
DMA_BIDIRECTIONAL);
@@ -339,7 +337,12 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
/* descriptors with their done bits set don't get the error */
rmb();
- hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;
+ if (!is_sec1)
+ hdr = request->desc->hdr;
+ else if (request->desc->next_desc)
+ hdr = (request->desc + 1)->hdr1;
+ else
+ hdr = request->desc->hdr1;
if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
status = 0;
@@ -393,8 +396,6 @@ static void talitos1_done_##name(unsigned long data) \
\
if (ch_done_mask & 0x10000000) \
flush_channel(dev, 0, 0, 0); \
- if (priv->num_channels == 1) \
- goto out; \
if (ch_done_mask & 0x40000000) \
flush_channel(dev, 1, 0, 0); \
if (ch_done_mask & 0x00010000) \
@@ -402,7 +403,6 @@ static void talitos1_done_##name(unsigned long data) \
if (ch_done_mask & 0x00040000) \
flush_channel(dev, 3, 0, 0); \
\
-out: \
/* At this point, all completed channels have been processed */ \
/* Unmask done interrupts for channels completed later on. */ \
spin_lock_irqsave(&priv->reg_lock, flags); \
@@ -412,6 +412,7 @@ out: \
}
DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
+DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
#define DEF_TALITOS2_DONE(name, ch_done_mask) \
static void talitos2_done_##name(unsigned long data) \
@@ -422,8 +423,6 @@ static void talitos2_done_##name(unsigned long data) \
\
if (ch_done_mask & 1) \
flush_channel(dev, 0, 0, 0); \
- if (priv->num_channels == 1) \
- goto out; \
if (ch_done_mask & (1 << 2)) \
flush_channel(dev, 1, 0, 0); \
if (ch_done_mask & (1 << 4)) \
@@ -431,7 +430,6 @@ static void talitos2_done_##name(unsigned long data) \
if (ch_done_mask & (1 << 6)) \
flush_channel(dev, 3, 0, 0); \
\
-out: \
/* At this point, all completed channels have been processed */ \
/* Unmask done interrupts for channels completed later on. */ \
spin_lock_irqsave(&priv->reg_lock, flags); \
@@ -441,6 +439,7 @@ out: \
}
DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
+DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
@@ -464,7 +463,8 @@ static u32 current_desc_hdr(struct device *dev, int ch)
tail = priv->chan[ch].tail;
iter = tail;
- while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
+ while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
+ priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
iter = (iter + 1) & (priv->fifo_len - 1);
if (iter == tail) {
dev_err(dev, "couldn't locate current descriptor\n");
@@ -472,6 +472,9 @@ static u32 current_desc_hdr(struct device *dev, int ch)
}
}
+ if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
+ return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
+
return priv->chan[ch].fifo[iter].desc->hdr;
}
@@ -825,9 +828,12 @@ struct talitos_ctx {
__be32 desc_hdr_template;
u8 key[TALITOS_MAX_KEY_SIZE];
u8 iv[TALITOS_MAX_IV_LENGTH];
+ dma_addr_t dma_key;
unsigned int keylen;
unsigned int enckeylen;
unsigned int authkeylen;
+ dma_addr_t dma_buf;
+ dma_addr_t dma_hw_context;
};
#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
@@ -836,8 +842,8 @@ struct talitos_ctx {
struct talitos_ahash_req_ctx {
u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
unsigned int hw_context_size;
- u8 buf[HASH_MAX_BLOCK_SIZE];
- u8 bufnext[HASH_MAX_BLOCK_SIZE];
+ u8 buf[2][HASH_MAX_BLOCK_SIZE];
+ int buf_idx;
unsigned int swinit;
unsigned int first;
unsigned int last;
@@ -861,6 +867,7 @@ static int aead_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = ctx->dev;
struct crypto_authenc_keys keys;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
@@ -869,12 +876,17 @@ static int aead_setkey(struct crypto_aead *authenc,
if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
goto badkey;
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
memcpy(ctx->key, keys.authkey, keys.authkeylen);
memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
ctx->keylen = keys.authkeylen + keys.enckeylen;
ctx->enckeylen = keys.enckeylen;
ctx->authkeylen = keys.authkeylen;
+ ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
+ DMA_TO_DEVICE);
return 0;
@@ -948,13 +960,13 @@ static void ipsec_esp_unmap(struct device *dev,
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
+ bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
+ struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
- if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ if (is_ipsec_esp)
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
DMA_FROM_DEVICE);
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
+ unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
areq->assoclen);
@@ -963,7 +975,7 @@ static void ipsec_esp_unmap(struct device *dev,
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
DMA_BIDIRECTIONAL);
- if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
+ if (!is_ipsec_esp) {
unsigned int dst_nents = edesc->dst_nents ? : 1;
sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
@@ -983,6 +995,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
struct aead_request *areq = context;
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
unsigned int authsize = crypto_aead_authsize(authenc);
+ unsigned int ivsize = crypto_aead_ivsize(authenc);
struct talitos_edesc *edesc;
struct scatterlist *sg;
void *icvdata;
@@ -1003,6 +1016,8 @@ static void ipsec_esp_encrypt_done(struct device *dev,
icvdata, authsize);
}
+ dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
+
kfree(edesc);
aead_request_complete(areq, err);
@@ -1097,8 +1112,7 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
len = cryptlen;
to_talitos_ptr(link_tbl_ptr + count,
- sg_dma_address(sg) + offset, 0);
- to_talitos_ptr_len(link_tbl_ptr + count, len, 0);
+ sg_dma_address(sg) + offset, len, 0);
to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
count++;
cryptlen -= len;
@@ -1116,7 +1130,7 @@ next:
return count;
}
-int talitos_sg_map(struct device *dev, struct scatterlist *src,
+static int talitos_sg_map(struct device *dev, struct scatterlist *src,
unsigned int len, struct talitos_edesc *edesc,
struct talitos_ptr *ptr,
int sg_count, unsigned int offset, int tbl_off)
@@ -1124,15 +1138,12 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
- to_talitos_ptr_len(ptr, len, is_sec1);
- to_talitos_ptr_ext_set(ptr, 0, is_sec1);
-
if (sg_count == 1) {
- to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
+ to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
return sg_count;
}
if (is_sec1) {
- to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
+ to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
return sg_count;
}
sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
@@ -1143,7 +1154,7 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src,
return sg_count;
}
to_talitos_ptr(ptr, edesc->dma_link_tbl +
- tbl_off * sizeof(struct talitos_ptr), is_sec1);
+ tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
return sg_count;
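
Across these hunks to_talitos_ptr() gains a length argument, which is why the separate to_talitos_ptr_len()/to_talitos_ptr_ext_set() calls disappear from the call sites. Inferred from those call sites only, the combined helper amounts to writing address and length in one step; the struct below is illustrative, not the real talitos_ptr layout from talitos.h:

    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Illustrative layout only; the real struct talitos_ptr differs per SEC
     * generation and is defined in talitos.h. */
    struct demo_ptr {
        __be16 len;
        __be16 res;
        __be32 ptr;
    };

    /* What the new to_talitos_ptr(ptr, addr, len, is_sec1) call sites imply:
     * one helper sets both the DMA address and the length. */
    static void demo_set_ptr(struct demo_ptr *p, dma_addr_t addr, unsigned int len)
    {
        p->ptr = cpu_to_be32(lower_32_bits(addr));
        p->len = cpu_to_be16(len);
    }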
@@ -1170,10 +1181,12 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
+ bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
+ struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
+ struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
/* hmac key */
- map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
- DMA_TO_DEVICE);
+ to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
sg_count = edesc->src_nents ?: 1;
if (is_sec1 && sg_count > 1)
@@ -1194,25 +1207,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
}
/* cipher iv */
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
- to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1);
- to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1);
- to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1);
- } else {
- to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1);
- to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1);
- to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1);
- }
+ to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
/* cipher key */
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
- map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
- (char *)&ctx->key + ctx->authkeylen,
- DMA_TO_DEVICE);
- else
- map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen,
- (char *)&ctx->key + ctx->authkeylen,
- DMA_TO_DEVICE);
+ to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
+ ctx->enckeylen, is_sec1);
/*
* cipher in
@@ -1220,24 +1219,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
* extent is bytes of HMAC postpended to ciphertext,
* typically 12 for ipsec
*/
- to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1);
- to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1);
-
sg_link_tbl_len = cryptlen;
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
+ if (is_ipsec_esp) {
to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
- if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+ if (desc->hdr & DESC_HDR_MODE1_MDEU_CICV)
sg_link_tbl_len += authsize;
}
- sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
- &desc->ptr[4], sg_count, areq->assoclen,
- tbl_off);
+ ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
+ &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
- if (sg_count > 1) {
- tbl_off += sg_count;
+ if (ret > 1) {
+ tbl_off += ret;
sync_needed = true;
}
@@ -1248,47 +1243,59 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
}
- sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
- &desc->ptr[5], sg_count, areq->assoclen,
- tbl_off);
+ ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
+ sg_count, areq->assoclen, tbl_off);
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ if (is_ipsec_esp)
to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
- if (sg_count > 1) {
+ /* ICV data */
+ if (ret > 1) {
+ tbl_off += ret;
edesc->icv_ool = true;
sync_needed = true;
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
+ if (is_ipsec_esp) {
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
int offset = (edesc->src_nents + edesc->dst_nents + 2) *
sizeof(struct talitos_ptr) + authsize;
/* Add an entry to the link table for ICV data */
- tbl_ptr += sg_count - 1;
- to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
- tbl_ptr++;
+ to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
is_sec1);
- to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
/* icv data follows link tables */
to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
- is_sec1);
+ authsize, is_sec1);
+ } else {
+ dma_addr_t addr = edesc->dma_link_tbl;
+
+ if (is_sec1)
+ addr += areq->assoclen + cryptlen;
+ else
+ addr += sizeof(struct talitos_ptr) * tbl_off;
+
+ to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
+ }
+ } else if (!is_ipsec_esp) {
+ ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
+ &desc->ptr[6], sg_count, areq->assoclen +
+ cryptlen,
+ tbl_off);
+ if (ret > 1) {
+ tbl_off += ret;
+ edesc->icv_ool = true;
+ sync_needed = true;
+ } else {
+ edesc->icv_ool = false;
}
} else {
edesc->icv_ool = false;
}
- /* ICV data */
- if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
- to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
- to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
- areq->assoclen + cryptlen, is_sec1);
- }
-
/* iv out */
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ if (is_ipsec_esp)
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
DMA_FROM_DEVICE);
@@ -1387,22 +1394,31 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
alloc_len += icv_stashing ? authsize : 0;
}
+ /* if it's an ahash, add space for a second desc next to the first one */
+ if (is_sec1 && !dst)
+ alloc_len += sizeof(struct talitos_desc);
+
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
dev_err(dev, "could not allocate edescriptor\n");
err = ERR_PTR(-ENOMEM);
goto error_sg;
}
+ memset(&edesc->desc, 0, sizeof(edesc->desc));
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
edesc->dma_len = dma_len;
- if (dma_len)
- edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
+ if (dma_len) {
+ void *addr = &edesc->link_tbl[0];
+
+ if (is_sec1 && !dst)
+ addr += sizeof(struct talitos_desc);
+ edesc->dma_link_tbl = dma_map_single(dev, addr,
edesc->dma_len,
DMA_BIDIRECTIONAL);
-
+ }
return edesc;
error_sg:
if (iv_dma)
@@ -1468,7 +1484,6 @@ static int aead_decrypt(struct aead_request *req)
DESC_HDR_MODE1_MDEU_CICV;
/* reset integrity check result bits */
- edesc->desc.hdr_lo = 0;
return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
}
@@ -1494,15 +1509,29 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct device *dev = ctx->dev;
+ u32 tmp[DES_EXPKEY_WORDS];
if (keylen > TALITOS_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
+ if (unlikely(crypto_ablkcipher_get_flags(cipher) &
+ CRYPTO_TFM_REQ_WEAK_KEY) &&
+ !des_ekey(tmp, key)) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
+ return -EINVAL;
+ }
+
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;
+ ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+
return 0;
}
@@ -1513,7 +1542,6 @@ static void common_nonsnoop_unmap(struct device *dev,
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
if (edesc->dma_len)
@@ -1555,16 +1583,12 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
bool is_sec1 = has_ftr_sec1(priv);
/* first DWORD empty */
- desc->ptr[0] = zero_entry;
/* cipher iv */
- to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
- to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
- to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1);
+ to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
/* cipher key */
- map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
- (char *)&ctx->key, DMA_TO_DEVICE);
+ to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
sg_count = edesc->src_nents ?: 1;
if (is_sec1 && sg_count > 1)
@@ -1599,7 +1623,6 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
DMA_FROM_DEVICE);
/* last DWORD empty */
- desc->ptr[6] = zero_entry;
if (sync_needed)
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
@@ -1663,26 +1686,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
-
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
- /* When using hashctx-in, must unmap it. */
- if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
- DMA_TO_DEVICE);
-
- if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
- DMA_TO_DEVICE);
-
if (edesc->dma_len)
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
DMA_BIDIRECTIONAL);
+ if (edesc->desc.next_desc)
+ dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
+ TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}
static void ahash_done(struct device *dev,
@@ -1696,7 +1709,7 @@ static void ahash_done(struct device *dev,
if (!req_ctx->last && req_ctx->to_hash_later) {
/* Position any partial block for next update/final/finup */
- memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
+ req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
req_ctx->nbuf = req_ctx->to_hash_later;
}
common_nonsnoop_hash_unmap(dev, edesc, areq);
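
Replacing buf/bufnext with buf[2] plus buf_idx turns the old copy of leftover bytes into a simple index flip: the leftovers are staged directly in the other buffer, which then becomes current. A standalone sketch of the ping-pong idea (illustrative only, not the driver's code):

    #include <string.h>

    #define BLOCK_SIZE 128

    struct pingpong {
        unsigned char buf[2][BLOCK_SIZE];
        int idx;        /* which buffer currently holds the pending bytes */
    };

    /* Stash leftover bytes in the other buffer, then flip the index so it
     * becomes the current one -- no copy between the two buffers needed. */
    static void stash_and_flip(struct pingpong *p, const unsigned char *left, size_t n)
    {
        memcpy(p->buf[(p->idx + 1) & 1], left, n);
        p->idx = (p->idx + 1) & 1;
    }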
@@ -1710,7 +1723,7 @@ static void ahash_done(struct device *dev,
* SEC1 doesn't like hashing a 0-sized message, so we do the padding
* ourselves and submit a padded block
*/
-void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
+static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
struct talitos_edesc *edesc,
struct talitos_ptr *ptr)
{
@@ -1729,6 +1742,7 @@ void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
struct ahash_request *areq, unsigned int length,
+ unsigned int offset,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
void *context, int error))
@@ -1745,44 +1759,48 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
int sg_count;
/* first DWORD empty */
- desc->ptr[0] = zero_entry;
/* hash context in */
if (!req_ctx->first || req_ctx->swinit) {
- map_single_talitos_ptr(dev, &desc->ptr[1],
- req_ctx->hw_context_size,
- (char *)req_ctx->hw_context,
- DMA_TO_DEVICE);
+ to_talitos_ptr(&desc->ptr[1], ctx->dma_hw_context,
+ req_ctx->hw_context_size, is_sec1);
req_ctx->swinit = 0;
- } else {
- desc->ptr[1] = zero_entry;
}
/* Indicate next op is not the first. */
req_ctx->first = 0;
/* HMAC key */
if (ctx->keylen)
- map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
- (char *)&ctx->key, DMA_TO_DEVICE);
- else
- desc->ptr[2] = zero_entry;
+ to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
+ is_sec1);
+
+ if (is_sec1 && req_ctx->nbuf)
+ length -= req_ctx->nbuf;
sg_count = edesc->src_nents ?: 1;
if (is_sec1 && sg_count > 1)
- sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
- else
+ sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
+ edesc->buf + sizeof(struct talitos_desc),
+ length, req_ctx->nbuf);
+ else if (length)
sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
DMA_TO_DEVICE);
/*
* data in
*/
- sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
- &desc->ptr[3], sg_count, 0, 0);
- if (sg_count > 1)
- sync_needed = true;
+ if (is_sec1 && req_ctx->nbuf) {
+ dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
+ HASH_MAX_BLOCK_SIZE;
+
+ to_talitos_ptr(&desc->ptr[3], dma_buf, req_ctx->nbuf, is_sec1);
+ } else {
+ sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+ &desc->ptr[3], sg_count, offset, 0);
+ if (sg_count > 1)
+ sync_needed = true;
+ }
/* fifth DWORD empty */
- desc->ptr[4] = zero_entry;
/* hash/HMAC out -or- hash context out */
if (req_ctx->last)
@@ -1790,16 +1808,44 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
crypto_ahash_digestsize(tfm),
areq->result, DMA_FROM_DEVICE);
else
- map_single_talitos_ptr(dev, &desc->ptr[5],
- req_ctx->hw_context_size,
- req_ctx->hw_context, DMA_FROM_DEVICE);
+ to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
+ req_ctx->hw_context_size, is_sec1);
/* last DWORD empty */
- desc->ptr[6] = zero_entry;
if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
+ if (is_sec1 && req_ctx->nbuf && length) {
+ struct talitos_desc *desc2 = desc + 1;
+ dma_addr_t next_desc;
+
+ memset(desc2, 0, sizeof(*desc2));
+ desc2->hdr = desc->hdr;
+ desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
+ desc2->hdr1 = desc2->hdr;
+ desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
+ desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
+ desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
+
+ to_talitos_ptr(&desc2->ptr[1], ctx->dma_hw_context,
+ req_ctx->hw_context_size, is_sec1);
+
+ copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
+ sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+ &desc2->ptr[3], sg_count, offset, 0);
+ if (sg_count > 1)
+ sync_needed = true;
+ copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
+ if (req_ctx->last)
+ to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
+ req_ctx->hw_context_size, is_sec1);
+
+ next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
+ DMA_BIDIRECTIONAL);
+ desc->next_desc = cpu_to_be32(next_desc);
+ }
+
if (sync_needed)
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
@@ -1818,6 +1864,11 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct talitos_private *priv = dev_get_drvdata(ctx->dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+
+ if (is_sec1)
+ nbytes -= req_ctx->nbuf;
return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
nbytes, 0, 0, 0, areq->base.flags, false);
@@ -1826,17 +1877,35 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
static int ahash_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ unsigned int size;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
/* Initialize the context */
+ req_ctx->buf_idx = 0;
req_ctx->nbuf = 0;
req_ctx->first = 1; /* first indicates h/w must init its context */
req_ctx->swinit = 0; /* assume h/w init of context */
- req_ctx->hw_context_size =
- (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+ req_ctx->hw_context_size = size;
+ if (ctx->dma_hw_context)
+ dma_unmap_single(dev, ctx->dma_hw_context, size,
+ DMA_BIDIRECTIONAL);
+ ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
+ DMA_BIDIRECTIONAL);
+ if (ctx->dma_buf)
+ dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
+ DMA_TO_DEVICE);
+ if (is_sec1)
+ ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
+ sizeof(req_ctx->buf),
+ DMA_TO_DEVICE);
return 0;
}
@@ -1847,6 +1916,9 @@ static int ahash_init(struct ahash_request *areq)
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
ahash_init(areq);
req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
@@ -1864,6 +1936,9 @@ static int ahash_init_sha224_swinit(struct ahash_request *areq)
req_ctx->hw_context[8] = 0;
req_ctx->hw_context[9] = 0;
+ dma_sync_single_for_device(dev, ctx->dma_hw_context,
+ req_ctx->hw_context_size, DMA_TO_DEVICE);
+
return 0;
}
@@ -1879,6 +1954,11 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
unsigned int to_hash_later;
unsigned int nsg;
int nents;
+ struct device *dev = ctx->dev;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+ int offset = 0;
+ u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
/* Buffer up to one whole block */
@@ -1888,7 +1968,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
return nents;
}
sg_copy_to_buffer(areq->src, nents,
- req_ctx->buf + req_ctx->nbuf, nbytes);
+ ctx_buf + req_ctx->nbuf, nbytes);
req_ctx->nbuf += nbytes;
return 0;
}
@@ -1909,13 +1989,27 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
}
/* Chain in any previously buffered data */
- if (req_ctx->nbuf) {
+ if (!is_sec1 && req_ctx->nbuf) {
nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
sg_init_table(req_ctx->bufsl, nsg);
- sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
+ sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
if (nsg > 1)
sg_chain(req_ctx->bufsl, 2, areq->src);
req_ctx->psrc = req_ctx->bufsl;
+ } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
+ if (nbytes_to_hash > blocksize)
+ offset = blocksize - req_ctx->nbuf;
+ else
+ offset = nbytes_to_hash - req_ctx->nbuf;
+ nents = sg_nents_for_len(areq->src, offset);
+ if (nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return nents;
+ }
+ sg_copy_to_buffer(areq->src, nents,
+ ctx_buf + req_ctx->nbuf, offset);
+ req_ctx->nbuf += offset;
+ req_ctx->psrc = areq->src;
} else
req_ctx->psrc = areq->src;
@@ -1926,7 +2020,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
return nents;
}
sg_pcopy_to_buffer(areq->src, nents,
- req_ctx->bufnext,
+ req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
to_hash_later,
nbytes - to_hash_later);
}
@@ -1948,6 +2042,13 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
/* request SEC to INIT hash. */
if (req_ctx->first && !req_ctx->swinit)
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
+ if (is_sec1) {
+ dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
+ HASH_MAX_BLOCK_SIZE;
+
+ dma_sync_single_for_device(dev, dma_buf,
+ req_ctx->nbuf, DMA_TO_DEVICE);
+ }
/* When the tfm context has a keylen, it's an HMAC.
* A first or last (ie. not middle) descriptor must request HMAC.
@@ -1955,7 +2056,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
if (ctx->keylen && (req_ctx->first || req_ctx->last))
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
- return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
+ return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
ahash_done);
}
@@ -2001,10 +2102,15 @@ static int ahash_export(struct ahash_request *areq, void *out)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct talitos_export_state *export = out;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = ctx->dev;
+ dma_sync_single_for_cpu(dev, ctx->dma_hw_context,
+ req_ctx->hw_context_size, DMA_FROM_DEVICE);
memcpy(export->hw_context, req_ctx->hw_context,
req_ctx->hw_context_size);
- memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
+ memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
export->swinit = req_ctx->swinit;
export->first = req_ctx->first;
export->last = req_ctx->last;
@@ -2019,15 +2125,32 @@ static int ahash_import(struct ahash_request *areq, const void *in)
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
const struct talitos_export_state *export = in;
+ unsigned int size;
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
memset(req_ctx, 0, sizeof(*req_ctx));
- req_ctx->hw_context_size =
- (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
- memcpy(req_ctx->hw_context, export->hw_context,
- req_ctx->hw_context_size);
- memcpy(req_ctx->buf, export->buf, export->nbuf);
+ req_ctx->hw_context_size = size;
+ if (ctx->dma_hw_context)
+ dma_unmap_single(dev, ctx->dma_hw_context, size,
+ DMA_BIDIRECTIONAL);
+
+ memcpy(req_ctx->hw_context, export->hw_context, size);
+ ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
+ DMA_BIDIRECTIONAL);
+ if (ctx->dma_buf)
+ dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
+ DMA_TO_DEVICE);
+ memcpy(req_ctx->buf[0], export->buf, export->nbuf);
+ if (is_sec1)
+ ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
+ sizeof(req_ctx->buf),
+ DMA_TO_DEVICE);
req_ctx->swinit = export->swinit;
req_ctx->first = export->first;
req_ctx->last = export->last;
@@ -2037,22 +2160,6 @@ static int ahash_import(struct ahash_request *areq, const void *in)
return 0;
}
-struct keyhash_result {
- struct completion completion;
- int err;
-};
-
-static void keyhash_complete(struct crypto_async_request *req, int err)
-{
- struct keyhash_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
u8 *hash)
{
@@ -2060,10 +2167,10 @@ static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
struct scatterlist sg[1];
struct ahash_request *req;
- struct keyhash_result hresult;
+ struct crypto_wait wait;
int ret;
- init_completion(&hresult.completion);
+ crypto_init_wait(&wait);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req)
@@ -2072,25 +2179,13 @@ static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
/* Keep tfm keylen == 0 during hash of the long key */
ctx->keylen = 0;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- keyhash_complete, &hresult);
+ crypto_req_done, &wait);
sg_init_one(&sg[0], key, keylen);
ahash_request_set_crypt(req, sg, hash, keylen);
- ret = crypto_ahash_digest(req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(
- &hresult.completion);
- if (!ret)
- ret = hresult.err;
- break;
- default:
- break;
- }
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+
ahash_request_free(req);
return ret;
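
The open-coded completion in keyhash() gives way to the generic crypto_wait helpers: crypto_req_done() is the stock callback and crypto_wait_req() absorbs the -EINPROGRESS/-EBUSY handling that the removed switch used to do. The general shape of the pattern as a self-contained sketch (hash_sync() and its arguments are illustrative, not driver code):

    #include <crypto/hash.h>
    #include <linux/crypto.h>
    #include <linux/scatterlist.h>

    /* Hash a flat buffer synchronously through an async ahash tfm. */
    static int hash_sync(struct crypto_ahash *tfm, const u8 *data,
                         unsigned int len, u8 *out)
    {
        DECLARE_CRYPTO_WAIT(wait);
        struct scatterlist sg;
        struct ahash_request *req;
        int ret;

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req)
            return -ENOMEM;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);
        sg_init_one(&sg, data, len);
        ahash_request_set_crypt(req, &sg, out, len);

        /* Waits out -EINPROGRESS/-EBUSY and returns the final status. */
        ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

        ahash_request_free(req);
        return ret;
    }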
@@ -2100,6 +2195,7 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct device *dev = ctx->dev;
unsigned int blocksize =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int digestsize = crypto_ahash_digestsize(tfm);
@@ -2122,7 +2218,11 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
memcpy(ctx->key, hash, digestsize);
}
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
ctx->keylen = keysize;
+ ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
return 0;
}
@@ -2614,7 +2714,7 @@ static struct talitos_alg_template driver_algs[] = {
.ivsize = AES_BLOCK_SIZE,
}
},
- .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CTR,
},
@@ -2951,6 +3051,36 @@ static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
return 0;
}
+static void talitos_cra_exit(struct crypto_tfm *tfm)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct device *dev = ctx->dev;
+
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+}
+
+static void talitos_cra_exit_ahash(struct crypto_tfm *tfm)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct device *dev = ctx->dev;
+ unsigned int size;
+
+ talitos_cra_exit(tfm);
+
+ size = (crypto_ahash_digestsize(__crypto_ahash_cast(tfm)) <=
+ SHA256_DIGEST_SIZE)
+ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+
+ if (ctx->dma_hw_context)
+ dma_unmap_single(dev, ctx->dma_hw_context, size,
+ DMA_BIDIRECTIONAL);
+ if (ctx->dma_buf)
+ dma_unmap_single(dev, ctx->dma_buf, HASH_MAX_BLOCK_SIZE * 2,
+ DMA_TO_DEVICE);
+}
+
/*
* given the alg's descriptor header template, determine whether descriptor
* type and primary/secondary execution units required match the hw
@@ -2989,17 +3119,11 @@ static int talitos_remove(struct platform_device *ofdev)
break;
}
list_del(&t_alg->entry);
- kfree(t_alg);
}
if (hw_supports(dev, DESC_HDR_SEL0_RNG))
talitos_unregister_rng(dev);
- for (i = 0; priv->chan && i < priv->num_channels; i++)
- kfree(priv->chan[i].fifo);
-
- kfree(priv->chan);
-
for (i = 0; i < 2; i++)
if (priv->irq[i]) {
free_irq(priv->irq[i], dev);
@@ -3010,10 +3134,6 @@ static int talitos_remove(struct platform_device *ofdev)
if (priv->irq[1])
tasklet_kill(&priv->done_task[1]);
- iounmap(priv->reg);
-
- kfree(priv);
-
return 0;
}
@@ -3025,7 +3145,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
struct talitos_crypto_alg *t_alg;
struct crypto_alg *alg;
- t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
+ t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
+ GFP_KERNEL);
if (!t_alg)
return ERR_PTR(-ENOMEM);
@@ -3035,6 +3156,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
case CRYPTO_ALG_TYPE_ABLKCIPHER:
alg = &t_alg->algt.alg.crypto;
alg->cra_init = talitos_cra_init;
+ alg->cra_exit = talitos_cra_exit;
alg->cra_type = &crypto_ablkcipher_type;
alg->cra_ablkcipher.setkey = ablkcipher_setkey;
alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
@@ -3043,14 +3165,21 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
break;
case CRYPTO_ALG_TYPE_AEAD:
alg = &t_alg->algt.alg.aead.base;
+ alg->cra_exit = talitos_cra_exit;
t_alg->algt.alg.aead.init = talitos_cra_init_aead;
t_alg->algt.alg.aead.setkey = aead_setkey;
t_alg->algt.alg.aead.encrypt = aead_encrypt;
t_alg->algt.alg.aead.decrypt = aead_decrypt;
+ if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
+ !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
+ devm_kfree(dev, t_alg);
+ return ERR_PTR(-ENOTSUPP);
+ }
break;
case CRYPTO_ALG_TYPE_AHASH:
alg = &t_alg->algt.alg.hash.halg.base;
alg->cra_init = talitos_cra_init_ahash;
+ alg->cra_exit = talitos_cra_exit_ahash;
alg->cra_type = &crypto_ahash_type;
t_alg->algt.alg.hash.init = ahash_init;
t_alg->algt.alg.hash.update = ahash_update;
@@ -3064,7 +3193,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
!strncmp(alg->cra_name, "hmac", 4)) {
- kfree(t_alg);
+ devm_kfree(dev, t_alg);
return ERR_PTR(-ENOTSUPP);
}
if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
@@ -3079,7 +3208,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
break;
default:
dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
- kfree(t_alg);
+ devm_kfree(dev, t_alg);
return ERR_PTR(-EINVAL);
}
@@ -3156,11 +3285,11 @@ static int talitos_probe(struct platform_device *ofdev)
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct talitos_private *priv;
- const unsigned int *prop;
int i, err;
int stride;
+ struct resource *res;
- priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -3172,7 +3301,10 @@ static int talitos_probe(struct platform_device *ofdev)
spin_lock_init(&priv->reg_lock);
- priv->reg = of_iomap(np, 0);
+ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+ priv->reg = devm_ioremap(dev, res->start, resource_size(res));
if (!priv->reg) {
dev_err(dev, "failed to of_iomap\n");
err = -ENOMEM;
@@ -3180,21 +3312,11 @@ static int talitos_probe(struct platform_device *ofdev)
}
/* get SEC version capabilities from device tree */
- prop = of_get_property(np, "fsl,num-channels", NULL);
- if (prop)
- priv->num_channels = *prop;
-
- prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
- if (prop)
- priv->chfifo_len = *prop;
-
- prop = of_get_property(np, "fsl,exec-units-mask", NULL);
- if (prop)
- priv->exec_units = *prop;
-
- prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
- if (prop)
- priv->desc_types = *prop;
+ of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
+ of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
+ of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
+ of_property_read_u32(np, "fsl,descriptor-types-mask",
+ &priv->desc_types);
if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
!priv->exec_units || !priv->desc_types) {
@@ -3244,22 +3366,29 @@ static int talitos_probe(struct platform_device *ofdev)
goto err_out;
if (of_device_is_compatible(np, "fsl,sec1.0")) {
- tasklet_init(&priv->done_task[0], talitos1_done_4ch,
- (unsigned long)dev);
- } else {
- if (!priv->irq[1]) {
- tasklet_init(&priv->done_task[0], talitos2_done_4ch,
+ if (priv->num_channels == 1)
+ tasklet_init(&priv->done_task[0], talitos1_done_ch0,
(unsigned long)dev);
- } else {
+ else
+ tasklet_init(&priv->done_task[0], talitos1_done_4ch,
+ (unsigned long)dev);
+ } else {
+ if (priv->irq[1]) {
tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
(unsigned long)dev);
tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
(unsigned long)dev);
+ } else if (priv->num_channels == 1) {
+ tasklet_init(&priv->done_task[0], talitos2_done_ch0,
+ (unsigned long)dev);
+ } else {
+ tasklet_init(&priv->done_task[0], talitos2_done_4ch,
+ (unsigned long)dev);
}
}
- priv->chan = kzalloc(sizeof(struct talitos_channel) *
- priv->num_channels, GFP_KERNEL);
+ priv->chan = devm_kzalloc(dev, sizeof(struct talitos_channel) *
+ priv->num_channels, GFP_KERNEL);
if (!priv->chan) {
dev_err(dev, "failed to allocate channel management space\n");
err = -ENOMEM;
@@ -3276,8 +3405,9 @@ static int talitos_probe(struct platform_device *ofdev)
spin_lock_init(&priv->chan[i].head_lock);
spin_lock_init(&priv->chan[i].tail_lock);
- priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
- priv->fifo_len, GFP_KERNEL);
+ priv->chan[i].fifo = devm_kzalloc(dev,
+ sizeof(struct talitos_request) *
+ priv->fifo_len, GFP_KERNEL);
if (!priv->chan[i].fifo) {
dev_err(dev, "failed to allocate request fifo %d\n", i);
err = -ENOMEM;
@@ -3343,7 +3473,7 @@ static int talitos_probe(struct platform_device *ofdev)
if (err) {
dev_err(dev, "%s alg registration failed\n",
alg->cra_driver_name);
- kfree(t_alg);
+ devm_kfree(dev, t_alg);
} else
list_add_tail(&t_alg->entry, &priv->alg_list);
}
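
Most of the cleanup removed from talitos_remove() earlier in this diff is enabled by the probe-side switch to device-managed allocations (devm_kzalloc(), devm_ioremap(), the per-channel fifo allocations): devres frees them automatically when the device is unbound. A generic illustration of why remove() then needs no kfree()/iounmap(), under the usual platform-driver assumptions:

    #include <linux/device.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct my_priv {
        void __iomem *reg;
    };

    static int my_probe(struct platform_device *pdev)
    {
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct my_priv *priv;

        /* Freed automatically on unbind -- no kfree() in remove(). */
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
            return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
            return -ENXIO;

        /* Unmapped automatically on unbind -- no iounmap() in remove(). */
        priv->reg = devm_ioremap(dev, res->start, resource_size(res));
        if (!priv->reg)
            return -ENOMEM;

        platform_set_drvdata(pdev, priv);
        return 0;
    }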
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 8dd8f40e2771..a65a63e0d6c1 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -52,8 +52,6 @@ struct talitos_ptr {
__be32 ptr; /* address */
};
-static const struct talitos_ptr zero_entry;
-
/* descriptor */
struct talitos_desc {
__be32 hdr; /* header high bits */
@@ -210,9 +208,13 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
#define TALITOS_ISR 0x1010 /* interrupt status register */
#define TALITOS1_ISR_4CHERR ISR1_FORMAT(0xa) /* 4 ch errors mask */
#define TALITOS1_ISR_4CHDONE ISR1_FORMAT(0x5) /* 4 ch done mask */
+#define TALITOS1_ISR_CH_0_ERR (2 << 28) /* ch 0 errors mask */
+#define TALITOS1_ISR_CH_0_DONE (1 << 28) /* ch 0 done mask */
#define TALITOS1_ISR_TEA_ERR 0x00000040
#define TALITOS2_ISR_4CHERR ISR2_FORMAT(0xa) /* 4 ch errors mask */
#define TALITOS2_ISR_4CHDONE ISR2_FORMAT(0x5) /* 4 ch done mask */
+#define TALITOS2_ISR_CH_0_ERR 2 /* ch 0 errors mask */
+#define TALITOS2_ISR_CH_0_DONE 1 /* ch 0 done mask */
#define TALITOS2_ISR_CH_0_2_ERR ISR2_FORMAT(0x2) /* ch 0, 2 err mask */
#define TALITOS2_ISR_CH_0_2_DONE ISR2_FORMAT(0x1) /* ch 0, 2 done mask */
#define TALITOS2_ISR_CH_1_3_ERR ISR2_FORMAT(0x8) /* ch 1, 3 err mask */
@@ -234,6 +236,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */
#define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */
#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
+#define TALITOS_CCCR_LO_NE 0x8 /* fetch next descriptor enab. */
#define TALITOS_CCCR_LO_NT 0x4 /* notification type */
#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
#define TALITOS1_CCCR_LO_RESET 0x1 /* channel reset on SEC1 */
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 790f7cadc1ed..765f53e548ab 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1751,7 +1751,6 @@ static void __exit ux500_cryp_mod_fini(void)
{
pr_debug("[%s] is called!", __func__);
platform_driver_unregister(&cryp_driver);
- return;
}
module_init(ux500_cryp_mod_init);
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 5035b0dc1e40..abe8c15450df 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -319,7 +319,7 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
struct virtio_crypto *vcrypto =
virtcrypto_get_dev_node(node);
if (!vcrypto) {
- pr_err("virtio_crypto: Could not find a virtio device in the system");
+ pr_err("virtio_crypto: Could not find a virtio device in the system\n");
return -ENODEV;
}
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 17d84217dd76..fc60d00a2e84 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -27,21 +27,23 @@
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+
#include "aesp8-ppc.h"
struct p8_aes_ctr_ctx {
- struct crypto_blkcipher *fallback;
+ struct crypto_skcipher *fallback;
struct aes_key enc_key;
};
static int p8_aes_ctr_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_blkcipher *fallback;
+ struct crypto_skcipher *fallback;
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback =
- crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_skcipher(alg, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
"Failed to allocate transformation for '%s': %ld\n",
@@ -49,11 +51,11 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
printk(KERN_INFO "Using '%s' as fallback implementation.\n",
- crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+ crypto_skcipher_driver_name(fallback));
- crypto_blkcipher_set_flags(
+ crypto_skcipher_set_flags(
fallback,
- crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm));
+ crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
return 0;
@@ -64,7 +66,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_blkcipher(ctx->fallback);
+ crypto_free_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -83,7 +85,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -117,15 +119,14 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
struct p8_aes_ctr_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- struct blkcipher_desc fallback_desc = {
- .tfm = ctx->fallback,
- .info = desc->info,
- .flags = desc->flags
- };
if (in_interrupt()) {
- ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
- nbytes);
+ SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_tfm(req, ctx->fallback);
+ skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+ ret = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
} else {
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
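
The fallback path now uses the skcipher API: the fallback tfm is allocated with crypto_alloc_skcipher() and, when the VSX path cannot be used, a request is built on the stack and run synchronously. Condensed into a sketch (function name and parameters are illustrative):

    #include <crypto/skcipher.h>

    /* Encrypt once through a software fallback skcipher; no async callback,
     * matching the in_interrupt() path above. */
    static int fallback_encrypt(struct crypto_skcipher *fallback,
                                struct scatterlist *src, struct scatterlist *dst,
                                unsigned int nbytes, void *iv, u32 flags)
    {
        int ret;

        SKCIPHER_REQUEST_ON_STACK(req, fallback);

        skcipher_request_set_tfm(req, fallback);
        skcipher_request_set_callback(req, flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, iv);
        ret = crypto_skcipher_encrypt(req);
        skcipher_request_zero(req);     /* wipe the on-stack request state */

        return ret;
    }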
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index a1c4ee818614..78fb496ecb4e 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -28,6 +28,9 @@
#include <linux/of.h>
#include "governor.h"
+#define MAX(a,b) ((a > b) ? a : b)
+#define MIN(a,b) ((a < b) ? a : b)
+
static struct class *devfreq_class;
/*
@@ -69,6 +72,34 @@ static struct devfreq *find_device_devfreq(struct device *dev)
return ERR_PTR(-ENODEV);
}
+static unsigned long find_available_min_freq(struct devfreq *devfreq)
+{
+ struct dev_pm_opp *opp;
+ unsigned long min_freq = 0;
+
+ opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
+ if (IS_ERR(opp))
+ min_freq = 0;
+ else
+ dev_pm_opp_put(opp);
+
+ return min_freq;
+}
+
+static unsigned long find_available_max_freq(struct devfreq *devfreq)
+{
+ struct dev_pm_opp *opp;
+ unsigned long max_freq = ULONG_MAX;
+
+ opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
+ if (IS_ERR(opp))
+ max_freq = 0;
+ else
+ dev_pm_opp_put(opp);
+
+ return max_freq;
+}
+
/**
* devfreq_get_freq_level() - Lookup freq_table for the frequency
* @devfreq: the devfreq instance
@@ -85,11 +116,7 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
return -EINVAL;
}
-/**
- * devfreq_set_freq_table() - Initialize freq_table for the frequency
- * @devfreq: the devfreq instance
- */
-static void devfreq_set_freq_table(struct devfreq *devfreq)
+static int set_freq_table(struct devfreq *devfreq)
{
struct devfreq_dev_profile *profile = devfreq->profile;
struct dev_pm_opp *opp;
@@ -99,7 +126,7 @@ static void devfreq_set_freq_table(struct devfreq *devfreq)
/* Initialize the freq_table from OPP table */
count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
if (count <= 0)
- return;
+ return -EINVAL;
profile->max_state = count;
profile->freq_table = devm_kcalloc(devfreq->dev.parent,
@@ -108,7 +135,7 @@ static void devfreq_set_freq_table(struct devfreq *devfreq)
GFP_KERNEL);
if (!profile->freq_table) {
profile->max_state = 0;
- return;
+ return -ENOMEM;
}
for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
@@ -116,11 +143,13 @@ static void devfreq_set_freq_table(struct devfreq *devfreq)
if (IS_ERR(opp)) {
devm_kfree(devfreq->dev.parent, profile->freq_table);
profile->max_state = 0;
- return;
+ return PTR_ERR(opp);
}
dev_pm_opp_put(opp);
profile->freq_table[i] = freq;
}
+
+ return 0;
}
/**
@@ -227,7 +256,7 @@ static int devfreq_notify_transition(struct devfreq *devfreq,
int update_devfreq(struct devfreq *devfreq)
{
struct devfreq_freqs freqs;
- unsigned long freq, cur_freq;
+ unsigned long freq, cur_freq, min_freq, max_freq;
int err = 0;
u32 flags = 0;
@@ -245,19 +274,21 @@ int update_devfreq(struct devfreq *devfreq)
return err;
/*
- * Adjust the frequency with user freq and QoS.
+ * Adjust the frequency with user freq, QoS and available freq.
*
* List from the highest priority
* max_freq
* min_freq
*/
+ max_freq = MIN(devfreq->scaling_max_freq, devfreq->max_freq);
+ min_freq = MAX(devfreq->scaling_min_freq, devfreq->min_freq);
- if (devfreq->min_freq && freq < devfreq->min_freq) {
- freq = devfreq->min_freq;
+ if (min_freq && freq < min_freq) {
+ freq = min_freq;
flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
}
- if (devfreq->max_freq && freq > devfreq->max_freq) {
- freq = devfreq->max_freq;
+ if (max_freq && freq > max_freq) {
+ freq = max_freq;
flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
}
@@ -280,10 +311,9 @@ int update_devfreq(struct devfreq *devfreq)
freqs.new = freq;
devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
- if (devfreq->profile->freq_table)
- if (devfreq_update_status(devfreq, freq))
- dev_err(&devfreq->dev,
- "Couldn't update frequency transition information.\n");
+ if (devfreq_update_status(devfreq, freq))
+ dev_err(&devfreq->dev,
+ "Couldn't update frequency transition information.\n");
devfreq->previous_freq = freq;
return err;
@@ -466,6 +496,19 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
int ret;
mutex_lock(&devfreq->lock);
+
+ devfreq->scaling_min_freq = find_available_min_freq(devfreq);
+ if (!devfreq->scaling_min_freq) {
+ mutex_unlock(&devfreq->lock);
+ return -EINVAL;
+ }
+
+ devfreq->scaling_max_freq = find_available_max_freq(devfreq);
+ if (!devfreq->scaling_max_freq) {
+ mutex_unlock(&devfreq->lock);
+ return -EINVAL;
+ }
+
ret = update_devfreq(devfreq);
mutex_unlock(&devfreq->lock);
@@ -555,10 +598,28 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
mutex_unlock(&devfreq->lock);
- devfreq_set_freq_table(devfreq);
+ err = set_freq_table(devfreq);
+ if (err < 0)
+ goto err_out;
mutex_lock(&devfreq->lock);
}
+ devfreq->min_freq = find_available_min_freq(devfreq);
+ if (!devfreq->min_freq) {
+ mutex_unlock(&devfreq->lock);
+ err = -EINVAL;
+ goto err_dev;
+ }
+ devfreq->scaling_min_freq = devfreq->min_freq;
+
+ devfreq->max_freq = find_available_max_freq(devfreq);
+ if (!devfreq->max_freq) {
+ mutex_unlock(&devfreq->lock);
+ err = -EINVAL;
+ goto err_dev;
+ }
+ devfreq->scaling_max_freq = devfreq->max_freq;
+
dev_set_name(&devfreq->dev, "devfreq%d",
atomic_inc_return(&devfreq_no));
err = device_register(&devfreq->dev);
@@ -1082,6 +1143,14 @@ unlock:
return ret;
}
+static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *df = to_devfreq(dev);
+
+ return sprintf(buf, "%lu\n", MAX(df->scaling_min_freq, df->min_freq));
+}
+
static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1108,17 +1177,15 @@ unlock:
mutex_unlock(&df->lock);
return ret;
}
+static DEVICE_ATTR_RW(min_freq);
-#define show_one(name) \
-static ssize_t name##_show \
-(struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return sprintf(buf, "%lu\n", to_devfreq(dev)->name); \
-}
-show_one(min_freq);
-show_one(max_freq);
+static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *df = to_devfreq(dev);
-static DEVICE_ATTR_RW(min_freq);
+ return sprintf(buf, "%lu\n", MIN(df->scaling_max_freq, df->max_freq));
+}
static DEVICE_ATTR_RW(max_freq);
static ssize_t available_frequencies_show(struct device *d,
@@ -1126,22 +1193,16 @@ static ssize_t available_frequencies_show(struct device *d,
char *buf)
{
struct devfreq *df = to_devfreq(d);
- struct device *dev = df->dev.parent;
- struct dev_pm_opp *opp;
ssize_t count = 0;
- unsigned long freq = 0;
+ int i;
- do {
- opp = dev_pm_opp_find_freq_ceil(dev, &freq);
- if (IS_ERR(opp))
- break;
+ mutex_lock(&df->lock);
- dev_pm_opp_put(opp);
+ for (i = 0; i < df->profile->max_state; i++)
count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
- "%lu ", freq);
- freq++;
- } while (1);
+ "%lu ", df->profile->freq_table[i]);
+ mutex_unlock(&df->lock);
/* Truncate the trailing space */
if (count)
count--;
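
After this change update_devfreq() clamps the governor's target against both the sysfs min_freq/max_freq and the OPP-derived scaling_min_freq/scaling_max_freq refreshed by the OPP notifier, with the tighter limit winning on each side. The effective clamp is roughly the following standalone sketch, not the kernel's exact code:

    /* Combine user limits and OPP-derived limits; the tighter one wins,
     * then clamp the governor's target into the resulting range. */
    static unsigned long clamp_target(unsigned long target,
                                      unsigned long user_min, unsigned long user_max,
                                      unsigned long scaling_min, unsigned long scaling_max)
    {
        unsigned long min_freq = scaling_min > user_min ? scaling_min : user_min;
        unsigned long max_freq = scaling_max < user_max ? scaling_max : user_max;

        if (min_freq && target < min_freq)
            target = min_freq;
        if (max_freq && target > max_freq)
            target = max_freq;

        return target;
    }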
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 49f68929e024..c25658b26598 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -436,7 +436,8 @@ static int exynos_bus_probe(struct platform_device *pdev)
ondemand_data->downdifferential = 5;
/* Add devfreq device to monitor and handle the exynos bus */
- bus->devfreq = devm_devfreq_add_device(dev, profile, "simple_ondemand",
+ bus->devfreq = devm_devfreq_add_device(dev, profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
ondemand_data);
if (IS_ERR(bus->devfreq)) {
dev_err(dev, "failed to add devfreq device\n");
@@ -488,7 +489,7 @@ passive:
passive_data->parent = parent_devfreq;
/* Add devfreq device for exynos bus with passive governor */
- bus->devfreq = devm_devfreq_add_device(dev, profile, "passive",
+ bus->devfreq = devm_devfreq_add_device(dev, profile, DEVFREQ_GOV_PASSIVE,
passive_data);
if (IS_ERR(bus->devfreq)) {
dev_err(dev,
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index 673ad8cc9a1d..3bc29acbd54e 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -183,7 +183,7 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
}
static struct devfreq_governor devfreq_passive = {
- .name = "passive",
+ .name = DEVFREQ_GOV_PASSIVE,
.immutable = 1,
.get_target_freq = devfreq_passive_get_target_freq,
.event_handler = devfreq_passive_event_handler,
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index c72f942f30a8..4d23ecfbd948 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -42,7 +42,7 @@ static int devfreq_performance_handler(struct devfreq *devfreq,
}
static struct devfreq_governor devfreq_performance = {
- .name = "performance",
+ .name = DEVFREQ_GOV_PERFORMANCE,
.get_target_freq = devfreq_performance_func,
.event_handler = devfreq_performance_handler,
};
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index 0c6bed567e6d..0c42f23249ef 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -39,7 +39,7 @@ static int devfreq_powersave_handler(struct devfreq *devfreq,
}
static struct devfreq_governor devfreq_powersave = {
- .name = "powersave",
+ .name = DEVFREQ_GOV_POWERSAVE,
.get_target_freq = devfreq_powersave_func,
.event_handler = devfreq_powersave_handler,
};
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index ae72ba5e78df..28e0f2de7100 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -125,7 +125,7 @@ static int devfreq_simple_ondemand_handler(struct devfreq *devfreq,
}
static struct devfreq_governor devfreq_simple_ondemand = {
- .name = "simple_ondemand",
+ .name = DEVFREQ_GOV_SIMPLE_ONDEMAND,
.get_target_freq = devfreq_simple_ondemand_func,
.event_handler = devfreq_simple_ondemand_handler,
};
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 77028c27593c..080607c3f34d 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -87,7 +87,7 @@ static struct attribute *dev_entries[] = {
NULL,
};
static const struct attribute_group dev_attr_group = {
- .name = "userspace",
+ .name = DEVFREQ_GOV_USERSPACE,
.attrs = dev_entries,
};
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 1b89ebbad02c..5dfbfa3cc878 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -431,7 +431,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
data->devfreq = devm_devfreq_add_device(dev,
&rk3399_devfreq_dmc_profile,
- "simple_ondemand",
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
&data->ondemand_data);
if (IS_ERR(data->devfreq))
return PTR_ERR(data->devfreq);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 4a038dcf5361..bc1cb284111c 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -625,7 +625,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
- struct sg_table *sg_table = ERR_PTR(-EINVAL);
+ struct sg_table *sg_table;
might_sleep();
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index dec3a815455d..b44d9d7db347 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -266,8 +266,7 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
* @dst: the destination reservation object
* @src: the source reservation object
*
-* Copy all fences from src to dst. Both src->lock as well as dst-lock must be
-* held.
+* Copy all fences from src to dst. dst->lock must be held.
*/
int reservation_object_copy_fences(struct reservation_object *dst,
struct reservation_object *src)
@@ -277,33 +276,62 @@ int reservation_object_copy_fences(struct reservation_object *dst,
size_t size;
unsigned i;
- src_list = reservation_object_get_list(src);
+ rcu_read_lock();
+ src_list = rcu_dereference(src->fence);
+retry:
if (src_list) {
- size = offsetof(typeof(*src_list),
- shared[src_list->shared_count]);
+ unsigned shared_count = src_list->shared_count;
+
+ size = offsetof(typeof(*src_list), shared[shared_count]);
+ rcu_read_unlock();
+
dst_list = kmalloc(size, GFP_KERNEL);
if (!dst_list)
return -ENOMEM;
- dst_list->shared_count = src_list->shared_count;
- dst_list->shared_max = src_list->shared_count;
- for (i = 0; i < src_list->shared_count; ++i)
- dst_list->shared[i] =
- dma_fence_get(src_list->shared[i]);
+ rcu_read_lock();
+ src_list = rcu_dereference(src->fence);
+ if (!src_list || src_list->shared_count > shared_count) {
+ kfree(dst_list);
+ goto retry;
+ }
+
+ dst_list->shared_count = 0;
+ dst_list->shared_max = shared_count;
+ for (i = 0; i < src_list->shared_count; ++i) {
+ struct dma_fence *fence;
+
+ fence = rcu_dereference(src_list->shared[i]);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fence->flags))
+ continue;
+
+ if (!dma_fence_get_rcu(fence)) {
+ kfree(dst_list);
+ src_list = rcu_dereference(src->fence);
+ goto retry;
+ }
+
+ if (dma_fence_is_signaled(fence)) {
+ dma_fence_put(fence);
+ continue;
+ }
+
+ dst_list->shared[dst_list->shared_count++] = fence;
+ }
} else {
dst_list = NULL;
}
+ new = dma_fence_get_rcu_safe(&src->fence_excl);
+ rcu_read_unlock();
+
kfree(dst->staged);
dst->staged = NULL;
src_list = reservation_object_get_list(dst);
-
old = reservation_object_get_excl(dst);
- new = reservation_object_get_excl(src);
-
- dma_fence_get(new);
preempt_disable();
write_seqcount_begin(&dst->seq);
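
reservation_object_copy_fences() now snapshots the shared list under rcu_read_lock() instead of requiring src->lock: it sizes the list, drops the RCU lock to allocate, re-reads the list, and retries if it grew or a fence could not be pinned with dma_fence_get_rcu(). The skeleton of that read/allocate/re-check/retry loop, stripped of the fence details (types and names here are illustrative):

    #include <linux/err.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/stddef.h>

    struct flist {
        unsigned int count;
        void *entry[];
    };

    /* Snapshot an RCU-published list without the writer's lock: size it under
     * rcu_read_lock(), allocate outside the critical section, re-check that
     * the list did not grow, and retry if it did. */
    static struct flist *flist_snapshot(struct flist __rcu **publ)
    {
        struct flist *src, *dst = NULL;
        unsigned int i, count;

        rcu_read_lock();
        src = rcu_dereference(*publ);
    retry:
        if (src) {
            count = src->count;
            rcu_read_unlock();

            dst = kmalloc(offsetof(struct flist, entry[count]), GFP_KERNEL);
            if (!dst)
                return ERR_PTR(-ENOMEM);

            rcu_read_lock();
            src = rcu_dereference(*publ);
            if (src && src->count > count) {
                kfree(dst);     /* list grew meanwhile: start over */
                dst = NULL;
                goto retry;
            }

            dst->count = src ? src->count : 0;
            for (i = 0; i < dst->count; i++)
                dst->entry[i] = src->entry[i];  /* real code also pins each entry */
        }
        rcu_read_unlock();

        return dst;
    }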
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 38cc7389a6c1..24f83f9eeaed 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -321,8 +321,16 @@ static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
{
struct sync_timeline *obj = file->private_data;
+ struct sync_pt *pt, *next;
+
+ spin_lock_irq(&obj->lock);
+
+ list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
+ dma_fence_set_error(&pt->base, -ENOENT);
+ dma_fence_signal_locked(&pt->base);
+ }
- smp_wmb();
+ spin_unlock_irq(&obj->lock);
sync_timeline_put(obj);
return 0;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fadc4d8783bd..27df3e2837fd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -115,7 +115,7 @@ config BCM_SBA_RAID
select DMA_ENGINE_RAID
select ASYNC_TX_DISABLE_XOR_VAL_DMA
select ASYNC_TX_DISABLE_PQ_VAL_DMA
- default ARCH_BCM_IPROC
+ default m if ARCH_BCM_IPROC
help
Enable support for Broadcom SBA RAID Engine. The SBA RAID
engine is available on most of the Broadcom iProc SoCs. It
@@ -483,6 +483,35 @@ config STM32_DMA
If you have a board based on such a MCU and wish to use DMA say Y
here.
+config STM32_DMAMUX
+ bool "STMicroelectronics STM32 dma multiplexer support"
+ depends on STM32_DMA || COMPILE_TEST
+ help
+ Enable support for the on-chip DMA multiplexer on STMicroelectronics
+ STM32 MCUs.
+ If you have a board based on such an MCU and wish to use DMAMUX, say Y
+ here.
+
+config STM32_MDMA
+ bool "STMicroelectronics STM32 master dma support"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on OF
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the on-chip MDMA controller on STMicroelectronics
+ STM32 platforms.
+ If you have a board based on an STM32 SoC and wish to use the master DMA,
+ say Y here.
+
+config SPRD_DMA
+ tristate "Spreadtrum DMA support"
+ depends on ARCH_SPRD || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the on-chip DMA controller on Spreadtrum platform.
+
config S3C24XX_DMAC
bool "Samsung S3C24XX DMA support"
depends on ARCH_S3C24XX || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 9d0156b50294..b9dca8a0e142 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -60,6 +60,9 @@ obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_STM32_DMA) += stm32-dma.o
+obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o
+obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o
+obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 7f58f06157f6..ef3f227ce3e6 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -385,7 +385,7 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
{
dev_crit(chan2dev(&atchan->chan_common),
- " desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n",
+ "desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n",
&lli->saddr, &lli->daddr,
lli->ctrla, lli->ctrlb, &lli->dscr);
}
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 6c2c44724637..3956a018bf5a 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -1,9 +1,14 @@
/*
* Copyright (C) 2017 Broadcom
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
/*
@@ -25,11 +30,8 @@
*
* The Broadcom SBA RAID driver does not require any register programming
* except submitting request to SBA hardware device via mailbox channels.
- * This driver implements a DMA device with one DMA channel using a set
- * of mailbox channels provided by Broadcom SoC specific ring manager
- * driver. To exploit parallelism (as described above), all DMA request
- * coming to SBA RAID DMA channel are broken down to smaller requests
- * and submitted to multiple mailbox channels in round-robin fashion.
+ * This driver implements a DMA device with one DMA channel using a single
+ * mailbox channel provided by Broadcom SoC specific ring manager driver.
* For having more SBA DMA channels, we can create more SBA device nodes
* in Broadcom SoC specific DTS based on number of hardware rings supported
* by Broadcom SoC ring manager.
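
With a single mailbox channel the round-robin over mchans[] disappears, and submission (in the later hunks) reduces to one mbox_send_message()/mbox_client_txdone() pair on sba->mchan, with SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL replacing the old channels-times-eight batch limit. The submission step in isolation, as a hedged sketch:

    #include <linux/mailbox_client.h>

    /* Send one message on a single mailbox channel with client-driven
     * tx completion (the client acks txdone itself). */
    static int submit_one(struct mbox_chan *chan, void *msg)
    {
        int ret = mbox_send_message(chan, msg);

        if (ret < 0)
            return ret;

        mbox_client_txdone(chan, 0);
        return 0;
    }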
@@ -85,6 +87,7 @@
#define SBA_CMD_GALOIS 0xe
#define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
+#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8
/* Driver helper macros */
#define to_sba_request(tx) \
@@ -142,9 +145,7 @@ struct sba_device {
u32 max_cmds_pool_size;
/* Mailbox client and Mailbox channels */
struct mbox_client client;
- int mchans_count;
- atomic_t mchans_current;
- struct mbox_chan **mchans;
+ struct mbox_chan *mchan;
struct device *mbox_dev;
/* DMA device and DMA channel */
struct dma_device dma_dev;
@@ -200,14 +201,6 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
/* ====== General helper routines ===== */
-static void sba_peek_mchans(struct sba_device *sba)
-{
- int mchan_idx;
-
- for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
- mbox_client_peek_data(sba->mchans[mchan_idx]);
-}
-
static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
bool found = false;
@@ -231,7 +224,7 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba)
* would have completed which will create more
* room for new requests.
*/
- sba_peek_mchans(sba);
+ mbox_client_peek_data(sba->mchan);
return NULL;
}
@@ -369,15 +362,11 @@ static void sba_cleanup_pending_requests(struct sba_device *sba)
static int sba_send_mbox_request(struct sba_device *sba,
struct sba_request *req)
{
- int mchans_idx, ret = 0;
-
- /* Select mailbox channel in round-robin fashion */
- mchans_idx = atomic_inc_return(&sba->mchans_current);
- mchans_idx = mchans_idx % sba->mchans_count;
+ int ret = 0;
/* Send message for the request */
req->msg.error = 0;
- ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
+ ret = mbox_send_message(sba->mchan, &req->msg);
if (ret < 0) {
dev_err(sba->dev, "send message failed with error %d", ret);
return ret;
@@ -390,7 +379,7 @@ static int sba_send_mbox_request(struct sba_device *sba,
}
/* Signal txdone for mailbox channel */
- mbox_client_txdone(sba->mchans[mchans_idx], ret);
+ mbox_client_txdone(sba->mchan, ret);
return ret;
}
@@ -402,13 +391,8 @@ static void _sba_process_pending_requests(struct sba_device *sba)
u32 count;
struct sba_request *req;
- /*
- * Process few pending requests
- *
- * For now, we process (<number_of_mailbox_channels> * 8)
- * number of requests at a time.
- */
- count = sba->mchans_count * 8;
+	/* Process a few pending requests */
+ count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
while (!list_empty(&sba->reqs_pending_list) && count) {
/* Get the first pending request */
req = list_first_entry(&sba->reqs_pending_list,
@@ -442,7 +426,9 @@ static void sba_process_received_request(struct sba_device *sba,
WARN_ON(tx->cookie < 0);
if (tx->cookie > 0) {
+ spin_lock_irqsave(&sba->reqs_lock, flags);
dma_cookie_complete(tx);
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
dmaengine_desc_get_callback_invoke(tx, NULL);
dma_descriptor_unmap(tx);
tx->callback = NULL;
@@ -570,7 +556,7 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan,
if (ret == DMA_COMPLETE)
return ret;
- sba_peek_mchans(sba);
+ mbox_client_peek_data(sba->mchan);
return dma_cookie_status(dchan, cookie, txstate);
}
@@ -1637,7 +1623,7 @@ static int sba_async_register(struct sba_device *sba)
static int sba_probe(struct platform_device *pdev)
{
- int i, ret = 0, mchans_count;
+ int ret = 0;
struct sba_device *sba;
struct platform_device *mbox_pdev;
struct of_phandle_args args;
@@ -1650,12 +1636,11 @@ static int sba_probe(struct platform_device *pdev)
sba->dev = &pdev->dev;
platform_set_drvdata(pdev, sba);
- /* Number of channels equals number of mailbox channels */
+	/* Number of mailbox channels should be at least 1 */
ret = of_count_phandle_with_args(pdev->dev.of_node,
"mboxes", "#mbox-cells");
if (ret <= 0)
return -ENODEV;
- mchans_count = ret;
/* Determine SBA version from DT compatible string */
if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
@@ -1688,7 +1673,7 @@ static int sba_probe(struct platform_device *pdev)
default:
return -EINVAL;
}
- sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count;
+ sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
sba->max_cmd_per_req = sba->max_pq_srcs + 3;
sba->max_xor_srcs = sba->max_cmd_per_req - 1;
sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
@@ -1702,55 +1687,30 @@ static int sba_probe(struct platform_device *pdev)
sba->client.knows_txdone = true;
sba->client.tx_tout = 0;
- /* Allocate mailbox channel array */
- sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
- sizeof(*sba->mchans), GFP_KERNEL);
- if (!sba->mchans)
- return -ENOMEM;
-
- /* Request mailbox channels */
- sba->mchans_count = 0;
- for (i = 0; i < mchans_count; i++) {
- sba->mchans[i] = mbox_request_channel(&sba->client, i);
- if (IS_ERR(sba->mchans[i])) {
- ret = PTR_ERR(sba->mchans[i]);
- goto fail_free_mchans;
- }
- sba->mchans_count++;
+ /* Request mailbox channel */
+ sba->mchan = mbox_request_channel(&sba->client, 0);
+ if (IS_ERR(sba->mchan)) {
+ ret = PTR_ERR(sba->mchan);
+ goto fail_free_mchan;
}
- atomic_set(&sba->mchans_current, 0);
/* Find-out underlying mailbox device */
ret = of_parse_phandle_with_args(pdev->dev.of_node,
"mboxes", "#mbox-cells", 0, &args);
if (ret)
- goto fail_free_mchans;
+ goto fail_free_mchan;
mbox_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
if (!mbox_pdev) {
ret = -ENODEV;
- goto fail_free_mchans;
+ goto fail_free_mchan;
}
sba->mbox_dev = &mbox_pdev->dev;
- /* All mailbox channels should be of same ring manager device */
- for (i = 1; i < mchans_count; i++) {
- ret = of_parse_phandle_with_args(pdev->dev.of_node,
- "mboxes", "#mbox-cells", i, &args);
- if (ret)
- goto fail_free_mchans;
- mbox_pdev = of_find_device_by_node(args.np);
- of_node_put(args.np);
- if (sba->mbox_dev != &mbox_pdev->dev) {
- ret = -EINVAL;
- goto fail_free_mchans;
- }
- }
-
/* Prealloc channel resource */
ret = sba_prealloc_channel_resources(sba);
if (ret)
- goto fail_free_mchans;
+ goto fail_free_mchan;
/* Check availability of debugfs */
if (!debugfs_initialized())
@@ -1777,24 +1737,22 @@ skip_debugfs:
goto fail_free_resources;
/* Print device info */
- dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
+ dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
dma_chan_name(&sba->dma_chan), sba->ver+1,
- sba->mchans_count);
+ dev_name(sba->mbox_dev));
return 0;
fail_free_resources:
debugfs_remove_recursive(sba->root);
sba_freeup_channel_resources(sba);
-fail_free_mchans:
- for (i = 0; i < sba->mchans_count; i++)
- mbox_free_channel(sba->mchans[i]);
+fail_free_mchan:
+ mbox_free_channel(sba->mchan);
return ret;
}
static int sba_remove(struct platform_device *pdev)
{
- int i;
struct sba_device *sba = platform_get_drvdata(pdev);
dma_async_device_unregister(&sba->dma_dev);
@@ -1803,8 +1761,7 @@ static int sba_remove(struct platform_device *pdev)
sba_freeup_channel_resources(sba);
- for (i = 0; i < sba->mchans_count; i++)
- mbox_free_channel(sba->mchans[i]);
+ mbox_free_channel(sba->mchan);
return 0;
}
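
For reference, the bcm-sba-raid change above drops the round-robin channel selection and drives a single mailbox channel through a client with knows_txdone set. A rough, illustrative sketch of that submission pattern outside the driver, with made-up context and function names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

struct demo_ctx {
	struct mbox_client client;	/* embedded mailbox client */
	struct mbox_chan *mchan;	/* the single requested channel */
};

static int demo_setup(struct device *dev, struct demo_ctx *ctx)
{
	ctx->client.dev = dev;
	ctx->client.tx_block = false;
	ctx->client.knows_txdone = true;	/* client signals tx completion itself */

	ctx->mchan = mbox_request_channel(&ctx->client, 0);
	return PTR_ERR_OR_ZERO(ctx->mchan);
}

static int demo_send(struct demo_ctx *ctx, void *msg)
{
	int ret = mbox_send_message(ctx->mchan, msg);

	if (ret < 0)
		return ret;
	/* with knows_txdone set, completion is reported explicitly */
	mbox_client_txdone(ctx->mchan, 0);
	return 0;
}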
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 74794c9859f6..da74fd74636b 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1319,8 +1319,8 @@ static void coh901318_list_print(struct coh901318_chan *cohc,
int i = 0;
while (l) {
- dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%pad"
- ", dst 0x%pad, link 0x%pad virt_link_addr 0x%p\n",
+ dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src %pad"
+ ", dst %pad, link %pad virt_link_addr 0x%p\n",
i, l, l->control, &l->src_addr, &l->dst_addr,
&l->link_addr, l->virt_link_addr);
i++;
@@ -2231,7 +2231,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
spin_lock_irqsave(&cohc->lock, flg);
dev_vdbg(COHC_2_DEV(cohc),
- "[%s] channel %d src 0x%pad dest 0x%pad size %zu\n",
+ "[%s] channel %d src %pad dest %pad size %zu\n",
__func__, cohc->id, &src, &dest, size);
if (flags & DMA_PREP_INTERRUPT)
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 7f0b9aa15867..2419fe524daa 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -72,6 +72,9 @@
#define AXI_DMAC_FLAG_CYCLIC BIT(0)
+/* The maximum ID allocated by the hardware is 31 */
+#define AXI_DMAC_SG_UNUSED 32U
+
struct axi_dmac_sg {
dma_addr_t src_addr;
dma_addr_t dest_addr;
@@ -80,6 +83,7 @@ struct axi_dmac_sg {
unsigned int dest_stride;
unsigned int src_stride;
unsigned int id;
+ bool schedule_when_free;
};
struct axi_dmac_desc {
@@ -200,11 +204,21 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
}
sg = &desc->sg[desc->num_submitted];
+ /* Already queued in cyclic mode. Wait for it to finish */
+ if (sg->id != AXI_DMAC_SG_UNUSED) {
+ sg->schedule_when_free = true;
+ return;
+ }
+
desc->num_submitted++;
- if (desc->num_submitted == desc->num_sgs)
- chan->next_desc = NULL;
- else
+ if (desc->num_submitted == desc->num_sgs) {
+ if (desc->cyclic)
+ desc->num_submitted = 0; /* Start again */
+ else
+ chan->next_desc = NULL;
+ } else {
chan->next_desc = desc;
+ }
sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
@@ -220,9 +234,11 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
/*
* If the hardware supports cyclic transfers and there is no callback to
- * call, enable hw cyclic mode to avoid unnecessary interrupts.
+ * call and only a single segment, enable hw cyclic mode to avoid
+ * unnecessary interrupts.
*/
- if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback)
+ if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
+ desc->num_sgs == 1)
flags |= AXI_DMAC_FLAG_CYCLIC;
axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
@@ -237,37 +253,52 @@ static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
struct axi_dmac_desc, vdesc.node);
}
-static void axi_dmac_transfer_done(struct axi_dmac_chan *chan,
+static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
unsigned int completed_transfers)
{
struct axi_dmac_desc *active;
struct axi_dmac_sg *sg;
+ bool start_next = false;
active = axi_dmac_active_desc(chan);
if (!active)
- return;
+ return false;
- if (active->cyclic) {
- vchan_cyclic_callback(&active->vdesc);
- } else {
- do {
- sg = &active->sg[active->num_completed];
- if (!(BIT(sg->id) & completed_transfers))
- break;
- active->num_completed++;
- if (active->num_completed == active->num_sgs) {
+ do {
+ sg = &active->sg[active->num_completed];
+ if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
+ break;
+ if (!(BIT(sg->id) & completed_transfers))
+ break;
+ active->num_completed++;
+ sg->id = AXI_DMAC_SG_UNUSED;
+ if (sg->schedule_when_free) {
+ sg->schedule_when_free = false;
+ start_next = true;
+ }
+
+ if (active->cyclic)
+ vchan_cyclic_callback(&active->vdesc);
+
+ if (active->num_completed == active->num_sgs) {
+ if (active->cyclic) {
+ active->num_completed = 0; /* wrap around */
+ } else {
list_del(&active->vdesc.node);
vchan_cookie_complete(&active->vdesc);
active = axi_dmac_active_desc(chan);
}
- } while (active);
- }
+ }
+ } while (active);
+
+ return start_next;
}
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
struct axi_dmac *dmac = devid;
unsigned int pending;
+ bool start_next = false;
pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
if (!pending)
@@ -281,10 +312,10 @@ static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
unsigned int completed;
completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
- axi_dmac_transfer_done(&dmac->chan, completed);
+ start_next = axi_dmac_transfer_done(&dmac->chan, completed);
}
/* Space has become available in the descriptor queue */
- if (pending & AXI_DMAC_IRQ_SOT)
+ if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
axi_dmac_start_transfer(&dmac->chan);
spin_unlock(&dmac->chan.vchan.lock);
@@ -334,12 +365,16 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
struct axi_dmac_desc *desc;
+ unsigned int i;
desc = kzalloc(sizeof(struct axi_dmac_desc) +
sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
if (!desc)
return NULL;
+ for (i = 0; i < num_sgs; i++)
+ desc->sg[i].id = AXI_DMAC_SG_UNUSED;
+
desc->num_sgs = num_sgs;
return desc;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 34ff53290b03..47edc7fbf91f 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -702,6 +702,7 @@ static int dmatest_func(void *data)
* free it this time?" dancing. For now, just
* leave it dangling.
*/
+ WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
dmaengine_unmap_put(um);
result("test timed out", total_tests, src_off, dst_off,
len, 0);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index a7ea20e7b8e9..9364a3ed345a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -891,6 +891,10 @@ static int edma_slave_config(struct dma_chan *chan,
cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
return -EINVAL;
+ if (cfg->src_maxburst > chan->device->max_burst ||
+ cfg->dst_maxburst > chan->device->max_burst)
+ return -EINVAL;
+
memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
return 0;
@@ -1868,6 +1872,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */
s_ddev->dev = ecc->dev;
INIT_LIST_HEAD(&s_ddev->channels);
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 54db1411ce73..0391f930aecc 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -23,6 +23,7 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -730,14 +731,23 @@ static int mdc_slave_config(struct dma_chan *chan,
return 0;
}
+static int mdc_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct mdc_chan *mchan = to_mdc_chan(chan);
+ struct device *dev = mdma2dev(mchan->mdma);
+
+ return pm_runtime_get_sync(dev);
+}
+
static void mdc_free_chan_resources(struct dma_chan *chan)
{
struct mdc_chan *mchan = to_mdc_chan(chan);
struct mdc_dma *mdma = mchan->mdma;
+ struct device *dev = mdma2dev(mdma);
mdc_terminate_all(chan);
-
mdma->soc->disable_chan(mchan);
+ pm_runtime_put(dev);
}
static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
@@ -854,6 +864,22 @@ static const struct of_device_id mdc_dma_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mdc_dma_of_match);
+static int img_mdc_runtime_suspend(struct device *dev)
+{
+ struct mdc_dma *mdma = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(mdma->clk);
+
+ return 0;
+}
+
+static int img_mdc_runtime_resume(struct device *dev)
+{
+ struct mdc_dma *mdma = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(mdma->clk);
+}
+
static int mdc_dma_probe(struct platform_device *pdev)
{
struct mdc_dma *mdma;
@@ -883,10 +909,6 @@ static int mdc_dma_probe(struct platform_device *pdev)
if (IS_ERR(mdma->clk))
return PTR_ERR(mdma->clk);
- ret = clk_prepare_enable(mdma->clk);
- if (ret)
- return ret;
-
dma_cap_zero(mdma->dma_dev.cap_mask);
dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
@@ -919,12 +941,13 @@ static int mdc_dma_probe(struct platform_device *pdev)
"img,max-burst-multiplier",
&mdma->max_burst_mult);
if (ret)
- goto disable_clk;
+ return ret;
mdma->dma_dev.dev = &pdev->dev;
mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
+ mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
mdma->dma_dev.device_tx_status = mdc_tx_status;
mdma->dma_dev.device_issue_pending = mdc_issue_pending;
@@ -945,15 +968,14 @@ static int mdc_dma_probe(struct platform_device *pdev)
mchan->mdma = mdma;
mchan->chan_nr = i;
mchan->irq = platform_get_irq(pdev, i);
- if (mchan->irq < 0) {
- ret = mchan->irq;
- goto disable_clk;
- }
+ if (mchan->irq < 0)
+ return mchan->irq;
+
ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
IRQ_TYPE_LEVEL_HIGH,
dev_name(&pdev->dev), mchan);
if (ret < 0)
- goto disable_clk;
+ return ret;
mchan->vc.desc_free = mdc_desc_free;
vchan_init(&mchan->vc, &mdma->dma_dev);
@@ -962,14 +984,19 @@ static int mdc_dma_probe(struct platform_device *pdev)
mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
sizeof(struct mdc_hw_list_desc),
4, 0);
- if (!mdma->desc_pool) {
- ret = -ENOMEM;
- goto disable_clk;
+ if (!mdma->desc_pool)
+ return -ENOMEM;
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = img_mdc_runtime_resume(&pdev->dev);
+ if (ret)
+ return ret;
}
ret = dma_async_device_register(&mdma->dma_dev);
if (ret)
- goto disable_clk;
+ goto suspend;
ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
if (ret)
@@ -982,8 +1009,10 @@ static int mdc_dma_probe(struct platform_device *pdev)
unregister:
dma_async_device_unregister(&mdma->dma_dev);
-disable_clk:
- clk_disable_unprepare(mdma->clk);
+suspend:
+ if (!pm_runtime_enabled(&pdev->dev))
+ img_mdc_runtime_suspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -1004,14 +1033,47 @@ static int mdc_dma_remove(struct platform_device *pdev)
tasklet_kill(&mchan->vc.task);
}
- clk_disable_unprepare(mdma->clk);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ img_mdc_runtime_suspend(&pdev->dev);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int img_mdc_suspend_late(struct device *dev)
+{
+ struct mdc_dma *mdma = dev_get_drvdata(dev);
+ int i;
+
+ /* Check that all channels are idle */
+ for (i = 0; i < mdma->nr_channels; i++) {
+ struct mdc_chan *mchan = &mdma->channels[i];
+
+ if (unlikely(mchan->desc))
+ return -EBUSY;
+ }
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int img_mdc_resume_early(struct device *dev)
+{
+ return pm_runtime_force_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops img_mdc_pm_ops = {
+ SET_RUNTIME_PM_OPS(img_mdc_runtime_suspend,
+ img_mdc_runtime_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(img_mdc_suspend_late,
+ img_mdc_resume_early)
+};
+
static struct platform_driver mdc_dma_driver = {
.driver = {
.name = "img-mdc-dma",
+ .pm = &img_mdc_pm_ops,
.of_match_table = of_match_ptr(mdc_dma_of_match),
},
.probe = mdc_dma_probe,
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index f681df8f0ed3..331f863c605e 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -364,9 +364,9 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
local_irq_restore(flags);
}
-static void imxdma_watchdog(unsigned long data)
+static void imxdma_watchdog(struct timer_list *t)
{
- struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
+ struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
struct imxdma_engine *imxdma = imxdmac->imxdma;
int channel = imxdmac->channel;
@@ -1153,9 +1153,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
}
imxdmac->irq = irq + i;
- init_timer(&imxdmac->watchdog);
- imxdmac->watchdog.function = &imxdma_watchdog;
- imxdmac->watchdog.data = (unsigned long)imxdmac;
+ timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0);
}
imxdmac->imxdma = imxdma;
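
The imx-dma hunk above is one instance of the tree-wide timer API conversion: the callback now takes a struct timer_list pointer and recovers its container with from_timer(), and timer_setup() replaces init_timer() plus the function/data assignments. A minimal sketch of the idiom, with illustrative names:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

struct demo_chan {
	struct timer_list watchdog;
	int channel;
};

static void demo_watchdog(struct timer_list *t)
{
	/* from_timer() maps the timer_list back to its containing structure */
	struct demo_chan *chan = from_timer(chan, t, watchdog);

	pr_warn("demo: channel %d watchdog fired\n", chan->channel);
}

static void demo_chan_init(struct demo_chan *chan)
{
	timer_setup(&chan->watchdog, demo_watchdog, 0);
	mod_timer(&chan->watchdog, jiffies + msecs_to_jiffies(100));
}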
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index a67ec1bdc4e0..2184881afe76 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -178,6 +178,14 @@
#define SDMA_WATERMARK_LEVEL_HWE BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT BIT(31)
+#define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+#define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \
+ BIT(DMA_MEM_TO_DEV) | \
+ BIT(DMA_DEV_TO_DEV))
+
/*
* Mode/Count of data node descriptors - IPCv2
*/
@@ -1851,9 +1859,9 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
- sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
+ sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
+ sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index f70cc74032ea..58d4ccd33672 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -474,7 +474,7 @@ int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
if (time_is_before_jiffies(ioat_chan->timer.expires)
&& timer_pending(&ioat_chan->timer)) {
mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
- ioat_timer_event((unsigned long)ioat_chan);
+ ioat_timer_event(&ioat_chan->timer);
}
return -ENOMEM;
@@ -862,9 +862,9 @@ static void check_active(struct ioatdma_chan *ioat_chan)
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
-void ioat_timer_event(unsigned long data)
+void ioat_timer_event(struct timer_list *t)
{
- struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
+ struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
dma_addr_t phys_complete;
u64 status;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 56200eefcf5e..1ab42ec2b7ff 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -406,10 +406,9 @@ enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate);
void ioat_cleanup_event(unsigned long data);
-void ioat_timer_event(unsigned long data);
+void ioat_timer_event(struct timer_list *t);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);
-void ioat_timer_event(unsigned long data);
/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 93e006c3441d..2f31d3d0caa6 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -760,7 +760,7 @@ ioat_init_channel(struct ioatdma_device *ioat_dma,
dma_cookie_init(&ioat_chan->dma_chan);
list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
ioat_dma->idx[idx] = ioat_chan;
- setup_timer(&ioat_chan->timer, ioat_timer_event, data);
+ timer_setup(&ioat_chan->timer, ioat_timer_event, 0);
tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
}
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index d3f918a9ee76..50559338239b 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1286,7 +1286,6 @@ MODULE_DEVICE_TABLE(of, nbpf_match);
static int nbpf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct of_device_id *of_id = of_match_device(nbpf_match, dev);
struct device_node *np = dev->of_node;
struct nbpf_device *nbpf;
struct dma_device *dma_dev;
@@ -1300,10 +1299,10 @@ static int nbpf_probe(struct platform_device *pdev)
BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);
/* DT only */
- if (!np || !of_id || !of_id->data)
+ if (!np)
return -ENODEV;
- cfg = of_id->data;
+ cfg = of_device_get_match_data(dev);
num_channels = cfg->num_channels;
nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels *
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 8c1665c8fe33..f6dd849159d8 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1288,6 +1288,10 @@ static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config
cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
return -EINVAL;
+ if (cfg->src_maxburst > chan->device->max_burst ||
+ cfg->dst_maxburst > chan->device->max_burst)
+ return -EINVAL;
+
memcpy(&c->cfg, cfg, sizeof(c->cfg));
return 0;
@@ -1482,6 +1486,7 @@ static int omap_dma_probe(struct platform_device *pdev)
od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
spin_lock_init(&od->lock);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index f9028e9d0dfc..afd8f27bda96 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -123,7 +123,7 @@ struct pch_dma_chan {
struct pch_dma {
struct dma_device dma;
void __iomem *membase;
- struct pci_pool *pool;
+ struct dma_pool *pool;
struct pch_dma_regs regs;
struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
struct pch_dma_chan channels[MAX_CHAN_NR];
@@ -437,7 +437,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
struct pch_dma *pd = to_pd(chan->device);
dma_addr_t addr;
- desc = pci_pool_zalloc(pd->pool, flags, &addr);
+ desc = dma_pool_zalloc(pd->pool, flags, &addr);
if (desc) {
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
@@ -549,7 +549,7 @@ static void pd_free_chan_resources(struct dma_chan *chan)
spin_unlock_irq(&pd_chan->lock);
list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
- pci_pool_free(pd->pool, desc, desc->txd.phys);
+ dma_pool_free(pd->pool, desc, desc->txd.phys);
pdc_enable_irq(chan, 0);
}
@@ -880,7 +880,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
goto err_iounmap;
}
- pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
+ pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev,
sizeof(struct pch_dma_desc), 4, 0);
if (!pd->pool) {
dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
@@ -931,7 +931,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
return 0;
err_free_pool:
- pci_pool_destroy(pd->pool);
+ dma_pool_destroy(pd->pool);
err_free_irq:
free_irq(pdev->irq, pd);
err_iounmap:
@@ -963,7 +963,7 @@ static void pch_dma_remove(struct pci_dev *pdev)
tasklet_kill(&pd_chan->tasklet);
}
- pci_pool_destroy(pd->pool);
+ dma_pool_destroy(pd->pool);
pci_iounmap(pdev, pd->membase);
pci_release_regions(pdev);
pci_disable_device(pdev);
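
The pch_dma change above replaces the deprecated pci_pool_* wrappers with the underlying dma_pool API; apart from creation taking a struct device instead of a pci_dev, the calls map one-to-one. A short, illustrative sketch of the dma_pool lifecycle (pool name and sizes are made up):

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int demo_pool_roundtrip(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t phys;
	void *desc;

	pool = dma_pool_create("demo_desc_pool", dev, 64, 4, 0);
	if (!pool)
		return -ENOMEM;

	/* dma_pool_zalloc() returns a zeroed, DMA-mapped descriptor */
	desc = dma_pool_zalloc(pool, GFP_KERNEL, &phys);
	if (!desc) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... program phys into the hardware, access desc from the CPU ... */

	dma_pool_free(pool, desc, phys);
	dma_pool_destroy(pool);
	return 0;
}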
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index f122c2a7b9f0..d7327fd5f445 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2390,7 +2390,8 @@ static inline void _init_desc(struct dma_pl330_desc *desc)
}
/* Returns the number of descriptors added to the DMAC pool */
-static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count)
+static int add_desc(struct list_head *pool, spinlock_t *lock,
+ gfp_t flg, int count)
{
struct dma_pl330_desc *desc;
unsigned long flags;
@@ -2400,27 +2401,28 @@ static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count)
if (!desc)
return 0;
- spin_lock_irqsave(&pl330->pool_lock, flags);
+ spin_lock_irqsave(lock, flags);
for (i = 0; i < count; i++) {
_init_desc(&desc[i]);
- list_add_tail(&desc[i].node, &pl330->desc_pool);
+ list_add_tail(&desc[i].node, pool);
}
- spin_unlock_irqrestore(&pl330->pool_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
return count;
}
-static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330)
+static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
+ spinlock_t *lock)
{
struct dma_pl330_desc *desc = NULL;
unsigned long flags;
- spin_lock_irqsave(&pl330->pool_lock, flags);
+ spin_lock_irqsave(lock, flags);
- if (!list_empty(&pl330->desc_pool)) {
- desc = list_entry(pl330->desc_pool.next,
+ if (!list_empty(pool)) {
+ desc = list_entry(pool->next,
struct dma_pl330_desc, node);
list_del_init(&desc->node);
@@ -2429,7 +2431,7 @@ static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330)
desc->txd.callback = NULL;
}
- spin_unlock_irqrestore(&pl330->pool_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
return desc;
}
@@ -2441,20 +2443,18 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
struct dma_pl330_desc *desc;
/* Pluck one desc from the pool of DMAC */
- desc = pluck_desc(pl330);
+ desc = pluck_desc(&pl330->desc_pool, &pl330->pool_lock);
/* If the DMAC pool is empty, alloc new */
if (!desc) {
- if (!add_desc(pl330, GFP_ATOMIC, 1))
- return NULL;
+ DEFINE_SPINLOCK(lock);
+ LIST_HEAD(pool);
- /* Try again */
- desc = pluck_desc(pl330);
- if (!desc) {
- dev_err(pch->dmac->ddma.dev,
- "%s:%d ALERT!\n", __func__, __LINE__);
+ if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
return NULL;
- }
+
+ desc = pluck_desc(&pool, &lock);
+ WARN_ON(!desc || !list_empty(&pool));
}
/* Initialize the descriptor */
@@ -2868,7 +2868,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&pl330->pool_lock);
/* Create a descriptor pool of default size */
- if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC))
+ if (!add_desc(&pl330->desc_pool, &pl330->pool_lock,
+ GFP_KERNEL, NR_DEFAULT_DESC))
dev_warn(&adev->dev, "unable to allocate desc\n");
INIT_LIST_HEAD(&pd->channels);
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 6d89fb6a6a92..d076940e0c69 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -46,6 +46,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
+#include <linux/circ_buf.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
@@ -78,6 +79,8 @@ struct bam_async_desc {
struct bam_desc_hw *curr_desc;
+ /* list node for the desc in the bam_chan list of descriptors */
+ struct list_head desc_node;
enum dma_transfer_direction dir;
size_t length;
struct bam_desc_hw desc[0];
@@ -347,6 +350,8 @@ static const struct reg_offset_data bam_v1_7_reg_info[] = {
#define BAM_DESC_FIFO_SIZE SZ_32K
#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
#define BAM_FIFO_SIZE (SZ_32K - 8)
+#define IS_BUSY(chan) (CIRC_SPACE(bchan->tail, bchan->head,\
+ MAX_DESCRIPTORS + 1) == 0)
struct bam_chan {
struct virt_dma_chan vc;
@@ -356,8 +361,6 @@ struct bam_chan {
/* configuration from device tree */
u32 id;
- struct bam_async_desc *curr_txd; /* current running dma */
-
/* runtime configuration */
struct dma_slave_config slave;
@@ -372,6 +375,8 @@ struct bam_chan {
unsigned int initialized; /* is the channel hw initialized? */
unsigned int paused; /* is the channel paused? */
unsigned int reconfigure; /* new slave config? */
+ /* list of descriptors currently processed */
+ struct list_head desc_list;
struct list_head node;
};
@@ -539,7 +544,7 @@ static void bam_free_chan(struct dma_chan *chan)
vchan_free_chan_resources(to_virt_chan(chan));
- if (bchan->curr_txd) {
+ if (!list_empty(&bchan->desc_list)) {
dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
goto err;
}
@@ -632,8 +637,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
if (flags & DMA_PREP_INTERRUPT)
async_desc->flags |= DESC_FLAG_EOT;
- else
- async_desc->flags |= DESC_FLAG_INT;
async_desc->num_desc = num_alloc;
async_desc->curr_desc = async_desc->desc;
@@ -684,14 +687,16 @@ err_out:
static int bam_dma_terminate_all(struct dma_chan *chan)
{
struct bam_chan *bchan = to_bam_chan(chan);
+ struct bam_async_desc *async_desc, *tmp;
unsigned long flag;
LIST_HEAD(head);
/* remove all transactions, including active transaction */
spin_lock_irqsave(&bchan->vc.lock, flag);
- if (bchan->curr_txd) {
- list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
- bchan->curr_txd = NULL;
+ list_for_each_entry_safe(async_desc, tmp,
+ &bchan->desc_list, desc_node) {
+ list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
+ list_del(&async_desc->desc_node);
}
vchan_get_all_descriptors(&bchan->vc, &head);
@@ -763,9 +768,9 @@ static int bam_resume(struct dma_chan *chan)
*/
static u32 process_channel_irqs(struct bam_device *bdev)
{
- u32 i, srcs, pipe_stts;
+ u32 i, srcs, pipe_stts, offset, avail;
unsigned long flags;
- struct bam_async_desc *async_desc;
+ struct bam_async_desc *async_desc, *tmp;
srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
@@ -785,27 +790,40 @@ static u32 process_channel_irqs(struct bam_device *bdev)
writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
spin_lock_irqsave(&bchan->vc.lock, flags);
- async_desc = bchan->curr_txd;
- if (async_desc) {
- async_desc->num_desc -= async_desc->xfer_len;
- async_desc->curr_desc += async_desc->xfer_len;
- bchan->curr_txd = NULL;
+ offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) &
+ P_SW_OFSTS_MASK;
+ offset /= sizeof(struct bam_desc_hw);
+
+		/* Number of descriptors the hardware has processed */
+ avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
+
+ list_for_each_entry_safe(async_desc, tmp,
+ &bchan->desc_list, desc_node) {
+ /* Not enough data to read */
+ if (avail < async_desc->xfer_len)
+ break;
/* manage FIFO */
bchan->head += async_desc->xfer_len;
bchan->head %= MAX_DESCRIPTORS;
+ async_desc->num_desc -= async_desc->xfer_len;
+ async_desc->curr_desc += async_desc->xfer_len;
+ avail -= async_desc->xfer_len;
+
/*
- * if complete, process cookie. Otherwise
+ * if complete, process cookie. Otherwise
* push back to front of desc_issued so that
* it gets restarted by the tasklet
*/
- if (!async_desc->num_desc)
+ if (!async_desc->num_desc) {
vchan_cookie_complete(&async_desc->vd);
- else
+ } else {
list_add(&async_desc->vd.node,
- &bchan->vc.desc_issued);
+ &bchan->vc.desc_issued);
+ }
+ list_del(&async_desc->desc_node);
}
spin_unlock_irqrestore(&bchan->vc.lock, flags);
@@ -867,6 +885,7 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct bam_chan *bchan = to_bam_chan(chan);
+ struct bam_async_desc *async_desc;
struct virt_dma_desc *vd;
int ret;
size_t residue = 0;
@@ -882,11 +901,17 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
spin_lock_irqsave(&bchan->vc.lock, flags);
vd = vchan_find_desc(&bchan->vc, cookie);
- if (vd)
+ if (vd) {
residue = container_of(vd, struct bam_async_desc, vd)->length;
- else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
- for (i = 0; i < bchan->curr_txd->num_desc; i++)
- residue += bchan->curr_txd->curr_desc[i].size;
+ } else {
+ list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
+ if (async_desc->vd.tx.cookie != cookie)
+ continue;
+
+ for (i = 0; i < async_desc->num_desc; i++)
+ residue += async_desc->curr_desc[i].size;
+ }
+ }
spin_unlock_irqrestore(&bchan->vc.lock, flags);
@@ -927,63 +952,86 @@ static void bam_start_dma(struct bam_chan *bchan)
{
struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
struct bam_device *bdev = bchan->bdev;
- struct bam_async_desc *async_desc;
+ struct bam_async_desc *async_desc = NULL;
struct bam_desc_hw *desc;
struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
sizeof(struct bam_desc_hw));
int ret;
+ unsigned int avail;
+ struct dmaengine_desc_callback cb;
lockdep_assert_held(&bchan->vc.lock);
if (!vd)
return;
- list_del(&vd->node);
-
- async_desc = container_of(vd, struct bam_async_desc, vd);
- bchan->curr_txd = async_desc;
-
ret = pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return;
- /* on first use, initialize the channel hardware */
- if (!bchan->initialized)
- bam_chan_init_hw(bchan, async_desc->dir);
+ while (vd && !IS_BUSY(bchan)) {
+ list_del(&vd->node);
- /* apply new slave config changes, if necessary */
- if (bchan->reconfigure)
- bam_apply_new_config(bchan, async_desc->dir);
+ async_desc = container_of(vd, struct bam_async_desc, vd);
- desc = bchan->curr_txd->curr_desc;
+ /* on first use, initialize the channel hardware */
+ if (!bchan->initialized)
+ bam_chan_init_hw(bchan, async_desc->dir);
- if (async_desc->num_desc > MAX_DESCRIPTORS)
- async_desc->xfer_len = MAX_DESCRIPTORS;
- else
- async_desc->xfer_len = async_desc->num_desc;
+ /* apply new slave config changes, if necessary */
+ if (bchan->reconfigure)
+ bam_apply_new_config(bchan, async_desc->dir);
- /* set any special flags on the last descriptor */
- if (async_desc->num_desc == async_desc->xfer_len)
- desc[async_desc->xfer_len - 1].flags |=
- cpu_to_le16(async_desc->flags);
- else
- desc[async_desc->xfer_len - 1].flags |=
- cpu_to_le16(DESC_FLAG_INT);
+ desc = async_desc->curr_desc;
+ avail = CIRC_SPACE(bchan->tail, bchan->head,
+ MAX_DESCRIPTORS + 1);
+
+ if (async_desc->num_desc > avail)
+ async_desc->xfer_len = avail;
+ else
+ async_desc->xfer_len = async_desc->num_desc;
+
+ /* set any special flags on the last descriptor */
+ if (async_desc->num_desc == async_desc->xfer_len)
+ desc[async_desc->xfer_len - 1].flags |=
+ cpu_to_le16(async_desc->flags);
- if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
- u32 partial = MAX_DESCRIPTORS - bchan->tail;
+ vd = vchan_next_desc(&bchan->vc);
- memcpy(&fifo[bchan->tail], desc,
- partial * sizeof(struct bam_desc_hw));
- memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
+ dmaengine_desc_get_callback(&async_desc->vd.tx, &cb);
+
+ /*
+		 * An interrupt is generated at this desc if:
+		 * - the FIFO is full.
+		 * - there are no more descriptors to add.
+		 * - a completion callback was requested for this desc. In
+		 *   that case, BAM delivers the completion callback for this
+		 *   desc and continues processing the next desc.
+ */
+ if (((avail <= async_desc->xfer_len) || !vd ||
+ dmaengine_desc_callback_valid(&cb)) &&
+ !(async_desc->flags & DESC_FLAG_EOT))
+ desc[async_desc->xfer_len - 1].flags |=
+ cpu_to_le16(DESC_FLAG_INT);
+
+ if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
+ u32 partial = MAX_DESCRIPTORS - bchan->tail;
+
+ memcpy(&fifo[bchan->tail], desc,
+ partial * sizeof(struct bam_desc_hw));
+ memcpy(fifo, &desc[partial],
+ (async_desc->xfer_len - partial) *
sizeof(struct bam_desc_hw));
- } else {
- memcpy(&fifo[bchan->tail], desc,
- async_desc->xfer_len * sizeof(struct bam_desc_hw));
- }
+ } else {
+ memcpy(&fifo[bchan->tail], desc,
+ async_desc->xfer_len *
+ sizeof(struct bam_desc_hw));
+ }
- bchan->tail += async_desc->xfer_len;
- bchan->tail %= MAX_DESCRIPTORS;
+ bchan->tail += async_desc->xfer_len;
+ bchan->tail %= MAX_DESCRIPTORS;
+ list_add_tail(&async_desc->desc_node, &bchan->desc_list);
+ }
/* ensure descriptor writes and dma start not reordered */
wmb();
@@ -1012,7 +1060,7 @@ static void dma_tasklet(unsigned long data)
bchan = &bdev->channels[i];
spin_lock_irqsave(&bchan->vc.lock, flags);
- if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
+ if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
bam_start_dma(bchan);
spin_unlock_irqrestore(&bchan->vc.lock, flags);
}
@@ -1033,7 +1081,7 @@ static void bam_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&bchan->vc.lock, flags);
/* if work pending and idle, start a transaction */
- if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
+ if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
bam_start_dma(bchan);
spin_unlock_irqrestore(&bchan->vc.lock, flags);
@@ -1133,6 +1181,7 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
vchan_init(&bchan->vc, &bdev->common);
bchan->vc.desc_free = bam_dma_free_desc;
+ INIT_LIST_HEAD(&bchan->desc_list);
}
static const struct of_device_id bam_of_match[] = {
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 1adeb3265085..c7a89c22890e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -823,6 +823,13 @@ static const struct sa11x0_dma_channel_desc chan_desc[] = {
CD(Ser4SSPRc, DDAR_RW),
};
+static const struct dma_slave_map sa11x0_dma_map[] = {
+ { "sa11x0-ir", "tx", "Ser2ICPTr" },
+ { "sa11x0-ir", "rx", "Ser2ICPRc" },
+ { "sa11x0-ssp", "tx", "Ser4SSPTr" },
+ { "sa11x0-ssp", "rx", "Ser4SSPRc" },
+};
+
static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
struct device *dev)
{
@@ -909,6 +916,10 @@ static int sa11x0_dma_probe(struct platform_device *pdev)
spin_lock_init(&d->lock);
INIT_LIST_HEAD(&d->chan_pending);
+ d->slave.filter.fn = sa11x0_dma_filter_fn;
+ d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map);
+ d->slave.filter.map = sa11x0_dma_map;
+
d->base = ioremap(res->start, resource_size(res));
if (!d->base) {
ret = -ENOMEM;
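
The sa11x0 hunks above register a dma_slave_map so that, on this non-DT platform, client drivers can use the generic channel request path rather than an explicit filter function. A hedged sketch of how a client would then obtain a channel (device and slave names taken from the map added above):

#include <linux/dmaengine.h>

/* e.g. in the sa11x0-ir driver, dev being its struct device */
static struct dma_chan *demo_get_rx_chan(struct device *dev)
{
	/* dev_name(dev) is matched against "sa11x0-ir" and "rx" against the
	 * slave name, which resolves to the "Ser2ICPRc" channel */
	return dma_request_chan(dev, "rx");
}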
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
new file mode 100644
index 000000000000..b652071a2096
--- /dev/null
+++ b/drivers/dma/sprd-dma.c
@@ -0,0 +1,988 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "virt-dma.h"
+
+#define SPRD_DMA_CHN_REG_OFFSET 0x1000
+#define SPRD_DMA_CHN_REG_LENGTH 0x40
+#define SPRD_DMA_MEMCPY_MIN_SIZE 64
+
+/* DMA global registers definition */
+#define SPRD_DMA_GLB_PAUSE 0x0
+#define SPRD_DMA_GLB_FRAG_WAIT 0x4
+#define SPRD_DMA_GLB_REQ_PEND0_EN 0x8
+#define SPRD_DMA_GLB_REQ_PEND1_EN 0xc
+#define SPRD_DMA_GLB_INT_RAW_STS 0x10
+#define SPRD_DMA_GLB_INT_MSK_STS 0x14
+#define SPRD_DMA_GLB_REQ_STS 0x18
+#define SPRD_DMA_GLB_CHN_EN_STS 0x1c
+#define SPRD_DMA_GLB_DEBUG_STS 0x20
+#define SPRD_DMA_GLB_ARB_SEL_STS 0x24
+#define SPRD_DMA_GLB_REQ_UID(uid) (0x4 * ((uid) - 1))
+#define SPRD_DMA_GLB_REQ_UID_OFFSET 0x2000
+
+/* DMA channel registers definition */
+#define SPRD_DMA_CHN_PAUSE 0x0
+#define SPRD_DMA_CHN_REQ 0x4
+#define SPRD_DMA_CHN_CFG 0x8
+#define SPRD_DMA_CHN_INTC 0xc
+#define SPRD_DMA_CHN_SRC_ADDR 0x10
+#define SPRD_DMA_CHN_DES_ADDR 0x14
+#define SPRD_DMA_CHN_FRG_LEN 0x18
+#define SPRD_DMA_CHN_BLK_LEN 0x1c
+#define SPRD_DMA_CHN_TRSC_LEN 0x20
+#define SPRD_DMA_CHN_TRSF_STEP 0x24
+#define SPRD_DMA_CHN_WARP_PTR 0x28
+#define SPRD_DMA_CHN_WARP_TO 0x2c
+#define SPRD_DMA_CHN_LLIST_PTR 0x30
+#define SPRD_DMA_CHN_FRAG_STEP 0x34
+#define SPRD_DMA_CHN_SRC_BLK_STEP 0x38
+#define SPRD_DMA_CHN_DES_BLK_STEP 0x3c
+
+/* SPRD_DMA_CHN_INTC register definition */
+#define SPRD_DMA_INT_MASK GENMASK(4, 0)
+#define SPRD_DMA_INT_CLR_OFFSET 24
+#define SPRD_DMA_FRAG_INT_EN BIT(0)
+#define SPRD_DMA_BLK_INT_EN BIT(1)
+#define SPRD_DMA_TRANS_INT_EN BIT(2)
+#define SPRD_DMA_LIST_INT_EN BIT(3)
+#define SPRD_DMA_CFG_ERR_INT_EN BIT(4)
+
+/* SPRD_DMA_CHN_CFG register definition */
+#define SPRD_DMA_CHN_EN BIT(0)
+#define SPRD_DMA_WAIT_BDONE_OFFSET 24
+#define SPRD_DMA_DONOT_WAIT_BDONE 1
+
+/* SPRD_DMA_CHN_REQ register definition */
+#define SPRD_DMA_REQ_EN BIT(0)
+
+/* SPRD_DMA_CHN_PAUSE register definition */
+#define SPRD_DMA_PAUSE_EN BIT(0)
+#define SPRD_DMA_PAUSE_STS BIT(2)
+#define SPRD_DMA_PAUSE_CNT 0x2000
+
+/* DMA_CHN_WARP_* register definition */
+#define SPRD_DMA_HIGH_ADDR_MASK GENMASK(31, 28)
+#define SPRD_DMA_LOW_ADDR_MASK GENMASK(31, 0)
+#define SPRD_DMA_HIGH_ADDR_OFFSET 4
+
+/* SPRD_DMA_CHN_INTC register definition */
+#define SPRD_DMA_FRAG_INT_STS BIT(16)
+#define SPRD_DMA_BLK_INT_STS BIT(17)
+#define SPRD_DMA_TRSC_INT_STS BIT(18)
+#define SPRD_DMA_LIST_INT_STS BIT(19)
+#define SPRD_DMA_CFGERR_INT_STS BIT(20)
+#define SPRD_DMA_CHN_INT_STS \
+ (SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS | \
+ SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS | \
+ SPRD_DMA_CFGERR_INT_STS)
+
+/* SPRD_DMA_CHN_FRG_LEN register definition */
+#define SPRD_DMA_SRC_DATAWIDTH_OFFSET 30
+#define SPRD_DMA_DES_DATAWIDTH_OFFSET 28
+#define SPRD_DMA_SWT_MODE_OFFSET 26
+#define SPRD_DMA_REQ_MODE_OFFSET 24
+#define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0)
+#define SPRD_DMA_FIX_SEL_OFFSET 21
+#define SPRD_DMA_FIX_EN_OFFSET 20
+#define SPRD_DMA_LLIST_END_OFFSET 19
+#define SPRD_DMA_FRG_LEN_MASK GENMASK(16, 0)
+
+/* SPRD_DMA_CHN_BLK_LEN register definition */
+#define SPRD_DMA_BLK_LEN_MASK GENMASK(16, 0)
+
+/* SPRD_DMA_CHN_TRSC_LEN register definition */
+#define SPRD_DMA_TRSC_LEN_MASK GENMASK(27, 0)
+
+/* SPRD_DMA_CHN_TRSF_STEP register definition */
+#define SPRD_DMA_DEST_TRSF_STEP_OFFSET 16
+#define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0
+#define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0)
+
+#define SPRD_DMA_SOFTWARE_UID 0
+
+/*
+ * enum sprd_dma_req_mode: define the DMA request mode
+ * @SPRD_DMA_FRAG_REQ: fragment request mode
+ * @SPRD_DMA_BLK_REQ: block request mode
+ * @SPRD_DMA_TRANS_REQ: transaction request mode
+ * @SPRD_DMA_LIST_REQ: link-list request mode
+ *
+ * We have 4 types of request mode: fragment mode, block mode, transaction mode
+ * and link-list mode. One transaction can contain several blocks, and one block
+ * can contain several fragments. Link-list mode means several DMA configurations
+ * can be saved in one reserved memory area, and the DMA controller then fetches
+ * each configuration automatically to start the transfer.
+ */
+enum sprd_dma_req_mode {
+ SPRD_DMA_FRAG_REQ,
+ SPRD_DMA_BLK_REQ,
+ SPRD_DMA_TRANS_REQ,
+ SPRD_DMA_LIST_REQ,
+};
+
+/*
+ * enum sprd_dma_int_type: define the DMA interrupt type
+ * @SPRD_DMA_NO_INT: no DMA interrupt needs to be generated.
+ * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request
+ * is done.
+ * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done.
+ * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment
+ * or one block request is done.
+ * @SPRD_DMA_TRANS_INT: transaction done interrupt when one transaction
+ * request is done.
+ * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one
+ * transaction request or fragment request is done.
+ * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one
+ * transaction request or block request is done.
+ * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request
+ * is done.
+ * @SPRD_DMA_CFGERR_INT: configure error interrupt when configuration is
+ * incorrect.
+ */
+enum sprd_dma_int_type {
+ SPRD_DMA_NO_INT,
+ SPRD_DMA_FRAG_INT,
+ SPRD_DMA_BLK_INT,
+ SPRD_DMA_BLK_FRAG_INT,
+ SPRD_DMA_TRANS_INT,
+ SPRD_DMA_TRANS_FRAG_INT,
+ SPRD_DMA_TRANS_BLK_INT,
+ SPRD_DMA_LIST_INT,
+ SPRD_DMA_CFGERR_INT,
+};
+
+/* dma channel hardware configuration */
+struct sprd_dma_chn_hw {
+ u32 pause;
+ u32 req;
+ u32 cfg;
+ u32 intc;
+ u32 src_addr;
+ u32 des_addr;
+ u32 frg_len;
+ u32 blk_len;
+ u32 trsc_len;
+ u32 trsf_step;
+ u32 wrap_ptr;
+ u32 wrap_to;
+ u32 llist_ptr;
+ u32 frg_step;
+ u32 src_blk_step;
+ u32 des_blk_step;
+};
+
+/* dma request description */
+struct sprd_dma_desc {
+ struct virt_dma_desc vd;
+ struct sprd_dma_chn_hw chn_hw;
+};
+
+/* dma channel description */
+struct sprd_dma_chn {
+ struct virt_dma_chan vc;
+ void __iomem *chn_base;
+ u32 chn_num;
+ u32 dev_id;
+ struct sprd_dma_desc *cur_desc;
+};
+
+/* SPRD dma device */
+struct sprd_dma_dev {
+ struct dma_device dma_dev;
+ void __iomem *glb_base;
+ struct clk *clk;
+ struct clk *ashb_clk;
+ int irq;
+ u32 total_chns;
+ struct sprd_dma_chn channels[0];
+};
+
+static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
+static struct of_dma_filter_info sprd_dma_info = {
+ .filter_fn = sprd_dma_filter_fn,
+};
+
+static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct sprd_dma_chn, vc.chan);
+}
+
+static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(c);
+
+ return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]);
+}
+
+static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct sprd_dma_desc, vd);
+}
+
+static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg,
+ u32 mask, u32 val)
+{
+ u32 orig = readl(schan->chn_base + reg);
+ u32 tmp;
+
+ tmp = (orig & ~mask) | val;
+ writel(tmp, schan->chn_base + reg);
+}
+
+static int sprd_dma_enable(struct sprd_dma_dev *sdev)
+{
+ int ret;
+
+ ret = clk_prepare_enable(sdev->clk);
+ if (ret)
+ return ret;
+
+ /*
+	 * The ashb_clk is optional and only used by the AGCP DMA controller,
+	 * so we need to check whether the ashb_clk needs to be enabled.
+ */
+ if (!IS_ERR(sdev->ashb_clk))
+ ret = clk_prepare_enable(sdev->ashb_clk);
+
+ return ret;
+}
+
+static void sprd_dma_disable(struct sprd_dma_dev *sdev)
+{
+ clk_disable_unprepare(sdev->clk);
+
+ /*
+	 * Check whether the optional ashb_clk for the AGCP DMA needs to be disabled.
+ */
+ if (!IS_ERR(sdev->ashb_clk))
+ clk_disable_unprepare(sdev->ashb_clk);
+}
+
+static void sprd_dma_set_uid(struct sprd_dma_chn *schan)
+{
+ struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+ u32 dev_id = schan->dev_id;
+
+ if (dev_id != SPRD_DMA_SOFTWARE_UID) {
+ u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
+ SPRD_DMA_GLB_REQ_UID(dev_id);
+
+ writel(schan->chn_num + 1, sdev->glb_base + uid_offset);
+ }
+}
+
+static void sprd_dma_unset_uid(struct sprd_dma_chn *schan)
+{
+ struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+ u32 dev_id = schan->dev_id;
+
+ if (dev_id != SPRD_DMA_SOFTWARE_UID) {
+ u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
+ SPRD_DMA_GLB_REQ_UID(dev_id);
+
+ writel(0, sdev->glb_base + uid_offset);
+ }
+}
+
+static void sprd_dma_clear_int(struct sprd_dma_chn *schan)
+{
+ sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC,
+ SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET,
+ SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET);
+}
+
+static void sprd_dma_enable_chn(struct sprd_dma_chn *schan)
+{
+ sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN,
+ SPRD_DMA_CHN_EN);
+}
+
+static void sprd_dma_disable_chn(struct sprd_dma_chn *schan)
+{
+ sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0);
+}
+
+static void sprd_dma_soft_request(struct sprd_dma_chn *schan)
+{
+ sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN,
+ SPRD_DMA_REQ_EN);
+}
+
+static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable)
+{
+ struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+ u32 pause, timeout = SPRD_DMA_PAUSE_CNT;
+
+ if (enable) {
+ sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
+ SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN);
+
+ do {
+ pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE);
+ if (pause & SPRD_DMA_PAUSE_STS)
+ break;
+
+ cpu_relax();
+ } while (--timeout > 0);
+
+ if (!timeout)
+ dev_warn(sdev->dma_dev.dev,
+ "pause dma controller timeout\n");
+ } else {
+ sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
+ SPRD_DMA_PAUSE_EN, 0);
+ }
+}
+
+static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan)
+{
+ u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG);
+
+ if (!(cfg & SPRD_DMA_CHN_EN))
+ return;
+
+ sprd_dma_pause_resume(schan, true);
+ sprd_dma_disable_chn(schan);
+}
+
+static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan)
+{
+ unsigned long addr, addr_high;
+
+ addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
+ addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) &
+ SPRD_DMA_HIGH_ADDR_MASK;
+
+ return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
+}
+
+static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan)
+{
+ struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+ u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) &
+ SPRD_DMA_CHN_INT_STS;
+
+ switch (intc_sts) {
+ case SPRD_DMA_CFGERR_INT_STS:
+ return SPRD_DMA_CFGERR_INT;
+
+ case SPRD_DMA_LIST_INT_STS:
+ return SPRD_DMA_LIST_INT;
+
+ case SPRD_DMA_TRSC_INT_STS:
+ return SPRD_DMA_TRANS_INT;
+
+ case SPRD_DMA_BLK_INT_STS:
+ return SPRD_DMA_BLK_INT;
+
+ case SPRD_DMA_FRAG_INT_STS:
+ return SPRD_DMA_FRAG_INT;
+
+ default:
+ dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n");
+ return SPRD_DMA_NO_INT;
+ }
+}
+
+static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan)
+{
+ u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
+
+ return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK;
+}
+
+static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
+ struct sprd_dma_desc *sdesc)
+{
+ struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw;
+
+ writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE);
+ writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG);
+ writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC);
+ writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
+ writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
+ writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
+ writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN);
+ writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN);
+ writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP);
+ writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR);
+ writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO);
+ writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR);
+ writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP);
+ writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP);
+ writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP);
+ writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
+}
+
+static void sprd_dma_start(struct sprd_dma_chn *schan)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&schan->vc);
+
+ if (!vd)
+ return;
+
+ list_del(&vd->node);
+ schan->cur_desc = to_sprd_dma_desc(vd);
+
+ /*
+ * Copy the DMA configuration from DMA descriptor to this hardware
+ * channel.
+ */
+ sprd_dma_set_chn_config(schan, schan->cur_desc);
+ sprd_dma_set_uid(schan);
+ sprd_dma_enable_chn(schan);
+
+ if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
+ sprd_dma_soft_request(schan);
+}
+
+static void sprd_dma_stop(struct sprd_dma_chn *schan)
+{
+ sprd_dma_stop_and_disable(schan);
+ sprd_dma_unset_uid(schan);
+ sprd_dma_clear_int(schan);
+}
+
+static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc,
+ enum sprd_dma_int_type int_type,
+ enum sprd_dma_req_mode req_mode)
+{
+ if (int_type == SPRD_DMA_NO_INT)
+ return false;
+
+ if (int_type >= req_mode + 1)
+ return true;
+ else
+ return false;
+}
+
+static irqreturn_t dma_irq_handle(int irq, void *dev_id)
+{
+ struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;
+ u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS);
+ struct sprd_dma_chn *schan;
+ struct sprd_dma_desc *sdesc;
+ enum sprd_dma_req_mode req_type;
+ enum sprd_dma_int_type int_type;
+ bool trans_done = false;
+ u32 i;
+
+ while (irq_status) {
+ i = __ffs(irq_status);
+ irq_status &= (irq_status - 1);
+ schan = &sdev->channels[i];
+
+ spin_lock(&schan->vc.lock);
+ int_type = sprd_dma_get_int_type(schan);
+ req_type = sprd_dma_get_req_type(schan);
+ sprd_dma_clear_int(schan);
+
+ sdesc = schan->cur_desc;
+
+ /* Check if the dma request descriptor is done. */
+ trans_done = sprd_dma_check_trans_done(sdesc, int_type,
+ req_type);
+ if (trans_done == true) {
+ vchan_cookie_complete(&sdesc->vd);
+ schan->cur_desc = NULL;
+ sprd_dma_start(schan);
+ }
+ spin_unlock(&schan->vc.lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ int ret;
+
+ ret = pm_runtime_get_sync(chan->device->dev);
+ if (ret < 0)
+ return ret;
+
+ schan->dev_id = SPRD_DMA_SOFTWARE_UID;
+ return 0;
+}
+
+static void sprd_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->vc.lock, flags);
+ sprd_dma_stop(schan);
+ spin_unlock_irqrestore(&schan->vc.lock, flags);
+
+ vchan_free_chan_resources(&schan->vc);
+ pm_runtime_put(chan->device->dev);
+}
+
+static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ enum dma_status ret;
+ u32 pos;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&schan->vc.lock, flags);
+ vd = vchan_find_desc(&schan->vc, cookie);
+ if (vd) {
+ struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
+ struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
+
+ if (hw->trsc_len > 0)
+ pos = hw->trsc_len;
+ else if (hw->blk_len > 0)
+ pos = hw->blk_len;
+ else if (hw->frg_len > 0)
+ pos = hw->frg_len;
+ else
+ pos = 0;
+ } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
+ pos = sprd_dma_get_dst_addr(schan);
+ } else {
+ pos = 0;
+ }
+ spin_unlock_irqrestore(&schan->vc.lock, flags);
+
+ dma_set_residue(txstate, pos);
+ return ret;
+}
+
+static void sprd_dma_issue_pending(struct dma_chan *chan)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->vc.lock, flags);
+ if (vchan_issue_pending(&schan->vc) && !schan->cur_desc)
+ sprd_dma_start(schan);
+ spin_unlock_irqrestore(&schan->vc.lock, flags);
+}
+
+static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
+ dma_addr_t dest, dma_addr_t src, size_t len)
+{
+ struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
+ struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
+ u32 datawidth, src_step, des_step, fragment_len;
+ u32 block_len, req_mode, irq_mode, transcation_len;
+ u32 fix_mode = 0, fix_en = 0;
+
+ if (IS_ALIGNED(len, 4)) {
+ datawidth = 2;
+ src_step = 4;
+ des_step = 4;
+ } else if (IS_ALIGNED(len, 2)) {
+ datawidth = 1;
+ src_step = 2;
+ des_step = 2;
+ } else {
+ datawidth = 0;
+ src_step = 1;
+ des_step = 1;
+ }
+
+ fragment_len = SPRD_DMA_MEMCPY_MIN_SIZE;
+ if (len <= SPRD_DMA_BLK_LEN_MASK) {
+ block_len = len;
+ transaction_len = 0;
+ req_mode = SPRD_DMA_BLK_REQ;
+ irq_mode = SPRD_DMA_BLK_INT;
+ } else {
+ block_len = SPRD_DMA_MEMCPY_MIN_SIZE;
+ transaction_len = len;
+ req_mode = SPRD_DMA_TRANS_REQ;
+ irq_mode = SPRD_DMA_TRANS_INT;
+ }
+
+ hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
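+ /* High address bits of src/dest are carried in the wrap_ptr/wrap_to fields. */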
+ hw->wrap_ptr = (u32)((src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
+ SPRD_DMA_HIGH_ADDR_MASK);
+ hw->wrap_to = (u32)((dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
+ SPRD_DMA_HIGH_ADDR_MASK);
+
+ hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
+ hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);
+
+ if ((src_step != 0 && des_step != 0) || (src_step | des_step) == 0) {
+ fix_en = 0;
+ } else {
+ fix_en = 1;
+ if (src_step)
+ fix_mode = 1;
+ else
+ fix_mode = 0;
+ }
+
+ hw->frg_len = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
+ datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
+ req_mode << SPRD_DMA_REQ_MODE_OFFSET |
+ fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
+ fix_en << SPRD_DMA_FIX_EN_OFFSET |
+ (fragment_len & SPRD_DMA_FRG_LEN_MASK);
+ hw->blk_len = block_len & SPRD_DMA_BLK_LEN_MASK;
+
+ hw->intc = SPRD_DMA_CFG_ERR_INT_EN;
+
+ switch (irq_mode) {
+ case SPRD_DMA_NO_INT:
+ break;
+
+ case SPRD_DMA_FRAG_INT:
+ hw->intc |= SPRD_DMA_FRAG_INT_EN;
+ break;
+
+ case SPRD_DMA_BLK_INT:
+ hw->intc |= SPRD_DMA_BLK_INT_EN;
+ break;
+
+ case SPRD_DMA_BLK_FRAG_INT:
+ hw->intc |= SPRD_DMA_BLK_INT_EN | SPRD_DMA_FRAG_INT_EN;
+ break;
+
+ case SPRD_DMA_TRANS_INT:
+ hw->intc |= SPRD_DMA_TRANS_INT_EN;
+ break;
+
+ case SPRD_DMA_TRANS_FRAG_INT:
+ hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_FRAG_INT_EN;
+ break;
+
+ case SPRD_DMA_TRANS_BLK_INT:
+ hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_BLK_INT_EN;
+ break;
+
+ case SPRD_DMA_LIST_INT:
+ hw->intc |= SPRD_DMA_LIST_INT_EN;
+ break;
+
+ case SPRD_DMA_CFGERR_INT:
+ hw->intc |= SPRD_DMA_CFG_ERR_INT_EN;
+ break;
+
+ default:
+ dev_err(sdev->dma_dev.dev, "invalid irq mode\n");
+ return -EINVAL;
+ }
+
+ if (transaction_len == 0)
+ hw->trsc_len = block_len & SPRD_DMA_TRSC_LEN_MASK;
+ else
+ hw->trsc_len = transaction_len & SPRD_DMA_TRSC_LEN_MASK;
+
+ hw->trsf_step = (des_step & SPRD_DMA_TRSF_STEP_MASK) <<
+ SPRD_DMA_DEST_TRSF_STEP_OFFSET |
+ (src_step & SPRD_DMA_TRSF_STEP_MASK) <<
+ SPRD_DMA_SRC_TRSF_STEP_OFFSET;
+
+ hw->frg_step = 0;
+ hw->src_blk_step = 0;
+ hw->des_blk_step = 0;
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *
+sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct sprd_dma_desc *sdesc;
+ int ret;
+
+ sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
+ if (!sdesc)
+ return NULL;
+
+ ret = sprd_dma_config(chan, sdesc, dest, src, len);
+ if (ret) {
+ kfree(sdesc);
+ return NULL;
+ }
+
+ return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
+}
+
+static int sprd_dma_pause(struct dma_chan *chan)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->vc.lock, flags);
+ sprd_dma_pause_resume(schan, true);
+ spin_unlock_irqrestore(&schan->vc.lock, flags);
+
+ return 0;
+}
+
+static int sprd_dma_resume(struct dma_chan *chan)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->vc.lock, flags);
+ sprd_dma_pause_resume(schan, false);
+ spin_unlock_irqrestore(&schan->vc.lock, flags);
+
+ return 0;
+}
+
+static int sprd_dma_terminate_all(struct dma_chan *chan)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&schan->vc.lock, flags);
+ sprd_dma_stop(schan);
+
+ vchan_get_all_descriptors(&schan->vc, &head);
+ spin_unlock_irqrestore(&schan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&schan->vc, &head);
+ return 0;
+}
+
+static void sprd_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
+
+ kfree(sdesc);
+}
+
+static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+ u32 req = *(u32 *)param;
+
+ if (req < sdev->total_chns)
+ return req == schan->chn_num + 1;
+ else
+ return false;
+}
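+
+/*
+ * Illustrative note: the controller is registered below with
+ * of_dma_simple_xlate(), which hands the first cell of a client's "dmas"
+ * specifier to this filter. A specifier such as <&ap_dma 1> (the phandle
+ * name is only an example) therefore selects hardware channel 0, since the
+ * channel ids used in the device tree are 1-based.
+ */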
+
+static int sprd_dma_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sprd_dma_dev *sdev;
+ struct sprd_dma_chn *dma_chn;
+ struct resource *res;
+ u32 chn_count;
+ int ret, i;
+
+ ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);
+ if (ret) {
+ dev_err(&pdev->dev, "get dma channels count failed\n");
+ return ret;
+ }
+
+ sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev) +
+ sizeof(*dma_chn) * chn_count,
+ GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ sdev->clk = devm_clk_get(&pdev->dev, "enable");
+ if (IS_ERR(sdev->clk)) {
+ dev_err(&pdev->dev, "get enable clock failed\n");
+ return PTR_ERR(sdev->clk);
+ }
+
+ /* ashb clock is optional for AGCP DMA */
+ sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb");
+ if (IS_ERR(sdev->ashb_clk))
+ dev_warn(&pdev->dev, "no optional ashb eb clock\n");
+
+ /*
+ * We have three DMA controllers: AP DMA, AON DMA and AGCP DMA. The AGCP
+ * DMA controller may be used without requesting its irq, which saves
+ * system power by not waking the system up on DMA interrupts. Thus the
+ * DMA interrupts property is optional.
+ */
+ sdev->irq = platform_get_irq(pdev, 0);
+ if (sdev->irq > 0) {
+ ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle,
+ 0, "sprd_dma", (void *)sdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request dma irq failed\n");
+ return ret;
+ }
+ } else {
+ dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ sdev->glb_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!sdev->glb_base)
+ return -ENOMEM;
+
+ dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
+ sdev->total_chns = chn_count;
+ sdev->dma_dev.chancnt = chn_count;
+ INIT_LIST_HEAD(&sdev->dma_dev.channels);
+ INIT_LIST_HEAD(&sdev->dma_dev.global_node);
+ sdev->dma_dev.dev = &pdev->dev;
+ sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
+ sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
+ sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
+ sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
+ sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
+ sdev->dma_dev.device_pause = sprd_dma_pause;
+ sdev->dma_dev.device_resume = sprd_dma_resume;
+ sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;
+
+ for (i = 0; i < chn_count; i++) {
+ dma_chn = &sdev->channels[i];
+ dma_chn->chn_num = i;
+ dma_chn->cur_desc = NULL;
+ /* get each channel's registers base address. */
+ dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET +
+ SPRD_DMA_CHN_REG_LENGTH * i;
+
+ dma_chn->vc.desc_free = sprd_dma_free_desc;
+ vchan_init(&dma_chn->vc, &sdev->dma_dev);
+ }
+
+ platform_set_drvdata(pdev, sdev);
+ ret = sprd_dma_enable(sdev);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto err_rpm;
+
+ ret = dma_async_device_register(&sdev->dma_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "register dma device failed:%d\n", ret);
+ goto err_register;
+ }
+
+ sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
+ ret = of_dma_controller_register(np, of_dma_simple_xlate,
+ &sprd_dma_info);
+ if (ret)
+ goto err_of_register;
+
+ pm_runtime_put(&pdev->dev);
+ return 0;
+
+err_of_register:
+ dma_async_device_unregister(&sdev->dma_dev);
+err_register:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+err_rpm:
+ sprd_dma_disable(sdev);
+ return ret;
+}
+
+static int sprd_dma_remove(struct platform_device *pdev)
+{
+ struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
+ struct sprd_dma_chn *c, *cn;
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ /* explicitly free the irq */
+ if (sdev->irq > 0)
+ devm_free_irq(&pdev->dev, sdev->irq, sdev);
+
+ list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels,
+ vc.chan.device_node) {
+ list_del(&c->vc.chan.device_node);
+ tasklet_kill(&c->vc.task);
+ }
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&sdev->dma_dev);
+ sprd_dma_disable(sdev);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id sprd_dma_match[] = {
+ { .compatible = "sprd,sc9860-dma", },
+ {},
+};
+
+static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
+{
+ struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
+
+ sprd_dma_disable(sdev);
+ return 0;
+}
+
+static int __maybe_unused sprd_dma_runtime_resume(struct device *dev)
+{
+ struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = sprd_dma_enable(sdev);
+ if (ret)
+ dev_err(sdev->dma_dev.dev, "enable dma failed\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops sprd_dma_pm_ops = {
+ SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend,
+ sprd_dma_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver sprd_dma_driver = {
+ .probe = sprd_dma_probe,
+ .remove = sprd_dma_remove,
+ .driver = {
+ .name = "sprd-dma",
+ .of_match_table = sprd_dma_match,
+ .pm = &sprd_dma_pm_ops,
+ },
+};
+module_platform_driver(sprd_dma_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DMA driver for Spreadtrum");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
+MODULE_ALIAS("platform:sprd-dma");
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
new file mode 100644
index 000000000000..d5db0f6e1ff8
--- /dev/null
+++ b/drivers/dma/stm32-dmamux.c
@@ -0,0 +1,327 @@
+/*
+ *
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
+ * Pierre-Yves Mordret <pierre-yves.mordret@st.com>
+ *
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * DMA Router driver for STM32 DMA MUX
+ *
+ * Based on TI DMA Crossbar driver
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define STM32_DMAMUX_CCR(x) (0x4 * (x))
+#define STM32_DMAMUX_MAX_DMA_REQUESTS 32
+#define STM32_DMAMUX_MAX_REQUESTS 255
+
+struct stm32_dmamux {
+ u32 master;
+ u32 request;
+ u32 chan_id;
+};
+
+struct stm32_dmamux_data {
+ struct dma_router dmarouter;
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *iomem;
+ u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
+ u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
+ spinlock_t lock; /* Protects register access */
+ unsigned long *dma_inuse; /* Used DMA channel */
+ u32 dma_reqs[]; /* Number of DMA requests per DMA master.
+ * [0] holds the number of DMA masters.
+ * Must be kept at the very end of this structure.
+ */
+};
+
+static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg)
+{
+ return readl_relaxed(iomem + reg);
+}
+
+static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val)
+{
+ writel_relaxed(val, iomem + reg);
+}
+
+static void stm32_dmamux_free(struct device *dev, void *route_data)
+{
+ struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev);
+ struct stm32_dmamux *mux = route_data;
+ unsigned long flags;
+
+ /* Clear dma request */
+ spin_lock_irqsave(&dmamux->lock, flags);
+
+ stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
+ clear_bit(mux->chan_id, dmamux->dma_inuse);
+
+ if (!IS_ERR(dmamux->clk))
+ clk_disable(dmamux->clk);
+
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
+ dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n",
+ mux->request, mux->master, mux->chan_id);
+
+ kfree(mux);
+}
+
+static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+ struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
+ struct stm32_dmamux *mux;
+ u32 i, min, max;
+ int ret;
+ unsigned long flags;
+
+ if (dma_spec->args_count != 3) {
+ dev_err(&pdev->dev, "invalid number of dma mux args\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (dma_spec->args[0] > dmamux->dmamux_requests) {
+ dev_err(&pdev->dev, "invalid mux request number: %d\n",
+ dma_spec->args[0]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_irqsave(&dmamux->lock, flags);
+ mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
+ dmamux->dma_requests);
+ set_bit(mux->chan_id, dmamux->dma_inuse);
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
+ if (mux->chan_id == dmamux->dma_requests) {
+ dev_err(&pdev->dev, "Run out of free DMA requests\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Look for DMA Master */
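+ /*
+ * e.g. with two masters of 8 requests each, chan_id 10 falls in the
+ * second range: master 1, local channel chan_id - min = 2.
+ */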
+ for (i = 1, min = 0, max = dmamux->dma_reqs[i];
+ i <= dmamux->dma_reqs[0];
+ min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i])
+ if (mux->chan_id < max)
+ break;
+ mux->master = i - 1;
+
+ /* The of_node_put() will be done in of_dma_router_xlate function */
+ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
+ if (!dma_spec->np) {
+ dev_err(&pdev->dev, "can't get dma master\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Set dma request */
+ spin_lock_irqsave(&dmamux->lock, flags);
+ if (!IS_ERR(dmamux->clk)) {
+ ret = clk_enable(dmamux->clk);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+ dev_err(&pdev->dev, "clk_prep_enable issue: %d\n", ret);
+ goto error;
+ }
+ }
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
+ mux->request = dma_spec->args[0];
+
+ /* craft DMA spec */
+ dma_spec->args[3] = dma_spec->args[2];
+ dma_spec->args[2] = dma_spec->args[1];
+ dma_spec->args[1] = 0;
+ dma_spec->args[0] = mux->chan_id - min;
+ dma_spec->args_count = 4;
+
+ stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id),
+ mux->request);
+ dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
+ mux->request, mux->master, mux->chan_id);
+
+ return mux;
+
+error:
+ clear_bit(mux->chan_id, dmamux->dma_inuse);
+ kfree(mux);
+ return ERR_PTR(ret);
+}
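+
+/*
+ * Illustrative note: a DMA client references the router with a three-cell
+ * specifier, e.g. (cell values shown here are only an example, see the DT
+ * binding for their exact meaning):
+ *
+ * dmas = <&dmamux1 6 0x400 0x0>;
+ *
+ * The first cell is the DMAMUX request number; the routine above remaps it
+ * to a four-cell specifier for the selected "st,stm32-dma" master, whose
+ * first cell is the allocated channel id relative to that master.
+ */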
+
+static const struct of_device_id stm32_stm32dma_master_match[] = {
+ { .compatible = "st,stm32-dma", },
+ {},
+};
+
+static int stm32_dmamux_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *match;
+ struct device_node *dma_node;
+ struct stm32_dmamux_data *stm32_dmamux;
+ struct resource *res;
+ void __iomem *iomem;
+ int i, count, ret;
+ u32 dma_req;
+
+ if (!node)
+ return -ENODEV;
+
+ count = device_property_read_u32_array(&pdev->dev, "dma-masters",
+ NULL, 0);
+ if (count < 0) {
+ dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
+ return -ENODEV;
+ }
+
+ stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
+ sizeof(u32) * (count + 1), GFP_KERNEL);
+ if (!stm32_dmamux)
+ return -ENOMEM;
+
+ dma_req = 0;
+ for (i = 1; i <= count; i++) {
+ dma_node = of_parse_phandle(node, "dma-masters", i - 1);
+
+ match = of_match_node(stm32_stm32dma_master_match, dma_node);
+ if (!match) {
+ dev_err(&pdev->dev, "DMA master is not supported\n");
+ of_node_put(dma_node);
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dma_node, "dma-requests",
+ &stm32_dmamux->dma_reqs[i])) {
+ dev_info(&pdev->dev,
+ "Missing MUX output information, using %u.\n",
+ STM32_DMAMUX_MAX_DMA_REQUESTS);
+ stm32_dmamux->dma_reqs[i] =
+ STM32_DMAMUX_MAX_DMA_REQUESTS;
+ }
+ dma_req += stm32_dmamux->dma_reqs[i];
+ of_node_put(dma_node);
+ }
+
+ if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
+ dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
+ return -ENODEV;
+ }
+
+ stm32_dmamux->dma_requests = dma_req;
+ stm32_dmamux->dma_reqs[0] = count;
+ stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev,
+ BITS_TO_LONGS(dma_req),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!stm32_dmamux->dma_inuse)
+ return -ENOMEM;
+
+ if (device_property_read_u32(&pdev->dev, "dma-requests",
+ &stm32_dmamux->dmamux_requests)) {
+ stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
+ dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
+ stm32_dmamux->dmamux_requests);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ iomem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(iomem))
+ return PTR_ERR(iomem);
+
+ spin_lock_init(&stm32_dmamux->lock);
+
+ stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(stm32_dmamux->clk)) {
+ ret = PTR_ERR(stm32_dmamux->clk);
+ if (ret == -EPROBE_DEFER)
+ dev_info(&pdev->dev, "Missing controller clock\n");
+ return ret;
+ }
+
+ stm32_dmamux->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (!IS_ERR(stm32_dmamux->rst)) {
+ reset_control_assert(stm32_dmamux->rst);
+ udelay(2);
+ reset_control_deassert(stm32_dmamux->rst);
+ }
+
+ stm32_dmamux->iomem = iomem;
+ stm32_dmamux->dmarouter.dev = &pdev->dev;
+ stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;
+
+ platform_set_drvdata(pdev, stm32_dmamux);
+
+ if (!IS_ERR(stm32_dmamux->clk)) {
+ ret = clk_prepare_enable(stm32_dmamux->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* Reset the dmamux */
+ for (i = 0; i < stm32_dmamux->dma_requests; i++)
+ stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);
+
+ if (!IS_ERR(stm32_dmamux->clk))
+ clk_disable(stm32_dmamux->clk);
+
+ return of_dma_router_register(node, stm32_dmamux_route_allocate,
+ &stm32_dmamux->dmarouter);
+}
+
+static const struct of_device_id stm32_dmamux_match[] = {
+ { .compatible = "st,stm32h7-dmamux" },
+ {},
+};
+
+static struct platform_driver stm32_dmamux_driver = {
+ .probe = stm32_dmamux_probe,
+ .driver = {
+ .name = "stm32-dmamux",
+ .of_match_table = stm32_dmamux_match,
+ },
+};
+
+static int __init stm32_dmamux_init(void)
+{
+ return platform_driver_register(&stm32_dmamux_driver);
+}
+arch_initcall(stm32_dmamux_init);
+
+MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
+MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
+MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
new file mode 100644
index 000000000000..daa1602eb9f5
--- /dev/null
+++ b/drivers/dma/stm32-mdma.c
@@ -0,0 +1,1682 @@
+/*
+ *
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
+ * Pierre-Yves Mordret <pierre-yves.mordret@st.com>
+ *
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * Driver for STM32 MDMA controller
+ *
+ * Inspired by stm32-dma.c and dma-jz4780.c
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "virt-dma.h"
+
+/* MDMA Generic getter/setter */
+#define STM32_MDMA_SHIFT(n) (ffs(n) - 1)
+#define STM32_MDMA_SET(n, mask) (((n) << STM32_MDMA_SHIFT(mask)) & \
+ (mask))
+#define STM32_MDMA_GET(n, mask) (((n) & (mask)) >> \
+ STM32_MDMA_SHIFT(mask))
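+/*
+ * e.g. with mask = GENMASK(7, 6): STM32_MDMA_SET(1, mask) yields BIT(6)
+ * and STM32_MDMA_GET(0x80, mask) yields 2.
+ */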
+
+#define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg, channels 0-31 */
+#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg, channels 32-62 */
+
+/* MDMA Channel x interrupt/status register */
+#define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */
+#define STM32_MDMA_CISR_CRQA BIT(16)
+#define STM32_MDMA_CISR_TCIF BIT(4)
+#define STM32_MDMA_CISR_BTIF BIT(3)
+#define STM32_MDMA_CISR_BRTIF BIT(2)
+#define STM32_MDMA_CISR_CTCIF BIT(1)
+#define STM32_MDMA_CISR_TEIF BIT(0)
+
+/* MDMA Channel x interrupt flag clear register */
+#define STM32_MDMA_CIFCR(x) (0x44 + 0x40 * (x))
+#define STM32_MDMA_CIFCR_CLTCIF BIT(4)
+#define STM32_MDMA_CIFCR_CBTIF BIT(3)
+#define STM32_MDMA_CIFCR_CBRTIF BIT(2)
+#define STM32_MDMA_CIFCR_CCTCIF BIT(1)
+#define STM32_MDMA_CIFCR_CTEIF BIT(0)
+#define STM32_MDMA_CIFCR_CLEAR_ALL (STM32_MDMA_CIFCR_CLTCIF \
+ | STM32_MDMA_CIFCR_CBTIF \
+ | STM32_MDMA_CIFCR_CBRTIF \
+ | STM32_MDMA_CIFCR_CCTCIF \
+ | STM32_MDMA_CIFCR_CTEIF)
+
+/* MDMA Channel x error status register */
+#define STM32_MDMA_CESR(x) (0x48 + 0x40 * (x))
+#define STM32_MDMA_CESR_BSE BIT(11)
+#define STM32_MDMA_CESR_ASR BIT(10)
+#define STM32_MDMA_CESR_TEMD BIT(9)
+#define STM32_MDMA_CESR_TELD BIT(8)
+#define STM32_MDMA_CESR_TED BIT(7)
+#define STM32_MDMA_CESR_TEA_MASK GENMASK(6, 0)
+
+/* MDMA Channel x control register */
+#define STM32_MDMA_CCR(x) (0x4C + 0x40 * (x))
+#define STM32_MDMA_CCR_SWRQ BIT(16)
+#define STM32_MDMA_CCR_WEX BIT(14)
+#define STM32_MDMA_CCR_HEX BIT(13)
+#define STM32_MDMA_CCR_BEX BIT(12)
+#define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6)
+#define STM32_MDMA_CCR_PL(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CCR_PL_MASK)
+#define STM32_MDMA_CCR_TCIE BIT(5)
+#define STM32_MDMA_CCR_BTIE BIT(4)
+#define STM32_MDMA_CCR_BRTIE BIT(3)
+#define STM32_MDMA_CCR_CTCIE BIT(2)
+#define STM32_MDMA_CCR_TEIE BIT(1)
+#define STM32_MDMA_CCR_EN BIT(0)
+#define STM32_MDMA_CCR_IRQ_MASK (STM32_MDMA_CCR_TCIE \
+ | STM32_MDMA_CCR_BTIE \
+ | STM32_MDMA_CCR_BRTIE \
+ | STM32_MDMA_CCR_CTCIE \
+ | STM32_MDMA_CCR_TEIE)
+
+/* MDMA Channel x transfer configuration register */
+#define STM32_MDMA_CTCR(x) (0x50 + 0x40 * (x))
+#define STM32_MDMA_CTCR_BWM BIT(31)
+#define STM32_MDMA_CTCR_SWRM BIT(30)
+#define STM32_MDMA_CTCR_TRGM_MSK GENMASK(29, 28)
+#define STM32_MDMA_CTCR_TRGM(n) STM32_MDMA_SET((n), \
+ STM32_MDMA_CTCR_TRGM_MSK)
+#define STM32_MDMA_CTCR_TRGM_GET(n) STM32_MDMA_GET((n), \
+ STM32_MDMA_CTCR_TRGM_MSK)
+#define STM32_MDMA_CTCR_PAM_MASK GENMASK(27, 26)
+#define STM32_MDMA_CTCR_PAM(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CTCR_PAM_MASK)
+#define STM32_MDMA_CTCR_PKE BIT(25)
+#define STM32_MDMA_CTCR_TLEN_MSK GENMASK(24, 18)
+#define STM32_MDMA_CTCR_TLEN(n) STM32_MDMA_SET((n), \
+ STM32_MDMA_CTCR_TLEN_MSK)
+#define STM32_MDMA_CTCR_TLEN_GET(n) STM32_MDMA_GET((n), \
+ STM32_MDMA_CTCR_TLEN_MSK)
+#define STM32_MDMA_CTCR_LEN2_MSK GENMASK(25, 18)
+#define STM32_MDMA_CTCR_LEN2(n) STM32_MDMA_SET((n), \
+ STM32_MDMA_CTCR_LEN2_MSK)
+#define STM32_MDMA_CTCR_LEN2_GET(n) STM32_MDMA_GET((n), \
+ STM32_MDMA_CTCR_LEN2_MSK)
+#define STM32_MDMA_CTCR_DBURST_MASK GENMASK(17, 15)
+#define STM32_MDMA_CTCR_DBURST(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CTCR_DBURST_MASK)
+#define STM32_MDMA_CTCR_SBURST_MASK GENMASK(14, 12)
+#define STM32_MDMA_CTCR_SBURST(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CTCR_SBURST_MASK)
+#define STM32_MDMA_CTCR_DINCOS_MASK GENMASK(11, 10)
+#define STM32_MDMA_CTCR_DINCOS(n) STM32_MDMA_SET((n), \
+ STM32_MDMA_CTCR_DINCOS_MASK)
+#define STM32_MDMA_CTCR_SINCOS_MASK GENMASK(9, 8)
+#define STM32_MDMA_CTCR_SINCOS(n) STM32_MDMA_SET((n), \
+ STM32_MDMA_CTCR_SINCOS_MASK)
+#define STM32_MDMA_CTCR_DSIZE_MASK GENMASK(7, 6)
+#define STM32_MDMA_CTCR_DSIZE(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CTCR_DSIZE_MASK)
+#define STM32_MDMA_CTCR_SSIZE_MASK GENMASK(5, 4)
+#define STM32_MDMA_CTCR_SSIZE(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CTCR_SSIZE_MASK)
+#define STM32_MDMA_CTCR_DINC_MASK GENMASK(3, 2)
+#define STM32_MDMA_CTCR_DINC(n) STM32_MDMA_SET((n), \
+ STM32_MDMA_CTCR_DINC_MASK)
+#define STM32_MDMA_CTCR_SINC_MASK GENMASK(1, 0)
+#define STM32_MDMA_CTCR_SINC(n) STM32_MDMA_SET((n), \
+ STM32_MDMA_CTCR_SINC_MASK)
+#define STM32_MDMA_CTCR_CFG_MASK (STM32_MDMA_CTCR_SINC_MASK \
+ | STM32_MDMA_CTCR_DINC_MASK \
+ | STM32_MDMA_CTCR_SINCOS_MASK \
+ | STM32_MDMA_CTCR_DINCOS_MASK \
+ | STM32_MDMA_CTCR_LEN2_MSK \
+ | STM32_MDMA_CTCR_TRGM_MSK)
+
+/* MDMA Channel x block number of data register */
+#define STM32_MDMA_CBNDTR(x) (0x54 + 0x40 * (x))
+#define STM32_MDMA_CBNDTR_BRC_MK GENMASK(31, 20)
+#define STM32_MDMA_CBNDTR_BRC(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CBNDTR_BRC_MK)
+#define STM32_MDMA_CBNDTR_BRC_GET(n) STM32_MDMA_GET((n), \
+ STM32_MDMA_CBNDTR_BRC_MK)
+
+#define STM32_MDMA_CBNDTR_BRDUM BIT(19)
+#define STM32_MDMA_CBNDTR_BRSUM BIT(18)
+#define STM32_MDMA_CBNDTR_BNDT_MASK GENMASK(16, 0)
+#define STM32_MDMA_CBNDTR_BNDT(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CBNDTR_BNDT_MASK)
+
+/* MDMA Channel x source address register */
+#define STM32_MDMA_CSAR(x) (0x58 + 0x40 * (x))
+
+/* MDMA Channel x destination address register */
+#define STM32_MDMA_CDAR(x) (0x5C + 0x40 * (x))
+
+/* MDMA Channel x block repeat address update register */
+#define STM32_MDMA_CBRUR(x) (0x60 + 0x40 * (x))
+#define STM32_MDMA_CBRUR_DUV_MASK GENMASK(31, 16)
+#define STM32_MDMA_CBRUR_DUV(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CBRUR_DUV_MASK)
+#define STM32_MDMA_CBRUR_SUV_MASK GENMASK(15, 0)
+#define STM32_MDMA_CBRUR_SUV(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CBRUR_SUV_MASK)
+
+/* MDMA Channel x link address register */
+#define STM32_MDMA_CLAR(x) (0x64 + 0x40 * (x))
+
+/* MDMA Channel x trigger and bus selection register */
+#define STM32_MDMA_CTBR(x) (0x68 + 0x40 * (x))
+#define STM32_MDMA_CTBR_DBUS BIT(17)
+#define STM32_MDMA_CTBR_SBUS BIT(16)
+#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(7, 0)
+#define STM32_MDMA_CTBR_TSEL(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CTBR_TSEL_MASK)
+
+/* MDMA Channel x mask address register */
+#define STM32_MDMA_CMAR(x) (0x70 + 0x40 * (x))
+
+/* MDMA Channel x mask data register */
+#define STM32_MDMA_CMDR(x) (0x74 + 0x40 * (x))
+
+#define STM32_MDMA_MAX_BUF_LEN 128
+#define STM32_MDMA_MAX_BLOCK_LEN 65536
+#define STM32_MDMA_MAX_CHANNELS 63
+#define STM32_MDMA_MAX_REQUESTS 256
+#define STM32_MDMA_MAX_BURST 128
+#define STM32_MDMA_VERY_HIGH_PRIORITY 0x11
+
+enum stm32_mdma_trigger_mode {
+ STM32_MDMA_BUFFER,
+ STM32_MDMA_BLOCK,
+ STM32_MDMA_BLOCK_REP,
+ STM32_MDMA_LINKED_LIST,
+};
+
+enum stm32_mdma_width {
+ STM32_MDMA_BYTE,
+ STM32_MDMA_HALF_WORD,
+ STM32_MDMA_WORD,
+ STM32_MDMA_DOUBLE_WORD,
+};
+
+enum stm32_mdma_inc_mode {
+ STM32_MDMA_FIXED = 0,
+ STM32_MDMA_INC = 2,
+ STM32_MDMA_DEC = 3,
+};
+
+struct stm32_mdma_chan_config {
+ u32 request;
+ u32 priority_level;
+ u32 transfer_config;
+ u32 mask_addr;
+ u32 mask_data;
+};
+
+struct stm32_mdma_hwdesc {
+ u32 ctcr;
+ u32 cbndtr;
+ u32 csar;
+ u32 cdar;
+ u32 cbrur;
+ u32 clar;
+ u32 ctbr;
+ u32 dummy;
+ u32 cmar;
+ u32 cmdr;
+} __aligned(64);
+
+struct stm32_mdma_desc {
+ struct virt_dma_desc vdesc;
+ u32 ccr;
+ struct stm32_mdma_hwdesc *hwdesc;
+ dma_addr_t hwdesc_phys;
+ bool cyclic;
+ u32 count;
+};
+
+struct stm32_mdma_chan {
+ struct virt_dma_chan vchan;
+ struct dma_pool *desc_pool;
+ u32 id;
+ struct stm32_mdma_desc *desc;
+ u32 curr_hwdesc;
+ struct dma_slave_config dma_config;
+ struct stm32_mdma_chan_config chan_config;
+ bool busy;
+ u32 mem_burst;
+ u32 mem_width;
+};
+
+struct stm32_mdma_device {
+ struct dma_device ddev;
+ void __iomem *base;
+ struct clk *clk;
+ int irq;
+ struct reset_control *rst;
+ u32 nr_channels;
+ u32 nr_requests;
+ u32 nr_ahb_addr_masks;
+ struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS];
+ u32 ahb_addr_masks[];
+};
+
+static struct stm32_mdma_device *stm32_mdma_get_dev(
+ struct stm32_mdma_chan *chan)
+{
+ return container_of(chan->vchan.chan.device, struct stm32_mdma_device,
+ ddev);
+}
+
+static struct stm32_mdma_chan *to_stm32_mdma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct stm32_mdma_chan, vchan.chan);
+}
+
+static struct stm32_mdma_desc *to_stm32_mdma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct stm32_mdma_desc, vdesc);
+}
+
+static struct device *chan2dev(struct stm32_mdma_chan *chan)
+{
+ return &chan->vchan.chan.dev->device;
+}
+
+static struct device *mdma2dev(struct stm32_mdma_device *mdma_dev)
+{
+ return mdma_dev->ddev.dev;
+}
+
+static u32 stm32_mdma_read(struct stm32_mdma_device *dmadev, u32 reg)
+{
+ return readl_relaxed(dmadev->base + reg);
+}
+
+static void stm32_mdma_write(struct stm32_mdma_device *dmadev, u32 reg, u32 val)
+{
+ writel_relaxed(val, dmadev->base + reg);
+}
+
+static void stm32_mdma_set_bits(struct stm32_mdma_device *dmadev, u32 reg,
+ u32 mask)
+{
+ void __iomem *addr = dmadev->base + reg;
+
+ writel_relaxed(readl_relaxed(addr) | mask, addr);
+}
+
+static void stm32_mdma_clr_bits(struct stm32_mdma_device *dmadev, u32 reg,
+ u32 mask)
+{
+ void __iomem *addr = dmadev->base + reg;
+
+ writel_relaxed(readl_relaxed(addr) & ~mask, addr);
+}
+
+static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
+ struct stm32_mdma_chan *chan, u32 count)
+{
+ struct stm32_mdma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->hwdesc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT,
+ &desc->hwdesc_phys);
+ if (!desc->hwdesc) {
+ dev_err(chan2dev(chan), "Failed to allocate descriptor\n");
+ kfree(desc);
+ return NULL;
+ }
+
+ desc->count = count;
+
+ return desc;
+}
+
+static void stm32_mdma_desc_free(struct virt_dma_desc *vdesc)
+{
+ struct stm32_mdma_desc *desc = to_stm32_mdma_desc(vdesc);
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(vdesc->tx.chan);
+
+ dma_pool_free(chan->desc_pool, desc->hwdesc, desc->hwdesc_phys);
+ kfree(desc);
+}
+
+static int stm32_mdma_get_width(struct stm32_mdma_chan *chan,
+ enum dma_slave_buswidth width)
+{
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ return ffs(width) - 1;
+ default:
+ dev_err(chan2dev(chan), "Dma bus width %i not supported\n",
+ width);
+ return -EINVAL;
+ }
+}
+
+static enum dma_slave_buswidth stm32_mdma_get_max_width(dma_addr_t addr,
+ u32 buf_len, u32 tlen)
+{
+ enum dma_slave_buswidth max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
+
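+ /*
+ * e.g. addr = 0x20000004, buf_len = 12, tlen = 128: 8-byte accesses
+ * are ruled out by alignment, so a 4-byte bus width is selected.
+ */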
+ for (max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
+ max_width > DMA_SLAVE_BUSWIDTH_1_BYTE;
+ max_width >>= 1) {
+ /*
+ * Address and buffer length both have to be aligned on
+ * bus width
+ */
+ if ((((buf_len | addr) & (max_width - 1)) == 0) &&
+ tlen >= max_width)
+ break;
+ }
+
+ return max_width;
+}
+
+static u32 stm32_mdma_get_best_burst(u32 buf_len, u32 tlen, u32 max_burst,
+ enum dma_slave_buswidth width)
+{
+ u32 best_burst = max_burst;
+ u32 burst_len = best_burst * width;
+
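+ /*
+ * Halve the burst until a whole number of bursts fits in tlen, e.g.
+ * tlen = 96, width = 4, max_burst = 16: 64 bytes does not divide 96
+ * but 32 bytes does, so a burst of 8 beats is returned.
+ */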
+ while ((burst_len > 0) && (tlen % burst_len)) {
+ best_burst = best_burst >> 1;
+ burst_len = best_burst * width;
+ }
+
+ return (best_burst > 0) ? best_burst : 1;
+}
+
+static int stm32_mdma_disable_chan(struct stm32_mdma_chan *chan)
+{
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ u32 ccr, cisr, id, reg;
+ int ret;
+
+ id = chan->id;
+ reg = STM32_MDMA_CCR(id);
+
+ /* Disable interrupts */
+ stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_IRQ_MASK);
+
+ ccr = stm32_mdma_read(dmadev, reg);
+ if (ccr & STM32_MDMA_CCR_EN) {
+ stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_EN);
+
+ /* Ensure that any ongoing transfer has been completed */
+ ret = readl_relaxed_poll_timeout_atomic(
+ dmadev->base + STM32_MDMA_CISR(id), cisr,
+ (cisr & STM32_MDMA_CISR_CTCIF), 10, 1000);
+ if (ret) {
+ dev_err(chan2dev(chan), "%s: timeout!\n", __func__);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static void stm32_mdma_stop(struct stm32_mdma_chan *chan)
+{
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ u32 status;
+ int ret;
+
+ /* Disable DMA */
+ ret = stm32_mdma_disable_chan(chan);
+ if (ret < 0)
+ return;
+
+ /* Clear interrupt status if it is there */
+ status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
+ if (status) {
+ dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
+ __func__, status);
+ stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);
+ }
+
+ chan->busy = false;
+}
+
+static void stm32_mdma_set_bus(struct stm32_mdma_device *dmadev, u32 *ctbr,
+ u32 ctbr_mask, u32 src_addr)
+{
+ u32 mask;
+ int i;
+
+ /* Check if memory device is on AHB or AXI */
+ *ctbr &= ~ctbr_mask;
+ mask = src_addr & 0xF0000000;
+ for (i = 0; i < dmadev->nr_ahb_addr_masks; i++) {
+ if (mask == dmadev->ahb_addr_masks[i]) {
+ *ctbr |= ctbr_mask;
+ break;
+ }
+ }
+}
+
+static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
+ enum dma_transfer_direction direction,
+ u32 *mdma_ccr, u32 *mdma_ctcr,
+ u32 *mdma_ctbr, dma_addr_t addr,
+ u32 buf_len)
+{
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
+ enum dma_slave_buswidth src_addr_width, dst_addr_width;
+ phys_addr_t src_addr, dst_addr;
+ int src_bus_width, dst_bus_width;
+ u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
+ u32 ccr, ctcr, ctbr, tlen;
+
+ src_addr_width = chan->dma_config.src_addr_width;
+ dst_addr_width = chan->dma_config.dst_addr_width;
+ src_maxburst = chan->dma_config.src_maxburst;
+ dst_maxburst = chan->dma_config.dst_maxburst;
+
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
+ ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+ ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+
+ /* Enable HW request mode */
+ ctcr &= ~STM32_MDMA_CTCR_SWRM;
+
+ /* Set DINC, SINC, DINCOS, SINCOS, TRGM and TLEN as retrieved from the DT */
+ ctcr &= ~STM32_MDMA_CTCR_CFG_MASK;
+ ctcr |= chan_config->transfer_config & STM32_MDMA_CTCR_CFG_MASK;
+
+ /*
+ * For buffer transfer length (TLEN) we have to set
+ * the number of bytes - 1 in CTCR register
+ */
+ tlen = STM32_MDMA_CTCR_LEN2_GET(ctcr);
+ ctcr &= ~STM32_MDMA_CTCR_LEN2_MSK;
+ ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));
+
+ /* Disable Pack Enable */
+ ctcr &= ~STM32_MDMA_CTCR_PKE;
+
+ /* Check burst size constraints */
+ if (src_maxburst * src_addr_width > STM32_MDMA_MAX_BURST ||
+ dst_maxburst * dst_addr_width > STM32_MDMA_MAX_BURST) {
+ dev_err(chan2dev(chan),
+ "burst size * bus width higher than %d bytes\n",
+ STM32_MDMA_MAX_BURST);
+ return -EINVAL;
+ }
+
+ if ((!is_power_of_2(src_maxburst) && src_maxburst > 0) ||
+ (!is_power_of_2(dst_maxburst) && dst_maxburst > 0)) {
+ dev_err(chan2dev(chan), "burst size must be a power of 2\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Configure channel control:
+ * - Clear SW request as in this case this is a HW one
+ * - Clear WEX, HEX and BEX bits
+ * - Set priority level
+ */
+ ccr &= ~(STM32_MDMA_CCR_SWRQ | STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
+ STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK);
+ ccr |= STM32_MDMA_CCR_PL(chan_config->priority_level);
+
+ /* Configure Trigger selection */
+ ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;
+ ctbr |= STM32_MDMA_CTBR_TSEL(chan_config->request);
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ dst_addr = chan->dma_config.dst_addr;
+
+ /* Set device data size */
+ dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
+ if (dst_bus_width < 0)
+ return dst_bus_width;
+ ctcr &= ~STM32_MDMA_CTCR_DSIZE_MASK;
+ ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width);
+
+ /* Set device burst value */
+ dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
+ dst_maxburst,
+ dst_addr_width);
+ chan->mem_burst = dst_best_burst;
+ ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
+ ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));
+
+ /* Set memory data size */
+ src_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
+ chan->mem_width = src_addr_width;
+ src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
+ if (src_bus_width < 0)
+ return src_bus_width;
+ ctcr &= ~(STM32_MDMA_CTCR_SSIZE_MASK |
+ STM32_MDMA_CTCR_SINCOS_MASK);
+ ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width) |
+ STM32_MDMA_CTCR_SINCOS(src_bus_width);
+
+ /* Set memory burst value */
+ src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width;
+ src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
+ src_maxburst,
+ src_addr_width);
+ chan->mem_burst = src_best_burst;
+ ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
+ ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));
+
+ /* Select bus */
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
+ dst_addr);
+
+ if (dst_bus_width != src_bus_width)
+ ctcr |= STM32_MDMA_CTCR_PKE;
+
+ /* Set destination address */
+ stm32_mdma_write(dmadev, STM32_MDMA_CDAR(chan->id), dst_addr);
+ break;
+
+ case DMA_DEV_TO_MEM:
+ src_addr = chan->dma_config.src_addr;
+
+ /* Set device data size */
+ src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
+ if (src_bus_width < 0)
+ return src_bus_width;
+ ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK;
+ ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width);
+
+ /* Set device burst value */
+ src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
+ src_maxburst,
+ src_addr_width);
+ ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
+ ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));
+
+ /* Set memory data size */
+ dst_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
+ chan->mem_width = dst_addr_width;
+ dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
+ if (dst_bus_width < 0)
+ return dst_bus_width;
+ ctcr &= ~(STM32_MDMA_CTCR_DSIZE_MASK |
+ STM32_MDMA_CTCR_DINCOS_MASK);
+ ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
+ STM32_MDMA_CTCR_DINCOS(dst_bus_width);
+
+ /* Set memory burst value */
+ dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width;
+ dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
+ dst_maxburst,
+ dst_addr_width);
+ ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
+ ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));
+
+ /* Select bus */
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
+ src_addr);
+
+ if (dst_bus_width != src_bus_width)
+ ctcr |= STM32_MDMA_CTCR_PKE;
+
+ /* Set source address */
+ stm32_mdma_write(dmadev, STM32_MDMA_CSAR(chan->id), src_addr);
+ break;
+
+ default:
+ dev_err(chan2dev(chan), "Dma direction is not supported\n");
+ return -EINVAL;
+ }
+
+ *mdma_ccr = ccr;
+ *mdma_ctcr = ctcr;
+ *mdma_ctbr = ctbr;
+
+ return 0;
+}
+
+static void stm32_mdma_dump_hwdesc(struct stm32_mdma_chan *chan,
+ struct stm32_mdma_hwdesc *hwdesc)
+{
+ dev_dbg(chan2dev(chan), "hwdesc: 0x%p\n", hwdesc);
+ dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", hwdesc->ctcr);
+ dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", hwdesc->cbndtr);
+ dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", hwdesc->csar);
+ dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", hwdesc->cdar);
+ dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", hwdesc->cbrur);
+ dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", hwdesc->clar);
+ dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", hwdesc->ctbr);
+ dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", hwdesc->cmar);
+ dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n\n", hwdesc->cmdr);
+}
+
+static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
+ struct stm32_mdma_desc *desc,
+ enum dma_transfer_direction dir, u32 count,
+ dma_addr_t src_addr, dma_addr_t dst_addr,
+ u32 len, u32 ctcr, u32 ctbr, bool is_last,
+ bool is_first, bool is_cyclic)
+{
+ struct stm32_mdma_chan_config *config = &chan->chan_config;
+ struct stm32_mdma_hwdesc *hwdesc;
+ u32 next = count + 1;
+
+ hwdesc = &desc->hwdesc[count];
+ hwdesc->ctcr = ctcr;
+ hwdesc->cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK |
+ STM32_MDMA_CBNDTR_BRDUM |
+ STM32_MDMA_CBNDTR_BRSUM |
+ STM32_MDMA_CBNDTR_BNDT_MASK);
+ hwdesc->cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
+ hwdesc->csar = src_addr;
+ hwdesc->cdar = dst_addr;
+ hwdesc->cbrur = 0;
+ hwdesc->clar = desc->hwdesc_phys + next * sizeof(*hwdesc);
+ hwdesc->ctbr = ctbr;
+ hwdesc->cmar = config->mask_addr;
+ hwdesc->cmdr = config->mask_data;
+
+ if (is_last) {
+ if (is_cyclic)
+ hwdesc->clar = desc->hwdesc_phys;
+ else
+ hwdesc->clar = 0;
+ }
+
+ stm32_mdma_dump_hwdesc(chan, hwdesc);
+}
+
+static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
+ struct stm32_mdma_desc *desc,
+ struct scatterlist *sgl, u32 sg_len,
+ enum dma_transfer_direction direction)
+{
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ struct dma_slave_config *dma_config = &chan->dma_config;
+ struct scatterlist *sg;
+ dma_addr_t src_addr, dst_addr;
+ u32 ccr, ctcr, ctbr;
+ int i, ret = 0;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ if (sg_dma_len(sg) > STM32_MDMA_MAX_BLOCK_LEN) {
+ dev_err(chan2dev(chan), "Invalid block len\n");
+ return -EINVAL;
+ }
+
+ if (direction == DMA_MEM_TO_DEV) {
+ src_addr = sg_dma_address(sg);
+ dst_addr = dma_config->dst_addr;
+ ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
+ &ctcr, &ctbr, src_addr,
+ sg_dma_len(sg));
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
+ src_addr);
+ } else {
+ src_addr = dma_config->src_addr;
+ dst_addr = sg_dma_address(sg);
+ ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
+ &ctcr, &ctbr, dst_addr,
+ sg_dma_len(sg));
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
+ dst_addr);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
+ dst_addr, sg_dma_len(sg), ctcr, ctbr,
+ i == sg_len - 1, i == 0, false);
+ }
+
+ /* Enable interrupts */
+ ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
+ ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
+ if (sg_len > 1)
+ ccr |= STM32_MDMA_CCR_BTIE;
+ desc->ccr = ccr;
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *
+stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
+ u32 sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_desc *desc;
+ int ret;
+
+ /*
+ * Once the channel is set up in cyclic mode it cannot be reused for
+ * another request. The DMA channel needs to be aborted or terminated
+ * before another request can be issued.
+ */
+ if (chan->desc && chan->desc->cyclic) {
+ dev_err(chan2dev(chan),
+ "Request not allowed when dma in cyclic mode\n");
+ return NULL;
+ }
+
+ desc = stm32_mdma_alloc_desc(chan, sg_len);
+ if (!desc)
+ return NULL;
+
+ ret = stm32_mdma_setup_xfer(chan, desc, sgl, sg_len, direction);
+ if (ret < 0)
+ goto xfer_setup_err;
+
+ desc->cyclic = false;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+xfer_setup_err:
+ dma_pool_free(chan->desc_pool, desc->hwdesc, desc->hwdesc_phys);
+ kfree(desc);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ struct dma_slave_config *dma_config = &chan->dma_config;
+ struct stm32_mdma_desc *desc;
+ dma_addr_t src_addr, dst_addr;
+ u32 ccr, ctcr, ctbr, count;
+ int i, ret;
+
+ /*
+ * Once the channel is set up in cyclic mode it cannot be reused for
+ * another request. The DMA channel needs to be aborted or terminated
+ * before another request can be issued.
+ */
+ if (chan->desc && chan->desc->cyclic) {
+ dev_err(chan2dev(chan),
+ "Request not allowed when dma in cyclic mode\n");
+ return NULL;
+ }
+
+ if (!buf_len || !period_len || period_len > STM32_MDMA_MAX_BLOCK_LEN) {
+ dev_err(chan2dev(chan), "Invalid buffer/period len\n");
+ return NULL;
+ }
+
+ if (buf_len % period_len) {
+ dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
+ return NULL;
+ }
+
+ count = buf_len / period_len;
+
+ desc = stm32_mdma_alloc_desc(chan, count);
+ if (!desc)
+ return NULL;
+
+ /* Select bus */
+ if (direction == DMA_MEM_TO_DEV) {
+ src_addr = buf_addr;
+ ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
+ &ctbr, src_addr, period_len);
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
+ src_addr);
+ } else {
+ dst_addr = buf_addr;
+ ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
+ &ctbr, dst_addr, period_len);
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
+ dst_addr);
+ }
+
+ if (ret < 0)
+ goto xfer_setup_err;
+
+ /* Enable interrupts */
+ ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
+ ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE | STM32_MDMA_CCR_BTIE;
+ desc->ccr = ccr;
+
+ /* Configure hwdesc list */
+ for (i = 0; i < count; i++) {
+ if (direction == DMA_MEM_TO_DEV) {
+ src_addr = buf_addr + i * period_len;
+ dst_addr = dma_config->dst_addr;
+ } else {
+ src_addr = dma_config->src_addr;
+ dst_addr = buf_addr + i * period_len;
+ }
+
+ stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
+ dst_addr, period_len, ctcr, ctbr,
+ i == count - 1, i == 0, true);
+ }
+
+ desc->cyclic = true;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+xfer_setup_err:
+ dma_pool_free(chan->desc_pool, desc->hwdesc, desc->hwdesc_phys);
+ kfree(desc);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ enum dma_slave_buswidth max_width;
+ struct stm32_mdma_desc *desc;
+ struct stm32_mdma_hwdesc *hwdesc;
+ u32 ccr, ctcr, ctbr, cbndtr, count, max_burst, mdma_burst;
+ u32 best_burst, tlen;
+ size_t xfer_count, offset;
+ int src_bus_width, dst_bus_width;
+ int i;
+
+ /*
+ * Once the channel is set up in cyclic mode it cannot be reused for
+ * another request. The DMA channel needs to be aborted or terminated
+ * before another request can be issued.
+ */
+ if (chan->desc && chan->desc->cyclic) {
+ dev_err(chan2dev(chan),
+ "Request not allowed when dma in cyclic mode\n");
+ return NULL;
+ }
+
+ count = DIV_ROUND_UP(len, STM32_MDMA_MAX_BLOCK_LEN);
+ desc = stm32_mdma_alloc_desc(chan, count);
+ if (!desc)
+ return NULL;
+
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
+ ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+ ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+ cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
+
+ /* Enable sw req, some interrupts and clear other bits */
+ ccr &= ~(STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
+ STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK |
+ STM32_MDMA_CCR_IRQ_MASK);
+ ccr |= STM32_MDMA_CCR_TEIE;
+
+ /* Enable SW request mode, dest/src inc and clear other bits */
+ ctcr &= ~(STM32_MDMA_CTCR_BWM | STM32_MDMA_CTCR_TRGM_MSK |
+ STM32_MDMA_CTCR_PAM_MASK | STM32_MDMA_CTCR_PKE |
+ STM32_MDMA_CTCR_TLEN_MSK | STM32_MDMA_CTCR_DBURST_MASK |
+ STM32_MDMA_CTCR_SBURST_MASK | STM32_MDMA_CTCR_DINCOS_MASK |
+ STM32_MDMA_CTCR_SINCOS_MASK | STM32_MDMA_CTCR_DSIZE_MASK |
+ STM32_MDMA_CTCR_SSIZE_MASK | STM32_MDMA_CTCR_DINC_MASK |
+ STM32_MDMA_CTCR_SINC_MASK);
+ ctcr |= STM32_MDMA_CTCR_SWRM | STM32_MDMA_CTCR_SINC(STM32_MDMA_INC) |
+ STM32_MDMA_CTCR_DINC(STM32_MDMA_INC);
+
+ /* Reset HW request */
+ ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;
+
+ /* Select bus */
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, src);
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, dest);
+
+ /* Clear CBNDTR registers */
+ cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK | STM32_MDMA_CBNDTR_BRDUM |
+ STM32_MDMA_CBNDTR_BRSUM | STM32_MDMA_CBNDTR_BNDT_MASK);
+
+ if (len <= STM32_MDMA_MAX_BLOCK_LEN) {
+ cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
+ if (len <= STM32_MDMA_MAX_BUF_LEN) {
+ /* Setup a buffer transfer */
+ ccr |= STM32_MDMA_CCR_TCIE | STM32_MDMA_CCR_CTCIE;
+ ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BUFFER);
+ } else {
+ /* Setup a block transfer */
+ ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
+ ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BLOCK);
+ }
+
+ tlen = STM32_MDMA_MAX_BUF_LEN;
+ ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));
+
+ /* Set source best burst size */
+ max_width = stm32_mdma_get_max_width(src, len, tlen);
+ src_bus_width = stm32_mdma_get_width(chan, max_width);
+
+ max_burst = tlen / max_width;
+ best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
+ max_width);
+ mdma_burst = ilog2(best_burst);
+
+ ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
+ STM32_MDMA_CTCR_SSIZE(src_bus_width) |
+ STM32_MDMA_CTCR_SINCOS(src_bus_width);
+
+ /* Set destination best burst size */
+ max_width = stm32_mdma_get_max_width(dest, len, tlen);
+ dst_bus_width = stm32_mdma_get_width(chan, max_width);
+
+ max_burst = tlen / max_width;
+ best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
+ max_width);
+ mdma_burst = ilog2(best_burst);
+
+ ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
+ STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
+ STM32_MDMA_CTCR_DINCOS(dst_bus_width);
+
+ if (dst_bus_width != src_bus_width)
+ ctcr |= STM32_MDMA_CTCR_PKE;
+
+ /* Prepare hardware descriptor */
+ hwdesc = desc->hwdesc;
+ hwdesc->ctcr = ctcr;
+ hwdesc->cbndtr = cbndtr;
+ hwdesc->csar = src;
+ hwdesc->cdar = dest;
+ hwdesc->cbrur = 0;
+ hwdesc->clar = 0;
+ hwdesc->ctbr = ctbr;
+ hwdesc->cmar = 0;
+ hwdesc->cmdr = 0;
+
+ stm32_mdma_dump_hwdesc(chan, hwdesc);
+ } else {
+ /* Setup a LLI transfer */
+ ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_LINKED_LIST) |
+ STM32_MDMA_CTCR_TLEN((STM32_MDMA_MAX_BUF_LEN - 1));
+ ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
+ tlen = STM32_MDMA_MAX_BUF_LEN;
+
+ for (i = 0, offset = 0; offset < len;
+ i++, offset += xfer_count) {
+ xfer_count = min_t(size_t, len - offset,
+ STM32_MDMA_MAX_BLOCK_LEN);
+
+ /* Set source best burst size */
+ max_width = stm32_mdma_get_max_width(src, len, tlen);
+ src_bus_width = stm32_mdma_get_width(chan, max_width);
+
+ max_burst = tlen / max_width;
+ best_burst = stm32_mdma_get_best_burst(len, tlen,
+ max_burst,
+ max_width);
+ mdma_burst = ilog2(best_burst);
+
+ ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
+ STM32_MDMA_CTCR_SSIZE(src_bus_width) |
+ STM32_MDMA_CTCR_SINCOS(src_bus_width);
+
+ /* Set destination best burst size */
+ max_width = stm32_mdma_get_max_width(dest, len, tlen);
+ dst_bus_width = stm32_mdma_get_width(chan, max_width);
+
+ max_burst = tlen / max_width;
+ best_burst = stm32_mdma_get_best_burst(len, tlen,
+ max_burst,
+ max_width);
+ mdma_burst = ilog2(best_burst);
+
+ ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
+ STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
+ STM32_MDMA_CTCR_DINCOS(dst_bus_width);
+
+ if (dst_bus_width != src_bus_width)
+ ctcr |= STM32_MDMA_CTCR_PKE;
+
+ /* Prepare hardware descriptor */
+ stm32_mdma_setup_hwdesc(chan, desc, DMA_MEM_TO_MEM, i,
+ src + offset, dest + offset,
+ xfer_count, ctcr, ctbr,
+ i == count - 1, i == 0, false);
+ }
+ }
+
+ desc->ccr = ccr;
+
+ desc->cyclic = false;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static void stm32_mdma_dump_reg(struct stm32_mdma_chan *chan)
+{
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+
+ dev_dbg(chan2dev(chan), "CCR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)));
+ dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)));
+ dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)));
+ dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CSAR(chan->id)));
+ dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CDAR(chan->id)));
+ dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CBRUR(chan->id)));
+ dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id)));
+ dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)));
+ dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CMAR(chan->id)));
+ dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CMDR(chan->id)));
+}
+
+static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
+{
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ struct virt_dma_desc *vdesc;
+ struct stm32_mdma_hwdesc *hwdesc;
+ u32 id = chan->id;
+ u32 status, reg;
+
+ vdesc = vchan_next_desc(&chan->vchan);
+ if (!vdesc) {
+ chan->desc = NULL;
+ return;
+ }
+
+ chan->desc = to_stm32_mdma_desc(vdesc);
+ hwdesc = chan->desc->hwdesc;
+ chan->curr_hwdesc = 0;
+
+ stm32_mdma_write(dmadev, STM32_MDMA_CCR(id), chan->desc->ccr);
+ stm32_mdma_write(dmadev, STM32_MDMA_CTCR(id), hwdesc->ctcr);
+ stm32_mdma_write(dmadev, STM32_MDMA_CBNDTR(id), hwdesc->cbndtr);
+ stm32_mdma_write(dmadev, STM32_MDMA_CSAR(id), hwdesc->csar);
+ stm32_mdma_write(dmadev, STM32_MDMA_CDAR(id), hwdesc->cdar);
+ stm32_mdma_write(dmadev, STM32_MDMA_CBRUR(id), hwdesc->cbrur);
+ stm32_mdma_write(dmadev, STM32_MDMA_CLAR(id), hwdesc->clar);
+ stm32_mdma_write(dmadev, STM32_MDMA_CTBR(id), hwdesc->ctbr);
+ stm32_mdma_write(dmadev, STM32_MDMA_CMAR(id), hwdesc->cmar);
+ stm32_mdma_write(dmadev, STM32_MDMA_CMDR(id), hwdesc->cmdr);
+
+ /* Clear interrupt status if it is there */
+ status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id));
+ if (status)
+ stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(id), status);
+
+ stm32_mdma_dump_reg(chan);
+
+ /* Start DMA */
+ stm32_mdma_set_bits(dmadev, STM32_MDMA_CCR(id), STM32_MDMA_CCR_EN);
+
+ /* Set SW request in case of MEM2MEM transfer */
+ if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM) {
+ reg = STM32_MDMA_CCR(id);
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);
+ }
+
+ chan->busy = true;
+
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
+}
+
+static void stm32_mdma_issue_pending(struct dma_chan *c)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ if (!vchan_issue_pending(&chan->vchan))
+ goto end;
+
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
+
+ if (!chan->desc && !chan->busy)
+ stm32_mdma_start_transfer(chan);
+
+end:
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static int stm32_mdma_pause(struct dma_chan *c)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ ret = stm32_mdma_disable_chan(chan);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ if (!ret)
+ dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan);
+
+ return ret;
+}
+
+static int stm32_mdma_resume(struct dma_chan *c)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ struct stm32_mdma_hwdesc *hwdesc;
+ unsigned long flags;
+ u32 status, reg;
+
+ hwdesc = &chan->desc->hwdesc[chan->curr_hwdesc];
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ /* Re-configure control register */
+ stm32_mdma_write(dmadev, STM32_MDMA_CCR(chan->id), chan->desc->ccr);
+
+ /* Clear interrupt status if it is there */
+ status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
+ if (status)
+ stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);
+
+ stm32_mdma_dump_reg(chan);
+
+ /* Re-start DMA */
+ reg = STM32_MDMA_CCR(chan->id);
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_EN);
+
+ /* Set SW request in case of MEM2MEM transfer */
+ if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM)
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan);
+
+ return 0;
+}
+
+static int stm32_mdma_terminate_all(struct dma_chan *c)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ if (chan->busy) {
+ stm32_mdma_stop(chan);
+ chan->desc = NULL;
+ }
+ vchan_get_all_descriptors(&chan->vchan, &head);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vchan, &head);
+
+ return 0;
+}
+
+static void stm32_mdma_synchronize(struct dma_chan *c)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+
+ vchan_synchronize(&chan->vchan);
+}
+
+static int stm32_mdma_slave_config(struct dma_chan *c,
+ struct dma_slave_config *config)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+
+ memcpy(&chan->dma_config, config, sizeof(*config));
+
+ return 0;
+}
+
+static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
+ struct stm32_mdma_desc *desc,
+ u32 curr_hwdesc)
+{
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ u32 cbndtr, residue, modulo, burst_size;
+ int i;
+
+ residue = 0;
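+ /* Sum the byte counts of the hw descriptors that have not run yet */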
+ for (i = curr_hwdesc + 1; i < desc->count; i++) {
+ struct stm32_mdma_hwdesc *hwdesc = &desc->hwdesc[i];
+
+ residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
+ }
+ cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
+ residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
+
+ if (!chan->mem_burst)
+ return residue;
+
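+ /* Residue is reported at burst granularity: round up to the next memory burst */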
+ burst_size = chan->mem_burst * chan->mem_width;
+ modulo = residue % burst_size;
+ if (modulo)
+ residue = residue - modulo + burst_size;
+
+ return residue;
+}
+
+static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ unsigned long flags;
+ u32 residue = 0;
+
+ status = dma_cookie_status(c, cookie, state);
+ if ((status == DMA_COMPLETE) || (!state))
+ return status;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ vdesc = vchan_find_desc(&chan->vchan, cookie);
+ if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
+ residue = stm32_mdma_desc_residue(chan, chan->desc,
+ chan->curr_hwdesc);
+ else if (vdesc)
+ residue = stm32_mdma_desc_residue(chan,
+ to_stm32_mdma_desc(vdesc), 0);
+ dma_set_residue(state, residue);
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ return status;
+}
+
+static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
+{
+ list_del(&chan->desc->vdesc.node);
+ vchan_cookie_complete(&chan->desc->vdesc);
+ chan->desc = NULL;
+ chan->busy = false;
+
+ /* Start the next transfer if another descriptor is queued */
+ stm32_mdma_start_transfer(chan);
+}
+
+static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
+{
+ struct stm32_mdma_device *dmadev = devid;
+ struct stm32_mdma_chan *chan;
+ u32 reg, id, ien, status, flag;
+
+ /* Find out which channel generates the interrupt */
+ status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
+ if (status) {
+ id = __ffs(status);
+ } else {
+ status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1);
+ if (!status) {
+ dev_dbg(mdma2dev(dmadev), "spurious interrupt\n");
+ return IRQ_NONE;
+ }
+ id = __ffs(status);
+ /*
+ * GISR0 reports status for channels 0 to 31,
+ * GISR1 reports status for channels 32 to 62
+ */
+ id += 32;
+ }
+
+ chan = &dmadev->chan[id];
+ if (!chan) {
+ dev_err(chan2dev(chan), "MDMA channel not initialized\n");
+ goto exit;
+ }
+
+ /* Handle interrupt for the channel */
+ spin_lock(&chan->vchan.lock);
+ status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
+ ien = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
+ ien &= STM32_MDMA_CCR_IRQ_MASK;
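+ /* Interrupt enable bits in CCR sit one bit above the matching CISR flags */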
+ ien >>= 1;
+
+ if (!(status & ien)) {
+ spin_unlock(&chan->vchan.lock);
+ dev_dbg(chan2dev(chan),
+ "spurious it (status=0x%04x, ien=0x%04x)\n",
+ status, ien);
+ return IRQ_NONE;
+ }
+
+ flag = __ffs(status & ien);
+ reg = STM32_MDMA_CIFCR(chan->id);
+
+ switch (1 << flag) {
+ case STM32_MDMA_CISR_TEIF:
+ id = chan->id;
+ status = readl_relaxed(dmadev->base + STM32_MDMA_CESR(id));
+ dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n", status);
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CTEIF);
+ break;
+
+ case STM32_MDMA_CISR_CTCIF:
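+ /* Channel transfer complete: finish the descriptor and start the next one */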
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CCTCIF);
+ stm32_mdma_xfer_end(chan);
+ break;
+
+ case STM32_MDMA_CISR_BRTIF:
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBRTIF);
+ break;
+
+ case STM32_MDMA_CISR_BTIF:
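+ /* Block transfer complete: move to the next hw descriptor, wrapping in cyclic mode */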
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBTIF);
+ chan->curr_hwdesc++;
+ if (chan->desc && chan->desc->cyclic) {
+ if (chan->curr_hwdesc == chan->desc->count)
+ chan->curr_hwdesc = 0;
+ vchan_cyclic_callback(&chan->desc->vdesc);
+ }
+ break;
+
+ case STM32_MDMA_CISR_TCIF:
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CLTCIF);
+ break;
+
+ default:
+ dev_err(chan2dev(chan), "it %d unhandled (status=0x%04x)\n",
+ 1 << flag, status);
+ }
+
+ spin_unlock(&chan->vchan.lock);
+
+exit:
+ return IRQ_HANDLED;
+}
+
+static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ int ret;
+
+ chan->desc_pool = dmam_pool_create(dev_name(&c->dev->device),
+ c->device->dev,
+ sizeof(struct stm32_mdma_hwdesc),
+ __alignof__(struct stm32_mdma_hwdesc),
+ 0);
+ if (!chan->desc_pool) {
+ dev_err(chan2dev(chan), "failed to allocate descriptor pool\n");
+ return -ENOMEM;
+ }
+
+ ret = clk_prepare_enable(dmadev->clk);
+ if (ret < 0) {
+ dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = stm32_mdma_disable_chan(chan);
+ if (ret < 0)
+ clk_disable_unprepare(dmadev->clk);
+
+ return ret;
+}
+
+static void stm32_mdma_free_chan_resources(struct dma_chan *c)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);
+
+ if (chan->busy) {
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ stm32_mdma_stop(chan);
+ chan->desc = NULL;
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ }
+
+ clk_disable_unprepare(dmadev->clk);
+ vchan_free_chan_resources(to_virt_chan(c));
+ dmam_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
+}
+
+static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct stm32_mdma_device *dmadev = ofdma->of_dma_data;
+ struct stm32_mdma_chan *chan;
+ struct dma_chan *c;
+ struct stm32_mdma_chan_config config;
+
+ if (dma_spec->args_count < 5) {
+ dev_err(mdma2dev(dmadev), "Bad number of args\n");
+ return NULL;
+ }
+
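+ /* DT cells: request line, priority level, transfer config, mask address, mask data */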
+ config.request = dma_spec->args[0];
+ config.priority_level = dma_spec->args[1];
+ config.transfer_config = dma_spec->args[2];
+ config.mask_addr = dma_spec->args[3];
+ config.mask_data = dma_spec->args[4];
+
+ if (config.request >= dmadev->nr_requests) {
+ dev_err(mdma2dev(dmadev), "Bad request line\n");
+ return NULL;
+ }
+
+ if (config.priority_level > STM32_MDMA_VERY_HIGH_PRIORITY) {
+ dev_err(mdma2dev(dmadev), "Priority level not supported\n");
+ return NULL;
+ }
+
+ c = dma_get_any_slave_channel(&dmadev->ddev);
+ if (!c) {
+ dev_err(mdma2dev(dmadev), "No more channel avalaible\n");
+ return NULL;
+ }
+
+ chan = to_stm32_mdma_chan(c);
+ chan->chan_config = config;
+
+ return c;
+}
+
+static const struct of_device_id stm32_mdma_of_match[] = {
+ { .compatible = "st,stm32h7-mdma", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stm32_mdma_of_match);
+
+static int stm32_mdma_probe(struct platform_device *pdev)
+{
+ struct stm32_mdma_chan *chan;
+ struct stm32_mdma_device *dmadev;
+ struct dma_device *dd;
+ struct device_node *of_node;
+ struct resource *res;
+ u32 nr_channels, nr_requests;
+ int i, count, ret;
+
+ of_node = pdev->dev.of_node;
+ if (!of_node)
+ return -ENODEV;
+
+ ret = device_property_read_u32(&pdev->dev, "dma-channels",
+ &nr_channels);
+ if (ret) {
+ nr_channels = STM32_MDMA_MAX_CHANNELS;
+ dev_warn(&pdev->dev, "MDMA defaulting on %i channels\n",
+ nr_channels);
+ }
+
+ ret = device_property_read_u32(&pdev->dev, "dma-requests",
+ &nr_requests);
+ if (ret) {
+ nr_requests = STM32_MDMA_MAX_REQUESTS;
+ dev_warn(&pdev->dev, "MDMA defaulting on %i request lines\n",
+ nr_requests);
+ }
+
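+ /* A NULL buffer read returns the number of "st,ahb-addr-masks" entries */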
+ count = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
+ NULL, 0);
+ if (count < 0)
+ count = 0;
+
+ dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev) + sizeof(u32) * count,
+ GFP_KERNEL);
+ if (!dmadev)
+ return -ENOMEM;
+
+ dmadev->nr_channels = nr_channels;
+ dmadev->nr_requests = nr_requests;
+ device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
+ dmadev->ahb_addr_masks,
+ count);
+ dmadev->nr_ahb_addr_masks = count;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmadev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dmadev->base))
+ return PTR_ERR(dmadev->base);
+
+ dmadev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dmadev->clk)) {
+ ret = PTR_ERR(dmadev->clk);
+ if (ret == -EPROBE_DEFER)
+ dev_info(&pdev->dev, "Missing controller clock\n");
+ return ret;
+ }
+
+ dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (!IS_ERR(dmadev->rst)) {
+ reset_control_assert(dmadev->rst);
+ udelay(2);
+ reset_control_deassert(dmadev->rst);
+ }
+
+ dd = &dmadev->ddev;
+ dma_cap_set(DMA_SLAVE, dd->cap_mask);
+ dma_cap_set(DMA_PRIVATE, dd->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+ dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+ dd->device_alloc_chan_resources = stm32_mdma_alloc_chan_resources;
+ dd->device_free_chan_resources = stm32_mdma_free_chan_resources;
+ dd->device_tx_status = stm32_mdma_tx_status;
+ dd->device_issue_pending = stm32_mdma_issue_pending;
+ dd->device_prep_slave_sg = stm32_mdma_prep_slave_sg;
+ dd->device_prep_dma_cyclic = stm32_mdma_prep_dma_cyclic;
+ dd->device_prep_dma_memcpy = stm32_mdma_prep_dma_memcpy;
+ dd->device_config = stm32_mdma_slave_config;
+ dd->device_pause = stm32_mdma_pause;
+ dd->device_resume = stm32_mdma_resume;
+ dd->device_terminate_all = stm32_mdma_terminate_all;
+ dd->device_synchronize = stm32_mdma_synchronize;
+ dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
+ BIT(DMA_MEM_TO_MEM);
+ dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dd->max_burst = STM32_MDMA_MAX_BURST;
+ dd->dev = &pdev->dev;
+ INIT_LIST_HEAD(&dd->channels);
+
+ for (i = 0; i < dmadev->nr_channels; i++) {
+ chan = &dmadev->chan[i];
+ chan->id = i;
+ chan->vchan.desc_free = stm32_mdma_desc_free;
+ vchan_init(&chan->vchan, dd);
+ }
+
+ dmadev->irq = platform_get_irq(pdev, 0);
+ if (dmadev->irq < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ\n");
+ return dmadev->irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
+ 0, dev_name(&pdev->dev), dmadev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ ret = dma_async_device_register(dd);
+ if (ret)
+ return ret;
+
+ ret = of_dma_controller_register(of_node, stm32_mdma_of_xlate, dmadev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "STM32 MDMA DMA OF registration failed %d\n", ret);
+ goto err_unregister;
+ }
+
+ platform_set_drvdata(pdev, dmadev);
+
+ dev_info(&pdev->dev, "STM32 MDMA driver registered\n");
+
+ return 0;
+
+err_unregister:
+ dma_async_device_unregister(dd);
+
+ return ret;
+}
+
+static struct platform_driver stm32_mdma_driver = {
+ .probe = stm32_mdma_probe,
+ .driver = {
+ .name = "stm32-mdma",
+ .of_match_table = stm32_mdma_of_match,
+ },
+};
+
+static int __init stm32_mdma_init(void)
+{
+ return platform_driver_register(&stm32_mdma_driver);
+}
+
+subsys_initcall(stm32_mdma_init);
+
+MODULE_DESCRIPTION("Driver for STM32 MDMA controller");
+MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
+MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index bcd496edc70f..0cd13f17fc11 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -42,12 +42,18 @@
#define DMA_STAT 0x30
+/* Offset between DMA_IRQ_EN and DMA_IRQ_STAT limits number of channels */
+#define DMA_MAX_CHANNELS (DMA_IRQ_CHAN_NR * 0x10 / 4)
+
/*
* sun8i specific registers
*/
#define SUN8I_DMA_GATE 0x20
#define SUN8I_DMA_GATE_ENABLE 0x4
+#define SUNXI_H3_SECURE_REG 0x20
+#define SUNXI_H3_DMA_GATE 0x28
+#define SUNXI_H3_DMA_GATE_ENABLE 0x4
/*
* Channels specific registers
*/
@@ -62,16 +68,19 @@
#define DMA_CHAN_LLI_ADDR 0x08
#define DMA_CHAN_CUR_CFG 0x0c
-#define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & 0x1f)
+#define DMA_CHAN_MAX_DRQ 0x1f
+#define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & DMA_CHAN_MAX_DRQ)
#define DMA_CHAN_CFG_SRC_IO_MODE BIT(5)
#define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5)
-#define DMA_CHAN_CFG_SRC_BURST(x) (((x) & 0x3) << 7)
+#define DMA_CHAN_CFG_SRC_BURST_A31(x) (((x) & 0x3) << 7)
+#define DMA_CHAN_CFG_SRC_BURST_H3(x) (((x) & 0x3) << 6)
#define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9)
#define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16)
#define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16)
#define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16)
-#define DMA_CHAN_CFG_DST_BURST(x) (DMA_CHAN_CFG_SRC_BURST(x) << 16)
+#define DMA_CHAN_CFG_DST_BURST_A31(x) (DMA_CHAN_CFG_SRC_BURST_A31(x) << 16)
+#define DMA_CHAN_CFG_DST_BURST_H3(x) (DMA_CHAN_CFG_SRC_BURST_H3(x) << 16)
#define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16)
#define DMA_CHAN_CUR_SRC 0x10
@@ -90,6 +99,9 @@
#define NORMAL_WAIT 8
#define DRQ_SDRAM 1
+/* forward declaration */
+struct sun6i_dma_dev;
+
/*
* Hardware channels / ports representation
*
@@ -111,7 +123,12 @@ struct sun6i_dma_config {
* however these SoCs really have and need this bit, as seen in the
* BSP kernel source code.
*/
- bool gate_needed;
+ void (*clock_autogate_enable)(struct sun6i_dma_dev *);
+ void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst);
+ u32 src_burst_lengths;
+ u32 dst_burst_lengths;
+ u32 src_addr_widths;
+ u32 dst_addr_widths;
};
/*
@@ -175,6 +192,9 @@ struct sun6i_dma_dev {
struct sun6i_pchan *pchans;
struct sun6i_vchan *vchans;
const struct sun6i_dma_config *cfg;
+ u32 num_pchans;
+ u32 num_vchans;
+ u32 max_request;
};
static struct device *chan2dev(struct dma_chan *chan)
@@ -251,8 +271,12 @@ static inline s8 convert_burst(u32 maxburst)
switch (maxburst) {
case 1:
return 0;
+ case 4:
+ return 1;
case 8:
return 2;
+ case 16:
+ return 3;
default:
return -EINVAL;
}
@@ -260,11 +284,29 @@ static inline s8 convert_burst(u32 maxburst)
static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width)
{
- if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) ||
- (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
- return -EINVAL;
+ return ilog2(addr_width);
+}
+
+static void sun6i_enable_clock_autogate_a23(struct sun6i_dma_dev *sdev)
+{
+ writel(SUN8I_DMA_GATE_ENABLE, sdev->base + SUN8I_DMA_GATE);
+}
+
+static void sun6i_enable_clock_autogate_h3(struct sun6i_dma_dev *sdev)
+{
+ writel(SUNXI_H3_DMA_GATE_ENABLE, sdev->base + SUNXI_H3_DMA_GATE);
+}
- return addr_width >> 1;
+static void sun6i_set_burst_length_a31(u32 *p_cfg, s8 src_burst, s8 dst_burst)
+{
+ *p_cfg |= DMA_CHAN_CFG_SRC_BURST_A31(src_burst) |
+ DMA_CHAN_CFG_DST_BURST_A31(dst_burst);
+}
+
+static void sun6i_set_burst_length_h3(u32 *p_cfg, s8 src_burst, s8 dst_burst)
+{
+ *p_cfg |= DMA_CHAN_CFG_SRC_BURST_H3(src_burst) |
+ DMA_CHAN_CFG_DST_BURST_H3(dst_burst);
}
static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan)
@@ -399,7 +441,6 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
static void sun6i_dma_tasklet(unsigned long data)
{
struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
- const struct sun6i_dma_config *cfg = sdev->cfg;
struct sun6i_vchan *vchan;
struct sun6i_pchan *pchan;
unsigned int pchan_alloc = 0;
@@ -427,7 +468,7 @@ static void sun6i_dma_tasklet(unsigned long data)
}
spin_lock_irq(&sdev->lock);
- for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
+ for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) {
pchan = &sdev->pchans[pchan_idx];
if (pchan->vchan || list_empty(&sdev->pending))
@@ -448,7 +489,7 @@ static void sun6i_dma_tasklet(unsigned long data)
}
spin_unlock_irq(&sdev->lock);
- for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
+ for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) {
if (!(pchan_alloc & BIT(pchan_idx)))
continue;
@@ -470,7 +511,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
int i, j, ret = IRQ_NONE;
u32 status;
- for (i = 0; i < sdev->cfg->nr_max_channels / DMA_IRQ_CHAN_NR; i++) {
+ for (i = 0; i < sdev->num_pchans / DMA_IRQ_CHAN_NR; i++) {
status = readl(sdev->base + DMA_IRQ_STAT(i));
if (!status)
continue;
@@ -510,47 +551,49 @@ static int set_config(struct sun6i_dma_dev *sdev,
enum dma_transfer_direction direction,
u32 *p_cfg)
{
+ enum dma_slave_buswidth src_addr_width, dst_addr_width;
+ u32 src_maxburst, dst_maxburst;
s8 src_width, dst_width, src_burst, dst_burst;
+ src_addr_width = sconfig->src_addr_width;
+ dst_addr_width = sconfig->dst_addr_width;
+ src_maxburst = sconfig->src_maxburst;
+ dst_maxburst = sconfig->dst_maxburst;
+
switch (direction) {
case DMA_MEM_TO_DEV:
- src_burst = convert_burst(sconfig->src_maxburst ?
- sconfig->src_maxburst : 8);
- src_width = convert_buswidth(sconfig->src_addr_width !=
- DMA_SLAVE_BUSWIDTH_UNDEFINED ?
- sconfig->src_addr_width :
- DMA_SLAVE_BUSWIDTH_4_BYTES);
- dst_burst = convert_burst(sconfig->dst_maxburst);
- dst_width = convert_buswidth(sconfig->dst_addr_width);
+ if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ src_maxburst = src_maxburst ? src_maxburst : 8;
break;
case DMA_DEV_TO_MEM:
- src_burst = convert_burst(sconfig->src_maxburst);
- src_width = convert_buswidth(sconfig->src_addr_width);
- dst_burst = convert_burst(sconfig->dst_maxburst ?
- sconfig->dst_maxburst : 8);
- dst_width = convert_buswidth(sconfig->dst_addr_width !=
- DMA_SLAVE_BUSWIDTH_UNDEFINED ?
- sconfig->dst_addr_width :
- DMA_SLAVE_BUSWIDTH_4_BYTES);
+ if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dst_maxburst = dst_maxburst ? dst_maxburst : 8;
break;
default:
return -EINVAL;
}
- if (src_burst < 0)
- return src_burst;
- if (src_width < 0)
- return src_width;
- if (dst_burst < 0)
- return dst_burst;
- if (dst_width < 0)
- return dst_width;
-
- *p_cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
- DMA_CHAN_CFG_SRC_WIDTH(src_width) |
- DMA_CHAN_CFG_DST_BURST(dst_burst) |
+ if (!(BIT(src_addr_width) & sdev->slave.src_addr_widths))
+ return -EINVAL;
+ if (!(BIT(dst_addr_width) & sdev->slave.dst_addr_widths))
+ return -EINVAL;
+ if (!(BIT(src_maxburst) & sdev->cfg->src_burst_lengths))
+ return -EINVAL;
+ if (!(BIT(dst_maxburst) & sdev->cfg->dst_burst_lengths))
+ return -EINVAL;
+
+ src_width = convert_buswidth(src_addr_width);
+ dst_width = convert_buswidth(dst_addr_width);
+ dst_burst = convert_burst(dst_maxburst);
+ src_burst = convert_burst(src_maxburst);
+
+ *p_cfg = DMA_CHAN_CFG_SRC_WIDTH(src_width) |
DMA_CHAN_CFG_DST_WIDTH(dst_width);
+ sdev->cfg->set_burst_length(p_cfg, src_burst, dst_burst);
+
return 0;
}
@@ -593,11 +636,11 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
DMA_CHAN_CFG_DST_LINEAR_MODE |
DMA_CHAN_CFG_SRC_LINEAR_MODE |
- DMA_CHAN_CFG_SRC_BURST(burst) |
DMA_CHAN_CFG_SRC_WIDTH(width) |
- DMA_CHAN_CFG_DST_BURST(burst) |
DMA_CHAN_CFG_DST_WIDTH(width);
+ sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst);
+
sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);
sun6i_dma_dump_lli(vchan, v_lli);
@@ -948,7 +991,7 @@ static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
struct dma_chan *chan;
u8 port = dma_spec->args[0];
- if (port > sdev->cfg->nr_max_requests)
+ if (port > sdev->max_request)
return NULL;
chan = dma_get_any_slave_channel(&sdev->slave);
@@ -981,7 +1024,7 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
{
int i;
- for (i = 0; i < sdev->cfg->nr_max_vchans; i++) {
+ for (i = 0; i < sdev->num_vchans; i++) {
struct sun6i_vchan *vchan = &sdev->vchans[i];
list_del(&vchan->vc.chan.device_node);
@@ -1009,6 +1052,15 @@ static struct sun6i_dma_config sun6i_a31_dma_cfg = {
.nr_max_channels = 16,
.nr_max_requests = 30,
.nr_max_vchans = 53,
+ .set_burst_length = sun6i_set_burst_length_a31,
+ .src_burst_lengths = BIT(1) | BIT(8),
+ .dst_burst_lengths = BIT(1) | BIT(8),
+ .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
+ .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
/*
@@ -1020,24 +1072,76 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = {
.nr_max_channels = 8,
.nr_max_requests = 24,
.nr_max_vchans = 37,
- .gate_needed = true,
+ .clock_autogate_enable = sun6i_enable_clock_autogate_a23,
+ .set_burst_length = sun6i_set_burst_length_a31,
+ .src_burst_lengths = BIT(1) | BIT(8),
+ .dst_burst_lengths = BIT(1) | BIT(8),
+ .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
+ .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
static struct sun6i_dma_config sun8i_a83t_dma_cfg = {
.nr_max_channels = 8,
.nr_max_requests = 28,
.nr_max_vchans = 39,
+ .clock_autogate_enable = sun6i_enable_clock_autogate_a23,
+ .set_burst_length = sun6i_set_burst_length_a31,
+ .src_burst_lengths = BIT(1) | BIT(8),
+ .dst_burst_lengths = BIT(1) | BIT(8),
+ .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
+ .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
/*
* The H3 has 12 physical channels, a maximum DRQ port id of 27,
* and a total of 34 usable source and destination endpoints.
+ * It also supports additional burst lengths and bus widths,
+ * and the burst length fields have different offsets.
*/
static struct sun6i_dma_config sun8i_h3_dma_cfg = {
.nr_max_channels = 12,
.nr_max_requests = 27,
.nr_max_vchans = 34,
+ .clock_autogate_enable = sun6i_enable_clock_autogate_h3,
+ .set_burst_length = sun6i_set_burst_length_h3,
+ .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+ .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+ .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+ .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+};
+
+/*
+ * The A64 binding uses the number of dma channels from the
+ * device tree node.
+ */
+static struct sun6i_dma_config sun50i_a64_dma_cfg = {
+ .clock_autogate_enable = sun6i_enable_clock_autogate_h3,
+ .set_burst_length = sun6i_set_burst_length_h3,
+ .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+ .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+ .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+ .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
};
/*
@@ -1049,7 +1153,16 @@ static struct sun6i_dma_config sun8i_v3s_dma_cfg = {
.nr_max_channels = 8,
.nr_max_requests = 23,
.nr_max_vchans = 24,
- .gate_needed = true,
+ .clock_autogate_enable = sun6i_enable_clock_autogate_a23,
+ .set_burst_length = sun6i_set_burst_length_a31,
+ .src_burst_lengths = BIT(1) | BIT(8),
+ .dst_burst_lengths = BIT(1) | BIT(8),
+ .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
+ .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
static const struct of_device_id sun6i_dma_match[] = {
@@ -1058,13 +1171,14 @@ static const struct of_device_id sun6i_dma_match[] = {
{ .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg },
{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
{ .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
+ { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun6i_dma_match);
static int sun6i_dma_probe(struct platform_device *pdev)
{
- const struct of_device_id *device;
+ struct device_node *np = pdev->dev.of_node;
struct sun6i_dma_dev *sdc;
struct resource *res;
int ret, i;
@@ -1073,10 +1187,9 @@ static int sun6i_dma_probe(struct platform_device *pdev)
if (!sdc)
return -ENOMEM;
- device = of_match_device(sun6i_dma_match, &pdev->dev);
- if (!device)
+ sdc->cfg = of_device_get_match_data(&pdev->dev);
+ if (!sdc->cfg)
return -ENODEV;
- sdc->cfg = device->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sdc->base = devm_ioremap_resource(&pdev->dev, res);
@@ -1129,37 +1242,57 @@ static int sun6i_dma_probe(struct platform_device *pdev)
sdc->slave.device_pause = sun6i_dma_pause;
sdc->slave.device_resume = sun6i_dma_resume;
sdc->slave.device_terminate_all = sun6i_dma_terminate_all;
- sdc->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- sdc->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ sdc->slave.src_addr_widths = sdc->cfg->src_addr_widths;
+ sdc->slave.dst_addr_widths = sdc->cfg->dst_addr_widths;
sdc->slave.directions = BIT(DMA_DEV_TO_MEM) |
BIT(DMA_MEM_TO_DEV);
sdc->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
sdc->slave.dev = &pdev->dev;
- sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels,
+ sdc->num_pchans = sdc->cfg->nr_max_channels;
+ sdc->num_vchans = sdc->cfg->nr_max_vchans;
+ sdc->max_request = sdc->cfg->nr_max_requests;
+
+ ret = of_property_read_u32(np, "dma-channels", &sdc->num_pchans);
+ if (ret && !sdc->num_pchans) {
+ dev_err(&pdev->dev, "Can't get dma-channels.\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "dma-requests", &sdc->max_request);
+ if (ret && !sdc->max_request) {
+ dev_info(&pdev->dev, "Missing dma-requests, using %u.\n",
+ DMA_CHAN_MAX_DRQ);
+ sdc->max_request = DMA_CHAN_MAX_DRQ;
+ }
+
+ /*
+ * If the number of vchans is not specified, derive it from the
+ * highest port number, at most one channel per port and direction.
+ */
+ if (!sdc->num_vchans)
+ sdc->num_vchans = 2 * (sdc->max_request + 1);
+
+ sdc->pchans = devm_kcalloc(&pdev->dev, sdc->num_pchans,
sizeof(struct sun6i_pchan), GFP_KERNEL);
if (!sdc->pchans)
return -ENOMEM;
- sdc->vchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_vchans,
+ sdc->vchans = devm_kcalloc(&pdev->dev, sdc->num_vchans,
sizeof(struct sun6i_vchan), GFP_KERNEL);
if (!sdc->vchans)
return -ENOMEM;
tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);
- for (i = 0; i < sdc->cfg->nr_max_channels; i++) {
+ for (i = 0; i < sdc->num_pchans; i++) {
struct sun6i_pchan *pchan = &sdc->pchans[i];
pchan->idx = i;
pchan->base = sdc->base + 0x100 + i * 0x40;
}
- for (i = 0; i < sdc->cfg->nr_max_vchans; i++) {
+ for (i = 0; i < sdc->num_vchans; i++) {
struct sun6i_vchan *vchan = &sdc->vchans[i];
INIT_LIST_HEAD(&vchan->node);
@@ -1199,8 +1332,8 @@ static int sun6i_dma_probe(struct platform_device *pdev)
goto err_dma_unregister;
}
- if (sdc->cfg->gate_needed)
- writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE);
+ if (sdc->cfg->clock_autogate_enable)
+ sdc->cfg->clock_autogate_enable(sdc);
return 0;
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index f1d04b70ee67..7df910e7c348 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -49,12 +49,12 @@ struct ti_am335x_xbar_data {
struct ti_am335x_xbar_map {
u16 dma_line;
- u16 mux_val;
+ u8 mux_val;
};
-static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val)
+static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
{
- writeb_relaxed(val & 0x1f, iomem + event);
+ writeb_relaxed(val, iomem + event);
}
static void ti_am335x_xbar_free(struct device *dev, void *route_data)
@@ -105,7 +105,7 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
}
map->dma_line = (u16)dma_spec->args[0];
- map->mux_val = (u16)dma_spec->args[2];
+ map->mux_val = (u8)dma_spec->args[2];
dma_spec->args[2] = 0;
dma_spec->args_count = 2;
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 8722bcba489d..5eef13380ca8 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -366,6 +366,20 @@ struct xilinx_dma_chan {
u16 tdest;
};
+/**
+ * enum xdma_ip_type - DMA IP type.
+ *
+ * @XDMA_TYPE_AXIDMA: AXI DMA IP.
+ * @XDMA_TYPE_CDMA: AXI CDMA IP.
+ * @XDMA_TYPE_VDMA: AXI VDMA IP.
+ *
+ */
+enum xdma_ip_type {
+ XDMA_TYPE_AXIDMA = 0,
+ XDMA_TYPE_CDMA,
+ XDMA_TYPE_VDMA,
+};
+
struct xilinx_dma_config {
enum xdma_ip_type dmatype;
int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 346c4987b284..11d6419788c2 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -175,11 +175,11 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
/*
* To trigger the error, we need to read the data back
* (the data was written with errors above).
- * The ACCESS_ONCE macros and printk are used to prevent the
+ * The READ_ONCE macros and printk are used to prevent
* the compiler optimizing these reads out.
*/
- reg = ACCESS_ONCE(ptemp[0]);
- read_reg = ACCESS_ONCE(ptemp[1]);
+ reg = READ_ONCE(ptemp[0]);
+ read_reg = READ_ONCE(ptemp[1]);
/* Force Read */
rmb();
@@ -618,7 +618,7 @@ static ssize_t altr_edac_device_trig(struct file *file,
for (i = 0; i < (priv->trig_alloc_sz / sizeof(*ptemp)); i++) {
/* Read data so we're in the correct state */
rmb();
- if (ACCESS_ONCE(ptemp[i]))
+ if (READ_ONCE(ptemp[i]))
result = -1;
/* Toggle Error bit (it is latched), leave ECC enabled */
writel(error_mask, (drvdata->base + priv->set_err_ofst));
@@ -635,7 +635,7 @@ static ssize_t altr_edac_device_trig(struct file *file,
/* Read out written data. ECC error caused here */
for (i = 0; i < ALTR_TRIGGER_READ_WRD_CNT; i++)
- if (ACCESS_ONCE(ptemp[i]) != i)
+ if (READ_ONCE(ptemp[i]) != i)
edac_printk(KERN_ERR, EDAC_DEVICE,
"Read doesn't match written data\n");
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ac2f30295efe..8b16ec595fa7 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3434,9 +3434,14 @@ MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
static int __init amd64_edac_init(void)
{
+ const char *owner;
int err = -ENODEV;
int i;
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
if (!x86_match_cpu(amd64_cpuids))
return -ENODEV;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 480072139b7a..48193f5f3b56 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -53,7 +53,7 @@ static LIST_HEAD(mc_devices);
* Used to lock EDAC MC to just one module, avoiding two drivers e. g.
* apei/ghes and i7core_edac to be used at the same time.
*/
-static void const *edac_mc_owner;
+static const char *edac_mc_owner;
static struct bus_type mc_bus[EDAC_MAX_MCS];
@@ -701,6 +701,11 @@ unlock:
}
EXPORT_SYMBOL(edac_mc_find);
+const char *edac_get_owner(void)
+{
+ return edac_mc_owner;
+}
+EXPORT_SYMBOL_GPL(edac_get_owner);
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
index 5357800e418d..4165e15995ad 100644
--- a/drivers/edac/edac_mc.h
+++ b/drivers/edac/edac_mc.h
@@ -128,6 +128,14 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
unsigned sz_pvt);
/**
+ * edac_get_owner - Return the mod_name of the module that owns the EDAC MC core
+ *
+ * Returns:
+ * Pointer to mod_name string when EDAC MC is owned. NULL otherwise.
+ */
+extern const char *edac_get_owner(void);
+
+/*
* edac_mc_add_mc_with_groups() - Insert the @mci structure into the mci
* global list and create sysfs entries associated with @mci structure.
*
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index e4fcfa84fbd3..c70ea82c815c 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -50,7 +50,7 @@ int edac_mc_get_poll_msec(void)
return edac_mc_poll_msec;
}
-static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
+static int edac_set_poll_msec(const char *val, const struct kernel_param *kp)
{
unsigned long l;
int ret;
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index 172598a27d7d..32a931d0cb71 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -19,7 +19,8 @@
#ifdef CONFIG_EDAC_DEBUG
-static int edac_set_debug_level(const char *buf, struct kernel_param *kp)
+static int edac_set_debug_level(const char *buf,
+ const struct kernel_param *kp)
{
unsigned long val;
int ret;
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 6f80eb65c26c..68b6ee18bea6 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -28,10 +28,19 @@ struct ghes_edac_pvt {
char msg[80];
};
-static LIST_HEAD(ghes_reglist);
-static DEFINE_MUTEX(ghes_edac_lock);
-static int ghes_edac_mc_num;
+static atomic_t ghes_init = ATOMIC_INIT(0);
+static struct ghes_edac_pvt *ghes_pvt;
+/*
+ * Sync with other, potentially concurrent callers of
+ * ghes_edac_report_mem_error(). We don't know what the
+ * "inventive" firmware would do.
+ */
+static DEFINE_SPINLOCK(ghes_lock);
+
+/* "ghes_edac.force_load=1" skips the platform check */
+static bool __read_mostly force_load;
+module_param(force_load, bool, 0);
/* Memory Device - Type 17 of SMBIOS spec */
struct memdev_dmi_entry {
@@ -169,18 +178,26 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
enum hw_event_mc_err_type type;
struct edac_raw_error_desc *e;
struct mem_ctl_info *mci;
- struct ghes_edac_pvt *pvt = NULL;
+ struct ghes_edac_pvt *pvt = ghes_pvt;
+ unsigned long flags;
char *p;
u8 grain_bits;
- list_for_each_entry(pvt, &ghes_reglist, list) {
- if (ghes == pvt->ghes)
- break;
- }
if (!pvt) {
pr_err("Internal error: Can't find EDAC structure\n");
return;
}
+
+ /*
+ * We can do the locking below because GHES defers error processing
+ * from NMI to IRQ context. Whenever that changes, we'd at least
+ * know.
+ */
+ if (WARN_ON_ONCE(in_nmi()))
+ return;
+
+ spin_lock_irqsave(&ghes_lock, flags);
+
mci = pvt->mci;
e = &mci->error_desc;
@@ -398,10 +415,17 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
(e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
grain_bits, e->syndrome, pvt->detail_location);
- /* Report the error via EDAC API */
edac_raw_mc_handle_error(type, mci, e);
+ spin_unlock_irqrestore(&ghes_lock, flags);
}
-EXPORT_SYMBOL_GPL(ghes_edac_report_mem_error);
+
+/*
+ * Known systems on which it is safe to enable this module.
+ */
+static struct acpi_platform_list plat_list[] = {
+ {"HPE ", "Server ", 0, ACPI_SIG_FADT, all_versions},
+ { } /* End */
+};
int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
@@ -409,8 +433,19 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
int rc, num_dimm = 0;
struct mem_ctl_info *mci;
struct edac_mc_layer layers[1];
- struct ghes_edac_pvt *pvt;
struct ghes_edac_dimm_fill dimm_fill;
+ int idx;
+
+ /* Check if safe to enable on this system */
+ idx = acpi_match_platform_list(plat_list);
+ if (!force_load && idx < 0)
+ return 0;
+
+ /*
+ * We have only one logical memory controller to which all DIMMs belong.
+ */
+ if (atomic_inc_return(&ghes_init) > 1)
+ return 0;
/* Get the number of DIMMs */
dmi_walk(ghes_edac_count_dimms, &num_dimm);
@@ -425,26 +460,17 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
layers[0].size = num_dimm;
layers[0].is_virt_csrow = true;
- /*
- * We need to serialize edac_mc_alloc() and edac_mc_add_mc(),
- * to avoid duplicated memory controller numbers
- */
- mutex_lock(&ghes_edac_lock);
- mci = edac_mc_alloc(ghes_edac_mc_num, ARRAY_SIZE(layers), layers,
- sizeof(*pvt));
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
if (!mci) {
pr_info("Can't allocate memory for EDAC data\n");
- mutex_unlock(&ghes_edac_lock);
return -ENOMEM;
}
- pvt = mci->pvt_info;
- memset(pvt, 0, sizeof(*pvt));
- list_add_tail(&pvt->list, &ghes_reglist);
- pvt->ghes = ghes;
- pvt->mci = mci;
- mci->pdev = dev;
+ ghes_pvt = mci->pvt_info;
+ ghes_pvt->ghes = ghes;
+ ghes_pvt->mci = mci;
+ mci->pdev = dev;
mci->mtype_cap = MEM_FLAG_EMPTY;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
@@ -452,36 +478,23 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
mci->ctl_name = "ghes_edac";
mci->dev_name = "ghes";
- if (!ghes_edac_mc_num) {
- if (!fake) {
- pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
- pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
- pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
- pr_info("If you find incorrect reports, please contact your hardware vendor\n");
- pr_info("to correct its BIOS.\n");
- pr_info("This system has %d DIMM sockets.\n",
- num_dimm);
- } else {
- pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
- pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
- pr_info("work on such system. Use this driver with caution\n");
- }
+ if (fake) {
+ pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
+ pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
+ pr_info("work on such system. Use this driver with caution\n");
+ } else if (idx < 0) {
+ pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
+ pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
+ pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
+ pr_info("If you find incorrect reports, please contact your hardware vendor\n");
+ pr_info("to correct its BIOS.\n");
+ pr_info("This system has %d DIMM sockets.\n", num_dimm);
}
if (!fake) {
- /*
- * Fill DIMM info from DMI for the memory controller #0
- *
- * Keep it in blank for the other memory controllers, as
- * there's no reliable way to properly credit each DIMM to
- * the memory controller, as different BIOSes fill the
- * DMI bank location fields on different ways
- */
- if (!ghes_edac_mc_num) {
- dimm_fill.count = 0;
- dimm_fill.mci = mci;
- dmi_walk(ghes_edac_dmidecode, &dimm_fill);
- }
+ dimm_fill.count = 0;
+ dimm_fill.mci = mci;
+ dmi_walk(ghes_edac_dmidecode, &dimm_fill);
} else {
struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
mci->n_layers, 0, 0, 0);
@@ -497,28 +510,16 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
if (rc < 0) {
pr_info("Can't register at EDAC core\n");
edac_mc_free(mci);
- mutex_unlock(&ghes_edac_lock);
return -ENODEV;
}
-
- ghes_edac_mc_num++;
- mutex_unlock(&ghes_edac_lock);
return 0;
}
-EXPORT_SYMBOL_GPL(ghes_edac_register);
void ghes_edac_unregister(struct ghes *ghes)
{
struct mem_ctl_info *mci;
- struct ghes_edac_pvt *pvt, *tmp;
-
- list_for_each_entry_safe(pvt, tmp, &ghes_reglist, list) {
- if (ghes == pvt->ghes) {
- mci = pvt->mci;
- edac_mc_del_mc(mci->pdev);
- edac_mc_free(mci);
- list_del(&pvt->list);
- }
- }
+
+ mci = ghes_pvt->mci;
+ edac_mc_del_mc(mci->pdev);
+ edac_mc_free(mci);
}
-EXPORT_SYMBOL_GPL(ghes_edac_unregister);
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index c16c3b931b3d..8c5540160a23 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -2159,8 +2159,13 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i7core_edac.c";
- mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
- i7core_dev->socket);
+
+ mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d", i7core_dev->socket);
+ if (!mci->ctl_name) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
mci->dev_name = pci_name(i7core_dev->pdev[0]);
mci->ctl_page_to_phys = NULL;
@@ -2214,6 +2219,8 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
fail0:
kfree(mci->ctl_name);
+
+fail1:
edac_mc_free(mci);
i7core_dev->mci = NULL;
return rc;
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 4395c84cdcbf..df28b65358d2 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -45,6 +45,8 @@
#include "edac_module.h"
#include "pnd2_edac.h"
+#define EDAC_MOD_STR "pnd2_edac"
+
#define APL_NUM_CHANNELS 4
#define DNV_NUM_CHANNELS 2
#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
@@ -1355,7 +1357,7 @@ static int pnd2_register_mci(struct mem_ctl_info **ppmci)
pvt = mci->pvt_info;
memset(pvt, 0, sizeof(*pvt));
- mci->mod_name = "pnd2_edac.c";
+ mci->mod_name = EDAC_MOD_STR;
mci->dev_name = ops->name;
mci->ctl_name = "Pondicherry2";
@@ -1547,10 +1549,15 @@ MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
static int __init pnd2_init(void)
{
const struct x86_cpu_id *id;
+ const char *owner;
int rc;
edac_dbg(2, "\n");
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
id = x86_match_cpu(pnd2_cpuids);
if (!id)
return -ENODEV;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index dc0591654011..f34430f99fd8 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -36,7 +36,7 @@ static LIST_HEAD(sbridge_edac_list);
* Alter this version for the module when modifications are made
*/
#define SBRIDGE_REVISION " Ver: 1.1.2 "
-#define EDAC_MOD_STR "sbridge_edac"
+#define EDAC_MOD_STR "sb_edac"
/*
* Debug macros
@@ -462,6 +462,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
static const struct pci_id_descr pci_dev_descr_ibridge[] = {
/* Processor Home Agent */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) },
/* Memory controller */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) },
@@ -472,7 +473,6 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) },
/* Optional, mode 2HA */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) },
@@ -1318,9 +1318,7 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
int cur_reg_start;
int mc;
int channel;
- int way;
int participants[KNL_MAX_CHANNELS];
- int participant_count = 0;
for (i = 0; i < KNL_MAX_CHANNELS; i++)
mc_sizes[i] = 0;
@@ -1495,21 +1493,14 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
* this channel mapped to the given target?
*/
for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
- for (way = 0; way < intrlv_ways; way++) {
- int target;
- int cha;
-
- if (KNL_MOD3(dram_rule))
- target = way;
- else
- target = 0x7 & sad_pkg(
- pvt->info.interleave_pkg, interleave_reg, way);
+ int target;
+ int cha;
+ for (target = 0; target < KNL_MAX_CHANNELS; target++) {
for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
if (knl_get_mc_route(target,
mc_route_reg[cha]) == channel
&& !participants[channel]) {
- participant_count++;
participants[channel] = 1;
break;
}
@@ -1517,10 +1508,6 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
}
}
- if (participant_count != intrlv_ways)
- edac_dbg(0, "participant_count (%d) != interleave_ways (%d): DIMM size may be incorrect\n",
- participant_count, intrlv_ways);
-
for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
mc = knl_channel_mc(channel);
if (participants[channel]) {
@@ -2291,6 +2278,13 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
next_imc:
sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
if (!sbridge_dev) {
+ /* If HA1 wasn't found, don't register a second EDAC memory controller */
+ if (dev_descr->dom == IMC1 && devno != 1) {
+ edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ pci_dev_put(pdev);
+ return 0;
+ }
if (dev_descr->dom == SOCK)
goto out_imc;
@@ -2491,6 +2485,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
pvt->pci_ta = pdev;
+ break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
pvt->pci_ras = pdev;
@@ -3155,7 +3150,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = "sb_edac.c";
+ mci->mod_name = EDAC_MOD_STR;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
@@ -3287,6 +3282,11 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
break;
}
+ if (!mci->ctl_name) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+
/* Get dimm basic config and the memory layout */
rc = get_dimm_config(mci);
if (rc < 0) {
@@ -3402,10 +3402,15 @@ static void sbridge_remove(void)
static int __init sbridge_init(void)
{
const struct x86_cpu_id *id;
+ const char *owner;
int rc;
edac_dbg(2, "\n");
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
id = x86_match_cpu(sbridge_cpuids);
if (!id)
return -ENODEV;
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index 16dea97568a1..912c4930c9ef 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -31,6 +31,8 @@
#include "edac_module.h"
+#define EDAC_MOD_STR "skx_edac"
+
/*
* Debug macros
*/
@@ -65,6 +67,7 @@ static u64 skx_tolm, skx_tohm;
struct skx_dev {
struct list_head list;
u8 bus[4];
+ int seg;
struct pci_dev *sad_all;
struct pci_dev *util_all;
u32 mcroute;
@@ -110,12 +113,12 @@ struct decoded_addr {
int bank_group;
};
-static struct skx_dev *get_skx_dev(u8 bus, u8 idx)
+static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
{
struct skx_dev *d;
list_for_each_entry(d, &skx_edac_list, list) {
- if (d->bus[idx] == bus)
+ if (d->seg == pci_domain_nr(bus) && d->bus[idx] == bus->number)
return d;
}
@@ -172,6 +175,7 @@ static int get_all_bus_mappings(void)
pci_dev_put(pdev);
return -ENOMEM;
}
+ d->seg = pci_domain_nr(pdev->bus);
pci_read_config_dword(pdev, 0xCC, &reg);
d->bus[0] = GET_BITFIELD(reg, 0, 7);
d->bus[1] = GET_BITFIELD(reg, 8, 15);
@@ -207,7 +211,7 @@ static int get_all_munits(const struct munit *m)
if (i == NUM_IMC)
goto fail;
}
- d = get_skx_dev(pdev->bus->number, m->busidx);
+ d = get_skx_dev(pdev->bus, m->busidx);
if (!d)
goto fail;
@@ -299,7 +303,7 @@ static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval,
#define IS_DIMM_PRESENT(mtr) GET_BITFIELD((mtr), 15, 15)
-#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 1, 2, "ranks")
+#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 0, 2, "ranks")
#define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows")
#define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols")
@@ -360,7 +364,7 @@ static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
imc->mc, chan, dimmno, size, npages,
- banks, ranks, rows, cols);
+ banks, 1 << ranks, rows, cols);
imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
@@ -464,12 +468,16 @@ static int skx_register_mci(struct skx_imc *imc)
pvt = mci->pvt_info;
pvt->imc = imc;
- mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d",
- imc->node_id, imc->lmc);
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d", imc->node_id, imc->lmc);
+ if (!mci->ctl_name) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+
mci->mtype_cap = MEM_FLAG_DDR4;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = "skx_edac.c";
+ mci->mod_name = EDAC_MOD_STR;
mci->dev_name = pci_name(imc->chan[0].cdev);
mci->ctl_page_to_phys = NULL;
@@ -491,6 +499,7 @@ static int skx_register_mci(struct skx_imc *imc)
fail:
kfree(mci->ctl_name);
+fail0:
edac_mc_free(mci);
imc->mci = NULL;
return rc;
@@ -1039,12 +1048,17 @@ static int __init skx_init(void)
{
const struct x86_cpu_id *id;
const struct munit *m;
+ const char *owner;
int rc = 0, i;
u8 mc = 0, src_id, node_id;
struct skx_dev *d;
edac_dbg(2, "\n");
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
id = x86_match_cpu(skx_cpuids);
if (!id)
return -ENODEV;
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index f35d87519a3e..4803c6468bab 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -639,27 +639,6 @@ err_free:
return ret;
}
-#ifdef CONFIG_PM
-static int thunderx_lmc_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- pci_save_state(pdev);
- pci_disable_device(pdev);
-
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-static int thunderx_lmc_resume(struct pci_dev *pdev)
-{
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
-
- return 0;
-}
-#endif
-
static const struct pci_device_id thunderx_lmc_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_LMC) },
{ 0, },
@@ -834,10 +813,6 @@ static struct pci_driver thunderx_lmc_driver = {
.name = "thunderx_lmc_edac",
.probe = thunderx_lmc_probe,
.remove = thunderx_lmc_remove,
-#ifdef CONFIG_PM
- .suspend = thunderx_lmc_suspend,
- .resume = thunderx_lmc_resume,
-#endif
.id_table = thunderx_lmc_pci_tbl,
};
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 6f6537ab0a79..3877d86c746a 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -26,7 +26,7 @@
#include <linux/workqueue.h>
#include <linux/iio/consumer.h>
#include <linux/extcon/extcon-adc-jack.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
/**
* struct adc_jack_data - internal data for adc_jack device driver
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index f84da4a17724..da0e9bc4262f 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -27,7 +27,7 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <sound/soc.h>
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index f4fd03e58e37..981fba56bc18 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -22,7 +22,7 @@
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/notifier.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/regmap.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index ebed22f22d75..ab770adcca7e 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -17,7 +17,7 @@
* GNU General Public License for more details.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/extcon/extcon-gpio.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 91a0023074af..7c4bc8c44c3f 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -15,7 +15,7 @@
* more details.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/intel_soc_pmic.h>
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index 1a45e745717d..c8691b5a9cb0 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -19,7 +19,7 @@
*/
#include <linux/acpi.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/module.h>
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index f6414b7fa5bc..b871836da8a4 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -23,7 +23,7 @@
#include <linux/platform_device.h>
#include <linux/mfd/max14577.h>
#include <linux/mfd/max14577-private.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#define DELAY_MS_DEFAULT 17000 /* unit: millisecond */
@@ -204,8 +204,8 @@ static int max14577_muic_set_debounce_time(struct max14577_muic_info *info,
static int max14577_muic_set_path(struct max14577_muic_info *info,
u8 val, bool attached)
{
- int ret = 0;
u8 ctrl1, ctrl2 = 0;
+ int ret;
/* Set open state to path before changing hw path */
ret = max14577_update_reg(info->max14577->regmap,
diff --git a/drivers/extcon/extcon-max3355.c b/drivers/extcon/extcon-max3355.c
index 533e16a952b8..0aa410836f4e 100644
--- a/drivers/extcon/extcon-max3355.c
+++ b/drivers/extcon/extcon-max3355.c
@@ -9,7 +9,7 @@
* may be copied, distributed, and modified under those terms.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 7a5856809047..643411066ad9 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -26,7 +26,7 @@
#include <linux/mfd/max77693.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77693-private.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/regmap.h>
#include <linux/irqdomain.h>
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index 6e722d552cf1..c9fcd6cd41cb 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -11,7 +11,7 @@
* (at your option) any later version.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -80,7 +80,7 @@ enum max77843_muic_accessory_type {
MAX77843_MUIC_ADC_REMOTE_S12_BUTTON,
MAX77843_MUIC_ADC_RESERVED_ACC_1,
MAX77843_MUIC_ADC_RESERVED_ACC_2,
- MAX77843_MUIC_ADC_RESERVED_ACC_3,
+ MAX77843_MUIC_ADC_RESERVED_ACC_3, /* SmartDock */
MAX77843_MUIC_ADC_RESERVED_ACC_4,
MAX77843_MUIC_ADC_RESERVED_ACC_5,
MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE2,
@@ -119,6 +119,7 @@ enum max77843_muic_charger_type {
MAX77843_MUIC_CHG_SPECIAL_BIAS,
MAX77843_MUIC_CHG_RESERVED,
MAX77843_MUIC_CHG_GND,
+ MAX77843_MUIC_CHG_DOCK,
};
static const unsigned int max77843_extcon_cable[] = {
@@ -130,6 +131,7 @@ static const unsigned int max77843_extcon_cable[] = {
EXTCON_CHG_USB_FAST,
EXTCON_CHG_USB_SLOW,
EXTCON_DISP_MHL,
+ EXTCON_DOCK,
EXTCON_JIG,
EXTCON_NONE,
};
@@ -200,7 +202,7 @@ static const struct regmap_irq_chip max77843_muic_irq_chip = {
};
static int max77843_muic_set_path(struct max77843_muic_info *info,
- u8 val, bool attached)
+ u8 val, bool attached, bool nobccomp)
{
struct max77693_dev *max77843 = info->max77843;
int ret = 0;
@@ -210,10 +212,16 @@ static int max77843_muic_set_path(struct max77843_muic_info *info,
ctrl1 = val;
else
ctrl1 = MAX77843_MUIC_CONTROL1_SW_OPEN;
+ if (nobccomp) {
+ /* Disable BC1.2 protocol and force manual switch control */
+ ctrl1 |= MAX77843_MUIC_CONTROL1_NOBCCOMP_MASK;
+ }
ret = regmap_update_bits(max77843->regmap_muic,
MAX77843_MUIC_REG_CONTROL1,
- MAX77843_MUIC_CONTROL1_COM_SW, ctrl1);
+ MAX77843_MUIC_CONTROL1_COM_SW |
+ MAX77843_MUIC_CONTROL1_NOBCCOMP_MASK,
+ ctrl1);
if (ret < 0) {
dev_err(info->dev, "Cannot switch MUIC port\n");
return ret;
@@ -240,6 +248,21 @@ static int max77843_muic_set_path(struct max77843_muic_info *info,
return 0;
}
+static void max77843_charger_set_otg_vbus(struct max77843_muic_info *info,
+ bool on)
+{
+ struct max77693_dev *max77843 = info->max77843;
+ unsigned int cnfg00;
+
+ if (on)
+ cnfg00 = MAX77843_CHG_OTG_MASK | MAX77843_CHG_BOOST_MASK;
+ else
+ cnfg00 = MAX77843_CHG_ENABLE | MAX77843_CHG_BUCK_MASK;
+
+ regmap_update_bits(max77843->regmap_chg, MAX77843_CHG_REG_CHG_CNFG_00,
+ MAX77843_CHG_MODE_MASK, cnfg00);
+}
+
static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
enum max77843_muic_cable_group group, bool *attached)
{
@@ -288,6 +311,19 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
break;
}
+ if (adc == MAX77843_MUIC_ADC_RESERVED_ACC_3) { /* SmartDock */
+ if (chg_type == MAX77843_MUIC_CHG_NONE) {
+ *attached = false;
+ cable_type = info->prev_chg_type;
+ info->prev_chg_type = MAX77843_MUIC_CHG_NONE;
+ } else {
+ *attached = true;
+ cable_type = MAX77843_MUIC_CHG_DOCK;
+ info->prev_chg_type = MAX77843_MUIC_CHG_DOCK;
+ }
+ break;
+ }
+
if (chg_type == MAX77843_MUIC_CHG_NONE) {
*attached = false;
cable_type = info->prev_chg_type;
@@ -350,17 +386,18 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
case MAX77843_MUIC_GND_USB_HOST_VB:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_USB,
- attached);
+ attached, false);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, attached);
+ max77843_charger_set_otg_vbus(info, attached);
break;
case MAX77843_MUIC_GND_MHL_VB:
case MAX77843_MUIC_GND_MHL:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
- attached);
+ attached, false);
if (ret < 0)
return ret;
@@ -396,7 +433,7 @@ static int max77843_muic_jig_handler(struct max77843_muic_info *info,
return -EINVAL;
}
- ret = max77843_muic_set_path(info, path, attached);
+ ret = max77843_muic_set_path(info, path, attached, false);
if (ret < 0)
return ret;
@@ -405,6 +442,26 @@ static int max77843_muic_jig_handler(struct max77843_muic_info *info,
return 0;
}
+static int max77843_muic_dock_handler(struct max77843_muic_info *info,
+ bool attached)
+{
+ int ret;
+
+ dev_dbg(info->dev, "external connector is %s (adc: 0x10)\n",
+ attached ? "attached" : "detached");
+
+ ret = max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_USB,
+ attached, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_state_sync(info->edev, EXTCON_DISP_MHL, attached);
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, attached);
+ extcon_set_state_sync(info->edev, EXTCON_DOCK, attached);
+
+ return 0;
+}
+
static int max77843_muic_adc_handler(struct max77843_muic_info *info)
{
int ret, cable_type;
@@ -419,6 +476,11 @@ static int max77843_muic_adc_handler(struct max77843_muic_info *info)
info->prev_cable_type);
switch (cable_type) {
+ case MAX77843_MUIC_ADC_RESERVED_ACC_3: /* SmartDock */
+ ret = max77843_muic_dock_handler(info, attached);
+ if (ret < 0)
+ return ret;
+ break;
case MAX77843_MUIC_ADC_GROUND:
ret = max77843_muic_adc_gnd_handler(info);
if (ret < 0)
@@ -446,7 +508,6 @@ static int max77843_muic_adc_handler(struct max77843_muic_info *info)
case MAX77843_MUIC_ADC_REMOTE_S12_BUTTON:
case MAX77843_MUIC_ADC_RESERVED_ACC_1:
case MAX77843_MUIC_ADC_RESERVED_ACC_2:
- case MAX77843_MUIC_ADC_RESERVED_ACC_3:
case MAX77843_MUIC_ADC_RESERVED_ACC_4:
case MAX77843_MUIC_ADC_RESERVED_ACC_5:
case MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE2:
@@ -490,7 +551,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
case MAX77843_MUIC_CHG_USB:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_USB,
- attached);
+ attached, false);
if (ret < 0)
return ret;
@@ -501,7 +562,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
case MAX77843_MUIC_CHG_DOWNSTREAM:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
- attached);
+ attached, false);
if (ret < 0)
return ret;
@@ -511,7 +572,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
case MAX77843_MUIC_CHG_DEDICATED:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
- attached);
+ attached, false);
if (ret < 0)
return ret;
@@ -521,7 +582,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
case MAX77843_MUIC_CHG_SPECIAL_500MA:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
- attached);
+ attached, false);
if (ret < 0)
return ret;
@@ -531,7 +592,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
case MAX77843_MUIC_CHG_SPECIAL_1A:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
- attached);
+ attached, false);
if (ret < 0)
return ret;
@@ -550,6 +611,9 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
false);
break;
+ case MAX77843_MUIC_CHG_DOCK:
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP, attached);
+ break;
case MAX77843_MUIC_CHG_NONE:
break;
default:
@@ -558,7 +622,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
attached ? "attached" : "detached", chg_type);
max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_OPEN,
- attached);
+ attached, false);
return -EINVAL;
}
@@ -798,7 +862,8 @@ static int max77843_muic_probe(struct platform_device *pdev)
max77843_muic_set_debounce_time(info, MAX77843_DEBOUNCE_TIME_25MS);
/* Set initial path for UART */
- max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_UART, true);
+ max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_UART, true,
+ false);
/* Check revision number of MUIC device */
ret = regmap_read(max77843->regmap_muic, MAX77843_MUIC_REG_ID, &id);
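[Editor's note, not part of the patch above: the max77843 hunks widen the mask passed to regmap_update_bits() because that helper performs a read-modify-write confined to the bits in its mask argument; a bit that is not in the mask can never be changed. A minimal sketch of the idiom, reusing the register macros visible in the hunks but with a hypothetical function name:]

	/* assumes <linux/regmap.h> and the MAX77843 MUIC register definitions */
	static int set_path_sketch(struct regmap *map, u8 com_sw, bool nobccomp)
	{
		unsigned int mask = MAX77843_MUIC_CONTROL1_COM_SW |
				    MAX77843_MUIC_CONTROL1_NOBCCOMP_MASK;
		unsigned int val = com_sw;

		if (nobccomp)
			val |= MAX77843_MUIC_CONTROL1_NOBCCOMP_MASK;

		/* only bits set in 'mask' are rewritten; all others keep their value */
		return regmap_update_bits(map, MAX77843_MUIC_REG_CONTROL1, mask, val);
	}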
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 4a0612fb9c07..8152790d72e1 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -25,7 +25,7 @@
#include <linux/kobject.h>
#include <linux/mfd/max8997.h>
#include <linux/mfd/max8997-private.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/irqdomain.h>
#define DEV_NAME "max8997-muic"
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index b8cde096a808..660bbf163bf5 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -15,7 +15,7 @@
* GNU General Public License for more details.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index eaa355e7d9e4..e059bd5f2041 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -20,7 +20,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include "extcon-rt8973a.h"
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 106ef0297b53..0cfb5a3efdf6 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -19,7 +19,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include "extcon-sm5502.h"
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 9c925b05b7aa..53762864a9f7 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -14,7 +14,7 @@
* GNU General Public License for more details.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
index 598956f1dcae..6187f731b29d 100644
--- a/drivers/extcon/extcon-usbc-cros-ec.c
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -14,7 +14,7 @@
* GNU General Public License for more details.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/kernel.h>
#include <linux/mfd/cros_ec.h>
#include <linux/module.h>
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 35e9fb885486..cb38c2747684 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -36,7 +36,7 @@
#define SUPPORTED_CABLE_MAX 32
-struct __extcon_info {
+static const struct __extcon_info {
unsigned int type;
unsigned int id;
const char *name;
diff --git a/drivers/extcon/extcon.h b/drivers/extcon/extcon.h
index 61358479bfcc..93b5e0306966 100644
--- a/drivers/extcon/extcon.h
+++ b/drivers/extcon/extcon.h
@@ -2,7 +2,7 @@
#ifndef __LINUX_EXTCON_INTERNAL_H__
#define __LINUX_EXTCON_INTERNAL_H__
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
/**
* struct extcon_dev - An extcon device represents one external connector.
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8bf89267dc25..ccf52368a073 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -734,7 +734,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
__le16 res_count, next_res_count;
i = ar_first_buffer_index(ctx);
- res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
+ res_count = READ_ONCE(ctx->descriptors[i].res_count);
/* A buffer that is not yet completely filled must be the last one. */
while (i != last && res_count == 0) {
@@ -742,8 +742,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
/* Peek at the next descriptor. */
next_i = ar_next_buffer_index(i);
rmb(); /* read descriptors in order */
- next_res_count = ACCESS_ONCE(
- ctx->descriptors[next_i].res_count);
+ next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
/*
* If the next descriptor is still empty, we must stop at this
* descriptor.
@@ -759,8 +758,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
next_i = ar_next_buffer_index(next_i);
rmb();
- next_res_count = ACCESS_ONCE(
- ctx->descriptors[next_i].res_count);
+ next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
if (next_res_count != cpu_to_le16(PAGE_SIZE))
goto next_buffer_is_active;
}
@@ -2812,7 +2810,7 @@ static int handle_ir_buffer_fill(struct context *context,
u32 buffer_dma;
req_count = le16_to_cpu(last->req_count);
- res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
+ res_count = le16_to_cpu(READ_ONCE(last->res_count));
completed = req_count - res_count;
buffer_dma = le32_to_cpu(last->data_address);
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
index a01461d63f68..00de793e6423 100644
--- a/drivers/firmware/tegra/ivc.c
+++ b/drivers/firmware/tegra/ivc.c
@@ -99,11 +99,11 @@ static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
{
/*
* This function performs multiple checks on the same values with
- * security implications, so create snapshots with ACCESS_ONCE() to
+ * security implications, so create snapshots with READ_ONCE() to
* ensure that these checks use the same values.
*/
- u32 tx = ACCESS_ONCE(header->tx.count);
- u32 rx = ACCESS_ONCE(header->rx.count);
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
/*
* Perform an over-full check to prevent denial of service attacks
@@ -124,8 +124,8 @@ static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
struct tegra_ivc_header *header)
{
- u32 tx = ACCESS_ONCE(header->tx.count);
- u32 rx = ACCESS_ONCE(header->rx.count);
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
/*
* Invalid cases where the counters indicate that the queue is over
@@ -137,8 +137,8 @@ static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
struct tegra_ivc_header *header)
{
- u32 tx = ACCESS_ONCE(header->tx.count);
- u32 rx = ACCESS_ONCE(header->rx.count);
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
/*
* This function isn't expected to be used in scenarios where an
@@ -151,8 +151,8 @@ static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
- ACCESS_ONCE(ivc->tx.channel->tx.count) =
- ACCESS_ONCE(ivc->tx.channel->tx.count) + 1;
+ WRITE_ONCE(ivc->tx.channel->tx.count,
+ READ_ONCE(ivc->tx.channel->tx.count) + 1);
if (ivc->tx.position == ivc->num_frames - 1)
ivc->tx.position = 0;
@@ -162,8 +162,8 @@ static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
- ACCESS_ONCE(ivc->rx.channel->rx.count) =
- ACCESS_ONCE(ivc->rx.channel->rx.count) + 1;
+ WRITE_ONCE(ivc->rx.channel->rx.count,
+ READ_ONCE(ivc->rx.channel->rx.count) + 1);
if (ivc->rx.position == ivc->num_frames - 1)
ivc->rx.position = 0;
@@ -428,7 +428,7 @@ int tegra_ivc_notified(struct tegra_ivc *ivc)
/* Copy the receiver's state out of shared memory. */
tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
- state = ACCESS_ONCE(ivc->rx.channel->tx.state);
+ state = READ_ONCE(ivc->rx.channel->tx.state);
if (state == TEGRA_IVC_STATE_SYNC) {
offset = offsetof(struct tegra_ivc_header, tx.count);
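[Editor's note, not part of the patch: the ohci.c and ivc.c hunks above follow the tree-wide ACCESS_ONCE() conversion, where plain reads become READ_ONCE() and assignments become WRITE_ONCE(). A minimal sketch of the before/after idiom, with a hypothetical structure:]

	#include <linux/compiler.h>

	struct ring { unsigned long count; };

	/*
	 * old:  ACCESS_ONCE(r->count) = ACCESS_ONCE(r->count) + 1;
	 * new:  WRITE_ONCE(r->count, READ_ONCE(r->count) + 1);
	 */
	static inline void ring_advance(struct ring *r)
	{
		WRITE_ONCE(r->count, READ_ONCE(r->count) + 1);
	}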
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index e359930bebc8..0d7743089414 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -79,7 +79,7 @@ static int xlnx_pr_decoupler_enable_show(struct fpga_bridge *bridge)
return !status;
}
-static struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = {
+static const struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = {
.enable_set = xlnx_pr_decoupler_enable_set,
.enable_show = xlnx_pr_decoupler_enable_show,
};
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 4ea63d9bd131..e318bf8c623c 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -185,7 +185,7 @@ static int fsi_slave_calc_addr(struct fsi_slave *slave, uint32_t *addrp,
return 0;
}
-int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
+static int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
{
struct fsi_master *master = slave->master;
uint32_t irq, stat;
@@ -215,8 +215,8 @@ int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
static int fsi_slave_set_smode(struct fsi_master *master, int link, int id);
-int fsi_slave_handle_error(struct fsi_slave *slave, bool write, uint32_t addr,
- size_t size)
+static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
+ uint32_t addr, size_t size)
{
struct fsi_master *master = slave->master;
int rc, link;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3f80f167ed56..d6a8e851ad13 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -139,7 +139,7 @@ config GPIO_BRCMSTB
default y if (ARCH_BRCMSTB || BMIPS_GENERIC)
depends on OF_GPIO && (ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST)
select GPIO_GENERIC
- select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN
help
Say yes here to enable GPIO support for Broadcom STB (BCM7XXX) SoCs.
@@ -286,8 +286,7 @@ config GPIO_LYNXPOINT
Requires ACPI device enumeration code to set up a platform device.
config GPIO_MB86S7X
- bool "GPIO support for Fujitsu MB86S7x Platforms"
- depends on ARCH_MB86S7X || COMPILE_TEST
+ tristate "GPIO support for Fujitsu MB86S7x Platforms"
help
Say yes here to support the GPIO controller in Fujitsu MB86S70 SoCs.
@@ -442,6 +441,15 @@ config GPIO_TEGRA
help
Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
+config GPIO_TEGRA186
+ tristate "NVIDIA Tegra186 GPIO support"
+ default ARCH_TEGRA_186_SOC
+ depends on ARCH_TEGRA_186_SOC || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to support GPIO pins on NVIDIA Tegra186 SoCs.
+
config GPIO_TS4800
tristate "TS-4800 DIO blocks and compatibles"
depends on OF_GPIO
@@ -475,6 +483,14 @@ config GPIO_TZ1090_PDC
help
Say yes here to support Toumaz Xenif TZ1090 PDC GPIOs.
+config GPIO_UNIPHIER
+ tristate "UniPhier GPIO support"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF_GPIO
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Say yes here to support UniPhier GPIOs.
+
config GPIO_VF610
def_bool y
depends on ARCH_MXC && SOC_VF610
@@ -818,15 +834,6 @@ config GPIO_PCF857X
This driver provides an in-kernel interface to those GPIOs using
platform-neutral GPIO calls.
-config GPIO_SX150X
- bool "Semtech SX150x I2C GPIO expander (deprecated)"
- depends on PINCTRL && I2C=y
- select PINCTRL_SX150X
- default n
- help
- Say yes here to provide support for Semtech SX150x-series I2C
- GPIO expanders. The GPIO driver was replaced by a Pinctrl version.
-
config GPIO_TPIC2810
tristate "TPIC2810 8-Bit I2C GPO expander"
help
@@ -1256,6 +1263,16 @@ config GPIO_74X164
shift registers. This driver can be used to provide access
to more gpio outputs.
+config GPIO_MAX3191X
+ tristate "Maxim MAX3191x industrial serializer"
+ select CRC8
+ help
+ GPIO driver for the Maxim MAX31910, MAX31911, MAX31912, MAX31913,
+ MAX31953 and MAX31963 industrial serializers: daisy-chainable
+ chips that make 8 digital 24V inputs available via SPI. Supports
+ GPIO driver for the Maxim MAX31910, MAX31911, MAX31912, MAX31913,
+ MAX31953 and MAX31963 industrial serializers: daisy-chainable
+ chips that make 8 digital 24V inputs available via SPI. Supports
+ CRC checksums to guard against electromagnetic interference,
+ as well as undervoltage and overtemperature detection.
+
config GPIO_MAX7301
tristate "Maxim MAX7301 GPIO expander"
select GPIO_MAX730X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 8a2dfba3b231..4bc24febb889 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
+obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
@@ -114,6 +115,7 @@ obj-$(CONFIG_GPIO_SYSCON) += gpio-syscon.o
obj-$(CONFIG_GPIO_TB10X) += gpio-tb10x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_GPIO_TEGRA) += gpio-tegra.o
+obj-$(CONFIG_GPIO_TEGRA186) += gpio-tegra186.o
obj-$(CONFIG_GPIO_THUNDERX) += gpio-thunderx.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
obj-$(CONFIG_GPIO_PALMAS) += gpio-palmas.o
@@ -132,6 +134,7 @@ obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
obj-$(CONFIG_GPIO_TZ1090) += gpio-tz1090.o
obj-$(CONFIG_GPIO_TZ1090_PDC) += gpio-tz1090-pdc.o
obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
+obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o
obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 598e209efa2d..bab3b94c5cbc 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -326,7 +326,7 @@ static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
unsigned long gpio;
for_each_set_bit(gpio, &irq_mask, 2)
- generic_handle_irq(irq_find_mapping(chip->irqdomain,
+ generic_handle_irq(irq_find_mapping(chip->irq.domain,
19 + gpio*24));
raw_spin_lock(&dio48egpio->lock);
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 51f046e29ff7..add859d59766 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -209,7 +209,7 @@ static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
for_each_set_bit(bit_num, &irq_mask, 8) {
gpio = bit_num + boundary * 8;
- generic_handle_irq(irq_find_mapping(chip->irqdomain,
+ generic_handle_irq(irq_find_mapping(chip->irq.domain,
gpio));
}
}
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index ec2ce34ff473..2f16638a0589 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -199,7 +199,7 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
int gpio;
for_each_set_bit(gpio, &idio16gpio->irq_mask, chip->ngpio)
- generic_handle_irq(irq_find_mapping(chip->irqdomain, gpio));
+ generic_handle_irq(irq_find_mapping(chip->irq.domain, gpio));
raw_spin_lock(&idio16gpio->lock);
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 89863ea25de1..44c09904daa6 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -192,28 +192,20 @@ static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
mutex_lock(&adnp->i2c_lock);
err = adnp_read(adnp, GPIO_DDR(adnp) + i, &ddr);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- return;
- }
+ if (err < 0)
+ goto unlock;
err = adnp_read(adnp, GPIO_PLR(adnp) + i, &plr);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- return;
- }
+ if (err < 0)
+ goto unlock;
err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- return;
- }
+ if (err < 0)
+ goto unlock;
err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- return;
- }
+ if (err < 0)
+ goto unlock;
mutex_unlock(&adnp->i2c_lock);
@@ -240,6 +232,11 @@ static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
direction, level, interrupt, pending);
}
}
+
+ return;
+
+unlock:
+ mutex_unlock(&adnp->i2c_lock);
}
static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios)
@@ -323,7 +320,7 @@ static irqreturn_t adnp_irq(int irq, void *data)
for_each_set_bit(bit, &pending, 8) {
unsigned int child_irq;
- child_irq = irq_find_mapping(adnp->gpio.irqdomain,
+ child_irq = irq_find_mapping(adnp->gpio.irq.domain,
base + bit);
handle_nested_irq(child_irq);
}
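[Editor's note, not part of the patch: the gpio-adnp.c hunk replaces four copies of "unlock and return" with a single exit label; on success the lock is still dropped before the lock-free formatting continues. A sketch of the idiom with hypothetical names:]

	#include <linux/mutex.h>
	#include <linux/types.h>

	struct example { struct mutex lock; };

	/* stand-in for a bus read that may fail */
	static int example_read(struct example *ex, unsigned int reg, u8 *val);

	static void example_dbg_show(struct example *ex)
	{
		u8 a, b;

		mutex_lock(&ex->lock);

		if (example_read(ex, 0, &a) < 0)
			goto unlock;
		if (example_read(ex, 1, &b) < 0)
			goto unlock;

		mutex_unlock(&ex->lock);

		/* format 'a' and 'b' here, outside the lock */
		return;

	unlock:
		mutex_unlock(&ex->lock);
	}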
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index ccc02ed65b3c..8e76d390e653 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -211,7 +211,7 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
altera_gc = gpiochip_get_data(irq_desc_get_handler_data(desc));
chip = irq_desc_get_chip(desc);
mm_gc = &altera_gc->mmchip;
- irqdomain = altera_gc->mmchip.gc.irqdomain;
+ irqdomain = altera_gc->mmchip.gc.irq.domain;
chained_irq_enter(chip, desc);
@@ -239,7 +239,7 @@ static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
altera_gc = gpiochip_get_data(irq_desc_get_handler_data(desc));
chip = irq_desc_get_chip(desc);
mm_gc = &altera_gc->mmchip;
- irqdomain = altera_gc->mmchip.gc.irqdomain;
+ irqdomain = altera_gc->mmchip.gc.irq.domain;
chained_irq_enter(chip, desc);
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index bfc53995064a..6b3ca6601af2 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -411,13 +411,16 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_BOTH:
type2 |= bit;
+ /* fall through */
case IRQ_TYPE_EDGE_RISING:
type0 |= bit;
+ /* fall through */
case IRQ_TYPE_EDGE_FALLING:
handler = handle_edge_irq;
break;
case IRQ_TYPE_LEVEL_HIGH:
type0 |= bit;
+ /* fall through */
case IRQ_TYPE_LEVEL_LOW:
type1 |= bit;
handler = handle_level_irq;
@@ -466,7 +469,7 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
reg = ioread32(bank_irq_reg(data, bank, GPIO_IRQ_STATUS));
for_each_set_bit(p, &reg, 32) {
- girq = irq_find_mapping(gc->irqdomain, i * 32 + p);
+ girq = irq_find_mapping(gc->irq.domain, i * 32 + p);
generic_handle_irq(girq);
}
@@ -498,7 +501,7 @@ static void set_irq_valid_mask(struct aspeed_gpio *gpio)
if (i >= gpio->config->nr_gpios)
break;
- clear_bit(i, gpio->chip.irq_valid_mask);
+ clear_bit(i, gpio->chip.irq.valid_mask);
}
props++;
@@ -536,12 +539,12 @@ static int aspeed_gpio_request(struct gpio_chip *chip, unsigned int offset)
if (!have_gpio(gpiochip_get_data(chip), offset))
return -ENODEV;
- return pinctrl_request_gpio(chip->base + offset);
+ return pinctrl_gpio_request(chip->base + offset);
}
static void aspeed_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
}
static inline void __iomem *bank_debounce_reg(struct aspeed_gpio *gpio,
@@ -853,7 +856,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.set_config = aspeed_gpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
- gpio->chip.irq_need_valid_mask = true;
+ gpio->chip.irq.need_valid_mask = true;
rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
if (rc < 0)
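[Editor's note, not part of the patch: the new "/* fall through */" comments in gpio-aspeed.c (and in gpio-ath79.c below) mark the fall-throughs as intentional; comments of this form are recognized by GCC's -Wimplicit-fallthrough, so deliberate fall-throughs do not trigger the warning. Sketch of the annotated shape:]

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_BOTH:
		type2 |= bit;		/* both-edge also needs the rising-edge bit */
		/* fall through */
	case IRQ_TYPE_EDGE_RISING:
		type0 |= bit;
		/* fall through */
	case IRQ_TYPE_EDGE_FALLING:
		handler = handle_edge_irq;
		break;
	}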
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index f33d4a5fe671..5fad89dfab7e 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -132,6 +132,7 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
case IRQ_TYPE_LEVEL_HIGH:
polarity |= mask;
+ /* fall through */
case IRQ_TYPE_LEVEL_LOW:
type |= mask;
break;
@@ -208,7 +209,7 @@ static void ath79_gpio_irq_handler(struct irq_desc *desc)
if (pending) {
for_each_set_bit(irq, &pending, gc->ngpio)
generic_handle_irq(
- irq_linear_revmap(gc->irqdomain, irq));
+ irq_linear_revmap(gc->irq.domain, irq));
}
chained_irq_exit(irqchip, desc);
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index dd0308cc8bb0..545d43a587b7 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Broadcom Corporation
+ * Copyright (C) 2015-2017 Broadcom
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -19,17 +19,30 @@
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
-#include <linux/reboot.h>
-
-#define GIO_BANK_SIZE 0x20
-#define GIO_ODEN(bank) (((bank) * GIO_BANK_SIZE) + 0x00)
-#define GIO_DATA(bank) (((bank) * GIO_BANK_SIZE) + 0x04)
-#define GIO_IODIR(bank) (((bank) * GIO_BANK_SIZE) + 0x08)
-#define GIO_EC(bank) (((bank) * GIO_BANK_SIZE) + 0x0c)
-#define GIO_EI(bank) (((bank) * GIO_BANK_SIZE) + 0x10)
-#define GIO_MASK(bank) (((bank) * GIO_BANK_SIZE) + 0x14)
-#define GIO_LEVEL(bank) (((bank) * GIO_BANK_SIZE) + 0x18)
-#define GIO_STAT(bank) (((bank) * GIO_BANK_SIZE) + 0x1c)
+#include <linux/bitops.h>
+
+enum gio_reg_index {
+ GIO_REG_ODEN = 0,
+ GIO_REG_DATA,
+ GIO_REG_IODIR,
+ GIO_REG_EC,
+ GIO_REG_EI,
+ GIO_REG_MASK,
+ GIO_REG_LEVEL,
+ GIO_REG_STAT,
+ NUMBER_OF_GIO_REGISTERS
+};
+
+#define GIO_BANK_SIZE (NUMBER_OF_GIO_REGISTERS * sizeof(u32))
+#define GIO_BANK_OFF(bank, off) (((bank) * GIO_BANK_SIZE) + (off * sizeof(u32)))
+#define GIO_ODEN(bank) GIO_BANK_OFF(bank, GIO_REG_ODEN)
+#define GIO_DATA(bank) GIO_BANK_OFF(bank, GIO_REG_DATA)
+#define GIO_IODIR(bank) GIO_BANK_OFF(bank, GIO_REG_IODIR)
+#define GIO_EC(bank) GIO_BANK_OFF(bank, GIO_REG_EC)
+#define GIO_EI(bank) GIO_BANK_OFF(bank, GIO_REG_EI)
+#define GIO_MASK(bank) GIO_BANK_OFF(bank, GIO_REG_MASK)
+#define GIO_LEVEL(bank) GIO_BANK_OFF(bank, GIO_REG_LEVEL)
+#define GIO_STAT(bank) GIO_BANK_OFF(bank, GIO_REG_STAT)
struct brcmstb_gpio_bank {
struct list_head node;
@@ -37,21 +50,23 @@ struct brcmstb_gpio_bank {
struct gpio_chip gc;
struct brcmstb_gpio_priv *parent_priv;
u32 width;
- struct irq_chip irq_chip;
+ u32 wake_active;
+ u32 saved_regs[GIO_REG_STAT]; /* Don't save and restore GIO_REG_STAT */
};
struct brcmstb_gpio_priv {
struct list_head bank_list;
void __iomem *reg_base;
struct platform_device *pdev;
+ struct irq_domain *irq_domain;
+ struct irq_chip irq_chip;
int parent_irq;
int gpio_base;
- bool can_wake;
+ int num_gpios;
int parent_wake_irq;
- struct notifier_block reboot_notifier;
};
-#define MAX_GPIO_PER_BANK 32
+#define MAX_GPIO_PER_BANK 32
#define GPIO_BANK(gpio) ((gpio) >> 5)
/* assumes MAX_GPIO_PER_BANK is a multiple of 2 */
#define GPIO_BIT(gpio) ((gpio) & (MAX_GPIO_PER_BANK - 1))
@@ -63,12 +78,40 @@ brcmstb_gpio_gc_to_priv(struct gpio_chip *gc)
return bank->parent_priv;
}
+static unsigned long
+__brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
+{
+ void __iomem *reg_base = bank->parent_priv->reg_base;
+
+ return bank->gc.read_reg(reg_base + GIO_STAT(bank->id)) &
+ bank->gc.read_reg(reg_base + GIO_MASK(bank->id));
+}
+
+static unsigned long
+brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
+{
+ unsigned long status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->gc.bgpio_lock, flags);
+ status = __brcmstb_gpio_get_active_irqs(bank);
+ spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags);
+
+ return status;
+}
+
+static int brcmstb_gpio_hwirq_to_offset(irq_hw_number_t hwirq,
+ struct brcmstb_gpio_bank *bank)
+{
+ return hwirq - (bank->gc.base - bank->parent_priv->gpio_base);
+}
+
static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank,
- unsigned int offset, bool enable)
+ unsigned int hwirq, bool enable)
{
struct gpio_chip *gc = &bank->gc;
struct brcmstb_gpio_priv *priv = bank->parent_priv;
- u32 mask = gc->pin2mask(gc, offset);
+ u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(hwirq, bank));
u32 imask;
unsigned long flags;
@@ -82,6 +125,17 @@ static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank,
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
+static int brcmstb_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+ /* gc_offset is relative to this gpio_chip; want real offset */
+ int hwirq = offset + (gc->base - priv->gpio_base);
+
+ if (hwirq >= priv->num_gpios)
+ return -ENXIO;
+ return irq_create_mapping(priv->irq_domain, hwirq);
+}
+
/* -------------------- IRQ chip functions -------------------- */
static void brcmstb_gpio_irq_mask(struct irq_data *d)
@@ -100,12 +154,22 @@ static void brcmstb_gpio_irq_unmask(struct irq_data *d)
brcmstb_gpio_set_imask(bank, d->hwirq, true);
}
+static void brcmstb_gpio_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct brcmstb_gpio_bank *bank = gpiochip_get_data(gc);
+ struct brcmstb_gpio_priv *priv = bank->parent_priv;
+ u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(d->hwirq, bank));
+
+ gc->write_reg(priv->reg_base + GIO_STAT(bank->id), mask);
+}
+
static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct brcmstb_gpio_bank *bank = gpiochip_get_data(gc);
struct brcmstb_gpio_priv *priv = bank->parent_priv;
- u32 mask = BIT(d->hwirq);
+ u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(d->hwirq, bank));
u32 edge_insensitive, iedge_insensitive;
u32 edge_config, iedge_config;
u32 level, ilevel;
@@ -113,13 +177,13 @@ static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
switch (type) {
case IRQ_TYPE_LEVEL_LOW:
- level = 0;
+ level = mask;
edge_config = 0;
edge_insensitive = 0;
break;
case IRQ_TYPE_LEVEL_HIGH:
level = mask;
- edge_config = 0;
+ edge_config = mask;
edge_insensitive = 0;
break;
case IRQ_TYPE_EDGE_FALLING:
@@ -166,11 +230,6 @@ static int brcmstb_gpio_priv_set_wake(struct brcmstb_gpio_priv *priv,
{
int ret = 0;
- /*
- * Only enable wake IRQ once for however many hwirqs can wake
- * since they all use the same wake IRQ. Mask will be set
- * up appropriately thanks to IRQCHIP_MASK_ON_SUSPEND flag.
- */
if (enable)
ret = enable_irq_wake(priv->parent_wake_irq);
else
@@ -184,7 +243,18 @@ static int brcmstb_gpio_priv_set_wake(struct brcmstb_gpio_priv *priv,
static int brcmstb_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+ struct brcmstb_gpio_bank *bank = gpiochip_get_data(gc);
+ struct brcmstb_gpio_priv *priv = bank->parent_priv;
+ u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(d->hwirq, bank));
+
+ /*
+ * Do not do anything specific for now, suspend/resume callbacks will
+ * configure the interrupt mask appropriately
+ */
+ if (enable)
+ bank->wake_active |= mask;
+ else
+ bank->wake_active &= ~mask;
return brcmstb_gpio_priv_set_wake(priv, enable);
}
@@ -195,43 +265,36 @@ static irqreturn_t brcmstb_gpio_wake_irq_handler(int irq, void *data)
if (!priv || irq != priv->parent_wake_irq)
return IRQ_NONE;
- pm_wakeup_event(&priv->pdev->dev, 0);
+
+ /* Nothing to do */
return IRQ_HANDLED;
}
static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
{
struct brcmstb_gpio_priv *priv = bank->parent_priv;
- struct irq_domain *irq_domain = bank->gc.irqdomain;
- void __iomem *reg_base = priv->reg_base;
+ struct irq_domain *domain = priv->irq_domain;
+ int hwbase = bank->gc.base - priv->gpio_base;
unsigned long status;
- unsigned long flags;
- spin_lock_irqsave(&bank->gc.bgpio_lock, flags);
- while ((status = bank->gc.read_reg(reg_base + GIO_STAT(bank->id)) &
- bank->gc.read_reg(reg_base + GIO_MASK(bank->id)))) {
- int bit;
-
- for_each_set_bit(bit, &status, 32) {
- u32 stat = bank->gc.read_reg(reg_base +
- GIO_STAT(bank->id));
- if (bit >= bank->width)
+ while ((status = brcmstb_gpio_get_active_irqs(bank))) {
+ unsigned int irq, offset;
+
+ for_each_set_bit(offset, &status, 32) {
+ if (offset >= bank->width)
dev_warn(&priv->pdev->dev,
"IRQ for invalid GPIO (bank=%d, offset=%d)\n",
- bank->id, bit);
- bank->gc.write_reg(reg_base + GIO_STAT(bank->id),
- stat | BIT(bit));
- generic_handle_irq(irq_find_mapping(irq_domain, bit));
+ bank->id, offset);
+ irq = irq_linear_revmap(domain, hwbase + offset);
+ generic_handle_irq(irq);
}
}
- spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags);
}
/* Each UPG GIO block has one IRQ for all banks */
static void brcmstb_gpio_irq_handler(struct irq_desc *desc)
{
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+ struct brcmstb_gpio_priv *priv = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
struct brcmstb_gpio_bank *bank;
@@ -244,19 +307,63 @@ static void brcmstb_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int brcmstb_gpio_reboot(struct notifier_block *nb,
- unsigned long action, void *data)
+static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
+ struct brcmstb_gpio_priv *priv, irq_hw_number_t hwirq)
{
- struct brcmstb_gpio_priv *priv =
- container_of(nb, struct brcmstb_gpio_priv, reboot_notifier);
+ struct brcmstb_gpio_bank *bank;
+ int i = 0;
- /* Enable GPIO for S5 cold boot */
- if (action == SYS_POWER_OFF)
- brcmstb_gpio_priv_set_wake(priv, 1);
+ /* banks are in descending order */
+ list_for_each_entry_reverse(bank, &priv->bank_list, node) {
+ i += bank->gc.ngpio;
+ if (hwirq < i)
+ return bank;
+ }
+ return NULL;
+}
+
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key brcmstb_gpio_irq_lock_class;
- return NOTIFY_DONE;
+
+static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct brcmstb_gpio_priv *priv = d->host_data;
+ struct brcmstb_gpio_bank *bank =
+ brcmstb_gpio_hwirq_to_bank(priv, hwirq);
+ struct platform_device *pdev = priv->pdev;
+ int ret;
+
+ if (!bank)
+ return -EINVAL;
+
+ dev_dbg(&pdev->dev, "Mapping irq %d for gpio line %d (bank %d)\n",
+ irq, (int)hwirq, bank->id);
+ ret = irq_set_chip_data(irq, &bank->gc);
+ if (ret < 0)
+ return ret;
+ irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class);
+ irq_set_chip_and_handler(irq, &priv->irq_chip, handle_level_irq);
+ irq_set_noprobe(irq);
+ return 0;
}
+static void brcmstb_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops brcmstb_gpio_irq_domain_ops = {
+ .map = brcmstb_gpio_irq_map,
+ .unmap = brcmstb_gpio_irq_unmap,
+ .xlate = irq_domain_xlate_twocell,
+};
+
/* Make sure that the number of banks matches up between properties */
static int brcmstb_gpio_sanity_check_banks(struct device *dev,
struct device_node *np, struct resource *res)
@@ -278,13 +385,25 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
{
struct brcmstb_gpio_priv *priv = platform_get_drvdata(pdev);
struct brcmstb_gpio_bank *bank;
- int ret = 0;
+ int offset, ret = 0, virq;
if (!priv) {
dev_err(&pdev->dev, "called %s without drvdata!\n", __func__);
return -EFAULT;
}
+ if (priv->parent_irq > 0)
+ irq_set_chained_handler_and_data(priv->parent_irq, NULL, NULL);
+
+ /* Remove all IRQ mappings and delete the domain */
+ if (priv->irq_domain) {
+ for (offset = 0; offset < priv->num_gpios; offset++) {
+ virq = irq_find_mapping(priv->irq_domain, offset);
+ irq_dispose_mapping(virq);
+ }
+ irq_domain_remove(priv->irq_domain);
+ }
+
/*
* You can lose return values below, but we report all errors, and it's
* more important to actually perform all of the steps.
@@ -292,12 +411,6 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
list_for_each_entry(bank, &priv->bank_list, node)
gpiochip_remove(&bank->gc);
- if (priv->reboot_notifier.notifier_call) {
- ret = unregister_reboot_notifier(&priv->reboot_notifier);
- if (ret)
- dev_err(&pdev->dev,
- "failed to unregister reboot notifier\n");
- }
return ret;
}
@@ -332,66 +445,163 @@ static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
return offset;
}
-/* Before calling, must have bank->parent_irq set and gpiochip registered */
+/* priv->parent_irq and priv->num_gpios must be set before calling */
static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
- struct brcmstb_gpio_bank *bank)
+ struct brcmstb_gpio_priv *priv)
{
- struct brcmstb_gpio_priv *priv = bank->parent_priv;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int err;
- bank->irq_chip.name = dev_name(dev);
- bank->irq_chip.irq_mask = brcmstb_gpio_irq_mask;
- bank->irq_chip.irq_unmask = brcmstb_gpio_irq_unmask;
- bank->irq_chip.irq_set_type = brcmstb_gpio_irq_set_type;
-
- /* Ensures that all non-wakeup IRQs are disabled at suspend */
- bank->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+ priv->irq_domain =
+ irq_domain_add_linear(np, priv->num_gpios,
+ &brcmstb_gpio_irq_domain_ops,
+ priv);
+ if (!priv->irq_domain) {
+ dev_err(dev, "Couldn't allocate IRQ domain\n");
+ return -ENXIO;
+ }
- if (IS_ENABLED(CONFIG_PM_SLEEP) && !priv->can_wake &&
- of_property_read_bool(np, "wakeup-source")) {
+ if (of_property_read_bool(np, "wakeup-source")) {
priv->parent_wake_irq = platform_get_irq(pdev, 1);
if (priv->parent_wake_irq < 0) {
+ priv->parent_wake_irq = 0;
dev_warn(dev,
"Couldn't get wake IRQ - GPIOs will not be able to wake from sleep");
} else {
/*
- * Set wakeup capability before requesting wakeup
- * interrupt, so we can process boot-time "wakeups"
- * (e.g., from S5 cold boot)
+ * Set wakeup capability so we can process boot-time
+ * "wakeups" (e.g., from S5 cold boot)
*/
device_set_wakeup_capable(dev, true);
device_wakeup_enable(dev);
err = devm_request_irq(dev, priv->parent_wake_irq,
- brcmstb_gpio_wake_irq_handler, 0,
- "brcmstb-gpio-wake", priv);
+ brcmstb_gpio_wake_irq_handler,
+ IRQF_SHARED,
+ "brcmstb-gpio-wake", priv);
if (err < 0) {
dev_err(dev, "Couldn't request wake IRQ");
- return err;
+ goto out_free_domain;
}
-
- priv->reboot_notifier.notifier_call =
- brcmstb_gpio_reboot;
- register_reboot_notifier(&priv->reboot_notifier);
- priv->can_wake = true;
}
}
- if (priv->can_wake)
- bank->irq_chip.irq_set_wake = brcmstb_gpio_irq_set_wake;
+ priv->irq_chip.name = dev_name(dev);
+ priv->irq_chip.irq_disable = brcmstb_gpio_irq_mask;
+ priv->irq_chip.irq_mask = brcmstb_gpio_irq_mask;
+ priv->irq_chip.irq_unmask = brcmstb_gpio_irq_unmask;
+ priv->irq_chip.irq_ack = brcmstb_gpio_irq_ack;
+ priv->irq_chip.irq_set_type = brcmstb_gpio_irq_set_type;
+
+ if (priv->parent_wake_irq)
+ priv->irq_chip.irq_set_wake = brcmstb_gpio_irq_set_wake;
+
+ irq_set_chained_handler_and_data(priv->parent_irq,
+ brcmstb_gpio_irq_handler, priv);
+ irq_set_status_flags(priv->parent_irq, IRQ_DISABLE_UNLAZY);
+
+ return 0;
+
+out_free_domain:
+ irq_domain_remove(priv->irq_domain);
+
+ return err;
+}
+
+static void brcmstb_gpio_bank_save(struct brcmstb_gpio_priv *priv,
+ struct brcmstb_gpio_bank *bank)
+{
+ struct gpio_chip *gc = &bank->gc;
+ unsigned int i;
+
+ for (i = 0; i < GIO_REG_STAT; i++)
+ bank->saved_regs[i] = gc->read_reg(priv->reg_base +
+ GIO_BANK_OFF(bank->id, i));
+}
+
+static void brcmstb_gpio_quiesce(struct device *dev, bool save)
+{
+ struct brcmstb_gpio_priv *priv = dev_get_drvdata(dev);
+ struct brcmstb_gpio_bank *bank;
+ struct gpio_chip *gc;
+ u32 imask;
+
+ /* disable non-wake interrupt */
+ if (priv->parent_irq >= 0)
+ disable_irq(priv->parent_irq);
+
+ list_for_each_entry(bank, &priv->bank_list, node) {
+ gc = &bank->gc;
+
+ if (save)
+ brcmstb_gpio_bank_save(priv, bank);
+
+ /* Unmask GPIOs which have been flagged as wake-up sources */
+ if (priv->parent_wake_irq)
+ imask = bank->wake_active;
+ else
+ imask = 0;
+ gc->write_reg(priv->reg_base + GIO_MASK(bank->id),
+ imask);
+ }
+}
+
+static void brcmstb_gpio_shutdown(struct platform_device *pdev)
+{
+ /* Enable GPIO for S5 cold boot */
+ brcmstb_gpio_quiesce(&pdev->dev, false);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void brcmstb_gpio_bank_restore(struct brcmstb_gpio_priv *priv,
+ struct brcmstb_gpio_bank *bank)
+{
+ struct gpio_chip *gc = &bank->gc;
+ unsigned int i;
+
+ for (i = 0; i < GIO_REG_STAT; i++)
+ gc->write_reg(priv->reg_base + GIO_BANK_OFF(bank->id, i),
+ bank->saved_regs[i]);
+}
+
+static int brcmstb_gpio_suspend(struct device *dev)
+{
+ brcmstb_gpio_quiesce(dev, true);
+ return 0;
+}
+
+static int brcmstb_gpio_resume(struct device *dev)
+{
+ struct brcmstb_gpio_priv *priv = dev_get_drvdata(dev);
+ struct brcmstb_gpio_bank *bank;
+ bool need_wakeup_event = false;
+
+ list_for_each_entry(bank, &priv->bank_list, node) {
+ need_wakeup_event |= !!__brcmstb_gpio_get_active_irqs(bank);
+ brcmstb_gpio_bank_restore(priv, bank);
+ }
+
+ if (priv->parent_wake_irq && need_wakeup_event)
+ pm_wakeup_event(dev, 0);
- err = gpiochip_irqchip_add(&bank->gc, &bank->irq_chip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
- if (err)
- return err;
- gpiochip_set_chained_irqchip(&bank->gc, &bank->irq_chip,
- priv->parent_irq, brcmstb_gpio_irq_handler);
+ /* enable non-wake interrupt */
+ if (priv->parent_irq >= 0)
+ enable_irq(priv->parent_irq);
return 0;
}
+#else
+#define brcmstb_gpio_suspend NULL
+#define brcmstb_gpio_resume NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops brcmstb_gpio_pm_ops = {
+ .suspend_noirq = brcmstb_gpio_suspend,
+ .resume_noirq = brcmstb_gpio_resume,
+};
+
static int brcmstb_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -406,6 +616,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
int err;
static int gpio_base;
unsigned long flags = 0;
+ bool need_wakeup_event = false;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -485,16 +696,23 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
gc->of_node = np;
gc->owner = THIS_MODULE;
gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", dev->of_node);
+ if (!gc->label) {
+ err = -ENOMEM;
+ goto fail;
+ }
gc->base = gpio_base;
gc->of_gpio_n_cells = 2;
gc->of_xlate = brcmstb_gpio_of_xlate;
/* not all ngpio lines are valid, will use bank width later */
gc->ngpio = MAX_GPIO_PER_BANK;
+ if (priv->parent_irq > 0)
+ gc->to_irq = brcmstb_gpio_to_irq;
/*
* Mask all interrupts by default, since wakeup interrupts may
* be retained from S5 cold boot
*/
+ need_wakeup_event |= !!__brcmstb_gpio_get_active_irqs(bank);
gc->write_reg(reg_base + GIO_MASK(bank->id), 0);
err = gpiochip_add_data(gc, bank);
@@ -505,12 +723,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
}
gpio_base += gc->ngpio;
- if (priv->parent_irq > 0) {
- err = brcmstb_gpio_irq_setup(pdev, bank);
- if (err)
- goto fail;
- }
-
dev_dbg(dev, "bank=%d, base=%d, ngpio=%d, width=%d\n", bank->id,
gc->base, gc->ngpio, bank->width);
@@ -520,9 +732,19 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
num_banks++;
}
+ priv->num_gpios = gpio_base - priv->gpio_base;
+ if (priv->parent_irq > 0) {
+ err = brcmstb_gpio_irq_setup(pdev, priv);
+ if (err)
+ goto fail;
+ }
+
dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n",
num_banks, priv->gpio_base, gpio_base - 1);
+ if (priv->parent_wake_irq && need_wakeup_event)
+ pm_wakeup_event(dev, 0);
+
return 0;
fail:
@@ -541,9 +763,11 @@ static struct platform_driver brcmstb_gpio_driver = {
.driver = {
.name = "brcmstb-gpio",
.of_match_table = brcmstb_gpio_of_match,
+ .pm = &brcmstb_gpio_pm_ops,
},
.probe = brcmstb_gpio_probe,
.remove = brcmstb_gpio_remove,
+ .shutdown = brcmstb_gpio_shutdown,
};
module_platform_driver(brcmstb_gpio_driver);
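[Editor's note, not part of the patch: the brcmstb rework above drops the per-bank gpiochip irqchips in favour of one linear irq_domain spanning every bank of the device; the .map() callback attaches the owning gpio_chip and the handler when a mapping is first created. A condensed sketch of that shape, with hypothetical names:]

	static int sketch_irq_map(struct irq_domain *d, unsigned int virq,
				  irq_hw_number_t hwirq)
	{
		struct sketch_priv *priv = d->host_data;

		/* pick the bank/gpio_chip that owns this hwirq */
		irq_set_chip_data(virq, sketch_hwirq_to_gc(priv, hwirq));
		irq_set_chip_and_handler(virq, &priv->irq_chip, handle_level_irq);
		irq_set_noprobe(virq);
		return 0;
	}

	static const struct irq_domain_ops sketch_irq_domain_ops = {
		.map	= sketch_irq_map,
		.xlate	= irq_domain_xlate_twocell,
	};

	/* in probe, after all banks are registered: */
	priv->domain = irq_domain_add_linear(np, priv->num_gpios,
					     &sketch_irq_domain_ops, priv);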
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index e60156ec0c18..b6f0f729656c 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -295,7 +295,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) {
if (pending & BIT(gpio)) {
- virq = irq_find_mapping(cg->chip.irqdomain, gpio);
+ virq = irq_find_mapping(cg->chip.irq.domain, gpio);
handle_nested_irq(virq);
}
}
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index aecb847166f5..1dada68b9a27 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -420,7 +420,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
return;
}
- irq = irq_find_mapping(dln2->gpio.irqdomain, pin);
+ irq = irq_find_mapping(dln2->gpio.irq.domain, pin);
if (!irq) {
dev_err(dln2->gpio.parent, "pin %d not mapped to IRQ\n", pin);
return;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index c07ada9c7af6..6730c6642ce3 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -25,6 +25,7 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/platform_data/gpio-dwapb.h>
#include <linux/slab.h>
@@ -77,6 +78,7 @@ struct dwapb_context {
u32 int_type;
u32 int_pol;
u32 int_deb;
+ u32 wake_en;
};
#endif
@@ -97,6 +99,7 @@ struct dwapb_gpio {
unsigned int nr_ports;
struct irq_domain *domain;
unsigned int flags;
+ struct reset_control *rst;
};
static inline u32 gpio_reg_v2_convert(unsigned int offset)
@@ -295,13 +298,29 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int dwapb_irq_set_wake(struct irq_data *d, unsigned int enable)
+{
+ struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = igc->private;
+ struct dwapb_context *ctx = gpio->ports[0].ctx;
+
+ if (enable)
+ ctx->wake_en |= BIT(d->hwirq);
+ else
+ ctx->wake_en &= ~BIT(d->hwirq);
+
+ return 0;
+}
+#endif
+
static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
unsigned offset, unsigned debounce)
{
struct dwapb_gpio_port *port = gpiochip_get_data(gc);
struct dwapb_gpio *gpio = port->gpio;
unsigned long flags, val_deb;
- unsigned long mask = gc->pin2mask(gc, offset);
+ unsigned long mask = BIT(offset);
spin_lock_irqsave(&gc->bgpio_lock, flags);
@@ -385,6 +404,9 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
ct->chip.irq_disable = dwapb_irq_disable;
ct->chip.irq_request_resources = dwapb_irq_reqres;
ct->chip.irq_release_resources = dwapb_irq_relres;
+#ifdef CONFIG_PM_SLEEP
+ ct->chip.irq_set_wake = dwapb_irq_set_wake;
+#endif
ct->regs.ack = gpio_reg_convert(gpio, GPIO_PORTA_EOI);
ct->regs.mask = gpio_reg_convert(gpio, GPIO_INTMASK);
ct->type = IRQ_TYPE_LEVEL_MASK;
@@ -460,7 +482,7 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
(pp->idx * GPIO_SWPORT_DDR_SIZE);
err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout,
- NULL, false);
+ NULL, 0);
if (err) {
dev_err(gpio->dev, "failed to init gpio chip for port%d\n",
port->idx);
@@ -609,6 +631,12 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
gpio->dev = &pdev->dev;
gpio->nr_ports = pdata->nports;
+ gpio->rst = devm_reset_control_get_optional_shared(dev, NULL);
+ if (IS_ERR(gpio->rst))
+ return PTR_ERR(gpio->rst);
+
+ reset_control_deassert(gpio->rst);
+
gpio->ports = devm_kcalloc(&pdev->dev, gpio->nr_ports,
sizeof(*gpio->ports), GFP_KERNEL);
if (!gpio->ports)
@@ -660,6 +688,7 @@ static int dwapb_gpio_remove(struct platform_device *pdev)
dwapb_gpio_unregister(gpio);
dwapb_irq_teardown(gpio);
+ reset_control_assert(gpio->rst);
return 0;
}
@@ -699,7 +728,8 @@ static int dwapb_gpio_suspend(struct device *dev)
ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
/* Mask out interrupts */
- dwapb_write(gpio, GPIO_INTMASK, 0xffffffff);
+ dwapb_write(gpio, GPIO_INTMASK,
+ 0xffffffff & ~ctx->wake_en);
}
}
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
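[Editor's note, not part of the patch: the dwapb hunks let GPIO interrupts act as wake sources; irq_set_wake accumulates a wake_en bitmask and the suspend path masks every interrupt except those bits, so wake-capable lines stay armed across suspend. Sketch of the two halves, with hypothetical names:]

	static int sketch_irq_set_wake(struct irq_data *d, unsigned int enable)
	{
		struct sketch_ctx *ctx = irq_data_get_irq_chip_data(d);

		if (enable)
			ctx->wake_en |= BIT(d->hwirq);
		else
			ctx->wake_en &= ~BIT(d->hwirq);
		return 0;
	}

	/* at suspend: mask everything that was not flagged as a wake source */
	writel(0xffffffff & ~ctx->wake_en, base + GPIO_INTMASK);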
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 8d32ccc980d9..b86e09e1b13b 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -239,12 +239,12 @@ static int em_gio_to_irq(struct gpio_chip *chip, unsigned offset)
static int em_gio_request(struct gpio_chip *chip, unsigned offset)
{
- return pinctrl_request_gpio(chip->base + offset);
+ return pinctrl_gpio_request(chip->base + offset);
}
static void em_gio_free(struct gpio_chip *chip, unsigned offset)
{
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
/* Set the GPIO as an input to ensure that the next GPIO request won't
* drive the GPIO pin as an output.
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 139f73d3f4ba..7b3394fdc624 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -150,7 +150,7 @@ static void ftgpio_gpio_irq_handler(struct irq_desc *desc)
stat = readl(g->base + GPIO_INT_STAT);
if (stat)
for_each_set_bit(offset, &stat, gc->ngpio)
- generic_handle_irq(irq_find_mapping(gc->irqdomain,
+ generic_handle_irq(irq_find_mapping(gc->irq.domain,
offset));
chained_irq_exit(irqchip, desc);
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 6544a16ab02e..e2fc561f4315 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -35,6 +35,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/bitops.h>
#define GRGPIO_MAX_NGPIO 32
@@ -96,12 +97,11 @@ static void grgpio_set_imask(struct grgpio_priv *priv, unsigned int offset,
int val)
{
struct gpio_chip *gc = &priv->gc;
- unsigned long mask = gc->pin2mask(gc, offset);
if (val)
- priv->imask |= mask;
+ priv->imask |= BIT(offset);
else
- priv->imask &= ~mask;
+ priv->imask &= ~BIT(offset);
gc->write_reg(priv->regs + GRGPIO_IMASK, priv->imask);
}
diff --git a/drivers/gpio/gpio-ingenic.c b/drivers/gpio/gpio-ingenic.c
index 254780730b95..15fb2bc796a8 100644
--- a/drivers/gpio/gpio-ingenic.c
+++ b/drivers/gpio/gpio-ingenic.c
@@ -242,7 +242,7 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
flag = gpio_ingenic_read_reg(jzgc, JZ4740_GPIO_FLAG);
for_each_set_bit(i, &flag, 32)
- generic_handle_irq(irq_linear_revmap(gc->irqdomain, i));
+ generic_handle_irq(irq_linear_revmap(gc->irq.domain, i));
chained_irq_exit(irq_chip, desc);
}
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index b76ecee82c3f..629575ea46a0 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -295,7 +295,7 @@ static void intel_mid_irq_handler(struct irq_desc *desc)
mask = BIT(gpio);
/* Clear before handling so we can't lose an edge */
writel(mask, gedr);
- generic_handle_irq(irq_find_mapping(gc->irqdomain,
+ generic_handle_irq(irq_find_mapping(gc->irq.domain,
base + gpio));
}
}
diff --git a/drivers/gpio/gpio-loongson1.c b/drivers/gpio/gpio-loongson1.c
index 72b64039241a..fca84ccac35c 100644
--- a/drivers/gpio/gpio-loongson1.c
+++ b/drivers/gpio/gpio-loongson1.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
+#include <linux/bitops.h>
/* Loongson 1 GPIO Register Definitions */
#define GPIO_CFG 0x0
@@ -22,11 +23,10 @@ static void __iomem *gpio_reg_base;
static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
- unsigned long pinmask = gc->pin2mask(gc, offset);
unsigned long flags;
spin_lock_irqsave(&gc->bgpio_lock, flags);
- __raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) | pinmask,
+ __raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) | BIT(offset),
gpio_reg_base + GPIO_CFG);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -35,11 +35,10 @@ static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
static void ls1x_gpio_free(struct gpio_chip *gc, unsigned int offset)
{
- unsigned long pinmask = gc->pin2mask(gc, offset);
unsigned long flags;
spin_lock_irqsave(&gc->bgpio_lock, flags);
- __raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) & ~pinmask,
+ __raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) & ~BIT(offset),
gpio_reg_base + GPIO_CFG);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index fbd393b46ce0..1e557b10d73e 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -255,7 +255,7 @@ static void lp_gpio_irq_handler(struct irq_desc *desc)
mask = BIT(pin);
/* Clear before handling so we don't lose an edge */
outl(mask, reg);
- irq = irq_find_mapping(lg->chip.irqdomain, base + pin);
+ irq = irq_find_mapping(lg->chip.irq.domain, base + pin);
generic_handle_irq(irq);
}
}
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
new file mode 100644
index 000000000000..f74b1072e84b
--- /dev/null
+++ b/drivers/gpio/gpio-max3191x.c
@@ -0,0 +1,492 @@
+/*
+ * gpio-max3191x.c - GPIO driver for Maxim MAX3191x industrial serializer
+ *
+ * Copyright (C) 2017 KUNBUS GmbH
+ *
+ * The MAX3191x makes 8 digital 24V inputs available via SPI.
+ * Multiple chips can be daisy-chained, the spec does not impose
+ * a limit on the number of chips and neither does this driver.
+ *
+ * Either of two modes is selectable: In 8-bit mode, only the state
+ * of the inputs is clocked out to achieve high readout speeds;
+ * In 16-bit mode, an additional status byte is clocked out with
+ * a CRC and indicator bits for undervoltage and overtemperature.
+ * The driver returns an error instead of potentially bogus data
+ * if any of these fault conditions occur. However, it does allow
+ * readout of non-faulting chips in the same daisy-chain.
+ *
+ * MAX3191x supports four debounce settings and the driver is
+ * capable of configuring these differently for each chip in the
+ * daisy-chain.
+ *
+ * If the chips are hardwired to 8-bit mode ("modesel" pulled high),
+ * gpio-pisosr.c can be used alternatively to this driver.
+ *
+ * https://datasheets.maximintegrated.com/en/ds/MAX31910.pdf
+ * https://datasheets.maximintegrated.com/en/ds/MAX31911.pdf
+ * https://datasheets.maximintegrated.com/en/ds/MAX31912.pdf
+ * https://datasheets.maximintegrated.com/en/ds/MAX31913.pdf
+ * https://datasheets.maximintegrated.com/en/ds/MAX31953-MAX31963.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/crc8.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+enum max3191x_mode {
+ STATUS_BYTE_ENABLED,
+ STATUS_BYTE_DISABLED,
+};
+
+/**
+ * struct max3191x_chip - max3191x daisy-chain
+ * @gpio: GPIO controller struct
+ * @lock: protects read sequences
+ * @nchips: number of chips in the daisy-chain
+ * @mode: current mode, 0 for 16-bit, 1 for 8-bit;
+ * for simplicity, all chips in the daisy-chain are assumed
+ * to use the same mode
+ * @modesel_pins: GPIO pins to configure modesel of each chip
+ * @fault_pins: GPIO pins to detect fault of each chip
+ * @db0_pins: GPIO pins to configure debounce of each chip
+ * @db1_pins: GPIO pins to configure debounce of each chip
+ * @mesg: SPI message to perform a readout
+ * @xfer: SPI transfer used by @mesg
+ * @crc_error: bitmap signaling CRC error for each chip
+ * @overtemp: bitmap signaling overtemperature alarm for each chip
+ * @undervolt1: bitmap signaling undervoltage alarm for each chip
+ * @undervolt2: bitmap signaling undervoltage warning for each chip
+ * @fault: bitmap signaling assertion of @fault_pins for each chip
+ * @ignore_uv: whether to ignore undervoltage alarms;
+ * set by a device property if the chips are powered through
+ * 5VOUT instead of VCC24V, in which case they will constantly
+ * signal undervoltage;
+ * for simplicity, all chips in the daisy-chain are assumed
+ * to be powered the same way
+ */
+struct max3191x_chip {
+ struct gpio_chip gpio;
+ struct mutex lock;
+ u32 nchips;
+ enum max3191x_mode mode;
+ struct gpio_descs *modesel_pins;
+ struct gpio_descs *fault_pins;
+ struct gpio_descs *db0_pins;
+ struct gpio_descs *db1_pins;
+ struct spi_message mesg;
+ struct spi_transfer xfer;
+ unsigned long *crc_error;
+ unsigned long *overtemp;
+ unsigned long *undervolt1;
+ unsigned long *undervolt2;
+ unsigned long *fault;
+ bool ignore_uv;
+};
+
+#define MAX3191X_NGPIO 8
+#define MAX3191X_CRC8_POLYNOMIAL 0xa8 /* (x^5) + x^4 + x^2 + x^0 */
+
+DECLARE_CRC8_TABLE(max3191x_crc8);
+
+static int max3191x_get_direction(struct gpio_chip *gpio, unsigned int offset)
+{
+ return 1; /* always in */
+}
+
+static int max3191x_direction_input(struct gpio_chip *gpio, unsigned int offset)
+{
+ return 0;
+}
+
+static int max3191x_direction_output(struct gpio_chip *gpio,
+ unsigned int offset, int value)
+{
+ return -EINVAL;
+}
+
+static void max3191x_set(struct gpio_chip *gpio, unsigned int offset, int value)
+{ }
+
+static void max3191x_set_multiple(struct gpio_chip *gpio, unsigned long *mask,
+ unsigned long *bits)
+{ }
+
+static unsigned int max3191x_wordlen(struct max3191x_chip *max3191x)
+{
+ return max3191x->mode == STATUS_BYTE_ENABLED ? 2 : 1;
+}
+
+static int max3191x_readout_locked(struct max3191x_chip *max3191x)
+{
+ struct device *dev = max3191x->gpio.parent;
+ struct spi_device *spi = to_spi_device(dev);
+ int val, i, ot = 0, uv1 = 0;
+
+ val = spi_sync(spi, &max3191x->mesg);
+ if (val) {
+ dev_err_ratelimited(dev, "SPI receive error %d\n", val);
+ return val;
+ }
+
+ for (i = 0; i < max3191x->nchips; i++) {
+ if (max3191x->mode == STATUS_BYTE_ENABLED) {
+ u8 in = ((u8 *)max3191x->xfer.rx_buf)[i * 2];
+ u8 status = ((u8 *)max3191x->xfer.rx_buf)[i * 2 + 1];
+
+ val = (status & 0xf8) != crc8(max3191x_crc8, &in, 1, 0);
+ __assign_bit(i, max3191x->crc_error, val);
+ if (val)
+ dev_err_ratelimited(dev,
+ "chip %d: CRC error\n", i);
+
+ ot = (status >> 1) & 1;
+ __assign_bit(i, max3191x->overtemp, ot);
+ if (ot)
+ dev_err_ratelimited(dev,
+ "chip %d: overtemperature\n", i);
+
+ if (!max3191x->ignore_uv) {
+ uv1 = !((status >> 2) & 1);
+ __assign_bit(i, max3191x->undervolt1, uv1);
+ if (uv1)
+ dev_err_ratelimited(dev,
+ "chip %d: undervoltage\n", i);
+
+ val = !(status & 1);
+ __assign_bit(i, max3191x->undervolt2, val);
+ if (val && !uv1)
+ dev_warn_ratelimited(dev,
+ "chip %d: voltage warn\n", i);
+ }
+ }
+
+ if (max3191x->fault_pins && !max3191x->ignore_uv) {
+ /* fault pin shared by all chips or per chip */
+ struct gpio_desc *fault_pin =
+ (max3191x->fault_pins->ndescs == 1)
+ ? max3191x->fault_pins->desc[0]
+ : max3191x->fault_pins->desc[i];
+
+ val = gpiod_get_value_cansleep(fault_pin);
+ if (val < 0) {
+ dev_err_ratelimited(dev,
+ "GPIO read error %d\n", val);
+ return val;
+ }
+ __assign_bit(i, max3191x->fault, val);
+ if (val && !uv1 && !ot)
+ dev_err_ratelimited(dev,
+ "chip %d: fault\n", i);
+ }
+ }
+
+ return 0;
+}
+
+static bool max3191x_chip_is_faulting(struct max3191x_chip *max3191x,
+ unsigned int chipnum)
+{
+ /* without status byte the only diagnostic is the fault pin */
+ if (!max3191x->ignore_uv && test_bit(chipnum, max3191x->fault))
+ return true;
+
+ if (max3191x->mode == STATUS_BYTE_DISABLED)
+ return false;
+
+ return test_bit(chipnum, max3191x->crc_error) ||
+ test_bit(chipnum, max3191x->overtemp) ||
+ (!max3191x->ignore_uv &&
+ test_bit(chipnum, max3191x->undervolt1));
+}
+
+static int max3191x_get(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct max3191x_chip *max3191x = gpiochip_get_data(gpio);
+ int ret, chipnum, wordlen = max3191x_wordlen(max3191x);
+ u8 in;
+
+ mutex_lock(&max3191x->lock);
+ ret = max3191x_readout_locked(max3191x);
+ if (ret)
+ goto out_unlock;
+
+ chipnum = offset / MAX3191X_NGPIO;
+ if (max3191x_chip_is_faulting(max3191x, chipnum)) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ in = ((u8 *)max3191x->xfer.rx_buf)[chipnum * wordlen];
+ ret = (in >> (offset % MAX3191X_NGPIO)) & 1;
+
+out_unlock:
+ mutex_unlock(&max3191x->lock);
+ return ret;
+}
+
+static int max3191x_get_multiple(struct gpio_chip *gpio, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct max3191x_chip *max3191x = gpiochip_get_data(gpio);
+ int ret, bit = 0, wordlen = max3191x_wordlen(max3191x);
+
+ mutex_lock(&max3191x->lock);
+ ret = max3191x_readout_locked(max3191x);
+ if (ret)
+ goto out_unlock;
+
+ while ((bit = find_next_bit(mask, gpio->ngpio, bit)) != gpio->ngpio) {
+ unsigned int chipnum = bit / MAX3191X_NGPIO;
+ unsigned long in, shift, index;
+
+ if (max3191x_chip_is_faulting(max3191x, chipnum)) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ in = ((u8 *)max3191x->xfer.rx_buf)[chipnum * wordlen];
+ shift = round_down(bit % BITS_PER_LONG, MAX3191X_NGPIO);
+ index = bit / BITS_PER_LONG;
+ bits[index] &= ~(mask[index] & (0xff << shift));
+ bits[index] |= mask[index] & (in << shift); /* copy bits */
+
+ bit = (chipnum + 1) * MAX3191X_NGPIO; /* go to next chip */
+ }
+
+out_unlock:
+ mutex_unlock(&max3191x->lock);
+ return ret;
+}
+
+static int max3191x_set_config(struct gpio_chip *gpio, unsigned int offset,
+ unsigned long config)
+{
+ struct max3191x_chip *max3191x = gpiochip_get_data(gpio);
+ u32 debounce, chipnum, db0_val, db1_val;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ if (!max3191x->db0_pins || !max3191x->db1_pins)
+ return -EINVAL;
+
+ debounce = pinconf_to_config_argument(config);
+ switch (debounce) {
+ case 0:
+ db0_val = 0;
+ db1_val = 0;
+ break;
+ case 1 ... 25:
+ db0_val = 0;
+ db1_val = 1;
+ break;
+ case 26 ... 750:
+ db0_val = 1;
+ db1_val = 0;
+ break;
+ case 751 ... 3000:
+ db0_val = 1;
+ db1_val = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (max3191x->db0_pins->ndescs == 1)
+ chipnum = 0; /* all chips use the same pair of debounce pins */
+ else
+ chipnum = offset / MAX3191X_NGPIO; /* per chip debounce pins */
+
+ mutex_lock(&max3191x->lock);
+ gpiod_set_value_cansleep(max3191x->db0_pins->desc[chipnum], db0_val);
+ gpiod_set_value_cansleep(max3191x->db1_pins->desc[chipnum], db1_val);
+ mutex_unlock(&max3191x->lock);
+ return 0;
+}
+
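+/*
+ * As an illustration (hypothetical consumer call): max3191x_set_config()
+ * is reached through the generic GPIO debounce API, e.g.
+ *
+ *   gpiod_set_debounce(desc, 750);
+ *
+ * where desc is the consumer's GPIO descriptor and the argument is the
+ * debounce time in microseconds, here selecting the 26..750 range
+ * (db0 = 1, db1 = 0).
+ */
+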
+static void gpiod_set_array_single_value_cansleep(unsigned int ndescs,
+ struct gpio_desc **desc,
+ int value)
+{
+ int i, values[ndescs];
+
+ for (i = 0; i < ndescs; i++)
+ values[i] = value;
+
+ gpiod_set_array_value_cansleep(ndescs, desc, values);
+}
+
+static struct gpio_descs *devm_gpiod_get_array_optional_count(
+ struct device *dev, const char *con_id,
+ enum gpiod_flags flags, unsigned int expected)
+{
+ struct gpio_descs *descs;
+ int found = gpiod_count(dev, con_id);
+
+ if (found == -ENOENT)
+ return NULL;
+
+ if (found != expected && found != 1) {
+ dev_err(dev, "ignoring %s-gpios: found %d, expected %u or 1\n",
+ con_id, found, expected);
+ return NULL;
+ }
+
+ descs = devm_gpiod_get_array_optional(dev, con_id, flags);
+
+ if (IS_ERR(descs)) {
+ dev_err(dev, "failed to get %s-gpios: %ld\n",
+ con_id, PTR_ERR(descs));
+ return NULL;
+ }
+
+ return descs;
+}
+
+static int max3191x_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct max3191x_chip *max3191x;
+ int n, ret;
+
+ max3191x = devm_kzalloc(dev, sizeof(*max3191x), GFP_KERNEL);
+ if (!max3191x)
+ return -ENOMEM;
+ spi_set_drvdata(spi, max3191x);
+
+ max3191x->nchips = 1;
+ device_property_read_u32(dev, "#daisy-chained-devices",
+ &max3191x->nchips);
+
+ n = BITS_TO_LONGS(max3191x->nchips);
+ max3191x->crc_error = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL);
+ max3191x->undervolt1 = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL);
+ max3191x->undervolt2 = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL);
+ max3191x->overtemp = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL);
+ max3191x->fault = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL);
+ max3191x->xfer.rx_buf = devm_kcalloc(dev, max3191x->nchips,
+ 2, GFP_KERNEL);
+ if (!max3191x->crc_error || !max3191x->undervolt1 ||
+ !max3191x->overtemp || !max3191x->undervolt2 ||
+ !max3191x->fault || !max3191x->xfer.rx_buf)
+ return -ENOMEM;
+
+ max3191x->modesel_pins = devm_gpiod_get_array_optional_count(dev,
+ "maxim,modesel", GPIOD_ASIS, max3191x->nchips);
+ max3191x->fault_pins = devm_gpiod_get_array_optional_count(dev,
+ "maxim,fault", GPIOD_IN, max3191x->nchips);
+ max3191x->db0_pins = devm_gpiod_get_array_optional_count(dev,
+ "maxim,db0", GPIOD_OUT_LOW, max3191x->nchips);
+ max3191x->db1_pins = devm_gpiod_get_array_optional_count(dev,
+ "maxim,db1", GPIOD_OUT_LOW, max3191x->nchips);
+
+ max3191x->mode = device_property_read_bool(dev, "maxim,modesel-8bit")
+ ? STATUS_BYTE_DISABLED : STATUS_BYTE_ENABLED;
+ if (max3191x->modesel_pins)
+ gpiod_set_array_single_value_cansleep(
+ max3191x->modesel_pins->ndescs,
+ max3191x->modesel_pins->desc, max3191x->mode);
+
+ max3191x->ignore_uv = device_property_read_bool(dev,
+ "maxim,ignore-undervoltage");
+
+ if (max3191x->db0_pins && max3191x->db1_pins &&
+ max3191x->db0_pins->ndescs != max3191x->db1_pins->ndescs) {
+ dev_err(dev, "ignoring maxim,db*-gpios: array len mismatch\n");
+ devm_gpiod_put_array(dev, max3191x->db0_pins);
+ devm_gpiod_put_array(dev, max3191x->db1_pins);
+ max3191x->db0_pins = NULL;
+ max3191x->db1_pins = NULL;
+ }
+
+ max3191x->xfer.len = max3191x->nchips * max3191x_wordlen(max3191x);
+ spi_message_init_with_transfers(&max3191x->mesg, &max3191x->xfer, 1);
+
+ max3191x->gpio.label = spi->modalias;
+ max3191x->gpio.owner = THIS_MODULE;
+ max3191x->gpio.parent = dev;
+ max3191x->gpio.base = -1;
+ max3191x->gpio.ngpio = max3191x->nchips * MAX3191X_NGPIO;
+ max3191x->gpio.can_sleep = true;
+
+ max3191x->gpio.get_direction = max3191x_get_direction;
+ max3191x->gpio.direction_input = max3191x_direction_input;
+ max3191x->gpio.direction_output = max3191x_direction_output;
+ max3191x->gpio.set = max3191x_set;
+ max3191x->gpio.set_multiple = max3191x_set_multiple;
+ max3191x->gpio.get = max3191x_get;
+ max3191x->gpio.get_multiple = max3191x_get_multiple;
+ max3191x->gpio.set_config = max3191x_set_config;
+
+ mutex_init(&max3191x->lock);
+
+ ret = gpiochip_add_data(&max3191x->gpio, max3191x);
+ if (ret) {
+ mutex_destroy(&max3191x->lock);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max3191x_remove(struct spi_device *spi)
+{
+ struct max3191x_chip *max3191x = spi_get_drvdata(spi);
+
+ gpiochip_remove(&max3191x->gpio);
+ mutex_destroy(&max3191x->lock);
+
+ return 0;
+}
+
+static int __init max3191x_register_driver(struct spi_driver *sdrv)
+{
+ crc8_populate_msb(max3191x_crc8, MAX3191X_CRC8_POLYNOMIAL);
+ return spi_register_driver(sdrv);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id max3191x_of_id[] = {
+ { .compatible = "maxim,max31910" },
+ { .compatible = "maxim,max31911" },
+ { .compatible = "maxim,max31912" },
+ { .compatible = "maxim,max31913" },
+ { .compatible = "maxim,max31953" },
+ { .compatible = "maxim,max31963" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max3191x_of_id);
+#endif
+
+static const struct spi_device_id max3191x_spi_id[] = {
+ { "max31910" },
+ { "max31911" },
+ { "max31912" },
+ { "max31913" },
+ { "max31953" },
+ { "max31963" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, max3191x_spi_id);
+
+static struct spi_driver max3191x_driver = {
+ .driver = {
+ .name = "max3191x",
+ .of_match_table = of_match_ptr(max3191x_of_id),
+ },
+ .probe = max3191x_probe,
+ .remove = max3191x_remove,
+ .id_table = max3191x_spi_id,
+};
+module_driver(max3191x_driver, max3191x_register_driver, spi_unregister_driver);
+
+MODULE_AUTHOR("Lukas Wunner <lukas@wunner.de>");
+MODULE_DESCRIPTION("GPIO driver for Maxim MAX3191x industrial serializer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 7f4d26ce5f23..c04fae1ba32a 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -486,7 +486,7 @@ static irqreturn_t max732x_irq_handler(int irq, void *devid)
do {
level = __ffs(pending);
- handle_nested_irq(irq_find_mapping(chip->gpio_chip.irqdomain,
+ handle_nested_irq(irq_find_mapping(chip->gpio_chip.irq.domain,
level));
pending &= ~(1 << level);
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 94d772677ed6..3134c0d2bfe4 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/init.h>
#include <linux/clk.h>
+#include <linux/module.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -52,11 +53,6 @@ static int mb86s70_gpio_request(struct gpio_chip *gc, unsigned gpio)
spin_lock_irqsave(&gchip->lock, flags);
val = readl(gchip->base + PFR(gpio));
- if (!(val & OFFSET(gpio))) {
- spin_unlock_irqrestore(&gchip->lock, flags);
- return -EINVAL;
- }
-
val &= ~OFFSET(gpio);
writel(val, gchip->base + PFR(gpio));
@@ -209,6 +205,7 @@ static const struct of_device_id mb86s70_gpio_dt_ids[] = {
{ .compatible = "fujitsu,mb86s70-gpio" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mb86s70_gpio_dt_ids);
static struct platform_driver mb86s70_gpio_driver = {
.driver = {
@@ -218,5 +215,8 @@ static struct platform_driver mb86s70_gpio_driver = {
.probe = mb86s70_gpio_probe,
.remove = mb86s70_gpio_remove,
};
+module_platform_driver(mb86s70_gpio_driver);
-builtin_platform_driver(mb86s70_gpio_driver);
+MODULE_DESCRIPTION("MB86S7x GPIO Driver");
+MODULE_ALIAS("platform:mb86s70-gpio");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index ec8560298805..dd67a31ac337 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -357,7 +357,7 @@ static void mrfld_irq_handler(struct irq_desc *desc)
for_each_set_bit(gpio, &pending, 32) {
unsigned int irq;
- irq = irq_find_mapping(gc->irqdomain, base + gpio);
+ irq = irq_find_mapping(gc->irq.domain, base + gpio);
generic_handle_irq(irq);
}
}
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index f7da40e46c55..f9042bcc27a4 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -126,20 +126,16 @@ static unsigned long bgpio_read32be(void __iomem *reg)
return ioread32be(reg);
}
-static unsigned long bgpio_pin2mask(struct gpio_chip *gc, unsigned int pin)
+static unsigned long bgpio_line2mask(struct gpio_chip *gc, unsigned int line)
{
- return BIT(pin);
-}
-
-static unsigned long bgpio_pin2mask_be(struct gpio_chip *gc,
- unsigned int pin)
-{
- return BIT(gc->bgpio_bits - 1 - pin);
+ if (gc->be_bits)
+ return BIT(gc->bgpio_bits - 1 - line);
+ return BIT(line);
}
static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
{
- unsigned long pinmask = gc->pin2mask(gc, gpio);
+ unsigned long pinmask = bgpio_line2mask(gc, gpio);
if (gc->bgpio_dir & pinmask)
return !!(gc->read_reg(gc->reg_set) & pinmask);
@@ -147,9 +143,76 @@ static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
return !!(gc->read_reg(gc->reg_dat) & pinmask);
}
+/*
+ * This assumes that the bits in the GPIO register are in native endianness.
+ * We only assign this function pointer if that is the case.
+ */
+static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ unsigned long get_mask = 0;
+ unsigned long set_mask = 0;
+ int bit = 0;
+
+ while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio) {
+ if (gc->bgpio_dir & BIT(bit))
+ set_mask |= BIT(bit);
+ else
+ get_mask |= BIT(bit);
+ }
+
+ if (set_mask)
+ *bits |= gc->read_reg(gc->reg_set) & set_mask;
+ if (get_mask)
+ *bits |= gc->read_reg(gc->reg_dat) & get_mask;
+
+ return 0;
+}
+
static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- return !!(gc->read_reg(gc->reg_dat) & gc->pin2mask(gc, gpio));
+ return !!(gc->read_reg(gc->reg_dat) & bgpio_line2mask(gc, gpio));
+}
+
+/*
+ * This only works if the bits in the GPIO register are in native endianness.
+ * It is dirt simple and fast in this case. (Also the most common case.)
+ */
+static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+
+ *bits = gc->read_reg(gc->reg_dat) & *mask;
+ return 0;
+}
+
+/*
+ * With big endian mirrored bit order it becomes more tedious.
+ */
+static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ unsigned long readmask = 0;
+ unsigned long val;
+ int bit;
+
+ /* Create a mirrored mask */
+ bit = 0;
+ while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio)
+ readmask |= bgpio_line2mask(gc, bit);
+
+ /* Read the register */
+ val = gc->read_reg(gc->reg_dat) & readmask;
+
+ /*
+ * Mirror the result into "bits"; this gives line 0
+ * in bit 0 ... line 31 in bit 31 for a 32-bit register.
+ */
+ bit = 0;
+ while ((bit = find_next_bit(&val, gc->ngpio, bit)) != gc->ngpio)
+ *bits |= bgpio_line2mask(gc, bit);
+
+ return 0;
}
static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -158,7 +221,7 @@ static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- unsigned long mask = gc->pin2mask(gc, gpio);
+ unsigned long mask = bgpio_line2mask(gc, gpio);
unsigned long flags;
spin_lock_irqsave(&gc->bgpio_lock, flags);
@@ -176,7 +239,7 @@ static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
int val)
{
- unsigned long mask = gc->pin2mask(gc, gpio);
+ unsigned long mask = bgpio_line2mask(gc, gpio);
if (val)
gc->write_reg(gc->reg_set, mask);
@@ -186,7 +249,7 @@ static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- unsigned long mask = gc->pin2mask(gc, gpio);
+ unsigned long mask = bgpio_line2mask(gc, gpio);
unsigned long flags;
spin_lock_irqsave(&gc->bgpio_lock, flags);
@@ -216,9 +279,9 @@ static void bgpio_multiple_get_masks(struct gpio_chip *gc,
break;
if (__test_and_clear_bit(i, mask)) {
if (test_bit(i, bits))
- *set_mask |= gc->pin2mask(gc, i);
+ *set_mask |= bgpio_line2mask(gc, i);
else
- *clear_mask |= gc->pin2mask(gc, i);
+ *clear_mask |= bgpio_line2mask(gc, i);
}
}
}
@@ -294,7 +357,7 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- gc->bgpio_dir &= ~gc->pin2mask(gc, gpio);
+ gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
gc->write_reg(gc->reg_dir, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -305,7 +368,7 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
/* Return 0 if output, 1 if input */
- return !(gc->read_reg(gc->reg_dir) & gc->pin2mask(gc, gpio));
+ return !(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
}
static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -316,7 +379,7 @@ static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- gc->bgpio_dir |= gc->pin2mask(gc, gpio);
+ gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
gc->write_reg(gc->reg_dir, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -330,7 +393,7 @@ static int bgpio_dir_in_inv(struct gpio_chip *gc, unsigned int gpio)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- gc->bgpio_dir |= gc->pin2mask(gc, gpio);
+ gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
gc->write_reg(gc->reg_dir, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -346,7 +409,7 @@ static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- gc->bgpio_dir &= ~gc->pin2mask(gc, gpio);
+ gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
gc->write_reg(gc->reg_dir, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -357,12 +420,11 @@ static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
static int bgpio_get_dir_inv(struct gpio_chip *gc, unsigned int gpio)
{
/* Return 0 if output, 1 if input */
- return !!(gc->read_reg(gc->reg_dir) & gc->pin2mask(gc, gpio));
+ return !!(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
}
static int bgpio_setup_accessors(struct device *dev,
struct gpio_chip *gc,
- bool bit_be,
bool byte_be)
{
@@ -406,8 +468,6 @@ static int bgpio_setup_accessors(struct device *dev,
return -EINVAL;
}
- gc->pin2mask = bit_be ? bgpio_pin2mask_be : bgpio_pin2mask;
-
return 0;
}
@@ -462,10 +522,24 @@ static int bgpio_setup_io(struct gpio_chip *gc,
}
if (!(flags & BGPIOF_UNREADABLE_REG_SET) &&
- (flags & BGPIOF_READ_OUTPUT_REG_SET))
+ (flags & BGPIOF_READ_OUTPUT_REG_SET)) {
gc->get = bgpio_get_set;
- else
+ if (!gc->be_bits)
+ gc->get_multiple = bgpio_get_set_multiple;
+ /*
+ * We deliberately avoid assigning the ->get_multiple() call
+ * for big endian mirrored registers which are ALSO reflecting
+ * their value in the set register when used as output. It is
+ * simply too much complexity; let the GPIO core fall back to
+ * reading each line individually in that fringe case.
+ */
+ } else {
gc->get = bgpio_get;
+ if (gc->be_bits)
+ gc->get_multiple = bgpio_get_multiple_be;
+ else
+ gc->get_multiple = bgpio_get_multiple;
+ }
return 0;
}
@@ -526,13 +600,13 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
gc->base = -1;
gc->ngpio = gc->bgpio_bits;
gc->request = bgpio_request;
+ gc->be_bits = !!(flags & BGPIOF_BIG_ENDIAN);
ret = bgpio_setup_io(gc, dat, set, clr, flags);
if (ret)
return ret;
- ret = bgpio_setup_accessors(dev, gc, flags & BGPIOF_BIG_ENDIAN,
- flags & BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ ret = bgpio_setup_accessors(dev, gc, flags & BGPIOF_BIG_ENDIAN_BYTE_ORDER);
if (ret)
return ret;
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 8c93dec498fa..c8673a5d9412 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/gpio/driver.h>
+#include <linux/bitops.h>
#define MPC8XXX_GPIO_PINS 32
@@ -44,6 +45,16 @@ struct mpc8xxx_gpio_chip {
unsigned int irqn;
};
+/*
+ * This hardware has a big endian bit assignment such that GPIO line 0 is
+ * connected to bit 31, line 1 to bit 30 ... line 31 to bit 0.
+ * This inline helper gives the right bitmask for a certain line.
+ */
+static inline u32 mpc_pin2mask(unsigned int offset)
+{
+ return BIT(31 - offset);
+}
+
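+/*
+ * As an illustration: with this bit assignment, mpc_pin2mask(0) yields
+ * BIT(31) and mpc_pin2mask(31) yields BIT(0).
+ */
+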
/* Workaround GPIO 1 errata on MPC8572/MPC8536. The status of GPIOs
* defined as output cannot be determined by reading GPDAT register,
* so we use shadow data register instead. The status of input pins
@@ -59,7 +70,7 @@ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
val = gc->read_reg(mpc8xxx_gc->regs + GPIO_DAT) & ~out_mask;
out_shadow = gc->bgpio_data & out_mask;
- return !!((val | out_shadow) & gc->pin2mask(gc, gpio));
+ return !!((val | out_shadow) & mpc_pin2mask(gpio));
}
static int mpc5121_gpio_dir_out(struct gpio_chip *gc,
@@ -120,7 +131,7 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)
gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
- | gc->pin2mask(gc, irqd_to_hwirq(d)));
+ | mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
@@ -135,7 +146,7 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
- & ~(gc->pin2mask(gc, irqd_to_hwirq(d))));
+ & ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
@@ -146,7 +157,7 @@ static void mpc8xxx_irq_ack(struct irq_data *d)
struct gpio_chip *gc = &mpc8xxx_gc->gc;
gc->write_reg(mpc8xxx_gc->regs + GPIO_IER,
- gc->pin2mask(gc, irqd_to_hwirq(d)));
+ mpc_pin2mask(irqd_to_hwirq(d)));
}
static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
@@ -160,7 +171,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
- | gc->pin2mask(gc, irqd_to_hwirq(d)));
+ | mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -168,7 +179,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
- & ~(gc->pin2mask(gc, irqd_to_hwirq(d))));
+ & ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 3233b72b6828..e136d666f1e5 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -737,7 +737,7 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);
- generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
+ generic_handle_irq(irq_find_mapping(bank->chip.irq.domain,
bit));
raw_spin_unlock_irqrestore(&bank->wa_lock,
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 1b9dbf691ae7..babb7bd2ba59 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -608,7 +608,7 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
for (i = 0; i < NBANK(chip); i++) {
while (pending[i]) {
level = __ffs(pending[i]);
- handle_nested_irq(irq_find_mapping(chip->gpio_chip.irqdomain,
+ handle_nested_irq(irq_find_mapping(chip->gpio_chip.irq.domain,
level + (BANK_SZ * i)));
pending[i] &= ~(1 << level);
nhandled++;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index a4fd78b9c0e4..38fbb420c6cd 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -196,7 +196,7 @@ static irqreturn_t pcf857x_irq(int irq, void *data)
mutex_unlock(&gpio->lock);
for_each_set_bit(i, &change, gpio->chip.ngpio)
- handle_nested_irq(irq_find_mapping(gpio->chip.irqdomain, i));
+ handle_nested_irq(irq_find_mapping(gpio->chip.irq.domain, i));
return IRQ_HANDLED;
}
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index 7de4f6a2cb49..57d1b7fbf07b 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -240,7 +240,7 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
for_each_set_bit(gpio, &idio16gpio->irq_mask, chip->ngpio)
- generic_handle_irq(irq_find_mapping(chip->irqdomain, gpio));
+ generic_handle_irq(irq_find_mapping(chip->irq.domain, gpio));
raw_spin_lock(&idio16gpio->lock);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 6aaaab79c205..b70974cb9ef1 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -221,7 +221,7 @@ static void pl061_irq_handler(struct irq_desc *desc)
pending = readb(pl061->base + GPIOMIS);
if (pending) {
for_each_set_bit(offset, &pending, PL061_GPIO_NR)
- generic_handle_irq(irq_find_mapping(gc->irqdomain,
+ generic_handle_irq(irq_find_mapping(gc->irq.domain,
offset));
}
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 6029899789f3..f480fb896963 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -330,16 +330,6 @@ static int pxa_gpio_of_xlate(struct gpio_chip *gc,
}
#endif
-static int pxa_gpio_request(struct gpio_chip *chip, unsigned int offset)
-{
- return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void pxa_gpio_free(struct gpio_chip *chip, unsigned int offset)
-{
- pinctrl_free_gpio(chip->base + offset);
-}
-
static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
struct device_node *np, void __iomem *regbase)
{
@@ -358,8 +348,8 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
pchip->chip.set = pxa_gpio_set;
pchip->chip.to_irq = pxa_gpio_to_irq;
pchip->chip.ngpio = ngpio;
- pchip->chip.request = pxa_gpio_request;
- pchip->chip.free = pxa_gpio_free;
+ pchip->chip.request = gpiochip_generic_request;
+ pchip->chip.free = gpiochip_generic_free;
#ifdef CONFIG_OF_GPIO
pchip->chip.of_node = np;
pchip->chip.of_xlate = pxa_gpio_of_xlate;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 1f0871553fd2..e76de57dd617 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -206,7 +207,7 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
gpio_rcar_read(p, INTMSK))) {
offset = __ffs(pending);
gpio_rcar_write(p, INTCLR, BIT(offset));
- generic_handle_irq(irq_find_mapping(p->gpio_chip.irqdomain,
+ generic_handle_irq(irq_find_mapping(p->gpio_chip.irq.domain,
offset));
irqs_handled++;
}
@@ -249,7 +250,7 @@ static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
if (error < 0)
return error;
- error = pinctrl_request_gpio(chip->base + offset);
+ error = pinctrl_gpio_request(chip->base + offset);
if (error)
pm_runtime_put(&p->pdev->dev);
@@ -260,7 +261,7 @@ static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
{
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
/*
* Set the GPIO as an input to ensure that the next GPIO request won't
@@ -393,16 +394,11 @@ MODULE_DEVICE_TABLE(of, gpio_rcar_of_table);
static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
{
struct device_node *np = p->pdev->dev.of_node;
- const struct of_device_id *match;
const struct gpio_rcar_info *info;
struct of_phandle_args args;
int ret;
- match = of_match_node(gpio_rcar_of_table, np);
- if (!match)
- return -EINVAL;
-
- info = match->data;
+ info = of_device_get_match_data(&p->pdev->dev);
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
*npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
@@ -456,19 +452,17 @@ static int gpio_rcar_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-
- if (!io || !irq) {
- dev_err(dev, "missing IRQ or IOMEM\n");
+ if (!irq) {
+ dev_err(dev, "missing IRQ\n");
ret = -EINVAL;
goto err0;
}
- p->base = devm_ioremap_nocache(dev, io->start, resource_size(io));
- if (!p->base) {
- dev_err(dev, "failed to remap I/O memory\n");
- ret = -ENXIO;
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ p->base = devm_ioremap_resource(dev, io);
+ if (IS_ERR(p->base)) {
+ ret = PTR_ERR(p->base);
goto err0;
}
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index e85903eddc68..23e771dba4c1 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -103,8 +103,8 @@ static int gpio_reg_to_irq(struct gpio_chip *gc, unsigned offset)
struct gpio_reg *r = to_gpio_reg(gc);
int irq = r->irqs[offset];
- if (irq >= 0 && r->irqdomain)
- irq = irq_find_mapping(r->irqdomain, irq);
+ if (irq >= 0 && r->irq.domain)
+ irq = irq_find_mapping(r->irq.domain, irq);
return irq;
}
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 16cbc5702865..e6e5cca624a7 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -299,7 +299,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
if (ret < 0)
return;
edge_det = !!(ret & mask);
-
+ /* fall through */
case STMPE1801:
rise_reg = stmpe->regs[STMPE_IDX_GPRER_LSB + bank];
fall_reg = stmpe->regs[STMPE_IDX_GPFER_LSB + bank];
@@ -312,7 +312,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
if (ret < 0)
return;
fall = !!(ret & mask);
-
+ /* fall through */
case STMPE801:
case STMPE1600:
irqen_reg = stmpe->regs[STMPE_IDX_IEGPIOR_LSB + bank];
@@ -397,7 +397,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
while (stat) {
int bit = __ffs(stat);
int line = bank * 8 + bit;
- int child_irq = irq_find_mapping(stmpe_gpio->chip.irqdomain,
+ int child_irq = irq_find_mapping(stmpe_gpio->chip.irq.domain,
line);
handle_nested_irq(child_irq);
@@ -451,7 +451,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
of_property_read_u32(np, "st,norequest-mask",
&stmpe_gpio->norequest_mask);
if (stmpe_gpio->norequest_mask)
- stmpe_gpio->chip.irq_need_valid_mask = true;
+ stmpe_gpio->chip.irq.need_valid_mask = true;
if (irq < 0)
dev_info(&pdev->dev,
@@ -482,7 +482,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
/* Forbid unused lines to be mapped as IRQs */
for (i = 0; i < sizeof(u32); i++)
if (stmpe_gpio->norequest_mask & BIT(i))
- clear_bit(i, stmpe_gpio->chip.irq_valid_mask);
+ clear_bit(i, stmpe_gpio->chip.irq.valid_mask);
}
ret = gpiochip_irqchip_add_nested(&stmpe_gpio->chip,
&stmpe_gpio_irq_chip,
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 091ffaaec635..ac6f2a9841e5 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -193,6 +193,9 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
tb10x_gpio->gc.label =
devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOF", pdev->dev.of_node);
+ if (!tb10x_gpio->gc.label)
+ return -ENOMEM;
+
tb10x_gpio->gc.parent = &pdev->dev;
tb10x_gpio->gc.owner = THIS_MODULE;
tb10x_gpio->gc.direction_input = tb10x_gpio_direction_in;
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 433b45ef332e..91a8ef8e7f3f 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -268,7 +268,7 @@ static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
while (stat) {
int bit = __ffs(stat);
int line = i * 8 + bit;
- int irq = irq_find_mapping(tc3589x_gpio->chip.irqdomain,
+ int irq = irq_find_mapping(tc3589x_gpio->chip.irq.domain,
line);
handle_nested_irq(irq);
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index fbaf974277df..8db47f671708 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -141,14 +141,14 @@ static void tegra_gpio_disable(struct tegra_gpio_info *tgi, unsigned int gpio)
static int tegra_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
- return pinctrl_request_gpio(offset);
+ return pinctrl_gpio_request(offset);
}
static void tegra_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
- pinctrl_free_gpio(offset);
+ pinctrl_gpio_free(offset);
tegra_gpio_disable(tgi, offset);
}
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
new file mode 100644
index 000000000000..7f1aa4c21e0d
--- /dev/null
+++ b/drivers/gpio/gpio-tegra186.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright (c) 2016-2017 NVIDIA Corporation
+ *
+ * Author: Thierry Reding <treding@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/gpio/tegra186-gpio.h>
+
+#define TEGRA186_GPIO_ENABLE_CONFIG 0x00
+#define TEGRA186_GPIO_ENABLE_CONFIG_ENABLE BIT(0)
+#define TEGRA186_GPIO_ENABLE_CONFIG_OUT BIT(1)
+#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_NONE (0x0 << 2)
+#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_LEVEL (0x1 << 2)
+#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE (0x2 << 2)
+#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE (0x3 << 2)
+#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_MASK (0x3 << 2)
+#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL BIT(4)
+#define TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT BIT(6)
+
+#define TEGRA186_GPIO_DEBOUNCE_CONTROL 0x04
+#define TEGRA186_GPIO_DEBOUNCE_CONTROL_THRESHOLD(x) ((x) & 0xff)
+
+#define TEGRA186_GPIO_INPUT 0x08
+#define TEGRA186_GPIO_INPUT_HIGH BIT(0)
+
+#define TEGRA186_GPIO_OUTPUT_CONTROL 0x0c
+#define TEGRA186_GPIO_OUTPUT_CONTROL_FLOATED BIT(0)
+
+#define TEGRA186_GPIO_OUTPUT_VALUE 0x10
+#define TEGRA186_GPIO_OUTPUT_VALUE_HIGH BIT(0)
+
+#define TEGRA186_GPIO_INTERRUPT_CLEAR 0x14
+
+#define TEGRA186_GPIO_INTERRUPT_STATUS(x) (0x100 + (x) * 4)
+
+struct tegra_gpio_port {
+ const char *name;
+ unsigned int offset;
+ unsigned int pins;
+ unsigned int irq;
+};
+
+struct tegra_gpio_soc {
+ const struct tegra_gpio_port *ports;
+ unsigned int num_ports;
+ const char *name;
+};
+
+struct tegra_gpio {
+ struct gpio_chip gpio;
+ struct irq_chip intc;
+ unsigned int num_irq;
+ unsigned int *irq;
+
+ const struct tegra_gpio_soc *soc;
+
+ void __iomem *base;
+};
+
+static const struct tegra_gpio_port *
+tegra186_gpio_get_port(struct tegra_gpio *gpio, unsigned int *pin)
+{
+ unsigned int start = 0, i;
+
+ for (i = 0; i < gpio->soc->num_ports; i++) {
+ const struct tegra_gpio_port *port = &gpio->soc->ports[i];
+
+ if (*pin >= start && *pin < start + port->pins) {
+ *pin -= start;
+ return port;
+ }
+
+ start += port->pins;
+ }
+
+ return NULL;
+}
+
+static void __iomem *tegra186_gpio_get_base(struct tegra_gpio *gpio,
+ unsigned int pin)
+{
+ const struct tegra_gpio_port *port;
+
+ port = tegra186_gpio_get_port(gpio, &pin);
+ if (!port)
+ return NULL;
+
+ return gpio->base + port->offset + pin * 0x20;
+}
+
+static int tegra186_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -ENODEV;
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ if (value & TEGRA186_GPIO_ENABLE_CONFIG_OUT)
+ return 0;
+
+ return 1;
+}
+
+static int tegra186_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -ENODEV;
+
+ value = readl(base + TEGRA186_GPIO_OUTPUT_CONTROL);
+ value |= TEGRA186_GPIO_OUTPUT_CONTROL_FLOATED;
+ writel(value, base + TEGRA186_GPIO_OUTPUT_CONTROL);
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_ENABLE;
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_OUT;
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+ return 0;
+}
+
+static int tegra186_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int level)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ void __iomem *base;
+ u32 value;
+
+ /* configure output level first */
+ chip->set(chip, offset, level);
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -EINVAL;
+
+ /* set the direction */
+ value = readl(base + TEGRA186_GPIO_OUTPUT_CONTROL);
+ value &= ~TEGRA186_GPIO_OUTPUT_CONTROL_FLOATED;
+ writel(value, base + TEGRA186_GPIO_OUTPUT_CONTROL);
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_ENABLE;
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_OUT;
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+ return 0;
+}
+
+static int tegra186_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -ENODEV;
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ if (value & TEGRA186_GPIO_ENABLE_CONFIG_OUT)
+ value = readl(base + TEGRA186_GPIO_OUTPUT_VALUE);
+ else
+ value = readl(base + TEGRA186_GPIO_INPUT);
+
+ return value & BIT(0);
+}
+
+static void tegra186_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int level)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return;
+
+ value = readl(base + TEGRA186_GPIO_OUTPUT_VALUE);
+ if (level == 0)
+ value &= ~TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
+ else
+ value |= TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
+
+ writel(value, base + TEGRA186_GPIO_OUTPUT_VALUE);
+}
+
+static int tegra186_gpio_of_xlate(struct gpio_chip *chip,
+ const struct of_phandle_args *spec,
+ u32 *flags)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ unsigned int port, pin, i, offset = 0;
+
+ if (WARN_ON(chip->of_gpio_n_cells < 2))
+ return -EINVAL;
+
+ if (WARN_ON(spec->args_count < chip->of_gpio_n_cells))
+ return -EINVAL;
+
+ port = spec->args[0] / 8;
+ pin = spec->args[0] % 8;
+
+ if (port >= gpio->soc->num_ports) {
+ dev_err(chip->parent, "invalid port number: %u\n", port);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < port; i++)
+ offset += gpio->soc->ports[i].pins;
+
+ if (flags)
+ *flags = spec->args[1];
+
+ return offset + pin;
+}
+
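+/*
+ * As an illustration: the first cell of a GPIO specifier encodes
+ * port * 8 + pin. Assuming port C carries index 2 in the dt-binding
+ * macros, a specifier for port C, pin 2 (cell value 2 * 8 + 2 = 18)
+ * resolves to offset 7 (port A) + 7 (port B) + 2 = 16 on the main
+ * controller, per the port table below.
+ */
+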
+static void tegra186_irq_ack(struct irq_data *data)
+{
+ struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ void __iomem *base;
+
+ base = tegra186_gpio_get_base(gpio, data->hwirq);
+ if (WARN_ON(base == NULL))
+ return;
+
+ writel(1, base + TEGRA186_GPIO_INTERRUPT_CLEAR);
+}
+
+static void tegra186_irq_mask(struct irq_data *data)
+{
+ struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, data->hwirq);
+ if (WARN_ON(base == NULL))
+ return;
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT;
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+}
+
+static void tegra186_irq_unmask(struct irq_data *data)
+{
+ struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, data->hwirq);
+ if (WARN_ON(base == NULL))
+ return;
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT;
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+}
+
+static int tegra186_irq_set_type(struct irq_data *data, unsigned int flow)
+{
+ struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, data->hwirq);
+ if (WARN_ON(base == NULL))
+ return -ENODEV;
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_MASK;
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL;
+
+ switch (flow & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_NONE:
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_LEVEL;
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_LEVEL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+ if ((flow & IRQ_TYPE_EDGE_BOTH) == 0)
+ irq_set_handler_locked(data, handle_level_irq);
+ else
+ irq_set_handler_locked(data, handle_edge_irq);
+
+ return 0;
+}
+
+static void tegra186_gpio_irq(struct irq_desc *desc)
+{
+ struct tegra_gpio *gpio = irq_desc_get_handler_data(desc);
+ struct irq_domain *domain = gpio->gpio.irq.domain;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int parent = irq_desc_get_irq(desc);
+ unsigned int i, offset = 0;
+
+ chained_irq_enter(chip, desc);
+
+ for (i = 0; i < gpio->soc->num_ports; i++) {
+ const struct tegra_gpio_port *port = &gpio->soc->ports[i];
+ void __iomem *base = gpio->base + port->offset;
+ unsigned int pin, irq;
+ unsigned long value;
+
+ /* skip ports that are not associated with this controller */
+ if (parent != gpio->irq[port->irq])
+ goto skip;
+
+ value = readl(base + TEGRA186_GPIO_INTERRUPT_STATUS(1));
+
+ for_each_set_bit(pin, &value, port->pins) {
+ irq = irq_find_mapping(domain, offset + pin);
+ if (WARN_ON(irq == 0))
+ continue;
+
+ generic_handle_irq(irq);
+ }
+
+skip:
+ offset += port->pins;
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int tegra186_gpio_irq_domain_xlate(struct irq_domain *domain,
+ struct device_node *np,
+ const u32 *spec, unsigned int size,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(domain->host_data);
+ unsigned int port, pin, i, offset = 0;
+
+ if (size < 2)
+ return -EINVAL;
+
+ port = spec[0] / 8;
+ pin = spec[0] % 8;
+
+ if (port >= gpio->soc->num_ports) {
+ dev_err(gpio->gpio.parent, "invalid port number: %u\n", port);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < port; i++)
+ offset += gpio->soc->ports[i].pins;
+
+ *type = spec[1] & IRQ_TYPE_SENSE_MASK;
+ *hwirq = offset + pin;
+
+ return 0;
+}
+
+static const struct irq_domain_ops tegra186_gpio_irq_domain_ops = {
+ .map = gpiochip_irq_map,
+ .unmap = gpiochip_irq_unmap,
+ .xlate = tegra186_gpio_irq_domain_xlate,
+};
+
+static int tegra186_gpio_probe(struct platform_device *pdev)
+{
+ unsigned int i, j, offset;
+ struct gpio_irq_chip *irq;
+ struct tegra_gpio *gpio;
+ struct resource *res;
+ char **names;
+ int err;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->soc = of_device_get_match_data(&pdev->dev);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio");
+ gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpio->base))
+ return PTR_ERR(gpio->base);
+
+ err = platform_irq_count(pdev);
+ if (err < 0)
+ return err;
+
+ gpio->num_irq = err;
+
+ gpio->irq = devm_kcalloc(&pdev->dev, gpio->num_irq, sizeof(*gpio->irq),
+ GFP_KERNEL);
+ if (!gpio->irq)
+ return -ENOMEM;
+
+ for (i = 0; i < gpio->num_irq; i++) {
+ err = platform_get_irq(pdev, i);
+ if (err < 0)
+ return err;
+
+ gpio->irq[i] = err;
+ }
+
+ gpio->gpio.label = gpio->soc->name;
+ gpio->gpio.parent = &pdev->dev;
+
+ gpio->gpio.get_direction = tegra186_gpio_get_direction;
+ gpio->gpio.direction_input = tegra186_gpio_direction_input;
+ gpio->gpio.direction_output = tegra186_gpio_direction_output;
+ gpio->gpio.get = tegra186_gpio_get,
+ gpio->gpio.set = tegra186_gpio_set;
+
+ gpio->gpio.base = -1;
+
+ for (i = 0; i < gpio->soc->num_ports; i++)
+ gpio->gpio.ngpio += gpio->soc->ports[i].pins;
+
+ names = devm_kcalloc(gpio->gpio.parent, gpio->gpio.ngpio,
+ sizeof(*names), GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+
+ for (i = 0, offset = 0; i < gpio->soc->num_ports; i++) {
+ const struct tegra_gpio_port *port = &gpio->soc->ports[i];
+ char *name;
+
+ for (j = 0; j < port->pins; j++) {
+ name = devm_kasprintf(gpio->gpio.parent, GFP_KERNEL,
+ "P%s.%02x", port->name, j);
+ if (!name)
+ return -ENOMEM;
+
+ names[offset + j] = name;
+ }
+
+ offset += port->pins;
+ }
+
+ gpio->gpio.names = (const char * const *)names;
+
+ gpio->gpio.of_node = pdev->dev.of_node;
+ gpio->gpio.of_gpio_n_cells = 2;
+ gpio->gpio.of_xlate = tegra186_gpio_of_xlate;
+
+ gpio->intc.name = pdev->dev.of_node->name;
+ gpio->intc.irq_ack = tegra186_irq_ack;
+ gpio->intc.irq_mask = tegra186_irq_mask;
+ gpio->intc.irq_unmask = tegra186_irq_unmask;
+ gpio->intc.irq_set_type = tegra186_irq_set_type;
+
+ irq = &gpio->gpio.irq;
+ irq->chip = &gpio->intc;
+ irq->domain_ops = &tegra186_gpio_irq_domain_ops;
+ irq->handler = handle_simple_irq;
+ irq->default_type = IRQ_TYPE_NONE;
+ irq->parent_handler = tegra186_gpio_irq;
+ irq->parent_handler_data = gpio;
+ irq->num_parents = gpio->num_irq;
+ irq->parents = gpio->irq;
+
+ irq->map = devm_kcalloc(&pdev->dev, gpio->gpio.ngpio,
+ sizeof(*irq->map), GFP_KERNEL);
+ if (!irq->map)
+ return -ENOMEM;
+
+ for (i = 0, offset = 0; i < gpio->soc->num_ports; i++) {
+ const struct tegra_gpio_port *port = &gpio->soc->ports[i];
+
+ for (j = 0; j < port->pins; j++)
+ irq->map[offset + j] = irq->parents[port->irq];
+
+ offset += port->pins;
+ }
+
+ platform_set_drvdata(pdev, gpio);
+
+ err = devm_gpiochip_add_data(&pdev->dev, &gpio->gpio, gpio);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int tegra186_gpio_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+#define TEGRA_MAIN_GPIO_PORT(port, base, count, controller) \
+ [TEGRA_MAIN_GPIO_PORT_##port] = { \
+ .name = #port, \
+ .offset = base, \
+ .pins = count, \
+ .irq = controller, \
+ }
+
+static const struct tegra_gpio_port tegra186_main_ports[] = {
+ TEGRA_MAIN_GPIO_PORT( A, 0x2000, 7, 2),
+ TEGRA_MAIN_GPIO_PORT( B, 0x3000, 7, 3),
+ TEGRA_MAIN_GPIO_PORT( C, 0x3200, 7, 3),
+ TEGRA_MAIN_GPIO_PORT( D, 0x3400, 6, 3),
+ TEGRA_MAIN_GPIO_PORT( E, 0x2200, 8, 2),
+ TEGRA_MAIN_GPIO_PORT( F, 0x2400, 6, 2),
+ TEGRA_MAIN_GPIO_PORT( G, 0x4200, 6, 4),
+ TEGRA_MAIN_GPIO_PORT( H, 0x1000, 7, 1),
+ TEGRA_MAIN_GPIO_PORT( I, 0x0800, 8, 0),
+ TEGRA_MAIN_GPIO_PORT( J, 0x5000, 8, 5),
+ TEGRA_MAIN_GPIO_PORT( K, 0x5200, 1, 5),
+ TEGRA_MAIN_GPIO_PORT( L, 0x1200, 8, 1),
+ TEGRA_MAIN_GPIO_PORT( M, 0x5600, 6, 5),
+ TEGRA_MAIN_GPIO_PORT( N, 0x0000, 7, 0),
+ TEGRA_MAIN_GPIO_PORT( O, 0x0200, 4, 0),
+ TEGRA_MAIN_GPIO_PORT( P, 0x4000, 7, 4),
+ TEGRA_MAIN_GPIO_PORT( Q, 0x0400, 6, 0),
+ TEGRA_MAIN_GPIO_PORT( R, 0x0a00, 6, 0),
+ TEGRA_MAIN_GPIO_PORT( T, 0x0600, 4, 0),
+ TEGRA_MAIN_GPIO_PORT( X, 0x1400, 8, 1),
+ TEGRA_MAIN_GPIO_PORT( Y, 0x1600, 7, 1),
+ TEGRA_MAIN_GPIO_PORT(BB, 0x2600, 2, 2),
+ TEGRA_MAIN_GPIO_PORT(CC, 0x5400, 4, 5),
+};
+
+static const struct tegra_gpio_soc tegra186_main_soc = {
+ .num_ports = ARRAY_SIZE(tegra186_main_ports),
+ .ports = tegra186_main_ports,
+ .name = "tegra186-gpio",
+};
+
+#define TEGRA_AON_GPIO_PORT(port, base, count, controller) \
+ [TEGRA_AON_GPIO_PORT_##port] = { \
+ .name = #port, \
+ .offset = base, \
+ .pins = count, \
+ .irq = controller, \
+ }
+
+static const struct tegra_gpio_port tegra186_aon_ports[] = {
+ TEGRA_AON_GPIO_PORT( S, 0x0200, 5, 0),
+ TEGRA_AON_GPIO_PORT( U, 0x0400, 6, 0),
+ TEGRA_AON_GPIO_PORT( V, 0x0800, 8, 0),
+ TEGRA_AON_GPIO_PORT( W, 0x0a00, 8, 0),
+ TEGRA_AON_GPIO_PORT( Z, 0x0e00, 4, 0),
+ TEGRA_AON_GPIO_PORT(AA, 0x0c00, 8, 0),
+ TEGRA_AON_GPIO_PORT(EE, 0x0600, 3, 0),
+ TEGRA_AON_GPIO_PORT(FF, 0x0000, 5, 0),
+};
+
+static const struct tegra_gpio_soc tegra186_aon_soc = {
+ .num_ports = ARRAY_SIZE(tegra186_aon_ports),
+ .ports = tegra186_aon_ports,
+ .name = "tegra186-gpio-aon",
+};
+
+static const struct of_device_id tegra186_gpio_of_match[] = {
+ {
+ .compatible = "nvidia,tegra186-gpio",
+ .data = &tegra186_main_soc
+ }, {
+ .compatible = "nvidia,tegra186-gpio-aon",
+ .data = &tegra186_aon_soc
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver tegra186_gpio_driver = {
+ .driver = {
+ .name = "tegra186-gpio",
+ .of_match_table = tegra186_gpio_of_match,
+ },
+ .probe = tegra186_gpio_probe,
+ .remove = tegra186_gpio_remove,
+};
+module_platform_driver(tegra186_gpio_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra186 GPIO controller driver");
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index 57efb251f9c4..b5adb79a631a 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -417,18 +417,6 @@ static struct irq_chip thunderx_gpio_irq_chip = {
.flags = IRQCHIP_SET_TYPE_MASKED
};
-static int thunderx_gpio_irq_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct thunderx_gpio *txgpio = d->host_data;
-
- if (hwirq >= txgpio->chip.ngpio)
- return -EINVAL;
- if (!thunderx_gpio_is_gpio_nowarn(txgpio, hwirq))
- return -EPERM;
- return 0;
-}
-
static int thunderx_gpio_irq_translate(struct irq_domain *d,
struct irq_fwspec *fwspec,
irq_hw_number_t *hwirq,
@@ -455,7 +443,6 @@ static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq,
}
static const struct irq_domain_ops thunderx_gpio_irqd_ops = {
- .map = thunderx_gpio_irq_map,
.alloc = thunderx_gpio_irq_alloc,
.translate = thunderx_gpio_irq_translate
};
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index 22c5be65051f..0bb9bb583889 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -232,7 +232,7 @@ static int tz1090_gpio_request(struct gpio_chip *chip, unsigned int offset)
struct tz1090_gpio_bank *bank = gpiochip_get_data(chip);
int ret;
- ret = pinctrl_request_gpio(chip->base + offset);
+ ret = pinctrl_gpio_request(chip->base + offset);
if (ret)
return ret;
@@ -246,7 +246,7 @@ static void tz1090_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct tz1090_gpio_bank *bank = gpiochip_get_data(chip);
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
tz1090_gpio_clear_bit(bank, REG_GPIO_BIT_EN, offset);
}
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
new file mode 100644
index 000000000000..016d7427ebfa
--- /dev/null
+++ b/drivers/gpio/gpio-uniphier.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright (C) 2017 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <dt-bindings/gpio/uniphier-gpio.h>
+
+#define UNIPHIER_GPIO_BANK_MASK \
+ GENMASK((UNIPHIER_GPIO_LINES_PER_BANK) - 1, 0)
+
+#define UNIPHIER_GPIO_IRQ_MAX_NUM 24
+
+#define UNIPHIER_GPIO_PORT_DATA 0x0 /* data */
+#define UNIPHIER_GPIO_PORT_DIR 0x4 /* direction (1:in, 0:out) */
+#define UNIPHIER_GPIO_IRQ_EN 0x90 /* irq enable */
+#define UNIPHIER_GPIO_IRQ_MODE 0x94 /* irq mode (1: both edge) */
+#define UNIPHIER_GPIO_IRQ_FLT_EN 0x98 /* noise filter enable */
+#define UNIPHIER_GPIO_IRQ_FLT_CYC 0x9c /* noise filter clock cycle */
+
+struct uniphier_gpio_priv {
+ struct gpio_chip chip;
+ struct irq_chip irq_chip;
+ struct irq_domain *domain;
+ void __iomem *regs;
+ spinlock_t lock;
+ u32 saved_vals[0];
+};
+
+static unsigned int uniphier_gpio_bank_to_reg(unsigned int bank)
+{
+ unsigned int reg;
+
+ reg = (bank + 1) * 8;
+
+ /*
+ * Unfortunately, the GPIO port registers are not contiguous because
+ * offset 0x90-0x9f is used for IRQ. Add 0x10 when crossing the region.
+ */
+ if (reg >= UNIPHIER_GPIO_IRQ_EN)
+ reg += 0x10;
+
+ return reg;
+}
+
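+/*
+ * As a worked example: bank 0 maps to register offset 0x8 and bank 16
+ * to 0x88, while bank 17 would land on 0x90 and is therefore pushed
+ * past the IRQ registers to 0xa0.
+ */
+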
+static void uniphier_gpio_get_bank_and_mask(unsigned int offset,
+ unsigned int *bank, u32 *mask)
+{
+ *bank = offset / UNIPHIER_GPIO_LINES_PER_BANK;
+ *mask = BIT(offset % UNIPHIER_GPIO_LINES_PER_BANK);
+}
+
+static void uniphier_gpio_reg_update(struct uniphier_gpio_priv *priv,
+ unsigned int reg, u32 mask, u32 val)
+{
+ unsigned long flags;
+ u32 tmp;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ tmp = readl(priv->regs + reg);
+ tmp &= ~mask;
+ tmp |= mask & val;
+ writel(tmp, priv->regs + reg);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void uniphier_gpio_bank_write(struct gpio_chip *chip, unsigned int bank,
+ unsigned int reg, u32 mask, u32 val)
+{
+ struct uniphier_gpio_priv *priv = gpiochip_get_data(chip);
+
+ if (!mask)
+ return;
+
+ uniphier_gpio_reg_update(priv, uniphier_gpio_bank_to_reg(bank) + reg,
+ mask, val);
+}
+
+static void uniphier_gpio_offset_write(struct gpio_chip *chip,
+ unsigned int offset, unsigned int reg,
+ int val)
+{
+ unsigned int bank;
+ u32 mask;
+
+ uniphier_gpio_get_bank_and_mask(offset, &bank, &mask);
+
+ uniphier_gpio_bank_write(chip, bank, reg, mask, val ? mask : 0);
+}
+
+static int uniphier_gpio_offset_read(struct gpio_chip *chip,
+ unsigned int offset, unsigned int reg)
+{
+ struct uniphier_gpio_priv *priv = gpiochip_get_data(chip);
+ unsigned int bank, reg_offset;
+ u32 mask;
+
+ uniphier_gpio_get_bank_and_mask(offset, &bank, &mask);
+ reg_offset = uniphier_gpio_bank_to_reg(bank) + reg;
+
+ return !!(readl(priv->regs + reg_offset) & mask);
+}
+
+static int uniphier_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DIR);
+}
+
+static int uniphier_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DIR, 1);
+
+ return 0;
+}
+
+static int uniphier_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DATA, val);
+ uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DIR, 0);
+
+ return 0;
+}
+
+static int uniphier_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ return uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DATA);
+}
+
+static void uniphier_gpio_set(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DATA, val);
+}
+
+static void uniphier_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
+{
+ unsigned int bank, shift, bank_mask, bank_bits;
+ int i;
+
+ for (i = 0; i < chip->ngpio; i += UNIPHIER_GPIO_LINES_PER_BANK) {
+ bank = i / UNIPHIER_GPIO_LINES_PER_BANK;
+ shift = i % BITS_PER_LONG;
+ bank_mask = (mask[BIT_WORD(i)] >> shift) &
+ UNIPHIER_GPIO_BANK_MASK;
+ bank_bits = bits[BIT_WORD(i)] >> shift;
+
+ uniphier_gpio_bank_write(chip, bank, UNIPHIER_GPIO_PORT_DATA,
+ bank_mask, bank_bits);
+ }
+}
+
+static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct irq_fwspec fwspec;
+
+ if (offset < UNIPHIER_GPIO_IRQ_OFFSET)
+ return -ENXIO;
+
+ fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
+ fwspec.param_count = 2;
+ fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
+ fwspec.param[1] = IRQ_TYPE_NONE;
+
+ return irq_create_fwspec_mapping(&fwspec);
+}
+
+static void uniphier_gpio_irq_mask(struct irq_data *data)
+{
+ struct uniphier_gpio_priv *priv = data->chip_data;
+ u32 mask = BIT(data->hwirq);
+
+ uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, 0);
+
+ return irq_chip_mask_parent(data);
+}
+
+static void uniphier_gpio_irq_unmask(struct irq_data *data)
+{
+ struct uniphier_gpio_priv *priv = data->chip_data;
+ u32 mask = BIT(data->hwirq);
+
+ uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, mask);
+
+ return irq_chip_unmask_parent(data);
+}
+
+static int uniphier_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct uniphier_gpio_priv *priv = data->chip_data;
+ u32 mask = BIT(data->hwirq);
+ u32 val = 0;
+
+ if (type == IRQ_TYPE_EDGE_BOTH) {
+ val = mask;
+ type = IRQ_TYPE_EDGE_FALLING;
+ }
+
+ uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_MODE, mask, val);
+ /* Both-edge detection requires the noise filter to be enabled. */
+ uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_FLT_EN, mask, val);
+
+ return irq_chip_set_type_parent(data, type);
+}
+
+static int uniphier_gpio_irq_get_parent_hwirq(struct uniphier_gpio_priv *priv,
+ unsigned int hwirq)
+{
+ struct device_node *np = priv->chip.parent->of_node;
+ const __be32 *range;
+ u32 base, parent_base, size;
+ int len;
+
+ range = of_get_property(np, "socionext,interrupt-ranges", &len);
+ if (!range)
+ return -EINVAL;
+
+ len /= sizeof(*range);
+
+ for (; len >= 3; len -= 3) {
+ base = be32_to_cpu(*range++);
+ parent_base = be32_to_cpu(*range++);
+ size = be32_to_cpu(*range++);
+
+ if (base <= hwirq && hwirq < base + size)
+ return hwirq - base + parent_base;
+ }
+
+ return -ENOENT;
+}
+
+static int uniphier_gpio_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (WARN_ON(fwspec->param_count < 2))
+ return -EINVAL;
+
+ *out_hwirq = fwspec->param[0];
+ *out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static int uniphier_gpio_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct uniphier_gpio_priv *priv = domain->host_data;
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
+
+ if (WARN_ON(nr_irqs != 1))
+ return -EINVAL;
+
+ ret = uniphier_gpio_irq_domain_translate(domain, arg, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ ret = uniphier_gpio_irq_get_parent_hwirq(priv, hwirq);
+ if (ret < 0)
+ return ret;
+
+ /* parent is UniPhier AIDET */
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 2;
+ parent_fwspec.param[0] = ret;
+ parent_fwspec.param[1] = (type == IRQ_TYPE_EDGE_BOTH) ?
+ IRQ_TYPE_EDGE_FALLING : type;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &priv->irq_chip, priv);
+ if (ret)
+ return ret;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+}
+
+static int uniphier_gpio_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *data, bool early)
+{
+ struct uniphier_gpio_priv *priv = domain->host_data;
+ struct gpio_chip *chip = &priv->chip;
+
+ gpiochip_lock_as_irq(chip, data->hwirq + UNIPHIER_GPIO_IRQ_OFFSET);
+ return 0;
+}
+
+static void uniphier_gpio_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *data)
+{
+ struct uniphier_gpio_priv *priv = domain->host_data;
+ struct gpio_chip *chip = &priv->chip;
+
+ gpiochip_unlock_as_irq(chip, data->hwirq + UNIPHIER_GPIO_IRQ_OFFSET);
+}
+
+static const struct irq_domain_ops uniphier_gpio_irq_domain_ops = {
+ .alloc = uniphier_gpio_irq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+ .activate = uniphier_gpio_irq_domain_activate,
+ .deactivate = uniphier_gpio_irq_domain_deactivate,
+ .translate = uniphier_gpio_irq_domain_translate,
+};
+
+static void uniphier_gpio_hw_init(struct uniphier_gpio_priv *priv)
+{
+ /*
+ * Due to the hardware design, the noise filter must be enabled to
+ * detect both-edge interrupts. This filter is intended to remove the
+ * noise from the irq lines. It does not work for GPIO input, so GPIO
+ * debounce is not supported. Unfortunately, the filter period is
+ * shared among all irq lines. Just choose a sensible period here.
+ */
+ writel(0xff, priv->regs + UNIPHIER_GPIO_IRQ_FLT_CYC);
+}
+
+static unsigned int uniphier_gpio_get_nbanks(unsigned int ngpio)
+{
+ return DIV_ROUND_UP(ngpio, UNIPHIER_GPIO_LINES_PER_BANK);
+}
+
+static int uniphier_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent_np;
+ struct irq_domain *parent_domain;
+ struct uniphier_gpio_priv *priv;
+ struct gpio_chip *chip;
+ struct irq_chip *irq_chip;
+ struct resource *regs;
+ unsigned int nregs;
+ u32 ngpios;
+ int ret;
+
+ parent_np = of_irq_find_parent(dev->of_node);
+ if (!parent_np)
+ return -ENXIO;
+
+ parent_domain = irq_find_host(parent_np);
+ of_node_put(parent_np);
+ if (!parent_domain)
+ return -EPROBE_DEFER;
+
+ ret = of_property_read_u32(dev->of_node, "ngpios", &ngpios);
+ if (ret)
+ return ret;
+
+ nregs = uniphier_gpio_get_nbanks(ngpios) * 2 + 3;
+ priv = devm_kzalloc(dev,
+ sizeof(*priv) + sizeof(priv->saved_vals[0]) * nregs,
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ spin_lock_init(&priv->lock);
+
+ chip = &priv->chip;
+ chip->label = dev_name(dev);
+ chip->parent = dev;
+ chip->request = gpiochip_generic_request;
+ chip->free = gpiochip_generic_free;
+ chip->get_direction = uniphier_gpio_get_direction;
+ chip->direction_input = uniphier_gpio_direction_input;
+ chip->direction_output = uniphier_gpio_direction_output;
+ chip->get = uniphier_gpio_get;
+ chip->set = uniphier_gpio_set;
+ chip->set_multiple = uniphier_gpio_set_multiple;
+ chip->to_irq = uniphier_gpio_to_irq;
+ chip->base = -1;
+ chip->ngpio = ngpios;
+
+ irq_chip = &priv->irq_chip;
+ irq_chip->name = dev_name(dev);
+ irq_chip->irq_mask = uniphier_gpio_irq_mask;
+ irq_chip->irq_unmask = uniphier_gpio_irq_unmask;
+ irq_chip->irq_eoi = irq_chip_eoi_parent;
+ irq_chip->irq_set_affinity = irq_chip_set_affinity_parent;
+ irq_chip->irq_set_type = uniphier_gpio_irq_set_type;
+
+ uniphier_gpio_hw_init(priv);
+
+ ret = devm_gpiochip_add_data(dev, chip, priv);
+ if (ret)
+ return ret;
+
+ priv->domain = irq_domain_create_hierarchy(
+ parent_domain, 0,
+ UNIPHIER_GPIO_IRQ_MAX_NUM,
+ of_node_to_fwnode(dev->of_node),
+ &uniphier_gpio_irq_domain_ops, priv);
+ if (!priv->domain)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static int uniphier_gpio_remove(struct platform_device *pdev)
+{
+ struct uniphier_gpio_priv *priv = platform_get_drvdata(pdev);
+
+ irq_domain_remove(priv->domain);
+
+ return 0;
+}
+
+static int __maybe_unused uniphier_gpio_suspend(struct device *dev)
+{
+ struct uniphier_gpio_priv *priv = dev_get_drvdata(dev);
+ unsigned int nbanks = uniphier_gpio_get_nbanks(priv->chip.ngpio);
+ u32 *val = priv->saved_vals;
+ unsigned int reg;
+ int i;
+
+ for (i = 0; i < nbanks; i++) {
+ reg = uniphier_gpio_bank_to_reg(i);
+
+ *val++ = readl(priv->regs + reg + UNIPHIER_GPIO_PORT_DATA);
+ *val++ = readl(priv->regs + reg + UNIPHIER_GPIO_PORT_DIR);
+ }
+
+ *val++ = readl(priv->regs + UNIPHIER_GPIO_IRQ_EN);
+ *val++ = readl(priv->regs + UNIPHIER_GPIO_IRQ_MODE);
+ *val++ = readl(priv->regs + UNIPHIER_GPIO_IRQ_FLT_EN);
+
+ return 0;
+}
+
+static int __maybe_unused uniphier_gpio_resume(struct device *dev)
+{
+ struct uniphier_gpio_priv *priv = dev_get_drvdata(dev);
+ unsigned int nbanks = uniphier_gpio_get_nbanks(priv->chip.ngpio);
+ const u32 *val = priv->saved_vals;
+ unsigned int reg;
+ int i;
+
+ for (i = 0; i < nbanks; i++) {
+ reg = uniphier_gpio_bank_to_reg(i);
+
+ writel(*val++, priv->regs + reg + UNIPHIER_GPIO_PORT_DATA);
+ writel(*val++, priv->regs + reg + UNIPHIER_GPIO_PORT_DIR);
+ }
+
+ writel(*val++, priv->regs + UNIPHIER_GPIO_IRQ_EN);
+ writel(*val++, priv->regs + UNIPHIER_GPIO_IRQ_MODE);
+ writel(*val++, priv->regs + UNIPHIER_GPIO_IRQ_FLT_EN);
+
+ uniphier_gpio_hw_init(priv);
+
+ return 0;
+}
+
+static const struct dev_pm_ops uniphier_gpio_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(uniphier_gpio_suspend,
+ uniphier_gpio_resume)
+};
+
+static const struct of_device_id uniphier_gpio_match[] = {
+ { .compatible = "socionext,uniphier-gpio" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_gpio_match);
+
+static struct platform_driver uniphier_gpio_driver = {
+ .probe = uniphier_gpio_probe,
+ .remove = uniphier_gpio_remove,
+ .driver = {
+ .name = "uniphier-gpio",
+ .of_match_table = uniphier_gpio_match,
+ .pm = &uniphier_gpio_pm_ops,
+ },
+};
+module_platform_driver(uniphier_gpio_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index cbe9e06861de..4610cc2938ad 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -160,7 +160,7 @@ static void vf610_gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(pin, &irq_isfr, VF610_GPIO_PER_PORT) {
vf610_gpio_writel(BIT(pin), port->base + PORT_ISFR);
- generic_handle_irq(irq_find_mapping(port->gc.irqdomain, pin));
+ generic_handle_irq(irq_find_mapping(port->gc.irq.domain, pin));
}
chained_irq_exit(chip, desc);
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 85341eab795d..dde7c6aecbb5 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -350,7 +350,7 @@ static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
offset = (gpio > GROUP0_NR_IRQS) ? 1 : 0;
mask = (offset == 1) ? BIT(gpio - GROUP0_NR_IRQS) :
BIT(gpio);
- virq = irq_find_mapping(wg->chip.irqdomain, gpio);
+ virq = irq_find_mapping(wg->chip.irq.domain, gpio);
handle_nested_irq(virq);
regmap_update_bits(wg->regmap, IRQ_STATUS_BASE + offset,
mask, mask);
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index 5037974ac063..746648244bf3 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -332,7 +332,7 @@ static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
int_id = inb(ws16c48gpio->base + 8 + port);
for_each_set_bit(gpio, &int_id, 8)
generic_handle_irq(irq_find_mapping(
- chip->irqdomain, gpio + 8*port));
+ chip->irq.domain, gpio + 8*port));
}
int_pending = inb(ws16c48gpio->base + 6) & 0x7;
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 033258634b8c..2313af82fad3 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -130,18 +130,16 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
(gpio > HWIRQ_TO_GPIO(priv, priv->nirq)))
return -ENXIO;
- if (gc->parent->of_node)
- fwspec.fwnode = of_node_to_fwnode(gc->parent->of_node);
- else
- fwspec.fwnode = gc->parent->fwnode;
+ fwspec.fwnode = gc->parent->fwnode;
fwspec.param_count = 2;
fwspec.param[0] = GPIO_TO_HWIRQ(priv, gpio);
fwspec.param[1] = IRQ_TYPE_NONE;
return irq_create_fwspec_mapping(&fwspec);
}
-static void xgene_gpio_sb_domain_activate(struct irq_domain *d,
- struct irq_data *irq_data)
+static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
+ struct irq_data *irq_data,
+ bool early)
{
struct xgene_gpio_sb *priv = d->host_data;
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
@@ -150,11 +148,12 @@ static void xgene_gpio_sb_domain_activate(struct irq_domain *d,
dev_err(priv->gc.parent,
"Unable to configure XGene GPIO standby pin %d as IRQ\n",
gpio);
- return;
+ return -ENOSPC;
}
xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 1);
+ return 0;
}
static void xgene_gpio_sb_domain_deactivate(struct irq_domain *d,
@@ -231,7 +230,6 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *regs;
struct irq_domain *parent_domain = NULL;
- struct fwnode_handle *fwnode;
u32 val32;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -285,18 +283,13 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- if (pdev->dev.of_node)
- fwnode = of_node_to_fwnode(pdev->dev.of_node);
- else
- fwnode = pdev->dev.fwnode;
-
priv->irq_domain = irq_domain_create_hierarchy(parent_domain,
- 0, priv->nirq, fwnode,
+ 0, priv->nirq, pdev->dev.fwnode,
&xgene_gpio_sb_domain_ops, priv);
if (!priv->irq_domain)
return -ENODEV;
- priv->gc.irqdomain = priv->irq_domain;
+ priv->gc.irq.domain = priv->irq_domain;
ret = devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
if (ret) {
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index d857e1d8e731..e74bd43a6974 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -225,7 +225,7 @@ static void xlp_gpio_generic_handler(struct irq_desc *desc)
if (gpio_stat & BIT(gpio % XLP_GPIO_REGSZ))
generic_handle_irq(irq_find_mapping(
- priv->chip.irqdomain, gpio));
+ priv->chip.irq.domain, gpio));
}
chained_irq_exit(irqchip, desc);
}
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index be3a87da8438..5eacad9b2692 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -170,7 +170,7 @@ static void zx_irq_handler(struct irq_desc *desc)
writew_relaxed(pending, chip->base + ZX_GPIO_IC);
if (pending) {
for_each_set_bit(offset, &pending, ZX_GPIO_NR)
- generic_handle_irq(irq_find_mapping(gc->irqdomain,
+ generic_handle_irq(irq_find_mapping(gc->irq.domain,
offset));
}
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index b3cc948a2d8b..75ee877e5cd5 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -562,7 +562,7 @@ static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio,
unsigned long pending)
{
unsigned int bank_offset = gpio->p_data->bank_min[bank_num];
- struct irq_domain *irqdomain = gpio->chip.irqdomain;
+ struct irq_domain *irqdomain = gpio->chip.irq.domain;
int offset;
if (!pending)
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index bfcd20699ec8..e0d59e61b52f 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -153,8 +153,8 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
*flags |= GPIO_OPEN_SOURCE;
}
- if (of_flags & OF_GPIO_SLEEP_MAY_LOOSE_VALUE)
- *flags |= GPIO_SLEEP_MAY_LOOSE_VALUE;
+ if (of_flags & OF_GPIO_SLEEP_MAY_LOSE_VALUE)
+ *flags |= GPIO_SLEEP_MAY_LOSE_VALUE;
return desc;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index eb80dac4e26a..aad84a6306c4 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -72,6 +72,8 @@ static LIST_HEAD(gpio_lookup_list);
LIST_HEAD(gpio_devices);
static void gpiochip_free_hogs(struct gpio_chip *chip);
+static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
+ struct lock_class_key *key);
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
@@ -365,28 +367,28 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
struct linehandle_state *lh = filep->private_data;
void __user *ip = (void __user *)arg;
struct gpiohandle_data ghd;
+ int vals[GPIOHANDLES_MAX];
int i;
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
- int val;
+ /* TODO: check if descriptors are really input */
+ int ret = gpiod_get_array_value_complex(false,
+ true,
+ lh->numdescs,
+ lh->descs,
+ vals);
+ if (ret)
+ return ret;
memset(&ghd, 0, sizeof(ghd));
-
- /* TODO: check if descriptors are really input */
- for (i = 0; i < lh->numdescs; i++) {
- val = gpiod_get_value_cansleep(lh->descs[i]);
- if (val < 0)
- return val;
- ghd.values[i] = val;
- }
+ for (i = 0; i < lh->numdescs; i++)
+ ghd.values[i] = vals[i];
if (copy_to_user(ip, &ghd, sizeof(ghd)))
return -EFAULT;
return 0;
} else if (cmd == GPIOHANDLE_SET_LINE_VALUES_IOCTL) {
- int vals[GPIOHANDLES_MAX];
-
/* TODO: check if descriptors are really output */
if (copy_from_user(&ghd, ip, sizeof(ghd)))
return -EFAULT;
@@ -444,12 +446,25 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
struct linehandle_state *lh;
struct file *file;
int fd, i, ret;
+ u32 lflags;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
return -EFAULT;
if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
return -EINVAL;
+ lflags = handlereq.flags;
+
+ /* Return an error if an unknown flag is set */
+ if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
+ return -EINVAL;
+
+ /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
+ if (!(lflags & GPIOHANDLE_REQUEST_OUTPUT) &&
+ ((lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
+ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
+ return -EINVAL;
+
lh = kzalloc(sizeof(*lh), GFP_KERNEL);
if (!lh)
return -ENOMEM;
@@ -470,7 +485,6 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
/* Request each GPIO */
for (i = 0; i < handlereq.lines; i++) {
u32 offset = handlereq.lineoffsets[i];
- u32 lflags = handlereq.flags;
struct gpio_desc *desc;
if (offset >= gdev->ngpio) {
@@ -478,12 +492,6 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
goto out_free_descs;
}
- /* Return an error if a unknown flag is set */
- if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) {
- ret = -EINVAL;
- goto out_free_descs;
- }
-
desc = &gdev->descs[offset];
ret = gpiod_request(desc, lh->label);
if (ret)
@@ -1091,30 +1099,8 @@ static void gpiochip_setup_devs(void)
}
}
-/**
- * gpiochip_add_data() - register a gpio_chip
- * @chip: the chip to register, with chip->base initialized
- * @data: driver-private data associated with this chip
- *
- * Context: potentially before irqs will work
- *
- * When gpiochip_add_data() is called very early during boot, so that GPIOs
- * can be freely used, the chip->parent device must be registered before
- * the gpio framework's arch_initcall(). Otherwise sysfs initialization
- * for GPIOs will fail rudely.
- *
- * gpiochip_add_data() must only be called after gpiolib initialization,
- * ie after core_initcall().
- *
- * If chip->base is negative, this requests dynamic assignment of
- * a range of valid GPIOs.
- *
- * Returns:
- * A negative errno if the chip can't be registered, such as because the
- * chip->base is invalid or already associated with a different chip.
- * Otherwise it returns zero as a success code.
- */
-int gpiochip_add_data(struct gpio_chip *chip, void *data)
+int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
+ struct lock_class_key *key)
{
unsigned long flags;
int status = 0;
@@ -1260,6 +1246,10 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
if (status)
goto err_remove_from_list;
+ status = gpiochip_add_irqchip(chip, key);
+ if (status)
+ goto err_remove_chip;
+
status = of_gpiochip_add(chip);
if (status)
goto err_remove_chip;
@@ -1303,7 +1293,7 @@ err_free_gdev:
kfree(gdev);
return status;
}
-EXPORT_SYMBOL_GPL(gpiochip_add_data);
+EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key);
/**
* gpiochip_get_data() - get per-subdriver data for the chip
@@ -1498,33 +1488,33 @@ static struct gpio_chip *find_chip_by_name(const char *name)
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
{
- if (!gpiochip->irq_need_valid_mask)
+ if (!gpiochip->irq.need_valid_mask)
return 0;
- gpiochip->irq_valid_mask = kcalloc(BITS_TO_LONGS(gpiochip->ngpio),
+ gpiochip->irq.valid_mask = kcalloc(BITS_TO_LONGS(gpiochip->ngpio),
sizeof(long), GFP_KERNEL);
- if (!gpiochip->irq_valid_mask)
+ if (!gpiochip->irq.valid_mask)
return -ENOMEM;
/* Assume by default all GPIOs are valid */
- bitmap_fill(gpiochip->irq_valid_mask, gpiochip->ngpio);
+ bitmap_fill(gpiochip->irq.valid_mask, gpiochip->ngpio);
return 0;
}
static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
{
- kfree(gpiochip->irq_valid_mask);
- gpiochip->irq_valid_mask = NULL;
+ kfree(gpiochip->irq.valid_mask);
+ gpiochip->irq.valid_mask = NULL;
}
static bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
unsigned int offset)
{
/* No mask means all valid */
- if (likely(!gpiochip->irq_valid_mask))
+ if (likely(!gpiochip->irq.valid_mask))
return true;
- return test_bit(offset, gpiochip->irq_valid_mask);
+ return test_bit(offset, gpiochip->irq.valid_mask);
}
/**
@@ -1544,7 +1534,7 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
{
unsigned int offset;
- if (!gpiochip->irqdomain) {
+ if (!gpiochip->irq.domain) {
chip_err(gpiochip, "called %s before setting up irqchip\n",
__func__);
return;
@@ -1564,14 +1554,15 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
irq_set_chained_handler_and_data(parent_irq, parent_handler,
gpiochip);
- gpiochip->irq_chained_parent = parent_irq;
+ gpiochip->irq.parents = &parent_irq;
+ gpiochip->irq.num_parents = 1;
}
/* Set the parent IRQ for all affected IRQs */
for (offset = 0; offset < gpiochip->ngpio; offset++) {
if (!gpiochip_irqchip_irq_valid(gpiochip, offset))
continue;
- irq_set_parent(irq_find_mapping(gpiochip->irqdomain, offset),
+ irq_set_parent(irq_find_mapping(gpiochip->irq.domain, offset),
parent_irq);
}
}
@@ -1591,6 +1582,11 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
unsigned int parent_irq,
irq_flow_handler_t parent_handler)
{
+ if (gpiochip->irq.threaded) {
+ chip_err(gpiochip, "tried to chain a threaded gpiochip\n");
+ return;
+ }
+
gpiochip_set_cascaded_irqchip(gpiochip, irqchip, parent_irq,
parent_handler);
}
@@ -1607,10 +1603,6 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int parent_irq)
{
- if (!gpiochip->irq_nested) {
- chip_err(gpiochip, "tried to nest a chained gpiochip\n");
- return;
- }
gpiochip_set_cascaded_irqchip(gpiochip, irqchip, parent_irq,
NULL);
}
@@ -1626,10 +1618,11 @@ EXPORT_SYMBOL_GPL(gpiochip_set_nested_irqchip);
* gpiochip by assigning the gpiochip as chip data, and using the irqchip
* stored inside the gpiochip.
*/
-static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
+int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
{
struct gpio_chip *chip = d->host_data;
+ int err = 0;
if (!gpiochip_irqchip_irq_valid(chip, hwirq))
return -ENXIO;
@@ -1639,32 +1632,42 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
* This lock class tells lockdep that GPIO irqs are in a different
* category than their parents, so it won't report false recursion.
*/
- irq_set_lockdep_class(irq, chip->lock_key);
- irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
+ irq_set_lockdep_class(irq, chip->irq.lock_key);
+ irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler);
/* Chips that use nested thread handlers have them marked */
- if (chip->irq_nested)
+ if (chip->irq.threaded)
irq_set_nested_thread(irq, 1);
irq_set_noprobe(irq);
+ if (chip->irq.num_parents == 1)
+ err = irq_set_parent(irq, chip->irq.parents[0]);
+ else if (chip->irq.map)
+ err = irq_set_parent(irq, chip->irq.map[hwirq]);
+
+ if (err < 0)
+ return err;
+
/*
* No set-up of the hardware will happen if IRQ_TYPE_NONE
* is passed as default type.
*/
- if (chip->irq_default_type != IRQ_TYPE_NONE)
- irq_set_irq_type(irq, chip->irq_default_type);
+ if (chip->irq.default_type != IRQ_TYPE_NONE)
+ irq_set_irq_type(irq, chip->irq.default_type);
return 0;
}
+EXPORT_SYMBOL_GPL(gpiochip_irq_map);
-static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
+void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
{
struct gpio_chip *chip = d->host_data;
- if (chip->irq_nested)
+ if (chip->irq.threaded)
irq_set_nested_thread(irq, 0);
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
}
+EXPORT_SYMBOL_GPL(gpiochip_irq_unmap);
static const struct irq_domain_ops gpiochip_domain_ops = {
.map = gpiochip_irq_map,
@@ -1702,7 +1705,94 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
{
if (!gpiochip_irqchip_irq_valid(chip, offset))
return -ENXIO;
- return irq_create_mapping(chip->irqdomain, offset);
+
+ return irq_create_mapping(chip->irq.domain, offset);
+}
+
+/**
+ * gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip
+ * @gpiochip: the GPIO chip to add the IRQ chip to
+ * @lock_key: lockdep class
+ */
+static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
+ struct lock_class_key *lock_key)
+{
+ struct irq_chip *irqchip = gpiochip->irq.chip;
+ const struct irq_domain_ops *ops;
+ struct device_node *np;
+ unsigned int type;
+ unsigned int i;
+
+ if (!irqchip)
+ return 0;
+
+ if (gpiochip->irq.parent_handler && gpiochip->can_sleep) {
+ chip_err(gpiochip, "you cannot have chained interrupts on a "
+ "chip that may sleep\n");
+ return -EINVAL;
+ }
+
+ np = gpiochip->gpiodev->dev.of_node;
+ type = gpiochip->irq.default_type;
+
+ /*
+ * Specifying a default trigger is a terrible idea if DT or ACPI is
+ * used to configure the interrupts, as you may end up with
+ * conflicting triggers. Tell the user, and reset to NONE.
+ */
+ if (WARN(np && type != IRQ_TYPE_NONE,
+ "%s: Ignoring %u default trigger\n", np->full_name, type))
+ type = IRQ_TYPE_NONE;
+
+ if (has_acpi_companion(gpiochip->parent) && type != IRQ_TYPE_NONE) {
+ acpi_handle_warn(ACPI_HANDLE(gpiochip->parent),
+ "Ignoring %u default trigger\n", type);
+ type = IRQ_TYPE_NONE;
+ }
+
+ gpiochip->to_irq = gpiochip_to_irq;
+ gpiochip->irq.default_type = type;
+ gpiochip->irq.lock_key = lock_key;
+
+ if (gpiochip->irq.domain_ops)
+ ops = gpiochip->irq.domain_ops;
+ else
+ ops = &gpiochip_domain_ops;
+
+ gpiochip->irq.domain = irq_domain_add_simple(np, gpiochip->ngpio,
+ gpiochip->irq.first,
+ ops, gpiochip);
+ if (!gpiochip->irq.domain)
+ return -EINVAL;
+
+ /*
+ * It is possible for a driver to override this, but only if the
+ * alternative functions are both implemented.
+ */
+ if (!irqchip->irq_request_resources &&
+ !irqchip->irq_release_resources) {
+ irqchip->irq_request_resources = gpiochip_irq_reqres;
+ irqchip->irq_release_resources = gpiochip_irq_relres;
+ }
+
+ if (gpiochip->irq.parent_handler) {
+ void *data = gpiochip->irq.parent_handler_data ?: gpiochip;
+
+ for (i = 0; i < gpiochip->irq.num_parents; i++) {
+ /*
+ * The parent IRQ chip is already using the chip_data
+ * for this IRQ chip, so our callbacks simply use the
+ * handler_data.
+ */
+ irq_set_chained_handler_and_data(gpiochip->irq.parents[i],
+ gpiochip->irq.parent_handler,
+ data);
+ }
+ }
+
+ acpi_gpiochip_request_interrupts(gpiochip);
+
+ return 0;
}
/**
@@ -1717,26 +1807,34 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
acpi_gpiochip_free_interrupts(gpiochip);
- if (gpiochip->irq_chained_parent) {
- irq_set_chained_handler(gpiochip->irq_chained_parent, NULL);
- irq_set_handler_data(gpiochip->irq_chained_parent, NULL);
+ if (gpiochip->irq.chip && gpiochip->irq.parent_handler) {
+ struct gpio_irq_chip *irq = &gpiochip->irq;
+ unsigned int i;
+
+ for (i = 0; i < irq->num_parents; i++)
+ irq_set_chained_handler_and_data(irq->parents[i],
+ NULL, NULL);
}
/* Remove all IRQ mappings and delete the domain */
- if (gpiochip->irqdomain) {
+ if (gpiochip->irq.domain) {
+ unsigned int irq;
+
for (offset = 0; offset < gpiochip->ngpio; offset++) {
if (!gpiochip_irqchip_irq_valid(gpiochip, offset))
continue;
- irq_dispose_mapping(
- irq_find_mapping(gpiochip->irqdomain, offset));
+
+ irq = irq_find_mapping(gpiochip->irq.domain, offset);
+ irq_dispose_mapping(irq);
}
- irq_domain_remove(gpiochip->irqdomain);
+
+ irq_domain_remove(gpiochip->irq.domain);
}
- if (gpiochip->irqchip) {
- gpiochip->irqchip->irq_request_resources = NULL;
- gpiochip->irqchip->irq_release_resources = NULL;
- gpiochip->irqchip = NULL;
+ if (gpiochip->irq.chip) {
+ gpiochip->irq.chip->irq_request_resources = NULL;
+ gpiochip->irq.chip->irq_release_resources = NULL;
+ gpiochip->irq.chip = NULL;
}
gpiochip_irqchip_free_valid_mask(gpiochip);
@@ -1751,8 +1849,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* @handler: the irq handler to use (often a predefined irq core function)
* @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
* to have the core avoid setting up any default type in the hardware.
- * @nested: whether this is a nested irqchip calling handle_nested_irq()
- * in its IRQ handler
+ * @threaded: whether this irqchip uses a nested thread handler
* @lock_key: lockdep class
*
* This function closely associates a certain irqchip with a certain
@@ -1774,7 +1871,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type,
- bool nested,
+ bool threaded,
struct lock_class_key *lock_key)
{
struct device_node *of_node;
@@ -1786,7 +1883,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
pr_err("missing gpiochip .dev parent pointer\n");
return -EINVAL;
}
- gpiochip->irq_nested = nested;
+ gpiochip->irq.threaded = threaded;
of_node = gpiochip->parent->of_node;
#ifdef CONFIG_OF_GPIO
/*
@@ -1811,16 +1908,16 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
type = IRQ_TYPE_NONE;
}
- gpiochip->irqchip = irqchip;
- gpiochip->irq_handler = handler;
- gpiochip->irq_default_type = type;
+ gpiochip->irq.chip = irqchip;
+ gpiochip->irq.handler = handler;
+ gpiochip->irq.default_type = type;
gpiochip->to_irq = gpiochip_to_irq;
- gpiochip->lock_key = lock_key;
- gpiochip->irqdomain = irq_domain_add_simple(of_node,
+ gpiochip->irq.lock_key = lock_key;
+ gpiochip->irq.domain = irq_domain_add_simple(of_node,
gpiochip->ngpio, first_irq,
&gpiochip_domain_ops, gpiochip);
- if (!gpiochip->irqdomain) {
- gpiochip->irqchip = NULL;
+ if (!gpiochip->irq.domain) {
+ gpiochip->irq.chip = NULL;
return -EINVAL;
}
@@ -1842,6 +1939,12 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
#else /* CONFIG_GPIOLIB_IRQCHIP */
+static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
+ struct lock_class_key *key)
+{
+ return 0;
+}
+
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
{
@@ -1859,7 +1962,7 @@ static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
*/
int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset)
{
- return pinctrl_request_gpio(chip->gpiodev->base + offset);
+ return pinctrl_gpio_request(chip->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_request);
@@ -1870,7 +1973,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_request);
*/
void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
{
- pinctrl_free_gpio(chip->gpiodev->base + offset);
+ pinctrl_gpio_free(chip->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_free);
@@ -2013,7 +2116,7 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges);
* on each other, and help provide better diagnostics in debugfs.
* They're called even less than the "set direction" calls.
*/
-static int __gpiod_request(struct gpio_desc *desc, const char *label)
+static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
{
struct gpio_chip *chip = desc->gdev->chip;
int status;
@@ -2106,7 +2209,7 @@ int gpiod_request(struct gpio_desc *desc, const char *label)
gdev = desc->gdev;
if (try_module_get(gdev->owner)) {
- status = __gpiod_request(desc, label);
+ status = gpiod_request_commit(desc, label);
if (status < 0)
module_put(gdev->owner);
else
@@ -2119,7 +2222,7 @@ int gpiod_request(struct gpio_desc *desc, const char *label)
return status;
}
-static bool __gpiod_free(struct gpio_desc *desc)
+static bool gpiod_free_commit(struct gpio_desc *desc)
{
bool ret = false;
unsigned long flags;
@@ -2154,7 +2257,7 @@ static bool __gpiod_free(struct gpio_desc *desc)
void gpiod_free(struct gpio_desc *desc)
{
- if (desc && desc->gdev && __gpiod_free(desc)) {
+ if (desc && desc->gdev && gpiod_free_commit(desc)) {
module_put(desc->gdev->owner);
put_device(&desc->gdev->dev);
} else {
@@ -2217,7 +2320,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
return desc;
}
- err = __gpiod_request(desc, label);
+ err = gpiod_request_commit(desc, label);
if (err < 0)
return ERR_PTR(err);
@@ -2235,7 +2338,7 @@ EXPORT_SYMBOL_GPL(gpiochip_request_own_desc);
void gpiochip_free_own_desc(struct gpio_desc *desc)
{
if (desc)
- __gpiod_free(desc);
+ gpiod_free_commit(desc);
}
EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
@@ -2291,44 +2394,12 @@ static int gpio_set_drive_single_ended(struct gpio_chip *gc, unsigned offset,
return gc->set_config ? gc->set_config(gc, offset, config) : -ENOTSUPP;
}
-static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
+static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
{
struct gpio_chip *gc = desc->gdev->chip;
int val = !!value;
int ret;
- /* GPIOs used for IRQs shall not be set as output */
- if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) {
- gpiod_err(desc,
- "%s: tried to set a GPIO tied to an IRQ as output\n",
- __func__);
- return -EIO;
- }
-
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
- /* First see if we can enable open drain in hardware */
- ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
- PIN_CONFIG_DRIVE_OPEN_DRAIN);
- if (!ret)
- goto set_output_value;
- /* Emulate open drain by not actively driving the line high */
- if (val)
- return gpiod_direction_input(desc);
- }
- else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
- ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
- PIN_CONFIG_DRIVE_OPEN_SOURCE);
- if (!ret)
- goto set_output_value;
- /* Emulate open source by not actively driving the line low */
- if (!val)
- return gpiod_direction_input(desc);
- } else {
- gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
- PIN_CONFIG_DRIVE_PUSH_PULL);
- }
-
-set_output_value:
if (!gc->set || !gc->direction_output) {
gpiod_warn(desc,
"%s: missing set() or direction_output() operations\n",
@@ -2358,7 +2429,7 @@ set_output_value:
int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
VALIDATE_DESC(desc);
- return _gpiod_direction_output_raw(desc, value);
+ return gpiod_direction_output_raw_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
@@ -2376,12 +2447,48 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
*/
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
+ struct gpio_chip *gc;
+ int ret;
+
VALIDATE_DESC(desc);
+ gc = desc->gdev->chip;
+
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
else
value = !!value;
- return _gpiod_direction_output_raw(desc, value);
+
+ /* GPIOs used for IRQs shall not be set as output */
+ if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) {
+ gpiod_err(desc,
+ "%s: tried to set a GPIO tied to an IRQ as output\n",
+ __func__);
+ return -EIO;
+ }
+
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
+ /* First see if we can enable open drain in hardware */
+ ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
+ PIN_CONFIG_DRIVE_OPEN_DRAIN);
+ if (!ret)
+ goto set_output_value;
+ /* Emulate open drain by not actively driving the line high */
+ if (value)
+ return gpiod_direction_input(desc);
+ }
+ else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
+ ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
+ PIN_CONFIG_DRIVE_OPEN_SOURCE);
+ if (!ret)
+ goto set_output_value;
+ /* Emulate open source by not actively driving the line low */
+ if (!value)
+ return gpiod_direction_input(desc);
+ } else {
+ gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
+ PIN_CONFIG_DRIVE_PUSH_PULL);
+ }
+
+set_output_value:
+ return gpiod_direction_output_raw_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_direction_output);
@@ -2448,7 +2555,7 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low);
* that the GPIO was actually requested.
*/
-static int _gpiod_get_raw_value(const struct gpio_desc *desc)
+static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int offset;
@@ -2462,6 +2569,71 @@ static int _gpiod_get_raw_value(const struct gpio_desc *desc)
return value;
}
+static int gpio_chip_get_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
+{
+ if (chip->get_multiple) {
+ return chip->get_multiple(chip, mask, bits);
+ } else if (chip->get) {
+ int i, value;
+
+ for_each_set_bit(i, mask, chip->ngpio) {
+ value = chip->get(chip, i);
+ if (value < 0)
+ return value;
+ __assign_bit(i, bits, value);
+ }
+ return 0;
+ }
+ return -EIO;
+}
+
+int gpiod_get_array_value_complex(bool raw, bool can_sleep,
+ unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ int i = 0;
+
+ while (i < array_size) {
+ struct gpio_chip *chip = desc_array[i]->gdev->chip;
+ unsigned long mask[BITS_TO_LONGS(chip->ngpio)];
+ unsigned long bits[BITS_TO_LONGS(chip->ngpio)];
+ int first, j, ret;
+
+ if (!can_sleep)
+ WARN_ON(chip->can_sleep);
+
+ /* collect all inputs belonging to the same chip */
+ first = i;
+ memset(mask, 0, sizeof(mask));
+ do {
+ const struct gpio_desc *desc = desc_array[i];
+ int hwgpio = gpio_chip_hwgpio(desc);
+
+ __set_bit(hwgpio, mask);
+ i++;
+ } while ((i < array_size) &&
+ (desc_array[i]->gdev->chip == chip));
+
+ ret = gpio_chip_get_multiple(chip, mask, bits);
+ if (ret)
+ return ret;
+
+ for (j = first; j < i; j++) {
+ const struct gpio_desc *desc = desc_array[j];
+ int hwgpio = gpio_chip_hwgpio(desc);
+ int value = test_bit(hwgpio, bits);
+
+ if (!raw && test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
+ value_array[j] = value;
+ trace_gpio_value(desc_to_gpio(desc), 1, value);
+ }
+ }
+ return 0;
+}
+
/**
* gpiod_get_raw_value() - return a gpio's raw value
* @desc: gpio whose value will be returned
@@ -2477,7 +2649,7 @@ int gpiod_get_raw_value(const struct gpio_desc *desc)
VALIDATE_DESC(desc);
/* Should be using gpio_get_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
- return _gpiod_get_raw_value(desc);
+ return gpiod_get_raw_value_commit(desc);
}
EXPORT_SYMBOL_GPL(gpiod_get_raw_value);
@@ -2499,7 +2671,7 @@ int gpiod_get_value(const struct gpio_desc *desc)
/* Should be using gpio_get_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
- value = _gpiod_get_raw_value(desc);
+ value = gpiod_get_raw_value_commit(desc);
if (value < 0)
return value;
@@ -2510,12 +2682,57 @@ int gpiod_get_value(const struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(gpiod_get_value);
+/**
+ * gpiod_get_raw_array_value() - read raw values from an array of GPIOs
+ * @array_size: number of elements in the descriptor / value arrays
+ * @desc_array: array of GPIO descriptors whose values will be read
+ * @value_array: array to store the read values
+ *
+ * Read the raw values of the GPIOs, i.e. the values of the physical lines
+ * without regard for their ACTIVE_LOW status. Return 0 in case of success,
+ * else an error code.
+ *
+ * This function should be called from contexts where we cannot sleep,
+ * and it will complain if the GPIO chip functions potentially sleep.
+ */
+int gpiod_get_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array)
+{
+ if (!desc_array)
+ return -EINVAL;
+ return gpiod_get_array_value_complex(true, false, array_size,
+ desc_array, value_array);
+}
+EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value);
+
+/**
+ * gpiod_get_array_value() - read values from an array of GPIOs
+ * @array_size: number of elements in the descriptor / value arrays
+ * @desc_array: array of GPIO descriptors whose values will be read
+ * @value_array: array to store the read values
+ *
+ * Read the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status
+ * into account. Return 0 in case of success, else an error code.
+ *
+ * This function should be called from contexts where we cannot sleep,
+ * and it will complain if the GPIO chip functions potentially sleep.
+ */
+int gpiod_get_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array)
+{
+ if (!desc_array)
+ return -EINVAL;
+ return gpiod_get_array_value_complex(false, false, array_size,
+ desc_array, value_array);
+}
+EXPORT_SYMBOL_GPL(gpiod_get_array_value);
+
/*
- * _gpio_set_open_drain_value() - Set the open drain gpio's value.
+ * gpio_set_open_drain_value_commit() - Set the open drain gpio's value.
* @desc: gpio descriptor whose state need to be set.
* @value: Non-zero for setting it HIGH otherwise it will set to LOW.
*/
-static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value)
+static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
{
int err = 0;
struct gpio_chip *chip = desc->gdev->chip;
@@ -2542,7 +2759,7 @@ static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value)
* @desc: gpio descriptor whose state need to be set.
* @value: Non-zero for setting it HIGH otherwise it will set to LOW.
*/
-static void _gpio_set_open_source_value(struct gpio_desc *desc, bool value)
+static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
{
int err = 0;
struct gpio_chip *chip = desc->gdev->chip;
@@ -2564,18 +2781,13 @@ static void _gpio_set_open_source_value(struct gpio_desc *desc, bool value)
__func__, err);
}
-static void _gpiod_set_raw_value(struct gpio_desc *desc, bool value)
+static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
struct gpio_chip *chip;
chip = desc->gdev->chip;
trace_gpio_value(desc_to_gpio(desc), 0, value);
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- _gpio_set_open_drain_value(desc, value);
- else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- _gpio_set_open_source_value(desc, value);
- else
- chip->set(chip, gpio_chip_hwgpio(desc), value);
+ chip->set(chip, gpio_chip_hwgpio(desc), value);
}
/*
@@ -2630,10 +2842,10 @@ void gpiod_set_array_value_complex(bool raw, bool can_sleep,
* collect all normal outputs belonging to the same chip
* open drain and open source outputs are set individually
*/
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
- _gpio_set_open_drain_value(desc, value);
- } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
- _gpio_set_open_source_value(desc, value);
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) && !raw) {
+ gpio_set_open_drain_value_commit(desc, value);
+ } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags) && !raw) {
+ gpio_set_open_source_value_commit(desc, value);
} else {
__set_bit(hwgpio, mask);
if (value)
@@ -2667,7 +2879,7 @@ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
VALIDATE_DESC_VOID(desc);
/* Should be using gpiod_set_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
- _gpiod_set_raw_value(desc, value);
+ gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
@@ -2676,8 +2888,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
* @desc: gpio whose value will be assigned
* @value: value to assign
*
- * Set the logical value of the GPIO, i.e. taking its ACTIVE_LOW status into
- * account
+ * Set the logical value of the GPIO, i.e. taking its ACTIVE_LOW,
+ * OPEN_DRAIN and OPEN_SOURCE flags into account.
*
* This function should be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
@@ -2689,7 +2901,12 @@ void gpiod_set_value(struct gpio_desc *desc, int value)
WARN_ON(desc->gdev->chip->can_sleep);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
- _gpiod_set_raw_value(desc, value);
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ gpio_set_open_drain_value_commit(desc, value);
+ else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ gpio_set_open_source_value_commit(desc, value);
+ else
+ gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value);
@@ -2890,7 +3107,7 @@ bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset)
if (offset >= chip->ngpio)
return false;
- return !test_bit(FLAG_SLEEP_MAY_LOOSE_VALUE,
+ return !test_bit(FLAG_SLEEP_MAY_LOSE_VALUE,
&chip->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
@@ -2908,7 +3125,7 @@ int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
might_sleep_if(extra_checks);
VALIDATE_DESC(desc);
- return _gpiod_get_raw_value(desc);
+ return gpiod_get_raw_value_commit(desc);
}
EXPORT_SYMBOL_GPL(gpiod_get_raw_value_cansleep);
@@ -2927,7 +3144,7 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc)
might_sleep_if(extra_checks);
VALIDATE_DESC(desc);
- value = _gpiod_get_raw_value(desc);
+ value = gpiod_get_raw_value_commit(desc);
if (value < 0)
return value;
@@ -2939,6 +3156,53 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc)
EXPORT_SYMBOL_GPL(gpiod_get_value_cansleep);
/**
+ * gpiod_get_raw_array_value_cansleep() - read raw values from an array of GPIOs
+ * @array_size: number of elements in the descriptor / value arrays
+ * @desc_array: array of GPIO descriptors whose values will be read
+ * @value_array: array to store the read values
+ *
+ * Read the raw values of the GPIOs, i.e. the values of the physical lines
+ * without regard for their ACTIVE_LOW status. Return 0 in case of success,
+ * else an error code.
+ *
+ * This function is to be called from contexts that can sleep.
+ */
+int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ might_sleep_if(extra_checks);
+ if (!desc_array)
+ return -EINVAL;
+ return gpiod_get_array_value_complex(true, true, array_size,
+ desc_array, value_array);
+}
+EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value_cansleep);
+
+/**
+ * gpiod_get_array_value_cansleep() - read values from an array of GPIOs
+ * @array_size: number of elements in the descriptor / value arrays
+ * @desc_array: array of GPIO descriptors whose values will be read
+ * @value_array: array to store the read values
+ *
+ * Read the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status
+ * into account. Return 0 in case of success, else an error code.
+ *
+ * This function is to be called from contexts that can sleep.
+ */
+int gpiod_get_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ might_sleep_if(extra_checks);
+ if (!desc_array)
+ return -EINVAL;
+ return gpiod_get_array_value_complex(false, true, array_size,
+ desc_array, value_array);
+}
+EXPORT_SYMBOL_GPL(gpiod_get_array_value_cansleep);
+
+/**
* gpiod_set_raw_value_cansleep() - assign a gpio's raw value
* @desc: gpio whose value will be assigned
* @value: value to assign
@@ -2952,7 +3216,7 @@ void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep_if(extra_checks);
VALIDATE_DESC_VOID(desc);
- _gpiod_set_raw_value(desc, value);
+ gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep);
@@ -2972,7 +3236,7 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
VALIDATE_DESC_VOID(desc);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
- _gpiod_set_raw_value(desc, value);
+ gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
@@ -3264,12 +3528,25 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
if (lflags & GPIO_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+
if (lflags & GPIO_OPEN_DRAIN)
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ else if (dflags & GPIOD_FLAGS_BIT_OPEN_DRAIN) {
+ /*
+ * This enforces open drain mode from the consumer side.
+ * This is necessary for some busses like I2C, but the lookup
+ * should *REALLY* have specified them as open drain in the
+ * first place, so print a little warning here.
+ */
+ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ gpiod_warn(desc,
+ "enforced open drain please flag it properly in DT/ACPI DSDT/board file\n");
+ }
+
if (lflags & GPIO_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
- if (lflags & GPIO_SLEEP_MAY_LOOSE_VALUE)
- set_bit(FLAG_SLEEP_MAY_LOOSE_VALUE, &desc->flags);
+ if (lflags & GPIO_SLEEP_MAY_LOSE_VALUE)
+ set_bit(FLAG_SLEEP_MAY_LOSE_VALUE, &desc->flags);
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
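Before moving on to the header, note how gpiod_direction_output() above emulates open drain and open source when the controller cannot do it natively: the line is actively driven only in the direction the pin may drive, otherwise it is switched to input and the external pull does the rest. A small sketch of that decision, with illustrative names:

/* Sketch of the emulation decision made by gpiod_direction_output(). */
#include <stdbool.h>
#include <stdio.h>

enum line_mode { PUSH_PULL, OPEN_DRAIN, OPEN_SOURCE };

/* Returns true when the line should become an input instead of being driven. */
static bool emulate_as_input(enum line_mode mode, int value)
{
	if (mode == OPEN_DRAIN && value)
		return true;	/* high: stop driving, the external pull-up wins */
	if (mode == OPEN_SOURCE && !value)
		return true;	/* low: stop driving, the external pull-down wins */
	return false;		/* push-pull, or driving in the allowed direction */
}

int main(void)
{
	printf("open-drain  value 1 -> input? %d\n", emulate_as_input(OPEN_DRAIN, 1));
	printf("open-drain  value 0 -> input? %d\n", emulate_as_input(OPEN_DRAIN, 0));
	printf("open-source value 0 -> input? %d\n", emulate_as_input(OPEN_SOURCE, 0));
	return 0;
}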
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index d003ccb12781..af48322839c3 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -180,6 +180,10 @@ static inline bool acpi_can_fallback_to_crs(struct acpi_device *adev,
#endif
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, u16 hwnum);
+int gpiod_get_array_value_complex(bool raw, bool can_sleep,
+ unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
void gpiod_set_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
struct gpio_desc **desc_array,
@@ -201,7 +205,7 @@ struct gpio_desc {
#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
-#define FLAG_SLEEP_MAY_LOOSE_VALUE 12 /* GPIO may loose value in sleep */
+#define FLAG_SLEEP_MAY_LOSE_VALUE 12 /* GPIO may lose value in sleep */
/* Connection label */
const char *label;
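The gpiod_get_array_value_complex() helper declared above batches reads by walking the descriptor array and handing each consecutive run of descriptors that share a chip to one get_multiple()-style access. A rough sketch of that grouping pass, using plain integers in place of descriptors:

/* Sketch of the per-chip grouping done by gpiod_get_array_value_complex(). */
#include <stdio.h>

static void group_by_chip(const int *chip_of, int n)
{
	int i = 0;

	while (i < n) {
		int first = i;
		int chip = chip_of[i];

		/* collect all descriptors belonging to the same chip */
		do {
			i++;
		} while (i < n && chip_of[i] == chip);

		/* one get_multiple()-style call would cover [first, i) */
		printf("chip %d: descriptors %d..%d\n", chip, first, i - 1);
	}
}

int main(void)
{
	int chip_of[] = { 0, 0, 0, 1, 1, 2 };

	group_by_chip(chip_of, 6);
	return 0;
}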
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 83cb2a88c204..4d9f21831741 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -110,7 +110,7 @@ config DRM_FBDEV_OVERALLOC
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
- depends on DRM_KMS_HELPER
+ depends on DRM
help
Say Y here, if you want to use EDID data to be loaded from the
/lib/firmware directory or one of the provided built-in
@@ -184,6 +184,7 @@ config DRM_AMDGPU
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
+ select CHASH
help
Choose this option if you have a recent AMD Radeon graphics card.
@@ -191,6 +192,8 @@ config DRM_AMDGPU
source "drivers/gpu/drm/amd/amdgpu/Kconfig"
+source "drivers/gpu/drm/amd/lib/Kconfig"
+
source "drivers/gpu/drm/nouveau/Kconfig"
source "drivers/gpu/drm/i915/Kconfig"
@@ -278,6 +281,8 @@ source "drivers/gpu/drm/tinydrm/Kconfig"
source "drivers/gpu/drm/pl111/Kconfig"
+source "drivers/gpu/drm/tve200/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 8ce07039bb89..e9500844333e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -18,7 +18,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_encoder.o drm_mode_object.o drm_property.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
- drm_syncobj.o
+ drm_syncobj.o drm_lease.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_DRM_VM) += drm_vm.o
@@ -29,6 +29,7 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
+drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
@@ -37,7 +38,6 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_scdc_helper.o drm_gem_framebuffer_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
-drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
@@ -45,14 +45,13 @@ drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += selftests/
-CFLAGS_drm_trace_points.o := -I$(src)
-
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_ARM) += arm/
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
+obj-y += amd/lib/
obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
@@ -101,3 +100,4 @@ obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
obj-$(CONFIG_DRM_PL111) += pl111/
+obj-$(CONFIG_DRM_TVE200) += tve200/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 567b0377e1e2..7fc42e077770 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -26,7 +26,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
- amdgpu_queue_mgr.o amdgpu_vf_error.o
+ amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -134,5 +134,3 @@ include $(FULL_AMD_PATH)/powerplay/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
-
-CFLAGS_amdgpu_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 103635ab784c..cbcb6a153aba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -65,6 +65,7 @@
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
+#include "amdgpu_mn.h"
#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
@@ -91,7 +92,7 @@ extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
-extern unsigned amdgpu_ip_block_mask;
+extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
@@ -104,14 +105,14 @@ extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
extern int amdgpu_direct_gma_size;
-extern unsigned amdgpu_pcie_gen_cap;
-extern unsigned amdgpu_pcie_lane_cap;
-extern unsigned amdgpu_cg_mask;
-extern unsigned amdgpu_pg_mask;
-extern unsigned amdgpu_sdma_phase_quantum;
+extern uint amdgpu_pcie_gen_cap;
+extern uint amdgpu_pcie_lane_cap;
+extern uint amdgpu_cg_mask;
+extern uint amdgpu_pg_mask;
+extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
-extern unsigned amdgpu_pp_feature_mask;
+extern uint amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;
extern int amdgpu_ngg;
extern int amdgpu_prim_buf_per_se;
@@ -120,6 +121,7 @@ extern int amdgpu_cntl_sb_buf_per_se;
extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
+extern int amdgpu_compute_multipipe;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -178,6 +180,7 @@ struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
+struct amdgpu_bo_va_mapping;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -292,14 +295,25 @@ struct amdgpu_buffer_funcs {
/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
+ /* number of dw to reserve per operation */
+ unsigned copy_pte_num_dw;
+
/* copy pte entries from GART */
void (*copy_pte)(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count);
+
/* write pte one entry at a time with addr mapping */
void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
uint64_t value, unsigned count,
uint32_t incr);
+
+ /* maximum nums of PTEs/PDEs in a single operation */
+ uint32_t set_max_nums_pte_pde;
+
+ /* number of dw to reserve per operation */
+ unsigned set_pte_pde_num_dw;
+
/* for linear pte/pde updates without addr mapping */
void (*set_pte_pde)(struct amdgpu_ib *ib,
uint64_t pe,
@@ -332,6 +346,7 @@ struct amdgpu_gart_funcs {
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
u32 (*get_wptr)(struct amdgpu_device *adev);
+ bool (*prescreen_iv)(struct amdgpu_device *adev);
void (*decode_iv)(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
void (*set_rptr)(struct amdgpu_device *adev);
@@ -399,6 +414,7 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
/* sub-allocation manager, it has to be protected by another lock.
@@ -455,9 +471,10 @@ struct amdgpu_sa_bo {
*/
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
- int alignment, u32 initial_domain,
- u64 flags, bool kernel,
- struct drm_gem_object **obj);
+ int alignment, u32 initial_domain,
+ u64 flags, bool kernel,
+ struct reservation_object *resv,
+ struct drm_gem_object **obj);
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -715,10 +732,14 @@ struct amdgpu_ctx {
struct amdgpu_device *adev;
struct amdgpu_queue_mgr queue_mgr;
unsigned reset_counter;
+ uint32_t vram_lost_counter;
spinlock_t ring_lock;
struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
- bool preamble_presented;
+ bool preamble_presented;
+ enum amd_sched_priority init_priority;
+ enum amd_sched_priority override_priority;
+ struct mutex lock;
};
struct amdgpu_ctx_mgr {
@@ -731,17 +752,22 @@ struct amdgpu_ctx_mgr {
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
-uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct dma_fence *fence);
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+ enum amd_sched_priority priority);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
+
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+
/*
* file private structure
*/
@@ -753,7 +779,6 @@ struct amdgpu_fpriv {
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
- u32 vram_lost_counter;
};
/*
@@ -854,7 +879,7 @@ struct amdgpu_mec {
struct amdgpu_kiq {
u64 eop_gpu_addr;
struct amdgpu_bo *eop_obj;
- struct mutex ring_mutex;
+ spinlock_t ring_lock;
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
};
@@ -1014,11 +1039,14 @@ struct amdgpu_gfx {
/* reset mask */
uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset;
- bool in_reset;
/* s3/s4 mask */
bool in_suspend;
/* NGG */
struct amdgpu_ngg ngg;
+
+ /* pipe reservation */
+ struct mutex pipe_reserve_mutex;
+ DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1056,6 +1084,7 @@ struct amdgpu_cs_parser {
/* buffer objects */
struct ww_acquire_ctx ticket;
struct amdgpu_bo_list *bo_list;
+ struct amdgpu_mn *mn;
struct amdgpu_bo_list_entry vm_pd;
struct list_head validated;
struct dma_fence *fence;
@@ -1096,6 +1125,7 @@ struct amdgpu_job {
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
+ uint32_t vram_lost_counter;
/* user fence handling */
uint64_t uf_addr;
@@ -1121,7 +1151,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
/*
* Writeback
*/
-#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 512 /* Reserve at most 512 WB slots for amdgpu-owned rings. */
struct amdgpu_wb {
struct amdgpu_bo *wb_obj;
@@ -1183,6 +1213,9 @@ struct amdgpu_firmware {
/* gpu info firmware data pointer */
const struct firmware *gpu_info_fw;
+
+ void *fw_buf_ptr;
+ uint64_t fw_buf_mc;
};
/*
@@ -1197,20 +1230,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
void amdgpu_test_moves(struct amdgpu_device *adev);
/*
- * MMU Notifier
- */
-#if defined(CONFIG_MMU_NOTIFIER)
-int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
-void amdgpu_mn_unregister(struct amdgpu_bo *bo);
-#else
-static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
-{
- return -ENODEV;
-}
-static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
-#endif
-
-/*
* Debugfs
*/
struct amdgpu_debugfs {
@@ -1305,6 +1324,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -1371,6 +1392,18 @@ struct amdgpu_atcs {
};
/*
+ * Firmware VRAM reservation
+ */
+struct amdgpu_fw_vram_usage {
+ u64 start_offset;
+ u64 size;
+ struct amdgpu_bo *reserved_bo;
+ void *va;
+};
+
+int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
+
+/*
* CGS
*/
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
@@ -1519,7 +1552,6 @@ struct amdgpu_device {
/* powerplay */
struct amd_powerplay powerplay;
- bool pp_enabled;
bool pp_force_state_enabled;
/* dpm */
@@ -1575,6 +1607,8 @@ struct amdgpu_device {
struct delayed_work late_init_work;
struct amdgpu_virt virt;
+ /* firmware VRAM reservation */
+ struct amdgpu_fw_vram_usage fw_vram_usage;
/* link all shadow bo */
struct list_head shadow_list;
@@ -1592,6 +1626,7 @@ struct amdgpu_device {
/* record last mm index being written through WREG32*/
unsigned long last_mm_index;
+ bool in_sriov_reset;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1759,6 +1794,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
+#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
@@ -1791,18 +1827,6 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
-int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
-int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
- uint32_t flags);
-bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
-struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
-bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
- unsigned long end);
-bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
- int *last_invalidated);
-bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
-uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
- struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
@@ -1836,8 +1860,6 @@ static inline bool amdgpu_has_atpx(void) { return false; }
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;
-bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
- struct amdgpu_fpriv *fpriv);
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
@@ -1885,10 +1907,9 @@ static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
- uint64_t addr, struct amdgpu_bo **bo);
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ uint64_t addr, struct amdgpu_bo **bo,
+ struct amdgpu_bo_va_mapping **mapping);
#include "amdgpu_object.h"
#endif
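The amdgpu.h hunks above add per-operation cost information to struct amdgpu_vm_pte_funcs (copy_pte_num_dw, set_pte_pde_num_dw) plus a batch limit (set_max_nums_pte_pde), so callers can budget indirect-buffer space before invoking the hooks. The stand-alone toy below only sketches that budgeting idea under assumed, made-up numbers; it is not the driver's actual sizing code and the helper name is hypothetical.

    /* Toy sketch of dw budgeting from per-op costs; all values are made up. */
    #include <stdio.h>

    struct pte_funcs_sketch {
        unsigned copy_pte_num_dw;      /* dw per copy_pte() call */
        unsigned set_pte_pde_num_dw;   /* dw per set_pte_pde() call */
        unsigned set_max_nums_pte_pde; /* most PTEs one set_pte_pde() may write */
    };

    /* Estimate the dw needed to write num_ptes entries via set_pte_pde(). */
    static unsigned estimate_set_pte_dw(const struct pte_funcs_sketch *f,
                                        unsigned num_ptes)
    {
        /* split the update into batches of at most set_max_nums_pte_pde */
        unsigned calls = (num_ptes + f->set_max_nums_pte_pde - 1) /
                         f->set_max_nums_pte_pde;

        return calls * f->set_pte_pde_num_dw;
    }

    int main(void)
    {
        /* placeholder costs, not taken from any real engine */
        struct pte_funcs_sketch engine = { 7, 10, 0x1fffff };

        printf("%u dw for 4096 PTEs\n", estimate_set_pte_dw(&engine, 4096));
        return 0;
    }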
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index a52795d9b458..c04f44a90392 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -35,41 +35,50 @@
#include "acp_gfx_if.h"
-#define ACP_TILE_ON_MASK 0x03
-#define ACP_TILE_OFF_MASK 0x02
-#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
-#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
-
-#define ACP_TILE_P1_MASK 0x3e
-#define ACP_TILE_P2_MASK 0x3d
-#define ACP_TILE_DSP0_MASK 0x3b
-#define ACP_TILE_DSP1_MASK 0x37
-
-#define ACP_TILE_DSP2_MASK 0x2f
-
-#define ACP_DMA_REGS_END 0x146c0
-#define ACP_I2S_PLAY_REGS_START 0x14840
-#define ACP_I2S_PLAY_REGS_END 0x148b4
-#define ACP_I2S_CAP_REGS_START 0x148b8
-#define ACP_I2S_CAP_REGS_END 0x1496c
-
-#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
-#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
-#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
-#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
-
-#define mmACP_PGFSM_RETAIN_REG 0x51c9
-#define mmACP_PGFSM_CONFIG_REG 0x51ca
-#define mmACP_PGFSM_READ_REG_0 0x51cc
-
-#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
-#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
-#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
-#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
-
-#define ACP_TIMEOUT_LOOP 0x000000FF
-#define ACP_DEVS 3
-#define ACP_SRC_ID 162
+#define ACP_TILE_ON_MASK 0x03
+#define ACP_TILE_OFF_MASK 0x02
+#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
+#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
+
+#define ACP_TILE_P1_MASK 0x3e
+#define ACP_TILE_P2_MASK 0x3d
+#define ACP_TILE_DSP0_MASK 0x3b
+#define ACP_TILE_DSP1_MASK 0x37
+
+#define ACP_TILE_DSP2_MASK 0x2f
+
+#define ACP_DMA_REGS_END 0x146c0
+#define ACP_I2S_PLAY_REGS_START 0x14840
+#define ACP_I2S_PLAY_REGS_END 0x148b4
+#define ACP_I2S_CAP_REGS_START 0x148b8
+#define ACP_I2S_CAP_REGS_END 0x1496c
+
+#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
+#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
+#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
+#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
+
+#define mmACP_PGFSM_RETAIN_REG 0x51c9
+#define mmACP_PGFSM_CONFIG_REG 0x51ca
+#define mmACP_PGFSM_READ_REG_0 0x51cc
+
+#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
+#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
+#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
+#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
+
+#define mmACP_CONTROL 0x5131
+#define mmACP_STATUS 0x5133
+#define mmACP_SOFT_RESET 0x5134
+#define ACP_CONTROL__ClkEn_MASK 0x1
+#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100
+#define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000
+#define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
+#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF
+
+#define ACP_TIMEOUT_LOOP 0x000000FF
+#define ACP_DEVS 3
+#define ACP_SRC_ID 162
enum {
ACP_TILE_P1 = 0,
@@ -260,6 +269,8 @@ static int acp_hw_init(void *handle)
{
int r, i;
uint64_t acp_base;
+ u32 val = 0;
+ u32 count = 0;
struct device *dev;
struct i2s_platform_data *i2s_pdata;
@@ -371,6 +382,8 @@ static int acp_hw_init(void *handle)
adev->acp.acp_cell[0].name = "acp_audio_dma";
adev->acp.acp_cell[0].num_resources = 4;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
+ adev->acp.acp_cell[0].platform_data = &adev->asic_type;
+ adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
adev->acp.acp_cell[1].name = "designware-i2s";
adev->acp.acp_cell[1].num_resources = 1;
@@ -400,6 +413,46 @@ static int acp_hw_init(void *handle)
}
}
+ /* Assert Soft reset of ACP */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+
+ val |= ACP_SOFT_RESET__SoftResetAud_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+
+ count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+ if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
+ (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+ /* Enable clock to ACP and wait until the clock is enabled */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
+ val = val | ACP_CONTROL__ClkEn_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
+
+ count = ACP_CLOCK_EN_TIME_OUT_VALUE;
+
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
+ if (val & (u32) 0x1)
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+ /* Deassert the SOFT RESET flags */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+ val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+
return 0;
}
@@ -412,6 +465,8 @@ static int acp_hw_init(void *handle)
static int acp_hw_fini(void *handle)
{
int i, ret;
+ u32 val = 0;
+ u32 count = 0;
struct device *dev;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -419,6 +474,42 @@ static int acp_hw_fini(void *handle)
if (!adev->acp.acp_cell)
return 0;
+ /* Assert Soft reset of ACP */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+
+ val |= ACP_SOFT_RESET__SoftResetAud_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+
+ count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+ if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
+ (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+ /* Disable ACP clock */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
+ val &= ~ACP_CONTROL__ClkEn_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
+
+ count = ACP_CLOCK_EN_TIME_OUT_VALUE;
+
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
+ if (val & (u32) 0x1)
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+
if (adev->acp.acp_genpd) {
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index b9dbbf9cb8b0..47d1c132ac40 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -169,6 +169,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+ .alloc_pasid = amdgpu_vm_alloc_pasid,
+ .free_pasid = amdgpu_vm_free_pasid,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
@@ -336,6 +338,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
struct cik_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
+ bool valid_wptr = false;
m = get_mqd(mqd);
@@ -354,7 +357,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- if (read_user_wptr(mm, wptr, wptr_val))
+ /* read_user_wptr may take the mm->mmap_sem.
+ * release srbm_mutex to avoid circular dependency between
+ * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
+ */
+ release_queue(kgd);
+ valid_wptr = read_user_wptr(mm, wptr, wptr_val);
+ acquire_queue(kgd, pipe_id, queue_id);
+ if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 309f2419c6d8..056929b8ccd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -128,6 +128,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+ .alloc_pasid = amdgpu_vm_alloc_pasid,
+ .free_pasid = amdgpu_vm_free_pasid,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
@@ -290,6 +292,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
struct vi_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
+ bool valid_wptr = false;
m = get_mqd(mqd);
@@ -337,7 +340,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- if (read_user_wptr(mm, wptr, wptr_val))
+ /* read_user_wptr may take the mm->mmap_sem.
+ * release srbm_mutex to avoid circular dependency between
+ * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
+ */
+ release_queue(kgd);
+ valid_wptr = read_user_wptr(mm, wptr, wptr_val);
+ acquire_queue(kgd, pipe_id, queue_id);
+ if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index ce443586a0c7..f450b69323fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1766,34 +1766,32 @@ bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
return true;
}
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * bytes in the src buffer.
*/
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
- u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
- u32 *dst32, *src32;
+ u32 src_tmp[5], dst_tmp[5];
int i;
+ u8 align_num_bytes = ALIGN(num_bytes, 4);
- memcpy(src_tmp, src, num_bytes);
- src32 = (u32 *)src_tmp;
- dst32 = (u32 *)dst_tmp;
if (to_le) {
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = cpu_to_le32(src32[i]);
- memcpy(dst, dst_tmp, num_bytes);
+ memcpy(src_tmp, src, num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+ memcpy(dst, dst_tmp, align_num_bytes);
} else {
- u8 dws = num_bytes & ~3;
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = le32_to_cpu(src32[i]);
- memcpy(dst, dst_tmp, dws);
- if (num_bytes % 4) {
- for (i = 0; i < (num_bytes % 4); i++)
- dst[dws+i] = dst_tmp[dws+i];
- }
+ memcpy(src_tmp, src, align_num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+ memcpy(dst, dst_tmp, num_bytes);
}
#else
memcpy(dst, src, num_bytes);
@@ -1807,6 +1805,8 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
uint16_t data_offset;
int usage_bytes = 0;
struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+ u64 start_addr;
+ u64 size;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
@@ -1815,7 +1815,21 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
- usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+ start_addr = firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware;
+ size = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb;
+
+ if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
+ (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
+ ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
+ /* Firmware requests VRAM reservation for SR-IOV */
+ adev->fw_vram_usage.start_offset = (start_addr &
+ (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+ adev->fw_vram_usage.size = size << 10;
+ /* Use the default scratch size */
+ usage_bytes = 0;
+ } else {
+ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+ }
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
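In both the legacy atombios path above and the atomfirmware path that follows, the start-address field of the firmware VRAM-usage table does double duty: its upper bits carry operation flags, and when they match the SR-IOV share-reservation value the driver records the region in adev->fw_vram_usage (later claimed by amdgpu_fw_reserve_vram_init) instead of treating it as ATOM scratch. Table offsets and sizes are in KB, hence the << 10 conversions. The stand-alone snippet below only walks through that decoding arithmetic; the mask, shift, and flag values are illustrative placeholders, not the real atomfirmware.h constants.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the ATOM_VRAM_* constants. */
    #define OP_FLAGS_MASK                0xff000000u
    #define OP_FLAGS_SHIFT               24
    #define SRIOV_MSG_SHARE_RESERVATION  2u

    int main(void)
    {
        /* 16384 KB (16 MiB) start offset with the SR-IOV flag set, 256 KB size */
        uint32_t start_addr = (SRIOV_MSG_SHARE_RESERVATION << OP_FLAGS_SHIFT) | 16384;
        uint32_t size_kb = 256;

        if ((start_addr & OP_FLAGS_MASK) ==
            (SRIOV_MSG_SHARE_RESERVATION << OP_FLAGS_SHIFT)) {
            /* strip the flag bits, then convert KB to bytes */
            uint64_t offset = (uint64_t)(start_addr & ~OP_FLAGS_MASK) << 10;
            uint64_t size   = (uint64_t)size_kb << 10;

            printf("reserve %llu bytes at VRAM offset %llu\n",
                   (unsigned long long)size, (unsigned long long)offset);
        }
        return 0;
    }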
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index f9ffe8ef0cd6..ff8efd0f8fd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -71,19 +71,33 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
struct atom_context *ctx = adev->mode_info.atom_context;
int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_usagebyfirmware);
+ struct vram_usagebyfirmware_v2_1 *firmware_usage;
+ uint32_t start_addr, size;
uint16_t data_offset;
int usage_bytes = 0;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
- struct vram_usagebyfirmware_v2_1 *firmware_usage =
- (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
-
+ firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
le32_to_cpu(firmware_usage->start_address_in_kb),
le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
le16_to_cpu(firmware_usage->used_by_driver_in_kb));
- usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) * 1024;
+ start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
+ size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);
+
+ if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
+ (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
+ ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
+ /* Firmware requests VRAM reservation for SR-IOV */
+ adev->fw_vram_usage.start_offset = (start_addr &
+ (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+ adev->fw_vram_usage.size = size << 10;
+ /* Use the default scratch size */
+ usage_bytes = 0;
+ } else {
+ usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
+ }
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index fd435a96481c..a7afe553e0a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -42,10 +42,31 @@ struct amdgpu_cgs_device {
struct amdgpu_device *adev = \
((struct amdgpu_cgs_device *)cgs_device)->adev
+static void *amdgpu_cgs_register_pp_handle(struct cgs_device *cgs_device,
+ int (*call_back_func)(struct amd_pp_init *, void **))
+{
+ CGS_FUNC_ADEV;
+ struct amd_pp_init pp_init;
+ struct amd_powerplay *amd_pp;
+
+ if (call_back_func == NULL)
+ return NULL;
+
+ amd_pp = &(adev->powerplay);
+ pp_init.chip_family = adev->family;
+ pp_init.chip_id = adev->asic_type;
+ pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
+ pp_init.feature_mask = amdgpu_pp_feature_mask;
+ pp_init.device = cgs_device;
+ if (call_back_func(&pp_init, &(amd_pp->pp_handle)))
+ return NULL;
+
+ return adev->powerplay.pp_handle;
+}
+
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
- uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *handle)
{
CGS_FUNC_ADEV;
@@ -53,13 +74,6 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
int ret = 0;
uint32_t domain = 0;
struct amdgpu_bo *obj;
- struct ttm_placement placement;
- struct ttm_place place;
-
- if (min_offset > max_offset) {
- BUG_ON(1);
- return -EINVAL;
- }
/* fail if the alignment is not a power of 2 */
if (((align != 1) && (align & (align - 1)))
@@ -73,41 +87,19 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (max_offset > adev->mc.real_vram_size)
- return -EINVAL;
- place.fpfn = min_offset >> PAGE_SHIFT;
- place.lpfn = max_offset >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
- place.fpfn =
- max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
- place.lpfn =
- min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
- }
-
break;
case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
domain = AMDGPU_GEM_DOMAIN_GTT;
- place.fpfn = min_offset >> PAGE_SHIFT;
- place.lpfn = max_offset >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
break;
case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
domain = AMDGPU_GEM_DOMAIN_GTT;
- place.fpfn = min_offset >> PAGE_SHIFT;
- place.lpfn = max_offset >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
- TTM_PL_FLAG_UNCACHED;
break;
default:
return -EINVAL;
@@ -116,15 +108,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
*handle = 0;
- placement.placement = &place;
- placement.num_placement = 1;
- placement.busy_placement = &place;
- placement.num_busy_placement = 1;
-
- ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
- true, domain, flags,
- NULL, &placement, NULL,
- 0, &obj);
+ ret = amdgpu_bo_create(adev, size, align, true, domain, flags,
+ NULL, NULL, 0, &obj);
if (ret) {
DRM_ERROR("(%d) bo create failed\n", ret);
return ret;
@@ -155,19 +140,14 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
uint64_t *mcaddr)
{
int r;
- u64 min_offset, max_offset;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
WARN_ON_ONCE(obj->placement.num_placement > 1);
- min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
- max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
-
r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains,
- min_offset, max_offset, mcaddr);
+ r = amdgpu_bo_pin(obj, obj->preferred_domains, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
}
@@ -675,6 +655,85 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ strcpy(fw_name, "radeon/tahiti_smc.bin");
+ break;
+ case CHIP_PITCAIRN:
+ if ((adev->pdev->revision == 0x81) &&
+ ((adev->pdev->device == 0x6810) ||
+ (adev->pdev->device == 0x6811))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/pitcairn_smc.bin");
+ }
+ break;
+ case CHIP_VERDE:
+ if (((adev->pdev->device == 0x6820) &&
+ ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83))) ||
+ ((adev->pdev->device == 0x6821) &&
+ ((adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87))) ||
+ ((adev->pdev->revision == 0x87) &&
+ ((adev->pdev->device == 0x6823) ||
+ (adev->pdev->device == 0x682b)))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/verde_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/verde_smc.bin");
+ }
+ break;
+ case CHIP_OLAND:
+ if (((adev->pdev->revision == 0x81) &&
+ ((adev->pdev->device == 0x6600) ||
+ (adev->pdev->device == 0x6604) ||
+ (adev->pdev->device == 0x6605) ||
+ (adev->pdev->device == 0x6610))) ||
+ ((adev->pdev->revision == 0x83) &&
+ (adev->pdev->device == 0x6610))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/oland_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/oland_smc.bin");
+ }
+ break;
+ case CHIP_HAINAN:
+ if (((adev->pdev->revision == 0x81) &&
+ (adev->pdev->device == 0x6660)) ||
+ ((adev->pdev->revision == 0x83) &&
+ ((adev->pdev->device == 0x6660) ||
+ (adev->pdev->device == 0x6663) ||
+ (adev->pdev->device == 0x6665) ||
+ (adev->pdev->device == 0x6667)))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/hainan_k_smc.bin");
+ } else if ((adev->pdev->revision == 0xc3) &&
+ (adev->pdev->device == 0x6665)) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/banks_k_2_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/hainan_smc.bin");
+ }
+ break;
+ case CHIP_BONAIRE:
+ if ((adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x665f)) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/bonaire_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/bonaire_smc.bin");
+ }
+ break;
+ case CHIP_HAWAII:
+ if (adev->pdev->revision == 0x80) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/hawaii_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/hawaii_smc.bin");
+ }
+ break;
case CHIP_TOPAZ:
if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
@@ -838,6 +897,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
sys_info->value = adev->pdev->subsystem_vendor;
break;
+ case CGS_SYSTEM_INFO_PCIE_BUS_DEVFN:
+ sys_info->value = adev->pdev->devfn;
+ break;
default:
return -ENODEV;
}
@@ -1139,6 +1201,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
+ .register_pp_handle = amdgpu_cgs_register_pp_handle,
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 8d1cf2d3e663..df9cbc78e168 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -231,7 +231,7 @@ amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -256,7 +256,7 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -346,10 +346,8 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_connector->edid) {
- kfree(amdgpu_connector->edid);
- amdgpu_connector->edid = NULL;
- }
+ kfree(amdgpu_connector->edid);
+ amdgpu_connector->edid = NULL;
}
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
@@ -374,7 +372,7 @@ amdgpu_connector_best_single_encoder(struct drm_connector *connector)
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@@ -1079,7 +1077,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1136,7 +1134,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1155,7 +1153,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@@ -1296,7 +1294,7 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1325,7 +1323,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 60d8bedb694d..6c78623e1386 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -25,6 +25,7 @@
* Jerome Glisse <glisse@freedesktop.org>
*/
#include <linux/pagemap.h>
+#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@@ -89,12 +90,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
goto free_chunk;
}
+ mutex_lock(&p->ctx->lock);
+
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
ret = -EFAULT;
- goto put_ctx;
+ goto free_chunk;
}
p->nchunks = cs->in.num_chunks;
@@ -102,7 +105,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
GFP_KERNEL);
if (!p->chunks) {
ret = -ENOMEM;
- goto put_ctx;
+ goto free_chunk;
}
for (i = 0; i < p->nchunks; i++) {
@@ -169,6 +172,11 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
if (ret)
goto free_all_kdata;
+ if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
+ ret = -ECANCELED;
+ goto free_all_kdata;
+ }
+
if (p->uf_entry.robj)
p->job->uf_addr = uf_offset;
kfree(chunk_array);
@@ -182,8 +190,6 @@ free_partial_kdata:
kfree(p->chunks);
p->chunks = NULL;
p->nchunks = 0;
-put_ctx:
- amdgpu_ctx_put(p->ctx);
free_chunk:
kfree(chunk_array);
@@ -473,11 +479,16 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
return -EPERM;
/* Check if we have user pages and nobody bound the BO already */
- if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
- size_t size = sizeof(struct page *);
-
- size *= bo->tbo.ttm->num_pages;
- memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
+ lobj->user_pages) {
+ amdgpu_ttm_placement_from_domain(bo,
+ AMDGPU_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
+ false);
+ if (r)
+ return r;
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
+ lobj->user_pages);
binding_userptr = true;
}
@@ -502,7 +513,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
- bool need_mmap_lock = false;
unsigned i, tries = 10;
int r;
@@ -510,9 +520,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
if (p->bo_list) {
- need_mmap_lock = p->bo_list->first_userptr !=
- p->bo_list->num_entries;
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+ if (p->bo_list->first_userptr != p->bo_list->num_entries)
+ p->mn = amdgpu_mn_get(p->adev);
}
INIT_LIST_HEAD(&duplicates);
@@ -521,9 +531,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (p->uf_entry.robj)
list_add(&p->uf_entry.tv.head, &p->validated);
- if (need_mmap_lock)
- down_read(&current->mm->mmap_sem);
-
while (1) {
struct list_head need_pages;
unsigned i;
@@ -543,23 +550,24 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
INIT_LIST_HEAD(&need_pages);
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
+ struct amdgpu_bo *bo;
e = &p->bo_list->array[i];
+ bo = e->robj;
- if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
+ if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
&e->user_invalidated) && e->user_pages) {
/* We acquired a page array, but somebody
* invalidated it. Free it and try again
*/
release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages,
- false);
+ bo->tbo.ttm->num_pages);
kvfree(e->user_pages);
e->user_pages = NULL;
}
- if (e->robj->tbo.ttm->state != tt_bound &&
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
!e->user_pages) {
list_del(&e->tv.head);
list_add(&e->tv.head, &need_pages);
@@ -636,9 +644,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
p->bytes_moved_vis);
- fpriv->vm.last_eviction_counter =
- atomic64_read(&p->adev->num_evictions);
-
if (p->bo_list) {
struct amdgpu_bo *gds = p->bo_list->gds_obj;
struct amdgpu_bo *gws = p->bo_list->gws_obj;
@@ -679,9 +684,6 @@ error_validate:
error_free_pages:
- if (need_mmap_lock)
- up_read(&current->mm->mmap_sem);
-
if (p->bo_list) {
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
@@ -691,8 +693,7 @@ error_free_pages:
continue;
release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages,
- false);
+ e->robj->tbo.ttm->num_pages);
kvfree(e->user_pages);
}
}
@@ -707,7 +708,8 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
list_for_each_entry(e, &p->validated, tv.head) {
struct reservation_object *resv = e->robj->tbo.resv;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);
+ r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
+ amdgpu_bo_explicit_sync(e->robj));
if (r)
return r;
@@ -728,11 +730,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
{
unsigned i;
- if (!error)
- ttm_eu_fence_buffer_objects(&parser->ticket,
- &parser->validated,
- parser->fence);
- else if (backoff)
+ if (error && backoff)
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
@@ -742,8 +740,10 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
dma_fence_put(parser->fence);
- if (parser->ctx)
+ if (parser->ctx) {
+ mutex_unlock(&parser->ctx->lock);
amdgpu_ctx_put(parser->ctx);
+ }
if (parser->bo_list)
amdgpu_bo_list_put(parser->bo_list);
@@ -768,10 +768,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update);
- if (r)
- return r;
-
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -825,7 +821,13 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
}
- r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync);
+ r = amdgpu_vm_handle_moved(adev, vm);
+ if (r)
+ return r;
+
+ r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
+ if (r)
+ return r;
if (amdgpu_vm_debug && p->bo_list) {
/* Invalidate all BOs to test for userspace bugs */
@@ -835,7 +837,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (!bo)
continue;
- amdgpu_vm_bo_invalidate(adev, bo);
+ amdgpu_vm_bo_invalidate(adev, bo, false);
}
}
@@ -848,19 +850,63 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_ring *ring = p->job->ring;
- int i, r;
+ int r;
/* Only for UVD/VCE VM emulation */
- if (ring->funcs->parse_cs) {
- for (i = 0; i < p->job->num_ibs; i++) {
- r = amdgpu_ring_parse_cs(ring, p, i);
+ if (p->job->ring->funcs->parse_cs) {
+ unsigned i, j;
+
+ for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib;
+ struct amdgpu_bo_va_mapping *m;
+ struct amdgpu_bo *aobj = NULL;
+ struct amdgpu_cs_chunk *chunk;
+ struct amdgpu_ib *ib;
+ uint64_t offset;
+ uint8_t *kptr;
+
+ chunk = &p->chunks[i];
+ ib = &p->job->ibs[j];
+ chunk_ib = chunk->kdata;
+
+ if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
+ continue;
+
+ r = amdgpu_cs_find_mapping(p, chunk_ib->va_start,
+ &aobj, &m);
+ if (r) {
+ DRM_ERROR("IB va_start is invalid\n");
+ return r;
+ }
+
+ if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
+ (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+ DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+ return -EINVAL;
+ }
+
+ /* the IB should be reserved at this point */
+ r = amdgpu_bo_kmap(aobj, (void **)&kptr);
+ if (r) {
+ return r;
+ }
+
+ offset = m->start * AMDGPU_GPU_PAGE_SIZE;
+ kptr += chunk_ib->va_start - offset;
+
+ memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
+ amdgpu_bo_kunmap(aobj);
+
+ r = amdgpu_ring_parse_cs(ring, p, j);
if (r)
return r;
+
+ j++;
}
}
if (p->job->vm) {
- p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);
+ p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
r = amdgpu_bo_vm_update_pte(p);
if (r)
@@ -922,54 +968,18 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
parser->job->ring = ring;
- if (ring->funcs->parse_cs) {
- struct amdgpu_bo_va_mapping *m;
- struct amdgpu_bo *aobj = NULL;
- uint64_t offset;
- uint8_t *kptr;
-
- m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
- &aobj);
- if (!aobj) {
- DRM_ERROR("IB va_start is invalid\n");
- return -EINVAL;
- }
-
- if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
- (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("IB va_start+ib_bytes is invalid\n");
- return -EINVAL;
- }
-
- /* the IB should be reserved at this point */
- r = amdgpu_bo_kmap(aobj, (void **)&kptr);
- if (r) {
- return r;
- }
-
- offset = m->start * AMDGPU_GPU_PAGE_SIZE;
- kptr += chunk_ib->va_start - offset;
-
- r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
-
- memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
- amdgpu_bo_kunmap(aobj);
- } else {
- r = amdgpu_ib_get(adev, vm, 0, ib);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
-
+ r = amdgpu_ib_get(adev, vm,
+ ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
+ ib);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
}
ib->gpu_addr = chunk_ib->va_start;
ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags;
+
j++;
}
@@ -979,7 +989,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
- return 0;
+ return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
}
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1133,14 +1143,31 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct amdgpu_job *job;
+ unsigned i;
+ uint64_t seq;
+
int r;
+ amdgpu_mn_lock(p->mn);
+ if (p->bo_list) {
+ for (i = p->bo_list->first_userptr;
+ i < p->bo_list->num_entries; ++i) {
+ struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+ amdgpu_mn_unlock(p->mn);
+ return -ERESTARTSYS;
+ }
+ }
+ }
+
job = p->job;
p->job = NULL;
r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
amdgpu_job_free(job);
+ amdgpu_mn_unlock(p->mn);
return r;
}
@@ -1148,21 +1175,36 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->fence_ctx = entity->fence_context;
p->fence = dma_fence_get(&job->base.s_fence->finished);
+ r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
+ if (r) {
+ dma_fence_put(p->fence);
+ dma_fence_put(&job->base.s_fence->finished);
+ amdgpu_job_free(job);
+ amdgpu_mn_unlock(p->mn);
+ return r;
+ }
+
amdgpu_cs_post_dependencies(p);
- cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
- job->uf_sequence = cs->out.handle;
+ cs->out.handle = seq;
+ job->uf_sequence = seq;
+
amdgpu_job_free_resources(job);
+ amdgpu_ring_priority_get(job->ring,
+ amd_sched_get_job_priority(&job->base));
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
+
+ ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
+ amdgpu_mn_unlock(p->mn);
+
return 0;
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser parser = {};
bool reserved_buffers = false;
@@ -1170,8 +1212,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (!adev->accel_working)
return -EBUSY;
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
parser.adev = adev;
parser.filp = filp;
@@ -1182,6 +1222,10 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
}
+ r = amdgpu_cs_ib_fill(adev, &parser);
+ if (r)
+ goto out;
+
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
if (r == -ENOMEM)
@@ -1192,9 +1236,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
reserved_buffers = true;
- r = amdgpu_cs_ib_fill(adev, &parser);
- if (r)
- goto out;
r = amdgpu_cs_dependencies(adev, &parser);
if (r) {
@@ -1230,16 +1271,12 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
{
union drm_amdgpu_wait_cs *wait = data;
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
struct dma_fence *fence;
long r;
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
-
ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
if (ctx == NULL)
return -EINVAL;
@@ -1257,6 +1294,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
r = PTR_ERR(fence);
else if (fence) {
r = dma_fence_wait_timeout(fence, true, timeout);
+ if (r > 0 && fence->error)
+ r = fence->error;
dma_fence_put(fence);
} else
r = 1;
@@ -1304,6 +1343,62 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
return fence;
}
+int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ union drm_amdgpu_fence_to_handle *info = data;
+ struct dma_fence *fence;
+ struct drm_syncobj *syncobj;
+ struct sync_file *sync_file;
+ int fd, r;
+
+ fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
+ switch (info->in.what) {
+ case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
+ r = drm_syncobj_create(&syncobj, 0, fence);
+ dma_fence_put(fence);
+ if (r)
+ return r;
+ r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
+ drm_syncobj_put(syncobj);
+ return r;
+
+ case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
+ r = drm_syncobj_create(&syncobj, 0, fence);
+ dma_fence_put(fence);
+ if (r)
+ return r;
+ r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
+ drm_syncobj_put(syncobj);
+ return r;
+
+ case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ dma_fence_put(fence);
+ return fd;
+ }
+
+ sync_file = sync_file_create(fence);
+ dma_fence_put(fence);
+ if (!sync_file) {
+ put_unused_fd(fd);
+ return -ENOMEM;
+ }
+
+ fd_install(fd, sync_file->file);
+ info->out.handle = fd;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
/**
* amdgpu_cs_wait_all_fence - wait on all fences to signal
*
@@ -1338,6 +1433,9 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
if (r == 0)
break;
+
+ if (fence->error)
+ return fence->error;
}
memset(wait, 0, sizeof(*wait));
@@ -1383,6 +1481,7 @@ static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
array[i] = fence;
} else { /* NULL, the fence has been already signaled */
r = 1;
+ first = i;
goto out;
}
}
@@ -1397,7 +1496,7 @@ out:
wait->out.status = (r > 0);
wait->out.first_signaled = first;
/* set return value 0 to indicate success */
- r = 0;
+ r = array[first]->error;
err_free_fence_array:
for (i = 0; i < fence_count; i++)
@@ -1418,15 +1517,12 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_wait_fences *wait = data;
uint32_t fence_count = wait->in.fence_count;
struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
/* Get the fences from userspace */
fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
GFP_KERNEL);
@@ -1462,78 +1558,36 @@ err_free_fences:
* virtual memory address. Returns allocation structure when found, NULL
* otherwise.
*/
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
- uint64_t addr, struct amdgpu_bo **bo)
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ uint64_t addr, struct amdgpu_bo **bo,
+ struct amdgpu_bo_va_mapping **map)
{
+ struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
- unsigned i;
-
- if (!parser->bo_list)
- return NULL;
-
- addr /= AMDGPU_GPU_PAGE_SIZE;
-
- for (i = 0; i < parser->bo_list->num_entries; i++) {
- struct amdgpu_bo_list_entry *lobj;
-
- lobj = &parser->bo_list->array[i];
- if (!lobj->bo_va)
- continue;
-
- list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
- if (mapping->start > addr ||
- addr > mapping->last)
- continue;
-
- *bo = lobj->bo_va->base.bo;
- return mapping;
- }
-
- list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
- if (mapping->start > addr ||
- addr > mapping->last)
- continue;
-
- *bo = lobj->bo_va->base.bo;
- return mapping;
- }
- }
-
- return NULL;
-}
-
-/**
- * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
- *
- * @parser: command submission parser context
- *
- * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
- */
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
-{
- unsigned i;
int r;
- if (!parser->bo_list)
- return 0;
+ addr /= AMDGPU_GPU_PAGE_SIZE;
- for (i = 0; i < parser->bo_list->num_entries; i++) {
- struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
+ if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
+ return -EINVAL;
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
- if (unlikely(r))
- return r;
+ *bo = mapping->bo_va->base.bo;
+ *map = mapping;
- if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
- continue;
+ /* Double check that the BO is reserved by this CS */
+ if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
+ return -EINVAL;
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (unlikely(r))
+ if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+ (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
+ false);
+ if (r)
return r;
}
- return 0;
+ return amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
}
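With this change amdgpu_cs_find_mapping() returns an error code, resolves the address through the per-VM lookup (amdgpu_vm_bo_lookup_mapping) instead of scanning the BO list, hands back both the BO and the mapping, and double-checks that the BO is reserved by this CS. The sketch below merely restates the calling pattern already visible in the UVD/VCE parse path earlier in this patch; the helper name and signature are hypothetical, it is an illustration rather than new driver code.

    /* Hypothetical in-driver helper: copy nbytes of GPU-visible data at va
     * into dst, using the new find_mapping convention. */
    static int sketch_read_ib(struct amdgpu_cs_parser *p, uint64_t va,
                              uint32_t nbytes, void *dst)
    {
        struct amdgpu_bo_va_mapping *m;
        struct amdgpu_bo *aobj;
        uint8_t *kptr;
        int r;

        r = amdgpu_cs_find_mapping(p, va, &aobj, &m);
        if (r)          /* no mapping, or BO not reserved by this CS */
            return r;

        /* reject reads running past the end of the mapping */
        if (va + nbytes > (m->last + 1) * AMDGPU_GPU_PAGE_SIZE)
            return -EINVAL;

        r = amdgpu_bo_kmap(aobj, (void **)&kptr);
        if (r)
            return r;

        memcpy(dst, kptr + (va - m->start * AMDGPU_GPU_PAGE_SIZE), nbytes);
        amdgpu_bo_kunmap(aobj);
        return 0;
    }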
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a11e44340b23..c184468e2b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -23,13 +23,41 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_auth.h>
#include "amdgpu.h"
+#include "amdgpu_sched.h"
-static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+ enum amd_sched_priority priority)
+{
+ /* NORMAL and below are accessible by everyone */
+ if (priority <= AMD_SCHED_PRIORITY_NORMAL)
+ return 0;
+
+ if (capable(CAP_SYS_NICE))
+ return 0;
+
+ if (drm_is_current_master(filp))
+ return 0;
+
+ return -EACCES;
+}
+
+static int amdgpu_ctx_init(struct amdgpu_device *adev,
+ enum amd_sched_priority priority,
+ struct drm_file *filp,
+ struct amdgpu_ctx *ctx)
{
unsigned i, j;
int r;
+ if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
+ return -EINVAL;
+
+ r = amdgpu_ctx_priority_permit(filp, priority);
+ if (r)
+ return r;
+
memset(ctx, 0, sizeof(*ctx));
ctx->adev = adev;
kref_init(&ctx->refcount);
@@ -39,19 +67,24 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
if (!ctx->fences)
return -ENOMEM;
+ mutex_init(&ctx->lock);
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
ctx->rings[i].sequence = 1;
ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
}
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+ ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+ ctx->init_priority = priority;
+ ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
struct amd_sched_rq *rq;
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+ rq = &ring->sched.sched_rq[priority];
if (ring == &adev->gfx.kiq.ring)
continue;
@@ -96,10 +129,14 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
&ctx->rings[i].entity);
amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
+
+ mutex_destroy(&ctx->lock);
}
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
+ struct drm_file *filp,
+ enum amd_sched_priority priority,
uint32_t *id)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -117,8 +154,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
kfree(ctx);
return r;
}
+
*id = (uint32_t)r;
- r = amdgpu_ctx_init(adev, ctx);
+ r = amdgpu_ctx_init(adev, priority, filp, ctx);
if (r) {
idr_remove(&mgr->ctx_handles, *id);
*id = 0;
@@ -193,6 +231,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
{
int r;
uint32_t id;
+ enum amd_sched_priority priority;
union drm_amdgpu_ctx *args = data;
struct amdgpu_device *adev = dev->dev_private;
@@ -200,10 +239,16 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
r = 0;
id = args->in.ctx_id;
+ priority = amdgpu_to_sched_priority(args->in.priority);
+
+ /* For backwards compatibility reasons, we need to accept
+ * ioctls with garbage in the priority field */
+ if (priority == AMD_SCHED_PRIORITY_INVALID)
+ priority = AMD_SCHED_PRIORITY_NORMAL;
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
- r = amdgpu_ctx_alloc(adev, fpriv, &id);
+ r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
args->out.alloc.ctx_id = id;
break;
case AMDGPU_CTX_OP_FREE_CTX:
@@ -246,8 +291,8 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
return 0;
}
-uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct dma_fence *fence)
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ struct dma_fence *fence, uint64_t* handler)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
uint64_t seq = cring->sequence;
@@ -256,12 +301,8 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
idx = seq & (amdgpu_sched_jobs - 1);
other = cring->fences[idx];
- if (other) {
- signed long r;
- r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
- if (r < 0)
- DRM_ERROR("Error (%ld) waiting for fence!\n", r);
- }
+ if (other)
+ BUG_ON(!dma_fence_is_signaled(other));
dma_fence_get(fence);
@@ -271,8 +312,10 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
spin_unlock(&ctx->ring_lock);
dma_fence_put(other);
+ if (handler)
+ *handler = seq;
- return seq;
+ return 0;
}
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
@@ -303,6 +346,51 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return fence;
}
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+ enum amd_sched_priority priority)
+{
+ int i;
+ struct amdgpu_device *adev = ctx->adev;
+ struct amd_sched_rq *rq;
+ struct amd_sched_entity *entity;
+ struct amdgpu_ring *ring;
+ enum amd_sched_priority ctx_prio;
+
+ ctx->override_priority = priority;
+
+ ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
+
+ for (i = 0; i < adev->num_rings; i++) {
+ ring = adev->rings[i];
+ entity = &ctx->rings[i].entity;
+ rq = &ring->sched.sched_rq[ctx_prio];
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ continue;
+
+ amd_sched_entity_set_rq(entity, rq);
+ }
+}
+
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
+{
+ struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
+ unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
+ struct dma_fence *other = cring->fences[idx];
+
+ if (other) {
+ signed long r;
+ r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0) {
+ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+ return r;
+ }
+ }
+
+ return 0;
+}
+
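The init/override pair above reduces to a simple selection rule; a sketch of the effective-priority computation (hypothetical helper, not part of the patch):

	/* AMD_SCHED_PRIORITY_UNSET means "keep the priority the context was
	 * created with"; any explicit override wins. */
	static enum amd_sched_priority
	amdgpu_ctx_effective_priority(struct amdgpu_ctx *ctx)
	{
		return (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	}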
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
mutex_init(&mgr->lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e630d918fefc..efcacb827de7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -56,6 +56,7 @@
#include "amdgpu_vf_error.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_pm.h"
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -65,6 +66,7 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
+static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
static const char *amdgpu_asic_name[] = {
"TAHITI",
@@ -107,10 +109,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
{
uint32_t ret;
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
- BUG_ON(in_interrupt());
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_virt_kiq_rreg(adev, reg);
- }
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -135,10 +135,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
adev->last_mm_index = v;
}
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
- BUG_ON(in_interrupt());
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_virt_kiq_wreg(adev, reg, v);
- }
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@@ -402,6 +400,15 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev)
*/
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
+ /* No doorbell on SI hardware generation */
+ if (adev->asic_type < CHIP_BONAIRE) {
+ adev->doorbell.base = 0;
+ adev->doorbell.size = 0;
+ adev->doorbell.num_doorbells = 0;
+ adev->doorbell.ptr = NULL;
+ return 0;
+ }
+
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
@@ -539,7 +546,7 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
if (offset < adev->wb.num_wb) {
__set_bit(offset, adev->wb.used);
- *wb = offset * 8; /* convert to dw offset */
+ *wb = offset << 3; /* convert to dw offset */
return 0;
} else {
return -EINVAL;
@@ -557,7 +564,7 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
if (wb < adev->wb.num_wb)
- __clear_bit(wb, adev->wb.used);
+ __clear_bit(wb >> 3, adev->wb.used);
}
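The writeback handle returned by amdgpu_wb_get() is now a dword offset (slot << 3), so amdgpu_wb_free() shifts it back down before clearing the allocation bit; a round-trip sketch (illustrative only):

	u32 wb;

	if (amdgpu_wb_get(adev, &wb) == 0) {
		/* wb holds slot * 8; the slot index is recovered as wb >> 3 */
		amdgpu_wb_free(adev, wb);
	}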
/**
@@ -647,42 +654,96 @@ void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
}
/*
- * GPU helpers function.
+ * Firmware Reservation functions
*/
/**
- * amdgpu_need_post - check if the hw need post or not
+ * amdgpu_fw_reserve_vram_fini - free fw reserved vram
*
* @adev: amdgpu_device pointer
*
- * Check if the asic has been initialized (all asics) at driver startup
- * or post is needed if hw reset is performed.
- * Returns true if need or false if not.
+ * free fw reserved vram if it has been reserved.
*/
-bool amdgpu_need_post(struct amdgpu_device *adev)
+void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
- uint32_t reg;
+ amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
+ NULL, &adev->fw_vram_usage.va);
+}
- if (adev->has_hw_reset) {
- adev->has_hw_reset = false;
- return true;
- }
+/**
+ * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * create bo vram reservation from fw.
+ */
+int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+ u64 gpu_addr;
+ u64 vram_size = adev->mc.visible_vram_size;
- /* bios scratch used on CIK+ */
- if (adev->asic_type >= CHIP_BONAIRE)
- return amdgpu_atombios_scratch_need_asic_init(adev);
+ adev->fw_vram_usage.va = NULL;
+ adev->fw_vram_usage.reserved_bo = NULL;
- /* check MEM_SIZE for older asics */
- reg = amdgpu_asic_get_config_memsize(adev);
+ if (adev->fw_vram_usage.size > 0 &&
+ adev->fw_vram_usage.size <= vram_size) {
- if ((reg != 0) && (reg != 0xffffffff))
- return false;
+ r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
+ PAGE_SIZE, true, 0,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
+ &adev->fw_vram_usage.reserved_bo);
+ if (r)
+ goto error_create;
- return true;
+ r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
+ if (r)
+ goto error_reserve;
+ r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ adev->fw_vram_usage.start_offset,
+ (adev->fw_vram_usage.start_offset +
+ adev->fw_vram_usage.size), &gpu_addr);
+ if (r)
+ goto error_pin;
+ r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
+ &adev->fw_vram_usage.va);
+ if (r)
+ goto error_kmap;
+
+ amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+ }
+ return r;
+error_kmap:
+ amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
+error_pin:
+ amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+error_reserve:
+ amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
+error_create:
+ adev->fw_vram_usage.va = NULL;
+ adev->fw_vram_usage.reserved_bo = NULL;
+ return r;
}
-static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
+
+/*
+ * GPU helpers function.
+ */
+/**
+ * amdgpu_need_post - check if the hw needs to be posted or not
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Check if the asic has been initialized (all asics) at driver startup
+ * or post is needed if hw reset is performed.
+ * Returns true if post is needed, false if not.
+ */
+bool amdgpu_need_post(struct amdgpu_device *adev)
{
+ uint32_t reg;
+
if (amdgpu_sriov_vf(adev))
return false;
@@ -705,7 +766,23 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
return true;
}
}
- return amdgpu_need_post(adev);
+
+ if (adev->has_hw_reset) {
+ adev->has_hw_reset = false;
+ return true;
+ }
+
+ /* bios scratch used on CIK+ */
+ if (adev->asic_type >= CHIP_BONAIRE)
+ return amdgpu_atombios_scratch_need_asic_init(adev);
+
+ /* check MEM_SIZE for older asics */
+ reg = amdgpu_asic_get_config_memsize(adev);
+
+ if ((reg != 0) && (reg != 0xffffffff))
+ return false;
+
+ return true;
}
/**
@@ -887,6 +964,20 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
return r;
}
+static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
+}
+
+static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
+ NULL);
+
/**
* amdgpu_atombios_fini - free the driver info and callbacks for atombios
*
@@ -906,6 +997,7 @@ static void amdgpu_atombios_fini(struct amdgpu_device *adev)
adev->mode_info.atom_context = NULL;
kfree(adev->mode_info.atom_card_info);
adev->mode_info.atom_card_info = NULL;
+ device_remove_file(adev->dev, &dev_attr_vbios_version);
}
/**
@@ -922,6 +1014,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
struct card_info *atom_card_info =
kzalloc(sizeof(struct card_info), GFP_KERNEL);
+ int ret;
if (!atom_card_info)
return -ENOMEM;
@@ -958,6 +1051,13 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
amdgpu_atombios_scratch_regs_init(adev);
amdgpu_atombios_allocate_fb_scratch(adev);
}
+
+ ret = device_create_file(adev->dev, &dev_attr_vbios_version);
+ if (ret) {
+ DRM_ERROR("Failed to create device file for VBIOS version\n");
+ return ret;
+ }
+
return 0;
}
@@ -1757,10 +1857,8 @@ static int amdgpu_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = false;
}
- if (amdgpu_sriov_vf(adev)) {
- amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
+ if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
- }
return 0;
}
@@ -1848,6 +1946,7 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_SMC,
+ AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_DCE,
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,
@@ -1933,12 +2032,17 @@ static int amdgpu_resume(struct amdgpu_device *adev)
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
- if (adev->is_atom_fw) {
- if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
- } else {
- if (amdgpu_atombios_has_gpu_virtualization_table(adev))
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+ if (amdgpu_sriov_vf(adev)) {
+ if (adev->is_atom_fw) {
+ if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+ } else {
+ if (amdgpu_atombios_has_gpu_virtualization_table(adev))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+ }
+
+ if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
}
}
@@ -1979,6 +2083,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->vm_manager.vm_pte_num_rings = 0;
adev->gart.gart_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
+ bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
adev->smc_rreg = &amdgpu_invalid_rreg;
adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -2007,8 +2112,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->pm.mutex);
mutex_init(&adev->gfx.gpu_clock_mutex);
mutex_init(&adev->srbm_mutex);
+ mutex_init(&adev->gfx.pipe_reserve_mutex);
mutex_init(&adev->grbm_idx_mutex);
mutex_init(&adev->mn_lock);
+ mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
amdgpu_check_arguments(adev);
@@ -2051,9 +2158,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- if (adev->asic_type >= CHIP_BONAIRE)
- /* doorbell bar mapping */
- amdgpu_doorbell_init(adev);
+ /* doorbell bar mapping */
+ amdgpu_doorbell_init(adev);
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -2095,7 +2201,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
goto failed;
}
@@ -2103,10 +2209,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_detect_sriov_bios(adev);
/* Post card if necessary */
- if (amdgpu_vpost_needed(adev)) {
+ if (amdgpu_need_post(adev)) {
if (!adev->bios) {
dev_err(adev->dev, "no vBIOS found\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
r = -EINVAL;
goto failed;
}
@@ -2114,7 +2219,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
goto failed;
}
} else {
@@ -2126,7 +2230,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atomfirmware_get_clock_info(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
goto failed;
}
} else {
@@ -2134,7 +2238,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_get_clock_info(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
goto failed;
}
/* init i2c buses */
@@ -2145,7 +2249,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_fence_driver_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
goto failed;
}
@@ -2155,7 +2259,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_init failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
amdgpu_fini(adev);
goto failed;
}
@@ -2175,7 +2279,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_ib_pool_init(adev);
if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
goto failed;
}
@@ -2183,8 +2287,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_init_data_exchange(adev);
+
amdgpu_fbdev_init(adev);
+ r = amdgpu_pm_sysfs_init(adev);
+ if (r)
+ DRM_ERROR("registering pm debugfs failed (%d).\n", r);
+
r = amdgpu_gem_debugfs_init(adev);
if (r)
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
@@ -2201,6 +2312,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+ r = amdgpu_debugfs_vbios_dump_init(adev);
+ if (r)
+ DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
+
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
amdgpu_test_moves(adev);
@@ -2220,7 +2335,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_late_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_late_init failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
goto failed;
}
@@ -2252,6 +2367,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_ib_pool_fini(adev);
+ amdgpu_fw_reserve_vram_fini(adev);
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
@@ -2276,8 +2392,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rio_mem = NULL;
iounmap(adev->rmmio);
adev->rmmio = NULL;
- if (adev->asic_type >= CHIP_BONAIRE)
- amdgpu_doorbell_fini(adev);
+ amdgpu_doorbell_fini(adev);
+ amdgpu_pm_sysfs_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
}
@@ -2504,6 +2620,9 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
int i;
bool asic_hang = false;
+ if (amdgpu_sriov_vf(adev))
+ return true;
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -2546,7 +2665,8 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
- (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
if (adev->ip_blocks[i].status.hang) {
DRM_INFO("Some block need full reset!\n");
return true;
@@ -2654,7 +2774,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
mutex_lock(&adev->virt.lock_reset);
atomic_inc(&adev->gpu_reset_counter);
- adev->gfx.in_reset = true;
+ adev->in_sriov_reset = true;
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
@@ -2765,7 +2885,7 @@ give_up_reset:
dev_info(adev->dev, "GPU reset successed!\n");
}
- adev->gfx.in_reset = false;
+ adev->in_sriov_reset = false;
mutex_unlock(&adev->virt.lock_reset);
return r;
}
@@ -2902,7 +3022,6 @@ out:
}
} else {
dev_err(adev->dev, "asic resume failed (%d).\n", r);
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
if (adev->rings[i] && adev->rings[i]->sched.thread) {
kthread_unpark(adev->rings[i]->sched.thread);
@@ -2916,7 +3035,6 @@ out:
if (r) {
/* bad news, how to tell it to userspace ? */
dev_info(adev->dev, "GPU reset failed\n");
- amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
}
else {
dev_info(adev->dev, "GPU reset successed!\n");
@@ -3463,10 +3581,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
valuesize = sizeof(values);
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
- r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
- else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
- r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
- &valuesize);
+ r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
else
return -EINVAL;
@@ -3754,6 +3869,28 @@ int amdgpu_debugfs_init(struct drm_minor *minor)
{
return 0;
}
+
+static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ seq_write(m, adev->bios, adev->bios_size);
+ return 0;
+}
+
+static const struct drm_info_list amdgpu_vbios_dump_list[] = {
+ {"amdgpu_vbios",
+ amdgpu_debugfs_get_vbios_dump,
+ 0, NULL},
+};
+
+static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
+{
+ return amdgpu_debugfs_add_files(adev,
+ amdgpu_vbios_dump_list, 1);
+}
#else
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
@@ -3763,5 +3900,9 @@ static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
+static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 1cb52fd19060..e997ebbe43ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -960,8 +960,10 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
}
struct amd_vce_state*
-amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
+amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
if (idx < adev->pm.dpm.num_of_vce_states)
return &adev->pm.dpm.vce_states[idx];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 8c96a4caa715..7279fb5c3abc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -241,179 +241,125 @@ enum amdgpu_pcie_gen {
AMDGPU_PCIE_GEN_INVALID = 0xffff
};
-struct amdgpu_dpm_funcs {
- int (*get_temperature)(struct amdgpu_device *adev);
- int (*pre_set_power_state)(struct amdgpu_device *adev);
- int (*set_power_state)(struct amdgpu_device *adev);
- void (*post_set_power_state)(struct amdgpu_device *adev);
- void (*display_configuration_changed)(struct amdgpu_device *adev);
- u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
- u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
- void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
- void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
- int (*force_performance_level)(struct amdgpu_device *adev, enum amd_dpm_forced_level level);
- bool (*vblank_too_short)(struct amdgpu_device *adev);
- void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
- void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
- void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
- void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
- u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
- int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
- int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
- int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
- int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
- int (*get_sclk_od)(struct amdgpu_device *adev);
- int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
- int (*get_mclk_od)(struct amdgpu_device *adev);
- int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
- int (*check_state_equal)(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
- bool *equal);
- int (*read_sensor)(struct amdgpu_device *adev, int idx, void *value,
- int *size);
-
- struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
- int (*reset_power_profile_state)(struct amdgpu_device *adev,
- struct amd_pp_profile *request);
- int (*get_power_profile_state)(struct amdgpu_device *adev,
- struct amd_pp_profile *query);
- int (*set_power_profile_state)(struct amdgpu_device *adev,
- struct amd_pp_profile *request);
- int (*switch_power_profile)(struct amdgpu_device *adev,
- enum amd_pp_profile_type type);
-};
+#define amdgpu_dpm_pre_set_power_state(adev) \
+ ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_power_state(adev) \
+ ((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_post_set_power_state(adev) \
+ ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_display_configuration_changed(adev) \
+ ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
-#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
-#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
-#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
-#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+#define amdgpu_dpm_print_power_state(adev, ps) \
+ ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
+
+#define amdgpu_dpm_vblank_too_short(adev) \
+ ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_enable_bapm(adev, e) \
+ ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
#define amdgpu_dpm_read_sensor(adev, idx, value, size) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value), (size)) : \
- (adev)->pm.funcs->read_sensor((adev), (idx), (value), (size)))
+ ((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size)))
#define amdgpu_dpm_get_temperature(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_temperature((adev)))
+ ((adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
- (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
+ ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
#define amdgpu_dpm_get_fan_control_mode(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_fan_control_mode((adev)))
+ ((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
+ ((adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
+ ((adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_speed_rpm((adev)->powerplay.pp_handle, (s)) : \
- -EINVAL)
+ ((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
#define amdgpu_dpm_get_sclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_sclk((adev), (l)))
+ ((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_get_mclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_mclk((adev), (l)))
-
+ ((adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_force_performance_level(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->force_performance_level((adev), (l)))
+ ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_powergate_uvd(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_uvd((adev), (g)))
+ ((adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)))
#define amdgpu_dpm_powergate_vce(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_vce((adev), (g)))
+ ((adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)))
#define amdgpu_dpm_get_current_power_state(adev) \
- (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
+ ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
#define amdgpu_dpm_get_pp_num_states(adev, data) \
- (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
+ ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
#define amdgpu_dpm_get_pp_table(adev, table) \
- (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
+ ((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table))
#define amdgpu_dpm_set_pp_table(adev, buf, size) \
- (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
+ ((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size))
#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
- (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
+ ((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf))
#define amdgpu_dpm_force_clock_level(adev, type, level) \
- (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+ ((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level))
#define amdgpu_dpm_get_sclk_od(adev) \
- (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+ ((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_sclk_od(adev, value) \
- (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+ ((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value))
#define amdgpu_dpm_get_mclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+ ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_mclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+ ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
- (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
+#define amdgpu_dpm_dispatch_task(adev, task_id, input, output) \
+ ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (input), (output))
-#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal))
+#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
+ ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
#define amdgpu_dpm_get_vce_clock_state(adev, i) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
- (adev)->pm.funcs->get_vce_clock_state((adev), (i)))
+ ((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)))
-#define amdgpu_dpm_get_performance_level(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) : \
- (adev)->pm.dpm.forced_level)
+#define amdgpu_dpm_get_performance_level(adev) \
+ ((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle))
#define amdgpu_dpm_reset_power_profile_state(adev, request) \
- ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
+ ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
(adev)->powerplay.pp_handle, request))
#define amdgpu_dpm_get_power_profile_state(adev, query) \
- ((adev)->powerplay.pp_funcs->get_power_profile_state(\
+ ((adev)->powerplay.pp_funcs->get_power_profile_state(\
(adev)->powerplay.pp_handle, query))
#define amdgpu_dpm_set_power_profile_state(adev, request) \
- ((adev)->powerplay.pp_funcs->set_power_profile_state(\
+ ((adev)->powerplay.pp_funcs->set_power_profile_state(\
(adev)->powerplay.pp_handle, request))
#define amdgpu_dpm_switch_power_profile(adev, type) \
- ((adev)->powerplay.pp_funcs->switch_power_profile(\
+ ((adev)->powerplay.pp_funcs->switch_power_profile(\
(adev)->powerplay.pp_handle, type))
+#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
+ ((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
+ (adev)->powerplay.pp_handle, msg_id))
+
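With the legacy adev->pm.funcs fallback gone, callers are expected to check that a powerplay callback exists before invoking these wrappers, following the same pattern the amdgpu_device.c hunk above uses (sketch, with read_sensor as the assumed example callback):

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = amdgpu_dpm_read_sensor(adev, idx, &value, &size);
	else
		r = -EINVAL;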
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
@@ -485,7 +431,6 @@ struct amdgpu_pm {
struct amdgpu_dpm dpm;
const struct firmware *fw; /* SMC firmware */
uint32_t fw_version;
- const struct amdgpu_dpm_funcs *funcs;
uint32_t pcie_gen_mask;
uint32_t pcie_mlw_mask;
struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
@@ -551,6 +496,6 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
u8 amdgpu_encode_pci_lane_width(u32 lanes);
struct amd_vce_state*
-amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
+amdgpu_get_vce_clock_state(void *handle, u32 idx);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0f16986ec5bc..dd2f060d62a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -69,9 +69,13 @@
* - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
* - 3.18.0 - Export gpu always on cu bitmap
* - 3.19.0 - Add support for UVD MJPEG decode
+ * - 3.20.0 - Add support for local BOs
+ * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
+ * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
+ * - 3.23.0 - Add query for VRAM lost counter
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 19
+#define KMS_DRIVER_MINOR 23
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -91,7 +95,7 @@ int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
int amdgpu_runtime_pm = -1;
-unsigned amdgpu_ip_block_mask = 0xffffffff;
+uint amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
int amdgpu_vm_size = -1;
@@ -106,14 +110,14 @@ int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_no_evict = 0;
int amdgpu_direct_gma_size = 0;
-unsigned amdgpu_pcie_gen_cap = 0;
-unsigned amdgpu_pcie_lane_cap = 0;
-unsigned amdgpu_cg_mask = 0xffffffff;
-unsigned amdgpu_pg_mask = 0xffffffff;
-unsigned amdgpu_sdma_phase_quantum = 32;
+uint amdgpu_pcie_gen_cap = 0;
+uint amdgpu_pcie_lane_cap = 0;
+uint amdgpu_cg_mask = 0xffffffff;
+uint amdgpu_pg_mask = 0xffffffff;
+uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-unsigned amdgpu_pp_feature_mask = 0xffffffff;
+uint amdgpu_pp_feature_mask = 0xffffffff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
@@ -121,6 +125,7 @@ int amdgpu_cntl_sb_buf_per_se = 0;
int amdgpu_param_buf_per_se = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
+int amdgpu_compute_multipipe = -1;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -264,6 +269,9 @@ module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);
MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(lbpw, amdgpu_lbpw, int, 0444);
+MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
+
#ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -608,6 +616,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
drm_dev_unref(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
}
static void
@@ -852,6 +862,7 @@ static struct drm_driver kms_driver = {
.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
.gem_prime_vmap = amdgpu_gem_prime_vmap,
.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
+ .gem_prime_mmap = amdgpu_gem_prime_mmap,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 9afa9c097e1f..562930b17a6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -149,7 +149,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED,
- true, &gobj);
+ true, NULL, &gobj);
if (ret) {
pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
return -ENOMEM;
@@ -303,10 +303,10 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb
if (rfb->obj) {
amdgpufb_destroy_pinned_object(rfb->obj);
rfb->obj = NULL;
+ drm_framebuffer_unregister_private(&rfb->base);
+ drm_framebuffer_cleanup(&rfb->base);
}
drm_fb_helper_fini(&rfbdev->helper);
- drm_framebuffer_unregister_private(&rfb->base);
- drm_framebuffer_cleanup(&rfb->base);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 333bad749067..bd5b8065c32e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -169,6 +169,32 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
}
/**
+ * amdgpu_fence_emit_polling - emit a fence on the requested ring
+ *
+ * @ring: ring the fence is associated with
+ * @s: resulting sequence number
+ *
+ * Emits a fence command on the requested ring (all asics).
+ * Used for fence polling.
+ * Returns 0 on success, -EINVAL if no sequence pointer is provided.
+ */
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
+{
+ uint32_t seq;
+
+ if (!s)
+ return -EINVAL;
+
+ seq = ++ring->fence_drv.sync_seq;
+ amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
+ seq, AMDGPU_FENCE_FLAG_INT);
+
+ *s = seq;
+
+ return 0;
+}
+
+/**
* amdgpu_fence_schedule_fallback - schedule fallback check
*
* @ring: pointer to struct amdgpu_ring
@@ -260,7 +286,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
*/
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
- uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+ uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
struct dma_fence *fence, **ptr;
int r;
@@ -282,6 +308,30 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
}
/**
+ * amdgpu_fence_wait_polling - busy wait for a given sequence number
+ *
+ * @ring: ring the fence is associated with
+ * @wait_seq: sequence number to wait for
+ * @timeout: timeout for the wait, in usecs
+ *
+ * Busy-wait until the given sequence number has signaled on the requested
+ * ring (all asics).
+ * Returns the remaining time if the fence signaled before the timeout, 0 otherwise.
+ */
+signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
+ uint32_t wait_seq,
+ signed long timeout)
+{
+ uint32_t seq;
+
+ do {
+ seq = amdgpu_fence_read(ring);
+ udelay(5);
+ timeout -= 5;
+ } while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);
+
+ return timeout > 0 ? timeout : 0;
+}
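A sketch of how the two new polling helpers pair up (illustrative only; the 1000 usec budget is an assumption):

	uint32_t seq;
	signed long left;

	if (amdgpu_fence_emit_polling(ring, &seq) == 0) {
		/* busy-wait up to ~1 ms; 0 means the fence never signaled */
		left = amdgpu_fence_wait_polling(ring, seq, 1000);
		if (left == 0)
			DRM_ERROR("polled fence 0x%08x timed out\n", seq);
	}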
+/**
* amdgpu_fence_count_emitted - get the count of emitted fences
*
* @ring: ring the fence is associated with
@@ -300,7 +350,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
amdgpu_fence_process(ring);
emitted = 0x100000000ull;
emitted -= atomic_read(&ring->fence_drv.last_seq);
- emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
+ emitted += READ_ONCE(ring->fence_drv.sync_seq);
return lower_32_bits(emitted);
}
@@ -641,6 +691,19 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
atomic_read(&ring->fence_drv.last_seq));
seq_printf(m, "Last emitted 0x%08x\n",
ring->fence_drv.sync_seq);
+
+ if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
+ continue;
+
+ /* set in CP_VMID_PREEMPT and preemption occurred */
+ seq_printf(m, "Last preempted 0x%08x\n",
+ le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
+ /* set in CP_VMID_RESET and reset occurred */
+ seq_printf(m, "Last reset 0x%08x\n",
+ le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
+ /* Both preemption and reset occurred */
+ seq_printf(m, "Last both 0x%08x\n",
+ le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index f4370081f6e6..fe818501c520 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -332,12 +332,13 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
adev->gart.pages[p] = pagelist[i];
#endif
- if (adev->gart.ptr) {
- r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
- adev->gart.ptr);
- if (r)
- return r;
- }
+ if (!adev->gart.ptr)
+ return 0;
+
+ r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
+ adev->gart.ptr);
+ if (r)
+ return r;
mb();
amdgpu_gart_flush_gpu_tlb(adev, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7171968f261e..a418df1b9422 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -44,11 +44,12 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
- int alignment, u32 initial_domain,
- u64 flags, bool kernel,
- struct drm_gem_object **obj)
+ int alignment, u32 initial_domain,
+ u64 flags, bool kernel,
+ struct reservation_object *resv,
+ struct drm_gem_object **obj)
{
- struct amdgpu_bo *robj;
+ struct amdgpu_bo *bo;
int r;
*obj = NULL;
@@ -59,7 +60,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
retry:
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
- flags, NULL, NULL, 0, &robj);
+ flags, NULL, resv, 0, &bo);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -71,7 +72,7 @@ retry:
}
return r;
}
- *obj = &robj->gem_base;
+ *obj = &bo->gem_base;
return 0;
}
@@ -112,7 +113,17 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
+ struct mm_struct *mm;
int r;
+
+ mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
+ if (mm && mm != current->mm)
+ return -EPERM;
+
+ if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
+ abo->tbo.resv != vm->root.base.bo->tbo.resv)
+ return -EPERM;
+
r = amdgpu_bo_reserve(abo, false);
if (r)
return r;
@@ -127,35 +138,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
return 0;
}
-static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
-{
- /* if anything is swapped out don't swap it in here,
- just abort and wait for the next CS */
- if (!amdgpu_bo_gpu_accessible(bo))
- return -ERESTARTSYS;
-
- if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
- return -ERESTARTSYS;
-
- return 0;
-}
-
-static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct list_head *list)
-{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
- struct amdgpu_bo *bo =
- container_of(entry->bo, struct amdgpu_bo, tbo);
- if (amdgpu_gem_vm_check(NULL, bo))
- return false;
- }
-
- return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
-}
-
void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
@@ -165,13 +147,14 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry vm_pd;
- struct list_head list;
+ struct list_head list, duplicates;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
int r;
INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
tv.shared = true;
@@ -179,7 +162,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
@@ -189,7 +172,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
if (bo_va && --bo_va->ref_count == 0) {
amdgpu_vm_bo_rmv(adev, bo_va);
- if (amdgpu_gem_vm_ready(adev, vm, &list)) {
+ if (amdgpu_vm_ready(vm)) {
struct dma_fence *fence = NULL;
r = amdgpu_vm_clear_freed(adev, vm, &fence);
@@ -214,18 +197,24 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
union drm_amdgpu_gem_create *args = data;
+ uint64_t flags = args->in.domain_flags;
uint64_t size = args->in.bo_size;
+ struct reservation_object *resv = NULL;
struct drm_gem_object *gobj;
uint32_t handle;
- bool kernel = false;
int r;
/* reject invalid gem flags */
- if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_VRAM_CLEARED))
+ if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_VRAM_CLEARED |
+ AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
+ AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
+
return -EINVAL;
/* reject invalid gem domains */
@@ -240,7 +229,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
- kernel = true;
+ flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
size = size << AMDGPU_GDS_SHIFT;
else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
@@ -252,10 +241,25 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
}
size = roundup(size, PAGE_SIZE);
+ if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+ r = amdgpu_bo_reserve(vm->root.base.bo, false);
+ if (r)
+ return r;
+
+ resv = vm->root.base.bo->tbo.resv;
+ }
+
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
(u32)(0xffffffff & args->in.domains),
- args->in.domain_flags,
- kernel, &gobj);
+ flags, false, resv, &gobj);
+ if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+ if (!r) {
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+
+ abo->parent = amdgpu_bo_ref(vm->root.base.bo);
+ }
+ amdgpu_bo_unreserve(vm->root.base.bo);
+ }
if (r)
return r;
@@ -297,9 +301,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
/* create a gem object to contain this object in */
- r = amdgpu_gem_object_create(adev, args->size, 0,
- AMDGPU_GEM_DOMAIN_CPU, 0,
- 0, &gobj);
+ r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
+ 0, 0, NULL, &gobj);
if (r)
return r;
@@ -317,8 +320,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- down_read(&current->mm->mmap_sem);
-
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
bo->tbo.ttm->pages);
if (r)
@@ -333,8 +334,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
amdgpu_bo_unreserve(bo);
if (r)
goto free_pages;
-
- up_read(&current->mm->mmap_sem);
}
r = drm_gem_handle_create(filp, gobj, &handle);
@@ -347,7 +346,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
return 0;
free_pages:
- release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
+ release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
unlock_mmap_sem:
up_read(&current->mm->mmap_sem);
@@ -511,10 +510,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct list_head *list,
uint32_t operation)
{
- int r = -ERESTARTSYS;
+ int r;
- if (!amdgpu_gem_vm_ready(adev, vm, list))
- goto error;
+ if (!amdgpu_vm_ready(vm))
+ return;
r = amdgpu_vm_update_directories(adev, vm);
if (r)
@@ -551,7 +550,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_bo_list_entry vm_pd;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
- struct list_head list;
+ struct list_head list, duplicates;
uint64_t va_flags;
int r = 0;
@@ -580,13 +579,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->operation);
return -EINVAL;
}
- if ((args->operation == AMDGPU_VA_OP_MAP) ||
- (args->operation == AMDGPU_VA_OP_REPLACE)) {
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
- }
INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&duplicates);
if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
!(args->flags & AMDGPU_VM_PAGE_PRT)) {
gobj = drm_gem_object_lookup(filp, args->handle);
@@ -603,7 +598,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_unref;
@@ -669,6 +664,7 @@ error_unref:
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
struct amdgpu_bo *robj;
@@ -716,6 +712,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
+ if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+ amdgpu_vm_bo_invalidate(adev, robj, true);
+
amdgpu_bo_unreserve(robj);
break;
default:
@@ -745,8 +744,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
r = amdgpu_gem_object_create(adev, args->size, 0,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- ttm_bo_type_device,
- &gobj);
+ false, NULL, &gobj);
if (r)
return -ENOMEM;
@@ -788,11 +786,11 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
seq_printf(m, "\t0x%08x: %12ld byte %s",
id, amdgpu_bo_size(bo), placement);
- offset = ACCESS_ONCE(bo->tbo.mem.start);
+ offset = READ_ONCE(bo->tbo.mem.start);
if (offset != AMDGPU_BO_INVALID_OFFSET)
seq_printf(m, " @ 0x%010Lx", offset);
- pin_count = ACCESS_ONCE(bo->pin_count);
+ pin_count = READ_ONCE(bo->pin_count);
if (pin_count)
seq_printf(m, " pin count %d", pin_count);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 4f6c68fc1dd9..ef043361009f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -109,9 +109,26 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
}
}
+static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
+{
+ if (amdgpu_compute_multipipe != -1) {
+ DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
+ amdgpu_compute_multipipe);
+ return amdgpu_compute_multipipe == 1;
+ }
+
+ /* FIXME: spreading the queues across pipes causes perf regressions
+ * on POLARIS11 compute workloads */
+ if (adev->asic_type == CHIP_POLARIS11)
+ return false;
+
+ return adev->gfx.mec.num_mec > 1;
+}
+
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe, mec;
+ bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
/* policy for amdgpu compute queue ownership */
for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
@@ -125,8 +142,7 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
if (mec >= adev->gfx.mec.num_mec)
break;
- /* FIXME: spreading the queues across pipes causes perf regressions */
- if (0) {
+ if (multipipe_policy) {
/* policy: amdgpu owns the first two queues of the first MEC */
if (mec == 0 && queue < 2)
set_bit(i, adev->gfx.mec.queue_bitmap);
@@ -185,7 +201,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
int r = 0;
- mutex_init(&kiq->ring_mutex);
+ spin_lock_init(&kiq->ring_lock);
r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
if (r)
@@ -260,8 +276,13 @@ int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
/* create MQD for KIQ */
ring = &adev->gfx.kiq.ring;
if (!ring->mqd_obj) {
+ /* Originally the KIQ MQD was placed in the GTT domain, but for SRIOV
+ * the VRAM domain is a must: otherwise the hypervisor's SAVE_VF request
+ * fails after the driver is unloaded, because the MQD has been
+ * deallocated and the GART unbound. To avoid that divergence, use the
+ * VRAM domain for the KIQ MQD on both SRIOV and bare-metal.
+ */
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+ AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 0d15eb7d31d7..33535d347734 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -169,7 +169,8 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
int r;
spin_lock(&mgr->lock);
- if (atomic64_read(&mgr->available) < mem->num_pages) {
+ if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
+ atomic64_read(&mgr->available) < mem->num_pages) {
spin_unlock(&mgr->lock);
return 0;
}
@@ -244,8 +245,9 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
+ s64 result = man->size - atomic64_read(&mgr->available);
- return (u64)(man->size - atomic64_read(&mgr->available)) * PAGE_SIZE;
+ return (result > 0 ? result : 0) * PAGE_SIZE;
}
/**
@@ -265,7 +267,7 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
drm_mm_print(&mgr->mm, printer);
spin_unlock(&mgr->lock);
- drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
+ drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n",
man->size, (u64)atomic64_read(&mgr->available),
amdgpu_gtt_mgr_usage(man) >> 20);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 3ab4c65ecc8b..f5f27e4f0f7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -169,6 +169,12 @@ restart_ih:
while (adev->irq.ih.rptr != wptr) {
u32 ring_index = adev->irq.ih.rptr >> 2;
+ /* Prescreening of high-frequency interrupts */
+ if (!amdgpu_ih_prescreen_iv(adev)) {
+ adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
+ continue;
+ }
+
/* Before dispatching irq to IP blocks, send it to amdkfd */
amdgpu_amdkfd_interrupt(adev,
(const void *) &adev->irq.ih.ring[ring_index]);
@@ -190,3 +196,79 @@ restart_ih:
return IRQ_HANDLED;
}
+
+/**
+ * amdgpu_ih_add_fault - Add a page fault record
+ *
+ * @adev: amdgpu device pointer
+ * @key: 64-bit encoding of PASID and address
+ *
+ * This should be called when a retry page fault interrupt is
+ * received. If this is a new page fault, it will be added to a hash
+ * table. The return value indicates whether this is a new fault, or
+ * a fault that was already known and is already being handled.
+ *
+ * If there are too many pending page faults, this will fail. Retry
+ * interrupts should be ignored in this case until there is enough
+ * free space.
+ *
+ * Returns 0 if the fault was added, 1 if the fault was already known,
+ * -ENOSPC if there are too many pending faults.
+ */
+int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key)
+{
+ unsigned long flags;
+ int r = -ENOSPC;
+
+ if (WARN_ON_ONCE(!adev->irq.ih.faults))
+ /* Should be allocated in <IP>_ih_sw_init on GPUs that
+ * support retry faults and require retry filtering.
+ */
+ return r;
+
+ spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
+
+ /* Only let the hash table fill up to 50% for best performance */
+ if (adev->irq.ih.faults->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
+ goto unlock_out;
+
+ r = chash_table_copy_in(&adev->irq.ih.faults->hash, key, NULL);
+ if (!r)
+ adev->irq.ih.faults->count++;
+
+ /* chash_table_copy_in should never fail unless we're losing count */
+ WARN_ON_ONCE(r < 0);
+
+unlock_out:
+ spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
+ return r;
+}
+
+/**
+ * amdgpu_ih_clear_fault - Remove a page fault record
+ *
+ * @adev: amdgpu device pointer
+ * @key: 64-bit encoding of PASID and address
+ *
+ * This should be called when a page fault has been handled. Any
+ * future interrupt with this key will be processed as a new
+ * page fault.
+ */
+void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key)
+{
+ unsigned long flags;
+ int r;
+
+ if (!adev->irq.ih.faults)
+ return;
+
+ spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
+
+ r = chash_table_remove(&adev->irq.ih.faults->hash, key, NULL);
+ if (!WARN_ON_ONCE(r < 0)) {
+ adev->irq.ih.faults->count--;
+ WARN_ON_ONCE(adev->irq.ih.faults->count < 0);
+ }
+
+ spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
+}
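A sketch of how an IP-specific interrupt handler might use this pair to filter repeated retry faults (the key encoding and the fault handler are assumptions, not part of the patch):

	u64 key = ((u64)pasid << 48) | (addr >> PAGE_SHIFT); /* assumed encoding */
	int r = amdgpu_ih_add_fault(adev, key);

	if (r)			/* 1: already being handled, -ENOSPC: table full */
		return;		/* drop this retry interrupt for now */

	/* ... resolve the fault (hypothetical handler) ... */
	amdgpu_ih_clear_fault(adev, key); /* later faults on this key count as new */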
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 3de8e74e5b3a..ada89358e220 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -24,6 +24,8 @@
#ifndef __AMDGPU_IH_H__
#define __AMDGPU_IH_H__
+#include <linux/chash.h>
+
struct amdgpu_device;
/*
* vega10+ IH clients
@@ -69,6 +71,13 @@ enum amdgpu_ih_clientid
#define AMDGPU_IH_CLIENTID_LEGACY 0
+#define AMDGPU_PAGEFAULT_HASH_BITS 8
+struct amdgpu_retryfault_hashtable {
+ DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
+ spinlock_t lock;
+ int count;
+};
+
/*
* R6xx+ IH ring
*/
@@ -87,6 +96,7 @@ struct amdgpu_ih_ring {
bool use_doorbell;
bool use_bus_addr;
dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
+ struct amdgpu_retryfault_hashtable *faults;
};
#define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
@@ -109,5 +119,7 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
bool use_bus_addr);
void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
int amdgpu_ih_process(struct amdgpu_device *adev);
+int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key);
+void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4510627ae83e..0cfc68db575b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -65,6 +65,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
amdgpu_sync_create(&(*job)->sync);
amdgpu_sync_create(&(*job)->dep_sync);
amdgpu_sync_create(&(*job)->sched_sync);
+ (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
return 0;
}
@@ -103,6 +104,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+ amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->dep_sync);
@@ -139,6 +141,8 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->fence_ctx = entity->fence_context;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
+ amdgpu_ring_priority_get(job->ring,
+ amd_sched_get_job_priority(&job->base));
amd_sched_entity_push_job(&job->base);
return 0;
@@ -177,8 +181,8 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
struct dma_fence *fence = NULL;
+ struct amdgpu_device *adev;
struct amdgpu_job *job;
- struct amdgpu_fpriv *fpriv = NULL;
int r;
if (!sched_job) {
@@ -186,23 +190,25 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
return NULL;
}
job = to_amdgpu_job(sched_job);
+ adev = job->adev;
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
- if (job->vm)
- fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
/* skip ib schedule when vram is lost */
- if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv))
+ if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
+ dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED);
DRM_ERROR("Skip scheduling IBs!\n");
- else {
- r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
+ } else {
+ r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
+ &fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
}
/* if gpu reset, hw fence will be replaced here */
dma_fence_put(job->fence);
job->fence = dma_fence_get(fence);
+
amdgpu_job_free_resources(job);
return fence;
}
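/*
 * Minimal sketch (illustration only): vram_lost_counter acts as a
 * generation counter.  A job records the value at allocation time and
 * is cancelled at run time if a GPU reset has bumped it since.  The
 * standalone helper name below is an assumption.
 */
static bool example_job_vram_lost(struct amdgpu_device *adev,
				  struct amdgpu_job *job)
{
	return job->vram_lost_counter != atomic_read(&adev->vram_lost_counter);
}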
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e16229000a98..6f0b26dae3b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
+#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
@@ -269,7 +270,6 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct drm_amdgpu_info *info = data;
struct amdgpu_mode_info *minfo = &adev->mode_info;
void __user *out = (void __user *)(uintptr_t)info->return_pointer;
@@ -282,8 +282,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (!info->return_size || !info->return_pointer)
return -EINVAL;
- if (amdgpu_kms_vram_lost(adev, fpriv))
- return -ENODEV;
switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
@@ -765,6 +763,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_VRAM_LOST_COUNTER:
+ ui32 = atomic_read(&adev->vram_lost_counter);
+ return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -791,12 +792,6 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev)
vga_switcheroo_process_delayed_switch();
}
-bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
- struct amdgpu_fpriv *fpriv)
-{
- return fpriv->vram_lost_counter != atomic_read(&adev->vram_lost_counter);
-}
-
/**
* amdgpu_driver_open_kms - drm callback for open
*
@@ -825,7 +820,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
}
r = amdgpu_vm_init(adev, &fpriv->vm,
- AMDGPU_VM_CONTEXT_GFX);
+ AMDGPU_VM_CONTEXT_GFX, 0);
if (r) {
kfree(fpriv);
goto out_suspend;
@@ -841,8 +836,11 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
- if (r)
+ if (r) {
+ amdgpu_vm_fini(adev, &fpriv->vm);
+ kfree(fpriv);
goto out_suspend;
+ }
}
mutex_init(&fpriv->bo_list_lock);
@@ -850,7 +848,6 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
- fpriv->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
file_priv->driver_priv = fpriv;
out_suspend:
@@ -1020,7 +1017,9 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
/* KMS */
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 3b0f2ec6eec7..bd67f4cb8e6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -50,8 +50,10 @@ struct amdgpu_mn {
struct hlist_node node;
/* objects protected by lock */
- struct mutex lock;
+ struct rw_semaphore lock;
struct rb_root_cached objects;
+ struct mutex read_lock;
+ atomic_t recursion;
};
struct amdgpu_mn_node {
@@ -74,7 +76,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
struct amdgpu_bo *bo, *next_bo;
mutex_lock(&adev->mn_lock);
- mutex_lock(&rmn->lock);
+ down_write(&rmn->lock);
hash_del(&rmn->node);
rbtree_postorder_for_each_entry_safe(node, next_node,
&rmn->objects.rb_root, it.rb) {
@@ -84,7 +86,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
}
kfree(node);
}
- mutex_unlock(&rmn->lock);
+ up_write(&rmn->lock);
mutex_unlock(&adev->mn_lock);
mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
kfree(rmn);
@@ -106,6 +108,53 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
schedule_work(&rmn->work);
}
+
+/**
+ * amdgpu_mn_lock - take the write side lock for this mn
+ */
+void amdgpu_mn_lock(struct amdgpu_mn *mn)
+{
+ if (mn)
+ down_write(&mn->lock);
+}
+
+/**
+ * amdgpu_mn_unlock - drop the write side lock for this mn
+ */
+void amdgpu_mn_unlock(struct amdgpu_mn *mn)
+{
+ if (mn)
+ up_write(&mn->lock);
+}
+
+/**
+ * amdgpu_mn_read_lock - take the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Take the rmn read side lock.
+ */
+static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+{
+ mutex_lock(&rmn->read_lock);
+ if (atomic_inc_return(&rmn->recursion) == 1)
+ down_read_non_owner(&rmn->lock);
+ mutex_unlock(&rmn->read_lock);
+}
+
+/**
+ * amdgpu_mn_read_unlock - drop the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Drop the rmn read side lock.
+ */
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+{
+ if (atomic_dec_return(&rmn->recursion) == 0)
+ up_read_non_owner(&rmn->lock);
+}
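/*
 * Minimal generic sketch (illustration only) of the pattern above: an
 * atomic nesting counter lets overlapping invalidation callbacks share
 * a single read-side hold of an rwsem, so the write side (taken around
 * command submission via amdgpu_mn_lock()/amdgpu_mn_unlock()) blocks
 * until the outermost reader has finished.  All names below are
 * assumptions, not part of the patch.
 */
struct example_shared_read_lock {
	struct rw_semaphore rwsem;
	struct mutex enter;	/* serializes the first-in/last-out transition */
	atomic_t nesting;
};

static void example_read_lock(struct example_shared_read_lock *l)
{
	mutex_lock(&l->enter);
	if (atomic_inc_return(&l->nesting) == 1)
		down_read_non_owner(&l->rwsem);
	mutex_unlock(&l->enter);
}

static void example_read_unlock(struct example_shared_read_lock *l)
{
	if (atomic_dec_return(&l->nesting) == 0)
		up_read_non_owner(&l->rwsem);
}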
+
/**
* amdgpu_mn_invalidate_node - unmap all BOs of a node
*
@@ -126,23 +175,12 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
continue;
- r = amdgpu_bo_reserve(bo, true);
- if (r) {
- DRM_ERROR("(%ld) failed to reserve user bo\n", r);
- continue;
- }
-
r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
true, false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (r)
- DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
- amdgpu_bo_unreserve(bo);
+ amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
}
}
@@ -168,7 +206,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
/* notification is exclusive, but interval is inclusive */
end -= 1;
- mutex_lock(&rmn->lock);
+ amdgpu_mn_read_lock(rmn);
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
@@ -179,13 +217,32 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
amdgpu_mn_invalidate_node(node, start, end);
}
+}
- mutex_unlock(&rmn->lock);
+/**
+ * amdgpu_mn_invalidate_range_end - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+ * Release the lock again to allow new command submissions.
+ */
+static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+
+ amdgpu_mn_read_unlock(rmn);
}
static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release,
.invalidate_range_start = amdgpu_mn_invalidate_range_start,
+ .invalidate_range_end = amdgpu_mn_invalidate_range_end,
};
/**
@@ -195,7 +252,7 @@ static const struct mmu_notifier_ops amdgpu_mn_ops = {
*
* Creates a notifier context for current->mm.
*/
-static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
+struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
struct mm_struct *mm = current->mm;
struct amdgpu_mn *rmn;
@@ -220,8 +277,10 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
rmn->adev = adev;
rmn->mm = mm;
rmn->mn.ops = &amdgpu_mn_ops;
- mutex_init(&rmn->lock);
+ init_rwsem(&rmn->lock);
rmn->objects = RB_ROOT_CACHED;
+ mutex_init(&rmn->read_lock);
+ atomic_set(&rmn->recursion, 0);
r = __mmu_notifier_register(&rmn->mn, mm);
if (r)
@@ -267,7 +326,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
INIT_LIST_HEAD(&bos);
- mutex_lock(&rmn->lock);
+ down_write(&rmn->lock);
while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
kfree(node);
@@ -281,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
if (!node) {
node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
if (!node) {
- mutex_unlock(&rmn->lock);
+ up_write(&rmn->lock);
return -ENOMEM;
}
}
@@ -296,7 +355,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
interval_tree_insert(&node->it, &rmn->objects);
- mutex_unlock(&rmn->lock);
+ up_write(&rmn->lock);
return 0;
}
@@ -322,7 +381,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
return;
}
- mutex_lock(&rmn->lock);
+ down_write(&rmn->lock);
/* save the next list entry for later */
head = bo->mn_list.next;
@@ -337,6 +396,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
kfree(node);
}
- mutex_unlock(&rmn->lock);
+ up_write(&rmn->lock);
mutex_unlock(&adev->mn_lock);
}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
new file mode 100644
index 000000000000..d0095a3793b8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_MN_H__
+#define __AMDGPU_MN_H__
+
+/*
+ * MMU Notifier
+ */
+struct amdgpu_mn;
+
+#if defined(CONFIG_MMU_NOTIFIER)
+void amdgpu_mn_lock(struct amdgpu_mn *mn);
+void amdgpu_mn_unlock(struct amdgpu_mn *mn);
+struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev);
+int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
+void amdgpu_mn_unregister(struct amdgpu_bo *bo);
+#else
+static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
+static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
+static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
+{
+ return NULL;
+}
+static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+{
+ return -ENODEV;
+}
+static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 9e495da0bb03..ea25164e7f4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -40,9 +40,7 @@
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
- struct amdgpu_bo *bo;
-
- bo = container_of(tbo, struct amdgpu_bo, tbo);
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
amdgpu_bo_kunmap(bo);
@@ -64,11 +62,12 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
return false;
}
-static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
- struct ttm_placement *placement,
- struct ttm_place *places,
- u32 domain, u64 flags)
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ struct ttm_placement *placement = &abo->placement;
+ struct ttm_place *places = abo->placements;
+ u64 flags = abo->flags;
u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
@@ -151,27 +150,6 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
placement->busy_placement = places;
}
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
-
- amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
- domain, abo->flags);
-}
-
-static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
- struct ttm_placement *placement)
-{
- BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));
-
- memcpy(bo->placements, placement->placement,
- placement->num_placement * sizeof(struct ttm_place));
- bo->placement.num_placement = placement->num_placement;
- bo->placement.num_busy_placement = placement->num_busy_placement;
- bo->placement.placement = bo->placements;
- bo->placement.busy_placement = bo->placements;
-}
-
/**
* amdgpu_bo_create_reserved - create reserved BO for kernel use
*
@@ -303,14 +281,13 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
*cpu_addr = NULL;
}
-int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct ttm_placement *placement,
- struct reservation_object *resv,
- uint64_t init_value,
- struct amdgpu_bo **bo_ptr)
+static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ unsigned long size, int byte_align,
+ bool kernel, u32 domain, u64 flags,
+ struct sg_table *sg,
+ struct reservation_object *resv,
+ uint64_t init_value,
+ struct amdgpu_bo **bo_ptr)
{
struct amdgpu_bo *bo;
enum ttm_bo_type type;
@@ -384,13 +361,17 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif
- amdgpu_fill_placement_to_bo(bo, placement);
- /* Kernel allocation are uninterruptible */
+ bo->tbo.bdev = &adev->mman.bdev;
+ amdgpu_ttm_placement_from_domain(bo, domain);
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
+ /* Kernel allocations are uninterruptible */
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
+ if (unlikely(r != 0))
+ return r;
+
bytes_moved = atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
@@ -400,9 +381,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
else
amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
- if (unlikely(r != 0))
- return r;
-
if (kernel)
bo->tbo.priority = 1;
@@ -442,27 +420,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
unsigned long size, int byte_align,
struct amdgpu_bo *bo)
{
- struct ttm_placement placement = {0};
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
int r;
if (bo->shadow)
return 0;
- memset(&placements, 0, sizeof(placements));
- amdgpu_ttm_placement_init(adev, &placement, placements,
- AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_SHADOW);
-
- r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
- AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_SHADOW,
- NULL, &placement,
- bo->tbo.resv,
- 0,
- &bo->shadow);
+ r = amdgpu_bo_do_create(adev, size, byte_align, true,
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_SHADOW,
+ NULL, bo->tbo.resv, 0,
+ &bo->shadow);
if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo);
mutex_lock(&adev->shadow_list_lock);
@@ -484,18 +452,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
- struct ttm_placement placement = {0};
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r;
- memset(&placements, 0, sizeof(placements));
- amdgpu_ttm_placement_init(adev, &placement, placements,
- domain, parent_flags);
-
- r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain,
- parent_flags, sg, &placement, resv,
- init_value, bo_ptr);
+ r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
+ parent_flags, sg, resv, init_value, bo_ptr);
if (r)
return r;
@@ -672,7 +633,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
- unsigned fpfn, lpfn;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
return -EPERM;
@@ -704,22 +664,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
}
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ /* force to pin into visible video ram */
+ if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
amdgpu_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
- /* force to pin into visible video ram */
- if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
- !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
- (!max_offset || max_offset >
- adev->mc.visible_vram_size)) {
- if (WARN_ON_ONCE(min_offset >
- adev->mc.visible_vram_size))
- return -EINVAL;
- fpfn = min_offset >> PAGE_SHIFT;
- lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
- } else {
- fpfn = min_offset >> PAGE_SHIFT;
- lpfn = max_offset >> PAGE_SHIFT;
- }
+ unsigned fpfn, lpfn;
+
+ fpfn = min_offset >> PAGE_SHIFT;
+ lpfn = max_offset >> PAGE_SHIFT;
+
if (fpfn > bo->placements[i].fpfn)
bo->placements[i].fpfn = fpfn;
if (!bo->placements[i].lpfn ||
@@ -928,8 +882,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return;
- abo = container_of(bo, struct amdgpu_bo, tbo);
- amdgpu_vm_bo_invalidate(adev, abo);
+ abo = ttm_to_amdgpu_bo(bo);
+ amdgpu_vm_bo_invalidate(adev, abo, evict);
amdgpu_bo_kunmap(abo);
@@ -955,7 +909,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return 0;
- abo = container_of(bo, struct amdgpu_bo, tbo);
+ abo = ttm_to_amdgpu_bo(bo);
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index a288fa6d72c8..428aae048f4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -35,6 +35,7 @@
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
+ struct amdgpu_bo_va *bo_va;
struct list_head list;
struct rb_node rb;
uint64_t start;
@@ -49,12 +50,17 @@ struct amdgpu_bo_va {
struct amdgpu_vm_bo_base base;
/* protected by bo being reserved */
- struct dma_fence *last_pt_update;
unsigned ref_count;
+ /* all other members protected by the VM PD being reserved */
+ struct dma_fence *last_pt_update;
+
/* mappings for this bo_va */
struct list_head invalids;
struct list_head valids;
+
+ /* Whether the mappings are cleared or filled */
+ bool cleared;
};
struct amdgpu_bo {
@@ -88,6 +94,11 @@ struct amdgpu_bo {
};
};
+static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
+{
+ return container_of(tbo, struct amdgpu_bo, tbo);
+}
+
/**
* amdgpu_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type
@@ -182,6 +193,14 @@ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
}
}
+/**
+ * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
+ */
+static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
+{
+ return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
+}
+
int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
@@ -189,14 +208,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr);
-int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct ttm_placement *placement,
- struct reservation_object *resv,
- uint64_t init_value,
- struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7df503aedb69..a59e04f3eeba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -64,17 +64,13 @@ static const struct cg_flag_name clocks[] = {
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
- if (adev->pp_enabled)
- /* TODO */
- return;
-
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
if (power_supply_is_system_supplied() > 0)
adev->pm.dpm.ac_power = true;
else
adev->pm.dpm.ac_power = false;
- if (adev->pm.funcs->enable_bapm)
+ if (adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
mutex_unlock(&adev->pm.mutex);
}
@@ -88,9 +84,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
- if (adev->pp_enabled) {
+ if (adev->powerplay.pp_funcs->get_current_power_state)
pm = amdgpu_dpm_get_current_power_state(adev);
- } else
+ else
pm = adev->pm.dpm.user_state;
return snprintf(buf, PAGE_SIZE, "%s\n",
@@ -118,8 +114,8 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
goto fail;
}
- if (adev->pp_enabled) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
} else {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.user_state = state;
@@ -140,13 +136,17 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- enum amd_dpm_forced_level level;
+ enum amd_dpm_forced_level level = 0xff;
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return snprintf(buf, PAGE_SIZE, "off\n");
- level = amdgpu_dpm_get_performance_level(adev);
+ if (adev->powerplay.pp_funcs->get_performance_level)
+ level = amdgpu_dpm_get_performance_level(adev);
+ else
+ level = adev->pm.dpm.forced_level;
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
@@ -167,7 +167,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_dpm_forced_level level;
- enum amd_dpm_forced_level current_level;
+ enum amd_dpm_forced_level current_level = 0xff;
int ret = 0;
/* Can't force performance level when the card is off */
@@ -175,7 +175,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- current_level = amdgpu_dpm_get_performance_level(adev);
+ if (adev->powerplay.pp_funcs->get_performance_level)
+ current_level = amdgpu_dpm_get_performance_level(adev);
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -203,9 +204,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
if (current_level == level)
return count;
- if (adev->pp_enabled)
- amdgpu_dpm_force_performance_level(adev, level);
- else {
+ if (adev->powerplay.pp_funcs->force_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
count = -EINVAL;
@@ -233,7 +232,7 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct pp_states_info data;
int i, buf_len;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_pp_num_states)
amdgpu_dpm_get_pp_num_states(adev, &data);
buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
@@ -257,8 +256,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0;
- if (adev->pp_enabled) {
-
+ if (adev->powerplay.pp_funcs->get_current_power_state
+ && adev->powerplay.pp_funcs->get_pp_num_states) {
pm = amdgpu_dpm_get_current_power_state(adev);
amdgpu_dpm_get_pp_num_states(adev, &data);
@@ -280,25 +279,10 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- struct pp_states_info data;
- enum amd_pm_state_type pm = 0;
- int i;
- if (adev->pp_force_state_enabled && adev->pp_enabled) {
- pm = amdgpu_dpm_get_current_power_state(adev);
- amdgpu_dpm_get_pp_num_states(adev, &data);
-
- for (i = 0; i < data.nums; i++) {
- if (pm == data.states[i])
- break;
- }
-
- if (i == data.nums)
- i = -EINVAL;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", i);
-
- } else
+ if (adev->pp_force_state_enabled)
+ return amdgpu_get_pp_cur_state(dev, attr, buf);
+ else
return snprintf(buf, PAGE_SIZE, "\n");
}
@@ -315,7 +299,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
- else if (adev->pp_enabled) {
+ else if (adev->powerplay.pp_funcs->dispatch_tasks &&
+ adev->powerplay.pp_funcs->get_pp_num_states) {
struct pp_states_info data;
ret = kstrtoul(buf, 0, &idx);
@@ -330,7 +315,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
state != POWER_STATE_TYPE_DEFAULT) {
amdgpu_dpm_dispatch_task(adev,
- AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+ AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
adev->pp_force_state_enabled = true;
}
}
@@ -347,7 +332,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_pp_table)
size = amdgpu_dpm_get_pp_table(adev, &table);
else
return 0;
@@ -368,7 +353,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->set_pp_table)
amdgpu_dpm_set_pp_table(adev, buf, count);
return count;
@@ -380,14 +365,11 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size = 0;
-
- if (adev->pp_enabled)
- size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
- else if (adev->pm.funcs->print_clock_levels)
- size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
- return size;
+ if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
@@ -416,10 +398,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
mask |= 1 << level;
}
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
- else if (adev->pm.funcs->force_clock_level)
- adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
+
fail:
return count;
}
@@ -430,14 +411,11 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size = 0;
- if (adev->pp_enabled)
- size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
- else if (adev->pm.funcs->print_clock_levels)
- size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
-
- return size;
+ if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
@@ -465,11 +443,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
}
mask |= 1 << level;
}
-
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
- else if (adev->pm.funcs->force_clock_level)
- adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
+
fail:
return count;
}
@@ -480,14 +456,11 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size = 0;
- if (adev->pp_enabled)
- size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
- else if (adev->pm.funcs->print_clock_levels)
- size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
-
- return size;
+ if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
@@ -515,11 +488,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
}
mask |= 1 << level;
}
-
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
- else if (adev->pm.funcs->force_clock_level)
- adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
+
fail:
return count;
}
@@ -532,10 +503,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_sclk_od)
value = amdgpu_dpm_get_sclk_od(adev);
- else if (adev->pm.funcs->get_sclk_od)
- value = adev->pm.funcs->get_sclk_od(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
@@ -556,12 +525,12 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
count = -EINVAL;
goto fail;
}
-
- if (adev->pp_enabled) {
+ if (adev->powerplay.pp_funcs->set_sclk_od)
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
- } else if (adev->pm.funcs->set_sclk_od) {
- adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
+
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+ } else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_pm_compute_clocks(adev);
}
@@ -578,10 +547,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_mclk_od)
value = amdgpu_dpm_get_mclk_od(adev);
- else if (adev->pm.funcs->get_mclk_od)
- value = adev->pm.funcs->get_mclk_od(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
@@ -602,12 +569,12 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
count = -EINVAL;
goto fail;
}
-
- if (adev->pp_enabled) {
+ if (adev->powerplay.pp_funcs->set_mclk_od)
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
- } else if (adev->pm.funcs->set_mclk_od) {
- adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
+
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+ } else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_pm_compute_clocks(adev);
}
@@ -621,14 +588,11 @@ static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- int ret = 0;
+ int ret = 0xff;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_power_profile_state)
ret = amdgpu_dpm_get_power_profile_state(
adev, query);
- else if (adev->pm.funcs->get_power_profile_state)
- ret = adev->pm.funcs->get_power_profile_state(
- adev, query);
if (ret)
return ret;
@@ -675,15 +639,12 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
char *sub_str, buf_cpy[128], *tmp_str;
const char delimiter[3] = {' ', '\n', '\0'};
long int value;
- int ret = 0;
+ int ret = 0xff;
if (strncmp("reset", buf, strlen("reset")) == 0) {
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->reset_power_profile_state)
ret = amdgpu_dpm_reset_power_profile_state(
adev, request);
- else if (adev->pm.funcs->reset_power_profile_state)
- ret = adev->pm.funcs->reset_power_profile_state(
- adev, request);
if (ret) {
count = -EINVAL;
goto fail;
@@ -692,12 +653,10 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
}
if (strncmp("set", buf, strlen("set")) == 0) {
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->set_power_profile_state)
ret = amdgpu_dpm_set_power_profile_state(
adev, request);
- else if (adev->pm.funcs->set_power_profile_state)
- ret = adev->pm.funcs->set_power_profile_state(
- adev, request);
+
if (ret) {
count = -EINVAL;
goto fail;
@@ -745,13 +704,8 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
loop++;
}
-
- if (adev->pp_enabled)
- ret = amdgpu_dpm_set_power_profile_state(
- adev, request);
- else if (adev->pm.funcs->set_power_profile_state)
- ret = adev->pm.funcs->set_power_profile_state(
- adev, request);
+ if (adev->powerplay.pp_funcs->set_power_profile_state)
+ ret = amdgpu_dpm_set_power_profile_state(adev, request);
if (ret)
count = -EINVAL;
@@ -831,7 +785,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
+ if (!adev->powerplay.pp_funcs->get_temperature)
temp = 0;
else
temp = amdgpu_dpm_get_temperature(adev);
@@ -862,7 +816,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
- if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode)
return -EINVAL;
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
@@ -879,7 +833,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
int err;
int value;
- if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode)
return -EINVAL;
err = kstrtoint(buf, 10, &value);
@@ -919,9 +873,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
value = (value * 100) / 255;
- err = amdgpu_dpm_set_fan_speed_percent(adev, value);
- if (err)
- return err;
+ if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
+ err = amdgpu_dpm_set_fan_speed_percent(adev, value);
+ if (err)
+ return err;
+ }
return count;
}
@@ -932,11 +888,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
- u32 speed;
+ u32 speed = 0;
- err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
- if (err)
- return err;
+ if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
+ err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
+ if (err)
+ return err;
+ }
speed = (speed * 255) / 100;
@@ -949,11 +907,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
- u32 speed;
+ u32 speed = 0;
- err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- if (err)
- return err;
+ if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
+ if (err)
+ return err;
+ }
return sprintf(buf, "%i\n", speed);
}
@@ -996,9 +956,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
- if (adev->pp_enabled)
- return effective_mode;
-
/* Skip fan attributes if fan is not present */
if (adev->pm.no_fan &&
(attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
@@ -1008,21 +965,21 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* mask fan attributes if we have no bindings for this asic to expose */
- if ((!adev->pm.funcs->get_fan_speed_percent &&
+ if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
- (!adev->pm.funcs->get_fan_control_mode &&
+ (!adev->powerplay.pp_funcs->get_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
effective_mode &= ~S_IRUGO;
- if ((!adev->pm.funcs->set_fan_speed_percent &&
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
- (!adev->pm.funcs->set_fan_control_mode &&
+ (!adev->powerplay.pp_funcs->set_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
effective_mode &= ~S_IWUSR;
/* hide max/min values if we can't both query and manage the fan */
- if ((!adev->pm.funcs->set_fan_speed_percent &&
- !adev->pm.funcs->get_fan_speed_percent) &&
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+ !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
(attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
@@ -1055,7 +1012,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
if (!adev->pm.dpm_enabled)
return;
- if (adev->pm.funcs->get_temperature) {
+ if (adev->powerplay.pp_funcs->get_temperature) {
int temp = amdgpu_dpm_get_temperature(adev);
if (temp < adev->pm.dpm.thermal.min_temp)
@@ -1087,7 +1044,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
true : false;
/* check if the vblank period is too short to adjust the mclk */
- if (single_display && adev->pm.funcs->vblank_too_short) {
+ if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
if (amdgpu_dpm_vblank_too_short(adev))
single_display = false;
}
@@ -1216,7 +1173,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
struct amdgpu_ps *ps;
enum amd_pm_state_type dpm_state;
int ret;
- bool equal;
+ bool equal = false;
/* if dpm init failed */
if (!adev->pm.dpm_enabled)
@@ -1236,7 +1193,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
else
return;
- if (amdgpu_dpm == 1) {
+ if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
printk("switching from power state:\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
printk("switching to power state:\n");
@@ -1245,15 +1202,17 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
/* update whether vce is active */
ps->vce_active = adev->pm.dpm.vce_active;
-
- amdgpu_dpm_display_configuration_changed(adev);
+ if (adev->powerplay.pp_funcs->display_configuration_changed)
+ amdgpu_dpm_display_configuration_changed(adev);
ret = amdgpu_dpm_pre_set_power_state(adev);
if (ret)
return;
- if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
- equal = false;
+ if (adev->powerplay.pp_funcs->check_state_equal) {
+ if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
+ equal = false;
+ }
if (equal)
return;
@@ -1264,7 +1223,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
- if (adev->pm.funcs->force_performance_level) {
+ if (adev->powerplay.pp_funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
/* force low perf level for thermal */
@@ -1280,7 +1239,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
- if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
+ if (adev->powerplay.pp_funcs->powergate_uvd) {
/* enable/disable UVD */
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_uvd(adev, !enable);
@@ -1302,7 +1261,7 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
- if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
+ if (adev->powerplay.pp_funcs->powergate_vce) {
/* enable/disable VCE */
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_vce(adev, !enable);
@@ -1337,8 +1296,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
int i;
- if (adev->pp_enabled)
- /* TO DO */
+ if (adev->powerplay.pp_funcs->print_power_state == NULL)
return;
for (i = 0; i < adev->pm.dpm.num_ps; i++)
@@ -1353,10 +1311,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.sysfs_initialized)
return 0;
- if (!adev->pp_enabled) {
- if (adev->pm.funcs->get_temperature == NULL)
- return 0;
- }
+ if (adev->pm.dpm_enabled == 0)
+ return 0;
+
+ if (adev->powerplay.pp_funcs->get_temperature == NULL)
+ return 0;
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
DRIVER_NAME, adev,
@@ -1379,27 +1338,26 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
return ret;
}
- if (adev->pp_enabled) {
- ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
- if (ret) {
- DRM_ERROR("failed to create device file pp_num_states\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
- if (ret) {
- DRM_ERROR("failed to create device file pp_cur_state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
- if (ret) {
- DRM_ERROR("failed to create device file pp_force_state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_table);
- if (ret) {
- DRM_ERROR("failed to create device file pp_table\n");
- return ret;
- }
+
+ ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_num_states\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_cur_state\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_force_state\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_table);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_table\n");
+ return ret;
}
ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
@@ -1455,16 +1413,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
+ if (adev->pm.dpm_enabled == 0)
+ return;
+
if (adev->pm.int_hwmon_dev)
hwmon_device_unregister(adev->pm.int_hwmon_dev);
device_remove_file(adev->dev, &dev_attr_power_dpm_state);
device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
- if (adev->pp_enabled) {
- device_remove_file(adev->dev, &dev_attr_pp_num_states);
- device_remove_file(adev->dev, &dev_attr_pp_cur_state);
- device_remove_file(adev->dev, &dev_attr_pp_force_state);
- device_remove_file(adev->dev, &dev_attr_pp_table);
- }
+
+ device_remove_file(adev->dev, &dev_attr_pp_num_states);
+ device_remove_file(adev->dev, &dev_attr_pp_cur_state);
+ device_remove_file(adev->dev, &dev_attr_pp_force_state);
+ device_remove_file(adev->dev, &dev_attr_pp_table);
+
device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
@@ -1495,8 +1456,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring);
}
- if (adev->pp_enabled) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL);
} else {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.new_active_crtcs = 0;
@@ -1630,15 +1591,15 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
seq_printf(m, "PX asic powered off\n");
- } else if (adev->pp_enabled) {
- return amdgpu_debugfs_pm_info_pp(m, adev);
- } else {
+ } else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
mutex_lock(&adev->pm.mutex);
- if (adev->pm.funcs->debugfs_print_current_performance_level)
- adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
+ if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
+ adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
else
seq_printf(m, "Debugfs support not implemented for this asic\n");
mutex_unlock(&adev->pm.mutex);
+ } else {
+ return amdgpu_debugfs_pm_info_pp(m, adev);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index b7e1c026c0c8..5f5aa5fddc16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -34,24 +34,6 @@
#include "cik_dpm.h"
#include "vi_dpm.h"
-static int amdgpu_create_pp_handle(struct amdgpu_device *adev)
-{
- struct amd_pp_init pp_init;
- struct amd_powerplay *amd_pp;
- int ret;
-
- amd_pp = &(adev->powerplay);
- pp_init.chip_family = adev->family;
- pp_init.chip_id = adev->asic_type;
- pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
- pp_init.feature_mask = amdgpu_pp_feature_mask;
- pp_init.device = amdgpu_cgs_create_device(adev);
- ret = amd_powerplay_create(&pp_init, &(amd_pp->pp_handle));
- if (ret)
- return -EINVAL;
- return 0;
-}
-
static int amdgpu_pp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -59,7 +41,6 @@ static int amdgpu_pp_early_init(void *handle)
int ret = 0;
amd_pp = &(adev->powerplay);
- adev->pp_enabled = false;
amd_pp->pp_handle = (void *)adev;
switch (adev->asic_type) {
@@ -73,9 +54,7 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_STONEY:
case CHIP_VEGA10:
case CHIP_RAVEN:
- adev->pp_enabled = true;
- if (amdgpu_create_pp_handle(adev))
- return -EINVAL;
+ amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
amd_pp->ip_funcs = &pp_ip_funcs;
amd_pp->pp_funcs = &pp_dpm_funcs;
break;
@@ -87,17 +66,26 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_OLAND:
case CHIP_HAINAN:
amd_pp->ip_funcs = &si_dpm_ip_funcs;
+ amd_pp->pp_funcs = &si_dpm_funcs;
break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
- amd_pp->ip_funcs = &ci_dpm_ip_funcs;
+ if (amdgpu_dpm == -1) {
+ amd_pp->ip_funcs = &ci_dpm_ip_funcs;
+ amd_pp->pp_funcs = &ci_dpm_funcs;
+ } else {
+ amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
+ amd_pp->ip_funcs = &pp_ip_funcs;
+ amd_pp->pp_funcs = &pp_dpm_funcs;
+ }
break;
case CHIP_KABINI:
case CHIP_MULLINS:
case CHIP_KAVERI:
amd_pp->ip_funcs = &kv_dpm_ip_funcs;
+ amd_pp->pp_funcs = &kv_dpm_funcs;
break;
#endif
default:
@@ -107,12 +95,9 @@ static int amdgpu_pp_early_init(void *handle)
if (adev->powerplay.ip_funcs->early_init)
ret = adev->powerplay.ip_funcs->early_init(
- adev->powerplay.pp_handle);
+ amd_pp->cgs_device ? amd_pp->cgs_device :
+ amd_pp->pp_handle);
- if (ret == PP_DPM_DISABLED) {
- adev->pm.dpm_enabled = false;
- return 0;
- }
return ret;
}
@@ -126,11 +111,6 @@ static int amdgpu_pp_late_init(void *handle)
ret = adev->powerplay.ip_funcs->late_init(
adev->powerplay.pp_handle);
- if (adev->pp_enabled && adev->pm.dpm_enabled) {
- amdgpu_pm_sysfs_init(adev);
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
- }
-
return ret;
}
@@ -165,21 +145,13 @@ static int amdgpu_pp_hw_init(void *handle)
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
amdgpu_ucode_init_bo(adev);
if (adev->powerplay.ip_funcs->hw_init)
ret = adev->powerplay.ip_funcs->hw_init(
adev->powerplay.pp_handle);
- if (ret == PP_DPM_DISABLED) {
- adev->pm.dpm_enabled = false;
- return 0;
- }
-
- if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev))
- adev->pm.dpm_enabled = true;
-
return ret;
}
@@ -188,14 +160,11 @@ static int amdgpu_pp_hw_fini(void *handle)
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->pp_enabled && adev->pm.dpm_enabled)
- amdgpu_pm_sysfs_fini(adev);
-
if (adev->powerplay.ip_funcs->hw_fini)
ret = adev->powerplay.ip_funcs->hw_fini(
adev->powerplay.pp_handle);
- if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
amdgpu_ucode_fini_bo(adev);
return ret;
@@ -209,9 +178,8 @@ static void amdgpu_pp_late_fini(void *handle)
adev->powerplay.ip_funcs->late_fini(
adev->powerplay.pp_handle);
-
- if (adev->pp_enabled)
- amd_powerplay_destroy(adev->powerplay.pp_handle);
+ if (adev->powerplay.cgs_device)
+ amdgpu_cgs_destroy_device(adev->powerplay.cgs_device);
}
static int amdgpu_pp_suspend(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 5b3f92891f89..90af8e82b16a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -57,6 +57,40 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
ttm_bo_kunmap(&bo->dma_buf_vmap);
}
+int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ unsigned asize = amdgpu_bo_size(bo);
+ int ret;
+
+ if (!vma->vm_file)
+ return -ENODEV;
+
+ if (adev == NULL)
+ return -ENODEV;
+
+ /* Check for valid size. */
+ if (asize < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
+ (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
+ return -EPERM;
+ }
+ vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
+
+ /* prime mmap does not need to check access, so allow here */
+ ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
+ drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
+
+ return ret;
+}
+
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
@@ -136,7 +170,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
- if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
+ bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
return drm_gem_prime_export(dev, gobj, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8c2204c7b384..447d446b5015 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -57,21 +57,23 @@ static int psp_sw_init(void *handle)
psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
psp->ring_init = psp_v3_1_ring_init;
psp->ring_create = psp_v3_1_ring_create;
+ psp->ring_stop = psp_v3_1_ring_stop;
psp->ring_destroy = psp_v3_1_ring_destroy;
psp->cmd_submit = psp_v3_1_cmd_submit;
psp->compare_sram_data = psp_v3_1_compare_sram_data;
psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
+ psp->mode1_reset = psp_v3_1_mode1_reset;
break;
case CHIP_RAVEN:
-#if 0
psp->init_microcode = psp_v10_0_init_microcode;
-#endif
psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
psp->ring_init = psp_v10_0_ring_init;
psp->ring_create = psp_v10_0_ring_create;
+ psp->ring_stop = psp_v10_0_ring_stop;
psp->ring_destroy = psp_v10_0_ring_destroy;
psp->cmd_submit = psp_v10_0_cmd_submit;
psp->compare_sram_data = psp_v10_0_compare_sram_data;
+ psp->mode1_reset = psp_v10_0_mode1_reset;
break;
default:
return -EINVAL;
@@ -90,6 +92,12 @@ static int psp_sw_init(void *handle)
static int psp_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->psp.sos_fw);
+ adev->psp.sos_fw = NULL;
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
return 0;
}
@@ -253,15 +261,18 @@ static int psp_asd_load(struct psp_context *psp)
static int psp_hw_start(struct psp_context *psp)
{
+ struct amdgpu_device *adev = psp->adev;
int ret;
- ret = psp_bootloader_load_sysdrv(psp);
- if (ret)
- return ret;
+ if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
+ ret = psp_bootloader_load_sysdrv(psp);
+ if (ret)
+ return ret;
- ret = psp_bootloader_load_sos(psp);
- if (ret)
- return ret;
+ ret = psp_bootloader_load_sos(psp);
+ if (ret)
+ return ret;
+ }
ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
if (ret)
@@ -453,6 +464,16 @@ static int psp_hw_fini(void *handle)
static int psp_suspend(void *handle)
{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct psp_context *psp = &adev->psp;
+
+ ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
+ if (ret) {
+ DRM_ERROR("PSP ring stop failed\n");
+ return ret;
+ }
+
return 0;
}
@@ -487,6 +508,22 @@ failed:
return ret;
}
+static bool psp_check_reset(void* handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->flags & AMD_IS_APU)
+ return true;
+
+ return false;
+}
+
+static int psp_reset(void* handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ return psp_mode1_reset(&adev->psp);
+}
+
static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
enum AMDGPU_UCODE_ID ucode_type)
{
@@ -530,8 +567,9 @@ const struct amd_ip_funcs psp_ip_funcs = {
.suspend = psp_suspend,
.resume = psp_resume,
.is_idle = NULL,
+ .check_soft_reset = psp_check_reset,
.wait_for_idle = NULL,
- .soft_reset = NULL,
+ .soft_reset = psp_reset,
.set_clockgating_state = psp_set_clockgating_state,
.set_powergating_state = psp_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 538fa9dbfb21..ce4654550416 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -66,6 +66,8 @@ struct psp_context
struct psp_gfx_cmd_resp *cmd);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type);
+ int (*ring_stop)(struct psp_context *psp,
+ enum psp_ring_type ring_type);
int (*ring_destroy)(struct psp_context *psp,
enum psp_ring_type ring_type);
int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode,
@@ -74,6 +76,7 @@ struct psp_context
struct amdgpu_firmware_info *ucode,
enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
+ int (*mode1_reset)(struct psp_context *psp);
/* fence buffer */
struct amdgpu_bo *fw_pri_bo;
@@ -123,6 +126,7 @@ struct amdgpu_psp_funcs {
#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type))
#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type))
#define psp_ring_create(psp, type) (psp)->ring_create((psp), (type))
+#define psp_ring_stop(psp, type) (psp)->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type)))
#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
(psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
@@ -136,6 +140,8 @@ struct amdgpu_psp_funcs {
((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false)
+#define psp_mode1_reset(psp) \
+ ((psp)->mode1_reset ? (psp)->mode1_reset((psp)) : false)
extern const struct amd_ip_funcs psp_ip_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index befc09b68543..190e28cb827e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
static int amdgpu_lru_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper,
- int user_ring,
+ int user_ring, bool lru_pipe_order,
struct amdgpu_ring **out_ring)
{
int r, i, j;
@@ -139,7 +139,7 @@ static int amdgpu_lru_map(struct amdgpu_device *adev,
}
r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist,
- j, out_ring);
+ j, lru_pipe_order, out_ring);
if (r)
return r;
@@ -284,8 +284,10 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
r = amdgpu_identity_map(adev, mapper, ring, out_ring);
break;
case AMDGPU_HW_IP_DMA:
+ r = amdgpu_lru_map(adev, mapper, ring, false, out_ring);
+ break;
case AMDGPU_HW_IP_COMPUTE:
- r = amdgpu_lru_map(adev, mapper, ring, out_ring);
+ r = amdgpu_lru_map(adev, mapper, ring, true, out_ring);
break;
default:
*out_ring = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 5ce65280b396..a98fbbb4739f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -136,7 +136,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
if (ring->funcs->end_use)
ring->funcs->end_use(ring);
- amdgpu_ring_lru_touch(ring->adev, ring);
+ if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ)
+ amdgpu_ring_lru_touch(ring->adev, ring);
}
/**
@@ -155,6 +156,75 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
}
/**
+ * amdgpu_ring_priority_put - restore a ring's priority
+ *
+ * @ring: amdgpu_ring structure holding the information
+ * @priority: target priority
+ *
+ * Release a request for executing at @priority
+ */
+void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority)
+{
+ int i;
+
+ if (!ring->funcs->set_priority)
+ return;
+
+ if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
+ return;
+
+ /* no need to restore if the job is already at the lowest priority */
+ if (priority == AMD_SCHED_PRIORITY_NORMAL)
+ return;
+
+ mutex_lock(&ring->priority_mutex);
+ /* something higher prio is executing, no need to decay */
+ if (ring->priority > priority)
+ goto out_unlock;
+
+ /* decay priority to the next level with a job available */
+ for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
+ if (i == AMD_SCHED_PRIORITY_NORMAL
+ || atomic_read(&ring->num_jobs[i])) {
+ ring->priority = i;
+ ring->funcs->set_priority(ring, i);
+ break;
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&ring->priority_mutex);
+}
+
+/**
+ * amdgpu_ring_priority_get - change the ring's priority
+ *
+ * @ring: amdgpu_ring structure holding the information
+ * @priority: target priority
+ *
+ * Request a ring's priority to be raised to @priority (refcounted).
+ */
+void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority)
+{
+ if (!ring->funcs->set_priority)
+ return;
+
+ atomic_inc(&ring->num_jobs[priority]);
+
+ mutex_lock(&ring->priority_mutex);
+ if (priority <= ring->priority)
+ goto out_unlock;
+
+ ring->priority = priority;
+ ring->funcs->set_priority(ring, priority);
+
+out_unlock:
+ mutex_unlock(&ring->priority_mutex);
+}
+
+/**
* amdgpu_ring_init - init driver ring struct.
*
* @adev: amdgpu_device pointer
@@ -169,7 +239,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned max_dw, struct amdgpu_irq_src *irq_src,
unsigned irq_type)
{
- int r;
+ int r, i;
int sched_hw_submission = amdgpu_sched_hw_submission;
/* Set the hw submission limit higher for KIQ because
@@ -247,9 +317,14 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
ring->max_dw = max_dw;
+ ring->priority = AMD_SCHED_PRIORITY_NORMAL;
+ mutex_init(&ring->priority_mutex);
INIT_LIST_HEAD(&ring->lru_list);
amdgpu_ring_lru_touch(adev, ring);
+ for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
+ atomic_set(&ring->num_jobs[i], 0);
+
if (amdgpu_debugfs_ring_init(adev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
@@ -315,14 +390,16 @@ static bool amdgpu_ring_is_blacklisted(struct amdgpu_ring *ring,
* @type: amdgpu_ring_type enum
* @blacklist: blacklisted ring ids array
* @num_blacklist: number of entries in @blacklist
+ * @lru_pipe_order: find a ring from the least recently used pipe
* @ring: output ring
*
* Retrieve the amdgpu_ring structure for the least recently used ring of
* a specific IP block (all asics).
* Returns 0 on success, error on failure.
*/
-int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
- int num_blacklist, struct amdgpu_ring **ring)
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
+ int *blacklist, int num_blacklist,
+ bool lru_pipe_order, struct amdgpu_ring **ring)
{
struct amdgpu_ring *entry;
@@ -337,10 +414,23 @@ int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
if (amdgpu_ring_is_blacklisted(entry, blacklist, num_blacklist))
continue;
- *ring = entry;
- amdgpu_ring_lru_touch_locked(adev, *ring);
- break;
+ if (!*ring) {
+ *ring = entry;
+
+ /* We are done for ring LRU */
+ if (!lru_pipe_order)
+ break;
+ }
+
+ /* Move all rings on the same pipe to the end of the list */
+ if (entry->pipe == (*ring)->pipe)
+ amdgpu_ring_lru_touch_locked(adev, entry);
}
+
+ /* Move the ring we found to the end of the list */
+ if (*ring)
+ amdgpu_ring_lru_touch_locked(adev, *ring);
+
spin_unlock(&adev->ring_lru_list_lock);
if (!*ring) {
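
Note (editor, not part of the patch): amdgpu_ring_priority_get() and amdgpu_ring_priority_put() added above are meant to be balanced around a job's lifetime, so the ring only stays boosted while at least one job at that level is outstanding. A minimal sketch of the expected pairing; the surrounding functions are hypothetical and the real wiring lives in the job submission code:

	/* Illustrative pairing, assuming one get per pushed job and one put
	 * once its fence signals. */
	static void example_job_pushed(struct amdgpu_ring *ring,
				       enum amd_sched_priority prio)
	{
		/* refcounted: ring->priority only ever rises here */
		amdgpu_ring_priority_get(ring, prio);
	}

	static void example_job_completed(struct amdgpu_ring *ring,
					  enum amd_sched_priority prio)
	{
		/* decays to the highest level that still has jobs queued */
		amdgpu_ring_priority_put(ring, prio);
	}
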
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 322d25299a00..b18c2b96691f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -24,6 +24,7 @@
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__
+#include <drm/amdgpu_drm.h>
#include "gpu_scheduler.h"
/* max number of rings */
@@ -56,6 +57,7 @@ struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
+struct amdgpu_job;
/*
* Fences.
@@ -88,8 +90,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
+ uint32_t wait_seq,
+ signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
/*
@@ -147,6 +153,9 @@ struct amdgpu_ring_funcs {
void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+ /* priority functions */
+ void (*set_priority) (struct amdgpu_ring *ring,
+ enum amd_sched_priority priority);
};
struct amdgpu_ring {
@@ -187,6 +196,12 @@ struct amdgpu_ring {
volatile u32 *cond_exe_cpu_addr;
unsigned vm_inv_eng;
bool has_compute_vm_bug;
+
+ atomic_t num_jobs[AMD_SCHED_PRIORITY_MAX];
+ struct mutex priority_mutex;
+ /* protected by priority_mutex */
+ int priority;
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
#endif
@@ -197,12 +212,17 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
+void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority);
+void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
-int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
- int num_blacklist, struct amdgpu_ring **ring);
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
+ int *blacklist, int num_blacklist,
+ bool lru_pipe_order, struct amdgpu_ring **ring);
void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
new file mode 100644
index 000000000000..290cc3f9c433
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2017 Valve Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andres Rodriguez <andresx7@gmail.com>
+ */
+
+#include <linux/fdtable.h>
+#include <linux/pid.h>
+#include <drm/amdgpu_drm.h>
+#include "amdgpu.h"
+
+#include "amdgpu_vm.h"
+
+enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+{
+ switch (amdgpu_priority) {
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ return AMD_SCHED_PRIORITY_HIGH_HW;
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ return AMD_SCHED_PRIORITY_HIGH_SW;
+ case AMDGPU_CTX_PRIORITY_NORMAL:
+ return AMD_SCHED_PRIORITY_NORMAL;
+ case AMDGPU_CTX_PRIORITY_LOW:
+ case AMDGPU_CTX_PRIORITY_VERY_LOW:
+ return AMD_SCHED_PRIORITY_LOW;
+ case AMDGPU_CTX_PRIORITY_UNSET:
+ return AMD_SCHED_PRIORITY_UNSET;
+ default:
+ WARN(1, "Invalid context priority %d\n", amdgpu_priority);
+ return AMD_SCHED_PRIORITY_INVALID;
+ }
+}
+
+static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ int fd,
+ enum amd_sched_priority priority)
+{
+ struct file *filp = fcheck(fd);
+ struct drm_file *file;
+ struct pid *pid;
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ctx *ctx;
+ uint32_t id;
+
+ if (!filp)
+ return -EINVAL;
+
+ pid = get_pid(((struct drm_file *)filp->private_data)->pid);
+
+ mutex_lock(&adev->ddev->filelist_mutex);
+ list_for_each_entry(file, &adev->ddev->filelist, lhead) {
+ if (file->pid != pid)
+ continue;
+
+ fpriv = file->driver_priv;
+ idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
+ amdgpu_ctx_priority_override(ctx, priority);
+ }
+ mutex_unlock(&adev->ddev->filelist_mutex);
+
+ put_pid(pid);
+
+ return 0;
+}
+
+int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_sched *args = data;
+ struct amdgpu_device *adev = dev->dev_private;
+ enum amd_sched_priority priority;
+ int r;
+
+ priority = amdgpu_to_sched_priority(args->in.priority);
+ if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID)
+ return -EINVAL;
+
+ switch (args->in.op) {
+ case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
+ r = amdgpu_sched_process_priority_override(adev,
+ args->in.fd,
+ priority);
+ break;
+ default:
+ DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
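
Note (editor, not part of the patch): from userspace the handler above is driven with a union drm_amdgpu_sched whose in.op, in.fd, in.priority and in.flags fields match the code in amdgpu_sched_ioctl(). A rough sketch of a caller follows; the DRM_IOCTL_AMDGPU_SCHED request macro and the AMDGPU_CTX_PRIORITY_HIGH value come from the uapi header, which is outside this hunk, and the permission check lives in the ioctl table, so treat those details as assumptions:

	/* Hypothetical userspace caller: raise the priority of every context
	 * owned by the process behind @victim_fd. */
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/amdgpu_drm.h>

	static int example_override_priority(int drm_fd, int victim_fd)
	{
		union drm_amdgpu_sched args;

		memset(&args, 0, sizeof(args));
		args.in.op = AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE;
		args.in.fd = victim_fd;		/* DRM fd of the target process */
		args.in.priority = AMDGPU_CTX_PRIORITY_HIGH;
		args.in.flags = 0;		/* must be zero, see the handler */

		return ioctl(drm_fd, DRM_IOCTL_AMDGPU_SCHED, &args);
	}
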
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
index 9ef96aab3f24..b28c067d3822 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2017 Valve Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,16 +19,16 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Andres Rodriguez <andresx7@gmail.com>
*/
-#ifndef _EVENTINIT_H_
-#define _EVENTINIT_H_
+#ifndef __AMDGPU_SCHED_H__
+#define __AMDGPU_SCHED_H__
-#define PEM_CURRENT_POWERPLAY_FEATURE_VERSION 4
+#include <drm/drmP.h>
-void pem_init_feature_info(struct pp_eventmgr *eventmgr);
-void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr);
-int pem_register_interrupts(struct pp_eventmgr *eventmgr);
-int pem_unregister_interrupts(struct pp_eventmgr *eventmgr);
+enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
+int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
-#endif /* _EVENTINIT_H_ */
+#endif // __AMDGPU_SCHED_H__
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c586f44312f9..a4bf21f8f1c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -169,14 +169,14 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
*
* @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence
- * @shared: true if we should only sync to the exclusive fence
+ * @explicit_sync: true if we should only sync to the exclusive fence
*
* Sync to the fence
*/
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
- void *owner)
+ void *owner, bool explicit_sync)
{
struct reservation_object_list *flist;
struct dma_fence *f;
@@ -191,6 +191,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
f = reservation_object_get_excl(resv);
r = amdgpu_sync_fence(adev, sync, f);
+ if (explicit_sync)
+ return r;
+
flist = reservation_object_get_list(resv);
if (!flist || r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index dc7687993317..70d7e3a279a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -45,7 +45,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
- void *owner);
+ void *owner,
+ bool explicit_sync);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
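
Note (editor, not part of the patch): the new explicit_sync argument lets a caller opt out of implicit synchronization; when it is true, amdgpu_sync_resv() adds only the exclusive fence and returns before touching the shared fences. A minimal sketch of the two call styles, with hypothetical surrounding job setup:

	/* Implicit sync (previous behaviour): wait on the exclusive fence and
	 * on all shared fences of the BO's reservation object. */
	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
			     AMDGPU_FENCE_OWNER_UNDEFINED, false);

	/* Explicit sync: only the exclusive fence is honoured; the caller is
	 * expected to express any other dependencies itself. */
	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
			     AMDGPU_FENCE_OWNER_UNDEFINED, true);
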
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 34c99a3c8d2d..f337c316ec2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -15,62 +15,6 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
-TRACE_EVENT(amdgpu_ttm_tt_populate,
- TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
- TP_ARGS(adev, dma_address, phys_address),
- TP_STRUCT__entry(
- __field(uint16_t, domain)
- __field(uint8_t, bus)
- __field(uint8_t, slot)
- __field(uint8_t, func)
- __field(uint64_t, dma)
- __field(uint64_t, phys)
- ),
- TP_fast_assign(
- __entry->domain = pci_domain_nr(adev->pdev->bus);
- __entry->bus = adev->pdev->bus->number;
- __entry->slot = PCI_SLOT(adev->pdev->devfn);
- __entry->func = PCI_FUNC(adev->pdev->devfn);
- __entry->dma = dma_address;
- __entry->phys = phys_address;
- ),
- TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
- (unsigned)__entry->domain,
- (unsigned)__entry->bus,
- (unsigned)__entry->slot,
- (unsigned)__entry->func,
- (unsigned long long)__entry->dma,
- (unsigned long long)__entry->phys)
-);
-
-TRACE_EVENT(amdgpu_ttm_tt_unpopulate,
- TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
- TP_ARGS(adev, dma_address, phys_address),
- TP_STRUCT__entry(
- __field(uint16_t, domain)
- __field(uint8_t, bus)
- __field(uint8_t, slot)
- __field(uint8_t, func)
- __field(uint64_t, dma)
- __field(uint64_t, phys)
- ),
- TP_fast_assign(
- __entry->domain = pci_domain_nr(adev->pdev->bus);
- __entry->bus = adev->pdev->bus->number;
- __entry->slot = PCI_SLOT(adev->pdev->devfn);
- __entry->func = PCI_FUNC(adev->pdev->devfn);
- __entry->dma = dma_address;
- __entry->phys = phys_address;
- ),
- TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
- (unsigned)__entry->domain,
- (unsigned)__entry->bus,
- (unsigned)__entry->slot,
- (unsigned)__entry->func,
- (unsigned long long)__entry->dma,
- (unsigned long long)__entry->phys)
-);
-
TRACE_EVENT(amdgpu_mm_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
@@ -474,5 +418,5 @@ TRACE_EVENT(amdgpu_ttm_bo_move,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/amd/amdgpu
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
index 89680d554ed8..b160b958e5fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
@@ -1,5 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright Red Hat Inc 2010.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
* Author : Dave Airlie <airlied@redhat.com>
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index bc746131987f..ad5bf86ee8a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -42,7 +42,9 @@
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
+#include <linux/iommu.h>
#include "amdgpu.h"
+#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "bif/bif_4_1_d.h"
@@ -208,7 +210,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
placement->num_busy_placement = 1;
return;
}
- abo = container_of(bo, struct amdgpu_bo, tbo);
+ abo = ttm_to_amdgpu_bo(bo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
if (adev->mman.buffer_funcs &&
@@ -256,7 +258,7 @@ gtt:
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
if (amdgpu_ttm_tt_get_usermm(bo->ttm))
return -EPERM;
@@ -288,97 +290,177 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
return addr;
}
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+/**
+ * amdgpu_find_mm_node - Helper function finds the drm_mm_node
+ * corresponding to @offset. It also modifies the offset to be
+ * within the drm_mm_node returned
+ */
+static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
+ unsigned long *offset)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct drm_mm_node *mm_node = mem->mm_node;
- struct drm_mm_node *old_mm, *new_mm;
- uint64_t old_start, old_size, new_start, new_size;
- unsigned long num_pages;
- struct dma_fence *fence = NULL;
- int r;
+ while (*offset >= (mm_node->size << PAGE_SHIFT)) {
+ *offset -= (mm_node->size << PAGE_SHIFT);
+ ++mm_node;
+ }
+ return mm_node;
+}
- BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+/**
+ * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
+ *
+ * The function copies @size bytes from {src->mem + src->offset} to
+ * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
+ * move and different for a BO to BO copy.
+ *
+ * @f: Returns the last fence if multiple jobs are submitted.
+ */
+int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ struct amdgpu_copy_mem *src,
+ struct amdgpu_copy_mem *dst,
+ uint64_t size,
+ struct reservation_object *resv,
+ struct dma_fence **f)
+{
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct drm_mm_node *src_mm, *dst_mm;
+ uint64_t src_node_start, dst_node_start, src_node_size,
+ dst_node_size, src_page_offset, dst_page_offset;
+ struct dma_fence *fence = NULL;
+ int r = 0;
+ const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
+ AMDGPU_GPU_PAGE_SIZE);
if (!ring->ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
- old_mm = old_mem->mm_node;
- old_size = old_mm->size;
- old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem);
+ src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
+ src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
+ src->offset;
+ src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
+ src_page_offset = src_node_start & (PAGE_SIZE - 1);
- new_mm = new_mem->mm_node;
- new_size = new_mm->size;
- new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
+ dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
+ dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
+ dst->offset;
+ dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
+ dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
- num_pages = new_mem->num_pages;
mutex_lock(&adev->mman.gtt_window_lock);
- while (num_pages) {
- unsigned long cur_pages = min(min(old_size, new_size),
- (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
- uint64_t from = old_start, to = new_start;
+
+ while (size) {
+ unsigned long cur_size;
+ uint64_t from = src_node_start, to = dst_node_start;
struct dma_fence *next;
- if (old_mem->mem_type == TTM_PL_TT &&
- !amdgpu_gtt_mgr_is_allocated(old_mem)) {
- r = amdgpu_map_buffer(bo, old_mem, cur_pages,
- old_start, 0, ring, &from);
+ /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
+ * begins at an offset, then adjust the size accordingly
+ */
+ cur_size = min3(min(src_node_size, dst_node_size), size,
+ GTT_MAX_BYTES);
+ if (cur_size + src_page_offset > GTT_MAX_BYTES ||
+ cur_size + dst_page_offset > GTT_MAX_BYTES)
+ cur_size -= max(src_page_offset, dst_page_offset);
+
+ /* Map only what needs to be accessed. Map src to window 0 and
+ * dst to window 1
+ */
+ if (src->mem->mem_type == TTM_PL_TT &&
+ !amdgpu_gtt_mgr_is_allocated(src->mem)) {
+ r = amdgpu_map_buffer(src->bo, src->mem,
+ PFN_UP(cur_size + src_page_offset),
+ src_node_start, 0, ring,
+ &from);
if (r)
goto error;
+ /* Adjust the offset because amdgpu_map_buffer returns
+ * start of mapped page
+ */
+ from += src_page_offset;
}
- if (new_mem->mem_type == TTM_PL_TT &&
- !amdgpu_gtt_mgr_is_allocated(new_mem)) {
- r = amdgpu_map_buffer(bo, new_mem, cur_pages,
- new_start, 1, ring, &to);
+ if (dst->mem->mem_type == TTM_PL_TT &&
+ !amdgpu_gtt_mgr_is_allocated(dst->mem)) {
+ r = amdgpu_map_buffer(dst->bo, dst->mem,
+ PFN_UP(cur_size + dst_page_offset),
+ dst_node_start, 1, ring,
+ &to);
if (r)
goto error;
+ to += dst_page_offset;
}
- r = amdgpu_copy_buffer(ring, from, to,
- cur_pages * PAGE_SIZE,
- bo->resv, &next, false, true);
+ r = amdgpu_copy_buffer(ring, from, to, cur_size,
+ resv, &next, false, true);
if (r)
goto error;
dma_fence_put(fence);
fence = next;
- num_pages -= cur_pages;
- if (!num_pages)
+ size -= cur_size;
+ if (!size)
break;
- old_size -= cur_pages;
- if (!old_size) {
- old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem);
- old_size = old_mm->size;
+ src_node_size -= cur_size;
+ if (!src_node_size) {
+ src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
+ src->mem);
+ src_node_size = (src_mm->size << PAGE_SHIFT);
} else {
- old_start += cur_pages * PAGE_SIZE;
+ src_node_start += cur_size;
+ src_page_offset = src_node_start & (PAGE_SIZE - 1);
}
-
- new_size -= cur_pages;
- if (!new_size) {
- new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem);
- new_size = new_mm->size;
+ dst_node_size -= cur_size;
+ if (!dst_node_size) {
+ dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
+ dst->mem);
+ dst_node_size = (dst_mm->size << PAGE_SHIFT);
} else {
- new_start += cur_pages * PAGE_SIZE;
+ dst_node_start += cur_size;
+ dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
}
}
+error:
mutex_unlock(&adev->mman.gtt_window_lock);
+ if (f)
+ *f = dma_fence_get(fence);
+ dma_fence_put(fence);
+ return r;
+}
+
+
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ bool evict, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_copy_mem src, dst;
+ struct dma_fence *fence = NULL;
+ int r;
+
+ src.bo = bo;
+ dst.bo = bo;
+ src.mem = old_mem;
+ dst.mem = new_mem;
+ src.offset = 0;
+ dst.offset = 0;
+
+ r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
+ new_mem->num_pages << PAGE_SHIFT,
+ bo->resv, &fence);
+ if (r)
+ goto error;
r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
dma_fence_put(fence);
return r;
error:
- mutex_unlock(&adev->mman.gtt_window_lock);
-
if (fence)
dma_fence_wait(fence, false);
dma_fence_put(fence);
@@ -483,7 +565,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
int r;
/* Can't move a pinned BO */
- abo = container_of(bo, struct amdgpu_bo, tbo);
+ abo = ttm_to_amdgpu_bo(bo);
if (WARN_ON_ONCE(abo->pin_count > 0))
return -EINVAL;
@@ -581,13 +663,12 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
- struct drm_mm_node *mm = bo->mem.mm_node;
- uint64_t size = mm->size;
- uint64_t offset = page_offset;
+ struct drm_mm_node *mm;
+ unsigned long offset = (page_offset << PAGE_SHIFT);
- page_offset = do_div(offset, size);
- mm += offset;
- return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
+ mm = amdgpu_find_mm_node(&bo->mem, &offset);
+ return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
+ (offset >> PAGE_SHIFT);
}
/*
@@ -608,6 +689,7 @@ struct amdgpu_ttm_tt {
spinlock_t guptasklock;
struct list_head guptasks;
atomic_t mmu_invalidations;
+ uint32_t last_set_pages;
struct list_head list;
};
@@ -621,6 +703,8 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
flags |= FOLL_WRITE;
+ down_read(&current->mm->mmap_sem);
+
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
/* check that we only use anonymous memory
to prevent problems with writeback */
@@ -628,8 +712,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
struct vm_area_struct *vma;
vma = find_vma(gtt->usermm, gtt->userptr);
- if (!vma || vma->vm_file || vma->vm_end < end)
+ if (!vma || vma->vm_file || vma->vm_end < end) {
+ up_read(&current->mm->mmap_sem);
return -EPERM;
+ }
}
do {
@@ -656,42 +742,44 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
} while (pinned < ttm->num_pages);
+ up_read(&current->mm->mmap_sem);
return 0;
release_pages:
- release_pages(pages, pinned, 0);
+ release_pages(pages, pinned);
+ up_read(&current->mm->mmap_sem);
return r;
}
-static void amdgpu_trace_dma_map(struct ttm_tt *ttm)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
- if (unlikely(trace_amdgpu_ttm_tt_populate_enabled())) {
- for (i = 0; i < ttm->num_pages; i++) {
- trace_amdgpu_ttm_tt_populate(
- adev,
- gtt->ttm.dma_address[i],
- page_to_phys(ttm->pages[i]));
- }
+ gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
+ for (i = 0; i < ttm->num_pages; ++i) {
+ if (ttm->pages[i])
+ put_page(ttm->pages[i]);
+
+ ttm->pages[i] = pages ? pages[i] : NULL;
}
}
-static void amdgpu_trace_dma_unmap(struct ttm_tt *ttm)
+void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
- if (unlikely(trace_amdgpu_ttm_tt_unpopulate_enabled())) {
- for (i = 0; i < ttm->num_pages; i++) {
- trace_amdgpu_ttm_tt_unpopulate(
- adev,
- gtt->ttm.dma_address[i],
- page_to_phys(ttm->pages[i]));
- }
+ for (i = 0; i < ttm->num_pages; ++i) {
+ struct page *page = ttm->pages[i];
+
+ if (!page)
+ continue;
+
+ if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+ set_page_dirty(page);
+
+ mark_page_accessed(page);
}
}
@@ -721,8 +809,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
- amdgpu_trace_dma_map(ttm);
-
return 0;
release_sg:
@@ -734,7 +820,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- struct sg_page_iter sg_iter;
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
@@ -747,16 +832,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
/* free the sg table and pages again */
dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
- for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
- if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
- set_page_dirty(page);
-
- mark_page_accessed(page);
- put_page(page);
- }
-
- amdgpu_trace_dma_unmap(ttm);
+ amdgpu_ttm_tt_mark_user_pages(ttm);
sg_free_table(ttm->sg);
}
@@ -818,7 +894,6 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg tmp;
-
struct ttm_placement placement;
struct ttm_place placements;
int r;
@@ -834,7 +909,8 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
- placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
+ placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
+ TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
if (unlikely(r))
@@ -941,8 +1017,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned i;
- int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
@@ -962,52 +1036,26 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
ttm->state = tt_unbound;
- r = 0;
- goto trace_mappings;
+ return 0;
}
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
- r = ttm_dma_populate(&gtt->ttm, adev->dev);
- goto trace_mappings;
+ return ttm_dma_populate(&gtt->ttm, adev->dev);
}
#endif
- r = ttm_pool_populate(ttm);
- if (r) {
- return r;
- }
-
- for (i = 0; i < ttm->num_pages; i++) {
- gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
- while (i--) {
- pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- gtt->ttm.dma_address[i] = 0;
- }
- ttm_pool_unpopulate(ttm);
- return -EFAULT;
- }
- }
-
- r = 0;
-trace_mappings:
- if (likely(!r))
- amdgpu_trace_dma_map(ttm);
- return r;
+ return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
}
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (gtt && gtt->userptr) {
+ amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
return;
@@ -1018,8 +1066,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
adev = amdgpu_ttm_adev(ttm->bdev);
- amdgpu_trace_dma_unmap(ttm);
-
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
ttm_dma_unpopulate(&gtt->ttm, adev->dev);
@@ -1027,14 +1073,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
#endif
- for (i = 0; i < ttm->num_pages; i++) {
- if (gtt->ttm.dma_address[i]) {
- pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- }
-
- ttm_pool_unpopulate(ttm);
+ ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
@@ -1051,6 +1090,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
spin_lock_init(&gtt->guptasklock);
INIT_LIST_HEAD(&gtt->guptasks);
atomic_set(&gtt->mmu_invalidations, 0);
+ gtt->last_set_pages = 0;
return 0;
}
@@ -1103,6 +1143,16 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
return prev_invalidated != *last_invalidated;
}
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+ if (gtt == NULL || !gtt->userptr)
+ return false;
+
+ return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
+}
+
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1143,9 +1193,6 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
unsigned long num_pages = bo->mem.num_pages;
struct drm_mm_node *node = bo->mem.mm_node;
- if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
- return ttm_bo_eviction_valuable(bo, place);
-
switch (bo->mem.mem_type) {
case TTM_PL_TT:
return true;
@@ -1160,7 +1207,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
num_pages -= node->size;
++node;
}
- break;
+ return false;
default:
break;
@@ -1173,9 +1220,9 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
unsigned long offset,
void *buf, int len, int write)
{
- struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
- struct drm_mm_node *nodes = abo->tbo.mem.mm_node;
+ struct drm_mm_node *nodes;
uint32_t value = 0;
int ret = 0;
uint64_t pos;
@@ -1184,10 +1231,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
if (bo->mem.mem_type != TTM_PL_VRAM)
return -EIO;
- while (offset >= (nodes->size << PAGE_SHIFT)) {
- offset -= nodes->size << PAGE_SHIFT;
- ++nodes;
- }
+ nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
pos = (nodes->start << PAGE_SHIFT) + offset;
while (len && pos < adev->mc.mc_vram_size) {
@@ -1202,14 +1246,14 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
}
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
- WREG32(mmMM_INDEX_HI, aligned_pos >> 31);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
if (!write || mask != 0xffffffff)
- value = RREG32(mmMM_DATA);
+ value = RREG32_NO_KIQ(mmMM_DATA);
if (write) {
value &= ~mask;
value |= (*(uint32_t *)buf << shift) & mask;
- WREG32(mmMM_DATA, value);
+ WREG32_NO_KIQ(mmMM_DATA, value);
}
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
if (!write) {
@@ -1286,6 +1330,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+ /*
+ *The reserved vram for firmware must be pinned to the specified
+ *place on the VRAM, so reserve it early.
+ */
+ r = amdgpu_fw_reserve_vram_init(adev);
+ if (r) {
+ return r;
+ }
+
r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->stolen_vga_memory,
@@ -1510,7 +1563,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
job->vm_needs_flush = vm_needs_flush;
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED);
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ false);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -1557,8 +1611,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
struct dma_fence **fence)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- /* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL*/
- uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+ uint32_t max_bytes = 8 *
+ adev->vm_manager.vm_pte_funcs->set_max_nums_pte_pde;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct drm_mm_node *mm_node;
@@ -1590,8 +1644,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
++mm_node;
}
- /* 10 double words for each SDMA_OP_PTEPDE cmd */
- num_dw = num_loops * 10;
+ /* num of dwords for each SDMA_OP_PTEPDE cmd */
+ num_dw = num_loops * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
/* for IB padding */
num_dw += 64;
@@ -1602,7 +1656,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED);
+ AMDGPU_FENCE_OWNER_UNDEFINED, false);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -1697,9 +1751,9 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
return result;
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
- WREG32(mmMM_INDEX_HI, *pos >> 31);
- value = RREG32(mmMM_DATA);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
+ value = RREG32_NO_KIQ(mmMM_DATA);
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
r = put_user(value, (uint32_t *)buf);
@@ -1715,10 +1769,50 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
return result;
}
+static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ if (*pos >= adev->mc.mc_vram_size)
+ return -ENXIO;
+
+ while (size) {
+ unsigned long flags;
+ uint32_t value;
+
+ if (*pos >= adev->mc.mc_vram_size)
+ return result;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
+ WREG32_NO_KIQ(mmMM_DATA, value);
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
static const struct file_operations amdgpu_ttm_vram_fops = {
.owner = THIS_MODULE,
.read = amdgpu_ttm_vram_read,
- .llseek = default_llseek
+ .write = amdgpu_ttm_vram_write,
+ .llseek = default_llseek,
};
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
@@ -1770,6 +1864,53 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
#endif
+static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ int r;
+ uint64_t phys;
+ struct iommu_domain *dom;
+
+ // always return 8 bytes
+ if (size != 8)
+ return -EINVAL;
+
+ // only accept page addresses
+ if (*pos & 0xFFF)
+ return -EINVAL;
+
+ dom = iommu_get_domain_for_dev(adev->dev);
+ if (dom)
+ phys = iommu_iova_to_phys(dom, *pos);
+ else
+ phys = *pos;
+
+ r = copy_to_user(buf, &phys, 8);
+ if (r)
+ return -EFAULT;
+
+ return 8;
+}
+
+static const struct file_operations amdgpu_ttm_iova_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_iova_to_phys_read,
+ .llseek = default_llseek
+};
+
+static const struct {
+ char *name;
+ const struct file_operations *fops;
+ int domain;
+} ttm_debugfs_entries[] = {
+ { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+ { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
+#endif
+ { "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM },
+};
+
#endif
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
@@ -1780,22 +1921,21 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
struct drm_minor *minor = adev->ddev->primary;
struct dentry *ent, *root = minor->debugfs_root;
- ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root,
- adev, &amdgpu_ttm_vram_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- i_size_write(ent->d_inode, adev->mc.mc_vram_size);
- adev->mman.vram = ent;
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
- adev, &amdgpu_ttm_gtt_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- i_size_write(ent->d_inode, adev->mc.gart_size);
- adev->mman.gtt = ent;
+ for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
+ ent = debugfs_create_file(
+ ttm_debugfs_entries[count].name,
+ S_IFREG | S_IRUGO, root,
+ adev,
+ ttm_debugfs_entries[count].fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+ if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
+ i_size_write(ent->d_inode, adev->mc.mc_vram_size);
+ else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
+ i_size_write(ent->d_inode, adev->mc.gart_size);
+ adev->mman.debugfs_entries[count] = ent;
+ }
-#endif
count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
#ifdef CONFIG_SWIOTLB
@@ -1805,7 +1945,6 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
-
return 0;
#endif
}
@@ -1813,14 +1952,9 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
+ unsigned i;
- debugfs_remove(adev->mman.vram);
- adev->mman.vram = NULL;
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- debugfs_remove(adev->mman.gtt);
- adev->mman.gtt = NULL;
-#endif
-
+ for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
+ debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}
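
Note (editor, not part of the patch): the new amdgpu_iova debugfs file translates a device IOVA into a physical address; as enforced in amdgpu_iova_to_phys_read() above, reads must be exactly 8 bytes and the file offset must be page aligned. A small userspace probe sketch, assuming the usual DRM debugfs layout under /sys/kernel/debug/dri/0:

	/* Hypothetical probe: translate one page-aligned IOVA. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	static int example_print_phys(uint64_t iova)
	{
		uint64_t phys;
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_iova", O_RDONLY);

		if (fd < 0)
			return -1;

		/* the kernel rejects sizes other than 8 and unaligned offsets */
		if (pread(fd, &phys, sizeof(phys), iova) != sizeof(phys)) {
			close(fd);
			return -1;
		}

		printf("iova 0x%llx -> phys 0x%llx\n",
		       (unsigned long long)iova, (unsigned long long)phys);
		close(fd);
		return 0;
	}
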
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 43093bffa2cf..abd4084982a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -24,6 +24,7 @@
#ifndef __AMDGPU_TTM_H__
#define __AMDGPU_TTM_H__
+#include "amdgpu.h"
#include "gpu_scheduler.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
@@ -45,8 +46,7 @@ struct amdgpu_mman {
bool initialized;
#if defined(CONFIG_DEBUG_FS)
- struct dentry *vram;
- struct dentry *gtt;
+ struct dentry *debugfs_entries[8];
#endif
/* buffer handling */
@@ -58,6 +58,12 @@ struct amdgpu_mman {
struct amd_sched_entity entity;
};
+struct amdgpu_copy_mem {
+ struct ttm_buffer_object *bo;
+ struct ttm_mem_reg *mem;
+ unsigned long offset;
+};
+
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
@@ -72,6 +78,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
struct reservation_object *resv,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush);
+int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ struct amdgpu_copy_mem *src,
+ struct amdgpu_copy_mem *dst,
+ uint64_t size,
+ struct reservation_object *resv,
+ struct dma_fence **f);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint64_t src_data,
struct reservation_object *resv,
@@ -82,4 +94,20 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
+int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm);
+int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+ uint32_t flags);
+bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+ unsigned long end);
+bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+ int *last_invalidated);
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
+uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+ struct ttm_mem_reg *mem);
+
#endif
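
Note (editor, not part of the patch): struct amdgpu_copy_mem bundles a buffer object, its memory placement and a byte offset, so amdgpu_ttm_copy_mem_to_mem() can copy arbitrary windows between two BOs; amdgpu_move_blit() in amdgpu_ttm.c above is the in-tree user. The sketch below applies the same pattern to a partial copy between two already-reserved BOs; the surrounding function and variable names are illustrative only:

	/* Illustrative partial copy: @size bytes from offset @src_off of
	 * @src_bo to offset @dst_off of @dst_bo, synced against the source
	 * BO's reservation object. */
	static int example_partial_copy(struct amdgpu_device *adev,
					struct amdgpu_bo *src_bo, unsigned long src_off,
					struct amdgpu_bo *dst_bo, unsigned long dst_off,
					uint64_t size, struct dma_fence **fence)
	{
		struct amdgpu_copy_mem src = {
			.bo = &src_bo->tbo,
			.mem = &src_bo->tbo.mem,
			.offset = src_off,
		};
		struct amdgpu_copy_mem dst = {
			.bo = &dst_bo->tbo,
			.mem = &dst_bo->tbo.mem,
			.offset = dst_off,
		};

		return amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, size,
						  src_bo->tbo.resv, fence);
	}
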
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 36c763310df5..65649026b836 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -270,12 +270,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
else
return AMDGPU_FW_LOAD_SMU;
case CHIP_VEGA10:
- if (!load_type)
- return AMDGPU_FW_LOAD_DIRECT;
- else
- return AMDGPU_FW_LOAD_PSP;
case CHIP_RAVEN:
- if (load_type != 2)
+ if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
@@ -364,8 +360,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
struct amdgpu_bo **bo = &adev->firmware.fw_buf;
- uint64_t fw_mc_addr;
- void *fw_buf_ptr = NULL;
uint64_t fw_offset = 0;
int i, err;
struct amdgpu_firmware_info *ucode = NULL;
@@ -376,37 +370,39 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
return 0;
}
- err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
- amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, 0, bo);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
- goto failed;
- }
+ if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
+ err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
+ amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+ NULL, NULL, 0, bo);
+ if (err) {
+ dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
+ goto failed;
+ }
- err = amdgpu_bo_reserve(*bo, false);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
- goto failed_reserve;
- }
+ err = amdgpu_bo_reserve(*bo, false);
+ if (err) {
+ dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
+ goto failed_reserve;
+ }
- err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
- &fw_mc_addr);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
- goto failed_pin;
- }
+ err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ &adev->firmware.fw_buf_mc);
+ if (err) {
+ dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
+ goto failed_pin;
+ }
- err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
- goto failed_kmap;
- }
+ err = amdgpu_bo_kmap(*bo, &adev->firmware.fw_buf_ptr);
+ if (err) {
+ dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
+ goto failed_kmap;
+ }
- amdgpu_bo_unreserve(*bo);
+ amdgpu_bo_unreserve(*bo);
+ }
- memset(fw_buf_ptr, 0, adev->firmware.fw_size);
+ memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
/*
* if SMU loaded firmware, it needn't add SMC, UVD, and VCE
@@ -425,14 +421,14 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
header = (const struct common_firmware_header *)ucode->fw->data;
- amdgpu_ucode_init_single_fw(adev, ucode, fw_mc_addr + fw_offset,
- (void *)((uint8_t *)fw_buf_ptr + fw_offset));
+ amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
+ adev->firmware.fw_buf_ptr + fw_offset);
if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
const struct gfx_firmware_header_v1_0 *cp_hdr;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
- amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
- fw_buf_ptr + fw_offset);
+ amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset,
+ adev->firmware.fw_buf_ptr + fw_offset);
fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
}
fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e19928dae8e3..e8bd50cf9785 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -269,6 +269,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
+ int i;
kfree(adev->uvd.saved_bo);
amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
@@ -279,6 +280,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->uvd.ring);
+ for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
+ amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+
release_firmware(adev->uvd.fw);
return 0;
@@ -410,10 +414,10 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r = 0;
- mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
- if (mapping == NULL) {
+ r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+ if (r) {
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
- return -EINVAL;
+ return r;
}
if (!ctx->parser->adev->uvd.address_64_bit) {
@@ -737,10 +741,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r;
- mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
- if (mapping == NULL) {
+ r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+ if (r) {
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
- return -EINVAL;
+ return r;
}
start = amdgpu_bo_gpu_offset(bo);
@@ -917,10 +921,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return -EINVAL;
}
- r = amdgpu_cs_sysvm_access_required(parser);
- if (r)
- return r;
-
ctx.parser = parser;
ctx.buf_sizes = buf_sizes;
ctx.ib_idx = ib_idx;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index c855366521ab..2918de2f39ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -559,6 +559,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
uint64_t addr;
+ int r;
if (index == 0xffffffff)
index = 0;
@@ -567,11 +568,11 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
addr += ((uint64_t)size) * ((uint64_t)index);
- mapping = amdgpu_cs_find_mapping(p, addr, &bo);
- if (mapping == NULL) {
+ r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
+ if (r) {
DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
addr, lo, hi, size, index);
- return -EINVAL;
+ return r;
}
if ((addr + (uint64_t)size) >
@@ -647,15 +648,11 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t allocated = 0;
uint32_t tmp, handle = 0;
uint32_t *size = &tmp;
- int i, r, idx = 0;
+ int i, r = 0, idx = 0;
p->job->vm = NULL;
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
- r = amdgpu_cs_sysvm_access_required(p);
- if (r)
- return r;
-
while (idx < ib->length_dw) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
index 45ac91861965..7f7097931c6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
@@ -25,30 +25,26 @@
#include "amdgpu_vf_error.h"
#include "mxgpu_ai.h"
-#define AMDGPU_VF_ERROR_ENTRY_SIZE 16
-
-/* struct error_entry - amdgpu VF error information. */
-struct amdgpu_vf_error_buffer {
- int read_count;
- int write_count;
- uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
- uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
- uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
-};
-
-struct amdgpu_vf_error_buffer admgpu_vf_errors;
-
-
-void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data)
+void amdgpu_vf_error_put(struct amdgpu_device *adev,
+ uint16_t sub_error_code,
+ uint16_t error_flags,
+ uint64_t error_data)
{
int index;
- uint16_t error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
+ uint16_t error_code;
- index = admgpu_vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
- admgpu_vf_errors.code [index] = error_code;
- admgpu_vf_errors.flags [index] = error_flags;
- admgpu_vf_errors.data [index] = error_data;
- admgpu_vf_errors.write_count ++;
+ if (!amdgpu_sriov_vf(adev))
+ return;
+
+ error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
+
+ mutex_lock(&adev->virt.vf_errors.lock);
+ index = adev->virt.vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
+ adev->virt.vf_errors.code [index] = error_code;
+ adev->virt.vf_errors.flags [index] = error_flags;
+ adev->virt.vf_errors.data [index] = error_data;
+ adev->virt.vf_errors.write_count ++;
+ mutex_unlock(&adev->virt.vf_errors.lock);
}
@@ -58,7 +54,8 @@ void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
u32 data1, data2, data3;
int index;
- if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) || (!adev->virt.ops) || (!adev->virt.ops->trans_msg)) {
+ if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) ||
+ (!adev->virt.ops) || (!adev->virt.ops->trans_msg)) {
return;
}
/*
@@ -68,18 +65,22 @@ void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
return;
}
*/
+
+ mutex_lock(&adev->virt.vf_errors.lock);
/* The errors are overlay of array, correct read_count as full. */
- if (admgpu_vf_errors.write_count - admgpu_vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
- admgpu_vf_errors.read_count = admgpu_vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
+ if (adev->virt.vf_errors.write_count - adev->virt.vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
+ adev->virt.vf_errors.read_count = adev->virt.vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
}
- while (admgpu_vf_errors.read_count < admgpu_vf_errors.write_count) {
- index =admgpu_vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
- data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX (admgpu_vf_errors.code[index], admgpu_vf_errors.flags[index]);
- data2 = admgpu_vf_errors.data[index] & 0xFFFFFFFF;
- data3 = (admgpu_vf_errors.data[index] >> 32) & 0xFFFFFFFF;
+ while (adev->virt.vf_errors.read_count < adev->virt.vf_errors.write_count) {
+ index = adev->virt.vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
+ data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(adev->virt.vf_errors.code[index],
+ adev->virt.vf_errors.flags[index]);
+ data2 = adev->virt.vf_errors.data[index] & 0xFFFFFFFF;
+ data3 = (adev->virt.vf_errors.data[index] >> 32) & 0xFFFFFFFF;
adev->virt.ops->trans_msg(adev, IDH_LOG_VF_ERROR, data1, data2, data3);
- admgpu_vf_errors.read_count ++;
+ adev->virt.vf_errors.read_count ++;
}
+ mutex_unlock(&adev->virt.vf_errors.lock);
}
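
The vf_error hunks above move the error log from a file-scope global into a per-device, mutex-protected buffer. The bookkeeping itself is unchanged: two free-running counters index a fixed-size ring modulo its length, and the reader is snapped forward when the writer has lapped it. A standalone sketch of that counter scheme follows; the names are illustrative and locking is omitted because the demo is single-threaded.

#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE 16

struct err_ring {
	unsigned int read_count;	/* entries consumed so far */
	unsigned int write_count;	/* entries produced so far */
	uint16_t code[ENTRY_SIZE];
	uint64_t data[ENTRY_SIZE];
};

/* Producer: overwrite the oldest slot once the ring is full. */
static void err_put(struct err_ring *r, uint16_t code, uint64_t data)
{
	unsigned int idx = r->write_count % ENTRY_SIZE;

	r->code[idx] = code;
	r->data[idx] = data;
	r->write_count++;
}

/* Consumer: first drop entries the producer already overwrote, then drain. */
static void err_drain(struct err_ring *r)
{
	if (r->write_count - r->read_count > ENTRY_SIZE)
		r->read_count = r->write_count - ENTRY_SIZE;

	while (r->read_count < r->write_count) {
		unsigned int idx = r->read_count % ENTRY_SIZE;

		printf("code 0x%04x data 0x%llx\n",
		       r->code[idx], (unsigned long long)r->data[idx]);
		r->read_count++;
	}
}

int main(void)
{
	struct err_ring ring = { 0 };
	int i;

	for (i = 0; i < 20; i++)	/* overruns the 16-entry ring */
		err_put(&ring, (uint16_t)i, 0x1000 + i);
	err_drain(&ring);		/* prints only the newest 16 entries */
	return 0;
}
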
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
index 2a3278ec76ba..6436bd053325 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
@@ -56,7 +56,10 @@ enum AMDGIM_ERROR_CATEGORY {
AMDGIM_ERROR_CATEGORY_MAX
};
-void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data);
+void amdgpu_vf_error_put(struct amdgpu_device *adev,
+ uint16_t sub_error_code,
+ uint16_t error_flags,
+ uint64_t error_data);
void amdgpu_vf_error_trans_all (struct amdgpu_device *adev);
#endif /* __VF_ERROR_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index ab05121b9272..6738df836a70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -22,7 +22,7 @@
*/
#include "amdgpu.h"
-#define MAX_KIQ_REG_WAIT 100000
+#define MAX_KIQ_REG_WAIT 100000000 /* in usecs */
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
@@ -114,27 +114,25 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
signed long r;
- uint32_t val;
- struct dma_fence *f;
+ unsigned long flags;
+ uint32_t val, seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
- mutex_lock(&kiq->ring_mutex);
+ spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_rreg(ring, reg);
- amdgpu_fence_emit(ring, &f);
+ amdgpu_fence_emit_polling(ring, &seq);
amdgpu_ring_commit(ring);
- mutex_unlock(&kiq->ring_mutex);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
- r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
- dma_fence_put(f);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
if (r < 1) {
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
+ DRM_ERROR("wait for kiq fence error: %ld\n", r);
return ~0;
}
-
val = adev->wb.wb[adev->virt.reg_val_offs];
return val;
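
The KIQ register-read path above switches from sleeping on a dma_fence to emitting a polling fence and busy-waiting on its sequence number, which is why the ring mutex could become a spinlock taken with IRQs disabled. The toy below only models the emit-then-poll shape of that change; the "hardware" completes immediately so the demo terminates, and none of the names are amdgpu APIs.

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t hw_completed;	/* value the "hardware" writes back */
static uint32_t next_seq;

/* "Emit" a command and return the sequence number that will signal it. */
static uint32_t emit_polling(void)
{
	uint32_t seq = ++next_seq;

	/*
	 * A real ring would complete seq only when the packet retires;
	 * completing it here keeps the demo finite.
	 */
	hw_completed = seq;
	return seq;
}

/* Busy-wait until seq is signalled or the iteration budget runs out. */
static long wait_polling(uint32_t seq, unsigned long timeout)
{
	while (timeout--) {
		if ((int32_t)(hw_completed - seq) >= 0)
			return 1;	/* > 0: signalled in time */
	}
	return 0;			/* timed out */
}

int main(void)
{
	uint32_t seq = emit_polling();

	if (wait_polling(seq, 1000000) < 1)
		fprintf(stderr, "wait for fence error\n");
	else
		printf("seq %u signalled\n", seq);
	return 0;
}
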
@@ -143,23 +141,23 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
signed long r;
- struct dma_fence *f;
+ unsigned long flags;
+ uint32_t seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_wreg);
- mutex_lock(&kiq->ring_mutex);
+ spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_wreg(ring, reg, v);
- amdgpu_fence_emit(ring, &f);
+ amdgpu_fence_emit_polling(ring, &seq);
amdgpu_ring_commit(ring);
- mutex_unlock(&kiq->ring_mutex);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
- r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
if (r < 1)
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
- dma_fence_put(f);
+ DRM_ERROR("wait for kiq fence error: %ld\n", r);
}
/**
@@ -274,3 +272,80 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
(void *)&adev->virt.mm_table.cpu_addr);
adev->virt.mm_table.gpu_addr = 0;
}
+
+
+int amdgpu_virt_fw_reserve_get_checksum(void *obj,
+ unsigned long obj_size,
+ unsigned int key,
+ unsigned int chksum)
+{
+ unsigned int ret = key;
+ unsigned long i = 0;
+ unsigned char *pos;
+
+ pos = (char *)obj;
+ /* calculate checksum */
+ for (i = 0; i < obj_size; ++i)
+ ret += *(pos + i);
+ /* subtract the checksum field itself */
+ pos = (char *)&chksum;
+ for (i = 0; i < sizeof(chksum); ++i)
+ ret -= *(pos + i);
+ return ret;
+}
+
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+{
+ uint32_t pf2vf_ver = 0;
+ uint32_t pf2vf_size = 0;
+ uint32_t checksum = 0;
+ uint32_t checkval;
+ char *str;
+
+ adev->virt.fw_reserve.p_pf2vf = NULL;
+ adev->virt.fw_reserve.p_vf2pf = NULL;
+
+ if (adev->fw_vram_usage.va != NULL) {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amdgim_pf2vf_info_header *)(
+ adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
+ pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
+ AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
+ AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
+
+ /* pf2vf message must fit within 4K */
+ if (pf2vf_size > 0 && pf2vf_size < 4096) {
+ checkval = amdgpu_virt_fw_reserve_get_checksum(
+ adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
+ adev->virt.fw_reserve.checksum_key, checksum);
+ if (checkval == checksum) {
+ adev->virt.fw_reserve.p_vf2pf =
+ ((void *)adev->virt.fw_reserve.p_pf2vf +
+ pf2vf_size);
+ memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
+ sizeof(amdgim_vf2pf_info));
+ AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
+ AMDGPU_FW_VRAM_VF2PF_VER);
+ AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
+ sizeof(amdgim_vf2pf_info));
+ AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
+ &str);
+#ifdef MODULE
+ if (THIS_MODULE->version != NULL)
+ strcpy(str, THIS_MODULE->version);
+ else
+#endif
+ strcpy(str, "N/A");
+ AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
+ 0);
+ AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
+ amdgpu_virt_fw_reserve_get_checksum(
+ adev->virt.fw_reserve.p_vf2pf,
+ pf2vf_size,
+ adev->virt.fw_reserve.checksum_key, 0));
+ }
+ }
+ }
+}
+
+
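
amdgpu_virt_fw_reserve_get_checksum() above is a plain additive byte checksum seeded with a shared key, with the bytes of the stored checksum field subtracted back out so the writer and the verifier can run the same routine over the whole block. A minimal standalone sketch, assuming a simplified message layout and an arbitrary key:

#include <stdint.h>
#include <stdio.h>

struct msg {
	uint32_t version;
	uint32_t payload;
	uint32_t checksum;	/* summed like any other bytes, then subtracted */
};

static unsigned int block_checksum(const void *obj, unsigned long size,
				   unsigned int key, unsigned int chksum)
{
	const unsigned char *pos = obj;
	unsigned int ret = key;
	unsigned long i;

	for (i = 0; i < size; ++i)		/* sum every byte of the block */
		ret += pos[i];

	pos = (const unsigned char *)&chksum;	/* remove the stored checksum */
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= pos[i];
	return ret;
}

int main(void)
{
	const unsigned int key = 0x1234;	/* illustrative shared key */
	struct msg m = { .version = 2, .payload = 42 };
	unsigned int check;

	/* Writer side: checksum computed while the field is still zero. */
	m.checksum = block_checksum(&m, sizeof(m), key, 0);

	/* Reader side: recompute over the whole block and compare. */
	check = block_checksum(&m, sizeof(m), key, m.checksum);
	printf("%s\n", check == m.checksum ? "checksum OK" : "checksum mismatch");
	return 0;
}
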
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index afcfb8bcfb65..b89d37fc406f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -36,6 +36,18 @@ struct amdgpu_mm_table {
uint64_t gpu_addr;
};
+#define AMDGPU_VF_ERROR_ENTRY_SIZE 16
+
+/* struct amdgpu_vf_error_buffer - amdgpu VF error information. */
+struct amdgpu_vf_error_buffer {
+ struct mutex lock;
+ int read_count;
+ int write_count;
+ uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
+};
+
/**
* struct amdgpu_virt_ops - amdgpu device virt operations
*/
@@ -46,6 +58,179 @@ struct amdgpu_virt_ops {
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
};
+/*
+ * Firmware Reserve Frame buffer
+ */
+struct amdgpu_virt_fw_reserve {
+ struct amdgim_pf2vf_info_header *p_pf2vf;
+ struct amdgim_vf2pf_info_header *p_vf2pf;
+ unsigned int checksum_key;
+};
+/*
+ * Definition between PF and VF
+ * Structures forcibly aligned to 4 to keep the same style as PF.
+ */
+#define AMDGIM_DATAEXCHANGE_OFFSET (64 * 1024)
+
+#define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \
+ (total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2))
+
+enum AMDGIM_FEATURE_FLAG {
+ /* GIM supports feature of Error log collecting */
+ AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
+ /* GIM supports feature of loading uCodes */
+ AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
+};
+
+struct amdgim_pf2vf_info_header {
+ /* the total structure size in bytes. */
+ uint32_t size;
+ /* version of this structure, written by the GIM */
+ uint32_t version;
+} __aligned(4);
+struct amdgim_pf2vf_info_v1 {
+ /* header contains size and version */
+ struct amdgim_pf2vf_info_header header;
+ /* max_width * max_height */
+ unsigned int uvd_enc_max_pixels_count;
+ /* 16x16 pixels/sec, codec independent */
+ unsigned int uvd_enc_max_bandwidth;
+ /* max_width * max_height */
+ unsigned int vce_enc_max_pixels_count;
+ /* 16x16 pixels/sec, codec independent */
+ unsigned int vce_enc_max_bandwidth;
+ /* MEC FW position in kb from the start of visible frame buffer */
+ unsigned int mecfw_kboffset;
+ /* The feature flags that the GIM driver supports. */
+ unsigned int feature_flags;
+ /* use private key from mailbox 2 to create checksum */
+ unsigned int checksum;
+} __aligned(4);
+
+struct amdgim_pf2vf_info_v2 {
+ /* header contains size and version */
+ struct amdgim_pf2vf_info_header header;
+ /* use private key from mailbox 2 to create checksum */
+ uint32_t checksum;
+ /* The feature flags that the GIM driver supports. */
+ uint32_t feature_flags;
+ /* max_width * max_height */
+ uint32_t uvd_enc_max_pixels_count;
+ /* 16x16 pixels/sec, codec independent */
+ uint32_t uvd_enc_max_bandwidth;
+ /* max_width * max_height */
+ uint32_t vce_enc_max_pixels_count;
+ /* 16x16 pixels/sec, codec independent */
+ uint32_t vce_enc_max_bandwidth;
+ /* MEC FW position in kb from the start of VF visible frame buffer */
+ uint64_t mecfw_kboffset;
+ /* MEC FW size in KB */
+ uint32_t mecfw_ksize;
+ /* UVD FW position in kb from the start of VF visible frame buffer */
+ uint64_t uvdfw_kboffset;
+ /* UVD FW size in KB */
+ uint32_t uvdfw_ksize;
+ /* VCE FW position in kb from the start of VF visible frame buffer */
+ uint64_t vcefw_kboffset;
+ /* VCE FW size in KB */
+ uint32_t vcefw_ksize;
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
+} __aligned(4);
+
+
+struct amdgim_vf2pf_info_header {
+ /* the total structure size in bytes. */
+ uint32_t size;
+ /* version of this structure, written by the guest */
+ uint32_t version;
+} __aligned(4);
+
+struct amdgim_vf2pf_info_v1 {
+ /* header contains size and version */
+ struct amdgim_vf2pf_info_header header;
+ /* driver version */
+ char driver_version[64];
+ /* driver certification, 1=WHQL, 0=None */
+ unsigned int driver_cert;
+ /* guest OS type and version: need a define */
+ unsigned int os_info;
+ /* in the unit of 1M */
+ unsigned int fb_usage;
+ /* guest gfx engine usage percentage */
+ unsigned int gfx_usage;
+ /* guest gfx engine health percentage */
+ unsigned int gfx_health;
+ /* guest compute engine usage percentage */
+ unsigned int compute_usage;
+ /* guest compute engine health percentage */
+ unsigned int compute_health;
+ /* guest vce engine usage percentage. 0xffff means N/A. */
+ unsigned int vce_enc_usage;
+ /* guest vce engine health percentage. 0xffff means N/A. */
+ unsigned int vce_enc_health;
+ /* guest uvd engine usage percentage. 0xffff means N/A. */
+ unsigned int uvd_enc_usage;
+ /* guest uvd engine health percentage. 0xffff means N/A. */
+ unsigned int uvd_enc_health;
+ unsigned int checksum;
+} __aligned(4);
+
+struct amdgim_vf2pf_info_v2 {
+ /* header contains size and version */
+ struct amdgim_vf2pf_info_header header;
+ uint32_t checksum;
+ /* driver version */
+ uint8_t driver_version[64];
+ /* driver certification, 1=WHQL, 0=None */
+ uint32_t driver_cert;
+ /* guest OS type and version: need a define */
+ uint32_t os_info;
+ /* in the unit of 1M */
+ uint32_t fb_usage;
+ /* guest gfx engine usage percentage */
+ uint32_t gfx_usage;
+ /* guest gfx engine health percentage */
+ uint32_t gfx_health;
+ /* guest compute engine usage percentage */
+ uint32_t compute_usage;
+ /* guest compute engine health percentage */
+ uint32_t compute_health;
+ /* guest vce engine usage percentage. 0xffff means N/A. */
+ uint32_t vce_enc_usage;
+ /* guest vce engine health percentage. 0xffff means N/A. */
+ uint32_t vce_enc_health;
+ /* guest uvd engine usage percentage. 0xffff means N/A. */
+ uint32_t uvd_enc_usage;
+ /* guest uvd engine health percentage. 0xffff means N/A. */
+ uint32_t uvd_enc_health;
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
+} __aligned(4);
+
+#define AMDGPU_FW_VRAM_VF2PF_VER 2
+typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info;
+
+#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
+ do { \
+ ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \
+ } while (0)
+
+#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
+ do { \
+ (*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \
+ } while (0)
+
+#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
+ do { \
+ if (!adev->virt.fw_reserve.p_pf2vf) \
+ *(val) = 0; \
+ else { \
+ if (adev->virt.fw_reserve.p_pf2vf->version == 1) \
+ *(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \
+ if (adev->virt.fw_reserve.p_pf2vf->version == 2) \
+ *(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \
+ } \
+ } while (0)
+
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@@ -59,6 +244,8 @@ struct amdgpu_virt {
struct work_struct flr_work;
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
+ struct amdgpu_vf_error_buffer vf_errors;
+ struct amdgpu_virt_fw_reserve fw_reserve;
};
#define AMDGPU_CSA_SIZE (8 * 1024)
@@ -101,5 +288,9 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
+int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
+ unsigned int key,
+ unsigned int chksum);
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
#endif
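
The AMDGPU_FW_VRAM_PF2VF_READ macro above has to dispatch on header.version because the v1 and v2 layouts place their fields differently. The sketch below shows the same dispatch idea with made-up struct layouts; it is not the driver's macro.

#include <stdint.h>
#include <stdio.h>

struct hdr { uint32_t size; uint32_t version; };
struct info_v1 { struct hdr header; uint32_t feature_flags; uint32_t checksum; };
struct info_v2 { struct hdr header; uint32_t checksum; uint32_t feature_flags; };

/* Pick the layout that matches the version stamped into the header. */
#define INFO_READ(p, field, val)					\
	do {								\
		if (!(p))						\
			*(val) = 0;					\
		else if (((const struct hdr *)(p))->version == 1)	\
			*(val) = ((const struct info_v1 *)(p))->field;	\
		else if (((const struct hdr *)(p))->version == 2)	\
			*(val) = ((const struct info_v2 *)(p))->field;	\
	} while (0)

int main(void)
{
	struct info_v2 msg = {
		.header = { .size = sizeof(msg), .version = 2 },
		.checksum = 0xabcd,
		.feature_flags = 0x1,
	};
	const void *p = &msg;
	uint32_t flags = 0;

	INFO_READ(p, feature_flags, &flags);	/* resolves to the v2 layout */
	printf("feature_flags = 0x%x\n", flags);
	return 0;
}
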
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index bd20ff018512..c8c26f21993c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -27,12 +27,59 @@
*/
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
+#include <linux/idr.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
+ * PASID manager
+ *
+ * PASIDs are global address space identifiers that can be shared
+ * between the GPU, an IOMMU and the driver. VMs on different devices
+ * may use the same PASID if they share the same address
+ * space. Therefore PASIDs are allocated using a global IDA. VMs are
+ * looked up from the PASID per amdgpu_device.
+ */
+static DEFINE_IDA(amdgpu_vm_pasid_ida);
+
+/**
+ * amdgpu_vm_alloc_pasid - Allocate a PASID
+ * @bits: Maximum width of the PASID in bits, must be at least 1
+ *
+ * Allocates a PASID of the given width while keeping smaller PASIDs
+ * available if possible.
+ *
+ * Returns a positive integer on success. Returns %-EINVAL if bits==0.
+ * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
+ * memory allocation failure.
+ */
+int amdgpu_vm_alloc_pasid(unsigned int bits)
+{
+ int pasid = -EINVAL;
+
+ for (bits = min(bits, 31U); bits > 0; bits--) {
+ pasid = ida_simple_get(&amdgpu_vm_pasid_ida,
+ 1U << (bits - 1), 1U << bits,
+ GFP_KERNEL);
+ if (pasid != -ENOSPC)
+ break;
+ }
+
+ return pasid;
+}
+
+/**
+ * amdgpu_vm_free_pasid - Free a PASID
+ * @pasid: PASID to free
+ */
+void amdgpu_vm_free_pasid(unsigned int pasid)
+{
+ ida_simple_remove(&amdgpu_vm_pasid_ida, pasid);
+}
+
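
amdgpu_vm_alloc_pasid() above starts at the requested width and tries the [2^(bits-1), 2^bits) range, falling back to narrower ranges only when one is exhausted, so small PASIDs stay available for callers that need them. A toy model of that policy, using a flat bitmap in place of the global IDA:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ID 256
static bool used[MAX_ID];	/* stand-in for the global IDA */

/* Grab the first free id in [lo, hi), or -1 if that range is exhausted. */
static int alloc_in_range(unsigned int lo, unsigned int hi)
{
	unsigned int id;

	for (id = lo; id < hi && id < MAX_ID; id++) {
		if (!used[id]) {
			used[id] = true;
			return (int)id;
		}
	}
	return -1;
}

static int alloc_pasid(unsigned int bits)
{
	int pasid = -1;

	if (bits > 8)
		bits = 8;	/* clamp to what the toy id space can hold */
	for (; bits > 0; bits--) {
		pasid = alloc_in_range(1u << (bits - 1), 1u << bits);
		if (pasid >= 0)	/* only fall back while the range is full */
			break;
	}
	return pasid;
}

int main(void)
{
	int i;

	/* A 4-bit request lands in [8, 16); only after that range fills up
	 * do further requests spill into the narrower ranges below it. */
	for (i = 0; i < 10; i++)
		printf("pasid = %d\n", alloc_pasid(4));
	return 0;
}
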
+/*
* GPUVM
* GPUVM is similar to the legacy gart on older asics, however
* rather than there being a single global gart table
@@ -140,7 +187,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry)
{
- entry->robj = vm->root.bo;
+ entry->robj = vm->root.base.bo;
entry->priority = 0;
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = true;
@@ -149,86 +196,80 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_validate_layer - validate a single page table level
+ * amdgpu_vm_validate_pt_bos - validate the page table BOs
*
- * @parent: parent page table level
+ * @adev: amdgpu device pointer
+ * @vm: vm providing the BOs
* @validate: callback to do the validation
* @param: parameter for the validation callback
*
* Validate the page table BOs on command submission if neccessary.
*/
-static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
- int (*validate)(void *, struct amdgpu_bo *),
- void *param, bool use_cpu_for_update,
- struct ttm_bo_global *glob)
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*validate)(void *p, struct amdgpu_bo *bo),
+ void *param)
{
- unsigned i;
+ struct ttm_bo_global *glob = adev->mman.bdev.glob;
int r;
- if (use_cpu_for_update) {
- r = amdgpu_bo_kmap(parent->bo, NULL);
- if (r)
- return r;
- }
-
- if (!parent->entries)
- return 0;
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->evicted)) {
+ struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_bo *bo;
- for (i = 0; i <= parent->last_entry_used; ++i) {
- struct amdgpu_vm_pt *entry = &parent->entries[i];
+ bo_base = list_first_entry(&vm->evicted,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
- if (!entry->bo)
- continue;
+ bo = bo_base->bo;
+ BUG_ON(!bo);
+ if (bo->parent) {
+ r = validate(param, bo);
+ if (r)
+ return r;
- r = validate(param, entry->bo);
- if (r)
- return r;
+ spin_lock(&glob->lru_lock);
+ ttm_bo_move_to_lru_tail(&bo->tbo);
+ if (bo->shadow)
+ ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
+ spin_unlock(&glob->lru_lock);
+ }
- spin_lock(&glob->lru_lock);
- ttm_bo_move_to_lru_tail(&entry->bo->tbo);
- if (entry->bo->shadow)
- ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo);
- spin_unlock(&glob->lru_lock);
+ if (bo->tbo.type == ttm_bo_type_kernel &&
+ vm->use_cpu_for_update) {
+ r = amdgpu_bo_kmap(bo, NULL);
+ if (r)
+ return r;
+ }
- /*
- * Recurse into the sub directory. This is harmless because we
- * have only a maximum of 5 layers.
- */
- r = amdgpu_vm_validate_level(entry, validate, param,
- use_cpu_for_update, glob);
- if (r)
- return r;
+ spin_lock(&vm->status_lock);
+ if (bo->tbo.type != ttm_bo_type_kernel)
+ list_move(&bo_base->vm_status, &vm->moved);
+ else
+ list_move(&bo_base->vm_status, &vm->relocated);
}
+ spin_unlock(&vm->status_lock);
- return r;
+ return 0;
}
/**
- * amdgpu_vm_validate_pt_bos - validate the page table BOs
+ * amdgpu_vm_ready - check VM is ready for updates
*
- * @adev: amdgpu device pointer
- * @vm: vm providing the BOs
- * @validate: callback to do the validation
- * @param: parameter for the validation callback
+ * @vm: VM to check
*
- * Validate the page table BOs on command submission if neccessary.
+ * Check if all VM PDs/PTs are ready for updates
*/
-int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int (*validate)(void *p, struct amdgpu_bo *bo),
- void *param)
+bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- uint64_t num_evictions;
+ bool ready;
- /* We only need to validate the page tables
- * if they aren't already valid.
- */
- num_evictions = atomic64_read(&adev->num_evictions);
- if (num_evictions == vm->last_eviction_counter)
- return 0;
+ spin_lock(&vm->status_lock);
+ ready = list_empty(&vm->evicted);
+ spin_unlock(&vm->status_lock);
- return amdgpu_vm_validate_level(&vm->root, validate, param,
- vm->use_cpu_for_update,
- adev->mman.bdev.glob);
+ return ready;
}
/**
@@ -287,18 +328,19 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
AMDGPU_GEM_CREATE_SHADOW);
if (vm->pte_support_ats) {
- init_value = AMDGPU_PTE_SYSTEM;
+ init_value = AMDGPU_PTE_DEFAULT_ATC;
if (level != adev->vm_manager.num_level - 1)
init_value |= AMDGPU_PDE_PTE;
+
}
/* walk over the address space and allocate the page tables */
for (pt_idx = from; pt_idx <= to; ++pt_idx) {
- struct reservation_object *resv = vm->root.bo->tbo.resv;
+ struct reservation_object *resv = vm->root.base.bo->tbo.resv;
struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
struct amdgpu_bo *pt;
- if (!entry->bo) {
+ if (!entry->base.bo) {
r = amdgpu_bo_create(adev,
amdgpu_vm_bo_size(adev, level),
AMDGPU_GPU_PAGE_SIZE, true,
@@ -319,9 +361,14 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
/* Keep a reference to the root directory to avoid
* freeing them up in the wrong order.
*/
- pt->parent = amdgpu_bo_ref(vm->root.bo);
-
- entry->bo = pt;
+ pt->parent = amdgpu_bo_ref(parent->base.bo);
+
+ entry->base.vm = vm;
+ entry->base.bo = pt;
+ list_add_tail(&entry->base.bo_list, &pt->va);
+ spin_lock(&vm->status_lock);
+ list_add(&entry->base.vm_status, &vm->relocated);
+ spin_unlock(&vm->status_lock);
entry->addr = 0;
}
@@ -988,7 +1035,7 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int r;
amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner);
+ amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
r = amdgpu_sync_wait(&sync, true);
amdgpu_sync_free(&sync);
@@ -1007,18 +1054,17 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
static int amdgpu_vm_update_level(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent,
- unsigned level)
+ struct amdgpu_vm_pt *parent)
{
struct amdgpu_bo *shadow;
struct amdgpu_ring *ring = NULL;
uint64_t pd_addr, shadow_addr = 0;
- uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
unsigned count = 0, pt_idx, ndw = 0;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
struct dma_fence *fence = NULL;
+ uint32_t incr;
int r;
@@ -1027,10 +1073,10 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
- shadow = parent->bo->shadow;
+ shadow = parent->base.bo->shadow;
if (vm->use_cpu_for_update) {
- pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
+ pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
if (unlikely(r))
return r;
@@ -1046,7 +1092,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
/* assume the worst case */
ndw += parent->last_entry_used * 6;
- pd_addr = amdgpu_bo_gpu_offset(parent->bo);
+ pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
if (shadow) {
shadow_addr = amdgpu_bo_gpu_offset(shadow);
@@ -1066,12 +1112,17 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
/* walk over the address space and update the directory */
for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
- struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
+ struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
+ struct amdgpu_bo *bo = entry->base.bo;
uint64_t pde, pt;
if (bo == NULL)
continue;
+ spin_lock(&vm->status_lock);
+ list_del_init(&entry->base.vm_status);
+ spin_unlock(&vm->status_lock);
+
pt = amdgpu_bo_gpu_offset(bo);
pt = amdgpu_gart_get_vm_pde(adev, pt);
/* Don't update huge pages here */
@@ -1082,6 +1133,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
pde = pd_addr + pt_idx * 8;
+ incr = amdgpu_bo_size(bo);
if (((last_pde + 8 * count) != pde) ||
((last_pt + incr * count) != pt) ||
(count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
@@ -1109,7 +1161,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
}
if (count) {
- if (vm->root.bo->shadow)
+ if (vm->root.base.bo->shadow)
params.func(&params, last_shadow, last_pt,
count, incr, AMDGPU_PTE_VALID);
@@ -1122,12 +1174,13 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
amdgpu_job_free(job);
} else {
amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
+ amdgpu_sync_resv(adev, &job->sync,
+ parent->base.bo->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM, false);
if (shadow)
amdgpu_sync_resv(adev, &job->sync,
shadow->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
+ AMDGPU_FENCE_OWNER_VM, false);
WARN_ON(params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
@@ -1135,26 +1188,11 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_bo_fence(parent->bo, fence, true);
- dma_fence_put(vm->last_dir_update);
- vm->last_dir_update = dma_fence_get(fence);
- dma_fence_put(fence);
+ amdgpu_bo_fence(parent->base.bo, fence, true);
+ dma_fence_put(vm->last_update);
+ vm->last_update = fence;
}
}
- /*
- * Recurse into the subdirectories. This recursion is harmless because
- * we only have a maximum of 5 layers.
- */
- for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
- struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
-
- if (!entry->bo)
- continue;
-
- r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
- if (r)
- return r;
- }
return 0;
@@ -1170,7 +1208,8 @@ error_free:
*
* Mark all PD level as invalid after an error.
*/
-static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *parent)
{
unsigned pt_idx;
@@ -1181,11 +1220,15 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
- if (!entry->bo)
+ if (!entry->base.bo)
continue;
entry->addr = ~0ULL;
- amdgpu_vm_invalidate_level(entry);
+ spin_lock(&vm->status_lock);
+ if (list_empty(&entry->base.vm_status))
+ list_add(&entry->base.vm_status, &vm->relocated);
+ spin_unlock(&vm->status_lock);
+ amdgpu_vm_invalidate_level(vm, entry);
}
}
@@ -1201,11 +1244,40 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- int r;
+ int r = 0;
- r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
- if (r)
- amdgpu_vm_invalidate_level(&vm->root);
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->relocated)) {
+ struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_bo *bo;
+
+ bo_base = list_first_entry(&vm->relocated,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_base->bo->parent;
+ if (bo) {
+ struct amdgpu_vm_bo_base *parent;
+ struct amdgpu_vm_pt *pt;
+
+ parent = list_first_entry(&bo->va,
+ struct amdgpu_vm_bo_base,
+ bo_list);
+ pt = container_of(parent, struct amdgpu_vm_pt, base);
+
+ r = amdgpu_vm_update_level(adev, vm, pt);
+ if (r) {
+ amdgpu_vm_invalidate_level(vm, &vm->root);
+ return r;
+ }
+ spin_lock(&vm->status_lock);
+ } else {
+ spin_lock(&vm->status_lock);
+ list_del_init(&bo_base->vm_status);
+ }
+ }
+ spin_unlock(&vm->status_lock);
if (vm->use_cpu_for_update) {
/* Flush HDP */
@@ -1236,7 +1308,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
*entry = &p->vm->root;
while ((*entry)->entries) {
idx = addr >> (p->adev->vm_manager.block_size * level--);
- idx %= amdgpu_bo_size((*entry)->bo) / 8;
+ idx %= amdgpu_bo_size((*entry)->base.bo) / 8;
*parent = *entry;
*entry = &(*entry)->entries[idx];
}
@@ -1272,7 +1344,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
p->src ||
!(flags & AMDGPU_PTE_VALID)) {
- dst = amdgpu_bo_gpu_offset(entry->bo);
+ dst = amdgpu_bo_gpu_offset(entry->base.bo);
dst = amdgpu_gart_get_vm_pde(p->adev, dst);
flags = AMDGPU_PTE_VALID;
} else {
@@ -1298,18 +1370,18 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
tmp = p->pages_addr;
p->pages_addr = NULL;
- pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
+ pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
pde = pd_addr + (entry - parent->entries) * 8;
amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
p->pages_addr = tmp;
} else {
- if (parent->bo->shadow) {
- pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow);
+ if (parent->base.bo->shadow) {
+ pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
pde = pd_addr + (entry - parent->entries) * 8;
amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
}
- pd_addr = amdgpu_bo_gpu_offset(parent->bo);
+ pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
pde = pd_addr + (entry - parent->entries) * 8;
amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
}
@@ -1360,7 +1432,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
if (entry->addr & AMDGPU_PDE_PTE)
continue;
- pt = entry->bo;
+ pt = entry->base.bo;
if (use_cpu_update) {
pe_start = (unsigned long)amdgpu_bo_kptr(pt);
} else {
@@ -1396,8 +1468,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags)
{
- int r;
-
/**
* The MC L1 TLB supports variable sized pages, based on a fragment
* field in the PTE. When this field is set to a non-zero value, page
@@ -1416,39 +1486,38 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* Userspace can support this by aligning virtual base address and
* allocation size to the fragment size.
*/
- unsigned pages_per_frag = params->adev->vm_manager.fragment_size;
- uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag);
- uint64_t frag_align = 1 << pages_per_frag;
-
- uint64_t frag_start = ALIGN(start, frag_align);
- uint64_t frag_end = end & ~(frag_align - 1);
+ unsigned max_frag = params->adev->vm_manager.fragment_size;
+ int r;
/* system pages are non continuously */
- if (params->src || !(flags & AMDGPU_PTE_VALID) ||
- (frag_start >= frag_end))
+ if (params->src || !(flags & AMDGPU_PTE_VALID))
return amdgpu_vm_update_ptes(params, start, end, dst, flags);
- /* handle the 4K area at the beginning */
- if (start != frag_start) {
- r = amdgpu_vm_update_ptes(params, start, frag_start,
- dst, flags);
+ while (start != end) {
+ uint64_t frag_flags, frag_end;
+ unsigned frag;
+
+ /* This intentionally wraps around if no bit is set */
+ frag = min((unsigned)ffs(start) - 1,
+ (unsigned)fls64(end - start) - 1);
+ if (frag >= max_frag) {
+ frag_flags = AMDGPU_PTE_FRAG(max_frag);
+ frag_end = end & ~((1ULL << max_frag) - 1);
+ } else {
+ frag_flags = AMDGPU_PTE_FRAG(frag);
+ frag_end = start + (1 << frag);
+ }
+
+ r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
+ flags | frag_flags);
if (r)
return r;
- dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
- }
- /* handle the area in the middle */
- r = amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
- flags | frag_flags);
- if (r)
- return r;
-
- /* handle the 4K area at the end */
- if (frag_end != end) {
- dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
- r = amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
+ dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
+ start = frag_end;
}
- return r;
+
+ return 0;
}
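
The rewritten fragment loop above picks, at every step, the largest power-of-two block allowed by both the current alignment of start and the number of pages left, clamped to the configured maximum fragment size. A standalone walk of that selection (the page numbers and max_frag below are made up):

#include <stdint.h>
#include <stdio.h>

static unsigned int ffs_u64(uint64_t v)   { return v ? __builtin_ctzll(v) + 1 : 0; }
static unsigned int fls64_u64(uint64_t v) { return v ? 64 - __builtin_clzll(v) : 0; }

static void walk_frags(uint64_t start, uint64_t end, unsigned int max_frag)
{
	while (start != end) {
		/* largest power-of-two block usable at this position */
		unsigned int frag = ffs_u64(start) - 1;
		unsigned int left = fls64_u64(end - start) - 1;
		uint64_t frag_end;

		if (left < frag)
			frag = left;
		if (frag >= max_frag) {
			frag = max_frag;
			frag_end = end & ~((1ULL << max_frag) - 1);
		} else {
			frag_end = start + (1ULL << frag);
		}

		printf("update [%llu, %llu) with fragment 2^%u\n",
		       (unsigned long long)start,
		       (unsigned long long)frag_end, frag);
		start = frag_end;
	}
}

int main(void)
{
	walk_frags(3, 530, 9);	/* pages 3..529, 2^9 pages max per fragment */
	return 0;
}
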
/**
@@ -1456,7 +1525,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
*
* @adev: amdgpu_device pointer
* @exclusive: fence we need to sync to
- * @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @start: start of mapped range
@@ -1470,7 +1538,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
- uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
@@ -1488,7 +1555,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
- params.src = src;
/* sync to everything on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
@@ -1517,10 +1583,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
nptes = last - start + 1;
/*
- * reserve space for one command every (1 << BLOCK_SIZE)
+ * reserve space for two commands every (1 << BLOCK_SIZE)
* entries or 2k dwords (whatever is smaller)
+ *
+ * The second command is for the shadow pagetables.
*/
- ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;
+ ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
/* padding, etc. */
ndw = 64;
@@ -1528,15 +1596,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* one PDE write for each huge page */
ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
- if (src) {
- /* only copy commands needed */
- ndw += ncmds * 7;
-
- params.func = amdgpu_vm_do_copy_ptes;
-
- } else if (pages_addr) {
+ if (pages_addr) {
/* copy commands needed */
- ndw += ncmds * 7;
+ ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
/* and also PTEs */
ndw += nptes * 2;
@@ -1545,10 +1607,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
} else {
/* set page commands needed */
- ndw += ncmds * 10;
+ ndw += ncmds * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
- /* two extra commands for begin/end of fragment */
- ndw += 2 * 10;
+ /* extra commands for begin/end fragments */
+ ndw += 2 * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw
+ * adev->vm_manager.fragment_size;
params.func = amdgpu_vm_do_set_ptes;
}
@@ -1559,7 +1622,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
params.ib = &job->ibs[0];
- if (!src && pages_addr) {
+ if (pages_addr) {
uint64_t *pte;
unsigned i;
@@ -1580,12 +1643,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
- owner);
+ r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
+ owner, false);
if (r)
goto error_free;
- r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
+ r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
if (r)
goto error_free;
@@ -1600,14 +1663,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_bo_fence(vm->root.bo, f, true);
+ amdgpu_bo_fence(vm->root.base.bo, f, true);
dma_fence_put(*fence);
*fence = f;
return 0;
error_free:
amdgpu_job_free(job);
- amdgpu_vm_invalidate_level(&vm->root);
+ amdgpu_vm_invalidate_level(vm, &vm->root);
return r;
}
@@ -1636,7 +1699,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct drm_mm_node *nodes,
struct dma_fence **fence)
{
- uint64_t pfn, src = 0, start = mapping->start;
+ unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size;
+ uint64_t pfn, start = mapping->start;
int r;
/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -1670,6 +1734,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
do {
+ dma_addr_t *dma_addr = NULL;
uint64_t max_entries;
uint64_t addr, last;
@@ -1683,16 +1748,32 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
if (pages_addr) {
+ uint64_t count;
+
max_entries = min(max_entries, 16ull * 1024ull);
- addr = 0;
+ for (count = 1; count < max_entries; ++count) {
+ uint64_t idx = pfn + count;
+
+ if (pages_addr[idx] !=
+ (pages_addr[idx - 1] + PAGE_SIZE))
+ break;
+ }
+
+ if (count < min_linear_pages) {
+ addr = pfn << PAGE_SHIFT;
+ dma_addr = pages_addr;
+ } else {
+ addr = pages_addr[pfn];
+ max_entries = count;
+ }
+
} else if (flags & AMDGPU_PTE_VALID) {
addr += adev->vm_manager.vram_base_offset;
+ addr += pfn << PAGE_SHIFT;
}
- addr += pfn << PAGE_SHIFT;
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, exclusive,
- src, pages_addr, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
start, last, flags, addr,
fence);
if (r)
@@ -1730,7 +1811,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
dma_addr_t *pages_addr = NULL;
struct ttm_mem_reg *mem;
struct drm_mm_node *nodes;
- struct dma_fence *exclusive;
+ struct dma_fence *exclusive, **last_update;
uint64_t flags;
int r;
@@ -1756,38 +1837,43 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
else
flags = 0x0;
- spin_lock(&vm->status_lock);
- if (!list_empty(&bo_va->base.vm_status))
+ if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
+ last_update = &vm->last_update;
+ else
+ last_update = &bo_va->last_pt_update;
+
+ if (!clear && bo_va->base.moved) {
+ bo_va->base.moved = false;
list_splice_init(&bo_va->valids, &bo_va->invalids);
- spin_unlock(&vm->status_lock);
+
+ } else if (bo_va->cleared != clear) {
+ list_splice_init(&bo_va->valids, &bo_va->invalids);
+ }
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
mapping, flags, nodes,
- &bo_va->last_pt_update);
+ last_update);
if (r)
return r;
}
- if (trace_amdgpu_vm_bo_mapping_enabled()) {
- list_for_each_entry(mapping, &bo_va->valids, list)
- trace_amdgpu_vm_bo_mapping(mapping);
-
- list_for_each_entry(mapping, &bo_va->invalids, list)
- trace_amdgpu_vm_bo_mapping(mapping);
+ if (vm->use_cpu_for_update) {
+ /* Flush HDP */
+ mb();
+ amdgpu_gart_flush_gpu_tlb(adev, 0);
}
spin_lock(&vm->status_lock);
- list_splice_init(&bo_va->invalids, &bo_va->valids);
list_del_init(&bo_va->base.vm_status);
- if (clear)
- list_add(&bo_va->base.vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
- if (vm->use_cpu_for_update) {
- /* Flush HDP */
- mb();
- amdgpu_gart_flush_gpu_tlb(adev, 0);
+ list_splice_init(&bo_va->invalids, &bo_va->valids);
+ bo_va->cleared = clear;
+
+ if (trace_amdgpu_vm_bo_mapping_enabled()) {
+ list_for_each_entry(mapping, &bo_va->valids, list)
+ trace_amdgpu_vm_bo_mapping(mapping);
}
return 0;
@@ -1895,7 +1981,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
*/
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- struct reservation_object *resv = vm->root.bo->tbo.resv;
+ struct reservation_object *resv = vm->root.base.bo->tbo.resv;
struct dma_fence *excl, **shared;
unsigned i, shared_count;
int r;
@@ -1951,9 +2037,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
list_del(&mapping->list);
if (vm->pte_support_ats)
- init_pte_value = AMDGPU_PTE_SYSTEM;
+ init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
mapping->start, mapping->last,
init_pte_value, 0, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -1975,29 +2061,35 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_clear_moved - clear moved BOs in the PT
+ * amdgpu_vm_handle_moved - handle moved BOs in the PT
*
* @adev: amdgpu_device pointer
* @vm: requested vm
*
- * Make sure all moved BOs are cleared in the PT.
+ * Make sure all BOs which are moved are updated in the PTs.
* Returns 0 for success.
*
- * PTs have to be reserved and mutex must be locked!
+ * PTs have to be reserved!
*/
-int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync)
+int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
{
- struct amdgpu_bo_va *bo_va = NULL;
+ bool clear;
int r = 0;
spin_lock(&vm->status_lock);
while (!list_empty(&vm->moved)) {
+ struct amdgpu_bo_va *bo_va;
+
bo_va = list_first_entry(&vm->moved,
struct amdgpu_bo_va, base.vm_status);
spin_unlock(&vm->status_lock);
- r = amdgpu_vm_bo_update(adev, bo_va, true);
+ /* Per VM BOs never need to be cleared in the page tables */
+ clear = bo_va->base.bo->tbo.resv != vm->root.base.bo->tbo.resv;
+
+ r = amdgpu_vm_bo_update(adev, bo_va, clear);
if (r)
return r;
@@ -2005,9 +2097,6 @@ int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
spin_unlock(&vm->status_lock);
- if (bo_va)
- r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
-
return r;
}
@@ -2049,6 +2138,39 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
return bo_va;
}
+
+/**
+ * amdgpu_vm_bo_insert_map - insert a new mapping
+ *
+ * @adev: amdgpu_device pointer
+ * @bo_va: bo_va to store the address
+ * @mapping: the mapping to insert
+ *
+ * Insert a new mapping into all structures.
+ */
+static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ struct amdgpu_bo_va_mapping *mapping)
+{
+ struct amdgpu_vm *vm = bo_va->base.vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+
+ mapping->bo_va = bo_va;
+ list_add(&mapping->list, &bo_va->invalids);
+ amdgpu_vm_it_insert(mapping, &vm->va);
+
+ if (mapping->flags & AMDGPU_PTE_PRT)
+ amdgpu_vm_prt_get(adev);
+
+ if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+ spin_lock(&vm->status_lock);
+ if (list_empty(&bo_va->base.vm_status))
+ list_add(&bo_va->base.vm_status, &vm->moved);
+ spin_unlock(&vm->status_lock);
+ }
+ trace_amdgpu_vm_bo_map(bo_va, mapping);
+}
+
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@@ -2100,17 +2222,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (!mapping)
return -ENOMEM;
- INIT_LIST_HEAD(&mapping->list);
mapping->start = saddr;
mapping->last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
- list_add(&mapping->list, &bo_va->invalids);
- amdgpu_vm_it_insert(mapping, &vm->va);
-
- if (flags & AMDGPU_PTE_PRT)
- amdgpu_vm_prt_get(adev);
+ amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
return 0;
}
@@ -2137,7 +2254,6 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo = bo_va->base.bo;
- struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
int r;
@@ -2171,11 +2287,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
mapping->offset = offset;
mapping->flags = flags;
- list_add(&mapping->list, &bo_va->invalids);
- amdgpu_vm_it_insert(mapping, &vm->va);
-
- if (flags & AMDGPU_PTE_PRT)
- amdgpu_vm_prt_get(adev);
+ amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
return 0;
}
@@ -2221,6 +2333,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
+ mapping->bo_va = NULL;
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
if (valid)
@@ -2306,6 +2419,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
if (tmp->last > eaddr)
tmp->last = eaddr;
+ tmp->bo_va = NULL;
list_add(&tmp->list, &vm->freed);
trace_amdgpu_vm_bo_unmap(NULL, tmp);
}
@@ -2332,6 +2446,19 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_bo_lookup_mapping - find mapping by address
+ *
+ * @vm: the requested VM
+ * @addr: the address to look up
+ *
+ * Find a mapping by its address.
+ */
+struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+ uint64_t addr)
+{
+ return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
+}
+
+/**
* amdgpu_vm_bo_rmv - remove a bo to a specific vm
*
* @adev: amdgpu_device pointer
@@ -2356,6 +2483,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
+ mapping->bo_va = NULL;
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
list_add(&mapping->list, &vm->freed);
}
@@ -2380,15 +2508,36 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
* Mark @bo as invalid.
*/
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo)
+ struct amdgpu_bo *bo, bool evicted)
{
struct amdgpu_vm_bo_base *bo_base;
list_for_each_entry(bo_base, &bo->va, bo_list) {
+ struct amdgpu_vm *vm = bo_base->vm;
+
+ bo_base->moved = true;
+ if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+ spin_lock(&bo_base->vm->status_lock);
+ if (bo->tbo.type == ttm_bo_type_kernel)
+ list_move(&bo_base->vm_status, &vm->evicted);
+ else
+ list_move_tail(&bo_base->vm_status,
+ &vm->evicted);
+ spin_unlock(&bo_base->vm->status_lock);
+ continue;
+ }
+
+ if (bo->tbo.type == ttm_bo_type_kernel) {
+ spin_lock(&bo_base->vm->status_lock);
+ if (list_empty(&bo_base->vm_status))
+ list_add(&bo_base->vm_status, &vm->relocated);
+ spin_unlock(&bo_base->vm->status_lock);
+ continue;
+ }
+
spin_lock(&bo_base->vm->status_lock);
if (list_empty(&bo_base->vm_status))
- list_add(&bo_base->vm_status,
- &bo_base->vm->moved);
+ list_add(&bo_base->vm_status, &vm->moved);
spin_unlock(&bo_base->vm->status_lock);
}
}
@@ -2412,7 +2561,8 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
* @adev: amdgpu_device pointer
* @fragment_size_default: the default fragment size if it's set auto
*/
-void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default)
+void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
+ uint32_t fragment_size_default)
{
if (amdgpu_vm_fragment_size == -1)
adev->vm_manager.fragment_size = fragment_size_default;
@@ -2426,7 +2576,8 @@ void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_s
* @adev: amdgpu_device pointer
* @vm_size: the default vm size if it's set auto
*/
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default)
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
+ uint32_t fragment_size_default)
{
/* adjust vm size firstly */
if (amdgpu_vm_size == -1)
@@ -2458,7 +2609,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_
* Init @vm fields.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int vm_context)
+ int vm_context, unsigned int pasid)
{
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
AMDGPU_VM_PTE_COUNT(adev) * 8);
@@ -2474,8 +2625,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
spin_lock_init(&vm->status_lock);
+ INIT_LIST_HEAD(&vm->evicted);
+ INIT_LIST_HEAD(&vm->relocated);
INIT_LIST_HEAD(&vm->moved);
- INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
/* create scheduler entity for page table updates */
@@ -2497,7 +2649,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (adev->asic_type == CHIP_RAVEN) {
vm->pte_support_ats = true;
- init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
+ init_pde_value = AMDGPU_PTE_DEFAULT_ATC
+ | AMDGPU_PDE_PTE;
+
}
} else
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2506,7 +2660,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
"CPU update of VM recommended only for large BAR system\n");
- vm->last_dir_update = NULL;
+ vm->last_update = NULL;
flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED;
@@ -2519,30 +2673,47 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
AMDGPU_GEM_DOMAIN_VRAM,
flags,
- NULL, NULL, init_pde_value, &vm->root.bo);
+ NULL, NULL, init_pde_value, &vm->root.base.bo);
if (r)
goto error_free_sched_entity;
- r = amdgpu_bo_reserve(vm->root.bo, false);
- if (r)
- goto error_free_root;
-
- vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+ vm->root.base.vm = vm;
+ list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
+ INIT_LIST_HEAD(&vm->root.base.vm_status);
if (vm->use_cpu_for_update) {
- r = amdgpu_bo_kmap(vm->root.bo, NULL);
+ r = amdgpu_bo_reserve(vm->root.base.bo, false);
+ if (r)
+ goto error_free_root;
+
+ r = amdgpu_bo_kmap(vm->root.base.bo, NULL);
+ amdgpu_bo_unreserve(vm->root.base.bo);
if (r)
goto error_free_root;
}
- amdgpu_bo_unreserve(vm->root.bo);
+ if (pasid) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+ r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
+ GFP_ATOMIC);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ if (r < 0)
+ goto error_free_root;
+
+ vm->pasid = pasid;
+ }
+
+ INIT_KFIFO(vm->faults);
+ vm->fault_credit = 16;
return 0;
error_free_root:
- amdgpu_bo_unref(&vm->root.bo->shadow);
- amdgpu_bo_unref(&vm->root.bo);
- vm->root.bo = NULL;
+ amdgpu_bo_unref(&vm->root.base.bo->shadow);
+ amdgpu_bo_unref(&vm->root.base.bo);
+ vm->root.base.bo = NULL;
error_free_sched_entity:
amd_sched_entity_fini(&ring->sched, &vm->entity);
@@ -2561,9 +2732,11 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
{
unsigned i;
- if (level->bo) {
- amdgpu_bo_unref(&level->bo->shadow);
- amdgpu_bo_unref(&level->bo);
+ if (level->base.bo) {
+ list_del(&level->base.bo_list);
+ list_del(&level->base.vm_status);
+ amdgpu_bo_unref(&level->base.bo->shadow);
+ amdgpu_bo_unref(&level->base.bo);
}
if (level->entries)
@@ -2586,7 +2759,21 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
- int i;
+ struct amdgpu_bo *root;
+ u64 fault;
+ int i, r;
+
+ /* Clear pending page faults from IH when the VM is destroyed */
+ while (kfifo_get(&vm->faults, &fault))
+ amdgpu_ih_clear_fault(adev, fault);
+
+ if (vm->pasid) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+ idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ }
amd_sched_entity_fini(vm->entity.sched, &vm->entity);
@@ -2609,13 +2796,51 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
}
- amdgpu_vm_free_levels(&vm->root);
- dma_fence_put(vm->last_dir_update);
+ root = amdgpu_bo_ref(vm->root.base.bo);
+ r = amdgpu_bo_reserve(root, true);
+ if (r) {
+ dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
+ } else {
+ amdgpu_vm_free_levels(&vm->root);
+ amdgpu_bo_unreserve(root);
+ }
+ amdgpu_bo_unref(&root);
+ dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
amdgpu_vm_free_reserved_vmid(adev, vm, i);
}
/**
+ * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: PASID to identify the VM
+ *
+ * This function is expected to be called in interrupt context. Returns
+ * true if there was fault credit, false otherwise
+ */
+bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
+ unsigned int pasid)
+{
+ struct amdgpu_vm *vm;
+
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ if (!vm)
+ /* VM not found, can't track fault credit */
+ return true;
+
+ /* No lock needed. Only accessed by the IRQ handler */
+ if (!vm->fault_credit)
+ /* Too many faults in this VM */
+ return false;
+
+ vm->fault_credit--;
+ return true;
+}
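
amdgpu_vm_pasid_fault_credit() charges each non-retry fault against a small per-VM budget, so a fault storm stops being processed once fault_credit reaches zero. A minimal model of the throttle (illustrative types, no locking, matching the IRQ-only driver path):

#include <stdbool.h>
#include <stdio.h>

struct vm { unsigned int fault_credit; };

/* One call per incoming non-retry fault; true = process it, false = drop it. */
static bool fault_credit_take(struct vm *vm)
{
	if (!vm)
		return true;	/* unknown VM: nothing to throttle against */
	if (!vm->fault_credit)
		return false;	/* budget exhausted */
	vm->fault_credit--;
	return true;
}

int main(void)
{
	struct vm vm = { .fault_credit = 16 };
	unsigned int processed = 0;
	int i;

	for (i = 0; i < 100; i++)	/* a storm of 100 faults */
		if (fault_credit_take(&vm))
			processed++;
	printf("processed %u of 100 faults\n", processed);	/* prints 16 */
	return 0;
}
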
+
+/**
* amdgpu_vm_manager_init - init the VM manager
*
* @adev: amdgpu_device pointer
@@ -2668,6 +2893,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
adev->vm_manager.vm_update_mode = 0;
#endif
+ idr_init(&adev->vm_manager.pasid_idr);
+ spin_lock_init(&adev->vm_manager.pasid_lock);
}
/**
@@ -2681,6 +2908,9 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
unsigned i, j;
+ WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
+ idr_destroy(&adev->vm_manager.pasid_idr);
+
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vm_id_manager *id_mgr =
&adev->vm_manager.id_mgr[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 6716355403ec..aa914256b4bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -25,6 +25,7 @@
#define __AMDGPU_VM_H__
#include <linux/rbtree.h>
+#include <linux/idr.h>
#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
@@ -72,6 +73,16 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
+/* For Raven */
+#define AMDGPU_MTYPE_CC 2
+
+#define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \
+ | AMDGPU_PTE_SNOOPED \
+ | AMDGPU_PTE_EXECUTABLE \
+ | AMDGPU_PTE_READABLE \
+ | AMDGPU_PTE_WRITEABLE \
+ | AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC))
+
/* How to programm VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1
@@ -105,17 +116,24 @@ struct amdgpu_vm_bo_base {
/* protected by spinlock */
struct list_head vm_status;
+
+ /* protected by the BO being reserved */
+ bool moved;
};
struct amdgpu_vm_pt {
- struct amdgpu_bo *bo;
- uint64_t addr;
+ struct amdgpu_vm_bo_base base;
+ uint64_t addr;
/* array of page tables, one for each directory entry */
- struct amdgpu_vm_pt *entries;
- unsigned last_entry_used;
+ struct amdgpu_vm_pt *entries;
+ unsigned last_entry_used;
};
+#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
+#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
+#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
+
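
The AMDGPU_VM_FAULT* macros above pack a 16-bit PASID above a 4K-aligned, 48-bit fault address into a single u64 kfifo entry. A quick standalone check of that packing, spelled with stdint types rather than the kernel's u64:

#include <stdint.h>
#include <stdio.h>

#define VM_FAULT(pasid, addr)	((((uint64_t)(pasid)) << 48) | (addr))
#define VM_FAULT_PASID(fault)	((uint64_t)(fault) >> 48)
#define VM_FAULT_ADDR(fault)	((uint64_t)(fault) & 0xfffffffff000ULL)

int main(void)
{
	/* 16 bits of PASID on top, a 4K-aligned 48-bit address below it */
	uint64_t key = VM_FAULT(0x1234, 0x0000123456789000ULL);

	printf("pasid 0x%llx addr 0x%llx\n",
	       (unsigned long long)VM_FAULT_PASID(key),
	       (unsigned long long)VM_FAULT_ADDR(key));
	return 0;
}
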
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root_cached va;
@@ -123,19 +141,21 @@ struct amdgpu_vm {
/* protecting invalidated */
spinlock_t status_lock;
+ /* BOs that need to be validated */
+ struct list_head evicted;
+
+ /* PT BOs which have been relocated and whose parent needs an update */
+ struct list_head relocated;
+
/* BOs moved, but not yet updated in the PT */
struct list_head moved;
- /* BOs cleared in the PT because of a move */
- struct list_head cleared;
-
/* BO mappings freed, but not yet updated in the PT */
struct list_head freed;
/* contains the page directory */
struct amdgpu_vm_pt root;
- struct dma_fence *last_dir_update;
- uint64_t last_eviction_counter;
+ struct dma_fence *last_update;
/* protecting freed */
spinlock_t freed_lock;
@@ -143,8 +163,9 @@ struct amdgpu_vm {
/* Scheduler entity for page table updates */
struct amd_sched_entity entity;
- /* client id */
+ /* client id and PASID (TODO: replace client_id with PASID) */
u64 client_id;
+ unsigned int pasid;
/* dedicated to vm */
struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS];
@@ -153,6 +174,12 @@ struct amdgpu_vm {
/* Flag to indicate ATS support from PTE for GFX9 */
bool pte_support_ats;
+
+ /* Up to 128 pending retry page faults */
+ DECLARE_KFIFO(faults, u64, 128);
+
+ /* Limit non-retry fault storms */
+ unsigned int fault_credit;
};
struct amdgpu_vm_id {
@@ -215,16 +242,27 @@ struct amdgpu_vm_manager {
* BIT1[= 0] Compute updated by SDMA [= 1] by CPU
*/
int vm_update_mode;
+
+ /* PASID to VM mapping, will be used in interrupt context to
+ * look up VM of a page fault
+ */
+ struct idr pasid_idr;
+ spinlock_t pasid_lock;
};
+int amdgpu_vm_alloc_pasid(unsigned int bits);
+void amdgpu_vm_free_pasid(unsigned int pasid);
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int vm_context);
+ int vm_context, unsigned int pasid);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
+ unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
+bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
@@ -243,13 +281,13 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
-int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync);
+int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo);
+ struct amdgpu_bo *bo, bool evicted);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
@@ -269,6 +307,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
uint64_t saddr, uint64_t size);
+struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+ uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index d69aa2e179bb..69500a8b4e2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1343,8 +1343,11 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
idx = 0x80;
str = CSTR(idx);
- if (*str != '\0')
+ if (*str != '\0') {
pr_info("ATOM BIOS: %s\n", str);
+ strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
+ }
+
return ctx;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index ddd8045accf3..a39170991afe 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -140,6 +140,7 @@ struct atom_context {
int io_mode;
uint32_t *scratch;
int scratch_size_bytes;
+ char vbios_version[20];
};
extern int amdgpu_atom_debug;
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index cb508a211b2f..68b505c768ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -307,7 +307,6 @@ static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
-static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
@@ -883,8 +882,9 @@ static int ci_power_control_set_level(struct amdgpu_device *adev)
return ret;
}
-static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
+static void ci_dpm_powergate_uvd(void *handle, bool gate)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
pi->uvd_power_gated = gate;
@@ -901,8 +901,9 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
}
}
-static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
+static bool ci_dpm_vblank_too_short(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
@@ -1210,11 +1211,12 @@ static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
}
}
-static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+static int ci_dpm_get_fan_speed_percent(void *handle,
u32 *speed)
{
u32 duty, duty100;
u64 tmp64;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.no_fan)
return -ENOENT;
@@ -1237,12 +1239,13 @@ static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+static int ci_dpm_set_fan_speed_percent(void *handle,
u32 speed)
{
u32 tmp;
u32 duty, duty100;
u64 tmp64;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (adev->pm.no_fan)
@@ -1271,8 +1274,10 @@ static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
-static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+static void ci_dpm_set_fan_control_mode(void *handle, u32 mode)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
switch (mode) {
case AMD_FAN_CTRL_NONE:
if (adev->pm.dpm.fan.ucode_fan_control)
@@ -1292,8 +1297,9 @@ static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
}
}
-static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+static u32 ci_dpm_get_fan_control_mode(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (pi->fan_is_controlled_by_smc)
@@ -4378,9 +4384,10 @@ static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
}
-static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
+static int ci_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
u32 tmp, levels, i;
int ret;
@@ -5291,8 +5298,9 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
adev->pm.dpm.requested_ps = &pi->requested_rps;
}
-static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
+static int ci_dpm_pre_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
struct amdgpu_ps *new_ps = &requested_ps;
@@ -5304,8 +5312,9 @@ static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
return 0;
}
-static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
+static void ci_dpm_post_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
@@ -5479,8 +5488,9 @@ static void ci_dpm_disable(struct amdgpu_device *adev)
ci_update_current_ps(adev, boot_ps);
}
-static int ci_dpm_set_power_state(struct amdgpu_device *adev)
+static int ci_dpm_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
struct amdgpu_ps *old_ps = &pi->current_rps;
@@ -5551,8 +5561,10 @@ static void ci_dpm_reset_asic(struct amdgpu_device *adev)
}
#endif
-static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
+static void ci_dpm_display_configuration_changed(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
ci_program_display_gap(adev);
}
@@ -6105,9 +6117,10 @@ static int ci_dpm_init(struct amdgpu_device *adev)
}
static void
-ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+ci_dpm_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *rps = &pi->current_rps;
u32 sclk = ci_get_average_sclk_freq(adev);
@@ -6131,12 +6144,13 @@ ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
seq_printf(m, "GPU load: %u %%\n", activity_percent);
}
-static void ci_dpm_print_power_state(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
+static void ci_dpm_print_power_state(void *handle, void *current_ps)
{
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
struct ci_ps *ps = ci_get_ps(rps);
struct ci_pl *pl;
int i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_dpm_print_class_info(rps->class, rps->class2);
amdgpu_dpm_print_cap_info(rps->caps);
@@ -6158,20 +6172,23 @@ static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
(ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
}
-static int ci_check_state_equal(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
+static int ci_check_state_equal(void *handle,
+ void *current_ps,
+ void *request_ps,
bool *equal)
{
struct ci_ps *ci_cps;
struct ci_ps *ci_rps;
int i;
+ struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;
- ci_cps = ci_get_ps(cps);
- ci_rps = ci_get_ps(rps);
+ ci_cps = ci_get_ps((struct amdgpu_ps *)cps);
+ ci_rps = ci_get_ps((struct amdgpu_ps *)rps);
if (ci_cps == NULL) {
*equal = false;
@@ -6199,8 +6216,9 @@ static int ci_check_state_equal(struct amdgpu_device *adev,
return 0;
}
-static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+static u32 ci_dpm_get_sclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
@@ -6210,8 +6228,9 @@ static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}
-static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+static u32 ci_dpm_get_mclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
@@ -6222,10 +6241,11 @@ static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
}
/* get temperature in millidegrees */
-static int ci_dpm_get_temp(struct amdgpu_device *adev)
+static int ci_dpm_get_temp(void *handle)
{
u32 temp;
int actual_temp = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
@@ -6261,7 +6281,6 @@ static int ci_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- ci_dpm_set_dpm_funcs(adev);
ci_dpm_set_irq_funcs(adev);
return 0;
@@ -6346,7 +6365,6 @@ static int ci_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
mutex_lock(&adev->pm.mutex);
- amdgpu_pm_sysfs_fini(adev);
ci_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
@@ -6551,9 +6569,10 @@ static int ci_dpm_set_powergating_state(void *handle,
return 0;
}
-static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
+static int ci_dpm_print_clock_levels(void *handle,
enum pp_clock_type type, char *buf)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
@@ -6618,9 +6637,10 @@ static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
return size;
}
-static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
+static int ci_dpm_force_clock_level(void *handle,
enum pp_clock_type type, uint32_t mask)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
@@ -6664,8 +6684,9 @@ static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
+static int ci_dpm_get_sclk_od(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
struct ci_single_dpm_table *golden_sclk_table =
@@ -6680,8 +6701,9 @@ static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
return value;
}
-static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
+static int ci_dpm_set_sclk_od(void *handle, uint32_t value)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
struct ci_single_dpm_table *golden_sclk_table =
@@ -6698,8 +6720,9 @@ static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
return 0;
}
-static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
+static int ci_dpm_get_mclk_od(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
struct ci_single_dpm_table *golden_mclk_table =
@@ -6714,8 +6737,9 @@ static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
return value;
}
-static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
+static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
struct ci_single_dpm_table *golden_mclk_table =
@@ -6732,9 +6756,10 @@ static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
return 0;
}
-static int ci_dpm_get_power_profile_state(struct amdgpu_device *adev,
+static int ci_dpm_get_power_profile_state(void *handle,
struct amd_pp_profile *query)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (!pi || !query)
@@ -6851,9 +6876,10 @@ static int ci_set_power_profile_state(struct amdgpu_device *adev,
return result;
}
-static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
+static int ci_dpm_set_power_profile_state(void *handle,
struct amd_pp_profile *request)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
int ret = -1;
@@ -6906,9 +6932,10 @@ static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
+static int ci_dpm_reset_power_profile_state(void *handle,
struct amd_pp_profile *request)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (!pi || !request)
@@ -6927,9 +6954,10 @@ static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
return -EINVAL;
}
-static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
+static int ci_dpm_switch_power_profile(void *handle,
enum amd_pp_profile_type type)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amd_pp_profile request = {0};
@@ -6944,11 +6972,12 @@ static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_read_sensor(struct amdgpu_device *adev, int idx,
+static int ci_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
u32 activity_percent = 50;
int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
@@ -7003,7 +7032,7 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
.set_powergating_state = ci_dpm_set_powergating_state,
};
-static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
+const struct amd_pm_funcs ci_dpm_funcs = {
.get_temperature = &ci_dpm_get_temp,
.pre_set_power_state = &ci_dpm_pre_set_power_state,
.set_power_state = &ci_dpm_set_power_state,
@@ -7035,12 +7064,6 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
.read_sensor = ci_dpm_read_sensor,
};
-static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
-{
- if (adev->pm.funcs == NULL)
- adev->pm.funcs = &ci_dpm_funcs;
-}
-
static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
.set = ci_dpm_set_interrupt_state,
.process = ci_dpm_process_interrupt,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
index b1c8e7b446ea..c7b4349f6319 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
@@ -26,5 +26,6 @@
extern const struct amd_ip_funcs ci_dpm_ip_funcs;
extern const struct amd_ip_funcs kv_dpm_ip_funcs;
-
+extern const struct amd_pm_funcs ci_dpm_funcs;
+extern const struct amd_pm_funcs kv_dpm_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index b8918432c572..a870b354e3f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -228,6 +228,34 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
* [127:96] - reserved
*/
+/**
+ * cik_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u16 pasid;
+
+ switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
+ case 146:
+ case 147:
+ pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
+ if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
+ return true;
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ adev->irq.ih.rptr += 16;
+ return false;
+}
+
/**
* cik_ih_decode_iv - decode an interrupt vector
*
@@ -433,6 +461,7 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cik_ih_funcs = {
.get_wptr = cik_ih_get_wptr,
+ .prescreen_iv = cik_ih_prescreen_iv,
.decode_iv = cik_ih_decode_iv,
.set_rptr = cik_ih_set_rptr
};
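The prescreen hook above only decides whether an IV entry is worth decoding; the actual rate limiting lives in amdgpu_vm_pasid_fault_credit(), whose body is not part of this hunk. A plausible sketch, assuming it consumes the fault_credit counter added to struct amdgpu_vm earlier (illustrative only, not the patch's actual code):

/* Sketch: one credit per non-retry fault, so a fault storm from a
 * single PASID stops flooding the IH ring once credits run out.
 */
bool example_pasid_fault_credit(struct amdgpu_device *adev,
				unsigned int pasid)
{
	struct amdgpu_vm *vm;

	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (!vm) {
		/* no VM registered: process (and report) the fault */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return true;
	}
	if (!vm->fault_credit) {
		/* credits exhausted: drop further faults silently */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return false;
	}
	vm->fault_credit--;
	spin_unlock(&adev->vm_manager.pasid_lock);
	return true;
}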
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index f508f4d01e4a..60cecd117705 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1387,8 +1387,13 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
.copy_pte = cik_sdma_vm_copy_pte,
+
.write_pte = cik_sdma_vm_write_pte,
+
+ .set_max_nums_pte_pde = 0x1fffff >> 3,
+ .set_pte_pde_num_dw = 10,
.set_pte_pde = cik_sdma_vm_set_pte_pde,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 0c1209cdd1cb..fa61d649bb44 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -208,6 +208,34 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
}
/**
+ * cz_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u16 pasid;
+
+ switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
+ case 146:
+ case 147:
+ pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
+ if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
+ return true;
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ adev->irq.ih.rptr += 16;
+ return false;
+}
+
+/**
* cz_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -414,6 +442,7 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cz_ih_funcs = {
.get_wptr = cz_ih_get_wptr,
+ .prescreen_iv = cz_ih_prescreen_iv,
.decode_iv = cz_ih_decode_iv,
.set_rptr = cz_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index b9ee9073cb0d..a8829af120c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -288,7 +288,7 @@ dce_virtual_encoder(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -298,7 +298,7 @@ dce_virtual_encoder(struct drm_connector *connector)
/* pick the first one */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fc260c13b1da..b8002ac3e536 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
@@ -125,24 +126,39 @@ MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
@@ -918,8 +934,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ }
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
@@ -929,8 +954,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ }
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.me_fw);
@@ -941,8 +975,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ }
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
@@ -1012,8 +1055,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ }
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
@@ -1025,8 +1077,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if ((adev->asic_type != CHIP_STONEY) &&
(adev->asic_type != CHIP_TOPAZ)) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ }
if (!err) {
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
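The PFP, ME, CE, MEC and MEC2 blocks above all repeat the same two-step request: try the new "<chip>_<name>_2.bin" image on Polaris parts and fall back to the legacy name on -ENOENT. The pattern could be factored into a helper along these lines (illustrative only; the caller would still guard on the Polaris asic_type range as the patch does):

/* Illustrative helper, not in the patch. */
static int example_request_gfx_fw(struct amdgpu_device *adev,
				  const struct firmware **fw,
				  const char *chip_name, const char *name)
{
	char fw_name[40];
	int err;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%s_2.bin",
		 chip_name, name);
	err = request_firmware(fw, fw_name, adev->dev);
	if (err != -ENOENT)
		return err;

	/* fall back to the pre-Polaris firmware name */
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%s.bin",
		 chip_name, name);
	return request_firmware(fw, fw_name, adev->dev);
}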
@@ -2053,6 +2114,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_gfx_compute_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
amdgpu_gfx_kiq_fini(adev);
+ amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
gfx_v8_0_mec_fini(adev);
gfx_v8_0_rlc_fini(adev);
@@ -3891,10 +3953,10 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
adev->gfx.rlc.reg_list_format_size_bytes >> 2,
unique_indices,
&indices_count,
- sizeof(unique_indices) / sizeof(int),
+ ARRAY_SIZE(unique_indices),
indirect_start_offsets,
&offset_count,
- sizeof(indirect_start_offsets)/sizeof(int));
+ ARRAY_SIZE(indirect_start_offsets));
/* save and restore list */
WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
@@ -3916,14 +3978,14 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
/* starting offsets starts */
WREG32(mmRLC_GPM_SCRATCH_ADDR,
adev->gfx.rlc.starting_offsets_start);
- for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
+ for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
WREG32(mmRLC_GPM_SCRATCH_DATA,
indirect_start_offsets[i]);
/* unique indices */
temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
data = mmRLC_SRM_INDEX_CNTL_DATA_0;
- for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
+ for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
if (unique_indices[i] != 0) {
WREG32(temp + i, unique_indices[i] & 0x3FFFF);
WREG32(data + i, unique_indices[i] >> 20);
@@ -4071,18 +4133,12 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
gfx_v8_0_rlc_reset(adev);
gfx_v8_0_init_pg(adev);
- if (!adev->pp_enabled) {
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
- /* legacy rlc firmware loading */
- r = gfx_v8_0_rlc_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_RLC_G);
- if (r)
- return -EINVAL;
- }
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ /* legacy rlc firmware loading */
+ r = gfx_v8_0_rlc_load_microcode(adev);
+ if (r)
+ return r;
}
gfx_v8_0_rlc_start(adev);
@@ -4577,12 +4633,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000003;
- if (!(adev->flags & AMD_IS_APU)) {
- mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
- + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
- mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
- + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
- }
+ mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
+ mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
@@ -4753,7 +4807,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v8_0_kiq_setting(ring);
- if (adev->gfx.in_reset) { /* for GPU_RESET case */
+ if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4790,7 +4844,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
struct vi_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
+ if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4802,7 +4856,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
- } else if (adev->gfx.in_reset) { /* for GPU_RESET case */
+ } else if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4900,43 +4954,15 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
if (!(adev->flags & AMD_IS_APU))
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
- if (!adev->pp_enabled) {
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
/* legacy firmware loading */
- r = gfx_v8_0_cp_gfx_load_microcode(adev);
- if (r)
- return r;
+ r = gfx_v8_0_cp_gfx_load_microcode(adev);
+ if (r)
+ return r;
- r = gfx_v8_0_cp_compute_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_CP_CE);
- if (r)
- return -EINVAL;
-
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_CP_PFP);
- if (r)
- return -EINVAL;
-
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_CP_ME);
- if (r)
- return -EINVAL;
-
- if (adev->asic_type == CHIP_TOPAZ) {
- r = gfx_v8_0_cp_compute_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_CP_MEC1);
- if (r)
- return -EINVAL;
- }
- }
+ r = gfx_v8_0_cp_compute_load_microcode(adev);
+ if (r)
+ return r;
}
r = gfx_v8_0_cp_gfx_resume(adev);
@@ -4975,12 +5001,69 @@ static int gfx_v8_0_hw_init(void *handle)
return r;
}
+static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ uint32_t scratch, tmp = 0;
+ int r, i;
+
+ r = amdgpu_gfx_scratch_get(adev, &scratch);
+ if (r) {
+ DRM_ERROR("Failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+
+ r = amdgpu_ring_alloc(kiq_ring, 10);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+
+ /* unmap queues */
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
+ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ /* write to scratch for completion */
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+ amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+ amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
+ amdgpu_ring_commit(kiq_ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i >= adev->usec_timeout) {
+ DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+}
+
static int gfx_v8_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+ /* disable KCQs to avoid the CPC touching memory that is no longer valid */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
+
if (amdgpu_sriov_vf(adev)) {
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
@@ -5902,7 +5985,6 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
{
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
- void *pp_handle = adev->powerplay.pp_handle;
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
@@ -5920,7 +6002,8 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -5941,7 +6024,8 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
@@ -5953,7 +6037,6 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
- void *pp_handle = adev->powerplay.pp_handle;
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
@@ -5971,7 +6054,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
@@ -5990,7 +6074,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_3D,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -6011,7 +6096,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
@@ -6026,7 +6112,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_RLC,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
@@ -6040,7 +6127,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CP,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
@@ -6307,6 +6395,104 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}
+static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
+ bool acquire)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int pipe_num, tmp, reg;
+ int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
+
+ pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
+
+ /* first me only has 2 entries, GFX and HP3D */
+ if (ring->me > 0)
+ pipe_num -= 2;
+
+ reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
+ tmp = RREG32(reg);
+ tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
+ WREG32(reg, tmp);
+}
+
+static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ bool acquire)
+{
+ int i, pipe;
+ bool reserve;
+ struct amdgpu_ring *iring;
+
+ mutex_lock(&adev->gfx.pipe_reserve_mutex);
+ pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
+ if (acquire)
+ set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ else
+ clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+
+ if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
+ /* Clear all reservations - everyone reacquires all resources */
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
+ gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
+ true);
+
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i)
+ gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
+ true);
+ } else {
+ /* Lower all pipes without a current reservation */
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+ iring = &adev->gfx.gfx_ring[i];
+ pipe = amdgpu_gfx_queue_to_bit(adev,
+ iring->me,
+ iring->pipe,
+ 0);
+ reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ gfx_v8_0_ring_set_pipe_percent(iring, reserve);
+ }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+ iring = &adev->gfx.compute_ring[i];
+ pipe = amdgpu_gfx_queue_to_bit(adev,
+ iring->me,
+ iring->pipe,
+ 0);
+ reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+ gfx_v8_0_ring_set_pipe_percent(iring, reserve);
+ }
+ }
+
+ mutex_unlock(&adev->gfx.pipe_reserve_mutex);
+}
+
+static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ bool acquire)
+{
+ uint32_t pipe_priority = acquire ? 0x2 : 0x0;
+ uint32_t queue_priority = acquire ? 0xf : 0x0;
+
+ mutex_lock(&adev->srbm_mutex);
+ vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+ WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
+ WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);
+
+ vi_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
+ enum amd_sched_priority priority)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW;
+
+ if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
+ return;
+
+ gfx_v8_0_hqd_set_priority(adev, ring, acquire);
+ gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
+}
+
static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
u64 addr, u64 seq,
unsigned flags)
@@ -6752,6 +6938,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .set_priority = gfx_v8_0_ring_set_priority_compute,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
@@ -6960,7 +7147,7 @@ static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
uint64_t ce_payload_addr;
int cnt_ce;
- static union {
+ union {
struct vi_ce_ib_state regular;
struct vi_ce_ib_state_chained_ib chained;
} ce_payload = {};
@@ -6989,7 +7176,7 @@ static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
uint64_t de_payload_addr, gds_addr, csa_addr;
int cnt_de;
- static union {
+ union {
struct vi_de_ib_state regular;
struct vi_de_ib_state_chained_ib chained;
} de_payload = {};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 69182eeca264..7f15bb2c5233 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
@@ -66,38 +67,70 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15)}
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15) }
};
static const u32 golden_settings_gc_9_0[] =
@@ -352,6 +385,25 @@ err1:
return r;
}
+
+static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
+{
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+ release_firmware(adev->gfx.mec_fw);
+ adev->gfx.mec_fw = NULL;
+ release_firmware(adev->gfx.mec2_fw);
+ adev->gfx.mec2_fw = NULL;
+
+ kfree(adev->gfx.rlc.register_list_format);
+}
+
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
@@ -1120,30 +1172,22 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
int r;
- u32 data;
- u32 size;
- u32 base;
+ u32 data, base;
if (!amdgpu_ngg)
return 0;
/* Program buffer size */
- data = 0;
- size = adev->gfx.ngg.buf[NGG_PRIM].size / 256;
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, size);
-
- size = adev->gfx.ngg.buf[NGG_POS].size / 256;
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, size);
-
+ data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
+ adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
+ data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
+ adev->gfx.ngg.buf[NGG_POS].size >> 8);
WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
- data = 0;
- size = adev->gfx.ngg.buf[NGG_CNTL].size / 256;
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, size);
-
- size = adev->gfx.ngg.buf[NGG_PARAM].size / 1024;
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, size);
-
+ data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
+ adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
+ data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
+ adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
/* Program buffer base address */
@@ -1306,7 +1350,10 @@ static int gfx_v9_0_sw_init(void *handle)
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
ring->ring_obj = NULL;
- sprintf(ring->name, "gfx");
+ if (!i)
+ sprintf(ring->name, "gfx");
+ else
+ sprintf(ring->name, "gfx_%d", i);
ring->use_doorbell = true;
ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
r = amdgpu_ring_init(adev, ring, 1024,
@@ -1346,7 +1393,7 @@ static int gfx_v9_0_sw_init(void *handle)
return r;
/* create MQD for all compute queues as well as KIQ for SRIOV case */
- r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd));
+ r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
if (r)
return r;
@@ -1398,9 +1445,11 @@ static int gfx_v9_0_sw_fini(void *handle)
amdgpu_gfx_compute_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
amdgpu_gfx_kiq_fini(adev);
+ amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
gfx_v9_0_mec_fini(adev);
gfx_v9_0_ngg_fini(adev);
+ gfx_v9_0_free_microcode(adev);
return 0;
}
@@ -1682,10 +1731,10 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
adev->gfx.rlc.reg_list_format_size_bytes >> 2,
unique_indirect_regs,
&unique_indirect_reg_count,
- sizeof(unique_indirect_regs)/sizeof(int),
+ ARRAY_SIZE(unique_indirect_regs),
indirect_start_offsets,
&indirect_start_offsets_count,
- sizeof(indirect_start_offsets)/sizeof(int));
+ ARRAY_SIZE(indirect_start_offsets));
/* enable auto inc in case it is disabled */
tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
@@ -1722,12 +1771,12 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
/* write the starting offsets to RLC scratch ram */
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
adev->gfx.rlc.starting_offsets_start);
- for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
+ for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
indirect_start_offsets[i]);
/* load unique indirect regs */
- for (i = 0; i < sizeof(unique_indirect_regs)/sizeof(int); i++) {
+ for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
unique_indirect_regs[i] & 0x3FFFF);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
@@ -1740,11 +1789,7 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
- u32 tmp = 0;
-
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
- tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
}
static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
@@ -1822,16 +1867,11 @@ static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-
- if (enable == true) {
- data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
- if (default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- } else {
- data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- }
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
+ enable ? 1 : 0);
+ if (default_data != data)
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
@@ -1841,16 +1881,11 @@ static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *ad
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-
- if (enable == true) {
- data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- } else {
- data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- }
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
+ enable ? 1 : 0);
+ if(default_data != data)
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
@@ -1860,16 +1895,11 @@ static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-
- if (enable == true) {
- data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- } else {
- data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- }
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ CP_PG_DISABLE,
+ enable ? 0 : 1);
+ if(default_data != data)
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
@@ -1878,10 +1908,9 @@ static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
- if (enable == true)
- data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ GFX_POWER_GATING_ENABLE,
+ enable ? 1 : 0);
if(default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
@@ -1892,10 +1921,9 @@ static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
- if (enable == true)
- data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ GFX_PIPELINE_PG_ENABLE,
+ enable ? 1 : 0);
if(default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
@@ -1910,10 +1938,9 @@ static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
- if (enable == true)
- data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ STATIC_PER_CU_PG_ENABLE,
+ enable ? 1 : 0);
if(default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
@@ -1924,10 +1951,9 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
- if (enable == true)
- data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ DYN_PER_CU_PG_ENABLE,
+ enable ? 1 : 0);
if(default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
@@ -1967,13 +1993,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
{
- u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
-
- tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
- WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
-
+ WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
gfx_v9_0_enable_gui_idle_interrupt(adev, false);
-
gfx_v9_0_wait_for_rlc_serdes(adev);
}
@@ -2045,8 +2066,10 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
{
int r;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ gfx_v9_0_init_csb(adev);
return 0;
+ }
gfx_v9_0_rlc_stop(adev);
@@ -2463,6 +2486,13 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000003;
+ mqd->dynamic_cu_mask_addr_lo =
+ lower_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
+ mqd->dynamic_cu_mask_addr_hi =
+ upper_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
+
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
@@ -2486,10 +2516,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
DOORBELL_SOURCE, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_HIT, 0);
- }
- else
+ } else {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 0);
+ }
mqd->cp_hqd_pq_doorbell_control = tmp;
@@ -2692,10 +2722,10 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v9_0_kiq_setting(ring);
- if (adev->gfx.in_reset) { /* for GPU_RESET case */
+ if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
@@ -2707,7 +2737,9 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
} else {
- memset((void *)mqd, 0, sizeof(*mqd));
+ memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+ ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v9_0_mqd_init(ring);
@@ -2716,7 +2748,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
}
return 0;
@@ -2728,8 +2760,10 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
- memset((void *)mqd, 0, sizeof(*mqd));
+ if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
+ memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+ ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v9_0_mqd_init(ring);
@@ -2737,11 +2771,11 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (adev->gfx.in_reset) { /* for GPU_RESET case */
+ memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
+ } else if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
@@ -2882,12 +2916,70 @@ static int gfx_v9_0_hw_init(void *handle)
return r;
}
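+/* Tear down a compute queue (KCQ) through the KIQ ring: submit an
+ * UNMAP_QUEUES packet for the queue's doorbell and poll a scratch
+ * register until the CP signals completion or the timeout expires.
+ */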
+static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ uint32_t scratch, tmp = 0;
+ int r, i;
+
+ r = amdgpu_gfx_scratch_get(adev, &scratch);
+ if (r) {
+ DRM_ERROR("Failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+
+ r = amdgpu_ring_alloc(kiq_ring, 10);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+
+ /* unmap queues */
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
+ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ /* write to scratch for completion */
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+ amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+ amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
+ amdgpu_ring_commit(kiq_ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i >= adev->usec_timeout) {
+		DRM_ERROR("KCQ disable failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+}
+
+
static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+	/* disable the KCQs so the CPC no longer touches memory that is about to become invalid */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
+
if (amdgpu_sriov_vf(adev)) {
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
@@ -2930,15 +3022,10 @@ static bool gfx_v9_0_is_idle(void *handle)
static int gfx_v9_0_wait_for_idle(void *handle)
{
unsigned i;
- u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
- GRBM_STATUS__GUI_ACTIVE_MASK;
-
- if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
+ if (gfx_v9_0_is_idle(handle))
return 0;
udelay(1);
}
@@ -3497,9 +3584,11 @@ static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask, reg_mem_engine;
- struct nbio_hdp_flush_reg *nbio_hf_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg;
- if (ring->adev->asic_type == CHIP_VEGA10)
+ if (ring->adev->flags & AMD_IS_APU)
+ nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
+ else
nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
@@ -3528,7 +3617,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
gfx_v9_0_write_data_to_reg(ring, 0, true,
- SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 1);
+ SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
@@ -3718,7 +3807,7 @@ static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
- static struct v9_ce_ib_state ce_payload = {0};
+ struct v9_ce_ib_state ce_payload = {0};
uint64_t csa_addr;
int cnt;
@@ -3737,7 +3826,7 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
- static struct v9_de_ib_state de_payload = {0};
+ struct v9_de_ib_state de_payload = {0};
uint64_t csa_addr, gds_addr;
int cnt;
@@ -3757,6 +3846,12 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}
+static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
+ amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+}
+
static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
uint32_t dw2 = 0;
@@ -3764,6 +3859,8 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
if (amdgpu_sriov_vf(ring->adev))
gfx_v9_0_ring_emit_ce_meta(ring);
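+	/* mark the start of the frame with a FRAME_CONTROL packet */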
+ gfx_v9_0_ring_emit_tmz(ring, true);
+
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
@@ -3814,12 +3911,6 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
}
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
- amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
-}
-
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
struct amdgpu_device *adev = ring->adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 6c8040e616c4..c17996e18086 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -319,6 +319,12 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 5be9c83dfcf7..f4603a7c8ef3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -831,7 +831,7 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;
- amdgpu_vm_adjust_size(adev, 64, 4);
+ amdgpu_vm_adjust_size(adev, 64, 9);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
adev->mc.mc_mask = 0xffffffffffULL;
@@ -901,6 +901,8 @@ static int gmc_v6_0_sw_fini(void *handle)
gmc_v6_0_gart_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
+ release_firmware(adev->mc.fw);
+ adev->mc.fw = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index eace9e7182c8..b0528ca9207b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -970,7 +970,7 @@ static int gmc_v7_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- amdgpu_vm_adjust_size(adev, 64, 4);
+ amdgpu_vm_adjust_size(adev, 64, 9);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
@@ -1050,6 +1050,8 @@ static int gmc_v7_0_sw_fini(void *handle)
gmc_v7_0_gart_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
+ release_firmware(adev->mc.fw);
+ adev->mc.fw = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 3b3326daf32b..f368cfe2f585 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1067,7 +1067,7 @@ static int gmc_v8_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- amdgpu_vm_adjust_size(adev, 64, 4);
+ amdgpu_vm_adjust_size(adev, 64, 9);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
@@ -1147,6 +1147,8 @@ static int gmc_v8_0_sw_fini(void *handle)
gmc_v8_0_gart_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
+ release_firmware(adev->mc.fw);
+ adev->mc.fw = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index d04d0b123212..621699331e09 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -32,6 +32,8 @@
#include "vega10/DC/dce_12_0_offset.h"
#include "vega10/DC/dce_12_0_sh_mask.h"
#include "vega10/vega10_enum.h"
+#include "vega10/MMHUB/mmhub_1_0_offset.h"
+#include "vega10/ATHUB/athub_1_0_offset.h"
#include "soc15_common.h"
@@ -71,13 +73,25 @@ static const u32 golden_settings_vega10_hdp[] =
0xf6e, 0x0fffffff, 0x00000000,
};
+static const u32 golden_settings_mmhub_1_0_0[] =
+{
+ SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
+ SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
+};
+
+static const u32 golden_settings_athub_1_0_0[] =
+{
+ SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
+ SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
+};
+
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
struct amdgpu_vmhub *hub;
- u32 tmp, reg, bits, i;
+ u32 tmp, reg, bits, i, j;
bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
@@ -89,43 +103,26 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- /* MM HUB */
- hub = &adev->vmhub[AMDGPU_MMHUB];
- for (i = 0; i< 16; i++) {
- reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
- tmp &= ~bits;
- WREG32(reg, tmp);
- }
-
- /* GFX HUB */
- hub = &adev->vmhub[AMDGPU_GFXHUB];
- for (i = 0; i < 16; i++) {
- reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
- tmp &= ~bits;
- WREG32(reg, tmp);
+ for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
+ hub = &adev->vmhub[j];
+ for (i = 0; i < 16; i++) {
+ reg = hub->vm_context0_cntl + i;
+ tmp = RREG32(reg);
+ tmp &= ~bits;
+ WREG32(reg, tmp);
+ }
}
break;
case AMDGPU_IRQ_STATE_ENABLE:
- /* MM HUB */
- hub = &adev->vmhub[AMDGPU_MMHUB];
- for (i = 0; i< 16; i++) {
- reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
- tmp |= bits;
- WREG32(reg, tmp);
- }
-
- /* GFX HUB */
- hub = &adev->vmhub[AMDGPU_GFXHUB];
- for (i = 0; i < 16; i++) {
- reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
- tmp |= bits;
- WREG32(reg, tmp);
+ for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
+ hub = &adev->vmhub[j];
+ for (i = 0; i < 16; i++) {
+ reg = hub->vm_context0_cntl + i;
+ tmp = RREG32(reg);
+ tmp |= bits;
+ WREG32(reg, tmp);
+ }
}
- break;
default:
break;
}
@@ -682,8 +679,17 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
+ amdgpu_program_register_sequence(adev,
+ golden_settings_mmhub_1_0_0,
+ (const u32)ARRAY_SIZE(golden_settings_mmhub_1_0_0));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_athub_1_0_0,
+ (const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
case CHIP_RAVEN:
+ amdgpu_program_register_sequence(adev,
+ golden_settings_athub_1_0_0,
+ (const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
default:
break;
@@ -713,12 +719,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- /* After HDP is initialized, flush HDP.*/
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_hdp_flush(adev);
- else
- nbio_v6_1_hdp_flush(adev);
-
switch (adev->asic_type) {
case CHIP_RAVEN:
mmhub_v1_0_initialize_power_gating(adev);
@@ -736,13 +736,16 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
- tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
- WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
+ WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
+	/* After HDP is initialized, flush HDP. */
+ if (adev->flags & AMD_IS_APU)
+ nbio_v7_0_hdp_flush(adev);
+ else
+ nbio_v6_1_hdp_flush(adev);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
@@ -751,7 +754,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
gfxhub_v1_0_set_fault_enable_default(adev, value);
mmhub_v1_0_set_fault_enable_default(adev, value);
-
gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -770,17 +772,11 @@ static int gmc_v9_0_hw_init(void *handle)
gmc_v9_0_init_golden_registers(adev);
if (adev->mode_info.num_crtc) {
- u32 tmp;
-
/* Lockout access through VGA aperture*/
- tmp = RREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
- WREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL, tmp);
+ WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
/* disable VGA render */
- tmp = RREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL, tmp);
+ WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
}
r = gmc_v9_0_gart_enable(adev);
@@ -822,9 +818,7 @@ static int gmc_v9_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gmc_v9_0_hw_fini(adev);
-
- return 0;
+ return gmc_v9_0_hw_fini(adev);
}
static int gmc_v9_0_resume(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 7a0ea27ac429..bd592cb39f37 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -208,6 +208,34 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
}
/**
+ * iceland_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u16 pasid;
+
+ switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
+ case 146:
+ case 147:
+ pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
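+		/* keep processing the fault unless the PASID has run out of fault credit */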
+ if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
+ return true;
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ adev->irq.ih.rptr += 16;
+ return false;
+}
+
+/**
* iceland_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -412,6 +440,7 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
.get_wptr = iceland_ih_get_wptr,
+ .prescreen_iv = iceland_ih_prescreen_iv,
.decode_iv = iceland_ih_decode_iv,
.set_rptr = iceland_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 3bbf2ccfca89..f33d1ffdb20b 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -42,7 +42,6 @@
#define KV_MINIMUM_ENGINE_CLOCK 800
#define SMC_RAM_END 0x40000
-static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
bool enable);
@@ -64,7 +63,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);
-static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
+static void kv_dpm_powergate_uvd(void *handle, bool gate);
static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
@@ -1245,8 +1244,9 @@ static void kv_update_requested_ps(struct amdgpu_device *adev,
adev->pm.dpm.requested_ps = &pi->requested_rps;
}
-static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable)
+static void kv_dpm_enable_bapm(void *handle, bool enable)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
@@ -1672,8 +1672,9 @@ static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
return kv_enable_acp_dpm(adev, !gate);
}
-static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
+static void kv_dpm_powergate_uvd(void *handle, bool gate)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
@@ -1868,10 +1869,11 @@ static int kv_enable_nb_dpm(struct amdgpu_device *adev,
return ret;
}
-static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
+static int kv_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
ret = kv_force_dpm_highest(adev);
@@ -1892,8 +1894,9 @@ static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
return 0;
}
-static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
+static int kv_dpm_pre_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
struct amdgpu_ps *new_ps = &requested_ps;
@@ -1907,8 +1910,9 @@ static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
return 0;
}
-static int kv_dpm_set_power_state(struct amdgpu_device *adev)
+static int kv_dpm_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
struct amdgpu_ps *old_ps = &pi->current_rps;
@@ -1981,8 +1985,9 @@ static int kv_dpm_set_power_state(struct amdgpu_device *adev)
return 0;
}
-static void kv_dpm_post_set_power_state(struct amdgpu_device *adev)
+static void kv_dpm_post_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
@@ -2848,9 +2853,10 @@ static int kv_dpm_init(struct amdgpu_device *adev)
}
static void
-kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+kv_dpm_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
u32 current_index =
(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
@@ -2875,11 +2881,12 @@ kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
}
static void
-kv_dpm_print_power_state(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
+kv_dpm_print_power_state(void *handle, void *request_ps)
{
int i;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
struct kv_ps *ps = kv_get_ps(rps);
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_dpm_print_class_info(rps->class, rps->class2);
amdgpu_dpm_print_cap_info(rps->caps);
@@ -2905,13 +2912,14 @@ static void kv_dpm_fini(struct amdgpu_device *adev)
amdgpu_free_extended_power_table(adev);
}
-static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev)
+static void kv_dpm_display_configuration_changed(void *handle)
{
}
-static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+static u32 kv_dpm_get_sclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
@@ -2921,18 +2929,20 @@ static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
return requested_state->levels[requested_state->num_levels - 1].sclk;
}
-static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+static u32 kv_dpm_get_mclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
return pi->sys_info.bootup_uma_clk;
}
/* get temperature in millidegrees */
-static int kv_dpm_get_temp(struct amdgpu_device *adev)
+static int kv_dpm_get_temp(void *handle)
{
u32 temp;
int actual_temp = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
temp = RREG32_SMC(0xC0300E0C);
@@ -2950,7 +2960,6 @@ static int kv_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kv_dpm_set_dpm_funcs(adev);
kv_dpm_set_irq_funcs(adev);
return 0;
@@ -2960,16 +2969,10 @@ static int kv_dpm_late_init(void *handle)
{
/* powerdown unused blocks for now */
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int ret;
if (!amdgpu_dpm)
return 0;
- /* init the sysfs and debugfs files late */
- ret = amdgpu_pm_sysfs_init(adev);
- if (ret)
- return ret;
-
kv_dpm_powergate_acp(adev, true);
kv_dpm_powergate_samu(adev, true);
@@ -3031,7 +3034,6 @@ static int kv_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
mutex_lock(&adev->pm.mutex);
- amdgpu_pm_sysfs_fini(adev);
kv_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
@@ -3222,14 +3224,17 @@ static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
(kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
}
-static int kv_check_state_equal(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
+static int kv_check_state_equal(void *handle,
+ void *current_ps,
+ void *request_ps,
bool *equal)
{
struct kv_ps *kv_cps;
struct kv_ps *kv_rps;
int i;
+ struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;
@@ -3262,9 +3267,10 @@ static int kv_check_state_equal(struct amdgpu_device *adev,
return 0;
}
-static int kv_dpm_read_sensor(struct amdgpu_device *adev, int idx,
+static int kv_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
uint32_t sclk;
u32 pl_index =
@@ -3312,7 +3318,7 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
.set_powergating_state = kv_dpm_set_powergating_state,
};
-static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
+const struct amd_pm_funcs kv_dpm_funcs = {
.get_temperature = &kv_dpm_get_temp,
.pre_set_power_state = &kv_dpm_pre_set_power_state,
.set_power_state = &kv_dpm_set_power_state,
@@ -3330,12 +3336,6 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
.read_sensor = &kv_dpm_read_sensor,
};
-static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
-{
- if (adev->pm.funcs == NULL)
- adev->pm.funcs = &kv_dpm_funcs;
-}
-
static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
.set = kv_dpm_set_interrupt_state,
.process = kv_dpm_process_interrupt,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 74cb647da30e..cc21c4bdec27 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -273,7 +273,7 @@ static const struct pctl_data pctl0_data[] = {
{0x135, 0x12a810},
{0x149, 0x7a82c}
};
-#define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0]))
+#define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data))
#define PCTL0_RENG_EXEC_END_PTR 0x151
#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640
@@ -309,7 +309,7 @@ static const struct pctl_data pctl1_data[] = {
{0x1f0, 0x5000a7f6},
{0x1f1, 0x5000a7e4}
};
-#define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0]))
+#define PCTL1_DATA_LEN (ARRAY_SIZE(pctl1_data))
#define PCTL1_RENG_EXEC_END_PTR 0x1f1
#define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000
@@ -561,6 +561,13 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
+
WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 2812d88a8bdd..b4906d2f30d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -183,6 +183,12 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
return r;
}
+ /* Retrieve checksum from mailbox2 */
+ if (req == IDH_REQ_GPU_INIT_ACCESS) {
+ adev->virt.fw_reserve.checksum_key =
+ RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
+ }
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 1e91b9a1c591..67e78576a9eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -24,7 +24,7 @@
#ifndef __MXGPU_AI_H__
#define __MXGPU_AI_H__
-#define AI_MAILBOX_TIMEDOUT 5000
+#define AI_MAILBOX_TIMEDOUT 12000
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
index c791d73d2d54..f13dc6cc158f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
@@ -23,7 +23,7 @@
#ifndef __MXGPU_VI_H__
#define __MXGPU_VI_H__
-#define VI_MAILBOX_TIMEDOUT 5000
+#define VI_MAILBOX_TIMEDOUT 12000
#define VI_MAILBOX_RESET_TIME 12
/* VI mailbox messages request */
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 045988b18bc3..904a1bab9b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -215,31 +215,27 @@ void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
-struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
-struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
+const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+ .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ),
+ .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE),
+ .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
+ .ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
+ .ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
+ .ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
+ .ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
+ .ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
+ .ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
+ .ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
+ .ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
+ .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
+};
-int nbio_v6_1_init(struct amdgpu_device *adev)
-{
- nbio_v6_1_hdp_flush_reg.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
- nbio_v6_1_hdp_flush_reg.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK;
-
- nbio_v6_1_pcie_index_data.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX);
- nbio_v6_1_pcie_index_data.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA);
-
- return 0;
-}
+const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data = {
+ .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX),
+ .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA),
+};
void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
index 686e4b4d296a..14ca8d45a46c 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
@@ -26,8 +26,8 @@
#include "soc15_common.h"
-extern struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
-extern struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
+extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
+extern const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
int nbio_v6_1_init(struct amdgpu_device *adev);
u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
uint32_t idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 11b70d601922..f802b973410a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -185,28 +185,24 @@ void nbio_v7_0_ih_control(struct amdgpu_device *adev)
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
-struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
-struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
+const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
+ .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ),
+ .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE),
+ .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
+ .ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
+ .ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
+ .ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
+ .ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
+ .ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
+ .ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
+ .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
+ .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
+ .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
+};
-int nbio_v7_0_init(struct amdgpu_device *adev)
-{
- nbio_v7_0_hdp_flush_reg.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
- nbio_v7_0_hdp_flush_reg.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
- nbio_v7_0_hdp_flush_reg.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
-
- nbio_v7_0_pcie_index_data.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
- nbio_v7_0_pcie_index_data.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
-
- return 0;
-}
+const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data = {
+ .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2),
+ .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2)
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
index 054ff49427e6..df8fa90f40d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
@@ -26,8 +26,8 @@
#include "soc15_common.h"
-extern struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
-extern struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
+extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
+extern const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
int nbio_v7_0_init(struct amdgpu_device *adev);
u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
uint32_t idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index f7cf994b1da2..4e20d91d5d50 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -35,6 +35,8 @@
#include "raven1/GC/gc_9_1_offset.h"
#include "raven1/SDMA0/sdma0_4_1_offset.h"
+MODULE_FIRMWARE("amdgpu/raven_asd.bin");
+
static int
psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
{
@@ -136,15 +138,13 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm
{
int ret;
uint64_t fw_mem_mc_addr = ucode->mc_addr;
- struct common_firmware_header *header;
memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
- header = (struct common_firmware_header *)ucode->fw;
cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
- cmd->cmd.cmd_load_ip_fw.fw_size = le32_to_cpu(header->ucode_size_bytes);
+ cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
ret = psp_v10_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
if (ret)
@@ -209,7 +209,7 @@ int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
-int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+int psp_v10_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
@@ -229,6 +229,19 @@ int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
+ return ret;
+}
+
+int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ret = psp_v10_0_ring_stop(psp, ring_type);
+ if (ret)
+		DRM_ERROR("Failed to stop psp ring\n");
+
amdgpu_bo_free_kernel(&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
@@ -244,16 +257,31 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
unsigned int psp_write_ptr_reg = 0;
struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
struct psp_ring *ring = &psp->km_ring;
+ struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
+ struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
+ ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
struct amdgpu_device *adev = psp->adev;
+ uint32_t ring_size_dw = ring->ring_size / 4;
+ uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
/* KM (GPCOM) prepare write pointer */
psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
/* Update KM RB frame pointer to new frame */
- if ((psp_write_ptr_reg % ring->ring_size) == 0)
- write_frame = ring->ring_mem;
+ if ((psp_write_ptr_reg % ring_size_dw) == 0)
+ write_frame = ring_buffer_start;
else
- write_frame = ring->ring_mem + (psp_write_ptr_reg / (sizeof(struct psp_gfx_rb_frame) / 4));
+ write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
+ /* Check invalid write_frame ptr address */
+ if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
+ DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+ ring_buffer_start, ring_buffer_end, write_frame);
+		DRM_ERROR("write_frame is pointing to an address out of bounds\n");
+ return -EINVAL;
+ }
+
+ /* Initialize KM RB frame */
+ memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
/* Update KM RB frame */
write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
@@ -263,8 +291,7 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
write_frame->fence_value = index;
/* Update the write Pointer in DWORDs */
- psp_write_ptr_reg += sizeof(struct psp_gfx_rb_frame) / 4;
- psp_write_ptr_reg = (psp_write_ptr_reg >= ring->ring_size) ? 0 : psp_write_ptr_reg;
+ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
return 0;
@@ -390,3 +417,10 @@ bool psp_v10_0_compare_sram_data(struct psp_context *psp,
return true;
}
+
+
+int psp_v10_0_mode1_reset(struct psp_context *psp)
+{
+	DRM_INFO("psp mode 1 reset is not supported yet!\n");
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
index e76cde2f01f9..451e8308303f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
@@ -34,6 +34,8 @@ extern int psp_v10_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v10_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type);
+extern int psp_v10_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type);
extern int psp_v10_0_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v10_0_cmd_submit(struct psp_context *psp,
@@ -43,4 +45,6 @@ extern int psp_v10_0_cmd_submit(struct psp_context *psp,
extern bool psp_v10_0_compare_sram_data(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
enum AMDGPU_UCODE_ID ucode_type);
+
+extern int psp_v10_0_mode1_reset(struct psp_context *psp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 2a535a4b8d5b..c7bcfe8e286c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -319,7 +319,7 @@ int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
-int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
@@ -339,6 +339,19 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
+ return ret;
+}
+
+int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ret = psp_v3_1_ring_stop(psp, ring_type);
+ if (ret)
+		DRM_ERROR("Failed to stop psp ring\n");
+
amdgpu_bo_free_kernel(&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
@@ -354,6 +367,9 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
unsigned int psp_write_ptr_reg = 0;
struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
struct psp_ring *ring = &psp->km_ring;
+ struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
+ struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
+ ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
struct amdgpu_device *adev = psp->adev;
uint32_t ring_size_dw = ring->ring_size / 4;
uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
@@ -365,9 +381,16 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
/* write_frame ptr increments by size of rb_frame in bytes */
/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
if ((psp_write_ptr_reg % ring_size_dw) == 0)
- write_frame = ring->ring_mem;
+ write_frame = ring_buffer_start;
else
- write_frame = ring->ring_mem + (psp_write_ptr_reg / rb_frame_size_dw);
+ write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
+ /* Check invalid write_frame ptr address */
+ if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
+ DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+ ring_buffer_start, ring_buffer_end, write_frame);
+		DRM_ERROR("write_frame is pointing to an address out of bounds\n");
+ return -EINVAL;
+ }
/* Initialize KM RB frame */
memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
@@ -517,3 +540,37 @@ bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
reg = RREG32_SOC15(NBIO, 0, mmPCIE_DATA2);
return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
}
+
+int psp_v3_1_mode1_reset(struct psp_context *psp)
+{
+ int ret;
+ uint32_t offset;
+ struct amdgpu_device *adev = psp->adev;
+
+ offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
+
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+
+ if (ret) {
+ DRM_INFO("psp is not working correctly before mode1 reset!\n");
+ return -EINVAL;
+ }
+
+	/* send the mode 1 reset command */
+ WREG32(offset, 0x70000);
+
+ mdelay(1000);
+
+ offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
+
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+
+ if (ret) {
+ DRM_INFO("psp mode 1 reset failed!\n");
+ return -EINVAL;
+ }
+
+	DRM_INFO("psp mode1 reset succeeded\n");
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
index 9dcd0b25c4c6..b05dbada7751 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
@@ -41,6 +41,8 @@ extern int psp_v3_1_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v3_1_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type);
+extern int psp_v3_1_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type);
extern int psp_v3_1_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v3_1_cmd_submit(struct psp_context *psp,
@@ -51,4 +53,5 @@ extern bool psp_v3_1_compare_sram_data(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
enum AMDGPU_UCODE_ID ucode_type);
extern bool psp_v3_1_smu_reload_quirk(struct psp_context *psp);
+extern int psp_v3_1_mode1_reset(struct psp_context *psp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index f2d0710258cb..67f375bfe452 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -561,21 +561,11 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
{
int r;
- if (!adev->pp_enabled) {
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
- r = sdma_v2_4_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_SDMA0);
- if (r)
- return -EINVAL;
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_SDMA1);
- if (r)
- return -EINVAL;
- }
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ r = sdma_v2_4_load_microcode(adev);
+ if (r)
+ return r;
}
/* halt the engine before programing */
@@ -1324,8 +1314,13 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
.copy_pte = sdma_v2_4_vm_copy_pte,
+
.write_pte = sdma_v2_4_vm_write_pte,
+
+ .set_max_nums_pte_pde = 0x1fffff >> 3,
+ .set_pte_pde_num_dw = 10,
.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index b1de44f22824..6d06f8eb659f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -379,8 +379,10 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
+ u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
+
/* XXX check if swapping is necessary on BE */
- adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr) << 2;
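+		/* WRITE_ONCE keeps the compiler from tearing or deferring the shadow wptr store */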
+ WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
} else {
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
@@ -641,10 +643,11 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- u32 rb_cntl, ib_cntl;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
u32 rb_bufsz;
u32 wb_offset;
u32 doorbell;
+ u64 wptr_gpu_addr;
int i, j, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -707,6 +710,20 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
}
WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+
+ WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO + sdma_offsets[i],
+ lower_32_bits(wptr_gpu_addr));
+ WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
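+	/* F32 wptr shadow polling is only enabled when running under SR-IOV */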
+ if (amdgpu_sriov_vf(adev))
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
+ else
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
+ WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);
+
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -802,23 +819,12 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
*/
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
- int r, i;
+ int r;
- if (!adev->pp_enabled) {
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
- r = sdma_v3_0_load_microcode(adev);
- if (r)
- return r;
- } else {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- (i == 0) ?
- AMDGPU_UCODE_ID_SDMA0 :
- AMDGPU_UCODE_ID_SDMA1);
- if (r)
- return -EINVAL;
- }
- }
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ r = sdma_v3_0_load_microcode(adev);
+ if (r)
+ return r;
}
/* disable sdma engine before programing it */
@@ -1713,11 +1719,11 @@ static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
}
static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
- .copy_max_bytes = 0x1fffff,
+ .copy_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
.copy_num_dw = 7,
.emit_copy_buffer = sdma_v3_0_emit_copy_buffer,
- .fill_max_bytes = 0x1fffff,
+ .fill_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
.fill_num_dw = 5,
.emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
};
@@ -1731,8 +1737,14 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
.copy_pte = sdma_v3_0_vm_copy_pte,
+
.write_pte = sdma_v3_0_vm_write_pte,
+
+ /* not 0x3fffff due to HW limitation */
+ .set_max_nums_pte_pde = 0x3fffe0 >> 3,
+ .set_pte_pde_num_dw = 10,
.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index fd7c72aaafa6..46009db3d195 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -54,7 +54,7 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static const u32 golden_settings_sdma_4[] = {
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
+ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
@@ -89,7 +89,7 @@ static const u32 golden_settings_sdma_vg10[] = {
static const u32 golden_settings_sdma_4_1[] =
{
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
+ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xffffffff, 0x3f000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0111, 0x00000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
@@ -371,7 +371,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
- struct nbio_hdp_flush_reg *nbio_hf_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg;
if (ring->adev->flags & AMD_IS_APU)
nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
@@ -398,7 +398,7 @@ static void sdma_v4_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0));
+ amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE));
amdgpu_ring_write(ring, 1);
}
@@ -1264,6 +1264,11 @@ static int sdma_v4_0_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+
return 0;
}
@@ -1714,8 +1719,13 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
.copy_pte = sdma_v4_0_vm_copy_pte,
+
.write_pte = sdma_v4_0_vm_write_pte,
+
+ .set_max_nums_pte_pde = 0x400000 >> 3,
+ .set_pte_pde_num_dw = 10,
.set_pte_pde = sdma_v4_0_vm_set_pte_pde,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 112969f3301a..3fa2fbf8c9a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -887,8 +887,13 @@ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
+ .copy_pte_num_dw = 5,
.copy_pte = si_dma_vm_copy_pte,
+
.write_pte = si_dma_vm_write_pte,
+
+ .set_max_nums_pte_pde = 0xffff8 >> 3,
+ .set_pte_pde_num_dw = 9,
.set_pte_pde = si_dma_vm_set_pte_pde,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index d63873f3f574..51fd0c9a20a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -1847,7 +1847,6 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
-static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
@@ -3060,9 +3059,9 @@ static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
return ret;
}
-static bool si_dpm_vblank_too_short(struct amdgpu_device *adev)
+static bool si_dpm_vblank_too_short(void *handle)
{
-
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
/* we never hit the non-gddr5 limit so disable it */
u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
@@ -3871,9 +3870,10 @@ static int si_restrict_performance_levels_before_switch(struct amdgpu_device *ad
0 : -EINVAL;
}
-static int si_dpm_force_performance_level(struct amdgpu_device *adev,
+static int si_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
struct si_ps *ps = si_get_ps(rps);
u32 levels = ps->performance_level_count;
@@ -6575,11 +6575,12 @@ static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
}
}
-static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+static int si_dpm_get_fan_speed_percent(void *handle,
u32 *speed)
{
u32 duty, duty100;
u64 tmp64;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.no_fan)
return -ENOENT;
@@ -6600,9 +6601,10 @@ static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
-static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+static int si_dpm_set_fan_speed_percent(void *handle,
u32 speed)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
u32 duty, duty100;
@@ -6633,8 +6635,10 @@ static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
-static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
if (mode) {
/* stop auto-manage */
if (adev->pm.dpm.fan.ucode_fan_control)
@@ -6649,8 +6653,9 @@ static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
}
}
-static u32 si_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+static u32 si_dpm_get_fan_control_mode(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
@@ -6946,8 +6951,9 @@ static void si_dpm_disable(struct amdgpu_device *adev)
ni_update_current_ps(adev, boot_ps);
}
-static int si_dpm_pre_set_power_state(struct amdgpu_device *adev)
+static int si_dpm_pre_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
struct amdgpu_ps *new_ps = &requested_ps;
@@ -6984,8 +6990,9 @@ static int si_power_control_set_level(struct amdgpu_device *adev)
return 0;
}
-static int si_dpm_set_power_state(struct amdgpu_device *adev)
+static int si_dpm_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
struct amdgpu_ps *old_ps = &eg_pi->current_rps;
@@ -7086,8 +7093,9 @@ static int si_dpm_set_power_state(struct amdgpu_device *adev)
return 0;
}
-static void si_dpm_post_set_power_state(struct amdgpu_device *adev)
+static void si_dpm_post_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
@@ -7103,8 +7111,10 @@ void si_dpm_reset_asic(struct amdgpu_device *adev)
}
#endif
-static void si_dpm_display_configuration_changed(struct amdgpu_device *adev)
+static void si_dpm_display_configuration_changed(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
si_program_display_gap(adev);
}
@@ -7486,9 +7496,10 @@ static void si_dpm_fini(struct amdgpu_device *adev)
amdgpu_free_extended_power_table(adev);
}
-static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+static void si_dpm_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *rps = &eg_pi->current_rps;
struct si_ps *ps = si_get_ps(rps);
@@ -7593,11 +7604,6 @@ static int si_dpm_late_init(void *handle)
if (!amdgpu_dpm)
return 0;
- /* init the sysfs and debugfs files late */
- ret = amdgpu_pm_sysfs_init(adev);
- if (ret)
- return ret;
-
ret = si_set_temperature_range(adev);
if (ret)
return ret;
@@ -7753,7 +7759,6 @@ static int si_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
mutex_lock(&adev->pm.mutex);
- amdgpu_pm_sysfs_fini(adev);
si_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
@@ -7860,10 +7865,11 @@ static int si_dpm_set_powergating_state(void *handle,
}
/* get temperature in millidegrees */
-static int si_dpm_get_temp(struct amdgpu_device *adev)
+static int si_dpm_get_temp(void *handle)
{
u32 temp;
int actual_temp = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
CTF_TEMP_SHIFT;
@@ -7878,8 +7884,9 @@ static int si_dpm_get_temp(struct amdgpu_device *adev)
return actual_temp;
}
-static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+static u32 si_dpm_get_sclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
@@ -7889,8 +7896,9 @@ static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}
-static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+static u32 si_dpm_get_mclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
@@ -7900,9 +7908,11 @@ static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}
-static void si_dpm_print_power_state(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
+static void si_dpm_print_power_state(void *handle,
+ void *current_ps)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
struct si_ps *ps = si_get_ps(rps);
struct rv7xx_pl *pl;
int i;
@@ -7927,7 +7937,6 @@ static int si_dpm_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- si_dpm_set_dpm_funcs(adev);
si_dpm_set_irq_funcs(adev);
return 0;
}
@@ -7942,20 +7951,23 @@ static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
(si_cpl1->vddci == si_cpl2->vddci));
}
-static int si_check_state_equal(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
+static int si_check_state_equal(void *handle,
+ void *current_ps,
+ void *request_ps,
bool *equal)
{
struct si_ps *si_cps;
struct si_ps *si_rps;
int i;
+ struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;
- si_cps = si_get_ps(cps);
- si_rps = si_get_ps(rps);
+ si_cps = si_get_ps((struct amdgpu_ps *)cps);
+ si_rps = si_get_ps((struct amdgpu_ps *)rps);
if (si_cps == NULL) {
printk("si_cps is NULL\n");
@@ -7983,9 +7995,10 @@ static int si_check_state_equal(struct amdgpu_device *adev,
return 0;
}
-static int si_dpm_read_sensor(struct amdgpu_device *adev, int idx,
+static int si_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *rps = &eg_pi->current_rps;
struct si_ps *ps = si_get_ps(rps);
@@ -8041,7 +8054,7 @@ const struct amd_ip_funcs si_dpm_ip_funcs = {
.set_powergating_state = si_dpm_set_powergating_state,
};
-static const struct amdgpu_dpm_funcs si_dpm_funcs = {
+const struct amd_pm_funcs si_dpm_funcs = {
.get_temperature = &si_dpm_get_temp,
.pre_set_power_state = &si_dpm_pre_set_power_state,
.set_power_state = &si_dpm_set_power_state,
@@ -8062,12 +8075,6 @@ static const struct amdgpu_dpm_funcs si_dpm_funcs = {
.read_sensor = &si_dpm_read_sensor,
};
-static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
-{
- if (adev->pm.funcs == NULL)
- adev->pm.funcs = &si_dpm_funcs;
-}
-
static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
.set = si_dpm_set_interrupt_state,
.process = si_dpm_process_interrupt,
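
The si_dpm.c changes convert every dpm callback to take a void *handle and export the table as a const struct amd_pm_funcs, so the same function-table shape can be filled either by an IP-specific implementation like si_dpm or by powerplay, with callers no longer depending on the concrete type. A reduced, self-contained sketch of that opaque-handle pattern; the types and names here are stand-ins, not the amdgpu definitions:

#include <stdio.h>

/* Stand-in for the device the callbacks really operate on. */
struct fake_adev {
	int temp_millideg;
};

/* Opaque-handle callback table, mirroring the amd_pm_funcs idea. */
struct pm_funcs {
	int (*get_temperature)(void *handle);
};

static int fake_get_temp(void *handle)
{
	/* First step in every callback: recover the concrete type. */
	struct fake_adev *adev = handle;

	return adev->temp_millideg;
}

static const struct pm_funcs fake_dpm_funcs = {
	.get_temperature = fake_get_temp,
};

int main(void)
{
	struct fake_adev adev = { .temp_millideg = 42000 };
	const struct pm_funcs *funcs = &fake_dpm_funcs;

	/* The caller only ever sees the table and an opaque handle. */
	printf("temp: %d\n", funcs->get_temperature(&adev));
	return 0;
}
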
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
index 51ce21c5f4fb..9fe343de3477 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
@@ -246,6 +246,7 @@ enum si_display_gap
};
extern const struct amd_ip_funcs si_dpm_ip_funcs;
+extern const struct amd_pm_funcs si_dpm_funcs;
struct ni_leakage_coeffients
{
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index ce25e03a077d..d2c6b80309c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -118,6 +118,19 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev)
return (wptr & adev->irq.ih.ptr_mask);
}
+/**
+ * si_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool si_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ /* Process all interrupts */
+ return true;
+}
+
static void si_ih_decode_iv(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
@@ -288,6 +301,7 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
static const struct amdgpu_ih_funcs si_ih_funcs = {
.get_wptr = si_ih_get_wptr,
+ .prescreen_iv = si_ih_prescreen_iv,
.decode_iv = si_ih_decode_iv,
.set_rptr = si_ih_set_rptr
};
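
prescreen_iv gives each IH implementation a chance to drop an entry before it is decoded; SI has nothing to filter, so it always returns true, while the later ASICs in this series use the hook to throttle VM faults. A simplified, self-contained sketch of how a processing loop might consult such a hook; the loop and names below are illustrative, not the amdgpu IH code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative ring state; the real IH ring holds packed hardware entries. */
struct fake_ih {
	uint32_t rptr, wptr;
	uint32_t entry_dwords;
	bool (*prescreen)(struct fake_ih *ih);
	void (*process)(struct fake_ih *ih);
};

/* Drop every second entry, advancing rptr ourselves; this is the same
 * contract the prescreen_iv hooks follow when they filter a fault. */
static bool demo_prescreen(struct fake_ih *ih)
{
	if ((ih->rptr / ih->entry_dwords) & 1) {
		ih->rptr += ih->entry_dwords;
		return false;
	}
	return true;
}

static void demo_process(struct fake_ih *ih)
{
	printf("processed entry at rptr %u\n", ih->rptr);
}

static void fake_ih_drain(struct fake_ih *ih)
{
	while (ih->rptr != ih->wptr) {
		/* false means the hook already consumed the entry */
		if (!ih->prescreen(ih))
			continue;
		ih->process(ih);
		ih->rptr += ih->entry_dwords;
	}
}

int main(void)
{
	struct fake_ih ih = {
		.rptr = 0, .wptr = 64, .entry_dwords = 16,
		.prescreen = demo_prescreen, .process = demo_process,
	};

	fake_ih_drain(&ih);
	return 0;
}
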
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f2c3a49f73a0..3ca9d114f630 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -101,7 +101,7 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- struct nbio_pcie_index_data *nbio_pcie_id;
+ const struct nbio_pcie_index_data *nbio_pcie_id;
if (adev->flags & AMD_IS_APU)
nbio_pcie_id = &nbio_v7_0_pcie_index_data;
@@ -122,7 +122,7 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- struct nbio_pcie_index_data *nbio_pcie_id;
+ const struct nbio_pcie_index_data *nbio_pcie_id;
if (adev->flags & AMD_IS_APU)
nbio_pcie_id = &nbio_v7_0_pcie_index_data;
@@ -279,10 +279,7 @@ static void soc15_init_golden_registers(struct amdgpu_device *adev)
}
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
- if (adev->asic_type == CHIP_VEGA10)
- return adev->clock.spll.reference_freq/4;
- else
- return adev->clock.spll.reference_freq;
+ return adev->clock.spll.reference_freq;
}
@@ -407,18 +404,27 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
-static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int soc15_asic_reset(struct amdgpu_device *adev)
{
u32 i;
- dev_info(adev->dev, "GPU pci config reset\n");
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+ dev_info(adev->dev, "GPU reset\n");
/* disable BM */
pci_clear_master(adev->pdev);
- /* reset */
- amdgpu_pci_config_reset(adev);
- udelay(100);
+ pci_save_state(adev->pdev);
+
+ for (i = 0; i < AMDGPU_MAX_IP_NUM; i++) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
+ adev->ip_blocks[i].version->funcs->soft_reset((void *)adev);
+ break;
+ }
+ }
+
+ pci_restore_state(adev->pdev);
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
@@ -430,14 +436,6 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
udelay(1);
}
-}
-
-static int soc15_asic_reset(struct amdgpu_device *adev)
-{
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
- soc15_gpu_pci_config_reset(adev);
-
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return 0;
@@ -603,21 +601,6 @@ static int soc15_common_early_init(void *handle)
(amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
psp_enabled = true;
- /*
- * nbio need be used for both sdma and gfx9, but only
- * initializes once
- */
- switch(adev->asic_type) {
- case CHIP_VEGA10:
- nbio_v6_1_init(adev);
- break;
- case CHIP_RAVEN:
- nbio_v7_0_init(adev);
- break;
- default:
- return -EINVAL;
- }
-
adev->rev_id = soc15_get_rev_id(adev);
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
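
The reworked soc15_asic_reset no longer pokes PCI config space directly: it marks the engine hung in the scratch registers, disables bus mastering, saves PCI state, asks the PSP IP block's soft_reset hook to perform the reset, then restores PCI state and polls for the ASIC to come back. A stripped-down sketch of the find-the-block-and-call-its-hook step; the block and type names below are placeholders:

#include <stddef.h>
#include <stdio.h>

enum fake_block_type { FAKE_BLOCK_GFX, FAKE_BLOCK_PSP, FAKE_BLOCK_SDMA };

struct fake_ip_funcs {
	int (*soft_reset)(void *handle);
};

struct fake_ip_block {
	enum fake_block_type type;
	const struct fake_ip_funcs *funcs;
};

struct fake_dev {
	struct fake_ip_block blocks[8];
	size_t num_blocks;
};

static int fake_psp_soft_reset(void *handle)
{
	(void)handle;
	printf("PSP-driven reset issued\n");
	return 0;
}

/* Walk the IP-block table and delegate the reset to the first PSP block,
 * mirroring the loop added to soc15_asic_reset above. */
static int fake_asic_reset(struct fake_dev *dev)
{
	size_t i;

	for (i = 0; i < dev->num_blocks; i++) {
		if (dev->blocks[i].type == FAKE_BLOCK_PSP)
			return dev->blocks[i].funcs->soft_reset(dev);
	}
	return -1; /* no PSP block found */
}

int main(void)
{
	static const struct fake_ip_funcs psp_funcs = {
		.soft_reset = fake_psp_soft_reset,
	};
	struct fake_dev dev = {
		.blocks = {
			{ FAKE_BLOCK_GFX, NULL },
			{ FAKE_BLOCK_PSP, &psp_funcs },
		},
		.num_blocks = 2,
	};

	return fake_asic_reset(&dev);
}
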
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 923df2c0e535..aa4e320e31f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -219,6 +219,34 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
}
/**
+ * tonga_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u16 pasid;
+
+ switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
+ case 146:
+ case 147:
+ pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
+ if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
+ return true;
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ adev->irq.ih.rptr += 16;
+ return false;
+}
+
+/**
* tonga_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -478,6 +506,7 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
static const struct amdgpu_ih_funcs tonga_ih_funcs = {
.get_wptr = tonga_ih_get_wptr,
+ .prescreen_iv = tonga_ih_prescreen_iv,
.decode_iv = tonga_ih_decode_iv,
.set_rptr = tonga_ih_set_rptr
};
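
On Tonga the VM-fault sources (146 and 147) carry the faulting PASID in the third dword of the IH entry; prescreening reads just enough of the entry to decide whether the fault still has credit and silently skips it, advancing rptr past the 16-byte entry, when it does not. A small standalone sketch of that field extraction; the dword layout follows the hunk above, while the helper name is made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One 4-dword IH ring entry, already byte-swapped to CPU order. */
static bool entry_is_vm_fault(const uint32_t *entry, uint16_t *pasid)
{
	uint32_t src_id = entry[0] & 0xff;

	if (src_id != 146 && src_id != 147)
		return false;                  /* not a VM fault */

	*pasid = (uint16_t)(entry[2] >> 16);   /* PASID lives in dword 2 */
	return true;
}

int main(void)
{
	uint32_t fault_entry[4] = { 146, 0, 0x002a0000, 0 };
	uint16_t pasid;

	if (entry_is_vm_fault(fault_entry, &pasid))
		printf("VM fault from pasid %u\n", pasid);
	return 0;
}
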
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 62cd16a23921..920910ac8663 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -38,6 +38,8 @@
#include "vi.h"
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
+static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
@@ -48,6 +50,18 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
bool enable);
/**
+ * uvd_v6_0_enc_support - get encode support status
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns the current hardware encode support status
+ */
+static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
+{
+ return ((adev->asic_type >= CHIP_POLARIS10) && (adev->asic_type <= CHIP_POLARIS12));
+}
+
+/**
* uvd_v6_0_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
@@ -62,6 +76,22 @@ static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
}
/**
+ * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware enc read pointer
+ */
+static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->uvd.ring_enc[0])
+ return RREG32(mmUVD_RB_RPTR);
+ else
+ return RREG32(mmUVD_RB_RPTR2);
+}
+/**
* uvd_v6_0_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
@@ -76,6 +106,23 @@ static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
}
/**
+ * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware enc write pointer
+ */
+static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->uvd.ring_enc[0])
+ return RREG32(mmUVD_RB_WPTR);
+ else
+ return RREG32(mmUVD_RB_WPTR2);
+}
+
+/**
* uvd_v6_0_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
@@ -89,6 +136,237 @@ static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
+/**
+ * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the enc write pointer to the hardware
+ */
+static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring == &adev->uvd.ring_enc[0])
+ WREG32(mmUVD_RB_WPTR,
+ lower_32_bits(ring->wptr));
+ else
+ WREG32(mmUVD_RB_WPTR2,
+ lower_32_bits(ring->wptr));
+}
+
+/**
+ * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
+ *
+ * @ring: the engine to test on
+ *
+ */
+static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t rptr = amdgpu_ring_get_rptr(ring);
+ unsigned i;
+ int r;
+
+ r = amdgpu_ring_alloc(ring, 16);
+ if (r) {
+ DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
+ ring->idx, r);
+ return r;
+ }
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (amdgpu_ring_get_rptr(ring) != rptr)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < adev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
+ } else {
+ DRM_ERROR("amdgpu: ring %d test failed\n",
+ ring->idx);
+ r = -ETIMEDOUT;
+ }
+
+ return r;
+}
+
+/**
+ * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: ring we should submit the msg to
+ * @handle: session handle to use
+ * @fence: optional fence to return
+ *
+ * Open up a stream for HW test
+ */
+static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct dma_fence **fence)
+{
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ uint64_t dummy;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+ dummy = ib->gpu_addr + 1024;
+
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+ ib->ptr[ib->length_dw++] = handle;
+ ib->ptr[ib->length_dw++] = 0x00010000;
+ ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+ ib->ptr[ib->length_dw++] = dummy;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+ ib->ptr[ib->length_dw++] = 0x0000001c;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+
+ ib->ptr[ib->length_dw++] = 0x00000008;
+ ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
+
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ job->fence = dma_fence_get(f);
+ if (r)
+ goto err;
+
+ amdgpu_job_free(job);
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+/**
+ * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: ring we should submit the msg to
+ * @handle: session handle to use
+ * @fence: optional fence to return
+ *
+ * Close up a stream for HW test or if userspace failed to do so
+ */
+static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
+ uint32_t handle,
+ bool direct, struct dma_fence **fence)
+{
+ const unsigned ib_size_dw = 16;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ uint64_t dummy;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+ dummy = ib->gpu_addr + 1024;
+
+ ib->length_dw = 0;
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+ ib->ptr[ib->length_dw++] = handle;
+ ib->ptr[ib->length_dw++] = 0x00010000;
+ ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
+ ib->ptr[ib->length_dw++] = dummy;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+ ib->ptr[ib->length_dw++] = 0x0000001c;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+
+ ib->ptr[ib->length_dw++] = 0x00000008;
+ ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
+
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+ if (direct) {
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ job->fence = dma_fence_get(f);
+ if (r)
+ goto err;
+
+ amdgpu_job_free(job);
+ } else {
+ r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ if (r)
+ goto err;
+ }
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+/**
+ * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
+ *
+ * @ring: the engine to test on
+ *
+ */
+static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct dma_fence *fence = NULL;
+ long r;
+
+ r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ goto error;
+ }
+
+ r = uvd_v6_0_enc_get_destroy_msg(ring, 1, true, &fence);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ goto error;
+ }
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ } else {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ }
+error:
+ dma_fence_put(fence);
+ return r;
+}
static int uvd_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -98,6 +376,12 @@ static int uvd_v6_0_early_init(void *handle)
return -ENOENT;
uvd_v6_0_set_ring_funcs(adev);
+
+ if (uvd_v6_0_enc_support(adev)) {
+ adev->uvd.num_enc_rings = 2;
+ uvd_v6_0_set_enc_ring_funcs(adev);
+ }
+
uvd_v6_0_set_irq_funcs(adev);
return 0;
@@ -106,7 +390,7 @@ static int uvd_v6_0_early_init(void *handle)
static int uvd_v6_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
- int r;
+ int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */
@@ -114,10 +398,31 @@ static int uvd_v6_0_sw_init(void *handle)
if (r)
return r;
+ /* UVD ENC TRAP */
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq);
+ if (r)
+ return r;
+ }
+ }
+
r = amdgpu_uvd_sw_init(adev);
if (r)
return r;
+ if (uvd_v6_0_enc_support(adev)) {
+ struct amd_sched_rq *rq;
+ ring = &adev->uvd.ring_enc[0];
+ rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+ r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
+ rq, amdgpu_sched_jobs);
+ if (r) {
+ DRM_ERROR("Failed setting up UVD ENC run queue.\n");
+ return r;
+ }
+ }
+
r = amdgpu_uvd_resume(adev);
if (r)
return r;
@@ -125,19 +430,38 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+ if (r)
+ return r;
+
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ ring = &adev->uvd.ring_enc[i];
+ sprintf(ring->name, "uvd_enc%d", i);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
+ if (r)
+ return r;
+ }
+ }
return r;
}
static int uvd_v6_0_sw_fini(void *handle)
{
- int r;
+ int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
+ if (uvd_v6_0_enc_support(adev)) {
+ amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+ }
+
return amdgpu_uvd_sw_fini(adev);
}
@@ -153,7 +477,7 @@ static int uvd_v6_0_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;
uint32_t tmp;
- int r;
+ int i, r;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
@@ -193,9 +517,25 @@ static int uvd_v6_0_hw_init(void *handle)
amdgpu_ring_commit(ring);
+ if (uvd_v6_0_enc_support(adev)) {
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
+ ring = &adev->uvd.ring_enc[i];
+ ring->ready = true;
+ r = amdgpu_ring_test_ring(ring);
+ if (r) {
+ ring->ready = false;
+ goto done;
+ }
+ }
+ }
+
done:
- if (!r)
- DRM_INFO("UVD initialized successfully.\n");
+ if (!r) {
+ if (uvd_v6_0_enc_support(adev))
+ DRM_INFO("UVD and UVD ENC initialized successfully.\n");
+ else
+ DRM_INFO("UVD initialized successfully.\n");
+ }
return r;
}
@@ -512,6 +852,22 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
+ if (uvd_v6_0_enc_support(adev)) {
+ ring = &adev->uvd.ring_enc[0];
+ WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
+
+ ring = &adev->uvd.ring_enc[1];
+ WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
+ }
+
return 0;
}
@@ -575,6 +931,26 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
+ * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @fence: fence to emit
+ *
+ * Write an enc fence and a trap command to the ring.
+ */
+static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, unsigned flags)
+{
+ WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
+ amdgpu_ring_write(ring, addr);
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
+}
+
+/**
* uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
*
* @ring: amdgpu_ring pointer
@@ -665,6 +1041,24 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
+/**
+ * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write enc ring commands to execute the indirect buffer
+ */
+static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+{
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
+ amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+}
+
static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
@@ -716,6 +1110,33 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xE);
}
+static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, seq);
+}
+
+static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
+}
+
+static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned int vm_id, uint64_t pd_addr)
+{
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
+ amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, pd_addr >> 12);
+
+ amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
+ amdgpu_ring_write(ring, vm_id);
+}
+
static bool uvd_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -823,8 +1244,31 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ bool int_handled = true;
DRM_DEBUG("IH: UVD TRAP\n");
- amdgpu_fence_process(&adev->uvd.ring);
+
+ switch (entry->src_id) {
+ case 124:
+ amdgpu_fence_process(&adev->uvd.ring);
+ break;
+ case 119:
+ if (likely(uvd_v6_0_enc_support(adev)))
+ amdgpu_fence_process(&adev->uvd.ring_enc[0]);
+ else
+ int_handled = false;
+ break;
+ case 120:
+ if (likely(uvd_v6_0_enc_support(adev)))
+ amdgpu_fence_process(&adev->uvd.ring_enc[1]);
+ else
+ int_handled = false;
+ break;
+ }
+
+ if (!int_handled)
+ DRM_ERROR("Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+
return 0;
}
@@ -1151,6 +1595,33 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.end_use = amdgpu_uvd_ring_end_use,
};
+static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_UVD_ENC,
+ .align_mask = 0x3f,
+ .nop = HEVC_ENC_CMD_NO_OP,
+ .support_64bit_ptrs = false,
+ .get_rptr = uvd_v6_0_enc_ring_get_rptr,
+ .get_wptr = uvd_v6_0_enc_ring_get_wptr,
+ .set_wptr = uvd_v6_0_enc_ring_set_wptr,
+ .emit_frame_size =
+ 4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
+ 6 + /* uvd_v6_0_enc_ring_emit_vm_flush */
+ 5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* uvd_v6_0_enc_ring_insert_end */
+ .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
+ .emit_ib = uvd_v6_0_enc_ring_emit_ib,
+ .emit_fence = uvd_v6_0_enc_ring_emit_fence,
+ .emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
+ .emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
+ .test_ring = uvd_v6_0_enc_ring_test_ring,
+ .test_ib = uvd_v6_0_enc_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .insert_end = uvd_v6_0_enc_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
+};
+
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
if (adev->asic_type >= CHIP_POLARIS10) {
@@ -1162,6 +1633,16 @@ static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
}
}
+static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->uvd.num_enc_rings; ++i)
+ adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
+
+ DRM_INFO("UVD ENC is enabled in VM mode\n");
+}
+
static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
.set = uvd_v6_0_set_interrupt_state,
.process = uvd_v6_0_process_interrupt,
@@ -1169,7 +1650,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->uvd.irq.num_types = 1;
+ if (uvd_v6_0_enc_support(adev))
+ adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1;
+ else
+ adev->uvd.irq.num_types = 1;
+
adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}
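
The ENC ring test in the uvd_v6_0.c hunks builds a tiny session: a 16-dword IB carrying a session-info block, a task-info block and an "op initialize" (or "op close session" for teardown), then waits on the resulting fence. A standalone sketch that fills the same create-message layout into a plain array; the constants are copied from the hunk and the buffer address is a dummy, just as in the test itself:

#include <stdint.h>
#include <stdio.h>

#define IB_SIZE_DW 16

/* Fill a UVD ENC "create session" message, as in uvd_v6_0_enc_get_create_msg. */
static unsigned fill_enc_create_msg(uint32_t *ib, uint32_t handle,
				    uint64_t dummy_addr)
{
	unsigned n = 0;

	ib[n++] = 0x00000018;                 /* session info */
	ib[n++] = 0x00000001;
	ib[n++] = handle;
	ib[n++] = 0x00010000;
	ib[n++] = (uint32_t)(dummy_addr >> 32);
	ib[n++] = (uint32_t)dummy_addr;

	ib[n++] = 0x00000014;                 /* task info */
	ib[n++] = 0x00000002;
	ib[n++] = 0x0000001c;
	ib[n++] = 0x00000001;
	ib[n++] = 0x00000000;

	ib[n++] = 0x00000008;                 /* op initialize */
	ib[n++] = 0x08000001;

	while (n < IB_SIZE_DW)                /* pad the rest with zeros */
		ib[n++] = 0x0;

	return n;
}

int main(void)
{
	uint32_t ib[IB_SIZE_DW];
	unsigned i, len = fill_enc_create_msg(ib, 1, 0x1000 + 1024);

	for (i = 0; i < len; i++)
		printf("%02u: 0x%08x\n", i, ib[i]);
	return 0;
}
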
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 23a85750edd6..6634545060fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -592,11 +592,7 @@ static int uvd_v7_0_suspend(void *handle)
if (r)
return r;
- /* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU))
- r = amdgpu_uvd_suspend(adev);
-
- return r;
+ return amdgpu_uvd_suspend(adev);
}
static int uvd_v7_0_resume(void *handle)
@@ -604,12 +600,10 @@ static int uvd_v7_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU)) {
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
- }
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
return uvd_v7_0_hw_init(adev);
}
@@ -1161,7 +1155,7 @@ static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
*/
static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0));
+ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
amdgpu_ring_write(ring, 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 11134d5f7443..75745544600a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -1011,10 +1011,6 @@ static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
{
DRM_DEBUG("IH: VCE\n");
- WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_STATUS),
- VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
- ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
-
switch (entry->src_data[0]) {
case 0:
case 1:
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 21e7b88401e1..1eb4d79d6e30 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -812,7 +812,7 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
*/
static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0));
+ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
amdgpu_ring_write(ring, 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 56150e8d1ed2..697325737ba8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -219,14 +219,95 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
wptr, adev->irq.ih.rptr, tmp);
adev->irq.ih.rptr = tmp;
- tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
+ tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp);
}
return (wptr & adev->irq.ih.ptr_mask);
}
/**
+ * vega10_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u32 dw0, dw3, dw4, dw5;
+ u16 pasid;
+ u64 addr, key;
+ struct amdgpu_vm *vm;
+ int r;
+
+ dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+ dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+ dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
+ dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
+
+ /* Filter retry page faults, let only the first one pass. If
+ * there are too many outstanding faults, ignore them until
+ * some faults get cleared.
+ */
+ switch (dw0 & 0xff) {
+ case AMDGPU_IH_CLIENTID_VMC:
+ case AMDGPU_IH_CLIENTID_UTCL2:
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ pasid = dw3 & 0xffff;
+ /* No PASID, can't identify faulting process */
+ if (!pasid)
+ return true;
+
+ /* Not a retry fault, check fault credit */
+ if (!(dw5 & 0x80)) {
+ if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
+ goto ignore_iv;
+ return true;
+ }
+
+ addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
+ key = AMDGPU_VM_FAULT(pasid, addr);
+ r = amdgpu_ih_add_fault(adev, key);
+
+ /* Hash table is full or the fault is already being processed,
+ * ignore further page faults
+ */
+ if (r != 0)
+ goto ignore_iv;
+
+ /* Track retry faults in per-VM fault FIFO. */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ if (WARN_ON_ONCE(!vm)) {
+ /* VM not found, process it normally */
+ amdgpu_ih_clear_fault(adev, key);
+ return true;
+ }
+ /* No locking required with single writer and single reader */
+ r = kfifo_put(&vm->faults, key);
+ if (!r) {
+ /* FIFO is full. Ignore it until there is space */
+ amdgpu_ih_clear_fault(adev, key);
+ goto ignore_iv;
+ }
+
+ /* It's the first fault for this address, process it normally */
+ return true;
+
+ignore_iv:
+ adev->irq.ih.rptr += 32;
+ return false;
+}
+
+/**
* vega10_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -310,6 +391,14 @@ static int vega10_ih_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;
+ adev->irq.ih.faults = kmalloc(sizeof(*adev->irq.ih.faults), GFP_KERNEL);
+ if (!adev->irq.ih.faults)
+ return -ENOMEM;
+ INIT_CHASH_TABLE(adev->irq.ih.faults->hash,
+ AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
+ spin_lock_init(&adev->irq.ih.faults->lock);
+ adev->irq.ih.faults->count = 0;
+
r = amdgpu_irq_init(adev);
return r;
@@ -322,6 +411,9 @@ static int vega10_ih_sw_fini(void *handle)
amdgpu_irq_fini(adev);
amdgpu_ih_ring_fini(adev);
+ kfree(adev->irq.ih.faults);
+ adev->irq.ih.faults = NULL;
+
return 0;
}
@@ -410,6 +502,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
.get_wptr = vega10_ih_get_wptr,
+ .prescreen_iv = vega10_ih_prescreen_iv,
.decode_iv = vega10_ih_decode_iv,
.set_rptr = vega10_ih_set_rptr
};
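
Vega10 prescreening has to distinguish a first-time retry fault (let it through and remember it) from the storm of identical retries that follow (drop them until the handler clears the key). A condensed, self-contained sketch of that dedup decision using an ordinary array as the "seen faults" set; the real code uses a closed hash table plus a per-VM kfifo, and the key composition below is only illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_OUTSTANDING 8

static uint64_t outstanding[MAX_OUTSTANDING];
static unsigned noutstanding;

/* Remember a fault key; refuse duplicates and refuse new keys when full. */
static bool fault_add(uint64_t key)
{
	unsigned i;

	for (i = 0; i < noutstanding; i++)
		if (outstanding[i] == key)
			return false;     /* already being handled: drop */
	if (noutstanding == MAX_OUTSTANDING)
		return false;             /* table full: drop until space */
	outstanding[noutstanding++] = key;
	return true;
}

/* Decide whether a retry fault should reach the fault handler. */
static bool prescreen_retry_fault(uint16_t pasid, uint64_t addr)
{
	uint64_t key = ((uint64_t)pasid << 48) | (addr >> 12);

	return fault_add(key);
}

int main(void)
{
	printf("first fault passes: %d\n", prescreen_retry_fault(5, 0x1000));
	printf("retry is filtered:  %d\n", prescreen_retry_fault(5, 0x1000));
	printf("new address passes: %d\n", prescreen_retry_fault(5, 0x2000));
	return 0;
}
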
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 9ff69b90df36..f3cfef48aa99 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1254,7 +1254,6 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- void *pp_handle = adev->powerplay.pp_handle;
if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
@@ -1271,7 +1270,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_MC,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
@@ -1289,7 +1289,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_SDMA,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
@@ -1307,7 +1308,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_HDP,
pp_support_state,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
@@ -1321,7 +1323,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_LS,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
if (state == AMD_CG_STATE_UNGATE)
@@ -1333,7 +1336,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_BIF,
PP_STATE_SUPPORT_CG,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
@@ -1347,7 +1351,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_DRM,
PP_STATE_SUPPORT_LS,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
@@ -1361,7 +1366,8 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
PP_BLOCK_SYS_ROM,
PP_STATE_SUPPORT_CG,
pp_state);
- amd_set_clockgating_by_smu(pp_handle, msg_id);
+ if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
+ amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
}
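
Each clockgating message in vi.c is now routed through the powerplay function table, and since not every backend fills in set_clockgating_by_smu, the call is guarded by a check on the function pointer. A tiny sketch of that optional-callback idiom; the names are placeholders:

#include <stdint.h>
#include <stdio.h>

struct fake_pp_funcs {
	int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
};

struct fake_dev {
	const struct fake_pp_funcs *pp_funcs;
};

/* Only forward the message if the backend actually implements the hook. */
static void send_cg_message(struct fake_dev *dev, uint32_t msg_id)
{
	if (dev->pp_funcs && dev->pp_funcs->set_clockgating_by_smu)
		dev->pp_funcs->set_clockgating_by_smu(dev, msg_id);
	else
		printf("msg 0x%x skipped: no SMU clockgating hook\n", msg_id);
}

int main(void)
{
	struct fake_dev dev = { .pp_funcs = NULL };

	send_cg_message(&dev, 0x5a);
	return 0;
}
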
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index a6485254a169..dbf3703cbd1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -465,6 +465,16 @@
#define VCE_CMD_UPDATE_PTB 0x00000107
#define VCE_CMD_FLUSH_TLB 0x00000108
+/* HEVC ENC */
+#define HEVC_ENC_CMD_NO_OP 0x00000000
+#define HEVC_ENC_CMD_END 0x00000001
+#define HEVC_ENC_CMD_FENCE 0x00000003
+#define HEVC_ENC_CMD_TRAP 0x00000004
+#define HEVC_ENC_CMD_IB_VM 0x00000102
+#define HEVC_ENC_CMD_WAIT_GE 0x00000106
+#define HEVC_ENC_CMD_UPDATE_PTB 0x00000107
+#define HEVC_ENC_CMD_FLUSH_TLB 0x00000108
+
/* mmPA_SC_RASTER_CONFIG mask */
#define RB_MAP_PKR0(x) ((x) << 0)
#define RB_MAP_PKR0_MASK (0x3 << 0)
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index e13c67c8d2c0..bc5a2945bd2b 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,6 +4,6 @@
config HSA_AMD
tristate "HSA kernel driver for AMD GPU devices"
- depends on (DRM_RADEON || DRM_AMDGPU) && AMD_IOMMU_V2 && X86_64
+ depends on DRM_AMDGPU && AMD_IOMMU_V2 && X86_64
help
Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 211fc48697fa..3d5ccb3755d4 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -36,6 +36,7 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
/* Do not process in ISR, just request it to be forwarded to WQ. */
return (pasid != 0) &&
(ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
+ ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE);
}
@@ -46,6 +47,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
unsigned int pasid;
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
+ uint32_t context_id = ihre->data & 0xfffffff;
pasid = (ihre->ring_id & 0xffff0000) >> 16;
@@ -53,9 +55,11 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
return;
if (ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE)
- kfd_signal_event_interrupt(pasid, 0, 0);
+ kfd_signal_event_interrupt(pasid, context_id, 28);
+ else if (ihre->source_id == CIK_INTSRC_SDMA_TRAP)
+ kfd_signal_event_interrupt(pasid, context_id, 28);
else if (ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG)
- kfd_signal_event_interrupt(pasid, ihre->data & 0xFF, 8);
+ kfd_signal_event_interrupt(pasid, context_id & 0xff, 8);
else if (ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE)
kfd_signal_hw_exception_event(pasid);
}
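
With SDMA traps now forwarded as well, the CIK event worker pulls the PASID from the upper half of ring_id and a 28-bit context_id from the data dword before signalling the event code; only SQ interrupt messages still truncate the payload to 8 bits. A small sketch of that field slicing; the struct is abbreviated to the fields the hunk touches:

#include <stdint.h>
#include <stdio.h>

struct cik_entry {
	uint32_t source_id;
	uint32_t data;
	uint32_t ring_id;
};

int main(void)
{
	struct cik_entry e = {
		.source_id = 0xe0,          /* CIK_INTSRC_SDMA_TRAP */
		.data      = 0x1234567,
		.ring_id   = 0x00420000,
	};
	uint32_t pasid = (e.ring_id & 0xffff0000) >> 16;
	uint32_t context_id = e.data & 0xfffffff;   /* low 28 bits */

	printf("pasid %u, context_id 0x%07x\n", pasid, context_id);
	return 0;
}
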
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_int.h b/drivers/gpu/drm/amd/amdkfd/cik_int.h
index 79a16d24c1b8..109298b9d507 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_int.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_int.h
@@ -32,9 +32,10 @@ struct cik_ih_ring_entry {
uint32_t reserved;
};
-#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
#define CIK_INTSRC_CP_END_OF_PIPE 0xB5
#define CIK_INTSRC_CP_BAD_OPCODE 0xB7
+#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
+#define CIK_INTSRC_SDMA_TRAP 0xE0
#define CIK_INTSRC_SQ_INTERRUPT_MSG 0xEF
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 660b3fbade41..505d39156acd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -282,8 +282,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
p->pasid,
dev->id);
- err = pqm_create_queue(&p->pqm, dev, filep, &q_properties,
- 0, q_properties.type, &queue_id);
+ err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);
if (err != 0)
goto err_create_queue;
@@ -451,8 +450,8 @@ static int kfd_ioctl_dbg_register(struct file *filep,
return -EINVAL;
}
- mutex_lock(kfd_get_dbgmgr_mutex());
mutex_lock(&p->mutex);
+ mutex_lock(kfd_get_dbgmgr_mutex());
/*
* make sure that we have pdd, if this the first queue created for
@@ -480,8 +479,8 @@ static int kfd_ioctl_dbg_register(struct file *filep,
}
out:
- mutex_unlock(&p->mutex);
mutex_unlock(kfd_get_dbgmgr_mutex());
+ mutex_unlock(&p->mutex);
return status;
}
@@ -836,15 +835,12 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_wait_events_args *args = data;
- enum kfd_event_wait_result wait_result;
int err;
err = kfd_wait_on_events(p, args->num_events,
(void __user *)args->events_ptr,
(args->wait_for_all != 0),
- args->timeout, &wait_result);
-
- args->wait_result = wait_result;
+ args->timeout, &args->wait_result);
return err;
}
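
The dbg_register path now takes the process mutex before the debug-manager mutex and releases them in reverse, so every path that needs both acquires them in the same order and an ABBA deadlock cannot form. A minimal pthread illustration of that invariant; the locks and the work done under them are placeholders:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t process_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t dbgmgr_lock  = PTHREAD_MUTEX_INITIALIZER;

/* Every caller that needs both locks takes process_lock first and
 * dbgmgr_lock second, then releases in the opposite order. */
static void register_debugger(void)
{
	pthread_mutex_lock(&process_lock);
	pthread_mutex_lock(&dbgmgr_lock);

	printf("debugger registered\n");

	pthread_mutex_unlock(&dbgmgr_lock);
	pthread_mutex_unlock(&process_lock);
}

int main(void)
{
	register_debugger();
	return 0;
}
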
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index 0aa021aa0aa1..c407f6bd9956 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -184,9 +184,10 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
struct kernel_queue *kq = NULL;
int status;
+ properties.type = KFD_QUEUE_TYPE_DIQ;
+
status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,
- &properties, 0, KFD_QUEUE_TYPE_DIQ,
- &qid);
+ &properties, &qid);
if (status) {
pr_err("Failed to create DIQ\n");
@@ -769,13 +770,8 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct kfd_process_device *pdd;
struct dbg_wave_control_info wac_info;
- int temp;
- int first_vmid_to_scan = 8;
- int last_vmid_to_scan = 15;
-
- first_vmid_to_scan = ffs(dev->shared_resources.compute_vmid_bitmap) - 1;
- temp = dev->shared_resources.compute_vmid_bitmap >> first_vmid_to_scan;
- last_vmid_to_scan = first_vmid_to_scan + ffz(temp);
+ int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
+ int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
reg_sq_cmd.u32All = 0;
status = 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 61fff25b4ce7..621a3b53a038 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -92,6 +92,8 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
+static int kfd_resume(struct kfd_dev *kfd);
+
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
size_t i;
@@ -168,23 +170,9 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd)
pasid_limit = min_t(unsigned int,
(unsigned int)(1 << kfd->device_info->max_pasid_bits),
iommu_info.max_pasids);
- /*
- * last pasid is used for kernel queues doorbells
- * in the future the last pasid might be used for a kernel thread.
- */
- pasid_limit = min_t(unsigned int,
- pasid_limit,
- kfd->doorbell_process_limit - 1);
-
- err = amd_iommu_init_device(kfd->pdev, pasid_limit);
- if (err < 0) {
- dev_err(kfd_device, "error initializing iommu device\n");
- return false;
- }
if (!kfd_set_pasid_limit(pasid_limit)) {
dev_err(kfd_device, "error setting pasid limit\n");
- amd_iommu_free_device(kfd->pdev);
return false;
}
@@ -196,7 +184,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
if (dev)
- kfd_unbind_process_from_device(dev, pasid);
+ kfd_process_iommu_unbind_callback(dev, pasid);
}
/*
@@ -231,6 +219,11 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->shared_resources = *gpu_resources;
+ kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
+ kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
+ kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
+ - kfd->vm_info.first_vmid_kfd + 1;
+
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
@@ -280,29 +273,22 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto kfd_interrupt_error;
}
- if (!device_iommu_pasid_init(kfd)) {
- dev_err(kfd_device,
- "Error initializing iommuv2 for device %x:%x\n",
- kfd->pdev->vendor, kfd->pdev->device);
- goto device_iommu_pasid_error;
- }
- amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
- iommu_pasid_shutdown_callback);
- amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
-
kfd->dqm = device_queue_manager_init(kfd);
if (!kfd->dqm) {
dev_err(kfd_device, "Error initializing queue manager\n");
goto device_queue_manager_error;
}
- if (kfd->dqm->ops.start(kfd->dqm)) {
+ if (!device_iommu_pasid_init(kfd)) {
dev_err(kfd_device,
- "Error starting queue manager for device %x:%x\n",
+ "Error initializing iommuv2 for device %x:%x\n",
kfd->pdev->vendor, kfd->pdev->device);
- goto dqm_start_error;
+ goto device_iommu_pasid_error;
}
+ if (kfd_resume(kfd))
+ goto kfd_resume_error;
+
kfd->dbgmgr = NULL;
kfd->init_complete = true;
@@ -314,11 +300,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto out;
-dqm_start_error:
+kfd_resume_error:
+device_iommu_pasid_error:
device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
- amd_iommu_free_device(kfd->pdev);
-device_iommu_pasid_error:
kfd_interrupt_exit(kfd);
kfd_interrupt_error:
kfd_topology_remove_device(kfd);
@@ -338,8 +323,8 @@ out:
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
if (kfd->init_complete) {
+ kgd2kfd_suspend(kfd);
device_queue_manager_uninit(kfd->dqm);
- amd_iommu_free_device(kfd->pdev);
kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
kfd_doorbell_fini(kfd);
@@ -352,35 +337,59 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
- if (kfd->init_complete) {
- kfd->dqm->ops.stop(kfd->dqm);
- amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
- amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
- amd_iommu_free_device(kfd->pdev);
- }
+ if (!kfd->init_complete)
+ return;
+
+ kfd->dqm->ops.stop(kfd->dqm);
+
+ kfd_unbind_processes_from_device(kfd);
+
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
+ amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
+ amd_iommu_free_device(kfd->pdev);
}
int kgd2kfd_resume(struct kfd_dev *kfd)
{
- unsigned int pasid_limit;
- int err;
+ if (!kfd->init_complete)
+ return 0;
- pasid_limit = kfd_get_pasid_limit();
+ return kfd_resume(kfd);
- if (kfd->init_complete) {
- err = amd_iommu_init_device(kfd->pdev, pasid_limit);
- if (err < 0) {
- dev_err(kfd_device, "failed to initialize iommu\n");
- return -ENXIO;
- }
+}
- amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
- iommu_pasid_shutdown_callback);
- amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
- kfd->dqm->ops.start(kfd->dqm);
+static int kfd_resume(struct kfd_dev *kfd)
+{
+ int err = 0;
+ unsigned int pasid_limit = kfd_get_pasid_limit();
+
+ err = amd_iommu_init_device(kfd->pdev, pasid_limit);
+ if (err)
+ return -ENXIO;
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+ iommu_pasid_shutdown_callback);
+ amd_iommu_set_invalid_ppr_cb(kfd->pdev,
+ iommu_invalid_ppr_cb);
+
+ err = kfd_bind_processes_to_device(kfd);
+ if (err)
+ goto processes_bind_error;
+
+ err = kfd->dqm->ops.start(kfd->dqm);
+ if (err) {
+ dev_err(kfd_device,
+ "Error starting queue manager for device %x:%x\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+ goto dqm_start_error;
}
- return 0;
+ return err;
+
+dqm_start_error:
+processes_bind_error:
+ amd_iommu_free_device(kfd->pdev);
+
+ return err;
}
/* This is called directly from KGD at ISR. */
@@ -394,7 +403,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
if (kfd->interrupts_active
&& interrupt_is_wanted(kfd, ih_ring_entry)
&& enqueue_ih_ring_entry(kfd, ih_ring_entry))
- schedule_work(&kfd->interrupt_work);
+ queue_work(kfd->ih_wq, &kfd->interrupt_work);
spin_unlock(&kfd->interrupt_lock);
}
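
kfd_resume now owns the whole IOMMU/process/DQM bring-up, and its failure path unwinds in reverse through labels, which is what lets the same function serve both first init and resume. A compact standalone sketch of that goto-unwind shape; the steps are stand-ins for the real IOMMU, bind and DQM calls:

#include <stdio.h>

static int step_a(void) { printf("iommu init\n"); return 0; }
static int step_b(void) { printf("bind processes\n"); return 0; }
static int step_c(void) { printf("start dqm\n"); return -1; } /* force failure */
static void undo_a(void) { printf("iommu free\n"); }

/* Kernel-style unwind: later failures jump to labels that undo only the
 * steps that already succeeded, as in kfd_resume above. */
static int fake_resume(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto fail_bind;

	err = step_c();
	if (err)
		goto fail_dqm;

	return 0;

fail_dqm:
fail_bind:
	undo_a();
	return err;
}

int main(void)
{
	return fake_resume() ? 1 : 0;
}
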
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 53a66e821624..e202921c150e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -44,9 +44,14 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd);
-static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
-static int destroy_queues_cpsch(struct device_queue_manager *dqm,
- bool preempt_static_queues, bool lock);
+static int execute_queues_cpsch(struct device_queue_manager *dqm,
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param);
+static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param);
+
+static int map_queues_cpsch(struct device_queue_manager *dqm);
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
@@ -113,11 +118,11 @@ static int allocate_vmid(struct device_queue_manager *dqm,
if (dqm->vmid_bitmap == 0)
return -ENOMEM;
- bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
+ bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap,
+ dqm->dev->vm_info.vmid_num_kfd);
clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
- /* Kaveri kfd vmid's starts from vmid 8 */
- allocated_vmid = bit + KFD_VMID_START_OFFSET;
+ allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
pr_debug("vmid allocation %d\n", allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
@@ -132,7 +137,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
- int bit = qpd->vmid - KFD_VMID_START_OFFSET;
+ int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;
/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
@@ -184,6 +189,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
}
list_add(&q->list, &qpd->queues_list);
+ qpd->queue_count++;
if (q->properties.is_active)
dqm->queue_count++;
@@ -273,6 +279,9 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
dqm->dev->kfd2kgd->set_scratch_backing_va(
dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
+ if (!q->properties.is_active)
+ return 0;
+
retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
q->process->mm);
if (retval)
@@ -288,65 +297,74 @@ out_deallocate_hqd:
return retval;
}
-static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
+ * to avoid unsynchronized access
+ */
+static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
int retval;
struct mqd_manager *mqd;
- retval = 0;
-
- mutex_lock(&dqm->lock);
+ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+ if (!mqd)
+ return -ENOMEM;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (mqd == NULL) {
- retval = -ENOMEM;
- goto out;
- }
deallocate_hqd(dqm, q);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
- if (mqd == NULL) {
- retval = -ENOMEM;
- goto out;
- }
dqm->sdma_queue_count--;
deallocate_sdma_queue(dqm, q->sdma_id);
} else {
pr_debug("q->properties.type %d is invalid\n",
q->properties.type);
- retval = -EINVAL;
- goto out;
+ return -EINVAL;
}
+ dqm->total_queue_count--;
retval = mqd->destroy_mqd(mqd, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
- QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
+ KFD_UNMAP_LATENCY_MS,
q->pipe, q->queue);
-
- if (retval)
- goto out;
+ if (retval == -ETIME)
+ qpd->reset_wavefronts = true;
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
list_del(&q->list);
- if (list_empty(&qpd->queues_list))
+ if (list_empty(&qpd->queues_list)) {
+ if (qpd->reset_wavefronts) {
+ pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
+ dqm->dev);
+ /* dbgdev_wave_reset_wavefronts has to be called before
+ * deallocate_vmid(), i.e. when vmid is still in use.
+ */
+ dbgdev_wave_reset_wavefronts(dqm->dev,
+ qpd->pqm->process);
+ qpd->reset_wavefronts = false;
+ }
+
deallocate_vmid(dqm, qpd, q);
+ }
+ qpd->queue_count--;
if (q->properties.is_active)
dqm->queue_count--;
- /*
- * Unconditionally decrement this counter, regardless of the queue's
- * type
- */
- dqm->total_queue_count--;
- pr_debug("Total of %d queues are accountable so far\n",
- dqm->total_queue_count);
+ return retval;
+}
-out:
+static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ struct queue *q)
+{
+ int retval;
+
+ mutex_lock(&dqm->lock);
+ retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
mutex_unlock(&dqm->lock);
+
return retval;
}
@@ -364,29 +382,56 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
goto out_unlock;
}
- if (q->properties.is_active)
- prev_active = true;
+ /* Save previous activity state for counters */
+ prev_active = q->properties.is_active;
+
+ /* Make sure the queue is unmapped before updating the MQD */
+ if (sched_policy != KFD_SCHED_POLICY_NO_HWS) {
+ retval = unmap_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ if (retval) {
+ pr_err("unmap queue failed\n");
+ goto out_unlock;
+ }
+ } else if (prev_active &&
+ (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+ retval = mqd->destroy_mqd(mqd, q->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
+ KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
+ if (retval) {
+ pr_err("destroy mqd failed\n");
+ goto out_unlock;
+ }
+ }
+
+ retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
/*
- *
- * check active state vs. the previous state
- * and modify counter accordingly
+ * check active state vs. the previous state and modify
+ * counter accordingly. map_queues_cpsch uses the
+ * dqm->queue_count to determine whether a new runlist must be
+ * uploaded.
*/
- retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
- if ((q->properties.is_active) && (!prev_active))
+ if (q->properties.is_active && !prev_active)
dqm->queue_count++;
else if (!q->properties.is_active && prev_active)
dqm->queue_count--;
if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
- retval = execute_queues_cpsch(dqm, false);
+ retval = map_queues_cpsch(dqm);
+ else if (q->properties.is_active &&
+ (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_SDMA))
+ retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
+ &q->properties, q->process->mm);
out_unlock:
mutex_unlock(&dqm->lock);
return retval;
}
-static struct mqd_manager *get_mqd_manager_nocpsch(
+static struct mqd_manager *get_mqd_manager(
struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
struct mqd_manager *mqd;
@@ -407,7 +452,7 @@ static struct mqd_manager *get_mqd_manager_nocpsch(
return mqd;
}
-static int register_process_nocpsch(struct device_queue_manager *dqm,
+static int register_process(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct device_process_node *n;
@@ -422,7 +467,7 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
mutex_lock(&dqm->lock);
list_add(&n->list, &dqm->queues);
- retval = dqm->ops_asic_specific.register_process(dqm, qpd);
+ retval = dqm->asic_ops.update_qpd(dqm, qpd);
dqm->processes_count++;
@@ -431,7 +476,7 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
return retval;
}
-static int unregister_process_nocpsch(struct device_queue_manager *dqm,
+static int unregister_process(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int retval;
@@ -507,13 +552,13 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
dqm->allocated_queues[pipe] |= 1 << queue;
}
- dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
+ dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
return 0;
}
-static void uninitialize_nocpsch(struct device_queue_manager *dqm)
+static void uninitialize(struct device_queue_manager *dqm)
{
int i;
@@ -577,14 +622,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
if (retval)
return retval;
- q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
- q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;
+ q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
+ q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
pr_debug("SDMA id is: %d\n", q->sdma_id);
pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
- dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
+ dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
@@ -613,8 +658,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
int i, mec;
struct scheduling_resources res;
- res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
- res.vmid_mask <<= KFD_VMID_START_OFFSET;
+ res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
res.queue_mask = 0;
for (i = 0; i < KGD_MAX_QUEUES; ++i) {
@@ -652,8 +696,6 @@ static int set_sched_resources(struct device_queue_manager *dqm)
static int initialize_cpsch(struct device_queue_manager *dqm)
{
- int retval;
-
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
mutex_init(&dqm->lock);
@@ -661,16 +703,13 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->queue_count = dqm->processes_count = 0;
dqm->sdma_queue_count = 0;
dqm->active_runlist = false;
- retval = dqm->ops_asic_specific.initialize(dqm);
- if (retval)
- mutex_destroy(&dqm->lock);
+ dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
- return retval;
+ return 0;
}
static int start_cpsch(struct device_queue_manager *dqm)
{
- struct device_process_node *node;
int retval;
retval = 0;
@@ -697,12 +736,9 @@ static int start_cpsch(struct device_queue_manager *dqm)
init_interrupts(dqm);
- list_for_each_entry(node, &dqm->queues, list)
- if (node->qpd->pqm->process && dqm->dev)
- kfd_bind_process_to_device(dqm->dev,
- node->qpd->pqm->process);
-
- execute_queues_cpsch(dqm, true);
+ mutex_lock(&dqm->lock);
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ mutex_unlock(&dqm->lock);
return 0;
fail_allocate_vidmem:
@@ -714,15 +750,10 @@ fail_packet_manager_init:
static int stop_cpsch(struct device_queue_manager *dqm)
{
- struct device_process_node *node;
- struct kfd_process_device *pdd;
-
- destroy_queues_cpsch(dqm, true, true);
+ mutex_lock(&dqm->lock);
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ mutex_unlock(&dqm->lock);
- list_for_each_entry(node, &dqm->queues, list) {
- pdd = qpd_to_pdd(node->qpd);
- pdd->bound = false;
- }
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets);
@@ -752,7 +783,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
list_add(&kq->list, &qpd->priv_queue_list);
dqm->queue_count++;
qpd->is_debug = true;
- execute_queues_cpsch(dqm, false);
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
mutex_unlock(&dqm->lock);
return 0;
@@ -763,12 +794,10 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
mutex_lock(&dqm->lock);
- /* here we actually preempt the DIQ */
- destroy_queues_cpsch(dqm, true, false);
list_del(&kq->list);
dqm->queue_count--;
qpd->is_debug = false;
- execute_queues_cpsch(dqm, false);
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
/*
* Unconditionally decrement this counter, regardless of the queue's
* type.
@@ -779,14 +808,6 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
mutex_unlock(&dqm->lock);
}
-static void select_sdma_engine_id(struct queue *q)
-{
- static int sdma_id;
-
- q->sdma_id = sdma_id;
- sdma_id = (sdma_id + 1) % 2;
-}
-
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd, int *allocate_vmid)
{
@@ -807,9 +828,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
goto out;
}
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- select_sdma_engine_id(q);
-
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
+ retval = allocate_sdma_queue(dqm, &q->sdma_id);
+ if (retval)
+ goto out;
+ q->properties.sdma_queue_id =
+ q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
+ q->properties.sdma_engine_id =
+ q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
+ }
mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
@@ -818,16 +845,18 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
goto out;
}
- dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
+ dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
goto out;
list_add(&q->list, &qpd->queues_list);
+ qpd->queue_count++;
if (q->properties.is_active) {
dqm->queue_count++;
- retval = execute_queues_cpsch(dqm, false);
+ retval = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
}
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
@@ -848,12 +877,12 @@ out:
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
unsigned int fence_value,
- unsigned long timeout)
+ unsigned int timeout_ms)
{
- timeout += jiffies;
+ unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
while (*fence_addr != fence_value) {
- if (time_after(jiffies, timeout)) {
+ if (time_after(jiffies, end_jiffies)) {
pr_err("qcm fence wait loop timeout expired\n");
return -ETIME;
}
@@ -863,44 +892,57 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
return 0;
}
-static int destroy_sdma_queues(struct device_queue_manager *dqm,
+static int unmap_sdma_queues(struct device_queue_manager *dqm,
unsigned int sdma_engine)
{
return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
- KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES, 0, false,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
sdma_engine);
}
-static int destroy_queues_cpsch(struct device_queue_manager *dqm,
- bool preempt_static_queues, bool lock)
+/* dqm->lock mutex has to be locked before calling this function */
+static int map_queues_cpsch(struct device_queue_manager *dqm)
{
int retval;
- enum kfd_preempt_type_filter preempt_type;
- struct kfd_process_device *pdd;
- retval = 0;
+ if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
+ return 0;
+
+ if (dqm->active_runlist)
+ return 0;
+
+ retval = pm_send_runlist(&dqm->packets, &dqm->queues);
+ if (retval) {
+ pr_err("failed to execute runlist\n");
+ return retval;
+ }
+ dqm->active_runlist = true;
+
+ return retval;
+}
+
+/* dqm->lock mutex has to be locked before calling this function */
+static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param)
+{
+ int retval = 0;
- if (lock)
- mutex_lock(&dqm->lock);
if (!dqm->active_runlist)
- goto out;
+ return retval;
pr_debug("Before destroying queues, sdma queue count is : %u\n",
dqm->sdma_queue_count);
if (dqm->sdma_queue_count > 0) {
- destroy_sdma_queues(dqm, 0);
- destroy_sdma_queues(dqm, 1);
+ unmap_sdma_queues(dqm, 0);
+ unmap_sdma_queues(dqm, 1);
}
- preempt_type = preempt_static_queues ?
- KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES :
- KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES;
-
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
- preempt_type, 0, false, 0);
+ filter, filter_param, false, 0);
if (retval)
- goto out;
+ return retval;
*dqm->fence_addr = KFD_FENCE_INIT;
pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
@@ -908,55 +950,29 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
/* should be timed out */
retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
- if (retval) {
- pdd = kfd_get_process_device_data(dqm->dev,
- kfd_get_process(current));
- pdd->reset_wavefronts = true;
- goto out;
- }
+ if (retval)
+ return retval;
+
pm_release_ib(&dqm->packets);
dqm->active_runlist = false;
-out:
- if (lock)
- mutex_unlock(&dqm->lock);
return retval;
}
-static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
+/* dqm->lock mutex has to be locked before calling this function */
+static int execute_queues_cpsch(struct device_queue_manager *dqm,
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param)
{
int retval;
- if (lock)
- mutex_lock(&dqm->lock);
-
- retval = destroy_queues_cpsch(dqm, false, false);
- if (retval) {
- pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
- goto out;
- }
-
- if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
- retval = 0;
- goto out;
- }
-
- if (dqm->active_runlist) {
- retval = 0;
- goto out;
- }
-
- retval = pm_send_runlist(&dqm->packets, &dqm->queues);
+ retval = unmap_queues_cpsch(dqm, filter, filter_param);
if (retval) {
- pr_err("failed to execute runlist");
- goto out;
+ pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
+ return retval;
}
- dqm->active_runlist = true;
-out:
- if (lock)
- mutex_unlock(&dqm->lock);
- return retval;
+ return map_queues_cpsch(dqm);
}
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
@@ -991,14 +1007,20 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
goto failed;
}
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
dqm->sdma_queue_count--;
+ deallocate_sdma_queue(dqm, q->sdma_id);
+ }
list_del(&q->list);
+ qpd->queue_count--;
if (q->properties.is_active)
dqm->queue_count--;
- execute_queues_cpsch(dqm, false);
+ retval = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ if (retval == -ETIME)
+ qpd->reset_wavefronts = true;
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
@@ -1068,7 +1090,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
qpd->sh_mem_ape1_limit = limit >> 16;
}
- retval = dqm->ops_asic_specific.set_cache_memory_policy(
+ retval = dqm->asic_ops.set_cache_memory_policy(
dqm,
qpd,
default_policy,
@@ -1088,6 +1110,109 @@ out:
return retval;
}
+static int process_termination_nocpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ struct queue *q, *next;
+ struct device_process_node *cur, *next_dpn;
+ int retval = 0;
+
+ mutex_lock(&dqm->lock);
+
+ /* Clear all user mode queues */
+ list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+ int ret;
+
+ ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
+ if (ret)
+ retval = ret;
+ }
+
+ /* Unregister process */
+ list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
+ if (qpd == cur->qpd) {
+ list_del(&cur->list);
+ kfree(cur);
+ dqm->processes_count--;
+ break;
+ }
+ }
+
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
+
+static int process_termination_cpsch(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd)
+{
+ int retval;
+ struct queue *q, *next;
+ struct kernel_queue *kq, *kq_next;
+ struct mqd_manager *mqd;
+ struct device_process_node *cur, *next_dpn;
+ enum kfd_unmap_queues_filter filter =
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
+
+ retval = 0;
+
+ mutex_lock(&dqm->lock);
+
+ /* Clean all kernel queues */
+ list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
+ list_del(&kq->list);
+ dqm->queue_count--;
+ qpd->is_debug = false;
+ dqm->total_queue_count--;
+ filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
+ }
+
+ /* Clear all user mode queues */
+ list_for_each_entry(q, &qpd->queues_list, list) {
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ dqm->sdma_queue_count--;
+
+ if (q->properties.is_active)
+ dqm->queue_count--;
+
+ dqm->total_queue_count--;
+ }
+
+ /* Unregister process */
+ list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
+ if (qpd == cur->qpd) {
+ list_del(&cur->list);
+ kfree(cur);
+ dqm->processes_count--;
+ break;
+ }
+ }
+
+ retval = execute_queues_cpsch(dqm, filter, 0);
+ if (retval || qpd->reset_wavefronts) {
+ pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
+ dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
+ qpd->reset_wavefronts = false;
+ }
+
+ /* lastly, free mqd resources */
+ list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+ mqd = dqm->ops.get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+ if (!mqd) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ list_del(&q->list);
+ qpd->queue_count--;
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ }
+
+out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+}
+
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
struct device_queue_manager *dqm;
@@ -1109,13 +1234,14 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.stop = stop_cpsch;
dqm->ops.destroy_queue = destroy_queue_cpsch;
dqm->ops.update_queue = update_queue;
- dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
- dqm->ops.register_process = register_process_nocpsch;
- dqm->ops.unregister_process = unregister_process_nocpsch;
- dqm->ops.uninitialize = uninitialize_nocpsch;
+ dqm->ops.get_mqd_manager = get_mqd_manager;
+ dqm->ops.register_process = register_process;
+ dqm->ops.unregister_process = unregister_process;
+ dqm->ops.uninitialize = uninitialize;
dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
+ dqm->ops.process_termination = process_termination_cpsch;
break;
case KFD_SCHED_POLICY_NO_HWS:
/* initialize dqm for no cp scheduling */
@@ -1124,12 +1250,13 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.create_queue = create_queue_nocpsch;
dqm->ops.destroy_queue = destroy_queue_nocpsch;
dqm->ops.update_queue = update_queue;
- dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
- dqm->ops.register_process = register_process_nocpsch;
- dqm->ops.unregister_process = unregister_process_nocpsch;
+ dqm->ops.get_mqd_manager = get_mqd_manager;
+ dqm->ops.register_process = register_process;
+ dqm->ops.unregister_process = unregister_process;
dqm->ops.initialize = initialize_nocpsch;
- dqm->ops.uninitialize = uninitialize_nocpsch;
+ dqm->ops.uninitialize = uninitialize;
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
+ dqm->ops.process_termination = process_termination_nocpsch;
break;
default:
pr_err("Invalid scheduling policy %d\n", sched_policy);
@@ -1138,12 +1265,16 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
switch (dev->device_info->asic_family) {
case CHIP_CARRIZO:
- device_queue_manager_init_vi(&dqm->ops_asic_specific);
+ device_queue_manager_init_vi(&dqm->asic_ops);
break;
case CHIP_KAVERI:
- device_queue_manager_init_cik(&dqm->ops_asic_specific);
+ device_queue_manager_init_cik(&dqm->asic_ops);
break;
+ default:
+ WARN(1, "Unexpected ASIC family %u",
+ dev->device_info->asic_family);
+ goto out_free;
}
if (!dqm->ops.initialize(dqm))
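
One detail worth calling out in this file is that amdkfd_fence_wait_timeout() now takes its timeout in milliseconds and converts it once with msecs_to_jiffies(). A minimal sketch of that polling-with-deadline idiom, with a hypothetical fence word and none of the KFD packet-manager plumbing, could be:

#include <linux/jiffies.h>
#include <linux/processor.h>

/* Poll *fence_addr until it reaches fence_value or timeout_ms elapses.
 * A real caller in process context may prefer to schedule() between
 * polls instead of cpu_relax().
 */
static int poll_fence_ms(unsigned int *fence_addr,
			 unsigned int fence_value, unsigned int timeout_ms)
{
	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout_ms);

	while (READ_ONCE(*fence_addr) != fence_value) {
		if (time_after(jiffies, end_jiffies))
			return -ETIME;	/* deadline expired */
		cpu_relax();
	}
	return 0;
}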
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index faf820a06400..5b77cb69f732 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -29,11 +29,9 @@
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
-#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (500)
-#define CIK_VMID_NUM (8)
-#define KFD_VMID_START_OFFSET (8)
-#define VMID_PER_DEVICE CIK_VMID_NUM
-#define KFD_DQM_FIRST_PIPE (0)
+#define KFD_UNMAP_LATENCY_MS (4000)
+#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (2 * KFD_UNMAP_LATENCY_MS + 1000)
+
#define CIK_SDMA_QUEUES (4)
#define CIK_SDMA_QUEUES_PER_ENGINE (2)
#define CIK_SDMA_ENGINE_NUM (2)
@@ -79,6 +77,8 @@ struct device_process_node {
* @set_cache_memory_policy: Sets memory policy (cached/ non cached) for the
* memory apertures.
*
+ * @process_termination: Clears all process queues belonging to that device.
+ *
*/
struct device_queue_manager_ops {
@@ -122,12 +122,14 @@ struct device_queue_manager_ops {
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
+
+ int (*process_termination)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
};
struct device_queue_manager_asic_ops {
- int (*register_process)(struct device_queue_manager *dqm,
+ int (*update_qpd)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
- int (*initialize)(struct device_queue_manager *dqm);
bool (*set_cache_memory_policy)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
@@ -153,7 +155,7 @@ struct device_queue_manager_asic_ops {
struct device_queue_manager {
struct device_queue_manager_ops ops;
- struct device_queue_manager_asic_ops ops_asic_specific;
+ struct device_queue_manager_asic_ops asic_ops;
struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
struct packet_manager packets;
@@ -176,8 +178,10 @@ struct device_queue_manager {
bool active_runlist;
};
-void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
-void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
+void device_queue_manager_init_cik(
+ struct device_queue_manager_asic_ops *asic_ops);
+void device_queue_manager_init_vi(
+ struct device_queue_manager_asic_ops *asic_ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
unsigned int get_queues_num(struct device_queue_manager *dqm);
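
For reference, with KFD_UNMAP_LATENCY_MS defined as 4000, the new QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS evaluates to 2 * 4000 + 1000 = 9000 ms, so the fence wait in unmap_queues_cpsch() now has a much larger budget than the previous fixed 500 ms value.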
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 72c3cbabc0a7..28e48c90c596 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -32,18 +32,17 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
-static int register_process_cik(struct device_queue_manager *dqm,
+static int update_qpd_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
-static int initialize_cpsch_cik(struct device_queue_manager *dqm);
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
-void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops)
+void device_queue_manager_init_cik(
+ struct device_queue_manager_asic_ops *asic_ops)
{
- ops->set_cache_memory_policy = set_cache_memory_policy_cik;
- ops->register_process = register_process_cik;
- ops->initialize = initialize_cpsch_cik;
- ops->init_sdma_vm = init_sdma_vm;
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik;
+ asic_ops->update_qpd = update_qpd_cik;
+ asic_ops->init_sdma_vm = init_sdma_vm;
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
@@ -99,7 +98,7 @@ static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
return true;
}
-static int register_process_cik(struct device_queue_manager *dqm,
+static int update_qpd_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct kfd_process_device *pdd;
@@ -148,8 +147,3 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
q->properties.sdma_vm_addr = value;
}
-
-static int initialize_cpsch_cik(struct device_queue_manager *dqm)
-{
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 40e9ddd096cd..2fbce57a2f21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -33,18 +33,17 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
-static int register_process_vi(struct device_queue_manager *dqm,
+static int update_qpd_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
-static int initialize_cpsch_vi(struct device_queue_manager *dqm);
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
-void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops)
+void device_queue_manager_init_vi(
+ struct device_queue_manager_asic_ops *asic_ops)
{
- ops->set_cache_memory_policy = set_cache_memory_policy_vi;
- ops->register_process = register_process_vi;
- ops->initialize = initialize_cpsch_vi;
- ops->init_sdma_vm = init_sdma_vm;
+ asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi;
+ asic_ops->update_qpd = update_qpd_vi;
+ asic_ops->init_sdma_vm = init_sdma_vm;
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
@@ -104,7 +103,7 @@ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
return true;
}
-static int register_process_vi(struct device_queue_manager *dqm,
+static int update_qpd_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct kfd_process_device *pdd;
@@ -160,8 +159,3 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
q->properties.sdma_vm_addr = value;
}
-
-static int initialize_cpsch_vi(struct device_queue_manager *dqm)
-{
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index acf4d2a977ad..feb76c235b1a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -24,16 +24,15 @@
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/idr.h>
/*
- * This extension supports a kernel level doorbells management for
- * the kernel queues.
- * Basically the last doorbells page is devoted to kernel queues
- * and that's assures that any user process won't get access to the
- * kernel doorbells page
+ * This extension supports kernel-level doorbell management for the
+ * kernel queues, using the first doorbell page reserved for the kernel.
*/
-#define KERNEL_DOORBELL_PASID 1
+static DEFINE_IDA(doorbell_ida);
+static unsigned int max_doorbell_slices;
#define KFD_SIZE_OF_DOORBELL_IN_BYTES 4
/*
@@ -84,13 +83,16 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
(doorbell_aperture_size - doorbell_start_offset) /
doorbell_process_allocation();
else
- doorbell_process_limit = 0;
+ return -ENOSPC;
+
+ if (!max_doorbell_slices ||
+ doorbell_process_limit < max_doorbell_slices)
+ max_doorbell_slices = doorbell_process_limit;
kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
doorbell_start_offset;
kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);
- kfd->doorbell_process_limit = doorbell_process_limit - 1;
kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
doorbell_process_allocation());
@@ -185,11 +187,10 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
return NULL;
/*
- * Calculating the kernel doorbell offset using "faked" kernel
- * pasid that allocated for kernel queues only
+ * Calculating the kernel doorbell offset using the first
+ * doorbell page.
*/
- *doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() /
- sizeof(u32)) + inx;
+ *doorbell_off = kfd->doorbell_id_offset + inx;
pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
@@ -228,11 +229,12 @@ unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
{
/*
* doorbell_id_offset accounts for doorbells taken by KGD.
- * pasid * doorbell_process_allocation/sizeof(u32) adjusts
- * to the process's doorbells
+ * index * doorbell_process_allocation/sizeof(u32) adjusts to
+ * the process's doorbells.
*/
return kfd->doorbell_id_offset +
- process->pasid * (doorbell_process_allocation()/sizeof(u32)) +
+ process->doorbell_index
+ * doorbell_process_allocation() / sizeof(u32) +
queue_id;
}
@@ -250,5 +252,21 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
struct kfd_process *process)
{
return dev->doorbell_base +
- process->pasid * doorbell_process_allocation();
+ process->doorbell_index * doorbell_process_allocation();
+}
+
+int kfd_alloc_process_doorbells(struct kfd_process *process)
+{
+ int r = ida_simple_get(&doorbell_ida, 1, max_doorbell_slices,
+ GFP_KERNEL);
+ if (r > 0)
+ process->doorbell_index = r;
+
+ return r;
+}
+
+void kfd_free_process_doorbells(struct kfd_process *process)
+{
+ if (process->doorbell_index)
+ ida_simple_remove(&doorbell_ida, process->doorbell_index);
}
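
The doorbell changes replace the per-pasid page arithmetic with a small IDA that hands out per-process doorbell indices. A sketch of that allocation pattern in isolation, with hypothetical example_* names, might be:

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);

/* ida_simple_get() returns the smallest free index in [start, end),
 * so starting at 1 keeps index 0 reserved (here, for the kernel page).
 */
static int example_alloc_index(unsigned int max_slices)
{
	return ida_simple_get(&example_ida, 1, max_slices, GFP_KERNEL);
}

static void example_free_index(int index)
{
	if (index > 0)
		ida_simple_remove(&example_ida, index);
}

Because allocation starts at 1, any successful return value is strictly positive, which is why the patch can test "if (r > 0)" before storing the index.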
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 944abfad39c1..cb92d4b72400 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -24,8 +24,8 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
#include <linux/uaccess.h>
-#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
@@ -33,185 +33,89 @@
#include <linux/device.h>
/*
- * A task can only be on a single wait_queue at a time, but we need to support
- * waiting on multiple events (any/all).
- * Instead of each event simply having a wait_queue with sleeping tasks, it
- * has a singly-linked list of tasks.
- * A thread that wants to sleep creates an array of these, one for each event
- * and adds one to each event's waiter chain.
+ * Wrapper around wait_queue_entry_t
*/
struct kfd_event_waiter {
- struct list_head waiters;
- struct task_struct *sleeping_task;
-
- /* Transitions to true when the event this belongs to is signaled. */
- bool activated;
-
- /* Event */
- struct kfd_event *event;
- uint32_t input_index;
+ wait_queue_entry_t wait;
+ struct kfd_event *event; /* Event to wait for */
+ bool activated; /* Becomes true when event is signaled */
};
/*
- * Over-complicated pooled allocator for event notification slots.
- *
* Each signal event needs a 64-bit signal slot where the signaler will write
- * a 1 before sending an interrupt.l (This is needed because some interrupts
+ * a 1 before sending an interrupt. (This is needed because some interrupts
* do not contain enough spare data bits to identify an event.)
- * We get whole pages from vmalloc and map them to the process VA.
- * Individual signal events are then allocated a slot in a page.
+ * We get whole pages and map them to the process VA.
+ * Individual signal events use their event_id as slot index.
*/
-
-struct signal_page {
- struct list_head event_pages; /* kfd_process.signal_event_pages */
+struct kfd_signal_page {
uint64_t *kernel_address;
uint64_t __user *user_address;
- uint32_t page_index; /* Index into the mmap aperture. */
- unsigned int free_slots;
- unsigned long used_slot_bitmap[0];
};
-#define SLOTS_PER_PAGE KFD_SIGNAL_EVENT_LIMIT
-#define SLOT_BITMAP_SIZE BITS_TO_LONGS(SLOTS_PER_PAGE)
-#define BITS_PER_PAGE (ilog2(SLOTS_PER_PAGE)+1)
-#define SIGNAL_PAGE_SIZE (sizeof(struct signal_page) + \
- SLOT_BITMAP_SIZE * sizeof(long))
-
-/*
- * For signal events, the event ID is used as the interrupt user data.
- * For SQ s_sendmsg interrupts, this is limited to 8 bits.
- */
-
-#define INTERRUPT_DATA_BITS 8
-#define SIGNAL_EVENT_ID_SLOT_SHIFT 0
-static uint64_t *page_slots(struct signal_page *page)
+static uint64_t *page_slots(struct kfd_signal_page *page)
{
return page->kernel_address;
}
-static bool allocate_free_slot(struct kfd_process *process,
- struct signal_page **out_page,
- unsigned int *out_slot_index)
-{
- struct signal_page *page;
-
- list_for_each_entry(page, &process->signal_event_pages, event_pages) {
- if (page->free_slots > 0) {
- unsigned int slot =
- find_first_zero_bit(page->used_slot_bitmap,
- SLOTS_PER_PAGE);
-
- __set_bit(slot, page->used_slot_bitmap);
- page->free_slots--;
-
- page_slots(page)[slot] = UNSIGNALED_EVENT_SLOT;
-
- *out_page = page;
- *out_slot_index = slot;
-
- pr_debug("Allocated event signal slot in page %p, slot %d\n",
- page, slot);
-
- return true;
- }
- }
-
- pr_debug("No free event signal slots were found for process %p\n",
- process);
-
- return false;
-}
-
-#define list_tail_entry(head, type, member) \
- list_entry((head)->prev, type, member)
-
-static bool allocate_signal_page(struct file *devkfd, struct kfd_process *p)
+static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
{
void *backing_store;
- struct signal_page *page;
+ struct kfd_signal_page *page;
- page = kzalloc(SIGNAL_PAGE_SIZE, GFP_KERNEL);
+ page = kzalloc(sizeof(*page), GFP_KERNEL);
if (!page)
- goto fail_alloc_signal_page;
+ return NULL;
- page->free_slots = SLOTS_PER_PAGE;
-
- backing_store = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ backing_store = (void *) __get_free_pages(GFP_KERNEL,
get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
if (!backing_store)
goto fail_alloc_signal_store;
- /* prevent user-mode info leaks */
+ /* Initialize all events to unsignaled */
memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
- KFD_SIGNAL_EVENT_LIMIT * 8);
+ KFD_SIGNAL_EVENT_LIMIT * 8);
page->kernel_address = backing_store;
-
- if (list_empty(&p->signal_event_pages))
- page->page_index = 0;
- else
- page->page_index = list_tail_entry(&p->signal_event_pages,
- struct signal_page,
- event_pages)->page_index + 1;
-
pr_debug("Allocated new event signal page at %p, for process %p\n",
page, p);
- pr_debug("Page index is %d\n", page->page_index);
- list_add(&page->event_pages, &p->signal_event_pages);
-
- return true;
+ return page;
fail_alloc_signal_store:
kfree(page);
-fail_alloc_signal_page:
- return false;
+ return NULL;
}
-static bool allocate_event_notification_slot(struct file *devkfd,
- struct kfd_process *p,
- struct signal_page **page,
- unsigned int *signal_slot_index)
+static int allocate_event_notification_slot(struct kfd_process *p,
+ struct kfd_event *ev)
{
- bool ret;
+ int id;
- ret = allocate_free_slot(p, page, signal_slot_index);
- if (!ret) {
- ret = allocate_signal_page(devkfd, p);
- if (ret)
- ret = allocate_free_slot(p, page, signal_slot_index);
+ if (!p->signal_page) {
+ p->signal_page = allocate_signal_page(p);
+ if (!p->signal_page)
+ return -ENOMEM;
+ /* Oldest user mode expects 256 event slots */
+ p->signal_mapped_size = 256*8;
}
- return ret;
-}
-
-/* Assumes that the process's event_mutex is locked. */
-static void release_event_notification_slot(struct signal_page *page,
- size_t slot_index)
-{
- __clear_bit(slot_index, page->used_slot_bitmap);
- page->free_slots++;
-
- /* We don't free signal pages, they are retained by the process
- * and reused until it exits.
- */
-}
-
-static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
- unsigned int page_index)
-{
- struct signal_page *page;
-
/*
- * This is safe because we don't delete signal pages until the
- * process exits.
+	 * Compatibility with old user mode: Only use signal slots that
+	 * user mode has mapped, which may be fewer than
+	 * KFD_SIGNAL_EVENT_LIMIT. This also allows a future increase of
+	 * the event limit without breaking user mode.
*/
- list_for_each_entry(page, &p->signal_event_pages, event_pages)
- if (page->page_index == page_index)
- return page;
+ id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
+ GFP_KERNEL);
+ if (id < 0)
+ return id;
- return NULL;
+ ev->event_id = id;
+ page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;
+
+ return 0;
}
/*
@@ -220,99 +124,81 @@ static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
*/
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
- struct kfd_event *ev;
-
- hash_for_each_possible(p->events, ev, events, id)
- if (ev->event_id == id)
- return ev;
-
- return NULL;
+ return idr_find(&p->event_idr, id);
}
-static u32 make_signal_event_id(struct signal_page *page,
- unsigned int signal_slot_index)
-{
- return page->page_index |
- (signal_slot_index << SIGNAL_EVENT_ID_SLOT_SHIFT);
-}
-
-/*
- * Produce a kfd event id for a nonsignal event.
- * These are arbitrary numbers, so we do a sequential search through
- * the hash table for an unused number.
+/**
+ * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
+ * @p: Pointer to struct kfd_process
+ * @id: ID to look up
+ * @bits: Number of valid bits in @id
+ *
+ * Finds the first signaled event with a matching partial ID. If no
+ * matching signaled event is found, returns NULL. In that case the
+ * caller should assume that the partial ID is invalid and do an
+ * exhaustive search of all signaled events.
+ *
+ * If multiple events with the same partial ID signal at the same
+ * time, they will be found one interrupt at a time, not necessarily
+ * in the same order the interrupts occurred. As long as the number of
+ * interrupts is correct, all signaled events will be seen by the
+ * driver.
*/
-static u32 make_nonsignal_event_id(struct kfd_process *p)
+static struct kfd_event *lookup_signaled_event_by_partial_id(
+ struct kfd_process *p, uint32_t id, uint32_t bits)
{
- u32 id;
-
- for (id = p->next_nonsignal_event_id;
- id < KFD_LAST_NONSIGNAL_EVENT_ID &&
- lookup_event_by_id(p, id);
- id++)
- ;
+ struct kfd_event *ev;
- if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {
+ if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
+ return NULL;
- /*
- * What if id == LAST_NONSIGNAL_EVENT_ID - 1?
- * Then next_nonsignal_event_id = LAST_NONSIGNAL_EVENT_ID so
- * the first loop fails immediately and we proceed with the
- * wraparound loop below.
- */
- p->next_nonsignal_event_id = id + 1;
+ /* Fast path for the common case that @id is not a partial ID
+ * and we only need a single lookup.
+ */
+ if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) {
+ if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
+ return NULL;
- return id;
+ return idr_find(&p->event_idr, id);
}
- for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
- id < KFD_LAST_NONSIGNAL_EVENT_ID &&
- lookup_event_by_id(p, id);
- id++)
- ;
-
+ /* General case for partial IDs: Iterate over all matching IDs
+ * and find the first one that has signaled.
+ */
+ for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
+ if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
+ continue;
- if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {
- p->next_nonsignal_event_id = id + 1;
- return id;
+ ev = idr_find(&p->event_idr, id);
}
- p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
- return 0;
-}
-
-static struct kfd_event *lookup_event_by_page_slot(struct kfd_process *p,
- struct signal_page *page,
- unsigned int signal_slot)
-{
- return lookup_event_by_id(p, make_signal_event_id(page, signal_slot));
+ return ev;
}
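
As a concrete example of the general case above: with 8 valid bits and a partial ID of 0x12, the loop probes IDs 0x012, 0x112, 0x212, and so on (stepping by 1 << 8) until it either finds a slot that is signaled or reaches KFD_SIGNAL_EVENT_LIMIT.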
static int create_signal_event(struct file *devkfd,
struct kfd_process *p,
struct kfd_event *ev)
{
- if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
+ int ret;
+
+ if (p->signal_mapped_size &&
+ p->signal_event_count == p->signal_mapped_size / 8) {
if (!p->signal_event_limit_reached) {
pr_warn("Signal event wasn't created because limit was reached\n");
p->signal_event_limit_reached = true;
}
- return -ENOMEM;
+ return -ENOSPC;
}
- if (!allocate_event_notification_slot(devkfd, p, &ev->signal_page,
- &ev->signal_slot_index)) {
+ ret = allocate_event_notification_slot(p, ev);
+ if (ret) {
pr_warn("Signal event wasn't created because out of kernel memory\n");
- return -ENOMEM;
+ return ret;
}
p->signal_event_count++;
- ev->user_signal_address =
- &ev->signal_page->user_address[ev->signal_slot_index];
-
- ev->event_id = make_signal_event_id(ev->signal_page,
- ev->signal_slot_index);
-
+ ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
pr_debug("Signal event number %zu created with id %d, address %p\n",
p->signal_event_count, ev->event_id,
ev->user_signal_address);
@@ -320,16 +206,20 @@ static int create_signal_event(struct file *devkfd,
return 0;
}
-/*
- * No non-signal events are supported yet.
- * We create them as events that never signal.
- * Set event calls from user-mode are failed.
- */
static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
{
- ev->event_id = make_nonsignal_event_id(p);
- if (ev->event_id == 0)
- return -ENOMEM;
+	/* Cast KFD_LAST_NONSIGNAL_EVENT_ID to uint32_t. This allows an
+ * intentional integer overflow to -1 without a compiler
+ * warning. idr_alloc treats a negative value as "maximum
+ * signed integer".
+ */
+ int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
+ (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
+ GFP_KERNEL);
+
+ if (id < 0)
+ return id;
+ ev->event_id = id;
return 0;
}
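
The event bookkeeping in this file now rests on a single IDR: small IDs double as signal-slot indices, while non-signal events come from the upper half of the positive integer range (see the defines added to kfd_events.h further down). A stripped-down sketch of the two allocations, with a hypothetical idr and without the overflow trick used above, could be:

#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/gfp.h>

/* Signal events: ID in [0, nslots), usable directly as a slot index. */
static int alloc_signal_id(struct idr *idr, void *ev, unsigned int nslots)
{
	return idr_alloc(idr, ev, 0, nslots, GFP_KERNEL);
}

/* Other events: upper half of the positive int space. An end of 0 tells
 * idr_alloc() there is no upper limit, i.e. IDs may go up to INT_MAX.
 */
static int alloc_other_id(struct idr *idr, void *ev)
{
	return idr_alloc(idr, ev, (INT_MAX >> 1) + 1, 0, GFP_KERNEL);
}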
@@ -337,50 +227,47 @@ static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
void kfd_event_init_process(struct kfd_process *p)
{
mutex_init(&p->event_mutex);
- hash_init(p->events);
- INIT_LIST_HEAD(&p->signal_event_pages);
- p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ idr_init(&p->event_idr);
+ p->signal_page = NULL;
p->signal_event_count = 0;
}
static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
- if (ev->signal_page) {
- release_event_notification_slot(ev->signal_page,
- ev->signal_slot_index);
- p->signal_event_count--;
- }
+ struct kfd_event_waiter *waiter;
- /*
- * Abandon the list of waiters. Individual waiting threads will
- * clean up their own data.
- */
- list_del(&ev->waiters);
+ /* Wake up pending waiters. They will return failure */
+ list_for_each_entry(waiter, &ev->wq.head, wait.entry)
+ waiter->event = NULL;
+ wake_up_all(&ev->wq);
+
+ if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
+ ev->type == KFD_EVENT_TYPE_DEBUG)
+ p->signal_event_count--;
- hash_del(&ev->events);
+ idr_remove(&p->event_idr, ev->event_id);
kfree(ev);
}
static void destroy_events(struct kfd_process *p)
{
struct kfd_event *ev;
- struct hlist_node *tmp;
- unsigned int hash_bkt;
+ uint32_t id;
- hash_for_each_safe(p->events, hash_bkt, tmp, ev, events)
+ idr_for_each_entry(&p->event_idr, ev, id)
destroy_event(p, ev);
+ idr_destroy(&p->event_idr);
}
/*
* We assume that the process is being destroyed and there is no need to
* unmap the pages or keep bookkeeping data in order.
*/
-static void shutdown_signal_pages(struct kfd_process *p)
+static void shutdown_signal_page(struct kfd_process *p)
{
- struct signal_page *page, *tmp;
+ struct kfd_signal_page *page = p->signal_page;
- list_for_each_entry_safe(page, tmp, &p->signal_event_pages,
- event_pages) {
+ if (page) {
free_pages((unsigned long)page->kernel_address,
get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
kfree(page);
@@ -390,7 +277,7 @@ static void shutdown_signal_pages(struct kfd_process *p)
void kfd_event_free_process(struct kfd_process *p)
{
destroy_events(p);
- shutdown_signal_pages(p);
+ shutdown_signal_page(p);
}
static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
@@ -419,7 +306,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
ev->auto_reset = auto_reset;
ev->signaled = false;
- INIT_LIST_HEAD(&ev->waiters);
+ init_waitqueue_head(&ev->wq);
*event_page_offset = 0;
@@ -430,10 +317,9 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
case KFD_EVENT_TYPE_DEBUG:
ret = create_signal_event(devkfd, p, ev);
if (!ret) {
- *event_page_offset = (ev->signal_page->page_index |
- KFD_MMAP_EVENTS_MASK);
+ *event_page_offset = KFD_MMAP_EVENTS_MASK;
*event_page_offset <<= PAGE_SHIFT;
- *event_slot_index = ev->signal_slot_index;
+ *event_slot_index = ev->event_id;
}
break;
default:
@@ -442,8 +328,6 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
}
if (!ret) {
- hash_add(p->events, &ev->events, ev->event_id);
-
*event_id = ev->event_id;
*event_trigger_data = ev->event_id;
} else {
@@ -477,19 +361,18 @@ int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
static void set_event(struct kfd_event *ev)
{
struct kfd_event_waiter *waiter;
- struct kfd_event_waiter *next;
- /* Auto reset if the list is non-empty and we're waking someone. */
- ev->signaled = !ev->auto_reset || list_empty(&ev->waiters);
+ /* Auto reset if the list is non-empty and we're waking
+ * someone. waitqueue_active is safe here because we're
+ * protected by the p->event_mutex, which is also held when
+ * updating the wait queues in kfd_wait_on_events.
+ */
+ ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
- list_for_each_entry_safe(waiter, next, &ev->waiters, waiters) {
+ list_for_each_entry(waiter, &ev->wq.head, wait.entry)
waiter->activated = true;
- /* _init because free_waiters will call list_del */
- list_del_init(&waiter->waiters);
-
- wake_up_process(waiter->sleeping_task);
- }
+ wake_up_all(&ev->wq);
}
/* Assumes that p is current. */
@@ -538,13 +421,7 @@ int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
- page_slots(ev->signal_page)[ev->signal_slot_index] =
- UNSIGNALED_EVENT_SLOT;
-}
-
-static bool is_slot_signaled(struct signal_page *page, unsigned int index)
-{
- return page_slots(page)[index] != UNSIGNALED_EVENT_SLOT;
+ page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
}
static void set_event_from_interrupt(struct kfd_process *p,
@@ -559,7 +436,7 @@ static void set_event_from_interrupt(struct kfd_process *p,
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
uint32_t valid_id_bits)
{
- struct kfd_event *ev;
+ struct kfd_event *ev = NULL;
/*
* Because we are called from arbitrary context (workqueue) as opposed
@@ -573,26 +450,46 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
mutex_lock(&p->event_mutex);
- if (valid_id_bits >= INTERRUPT_DATA_BITS) {
- /* Partial ID is a full ID. */
- ev = lookup_event_by_id(p, partial_id);
+ if (valid_id_bits)
+ ev = lookup_signaled_event_by_partial_id(p, partial_id,
+ valid_id_bits);
+ if (ev) {
set_event_from_interrupt(p, ev);
- } else {
+ } else if (p->signal_page) {
/*
- * Partial ID is in fact partial. For now we completely
- * ignore it, but we could use any bits we did receive to
- * search faster.
+ * Partial ID lookup failed. Assume that the event ID
+ * in the interrupt payload was invalid and do an
+ * exhaustive search of signaled events.
*/
- struct signal_page *page;
- unsigned int i;
-
- list_for_each_entry(page, &p->signal_event_pages, event_pages)
- for (i = 0; i < SLOTS_PER_PAGE; i++)
- if (is_slot_signaled(page, i)) {
- ev = lookup_event_by_page_slot(p,
- page, i);
+ uint64_t *slots = page_slots(p->signal_page);
+ uint32_t id;
+
+ if (valid_id_bits)
+ pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
+ partial_id, valid_id_bits);
+
+ if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT/2) {
+ /* With relatively few events, it's faster to
+ * iterate over the event IDR
+ */
+ idr_for_each_entry(&p->event_idr, ev, id) {
+ if (id >= KFD_SIGNAL_EVENT_LIMIT)
+ break;
+
+ if (slots[id] != UNSIGNALED_EVENT_SLOT)
+ set_event_from_interrupt(p, ev);
+ }
+ } else {
+ /* With relatively many events, it's faster to
+ * iterate over the signal slots and lookup
+ * only signaled events from the IDR.
+ */
+ for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
+ if (slots[id] != UNSIGNALED_EVENT_SLOT) {
+ ev = lookup_event_by_id(p, id);
set_event_from_interrupt(p, ev);
}
+ }
}
mutex_unlock(&p->event_mutex);
@@ -609,18 +506,16 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
GFP_KERNEL);
for (i = 0; (event_waiters) && (i < num_events) ; i++) {
- INIT_LIST_HEAD(&event_waiters[i].waiters);
- event_waiters[i].sleeping_task = current;
+ init_wait(&event_waiters[i].wait);
event_waiters[i].activated = false;
}
return event_waiters;
}
-static int init_event_waiter(struct kfd_process *p,
+static int init_event_waiter_get_status(struct kfd_process *p,
struct kfd_event_waiter *waiter,
- uint32_t event_id,
- uint32_t input_index)
+ uint32_t event_id)
{
struct kfd_event *ev = lookup_event_by_id(p, event_id);
@@ -628,38 +523,60 @@ static int init_event_waiter(struct kfd_process *p,
return -EINVAL;
waiter->event = ev;
- waiter->input_index = input_index;
waiter->activated = ev->signaled;
ev->signaled = ev->signaled && !ev->auto_reset;
- list_add(&waiter->waiters, &ev->waiters);
-
return 0;
}
-static bool test_event_condition(bool all, uint32_t num_events,
+static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
+{
+ struct kfd_event *ev = waiter->event;
+
+ /* Only add to the wait list if we actually need to
+ * wait on this event.
+ */
+ if (!waiter->activated)
+ add_wait_queue(&ev->wq, &waiter->wait);
+}
+
+/* test_event_condition - Test condition of events being waited for
+ * @all: Return completion only if all events have signaled
+ * @num_events: Number of events to wait for
+ * @event_waiters: Array of event waiters, one per event
+ *
+ * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
+ * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
+ * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
+ * the events have been destroyed.
+ */
+static uint32_t test_event_condition(bool all, uint32_t num_events,
struct kfd_event_waiter *event_waiters)
{
uint32_t i;
uint32_t activated_count = 0;
for (i = 0; i < num_events; i++) {
+ if (!event_waiters[i].event)
+ return KFD_IOC_WAIT_RESULT_FAIL;
+
if (event_waiters[i].activated) {
if (!all)
- return true;
+ return KFD_IOC_WAIT_RESULT_COMPLETE;
activated_count++;
}
}
- return activated_count == num_events;
+ return activated_count == num_events ?
+ KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}
/*
* Copy event specific data, if defined.
* Currently only memory exception events have additional data to copy to user
*/
-static bool copy_signaled_event_data(uint32_t num_events,
+static int copy_signaled_event_data(uint32_t num_events,
struct kfd_event_waiter *event_waiters,
struct kfd_event_data __user *data)
{
@@ -673,15 +590,15 @@ static bool copy_signaled_event_data(uint32_t num_events,
waiter = &event_waiters[i];
event = waiter->event;
if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
- dst = &data[waiter->input_index].memory_exception_data;
+ dst = &data[i].memory_exception_data;
src = &event->memory_exception_data;
if (copy_to_user(dst, src,
sizeof(struct kfd_hsa_memory_exception_data)))
- return false;
+ return -EFAULT;
}
}
- return true;
+ return 0;
}
@@ -710,7 +627,9 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
uint32_t i;
for (i = 0; i < num_events; i++)
- list_del(&waiters[i].waiters);
+ if (waiters[i].event)
+ remove_wait_queue(&waiters[i].event->wq,
+ &waiters[i].wait);
kfree(waiters);
}
@@ -718,38 +637,56 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
bool all, uint32_t user_timeout_ms,
- enum kfd_event_wait_result *wait_result)
+ uint32_t *wait_result)
{
struct kfd_event_data __user *events =
(struct kfd_event_data __user *) data;
uint32_t i;
int ret = 0;
+
struct kfd_event_waiter *event_waiters = NULL;
long timeout = user_timeout_to_jiffies(user_timeout_ms);
- mutex_lock(&p->event_mutex);
-
event_waiters = alloc_event_waiters(num_events);
if (!event_waiters) {
ret = -ENOMEM;
- goto fail;
+ goto out;
}
+ mutex_lock(&p->event_mutex);
+
for (i = 0; i < num_events; i++) {
struct kfd_event_data event_data;
if (copy_from_user(&event_data, &events[i],
sizeof(struct kfd_event_data))) {
ret = -EFAULT;
- goto fail;
+ goto out_unlock;
}
- ret = init_event_waiter(p, &event_waiters[i],
- event_data.event_id, i);
+ ret = init_event_waiter_get_status(p, &event_waiters[i],
+ event_data.event_id);
if (ret)
- goto fail;
+ goto out_unlock;
}
+ /* Check condition once. */
+ *wait_result = test_event_condition(all, num_events, event_waiters);
+ if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
+ ret = copy_signaled_event_data(num_events,
+ event_waiters, events);
+ goto out_unlock;
+ } else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
+ /* This should not happen. Events shouldn't be
+ * destroyed while we're holding the event_mutex
+ */
+ goto out_unlock;
+ }
+
+ /* Add to wait lists if we need to wait. */
+ for (i = 0; i < num_events; i++)
+ init_event_waiter_add_to_waitlist(&event_waiters[i]);
+
mutex_unlock(&p->event_mutex);
while (true) {
@@ -771,62 +708,66 @@ int kfd_wait_on_events(struct kfd_process *p,
break;
}
- if (test_event_condition(all, num_events, event_waiters)) {
- if (copy_signaled_event_data(num_events,
- event_waiters, events))
- *wait_result = KFD_WAIT_COMPLETE;
- else
- *wait_result = KFD_WAIT_ERROR;
+ /* Set task state to interruptible sleep before
+ * checking wake-up conditions. A concurrent wake-up
+ * will put the task back into runnable state. In that
+ * case schedule_timeout will not put the task to
+ * sleep and we'll get a chance to re-check the
+ * updated conditions almost immediately. Otherwise,
+ * this race condition would lead to a soft hang or a
+ * very long sleep.
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ *wait_result = test_event_condition(all, num_events,
+ event_waiters);
+ if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
break;
- }
- if (timeout <= 0) {
- *wait_result = KFD_WAIT_TIMEOUT;
+ if (timeout <= 0)
break;
- }
- timeout = schedule_timeout_interruptible(timeout);
+ timeout = schedule_timeout(timeout);
}
__set_current_state(TASK_RUNNING);
+ /* copy_signaled_event_data may sleep. So this has to happen
+ * after the task state is set back to RUNNING.
+ */
+ if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
+ ret = copy_signaled_event_data(num_events,
+ event_waiters, events);
+
mutex_lock(&p->event_mutex);
+out_unlock:
free_waiters(num_events, event_waiters);
mutex_unlock(&p->event_mutex);
-
- return ret;
-
-fail:
- if (event_waiters)
- free_waiters(num_events, event_waiters);
-
- mutex_unlock(&p->event_mutex);
-
- *wait_result = KFD_WAIT_ERROR;
+out:
+ if (ret)
+ *wait_result = KFD_IOC_WAIT_RESULT_FAIL;
+ else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
+ ret = -EIO;
return ret;
}
int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
-
- unsigned int page_index;
unsigned long pfn;
- struct signal_page *page;
+ struct kfd_signal_page *page;
+ int ret;
- /* check required size is logical */
- if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
+ /* check required size doesn't exceed the allocated size */
+ if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
get_order(vma->vm_end - vma->vm_start)) {
pr_err("Event page mmap requested illegal size\n");
return -EINVAL;
}
- page_index = vma->vm_pgoff;
-
- page = lookup_signal_page_by_index(p, page_index);
+ page = p->signal_page;
if (!page) {
/* Probably KFD bug, but mmap is user-accessible. */
- pr_debug("Signal page could not be found for page_index %u\n",
- page_index);
+ pr_debug("Signal page could not be found\n");
return -EINVAL;
}
@@ -847,8 +788,12 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
page->user_address = (uint64_t __user *)vma->vm_start;
/* mapping the page to user process */
- return remap_pfn_range(vma, vma->vm_start, pfn,
+ ret = remap_pfn_range(vma, vma->vm_start, pfn,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
+ if (!ret)
+ p->signal_mapped_size = vma->vm_end - vma->vm_start;
+
+ return ret;
}
/*
@@ -860,12 +805,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
{
struct kfd_hsa_memory_exception_data *ev_data;
struct kfd_event *ev;
- int bkt;
+ uint32_t id;
bool send_signal = true;
ev_data = (struct kfd_hsa_memory_exception_data *) event_data;
- hash_for_each(p->events, bkt, ev, events)
+ id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ idr_for_each_entry_continue(&p->event_idr, ev, id)
if (ev->type == type) {
send_signal = false;
dev_dbg(kfd_device,
@@ -904,14 +850,24 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
* running so the lookup function returns a locked process.
*/
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct mm_struct *mm;
if (!p)
return; /* Presumably process exited. */
+ /* Take a safe reference to the mm_struct, which may otherwise
+ * disappear even while the kfd_process is still referenced.
+ */
+ mm = get_task_mm(p->lead_thread);
+ if (!mm) {
+ mutex_unlock(&p->mutex);
+ return; /* Process is exiting */
+ }
+
memset(&memory_exception_data, 0, sizeof(memory_exception_data));
- down_read(&p->mm->mmap_sem);
- vma = find_vma(p->mm, address);
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
memory_exception_data.gpu_id = dev->id;
memory_exception_data.va = address;
@@ -937,7 +893,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
}
}
- up_read(&p->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
mutex_lock(&p->event_mutex);
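
The new wait path in kfd_wait_on_events() relies on the classic set-state-then-check pattern described in the comment it adds: the task marks itself TASK_INTERRUPTIBLE before re-testing the wake-up condition, so a concurrent wake_up_all() either happens before the test (and is seen) or after it (and puts the task back to TASK_RUNNING so schedule_timeout() returns at once). A minimal, self-contained sketch of that idiom, with a hypothetical boolean condition, might be:

#include <linux/wait.h>
#include <linux/sched.h>

/* Sleep until *flag becomes true or the timeout (in jiffies) expires.
 * Returns the remaining timeout, mirroring schedule_timeout().
 */
static long wait_for_flag(wait_queue_head_t *wq, bool *flag, long timeout)
{
	wait_queue_entry_t wait;

	init_waitqueue_entry(&wait, current);
	add_wait_queue(wq, &wait);

	while (timeout > 0) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (READ_ONCE(*flag))
			break;
		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);

	return timeout;
}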
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
index 28f6838b1f4c..abca5bfebbff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
@@ -27,12 +27,17 @@
#include <linux/hashtable.h>
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/wait.h>
#include "kfd_priv.h"
#include <uapi/linux/kfd_ioctl.h>
-#define KFD_EVENT_ID_NONSIGNAL_MASK 0x80000000U
-#define KFD_FIRST_NONSIGNAL_EVENT_ID KFD_EVENT_ID_NONSIGNAL_MASK
-#define KFD_LAST_NONSIGNAL_EVENT_ID UINT_MAX
+/*
+ * IDR supports non-negative integer IDs. Small IDs are used for
+ * signal events to match their signal slot. Use the upper half of the
+ * ID space for non-signal events.
+ */
+#define KFD_FIRST_NONSIGNAL_EVENT_ID ((INT_MAX >> 1) + 1)
+#define KFD_LAST_NONSIGNAL_EVENT_ID INT_MAX
/*
* Written into kfd_signal_slot_t to indicate that the event is not signaled.
@@ -46,9 +51,6 @@ struct kfd_event_waiter;
struct signal_page;
struct kfd_event {
- /* All events in process, rooted at kfd_process.events. */
- struct hlist_node events;
-
u32 event_id;
bool signaled;
@@ -56,11 +58,9 @@ struct kfd_event {
int type;
- struct list_head waiters; /* List of kfd_event_waiter by waiters. */
+ wait_queue_head_t wq; /* List of event waiters. */
/* Only for signal events. */
- struct signal_page *signal_page;
- unsigned int signal_slot_index;
uint64_t __user *user_signal_address;
/* type specific data */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index 70b3a99cffc2..035c351f47c5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -42,26 +42,26 @@
#include <linux/slab.h>
#include <linux/device.h>
+#include <linux/kfifo.h>
#include "kfd_priv.h"
-#define KFD_INTERRUPT_RING_SIZE 1024
+#define KFD_IH_NUM_ENTRIES 8192
static void interrupt_wq(struct work_struct *);
int kfd_interrupt_init(struct kfd_dev *kfd)
{
- void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
- kfd->device_info->ih_ring_entry_size,
- GFP_KERNEL);
- if (!interrupt_ring)
- return -ENOMEM;
-
- kfd->interrupt_ring = interrupt_ring;
- kfd->interrupt_ring_size =
- KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
- atomic_set(&kfd->interrupt_ring_wptr, 0);
- atomic_set(&kfd->interrupt_ring_rptr, 0);
+ int r;
+
+ r = kfifo_alloc(&kfd->ih_fifo,
+ KFD_IH_NUM_ENTRIES * kfd->device_info->ih_ring_entry_size,
+ GFP_KERNEL);
+ if (r) {
+ dev_err(kfd_chardev(), "Failed to allocate IH fifo\n");
+ return r;
+ }
+ kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
spin_lock_init(&kfd->interrupt_lock);
INIT_WORK(&kfd->interrupt_work, interrupt_wq);
@@ -92,74 +92,47 @@ void kfd_interrupt_exit(struct kfd_dev *kfd)
spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
/*
- * Flush_scheduled_work ensures that there are no outstanding
+	 * flush_workqueue ensures that there are no outstanding
* work-queue items that will access interrupt_ring. New work items
* can't be created because we stopped interrupt handling above.
*/
- flush_scheduled_work();
+ flush_workqueue(kfd->ih_wq);
- kfree(kfd->interrupt_ring);
+ kfifo_free(&kfd->ih_fifo);
}
/*
- * This assumes that it can't be called concurrently with itself
- * but only with dequeue_ih_ring_entry.
+ * Assumption: single reader/writer. This function is not re-entrant
*/
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
{
- unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
- unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
+ int count;
- if ((rptr - wptr) % kfd->interrupt_ring_size ==
- kfd->device_info->ih_ring_entry_size) {
- /* This is very bad, the system is likely to hang. */
+ count = kfifo_in(&kfd->ih_fifo, ih_ring_entry,
+ kfd->device_info->ih_ring_entry_size);
+ if (count != kfd->device_info->ih_ring_entry_size) {
dev_err_ratelimited(kfd_chardev(),
- "Interrupt ring overflow, dropping interrupt.\n");
+ "Interrupt ring overflow, dropping interrupt %d\n",
+ count);
return false;
}
- memcpy(kfd->interrupt_ring + wptr, ih_ring_entry,
- kfd->device_info->ih_ring_entry_size);
-
- wptr = (wptr + kfd->device_info->ih_ring_entry_size) %
- kfd->interrupt_ring_size;
- smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
- atomic_set(&kfd->interrupt_ring_wptr, wptr);
-
return true;
}
/*
- * This assumes that it can't be called concurrently with itself
- * but only with enqueue_ih_ring_entry.
+ * Assumption: single reader/writer. This function is not re-entrant
*/
static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
{
- /*
- * Assume that wait queues have an implicit barrier, i.e. anything that
- * happened in the ISR before it queued work is visible.
- */
-
- unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
- unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
+ int count;
- if (rptr == wptr)
- return false;
-
- memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
- kfd->device_info->ih_ring_entry_size);
-
- rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
- kfd->interrupt_ring_size;
+ count = kfifo_out(&kfd->ih_fifo, ih_ring_entry,
+ kfd->device_info->ih_ring_entry_size);
- /*
- * Ensure the rptr write update is not visible until
- * memcpy has finished reading.
- */
- smp_mb();
- atomic_set(&kfd->interrupt_ring_rptr, rptr);
+ WARN_ON(count && count != kfd->device_info->ih_ring_entry_size);
- return true;
+ return count == kfd->device_info->ih_ring_entry_size;
}
static void interrupt_wq(struct work_struct *work)
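The conversion above replaces the hand-rolled rptr/wptr ring (with its explicit barriers) by a kfifo, which already guarantees correct ordering for a single producer and a single consumer. A self-contained sketch of the same fixed-size-record pattern (entry size, names and the module-scope fifo are illustrative):

#include <linux/kfifo.h>
#include <linux/gfp.h>

#define ENTRY_SIZE	16	/* bytes per ring entry, illustrative */
#define NUM_ENTRIES	8192

static struct kfifo fifo;

static int fifo_init(void)
{
	/* Size is in bytes; kfifo rounds it up to a power of two. */
	return kfifo_alloc(&fifo, NUM_ENTRIES * ENTRY_SIZE, GFP_KERNEL);
}

/* Producer (e.g. the ISR): refuse to copy a partial record so the
 * stream of fixed-size entries stays aligned.
 */
static bool fifo_push(const void *entry)
{
	if (kfifo_avail(&fifo) < ENTRY_SIZE)
		return false;	/* full: drop the entry */
	return kfifo_in(&fifo, entry, ENTRY_SIZE) == ENTRY_SIZE;
}

/* Consumer (e.g. the work item): true if a whole entry was read. */
static bool fifo_pop(void *entry)
{
	return kfifo_out(&fifo, entry, ENTRY_SIZE) == ENTRY_SIZE;
}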
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index ed71ad40e8f7..8b0c0645d7c0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -185,7 +185,7 @@ static void uninitialize(struct kernel_queue *kq)
kq->mqd->destroy_mqd(kq->mqd,
kq->queue->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
- QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
+ KFD_UNMAP_LATENCY_MS,
kq->queue->pipe,
kq->queue->queue);
else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
@@ -303,14 +303,20 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
case CHIP_KAVERI:
kernel_queue_init_cik(&kq->ops_asic_specific);
break;
+ default:
+ WARN(1, "Unexpected ASIC family %u",
+ dev->device_info->asic_family);
+ goto out_free;
}
- if (!kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) {
- pr_err("Failed to init kernel queue\n");
- kfree(kq);
- return NULL;
- }
- return kq;
+ if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
+ return kq;
+
+ pr_err("Failed to init kernel queue\n");
+
+out_free:
+ kfree(kq);
+ return NULL;
}
void kernel_queue_uninit(struct kernel_queue *kq)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 0d73bea22c45..6c5a9cab55de 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -103,10 +103,6 @@ static int __init kfd_module_init(void)
return -1;
}
- err = kfd_pasid_init();
- if (err < 0)
- return err;
-
err = kfd_chardev_init();
if (err < 0)
goto err_ioctl;
@@ -126,7 +122,6 @@ static int __init kfd_module_init(void)
err_topology:
kfd_chardev_exit();
err_ioctl:
- kfd_pasid_exit();
return err;
}
@@ -137,7 +132,6 @@ static void __exit kfd_module_exit(void)
kfd_process_destroy_wq();
kfd_topology_shutdown();
kfd_chardev_exit();
- kfd_pasid_exit();
dev_info(kfd_device, "Removed module\n");
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index b1ef1368c3bb..dfd260ef81ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -31,6 +31,9 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
return mqd_manager_init_cik(type, dev);
case CHIP_CARRIZO:
return mqd_manager_init_vi(type, dev);
+ default:
+ WARN(1, "Unexpected ASIC family %u",
+ dev->device_info->asic_family);
}
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 44ffd23348fc..4859d263fa2a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -189,12 +189,9 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
- q->is_active = false;
- if (q->queue_size > 0 &&
+ q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
- q->queue_percent > 0) {
- q->is_active = true;
- }
+ q->queue_percent > 0);
return 0;
}
@@ -215,24 +212,17 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
- m->sdma_rlc_doorbell = q->doorbell_off <<
- SDMA0_RLC0_DOORBELL__OFFSET__SHIFT |
- 1 << SDMA0_RLC0_DOORBELL__ENABLE__SHIFT;
+ m->sdma_rlc_doorbell =
+ q->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;
m->sdma_rlc_virtual_addr = q->sdma_vm_addr;
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
- q->is_active = false;
- if (q->queue_size > 0 &&
+ q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
- q->queue_percent > 0) {
- m->sdma_rlc_rb_cntl |=
- 1 << SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT;
-
- q->is_active = true;
- }
+ q->queue_percent > 0);
return 0;
}
@@ -359,19 +349,13 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
- m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
- DOORBELL_OFFSET(q->doorbell_off);
+ m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);
m->cp_hqd_vmid = q->vmid;
- m->cp_hqd_active = 0;
- q->is_active = false;
- if (q->queue_size > 0 &&
+ q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
- q->queue_percent > 0) {
- m->cp_hqd_active = 1;
- q->is_active = true;
- }
+ q->queue_percent > 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 73cbfe186dd2..4ea854f9007b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -163,12 +163,9 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
}
- q->is_active = false;
- if (q->queue_size > 0 &&
+ q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
- q->queue_percent > 0) {
- q->is_active = true;
- }
+ q->queue_percent > 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 1d312603de9f..16da8ad02d8b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -140,8 +140,6 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
struct qcm_process_device *qpd)
{
struct pm4_mes_map_process *packet;
- struct queue *cur;
- uint32_t num_queues;
packet = (struct pm4_mes_map_process *)buffer;
@@ -156,10 +154,7 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
packet->bitfields10.gds_size = qpd->gds_size;
packet->bitfields10.num_gws = qpd->num_gws;
packet->bitfields10.num_oac = qpd->num_oac;
- num_queues = 0;
- list_for_each_entry(cur, &qpd->queues_list, list)
- num_queues++;
- packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : num_queues;
+ packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
packet->sh_mem_config = qpd->sh_mem_config;
packet->sh_mem_bases = qpd->sh_mem_bases;
@@ -208,7 +203,7 @@ static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
queue_type__mes_map_queues__debug_interface_queue_vi;
break;
case KFD_QUEUE_TYPE_SDMA:
- packet->bitfields2.engine_sel =
+ packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
engine_sel__mes_map_queues__sdma0_vi;
use_static = false; /* no static queues under SDMA */
break;
@@ -376,7 +371,7 @@ int pm_send_set_resources(struct packet_manager *pm,
packet->bitfields2.queue_type =
queue_type__mes_set_resources__hsa_interface_queue_hiq;
packet->bitfields2.vmid_mask = res->vmid_mask;
- packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY;
+ packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
packet->bitfields7.oac_mask = res->oac_mask;
packet->bitfields8.gds_heap_base = res->gds_heap_base;
packet->bitfields8.gds_heap_size = res->gds_heap_size;
@@ -476,7 +471,7 @@ fail_acquire_packet_buffer:
}
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
- enum kfd_preempt_type_filter mode,
+ enum kfd_unmap_queues_filter filter,
uint32_t filter_param, bool reset,
unsigned int sdma_engine)
{
@@ -494,8 +489,8 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
packet = (struct pm4_mes_unmap_queues *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
- pr_debug("static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
- mode, reset, type);
+ pr_debug("static_queue: unmapping queues: filter is %d , reset is %d , type is %d\n",
+ filter, reset, type);
packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
sizeof(struct pm4_mes_unmap_queues));
switch (type) {
@@ -521,29 +516,29 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
packet->bitfields2.action =
action__mes_unmap_queues__preempt_queues;
- switch (mode) {
- case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
+ switch (filter) {
+ case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
packet->bitfields2.num_queues = 1;
packet->bitfields3b.doorbell_offset0 = filter_param;
break;
- case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
+ case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
packet->bitfields3a.pasid = filter_param;
break;
- case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
+ case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__unmap_all_queues;
break;
- case KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES:
+ case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
/* in this case, we do not preempt static queues */
packet->bitfields2.queue_sel =
queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
break;
default:
- WARN(1, "filter %d", mode);
+ WARN(1, "filter %d", filter);
retval = -EINVAL;
goto err_invalid;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 1e06de0bc673..d6a796144269 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -20,78 +20,64 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/slab.h>
#include <linux/types.h>
#include "kfd_priv.h"
-static unsigned long *pasid_bitmap;
-static unsigned int pasid_limit;
-static DEFINE_MUTEX(pasid_mutex);
-
-int kfd_pasid_init(void)
-{
- pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
-
- pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long),
- GFP_KERNEL);
- if (!pasid_bitmap)
- return -ENOMEM;
-
- set_bit(0, pasid_bitmap); /* PASID 0 is reserved. */
-
- return 0;
-}
-
-void kfd_pasid_exit(void)
-{
- kfree(pasid_bitmap);
-}
+static unsigned int pasid_bits = 16;
+static const struct kfd2kgd_calls *kfd2kgd;
bool kfd_set_pasid_limit(unsigned int new_limit)
{
- if (new_limit < pasid_limit) {
- bool ok;
-
- mutex_lock(&pasid_mutex);
-
- /* ensure that no pasids >= new_limit are in-use */
- ok = (find_next_bit(pasid_bitmap, pasid_limit, new_limit) ==
- pasid_limit);
- if (ok)
- pasid_limit = new_limit;
-
- mutex_unlock(&pasid_mutex);
-
- return ok;
+ if (new_limit < 2)
+ return false;
+
+ if (new_limit < (1U << pasid_bits)) {
+ if (kfd2kgd)
+ /* We've already allocated user PASIDs, too late to
+ * change the limit
+ */
+ return false;
+
+ while (new_limit < (1U << pasid_bits))
+ pasid_bits--;
}
return true;
}
-inline unsigned int kfd_get_pasid_limit(void)
+unsigned int kfd_get_pasid_limit(void)
{
- return pasid_limit;
+ return 1U << pasid_bits;
}
unsigned int kfd_pasid_alloc(void)
{
- unsigned int found;
-
- mutex_lock(&pasid_mutex);
-
- found = find_first_zero_bit(pasid_bitmap, pasid_limit);
- if (found == pasid_limit)
- found = 0;
- else
- set_bit(found, pasid_bitmap);
+ int r;
+
+ /* Find the first best KFD device for calling KGD */
+ if (!kfd2kgd) {
+ struct kfd_dev *dev = NULL;
+ unsigned int i = 0;
+
+ while ((dev = kfd_topology_enum_kfd_devices(i)) != NULL) {
+ if (dev && dev->kfd2kgd) {
+ kfd2kgd = dev->kfd2kgd;
+ break;
+ }
+ i++;
+ }
+
+ if (!kfd2kgd)
+ return false;
+ }
- mutex_unlock(&pasid_mutex);
+ r = kfd2kgd->alloc_pasid(pasid_bits);
- return found;
+ return r > 0 ? r : 0;
}
void kfd_pasid_free(unsigned int pasid)
{
- if (!WARN_ON(pasid == 0 || pasid >= pasid_limit))
- clear_bit(pasid, pasid_bitmap);
+ if (kfd2kgd)
+ kfd2kgd->free_pasid(pasid);
}
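The rewritten limit handling above only deals in powers of two: pasid_bits can shrink but never grow, and the effective limit is always 1 << pasid_bits. A tiny sketch of that invariant (helper name illustrative):

#include <linux/types.h>

static unsigned int pasid_bits = 16;	/* limit = 1 << pasid_bits */

/* Sketch: lower the limit to the largest power of two that does not
 * exceed new_limit, but never below 2 (PASID 0 is reserved).
 */
static bool shrink_pasid_limit(unsigned int new_limit)
{
	if (new_limit < 2)
		return false;

	while (new_limit < (1U << pasid_bits))
		pasid_bits--;

	return true;
}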
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index b87e96cee5fa..9e4134c5b481 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -31,8 +31,12 @@
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
+#include <linux/idr.h>
+#include <linux/kfifo.h>
#include <kgd_kfd_interface.h>
+#include "amd_shared.h"
+
#define KFD_SYSFS_FILE_MODE 0444
#define KFD_MMAP_DOORBELL_MASK 0x8000000000000
@@ -112,11 +116,6 @@ enum cache_policy {
cache_policy_noncoherent
};
-enum asic_family_type {
- CHIP_KAVERI = 0,
- CHIP_CARRIZO
-};
-
struct kfd_event_interrupt_class {
bool (*interrupt_isr)(struct kfd_dev *dev,
const uint32_t *ih_ring_entry);
@@ -125,7 +124,7 @@ struct kfd_event_interrupt_class {
};
struct kfd_device_info {
- unsigned int asic_family;
+ enum amd_asic_type asic_family;
const struct kfd_event_interrupt_class *event_interrupt_class;
unsigned int max_pasid_bits;
unsigned int max_no_of_hqd;
@@ -141,6 +140,12 @@ struct kfd_mem_obj {
uint32_t *cpu_ptr;
};
+struct kfd_vmid_info {
+ uint32_t first_vmid_kfd;
+ uint32_t last_vmid_kfd;
+ uint32_t vmid_num_kfd;
+};
+
struct kfd_dev {
struct kgd_dev *kgd;
@@ -157,14 +162,12 @@ struct kfd_dev {
* to HW doorbell, GFX reserved some
* at the start)
*/
- size_t doorbell_process_limit; /* Number of processes we have doorbell
- * space for.
- */
u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
* page used by kernel queue
*/
struct kgd2kfd_shared_resources shared_resources;
+ struct kfd_vmid_info vm_info;
const struct kfd2kgd_calls *kfd2kgd;
struct mutex doorbell_mutex;
@@ -180,10 +183,8 @@ struct kfd_dev {
unsigned int gtt_sa_num_of_chunks;
/* Interrupts */
- void *interrupt_ring;
- size_t interrupt_ring_size;
- atomic_t interrupt_ring_rptr;
- atomic_t interrupt_ring_wptr;
+ struct kfifo ih_fifo;
+ struct workqueue_struct *ih_wq;
struct work_struct interrupt_work;
spinlock_t interrupt_lock;
@@ -221,22 +222,22 @@ void kfd_chardev_exit(void);
struct device *kfd_chardev(void);
/**
- * enum kfd_preempt_type_filter
+ * enum kfd_unmap_queues_filter
*
- * @KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: Preempts single queue.
+ * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts a single queue.
*
- * @KFD_PRERMPT_TYPE_FILTER_ALL_QUEUES: Preempts all queues in the
+ * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
* running queues list.
*
- * @KFD_PRERMPT_TYPE_FILTER_BY_PASID: Preempts queues that belongs to
+ * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to a
* specific process.
*
*/
-enum kfd_preempt_type_filter {
- KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE,
- KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES,
- KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES,
- KFD_PREEMPT_TYPE_FILTER_BY_PASID
+enum kfd_unmap_queues_filter {
+ KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
+ KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
+ KFD_UNMAP_QUEUES_FILTER_BY_PASID
};
/**
@@ -404,7 +405,6 @@ struct scheduling_resources {
struct process_queue_manager {
/* data */
struct kfd_process *process;
- unsigned int num_concurrent_processes;
struct list_head queues;
unsigned long *queue_slot_bitmap;
};
@@ -420,6 +420,12 @@ struct qcm_process_device {
unsigned int queue_count;
unsigned int vmid;
bool is_debug;
+
+ /* This flag tells if we should reset all wavefronts on
+ * process termination
+ */
+ bool reset_wavefronts;
+
/*
* All the memory management data should be here too
*/
@@ -435,6 +441,13 @@ struct qcm_process_device {
uint32_t sh_hidden_private_base;
};
+
+enum kfd_pdd_bound {
+ PDD_UNBOUND = 0,
+ PDD_BOUND,
+ PDD_BOUND_SUSPENDED,
+};
+
/* Data that is per-process-per device. */
struct kfd_process_device {
/*
@@ -446,6 +459,8 @@ struct kfd_process_device {
/* The device that owns this data. */
struct kfd_dev *dev;
+ /* The process that owns this kfd_process_device. */
+ struct kfd_process *process;
/* per-process-per device QCM data structure */
struct qcm_process_device qpd;
@@ -459,12 +474,14 @@ struct kfd_process_device {
uint64_t scratch_limit;
/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
- bool bound;
+ enum kfd_pdd_bound bound;
- /* This flag tells if we should reset all
- * wavefronts on process termination
+	/* Flag used to tell whether the pdd has been dequeued from the dqm.
+	 * This prevents dev->dqm->ops.process_termination() from being
+	 * called twice when it has already been called from the IOMMU
+	 * callback function.
*/
- bool reset_wavefronts;
+ bool already_dequeued;
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -477,7 +494,12 @@ struct kfd_process {
*/
struct hlist_node kfd_processes;
- struct mm_struct *mm;
+ /*
+ * Opaque pointer to mm_struct. We don't hold a reference to
+ * it so it should never be dereferenced from here. This is
+ * only used for looking up processes by their mm.
+ */
+ void *mm;
struct mutex mutex;
@@ -485,6 +507,8 @@ struct kfd_process {
* In any process, the thread that started main() is the lead
* thread and outlives the rest.
* It is here because amd_iommu_bind_pasid wants a task_struct.
+ * It can also be used for safely getting a reference to the
+ * mm_struct of the process.
*/
struct task_struct *lead_thread;
@@ -495,6 +519,7 @@ struct kfd_process {
struct rcu_head rcu;
unsigned int pasid;
+ unsigned int doorbell_index;
/*
* List of kfd_process_device structures,
@@ -504,22 +529,16 @@ struct kfd_process {
struct process_queue_manager pqm;
- /* The process's queues. */
- size_t queue_array_size;
-
- /* Size is queue_array_size, up to MAX_PROCESS_QUEUES. */
- struct kfd_queue **queues;
-
/*Is the user space process 32 bit?*/
bool is_32bit_user_mode;
/* Event-related data */
struct mutex event_mutex;
- /* All events in process hashed by ID, linked on kfd_event.events. */
- DECLARE_HASHTABLE(events, 4);
- /* struct slot_page_header.event_pages */
- struct list_head signal_event_pages;
- u32 next_nonsignal_event_id;
+ /* Event ID allocator and lookup */
+ struct idr event_idr;
+ /* Event page */
+ struct kfd_signal_page *signal_page;
+ size_t signal_mapped_size;
size_t signal_event_count;
bool signal_event_limit_reached;
};
@@ -549,8 +568,10 @@ struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
- struct kfd_process *p);
-void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid);
+ struct kfd_process *p);
+int kfd_bind_processes_to_device(struct kfd_dev *dev);
+void kfd_unbind_processes_from_device(struct kfd_dev *dev);
+void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
@@ -584,6 +605,10 @@ void write_kernel_doorbell(u32 __iomem *db, u32 value);
unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
struct kfd_process *process,
unsigned int queue_id);
+phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
+ struct kfd_process *process);
+int kfd_alloc_process_doorbells(struct kfd_process *process);
+void kfd_free_process_doorbells(struct kfd_process *process);
/* GTT Sub-Allocator */
@@ -644,14 +669,14 @@ struct process_queue_node {
struct list_head process_queue_list;
};
+void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
+void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev,
struct file *f,
struct queue_properties *properties,
- unsigned int flags,
- enum kfd_queue_type type,
unsigned int *qid);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
@@ -661,15 +686,12 @@ struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
unsigned int fence_value,
- unsigned long timeout);
+ unsigned int timeout_ms);
/* Packet Manager */
-#define KFD_HIQ_TIMEOUT (500)
-
#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT (10)
-#define KFD_UNMAP_LATENCY (150)
struct packet_manager {
struct device_queue_manager *dqm;
@@ -688,33 +710,25 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
uint32_t fence_value);
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
- enum kfd_preempt_type_filter mode,
+ enum kfd_unmap_queues_filter mode,
uint32_t filter_param, bool reset,
unsigned int sdma_engine);
void pm_release_ib(struct packet_manager *pm);
uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
-phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
- struct kfd_process *process);
/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_device_global_init_class device_global_init_class_cik;
-enum kfd_event_wait_result {
- KFD_WAIT_COMPLETE,
- KFD_WAIT_TIMEOUT,
- KFD_WAIT_ERROR
-};
-
void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
bool all, uint32_t user_timeout_ms,
- enum kfd_event_wait_result *wait_result);
+ uint32_t *wait_result);
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index c74cf22a1ed9..1f5ccd28bd41 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -35,13 +35,6 @@ struct mm_struct;
#include "kfd_dbgmgr.h"
/*
- * Initial size for the array of queues.
- * The allocated size is doubled each time
- * it is exceeded up to MAX_PROCESS_QUEUES.
- */
-#define INITIAL_QUEUE_ARRAY_SIZE 16
-
-/*
* List of struct kfd_process (field kfd_process).
* Unique/indexed by mm_struct*
*/
@@ -171,25 +164,22 @@ static void kfd_process_wq_release(struct work_struct *work)
pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
pdd->dev->id, p->pasid);
- if (pdd->reset_wavefronts)
- dbgdev_wave_reset_wavefronts(pdd->dev, p);
+ if (pdd->bound == PDD_BOUND)
+ amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
- amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
list_del(&pdd->per_device_list);
-
kfree(pdd);
}
kfd_event_free_process(p);
kfd_pasid_free(p->pasid);
+ kfd_free_process_doorbells(p);
mutex_unlock(&p->mutex);
mutex_destroy(&p->mutex);
- kfree(p->queues);
-
kfree(p);
kfree(work);
@@ -201,7 +191,6 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu)
struct kfd_process *p;
p = container_of(rcu, struct kfd_process, rcu);
- WARN_ON(atomic_read(&p->mm->mm_count) <= 0);
mmdrop(p->mm);
@@ -235,24 +224,26 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
mutex_lock(&p->mutex);
- /* In case our notifier is called before IOMMU notifier */
- pqm_uninit(&p->pqm);
-
- /* Iterate over all process device data structure and check
- * if we should delete debug managers and reset all wavefronts
+ /* Iterate over all process device data structures and if the
+ * pdd is in debug mode, we should first force unregistration,
+ * then we will be able to destroy the queues
*/
list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
- if ((pdd->dev->dbgmgr) &&
- (pdd->dev->dbgmgr->pasid == p->pasid))
- kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
-
- if (pdd->reset_wavefronts) {
- pr_warn("Resetting all wave fronts\n");
- dbgdev_wave_reset_wavefronts(pdd->dev, p);
- pdd->reset_wavefronts = false;
+ struct kfd_dev *dev = pdd->dev;
+
+ mutex_lock(kfd_get_dbgmgr_mutex());
+ if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
+ if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
+ kfd_dbgmgr_destroy(dev->dbgmgr);
+ dev->dbgmgr = NULL;
+ }
}
+ mutex_unlock(kfd_get_dbgmgr_mutex());
}
+ kfd_process_dequeue_from_all_devices(p);
+ pqm_uninit(&p->pqm);
+
mutex_unlock(&p->mutex);
/*
@@ -279,15 +270,13 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (!process)
goto err_alloc_process;
- process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE,
- sizeof(process->queues[0]), GFP_KERNEL);
- if (!process->queues)
- goto err_alloc_queues;
-
process->pasid = kfd_pasid_alloc();
if (process->pasid == 0)
goto err_alloc_pasid;
+ if (kfd_alloc_process_doorbells(process) < 0)
+ goto err_alloc_doorbells;
+
mutex_init(&process->mutex);
process->mm = thread->mm;
@@ -303,8 +292,6 @@ static struct kfd_process *create_process(const struct task_struct *thread)
process->lead_thread = thread->group_leader;
- process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE;
-
INIT_LIST_HEAD(&process->per_device_data);
kfd_event_init_process(process);
@@ -329,10 +316,10 @@ err_process_pqm_init:
mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
mutex_destroy(&process->mutex);
+ kfd_free_process_doorbells(process);
+err_alloc_doorbells:
kfd_pasid_free(process->pasid);
err_alloc_pasid:
- kfree(process->queues);
-err_alloc_queues:
kfree(process);
err_alloc_process:
return ERR_PTR(err);
@@ -345,9 +332,9 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
list_for_each_entry(pdd, &p->per_device_data, per_device_list)
if (pdd->dev == dev)
- break;
+ return pdd;
- return pdd;
+ return NULL;
}
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
@@ -361,7 +348,9 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
INIT_LIST_HEAD(&pdd->qpd.queues_list);
INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
pdd->qpd.dqm = dev->dqm;
- pdd->reset_wavefronts = false;
+ pdd->process = p;
+ pdd->bound = PDD_UNBOUND;
+ pdd->already_dequeued = false;
list_add(&pdd->per_device_list, &p->per_device_data);
}
@@ -387,19 +376,87 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
return ERR_PTR(-ENOMEM);
}
- if (pdd->bound)
+ if (pdd->bound == PDD_BOUND) {
return pdd;
+ } else if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
+ pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
+ return ERR_PTR(-EINVAL);
+ }
err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
if (err < 0)
return ERR_PTR(err);
- pdd->bound = true;
+ pdd->bound = PDD_BOUND;
return pdd;
}
-void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
+/*
+ * Bind processes to the device that have been temporarily unbound
+ * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
+ */
+int kfd_bind_processes_to_device(struct kfd_dev *dev)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_process *p;
+ unsigned int temp;
+ int err = 0;
+
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ mutex_lock(&p->mutex);
+ pdd = kfd_get_process_device_data(dev, p);
+ if (pdd->bound != PDD_BOUND_SUSPENDED) {
+ mutex_unlock(&p->mutex);
+ continue;
+ }
+
+ err = amd_iommu_bind_pasid(dev->pdev, p->pasid,
+ p->lead_thread);
+ if (err < 0) {
+ pr_err("Unexpected pasid %d binding failure\n",
+ p->pasid);
+ mutex_unlock(&p->mutex);
+ break;
+ }
+
+ pdd->bound = PDD_BOUND;
+ mutex_unlock(&p->mutex);
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+
+ return err;
+}
+
+/*
+ * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
+ * processes will be restored to PDD_BOUND state in
+ * kfd_bind_processes_to_device.
+ */
+void kfd_unbind_processes_from_device(struct kfd_dev *dev)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_process *p;
+ unsigned int temp;
+
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ mutex_lock(&p->mutex);
+ pdd = kfd_get_process_device_data(dev, p);
+
+ if (pdd->bound == PDD_BOUND)
+ pdd->bound = PDD_BOUND_SUSPENDED;
+ mutex_unlock(&p->mutex);
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+}
+
+void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
{
struct kfd_process *p;
struct kfd_process_device *pdd;
@@ -415,31 +472,23 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
pr_debug("Unbinding process %d from IOMMU\n", pasid);
- if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
- kfd_dbgmgr_destroy(dev->dbgmgr);
-
- pqm_uninit(&p->pqm);
-
- pdd = kfd_get_process_device_data(dev, p);
+ mutex_lock(kfd_get_dbgmgr_mutex());
- if (!pdd) {
- mutex_unlock(&p->mutex);
- return;
+ if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
+ if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
+ kfd_dbgmgr_destroy(dev->dbgmgr);
+ dev->dbgmgr = NULL;
+ }
}
- if (pdd->reset_wavefronts) {
- dbgdev_wave_reset_wavefronts(pdd->dev, p);
- pdd->reset_wavefronts = false;
- }
+ mutex_unlock(kfd_get_dbgmgr_mutex());
- /*
- * Just mark pdd as unbound, because we still need it
- * to call amd_iommu_unbind_pasid() in when the
- * process exits.
- * We don't call amd_iommu_unbind_pasid() here
- * because the IOMMU called us.
- */
- pdd->bound = false;
+ pdd = kfd_get_process_device_data(dev, p);
+ if (pdd)
+		/* For GPUs relying on the IOMMU, we need to dequeue here
+		 * while the PASID is still bound.
+ */
+ kfd_process_dequeue_from_device(pdd);
mutex_unlock(&p->mutex);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 03bec765b03d..2bec902fc939 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -63,6 +63,25 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
return 0;
}
+void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
+{
+ struct kfd_dev *dev = pdd->dev;
+
+ if (pdd->already_dequeued)
+ return;
+
+ dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
+ pdd->already_dequeued = true;
+}
+
+void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
+{
+ struct kfd_process_device *pdd;
+
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+ kfd_process_dequeue_from_device(pdd);
+}
+
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
INIT_LIST_HEAD(&pqm->queues);
@@ -78,21 +97,14 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
void pqm_uninit(struct process_queue_manager *pqm)
{
- int retval;
struct process_queue_node *pqn, *next;
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
- retval = pqm_destroy_queue(
- pqm,
- (pqn->q != NULL) ?
- pqn->q->properties.queue_id :
- pqn->kq->queue->properties.queue_id);
-
- if (retval != 0) {
- pr_err("failed to destroy queue\n");
- return;
- }
+ uninit_queue(pqn->q);
+ list_del(&pqn->process_queue_list);
+ kfree(pqn);
}
+
kfree(pqm->queue_slot_bitmap);
pqm->queue_slot_bitmap = NULL;
}
@@ -130,20 +142,16 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev,
struct file *f,
struct queue_properties *properties,
- unsigned int flags,
- enum kfd_queue_type type,
unsigned int *qid)
{
int retval;
struct kfd_process_device *pdd;
- struct queue_properties q_properties;
struct queue *q;
struct process_queue_node *pqn;
struct kernel_queue *kq;
- int num_queues = 0;
- struct queue *cur;
+ enum kfd_queue_type type = properties->type;
+ unsigned int max_queues = 127; /* HWS limit */
- memcpy(&q_properties, properties, sizeof(struct queue_properties));
q = NULL;
kq = NULL;
@@ -159,19 +167,18 @@ int pqm_create_queue(struct process_queue_manager *pqm,
* If we are just about to create DIQ, the is_debug flag is not set yet
* Hence we also check the type as well
*/
- if ((pdd->qpd.is_debug) ||
- (type == KFD_QUEUE_TYPE_DIQ)) {
- list_for_each_entry(cur, &pdd->qpd.queues_list, list)
- num_queues++;
- if (num_queues >= dev->device_info->max_no_of_hqd/2)
- return -ENOSPC;
- }
+ if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
+ max_queues = dev->device_info->max_no_of_hqd/2;
+
+ if (pdd->qpd.queue_count >= max_queues)
+ return -ENOSPC;
retval = find_available_queue_slot(pqm, qid);
if (retval != 0)
return retval;
- if (list_empty(&pqm->queues)) {
+ if (list_empty(&pdd->qpd.queues_list) &&
+ list_empty(&pdd->qpd.priv_queue_list)) {
pdd->qpd.pqm = pqm;
dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
}
@@ -187,14 +194,14 @@ int pqm_create_queue(struct process_queue_manager *pqm,
case KFD_QUEUE_TYPE_COMPUTE:
/* check if there is over subscription */
if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
- ((dev->dqm->processes_count >= VMID_PER_DEVICE) ||
+ ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;
}
- retval = create_cp_queue(pqm, dev, &q, &q_properties, f, *qid);
+ retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -231,9 +238,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
list_add(&pqn->process_queue_list, &pqm->queues);
if (q) {
- *properties = q->properties;
pr_debug("PQM done creating queue\n");
- print_queue_properties(properties);
+ print_queue_properties(&q->properties);
}
return retval;
@@ -243,7 +249,8 @@ err_create_queue:
err_allocate_pqn:
/* check if queues list is empty unregister process from device */
clear_bit(*qid, pqm->queue_slot_bitmap);
- if (list_empty(&pqm->queues))
+ if (list_empty(&pdd->qpd.queues_list) &&
+ list_empty(&pdd->qpd.priv_queue_list))
dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
return retval;
}
@@ -290,9 +297,6 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
if (pqn->q) {
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
- if (retval != 0)
- return retval;
-
uninit_queue(pqn->q);
}
@@ -300,7 +304,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
kfree(pqn);
clear_bit(qid, pqm->queue_slot_bitmap);
- if (list_empty(&pqm->queues))
+ if (list_empty(&pdd->qpd.queues_list) &&
+ list_empty(&pdd->qpd.priv_queue_list))
dqm->ops.unregister_process(dqm, &pdd->qpd);
return retval;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 70e8c20acb2f..b72f8a43d86b 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -23,34 +23,11 @@
#ifndef __AMD_SHARED_H__
#define __AMD_SHARED_H__
-#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */
+#include <drm/amd_asic_type.h>
-/*
- * Supported ASIC types
- */
-enum amd_asic_type {
- CHIP_TAHITI = 0,
- CHIP_PITCAIRN,
- CHIP_VERDE,
- CHIP_OLAND,
- CHIP_HAINAN,
- CHIP_BONAIRE,
- CHIP_KAVERI,
- CHIP_KABINI,
- CHIP_HAWAII,
- CHIP_MULLINS,
- CHIP_TOPAZ,
- CHIP_TONGA,
- CHIP_FIJI,
- CHIP_CARRIZO,
- CHIP_STONEY,
- CHIP_POLARIS10,
- CHIP_POLARIS11,
- CHIP_POLARIS12,
- CHIP_VEGA10,
- CHIP_RAVEN,
- CHIP_LAST,
-};
+struct seq_file;
+
+#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */
/*
* Chip flags
@@ -144,6 +121,12 @@ enum amd_fan_ctrl_mode {
AMD_FAN_CTRL_AUTO = 2,
};
+enum pp_clock_type {
+ PP_SCLK,
+ PP_MCLK,
+ PP_PCIE,
+};
+
/* CG flags */
#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
@@ -249,4 +232,96 @@ struct amd_ip_funcs {
void (*get_clockgating_state)(void *handle, u32 *flags);
};
+
+enum amd_pp_task;
+enum amd_pp_clock_type;
+struct pp_states_info;
+struct amd_pp_simple_clock_info;
+struct amd_pp_display_configuration;
+struct amd_pp_clock_info;
+struct pp_display_clock_request;
+struct pp_wm_sets_with_clock_ranges_soc15;
+struct pp_clock_levels_with_voltage;
+struct pp_clock_levels_with_latency;
+struct amd_pp_clocks;
+
+struct amd_pm_funcs {
+/* export for dpm on ci and si */
+ int (*pre_set_power_state)(void *handle);
+ int (*set_power_state)(void *handle);
+ void (*post_set_power_state)(void *handle);
+ void (*display_configuration_changed)(void *handle);
+ void (*print_power_state)(void *handle, void *ps);
+ bool (*vblank_too_short)(void *handle);
+ void (*enable_bapm)(void *handle, bool enable);
+ int (*check_state_equal)(void *handle,
+ void *cps,
+ void *rps,
+ bool *equal);
+/* export for sysfs */
+ int (*get_temperature)(void *handle);
+ void (*set_fan_control_mode)(void *handle, u32 mode);
+ u32 (*get_fan_control_mode)(void *handle);
+ int (*set_fan_speed_percent)(void *handle, u32 speed);
+ int (*get_fan_speed_percent)(void *handle, u32 *speed);
+ int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
+ int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
+ int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
+ int (*get_sclk_od)(void *handle);
+ int (*set_sclk_od)(void *handle, uint32_t value);
+ int (*get_mclk_od)(void *handle);
+ int (*set_mclk_od)(void *handle, uint32_t value);
+ int (*read_sensor)(void *handle, int idx, void *value, int *size);
+ enum amd_dpm_forced_level (*get_performance_level)(void *handle);
+ enum amd_pm_state_type (*get_current_power_state)(void *handle);
+ int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
+ int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
+ int (*get_pp_table)(void *handle, char **table);
+ int (*set_pp_table)(void *handle, const char *buf, size_t size);
+ void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
+
+ int (*reset_power_profile_state)(void *handle,
+ struct amd_pp_profile *request);
+ int (*get_power_profile_state)(void *handle,
+ struct amd_pp_profile *query);
+ int (*set_power_profile_state)(void *handle,
+ struct amd_pp_profile *request);
+ int (*switch_power_profile)(void *handle,
+ enum amd_pp_profile_type type);
+/* export to amdgpu */
+ void (*powergate_uvd)(void *handle, bool gate);
+ void (*powergate_vce)(void *handle, bool gate);
+ struct amd_vce_state* (*get_vce_clock_state)(void *handle, u32 idx);
+ int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
+ void *input, void *output);
+ int (*load_firmware)(void *handle);
+ int (*wait_for_fw_loading_complete)(void *handle);
+ int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
+/* export to DC */
+ u32 (*get_sclk)(void *handle, bool low);
+ u32 (*get_mclk)(void *handle, bool low);
+ int (*display_configuration_change)(void *handle,
+ const struct amd_pp_display_configuration *input);
+ int (*get_display_power_level)(void *handle,
+ struct amd_pp_simple_clock_info *output);
+ int (*get_current_clocks)(void *handle,
+ struct amd_pp_clock_info *clocks);
+ int (*get_clock_by_type)(void *handle,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks);
+ int (*get_clock_by_type_with_latency)(void *handle,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_latency *clocks);
+ int (*get_clock_by_type_with_voltage)(void *handle,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks);
+ int (*set_watermarks_for_clocks_ranges)(void *handle,
+ struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ int (*display_clock_voltage_request)(void *handle,
+ struct pp_display_clock_request *clock);
+ int (*get_display_mode_validation_clocks)(void *handle,
+ struct amd_pp_simple_clock_info *clocks);
+};
+
+
#endif /* __AMD_SHARED_H__ */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
index 34c6ff52710e..6af9f0217b34 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
@@ -5454,5 +5454,7 @@
#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002
#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004
+#define SMC_SYSCON_MISC_CNTL__pre_fetcher_en_MASK 0x1
+#define SMC_SYSCON_MISC_CNTL__pre_fetcher_en__SHIFT 0
#endif /* SMU_7_0_1_SH_MASK_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
index 378f4b6b43da..344237256d02 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
@@ -36,6 +36,16 @@
#define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5
#define mmUVD_POWER_STATUS_U 0x3bfd
#define mmUVD_NO_OP 0x3bff
+#define mmUVD_RB_BASE_LO2 0x3c21
+#define mmUVD_RB_BASE_HI2 0x3c22
+#define mmUVD_RB_SIZE2 0x3c23
+#define mmUVD_RB_RPTR2 0x3c24
+#define mmUVD_RB_WPTR2 0x3c25
+#define mmUVD_RB_BASE_LO 0x3c26
+#define mmUVD_RB_BASE_HI 0x3c27
+#define mmUVD_RB_SIZE 0x3c28
+#define mmUVD_RB_RPTR 0x3c29
+#define mmUVD_RB_WPTR 0x3c2a
#define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x3c69
#define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x3c68
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x3c67
@@ -43,6 +53,11 @@
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x3c5f
#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x3c5e
#define mmUVD_SEMA_CNTL 0x3d00
+#define mmUVD_RB_WPTR3 0x3d1c
+#define mmUVD_RB_RPTR3 0x3d1b
+#define mmUVD_RB_BASE_LO3 0x3d1d
+#define mmUVD_RB_BASE_HI3 0x3d1e
+#define mmUVD_RB_SIZE3 0x3d1f
#define mmUVD_LMI_EXT40_ADDR 0x3d26
#define mmUVD_CTX_INDEX 0x3d28
#define mmUVD_CTX_DATA 0x3d29
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 181a2c3c6362..f696bbb643ef 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -4292,6 +4292,7 @@ typedef struct _ATOM_DPCD_INFO
#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0
+#define ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION 0x2
/***********************************************************************************/
// Structure used in VRAM_UsageByFirmwareTable
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 837296db9628..7c92f4707085 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1017,6 +1017,19 @@ struct atom_14nm_combphy_tmds_vs_set
uint8_t margin_deemph_lane0__deemph_sel_val;
};
+struct atom_i2c_reg_info {
+ uint8_t ucI2cRegIndex;
+ uint8_t ucI2cRegVal;
+};
+
+struct atom_hdmi_retimer_redriver_set {
+ uint8_t HdmiSlvAddr;
+ uint8_t HdmiRegNum;
+ uint8_t Hdmi6GRegNum;
+ struct atom_i2c_reg_info HdmiRegSetting[9]; //For non 6G Hz use
+ struct atom_i2c_reg_info Hdmi6GhzRegSetting[3]; //For 6G Hz use.
+};
+
struct atom_integrated_system_info_v1_11
{
struct atom_common_table_header table_header;
@@ -1052,7 +1065,11 @@ struct atom_integrated_system_info_v1_11
struct atom_14nm_dpphy_dp_tuningset dp_tuningset;
struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;
struct atom_camera_data camera_info;
- uint32_t reserved[138];
+ struct atom_hdmi_retimer_redriver_set dp0_retimer_set; //for DP0
+ struct atom_hdmi_retimer_redriver_set dp1_retimer_set; //for DP1
+ struct atom_hdmi_retimer_redriver_set dp2_retimer_set; //for DP2
+ struct atom_hdmi_retimer_redriver_set dp3_retimer_set; //for DP3
+ uint32_t reserved[108];
};
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 0214f63f52fc..675988d56392 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -100,6 +100,7 @@ enum cgs_system_info_id {
CGS_SYSTEM_INFO_GFX_SE_INFO,
CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID,
CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID,
+ CGS_SYSTEM_INFO_PCIE_BUS_DEVFN,
CGS_SYSTEM_INFO_ID_MAXIMUM,
};
@@ -193,8 +194,6 @@ struct cgs_acpi_method_info {
* @type: memory type
* @size: size in bytes
* @align: alignment in bytes
- * @min_offset: minimum offset from start of heap
- * @max_offset: maximum offset from start of heap
* @handle: memory handle (output)
*
* The memory types CGS_GPU_MEM_TYPE_*_CONTIG_FB force contiguous
@@ -216,7 +215,6 @@ struct cgs_acpi_method_info {
*/
typedef int (*cgs_alloc_gpu_mem_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
- uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *handle);
/**
@@ -310,6 +308,22 @@ typedef uint32_t (*cgs_read_ind_register_t)(struct cgs_device *cgs_device, enum
typedef void (*cgs_write_ind_register_t)(struct cgs_device *cgs_device, enum cgs_ind_reg space,
unsigned index, uint32_t value);
+#define CGS_REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
+#define CGS_REG_FIELD_MASK(reg, field) reg##__##field##_MASK
+
+#define CGS_REG_SET_FIELD(orig_val, reg, field, field_val) \
+ (((orig_val) & ~CGS_REG_FIELD_MASK(reg, field)) | \
+ (CGS_REG_FIELD_MASK(reg, field) & ((field_val) << CGS_REG_FIELD_SHIFT(reg, field))))
+
+#define CGS_REG_GET_FIELD(value, reg, field) \
+ (((value) & CGS_REG_FIELD_MASK(reg, field)) >> CGS_REG_FIELD_SHIFT(reg, field))
+
+#define CGS_WREG32_FIELD(device, reg, field, val) \
+ cgs_write_register(device, mm##reg, (cgs_read_register(device, mm##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field))
+
+#define CGS_WREG32_FIELD_IND(device, space, reg, field, val) \
+ cgs_write_ind_register(device, space, ix##reg, (cgs_read_ind_register(device, space, ix##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field))
+
/**
* cgs_get_pci_resource() - provide access to a device resource (PCI BAR)
* @cgs_device: opaque device handle
@@ -409,6 +423,10 @@ typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock);
+struct amd_pp_init;
+typedef void* (*cgs_register_pp_handle)(struct cgs_device *cgs_device,
+ int (*call_back_func)(struct amd_pp_init *, void **));
+
struct cgs_ops {
/* memory management calls (similar to KFD interface) */
cgs_alloc_gpu_mem_t alloc_gpu_mem;
@@ -445,6 +463,7 @@ struct cgs_ops {
cgs_is_virtualization_enabled_t is_virtualization_enabled;
cgs_enter_safe_mode enter_safe_mode;
cgs_lock_grbm_idx lock_grbm_idx;
+ cgs_register_pp_handle register_pp_handle;
};
struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -463,8 +482,8 @@ struct cgs_device
#define CGS_OS_CALL(func,dev,...) \
(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
-#define cgs_alloc_gpu_mem(dev,type,size,align,min_off,max_off,handle) \
- CGS_CALL(alloc_gpu_mem,dev,type,size,align,min_off,max_off,handle)
+#define cgs_alloc_gpu_mem(dev,type,size,align,handle) \
+ CGS_CALL(alloc_gpu_mem,dev,type,size,align,handle)
#define cgs_free_gpu_mem(dev,handle) \
CGS_CALL(free_gpu_mem,dev,handle)
#define cgs_gmap_gpu_mem(dev,handle,mcaddr) \
@@ -523,4 +542,7 @@ struct cgs_device
#define cgs_lock_grbm_idx(cgs_device, lock) \
CGS_CALL(lock_grbm_idx, cgs_device, lock)
+#define cgs_register_pp_handle(cgs_device, call_back_func) \
+ CGS_CALL(register_pp_handle, cgs_device, call_back_func)
+
#endif /* _CGS_COMMON_H */
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 94277cb734d2..f516fd10e6ba 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -112,6 +112,9 @@ struct tile_config {
*
* @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
*
+ * @alloc_pasid: Allocate a PASID
+ * @free_pasid: Free a PASID
+ *
* @program_sh_mem_settings: A function that should initiate the memory
* properties such as main aperture memory type (cache / non cached) and
* secondary aperture base address, size and memory type.
@@ -160,6 +163,9 @@ struct kfd2kgd_calls {
uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
+ int (*alloc_pasid)(unsigned int bits);
+ void (*free_pasid)(unsigned int pasid);
+
/* Register access functions */
void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
diff --git a/drivers/gpu/drm/amd/include/linux/chash.h b/drivers/gpu/drm/amd/include/linux/chash.h
new file mode 100644
index 000000000000..6dc159924ed1
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/linux/chash.h
@@ -0,0 +1,366 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _LINUX_CHASH_H
+#define _LINUX_CHASH_H
+
+#include <linux/types.h>
+#include <linux/hash.h>
+#include <linux/bug.h>
+#include <asm/bitsperlong.h>
+
+#if BITS_PER_LONG == 32
+# define _CHASH_LONG_SHIFT 5
+#elif BITS_PER_LONG == 64
+# define _CHASH_LONG_SHIFT 6
+#else
+# error "Unexpected BITS_PER_LONG"
+#endif
+
+struct __chash_table {
+ u8 bits;
+ u8 key_size;
+ unsigned int value_size;
+ u32 size_mask;
+ unsigned long *occup_bitmap, *valid_bitmap;
+ union {
+ u32 *keys32;
+ u64 *keys64;
+ };
+ u8 *values;
+
+#ifdef CONFIG_CHASH_STATS
+ u64 hits, hits_steps, hits_time_ns;
+ u64 miss, miss_steps, miss_time_ns;
+ u64 relocs, reloc_dist;
+#endif
+};
+
+#define __CHASH_BITMAP_SIZE(bits) \
+ (((1 << (bits)) + BITS_PER_LONG - 1) / BITS_PER_LONG)
+#define __CHASH_ARRAY_SIZE(bits, size) \
+ ((((size) << (bits)) + sizeof(long) - 1) / sizeof(long))
+
+#define __CHASH_DATA_SIZE(bits, key_size, value_size) \
+ (__CHASH_BITMAP_SIZE(bits) * 2 + \
+ __CHASH_ARRAY_SIZE(bits, key_size) + \
+ __CHASH_ARRAY_SIZE(bits, value_size))
+
+#define STRUCT_CHASH_TABLE(bits, key_size, value_size) \
+ struct { \
+ struct __chash_table table; \
+ unsigned long data \
+ [__CHASH_DATA_SIZE(bits, key_size, value_size)];\
+ }
+
+/**
+ * struct chash_table - Dynamically allocated closed hash table
+ *
+ * Use this struct for dynamically allocated hash tables (using
+ * chash_table_alloc and chash_table_free), where the size is
+ * determined at runtime.
+ */
+struct chash_table {
+ struct __chash_table table;
+ unsigned long *data;
+};
+
+/**
+ * DECLARE_CHASH_TABLE - macro to declare a closed hash table
+ * @table: name of the declared hash table
+ * @bts: Table size will be 2^bits entries
+ * @key_sz: Size of hash keys in bytes, 4 or 8
+ * @val_sz: Size of data values in bytes, can be 0
+ *
+ * This declares the hash table variable with a static size.
+ *
+ * The closed hash table stores key-value pairs with low memory and
+ * lookup overhead. In operation it performs no dynamic memory
+ * management. The data being stored does not require any
+ * list_heads. The hash table performs best with small @val_sz and as
+ * long as some space (about 50%) is left free in the table. But the
+ * table can still work reasonably efficiently even when filled up to
+ * about 90%. If bigger data items need to be stored and looked up,
+ * store a pointer to them as the value in the hash table.
+ *
+ * @val_sz may be 0. This can be useful when all the stored
+ * information is contained in the key itself, and the only thing
+ * that matters is whether the key is present in the table (or not).
+ */
+#define DECLARE_CHASH_TABLE(table, bts, key_sz, val_sz) \
+ STRUCT_CHASH_TABLE(bts, key_sz, val_sz) table
+
+#ifdef CONFIG_CHASH_STATS
+#define __CHASH_STATS_INIT(prefix), \
+ prefix.hits = 0, \
+ prefix.hits_steps = 0, \
+ prefix.hits_time_ns = 0, \
+ prefix.miss = 0, \
+ prefix.miss_steps = 0, \
+ prefix.miss_time_ns = 0, \
+ prefix.relocs = 0, \
+ prefix.reloc_dist = 0
+#else
+#define __CHASH_STATS_INIT(prefix)
+#endif
+
+#define __CHASH_TABLE_INIT(prefix, data, bts, key_sz, val_sz) \
+ prefix.bits = (bts), \
+ prefix.key_size = (key_sz), \
+ prefix.value_size = (val_sz), \
+ prefix.size_mask = ((1 << bts) - 1), \
+ prefix.occup_bitmap = &data[0], \
+ prefix.valid_bitmap = &data \
+ [__CHASH_BITMAP_SIZE(bts)], \
+ prefix.keys64 = (u64 *)&data \
+ [__CHASH_BITMAP_SIZE(bts) * 2], \
+ prefix.values = (u8 *)&data \
+ [__CHASH_BITMAP_SIZE(bts) * 2 + \
+ __CHASH_ARRAY_SIZE(bts, key_sz)] \
+ __CHASH_STATS_INIT(prefix)
+
+/**
+ * DEFINE_CHASH_TABLE - macro to define and initialize a closed hash table
+ * @tbl: name of the declared hash table
+ * @bts: Table size will be 2^bits entries
+ * @key_sz: Size of hash keys in bytes, 4 or 8
+ * @val_sz: Size of data values in bytes, can be 0
+ *
+ * Note: the macro can be used for global and local hash table variables.
+ */
+#define DEFINE_CHASH_TABLE(tbl, bts, key_sz, val_sz) \
+ DECLARE_CHASH_TABLE(tbl, bts, key_sz, val_sz) = { \
+ .table = { \
+ __CHASH_TABLE_INIT(, (tbl).data, bts, key_sz, val_sz) \
+ }, \
+ .data = {0} \
+ }
+
+/**
+ * INIT_CHASH_TABLE - Initialize a hash table declared by DECLARE_CHASH_TABLE
+ * @tbl: name of the declared hash table
+ * @bts: Table size will be 2^bits entries
+ * @key_sz: Size of hash keys in bytes, 4 or 8
+ * @val_sz: Size of data values in bytes, can be 0
+ */
+#define INIT_CHASH_TABLE(tbl, bts, key_sz, val_sz) \
+ __CHASH_TABLE_INIT(((tbl).table), (tbl).data, bts, key_sz, val_sz)
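+
+/*
+ * Illustrative example (not part of the API): a file-scope table with
+ * 2^4 = 16 slots, 8-byte keys and 4-byte values could be defined as
+ *
+ *   DEFINE_CHASH_TABLE(my_table, 4, 8, 4);
+ *
+ * or, where a static initializer cannot be used, declared and
+ * initialized separately:
+ *
+ *   DECLARE_CHASH_TABLE(my_table, 4, 8, 4);
+ *   ...
+ *   INIT_CHASH_TABLE(my_table, 4, 8, 4);
+ */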
+
+int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
+ unsigned int value_size, gfp_t gfp_mask);
+void chash_table_free(struct chash_table *table);
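+
+/*
+ * Illustrative example (assumes a caller-owned struct chash_table tbl):
+ *
+ *   int err = chash_table_alloc(&tbl, 10, 8, sizeof(u64), GFP_KERNEL);
+ *   if (err)
+ *           return err;
+ *   ...
+ *   chash_table_free(&tbl);
+ */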
+
+/**
+ * chash_table_dump_stats - Dump statistics of a closed hash table
+ * @tbl: Pointer to the table structure
+ *
+ * Dumps performance statistics gathered during operation of the table
+ * to the kernel log using pr_debug. If CONFIG_DYNAMIC_DEBUG is enabled,
+ * the user must turn on messages for chash.c (file chash.c +p).
+ */
+#ifdef CONFIG_CHASH_STATS
+#define chash_table_dump_stats(tbl) __chash_table_dump_stats(&(*tbl).table)
+
+void __chash_table_dump_stats(struct __chash_table *table);
+#else
+#define chash_table_dump_stats(tbl)
+#endif
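+
+/*
+ * Illustrative only: with CONFIG_DYNAMIC_DEBUG, the statistics messages
+ * are typically enabled with something like
+ *
+ *   echo 'file chash.c +p' > /sys/kernel/debug/dynamic_debug/control
+ */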
+
+/**
+ * chash_table_reset_stats - Reset statistics of a closed hash table
+ * @tbl: Pointer to the table structure
+ */
+#ifdef CONFIG_CHASH_STATS
+#define chash_table_reset_stats(tbl) __chash_table_reset_stats(&(*tbl).table)
+
+static inline void __chash_table_reset_stats(struct __chash_table *table)
+{
+ (void)table __CHASH_STATS_INIT((*table));
+}
+#else
+#define chash_table_reset_stats(tbl)
+#endif
+
+/**
+ * chash_table_copy_in - Copy a new value into the hash table
+ * @tbl: Pointer to the table structure
+ * @key: Key of the entry to add or update
+ * @value: Pointer to value to copy, may be NULL
+ *
+ * If @key already has an entry, its value is replaced. Otherwise a
+ * new entry is added. If @value is NULL, the value is left unchanged
+ * or uninitialized. Returns 1 if an entry already existed, 0 if a new
+ * entry was added or %-ENOMEM if there was no free space in the
+ * table.
+ */
+#define chash_table_copy_in(tbl, key, value) \
+ __chash_table_copy_in(&(*tbl).table, key, value)
+
+int __chash_table_copy_in(struct __chash_table *table, u64 key,
+ const void *value);
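+
+/*
+ * Illustrative example (assumes a table tbl with 8-byte keys and
+ * sizeof(u64) values; compute_value() is a hypothetical helper):
+ *
+ *   u64 value = compute_value(key);
+ *   int r = chash_table_copy_in(&tbl, key, &value);
+ *
+ *   if (r < 0)
+ *           return r;
+ */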
+
+/**
+ * chash_table_copy_out - Copy a value out of the hash table
+ * @tbl: Pointer to the table structure
+ * @key: Key of the entry to find
+ * @value: Pointer to value to copy, may be NULL
+ *
+ * If @value is not NULL and the table has a non-0 value_size, the
+ * value at @key is copied to @value. Returns the slot index of the
+ * entry or %-EINVAL if @key was not found.
+ */
+#define chash_table_copy_out(tbl, key, value) \
+ __chash_table_copy_out(&(*tbl).table, key, value, false)
+
+int __chash_table_copy_out(struct __chash_table *table, u64 key,
+ void *value, bool remove);
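+
+/*
+ * Illustrative example (same assumptions as above):
+ *
+ *   u64 value;
+ *
+ *   if (chash_table_copy_out(&tbl, key, &value) < 0)
+ *           pr_debug("key 0x%llx not found\n", key);
+ */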
+
+/**
+ * chash_table_remove - Remove an entry from the hash table
+ * @tbl: Pointer to the table structure
+ * @key: Key of the entry to find
+ * @value: Pointer to value to copy, may be NULL
+ *
+ * If @value is not NULL and the table has a non-0 value_size, the
+ * value at @key is copied to @value. The entry is removed from the
+ * table. Returns the slot index of the removed entry or %-EINVAL if
+ * @key was not found.
+ */
+#define chash_table_remove(tbl, key, value) \
+ __chash_table_copy_out(&(*tbl).table, key, value, true)
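+
+/*
+ * Illustrative example: remove an entry and retrieve its last value:
+ *
+ *   u64 old_value;
+ *
+ *   if (chash_table_remove(&tbl, key, &old_value) < 0)
+ *           pr_debug("key 0x%llx was not in the table\n", key);
+ */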
+
+/*
+ * Low level iterator API used internally by the above functions.
+ */
+struct chash_iter {
+ struct __chash_table *table;
+ unsigned long mask;
+ int slot;
+};
+
+/**
+ * CHASH_ITER_INIT - Initialize a hash table iterator
+ * @table: Pointer to the hash table to iterate over
+ * @s: Initial slot number
+ */
+#define CHASH_ITER_INIT(table, s) { \
+ table, \
+ 1UL << ((s) & (BITS_PER_LONG - 1)), \
+ s \
+ }
+/**
+ * CHASH_ITER_SET - Set hash table iterator to new slot
+ * @iter: Iterator
+ * @s: Slot number
+ */
+#define CHASH_ITER_SET(iter, s) \
+ (iter).mask = 1UL << ((s) & (BITS_PER_LONG - 1)), \
+ (iter).slot = (s)
+/**
+ * CHASH_ITER_INC - Increment hash table iterator
+ * @iter: Iterator to advance
+ *
+ * Wraps around at the end.
+ */
+#define CHASH_ITER_INC(iter) do { \
+ (iter).mask = (iter).mask << 1 | \
+ (iter).mask >> (BITS_PER_LONG - 1); \
+ (iter).slot = ((iter).slot + 1) & (iter).table->size_mask; \
+ } while (0)
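+
+/*
+ * Illustrative sketch only (this API is mainly used internally):
+ * visiting every valid slot of a table tbl exactly once:
+ *
+ *   struct chash_iter iter = CHASH_ITER_INIT(&tbl.table, 0);
+ *
+ *   do {
+ *           if (chash_iter_is_valid(iter))
+ *                   pr_debug("key 0x%llx\n", chash_iter_key(iter));
+ *           CHASH_ITER_INC(iter);
+ *   } while (iter.slot);
+ */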
+
+static inline bool chash_iter_is_valid(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return !!(iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
+ iter.mask);
+}
+static inline bool chash_iter_is_empty(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return !(iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
+ iter.mask);
+}
+
+static inline void chash_iter_set_valid(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
+ iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
+}
+static inline void chash_iter_set_invalid(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
+}
+static inline void chash_iter_set_empty(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
+}
+
+static inline u32 chash_iter_key32(const struct chash_iter iter)
+{
+ BUG_ON(iter.table->key_size != 4);
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return iter.table->keys32[iter.slot];
+}
+static inline u64 chash_iter_key64(const struct chash_iter iter)
+{
+ BUG_ON(iter.table->key_size != 8);
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return iter.table->keys64[iter.slot];
+}
+static inline u64 chash_iter_key(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return (iter.table->key_size == 4) ?
+ iter.table->keys32[iter.slot] : iter.table->keys64[iter.slot];
+}
+
+static inline u32 chash_iter_hash32(const struct chash_iter iter)
+{
+ BUG_ON(iter.table->key_size != 4);
+ return hash_32(chash_iter_key32(iter), iter.table->bits);
+}
+
+static inline u32 chash_iter_hash64(const struct chash_iter iter)
+{
+ BUG_ON(iter.table->key_size != 8);
+ return hash_64(chash_iter_key64(iter), iter.table->bits);
+}
+
+static inline u32 chash_iter_hash(const struct chash_iter iter)
+{
+ return (iter.table->key_size == 4) ?
+ hash_32(chash_iter_key32(iter), iter.table->bits) :
+ hash_64(chash_iter_key64(iter), iter.table->bits);
+}
+
+static inline void *chash_iter_value(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return iter.table->values +
+ ((unsigned long)iter.slot * iter.table->value_size);
+}
+
+#endif /* _LINUX_CHASH_H */
diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h
index 9a9e6c7e89ea..2fb25abaf7c8 100644
--- a/drivers/gpu/drm/amd/include/v9_structs.h
+++ b/drivers/gpu/drm/amd/include/v9_structs.h
@@ -284,8 +284,8 @@ struct v9_mqd {
uint32_t gds_save_mask_hi;
uint32_t ctx_save_base_addr_lo;
uint32_t ctx_save_base_addr_hi;
- uint32_t reserved_126;
- uint32_t reserved_127;
+ uint32_t dynamic_cu_mask_addr_lo;
+ uint32_t dynamic_cu_mask_addr_hi;
uint32_t cp_mqd_base_addr_lo;
uint32_t cp_mqd_base_addr_hi;
uint32_t cp_hqd_active;
@@ -672,6 +672,14 @@ struct v9_mqd {
uint32_t reserved_511;
};
+struct v9_mqd_allocation {
+ struct v9_mqd mqd;
+ uint32_t wptr_poll_mem;
+ uint32_t rptr_report_mem;
+ uint32_t dynamic_cu_mask;
+ uint32_t dynamic_rb_mask;
+};
+
/* from vega10 all CSA format is shifted to chain ib compatible mode */
struct v9_ce_ib_state {
/* section of non chained ib part */
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
index 3e606a761d0e..20234820194b 100644
--- a/drivers/gpu/drm/amd/include/vi_structs.h
+++ b/drivers/gpu/drm/amd/include/vi_structs.h
@@ -423,265 +423,6 @@ struct vi_mqd_allocation {
uint32_t dynamic_rb_mask;
};
-struct cz_mqd {
- uint32_t header;
- uint32_t compute_dispatch_initiator;
- uint32_t compute_dim_x;
- uint32_t compute_dim_y;
- uint32_t compute_dim_z;
- uint32_t compute_start_x;
- uint32_t compute_start_y;
- uint32_t compute_start_z;
- uint32_t compute_num_thread_x;
- uint32_t compute_num_thread_y;
- uint32_t compute_num_thread_z;
- uint32_t compute_pipelinestat_enable;
- uint32_t compute_perfcount_enable;
- uint32_t compute_pgm_lo;
- uint32_t compute_pgm_hi;
- uint32_t compute_tba_lo;
- uint32_t compute_tba_hi;
- uint32_t compute_tma_lo;
- uint32_t compute_tma_hi;
- uint32_t compute_pgm_rsrc1;
- uint32_t compute_pgm_rsrc2;
- uint32_t compute_vmid;
- uint32_t compute_resource_limits;
- uint32_t compute_static_thread_mgmt_se0;
- uint32_t compute_static_thread_mgmt_se1;
- uint32_t compute_tmpring_size;
- uint32_t compute_static_thread_mgmt_se2;
- uint32_t compute_static_thread_mgmt_se3;
- uint32_t compute_restart_x;
- uint32_t compute_restart_y;
- uint32_t compute_restart_z;
- uint32_t compute_thread_trace_enable;
- uint32_t compute_misc_reserved;
- uint32_t compute_dispatch_id;
- uint32_t compute_threadgroup_id;
- uint32_t compute_relaunch;
- uint32_t compute_wave_restore_addr_lo;
- uint32_t compute_wave_restore_addr_hi;
- uint32_t compute_wave_restore_control;
- uint32_t reserved_39;
- uint32_t reserved_40;
- uint32_t reserved_41;
- uint32_t reserved_42;
- uint32_t reserved_43;
- uint32_t reserved_44;
- uint32_t reserved_45;
- uint32_t reserved_46;
- uint32_t reserved_47;
- uint32_t reserved_48;
- uint32_t reserved_49;
- uint32_t reserved_50;
- uint32_t reserved_51;
- uint32_t reserved_52;
- uint32_t reserved_53;
- uint32_t reserved_54;
- uint32_t reserved_55;
- uint32_t reserved_56;
- uint32_t reserved_57;
- uint32_t reserved_58;
- uint32_t reserved_59;
- uint32_t reserved_60;
- uint32_t reserved_61;
- uint32_t reserved_62;
- uint32_t reserved_63;
- uint32_t reserved_64;
- uint32_t compute_user_data_0;
- uint32_t compute_user_data_1;
- uint32_t compute_user_data_2;
- uint32_t compute_user_data_3;
- uint32_t compute_user_data_4;
- uint32_t compute_user_data_5;
- uint32_t compute_user_data_6;
- uint32_t compute_user_data_7;
- uint32_t compute_user_data_8;
- uint32_t compute_user_data_9;
- uint32_t compute_user_data_10;
- uint32_t compute_user_data_11;
- uint32_t compute_user_data_12;
- uint32_t compute_user_data_13;
- uint32_t compute_user_data_14;
- uint32_t compute_user_data_15;
- uint32_t cp_compute_csinvoc_count_lo;
- uint32_t cp_compute_csinvoc_count_hi;
- uint32_t reserved_83;
- uint32_t reserved_84;
- uint32_t reserved_85;
- uint32_t cp_mqd_query_time_lo;
- uint32_t cp_mqd_query_time_hi;
- uint32_t cp_mqd_connect_start_time_lo;
- uint32_t cp_mqd_connect_start_time_hi;
- uint32_t cp_mqd_connect_end_time_lo;
- uint32_t cp_mqd_connect_end_time_hi;
- uint32_t cp_mqd_connect_end_wf_count;
- uint32_t cp_mqd_connect_end_pq_rptr;
- uint32_t cp_mqd_connect_end_pq_wptr;
- uint32_t cp_mqd_connect_end_ib_rptr;
- uint32_t reserved_96;
- uint32_t reserved_97;
- uint32_t cp_mqd_save_start_time_lo;
- uint32_t cp_mqd_save_start_time_hi;
- uint32_t cp_mqd_save_end_time_lo;
- uint32_t cp_mqd_save_end_time_hi;
- uint32_t cp_mqd_restore_start_time_lo;
- uint32_t cp_mqd_restore_start_time_hi;
- uint32_t cp_mqd_restore_end_time_lo;
- uint32_t cp_mqd_restore_end_time_hi;
- uint32_t reserved_106;
- uint32_t reserved_107;
- uint32_t gds_cs_ctxsw_cnt0;
- uint32_t gds_cs_ctxsw_cnt1;
- uint32_t gds_cs_ctxsw_cnt2;
- uint32_t gds_cs_ctxsw_cnt3;
- uint32_t reserved_112;
- uint32_t reserved_113;
- uint32_t cp_pq_exe_status_lo;
- uint32_t cp_pq_exe_status_hi;
- uint32_t cp_packet_id_lo;
- uint32_t cp_packet_id_hi;
- uint32_t cp_packet_exe_status_lo;
- uint32_t cp_packet_exe_status_hi;
- uint32_t gds_save_base_addr_lo;
- uint32_t gds_save_base_addr_hi;
- uint32_t gds_save_mask_lo;
- uint32_t gds_save_mask_hi;
- uint32_t ctx_save_base_addr_lo;
- uint32_t ctx_save_base_addr_hi;
- uint32_t reserved_126;
- uint32_t reserved_127;
- uint32_t cp_mqd_base_addr_lo;
- uint32_t cp_mqd_base_addr_hi;
- uint32_t cp_hqd_active;
- uint32_t cp_hqd_vmid;
- uint32_t cp_hqd_persistent_state;
- uint32_t cp_hqd_pipe_priority;
- uint32_t cp_hqd_queue_priority;
- uint32_t cp_hqd_quantum;
- uint32_t cp_hqd_pq_base_lo;
- uint32_t cp_hqd_pq_base_hi;
- uint32_t cp_hqd_pq_rptr;
- uint32_t cp_hqd_pq_rptr_report_addr_lo;
- uint32_t cp_hqd_pq_rptr_report_addr_hi;
- uint32_t cp_hqd_pq_wptr_poll_addr_lo;
- uint32_t cp_hqd_pq_wptr_poll_addr_hi;
- uint32_t cp_hqd_pq_doorbell_control;
- uint32_t cp_hqd_pq_wptr;
- uint32_t cp_hqd_pq_control;
- uint32_t cp_hqd_ib_base_addr_lo;
- uint32_t cp_hqd_ib_base_addr_hi;
- uint32_t cp_hqd_ib_rptr;
- uint32_t cp_hqd_ib_control;
- uint32_t cp_hqd_iq_timer;
- uint32_t cp_hqd_iq_rptr;
- uint32_t cp_hqd_dequeue_request;
- uint32_t cp_hqd_dma_offload;
- uint32_t cp_hqd_sema_cmd;
- uint32_t cp_hqd_msg_type;
- uint32_t cp_hqd_atomic0_preop_lo;
- uint32_t cp_hqd_atomic0_preop_hi;
- uint32_t cp_hqd_atomic1_preop_lo;
- uint32_t cp_hqd_atomic1_preop_hi;
- uint32_t cp_hqd_hq_status0;
- uint32_t cp_hqd_hq_control0;
- uint32_t cp_mqd_control;
- uint32_t cp_hqd_hq_status1;
- uint32_t cp_hqd_hq_control1;
- uint32_t cp_hqd_eop_base_addr_lo;
- uint32_t cp_hqd_eop_base_addr_hi;
- uint32_t cp_hqd_eop_control;
- uint32_t cp_hqd_eop_rptr;
- uint32_t cp_hqd_eop_wptr;
- uint32_t cp_hqd_eop_done_events;
- uint32_t cp_hqd_ctx_save_base_addr_lo;
- uint32_t cp_hqd_ctx_save_base_addr_hi;
- uint32_t cp_hqd_ctx_save_control;
- uint32_t cp_hqd_cntl_stack_offset;
- uint32_t cp_hqd_cntl_stack_size;
- uint32_t cp_hqd_wg_state_offset;
- uint32_t cp_hqd_ctx_save_size;
- uint32_t cp_hqd_gds_resource_state;
- uint32_t cp_hqd_error;
- uint32_t cp_hqd_eop_wptr_mem;
- uint32_t cp_hqd_eop_dones;
- uint32_t reserved_182;
- uint32_t reserved_183;
- uint32_t reserved_184;
- uint32_t reserved_185;
- uint32_t reserved_186;
- uint32_t reserved_187;
- uint32_t reserved_188;
- uint32_t reserved_189;
- uint32_t reserved_190;
- uint32_t reserved_191;
- uint32_t iqtimer_pkt_header;
- uint32_t iqtimer_pkt_dw0;
- uint32_t iqtimer_pkt_dw1;
- uint32_t iqtimer_pkt_dw2;
- uint32_t iqtimer_pkt_dw3;
- uint32_t iqtimer_pkt_dw4;
- uint32_t iqtimer_pkt_dw5;
- uint32_t iqtimer_pkt_dw6;
- uint32_t iqtimer_pkt_dw7;
- uint32_t iqtimer_pkt_dw8;
- uint32_t iqtimer_pkt_dw9;
- uint32_t iqtimer_pkt_dw10;
- uint32_t iqtimer_pkt_dw11;
- uint32_t iqtimer_pkt_dw12;
- uint32_t iqtimer_pkt_dw13;
- uint32_t iqtimer_pkt_dw14;
- uint32_t iqtimer_pkt_dw15;
- uint32_t iqtimer_pkt_dw16;
- uint32_t iqtimer_pkt_dw17;
- uint32_t iqtimer_pkt_dw18;
- uint32_t iqtimer_pkt_dw19;
- uint32_t iqtimer_pkt_dw20;
- uint32_t iqtimer_pkt_dw21;
- uint32_t iqtimer_pkt_dw22;
- uint32_t iqtimer_pkt_dw23;
- uint32_t iqtimer_pkt_dw24;
- uint32_t iqtimer_pkt_dw25;
- uint32_t iqtimer_pkt_dw26;
- uint32_t iqtimer_pkt_dw27;
- uint32_t iqtimer_pkt_dw28;
- uint32_t iqtimer_pkt_dw29;
- uint32_t iqtimer_pkt_dw30;
- uint32_t iqtimer_pkt_dw31;
- uint32_t reserved_225;
- uint32_t reserved_226;
- uint32_t reserved_227;
- uint32_t set_resources_header;
- uint32_t set_resources_dw1;
- uint32_t set_resources_dw2;
- uint32_t set_resources_dw3;
- uint32_t set_resources_dw4;
- uint32_t set_resources_dw5;
- uint32_t set_resources_dw6;
- uint32_t set_resources_dw7;
- uint32_t reserved_236;
- uint32_t reserved_237;
- uint32_t reserved_238;
- uint32_t reserved_239;
- uint32_t queue_doorbell_id0;
- uint32_t queue_doorbell_id1;
- uint32_t queue_doorbell_id2;
- uint32_t queue_doorbell_id3;
- uint32_t queue_doorbell_id4;
- uint32_t queue_doorbell_id5;
- uint32_t queue_doorbell_id6;
- uint32_t queue_doorbell_id7;
- uint32_t queue_doorbell_id8;
- uint32_t queue_doorbell_id9;
- uint32_t queue_doorbell_id10;
- uint32_t queue_doorbell_id11;
- uint32_t queue_doorbell_id12;
- uint32_t queue_doorbell_id13;
- uint32_t queue_doorbell_id14;
- uint32_t queue_doorbell_id15;
-};
-
struct vi_ce_ib_state {
uint32_t ce_ib_completion_status;
uint32_t ce_constegnine_count;
diff --git a/drivers/gpu/drm/amd/lib/Kconfig b/drivers/gpu/drm/amd/lib/Kconfig
new file mode 100644
index 000000000000..776ef3434c10
--- /dev/null
+++ b/drivers/gpu/drm/amd/lib/Kconfig
@@ -0,0 +1,28 @@
+menu "AMD Library routines"
+
+#
+# Closed hash table
+#
+config CHASH
+ tristate
+ default DRM_AMDGPU
+ help
+ Statically sized closed hash table implementation with low
+ memory and CPU overhead.
+
+config CHASH_STATS
+ bool "Closed hash table performance statistics"
+ depends on CHASH
+ default n
+ help
+ Enable collection of performance statistics for closed hash tables.
+
+config CHASH_SELFTEST
+ bool "Closed hash table self test"
+ depends on CHASH
+ default n
+ help
+ Runs a selftest during module load. Several module parameters
+ are available to modify the behaviour of the test.
+
+endmenu
diff --git a/drivers/gpu/drm/amd/lib/Makefile b/drivers/gpu/drm/amd/lib/Makefile
new file mode 100644
index 000000000000..87cd7009e80f
--- /dev/null
+++ b/drivers/gpu/drm/amd/lib/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for AMD library routines, which are used by AMD driver
+# components.
+#
+# This is for common library routines that can be shared between AMD
+# driver components or later moved to kernel/lib for sharing with
+# other drivers.
+
+ccflags-y := -I$(src)/../include
+
+obj-$(CONFIG_CHASH) += chash.o
diff --git a/drivers/gpu/drm/amd/lib/chash.c b/drivers/gpu/drm/amd/lib/chash.c
new file mode 100644
index 000000000000..b8e45f356a1c
--- /dev/null
+++ b/drivers/gpu/drm/amd/lib/chash.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/hash.h>
+#include <linux/bug.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/sched/clock.h>
+#include <asm/div64.h>
+#include <linux/chash.h>
+
+/**
+ * chash_table_alloc - Allocate closed hash table
+ * @table: Pointer to the table structure
+ * @bits: Table size will be 2^bits entries
+ * @key_size: Size of hash keys in bytes, 4 or 8
+ * @value_size: Size of data values in bytes, can be 0
+ * @gfp_mask: GFP flags to use for the data allocation
+ *
+ * Returns 0 on success, -EINVAL for invalid parameters or -ENOMEM if
+ * the allocation fails.
+ */
+int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
+ unsigned int value_size, gfp_t gfp_mask)
+{
+ if (bits > 31)
+ return -EINVAL;
+
+ if (key_size != 4 && key_size != 8)
+ return -EINVAL;
+
+ table->data = kcalloc(__CHASH_DATA_SIZE(bits, key_size, value_size),
+ sizeof(long), gfp_mask);
+ if (!table->data)
+ return -ENOMEM;
+
+ __CHASH_TABLE_INIT(table->table, table->data,
+ bits, key_size, value_size);
+
+ return 0;
+}
+EXPORT_SYMBOL(chash_table_alloc);
+
+/**
+ * chash_table_free - Free closed hash table
+ * @table: Pointer to the table structure
+ */
+void chash_table_free(struct chash_table *table)
+{
+ kfree(table->data);
+}
+EXPORT_SYMBOL(chash_table_free);
+
+#ifdef CONFIG_CHASH_STATS
+
+#define DIV_FRAC(nom, denom, quot, frac, frac_digits) do { \
+ u64 __nom = (nom); \
+ u64 __denom = (denom); \
+ u64 __quot, __frac; \
+ u32 __rem; \
+ \
+ while (__denom >> 32) { \
+ __nom >>= 1; \
+ __denom >>= 1; \
+ } \
+ __quot = __nom; \
+ __rem = do_div(__quot, __denom); \
+ __frac = __rem * (frac_digits) + (__denom >> 1); \
+ do_div(__frac, __denom); \
+ (quot) = __quot; \
+ (frac) = __frac; \
+ } while (0)
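+
+/*
+ * Illustrative note: DIV_FRAC splits nom/denom into an integer part and
+ * a rounded fractional part scaled by frac_digits, e.g.
+ * DIV_FRAC(1, 3, quot, frac, 1000) yields quot == 0 and frac == 333,
+ * which the callers below print as "0.333".
+ */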
+
+void __chash_table_dump_stats(struct __chash_table *table)
+{
+ struct chash_iter iter = CHASH_ITER_INIT(table, 0);
+ u32 filled = 0, empty = 0, tombstones = 0;
+ u64 quot1, quot2;
+ u32 frac1, frac2;
+
+ do {
+ if (chash_iter_is_valid(iter))
+ filled++;
+ else if (chash_iter_is_empty(iter))
+ empty++;
+ else
+ tombstones++;
+ CHASH_ITER_INC(iter);
+ } while (iter.slot);
+
+ pr_debug("chash: key size %u, value size %u\n",
+ table->key_size, table->value_size);
+ pr_debug(" Slots total/filled/empty/tombstones: %u / %u / %u / %u\n",
+ 1 << table->bits, filled, empty, tombstones);
+ if (table->hits > 0) {
+ DIV_FRAC(table->hits_steps, table->hits, quot1, frac1, 1000);
+ DIV_FRAC(table->hits * 1000, table->hits_time_ns,
+ quot2, frac2, 1000);
+ } else {
+ quot1 = quot2 = 0;
+ frac1 = frac2 = 0;
+ }
+ pr_debug(" Hits (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
+ table->hits, quot1, frac1, quot2, frac2);
+ if (table->miss > 0) {
+ DIV_FRAC(table->miss_steps, table->miss, quot1, frac1, 1000);
+ DIV_FRAC(table->miss * 1000, table->miss_time_ns,
+ quot2, frac2, 1000);
+ } else {
+ quot1 = quot2 = 0;
+ frac1 = frac2 = 0;
+ }
+ pr_debug(" Misses (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
+ table->miss, quot1, frac1, quot2, frac2);
+ if (table->hits + table->miss > 0) {
+ DIV_FRAC(table->hits_steps + table->miss_steps,
+ table->hits + table->miss, quot1, frac1, 1000);
+ DIV_FRAC((table->hits + table->miss) * 1000,
+ (table->hits_time_ns + table->miss_time_ns),
+ quot2, frac2, 1000);
+ } else {
+ quot1 = quot2 = 0;
+ frac1 = frac2 = 0;
+ }
+ pr_debug(" Total (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
+ table->hits + table->miss, quot1, frac1, quot2, frac2);
+ if (table->relocs > 0) {
+ DIV_FRAC(table->hits + table->miss, table->relocs,
+ quot1, frac1, 1000);
+ DIV_FRAC(table->reloc_dist, table->relocs, quot2, frac2, 1000);
+ pr_debug(" Relocations (freq, avg.dist): %llu (1:%llu.%03u, %llu.%03u)\n",
+ table->relocs, quot1, frac1, quot2, frac2);
+ } else {
+ pr_debug(" No relocations\n");
+ }
+}
+EXPORT_SYMBOL(__chash_table_dump_stats);
+
+#undef DIV_FRAC
+#endif
+
+#define CHASH_INC(table, a) ((a) = ((a) + 1) & (table)->size_mask)
+#define CHASH_ADD(table, a, b) (((a) + (b)) & (table)->size_mask)
+#define CHASH_SUB(table, a, b) (((a) - (b)) & (table)->size_mask)
+#define CHASH_IN_RANGE(table, slot, first, last) \
+ (CHASH_SUB(table, slot, first) <= CHASH_SUB(table, last, first))
+
+/*#define CHASH_DEBUG Uncomment this to enable verbose debug output*/
+#ifdef CHASH_DEBUG
+static void chash_table_dump(struct __chash_table *table)
+{
+ struct chash_iter iter = CHASH_ITER_INIT(table, 0);
+
+ do {
+ if ((iter.slot & 3) == 0)
+ pr_debug("%04x: ", iter.slot);
+
+ if (chash_iter_is_valid(iter))
+ pr_debug("[%016llx] ", chash_iter_key(iter));
+ else if (chash_iter_is_empty(iter))
+ pr_debug("[ <empty> ] ");
+ else
+ pr_debug("[ <tombstone> ] ");
+
+ if ((iter.slot & 3) == 3)
+ pr_debug("\n");
+
+ CHASH_ITER_INC(iter);
+ } while (iter.slot);
+
+ if ((iter.slot & 3) != 0)
+ pr_debug("\n");
+}
+
+static int chash_table_check(struct __chash_table *table)
+{
+ u32 hash;
+ struct chash_iter iter = CHASH_ITER_INIT(table, 0);
+ struct chash_iter cur = CHASH_ITER_INIT(table, 0);
+
+ do {
+ if (!chash_iter_is_valid(iter)) {
+ CHASH_ITER_INC(iter);
+ continue;
+ }
+
+ hash = chash_iter_hash(iter);
+ CHASH_ITER_SET(cur, hash);
+ while (cur.slot != iter.slot) {
+ if (chash_iter_is_empty(cur)) {
+ pr_err("Path to element at %x with hash %x broken at slot %x\n",
+ iter.slot, hash, cur.slot);
+ chash_table_dump(table);
+ return -EINVAL;
+ }
+ CHASH_ITER_INC(cur);
+ }
+
+ CHASH_ITER_INC(iter);
+ } while (iter.slot);
+
+ return 0;
+}
+#endif
+
+static void chash_iter_relocate(struct chash_iter dst, struct chash_iter src)
+{
+ BUG_ON(src.table == dst.table && src.slot == dst.slot);
+ BUG_ON(src.table->key_size != dst.table->key_size);
+ BUG_ON(src.table->value_size != dst.table->value_size);
+
+ if (dst.table->key_size == 4)
+ dst.table->keys32[dst.slot] = src.table->keys32[src.slot];
+ else
+ dst.table->keys64[dst.slot] = src.table->keys64[src.slot];
+
+ if (dst.table->value_size)
+ memcpy(chash_iter_value(dst), chash_iter_value(src),
+ dst.table->value_size);
+
+ chash_iter_set_valid(dst);
+ chash_iter_set_invalid(src);
+
+#ifdef CONFIG_CHASH_STATS
+ if (src.table == dst.table) {
+ dst.table->relocs++;
+ dst.table->reloc_dist +=
+ CHASH_SUB(dst.table, src.slot, dst.slot);
+ }
+#endif
+}
+
+/**
+ * __chash_table_find - Helper for looking up a hash table entry
+ * @iter: Pointer to hash table iterator
+ * @key: Key of the entry to find
+ * @for_removal: set to true if the element will be removed soon
+ *
+ * Searches for an entry in the hash table with a given key. iter must
+ * be initialized by the caller to point to the home position of the
+ * hypothetical entry, i.e. it must be initialized with the hash table
+ * and the key's hash as the initial slot for the search.
+ *
+ * This function also does some local clean-up to speed up future
+ * look-ups by relocating entries to better slots and removing
+ * tombstones that are no longer needed.
+ *
+ * If @for_removal is true, the function avoids relocating the entry
+ * that is being returned.
+ *
+ * Returns 0 if the search is successful. In this case iter is updated
+ * to point to the found entry. Otherwise %-EINVAL is returned and the
+ * iter is updated to point to the first available slot for the given
+ * key. If the table is full, the slot is set to -1.
+ */
+static int chash_table_find(struct chash_iter *iter, u64 key,
+ bool for_removal)
+{
+#ifdef CONFIG_CHASH_STATS
+ u64 ts1 = local_clock();
+#endif
+ u32 hash = iter->slot;
+ struct chash_iter first_redundant = CHASH_ITER_INIT(iter->table, -1);
+ int first_avail = (for_removal ? -2 : -1);
+
+ while (!chash_iter_is_valid(*iter) || chash_iter_key(*iter) != key) {
+ if (chash_iter_is_empty(*iter)) {
+ /* Found an empty slot, which ends the
+ * search. Clean up any preceding tombstones
+ * that are no longer needed because they lead
+ * to nowhere.
+ */
+ if ((int)first_redundant.slot < 0)
+ goto not_found;
+ while (first_redundant.slot != iter->slot) {
+ if (!chash_iter_is_valid(first_redundant))
+ chash_iter_set_empty(first_redundant);
+ CHASH_ITER_INC(first_redundant);
+ }
+#ifdef CHASH_DEBUG
+ chash_table_check(iter->table);
+#endif
+ goto not_found;
+ } else if (!chash_iter_is_valid(*iter)) {
+ /* Found a tombstone. Remember it as a candidate
+ * for relocating the entry we're looking for
+ * or for adding a new entry with the given key.
+ */
+ if (first_avail == -1)
+ first_avail = iter->slot;
+ /* Or mark it as the start of a series of
+ * potentially redundant tombstones
+ */
+ else if (first_redundant.slot == -1)
+ CHASH_ITER_SET(first_redundant, iter->slot);
+ } else if (first_redundant.slot >= 0) {
+ /* Found a valid, occupied slot with a
+ * preceding series of tombstones. Relocate it
+ * to a better position that no longer depends
+ * on those tombstones
+ */
+ u32 cur_hash = chash_iter_hash(*iter);
+
+ if (!CHASH_IN_RANGE(iter->table, cur_hash,
+ first_redundant.slot + 1,
+ iter->slot)) {
+ /* This entry has a hash at or before
+ * the first tombstone we found. We
+ * can relocate it to that tombstone
+ * and advance to the next tombstone
+ */
+ chash_iter_relocate(first_redundant, *iter);
+ do {
+ CHASH_ITER_INC(first_redundant);
+ } while (chash_iter_is_valid(first_redundant));
+ } else if (cur_hash != iter->slot) {
+ /* Relocate entry to its home position
+ * or as close as possible so it no
+ * longer depends on any preceding
+ * tombstones
+ */
+ struct chash_iter new_iter =
+ CHASH_ITER_INIT(iter->table, cur_hash);
+
+ while (new_iter.slot != iter->slot &&
+ chash_iter_is_valid(new_iter))
+ CHASH_ITER_INC(new_iter);
+
+ if (new_iter.slot != iter->slot)
+ chash_iter_relocate(new_iter, *iter);
+ }
+ }
+
+ CHASH_ITER_INC(*iter);
+ if (iter->slot == hash) {
+ iter->slot = -1;
+ goto not_found;
+ }
+ }
+
+#ifdef CONFIG_CHASH_STATS
+ iter->table->hits++;
+ iter->table->hits_steps += CHASH_SUB(iter->table, iter->slot, hash) + 1;
+#endif
+
+ if (first_avail >= 0) {
+ CHASH_ITER_SET(first_redundant, first_avail);
+ chash_iter_relocate(first_redundant, *iter);
+ iter->slot = first_redundant.slot;
+ iter->mask = first_redundant.mask;
+ }
+
+#ifdef CONFIG_CHASH_STATS
+ iter->table->hits_time_ns += local_clock() - ts1;
+#endif
+
+ return 0;
+
+not_found:
+#ifdef CONFIG_CHASH_STATS
+ iter->table->miss++;
+ iter->table->miss_steps += (iter->slot < 0) ?
+ (1 << iter->table->bits) :
+ CHASH_SUB(iter->table, iter->slot, hash) + 1;
+#endif
+
+ if (first_avail >= 0)
+ CHASH_ITER_SET(*iter, first_avail);
+
+#ifdef CONFIG_CHASH_STATS
+ iter->table->miss_time_ns += local_clock() - ts1;
+#endif
+
+ return -EINVAL;
+}
+
+int __chash_table_copy_in(struct __chash_table *table, u64 key,
+ const void *value)
+{
+ u32 hash = (table->key_size == 4) ?
+ hash_32(key, table->bits) : hash_64(key, table->bits);
+ struct chash_iter iter = CHASH_ITER_INIT(table, hash);
+ int r = chash_table_find(&iter, key, false);
+
+ /* Found an existing entry */
+ if (!r) {
+ if (value && table->value_size)
+ memcpy(chash_iter_value(iter), value,
+ table->value_size);
+ return 1;
+ }
+
+ /* Is there a place to add a new entry? */
+ if (iter.slot < 0) {
+ pr_err("Hash table overflow\n");
+ return -ENOMEM;
+ }
+
+ chash_iter_set_valid(iter);
+
+ if (table->key_size == 4)
+ table->keys32[iter.slot] = key;
+ else
+ table->keys64[iter.slot] = key;
+ if (value && table->value_size)
+ memcpy(chash_iter_value(iter), value, table->value_size);
+
+ return 0;
+}
+EXPORT_SYMBOL(__chash_table_copy_in);
+
+int __chash_table_copy_out(struct __chash_table *table, u64 key,
+ void *value, bool remove)
+{
+ u32 hash = (table->key_size == 4) ?
+ hash_32(key, table->bits) : hash_64(key, table->bits);
+ struct chash_iter iter = CHASH_ITER_INIT(table, hash);
+ int r = chash_table_find(&iter, key, remove);
+
+ if (r < 0)
+ return r;
+
+ if (value && table->value_size)
+ memcpy(value, chash_iter_value(iter), table->value_size);
+
+ if (remove)
+ chash_iter_set_invalid(iter);
+
+ return iter.slot;
+}
+EXPORT_SYMBOL(__chash_table_copy_out);
+
+#ifdef CONFIG_CHASH_SELFTEST
+/**
+ * chash_self_test - Run a self-test of the hash table implementation
+ * @bits: Table size will be 2^bits entries
+ * @key_size: Size of hash keys in bytes, 4 or 8
+ * @min_fill: Minimum fill level during the test
+ * @max_fill: Maximum fill level during the test
+ * @iterations: Number of test iterations
+ *
+ * The test adds entries to and removes entries from a hash table,
+ * cycling the fill level between min_fill and max_fill entries. It
+ * also tests lookup and value retrieval.
+ */
+static int __init chash_self_test(u8 bits, u8 key_size,
+ int min_fill, int max_fill,
+ u64 iterations)
+{
+ struct chash_table table;
+ int ret;
+ u64 add_count, rmv_count;
+ u64 value;
+
+ if (key_size == 4 && iterations > 0xffffffff)
+ return -EINVAL;
+ if (min_fill >= max_fill)
+ return -EINVAL;
+
+ ret = chash_table_alloc(&table, bits, key_size, sizeof(u64),
+ GFP_KERNEL);
+ if (ret) {
+ pr_err("chash_table_alloc failed: %d\n", ret);
+ return ret;
+ }
+
+ for (add_count = 0, rmv_count = 0; add_count < iterations;
+ add_count++) {
+ /* When we hit the max_fill level, remove entries down
+ * to min_fill
+ */
+ if (add_count - rmv_count == max_fill) {
+ u64 find_count = rmv_count;
+
+ /* First try to find all entries that we're
+ * about to remove, confirm their value, test
+ * writing them back a second time.
+ */
+ for (; add_count - find_count > min_fill;
+ find_count++) {
+ ret = chash_table_copy_out(&table, find_count,
+ &value);
+ if (ret < 0) {
+ pr_err("chash_table_copy_out failed: %d\n",
+ ret);
+ goto out;
+ }
+ if (value != ~find_count) {
+ pr_err("Wrong value retrieved for key 0x%llx, expected 0x%llx got 0x%llx\n",
+ find_count, ~find_count, value);
+#ifdef CHASH_DEBUG
+ chash_table_dump(&table.table);
+#endif
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = chash_table_copy_in(&table, find_count,
+ &value);
+ if (ret != 1) {
+ pr_err("copy_in second time returned %d, expected 1\n",
+ ret);
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ /* Remove them until we hit min_fill level */
+ for (; add_count - rmv_count > min_fill; rmv_count++) {
+ ret = chash_table_remove(&table, rmv_count,
+ NULL);
+ if (ret < 0) {
+ pr_err("chash_table_remove failed: %d\n",
+ ret);
+ goto out;
+ }
+ }
+ }
+
+ /* Add a new value */
+ value = ~add_count;
+ ret = chash_table_copy_in(&table, add_count, &value);
+ if (ret != 0) {
+ pr_err("copy_in first time returned %d, expected 0\n",
+ ret);
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+ chash_table_dump_stats(&table);
+ chash_table_reset_stats(&table);
+
+out:
+ chash_table_free(&table);
+ return ret;
+}
+
+static unsigned int chash_test_bits = 10;
+MODULE_PARM_DESC(test_bits,
+ "Selftest number of hash bits ([4..20], default=10)");
+module_param_named(test_bits, chash_test_bits, uint, 0444);
+
+static unsigned int chash_test_keysize = 8;
+MODULE_PARM_DESC(test_keysize, "Selftest keysize (4 or 8, default=8)");
+module_param_named(test_keysize, chash_test_keysize, uint, 0444);
+
+static unsigned int chash_test_minfill;
+MODULE_PARM_DESC(test_minfill, "Selftest minimum #entries (default=50%)");
+module_param_named(test_minfill, chash_test_minfill, uint, 0444);
+
+static unsigned int chash_test_maxfill;
+MODULE_PARM_DESC(test_maxfill, "Selftest maximum #entries (default=80%)");
+module_param_named(test_maxfill, chash_test_maxfill, uint, 0444);
+
+static unsigned long chash_test_iters;
+MODULE_PARM_DESC(test_iters, "Selftest iterations (default=1000 x #entries)");
+module_param_named(test_iters, chash_test_iters, ulong, 0444);
+
+static int __init chash_init(void)
+{
+ int ret;
+ u64 ts1_ns;
+
+ /* Skip self test on user errors */
+ if (chash_test_bits < 4 || chash_test_bits > 20) {
+ pr_err("chash: test_bits out of range [4..20].\n");
+ return 0;
+ }
+ if (chash_test_keysize != 4 && chash_test_keysize != 8) {
+ pr_err("chash: test_keysize invalid. Must be 4 or 8.\n");
+ return 0;
+ }
+
+ if (!chash_test_minfill)
+ chash_test_minfill = (1 << chash_test_bits) / 2;
+ if (!chash_test_maxfill)
+ chash_test_maxfill = (1 << chash_test_bits) * 4 / 5;
+ if (!chash_test_iters)
+ chash_test_iters = (1 << chash_test_bits) * 1000;
+
+ if (chash_test_minfill >= (1 << chash_test_bits)) {
+ pr_err("chash: test_minfill too big. Must be < table size.\n");
+ return 0;
+ }
+ if (chash_test_maxfill >= (1 << chash_test_bits)) {
+ pr_err("chash: test_maxfill too big. Must be < table size.\n");
+ return 0;
+ }
+ if (chash_test_minfill >= chash_test_maxfill) {
+ pr_err("chash: test_minfill must be < test_maxfill.\n");
+ return 0;
+ }
+ if (chash_test_keysize == 4 && chash_test_iters > 0xffffffff) {
+ pr_err("chash: test_iters must be < 4G for 4 byte keys.\n");
+ return 0;
+ }
+
+ ts1_ns = local_clock();
+ ret = chash_self_test(chash_test_bits, chash_test_keysize,
+ chash_test_minfill, chash_test_maxfill,
+ chash_test_iters);
+ if (!ret) {
+ u64 ts_delta_us = local_clock() - ts1_ns;
+ u64 iters_per_second = (u64)chash_test_iters * 1000000;
+
+ do_div(ts_delta_us, 1000);
+ do_div(iters_per_second, ts_delta_us);
+ pr_info("chash: self test took %llu us, %llu iterations/s\n",
+ ts_delta_us, iters_per_second);
+ } else {
+ pr_err("chash: self test failed: %d\n", ret);
+ }
+
+ return ret;
+}
+
+module_init(chash_init);
+
+#endif /* CONFIG_CHASH_SELFTEST */
+
+MODULE_DESCRIPTION("Closed hash table");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index 72d5f50508b6..8c55c6e254d9 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -5,12 +5,11 @@ subdir-ccflags-y += \
-I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/powerplay/smumgr\
- -I$(FULL_AMD_PATH)/powerplay/hwmgr \
- -I$(FULL_AMD_PATH)/powerplay/eventmgr
+ -I$(FULL_AMD_PATH)/powerplay/hwmgr
AMD_PP_PATH = ../powerplay
-PP_LIBS = smumgr hwmgr eventmgr
+PP_LIBS = smumgr hwmgr
AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS)))
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index f73e80c4bf33..c7e34128cbde 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -29,72 +29,98 @@
#include "amd_powerplay.h"
#include "pp_instance.h"
#include "power_state.h"
-#include "eventmanager.h"
+#define PP_DPM_DISABLED 0xCCCC
+
+static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
+ void *input, void *output);
static inline int pp_check(struct pp_instance *handle)
{
- if (handle == NULL || handle->pp_valid != PP_VALID)
+ if (handle == NULL)
return -EINVAL;
- if (handle->smu_mgr == NULL || handle->smu_mgr->smumgr_funcs == NULL)
+ if (handle->hwmgr == NULL || handle->hwmgr->smumgr_funcs == NULL)
return -EINVAL;
if (handle->pm_en == 0)
return PP_DPM_DISABLED;
- if (handle->hwmgr == NULL || handle->hwmgr->hwmgr_func == NULL
- || handle->eventmgr == NULL)
+ if (handle->hwmgr->hwmgr_func == NULL)
return PP_DPM_DISABLED;
return 0;
}
+static int amd_powerplay_create(struct amd_pp_init *pp_init,
+ void **handle)
+{
+ struct pp_instance *instance;
+
+ if (pp_init == NULL || handle == NULL)
+ return -EINVAL;
+
+ instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL);
+ if (instance == NULL)
+ return -ENOMEM;
+
+ instance->chip_family = pp_init->chip_family;
+ instance->chip_id = pp_init->chip_id;
+ instance->pm_en = pp_init->pm_en;
+ instance->feature_mask = pp_init->feature_mask;
+ instance->device = pp_init->device;
+ mutex_init(&instance->pp_lock);
+ *handle = instance;
+ return 0;
+}
+
+static int amd_powerplay_destroy(void *handle)
+{
+ struct pp_instance *instance = (struct pp_instance *)handle;
+
+ kfree(instance->hwmgr->hardcode_pp_table);
+ instance->hwmgr->hardcode_pp_table = NULL;
+
+ kfree(instance->hwmgr);
+ instance->hwmgr = NULL;
+
+ kfree(instance);
+ instance = NULL;
+ return 0;
+}
+
static int pp_early_init(void *handle)
{
int ret;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_instance *pp_handle = NULL;
- ret = smum_early_init(pp_handle);
- if (ret)
- return ret;
+ pp_handle = cgs_register_pp_handle(handle, amd_powerplay_create);
- if ((pp_handle->pm_en == 0)
- || cgs_is_virtualization_enabled(pp_handle->device))
- return PP_DPM_DISABLED;
+ if (!pp_handle)
+ return -EINVAL;
ret = hwmgr_early_init(pp_handle);
- if (ret) {
- pp_handle->pm_en = 0;
- return PP_DPM_DISABLED;
- }
-
- ret = eventmgr_early_init(pp_handle);
- if (ret) {
- kfree(pp_handle->hwmgr);
- pp_handle->hwmgr = NULL;
- pp_handle->pm_en = 0;
- return PP_DPM_DISABLED;
- }
+ if (ret)
+ return -EINVAL;
return 0;
}
static int pp_sw_init(void *handle)
{
- struct pp_smumgr *smumgr;
+ struct pp_hwmgr *hwmgr;
int ret = 0;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
ret = pp_check(pp_handle);
- if (ret == 0 || ret == PP_DPM_DISABLED) {
- smumgr = pp_handle->smu_mgr;
+ if (ret >= 0) {
+ hwmgr = pp_handle->hwmgr;
- if (smumgr->smumgr_funcs->smu_init == NULL)
+ if (hwmgr->smumgr_funcs->smu_init == NULL)
return -EINVAL;
- ret = smumgr->smumgr_funcs->smu_init(smumgr);
+ ret = hwmgr->smumgr_funcs->smu_init(hwmgr);
pr_info("amdgpu: powerplay sw initialized\n");
}
@@ -103,84 +129,86 @@ static int pp_sw_init(void *handle)
static int pp_sw_fini(void *handle)
{
- struct pp_smumgr *smumgr;
+ struct pp_hwmgr *hwmgr;
int ret = 0;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
ret = pp_check(pp_handle);
- if (ret == 0 || ret == PP_DPM_DISABLED) {
- smumgr = pp_handle->smu_mgr;
+ if (ret >= 0) {
+ hwmgr = pp_handle->hwmgr;
- if (smumgr->smumgr_funcs->smu_fini == NULL)
+ if (hwmgr->smumgr_funcs->smu_fini == NULL)
return -EINVAL;
- ret = smumgr->smumgr_funcs->smu_fini(smumgr);
+ ret = hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
}
return ret;
}
static int pp_hw_init(void *handle)
{
- struct pp_smumgr *smumgr;
- struct pp_eventmgr *eventmgr;
int ret = 0;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr;
ret = pp_check(pp_handle);
- if (ret == 0 || ret == PP_DPM_DISABLED) {
- smumgr = pp_handle->smu_mgr;
+ if (ret >= 0) {
+ hwmgr = pp_handle->hwmgr;
- if (smumgr->smumgr_funcs->start_smu == NULL)
+ if (hwmgr->smumgr_funcs->start_smu == NULL)
return -EINVAL;
- if(smumgr->smumgr_funcs->start_smu(smumgr)) {
+ if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
pr_err("smc start failed\n");
- smumgr->smumgr_funcs->smu_fini(smumgr);
+ hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
return -EINVAL;;
}
if (ret == PP_DPM_DISABLED)
- return PP_DPM_DISABLED;
+ goto exit;
+ ret = hwmgr_hw_init(pp_handle);
+ if (ret)
+ goto exit;
}
-
- ret = hwmgr_hw_init(pp_handle);
- if (ret)
- goto err;
-
- eventmgr = pp_handle->eventmgr;
- if (eventmgr->pp_eventmgr_init == NULL ||
- eventmgr->pp_eventmgr_init(eventmgr))
- goto err;
-
- return 0;
-err:
+ return ret;
+exit:
pp_handle->pm_en = 0;
- kfree(pp_handle->eventmgr);
- kfree(pp_handle->hwmgr);
- pp_handle->hwmgr = NULL;
- pp_handle->eventmgr = NULL;
- return PP_DPM_DISABLED;
+ cgs_notify_dpm_enabled(hwmgr->device, false);
+ return 0;
+
}
static int pp_hw_fini(void *handle)
{
- struct pp_eventmgr *eventmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
ret = pp_check(pp_handle);
+ if (ret == 0)
+ hwmgr_hw_fini(pp_handle);
+
+ return 0;
+}
- if (ret == 0) {
- eventmgr = pp_handle->eventmgr;
+static int pp_late_init(void *handle)
+{
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
- if (eventmgr->pp_eventmgr_fini != NULL)
- eventmgr->pp_eventmgr_fini(eventmgr);
+ ret = pp_check(pp_handle);
+ if (ret == 0)
+ pp_dpm_dispatch_tasks(pp_handle,
+ AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
- hwmgr_hw_fini(pp_handle);
- }
return 0;
}
+static void pp_late_fini(void *handle)
+{
+ amd_powerplay_destroy(handle);
+}
+
+
static bool pp_is_idle(void *handle)
{
return false;
@@ -196,28 +224,6 @@ static int pp_sw_reset(void *handle)
return 0;
}
-
-int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id)
-{
- struct pp_hwmgr *hwmgr;
- struct pp_instance *pp_handle = (struct pp_instance *)handle;
- int ret = 0;
-
- ret = pp_check(pp_handle);
-
- if (ret != 0)
- return ret;
-
- hwmgr = pp_handle->hwmgr;
-
- if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
- pr_info("%s was not implemented.\n", __func__);
- return 0;
- }
-
- return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-}
-
static int pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
@@ -227,7 +233,7 @@ static int pp_set_powergating_state(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -244,67 +250,52 @@ static int pp_set_powergating_state(void *handle,
static int pp_suspend(void *handle)
{
- struct pp_eventmgr *eventmgr;
- struct pem_event_data event_data = { {0} };
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
ret = pp_check(pp_handle);
-
- if (ret == PP_DPM_DISABLED)
- return 0;
- else if (ret != 0)
- return ret;
-
- eventmgr = pp_handle->eventmgr;
- pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
-
+ if (ret == 0)
+ hwmgr_hw_suspend(pp_handle);
return 0;
}
static int pp_resume(void *handle)
{
- struct pp_eventmgr *eventmgr;
- struct pem_event_data event_data = { {0} };
- struct pp_smumgr *smumgr;
- int ret, ret1;
+ struct pp_hwmgr *hwmgr;
+ int ret;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
- ret1 = pp_check(pp_handle);
+ ret = pp_check(pp_handle);
- if (ret1 != 0 && ret1 != PP_DPM_DISABLED)
- return ret1;
+ if (ret < 0)
+ return ret;
- smumgr = pp_handle->smu_mgr;
+ hwmgr = pp_handle->hwmgr;
- if (smumgr->smumgr_funcs->start_smu == NULL)
+ if (hwmgr->smumgr_funcs->start_smu == NULL)
return -EINVAL;
- ret = smumgr->smumgr_funcs->start_smu(smumgr);
- if (ret) {
+ if (hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
pr_err("smc start failed\n");
- smumgr->smumgr_funcs->smu_fini(smumgr);
- return ret;
+ hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
+ return -EINVAL;
}
- if (ret1 == PP_DPM_DISABLED)
+ if (ret == PP_DPM_DISABLED)
return 0;
- eventmgr = pp_handle->eventmgr;
-
- pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
-
- return 0;
+ return hwmgr_hw_resume(pp_handle);
}
const struct amd_ip_funcs pp_ip_funcs = {
.name = "powerplay",
.early_init = pp_early_init,
- .late_init = NULL,
+ .late_init = pp_late_init,
.sw_init = pp_sw_init,
.sw_fini = pp_sw_fini,
.hw_init = pp_hw_init,
.hw_fini = pp_hw_fini,
+ .late_fini = pp_late_fini,
.suspend = pp_suspend,
.resume = pp_resume,
.is_idle = pp_is_idle,
@@ -324,6 +315,63 @@ static int pp_dpm_fw_loading_complete(void *handle)
return 0;
}
+static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
+{
+ struct pp_hwmgr *hwmgr;
+ struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ int ret = 0;
+
+ ret = pp_check(pp_handle);
+
+ if (ret)
+ return ret;
+
+ hwmgr = pp_handle->hwmgr;
+
+ if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+}
+
+static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
+ enum amd_dpm_forced_level *level)
+{
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+ if (!(hwmgr->dpm_level & profile_mode_mask)) {
+ /* enter umd pstate, save current level, disable gfx cg*/
+ if (*level & profile_mode_mask) {
+ hwmgr->saved_dpm_level = hwmgr->dpm_level;
+ hwmgr->en_umd_pstate = true;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
+ }
+ } else {
+ /* exit umd pstate, restore level, enable gfx cg*/
+ if (!(*level & profile_mode_mask)) {
+ if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
+ *level = hwmgr->saved_dpm_level;
+ hwmgr->en_umd_pstate = false;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+ }
+ }
+}
+
static int pp_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
@@ -333,18 +381,27 @@ static int pp_dpm_force_performance_level(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
+ if (level == hwmgr->dpm_level)
+ return 0;
+
if (hwmgr->hwmgr_func->force_dpm_level == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&pp_handle->pp_lock);
- hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
+ pp_dpm_en_umd_pstate(hwmgr, &level);
+ hwmgr->request_dpm_level = level;
+ hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+ ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
+ if (!ret)
+ hwmgr->dpm_level = hwmgr->request_dpm_level;
+
mutex_unlock(&pp_handle->pp_lock);
return 0;
}
@@ -359,7 +416,7 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -369,15 +426,16 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
return level;
}
-static int pp_dpm_get_sclk(void *handle, bool low)
+static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
+ uint32_t clk = 0;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -387,20 +445,21 @@ static int pp_dpm_get_sclk(void *handle, bool low)
return 0;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
+ clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
+ return clk;
}
-static int pp_dpm_get_mclk(void *handle, bool low)
+static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
+ uint32_t clk = 0;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -410,12 +469,12 @@ static int pp_dpm_get_mclk(void *handle, bool low)
return 0;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
+ clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
+ return clk;
}
-static int pp_dpm_powergate_vce(void *handle, bool gate)
+static void pp_dpm_powergate_vce(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
@@ -423,22 +482,21 @@ static int pp_dpm_powergate_vce(void *handle, bool gate)
ret = pp_check(pp_handle);
- if (ret != 0)
- return ret;
+ if (ret)
+ return;
hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->powergate_vce == NULL) {
pr_info("%s was not implemented.\n", __func__);
- return 0;
+ return;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
+ hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
}
-static int pp_dpm_powergate_uvd(void *handle, bool gate)
+static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
@@ -446,75 +504,35 @@ static int pp_dpm_powergate_uvd(void *handle, bool gate)
ret = pp_check(pp_handle);
- if (ret != 0)
- return ret;
+ if (ret)
+ return;
hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
pr_info("%s was not implemented.\n", __func__);
- return 0;
+ return;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
+ hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
-}
-
-static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
-{
- switch (state) {
- case POWER_STATE_TYPE_BATTERY:
- return PP_StateUILabel_Battery;
- case POWER_STATE_TYPE_BALANCED:
- return PP_StateUILabel_Balanced;
- case POWER_STATE_TYPE_PERFORMANCE:
- return PP_StateUILabel_Performance;
- default:
- return PP_StateUILabel_None;
- }
}
-static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
+static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
void *input, void *output)
{
int ret = 0;
- struct pem_event_data data = { {0} };
struct pp_instance *pp_handle = (struct pp_instance *)handle;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
- mutex_lock(&pp_handle->pp_lock);
- switch (event_id) {
- case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE:
- ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
- break;
- case AMD_PP_EVENT_ENABLE_USER_STATE:
- {
- enum amd_pm_state_type ps;
-
- if (input == NULL) {
- ret = -EINVAL;
- break;
- }
- ps = *(unsigned long *)input;
- data.requested_ui_label = power_state_convert(ps);
- ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
- break;
- }
- case AMD_PP_EVENT_COMPLETE_INIT:
- ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
- break;
- case AMD_PP_EVENT_READJUST_POWER_STATE:
- ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
- break;
- default:
- break;
- }
+ mutex_lock(&pp_handle->pp_lock);
+ ret = hwmgr_handle_task(pp_handle, task_id, input, output);
mutex_unlock(&pp_handle->pp_lock);
+
return ret;
}
@@ -528,7 +546,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -562,7 +580,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
return pm_type;
}
-static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
+static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
@@ -570,30 +588,30 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
ret = pp_check(pp_handle);
- if (ret != 0)
- return ret;
+ if (ret)
+ return;
hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
- return 0;
+ return;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
+ hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
}
-static int pp_dpm_get_fan_control_mode(void *handle)
+static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
+ uint32_t mode = 0;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -603,9 +621,9 @@ static int pp_dpm_get_fan_control_mode(void *handle)
return 0;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
+ mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
+ return mode;
}
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
@@ -616,7 +634,7 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -639,7 +657,7 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -663,7 +681,7 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -685,7 +703,7 @@ static int pp_dpm_get_temperature(void *handle)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -710,7 +728,7 @@ static int pp_dpm_get_pp_num_states(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -755,7 +773,7 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -778,7 +796,7 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -820,7 +838,7 @@ static int pp_dpm_force_clock_level(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -844,7 +862,7 @@ static int pp_dpm_print_clock_levels(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -867,7 +885,7 @@ static int pp_dpm_get_sclk_od(void *handle)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -890,7 +908,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -914,7 +932,7 @@ static int pp_dpm_get_mclk_od(void *handle)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -937,7 +955,7 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -961,7 +979,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -987,7 +1005,7 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return NULL;
hwmgr = pp_handle->hwmgr;
@@ -1128,7 +1146,7 @@ static int pp_dpm_switch_power_profile(void *handle,
return 0;
}
-const struct amd_powerplay_funcs pp_dpm_funcs = {
+const struct amd_pm_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1160,81 +1178,27 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_power_profile_state = pp_dpm_get_power_profile_state,
.set_power_profile_state = pp_dpm_set_power_profile_state,
.switch_power_profile = pp_dpm_switch_power_profile,
+ .set_clockgating_by_smu = pp_set_clockgating_by_smu,
};
-int amd_powerplay_create(struct amd_pp_init *pp_init,
- void **handle)
-{
- struct pp_instance *instance;
-
- if (pp_init == NULL || handle == NULL)
- return -EINVAL;
-
- instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL);
- if (instance == NULL)
- return -ENOMEM;
-
- instance->pp_valid = PP_VALID;
- instance->chip_family = pp_init->chip_family;
- instance->chip_id = pp_init->chip_id;
- instance->pm_en = pp_init->pm_en;
- instance->feature_mask = pp_init->feature_mask;
- instance->device = pp_init->device;
- mutex_init(&instance->pp_lock);
- *handle = instance;
- return 0;
-}
-
-int amd_powerplay_destroy(void *handle)
-{
- struct pp_instance *instance = (struct pp_instance *)handle;
-
- if (instance->pm_en) {
- kfree(instance->eventmgr);
- kfree(instance->hwmgr);
- instance->hwmgr = NULL;
- instance->eventmgr = NULL;
- }
-
- kfree(instance->smu_mgr);
- instance->smu_mgr = NULL;
- kfree(instance);
- instance = NULL;
- return 0;
-}
-
int amd_powerplay_reset(void *handle)
{
struct pp_instance *instance = (struct pp_instance *)handle;
- struct pp_eventmgr *eventmgr;
- struct pem_event_data event_data = { {0} };
int ret;
- if (cgs_is_virtualization_enabled(instance->smu_mgr->device))
- return PP_DPM_DISABLED;
-
ret = pp_check(instance);
- if (ret != 0)
+ if (ret)
return ret;
- ret = pp_hw_fini(handle);
+ ret = pp_hw_fini(instance);
if (ret)
return ret;
ret = hwmgr_hw_init(instance);
if (ret)
- return PP_DPM_DISABLED;
-
- eventmgr = instance->eventmgr;
-
- if (eventmgr->pp_eventmgr_init == NULL)
- return PP_DPM_DISABLED;
-
- ret = eventmgr->pp_eventmgr_init(eventmgr);
- if (ret)
return ret;
- return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data);
+ return hwmgr_handle_task(instance, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
}
/* export this function to DAL */
@@ -1248,7 +1212,7 @@ int amd_powerplay_display_configuration_change(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -1267,7 +1231,7 @@ int amd_powerplay_get_display_power_level(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -1292,7 +1256,7 @@ int amd_powerplay_get_current_clocks(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -1309,7 +1273,7 @@ int amd_powerplay_get_current_clocks(void *handle,
ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
&hw_clocks, PHM_PerformanceLevelDesignation_Activity);
- if (ret != 0) {
+ if (ret) {
pr_info("Error in phm_get_clock_info \n");
mutex_unlock(&pp_handle->pp_lock);
return -EINVAL;
@@ -1343,7 +1307,7 @@ int amd_powerplay_get_clock_by_type(void *handle, enum amd_pp_clock_type type, s
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
@@ -1366,7 +1330,7 @@ int amd_powerplay_get_clock_by_type_with_latency(void *handle,
int ret = 0;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
if (!clocks)
@@ -1388,7 +1352,7 @@ int amd_powerplay_get_clock_by_type_with_voltage(void *handle,
int ret = 0;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
if (!clocks)
@@ -1412,7 +1376,7 @@ int amd_powerplay_set_watermarks_for_clocks_ranges(void *handle,
int ret = 0;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
if (!wm_with_clock_ranges)
@@ -1436,7 +1400,7 @@ int amd_powerplay_display_clock_voltage_request(void *handle,
int ret = 0;
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
if (!clock)
@@ -1460,7 +1424,7 @@ int amd_powerplay_get_display_mode_validation_clocks(void *handle,
ret = pp_check(pp_handle);
- if (ret != 0)
+ if (ret)
return ret;
hwmgr = pp_handle->hwmgr;
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile b/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile
deleted file mode 100644
index 7509e3850087..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for the 'event manager' sub-component of powerplay.
-# It provides the event management services for the driver.
-
-EVENT_MGR = eventmgr.o eventinit.o eventmanagement.o \
- eventactionchains.o eventsubchains.o eventtasks.o psm.o
-
-AMD_PP_EVENT = $(addprefix $(AMD_PP_PATH)/eventmgr/,$(EVENT_MGR))
-
-AMD_POWERPLAY_FILES += $(AMD_PP_EVENT)
-
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
deleted file mode 100644
index 8cee4e0f9fde..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "eventmgr.h"
-#include "eventactionchains.h"
-#include "eventsubchains.h"
-
-static const pem_event_action * const initialize_event[] = {
- block_adjust_power_state_tasks,
- power_budget_tasks,
- system_config_tasks,
- setup_asic_tasks,
- enable_dynamic_state_management_tasks,
- get_2d_performance_state_tasks,
- set_performance_state_tasks,
- initialize_thermal_controller_tasks,
- conditionally_force_3d_performance_state_tasks,
- process_vbios_eventinfo_tasks,
- broadcast_power_policy_tasks,
- NULL
-};
-
-const struct action_chain initialize_action_chain = {
- "Initialize",
- initialize_event
-};
-
-static const pem_event_action * const uninitialize_event[] = {
- ungate_all_display_phys_tasks,
- uninitialize_display_phy_access_tasks,
- disable_gfx_voltage_island_power_gating_tasks,
- disable_gfx_clock_gating_tasks,
- uninitialize_thermal_controller_tasks,
- set_boot_state_tasks,
- adjust_power_state_tasks,
- disable_dynamic_state_management_tasks,
- disable_clock_power_gatings_tasks,
- cleanup_asic_tasks,
- prepare_for_pnp_stop_tasks,
- NULL
-};
-
-const struct action_chain uninitialize_action_chain = {
- "Uninitialize",
- uninitialize_event
-};
-
-static const pem_event_action * const power_source_change_event_pp_enabled[] = {
- set_power_source_tasks,
- set_power_saving_state_tasks,
- adjust_power_state_tasks,
- enable_disable_fps_tasks,
- set_nbmcu_state_tasks,
- broadcast_power_policy_tasks,
- NULL
-};
-
-const struct action_chain power_source_change_action_chain_pp_enabled = {
- "Power source change - PowerPlay enabled",
- power_source_change_event_pp_enabled
-};
-
-static const pem_event_action * const power_source_change_event_pp_disabled[] = {
- set_power_source_tasks,
- set_nbmcu_state_tasks,
- NULL
-};
-
-const struct action_chain power_source_changes_action_chain_pp_disabled = {
- "Power source change - PowerPlay disabled",
- power_source_change_event_pp_disabled
-};
-
-static const pem_event_action * const power_source_change_event_hardware_dc[] = {
- set_power_source_tasks,
- set_power_saving_state_tasks,
- adjust_power_state_tasks,
- enable_disable_fps_tasks,
- reset_hardware_dc_notification_tasks,
- set_nbmcu_state_tasks,
- broadcast_power_policy_tasks,
- NULL
-};
-
-const struct action_chain power_source_change_action_chain_hardware_dc = {
- "Power source change - with Hardware DC switching",
- power_source_change_event_hardware_dc
-};
-
-static const pem_event_action * const suspend_event[] = {
- reset_display_phy_access_tasks,
- unregister_interrupt_tasks,
- disable_gfx_voltage_island_power_gating_tasks,
- disable_gfx_clock_gating_tasks,
- notify_smu_suspend_tasks,
- disable_smc_firmware_ctf_tasks,
- set_boot_state_tasks,
- adjust_power_state_tasks,
- disable_fps_tasks,
- vari_bright_suspend_tasks,
- reset_fan_speed_to_default_tasks,
- power_down_asic_tasks,
- disable_stutter_mode_tasks,
- set_connected_standby_tasks,
- block_hw_access_tasks,
- NULL
-};
-
-const struct action_chain suspend_action_chain = {
- "Suspend",
- suspend_event
-};
-
-static const pem_event_action * const resume_event[] = {
- unblock_hw_access_tasks,
- resume_connected_standby_tasks,
- notify_smu_resume_tasks,
- reset_display_configCounter_tasks,
- update_dal_configuration_tasks,
- vari_bright_resume_tasks,
- setup_asic_tasks,
- enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
- enable_dynamic_state_management_tasks,
- enable_disable_bapm_tasks,
- initialize_thermal_controller_tasks,
- get_2d_performance_state_tasks,
- set_performance_state_tasks,
- adjust_power_state_tasks,
- enable_disable_fps_tasks,
- notify_hw_power_source_tasks,
- process_vbios_event_info_tasks,
- enable_gfx_clock_gating_tasks,
- enable_gfx_voltage_island_power_gating_tasks,
- reset_clock_gating_tasks,
- notify_smu_vpu_recovery_end_tasks,
- disable_vpu_cap_tasks,
- execute_escape_sequence_tasks,
- NULL
-};
-
-
-const struct action_chain resume_action_chain = {
- "resume",
- resume_event
-};
-
-static const pem_event_action * const complete_init_event[] = {
- unblock_adjust_power_state_tasks,
- adjust_power_state_tasks,
- enable_gfx_clock_gating_tasks,
- enable_gfx_voltage_island_power_gating_tasks,
- notify_power_state_change_tasks,
- NULL
-};
-
-const struct action_chain complete_init_action_chain = {
- "complete init",
- complete_init_event
-};
-
-static const pem_event_action * const enable_gfx_clock_gating_event[] = {
- enable_gfx_clock_gating_tasks,
- NULL
-};
-
-const struct action_chain enable_gfx_clock_gating_action_chain = {
- "enable gfx clock gate",
- enable_gfx_clock_gating_event
-};
-
-static const pem_event_action * const disable_gfx_clock_gating_event[] = {
- disable_gfx_clock_gating_tasks,
- NULL
-};
-
-const struct action_chain disable_gfx_clock_gating_action_chain = {
- "disable gfx clock gate",
- disable_gfx_clock_gating_event
-};
-
-static const pem_event_action * const enable_cgpg_event[] = {
- enable_cgpg_tasks,
- NULL
-};
-
-const struct action_chain enable_cgpg_action_chain = {
-	"enable cg pg",
- enable_cgpg_event
-};
-
-static const pem_event_action * const disable_cgpg_event[] = {
- disable_cgpg_tasks,
- NULL
-};
-
-const struct action_chain disable_cgpg_action_chain = {
- "disable cg pg",
- disable_cgpg_event
-};
-
-
-/* Enable user _2d performance and activate */
-
-static const pem_event_action * const enable_user_state_event[] = {
- create_new_user_performance_state_tasks,
- adjust_power_state_tasks,
- NULL
-};
-
-const struct action_chain enable_user_state_action_chain = {
- "Enable user state",
- enable_user_state_event
-};
-
-static const pem_event_action * const enable_user_2d_performance_event[] = {
- enable_user_2d_performance_tasks,
- add_user_2d_performance_state_tasks,
- set_performance_state_tasks,
- adjust_power_state_tasks,
- delete_user_2d_performance_state_tasks,
- NULL
-};
-
-const struct action_chain enable_user_2d_performance_action_chain = {
- "enable_user_2d_performance_event_activate",
- enable_user_2d_performance_event
-};
-
-
-static const pem_event_action * const disable_user_2d_performance_event[] = {
- disable_user_2d_performance_tasks,
- delete_user_2d_performance_state_tasks,
- NULL
-};
-
-const struct action_chain disable_user_2d_performance_action_chain = {
- "disable_user_2d_performance_event",
- disable_user_2d_performance_event
-};
-
-
-static const pem_event_action * const display_config_change_event[] = {
- /* countDisplayConfigurationChangeEventTasks, */
- unblock_adjust_power_state_tasks,
- set_cpu_power_state,
- notify_hw_power_source_tasks,
- get_2d_performance_state_tasks,
- set_performance_state_tasks,
- /* updateDALConfigurationTasks,
- variBrightDisplayConfigurationChangeTasks, */
- adjust_power_state_tasks,
- /*enableDisableFPSTasks,
- setNBMCUStateTasks,
- notifyPCIEDeviceReadyTasks,*/
- NULL
-};
-
-const struct action_chain display_config_change_action_chain = {
- "Display configuration change",
- display_config_change_event
-};
-
-static const pem_event_action * const readjust_power_state_event[] = {
- adjust_power_state_tasks,
- NULL
-};
-
-const struct action_chain readjust_power_state_action_chain = {
- "re-adjust power state",
- readjust_power_state_event
-};
-
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h
deleted file mode 100644
index f181e53cdcda..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _EVENT_ACTION_CHAINS_H_
-#define _EVENT_ACTION_CHAINS_H_
-#include "eventmgr.h"
-
-extern const struct action_chain initialize_action_chain;
-
-extern const struct action_chain uninitialize_action_chain;
-
-extern const struct action_chain power_source_change_action_chain_pp_enabled;
-
-extern const struct action_chain power_source_changes_action_chain_pp_disabled;
-
-extern const struct action_chain power_source_change_action_chain_hardware_dc;
-
-extern const struct action_chain suspend_action_chain;
-
-extern const struct action_chain resume_action_chain;
-
-extern const struct action_chain complete_init_action_chain;
-
-extern const struct action_chain enable_gfx_clock_gating_action_chain;
-
-extern const struct action_chain disable_gfx_clock_gating_action_chain;
-
-extern const struct action_chain enable_cgpg_action_chain;
-
-extern const struct action_chain disable_cgpg_action_chain;
-
-extern const struct action_chain enable_user_2d_performance_action_chain;
-
-extern const struct action_chain disable_user_2d_performance_action_chain;
-
-extern const struct action_chain enable_user_state_action_chain;
-
-extern const struct action_chain readjust_power_state_action_chain;
-
-extern const struct action_chain display_config_change_action_chain;
-
-#endif /*_EVENT_ACTION_CHAINS_H_*/
-
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c
deleted file mode 100644
index a3cd230d636d..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "eventmgr.h"
-#include "eventinit.h"
-#include "ppinterrupt.h"
-#include "hardwaremanager.h"
-
-void pem_init_feature_info(struct pp_eventmgr *eventmgr)
-{
-
- /* PowerPlay info */
- eventmgr->ui_state_info[PP_PowerSource_AC].default_ui_lable =
- PP_StateUILabel_Performance;
-
- eventmgr->ui_state_info[PP_PowerSource_AC].current_ui_label =
- PP_StateUILabel_Performance;
-
- eventmgr->ui_state_info[PP_PowerSource_DC].default_ui_lable =
- PP_StateUILabel_Battery;
-
- eventmgr->ui_state_info[PP_PowerSource_DC].current_ui_label =
- PP_StateUILabel_Battery;
-
- if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_PowerPlaySupport)) {
- eventmgr->features[PP_Feature_PowerPlay].supported = true;
- eventmgr->features[PP_Feature_PowerPlay].version = PEM_CURRENT_POWERPLAY_FEATURE_VERSION;
- eventmgr->features[PP_Feature_PowerPlay].enabled_default = true;
- eventmgr->features[PP_Feature_PowerPlay].enabled = true;
- } else {
- eventmgr->features[PP_Feature_PowerPlay].supported = false;
- eventmgr->features[PP_Feature_PowerPlay].enabled = false;
- eventmgr->features[PP_Feature_PowerPlay].enabled_default = false;
- }
-
- eventmgr->features[PP_Feature_Force3DClock].supported = true;
- eventmgr->features[PP_Feature_Force3DClock].enabled = false;
- eventmgr->features[PP_Feature_Force3DClock].enabled_default = false;
- eventmgr->features[PP_Feature_Force3DClock].version = 1;
-
- /* over drive*/
- eventmgr->features[PP_Feature_User2DPerformance].version = 4;
- eventmgr->features[PP_Feature_User3DPerformance].version = 4;
- eventmgr->features[PP_Feature_OverdriveTest].version = 4;
-
- eventmgr->features[PP_Feature_OverDrive].version = 4;
- eventmgr->features[PP_Feature_OverDrive].enabled = false;
- eventmgr->features[PP_Feature_OverDrive].enabled_default = false;
-
- eventmgr->features[PP_Feature_User2DPerformance].supported = false;
- eventmgr->features[PP_Feature_User2DPerformance].enabled = false;
- eventmgr->features[PP_Feature_User2DPerformance].enabled_default = false;
-
- eventmgr->features[PP_Feature_User3DPerformance].supported = false;
- eventmgr->features[PP_Feature_User3DPerformance].enabled = false;
- eventmgr->features[PP_Feature_User3DPerformance].enabled_default = false;
-
- eventmgr->features[PP_Feature_OverdriveTest].supported = false;
- eventmgr->features[PP_Feature_OverdriveTest].enabled = false;
- eventmgr->features[PP_Feature_OverdriveTest].enabled_default = false;
-
- eventmgr->features[PP_Feature_OverDrive].supported = false;
-
- eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled_default = false;
- eventmgr->features[PP_Feature_PowerBudgetWaiver].version = 1;
- eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false;
- eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled = false;
-
- /* Multi UVD States support */
- eventmgr->features[PP_Feature_MultiUVDState].supported = false;
- eventmgr->features[PP_Feature_MultiUVDState].enabled = false;
- eventmgr->features[PP_Feature_MultiUVDState].enabled_default = false;
-
- /* Dynamic UVD States support */
- eventmgr->features[PP_Feature_DynamicUVDState].supported = false;
- eventmgr->features[PP_Feature_DynamicUVDState].enabled = false;
- eventmgr->features[PP_Feature_DynamicUVDState].enabled_default = false;
-
- /* VCE DPM support */
- eventmgr->features[PP_Feature_VCEDPM].supported = false;
- eventmgr->features[PP_Feature_VCEDPM].enabled = false;
- eventmgr->features[PP_Feature_VCEDPM].enabled_default = false;
-
- /* ACP PowerGating support */
- eventmgr->features[PP_Feature_ACP_POWERGATING].supported = false;
- eventmgr->features[PP_Feature_ACP_POWERGATING].enabled = false;
- eventmgr->features[PP_Feature_ACP_POWERGATING].enabled_default = false;
-
- /* PPM support */
- eventmgr->features[PP_Feature_PPM].version = 1;
- eventmgr->features[PP_Feature_PPM].supported = false;
- eventmgr->features[PP_Feature_PPM].enabled = false;
-
- /* FFC support (enables fan and temp settings, Gemini needs temp settings) */
- if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport) ||
- phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_GeminiRegulatorFanControlSupport)) {
- eventmgr->features[PP_Feature_FFC].version = 1;
- eventmgr->features[PP_Feature_FFC].supported = true;
- eventmgr->features[PP_Feature_FFC].enabled = true;
- eventmgr->features[PP_Feature_FFC].enabled_default = true;
- } else {
- eventmgr->features[PP_Feature_FFC].supported = false;
- eventmgr->features[PP_Feature_FFC].enabled = false;
- eventmgr->features[PP_Feature_FFC].enabled_default = false;
- }
-
- eventmgr->features[PP_Feature_VariBright].supported = false;
- eventmgr->features[PP_Feature_VariBright].enabled = false;
- eventmgr->features[PP_Feature_VariBright].enabled_default = false;
-
- eventmgr->features[PP_Feature_BACO].supported = false;
- eventmgr->features[PP_Feature_BACO].supported = false;
- eventmgr->features[PP_Feature_BACO].enabled_default = false;
-
- /* PowerDown feature support */
- eventmgr->features[PP_Feature_PowerDown].supported = false;
- eventmgr->features[PP_Feature_PowerDown].enabled = false;
- eventmgr->features[PP_Feature_PowerDown].enabled_default = false;
-
- eventmgr->features[PP_Feature_FPS].version = 1;
- eventmgr->features[PP_Feature_FPS].supported = false;
- eventmgr->features[PP_Feature_FPS].enabled_default = false;
- eventmgr->features[PP_Feature_FPS].enabled = false;
-
- eventmgr->features[PP_Feature_ViPG].version = 1;
- eventmgr->features[PP_Feature_ViPG].supported = false;
- eventmgr->features[PP_Feature_ViPG].enabled_default = false;
- eventmgr->features[PP_Feature_ViPG].enabled = false;
-}
-
-static int thermal_interrupt_callback(void *private_data,
- unsigned src_id, const uint32_t *iv_entry)
-{
-	/* TODO: handle PEM_Event_ThermalNotification (struct pp_eventmgr *)private_data */
- pr_info("current thermal is out of range \n");
- return 0;
-}
-
-int pem_register_interrupts(struct pp_eventmgr *eventmgr)
-{
- int result = 0;
- struct pp_interrupt_registration_info info;
-
- info.call_back = thermal_interrupt_callback;
- info.context = eventmgr;
-
- result = phm_register_thermal_interrupt(eventmgr->hwmgr, &info);
-
- /* TODO:
- * 2. Register CTF event interrupt
- * 3. Register for vbios events interrupt
- * 4. Register External Throttle Interrupt
- * 5. Register Smc To Host Interrupt
- * */
- return result;
-}
-
-
-int pem_unregister_interrupts(struct pp_eventmgr *eventmgr)
-{
- return 0;
-}
-
-
-void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr)
-{
- eventmgr->features[PP_Feature_MultiUVDState].supported = false;
- eventmgr->features[PP_Feature_VariBright].supported = false;
- eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false;
- eventmgr->features[PP_Feature_OverDrive].supported = false;
- eventmgr->features[PP_Feature_OverdriveTest].supported = false;
- eventmgr->features[PP_Feature_User3DPerformance].supported = false;
- eventmgr->features[PP_Feature_User2DPerformance].supported = false;
- eventmgr->features[PP_Feature_PowerPlay].supported = false;
- eventmgr->features[PP_Feature_Force3DClock].supported = false;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
deleted file mode 100644
index cd1ca07ef7f7..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "eventmanagement.h"
-#include "eventmgr.h"
-#include "eventactionchains.h"
-
-int pem_init_event_action_chains(struct pp_eventmgr *eventmgr)
-{
- int i;
-
- for (i = 0; i < AMD_PP_EVENT_MAX; i++)
- eventmgr->event_chain[i] = NULL;
-
- eventmgr->event_chain[AMD_PP_EVENT_SUSPEND] = pem_get_suspend_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_INITIALIZE] = pem_get_initialize_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_UNINITIALIZE] = pem_get_uninitialize_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_POWER_SOURCE_CHANGE] = pem_get_power_source_change_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_HIBERNATE] = pem_get_hibernate_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_RESUME] = pem_get_resume_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_THERMAL_NOTIFICATION] = pem_get_thermal_notification_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_VBIOS_NOTIFICATION] = pem_get_vbios_notification_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENTER_THERMAL_STATE] = pem_get_enter_thermal_state_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_EXIT_THERMAL_STATE] = pem_get_exit_thermal_state_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_POWER_PLAY] = pem_get_enable_powerplay_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISABLE_POWER_PLAY] = pem_get_disable_powerplay_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST] = pem_get_enable_overdrive_test_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST] = pem_get_disable_overdrive_test_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING] = pem_get_enable_gfx_clock_gating_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING] = pem_get_disable_gfx_clock_gating_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_CGPG] = pem_get_enable_cgpg_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISABLE_CGPG] = pem_get_disable_cgpg_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_COMPLETE_INIT] = pem_get_complete_init_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_SCREEN_ON] = pem_get_screen_on_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_SCREEN_OFF] = pem_get_screen_off_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_PRE_SUSPEND] = pem_get_pre_suspend_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_PRE_RESUME] = pem_get_pre_resume_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_USER_STATE] = pem_enable_user_state_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_READJUST_POWER_STATE] = pem_readjust_power_state_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE] = pem_display_config_change_action_chain(eventmgr);
- return 0;
-}
-
-int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data)
-{
- const pem_event_action * const *paction_chain;
- const pem_event_action *psub_chain;
- int tmp_result = 0;
- int result = 0;
-
- if (eventmgr == NULL || event_chain == NULL || event_data == NULL)
- return -EINVAL;
-
- for (paction_chain = event_chain->action_chain; NULL != *paction_chain; paction_chain++) {
- if (0 != result)
- return result;
-
- for (psub_chain = *paction_chain; NULL != *psub_chain; psub_chain++) {
- tmp_result = (*psub_chain)(eventmgr, event_data);
- if (0 == result)
- result = tmp_result;
- }
- }
-
- return result;
-}
-
-const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &suspend_action_chain;
-}
-
-const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &initialize_action_chain;
-}
-
-const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &uninitialize_action_chain;
-}
-
-const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return &power_source_change_action_chain_pp_enabled; /* other cases based on feature info */
-}
-
-const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &resume_action_chain;
-}
-
-const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &enable_gfx_clock_gating_action_chain;
-}
-
-const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &disable_gfx_clock_gating_action_chain;
-}
-
-const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &enable_cgpg_action_chain;
-}
-
-const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &disable_cgpg_action_chain;
-}
-
-const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &complete_init_action_chain;
-}
-
-const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &enable_user_state_action_chain;
-}
-
-const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &readjust_power_state_action_chain;
-}
-
-const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &display_config_change_action_chain;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h
deleted file mode 100644
index 383d4b295aa9..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _EVENT_MANAGEMENT_H_
-#define _EVENT_MANAGEMENT_H_
-
-#include "eventmgr.h"
-
-int pem_init_event_action_chains(struct pp_eventmgr *eventmgr);
-int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data);
-const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr);
-
-extern const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr);
-extern const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr);
-
-
-#endif /* _EVENT_MANAGEMENT_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
deleted file mode 100644
index 3e3ca03bd344..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include "eventmgr.h"
-#include "hwmgr.h"
-#include "eventinit.h"
-#include "eventmanagement.h"
-
-static int pem_init(struct pp_eventmgr *eventmgr)
-{
- int result = 0;
- struct pem_event_data event_data = { {0} };
-
- /* Initialize PowerPlay feature info */
- pem_init_feature_info(eventmgr);
-
- /* Initialize event action chains */
- pem_init_event_action_chains(eventmgr);
-
- /* Call initialization event */
- result = pem_handle_event(eventmgr, AMD_PP_EVENT_INITIALIZE, &event_data);
-
- /* if (0 != result)
- return result; */
-
- /* Register interrupt callback functions */
- result = pem_register_interrupts(eventmgr);
- return 0;
-}
-
-static void pem_fini(struct pp_eventmgr *eventmgr)
-{
- struct pem_event_data event_data = { {0} };
-
- pem_uninit_featureInfo(eventmgr);
- pem_unregister_interrupts(eventmgr);
-
- pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
-}
-
-int eventmgr_early_init(struct pp_instance *handle)
-{
- struct pp_eventmgr *eventmgr;
-
- if (handle == NULL)
- return -EINVAL;
-
- eventmgr = kzalloc(sizeof(struct pp_eventmgr), GFP_KERNEL);
- if (eventmgr == NULL)
- return -ENOMEM;
-
- eventmgr->hwmgr = handle->hwmgr;
- handle->eventmgr = eventmgr;
-
- eventmgr->platform_descriptor = &(eventmgr->hwmgr->platform_descriptor);
- eventmgr->pp_eventmgr_init = pem_init;
- eventmgr->pp_eventmgr_fini = pem_fini;
-
- return 0;
-}
-
-static int pem_handle_event_unlocked(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *data)
-{
- if (eventmgr == NULL || event >= AMD_PP_EVENT_MAX || data == NULL)
- return -EINVAL;
-
- return pem_excute_event_chain(eventmgr, eventmgr->event_chain[event], data);
-}
-
-int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *event_data)
-{
- int r = 0;
-
- r = pem_handle_event_unlocked(eventmgr, event, event_data);
-
- return r;
-}
-
-bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr)
-{
- return (eventmgr->block_adjust_power_state || phm_is_hw_access_blocked(eventmgr->hwmgr));
-}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c
deleted file mode 100644
index b82c43af59ab..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "eventmgr.h"
-#include "eventsubchains.h"
-#include "eventtasks.h"
-#include "hardwaremanager.h"
-
-const pem_event_action reset_display_phy_access_tasks[] = {
- pem_task_reset_display_phys_access,
- NULL
-};
-
-const pem_event_action broadcast_power_policy_tasks[] = {
- /* PEM_Task_BroadcastPowerPolicyChange, */
- NULL
-};
-
-const pem_event_action unregister_interrupt_tasks[] = {
- pem_task_unregister_interrupts,
- NULL
-};
-
-/* Disable GFX Voltage Islands Power Gating */
-const pem_event_action disable_gfx_voltage_island_powergating_tasks[] = {
- pem_task_disable_voltage_island_power_gating,
- NULL
-};
-
-const pem_event_action disable_gfx_clockgating_tasks[] = {
- pem_task_disable_gfx_clock_gating,
- NULL
-};
-
-const pem_event_action block_adjust_power_state_tasks[] = {
- pem_task_block_adjust_power_state,
- NULL
-};
-
-
-const pem_event_action unblock_adjust_power_state_tasks[] = {
- pem_task_unblock_adjust_power_state,
- NULL
-};
-
-const pem_event_action set_performance_state_tasks[] = {
- pem_task_set_performance_state,
- NULL
-};
-
-const pem_event_action get_2d_performance_state_tasks[] = {
- pem_task_get_2D_performance_state_id,
- NULL
-};
-
-const pem_event_action conditionally_force3D_performance_state_tasks[] = {
- pem_task_conditionally_force_3d_performance_state,
- NULL
-};
-
-const pem_event_action process_vbios_eventinfo_tasks[] = {
- /* PEM_Task_ProcessVbiosEventInfo,*/
- NULL
-};
-
-const pem_event_action enable_dynamic_state_management_tasks[] = {
- /* PEM_Task_ResetBAPMPolicyChangedFlag,*/
- pem_task_get_boot_state_id,
- pem_task_enable_dynamic_state_management,
- pem_task_register_interrupts,
- NULL
-};
-
-const pem_event_action enable_clock_power_gatings_tasks[] = {
- pem_task_enable_clock_power_gatings_tasks,
- pem_task_powerdown_uvd_tasks,
- pem_task_powerdown_vce_tasks,
- NULL
-};
-
-const pem_event_action setup_asic_tasks[] = {
- pem_task_setup_asic,
- NULL
-};
-
-const pem_event_action power_budget_tasks[] = {
- /* TODO
- * PEM_Task_PowerBudgetWaiverAvailable,
- * PEM_Task_PowerBudgetWarningMessage,
- * PEM_Task_PruneStatesBasedOnPowerBudget,
- */
- NULL
-};
-
-const pem_event_action system_config_tasks[] = {
- /* PEM_Task_PruneStatesBasedOnSystemConfig,*/
- NULL
-};
-
-
-const pem_event_action conditionally_force_3d_performance_state_tasks[] = {
- pem_task_conditionally_force_3d_performance_state,
- NULL
-};
-
-const pem_event_action ungate_all_display_phys_tasks[] = {
- /* PEM_Task_GetDisplayPhyAccessInfo */
- NULL
-};
-
-const pem_event_action uninitialize_display_phy_access_tasks[] = {
- /* PEM_Task_UninitializeDisplayPhysAccess, */
- NULL
-};
-
-const pem_event_action disable_gfx_voltage_island_power_gating_tasks[] = {
- /* PEM_Task_DisableVoltageIslandPowerGating, */
- NULL
-};
-
-const pem_event_action disable_gfx_clock_gating_tasks[] = {
- pem_task_disable_gfx_clock_gating,
- NULL
-};
-
-const pem_event_action set_boot_state_tasks[] = {
- pem_task_get_boot_state_id,
- pem_task_set_boot_state,
- NULL
-};
-
-const pem_event_action adjust_power_state_tasks[] = {
- pem_task_notify_hw_mgr_display_configuration_change,
- pem_task_adjust_power_state,
- pem_task_notify_smc_display_config_after_power_state_adjustment,
- pem_task_update_allowed_performance_levels,
- /* to do pem_task_Enable_disable_bapm, */
- NULL
-};
-
-const pem_event_action disable_dynamic_state_management_tasks[] = {
- pem_task_unregister_interrupts,
- pem_task_get_boot_state_id,
- pem_task_disable_dynamic_state_management,
- NULL
-};
-
-const pem_event_action disable_clock_power_gatings_tasks[] = {
- pem_task_disable_clock_power_gatings_tasks,
- NULL
-};
-
-const pem_event_action cleanup_asic_tasks[] = {
- /* PEM_Task_DisableFPS,*/
- pem_task_cleanup_asic,
- NULL
-};
-
-const pem_event_action prepare_for_pnp_stop_tasks[] = {
- /* PEM_Task_PrepareForPnpStop,*/
- NULL
-};
-
-const pem_event_action set_power_source_tasks[] = {
- pem_task_set_power_source,
- pem_task_notify_hw_of_power_source,
- NULL
-};
-
-const pem_event_action set_power_saving_state_tasks[] = {
- pem_task_reset_power_saving_state,
- pem_task_get_power_saving_state,
- pem_task_set_power_saving_state,
- /* PEM_Task_ResetODDCState,
- * PEM_Task_GetODDCState,
- * PEM_Task_SetODDCState,*/
- NULL
-};
-
-const pem_event_action enable_disable_fps_tasks[] = {
- /* PEM_Task_EnableDisableFPS,*/
- NULL
-};
-
-const pem_event_action set_nbmcu_state_tasks[] = {
- /* PEM_Task_NBMCUStateChange,*/
- NULL
-};
-
-const pem_event_action reset_hardware_dc_notification_tasks[] = {
- /* PEM_Task_ResetHardwareDCNotification,*/
- NULL
-};
-
-
-const pem_event_action notify_smu_suspend_tasks[] = {
- /* PEM_Task_NotifySMUSuspend,*/
- NULL
-};
-
-const pem_event_action disable_smc_firmware_ctf_tasks[] = {
- pem_task_disable_smc_firmware_ctf,
- NULL
-};
-
-const pem_event_action disable_fps_tasks[] = {
- /* PEM_Task_DisableFPS,*/
- NULL
-};
-
-const pem_event_action vari_bright_suspend_tasks[] = {
- /* PEM_Task_VariBright_Suspend,*/
- NULL
-};
-
-const pem_event_action reset_fan_speed_to_default_tasks[] = {
- /* PEM_Task_ResetFanSpeedToDefault,*/
- NULL
-};
-
-const pem_event_action power_down_asic_tasks[] = {
- /* PEM_Task_DisableFPS,*/
- pem_task_power_down_asic,
- NULL
-};
-
-const pem_event_action disable_stutter_mode_tasks[] = {
- /* PEM_Task_DisableStutterMode,*/
- NULL
-};
-
-const pem_event_action set_connected_standby_tasks[] = {
- /* PEM_Task_SetConnectedStandby,*/
- NULL
-};
-
-const pem_event_action block_hw_access_tasks[] = {
- pem_task_block_hw_access,
- NULL
-};
-
-const pem_event_action unblock_hw_access_tasks[] = {
- pem_task_un_block_hw_access,
- NULL
-};
-
-const pem_event_action resume_connected_standby_tasks[] = {
- /* PEM_Task_ResumeConnectedStandby,*/
- NULL
-};
-
-const pem_event_action notify_smu_resume_tasks[] = {
- /* PEM_Task_NotifySMUResume,*/
- NULL
-};
-
-const pem_event_action reset_display_configCounter_tasks[] = {
- pem_task_reset_display_phys_access,
- NULL
-};
-
-const pem_event_action update_dal_configuration_tasks[] = {
- /* PEM_Task_CheckVBlankTime,*/
- NULL
-};
-
-const pem_event_action vari_bright_resume_tasks[] = {
- /* PEM_Task_VariBright_Resume,*/
- NULL
-};
-
-const pem_event_action notify_hw_power_source_tasks[] = {
- pem_task_notify_hw_of_power_source,
- NULL
-};
-
-const pem_event_action process_vbios_event_info_tasks[] = {
- /* PEM_Task_ProcessVbiosEventInfo,*/
- NULL
-};
-
-const pem_event_action enable_gfx_clock_gating_tasks[] = {
- pem_task_enable_gfx_clock_gating,
- NULL
-};
-
-const pem_event_action enable_gfx_voltage_island_power_gating_tasks[] = {
- pem_task_enable_voltage_island_power_gating,
- NULL
-};
-
-const pem_event_action reset_clock_gating_tasks[] = {
- /* PEM_Task_ResetClockGating*/
- NULL
-};
-
-const pem_event_action notify_smu_vpu_recovery_end_tasks[] = {
- /* PEM_Task_NotifySmuVPURecoveryEnd,*/
- NULL
-};
-
-const pem_event_action disable_vpu_cap_tasks[] = {
- /* PEM_Task_DisableVPUCap,*/
- NULL
-};
-
-const pem_event_action execute_escape_sequence_tasks[] = {
- /* PEM_Task_ExecuteEscapesequence,*/
- NULL
-};
-
-const pem_event_action notify_power_state_change_tasks[] = {
- pem_task_notify_power_state_change,
- NULL
-};
-
-const pem_event_action enable_cgpg_tasks[] = {
- pem_task_enable_cgpg,
- NULL
-};
-
-const pem_event_action disable_cgpg_tasks[] = {
- pem_task_disable_cgpg,
- NULL
-};
-
-const pem_event_action enable_user_2d_performance_tasks[] = {
- /* PEM_Task_SetUser2DPerformanceFlag,*/
- /* PEM_Task_UpdateUser2DPerformanceEnableEvents,*/
- NULL
-};
-
-const pem_event_action add_user_2d_performance_state_tasks[] = {
- /* PEM_Task_Get2DPerformanceTemplate,*/
- /* PEM_Task_AllocateNewPowerStateMemory,*/
- /* PEM_Task_CopyNewPowerStateInfo,*/
- /* PEM_Task_UpdateNewPowerStateClocks,*/
- /* PEM_Task_UpdateNewPowerStateUser2DPerformanceFlag,*/
- /* PEM_Task_AddPowerState,*/
- /* PEM_Task_ReleaseNewPowerStateMemory,*/
- NULL
-};
-
-const pem_event_action delete_user_2d_performance_state_tasks[] = {
- /* PEM_Task_GetCurrentUser2DPerformanceStateID,*/
- /* PEM_Task_DeletePowerState,*/
- /* PEM_Task_SetCurrentUser2DPerformanceStateID,*/
- NULL
-};
-
-const pem_event_action disable_user_2d_performance_tasks[] = {
- /* PEM_Task_ResetUser2DPerformanceFlag,*/
- /* PEM_Task_UpdateUser2DPerformanceDisableEvents,*/
- NULL
-};
-
-const pem_event_action enable_stutter_mode_tasks[] = {
- pem_task_enable_stutter_mode,
- NULL
-};
-
-const pem_event_action enable_disable_bapm_tasks[] = {
- /*PEM_Task_EnableDisableBAPM,*/
- NULL
-};
-
-const pem_event_action reset_boot_state_tasks[] = {
- pem_task_reset_boot_state,
- NULL
-};
-
-const pem_event_action create_new_user_performance_state_tasks[] = {
- pem_task_create_user_performance_state,
- NULL
-};
-
-const pem_event_action initialize_thermal_controller_tasks[] = {
- pem_task_initialize_thermal_controller,
- NULL
-};
-
-const pem_event_action uninitialize_thermal_controller_tasks[] = {
- pem_task_uninitialize_thermal_controller,
- NULL
-};
-
-const pem_event_action set_cpu_power_state[] = {
- pem_task_set_cpu_power_state,
- NULL
-};
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h
deleted file mode 100644
index 7714cb927428..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _EVENT_SUB_CHAINS_H_
-#define _EVENT_SUB_CHAINS_H_
-
-#include "eventmgr.h"
-
-extern const pem_event_action reset_display_phy_access_tasks[];
-extern const pem_event_action broadcast_power_policy_tasks[];
-extern const pem_event_action unregister_interrupt_tasks[];
-extern const pem_event_action disable_GFX_voltage_island_powergating_tasks[];
-extern const pem_event_action disable_GFX_clockgating_tasks[];
-extern const pem_event_action block_adjust_power_state_tasks[];
-extern const pem_event_action unblock_adjust_power_state_tasks[];
-extern const pem_event_action set_performance_state_tasks[];
-extern const pem_event_action get_2D_performance_state_tasks[];
-extern const pem_event_action conditionally_force3D_performance_state_tasks[];
-extern const pem_event_action process_vbios_eventinfo_tasks[];
-extern const pem_event_action enable_dynamic_state_management_tasks[];
-extern const pem_event_action enable_clock_power_gatings_tasks[];
-extern const pem_event_action conditionally_force3D_performance_state_tasks[];
-extern const pem_event_action setup_asic_tasks[];
-extern const pem_event_action power_budget_tasks[];
-extern const pem_event_action system_config_tasks[];
-extern const pem_event_action get_2d_performance_state_tasks[];
-extern const pem_event_action conditionally_force_3d_performance_state_tasks[];
-extern const pem_event_action ungate_all_display_phys_tasks[];
-extern const pem_event_action uninitialize_display_phy_access_tasks[];
-extern const pem_event_action disable_gfx_voltage_island_power_gating_tasks[];
-extern const pem_event_action disable_gfx_clock_gating_tasks[];
-extern const pem_event_action set_boot_state_tasks[];
-extern const pem_event_action adjust_power_state_tasks[];
-extern const pem_event_action disable_dynamic_state_management_tasks[];
-extern const pem_event_action disable_clock_power_gatings_tasks[];
-extern const pem_event_action cleanup_asic_tasks[];
-extern const pem_event_action prepare_for_pnp_stop_tasks[];
-extern const pem_event_action set_power_source_tasks[];
-extern const pem_event_action set_power_saving_state_tasks[];
-extern const pem_event_action enable_disable_fps_tasks[];
-extern const pem_event_action set_nbmcu_state_tasks[];
-extern const pem_event_action reset_hardware_dc_notification_tasks[];
-extern const pem_event_action notify_smu_suspend_tasks[];
-extern const pem_event_action disable_smc_firmware_ctf_tasks[];
-extern const pem_event_action disable_fps_tasks[];
-extern const pem_event_action vari_bright_suspend_tasks[];
-extern const pem_event_action reset_fan_speed_to_default_tasks[];
-extern const pem_event_action power_down_asic_tasks[];
-extern const pem_event_action disable_stutter_mode_tasks[];
-extern const pem_event_action set_connected_standby_tasks[];
-extern const pem_event_action block_hw_access_tasks[];
-extern const pem_event_action unblock_hw_access_tasks[];
-extern const pem_event_action resume_connected_standby_tasks[];
-extern const pem_event_action notify_smu_resume_tasks[];
-extern const pem_event_action reset_display_configCounter_tasks[];
-extern const pem_event_action update_dal_configuration_tasks[];
-extern const pem_event_action vari_bright_resume_tasks[];
-extern const pem_event_action notify_hw_power_source_tasks[];
-extern const pem_event_action process_vbios_event_info_tasks[];
-extern const pem_event_action enable_gfx_clock_gating_tasks[];
-extern const pem_event_action enable_gfx_voltage_island_power_gating_tasks[];
-extern const pem_event_action reset_clock_gating_tasks[];
-extern const pem_event_action notify_smu_vpu_recovery_end_tasks[];
-extern const pem_event_action disable_vpu_cap_tasks[];
-extern const pem_event_action execute_escape_sequence_tasks[];
-extern const pem_event_action notify_power_state_change_tasks[];
-extern const pem_event_action enable_cgpg_tasks[];
-extern const pem_event_action disable_cgpg_tasks[];
-extern const pem_event_action enable_user_2d_performance_tasks[];
-extern const pem_event_action add_user_2d_performance_state_tasks[];
-extern const pem_event_action delete_user_2d_performance_state_tasks[];
-extern const pem_event_action disable_user_2d_performance_tasks[];
-extern const pem_event_action enable_stutter_mode_tasks[];
-extern const pem_event_action enable_disable_bapm_tasks[];
-extern const pem_event_action reset_boot_state_tasks[];
-extern const pem_event_action create_new_user_performance_state_tasks[];
-extern const pem_event_action initialize_thermal_controller_tasks[];
-extern const pem_event_action uninitialize_thermal_controller_tasks[];
-extern const pem_event_action set_cpu_power_state[];
-#endif /* _EVENT_SUB_CHAINS_H_ */
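Reviewer note: this header only carried extern declarations for the event sub-chain tables; the definitions lived in the rest of the eventmgr directory, which the following diffs delete as well (eventtasks.c, eventtasks.h, psm.c). The list had also accumulated duplicates over time (get_2D_performance_state_tasks next to get_2d_performance_state_tasks, and conditionally_force3D_performance_state_tasks declared twice), so dropping it wholesale loses nothing.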
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
deleted file mode 100644
index 8c4ebaae1e0c..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "eventmgr.h"
-#include "eventinit.h"
-#include "eventmanagement.h"
-#include "eventmanager.h"
-#include "hardwaremanager.h"
-#include "eventtasks.h"
-#include "power_state.h"
-#include "hwmgr.h"
-#include "amd_powerplay.h"
-#include "psm.h"
-
-#define TEMP_RANGE_MIN (90 * 1000)
-#define TEMP_RANGE_MAX (120 * 1000)
-
-int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
-
- if (eventmgr == NULL || eventmgr->hwmgr == NULL)
- return -EINVAL;
-
- if (pem_is_hw_access_blocked(eventmgr))
- return 0;
-
- phm_force_dpm_levels(eventmgr->hwmgr, eventmgr->hwmgr->dpm_level);
-
- return 0;
-}
-
-/* eventtasks_generic.c */
-int pem_task_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- struct pp_hwmgr *hwmgr;
-
- if (pem_is_hw_access_blocked(eventmgr))
- return 0;
-
- hwmgr = eventmgr->hwmgr;
- if (event_data->pnew_power_state != NULL)
- hwmgr->request_ps = event_data->pnew_power_state;
-
- if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
- psm_adjust_power_state_dynamic(eventmgr, event_data->skip_state_adjust_rules);
- else
- psm_adjust_power_state_static(eventmgr, event_data->skip_state_adjust_rules);
-
- return 0;
-}
-
-int pem_task_power_down_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_power_down_asic(eventmgr->hwmgr);
-}
-
-int pem_task_set_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- if (pem_is_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID))
- return psm_set_states(eventmgr, &(event_data->requested_state_id));
-
- return 0;
-}
-
-int pem_task_reset_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_update_new_power_state_clocks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_system_shutdown(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_register_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_unregister_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return pem_unregister_interrupts(eventmgr);
-}
-
-int pem_task_get_boot_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- int result;
-
- result = psm_get_state_by_classification(eventmgr,
- PP_StateClassificationFlag_Boot,
- &(event_data->requested_state_id)
- );
-
- if (0 == result)
- pem_set_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
- else
- pem_unset_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
-
- return result;
-}
-
-int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_enable_dynamic_state_management(eventmgr->hwmgr);
-}
-
-int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_disable_dynamic_state_management(eventmgr->hwmgr);
-}
-
-int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_enable_clock_power_gatings(eventmgr->hwmgr);
-}
-
-int pem_task_powerdown_uvd_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_powerdown_uvd(eventmgr->hwmgr);
-}
-
-int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- phm_powergate_uvd(eventmgr->hwmgr, true);
- phm_powergate_vce(eventmgr->hwmgr, true);
- return 0;
-}
-
-int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- phm_disable_clock_power_gatings(eventmgr->hwmgr);
- return 0;
-}
-
-int pem_task_start_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_disable_smc_firmware_ctf(eventmgr->hwmgr);
-}
-
-int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_setup_asic(eventmgr->hwmgr);
-}
-
-int pem_task_cleanup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_store_dal_configuration(struct pp_eventmgr *eventmgr, const struct amd_display_configuration *display_config)
-{
- /* TODO */
- return 0;
- /*phm_store_dal_configuration_data(eventmgr->hwmgr, display_config) */
-}
-
-int pem_task_notify_hw_mgr_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- if (pem_is_hw_access_blocked(eventmgr))
- return 0;
-
- return phm_display_configuration_changed(eventmgr->hwmgr);
-}
-
-int pem_task_notify_hw_mgr_pre_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return 0;
-}
-
-int pem_task_notify_smc_display_config_after_power_state_adjustment(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- if (pem_is_hw_access_blocked(eventmgr))
- return 0;
-
- return phm_notify_smc_display_config_after_ps_adjustment(eventmgr->hwmgr);
-}
-
-int pem_task_block_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- eventmgr->block_adjust_power_state = true;
- /* to do PHM_ResetIPSCounter(pEventMgr->pHwMgr);*/
- return 0;
-}
-
-int pem_task_unblock_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- eventmgr->block_adjust_power_state = false;
- return 0;
-}
-
-int pem_task_notify_power_state_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_un_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_reset_display_phys_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_set_cpu_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_set_cpu_power_state(eventmgr->hwmgr);
-}
-
-/*powersaving*/
-
-int pem_task_set_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_notify_hw_of_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_get_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_reset_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_set_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_set_screen_state_on(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_set_screen_state_off(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_enable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_disable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_enable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_disable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_enable_clock_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-
-int pem_task_enable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_disable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-
-/* performance */
-int pem_task_set_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- if (pem_is_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID))
- return psm_set_states(eventmgr, &(event_data->requested_state_id));
-
- return 0;
-}
-
-int pem_task_conditionally_force_3d_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_enable_stutter_mode(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_get_2D_performance_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- int result;
-
- if (eventmgr->features[PP_Feature_PowerPlay].supported &&
- !(eventmgr->features[PP_Feature_PowerPlay].enabled))
- result = psm_get_state_by_classification(eventmgr,
- PP_StateClassificationFlag_Boot,
- &(event_data->requested_state_id));
- else if (eventmgr->features[PP_Feature_User2DPerformance].enabled)
- result = psm_get_state_by_classification(eventmgr,
- PP_StateClassificationFlag_User2DPerformance,
- &(event_data->requested_state_id));
- else
- result = psm_get_ui_state(eventmgr, PP_StateUILabel_Performance,
- &(event_data->requested_state_id));
-
- if (0 == result)
- pem_set_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
- else
- pem_unset_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
-
- return result;
-}
-
-int pem_task_create_user_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- struct pp_power_state *state;
- int table_entries;
- struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
- int i;
-
- table_entries = hwmgr->num_ps;
- state = hwmgr->ps;
-
-restart_search:
- for (i = 0; i < table_entries; i++) {
- if (state->classification.ui_label & event_data->requested_ui_label) {
- event_data->pnew_power_state = state;
- return 0;
- }
- state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
- }
-
- switch (event_data->requested_ui_label) {
- case PP_StateUILabel_Battery:
- case PP_StateUILabel_Balanced:
- event_data->requested_ui_label = PP_StateUILabel_Performance;
- goto restart_search;
- default:
- break;
- }
- return -1;
-}
-
-int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- struct PP_TemperatureRange range;
-
- range.max = TEMP_RANGE_MAX;
- range.min = TEMP_RANGE_MIN;
-
- if (eventmgr == NULL || eventmgr->platform_descriptor == NULL)
- return -EINVAL;
-
- if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ThermalController))
- return phm_start_thermal_controller(eventmgr->hwmgr, &range);
-
- return 0;
-}
-
-int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_stop_thermal_controller(eventmgr->hwmgr);
-}
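Reviewer note: most of these pem_task_* entry points are either one-line forwards to the matching phm_*/psm_* call or empty TODO stubs, which is why the file can go without a functional replacement. One detail worth recording before it disappears: pem_task_create_user_performance_state retries its search via goto restart_search after downgrading Battery/Balanced to Performance, but it never rewinds the state cursor, so the second pass starts past the end of the packed hwmgr->ps array. A corrected sketch of that function follows; the types and labels are taken from the removed code above, while the fix itself is mine and purely illustrative, since the code is being deleted anyway:

static int pem_task_create_user_performance_state_fixed(struct pp_eventmgr *eventmgr,
							struct pem_event_data *event_data)
{
	struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
	struct pp_power_state *state;
	int i;

restart_search:
	state = hwmgr->ps;	/* rewind before every pass, unlike the original */
	for (i = 0; i < hwmgr->num_ps; i++) {
		if (state->classification.ui_label & event_data->requested_ui_label) {
			event_data->pnew_power_state = state;
			return 0;
		}
		/* entries are packed with a runtime stride, not sizeof(*state) */
		state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
	}

	switch (event_data->requested_ui_label) {
	case PP_StateUILabel_Battery:
	case PP_StateUILabel_Balanced:
		event_data->requested_ui_label = PP_StateUILabel_Performance;
		goto restart_search;
	default:
		break;
	}
	return -1;
}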
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h
deleted file mode 100644
index 37e7ca5a58e0..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _EVENT_TASKS_H_
-#define _EVENT_TASKS_H_
-#include "eventmgr.h"
-
-struct amd_display_configuration;
-
-/* eventtasks_generic.c */
-int pem_task_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_power_down_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_get_boot_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_reset_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_update_new_power_state_clocks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_system_shutdown(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_register_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_unregister_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_powerdown_uvd_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_start_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_cleanup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_store_dal_configuration (struct pp_eventmgr *eventmgr, const struct amd_display_configuration *display_config);
-int pem_task_notify_hw_mgr_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_notify_hw_mgr_pre_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_block_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_unblock_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_notify_power_state_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_un_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_reset_display_phys_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_cpu_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_notify_smc_display_config_after_power_state_adjustment(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-/*powersaving*/
-
-int pem_task_set_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_notify_hw_of_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_get_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_reset_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_screen_state_on(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_screen_state_off(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_stutter_mode(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-
-/* performance */
-int pem_task_set_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_conditionally_force_3d_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_get_2D_performance_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_create_user_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-/*thermal */
-int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-
-#endif /* _EVENT_TASKS_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
deleted file mode 100644
index 489908887e9c..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "psm.h"
-
-int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id)
-{
- struct pp_power_state *state;
- int table_entries;
- struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
- int i;
-
- table_entries = hwmgr->num_ps;
- state = hwmgr->ps;
-
- for (i = 0; i < table_entries; i++) {
- if (state->classification.ui_label & ui_label) {
- *state_id = state->id;
- return 0;
- }
- state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
- }
- return -1;
-}
-
-int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id)
-{
- struct pp_power_state *state;
- int table_entries;
- struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
- int i;
-
- table_entries = hwmgr->num_ps;
- state = hwmgr->ps;
-
- for (i = 0; i < table_entries; i++) {
- if (state->classification.flags & flag) {
- *state_id = state->id;
- return 0;
- }
- state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
- }
- return -1;
-}
-
-int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id)
-{
- struct pp_power_state *state;
- int table_entries;
- struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
- int i;
-
- table_entries = hwmgr->num_ps;
-
- state = hwmgr->ps;
-
- for (i = 0; i < table_entries; i++) {
- if (state->id == *state_id) {
- memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
- return 0;
- }
- state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
- }
- return -1;
-}
-
-int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
-{
-
- struct pp_power_state *pcurrent;
- struct pp_power_state *requested;
- struct pp_hwmgr *hwmgr;
- bool equal;
-
- if (skip)
- return 0;
-
- hwmgr = eventmgr->hwmgr;
- pcurrent = hwmgr->current_ps;
- requested = hwmgr->request_ps;
-
- if (requested == NULL)
- return 0;
-
- phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
-
- if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal)))
- equal = false;
-
- if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
- phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
- memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
- }
- return 0;
-}
-
-int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip)
-{
- return 0;
-}
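Reviewer note: all three lookups in this file walk hwmgr->ps the same way. The power states are stored back to back with a runtime stride of hwmgr->ps_size (each entry presumably carries ASIC-specific hardware state after the common struct pp_power_state header, so sizeof(*state) is not the spacing), which is why the iteration goes through an unsigned long cast. A sketch of that traversal factored into a single walker, purely for illustration; the callback and the function name are mine, only the types and the stride come from the code above:

typedef bool (*psm_match_fn)(const struct pp_power_state *state, unsigned long key);

static struct pp_power_state *psm_walk_states(struct pp_hwmgr *hwmgr,
					      psm_match_fn match, unsigned long key)
{
	struct pp_power_state *state = hwmgr->ps;
	int i;

	for (i = 0; i < hwmgr->num_ps; i++) {
		if (match(state, key))
			return state;
		/* step by ps_size bytes, not by sizeof(*state) */
		state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
	}
	return NULL;
}

psm_adjust_power_state_dynamic, for its part, only reprograms the hardware when the requested state differs from the current one or when phm_check_smc_update_required_for_display_configuration asks for it, and psm_adjust_power_state_static is a stub.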
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index d13fdadbbf9e..824fb6fe54ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -3,14 +3,15 @@
# Makefile for the 'hw manager' sub-component of powerplay.
# It provides the hardware management services for the driver.
-HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
+HARDWARE_MGR = hwmgr.o processpptables.o \
hardwaremanager.o pp_acpi.o cz_hwmgr.o \
cz_clockpowergating.o pppcielanes.o\
process_pptables_v1_0.o ppatomctrl.o ppatomfwctrl.o \
smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
smu7_clockpowergating.o \
vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
- vega10_thermal.o pp_overdriver.o rv_hwmgr.o
+ vega10_thermal.o rv_hwmgr.o pp_psm.o\
+ pp_overdriver.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
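Reviewer note: the object list changes in two ways. functiontables.o goes away, matching the removal of the phm_construct_table/phm_destroy_table machinery from cz_hwmgr.c further down, and pp_psm.o is added, consistent with the psm.c helpers deleted above moving from eventmgr/ into hwmgr/ (the new pp_psm.c itself is not part of the hunks shown here).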
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index b33935fcf428..44de0874629f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -103,16 +103,6 @@ int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr)
return 0;
}
-static int cz_tf_uvd_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result)
-{
- return 0;
-}
-
-static int cz_tf_vce_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result)
-{
- return 0;
-}
-
int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -123,12 +113,12 @@ int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
PHM_PlatformCaps_UVDDPM)) {
cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled;
dpm_features |= UVD_DPM_MASK;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
} else {
dpm_features |= UVD_DPM_MASK;
cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
}
return 0;
@@ -144,12 +134,12 @@ int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
PHM_PlatformCaps_VCEDPM)) {
cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled;
dpm_features |= VCE_DPM_MASK;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
} else {
dpm_features |= VCE_DPM_MASK;
cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
}
@@ -157,7 +147,7 @@ int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
}
-int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -183,10 +173,9 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
cz_dpm_update_uvd_dpm(hwmgr, false);
}
- return 0;
}
-int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -215,29 +204,6 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
AMD_CG_STATE_UNGATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
- return 0;
}
-
- return 0;
}
-
-static const struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
- /*we don't need an exit table here, because there is only D3 cold on Kv*/
- {
- .isFunctionNeededInRuntimeTable = phm_cf_want_uvd_power_gating,
- .tableFunction = cz_tf_uvd_power_gating_initialize
- },
- {
- .isFunctionNeededInRuntimeTable = phm_cf_want_vce_power_gating,
- .tableFunction = cz_tf_vce_power_gating_initialize
- },
- /* to do { NULL, cz_tf_xdma_power_gating_enable }, */
- { }
-};
-
-const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_enable_clock_power_gatings_list
-};
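Reviewer note: two independent cleanups in this file. First, every smum_send_msg_to_smc*() call now takes the struct pp_hwmgr pointer itself instead of hwmgr->smumgr, so callers no longer reach into the SMU manager; presumably the wrappers resolve it internally. Second, cz_enable_clock_power_gatings_list and its master-table header are dropped together with cz_tf_uvd_power_gating_initialize and cz_tf_vce_power_gating_initialize, both of which were empty stubs, so no behaviour is lost.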
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
index 1954ceaed439..92f707bc46e7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
@@ -29,8 +29,8 @@
extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
-extern int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+extern void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+extern void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
#endif /* _CZ_CLOCK_POWER_GATING_H_ */
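Reviewer note: the prototypes for cz_dpm_powergate_uvd/vce change from int to void to match the .c change above; both functions only ever returned 0, so no caller could have acted on the value.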
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index bc839ff0bdd0..ad1f6b57884b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -162,8 +162,8 @@ static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr)
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
if (cz_hwmgr->max_sclk_level == 0) {
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxSclkLevel);
- cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr->smumgr) + 1;
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
+ cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr) + 1;
}
return cz_hwmgr->max_sclk_level;
@@ -440,14 +440,7 @@ static int cz_construct_boot_state(struct pp_hwmgr *hwmgr)
return 0;
}
-static int cz_tf_reset_active_process_mask(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
-{
- return 0;
-}
-
-static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
{
struct SMU8_Fusion_ClkTable *clock_table;
int ret;
@@ -469,7 +462,7 @@ static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input,
if (!hwmgr->need_pp_table_upload)
return 0;
- ret = smum_download_powerplay_table(hwmgr->smumgr, &table);
+ ret = smum_download_powerplay_table(hwmgr, &table);
PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
"Fail to get clock table from SMU!", return -EINVAL;);
@@ -561,13 +554,12 @@ static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input,
(uint8_t)dividers.pll_post_divider;
}
- ret = smum_upload_powerplay_table(hwmgr->smumgr);
+ ret = smum_upload_powerplay_table(hwmgr);
return ret;
}
-static int cz_tf_init_sclk_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
@@ -593,8 +585,7 @@ static int cz_tf_init_sclk_limit(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_uvd_clock_voltage_dependency_table *table =
@@ -607,8 +598,8 @@ static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input,
cz_hwmgr->uvd_dpm.soft_min_clk = 0;
cz_hwmgr->uvd_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxUvdLevel);
- level = smum_get_argument(hwmgr->smumgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
+ level = smum_get_argument(hwmgr);
if (level < table->count)
clock = table->entries[level].vclk;
@@ -621,8 +612,7 @@ static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_vce_clock_voltage_dependency_table *table =
@@ -635,8 +625,8 @@ static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input,
cz_hwmgr->vce_dpm.soft_min_clk = 0;
cz_hwmgr->vce_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxEclkLevel);
- level = smum_get_argument(hwmgr->smumgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
+ level = smum_get_argument(hwmgr);
if (level < table->count)
clock = table->entries[level].ecclk;
@@ -649,8 +639,7 @@ static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_acp_clock_voltage_dependency_table *table =
@@ -663,8 +652,8 @@ static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input,
cz_hwmgr->acp_dpm.soft_min_clk = 0;
cz_hwmgr->acp_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxAclkLevel);
- level = smum_get_argument(hwmgr->smumgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
+ level = smum_get_argument(hwmgr);
if (level < table->count)
clock = table->entries[level].acpclk;
@@ -676,8 +665,7 @@ static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static void cz_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -686,22 +674,16 @@ static int cz_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input,
cz_hwmgr->samu_power_gated = false;
cz_hwmgr->acp_power_gated = false;
cz_hwmgr->pgacpinit = true;
-
- return 0;
}
-static int cz_tf_init_sclk_threshold(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static void cz_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
cz_hwmgr->low_sclk_interrupt_threshold = 0;
-
- return 0;
}
-static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+
+static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
@@ -722,12 +704,12 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
clock = hwmgr->display_config.min_core_set_clock;
if (clock == 0)
- pr_info("min_core_set_clock not set\n");
+ pr_debug("min_core_set_clock not set\n");
if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
cz_hwmgr->sclk_dpm.hard_min_clk = clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkHardMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.hard_min_clk,
@@ -753,7 +735,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) {
cz_hwmgr->sclk_dpm.soft_min_clk = clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
@@ -764,7 +746,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
PHM_PlatformCaps_StablePState) &&
cz_hwmgr->sclk_dpm.soft_max_clk != clock) {
cz_hwmgr->sclk_dpm.soft_max_clk = clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
@@ -774,9 +756,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
return 0;
}
-static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
@@ -786,7 +766,7 @@ static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr,
PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepSclk,
clks);
}
@@ -794,77 +774,84 @@ static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr,
return 0;
}
-static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_set_watermark_threshold(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr =
(struct cz_hwmgr *)(hwmgr->backend);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetWatermarkFrequency,
cz_hwmgr->sclk_dpm.soft_max_clk);
return 0;
}
-static int cz_tf_set_enabled_levels(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
{
+ struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+
+ if (hw_data->is_nb_dpm_enabled) {
+ if (enable) {
+ PP_DBG_LOG("enable Low Memory PState.\n");
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableLowMemoryPstate,
+ (lock ? 1 : 0));
+ } else {
+ PP_DBG_LOG("disable Low Memory PState.\n");
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableLowMemoryPstate,
+ (lock ? 1 : 0));
+ }
+ }
+
return 0;
}
-
-static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_disable_nb_dpm(struct pp_hwmgr *hwmgr)
{
int ret = 0;
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
unsigned long dpm_features = 0;
- if (!cz_hwmgr->is_nb_dpm_enabled) {
- PP_DBG_LOG("enabling ALL SMU features.\n");
+ if (cz_hwmgr->is_nb_dpm_enabled) {
+ cz_nbdpm_pstate_enable_disable(hwmgr, true, true);
dpm_features |= NB_DPM_MASK;
ret = smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_EnableAllSmuFeatures,
+ hwmgr,
+ PPSMC_MSG_DisableAllSmuFeatures,
dpm_features);
if (ret == 0)
- cz_hwmgr->is_nb_dpm_enabled = true;
+ cz_hwmgr->is_nb_dpm_enabled = false;
}
return ret;
}
-static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
+static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
-
- if (hw_data->is_nb_dpm_enabled) {
- if (enable) {
- PP_DBG_LOG("enable Low Memory PState.\n");
+ int ret = 0;
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_EnableLowMemoryPstate,
- (lock ? 1 : 0));
- } else {
- PP_DBG_LOG("disable Low Memory PState.\n");
+ struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ unsigned long dpm_features = 0;
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_DisableLowMemoryPstate,
- (lock ? 1 : 0));
- }
+ if (!cz_hwmgr->is_nb_dpm_enabled) {
+ PP_DBG_LOG("enabling ALL SMU features.\n");
+ dpm_features |= NB_DPM_MASK;
+ ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr,
+ PPSMC_MSG_EnableAllSmuFeatures,
+ dpm_features);
+ if (ret == 0)
+ cz_hwmgr->is_nb_dpm_enabled = true;
}
- return 0;
+ return ret;
}
-static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
{
bool disable_switch;
bool enable_low_mem_state;
@@ -886,64 +873,64 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr,
return 0;
}
-static const struct phm_master_table_item cz_set_power_state_list[] = {
- { .tableFunction = cz_tf_update_sclk_limit },
- { .tableFunction = cz_tf_set_deep_sleep_sclk_threshold },
- { .tableFunction = cz_tf_set_watermark_threshold },
- { .tableFunction = cz_tf_set_enabled_levels },
- { .tableFunction = cz_tf_enable_nb_dpm },
- { .tableFunction = cz_tf_update_low_mem_pstate },
- { }
-};
+static int cz_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+ int ret = 0;
-static const struct phm_master_table_header cz_set_power_state_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_set_power_state_list
-};
+ cz_update_sclk_limit(hwmgr);
+ cz_set_deep_sleep_sclk_threshold(hwmgr);
+ cz_set_watermark_threshold(hwmgr);
+ ret = cz_enable_nb_dpm(hwmgr);
+ if (ret)
+ return ret;
+ cz_update_low_mem_pstate(hwmgr, input);
-static const struct phm_master_table_item cz_setup_asic_list[] = {
- { .tableFunction = cz_tf_reset_active_process_mask },
- { .tableFunction = cz_tf_upload_pptable_to_smu },
- { .tableFunction = cz_tf_init_sclk_limit },
- { .tableFunction = cz_tf_init_uvd_limit },
- { .tableFunction = cz_tf_init_vce_limit },
- { .tableFunction = cz_tf_init_acp_limit },
- { .tableFunction = cz_tf_init_power_gate_state },
- { .tableFunction = cz_tf_init_sclk_threshold },
- { }
+ return 0;
};
-static const struct phm_master_table_header cz_setup_asic_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_setup_asic_list
-};
-static int cz_tf_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+
+ ret = cz_upload_pptable_to_smu(hwmgr);
+ if (ret)
+ return ret;
+ ret = cz_init_sclk_limit(hwmgr);
+ if (ret)
+ return ret;
+ ret = cz_init_uvd_limit(hwmgr);
+ if (ret)
+ return ret;
+ ret = cz_init_vce_limit(hwmgr);
+ if (ret)
+ return ret;
+ ret = cz_init_acp_limit(hwmgr);
+ if (ret)
+ return ret;
+
+ cz_init_power_gate_state(hwmgr);
+ cz_init_sclk_threshold(hwmgr);
+
+ return 0;
+}
+
+static void cz_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+
hw_data->disp_clk_bypass_pending = false;
hw_data->disp_clk_bypass = false;
-
- return 0;
}
-static int cz_tf_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static void cz_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
- hw_data->is_nb_dpm_enabled = false;
- return 0;
+ hw_data->is_nb_dpm_enabled = false;
}
-static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
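Reviewer note: the transformation repeated throughout cz_hwmgr.c is visible in this hunk. Step functions that used to be squeezed into the phm master-table prototype (hwmgr, input, output, storage, result) lose the unused parameters, and each table collapses into a single task function, such as cz_set_power_state_tasks and cz_setup_asic_task above, that calls the steps in order and propagates errors. For contrast, a sketch of how such a table was driven under the old scheme; the walker below is a simplified stand-in (the real generic table code, removed elsewhere in this series, also handles construction-time filtering via isFunctionNeededInRuntimeTable, table flags, and special return codes such as PP_Result_TableImmediateExit):

/* reduced to the one field every table in this file actually used */
struct phm_master_table_item_sketch {
	int (*tableFunction)(struct pp_hwmgr *hwmgr, void *input,
			     void *output, void *storage, int result);
};

static int phm_run_table_sketch(struct pp_hwmgr *hwmgr,
				const struct phm_master_table_item_sketch *item,
				void *input, void *output, void *storage)
{
	int result = 0;

	/* tables are terminated by an empty entry, like the trailing { } above */
	for (; item->tableFunction; item++) {
		result = item->tableFunction(hwmgr, input, output, storage, result);
		if (result)
			break;
	}
	return result;
}

The direct-call form trades the data-driven flexibility (tables could be assembled per ASIC at phm_construct_table time) for readable control flow and real error propagation.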
@@ -951,63 +938,68 @@ static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
hw_data->cc6_settings.cpu_pstate_separation_time = 0;
hw_data->cc6_settings.cpu_cc6_disable = false;
hw_data->cc6_settings.cpu_pstate_disable = false;
-
- return 0;
}
-static const struct phm_master_table_item cz_power_down_asic_list[] = {
- { .tableFunction = cz_tf_power_up_display_clock_sys_pll },
- { .tableFunction = cz_tf_clear_nb_dpm_flag },
- { .tableFunction = cz_tf_reset_cc6_data },
- { }
-};
-
-static const struct phm_master_table_header cz_power_down_asic_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_power_down_asic_list
+static int cz_power_off_asic(struct pp_hwmgr *hwmgr)
+{
+ cz_power_up_display_clock_sys_pll(hwmgr);
+ cz_clear_nb_dpm_flag(hwmgr);
+ cz_reset_cc6_data(hwmgr);
+ return 0;
};
-static int cz_tf_program_voting_clients(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static void cz_program_voting_clients(struct pp_hwmgr *hwmgr)
{
PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0,
PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
- return 0;
}
-static int cz_tf_start_dpm(struct pp_hwmgr *hwmgr, void *input, void *output,
- void *storage, int result)
+static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+ PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 0);
+}
+
+static int cz_start_dpm(struct pp_hwmgr *hwmgr)
{
- int res = 0xff;
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- unsigned long dpm_features = 0;
cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled;
- dpm_features |= SCLK_DPM_MASK;
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
- dpm_features);
+ SCLK_DPM_MASK);
+}
+
+static int cz_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+ int ret = 0;
+ struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ unsigned long dpm_features = 0;
- return res;
+ if (cz_hwmgr->dpm_flags & DPMFlags_SCLK_Enabled) {
+ dpm_features |= SCLK_DPM_MASK;
+ cz_hwmgr->dpm_flags &= ~DPMFlags_SCLK_Enabled;
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableAllSmuFeatures,
+ dpm_features);
+ }
+ return ret;
}
-static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_program_bootup_state(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock;
cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
@@ -1016,13 +1008,11 @@ static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static void cz_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
cz_hwmgr->acp_boot_level = 0xff;
- return 0;
}
static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
@@ -1031,67 +1021,52 @@ static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
int result;
unsigned long features;
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetFeatureStatus, 0);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
if (result == 0) {
- features = smum_get_argument(hwmgr->smumgr);
+ features = smum_get_argument(hwmgr);
if (features & check_feature)
return true;
}
- return result;
+ return false;
}
-static int cz_tf_check_for_dpm_disabled(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static bool cz_check_for_dpm_enabled(struct pp_hwmgr *hwmgr)
{
if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
- return PP_Result_TableImmediateExit;
- return 0;
+ return true;
+ return false;
}
-static int cz_tf_enable_didt(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
- /* TO DO */
- return 0;
-}
+ if (!cz_check_for_dpm_enabled(hwmgr)) {
+ pr_info("dpm has been disabled\n");
+ return 0;
+ }
+ cz_disable_nb_dpm(hwmgr);
-static int cz_tf_check_for_dpm_enabled(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
-{
- if (!cz_dpm_check_smu_features(hwmgr,
- SMU_EnabledFeatureScoreboard_SclkDpmOn))
- return PP_Result_TableImmediateExit;
- return 0;
-}
+ cz_clear_voting_clients(hwmgr);
+ if (cz_stop_dpm(hwmgr))
+ return -EINVAL;
-static const struct phm_master_table_item cz_disable_dpm_list[] = {
- { .tableFunction = cz_tf_check_for_dpm_enabled },
- { },
+ return 0;
};
+static int cz_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ if (cz_check_for_dpm_enabled(hwmgr)) {
+ pr_info("dpm has been enabled\n");
+ return 0;
+ }
-static const struct phm_master_table_header cz_disable_dpm_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_disable_dpm_list
-};
-
-static const struct phm_master_table_item cz_enable_dpm_list[] = {
- { .tableFunction = cz_tf_check_for_dpm_disabled },
- { .tableFunction = cz_tf_program_voting_clients },
- { .tableFunction = cz_tf_start_dpm },
- { .tableFunction = cz_tf_program_bootup_state },
- { .tableFunction = cz_tf_enable_didt },
- { .tableFunction = cz_tf_reset_acp_boot_level },
- { },
-};
+ cz_program_voting_clients(hwmgr);
+ if (cz_start_dpm(hwmgr))
+ return -EINVAL;
+ cz_program_bootup_state(hwmgr);
+ cz_reset_acp_boot_level(hwmgr);
-static const struct phm_master_table_header cz_enable_dpm_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_enable_dpm_list
+ return 0;
};
static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
@@ -1138,7 +1113,11 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
cz_ps->action = cz_current_ps->action;
- if (!force_high && (cz_ps->action == FORCE_HIGH))
+ if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ cz_nbdpm_pstate_enable_disable(hwmgr, false, false);
+ else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
+ cz_nbdpm_pstate_enable_disable(hwmgr, false, true);
+ else if (!force_high && (cz_ps->action == FORCE_HIGH))
cz_ps->action = CANCEL_FORCE_HIGH;
else if (force_high && (cz_ps->action != FORCE_HIGH))
cz_ps->action = FORCE_HIGH;
@@ -1173,62 +1152,16 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
cz_construct_boot_state(hwmgr);
- result = phm_construct_table(hwmgr, &cz_setup_asic_master,
- &(hwmgr->setup_asic));
- if (result != 0) {
- pr_err("Fail to construct setup ASIC\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &cz_power_down_asic_master,
- &(hwmgr->power_down_asic));
- if (result != 0) {
- pr_err("Fail to construct power down ASIC\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &cz_disable_dpm_master,
- &(hwmgr->disable_dynamic_state_management));
- if (result != 0) {
- pr_err("Fail to disable_dynamic_state\n");
- return result;
- }
- result = phm_construct_table(hwmgr, &cz_enable_dpm_master,
- &(hwmgr->enable_dynamic_state_management));
- if (result != 0) {
- pr_err("Fail to enable_dynamic_state\n");
- return result;
- }
- result = phm_construct_table(hwmgr, &cz_set_power_state_master,
- &(hwmgr->set_power_state));
- if (result != 0) {
- pr_err("Fail to construct set_power_state\n");
- return result;
- }
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS;
- result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings));
- if (result != 0) {
- pr_err("Fail to construct enable_clock_power_gatings\n");
- return result;
- }
return result;
}
static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
if (hwmgr != NULL) {
- phm_destroy_table(hwmgr, &(hwmgr->enable_clock_power_gatings));
- phm_destroy_table(hwmgr, &(hwmgr->set_power_state));
- phm_destroy_table(hwmgr, &(hwmgr->enable_dynamic_state_management));
- phm_destroy_table(hwmgr, &(hwmgr->disable_dynamic_state_management));
- phm_destroy_table(hwmgr, &(hwmgr->power_down_asic));
- phm_destroy_table(hwmgr, &(hwmgr->setup_asic));
-
- if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
- kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
kfree(hwmgr->backend);
hwmgr->backend = NULL;
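Reviewer note: with the master tables gone, backend_init no longer constructs the setup_asic, power_down_asic, enable/disable_dynamic_state_management, set_power_state and enable_clock_power_gatings tables, and backend_fini drops the matching phm_destroy_table calls. The fini path also loses the NULL check around the vddc_dep_on_dal_pwrl free; kfree(NULL) is a no-op, so the unconditional kfree plus reset is equivalent and shorter.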
@@ -1240,13 +1173,13 @@ static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMin));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
@@ -1278,13 +1211,13 @@ static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
cz_hwmgr->sclk_dpm.soft_max_clk = clock;
cz_hwmgr->sclk_dpm.hard_max_clk = clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
@@ -1297,13 +1230,13 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMax));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
@@ -1312,106 +1245,25 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
return 0;
}
-static int cz_phm_force_dpm_sclk(struct pp_hwmgr *hwmgr, uint32_t sclk)
-{
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- sclk,
- PPSMC_MSG_SetSclkSoftMin));
-
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(hwmgr,
- sclk,
- PPSMC_MSG_SetSclkSoftMax));
- return 0;
-}
-
-static int cz_get_profiling_clk(struct pp_hwmgr *hwmgr, uint32_t *sclk)
-{
- struct phm_clock_voltage_dependency_table *table =
- hwmgr->dyn_state.vddc_dependency_on_sclk;
- int32_t tmp_sclk;
- int32_t count;
-
- tmp_sclk = table->entries[table->count-1].clk * 70 / 100;
-
- for (count = table->count-1; count >= 0; count--) {
- if (tmp_sclk >= table->entries[count].clk) {
- tmp_sclk = table->entries[count].clk;
- *sclk = tmp_sclk;
- break;
- }
- }
- if (count < 0)
- *sclk = table->entries[0].clk;
-
- return 0;
-}
-
static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
- uint32_t sclk = 0;
int ret = 0;
- uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
-
- if (level == hwmgr->dpm_level)
- return ret;
-
- if (!(hwmgr->dpm_level & profile_mode_mask)) {
- /* enter profile mode, save current level, disable gfx cg*/
- if (level & profile_mode_mask) {
- hwmgr->saved_dpm_level = hwmgr->dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
- }
- } else {
- /* exit profile mode, restore level, enable gfx cg*/
- if (!(level & profile_mode_mask)) {
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
- level = hwmgr->saved_dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- }
- }
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
ret = cz_phm_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
ret = cz_phm_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = cz_phm_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
- break;
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- ret = cz_get_profiling_clk(hwmgr, &sclk);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
- cz_phm_force_dpm_sclk(hwmgr, sclk);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
- hwmgr->dpm_level = level;
- break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
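Reviewer note: cz_dpm_force_dpm_level is pared back considerably. The profile-mode enter/exit bookkeeping (saving hwmgr->dpm_level and toggling GFX clockgating through cgs_set_clockgating_state) is gone, cz_get_profiling_clk and cz_phm_force_dpm_sclk are removed, and hwmgr->dpm_level is no longer written here, presumably because a common caller now tracks the level. Behaviourally, AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD now forces the lowest sclk instead of the highest dependency-table entry at or below 70% of the top clock that the removed helper used to pick, and the @@ -1138 hunk above additionally routes PROFILE_PEAK and PROFILE_STANDARD through cz_nbdpm_pstate_enable_disable during state adjustment.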
@@ -1422,27 +1274,18 @@ static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_UVDPowerOFF);
+ if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
return 0;
}
int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDynamicPowerGating)) {
- return smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_UVDPowerON, 1);
- } else {
- return smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_UVDPowerON, 0);
- }
+ if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
+ return smum_send_msg_to_smc_with_parameter(
+ hwmgr,
+ PPSMC_MSG_UVDPowerON,
+ PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
}
return 0;
@@ -1456,16 +1299,16 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
if (!bgate) {
/* Stable Pstate is enabled and we need to set the UVD DPM to the highest level */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
+ if (PP_CAP(PHM_PlatformCaps_StablePState) ||
+ hwmgr->en_umd_pstate) {
cz_hwmgr->uvd_dpm.hard_min_clk =
ptable->entries[ptable->count - 1].vclk;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetUvdHardMin,
- cz_get_uvd_level(hwmgr,
- cz_hwmgr->uvd_dpm.hard_min_clk,
- PPSMC_MSG_SetUvdHardMin));
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetUvdHardMin,
+ cz_get_uvd_level(hwmgr,
+ cz_hwmgr->uvd_dpm.hard_min_clk,
+ PPSMC_MSG_SetUvdHardMin));
cz_enable_disable_uvd_dpm(hwmgr, true);
} else {
@@ -1485,32 +1328,32 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
/* Stable Pstate is enabled and we need to set the VCE DPM to the highest level */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
+ if (PP_CAP(PHM_PlatformCaps_StablePState) ||
+ hwmgr->en_umd_pstate) {
cz_hwmgr->vce_dpm.hard_min_clk =
ptable->entries[ptable->count - 1].ecclk;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetEclkHardMin,
- cz_get_eclk_level(hwmgr,
- cz_hwmgr->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetEclkHardMin,
+ cz_get_eclk_level(hwmgr,
+ cz_hwmgr->vce_dpm.hard_min_clk,
+ PPSMC_MSG_SetEclkHardMin));
} else {
/* Program HardMin based on the vce_arbiter.ecclk */
if (hwmgr->vce_arbiter.ecclk == 0) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkHardMin, 0);
/* disable ECLK DPM 0. Otherwise VCE could hang if
* switching SCLK from DPM 0 to 6/7 */
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkSoftMin, 1);
} else {
cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetEclkHardMin,
- cz_get_eclk_level(hwmgr,
- cz_hwmgr->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetEclkHardMin,
+ cz_get_eclk_level(hwmgr,
+ cz_hwmgr->vce_dpm.hard_min_clk,
+ PPSMC_MSG_SetEclkHardMin));
}
}
return 0;
@@ -1518,30 +1361,28 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_VCEPowerOFF);
return 0;
}
int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_VCEPowerON);
return 0;
}
-static int cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
return cz_hwmgr->sys_info.bootup_uma_clock;
}
-static int cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct cz_power_state *cz_ps;
@@ -1679,7 +1520,7 @@ static void cz_hw_print_display_cfg(
PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
data);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplaySizePowerParams,
data);
}
@@ -1744,10 +1585,10 @@ static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
switch (type) {
case PP_SCLK:
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
mask);
break;
@@ -1989,7 +1830,7 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = 0;
return 0;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
+ result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
if (0 == result) {
activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
activity_percent = activity_percent > 100 ? 100 : activity_percent;
@@ -2012,10 +1853,36 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
}
}
+static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+ uint32_t virtual_addr_low,
+ uint32_t virtual_addr_hi,
+ uint32_t mc_addr_low,
+ uint32_t mc_addr_hi,
+ uint32_t size)
+{
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramAddrHiVirtual,
+ mc_addr_hi);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramAddrLoVirtual,
+ mc_addr_low);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramAddrHiPhysical,
+ virtual_addr_hi);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramAddrLoPhysical,
+ virtual_addr_low);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramBufferSize,
+ size);
+ return 0;
+}
+
+
static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.backend_init = cz_hwmgr_backend_init,
.backend_fini = cz_hwmgr_backend_fini,
- .asic_setup = NULL,
.apply_state_adjust_rules = cz_apply_state_adjust_rules,
.force_dpm_level = cz_dpm_force_dpm_level,
.get_power_state_size = cz_get_power_state_size,
@@ -2036,7 +1903,14 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
.get_clock_by_type = cz_get_clock_by_type,
.get_max_high_clocks = cz_get_max_high_clocks,
+ .get_temperature = cz_thermal_get_temperature,
.read_sensor = cz_read_sensor,
+ .power_off_asic = cz_power_off_asic,
+ .asic_setup = cz_setup_asic_task,
+ .dynamic_state_management_enable = cz_enable_dpm_tasks,
+ .power_state_set = cz_set_power_state_tasks,
+ .dynamic_state_management_disable = cz_disable_dpm_tasks,
+ .notify_cac_buffer_info = cz_notify_cac_buffer_info,
};
int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
deleted file mode 100644
index bc7d8bd7e7cb..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include "hwmgr.h"
-
-static int phm_run_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table,
- void *input,
- void *output,
- void *temp_storage)
-{
- int result = 0;
- phm_table_function *function;
-
- if (rt_table->function_list == NULL) {
- pr_debug("this function not implement!\n");
- return 0;
- }
-
- for (function = rt_table->function_list; NULL != *function; function++) {
- int tmp = (*function)(hwmgr, input, output, temp_storage, result);
-
- if (tmp == PP_Result_TableImmediateExit)
- break;
- if (tmp) {
- if (0 == result)
- result = tmp;
- if (rt_table->exit_error)
- break;
- }
- }
-
- return result;
-}
-
-int phm_dispatch_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table,
- void *input, void *output)
-{
- int result;
- void *temp_storage;
-
- if (hwmgr == NULL || rt_table == NULL) {
- pr_err("Invalid Parameter!\n");
- return -EINVAL;
- }
-
- if (0 != rt_table->storage_size) {
- temp_storage = kzalloc(rt_table->storage_size, GFP_KERNEL);
- if (temp_storage == NULL) {
- pr_err("Could not allocate table temporary storage\n");
- return -ENOMEM;
- }
- } else {
- temp_storage = NULL;
- }
-
- result = phm_run_table(hwmgr, rt_table, input, output, temp_storage);
-
- kfree(temp_storage);
-
- return result;
-}
-
-int phm_construct_table(struct pp_hwmgr *hwmgr,
- const struct phm_master_table_header *master_table,
- struct phm_runtime_table_header *rt_table)
-{
- uint32_t function_count = 0;
- const struct phm_master_table_item *table_item;
- uint32_t size;
- phm_table_function *run_time_list;
- phm_table_function *rtf;
-
- if (hwmgr == NULL || master_table == NULL || rt_table == NULL) {
- pr_err("Invalid Parameter!\n");
- return -EINVAL;
- }
-
- for (table_item = master_table->master_list;
- NULL != table_item->tableFunction; table_item++) {
- if ((NULL == table_item->isFunctionNeededInRuntimeTable) ||
- (table_item->isFunctionNeededInRuntimeTable(hwmgr)))
- function_count++;
- }
-
- size = (function_count + 1) * sizeof(phm_table_function);
- run_time_list = kzalloc(size, GFP_KERNEL);
-
- if (NULL == run_time_list)
- return -ENOMEM;
-
- rtf = run_time_list;
- for (table_item = master_table->master_list;
- NULL != table_item->tableFunction; table_item++) {
- if ((rtf - run_time_list) > function_count) {
- pr_err("Check function results have changed\n");
- kfree(run_time_list);
- return -EINVAL;
- }
-
- if ((NULL == table_item->isFunctionNeededInRuntimeTable) ||
- (table_item->isFunctionNeededInRuntimeTable(hwmgr))) {
- *(rtf++) = table_item->tableFunction;
- }
- }
-
- if ((rtf - run_time_list) > function_count) {
- pr_err("Check function results have changed\n");
- kfree(run_time_list);
- return -EINVAL;
- }
-
- *rtf = NULL;
- rt_table->function_list = run_time_list;
- rt_table->exit_error = (0 != (master_table->flags & PHM_MasterTableFlag_ExitOnError));
- rt_table->storage_size = master_table->storage_size;
- return 0;
-}
-
-int phm_destroy_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table)
-{
- if (hwmgr == NULL || rt_table == NULL) {
- pr_err("Invalid Parameter\n");
- return -EINVAL;
- }
-
- if (NULL == rt_table->function_list)
- return 0;
-
- kfree(rt_table->function_list);
-
- rt_table->function_list = NULL;
- rt_table->storage_size = 0;
- rt_table->exit_error = false;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index fcc722ea7649..623cff90233d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -26,35 +26,22 @@
#include "hardwaremanager.h"
#include "power_state.h"
+
+#define TEMP_RANGE_MIN (0)
+#define TEMP_RANGE_MAX (80 * 1000)
+
#define PHM_FUNC_CHECK(hw) \
do { \
if ((hw) == NULL || (hw)->hwmgr_func == NULL) \
return -EINVAL; \
} while (0)
-bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr)
-{
- return hwmgr->block_hw_access;
-}
-
-int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block)
-{
- hwmgr->block_hw_access = block;
- return 0;
-}
-
int phm_setup_asic(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->asic_setup)
- return hwmgr->hwmgr_func->asic_setup(hwmgr);
- } else {
- return phm_dispatch_table(hwmgr, &(hwmgr->setup_asic),
- NULL, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->asic_setup)
+ return hwmgr->hwmgr_func->asic_setup(hwmgr);
return 0;
}
@@ -63,14 +50,8 @@ int phm_power_down_asic(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->power_off_asic)
- return hwmgr->hwmgr_func->power_off_asic(hwmgr);
- } else {
- return phm_dispatch_table(hwmgr, &(hwmgr->power_down_asic),
- NULL, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->power_off_asic)
+ return hwmgr->hwmgr_func->power_off_asic(hwmgr);
return 0;
}
@@ -86,13 +67,8 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
states.pcurrent_state = pcurrent_state;
states.pnew_state = pnew_power_state;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->power_state_set)
- return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);
- } else {
- return phm_dispatch_table(hwmgr, &(hwmgr->set_power_state), &states, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->power_state_set)
+ return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);
return 0;
}
@@ -103,15 +79,8 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
bool enabled;
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
- ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
- } else {
- ret = phm_dispatch_table(hwmgr,
- &(hwmgr->enable_dynamic_state_management),
- NULL, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
+ ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
enabled = ret == 0;
@@ -127,15 +96,8 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (hwmgr->hwmgr_func->dynamic_state_management_disable)
- ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
- } else {
- ret = phm_dispatch_table(hwmgr,
- &(hwmgr->disable_dynamic_state_management),
- NULL, NULL);
- }
+ if (hwmgr->hwmgr_func->dynamic_state_management_disable)
+ ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
enabled = ret == 0 ? false : true;
@@ -193,35 +155,13 @@ int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate)
-{
- PHM_FUNC_CHECK(hwmgr);
-
- if (hwmgr->hwmgr_func->powergate_uvd != NULL)
- return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
- return 0;
-}
-
-int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate)
-{
- PHM_FUNC_CHECK(hwmgr);
-
- if (hwmgr->hwmgr_func->powergate_vce != NULL)
- return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
- return 0;
-}
-
int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating)
- return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr);
- } else {
- return phm_dispatch_table(hwmgr, &(hwmgr->enable_clock_power_gatings), NULL, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating)
+ return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr);
+
return 0;
}
@@ -229,11 +169,9 @@ int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
- return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);
- }
+ if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
+ return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);
+
return 0;
}
@@ -242,12 +180,9 @@ int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->display_config_changed)
- hwmgr->hwmgr_func->display_config_changed(hwmgr);
- } else
- return phm_dispatch_table(hwmgr, &hwmgr->display_configuration_changed, NULL, NULL);
+ if (NULL != hwmgr->hwmgr_func->display_config_changed)
+ hwmgr->hwmgr_func->display_config_changed(hwmgr);
+
return 0;
}
@@ -255,9 +190,7 @@ int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface))
- if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
+ if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr);
return 0;
@@ -277,10 +210,10 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info)
{
PHM_FUNC_CHECK(hwmgr);
- if (hwmgr->hwmgr_func->register_internal_thermal_interrupt == NULL)
- return -EINVAL;
+ if (hwmgr->hwmgr_func->register_internal_thermal_interrupt != NULL)
+ return hwmgr->hwmgr_func->register_internal_thermal_interrupt(hwmgr, info);
- return hwmgr->hwmgr_func->register_internal_thermal_interrupt(hwmgr, info);
+ return 0;
}
/**
@@ -292,7 +225,21 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info)
*/
int phm_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *temperature_range)
{
- return phm_dispatch_table(hwmgr, &(hwmgr->start_thermal_controller), temperature_range, NULL);
+ struct PP_TemperatureRange range;
+
+ if (temperature_range == NULL) {
+ range.max = TEMP_RANGE_MAX;
+ range.min = TEMP_RANGE_MIN;
+ } else {
+ range.max = temperature_range->max;
+ range.min = temperature_range->min;
+ }
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController)
+ && hwmgr->hwmgr_func->start_thermal_controller != NULL)
+ return hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range);
+
+ return 0;
}
@@ -323,6 +270,9 @@ int phm_check_states_equal(struct pp_hwmgr *hwmgr,
int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
const struct amd_pp_display_configuration *display_config)
{
+ int index = 0;
+ int number_of_active_display = 0;
+
PHM_FUNC_CHECK(hwmgr);
if (display_config == NULL)
@@ -330,6 +280,17 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
hwmgr->display_config = *display_config;
+ if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
+ hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, hwmgr->display_config.min_dcef_deep_sleep_set_clk);
+
+ for (index = 0; index < hwmgr->display_config.num_path_including_non_display; index++) {
+ if (hwmgr->display_config.displays[index].controller_id != 0)
+ number_of_active_display++;
+ }
+
+ if (NULL != hwmgr->hwmgr_func->set_active_display_count)
+ hwmgr->hwmgr_func->set_active_display_count(hwmgr, number_of_active_display);
+
if (hwmgr->hwmgr_func->store_cc6_data == NULL)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 9547f265a8bb..ce59e0e67cb2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -26,8 +26,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/pci.h>
#include <drm/amdgpu_drm.h>
-#include "cgs_common.h"
#include "power_state.h"
#include "hwmgr.h"
#include "pppcielanes.h"
@@ -35,21 +35,100 @@
#include "ppsmc.h"
#include "pp_acpi.h"
#include "amd_acpi.h"
+#include "pp_psm.h"
-extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
+extern const struct pp_smumgr_func ci_smu_funcs;
+extern const struct pp_smumgr_func cz_smu_funcs;
+extern const struct pp_smumgr_func iceland_smu_funcs;
+extern const struct pp_smumgr_func tonga_smu_funcs;
+extern const struct pp_smumgr_func fiji_smu_funcs;
+extern const struct pp_smumgr_func polaris10_smu_funcs;
+extern const struct pp_smumgr_func vega10_smu_funcs;
+extern const struct pp_smumgr_func rv_smu_funcs;
+extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
uint8_t convert_to_vid(uint16_t vddc)
{
return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}
+static int phm_get_pci_bus_devfn(struct pp_hwmgr *hwmgr,
+ struct cgs_system_info *sys_info)
+{
+ sys_info->size = sizeof(struct cgs_system_info);
+ sys_info->info_id = CGS_SYSTEM_INFO_PCIE_BUS_DEVFN;
+
+ return cgs_query_system_info(hwmgr->device, sys_info);
+}
+
+static int phm_thermal_l2h_irq(void *private_data,
+ unsigned src_id, const uint32_t *iv_entry)
+{
+ struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
+ struct cgs_system_info sys_info = {0};
+ int result;
+
+ result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
+ if (result)
+ return -EINVAL;
+
+ pr_warn("GPU over temperature range detected on PCIe %lld:%lld.%lld!\n",
+ PCI_BUS_NUM(sys_info.value),
+ PCI_SLOT(sys_info.value),
+ PCI_FUNC(sys_info.value));
+ return 0;
+}
+
+static int phm_thermal_h2l_irq(void *private_data,
+ unsigned src_id, const uint32_t *iv_entry)
+{
+ struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
+ struct cgs_system_info sys_info = {0};
+ int result;
+
+ result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
+ if (result)
+ return -EINVAL;
+
+ pr_warn("GPU under temperature range detected on PCIe %lld:%lld.%lld!\n",
+ PCI_BUS_NUM(sys_info.value),
+ PCI_SLOT(sys_info.value),
+ PCI_FUNC(sys_info.value));
+ return 0;
+}
+
+static int phm_ctf_irq(void *private_data,
+ unsigned src_id, const uint32_t *iv_entry)
+{
+ struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
+ struct cgs_system_info sys_info = {0};
+ int result;
+
+ result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
+ if (result)
+ return -EINVAL;
+
+ pr_warn("GPU Critical Temperature Fault detected on PCIe %lld:%lld.%lld!\n",
+ PCI_BUS_NUM(sys_info.value),
+ PCI_SLOT(sys_info.value),
+ PCI_FUNC(sys_info.value));
+ return 0;
+}
+
+static const struct cgs_irq_src_funcs thermal_irq_src[3] = {
+ { .handler = phm_thermal_l2h_irq },
+ { .handler = phm_thermal_h2l_irq },
+ { .handler = phm_ctf_irq }
+};
+
int hwmgr_early_init(struct pp_instance *handle)
{
struct pp_hwmgr *hwmgr;
@@ -62,7 +141,6 @@ int hwmgr_early_init(struct pp_instance *handle)
return -ENOMEM;
handle->hwmgr = hwmgr;
- hwmgr->smumgr = handle->smu_mgr;
hwmgr->device = handle->device;
hwmgr->chip_family = handle->chip_family;
hwmgr->chip_id = handle->chip_id;
@@ -73,24 +151,38 @@ int hwmgr_early_init(struct pp_instance *handle)
hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
hwmgr_init_default_caps(hwmgr);
hwmgr_set_user_specify_caps(hwmgr);
+ hwmgr->fan_ctrl_is_in_default_mode = true;
+ hwmgr->reload_fw = 1;
switch (hwmgr->chip_family) {
+ case AMDGPU_FAMILY_CI:
+ hwmgr->smumgr_funcs = &ci_smu_funcs;
+ ci_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
+ PP_ENABLE_GFX_CG_THRU_SMU);
+ hwmgr->pp_table_version = PP_TABLE_V0;
+ smu7_init_function_pointers(hwmgr);
+ break;
case AMDGPU_FAMILY_CZ:
+ hwmgr->smumgr_funcs = &cz_smu_funcs;
cz_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_VI:
switch (hwmgr->chip_id) {
case CHIP_TOPAZ:
+ hwmgr->smumgr_funcs = &iceland_smu_funcs;
topaz_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
hwmgr->pp_table_version = PP_TABLE_V0;
break;
case CHIP_TONGA:
+ hwmgr->smumgr_funcs = &tonga_smu_funcs;
tonga_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
break;
case CHIP_FIJI:
+ hwmgr->smumgr_funcs = &fiji_smu_funcs;
fiji_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
@@ -98,6 +190,7 @@ int hwmgr_early_init(struct pp_instance *handle)
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
+ hwmgr->smumgr_funcs = &polaris10_smu_funcs;
polaris_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
break;
@@ -109,6 +202,7 @@ int hwmgr_early_init(struct pp_instance *handle)
case AMDGPU_FAMILY_AI:
switch (hwmgr->chip_id) {
case CHIP_VEGA10:
+ hwmgr->smumgr_funcs = &vega10_smu_funcs;
vega10_hwmgr_init(hwmgr);
break;
default:
@@ -118,6 +212,7 @@ int hwmgr_early_init(struct pp_instance *handle)
case AMDGPU_FAMILY_RV:
switch (hwmgr->chip_id) {
case CHIP_RAVEN:
+ hwmgr->smumgr_funcs = &rv_smu_funcs;
rv_init_function_pointers(hwmgr);
break;
default:
@@ -131,80 +226,6 @@ int hwmgr_early_init(struct pp_instance *handle)
return 0;
}
-static int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- unsigned int i;
- unsigned int table_entries;
- struct pp_power_state *state;
- int size;
-
- if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
- return -EINVAL;
-
- if (hwmgr->hwmgr_func->get_power_state_size == NULL)
- return -EINVAL;
-
- hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
-
- hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
- sizeof(struct pp_power_state);
-
- hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
- if (hwmgr->ps == NULL)
- return -ENOMEM;
-
- hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
- if (hwmgr->request_ps == NULL) {
- kfree(hwmgr->ps);
- hwmgr->ps = NULL;
- return -ENOMEM;
- }
-
- hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
- if (hwmgr->current_ps == NULL) {
- kfree(hwmgr->request_ps);
- kfree(hwmgr->ps);
- hwmgr->request_ps = NULL;
- hwmgr->ps = NULL;
- return -ENOMEM;
- }
-
- state = hwmgr->ps;
-
- for (i = 0; i < table_entries; i++) {
- result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state);
-
- if (state->classification.flags & PP_StateClassificationFlag_Boot) {
- hwmgr->boot_ps = state;
- memcpy(hwmgr->current_ps, state, size);
- memcpy(hwmgr->request_ps, state, size);
- }
-
- state->id = i + 1; /* assigned unique num for every power state id */
-
- if (state->classification.flags & PP_StateClassificationFlag_Uvd)
- hwmgr->uvd_ps = state;
- state = (struct pp_power_state *)((unsigned long)state + size);
- }
-
- return 0;
-}
-
-static int hw_fini_power_state_table(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr == NULL)
- return -EINVAL;
-
- kfree(hwmgr->current_ps);
- kfree(hwmgr->request_ps);
- kfree(hwmgr->ps);
- hwmgr->request_ps = NULL;
- hwmgr->ps = NULL;
- hwmgr->current_ps = NULL;
- return 0;
-}
-
int hwmgr_hw_init(struct pp_instance *handle)
{
struct pp_hwmgr *hwmgr;
@@ -228,9 +249,26 @@ int hwmgr_hw_init(struct pp_instance *handle)
if (ret)
goto err1;
- ret = hw_init_power_state_table(hwmgr);
+ ret = psm_init_power_state_table(hwmgr);
+ if (ret)
+ goto err2;
+
+ ret = phm_setup_asic(hwmgr);
if (ret)
goto err2;
+
+ ret = phm_enable_dynamic_state_management(hwmgr);
+ if (ret)
+ goto err2;
+ ret = phm_start_thermal_controller(hwmgr, NULL);
+ ret |= psm_set_performance_states(hwmgr);
+ if (ret)
+ goto err2;
+
+ ret = phm_register_thermal_interrupt(hwmgr, &thermal_irq_src);
+ if (ret)
+ goto err2;
+
return 0;
err2:
if (hwmgr->hwmgr_func->backend_fini)
@@ -247,19 +285,137 @@ int hwmgr_hw_fini(struct pp_instance *handle)
{
struct pp_hwmgr *hwmgr;
- if (handle == NULL)
+ if (handle == NULL || handle->hwmgr == NULL)
return -EINVAL;
hwmgr = handle->hwmgr;
+ phm_stop_thermal_controller(hwmgr);
+ psm_set_boot_states(hwmgr);
+ psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+ phm_disable_dynamic_state_management(hwmgr);
+ phm_disable_clock_power_gatings(hwmgr);
+
if (hwmgr->hwmgr_func->backend_fini)
hwmgr->hwmgr_func->backend_fini(hwmgr);
if (hwmgr->pptable_func->pptable_fini)
hwmgr->pptable_func->pptable_fini(hwmgr);
- return hw_fini_power_state_table(hwmgr);
+ return psm_fini_power_state_table(hwmgr);
}
+int hwmgr_hw_suspend(struct pp_instance *handle)
+{
+ struct pp_hwmgr *hwmgr;
+ int ret = 0;
+
+ if (handle == NULL || handle->hwmgr == NULL)
+ return -EINVAL;
+
+ hwmgr = handle->hwmgr;
+ phm_disable_smc_firmware_ctf(hwmgr);
+ ret = psm_set_boot_states(hwmgr);
+ if (ret)
+ return ret;
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+ if (ret)
+ return ret;
+ ret = phm_power_down_asic(hwmgr);
+
+ return ret;
+}
+int hwmgr_hw_resume(struct pp_instance *handle)
+{
+ struct pp_hwmgr *hwmgr;
+ int ret = 0;
+
+ if (handle == NULL || handle->hwmgr == NULL)
+ return -EINVAL;
+
+ hwmgr = handle->hwmgr;
+ ret = phm_setup_asic(hwmgr);
+ if (ret)
+ return ret;
+
+ ret = phm_enable_dynamic_state_management(hwmgr);
+ if (ret)
+ return ret;
+ ret = phm_start_thermal_controller(hwmgr, NULL);
+ if (ret)
+ return ret;
+
+ ret |= psm_set_performance_states(hwmgr);
+ if (ret)
+ return ret;
+
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+
+ return ret;
+}
+
+static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
+{
+ switch (state) {
+ case POWER_STATE_TYPE_BATTERY:
+ return PP_StateUILabel_Battery;
+ case POWER_STATE_TYPE_BALANCED:
+ return PP_StateUILabel_Balanced;
+ case POWER_STATE_TYPE_PERFORMANCE:
+ return PP_StateUILabel_Performance;
+ default:
+ return PP_StateUILabel_None;
+ }
+}
+
+int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
+ void *input, void *output)
+{
+ int ret = 0;
+ struct pp_hwmgr *hwmgr;
+
+ if (handle == NULL || handle->hwmgr == NULL)
+ return -EINVAL;
+
+ hwmgr = handle->hwmgr;
+
+ switch (task_id) {
+ case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+ ret = phm_set_cpu_power_state(hwmgr);
+ if (ret)
+ return ret;
+ ret = psm_set_performance_states(hwmgr);
+ if (ret)
+ return ret;
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+ break;
+ case AMD_PP_TASK_ENABLE_USER_STATE:
+ {
+ enum amd_pm_state_type ps;
+ enum PP_StateUILabel requested_ui_label;
+ struct pp_power_state *requested_ps = NULL;
+
+ if (input == NULL) {
+ ret = -EINVAL;
+ break;
+ }
+ ps = *(unsigned long *)input;
+
+ requested_ui_label = power_state_convert(ps);
+ ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
+ if (ret)
+ return ret;
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, requested_ps);
+ break;
+ }
+ case AMD_PP_TASK_COMPLETE_INIT:
+ case AMD_PP_TASK_READJUST_POWER_STATE:
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
/**
* Returns once the part of the register indicated by the mask has
* reached the given value.
@@ -294,7 +450,7 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
* reached the given value. The indirect space is described by giving
* the memory-mapped index of the indirect index register.
*/
-void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
+int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
uint32_t indirect_port,
uint32_t index,
uint32_t value,
@@ -302,14 +458,50 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
{
if (hwmgr == NULL || hwmgr->device == NULL) {
pr_err("Invalid Hardware Manager!");
- return;
+ return -EINVAL;
}
cgs_write_register(hwmgr->device, indirect_port, index);
- phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
+ return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
+}
+
+int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t index,
+ uint32_t value, uint32_t mask)
+{
+ uint32_t i;
+ uint32_t cur_value;
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < hwmgr->usec_timeout; i++) {
+ cur_value = cgs_read_register(hwmgr->device,
+ index);
+ if ((cur_value & mask) != (value & mask))
+ break;
+ udelay(1);
+ }
+
+ /* a timeout here indicates a logic error in the caller */
+ if (i == hwmgr->usec_timeout)
+ return -ETIME;
+ return 0;
}
+int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t indirect_port,
+ uint32_t index,
+ uint32_t value,
+ uint32_t mask)
+{
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+ cgs_write_register(hwmgr->device, indirect_port, index);
+ return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
+ value, mask);
+}
bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
{
@@ -678,7 +870,7 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
for (i = 0; i < vddc_table->count; i++) {
if (req_vddc <= vddc_table->entries[i].vddc) {
req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VddC_Request, req_volt);
return;
}
@@ -689,28 +881,8 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
{
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
-
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
-
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
@@ -735,7 +907,6 @@ void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_FanSpeedInTableIsRPM);
-
return;
}
@@ -784,7 +955,8 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
-
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -793,10 +965,6 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
-
if (hwmgr->chip_id != CHIP_POLARIS10)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SPLLShutdownSupport);
@@ -814,6 +982,8 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -822,15 +992,13 @@ int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_TDRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
return 0;
}
int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -844,14 +1012,25 @@ int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_UVDPowerGating);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating);
+ return 0;
+}
+int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
+ PHM_PlatformCaps_EVV);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
return 0;
}
-int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
@@ -862,8 +1041,8 @@ int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EVV);
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
index e0766c5e3d74..67fae834bc67 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
@@ -2,1263 +2,1252 @@
#include "pp_overdriver.h"
#include <linux/errno.h>
-struct phm_fuses_default vega10_fuses_default[] = {
- {"0000001000010011111010101001010011011110000011100100100101100100",0x00003C96,0xFFFFE226,0x00000656,0x00002203,0xFFFFF201,0x000003FF,0x00002203,0xFFFFF201,0x000003FF},
- {"0000001000010011111010101001010011011110000010100001100010000100",0x00003CC5,0xFFFFE23A,0x0000064E,0x00002258,0xFFFFF1F7,0x000003FC,0x00002258,0xFFFFF1F7,0x000003FC},
- {"0000001000010011111010101001010011011110000011100011000110100100",0x00003CAF,0xFFFFE36E,0x00000602,0x00001E98,0xFFFFF569,0x00000357,0x00001E98,0xFFFFF569,0x00000357},
- {"0000001000010011111010101001010011011110001011000001000101000100",0x0000391A,0xFFFFE548,0x000005C9,0x00001B98,0xFFFFF707,0x00000324,0x00001B98,0xFFFFF707,0x00000324},
- {"0000001000010011111010101001010011011110001011000001100011000100",0x00003821,0xFFFFE674,0x00000597,0x00002196,0xFFFFF361,0x000003C0,0x00002196,0xFFFFF361,0x000003C0},
- {"0000001000010011111010101001010011011110001001100011100010000100",0x000044A2,0xFFFFDCB7,0x00000738,0x0000325C,0xFFFFE6A7,0x000005E6,0x0000325C,0xFFFFE6A7,0x000005E6},
- {"0000001000010011111010101001010011011110000010000010100100100100",0x00004057,0xFFFFE1CF,0x0000063C,0x00002E2E,0xFFFFEB62,0x000004FD,0x00002E2E,0xFFFFEB62,0x000004FD},
- {"0000001000010011111010101001010011011110001010000100100100100100",0x00003FD0,0xFFFFDF0F,0x000006E5,0x0000267C,0xFFFFEE2D,0x000004AB,0x0000267C,0xFFFFEE2D,0x000004AB},
- {"0000001000010011111010101001010011011110001010000000100100000100",0x00003F13,0xFFFFE010,0x000006AD,0x000020E7,0xFFFFF266,0x000003EC,0x000020E7,0xFFFFF266,0x000003EC},
- {"0000001000010011111010101001010011011110000010000010000001000100",0x00004088,0xFFFFDFAB,0x000006B6,0x0000252B,0xFFFFEFDB,0x00000458,0x0000252B,0xFFFFEFDB,0x00000458},
- {"0000001000010011111010101001010011011110001010000011100010000100",0x00003EF6,0xFFFFE017,0x000006AA,0x00001F67,0xFFFFF369,0x000003BE,0x00001F67,0xFFFFF369,0x000003BE},
- {"0000001000010011111010101001010011011110001011000010000110000100",0x00003CDD,0xFFFFE2A7,0x0000063C,0x000026C6,0xFFFFEF38,0x00000478,0x000026C6,0xFFFFEF38,0x00000478},
- {"0000001000010011111010101001010011011110000100000101000100100100",0x00003FA8,0xFFFFDF02,0x000006F0,0x000027FE,0xFFFFECF6,0x000004EA,0x000027FE,0xFFFFECF6,0x000004EA},
- {"0000001000010011111010101001010011011110001001100011100011000100",0x00004670,0xFFFFDC40,0x00000742,0x00003A7A,0xFFFFE1A7,0x000006B6,0x00003A7A,0xFFFFE1A7,0x000006B6},
- {"0000001000010011111010101001010011011110001011000011000000100100",0x00003CDC,0xFFFFE18C,0x00000683,0x00002A69,0xFFFFEBE7,0x00000515,0x00002A69,0xFFFFEBE7,0x00000515},
- {"0000001000010011111010101001010011011110000011100011100011000100",0x00003CEC,0xFFFFE38E,0x00000601,0x00002752,0xFFFFEFA7,0x00000453,0x00002752,0xFFFFEFA7,0x00000453},
- {"0000001000010011111010101001010011011110001011000001000100100100",0x000037D0,0xFFFFE634,0x000005A7,0x00001CD2,0xFFFFF644,0x00000348,0x00001CD2,0xFFFFF644,0x00000348},
- {"0000001000010011111010101001010011011110001010000011100101100100",0x00003DF5,0xFFFFE0A5,0x00000698,0x00001FD5,0xFFFFF30E,0x000003D1,0x00001FD5,0xFFFFF30E,0x000003D1},
- {"0000001000010011111010101001010011011110000010000010100011000100",0x00004201,0xFFFFE03E,0x00000688,0x00003206,0xFFFFE852,0x0000058A,0x00003206,0xFFFFE852,0x0000058A},
- {"0000001000010011111010101001010011011110001011000001100001100100",0x00003BED,0xFFFFE2F5,0x00000638,0x0000270D,0xFFFFEED0,0x0000048E,0x0000270D,0xFFFFEED0,0x0000048E},
- {"0000001000010011111010101001010011011110000010100001100100000100",0x00003E82,0xFFFFE1BE,0x00000654,0x000025FB,0xFFFFEFFA,0x00000448,0x000025FB,0xFFFFEFFA,0x00000448},
- {"0000001000010011111010101001010011011110001011000100000011000100",0x00003962,0xFFFFE4B9,0x000005EF,0x00002385,0xFFFFF156,0x00000423,0x00002385,0xFFFFF156,0x00000423},
- {"0000001000010011111010101001010011011110001011000000100101000100",0x00003D88,0xFFFFE21A,0x00000655,0x0000295A,0xFFFFED68,0x000004C4,0x0000295A,0xFFFFED68,0x000004C4},
- {"0000001000010011111010101001010011011110001011000001000100000100",0x00003AA4,0xFFFFE4A3,0x000005E0,0x000022EF,0xFFFFF250,0x000003EB,0x000022EF,0xFFFFF250,0x000003EB},
- {"0000001000010011111010101001010011011110000011100010100110100100",0x00003D97,0xFFFFE30D,0x0000060D,0x0000205D,0xFFFFF45D,0x00000380,0x0000205D,0xFFFFF45D,0x00000380},
- {"0000001000010011111010101001010011011110001011000100000010100100",0x000039B6,0xFFFFE446,0x00000605,0x00002325,0xFFFFF16C,0x0000041F,0x00002325,0xFFFFF16C,0x0000041F},
- {"0000001000010011111010101001010011011110001001100011100100000100",0x0000457E,0xFFFFDCF6,0x00000722,0x00003972,0xFFFFE27B,0x0000068E,0x00003972,0xFFFFE27B,0x0000068E},
- {"0000001000010011111010101001010011011110000010100001100100100100",0x00003FB8,0xFFFFE101,0x00000670,0x00002787,0xFFFFEEF5,0x00000471,0x00002787,0xFFFFEEF5,0x00000471},
- {"0000001000010011111010101001010011011110000011100011100010100100",0x00003BB2,0xFFFFE430,0x000005EA,0x000024A5,0xFFFFF162,0x00000409,0x000024A5,0xFFFFF162,0x00000409},
- {"0000001000010011111010101001010011011110000010000010000101000100",0x00003EC5,0xFFFFE1BD,0x0000064F,0x000022F0,0xFFFFF227,0x000003E8,0x000022F0,0xFFFFF227,0x000003E8},
- {"0000001000010011111010101001010011011110001011000011000101100100",0x000038A7,0xFFFFE59F,0x000005C1,0x000021CC,0xFFFFF2DF,0x000003D9,0x000021CC,0xFFFFF2DF,0x000003D9},
- {"0000001000010011111010101001010011011110001100100100000110000100",0x00002995,0xFFFFEF7A,0x0000044C,0x00001552,0xFFFFFB5D,0x00000292,0x00001552,0xFFFFFB5D,0x00000292},
- {"0000001000010011111010101001010011011110001011000100000001100100",0x00003B26,0xFFFFE2D3,0x00000649,0x000023B4,0xFFFFF09B,0x00000449,0x000023B4,0xFFFFF09B,0x00000449},
- {"0000001000010011111010101001010011011110000010000001000100100100",0x000040D2,0xFFFFE00A,0x00000696,0x000022DA,0xFFFFF1E9,0x000003F2,0x000022DA,0xFFFFF1E9,0x000003F2},
- {"0000001000010011111010101001010011011110001011000011100100100100",0x00003C98,0xFFFFE365,0x00000618,0x00002D5D,0xFFFFEB3A,0x0000051D,0x00002D5D,0xFFFFEB3A,0x0000051D},
- {"0000001000010011111010101001010011011110001011000001000010100100",0x00003BBD,0xFFFFE37E,0x00000617,0x0000252E,0xFFFFF06E,0x00000441,0x0000252E,0xFFFFF06E,0x00000441},
- {"0000001000010011111010101001010011011110001001100010100100100100",0x00004363,0xFFFFDF7A,0x000006A0,0x000031F5,0xFFFFE880,0x0000057B,0x000031F5,0xFFFFE880,0x0000057B},
- {"0000001000010011111010101001010011011110000011100011100001000100",0x00003CFC,0xFFFFE2AF,0x0000062E,0x0000212A,0xFFFFF335,0x000003BF,0x0000212A,0xFFFFF335,0x000003BF},
- {"0000001000010011111010101001010011011110000111000100100100100100",0x0000252D,0xFFFFF31B,0x000003C3,0x00001A1A,0xFFFFF882,0x00000325,0x00001A1A,0xFFFFF882,0x00000325},
- {"0000001000010011111010101001010011011110000010100010100110100100",0x00003FE2,0xFFFFDFEF,0x000006AC,0x000025A2,0xFFFFEF84,0x00000462,0x000025A2,0xFFFFEF84,0x00000462},
- {"0000001000010011111010101001010011011110000010000010000011100100",0x000040A5,0xFFFFE13B,0x0000065B,0x00002C13,0xFFFFEC75,0x000004D7,0x00002C13,0xFFFFEC75,0x000004D7},
- {"0000001000010011111010101001010011011110000011100100100010100100",0x00003E42,0xFFFFE1B3,0x00000657,0x0000221D,0xFFFFF273,0x000003DE,0x0000221D,0xFFFFF273,0x000003DE},
- {"0000001000010011111010101001010011011110000010100010000011100100",0x00003E7F,0xFFFFE255,0x00000638,0x00002D30,0xFFFFEB8A,0x00000503,0x00002D30,0xFFFFEB8A,0x00000503},
- {"0000001000010011111010101001010011011110001011000010100111000100",0x00003E56,0xFFFFE16D,0x00000670,0x000028DC,0xFFFFEDA0,0x000004BA,0x000028DC,0xFFFFEDA0,0x000004BA},
- {"0000001000010011111010101001010011011110001001100011000010100100",0x000044AD,0xFFFFDE24,0x000006DD,0x000031AD,0xFFFFE850,0x00000585,0x000031AD,0xFFFFE850,0x00000585},
- {"0000001000010011111010101001010011011110001011000010000011100100",0x00003AF3,0xFFFFE5B0,0x000005A6,0x00002CF6,0xFFFFEC75,0x000004DD,0x00002CF6,0xFFFFEC75,0x000004DD},
- {"0000001000010011111010101001010011011110000010100010000010000100",0x00003E66,0xFFFFE19E,0x0000065B,0x00002332,0xFFFFF1B9,0x000003FD,0x00002332,0xFFFFF1B9,0x000003FD},
- {"0000001000010011111010101001010011011110000010000010100010000100",0x00003FB4,0xFFFFE0A5,0x00000686,0x0000253E,0xFFFFF02E,0x00000444,0x0000253E,0xFFFFF02E,0x00000444},
- {"0000001000010011111010101001010011011110001010000001100010100100",0x00003E28,0xFFFFE14D,0x0000066E,0x00001FE2,0xFFFFF39A,0x000003B1,0x00001FE2,0xFFFFF39A,0x000003B1},
- {"0000001000010011111010101001010011011110001011000000100100000100",0x000039E6,0xFFFFE44B,0x000005FE,0x0000210C,0xFFFFF2F4,0x000003DA,0x0000210C,0xFFFFF2F4,0x000003DA},
- {"0000001000010011111010101001010011011110001011000101000100000100",0x00003A4D,0xFFFFE252,0x0000067A,0x000027E2,0xFFFFECED,0x000004FA,0x000027E2,0xFFFFECED,0x000004FA},
- {"0000001000010011111010101001010011011110000010100010100101100100",0x00004065,0xFFFFE02F,0x0000069B,0x0000299D,0xFFFFED38,0x000004C2,0x0000299D,0xFFFFED38,0x000004C2},
- {"0000001000010011111010101001010011011110000011100010000010100100",0x000039EE,0xFFFFE603,0x00000594,0x0000214F,0xFFFFF429,0x0000038E,0x0000214F,0xFFFFF429,0x0000038E},
- {"0000001000010011111010101001010011011110000011100100100011100100",0x00003BD2,0xFFFFE351,0x00000618,0x000020B8,0xFFFFF377,0x000003B4,0x000020B8,0xFFFFF377,0x000003B4},
- {"0000001000010011111010101001010011011110000010100011000100100100",0x00003FAA,0xFFFFE183,0x0000065E,0x000032AE,0xFFFFE7C2,0x000005A6,0x000032AE,0xFFFFE7C2,0x000005A6},
- {"0000001000010011111010101001010011011110001011000010100110000100",0x00003AFB,0xFFFFE3E4,0x00000608,0x00002293,0xFFFFF21F,0x000003FA,0x00002293,0xFFFFF21F,0x000003FA},
- {"0000001000010011111010101001010011011110001001100010000001100100",0x0000448B,0xFFFFDD5D,0x0000070D,0x00002E4E,0xFFFFE9DF,0x00000551,0x00002E4E,0xFFFFE9DF,0x00000551},
- {"0000001000010011111010101001010011011110000011100010000110000100",0x00003D46,0xFFFFE39B,0x000005F3,0x0000218E,0xFFFFF3CD,0x00000398,0x0000218E,0xFFFFF3CD,0x00000398},
- {"0000001000010011111010101001010011011110000010000100100011100100",0x00003F01,0xFFFFDFD9,0x000006BF,0x000023AF,0xFFFFF04E,0x0000044C,0x000023AF,0xFFFFF04E,0x0000044C},
- {"0000001000010011111010101001010011011110000100000010100110100100",0x0000403D,0xFFFFDF6B,0x000006C9,0x0000270D,0xFFFFEE4B,0x0000049E,0x0000270D,0xFFFFEE4B,0x0000049E},
- {"0000001000010011111010101001010011011110000011100011100101100100",0x00003C11,0xFFFFE35C,0x00000613,0x000020F9,0xFFFFF365,0x000003B9,0x000020F9,0xFFFFF365,0x000003B9},
- {"0000001000010011111010101001010011011110001011000011100010000100",0x00003B58,0xFFFFE37D,0x0000061F,0x00002698,0xFFFFEF46,0x00000478,0x00002698,0xFFFFEF46,0x00000478},
- {"0000001000010011111010101001010011011110001010000100000110100100",0x00003EBC,0xFFFFDF7A,0x000006D6,0x0000212B,0xFFFFF195,0x0000041B,0x0000212B,0xFFFFF195,0x0000041B},
- {"0000001000010011111010101001010011011110000010000100100011000100",0x00004050,0xFFFFDEB3,0x000006FE,0x00002D6C,0xFFFFE961,0x00000582,0x00002D6C,0xFFFFE961,0x00000582},
- {"0000001000010011111010101001010011011110001001100010000001000100",0x000043F0,0xFFFFDD9C,0x00000702,0x00002B31,0xFFFFEBEA,0x000004F7,0x00002B31,0xFFFFEBEA,0x000004F7},
- {"0000001000010011111010101001010011011110000100000000100100100100",0x00003EFA,0xFFFFE093,0x00000696,0x000026DB,0xFFFFEEB3,0x00000489,0x000026DB,0xFFFFEEB3,0x00000489},
- {"0000001000010011111010101001010011011110000010000010000001100100",0x0000425D,0xFFFFDE8D,0x000006E6,0x00002CA4,0xFFFFEAD2,0x00000531,0x00002CA4,0xFFFFEAD2,0x00000531},
- {"0000001000010011111010101001010011011110001001100011100110100100",0x000043B0,0xFFFFDD03,0x00000728,0x00002946,0xFFFFECA6,0x000004DE,0x00002946,0xFFFFECA6,0x000004DE},
- {"0000001000010011111010101001010011011110001010000010100001100100",0x00003F6A,0xFFFFE03A,0x0000069D,0x00002208,0xFFFFF1F8,0x000003F6,0x00002208,0xFFFFF1F8,0x000003F6},
- {"0000001000010011111010101001010011011110001011000010100101100100",0x00003A94,0xFFFFE4A7,0x000005E2,0x000024D0,0xFFFFF100,0x00000426,0x000024D0,0xFFFFF100,0x00000426},
- {"0000001000010011111010101001010011011110001010000001000011000100",0x00003F2F,0xFFFFE0A3,0x00000688,0x00002198,0xFFFFF271,0x000003E2,0x00002198,0xFFFFF271,0x000003E2},
- {"0000001000010011111010101001010011011110000100000100100011100100",0x00003EA5,0xFFFFE032,0x000006AE,0x0000227C,0xFFFFF130,0x00000426,0x0000227C,0xFFFFF130,0x00000426},
- {"0000001000010011111010101001010011011110001001100100000101000100",0x0000442F,0xFFFFDBC4,0x0000078B,0x00003CD6,0xFFFFDE6C,0x0000076C,0x00003CD6,0xFFFFDE6C,0x0000076C},
- {"0000001000010011111010101001010011011110001010000010100010000100",0x00003DDE,0xFFFFE174,0x00000668,0x00001FF4,0xFFFFF38F,0x000003B1,0x00001FF4,0xFFFFF38F,0x000003B1},
- {"0000001000010011111010101001010011011110000010100011000101000100",0x000040B0,0xFFFFE016,0x000006A0,0x00002DBB,0xFFFFEA7F,0x00000537,0x00002DBB,0xFFFFEA7F,0x00000537},
- {"0000001000010011111010101001010011011110001011000011000100000100",0x00003429,0xFFFFEA97,0x000004DD,0x000024D5,0xFFFFF26F,0x000003DF,0x000024D5,0xFFFFF26F,0x000003DF},
- {"0000001000010011111010101001010011011110000011100001100100000100",0x00003AEB,0xFFFFE590,0x000005A3,0x000022CB,0xFFFFF347,0x000003B2,0x000022CB,0xFFFFF347,0x000003B2},
- {"0000001000010011111010101001010011011110001010000011100100000100",0x00003B8E,0xFFFFE2EF,0x00000636,0x00002351,0xFFFFF143,0x0000041C,0x00002351,0xFFFFF143,0x0000041C},
- {"0000001000010011111010101001010011011110001100100100000011000100",0x00002926,0xFFFFF0B0,0x00000410,0x0000194E,0xFFFFF94E,0x000002E9,0x0000194E,0xFFFFF94E,0x000002E9},
- {"0000001000010011111010101001010011011110001010000011000110000100",0x0000402B,0xFFFFDF78,0x000006C2,0x00002273,0xFFFFF16C,0x00000414,0x00002273,0xFFFFF16C,0x00000414},
- {"0000001000010011111010101001010011011110000010100001000010100100",0x00003D6A,0xFFFFE1D3,0x00000659,0x00002006,0xFFFFF394,0x000003B1,0x00002006,0xFFFFF394,0x000003B1},
- {"0000001000010011111010101001010011011110001010000100000001100100",0x00004042,0xFFFFDFD8,0x000006A8,0x00002135,0xFFFFF29F,0x000003D9,0x00002135,0xFFFFF29F,0x000003D9},
- {"0000001000010011111010101001010011011110000010000010000010100100",0x0000405B,0xFFFFE093,0x00000682,0x0000288F,0xFFFFEE3A,0x00000491,0x0000288F,0xFFFFEE3A,0x00000491},
- {"0000001000010011111010101001010011011110001011000100100010100100",0x00003A49,0xFFFFE30C,0x00000648,0x000023F9,0xFFFFF02D,0x00000460,0x000023F9,0xFFFFF02D,0x00000460},
- {"0000001000010011111010101001010011011110001010000010100101100100",0x00003D59,0xFFFFE1CC,0x0000065B,0x00002013,0xFFFFF37D,0x000003B6,0x00002013,0xFFFFF37D,0x000003B6},
- {"0000001000010011111010101001010011011110001011000011100110000100",0x000040C1,0xFFFFDF8C,0x000006CA,0x00003271,0xFFFFE6CA,0x000005EA,0x00003271,0xFFFFE6CA,0x000005EA},
- {"0000001000010011111010101001010011011110001001100010000011100100",0x000042E9,0xFFFFDFDC,0x0000068C,0x00002ED9,0xFFFFEAAF,0x0000051B,0x00002ED9,0xFFFFEAAF,0x0000051B},
- {"0000001000010011111010101001010011011110000010000011000010000100",0x000042ED,0xFFFFDE50,0x000006F0,0x00002FCF,0xFFFFE8BB,0x0000058C,0x00002FCF,0xFFFFE8BB,0x0000058C},
- {"0000001000010011111010101001010011011110000010100100000100000100",0x00003EBD,0xFFFFE099,0x00000698,0x00002709,0xFFFFEE7B,0x00000495,0x00002709,0xFFFFEE7B,0x00000495},
- {"0000001000010011111010101001010011011110001010000100100100000100",0x00003F71,0xFFFFDF82,0x000006C9,0x0000219B,0xFFFFF1AD,0x0000040F,0x0000219B,0xFFFFF1AD,0x0000040F},
- {"0000001000010011111010101001010011011110001010000000100011100100",0x00003E73,0xFFFFE080,0x0000069B,0x000020E7,0xFFFFF273,0x000003E9,0x000020E7,0xFFFFF273,0x000003E9},
- {"0000001000010011111010101001010011011110000011100011000110000100",0x00003E14,0xFFFFE278,0x0000062C,0x00002275,0xFFFFF2B3,0x000003CE,0x00002275,0xFFFFF2B3,0x000003CE},
- {"0000001000010011111010101001010011011110001011000010000110100100",0x00003ABB,0xFFFFE3B9,0x00000615,0x00002192,0xFFFFF28F,0x000003EB,0x00002192,0xFFFFF28F,0x000003EB},
- {"0000001000010011111010101001010011011110001010000011000100100100",0x00003D53,0xFFFFE255,0x00000643,0x0000275B,0xFFFFEEED,0x00000479,0x0000275B,0xFFFFEEED,0x00000479},
- {"0000001000010011111010101001010011011110001001100010100001100100",0x000043E3,0xFFFFDDC3,0x000006FB,0x00002B6B,0xFFFFEBD6,0x000004FA,0x00002B6B,0xFFFFEBD6,0x000004FA},
- {"0000001000010011111010101001010011011110000011100010000101000100",0x00003BDE,0xFFFFE507,0x000005B4,0x000022CE,0xFFFFF358,0x000003AB,0x000022CE,0xFFFFF358,0x000003AB},
- {"0000001000010011111010101001010011011110001100100011000101100100",0x00002460,0xFFFFF3B5,0x000003A2,0x000014E7,0xFFFFFC32,0x0000027C,0x000014E7,0xFFFFFC32,0x0000027C},
- {"0000001000010011111010101001010011011110001010000010000011000100",0x00003D20,0xFFFFE298,0x0000062F,0x00002080,0xFFFFF3AF,0x000003A8,0x00002080,0xFFFFF3AF,0x000003A8},
- {"0000001000010011111010101001010011011110000010000001100100000100",0x00003E14,0xFFFFE221,0x00000641,0x000021BB,0xFFFFF2EA,0x000003CA,0x000021BB,0xFFFFF2EA,0x000003CA},
- {"0000001000010011111010101001010011011110000010100100000011000100",0x00003DE1,0xFFFFE14E,0x00000677,0x00002468,0xFFFFF068,0x00000440,0x00002468,0xFFFFF068,0x00000440},
- {"0000001000010011111010101001010011011110001001100001000010000100",0x00004372,0xFFFFDDF8,0x000006F5,0x00002B3F,0xFFFFEBE8,0x000004F8,0x00002B3F,0xFFFFEBE8,0x000004F8},
- {"0000001000010011111010101001010011011110000010100010100011000100",0x00003E4F,0xFFFFE2A3,0x0000062B,0x00002F5A,0xFFFFEA37,0x0000053B,0x00002F5A,0xFFFFEA37,0x0000053B},
- {"0000001000010011111010101001010011011110001010000101000011100100",0x00003E07,0xFFFFE02F,0x000006B6,0x0000216B,0xFFFFF1A3,0x00000416,0x0000216B,0xFFFFF1A3,0x00000416},
- {"0000001000010011111010101001010011011110001010000011100010100100",0x00003DAB,0xFFFFE128,0x0000067F,0x0000216F,0xFFFFF236,0x000003F3,0x0000216F,0xFFFFF236,0x000003F3},
- {"0000001000010011111010101001010011011110001011000010100100100100",0x0000364B,0xFFFFE8CB,0x0000052A,0x00002568,0xFFFFF1B2,0x00000400,0x00002568,0xFFFFF1B2,0x00000400},
- {"0000001000010011111010101001010011011110001001100001000001100100",0x00004219,0xFFFFDE87,0x000006E8,0x00002C59,0xFFFFEAEE,0x00000529,0x00002C59,0xFFFFEAEE,0x00000529},
- {"0000001000010011111010101001010011011110000011100001100101000100",0x000039A8,0xFFFFE602,0x00000594,0x00001D06,0xFFFFF6F0,0x00000316,0x00001D06,0xFFFFF6F0,0x00000316},
- {"0000001000010011111010101001010011011110001001100001000011100100",0x00004052,0xFFFFE01C,0x00000698,0x00002310,0xFFFFF1A1,0x000003FE,0x00002310,0xFFFFF1A1,0x000003FE},
- {"0000001000010011111010101001010011011110000011100010100000100100",0x00003C1C,0xFFFFE3EB,0x000005F1,0x00002289,0xFFFFF2CF,0x000003C9,0x00002289,0xFFFFF2CF,0x000003C9},
- {"0000001000010011111010101001010011011110000011100101000100100100",0x00003F19,0xFFFFE085,0x0000069E,0x00002B94,0xFFFFEB72,0x0000051D,0x00002B94,0xFFFFEB72,0x0000051D},
- {"0000001000010011111010101001010011011110000011100100000110100100",0x00003C51,0xFFFFE2AD,0x00000638,0x0000206B,0xFFFFF361,0x000003BE,0x0000206B,0xFFFFF361,0x000003BE},
- {"0000001000010011111010101001010011011110001001100001000011000100",0x000040B9,0xFFFFDFBB,0x000006AB,0x0000241F,0xFFFFF0CC,0x00000425,0x0000241F,0xFFFFF0CC,0x00000425},
- {"0000001000010011111010101001010011011110000010100010000001100100",0x00003E62,0xFFFFE12C,0x00000678,0x00002445,0xFFFFF09E,0x00000435,0x00002445,0xFFFFF09E,0x00000435},
- {"0000001000010011111010101001010011011110000011100001100110000100",0x00003C97,0xFFFFE399,0x000005FB,0x0000209D,0xFFFFF41D,0x0000038F,0x0000209D,0xFFFFF41D,0x0000038F},
- {"0000001000010011111010101001010011011110000011100011000101000100",0x00003FF9,0xFFFFE1E9,0x0000063E,0x00002E96,0xFFFFEAF5,0x00000516,0x00002E96,0xFFFFEAF5,0x00000516},
- {"0000001000010011111010101001010011011110000010100011000010000100",0x00003F04,0xFFFFE109,0x0000067A,0x000026E1,0xFFFFEF0B,0x00000476,0x000026E1,0xFFFFEF0B,0x00000476},
- {"0000001000010011111010101001010011011110000100000001000100100100",0x00003E3E,0xFFFFE187,0x00000660,0x00002049,0xFFFFF38D,0x000003B0,0x00002049,0xFFFFF38D,0x000003B0},
- {"0000001000010011111010101001010011011110001010000010100101000100",0x00003D58,0xFFFFE253,0x0000063D,0x00002158,0xFFFFF308,0x000003C3,0x00002158,0xFFFFF308,0x000003C3},
- {"0000001000010011111010101001010011011110000010000100000011000100",0x00004074,0xFFFFDF8D,0x000006C0,0x00002799,0xFFFFEE19,0x000004A5,0x00002799,0xFFFFEE19,0x000004A5},
- {"0000001000010011111010101001010011011110001010000001100100100100",0x00003DAF,0xFFFFE1C9,0x00000659,0x000020E5,0xFFFFF313,0x000003C6,0x000020E5,0xFFFFF313,0x000003C6},
- {"0000001000010011111010101001010011011110000010100011100101100100",0x000041DD,0xFFFFDDFA,0x0000071B,0x0000348D,0xFFFFE4B4,0x0000064C,0x0000348D,0xFFFFE4B4,0x0000064C},
- {"0000001000010011111010101001010011011110001011000010100010000100",0x00003947,0xFFFFE5AE,0x000005B8,0x000024A6,0xFFFFF140,0x0000041D,0x000024A6,0xFFFFF140,0x0000041D},
- {"0000001000010011111010101001010011011110000100000001100001000100",0x00003D35,0xFFFFE197,0x0000066E,0x00002248,0xFFFFF1BC,0x00000408,0x00002248,0xFFFFF1BC,0x00000408},
- {"0000001000010011111010101001010011011110000010100001100011100100",0x00003F4F,0xFFFFE13E,0x0000066D,0x00002AF0,0xFFFFEC99,0x000004DB,0x00002AF0,0xFFFFEC99,0x000004DB},
- {"0000001000010011111010101001010011011110001001100011100101000100",0x0000430F,0xFFFFDDFB,0x000006FC,0x00002D4D,0xFFFFEA55,0x00000540,0x00002D4D,0xFFFFEA55,0x00000540},
- {"0000001000010011111010101001010011011110000011100010100101000100",0x00003B22,0xFFFFE543,0x000005B1,0x000022E1,0xFFFFF31B,0x000003B9,0x000022E1,0xFFFFF31B,0x000003B9},
- {"0000001000010011111010101001010011011110000011100010000010000100",0x00003978,0xFFFFE611,0x00000592,0x00001C36,0xFFFFF771,0x00000302,0x00001C36,0xFFFFF771,0x00000302},
- {"0000001000010011111010101001010011011110001001100010000101100100",0x000044DF,0xFFFFDDAB,0x000006F2,0x00002CEA,0xFFFFEB47,0x00000507,0x00002CEA,0xFFFFEB47,0x00000507},
- {"0000001000010011111010101001010011011110000010100011100011000100",0x00003E9B,0xFFFFE12C,0x0000067C,0x00002B79,0xFFFFEBD9,0x00000503,0x00002B79,0xFFFFEBD9,0x00000503},
- {"0000001000010011111010101001010011011110001001100011000001000100",0x00004464,0xFFFFDCD3,0x00000731,0x00002D14,0xFFFFEA2D,0x0000054E,0x00002D14,0xFFFFEA2D,0x0000054E},
- {"0000001000010011111010101001010011011110001010000001000100100100",0x00003FB3,0xFFFFE052,0x00000693,0x000020AC,0xFFFFF311,0x000003C6,0x000020AC,0xFFFFF311,0x000003C6},
- {"0000001000010011111010101001010011011110001011000001000010000100",0x00003BDA,0xFFFFE2FB,0x00000636,0x0000261E,0xFFFFEF72,0x00000471,0x0000261E,0xFFFFEF72,0x00000471},
- {"0000001000010011111010101001010011011110001011000001100101100100",0x00003D72,0xFFFFE28A,0x0000063E,0x000029D8,0xFFFFED54,0x000004C7,0x000029D8,0xFFFFED54,0x000004C7},
- {"0000001000010011111010101001010011011110001011000010100000100100",0x00003E26,0xFFFFE102,0x00000694,0x00002DD1,0xFFFFE9CA,0x0000056D,0x00002DD1,0xFFFFE9CA,0x0000056D},
- {"0000001000010011111010101001010011011110000100000100000100100100",0x000041CD,0xFFFFDE97,0x000006ED,0x00002DE5,0xFFFFE9B9,0x00000565,0x00002DE5,0xFFFFE9B9,0x00000565},
- {"0000001000010011111010101001010011011110000010100010100110000100",0x00003F30,0xFFFFE06E,0x00000698,0x000024FF,0xFFFFEFFC,0x0000044F,0x000024FF,0xFFFFEFFC,0x0000044F},
- {"0000001000010011111010101001010011011110001011000011100011000100",0x0000378B,0xFFFFE6B4,0x00000594,0x000023A7,0xFFFFF1DC,0x00000407,0x000023A7,0xFFFFF1DC,0x00000407},
- {"0000001000010011111010101001010011011110000011100100000101100100",0x00003CD7,0xFFFFE28D,0x00000636,0x00002036,0xFFFFF3B5,0x000003AA,0x00002036,0xFFFFF3B5,0x000003AA},
- {"0000001000010011111010101001010011011110000010100011100010000100",0x00003EF9,0xFFFFE0AA,0x0000068D,0x000024D3,0xFFFFF02F,0x00000445,0x000024D3,0xFFFFF02F,0x00000445},
- {"0000001000010011111010101001010011011110001010000011100101000100",0x00003D08,0xFFFFE1BB,0x00000665,0x00002159,0xFFFFF26F,0x000003E6,0x00002159,0xFFFFF26F,0x000003E6},
- {"0000001000010011111010101001010011011110001011000010000011000100",0x000038A9,0xFFFFE6CA,0x00000580,0x000025D3,0xFFFFF101,0x00000421,0x000025D3,0xFFFFF101,0x00000421},
- {"0000001000010011111010101001010011011110000010100010000010100100",0x00003E45,0xFFFFE1F8,0x0000064D,0x000027E3,0xFFFFEEBB,0x0000047F,0x000027E3,0xFFFFEEBB,0x0000047F},
- {"0000001000010011111010101001010011011110000011100011100001100100",0x00003F76,0xFFFFE128,0x0000066E,0x0000286B,0xFFFFEE4C,0x00000493,0x0000286B,0xFFFFEE4C,0x00000493},
- {"0000001000010011111010101001010011011110001001100100000100000100",0x0000440D,0xFFFFDCA2,0x0000074F,0x00003817,0xFFFFE256,0x000006AF,0x00003817,0xFFFFE256,0x000006AF},
- {"0000001000010011111010101001010011011110000100000101000100000100",0x00003EE1,0xFFFFDFA7,0x000006D4,0x000027EA,0xFFFFED2B,0x000004DE,0x000027EA,0xFFFFED2B,0x000004DE},
- {"0000001000010011111010101001010011011110001011000011100001100100",0x00003C62,0xFFFFE285,0x0000064A,0x00002520,0xFFFFF001,0x0000045C,0x00002520,0xFFFFF001,0x0000045C},
- {"0000001000010011111010101001010011011110001100100011100101100100",0x0000272E,0xFFFFF17A,0x000003FA,0x0000150B,0xFFFFFBD5,0x00000284,0x0000150B,0xFFFFFBD5,0x00000284},
- {"0000001000010011111010101001010011011110001001100001100100100100",0x00004275,0xFFFFDF69,0x000006A5,0x000025AA,0xFFFFF05C,0x0000042B,0x000025AA,0xFFFFF05C,0x0000042B},
- {"0000001000010011111010101001010011011110000011100100000011100100",0x00003CAA,0xFFFFE392,0x000005FF,0x000023A8,0xFFFFF20E,0x000003E9,0x000023A8,0xFFFFF20E,0x000003E9},
- {"0000001000010011111010101001010011011110001011000101000011000100",0x00003CF8,0xFFFFE0FB,0x000006A6,0x00002CA7,0xFFFFE9FF,0x0000056E,0x00002CA7,0xFFFFE9FF,0x0000056E},
- {"0000001000010011111010101001010011011110001010000010000100100100",0x00003D00,0xFFFFE296,0x00000633,0x000021C1,0xFFFFF2C8,0x000003CF,0x000021C1,0xFFFFF2C8,0x000003CF},
- {"0000001000010011111010101001010011011110001010000011100011100100",0x00003B46,0xFFFFE301,0x00000632,0x0000204C,0xFFFFF33B,0x000003C8,0x0000204C,0xFFFFF33B,0x000003C8},
- {"0000001000010011111010101001010011011110001000000100000101100100",0x00002026,0xFFFFF5CE,0x00000368,0x00001598,0xFFFFFB29,0x000002C3,0x00001598,0xFFFFFB29,0x000002C3},
- {"0000001000010011111010101001010011011110001010000011000101100100",0x00003DCA,0xFFFFE178,0x00000668,0x00001FDB,0xFFFFF39D,0x000003AF,0x00001FDB,0xFFFFF39D,0x000003AF},
- {"0000001000010011111010101001010011011110001011000100100011000100",0x00003A59,0xFFFFE327,0x00000642,0x000024B9,0xFFFFEFC4,0x00000471,0x000024B9,0xFFFFEFC4,0x00000471},
- {"0000001000010011111010101001010011011110001011000010100101000100",0x00003C26,0xFFFFE440,0x000005EB,0x00002C0F,0xFFFFEC88,0x000004E0,0x00002C0F,0xFFFFEC88,0x000004E0},
- {"0000001000010011111010101001010011011110000010000011100010000100",0x00004149,0xFFFFDEB8,0x000006E7,0x0000280A,0xFFFFED89,0x000004C2,0x0000280A,0xFFFFED89,0x000004C2},
- {"0000001000010011111010101001010011011110000011100100000100100100",0x00003EB4,0xFFFFE1E5,0x0000064D,0x0000299F,0xFFFFEDB3,0x000004A9,0x0000299F,0xFFFFEDB3,0x000004A9},
- {"0000001000010011111010101001010011011110001011000011100110100100",0x00003BBF,0xFFFFE268,0x0000065A,0x00002504,0xFFFFEFB0,0x00000470,0x00002504,0xFFFFEFB0,0x00000470},
- {"0000001000010011111010101001010011011110000010000100100100000100",0x00004203,0xFFFFDDC6,0x00000720,0x0000303B,0xFFFFE78F,0x000005D0,0x0000303B,0xFFFFE78F,0x000005D0},
- {"0000001000010011111010101001010011011110000011100011100110000100",0x00003DA3,0xFFFFE244,0x0000063E,0x000021B4,0xFFFFF2DA,0x000003CD,0x000021B4,0xFFFFF2DA,0x000003CD},
- {"0000001000010011111010101001010011011110000010100011100011100100",0x00004035,0xFFFFE065,0x0000069B,0x00003323,0xFFFFE6D6,0x000005D8,0x00003323,0xFFFFE6D6,0x000005D8},
- {"0000001000010011111010101001010011011110001011000001000101100100",0x00003944,0xFFFFE4E5,0x000005E2,0x00001F3C,0xFFFFF456,0x0000039D,0x00001F3C,0xFFFFF456,0x0000039D},
- {"0000001000010011111010101001010011011110000001100001100100000100",0x000032D8,0xFFFFEAE8,0x000004E6,0x00001812,0xFFFFFA1C,0x000002BC,0x00001812,0xFFFFFA1C,0x000002BC},
- {"0000001000010011111100001111110101000010110100100010100101000100",0x000041F6,0xFFFFE025,0x0000069A,0x0000241E,0xFFFFF1B4,0x00000402,0x0000241E,0xFFFFF1B4,0x00000402},
- {"0000001000010011111100001111111010011001000011000011000010100100",0x00003300,0xFFFFEB60,0x000004C1,0x00001E15,0xFFFFF6A6,0x0000033B,0x00001E15,0xFFFFF6A6,0x0000033B},
- {"0000001000010011111010101001010011011110000001000000100010100100",0x000037F0,0xFFFFE68F,0x0000059B,0x00001F8A,0xFFFFF467,0x000003A3,0x00001F8A,0xFFFFF467,0x000003A3},
- {"0000001000010011111100001111111010011001000110000010100110000100",0x000025D8,0xFFFFF2AA,0x000003C3,0x000018A8,0xFFFFF9BE,0x000002D2,0x000018A8,0xFFFFF9BE,0x000002D2},
- {"0000001000010011111100001111111010011001000001100010000011000100",0x0000364F,0xFFFFE988,0x000004FC,0x00001E51,0xFFFFF633,0x0000034F,0x00001E51,0xFFFFF633,0x0000034F},
- {"0000001000010011111010101001010011011110000001100001000101000100",0x00002288,0xFFFFF483,0x0000036C,0x0000280F,0xFFFFEF39,0x0000047B,0x0000280F,0xFFFFEF39,0x0000047B},
- {"0000001000010011111100001111111010011001000010000010000010000100",0x00003322,0xFFFFEA7E,0x000004ED,0x00001DAD,0xFFFFF62B,0x00000355,0x00001DAD,0xFFFFF62B,0x00000355},
- {"0000001000010011111010101001010011011110000000100101000011100100",0x00002B7B,0xFFFFEE4F,0x0000045B,0x00001AA2,0xFFFFF710,0x0000033E,0x00001AA2,0xFFFFF710,0x0000033E},
- {"0000001000010011111100001111111010011001000001000010000011000100",0x000034CC,0xFFFFEA79,0x000004E4,0x00001B05,0xFFFFF8B3,0x000002EC,0x00001B05,0xFFFFF8B3,0x000002EC},
- {"0000001000010011111100001111110101000010110111000010100001100100",0x00003837,0xFFFFE5ED,0x000005C3,0x00001ACB,0xFFFFF7B2,0x00000314,0x00001ACB,0xFFFFF7B2,0x00000314},
- {"0000001000010011111100001111111010011001000001000100000101100100",0x0000352D,0xFFFFE88F,0x00000548,0x000021E6,0xFFFFF3B5,0x000003AA,0x000021E6,0xFFFFF3B5,0x000003AA},
- {"0000001000010011111100001111111010011001000010100100100010000100",0x00003300,0xFFFFE835,0x0000057B,0x00001A85,0xFFFFF715,0x00000336,0x00001A85,0xFFFFF715,0x00000336},
- {"0000001000010011111010101001010011011110000001000100100010100100",0x000033FA,0xFFFFE851,0x00000565,0x00001A8E,0xFFFFF727,0x0000033B,0x00001A8E,0xFFFFF727,0x0000033B},
- {"0000001000010011111100001111110101000010110110100011100100100100",0x000039D3,0xFFFFE5D3,0x000005B0,0x00001888,0xFFFFF978,0x000002C8,0x00001888,0xFFFFF978,0x000002C8},
- {"0000001000010011111100001111111010011001000011100100100001100100",0x00002F6B,0xFFFFEC53,0x000004B9,0x00001C15,0xFFFFF71B,0x00000337,0x00001C15,0xFFFFF71B,0x00000337},
- {"0000001000010011111100001111111010011001000001100100000101000100",0x0000384D,0xFFFFE737,0x00000569,0x00001D2D,0xFFFFF673,0x00000343,0x00001D2D,0xFFFFF673,0x00000343},
- {"0000001000010011111100001111111010011001000001100010000010100100",0x00003A49,0xFFFFE70B,0x0000055F,0x00001A63,0xFFFFF8CD,0x000002E2,0x00001A63,0xFFFFF8CD,0x000002E2},
- {"0000001000010011111100001111111010011001000001000010100110000100",0x0000311E,0xFFFFEB97,0x000004C6,0x00001EAE,0xFFFFF5A9,0x00000367,0x00001EAE,0xFFFFF5A9,0x00000367},
- {"0000001000010011111100001111111010011001000011100001000100100100",0x000027D3,0xFFFFF075,0x00000417,0x00002001,0xFFFFF44A,0x000003A2,0x00002001,0xFFFFF44A,0x000003A2},
- {"0000001000010011111100001111111010011001000001100100100100000100",0x00003B72,0xFFFFE4BD,0x000005DC,0x00001D76,0xFFFFF606,0x0000035A,0x00001D76,0xFFFFF606,0x0000035A},
- {"0000001000010011111100001111111010011001000100000001000100100100",0x00002E0F,0xFFFFECA7,0x000004AE,0x00001DC6,0xFFFFF5BF,0x0000036A,0x00001DC6,0xFFFFF5BF,0x0000036A},
- {"0000001000010011111100001111111010011001000000100011100010100100",0x000032C7,0xFFFFEA7A,0x000004F0,0x00001A7B,0xFFFFF827,0x00000301,0x00001A7B,0xFFFFF827,0x00000301},
- {"0000001000010011111010101001010011011110000001000100100010000100",0x0000312D,0xFFFFEA39,0x00000515,0x00001948,0xFFFFF800,0x00000318,0x00001948,0xFFFFF800,0x00000318},
- {"0000001000010011111010101001010011011110000001100010000010000100",0x00003611,0xFFFFE8D7,0x00000533,0x00001929,0xFFFFF965,0x000002D2,0x00001929,0xFFFFF965,0x000002D2},
- {"0000001000010011111100001111111010011001001011000011000011100100",0x00002FE2,0xFFFFED89,0x00000470,0x00001A3C,0xFFFFF955,0x000002D5,0x00001A3C,0xFFFFF955,0x000002D5},
- {"0000001000010011111010101001010011011110000000100000100010100100",0x000035FF,0xFFFFE884,0x00000548,0x0000182A,0xFFFFF9AB,0x000002CF,0x0000182A,0xFFFFF9AB,0x000002CF},
- {"0000001000010011111100001111111010011001000000100010000011100100",0x00003597,0xFFFFE904,0x00000528,0x00001A94,0xFFFFF840,0x00000300,0x00001A94,0xFFFFF840,0x00000300},
- {"0000001000010011111100001111111010011001000110000001100101000100",0x000026CB,0xFFFFF1FB,0x000003E4,0x000017CC,0xFFFFFA25,0x000002C8,0x000017CC,0xFFFFFA25,0x000002C8},
- {"0000001000010011111010101001010011011110000001100000100011000100",0x00003274,0xFFFFEA39,0x0000050C,0x00001B20,0xFFFFF7C1,0x00000314,0x00001B20,0xFFFFF7C1,0x00000314},
- {"0000001000010011111100001111110101000010110110000010100100100100",0x0000280B,0xFFFFF283,0x000003B5,0x000018D0,0xFFFFF992,0x000002EC,0x000018D0,0xFFFFF992,0x000002EC},
- {"0000001000010011111100001111111010011001000001100010000100000100",0x000033AB,0xFFFFEB1B,0x000004C4,0x00001FEE,0xFFFFF53A,0x00000378,0x00001FEE,0xFFFFF53A,0x00000378},
- {"0000001000010011111100001111111010011001000010100011100101100100",0x00002F79,0xFFFFEB0C,0x000004FA,0x00001E57,0xFFFFF4BF,0x0000039B,0x00001E57,0xFFFFF4BF,0x0000039B},
- {"0000001000010011111100001111111010011001000001000100100011100100",0x00003487,0xFFFFE8F2,0x00000539,0x0000185B,0xFFFFF9AE,0x000002BA,0x0000185B,0xFFFFF9AE,0x000002BA},
- {"0000001000010011111100001111111010011001000010100001100010100100",0x00003500,0xFFFFE793,0x0000058A,0x00001AA2,0xFFFFF792,0x0000031D,0x00001AA2,0xFFFFF792,0x0000031D},
- {"0000001000010011111100001111111010011001000010000001000101100100",0x00003943,0xFFFFE54D,0x000005D9,0x00001BC8,0xFFFFF6E0,0x00000339,0x00001BC8,0xFFFFF6E0,0x00000339},
- {"0000001000010011111010101001010011011110000001000011000010100100",0x0000306D,0xFFFFEC5E,0x000004A5,0x00001A3A,0xFFFFF85F,0x00000304,0x00001A3A,0xFFFFF85F,0x00000304},
- {"0000001000010011111100001111110101000010110110000011000010000100",0x00002BA4,0xFFFFEE8D,0x0000046A,0x0000198C,0xFFFFF88E,0x00000307,0x0000198C,0xFFFFF88E,0x00000307},
- {"0000001000010011111100001111110101000010110100100001100011100100",0x00003D30,0xFFFFE2F6,0x0000062A,0x000025DC,0xFFFFF074,0x00000435,0x000025DC,0xFFFFF074,0x00000435},
- {"0000001000010011111100001111110101000010110110000011100101100100",0x00002CD6,0xFFFFED79,0x0000049B,0x000016D0,0xFFFFFA53,0x000002BB,0x000016D0,0xFFFFFA53,0x000002BB},
- {"0000001000010011111100001111111010011001000101100011000101100100",0x00002484,0xFFFFF3BD,0x000003A0,0x000015B8,0xFFFFFB6B,0x000002A4,0x000015B8,0xFFFFFB6B,0x000002A4},
- {"0000001000010011111100001111111010011001000011100011100101000100",0x000038AE,0xFFFFE6D1,0x00000587,0x00001A2A,0xFFFFF8F1,0x000002D4,0x00001A2A,0xFFFFF8F1,0x000002D4},
- {"0000001000010011111100001111111010011001000001000100100101000100",0x000036FD,0xFFFFE76C,0x00000576,0x00001EE4,0xFFFFF58D,0x00000361,0x00001EE4,0xFFFFF58D,0x00000361},
- {"0000001000010011111100001111110101000010110110000011000010100100",0x00002BCF,0xFFFFEF28,0x00000448,0x00001B93,0xFFFFF7BA,0x00000327,0x00001B93,0xFFFFF7BA,0x00000327},
- {"0000001000010011111100001111111010011001000001100010100010000100",0x00003834,0xFFFFE818,0x0000053B,0x00001AFE,0xFFFFF85C,0x000002F3,0x00001AFE,0xFFFFF85C,0x000002F3},
- {"0000001000010011111100001111111010011001001100100011000110100100",0x00002EF7,0xFFFFEBFC,0x000004CE,0x00001897,0xFFFFF8EF,0x000002EC,0x00001897,0xFFFFF8EF,0x000002EC},
- {"0000001000010011111100001111111010011001001011000001100011000100",0x000035BD,0xFFFFE8BB,0x0000053B,0x00001F22,0xFFFFF561,0x00000373,0x00001F22,0xFFFFF561,0x00000373},
- {"0000001000010011111100001111111010011001000110000011100110000100",0x00002D42,0xFFFFEE1D,0x00000478,0x000016F0,0xFFFFFAAE,0x000002B3,0x000016F0,0xFFFFFAAE,0x000002B3},
- {"0000001000010011111010101001010011011110000001000101000100100100",0x00002F98,0xFFFFEB3C,0x000004F0,0x00001903,0xFFFFF818,0x00000319,0x00001903,0xFFFFF818,0x00000319},
- {"0000001000010011111100001111110101000010110101000010000101000100",0x00004081,0xFFFFDF13,0x000006F3,0x00002A6D,0xFFFFEC1B,0x00000509,0x00002A6D,0xFFFFEC1B,0x00000509},
- {"0000001000010011111010101001010011011110000001000000100100000100",0x00002D68,0xFFFFED21,0x00000498,0x00001FF6,0xFFFFF427,0x000003B0,0x00001FF6,0xFFFFF427,0x000003B0},
- {"0000001000010011111100001111111010011001000000100011100010000100",0x00003243,0xFFFFEA5C,0x000004FD,0x000020FB,0xFFFFF39E,0x000003C0,0x000020FB,0xFFFFF39E,0x000003C0},
- {"0000001000010011111100001111110101000010110110000100100010100100",0x00002F20,0xFFFFEC19,0x000004C6,0x00001748,0xFFFFF99F,0x000002DA,0x00001748,0xFFFFF99F,0x000002DA},
- {"0000001000010011111100001111111010011001000100000011100110000100",0x00002D68,0xFFFFED21,0x00000498,0x00001A43,0xFFFFF843,0x000002F9,0x00001A43,0xFFFFF843,0x000002F9},
- {"0000001000010011111100001111111010011001000000100010000010100100",0x0000396E,0xFFFFE616,0x000005A9,0x00001A51,0xFFFFF850,0x000002FA,0x00001A51,0xFFFFF850,0x000002FA},
- {"0000001000010011111100001111111010011001000001000011000101000100",0x0000305C,0xFFFFED4B,0x0000046C,0x00001CF9,0xFFFFF7BA,0x00000304,0x00001CF9,0xFFFFF7BA,0x00000304},
- {"0000001000010011111100001111110101000010110110100100000101100100",0x0000343C,0xFFFFE869,0x00000559,0x00001CE2,0xFFFFF614,0x00000359,0x00001CE2,0xFFFFF614,0x00000359},
- {"0000001000010011111100001111111010011001000110000011100101100100",0x00002782,0xFFFFF1FE,0x000003D9,0x000015DC,0xFFFFFB8B,0x00000290,0x000015DC,0xFFFFFB8B,0x00000290},
- {"0000001000010011111100001111111010011001000110000001100011000100",0x00002B9C,0xFFFFEF63,0x00000443,0x00001369,0xFFFFFD51,0x00000244,0x00001369,0xFFFFFD51,0x00000244},
- {"0000001000010011111100001111111010011001000010100010000010000100",0x000035F8,0xFFFFE743,0x00000592,0x000018D8,0xFFFFF8EE,0x000002E4,0x000018D8,0xFFFFF8EE,0x000002E4},
- {"0000001000010011111010101001010011011110000001100010100001000100",0x00002B72,0xFFFFEF1E,0x0000043C,0x00002647,0xFFFFF092,0x0000043E,0x00002647,0xFFFFF092,0x0000043E},
- {"0000001000010011111100001111111010011001000100000010000110000100",0x00002EC9,0xFFFFEC5F,0x000004B8,0x000018B6,0xFFFFF936,0x000002D8,0x000018B6,0xFFFFF936,0x000002D8},
- {"0000001000010011111100001111111010011001000001100100000010000100",0x000038A7,0xFFFFE6AC,0x00000589,0x00001C42,0xFFFFF70B,0x00000329,0x00001C42,0xFFFFF70B,0x00000329},
- {"0000001000010011111100001111111010011001001100000000100010100100",0x00002F6B,0xFFFFEBF6,0x000004CF,0x000018AE,0xFFFFF928,0x000002E3,0x000018AE,0xFFFFF928,0x000002E3},
- {"0000001000010011111100001111110101000010110110100101000100000100",0x000029CD,0xFFFFEEE1,0x00000459,0x00001AB5,0xFFFFF76F,0x00000324,0x00001AB5,0xFFFFF76F,0x00000324},
- {"0000001000010011111010101001010011011110000001100011100011000100",0x00003921,0xFFFFE71D,0x00000577,0x00001646,0xFFFFFB24,0x00000293,0x00001646,0xFFFFFB24,0x00000293},
- {"0000001000010011111010101001010011011110000001000100000101100100",0x00003940,0xFFFFE521,0x000005E8,0x00001947,0xFFFFF839,0x0000030D,0x00001947,0xFFFFF839,0x0000030D},
- {"0000001000010011111100001111110101000010110100100100000101100100",0x00003DCA,0xFFFFE211,0x00000659,0x0000250E,0xFFFFF072,0x00000443,0x0000250E,0xFFFFF072,0x00000443},
- {"0000001000010011111100001111111010011001000011000000100100000100",0x00002E95,0xFFFFEC20,0x000004C9,0x000015B4,0xFFFFFAD3,0x0000029D,0x000015B4,0xFFFFFAD3,0x0000029D},
- {"0000001000010011111100001111111010011001000001000001000010000100",0x00002C11,0xFFFFEE6E,0x00000468,0x00001901,0xFFFFF924,0x000002E7,0x00001901,0xFFFFF924,0x000002E7},
- {"0000001000010011111010101001010011011110000001100010000100000100",0x0000293F,0xFFFFF158,0x000003E6,0x0000183F,0xFFFFF9F6,0x000002D2,0x0000183F,0xFFFFF9F6,0x000002D2},
- {"0000001000010011111100001111111010011001000011100001000100000100",0x00002A67,0xFFFFEF34,0x0000043E,0x00001C6F,0xFFFFF6F1,0x0000032B,0x00001C6F,0xFFFFF6F1,0x0000032B},
- {"0000001000010011111010101001010011011110000001100101000100100100",0x00002F8D,0xFFFFEB77,0x000004DA,0x00001C0D,0xFFFFF627,0x00000365,0x00001C0D,0xFFFFF627,0x00000365},
- {"0000001000010011111100001111111010011001000011000011100011000100",0x00003476,0xFFFFEA5B,0x000004E7,0x00001DBF,0xFFFFF6C7,0x00000333,0x00001DBF,0xFFFFF6C7,0x00000333},
- {"0000001000010011111100001111111010011001000011100000100101000100",0x00003336,0xFFFFE92F,0x00000546,0x00001614,0xFFFFFAE0,0x00000296,0x00001614,0xFFFFFAE0,0x00000296},
- {"0000001000010011111100001111111010011001000101100010000101100100",0x00002513,0xFFFFF323,0x000003BC,0x000016DB,0xFFFFFA79,0x000002CD,0x000016DB,0xFFFFFA79,0x000002CD},
- {"0000001000010011111100001111111010011001000010100010100101000100",0x000035A7,0xFFFFE78E,0x00000584,0x00001B0D,0xFFFFF77D,0x0000031F,0x00001B0D,0xFFFFF77D,0x0000031F},
- {"0000001000010011111100001111111010011001001100100011100011100100",0x00003171,0xFFFFEB98,0x000004C6,0x00001C76,0xFFFFF71F,0x0000032F,0x00001C76,0xFFFFF71F,0x0000032F},
- {"0000001000010011111100001111110101000010110110100001000010000100",0x00002C52,0xFFFFED2E,0x000004A7,0x00002182,0xFFFFF2F4,0x000003E4,0x00002182,0xFFFFF2F4,0x000003E4},
- {"0000001000010011111100001111111010011001000100000010100100100100",0x000032E1,0xFFFFEB39,0x000004D0,0x00001B55,0xFFFFF859,0x000002FA,0x00001B55,0xFFFFF859,0x000002FA},
- {"0000001000010011111100001111111010011001000110000100100010100100",0x000029B6,0xFFFFEFF7,0x00000430,0x0000151B,0xFFFFFBC6,0x0000027F,0x0000151B,0xFFFFFBC6,0x0000027F},
- {"0000001000010011111100001111110101000010110110100001100101100100",0x00002FF7,0xFFFFEB67,0x000004DA,0x000020E9,0xFFFFF363,0x000003CE,0x000020E9,0xFFFFF363,0x000003CE},
- {"0000001000010011111100001111110101000010110110100101000100100100",0x00003CDD,0xFFFFE2B2,0x00000649,0x00001B18,0xFFFFF739,0x00000329,0x00001B18,0xFFFFF739,0x00000329},
- {"0000001000010011111100001111111010011001000001100010100010100100",0x00003C82,0xFFFFE5C6,0x0000058E,0x00001F3F,0xFFFFF5AD,0x00000361,0x00001F3F,0xFFFFF5AD,0x00000361},
- {"0000001000010011111100001111110101000010110111000100000010000100",0x0000319B,0xFFFFEA15,0x0000051B,0x00001CC9,0xFFFFF62E,0x00000358,0x00001CC9,0xFFFFF62E,0x00000358},
- {"0000001000010011111010101001010011011110000001100011100011100100",0x000032B6,0xFFFFEB2B,0x000004D6,0x000018E0,0xFFFFF966,0x000002DE,0x000018E0,0xFFFFF966,0x000002DE},
- {"0000001000010011111010101001010011011110000000100011100110000100",0x0000300A,0xFFFFEBA6,0x000004D1,0x00001CFD,0xFFFFF5F6,0x0000036D,0x00001CFD,0xFFFFF5F6,0x0000036D},
- {"0000001000010011111100001111110101000010110110000010100110000100",0x000026A9,0xFFFFF15D,0x00000400,0x00001561,0xFFFFFB1F,0x000002A0,0x00001561,0xFFFFFB1F,0x000002A0},
- {"0000001000010011111100001111111010011001000011100101000100100100",0x00003123,0xFFFFEAD2,0x000004FA,0x000018CB,0xFFFFF8F5,0x000002EC,0x000018CB,0xFFFFF8F5,0x000002EC},
- {"0000001000010011111100001111111010011001000110000100000011000100",0x00003577,0xFFFFE935,0x00000533,0x000016CD,0xFFFFFB44,0x00000289,0x000016CD,0xFFFFFB44,0x00000289},
- {"0000001000010011111100001111111010011001001010000010000110000100",0x00002875,0xFFFFF170,0x000003F3,0x00001567,0xFFFFFBD5,0x00000289,0x00001567,0xFFFFFBD5,0x00000289},
- {"0000001000010011111100001111111010011001000010000100000010000100",0x00003AE2,0xFFFFE538,0x000005C1,0x00001CB4,0xFFFFF6A3,0x0000033C,0x00001CB4,0xFFFFF6A3,0x0000033C},
- {"0000001000010011111100001111111010011001000011000011100011100100",0x000031DF,0xFFFFEC2A,0x000004A3,0x00001EF0,0xFFFFF626,0x00000352,0x00001EF0,0xFFFFF626,0x00000352},
- {"0000001000010011111100001111110101000010110100100101000101000100",0x00004A6A,0xFFFFDB15,0x00000758,0x000027F3,0xFFFFEEEE,0x00000479,0x000027F3,0xFFFFEEEE,0x00000479},
- {"0000001000010011111010101001010011011110000001100011100100000100",0x00002BB9,0xFFFFEF5D,0x00000433,0x00001589,0xFFFFFB57,0x00000295,0x00001589,0xFFFFFB57,0x00000295},
- {"0000001000010011111100001111111010011001000001000010000101100100",0x000033A0,0xFFFFE98F,0x00000528,0x00001CB4,0xFFFFF706,0x0000032D,0x00001CB4,0xFFFFF706,0x0000032D},
- {"0000001000010011111100001111111010011001000101100011000001100100",0x0000248E,0xFFFFF380,0x000003AC,0x000016EA,0xFFFFFA6C,0x000002CE,0x000016EA,0xFFFFFA6C,0x000002CE},
- {"0000001000010011111100001111111010011001000000100010000110100100",0x00002FE2,0xFFFFEB2F,0x000004E9,0x00001D4E,0xFFFFF56B,0x00000380,0x00001D4E,0xFFFFF56B,0x00000380},
- {"0000001000010011111100001111111010011001000010100010100010000100",0x00003283,0xFFFFE9E7,0x0000051D,0x00000694,0xFFFFFD32,0x000003C3,0x00000694,0xFFFFFD32,0x000003C3},
- {"0000001000010011111100001111110101000010110110000101000011000100",0x00002EE4,0xFFFFEBFD,0x000004D3,0x0000151A,0xFFFFFAF6,0x000002A4,0x0000151A,0xFFFFFAF6,0x000002A4},
- {"0000001000010011111100001111110101000010110111000001100011100100",0x0000302D,0xFFFFEB7F,0x000004DA,0x00001E6D,0xFFFFF54B,0x00000380,0x00001E6D,0xFFFFF54B,0x00000380},
- {"0000001000010011111100001111110101000010110110100101000011000100",0x000033DA,0xFFFFE7FB,0x0000057F,0x00001DED,0xFFFFF50E,0x0000038D,0x00001DED,0xFFFFF50E,0x0000038D},
- {"0000001000010011111100001111111010011001001011000100000010000100",0x000030B5,0xFFFFEBB8,0x000004C4,0x00001C3F,0xFFFFF726,0x0000032A,0x00001C3F,0xFFFFF726,0x0000032A},
- {"0000001000010011111100001111111010011001000010000011000111000100",0x00003BBD,0xFFFFE55C,0x000005B8,0x000019DB,0xFFFFF8BB,0x000002EF,0x000019DB,0xFFFFF8BB,0x000002EF},
- {"0000001000010011111100001111111010011001000011100011100010000100",0x00002964,0xFFFFF051,0x0000040E,0x000025CD,0xFFFFF11B,0x0000041F,0x000025CD,0xFFFFF11B,0x0000041F},
- {"0000001000010011111100001111110101000010110111000100100010000100",0x000033F5,0xFFFFE863,0x00000560,0x00001BCE,0xFFFFF689,0x0000034B,0x00001BCE,0xFFFFF689,0x0000034B},
- {"0000001000010011111100001111111010011001000010100010100001100100",0x00003294,0xFFFFE924,0x00000548,0x00001D41,0xFFFFF580,0x0000037D,0x00001D41,0xFFFFF580,0x0000037D},
- {"0000001000010011111100001111110101000010110111000011100110100100",0x000034FB,0xFFFFE7FE,0x0000056D,0x00001CB1,0xFFFFF635,0x00000357,0x00001CB1,0xFFFFF635,0x00000357},
- {"0000001000010011111100001111111010011001000010100001000010100100",0x00002E28,0xFFFFEBB9,0x000004E0,0x00001B20,0xFFFFF6E3,0x0000033C,0x00001B20,0xFFFFF6E3,0x0000033C},
- {"0000001000010011111100001111110101000010110110100001100100000100",0x00002799,0xFFFFF0F4,0x000003FC,0x00001C9D,0xFFFFF6A1,0x00000345,0x00001C9D,0xFFFFF6A1,0x00000345},
- {"0000001000010011111100001111111010011001000001100100000100000100",0x00003AEA,0xFFFFE5DB,0x0000059D,0x00001B61,0xFFFFF7F0,0x00000301,0x00001B61,0xFFFFF7F0,0x00000301},
- {"0000001000010011111010101001010011011110000001000001100110000100",0x000031F6,0xFFFFEAB8,0x000004F3,0x00001D90,0xFFFFF622,0x00000359,0x00001D90,0xFFFFF622,0x00000359},
- {"0000001000010011111100001111111010011001000011000100000001100100",0x000031B8,0xFFFFEA61,0x0000050F,0x0000199D,0xFFFFF87C,0x000002FD,0x0000199D,0xFFFFF87C,0x000002FD},
- {"0000001000010011111100001111110101000010110100100011000101000100",0x00004514,0xFFFFDDFF,0x000006F6,0x000022CD,0xFFFFF29F,0x000003D9,0x000022CD,0xFFFFF29F,0x000003D9},
- {"0000001000010011111010101001010011011110000001000011000101100100",0x00002F30,0xFFFFECB8,0x000004A0,0x00001B07,0xFFFFF7E2,0x00000313,0x00001B07,0xFFFFF7E2,0x00000313},
- {"0000001000010011111100001111110101000010110111000011000010100100",0x0000383B,0xFFFFE702,0x00000581,0x00001A08,0xFFFFF8CA,0x000002E2,0x00001A08,0xFFFFF8CA,0x000002E2},
- {"0000001000010011111100001111111010011001000000100010000101100100",0x00002CC5,0xFFFFEDF8,0x00000465,0x00001F47,0xFFFFF4B2,0x00000393,0x00001F47,0xFFFFF4B2,0x00000393},
- {"0000001000010011111100001111111010011001000101100010000111000100",0x00002304,0xFFFFF453,0x00000384,0x0000170A,0xFFFFFA3F,0x000002CE,0x0000170A,0xFFFFFA3F,0x000002CE},
- {"0000001000010011111100001111111010011001000010100101000100100100",0x0000337E,0xFFFFE850,0x0000056E,0x00001BDD,0xFFFFF668,0x00000353,0x00001BDD,0xFFFFF668,0x00000353},
- {"0000001000010011111100001111111010011001000011100100100100100100",0x00002E2F,0xFFFFEC9B,0x000004AE,0x00001C4D,0xFFFFF6D3,0x00000338,0x00001C4D,0xFFFFF6D3,0x00000338},
- {"0000001000010011111010101001010011011110000001100001000100100100",0x00002DDD,0xFFFFEDA4,0x00000477,0x00002010,0xFFFFF4BB,0x00000390,0x00002010,0xFFFFF4BB,0x00000390},
- {"0000001000010011111100001111110101000010110110100100100011100100",0x0000290C,0xFFFFEF61,0x00000445,0x00002133,0xFFFFF324,0x000003D8,0x00002133,0xFFFFF324,0x000003D8},
- {"0000001000010011111100001111111010011001000001100010100100100100",0x0000371E,0xFFFFE8D5,0x00000524,0x00001C3A,0xFFFFF7AE,0x00000314,0x00001C3A,0xFFFFF7AE,0x00000314},
- {"0000001000010011111100001111110101000010110110000011100011100100",0x00002A58,0xFFFFF007,0x00000429,0x000018A6,0xFFFFF98F,0x000002E1,0x000018A6,0xFFFFF98F,0x000002E1},
- {"0000001000010011111100001111111010011001000000100011000010000100",0x00002FED,0xFFFFEC48,0x000004AA,0x00001E9D,0xFFFFF584,0x00000370,0x00001E9D,0xFFFFF584,0x00000370},
- {"0000001000010011111100001111111010011001000110000001100010000100",0x00002829,0xFFFFF15F,0x000003F7,0x0000157E,0xFFFFFBD4,0x00000282,0x0000157E,0xFFFFFBD4,0x00000282},
- {"0000001000010011111100001111111010011001000100000001100100100100",0x000030CF,0xFFFFEB8D,0x000004CE,0x00001A4C,0xFFFFF868,0x000002F7,0x00001A4C,0xFFFFF868,0x000002F7},
- {"0000001000010011111100001111110101000010110110100010000010000100",0x00002C8F,0xFFFFEDD2,0x0000047D,0x00001CCE,0xFFFFF6A1,0x00000343,0x00001CCE,0xFFFFF6A1,0x00000343},
- {"0000001000010011111100001111111010011001000110000010000101100100",0x00002A84,0xFFFFEFBA,0x0000043E,0x000015EF,0xFFFFFB4B,0x0000029E,0x000015EF,0xFFFFFB4B,0x0000029E},
- {"0000001000010011111100001111111010011001000011000010100010100100",0x000034CA,0xFFFFEA08,0x000004FF,0x00001C19,0xFFFFF7ED,0x00000309,0x00001C19,0xFFFFF7ED,0x00000309},
- {"0000001000010011111100001111111010011001000101100011100110100100",0x00002187,0xFFFFF4B0,0x0000037E,0x0000154A,0xFFFFFB0C,0x000002AE,0x0000154A,0xFFFFFB0C,0x000002AE},
- {"0000001000010011111100001111110101000010110110100011100001000100",0x00002F4F,0xFFFFEB3C,0x000004F8,0x0000181F,0xFFFFF92D,0x000002DF,0x0000181F,0xFFFFF92D,0x000002DF},
- {"0000001000010011111100001111111010011001000001000001000011100100",0x0000290C,0xFFFFF0B1,0x000003FC,0x00001DB0,0xFFFFF636,0x00000355,0x00001DB0,0xFFFFF636,0x00000355},
- {"0000001000010011111100001111111010011001000010100001000001100100",0x000034C1,0xFFFFE888,0x0000055A,0x000019BF,0xFFFFF881,0x000002FB,0x000019BF,0xFFFFF881,0x000002FB},
- {"0000001000010011111100001111110101000010110111000001100011000100",0x00003139,0xFFFFEA98,0x00000504,0x000019F2,0xFFFFF820,0x0000030B,0x000019F2,0xFFFFF820,0x0000030B},
- {"0000001000010011111100001111110101000010110110000011000101000100",0x00002CAC,0xFFFFEEB2,0x00000458,0x0000152C,0xFFFFFBEF,0x0000027B,0x0000152C,0xFFFFFBEF,0x0000027B},
- {"0000001000010011111100001111111010011001001011000011100011100100",0x00003577,0xFFFFE99C,0x0000050D,0x00001E64,0xFFFFF679,0x0000033F,0x00001E64,0xFFFFF679,0x0000033F},
- {"0000001000010011111100001111110101000010110110100100000100000100",0x0000263A,0xFFFFF1E4,0x000003D4,0x00001F68,0xFFFFF4ED,0x00000386,0x00001F68,0xFFFFF4ED,0x00000386},
- {"0000001000010011111100001111110101000010110110000001100110000100",0x00002CE9,0xFFFFED63,0x00000497,0x00001810,0xFFFFF94D,0x000002E3,0x00001810,0xFFFFF94D,0x000002E3},
- {"0000001000010011111010101001010011011110000001000100000100000100",0x0000318A,0xFFFFEAC8,0x000004F5,0x0000195C,0xFFFFF896,0x000002FB,0x0000195C,0xFFFFF896,0x000002FB},
- {"0000001000010011111100001111110101000010110110000011100100000100",0x00002C41,0xFFFFEEC6,0x0000045D,0x000017DD,0xFFFFFA16,0x000002CB,0x000017DD,0xFFFFFA16,0x000002CB},
- {"0000001000010011111100001111111010011001000000100011000110100100",0x00002DD4,0xFFFFEC98,0x000004AD,0x00001BD7,0xFFFFF69F,0x00000347,0x00001BD7,0xFFFFF69F,0x00000347},
- {"0000001000010011111100001111110101000010110110100011100101000100",0x00003351,0xFFFFE9B2,0x0000051A,0x00001CA1,0xFFFFF6A4,0x00000341,0x00001CA1,0xFFFFF6A4,0x00000341},
- {"0000001000010011111100001111111010011001000000100001000100000100",0x0000322D,0xFFFFE9BE,0x00000527,0x00001CF9,0xFFFFF5EB,0x00000366,0x00001CF9,0xFFFFF5EB,0x00000366},
- {"0000001000010011111100001111111010011001000011000010100011000100",0x00003678,0xFFFFE9A8,0x00000503,0x00001AD4,0xFFFFF8F6,0x000002E3,0x00001AD4,0xFFFFF8F6,0x000002E3},
- {"0000001000010011111100001111111010011001000101100001100100100100",0x0000260E,0xFFFFF2C1,0x000003CA,0x00001139,0xFFFFFE48,0x00000236,0x00001139,0xFFFFFE48,0x00000236},
- {"0000001000010011111100001111111010011001000010100010000101100100",0x000033D3,0xFFFFE872,0x00000565,0x00001B72,0xFFFFF713,0x00000332,0x00001B72,0xFFFFF713,0x00000332},
- {"0000001000010011111100001111111010011001001100100011100001000100",0x0000309B,0xFFFFEB42,0x000004E4,0x00001918,0xFFFFF8C8,0x000002F2,0x00001918,0xFFFFF8C8,0x000002F2},
- {"0000001000010011111100001111111010011001000110000010100001100100",0x000028B8,0xFFFFF105,0x00000402,0x000018BB,0xFFFFF9BC,0x000002D3,0x000018BB,0xFFFFF9BC,0x000002D3},
- {"0000001000010011111100001111111010011001000010100001100010000100",0x00003123,0xFFFFE9D1,0x00000534,0x00001B19,0xFFFFF6FE,0x0000033C,0x00001B19,0xFFFFF6FE,0x0000033C},
- {"0000001000010011111100001111111010011001000000100010000101000100",0x00003216,0xFFFFEA8E,0x000004F6,0x00001F72,0xFFFFF4CE,0x0000038B,0x00001F72,0xFFFFF4CE,0x0000038B},
- {"0000001000010011111100001111111010011001000101100010100101100100",0x00002564,0xFFFFF32D,0x000003B6,0x00001685,0xFFFFFADB,0x000002BB,0x00001685,0xFFFFFADB,0x000002BB},
- {"0000001000010011111100001111110101000010110110100010100100100100",0x00002E60,0xFFFFED13,0x00000497,0x00001CA5,0xFFFFF6B9,0x00000346,0x00001CA5,0xFFFFF6B9,0x00000346},
- {"0000001000010011111100001111111010011001000011100011100110100100",0x0000336D,0xFFFFE934,0x0000053B,0x00001B3E,0xFFFFF763,0x00000327,0x00001B3E,0xFFFFF763,0x00000327},
- {"0000001000010011111100001111111010011001000100000001000010000100",0x0000274A,0xFFFFF119,0x000003FA,0x00001D75,0xFFFFF5CD,0x0000036F,0x00001D75,0xFFFFF5CD,0x0000036F},
- {"0000001000010011111100001111110101000010110110100010000101100100",0x0000366B,0xFFFFE70A,0x0000059A,0x00001ED8,0xFFFFF501,0x00000389,0x00001ED8,0xFFFFF501,0x00000389},
- {"0000001000010011111100001111111010011001001000100011100101100100",0x00003164,0xFFFFEAB4,0x000004FA,0x00001C52,0xFFFFF6E0,0x00000336,0x00001C52,0xFFFFF6E0,0x00000336},
- {"0000001000010011111100001111110101000010110100100011000001100100",0x00004224,0xFFFFDF7F,0x000006C1,0x00002A52,0xFFFFED5E,0x000004BB,0x00002A52,0xFFFFED5E,0x000004BB},
- {"0000001000010011111100001111111010011001000100000010100001100100",0x000030E3,0xFFFFEB07,0x000004ED,0x00001FD3,0xFFFFF46D,0x000003A1,0x00001FD3,0xFFFFF46D,0x000003A1},
- {"0000001000010011111100001111110101000010110110000010100010000100",0x00002AEB,0xFFFFEF1B,0x00000454,0x00001829,0xFFFFF995,0x000002DD,0x00001829,0xFFFFF995,0x000002DD},
- {"0000001000010011111100001111110101000010110111000101000011100100",0x0000346B,0xFFFFE7A2,0x0000058B,0x000020C5,0xFFFFF2E8,0x000003EC,0x000020C5,0xFFFFF2E8,0x000003EC},
- {"0000001000010011111100001111110101000010110111000100000101100100",0x000039CF,0xFFFFE5D7,0x000005A9,0x00001D66,0xFFFFF5D6,0x00000366,0x00001D66,0xFFFFF5D6,0x00000366},
- {"0000001000010011111100001111111010011001000001000001100011100100",0x000034AC,0xFFFFE9AE,0x00000515,0x00001A28,0xFFFFF904,0x000002DC,0x00001A28,0xFFFFF904,0x000002DC},
- {"0000001000010011111100001111110101000010110111000010000010000100",0x00002D68,0xFFFFED21,0x00000498,0x00001C6F,0xFFFFF686,0x0000034C,0x00001C6F,0xFFFFF686,0x0000034C},
- {"0000001000010011111100001111111010011001000010000010000011000100",0x0000328B,0xFFFFEBA1,0x000004B4,0x00001DA3,0xFFFFF683,0x00000349,0x00001DA3,0xFFFFF683,0x00000349},
- {"0000001000010011111100001111111010011001000110000010100011000100",0x000027DC,0xFFFFF295,0x000003BF,0x000019C1,0xFFFFF98E,0x000002E8,0x000019C1,0xFFFFF98E,0x000002E8},
- {"0000001000010011111100001111111010011001000110000100000010000100",0x00002756,0xFFFFF1D7,0x000003DF,0x000015D9,0xFFFFFB51,0x00000298,0x000015D9,0xFFFFFB51,0x00000298},
- {"0000001000010011111100001111111010011001000010000011100010000100",0x00003526,0xFFFFE907,0x00000526,0x000017AB,0xFFFFFA12,0x000002AB,0x000017AB,0xFFFFFA12,0x000002AB},
- {"0000001000010011111100001111110101000010110110100001100011100100",0x0000351B,0xFFFFE8B7,0x00000540,0x00001A86,0xFFFFF821,0x00000303,0x00001A86,0xFFFFF821,0x00000303},
- {"0000001000010011111100001111111010011001000101100100000101000100",0x000024B2,0xFFFFF34E,0x000003B1,0x000018E2,0xFFFFF926,0x000002FC,0x000018E2,0xFFFFF926,0x000002FC},
- {"0000001000010011111100001111110101000010110110000010100010100100",0x00002F36,0xFFFFED5D,0x00000486,0x0000157A,0xFFFFFB85,0x00000293,0x0000157A,0xFFFFFB85,0x00000293},
- {"0000001000010011111100001111110101000010110111000101000011000100",0x00003A6E,0xFFFFE456,0x000005FD,0x00001F68,0xFFFFF3D1,0x000003C3,0x00001F68,0xFFFFF3D1,0x000003C3},
- {"0000001000010011111100001111111010011001000010100011000110100100",0x00002BC3,0xFFFFED2D,0x000004A7,0x00001C3F,0xFFFFF609,0x00000364,0x00001C3F,0xFFFFF609,0x00000364},
- {"0000001000010011111100001111111010011001000011100010000010000100",0x000032E1,0xFFFFEA83,0x000004F6,0x00001B37,0xFFFFF842,0x000002F5,0x00001B37,0xFFFFF842,0x000002F5},
- {"0000001000010011111100001111110101000010110110000011000110000100",0x000028E3,0xFFFFF07F,0x00000412,0x00001676,0xFFFFFA68,0x000002BE,0x00001676,0xFFFFFA68,0x000002BE},
- {"0000001000010011111100001111110101000010110100100001000100000100",0x0000444C,0xFFFFDDAD,0x00000712,0x00002634,0xFFFFEF89,0x0000046C,0x00002634,0xFFFFEF89,0x0000046C},
- {"0000001000010011111100001111111010011001000001000001100011000100",0x00003121,0xFFFFEBBB,0x000004C6,0x00001C98,0xFFFFF72B,0x0000032D,0x00001C98,0xFFFFF72B,0x0000032D},
- {"0000001000010011111100001111110101000010110110000100000010100100",0x00002C31,0xFFFFEDC4,0x00000490,0x0000162D,0xFFFFFA8E,0x000002B4,0x0000162D,0xFFFFFA8E,0x000002B4},
- {"0000001000010011111100001111110101000010110110100001100011000100",0x00002749,0xFFFFF112,0x000003FC,0x00001C85,0xFFFFF6B8,0x00000342,0x00001C85,0xFFFFF6B8,0x00000342},
- {"0000001000010011111100001111111010011001000001000100000100000100",0x00003159,0xFFFFEB99,0x000004C2,0x00001BD0,0xFFFFF7CA,0x00000307,0x00001BD0,0xFFFFF7CA,0x00000307},
- {"0000001000010011111100001111111010011001000101100100000101100100",0x00002610,0xFFFFF1FD,0x000003EC,0x000016BE,0xFFFFFA53,0x000002CB,0x000016BE,0xFFFFFA53,0x000002CB},
- {"0000001000010011111100001111111010011001000000100011000110000100",0x000037B5,0xFFFFE63D,0x000005B5,0x00002285,0xFFFFF25D,0x000003F7,0x00002285,0xFFFFF25D,0x000003F7},
- {"0000001000010011111100001111111010011001000010100010100010100100",0x00002FEE,0xFFFFEB47,0x000004EF,0x00001CBE,0xFFFFF64E,0x00000358,0x00001CBE,0xFFFFF64E,0x00000358},
- {"0000001000010011111100001111111010011001000100000101000100000100",0x00002E90,0xFFFFEC48,0x000004C0,0x00001A47,0xFFFFF7D1,0x0000031A,0x00001A47,0xFFFFF7D1,0x0000031A},
- {"0000001000010011111100001111110101000010110110100100000010000100",0x000034AB,0xFFFFE84A,0x00000559,0x00001A72,0xFFFFF79A,0x0000031C,0x00001A72,0xFFFFF79A,0x0000031C},
- {"0000001000010011111100001111111010011001000110000011100010000100",0x00002F7B,0xFFFFECFC,0x0000049C,0x00001814,0xFFFFFA22,0x000002C2,0x00001814,0xFFFFFA22,0x000002C2},
- {"0000001000010011111100001111111010011001000000100001100101100100",0x00003618,0xFFFFE709,0x00000596,0x00001EBF,0xFFFFF482,0x000003A5,0x00001EBF,0xFFFFF482,0x000003A5},
- {"0000001000010011111010101001010011011110000000100100100100000100",0x0000341B,0xFFFFE8B2,0x0000054F,0x00001D26,0xFFFFF578,0x00000388,0x00001D26,0xFFFFF578,0x00000388},
- {"0000001000010011111100001111111010011001000100000010000101000100",0x000030F6,0xFFFFEB89,0x000004CD,0x000019C0,0xFFFFF8CC,0x000002E6,0x000019C0,0xFFFFF8CC,0x000002E6},
- {"0000001000010011111100001111111010011001001010000100000110100100",0x00002B76,0xFFFFEF6C,0x00000444,0x00001563,0xFFFFFBBE,0x0000028D,0x00001563,0xFFFFFBBE,0x0000028D},
- {"0000001000010011111100001111110101000010110110000001100001100100",0x00002BA2,0xFFFFEE31,0x0000047F,0x00001A3D,0xFFFFF7F3,0x00000320,0x00001A3D,0xFFFFF7F3,0x00000320},
- {"0000001000010011111100001111111010011001001011000100100011100100",0x00003545,0xFFFFE87A,0x0000054A,0x00001B5A,0xFFFFF7B0,0x0000030C,0x00001B5A,0xFFFFF7B0,0x0000030C},
- {"0000001000010011111010101001010011011110000001000010100101000100",0x00003879,0xFFFFE73F,0x00000578,0x00001649,0xFFFFFB57,0x00000283,0x00001649,0xFFFFFB57,0x00000283},
- {"0000001000010011111100001111110101000010110110000100000011000100",0x00002772,0xFFFFF0F1,0x00000410,0x0000142F,0xFFFFFBCF,0x00000287,0x0000142F,0xFFFFFBCF,0x00000287},
- {"0000001000010011111100001111110101000010110110100011000110000100",0x00003228,0xFFFFE98E,0x00000535,0x00001F48,0xFFFFF495,0x00000399,0x00001F48,0xFFFFF495,0x00000399},
- {"0000001000010011111100001111111010011001000011100100000011100100",0x00002887,0xFFFFF119,0x000003E8,0x000021AA,0xFFFFF3F5,0x000003A5,0x000021AA,0xFFFFF3F5,0x000003A5},
- {"0000001000010011111100001111110101000010110110100010100010100100",0x0000301F,0xFFFFEBB2,0x000004D2,0x00001C02,0xFFFFF736,0x0000032B,0x00001C02,0xFFFFF736,0x0000032B},
- {"0000001000010011111100001111111010011001000110000010000010100100",0x00002E13,0xFFFFEE3F,0x00000468,0x000016AC,0xFFFFFB32,0x0000029E,0x000016AC,0xFFFFFB32,0x0000029E},
- {"0000001000010011111100001111111010011001000001000100100100100100",0x00003478,0xFFFFE8F9,0x00000538,0x00001DAB,0xFFFFF645,0x00000345,0x00001DAB,0xFFFFF645,0x00000345},
- {"0000001000010011111100001111111010011001000001100000100011000100",0x000030C6,0xFFFFEB6C,0x000004D4,0x0000184A,0xFFFFF934,0x000002E1,0x0000184A,0xFFFFF934,0x000002E1},
- {"0000001000010011111100001111111010011001000010100010000001000100",0x00002F1B,0xFFFFEBD3,0x000004D3,0x000019E7,0xFFFFF813,0x0000030D,0x000019E7,0xFFFFF813,0x0000030D},
- {"0000001000010011111100001111111010011001000000100011100100000100",0x00003214,0xFFFFEAE9,0x000004E0,0x0000178F,0xFFFFFA1C,0x000002B1,0x0000178F,0xFFFFFA1C,0x000002B1},
- {"0000001000010011111100001111110101000010110111000011000101000100",0x0000399C,0xFFFFE738,0x0000055E,0x00001EA1,0xFFFFF5E7,0x0000035A,0x00001EA1,0xFFFFF5E7,0x0000035A},
- {"0000001000010011111100001111111010011001000001100101000011000100",0x00003A01,0xFFFFE5B2,0x000005B6,0x00001D95,0xFFFFF5D2,0x0000036A,0x00001D95,0xFFFFF5D2,0x0000036A},
- {"0000001000010011111100001111111010011001000001000011100010000100",0x0000310D,0xFFFFEB78,0x000004D0,0x00001C06,0xFFFFF76E,0x0000031A,0x00001C06,0xFFFFF76E,0x0000031A},
- {"0000001000010011111100001111111010011001000001100011100001100100",0x00003CD1,0xFFFFE42F,0x000005EB,0x00001933,0xFFFFF91F,0x000002D4,0x00001933,0xFFFFF91F,0x000002D4},
- {"0000001000010011111100001111110101000010110110100011000101100100",0x00003119,0xFFFFEB1B,0x000004E1,0x00001FC7,0xFFFFF46A,0x000003A2,0x00001FC7,0xFFFFF46A,0x000003A2},
- {"0000001000010011111010101001010011011110000001100100100010100100",0x0000390D,0xFFFFE566,0x000005D8,0x00001EC6,0xFFFFF4DC,0x00000391,0x00001EC6,0xFFFFF4DC,0x00000391},
- {"0000001000010011111100001111110101000010110110100001000011000100",0x00003446,0xFFFFE858,0x00000561,0x00001FDB,0xFFFFF3FF,0x000003B9,0x00001FDB,0xFFFFF3FF,0x000003B9},
- {"0000001000010011111100001111111010011001000001000100100100000100",0x000032BA,0xFFFFEA07,0x00000511,0x00001B25,0xFFFFF7C9,0x0000030D,0x00001B25,0xFFFFF7C9,0x0000030D},
- {"0000001000010011111100001111111010011001000011100001100001100100",0x00002CCF,0xFFFFEDE5,0x00000478,0x00001BC8,0xFFFFF761,0x00000326,0x00001BC8,0xFFFFF761,0x00000326},
- {"0000001000010011111100001111111010011001000001100010100110000100",0x0000400E,0xFFFFE1CB,0x00000652,0x00001AF8,0xFFFFF7B9,0x00000312,0x00001AF8,0xFFFFF7B9,0x00000312},
- {"0000001000010011111100001111111010011001000001000000100011100100",0x00002F24,0xFFFFEC2A,0x000004C7,0x00001B94,0xFFFFF748,0x00000333,0x00001B94,0xFFFFF748,0x00000333},
- {"0000001000010011111100001111110101000010110100100001100100100100",0x00003FDA,0xFFFFE1C1,0x0000064B,0x00002427,0xFFFFF180,0x0000040C,0x00002427,0xFFFFF180,0x0000040C},
- {"0000001000010011111100001111111010011001000010100001100011000100",0x00002F6B,0xFFFFEBA7,0x000004DD,0x00001C25,0xFFFFF6C1,0x00000344,0x00001C25,0xFFFFF6C1,0x00000344},
- {"0000001000010011111100001111111010011001000110000010000100000100",0x00002A53,0xFFFFF0EE,0x00000402,0x000017C6,0xFFFFFAA0,0x000002BF,0x000017C6,0xFFFFFAA0,0x000002BF},
- {"0000001000010011111100001111111010011001000100000101000101000100",0x000031F4,0xFFFFEA34,0x00000517,0x000016FF,0xFFFFFA4E,0x000002AC,0x000016FF,0xFFFFFA4E,0x000002AC},
- {"0000001000010011111100001111111010011001001100100010000101000100",0x00002E24,0xFFFFED46,0x00000489,0x00001712,0xFFFFFA5D,0x000002AC,0x00001712,0xFFFFFA5D,0x000002AC},
- {"0000001000010011111100001111111010011001000110000010100000100100",0x000028CD,0xFFFFF0E3,0x0000040E,0x00001606,0xFFFFFB37,0x000002A4,0x00001606,0xFFFFFB37,0x000002A4},
- {"0000001000010011111100001111111010011001000000100010000011000100",0x00003184,0xFFFFEB88,0x000004C3,0x000018DA,0xFFFFF939,0x000002DB,0x000018DA,0xFFFFF939,0x000002DB},
- {"0000001000010011111100001111111010011001000101100010000100100100",0x0000239B,0xFFFFF470,0x00000386,0x00001714,0xFFFFFA9F,0x000002C8,0x00001714,0xFFFFFA9F,0x000002C8},
- {"0000001000010011111100001111110101000010110111000011100011100100",0x00003641,0xFFFFE92B,0x00000515,0x00001BE2,0xFFFFF795,0x0000031B,0x00001BE2,0xFFFFF795,0x0000031B},
- {"0000001000010011111100001111111010011001001011000001000101000100",0x00003278,0xFFFFEA17,0x00000510,0x00001B71,0xFFFFF778,0x0000031D,0x00001B71,0xFFFFF778,0x0000031D},
- {"0000001000010011111100001111111010011001000001100010100001000100",0x000035B9,0xFFFFE8DA,0x0000052D,0x00001A6A,0xFFFFF83B,0x000002FF,0x00001A6A,0xFFFFF83B,0x000002FF},
- {"0000001000010011111100001111111010011001000011100001100011000100",0x00002E5E,0xFFFFED32,0x0000048B,0x00001E7D,0xFFFFF60E,0x0000034E,0x00001E7D,0xFFFFF60E,0x0000034E},
- {"0000001000010011111100001111111010011001000100000001100110100100",0x00003178,0xFFFFEA52,0x00000513,0x00001AD0,0xFFFFF793,0x0000031F,0x00001AD0,0xFFFFF793,0x0000031F},
- {"0000001000010011111100001111110101000010110101000100000100000100",0x00003A2C,0xFFFFE346,0x00000641,0x000023D0,0xFFFFF0CE,0x00000433,0x000023D0,0xFFFFF0CE,0x00000433},
- {"0000001000010011111100001111110101000010110110000001100011000100",0x000028FD,0xFFFFF02A,0x0000042B,0x0000152B,0xFFFFFB90,0x00000289,0x0000152B,0xFFFFFB90,0x00000289},
- {"0000001000010011111100001111111010011001000011100011000010000100",0x000030DE,0xFFFFEBDF,0x000004BE,0x00001CDC,0xFFFFF747,0x0000031C,0x00001CDC,0xFFFFF747,0x0000031C},
- {"0000001000010011111100001111111010011001000000100001100101000100",0x000036CB,0xFFFFE6EE,0x00000596,0x00002096,0xFFFFF3C2,0x000003BB,0x00002096,0xFFFFF3C2,0x000003BB},
- {"0000001000010011111100001111111010011001000011000100100011000100",0x00003172,0xFFFFEAC1,0x000004F4,0x00001C87,0xFFFFF6CD,0x00000337,0x00001C87,0xFFFFF6CD,0x00000337},
- {"0000001000010011111100001111110101000010110100100100100001100100",0x00004A18,0xFFFFDB34,0x00000758,0x0000213C,0xFFFFF3A2,0x000003AC,0x0000213C,0xFFFFF3A2,0x000003AC},
- {"0000001000010011111100001111111010011001000000100010000100000100",0x000031F3,0xFFFFEB73,0x000004C6,0x00001B23,0xFFFFF7CB,0x0000031A,0x00001B23,0xFFFFF7CB,0x0000031A},
- {"0000001000010011111100001111111010011001000010100010100100100100",0x000031C0,0xFFFFEABA,0x000004F7,0x00001A5A,0xFFFFF845,0x000002FF,0x00001A5A,0xFFFFF845,0x000002FF},
- {"0000001000010011111100001111111010011001000100000100100101000100",0x00003B77,0xFFFFE3B3,0x00000623,0x00001BCA,0xFFFFF6F8,0x00000333,0x00001BCA,0xFFFFF6F8,0x00000333},
- {"0000001000010011111100001111111010011001000010100011100101000100",0x000035AF,0xFFFFE76D,0x00000588,0x00001C16,0xFFFFF6AB,0x00000341,0x00001C16,0xFFFFF6AB,0x00000341},
- {"0000001000010011111010101001010011011110000001000011100011000100",0x000032AD,0xFFFFEA8E,0x000004F8,0x00001A3A,0xFFFFF832,0x0000030E,0x00001A3A,0xFFFFF832,0x0000030E},
- {"0000001000010011111100001111111010011001000100000100100010000100",0x00002E92,0xFFFFEBD2,0x000004DA,0x00001E04,0xFFFFF51E,0x0000038A,0x00001E04,0xFFFFF51E,0x0000038A},
- {"0000001000010011111100001111110101000010110101000100000010100100",0x00003E57,0xFFFFE0F7,0x0000068F,0x000021F1,0xFFFFF1C6,0x00000411,0x000021F1,0xFFFFF1C6,0x00000411},
- {"0000001000010011111100001111111010011001000010000010000110100100",0x00003598,0xFFFFE8BB,0x00000535,0x00001B62,0xFFFFF764,0x00000326,0x00001B62,0xFFFFF764,0x00000326},
- {"0000001000010011111100001111111010011001000010100011100010000100",0x00002B15,0xFFFFEDEC,0x00000487,0x00001E8B,0xFFFFF4AB,0x0000039F,0x00001E8B,0xFFFFF4AB,0x0000039F},
- {"0000001000010011111010101001010011011110000001100000100100000100",0x0000267E,0xFFFFF1A7,0x000003E1,0x000021C1,0xFFFFF2E9,0x000003EA,0x000021C1,0xFFFFF2E9,0x000003EA},
- {"0000001000010011111010101001010011011110000000100011100110100100",0x00002ED7,0xFFFFEC88,0x000004A6,0x00001DEC,0xFFFFF57C,0x00000378,0x00001DEC,0xFFFFF57C,0x00000378},
- {"0000001000010011111010101001010011011110000001000100000110100100",0x00003365,0xFFFFE946,0x00000536,0x000019E9,0xFFFFF7E0,0x0000031D,0x000019E9,0xFFFFF7E0,0x0000031D},
- {"0000001000010011111100001111111010011001000110000001100011100100",0x000029A4,0xFFFFF0FD,0x000003FE,0x0000163F,0xFFFFFB68,0x00000299,0x0000163F,0xFFFFFB68,0x00000299},
- {"0000001000010011111010101001010011011110000000100001100100000100",0x0000348D,0xFFFFE9F7,0x00000509,0x000017A0,0xFFFFFA59,0x000002B6,0x000017A0,0xFFFFFA59,0x000002B6},
- {"0000001000010011111100001111111010011001000001100001000011000100",0x00003144,0xFFFFEB23,0x000004D9,0x00001C9B,0xFFFFF664,0x00000351,0x00001C9B,0xFFFFF664,0x00000351},
- {"0000001000010011111010101001010011011110000001100010000011100100",0x00002E95,0xFFFFEE1A,0x00000463,0x00001707,0xFFFFFAB7,0x000002B3,0x00001707,0xFFFFFAB7,0x000002B3},
- {"0000001000010011111100001111110101000010110101000001100001100100",0x0000489C,0xFFFFDA43,0x000007AC,0x00002866,0xFFFFED6B,0x000004D0,0x00002866,0xFFFFED6B,0x000004D0},
- {"0000001000010011111100001111111010011001000101100001100001000100",0x00002895,0xFFFFF10A,0x0000040A,0x000013E9,0xFFFFFC9F,0x0000026E,0x000013E9,0xFFFFFC9F,0x0000026E},
- {"0000001000010011111100001111111010011001000001100001100101100100",0x000033A0,0xFFFFE9B1,0x00000510,0x00001D96,0xFFFFF5AE,0x0000036F,0x00001D96,0xFFFFF5AE,0x0000036F},
- {"0000001000010011111100001111111010011001000010000011100110000100",0x0000327C,0xFFFFEAEA,0x000004DD,0x00001D45,0xFFFFF649,0x00000356,0x00001D45,0xFFFFF649,0x00000356},
- {"0000001000010011111010101001010011011110000000100100100010100100",0x000031DF,0xFFFFE9AB,0x0000052F,0x000019C8,0xFFFFF7B7,0x00000321,0x000019C8,0xFFFFF7B7,0x00000321},
- {"0000001000010011111100001111111010011001000101100100000010100100",0x00002BCC,0xFFFFEEF4,0x0000045C,0x000015CD,0xFFFFFB58,0x0000029E,0x000015CD,0xFFFFFB58,0x0000029E},
- {"0000001000010011111100001111111010011001000001100011100011100100",0x00003534,0xFFFFEA10,0x000004EB,0x00001BB6,0xFFFFF7B9,0x00000314,0x00001BB6,0xFFFFF7B9,0x00000314},
- {"0000001000010011111100001111111010011001000001000001100110000100",0x00002F4F,0xFFFFEC35,0x000004B9,0x0000205D,0xFFFFF47F,0x00000392,0x0000205D,0xFFFFF47F,0x00000392},
- {"0000001000010011111100001111111010011001000011000010000010100100",0x00003295,0xFFFFEB1C,0x000004D6,0x000019C1,0xFFFFF931,0x000002D5,0x000019C1,0xFFFFF931,0x000002D5},
- {"0000001000010011111100001111111010011001000000100100000101000100",0x00003557,0xFFFFE7F7,0x00000568,0x00002342,0xFFFFF1F9,0x00000405,0x00002342,0xFFFFF1F9,0x00000405},
- {"0000001000010011111100001111111010011001000001000101000011000100",0x00003487,0xFFFFE872,0x0000055D,0x000019D7,0xFFFFF823,0x0000030C,0x000019D7,0xFFFFF823,0x0000030C},
- {"0000001000010011111100001111111010011001001011000011100101000100",0x0000378F,0xFFFFE7A6,0x00000566,0x00001875,0xFFFFFA04,0x000002AF,0x00001875,0xFFFFFA04,0x000002AF},
- {"0000001000010011111010101001010011011110000000100011000011100100",0x00002A67,0xFFFFF157,0x000003DD,0x000017BD,0xFFFFFA53,0x000002D1,0x000017BD,0xFFFFFA53,0x000002D1},
- {"0000001000010011111100001111110101000010110100100010000011100100",0x000030B5,0xFFFFEB32,0x000004D9,0x00002129,0xFFFFF38A,0x000003BB,0x00002129,0xFFFFF38A,0x000003BB},
- {"0000001000010011111100001111111010011001000001100001000010100100",0x00003786,0xFFFFE703,0x00000584,0x00001D63,0xFFFFF5DC,0x00000367,0x00001D63,0xFFFFF5DC,0x00000367},
- {"0000001000010011111100001111110101000010110110100010000011000100",0x0000346A,0xFFFFE93E,0x0000052C,0x00001B27,0xFFFFF79D,0x0000031F,0x00001B27,0xFFFFF79D,0x0000031F},
- {"0000001000010011111100001111111010011001000011100011000000100100",0x0000294E,0xFFFFF0A5,0x00000409,0x00001928,0xFFFFF93B,0x000002E6,0x00001928,0xFFFFF93B,0x000002E6},
- {"0000001000010011111100001111110101000010110101000001000011000100",0x00003E09,0xFFFFE0FF,0x00000694,0x000025A0,0xFFFFEF0F,0x0000048F,0x000025A0,0xFFFFEF0F,0x0000048F},
- {"0000001000010011111100001111111010011001000010100010100101100100",0x00003197,0xFFFFEA06,0x00000520,0x00001B42,0xFFFFF73B,0x0000032A,0x00001B42,0xFFFFF73B,0x0000032A},
- {"0000001000010011111100001111111010011001000101100001100001100100",0x000022CB,0xFFFFF3FC,0x000003A3,0x00001449,0xFFFFFBD0,0x00000297,0x00001449,0xFFFFFBD0,0x00000297},
- {"0000001000010011111100001111110101000010110110000010100101000100",0x00002A79,0xFFFFEFD2,0x00000433,0x00001585,0xFFFFFB92,0x0000028E,0x00001585,0xFFFFFB92,0x0000028E},
- {"0000001000010011111100001111111010011001000011000100000110000100",0x00003249,0xFFFFEA92,0x000004F4,0x000019CB,0xFFFFF8CF,0x000002E1,0x000019CB,0xFFFFF8CF,0x000002E1},
- {"0000001000010011111010101001010011011110000000100001100010100100",0x00002CEA,0xFFFFEE46,0x00000463,0x00001A5E,0xFFFFF83C,0x0000030D,0x00001A5E,0xFFFFF83C,0x0000030D},
- {"0000001000010011111100001111110101000010110111000101000101000100",0x00003AE2,0xFFFFE422,0x00000600,0x00001C65,0xFFFFF62F,0x0000034B,0x00001C65,0xFFFFF62F,0x0000034B},
- {"0000001000010011111100001111111010011001000110000001000110000100",0x000026A0,0xFFFFF1C2,0x000003F8,0x000010E5,0xFFFFFE56,0x0000022A,0x000010E5,0xFFFFFE56,0x0000022A},
- {"0000001000010011111100001111111010011001001010000010100110100100",0x00002A7B,0xFFFFF063,0x00000417,0x000016FC,0xFFFFFAD7,0x000002B1,0x000016FC,0xFFFFFAD7,0x000002B1},
- {"0000001000010011111100001111111010011001001100100001000011000100",0x00003092,0xFFFFEAB9,0x00000507,0x00001AE3,0xFFFFF783,0x00000323,0x00001AE3,0xFFFFF783,0x00000323},
- {"0000001000010011111100001111111010011001000001000011100011100100",0x00003265,0xFFFFEBE8,0x000004AA,0x00001D65,0xFFFFF73F,0x00000321,0x00001D65,0xFFFFF73F,0x00000321},
- {"0000001000010011111010101001010011011110000000100011000010000100",0x00002F14,0xFFFFECC2,0x000004A4,0x00001A8D,0xFFFFF7F3,0x0000031D,0x00001A8D,0xFFFFF7F3,0x0000031D},
- {"0000001000010011111100001111110101000010110111000001000011100100",0x000035FB,0xFFFFE6D3,0x000005AC,0x00001B19,0xFFFFF712,0x00000338,0x00001B19,0xFFFFF712,0x00000338},
- {"0000001000010011111100001111110101000010110110100010000100100100",0x00003519,0xFFFFE8CC,0x0000053A,0x00001A0F,0xFFFFF86E,0x000002F5,0x00001A0F,0xFFFFF86E,0x000002F5},
- {"0000001000010011111100001111111010011001001011000010000101000100",0x0000364C,0xFFFFE879,0x00000541,0x00001A42,0xFFFFF8BA,0x000002E2,0x00001A42,0xFFFFF8BA,0x000002E2},
- {"0000001000010011111010101001010011011110000000100001100011000100",0x000029BA,0xFFFFF09A,0x00000408,0x00001986,0xFFFFF8D9,0x000002FE,0x00001986,0xFFFFF8D9,0x000002FE},
- {"0000001000010011111100001111110101000010110110100011100011100100",0x00003507,0xFFFFE961,0x00000518,0x00001B79,0xFFFFF775,0x00000325,0x00001B79,0xFFFFF775,0x00000325},
- {"0000001000010011111100001111110101000010110111000011000110000100",0x00003AD5,0xFFFFE415,0x00000613,0x00001CB4,0xFFFFF66D,0x00000348,0x00001CB4,0xFFFFF66D,0x00000348},
- {"0000001000010011111100001111111010011001000101100100000011100100",0x000023D1,0xFFFFF42B,0x0000038F,0x00001546,0xFFFFFBA0,0x0000029F,0x00001546,0xFFFFFBA0,0x0000029F},
- {"0000001000010011111100001111111010011001000010100001100100100100",0x0000399E,0xFFFFE518,0x000005E7,0x00001990,0xFFFFF871,0x000002FB,0x00001990,0xFFFFF871,0x000002FB},
- {"0000001000010011111100001111110101000010110110000010100101100100",0x00002EDE,0xFFFFEC93,0x000004B8,0x0000152C,0xFFFFFBB3,0x0000027E,0x0000152C,0xFFFFFBB3,0x0000027E},
- {"0000001000010011111010101001010011011110000001000010100101100100",0x00003140,0xFFFFEBC9,0x000004BB,0x000016BE,0xFFFFFB0A,0x00000288,0x000016BE,0xFFFFFB0A,0x00000288},
- {"0000001000010011111100001111111010011001000001100100000001100100",0x000030F6,0xFFFFEB89,0x000004CD,0x0000185D,0xFFFFF95A,0x000002D9,0x0000185D,0xFFFFF95A,0x000002D9},
- {"0000001000010011111100001111111010011001000000100011100001000100",0x0000389C,0xFFFFE65A,0x000005A2,0x0000195D,0xFFFFF8C8,0x000002E8,0x0000195D,0xFFFFF8C8,0x000002E8},
- {"0000001000010011111100001111111010011001000001000010000100000100",0x0000362B,0xFFFFE9EC,0x000004F6,0x00001605,0xFFFFFC1C,0x00000263,0x00001605,0xFFFFFC1C,0x00000263},
- {"0000001000010011111100001111111010011001001010100001100101100100",0x00002946,0xFFFFF04F,0x00000426,0x000015BA,0xFFFFFB2F,0x000002A3,0x000015BA,0xFFFFFB2F,0x000002A3},
- {"0000001000010011111100001111111010011001000010000010000110000100",0x0000368E,0xFFFFE837,0x0000054A,0x000017D7,0xFFFFF9EB,0x000002BA,0x000017D7,0xFFFFF9EB,0x000002BA},
- {"0000001000010011111100001111110101000010110110100010100001000100",0x00002E74,0xFFFFEBE8,0x000004DA,0x00001DD6,0xFFFFF57E,0x00000379,0x00001DD6,0xFFFFF57E,0x00000379},
- {"0000001000010011111100001111111010011001000001000001100101000100",0x0000322D,0xFFFFEAA8,0x000004F5,0x00001B55,0xFFFFF7DD,0x0000030B,0x00001B55,0xFFFFF7DD,0x0000030B},
- {"0000001000010011111100001111111010011001000110000001100100000100",0x00002A29,0xFFFFF07B,0x00000416,0x00001671,0xFFFFFB3E,0x0000029F,0x00001671,0xFFFFFB3E,0x0000029F},
- {"0000001000010011111100001111110101000010110110100010000100000100",0x000030F6,0xFFFFEB89,0x000004CD,0x00001815,0xFFFFF9AE,0x000002C9,0x00001815,0xFFFFF9AE,0x000002C9},
- {"0000001000010011111100001111111010011001000011100001000011100100",0x0000265F,0xFFFFF1CB,0x000003D5,0x00001ED2,0xFFFFF539,0x0000037A,0x00001ED2,0xFFFFF539,0x0000037A},
- {"0000001000010011111100001111111010011001000101100010000110000100",0x000027A8,0xFFFFF10D,0x00000413,0x000014B5,0xFFFFFBA1,0x00000299,0x000014B5,0xFFFFFBA1,0x00000299},
- {"0000001000010011111100001111111010011001000001000011000001100100",0x00002CEE,0xFFFFEDF6,0x00000476,0x00001A99,0xFFFFF83E,0x00000305,0x00001A99,0xFFFFF83E,0x00000305},
- {"0000001000010011111100001111111010011001000001100100000011000100",0x0000346C,0xFFFFEA17,0x000004EF,0x00001D38,0xFFFFF69F,0x0000033D,0x00001D38,0xFFFFF69F,0x0000033D},
- {"0000001000010011111100001111110101000010110110100010100101000100",0x00002DBB,0xFFFFED35,0x00000490,0x000018C1,0xFFFFF930,0x000002DA,0x000018C1,0xFFFFF930,0x000002DA},
- {"0000001000010011111100001111111010011001000001000010100100100100",0x000038DF,0xFFFFE8A7,0x0000051E,0x00001B59,0xFFFFF915,0x000002D3,0x00001B59,0xFFFFF915,0x000002D3},
- {"0000001000010011111100001111111010011001000010000000100101000100",0x00003384,0xFFFFE979,0x00000524,0x00001AF3,0xFFFFF74C,0x0000032F,0x00001AF3,0xFFFFF74C,0x0000032F},
- {"0000001000010011111100001111111010011001000110000001100001100100",0x0000258B,0xFFFFF2AE,0x000003CB,0x0000190C,0xFFFFF93E,0x000002EF,0x0000190C,0xFFFFF93E,0x000002EF},
- {"0000001000010011111100001111111010011001000100000011100010000100",0x000034F1,0xFFFFE84B,0x0000055E,0x00001CB8,0xFFFFF670,0x0000034A,0x00001CB8,0xFFFFF670,0x0000034A},
- {"0000001000010011111100001111111010011001000011000010000100000100",0x000030FB,0xFFFFECD2,0x00000488,0x00001BF4,0xFFFFF821,0x00000302,0x00001BF4,0xFFFFF821,0x00000302},
- {"0000001000010011111100001111111010011001000001100011000001000100",0x000036A6,0xFFFFE815,0x00000556,0x000018FD,0xFFFFF925,0x000002DF,0x000018FD,0xFFFFF925,0x000002DF},
- {"0000001000010011111010101001010011011110000000100011000001000100",0x0000302A,0xFFFFEB79,0x000004E0,0x00001C11,0xFFFFF694,0x00000358,0x00001C11,0xFFFFF694,0x00000358},
- {"0000001000010011111100001111111010011001000110000001000100100100",0x00002555,0xFFFFF2C4,0x000003CB,0x000017E3,0xFFFFFA1F,0x000002CB,0x000017E3,0xFFFFFA1F,0x000002CB},
- {"0000001000010011111100001111111010011001000010100011000101100100",0x000032A3,0xFFFFE933,0x00000544,0x000019D3,0xFFFFF81A,0x00000306,0x000019D3,0xFFFFF81A,0x00000306},
- {"0000001000010011111100001111110101000010110110000101000100000100",0x00002B91,0xFFFFED81,0x000004A9,0x0000158B,0xFFFFFAB9,0x000002AC,0x0000158B,0xFFFFFAB9,0x000002AC},
- {"0000001000010011111100001111111010011001000011100010000011000100",0x00003537,0xFFFFE912,0x0000052C,0x00001C8A,0xFFFFF754,0x0000031B,0x00001C8A,0xFFFFF754,0x0000031B},
- {"0000001000010011111010101001010011011110000001100011000110000100",0x000032E1,0xFFFFEA5A,0x000004F9,0x000017B4,0xFFFFF9D9,0x000002C2,0x000017B4,0xFFFFF9D9,0x000002C2},
- {"0000001000010011111100001111110101000010110100100001000011000100",0x00003B76,0xFFFFE330,0x00000636,0x000026FB,0xFFFFEF06,0x00000481,0x000026FB,0xFFFFEF06,0x00000481},
- {"0000001000010011111100001111111010011001000001000010000101000100",0x0000320C,0xFFFFEB84,0x000004C3,0x00001A3A,0xFFFFF8E9,0x000002DF,0x00001A3A,0xFFFFF8E9,0x000002DF},
- {"0000001000010011111100001111111010011001000000100011100110000100",0x0000317D,0xFFFFEA1F,0x00000515,0x00002100,0xFFFFF31B,0x000003DD,0x00002100,0xFFFFF31B,0x000003DD},
- {"0000001000010011111100001111110101000010110101000011000101100100",0x00003DCB,0xFFFFE0B4,0x000006B4,0x00002160,0xFFFFF269,0x000003F0,0x00002160,0xFFFFF269,0x000003F0},
- {"0000001000010011111100001111111010011001000101100001100011000100",0x00002737,0xFFFFF218,0x000003E1,0x000015B5,0xFFFFFB8F,0x0000029C,0x000015B5,0xFFFFFB8F,0x0000029C},
- {"0000001000010011111010101001010011011110000000100011000110000100",0x0000318F,0xFFFFEB3F,0x000004D8,0x00001938,0xFFFFF8E9,0x000002EB,0x00001938,0xFFFFF8E9,0x000002EB},
- {"0000001000010011111100001111111010011001000100000100100011000100",0x000031BD,0xFFFFE9DE,0x00000527,0x000018A7,0xFFFFF8CA,0x000002ED,0x000018A7,0xFFFFF8CA,0x000002ED},
- {"0000001000010011111100001111110101000010110110100011100010000100",0x00002F77,0xFFFFEC2F,0x000004B4,0x00001D25,0xFFFFF61B,0x0000035D,0x00001D25,0xFFFFF61B,0x0000035D},
- {"0000001000010011111100001111111010011001000011100100100100000100",0x00002CCA,0xFFFFEDB3,0x0000047C,0x00001FBD,0xFFFFF4A7,0x00000391,0x00001FBD,0xFFFFF4A7,0x00000391},
- {"0000001000010011111100001111110101000010110101000011100010100100",0x00003FF6,0xFFFFE058,0x000006A2,0x000024CD,0xFFFFF026,0x00000452,0x000024CD,0xFFFFF026,0x00000452},
- {"0000001000010011111100001111111010011001000010100011100011100100",0x00003161,0xFFFFEAC8,0x000004F3,0x00001BB6,0xFFFFF72A,0x0000032B,0x00001BB6,0xFFFFF72A,0x0000032B},
- {"0000001000010011111100001111110101000010110110000011100010100100",0x00002EA0,0xFFFFECA6,0x000004B7,0x000018C2,0xFFFFF94E,0x000002E1,0x000018C2,0xFFFFF94E,0x000002E1},
- {"0000001000010011111100001111111010011001000110000010000110000100",0x00002F62,0xFFFFEC9E,0x000004B8,0x00001531,0xFFFFFBCD,0x00000285,0x00001531,0xFFFFFBCD,0x00000285},
- {"0000001000010011111100001111111010011001000001000100000010100100",0x00003013,0xFFFFEBD6,0x000004C2,0x00001B01,0xFFFFF802,0x000002FF,0x00001B01,0xFFFFF802,0x000002FF},
- {"0000001000010011111100001111111010011001000110000011000001100100",0x00002972,0xFFFFF08D,0x00000417,0x00001A32,0xFFFFF8A4,0x00000305,0x00001A32,0xFFFFF8A4,0x00000305},
- {"0000001000010011111100001111110101000010110110000010000011100100",0x00002E95,0xFFFFED94,0x00000487,0x00001529,0xFFFFFC26,0x00000271,0x00001529,0xFFFFFC26,0x00000271},
- {"0000001000010011111100001111111010011001000010100001000010000100",0x00002D6A,0xFFFFEC79,0x000004C1,0x00001AE2,0xFFFFF725,0x00000337,0x00001AE2,0xFFFFF725,0x00000337},
- {"0000001000010011111100001111111010011001000000100001100010000100",0x000036B4,0xFFFFE704,0x00000591,0x00001E7E,0xFFFFF51C,0x00000383,0x00001E7E,0xFFFFF51C,0x00000383},
- {"0000001000010011111100001111111010011001000001000001100001000100",0x00002A6F,0xFFFFEF70,0x00000443,0x00001BAA,0xFFFFF752,0x00000336,0x00001BAA,0xFFFFF752,0x00000336},
- {"0000001000010011111100001111111010011001000110000011100101000100",0x00002C66,0xFFFFEF5F,0x0000043A,0x000019F7,0xFFFFF931,0x000002EC,0x000019F7,0xFFFFF931,0x000002EC},
- {"0000001000010011111010101001010011011110000001100011000111000100",0x00003852,0xFFFFE6AB,0x00000590,0x000019C1,0xFFFFF8B1,0x000002E5,0x000019C1,0xFFFFF8B1,0x000002E5},
- {"0000001000010011111100001111110101000010110110100011000100100100",0x00003521,0xFFFFE932,0x00000523,0x000018A9,0xFFFFF96B,0x000002D0,0x000018A9,0xFFFFF96B,0x000002D0},
- {"0000001000010011111100001111111010011001000001100010000101100100",0x000031B9,0xFFFFEB36,0x000004D0,0x00001D65,0xFFFFF612,0x0000035D,0x00001D65,0xFFFFF612,0x0000035D},
- {"0000001000010011111100001111110101000010110101000001000001100100",0x00003ED0,0xFFFFE135,0x00000679,0x00002351,0xFFFFF0FE,0x00000433,0x00002351,0xFFFFF0FE,0x00000433},
- {"0000001000010011111100001111111010011001000010100010000011100100",0x000033ED,0xFFFFE91A,0x00000541,0x00001C93,0xFFFFF6A0,0x0000034A,0x00001C93,0xFFFFF6A0,0x0000034A},
- {"0000001000010011111010101001010011011110000000100001100001000100",0x0000356F,0xFFFFE8F7,0x00000530,0x000016BF,0xFFFFFA85,0x000002AB,0x000016BF,0xFFFFFA85,0x000002AB},
- {"0000001000010011111100001111111010011001000110000100000011100100",0x00002304,0xFFFFF4F3,0x00000364,0x000017CC,0xFFFFFA41,0x000002CA,0x000017CC,0xFFFFFA41,0x000002CA},
- {"0000001000010011111100001111111010011001000101100001000101100100",0x00002887,0xFFFFEFD7,0x00000450,0x00001474,0xFFFFFB94,0x00000299,0x00001474,0xFFFFFB94,0x00000299},
- {"0000001000010011111100001111111010011001000001100011000001100100",0x00003D0B,0xFFFFE416,0x000005EF,0x00001C7E,0xFFFFF71D,0x00000325,0x00001C7E,0xFFFFF71D,0x00000325},
- {"0000001000010011111100001111111010011001000010000001000011100100",0x00003185,0xFFFFEAFA,0x000004E4,0x00001A12,0xFFFFF83C,0x00000303,0x00001A12,0xFFFFF83C,0x00000303},
- {"0000001000010011111100001111111010011001000010100001100101000100",0x00003032,0xFFFFEAE6,0x000004FC,0x00001B2A,0xFFFFF73F,0x0000032B,0x00001B2A,0xFFFFF73F,0x0000032B},
- {"0000001000010011111100001111110101000010110110000011100011000100",0x00002691,0xFFFFF22D,0x000003D6,0x00001700,0xFFFFFA6E,0x000002C0,0x00001700,0xFFFFFA6E,0x000002C0},
- {"0000001000010011111100001111111010011001000000100001100010100100",0x00002B2F,0xFFFFEEC4,0x0000044B,0x0000215F,0xFFFFF33F,0x000003D2,0x0000215F,0xFFFFF33F,0x000003D2},
- {"0000001000010011111100001111111010011001000010100100000110000100",0x000034AA,0xFFFFE706,0x000005B1,0x00001B28,0xFFFFF6B5,0x00000349,0x00001B28,0xFFFFF6B5,0x00000349},
- {"0000001000010011111100001111110101000010110110100010100101100100",0x0000307E,0xFFFFEB38,0x000004E6,0x00001A22,0xFFFFF83F,0x00000300,0x00001A22,0xFFFFF83F,0x00000300},
- {"0000001000010011111100001111111010011001000001100001100010100100",0x000038D6,0xFFFFE6D8,0x0000057C,0x00001B24,0xFFFFF7E4,0x00000307,0x00001B24,0xFFFFF7E4,0x00000307},
- {"0000001000010011111100001111111010011001000110000011000001000100",0x00002757,0xFFFFF1E8,0x000003DD,0x000017F5,0xFFFFFA15,0x000002C8,0x000017F5,0xFFFFFA15,0x000002C8},
- {"0000001000010011111100001111111010011001000010000011000110000100",0x000031FC,0xFFFFEB3E,0x000004CE,0x00001B4C,0xFFFFF7AD,0x00000319,0x00001B4C,0xFFFFF7AD,0x00000319},
- {"0000001000010011111100001111111010011001001100000001100001100100",0x00002933,0xFFFFF073,0x0000040E,0x00001C3C,0xFFFFF701,0x0000033C,0x00001C3C,0xFFFFF701,0x0000033C},
- {"0000001000010011111100001111110101000010110100100001100010100100",0x000040BB,0xFFFFE066,0x0000069A,0x0000257F,0xFFFFF08A,0x00000435,0x0000257F,0xFFFFF08A,0x00000435},
- {"0000001000010011111100001111111010011001000100000001000010100100",0x0000305B,0xFFFFEB9B,0x000004CB,0x00001996,0xFFFFF846,0x00000308,0x00001996,0xFFFFF846,0x00000308},
- {"0000001000010011111100001111111010011001000001100100100010000100",0x000039C0,0xFFFFE5D3,0x000005B0,0x00001A8D,0xFFFFF7DA,0x00000313,0x00001A8D,0xFFFFF7DA,0x00000313},
- {"0000001000010011111010101001010011011110000000100001000010100100",0x00002E23,0xFFFFED3F,0x0000048F,0x0000189D,0xFFFFF94C,0x000002DE,0x0000189D,0xFFFFF94C,0x000002DE},
- {"0000001000010011111010101001010011011110000000100001100110000100",0x0000332B,0xFFFFE9F1,0x00000516,0x000018E6,0xFFFFF8FE,0x000002EC,0x000018E6,0xFFFFF8FE,0x000002EC},
- {"0000001000010011111100001111111010011001000010000011100011000100",0x000034A0,0xFFFFEA44,0x000004E4,0x00001ECD,0xFFFFF5B4,0x00000364,0x00001ECD,0xFFFFF5B4,0x00000364},
- {"0000001000010011111100001111110101000010110100100100000100000100",0x0000448C,0xFFFFDF34,0x000006A8,0x0000231C,0xFFFFF286,0x000003D9,0x0000231C,0xFFFFF286,0x000003D9},
- {"0000001000010011111010101001010011011110000001100010000101000100",0x00002D8C,0xFFFFEE65,0x00000456,0x000018B1,0xFFFFF9C8,0x000002C8,0x000018B1,0xFFFFF9C8,0x000002C8},
- {"0000001000010011111100001111111010011001000001100001100100000100",0x00003527,0xFFFFE9BF,0x000004FD,0x00001D23,0xFFFFF69F,0x00000342,0x00001D23,0xFFFFF69F,0x00000342},
- {"0000001000010011111100001111110101000010110111000011100010100100",0x00002C51,0xFFFFEDC3,0x00000483,0x00001BE0,0xFFFFF720,0x0000032D,0x00001BE0,0xFFFFF720,0x0000032D},
- {"0000001000010011111100001111111010011001000010100011000001000100",0x00002C6C,0xFFFFECEB,0x000004B7,0x00001C86,0xFFFFF5E7,0x00000371,0x00001C86,0xFFFFF5E7,0x00000371},
- {"0000001000010011111100001111111010011001000001000101000101000100",0x000037CF,0xFFFFE6BE,0x00000599,0x000018CD,0xFFFFF967,0x000002C7,0x000018CD,0xFFFFF967,0x000002C7},
- {"0000001000010011111100001111111010011001000100000011000101100100",0x00002E6F,0xFFFFED1D,0x0000048E,0x00001ADC,0xFFFFF7F4,0x0000030E,0x00001ADC,0xFFFFF7F4,0x0000030E},
- {"0000001000010011111100001111110101000010110101000010100110000100",0x00003FF3,0xFFFFDF13,0x000006F9,0x000025BF,0xFFFFEEEE,0x00000497,0x000025BF,0xFFFFEEEE,0x00000497},
- {"0000001000010011111100001111110101000010110111000101000100000100",0x00004135,0xFFFFDF97,0x000006CC,0x00001D52,0xFFFFF541,0x00000383,0x00001D52,0xFFFFF541,0x00000383},
- {"0000001000010011111100001111110101000010110111000010000011100100",0x00002EA9,0xFFFFEDDB,0x0000045F,0x0000197C,0xFFFFF8E1,0x000002F0,0x0000197C,0xFFFFF8E1,0x000002F0},
- {"0000001000010011111010101001010011011110000001000011000010000100",0x0000345C,0xFFFFE922,0x00000532,0x00001922,0xFFFFF8C7,0x000002F1,0x00001922,0xFFFFF8C7,0x000002F1},
- {"0000001000010011111100001111111010011001000001100100000100100100",0x000035C4,0xFFFFE8FE,0x00000521,0x00001C87,0xFFFFF6F3,0x00000330,0x00001C87,0xFFFFF6F3,0x00000330},
- {"0000001000010011111100001111110101000010110110000011000101100100",0x00002888,0xFFFFF08A,0x0000041E,0x0000150F,0xFFFFFB87,0x00000291,0x0000150F,0xFFFFFB87,0x00000291},
- {"0000001000010011111100001111111010011001000010100001000100100100",0x000035E9,0xFFFFE657,0x000005CC,0x00001BD6,0xFFFFF664,0x00000355,0x00001BD6,0xFFFFF664,0x00000355},
- {"0000001000010011111100001111111010011001000101100100100011100100",0x00002F94,0xFFFFEBD0,0x000004E5,0x00001333,0xFFFFFCA7,0x00000266,0x00001333,0xFFFFFCA7,0x00000266},
- {"0000001000010011111100001111111010011001000110000001100101100100",0x000029E7,0xFFFFF009,0x00000433,0x0000144A,0xFFFFFC37,0x0000027D,0x0000144A,0xFFFFFC37,0x0000027D},
- {"0000001000010011111100001111111010011001001011000001100101000100",0x00003418,0xFFFFE979,0x00000521,0x00001D33,0xFFFFF66B,0x0000034A,0x00001D33,0xFFFFF66B,0x0000034A},
- {"0000001000010011111010101001010011011110000001000100000011100100",0x00003656,0xFFFFE79D,0x0000057A,0x000017C2,0xFFFFF992,0x000002D4,0x000017C2,0xFFFFF992,0x000002D4},
- {"0000001000010011111100001111111010011001000011000100000011000100",0x00002EB2,0xFFFFECFE,0x00000493,0x00001F2A,0xFFFFF543,0x0000037B,0x00001F2A,0xFFFFF543,0x0000037B},
- {"0000001000010011111100001111111010011001000000100001000100100100",0x00002FC1,0xFFFFEB3F,0x000004E8,0x00001CD0,0xFFFFF5F7,0x00000364,0x00001CD0,0xFFFFF5F7,0x00000364},
- {"0000001000010011111100001111111010011001000011000001000100100100",0x0000307B,0xFFFFEB66,0x000004DE,0x00001953,0xFFFFF8ED,0x000002E4,0x00001953,0xFFFFF8ED,0x000002E4},
- {"0000001000010011111100001111110101000010110110100001100010000100",0x00002CAA,0xFFFFED07,0x000004AC,0x0000251C,0xFFFFF086,0x0000044D,0x0000251C,0xFFFFF086,0x0000044D},
- {"0000001000010011111010101001010011011110000001000011100101000100",0x00002C94,0xFFFFEE5F,0x0000045B,0x000018D7,0xFFFFF900,0x000002EB,0x000018D7,0xFFFFF900,0x000002EB},
- {"0000001000010011111100001111111010011001000000100001100001100100",0x000031F1,0xFFFFE9BE,0x0000052E,0x00001DDF,0xFFFFF558,0x00000380,0x00001DDF,0xFFFFF558,0x00000380},
- {"0000001000010011111100001111111010011001000011100101000011000100",0x00002603,0xFFFFF1E9,0x000003DA,0x00001B37,0xFFFFF75A,0x0000032F,0x00001B37,0xFFFFF75A,0x0000032F},
- {"0000001000010011111100001111110101000010110110100011000001000100",0x00003992,0xFFFFE4F9,0x000005EB,0x00001775,0xFFFFF9B8,0x000002C2,0x00001775,0xFFFFF9B8,0x000002C2},
- {"0000001000010011111100001111111010011001000110000100100101100100",0x000029DA,0xFFFFF052,0x0000041F,0x000016E2,0xFFFFFA99,0x000002BB,0x000016E2,0xFFFFFA99,0x000002BB},
- {"0000001000010011111100001111111010011001000100000001000001100100",0x00002FF2,0xFFFFEB8F,0x000004DF,0x00001AF6,0xFFFFF7A1,0x00000321,0x00001AF6,0xFFFFF7A1,0x00000321},
- {"0000001000010011111100001111111010011001000101100000100011100100",0x00002590,0xFFFFF222,0x000003EE,0x0000130B,0xFFFFFCC9,0x00000268,0x0000130B,0xFFFFFCC9,0x00000268},
- {"0000001000010011111100001111111010011001000000100100000001100100",0x000038A2,0xFFFFE65F,0x000005A2,0x000018B1,0xFFFFF917,0x000002E1,0x000018B1,0xFFFFF917,0x000002E1},
- {"0000001000010011111100001111110101000010110111000100100011100100",0x000035FD,0xFFFFE73C,0x0000058D,0x00001BB3,0xFFFFF6E1,0x00000337,0x00001BB3,0xFFFFF6E1,0x00000337},
- {"0000001000010011111100001111111010011001000100000011100011000100",0x00002AB7,0xFFFFEF98,0x00000429,0x00001F35,0xFFFFF539,0x0000037C,0x00001F35,0xFFFFF539,0x0000037C},
- {"0000001000010011111100001111111010011001000010100000100101000100",0x000034BA,0xFFFFE73D,0x000005A6,0x000018A6,0xFFFFF888,0x000002FB,0x000018A6,0xFFFFF888,0x000002FB},
- {"0000001000010011111100001111111010011001000001100011100001000100",0x000032EA,0xFFFFEA78,0x000004F4,0x00001AB6,0xFFFFF812,0x00000308,0x00001AB6,0xFFFFF812,0x00000308},
- {"0000001000010011111100001111111010011001000011000011000001000100",0x00002BE9,0xFFFFEE9A,0x00000457,0x00001942,0xFFFFF8D2,0x000002F2,0x00001942,0xFFFFF8D2,0x000002F2},
- {"0000001000010011111100001111111010011001000100000101000100100100",0x00002FAB,0xFFFFEB76,0x000004E1,0x00001DCA,0xFFFFF57D,0x00000378,0x00001DCA,0xFFFFF57D,0x00000378},
- {"0000001000010011111100001111111010011001001011100010100001000100",0x0000330A,0xFFFFE9E1,0x0000051B,0x00001CC4,0xFFFFF6DF,0x00000335,0x00001CC4,0xFFFFF6DF,0x00000335},
- {"0000001000010011111100001111111010011001000110000010100010100100",0x000027D8,0xFFFFF276,0x000003BF,0x0000178A,0xFFFFFABF,0x000002B5,0x0000178A,0xFFFFFABF,0x000002B5},
- {"0000001000010011111100001111110101000010110111000011100001100100",0x0000340A,0xFFFFE86D,0x00000562,0x00001B85,0xFFFFF719,0x0000032F,0x00001B85,0xFFFFF719,0x0000032F},
- {"0000001000010011111010101001010011011110000001100011000010000100",0x00003879,0xFFFFE73F,0x00000578,0x0000161C,0xFFFFFB6B,0x00000281,0x0000161C,0xFFFFFB6B,0x00000281},
- {"0000001000010011111100001111111010011001000110000100000001100100",0x00002879,0xFFFFF0F8,0x0000040A,0x00001749,0xFFFFFA37,0x000002CC,0x00001749,0xFFFFFA37,0x000002CC},
- {"0000001000010011111100001111111010011001000001000011100101100100",0x00002C3A,0xFFFFEEA0,0x0000044F,0x00001D57,0xFFFFF6C2,0x00000332,0x00001D57,0xFFFFF6C2,0x00000332},
- {"0000001000010011111010101001010011011110000000100001100101100100",0x000035BB,0xFFFFE90D,0x0000052A,0x000017D9,0xFFFFF9F5,0x000002C3,0x000017D9,0xFFFFF9F5,0x000002C3},
- {"0000001000010011111010101001010011011110000001000001000100100100",0x000031F1,0xFFFFEAD4,0x000004ED,0x00001F10,0xFFFFF539,0x0000037D,0x00001F10,0xFFFFF539,0x0000037D},
- {"0000001000010011111100001111111010011001000100000010100000100100",0x00002A1A,0xFFFFEFAD,0x00000430,0x00001D47,0xFFFFF62F,0x0000035E,0x00001D47,0xFFFFF62F,0x0000035E},
- {"0000001000010011111100001111111010011001000101100100100100100100",0x00002AF0,0xFFFFEEDC,0x00000465,0x0000145F,0xFFFFFBEB,0x00000281,0x0000145F,0xFFFFFBEB,0x00000281},
- {"0000001000010011111100001111111010011001000110000011000101100100",0x00002657,0xFFFFF2E0,0x000003B6,0x00001664,0xFFFFFB37,0x000002A2,0x00001664,0xFFFFFB37,0x000002A2},
- {"0000001000010011111100001111110101000010110100000011100001100100",0x00003183,0xFFFFE9F1,0x0000052B,0x00002020,0xFFFFF3CE,0x000003C1,0x00002020,0xFFFFF3CE,0x000003C1},
- {"0000001000010011111100001111110101000010110001100010100011100100",0x00003240,0xFFFFEB65,0x000004C7,0x00002425,0xFFFFF245,0x000003F3,0x00002425,0xFFFFF245,0x000003F3},
- {"0000001000010011111010101001010011011110001100100001000100000100",0x000023D0,0xFFFFF400,0x00000397,0x00001345,0xFFFFFD6B,0x00000241,0x00001345,0xFFFFFD6B,0x00000241},
- {"0000001000010011111100001111110101000010110011100011100010100100",0x00003440,0xFFFFE872,0x0000055B,0x00002247,0xFFFFF296,0x000003E8,0x00002247,0xFFFFF296,0x000003E8},
- {"0000001000010011111100001111110101000010110100000100100100000100",0x00003275,0xFFFFE970,0x00000538,0x00001F94,0xFFFFF429,0x000003AD,0x00001F94,0xFFFFF429,0x000003AD},
- {"0000001000010011111100001111110101000010110001100100000010100100",0x00003918,0xFFFFE5DA,0x000005B6,0x000024FC,0xFFFFF106,0x00000426,0x000024FC,0xFFFFF106,0x00000426},
- {"0000001000010011111010101001010011011110000001100010000001000100",0x0000334B,0xFFFFEA39,0x000004FD,0x00001983,0xFFFFF8F6,0x000002E2,0x00001983,0xFFFFF8F6,0x000002E2},
- {"0000001000010011111100001111110101000010110001100100100110000100",0x00003B59,0xFFFFE4D0,0x000005DA,0x00002605,0xFFFFF090,0x00000439,0x00002605,0xFFFFF090,0x00000439},
- {"0000001000010011111100001111110101000010110100000011000100100100",0x00003251,0xFFFFEA46,0x00000511,0x00002781,0xFFFFEF84,0x00000470,0x00002781,0xFFFFEF84,0x00000470},
- {"0000001000010011111100001111110101000010110010100011000101100100",0x00003304,0xFFFFE926,0x00000542,0x00001EE9,0xFFFFF4E4,0x0000038B,0x00001EE9,0xFFFFF4E4,0x0000038B},
- {"0000001000010011111100001111110101000010110011000011100011000100",0x00002F4C,0xFFFFEC0C,0x000004C4,0x00001E49,0xFFFFF578,0x00000374,0x00001E49,0xFFFFF578,0x00000374},
- {"0000001000010011111010101001010011011110000111000010000101100100",0x00002034,0xFFFFF692,0x0000034C,0x000014B8,0xFFFFFC5B,0x00000294,0x000014B8,0xFFFFFC5B,0x00000294},
- {"0000001000010011111100001111110101000010110011100100100100100100",0x0000385F,0xFFFFE513,0x000005F3,0x000024E7,0xFFFFF053,0x00000450,0x000024E7,0xFFFFF053,0x00000450},
- {"0000001000010011111010101001010011011110000111000100000011100100",0x00001D70,0xFFFFF821,0x0000030F,0x00001541,0xFFFFFBB4,0x000002B0,0x00001541,0xFFFFFBB4,0x000002B0},
- {"0000001000010011111100001111110101000010110100000010000010000100",0x000034EB,0xFFFFE7FF,0x00000575,0x000019B4,0xFFFFF836,0x00000308,0x000019B4,0xFFFFF836,0x00000308},
- {"0000001000010011111100001111110101000010110100000101000011100100",0x000037C9,0xFFFFE5D4,0x000005CD,0x000026A1,0xFFFFEF0C,0x00000491,0x000026A1,0xFFFFEF0C,0x00000491},
- {"0000001000010011111010101001010011011110000100100001100101000100",0x00002918,0xFFFFF148,0x000003E9,0x00001A49,0xFFFFF94C,0x000002CF,0x00001A49,0xFFFFF94C,0x000002CF},
- {"0000001000010011111100001111110101000010110010100100000001100100",0x00002F90,0xFFFFEAB5,0x00000514,0x00001707,0xFFFFF9C7,0x000002C4,0x00001707,0xFFFFF9C7,0x000002C4},
- {"0000001000010011111010101001010011011110000001100010000001100100",0x0000327E,0xFFFFEA99,0x000004F4,0x0000194F,0xFFFFF929,0x000002DC,0x0000194F,0xFFFFF929,0x000002DC},
- {"0000001000010011111100001111110101000010110001100100000010000100",0x0000326F,0xFFFFE9CF,0x00000519,0x00002240,0xFFFFF299,0x000003E7,0x00002240,0xFFFFF299,0x000003E7},
- {"0000001000010011111010101001010011011110001100100001000100100100",0x000022FB,0xFFFFF4C6,0x00000371,0x00001506,0xFFFFFC73,0x00000265,0x00001506,0xFFFFFC73,0x00000265},
- {"0000001000010011111100001111110101000010110010100011100100100100",0x00003AD6,0xFFFFE470,0x000005FE,0x00001F03,0xFFFFF4F3,0x00000387,0x00001F03,0xFFFFF4F3,0x00000387},
- {"0000001000010011111010101001010011011110001000000001000100100100",0x00001F11,0xFFFFF756,0x00000332,0x00001666,0xFFFFFB8A,0x000002B2,0x00001666,0xFFFFFB8A,0x000002B2},
- {"0000001000010011111010101001010011011110000000100011100010100100",0x00002A5F,0xFFFFEFA7,0x00000430,0x00001943,0xFFFFF8C6,0x000002F7,0x00001943,0xFFFFF8C6,0x000002F7},
- {"0000001000010011111010101001010011011110000101100101000011100100",0x0000235E,0xFFFFF3B4,0x000003B3,0x00001489,0xFFFFFBCF,0x0000029B,0x00001489,0xFFFFFBCF,0x0000029B},
- {"0000001000010011111100001111110101000010110011000011100010100100",0x00003570,0xFFFFE780,0x0000058D,0x00001B1D,0xFFFFF767,0x00000325,0x00001B1D,0xFFFFF767,0x00000325},
- {"0000001000010011111010101001010011011110000001000010000001100100",0x00003678,0xFFFFE7C3,0x00000569,0x00001831,0xFFFFF98E,0x000002C8,0x00001831,0xFFFFF98E,0x000002C8},
- {"0000001000010011111010101001010011011110001000000001100001100100",0x000020B9,0xFFFFF625,0x0000035A,0x000015C5,0xFFFFFB8A,0x000002B5,0x000015C5,0xFFFFFB8A,0x000002B5},
- {"0000001000010011111100001111110101000010110001100011000110000100",0x00003985,0xFFFFE529,0x000005DD,0x00002165,0xFFFFF351,0x000003C5,0x00002165,0xFFFFF351,0x000003C5},
- {"0000001000010011111100001111110101000010110100000010000001100100",0x0000322A,0xFFFFE99D,0x00000535,0x000019A1,0xFFFFF844,0x00000305,0x000019A1,0xFFFFF844,0x00000305},
- {"0000001000010011111100001111110101000010110100000101000100000100",0x000033ED,0xFFFFE834,0x00000571,0x00002094,0xFFFFF33A,0x000003DB,0x00002094,0xFFFFF33A,0x000003DB},
- {"0000001000010011111010101001010011011110001000000100000011000100",0x00001D10,0xFFFFF84D,0x0000030B,0x00001659,0xFFFFFB0A,0x000002CB,0x00001659,0xFFFFFB0A,0x000002CB},
- {"0000001000010011111010101001010011011110000111000001000100100100",0x0000210F,0xFFFFF644,0x00000355,0x00001A4A,0xFFFFF90F,0x00000310,0x00001A4A,0xFFFFF90F,0x00000310},
- {"0000001000010011111010101001010011011110000101100100000101100100",0x00001CA8,0xFFFFF813,0x00000316,0x00001440,0xFFFFFC1C,0x0000029D,0x00001440,0xFFFFFC1C,0x0000029D},
- {"0000001000010011111010101001010011011110001100100001000011000100",0x00002864,0xFFFFF15A,0x000003FA,0x0000137F,0xFFFFFD43,0x00000248,0x0000137F,0xFFFFFD43,0x00000248},
- {"0000001000010011111100001111110101000010110100000100000110000100",0x00002CDB,0xFFFFECFD,0x000004A7,0x00002472,0xFFFFF0E1,0x00000437,0x00002472,0xFFFFF0E1,0x00000437},
- {"0000001000010011111100001111110101000010110011000101000100000100",0x00003348,0xFFFFE8CA,0x00000554,0x00001E91,0xFFFFF4D4,0x00000392,0x00001E91,0xFFFFF4D4,0x00000392},
- {"0000001000010011111100001111110101000010110001100100100101000100",0x00003989,0xFFFFE4BB,0x000005F8,0x00001ACB,0xFFFFF780,0x00000319,0x00001ACB,0xFFFFF780,0x00000319},
- {"0000001000010011111100001111110101000010110010100010000100000100",0x00003238,0xFFFFEA09,0x0000051E,0x00001F08,0xFFFFF4F4,0x0000038C,0x00001F08,0xFFFFF4F4,0x0000038C},
- {"0000001000010011111010101001010011011110000100100000100100000100",0x00002453,0xFFFFF3B0,0x0000038D,0x00001AED,0xFFFFF8A2,0x000002EA,0x00001AED,0xFFFFF8A2,0x000002EA},
- {"0000001000010011111010101001010011011110000111000011000000100100",0x00002459,0xFFFFF409,0x000003A8,0x000017B5,0xFFFFFA53,0x000002E1,0x000017B5,0xFFFFFA53,0x000002E1},
- {"0000001000010011111010101001010011011110000000100001000110000100",0x0000310D,0xFFFFEB78,0x000004D0,0x00001DC9,0xFFFFF5D5,0x00000368,0x00001DC9,0xFFFFF5D5,0x00000368},
- {"0000001000010011111010101001010011011110000000100011000100000100",0x000031BF,0xFFFFECA3,0x00000498,0x00001DC9,0xFFFFF717,0x00000336,0x00001DC9,0xFFFFF717,0x00000336},
- {"0000001000010011111100001111110101000010110011100010000100000100",0x00003896,0xFFFFE5DD,0x000005C5,0x000023E2,0xFFFFF1A1,0x00000416,0x000023E2,0xFFFFF1A1,0x00000416},
- {"0000001000010011111010101001010011011110001100100011100100000100",0x000023CB,0xFFFFF4C8,0x00000372,0x00001C33,0xFFFFF7D5,0x0000032A,0x00001C33,0xFFFFF7D5,0x0000032A},
- {"0000001000010011111100001111110101000010110100000010000011000100",0x00002F6B,0xFFFFEBF0,0x000004CE,0x00001C89,0xFFFFF689,0x0000034D,0x00001C89,0xFFFFF689,0x0000034D},
- {"0000001000010011111100001111110101000010110011100011100100000100",0x00003E72,0xFFFFE211,0x0000065D,0x0000218D,0xFFFFF309,0x000003DC,0x0000218D,0xFFFFF309,0x000003DC},
- {"0000001000010011111010101001010011011110000000100010000010000100",0x00002612,0xFFFFF2C3,0x000003AD,0x000019F7,0xFFFFF891,0x000002FE,0x000019F7,0xFFFFF891,0x000002FE},
- {"0000001000010011111010101001010011011110000101100100000110000100",0x0000205D,0xFFFFF59F,0x00000372,0x000012E6,0xFFFFFD0A,0x00000270,0x000012E6,0xFFFFFD0A,0x00000270},
- {"0000001000010011111100001111110101000010110010100010000100100100",0x00002ECB,0xFFFFEC47,0x000004BD,0x00001936,0xFFFFF8D9,0x000002E4,0x00001936,0xFFFFF8D9,0x000002E4},
- {"0000001000010011111010101001010011011110000001100100100100000100",0x00002BDB,0xFFFFEE6D,0x00000458,0x00001852,0xFFFFF943,0x000002D9,0x00001852,0xFFFFF943,0x000002D9},
- {"0000001000010011111010101001010011011110000100100100100100000100",0x00003387,0xFFFFE958,0x00000534,0x00001932,0xFFFFF8FA,0x000002E4,0x00001932,0xFFFFF8FA,0x000002E4},
- {"0000001000010011111010101001010011011110000000100000100011000100",0x00002E3C,0xFFFFED26,0x00000495,0x00001858,0xFFFFF990,0x000002D1,0x00001858,0xFFFFF990,0x000002D1},
- {"0000001000010011111010101001010011011110000000100010100101100100",0x000033B8,0xFFFFEA5C,0x000004F9,0x00001BD1,0xFFFFF76A,0x0000032E,0x00001BD1,0xFFFFF76A,0x0000032E},
- {"0000001000010011111010101001010011011110000001100010100110000100",0x00002BCE,0xFFFFEEE9,0x00000443,0x00001982,0xFFFFF90D,0x000002DF,0x00001982,0xFFFFF90D,0x000002DF},
- {"0000001000010011111100001111110101000010110100000100100011100100",0x00003495,0xFFFFE7D9,0x0000057B,0x00001D2A,0xFFFFF5A5,0x00000372,0x00001D2A,0xFFFFF5A5,0x00000372},
- {"0000001000010011111100001111110101000010110010100011100011100100",0x000034B1,0xFFFFE88D,0x00000556,0x00002014,0xFFFFF43A,0x000003AA,0x00002014,0xFFFFF43A,0x000003AA},
- {"0000001000010011111100001111110101000010110011000011000100100100",0x00002F96,0xFFFFEC84,0x000004AD,0x000024A2,0xFFFFF1CE,0x0000040A,0x000024A2,0xFFFFF1CE,0x0000040A},
- {"0000001000010011111010101001010011011110000101100001000001100100",0x0000203B,0xFFFFF640,0x00000359,0x000014EC,0xFFFFFC14,0x0000029C,0x000014EC,0xFFFFFC14,0x0000029C},
- {"0000001000010011111100001111110101000010110100000010100110000100",0x000034E2,0xFFFFE7B8,0x00000582,0x00001938,0xFFFFF872,0x000002FA,0x00001938,0xFFFFF872,0x000002FA},
- {"0000001000010011111010101001010011011110000001100011000100100100",0x00002AC7,0xFFFFF0C1,0x000003F5,0x00002268,0xFFFFF39C,0x000003C9,0x00002268,0xFFFFF39C,0x000003C9},
- {"0000001000010011111100001111110101000010110001100011000101000100",0x000036F6,0xFFFFE77F,0x00000571,0x000027D9,0xFFFFEF6F,0x00000461,0x000027D9,0xFFFFEF6F,0x00000461},
- {"0000001000010011111010101001010011011110000100100011000100100100",0x00002BAB,0xFFFFF018,0x00000419,0x00002126,0xFFFFF4E2,0x0000038F,0x00002126,0xFFFFF4E2,0x0000038F},
- {"0000001000010011111010101001010011011110001100100011100100100100",0x000028C4,0xFFFFF161,0x000003F8,0x0000180C,0xFFFFFA4B,0x000002C8,0x0000180C,0xFFFFFA4B,0x000002C8},
- {"0000001000010011111100001111110101000010110010100010100001100100",0x00002F48,0xFFFFEB62,0x000004EE,0x00001912,0xFFFFF8C8,0x000002EA,0x00001912,0xFFFFF8C8,0x000002EA},
- {"0000001000010011111100001111110101000010110011100010100001100100",0x000032DF,0xFFFFE911,0x00000545,0x00001F06,0xFFFFF485,0x0000039C,0x00001F06,0xFFFFF485,0x0000039C},
- {"0000001000010011111100001111110101000010110100000100000101000100",0x000035B8,0xFFFFE74F,0x00000590,0x00001FD7,0xFFFFF410,0x000003AF,0x00001FD7,0xFFFFF410,0x000003AF},
- {"0000001000010011111100001111110101000010110100000101000011000100",0x00003608,0xFFFFE6D7,0x000005A9,0x000024A6,0xFFFFF075,0x00000450,0x000024A6,0xFFFFF075,0x00000450},
- {"0000001000010011111100001111110101000010110010100011100010000100",0x000030AB,0xFFFFEAED,0x000004F5,0x000019EE,0xFFFFF84E,0x000002FC,0x000019EE,0xFFFFF84E,0x000002FC},
- {"0000001000010011111010101001010011011110000001100010000011000100",0x000030C6,0xFFFFEC92,0x0000049E,0x000019BB,0xFFFFF8F1,0x000002F3,0x000019BB,0xFFFFF8F1,0x000002F3},
- {"0000001000010011111100001111110101000010110001100011000010100100",0x00003B27,0xFFFFE544,0x000005C1,0x00002697,0xFFFFF072,0x00000438,0x00002697,0xFFFFF072,0x00000438},
- {"0000001000010011111010101001010011011110000100100100100011100100",0x00002F23,0xFFFFEC48,0x000004B9,0x0000199A,0xFFFFF8CF,0x000002E9,0x0000199A,0xFFFFF8CF,0x000002E9},
- {"0000001000010011111010101001010011011110000001100010100110100100",0x00002BD7,0xFFFFEEAC,0x00000450,0x00001991,0xFFFFF8F4,0x000002E2,0x00001991,0xFFFFF8F4,0x000002E2},
- {"0000001000010011111010101001010011011110000000100010000000100100",0x00003210,0xFFFFEB24,0x000004DE,0x00001BDF,0xFFFFF744,0x00000333,0x00001BDF,0xFFFFF744,0x00000333},
- {"0000001000010011111010101001010011011110001001000100000101000100",0x00002DDC,0xFFFFED0D,0x000004AC,0x000019D0,0xFFFFF869,0x0000030F,0x000019D0,0xFFFFF869,0x0000030F},
- {"0000001000010011111010101001010011011110001000000011100101100100",0x000023E6,0xFFFFF40C,0x000003A9,0x000014EB,0xFFFFFBC4,0x000002AF,0x000014EB,0xFFFFFBC4,0x000002AF},
- {"0000001000010011111100001111110101000010110010100010100110100100",0x000030CE,0xFFFFE9A5,0x0000053C,0x00001C45,0xFFFFF60E,0x0000035D,0x00001C45,0xFFFFF60E,0x0000035D},
- {"0000001000010011111010101001010011011110000101100001000010000100",0x00001E89,0xFFFFF73A,0x00000337,0x0000157C,0xFFFFFBC0,0x000002AA,0x0000157C,0xFFFFFBC0,0x000002AA},
- {"0000001000010011111100001111110101000010110100000100000100100100",0x000036C6,0xFFFFE6CF,0x000005A1,0x00002457,0xFFFFF11D,0x0000042D,0x00002457,0xFFFFF11D,0x0000042D},
- {"0000001000010011111010101001010011011110001100100001100101000100",0x00002815,0xFFFFF19A,0x000003F2,0x000016D2,0xFFFFFB40,0x00000299,0x000016D2,0xFFFFFB40,0x00000299},
- {"0000001000010011111010101001010011011110000111000001100110100100",0x00001FE2,0xFFFFF660,0x00000354,0x000015A7,0xFFFFFB47,0x000002C1,0x000015A7,0xFFFFFB47,0x000002C1},
- {"0000001000010011111010101001010011011110000101100001100101100100",0x00002114,0xFFFFF634,0x00000356,0x000016C1,0xFFFFFB43,0x000002B8,0x000016C1,0xFFFFFB43,0x000002B8},
- {"0000001000010011111100001111110101000010110011000010100011000100",0x000028E3,0xFFFFF075,0x00000414,0x0000203C,0xFFFFF438,0x000003B3,0x0000203C,0xFFFFF438,0x000003B3},
- {"0000001000010011111010101001010011011110000111000011100100100100",0x00001EEB,0xFFFFF7BB,0x0000031A,0x00001580,0xFFFFFBD7,0x000002AD,0x00001580,0xFFFFFBD7,0x000002AD},
- {"0000001000010011111010101001010011011110001001000000100011000100",0x00002BB2,0xFFFFEE72,0x00000470,0x0000192C,0xFFFFF91E,0x000002E7,0x0000192C,0xFFFFF91E,0x000002E7},
- {"0000001000010011111010101001010011011110000001100101000011100100",0x00003A3D,0xFFFFE49D,0x000005F5,0x00001A3B,0xFFFFF7B1,0x00000320,0x00001A3B,0xFFFFF7B1,0x00000320},
- {"0000001000010011111100001111110101000010110011100011000101100100",0x00002E93,0xFFFFEC5A,0x000004B4,0x000025EB,0xFFFFF03C,0x0000044A,0x000025EB,0xFFFFF03C,0x0000044A},
- {"0000001000010011111100001111110101000010110010100010000011000100",0x0000331F,0xFFFFE97A,0x00000531,0x00001A06,0xFFFFF850,0x000002FD,0x00001A06,0xFFFFF850,0x000002FD},
- {"0000001000010011111100001111110101000010110001100011100101100100",0x00003937,0xFFFFE5A0,0x000005C7,0x0000235E,0xFFFFF234,0x000003F2,0x0000235E,0xFFFFF234,0x000003F2},
- {"0000001000010011111010101001010011011110000111100011100100100100",0x00001DD0,0xFFFFF80E,0x00000319,0x000015C7,0xFFFFFB91,0x000002BC,0x000015C7,0xFFFFFB91,0x000002BC},
- {"0000001000010011111100001111110101000010110100000011100101100100",0x00003328,0xFFFFE905,0x0000054A,0x00002054,0xFFFFF3BF,0x000003C0,0x00002054,0xFFFFF3BF,0x000003C0},
- {"0000001000010011111100001111110101000010110011000001000100000100",0x00002FE5,0xFFFFEA65,0x00000520,0x0000188B,0xFFFFF8A7,0x000002F5,0x0000188B,0xFFFFF8A7,0x000002F5},
- {"0000001000010011111100001111110101000010110010100011100010100100",0x00002ED3,0xFFFFEC51,0x000004B9,0x00001888,0xFFFFF96A,0x000002CA,0x00001888,0xFFFFF96A,0x000002CA},
- {"0000001000010011111100001111110101000010110100000011000010000100",0x00002FCC,0xFFFFEB60,0x000004EA,0x00001F8D,0xFFFFF436,0x000003B4,0x00001F8D,0xFFFFF436,0x000003B4},
- {"0000001000010011111100001111110101000010110011100100000010000100",0x0000329F,0xFFFFE8F7,0x0000054F,0x000023DB,0xFFFFF0EE,0x0000043A,0x000023DB,0xFFFFF0EE,0x0000043A},
- {"0000001000010011111010101001010011011110000001000011100010100100",0x000030B5,0xFFFFEBB8,0x000004C4,0x00001AFD,0xFFFFF781,0x00000329,0x00001AFD,0xFFFFF781,0x00000329},
- {"0000001000010011111010101001010011011110000111100001100110100100",0x00001BBF,0xFFFFF8E2,0x000002F7,0x00001722,0xFFFFFA85,0x000002DB,0x00001722,0xFFFFFA85,0x000002DB},
- {"0000001000010011111010101001010011011110000000100010000001000100",0x000030E4,0xFFFFEBE6,0x000004BB,0x00001C80,0xFFFFF6E1,0x0000033E,0x00001C80,0xFFFFF6E1,0x0000033E},
- {"0000001000010011111010101001010011011110000100100010100101000100",0x000030E2,0xFFFFECD0,0x00000492,0x00001CE0,0xFFFFF753,0x0000032F,0x00001CE0,0xFFFFF753,0x0000032F},
- {"0000001000010011111010101001010011011110001100100010100001100100",0x00002513,0xFFFFF323,0x000003BC,0x00001965,0xFFFFF93C,0x000002F0,0x00001965,0xFFFFF93C,0x000002F0},
- {"0000001000010011111010101001010011011110000101100001000010100100",0x00002147,0xFFFFF585,0x0000037A,0x000014CC,0xFFFFFC3B,0x00000296,0x000014CC,0xFFFFFC3B,0x00000296},
- {"0000001000010011111010101001010011011110001100100010000100100100",0x00002507,0xFFFFF432,0x0000038A,0x00001890,0xFFFFFA61,0x000002C6,0x00001890,0xFFFFFA61,0x000002C6},
- {"0000001000010011111010101001010011011110000001100011100010100100",0x0000339B,0xFFFFEA7D,0x000004F0,0x0000191E,0xFFFFF944,0x000002DF,0x0000191E,0xFFFFF944,0x000002DF},
- {"0000001000010011111100001111110101000010110011000010100010100100",0x00002842,0xFFFFF043,0x00000427,0x00001988,0xFFFFF892,0x000002F7,0x00001988,0xFFFFF892,0x000002F7},
- {"0000001000010011111100001111110101000010110001100001100010100100",0x0000389D,0xFFFFE5D8,0x000005BF,0x00001EE1,0xFFFFF4EF,0x00000387,0x00001EE1,0xFFFFF4EF,0x00000387},
- {"0000001000010011111100001111110101000010110011100011000110000100",0x0000396D,0xFFFFE4D7,0x000005F2,0x000020DA,0xFFFFF34E,0x000003CD,0x000020DA,0xFFFFF34E,0x000003CD},
- {"0000001000010011111100001111110101000010110010100011000100000100",0x0000355F,0xFFFFE85A,0x0000055F,0x0000281F,0xFFFFEF28,0x0000047D,0x0000281F,0xFFFFEF28,0x0000047D},
- {"0000001000010011111010101001010011011110000111000101000011100100",0x00002284,0xFFFFF46E,0x00000399,0x00001498,0xFFFFFBE3,0x0000029C,0x00001498,0xFFFFFBE3,0x0000029C},
- {"0000001000010011111010101001010011011110000000100011100101000100",0x000031B6,0xFFFFEB42,0x000004D9,0x00001F54,0xFFFFF4D2,0x00000399,0x00001F54,0xFFFFF4D2,0x00000399},
- {"0000001000010011111100001111110101000010110001100011000001100100",0x000035CE,0xFFFFE79D,0x00000578,0x00001C78,0xFFFFF68C,0x00000344,0x00001C78,0xFFFFF68C,0x00000344},
- {"0000001000010011111010101001010011011110000111100100100101100100",0x00001C0A,0xFFFFF81B,0x00000318,0x00001492,0xFFFFFBCC,0x000002A5,0x00001492,0xFFFFFBCC,0x000002A5},
- {"0000001000010011111010101001010011011110000000100010000110000100",0x00003492,0xFFFFE95C,0x00000526,0x00001A97,0xFFFFF81B,0x0000030B,0x00001A97,0xFFFFF81B,0x0000030B},
- {"0000001000010011111010101001010011011110000101100011000101100100",0x00001E89,0xFFFFF7D0,0x0000031A,0x000017A5,0xFFFFFA99,0x000002D9,0x000017A5,0xFFFFFA99,0x000002D9},
- {"0000001000010011111100001111110101000010110010100100100011000100",0x00002DCC,0xFFFFEBE0,0x000004DE,0x000019BA,0xFFFFF7F5,0x0000030D,0x000019BA,0xFFFFF7F5,0x0000030D},
- {"0000001000010011111010101001010011011110000001000010100110000100",0x000030EF,0xFFFFEBC1,0x000004C0,0x00001AA9,0xFFFFF814,0x0000030A,0x00001AA9,0xFFFFF814,0x0000030A},
- {"0000001000010011111010101001010011011110001001000101000100100100",0x00002EA3,0xFFFFEBF6,0x000004D8,0x00001DCF,0xFFFFF521,0x00000399,0x00001DCF,0xFFFFF521,0x00000399},
- {"0000001000010011111010101001010011011110001100100100000101100100",0x00002B5F,0xFFFFEEA1,0x0000046C,0x000017EB,0xFFFFF9C9,0x000002D4,0x000017EB,0xFFFFF9C9,0x000002D4},
- {"0000001000010011111010101001010011011110000000100100000100000100",0x00002C63,0xFFFFEE82,0x00000455,0x00002268,0xFFFFF29D,0x000003F6,0x00002268,0xFFFFF29D,0x000003F6},
- {"0000001000010011111010101001010011011110000100100001100100000100",0x00002B1A,0xFFFFF016,0x0000041C,0x000019AA,0xFFFFF988,0x000002D2,0x000019AA,0xFFFFF988,0x000002D2},
- {"0000001000010011111100001111110101000010110010100010100101100100",0x0000332F,0xFFFFE934,0x0000053B,0x00001E47,0xFFFFF566,0x00000374,0x00001E47,0xFFFFF566,0x00000374},
- {"0000001000010011111100001111110101000010110010100100100011100100",0x00002995,0xFFFFEEC1,0x00000465,0x0000178F,0xFFFFF995,0x000002C5,0x0000178F,0xFFFFF995,0x000002C5},
- {"0000001000010011111010101001010011011110001000000001100010000100",0x00001C2E,0xFFFFF932,0x000002E9,0x000015C2,0xFFFFFBC5,0x000002AD,0x000015C2,0xFFFFFBC5,0x000002AD},
- {"0000001000010011111100001111110101000010110001100100000011100100",0x00003B08,0xFFFFE4E8,0x000005D8,0x0000209D,0xFFFFF444,0x00000398,0x0000209D,0xFFFFF444,0x00000398},
- {"0000001000010011111010101001010011011110000001000101000011100100",0x00002F1F,0xFFFFEB74,0x000004EB,0x00001F4C,0xFFFFF3D4,0x000003CE,0x00001F4C,0xFFFFF3D4,0x000003CE},
- {"0000001000010011111010101001010011011110000001000011100010000100",0x00003415,0xFFFFE89F,0x00000553,0x0000186B,0xFFFFF8E1,0x000002EF,0x0000186B,0xFFFFF8E1,0x000002EF},
- {"0000001000010011111100001111110101000010110011000001000011000100",0x00003441,0xFFFFE779,0x0000059D,0x000019EA,0xFFFFF7B2,0x0000031F,0x000019EA,0xFFFFF7B2,0x0000031F},
- {"0000001000010011111010101001010011011110000101100100000001100100",0x00002174,0xFFFFF546,0x00000378,0x00001456,0xFFFFFC5F,0x00000284,0x00001456,0xFFFFFC5F,0x00000284},
- {"0000001000010011111100001111110101000010110011100100000011000100",0x00003788,0xFFFFE61E,0x000005BF,0x00001DF4,0xFFFFF562,0x00000374,0x00001DF4,0xFFFFF562,0x00000374},
- {"0000001000010011111010101001010011011110000111100001100001000100",0x00001C41,0xFFFFF8C1,0x000002FC,0x0000171E,0xFFFFFA93,0x000002DE,0x0000171E,0xFFFFFA93,0x000002DE},
- {"0000001000010011111100001111110101000010110010100011100001100100",0x00002B15,0xFFFFEDEC,0x00000487,0x000017E4,0xFFFFF934,0x000002DF,0x000017E4,0xFFFFF934,0x000002DF},
- {"0000001000010011111100001111110101000010110011000011000101000100",0x0000327A,0xFFFFEA71,0x000004FF,0x00001D96,0xFFFFF63B,0x00000351,0x00001D96,0xFFFFF63B,0x00000351},
- {"0000001000010011111010101001010011011110000111100100000001100100",0x000023C6,0xFFFFF3E5,0x000003B6,0x000014DE,0xFFFFFC29,0x00000294,0x000014DE,0xFFFFFC29,0x00000294},
- {"0000001000010011111010101001010011011110000101100100100101000100",0x00001F96,0xFFFFF5FA,0x00000364,0x00001397,0xFFFFFC9D,0x0000027D,0x00001397,0xFFFFFC9D,0x0000027D},
- {"0000001000010011111010101001010011011110000001100011000101000100",0x00002B51,0xFFFFEFB5,0x00000420,0x00001ACA,0xFFFFF824,0x0000030D,0x00001ACA,0xFFFFF824,0x0000030D},
- {"0000001000010011111010101001010011011110000111100100100101000100",0x000020DB,0xFFFFF55B,0x0000037C,0x0000153D,0xFFFFFB5F,0x000002BA,0x0000153D,0xFFFFFB5F,0x000002BA},
- {"0000001000010011111010101001010011011110000000100010000110100100",0x000030BB,0xFFFFEBDA,0x000004BC,0x00001B0E,0xFFFFF7A8,0x0000031E,0x00001B0E,0xFFFFF7A8,0x0000031E},
- {"0000001000010011111100001111110101000010110001100010100100000100",0x000033C4,0xFFFFEA41,0x000004FA,0x000022C6,0xFFFFF363,0x000003BC,0x000022C6,0xFFFFF363,0x000003BC},
- {"0000001000010011111010101001010011011110001001000000100100100100",0x00002D47,0xFFFFEE01,0x00000477,0x000021CD,0xFFFFF36E,0x000003D6,0x000021CD,0xFFFFF36E,0x000003D6},
- {"0000001000010011111010101001010011011110000111100011000110100100",0x00001E7B,0xFFFFF733,0x00000339,0x00001668,0xFFFFFB29,0x000002BF,0x00001668,0xFFFFFB29,0x000002BF},
- {"0000001000010011111100001111110101000010110010100010100110000100",0x00002F7E,0xFFFFEAFF,0x000004FC,0x000018D4,0xFFFFF8BE,0x000002E8,0x000018D4,0xFFFFF8BE,0x000002E8},
- {"0000001000010011111010101001010011011110001100100011100010100100",0x00002635,0xFFFFF2E1,0x000003BC,0x000017A4,0xFFFFFA67,0x000002C3,0x000017A4,0xFFFFFA67,0x000002C3},
- {"0000001000010011111010101001010011011110000100100011000010100100",0x000026CA,0xFFFFF2C1,0x000003B2,0x00001C3E,0xFFFFF7AE,0x0000031F,0x00001C3E,0xFFFFF7AE,0x0000031F},
- {"0000001000010011111010101001010011011110000111000001000001100100",0x00002550,0xFFFFF380,0x000003B5,0x000019F5,0xFFFFF8E7,0x00000313,0x000019F5,0xFFFFF8E7,0x00000313},
- {"0000001000010011111100001111110101000010110010100100100100000100",0x00002FBC,0xFFFFEAF8,0x000004FA,0x000018CC,0xFFFFF8C6,0x000002E8,0x000018CC,0xFFFFF8C6,0x000002E8},
- {"0000001000010011111100001111110101000010110100000001100011100100",0x00002FCC,0xFFFFEB60,0x000004EA,0x00001EFF,0xFFFFF4DA,0x0000038F,0x00001EFF,0xFFFFF4DA,0x0000038F},
- {"0000001000010011111010101001010011011110000101100100000010000100",0x000023E6,0xFFFFF413,0x000003A1,0x00001544,0xFFFFFC16,0x0000028B,0x00001544,0xFFFFFC16,0x0000028B},
- {"0000001000010011111100001111110101000010110011100011000000100100",0x00003251,0xFFFFEAA2,0x000004F5,0x000025B0,0xFFFFF0DF,0x00000431,0x000025B0,0xFFFFF0DF,0x00000431},
- {"0000001000010011111100001111110101000010110100000011100110000100",0x00002F6F,0xFFFFEB67,0x000004E6,0x00002275,0xFFFFF249,0x000003FB,0x00002275,0xFFFFF249,0x000003FB},
- {"0000001000010011111010101001010011011110001100100010100101100100",0x00002597,0xFFFFF34A,0x000003B1,0x00001BCC,0xFFFFF822,0x0000031A,0x00001BCC,0xFFFFF822,0x0000031A},
- {"0000001000010011111100001111110101000010110001100011100001100100",0x00003B1D,0xFFFFE40E,0x0000060D,0x00001F61,0xFFFFF470,0x0000039F,0x00001F61,0xFFFFF470,0x0000039F},
- {"0000001000010011111100001111110101000010110001100100000101000100",0x0000379F,0xFFFFE6DB,0x0000058C,0x00002460,0xFFFFF170,0x00000415,0x00002460,0xFFFFF170,0x00000415},
- {"0000001000010011111010101001010011011110000101100101000101000100",0x00002442,0xFFFFF2FB,0x000003D9,0x00001414,0xFFFFFBDC,0x000002A2,0x00001414,0xFFFFFBDC,0x000002A2},
- {"0000001000010011111010101001010011011110000000100100000011000100",0x00003270,0xFFFFEA0D,0x0000051C,0x00001AFD,0xFFFFF783,0x00000328,0x00001AFD,0xFFFFF783,0x00000328},
- {"0000001000010011111010101001010011011110000101100001000100000100",0x00001B23,0xFFFFF94B,0x000002EB,0x000015F1,0xFFFFFB82,0x000002B4,0x000015F1,0xFFFFFB82,0x000002B4},
- {"0000001000010011111010101001010011011110001100100011100001000100",0x000026AE,0xFFFFF21A,0x000003DB,0x00001827,0xFFFFFA10,0x000002C8,0x00001827,0xFFFFFA10,0x000002C8},
- {"0000001000010011111100001111110101000010110010100100100010000100",0x00002DCF,0xFFFFEBD8,0x000004DB,0x00001A75,0xFFFFF719,0x0000033A,0x00001A75,0xFFFFF719,0x0000033A},
- {"0000001000010011111100001111110101000010110011100100000011100100",0x00003983,0xFFFFE500,0x000005EA,0x000022A6,0xFFFFF25F,0x000003F1,0x000022A6,0xFFFFF25F,0x000003F1},
- {"0000001000010011111010101001010011011110000100100001100011000100",0x00002AD5,0xFFFFF07A,0x00000406,0x000019FB,0xFFFFF961,0x000002D8,0x000019FB,0xFFFFF961,0x000002D8},
- {"0000001000010011111100001111110101000010110010100011100110100100",0x00002A43,0xFFFFEE43,0x00000474,0x00001D65,0xFFFFF538,0x00000387,0x00001D65,0xFFFFF538,0x00000387},
- {"0000001000010011111100001111110101000010110001100010000010000100",0x0000311E,0xFFFFEAF8,0x000004E8,0x00001959,0xFFFFF8E4,0x000002DC,0x00001959,0xFFFFF8E4,0x000002DC},
- {"0000001000010011111100001111110101000010110100000011000110100100",0x0000339A,0xFFFFE8A7,0x00000559,0x00001A04,0xFFFFF7E5,0x00000311,0x00001A04,0xFFFFF7E5,0x00000311},
- {"0000001000010011111010101001010011011110001000000100000101000100",0x000021B3,0xFFFFF50F,0x00000389,0x00001470,0xFFFFFBF7,0x000002A5,0x00001470,0xFFFFFBF7,0x000002A5},
- {"0000001000010011111010101001010011011110000000100001100010000100",0x00003417,0xFFFFE9A6,0x0000051D,0x000018A4,0xFFFFF984,0x000002CF,0x000018A4,0xFFFFF984,0x000002CF},
- {"0000001000010011111010101001010011011110001000000010100110000100",0x00001FED,0xFFFFF6A2,0x00000347,0x00001639,0xFFFFFB59,0x000002BB,0x00001639,0xFFFFFB59,0x000002BB},
- {"0000001000010011111010101001010011011110000100100001100010100100",0x000032D2,0xFFFFEB18,0x000004DC,0x00001A01,0xFFFFF95E,0x000002CF,0x00001A01,0xFFFFF95E,0x000002CF},
- {"0000001000010011111100001111110101000010110100000100000010000100",0x00003147,0xFFFFEA3B,0x00000518,0x0000241D,0xFFFFF11C,0x00000431,0x0000241D,0xFFFFF11C,0x00000431},
- {"0000001000010011111010101001010011011110000111000000100100000100",0x00001D44,0xFFFFF7E7,0x0000031A,0x0000153F,0xFFFFFBBC,0x000002A9,0x0000153F,0xFFFFFBBC,0x000002A9},
- {"0000001000010011111100001111110101000010110011000100000100000100",0x00003690,0xFFFFE6E3,0x000005A4,0x000018DE,0xFFFFF908,0x000002DD,0x000018DE,0xFFFFF908,0x000002DD},
- {"0000001000010011111100001111110101000010110011000010000110000100",0x00003561,0xFFFFE6F8,0x000005AB,0x000018B5,0xFFFFF8A0,0x000002F3,0x000018B5,0xFFFFF8A0,0x000002F3},
- {"0000001000010011111010101001010011011110001100100011000100100100",0x000028F4,0xFFFFF23A,0x000003CE,0x00001BC6,0xFFFFF881,0x00000311,0x00001BC6,0xFFFFF881,0x00000311},
- {"0000001000010011111100001111110101000010110100000011000110000100",0x000035D7,0xFFFFE71C,0x0000059B,0x00001D49,0xFFFFF5C8,0x00000368,0x00001D49,0xFFFFF5C8,0x00000368},
- {"0000001000010011111100001111110101000010110011100001100010100100",0x0000397E,0xFFFFE4CB,0x000005F4,0x00001989,0xFFFFF844,0x000002FD,0x00001989,0xFFFFF844,0x000002FD},
- {"0000001000010011111100001111110101000010110001100010000001100100",0x00003BAB,0xFFFFE332,0x0000063F,0x00001A69,0xFFFFF7B9,0x00000312,0x00001A69,0xFFFFF7B9,0x00000312},
- {"0000001000010011111100001111110101000010110100000011000001100100",0x00002F26,0xFFFFEB82,0x000004E8,0x00001D7D,0xFFFFF590,0x00000379,0x00001D7D,0xFFFFF590,0x00000379},
- {"0000001000010011111010101001010011011110000001100011000110100100",0x00002FDC,0xFFFFEBE0,0x000004C3,0x00001940,0xFFFFF8CC,0x000002EE,0x00001940,0xFFFFF8CC,0x000002EE},
- {"0000001000010011111010101001010011011110000111000000100011100100",0x000021B2,0xFFFFF558,0x00000379,0x00001643,0xFFFFFB1C,0x000002C3,0x00001643,0xFFFFFB1C,0x000002C3},
- {"0000001000010011111010101001010011011110001100100001100100000100",0x00002897,0xFFFFF181,0x000003F7,0x00001990,0xFFFFF994,0x000002E2,0x00001990,0xFFFFF994,0x000002E2},
- {"0000001000010011111010101001010011011110000111100000100100100100",0x00001D19,0xFFFFF829,0x0000031A,0x00001558,0xFFFFFBCA,0x000002AF,0x00001558,0xFFFFFBCA,0x000002AF},
- {"0000001000010011111010101001010011011110000001000011000101000100",0x00003311,0xFFFFEAD9,0x000004E1,0x00001BDC,0xFFFFF79E,0x0000031D,0x00001BDC,0xFFFFF79E,0x0000031D},
- {"0000001000010011111010101001010011011110000111100010100111000100",0x00001E54,0xFFFFF740,0x00000333,0x000016A1,0xFFFFFAF0,0x000002C4,0x000016A1,0xFFFFFAF0,0x000002C4},
- {"0000001000010011111100001111110101000010110011100011100101100100",0x00003266,0xFFFFE9A8,0x00000527,0x00002307,0xFFFFF219,0x000003FC,0x00002307,0xFFFFF219,0x000003FC},
- {"0000001000010011111010101001010011011110001100100001000101000100",0x00001D1F,0xFFFFF82B,0x000002F0,0x000013F0,0xFFFFFD0B,0x0000024E,0x000013F0,0xFFFFFD0B,0x0000024E},
- {"0000001000010011111100001111110101000010110001100100100010100100",0x0000312E,0xFFFFEA67,0x00000502,0x0000222A,0xFFFFF253,0x000003F9,0x0000222A,0xFFFFF253,0x000003F9},
- {"0000001000010011111100001111110101000010110010100100000100100100",0x000032B2,0xFFFFE9AD,0x00000523,0x00001E97,0xFFFFF527,0x0000037F,0x00001E97,0xFFFFF527,0x0000037F},
- {"0000001000010011111010101001010011011110000101100100000011100100",0x00001F6A,0xFFFFF6FC,0x00000338,0x0000164B,0xFFFFFB2C,0x000002C2,0x0000164B,0xFFFFFB2C,0x000002C2},
- {"0000001000010011111010101001010011011110000000100010100011000100",0x00002603,0xFFFFF386,0x00000392,0x00001EE0,0xFFFFF601,0x00000369,0x00001EE0,0xFFFFF601,0x00000369},
- {"0000001000010011111010101001010011011110001000000001000101100100",0x00001D0C,0xFFFFF803,0x00000317,0x00001345,0xFFFFFD52,0x00000260,0x00001345,0xFFFFFD52,0x00000260},
- {"0000001000010011111100001111110101000010110011000001100010000100",0x0000327A,0xFFFFE8E5,0x0000055C,0x00001680,0xFFFFFA2D,0x000002B2,0x00001680,0xFFFFFA2D,0x000002B2},
- {"0000001000010011111100001111110101000010110010100011100101100100",0x000032B8,0xFFFFE91A,0x0000054A,0x00001BAB,0xFFFFF6EC,0x00000338,0x00001BAB,0xFFFFF6EC,0x00000338},
- {"0000001000010011111100001111110101000010110011000011000001000100",0x00002F79,0xFFFFEB63,0x000004EF,0x000017BB,0xFFFFF9B1,0x000002CA,0x000017BB,0xFFFFF9B1,0x000002CA},
- {"0000001000010011111010101001010011011110000001000011100011100100",0x00002AE5,0xFFFFEFCB,0x0000041D,0x0000214A,0xFFFFF3A7,0x000003C7,0x0000214A,0xFFFFF3A7,0x000003C7},
- {"0000001000010011111010101001010011011110001100100010000001100100",0x0000212C,0xFFFFF5BC,0x0000034F,0x000017ED,0xFFFFFA4C,0x000002C1,0x000017ED,0xFFFFFA4C,0x000002C1},
- {"0000001000010011111010101001010011011110000100100001000100100100",0x00002BE7,0xFFFFEF40,0x0000043C,0x00001AE2,0xFFFFF8CF,0x000002E3,0x00001AE2,0xFFFFF8CF,0x000002E3},
- {"0000001000010011111100001111110101000010110100000101000101000100",0x000032DC,0xFFFFE90F,0x00000549,0x00002A2D,0xFFFFECC9,0x000004ED,0x00002A2D,0xFFFFECC9,0x000004ED},
- {"0000001000010011111010101001010011011110000101100001100010100100",0x00001DE3,0xFFFFF80D,0x00000319,0x000016FA,0xFFFFFB42,0x000002BC,0x000016FA,0xFFFFFB42,0x000002BC},
- {"0000001000010011111010101001010011011110000111100010100001000100",0x00001F1B,0xFFFFF6DE,0x00000346,0x00001502,0xFFFFFC23,0x00000298,0x00001502,0xFFFFFC23,0x00000298},
- {"0000001000010011111010101001010011011110000001100001100001100100",0x00003203,0xFFFFEA87,0x000004FE,0x0000194E,0xFFFFF8E3,0x000002EC,0x0000194E,0xFFFFF8E3,0x000002EC},
- {"0000001000010011111100001111110101000010110100000010000101000100",0x0000337A,0xFFFFE8DD,0x00000551,0x00001E3C,0xFFFFF534,0x00000385,0x00001E3C,0xFFFFF534,0x00000385},
- {"0000001000010011111100001111110101000010110010100100100001100100",0x000036F6,0xFFFFE62A,0x000005C5,0x000023C0,0xFFFFF117,0x00000435,0x000023C0,0xFFFFF117,0x00000435},
- {"0000001000010011111100001111110101000010110011000010000101000100",0x00003125,0xFFFFEA4E,0x0000051A,0x00001E6C,0xFFFFF503,0x0000038E,0x00001E6C,0xFFFFF503,0x0000038E},
- {"0000001000010011111010101001010011011110000111000000100010100100",0x00001CD4,0xFFFFF82D,0x0000030E,0x0000156D,0xFFFFFB64,0x000002B8,0x0000156D,0xFFFFFB64,0x000002B8},
- {"0000001000010011111010101001010011011110000000100100000010100100",0x00002F14,0xFFFFEC46,0x000004B8,0x000017F1,0xFFFFF977,0x000002D2,0x000017F1,0xFFFFF977,0x000002D2},
- {"0000001000010011111010101001010011011110000001100100000010100100",0x000031F1,0xFFFFEAD4,0x000004ED,0x0000184C,0xFFFFF983,0x000002D4,0x0000184C,0xFFFFF983,0x000002D4},
- {"0000001000010011111100001111110101000010110100000100100110000100",0x00002EA9,0xFFFFEBD7,0x000004D5,0x0000288D,0xFFFFEDDB,0x000004C0,0x0000288D,0xFFFFEDDB,0x000004C0},
- {"0000001000010011111100001111110101000010110010100011100110000100",0x0000335F,0xFFFFE82C,0x00000579,0x00001DBF,0xFFFFF512,0x0000038C,0x00001DBF,0xFFFFF512,0x0000038C},
- {"0000001000010011111010101001010011011110001000000001000110000100",0x0000224F,0xFFFFF4B5,0x00000391,0x0000138C,0xFFFFFCC3,0x0000027A,0x0000138C,0xFFFFFCC3,0x0000027A},
- {"0000001000010011111010101001010011011110000100100100000010100100",0x0000320D,0xFFFFEACD,0x000004F5,0x00001976,0xFFFFF913,0x000002E2,0x00001976,0xFFFFF913,0x000002E2},
- {"0000001000010011111010101001010011011110001000000010000100000100",0x00001BEB,0xFFFFF99C,0x000002E4,0x000016A4,0xFFFFFB77,0x000002C3,0x000016A4,0xFFFFFB77,0x000002C3},
- {"0000001000010011111010101001010011011110000001100011000001000100",0x0000396E,0xFFFFE616,0x000005A9,0x000018F4,0xFFFFF91A,0x000002E3,0x000018F4,0xFFFFF91A,0x000002E3},
- {"0000001000010011111010101001010011011110000000100010100001100100",0x00003251,0xFFFFEA8E,0x000004FA,0x000018EF,0xFFFFF910,0x000002E4,0x000018EF,0xFFFFF910,0x000002E4},
- {"0000001000010011111010101001010011011110000111000001100100100100",0x00001DAF,0xFFFFF857,0x0000030D,0x00001915,0xFFFFF9D8,0x000002F7,0x00001915,0xFFFFF9D8,0x000002F7},
- {"0000001000010011111010101001010011011110001000000100000110100100",0x000025B6,0xFFFFF26B,0x000003E5,0x00001531,0xFFFFFB68,0x000002AF,0x00001531,0xFFFFFB68,0x000002AF},
- {"0000001000010011111010101001010011011110000001100001100010000100",0x00002B2E,0xFFFFEF2E,0x00000440,0x00001968,0xFFFFF91A,0x000002DF,0x00001968,0xFFFFF91A,0x000002DF},
- {"0000001000010011111010101001010011011110000111000010000001100100",0x00002305,0xFFFFF528,0x00000377,0x000018A4,0xFFFFF9EB,0x000002F0,0x000018A4,0xFFFFF9EB,0x000002F0},
- {"0000001000010011111100001111110101000010110010100100000011000100",0x000032A1,0xFFFFE992,0x0000052E,0x00001A55,0xFFFFF826,0x000002FE,0x00001A55,0xFFFFF826,0x000002FE},
- {"0000001000010011111010101001010011011110000001000010000110000100",0x00002CCD,0xFFFFEE35,0x00000462,0x00001B09,0xFFFFF7E6,0x0000030F,0x00001B09,0xFFFFF7E6,0x0000030F},
- {"0000001000010011111010101001010011011110001100100011000010000100",0x00002602,0xFFFFF2CF,0x000003C5,0x000016EE,0xFFFFFAD4,0x000002B4,0x000016EE,0xFFFFFAD4,0x000002B4},
- {"0000001000010011111100001111110101000010110100000001100101100100",0x00003370,0xFFFFE891,0x00000560,0x000017F0,0xFFFFF930,0x000002DF,0x000017F0,0xFFFFF930,0x000002DF},
- {"0000001000010011111100001111110101000010110010100001100010000100",0x00002EDC,0xFFFFEB6D,0x000004EC,0x000016E6,0xFFFFF9ED,0x000002BC,0x000016E6,0xFFFFF9ED,0x000002BC},
- {"0000001000010011111010101001010011011110000100100010100011000100",0x00002A05,0xFFFFF13D,0x000003F0,0x00002065,0xFFFFF57B,0x00000378,0x00002065,0xFFFFF57B,0x00000378},
- {"0000001000010011111100001111110101000010110011100010000001000100",0x00002F8A,0xFFFFEB6E,0x000004E4,0x00001E3E,0xFFFFF50E,0x0000038D,0x00001E3E,0xFFFFF50E,0x0000038D},
- {"0000001000010011111100001111110101000010110010100011000001000100",0x00002BB5,0xFFFFED6A,0x000004A1,0x000017BF,0xFFFFF937,0x000002E5,0x000017BF,0xFFFFF937,0x000002E5},
- {"0000001000010011111010101001010011011110001000000001100101100100",0x0000202C,0xFFFFF6CE,0x0000033F,0x000015EE,0xFFFFFB83,0x000002B9,0x000015EE,0xFFFFFB83,0x000002B9},
- {"0000001000010011111010101001010011011110000000100010100010000100",0x00002C0C,0xFFFFEF10,0x0000043F,0x00001A73,0xFFFFF83E,0x0000030C,0x00001A73,0xFFFFF83E,0x0000030C},
- {"0000001000010011111010101001010011011110001100100100000100000100",0x0000234F,0xFFFFF460,0x00000385,0x000018C3,0xFFFFF9A5,0x000002DD,0x000018C3,0xFFFFF9A5,0x000002DD},
- {"0000001000010011111100001111110101000010110011100001100100000100",0x00003679,0xFFFFE704,0x00000595,0x00002177,0xFFFFF31A,0x000003D7,0x00002177,0xFFFFF31A,0x000003D7},
- {"0000001000010011111100001111110101000010110010100010100100100100",0x00003008,0xFFFFEBB8,0x000004D5,0x000024FF,0xFFFFF112,0x00000430,0x000024FF,0xFFFFF112,0x00000430},
- {"0000001000010011111100001111110101000010110001100100000110100100",0x00003848,0xFFFFE6A3,0x00000594,0x00002958,0xFFFFEE37,0x000004A0,0x00002958,0xFFFFEE37,0x000004A0},
- {"0000001000010011111100001111110101000010110011000001100100100100",0x00002FDF,0xFFFFEB08,0x000004FD,0x00001D77,0xFFFFF58B,0x0000037A,0x00001D77,0xFFFFF58B,0x0000037A},
- {"0000001000010011111010101001010011011110000001100011000001100100",0x00002EC8,0xFFFFED41,0x00000481,0x00001949,0xFFFFF91C,0x000002DF,0x00001949,0xFFFFF91C,0x000002DF},
- {"0000001000010011111100001111110101000010110100000100000110100100",0x000037C1,0xFFFFE5BA,0x000005D7,0x0000252C,0xFFFFF023,0x00000460,0x0000252C,0xFFFFF023,0x00000460},
- {"0000001000010011111100001111110101000010110011100010100101000100",0x00003716,0xFFFFE70C,0x0000058A,0x000028CC,0xFFFFEE57,0x0000049D,0x000028CC,0xFFFFEE57,0x0000049D},
- {"0000001000010011111100001111110101000010110010100100000011100100",0x000033D1,0xFFFFE8E8,0x00000547,0x00001AB1,0xFFFFF7E5,0x00000309,0x00001AB1,0xFFFFF7E5,0x00000309},
- {"0000001000010011111100001111110101000010110011000010100101000100",0x00002D72,0xFFFFED65,0x0000048E,0x00001E0D,0xFFFFF5A7,0x00000370,0x00001E0D,0xFFFFF5A7,0x00000370},
- {"0000001000010011111010101001010011011110000111000011100110100100",0x00002292,0xFFFFF49F,0x00000393,0x000017F4,0xFFFFF9CD,0x000002F5,0x000017F4,0xFFFFF9CD,0x000002F5},
- {"0000001000010011111010101001010011011110001001000011000001000100",0x000026EE,0xFFFFF18C,0x000003F7,0x000018A7,0xFFFFF95A,0x000002E5,0x000018A7,0xFFFFF95A,0x000002E5},
- {"0000001000010011111010101001010011011110000001000010000101100100",0x00002F62,0xFFFFEC9B,0x000004A4,0x0000194E,0xFFFFF932,0x000002D9,0x0000194E,0xFFFFF932,0x000002D9},
- {"0000001000010011111010101001010011011110000111100011100110000100",0x00001CE8,0xFFFFF7FA,0x0000031C,0x000014CE,0xFFFFFBD4,0x000002AB,0x000014CE,0xFFFFFBD4,0x000002AB},
- {"0000001000010011111010101001010011011110000100100001000011100100",0x00002E5A,0xFFFFEDAB,0x0000047C,0x00001A82,0xFFFFF8F7,0x000002DE,0x00001A82,0xFFFFF8F7,0x000002DE},
- {"0000001000010011111100001111110101000010110011000011000011100100",0x00003057,0xFFFFEC34,0x000004B9,0x00002296,0xFFFFF342,0x000003D0,0x00002296,0xFFFFF342,0x000003D0},
- {"0000001000010011111010101001010011011110000001000001100010100100",0x00002B0F,0xFFFFEF58,0x00000434,0x00001BFD,0xFFFFF721,0x00000330,0x00001BFD,0xFFFFF721,0x00000330},
- {"0000001000010011111010101001010011011110001000000001000010100100",0x00001F01,0xFFFFF751,0x0000032F,0x00001502,0xFFFFFC3E,0x00000296,0x00001502,0xFFFFFC3E,0x00000296},
- {"0000001000010011111100001111110101000010110010100011000001100100",0x00002FF4,0xFFFFEAE2,0x00000503,0x00001B36,0xFFFFF736,0x00000330,0x00001B36,0xFFFFF736,0x00000330},
- {"0000001000010011111100001111110101000010110011100010000001100100",0x00003762,0xFFFFE5AB,0x000005DE,0x000018CB,0xFFFFF896,0x000002F4,0x000018CB,0xFFFFF896,0x000002F4},
- {"0000001000010011111100001111110101000010110011000010000001100100",0x00002890,0xFFFFEF92,0x00000445,0x0000191D,0xFFFFF86F,0x00000302,0x0000191D,0xFFFFF86F,0x00000302},
- {"0000001000010011111010101001010011011110000001000011000001100100",0x00002F76,0xFFFFEC0E,0x000004BF,0x00001F7D,0xFFFFF41A,0x000003C0,0x00001F7D,0xFFFFF41A,0x000003C0},
- {"0000001000010011111010101001010011011110000111100000100010100100",0x00001D55,0xFFFFF7F8,0x0000031E,0x000015DF,0xFFFFFB79,0x000002B7,0x000015DF,0xFFFFFB79,0x000002B7},
- {"0000001000010011111010101001010011011110001000000100100100100100",0x00001FE9,0xFFFFF64A,0x00000353,0x000019E8,0xFFFFF882,0x0000032A,0x000019E8,0xFFFFF882,0x0000032A},
- {"0000001000010011111010101001010011011110000001100011100101100100",0x000030B5,0xFFFFEBB8,0x000004C4,0x00001857,0xFFFFF968,0x000002D8,0x00001857,0xFFFFF968,0x000002D8},
- {"0000001000010011111100001111110101000010110010100010100011000100",0x00003398,0xFFFFE9A3,0x00000524,0x00001FF9,0xFFFFF458,0x000003AD,0x00001FF9,0xFFFFF458,0x000003AD},
- {"0000001000010011111100001111110101000010110011100010100101100100",0x00003897,0xFFFFE5BD,0x000005C8,0x00002519,0xFFFFF0BA,0x00000438,0x00002519,0xFFFFF0BA,0x00000438},
- {"0000001000010011111100001111110101000010110100000100000001100100",0x00003234,0xFFFFE9B1,0x00000530,0x000022CC,0xFFFFF20E,0x00000409,0x000022CC,0xFFFFF20E,0x00000409},
- {"0000001000010011111010101001010011011110001000000101000100000100",0x00001FD2,0xFFFFF641,0x00000354,0x000017C9,0xFFFFF9C0,0x000002FB,0x000017C9,0xFFFFF9C0,0x000002FB},
- {"0000001000010011111100001111110101000010110011100100100011100100",0x00003234,0xFFFFE946,0x0000053D,0x00002267,0xFFFFF1F5,0x0000040D,0x00002267,0xFFFFF1F5,0x0000040D},
- {"0000001000010011111010101001010011011110001000000010100110100100",0x00002330,0xFFFFF474,0x00000399,0x00001490,0xFFFFFC67,0x00000288,0x00001490,0xFFFFFC67,0x00000288},
- {"0000001000010011111100001111110101000010110100000011100100100100",0x000032A3,0xFFFFE9EB,0x0000051B,0x0000234D,0xFFFFF23C,0x000003F7,0x0000234D,0xFFFFF23C,0x000003F7},
- {"0000001000010011111010101001010011011110001000000000100100000100",0x0000217E,0xFFFFF53A,0x00000384,0x00001511,0xFFFFFBF5,0x0000029E,0x00001511,0xFFFFFBF5,0x0000029E},
- {"0000001000010011111100001111110101000010110011100101000011100100",0x0000384F,0xFFFFE562,0x000005E2,0x0000295A,0xFFFFED53,0x000004D3,0x0000295A,0xFFFFED53,0x000004D3},
- {"0000001000010011111100001111110101000010110100000101000100100100",0x00003315,0xFFFFE8D1,0x00000552,0x000025D1,0xFFFFEFAF,0x00000471,0x000025D1,0xFFFFEFAF,0x00000471},
- {"0000001000010011111100001111110101000010110001100100100100100100",0x00004183,0xFFFFDF61,0x000006DA,0x0000193C,0xFFFFF88F,0x000002EC,0x0000193C,0xFFFFF88F,0x000002EC},
- {"0000001000010011111010101001010011011110001001000010000101100100",0x00002DFC,0xFFFFEDF2,0x0000047A,0x00001755,0xFFFFFAC2,0x000002AC,0x00001755,0xFFFFFAC2,0x000002AC},
- {"0000001000010011111100001111110101000010110010100011000110100100",0x000033FE,0xFFFFE774,0x0000059F,0x00001E70,0xFFFFF492,0x000003A0,0x00001E70,0xFFFFF492,0x000003A0},
- {"0000001000010011111100001111110101000010110001100010100110100100",0x000040D7,0xFFFFDFB8,0x000006CE,0x00001AC8,0xFFFFF773,0x0000031D,0x00001AC8,0xFFFFF773,0x0000031D},
- {"0000001000010011111010101001010011011110000111100001000101100100",0x00001D02,0xFFFFF803,0x00000322,0x000015FE,0xFFFFFB71,0x000002BB,0x000015FE,0xFFFFFB71,0x000002BB},
- {"0000001000010011111100001111110101000010110100000010100010000100",0x00002EB0,0xFFFFEC31,0x000004C4,0x00001B3C,0xFFFFF73B,0x00000330,0x00001B3C,0xFFFFF73B,0x00000330},
- {"0000001000010011111100001111110101000010110010100100100110000100",0x00002D9F,0xFFFFECBF,0x000004A8,0x000022B0,0xFFFFF23C,0x000003F9,0x000022B0,0xFFFFF23C,0x000003F9},
- {"0000001000010011111100001111110101000010110011000001100011100100",0x00002C6A,0xFFFFEDAC,0x00000488,0x00002419,0xFFFFF159,0x00000427,0x00002419,0xFFFFF159,0x00000427},
- {"0000001000010011111010101001010011011110000100100001000010100100",0x00002991,0xFFFFF06C,0x0000040E,0x00001AA9,0xFFFFF8D0,0x000002E1,0x00001AA9,0xFFFFF8D0,0x000002E1},
- {"0000001000010011111010101001010011011110000100100011100100000100",0x00002F8E,0xFFFFED1B,0x00000493,0x00001DE4,0xFFFFF69C,0x00000347,0x00001DE4,0xFFFFF69C,0x00000347},
- {"0000001000010011111010101001010011011110001000000100000110000100",0x00002136,0xFFFFF540,0x0000037C,0x000014FF,0xFFFFFB83,0x000002B2,0x000014FF,0xFFFFFB83,0x000002B2},
- {"0000001000010011111010101001010011011110000001100001100011100100",0x0000354C,0xFFFFE97D,0x0000051A,0x00001906,0xFFFFF965,0x000002DD,0x00001906,0xFFFFF965,0x000002DD},
- {"0000001000010011111100001111110101000010110001100010000011000100",0x0000348B,0xFFFFE94D,0x0000051F,0x0000285B,0xFFFFEF1A,0x00000473,0x0000285B,0xFFFFEF1A,0x00000473},
- {"0000001000010011111010101001010011011110001100100001100010100100",0x000026E6,0xFFFFF24E,0x000003D6,0x0000141F,0xFFFFFCCE,0x00000260,0x0000141F,0xFFFFFCCE,0x00000260},
- {"0000001000010011111100001111110101000010110001100100000101100100",0x00003CED,0xFFFFE2A5,0x0000064E,0x00002060,0xFFFFF3E0,0x000003B0,0x00002060,0xFFFFF3E0,0x000003B0},
- {"0000001000010011111010101001010011011110000000100001000010000100",0x000029D4,0xFFFFEFF7,0x00000426,0x00001976,0xFFFFF8E1,0x000002EE,0x00001976,0xFFFFF8E1,0x000002EE},
- {"0000001000010011111100001111110101000010110010100100000010100100",0x00003767,0xFFFFE601,0x000005CC,0x00001D22,0xFFFFF5F4,0x00000361,0x00001D22,0xFFFFF5F4,0x00000361},
- {"0000001000010011111100001111110101000010110001100101000011000100",0x00003CE8,0xFFFFE2E8,0x00000637,0x0000232C,0xFFFFF1E7,0x00000405,0x0000232C,0xFFFFF1E7,0x00000405},
- {"0000001000010011111010101001010011011110001000000001000001100100",0x000023A8,0xFFFFF4CD,0x00000386,0x00001944,0xFFFFF983,0x00000300,0x00001944,0xFFFFF983,0x00000300},
- {"0000001000010011111100001111110101000010110011000011000010100100",0x00003451,0xFFFFE8B9,0x00000551,0x00001AD7,0xFFFFF7BF,0x00000318,0x00001AD7,0xFFFFF7BF,0x00000318},
- {"0000001000010011111100001111110101000010110011100010100110000100",0x0000381B,0xFFFFE5A0,0x000005D0,0x00001E0F,0xFFFFF521,0x00000382,0x00001E0F,0xFFFFF521,0x00000382},
- {"0000001000010011111010101001010011011110001000000011100011000100",0x000023A4,0xFFFFF4A6,0x00000394,0x0000171F,0xFFFFFABB,0x000002D9,0x0000171F,0xFFFFFABB,0x000002D9},
- {"0000001000010011111100001111110101000010110001100010000010100100",0x00003C2B,0xFFFFE447,0x000005F0,0x0000207F,0xFFFFF44E,0x0000039A,0x0000207F,0xFFFFF44E,0x0000039A},
- {"0000001000010011111100001111110101000010110011000011100110000100",0x00002F07,0xFFFFEB70,0x000004E9,0x00001765,0xFFFFF9A5,0x000002C6,0x00001765,0xFFFFF9A5,0x000002C6},
- {"0000001000010011111100001111110101000010110001100010100110000100",0x00003A01,0xFFFFE4E0,0x000005E7,0x0000227A,0xFFFFF292,0x000003E5,0x0000227A,0xFFFFF292,0x000003E5},
- {"0000001000010011111100001111110101000010110011100010000010100100",0x0000376E,0xFFFFE686,0x000005A6,0x00001FCF,0xFFFFF43B,0x000003A8,0x00001FCF,0xFFFFF43B,0x000003A8},
- {"0000001000010011111100001111111111101111010110100100100110000100",0x0000485F,0xFFFFDCC1,0x00000713,0x00002CF8,0xFFFFEC45,0x000004DA,0x00002CF8,0xFFFFEC45,0x000004DA},
- {"0000001000010011111100001111111111101111010111000011000110000100",0x0000331C,0xFFFFE8FF,0x00000541,0x00002366,0xFFFFF19D,0x00000411,0x00002366,0xFFFFF19D,0x00000411},
- {"0000001000010011111100001111111111101111011001000011100001100100",0x00003CF3,0xFFFFE15A,0x00000694,0x00002FB3,0xFFFFE827,0x000005B9,0x00002FB3,0xFFFFE827,0x000005B9},
- {"0000001000010011111010101001010011011110001100100001000100000100",0x000023F3,0xFFFFF3EA,0x0000039A,0x00001345,0xFFFFFD6B,0x00000241,0x00001345,0xFFFFFD6B,0x00000241},
- {"0000001000010011111100001111111111101111010111000010100010100100",0x000038C0,0xFFFFE58A,0x000005CC,0x000023CA,0xFFFFF1AA,0x00000408,0x000023CA,0xFFFFF1AA,0x00000408},
- {"0000001000010011111100001111111111101111011001100010100101000100",0x00004976,0xFFFFDD6A,0x000006D7,0x000033C6,0xFFFFE8EB,0x0000054D,0x000033C6,0xFFFFE8EB,0x0000054D},
- {"0000001000010011111100001111111111101111011001000100100100000100",0x00004049,0xFFFFDF6D,0x000006D8,0x00003129,0xFFFFE716,0x000005E9,0x00003129,0xFFFFE716,0x000005E9},
- {"0000001000010011111100001111111111101111011001100001000101100100",0x000046C2,0xFFFFDCEB,0x0000071C,0x00002E6D,0xFFFFEA8F,0x0000052E,0x00002E6D,0xFFFFEA8F,0x0000052E},
- {"0000001000010011111100001111111111101111011000100011100010100100",0x00004080,0xFFFFE1E1,0x0000063A,0x0000396D,0xFFFFE40A,0x0000062C,0x0000396D,0xFFFFE40A,0x0000062C},
- {"0000001000010011111100001111111111101111010111100010000100100100",0x00003DE0,0xFFFFE358,0x0000060C,0x00002AA2,0xFFFFEDBF,0x000004A0,0x00002AA2,0xFFFFEDBF,0x000004A0},
- {"0000001000010011111100001111111111101111010111100011000101000100",0x00003FC0,0xFFFFE2A1,0x0000061A,0x000027D8,0xFFFFEFEC,0x0000043A,0x000027D8,0xFFFFEFEC,0x0000043A},
- {"0000001000010011111100001111111111101111011001100001100100100100",0x00003FBF,0xFFFFE2F5,0x00000603,0x000032D7,0xFFFFE900,0x00000552,0x000032D7,0xFFFFE900,0x00000552},
- {"0000001000010011111100001111111111101111010111000001000011100100",0x000035EE,0xFFFFE6CA,0x000005A2,0x0000247C,0xFFFFF088,0x00000446,0x0000247C,0xFFFFF088,0x00000446},
- {"0000001000010011111100001111111111101111011001000011100010000100",0x000039C8,0xFFFFE3AE,0x0000062A,0x000028AF,0xFFFFED24,0x000004DF,0x000028AF,0xFFFFED24,0x000004DF},
- {"0000001000010011111100001111111111101111010111000010100010000100",0x00003BDE,0xFFFFE33B,0x00000632,0x00001B6C,0xFFFFF720,0x00000326,0x00001B6C,0xFFFFF720,0x00000326},
- {"0000001000010011111100001111111111101111011100100001000010100100",0x00003818,0xFFFFE57D,0x000005D4,0x000020EF,0xFFFFF327,0x000003CE,0x000020EF,0xFFFFF327,0x000003CE},
- {"0000001000010011111100001111111111101111010111100001100110100100",0x000038DA,0xFFFFE561,0x000005D3,0x0000297D,0xFFFFED6D,0x000004C5,0x0000297D,0xFFFFED6D,0x000004C5},
- {"0000001000010011111100001111111111101111011010000100100010000100",0x000027AC,0xFFFFF0CE,0x00000417,0x00001F5F,0xFFFFF484,0x000003B2,0x00001F5F,0xFFFFF484,0x000003B2},
- {"0000001000010011111100001111111111101111011001100100100010100100",0x00003F02,0xFFFFE222,0x00000643,0x000026D4,0xFFFFF000,0x00000443,0x000026D4,0xFFFFF000,0x00000443},
- {"0000001000010011111100001111111111101111011000100100000101100100",0x00004303,0xFFFFDFE3,0x00000690,0x0000312C,0xFFFFE912,0x00000561,0x0000312C,0xFFFFE912,0x00000561},
- {"0000001000010011111100001111111111101111011000000000100100000100",0x000039E5,0xFFFFE31F,0x00000657,0x00001D23,0xFFFFF51F,0x00000386,0x00001D23,0xFFFFF51F,0x00000386},
- {"0000001000010011111100001111111111101111011001100001000101000100",0x000041FA,0xFFFFE01B,0x00000697,0x00002767,0xFFFFEF90,0x00000455,0x00002767,0xFFFFEF90,0x00000455},
- {"0000001000010011111100001111111111101111011010000011000010100100",0x00002888,0xFFFFF11C,0x00000403,0x00001864,0xFFFFF9D8,0x000002D3,0x00001864,0xFFFFF9D8,0x000002D3},
- {"0000001000010011111010101001010011011110001000000001100001100100",0x0000215C,0xFFFFF5B6,0x0000036D,0x000015C5,0xFFFFFB8A,0x000002B5,0x000015C5,0xFFFFFB8A,0x000002B5},
- {"0000001000010011111100001111111111101111011010000011100110000100",0x00002FAF,0xFFFFEC27,0x000004CA,0x00002184,0xFFFFF39C,0x000003CD,0x00002184,0xFFFFF39C,0x000003CD},
- {"0000001000010011111100001111111111101111010111100001000011000100",0x00004ACE,0xFFFFD9A3,0x000007BC,0x00001A5D,0xFFFFF7F6,0x000002FC,0x00001A5D,0xFFFFF7F6,0x000002FC},
- {"0000001000010011111100001111111111101111010110100011000001000100",0x00003763,0xFFFFE797,0x0000055F,0x000029B5,0xFFFFEEA1,0x00000474,0x000029B5,0xFFFFEEA1,0x00000474},
- {"0000001000010011111100001111111111101111010111100011000101100100",0x00003832,0xFFFFE6F9,0x00000575,0x00002C99,0xFFFFEC42,0x000004E3,0x00002C99,0xFFFFEC42,0x000004E3},
- {"0000001000010011111100001111111111101111011000000100000101100100",0x000041C9,0xFFFFDE33,0x0000071E,0x0000199D,0xFFFFF808,0x000002F9,0x0000199D,0xFFFFF808,0x000002F9},
- {"0000001000010011111100001111111111101111011001000001000101100100",0x0000474A,0xFFFFD96E,0x00000802,0x00002A30,0xFFFFEB57,0x0000053F,0x00002A30,0xFFFFEB57,0x0000053F},
- {"0000001000010011111100001111111111101111010111000011000111000100",0x0000312F,0xFFFFEA6A,0x00000508,0x000029D3,0xFFFFED38,0x000004D3,0x000029D3,0xFFFFED38,0x000004D3},
- {"0000001000010011111100001111111111101111011100100001000011000100",0x00003BD6,0xFFFFE2E7,0x00000644,0x00002093,0xFFFFF37B,0x000003BD,0x00002093,0xFFFFF37B,0x000003BD},
- {"0000001000010011111100001111111111101111011010000100000011100100",0x00002F94,0xFFFFECD4,0x000004A3,0x00002196,0xFFFFF40B,0x000003B5,0x00002196,0xFFFFF40B,0x000003B5},
- {"0000001000010011111100001111111111101111010111100001100101000100",0x0000369B,0xFFFFE762,0x00000571,0x00002726,0xFFFFEF99,0x00000459,0x00002726,0xFFFFEF99,0x00000459},
- {"0000001000010011111100001111111111101111011001000010000001100100",0x00003F57,0xFFFFDF47,0x000006F4,0x00002E5F,0xFFFFE8AE,0x000005AB,0x00002E5F,0xFFFFE8AE,0x000005AB},
- {"0000001000010011111010101001010011011110000010100100000011000100",0x00004313,0xFFFFDD81,0x0000072D,0x00002468,0xFFFFF068,0x00000440,0x00002468,0xFFFFF068,0x00000440},
- {"0000001000010011111100001111111111101111011010000011000001000100",0x00002A35,0xFFFFEFA8,0x00000441,0x00001F3F,0xFFFFF4F3,0x000003A0,0x00001F3F,0xFFFFF4F3,0x000003A0},
- {"0000001000010011111100001111111111101111011001100011000010100100",0x00003E33,0xFFFFE4B0,0x000005AF,0x00002802,0xFFFFF092,0x00000412,0x00002802,0xFFFFF092,0x00000412},
- {"0000001000010011111010101001010011011110001100100011100100000100",0x00002815,0xFFFFF20E,0x000003DD,0x00001C33,0xFFFFF7D5,0x0000032A,0x00001C33,0xFFFFF7D5,0x0000032A},
- {"0000001000010011111100001111111111101111010110100010000110000100",0x00003CC2,0xFFFFE43E,0x000005DE,0x00002C16,0xFFFFECED,0x000004BA,0x00002C16,0xFFFFECED,0x000004BA},
- {"0000001000010011111100001111111111101111010111000100000010000100",0x00003CFA,0xFFFFE1EE,0x00000673,0x00001F7D,0xFFFFF402,0x000003AE,0x00001F7D,0xFFFFF402,0x000003AE},
- {"0000001000010011111100001111111111101111011000100010000100000100",0x0000486E,0xFFFFDD43,0x000006EE,0x000036F0,0xFFFFE609,0x000005D5,0x000036F0,0xFFFFE609,0x000005D5},
- {"0000001000010011111100001111111111101111010111000100100101100100",0x000039FE,0xFFFFE41F,0x00000613,0x0000266C,0xFFFFEF35,0x0000047D,0x0000266C,0xFFFFEF35,0x0000047D},
- {"0000001000010011111010101001010011011110000100100011000100100100",0x00002EA4,0xFFFFEE3B,0x00000462,0x00002126,0xFFFFF4E2,0x0000038F,0x00002126,0xFFFFF4E2,0x0000038F},
- {"0000001000010011111100001111111111101111011010000011100101000100",0x00002D2E,0xFFFFEE7B,0x00000462,0x0000229D,0xFFFFF363,0x000003D4,0x0000229D,0xFFFFF363,0x000003D4},
- {"0000001000010011111100001111111111101111010111100010100001000100",0x0000375C,0xFFFFE695,0x0000059D,0x00002319,0xFFFFF237,0x000003EE,0x00002319,0xFFFFF237,0x000003EE},
- {"0000001000010011111100001111111111101111011100100101000011000100",0x00004522,0xFFFFDC71,0x0000075E,0x0000247E,0xFFFFF0A0,0x0000043C,0x0000247E,0xFFFFF0A0,0x0000043C},
- {"0000001000010011111010101001010011011110000100100100100011100100",0x00002E58,0xFFFFECB9,0x000004A9,0x0000199A,0xFFFFF8CF,0x000002E9,0x0000199A,0xFFFFF8CF,0x000002E9},
- {"0000001000010011111100001111111111101111011001000011100011100100",0x00003791,0xFFFFE5FE,0x000005B6,0x000029F5,0xFFFFED0D,0x000004CD,0x000029F5,0xFFFFED0D,0x000004CD},
- {"0000001000010011111010101001010011011110001001000100000101000100",0x00002E9E,0xFFFFEC8D,0x000004C1,0x000019D0,0xFFFFF869,0x0000030F,0x000019D0,0xFFFFF869,0x0000030F},
- {"0000001000010011111010101001010011011110001000000011100101100100",0x0000237C,0xFFFFF435,0x000003A6,0x000014EB,0xFFFFFBC4,0x000002AF,0x000014EB,0xFFFFFBC4,0x000002AF},
- {"0000001000010011111100001111111111101111011001100010100100100100",0x00003FE5,0xFFFFE4A2,0x000005A0,0x00003416,0xFFFFE995,0x00000523,0x00003416,0xFFFFE995,0x00000523},
- {"0000001000010011111100001111111111101111010111000000100100100100",0x00002B27,0xFFFFED51,0x000004A5,0x000025D1,0xFFFFEF18,0x00000492,0x000025D1,0xFFFFEF18,0x00000492},
- {"0000001000010011111100001111111111101111011010000100100100000100",0x00002D77,0xFFFFED79,0x00000494,0x00002196,0xFFFFF352,0x000003DE,0x00002196,0xFFFFF352,0x000003DE},
- {"0000001000010011111100001111111111101111010111000010000011000100",0x00003750,0xFFFFE6AC,0x00000596,0x00002524,0xFFFFF0B5,0x00000431,0x00002524,0xFFFFF0B5,0x00000431},
- {"0000001000010011111010101001010011011110000100100010100101000100",0x00002896,0xFFFFF1BB,0x000003D9,0x00001CE0,0xFFFFF753,0x0000032F,0x00001CE0,0xFFFFF753,0x0000032F},
- {"0000001000010011111100001111111111101111011001000001100110000100",0x00003CA7,0xFFFFE0F7,0x000006B1,0x00002CB8,0xFFFFE9AB,0x00000587,0x00002CB8,0xFFFFE9AB,0x00000587},
- {"0000001000010011111010101001010011011110001100100010100001100100",0x00002513,0xFFFFF323,0x000003BC,0x00001965,0xFFFFF93C,0x000002F0,0x00001965,0xFFFFF93C,0x000002F0},
- {"0000001000010011111100001111111111101111011001100010000101100100",0x00003914,0xFFFFE683,0x00000586,0x00003120,0xFFFFE9A6,0x00000543,0x00003120,0xFFFFE9A6,0x00000543},
- {"0000001000010011111100001111111111101111011001000011100100000100",0x000040D0,0xFFFFE007,0x000006AC,0x00002B9E,0xFFFFEBF5,0x000004FB,0x00002B9E,0xFFFFEBF5,0x000004FB},
- {"0000001000010011111100001111111111101111010110100100100010000100",0x00004412,0xFFFFDF5F,0x000006A9,0x00002A9E,0xFFFFEDCE,0x00000498,0x00002A9E,0xFFFFEDCE,0x00000498},
- {"0000001000010011111100001111111111101111011000100100100010000100",0x000042A6,0xFFFFDFEF,0x00000696,0x00002E65,0xFFFFEAAE,0x00000529,0x00002E65,0xFFFFEAAE,0x00000529},
- {"0000001000010011111010101001010011011110001100100010000100100100",0x000022E8,0xFFFFF565,0x0000035F,0x00001890,0xFFFFFA61,0x000002C6,0x00001890,0xFFFFFA61,0x000002C6},
- {"0000001000010011111100001111111111101111011000100011100110100100",0x00004637,0xFFFFDDD8,0x000006E9,0x0000349D,0xFFFFE6C8,0x000005C7,0x0000349D,0xFFFFE6C8,0x000005C7},
- {"0000001000010011111010101001010011011110001001100011100100000100",0x00004686,0xFFFFDC58,0x0000073D,0x00003972,0xFFFFE27B,0x0000068E,0x00003972,0xFFFFE27B,0x0000068E},
- {"0000001000010011111100001111111111101111011010000000100011100100",0x00002B35,0xFFFFEE9C,0x0000046C,0x00001F5B,0xFFFFF4A3,0x000003A9,0x00001F5B,0xFFFFF4A3,0x000003A9},
- {"0000001000010011111100001111111111101111011100100100000101000100",0x00003AC9,0xFFFFE3B2,0x0000061B,0x000023A1,0xFFFFF170,0x0000040F,0x000023A1,0xFFFFF170,0x0000040F},
- {"0000001000010011111100001111111111101111010111100001100010000100",0x00003C50,0xFFFFE37E,0x00000617,0x0000218F,0xFFFFF339,0x000003C4,0x0000218F,0xFFFFF339,0x000003C4},
- {"0000001000010011111100001111111111101111011001100011000001000100",0x00003793,0xFFFFE761,0x0000055D,0x000029C7,0xFFFFEE03,0x00000496,0x000029C7,0xFFFFEE03,0x00000496},
- {"0000001000010011111100001111111111101111011001000011100010100100",0x000040B5,0xFFFFDF78,0x000006DA,0x00002DED,0xFFFFEA20,0x00000551,0x00002DED,0xFFFFEA20,0x00000551},
- {"0000001000010011111100001111111111101111011000000001000101000100",0x000039D6,0xFFFFE37D,0x0000063C,0x00001AED,0xFFFFF6E2,0x00000331,0x00001AED,0xFFFFF6E2,0x00000331},
- {"0000001000010011111100001111111111101111011001100010000101000100",0x0000431F,0xFFFFE09B,0x0000066A,0x00002BDF,0xFFFFED93,0x00000496,0x00002BDF,0xFFFFED93,0x00000496},
- {"0000001000010011111100001111111111101111011000100011100001100100",0x00004887,0xFFFFDC65,0x00000721,0x00003669,0xFFFFE5C4,0x000005E9,0x00003669,0xFFFFE5C4,0x000005E9},
- {"0000001000010011111100001111111111101111011001000000100100100100",0x00004120,0xFFFFDDAE,0x00000748,0x0000303B,0xFFFFE70D,0x000005FC,0x0000303B,0xFFFFE70D,0x000005FC},
- {"0000001000010011111100001111111111101111010111100010100010100100",0x0000415D,0xFFFFE0BE,0x0000067B,0x00002FA7,0xFFFFEA28,0x00000538,0x00002FA7,0xFFFFEA28,0x00000538},
- {"0000001000010011111100001111111111101111011010000001100100000100",0x00002B12,0xFFFFEFF9,0x00000428,0x00001DDA,0xFFFFF693,0x00000356,0x00001DDA,0xFFFFF693,0x00000356},
- {"0000001000010011111100001111111111101111010111100011000110000100",0x00003ED3,0xFFFFE28D,0x0000062D,0x00002B00,0xFFFFED4E,0x000004B3,0x00002B00,0xFFFFED4E,0x000004B3},
- {"0000001000010011111100001111111111101111011000100101000010100100",0x00004218,0xFFFFE039,0x0000068F,0x00002F84,0xFFFFEA0C,0x00000541,0x00002F84,0xFFFFEA0C,0x00000541},
- {"0000001000010011111100001111111111101111010110100011100001000100",0x00003FF5,0xFFFFE2A3,0x00000617,0x00003017,0xFFFFEA7A,0x00000520,0x00003017,0xFFFFEA7A,0x00000520},
- {"0000001000010011111100001111111111101111010110100000100010100100",0x00004304,0xFFFFDFCC,0x0000069E,0x00002E0C,0xFFFFEB51,0x00000505,0x00002E0C,0xFFFFEB51,0x00000505},
- {"0000001000010011111100001111111111101111011001000001100101000100",0x00003D3A,0xFFFFE17F,0x00000687,0x0000284C,0xFFFFED83,0x000004CD,0x0000284C,0xFFFFED83,0x000004CD},
- {"0000001000010011111100001111111111101111010111100100000010100100",0x000042F5,0xFFFFDF76,0x000006B2,0x000027B6,0xFFFFEF72,0x00000455,0x000027B6,0xFFFFEF72,0x00000455},
- {"0000001000010011111100001111111111101111010111000011100011000100",0x00004267,0xFFFFDF29,0x000006D5,0x0000298F,0xFFFFEDBD,0x000004AC,0x0000298F,0xFFFFEDBD,0x000004AC},
- {"0000001000010011111010101001010011011110001001000000100100100100",0x0000303E,0xFFFFEC00,0x000004CB,0x000021CD,0xFFFFF36E,0x000003D6,0x000021CD,0xFFFFF36E,0x000003D6},
- {"0000001000010011111100001111111111101111010111100010100011000100",0x00003127,0xFFFFEBDB,0x000004A6,0x00002E95,0xFFFFEB78,0x000004F3,0x00002E95,0xFFFFEB78,0x000004F3},
- {"0000001000010011111010101001010011011110000111000001000001100100",0x00002655,0xFFFFF2D9,0x000003CF,0x000019F5,0xFFFFF8E7,0x00000313,0x000019F5,0xFFFFF8E7,0x00000313},
- {"0000001000010011111010101001010011011110000101100100000010000100",0x00002372,0xFFFFF449,0x0000039B,0x00001544,0xFFFFFC16,0x0000028B,0x00001544,0xFFFFFC16,0x0000028B},
- {"0000001000010011111100001111111111101111011001100010100011000100",0x0000348E,0xFFFFEB20,0x000004B2,0x00002BE8,0xFFFFEE80,0x00000467,0x00002BE8,0xFFFFEE80,0x00000467},
- {"0000001000010011111100001111111111101111010111100001000100000100",0x00004092,0xFFFFE073,0x0000069B,0x00002061,0xFFFFF403,0x000003A0,0x00002061,0xFFFFF403,0x000003A0},
- {"0000001000010011111100001111111111101111011100100010000011100100",0x000039D1,0xFFFFE55D,0x000005CC,0x000025CB,0xFFFFF0C0,0x00000428,0x000025CB,0xFFFFF0C0,0x00000428},
- {"0000001000010011111100001111111111101111010111100100100010000100",0x000042AA,0xFFFFDF68,0x000006C2,0x0000290B,0xFFFFEE78,0x00000485,0x0000290B,0xFFFFEE78,0x00000485},
- {"0000001000010011111100001111111111101111011100100001100011000100",0x0000356F,0xFFFFE7AC,0x0000056E,0x00001BE8,0xFFFFF6E3,0x0000032A,0x00001BE8,0xFFFFF6E3,0x0000032A},
- {"0000001000010011111100001111111111101111010111100001000101000100",0x00003525,0xFFFFE7FF,0x0000055D,0x0000242C,0xFFFFF12E,0x0000041D,0x0000242C,0xFFFFF12E,0x0000041D},
- {"0000001000010011111100001111111111101111010111000100100011000100",0x00003360,0xFFFFE895,0x00000550,0x00002175,0xFFFFF29E,0x000003E9,0x00002175,0xFFFFF29E,0x000003E9},
- {"0000001000010011111100001111111111101111011001000100000010100100",0x00003C94,0xFFFFE1C4,0x0000067E,0x00002E28,0xFFFFE964,0x0000057F,0x00002E28,0xFFFFE964,0x0000057F},
- {"0000001000010011111100001111111111101111011100100100000100100100",0x0000431C,0xFFFFDE4B,0x000006FF,0x00002270,0xFFFFF268,0x000003E5,0x00002270,0xFFFFF268,0x000003E5},
- {"0000001000010011111010101001010011011110000100100001100011000100",0x00002B67,0xFFFFF01D,0x00000414,0x000019FB,0xFFFFF961,0x000002D8,0x000019FB,0xFFFFF961,0x000002D8},
- {"0000001000010011111100001111111111101111010111100011100110000100",0x0000400B,0xFFFFE13D,0x0000066F,0x000024F3,0xFFFFF125,0x00000417,0x000024F3,0xFFFFF125,0x00000417},
- {"0000001000010011111100001111111111101111010110100010000010100100",0x00004460,0xFFFFE00E,0x0000067B,0x000023DF,0xFFFFF2E6,0x000003BB,0x000023DF,0xFFFFF2E6,0x000003BB},
- {"0000001000010011111100001111111111101111011001000001100001100100",0x00003AFB,0xFFFFE2C5,0x00000650,0x00002D46,0xFFFFE9C4,0x00000571,0x00002D46,0xFFFFE9C4,0x00000571},
- {"0000001000010011111100001111111111101111011000100010100100100100",0x00005482,0xFFFFD5BC,0x0000081A,0x00003250,0xFFFFE961,0x00000541,0x00003250,0xFFFFE961,0x00000541},
- {"0000001000010011111100001111111111101111010111000010100101000100",0x00003D27,0xFFFFE2FA,0x00000632,0x00002A4D,0xFFFFED6A,0x000004BB,0x00002A4D,0xFFFFED6A,0x000004BB},
- {"0000001000010011111100001111111111101111011000000001100010100100",0x00003E03,0xFFFFE142,0x00000690,0x00001E08,0xFFFFF555,0x0000036C,0x00001E08,0xFFFFF555,0x0000036C},
- {"0000001000010011111100001111111111101111010111000010000001100100",0x000031B5,0xFFFFE97D,0x00000535,0x0000232E,0xFFFFF166,0x00000422,0x0000232E,0xFFFFF166,0x00000422},
- {"0000001000010011111100001111111111101111010111100001100011100100",0x00003753,0xFFFFE724,0x00000575,0x0000281A,0xFFFFEF1A,0x0000046B,0x0000281A,0xFFFFEF1A,0x0000046B},
- {"0000001000010011111010101001010011011110001000000100000101000100",0x00002071,0xFFFFF5C9,0x0000036F,0x00001470,0xFFFFFBF7,0x000002A5,0x00001470,0xFFFFFBF7,0x000002A5},
- {"0000001000010011111100001111111111101111011010000011000101000100",0x00002799,0xFFFFF223,0x000003CF,0x00001CD3,0xFFFFF74A,0x00000333,0x00001CD3,0xFFFFF74A,0x00000333},
- {"0000001000010011111100001111111111101111011001100001000011000100",0x000040DF,0xFFFFE11C,0x00000664,0x000031D4,0xFFFFE8BC,0x0000056F,0x000031D4,0xFFFFE8BC,0x0000056F},
- {"0000001000010011111100001111111111101111011001000100000011000100",0x00003A4D,0xFFFFE3A6,0x00000627,0x00002871,0xFFFFEDA0,0x000004C0,0x00002871,0xFFFFEDA0,0x000004C0},
- {"0000001000010011111100001111111111101111011010000001100110000100",0x00002AF9,0xFFFFEED7,0x00000464,0x0000219B,0xFFFFF368,0x000003D6,0x0000219B,0xFFFFF368,0x000003D6},
- {"0000001000010011111010101001010011011110001100100011000100100100",0x000026D5,0xFFFFF36C,0x000003A3,0x00001BC6,0xFFFFF881,0x00000311,0x00001BC6,0xFFFFF881,0x00000311},
- {"0000001000010011111100001111111111101111010111100010000001000100",0x0000325D,0xFFFFEA07,0x0000050B,0x000026D1,0xFFFFEFB3,0x0000045A,0x000026D1,0xFFFFEFB3,0x0000045A},
- {"0000001000010011111100001111111111101111011010000010100001100100",0x00002F75,0xFFFFEC64,0x000004BE,0x00001EEB,0xFFFFF559,0x00000386,0x00001EEB,0xFFFFF559,0x00000386},
- {"0000001000010011111100001111111111101111010110100011100010100100",0x00003C2F,0xFFFFE541,0x000005A3,0x000025B6,0xFFFFF16F,0x000003FA,0x000025B6,0xFFFFF16F,0x000003FA},
- {"0000001000010011111100001111111111101111011010000100100100100100",0x00002BC2,0xFFFFEE89,0x0000046A,0x00001D04,0xFFFFF651,0x00000361,0x00001D04,0xFFFFF651,0x00000361},
- {"0000001000010011111100001111111111101111011010000010100110100100",0x00002DD0,0xFFFFED40,0x0000049F,0x00001C8C,0xFFFFF6B3,0x00000353,0x00001C8C,0xFFFFF6B3,0x00000353},
- {"0000001000010011111010101001010011011110000111000000100011100100",0x000021ED,0xFFFFF530,0x00000380,0x00001643,0xFFFFFB1C,0x000002C3,0x00001643,0xFFFFFB1C,0x000002C3},
- {"0000001000010011111010101001010011011110001100100001100100000100",0x000028C7,0xFFFFF160,0x000003FD,0x00001990,0xFFFFF994,0x000002E2,0x00001990,0xFFFFF994,0x000002E2},
- {"0000001000010011111100001111111111101111011001100001000010100100",0x0000431C,0xFFFFDF9D,0x000006A3,0x000034A6,0xFFFFE6B0,0x000005C9,0x000034A6,0xFFFFE6B0,0x000005C9},
- {"0000001000010011111010101001010011011110001001100011000010100100",0x00004115,0xFFFFE0D6,0x00000667,0x000031AD,0xFFFFE850,0x00000585,0x000031AD,0xFFFFE850,0x00000585},
- {"0000001000010011111100001111111111101111011001000011100100100100",0x0000424A,0xFFFFDEEC,0x000006E1,0x0000346A,0xFFFFE5EA,0x00000602,0x0000346A,0xFFFFE5EA,0x00000602},
- {"0000001000010011111100001111111111101111011001100001100110000100",0x00004990,0xFFFFDAFA,0x00000771,0x00002A9C,0xFFFFED37,0x000004BC,0x00002A9C,0xFFFFED37,0x000004BC},
- {"0000001000010011111100001111111111101111011001000010100010100100",0x00003858,0xFFFFE568,0x000005D2,0x00003030,0xFFFFE8B0,0x0000058E,0x00003030,0xFFFFE8B0,0x0000058E},
- {"0000001000010011111100001111111111101111011010000100000101100100",0x00001EDC,0xFFFFF6CD,0x00000322,0x00001FCA,0xFFFFF4BD,0x0000039E,0x00001FCA,0xFFFFF4BD,0x0000039E},
- {"0000001000010011111100001111111111101111011001100010000100100100",0x00004C88,0xFFFFDBA3,0x0000071B,0x000030C4,0xFFFFEAFD,0x000004F7,0x000030C4,0xFFFFEAFD,0x000004F7},
- {"0000001000010011111100001111111111101111011010000000100100000100",0x00002B9A,0xFFFFEE41,0x0000047D,0x00002131,0xFFFFF344,0x000003E5,0x00002131,0xFFFFF344,0x000003E5},
- {"0000001000010011111100001111111111101111011000100011100110000100",0x00003E4B,0xFFFFE33C,0x000005FA,0x00003877,0xFFFFE437,0x0000062E,0x00003877,0xFFFFE437,0x0000062E},
- {"0000001000010011111010101001010011011110001100100010000001100100",0x00002376,0xFFFFF444,0x0000038A,0x000017ED,0xFFFFFA4C,0x000002C1,0x000017ED,0xFFFFFA4C,0x000002C1},
- {"0000001000010011111100001111111111101111011001100001000010000100",0x00004517,0xFFFFDDF4,0x000006F2,0x000030DC,0xFFFFE8EF,0x00000571,0x000030DC,0xFFFFE8EF,0x00000571},
- {"0000001000010011111100001111111111101111011010000001100101000100",0x0000270C,0xFFFFF1F3,0x000003DF,0x0000207B,0xFFFFF474,0x000003AD,0x0000207B,0xFFFFF474,0x000003AD},
- {"0000001000010011111100001111111111101111011001000101000101000100",0x00004086,0xFFFFDF39,0x000006E3,0x00002A24,0xFFFFEC2B,0x000004FF,0x00002A24,0xFFFFEC2B,0x000004FF},
- {"0000001000010011111100001111111111101111010111000011000100100100",0x00003BDE,0xFFFFE45E,0x000005EB,0x00002CD5,0xFFFFEC45,0x000004DD,0x00002CD5,0xFFFFEC45,0x000004DD},
- {"0000001000010011111100001111111111101111011100100011000011100100",0x00003803,0xFFFFE714,0x00000579,0x0000288A,0xFFFFEF21,0x0000046B,0x0000288A,0xFFFFEF21,0x0000046B},
- {"0000001000010011111100001111111111101111011000000001000100000100",0x00003F50,0xFFFFE002,0x000006CD,0x00001AD4,0xFFFFF72E,0x0000031F,0x00001AD4,0xFFFFF72E,0x0000031F},
- {"0000001000010011111100001111111111101111011010000010000011100100",0x00002968,0xFFFFF100,0x00000402,0x00001FB5,0xFFFFF57C,0x0000037F,0x00001FB5,0xFFFFF57C,0x0000037F},
- {"0000001000010011111100001111111111101111011001100010000100000100",0x00004283,0xFFFFE2A7,0x000005F5,0x00003165,0xFFFFEB0C,0x000004EC,0x00003165,0xFFFFEB0C,0x000004EC},
- {"0000001000010011111100001111111111101111011001000011000110100100",0x00004253,0xFFFFDDA8,0x00000732,0x00002E5C,0xFFFFE90A,0x00000593,0x00002E5C,0xFFFFE90A,0x00000593},
- {"0000001000010011111100001111111111101111010111000101000010100100",0x00003551,0xFFFFE756,0x0000058D,0x000029A7,0xFFFFED0C,0x000004DE,0x000029A7,0xFFFFED0C,0x000004DE},
- {"0000001000010011111100001111111111101111011001000010100011000100",0x00003728,0xFFFFE604,0x000005C4,0x00002832,0xFFFFEE64,0x00000493,0x00002832,0xFFFFEE64,0x00000493},
- {"0000001000010011111100001111111111101111011000100011100101100100",0x00004796,0xFFFFDCC8,0x00000715,0x000032AB,0xFFFFE848,0x0000057C,0x000032AB,0xFFFFE848,0x0000057C},
- {"0000001000010011111100001111111111101111011000100001000011000100",0x000049DF,0xFFFFDB24,0x0000075F,0x00003076,0xFFFFE967,0x0000055C,0x00003076,0xFFFFE967,0x0000055C},
- {"0000001000010011111100001111111111101111011100100001000100000100",0x00003F13,0xFFFFE099,0x000006A8,0x00002279,0xFFFFF226,0x000003F3,0x00002279,0xFFFFF226,0x000003F3},
- {"0000001000010011111100001111111111101111011001000011000010100100",0x00003E03,0xFFFFE19F,0x00000674,0x00002D66,0xFFFFEAA7,0x00000537,0x00002D66,0xFFFFEAA7,0x00000537},
- {"0000001000010011111100001111111111101111010111000100000100000100",0x000037DA,0xFFFFE63F,0x000005A7,0x00002543,0xFFFFF0A0,0x00000431,0x00002543,0xFFFFF0A0,0x00000431},
- {"0000001000010011111100001111111111101111011000100100100101000100",0x00003D82,0xFFFFE3F5,0x000005D9,0x0000332F,0xFFFFE834,0x00000577,0x0000332F,0xFFFFE834,0x00000577},
- {"0000001000010011111010101001010011011110000100100010100011000100",0x00002915,0xFFFFF1E0,0x000003D4,0x00002065,0xFFFFF57B,0x00000378,0x00002065,0xFFFFF57B,0x00000378},
- {"0000001000010011111100001111111111101111010111100100100100000100",0x000036FC,0xFFFFE72D,0x00000577,0x00002811,0xFFFFEF30,0x00000464,0x00002811,0xFFFFEF30,0x00000464},
- {"0000001000010011111100001111111111101111011000100011000110000100",0x00004767,0xFFFFDD30,0x000006FD,0x00003703,0xFFFFE564,0x000005F8,0x00003703,0xFFFFE564,0x000005F8},
- {"0000001000010011111100001111111111101111011000000011000110000100",0x00003094,0xFFFFEAA9,0x000004F5,0x000022E7,0xFFFFF200,0x000003FB,0x000022E7,0xFFFFF200,0x000003FB},
- {"0000001000010011111100001111111111101111011001000001000101000100",0x00003EF0,0xFFFFDF83,0x000006ED,0x00002A27,0xFFFFEB7C,0x00000537,0x00002A27,0xFFFFEB7C,0x00000537},
- {"0000001000010011111100001111111111101111011010000001000100100100",0x0000243C,0xFFFFF358,0x000003AC,0x00001DC4,0xFFFFF5E9,0x00000372,0x00001DC4,0xFFFFF5E9,0x00000372},
- {"0000001000010011111100001111111111101111011100100010000101000100",0x0000284B,0xFFFFF036,0x0000040F,0x00001FCD,0xFFFFF445,0x00000395,0x00001FCD,0xFFFFF445,0x00000395},
- {"0000001000010011111100001111111111101111011010000100000011000100",0x00002611,0xFFFFF285,0x000003C7,0x00001CFE,0xFFFFF6A0,0x00000355,0x00001CFE,0xFFFFF6A0,0x00000355},
- {"0000001000010011111010101001010011011110000111000011100110100100",0x00002292,0xFFFFF49F,0x00000393,0x000017F4,0xFFFFF9CD,0x000002F5,0x000017F4,0xFFFFF9CD,0x000002F5},
- {"0000001000010011111100001111111111101111010111100011100010100100",0x000037F3,0xFFFFE68D,0x00000590,0x00002443,0xFFFFF1AD,0x000003FA,0x00002443,0xFFFFF1AD,0x000003FA},
- {"0000001000010011111100001111111111101111011010000010000101000100",0x00002C01,0xFFFFEF3F,0x00000444,0x0000210A,0xFFFFF475,0x000003A7,0x0000210A,0xFFFFF475,0x000003A7},
- {"0000001000010011111010101001010011011110000100100001000011100100",0x00002C0E,0xFFFFEF0F,0x00000446,0x00001A82,0xFFFFF8F7,0x000002DE,0x00001A82,0xFFFFF8F7,0x000002DE},
- {"0000001000010011111100001111111111101111010111100010000011000100",0x00003FA6,0xFFFFE20A,0x0000063F,0x00002E29,0xFFFFEB21,0x00000510,0x00002E29,0xFFFFEB21,0x00000510},
- {"0000001000010011111100001111111111101111010111000010000101100100",0x00003BCD,0xFFFFE31B,0x0000063C,0x000019AF,0xFFFFF83D,0x000002F8,0x000019AF,0xFFFFF83D,0x000002F8},
- {"0000001000010011111100001111111111101111011001100100000101100100",0x000044C8,0xFFFFDF08,0x000006B0,0x00002E2E,0xFFFFEB62,0x000004FD,0x00002E2E,0xFFFFEB62,0x000004FD},
- {"0000001000010011111100001111111111101111010111000001100010000100",0x00003790,0xFFFFE571,0x000005E3,0x00002042,0xFFFFF35D,0x000003CF,0x00002042,0xFFFFF35D,0x000003CF},
- {"0000001000010011111100001111111111101111011000000101000011100100",0x000038AC,0xFFFFE46C,0x00000609,0x0000215E,0xFFFFF22D,0x00000403,0x0000215E,0xFFFFF22D,0x00000403},
- {"0000001000010011111100001111111111101111010111100010100110100100",0x00003A1E,0xFFFFE536,0x000005C9,0x000024F3,0xFFFFF11A,0x0000041B,0x000024F3,0xFFFFF11A,0x0000041B},
- {"0000001000010011111100001111111111101111011001100101000011100100",0x0000431A,0xFFFFDF1B,0x000006C5,0x00002F34,0xFFFFEA02,0x00000545,0x00002F34,0xFFFFEA02,0x00000545},
- {"0000001000010011111100001111111111101111011001000001100100000100",0x000042DC,0xFFFFDE28,0x0000070C,0x00003B53,0xFFFFE0EA,0x000006E2,0x00003B53,0xFFFFE0EA,0x000006E2},
- {"0000001000010011111100001111111111101111011010000011000101100100",0x0000264B,0xFFFFF29A,0x000003C4,0x000021D0,0xFFFFF3CE,0x000003C4,0x000021D0,0xFFFFF3CE,0x000003C4},
- {"0000001000010011111100001111111111101111010110100100000001100100",0x00004225,0xFFFFE0E8,0x00000665,0x00002B53,0xFFFFED89,0x0000049F,0x00002B53,0xFFFFED89,0x0000049F},
- {"0000001000010011111010101001010011011110001000000100100100100100",0x00001FCC,0xFFFFF63F,0x00000358,0x000019E8,0xFFFFF882,0x0000032A,0x000019E8,0xFFFFF882,0x0000032A},
- {"0000001000010011111100001111111111101111011000100100000010100100",0x000045E0,0xFFFFDDD0,0x000006ED,0x00003193,0xFFFFE8BD,0x00000572,0x00003193,0xFFFFE8BD,0x00000572},
- {"0000001000010011111100001111111111101111011010000011100100100100",0x000024FC,0xFFFFF366,0x000003A6,0x00001FE8,0xFFFFF509,0x00000394,0x00001FE8,0xFFFFF509,0x00000394},
- {"0000001000010011111100001111111111101111010111000100100010000100",0x0000378F,0xFFFFE54B,0x000005F1,0x00001C9B,0xFFFFF5C7,0x00000368,0x00001C9B,0xFFFFF5C7,0x00000368},
- {"0000001000010011111100001111111111101111011001000001100010100100",0x00003CF3,0xFFFFE15A,0x00000694,0x00002CDD,0xFFFFEA44,0x00000557,0x00002CDD,0xFFFFEA44,0x00000557},
- {"0000001000010011111010101001010011011110001000000000100100000100",0x000021EC,0xFFFFF4F4,0x0000038F,0x00001511,0xFFFFFBF5,0x0000029E,0x00001511,0xFFFFFBF5,0x0000029E},
- {"0000001000010011111100001111111111101111011000000001000010100100",0x00003C8A,0xFFFFE1C1,0x00000685,0x000019C7,0xFFFFF7E2,0x00000301,0x000019C7,0xFFFFF7E2,0x00000301},
- {"0000001000010011111100001111111111101111010111100010000001100100",0x00003908,0xFFFFE5C7,0x000005B3,0x00002793,0xFFFFEF46,0x00000465,0x00002793,0xFFFFEF46,0x00000465},
- {"0000001000010011111100001111111111101111011000000101000100000100",0x000040A3,0xFFFFDE61,0x00000725,0x00002077,0xFFFFF2CE,0x000003E8,0x00002077,0xFFFFF2CE,0x000003E8},
- {"0000001000010011111100001111111111101111011001100100000101000100",0x00003DCA,0xFFFFE34D,0x00000608,0x00002D66,0xFFFFEBDF,0x000004E8,0x00002D66,0xFFFFEBDF,0x000004E8},
- {"0000001000010011111100001111111111101111010111100101000011000100",0x00003085,0xFFFFEB70,0x000004C8,0x000029B1,0xFFFFEDD9,0x000004A5,0x000029B1,0xFFFFEDD9,0x000004A5},
- {"0000001000010011111010101001010011011110000010000011100010000100",0x00004C73,0xFFFFD676,0x0000086C,0x0000280A,0xFFFFED89,0x000004C2,0x0000280A,0xFFFFED89,0x000004C2},
- {"0000001000010011111010101001010011011110001001000010000101100100",0x00002CE5,0xFFFFEE8C,0x00000466,0x00001755,0xFFFFFAC2,0x000002AC,0x00001755,0xFFFFFAC2,0x000002AC},
- {"0000001000010011111100001111111111101111011000100001000100100100",0x0000489F,0xFFFFDBF1,0x0000073E,0x0000332D,0xFFFFE786,0x000005AD,0x0000332D,0xFFFFE786,0x000005AD},
- {"0000001000010011111100001111111111101111011000000010100001100100",0x00003D09,0xFFFFE193,0x00000689,0x00001E82,0xFFFFF4C0,0x00000386,0x00001E82,0xFFFFF4C0,0x00000386},
- {"0000001000010011111100001111111111101111011001000100000100000100",0x00003E4C,0xFFFFE131,0x00000689,0x00002F4E,0xFFFFE925,0x0000057B,0x00002F4E,0xFFFFE925,0x0000057B},
- {"0000001000010011111100001111111111101111010110100100000010000100",0x00003B31,0xFFFFE53F,0x000005B3,0x0000248A,0xFFFFF211,0x000003DF,0x0000248A,0xFFFFF211,0x000003DF},
- {"0000001000010011111100001111111111101111011001000100000100100100",0x000038DD,0xFFFFE54A,0x000005C9,0x00002B6D,0xFFFFEBDF,0x00000502,0x00002B6D,0xFFFFEBDF,0x00000502},
- {"0000001000010011111100001111111111101111011010000100000001100100",0x00002698,0xFFFFF1A8,0x000003F2,0x00002163,0xFFFFF34B,0x000003E3,0x00002163,0xFFFFF34B,0x000003E3},
- {"0000001000010011111010101001010011011110001000000001000001100100",0x000023A8,0xFFFFF4CD,0x00000386,0x00001944,0xFFFFF983,0x00000300,0x00001944,0xFFFFF983,0x00000300},
- {"0000001000010011111100001111111111101111011001000001100011000100",0x00003EAF,0xFFFFE0C3,0x000006A0,0x000030AB,0xFFFFE829,0x000005A6,0x000030AB,0xFFFFE829,0x000005A6},
- {"0000001000010011111100001111111111101111011010000100100101000100",0x00002E89,0xFFFFECA6,0x000004B6,0x00001FA0,0xFFFFF4A8,0x000003A3,0x00001FA0,0xFFFFF4A8,0x000003A3},
- {"0000001000010011111100001111111111101111011010000010100010100100",0x000028A4,0xFFFFF112,0x00000402,0x00001F7C,0xFFFFF545,0x0000038A,0x00001F7C,0xFFFFF545,0x0000038A},
- {"0000001000010011111100001111111111101111011001100101000010100100",0x00004135,0xFFFFDFA2,0x000006C5,0x0000324C,0xFFFFE7AA,0x000005AF,0x0000324C,0xFFFFE7AA,0x000005AF},
- {"0000001000010011111010101001010011011110001000000011100011000100",0x00002012,0xFFFFF693,0x00000352,0x0000171F,0xFFFFFABB,0x000002D9,0x0000171F,0xFFFFFABB,0x000002D9},
- {"0000001000010011111100001111111111101111011001000011000010000100",0x00003D7C,0xFFFFE1BC,0x00000671,0x00002A45,0xFFFFEC84,0x000004EC,0x00002A45,0xFFFFEC84,0x000004EC},
- {"0000001000010011111100001111111111101111011100100011000001100100",0x00004172,0xFFFFDF58,0x000006DA,0x00002504,0xFFFFF0A6,0x00000431,0x00002504,0xFFFFF0A6,0x00000431},
- {"0000001000010011111100001111111010011001001010000001100101000100",0x000029C7,0xFFFFF087,0x00000414,0x00001DCB,0xFFFFF675,0x0000035F,0x00001DCB,0xFFFFF675,0x0000035F},
- {"0000001000010011111100001111111010011001001010100010100110100100",0x000027F0,0xFFFFF05A,0x00000432,0x00001707,0xFFFFFA0E,0x000002D1,0x00001707,0xFFFFFA0E,0x000002D1},
- {"0000001000010011111100001111111010011001001000100010000101000100",0x00003279,0xFFFFE9F7,0x00000511,0x00001B5E,0xFFFFF787,0x00000317,0x00001B5E,0xFFFFF787,0x00000317},
- {"0000001000010011111100001111111010011001001100100010000110000100",0x000030A5,0xFFFFEABC,0x000004FF,0x000019D1,0xFFFFF83C,0x00000304,0x000019D1,0xFFFFF83C,0x00000304},
- {"0000001000010011111100001111111010011001001010000010100001000100",0x0000283B,0xFFFFF122,0x00000402,0x000019C2,0xFFFFF8E9,0x000002FB,0x000019C2,0xFFFFF8E9,0x000002FB},
- {"0000001000010011111100001111111010011001001011000010000010000100",0x00003376,0xFFFFE9E1,0x00000510,0x000021A7,0xFFFFF39F,0x000003BF,0x000021A7,0xFFFFF39F,0x000003BF},
- {"0000001000010011111100001111111010011001001100100001100011000100",0x000031D2,0xFFFFEA9C,0x000004FC,0x00001F66,0xFFFFF4E4,0x00000390,0x00001F66,0xFFFFF4E4,0x00000390},
- {"0000001000010011111100001111111010011001000110100011100001100100",0x00003006,0xFFFFEB18,0x000004F2,0x000019B3,0xFFFFF84E,0x00000301,0x000019B3,0xFFFFF84E,0x00000301},
- {"0000001000010011111100001111111010011001001100000011100110100100",0x0000364F,0xFFFFE81F,0x00000556,0x00002AC9,0xFFFFED87,0x000004BD,0x00002AC9,0xFFFFED87,0x000004BD},
- {"0000001000010011111100001111111010011001001011100011100001000100",0x00003043,0xFFFFEBAE,0x000004CD,0x00001B0C,0xFFFFF7ED,0x0000030C,0x00001B0C,0xFFFFF7ED,0x0000030C},
- {"0000001000010011111100001111111010011001001100000100100010100100",0x000037CE,0xFFFFE69E,0x00000596,0x0000276B,0xFFFFEF65,0x0000046E,0x0000276B,0xFFFFEF65,0x0000046E},
- {"0000001000010011111100001111111010011001001011000011000100000100",0x00003063,0xFFFFED5E,0x0000046F,0x000024AE,0xFFFFF2C4,0x000003D8,0x000024AE,0xFFFFF2C4,0x000003D8},
- {"0000001000010011111100001111111010011001001011100000100010100100",0x00002F5D,0xFFFFEBDC,0x000004D3,0x00001EDB,0xFFFFF50F,0x0000038E,0x00001EDB,0xFFFFF50F,0x0000038E},
- {"0000001000010011111100001111111010011001001011100100100010100100",0x00003148,0xFFFFEA9A,0x000004FB,0x0000192D,0xFFFFF8E9,0x000002DF,0x0000192D,0xFFFFF8E9,0x000002DF},
- {"0000001000010011111100001111111010011001001011000010000001100100",0x00003682,0xFFFFE7E4,0x0000055C,0x0000250E,0xFFFFF150,0x0000041A,0x0000250E,0xFFFFF150,0x0000041A},
- {"0000001000010011111100001111111010011001001010100010000010000100",0x0000284E,0xFFFFF15A,0x000003F8,0x00001CE2,0xFFFFF6F9,0x0000034F,0x00001CE2,0xFFFFF6F9,0x0000034F},
- {"0000001000010011111100001111111010011001001100000001100010100100",0x00003171,0xFFFFEAE9,0x000004ED,0x00001F40,0xFFFFF513,0x00000384,0x00001F40,0xFFFFF513,0x00000384},
- {"0000001000010011111100001111111010011001001100100011000001000100",0x000031BD,0xFFFFEA64,0x0000050A,0x00001EFD,0xFFFFF4F7,0x00000390,0x00001EFD,0xFFFFF4F7,0x00000390},
- {"0000001000010011111100001111111010011001001011100101000011100100",0x00003050,0xFFFFEB29,0x000004EA,0x000019B3,0xFFFFF878,0x000002F9,0x000019B3,0xFFFFF878,0x000002F9},
- {"0000001000010011111100001111111010011001001011000001100100000100",0x00003400,0xFFFFE9A0,0x0000051A,0x00002460,0xFFFFF1DA,0x00000409,0x00002460,0xFFFFF1DA,0x00000409},
- {"0000001000010011111100001111111010011001001011000100100010000100",0x000034A1,0xFFFFE86F,0x00000558,0x0000255D,0xFFFFF09E,0x00000443,0x0000255D,0xFFFFF09E,0x00000443},
- {"0000001000010011111100001111111010011001001011100100100011100100",0x00003103,0xFFFFEAD7,0x000004F0,0x00001896,0xFFFFF95A,0x000002CC,0x00001896,0xFFFFF95A,0x000002CC},
- {"0000001000010011111100001111111010011001001100000001100011100100",0x00003120,0xFFFFEB9E,0x000004CB,0x000021E8,0xFFFFF3A2,0x000003C1,0x000021E8,0xFFFFF3A2,0x000003C1},
- {"0000001000010011111100001111111010011001000111000101000011100100",0x00003558,0xFFFFE812,0x00000565,0x0000256E,0xFFFFF097,0x00000447,0x0000256E,0xFFFFF097,0x00000447},
- {"0000001000010011111100001111111010011001000110100010100001000100",0x00002DA8,0xFFFFECA8,0x000004B7,0x0000180B,0xFFFFF96D,0x000002D8,0x0000180B,0xFFFFF96D,0x000002D8},
- {"0000001000010011111100001111111010011001001011100011000001100100",0x00003232,0xFFFFEA66,0x000004FF,0x00001DDE,0xFFFFF5FE,0x0000035A,0x00001DDE,0xFFFFF5FE,0x0000035A},
- {"0000001000010011111100001111111010011001001100000101000011100100",0x000034D2,0xFFFFE89F,0x00000548,0x0000246C,0xFFFFF17F,0x00000418,0x0000246C,0xFFFFF17F,0x00000418},
- {"0000001000010011111100001111111010011001001100000100100100000100",0x000033EC,0xFFFFE954,0x0000052A,0x00002323,0xFFFFF279,0x000003EE,0x00002323,0xFFFFF279,0x000003EE},
- {"0000001000010011111100001111111010011001001100000011100010000100",0x000033AA,0xFFFFE955,0x0000052D,0x0000229F,0xFFFFF2B2,0x000003E7,0x0000229F,0xFFFFF2B2,0x000003E7},
- {"0000001000010011111100001111111010011001001100100100100101100100",0x00003258,0xFFFFE9AA,0x0000052A,0x00001D5F,0xFFFFF5D1,0x0000036B,0x00001D5F,0xFFFFF5D1,0x0000036B},
- {"0000001000010011111100001111111010011001001100000010100110100100",0x0000323A,0xFFFFEA5F,0x00000504,0x00002108,0xFFFFF3D5,0x000003BA,0x00002108,0xFFFFF3D5,0x000003BA},
- {"0000001000010011111100001111111010011001001011000010000110000100",0x00003216,0xFFFFEA6B,0x000004FF,0x00001D6E,0xFFFFF640,0x00000350,0x00001D6E,0xFFFFF640,0x00000350},
- {"0000001000010011111100001111111010011001001100100001000011100100",0x000030C5,0xFFFFEAC4,0x000004FC,0x00001924,0xFFFFF8C2,0x000002EE,0x00001924,0xFFFFF8C2,0x000002EE},
- {"0000001000010011111100001111111010011001001100000101000100000100",0x000032BB,0xFFFFE9F1,0x00000515,0x00002211,0xFFFFF31B,0x000003D5,0x00002211,0xFFFFF31B,0x000003D5},
- {"0000001000010011111100001111111010011001001100000100100011000100",0x0000352C,0xFFFFE85B,0x00000553,0x00002410,0xFFFFF1B4,0x0000040F,0x00002410,0xFFFFF1B4,0x0000040F},
- {"0000001000010011111100001111111010011001001000100011100011000100",0x000036A0,0xFFFFE7E8,0x0000055D,0x00002901,0xFFFFEEB8,0x00000489,0x00002901,0xFFFFEEB8,0x00000489},
- {"0000001000010011111100001111111010011001001011000011000001000100",0x00003340,0xFFFFE9D9,0x00000516,0x00002332,0xFFFFF27A,0x000003F0,0x00002332,0xFFFFF27A,0x000003F0},
- {"0000001000010011111100001111111010011001000110100011100010100100",0x00003564,0xFFFFE86D,0x0000054E,0x00002613,0xFFFFF07C,0x00000444,0x00002613,0xFFFFF07C,0x00000444},
- {"0000001000010011111100001111111010011001001010000000100100000100",0x00002AD1,0xFFFFEF0B,0x0000045C,0x00001DEA,0xFFFFF5C8,0x00000381,0x00001DEA,0xFFFFF5C8,0x00000381},
- {"0000001000010011111100001111111010011001001000100010000011100100",0x000035B0,0xFFFFE846,0x00000555,0x000027BE,0xFFFFEF5D,0x00000474,0x000027BE,0xFFFFEF5D,0x00000474},
- {"0000001000010011111100001111111010011001001000100011100010100100",0x000032C4,0xFFFFEA48,0x00000502,0x000022C6,0xFFFFF2DF,0x000003DE,0x000022C6,0xFFFFF2DF,0x000003DE},
- {"0000001000010011111100001111111010011001001100000000100011000100",0x00003036,0xFFFFEB0D,0x000004F9,0x00001FE8,0xFFFFF41A,0x000003BC,0x00001FE8,0xFFFFF41A,0x000003BC},
- {"0000001000010011111100001111111010011001000110100000100100000100",0x000030F8,0xFFFFEA13,0x00000524,0x00001B6A,0xFFFFF6C9,0x0000034A,0x00001B6A,0xFFFFF6C9,0x0000034A},
- {"0000001000010011111100001111111010011001001100000001000010100100",0x00002EE2,0xFFFFEC0C,0x000004CB,0x00001A39,0xFFFFF814,0x0000030F,0x00001A39,0xFFFFF814,0x0000030F},
- {"0000001000010011111100001111111010011001000111000011000110000100",0x00003457,0xFFFFE924,0x0000052A,0x00001E9D,0xFFFFF59C,0x00000363,0x00001E9D,0xFFFFF59C,0x00000363},
- {"0000001000010011111100001111111010011001001100100010100001000100",0x000030BF,0xFFFFEB18,0x000004ED,0x00001D37,0xFFFFF636,0x0000035C,0x00001D37,0xFFFFF636,0x0000035C},
- {"0000001000010011111100001111111010011001001011100100000010000100",0x000031AF,0xFFFFEA75,0x000004FE,0x000019F2,0xFFFFF87A,0x000002F0,0x000019F2,0xFFFFF87A,0x000002F0},
- {"0000001000010011111100001111111010011001001100000010100010000100",0x00003642,0xFFFFE85B,0x00000547,0x00002975,0xFFFFEE98,0x0000048B,0x00002975,0xFFFFEE98,0x0000048B},
- {"0000001000010011111100001111111010011001001011100010100010000100",0x00002E8B,0xFFFFED1E,0x0000048E,0x000019C1,0xFFFFF917,0x000002D6,0x000019C1,0xFFFFF917,0x000002D6},
- {"0000001000010011111100001111111010011001001100100100000110100100",0x000033D9,0xFFFFE8E1,0x00000548,0x0000224B,0xFFFFF298,0x000003F4,0x0000224B,0xFFFFF298,0x000003F4},
- {"0000001000010011111100001111111010011001001011100010100011000100",0x000032BC,0xFFFFEB0F,0x000004D6,0x00002488,0xFFFFF240,0x000003F2,0x00002488,0xFFFFF240,0x000003F2},
- {"0000001000010011111100001111111010011001001100000100100101000100",0x000035FD,0xFFFFE838,0x00000553,0x00002762,0xFFFFEFBC,0x00000460,0x00002762,0xFFFFEFBC,0x00000460},
- {"0000001000010011111100001111111010011001001010000001100010100100",0x0000268B,0xFFFFF263,0x000003D1,0x00001914,0xFFFFF977,0x000002E8,0x00001914,0xFFFFF977,0x000002E8},
- {"0000001000010011111100001111111010011001001011000011000110000100",0x0000330B,0xFFFFEA1E,0x00000505,0x000020B1,0xFFFFF44D,0x0000039E,0x000020B1,0xFFFFF44D,0x0000039E},
- {"0000001000010011111100001111111010011001001000100010000010000100",0x0000326E,0xFFFFEA26,0x00000508,0x00001C17,0xFFFFF722,0x00000328,0x00001C17,0xFFFFF722,0x00000328},
- {"0000001000010011111100001111111010011001001010100011000110100100",0x00002A3F,0xFFFFEEE8,0x0000046D,0x00001B2B,0xFFFFF737,0x0000034D,0x00001B2B,0xFFFFF737,0x0000034D},
- {"0000001000010011111100001111111010011001001011000100000001100100",0x00003732,0xFFFFE765,0x00000574,0x00002A6D,0xFFFFEDA8,0x000004B7,0x00002A6D,0xFFFFEDA8,0x000004B7},
- {"0000001000010011111100001111111010011001001100000000100100100100",0x000034D3,0xFFFFE827,0x00000569,0x000027AA,0xFFFFEEE7,0x00000492,0x000027AA,0xFFFFEEE7,0x00000492},
- {"0000001000010011111100001111111010011001001011100100000011000100",0x00003306,0xFFFFEA39,0x000004FC,0x00001DCC,0xFFFFF655,0x00000344,0x00001DCC,0xFFFFF655,0x00000344},
- {"0000001000010011111100001111111010011001001010000010000001000100",0x00002A48,0xFFFFEFCA,0x00000439,0x00001DED,0xFFFFF60D,0x00000375,0x00001DED,0xFFFFF60D,0x00000375},
- {"0000001000010011111100001111111010011001001100000011100011000100",0x000033A3,0xFFFFEA36,0x000004F9,0x0000247C,0xFFFFF21F,0x000003F4,0x0000247C,0xFFFFF21F,0x000003F4},
- {"0000001000010011111100001111111010011001001011000011000101100100",0x0000311B,0xFFFFEB76,0x000004D1,0x00001EB1,0xFFFFF5B6,0x00000366,0x00001EB1,0xFFFFF5B6,0x00000366},
- {"0000001000010011111100001111111010011001001100100100000101100100",0x00003307,0xFFFFE97F,0x0000052A,0x00001E76,0xFFFFF54D,0x0000037C,0x00001E76,0xFFFFF54D,0x0000037C},
- {"0000001000010011111100001111111010011001000111000010000101000100",0x0000344B,0xFFFFE9C5,0x00000509,0x000020D6,0xFFFFF486,0x0000038F,0x000020D6,0xFFFFF486,0x0000038F},
- {"0000001000010011111100001111111010011001001011000011000101000100",0x000034B9,0xFFFFEA0B,0x000004F7,0x000027B3,0xFFFFF057,0x0000043A,0x000027B3,0xFFFFF057,0x0000043A},
- {"0000001000010011111100001111111010011001001100000001100101100100",0x00003360,0xFFFFE984,0x00000527,0x00002238,0xFFFFF2EE,0x000003E0,0x00002238,0xFFFFF2EE,0x000003E0},
- {"0000001000010011111100001111111010011001001100000010000100100100",0x0000315C,0xFFFFEC05,0x000004B1,0x000023C8,0xFFFFF2CC,0x000003DE,0x000023C8,0xFFFFF2CC,0x000003DE},
- {"0000001000010011111100001111111010011001001011000010100001100100",0x0000389B,0xFFFFE6D5,0x00000582,0x00002C6C,0xFFFFEC92,0x000004DE,0x00002C6C,0xFFFFEC92,0x000004DE},
- {"0000001000010011111100001111111010011001001011100001000100100100",0x00003058,0xFFFFEB30,0x000004E6,0x000019B5,0xFFFFF88B,0x000002F1,0x000019B5,0xFFFFF88B,0x000002F1},
- {"0000001000010011111100001111111010011001001011100000100100000100",0x00002F69,0xFFFFEB4A,0x000004F1,0x00001B82,0xFFFFF6EC,0x00000341,0x00001B82,0xFFFFF6EC,0x00000341},
- {"0000001000010011111100001111111010011001000110100001100011100100",0x000031EB,0xFFFFEA64,0x00000508,0x00002059,0xFFFFF427,0x000003B0,0x00002059,0xFFFFF427,0x000003B0},
- {"0000001000010011111100001111111010011001001000100100000100100100",0x000033E2,0xFFFFE94D,0x0000052A,0x000020BF,0xFFFFF40B,0x000003AB,0x000020BF,0xFFFFF40B,0x000003AB},
- {"0000001000010011111100001111111010011001001010000011000110000100",0x00002AF9,0xFFFFEFE9,0x00000427,0x00001F72,0xFFFFF57A,0x00000383,0x00001F72,0xFFFFF57A,0x00000383},
- {"0000001000010011111100001111111010011001001011000010100000100100",0x00003282,0xFFFFEA88,0x000004FA,0x00002561,0xFFFFF126,0x0000042B,0x00002561,0xFFFFF126,0x0000042B},
- {"0000001000010011111100001111111010011001001100000001000011100100",0x0000308A,0xFFFFEB5D,0x000004E0,0x00001E83,0xFFFFF577,0x00000378,0x00001E83,0xFFFFF577,0x00000378},
- {"0000001000010011111100001111111010011001001100100100100010000100",0x0000336E,0xFFFFE8C8,0x00000553,0x0000217C,0xFFFFF2E1,0x000003EB,0x0000217C,0xFFFFF2E1,0x000003EB},
- {"0000001000010011111100001111111010011001000110100010000101100100",0x000034A9,0xFFFFE838,0x00000561,0x000020CE,0xFFFFF38A,0x000003C7,0x000020CE,0xFFFFF38A,0x000003C7},
- {"0000001000010011111100001111111010011001001000100010000110000100",0x00003152,0xFFFFE9EB,0x00000522,0x00001755,0xFFFFF9A9,0x000002C6,0x00001755,0xFFFFF9A9,0x000002C6},
- {"0000001000010011111100001111111010011001001010000001100010000100",0x0000286E,0xFFFFF136,0x000003FD,0x00001BAB,0xFFFFF7C3,0x0000032C,0x00001BAB,0xFFFFF7C3,0x0000032C},
- {"0000001000010011111100001111111010011001001100000000100101000100",0x0000316B,0xFFFFEA02,0x00000528,0x00002247,0xFFFFF24E,0x00000408,0x00002247,0xFFFFF24E,0x00000408},
- {"0000001000010011111100001111111010011001001011000000100011100100",0x000034CF,0xFFFFE83D,0x00000562,0x00002458,0xFFFFF130,0x00000430,0x00002458,0xFFFFF130,0x00000430},
- {"0000001000010011111100001111111010011001001011000010100110000100",0x00003352,0xFFFFE9D1,0x00000515,0x0000212A,0xFFFFF3DC,0x000003B4,0x0000212A,0xFFFFF3DC,0x000003B4},
- {"0000001000010011111100001111111010011001001010000100000010100100",0x00002946,0xFFFFF09B,0x00000415,0x00001DC9,0xFFFFF650,0x00000366,0x00001DC9,0xFFFFF650,0x00000366},
- {"0000001000010011111100001111111010011001001100000001000100100100",0x00003080,0xFFFFEB47,0x000004E1,0x00001BD5,0xFFFFF73B,0x00000329,0x00001BD5,0xFFFFF73B,0x00000329},
- {"0000001000010011111100001111111010011001000110100001100010000100",0x00002FBD,0xFFFFEB7B,0x000004DD,0x000017FC,0xFFFFF99E,0x000002C7,0x000017FC,0xFFFFF99E,0x000002C7},
- {"0000001000010011111100001111111010011001001010000001000100100100",0x00002A28,0xFFFFF032,0x0000041F,0x00001B19,0xFFFFF83A,0x00000312,0x00001B19,0xFFFFF83A,0x00000312},
- {"0000001000010011111100001111111010011001001000100100000011000100",0x00003420,0xFFFFE936,0x00000530,0x000023C2,0xFFFFF203,0x00000406,0x000023C2,0xFFFFF203,0x00000406},
- {"0000001000010011111100001111111010011001001100000001000101000100",0x00002F7C,0xFFFFEBBA,0x000004D1,0x0000185D,0xFFFFF975,0x000002CA,0x0000185D,0xFFFFF975,0x000002CA},
- {"0000001000010011111100001111111010011001001011100010000001000100",0x00002C51,0xFFFFEE3B,0x0000046F,0x000019AA,0xFFFFF8DD,0x000002ED,0x000019AA,0xFFFFF8DD,0x000002ED},
- {"0000001000010011111100001111111010011001000110100100000101000100",0x000033D6,0xFFFFE8F2,0x0000053D,0x00001D73,0xFFFFF5FB,0x0000035B,0x00001D73,0xFFFFF5FB,0x0000035B},
- {"0000001000010011111100001111111010011001001100100011000010000100",0x000031D9,0xFFFFEAF7,0x000004E4,0x00001EBD,0xFFFFF5A6,0x00000368,0x00001EBD,0xFFFFF5A6,0x00000368},
- {"0000001000010011111100001111111010011001000110100010000010100100",0x00003386,0xFFFFE9CE,0x00000515,0x00002422,0xFFFFF1F3,0x00000405,0x00002422,0xFFFFF1F3,0x00000405},
- {"0000001000010011111100001111111010011001001011000101000011100100",0x000032FB,0xFFFFE9BC,0x00000520,0x00002301,0xFFFFF267,0x000003F7,0x00002301,0xFFFFF267,0x000003F7},
- {"0000001000010011111100001111111010011001001100100010100100100100",0x000032C2,0xFFFFEAC0,0x000004EA,0x0000250F,0xFFFFF1A2,0x00000413,0x0000250F,0xFFFFF1A2,0x00000413},
- {"0000001000010011111100001111111010011001000111000010100101000100",0x00003722,0xFFFFE8A6,0x00000527,0x000026E4,0xFFFFF0F5,0x0000041C,0x000026E4,0xFFFFF0F5,0x0000041C},
- {"0000001000010011111100001111111010011001001011000100100011000100",0x000035A4,0xFFFFE822,0x00000558,0x000022F2,0xFFFFF288,0x000003E8,0x000022F2,0xFFFFF288,0x000003E8},
- {"0000001000010011111100001111111010011001001010000000100100100100",0x00002CD1,0xFFFFEDC6,0x0000048C,0x00001EAF,0xFFFFF53D,0x00000396,0x00001EAF,0xFFFFF53D,0x00000396},
- {"0000001000010011111100001111111010011001001100000001000101100100",0x00003156,0xFFFFEA60,0x0000050B,0x00001BBC,0xFFFFF704,0x00000335,0x00001BBC,0xFFFFF704,0x00000335},
- {"0000001000010011111100001111111010011001001011000101000100000100",0x000034A1,0xFFFFE8C0,0x00000544,0x00002528,0xFFFFF105,0x0000042C,0x00002528,0xFFFFF105,0x0000042C},
- {"0000001000010011111100001111111010011001001100100011000001100100",0x000032CE,0xFFFFE9D3,0x00000520,0x000021FF,0xFFFFF2FD,0x000003E4,0x000021FF,0xFFFFF2FD,0x000003E4},
- {"0000001000010011111100001111111010011001000110100101000010100100",0x000034A0,0xFFFFE823,0x0000056D,0x0000256F,0xFFFFF047,0x0000045A,0x0000256F,0xFFFFF047,0x0000045A},
- {"0000001000010011111100001111111010011001001100000011100101000100",0x00003109,0xFFFFEBD6,0x000004BF,0x000022D4,0xFFFFF32D,0x000003D0,0x000022D4,0xFFFFF32D,0x000003D0},
- {"0000001000010011111100001111111010011001001011000001000101100100",0x000030B7,0xFFFFEAF0,0x000004F3,0x00001AEC,0xFFFFF7A9,0x0000031B,0x00001AEC,0xFFFFF7A9,0x0000031B},
- {"0000001000010011111100001111111010011001001011000011100110100100",0x00003078,0xFFFFEBA4,0x000004CF,0x00001E7A,0xFFFFF5AF,0x0000036B,0x00001E7A,0xFFFFF5AF,0x0000036B},
- {"0000001000010011111100001111111010011001001100000100000100100100",0x00003442,0xFFFFE998,0x00000518,0x000025EA,0xFFFFF0F3,0x0000042B,0x000025EA,0xFFFFF0F3,0x0000042B},
- {"0000001000010011111100001111111010011001001100000010000110100100",0x000031CB,0xFFFFEA80,0x00000501,0x000020A3,0xFFFFF403,0x000003B2,0x000020A3,0xFFFFF403,0x000003B2},
- {"0000001000010011111100001111111010011001001010100010100110000100",0x00002947,0xFFFFF018,0x00000433,0x00001BA5,0xFFFFF75C,0x00000340,0x00001BA5,0xFFFFF75C,0x00000340},
- {"0000001000010011111100001111111010011001001011000011100110000100",0x000033F9,0xFFFFE99D,0x00000518,0x00002231,0xFFFFF358,0x000003C5,0x00002231,0xFFFFF358,0x000003C5},
- {"0000001000010011111100001111111010011001001100100001000100100100",0x00003131,0xFFFFEA45,0x00000513,0x00001973,0xFFFFF85E,0x00000301,0x00001973,0xFFFFF85E,0x00000301},
- {"0000001000010011111100001111111010011001000111000010100110100100",0x00003571,0xFFFFE8AC,0x00000539,0x00002049,0xFFFFF49C,0x0000038D,0x00002049,0xFFFFF49C,0x0000038D},
- {"0000001000010011111100001111111010011001001011100011100001100100",0x0000309E,0xFFFFEB1D,0x000004E8,0x000019ED,0xFFFFF86E,0x000002F8,0x000019ED,0xFFFFF86E,0x000002F8},
- {"0000001000010011111100001111111010011001001100000010100110000100",0x00003091,0xFFFFEB9B,0x000004CC,0x00001D2C,0xFFFFF6A2,0x0000033D,0x00001D2C,0xFFFFF6A2,0x0000033D},
- {"0000001000010011111100001111111010011001001100000000100011100100",0x00003069,0xFFFFEAFD,0x000004F8,0x00001E82,0xFFFFF51C,0x0000038D,0x00001E82,0xFFFFF51C,0x0000038D},
- {"0000001000010011111100001111111010011001001000100001000010100100",0x00003459,0xFFFFE7F2,0x00000572,0x00001DA7,0xFFFFF552,0x0000037F,0x00001DA7,0xFFFFF552,0x0000037F},
- {"0000001000010011111100001111111010011001001100100001000100000100",0x0000304B,0xFFFFEAFB,0x000004F4,0x0000191E,0xFFFFF8BD,0x000002EE,0x0000191E,0xFFFFF8BD,0x000002EE},
- {"0000001000010011111100001111111010011001001100000010000011000100",0x0000346E,0xFFFFEA07,0x000004FD,0x00002767,0xFFFFF058,0x00000440,0x00002767,0xFFFFF058,0x00000440},
- {"0000001000010011111100001111111010011001001011100011000010000100",0x000030B5,0xFFFFEBC1,0x000004C1,0x00001B3C,0xFFFFF818,0x000002FD,0x00001B3C,0xFFFFF818,0x000002FD},
- {"0000001000010011111100001111111010011001001100000000100100000100",0x0000321F,0xFFFFE9EA,0x00000524,0x00002380,0xFFFFF1C2,0x0000041A,0x00002380,0xFFFFF1C2,0x0000041A},
- {"0000001000010011111100001111111010011001001011100011000001000100",0x000030DF,0xFFFFEB37,0x000004E2,0x00001E3C,0xFFFFF5BB,0x00000369,0x00001E3C,0xFFFFF5BB,0x00000369},
- {"0000001000010011111100001111111010011001001010000100100010100100",0x000027E0,0xFFFFF0E2,0x00000416,0x00001A6A,0xFFFFF820,0x00000321,0x00001A6A,0xFFFFF820,0x00000321},
- {"0000001000010011111100001111111010011001000110100001000010000100",0x00002FA1,0xFFFFEB63,0x000004E7,0x0000196B,0xFFFFF880,0x000002FB,0x0000196B,0xFFFFF880,0x000002FB},
- {"0000001000010011111100001111111010011001000111000001000010000100",0x0000310C,0xFFFFEAAF,0x000004FC,0x000019EF,0xFFFFF850,0x000002FD,0x000019EF,0xFFFFF850,0x000002FD},
- {"0000001000010011111100001111111010011001001100100011100100000100",0x0000334A,0xFFFFEA07,0x0000050B,0x00002380,0xFFFFF26F,0x000003F0,0x00002380,0xFFFFF26F,0x000003F0},
- {"0000001000010011111100001111111010011001001100000010100101000100",0x00002FF9,0xFFFFECDC,0x00000492,0x00002297,0xFFFFF394,0x000003BF,0x00002297,0xFFFFF394,0x000003BF},
- {"0000001000010011111100001111111010011001001011000010000101100100",0x0000354B,0xFFFFE894,0x00000546,0x000024C4,0xFFFFF16C,0x0000041B,0x000024C4,0xFFFFF16C,0x0000041B},
- {"0000001000010011111100001111111010011001001000100000100100100100",0x00003245,0xFFFFE92F,0x00000544,0x00001829,0xFFFFF8F1,0x000002EA,0x00001829,0xFFFFF8F1,0x000002EA},
- {"0000001000010011111100001111111010011001001011100100100010000100",0x0000302F,0xFFFFEB51,0x000004E3,0x0000199F,0xFFFFF894,0x000002F4,0x0000199F,0xFFFFF894,0x000002F4},
- {"0000001000010011111100001111111010011001001011100001100011000100",0x00002F54,0xFFFFEC86,0x000004A6,0x00001A6F,0xFFFFF891,0x000002EC,0x00001A6F,0xFFFFF891,0x000002EC},
- {"0000001000010011111100001111111010011001001010000100000101100100",0x00002908,0xFFFFF0D8,0x0000040A,0x00001C9B,0xFFFFF729,0x00000342,0x00001C9B,0xFFFFF729,0x00000342},
- {"0000001000010011111100001111111010011001001100000010100101100100",0x000031D9,0xFFFFEB40,0x000004D7,0x000023F5,0xFFFFF259,0x000003F4,0x000023F5,0xFFFFF259,0x000003F4},
- {"0000001000010011111100001111111010011001001100000100100011100100",0x000034C8,0xFFFFE8C6,0x0000053F,0x00002313,0xFFFFF280,0x000003EC,0x00002313,0xFFFFF280,0x000003EC},
- {"0000001000010011111100001111111010011001001100000101000011000100",0x000037D1,0xFFFFE6A1,0x0000059C,0x00002C6A,0xFFFFEBFF,0x00000504,0x00002C6A,0xFFFFEBFF,0x00000504},
- {"0000001000010011111100001111111010011001001100100001100101100100",0x000030E9,0xFFFFEA6B,0x0000050F,0x00001A2D,0xFFFFF7DF,0x00000316,0x00001A2D,0xFFFFF7DF,0x00000316},
- {"0000001000010011111100001111111010011001001100000010000010000100",0x0000323D,0xFFFFEA95,0x000004F4,0x00001ED2,0xFFFFF584,0x0000036C,0x00001ED2,0xFFFFF584,0x0000036C},
- {"0000001000010011111100001111111010011001001011000011000000100100",0x000033D6,0xFFFFE9DB,0x00000510,0x000027A7,0xFFFFEFC7,0x0000045E,0x000027A7,0xFFFFEFC7,0x0000045E},
- {"0000001000010011111100001111111010011001000111000011000101100100",0x00003444,0xFFFFE98A,0x00000517,0x000020FD,0xFFFFF43F,0x0000039D,0x000020FD,0xFFFFF43F,0x0000039D},
- {"0000001000010011111100001111111010011001001010000000100011100100",0x00002987,0xFFFFEFA1,0x0000044B,0x00001B06,0xFFFFF788,0x0000033C,0x00001B06,0xFFFFF788,0x0000033C},
- {"0000001000010011111100001111111010011001001011000010100011100100",0x0000311D,0xFFFFED20,0x00000474,0x000025DA,0xFFFFF223,0x000003F0,0x000025DA,0xFFFFF223,0x000003F0},
- {"0000001000010011111100001111111010011001001011000001000100100100",0x000032A2,0xFFFFEA0A,0x0000050D,0x00001D48,0xFFFFF659,0x0000034A,0x00001D48,0xFFFFF659,0x0000034A},
- {"0000001000010011111100001111111010011001001000100000100011100100",0x00003110,0xFFFFE9EA,0x00000529,0x00001786,0xFFFFF958,0x000002DB,0x00001786,0xFFFFF958,0x000002DB},
- {"0000001000010011111100001111111010011001001010000010000110100100",0x000027F2,0xFFFFF174,0x000003F7,0x00001C7A,0xFFFFF72A,0x00000348,0x00001C7A,0xFFFFF72A,0x00000348},
- {"0000001000010011111100001111111010011001000111000001000011100100",0x000031DB,0xFFFFEA7D,0x000004FB,0x000019C4,0xFFFFF8B1,0x000002E6,0x000019C4,0xFFFFF8B1,0x000002E6},
- {"0000001000010011111100001111111010011001001011000001000100000100",0x00003158,0xFFFFEAAC,0x000004FA,0x00001BC1,0xFFFFF737,0x0000032B,0x00001BC1,0xFFFFF737,0x0000032B},
- {"0000001000010011111100001111111010011001001100000001000011000100",0x00002F36,0xFFFFEBF9,0x000004CA,0x00001A2A,0xFFFFF83F,0x00000303,0x00001A2A,0xFFFFF83F,0x00000303},
- {"0000001000010011111100001111111010011001001100100011100010100100",0x000032B4,0xFFFFEA72,0x000004FA,0x000021FF,0xFFFFF378,0x000003C5,0x000021FF,0xFFFFF378,0x000003C5},
- {"0000001000010011111100001111111010011001001100000011000101100100",0x00003262,0xFFFFEAFA,0x000004DF,0x00002441,0xFFFFF237,0x000003F6,0x00002441,0xFFFFF237,0x000003F6},
- {"0000001000010011111100001111111010011001001100000011100100100100",0x0000336A,0xFFFFEAFB,0x000004D1,0x00002746,0xFFFFF0B8,0x0000042B,0x00002746,0xFFFFF0B8,0x0000042B},
- {"0000001000010011111100001111111010011001000110100100000010000100",0x000032E5,0xFFFFE923,0x00000541,0x00001DF0,0xFFFFF552,0x00000380,0x00001DF0,0xFFFFF552,0x00000380},
- {"0000001000010011111100001111111010011001001100000100000001100100",0x000035D1,0xFFFFE80B,0x0000055F,0x00002780,0xFFFFEF74,0x0000046F,0x00002780,0xFFFFEF74,0x0000046F},
- {"0000001000010011111100001111111010011001001100000010100010100100",0x000033EC,0xFFFFEA48,0x000004F4,0x0000269F,0xFFFFF0D8,0x0000042A,0x0000269F,0xFFFFF0D8,0x0000042A},
- {"0000001000010011111100001111111010011001001100100011100010000100",0x000030C4,0xFFFFEB39,0x000004E2,0x00001B44,0xFFFFF7AA,0x00000318,0x00001B44,0xFFFFF7AA,0x00000318},
- {"0000001000010011111100001111111010011001001010000001000101000100",0x00002926,0xFFFFF0AF,0x0000040E,0x0000194E,0xFFFFF959,0x000002E2,0x0000194E,0xFFFFF959,0x000002E2},
- {"0000001000010011111100001111111010011001001011000001000011000100",0x00003141,0xFFFFEAAF,0x000004F6,0x00001864,0xFFFFF97C,0x000002C6,0x00001864,0xFFFFF97C,0x000002C6},
- {"0000001000010011111100001111111010011001001100000001000001100100",0x000030B2,0xFFFFEB7C,0x000004DB,0x000022CE,0xFFFFF2B5,0x000003F0,0x000022CE,0xFFFFF2B5,0x000003F0},
- {"0000001000010011111100001111111010011001001100000001100101000100",0x0000318C,0xFFFFEAC7,0x000004F6,0x00002113,0xFFFFF3CA,0x000003BD,0x00002113,0xFFFFF3CA,0x000003BD},
- {"0000001000010011111100001111111010011001001011100001000100000100",0x00002FD2,0xFFFFEB8F,0x000004D9,0x00001996,0xFFFFF89F,0x000002F1,0x00001996,0xFFFFF89F,0x000002F1},
- {"0000001000010011111100001111111010011001000110100010100010100100",0x0000310D,0xFFFFEB25,0x000004E7,0x00001F67,0xFFFFF4EF,0x0000038E,0x00001F67,0xFFFFF4EF,0x0000038E},
- {"0000001000010011111100001111111010011001001010100100100101100100",0x00002BBC,0xFFFFEE68,0x00000477,0x00002050,0xFFFFF41D,0x000003C8,0x00002050,0xFFFFF41D,0x000003C8},
- {"0000001000010011111100001111111010011001001100000010000100000100",0x00003096,0xFFFFECED,0x00000486,0x000024C9,0xFFFFF278,0x000003E7,0x000024C9,0xFFFFF278,0x000003E7},
- {"0000001000010011111100001111111010011001001011000001000010100100",0x00003401,0xFFFFE8F1,0x0000053C,0x00001E75,0xFFFFF55C,0x00000376,0x00001E75,0xFFFFF55C,0x00000376},
- {"0000001000010011111100001111111010011001001100000010100001000100",0x0000319E,0xFFFFEAB1,0x000004F8,0x00001EA3,0xFFFFF567,0x00000378,0x00001EA3,0xFFFFF567,0x00000378},
- {"0000001000010011111100001111111010011001001100100010100101100100",0x000030FD,0xFFFFEB4C,0x000004DB,0x00001CA6,0xFFFFF6E8,0x00000335,0x00001CA6,0xFFFFF6E8,0x00000335},
- {"0000001000010011111100001111111010011001001011100100000010100100",0x000030D6,0xFFFFEB1A,0x000004E4,0x00001A0D,0xFFFFF87D,0x000002EF,0x00001A0D,0xFFFFF87D,0x000002EF},
- {"0000001000010011111100001111111010011001001011000010000100100100",0x0000324B,0xFFFFEB17,0x000004D9,0x00002225,0xFFFFF3A8,0x000003BA,0x00002225,0xFFFFF3A8,0x000003BA},
- {"0000001000010011111100001111111010011001001010000100000010000100",0x00002A00,0xFFFFF02E,0x00000424,0x00001E21,0xFFFFF61D,0x0000036C,0x00001E21,0xFFFFF61D,0x0000036C},
- {"0000001000010011111100001111111010011001001010100100100010100100",0x000029CF,0xFFFFEF53,0x00000457,0x00001B11,0xFFFFF772,0x0000033D,0x00001B11,0xFFFFF772,0x0000033D},
- {"0000001000010011111100001111111010011001000110100011000010100100",0x000032A1,0xFFFFEA63,0x000004FB,0x00001F83,0xFFFFF516,0x0000037E,0x00001F83,0xFFFFF516,0x0000037E},
- {"0000001000010011111100001111111010011001001011100010000011000100",0x0000305C,0xFFFFEC14,0x000004B5,0x00001D0B,0xFFFFF6ED,0x00000332,0x00001D0B,0xFFFFF6ED,0x00000332},
- {"0000001000010011111100001111111010011001001011000001000001100100",0x00003467,0xFFFFE8D5,0x00000543,0x0000243F,0xFFFFF190,0x00000418,0x0000243F,0xFFFFF190,0x00000418},
- {"0000001000010011111100001111111010011001001010100010000001100100",0x00002796,0xFFFFF133,0x00000409,0x00001903,0xFFFFF91C,0x000002FC,0x00001903,0xFFFFF91C,0x000002FC},
- {"0000001000010011111100001111111010011001001100000010000101100100",0x000031F6,0xFFFFEAB7,0x000004F5,0x000022B9,0xFFFFF2D0,0x000003E6,0x000022B9,0xFFFFF2D0,0x000003E6},
- {"0000001000010011111100001111111010011001001011100101000100000100",0x00003196,0xFFFFEA76,0x00000503,0x00001CC5,0xFFFFF67D,0x0000034A,0x00001CC5,0xFFFFF67D,0x0000034A},
- {"0000001000010011111100001111111010011001001100100001000101000100",0x00002F9E,0xFFFFEAD9,0x00000505,0x000017C1,0xFFFFF93D,0x000002DF,0x000017C1,0xFFFFF93D,0x000002DF},
- {"0000001000010011111100001111111010011001001011100010000100100100",0x00002FBC,0xFFFFEC75,0x000004A8,0x00001D6D,0xFFFFF6AC,0x0000033D,0x00001D6D,0xFFFFF6AC,0x0000033D},
- {"0000001000010011111100001111111010011001001011000011100010100100",0x00003541,0xFFFFE921,0x00000524,0x00002662,0xFFFFF0CB,0x0000042B,0x00002662,0xFFFFF0CB,0x0000042B},
- {"0000001000010011111100001111111010011001001010100010000110100100",0x00002953,0xFFFFEF76,0x00000459,0x00001C05,0xFFFFF6A0,0x00000368,0x00001C05,0xFFFFF6A0,0x00000368},
- {"0000001000010011111100001111111010011001001011000100100100100100",0x000034BC,0xFFFFE8DD,0x00000536,0x0000210E,0xFFFFF3F4,0x000003A8,0x0000210E,0xFFFFF3F4,0x000003A8},
- {"0000001000010011111100001111111010011001001011000010100110100100",0x000034BE,0xFFFFE916,0x0000052F,0x000024A1,0xFFFFF1A6,0x00000410,0x000024A1,0xFFFFF1A6,0x00000410},
- {"0000001000010011111100001111111010011001001100000100100101100100",0x000037B5,0xFFFFE7A9,0x0000055B,0x000028A1,0xFFFFEF51,0x00000467,0x000028A1,0xFFFFEF51,0x00000467},
- {"0000001000010011111100001111111010011001001100000001000100000100",0x00002FC5,0xFFFFEBBE,0x000004D1,0x00001BA5,0xFFFFF757,0x00000328,0x00001BA5,0xFFFFF757,0x00000328},
- {"0000001000010011111100001111111010011001001100000100000010100100",0x000033CB,0xFFFFE944,0x0000052B,0x00001FBE,0xFFFFF4B1,0x0000038C,0x00001FBE,0xFFFFF4B1,0x0000038C},
- {"0000001000010011111100001111111010011001001100000001100001000100",0x000030AE,0xFFFFEBA0,0x000004D3,0x00002268,0xFFFFF316,0x000003DD,0x00002268,0xFFFFF316,0x000003DD},
- {"0000001000010011111100001111111010011001001011000010000010100100",0x00002F90,0xFFFFEC5A,0x000004B0,0x00001C3A,0xFFFFF752,0x00000323,0x00001C3A,0xFFFFF752,0x00000323},
- {"0000001000010011111100001111111010011001001011100011100011100100",0x00003113,0xFFFFEB91,0x000004C8,0x00001E3C,0xFFFFF623,0x0000034E,0x00001E3C,0xFFFFF623,0x0000034E},
- {"0000001000010011111100001111111010011001001100100011100110000100",0x0000330B,0xFFFFE94B,0x00000539,0x000020E7,0xFFFFF37E,0x000003CD,0x000020E7,0xFFFFF37E,0x000003CD},
- {"0000001000010011111100001111111010011001001011100010100001100100",0x000031D1,0xFFFFEACB,0x000004ED,0x00001E82,0xFFFFF5B2,0x00000365,0x00001E82,0xFFFFF5B2,0x00000365},
- {"0000001000010011111100001111111010011001001010100011100110000100",0x00002CD5,0xFFFFEDC1,0x0000048D,0x000020F8,0xFFFFF3C1,0x000003D1,0x000020F8,0xFFFFF3C1,0x000003D1},
- { NULL ,0x000,0x000,0x000,0x000,0x000,0x000,0x000,0x000,0x000}
+static const struct phm_fuses_default vega10_fuses_default[] = {
+ { 0x0213EA94DE0E4964, 0x00003C96, 0xFFFFE226, 0x00000656, 0x00002203, 0xFFFFF201, 0x000003FF, 0x00002203, 0xFFFFF201, 0x000003FF },
+ { 0x0213EA94DE0A1884, 0x00003CC5, 0xFFFFE23A, 0x0000064E, 0x00002258, 0xFFFFF1F7, 0x000003FC, 0x00002258, 0xFFFFF1F7, 0x000003FC },
+ { 0x0213EA94DE0E31A4, 0x00003CAF, 0xFFFFE36E, 0x00000602, 0x00001E98, 0xFFFFF569, 0x00000357, 0x00001E98, 0xFFFFF569, 0x00000357 },
+ { 0x0213EA94DE2C1144, 0x0000391A, 0xFFFFE548, 0x000005C9, 0x00001B98, 0xFFFFF707, 0x00000324, 0x00001B98, 0xFFFFF707, 0x00000324 },
+ { 0x0213EA94DE2C18C4, 0x00003821, 0xFFFFE674, 0x00000597, 0x00002196, 0xFFFFF361, 0x000003C0, 0x00002196, 0xFFFFF361, 0x000003C0 },
+ { 0x0213EA94DE263884, 0x000044A2, 0xFFFFDCB7, 0x00000738, 0x0000325C, 0xFFFFE6A7, 0x000005E6, 0x0000325C, 0xFFFFE6A7, 0x000005E6 },
+ { 0x0213EA94DE082924, 0x00004057, 0xFFFFE1CF, 0x0000063C, 0x00002E2E, 0xFFFFEB62, 0x000004FD, 0x00002E2E, 0xFFFFEB62, 0x000004FD },
+ { 0x0213EA94DE284924, 0x00003FD0, 0xFFFFDF0F, 0x000006E5, 0x0000267C, 0xFFFFEE2D, 0x000004AB, 0x0000267C, 0xFFFFEE2D, 0x000004AB },
+ { 0x0213EA94DE280904, 0x00003F13, 0xFFFFE010, 0x000006AD, 0x000020E7, 0xFFFFF266, 0x000003EC, 0x000020E7, 0xFFFFF266, 0x000003EC },
+ { 0x0213EA94DE082044, 0x00004088, 0xFFFFDFAB, 0x000006B6, 0x0000252B, 0xFFFFEFDB, 0x00000458, 0x0000252B, 0xFFFFEFDB, 0x00000458 },
+ { 0x0213EA94DE283884, 0x00003EF6, 0xFFFFE017, 0x000006AA, 0x00001F67, 0xFFFFF369, 0x000003BE, 0x00001F67, 0xFFFFF369, 0x000003BE },
+ { 0x0213EA94DE2C2184, 0x00003CDD, 0xFFFFE2A7, 0x0000063C, 0x000026C6, 0xFFFFEF38, 0x00000478, 0x000026C6, 0xFFFFEF38, 0x00000478 },
+ { 0x0213EA94DE105124, 0x00003FA8, 0xFFFFDF02, 0x000006F0, 0x000027FE, 0xFFFFECF6, 0x000004EA, 0x000027FE, 0xFFFFECF6, 0x000004EA },
+ { 0x0213EA94DE2638C4, 0x00004670, 0xFFFFDC40, 0x00000742, 0x00003A7A, 0xFFFFE1A7, 0x000006B6, 0x00003A7A, 0xFFFFE1A7, 0x000006B6 },
+ { 0x0213EA94DE2C3024, 0x00003CDC, 0xFFFFE18C, 0x00000683, 0x00002A69, 0xFFFFEBE7, 0x00000515, 0x00002A69, 0xFFFFEBE7, 0x00000515 },
+ { 0x0213EA94DE0E38C4, 0x00003CEC, 0xFFFFE38E, 0x00000601, 0x00002752, 0xFFFFEFA7, 0x00000453, 0x00002752, 0xFFFFEFA7, 0x00000453 },
+ { 0x0213EA94DE2C1124, 0x000037D0, 0xFFFFE634, 0x000005A7, 0x00001CD2, 0xFFFFF644, 0x00000348, 0x00001CD2, 0xFFFFF644, 0x00000348 },
+ { 0x0213EA94DE283964, 0x00003DF5, 0xFFFFE0A5, 0x00000698, 0x00001FD5, 0xFFFFF30E, 0x000003D1, 0x00001FD5, 0xFFFFF30E, 0x000003D1 },
+ { 0x0213EA94DE0828C4, 0x00004201, 0xFFFFE03E, 0x00000688, 0x00003206, 0xFFFFE852, 0x0000058A, 0x00003206, 0xFFFFE852, 0x0000058A },
+ { 0x0213EA94DE2C1864, 0x00003BED, 0xFFFFE2F5, 0x00000638, 0x0000270D, 0xFFFFEED0, 0x0000048E, 0x0000270D, 0xFFFFEED0, 0x0000048E },
+ { 0x0213EA94DE0A1904, 0x00003E82, 0xFFFFE1BE, 0x00000654, 0x000025FB, 0xFFFFEFFA, 0x00000448, 0x000025FB, 0xFFFFEFFA, 0x00000448 },
+ { 0x0213EA94DE2C40C4, 0x00003962, 0xFFFFE4B9, 0x000005EF, 0x00002385, 0xFFFFF156, 0x00000423, 0x00002385, 0xFFFFF156, 0x00000423 },
+ { 0x0213EA94DE2C0944, 0x00003D88, 0xFFFFE21A, 0x00000655, 0x0000295A, 0xFFFFED68, 0x000004C4, 0x0000295A, 0xFFFFED68, 0x000004C4 },
+ { 0x0213EA94DE2C1104, 0x00003AA4, 0xFFFFE4A3, 0x000005E0, 0x000022EF, 0xFFFFF250, 0x000003EB, 0x000022EF, 0xFFFFF250, 0x000003EB },
+ { 0x0213EA94DE0E29A4, 0x00003D97, 0xFFFFE30D, 0x0000060D, 0x0000205D, 0xFFFFF45D, 0x00000380, 0x0000205D, 0xFFFFF45D, 0x00000380 },
+ { 0x0213EA94DE2C40A4, 0x000039B6, 0xFFFFE446, 0x00000605, 0x00002325, 0xFFFFF16C, 0x0000041F, 0x00002325, 0xFFFFF16C, 0x0000041F },
+ { 0x0213EA94DE263904, 0x0000457E, 0xFFFFDCF6, 0x00000722, 0x00003972, 0xFFFFE27B, 0x0000068E, 0x00003972, 0xFFFFE27B, 0x0000068E },
+ { 0x0213EA94DE0A1924, 0x00003FB8, 0xFFFFE101, 0x00000670, 0x00002787, 0xFFFFEEF5, 0x00000471, 0x00002787, 0xFFFFEEF5, 0x00000471 },
+ { 0x0213EA94DE0E38A4, 0x00003BB2, 0xFFFFE430, 0x000005EA, 0x000024A5, 0xFFFFF162, 0x00000409, 0x000024A5, 0xFFFFF162, 0x00000409 },
+ { 0x0213EA94DE082144, 0x00003EC5, 0xFFFFE1BD, 0x0000064F, 0x000022F0, 0xFFFFF227, 0x000003E8, 0x000022F0, 0xFFFFF227, 0x000003E8 },
+ { 0x0213EA94DE2C3164, 0x000038A7, 0xFFFFE59F, 0x000005C1, 0x000021CC, 0xFFFFF2DF, 0x000003D9, 0x000021CC, 0xFFFFF2DF, 0x000003D9 },
+ { 0x0213EA94DE324184, 0x00002995, 0xFFFFEF7A, 0x0000044C, 0x00001552, 0xFFFFFB5D, 0x00000292, 0x00001552, 0xFFFFFB5D, 0x00000292 },
+ { 0x0213EA94DE2C4064, 0x00003B26, 0xFFFFE2D3, 0x00000649, 0x000023B4, 0xFFFFF09B, 0x00000449, 0x000023B4, 0xFFFFF09B, 0x00000449 },
+ { 0x0213EA94DE081124, 0x000040D2, 0xFFFFE00A, 0x00000696, 0x000022DA, 0xFFFFF1E9, 0x000003F2, 0x000022DA, 0xFFFFF1E9, 0x000003F2 },
+ { 0x0213EA94DE2C3924, 0x00003C98, 0xFFFFE365, 0x00000618, 0x00002D5D, 0xFFFFEB3A, 0x0000051D, 0x00002D5D, 0xFFFFEB3A, 0x0000051D },
+ { 0x0213EA94DE2C10A4, 0x00003BBD, 0xFFFFE37E, 0x00000617, 0x0000252E, 0xFFFFF06E, 0x00000441, 0x0000252E, 0xFFFFF06E, 0x00000441 },
+ { 0x0213EA94DE262924, 0x00004363, 0xFFFFDF7A, 0x000006A0, 0x000031F5, 0xFFFFE880, 0x0000057B, 0x000031F5, 0xFFFFE880, 0x0000057B },
+ { 0x0213EA94DE0E3844, 0x00003CFC, 0xFFFFE2AF, 0x0000062E, 0x0000212A, 0xFFFFF335, 0x000003BF, 0x0000212A, 0xFFFFF335, 0x000003BF },
+ { 0x0213EA94DE1C4924, 0x0000252D, 0xFFFFF31B, 0x000003C3, 0x00001A1A, 0xFFFFF882, 0x00000325, 0x00001A1A, 0xFFFFF882, 0x00000325 },
+ { 0x0213EA94DE0A29A4, 0x00003FE2, 0xFFFFDFEF, 0x000006AC, 0x000025A2, 0xFFFFEF84, 0x00000462, 0x000025A2, 0xFFFFEF84, 0x00000462 },
+ { 0x0213EA94DE0820E4, 0x000040A5, 0xFFFFE13B, 0x0000065B, 0x00002C13, 0xFFFFEC75, 0x000004D7, 0x00002C13, 0xFFFFEC75, 0x000004D7 },
+ { 0x0213EA94DE0E48A4, 0x00003E42, 0xFFFFE1B3, 0x00000657, 0x0000221D, 0xFFFFF273, 0x000003DE, 0x0000221D, 0xFFFFF273, 0x000003DE },
+ { 0x0213EA94DE0A20E4, 0x00003E7F, 0xFFFFE255, 0x00000638, 0x00002D30, 0xFFFFEB8A, 0x00000503, 0x00002D30, 0xFFFFEB8A, 0x00000503 },
+ { 0x0213EA94DE2C29C4, 0x00003E56, 0xFFFFE16D, 0x00000670, 0x000028DC, 0xFFFFEDA0, 0x000004BA, 0x000028DC, 0xFFFFEDA0, 0x000004BA },
+ { 0x0213EA94DE2630A4, 0x000044AD, 0xFFFFDE24, 0x000006DD, 0x000031AD, 0xFFFFE850, 0x00000585, 0x000031AD, 0xFFFFE850, 0x00000585 },
+ { 0x0213EA94DE2C20E4, 0x00003AF3, 0xFFFFE5B0, 0x000005A6, 0x00002CF6, 0xFFFFEC75, 0x000004DD, 0x00002CF6, 0xFFFFEC75, 0x000004DD },
+ { 0x0213EA94DE0A2084, 0x00003E66, 0xFFFFE19E, 0x0000065B, 0x00002332, 0xFFFFF1B9, 0x000003FD, 0x00002332, 0xFFFFF1B9, 0x000003FD },
+ { 0x0213EA94DE082884, 0x00003FB4, 0xFFFFE0A5, 0x00000686, 0x0000253E, 0xFFFFF02E, 0x00000444, 0x0000253E, 0xFFFFF02E, 0x00000444 },
+ { 0x0213EA94DE2818A4, 0x00003E28, 0xFFFFE14D, 0x0000066E, 0x00001FE2, 0xFFFFF39A, 0x000003B1, 0x00001FE2, 0xFFFFF39A, 0x000003B1 },
+ { 0x0213EA94DE2C0904, 0x000039E6, 0xFFFFE44B, 0x000005FE, 0x0000210C, 0xFFFFF2F4, 0x000003DA, 0x0000210C, 0xFFFFF2F4, 0x000003DA },
+ { 0x0213EA94DE2C5104, 0x00003A4D, 0xFFFFE252, 0x0000067A, 0x000027E2, 0xFFFFECED, 0x000004FA, 0x000027E2, 0xFFFFECED, 0x000004FA },
+ { 0x0213EA94DE0A2964, 0x00004065, 0xFFFFE02F, 0x0000069B, 0x0000299D, 0xFFFFED38, 0x000004C2, 0x0000299D, 0xFFFFED38, 0x000004C2 },
+ { 0x0213EA94DE0E20A4, 0x000039EE, 0xFFFFE603, 0x00000594, 0x0000214F, 0xFFFFF429, 0x0000038E, 0x0000214F, 0xFFFFF429, 0x0000038E },
+ { 0x0213EA94DE0E48E4, 0x00003BD2, 0xFFFFE351, 0x00000618, 0x000020B8, 0xFFFFF377, 0x000003B4, 0x000020B8, 0xFFFFF377, 0x000003B4 },
+ { 0x0213EA94DE0A3124, 0x00003FAA, 0xFFFFE183, 0x0000065E, 0x000032AE, 0xFFFFE7C2, 0x000005A6, 0x000032AE, 0xFFFFE7C2, 0x000005A6 },
+ { 0x0213EA94DE2C2984, 0x00003AFB, 0xFFFFE3E4, 0x00000608, 0x00002293, 0xFFFFF21F, 0x000003FA, 0x00002293, 0xFFFFF21F, 0x000003FA },
+ { 0x0213EA94DE262064, 0x0000448B, 0xFFFFDD5D, 0x0000070D, 0x00002E4E, 0xFFFFE9DF, 0x00000551, 0x00002E4E, 0xFFFFE9DF, 0x00000551 },
+ { 0x0213EA94DE0E2184, 0x00003D46, 0xFFFFE39B, 0x000005F3, 0x0000218E, 0xFFFFF3CD, 0x00000398, 0x0000218E, 0xFFFFF3CD, 0x00000398 },
+ { 0x0213EA94DE0848E4, 0x00003F01, 0xFFFFDFD9, 0x000006BF, 0x000023AF, 0xFFFFF04E, 0x0000044C, 0x000023AF, 0xFFFFF04E, 0x0000044C },
+ { 0x0213EA94DE1029A4, 0x0000403D, 0xFFFFDF6B, 0x000006C9, 0x0000270D, 0xFFFFEE4B, 0x0000049E, 0x0000270D, 0xFFFFEE4B, 0x0000049E },
+ { 0x0213EA94DE0E3964, 0x00003C11, 0xFFFFE35C, 0x00000613, 0x000020F9, 0xFFFFF365, 0x000003B9, 0x000020F9, 0xFFFFF365, 0x000003B9 },
+ { 0x0213EA94DE2C3884, 0x00003B58, 0xFFFFE37D, 0x0000061F, 0x00002698, 0xFFFFEF46, 0x00000478, 0x00002698, 0xFFFFEF46, 0x00000478 },
+ { 0x0213EA94DE2841A4, 0x00003EBC, 0xFFFFDF7A, 0x000006D6, 0x0000212B, 0xFFFFF195, 0x0000041B, 0x0000212B, 0xFFFFF195, 0x0000041B },
+ { 0x0213EA94DE0848C4, 0x00004050, 0xFFFFDEB3, 0x000006FE, 0x00002D6C, 0xFFFFE961, 0x00000582, 0x00002D6C, 0xFFFFE961, 0x00000582 },
+ { 0x0213EA94DE262044, 0x000043F0, 0xFFFFDD9C, 0x00000702, 0x00002B31, 0xFFFFEBEA, 0x000004F7, 0x00002B31, 0xFFFFEBEA, 0x000004F7 },
+ { 0x0213EA94DE100924, 0x00003EFA, 0xFFFFE093, 0x00000696, 0x000026DB, 0xFFFFEEB3, 0x00000489, 0x000026DB, 0xFFFFEEB3, 0x00000489 },
+ { 0x0213EA94DE082064, 0x0000425D, 0xFFFFDE8D, 0x000006E6, 0x00002CA4, 0xFFFFEAD2, 0x00000531, 0x00002CA4, 0xFFFFEAD2, 0x00000531 },
+ { 0x0213EA94DE2639A4, 0x000043B0, 0xFFFFDD03, 0x00000728, 0x00002946, 0xFFFFECA6, 0x000004DE, 0x00002946, 0xFFFFECA6, 0x000004DE },
+ { 0x0213EA94DE282864, 0x00003F6A, 0xFFFFE03A, 0x0000069D, 0x00002208, 0xFFFFF1F8, 0x000003F6, 0x00002208, 0xFFFFF1F8, 0x000003F6 },
+ { 0x0213EA94DE2C2964, 0x00003A94, 0xFFFFE4A7, 0x000005E2, 0x000024D0, 0xFFFFF100, 0x00000426, 0x000024D0, 0xFFFFF100, 0x00000426 },
+ { 0x0213EA94DE2810C4, 0x00003F2F, 0xFFFFE0A3, 0x00000688, 0x00002198, 0xFFFFF271, 0x000003E2, 0x00002198, 0xFFFFF271, 0x000003E2 },
+ { 0x0213EA94DE1048E4, 0x00003EA5, 0xFFFFE032, 0x000006AE, 0x0000227C, 0xFFFFF130, 0x00000426, 0x0000227C, 0xFFFFF130, 0x00000426 },
+ { 0x0213EA94DE264144, 0x0000442F, 0xFFFFDBC4, 0x0000078B, 0x00003CD6, 0xFFFFDE6C, 0x0000076C, 0x00003CD6, 0xFFFFDE6C, 0x0000076C },
+ { 0x0213EA94DE282884, 0x00003DDE, 0xFFFFE174, 0x00000668, 0x00001FF4, 0xFFFFF38F, 0x000003B1, 0x00001FF4, 0xFFFFF38F, 0x000003B1 },
+ { 0x0213EA94DE0A3144, 0x000040B0, 0xFFFFE016, 0x000006A0, 0x00002DBB, 0xFFFFEA7F, 0x00000537, 0x00002DBB, 0xFFFFEA7F, 0x00000537 },
+ { 0x0213EA94DE2C3104, 0x00003429, 0xFFFFEA97, 0x000004DD, 0x000024D5, 0xFFFFF26F, 0x000003DF, 0x000024D5, 0xFFFFF26F, 0x000003DF },
+ { 0x0213EA94DE0E1904, 0x00003AEB, 0xFFFFE590, 0x000005A3, 0x000022CB, 0xFFFFF347, 0x000003B2, 0x000022CB, 0xFFFFF347, 0x000003B2 },
+ { 0x0213EA94DE283904, 0x00003B8E, 0xFFFFE2EF, 0x00000636, 0x00002351, 0xFFFFF143, 0x0000041C, 0x00002351, 0xFFFFF143, 0x0000041C },
+ { 0x0213EA94DE3240C4, 0x00002926, 0xFFFFF0B0, 0x00000410, 0x0000194E, 0xFFFFF94E, 0x000002E9, 0x0000194E, 0xFFFFF94E, 0x000002E9 },
+ { 0x0213EA94DE283184, 0x0000402B, 0xFFFFDF78, 0x000006C2, 0x00002273, 0xFFFFF16C, 0x00000414, 0x00002273, 0xFFFFF16C, 0x00000414 },
+ { 0x0213EA94DE0A10A4, 0x00003D6A, 0xFFFFE1D3, 0x00000659, 0x00002006, 0xFFFFF394, 0x000003B1, 0x00002006, 0xFFFFF394, 0x000003B1 },
+ { 0x0213EA94DE284064, 0x00004042, 0xFFFFDFD8, 0x000006A8, 0x00002135, 0xFFFFF29F, 0x000003D9, 0x00002135, 0xFFFFF29F, 0x000003D9 },
+ { 0x0213EA94DE0820A4, 0x0000405B, 0xFFFFE093, 0x00000682, 0x0000288F, 0xFFFFEE3A, 0x00000491, 0x0000288F, 0xFFFFEE3A, 0x00000491 },
+ { 0x0213EA94DE2C48A4, 0x00003A49, 0xFFFFE30C, 0x00000648, 0x000023F9, 0xFFFFF02D, 0x00000460, 0x000023F9, 0xFFFFF02D, 0x00000460 },
+ { 0x0213EA94DE282964, 0x00003D59, 0xFFFFE1CC, 0x0000065B, 0x00002013, 0xFFFFF37D, 0x000003B6, 0x00002013, 0xFFFFF37D, 0x000003B6 },
+ { 0x0213EA94DE2C3984, 0x000040C1, 0xFFFFDF8C, 0x000006CA, 0x00003271, 0xFFFFE6CA, 0x000005EA, 0x00003271, 0xFFFFE6CA, 0x000005EA },
+ { 0x0213EA94DE2620E4, 0x000042E9, 0xFFFFDFDC, 0x0000068C, 0x00002ED9, 0xFFFFEAAF, 0x0000051B, 0x00002ED9, 0xFFFFEAAF, 0x0000051B },
+ { 0x0213EA94DE083084, 0x000042ED, 0xFFFFDE50, 0x000006F0, 0x00002FCF, 0xFFFFE8BB, 0x0000058C, 0x00002FCF, 0xFFFFE8BB, 0x0000058C },
+ { 0x0213EA94DE0A4104, 0x00003EBD, 0xFFFFE099, 0x00000698, 0x00002709, 0xFFFFEE7B, 0x00000495, 0x00002709, 0xFFFFEE7B, 0x00000495 },
+ { 0x0213EA94DE284904, 0x00003F71, 0xFFFFDF82, 0x000006C9, 0x0000219B, 0xFFFFF1AD, 0x0000040F, 0x0000219B, 0xFFFFF1AD, 0x0000040F },
+ { 0x0213EA94DE2808E4, 0x00003E73, 0xFFFFE080, 0x0000069B, 0x000020E7, 0xFFFFF273, 0x000003E9, 0x000020E7, 0xFFFFF273, 0x000003E9 },
+ { 0x0213EA94DE0E3184, 0x00003E14, 0xFFFFE278, 0x0000062C, 0x00002275, 0xFFFFF2B3, 0x000003CE, 0x00002275, 0xFFFFF2B3, 0x000003CE },
+ { 0x0213EA94DE2C21A4, 0x00003ABB, 0xFFFFE3B9, 0x00000615, 0x00002192, 0xFFFFF28F, 0x000003EB, 0x00002192, 0xFFFFF28F, 0x000003EB },
+ { 0x0213EA94DE283124, 0x00003D53, 0xFFFFE255, 0x00000643, 0x0000275B, 0xFFFFEEED, 0x00000479, 0x0000275B, 0xFFFFEEED, 0x00000479 },
+ { 0x0213EA94DE262864, 0x000043E3, 0xFFFFDDC3, 0x000006FB, 0x00002B6B, 0xFFFFEBD6, 0x000004FA, 0x00002B6B, 0xFFFFEBD6, 0x000004FA },
+ { 0x0213EA94DE0E2144, 0x00003BDE, 0xFFFFE507, 0x000005B4, 0x000022CE, 0xFFFFF358, 0x000003AB, 0x000022CE, 0xFFFFF358, 0x000003AB },
+ { 0x0213EA94DE323164, 0x00002460, 0xFFFFF3B5, 0x000003A2, 0x000014E7, 0xFFFFFC32, 0x0000027C, 0x000014E7, 0xFFFFFC32, 0x0000027C },
+ { 0x0213EA94DE2820C4, 0x00003D20, 0xFFFFE298, 0x0000062F, 0x00002080, 0xFFFFF3AF, 0x000003A8, 0x00002080, 0xFFFFF3AF, 0x000003A8 },
+ { 0x0213EA94DE081904, 0x00003E14, 0xFFFFE221, 0x00000641, 0x000021BB, 0xFFFFF2EA, 0x000003CA, 0x000021BB, 0xFFFFF2EA, 0x000003CA },
+ { 0x0213EA94DE0A40C4, 0x00003DE1, 0xFFFFE14E, 0x00000677, 0x00002468, 0xFFFFF068, 0x00000440, 0x00002468, 0xFFFFF068, 0x00000440 },
+ { 0x0213EA94DE261084, 0x00004372, 0xFFFFDDF8, 0x000006F5, 0x00002B3F, 0xFFFFEBE8, 0x000004F8, 0x00002B3F, 0xFFFFEBE8, 0x000004F8 },
+ { 0x0213EA94DE0A28C4, 0x00003E4F, 0xFFFFE2A3, 0x0000062B, 0x00002F5A, 0xFFFFEA37, 0x0000053B, 0x00002F5A, 0xFFFFEA37, 0x0000053B },
+ { 0x0213EA94DE2850E4, 0x00003E07, 0xFFFFE02F, 0x000006B6, 0x0000216B, 0xFFFFF1A3, 0x00000416, 0x0000216B, 0xFFFFF1A3, 0x00000416 },
+ { 0x0213EA94DE2838A4, 0x00003DAB, 0xFFFFE128, 0x0000067F, 0x0000216F, 0xFFFFF236, 0x000003F3, 0x0000216F, 0xFFFFF236, 0x000003F3 },
+ { 0x0213EA94DE2C2924, 0x0000364B, 0xFFFFE8CB, 0x0000052A, 0x00002568, 0xFFFFF1B2, 0x00000400, 0x00002568, 0xFFFFF1B2, 0x00000400 },
+ { 0x0213EA94DE261064, 0x00004219, 0xFFFFDE87, 0x000006E8, 0x00002C59, 0xFFFFEAEE, 0x00000529, 0x00002C59, 0xFFFFEAEE, 0x00000529 },
+ { 0x0213EA94DE0E1944, 0x000039A8, 0xFFFFE602, 0x00000594, 0x00001D06, 0xFFFFF6F0, 0x00000316, 0x00001D06, 0xFFFFF6F0, 0x00000316 },
+ { 0x0213EA94DE2610E4, 0x00004052, 0xFFFFE01C, 0x00000698, 0x00002310, 0xFFFFF1A1, 0x000003FE, 0x00002310, 0xFFFFF1A1, 0x000003FE },
+ { 0x0213EA94DE0E2824, 0x00003C1C, 0xFFFFE3EB, 0x000005F1, 0x00002289, 0xFFFFF2CF, 0x000003C9, 0x00002289, 0xFFFFF2CF, 0x000003C9 },
+ { 0x0213EA94DE0E5124, 0x00003F19, 0xFFFFE085, 0x0000069E, 0x00002B94, 0xFFFFEB72, 0x0000051D, 0x00002B94, 0xFFFFEB72, 0x0000051D },
+ { 0x0213EA94DE0E41A4, 0x00003C51, 0xFFFFE2AD, 0x00000638, 0x0000206B, 0xFFFFF361, 0x000003BE, 0x0000206B, 0xFFFFF361, 0x000003BE },
+ { 0x0213EA94DE2610C4, 0x000040B9, 0xFFFFDFBB, 0x000006AB, 0x0000241F, 0xFFFFF0CC, 0x00000425, 0x0000241F, 0xFFFFF0CC, 0x00000425 },
+ { 0x0213EA94DE0A2064, 0x00003E62, 0xFFFFE12C, 0x00000678, 0x00002445, 0xFFFFF09E, 0x00000435, 0x00002445, 0xFFFFF09E, 0x00000435 },
+ { 0x0213EA94DE0E1984, 0x00003C97, 0xFFFFE399, 0x000005FB, 0x0000209D, 0xFFFFF41D, 0x0000038F, 0x0000209D, 0xFFFFF41D, 0x0000038F },
+ { 0x0213EA94DE0E3144, 0x00003FF9, 0xFFFFE1E9, 0x0000063E, 0x00002E96, 0xFFFFEAF5, 0x00000516, 0x00002E96, 0xFFFFEAF5, 0x00000516 },
+ { 0x0213EA94DE0A3084, 0x00003F04, 0xFFFFE109, 0x0000067A, 0x000026E1, 0xFFFFEF0B, 0x00000476, 0x000026E1, 0xFFFFEF0B, 0x00000476 },
+ { 0x0213EA94DE101124, 0x00003E3E, 0xFFFFE187, 0x00000660, 0x00002049, 0xFFFFF38D, 0x000003B0, 0x00002049, 0xFFFFF38D, 0x000003B0 },
+ { 0x0213EA94DE282944, 0x00003D58, 0xFFFFE253, 0x0000063D, 0x00002158, 0xFFFFF308, 0x000003C3, 0x00002158, 0xFFFFF308, 0x000003C3 },
+ { 0x0213EA94DE0840C4, 0x00004074, 0xFFFFDF8D, 0x000006C0, 0x00002799, 0xFFFFEE19, 0x000004A5, 0x00002799, 0xFFFFEE19, 0x000004A5 },
+ { 0x0213EA94DE281924, 0x00003DAF, 0xFFFFE1C9, 0x00000659, 0x000020E5, 0xFFFFF313, 0x000003C6, 0x000020E5, 0xFFFFF313, 0x000003C6 },
+ { 0x0213EA94DE0A3964, 0x000041DD, 0xFFFFDDFA, 0x0000071B, 0x0000348D, 0xFFFFE4B4, 0x0000064C, 0x0000348D, 0xFFFFE4B4, 0x0000064C },
+ { 0x0213EA94DE2C2884, 0x00003947, 0xFFFFE5AE, 0x000005B8, 0x000024A6, 0xFFFFF140, 0x0000041D, 0x000024A6, 0xFFFFF140, 0x0000041D },
+ { 0x0213EA94DE101844, 0x00003D35, 0xFFFFE197, 0x0000066E, 0x00002248, 0xFFFFF1BC, 0x00000408, 0x00002248, 0xFFFFF1BC, 0x00000408 },
+ { 0x0213EA94DE0A18E4, 0x00003F4F, 0xFFFFE13E, 0x0000066D, 0x00002AF0, 0xFFFFEC99, 0x000004DB, 0x00002AF0, 0xFFFFEC99, 0x000004DB },
+ { 0x0213EA94DE263944, 0x0000430F, 0xFFFFDDFB, 0x000006FC, 0x00002D4D, 0xFFFFEA55, 0x00000540, 0x00002D4D, 0xFFFFEA55, 0x00000540 },
+ { 0x0213EA94DE0E2944, 0x00003B22, 0xFFFFE543, 0x000005B1, 0x000022E1, 0xFFFFF31B, 0x000003B9, 0x000022E1, 0xFFFFF31B, 0x000003B9 },
+ { 0x0213EA94DE0E2084, 0x00003978, 0xFFFFE611, 0x00000592, 0x00001C36, 0xFFFFF771, 0x00000302, 0x00001C36, 0xFFFFF771, 0x00000302 },
+ { 0x0213EA94DE262164, 0x000044DF, 0xFFFFDDAB, 0x000006F2, 0x00002CEA, 0xFFFFEB47, 0x00000507, 0x00002CEA, 0xFFFFEB47, 0x00000507 },
+ { 0x0213EA94DE0A38C4, 0x00003E9B, 0xFFFFE12C, 0x0000067C, 0x00002B79, 0xFFFFEBD9, 0x00000503, 0x00002B79, 0xFFFFEBD9, 0x00000503 },
+ { 0x0213EA94DE263044, 0x00004464, 0xFFFFDCD3, 0x00000731, 0x00002D14, 0xFFFFEA2D, 0x0000054E, 0x00002D14, 0xFFFFEA2D, 0x0000054E },
+ { 0x0213EA94DE281124, 0x00003FB3, 0xFFFFE052, 0x00000693, 0x000020AC, 0xFFFFF311, 0x000003C6, 0x000020AC, 0xFFFFF311, 0x000003C6 },
+ { 0x0213EA94DE2C1084, 0x00003BDA, 0xFFFFE2FB, 0x00000636, 0x0000261E, 0xFFFFEF72, 0x00000471, 0x0000261E, 0xFFFFEF72, 0x00000471 },
+ { 0x0213EA94DE2C1964, 0x00003D72, 0xFFFFE28A, 0x0000063E, 0x000029D8, 0xFFFFED54, 0x000004C7, 0x000029D8, 0xFFFFED54, 0x000004C7 },
+ { 0x0213EA94DE2C2824, 0x00003E26, 0xFFFFE102, 0x00000694, 0x00002DD1, 0xFFFFE9CA, 0x0000056D, 0x00002DD1, 0xFFFFE9CA, 0x0000056D },
+ { 0x0213EA94DE104124, 0x000041CD, 0xFFFFDE97, 0x000006ED, 0x00002DE5, 0xFFFFE9B9, 0x00000565, 0x00002DE5, 0xFFFFE9B9, 0x00000565 },
+ { 0x0213EA94DE0A2984, 0x00003F30, 0xFFFFE06E, 0x00000698, 0x000024FF, 0xFFFFEFFC, 0x0000044F, 0x000024FF, 0xFFFFEFFC, 0x0000044F },
+ { 0x0213EA94DE2C38C4, 0x0000378B, 0xFFFFE6B4, 0x00000594, 0x000023A7, 0xFFFFF1DC, 0x00000407, 0x000023A7, 0xFFFFF1DC, 0x00000407 },
+ { 0x0213EA94DE0E4164, 0x00003CD7, 0xFFFFE28D, 0x00000636, 0x00002036, 0xFFFFF3B5, 0x000003AA, 0x00002036, 0xFFFFF3B5, 0x000003AA },
+ { 0x0213EA94DE0A3884, 0x00003EF9, 0xFFFFE0AA, 0x0000068D, 0x000024D3, 0xFFFFF02F, 0x00000445, 0x000024D3, 0xFFFFF02F, 0x00000445 },
+ { 0x0213EA94DE283944, 0x00003D08, 0xFFFFE1BB, 0x00000665, 0x00002159, 0xFFFFF26F, 0x000003E6, 0x00002159, 0xFFFFF26F, 0x000003E6 },
+ { 0x0213EA94DE2C20C4, 0x000038A9, 0xFFFFE6CA, 0x00000580, 0x000025D3, 0xFFFFF101, 0x00000421, 0x000025D3, 0xFFFFF101, 0x00000421 },
+ { 0x0213EA94DE0A20A4, 0x00003E45, 0xFFFFE1F8, 0x0000064D, 0x000027E3, 0xFFFFEEBB, 0x0000047F, 0x000027E3, 0xFFFFEEBB, 0x0000047F },
+ { 0x0213EA94DE0E3864, 0x00003F76, 0xFFFFE128, 0x0000066E, 0x0000286B, 0xFFFFEE4C, 0x00000493, 0x0000286B, 0xFFFFEE4C, 0x00000493 },
+ { 0x0213EA94DE264104, 0x0000440D, 0xFFFFDCA2, 0x0000074F, 0x00003817, 0xFFFFE256, 0x000006AF, 0x00003817, 0xFFFFE256, 0x000006AF },
+ { 0x0213EA94DE105104, 0x00003EE1, 0xFFFFDFA7, 0x000006D4, 0x000027EA, 0xFFFFED2B, 0x000004DE, 0x000027EA, 0xFFFFED2B, 0x000004DE },
+ { 0x0213EA94DE2C3864, 0x00003C62, 0xFFFFE285, 0x0000064A, 0x00002520, 0xFFFFF001, 0x0000045C, 0x00002520, 0xFFFFF001, 0x0000045C },
+ { 0x0213EA94DE323964, 0x0000272E, 0xFFFFF17A, 0x000003FA, 0x0000150B, 0xFFFFFBD5, 0x00000284, 0x0000150B, 0xFFFFFBD5, 0x00000284 },
+ { 0x0213EA94DE261924, 0x00004275, 0xFFFFDF69, 0x000006A5, 0x000025AA, 0xFFFFF05C, 0x0000042B, 0x000025AA, 0xFFFFF05C, 0x0000042B },
+ { 0x0213EA94DE0E40E4, 0x00003CAA, 0xFFFFE392, 0x000005FF, 0x000023A8, 0xFFFFF20E, 0x000003E9, 0x000023A8, 0xFFFFF20E, 0x000003E9 },
+ { 0x0213EA94DE2C50C4, 0x00003CF8, 0xFFFFE0FB, 0x000006A6, 0x00002CA7, 0xFFFFE9FF, 0x0000056E, 0x00002CA7, 0xFFFFE9FF, 0x0000056E },
+ { 0x0213EA94DE282124, 0x00003D00, 0xFFFFE296, 0x00000633, 0x000021C1, 0xFFFFF2C8, 0x000003CF, 0x000021C1, 0xFFFFF2C8, 0x000003CF },
+ { 0x0213EA94DE2838E4, 0x00003B46, 0xFFFFE301, 0x00000632, 0x0000204C, 0xFFFFF33B, 0x000003C8, 0x0000204C, 0xFFFFF33B, 0x000003C8 },
+ { 0x0213EA94DE204164, 0x00002026, 0xFFFFF5CE, 0x00000368, 0x00001598, 0xFFFFFB29, 0x000002C3, 0x00001598, 0xFFFFFB29, 0x000002C3 },
+ { 0x0213EA94DE283164, 0x00003DCA, 0xFFFFE178, 0x00000668, 0x00001FDB, 0xFFFFF39D, 0x000003AF, 0x00001FDB, 0xFFFFF39D, 0x000003AF },
+ { 0x0213EA94DE2C48C4, 0x00003A59, 0xFFFFE327, 0x00000642, 0x000024B9, 0xFFFFEFC4, 0x00000471, 0x000024B9, 0xFFFFEFC4, 0x00000471 },
+ { 0x0213EA94DE2C2944, 0x00003C26, 0xFFFFE440, 0x000005EB, 0x00002C0F, 0xFFFFEC88, 0x000004E0, 0x00002C0F, 0xFFFFEC88, 0x000004E0 },
+ { 0x0213EA94DE083884, 0x00004149, 0xFFFFDEB8, 0x000006E7, 0x0000280A, 0xFFFFED89, 0x000004C2, 0x0000280A, 0xFFFFED89, 0x000004C2 },
+ { 0x0213EA94DE0E4124, 0x00003EB4, 0xFFFFE1E5, 0x0000064D, 0x0000299F, 0xFFFFEDB3, 0x000004A9, 0x0000299F, 0xFFFFEDB3, 0x000004A9 },
+ { 0x0213EA94DE2C39A4, 0x00003BBF, 0xFFFFE268, 0x0000065A, 0x00002504, 0xFFFFEFB0, 0x00000470, 0x00002504, 0xFFFFEFB0, 0x00000470 },
+ { 0x0213EA94DE084904, 0x00004203, 0xFFFFDDC6, 0x00000720, 0x0000303B, 0xFFFFE78F, 0x000005D0, 0x0000303B, 0xFFFFE78F, 0x000005D0 },
+ { 0x0213EA94DE0E3984, 0x00003DA3, 0xFFFFE244, 0x0000063E, 0x000021B4, 0xFFFFF2DA, 0x000003CD, 0x000021B4, 0xFFFFF2DA, 0x000003CD },
+ { 0x0213EA94DE0A38E4, 0x00004035, 0xFFFFE065, 0x0000069B, 0x00003323, 0xFFFFE6D6, 0x000005D8, 0x00003323, 0xFFFFE6D6, 0x000005D8 },
+ { 0x0213EA94DE2C1164, 0x00003944, 0xFFFFE4E5, 0x000005E2, 0x00001F3C, 0xFFFFF456, 0x0000039D, 0x00001F3C, 0xFFFFF456, 0x0000039D },
+ { 0x0213EA94DE061904, 0x000032D8, 0xFFFFEAE8, 0x000004E6, 0x00001812, 0xFFFFFA1C, 0x000002BC, 0x00001812, 0xFFFFFA1C, 0x000002BC },
+ { 0x0213F0FD42D22944, 0x000041F6, 0xFFFFE025, 0x0000069A, 0x0000241E, 0xFFFFF1B4, 0x00000402, 0x0000241E, 0xFFFFF1B4, 0x00000402 },
+ { 0x0213F0FE990C30A4, 0x00003300, 0xFFFFEB60, 0x000004C1, 0x00001E15, 0xFFFFF6A6, 0x0000033B, 0x00001E15, 0xFFFFF6A6, 0x0000033B },
+ { 0x0213EA94DE0408A4, 0x000037F0, 0xFFFFE68F, 0x0000059B, 0x00001F8A, 0xFFFFF467, 0x000003A3, 0x00001F8A, 0xFFFFF467, 0x000003A3 },
+ { 0x0213F0FE99182984, 0x000025D8, 0xFFFFF2AA, 0x000003C3, 0x000018A8, 0xFFFFF9BE, 0x000002D2, 0x000018A8, 0xFFFFF9BE, 0x000002D2 },
+ { 0x0213F0FE990620C4, 0x0000364F, 0xFFFFE988, 0x000004FC, 0x00001E51, 0xFFFFF633, 0x0000034F, 0x00001E51, 0xFFFFF633, 0x0000034F },
+ { 0x0213EA94DE061144, 0x00002288, 0xFFFFF483, 0x0000036C, 0x0000280F, 0xFFFFEF39, 0x0000047B, 0x0000280F, 0xFFFFEF39, 0x0000047B },
+ { 0x0213F0FE99082084, 0x00003322, 0xFFFFEA7E, 0x000004ED, 0x00001DAD, 0xFFFFF62B, 0x00000355, 0x00001DAD, 0xFFFFF62B, 0x00000355 },
+ { 0x0213EA94DE0250E4, 0x00002B7B, 0xFFFFEE4F, 0x0000045B, 0x00001AA2, 0xFFFFF710, 0x0000033E, 0x00001AA2, 0xFFFFF710, 0x0000033E },
+ { 0x0213F0FE990420C4, 0x000034CC, 0xFFFFEA79, 0x000004E4, 0x00001B05, 0xFFFFF8B3, 0x000002EC, 0x00001B05, 0xFFFFF8B3, 0x000002EC },
+ { 0x0213F0FD42DC2864, 0x00003837, 0xFFFFE5ED, 0x000005C3, 0x00001ACB, 0xFFFFF7B2, 0x00000314, 0x00001ACB, 0xFFFFF7B2, 0x00000314 },
+ { 0x0213F0FE99044164, 0x0000352D, 0xFFFFE88F, 0x00000548, 0x000021E6, 0xFFFFF3B5, 0x000003AA, 0x000021E6, 0xFFFFF3B5, 0x000003AA },
+ { 0x0213F0FE990A4884, 0x00003300, 0xFFFFE835, 0x0000057B, 0x00001A85, 0xFFFFF715, 0x00000336, 0x00001A85, 0xFFFFF715, 0x00000336 },
+ { 0x0213EA94DE0448A4, 0x000033FA, 0xFFFFE851, 0x00000565, 0x00001A8E, 0xFFFFF727, 0x0000033B, 0x00001A8E, 0xFFFFF727, 0x0000033B },
+ { 0x0213F0FD42DA3924, 0x000039D3, 0xFFFFE5D3, 0x000005B0, 0x00001888, 0xFFFFF978, 0x000002C8, 0x00001888, 0xFFFFF978, 0x000002C8 },
+ { 0x0213F0FE990E4864, 0x00002F6B, 0xFFFFEC53, 0x000004B9, 0x00001C15, 0xFFFFF71B, 0x00000337, 0x00001C15, 0xFFFFF71B, 0x00000337 },
+ { 0x0213F0FE99064144, 0x0000384D, 0xFFFFE737, 0x00000569, 0x00001D2D, 0xFFFFF673, 0x00000343, 0x00001D2D, 0xFFFFF673, 0x00000343 },
+ { 0x0213F0FE990620A4, 0x00003A49, 0xFFFFE70B, 0x0000055F, 0x00001A63, 0xFFFFF8CD, 0x000002E2, 0x00001A63, 0xFFFFF8CD, 0x000002E2 },
+ { 0x0213F0FE99042984, 0x0000311E, 0xFFFFEB97, 0x000004C6, 0x00001EAE, 0xFFFFF5A9, 0x00000367, 0x00001EAE, 0xFFFFF5A9, 0x00000367 },
+ { 0x0213F0FE990E1124, 0x000027D3, 0xFFFFF075, 0x00000417, 0x00002001, 0xFFFFF44A, 0x000003A2, 0x00002001, 0xFFFFF44A, 0x000003A2 },
+ { 0x0213F0FE99064904, 0x00003B72, 0xFFFFE4BD, 0x000005DC, 0x00001D76, 0xFFFFF606, 0x0000035A, 0x00001D76, 0xFFFFF606, 0x0000035A },
+ { 0x0213F0FE99101124, 0x00002E0F, 0xFFFFECA7, 0x000004AE, 0x00001DC6, 0xFFFFF5BF, 0x0000036A, 0x00001DC6, 0xFFFFF5BF, 0x0000036A },
+ { 0x0213F0FE990238A4, 0x000032C7, 0xFFFFEA7A, 0x000004F0, 0x00001A7B, 0xFFFFF827, 0x00000301, 0x00001A7B, 0xFFFFF827, 0x00000301 },
+ { 0x0213EA94DE044884, 0x0000312D, 0xFFFFEA39, 0x00000515, 0x00001948, 0xFFFFF800, 0x00000318, 0x00001948, 0xFFFFF800, 0x00000318 },
+ { 0x0213EA94DE062084, 0x00003611, 0xFFFFE8D7, 0x00000533, 0x00001929, 0xFFFFF965, 0x000002D2, 0x00001929, 0xFFFFF965, 0x000002D2 },
+ { 0x0213F0FE992C30E4, 0x00002FE2, 0xFFFFED89, 0x00000470, 0x00001A3C, 0xFFFFF955, 0x000002D5, 0x00001A3C, 0xFFFFF955, 0x000002D5 },
+ { 0x0213EA94DE0208A4, 0x000035FF, 0xFFFFE884, 0x00000548, 0x0000182A, 0xFFFFF9AB, 0x000002CF, 0x0000182A, 0xFFFFF9AB, 0x000002CF },
+ { 0x0213F0FE990220E4, 0x00003597, 0xFFFFE904, 0x00000528, 0x00001A94, 0xFFFFF840, 0x00000300, 0x00001A94, 0xFFFFF840, 0x00000300 },
+ { 0x0213F0FE99181944, 0x000026CB, 0xFFFFF1FB, 0x000003E4, 0x000017CC, 0xFFFFFA25, 0x000002C8, 0x000017CC, 0xFFFFFA25, 0x000002C8 },
+ { 0x0213EA94DE0608C4, 0x00003274, 0xFFFFEA39, 0x0000050C, 0x00001B20, 0xFFFFF7C1, 0x00000314, 0x00001B20, 0xFFFFF7C1, 0x00000314 },
+ { 0x0213F0FD42D82924, 0x0000280B, 0xFFFFF283, 0x000003B5, 0x000018D0, 0xFFFFF992, 0x000002EC, 0x000018D0, 0xFFFFF992, 0x000002EC },
+ { 0x0213F0FE99062104, 0x000033AB, 0xFFFFEB1B, 0x000004C4, 0x00001FEE, 0xFFFFF53A, 0x00000378, 0x00001FEE, 0xFFFFF53A, 0x00000378 },
+ { 0x0213F0FE990A3964, 0x00002F79, 0xFFFFEB0C, 0x000004FA, 0x00001E57, 0xFFFFF4BF, 0x0000039B, 0x00001E57, 0xFFFFF4BF, 0x0000039B },
+ { 0x0213F0FE990448E4, 0x00003487, 0xFFFFE8F2, 0x00000539, 0x0000185B, 0xFFFFF9AE, 0x000002BA, 0x0000185B, 0xFFFFF9AE, 0x000002BA },
+ { 0x0213F0FE990A18A4, 0x00003500, 0xFFFFE793, 0x0000058A, 0x00001AA2, 0xFFFFF792, 0x0000031D, 0x00001AA2, 0xFFFFF792, 0x0000031D },
+ { 0x0213F0FE99081164, 0x00003943, 0xFFFFE54D, 0x000005D9, 0x00001BC8, 0xFFFFF6E0, 0x00000339, 0x00001BC8, 0xFFFFF6E0, 0x00000339 },
+ { 0x0213EA94DE0430A4, 0x0000306D, 0xFFFFEC5E, 0x000004A5, 0x00001A3A, 0xFFFFF85F, 0x00000304, 0x00001A3A, 0xFFFFF85F, 0x00000304 },
+ { 0x0213F0FD42D83084, 0x00002BA4, 0xFFFFEE8D, 0x0000046A, 0x0000198C, 0xFFFFF88E, 0x00000307, 0x0000198C, 0xFFFFF88E, 0x00000307 },
+ { 0x0213F0FD42D218E4, 0x00003D30, 0xFFFFE2F6, 0x0000062A, 0x000025DC, 0xFFFFF074, 0x00000435, 0x000025DC, 0xFFFFF074, 0x00000435 },
+ { 0x0213F0FD42D83964, 0x00002CD6, 0xFFFFED79, 0x0000049B, 0x000016D0, 0xFFFFFA53, 0x000002BB, 0x000016D0, 0xFFFFFA53, 0x000002BB },
+ { 0x0213F0FE99163164, 0x00002484, 0xFFFFF3BD, 0x000003A0, 0x000015B8, 0xFFFFFB6B, 0x000002A4, 0x000015B8, 0xFFFFFB6B, 0x000002A4 },
+ { 0x0213F0FE990E3944, 0x000038AE, 0xFFFFE6D1, 0x00000587, 0x00001A2A, 0xFFFFF8F1, 0x000002D4, 0x00001A2A, 0xFFFFF8F1, 0x000002D4 },
+ { 0x0213F0FE99044944, 0x000036FD, 0xFFFFE76C, 0x00000576, 0x00001EE4, 0xFFFFF58D, 0x00000361, 0x00001EE4, 0xFFFFF58D, 0x00000361 },
+ { 0x0213F0FD42D830A4, 0x00002BCF, 0xFFFFEF28, 0x00000448, 0x00001B93, 0xFFFFF7BA, 0x00000327, 0x00001B93, 0xFFFFF7BA, 0x00000327 },
+ { 0x0213F0FE99062884, 0x00003834, 0xFFFFE818, 0x0000053B, 0x00001AFE, 0xFFFFF85C, 0x000002F3, 0x00001AFE, 0xFFFFF85C, 0x000002F3 },
+ { 0x0213F0FE993231A4, 0x00002EF7, 0xFFFFEBFC, 0x000004CE, 0x00001897, 0xFFFFF8EF, 0x000002EC, 0x00001897, 0xFFFFF8EF, 0x000002EC },
+ { 0x0213F0FE992C18C4, 0x000035BD, 0xFFFFE8BB, 0x0000053B, 0x00001F22, 0xFFFFF561, 0x00000373, 0x00001F22, 0xFFFFF561, 0x00000373 },
+ { 0x0213F0FE99183984, 0x00002D42, 0xFFFFEE1D, 0x00000478, 0x000016F0, 0xFFFFFAAE, 0x000002B3, 0x000016F0, 0xFFFFFAAE, 0x000002B3 },
+ { 0x0213EA94DE045124, 0x00002F98, 0xFFFFEB3C, 0x000004F0, 0x00001903, 0xFFFFF818, 0x00000319, 0x00001903, 0xFFFFF818, 0x00000319 },
+ { 0x0213F0FD42D42144, 0x00004081, 0xFFFFDF13, 0x000006F3, 0x00002A6D, 0xFFFFEC1B, 0x00000509, 0x00002A6D, 0xFFFFEC1B, 0x00000509 },
+ { 0x0213EA94DE040904, 0x00002D68, 0xFFFFED21, 0x00000498, 0x00001FF6, 0xFFFFF427, 0x000003B0, 0x00001FF6, 0xFFFFF427, 0x000003B0 },
+ { 0x0213F0FE99023884, 0x00003243, 0xFFFFEA5C, 0x000004FD, 0x000020FB, 0xFFFFF39E, 0x000003C0, 0x000020FB, 0xFFFFF39E, 0x000003C0 },
+ { 0x0213F0FD42D848A4, 0x00002F20, 0xFFFFEC19, 0x000004C6, 0x00001748, 0xFFFFF99F, 0x000002DA, 0x00001748, 0xFFFFF99F, 0x000002DA },
+ { 0x0213F0FE99103984, 0x00002D68, 0xFFFFED21, 0x00000498, 0x00001A43, 0xFFFFF843, 0x000002F9, 0x00001A43, 0xFFFFF843, 0x000002F9 },
+ { 0x0213F0FE990220A4, 0x0000396E, 0xFFFFE616, 0x000005A9, 0x00001A51, 0xFFFFF850, 0x000002FA, 0x00001A51, 0xFFFFF850, 0x000002FA },
+ { 0x0213F0FE99043144, 0x0000305C, 0xFFFFED4B, 0x0000046C, 0x00001CF9, 0xFFFFF7BA, 0x00000304, 0x00001CF9, 0xFFFFF7BA, 0x00000304 },
+ { 0x0213F0FD42DA4164, 0x0000343C, 0xFFFFE869, 0x00000559, 0x00001CE2, 0xFFFFF614, 0x00000359, 0x00001CE2, 0xFFFFF614, 0x00000359 },
+ { 0x0213F0FE99183964, 0x00002782, 0xFFFFF1FE, 0x000003D9, 0x000015DC, 0xFFFFFB8B, 0x00000290, 0x000015DC, 0xFFFFFB8B, 0x00000290 },
+ { 0x0213F0FE991818C4, 0x00002B9C, 0xFFFFEF63, 0x00000443, 0x00001369, 0xFFFFFD51, 0x00000244, 0x00001369, 0xFFFFFD51, 0x00000244 },
+ { 0x0213F0FE990A2084, 0x000035F8, 0xFFFFE743, 0x00000592, 0x000018D8, 0xFFFFF8EE, 0x000002E4, 0x000018D8, 0xFFFFF8EE, 0x000002E4 },
+ { 0x0213EA94DE062844, 0x00002B72, 0xFFFFEF1E, 0x0000043C, 0x00002647, 0xFFFFF092, 0x0000043E, 0x00002647, 0xFFFFF092, 0x0000043E },
+ { 0x0213F0FE99102184, 0x00002EC9, 0xFFFFEC5F, 0x000004B8, 0x000018B6, 0xFFFFF936, 0x000002D8, 0x000018B6, 0xFFFFF936, 0x000002D8 },
+ { 0x0213F0FE99064084, 0x000038A7, 0xFFFFE6AC, 0x00000589, 0x00001C42, 0xFFFFF70B, 0x00000329, 0x00001C42, 0xFFFFF70B, 0x00000329 },
+ { 0x0213F0FE993008A4, 0x00002F6B, 0xFFFFEBF6, 0x000004CF, 0x000018AE, 0xFFFFF928, 0x000002E3, 0x000018AE, 0xFFFFF928, 0x000002E3 },
+ { 0x0213F0FD42DA5104, 0x000029CD, 0xFFFFEEE1, 0x00000459, 0x00001AB5, 0xFFFFF76F, 0x00000324, 0x00001AB5, 0xFFFFF76F, 0x00000324 },
+ { 0x0213EA94DE0638C4, 0x00003921, 0xFFFFE71D, 0x00000577, 0x00001646, 0xFFFFFB24, 0x00000293, 0x00001646, 0xFFFFFB24, 0x00000293 },
+ { 0x0213EA94DE044164, 0x00003940, 0xFFFFE521, 0x000005E8, 0x00001947, 0xFFFFF839, 0x0000030D, 0x00001947, 0xFFFFF839, 0x0000030D },
+ { 0x0213F0FD42D24164, 0x00003DCA, 0xFFFFE211, 0x00000659, 0x0000250E, 0xFFFFF072, 0x00000443, 0x0000250E, 0xFFFFF072, 0x00000443 },
+ { 0x0213F0FE990C0904, 0x00002E95, 0xFFFFEC20, 0x000004C9, 0x000015B4, 0xFFFFFAD3, 0x0000029D, 0x000015B4, 0xFFFFFAD3, 0x0000029D },
+ { 0x0213F0FE99041084, 0x00002C11, 0xFFFFEE6E, 0x00000468, 0x00001901, 0xFFFFF924, 0x000002E7, 0x00001901, 0xFFFFF924, 0x000002E7 },
+ { 0x0213EA94DE062104, 0x0000293F, 0xFFFFF158, 0x000003E6, 0x0000183F, 0xFFFFF9F6, 0x000002D2, 0x0000183F, 0xFFFFF9F6, 0x000002D2 },
+ { 0x0213F0FE990E1104, 0x00002A67, 0xFFFFEF34, 0x0000043E, 0x00001C6F, 0xFFFFF6F1, 0x0000032B, 0x00001C6F, 0xFFFFF6F1, 0x0000032B },
+ { 0x0213EA94DE065124, 0x00002F8D, 0xFFFFEB77, 0x000004DA, 0x00001C0D, 0xFFFFF627, 0x00000365, 0x00001C0D, 0xFFFFF627, 0x00000365 },
+ { 0x0213F0FE990C38C4, 0x00003476, 0xFFFFEA5B, 0x000004E7, 0x00001DBF, 0xFFFFF6C7, 0x00000333, 0x00001DBF, 0xFFFFF6C7, 0x00000333 },
+ { 0x0213F0FE990E0944, 0x00003336, 0xFFFFE92F, 0x00000546, 0x00001614, 0xFFFFFAE0, 0x00000296, 0x00001614, 0xFFFFFAE0, 0x00000296 },
+ { 0x0213F0FE99162164, 0x00002513, 0xFFFFF323, 0x000003BC, 0x000016DB, 0xFFFFFA79, 0x000002CD, 0x000016DB, 0xFFFFFA79, 0x000002CD },
+ { 0x0213F0FE990A2944, 0x000035A7, 0xFFFFE78E, 0x00000584, 0x00001B0D, 0xFFFFF77D, 0x0000031F, 0x00001B0D, 0xFFFFF77D, 0x0000031F },
+ { 0x0213F0FE993238E4, 0x00003171, 0xFFFFEB98, 0x000004C6, 0x00001C76, 0xFFFFF71F, 0x0000032F, 0x00001C76, 0xFFFFF71F, 0x0000032F },
+ { 0x0213F0FD42DA1084, 0x00002C52, 0xFFFFED2E, 0x000004A7, 0x00002182, 0xFFFFF2F4, 0x000003E4, 0x00002182, 0xFFFFF2F4, 0x000003E4 },
+ { 0x0213F0FE99102924, 0x000032E1, 0xFFFFEB39, 0x000004D0, 0x00001B55, 0xFFFFF859, 0x000002FA, 0x00001B55, 0xFFFFF859, 0x000002FA },
+ { 0x0213F0FE991848A4, 0x000029B6, 0xFFFFEFF7, 0x00000430, 0x0000151B, 0xFFFFFBC6, 0x0000027F, 0x0000151B, 0xFFFFFBC6, 0x0000027F },
+ { 0x0213F0FD42DA1964, 0x00002FF7, 0xFFFFEB67, 0x000004DA, 0x000020E9, 0xFFFFF363, 0x000003CE, 0x000020E9, 0xFFFFF363, 0x000003CE },
+ { 0x0213F0FD42DA5124, 0x00003CDD, 0xFFFFE2B2, 0x00000649, 0x00001B18, 0xFFFFF739, 0x00000329, 0x00001B18, 0xFFFFF739, 0x00000329 },
+ { 0x0213F0FE990628A4, 0x00003C82, 0xFFFFE5C6, 0x0000058E, 0x00001F3F, 0xFFFFF5AD, 0x00000361, 0x00001F3F, 0xFFFFF5AD, 0x00000361 },
+ { 0x0213F0FD42DC4084, 0x0000319B, 0xFFFFEA15, 0x0000051B, 0x00001CC9, 0xFFFFF62E, 0x00000358, 0x00001CC9, 0xFFFFF62E, 0x00000358 },
+ { 0x0213EA94DE0638E4, 0x000032B6, 0xFFFFEB2B, 0x000004D6, 0x000018E0, 0xFFFFF966, 0x000002DE, 0x000018E0, 0xFFFFF966, 0x000002DE },
+ { 0x0213EA94DE023984, 0x0000300A, 0xFFFFEBA6, 0x000004D1, 0x00001CFD, 0xFFFFF5F6, 0x0000036D, 0x00001CFD, 0xFFFFF5F6, 0x0000036D },
+ { 0x0213F0FD42D82984, 0x000026A9, 0xFFFFF15D, 0x00000400, 0x00001561, 0xFFFFFB1F, 0x000002A0, 0x00001561, 0xFFFFFB1F, 0x000002A0 },
+ { 0x0213F0FE990E5124, 0x00003123, 0xFFFFEAD2, 0x000004FA, 0x000018CB, 0xFFFFF8F5, 0x000002EC, 0x000018CB, 0xFFFFF8F5, 0x000002EC },
+ { 0x0213F0FE991840C4, 0x00003577, 0xFFFFE935, 0x00000533, 0x000016CD, 0xFFFFFB44, 0x00000289, 0x000016CD, 0xFFFFFB44, 0x00000289 },
+ { 0x0213F0FE99282184, 0x00002875, 0xFFFFF170, 0x000003F3, 0x00001567, 0xFFFFFBD5, 0x00000289, 0x00001567, 0xFFFFFBD5, 0x00000289 },
+ { 0x0213F0FE99084084, 0x00003AE2, 0xFFFFE538, 0x000005C1, 0x00001CB4, 0xFFFFF6A3, 0x0000033C, 0x00001CB4, 0xFFFFF6A3, 0x0000033C },
+ { 0x0213F0FE990C38E4, 0x000031DF, 0xFFFFEC2A, 0x000004A3, 0x00001EF0, 0xFFFFF626, 0x00000352, 0x00001EF0, 0xFFFFF626, 0x00000352 },
+ { 0x0213F0FD42D25144, 0x00004A6A, 0xFFFFDB15, 0x00000758, 0x000027F3, 0xFFFFEEEE, 0x00000479, 0x000027F3, 0xFFFFEEEE, 0x00000479 },
+ { 0x0213EA94DE063904, 0x00002BB9, 0xFFFFEF5D, 0x00000433, 0x00001589, 0xFFFFFB57, 0x00000295, 0x00001589, 0xFFFFFB57, 0x00000295 },
+ { 0x0213F0FE99042164, 0x000033A0, 0xFFFFE98F, 0x00000528, 0x00001CB4, 0xFFFFF706, 0x0000032D, 0x00001CB4, 0xFFFFF706, 0x0000032D },
+ { 0x0213F0FE99163064, 0x0000248E, 0xFFFFF380, 0x000003AC, 0x000016EA, 0xFFFFFA6C, 0x000002CE, 0x000016EA, 0xFFFFFA6C, 0x000002CE },
+ { 0x0213F0FE990221A4, 0x00002FE2, 0xFFFFEB2F, 0x000004E9, 0x00001D4E, 0xFFFFF56B, 0x00000380, 0x00001D4E, 0xFFFFF56B, 0x00000380 },
+ { 0x0213F0FE990A2884, 0x00003283, 0xFFFFE9E7, 0x0000051D, 0x00000694, 0xFFFFFD32, 0x000003C3, 0x00000694, 0xFFFFFD32, 0x000003C3 },
+ { 0x0213F0FD42D850C4, 0x00002EE4, 0xFFFFEBFD, 0x000004D3, 0x0000151A, 0xFFFFFAF6, 0x000002A4, 0x0000151A, 0xFFFFFAF6, 0x000002A4 },
+ { 0x0213F0FD42DC18E4, 0x0000302D, 0xFFFFEB7F, 0x000004DA, 0x00001E6D, 0xFFFFF54B, 0x00000380, 0x00001E6D, 0xFFFFF54B, 0x00000380 },
+ { 0x0213F0FD42DA50C4, 0x000033DA, 0xFFFFE7FB, 0x0000057F, 0x00001DED, 0xFFFFF50E, 0x0000038D, 0x00001DED, 0xFFFFF50E, 0x0000038D },
+ { 0x0213F0FE992C4084, 0x000030B5, 0xFFFFEBB8, 0x000004C4, 0x00001C3F, 0xFFFFF726, 0x0000032A, 0x00001C3F, 0xFFFFF726, 0x0000032A },
+ { 0x0213F0FE990831C4, 0x00003BBD, 0xFFFFE55C, 0x000005B8, 0x000019DB, 0xFFFFF8BB, 0x000002EF, 0x000019DB, 0xFFFFF8BB, 0x000002EF },
+ { 0x0213F0FE990E3884, 0x00002964, 0xFFFFF051, 0x0000040E, 0x000025CD, 0xFFFFF11B, 0x0000041F, 0x000025CD, 0xFFFFF11B, 0x0000041F },
+ { 0x0213F0FD42DC4884, 0x000033F5, 0xFFFFE863, 0x00000560, 0x00001BCE, 0xFFFFF689, 0x0000034B, 0x00001BCE, 0xFFFFF689, 0x0000034B },
+ { 0x0213F0FE990A2864, 0x00003294, 0xFFFFE924, 0x00000548, 0x00001D41, 0xFFFFF580, 0x0000037D, 0x00001D41, 0xFFFFF580, 0x0000037D },
+ { 0x0213F0FD42DC39A4, 0x000034FB, 0xFFFFE7FE, 0x0000056D, 0x00001CB1, 0xFFFFF635, 0x00000357, 0x00001CB1, 0xFFFFF635, 0x00000357 },
+ { 0x0213F0FE990A10A4, 0x00002E28, 0xFFFFEBB9, 0x000004E0, 0x00001B20, 0xFFFFF6E3, 0x0000033C, 0x00001B20, 0xFFFFF6E3, 0x0000033C },
+ { 0x0213F0FD42DA1904, 0x00002799, 0xFFFFF0F4, 0x000003FC, 0x00001C9D, 0xFFFFF6A1, 0x00000345, 0x00001C9D, 0xFFFFF6A1, 0x00000345 },
+ { 0x0213F0FE99064104, 0x00003AEA, 0xFFFFE5DB, 0x0000059D, 0x00001B61, 0xFFFFF7F0, 0x00000301, 0x00001B61, 0xFFFFF7F0, 0x00000301 },
+ { 0x0213EA94DE041984, 0x000031F6, 0xFFFFEAB8, 0x000004F3, 0x00001D90, 0xFFFFF622, 0x00000359, 0x00001D90, 0xFFFFF622, 0x00000359 },
+ { 0x0213F0FE990C4064, 0x000031B8, 0xFFFFEA61, 0x0000050F, 0x0000199D, 0xFFFFF87C, 0x000002FD, 0x0000199D, 0xFFFFF87C, 0x000002FD },
+ { 0x0213F0FD42D23144, 0x00004514, 0xFFFFDDFF, 0x000006F6, 0x000022CD, 0xFFFFF29F, 0x000003D9, 0x000022CD, 0xFFFFF29F, 0x000003D9 },
+ { 0x0213EA94DE043164, 0x00002F30, 0xFFFFECB8, 0x000004A0, 0x00001B07, 0xFFFFF7E2, 0x00000313, 0x00001B07, 0xFFFFF7E2, 0x00000313 },
+ { 0x0213F0FD42DC30A4, 0x0000383B, 0xFFFFE702, 0x00000581, 0x00001A08, 0xFFFFF8CA, 0x000002E2, 0x00001A08, 0xFFFFF8CA, 0x000002E2 },
+ { 0x0213F0FE99022164, 0x00002CC5, 0xFFFFEDF8, 0x00000465, 0x00001F47, 0xFFFFF4B2, 0x00000393, 0x00001F47, 0xFFFFF4B2, 0x00000393 },
+ { 0x0213F0FE991621C4, 0x00002304, 0xFFFFF453, 0x00000384, 0x0000170A, 0xFFFFFA3F, 0x000002CE, 0x0000170A, 0xFFFFFA3F, 0x000002CE },
+ { 0x0213F0FE990A5124, 0x0000337E, 0xFFFFE850, 0x0000056E, 0x00001BDD, 0xFFFFF668, 0x00000353, 0x00001BDD, 0xFFFFF668, 0x00000353 },
+ { 0x0213F0FE990E4924, 0x00002E2F, 0xFFFFEC9B, 0x000004AE, 0x00001C4D, 0xFFFFF6D3, 0x00000338, 0x00001C4D, 0xFFFFF6D3, 0x00000338 },
+ { 0x0213EA94DE061124, 0x00002DDD, 0xFFFFEDA4, 0x00000477, 0x00002010, 0xFFFFF4BB, 0x00000390, 0x00002010, 0xFFFFF4BB, 0x00000390 },
+ { 0x0213F0FD42DA48E4, 0x0000290C, 0xFFFFEF61, 0x00000445, 0x00002133, 0xFFFFF324, 0x000003D8, 0x00002133, 0xFFFFF324, 0x000003D8 },
+ { 0x0213F0FE99062924, 0x0000371E, 0xFFFFE8D5, 0x00000524, 0x00001C3A, 0xFFFFF7AE, 0x00000314, 0x00001C3A, 0xFFFFF7AE, 0x00000314 },
+ { 0x0213F0FD42D838E4, 0x00002A58, 0xFFFFF007, 0x00000429, 0x000018A6, 0xFFFFF98F, 0x000002E1, 0x000018A6, 0xFFFFF98F, 0x000002E1 },
+ { 0x0213F0FE99023084, 0x00002FED, 0xFFFFEC48, 0x000004AA, 0x00001E9D, 0xFFFFF584, 0x00000370, 0x00001E9D, 0xFFFFF584, 0x00000370 },
+ { 0x0213F0FE99181884, 0x00002829, 0xFFFFF15F, 0x000003F7, 0x0000157E, 0xFFFFFBD4, 0x00000282, 0x0000157E, 0xFFFFFBD4, 0x00000282 },
+ { 0x0213F0FE99101924, 0x000030CF, 0xFFFFEB8D, 0x000004CE, 0x00001A4C, 0xFFFFF868, 0x000002F7, 0x00001A4C, 0xFFFFF868, 0x000002F7 },
+ { 0x0213F0FD42DA2084, 0x00002C8F, 0xFFFFEDD2, 0x0000047D, 0x00001CCE, 0xFFFFF6A1, 0x00000343, 0x00001CCE, 0xFFFFF6A1, 0x00000343 },
+ { 0x0213F0FE99182164, 0x00002A84, 0xFFFFEFBA, 0x0000043E, 0x000015EF, 0xFFFFFB4B, 0x0000029E, 0x000015EF, 0xFFFFFB4B, 0x0000029E },
+ { 0x0213F0FE990C28A4, 0x000034CA, 0xFFFFEA08, 0x000004FF, 0x00001C19, 0xFFFFF7ED, 0x00000309, 0x00001C19, 0xFFFFF7ED, 0x00000309 },
+ { 0x0213F0FE991639A4, 0x00002187, 0xFFFFF4B0, 0x0000037E, 0x0000154A, 0xFFFFFB0C, 0x000002AE, 0x0000154A, 0xFFFFFB0C, 0x000002AE },
+ { 0x0213F0FD42DA3844, 0x00002F4F, 0xFFFFEB3C, 0x000004F8, 0x0000181F, 0xFFFFF92D, 0x000002DF, 0x0000181F, 0xFFFFF92D, 0x000002DF },
+ { 0x0213F0FE990410E4, 0x0000290C, 0xFFFFF0B1, 0x000003FC, 0x00001DB0, 0xFFFFF636, 0x00000355, 0x00001DB0, 0xFFFFF636, 0x00000355 },
+ { 0x0213F0FE990A1064, 0x000034C1, 0xFFFFE888, 0x0000055A, 0x000019BF, 0xFFFFF881, 0x000002FB, 0x000019BF, 0xFFFFF881, 0x000002FB },
+ { 0x0213F0FD42DC18C4, 0x00003139, 0xFFFFEA98, 0x00000504, 0x000019F2, 0xFFFFF820, 0x0000030B, 0x000019F2, 0xFFFFF820, 0x0000030B },
+ { 0x0213F0FD42D83144, 0x00002CAC, 0xFFFFEEB2, 0x00000458, 0x0000152C, 0xFFFFFBEF, 0x0000027B, 0x0000152C, 0xFFFFFBEF, 0x0000027B },
+ { 0x0213F0FE992C38E4, 0x00003577, 0xFFFFE99C, 0x0000050D, 0x00001E64, 0xFFFFF679, 0x0000033F, 0x00001E64, 0xFFFFF679, 0x0000033F },
+ { 0x0213F0FD42DA4104, 0x0000263A, 0xFFFFF1E4, 0x000003D4, 0x00001F68, 0xFFFFF4ED, 0x00000386, 0x00001F68, 0xFFFFF4ED, 0x00000386 },
+ { 0x0213F0FD42D81984, 0x00002CE9, 0xFFFFED63, 0x00000497, 0x00001810, 0xFFFFF94D, 0x000002E3, 0x00001810, 0xFFFFF94D, 0x000002E3 },
+ { 0x0213EA94DE044104, 0x0000318A, 0xFFFFEAC8, 0x000004F5, 0x0000195C, 0xFFFFF896, 0x000002FB, 0x0000195C, 0xFFFFF896, 0x000002FB },
+ { 0x0213F0FD42D83904, 0x00002C41, 0xFFFFEEC6, 0x0000045D, 0x000017DD, 0xFFFFFA16, 0x000002CB, 0x000017DD, 0xFFFFFA16, 0x000002CB },
+ { 0x0213F0FE990231A4, 0x00002DD4, 0xFFFFEC98, 0x000004AD, 0x00001BD7, 0xFFFFF69F, 0x00000347, 0x00001BD7, 0xFFFFF69F, 0x00000347 },
+ { 0x0213F0FD42DA3944, 0x00003351, 0xFFFFE9B2, 0x0000051A, 0x00001CA1, 0xFFFFF6A4, 0x00000341, 0x00001CA1, 0xFFFFF6A4, 0x00000341 },
+ { 0x0213F0FE99021104, 0x0000322D, 0xFFFFE9BE, 0x00000527, 0x00001CF9, 0xFFFFF5EB, 0x00000366, 0x00001CF9, 0xFFFFF5EB, 0x00000366 },
+ { 0x0213F0FE990C28C4, 0x00003678, 0xFFFFE9A8, 0x00000503, 0x00001AD4, 0xFFFFF8F6, 0x000002E3, 0x00001AD4, 0xFFFFF8F6, 0x000002E3 },
+ { 0x0213F0FE99161924, 0x0000260E, 0xFFFFF2C1, 0x000003CA, 0x00001139, 0xFFFFFE48, 0x00000236, 0x00001139, 0xFFFFFE48, 0x00000236 },
+ { 0x0213F0FE990A2164, 0x000033D3, 0xFFFFE872, 0x00000565, 0x00001B72, 0xFFFFF713, 0x00000332, 0x00001B72, 0xFFFFF713, 0x00000332 },
+ { 0x0213F0FE99323844, 0x0000309B, 0xFFFFEB42, 0x000004E4, 0x00001918, 0xFFFFF8C8, 0x000002F2, 0x00001918, 0xFFFFF8C8, 0x000002F2 },
+ { 0x0213F0FE99182864, 0x000028B8, 0xFFFFF105, 0x00000402, 0x000018BB, 0xFFFFF9BC, 0x000002D3, 0x000018BB, 0xFFFFF9BC, 0x000002D3 },
+ { 0x0213F0FE990A1884, 0x00003123, 0xFFFFE9D1, 0x00000534, 0x00001B19, 0xFFFFF6FE, 0x0000033C, 0x00001B19, 0xFFFFF6FE, 0x0000033C },
+ { 0x0213F0FE99022144, 0x00003216, 0xFFFFEA8E, 0x000004F6, 0x00001F72, 0xFFFFF4CE, 0x0000038B, 0x00001F72, 0xFFFFF4CE, 0x0000038B },
+ { 0x0213F0FE99162964, 0x00002564, 0xFFFFF32D, 0x000003B6, 0x00001685, 0xFFFFFADB, 0x000002BB, 0x00001685, 0xFFFFFADB, 0x000002BB },
+ { 0x0213F0FD42DA2924, 0x00002E60, 0xFFFFED13, 0x00000497, 0x00001CA5, 0xFFFFF6B9, 0x00000346, 0x00001CA5, 0xFFFFF6B9, 0x00000346 },
+ { 0x0213F0FE990E39A4, 0x0000336D, 0xFFFFE934, 0x0000053B, 0x00001B3E, 0xFFFFF763, 0x00000327, 0x00001B3E, 0xFFFFF763, 0x00000327 },
+ { 0x0213F0FE99101084, 0x0000274A, 0xFFFFF119, 0x000003FA, 0x00001D75, 0xFFFFF5CD, 0x0000036F, 0x00001D75, 0xFFFFF5CD, 0x0000036F },
+ { 0x0213F0FD42DA2164, 0x0000366B, 0xFFFFE70A, 0x0000059A, 0x00001ED8, 0xFFFFF501, 0x00000389, 0x00001ED8, 0xFFFFF501, 0x00000389 },
+ { 0x0213F0FE99223964, 0x00003164, 0xFFFFEAB4, 0x000004FA, 0x00001C52, 0xFFFFF6E0, 0x00000336, 0x00001C52, 0xFFFFF6E0, 0x00000336 },
+ { 0x0213F0FD42D23064, 0x00004224, 0xFFFFDF7F, 0x000006C1, 0x00002A52, 0xFFFFED5E, 0x000004BB, 0x00002A52, 0xFFFFED5E, 0x000004BB },
+ { 0x0213F0FE99102864, 0x000030E3, 0xFFFFEB07, 0x000004ED, 0x00001FD3, 0xFFFFF46D, 0x000003A1, 0x00001FD3, 0xFFFFF46D, 0x000003A1 },
+ { 0x0213F0FD42D82884, 0x00002AEB, 0xFFFFEF1B, 0x00000454, 0x00001829, 0xFFFFF995, 0x000002DD, 0x00001829, 0xFFFFF995, 0x000002DD },
+ { 0x0213F0FD42DC50E4, 0x0000346B, 0xFFFFE7A2, 0x0000058B, 0x000020C5, 0xFFFFF2E8, 0x000003EC, 0x000020C5, 0xFFFFF2E8, 0x000003EC },
+ { 0x0213F0FD42DC4164, 0x000039CF, 0xFFFFE5D7, 0x000005A9, 0x00001D66, 0xFFFFF5D6, 0x00000366, 0x00001D66, 0xFFFFF5D6, 0x00000366 },
+ { 0x0213F0FE990418E4, 0x000034AC, 0xFFFFE9AE, 0x00000515, 0x00001A28, 0xFFFFF904, 0x000002DC, 0x00001A28, 0xFFFFF904, 0x000002DC },
+ { 0x0213F0FD42DC2084, 0x00002D68, 0xFFFFED21, 0x00000498, 0x00001C6F, 0xFFFFF686, 0x0000034C, 0x00001C6F, 0xFFFFF686, 0x0000034C },
+ { 0x0213F0FE990820C4, 0x0000328B, 0xFFFFEBA1, 0x000004B4, 0x00001DA3, 0xFFFFF683, 0x00000349, 0x00001DA3, 0xFFFFF683, 0x00000349 },
+ { 0x0213F0FE991828C4, 0x000027DC, 0xFFFFF295, 0x000003BF, 0x000019C1, 0xFFFFF98E, 0x000002E8, 0x000019C1, 0xFFFFF98E, 0x000002E8 },
+ { 0x0213F0FE99184084, 0x00002756, 0xFFFFF1D7, 0x000003DF, 0x000015D9, 0xFFFFFB51, 0x00000298, 0x000015D9, 0xFFFFFB51, 0x00000298 },
+ { 0x0213F0FE99083884, 0x00003526, 0xFFFFE907, 0x00000526, 0x000017AB, 0xFFFFFA12, 0x000002AB, 0x000017AB, 0xFFFFFA12, 0x000002AB },
+ { 0x0213F0FD42DA18E4, 0x0000351B, 0xFFFFE8B7, 0x00000540, 0x00001A86, 0xFFFFF821, 0x00000303, 0x00001A86, 0xFFFFF821, 0x00000303 },
+ { 0x0213F0FE99164144, 0x000024B2, 0xFFFFF34E, 0x000003B1, 0x000018E2, 0xFFFFF926, 0x000002FC, 0x000018E2, 0xFFFFF926, 0x000002FC },
+ { 0x0213F0FD42D828A4, 0x00002F36, 0xFFFFED5D, 0x00000486, 0x0000157A, 0xFFFFFB85, 0x00000293, 0x0000157A, 0xFFFFFB85, 0x00000293 },
+ { 0x0213F0FD42DC50C4, 0x00003A6E, 0xFFFFE456, 0x000005FD, 0x00001F68, 0xFFFFF3D1, 0x000003C3, 0x00001F68, 0xFFFFF3D1, 0x000003C3 },
+ { 0x0213F0FE990A31A4, 0x00002BC3, 0xFFFFED2D, 0x000004A7, 0x00001C3F, 0xFFFFF609, 0x00000364, 0x00001C3F, 0xFFFFF609, 0x00000364 },
+ { 0x0213F0FE990E2084, 0x000032E1, 0xFFFFEA83, 0x000004F6, 0x00001B37, 0xFFFFF842, 0x000002F5, 0x00001B37, 0xFFFFF842, 0x000002F5 },
+ { 0x0213F0FD42D83184, 0x000028E3, 0xFFFFF07F, 0x00000412, 0x00001676, 0xFFFFFA68, 0x000002BE, 0x00001676, 0xFFFFFA68, 0x000002BE },
+ { 0x0213F0FD42D21104, 0x0000444C, 0xFFFFDDAD, 0x00000712, 0x00002634, 0xFFFFEF89, 0x0000046C, 0x00002634, 0xFFFFEF89, 0x0000046C },
+ { 0x0213F0FE990418C4, 0x00003121, 0xFFFFEBBB, 0x000004C6, 0x00001C98, 0xFFFFF72B, 0x0000032D, 0x00001C98, 0xFFFFF72B, 0x0000032D },
+ { 0x0213F0FD42D840A4, 0x00002C31, 0xFFFFEDC4, 0x00000490, 0x0000162D, 0xFFFFFA8E, 0x000002B4, 0x0000162D, 0xFFFFFA8E, 0x000002B4 },
+ { 0x0213F0FD42DA18C4, 0x00002749, 0xFFFFF112, 0x000003FC, 0x00001C85, 0xFFFFF6B8, 0x00000342, 0x00001C85, 0xFFFFF6B8, 0x00000342 },
+ { 0x0213F0FE99044104, 0x00003159, 0xFFFFEB99, 0x000004C2, 0x00001BD0, 0xFFFFF7CA, 0x00000307, 0x00001BD0, 0xFFFFF7CA, 0x00000307 },
+ { 0x0213F0FE99164164, 0x00002610, 0xFFFFF1FD, 0x000003EC, 0x000016BE, 0xFFFFFA53, 0x000002CB, 0x000016BE, 0xFFFFFA53, 0x000002CB },
+ { 0x0213F0FE99023184, 0x000037B5, 0xFFFFE63D, 0x000005B5, 0x00002285, 0xFFFFF25D, 0x000003F7, 0x00002285, 0xFFFFF25D, 0x000003F7 },
+ { 0x0213F0FE990A28A4, 0x00002FEE, 0xFFFFEB47, 0x000004EF, 0x00001CBE, 0xFFFFF64E, 0x00000358, 0x00001CBE, 0xFFFFF64E, 0x00000358 },
+ { 0x0213F0FE99105104, 0x00002E90, 0xFFFFEC48, 0x000004C0, 0x00001A47, 0xFFFFF7D1, 0x0000031A, 0x00001A47, 0xFFFFF7D1, 0x0000031A },
+ { 0x0213F0FD42DA4084, 0x000034AB, 0xFFFFE84A, 0x00000559, 0x00001A72, 0xFFFFF79A, 0x0000031C, 0x00001A72, 0xFFFFF79A, 0x0000031C },
+ { 0x0213F0FE99183884, 0x00002F7B, 0xFFFFECFC, 0x0000049C, 0x00001814, 0xFFFFFA22, 0x000002C2, 0x00001814, 0xFFFFFA22, 0x000002C2 },
+ { 0x0213F0FE99021964, 0x00003618, 0xFFFFE709, 0x00000596, 0x00001EBF, 0xFFFFF482, 0x000003A5, 0x00001EBF, 0xFFFFF482, 0x000003A5 },
+ { 0x0213EA94DE024904, 0x0000341B, 0xFFFFE8B2, 0x0000054F, 0x00001D26, 0xFFFFF578, 0x00000388, 0x00001D26, 0xFFFFF578, 0x00000388 },
+ { 0x0213F0FE99102144, 0x000030F6, 0xFFFFEB89, 0x000004CD, 0x000019C0, 0xFFFFF8CC, 0x000002E6, 0x000019C0, 0xFFFFF8CC, 0x000002E6 },
+ { 0x0213F0FE992841A4, 0x00002B76, 0xFFFFEF6C, 0x00000444, 0x00001563, 0xFFFFFBBE, 0x0000028D, 0x00001563, 0xFFFFFBBE, 0x0000028D },
+ { 0x0213F0FD42D81864, 0x00002BA2, 0xFFFFEE31, 0x0000047F, 0x00001A3D, 0xFFFFF7F3, 0x00000320, 0x00001A3D, 0xFFFFF7F3, 0x00000320 },
+ { 0x0213F0FE992C48E4, 0x00003545, 0xFFFFE87A, 0x0000054A, 0x00001B5A, 0xFFFFF7B0, 0x0000030C, 0x00001B5A, 0xFFFFF7B0, 0x0000030C },
+ { 0x0213EA94DE042944, 0x00003879, 0xFFFFE73F, 0x00000578, 0x00001649, 0xFFFFFB57, 0x00000283, 0x00001649, 0xFFFFFB57, 0x00000283 },
+ { 0x0213F0FD42D840C4, 0x00002772, 0xFFFFF0F1, 0x00000410, 0x0000142F, 0xFFFFFBCF, 0x00000287, 0x0000142F, 0xFFFFFBCF, 0x00000287 },
+ { 0x0213F0FD42DA3184, 0x00003228, 0xFFFFE98E, 0x00000535, 0x00001F48, 0xFFFFF495, 0x00000399, 0x00001F48, 0xFFFFF495, 0x00000399 },
+ { 0x0213F0FE990E40E4, 0x00002887, 0xFFFFF119, 0x000003E8, 0x000021AA, 0xFFFFF3F5, 0x000003A5, 0x000021AA, 0xFFFFF3F5, 0x000003A5 },
+ { 0x0213F0FD42DA28A4, 0x0000301F, 0xFFFFEBB2, 0x000004D2, 0x00001C02, 0xFFFFF736, 0x0000032B, 0x00001C02, 0xFFFFF736, 0x0000032B },
+ { 0x0213F0FE991820A4, 0x00002E13, 0xFFFFEE3F, 0x00000468, 0x000016AC, 0xFFFFFB32, 0x0000029E, 0x000016AC, 0xFFFFFB32, 0x0000029E },
+ { 0x0213F0FE99044924, 0x00003478, 0xFFFFE8F9, 0x00000538, 0x00001DAB, 0xFFFFF645, 0x00000345, 0x00001DAB, 0xFFFFF645, 0x00000345 },
+ { 0x0213F0FE990608C4, 0x000030C6, 0xFFFFEB6C, 0x000004D4, 0x0000184A, 0xFFFFF934, 0x000002E1, 0x0000184A, 0xFFFFF934, 0x000002E1 },
+ { 0x0213F0FE990A2044, 0x00002F1B, 0xFFFFEBD3, 0x000004D3, 0x000019E7, 0xFFFFF813, 0x0000030D, 0x000019E7, 0xFFFFF813, 0x0000030D },
+ { 0x0213F0FE99023904, 0x00003214, 0xFFFFEAE9, 0x000004E0, 0x0000178F, 0xFFFFFA1C, 0x000002B1, 0x0000178F, 0xFFFFFA1C, 0x000002B1 },
+ { 0x0213F0FD42DC3144, 0x0000399C, 0xFFFFE738, 0x0000055E, 0x00001EA1, 0xFFFFF5E7, 0x0000035A, 0x00001EA1, 0xFFFFF5E7, 0x0000035A },
+ { 0x0213F0FE990650C4, 0x00003A01, 0xFFFFE5B2, 0x000005B6, 0x00001D95, 0xFFFFF5D2, 0x0000036A, 0x00001D95, 0xFFFFF5D2, 0x0000036A },
+ { 0x0213F0FE99043884, 0x0000310D, 0xFFFFEB78, 0x000004D0, 0x00001C06, 0xFFFFF76E, 0x0000031A, 0x00001C06, 0xFFFFF76E, 0x0000031A },
+ { 0x0213F0FE99063864, 0x00003CD1, 0xFFFFE42F, 0x000005EB, 0x00001933, 0xFFFFF91F, 0x000002D4, 0x00001933, 0xFFFFF91F, 0x000002D4 },
+ { 0x0213F0FD42DA3164, 0x00003119, 0xFFFFEB1B, 0x000004E1, 0x00001FC7, 0xFFFFF46A, 0x000003A2, 0x00001FC7, 0xFFFFF46A, 0x000003A2 },
+ { 0x0213EA94DE0648A4, 0x0000390D, 0xFFFFE566, 0x000005D8, 0x00001EC6, 0xFFFFF4DC, 0x00000391, 0x00001EC6, 0xFFFFF4DC, 0x00000391 },
+ { 0x0213F0FD42DA10C4, 0x00003446, 0xFFFFE858, 0x00000561, 0x00001FDB, 0xFFFFF3FF, 0x000003B9, 0x00001FDB, 0xFFFFF3FF, 0x000003B9 },
+ { 0x0213F0FE99044904, 0x000032BA, 0xFFFFEA07, 0x00000511, 0x00001B25, 0xFFFFF7C9, 0x0000030D, 0x00001B25, 0xFFFFF7C9, 0x0000030D },
+ { 0x0213F0FE990E1864, 0x00002CCF, 0xFFFFEDE5, 0x00000478, 0x00001BC8, 0xFFFFF761, 0x00000326, 0x00001BC8, 0xFFFFF761, 0x00000326 },
+ { 0x0213F0FE99062984, 0x0000400E, 0xFFFFE1CB, 0x00000652, 0x00001AF8, 0xFFFFF7B9, 0x00000312, 0x00001AF8, 0xFFFFF7B9, 0x00000312 },
+ { 0x0213F0FE990408E4, 0x00002F24, 0xFFFFEC2A, 0x000004C7, 0x00001B94, 0xFFFFF748, 0x00000333, 0x00001B94, 0xFFFFF748, 0x00000333 },
+ { 0x0213F0FD42D21924, 0x00003FDA, 0xFFFFE1C1, 0x0000064B, 0x00002427, 0xFFFFF180, 0x0000040C, 0x00002427, 0xFFFFF180, 0x0000040C },
+ { 0x0213F0FE990A18C4, 0x00002F6B, 0xFFFFEBA7, 0x000004DD, 0x00001C25, 0xFFFFF6C1, 0x00000344, 0x00001C25, 0xFFFFF6C1, 0x00000344 },
+ { 0x0213F0FE99182104, 0x00002A53, 0xFFFFF0EE, 0x00000402, 0x000017C6, 0xFFFFFAA0, 0x000002BF, 0x000017C6, 0xFFFFFAA0, 0x000002BF },
+ { 0x0213F0FE99105144, 0x000031F4, 0xFFFFEA34, 0x00000517, 0x000016FF, 0xFFFFFA4E, 0x000002AC, 0x000016FF, 0xFFFFFA4E, 0x000002AC },
+ { 0x0213F0FE99322144, 0x00002E24, 0xFFFFED46, 0x00000489, 0x00001712, 0xFFFFFA5D, 0x000002AC, 0x00001712, 0xFFFFFA5D, 0x000002AC },
+ { 0x0213F0FE99182824, 0x000028CD, 0xFFFFF0E3, 0x0000040E, 0x00001606, 0xFFFFFB37, 0x000002A4, 0x00001606, 0xFFFFFB37, 0x000002A4 },
+ { 0x0213F0FE990220C4, 0x00003184, 0xFFFFEB88, 0x000004C3, 0x000018DA, 0xFFFFF939, 0x000002DB, 0x000018DA, 0xFFFFF939, 0x000002DB },
+ { 0x0213F0FE99162124, 0x0000239B, 0xFFFFF470, 0x00000386, 0x00001714, 0xFFFFFA9F, 0x000002C8, 0x00001714, 0xFFFFFA9F, 0x000002C8 },
+ { 0x0213F0FD42DC38E4, 0x00003641, 0xFFFFE92B, 0x00000515, 0x00001BE2, 0xFFFFF795, 0x0000031B, 0x00001BE2, 0xFFFFF795, 0x0000031B },
+ { 0x0213F0FE992C1144, 0x00003278, 0xFFFFEA17, 0x00000510, 0x00001B71, 0xFFFFF778, 0x0000031D, 0x00001B71, 0xFFFFF778, 0x0000031D },
+ { 0x0213F0FE99062844, 0x000035B9, 0xFFFFE8DA, 0x0000052D, 0x00001A6A, 0xFFFFF83B, 0x000002FF, 0x00001A6A, 0xFFFFF83B, 0x000002FF },
+ { 0x0213F0FE990E18C4, 0x00002E5E, 0xFFFFED32, 0x0000048B, 0x00001E7D, 0xFFFFF60E, 0x0000034E, 0x00001E7D, 0xFFFFF60E, 0x0000034E },
+ { 0x0213F0FE991019A4, 0x00003178, 0xFFFFEA52, 0x00000513, 0x00001AD0, 0xFFFFF793, 0x0000031F, 0x00001AD0, 0xFFFFF793, 0x0000031F },
+ { 0x0213F0FD42D44104, 0x00003A2C, 0xFFFFE346, 0x00000641, 0x000023D0, 0xFFFFF0CE, 0x00000433, 0x000023D0, 0xFFFFF0CE, 0x00000433 },
+ { 0x0213F0FD42D818C4, 0x000028FD, 0xFFFFF02A, 0x0000042B, 0x0000152B, 0xFFFFFB90, 0x00000289, 0x0000152B, 0xFFFFFB90, 0x00000289 },
+ { 0x0213F0FE990E3084, 0x000030DE, 0xFFFFEBDF, 0x000004BE, 0x00001CDC, 0xFFFFF747, 0x0000031C, 0x00001CDC, 0xFFFFF747, 0x0000031C },
+ { 0x0213F0FE99021944, 0x000036CB, 0xFFFFE6EE, 0x00000596, 0x00002096, 0xFFFFF3C2, 0x000003BB, 0x00002096, 0xFFFFF3C2, 0x000003BB },
+ { 0x0213F0FE990C48C4, 0x00003172, 0xFFFFEAC1, 0x000004F4, 0x00001C87, 0xFFFFF6CD, 0x00000337, 0x00001C87, 0xFFFFF6CD, 0x00000337 },
+ { 0x0213F0FD42D24864, 0x00004A18, 0xFFFFDB34, 0x00000758, 0x0000213C, 0xFFFFF3A2, 0x000003AC, 0x0000213C, 0xFFFFF3A2, 0x000003AC },
+ { 0x0213F0FE99022104, 0x000031F3, 0xFFFFEB73, 0x000004C6, 0x00001B23, 0xFFFFF7CB, 0x0000031A, 0x00001B23, 0xFFFFF7CB, 0x0000031A },
+ { 0x0213F0FE990A2924, 0x000031C0, 0xFFFFEABA, 0x000004F7, 0x00001A5A, 0xFFFFF845, 0x000002FF, 0x00001A5A, 0xFFFFF845, 0x000002FF },
+ { 0x0213F0FE99104944, 0x00003B77, 0xFFFFE3B3, 0x00000623, 0x00001BCA, 0xFFFFF6F8, 0x00000333, 0x00001BCA, 0xFFFFF6F8, 0x00000333 },
+ { 0x0213F0FE990A3944, 0x000035AF, 0xFFFFE76D, 0x00000588, 0x00001C16, 0xFFFFF6AB, 0x00000341, 0x00001C16, 0xFFFFF6AB, 0x00000341 },
+ { 0x0213EA94DE0438C4, 0x000032AD, 0xFFFFEA8E, 0x000004F8, 0x00001A3A, 0xFFFFF832, 0x0000030E, 0x00001A3A, 0xFFFFF832, 0x0000030E },
+ { 0x0213F0FE99104884, 0x00002E92, 0xFFFFEBD2, 0x000004DA, 0x00001E04, 0xFFFFF51E, 0x0000038A, 0x00001E04, 0xFFFFF51E, 0x0000038A },
+ { 0x0213F0FD42D440A4, 0x00003E57, 0xFFFFE0F7, 0x0000068F, 0x000021F1, 0xFFFFF1C6, 0x00000411, 0x000021F1, 0xFFFFF1C6, 0x00000411 },
+ { 0x0213F0FE990821A4, 0x00003598, 0xFFFFE8BB, 0x00000535, 0x00001B62, 0xFFFFF764, 0x00000326, 0x00001B62, 0xFFFFF764, 0x00000326 },
+ { 0x0213F0FE990A3884, 0x00002B15, 0xFFFFEDEC, 0x00000487, 0x00001E8B, 0xFFFFF4AB, 0x0000039F, 0x00001E8B, 0xFFFFF4AB, 0x0000039F },
+ { 0x0213EA94DE060904, 0x0000267E, 0xFFFFF1A7, 0x000003E1, 0x000021C1, 0xFFFFF2E9, 0x000003EA, 0x000021C1, 0xFFFFF2E9, 0x000003EA },
+ { 0x0213EA94DE0239A4, 0x00002ED7, 0xFFFFEC88, 0x000004A6, 0x00001DEC, 0xFFFFF57C, 0x00000378, 0x00001DEC, 0xFFFFF57C, 0x00000378 },
+ { 0x0213EA94DE0441A4, 0x00003365, 0xFFFFE946, 0x00000536, 0x000019E9, 0xFFFFF7E0, 0x0000031D, 0x000019E9, 0xFFFFF7E0, 0x0000031D },
+ { 0x0213F0FE991818E4, 0x000029A4, 0xFFFFF0FD, 0x000003FE, 0x0000163F, 0xFFFFFB68, 0x00000299, 0x0000163F, 0xFFFFFB68, 0x00000299 },
+ { 0x0213EA94DE021904, 0x0000348D, 0xFFFFE9F7, 0x00000509, 0x000017A0, 0xFFFFFA59, 0x000002B6, 0x000017A0, 0xFFFFFA59, 0x000002B6 },
+ { 0x0213F0FE990610C4, 0x00003144, 0xFFFFEB23, 0x000004D9, 0x00001C9B, 0xFFFFF664, 0x00000351, 0x00001C9B, 0xFFFFF664, 0x00000351 },
+ { 0x0213EA94DE0620E4, 0x00002E95, 0xFFFFEE1A, 0x00000463, 0x00001707, 0xFFFFFAB7, 0x000002B3, 0x00001707, 0xFFFFFAB7, 0x000002B3 },
+ { 0x0213F0FD42D41864, 0x0000489C, 0xFFFFDA43, 0x000007AC, 0x00002866, 0xFFFFED6B, 0x000004D0, 0x00002866, 0xFFFFED6B, 0x000004D0 },
+ { 0x0213F0FE99161844, 0x00002895, 0xFFFFF10A, 0x0000040A, 0x000013E9, 0xFFFFFC9F, 0x0000026E, 0x000013E9, 0xFFFFFC9F, 0x0000026E },
+ { 0x0213F0FE99061964, 0x000033A0, 0xFFFFE9B1, 0x00000510, 0x00001D96, 0xFFFFF5AE, 0x0000036F, 0x00001D96, 0xFFFFF5AE, 0x0000036F },
+ { 0x0213F0FE99083984, 0x0000327C, 0xFFFFEAEA, 0x000004DD, 0x00001D45, 0xFFFFF649, 0x00000356, 0x00001D45, 0xFFFFF649, 0x00000356 },
+ { 0x0213EA94DE0248A4, 0x000031DF, 0xFFFFE9AB, 0x0000052F, 0x000019C8, 0xFFFFF7B7, 0x00000321, 0x000019C8, 0xFFFFF7B7, 0x00000321 },
+ { 0x0213F0FE991640A4, 0x00002BCC, 0xFFFFEEF4, 0x0000045C, 0x000015CD, 0xFFFFFB58, 0x0000029E, 0x000015CD, 0xFFFFFB58, 0x0000029E },
+ { 0x0213F0FE990638E4, 0x00003534, 0xFFFFEA10, 0x000004EB, 0x00001BB6, 0xFFFFF7B9, 0x00000314, 0x00001BB6, 0xFFFFF7B9, 0x00000314 },
+ { 0x0213F0FE99041984, 0x00002F4F, 0xFFFFEC35, 0x000004B9, 0x0000205D, 0xFFFFF47F, 0x00000392, 0x0000205D, 0xFFFFF47F, 0x00000392 },
+ { 0x0213F0FE990C20A4, 0x00003295, 0xFFFFEB1C, 0x000004D6, 0x000019C1, 0xFFFFF931, 0x000002D5, 0x000019C1, 0xFFFFF931, 0x000002D5 },
+ { 0x0213F0FE99024144, 0x00003557, 0xFFFFE7F7, 0x00000568, 0x00002342, 0xFFFFF1F9, 0x00000405, 0x00002342, 0xFFFFF1F9, 0x00000405 },
+ { 0x0213F0FE990450C4, 0x00003487, 0xFFFFE872, 0x0000055D, 0x000019D7, 0xFFFFF823, 0x0000030C, 0x000019D7, 0xFFFFF823, 0x0000030C },
+ { 0x0213F0FE992C3944, 0x0000378F, 0xFFFFE7A6, 0x00000566, 0x00001875, 0xFFFFFA04, 0x000002AF, 0x00001875, 0xFFFFFA04, 0x000002AF },
+ { 0x0213EA94DE0230E4, 0x00002A67, 0xFFFFF157, 0x000003DD, 0x000017BD, 0xFFFFFA53, 0x000002D1, 0x000017BD, 0xFFFFFA53, 0x000002D1 },
+ { 0x0213F0FD42D220E4, 0x000030B5, 0xFFFFEB32, 0x000004D9, 0x00002129, 0xFFFFF38A, 0x000003BB, 0x00002129, 0xFFFFF38A, 0x000003BB },
+ { 0x0213F0FE990610A4, 0x00003786, 0xFFFFE703, 0x00000584, 0x00001D63, 0xFFFFF5DC, 0x00000367, 0x00001D63, 0xFFFFF5DC, 0x00000367 },
+ { 0x0213F0FD42DA20C4, 0x0000346A, 0xFFFFE93E, 0x0000052C, 0x00001B27, 0xFFFFF79D, 0x0000031F, 0x00001B27, 0xFFFFF79D, 0x0000031F },
+ { 0x0213F0FE990E3024, 0x0000294E, 0xFFFFF0A5, 0x00000409, 0x00001928, 0xFFFFF93B, 0x000002E6, 0x00001928, 0xFFFFF93B, 0x000002E6 },
+ { 0x0213F0FD42D410C4, 0x00003E09, 0xFFFFE0FF, 0x00000694, 0x000025A0, 0xFFFFEF0F, 0x0000048F, 0x000025A0, 0xFFFFEF0F, 0x0000048F },
+ { 0x0213F0FE990A2964, 0x00003197, 0xFFFFEA06, 0x00000520, 0x00001B42, 0xFFFFF73B, 0x0000032A, 0x00001B42, 0xFFFFF73B, 0x0000032A },
+ { 0x0213F0FE99161864, 0x000022CB, 0xFFFFF3FC, 0x000003A3, 0x00001449, 0xFFFFFBD0, 0x00000297, 0x00001449, 0xFFFFFBD0, 0x00000297 },
+ { 0x0213F0FD42D82944, 0x00002A79, 0xFFFFEFD2, 0x00000433, 0x00001585, 0xFFFFFB92, 0x0000028E, 0x00001585, 0xFFFFFB92, 0x0000028E },
+ { 0x0213F0FE990C4184, 0x00003249, 0xFFFFEA92, 0x000004F4, 0x000019CB, 0xFFFFF8CF, 0x000002E1, 0x000019CB, 0xFFFFF8CF, 0x000002E1 },
+ { 0x0213EA94DE0218A4, 0x00002CEA, 0xFFFFEE46, 0x00000463, 0x00001A5E, 0xFFFFF83C, 0x0000030D, 0x00001A5E, 0xFFFFF83C, 0x0000030D },
+ { 0x0213F0FD42DC5144, 0x00003AE2, 0xFFFFE422, 0x00000600, 0x00001C65, 0xFFFFF62F, 0x0000034B, 0x00001C65, 0xFFFFF62F, 0x0000034B },
+ { 0x0213F0FE99181184, 0x000026A0, 0xFFFFF1C2, 0x000003F8, 0x000010E5, 0xFFFFFE56, 0x0000022A, 0x000010E5, 0xFFFFFE56, 0x0000022A },
+ { 0x0213F0FE992829A4, 0x00002A7B, 0xFFFFF063, 0x00000417, 0x000016FC, 0xFFFFFAD7, 0x000002B1, 0x000016FC, 0xFFFFFAD7, 0x000002B1 },
+ { 0x0213F0FE993210C4, 0x00003092, 0xFFFFEAB9, 0x00000507, 0x00001AE3, 0xFFFFF783, 0x00000323, 0x00001AE3, 0xFFFFF783, 0x00000323 },
+ { 0x0213F0FE990438E4, 0x00003265, 0xFFFFEBE8, 0x000004AA, 0x00001D65, 0xFFFFF73F, 0x00000321, 0x00001D65, 0xFFFFF73F, 0x00000321 },
+ { 0x0213EA94DE023084, 0x00002F14, 0xFFFFECC2, 0x000004A4, 0x00001A8D, 0xFFFFF7F3, 0x0000031D, 0x00001A8D, 0xFFFFF7F3, 0x0000031D },
+ { 0x0213F0FD42DC10E4, 0x000035FB, 0xFFFFE6D3, 0x000005AC, 0x00001B19, 0xFFFFF712, 0x00000338, 0x00001B19, 0xFFFFF712, 0x00000338 },
+ { 0x0213F0FD42DA2124, 0x00003519, 0xFFFFE8CC, 0x0000053A, 0x00001A0F, 0xFFFFF86E, 0x000002F5, 0x00001A0F, 0xFFFFF86E, 0x000002F5 },
+ { 0x0213F0FE992C2144, 0x0000364C, 0xFFFFE879, 0x00000541, 0x00001A42, 0xFFFFF8BA, 0x000002E2, 0x00001A42, 0xFFFFF8BA, 0x000002E2 },
+ { 0x0213EA94DE0218C4, 0x000029BA, 0xFFFFF09A, 0x00000408, 0x00001986, 0xFFFFF8D9, 0x000002FE, 0x00001986, 0xFFFFF8D9, 0x000002FE },
+ { 0x0213F0FD42DA38E4, 0x00003507, 0xFFFFE961, 0x00000518, 0x00001B79, 0xFFFFF775, 0x00000325, 0x00001B79, 0xFFFFF775, 0x00000325 },
+ { 0x0213F0FD42DC3184, 0x00003AD5, 0xFFFFE415, 0x00000613, 0x00001CB4, 0xFFFFF66D, 0x00000348, 0x00001CB4, 0xFFFFF66D, 0x00000348 },
+ { 0x0213F0FE991640E4, 0x000023D1, 0xFFFFF42B, 0x0000038F, 0x00001546, 0xFFFFFBA0, 0x0000029F, 0x00001546, 0xFFFFFBA0, 0x0000029F },
+ { 0x0213F0FE990A1924, 0x0000399E, 0xFFFFE518, 0x000005E7, 0x00001990, 0xFFFFF871, 0x000002FB, 0x00001990, 0xFFFFF871, 0x000002FB },
+ { 0x0213F0FD42D82964, 0x00002EDE, 0xFFFFEC93, 0x000004B8, 0x0000152C, 0xFFFFFBB3, 0x0000027E, 0x0000152C, 0xFFFFFBB3, 0x0000027E },
+ { 0x0213EA94DE042964, 0x00003140, 0xFFFFEBC9, 0x000004BB, 0x000016BE, 0xFFFFFB0A, 0x00000288, 0x000016BE, 0xFFFFFB0A, 0x00000288 },
+ { 0x0213F0FE99064064, 0x000030F6, 0xFFFFEB89, 0x000004CD, 0x0000185D, 0xFFFFF95A, 0x000002D9, 0x0000185D, 0xFFFFF95A, 0x000002D9 },
+ { 0x0213F0FE99023844, 0x0000389C, 0xFFFFE65A, 0x000005A2, 0x0000195D, 0xFFFFF8C8, 0x000002E8, 0x0000195D, 0xFFFFF8C8, 0x000002E8 },
+ { 0x0213F0FE99042104, 0x0000362B, 0xFFFFE9EC, 0x000004F6, 0x00001605, 0xFFFFFC1C, 0x00000263, 0x00001605, 0xFFFFFC1C, 0x00000263 },
+ { 0x0213F0FE992A1964, 0x00002946, 0xFFFFF04F, 0x00000426, 0x000015BA, 0xFFFFFB2F, 0x000002A3, 0x000015BA, 0xFFFFFB2F, 0x000002A3 },
+ { 0x0213F0FE99082184, 0x0000368E, 0xFFFFE837, 0x0000054A, 0x000017D7, 0xFFFFF9EB, 0x000002BA, 0x000017D7, 0xFFFFF9EB, 0x000002BA },
+ { 0x0213F0FD42DA2844, 0x00002E74, 0xFFFFEBE8, 0x000004DA, 0x00001DD6, 0xFFFFF57E, 0x00000379, 0x00001DD6, 0xFFFFF57E, 0x00000379 },
+ { 0x0213F0FE99041944, 0x0000322D, 0xFFFFEAA8, 0x000004F5, 0x00001B55, 0xFFFFF7DD, 0x0000030B, 0x00001B55, 0xFFFFF7DD, 0x0000030B },
+ { 0x0213F0FE99181904, 0x00002A29, 0xFFFFF07B, 0x00000416, 0x00001671, 0xFFFFFB3E, 0x0000029F, 0x00001671, 0xFFFFFB3E, 0x0000029F },
+ { 0x0213F0FD42DA2104, 0x000030F6, 0xFFFFEB89, 0x000004CD, 0x00001815, 0xFFFFF9AE, 0x000002C9, 0x00001815, 0xFFFFF9AE, 0x000002C9 },
+ { 0x0213F0FE990E10E4, 0x0000265F, 0xFFFFF1CB, 0x000003D5, 0x00001ED2, 0xFFFFF539, 0x0000037A, 0x00001ED2, 0xFFFFF539, 0x0000037A },
+ { 0x0213F0FE99162184, 0x000027A8, 0xFFFFF10D, 0x00000413, 0x000014B5, 0xFFFFFBA1, 0x00000299, 0x000014B5, 0xFFFFFBA1, 0x00000299 },
+ { 0x0213F0FE99043064, 0x00002CEE, 0xFFFFEDF6, 0x00000476, 0x00001A99, 0xFFFFF83E, 0x00000305, 0x00001A99, 0xFFFFF83E, 0x00000305 },
+ { 0x0213F0FE990640C4, 0x0000346C, 0xFFFFEA17, 0x000004EF, 0x00001D38, 0xFFFFF69F, 0x0000033D, 0x00001D38, 0xFFFFF69F, 0x0000033D },
+ { 0x0213F0FD42DA2944, 0x00002DBB, 0xFFFFED35, 0x00000490, 0x000018C1, 0xFFFFF930, 0x000002DA, 0x000018C1, 0xFFFFF930, 0x000002DA },
+ { 0x0213F0FE99042924, 0x000038DF, 0xFFFFE8A7, 0x0000051E, 0x00001B59, 0xFFFFF915, 0x000002D3, 0x00001B59, 0xFFFFF915, 0x000002D3 },
+ { 0x0213F0FE99080944, 0x00003384, 0xFFFFE979, 0x00000524, 0x00001AF3, 0xFFFFF74C, 0x0000032F, 0x00001AF3, 0xFFFFF74C, 0x0000032F },
+ { 0x0213F0FE99181864, 0x0000258B, 0xFFFFF2AE, 0x000003CB, 0x0000190C, 0xFFFFF93E, 0x000002EF, 0x0000190C, 0xFFFFF93E, 0x000002EF },
+ { 0x0213F0FE99103884, 0x000034F1, 0xFFFFE84B, 0x0000055E, 0x00001CB8, 0xFFFFF670, 0x0000034A, 0x00001CB8, 0xFFFFF670, 0x0000034A },
+ { 0x0213F0FE990C2104, 0x000030FB, 0xFFFFECD2, 0x00000488, 0x00001BF4, 0xFFFFF821, 0x00000302, 0x00001BF4, 0xFFFFF821, 0x00000302 },
+ { 0x0213F0FE99063044, 0x000036A6, 0xFFFFE815, 0x00000556, 0x000018FD, 0xFFFFF925, 0x000002DF, 0x000018FD, 0xFFFFF925, 0x000002DF },
+ { 0x0213EA94DE023044, 0x0000302A, 0xFFFFEB79, 0x000004E0, 0x00001C11, 0xFFFFF694, 0x00000358, 0x00001C11, 0xFFFFF694, 0x00000358 },
+ { 0x0213F0FE99181124, 0x00002555, 0xFFFFF2C4, 0x000003CB, 0x000017E3, 0xFFFFFA1F, 0x000002CB, 0x000017E3, 0xFFFFFA1F, 0x000002CB },
+ { 0x0213F0FE990A3164, 0x000032A3, 0xFFFFE933, 0x00000544, 0x000019D3, 0xFFFFF81A, 0x00000306, 0x000019D3, 0xFFFFF81A, 0x00000306 },
+ { 0x0213F0FD42D85104, 0x00002B91, 0xFFFFED81, 0x000004A9, 0x0000158B, 0xFFFFFAB9, 0x000002AC, 0x0000158B, 0xFFFFFAB9, 0x000002AC },
+ { 0x0213F0FE990E20C4, 0x00003537, 0xFFFFE912, 0x0000052C, 0x00001C8A, 0xFFFFF754, 0x0000031B, 0x00001C8A, 0xFFFFF754, 0x0000031B },
+ { 0x0213EA94DE063184, 0x000032E1, 0xFFFFEA5A, 0x000004F9, 0x000017B4, 0xFFFFF9D9, 0x000002C2, 0x000017B4, 0xFFFFF9D9, 0x000002C2 },
+ { 0x0213F0FD42D210C4, 0x00003B76, 0xFFFFE330, 0x00000636, 0x000026FB, 0xFFFFEF06, 0x00000481, 0x000026FB, 0xFFFFEF06, 0x00000481 },
+ { 0x0213F0FE99042144, 0x0000320C, 0xFFFFEB84, 0x000004C3, 0x00001A3A, 0xFFFFF8E9, 0x000002DF, 0x00001A3A, 0xFFFFF8E9, 0x000002DF },
+ { 0x0213F0FE99023984, 0x0000317D, 0xFFFFEA1F, 0x00000515, 0x00002100, 0xFFFFF31B, 0x000003DD, 0x00002100, 0xFFFFF31B, 0x000003DD },
+ { 0x0213F0FD42D43164, 0x00003DCB, 0xFFFFE0B4, 0x000006B4, 0x00002160, 0xFFFFF269, 0x000003F0, 0x00002160, 0xFFFFF269, 0x000003F0 },
+ { 0x0213F0FE991618C4, 0x00002737, 0xFFFFF218, 0x000003E1, 0x000015B5, 0xFFFFFB8F, 0x0000029C, 0x000015B5, 0xFFFFFB8F, 0x0000029C },
+ { 0x0213EA94DE023184, 0x0000318F, 0xFFFFEB3F, 0x000004D8, 0x00001938, 0xFFFFF8E9, 0x000002EB, 0x00001938, 0xFFFFF8E9, 0x000002EB },
+ { 0x0213F0FE991048C4, 0x000031BD, 0xFFFFE9DE, 0x00000527, 0x000018A7, 0xFFFFF8CA, 0x000002ED, 0x000018A7, 0xFFFFF8CA, 0x000002ED },
+ { 0x0213F0FD42DA3884, 0x00002F77, 0xFFFFEC2F, 0x000004B4, 0x00001D25, 0xFFFFF61B, 0x0000035D, 0x00001D25, 0xFFFFF61B, 0x0000035D },
+ { 0x0213F0FE990E4904, 0x00002CCA, 0xFFFFEDB3, 0x0000047C, 0x00001FBD, 0xFFFFF4A7, 0x00000391, 0x00001FBD, 0xFFFFF4A7, 0x00000391 },
+ { 0x0213F0FD42D438A4, 0x00003FF6, 0xFFFFE058, 0x000006A2, 0x000024CD, 0xFFFFF026, 0x00000452, 0x000024CD, 0xFFFFF026, 0x00000452 },
+ { 0x0213F0FE990A38E4, 0x00003161, 0xFFFFEAC8, 0x000004F3, 0x00001BB6, 0xFFFFF72A, 0x0000032B, 0x00001BB6, 0xFFFFF72A, 0x0000032B },
+ { 0x0213F0FD42D838A4, 0x00002EA0, 0xFFFFECA6, 0x000004B7, 0x000018C2, 0xFFFFF94E, 0x000002E1, 0x000018C2, 0xFFFFF94E, 0x000002E1 },
+ { 0x0213F0FE99182184, 0x00002F62, 0xFFFFEC9E, 0x000004B8, 0x00001531, 0xFFFFFBCD, 0x00000285, 0x00001531, 0xFFFFFBCD, 0x00000285 },
+ { 0x0213F0FE990440A4, 0x00003013, 0xFFFFEBD6, 0x000004C2, 0x00001B01, 0xFFFFF802, 0x000002FF, 0x00001B01, 0xFFFFF802, 0x000002FF },
+ { 0x0213F0FE99183064, 0x00002972, 0xFFFFF08D, 0x00000417, 0x00001A32, 0xFFFFF8A4, 0x00000305, 0x00001A32, 0xFFFFF8A4, 0x00000305 },
+ { 0x0213F0FD42D820E4, 0x00002E95, 0xFFFFED94, 0x00000487, 0x00001529, 0xFFFFFC26, 0x00000271, 0x00001529, 0xFFFFFC26, 0x00000271 },
+ { 0x0213F0FE990A1084, 0x00002D6A, 0xFFFFEC79, 0x000004C1, 0x00001AE2, 0xFFFFF725, 0x00000337, 0x00001AE2, 0xFFFFF725, 0x00000337 },
+ { 0x0213F0FE99021884, 0x000036B4, 0xFFFFE704, 0x00000591, 0x00001E7E, 0xFFFFF51C, 0x00000383, 0x00001E7E, 0xFFFFF51C, 0x00000383 },
+ { 0x0213F0FE99041844, 0x00002A6F, 0xFFFFEF70, 0x00000443, 0x00001BAA, 0xFFFFF752, 0x00000336, 0x00001BAA, 0xFFFFF752, 0x00000336 },
+ { 0x0213F0FE99183944, 0x00002C66, 0xFFFFEF5F, 0x0000043A, 0x000019F7, 0xFFFFF931, 0x000002EC, 0x000019F7, 0xFFFFF931, 0x000002EC },
+ { 0x0213EA94DE0631C4, 0x00003852, 0xFFFFE6AB, 0x00000590, 0x000019C1, 0xFFFFF8B1, 0x000002E5, 0x000019C1, 0xFFFFF8B1, 0x000002E5 },
+ { 0x0213F0FD42DA3124, 0x00003521, 0xFFFFE932, 0x00000523, 0x000018A9, 0xFFFFF96B, 0x000002D0, 0x000018A9, 0xFFFFF96B, 0x000002D0 },
+ { 0x0213F0FE99062164, 0x000031B9, 0xFFFFEB36, 0x000004D0, 0x00001D65, 0xFFFFF612, 0x0000035D, 0x00001D65, 0xFFFFF612, 0x0000035D },
+ { 0x0213F0FD42D41064, 0x00003ED0, 0xFFFFE135, 0x00000679, 0x00002351, 0xFFFFF0FE, 0x00000433, 0x00002351, 0xFFFFF0FE, 0x00000433 },
+ { 0x0213F0FE990A20E4, 0x000033ED, 0xFFFFE91A, 0x00000541, 0x00001C93, 0xFFFFF6A0, 0x0000034A, 0x00001C93, 0xFFFFF6A0, 0x0000034A },
+ { 0x0213EA94DE021844, 0x0000356F, 0xFFFFE8F7, 0x00000530, 0x000016BF, 0xFFFFFA85, 0x000002AB, 0x000016BF, 0xFFFFFA85, 0x000002AB },
+ { 0x0213F0FE991840E4, 0x00002304, 0xFFFFF4F3, 0x00000364, 0x000017CC, 0xFFFFFA41, 0x000002CA, 0x000017CC, 0xFFFFFA41, 0x000002CA },
+ { 0x0213F0FE99161164, 0x00002887, 0xFFFFEFD7, 0x00000450, 0x00001474, 0xFFFFFB94, 0x00000299, 0x00001474, 0xFFFFFB94, 0x00000299 },
+ { 0x0213F0FE99063064, 0x00003D0B, 0xFFFFE416, 0x000005EF, 0x00001C7E, 0xFFFFF71D, 0x00000325, 0x00001C7E, 0xFFFFF71D, 0x00000325 },
+ { 0x0213F0FE990810E4, 0x00003185, 0xFFFFEAFA, 0x000004E4, 0x00001A12, 0xFFFFF83C, 0x00000303, 0x00001A12, 0xFFFFF83C, 0x00000303 },
+ { 0x0213F0FE990A1944, 0x00003032, 0xFFFFEAE6, 0x000004FC, 0x00001B2A, 0xFFFFF73F, 0x0000032B, 0x00001B2A, 0xFFFFF73F, 0x0000032B },
+ { 0x0213F0FD42D838C4, 0x00002691, 0xFFFFF22D, 0x000003D6, 0x00001700, 0xFFFFFA6E, 0x000002C0, 0x00001700, 0xFFFFFA6E, 0x000002C0 },
+ { 0x0213F0FE990218A4, 0x00002B2F, 0xFFFFEEC4, 0x0000044B, 0x0000215F, 0xFFFFF33F, 0x000003D2, 0x0000215F, 0xFFFFF33F, 0x000003D2 },
+ { 0x0213F0FE990A4184, 0x000034AA, 0xFFFFE706, 0x000005B1, 0x00001B28, 0xFFFFF6B5, 0x00000349, 0x00001B28, 0xFFFFF6B5, 0x00000349 },
+ { 0x0213F0FD42DA2964, 0x0000307E, 0xFFFFEB38, 0x000004E6, 0x00001A22, 0xFFFFF83F, 0x00000300, 0x00001A22, 0xFFFFF83F, 0x00000300 },
+ { 0x0213F0FE990618A4, 0x000038D6, 0xFFFFE6D8, 0x0000057C, 0x00001B24, 0xFFFFF7E4, 0x00000307, 0x00001B24, 0xFFFFF7E4, 0x00000307 },
+ { 0x0213F0FE99183044, 0x00002757, 0xFFFFF1E8, 0x000003DD, 0x000017F5, 0xFFFFFA15, 0x000002C8, 0x000017F5, 0xFFFFFA15, 0x000002C8 },
+ { 0x0213F0FE99083184, 0x000031FC, 0xFFFFEB3E, 0x000004CE, 0x00001B4C, 0xFFFFF7AD, 0x00000319, 0x00001B4C, 0xFFFFF7AD, 0x00000319 },
+ { 0x0213F0FE99301864, 0x00002933, 0xFFFFF073, 0x0000040E, 0x00001C3C, 0xFFFFF701, 0x0000033C, 0x00001C3C, 0xFFFFF701, 0x0000033C },
+ { 0x0213F0FD42D218A4, 0x000040BB, 0xFFFFE066, 0x0000069A, 0x0000257F, 0xFFFFF08A, 0x00000435, 0x0000257F, 0xFFFFF08A, 0x00000435 },
+ { 0x0213F0FE991010A4, 0x0000305B, 0xFFFFEB9B, 0x000004CB, 0x00001996, 0xFFFFF846, 0x00000308, 0x00001996, 0xFFFFF846, 0x00000308 },
+ { 0x0213F0FE99064884, 0x000039C0, 0xFFFFE5D3, 0x000005B0, 0x00001A8D, 0xFFFFF7DA, 0x00000313, 0x00001A8D, 0xFFFFF7DA, 0x00000313 },
+ { 0x0213EA94DE0210A4, 0x00002E23, 0xFFFFED3F, 0x0000048F, 0x0000189D, 0xFFFFF94C, 0x000002DE, 0x0000189D, 0xFFFFF94C, 0x000002DE },
+ { 0x0213EA94DE021984, 0x0000332B, 0xFFFFE9F1, 0x00000516, 0x000018E6, 0xFFFFF8FE, 0x000002EC, 0x000018E6, 0xFFFFF8FE, 0x000002EC },
+ { 0x0213F0FE990838C4, 0x000034A0, 0xFFFFEA44, 0x000004E4, 0x00001ECD, 0xFFFFF5B4, 0x00000364, 0x00001ECD, 0xFFFFF5B4, 0x00000364 },
+ { 0x0213F0FD42D24104, 0x0000448C, 0xFFFFDF34, 0x000006A8, 0x0000231C, 0xFFFFF286, 0x000003D9, 0x0000231C, 0xFFFFF286, 0x000003D9 },
+ { 0x0213EA94DE062144, 0x00002D8C, 0xFFFFEE65, 0x00000456, 0x000018B1, 0xFFFFF9C8, 0x000002C8, 0x000018B1, 0xFFFFF9C8, 0x000002C8 },
+ { 0x0213F0FE99061904, 0x00003527, 0xFFFFE9BF, 0x000004FD, 0x00001D23, 0xFFFFF69F, 0x00000342, 0x00001D23, 0xFFFFF69F, 0x00000342 },
+ { 0x0213F0FD42DC38A4, 0x00002C51, 0xFFFFEDC3, 0x00000483, 0x00001BE0, 0xFFFFF720, 0x0000032D, 0x00001BE0, 0xFFFFF720, 0x0000032D },
+ { 0x0213F0FE990A3044, 0x00002C6C, 0xFFFFECEB, 0x000004B7, 0x00001C86, 0xFFFFF5E7, 0x00000371, 0x00001C86, 0xFFFFF5E7, 0x00000371 },
+ { 0x0213F0FE99045144, 0x000037CF, 0xFFFFE6BE, 0x00000599, 0x000018CD, 0xFFFFF967, 0x000002C7, 0x000018CD, 0xFFFFF967, 0x000002C7 },
+ { 0x0213F0FE99103164, 0x00002E6F, 0xFFFFED1D, 0x0000048E, 0x00001ADC, 0xFFFFF7F4, 0x0000030E, 0x00001ADC, 0xFFFFF7F4, 0x0000030E },
+ { 0x0213F0FD42D42984, 0x00003FF3, 0xFFFFDF13, 0x000006F9, 0x000025BF, 0xFFFFEEEE, 0x00000497, 0x000025BF, 0xFFFFEEEE, 0x00000497 },
+ { 0x0213F0FD42DC5104, 0x00004135, 0xFFFFDF97, 0x000006CC, 0x00001D52, 0xFFFFF541, 0x00000383, 0x00001D52, 0xFFFFF541, 0x00000383 },
+ { 0x0213F0FD42DC20E4, 0x00002EA9, 0xFFFFEDDB, 0x0000045F, 0x0000197C, 0xFFFFF8E1, 0x000002F0, 0x0000197C, 0xFFFFF8E1, 0x000002F0 },
+ { 0x0213EA94DE043084, 0x0000345C, 0xFFFFE922, 0x00000532, 0x00001922, 0xFFFFF8C7, 0x000002F1, 0x00001922, 0xFFFFF8C7, 0x000002F1 },
+ { 0x0213F0FE99064124, 0x000035C4, 0xFFFFE8FE, 0x00000521, 0x00001C87, 0xFFFFF6F3, 0x00000330, 0x00001C87, 0xFFFFF6F3, 0x00000330 },
+ { 0x0213F0FD42D83164, 0x00002888, 0xFFFFF08A, 0x0000041E, 0x0000150F, 0xFFFFFB87, 0x00000291, 0x0000150F, 0xFFFFFB87, 0x00000291 },
+ { 0x0213F0FE990A1124, 0x000035E9, 0xFFFFE657, 0x000005CC, 0x00001BD6, 0xFFFFF664, 0x00000355, 0x00001BD6, 0xFFFFF664, 0x00000355 },
+ { 0x0213F0FE991648E4, 0x00002F94, 0xFFFFEBD0, 0x000004E5, 0x00001333, 0xFFFFFCA7, 0x00000266, 0x00001333, 0xFFFFFCA7, 0x00000266 },
+ { 0x0213F0FE99181964, 0x000029E7, 0xFFFFF009, 0x00000433, 0x0000144A, 0xFFFFFC37, 0x0000027D, 0x0000144A, 0xFFFFFC37, 0x0000027D },
+ { 0x0213F0FE992C1944, 0x00003418, 0xFFFFE979, 0x00000521, 0x00001D33, 0xFFFFF66B, 0x0000034A, 0x00001D33, 0xFFFFF66B, 0x0000034A },
+ { 0x0213EA94DE0440E4, 0x00003656, 0xFFFFE79D, 0x0000057A, 0x000017C2, 0xFFFFF992, 0x000002D4, 0x000017C2, 0xFFFFF992, 0x000002D4 },
+ { 0x0213F0FE990C40C4, 0x00002EB2, 0xFFFFECFE, 0x00000493, 0x00001F2A, 0xFFFFF543, 0x0000037B, 0x00001F2A, 0xFFFFF543, 0x0000037B },
+ { 0x0213F0FE99021124, 0x00002FC1, 0xFFFFEB3F, 0x000004E8, 0x00001CD0, 0xFFFFF5F7, 0x00000364, 0x00001CD0, 0xFFFFF5F7, 0x00000364 },
+ { 0x0213F0FE990C1124, 0x0000307B, 0xFFFFEB66, 0x000004DE, 0x00001953, 0xFFFFF8ED, 0x000002E4, 0x00001953, 0xFFFFF8ED, 0x000002E4 },
+ { 0x0213F0FD42DA1884, 0x00002CAA, 0xFFFFED07, 0x000004AC, 0x0000251C, 0xFFFFF086, 0x0000044D, 0x0000251C, 0xFFFFF086, 0x0000044D },
+ { 0x0213EA94DE043944, 0x00002C94, 0xFFFFEE5F, 0x0000045B, 0x000018D7, 0xFFFFF900, 0x000002EB, 0x000018D7, 0xFFFFF900, 0x000002EB },
+ { 0x0213F0FE99021864, 0x000031F1, 0xFFFFE9BE, 0x0000052E, 0x00001DDF, 0xFFFFF558, 0x00000380, 0x00001DDF, 0xFFFFF558, 0x00000380 },
+ { 0x0213F0FE990E50C4, 0x00002603, 0xFFFFF1E9, 0x000003DA, 0x00001B37, 0xFFFFF75A, 0x0000032F, 0x00001B37, 0xFFFFF75A, 0x0000032F },
+ { 0x0213F0FD42DA3044, 0x00003992, 0xFFFFE4F9, 0x000005EB, 0x00001775, 0xFFFFF9B8, 0x000002C2, 0x00001775, 0xFFFFF9B8, 0x000002C2 },
+ { 0x0213F0FE99184964, 0x000029DA, 0xFFFFF052, 0x0000041F, 0x000016E2, 0xFFFFFA99, 0x000002BB, 0x000016E2, 0xFFFFFA99, 0x000002BB },
+ { 0x0213F0FE99101064, 0x00002FF2, 0xFFFFEB8F, 0x000004DF, 0x00001AF6, 0xFFFFF7A1, 0x00000321, 0x00001AF6, 0xFFFFF7A1, 0x00000321 },
+ { 0x0213F0FE991608E4, 0x00002590, 0xFFFFF222, 0x000003EE, 0x0000130B, 0xFFFFFCC9, 0x00000268, 0x0000130B, 0xFFFFFCC9, 0x00000268 },
+ { 0x0213F0FE99024064, 0x000038A2, 0xFFFFE65F, 0x000005A2, 0x000018B1, 0xFFFFF917, 0x000002E1, 0x000018B1, 0xFFFFF917, 0x000002E1 },
+ { 0x0213F0FD42DC48E4, 0x000035FD, 0xFFFFE73C, 0x0000058D, 0x00001BB3, 0xFFFFF6E1, 0x00000337, 0x00001BB3, 0xFFFFF6E1, 0x00000337 },
+ { 0x0213F0FE991038C4, 0x00002AB7, 0xFFFFEF98, 0x00000429, 0x00001F35, 0xFFFFF539, 0x0000037C, 0x00001F35, 0xFFFFF539, 0x0000037C },
+ { 0x0213F0FE990A0944, 0x000034BA, 0xFFFFE73D, 0x000005A6, 0x000018A6, 0xFFFFF888, 0x000002FB, 0x000018A6, 0xFFFFF888, 0x000002FB },
+ { 0x0213F0FE99063844, 0x000032EA, 0xFFFFEA78, 0x000004F4, 0x00001AB6, 0xFFFFF812, 0x00000308, 0x00001AB6, 0xFFFFF812, 0x00000308 },
+ { 0x0213F0FE990C3044, 0x00002BE9, 0xFFFFEE9A, 0x00000457, 0x00001942, 0xFFFFF8D2, 0x000002F2, 0x00001942, 0xFFFFF8D2, 0x000002F2 },
+ { 0x0213F0FE99105124, 0x00002FAB, 0xFFFFEB76, 0x000004E1, 0x00001DCA, 0xFFFFF57D, 0x00000378, 0x00001DCA, 0xFFFFF57D, 0x00000378 },
+ { 0x0213F0FE992E2844, 0x0000330A, 0xFFFFE9E1, 0x0000051B, 0x00001CC4, 0xFFFFF6DF, 0x00000335, 0x00001CC4, 0xFFFFF6DF, 0x00000335 },
+ { 0x0213F0FE991828A4, 0x000027D8, 0xFFFFF276, 0x000003BF, 0x0000178A, 0xFFFFFABF, 0x000002B5, 0x0000178A, 0xFFFFFABF, 0x000002B5 },
+ { 0x0213F0FD42DC3864, 0x0000340A, 0xFFFFE86D, 0x00000562, 0x00001B85, 0xFFFFF719, 0x0000032F, 0x00001B85, 0xFFFFF719, 0x0000032F },
+ { 0x0213EA94DE063084, 0x00003879, 0xFFFFE73F, 0x00000578, 0x0000161C, 0xFFFFFB6B, 0x00000281, 0x0000161C, 0xFFFFFB6B, 0x00000281 },
+ { 0x0213F0FE99184064, 0x00002879, 0xFFFFF0F8, 0x0000040A, 0x00001749, 0xFFFFFA37, 0x000002CC, 0x00001749, 0xFFFFFA37, 0x000002CC },
+ { 0x0213F0FE99043964, 0x00002C3A, 0xFFFFEEA0, 0x0000044F, 0x00001D57, 0xFFFFF6C2, 0x00000332, 0x00001D57, 0xFFFFF6C2, 0x00000332 },
+ { 0x0213EA94DE021964, 0x000035BB, 0xFFFFE90D, 0x0000052A, 0x000017D9, 0xFFFFF9F5, 0x000002C3, 0x000017D9, 0xFFFFF9F5, 0x000002C3 },
+ { 0x0213EA94DE041124, 0x000031F1, 0xFFFFEAD4, 0x000004ED, 0x00001F10, 0xFFFFF539, 0x0000037D, 0x00001F10, 0xFFFFF539, 0x0000037D },
+ { 0x0213F0FE99102824, 0x00002A1A, 0xFFFFEFAD, 0x00000430, 0x00001D47, 0xFFFFF62F, 0x0000035E, 0x00001D47, 0xFFFFF62F, 0x0000035E },
+ { 0x0213F0FE99164924, 0x00002AF0, 0xFFFFEEDC, 0x00000465, 0x0000145F, 0xFFFFFBEB, 0x00000281, 0x0000145F, 0xFFFFFBEB, 0x00000281 },
+ { 0x0213F0FE99183164, 0x00002657, 0xFFFFF2E0, 0x000003B6, 0x00001664, 0xFFFFFB37, 0x000002A2, 0x00001664, 0xFFFFFB37, 0x000002A2 },
+ { 0x0213F0FD42D03864, 0x00003183, 0xFFFFE9F1, 0x0000052B, 0x00002020, 0xFFFFF3CE, 0x000003C1, 0x00002020, 0xFFFFF3CE, 0x000003C1 },
+ { 0x0213F0FD42C628E4, 0x00003240, 0xFFFFEB65, 0x000004C7, 0x00002425, 0xFFFFF245, 0x000003F3, 0x00002425, 0xFFFFF245, 0x000003F3 },
+ { 0x0213EA94DE321104, 0x000023D0, 0xFFFFF400, 0x00000397, 0x00001345, 0xFFFFFD6B, 0x00000241, 0x00001345, 0xFFFFFD6B, 0x00000241 },
+ { 0x0213F0FD42CE38A4, 0x00003440, 0xFFFFE872, 0x0000055B, 0x00002247, 0xFFFFF296, 0x000003E8, 0x00002247, 0xFFFFF296, 0x000003E8 },
+ { 0x0213F0FD42D04904, 0x00003275, 0xFFFFE970, 0x00000538, 0x00001F94, 0xFFFFF429, 0x000003AD, 0x00001F94, 0xFFFFF429, 0x000003AD },
+ { 0x0213F0FD42C640A4, 0x00003918, 0xFFFFE5DA, 0x000005B6, 0x000024FC, 0xFFFFF106, 0x00000426, 0x000024FC, 0xFFFFF106, 0x00000426 },
+ { 0x0213EA94DE062044, 0x0000334B, 0xFFFFEA39, 0x000004FD, 0x00001983, 0xFFFFF8F6, 0x000002E2, 0x00001983, 0xFFFFF8F6, 0x000002E2 },
+ { 0x0213F0FD42C64984, 0x00003B59, 0xFFFFE4D0, 0x000005DA, 0x00002605, 0xFFFFF090, 0x00000439, 0x00002605, 0xFFFFF090, 0x00000439 },
+ { 0x0213F0FD42D03124, 0x00003251, 0xFFFFEA46, 0x00000511, 0x00002781, 0xFFFFEF84, 0x00000470, 0x00002781, 0xFFFFEF84, 0x00000470 },
+ { 0x0213F0FD42CA3164, 0x00003304, 0xFFFFE926, 0x00000542, 0x00001EE9, 0xFFFFF4E4, 0x0000038B, 0x00001EE9, 0xFFFFF4E4, 0x0000038B },
+ { 0x0213F0FD42CC38C4, 0x00002F4C, 0xFFFFEC0C, 0x000004C4, 0x00001E49, 0xFFFFF578, 0x00000374, 0x00001E49, 0xFFFFF578, 0x00000374 },
+ { 0x0213EA94DE1C2164, 0x00002034, 0xFFFFF692, 0x0000034C, 0x000014B8, 0xFFFFFC5B, 0x00000294, 0x000014B8, 0xFFFFFC5B, 0x00000294 },
+ { 0x0213F0FD42CE4924, 0x0000385F, 0xFFFFE513, 0x000005F3, 0x000024E7, 0xFFFFF053, 0x00000450, 0x000024E7, 0xFFFFF053, 0x00000450 },
+ { 0x0213EA94DE1C40E4, 0x00001D70, 0xFFFFF821, 0x0000030F, 0x00001541, 0xFFFFFBB4, 0x000002B0, 0x00001541, 0xFFFFFBB4, 0x000002B0 },
+ { 0x0213F0FD42D02084, 0x000034EB, 0xFFFFE7FF, 0x00000575, 0x000019B4, 0xFFFFF836, 0x00000308, 0x000019B4, 0xFFFFF836, 0x00000308 },
+ { 0x0213F0FD42D050E4, 0x000037C9, 0xFFFFE5D4, 0x000005CD, 0x000026A1, 0xFFFFEF0C, 0x00000491, 0x000026A1, 0xFFFFEF0C, 0x00000491 },
+ { 0x0213EA94DE121944, 0x00002918, 0xFFFFF148, 0x000003E9, 0x00001A49, 0xFFFFF94C, 0x000002CF, 0x00001A49, 0xFFFFF94C, 0x000002CF },
+ { 0x0213F0FD42CA4064, 0x00002F90, 0xFFFFEAB5, 0x00000514, 0x00001707, 0xFFFFF9C7, 0x000002C4, 0x00001707, 0xFFFFF9C7, 0x000002C4 },
+ { 0x0213EA94DE062064, 0x0000327E, 0xFFFFEA99, 0x000004F4, 0x0000194F, 0xFFFFF929, 0x000002DC, 0x0000194F, 0xFFFFF929, 0x000002DC },
+ { 0x0213F0FD42C64084, 0x0000326F, 0xFFFFE9CF, 0x00000519, 0x00002240, 0xFFFFF299, 0x000003E7, 0x00002240, 0xFFFFF299, 0x000003E7 },
+ { 0x0213EA94DE321124, 0x000022FB, 0xFFFFF4C6, 0x00000371, 0x00001506, 0xFFFFFC73, 0x00000265, 0x00001506, 0xFFFFFC73, 0x00000265 },
+ { 0x0213F0FD42CA3924, 0x00003AD6, 0xFFFFE470, 0x000005FE, 0x00001F03, 0xFFFFF4F3, 0x00000387, 0x00001F03, 0xFFFFF4F3, 0x00000387 },
+ { 0x0213EA94DE201124, 0x00001F11, 0xFFFFF756, 0x00000332, 0x00001666, 0xFFFFFB8A, 0x000002B2, 0x00001666, 0xFFFFFB8A, 0x000002B2 },
+ { 0x0213EA94DE0238A4, 0x00002A5F, 0xFFFFEFA7, 0x00000430, 0x00001943, 0xFFFFF8C6, 0x000002F7, 0x00001943, 0xFFFFF8C6, 0x000002F7 },
+ { 0x0213EA94DE1650E4, 0x0000235E, 0xFFFFF3B4, 0x000003B3, 0x00001489, 0xFFFFFBCF, 0x0000029B, 0x00001489, 0xFFFFFBCF, 0x0000029B },
+ { 0x0213F0FD42CC38A4, 0x00003570, 0xFFFFE780, 0x0000058D, 0x00001B1D, 0xFFFFF767, 0x00000325, 0x00001B1D, 0xFFFFF767, 0x00000325 },
+ { 0x0213EA94DE042064, 0x00003678, 0xFFFFE7C3, 0x00000569, 0x00001831, 0xFFFFF98E, 0x000002C8, 0x00001831, 0xFFFFF98E, 0x000002C8 },
+ { 0x0213EA94DE201864, 0x000020B9, 0xFFFFF625, 0x0000035A, 0x000015C5, 0xFFFFFB8A, 0x000002B5, 0x000015C5, 0xFFFFFB8A, 0x000002B5 },
+ { 0x0213F0FD42C63184, 0x00003985, 0xFFFFE529, 0x000005DD, 0x00002165, 0xFFFFF351, 0x000003C5, 0x00002165, 0xFFFFF351, 0x000003C5 },
+ { 0x0213F0FD42D02064, 0x0000322A, 0xFFFFE99D, 0x00000535, 0x000019A1, 0xFFFFF844, 0x00000305, 0x000019A1, 0xFFFFF844, 0x00000305 },
+ { 0x0213F0FD42D05104, 0x000033ED, 0xFFFFE834, 0x00000571, 0x00002094, 0xFFFFF33A, 0x000003DB, 0x00002094, 0xFFFFF33A, 0x000003DB },
+ { 0x0213EA94DE2040C4, 0x00001D10, 0xFFFFF84D, 0x0000030B, 0x00001659, 0xFFFFFB0A, 0x000002CB, 0x00001659, 0xFFFFFB0A, 0x000002CB },
+ { 0x0213EA94DE1C1124, 0x0000210F, 0xFFFFF644, 0x00000355, 0x00001A4A, 0xFFFFF90F, 0x00000310, 0x00001A4A, 0xFFFFF90F, 0x00000310 },
+ { 0x0213EA94DE164164, 0x00001CA8, 0xFFFFF813, 0x00000316, 0x00001440, 0xFFFFFC1C, 0x0000029D, 0x00001440, 0xFFFFFC1C, 0x0000029D },
+ { 0x0213EA94DE3210C4, 0x00002864, 0xFFFFF15A, 0x000003FA, 0x0000137F, 0xFFFFFD43, 0x00000248, 0x0000137F, 0xFFFFFD43, 0x00000248 },
+ { 0x0213F0FD42D04184, 0x00002CDB, 0xFFFFECFD, 0x000004A7, 0x00002472, 0xFFFFF0E1, 0x00000437, 0x00002472, 0xFFFFF0E1, 0x00000437 },
+ { 0x0213F0FD42CC5104, 0x00003348, 0xFFFFE8CA, 0x00000554, 0x00001E91, 0xFFFFF4D4, 0x00000392, 0x00001E91, 0xFFFFF4D4, 0x00000392 },
+ { 0x0213F0FD42C64944, 0x00003989, 0xFFFFE4BB, 0x000005F8, 0x00001ACB, 0xFFFFF780, 0x00000319, 0x00001ACB, 0xFFFFF780, 0x00000319 },
+ { 0x0213F0FD42CA2104, 0x00003238, 0xFFFFEA09, 0x0000051E, 0x00001F08, 0xFFFFF4F4, 0x0000038C, 0x00001F08, 0xFFFFF4F4, 0x0000038C },
+ { 0x0213EA94DE120904, 0x00002453, 0xFFFFF3B0, 0x0000038D, 0x00001AED, 0xFFFFF8A2, 0x000002EA, 0x00001AED, 0xFFFFF8A2, 0x000002EA },
+ { 0x0213EA94DE1C3024, 0x00002459, 0xFFFFF409, 0x000003A8, 0x000017B5, 0xFFFFFA53, 0x000002E1, 0x000017B5, 0xFFFFFA53, 0x000002E1 },
+ { 0x0213EA94DE021184, 0x0000310D, 0xFFFFEB78, 0x000004D0, 0x00001DC9, 0xFFFFF5D5, 0x00000368, 0x00001DC9, 0xFFFFF5D5, 0x00000368 },
+ { 0x0213EA94DE023104, 0x000031BF, 0xFFFFECA3, 0x00000498, 0x00001DC9, 0xFFFFF717, 0x00000336, 0x00001DC9, 0xFFFFF717, 0x00000336 },
+ { 0x0213F0FD42CE2104, 0x00003896, 0xFFFFE5DD, 0x000005C5, 0x000023E2, 0xFFFFF1A1, 0x00000416, 0x000023E2, 0xFFFFF1A1, 0x00000416 },
+ { 0x0213EA94DE323904, 0x000023CB, 0xFFFFF4C8, 0x00000372, 0x00001C33, 0xFFFFF7D5, 0x0000032A, 0x00001C33, 0xFFFFF7D5, 0x0000032A },
+ { 0x0213F0FD42D020C4, 0x00002F6B, 0xFFFFEBF0, 0x000004CE, 0x00001C89, 0xFFFFF689, 0x0000034D, 0x00001C89, 0xFFFFF689, 0x0000034D },
+ { 0x0213F0FD42CE3904, 0x00003E72, 0xFFFFE211, 0x0000065D, 0x0000218D, 0xFFFFF309, 0x000003DC, 0x0000218D, 0xFFFFF309, 0x000003DC },
+ { 0x0213EA94DE022084, 0x00002612, 0xFFFFF2C3, 0x000003AD, 0x000019F7, 0xFFFFF891, 0x000002FE, 0x000019F7, 0xFFFFF891, 0x000002FE },
+ { 0x0213EA94DE164184, 0x0000205D, 0xFFFFF59F, 0x00000372, 0x000012E6, 0xFFFFFD0A, 0x00000270, 0x000012E6, 0xFFFFFD0A, 0x00000270 },
+ { 0x0213F0FD42CA2124, 0x00002ECB, 0xFFFFEC47, 0x000004BD, 0x00001936, 0xFFFFF8D9, 0x000002E4, 0x00001936, 0xFFFFF8D9, 0x000002E4 },
+ { 0x0213EA94DE064904, 0x00002BDB, 0xFFFFEE6D, 0x00000458, 0x00001852, 0xFFFFF943, 0x000002D9, 0x00001852, 0xFFFFF943, 0x000002D9 },
+ { 0x0213EA94DE124904, 0x00003387, 0xFFFFE958, 0x00000534, 0x00001932, 0xFFFFF8FA, 0x000002E4, 0x00001932, 0xFFFFF8FA, 0x000002E4 },
+ { 0x0213EA94DE0208C4, 0x00002E3C, 0xFFFFED26, 0x00000495, 0x00001858, 0xFFFFF990, 0x000002D1, 0x00001858, 0xFFFFF990, 0x000002D1 },
+ { 0x0213EA94DE022964, 0x000033B8, 0xFFFFEA5C, 0x000004F9, 0x00001BD1, 0xFFFFF76A, 0x0000032E, 0x00001BD1, 0xFFFFF76A, 0x0000032E },
+ { 0x0213EA94DE062984, 0x00002BCE, 0xFFFFEEE9, 0x00000443, 0x00001982, 0xFFFFF90D, 0x000002DF, 0x00001982, 0xFFFFF90D, 0x000002DF },
+ { 0x0213F0FD42D048E4, 0x00003495, 0xFFFFE7D9, 0x0000057B, 0x00001D2A, 0xFFFFF5A5, 0x00000372, 0x00001D2A, 0xFFFFF5A5, 0x00000372 },
+ { 0x0213F0FD42CA38E4, 0x000034B1, 0xFFFFE88D, 0x00000556, 0x00002014, 0xFFFFF43A, 0x000003AA, 0x00002014, 0xFFFFF43A, 0x000003AA },
+ { 0x0213F0FD42CC3124, 0x00002F96, 0xFFFFEC84, 0x000004AD, 0x000024A2, 0xFFFFF1CE, 0x0000040A, 0x000024A2, 0xFFFFF1CE, 0x0000040A },
+ { 0x0213EA94DE161064, 0x0000203B, 0xFFFFF640, 0x00000359, 0x000014EC, 0xFFFFFC14, 0x0000029C, 0x000014EC, 0xFFFFFC14, 0x0000029C },
+ { 0x0213F0FD42D02984, 0x000034E2, 0xFFFFE7B8, 0x00000582, 0x00001938, 0xFFFFF872, 0x000002FA, 0x00001938, 0xFFFFF872, 0x000002FA },
+ { 0x0213EA94DE063124, 0x00002AC7, 0xFFFFF0C1, 0x000003F5, 0x00002268, 0xFFFFF39C, 0x000003C9, 0x00002268, 0xFFFFF39C, 0x000003C9 },
+ { 0x0213F0FD42C63144, 0x000036F6, 0xFFFFE77F, 0x00000571, 0x000027D9, 0xFFFFEF6F, 0x00000461, 0x000027D9, 0xFFFFEF6F, 0x00000461 },
+ { 0x0213EA94DE123124, 0x00002BAB, 0xFFFFF018, 0x00000419, 0x00002126, 0xFFFFF4E2, 0x0000038F, 0x00002126, 0xFFFFF4E2, 0x0000038F },
+ { 0x0213EA94DE323924, 0x000028C4, 0xFFFFF161, 0x000003F8, 0x0000180C, 0xFFFFFA4B, 0x000002C8, 0x0000180C, 0xFFFFFA4B, 0x000002C8 },
+ { 0x0213F0FD42CA2864, 0x00002F48, 0xFFFFEB62, 0x000004EE, 0x00001912, 0xFFFFF8C8, 0x000002EA, 0x00001912, 0xFFFFF8C8, 0x000002EA },
+ { 0x0213F0FD42CE2864, 0x000032DF, 0xFFFFE911, 0x00000545, 0x00001F06, 0xFFFFF485, 0x0000039C, 0x00001F06, 0xFFFFF485, 0x0000039C },
+ { 0x0213F0FD42D04144, 0x000035B8, 0xFFFFE74F, 0x00000590, 0x00001FD7, 0xFFFFF410, 0x000003AF, 0x00001FD7, 0xFFFFF410, 0x000003AF },
+ { 0x0213F0FD42D050C4, 0x00003608, 0xFFFFE6D7, 0x000005A9, 0x000024A6, 0xFFFFF075, 0x00000450, 0x000024A6, 0xFFFFF075, 0x00000450 },
+ { 0x0213F0FD42CA3884, 0x000030AB, 0xFFFFEAED, 0x000004F5, 0x000019EE, 0xFFFFF84E, 0x000002FC, 0x000019EE, 0xFFFFF84E, 0x000002FC },
+ { 0x0213EA94DE0620C4, 0x000030C6, 0xFFFFEC92, 0x0000049E, 0x000019BB, 0xFFFFF8F1, 0x000002F3, 0x000019BB, 0xFFFFF8F1, 0x000002F3 },
+ { 0x0213F0FD42C630A4, 0x00003B27, 0xFFFFE544, 0x000005C1, 0x00002697, 0xFFFFF072, 0x00000438, 0x00002697, 0xFFFFF072, 0x00000438 },
+ { 0x0213EA94DE1248E4, 0x00002F23, 0xFFFFEC48, 0x000004B9, 0x0000199A, 0xFFFFF8CF, 0x000002E9, 0x0000199A, 0xFFFFF8CF, 0x000002E9 },
+ { 0x0213EA94DE0629A4, 0x00002BD7, 0xFFFFEEAC, 0x00000450, 0x00001991, 0xFFFFF8F4, 0x000002E2, 0x00001991, 0xFFFFF8F4, 0x000002E2 },
+ { 0x0213EA94DE022024, 0x00003210, 0xFFFFEB24, 0x000004DE, 0x00001BDF, 0xFFFFF744, 0x00000333, 0x00001BDF, 0xFFFFF744, 0x00000333 },
+ { 0x0213EA94DE244144, 0x00002DDC, 0xFFFFED0D, 0x000004AC, 0x000019D0, 0xFFFFF869, 0x0000030F, 0x000019D0, 0xFFFFF869, 0x0000030F },
+ { 0x0213EA94DE203964, 0x000023E6, 0xFFFFF40C, 0x000003A9, 0x000014EB, 0xFFFFFBC4, 0x000002AF, 0x000014EB, 0xFFFFFBC4, 0x000002AF },
+ { 0x0213F0FD42CA29A4, 0x000030CE, 0xFFFFE9A5, 0x0000053C, 0x00001C45, 0xFFFFF60E, 0x0000035D, 0x00001C45, 0xFFFFF60E, 0x0000035D },
+ { 0x0213EA94DE161084, 0x00001E89, 0xFFFFF73A, 0x00000337, 0x0000157C, 0xFFFFFBC0, 0x000002AA, 0x0000157C, 0xFFFFFBC0, 0x000002AA },
+ { 0x0213F0FD42D04124, 0x000036C6, 0xFFFFE6CF, 0x000005A1, 0x00002457, 0xFFFFF11D, 0x0000042D, 0x00002457, 0xFFFFF11D, 0x0000042D },
+ { 0x0213EA94DE321944, 0x00002815, 0xFFFFF19A, 0x000003F2, 0x000016D2, 0xFFFFFB40, 0x00000299, 0x000016D2, 0xFFFFFB40, 0x00000299 },
+ { 0x0213EA94DE1C19A4, 0x00001FE2, 0xFFFFF660, 0x00000354, 0x000015A7, 0xFFFFFB47, 0x000002C1, 0x000015A7, 0xFFFFFB47, 0x000002C1 },
+ { 0x0213EA94DE161964, 0x00002114, 0xFFFFF634, 0x00000356, 0x000016C1, 0xFFFFFB43, 0x000002B8, 0x000016C1, 0xFFFFFB43, 0x000002B8 },
+ { 0x0213F0FD42CC28C4, 0x000028E3, 0xFFFFF075, 0x00000414, 0x0000203C, 0xFFFFF438, 0x000003B3, 0x0000203C, 0xFFFFF438, 0x000003B3 },
+ { 0x0213EA94DE1C3924, 0x00001EEB, 0xFFFFF7BB, 0x0000031A, 0x00001580, 0xFFFFFBD7, 0x000002AD, 0x00001580, 0xFFFFFBD7, 0x000002AD },
+ { 0x0213EA94DE2408C4, 0x00002BB2, 0xFFFFEE72, 0x00000470, 0x0000192C, 0xFFFFF91E, 0x000002E7, 0x0000192C, 0xFFFFF91E, 0x000002E7 },
+ { 0x0213EA94DE0650E4, 0x00003A3D, 0xFFFFE49D, 0x000005F5, 0x00001A3B, 0xFFFFF7B1, 0x00000320, 0x00001A3B, 0xFFFFF7B1, 0x00000320 },
+ { 0x0213F0FD42CE3164, 0x00002E93, 0xFFFFEC5A, 0x000004B4, 0x000025EB, 0xFFFFF03C, 0x0000044A, 0x000025EB, 0xFFFFF03C, 0x0000044A },
+ { 0x0213F0FD42CA20C4, 0x0000331F, 0xFFFFE97A, 0x00000531, 0x00001A06, 0xFFFFF850, 0x000002FD, 0x00001A06, 0xFFFFF850, 0x000002FD },
+ { 0x0213F0FD42C63964, 0x00003937, 0xFFFFE5A0, 0x000005C7, 0x0000235E, 0xFFFFF234, 0x000003F2, 0x0000235E, 0xFFFFF234, 0x000003F2 },
+ { 0x0213EA94DE1E3924, 0x00001DD0, 0xFFFFF80E, 0x00000319, 0x000015C7, 0xFFFFFB91, 0x000002BC, 0x000015C7, 0xFFFFFB91, 0x000002BC },
+ { 0x0213F0FD42D03964, 0x00003328, 0xFFFFE905, 0x0000054A, 0x00002054, 0xFFFFF3BF, 0x000003C0, 0x00002054, 0xFFFFF3BF, 0x000003C0 },
+ { 0x0213F0FD42CC1104, 0x00002FE5, 0xFFFFEA65, 0x00000520, 0x0000188B, 0xFFFFF8A7, 0x000002F5, 0x0000188B, 0xFFFFF8A7, 0x000002F5 },
+ { 0x0213F0FD42CA38A4, 0x00002ED3, 0xFFFFEC51, 0x000004B9, 0x00001888, 0xFFFFF96A, 0x000002CA, 0x00001888, 0xFFFFF96A, 0x000002CA },
+ { 0x0213F0FD42D03084, 0x00002FCC, 0xFFFFEB60, 0x000004EA, 0x00001F8D, 0xFFFFF436, 0x000003B4, 0x00001F8D, 0xFFFFF436, 0x000003B4 },
+ { 0x0213F0FD42CE4084, 0x0000329F, 0xFFFFE8F7, 0x0000054F, 0x000023DB, 0xFFFFF0EE, 0x0000043A, 0x000023DB, 0xFFFFF0EE, 0x0000043A },
+ { 0x0213EA94DE0438A4, 0x000030B5, 0xFFFFEBB8, 0x000004C4, 0x00001AFD, 0xFFFFF781, 0x00000329, 0x00001AFD, 0xFFFFF781, 0x00000329 },
+ { 0x0213EA94DE1E19A4, 0x00001BBF, 0xFFFFF8E2, 0x000002F7, 0x00001722, 0xFFFFFA85, 0x000002DB, 0x00001722, 0xFFFFFA85, 0x000002DB },
+ { 0x0213EA94DE022044, 0x000030E4, 0xFFFFEBE6, 0x000004BB, 0x00001C80, 0xFFFFF6E1, 0x0000033E, 0x00001C80, 0xFFFFF6E1, 0x0000033E },
+ { 0x0213EA94DE122944, 0x000030E2, 0xFFFFECD0, 0x00000492, 0x00001CE0, 0xFFFFF753, 0x0000032F, 0x00001CE0, 0xFFFFF753, 0x0000032F },
+ { 0x0213EA94DE322864, 0x00002513, 0xFFFFF323, 0x000003BC, 0x00001965, 0xFFFFF93C, 0x000002F0, 0x00001965, 0xFFFFF93C, 0x000002F0 },
+ { 0x0213EA94DE1610A4, 0x00002147, 0xFFFFF585, 0x0000037A, 0x000014CC, 0xFFFFFC3B, 0x00000296, 0x000014CC, 0xFFFFFC3B, 0x00000296 },
+ { 0x0213EA94DE322124, 0x00002507, 0xFFFFF432, 0x0000038A, 0x00001890, 0xFFFFFA61, 0x000002C6, 0x00001890, 0xFFFFFA61, 0x000002C6 },
+ { 0x0213EA94DE0638A4, 0x0000339B, 0xFFFFEA7D, 0x000004F0, 0x0000191E, 0xFFFFF944, 0x000002DF, 0x0000191E, 0xFFFFF944, 0x000002DF },
+ { 0x0213F0FD42CC28A4, 0x00002842, 0xFFFFF043, 0x00000427, 0x00001988, 0xFFFFF892, 0x000002F7, 0x00001988, 0xFFFFF892, 0x000002F7 },
+ { 0x0213F0FD42C618A4, 0x0000389D, 0xFFFFE5D8, 0x000005BF, 0x00001EE1, 0xFFFFF4EF, 0x00000387, 0x00001EE1, 0xFFFFF4EF, 0x00000387 },
+ { 0x0213F0FD42CE3184, 0x0000396D, 0xFFFFE4D7, 0x000005F2, 0x000020DA, 0xFFFFF34E, 0x000003CD, 0x000020DA, 0xFFFFF34E, 0x000003CD },
+ { 0x0213F0FD42CA3104, 0x0000355F, 0xFFFFE85A, 0x0000055F, 0x0000281F, 0xFFFFEF28, 0x0000047D, 0x0000281F, 0xFFFFEF28, 0x0000047D },
+ { 0x0213EA94DE1C50E4, 0x00002284, 0xFFFFF46E, 0x00000399, 0x00001498, 0xFFFFFBE3, 0x0000029C, 0x00001498, 0xFFFFFBE3, 0x0000029C },
+ { 0x0213EA94DE023944, 0x000031B6, 0xFFFFEB42, 0x000004D9, 0x00001F54, 0xFFFFF4D2, 0x00000399, 0x00001F54, 0xFFFFF4D2, 0x00000399 },
+ { 0x0213F0FD42C63064, 0x000035CE, 0xFFFFE79D, 0x00000578, 0x00001C78, 0xFFFFF68C, 0x00000344, 0x00001C78, 0xFFFFF68C, 0x00000344 },
+ { 0x0213EA94DE1E4964, 0x00001C0A, 0xFFFFF81B, 0x00000318, 0x00001492, 0xFFFFFBCC, 0x000002A5, 0x00001492, 0xFFFFFBCC, 0x000002A5 },
+ { 0x0213EA94DE022184, 0x00003492, 0xFFFFE95C, 0x00000526, 0x00001A97, 0xFFFFF81B, 0x0000030B, 0x00001A97, 0xFFFFF81B, 0x0000030B },
+ { 0x0213EA94DE163164, 0x00001E89, 0xFFFFF7D0, 0x0000031A, 0x000017A5, 0xFFFFFA99, 0x000002D9, 0x000017A5, 0xFFFFFA99, 0x000002D9 },
+ { 0x0213F0FD42CA48C4, 0x00002DCC, 0xFFFFEBE0, 0x000004DE, 0x000019BA, 0xFFFFF7F5, 0x0000030D, 0x000019BA, 0xFFFFF7F5, 0x0000030D },
+ { 0x0213EA94DE042984, 0x000030EF, 0xFFFFEBC1, 0x000004C0, 0x00001AA9, 0xFFFFF814, 0x0000030A, 0x00001AA9, 0xFFFFF814, 0x0000030A },
+ { 0x0213EA94DE245124, 0x00002EA3, 0xFFFFEBF6, 0x000004D8, 0x00001DCF, 0xFFFFF521, 0x00000399, 0x00001DCF, 0xFFFFF521, 0x00000399 },
+ { 0x0213EA94DE324164, 0x00002B5F, 0xFFFFEEA1, 0x0000046C, 0x000017EB, 0xFFFFF9C9, 0x000002D4, 0x000017EB, 0xFFFFF9C9, 0x000002D4 },
+ { 0x0213EA94DE024104, 0x00002C63, 0xFFFFEE82, 0x00000455, 0x00002268, 0xFFFFF29D, 0x000003F6, 0x00002268, 0xFFFFF29D, 0x000003F6 },
+ { 0x0213EA94DE121904, 0x00002B1A, 0xFFFFF016, 0x0000041C, 0x000019AA, 0xFFFFF988, 0x000002D2, 0x000019AA, 0xFFFFF988, 0x000002D2 },
+ { 0x0213F0FD42CA2964, 0x0000332F, 0xFFFFE934, 0x0000053B, 0x00001E47, 0xFFFFF566, 0x00000374, 0x00001E47, 0xFFFFF566, 0x00000374 },
+ { 0x0213F0FD42CA48E4, 0x00002995, 0xFFFFEEC1, 0x00000465, 0x0000178F, 0xFFFFF995, 0x000002C5, 0x0000178F, 0xFFFFF995, 0x000002C5 },
+ { 0x0213EA94DE201884, 0x00001C2E, 0xFFFFF932, 0x000002E9, 0x000015C2, 0xFFFFFBC5, 0x000002AD, 0x000015C2, 0xFFFFFBC5, 0x000002AD },
+ { 0x0213F0FD42C640E4, 0x00003B08, 0xFFFFE4E8, 0x000005D8, 0x0000209D, 0xFFFFF444, 0x00000398, 0x0000209D, 0xFFFFF444, 0x00000398 },
+ { 0x0213EA94DE0450E4, 0x00002F1F, 0xFFFFEB74, 0x000004EB, 0x00001F4C, 0xFFFFF3D4, 0x000003CE, 0x00001F4C, 0xFFFFF3D4, 0x000003CE },
+ { 0x0213EA94DE043884, 0x00003415, 0xFFFFE89F, 0x00000553, 0x0000186B, 0xFFFFF8E1, 0x000002EF, 0x0000186B, 0xFFFFF8E1, 0x000002EF },
+ { 0x0213F0FD42CC10C4, 0x00003441, 0xFFFFE779, 0x0000059D, 0x000019EA, 0xFFFFF7B2, 0x0000031F, 0x000019EA, 0xFFFFF7B2, 0x0000031F },
+ { 0x0213EA94DE164064, 0x00002174, 0xFFFFF546, 0x00000378, 0x00001456, 0xFFFFFC5F, 0x00000284, 0x00001456, 0xFFFFFC5F, 0x00000284 },
+ { 0x0213F0FD42CE40C4, 0x00003788, 0xFFFFE61E, 0x000005BF, 0x00001DF4, 0xFFFFF562, 0x00000374, 0x00001DF4, 0xFFFFF562, 0x00000374 },
+ { 0x0213EA94DE1E1844, 0x00001C41, 0xFFFFF8C1, 0x000002FC, 0x0000171E, 0xFFFFFA93, 0x000002DE, 0x0000171E, 0xFFFFFA93, 0x000002DE },
+ { 0x0213F0FD42CA3864, 0x00002B15, 0xFFFFEDEC, 0x00000487, 0x000017E4, 0xFFFFF934, 0x000002DF, 0x000017E4, 0xFFFFF934, 0x000002DF },
+ { 0x0213F0FD42CC3144, 0x0000327A, 0xFFFFEA71, 0x000004FF, 0x00001D96, 0xFFFFF63B, 0x00000351, 0x00001D96, 0xFFFFF63B, 0x00000351 },
+ { 0x0213EA94DE1E4064, 0x000023C6, 0xFFFFF3E5, 0x000003B6, 0x000014DE, 0xFFFFFC29, 0x00000294, 0x000014DE, 0xFFFFFC29, 0x00000294 },
+ { 0x0213EA94DE164944, 0x00001F96, 0xFFFFF5FA, 0x00000364, 0x00001397, 0xFFFFFC9D, 0x0000027D, 0x00001397, 0xFFFFFC9D, 0x0000027D },
+ { 0x0213EA94DE063144, 0x00002B51, 0xFFFFEFB5, 0x00000420, 0x00001ACA, 0xFFFFF824, 0x0000030D, 0x00001ACA, 0xFFFFF824, 0x0000030D },
+ { 0x0213EA94DE1E4944, 0x000020DB, 0xFFFFF55B, 0x0000037C, 0x0000153D, 0xFFFFFB5F, 0x000002BA, 0x0000153D, 0xFFFFFB5F, 0x000002BA },
+ { 0x0213EA94DE0221A4, 0x000030BB, 0xFFFFEBDA, 0x000004BC, 0x00001B0E, 0xFFFFF7A8, 0x0000031E, 0x00001B0E, 0xFFFFF7A8, 0x0000031E },
+ { 0x0213F0FD42C62904, 0x000033C4, 0xFFFFEA41, 0x000004FA, 0x000022C6, 0xFFFFF363, 0x000003BC, 0x000022C6, 0xFFFFF363, 0x000003BC },
+ { 0x0213EA94DE240924, 0x00002D47, 0xFFFFEE01, 0x00000477, 0x000021CD, 0xFFFFF36E, 0x000003D6, 0x000021CD, 0xFFFFF36E, 0x000003D6 },
+ { 0x0213EA94DE1E31A4, 0x00001E7B, 0xFFFFF733, 0x00000339, 0x00001668, 0xFFFFFB29, 0x000002BF, 0x00001668, 0xFFFFFB29, 0x000002BF },
+ { 0x0213F0FD42CA2984, 0x00002F7E, 0xFFFFEAFF, 0x000004FC, 0x000018D4, 0xFFFFF8BE, 0x000002E8, 0x000018D4, 0xFFFFF8BE, 0x000002E8 },
+ { 0x0213EA94DE3238A4, 0x00002635, 0xFFFFF2E1, 0x000003BC, 0x000017A4, 0xFFFFFA67, 0x000002C3, 0x000017A4, 0xFFFFFA67, 0x000002C3 },
+ { 0x0213EA94DE1230A4, 0x000026CA, 0xFFFFF2C1, 0x000003B2, 0x00001C3E, 0xFFFFF7AE, 0x0000031F, 0x00001C3E, 0xFFFFF7AE, 0x0000031F },
+ { 0x0213EA94DE1C1064, 0x00002550, 0xFFFFF380, 0x000003B5, 0x000019F5, 0xFFFFF8E7, 0x00000313, 0x000019F5, 0xFFFFF8E7, 0x00000313 },
+ { 0x0213F0FD42CA4904, 0x00002FBC, 0xFFFFEAF8, 0x000004FA, 0x000018CC, 0xFFFFF8C6, 0x000002E8, 0x000018CC, 0xFFFFF8C6, 0x000002E8 },
+ { 0x0213F0FD42D018E4, 0x00002FCC, 0xFFFFEB60, 0x000004EA, 0x00001EFF, 0xFFFFF4DA, 0x0000038F, 0x00001EFF, 0xFFFFF4DA, 0x0000038F },
+ { 0x0213EA94DE164084, 0x000023E6, 0xFFFFF413, 0x000003A1, 0x00001544, 0xFFFFFC16, 0x0000028B, 0x00001544, 0xFFFFFC16, 0x0000028B },
+ { 0x0213F0FD42CE3024, 0x00003251, 0xFFFFEAA2, 0x000004F5, 0x000025B0, 0xFFFFF0DF, 0x00000431, 0x000025B0, 0xFFFFF0DF, 0x00000431 },
+ { 0x0213F0FD42D03984, 0x00002F6F, 0xFFFFEB67, 0x000004E6, 0x00002275, 0xFFFFF249, 0x000003FB, 0x00002275, 0xFFFFF249, 0x000003FB },
+ { 0x0213EA94DE322964, 0x00002597, 0xFFFFF34A, 0x000003B1, 0x00001BCC, 0xFFFFF822, 0x0000031A, 0x00001BCC, 0xFFFFF822, 0x0000031A },
+ { 0x0213F0FD42C63864, 0x00003B1D, 0xFFFFE40E, 0x0000060D, 0x00001F61, 0xFFFFF470, 0x0000039F, 0x00001F61, 0xFFFFF470, 0x0000039F },
+ { 0x0213F0FD42C64144, 0x0000379F, 0xFFFFE6DB, 0x0000058C, 0x00002460, 0xFFFFF170, 0x00000415, 0x00002460, 0xFFFFF170, 0x00000415 },
+ { 0x0213EA94DE165144, 0x00002442, 0xFFFFF2FB, 0x000003D9, 0x00001414, 0xFFFFFBDC, 0x000002A2, 0x00001414, 0xFFFFFBDC, 0x000002A2 },
+ { 0x0213EA94DE0240C4, 0x00003270, 0xFFFFEA0D, 0x0000051C, 0x00001AFD, 0xFFFFF783, 0x00000328, 0x00001AFD, 0xFFFFF783, 0x00000328 },
+ { 0x0213EA94DE161104, 0x00001B23, 0xFFFFF94B, 0x000002EB, 0x000015F1, 0xFFFFFB82, 0x000002B4, 0x000015F1, 0xFFFFFB82, 0x000002B4 },
+ { 0x0213EA94DE323844, 0x000026AE, 0xFFFFF21A, 0x000003DB, 0x00001827, 0xFFFFFA10, 0x000002C8, 0x00001827, 0xFFFFFA10, 0x000002C8 },
+ { 0x0213F0FD42CA4884, 0x00002DCF, 0xFFFFEBD8, 0x000004DB, 0x00001A75, 0xFFFFF719, 0x0000033A, 0x00001A75, 0xFFFFF719, 0x0000033A },
+ { 0x0213F0FD42CE40E4, 0x00003983, 0xFFFFE500, 0x000005EA, 0x000022A6, 0xFFFFF25F, 0x000003F1, 0x000022A6, 0xFFFFF25F, 0x000003F1 },
+ { 0x0213EA94DE1218C4, 0x00002AD5, 0xFFFFF07A, 0x00000406, 0x000019FB, 0xFFFFF961, 0x000002D8, 0x000019FB, 0xFFFFF961, 0x000002D8 },
+ { 0x0213F0FD42CA39A4, 0x00002A43, 0xFFFFEE43, 0x00000474, 0x00001D65, 0xFFFFF538, 0x00000387, 0x00001D65, 0xFFFFF538, 0x00000387 },
+ { 0x0213F0FD42C62084, 0x0000311E, 0xFFFFEAF8, 0x000004E8, 0x00001959, 0xFFFFF8E4, 0x000002DC, 0x00001959, 0xFFFFF8E4, 0x000002DC },
+ { 0x0213F0FD42D031A4, 0x0000339A, 0xFFFFE8A7, 0x00000559, 0x00001A04, 0xFFFFF7E5, 0x00000311, 0x00001A04, 0xFFFFF7E5, 0x00000311 },
+ { 0x0213EA94DE204144, 0x000021B3, 0xFFFFF50F, 0x00000389, 0x00001470, 0xFFFFFBF7, 0x000002A5, 0x00001470, 0xFFFFFBF7, 0x000002A5 },
+ { 0x0213EA94DE021884, 0x00003417, 0xFFFFE9A6, 0x0000051D, 0x000018A4, 0xFFFFF984, 0x000002CF, 0x000018A4, 0xFFFFF984, 0x000002CF },
+ { 0x0213EA94DE202984, 0x00001FED, 0xFFFFF6A2, 0x00000347, 0x00001639, 0xFFFFFB59, 0x000002BB, 0x00001639, 0xFFFFFB59, 0x000002BB },
+ { 0x0213EA94DE1218A4, 0x000032D2, 0xFFFFEB18, 0x000004DC, 0x00001A01, 0xFFFFF95E, 0x000002CF, 0x00001A01, 0xFFFFF95E, 0x000002CF },
+ { 0x0213F0FD42D04084, 0x00003147, 0xFFFFEA3B, 0x00000518, 0x0000241D, 0xFFFFF11C, 0x00000431, 0x0000241D, 0xFFFFF11C, 0x00000431 },
+ { 0x0213EA94DE1C0904, 0x00001D44, 0xFFFFF7E7, 0x0000031A, 0x0000153F, 0xFFFFFBBC, 0x000002A9, 0x0000153F, 0xFFFFFBBC, 0x000002A9 },
+ { 0x0213F0FD42CC4104, 0x00003690, 0xFFFFE6E3, 0x000005A4, 0x000018DE, 0xFFFFF908, 0x000002DD, 0x000018DE, 0xFFFFF908, 0x000002DD },
+ { 0x0213F0FD42CC2184, 0x00003561, 0xFFFFE6F8, 0x000005AB, 0x000018B5, 0xFFFFF8A0, 0x000002F3, 0x000018B5, 0xFFFFF8A0, 0x000002F3 },
+ { 0x0213EA94DE323124, 0x000028F4, 0xFFFFF23A, 0x000003CE, 0x00001BC6, 0xFFFFF881, 0x00000311, 0x00001BC6, 0xFFFFF881, 0x00000311 },
+ { 0x0213F0FD42D03184, 0x000035D7, 0xFFFFE71C, 0x0000059B, 0x00001D49, 0xFFFFF5C8, 0x00000368, 0x00001D49, 0xFFFFF5C8, 0x00000368 },
+ { 0x0213F0FD42CE18A4, 0x0000397E, 0xFFFFE4CB, 0x000005F4, 0x00001989, 0xFFFFF844, 0x000002FD, 0x00001989, 0xFFFFF844, 0x000002FD },
+ { 0x0213F0FD42C62064, 0x00003BAB, 0xFFFFE332, 0x0000063F, 0x00001A69, 0xFFFFF7B9, 0x00000312, 0x00001A69, 0xFFFFF7B9, 0x00000312 },
+ { 0x0213F0FD42D03064, 0x00002F26, 0xFFFFEB82, 0x000004E8, 0x00001D7D, 0xFFFFF590, 0x00000379, 0x00001D7D, 0xFFFFF590, 0x00000379 },
+ { 0x0213EA94DE0631A4, 0x00002FDC, 0xFFFFEBE0, 0x000004C3, 0x00001940, 0xFFFFF8CC, 0x000002EE, 0x00001940, 0xFFFFF8CC, 0x000002EE },
+ { 0x0213EA94DE1C08E4, 0x000021B2, 0xFFFFF558, 0x00000379, 0x00001643, 0xFFFFFB1C, 0x000002C3, 0x00001643, 0xFFFFFB1C, 0x000002C3 },
+ { 0x0213EA94DE321904, 0x00002897, 0xFFFFF181, 0x000003F7, 0x00001990, 0xFFFFF994, 0x000002E2, 0x00001990, 0xFFFFF994, 0x000002E2 },
+ { 0x0213EA94DE1E0924, 0x00001D19, 0xFFFFF829, 0x0000031A, 0x00001558, 0xFFFFFBCA, 0x000002AF, 0x00001558, 0xFFFFFBCA, 0x000002AF },
+ { 0x0213EA94DE043144, 0x00003311, 0xFFFFEAD9, 0x000004E1, 0x00001BDC, 0xFFFFF79E, 0x0000031D, 0x00001BDC, 0xFFFFF79E, 0x0000031D },
+ { 0x0213EA94DE1E29C4, 0x00001E54, 0xFFFFF740, 0x00000333, 0x000016A1, 0xFFFFFAF0, 0x000002C4, 0x000016A1, 0xFFFFFAF0, 0x000002C4 },
+ { 0x0213F0FD42CE3964, 0x00003266, 0xFFFFE9A8, 0x00000527, 0x00002307, 0xFFFFF219, 0x000003FC, 0x00002307, 0xFFFFF219, 0x000003FC },
+ { 0x0213EA94DE321144, 0x00001D1F, 0xFFFFF82B, 0x000002F0, 0x000013F0, 0xFFFFFD0B, 0x0000024E, 0x000013F0, 0xFFFFFD0B, 0x0000024E },
+ { 0x0213F0FD42C648A4, 0x0000312E, 0xFFFFEA67, 0x00000502, 0x0000222A, 0xFFFFF253, 0x000003F9, 0x0000222A, 0xFFFFF253, 0x000003F9 },
+ { 0x0213F0FD42CA4124, 0x000032B2, 0xFFFFE9AD, 0x00000523, 0x00001E97, 0xFFFFF527, 0x0000037F, 0x00001E97, 0xFFFFF527, 0x0000037F },
+ { 0x0213EA94DE1640E4, 0x00001F6A, 0xFFFFF6FC, 0x00000338, 0x0000164B, 0xFFFFFB2C, 0x000002C2, 0x0000164B, 0xFFFFFB2C, 0x000002C2 },
+ { 0x0213EA94DE0228C4, 0x00002603, 0xFFFFF386, 0x00000392, 0x00001EE0, 0xFFFFF601, 0x00000369, 0x00001EE0, 0xFFFFF601, 0x00000369 },
+ { 0x0213EA94DE201164, 0x00001D0C, 0xFFFFF803, 0x00000317, 0x00001345, 0xFFFFFD52, 0x00000260, 0x00001345, 0xFFFFFD52, 0x00000260 },
+ { 0x0213F0FD42CC1884, 0x0000327A, 0xFFFFE8E5, 0x0000055C, 0x00001680, 0xFFFFFA2D, 0x000002B2, 0x00001680, 0xFFFFFA2D, 0x000002B2 },
+ { 0x0213F0FD42CA3964, 0x000032B8, 0xFFFFE91A, 0x0000054A, 0x00001BAB, 0xFFFFF6EC, 0x00000338, 0x00001BAB, 0xFFFFF6EC, 0x00000338 },
+ { 0x0213F0FD42CC3044, 0x00002F79, 0xFFFFEB63, 0x000004EF, 0x000017BB, 0xFFFFF9B1, 0x000002CA, 0x000017BB, 0xFFFFF9B1, 0x000002CA },
+ { 0x0213EA94DE0438E4, 0x00002AE5, 0xFFFFEFCB, 0x0000041D, 0x0000214A, 0xFFFFF3A7, 0x000003C7, 0x0000214A, 0xFFFFF3A7, 0x000003C7 },
+ { 0x0213EA94DE322064, 0x0000212C, 0xFFFFF5BC, 0x0000034F, 0x000017ED, 0xFFFFFA4C, 0x000002C1, 0x000017ED, 0xFFFFFA4C, 0x000002C1 },
+ { 0x0213EA94DE121124, 0x00002BE7, 0xFFFFEF40, 0x0000043C, 0x00001AE2, 0xFFFFF8CF, 0x000002E3, 0x00001AE2, 0xFFFFF8CF, 0x000002E3 },
+ { 0x0213F0FD42D05144, 0x000032DC, 0xFFFFE90F, 0x00000549, 0x00002A2D, 0xFFFFECC9, 0x000004ED, 0x00002A2D, 0xFFFFECC9, 0x000004ED },
+ { 0x0213EA94DE1618A4, 0x00001DE3, 0xFFFFF80D, 0x00000319, 0x000016FA, 0xFFFFFB42, 0x000002BC, 0x000016FA, 0xFFFFFB42, 0x000002BC },
+ { 0x0213EA94DE1E2844, 0x00001F1B, 0xFFFFF6DE, 0x00000346, 0x00001502, 0xFFFFFC23, 0x00000298, 0x00001502, 0xFFFFFC23, 0x00000298 },
+ { 0x0213EA94DE061864, 0x00003203, 0xFFFFEA87, 0x000004FE, 0x0000194E, 0xFFFFF8E3, 0x000002EC, 0x0000194E, 0xFFFFF8E3, 0x000002EC },
+ { 0x0213F0FD42D02144, 0x0000337A, 0xFFFFE8DD, 0x00000551, 0x00001E3C, 0xFFFFF534, 0x00000385, 0x00001E3C, 0xFFFFF534, 0x00000385 },
+ { 0x0213F0FD42CA4864, 0x000036F6, 0xFFFFE62A, 0x000005C5, 0x000023C0, 0xFFFFF117, 0x00000435, 0x000023C0, 0xFFFFF117, 0x00000435 },
+ { 0x0213F0FD42CC2144, 0x00003125, 0xFFFFEA4E, 0x0000051A, 0x00001E6C, 0xFFFFF503, 0x0000038E, 0x00001E6C, 0xFFFFF503, 0x0000038E },
+ { 0x0213EA94DE1C08A4, 0x00001CD4, 0xFFFFF82D, 0x0000030E, 0x0000156D, 0xFFFFFB64, 0x000002B8, 0x0000156D, 0xFFFFFB64, 0x000002B8 },
+ { 0x0213EA94DE0240A4, 0x00002F14, 0xFFFFEC46, 0x000004B8, 0x000017F1, 0xFFFFF977, 0x000002D2, 0x000017F1, 0xFFFFF977, 0x000002D2 },
+ { 0x0213EA94DE0640A4, 0x000031F1, 0xFFFFEAD4, 0x000004ED, 0x0000184C, 0xFFFFF983, 0x000002D4, 0x0000184C, 0xFFFFF983, 0x000002D4 },
+ { 0x0213F0FD42D04984, 0x00002EA9, 0xFFFFEBD7, 0x000004D5, 0x0000288D, 0xFFFFEDDB, 0x000004C0, 0x0000288D, 0xFFFFEDDB, 0x000004C0 },
+ { 0x0213F0FD42CA3984, 0x0000335F, 0xFFFFE82C, 0x00000579, 0x00001DBF, 0xFFFFF512, 0x0000038C, 0x00001DBF, 0xFFFFF512, 0x0000038C },
+ { 0x0213EA94DE201184, 0x0000224F, 0xFFFFF4B5, 0x00000391, 0x0000138C, 0xFFFFFCC3, 0x0000027A, 0x0000138C, 0xFFFFFCC3, 0x0000027A },
+ { 0x0213EA94DE1240A4, 0x0000320D, 0xFFFFEACD, 0x000004F5, 0x00001976, 0xFFFFF913, 0x000002E2, 0x00001976, 0xFFFFF913, 0x000002E2 },
+ { 0x0213EA94DE202104, 0x00001BEB, 0xFFFFF99C, 0x000002E4, 0x000016A4, 0xFFFFFB77, 0x000002C3, 0x000016A4, 0xFFFFFB77, 0x000002C3 },
+ { 0x0213EA94DE063044, 0x0000396E, 0xFFFFE616, 0x000005A9, 0x000018F4, 0xFFFFF91A, 0x000002E3, 0x000018F4, 0xFFFFF91A, 0x000002E3 },
+ { 0x0213EA94DE022864, 0x00003251, 0xFFFFEA8E, 0x000004FA, 0x000018EF, 0xFFFFF910, 0x000002E4, 0x000018EF, 0xFFFFF910, 0x000002E4 },
+ { 0x0213EA94DE1C1924, 0x00001DAF, 0xFFFFF857, 0x0000030D, 0x00001915, 0xFFFFF9D8, 0x000002F7, 0x00001915, 0xFFFFF9D8, 0x000002F7 },
+ { 0x0213EA94DE2041A4, 0x000025B6, 0xFFFFF26B, 0x000003E5, 0x00001531, 0xFFFFFB68, 0x000002AF, 0x00001531, 0xFFFFFB68, 0x000002AF },
+ { 0x0213EA94DE061884, 0x00002B2E, 0xFFFFEF2E, 0x00000440, 0x00001968, 0xFFFFF91A, 0x000002DF, 0x00001968, 0xFFFFF91A, 0x000002DF },
+ { 0x0213EA94DE1C2064, 0x00002305, 0xFFFFF528, 0x00000377, 0x000018A4, 0xFFFFF9EB, 0x000002F0, 0x000018A4, 0xFFFFF9EB, 0x000002F0 },
+ { 0x0213F0FD42CA40C4, 0x000032A1, 0xFFFFE992, 0x0000052E, 0x00001A55, 0xFFFFF826, 0x000002FE, 0x00001A55, 0xFFFFF826, 0x000002FE },
+ { 0x0213EA94DE042184, 0x00002CCD, 0xFFFFEE35, 0x00000462, 0x00001B09, 0xFFFFF7E6, 0x0000030F, 0x00001B09, 0xFFFFF7E6, 0x0000030F },
+ { 0x0213EA94DE323084, 0x00002602, 0xFFFFF2CF, 0x000003C5, 0x000016EE, 0xFFFFFAD4, 0x000002B4, 0x000016EE, 0xFFFFFAD4, 0x000002B4 },
+ { 0x0213F0FD42D01964, 0x00003370, 0xFFFFE891, 0x00000560, 0x000017F0, 0xFFFFF930, 0x000002DF, 0x000017F0, 0xFFFFF930, 0x000002DF },
+ { 0x0213F0FD42CA1884, 0x00002EDC, 0xFFFFEB6D, 0x000004EC, 0x000016E6, 0xFFFFF9ED, 0x000002BC, 0x000016E6, 0xFFFFF9ED, 0x000002BC },
+ { 0x0213EA94DE1228C4, 0x00002A05, 0xFFFFF13D, 0x000003F0, 0x00002065, 0xFFFFF57B, 0x00000378, 0x00002065, 0xFFFFF57B, 0x00000378 },
+ { 0x0213F0FD42CE2044, 0x00002F8A, 0xFFFFEB6E, 0x000004E4, 0x00001E3E, 0xFFFFF50E, 0x0000038D, 0x00001E3E, 0xFFFFF50E, 0x0000038D },
+ { 0x0213F0FD42CA3044, 0x00002BB5, 0xFFFFED6A, 0x000004A1, 0x000017BF, 0xFFFFF937, 0x000002E5, 0x000017BF, 0xFFFFF937, 0x000002E5 },
+ { 0x0213EA94DE201964, 0x0000202C, 0xFFFFF6CE, 0x0000033F, 0x000015EE, 0xFFFFFB83, 0x000002B9, 0x000015EE, 0xFFFFFB83, 0x000002B9 },
+ { 0x0213EA94DE022884, 0x00002C0C, 0xFFFFEF10, 0x0000043F, 0x00001A73, 0xFFFFF83E, 0x0000030C, 0x00001A73, 0xFFFFF83E, 0x0000030C },
+ { 0x0213EA94DE324104, 0x0000234F, 0xFFFFF460, 0x00000385, 0x000018C3, 0xFFFFF9A5, 0x000002DD, 0x000018C3, 0xFFFFF9A5, 0x000002DD },
+ { 0x0213F0FD42CE1904, 0x00003679, 0xFFFFE704, 0x00000595, 0x00002177, 0xFFFFF31A, 0x000003D7, 0x00002177, 0xFFFFF31A, 0x000003D7 },
+ { 0x0213F0FD42CA2924, 0x00003008, 0xFFFFEBB8, 0x000004D5, 0x000024FF, 0xFFFFF112, 0x00000430, 0x000024FF, 0xFFFFF112, 0x00000430 },
+ { 0x0213F0FD42C641A4, 0x00003848, 0xFFFFE6A3, 0x00000594, 0x00002958, 0xFFFFEE37, 0x000004A0, 0x00002958, 0xFFFFEE37, 0x000004A0 },
+ { 0x0213F0FD42CC1924, 0x00002FDF, 0xFFFFEB08, 0x000004FD, 0x00001D77, 0xFFFFF58B, 0x0000037A, 0x00001D77, 0xFFFFF58B, 0x0000037A },
+ { 0x0213EA94DE063064, 0x00002EC8, 0xFFFFED41, 0x00000481, 0x00001949, 0xFFFFF91C, 0x000002DF, 0x00001949, 0xFFFFF91C, 0x000002DF },
+ { 0x0213F0FD42D041A4, 0x000037C1, 0xFFFFE5BA, 0x000005D7, 0x0000252C, 0xFFFFF023, 0x00000460, 0x0000252C, 0xFFFFF023, 0x00000460 },
+ { 0x0213F0FD42CE2944, 0x00003716, 0xFFFFE70C, 0x0000058A, 0x000028CC, 0xFFFFEE57, 0x0000049D, 0x000028CC, 0xFFFFEE57, 0x0000049D },
+ { 0x0213F0FD42CA40E4, 0x000033D1, 0xFFFFE8E8, 0x00000547, 0x00001AB1, 0xFFFFF7E5, 0x00000309, 0x00001AB1, 0xFFFFF7E5, 0x00000309 },
+ { 0x0213F0FD42CC2944, 0x00002D72, 0xFFFFED65, 0x0000048E, 0x00001E0D, 0xFFFFF5A7, 0x00000370, 0x00001E0D, 0xFFFFF5A7, 0x00000370 },
+ { 0x0213EA94DE1C39A4, 0x00002292, 0xFFFFF49F, 0x00000393, 0x000017F4, 0xFFFFF9CD, 0x000002F5, 0x000017F4, 0xFFFFF9CD, 0x000002F5 },
+ { 0x0213EA94DE243044, 0x000026EE, 0xFFFFF18C, 0x000003F7, 0x000018A7, 0xFFFFF95A, 0x000002E5, 0x000018A7, 0xFFFFF95A, 0x000002E5 },
+ { 0x0213EA94DE042164, 0x00002F62, 0xFFFFEC9B, 0x000004A4, 0x0000194E, 0xFFFFF932, 0x000002D9, 0x0000194E, 0xFFFFF932, 0x000002D9 },
+ { 0x0213EA94DE1E3984, 0x00001CE8, 0xFFFFF7FA, 0x0000031C, 0x000014CE, 0xFFFFFBD4, 0x000002AB, 0x000014CE, 0xFFFFFBD4, 0x000002AB },
+ { 0x0213EA94DE1210E4, 0x00002E5A, 0xFFFFEDAB, 0x0000047C, 0x00001A82, 0xFFFFF8F7, 0x000002DE, 0x00001A82, 0xFFFFF8F7, 0x000002DE },
+ { 0x0213F0FD42CC30E4, 0x00003057, 0xFFFFEC34, 0x000004B9, 0x00002296, 0xFFFFF342, 0x000003D0, 0x00002296, 0xFFFFF342, 0x000003D0 },
+ { 0x0213EA94DE0418A4, 0x00002B0F, 0xFFFFEF58, 0x00000434, 0x00001BFD, 0xFFFFF721, 0x00000330, 0x00001BFD, 0xFFFFF721, 0x00000330 },
+ { 0x0213EA94DE2010A4, 0x00001F01, 0xFFFFF751, 0x0000032F, 0x00001502, 0xFFFFFC3E, 0x00000296, 0x00001502, 0xFFFFFC3E, 0x00000296 },
+ { 0x0213F0FD42CA3064, 0x00002FF4, 0xFFFFEAE2, 0x00000503, 0x00001B36, 0xFFFFF736, 0x00000330, 0x00001B36, 0xFFFFF736, 0x00000330 },
+ { 0x0213F0FD42CE2064, 0x00003762, 0xFFFFE5AB, 0x000005DE, 0x000018CB, 0xFFFFF896, 0x000002F4, 0x000018CB, 0xFFFFF896, 0x000002F4 },
+ { 0x0213F0FD42CC2064, 0x00002890, 0xFFFFEF92, 0x00000445, 0x0000191D, 0xFFFFF86F, 0x00000302, 0x0000191D, 0xFFFFF86F, 0x00000302 },
+ { 0x0213EA94DE043064, 0x00002F76, 0xFFFFEC0E, 0x000004BF, 0x00001F7D, 0xFFFFF41A, 0x000003C0, 0x00001F7D, 0xFFFFF41A, 0x000003C0 },
+ { 0x0213EA94DE1E08A4, 0x00001D55, 0xFFFFF7F8, 0x0000031E, 0x000015DF, 0xFFFFFB79, 0x000002B7, 0x000015DF, 0xFFFFFB79, 0x000002B7 },
+ { 0x0213EA94DE204924, 0x00001FE9, 0xFFFFF64A, 0x00000353, 0x000019E8, 0xFFFFF882, 0x0000032A, 0x000019E8, 0xFFFFF882, 0x0000032A },
+ { 0x0213EA94DE063964, 0x000030B5, 0xFFFFEBB8, 0x000004C4, 0x00001857, 0xFFFFF968, 0x000002D8, 0x00001857, 0xFFFFF968, 0x000002D8 },
+ { 0x0213F0FD42CA28C4, 0x00003398, 0xFFFFE9A3, 0x00000524, 0x00001FF9, 0xFFFFF458, 0x000003AD, 0x00001FF9, 0xFFFFF458, 0x000003AD },
+ { 0x0213F0FD42CE2964, 0x00003897, 0xFFFFE5BD, 0x000005C8, 0x00002519, 0xFFFFF0BA, 0x00000438, 0x00002519, 0xFFFFF0BA, 0x00000438 },
+ { 0x0213F0FD42D04064, 0x00003234, 0xFFFFE9B1, 0x00000530, 0x000022CC, 0xFFFFF20E, 0x00000409, 0x000022CC, 0xFFFFF20E, 0x00000409 },
+ { 0x0213EA94DE205104, 0x00001FD2, 0xFFFFF641, 0x00000354, 0x000017C9, 0xFFFFF9C0, 0x000002FB, 0x000017C9, 0xFFFFF9C0, 0x000002FB },
+ { 0x0213F0FD42CE48E4, 0x00003234, 0xFFFFE946, 0x0000053D, 0x00002267, 0xFFFFF1F5, 0x0000040D, 0x00002267, 0xFFFFF1F5, 0x0000040D },
+ { 0x0213EA94DE2029A4, 0x00002330, 0xFFFFF474, 0x00000399, 0x00001490, 0xFFFFFC67, 0x00000288, 0x00001490, 0xFFFFFC67, 0x00000288 },
+ { 0x0213F0FD42D03924, 0x000032A3, 0xFFFFE9EB, 0x0000051B, 0x0000234D, 0xFFFFF23C, 0x000003F7, 0x0000234D, 0xFFFFF23C, 0x000003F7 },
+ { 0x0213EA94DE200904, 0x0000217E, 0xFFFFF53A, 0x00000384, 0x00001511, 0xFFFFFBF5, 0x0000029E, 0x00001511, 0xFFFFFBF5, 0x0000029E },
+ { 0x0213F0FD42CE50E4, 0x0000384F, 0xFFFFE562, 0x000005E2, 0x0000295A, 0xFFFFED53, 0x000004D3, 0x0000295A, 0xFFFFED53, 0x000004D3 },
+ { 0x0213F0FD42D05124, 0x00003315, 0xFFFFE8D1, 0x00000552, 0x000025D1, 0xFFFFEFAF, 0x00000471, 0x000025D1, 0xFFFFEFAF, 0x00000471 },
+ { 0x0213F0FD42C64924, 0x00004183, 0xFFFFDF61, 0x000006DA, 0x0000193C, 0xFFFFF88F, 0x000002EC, 0x0000193C, 0xFFFFF88F, 0x000002EC },
+ { 0x0213EA94DE242164, 0x00002DFC, 0xFFFFEDF2, 0x0000047A, 0x00001755, 0xFFFFFAC2, 0x000002AC, 0x00001755, 0xFFFFFAC2, 0x000002AC },
+ { 0x0213F0FD42CA31A4, 0x000033FE, 0xFFFFE774, 0x0000059F, 0x00001E70, 0xFFFFF492, 0x000003A0, 0x00001E70, 0xFFFFF492, 0x000003A0 },
+ { 0x0213F0FD42C629A4, 0x000040D7, 0xFFFFDFB8, 0x000006CE, 0x00001AC8, 0xFFFFF773, 0x0000031D, 0x00001AC8, 0xFFFFF773, 0x0000031D },
+ { 0x0213EA94DE1E1164, 0x00001D02, 0xFFFFF803, 0x00000322, 0x000015FE, 0xFFFFFB71, 0x000002BB, 0x000015FE, 0xFFFFFB71, 0x000002BB },
+ { 0x0213F0FD42D02884, 0x00002EB0, 0xFFFFEC31, 0x000004C4, 0x00001B3C, 0xFFFFF73B, 0x00000330, 0x00001B3C, 0xFFFFF73B, 0x00000330 },
+ { 0x0213F0FD42CA4984, 0x00002D9F, 0xFFFFECBF, 0x000004A8, 0x000022B0, 0xFFFFF23C, 0x000003F9, 0x000022B0, 0xFFFFF23C, 0x000003F9 },
+ { 0x0213F0FD42CC18E4, 0x00002C6A, 0xFFFFEDAC, 0x00000488, 0x00002419, 0xFFFFF159, 0x00000427, 0x00002419, 0xFFFFF159, 0x00000427 },
+ { 0x0213EA94DE1210A4, 0x00002991, 0xFFFFF06C, 0x0000040E, 0x00001AA9, 0xFFFFF8D0, 0x000002E1, 0x00001AA9, 0xFFFFF8D0, 0x000002E1 },
+ { 0x0213EA94DE123904, 0x00002F8E, 0xFFFFED1B, 0x00000493, 0x00001DE4, 0xFFFFF69C, 0x00000347, 0x00001DE4, 0xFFFFF69C, 0x00000347 },
+ { 0x0213EA94DE204184, 0x00002136, 0xFFFFF540, 0x0000037C, 0x000014FF, 0xFFFFFB83, 0x000002B2, 0x000014FF, 0xFFFFFB83, 0x000002B2 },
+ { 0x0213EA94DE0618E4, 0x0000354C, 0xFFFFE97D, 0x0000051A, 0x00001906, 0xFFFFF965, 0x000002DD, 0x00001906, 0xFFFFF965, 0x000002DD },
+ { 0x0213F0FD42C620C4, 0x0000348B, 0xFFFFE94D, 0x0000051F, 0x0000285B, 0xFFFFEF1A, 0x00000473, 0x0000285B, 0xFFFFEF1A, 0x00000473 },
+ { 0x0213EA94DE3218A4, 0x000026E6, 0xFFFFF24E, 0x000003D6, 0x0000141F, 0xFFFFFCCE, 0x00000260, 0x0000141F, 0xFFFFFCCE, 0x00000260 },
+ { 0x0213F0FD42C64164, 0x00003CED, 0xFFFFE2A5, 0x0000064E, 0x00002060, 0xFFFFF3E0, 0x000003B0, 0x00002060, 0xFFFFF3E0, 0x000003B0 },
+ { 0x0213EA94DE021084, 0x000029D4, 0xFFFFEFF7, 0x00000426, 0x00001976, 0xFFFFF8E1, 0x000002EE, 0x00001976, 0xFFFFF8E1, 0x000002EE },
+ { 0x0213F0FD42CA40A4, 0x00003767, 0xFFFFE601, 0x000005CC, 0x00001D22, 0xFFFFF5F4, 0x00000361, 0x00001D22, 0xFFFFF5F4, 0x00000361 },
+ { 0x0213F0FD42C650C4, 0x00003CE8, 0xFFFFE2E8, 0x00000637, 0x0000232C, 0xFFFFF1E7, 0x00000405, 0x0000232C, 0xFFFFF1E7, 0x00000405 },
+ { 0x0213EA94DE201064, 0x000023A8, 0xFFFFF4CD, 0x00000386, 0x00001944, 0xFFFFF983, 0x00000300, 0x00001944, 0xFFFFF983, 0x00000300 },
+ { 0x0213F0FD42CC30A4, 0x00003451, 0xFFFFE8B9, 0x00000551, 0x00001AD7, 0xFFFFF7BF, 0x00000318, 0x00001AD7, 0xFFFFF7BF, 0x00000318 },
+ { 0x0213F0FD42CE2984, 0x0000381B, 0xFFFFE5A0, 0x000005D0, 0x00001E0F, 0xFFFFF521, 0x00000382, 0x00001E0F, 0xFFFFF521, 0x00000382 },
+ { 0x0213EA94DE2038C4, 0x000023A4, 0xFFFFF4A6, 0x00000394, 0x0000171F, 0xFFFFFABB, 0x000002D9, 0x0000171F, 0xFFFFFABB, 0x000002D9 },
+ { 0x0213F0FD42C620A4, 0x00003C2B, 0xFFFFE447, 0x000005F0, 0x0000207F, 0xFFFFF44E, 0x0000039A, 0x0000207F, 0xFFFFF44E, 0x0000039A },
+ { 0x0213F0FD42CC3984, 0x00002F07, 0xFFFFEB70, 0x000004E9, 0x00001765, 0xFFFFF9A5, 0x000002C6, 0x00001765, 0xFFFFF9A5, 0x000002C6 },
+ { 0x0213F0FD42C62984, 0x00003A01, 0xFFFFE4E0, 0x000005E7, 0x0000227A, 0xFFFFF292, 0x000003E5, 0x0000227A, 0xFFFFF292, 0x000003E5 },
+ { 0x0213F0FD42CE20A4, 0x0000376E, 0xFFFFE686, 0x000005A6, 0x00001FCF, 0xFFFFF43B, 0x000003A8, 0x00001FCF, 0xFFFFF43B, 0x000003A8 },
+ { 0x0213F0FFEF5A4984, 0x0000485F, 0xFFFFDCC1, 0x00000713, 0x00002CF8, 0xFFFFEC45, 0x000004DA, 0x00002CF8, 0xFFFFEC45, 0x000004DA },
+ { 0x0213F0FFEF5C3184, 0x0000331C, 0xFFFFE8FF, 0x00000541, 0x00002366, 0xFFFFF19D, 0x00000411, 0x00002366, 0xFFFFF19D, 0x00000411 },
+ { 0x0213F0FFEF643864, 0x00003CF3, 0xFFFFE15A, 0x00000694, 0x00002FB3, 0xFFFFE827, 0x000005B9, 0x00002FB3, 0xFFFFE827, 0x000005B9 },
+ { 0x0213EA94DE321104, 0x000023F3, 0xFFFFF3EA, 0x0000039A, 0x00001345, 0xFFFFFD6B, 0x00000241, 0x00001345, 0xFFFFFD6B, 0x00000241 },
+ { 0x0213F0FFEF5C28A4, 0x000038C0, 0xFFFFE58A, 0x000005CC, 0x000023CA, 0xFFFFF1AA, 0x00000408, 0x000023CA, 0xFFFFF1AA, 0x00000408 },
+ { 0x0213F0FFEF662944, 0x00004976, 0xFFFFDD6A, 0x000006D7, 0x000033C6, 0xFFFFE8EB, 0x0000054D, 0x000033C6, 0xFFFFE8EB, 0x0000054D },
+ { 0x0213F0FFEF644904, 0x00004049, 0xFFFFDF6D, 0x000006D8, 0x00003129, 0xFFFFE716, 0x000005E9, 0x00003129, 0xFFFFE716, 0x000005E9 },
+ { 0x0213F0FFEF661164, 0x000046C2, 0xFFFFDCEB, 0x0000071C, 0x00002E6D, 0xFFFFEA8F, 0x0000052E, 0x00002E6D, 0xFFFFEA8F, 0x0000052E },
+ { 0x0213F0FFEF6238A4, 0x00004080, 0xFFFFE1E1, 0x0000063A, 0x0000396D, 0xFFFFE40A, 0x0000062C, 0x0000396D, 0xFFFFE40A, 0x0000062C },
+ { 0x0213F0FFEF5E2124, 0x00003DE0, 0xFFFFE358, 0x0000060C, 0x00002AA2, 0xFFFFEDBF, 0x000004A0, 0x00002AA2, 0xFFFFEDBF, 0x000004A0 },
+ { 0x0213F0FFEF5E3144, 0x00003FC0, 0xFFFFE2A1, 0x0000061A, 0x000027D8, 0xFFFFEFEC, 0x0000043A, 0x000027D8, 0xFFFFEFEC, 0x0000043A },
+ { 0x0213F0FFEF661924, 0x00003FBF, 0xFFFFE2F5, 0x00000603, 0x000032D7, 0xFFFFE900, 0x00000552, 0x000032D7, 0xFFFFE900, 0x00000552 },
+ { 0x0213F0FFEF5C10E4, 0x000035EE, 0xFFFFE6CA, 0x000005A2, 0x0000247C, 0xFFFFF088, 0x00000446, 0x0000247C, 0xFFFFF088, 0x00000446 },
+ { 0x0213F0FFEF643884, 0x000039C8, 0xFFFFE3AE, 0x0000062A, 0x000028AF, 0xFFFFED24, 0x000004DF, 0x000028AF, 0xFFFFED24, 0x000004DF },
+ { 0x0213F0FFEF5C2884, 0x00003BDE, 0xFFFFE33B, 0x00000632, 0x00001B6C, 0xFFFFF720, 0x00000326, 0x00001B6C, 0xFFFFF720, 0x00000326 },
+ { 0x0213F0FFEF7210A4, 0x00003818, 0xFFFFE57D, 0x000005D4, 0x000020EF, 0xFFFFF327, 0x000003CE, 0x000020EF, 0xFFFFF327, 0x000003CE },
+ { 0x0213F0FFEF5E19A4, 0x000038DA, 0xFFFFE561, 0x000005D3, 0x0000297D, 0xFFFFED6D, 0x000004C5, 0x0000297D, 0xFFFFED6D, 0x000004C5 },
+ { 0x0213F0FFEF684884, 0x000027AC, 0xFFFFF0CE, 0x00000417, 0x00001F5F, 0xFFFFF484, 0x000003B2, 0x00001F5F, 0xFFFFF484, 0x000003B2 },
+ { 0x0213F0FFEF6648A4, 0x00003F02, 0xFFFFE222, 0x00000643, 0x000026D4, 0xFFFFF000, 0x00000443, 0x000026D4, 0xFFFFF000, 0x00000443 },
+ { 0x0213F0FFEF624164, 0x00004303, 0xFFFFDFE3, 0x00000690, 0x0000312C, 0xFFFFE912, 0x00000561, 0x0000312C, 0xFFFFE912, 0x00000561 },
+ { 0x0213F0FFEF600904, 0x000039E5, 0xFFFFE31F, 0x00000657, 0x00001D23, 0xFFFFF51F, 0x00000386, 0x00001D23, 0xFFFFF51F, 0x00000386 },
+ { 0x0213F0FFEF661144, 0x000041FA, 0xFFFFE01B, 0x00000697, 0x00002767, 0xFFFFEF90, 0x00000455, 0x00002767, 0xFFFFEF90, 0x00000455 },
+ { 0x0213F0FFEF6830A4, 0x00002888, 0xFFFFF11C, 0x00000403, 0x00001864, 0xFFFFF9D8, 0x000002D3, 0x00001864, 0xFFFFF9D8, 0x000002D3 },
+ { 0x0213EA94DE201864, 0x0000215C, 0xFFFFF5B6, 0x0000036D, 0x000015C5, 0xFFFFFB8A, 0x000002B5, 0x000015C5, 0xFFFFFB8A, 0x000002B5 },
+ { 0x0213F0FFEF683984, 0x00002FAF, 0xFFFFEC27, 0x000004CA, 0x00002184, 0xFFFFF39C, 0x000003CD, 0x00002184, 0xFFFFF39C, 0x000003CD },
+ { 0x0213F0FFEF5E10C4, 0x00004ACE, 0xFFFFD9A3, 0x000007BC, 0x00001A5D, 0xFFFFF7F6, 0x000002FC, 0x00001A5D, 0xFFFFF7F6, 0x000002FC },
+ { 0x0213F0FFEF5A3044, 0x00003763, 0xFFFFE797, 0x0000055F, 0x000029B5, 0xFFFFEEA1, 0x00000474, 0x000029B5, 0xFFFFEEA1, 0x00000474 },
+ { 0x0213F0FFEF5E3164, 0x00003832, 0xFFFFE6F9, 0x00000575, 0x00002C99, 0xFFFFEC42, 0x000004E3, 0x00002C99, 0xFFFFEC42, 0x000004E3 },
+ { 0x0213F0FFEF604164, 0x000041C9, 0xFFFFDE33, 0x0000071E, 0x0000199D, 0xFFFFF808, 0x000002F9, 0x0000199D, 0xFFFFF808, 0x000002F9 },
+ { 0x0213F0FFEF641164, 0x0000474A, 0xFFFFD96E, 0x00000802, 0x00002A30, 0xFFFFEB57, 0x0000053F, 0x00002A30, 0xFFFFEB57, 0x0000053F },
+ { 0x0213F0FFEF5C31C4, 0x0000312F, 0xFFFFEA6A, 0x00000508, 0x000029D3, 0xFFFFED38, 0x000004D3, 0x000029D3, 0xFFFFED38, 0x000004D3 },
+ { 0x0213F0FFEF7210C4, 0x00003BD6, 0xFFFFE2E7, 0x00000644, 0x00002093, 0xFFFFF37B, 0x000003BD, 0x00002093, 0xFFFFF37B, 0x000003BD },
+ { 0x0213F0FFEF6840E4, 0x00002F94, 0xFFFFECD4, 0x000004A3, 0x00002196, 0xFFFFF40B, 0x000003B5, 0x00002196, 0xFFFFF40B, 0x000003B5 },
+ { 0x0213F0FFEF5E1944, 0x0000369B, 0xFFFFE762, 0x00000571, 0x00002726, 0xFFFFEF99, 0x00000459, 0x00002726, 0xFFFFEF99, 0x00000459 },
+ { 0x0213F0FFEF642064, 0x00003F57, 0xFFFFDF47, 0x000006F4, 0x00002E5F, 0xFFFFE8AE, 0x000005AB, 0x00002E5F, 0xFFFFE8AE, 0x000005AB },
+ { 0x0213EA94DE0A40C4, 0x00004313, 0xFFFFDD81, 0x0000072D, 0x00002468, 0xFFFFF068, 0x00000440, 0x00002468, 0xFFFFF068, 0x00000440 },
+ { 0x0213F0FFEF683044, 0x00002A35, 0xFFFFEFA8, 0x00000441, 0x00001F3F, 0xFFFFF4F3, 0x000003A0, 0x00001F3F, 0xFFFFF4F3, 0x000003A0 },
+ { 0x0213F0FFEF6630A4, 0x00003E33, 0xFFFFE4B0, 0x000005AF, 0x00002802, 0xFFFFF092, 0x00000412, 0x00002802, 0xFFFFF092, 0x00000412 },
+ { 0x0213EA94DE323904, 0x00002815, 0xFFFFF20E, 0x000003DD, 0x00001C33, 0xFFFFF7D5, 0x0000032A, 0x00001C33, 0xFFFFF7D5, 0x0000032A },
+ { 0x0213F0FFEF5A2184, 0x00003CC2, 0xFFFFE43E, 0x000005DE, 0x00002C16, 0xFFFFECED, 0x000004BA, 0x00002C16, 0xFFFFECED, 0x000004BA },
+ { 0x0213F0FFEF5C4084, 0x00003CFA, 0xFFFFE1EE, 0x00000673, 0x00001F7D, 0xFFFFF402, 0x000003AE, 0x00001F7D, 0xFFFFF402, 0x000003AE },
+ { 0x0213F0FFEF622104, 0x0000486E, 0xFFFFDD43, 0x000006EE, 0x000036F0, 0xFFFFE609, 0x000005D5, 0x000036F0, 0xFFFFE609, 0x000005D5 },
+ { 0x0213F0FFEF5C4964, 0x000039FE, 0xFFFFE41F, 0x00000613, 0x0000266C, 0xFFFFEF35, 0x0000047D, 0x0000266C, 0xFFFFEF35, 0x0000047D },
+ { 0x0213EA94DE123124, 0x00002EA4, 0xFFFFEE3B, 0x00000462, 0x00002126, 0xFFFFF4E2, 0x0000038F, 0x00002126, 0xFFFFF4E2, 0x0000038F },
+ { 0x0213F0FFEF683944, 0x00002D2E, 0xFFFFEE7B, 0x00000462, 0x0000229D, 0xFFFFF363, 0x000003D4, 0x0000229D, 0xFFFFF363, 0x000003D4 },
+ { 0x0213F0FFEF5E2844, 0x0000375C, 0xFFFFE695, 0x0000059D, 0x00002319, 0xFFFFF237, 0x000003EE, 0x00002319, 0xFFFFF237, 0x000003EE },
+ { 0x0213F0FFEF7250C4, 0x00004522, 0xFFFFDC71, 0x0000075E, 0x0000247E, 0xFFFFF0A0, 0x0000043C, 0x0000247E, 0xFFFFF0A0, 0x0000043C },
+ { 0x0213EA94DE1248E4, 0x00002E58, 0xFFFFECB9, 0x000004A9, 0x0000199A, 0xFFFFF8CF, 0x000002E9, 0x0000199A, 0xFFFFF8CF, 0x000002E9 },
+ { 0x0213F0FFEF6438E4, 0x00003791, 0xFFFFE5FE, 0x000005B6, 0x000029F5, 0xFFFFED0D, 0x000004CD, 0x000029F5, 0xFFFFED0D, 0x000004CD },
+ { 0x0213EA94DE244144, 0x00002E9E, 0xFFFFEC8D, 0x000004C1, 0x000019D0, 0xFFFFF869, 0x0000030F, 0x000019D0, 0xFFFFF869, 0x0000030F },
+ { 0x0213EA94DE203964, 0x0000237C, 0xFFFFF435, 0x000003A6, 0x000014EB, 0xFFFFFBC4, 0x000002AF, 0x000014EB, 0xFFFFFBC4, 0x000002AF },
+ { 0x0213F0FFEF662924, 0x00003FE5, 0xFFFFE4A2, 0x000005A0, 0x00003416, 0xFFFFE995, 0x00000523, 0x00003416, 0xFFFFE995, 0x00000523 },
+ { 0x0213F0FFEF5C0924, 0x00002B27, 0xFFFFED51, 0x000004A5, 0x000025D1, 0xFFFFEF18, 0x00000492, 0x000025D1, 0xFFFFEF18, 0x00000492 },
+ { 0x0213F0FFEF684904, 0x00002D77, 0xFFFFED79, 0x00000494, 0x00002196, 0xFFFFF352, 0x000003DE, 0x00002196, 0xFFFFF352, 0x000003DE },
+ { 0x0213F0FFEF5C20C4, 0x00003750, 0xFFFFE6AC, 0x00000596, 0x00002524, 0xFFFFF0B5, 0x00000431, 0x00002524, 0xFFFFF0B5, 0x00000431 },
+ { 0x0213EA94DE122944, 0x00002896, 0xFFFFF1BB, 0x000003D9, 0x00001CE0, 0xFFFFF753, 0x0000032F, 0x00001CE0, 0xFFFFF753, 0x0000032F },
+ { 0x0213F0FFEF641984, 0x00003CA7, 0xFFFFE0F7, 0x000006B1, 0x00002CB8, 0xFFFFE9AB, 0x00000587, 0x00002CB8, 0xFFFFE9AB, 0x00000587 },
+ { 0x0213EA94DE322864, 0x00002513, 0xFFFFF323, 0x000003BC, 0x00001965, 0xFFFFF93C, 0x000002F0, 0x00001965, 0xFFFFF93C, 0x000002F0 },
+ { 0x0213F0FFEF662164, 0x00003914, 0xFFFFE683, 0x00000586, 0x00003120, 0xFFFFE9A6, 0x00000543, 0x00003120, 0xFFFFE9A6, 0x00000543 },
+ { 0x0213F0FFEF643904, 0x000040D0, 0xFFFFE007, 0x000006AC, 0x00002B9E, 0xFFFFEBF5, 0x000004FB, 0x00002B9E, 0xFFFFEBF5, 0x000004FB },
+ { 0x0213F0FFEF5A4884, 0x00004412, 0xFFFFDF5F, 0x000006A9, 0x00002A9E, 0xFFFFEDCE, 0x00000498, 0x00002A9E, 0xFFFFEDCE, 0x00000498 },
+ { 0x0213F0FFEF624884, 0x000042A6, 0xFFFFDFEF, 0x00000696, 0x00002E65, 0xFFFFEAAE, 0x00000529, 0x00002E65, 0xFFFFEAAE, 0x00000529 },
+ { 0x0213EA94DE322124, 0x000022E8, 0xFFFFF565, 0x0000035F, 0x00001890, 0xFFFFFA61, 0x000002C6, 0x00001890, 0xFFFFFA61, 0x000002C6 },
+ { 0x0213F0FFEF6239A4, 0x00004637, 0xFFFFDDD8, 0x000006E9, 0x0000349D, 0xFFFFE6C8, 0x000005C7, 0x0000349D, 0xFFFFE6C8, 0x000005C7 },
+ { 0x0213EA94DE263904, 0x00004686, 0xFFFFDC58, 0x0000073D, 0x00003972, 0xFFFFE27B, 0x0000068E, 0x00003972, 0xFFFFE27B, 0x0000068E },
+ { 0x0213F0FFEF6808E4, 0x00002B35, 0xFFFFEE9C, 0x0000046C, 0x00001F5B, 0xFFFFF4A3, 0x000003A9, 0x00001F5B, 0xFFFFF4A3, 0x000003A9 },
+ { 0x0213F0FFEF724144, 0x00003AC9, 0xFFFFE3B2, 0x0000061B, 0x000023A1, 0xFFFFF170, 0x0000040F, 0x000023A1, 0xFFFFF170, 0x0000040F },
+ { 0x0213F0FFEF5E1884, 0x00003C50, 0xFFFFE37E, 0x00000617, 0x0000218F, 0xFFFFF339, 0x000003C4, 0x0000218F, 0xFFFFF339, 0x000003C4 },
+ { 0x0213F0FFEF663044, 0x00003793, 0xFFFFE761, 0x0000055D, 0x000029C7, 0xFFFFEE03, 0x00000496, 0x000029C7, 0xFFFFEE03, 0x00000496 },
+ { 0x0213F0FFEF6438A4, 0x000040B5, 0xFFFFDF78, 0x000006DA, 0x00002DED, 0xFFFFEA20, 0x00000551, 0x00002DED, 0xFFFFEA20, 0x00000551 },
+ { 0x0213F0FFEF601144, 0x000039D6, 0xFFFFE37D, 0x0000063C, 0x00001AED, 0xFFFFF6E2, 0x00000331, 0x00001AED, 0xFFFFF6E2, 0x00000331 },
+ { 0x0213F0FFEF662144, 0x0000431F, 0xFFFFE09B, 0x0000066A, 0x00002BDF, 0xFFFFED93, 0x00000496, 0x00002BDF, 0xFFFFED93, 0x00000496 },
+ { 0x0213F0FFEF623864, 0x00004887, 0xFFFFDC65, 0x00000721, 0x00003669, 0xFFFFE5C4, 0x000005E9, 0x00003669, 0xFFFFE5C4, 0x000005E9 },
+ { 0x0213F0FFEF640924, 0x00004120, 0xFFFFDDAE, 0x00000748, 0x0000303B, 0xFFFFE70D, 0x000005FC, 0x0000303B, 0xFFFFE70D, 0x000005FC },
+ { 0x0213F0FFEF5E28A4, 0x0000415D, 0xFFFFE0BE, 0x0000067B, 0x00002FA7, 0xFFFFEA28, 0x00000538, 0x00002FA7, 0xFFFFEA28, 0x00000538 },
+ { 0x0213F0FFEF681904, 0x00002B12, 0xFFFFEFF9, 0x00000428, 0x00001DDA, 0xFFFFF693, 0x00000356, 0x00001DDA, 0xFFFFF693, 0x00000356 },
+ { 0x0213F0FFEF5E3184, 0x00003ED3, 0xFFFFE28D, 0x0000062D, 0x00002B00, 0xFFFFED4E, 0x000004B3, 0x00002B00, 0xFFFFED4E, 0x000004B3 },
+ { 0x0213F0FFEF6250A4, 0x00004218, 0xFFFFE039, 0x0000068F, 0x00002F84, 0xFFFFEA0C, 0x00000541, 0x00002F84, 0xFFFFEA0C, 0x00000541 },
+ { 0x0213F0FFEF5A3844, 0x00003FF5, 0xFFFFE2A3, 0x00000617, 0x00003017, 0xFFFFEA7A, 0x00000520, 0x00003017, 0xFFFFEA7A, 0x00000520 },
+ { 0x0213F0FFEF5A08A4, 0x00004304, 0xFFFFDFCC, 0x0000069E, 0x00002E0C, 0xFFFFEB51, 0x00000505, 0x00002E0C, 0xFFFFEB51, 0x00000505 },
+ { 0x0213F0FFEF641944, 0x00003D3A, 0xFFFFE17F, 0x00000687, 0x0000284C, 0xFFFFED83, 0x000004CD, 0x0000284C, 0xFFFFED83, 0x000004CD },
+ { 0x0213F0FFEF5E40A4, 0x000042F5, 0xFFFFDF76, 0x000006B2, 0x000027B6, 0xFFFFEF72, 0x00000455, 0x000027B6, 0xFFFFEF72, 0x00000455 },
+ { 0x0213F0FFEF5C38C4, 0x00004267, 0xFFFFDF29, 0x000006D5, 0x0000298F, 0xFFFFEDBD, 0x000004AC, 0x0000298F, 0xFFFFEDBD, 0x000004AC },
+ { 0x0213EA94DE240924, 0x0000303E, 0xFFFFEC00, 0x000004CB, 0x000021CD, 0xFFFFF36E, 0x000003D6, 0x000021CD, 0xFFFFF36E, 0x000003D6 },
+ { 0x0213F0FFEF5E28C4, 0x00003127, 0xFFFFEBDB, 0x000004A6, 0x00002E95, 0xFFFFEB78, 0x000004F3, 0x00002E95, 0xFFFFEB78, 0x000004F3 },
+ { 0x0213EA94DE1C1064, 0x00002655, 0xFFFFF2D9, 0x000003CF, 0x000019F5, 0xFFFFF8E7, 0x00000313, 0x000019F5, 0xFFFFF8E7, 0x00000313 },
+ { 0x0213EA94DE164084, 0x00002372, 0xFFFFF449, 0x0000039B, 0x00001544, 0xFFFFFC16, 0x0000028B, 0x00001544, 0xFFFFFC16, 0x0000028B },
+ { 0x0213F0FFEF6628C4, 0x0000348E, 0xFFFFEB20, 0x000004B2, 0x00002BE8, 0xFFFFEE80, 0x00000467, 0x00002BE8, 0xFFFFEE80, 0x00000467 },
+ { 0x0213F0FFEF5E1104, 0x00004092, 0xFFFFE073, 0x0000069B, 0x00002061, 0xFFFFF403, 0x000003A0, 0x00002061, 0xFFFFF403, 0x000003A0 },
+ { 0x0213F0FFEF7220E4, 0x000039D1, 0xFFFFE55D, 0x000005CC, 0x000025CB, 0xFFFFF0C0, 0x00000428, 0x000025CB, 0xFFFFF0C0, 0x00000428 },
+ { 0x0213F0FFEF5E4884, 0x000042AA, 0xFFFFDF68, 0x000006C2, 0x0000290B, 0xFFFFEE78, 0x00000485, 0x0000290B, 0xFFFFEE78, 0x00000485 },
+ { 0x0213F0FFEF7218C4, 0x0000356F, 0xFFFFE7AC, 0x0000056E, 0x00001BE8, 0xFFFFF6E3, 0x0000032A, 0x00001BE8, 0xFFFFF6E3, 0x0000032A },
+ { 0x0213F0FFEF5E1144, 0x00003525, 0xFFFFE7FF, 0x0000055D, 0x0000242C, 0xFFFFF12E, 0x0000041D, 0x0000242C, 0xFFFFF12E, 0x0000041D },
+ { 0x0213F0FFEF5C48C4, 0x00003360, 0xFFFFE895, 0x00000550, 0x00002175, 0xFFFFF29E, 0x000003E9, 0x00002175, 0xFFFFF29E, 0x000003E9 },
+ { 0x0213F0FFEF6440A4, 0x00003C94, 0xFFFFE1C4, 0x0000067E, 0x00002E28, 0xFFFFE964, 0x0000057F, 0x00002E28, 0xFFFFE964, 0x0000057F },
+ { 0x0213F0FFEF724124, 0x0000431C, 0xFFFFDE4B, 0x000006FF, 0x00002270, 0xFFFFF268, 0x000003E5, 0x00002270, 0xFFFFF268, 0x000003E5 },
+ { 0x0213EA94DE1218C4, 0x00002B67, 0xFFFFF01D, 0x00000414, 0x000019FB, 0xFFFFF961, 0x000002D8, 0x000019FB, 0xFFFFF961, 0x000002D8 },
+ { 0x0213F0FFEF5E3984, 0x0000400B, 0xFFFFE13D, 0x0000066F, 0x000024F3, 0xFFFFF125, 0x00000417, 0x000024F3, 0xFFFFF125, 0x00000417 },
+ { 0x0213F0FFEF5A20A4, 0x00004460, 0xFFFFE00E, 0x0000067B, 0x000023DF, 0xFFFFF2E6, 0x000003BB, 0x000023DF, 0xFFFFF2E6, 0x000003BB },
+ { 0x0213F0FFEF641864, 0x00003AFB, 0xFFFFE2C5, 0x00000650, 0x00002D46, 0xFFFFE9C4, 0x00000571, 0x00002D46, 0xFFFFE9C4, 0x00000571 },
+ { 0x0213F0FFEF622924, 0x00005482, 0xFFFFD5BC, 0x0000081A, 0x00003250, 0xFFFFE961, 0x00000541, 0x00003250, 0xFFFFE961, 0x00000541 },
+ { 0x0213F0FFEF5C2944, 0x00003D27, 0xFFFFE2FA, 0x00000632, 0x00002A4D, 0xFFFFED6A, 0x000004BB, 0x00002A4D, 0xFFFFED6A, 0x000004BB },
+ { 0x0213F0FFEF6018A4, 0x00003E03, 0xFFFFE142, 0x00000690, 0x00001E08, 0xFFFFF555, 0x0000036C, 0x00001E08, 0xFFFFF555, 0x0000036C },
+ { 0x0213F0FFEF5C2064, 0x000031B5, 0xFFFFE97D, 0x00000535, 0x0000232E, 0xFFFFF166, 0x00000422, 0x0000232E, 0xFFFFF166, 0x00000422 },
+ { 0x0213F0FFEF5E18E4, 0x00003753, 0xFFFFE724, 0x00000575, 0x0000281A, 0xFFFFEF1A, 0x0000046B, 0x0000281A, 0xFFFFEF1A, 0x0000046B },
+ { 0x0213EA94DE204144, 0x00002071, 0xFFFFF5C9, 0x0000036F, 0x00001470, 0xFFFFFBF7, 0x000002A5, 0x00001470, 0xFFFFFBF7, 0x000002A5 },
+ { 0x0213F0FFEF683144, 0x00002799, 0xFFFFF223, 0x000003CF, 0x00001CD3, 0xFFFFF74A, 0x00000333, 0x00001CD3, 0xFFFFF74A, 0x00000333 },
+ { 0x0213F0FFEF6610C4, 0x000040DF, 0xFFFFE11C, 0x00000664, 0x000031D4, 0xFFFFE8BC, 0x0000056F, 0x000031D4, 0xFFFFE8BC, 0x0000056F },
+ { 0x0213F0FFEF6440C4, 0x00003A4D, 0xFFFFE3A6, 0x00000627, 0x00002871, 0xFFFFEDA0, 0x000004C0, 0x00002871, 0xFFFFEDA0, 0x000004C0 },
+ { 0x0213F0FFEF681984, 0x00002AF9, 0xFFFFEED7, 0x00000464, 0x0000219B, 0xFFFFF368, 0x000003D6, 0x0000219B, 0xFFFFF368, 0x000003D6 },
+ { 0x0213EA94DE323124, 0x000026D5, 0xFFFFF36C, 0x000003A3, 0x00001BC6, 0xFFFFF881, 0x00000311, 0x00001BC6, 0xFFFFF881, 0x00000311 },
+ { 0x0213F0FFEF5E2044, 0x0000325D, 0xFFFFEA07, 0x0000050B, 0x000026D1, 0xFFFFEFB3, 0x0000045A, 0x000026D1, 0xFFFFEFB3, 0x0000045A },
+ { 0x0213F0FFEF682864, 0x00002F75, 0xFFFFEC64, 0x000004BE, 0x00001EEB, 0xFFFFF559, 0x00000386, 0x00001EEB, 0xFFFFF559, 0x00000386 },
+ { 0x0213F0FFEF5A38A4, 0x00003C2F, 0xFFFFE541, 0x000005A3, 0x000025B6, 0xFFFFF16F, 0x000003FA, 0x000025B6, 0xFFFFF16F, 0x000003FA },
+ { 0x0213F0FFEF684924, 0x00002BC2, 0xFFFFEE89, 0x0000046A, 0x00001D04, 0xFFFFF651, 0x00000361, 0x00001D04, 0xFFFFF651, 0x00000361 },
+ { 0x0213F0FFEF6829A4, 0x00002DD0, 0xFFFFED40, 0x0000049F, 0x00001C8C, 0xFFFFF6B3, 0x00000353, 0x00001C8C, 0xFFFFF6B3, 0x00000353 },
+ { 0x0213EA94DE1C08E4, 0x000021ED, 0xFFFFF530, 0x00000380, 0x00001643, 0xFFFFFB1C, 0x000002C3, 0x00001643, 0xFFFFFB1C, 0x000002C3 },
+ { 0x0213EA94DE321904, 0x000028C7, 0xFFFFF160, 0x000003FD, 0x00001990, 0xFFFFF994, 0x000002E2, 0x00001990, 0xFFFFF994, 0x000002E2 },
+ { 0x0213F0FFEF6610A4, 0x0000431C, 0xFFFFDF9D, 0x000006A3, 0x000034A6, 0xFFFFE6B0, 0x000005C9, 0x000034A6, 0xFFFFE6B0, 0x000005C9 },
+ { 0x0213EA94DE2630A4, 0x00004115, 0xFFFFE0D6, 0x00000667, 0x000031AD, 0xFFFFE850, 0x00000585, 0x000031AD, 0xFFFFE850, 0x00000585 },
+ { 0x0213F0FFEF643924, 0x0000424A, 0xFFFFDEEC, 0x000006E1, 0x0000346A, 0xFFFFE5EA, 0x00000602, 0x0000346A, 0xFFFFE5EA, 0x00000602 },
+ { 0x0213F0FFEF661984, 0x00004990, 0xFFFFDAFA, 0x00000771, 0x00002A9C, 0xFFFFED37, 0x000004BC, 0x00002A9C, 0xFFFFED37, 0x000004BC },
+ { 0x0213F0FFEF6428A4, 0x00003858, 0xFFFFE568, 0x000005D2, 0x00003030, 0xFFFFE8B0, 0x0000058E, 0x00003030, 0xFFFFE8B0, 0x0000058E },
+ { 0x0213F0FFEF684164, 0x00001EDC, 0xFFFFF6CD, 0x00000322, 0x00001FCA, 0xFFFFF4BD, 0x0000039E, 0x00001FCA, 0xFFFFF4BD, 0x0000039E },
+ { 0x0213F0FFEF662124, 0x00004C88, 0xFFFFDBA3, 0x0000071B, 0x000030C4, 0xFFFFEAFD, 0x000004F7, 0x000030C4, 0xFFFFEAFD, 0x000004F7 },
+ { 0x0213F0FFEF680904, 0x00002B9A, 0xFFFFEE41, 0x0000047D, 0x00002131, 0xFFFFF344, 0x000003E5, 0x00002131, 0xFFFFF344, 0x000003E5 },
+ { 0x0213F0FFEF623984, 0x00003E4B, 0xFFFFE33C, 0x000005FA, 0x00003877, 0xFFFFE437, 0x0000062E, 0x00003877, 0xFFFFE437, 0x0000062E },
+ { 0x0213EA94DE322064, 0x00002376, 0xFFFFF444, 0x0000038A, 0x000017ED, 0xFFFFFA4C, 0x000002C1, 0x000017ED, 0xFFFFFA4C, 0x000002C1 },
+ { 0x0213F0FFEF661084, 0x00004517, 0xFFFFDDF4, 0x000006F2, 0x000030DC, 0xFFFFE8EF, 0x00000571, 0x000030DC, 0xFFFFE8EF, 0x00000571 },
+ { 0x0213F0FFEF681944, 0x0000270C, 0xFFFFF1F3, 0x000003DF, 0x0000207B, 0xFFFFF474, 0x000003AD, 0x0000207B, 0xFFFFF474, 0x000003AD },
+ { 0x0213F0FFEF645144, 0x00004086, 0xFFFFDF39, 0x000006E3, 0x00002A24, 0xFFFFEC2B, 0x000004FF, 0x00002A24, 0xFFFFEC2B, 0x000004FF },
+ { 0x0213F0FFEF5C3124, 0x00003BDE, 0xFFFFE45E, 0x000005EB, 0x00002CD5, 0xFFFFEC45, 0x000004DD, 0x00002CD5, 0xFFFFEC45, 0x000004DD },
+ { 0x0213F0FFEF7230E4, 0x00003803, 0xFFFFE714, 0x00000579, 0x0000288A, 0xFFFFEF21, 0x0000046B, 0x0000288A, 0xFFFFEF21, 0x0000046B },
+ { 0x0213F0FFEF601104, 0x00003F50, 0xFFFFE002, 0x000006CD, 0x00001AD4, 0xFFFFF72E, 0x0000031F, 0x00001AD4, 0xFFFFF72E, 0x0000031F },
+ { 0x0213F0FFEF6820E4, 0x00002968, 0xFFFFF100, 0x00000402, 0x00001FB5, 0xFFFFF57C, 0x0000037F, 0x00001FB5, 0xFFFFF57C, 0x0000037F },
+ { 0x0213F0FFEF662104, 0x00004283, 0xFFFFE2A7, 0x000005F5, 0x00003165, 0xFFFFEB0C, 0x000004EC, 0x00003165, 0xFFFFEB0C, 0x000004EC },
+ { 0x0213F0FFEF6431A4, 0x00004253, 0xFFFFDDA8, 0x00000732, 0x00002E5C, 0xFFFFE90A, 0x00000593, 0x00002E5C, 0xFFFFE90A, 0x00000593 },
+ { 0x0213F0FFEF5C50A4, 0x00003551, 0xFFFFE756, 0x0000058D, 0x000029A7, 0xFFFFED0C, 0x000004DE, 0x000029A7, 0xFFFFED0C, 0x000004DE },
+ { 0x0213F0FFEF6428C4, 0x00003728, 0xFFFFE604, 0x000005C4, 0x00002832, 0xFFFFEE64, 0x00000493, 0x00002832, 0xFFFFEE64, 0x00000493 },
+ { 0x0213F0FFEF623964, 0x00004796, 0xFFFFDCC8, 0x00000715, 0x000032AB, 0xFFFFE848, 0x0000057C, 0x000032AB, 0xFFFFE848, 0x0000057C },
+ { 0x0213F0FFEF6210C4, 0x000049DF, 0xFFFFDB24, 0x0000075F, 0x00003076, 0xFFFFE967, 0x0000055C, 0x00003076, 0xFFFFE967, 0x0000055C },
+ { 0x0213F0FFEF721104, 0x00003F13, 0xFFFFE099, 0x000006A8, 0x00002279, 0xFFFFF226, 0x000003F3, 0x00002279, 0xFFFFF226, 0x000003F3 },
+ { 0x0213F0FFEF6430A4, 0x00003E03, 0xFFFFE19F, 0x00000674, 0x00002D66, 0xFFFFEAA7, 0x00000537, 0x00002D66, 0xFFFFEAA7, 0x00000537 },
+ { 0x0213F0FFEF5C4104, 0x000037DA, 0xFFFFE63F, 0x000005A7, 0x00002543, 0xFFFFF0A0, 0x00000431, 0x00002543, 0xFFFFF0A0, 0x00000431 },
+ { 0x0213F0FFEF624944, 0x00003D82, 0xFFFFE3F5, 0x000005D9, 0x0000332F, 0xFFFFE834, 0x00000577, 0x0000332F, 0xFFFFE834, 0x00000577 },
+ { 0x0213EA94DE1228C4, 0x00002915, 0xFFFFF1E0, 0x000003D4, 0x00002065, 0xFFFFF57B, 0x00000378, 0x00002065, 0xFFFFF57B, 0x00000378 },
+ { 0x0213F0FFEF5E4904, 0x000036FC, 0xFFFFE72D, 0x00000577, 0x00002811, 0xFFFFEF30, 0x00000464, 0x00002811, 0xFFFFEF30, 0x00000464 },
+ { 0x0213F0FFEF623184, 0x00004767, 0xFFFFDD30, 0x000006FD, 0x00003703, 0xFFFFE564, 0x000005F8, 0x00003703, 0xFFFFE564, 0x000005F8 },
+ { 0x0213F0FFEF603184, 0x00003094, 0xFFFFEAA9, 0x000004F5, 0x000022E7, 0xFFFFF200, 0x000003FB, 0x000022E7, 0xFFFFF200, 0x000003FB },
+ { 0x0213F0FFEF641144, 0x00003EF0, 0xFFFFDF83, 0x000006ED, 0x00002A27, 0xFFFFEB7C, 0x00000537, 0x00002A27, 0xFFFFEB7C, 0x00000537 },
+ { 0x0213F0FFEF681124, 0x0000243C, 0xFFFFF358, 0x000003AC, 0x00001DC4, 0xFFFFF5E9, 0x00000372, 0x00001DC4, 0xFFFFF5E9, 0x00000372 },
+ { 0x0213F0FFEF722144, 0x0000284B, 0xFFFFF036, 0x0000040F, 0x00001FCD, 0xFFFFF445, 0x00000395, 0x00001FCD, 0xFFFFF445, 0x00000395 },
+ { 0x0213F0FFEF6840C4, 0x00002611, 0xFFFFF285, 0x000003C7, 0x00001CFE, 0xFFFFF6A0, 0x00000355, 0x00001CFE, 0xFFFFF6A0, 0x00000355 },
+ { 0x0213EA94DE1C39A4, 0x00002292, 0xFFFFF49F, 0x00000393, 0x000017F4, 0xFFFFF9CD, 0x000002F5, 0x000017F4, 0xFFFFF9CD, 0x000002F5 },
+ { 0x0213F0FFEF5E38A4, 0x000037F3, 0xFFFFE68D, 0x00000590, 0x00002443, 0xFFFFF1AD, 0x000003FA, 0x00002443, 0xFFFFF1AD, 0x000003FA },
+ { 0x0213F0FFEF682144, 0x00002C01, 0xFFFFEF3F, 0x00000444, 0x0000210A, 0xFFFFF475, 0x000003A7, 0x0000210A, 0xFFFFF475, 0x000003A7 },
+ { 0x0213EA94DE1210E4, 0x00002C0E, 0xFFFFEF0F, 0x00000446, 0x00001A82, 0xFFFFF8F7, 0x000002DE, 0x00001A82, 0xFFFFF8F7, 0x000002DE },
+ { 0x0213F0FFEF5E20C4, 0x00003FA6, 0xFFFFE20A, 0x0000063F, 0x00002E29, 0xFFFFEB21, 0x00000510, 0x00002E29, 0xFFFFEB21, 0x00000510 },
+ { 0x0213F0FFEF5C2164, 0x00003BCD, 0xFFFFE31B, 0x0000063C, 0x000019AF, 0xFFFFF83D, 0x000002F8, 0x000019AF, 0xFFFFF83D, 0x000002F8 },
+ { 0x0213F0FFEF664164, 0x000044C8, 0xFFFFDF08, 0x000006B0, 0x00002E2E, 0xFFFFEB62, 0x000004FD, 0x00002E2E, 0xFFFFEB62, 0x000004FD },
+ { 0x0213F0FFEF5C1884, 0x00003790, 0xFFFFE571, 0x000005E3, 0x00002042, 0xFFFFF35D, 0x000003CF, 0x00002042, 0xFFFFF35D, 0x000003CF },
+ { 0x0213F0FFEF6050E4, 0x000038AC, 0xFFFFE46C, 0x00000609, 0x0000215E, 0xFFFFF22D, 0x00000403, 0x0000215E, 0xFFFFF22D, 0x00000403 },
+ { 0x0213F0FFEF5E29A4, 0x00003A1E, 0xFFFFE536, 0x000005C9, 0x000024F3, 0xFFFFF11A, 0x0000041B, 0x000024F3, 0xFFFFF11A, 0x0000041B },
+ { 0x0213F0FFEF6650E4, 0x0000431A, 0xFFFFDF1B, 0x000006C5, 0x00002F34, 0xFFFFEA02, 0x00000545, 0x00002F34, 0xFFFFEA02, 0x00000545 },
+ { 0x0213F0FFEF641904, 0x000042DC, 0xFFFFDE28, 0x0000070C, 0x00003B53, 0xFFFFE0EA, 0x000006E2, 0x00003B53, 0xFFFFE0EA, 0x000006E2 },
+ { 0x0213F0FFEF683164, 0x0000264B, 0xFFFFF29A, 0x000003C4, 0x000021D0, 0xFFFFF3CE, 0x000003C4, 0x000021D0, 0xFFFFF3CE, 0x000003C4 },
+ { 0x0213F0FFEF5A4064, 0x00004225, 0xFFFFE0E8, 0x00000665, 0x00002B53, 0xFFFFED89, 0x0000049F, 0x00002B53, 0xFFFFED89, 0x0000049F },
+ { 0x0213EA94DE204924, 0x00001FCC, 0xFFFFF63F, 0x00000358, 0x000019E8, 0xFFFFF882, 0x0000032A, 0x000019E8, 0xFFFFF882, 0x0000032A },
+ { 0x0213F0FFEF6240A4, 0x000045E0, 0xFFFFDDD0, 0x000006ED, 0x00003193, 0xFFFFE8BD, 0x00000572, 0x00003193, 0xFFFFE8BD, 0x00000572 },
+ { 0x0213F0FFEF683924, 0x000024FC, 0xFFFFF366, 0x000003A6, 0x00001FE8, 0xFFFFF509, 0x00000394, 0x00001FE8, 0xFFFFF509, 0x00000394 },
+ { 0x0213F0FFEF5C4884, 0x0000378F, 0xFFFFE54B, 0x000005F1, 0x00001C9B, 0xFFFFF5C7, 0x00000368, 0x00001C9B, 0xFFFFF5C7, 0x00000368 },
+ { 0x0213F0FFEF6418A4, 0x00003CF3, 0xFFFFE15A, 0x00000694, 0x00002CDD, 0xFFFFEA44, 0x00000557, 0x00002CDD, 0xFFFFEA44, 0x00000557 },
+ { 0x0213EA94DE200904, 0x000021EC, 0xFFFFF4F4, 0x0000038F, 0x00001511, 0xFFFFFBF5, 0x0000029E, 0x00001511, 0xFFFFFBF5, 0x0000029E },
+ { 0x0213F0FFEF6010A4, 0x00003C8A, 0xFFFFE1C1, 0x00000685, 0x000019C7, 0xFFFFF7E2, 0x00000301, 0x000019C7, 0xFFFFF7E2, 0x00000301 },
+ { 0x0213F0FFEF5E2064, 0x00003908, 0xFFFFE5C7, 0x000005B3, 0x00002793, 0xFFFFEF46, 0x00000465, 0x00002793, 0xFFFFEF46, 0x00000465 },
+ { 0x0213F0FFEF605104, 0x000040A3, 0xFFFFDE61, 0x00000725, 0x00002077, 0xFFFFF2CE, 0x000003E8, 0x00002077, 0xFFFFF2CE, 0x000003E8 },
+ { 0x0213F0FFEF664144, 0x00003DCA, 0xFFFFE34D, 0x00000608, 0x00002D66, 0xFFFFEBDF, 0x000004E8, 0x00002D66, 0xFFFFEBDF, 0x000004E8 },
+ { 0x0213F0FFEF5E50C4, 0x00003085, 0xFFFFEB70, 0x000004C8, 0x000029B1, 0xFFFFEDD9, 0x000004A5, 0x000029B1, 0xFFFFEDD9, 0x000004A5 },
+ { 0x0213EA94DE083884, 0x00004C73, 0xFFFFD676, 0x0000086C, 0x0000280A, 0xFFFFED89, 0x000004C2, 0x0000280A, 0xFFFFED89, 0x000004C2 },
+ { 0x0213EA94DE242164, 0x00002CE5, 0xFFFFEE8C, 0x00000466, 0x00001755, 0xFFFFFAC2, 0x000002AC, 0x00001755, 0xFFFFFAC2, 0x000002AC },
+ { 0x0213F0FFEF621124, 0x0000489F, 0xFFFFDBF1, 0x0000073E, 0x0000332D, 0xFFFFE786, 0x000005AD, 0x0000332D, 0xFFFFE786, 0x000005AD },
+ { 0x0213F0FFEF602864, 0x00003D09, 0xFFFFE193, 0x00000689, 0x00001E82, 0xFFFFF4C0, 0x00000386, 0x00001E82, 0xFFFFF4C0, 0x00000386 },
+ { 0x0213F0FFEF644104, 0x00003E4C, 0xFFFFE131, 0x00000689, 0x00002F4E, 0xFFFFE925, 0x0000057B, 0x00002F4E, 0xFFFFE925, 0x0000057B },
+ { 0x0213F0FFEF5A4084, 0x00003B31, 0xFFFFE53F, 0x000005B3, 0x0000248A, 0xFFFFF211, 0x000003DF, 0x0000248A, 0xFFFFF211, 0x000003DF },
+ { 0x0213F0FFEF644124, 0x000038DD, 0xFFFFE54A, 0x000005C9, 0x00002B6D, 0xFFFFEBDF, 0x00000502, 0x00002B6D, 0xFFFFEBDF, 0x00000502 },
+ { 0x0213F0FFEF684064, 0x00002698, 0xFFFFF1A8, 0x000003F2, 0x00002163, 0xFFFFF34B, 0x000003E3, 0x00002163, 0xFFFFF34B, 0x000003E3 },
+ { 0x0213EA94DE201064, 0x000023A8, 0xFFFFF4CD, 0x00000386, 0x00001944, 0xFFFFF983, 0x00000300, 0x00001944, 0xFFFFF983, 0x00000300 },
+ { 0x0213F0FFEF6418C4, 0x00003EAF, 0xFFFFE0C3, 0x000006A0, 0x000030AB, 0xFFFFE829, 0x000005A6, 0x000030AB, 0xFFFFE829, 0x000005A6 },
+ { 0x0213F0FFEF684944, 0x00002E89, 0xFFFFECA6, 0x000004B6, 0x00001FA0, 0xFFFFF4A8, 0x000003A3, 0x00001FA0, 0xFFFFF4A8, 0x000003A3 },
+ { 0x0213F0FFEF6828A4, 0x000028A4, 0xFFFFF112, 0x00000402, 0x00001F7C, 0xFFFFF545, 0x0000038A, 0x00001F7C, 0xFFFFF545, 0x0000038A },
+ { 0x0213F0FFEF6650A4, 0x00004135, 0xFFFFDFA2, 0x000006C5, 0x0000324C, 0xFFFFE7AA, 0x000005AF, 0x0000324C, 0xFFFFE7AA, 0x000005AF },
+ { 0x0213EA94DE2038C4, 0x00002012, 0xFFFFF693, 0x00000352, 0x0000171F, 0xFFFFFABB, 0x000002D9, 0x0000171F, 0xFFFFFABB, 0x000002D9 },
+ { 0x0213F0FFEF643084, 0x00003D7C, 0xFFFFE1BC, 0x00000671, 0x00002A45, 0xFFFFEC84, 0x000004EC, 0x00002A45, 0xFFFFEC84, 0x000004EC },
+ { 0x0213F0FFEF723064, 0x00004172, 0xFFFFDF58, 0x000006DA, 0x00002504, 0xFFFFF0A6, 0x00000431, 0x00002504, 0xFFFFF0A6, 0x00000431 },
+ { 0x0213F0FE99281944, 0x000029C7, 0xFFFFF087, 0x00000414, 0x00001DCB, 0xFFFFF675, 0x0000035F, 0x00001DCB, 0xFFFFF675, 0x0000035F },
+ { 0x0213F0FE992A29A4, 0x000027F0, 0xFFFFF05A, 0x00000432, 0x00001707, 0xFFFFFA0E, 0x000002D1, 0x00001707, 0xFFFFFA0E, 0x000002D1 },
+ { 0x0213F0FE99222144, 0x00003279, 0xFFFFE9F7, 0x00000511, 0x00001B5E, 0xFFFFF787, 0x00000317, 0x00001B5E, 0xFFFFF787, 0x00000317 },
+ { 0x0213F0FE99322184, 0x000030A5, 0xFFFFEABC, 0x000004FF, 0x000019D1, 0xFFFFF83C, 0x00000304, 0x000019D1, 0xFFFFF83C, 0x00000304 },
+ { 0x0213F0FE99282844, 0x0000283B, 0xFFFFF122, 0x00000402, 0x000019C2, 0xFFFFF8E9, 0x000002FB, 0x000019C2, 0xFFFFF8E9, 0x000002FB },
+ { 0x0213F0FE992C2084, 0x00003376, 0xFFFFE9E1, 0x00000510, 0x000021A7, 0xFFFFF39F, 0x000003BF, 0x000021A7, 0xFFFFF39F, 0x000003BF },
+ { 0x0213F0FE993218C4, 0x000031D2, 0xFFFFEA9C, 0x000004FC, 0x00001F66, 0xFFFFF4E4, 0x00000390, 0x00001F66, 0xFFFFF4E4, 0x00000390 },
+ { 0x0213F0FE991A3864, 0x00003006, 0xFFFFEB18, 0x000004F2, 0x000019B3, 0xFFFFF84E, 0x00000301, 0x000019B3, 0xFFFFF84E, 0x00000301 },
+ { 0x0213F0FE993039A4, 0x0000364F, 0xFFFFE81F, 0x00000556, 0x00002AC9, 0xFFFFED87, 0x000004BD, 0x00002AC9, 0xFFFFED87, 0x000004BD },
+ { 0x0213F0FE992E3844, 0x00003043, 0xFFFFEBAE, 0x000004CD, 0x00001B0C, 0xFFFFF7ED, 0x0000030C, 0x00001B0C, 0xFFFFF7ED, 0x0000030C },
+ { 0x0213F0FE993048A4, 0x000037CE, 0xFFFFE69E, 0x00000596, 0x0000276B, 0xFFFFEF65, 0x0000046E, 0x0000276B, 0xFFFFEF65, 0x0000046E },
+ { 0x0213F0FE992C3104, 0x00003063, 0xFFFFED5E, 0x0000046F, 0x000024AE, 0xFFFFF2C4, 0x000003D8, 0x000024AE, 0xFFFFF2C4, 0x000003D8 },
+ { 0x0213F0FE992E08A4, 0x00002F5D, 0xFFFFEBDC, 0x000004D3, 0x00001EDB, 0xFFFFF50F, 0x0000038E, 0x00001EDB, 0xFFFFF50F, 0x0000038E },
+ { 0x0213F0FE992E48A4, 0x00003148, 0xFFFFEA9A, 0x000004FB, 0x0000192D, 0xFFFFF8E9, 0x000002DF, 0x0000192D, 0xFFFFF8E9, 0x000002DF },
+ { 0x0213F0FE992C2064, 0x00003682, 0xFFFFE7E4, 0x0000055C, 0x0000250E, 0xFFFFF150, 0x0000041A, 0x0000250E, 0xFFFFF150, 0x0000041A },
+ { 0x0213F0FE992A2084, 0x0000284E, 0xFFFFF15A, 0x000003F8, 0x00001CE2, 0xFFFFF6F9, 0x0000034F, 0x00001CE2, 0xFFFFF6F9, 0x0000034F },
+ { 0x0213F0FE993018A4, 0x00003171, 0xFFFFEAE9, 0x000004ED, 0x00001F40, 0xFFFFF513, 0x00000384, 0x00001F40, 0xFFFFF513, 0x00000384 },
+ { 0x0213F0FE99323044, 0x000031BD, 0xFFFFEA64, 0x0000050A, 0x00001EFD, 0xFFFFF4F7, 0x00000390, 0x00001EFD, 0xFFFFF4F7, 0x00000390 },
+ { 0x0213F0FE992E50E4, 0x00003050, 0xFFFFEB29, 0x000004EA, 0x000019B3, 0xFFFFF878, 0x000002F9, 0x000019B3, 0xFFFFF878, 0x000002F9 },
+ { 0x0213F0FE992C1904, 0x00003400, 0xFFFFE9A0, 0x0000051A, 0x00002460, 0xFFFFF1DA, 0x00000409, 0x00002460, 0xFFFFF1DA, 0x00000409 },
+ { 0x0213F0FE992C4884, 0x000034A1, 0xFFFFE86F, 0x00000558, 0x0000255D, 0xFFFFF09E, 0x00000443, 0x0000255D, 0xFFFFF09E, 0x00000443 },
+ { 0x0213F0FE992E48E4, 0x00003103, 0xFFFFEAD7, 0x000004F0, 0x00001896, 0xFFFFF95A, 0x000002CC, 0x00001896, 0xFFFFF95A, 0x000002CC },
+ { 0x0213F0FE993018E4, 0x00003120, 0xFFFFEB9E, 0x000004CB, 0x000021E8, 0xFFFFF3A2, 0x000003C1, 0x000021E8, 0xFFFFF3A2, 0x000003C1 },
+ { 0x0213F0FE991C50E4, 0x00003558, 0xFFFFE812, 0x00000565, 0x0000256E, 0xFFFFF097, 0x00000447, 0x0000256E, 0xFFFFF097, 0x00000447 },
+ { 0x0213F0FE991A2844, 0x00002DA8, 0xFFFFECA8, 0x000004B7, 0x0000180B, 0xFFFFF96D, 0x000002D8, 0x0000180B, 0xFFFFF96D, 0x000002D8 },
+ { 0x0213F0FE992E3064, 0x00003232, 0xFFFFEA66, 0x000004FF, 0x00001DDE, 0xFFFFF5FE, 0x0000035A, 0x00001DDE, 0xFFFFF5FE, 0x0000035A },
+ { 0x0213F0FE993050E4, 0x000034D2, 0xFFFFE89F, 0x00000548, 0x0000246C, 0xFFFFF17F, 0x00000418, 0x0000246C, 0xFFFFF17F, 0x00000418 },
+ { 0x0213F0FE99304904, 0x000033EC, 0xFFFFE954, 0x0000052A, 0x00002323, 0xFFFFF279, 0x000003EE, 0x00002323, 0xFFFFF279, 0x000003EE },
+ { 0x0213F0FE99303884, 0x000033AA, 0xFFFFE955, 0x0000052D, 0x0000229F, 0xFFFFF2B2, 0x000003E7, 0x0000229F, 0xFFFFF2B2, 0x000003E7 },
+ { 0x0213F0FE99324964, 0x00003258, 0xFFFFE9AA, 0x0000052A, 0x00001D5F, 0xFFFFF5D1, 0x0000036B, 0x00001D5F, 0xFFFFF5D1, 0x0000036B },
+ { 0x0213F0FE993029A4, 0x0000323A, 0xFFFFEA5F, 0x00000504, 0x00002108, 0xFFFFF3D5, 0x000003BA, 0x00002108, 0xFFFFF3D5, 0x000003BA },
+ { 0x0213F0FE992C2184, 0x00003216, 0xFFFFEA6B, 0x000004FF, 0x00001D6E, 0xFFFFF640, 0x00000350, 0x00001D6E, 0xFFFFF640, 0x00000350 },
+ { 0x0213F0FE993210E4, 0x000030C5, 0xFFFFEAC4, 0x000004FC, 0x00001924, 0xFFFFF8C2, 0x000002EE, 0x00001924, 0xFFFFF8C2, 0x000002EE },
+ { 0x0213F0FE99305104, 0x000032BB, 0xFFFFE9F1, 0x00000515, 0x00002211, 0xFFFFF31B, 0x000003D5, 0x00002211, 0xFFFFF31B, 0x000003D5 },
+ { 0x0213F0FE993048C4, 0x0000352C, 0xFFFFE85B, 0x00000553, 0x00002410, 0xFFFFF1B4, 0x0000040F, 0x00002410, 0xFFFFF1B4, 0x0000040F },
+ { 0x0213F0FE992238C4, 0x000036A0, 0xFFFFE7E8, 0x0000055D, 0x00002901, 0xFFFFEEB8, 0x00000489, 0x00002901, 0xFFFFEEB8, 0x00000489 },
+ { 0x0213F0FE992C3044, 0x00003340, 0xFFFFE9D9, 0x00000516, 0x00002332, 0xFFFFF27A, 0x000003F0, 0x00002332, 0xFFFFF27A, 0x000003F0 },
+ { 0x0213F0FE991A38A4, 0x00003564, 0xFFFFE86D, 0x0000054E, 0x00002613, 0xFFFFF07C, 0x00000444, 0x00002613, 0xFFFFF07C, 0x00000444 },
+ { 0x0213F0FE99280904, 0x00002AD1, 0xFFFFEF0B, 0x0000045C, 0x00001DEA, 0xFFFFF5C8, 0x00000381, 0x00001DEA, 0xFFFFF5C8, 0x00000381 },
+ { 0x0213F0FE992220E4, 0x000035B0, 0xFFFFE846, 0x00000555, 0x000027BE, 0xFFFFEF5D, 0x00000474, 0x000027BE, 0xFFFFEF5D, 0x00000474 },
+ { 0x0213F0FE992238A4, 0x000032C4, 0xFFFFEA48, 0x00000502, 0x000022C6, 0xFFFFF2DF, 0x000003DE, 0x000022C6, 0xFFFFF2DF, 0x000003DE },
+ { 0x0213F0FE993008C4, 0x00003036, 0xFFFFEB0D, 0x000004F9, 0x00001FE8, 0xFFFFF41A, 0x000003BC, 0x00001FE8, 0xFFFFF41A, 0x000003BC },
+ { 0x0213F0FE991A0904, 0x000030F8, 0xFFFFEA13, 0x00000524, 0x00001B6A, 0xFFFFF6C9, 0x0000034A, 0x00001B6A, 0xFFFFF6C9, 0x0000034A },
+ { 0x0213F0FE993010A4, 0x00002EE2, 0xFFFFEC0C, 0x000004CB, 0x00001A39, 0xFFFFF814, 0x0000030F, 0x00001A39, 0xFFFFF814, 0x0000030F },
+ { 0x0213F0FE991C3184, 0x00003457, 0xFFFFE924, 0x0000052A, 0x00001E9D, 0xFFFFF59C, 0x00000363, 0x00001E9D, 0xFFFFF59C, 0x00000363 },
+ { 0x0213F0FE99322844, 0x000030BF, 0xFFFFEB18, 0x000004ED, 0x00001D37, 0xFFFFF636, 0x0000035C, 0x00001D37, 0xFFFFF636, 0x0000035C },
+ { 0x0213F0FE992E4084, 0x000031AF, 0xFFFFEA75, 0x000004FE, 0x000019F2, 0xFFFFF87A, 0x000002F0, 0x000019F2, 0xFFFFF87A, 0x000002F0 },
+ { 0x0213F0FE99302884, 0x00003642, 0xFFFFE85B, 0x00000547, 0x00002975, 0xFFFFEE98, 0x0000048B, 0x00002975, 0xFFFFEE98, 0x0000048B },
+ { 0x0213F0FE992E2884, 0x00002E8B, 0xFFFFED1E, 0x0000048E, 0x000019C1, 0xFFFFF917, 0x000002D6, 0x000019C1, 0xFFFFF917, 0x000002D6 },
+ { 0x0213F0FE993241A4, 0x000033D9, 0xFFFFE8E1, 0x00000548, 0x0000224B, 0xFFFFF298, 0x000003F4, 0x0000224B, 0xFFFFF298, 0x000003F4 },
+ { 0x0213F0FE992E28C4, 0x000032BC, 0xFFFFEB0F, 0x000004D6, 0x00002488, 0xFFFFF240, 0x000003F2, 0x00002488, 0xFFFFF240, 0x000003F2 },
+ { 0x0213F0FE99304944, 0x000035FD, 0xFFFFE838, 0x00000553, 0x00002762, 0xFFFFEFBC, 0x00000460, 0x00002762, 0xFFFFEFBC, 0x00000460 },
+ { 0x0213F0FE992818A4, 0x0000268B, 0xFFFFF263, 0x000003D1, 0x00001914, 0xFFFFF977, 0x000002E8, 0x00001914, 0xFFFFF977, 0x000002E8 },
+ { 0x0213F0FE992C3184, 0x0000330B, 0xFFFFEA1E, 0x00000505, 0x000020B1, 0xFFFFF44D, 0x0000039E, 0x000020B1, 0xFFFFF44D, 0x0000039E },
+ { 0x0213F0FE99222084, 0x0000326E, 0xFFFFEA26, 0x00000508, 0x00001C17, 0xFFFFF722, 0x00000328, 0x00001C17, 0xFFFFF722, 0x00000328 },
+ { 0x0213F0FE992A31A4, 0x00002A3F, 0xFFFFEEE8, 0x0000046D, 0x00001B2B, 0xFFFFF737, 0x0000034D, 0x00001B2B, 0xFFFFF737, 0x0000034D },
+ { 0x0213F0FE992C4064, 0x00003732, 0xFFFFE765, 0x00000574, 0x00002A6D, 0xFFFFEDA8, 0x000004B7, 0x00002A6D, 0xFFFFEDA8, 0x000004B7 },
+ { 0x0213F0FE99300924, 0x000034D3, 0xFFFFE827, 0x00000569, 0x000027AA, 0xFFFFEEE7, 0x00000492, 0x000027AA, 0xFFFFEEE7, 0x00000492 },
+ { 0x0213F0FE992E40C4, 0x00003306, 0xFFFFEA39, 0x000004FC, 0x00001DCC, 0xFFFFF655, 0x00000344, 0x00001DCC, 0xFFFFF655, 0x00000344 },
+ { 0x0213F0FE99282044, 0x00002A48, 0xFFFFEFCA, 0x00000439, 0x00001DED, 0xFFFFF60D, 0x00000375, 0x00001DED, 0xFFFFF60D, 0x00000375 },
+ { 0x0213F0FE993038C4, 0x000033A3, 0xFFFFEA36, 0x000004F9, 0x0000247C, 0xFFFFF21F, 0x000003F4, 0x0000247C, 0xFFFFF21F, 0x000003F4 },
+ { 0x0213F0FE992C3164, 0x0000311B, 0xFFFFEB76, 0x000004D1, 0x00001EB1, 0xFFFFF5B6, 0x00000366, 0x00001EB1, 0xFFFFF5B6, 0x00000366 },
+ { 0x0213F0FE99324164, 0x00003307, 0xFFFFE97F, 0x0000052A, 0x00001E76, 0xFFFFF54D, 0x0000037C, 0x00001E76, 0xFFFFF54D, 0x0000037C },
+ { 0x0213F0FE991C2144, 0x0000344B, 0xFFFFE9C5, 0x00000509, 0x000020D6, 0xFFFFF486, 0x0000038F, 0x000020D6, 0xFFFFF486, 0x0000038F },
+ { 0x0213F0FE992C3144, 0x000034B9, 0xFFFFEA0B, 0x000004F7, 0x000027B3, 0xFFFFF057, 0x0000043A, 0x000027B3, 0xFFFFF057, 0x0000043A },
+ { 0x0213F0FE99301964, 0x00003360, 0xFFFFE984, 0x00000527, 0x00002238, 0xFFFFF2EE, 0x000003E0, 0x00002238, 0xFFFFF2EE, 0x000003E0 },
+ { 0x0213F0FE99302124, 0x0000315C, 0xFFFFEC05, 0x000004B1, 0x000023C8, 0xFFFFF2CC, 0x000003DE, 0x000023C8, 0xFFFFF2CC, 0x000003DE },
+ { 0x0213F0FE992C2864, 0x0000389B, 0xFFFFE6D5, 0x00000582, 0x00002C6C, 0xFFFFEC92, 0x000004DE, 0x00002C6C, 0xFFFFEC92, 0x000004DE },
+ { 0x0213F0FE992E1124, 0x00003058, 0xFFFFEB30, 0x000004E6, 0x000019B5, 0xFFFFF88B, 0x000002F1, 0x000019B5, 0xFFFFF88B, 0x000002F1 },
+ { 0x0213F0FE992E0904, 0x00002F69, 0xFFFFEB4A, 0x000004F1, 0x00001B82, 0xFFFFF6EC, 0x00000341, 0x00001B82, 0xFFFFF6EC, 0x00000341 },
+ { 0x0213F0FE991A18E4, 0x000031EB, 0xFFFFEA64, 0x00000508, 0x00002059, 0xFFFFF427, 0x000003B0, 0x00002059, 0xFFFFF427, 0x000003B0 },
+ { 0x0213F0FE99224124, 0x000033E2, 0xFFFFE94D, 0x0000052A, 0x000020BF, 0xFFFFF40B, 0x000003AB, 0x000020BF, 0xFFFFF40B, 0x000003AB },
+ { 0x0213F0FE99283184, 0x00002AF9, 0xFFFFEFE9, 0x00000427, 0x00001F72, 0xFFFFF57A, 0x00000383, 0x00001F72, 0xFFFFF57A, 0x00000383 },
+ { 0x0213F0FE992C2824, 0x00003282, 0xFFFFEA88, 0x000004FA, 0x00002561, 0xFFFFF126, 0x0000042B, 0x00002561, 0xFFFFF126, 0x0000042B },
+ { 0x0213F0FE993010E4, 0x0000308A, 0xFFFFEB5D, 0x000004E0, 0x00001E83, 0xFFFFF577, 0x00000378, 0x00001E83, 0xFFFFF577, 0x00000378 },
+ { 0x0213F0FE99324884, 0x0000336E, 0xFFFFE8C8, 0x00000553, 0x0000217C, 0xFFFFF2E1, 0x000003EB, 0x0000217C, 0xFFFFF2E1, 0x000003EB },
+ { 0x0213F0FE991A2164, 0x000034A9, 0xFFFFE838, 0x00000561, 0x000020CE, 0xFFFFF38A, 0x000003C7, 0x000020CE, 0xFFFFF38A, 0x000003C7 },
+ { 0x0213F0FE99222184, 0x00003152, 0xFFFFE9EB, 0x00000522, 0x00001755, 0xFFFFF9A9, 0x000002C6, 0x00001755, 0xFFFFF9A9, 0x000002C6 },
+ { 0x0213F0FE99281884, 0x0000286E, 0xFFFFF136, 0x000003FD, 0x00001BAB, 0xFFFFF7C3, 0x0000032C, 0x00001BAB, 0xFFFFF7C3, 0x0000032C },
+ { 0x0213F0FE99300944, 0x0000316B, 0xFFFFEA02, 0x00000528, 0x00002247, 0xFFFFF24E, 0x00000408, 0x00002247, 0xFFFFF24E, 0x00000408 },
+ { 0x0213F0FE992C08E4, 0x000034CF, 0xFFFFE83D, 0x00000562, 0x00002458, 0xFFFFF130, 0x00000430, 0x00002458, 0xFFFFF130, 0x00000430 },
+ { 0x0213F0FE992C2984, 0x00003352, 0xFFFFE9D1, 0x00000515, 0x0000212A, 0xFFFFF3DC, 0x000003B4, 0x0000212A, 0xFFFFF3DC, 0x000003B4 },
+ { 0x0213F0FE992840A4, 0x00002946, 0xFFFFF09B, 0x00000415, 0x00001DC9, 0xFFFFF650, 0x00000366, 0x00001DC9, 0xFFFFF650, 0x00000366 },
+ { 0x0213F0FE99301124, 0x00003080, 0xFFFFEB47, 0x000004E1, 0x00001BD5, 0xFFFFF73B, 0x00000329, 0x00001BD5, 0xFFFFF73B, 0x00000329 },
+ { 0x0213F0FE991A1884, 0x00002FBD, 0xFFFFEB7B, 0x000004DD, 0x000017FC, 0xFFFFF99E, 0x000002C7, 0x000017FC, 0xFFFFF99E, 0x000002C7 },
+ { 0x0213F0FE99281124, 0x00002A28, 0xFFFFF032, 0x0000041F, 0x00001B19, 0xFFFFF83A, 0x00000312, 0x00001B19, 0xFFFFF83A, 0x00000312 },
+ { 0x0213F0FE992240C4, 0x00003420, 0xFFFFE936, 0x00000530, 0x000023C2, 0xFFFFF203, 0x00000406, 0x000023C2, 0xFFFFF203, 0x00000406 },
+ { 0x0213F0FE99301144, 0x00002F7C, 0xFFFFEBBA, 0x000004D1, 0x0000185D, 0xFFFFF975, 0x000002CA, 0x0000185D, 0xFFFFF975, 0x000002CA },
+ { 0x0213F0FE992E2044, 0x00002C51, 0xFFFFEE3B, 0x0000046F, 0x000019AA, 0xFFFFF8DD, 0x000002ED, 0x000019AA, 0xFFFFF8DD, 0x000002ED },
+ { 0x0213F0FE991A4144, 0x000033D6, 0xFFFFE8F2, 0x0000053D, 0x00001D73, 0xFFFFF5FB, 0x0000035B, 0x00001D73, 0xFFFFF5FB, 0x0000035B },
+ { 0x0213F0FE99323084, 0x000031D9, 0xFFFFEAF7, 0x000004E4, 0x00001EBD, 0xFFFFF5A6, 0x00000368, 0x00001EBD, 0xFFFFF5A6, 0x00000368 },
+ { 0x0213F0FE991A20A4, 0x00003386, 0xFFFFE9CE, 0x00000515, 0x00002422, 0xFFFFF1F3, 0x00000405, 0x00002422, 0xFFFFF1F3, 0x00000405 },
+ { 0x0213F0FE992C50E4, 0x000032FB, 0xFFFFE9BC, 0x00000520, 0x00002301, 0xFFFFF267, 0x000003F7, 0x00002301, 0xFFFFF267, 0x000003F7 },
+ { 0x0213F0FE99322924, 0x000032C2, 0xFFFFEAC0, 0x000004EA, 0x0000250F, 0xFFFFF1A2, 0x00000413, 0x0000250F, 0xFFFFF1A2, 0x00000413 },
+ { 0x0213F0FE991C2944, 0x00003722, 0xFFFFE8A6, 0x00000527, 0x000026E4, 0xFFFFF0F5, 0x0000041C, 0x000026E4, 0xFFFFF0F5, 0x0000041C },
+ { 0x0213F0FE992C48C4, 0x000035A4, 0xFFFFE822, 0x00000558, 0x000022F2, 0xFFFFF288, 0x000003E8, 0x000022F2, 0xFFFFF288, 0x000003E8 },
+ { 0x0213F0FE99280924, 0x00002CD1, 0xFFFFEDC6, 0x0000048C, 0x00001EAF, 0xFFFFF53D, 0x00000396, 0x00001EAF, 0xFFFFF53D, 0x00000396 },
+ { 0x0213F0FE99301164, 0x00003156, 0xFFFFEA60, 0x0000050B, 0x00001BBC, 0xFFFFF704, 0x00000335, 0x00001BBC, 0xFFFFF704, 0x00000335 },
+ { 0x0213F0FE992C5104, 0x000034A1, 0xFFFFE8C0, 0x00000544, 0x00002528, 0xFFFFF105, 0x0000042C, 0x00002528, 0xFFFFF105, 0x0000042C },
+ { 0x0213F0FE99323064, 0x000032CE, 0xFFFFE9D3, 0x00000520, 0x000021FF, 0xFFFFF2FD, 0x000003E4, 0x000021FF, 0xFFFFF2FD, 0x000003E4 },
+ { 0x0213F0FE991A50A4, 0x000034A0, 0xFFFFE823, 0x0000056D, 0x0000256F, 0xFFFFF047, 0x0000045A, 0x0000256F, 0xFFFFF047, 0x0000045A },
+ { 0x0213F0FE99303944, 0x00003109, 0xFFFFEBD6, 0x000004BF, 0x000022D4, 0xFFFFF32D, 0x000003D0, 0x000022D4, 0xFFFFF32D, 0x000003D0 },
+ { 0x0213F0FE992C1164, 0x000030B7, 0xFFFFEAF0, 0x000004F3, 0x00001AEC, 0xFFFFF7A9, 0x0000031B, 0x00001AEC, 0xFFFFF7A9, 0x0000031B },
+ { 0x0213F0FE992C39A4, 0x00003078, 0xFFFFEBA4, 0x000004CF, 0x00001E7A, 0xFFFFF5AF, 0x0000036B, 0x00001E7A, 0xFFFFF5AF, 0x0000036B },
+ { 0x0213F0FE99304124, 0x00003442, 0xFFFFE998, 0x00000518, 0x000025EA, 0xFFFFF0F3, 0x0000042B, 0x000025EA, 0xFFFFF0F3, 0x0000042B },
+ { 0x0213F0FE993021A4, 0x000031CB, 0xFFFFEA80, 0x00000501, 0x000020A3, 0xFFFFF403, 0x000003B2, 0x000020A3, 0xFFFFF403, 0x000003B2 },
+ { 0x0213F0FE992A2984, 0x00002947, 0xFFFFF018, 0x00000433, 0x00001BA5, 0xFFFFF75C, 0x00000340, 0x00001BA5, 0xFFFFF75C, 0x00000340 },
+ { 0x0213F0FE992C3984, 0x000033F9, 0xFFFFE99D, 0x00000518, 0x00002231, 0xFFFFF358, 0x000003C5, 0x00002231, 0xFFFFF358, 0x000003C5 },
+ { 0x0213F0FE99321124, 0x00003131, 0xFFFFEA45, 0x00000513, 0x00001973, 0xFFFFF85E, 0x00000301, 0x00001973, 0xFFFFF85E, 0x00000301 },
+ { 0x0213F0FE991C29A4, 0x00003571, 0xFFFFE8AC, 0x00000539, 0x00002049, 0xFFFFF49C, 0x0000038D, 0x00002049, 0xFFFFF49C, 0x0000038D },
+ { 0x0213F0FE992E3864, 0x0000309E, 0xFFFFEB1D, 0x000004E8, 0x000019ED, 0xFFFFF86E, 0x000002F8, 0x000019ED, 0xFFFFF86E, 0x000002F8 },
+ { 0x0213F0FE99302984, 0x00003091, 0xFFFFEB9B, 0x000004CC, 0x00001D2C, 0xFFFFF6A2, 0x0000033D, 0x00001D2C, 0xFFFFF6A2, 0x0000033D },
+ { 0x0213F0FE993008E4, 0x00003069, 0xFFFFEAFD, 0x000004F8, 0x00001E82, 0xFFFFF51C, 0x0000038D, 0x00001E82, 0xFFFFF51C, 0x0000038D },
+ { 0x0213F0FE992210A4, 0x00003459, 0xFFFFE7F2, 0x00000572, 0x00001DA7, 0xFFFFF552, 0x0000037F, 0x00001DA7, 0xFFFFF552, 0x0000037F },
+ { 0x0213F0FE99321104, 0x0000304B, 0xFFFFEAFB, 0x000004F4, 0x0000191E, 0xFFFFF8BD, 0x000002EE, 0x0000191E, 0xFFFFF8BD, 0x000002EE },
+ { 0x0213F0FE993020C4, 0x0000346E, 0xFFFFEA07, 0x000004FD, 0x00002767, 0xFFFFF058, 0x00000440, 0x00002767, 0xFFFFF058, 0x00000440 },
+ { 0x0213F0FE992E3084, 0x000030B5, 0xFFFFEBC1, 0x000004C1, 0x00001B3C, 0xFFFFF818, 0x000002FD, 0x00001B3C, 0xFFFFF818, 0x000002FD },
+ { 0x0213F0FE99300904, 0x0000321F, 0xFFFFE9EA, 0x00000524, 0x00002380, 0xFFFFF1C2, 0x0000041A, 0x00002380, 0xFFFFF1C2, 0x0000041A },
+ { 0x0213F0FE992E3044, 0x000030DF, 0xFFFFEB37, 0x000004E2, 0x00001E3C, 0xFFFFF5BB, 0x00000369, 0x00001E3C, 0xFFFFF5BB, 0x00000369 },
+ { 0x0213F0FE992848A4, 0x000027E0, 0xFFFFF0E2, 0x00000416, 0x00001A6A, 0xFFFFF820, 0x00000321, 0x00001A6A, 0xFFFFF820, 0x00000321 },
+ { 0x0213F0FE991A1084, 0x00002FA1, 0xFFFFEB63, 0x000004E7, 0x0000196B, 0xFFFFF880, 0x000002FB, 0x0000196B, 0xFFFFF880, 0x000002FB },
+ { 0x0213F0FE991C1084, 0x0000310C, 0xFFFFEAAF, 0x000004FC, 0x000019EF, 0xFFFFF850, 0x000002FD, 0x000019EF, 0xFFFFF850, 0x000002FD },
+ { 0x0213F0FE99323904, 0x0000334A, 0xFFFFEA07, 0x0000050B, 0x00002380, 0xFFFFF26F, 0x000003F0, 0x00002380, 0xFFFFF26F, 0x000003F0 },
+ { 0x0213F0FE99302944, 0x00002FF9, 0xFFFFECDC, 0x00000492, 0x00002297, 0xFFFFF394, 0x000003BF, 0x00002297, 0xFFFFF394, 0x000003BF },
+ { 0x0213F0FE992C2164, 0x0000354B, 0xFFFFE894, 0x00000546, 0x000024C4, 0xFFFFF16C, 0x0000041B, 0x000024C4, 0xFFFFF16C, 0x0000041B },
+ { 0x0213F0FE99220924, 0x00003245, 0xFFFFE92F, 0x00000544, 0x00001829, 0xFFFFF8F1, 0x000002EA, 0x00001829, 0xFFFFF8F1, 0x000002EA },
+ { 0x0213F0FE992E4884, 0x0000302F, 0xFFFFEB51, 0x000004E3, 0x0000199F, 0xFFFFF894, 0x000002F4, 0x0000199F, 0xFFFFF894, 0x000002F4 },
+ { 0x0213F0FE992E18C4, 0x00002F54, 0xFFFFEC86, 0x000004A6, 0x00001A6F, 0xFFFFF891, 0x000002EC, 0x00001A6F, 0xFFFFF891, 0x000002EC },
+ { 0x0213F0FE99284164, 0x00002908, 0xFFFFF0D8, 0x0000040A, 0x00001C9B, 0xFFFFF729, 0x00000342, 0x00001C9B, 0xFFFFF729, 0x00000342 },
+ { 0x0213F0FE99302964, 0x000031D9, 0xFFFFEB40, 0x000004D7, 0x000023F5, 0xFFFFF259, 0x000003F4, 0x000023F5, 0xFFFFF259, 0x000003F4 },
+ { 0x0213F0FE993048E4, 0x000034C8, 0xFFFFE8C6, 0x0000053F, 0x00002313, 0xFFFFF280, 0x000003EC, 0x00002313, 0xFFFFF280, 0x000003EC },
+ { 0x0213F0FE993050C4, 0x000037D1, 0xFFFFE6A1, 0x0000059C, 0x00002C6A, 0xFFFFEBFF, 0x00000504, 0x00002C6A, 0xFFFFEBFF, 0x00000504 },
+ { 0x0213F0FE99321964, 0x000030E9, 0xFFFFEA6B, 0x0000050F, 0x00001A2D, 0xFFFFF7DF, 0x00000316, 0x00001A2D, 0xFFFFF7DF, 0x00000316 },
+ { 0x0213F0FE99302084, 0x0000323D, 0xFFFFEA95, 0x000004F4, 0x00001ED2, 0xFFFFF584, 0x0000036C, 0x00001ED2, 0xFFFFF584, 0x0000036C },
+ { 0x0213F0FE992C3024, 0x000033D6, 0xFFFFE9DB, 0x00000510, 0x000027A7, 0xFFFFEFC7, 0x0000045E, 0x000027A7, 0xFFFFEFC7, 0x0000045E },
+ { 0x0213F0FE991C3164, 0x00003444, 0xFFFFE98A, 0x00000517, 0x000020FD, 0xFFFFF43F, 0x0000039D, 0x000020FD, 0xFFFFF43F, 0x0000039D },
+ { 0x0213F0FE992808E4, 0x00002987, 0xFFFFEFA1, 0x0000044B, 0x00001B06, 0xFFFFF788, 0x0000033C, 0x00001B06, 0xFFFFF788, 0x0000033C },
+ { 0x0213F0FE992C28E4, 0x0000311D, 0xFFFFED20, 0x00000474, 0x000025DA, 0xFFFFF223, 0x000003F0, 0x000025DA, 0xFFFFF223, 0x000003F0 },
+ { 0x0213F0FE992C1124, 0x000032A2, 0xFFFFEA0A, 0x0000050D, 0x00001D48, 0xFFFFF659, 0x0000034A, 0x00001D48, 0xFFFFF659, 0x0000034A },
+ { 0x0213F0FE992208E4, 0x00003110, 0xFFFFE9EA, 0x00000529, 0x00001786, 0xFFFFF958, 0x000002DB, 0x00001786, 0xFFFFF958, 0x000002DB },
+ { 0x0213F0FE992821A4, 0x000027F2, 0xFFFFF174, 0x000003F7, 0x00001C7A, 0xFFFFF72A, 0x00000348, 0x00001C7A, 0xFFFFF72A, 0x00000348 },
+ { 0x0213F0FE991C10E4, 0x000031DB, 0xFFFFEA7D, 0x000004FB, 0x000019C4, 0xFFFFF8B1, 0x000002E6, 0x000019C4, 0xFFFFF8B1, 0x000002E6 },
+ { 0x0213F0FE992C1104, 0x00003158, 0xFFFFEAAC, 0x000004FA, 0x00001BC1, 0xFFFFF737, 0x0000032B, 0x00001BC1, 0xFFFFF737, 0x0000032B },
+ { 0x0213F0FE993010C4, 0x00002F36, 0xFFFFEBF9, 0x000004CA, 0x00001A2A, 0xFFFFF83F, 0x00000303, 0x00001A2A, 0xFFFFF83F, 0x00000303 },
+ { 0x0213F0FE993238A4, 0x000032B4, 0xFFFFEA72, 0x000004FA, 0x000021FF, 0xFFFFF378, 0x000003C5, 0x000021FF, 0xFFFFF378, 0x000003C5 },
+ { 0x0213F0FE99303164, 0x00003262, 0xFFFFEAFA, 0x000004DF, 0x00002441, 0xFFFFF237, 0x000003F6, 0x00002441, 0xFFFFF237, 0x000003F6 },
+ { 0x0213F0FE99303924, 0x0000336A, 0xFFFFEAFB, 0x000004D1, 0x00002746, 0xFFFFF0B8, 0x0000042B, 0x00002746, 0xFFFFF0B8, 0x0000042B },
+ { 0x0213F0FE991A4084, 0x000032E5, 0xFFFFE923, 0x00000541, 0x00001DF0, 0xFFFFF552, 0x00000380, 0x00001DF0, 0xFFFFF552, 0x00000380 },
+ { 0x0213F0FE99304064, 0x000035D1, 0xFFFFE80B, 0x0000055F, 0x00002780, 0xFFFFEF74, 0x0000046F, 0x00002780, 0xFFFFEF74, 0x0000046F },
+ { 0x0213F0FE993028A4, 0x000033EC, 0xFFFFEA48, 0x000004F4, 0x0000269F, 0xFFFFF0D8, 0x0000042A, 0x0000269F, 0xFFFFF0D8, 0x0000042A },
+ { 0x0213F0FE99323884, 0x000030C4, 0xFFFFEB39, 0x000004E2, 0x00001B44, 0xFFFFF7AA, 0x00000318, 0x00001B44, 0xFFFFF7AA, 0x00000318 },
+ { 0x0213F0FE99281144, 0x00002926, 0xFFFFF0AF, 0x0000040E, 0x0000194E, 0xFFFFF959, 0x000002E2, 0x0000194E, 0xFFFFF959, 0x000002E2 },
+ { 0x0213F0FE992C10C4, 0x00003141, 0xFFFFEAAF, 0x000004F6, 0x00001864, 0xFFFFF97C, 0x000002C6, 0x00001864, 0xFFFFF97C, 0x000002C6 },
+ { 0x0213F0FE99301064, 0x000030B2, 0xFFFFEB7C, 0x000004DB, 0x000022CE, 0xFFFFF2B5, 0x000003F0, 0x000022CE, 0xFFFFF2B5, 0x000003F0 },
+ { 0x0213F0FE99301944, 0x0000318C, 0xFFFFEAC7, 0x000004F6, 0x00002113, 0xFFFFF3CA, 0x000003BD, 0x00002113, 0xFFFFF3CA, 0x000003BD },
+ { 0x0213F0FE992E1104, 0x00002FD2, 0xFFFFEB8F, 0x000004D9, 0x00001996, 0xFFFFF89F, 0x000002F1, 0x00001996, 0xFFFFF89F, 0x000002F1 },
+ { 0x0213F0FE991A28A4, 0x0000310D, 0xFFFFEB25, 0x000004E7, 0x00001F67, 0xFFFFF4EF, 0x0000038E, 0x00001F67, 0xFFFFF4EF, 0x0000038E },
+ { 0x0213F0FE992A4964, 0x00002BBC, 0xFFFFEE68, 0x00000477, 0x00002050, 0xFFFFF41D, 0x000003C8, 0x00002050, 0xFFFFF41D, 0x000003C8 },
+ { 0x0213F0FE99302104, 0x00003096, 0xFFFFECED, 0x00000486, 0x000024C9, 0xFFFFF278, 0x000003E7, 0x000024C9, 0xFFFFF278, 0x000003E7 },
+ { 0x0213F0FE992C10A4, 0x00003401, 0xFFFFE8F1, 0x0000053C, 0x00001E75, 0xFFFFF55C, 0x00000376, 0x00001E75, 0xFFFFF55C, 0x00000376 },
+ { 0x0213F0FE99302844, 0x0000319E, 0xFFFFEAB1, 0x000004F8, 0x00001EA3, 0xFFFFF567, 0x00000378, 0x00001EA3, 0xFFFFF567, 0x00000378 },
+ { 0x0213F0FE99322964, 0x000030FD, 0xFFFFEB4C, 0x000004DB, 0x00001CA6, 0xFFFFF6E8, 0x00000335, 0x00001CA6, 0xFFFFF6E8, 0x00000335 },
+ { 0x0213F0FE992E40A4, 0x000030D6, 0xFFFFEB1A, 0x000004E4, 0x00001A0D, 0xFFFFF87D, 0x000002EF, 0x00001A0D, 0xFFFFF87D, 0x000002EF },
+ { 0x0213F0FE992C2124, 0x0000324B, 0xFFFFEB17, 0x000004D9, 0x00002225, 0xFFFFF3A8, 0x000003BA, 0x00002225, 0xFFFFF3A8, 0x000003BA },
+ { 0x0213F0FE99284084, 0x00002A00, 0xFFFFF02E, 0x00000424, 0x00001E21, 0xFFFFF61D, 0x0000036C, 0x00001E21, 0xFFFFF61D, 0x0000036C },
+ { 0x0213F0FE992A48A4, 0x000029CF, 0xFFFFEF53, 0x00000457, 0x00001B11, 0xFFFFF772, 0x0000033D, 0x00001B11, 0xFFFFF772, 0x0000033D },
+ { 0x0213F0FE991A30A4, 0x000032A1, 0xFFFFEA63, 0x000004FB, 0x00001F83, 0xFFFFF516, 0x0000037E, 0x00001F83, 0xFFFFF516, 0x0000037E },
+ { 0x0213F0FE992E20C4, 0x0000305C, 0xFFFFEC14, 0x000004B5, 0x00001D0B, 0xFFFFF6ED, 0x00000332, 0x00001D0B, 0xFFFFF6ED, 0x00000332 },
+ { 0x0213F0FE992C1064, 0x00003467, 0xFFFFE8D5, 0x00000543, 0x0000243F, 0xFFFFF190, 0x00000418, 0x0000243F, 0xFFFFF190, 0x00000418 },
+ { 0x0213F0FE992A2064, 0x00002796, 0xFFFFF133, 0x00000409, 0x00001903, 0xFFFFF91C, 0x000002FC, 0x00001903, 0xFFFFF91C, 0x000002FC },
+ { 0x0213F0FE99302164, 0x000031F6, 0xFFFFEAB7, 0x000004F5, 0x000022B9, 0xFFFFF2D0, 0x000003E6, 0x000022B9, 0xFFFFF2D0, 0x000003E6 },
+ { 0x0213F0FE992E5104, 0x00003196, 0xFFFFEA76, 0x00000503, 0x00001CC5, 0xFFFFF67D, 0x0000034A, 0x00001CC5, 0xFFFFF67D, 0x0000034A },
+ { 0x0213F0FE99321144, 0x00002F9E, 0xFFFFEAD9, 0x00000505, 0x000017C1, 0xFFFFF93D, 0x000002DF, 0x000017C1, 0xFFFFF93D, 0x000002DF },
+ { 0x0213F0FE992E2124, 0x00002FBC, 0xFFFFEC75, 0x000004A8, 0x00001D6D, 0xFFFFF6AC, 0x0000033D, 0x00001D6D, 0xFFFFF6AC, 0x0000033D },
+ { 0x0213F0FE992C38A4, 0x00003541, 0xFFFFE921, 0x00000524, 0x00002662, 0xFFFFF0CB, 0x0000042B, 0x00002662, 0xFFFFF0CB, 0x0000042B },
+ { 0x0213F0FE992A21A4, 0x00002953, 0xFFFFEF76, 0x00000459, 0x00001C05, 0xFFFFF6A0, 0x00000368, 0x00001C05, 0xFFFFF6A0, 0x00000368 },
+ { 0x0213F0FE992C4924, 0x000034BC, 0xFFFFE8DD, 0x00000536, 0x0000210E, 0xFFFFF3F4, 0x000003A8, 0x0000210E, 0xFFFFF3F4, 0x000003A8 },
+ { 0x0213F0FE992C29A4, 0x000034BE, 0xFFFFE916, 0x0000052F, 0x000024A1, 0xFFFFF1A6, 0x00000410, 0x000024A1, 0xFFFFF1A6, 0x00000410 },
+ { 0x0213F0FE99304964, 0x000037B5, 0xFFFFE7A9, 0x0000055B, 0x000028A1, 0xFFFFEF51, 0x00000467, 0x000028A1, 0xFFFFEF51, 0x00000467 },
+ { 0x0213F0FE99301104, 0x00002FC5, 0xFFFFEBBE, 0x000004D1, 0x00001BA5, 0xFFFFF757, 0x00000328, 0x00001BA5, 0xFFFFF757, 0x00000328 },
+ { 0x0213F0FE993040A4, 0x000033CB, 0xFFFFE944, 0x0000052B, 0x00001FBE, 0xFFFFF4B1, 0x0000038C, 0x00001FBE, 0xFFFFF4B1, 0x0000038C },
+ { 0x0213F0FE99301844, 0x000030AE, 0xFFFFEBA0, 0x000004D3, 0x00002268, 0xFFFFF316, 0x000003DD, 0x00002268, 0xFFFFF316, 0x000003DD },
+ { 0x0213F0FE992C20A4, 0x00002F90, 0xFFFFEC5A, 0x000004B0, 0x00001C3A, 0xFFFFF752, 0x00000323, 0x00001C3A, 0xFFFFF752, 0x00000323 },
+ { 0x0213F0FE992E38E4, 0x00003113, 0xFFFFEB91, 0x000004C8, 0x00001E3C, 0xFFFFF623, 0x0000034E, 0x00001E3C, 0xFFFFF623, 0x0000034E },
+ { 0x0213F0FE99323984, 0x0000330B, 0xFFFFE94B, 0x00000539, 0x000020E7, 0xFFFFF37E, 0x000003CD, 0x000020E7, 0xFFFFF37E, 0x000003CD },
+ { 0x0213F0FE992E2864, 0x000031D1, 0xFFFFEACB, 0x000004ED, 0x00001E82, 0xFFFFF5B2, 0x00000365, 0x00001E82, 0xFFFFF5B2, 0x00000365 },
+ { 0x0213F0FE992A3984, 0x00002CD5, 0xFFFFEDC1, 0x0000048D, 0x000020F8, 0xFFFFF3C1, 0x000003D1, 0x000020F8, 0xFFFFF3C1, 0x000003D1 },
+ { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }
};
int pp_override_get_default_fuse_value(uint64_t key,
- struct phm_fuses_default list[],
struct phm_fuses_default *result)
{
+ const struct phm_fuses_default *list = vega10_fuses_default;
uint32_t i;
- uint64_t temp_serial_numer;
- uint32_t bit;
- const char *temp;
- for (i = 0; list[i].key != NULL; i++) {
- temp = list[i].key;
- temp_serial_numer = 0;
- do {
- bit = *temp=='1'? 1 : 0;
- temp_serial_numer = (temp_serial_numer <<1 ) | bit;
- temp++;
- } while (*temp);
-
- if (key == temp_serial_numer) {
+ for (i = 0; list[i].key != 0; i++) {
+ if (key == list[i].key) {
result->key = list[i].key;
result->VFT2_m1 = list[i].VFT2_m1;
result->VFT2_m2 = list[i].VFT2_m2;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h
index 6e8f7a2119c1..c6ba0d64cfb7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.h
@@ -28,7 +28,7 @@
#include <linux/kernel.h>
struct phm_fuses_default {
- const char *key;
+ uint64_t key;
uint32_t VFT2_m1;
uint32_t VFT2_m2;
uint32_t VFT2_b;
@@ -40,9 +40,7 @@ struct phm_fuses_default {
uint32_t VFT0_b;
};
-extern struct phm_fuses_default vega10_fuses_default[];
extern int pp_override_get_default_fuse_value(uint64_t key,
- struct phm_fuses_default list[],
struct phm_fuses_default *result);
#endif \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
new file mode 100644
index 000000000000..ffa44bbb218e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include "pp_psm.h"
+
+int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ unsigned int i;
+ unsigned int table_entries;
+ struct pp_power_state *state;
+ int size;
+
+ if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->get_power_state_size == NULL)
+ return -EINVAL;
+
+ hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
+
+ hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
+ sizeof(struct pp_power_state);
+
+ hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
+ if (hwmgr->ps == NULL)
+ return -ENOMEM;
+
+ hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
+ if (hwmgr->request_ps == NULL) {
+ kfree(hwmgr->ps);
+ hwmgr->ps = NULL;
+ return -ENOMEM;
+ }
+
+ hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
+ if (hwmgr->current_ps == NULL) {
+ kfree(hwmgr->request_ps);
+ kfree(hwmgr->ps);
+ hwmgr->request_ps = NULL;
+ hwmgr->ps = NULL;
+ return -ENOMEM;
+ }
+
+ state = hwmgr->ps;
+
+ for (i = 0; i < table_entries; i++) {
+ result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state);
+
+ if (state->classification.flags & PP_StateClassificationFlag_Boot) {
+ hwmgr->boot_ps = state;
+ memcpy(hwmgr->current_ps, state, size);
+ memcpy(hwmgr->request_ps, state, size);
+ }
+
+ state->id = i + 1; /* assigned unique num for every power state id */
+
+ if (state->classification.flags & PP_StateClassificationFlag_Uvd)
+ hwmgr->uvd_ps = state;
+ state = (struct pp_power_state *)((unsigned long)state + size);
+ }
+
+ return 0;
+}
+
+int psm_fini_power_state_table(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ kfree(hwmgr->current_ps);
+ kfree(hwmgr->request_ps);
+ kfree(hwmgr->ps);
+ hwmgr->request_ps = NULL;
+ hwmgr->ps = NULL;
+ hwmgr->current_ps = NULL;
+ return 0;
+}
+
+static int psm_get_ui_state(struct pp_hwmgr *hwmgr,
+ enum PP_StateUILabel ui_label,
+ unsigned long *state_id)
+{
+ struct pp_power_state *state;
+ int table_entries;
+ int i;
+
+ table_entries = hwmgr->num_ps;
+ state = hwmgr->ps;
+
+ for (i = 0; i < table_entries; i++) {
+ if (state->classification.ui_label & ui_label) {
+ *state_id = state->id;
+ return 0;
+ }
+ state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
+ }
+ return -EINVAL;
+}
+
+static int psm_get_state_by_classification(struct pp_hwmgr *hwmgr,
+ enum PP_StateClassificationFlag flag,
+ unsigned long *state_id)
+{
+ struct pp_power_state *state;
+ int table_entries;
+ int i;
+
+ table_entries = hwmgr->num_ps;
+ state = hwmgr->ps;
+
+ for (i = 0; i < table_entries; i++) {
+ if (state->classification.flags & flag) {
+ *state_id = state->id;
+ return 0;
+ }
+ state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
+ }
+ return -EINVAL;
+}
+
+static int psm_set_states(struct pp_hwmgr *hwmgr, unsigned long state_id)
+{
+ struct pp_power_state *state;
+ int table_entries;
+ int i;
+
+ table_entries = hwmgr->num_ps;
+
+ state = hwmgr->ps;
+
+ for (i = 0; i < table_entries; i++) {
+ if (state->id == state_id) {
+ memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
+ return 0;
+ }
+ state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
+ }
+ return -EINVAL;
+}
+
+int psm_set_boot_states(struct pp_hwmgr *hwmgr)
+{
+ unsigned long state_id;
+ int ret = -EINVAL;
+
+ if (!psm_get_state_by_classification(hwmgr, PP_StateClassificationFlag_Boot,
+ &state_id))
+ ret = psm_set_states(hwmgr, state_id);
+
+ return ret;
+}
+
+int psm_set_performance_states(struct pp_hwmgr *hwmgr)
+{
+ unsigned long state_id;
+ int ret = -EINVAL;
+
+ if (!psm_get_ui_state(hwmgr, PP_StateUILabel_Performance,
+ &state_id))
+ ret = psm_set_states(hwmgr, state_id);
+
+ return ret;
+}
+
+int psm_set_user_performance_state(struct pp_hwmgr *hwmgr,
+ enum PP_StateUILabel label_id,
+ struct pp_power_state **state)
+{
+ int table_entries;
+ int i;
+
+ table_entries = hwmgr->num_ps;
+ *state = hwmgr->ps;
+
+restart_search:
+ for (i = 0; i < table_entries; i++) {
+ if ((*state)->classification.ui_label & label_id)
+ return 0;
+ *state = (struct pp_power_state *)((uintptr_t)*state + hwmgr->ps_size);
+ }
+
+ switch (label_id) {
+ case PP_StateUILabel_Battery:
+ case PP_StateUILabel_Balanced:
+ label_id = PP_StateUILabel_Performance;
+ goto restart_search;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
+ struct pp_power_state *new_ps)
+{
+ struct pp_power_state *pcurrent;
+ struct pp_power_state *requested;
+ bool equal;
+
+ if (skip)
+ return 0;
+
+ phm_display_configuration_changed(hwmgr);
+
+ if (new_ps != NULL)
+ requested = new_ps;
+ else
+ requested = hwmgr->request_ps;
+
+ pcurrent = hwmgr->current_ps;
+
+ phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
+ if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr,
+ &pcurrent->hardware, &requested->hardware, &equal)))
+ equal = false;
+
+ if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
+ phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
+ memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
+ }
+
+ phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h
index fbdff3e02aa3..fa1b6825036a 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,19 +20,21 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "eventmgr.h"
-#include "eventinit.h"
-#include "eventmanagement.h"
-#include "eventmanager.h"
-#include "power_state.h"
-#include "hardwaremanager.h"
-int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id);
+#ifndef PP_PSM_H
+#define PP_PSM_H
-int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id);
+#include "hwmgr.h"
-int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id);
+int psm_init_power_state_table(struct pp_hwmgr *hwmgr);
+int psm_fini_power_state_table(struct pp_hwmgr *hwmgr);
+int psm_set_boot_states(struct pp_hwmgr *hwmgr);
+int psm_set_performance_states(struct pp_hwmgr *hwmgr);
+int psm_set_user_performance_state(struct pp_hwmgr *hwmgr,
+ enum PP_StateUILabel label_id,
+ struct pp_power_state **state);
+int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr,
+ bool skip,
+ struct pp_power_state *new_ps);
-int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip);
-
-int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip);
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 953e0c9ad7cd..a129bc5b1844 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -470,7 +470,7 @@ uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr)
* SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ.
* voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE
*/
-bool atomctrl_is_voltage_controled_by_gpio_v3(
+bool atomctrl_is_voltage_controlled_by_gpio_v3(
struct pp_hwmgr *hwmgr,
uint8_t voltage_type,
uint8_t voltage_mode)
@@ -1100,10 +1100,10 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
}
}
- PP_ASSERT_WITH_CODE(entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count,
- "Can't find requested voltage id in vddc_dependency_on_sclk table!",
+ if (entry_id >= hwmgr->dyn_state.vddc_dependency_on_sclk->count) {
+ pr_debug("Can't find requested voltage id in vddc_dependency_on_sclk table!\n");
return -EINVAL;
- );
+ }
get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
@@ -1418,3 +1418,83 @@ int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
return 0;
}
+
+int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id)
+{
+ int result;
+ SET_VOLTAGE_PS_ALLOCATION allocation;
+ SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters =
+ (SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage;
+
+ voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID;
+
+ result = cgs_atom_exec_cmd_table(hwmgr->device,
+ GetIndexIntoMasterTable(COMMAND, SetVoltage),
+ voltage_parameters);
+
+ *virtual_voltage_id = voltage_parameters->usVoltageLevel;
+
+ return result;
+}
+
+int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
+ uint16_t *vddc, uint16_t *vddci,
+ uint16_t virtual_voltage_id,
+ uint16_t efuse_voltage_id)
+{
+ int i, j;
+ int ix;
+ u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
+ ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
+
+ *vddc = 0;
+ *vddci = 0;
+
+ ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
+
+ profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
+ cgs_atom_get_data_table(hwmgr->device,
+ ix,
+ NULL, NULL, NULL);
+ if (!profile)
+ return -EINVAL;
+
+ if ((profile->asHeader.ucTableFormatRevision >= 2) &&
+ (profile->asHeader.ucTableContentRevision >= 1) &&
+ (profile->asHeader.usStructureSize >= sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))) {
+ leakage_bin = (u16 *)((char *)profile + profile->usLeakageBinArrayOffset);
+ vddc_id_buf = (u16 *)((char *)profile + profile->usElbVDDC_IdArrayOffset);
+ vddc_buf = (u16 *)((char *)profile + profile->usElbVDDC_LevelArrayOffset);
+ if (profile->ucElbVDDC_Num > 0) {
+ for (i = 0; i < profile->ucElbVDDC_Num; i++) {
+ if (vddc_id_buf[i] == virtual_voltage_id) {
+ for (j = 0; j < profile->ucLeakageBinNum; j++) {
+ if (efuse_voltage_id <= leakage_bin[j]) {
+ *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ vddci_id_buf = (u16 *)((char *)profile + profile->usElbVDDCI_IdArrayOffset);
+ vddci_buf = (u16 *)((char *)profile + profile->usElbVDDCI_LevelArrayOffset);
+ if (profile->ucElbVDDCI_Num > 0) {
+ for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
+ if (vddci_id_buf[i] == virtual_voltage_id) {
+ for (j = 0; j < profile->ucLeakageBinNum; j++) {
+ if (efuse_voltage_id <= leakage_bin[j]) {
+ *vddci = vddci_buf[j * profile->ucElbVDDC_Num + i];
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index e9fe2e84006b..c44a92064cf1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -291,7 +291,7 @@ extern uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr);
extern int atomctrl_get_memory_pll_dividers_si(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param, bool strobe_mode);
extern int atomctrl_get_engine_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers);
extern int atomctrl_get_dfs_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers);
-extern bool atomctrl_is_voltage_controled_by_gpio_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode);
+extern bool atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode);
extern int atomctrl_get_voltage_table_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, pp_atomctrl_voltage_table *voltage_table);
extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param);
@@ -314,5 +314,11 @@ extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_
extern int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint8_t *svd_gpio_id, uint8_t *svc_gpio_id,
uint16_t *load_line);
+
+extern int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
+ uint16_t *vddc, uint16_t *vddci,
+ uint16_t virtual_voltage_id,
+ uint16_t efuse_voltage_id);
+extern int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 84f01fd33aff..d1af1483c69b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -173,8 +173,6 @@ static int get_vddc_lookup_table(
if (NULL == table)
return -ENOMEM;
- memset(table, 0x00, table_size);
-
table->count = vddc_lookup_pp_tables->ucNumEntries;
for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) {
@@ -335,8 +333,6 @@ static int get_valid_clk(
if (NULL == table)
return -ENOMEM;
- memset(table, 0x00, table_size);
-
table->count = (uint32_t)clk_volt_pp_table->count;
for (i = 0; i < table->count; i++) {
@@ -390,8 +386,6 @@ static int get_mclk_voltage_dependency_table(
if (NULL == mclk_table)
return -ENOMEM;
- memset(mclk_table, 0x00, table_size);
-
mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries;
for (i = 0; i < mclk_dep_table->ucNumEntries; i++) {
@@ -439,8 +433,6 @@ static int get_sclk_voltage_dependency_table(
if (NULL == sclk_table)
return -ENOMEM;
- memset(sclk_table, 0x00, table_size);
-
sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
for (i = 0; i < tonga_table->ucNumEntries; i++) {
@@ -473,8 +465,6 @@ static int get_sclk_voltage_dependency_table(
if (NULL == sclk_table)
return -ENOMEM;
- memset(sclk_table, 0x00, table_size);
-
sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
for (i = 0; i < polaris_table->ucNumEntries; i++) {
@@ -525,8 +515,6 @@ static int get_pcie_table(
if (pcie_table == NULL)
return -ENOMEM;
- memset(pcie_table, 0x00, table_size);
-
/*
* Make sure the number of pcie entries are less than or equal to sclk dpm levels.
* Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
@@ -567,8 +555,6 @@ static int get_pcie_table(
if (pcie_table == NULL)
return -ENOMEM;
- memset(pcie_table, 0x00, table_size);
-
/*
* Make sure the number of pcie entries are less than or equal to sclk dpm levels.
* Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
@@ -615,8 +601,6 @@ static int get_cac_tdp_table(
if (NULL == tdp_table)
return -ENOMEM;
- memset(tdp_table, 0x00, table_size);
-
hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
if (NULL == hwmgr->dyn_state.cac_dtp_table) {
@@ -624,8 +608,6 @@ static int get_cac_tdp_table(
return -ENOMEM;
}
- memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);
-
if (table->ucRevId < 3) {
const ATOM_Tonga_PowerTune_Table *tonga_table =
(ATOM_Tonga_PowerTune_Table *)table;
@@ -725,8 +707,6 @@ static int get_mm_clock_voltage_table(
if (NULL == mm_table)
return -ENOMEM;
- memset(mm_table, 0x00, table_size);
-
mm_table->count = mm_dependency_table->ucNumEntries;
for (i = 0; i < mm_dependency_table->ucNumEntries; i++) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 2716721e5453..afae32ee2b0d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -24,7 +24,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-
+#include <drm/amdgpu_drm.h>
#include "processpptables.h"
#include <atom-types.h>
#include <atombios.h>
@@ -790,6 +790,39 @@ static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2(
return pstate;
}
+static const unsigned char soft_dummy_pp_table[] = {
+ 0xe1, 0x01, 0x06, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x00, 0x4a, 0x00, 0x6c, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x42, 0x00, 0x02, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x4e, 0x00, 0x88, 0x00, 0x00, 0x9e, 0x00, 0x17, 0x00, 0x00, 0x00, 0x9e, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x02, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x18, 0x05, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, 0x00, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x8e, 0x01, 0x00, 0x00, 0xb8, 0x01, 0x00, 0x00, 0x08, 0x30, 0x75, 0x00, 0x80, 0x00, 0xa0, 0x8c,
+ 0x00, 0x7e, 0x00, 0x71, 0xa5, 0x00, 0x7c, 0x00, 0xe5, 0xc8, 0x00, 0x70, 0x00, 0x91, 0xf4, 0x00,
+ 0x64, 0x00, 0x40, 0x19, 0x01, 0x5a, 0x00, 0x0e, 0x28, 0x01, 0x52, 0x00, 0x80, 0x38, 0x01, 0x4a,
+ 0x00, 0x00, 0x09, 0x30, 0x75, 0x00, 0x30, 0x75, 0x00, 0x40, 0x9c, 0x00, 0x40, 0x9c, 0x00, 0x59,
+ 0xd8, 0x00, 0x59, 0xd8, 0x00, 0x91, 0xf4, 0x00, 0x91, 0xf4, 0x00, 0x0e, 0x28, 0x01, 0x0e, 0x28,
+ 0x01, 0x90, 0x5f, 0x01, 0x90, 0x5f, 0x01, 0x00, 0x77, 0x01, 0x00, 0x77, 0x01, 0xca, 0x91, 0x01,
+ 0xca, 0x91, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x80, 0x00, 0x00, 0x7e, 0x00, 0x01,
+ 0x7c, 0x00, 0x02, 0x70, 0x00, 0x03, 0x64, 0x00, 0x04, 0x5a, 0x00, 0x05, 0x52, 0x00, 0x06, 0x4a,
+ 0x00, 0x07, 0x08, 0x08, 0x00, 0x08, 0x00, 0x01, 0x02, 0x02, 0x02, 0x01, 0x02, 0x02, 0x02, 0x03,
+ 0x02, 0x04, 0x02, 0x00, 0x08, 0x40, 0x9c, 0x00, 0x30, 0x75, 0x00, 0x74, 0xb5, 0x00, 0xa0, 0x8c,
+ 0x00, 0x60, 0xea, 0x00, 0x74, 0xb5, 0x00, 0x0e, 0x28, 0x01, 0x60, 0xea, 0x00, 0x90, 0x5f, 0x01,
+ 0x40, 0x19, 0x01, 0xb2, 0xb0, 0x01, 0x90, 0x5f, 0x01, 0xc0, 0xd4, 0x01, 0x00, 0x77, 0x01, 0x5e,
+ 0xff, 0x01, 0xca, 0x91, 0x01, 0x08, 0x80, 0x00, 0x00, 0x7e, 0x00, 0x01, 0x7c, 0x00, 0x02, 0x70,
+ 0x00, 0x03, 0x64, 0x00, 0x04, 0x5a, 0x00, 0x05, 0x52, 0x00, 0x06, 0x4a, 0x00, 0x07, 0x00, 0x08,
+ 0x80, 0x00, 0x30, 0x75, 0x00, 0x7e, 0x00, 0x40, 0x9c, 0x00, 0x7c, 0x00, 0x59, 0xd8, 0x00, 0x70,
+ 0x00, 0xdc, 0x0b, 0x01, 0x64, 0x00, 0x80, 0x38, 0x01, 0x5a, 0x00, 0x80, 0x38, 0x01, 0x52, 0x00,
+ 0x80, 0x38, 0x01, 0x4a, 0x00, 0x80, 0x38, 0x01, 0x08, 0x30, 0x75, 0x00, 0x80, 0x00, 0xa0, 0x8c,
+ 0x00, 0x7e, 0x00, 0x71, 0xa5, 0x00, 0x7c, 0x00, 0xe5, 0xc8, 0x00, 0x74, 0x00, 0x91, 0xf4, 0x00,
+ 0x66, 0x00, 0x40, 0x19, 0x01, 0x58, 0x00, 0x0e, 0x28, 0x01, 0x52, 0x00, 0x80, 0x38, 0x01, 0x4a,
+ 0x00
+};
static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
struct pp_hwmgr *hwmgr)
@@ -799,12 +832,17 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
uint16_t size;
if (!table_addr) {
- table_addr = cgs_atom_get_data_table(hwmgr->device,
- GetIndexIntoMasterTable(DATA, PowerPlayInfo),
- &size, &frev, &crev);
-
- hwmgr->soft_pp_table = table_addr;
- hwmgr->soft_pp_table_size = size;
+ if (hwmgr->chip_id == CHIP_RAVEN) {
+ table_addr = &soft_dummy_pp_table[0];
+ hwmgr->soft_pp_table = &soft_dummy_pp_table[0];
+ hwmgr->soft_pp_table_size = sizeof(soft_dummy_pp_table);
+ } else {
+ table_addr = cgs_atom_get_data_table(hwmgr->device,
+ GetIndexIntoMasterTable(DATA, PowerPlayInfo),
+ &size, &frev, &crev);
+ hwmgr->soft_pp_table = table_addr;
+ hwmgr->soft_pp_table_size = size;
+ }
}
return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr;
@@ -924,15 +962,14 @@ int pp_tables_get_entry(struct pp_hwmgr *hwmgr,
}
}
- if ((0 == result) &&
- (0 != (ps->classification.flags & PP_StateClassificationFlag_Boot)))
- result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(ps->hardware));
+ if ((0 == result) && (0 != (ps->classification.flags & PP_StateClassificationFlag_Boot))) {
+ if (hwmgr->chip_family < AMDGPU_FAMILY_RV)
+ result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(ps->hardware));
+ }
return result;
}
-
-
static int init_powerplay_tables(
struct pp_hwmgr *hwmgr,
const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table
@@ -1615,85 +1652,53 @@ static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
if (hwmgr->chip_id == CHIP_RAVEN)
return 0;
- if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) {
- kfree(hwmgr->dyn_state.vddc_dependency_on_sclk);
- hwmgr->dyn_state.vddc_dependency_on_sclk = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_dependency_on_sclk);
+ hwmgr->dyn_state.vddc_dependency_on_sclk = NULL;
- if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
- kfree(hwmgr->dyn_state.vddci_dependency_on_mclk);
- hwmgr->dyn_state.vddci_dependency_on_mclk = NULL;
- }
+ kfree(hwmgr->dyn_state.vddci_dependency_on_mclk);
+ hwmgr->dyn_state.vddci_dependency_on_mclk = NULL;
- if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) {
- kfree(hwmgr->dyn_state.vddc_dependency_on_mclk);
- hwmgr->dyn_state.vddc_dependency_on_mclk = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_dependency_on_mclk);
+ hwmgr->dyn_state.vddc_dependency_on_mclk = NULL;
- if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
- kfree(hwmgr->dyn_state.mvdd_dependency_on_mclk);
- hwmgr->dyn_state.mvdd_dependency_on_mclk = NULL;
- }
+ kfree(hwmgr->dyn_state.mvdd_dependency_on_mclk);
+ hwmgr->dyn_state.mvdd_dependency_on_mclk = NULL;
- if (NULL != hwmgr->dyn_state.valid_mclk_values) {
- kfree(hwmgr->dyn_state.valid_mclk_values);
- hwmgr->dyn_state.valid_mclk_values = NULL;
- }
+ kfree(hwmgr->dyn_state.valid_mclk_values);
+ hwmgr->dyn_state.valid_mclk_values = NULL;
- if (NULL != hwmgr->dyn_state.valid_sclk_values) {
- kfree(hwmgr->dyn_state.valid_sclk_values);
- hwmgr->dyn_state.valid_sclk_values = NULL;
- }
+ kfree(hwmgr->dyn_state.valid_sclk_values);
+ hwmgr->dyn_state.valid_sclk_values = NULL;
- if (NULL != hwmgr->dyn_state.cac_leakage_table) {
- kfree(hwmgr->dyn_state.cac_leakage_table);
- hwmgr->dyn_state.cac_leakage_table = NULL;
- }
+ kfree(hwmgr->dyn_state.cac_leakage_table);
+ hwmgr->dyn_state.cac_leakage_table = NULL;
- if (NULL != hwmgr->dyn_state.vddc_phase_shed_limits_table) {
- kfree(hwmgr->dyn_state.vddc_phase_shed_limits_table);
- hwmgr->dyn_state.vddc_phase_shed_limits_table = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_phase_shed_limits_table);
+ hwmgr->dyn_state.vddc_phase_shed_limits_table = NULL;
- if (NULL != hwmgr->dyn_state.vce_clock_voltage_dependency_table) {
- kfree(hwmgr->dyn_state.vce_clock_voltage_dependency_table);
- hwmgr->dyn_state.vce_clock_voltage_dependency_table = NULL;
- }
+ kfree(hwmgr->dyn_state.vce_clock_voltage_dependency_table);
+ hwmgr->dyn_state.vce_clock_voltage_dependency_table = NULL;
- if (NULL != hwmgr->dyn_state.uvd_clock_voltage_dependency_table) {
- kfree(hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
- hwmgr->dyn_state.uvd_clock_voltage_dependency_table = NULL;
- }
+ kfree(hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table = NULL;
- if (NULL != hwmgr->dyn_state.samu_clock_voltage_dependency_table) {
- kfree(hwmgr->dyn_state.samu_clock_voltage_dependency_table);
- hwmgr->dyn_state.samu_clock_voltage_dependency_table = NULL;
- }
+ kfree(hwmgr->dyn_state.samu_clock_voltage_dependency_table);
+ hwmgr->dyn_state.samu_clock_voltage_dependency_table = NULL;
- if (NULL != hwmgr->dyn_state.acp_clock_voltage_dependency_table) {
- kfree(hwmgr->dyn_state.acp_clock_voltage_dependency_table);
- hwmgr->dyn_state.acp_clock_voltage_dependency_table = NULL;
- }
+ kfree(hwmgr->dyn_state.acp_clock_voltage_dependency_table);
+ hwmgr->dyn_state.acp_clock_voltage_dependency_table = NULL;
- if (NULL != hwmgr->dyn_state.cac_dtp_table) {
- kfree(hwmgr->dyn_state.cac_dtp_table);
- hwmgr->dyn_state.cac_dtp_table = NULL;
- }
+ kfree(hwmgr->dyn_state.cac_dtp_table);
+ hwmgr->dyn_state.cac_dtp_table = NULL;
- if (NULL != hwmgr->dyn_state.ppm_parameter_table) {
- kfree(hwmgr->dyn_state.ppm_parameter_table);
- hwmgr->dyn_state.ppm_parameter_table = NULL;
- }
+ kfree(hwmgr->dyn_state.ppm_parameter_table);
+ hwmgr->dyn_state.ppm_parameter_table = NULL;
- if (NULL != hwmgr->dyn_state.vdd_gfx_dependency_on_sclk) {
- kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk);
- hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL;
- }
+ kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk);
+ hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL;
- if (NULL != hwmgr->dyn_state.vq_budgeting_table) {
- kfree(hwmgr->dyn_state.vq_budgeting_table);
- hwmgr->dyn_state.vq_budgeting_table = NULL;
- }
+ kfree(hwmgr->dyn_state.vq_budgeting_table);
+ hwmgr->dyn_state.vq_budgeting_table = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
index 2c3e6baf2524..3e0b267c74a8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
@@ -38,20 +38,17 @@
#include "pp_soc15.h"
#define RAVEN_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define RAVEN_MINIMUM_ENGINE_CLOCK 800 //8Mhz, the low boundary of engine clock allowed on this chip
+#define RAVEN_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */
#define SCLK_MIN_DIV_INTV_SHIFT 12
-#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 //100mhz
+#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */
#define SMC_RAM_END 0x40000
static const unsigned long PhwRaven_Magic = (unsigned long) PHM_Rv_Magic;
+
+
int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock_req);
-struct phm_vq_budgeting_record rv_vqtable[] = {
- /* _TBD
- * CUs, SSP low, SSP High, Min Sclk Low, Min Sclk, High, AWD/non-AWD, DCLK, ECLK, Sustainable Sclk, Sustainable CUs */
- { 8, 0, 45, 0, 0, VQ_DisplayConfig_NoneAWD, 80000, 120000, 4, 0 },
-};
static struct rv_power_state *cast_rv_ps(struct pp_hw_power_state *hw_ps)
{
@@ -70,101 +67,27 @@ static const struct rv_power_state *cast_const_rv_ps(
return (struct rv_power_state *)hw_ps;
}
-static int rv_init_vq_budget_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t table_size, i;
- struct phm_vq_budgeting_table *ptable;
- uint32_t num_entries = ARRAY_SIZE(rv_vqtable);
-
- if (hwmgr->dyn_state.vq_budgeting_table != NULL)
- return 0;
-
- table_size = sizeof(struct phm_vq_budgeting_table) +
- sizeof(struct phm_vq_budgeting_record) * (num_entries - 1);
-
- ptable = kzalloc(table_size, GFP_KERNEL);
- if (NULL == ptable)
- return -ENOMEM;
-
- ptable->numEntries = (uint8_t) num_entries;
-
- for (i = 0; i < ptable->numEntries; i++) {
- ptable->entries[i].ulCUs = rv_vqtable[i].ulCUs;
- ptable->entries[i].ulSustainableSOCPowerLimitLow = rv_vqtable[i].ulSustainableSOCPowerLimitLow;
- ptable->entries[i].ulSustainableSOCPowerLimitHigh = rv_vqtable[i].ulSustainableSOCPowerLimitHigh;
- ptable->entries[i].ulMinSclkLow = rv_vqtable[i].ulMinSclkLow;
- ptable->entries[i].ulMinSclkHigh = rv_vqtable[i].ulMinSclkHigh;
- ptable->entries[i].ucDispConfig = rv_vqtable[i].ucDispConfig;
- ptable->entries[i].ulDClk = rv_vqtable[i].ulDClk;
- ptable->entries[i].ulEClk = rv_vqtable[i].ulEClk;
- ptable->entries[i].ulSustainableSclk = rv_vqtable[i].ulSustainableSclk;
- ptable->entries[i].ulSustainableCUs = rv_vqtable[i].ulSustainableCUs;
- }
-
- hwmgr->dyn_state.vq_budgeting_table = ptable;
-
- return 0;
-}
-
static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_hwmgr = (struct rv_hwmgr *)(hwmgr->backend);
- struct cgs_system_info sys_info = {0};
- int result;
- rv_hwmgr->ddi_power_gating_disabled = 0;
- rv_hwmgr->bapm_enabled = 1;
rv_hwmgr->dce_slow_sclk_threshold = 30000;
- rv_hwmgr->disable_driver_thermal_policy = 1;
rv_hwmgr->thermal_auto_throttling_treshold = 0;
rv_hwmgr->is_nb_dpm_enabled = 1;
rv_hwmgr->dpm_flags = 1;
- rv_hwmgr->disable_smu_acp_s3_handshake = 1;
- rv_hwmgr->disable_notify_smu_vpu_recovery = 0;
rv_hwmgr->gfx_off_controled_by_driver = false;
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicM3Arbiter);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDynamicPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SamuPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ACP);
+ rv_hwmgr->need_min_deep_sleep_dcefclk = true;
+ rv_hwmgr->num_active_display = 0;
+ rv_hwmgr->deep_sleep_dcefclk = 0;
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_GFXDynamicMGPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableVoltageIsland);
-
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicUVDState);
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (!result) {
- if (sys_info.value & AMD_PG_SUPPORT_GFX_DMG)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_GFXDynamicMGPowerGating);
- }
-
+ PHM_PlatformCaps_PowerPlaySupport);
return 0;
}
@@ -234,102 +157,88 @@ static int rv_construct_boot_state(struct pp_hwmgr *hwmgr)
return 0;
}
-static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
struct PP_Clocks clocks = {0};
struct pp_display_clock_request clock_req;
clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
- clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
clock_req.clock_type = amd_pp_dcf_clock;
clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
- if (clocks.dcefClock == 0 && clocks.dcefClockInSR == 0)
- clock_req.clock_freq_in_khz = rv_data->dcf_actual_hard_min_freq;
-
PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req),
"Attempt to set DCF Clock Failed!", return -EINVAL);
- if(rv_data->need_min_deep_sleep_dcefclk && 0 != clocks.dcefClockInSR)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetMinDeepSleepDcefclk,
- clocks.dcefClockInSR / 100);
- /*
- if(!rv_data->isp_tileA_power_gated || !rv_data->isp_tileB_power_gated) {
- if ((hwmgr->ispArbiter.iclk != 0) && (rv_data->ISPActualHardMinFreq != (hwmgr->ispArbiter.iclk / 100) )) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetHardMinIspclkByFreq, hwmgr->ispArbiter.iclk / 100);
- rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->ISPActualHardMinFreq),
- }
- } */
-
if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) ||
((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) {
rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100;
rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinVcn,
(rv_data->vclk_soft_min << 16) | rv_data->vclk_soft_min);
}
if((hwmgr->gfx_arbiter.sclk_hard_min != 0) &&
((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
hwmgr->gfx_arbiter.sclk_hard_min / 100);
- rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->soc_actual_hard_min_freq);
+ rv_read_arg_from_smc(hwmgr, &rv_data->soc_actual_hard_min_freq);
}
if ((hwmgr->gfx_arbiter.gfxclk != 0) &&
(rv_data->gfx_actual_soft_min_freq != (hwmgr->gfx_arbiter.gfxclk))) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinVideoGfxclkFreq,
hwmgr->gfx_arbiter.gfxclk / 100);
- rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->gfx_actual_soft_min_freq);
+ rv_read_arg_from_smc(hwmgr, &rv_data->gfx_actual_soft_min_freq);
}
if ((hwmgr->gfx_arbiter.fclk != 0) &&
(rv_data->fabric_actual_soft_min_freq != (hwmgr->gfx_arbiter.fclk / 100))) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinVideoFclkFreq,
hwmgr->gfx_arbiter.fclk / 100);
- rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->fabric_actual_soft_min_freq);
+ rv_read_arg_from_smc(hwmgr, &rv_data->fabric_actual_soft_min_freq);
}
return 0;
}
-static int rv_tf_set_num_active_display(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int rv_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
- uint32_t num_of_active_displays = 0;
- struct cgs_display_info info = {0};
+ struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+
+ if (rv_data->need_min_deep_sleep_dcefclk && rv_data->deep_sleep_dcefclk != clock/100) {
+ rv_data->deep_sleep_dcefclk = clock/100;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetMinDeepSleepDcefclk,
+ rv_data->deep_sleep_dcefclk);
+ }
+ return 0;
+}
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_of_active_displays = info.display_count;
+static int rv_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
+{
+ struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ if (rv_data->num_active_display != count) {
+ rv_data->num_active_display = count;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplayCount,
- num_of_active_displays);
+ rv_data->num_active_display);
+ }
+
return 0;
}
-static const struct phm_master_table_item rv_set_power_state_list[] = {
- { .tableFunction = rv_tf_set_clock_limit },
- { .tableFunction = rv_tf_set_num_active_display },
- { }
-};
-
-static const struct phm_master_table_header rv_set_power_state_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_set_power_state_list
-};
+static int rv_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+ return rv_set_clock_limit(hwmgr, input);
+}
-static int rv_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int rv_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
@@ -340,20 +249,13 @@ static int rv_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static const struct phm_master_table_item rv_setup_asic_list[] = {
- { .tableFunction = rv_tf_init_power_gate_state },
- { }
-};
-static const struct phm_master_table_header rv_setup_asic_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_setup_asic_list
-};
+static int rv_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+ return rv_init_power_gate_state(hwmgr);
+}
-static int rv_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int rv_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
@@ -365,66 +267,42 @@ static int rv_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
return 0;
}
-static const struct phm_master_table_item rv_power_down_asic_list[] = {
- { .tableFunction = rv_tf_reset_cc6_data },
- { }
-};
-
-static const struct phm_master_table_header rv_power_down_asic_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_power_down_asic_list
-};
-
+static int rv_power_off_asic(struct pp_hwmgr *hwmgr)
+{
+ return rv_reset_cc6_data(hwmgr);
+}
-static int rv_tf_disable_gfx_off(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int rv_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
if (rv_data->gfx_off_controled_by_driver)
- smum_send_msg_to_smc(hwmgr->smumgr,
+ smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_DisableGfxOff);
return 0;
}
-static const struct phm_master_table_item rv_disable_dpm_list[] = {
- { .tableFunction = rv_tf_disable_gfx_off },
- { },
-};
-
-
-static const struct phm_master_table_header rv_disable_dpm_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_disable_dpm_list
-};
+static int rv_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ return rv_disable_gfx_off(hwmgr);
+}
-static int rv_tf_enable_gfx_off(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int rv_enable_gfx_off(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
if (rv_data->gfx_off_controled_by_driver)
- smum_send_msg_to_smc(hwmgr->smumgr,
+ smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_EnableGfxOff);
return 0;
}
-static const struct phm_master_table_item rv_enable_dpm_list[] = {
- { .tableFunction = rv_tf_enable_gfx_off },
- { },
-};
-
-static const struct phm_master_table_header rv_enable_dpm_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_enable_dpm_list
-};
+static int rv_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ return rv_enable_gfx_off(hwmgr);
+}
static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *prequest_ps,
@@ -434,37 +312,37 @@ static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}
/* temporary hardcoded clock voltage breakdown tables */
-DpmClock_t VddDcfClk[]= {
+static const DpmClock_t VddDcfClk[]= {
{ 300, 2600},
{ 600, 3200},
{ 600, 3600},
};
-DpmClock_t VddSocClk[]= {
+static const DpmClock_t VddSocClk[]= {
{ 478, 2600},
{ 722, 3200},
{ 722, 3600},
};
-DpmClock_t VddFClk[]= {
+static const DpmClock_t VddFClk[]= {
{ 400, 2600},
{1200, 3200},
{1200, 3600},
};
-DpmClock_t VddDispClk[]= {
+static const DpmClock_t VddDispClk[]= {
{ 435, 2600},
{ 661, 3200},
{1086, 3600},
};
-DpmClock_t VddDppClk[]= {
+static const DpmClock_t VddDppClk[]= {
{ 435, 2600},
{ 661, 3200},
{ 661, 3600},
};
-DpmClock_t VddPhyClk[]= {
+static const DpmClock_t VddPhyClk[]= {
{ 540, 2600},
{ 810, 3200},
{ 810, 3600},
@@ -472,7 +350,7 @@ DpmClock_t VddPhyClk[]= {
static int rv_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
struct rv_voltage_dependency_table **pptable,
- uint32_t num_entry, DpmClock_t *pclk_dependency_table)
+ uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
{
uint32_t table_size, i;
struct rv_voltage_dependency_table *ptable;
@@ -505,7 +383,7 @@ static int rv_populate_clock_table(struct pp_hwmgr *hwmgr)
DpmClocks_t *table = &(rv_data->clock_table);
struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
- result = rv_copy_table_from_smc(hwmgr->smumgr, (uint8_t *)table, CLOCKTABLE);
+ result = rv_copy_table_from_smc(hwmgr, (uint8_t *)table, CLOCKTABLE);
PP_ASSERT_WITH_CODE((0 == result),
"Attempt to copy clock table from smc failed",
@@ -543,6 +421,26 @@ static int rv_populate_clock_table(struct pp_hwmgr *hwmgr)
rv_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetMinGfxclkFrequency),
+ "Attempt to get min GFXCLK Failed!",
+ return -1);
+ PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
+ &result),
+ "Attempt to get min GFXCLK Failed!",
+ return -1);
+ rv_data->gfx_min_freq_limit = result * 100;
+
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetMaxGfxclkFrequency),
+ "Attempt to get max GFXCLK Failed!",
+ return -1);
+ PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
+ &result),
+ "Attempt to get max GFXCLK Failed!",
+ return -1);
+ rv_data->gfx_max_freq_limit = result * 100;
+
return 0;
}
@@ -563,9 +461,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
return result;
}
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerPlaySupport);
-
rv_populate_clock_table(hwmgr);
result = rv_get_system_info_data(hwmgr);
@@ -576,40 +471,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
rv_construct_boot_state(hwmgr);
- result = phm_construct_table(hwmgr, &rv_setup_asic_master,
- &(hwmgr->setup_asic));
- if (result != 0) {
- pr_err("Fail to construct setup ASIC\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &rv_power_down_asic_master,
- &(hwmgr->power_down_asic));
- if (result != 0) {
- pr_err("Fail to construct power down ASIC\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &rv_set_power_state_master,
- &(hwmgr->set_power_state));
- if (result != 0) {
- pr_err("Fail to construct set_power_state\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &rv_disable_dpm_master,
- &(hwmgr->disable_dynamic_state_management));
- if (result != 0) {
- pr_err("Fail to disable_dynamic_state\n");
- return result;
- }
- result = phm_construct_table(hwmgr, &rv_enable_dpm_master,
- &(hwmgr->enable_dynamic_state_management));
- if (result != 0) {
- pr_err("Fail to enable_dynamic_state\n");
- return result;
- }
-
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
RAVEN_MAX_HARDWARE_POWERLEVELS;
@@ -624,8 +485,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
- rv_init_vq_budget_table(hwmgr);
-
return result;
}
@@ -634,46 +493,21 @@ static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
- phm_destroy_table(hwmgr, &(hwmgr->set_power_state));
- phm_destroy_table(hwmgr, &(hwmgr->enable_dynamic_state_management));
- phm_destroy_table(hwmgr, &(hwmgr->disable_dynamic_state_management));
- phm_destroy_table(hwmgr, &(hwmgr->power_down_asic));
- phm_destroy_table(hwmgr, &(hwmgr->setup_asic));
-
- if (pinfo->vdd_dep_on_dcefclk) {
- kfree(pinfo->vdd_dep_on_dcefclk);
- pinfo->vdd_dep_on_dcefclk = NULL;
- }
- if (pinfo->vdd_dep_on_socclk) {
- kfree(pinfo->vdd_dep_on_socclk);
- pinfo->vdd_dep_on_socclk = NULL;
- }
- if (pinfo->vdd_dep_on_fclk) {
- kfree(pinfo->vdd_dep_on_fclk);
- pinfo->vdd_dep_on_fclk = NULL;
- }
- if (pinfo->vdd_dep_on_dispclk) {
- kfree(pinfo->vdd_dep_on_dispclk);
- pinfo->vdd_dep_on_dispclk = NULL;
- }
- if (pinfo->vdd_dep_on_dppclk) {
- kfree(pinfo->vdd_dep_on_dppclk);
- pinfo->vdd_dep_on_dppclk = NULL;
- }
- if (pinfo->vdd_dep_on_phyclk) {
- kfree(pinfo->vdd_dep_on_phyclk);
- pinfo->vdd_dep_on_phyclk = NULL;
- }
-
- if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
- kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
- }
-
- if (NULL != hwmgr->dyn_state.vq_budgeting_table) {
- kfree(hwmgr->dyn_state.vq_budgeting_table);
- hwmgr->dyn_state.vq_budgeting_table = NULL;
- }
+ kfree(pinfo->vdd_dep_on_dcefclk);
+ pinfo->vdd_dep_on_dcefclk = NULL;
+ kfree(pinfo->vdd_dep_on_socclk);
+ pinfo->vdd_dep_on_socclk = NULL;
+ kfree(pinfo->vdd_dep_on_fclk);
+ pinfo->vdd_dep_on_fclk = NULL;
+ kfree(pinfo->vdd_dep_on_dispclk);
+ pinfo->vdd_dep_on_dispclk = NULL;
+ kfree(pinfo->vdd_dep_on_dppclk);
+ pinfo->vdd_dep_on_dppclk = NULL;
+ kfree(pinfo->vdd_dep_on_phyclk);
+ pinfo->vdd_dep_on_phyclk = NULL;
+
+ kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
kfree(hwmgr->backend);
hwmgr->backend = NULL;
@@ -687,12 +521,12 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
return 0;
}
-static int rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
return 0;
}
-static int rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
return 0;
}
@@ -711,18 +545,9 @@ static int rv_dpm_get_pp_table_entry_callback(
{
struct rv_power_state *rv_ps = cast_rv_ps(hw_ps);
- const ATOM_PPLIB_CZ_CLOCK_INFO *rv_clock_info = clock_info;
-
- struct phm_clock_voltage_dependency_table *table =
- hwmgr->dyn_state.vddc_dependency_on_sclk;
- uint8_t clock_info_index = rv_clock_info->index;
-
- if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
- clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
-
- rv_ps->levels[index].engine_clock = table->entries[clock_info_index].clk;
- rv_ps->levels[index].vddc_index = (uint8_t)table->entries[clock_info_index].v;
+ rv_ps->levels[index].engine_clock = 0;
+ rv_ps->levels[index].vddc_index = 0;
rv_ps->level = index + 1;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
@@ -794,43 +619,74 @@ static int rv_force_clock_level(struct pp_hwmgr *hwmgr,
static int rv_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
- return 0;
+ struct rv_hwmgr *data = (struct rv_hwmgr *)(hwmgr->backend);
+ struct rv_voltage_dependency_table *mclk_table =
+ data->clock_vol_info.vdd_dep_on_fclk;
+ int i, now, size = 0;
+
+ switch (type) {
+ case PP_SCLK:
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetGfxclkFrequency),
+ "Attempt to get current GFXCLK Failed!",
+ return -1);
+ PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
+ &now),
+ "Attempt to get current GFXCLK Failed!",
+ return -1);
+
+ size += sprintf(buf + size, "0: %uMhz %s\n",
+ data->gfx_min_freq_limit / 100,
+ ((data->gfx_min_freq_limit / 100)
+ == now) ? "*" : "");
+ size += sprintf(buf + size, "1: %uMhz %s\n",
+ data->gfx_max_freq_limit / 100,
+ ((data->gfx_max_freq_limit / 100)
+ == now) ? "*" : "");
+ break;
+ case PP_MCLK:
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetFclkFrequency),
+ "Attempt to get current MEMCLK Failed!",
+ return -1);
+ PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
+ &now),
+ "Attempt to get current MEMCLK Failed!",
+ return -1);
+
+ for (i = 0; i < mclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i,
+ mclk_table->entries[i].clk / 100,
+ ((mclk_table->entries[i].clk / 100)
+ == now) ? "*" : "");
+ break;
+ default:
+ break;
+ }
+
+ return size;
}
static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
PHM_PerformanceLevelDesignation designation, uint32_t index,
PHM_PerformanceLevel *level)
{
- const struct rv_power_state *ps;
struct rv_hwmgr *data;
- uint32_t level_index;
- uint32_t i;
- uint32_t vol_dep_record_index = 0;
if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;
data = (struct rv_hwmgr *)(hwmgr->backend);
- ps = cast_const_rv_ps(state);
- level_index = index > ps->level - 1 ? ps->level - 1 : index;
- level->coreClock = ps->levels[level_index].engine_clock;
-
- if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
- for (i = 1; i < ps->level; i++) {
- if (ps->levels[i].engine_clock > data->dce_slow_sclk_threshold) {
- level->coreClock = ps->levels[i].engine_clock;
- break;
- }
- }
- }
-
- if (level_index == 0) {
- vol_dep_record_index = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
- level->memory_clock =
- data->clock_vol_info.vdd_dep_on_fclk->entries[vol_dep_record_index].clk;
- } else
+ if (index == 0) {
level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
+ level->coreClock = data->gfx_min_freq_limit;
+ } else {
+ level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
+ data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
+ level->coreClock = data->gfx_max_freq_limit;
+ }
level->nonLocalMemoryFreq = 0;
level->nonLocalMemoryWidth = 0;
@@ -993,7 +849,7 @@ int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg,
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, msg,
clk_freq);
return result;
@@ -1001,7 +857,8 @@ int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
static int rv_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
- return -EINVAL;
+ clocks->engine_max_clock = 80000; /* driver can't get engine clock, temp hard code to 800MHz */
+ return 0;
}
static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr)
@@ -1023,13 +880,37 @@ static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int rv_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
+ uint32_t sclk, mclk;
+ int ret = 0;
+
switch (idx) {
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
+ if (!ret) {
+ rv_read_arg_from_smc(hwmgr, &sclk);
+ /* in units of 10KHZ */
+ *((uint32_t *)value) = sclk * 100;
+ *size = 4;
+ }
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
+ if (!ret) {
+ rv_read_arg_from_smc(hwmgr, &mclk);
+ /* in units of 10KHZ */
+ *((uint32_t *)value) = mclk * 100;
+ *size = 4;
+ }
+ break;
case AMDGPU_PP_SENSOR_GPU_TEMP:
*((uint32_t *)value) = rv_thermal_get_temperature(hwmgr);
- return 0;
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
+
+ return ret;
}
static const struct pp_hwmgr_func rv_hwmgr_funcs = {
@@ -1058,6 +939,13 @@ static const struct pp_hwmgr_func rv_hwmgr_funcs = {
.get_clock_by_type_with_voltage = rv_get_clock_by_type_with_voltage,
.get_max_high_clocks = rv_get_max_high_clocks,
.read_sensor = rv_read_sensor,
+ .set_active_display_count = rv_set_active_display_count,
+ .set_deep_sleep_dcefclk = rv_set_deep_sleep_dcefclk,
+ .dynamic_state_management_enable = rv_enable_dpm_tasks,
+ .power_off_asic = rv_power_off_asic,
+ .asic_setup = rv_setup_asic_task,
+ .power_state_set = rv_set_power_state_tasks,
+ .dynamic_state_management_disable = rv_disable_dpm_tasks,
};
int rv_init_function_pointers(struct pp_hwmgr *hwmgr)
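
For orientation: the rv_hwmgr.c hunks above drop the phm master-table machinery (rv_setup_asic_master, rv_set_power_state_master, and friends) and instead register plain callbacks in rv_hwmgr_funcs. A minimal sketch of how such a callback is reached after this change follows; the pp_asic_setup() dispatcher name is illustrative only, and the sole assumption is that struct pp_hwmgr carries a pointer to its pp_hwmgr_func table.

/* Illustrative sketch, not part of this patch: a generic dispatcher that
 * invokes the per-ASIC callback wired up in rv_hwmgr_funcs above.
 */
static int pp_asic_setup(struct pp_hwmgr *hwmgr)
{
	if (!hwmgr || !hwmgr->hwmgr_func || !hwmgr->hwmgr_func->asic_setup)
		return -EINVAL;

	/* For Raven this resolves to rv_setup_asic_task(), which in turn
	 * calls rv_init_power_gate_state().
	 */
	return hwmgr->hwmgr_func->asic_setup(hwmgr);
}
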
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
index 2472b50e54cf..9dc503055394 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
@@ -283,6 +283,8 @@ struct rv_hwmgr {
uint32_t vclk_soft_min;
uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;
+ uint32_t gfx_min_freq_limit;
+ uint32_t gfx_max_freq_limit;
bool vcn_power_gated;
bool vcn_dpg_mode;
@@ -293,7 +295,9 @@ struct rv_hwmgr {
DpmClocks_t clock_table;
uint32_t active_process_mask;
- bool need_min_deep_sleep_dcefclk; /* disabled by default */
+ bool need_min_deep_sleep_dcefclk;
+ uint32_t deep_sleep_dcefclk;
+ uint32_t num_active_display;
};
struct pp_hwmgr;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 261b828ad590..69a0678ace98 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -27,21 +27,21 @@
static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_UVDDPM_Enable :
PPSMC_MSG_UVDDPM_Disable);
}
static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_VCEDPM_Enable :
PPSMC_MSG_VCEDPM_Disable);
}
static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_SAMUDPM_Enable :
PPSMC_MSG_SAMUDPM_Disable);
}
@@ -70,7 +70,7 @@ static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_UVDPowerOFF);
return 0;
}
@@ -80,10 +80,10 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
if (phm_cf_want_uvd_power_gating(hwmgr)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDynamicPowerGating)) {
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDPowerON, 1);
} else {
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDPowerON, 0);
}
}
@@ -94,7 +94,7 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_VCEPowerOFF);
return 0;
}
@@ -102,7 +102,7 @@ static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_VCEPowerON);
return 0;
}
@@ -111,7 +111,7 @@ static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_SAMPowerOFF);
return 0;
}
@@ -120,7 +120,7 @@ static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_SAMPowerON);
return 0;
}
@@ -140,7 +140,7 @@ int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -166,10 +166,9 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
smu7_update_uvd_dpm(hwmgr, false);
}
- return 0;
}
-int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -194,7 +193,6 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
AMD_PG_STATE_UNGATE);
smu7_update_vce_dpm(hwmgr, false);
}
- return 0;
}
int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
@@ -237,7 +235,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -247,7 +245,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -260,7 +258,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_3DCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
@@ -271,7 +269,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_3DLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -284,7 +282,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_RLC_LS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -297,7 +295,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CP_LS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -311,7 +309,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
CG_GFX_OTHERS_MGCG_MASK);
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -331,7 +329,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_BIF_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -341,7 +339,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_BIF_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -354,7 +352,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_MC_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
@@ -365,7 +363,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_MC_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -378,7 +376,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_DRM_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -388,7 +386,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_DRM_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -401,7 +399,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_HDP_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
@@ -412,7 +410,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_HDP_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -425,7 +423,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_SDMA_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
@@ -436,7 +434,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_SDMA_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -449,7 +447,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_ROM_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -489,9 +487,9 @@ int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
active_cus = sys_info.value;
if (enable)
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus);
else
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GFX_CU_PG_DISABLE);
}
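
The repetitive hwmgr->smumgr to hwmgr substitutions in this file (and in rv_hwmgr.c above) all follow from the smum_* helpers now taking struct pp_hwmgr directly. A rough sketch of the wrapper shape this implies is shown below; it assumes the SMU backend ops remain reachable through the hwmgr, which this diff does not show.

/* Sketch under that assumption; the real wrapper lives elsewhere in
 * powerplay and is not part of this hunk.
 */
int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL ||
	    hwmgr->smumgr_funcs->send_msg_to_smc == NULL)
		return -EINVAL;

	/* Callers no longer dereference hwmgr->smumgr themselves. */
	return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
}
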
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index c96ed9ed7eaf..7b54d48b2ce2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -27,8 +27,8 @@
#include "smu7_hwmgr.h"
#include "pp_asicblocks.h"
-int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index b526f49be65d..4466469cf8ab 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>
+#include <drm/amdgpu_drm.h>
#include "pp_acpi.h"
#include "ppatomctrl.h"
#include "atombios.h"
@@ -163,7 +164,7 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
return 0;
}
@@ -300,28 +301,28 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
"Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
}
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
PP_ASSERT_WITH_CODE(
(data->vddc_voltage_table.count <= tmp),
"Too many voltage values for VDDC. Trimming to fit state table.",
phm_trim_voltage_table_to_fit_state_table(tmp,
&(data->vddc_voltage_table)));
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
PP_ASSERT_WITH_CODE(
(data->vddgfx_voltage_table.count <= tmp),
"Too many voltage values for VDDC. Trimming to fit state table.",
phm_trim_voltage_table_to_fit_state_table(tmp,
&(data->vddgfx_voltage_table)));
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
PP_ASSERT_WITH_CODE(
(data->vddci_voltage_table.count <= tmp),
"Too many voltage values for VDDCI. Trimming to fit state table.",
phm_trim_voltage_table_to_fit_state_table(tmp,
&(data->vddci_voltage_table)));
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
PP_ASSERT_WITH_CODE(
(data->mvdd_voltage_table.count <= tmp),
"Too many voltage values for MVDD. Trimming to fit state table.",
@@ -387,6 +388,7 @@ static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int i;
/* Clear reset for voting clients before enabling DPM */
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -394,50 +396,26 @@ static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
+ for (i = 0; i < 8; i++)
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0 + i * 4,
+ data->voting_rights_clients[i]);
return 0;
}
static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
+ int i;
+
/* Reset voting clients before disabling DPM */
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, 0);
+ for (i = 0; i < 8; i++)
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);
return 0;
}
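
The two loops above fold the eight CG_FREQ_TRAN_VOTING_n writes into one, which is valid only because the indirect register offsets are contiguous with a stride of 4; the loop inherits that layout from the ixCG_FREQ_TRAN_VOTING_0..7 definitions rather than checking it. A hedged sketch of the same pattern with the assumption spelled out:

/* Sketch only: the stride-4 layout of the voting-client registers is an
 * assumption taken from the register definitions, not verified here.
 */
static void smu7_write_voting_clients(struct pp_hwmgr *hwmgr,
				      const uint32_t *values)
{
	int i;

	for (i = 0; i < 8; i++)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				       ixCG_FREQ_TRAN_VOTING_0 + i * 4,
				       values ? values[i] : 0);
}
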
@@ -493,7 +471,7 @@ static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
}
/**
@@ -551,7 +529,7 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
data->pcie_gen_performance = data->pcie_gen_power_saving;
data->pcie_lane_performance = data->pcie_lane_power_saving;
}
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
tmp,
MAX_REGULAR_DPM_NUMBER);
@@ -607,13 +585,20 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
data->dpm_table.pcie_speed_table.count = 6;
}
/* Populate last level for boot PCIE level, but do not increment count. */
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+ if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
+ for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ data->vbios_boot_state.pcie_lane_bootup_value);
+ } else {
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
data->dpm_table.pcie_speed_table.count,
get_pcie_gen_support(data->pcie_gen_cap,
PP_Min_PCIEGen),
get_pcie_lane_support(data->pcie_lane_cap,
PP_Max_PCIELane));
-
+ }
return 0;
}
@@ -625,27 +610,27 @@ static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
phm_reset_single_dpm_table(
&data->dpm_table.sclk_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_GRAPHICS),
MAX_REGULAR_DPM_NUMBER);
phm_reset_single_dpm_table(
&data->dpm_table.mclk_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
phm_reset_single_dpm_table(
&data->dpm_table.vddc_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_VDDC),
MAX_REGULAR_DPM_NUMBER);
phm_reset_single_dpm_table(
&data->dpm_table.vddci_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
phm_reset_single_dpm_table(
&data->dpm_table.mvdd_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_MVDD),
MAX_REGULAR_DPM_NUMBER);
return 0;
@@ -689,7 +674,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
allowed_vdd_sclk_table->entries[i].clk) {
data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
allowed_vdd_sclk_table->entries[i].clk;
- data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
data->dpm_table.sclk_table.count++;
}
}
@@ -703,7 +688,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
allowed_vdd_mclk_table->entries[i].clk) {
data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
allowed_vdd_mclk_table->entries[i].clk;
- data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
data->dpm_table.mclk_table.count++;
}
}
@@ -855,7 +840,7 @@ static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_EnableVRHotGPIOInterrupt);
return 0;
@@ -873,7 +858,7 @@ static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);
return 0;
}
@@ -883,7 +868,7 @@ static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);
return 0;
}
@@ -892,12 +877,12 @@ static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
+ if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
PP_ASSERT_WITH_CODE(false,
"Attempt to enable Master Deep Sleep switch failed!",
return -EINVAL);
} else {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
+ if (smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_MASTER_DeepSleep_OFF)) {
PP_ASSERT_WITH_CODE(false,
"Attempt to disable Master Deep Sleep switch failed!",
@@ -912,7 +897,7 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
+ if (smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_MASTER_DeepSleep_OFF)) {
PP_ASSERT_WITH_CODE(false,
"Attempt to disable Master Deep Sleep switch failed!",
@@ -928,12 +913,12 @@ static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t soft_register_value = 0;
uint32_t handshake_disables_offset = data->soft_regs_start
- + smum_get_offsetof(hwmgr->smumgr,
+ + smum_get_offsetof(hwmgr,
SMU_SoftRegisters, HandshakeDisables);
soft_register_value = cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC, handshake_disables_offset);
- soft_register_value |= smum_get_mac_definition(hwmgr->smumgr,
+ soft_register_value |= smum_get_mac_definition(hwmgr,
SMU_UVD_MCLK_HANDSHAKE_DISABLE);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
handshake_disables_offset, soft_register_value);
@@ -947,7 +932,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
/* enable SCLK dpm */
if (!data->sclk_dpm_key_disabled)
PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
+ (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
"Failed to enable SCLK DPM during DPM Start Function!",
return -EINVAL);
@@ -956,20 +941,31 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
smu7_disable_handshake_uvd(hwmgr);
PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ (0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_MCLKDPM_Enable)),
"Failed to enable MCLK DPM during DPM Start Function!",
return -EINVAL);
PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
- udelay(10);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
+
+ if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
+ udelay(10);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
+ } else {
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
+ udelay(10);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
+ }
}
return 0;
@@ -993,11 +989,15 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
data->soft_regs_start +
- smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters,
+ smum_get_offsetof(hwmgr, SMU_SoftRegisters,
VoltageChangeTimeout), 0x1000);
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
SWRST_COMMAND_1, RESETLC, 0x0);
+ if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
+ cgs_write_register(hwmgr->device, 0x1488,
+ (cgs_read_register(hwmgr->device, 0x1488) & ~0x1));
+
if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
pr_err("Failed to enable Sclk DPM and Mclk DPM!");
return -EINVAL;
@@ -1006,7 +1006,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
/* enable PCIE dpm */
if (0 == data->pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ (0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PCIeDPM_Enable)),
"Failed to enable pcie DPM during DPM Start Function!",
return -EINVAL);
@@ -1014,7 +1014,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_Falcon_QuickTransition)) {
- PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_EnableACDCGPIOInterrupt)),
"Failed to enable AC DC GPIO Interrupt!",
);
@@ -1032,7 +1032,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to disable SCLK DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
}
/* disable MCLK dpm */
@@ -1040,7 +1040,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to disable MCLK DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
}
return 0;
@@ -1060,7 +1060,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
/* disable PCIE dpm */
if (!data->pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
+ (smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PCIeDPM_Disable) == 0),
"Failed to disable pcie DPM during DPM Stop Function!",
return -EINVAL);
@@ -1072,7 +1072,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
"Trying to disable voltage DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);
return 0;
}
@@ -1226,7 +1226,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
- smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);
+ smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);
tmp_result = smu7_enable_sclk_control(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1361,14 +1361,14 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->vddc_vddgfx_delta = 300;
data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
- data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
- data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
- data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
- data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
- data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
- data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
- data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
- data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
+ data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
+ data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
+ data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
+ data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
+ data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
+ data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
+ data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
+ data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
@@ -1382,23 +1382,40 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->force_pcie_gen = PP_PCIEGenInvalid;
data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
- if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->smumgr->is_kicker) {
+ if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
uint8_t tmp1, tmp2;
uint16_t tmp3 = 0;
atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
&tmp3);
tmp3 = (tmp3 >> 5) & 0x3;
data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
+ } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
+ data->vddc_phase_shed_control = 1;
+ } else {
+ data->vddc_phase_shed_control = 0;
+ }
+
+ if (hwmgr->chip_id == CHIP_HAWAII) {
+ data->thermal_temp_setting.temperature_low = 94500;
+ data->thermal_temp_setting.temperature_high = 95000;
+ data->thermal_temp_setting.temperature_shutdown = 104000;
+ } else {
+ data->thermal_temp_setting.temperature_low = 99500;
+ data->thermal_temp_setting.temperature_high = 100000;
+ data->thermal_temp_setting.temperature_shutdown = 104000;
}
data->fast_watermark_threshold = 100;
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+ else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
+ data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ControlVDDGFX)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
}
@@ -1406,25 +1423,24 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableMVDDControl)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
}
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ControlVDDGFX);
- }
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ControlVDDCI)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
}
@@ -1543,7 +1559,7 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
if (vddc >= 2000 || vddc == 0)
return -EINVAL;
} else {
- pr_warn("failed to retrieving EVV voltage!\n");
+ pr_debug("failed to retrieving EVV voltage!\n");
continue;
}
@@ -1676,7 +1692,7 @@ static int phm_add_voltage(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((0 != look_up_table->count),
"Lookup Table empty.", return -EINVAL);
- i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
+ i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
PP_ASSERT_WITH_CODE((i >= look_up_table->count),
"Lookup Table is full.", return -EINVAL);
@@ -2274,7 +2290,7 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
}
- if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
+ if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
return 0;
@@ -2282,40 +2298,65 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
- kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
- }
- pp_smu7_thermal_fini(hwmgr);
- if (NULL != hwmgr->backend) {
- kfree(hwmgr->backend);
- hwmgr->backend = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
+ kfree(hwmgr->backend);
+ hwmgr->backend = NULL;
+
+ return 0;
+}
+
+static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
+{
+ uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int i;
+ if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
+ for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
+ virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+ if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
+ virtual_voltage_id,
+ efuse_voltage_id) == 0) {
+ if (vddc != 0 && vddc != virtual_voltage_id) {
+ data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
+ data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
+ data->vddc_leakage.count++;
+ }
+ if (vddci != 0 && vddci != virtual_voltage_id) {
+ data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
+ data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
+ data->vddci_leakage.count++;
+ }
+ }
+ }
+ }
return 0;
}
static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data;
- int result;
+ int result = 0;
data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
hwmgr->backend = data;
- pp_smu7_thermal_initialize(hwmgr);
-
smu7_patch_voltage_workaround(hwmgr);
smu7_init_dpm_defaults(hwmgr);
/* Get leakage voltage based on leakage ID. */
- result = smu7_get_evv_voltages(hwmgr);
-
- if (result) {
- pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
- return -EINVAL;
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV)) {
+ result = smu7_get_evv_voltages(hwmgr);
+ if (result) {
+ pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
+ return -EINVAL;
+ }
+ } else {
+ smu7_get_elb_voltages(hwmgr);
}
if (hwmgr->pp_table_version == PP_TABLE_V1) {
@@ -2382,7 +2423,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
level++;
if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel, level);
}
}
@@ -2395,7 +2436,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
level++;
if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
(1 << level));
}
@@ -2409,7 +2450,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
level++;
if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
(1 << level));
}
@@ -2428,14 +2469,14 @@ static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
if (!data->sclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.sclk_dpm_enable_mask);
}
if (!data->mclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.mclk_dpm_enable_mask);
}
@@ -2451,7 +2492,7 @@ static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
return -EINVAL;
if (!data->pcie_dpm_key_disabled) {
- smum_send_msg_to_smc(hwmgr->smumgr,
+ smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PCIeDPM_UnForceLevel);
}
@@ -2468,7 +2509,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
level = phm_get_lowest_enabled_level(hwmgr,
data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
(1 << level));
@@ -2478,7 +2519,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
level = phm_get_lowest_enabled_level(hwmgr,
data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
(1 << level));
}
@@ -2488,7 +2529,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
level = phm_get_lowest_enabled_level(hwmgr,
data->dpm_level_enable_mask.pcie_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
(level));
}
@@ -2572,51 +2613,16 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t sclk_mask = 0;
uint32_t mclk_mask = 0;
uint32_t pcie_mask = 0;
- uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
-
- if (level == hwmgr->dpm_level)
- return ret;
-
- if (!(hwmgr->dpm_level & profile_mode_mask)) {
- /* enter profile mode, save current level, disable gfx cg*/
- if (level & profile_mode_mask) {
- hwmgr->saved_dpm_level = hwmgr->dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
- }
- } else {
- /* exit profile mode, restore level, enable gfx cg*/
- if (!(level & profile_mode_mask)) {
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
- level = hwmgr->saved_dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- }
- }
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = smu7_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
ret = smu7_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = smu7_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@@ -2625,26 +2631,23 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
if (ret)
return ret;
- hwmgr->dpm_level = level;
smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
-
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
- hwmgr->dpm_level = level;
- break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
- else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-
- return 0;
+ if (!ret) {
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
+ }
+ return ret;
}
static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
@@ -2843,7 +2846,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}
-static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct smu7_power_state *smu7_ps;
@@ -2865,7 +2868,7 @@ static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
[smu7_ps->performance_level_count-1].memory_clock;
}
-static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct smu7_power_state *smu7_ps;
@@ -3002,7 +3005,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
[smu7_power_state->performance_level_count++]);
PP_ASSERT_WITH_CODE(
- (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
+ (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
"Performance levels exceeds SMC limit!",
return -EINVAL);
@@ -3071,11 +3074,11 @@ static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
if (dep_mclk_table->entries[0].clk !=
data->vbios_boot_state.mclk_bootup_value)
- pr_err("Single MCLK entry VDDCI/MCLK dependency table "
+ pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
"does not match VBIOS boot MCLK level");
if (dep_mclk_table->entries[0].vddci !=
data->vbios_boot_state.vddci_bootup_value)
- pr_err("Single VDDCI entry VDDCI/MCLK dependency table "
+ pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
"does not match VBIOS boot VDDCI level");
}
@@ -3166,7 +3169,7 @@ static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
data->highest_mclk = memory_clock;
PP_ASSERT_WITH_CODE(
- (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
+ (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
"Performance levels exceeds SMC limit!",
return -EINVAL);
@@ -3219,11 +3222,11 @@ static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
if (dep_mclk_table->entries[0].clk !=
data->vbios_boot_state.mclk_bootup_value)
- pr_err("Single MCLK entry VDDCI/MCLK dependency table "
+ pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
"does not match VBIOS boot MCLK level");
if (dep_mclk_table->entries[0].v !=
data->vbios_boot_state.vddci_bootup_value)
- pr_err("Single VDDCI entry VDDCI/MCLK dependency table "
+ pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
"does not match VBIOS boot VDDCI level");
}
@@ -3312,14 +3315,14 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
struct pp_gpu_power *query)
{
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PmStatusLogStart),
"Failed to start pm status log!",
return -1);
msleep_interruptible(20);
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PmStatusLogSample),
"Failed to sample pm status log!",
return -1);
@@ -3353,19 +3356,19 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
*((uint32_t *)value) = sclk;
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
*((uint32_t *)value) = mclk;
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters,
AverageGraphicsActivity);
@@ -3532,7 +3535,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to freeze SCLK DPM when DPM is disabled",
);
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_SCLKDPM_FreezeLevel),
"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
return -EINVAL);
@@ -3544,7 +3547,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to freeze MCLK DPM when DPM is disabled",
);
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_MCLKDPM_FreezeLevel),
"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
return -EINVAL);
@@ -3762,7 +3765,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to Unfreeze SCLK DPM when DPM is disabled",
);
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_SCLKDPM_UnfreezeLevel),
"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
@@ -3774,7 +3777,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to Unfreeze MCLK DPM when DPM is disabled",
);
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_SCLKDPM_UnfreezeLevel),
"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
@@ -3824,9 +3827,9 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
- return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+ return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
}
static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
@@ -3899,10 +3902,7 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
}
@@ -3911,7 +3911,7 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
- return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
+ return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1;
}
static int
@@ -3974,12 +3974,12 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters,
PreVBlankGap), 0x64);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters,
VBlankTimeout),
(frame_time_in_us - pre_vbi_time_in_us));
@@ -4004,10 +4004,7 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
}
@@ -4249,21 +4246,21 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
- AMD_DPM_FORCED_LEVEL_LOW |
- AMD_DPM_FORCED_LEVEL_HIGH))
+ if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+ AMD_DPM_FORCED_LEVEL_LOW |
+ AMD_DPM_FORCED_LEVEL_HIGH))
return -EINVAL;
switch (type) {
case PP_SCLK:
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
break;
case PP_MCLK:
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
break;
@@ -4276,7 +4273,7 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
level++;
if (!data->pcie_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
level);
break;
@@ -4300,7 +4297,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
switch (type) {
case PP_SCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
for (i = 0; i < sclk_table->count; i++) {
@@ -4316,7 +4313,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
(i == now) ? "*" : "");
break;
case PP_MCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
for (i = 0; i < mclk_table->count; i++) {
@@ -4353,31 +4350,27 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
return size;
}
-static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
- int result = 0;
-
switch (mode) {
case AMD_FAN_CTRL_NONE:
- result = smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
break;
case AMD_FAN_CTRL_MANUAL:
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl))
- result = smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+ smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
break;
case AMD_FAN_CTRL_AUTO:
- result = smu7_fan_ctrl_set_static_mode(hwmgr, mode);
- if (!result)
- result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
+ if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
+ smu7_fan_ctrl_start_smc_fan_control(hwmgr);
break;
default:
break;
}
- return result;
}
-static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
}
@@ -4606,7 +4599,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
if (sclk_mask) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.
sclk_dpm_enable_mask &
@@ -4615,7 +4608,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
if (mclk_mask) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.
mclk_dpm_enable_mask &
@@ -4627,8 +4620,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
{
- struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (smu_data == NULL)
return -EINVAL;
@@ -4640,19 +4632,60 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
- hwmgr->smumgr, PPSMC_MSG_EnableAvfs),
+ hwmgr, PPSMC_MSG_EnableAvfs),
"Failed to enable AVFS!",
return -EINVAL);
} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
- hwmgr->smumgr, PPSMC_MSG_DisableAvfs),
+ hwmgr, PPSMC_MSG_DisableAvfs),
"Failed to disable AVFS!",
return -EINVAL);
return 0;
}
+static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+ uint32_t virtual_addr_low,
+ uint32_t virtual_addr_hi,
+ uint32_t mc_addr_low,
+ uint32_t mc_addr_hi,
+ uint32_t size)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_ADDR_H),
+ mc_addr_hi);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_ADDR_L),
+ mc_addr_low);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
+ virtual_addr_hi);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
+ virtual_addr_low);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start +
+ smum_get_offsetof(hwmgr,
+ SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
+ size);
+ return 0;
+}
+
static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.backend_init = &smu7_hwmgr_backend_init,
.backend_fini = &smu7_hwmgr_backend_fini,
@@ -4703,6 +4736,8 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.set_power_profile_state = smu7_set_power_profile_state,
.avfs_control = smu7_avfs_control,
.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
+ .start_thermal_controller = smu7_start_thermal_controller,
+ .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
};
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index f221e17b67e7..e021154aedbd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -182,14 +182,7 @@ struct smu7_hwmgr {
struct smu7_dpm_table dpm_table;
struct smu7_dpm_table golden_dpm_table;
- uint32_t voting_rights_clients0;
- uint32_t voting_rights_clients1;
- uint32_t voting_rights_clients2;
- uint32_t voting_rights_clients3;
- uint32_t voting_rights_clients4;
- uint32_t voting_rights_clients5;
- uint32_t voting_rights_clients6;
- uint32_t voting_rights_clients7;
+ uint32_t voting_rights_clients[8];
uint32_t static_screen_threshold_unit;
uint32_t static_screen_threshold;
uint32_t voltage_control;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 1dc31aa72781..85ca16abb626 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -629,51 +629,38 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
uint32_t block_en = 0;
int32_t result = 0;
uint32_t didt_block;
- uint32_t data;
if (hwmgr->chip_id == CHIP_POLARIS11)
didt_block = Polaris11_DIDTBlock_Info;
else
didt_block = DIDTBlock_Info;
- block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ? en : 0;
-
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
- data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((block_en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+ block_en = PP_CAP(PHM_PlatformCaps_SQRamping) ? en : 0;
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_SQ_CTRL0, DIDT_CTRL_EN, block_en);
didt_block &= ~SQ_Enable_MASK;
didt_block |= block_en << SQ_Enable_SHIFT;
- block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ? en : 0;
-
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
- data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((block_en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+ block_en = PP_CAP(PHM_PlatformCaps_DBRamping) ? en : 0;
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_DB_CTRL0, DIDT_CTRL_EN, block_en);
didt_block &= ~DB_Enable_MASK;
didt_block |= block_en << DB_Enable_SHIFT;
- block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ? en : 0;
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
- data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((block_en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+ block_en = PP_CAP(PHM_PlatformCaps_TDRamping) ? en : 0;
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_TD_CTRL0, DIDT_CTRL_EN, block_en);
didt_block &= ~TD_Enable_MASK;
didt_block |= block_en << TD_Enable_SHIFT;
- block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping) ? en : 0;
-
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
- data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((block_en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+ block_en = PP_CAP(PHM_PlatformCaps_TCPRamping) ? en : 0;
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_TCP_CTRL0, DIDT_CTRL_EN, block_en);
didt_block &= ~TCP_Enable_MASK;
didt_block |= block_en << TCP_Enable_SHIFT;
-
if (enable)
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, didt_block);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Didt_Block_Function, didt_block);
return result;
}
@@ -753,12 +740,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
if (result == 0)
num_se = sys_info.value;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_SQRamping) ||
+ PP_CAP(PHM_PlatformCaps_DBRamping) ||
+ PP_CAP(PHM_PlatformCaps_TDRamping) ||
+ PP_CAP(PHM_PlatformCaps_TCPRamping)) {
cgs_enter_safe_mode(hwmgr->device, true);
+ cgs_lock_grbm_idx(hwmgr->device, true);
value = 0;
value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
for (count = 0; count < num_se; count++) {
@@ -775,7 +763,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
} else if (hwmgr->chip_id == CHIP_POLARIS11) {
result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
- if (hwmgr->smumgr->is_kicker)
+ if (hwmgr->is_kicker)
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11_Kicker);
else
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
@@ -793,11 +781,12 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
if (hwmgr->chip_id == CHIP_POLARIS11) {
- result = smum_send_msg_to_smc(hwmgr->smumgr,
+ result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_EnableDpmDidt));
PP_ASSERT_WITH_CODE((0 == result),
"Failed to enable DPM DIDT.", return result);
}
+ cgs_lock_grbm_idx(hwmgr->device, false);
cgs_enter_safe_mode(hwmgr->device, false);
}
@@ -808,10 +797,10 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
{
int result;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_SQRamping) ||
+ PP_CAP(PHM_PlatformCaps_DBRamping) ||
+ PP_CAP(PHM_PlatformCaps_TDRamping) ||
+ PP_CAP(PHM_PlatformCaps_TCPRamping)) {
cgs_enter_safe_mode(hwmgr->device, true);
@@ -820,7 +809,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
"Post DIDT enable clock gating failed.",
return result);
if (hwmgr->chip_id == CHIP_POLARIS11) {
- result = smum_send_msg_to_smc(hwmgr->smumgr,
+ result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_DisableDpmDidt));
PP_ASSERT_WITH_CODE((0 == result),
"Failed to disable DPM DIDT.", return result);
@@ -836,10 +825,9 @@ int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC)) {
+ if (PP_CAP(PHM_PlatformCaps_CAC)) {
int smc_result;
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_EnableCac));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable CAC in SMC.", result = -1);
@@ -854,9 +842,8 @@ int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC) && data->cac_enabled) {
- int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ if (PP_CAP(PHM_PlatformCaps_CAC) && data->cac_enabled) {
+ int smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_DisableCac));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable CAC in SMC.", result = -1);
@@ -872,7 +859,7 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PkgPwrSetLimit, n);
return 0;
}
@@ -880,7 +867,7 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr,
uint32_t target_tdp)
{
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
}
@@ -899,11 +886,9 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
else
cac_table = hwmgr->dyn_state.cac_dtp_table;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
-
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->enable_tdc_limit_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_TDCLimitEnable));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable TDCLimit in SMC.", result = -1;);
@@ -913,14 +898,13 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
}
if (data->enable_pkg_pwr_tracking_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable PkgPwrTracking in SMC.", result = -1;);
if (0 == smc_result) {
uint32_t default_limit =
(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
-
data->power_containment_features |=
POWERCONTAINMENT_FEATURE_PkgPwrLimit;
@@ -937,14 +921,13 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment) &&
- data->power_containment_features) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment) &&
+ data->power_containment_features) {
int smc_result;
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_TDCLimit) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_TDCLimitDisable));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable TDCLimit in SMC.",
@@ -953,7 +936,7 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_DTE) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_DisableDTE));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable DTE in SMC.",
@@ -962,7 +945,7 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable PkgPwrTracking in SMC.",
@@ -987,16 +970,17 @@ int smu7_power_control_set_level(struct pp_hwmgr *hwmgr)
cac_table = table_info->cac_dtp_table;
else
cac_table = hwmgr->dyn_state.cac_dtp_table;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
/* adjustment percentage has already been validated */
adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
hwmgr->platform_descriptor.TDPAdjustment :
(-1 * hwmgr->platform_descriptor.TDPAdjustment);
- /* SMC requested that target_tdp to be 7 bit fraction in DPM table
- * but message to be 8 bit fraction for messages
- */
- target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
+
+ if (hwmgr->chip_id > CHIP_TONGA)
+ target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
+ else
+ target_tdp = ((100 + adjust_percent) * (int)(cac_table->usConfigurableTDP * 256)) / 100;
+
result = smu7_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index baddb569a8b8..d7aa643cdb51 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -37,9 +37,8 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
fan_speed_info->min_percent = 0;
fan_speed_info->max_percent = 100;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
- hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
+ if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
+ hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
fan_speed_info->supports_rpm_read = true;
fan_speed_info->supports_rpm_write = true;
fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
@@ -87,8 +86,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
uint32_t crystal_clock_freq;
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
- (hwmgr->thermal_controller.fanInfo.
- ucTachometerPulsesPerRevolution == 0))
+ !hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
return -ENODEV;
tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -152,13 +150,11 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
int result;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
+ result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM))
+ if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM))
hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanRPM);
@@ -169,12 +165,12 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
} else {
cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
+ result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
}
if (!result && hwmgr->thermal_controller.
advanceFanControlParameters.ucTargetTemperature)
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
hwmgr->thermal_controller.
advanceFanControlParameters.ucTargetTemperature);
@@ -187,7 +183,7 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
hwmgr->fan_ctrl_enabled = false;
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl);
}
/**
@@ -209,8 +205,7 @@ int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (speed > 100)
speed = 100;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -241,8 +236,7 @@ int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
result = smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
if (!result)
result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
@@ -270,8 +264,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
crystal_clock_freq = smu7_get_xclk(hwmgr);
@@ -367,7 +360,7 @@ static int smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
*
* @param hwmgr The address of the hardware manager.
*/
-int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
+static void smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
@@ -378,7 +371,7 @@ int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to enable internal thermal interrupts */
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable);
}
/**
@@ -396,7 +389,7 @@ int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr)
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to disable internal thermal interrupts */
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable);
}
/**
@@ -423,16 +416,14 @@ int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
* @param Result the last failure code
* @return result from set temperature range routine
*/
-static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+static int smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
/* If the fantable setup has failed we could have disabled
* PHM_PlatformCaps_MicrocodeFanControl even after
* this function was included in the table.
* Make sure that we still think controlling the fan is OK.
*/
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
smu7_fan_ctrl_start_smc_fan_control(hwmgr);
smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
@@ -440,108 +431,34 @@ static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
return 0;
}
-/**
-* Set temperature range for high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-static int tf_smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+int smu7_start_thermal_controller(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range)
{
- struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
+ int ret = 0;
if (range == NULL)
return -EINVAL;
- return smu7_thermal_set_temperature_range(hwmgr, range->min, range->max);
-}
-
-/**
-* Programs one-time setting registers
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from initialize thermal controller routine
-*/
-static int tf_smu7_thermal_initialize(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return smu7_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from enable alert routine
-*/
-static int tf_smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return smu7_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from disable alert routine
-*/
-static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return smu7_thermal_disable_alert(hwmgr);
-}
+ smu7_thermal_initialize(hwmgr);
+ ret = smu7_thermal_set_temperature_range(hwmgr, range->min, range->max);
+ if (ret)
+ return -EINVAL;
+ smu7_thermal_enable_alert(hwmgr);
+ ret = smum_thermal_avfs_enable(hwmgr);
+ if (ret)
+ return -EINVAL;
-static const struct phm_master_table_item
-phm_thermal_start_thermal_controller_master_list[] = {
- { .tableFunction = tf_smu7_thermal_initialize },
- { .tableFunction = tf_smu7_thermal_set_temperature_range },
- { .tableFunction = tf_smu7_thermal_enable_alert },
- { .tableFunction = smum_thermal_avfs_enable },
/* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this
* so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
- { .tableFunction = smum_thermal_setup_fan_table },
- { .tableFunction = tf_smu7_thermal_start_smc_fan_control },
- { }
-};
-
-static const struct phm_master_table_header
-phm_thermal_start_thermal_controller_master = {
- 0,
- PHM_MasterTableFlag_None,
- phm_thermal_start_thermal_controller_master_list
-};
-
-static const struct phm_master_table_item
-phm_thermal_set_temperature_range_master_list[] = {
- { .tableFunction = tf_smu7_thermal_disable_alert },
- { .tableFunction = tf_smu7_thermal_set_temperature_range },
- { .tableFunction = tf_smu7_thermal_enable_alert },
- { }
-};
-
-static const struct phm_master_table_header
-phm_thermal_set_temperature_range_master = {
- 0,
- PHM_MasterTableFlag_None,
- phm_thermal_set_temperature_range_master_list
-};
+ smum_thermal_setup_fan_table(hwmgr);
+ smu7_thermal_start_smc_fan_control(hwmgr);
+ return 0;
+}
+
+
int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{
@@ -550,35 +467,3 @@ int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
return 0;
}
-/**
-* Initializes the thermal controller related functions in the Hardware Manager structure.
-* @param hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- result = phm_construct_table(hwmgr,
- &phm_thermal_set_temperature_range_master,
- &(hwmgr->set_temperature_range));
-
- if (!result) {
- result = phm_construct_table(hwmgr,
- &phm_thermal_start_thermal_controller_master,
- &(hwmgr->start_thermal_controller));
- if (result)
- phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
- }
-
- if (!result)
- hwmgr->fan_ctrl_is_in_default_mode = true;
- return result;
-}
-
-void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr)
-{
- phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
- phm_destroy_table(hwmgr, &(hwmgr->start_thermal_controller));
- return;
-}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
index ba71b608fa75..42c1ba0fad78 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
@@ -46,14 +46,13 @@ extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int smu7_start_thermal_controller(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *temperature_range);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index f8f02e70b8bc..4f79c21f27ed 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -56,7 +56,7 @@
#define HBM_MEMORY_CHANNEL_WIDTH 128
-uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
+static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
#define MEM_FREQ_LOW_LATENCY 25000
#define MEM_FREQ_HIGH_LATENCY 80000
@@ -81,7 +81,7 @@ uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask);
-const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
+static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
struct vega10_power_state *cast_phw_vega10_power_state(
struct pp_hw_power_state *hw_ps)
@@ -201,9 +201,6 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_ControlVDDCI);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableSMU7ThermalManagement);
sys_info.size = sizeof(struct cgs_system_info);
@@ -381,12 +378,10 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (!data->registry_data.socclk_dpm_key_disabled)
data->smu_features[GNLD_DPM_SOCCLK].supported = true;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM))
+ if (PP_CAP(PHM_PlatformCaps_UVDDPM))
data->smu_features[GNLD_DPM_UVD].supported = true;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEDPM))
+ if (PP_CAP(PHM_PlatformCaps_VCEDPM))
data->smu_features[GNLD_DPM_VCE].supported = true;
if (!data->registry_data.pcie_dpm_key_disabled)
@@ -395,9 +390,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (!data->registry_data.dcefclk_dpm_key_disabled)
data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep) &&
- data->registry_data.sclk_deep_sleep_support) {
+ if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
+ data->registry_data.sclk_deep_sleep_support) {
data->smu_features[GNLD_DS_GFXCLK].supported = true;
data->smu_features[GNLD_DS_SOCCLK].supported = true;
data->smu_features[GNLD_DS_LCLK].supported = true;
@@ -431,8 +425,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (data->registry_data.vr0hot_enabled)
data->smu_features[GNLD_VR0HOT].supported = true;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetSmuVersion);
- vega10_read_arg_from_smc(hwmgr->smumgr, &(data->smu_version));
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+ vega10_read_arg_from_smc(hwmgr, &(data->smu_version));
/* ACG firmware has major version 5 */
if ((data->smu_version & 0xff000000) == 0x5000000)
data->smu_features[GNLD_ACG].supported = true;
@@ -497,8 +491,7 @@ static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
if (!vega10_get_socclk_for_voltage_evv(hwmgr,
table_info->vddc_lookup_table, vv_id, &sclk)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
+ if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
for (j = 1; j < socclk_table->count; j++) {
if (socclk_table->entries[j].clk == sclk &&
socclk_table->entries[j].cks_enable == 0) {
@@ -591,61 +584,37 @@ static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
static int vega10_patch_voltage_dependency_tables_with_lookup_table(
struct pp_hwmgr *hwmgr)
{
- uint8_t entry_id;
- uint8_t voltage_id;
+ uint8_t entry_id, voltage_id;
+ unsigned i;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
- table_info->vdd_dep_on_socclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *gfxclk_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *dcefclk_table =
- table_info->vdd_dep_on_dcefclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *pixclk_table =
- table_info->vdd_dep_on_pixclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *dspclk_table =
- table_info->vdd_dep_on_dispclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *phyclk_table =
- table_info->vdd_dep_on_phyclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
- table_info->vdd_dep_on_mclk;
struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
table_info->mm_dep_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
+ table_info->vdd_dep_on_mclk;
- for (entry_id = 0; entry_id < socclk_table->count; entry_id++) {
- voltage_id = socclk_table->entries[entry_id].vddInd;
- socclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
-
- for (entry_id = 0; entry_id < gfxclk_table->count; entry_id++) {
- voltage_id = gfxclk_table->entries[entry_id].vddInd;
- gfxclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
-
- for (entry_id = 0; entry_id < dcefclk_table->count; entry_id++) {
- voltage_id = dcefclk_table->entries[entry_id].vddInd;
- dcefclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
-
- for (entry_id = 0; entry_id < pixclk_table->count; entry_id++) {
- voltage_id = pixclk_table->entries[entry_id].vddInd;
- pixclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
+ for (i = 0; i < 6; i++) {
+ struct phm_ppt_v1_clock_voltage_dependency_table *vdt;
+ switch (i) {
+ case 0: vdt = table_info->vdd_dep_on_socclk; break;
+ case 1: vdt = table_info->vdd_dep_on_sclk; break;
+ case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
+ case 3: vdt = table_info->vdd_dep_on_pixclk; break;
+ case 4: vdt = table_info->vdd_dep_on_dispclk; break;
+ case 5: vdt = table_info->vdd_dep_on_phyclk; break;
+ }
- for (entry_id = 0; entry_id < dspclk_table->count; entry_id++) {
- voltage_id = dspclk_table->entries[entry_id].vddInd;
- dspclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ for (entry_id = 0; entry_id < vdt->count; entry_id++) {
+ voltage_id = vdt->entries[entry_id].vddInd;
+ vdt->entries[entry_id].vddc =
+ table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ }
}
- for (entry_id = 0; entry_id < phyclk_table->count; entry_id++) {
- voltage_id = phyclk_table->entries[entry_id].vddInd;
- phyclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
+ voltage_id = mm_table->entries[entry_id].vddcInd;
+ mm_table->entries[entry_id].vddc =
+ table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
}
for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
@@ -660,11 +629,6 @@ static int vega10_patch_voltage_dependency_tables_with_lookup_table(
table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
}
- for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
- voltage_id = mm_table->entries[entry_id].vddcInd;
- mm_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
return 0;
@@ -838,8 +802,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
}
/* VDDCI_MEM */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI)) {
+ if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
@@ -959,7 +922,7 @@ static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
{
uint32_t features_enabled;
- if (!vega10_get_smc_features(hwmgr->smumgr, &features_enabled)) {
+ if (!vega10_get_smc_features(hwmgr, &features_enabled)) {
if (features_enabled & SMC_DPM_FEATURES)
return true;
}
@@ -1198,6 +1161,8 @@ static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
{
int i;
+ dpm_table->count = 0;
+
for (i = 0; i < dep_table->count; i++) {
if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
dep_table->entries[i].clk) {
@@ -1306,10 +1271,6 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
return -EINVAL);
/* Initialize Sclk DPM table based on allow Sclk values */
- data->dpm_table.soc_table.count = 0;
- data->dpm_table.gfx_table.count = 0;
- data->dpm_table.dcef_table.count = 0;
-
dpm_table = &(data->dpm_table.soc_table);
vega10_setup_default_single_dpm_table(hwmgr,
dpm_table,
@@ -1411,10 +1372,8 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
memcpy(&(data->golden_dpm_table), &(data->dpm_table),
sizeof(struct vega10_dpm_table));
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
data->odn_dpm_table.odn_core_clock_dpm_levels.
number_of_performance_levels = data->dpm_table.gfx_table.count;
for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
@@ -1848,6 +1807,10 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
mem_channels = (cgs_read_register(hwmgr->device, reg) &
DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+ PP_ASSERT_WITH_CODE(mem_channels < ARRAY_SIZE(channel_number),
+ "Mem Channel Index Exceeded maximum!",
+ return -1);
+
pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
pp_table->MemoryChannelWidth =
cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
@@ -2311,21 +2274,21 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
uint32_t agc_btc_response;
if (data->smu_features[GNLD_ACG].supported) {
- if (0 == vega10_enable_smc_features(hwmgr->smumgr, true,
+ if (0 == vega10_enable_smc_features(hwmgr, true,
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc);
- vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
+ vega10_read_arg_from_smc(hwmgr, &agc_btc_response);
if (1 == agc_btc_response) {
if (1 == data->acg_loop_state)
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInClosedLoop);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
else if (2 == data->acg_loop_state)
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInOpenLoop);
- if (0 == vega10_enable_smc_features(hwmgr->smumgr, true,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
+ if (0 == vega10_enable_smc_features(hwmgr, true,
data->smu_features[GNLD_ACG].smu_feature_bitmap))
data->smu_features[GNLD_ACG].enabled = true;
} else {
@@ -2342,13 +2305,11 @@ static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
- if (data->smu_features[GNLD_ACG].supported) {
- if (data->smu_features[GNLD_ACG].enabled) {
- if (0 == vega10_enable_smc_features(hwmgr->smumgr, false,
- data->smu_features[GNLD_ACG].smu_feature_bitmap))
+ if (data->smu_features[GNLD_ACG].supported &&
+ data->smu_features[GNLD_ACG].enabled)
+ if (!vega10_enable_smc_features(hwmgr, false,
+ data->smu_features[GNLD_ACG].smu_feature_bitmap))
data->smu_features[GNLD_ACG].enabled = false;
- }
- }
return 0;
}
@@ -2363,9 +2324,8 @@ static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
if (!result) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot) &&
- (data->registry_data.regulator_hot_gpio_support)) {
+ if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
+ data->registry_data.regulator_hot_gpio_support) {
pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
@@ -2377,9 +2337,8 @@ static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
pp_table->VR1HotPolarity = 0;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition) &&
- (data->registry_data.ac_dc_switch_gpio_support)) {
+ if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
+ data->registry_data.ac_dc_switch_gpio_support) {
pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
} else {
@@ -2398,16 +2357,16 @@ static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
if (data->smu_features[GNLD_AVFS].supported) {
if (enable) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_AVFS].smu_feature_bitmap),
"[avfs_control] Attempt to Enable AVFS feature Failed!",
return -1);
data->smu_features[GNLD_AVFS].enabled = true;
} else {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false,
- data->smu_features[GNLD_AVFS].smu_feature_id),
+ data->smu_features[GNLD_AVFS].smu_feature_bitmap),
"[avfs_control] Attempt to Disable AVFS feature Failed!",
return -1);
data->smu_features[GNLD_AVFS].enabled = false;
@@ -2428,15 +2387,15 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumTop32);
- vega10_read_arg_from_smc(hwmgr->smumgr, &top32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
+ vega10_read_arg_from_smc(hwmgr, &top32);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumBottom32);
- vega10_read_arg_from_smc(hwmgr->smumgr, &bottom32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
+ vega10_read_arg_from_smc(hwmgr, &bottom32);
serial_number = ((uint64_t)bottom32 << 32) | top32;
- if (pp_override_get_default_fuse_value(serial_number, vega10_fuses_default, &fuse) == 0) {
+ if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
avfs_fuse_table->VFT0_b = fuse.VFT0_b;
avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
@@ -2446,7 +2405,7 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
avfs_fuse_table->VFT2_b = fuse.VFT2_b;
avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
- result = vega10_copy_table_to_smc(hwmgr->smumgr,
+ result = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)avfs_fuse_table, AVFSFUSETABLE);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload FuseOVerride!",
@@ -2585,14 +2544,14 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
if (0 != boot_up_values.usVddc) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFloorSocVoltage,
(boot_up_values.usVddc * 4));
data->vbios_boot_state.bsoc_vddc_lock = true;
} else {
data->vbios_boot_state.bsoc_vddc_lock = false;
}
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
}
@@ -2618,7 +2577,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
vega10_populate_and_upload_avfs_fuse_override(hwmgr);
- result = vega10_copy_table_to_smc(hwmgr->smumgr,
+ result = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)pp_table, PPTABLE);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
@@ -2641,7 +2600,7 @@ static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
pr_info("THERMAL Feature Already enabled!");
PP_ASSERT_WITH_CODE(
- !vega10_enable_smc_features(hwmgr->smumgr,
+ !vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
"Enable THERMAL Feature Failed!",
@@ -2661,7 +2620,7 @@ static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
pr_info("THERMAL Feature Already disabled!");
PP_ASSERT_WITH_CODE(
- !vega10_enable_smc_features(hwmgr->smumgr,
+ !vega10_enable_smc_features(hwmgr,
false,
data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
"disable THERMAL Feature Failed!",
@@ -2677,11 +2636,10 @@ static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot)) {
+ if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
if (data->smu_features[GNLD_VR0HOT].supported) {
PP_ASSERT_WITH_CODE(
- !vega10_enable_smc_features(hwmgr->smumgr,
+ !vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
"Attempt to Enable VR0 Hot feature Failed!",
@@ -2690,7 +2648,7 @@ static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
} else {
if (data->smu_features[GNLD_VR1HOT].supported) {
PP_ASSERT_WITH_CODE(
- !vega10_enable_smc_features(hwmgr->smumgr,
+ !vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
"Attempt to Enable VR0 Hot feature Failed!",
@@ -2708,7 +2666,7 @@ static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->registry_data.ulv_support) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
"Enable ULV Feature Failed!",
return -1);
@@ -2724,7 +2682,7 @@ static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->registry_data.ulv_support) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
"disable ULV Feature Failed!",
return -EINVAL);
@@ -2740,7 +2698,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DS_GFXCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
"Attempt to Enable DS_GFXCLK Feature Failed!",
return -EINVAL);
@@ -2748,7 +2706,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_SOCCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
"Attempt to Enable DS_SOCCLK Feature Failed!",
return -EINVAL);
@@ -2756,7 +2714,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_LCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
"Attempt to Enable DS_LCLK Feature Failed!",
return -EINVAL);
@@ -2764,7 +2722,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
"Attempt to Enable DS_DCEFCLK Feature Failed!",
return -EINVAL);
@@ -2780,7 +2738,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DS_GFXCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
"Attempt to disable DS_GFXCLK Feature Failed!",
return -EINVAL);
@@ -2788,7 +2746,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_SOCCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
"Attempt to disable DS_ Feature Failed!",
return -EINVAL);
@@ -2796,7 +2754,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_LCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
"Attempt to disable DS_LCLK Feature Failed!",
return -EINVAL);
@@ -2804,7 +2762,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
"Attempt to disable DS_DCEFCLK Feature Failed!",
return -EINVAL);
@@ -2822,7 +2780,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
if (data->smu_features[GNLD_LED_DISPLAY].supported == true) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
"Attempt to disable LED DPM feature failed!", return -EINVAL);
data->smu_features[GNLD_LED_DISPLAY].enabled = false;
@@ -2840,7 +2798,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
}
- vega10_enable_smc_features(hwmgr->smumgr, false, feature_mask);
+ vega10_enable_smc_features(hwmgr, false, feature_mask);
return 0;
}
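The other recurring substitution is PP_CAP(x) for the long phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, x) form; the shorthand presumably relies on a local hwmgr variable being in scope, which is why it only appears inside hwmgr callbacks. A standalone analogue of that kind of capability-bit shorthand (the types and helper below are invented for the example, not the driver's own):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct example_platform_caps { uint64_t bits; };
struct example_hwmgr { struct example_platform_caps caps; };

static bool example_cap_enabled(const struct example_platform_caps *c, unsigned int cap)
{
	return (c->bits >> cap) & 1;
}

/* Like PP_CAP(), this expects a variable named 'hwmgr' in scope. */
#define EX_PP_CAP(c) example_cap_enabled(&hwmgr->caps, (c))

int main(void)
{
	struct example_hwmgr h = { .caps = { .bits = 1ull << 3 } };
	struct example_hwmgr *hwmgr = &h;

	printf("cap3=%d cap4=%d\n", EX_PP_CAP(3), EX_PP_CAP(4));
	return 0;
}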
@@ -2870,7 +2828,7 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
}
- if (vega10_enable_smc_features(hwmgr->smumgr,
+ if (vega10_enable_smc_features(hwmgr,
true, feature_mask)) {
for (i = 0; i < GNLD_DPM_MAX; i++) {
if (data->smu_features[i].smu_feature_bitmap &
@@ -2880,22 +2838,21 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
if (data->smu_features[GNLD_LED_DISPLAY].supported == true) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
"Attempt to Enable LED DPM feature Failed!", return -EINVAL);
data->smu_features[GNLD_LED_DISPLAY].enabled = true;
}
if (data->vbios_boot_state.bsoc_vddc_lock) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFloorSocVoltage, 0);
data->vbios_boot_state.bsoc_vddc_lock = false;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition)) {
+ if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) {
if (data->smu_features[GNLD_ACDC].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
"Attempt to Enable DS_GFXCLK Feature Failed!",
return -1);
@@ -2912,13 +2869,13 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
int tmp_result, result = 0;
- tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to configure telemetry!",
return tmp_result);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_NumOfDisplays, 0);
tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1;
@@ -2926,6 +2883,15 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"DPM is already running right , skipping re-enablement!",
return 0);
+ if ((data->smu_version == 0x001c2c00) ||
+ (data->smu_version == 0x001c2d00)) {
+ tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
+ PP_ASSERT_WITH_CODE(!tmp_result,
+ "Failed to set package power PID!",
+ return tmp_result);
+ }
+
tmp_result = vega10_construct_voltage_tables(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to contruct voltage tables!",
@@ -2936,8 +2902,7 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"Failed to initialize SMC table!",
result = tmp_result);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController)) {
+ if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
tmp_result = vega10_enable_thermal_protection(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to enable thermal protection!",
@@ -3172,8 +3137,9 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
+ if (PP_CAP(PHM_PlatformCaps_StablePState)) {
+ stable_pstate_sclk_dpm_percentage =
+ data->registry_data.stable_pstate_sclk_dpm_percentage;
PP_ASSERT_WITH_CODE(
data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
@@ -3238,10 +3204,8 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
disable_mclk_switching_for_frame_lock = phm_cap_enabled(
hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
- disable_mclk_switching_for_vr = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMclkSwitchForVR);
- force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ForceMclkHigh);
+ disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
+ force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
disable_mclk_switching = (info.display_count > 1) ||
disable_mclk_switching_for_frame_lock ||
@@ -3292,8 +3256,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
vega10_ps->performance_levels[1].mem_clock;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
+ if (PP_CAP(PHM_PlatformCaps_StablePState)) {
for (i = 0; i < vega10_ps->performance_level_count; i++) {
vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
@@ -3325,10 +3288,8 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
data->need_update_dpm_table = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
for (i = 0; i < sclk_table->count; i++) {
if (sclk == sclk_table->dpm_levels[i].value)
break;
@@ -3412,10 +3373,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
uint32_t dpm_count, clock_percent;
uint32_t i;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
if (!data->need_update_dpm_table &&
!data->apply_optimized_settings &&
@@ -3480,10 +3439,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
dpm_table->
gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
value = sclk;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) {
/* Need to do calculation based on the golden DPM table
* as the Heatmap GPU Clock axis is also based on
* the default values
@@ -3537,10 +3494,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
mem_table.dpm_levels[dpm_table->mem_table.count - 1].
value = mclk;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) {
PP_ASSERT_WITH_CODE(
golden_dpm_table->mem_table.dpm_levels
@@ -3732,7 +3687,7 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
if (data->smc_state_table.gfx_boot_level !=
data->dpm_table.gfx_table.dpm_state.soft_min_level) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinGfxclkByIndex,
data->smc_state_table.gfx_boot_level),
"Failed to set soft min sclk index!",
@@ -3748,14 +3703,14 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
socclk_idx),
"Failed to set soft min uclk index!",
return -EINVAL);
} else {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinUclkByIndex,
data->smc_state_table.mem_boot_level),
"Failed to set soft min uclk index!",
@@ -3780,7 +3735,7 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
if (data->smc_state_table.gfx_max_level !=
data->dpm_table.gfx_table.dpm_state.soft_max_level) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMaxGfxclkByIndex,
data->smc_state_table.gfx_max_level),
"Failed to set soft max sclk index!",
@@ -3794,7 +3749,7 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
if (data->smc_state_table.mem_max_level !=
data->dpm_table.mem_table.dpm_state.soft_max_level) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMaxUclkByIndex,
data->smc_state_table.mem_max_level),
"Failed to set soft max mclk index!",
@@ -3853,7 +3808,7 @@ int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DPM_VCE].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
enable,
data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
"Attempt to Enable/Disable DPM VCE Failed!",
@@ -3871,9 +3826,8 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
int result = 0;
uint32_t low_sclk_interrupt_threshold = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
+ if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
+ (hwmgr->gfx_arbiter.sclk_threshold !=
data->low_sclk_interrupt_threshold)) {
data->low_sclk_interrupt_threshold =
hwmgr->gfx_arbiter.sclk_threshold;
@@ -3884,7 +3838,7 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
cpu_to_le32(low_sclk_interrupt_threshold);
/* This message will also enable SmcToHost Interrupt */
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetLowGfxclkInterruptThreshold,
(uint32_t)low_sclk_interrupt_threshold);
}
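The threshold is written into the driver's copy of the SMC table in little-endian byte order (cpu_to_le32), since the SMC consumes the table as raw bytes, while the same value goes out unconverted as the message parameter. A standalone illustration of what the byte-order conversion does, with userspace htole32 standing in for cpu_to_le32:

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

int main(void)
{
	uint32_t threshold = 0x00012345;
	uint32_t wire = htole32(threshold);   /* what cpu_to_le32() does in the kernel */
	const unsigned char *b = (const unsigned char *)&wire;

	/* Prints "45 23 01 00" regardless of host endianness. */
	printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	return 0;
}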
@@ -3920,7 +3874,7 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
"Failed to update SCLK threshold!",
result = tmp_result);
- result = vega10_copy_table_to_smc(hwmgr->smumgr,
+ result = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)pp_table, PPTABLE);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
@@ -3931,7 +3885,7 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
return 0;
}
-static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct vega10_power_state *vega10_ps;
@@ -3953,7 +3907,7 @@ static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
[vega10_ps->performance_level_count - 1].gfx_clock;
}
-static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct vega10_power_state *vega10_ps;
@@ -3980,12 +3934,12 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
{
uint32_t value;
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrPkgPwr),
"Failed to get current package power!",
return -EINVAL);
- vega10_read_arg_from_smc(hwmgr->smumgr, &value);
+ vega10_read_arg_from_smc(hwmgr, &value);
/* power value is an integer */
query->average_gpu_power = value << 8;
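The SMC reports package power as a whole number of watts, and shifting it left by 8 stores it in a 24.8 fixed-point layout; that interpretation is an assumption based on the shift, and a consumer would divide by 256 to get back to watts. A standalone check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t value = 42;                      /* watts from the SMC */
	uint32_t average_gpu_power = value << 8;  /* 24.8 fixed point */

	printf("raw=%u fixed=%u back=%u\n",
	       value, average_gpu_power, average_gpu_power >> 8);
	return 0;
}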
@@ -4002,25 +3956,25 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentGfxclkIndex);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
if (!ret) {
- vega10_read_arg_from_smc(hwmgr->smumgr, &sclk_idx);
+ vega10_read_arg_from_smc(hwmgr, &sclk_idx);
*((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
*size = 4;
}
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentUclkIndex);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
if (!ret) {
- vega10_read_arg_from_smc(hwmgr->smumgr, &mclk_idx);
+ vega10_read_arg_from_smc(hwmgr, &mclk_idx);
*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
*size = 4;
}
break;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetAverageGfxActivity, 0);
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
if (!ret) {
- vega10_read_arg_from_smc(hwmgr->smumgr, &activity_percent);
+ vega10_read_arg_from_smc(hwmgr, &activity_percent);
*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
*size = 4;
}
@@ -4055,7 +4009,7 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
bool has_disp)
{
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
has_disp ? 0 : 1);
}
@@ -4090,7 +4044,7 @@ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
if (!result) {
clk_request = (clk_freq << 16) | clk_select;
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_RequestDisplayClockByFreq,
clk_request);
}
@@ -4160,7 +4114,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
+ hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcefClockInSR /100),
"Attempt to set divider for DCEFCLK Failed!",);
} else {
@@ -4172,7 +4126,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
if (min_clocks.memoryClock != 0) {
idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
data->dpm_table.mem_table.dpm_state.soft_min_level = idx;
}
@@ -4275,28 +4229,23 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
return 0;
}
-static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
- int result = 0;
-
switch (mode) {
case AMD_FAN_CTRL_NONE:
- result = vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
break;
case AMD_FAN_CTRL_MANUAL:
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
break;
case AMD_FAN_CTRL_AUTO:
- result = vega10_fan_ctrl_set_static_mode(hwmgr, mode);
- if (!result)
- result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ vega10_fan_ctrl_start_smc_fan_control(hwmgr);
break;
default:
break;
}
- return result;
}
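vega10_set_fan_control_mode() no longer reports a status, so whatever sits behind the fan-control-mode hook now calls it purely for its side effects. A hypothetical caller, for illustration only (the wrapper and its arguments are assumptions; only vega10_set_fan_control_mode and the AMD_FAN_CTRL_* values come from the surrounding code):

/* Illustrative caller; not a function from this patch. */
static void example_apply_fan_mode(struct pp_hwmgr *hwmgr, bool manual)
{
	vega10_set_fan_control_mode(hwmgr,
			manual ? AMD_FAN_CTRL_MANUAL : AMD_FAN_CTRL_AUTO);
}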
static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
@@ -4306,51 +4255,16 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t sclk_mask = 0;
uint32_t mclk_mask = 0;
uint32_t soc_mask = 0;
- uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
-
- if (level == hwmgr->dpm_level)
- return ret;
-
- if (!(hwmgr->dpm_level & profile_mode_mask)) {
- /* enter profile mode, save current level, disable gfx cg*/
- if (level & profile_mode_mask) {
- hwmgr->saved_dpm_level = hwmgr->dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
- }
- } else {
- /* exit profile mode, restore level, enable gfx cg*/
- if (!(level & profile_mode_mask)) {
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
- level = hwmgr->saved_dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- }
- }
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = vega10_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
ret = vega10_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = vega10_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@@ -4359,27 +4273,25 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
if (ret)
return ret;
- hwmgr->dpm_level = level;
vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
- hwmgr->dpm_level = level;
- break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
- else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
-
- return 0;
+ if (!ret) {
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
+ else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
+ }
+ return ret;
}
-static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
@@ -4624,7 +4536,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
int i;
- if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+ if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
AMD_DPM_FORCED_LEVEL_LOW |
AMD_DPM_FORCED_LEVEL_HIGH))
return -EINVAL;
@@ -4697,11 +4609,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.sclk_dpm_key_disabled)
break;
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentGfxclkIndex),
"Attempt to get current sclk index Failed!",
return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
&now),
"Attempt to read sclk index Failed!",
return -1);
@@ -4715,11 +4627,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.mclk_dpm_key_disabled)
break;
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentUclkIndex),
"Attempt to get current mclk index Failed!",
return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
&now),
"Attempt to read mclk index Failed!",
return -1);
@@ -4730,11 +4642,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
(i == now) ? "*" : "");
break;
case PP_PCIE:
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentLinkIndex),
"Attempt to get current mclk index Failed!",
return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
&now),
"Attempt to read mclk index Failed!",
return -1);
@@ -4762,7 +4674,7 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if ((data->water_marks_bitmap & WaterMarksExist) &&
!(data->water_marks_bitmap & WaterMarksLoaded)) {
- result = vega10_copy_table_to_smc(hwmgr->smumgr,
+ result = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)wm_table, WMTABLE);
PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
data->water_marks_bitmap |= WaterMarksLoaded;
@@ -4771,7 +4683,7 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if (data->water_marks_bitmap & WaterMarksLoaded) {
cgs_get_active_displays_info(hwmgr->device, &info);
num_turned_on_displays = info.display_count;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
}
@@ -4784,7 +4696,7 @@ int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DPM_UVD].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
enable,
data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
"Attempt to Enable/Disable DPM UVD Failed!",
@@ -4794,20 +4706,20 @@ int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
return 0;
}
-static int vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
data->vce_power_gated = bgate;
- return vega10_enable_disable_vce_dpm(hwmgr, !bgate);
+ vega10_enable_disable_vce_dpm(hwmgr, !bgate);
}
-static int vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = bgate;
- return vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
+ vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
}
static inline bool vega10_are_power_levels_equal(
@@ -4866,7 +4778,7 @@ vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg
if (data->display_timing.num_existing_displays != info.display_count)
is_update_required = true;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+ if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
is_update_required = true;
}
@@ -4883,8 +4795,7 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
"DPM is not running right now, no need to disable DPM!",
return 0);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController))
+ if (PP_CAP(PHM_PlatformCaps_ThermalController))
vega10_disable_thermal_protection(hwmgr);
tmp_result = vega10_disable_power_containment(hwmgr);
@@ -4972,7 +4883,7 @@ static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
if (!data->registry_data.sclk_dpm_key_disabled)
PP_ASSERT_WITH_CODE(
!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinGfxclkByIndex,
sclk_idx),
"Failed to set soft min sclk index!",
@@ -4983,7 +4894,7 @@ static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
if (!data->registry_data.mclk_dpm_key_disabled)
PP_ASSERT_WITH_CODE(
!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinUclkByIndex,
mclk_idx),
"Failed to set soft min mclk index!",
@@ -5096,6 +5007,65 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
return 0;
}
+static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
+ uint32_t virtual_addr_low,
+ uint32_t virtual_addr_hi,
+ uint32_t mc_addr_low,
+ uint32_t mc_addr_hi,
+ uint32_t size)
+{
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSystemVirtualDramAddrHigh,
+ virtual_addr_hi);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSystemVirtualDramAddrLow,
+ virtual_addr_low);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramLogSetDramAddrHigh,
+ mc_addr_hi);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramLogSetDramAddrLow,
+ mc_addr_low);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DramLogSetDramSize,
+ size);
+ return 0;
+}
+
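The new helper hands the SMC a DRAM logging buffer by sending five messages: the system virtual address (high, then low), the MC address (high, then low), and the buffer size. A hedged caller sketch showing how 64-bit addresses would be split for it; only vega10_notify_cac_buffer_info comes from the patch, the caller and its arguments are illustrative:

/* Illustrative only: split 64-bit CPU-virtual and MC addresses into the
 * 32-bit halves the SMC messages expect. */
static int example_notify_cac(struct pp_hwmgr *hwmgr,
			      uint64_t cpu_virt, uint64_t mc_addr,
			      uint32_t size)
{
	return vega10_notify_cac_buffer_info(hwmgr,
			lower_32_bits(cpu_virt), upper_32_bits(cpu_virt),
			lower_32_bits(mc_addr), upper_32_bits(mc_addr),
			size);
}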
+static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
+ const void *info)
+{
+ struct cgs_irq_src_funcs *irq_src =
+ (struct cgs_irq_src_funcs *)info;
+
+ if (hwmgr->thermal_controller.ucType ==
+ ATOM_VEGA10_PP_THERMALCONTROLLER_VEGA10 ||
+ hwmgr->thermal_controller.ucType ==
+ ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+ PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
+ 0xf, /* AMDGPU_IH_CLIENTID_THM */
+ 0, 0, irq_src[0].set, irq_src[0].handler, hwmgr),
+ "Failed to register high thermal interrupt!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
+ 0xf, /* AMDGPU_IH_CLIENTID_THM */
+ 1, 0, irq_src[1].set, irq_src[1].handler, hwmgr),
+ "Failed to register low thermal interrupt!",
+ return -EINVAL);
+ }
+
+ /* Register CTF(GPIO_19) interrupt */
+ PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
+ 0x16, /* AMDGPU_IH_CLIENTID_ROM_SMUIO, */
+ 83, 0, irq_src[2].set, irq_src[2].handler, hwmgr),
+ "Failed to register CTF thermal interrupt!",
+ return -EINVAL);
+
+ return 0;
+}
+
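The info pointer is treated as an array of three struct cgs_irq_src_funcs entries, indexed in a fixed order. A hedged sketch of a caller honouring that layout; only vega10_register_thermal_interrupt and the struct name come from the patch, everything else is illustrative:

/* Expected layout of the array passed as 'info':
 *   [0] high-temperature alert (THM client, src_id 0)
 *   [1] low-temperature alert  (THM client, src_id 1)
 *   [2] CTF / critical fault   (ROM_SMUIO client, src_id 83, GPIO_19)
 * Illustrative wrapper only.
 */
static int example_register_thermal(struct pp_hwmgr *hwmgr,
				    struct cgs_irq_src_funcs srcs[3])
{
	return vega10_register_thermal_interrupt(hwmgr, srcs);
}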
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.backend_init = vega10_hwmgr_backend_init,
.backend_fini = vega10_hwmgr_backend_fini,
@@ -5149,12 +5119,15 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.get_mclk_od = vega10_get_mclk_od,
.set_mclk_od = vega10_set_mclk_od,
.avfs_control = vega10_avfs_enable,
+ .notify_cac_buffer_info = vega10_notify_cac_buffer_info,
+ .register_internal_thermal_interrupt = vega10_register_thermal_interrupt,
+ .start_thermal_controller = vega10_start_thermal_controller,
};
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
hwmgr->pptable_func = &vega10_pptable_funcs;
- pp_vega10_thermal_initialize(hwmgr);
+
return 0;
}
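pp_vega10_thermal_initialize() is gone from the init path; the work presumably now runs through the .start_thermal_controller hook registered above. A hedged sketch of how the common hwmgr layer might dispatch it; the hook's second argument and the caller itself are assumptions, only the hook name comes from this patch:

/* Illustrative dispatch; the hook signature is assumed. */
static int example_start_thermal(struct pp_hwmgr *hwmgr,
				 struct PP_TemperatureRange *range)
{
	if (hwmgr->hwmgr_func->start_thermal_controller)
		return hwmgr->hwmgr_func->start_thermal_controller(hwmgr, range);

	return 0;
}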
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index 676cd7735883..b4b461c3b8ee 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -31,7 +31,6 @@
#include "vega10_ppsmc.h"
#include "vega10_powertune.h"
-extern const uint32_t PhwVega10_Magic;
#define VEGA10_MAX_HARDWARE_POWERLEVELS 2
#define WaterMarksExist 1
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index e7fa67063cdc..598a194737a9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -854,99 +854,79 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
uint32_t en = (enable ? 1 : 0);
uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
- data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_SQ_CTRL0, DIDT_CTRL_EN, en);
didt_block_info &= ~SQ_Enable_MASK;
didt_block_info |= en << SQ_Enable_SHIFT;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
- data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_DB_CTRL0, DIDT_CTRL_EN, en);
didt_block_info &= ~DB_Enable_MASK;
didt_block_info |= en << DB_Enable_SHIFT;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
- data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_TD_CTRL0, DIDT_CTRL_EN, en);
didt_block_info &= ~TD_Enable_MASK;
didt_block_info |= en << TD_Enable_SHIFT;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
- data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_TCP_CTRL0, DIDT_CTRL_EN, en);
didt_block_info &= ~TCP_Enable_MASK;
didt_block_info |= en << TCP_Enable_SHIFT;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0);
- data &= ~DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_DBR_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_DBR_CTRL0, DIDT_CTRL_EN, en);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) {
+ if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL);
- data &= ~DIDT_SQ_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL);
- data &= ~DIDT_DB_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_DB_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DB_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_DB_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL);
- data &= ~DIDT_TD_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_TD_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TD_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL);
- data &= ~DIDT_TCP_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_TCP_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_TCP_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL);
- data &= ~DIDT_DBR_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_DBR_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_DBR_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data);
}
}
if (enable) {
/* For Vega10, SMC does not support any mask yet. */
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!");
}
}
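The replacement macros hide the usual read-modify-write of a named register bit field. A self-contained illustration of the mask/shift arithmetic they stand for; the register name, field layout and value below are invented for the example, while the real macros expand to the <REG>__<FIELD>_MASK/__SHIFT constants seen in the removed lines:

#include <stdio.h>
#include <stdint.h>

/* Invented field layout: a 1-bit enable at bit 4 of a 32-bit register. */
#define EX_CTRL0__CTRL_EN_MASK   0x00000010u
#define EX_CTRL0__CTRL_EN__SHIFT 4

/* What a SET_FIELD-style macro boils down to. */
#define EX_SET_FIELD(reg, mask, shift, val) \
	(((reg) & ~(mask)) | (((uint32_t)(val) << (shift)) & (mask)))

int main(void)
{
	uint32_t reg = 0xdeadbeefu;
	uint32_t en = 1;

	reg = EX_SET_FIELD(reg, EX_CTRL0__CTRL_EN_MASK,
			   EX_CTRL0__CTRL_EN__SHIFT, en);
	printf("0x%08x\n", reg);   /* bit 4 is now set, all other bits preserved */
	return 0;
}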
@@ -1040,10 +1020,10 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
cgs_enter_safe_mode(hwmgr->device, false);
vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC))
+ if (PP_CAP(PHM_PlatformCaps_GCEDC))
vega10_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega10);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ if (PP_CAP(PHM_PlatformCaps_PSM))
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10);
return 0;
@@ -1059,12 +1039,12 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
cgs_enter_safe_mode(hwmgr->device, false);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ if (PP_CAP(PHM_PlatformCaps_PSM))
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
return 0;
@@ -1159,12 +1139,12 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega10);
vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega10);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ if (PP_CAP(PHM_PlatformCaps_PSM))
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10);
return 0;
@@ -1180,12 +1160,12 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
cgs_enter_safe_mode(hwmgr->device, false);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ if (PP_CAP(PHM_PlatformCaps_PSM))
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
return 0;
@@ -1263,8 +1243,8 @@ int vega10_enable_didt_config(struct pp_hwmgr *hwmgr)
}
if (0 == result) {
- PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr->smumgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
- "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
+ result = vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
data->smu_features[GNLD_DIDT].enabled = true;
}
}
@@ -1310,8 +1290,8 @@ int vega10_disable_didt_config(struct pp_hwmgr *hwmgr)
}
if (0 == result) {
- PP_ASSERT_WITH_CODE((0 != vega10_enable_smc_features(hwmgr->smumgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
- "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
+ result = vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
data->smu_features[GNLD_DIDT].enabled = false;
}
}
@@ -1364,7 +1344,7 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->registry_data.enable_pkg_pwr_tracking_feature)
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetPptLimit, n);
return 0;
@@ -1381,16 +1361,15 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr)
(uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
int result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->smu_features[GNLD_PPT].supported)
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_PPT].smu_feature_bitmap),
"Attempt to enable PPT feature Failed!",
data->smu_features[GNLD_PPT].supported = false);
if (data->smu_features[GNLD_TDC].supported)
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_TDC].smu_feature_bitmap),
"Attempt to enable PPT feature Failed!",
data->smu_features[GNLD_TDC].supported = false);
@@ -1409,16 +1388,15 @@ int vega10_disable_power_containment(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->smu_features[GNLD_PPT].supported)
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_PPT].smu_feature_bitmap),
"Attempt to disable PPT feature Failed!",
data->smu_features[GNLD_PPT].supported = false);
if (data->smu_features[GNLD_TDC].supported)
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_TDC].smu_feature_bitmap),
"Attempt to disable PPT feature Failed!",
data->smu_features[GNLD_TDC].supported = false);
@@ -1430,7 +1408,7 @@ int vega10_disable_power_containment(struct pp_hwmgr *hwmgr)
static int vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
}
@@ -1438,8 +1416,7 @@ int vega10_power_control_set_level(struct pp_hwmgr *hwmgr)
{
int adjust_percent, result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
adjust_percent =
hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
hwmgr->platform_descriptor.TDPAdjustment :
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index e343df190375..f14c7611fad3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -291,8 +291,7 @@ static int get_mm_clock_voltage_table(
table_size = sizeof(uint32_t) +
sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record) *
mm_dependency_table->ucNumEntries;
- mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ mm_table = kzalloc(table_size, GFP_KERNEL);
if (!mm_table)
return -ENOMEM;
@@ -519,8 +518,7 @@ static int get_socclk_voltage_dependency_table(
sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
clk_dep_table->ucNumEntries;
- clk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ clk_table = kzalloc(table_size, GFP_KERNEL);
if (!clk_table)
return -ENOMEM;
@@ -554,8 +552,7 @@ static int get_mclk_voltage_dependency_table(
sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
mclk_dep_table->ucNumEntries;
- mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ mclk_table = kzalloc(table_size, GFP_KERNEL);
if (!mclk_table)
return -ENOMEM;
@@ -596,8 +593,7 @@ static int get_gfxclk_voltage_dependency_table(
sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
clk_dep_table->ucNumEntries;
- clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ clk_table = kzalloc(table_size, GFP_KERNEL);
if (!clk_table)
return -ENOMEM;
@@ -663,8 +659,7 @@ static int get_pix_clk_voltage_dependency_table(
sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
clk_dep_table->ucNumEntries;
- clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ clk_table = kzalloc(table_size, GFP_KERNEL);
if (!clk_table)
return -ENOMEM;
@@ -728,8 +723,7 @@ static int get_dcefclk_voltage_dependency_table(
sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
num_entries;
- clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ clk_table = kzalloc(table_size, GFP_KERNEL);
if (!clk_table)
return -ENOMEM;
@@ -772,8 +766,7 @@ static int get_pcie_table(struct pp_hwmgr *hwmgr,
sizeof(struct phm_ppt_v1_pcie_record) *
atom_pcie_table->ucNumEntries;
- pcie_table = (struct phm_ppt_v1_pcie_table *)
- kzalloc(table_size, GFP_KERNEL);
+ pcie_table = kzalloc(table_size, GFP_KERNEL);
if (!pcie_table)
return -ENOMEM;
@@ -1026,10 +1019,9 @@ static int get_vddc_lookup_table(
table_size = sizeof(uint32_t) +
sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels;
- table = (phm_ppt_v1_voltage_lookup_table *)
- kzalloc(table_size, GFP_KERNEL);
+ table = kzalloc(table_size, GFP_KERNEL);
- if (NULL == table)
+ if (table == NULL)
return -ENOMEM;
table->count = vddc_lookup_pp_tables->ucNumEntries;
@@ -1138,12 +1130,12 @@ int vega10_pp_tables_initialize(struct pp_hwmgr *hwmgr)
hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v2_information), GFP_KERNEL);
- PP_ASSERT_WITH_CODE((NULL != hwmgr->pptable),
+ PP_ASSERT_WITH_CODE((hwmgr->pptable != NULL),
"Failed to allocate hwmgr->pptable!", return -ENOMEM);
powerplay_table = get_powerplay_table(hwmgr);
- PP_ASSERT_WITH_CODE((NULL != powerplay_table),
+ PP_ASSERT_WITH_CODE((powerplay_table != NULL),
"Missing PowerPlay Table!", return -1);
result = check_powerplay_tables(hwmgr, powerplay_table);
@@ -1182,7 +1174,6 @@ int vega10_pp_tables_initialize(struct pp_hwmgr *hwmgr)
static int vega10_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
{
- int result = 0;
struct phm_ppt_v2_information *pp_table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
@@ -1225,7 +1216,7 @@ static int vega10_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
kfree(hwmgr->pptable);
hwmgr->pptable = NULL;
- return result;
+ return 0;
}
const struct pp_table_func vega10_pptable_funcs = {
@@ -1238,7 +1229,7 @@ int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
const ATOM_Vega10_State_Array *state_arrays;
const ATOM_Vega10_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr);
- PP_ASSERT_WITH_CODE((NULL != pp_table),
+ PP_ASSERT_WITH_CODE((pp_table != NULL),
"Missing PowerPlay Table!", return -1);
PP_ASSERT_WITH_CODE((pp_table->sHeader.format_revision >=
ATOM_Vega10_TABLE_REVISION_VEGA10),
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index d44243441d28..dc3761bcb9b6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -31,11 +31,11 @@
static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
{
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentRpm),
"Attempt to get current RPM from SMC Failed!",
return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
current_rpm),
"Attempt to read current RPM from SMC Failed!",
return -1);
@@ -54,8 +54,7 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
fan_speed_info->min_percent = 0;
fan_speed_info->max_percent = 100;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
+ if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution) {
fan_speed_info->supports_rpm_read = true;
@@ -105,14 +104,15 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return -1;
- if (data->smu_features[GNLD_FAN_CONTROL].supported)
+ if (data->smu_features[GNLD_FAN_CONTROL].supported) {
result = vega10_get_current_rpm(hwmgr, speed);
- else {
+ } else {
uint32_t reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS);
- tach_period = (cgs_read_register(hwmgr->device,
- reg) & CG_TACH_STATUS__TACH_PERIOD_MASK) >>
- CG_TACH_STATUS__TACH_PERIOD__SHIFT;
+ tach_period =
+ CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_TACH_STATUS,
+ TACH_PERIOD);
if (tach_period == 0)
return -EINVAL;
@@ -141,23 +141,20 @@ int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
if (hwmgr->fan_ctrl_is_in_default_mode) {
hwmgr->fan_ctrl_default_mode =
- (cgs_read_register(hwmgr->device, reg) &
- CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >>
- CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
- hwmgr->tmin = (cgs_read_register(hwmgr->device, reg) &
- CG_FDO_CTRL2__TMIN_MASK) >>
- CG_FDO_CTRL2__TMIN__SHIFT;
+ CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, FDO_PWM_MODE);
+ hwmgr->tmin =
+ CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, TMIN);
hwmgr->fan_ctrl_is_in_default_mode = false;
}
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__TMIN_MASK) |
- (0 << CG_FDO_CTRL2__TMIN__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, TMIN, 0));
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK) |
- (mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, FDO_PWM_MODE, mode));
return 0;
}
@@ -176,14 +173,13 @@ int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
if (!hwmgr->fan_ctrl_is_in_default_mode) {
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK) |
- (hwmgr->fan_ctrl_default_mode <<
- CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, FDO_PWM_MODE,
+ hwmgr->fan_ctrl_default_mode));
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__TMIN_MASK) |
- (hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, TMIN,
+ hwmgr->tmin));
hwmgr->fan_ctrl_is_in_default_mode = true;
}
@@ -203,7 +199,7 @@ static int vega10_enable_fan_control_feature(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_FAN_CONTROL].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(
- hwmgr->smumgr, true,
+ hwmgr, true,
data->smu_features[GNLD_FAN_CONTROL].
smu_feature_bitmap),
"Attempt to Enable FAN CONTROL feature Failed!",
@@ -220,7 +216,7 @@ static int vega10_disable_fan_control_feature(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_FAN_CONTROL].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(
- hwmgr->smumgr, false,
+ hwmgr, false,
data->smu_features[GNLD_FAN_CONTROL].
smu_feature_bitmap),
"Attempt to Enable FAN CONTROL feature Failed!",
@@ -279,16 +275,14 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (speed > 100)
speed = 100;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_FDO_CTRL1_BASE_IDX, mmCG_FDO_CTRL1);
- duty100 = (cgs_read_register(hwmgr->device, reg) &
- CG_FDO_CTRL1__FMAX_DUTY100_MASK) >>
- CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
+ duty100 = CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL1, FMAX_DUTY100);
if (duty100 == 0)
return -EINVAL;
@@ -300,9 +294,8 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_FDO_CTRL0_BASE_IDX, mmCG_FDO_CTRL0);
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK) |
- (duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
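Between the two hunks above, the requested percentage is presumably scaled against FMAX_DUTY100 to produce the FDO_STATIC_DUTY value written below; the elided arithmetic is not shown in this patch, so the standalone sketch below only illustrates the scaling it would have to perform (the 64-bit intermediate to avoid overflow is an assumption):

#include <stdio.h>
#include <stdint.h>

static uint32_t percent_to_duty(uint32_t speed, uint32_t duty100)
{
	uint64_t tmp = (uint64_t)speed * duty100;

	return (uint32_t)(tmp / 100);   /* kernel code would use do_div() */
}

int main(void)
{
	/* e.g. FMAX_DUTY100 == 255: a 45 % request maps to duty 114 */
	printf("%u\n", percent_to_duty(45, 255));
	return 0;
}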
@@ -314,18 +307,13 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
*/
int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
{
- int result;
-
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
- result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
- } else
- result = vega10_fan_ctrl_set_default_mode(hwmgr);
-
- return result;
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ return vega10_fan_ctrl_start_smc_fan_control(hwmgr);
+ else
+ return vega10_fan_ctrl_set_default_mode(hwmgr);
}
/**
@@ -342,12 +330,11 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
uint32_t reg;
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
- (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
- (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
+ (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
+ (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return -1;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
if (!result) {
@@ -356,9 +343,9 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS);
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_TACH_STATUS__TACH_PERIOD_MASK) |
- (tach_period << CG_TACH_STATUS__TACH_PERIOD__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_TACH_STATUS, TACH_PERIOD,
+ tach_period));
}
return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);
}
@@ -374,12 +361,12 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
uint32_t reg;
reg = soc15_get_register_offset(THM_HWID, 0,
- mmCG_TACH_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS);
+ mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS);
temp = cgs_read_register(hwmgr->device, reg);
- temp = (temp & CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK) >>
- CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP__SHIFT;
+ temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
+ CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
temp = temp & 0x1ff;
@@ -418,20 +405,10 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
val = cgs_read_register(hwmgr->device, reg);
- val &= (~THM_THERMAL_INT_CTRL__MAX_IH_CREDIT_MASK);
- val |= (5 << THM_THERMAL_INT_CTRL__MAX_IH_CREDIT__SHIFT);
-
- val &= (~THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA_MASK);
- val |= (1 << THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA__SHIFT);
-
- val &= (~THM_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK);
- val |= ((high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
- << THM_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT);
-
- val &= (~THM_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
- val |= ((low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
- << THM_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
-
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
cgs_write_register(hwmgr->device, reg, val);
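DIG_THERM_INTH/INTL are programmed in whole degrees, so the incoming limits are divided by PP_TEMPERATURE_UNITS_PER_CENTIGRADES first. Assuming that constant is 1000, i.e. the limits arrive in millidegrees Celsius, a quick standalone check of the conversion:

#include <stdio.h>

#define PP_TEMPERATURE_UNITS_PER_CENTIGRADES 1000   /* assumed value */

int main(void)
{
	int high = 91000, low = -273150;   /* illustrative trip points in mC */

	printf("DIG_THERM_INTH=%d DIG_THERM_INTL=%d\n",
	       high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES,
	       low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
	return 0;
}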
@@ -452,19 +429,16 @@ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_TACH_CTRL_BASE_IDX, mmCG_TACH_CTRL);
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_TACH_CTRL__EDGE_PER_REV_MASK) |
- ((hwmgr->thermal_controller.fanInfo.
- ucTachometerPulsesPerRevolution - 1) <<
- CG_TACH_CTRL__EDGE_PER_REV__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_TACH_CTRL, EDGE_PER_REV,
+ hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1));
}
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK) |
- (0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28));
return 0;
}
@@ -484,7 +458,7 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_FW_CTF].enabled)
printk("[Thermal_EnableAlert] FW CTF Already Enabled!\n");
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_FW_CTF].smu_feature_bitmap),
"Attempt to Enable FW CTF feature Failed!",
@@ -516,7 +490,7 @@ int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
printk("[Thermal_EnableAlert] FW CTF Already disabled!\n");
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false,
data->smu_features[GNLD_FW_CTF].smu_feature_bitmap),
"Attempt to disable FW CTF feature Failed!",
@@ -554,8 +528,7 @@ int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
* @param Result the last failure code
* @return result from set temperature range routine
*/
-int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
int ret;
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
@@ -573,7 +546,7 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
table->FanTargetTemperature = hwmgr->thermal_controller.
advanceFanControlParameters.usTMax;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
(uint32_t)table->FanTargetTemperature);
@@ -602,7 +575,7 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
table->FanStartTemp = hwmgr->thermal_controller.
advanceFanControlParameters.usZeroRPMStartTemperature;
- ret = vega10_copy_table_to_smc(hwmgr->smumgr,
+ ret = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)(&(data->smc_state_table.pp_table)), PPTABLE);
if (ret)
pr_info("Failed to update Fan Control Table in PPTable!");
@@ -619,123 +592,50 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return result from set temperature range routine
*/
-int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+int vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
/* If the fantable setup has failed we could have disabled
* PHM_PlatformCaps_MicrocodeFanControl even after
* this function was included in the table.
* Make sure that we still think controlling the fan is OK.
*/
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega10_fan_ctrl_start_smc_fan_control(hwmgr);
- }
return 0;
}
-/**
-* Set temperature range for high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int tf_vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+
+int vega10_start_thermal_controller(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range)
{
- struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
+ int ret = 0;
if (range == NULL)
return -EINVAL;
- return vega10_thermal_set_temperature_range(hwmgr, range);
-}
-
-/**
-* Programs one-time setting registers
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from initialize thermal controller routine
-*/
-int tf_vega10_thermal_initialize(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return vega10_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from enable alert routine
-*/
-int tf_vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return vega10_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from disable alert routine
-*/
-static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return vega10_thermal_disable_alert(hwmgr);
-}
+ vega10_thermal_initialize(hwmgr);
+ ret = vega10_thermal_set_temperature_range(hwmgr, range);
+ if (ret)
+ return -EINVAL;
-static struct phm_master_table_item
-vega10_thermal_start_thermal_controller_master_list[] = {
- { .tableFunction = tf_vega10_thermal_initialize },
- { .tableFunction = tf_vega10_thermal_set_temperature_range },
- { .tableFunction = tf_vega10_thermal_enable_alert },
+ vega10_thermal_enable_alert(hwmgr);
/* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this
* so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
- { .tableFunction = tf_vega10_thermal_setup_fan_table },
- { .tableFunction = tf_vega10_thermal_start_smc_fan_control },
- { }
-};
+ ret = vega10_thermal_setup_fan_table(hwmgr);
+ if (ret)
+ return -EINVAL;
-static struct phm_master_table_header
-vega10_thermal_start_thermal_controller_master = {
- 0,
- PHM_MasterTableFlag_None,
- vega10_thermal_start_thermal_controller_master_list
-};
+ vega10_thermal_start_smc_fan_control(hwmgr);
-static struct phm_master_table_item
-vega10_thermal_set_temperature_range_master_list[] = {
- { .tableFunction = tf_vega10_thermal_disable_alert },
- { .tableFunction = tf_vega10_thermal_set_temperature_range },
- { .tableFunction = tf_vega10_thermal_enable_alert },
- { }
+ return 0;
};
-struct phm_master_table_header
-vega10_thermal_set_temperature_range_master = {
- 0,
- PHM_MasterTableFlag_None,
- vega10_thermal_set_temperature_range_master_list
-};
+
+
int vega10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{
@@ -745,32 +645,3 @@ int vega10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
}
return 0;
}
-
-/**
-* Initializes the thermal controller related functions
-* in the Hardware Manager structure.
-* @param hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- result = phm_construct_table(hwmgr,
- &vega10_thermal_set_temperature_range_master,
- &(hwmgr->set_temperature_range));
-
- if (!result) {
- result = phm_construct_table(hwmgr,
- &vega10_thermal_start_thermal_controller_master,
- &(hwmgr->start_thermal_controller));
- if (result)
- phm_destroy_table(hwmgr,
- &(hwmgr->set_temperature_range));
- }
-
- if (!result)
- hwmgr->fan_ctrl_is_in_default_mode = true;
- return result;
-}
-
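The hunks above replace open-coded read-modify-write sequences (mask out the field, then OR in the shifted value) with the CGS_REG_SET_FIELD() helper. A minimal sketch of how such a helper is typically composed from the <REG>__<FIELD>_MASK and <REG>__<FIELD>__SHIFT definitions follows; EXAMPLE_REG_SET_FIELD is an illustrative name only, and the in-tree CGS_REG_SET_FIELD may be defined differently.

/*
 * Illustrative only: a field-update helper built from the usual
 * <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT register definitions.
 * The in-tree CGS_REG_SET_FIELD used above may differ in detail.
 */
#define EXAMPLE_REG_SET_FIELD(val, reg, field, fieldval)		\
	(((val) & ~reg##__##field##_MASK) |				\
	 (((fieldval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* For example, the TACH_PERIOD update above collapses to: */
/* new = EXAMPLE_REG_SET_FIELD(old, CG_TACH_STATUS, TACH_PERIOD, tach_period); */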
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
index 776f3a2effc0..82f10bdd5f07 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
@@ -50,13 +50,6 @@ struct vega10_temperature {
#define FDO_PWM_MODE_STATIC_RPM 5
-extern int tf_vega10_thermal_initialize(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
-extern int tf_vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
-extern int tf_vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
-
extern int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
@@ -69,7 +62,6 @@ extern int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr,
extern int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t speed);
extern int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_vega10_thermal_initialize(struct pp_hwmgr *hwmgr);
extern int vega10_thermal_ctrl_uninitialize_thermal_controller(
struct pp_hwmgr *hwmgr);
extern int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr,
@@ -77,9 +69,11 @@ extern int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr,
extern int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
uint32_t *speed);
extern int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
extern int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr);
-int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int vega10_start_thermal_controller(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range);
+extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
#endif
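The thermal code above also shortens the capability checks from phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, ...) to PP_CAP(...). A plausible shape for that convenience macro, assuming it simply wraps phm_cap_enabled() over the hwmgr already in scope, is shown below; the exact in-tree definition is not part of this patch and may differ.

/* Assumed definition, shown for context only; not part of this patch. */
#define PP_CAP(c)	phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, (c))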
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 07e9c0b5915d..95932cc88460 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -31,9 +31,7 @@
#include "dm_pp_interface.h"
extern const struct amd_ip_funcs pp_ip_funcs;
-extern const struct amd_powerplay_funcs pp_dpm_funcs;
-
-#define PP_DPM_DISABLED 0xCCCC
+extern const struct amd_pm_funcs pp_dpm_funcs;
enum amd_pp_sensors {
AMDGPU_PP_SENSOR_GFX_SCLK = 0,
@@ -50,94 +48,12 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_GPU_POWER,
};
-enum amd_pp_event {
- AMD_PP_EVENT_INITIALIZE = 0,
- AMD_PP_EVENT_UNINITIALIZE,
- AMD_PP_EVENT_POWER_SOURCE_CHANGE,
- AMD_PP_EVENT_SUSPEND,
- AMD_PP_EVENT_RESUME,
- AMD_PP_EVENT_ENTER_REST_STATE,
- AMD_PP_EVENT_EXIT_REST_STATE,
- AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE,
- AMD_PP_EVENT_THERMAL_NOTIFICATION,
- AMD_PP_EVENT_VBIOS_NOTIFICATION,
- AMD_PP_EVENT_ENTER_THERMAL_STATE,
- AMD_PP_EVENT_EXIT_THERMAL_STATE,
- AMD_PP_EVENT_ENTER_FORCED_STATE,
- AMD_PP_EVENT_EXIT_FORCED_STATE,
- AMD_PP_EVENT_ENTER_EXCLUSIVE_MODE,
- AMD_PP_EVENT_EXIT_EXCLUSIVE_MODE,
- AMD_PP_EVENT_ENTER_SCREEN_SAVER,
- AMD_PP_EVENT_EXIT_SCREEN_SAVER,
- AMD_PP_EVENT_VPU_RECOVERY_BEGIN,
- AMD_PP_EVENT_VPU_RECOVERY_END,
- AMD_PP_EVENT_ENABLE_POWER_PLAY,
- AMD_PP_EVENT_DISABLE_POWER_PLAY,
- AMD_PP_EVENT_CHANGE_POWER_SOURCE_UI_LABEL,
- AMD_PP_EVENT_ENABLE_USER2D_PERFORMANCE,
- AMD_PP_EVENT_DISABLE_USER2D_PERFORMANCE,
- AMD_PP_EVENT_ENABLE_USER3D_PERFORMANCE,
- AMD_PP_EVENT_DISABLE_USER3D_PERFORMANCE,
- AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST,
- AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST,
- AMD_PP_EVENT_ENABLE_REDUCED_REFRESH_RATE,
- AMD_PP_EVENT_DISABLE_REDUCED_REFRESH_RATE,
- AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING,
- AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING,
- AMD_PP_EVENT_ENABLE_CGPG,
- AMD_PP_EVENT_DISABLE_CGPG,
- AMD_PP_EVENT_ENTER_TEXT_MODE,
- AMD_PP_EVENT_EXIT_TEXT_MODE,
- AMD_PP_EVENT_VIDEO_START,
- AMD_PP_EVENT_VIDEO_STOP,
- AMD_PP_EVENT_ENABLE_USER_STATE,
- AMD_PP_EVENT_DISABLE_USER_STATE,
- AMD_PP_EVENT_READJUST_POWER_STATE,
- AMD_PP_EVENT_START_INACTIVITY,
- AMD_PP_EVENT_STOP_INACTIVITY,
- AMD_PP_EVENT_LINKED_ADAPTERS_READY,
- AMD_PP_EVENT_ADAPTER_SAFE_TO_DISABLE,
- AMD_PP_EVENT_COMPLETE_INIT,
- AMD_PP_EVENT_CRITICAL_THERMAL_FAULT,
- AMD_PP_EVENT_BACKLIGHT_CHANGED,
- AMD_PP_EVENT_ENABLE_VARI_BRIGHT,
- AMD_PP_EVENT_DISABLE_VARI_BRIGHT,
- AMD_PP_EVENT_ENABLE_VARI_BRIGHT_ON_POWER_XPRESS,
- AMD_PP_EVENT_DISABLE_VARI_BRIGHT_ON_POWER_XPRESS,
- AMD_PP_EVENT_SET_VARI_BRIGHT_LEVEL,
- AMD_PP_EVENT_VARI_BRIGHT_MONITOR_MEASUREMENT,
- AMD_PP_EVENT_SCREEN_ON,
- AMD_PP_EVENT_SCREEN_OFF,
- AMD_PP_EVENT_PRE_DISPLAY_CONFIG_CHANGE,
- AMD_PP_EVENT_ENTER_ULP_STATE,
- AMD_PP_EVENT_EXIT_ULP_STATE,
- AMD_PP_EVENT_REGISTER_IP_STATE,
- AMD_PP_EVENT_UNREGISTER_IP_STATE,
- AMD_PP_EVENT_ENTER_MGPU_MODE,
- AMD_PP_EVENT_EXIT_MGPU_MODE,
- AMD_PP_EVENT_ENTER_MULTI_GPU_MODE,
- AMD_PP_EVENT_PRE_SUSPEND,
- AMD_PP_EVENT_PRE_RESUME,
- AMD_PP_EVENT_ENTER_BACOS,
- AMD_PP_EVENT_EXIT_BACOS,
- AMD_PP_EVENT_RESUME_BACO,
- AMD_PP_EVENT_RESET_BACO,
- AMD_PP_EVENT_PRE_DISPLAY_PHY_ACCESS,
- AMD_PP_EVENT_POST_DISPLAY_PHY_CCESS,
- AMD_PP_EVENT_START_COMPUTE_APPLICATION,
- AMD_PP_EVENT_STOP_COMPUTE_APPLICATION,
- AMD_PP_EVENT_REDUCE_POWER_LIMIT,
- AMD_PP_EVENT_ENTER_FRAME_LOCK,
- AMD_PP_EVENT_EXIT_FRAME_LOOCK,
- AMD_PP_EVENT_LONG_IDLE_REQUEST_BACO,
- AMD_PP_EVENT_LONG_IDLE_ENTER_BACO,
- AMD_PP_EVENT_LONG_IDLE_EXIT_BACO,
- AMD_PP_EVENT_HIBERNATE,
- AMD_PP_EVENT_CONNECTED_STANDBY,
- AMD_PP_EVENT_ENTER_SELF_REFRESH,
- AMD_PP_EVENT_EXIT_SELF_REFRESH,
- AMD_PP_EVENT_START_AVFS_BTC,
- AMD_PP_EVENT_MAX
+enum amd_pp_task {
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+ AMD_PP_TASK_ENABLE_USER_STATE,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ AMD_PP_TASK_COMPLETE_INIT,
+ AMD_PP_TASK_MAX
};
struct amd_pp_init {
@@ -295,12 +211,6 @@ enum {
PP_GROUP_MAX
};
-enum pp_clock_type {
- PP_SCLK,
- PP_MCLK,
- PP_PCIE,
-};
-
struct pp_states_info {
uint32_t nums;
uint32_t states[16];
@@ -355,56 +265,13 @@ struct pp_display_clock_request {
support << PP_STATE_SUPPORT_SHIFT |\
state << PP_STATE_SHIFT)
-struct amd_powerplay_funcs {
- int (*get_temperature)(void *handle);
- int (*load_firmware)(void *handle);
- int (*wait_for_fw_loading_complete)(void *handle);
- int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
- enum amd_dpm_forced_level (*get_performance_level)(void *handle);
- enum amd_pm_state_type (*get_current_power_state)(void *handle);
- int (*get_sclk)(void *handle, bool low);
- int (*get_mclk)(void *handle, bool low);
- int (*powergate_vce)(void *handle, bool gate);
- int (*powergate_uvd)(void *handle, bool gate);
- int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id,
- void *input, void *output);
- int (*set_fan_control_mode)(void *handle, uint32_t mode);
- int (*get_fan_control_mode)(void *handle);
- int (*set_fan_speed_percent)(void *handle, uint32_t percent);
- int (*get_fan_speed_percent)(void *handle, uint32_t *speed);
- int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
- int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
- int (*get_pp_table)(void *handle, char **table);
- int (*set_pp_table)(void *handle, const char *buf, size_t size);
- int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
- int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
- int (*get_sclk_od)(void *handle);
- int (*set_sclk_od)(void *handle, uint32_t value);
- int (*get_mclk_od)(void *handle);
- int (*set_mclk_od)(void *handle, uint32_t value);
- int (*read_sensor)(void *handle, int idx, void *value, int *size);
- struct amd_vce_state* (*get_vce_clock_state)(void *handle, unsigned idx);
- int (*reset_power_profile_state)(void *handle,
- struct amd_pp_profile *request);
- int (*get_power_profile_state)(void *handle,
- struct amd_pp_profile *query);
- int (*set_power_profile_state)(void *handle,
- struct amd_pp_profile *request);
- int (*switch_power_profile)(void *handle,
- enum amd_pp_profile_type type);
-};
-
struct amd_powerplay {
+ struct cgs_device *cgs_device;
void *pp_handle;
const struct amd_ip_funcs *ip_funcs;
- const struct amd_powerplay_funcs *pp_funcs;
+ const struct amd_pm_funcs *pp_funcs;
};
-int amd_powerplay_create(struct amd_pp_init *pp_init,
- void **handle);
-
-int amd_powerplay_destroy(void *handle);
-
int amd_powerplay_reset(void *handle);
int amd_powerplay_display_configuration_change(void *handle,
@@ -437,6 +304,5 @@ int amd_powerplay_display_clock_voltage_request(void *handle,
int amd_powerplay_get_display_mode_validation_clocks(void *handle,
struct amd_pp_simple_clock_info *output);
-int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id);
#endif /* _AMD_POWERPLAY_H_ */
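The header changes above drop the large amd_pp_event enumeration and the per-device amd_powerplay_funcs vtable in favor of the much smaller enum amd_pp_task and the shared struct amd_pm_funcs. A hypothetical dispatcher over the surviving task IDs might look like the sketch below; the real pp_dpm dispatch path is not shown in this patch, and example_dispatch_task() is an invented name.

/*
 * Hypothetical sketch only: callers now request one of the four
 * amd_pp_task IDs instead of one of the many removed amd_pp_event values.
 */
static int example_dispatch_task(void *handle, enum amd_pp_task task_id)
{
	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
	case AMD_PP_TASK_ENABLE_USER_STATE:
	case AMD_PP_TASK_READJUST_POWER_STATE:
	case AMD_PP_TASK_COMPLETE_INIT:
		/* hand the requested task to the hardware manager */
		return 0;
	default:
		return -EINVAL;
	}
}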
diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h b/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h
deleted file mode 100644
index b9d84de8a44d..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _EVENT_MANAGER_H_
-#define _EVENT_MANAGER_H_
-
-#include "power_state.h"
-#include "pp_power_source.h"
-#include "hardwaremanager.h"
-#include "pp_asicblocks.h"
-
-struct pp_eventmgr;
-enum amd_pp_event;
-
-enum PEM_EventDataValid {
- PEM_EventDataValid_RequestedStateID = 0,
- PEM_EventDataValid_RequestedUILabel,
- PEM_EventDataValid_NewPowerState,
- PEM_EventDataValid_RequestedPowerSource,
- PEM_EventDataValid_RequestedClocks,
- PEM_EventDataValid_CurrentTemperature,
- PEM_EventDataValid_AsicBlocks,
- PEM_EventDataValid_ODParameters,
- PEM_EventDataValid_PXAdapterPrefs,
- PEM_EventDataValid_PXUserPrefs,
- PEM_EventDataValid_PXSwitchReason,
- PEM_EventDataValid_PXSwitchPhase,
- PEM_EventDataValid_HdVideo,
- PEM_EventDataValid_BacklightLevel,
- PEM_EventDatavalid_VariBrightParams,
- PEM_EventDataValid_VariBrightLevel,
- PEM_EventDataValid_VariBrightImmediateChange,
- PEM_EventDataValid_PercentWhite,
- PEM_EventDataValid_SdVideo,
- PEM_EventDataValid_HTLinkChangeReason,
- PEM_EventDataValid_HWBlocks,
- PEM_EventDataValid_RequestedThermalState,
- PEM_EventDataValid_MvcVideo,
- PEM_EventDataValid_Max
-};
-
-typedef enum PEM_EventDataValid PEM_EventDataValid;
-
-/* Number of bits in ULONG variable */
-#define PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD (sizeof(unsigned long)*8)
-
-/* Number of ULONG entries used by event data valid bits */
-#define PEM_MAX_NUM_EVENTDATAVALID_ULONG_ENTRIES \
- ((PEM_EventDataValid_Max + PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD - 1) / \
- PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD)
-
-static inline void pem_set_event_data_valid(unsigned long *fields, PEM_EventDataValid valid_field)
-{
- fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] |=
- (1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD));
-}
-
-static inline void pem_unset_event_data_valid(unsigned long *fields, PEM_EventDataValid valid_field)
-{
- fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] &=
- ~(1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD));
-}
-
-static inline unsigned long pem_is_event_data_valid(const unsigned long *fields, PEM_EventDataValid valid_field)
-{
- return fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] &
- (1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD));
-}
-
-struct pem_event_data {
- unsigned long valid_fields[100];
- unsigned long requested_state_id;
- enum PP_StateUILabel requested_ui_label;
- struct pp_power_state *pnew_power_state;
- enum pp_power_source requested_power_source;
- struct PP_Clocks requested_clocks;
- bool skip_state_adjust_rules;
- struct phm_asic_blocks asic_blocks;
- /* to doPP_ThermalState requestedThermalState;
- enum ThermalStateRequestSrc requestThermalStateSrc;
- PP_Temperature currentTemperature;*/
-
-};
-
-int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event,
- struct pem_event_data *event_data);
-
-bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr);
-
-#endif /* _EVENT_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
deleted file mode 100644
index 7bd8a7e57080..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _EVENTMGR_H_
-#define _EVENTMGR_H_
-
-#include <linux/mutex.h>
-#include "pp_instance.h"
-#include "hardwaremanager.h"
-#include "eventmanager.h"
-#include "pp_feature.h"
-#include "pp_power_source.h"
-#include "power_state.h"
-
-typedef int (*pem_event_action)(struct pp_eventmgr *eventmgr,
- struct pem_event_data *event_data);
-
-struct action_chain {
- const char *description; /* action chain description for debugging purpose */
- const pem_event_action * const *action_chain; /* pointer to chain of event actions */
-};
-
-struct pem_power_source_ui_state_info {
- enum PP_StateUILabel current_ui_label;
- enum PP_StateUILabel default_ui_lable;
- unsigned long configurable_ui_mapping;
-};
-
-struct pp_clock_range {
- uint32_t min_sclk_khz;
- uint32_t max_sclk_khz;
-
- uint32_t min_mclk_khz;
- uint32_t max_mclk_khz;
-
- uint32_t min_vclk_khz;
- uint32_t max_vclk_khz;
-
- uint32_t min_dclk_khz;
- uint32_t max_dclk_khz;
-
- uint32_t min_aclk_khz;
- uint32_t max_aclk_khz;
-
- uint32_t min_eclk_khz;
- uint32_t max_eclk_khz;
-};
-
-enum pp_state {
- UNINITIALIZED,
- INACTIVE,
- ACTIVE
-};
-
-enum pp_ring_index {
- PP_RING_TYPE_GFX_INDEX = 0,
- PP_RING_TYPE_DMA_INDEX,
- PP_RING_TYPE_DMA1_INDEX,
- PP_RING_TYPE_UVD_INDEX,
- PP_RING_TYPE_VCE0_INDEX,
- PP_RING_TYPE_VCE1_INDEX,
- PP_RING_TYPE_CP1_INDEX,
- PP_RING_TYPE_CP2_INDEX,
- PP_NUM_RINGS,
-};
-
-struct pp_request {
- uint32_t flags;
- uint32_t sclk;
- uint32_t sclk_throttle;
- uint32_t mclk;
- uint32_t vclk;
- uint32_t dclk;
- uint32_t eclk;
- uint32_t aclk;
- uint32_t iclk;
- uint32_t vp8clk;
- uint32_t rsv[32];
-};
-
-struct pp_eventmgr {
- struct pp_hwmgr *hwmgr;
- struct pp_smumgr *smumgr;
-
- struct pp_feature_info features[PP_Feature_Max];
- const struct action_chain *event_chain[AMD_PP_EVENT_MAX];
- struct phm_platform_descriptor *platform_descriptor;
- struct pp_clock_range clock_range;
- enum pp_power_source current_power_source;
- struct pem_power_source_ui_state_info ui_state_info[PP_PowerSource_Max];
- enum pp_state states[PP_NUM_RINGS];
- struct pp_request hi_req;
- struct list_head context_list;
- struct mutex lock;
- bool block_adjust_power_state;
- bool enable_cg;
- bool enable_gfx_cgpg;
- int (*pp_eventmgr_init)(struct pp_eventmgr *eventmgr);
- void (*pp_eventmgr_fini)(struct pp_eventmgr *eventmgr);
-};
-
-int eventmgr_early_init(struct pp_instance *handle);
-
-#endif /* _EVENTMGR_H_ */
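The deleted eventmgr.h above defined the pem_event_action callback type and the action_chain container that the removed event-manager layer walked for each event: each outer entry points to a NULL-terminated list of callbacks, and the outer list is itself NULL-terminated. For context, a hypothetical walker over that structure is sketched below (example_walk_action_chain is an invented name; the real, now-deleted implementation may have differed).

/* Hypothetical sketch, using the types defined in the deleted headers above. */
static int example_walk_action_chain(struct pp_eventmgr *eventmgr,
		const struct action_chain *chain,
		struct pem_event_data *event_data)
{
	const pem_event_action * const *sub;
	int ret = 0;

	for (sub = chain->action_chain; *sub && !ret; sub++) {
		const pem_event_action *action;

		/* run every callback in this sub-chain until one fails */
		for (action = *sub; *action && !ret; action++)
			ret = (*action)(eventmgr, event_data);
	}
	return ret;
}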
diff --git a/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
deleted file mode 100644
index 8a31665321a8..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
+++ /dev/null
@@ -1,10299 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _FIJI_PWRVIRUS_H_
-#define _FIJI_PWRVIRUS_H_
-
-#define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a
-#define mmCP_HYP_MEC1_UCODE_DATA 0xf81b
-#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c
-#define mmCP_HYP_MEC2_UCODE_DATA 0xf81d
-
-enum PWR_Command
-{
- PwrCmdNull = 0,
- PwrCmdWrite,
- PwrCmdEnd,
- PwrCmdMax
-};
-typedef enum PWR_Command PWR_Command;
-
-struct PWR_Command_Table
-{
- PWR_Command command;
- ULONG data;
- ULONG reg;
-};
-typedef struct PWR_Command_Table PWR_Command_Table;
-
-#define PWR_VIRUS_TABLE_SIZE 10243
-static const PWR_Command_Table PwrVirusTable[PWR_VIRUS_TABLE_SIZE] =
-{
- { PwrCmdWrite, 0x100100b6, mmPCIE_INDEX },
- { PwrCmdWrite, 0x00000000, mmPCIE_DATA },
- { PwrCmdWrite, 0x100100b6, mmPCIE_INDEX },
- { PwrCmdWrite, 0x0300078c, mmPCIE_DATA },
- { PwrCmdWrite, 0x00000000, mmBIF_CLK_CTRL },
- { PwrCmdWrite, 0x00000001, mmBIF_CLK_CTRL },
- { PwrCmdWrite, 0x00000000, mmBIF_CLK_CTRL },
- { PwrCmdWrite, 0x00000003, mmBIF_FB_EN },
- { PwrCmdWrite, 0x00000000, mmBIF_FB_EN },
- { PwrCmdWrite, 0x00000001, mmBIF_DOORBELL_APER_EN },
- { PwrCmdWrite, 0x00000000, mmBIF_DOORBELL_APER_EN },
- { PwrCmdWrite, 0x014000c0, mmPCIE_INDEX },
- { PwrCmdWrite, 0x00000000, mmPCIE_DATA },
- { PwrCmdWrite, 0x014000c0, mmPCIE_INDEX },
- { PwrCmdWrite, 0x22000000, mmPCIE_DATA },
- { PwrCmdWrite, 0x014000c0, mmPCIE_INDEX },
- { PwrCmdWrite, 0x00000000, mmPCIE_DATA },
- /*
- { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION },
- { PwrCmdWrite, 0x00000000, mmMC_CITF_CNTL },
- { PwrCmdWrite, 0x00000000, mmMC_VM_FB_LOCATION },
- { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION },
- { PwrCmdWrite, 0x00000000, mmMC_VM_FB_LOCATION },
- { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION },
- { PwrCmdWrite, 0x00000000, mmMC_VM_FB_OFFSET },*/
- { PwrCmdWrite, 0x00000000, mmRLC_CSIB_ADDR_LO },
- { PwrCmdWrite, 0x00000000, mmRLC_CSIB_ADDR_HI },
- { PwrCmdWrite, 0x00000000, mmRLC_CSIB_LENGTH },
- /*
- { PwrCmdWrite, 0x00000000, mmMC_VM_MX_L1_TLB_CNTL },
- { PwrCmdWrite, 0x00000001, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR },
- { PwrCmdWrite, 0x00000000, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR },
- { PwrCmdWrite, 0x00000000, mmMC_VM_FB_LOCATION },
- { PwrCmdWrite, 0x009f0090, mmMC_VM_FB_LOCATION },*/
- { PwrCmdWrite, 0x00000000, mmVM_CONTEXT0_CNTL },
- { PwrCmdWrite, 0x00000000, mmVM_CONTEXT1_CNTL },
- /*
- { PwrCmdWrite, 0x00000000, mmMC_VM_AGP_BASE },
- { PwrCmdWrite, 0x00000002, mmMC_VM_AGP_BOT },
- { PwrCmdWrite, 0x00000000, mmMC_VM_AGP_TOP },*/
- { PwrCmdWrite, 0x04000000, mmATC_VM_APERTURE0_LOW_ADDR },
- { PwrCmdWrite, 0x0400ff20, mmATC_VM_APERTURE0_HIGH_ADDR },
- { PwrCmdWrite, 0x00000002, mmATC_VM_APERTURE0_CNTL },
- { PwrCmdWrite, 0x0000ffff, mmATC_VM_APERTURE0_CNTL2 },
- { PwrCmdWrite, 0x00000001, mmATC_VM_APERTURE1_LOW_ADDR },
- { PwrCmdWrite, 0x00000000, mmATC_VM_APERTURE1_HIGH_ADDR },
- { PwrCmdWrite, 0x00000000, mmATC_VM_APERTURE1_CNTL },
- { PwrCmdWrite, 0x00000000, mmATC_VM_APERTURE1_CNTL2 },
- //{ PwrCmdWrite, 0x00000000, mmMC_ARB_RAMCFG },
- { PwrCmdWrite, 0x12011003, mmGB_ADDR_CONFIG },
- { PwrCmdWrite, 0x00800010, mmGB_TILE_MODE0 },
- { PwrCmdWrite, 0x00800810, mmGB_TILE_MODE1 },
- { PwrCmdWrite, 0x00801010, mmGB_TILE_MODE2 },
- { PwrCmdWrite, 0x00801810, mmGB_TILE_MODE3 },
- { PwrCmdWrite, 0x00802810, mmGB_TILE_MODE4 },
- { PwrCmdWrite, 0x00802808, mmGB_TILE_MODE5 },
- { PwrCmdWrite, 0x00802814, mmGB_TILE_MODE6 },
- { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE7 },
- { PwrCmdWrite, 0x00000004, mmGB_TILE_MODE8 },
- { PwrCmdWrite, 0x02000008, mmGB_TILE_MODE9 },
- { PwrCmdWrite, 0x02000010, mmGB_TILE_MODE10 },
- { PwrCmdWrite, 0x06000014, mmGB_TILE_MODE11 },
- { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE12 },
- { PwrCmdWrite, 0x02400008, mmGB_TILE_MODE13 },
- { PwrCmdWrite, 0x02400010, mmGB_TILE_MODE14 },
- { PwrCmdWrite, 0x02400030, mmGB_TILE_MODE15 },
- { PwrCmdWrite, 0x06400014, mmGB_TILE_MODE16 },
- { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE17 },
- { PwrCmdWrite, 0x0040000c, mmGB_TILE_MODE18 },
- { PwrCmdWrite, 0x0100000c, mmGB_TILE_MODE19 },
- { PwrCmdWrite, 0x0100001c, mmGB_TILE_MODE20 },
- { PwrCmdWrite, 0x01000034, mmGB_TILE_MODE21 },
- { PwrCmdWrite, 0x01000024, mmGB_TILE_MODE22 },
- { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE23 },
- { PwrCmdWrite, 0x0040001c, mmGB_TILE_MODE24 },
- { PwrCmdWrite, 0x01000020, mmGB_TILE_MODE25 },
- { PwrCmdWrite, 0x01000038, mmGB_TILE_MODE26 },
- { PwrCmdWrite, 0x02c00008, mmGB_TILE_MODE27 },
- { PwrCmdWrite, 0x02c00010, mmGB_TILE_MODE28 },
- { PwrCmdWrite, 0x06c00014, mmGB_TILE_MODE29 },
- { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE30 },
- { PwrCmdWrite, 0x00000000, mmGB_TILE_MODE31 },
- { PwrCmdWrite, 0x000000a8, mmGB_MACROTILE_MODE0 },
- { PwrCmdWrite, 0x000000a4, mmGB_MACROTILE_MODE1 },
- { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE2 },
- { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE3 },
- { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE4 },
- { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE5 },
- { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE6 },
- { PwrCmdWrite, 0x00000000, mmGB_MACROTILE_MODE7 },
- { PwrCmdWrite, 0x000000ee, mmGB_MACROTILE_MODE8 },
- { PwrCmdWrite, 0x000000ea, mmGB_MACROTILE_MODE9 },
- { PwrCmdWrite, 0x000000e9, mmGB_MACROTILE_MODE10 },
- { PwrCmdWrite, 0x000000e5, mmGB_MACROTILE_MODE11 },
- { PwrCmdWrite, 0x000000e4, mmGB_MACROTILE_MODE12 },
- { PwrCmdWrite, 0x000000e0, mmGB_MACROTILE_MODE13 },
- { PwrCmdWrite, 0x00000090, mmGB_MACROTILE_MODE14 },
- { PwrCmdWrite, 0x00000000, mmGB_MACROTILE_MODE15 },
- { PwrCmdWrite, 0x00900000, mmHDP_NONSURFACE_BASE },
- { PwrCmdWrite, 0x00008000, mmHDP_NONSURFACE_INFO },
- { PwrCmdWrite, 0x3fffffff, mmHDP_NONSURFACE_SIZE },
- { PwrCmdWrite, 0x00000003, mmBIF_FB_EN },
- //{ PwrCmdWrite, 0x00000000, mmMC_VM_FB_OFFSET },
- { PwrCmdWrite, 0x00000000, mmSRBM_CNTL },
- { PwrCmdWrite, 0x00020000, mmSRBM_CNTL },
- { PwrCmdWrite, 0x80000000, mmATC_VMID0_PASID_MAPPING },
- { PwrCmdWrite, 0x00000000, mmATC_VMID_PASID_MAPPING_UPDATE_STATUS },
- { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
- { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
- { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
- { PwrCmdWrite, 0xe0000000, mmGRBM_GFX_INDEX },
- { PwrCmdWrite, 0x00000000, mmCGTS_TCC_DISABLE },
- { PwrCmdWrite, 0x00000000, mmTCP_ADDR_CONFIG },
- { PwrCmdWrite, 0x000000ff, mmTCP_ADDR_CONFIG },
- { PwrCmdWrite, 0x76543210, mmTCP_CHAN_STEER_LO },
- { PwrCmdWrite, 0xfedcba98, mmTCP_CHAN_STEER_HI },
- { PwrCmdWrite, 0x00000000, mmDB_DEBUG2 },
- { PwrCmdWrite, 0x00000000, mmDB_DEBUG },
- { PwrCmdWrite, 0x00002b16, mmCP_QUEUE_THRESHOLDS },
- { PwrCmdWrite, 0x00006030, mmCP_MEQ_THRESHOLDS },
- { PwrCmdWrite, 0x01000104, mmSPI_CONFIG_CNTL_1 },
- { PwrCmdWrite, 0x98184020, mmPA_SC_FIFO_SIZE },
- { PwrCmdWrite, 0x00000001, mmVGT_NUM_INSTANCES },
- { PwrCmdWrite, 0x00000000, mmCP_PERFMON_CNTL },
- { PwrCmdWrite, 0x01180000, mmSQ_CONFIG },
- { PwrCmdWrite, 0x00000000, mmVGT_CACHE_INVALIDATION },
- { PwrCmdWrite, 0x00000000, mmSQ_THREAD_TRACE_BASE },
- { PwrCmdWrite, 0x0000df80, mmSQ_THREAD_TRACE_MASK },
- { PwrCmdWrite, 0x02249249, mmSQ_THREAD_TRACE_MODE },
- { PwrCmdWrite, 0x00000000, mmPA_SC_LINE_STIPPLE_STATE },
- { PwrCmdWrite, 0x00000000, mmCB_PERFCOUNTER0_SELECT1 },
- { PwrCmdWrite, 0x06000100, mmCGTT_VGT_CLK_CTRL },
- { PwrCmdWrite, 0x00000007, mmPA_CL_ENHANCE },
- { PwrCmdWrite, 0x00000001, mmPA_SC_ENHANCE },
- { PwrCmdWrite, 0x00ffffff, mmPA_SC_FORCE_EOV_MAX_CNTS },
- { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000010, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000020, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000030, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000040, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000050, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000060, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000070, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000080, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000090, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x000000a0, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x000000b0, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x000000c0, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x000000d0, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x000000e0, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x000000f0, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmRLC_PG_CNTL },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS2 },
- { PwrCmdWrite, 0x15000000, mmCP_ME_CNTL },
- { PwrCmdWrite, 0x50000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x0000000e, mmSH_MEM_APE1_BASE },
- { PwrCmdWrite, 0x0000020d, mmSH_MEM_APE1_LIMIT },
- { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000320, mmSH_MEM_CONFIG },
- { PwrCmdWrite, 0x00000000, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_RB_VMID },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
- { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
- { PwrCmdWrite, 0x00000000, mmRLC_SRM_CNTL },
- { PwrCmdWrite, 0x00000002, mmRLC_SRM_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_ME_CNTL },
- { PwrCmdWrite, 0x15000000, mmCP_ME_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x50000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x0840800a, mmCP_RB0_CNTL },
- { PwrCmdWrite, 0xf30fff0f, mmTCC_CTRL },
- { PwrCmdWrite, 0x00000002, mmTCC_EXE_DISABLE },
- { PwrCmdWrite, 0x000000ff, mmTCP_ADDR_CONFIG },
- { PwrCmdWrite, 0x540ff000, mmCP_CPC_IC_BASE_LO },
- { PwrCmdWrite, 0x000000b4, mmCP_CPC_IC_BASE_HI },
- { PwrCmdWrite, 0x00010000, mmCP_HYP_MEC1_UCODE_ADDR },
- { PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00221408, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00591260, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00621387, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00010000, mmCP_HYP_MEC2_UCODE_ADDR },
- { PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00221408, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00591260, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00621387, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x540fe800, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e020201, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e040204, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e060205, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54106f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000400b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00004000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00804fac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x540fef00, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0xc0031502, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00001e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x540ff000, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000145, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080061, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ccffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3cd08000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1cd0ffcf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x050c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x84c00000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000067, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000006a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000008f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000099, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800000a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800000af, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x388c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08880002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d808001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc0700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0d000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01b10, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x280c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x280c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x280c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca88004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc00006f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd4c0380, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdcc0388, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdcc038c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0c0390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0c0394, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4c0398, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4c039c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8c03a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8c03a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcecc03a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcecc03ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0c03b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0c03b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4c03b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4c03bc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8c03c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57b80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8c03c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfcc03c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57fc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfcc03cc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05dc002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc12009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d200a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc012009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25e01c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25e40300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25e800c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25ec003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e25c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31100006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4df0388, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d7038c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d5dc01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4e30390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d70394, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d62001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4e70398, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d7039c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d66401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4eb03a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d6a801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ef03a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d6ec01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4f303b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d73001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4f703b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703bc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d77401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4fb03c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7b801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ff03c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703cc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7fc01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d70380, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0085, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc006a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400051, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aac0027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04080002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000367, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9880fff3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04080010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80c0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80c0319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9880fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d4001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x155c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e80180, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900091a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280196, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d4fe04, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800001b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000032b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000352, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000035f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000047c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000019f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d98001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0044, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9400036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11540015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1998003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af0007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15dc000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d65400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a38003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd5c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800045, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411326a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415326b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000056, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40005a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29988000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000073, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00005f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26f00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af07fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04343000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf413267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd1c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0160, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc810001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b4c0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f4f400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55180020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af4007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33740003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ae8003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413348, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x958000d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000315, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04303000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1714000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d77400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c01e2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e5c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1334e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01334f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813351, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd881334d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cdcc011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05900008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00006a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0006b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d594002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc12e23, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd012e24, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc12e25, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b340057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b280213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980198, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20cc003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01e0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2d540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x078c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001239, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04f80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd5c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840007c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c018a6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e22, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800007c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c018a2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540188f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc013cfff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x38d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdcc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01882, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000304, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x49980198, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000329, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16ec001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1998003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a18003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24dc00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31e00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95801827, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14dc0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a0000ad, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af4003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca88005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f4b4009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d33400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1eecffdd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800003c3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa80030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a8004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e80042, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e8e800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de9c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce8c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd30011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11e80007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd300001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1660001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e320009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0328000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e72400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0430000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ac000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d310002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa87600, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280222, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22ec003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8380018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57b00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04343108, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2374007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32a80003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800003e7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980104, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x49980104, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800003f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf41326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x254c0700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a641fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0726, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a640200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1237b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01755, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde830000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0174c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bb80040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100044, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19180024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x551c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000043d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840006c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000043f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282000f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5e124dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e624001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980158, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x49980158, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980170, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1154000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e80488, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f807f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e40077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000048e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000494, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000685, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000686, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800006ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ccc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d79400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e7a400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52a8001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec0028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004cc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419324e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26e8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d324d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d290004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f8f4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f52800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50e00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004d1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0dc002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f534002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004d7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e804e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004e7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000505, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26edf000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05a80507, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000050c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000528, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000057d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800005c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800005f3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1be00fe4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000066, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ed6c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113271, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193272, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d51401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253276, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400061, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2730000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7db1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00062, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000063, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400065, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec0188, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26e01000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c131fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192007ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x69dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de20014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x561c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013344, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13345, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2010007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2010003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013344, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013345, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280042, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00124f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec0190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2154003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd800e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f598004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1be800e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801327a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800005f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xda000068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1be000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc63124dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fc14001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000697, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31100005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900008e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a9feff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d30b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00ac006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28880700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0006de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30d4000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41530b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19980028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800006c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8380023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa38011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202400d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000712, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e80714, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000071c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000720, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000747, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000071d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800007c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000732, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000745, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000744, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a64008c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0fff1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000723, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41f02f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000743, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffde, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195800e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dd7fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1a001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46200200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04283247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af80057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af40213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6f400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6990000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x329c325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x329c3269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x329c3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01defff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d8009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000078a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fff2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03e7ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3f0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03e4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d30b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000b80, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x203c003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19580066, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0120001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da18001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d24db, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580137b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19080070, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x190c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2518000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05a80809, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000080e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000080f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000898, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000946, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009e1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a80811, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000815, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000834, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3045, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c091, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000241, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02f0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4252087, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5668001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00021d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001a41, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43b02f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56f00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x950001fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aec0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a40006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de6000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10e40008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2110003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10ff9e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0245301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0121fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29108eff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0127ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0131fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd2400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0180003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd1c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a8089a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000089e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800008fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31300022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x964012a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02620c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000903, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31240022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ec30011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32f80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x67180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bfc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd981325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000915, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c1325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fff6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f818001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001606, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d838001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16240014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a2801f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00075e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f40014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33e80010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680ffec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a80948, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000094c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000099b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x964011fe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dda801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e838011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001802, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x469c0390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0014df, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31280014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8802ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800062, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31280034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a809e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a45, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e72401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04383000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b38007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4598001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002eb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002ed, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001715, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffbc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a55, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x233c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49302ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5198001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53b8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7db9801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01106, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c010fd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce4c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x58e801fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc01e2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e5c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x44cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55900020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x44cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd812e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd012e02, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd412e03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1e64001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ab1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a0010ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd880003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d403f7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41b0367, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d85800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280adc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000af1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000adf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ae7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8d2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d803f7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11940014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29544001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29544003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000af4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd44d2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd44dc000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8d2c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000b0a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd44d2c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28148004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4593240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0105e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef3400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14e80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a8000af, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a01fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3620005c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2464003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6290ce7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16ac001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ee6c00d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000367, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640102e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a00035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16f8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc035f0ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e764009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19b401f8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ae4003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000b7c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a4003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc01e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dbd800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d98ff15, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x592c00fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12e00016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x592c007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12e00015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1620000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12e4001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5924007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00fdb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780f5ca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x269c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e4004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f67000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f674002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab8c006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000bec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000b47, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a8004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18580037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x262001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d54001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd280200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd680208, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcda80210, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6930200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6970208, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc69b0210, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd900003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd940003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14fc0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24f800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d83c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x321c0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580ffee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c30, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9480000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800f29, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800f23, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800f1a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600f502, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0f500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000f05, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16e4001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640f4f4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40f4f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00ac005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28884900, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ff3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400ee1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c40a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c40c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c40d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d0007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15580010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x255400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c411, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c40f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c40e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c410, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c414, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c415, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c413, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c030011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c038011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431c417, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435c416, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c419, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc418, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf413261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13263, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813264, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc0030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d77000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51b80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f97801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ca7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc0031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435c40b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000cf4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc032800b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d42011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800e6c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x596001fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce0c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x505c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x122c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d1f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d57, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0328009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04143000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e51001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d2d0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19640057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19580213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19600199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da6400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04142000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d80034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280d83, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d8a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000db1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000dbc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e010001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d75400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x526c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2ec01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5ae0073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3c6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc3a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fff5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01c405, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd441c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3b1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3a5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00da7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5aac007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12d80017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e82400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e58c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19d4003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20880188, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20240090, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0001a2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2220003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e8000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80e71, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000edd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ea1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000eaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e7c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e87, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e8f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9e001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213264, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253263, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e82005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x59a001fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421c401, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c401, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ede, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5a10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5a50000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280eea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f11, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f2e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f1f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f26f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7daec01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5af8073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eba800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f25c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c405, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f24e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40f247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ef2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c034001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c038001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f88, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e52401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1334000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e02000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f63400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f9d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51e40020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13380016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1220001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31140005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31140006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280fb7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28140002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fc2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fd1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52a8003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140004b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x159c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31a00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31a40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e25800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0fff5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0340008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x208801a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000102f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1cccfe08, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00b33, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da2400f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da28002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1ac002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d2ac002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3ef40010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40f11d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c024001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100086, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5510003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001075, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d520002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cde0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e20001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001071, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00b01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc40003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a400e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12500009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x248c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x200c006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x200c0228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18881fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d4072c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc00d1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3094000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x38d80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x311c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30940007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1620001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a00030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000aac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07a810d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000104c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x200c007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192400fd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06681110, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19180070, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19100078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f40058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001117, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001118, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000112d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001130, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001133, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e8e8009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22a8003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22a80074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2774001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eb6800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25ecffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55700020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15f40010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x275c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc1c01e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e62000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001165, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e0d000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e02401e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05d80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc2401e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da2000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600ffe6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22640435, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0528117e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x312c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001185, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a0400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d81c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de2c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011a3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d654001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c020001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2730003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3b380006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3f38000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0430000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb10004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e57000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e578002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d67c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0be40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d3a4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e640010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07a811cf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00feb8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x954009a7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f0012f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f40612, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc00c1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39600004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0140004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a6c003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab00030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aac0fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001205, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a2800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30d00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640090f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19180038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab0c006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000127f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab0c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f67800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0012e1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x964008d7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9800036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012aa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a8002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7edec00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1858003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0cc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d407f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d5d4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d52000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d514002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d958001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd5c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ccc001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd980003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9800040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800051, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b74003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50700020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04e81324, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d71401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x596401fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b74008d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a640000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000132c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000133b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001344, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42530b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a68003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2024003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d19000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffe0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c007eb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0d001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x591c01fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45140210, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x595801fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a307fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x23304076, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4514020c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a2001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a204001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a64003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15dc000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dcdc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5dc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45140248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0434000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdb000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0337fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f220009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d01c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f01c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd240001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19682011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5a6c01fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eeac00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfa0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540073d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18c80066, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4220000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e80007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ec0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5310000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001465, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f02011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5aec01fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a8146a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1f0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f334002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e024001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000144a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fbfc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800014a9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0328007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c428001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c430001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a0800fd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x109c000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce080228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9880000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ec75, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52a80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80288, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf080290, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0802a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4802a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc802b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80802b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x178c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf8c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8802c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc802c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8802d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8802d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25b8ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd2800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5230309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e3a400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001539, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd880353, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b0353, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd14005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000154f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd900309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd910ce7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4190ce6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d918005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d918004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd810ce6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdd1054f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000156e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x090c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdcd050e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x110c0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc480329, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc48032a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc4802e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09940001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x44100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x69100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000157f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970290, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b0288, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f0298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dcdc002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d924019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d26400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001579, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d010021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d914019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd480298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8802a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10d40010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12180016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc51f0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d95800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d62000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdd00309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce113320, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc01e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015aa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a302b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab02a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4c0319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d8002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea14005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015bc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d25000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0d3330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0802b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8802b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab02e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa807f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f02d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49702d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f02c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d4e000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d964002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cde4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de94001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd64002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015cd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d698002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd4802d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x129c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc50f0319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a0000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1198000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd953300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e0e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a8000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce953301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce100319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b70280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x536c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780eb68, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001609, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30b40000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b70258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53780020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb3801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7faf8019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x67b40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bb0260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fab8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf880260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66f40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7f4009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fff7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x269c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a00018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a00060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x269c0018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a40060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b70228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f514005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001644, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f130005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001688, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f130004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01051e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d051f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ed2c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01051f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5170309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195c07f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x196007f6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x6b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001665, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a702a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab0298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f634014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8113320, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8802a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5170319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b702b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x255c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f5f4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8113330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4802b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195c07e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x196007ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8353300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8353301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4802d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8100309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x64d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580005c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd2000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df5c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800016f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a700064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800016df, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800016f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc0064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00042, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf0258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53fc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e7e401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x667c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eebc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x43300007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7db30011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03ec005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfca200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x203c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0017f5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc01e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00185b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffd5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ea24, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d4001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d52400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f0258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a30250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d534002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dae4005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000174f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00178a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40fff3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab0268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7daa4005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32a0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001765, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0017f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b3034b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f13000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001855, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32a4001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd080260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ffc0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ec28001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e72400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680fff7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa4003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32680003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa400e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017e2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc027ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2e6400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a4009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e403e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26680003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e40064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea64002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4292083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2024007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a70280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab0278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae8014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43b02eb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42302ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x47b8020c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1220000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a206032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x513c001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e3e001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000180f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b3c0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd200000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc30001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc413248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33fc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bfc0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd441326a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x173c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3f0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001842, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x23fc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd441326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1fb8ffc6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xddc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001852, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41f02ed, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42302ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2a0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x313c0bcc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x393c051f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d3c050e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x393c0560, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d3c054f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x393c1538, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d3c1537, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e8007c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a8189a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800018c5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800018f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09240002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a24002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd8c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bb80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600e8a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640e8a5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800018a9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dad800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffd2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x442c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940fff1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26240007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940fff7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc023007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e4003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dee000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x261c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06281911, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001915, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800019af, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a2b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc48032b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc480333, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc48033b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc480343, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98800011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b3c0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e3e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a1c003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00065, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800062, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x43bc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fcbc001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc7df032b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1fc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0101, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001994, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001982, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffcb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001995, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x41bc0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53fc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e7fc011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x653c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dbd8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff8f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a70003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a21, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f270009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x266400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a0f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e730002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4252083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x267000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a22, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff9f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001a31, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b180057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1a000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30f00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800056, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001aa2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a90, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001aa3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4664001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x244c00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc4c0200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc44f0200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d158010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x059cc000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccdd0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500e69a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0120840, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c0040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001ae8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0121841, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940e66b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00047, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d003ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d47fea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d87ff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40004e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c405, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x295c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4110000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0160800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0164010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400048, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c40a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901c40d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c410, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c40e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c40f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140096, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411c401, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c40c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0180034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c411, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841c414, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2468000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419c416, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x41980003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dda0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c40c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901c411, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c412, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce292e40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc120000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31144000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc3c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780e601, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x188cfff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04e40002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b74, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x54106500, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e020204, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00a0505, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf8c007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8900904, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8911a04, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8920304, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8930b44, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921c0d0c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921c1c13, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921d0c12, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x811c1d1c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x811c111c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921cff1c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921dff10, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x81181d1c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e040218, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x54106900, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0x7e080200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e100204, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbefc00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24200087, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x262200ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000001f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20222282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28182111, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x54116f00, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54116f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb454105e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54117300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4541065, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54117700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4541069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000444, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000008a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54117b00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54116f00, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54117300, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54117700, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54117b00, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000104, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000204, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000304, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000404, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000504, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000604, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000704, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000105, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000205, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000305, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000405, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000505, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000605, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000705, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000106, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000206, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000306, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000406, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000506, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000606, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000706, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000107, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000207, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000307, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000407, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000507, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000607, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000707, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000008, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000108, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000208, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000308, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000408, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000508, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000608, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000708, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000009, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000109, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000209, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000309, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000409, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000509, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000609, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000709, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x01010101, mmCP_PQ_WPTR_POLL_CNTL1 },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdEnd, 0x00000000, 0x00000000 },
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index a4c8b09b6f14..57a0467b7267 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -283,6 +283,8 @@ static inline bool phm_cap_enabled(const uint32_t *caps, enum phm_platform_caps
(1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1)))));
}
+#define PP_CAP(c) phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, (c))
+
#define PP_PCIEGenInvalid 0xffff
enum PP_PCIEGen {
PP_PCIEGen1 = 0, /* PCIE 1.0 - Transfer rate of 2.5 GT/s */
@@ -295,7 +297,7 @@ typedef enum PP_PCIEGen PP_PCIEGen;
#define PP_Min_PCIEGen PP_PCIEGen1
#define PP_Max_PCIEGen PP_PCIEGen3
#define PP_Min_PCIELane 1
-#define PP_Max_PCIELane 32
+#define PP_Max_PCIELane 16
enum phm_clock_Type {
PHM_DispClock = 1,
@@ -373,8 +375,6 @@ struct phm_odn_clock_levels {
extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr);
extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr);
-extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate);
-extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate);
extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
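
A minimal sketch (not part of the patch) of how the new PP_CAP() helper added in the hunk above is meant to be used. It assumes a "struct pp_hwmgr *hwmgr" variable is in scope, since the macro expands against the local name "hwmgr"; the cap and helper names are placeholders:

	/* Hedged illustration only: cap name and do_enable() are hypothetical. */
	static int example_enable_feature(struct pp_hwmgr *hwmgr)
	{
		/* Before the patch, callers open-coded the lookup: */
		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_SomeFeature))	/* hypothetical cap */
			return do_enable(hwmgr);			/* hypothetical helper */

		/* With PP_CAP() the same check shrinks to: */
		if (PP_CAP(PHM_PlatformCaps_SomeFeature))		/* hypothetical cap */
			return do_enable(hwmgr);			/* hypothetical helper */

		return 0;
	}
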
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 91b0105e8240..004a40e88bde 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -32,6 +32,7 @@
#include "ppatomctrl.h"
#include "hwmgr_ppt.h"
#include "power_state.h"
+#include "cgs_linux.h"
struct pp_instance;
struct pp_hwmgr;
@@ -61,10 +62,6 @@ struct vi_dpm_table {
struct vi_dpm_level dpm_level[1];
};
-enum PP_Result {
- PP_Result_TableImmediateExit = 0x13,
-};
-
#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
#define PCIE_PERF_REQ_GEN1 2
@@ -103,17 +100,6 @@ enum PHM_BackEnd_Magic {
PHM_Rv_Magic = 0x20161121
};
-
-#define PHM_PCIE_POWERGATING_TARGET_GFX 0
-#define PHM_PCIE_POWERGATING_TARGET_DDI 1
-#define PHM_PCIE_POWERGATING_TARGET_PLLCASCADE 2
-#define PHM_PCIE_POWERGATING_TARGET_PHY 3
-
-typedef int (*phm_table_function)(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result);
-
-typedef bool (*phm_check_function)(struct pp_hwmgr *hwmgr);
-
struct phm_set_power_state_input {
const struct pp_hw_power_state *pcurrent_state;
const struct pp_hw_power_state *pnew_state;
@@ -149,30 +135,6 @@ struct phm_gfx_arbiter {
uint32_t fclk;
};
-/* Entries in the master tables */
-struct phm_master_table_item {
- phm_check_function isFunctionNeededInRuntimeTable;
- phm_table_function tableFunction;
-};
-
-enum phm_master_table_flag {
- PHM_MasterTableFlag_None = 0,
- PHM_MasterTableFlag_ExitOnError = 1,
-};
-
-/* The header of the master tables */
-struct phm_master_table_header {
- uint32_t storage_size;
- uint32_t flags;
- const struct phm_master_table_item *master_list;
-};
-
-struct phm_runtime_table_header {
- uint32_t storage_size;
- bool exit_error;
- phm_table_function *function_list;
-};
-
struct phm_clock_array {
uint32_t count;
uint32_t values[1];
@@ -216,19 +178,6 @@ struct phm_phase_shedding_limits_record {
uint32_t Mclk;
};
-
-extern int phm_dispatch_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table,
- void *input, void *output);
-
-extern int phm_construct_table(struct pp_hwmgr *hwmgr,
- const struct phm_master_table_header *master_table,
- struct phm_runtime_table_header *rt_table);
-
-extern int phm_destroy_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table);
-
-
struct phm_uvd_clock_voltage_dependency_record {
uint32_t vclk;
uint32_t dclk;
@@ -286,6 +235,39 @@ struct phm_vce_clock_voltage_dependency_table {
struct phm_vce_clock_voltage_dependency_record entries[1];
};
+struct pp_smumgr_func {
+ int (*smu_init)(struct pp_hwmgr *hwmgr);
+ int (*smu_fini)(struct pp_hwmgr *hwmgr);
+ int (*start_smu)(struct pp_hwmgr *hwmgr);
+ int (*check_fw_load_finish)(struct pp_hwmgr *hwmgr,
+ uint32_t firmware);
+ int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr);
+ int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr,
+ uint32_t firmware);
+ int (*get_argument)(struct pp_hwmgr *hwmgr);
+ int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg);
+ int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter);
+ int (*download_pptable_settings)(struct pp_hwmgr *hwmgr,
+ void **table);
+ int (*upload_pptable_settings)(struct pp_hwmgr *hwmgr);
+ int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type);
+ int (*process_firmware_header)(struct pp_hwmgr *hwmgr);
+ int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr);
+ int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr);
+ int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr);
+ int (*init_smc_table)(struct pp_hwmgr *hwmgr);
+ int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr);
+ int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr);
+ int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr);
+ uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
+ uint32_t (*get_mac_definition)(uint32_t value);
+ bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
+ int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request);
+ bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr);
+};
+
struct pp_hwmgr_func {
int (*backend_init)(struct pp_hwmgr *hw_mgr);
int (*backend_fini)(struct pp_hwmgr *hw_mgr);
@@ -311,10 +293,10 @@ struct pp_hwmgr_func {
unsigned long, struct pp_power_state *);
int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr);
int (*powerdown_uvd)(struct pp_hwmgr *hwmgr);
- int (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate);
- int (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate);
- int (*get_mclk)(struct pp_hwmgr *hwmgr, bool low);
- int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
+ void (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate);
+ void (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate);
+ uint32_t (*get_mclk)(struct pp_hwmgr *hwmgr, bool low);
+ uint32_t (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
int (*power_state_set)(struct pp_hwmgr *hwmgr,
const void *state);
int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
@@ -328,8 +310,8 @@ struct pp_hwmgr_func {
int (*get_temperature)(struct pp_hwmgr *hwmgr);
int (*stop_thermal_controller)(struct pp_hwmgr *hwmgr);
int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
- int (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode);
- int (*get_fan_control_mode)(struct pp_hwmgr *hwmgr);
+ void (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode);
+ uint32_t (*get_fan_control_mode)(struct pp_hwmgr *hwmgr);
int (*set_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t percent);
int (*get_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t *speed);
int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t percent);
@@ -378,6 +360,15 @@ struct pp_hwmgr_func {
struct amd_pp_profile *request);
int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable);
int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr);
+ int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count);
+ int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
+ int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range);
+ int (*notify_cac_buffer_info)(struct pp_hwmgr *hwmgr,
+ uint32_t virtual_addr_low,
+ uint32_t virtual_addr_hi,
+ uint32_t mc_addr_low,
+ uint32_t mc_addr_hi,
+ uint32_t size);
};
struct pp_table_func {
@@ -745,7 +736,7 @@ struct pp_hwmgr {
enum amd_dpm_forced_level dpm_level;
enum amd_dpm_forced_level saved_dpm_level;
- bool block_hw_access;
+ enum amd_dpm_forced_level request_dpm_level;
struct phm_gfx_arbiter gfx_arbiter;
struct phm_acp_arbiter acp_arbiter;
struct phm_uvd_arbiter uvd_arbiter;
@@ -754,19 +745,17 @@ struct pp_hwmgr {
void *pptable;
struct phm_platform_descriptor platform_descriptor;
void *backend;
+
+ void *smu_backend;
+ const struct pp_smumgr_func *smumgr_funcs;
+ bool is_kicker;
+ bool reload_fw;
+
enum PP_DAL_POWERLEVEL dal_power_level;
struct phm_dynamic_state_info dyn_state;
- struct phm_runtime_table_header setup_asic;
- struct phm_runtime_table_header power_down_asic;
- struct phm_runtime_table_header disable_dynamic_state_management;
- struct phm_runtime_table_header enable_dynamic_state_management;
- struct phm_runtime_table_header set_power_state;
- struct phm_runtime_table_header enable_clock_power_gatings;
- struct phm_runtime_table_header display_configuration_changed;
- struct phm_runtime_table_header start_thermal_controller;
- struct phm_runtime_table_header set_temperature_range;
const struct pp_hwmgr_func *hwmgr_func;
const struct pp_table_func *pptable_func;
+
struct pp_power_state *ps;
enum pp_power_source power_source;
uint32_t num_ps;
@@ -784,26 +773,44 @@ struct pp_hwmgr {
struct amd_pp_display_configuration display_config;
uint32_t feature_mask;
- /* power profile */
+ /* UMD Pstate */
struct amd_pp_profile gfx_power_profile;
struct amd_pp_profile compute_power_profile;
struct amd_pp_profile default_gfx_power_profile;
struct amd_pp_profile default_compute_power_profile;
enum amd_pp_profile_type current_power_profile;
+ bool en_umd_pstate;
+};
+
+struct cgs_irq_src_funcs {
+ cgs_irq_source_set_func_t set;
+ cgs_irq_handler_func_t handler;
};
extern int hwmgr_early_init(struct pp_instance *handle);
extern int hwmgr_hw_init(struct pp_instance *handle);
extern int hwmgr_hw_fini(struct pp_instance *handle);
+extern int hwmgr_hw_suspend(struct pp_instance *handle);
+extern int hwmgr_hw_resume(struct pp_instance *handle);
+extern int hwmgr_handle_task(struct pp_instance *handle,
+ enum amd_pp_task task_id,
+ void *input, void *output);
extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
uint32_t value, uint32_t mask);
-extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
+extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
uint32_t indirect_port,
uint32_t index,
uint32_t value,
uint32_t mask);
+extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t index,
+ uint32_t value, uint32_t mask);
+extern int phm_wait_for_indirect_register_unequal(
+ struct pp_hwmgr *hwmgr,
+ uint32_t indirect_port, uint32_t index,
+ uint32_t value, uint32_t mask);
extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
@@ -888,5 +895,58 @@ extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_t
PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
+#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
+ phm_wait_for_indirect_register_unequal(hwmgr, \
+ mm##port##_INDEX, index, value, mask)
+
+#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field) )
+
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ port, index, value, mask) \
+ phm_wait_for_indirect_register_unequal(hwmgr, \
+ mm##port##_INDEX_11, index, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
+
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \
+ port, index, value, mask) \
+ phm_wait_on_indirect_register(hwmgr, \
+ mm##port##_INDEX_11, index, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
+
+#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ index, value, mask) \
+ phm_wait_for_register_unequal(hwmgr, \
+ index, value, mask)
+
+#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
+ PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ mm##reg, value, mask)
+
+#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
+ PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
#endif /* _HWMGR_H_ */
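
A hedged sketch (not part of the patch) of how the *_UNEQUAL wait macros introduced above chain down to the new int-returning phm_wait_for_register_unequal(); the register and field names are placeholders, while PHM_FIELD_SHIFT()/PHM_FIELD_MASK() already exist in this header:

	/* Illustration only: SOME_STATUS/BUSY are hypothetical register/field names. */
	static int example_wait_not_busy(struct pp_hwmgr *hwmgr)
	{
		/*
		 * PHM_WAIT_FIELD_UNEQUAL(hwmgr, SOME_STATUS, BUSY, 1) expands to
		 *
		 *   phm_wait_for_register_unequal(hwmgr, mmSOME_STATUS,
		 *           1 << PHM_FIELD_SHIFT(SOME_STATUS, BUSY),
		 *           PHM_FIELD_MASK(SOME_STATUS, BUSY));
		 *
		 * i.e. poll until the masked field no longer equals the given
		 * value, returning an error code instead of the old void-returning
		 * wait helpers.
		 */
		return PHM_WAIT_FIELD_UNEQUAL(hwmgr, SOME_STATUS, BUSY, 1);
	}
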
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
index 0de443612312..6a53b7e74ccd 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
@@ -29,10058 +29,1764 @@
#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c
#define mmCP_HYP_MEC2_UCODE_DATA 0xf81d
-enum PWR_Command {
- PwrCmdNull = 0,
- PwrCmdWrite,
- PwrCmdEnd,
- PwrCmdMax
-};
-
-typedef enum PWR_Command PWR_Command;
-
struct PWR_Command_Table {
- PWR_Command command;
uint32_t data;
uint32_t reg;
};
typedef struct PWR_Command_Table PWR_Command_Table;
+struct PWR_DFY_Section {
+ uint32_t dfy_cntl;
+ uint32_t dfy_addr_hi, dfy_addr_lo;
+ uint32_t dfy_size;
+ uint32_t dfy_data[];
+};
+
+typedef struct PWR_DFY_Section PWR_DFY_Section;
+
+static const PWR_Command_Table pwr_virus_table_pre[] = {
+ { 0x00000000, mmRLC_CNTL },
+ { 0x00000002, mmRLC_SRM_CNTL },
+ { 0x15000000, mmCP_ME_CNTL },
+ { 0x50000000, mmCP_MEC_CNTL },
+ { 0x80000004, mmCP_DFY_CNTL },
+ { 0x0840800a, mmCP_RB0_CNTL },
+ { 0xf30fff0f, mmTCC_CTRL },
+ { 0x00000002, mmTCC_EXE_DISABLE },
+ { 0x000000ff, mmTCP_ADDR_CONFIG },
+ { 0x540ff000, mmCP_CPC_IC_BASE_LO },
+ { 0x000000b4, mmCP_CPC_IC_BASE_HI },
+ { 0x00010000, mmCP_HYP_MEC1_UCODE_ADDR },
+ { 0x00041b75, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000710e8, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000910dd, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000a1081, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000b016f, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000c0e3c, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000d10ec, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000e0188, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00101b5d, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00150a6c, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00170c5e, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x001d0c8c, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x001e0cfe, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00221408, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00370d7b, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00390dcb, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x003c142f, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x003f0b27, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00400e63, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00500f62, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00460fa7, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00490fa7, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x005811d4, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00680ad6, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00760b00, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00780b0c, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00790af7, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x007d1aba, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x007e1abe, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00591260, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x005a12fb, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00861ac7, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x008c1b01, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x008d1b34, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00a014b9, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00a1152e, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00a216fb, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00a41890, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00a31906, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00a50b14, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00621387, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x005c0b27, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00160a75, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { 0x00010000, mmCP_HYP_MEC2_UCODE_ADDR },
+ { 0x00041b75, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000710e8, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000910dd, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000a1081, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000b016f, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000c0e3c, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000d10ec, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000e0188, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00101b5d, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00150a6c, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00170c5e, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x001d0c8c, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x001e0cfe, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00221408, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00370d7b, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00390dcb, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x003c142f, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x003f0b27, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00400e63, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00500f62, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00460fa7, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00490fa7, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x005811d4, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00680ad6, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00760b00, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00780b0c, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00790af7, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x007d1aba, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x007e1abe, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00591260, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x005a12fb, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00861ac7, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x008c1b01, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x008d1b34, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00a014b9, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00a1152e, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00a216fb, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00a41890, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00a31906, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00a50b14, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00621387, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x005c0b27, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00160a75, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { 0x00000000, 0xFFFFFFFF },
+};
+
+static const PWR_DFY_Section pwr_virus_section1 = {
+ .dfy_cntl = 0x80000004,
+ .dfy_addr_hi = 0x000000b4,
+ .dfy_addr_lo = 0x540fe800,
+ .dfy_data = {
+ 0x7e000200, 0x7e020201, 0x7e040204, 0x7e060205, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701,
+ 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0xbf810000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x54106f00, 0x000400b4, 0x00004000, 0x00804fac, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ },
+ .dfy_size = 416
+};
+
+static const PWR_DFY_Section pwr_virus_section2 = {
+ .dfy_cntl = 0x80000004,
+ .dfy_addr_hi = 0x000000b4,
+ .dfy_addr_lo = 0x540fef00,
+ .dfy_data = {
+ 0xc0031502, 0x00001e00, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ },
+ .dfy_size = 16
+};
-#define PWR_VIRUS_TABLE_SIZE 10031
+static const PWR_DFY_Section pwr_virus_section3 = {
+ .dfy_cntl = 0x80000004,
+ .dfy_addr_hi = 0x000000b4,
+ .dfy_addr_lo = 0x540ff000,
+ .dfy_data = {
+ 0xc424000b, 0x80000145, 0x94800001, 0x94c00001, 0x95000001, 0x95400001, 0x95800001, 0xdc810000,
+ 0xdcc10000, 0xdd010000, 0xdd410000, 0xdd810000, 0xc4080061, 0xd8400013, 0xd8000003, 0xc40c0001,
+ 0x24ccffff, 0x3cd08000, 0x9500fffd, 0x1cd0ffcf, 0x7d018001, 0xc4140004, 0x050c0019, 0xd8400008,
+ 0x84c00000, 0x80000023, 0x80000067, 0x8000006a, 0x8000006d, 0x80000079, 0x80000084, 0x8000008f,
+ 0x80000099, 0x800000a0, 0x800000af, 0xd8400053, 0xc4080007, 0x388c0001, 0x08880002, 0x04100003,
+ 0x94c00005, 0x98800003, 0x04100004, 0x8000002d, 0x04100005, 0x8c00003f, 0x8c000043, 0x28cc0000,
+ 0xccc00050, 0x8c000055, 0x28080001, 0xcc000004, 0x7d808001, 0xd8400013, 0xd88130b8, 0xcd400008,
+ 0xdc180000, 0xdc140000, 0xdc100000, 0xdc0c0000, 0xcc800005, 0xdc080000, 0x80000168, 0xc40c000e,
+ 0x28cc0008, 0xccc00013, 0x90000000, 0xcd013278, 0xc4113278, 0x95000001, 0x24cc0700, 0xd8400029,
+ 0xc4113255, 0xcd01324f, 0xc4113254, 0x1d10ffdf, 0xcd013254, 0x10cc0014, 0x1d10c017, 0x7d0d000a,
+ 0xd8400013, 0xd8400008, 0xcd0130b7, 0x14cc0010, 0x90000000, 0xd9c00036, 0x8000005d, 0xd8400013,
+ 0xc00c4000, 0xccc130b5, 0xc40c000e, 0x28cc0008, 0xccc00013, 0xc40c0021, 0x14d00011, 0x9500fffe,
+ 0xdc030000, 0xd800000c, 0xd800000d, 0xc40c005e, 0x94c01b10, 0xd8400013, 0x90000000, 0xc00e0080,
+ 0xccc130b5, 0x8000013b, 0xc00e0800, 0xccc130b5, 0x8000013b, 0xd8400053, 0x04100006, 0x8c00003f,
+ 0x8c000043, 0x28cc0000, 0xccc00050, 0x8c000055, 0x280c0008, 0xccc00052, 0xd8000021, 0x28180039,
+ 0x80000034, 0xd8400053, 0x04100007, 0x8c00003f, 0x8c000043, 0x28cc0001, 0xccc00050, 0x8c000055,
+ 0x280c0010, 0xccc00052, 0x28180039, 0x80000034, 0xd8400053, 0x04100008, 0x8c00003f, 0x8c000043,
+ 0x28cc0003, 0xccc00050, 0x8c000055, 0x280c0020, 0xccc00052, 0x28180039, 0x80000034, 0xdc030000,
+ 0xd8000069, 0x28080001, 0xc428000d, 0x7ca88004, 0xcc800079, 0x04280001, 0xcc00006f, 0x8000013b,
+ 0x80000034, 0x04100010, 0x8c00003f, 0x8c000043, 0xccc00078, 0x8c000055, 0x28180080, 0x80000034,
+ 0x04100001, 0xc40c000e, 0x28cc0008, 0xccc00013, 0xcd013278, 0xc4113278, 0x95000001, 0xc00c4000,
+ 0xc4113254, 0x1d10c017, 0xd8400013, 0xd8400008, 0xccc130b5, 0xcd0130b7, 0x8000013b, 0x95c00001,
+ 0x96000001, 0x96400001, 0x96800001, 0x96c00001, 0x97000001, 0x97400001, 0x97800001, 0x97c00001,
+ 0xdc810000, 0xc40c000c, 0xcd4c0380, 0xcdcc0388, 0x55dc0020, 0xcdcc038c, 0xce0c0390, 0x56200020,
+ 0xce0c0394, 0xce4c0398, 0x56640020, 0xce4c039c, 0xce8c03a0, 0x56a80020, 0xce8c03a4, 0xcecc03a8,
+ 0x56ec0020, 0xcecc03ac, 0xcf0c03b0, 0x57300020, 0xcf0c03b4, 0xcf4c03b8, 0x57740020, 0xcf4c03bc,
+ 0xcf8c03c0, 0x57b80020, 0xcf8c03c4, 0xcfcc03c8, 0x57fc0020, 0xcfcc03cc, 0xd9000033, 0xc41c0009,
+ 0x25dc0010, 0x95c0fffe, 0xd8400013, 0xc41c000c, 0x05dc002f, 0xcdc12009, 0xc41d200a, 0xd8400013,
+ 0xcc012009, 0xd9000034, 0x25e01c00, 0x12200013, 0x25e40300, 0x12640008, 0x25e800c0, 0x12a80002,
+ 0x25ec003f, 0x7e25c00a, 0x7eae400a, 0x7de5c00a, 0xddc10000, 0xc02ee000, 0xcec1c200, 0xc40c005f,
+ 0xccc00037, 0x24d000ff, 0x31100006, 0x9500007b, 0x8c000190, 0xdc1c0000, 0xd8400013, 0xcdc1c200,
+ 0xc40c000c, 0xc4df0388, 0xc4d7038c, 0x51540020, 0x7d5dc01a, 0xc4e30390, 0xc4d70394, 0x51540020,
+ 0x7d62001a, 0xc4e70398, 0xc4d7039c, 0x51540020, 0x7d66401a, 0xc4eb03a0, 0xc4d703a4, 0x51540020,
+ 0x7d6a801a, 0xc4ef03a8, 0xc4d703ac, 0x51540020, 0x7d6ec01a, 0xc4f303b0, 0xc4d703b4, 0x51540020,
+ 0x7d73001a, 0xc4f703b8, 0xc4d703bc, 0x51540020, 0x7d77401a, 0xc4fb03c0, 0xc4d703c4, 0x51540020,
+ 0x7d7b801a, 0xc4ff03c8, 0xc4d703cc, 0x51540020, 0x7d7fc01a, 0xdc080000, 0xcc800013, 0xc4d70380,
+ 0xc4080001, 0x1c88001c, 0xcd400008, 0xc40c0083, 0x94c00010, 0xdc0e0000, 0x94c0000e, 0xc40c0082,
+ 0x24d00001, 0x9900000b, 0x18cc01e3, 0x3cd00004, 0x95000008, 0xc40c0085, 0x18cc006a, 0x98c00005,
+ 0xc40c0082, 0x18cc01e3, 0x3cd00004, 0x9900fffa, 0xdc180000, 0xdc140000, 0xdc100000, 0xdc0c0000,
+ 0xcc800004, 0xdc080000, 0x90000000, 0xc4080001, 0x1c88001c, 0xcd400008, 0xdc180000, 0xdc140000,
+ 0xdc100000, 0xdc0c0000, 0xcc800004, 0xdc080000, 0x90000000, 0xd8400051, 0xc428000c, 0x04180018,
+ 0x32640002, 0x9a80001f, 0x9a40001e, 0xcd800013, 0xc4293265, 0x040c0000, 0x1aac0027, 0x2aa80080,
+ 0xce813265, 0x9ac00017, 0xd80002f1, 0x04080002, 0x08880001, 0xd8080250, 0xd8080258, 0xd8080230,
+ 0xd8080238, 0xd8080240, 0xd8080248, 0xd8080268, 0xd8080270, 0xd8080278, 0xd8080280, 0xd8080228,
+ 0xd8000367, 0x9880fff3, 0x04080010, 0x08880001, 0xd80c0309, 0xd80c0319, 0x04cc0001, 0x9880fffc,
+ 0x7c408001, 0x88000000, 0xc00e0100, 0xd8400013, 0xd8400008, 0xccc130b5, 0x8000016e, 0xc4180032,
+ 0x29980008, 0xcd800013, 0x95800001, 0x7c40c001, 0x18d0003f, 0x24d4001f, 0x24d80001, 0x155c0001,
+ 0x05e80180, 0x9900000b, 0x202c003d, 0xcd800010, 0xcec1325b, 0xc42d325b, 0x96c00001, 0x86800000,
+ 0x80000168, 0x80000aa7, 0x80000bfc, 0x800012e9, 0xc4200007, 0x0a200001, 0xce000010, 0x80001b70,
+ 0x7c40c001, 0x8c000190, 0xc410001b, 0xd8000032, 0xd8000031, 0x9900091a, 0x7c408001, 0x88000000,
+ 0x24d000ff, 0x05280196, 0x18d4fe04, 0x29540008, 0xcd400013, 0x86800000, 0x800001b4, 0x8000032b,
+ 0x80000350, 0x80000352, 0x8000035f, 0x80000701, 0x8000047c, 0x8000019f, 0x80000800, 0xc419325b,
+ 0x1d98001f, 0xcd81325b, 0x8c00003f, 0xc4140004, 0xd8400008, 0x04100002, 0x8c000043, 0x28cc0002,
+ 0xccc00050, 0xc43c0044, 0x27fc0003, 0x9bc00002, 0x97c00006, 0xc00c4000, 0xccc130b5, 0x8c000055,
+ 0xd8400013, 0xd88130b8, 0xcd400008, 0x90000000, 0xd8400008, 0xcd400013, 0x7d40c001, 0xd8400028,
+ 0xd8400029, 0xd9400036, 0xc4193256, 0xc41d3254, 0x15540008, 0xcd400009, 0xcd40005b, 0xcd40005e,
+ 0xcd40005d, 0xd840006d, 0xc421325a, 0xc42d3249, 0x11540015, 0x19a4003c, 0x1998003f, 0x1af0007d,
+ 0x11dc000b, 0x1264001f, 0x15dc000d, 0x7d65400a, 0x13300018, 0x1a38003f, 0x7dd5c00a, 0x7df1c00a,
+ 0xcd800045, 0xcdc00100, 0xc411326a, 0xc415326b, 0xc419326c, 0xc41d326d, 0xc425326e, 0xc4293279,
+ 0xce800077, 0xcd000056, 0xcd400057, 0xcd800058, 0xcdc00059, 0xc4193265, 0x259c8000, 0x99c00004,
+ 0xce40005a, 0x29988000, 0xcd813265, 0xc4113248, 0x2510000f, 0xcd000073, 0xc418000d, 0xc411326f,
+ 0x17300019, 0x97000009, 0x25140fff, 0x95400007, 0xd800003a, 0x8c001b6d, 0xc4153279, 0xcd400077,
+ 0xcd00005f, 0xd8000075, 0x26f00001, 0x15100010, 0x7d190004, 0xcd000035, 0x97000035, 0x1af07fe8,
+ 0xd8800013, 0xd8400010, 0xd8400008, 0xcf00000d, 0xcf00000a, 0x8c001427, 0x04340022, 0x07740001,
+ 0x04300010, 0xdf430000, 0x7c434001, 0x7c408001, 0xd4412e01, 0x0434001e, 0xdf430000, 0xd4400078,
+ 0xdf030000, 0xd4412e40, 0xd8400013, 0xcc41c030, 0xcc41c031, 0xc43dc031, 0xccc00013, 0x04343000,
+ 0xc4113246, 0xc41d3245, 0xcf413267, 0x51100020, 0x7dd1c01a, 0xc4353267, 0x45dc0160, 0xc810001f,
+ 0x1b4c0057, 0x1b700213, 0x1b740199, 0x7f4f400a, 0x7f73400a, 0x55180020, 0x2198003f, 0xd1c00025,
+ 0xcf400024, 0xcd000026, 0xcd800026, 0xd8400027, 0x9bc00001, 0x248dfffe, 0xd8800013, 0xccc12e00,
+ 0x7c434001, 0x7c434001, 0x8c00142b, 0xc43c000e, 0x1af4007d, 0x2bfc0008, 0x33740003, 0x26d80001,
+ 0xcfc00013, 0x1ae8003e, 0x9680000c, 0xc4253277, 0x26680001, 0x96800009, 0x2a640002, 0xce413277,
+ 0xd8400013, 0xc4253348, 0xce413348, 0xc4253348, 0x96400001, 0xcfc00013, 0x9b400003, 0x958000d8,
+ 0x80000315, 0xc4253277, 0x04303000, 0x26680001, 0xcf013267, 0xc4193246, 0xc41d3245, 0xc4313267,
+ 0x96800041, 0x51980020, 0x1b342010, 0x7d9d801a, 0x1714000c, 0x25540800, 0x1b30c012, 0x459801b0,
+ 0x7d77400a, 0x7f37000a, 0x2b300000, 0xcf00001c, 0xd180001e, 0xd8400021, 0x04240010, 0x199c01e2,
+ 0x7e5e4002, 0x3e5c0004, 0x3e540002, 0xc428000f, 0x9a80ffff, 0x95c00006, 0xc80c0011, 0xc8140011,
+ 0x54d00020, 0x55580020, 0x80000282, 0x95400015, 0xc80c0011, 0x0a640002, 0x041c0001, 0x45980008,
+ 0x54d00020, 0x96400004, 0xc8140011, 0x45980004, 0x041c0000, 0xcf00001c, 0xd180001e, 0xd8400021,
+ 0xc428000f, 0x9a80ffff, 0x99c00003, 0xc8180011, 0x80000282, 0xc8140011, 0x55580020, 0x80000282,
+ 0x45980004, 0xc80c0011, 0xcf00001c, 0xd180001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc8100011,
+ 0xc8140011, 0x55580020, 0xd8400013, 0xccc1334e, 0xcd01334f, 0xcd413350, 0xcd813351, 0xd881334d,
+ 0xcfc00013, 0xc4193273, 0xc41d3275, 0xc40d3271, 0xc4113270, 0xc4153274, 0x50cc0020, 0x7cd0c01a,
+ 0x7cdcc011, 0x05900008, 0xcd00006a, 0xcdc0006b, 0xc41d3272, 0x7d594002, 0x54d00020, 0xd8800013,
+ 0xccc12e23, 0xcd012e24, 0xcdc12e25, 0xcfc00013, 0xc4193246, 0xc41d3245, 0xc4313267, 0x15540002,
+ 0x51980020, 0x7d9d801a, 0xc81c001f, 0x1b340057, 0x1b280213, 0x1b300199, 0x45980198, 0x7f37000a,
+ 0x7f2b000a, 0x55e40020, 0xcf000024, 0xd1800025, 0xcdc00026, 0xce400026, 0xd8400027, 0xcd40000d,
+ 0xcd40000a, 0xc40d3249, 0x20cc003c, 0xccc13249, 0xc4113274, 0xdd430000, 0xc01e0001, 0x29dc0002,
+ 0x04280000, 0xd8000036, 0xcc400078, 0xcc400078, 0x2d540002, 0x95400022, 0x078c0000, 0x07d40000,
+ 0x8c00120d, 0x8c001239, 0x8c001232, 0x04f80000, 0x057c0000, 0xcdc00013, 0xc414000d, 0xc41c0019,
+ 0x7dd5c005, 0x25dc0001, 0xd840007c, 0xd8400074, 0xd8400069, 0xc40c005e, 0x94c018a6, 0xd4412e22,
+ 0xd800007c, 0xc40c005e, 0x94c018a2, 0x95c00007, 0xc40c0019, 0x7cd4c005, 0x24cc0001, 0x94c00008,
+ 0x9680fffc, 0x800002e3, 0xc40c0057, 0x7cd0c002, 0x94c00003, 0x9680fffd, 0x800002e3, 0xd8000069,
+ 0xcfc00013, 0xcd013273, 0xcd013275, 0xd8000074, 0xc414005e, 0x9540188f, 0xcfc00013, 0xc40d3249,
+ 0xc013cfff, 0x7cd0c009, 0xccc13249, 0x9680000b, 0xc40c0077, 0x38d00001, 0x99000006, 0x04cc0002,
+ 0xdcc30000, 0xc40c005e, 0x94c01882, 0xd4400078, 0xd800000d, 0x80000304, 0x7c41c001, 0x7c41c001,
+ 0xd840002f, 0xc41c0015, 0x95c0ffff, 0xd8400030, 0xc41c0016, 0x95c0ffff, 0xd8000030, 0xc41c0016,
+ 0x99c0ffff, 0xd800002f, 0xc41c0015, 0x99c0ffff, 0xc81c001f, 0x49980198, 0x55e40020, 0x459801a0,
+ 0xcf000024, 0xd1800025, 0xcdc00026, 0xce400026, 0xd8400027, 0x04302000, 0xcfc00013, 0xcf013267,
+ 0xc4313267, 0x96800004, 0x97000001, 0xd8000036, 0x80000329, 0xd8800013, 0xcc812e00, 0x04302000,
+ 0xcfc00013, 0xcf013267, 0xc4313267, 0x97000001, 0xc4193256, 0xc42d3249, 0x16ec001f, 0xd8000028,
+ 0xd800002b, 0x1998003e, 0xcec00031, 0xd8000036, 0xd8000010, 0x97800004, 0xd8400010, 0xce00000a,
+ 0x1a18003e, 0xcd800008, 0x90000000, 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0x04100000,
+ 0x7d43c001, 0xcd400013, 0xc4093249, 0x1888003e, 0x94800015, 0xd8400074, 0x8c000671, 0xcd400013,
+ 0x9a400006, 0xc419324c, 0x259c0001, 0x1598001f, 0x95c0000d, 0x9580000c, 0x99000003, 0xd8400036,
+ 0x04100001, 0xc40c0021, 0x14d80011, 0x24dc00ff, 0x31e00002, 0x31dc0003, 0x9580fff0, 0x9a000003,
+ 0x99c00002, 0xd9c00036, 0x94800004, 0xd8000074, 0xc418005e, 0x95801827, 0xcf800008, 0x90000000,
+ 0xd8800036, 0x90000000, 0xd8c00036, 0xc424000b, 0x32640002, 0x9a400004, 0xc4180014, 0x9580ffff,
+ 0xd840002f, 0xc40c0021, 0x14dc0011, 0x95c0fffe, 0xccc00037, 0x8c000190, 0x90000000, 0xd8400008,
+ 0xd800006d, 0xc41d3246, 0xc4193245, 0x51dc0020, 0x7d9d801a, 0xd8400028, 0xd8400029, 0xc420000b,
+ 0x32200002, 0x9a0000ad, 0x04200032, 0xd9000010, 0xde030000, 0xd8400033, 0x04080000, 0xc43c0009,
+ 0x27fc0002, 0x97c0fffe, 0xc42c0015, 0x96c0ffff, 0xd800002e, 0xc42d3249, 0x1af4003e, 0x9740004d,
+ 0xc428000d, 0xc4080060, 0x7ca88005, 0x24880001, 0x7f4b4009, 0x97400046, 0xc4313274, 0xc4100057,
+ 0x7d33400c, 0x97400009, 0x28240100, 0x7e6a4004, 0xce400079, 0x1eecffdd, 0xcec13249, 0xcf013273,
+ 0xcf013275, 0x800003c3, 0xc429326f, 0x1aa80030, 0x96800006, 0x28240001, 0xc428000d, 0x06a80008,
+ 0x7e6a8004, 0xce800035, 0xc41d3272, 0x25cc0001, 0x10cc0004, 0x19e80042, 0x25dc0006, 0x11dc0001,
+ 0x7e8e800a, 0x7de9c00a, 0xc40d3271, 0xc4293270, 0x50cc0020, 0x7ce8c01a, 0x7cd30011, 0x11e80007,
+ 0x2aa80000, 0xce80001c, 0xd300001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc4300011, 0x1b30003f,
+ 0x33300000, 0xc4240059, 0x1660001f, 0x7e320009, 0xc0328000, 0x7e72400a, 0x0430000c, 0x9a000002,
+ 0x04300008, 0xc02ac000, 0x7d310002, 0x17300002, 0x2aa87600, 0x7cd0c011, 0xcdc00024, 0xd0c00025,
+ 0xce800026, 0x04280222, 0xce800026, 0x96000002, 0xce400026, 0xd8400027, 0xc4280058, 0x22ec003d,
+ 0xcec13249, 0xcd013273, 0xce813275, 0xd800007b, 0xc8380018, 0x57b00020, 0x04343108, 0xc429325d,
+ 0x040c3000, 0x13740008, 0x2374007e, 0x32a80003, 0xccc13267, 0xc40d3267, 0x18ec0057, 0x18e40213,
+ 0x18cc0199, 0x7cecc00a, 0x7ce4c00a, 0x94800003, 0xd4400078, 0x800003e7, 0x04200022, 0xde030000,
+ 0xccc00024, 0xd1800025, 0xcf400026, 0xd4400026, 0xd8400027, 0x04200010, 0xde030000, 0xccc00024,
+ 0x45980104, 0xd1800025, 0xd4400026, 0xcf800026, 0xcf000026, 0xd8400027, 0x49980104, 0x9a80000a,
+ 0xc81c001f, 0x45980168, 0x55e00020, 0xccc00024, 0xd1800025, 0xcdc00026, 0xce000026, 0xd8400027,
+ 0x800003f2, 0x8c000448, 0xcd400013, 0x040c2000, 0xccc13267, 0xc40d3267, 0x94c00001, 0xc40d3249,
+ 0x18cc003e, 0xd8400030, 0xc42c0016, 0x96c0ffff, 0xd8000030, 0xc42c0016, 0x9ac0ffff, 0xd800002f,
+ 0xc42c0015, 0x9ac0ffff, 0xd8400034, 0xc4300025, 0xc4340024, 0xc4380081, 0xcf813279, 0xcf41326e,
+ 0xcf01326d, 0x94c0000d, 0x254c0700, 0xc424001e, 0x10cc0010, 0x1a641fe8, 0x28cc0726, 0x2a640200,
+ 0xd8400013, 0xccc1237b, 0x2264003f, 0xcd400013, 0xd8813260, 0xce41325b, 0xc4240033, 0xc4280034,
+ 0xd9000036, 0xd8000010, 0x8c001427, 0x96400006, 0xde430000, 0xce40000c, 0xc40c005e, 0x94c01755,
+ 0xd4400078, 0x9680000a, 0xce80000a, 0x06a80002, 0xd8400010, 0xde830000, 0xce80000d, 0xc40c005e,
+ 0x94c0174c, 0xd4400078, 0xd8000010, 0x8c00142b, 0xc4393265, 0x2bb80040, 0xd8400032, 0xcf813265,
+ 0xc4200012, 0x9a00ffff, 0xc4100044, 0x19180024, 0xc8100072, 0x551c003f, 0x99c00003, 0x95800010,
+ 0x8000043d, 0xc00c8000, 0xd840006c, 0x28200000, 0x8000043f, 0xc00c4000, 0x282000f0, 0xcd400013,
+ 0xd8400008, 0xc4113255, 0xcd01324f, 0xd8400013, 0xd88130b8, 0xccc130b5, 0xce000053, 0x90000000,
+ 0x195c00e8, 0xc4100004, 0x2555fff0, 0xc0360001, 0x042c0000, 0x29540001, 0xd8400008, 0x04240000,
+ 0x04280004, 0xc420000b, 0x32200002, 0x9a000009, 0xcd400013, 0xcec1c200, 0xc5e124dc, 0x0aa80001,
+ 0x7ef6c001, 0x7e624001, 0x96000001, 0x9a80fff9, 0xc02ee000, 0xcd400013, 0x2555fff0, 0xcec1c200,
+ 0x29540008, 0xc81c001f, 0xcd400013, 0x55e00020, 0xc42d3255, 0xc4353259, 0xd8013260, 0x45980158,
+ 0xccc00024, 0xd1800025, 0xcdc00026, 0xce000026, 0xd8400027, 0x49980158, 0x45980170, 0xc4200012,
+ 0x16200010, 0x9a00fffe, 0xccc00024, 0xd1800025, 0xc429324f, 0xce400026, 0xce800026, 0xcec00026,
+ 0xcf400026, 0xd8400027, 0xcd000008, 0x90000000, 0xc40d325b, 0x7d43c001, 0x195400e8, 0x1154000a,
+ 0x18dc00e8, 0x05e80488, 0x18d0006c, 0x18f807f0, 0x18e40077, 0x18ec0199, 0x7e6e400a, 0x86800000,
+ 0x8000048e, 0x80000494, 0x800004de, 0x80000685, 0x80000686, 0x800006ac, 0x1ccc001f, 0xccc1325b,
+ 0xc411325d, 0x251001ef, 0xcd01325d, 0x90000000, 0xc4293254, 0x1264000a, 0xc4300004, 0x7d79400a,
+ 0x7e7a400a, 0x52a8001e, 0x15180001, 0x7d69401a, 0x202c007d, 0xcec1325b, 0x95000008, 0x95800028,
+ 0xc42d3267, 0xc4193246, 0xc41d3245, 0x1aec0028, 0xc40d325c, 0x800004cc, 0xc42d3256, 0xc419324e,
+ 0x26e8003f, 0x1aec003e, 0x12f4000e, 0xc41d324d, 0xc40d324f, 0x7d75401a, 0x04100002, 0x7d290004,
+ 0x7f8f4001, 0x7f52800f, 0x51980020, 0x7d9d801a, 0x50e00002, 0x51980008, 0x9a800002, 0x800004d1,
+ 0x7d0dc002, 0x6665fc00, 0x7e5e401a, 0xcec00008, 0x7da1c011, 0xd140000b, 0xd1c00002, 0x2a644000,
+ 0xce400002, 0x7f534002, 0x6665fc00, 0x7e76401a, 0xd1800002, 0xce400002, 0x800004d7, 0xc42d325a,
+ 0xc4193258, 0x1aec003e, 0xc41d3257, 0xc4213259, 0x12f4000e, 0x7d75401a, 0x51980020, 0x52200002,
+ 0x7d9d801a, 0xcec00008, 0x7da1c011, 0xd140000b, 0xd1c00002, 0x2a644000, 0xce400002, 0x202c003d,
+ 0xcf000008, 0xcfc00013, 0xcec1325b, 0xc42d325b, 0x96c00001, 0x90000000, 0xc4193260, 0x259c0007,
+ 0x15980004, 0x05e804e3, 0x86800000, 0x800004e7, 0x800004f0, 0x80000505, 0x8000016a, 0xc4380004,
+ 0xcfc00013, 0xd8400008, 0xc435325d, 0xd801325b, 0x277401ef, 0xcf41325d, 0xcf800008, 0x90000000,
+ 0xc4380004, 0xd8400008, 0x8c000671, 0x9640fff4, 0x17e00008, 0xc418000d, 0xce000009, 0xd84131db,
+ 0xcf800008, 0xcd800009, 0xc430001e, 0xcfc00013, 0xc42d325b, 0x1b301ff8, 0x2b300400, 0x2330003f,
+ 0x26edf000, 0x7ef2c00a, 0xd8413260, 0xcec1325b, 0x90000000, 0x05a80507, 0x86800000, 0x8000050c,
+ 0x80000528, 0x8000057d, 0x800005c2, 0x800005f3, 0xc4380004, 0xd8400008, 0x8c000671, 0xcfc00013,
+ 0x9a400012, 0x1bd400e8, 0xc42c004a, 0xcd40005e, 0xc41c004d, 0xcec0005e, 0x99c0000c, 0xc4100019,
+ 0x7d150005, 0x25100001, 0x99000008, 0x8c00063b, 0xcfc00013, 0xc4113277, 0x2511fffd, 0xcd013277,
+ 0xd801326f, 0x80000624, 0x04240012, 0x1be00fe4, 0xce413260, 0xce000066, 0xcf800008, 0x90000000,
+ 0xd8400068, 0xc4380004, 0xd8400008, 0x8c000671, 0xcfc00013, 0x9a400013, 0x1bd400e8, 0xc42c004a,
+ 0xcd40005e, 0xc41c004d, 0xcec0005e, 0x99c0000d, 0xc4100019, 0x7d150005, 0x25100001, 0x99000009,
+ 0xd8400067, 0x8c00063b, 0xcfc00013, 0xc4113277, 0x2511fffd, 0xcd013277, 0xd801326f, 0x80000624,
+ 0x1bd400e8, 0xc42c0060, 0x7ed6c005, 0x26ec0001, 0xc4113271, 0xc4153270, 0xc4193272, 0xc41d3273,
+ 0x04280022, 0x51100020, 0x7d51401a, 0xc4113274, 0xc4213275, 0xc4253276, 0xc4313248, 0xd1400061,
+ 0x2730000f, 0x13300010, 0x7db1800a, 0xcd800060, 0x96c00002, 0x05dc0008, 0xcdc00062, 0x042c3000,
+ 0xcd000063, 0xce000064, 0xce400065, 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267, 0xce813260,
+ 0x52ec0020, 0x7ef2c01a, 0xc820001f, 0x1b700057, 0x1b680213, 0x1b740199, 0x46ec0188, 0x7f73400a,
+ 0x7f6b400a, 0x56240020, 0xcf400024, 0xd2c00025, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027,
+ 0xc418000d, 0x17e00008, 0xce000009, 0xcec13267, 0xc42d3267, 0x26e01000, 0x9a00fffe, 0xd8400013,
+ 0xd9c131fc, 0xcd800009, 0xcf800008, 0x96c00001, 0x90000000, 0xc4380004, 0xd8400008, 0xc4113277,
+ 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, 0x11dc0008, 0x29dc0001, 0x25140001, 0x191807e4,
+ 0x192007ec, 0x95400004, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x9580000e, 0x09980001, 0x041c0001,
+ 0x95800005, 0x09980001, 0x51dc0001, 0x69dc0001, 0x9980fffd, 0x7de20014, 0x561c0020, 0xd8400013,
+ 0xce013344, 0xcdc13345, 0xcfc00013, 0x95400022, 0x042c3000, 0xcec13267, 0xc42d3246, 0xc4313245,
+ 0xc4353267, 0xd8400013, 0xc425334d, 0x26640001, 0x9640fffe, 0xc419334e, 0xc41d334f, 0xc4213350,
+ 0xc4253351, 0x52ec0020, 0x1b680057, 0x7ef2c01a, 0x1b700213, 0x1b740199, 0x46ec01b0, 0x7f6b400a,
+ 0x7f73400a, 0xcfc00013, 0xcf400024, 0xd2c00025, 0xcd800026, 0xcdc00026, 0xce000026, 0xce400026,
+ 0x042c2000, 0xd8400027, 0xcec13267, 0xc42d3267, 0x96c00001, 0x04280032, 0xce813260, 0xd8800068,
+ 0xcf800008, 0x90000000, 0xc4380004, 0xd8400008, 0x2010007d, 0xcd01325b, 0xc411325b, 0x1910003e,
+ 0x9500fffe, 0x04100040, 0xcd00001b, 0xd8400021, 0xc410000f, 0x9900ffff, 0x04100060, 0xcd00001b,
+ 0xd8400021, 0xc410000f, 0x9900ffff, 0xcfc00013, 0x2010003d, 0xcd01325b, 0xc4113277, 0x25140001,
+ 0x191807e4, 0x9540000b, 0x2511fffd, 0xcd013277, 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001,
+ 0x11dc0008, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x95800005, 0xd8400013, 0xd8013344, 0xd8013345,
+ 0xcfc00013, 0xc4180050, 0xc41c0052, 0x04280042, 0xcd813273, 0xcdc13275, 0xce813260, 0xd9000068,
+ 0xd8400067, 0xcf800008, 0x90000000, 0x07d40000, 0x8c00120d, 0x8c00124f, 0x8c001232, 0x057c0000,
+ 0x042c3000, 0xc4380004, 0xcfc00013, 0xd8400008, 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267,
+ 0x52ec0020, 0x7ef2c01a, 0x1b680057, 0x1b700213, 0x1b740199, 0xc820001f, 0x46ec0190, 0x7f6b400a,
+ 0x7f73400a, 0x56240020, 0xcf400024, 0xd2c00025, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027,
+ 0xcfc00013, 0xcec13267, 0xc4153249, 0x2154003d, 0xc41c0019, 0x1bd800e8, 0x7dd9c005, 0x25dc0001,
+ 0xc42c004a, 0xcd80005e, 0xc420004d, 0xcec0005e, 0x11dc0010, 0x7e1e000a, 0xcd413249, 0xce01326f,
+ 0x28340001, 0x05980008, 0x7f598004, 0xcd800035, 0x1be800e8, 0xc42c004a, 0xce80005e, 0xd801327a,
+ 0xd800005f, 0xd8000075, 0xd800007f, 0xc424004c, 0xce41326e, 0xcec0005e, 0x28240100, 0x7e6a4004,
+ 0xce400079, 0xc435325d, 0x277401ef, 0x04240020, 0xce41325e, 0xd801325b, 0xd8013260, 0xcf41325d,
+ 0xda000068, 0xcf800008, 0x90000000, 0xc4113277, 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001,
+ 0x11dc0008, 0x29dc0001, 0x25140001, 0x9540002d, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x042c3000,
+ 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267, 0xd8400013, 0xc425334d, 0x26640001, 0x9640fffe,
+ 0xc419334e, 0xc41d334f, 0xc4213350, 0xc4253351, 0x52ec0020, 0x1b680057, 0x7ef2c01a, 0x1b700213,
+ 0x1b740199, 0x46ec01b0, 0x7f6b400a, 0x7f73400a, 0xcfc00013, 0xcf400024, 0xd2c00025, 0xcd800026,
+ 0xcdc00026, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027, 0xcec13267, 0xc42d3267, 0x96c00001,
+ 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, 0x11dc0008, 0xd8400013, 0xcdc1334a, 0xcfc00013,
+ 0x90000000, 0xc430000b, 0x33300002, 0x04240000, 0x9b000010, 0x1be000e8, 0x042c0000, 0xc0360001,
+ 0x04280004, 0xd8400013, 0xcec1c200, 0xc63124dc, 0x0aa80001, 0x7ef6c001, 0x7e724001, 0x97000001,
+ 0x9a80fff9, 0xc02ee000, 0xd8400013, 0xcec1c200, 0x90000000, 0x90000000, 0xc4253260, 0x7fc14001,
+ 0xc40d3249, 0x18cc003e, 0x98c00005, 0x194c1c03, 0xccc0003b, 0xc40c002d, 0x80000697, 0xc420004a,
+ 0x194c00e8, 0xccc0005e, 0xc40c004c, 0xc431326d, 0x27301fff, 0xce00005e, 0x7cf0c00d, 0x98c00003,
+ 0x8c0007e0, 0x95c00008, 0xc430001e, 0x1b301ff8, 0x2b300400, 0x2330003f, 0xcd400013, 0xcf01325b,
+ 0x90000000, 0xcd400013, 0xd801325b, 0xc411325d, 0x251001ef, 0xcd01325d, 0x25100007, 0x31100005,
+ 0x9900008e, 0xc40c0007, 0xd9000010, 0x8000075e, 0x202c007d, 0xcec1325b, 0xc4293265, 0xc4353254,
+ 0x26a9feff, 0xc4380004, 0xd8400008, 0x1374000b, 0xc40c000d, 0xd8000009, 0x1774000d, 0xd8400013,
+ 0xc41d30b8, 0xcfc00013, 0x95c00008, 0xc411325d, 0xd801325b, 0xccc00009, 0xcf800008, 0x251001ef,
+ 0xcd01325d, 0x90000000, 0xce813265, 0xcf400100, 0xc00ac006, 0xc00e0000, 0x28880700, 0x28cc0014,
+ 0x8c0006de, 0x14cc0010, 0x30d4000f, 0x04cc0001, 0x10cc0010, 0x28cc0014, 0x99400009, 0xd8400013,
+ 0xc41530b8, 0xcfc00013, 0xc4193265, 0x19980028, 0x99400003, 0x99800002, 0x800006c8, 0xcfc00013,
+ 0xc411325d, 0xd801325b, 0xcf800008, 0x251001ef, 0xcd01325d, 0x90000000, 0x15600008, 0xce000009,
+ 0xc8380023, 0xc4180081, 0x11a00002, 0x7fa38011, 0xc4100026, 0x05980008, 0x7d1a0002, 0x282c2002,
+ 0x3e280008, 0xcec00013, 0xc4300027, 0x042c0008, 0xd3800025, 0xcf000024, 0x202400d0, 0x7ca48001,
+ 0xcc800026, 0xccc00026, 0x28240006, 0xcc000026, 0x0a640001, 0x9a40fffe, 0x9a800004, 0x32280000,
+ 0x9a800002, 0x9a000000, 0xd8400027, 0x24d8003f, 0xd840003c, 0xcec0003a, 0xd8800013, 0xcd81a2a4,
+ 0x90000000, 0xc41d325d, 0x25dc0007, 0xc40d3249, 0x18cc003e, 0x94c0000a, 0xc420004a, 0x194c00e8,
+ 0xccc0005e, 0xc40c004c, 0xc431326d, 0x27301fff, 0xce00005e, 0x7cf0c00d, 0x80000712, 0x194c1c03,
+ 0xccc0003b, 0xc40c002d, 0x05e80714, 0x86800000, 0x8000071c, 0x80000720, 0x80000747, 0x8000071d,
+ 0x800007c4, 0x80000732, 0x80000745, 0x80000744, 0x90000000, 0x98c00006, 0x8000072e, 0x90000000,
+ 0x98c00003, 0x8c0007e0, 0x95c0000c, 0xcd400013, 0xc4253265, 0x2a64008c, 0xce413265, 0xc430001e,
+ 0x1b301fe8, 0x2b300400, 0x2330003f, 0xd8013260, 0xcf01325b, 0x90000000, 0xc40c0007, 0xd9000010,
+ 0x04240000, 0x8000075e, 0x98c0fff1, 0x8c0007e0, 0x95c00002, 0x80000723, 0xcd400013, 0xc41f02f1,
+ 0x95c00004, 0xd8013247, 0xd801325d, 0x80000743, 0xd8813247, 0xd801325d, 0xc4100004, 0xd8400008,
+ 0xd8400013, 0xd88130b8, 0xcd000008, 0x90000000, 0x04100001, 0x98c0ffde, 0x8000072e, 0x98c00003,
+ 0x8c0007e0, 0x95c00012, 0xc4340004, 0xd8400008, 0x15600008, 0xc418000d, 0xce000009, 0xd8400013,
+ 0xd84131db, 0xcf400008, 0xcd800009, 0xc430001e, 0x1b301ff8, 0x2b300400, 0x2330003f, 0xcd400013,
+ 0xd8413260, 0xcf01325b, 0x90000000, 0xc40c0007, 0xd9000010, 0x04240000, 0xcd400013, 0x041c3000,
+ 0xcdc13267, 0xc41d3267, 0xc41d3265, 0x25dc8000, 0x95c00007, 0xc41c004a, 0x195800e8, 0xcd80005e,
+ 0xc418004c, 0xcd81326e, 0xcdc0005e, 0xc41d3265, 0x25dd7fff, 0xcdc13265, 0xc41d3246, 0xc4193245,
+ 0xc42d3267, 0x51e00020, 0x7e1a001a, 0x46200200, 0x04283247, 0x04300033, 0x1af80057, 0x1af40213,
+ 0x042c000c, 0x7f7b400a, 0x7f6f400a, 0xcf400024, 0xd2000025, 0xcd800026, 0xcdc00026, 0xc6990000,
+ 0x329c325d, 0x99c00008, 0x329c3269, 0x99c00006, 0x329c3267, 0x95c00005, 0xc01defff, 0x7d9d8009,
+ 0x8000078a, 0x25980000, 0x0b300001, 0x06a80001, 0xcd800026, 0x9b00fff2, 0xd8400027, 0xc43c0012,
+ 0x9bc0ffff, 0xcd400013, 0xd801325b, 0xc431325a, 0xc03e7ff0, 0x7f3f0009, 0xcf01325a, 0xc4313249,
+ 0x1f30001f, 0xcf013249, 0xc03e4000, 0xcfc13254, 0xcd400013, 0xd8013254, 0xc431325d, 0xd801324f,
+ 0xd8013255, 0xd8013247, 0xd801325d, 0x1b300028, 0x8c00120d, 0x8c001219, 0x8c001232, 0xc4380004,
+ 0xd8400008, 0xd8400013, 0x9900000d, 0xd88130b8, 0x9700000b, 0xc43d30b5, 0x1bf0003a, 0x9b000b80,
+ 0x203c003a, 0xc430000e, 0x27300700, 0x13300014, 0x2b300001, 0xcf0130b7, 0xcfc130b5, 0x46200008,
+ 0xcf400024, 0xd2000025, 0xd8000026, 0xd8400027, 0x043c2000, 0xcd400013, 0xcfc13267, 0xc43d3267,
+ 0x9bc00001, 0xccc00010, 0xcf800008, 0x90000000, 0xc4080007, 0xd9000010, 0xc4193260, 0x259c0003,
+ 0x31dc0003, 0x95c00014, 0x040c3000, 0xd8400008, 0xccc13267, 0xc40d3267, 0x18ec0057, 0x18e40213,
+ 0x18cc0199, 0x7cecc00a, 0x7ce4c00a, 0xc4193246, 0xc41d3245, 0x51980020, 0x7d9d801a, 0x8c000448,
+ 0xcd400013, 0x040c2000, 0xccc13267, 0xc40d3267, 0x94c00001, 0xcc800010, 0xd801325d, 0x90000000,
+ 0xc418000b, 0x31980002, 0x041c0000, 0x9980001c, 0x19580066, 0x15600008, 0x040c0000, 0xc0120001,
+ 0x11980003, 0x04240004, 0x7da18001, 0xc4200007, 0xc4340004, 0xd9000010, 0xd8400008, 0xd8400013,
+ 0xccc1c200, 0xc41d24db, 0x7cd0c001, 0x0a640001, 0x7dd9c005, 0x25dc0001, 0x99c00002, 0x9a40fff8,
+ 0xc418005e, 0x9580137b, 0xc00ee000, 0xd8400013, 0xccc1c200, 0xce000010, 0xcf400008, 0x90000000,
+ 0xd840004f, 0xc4113269, 0x19080070, 0x190c00e8, 0x2510003f, 0x2518000f, 0xcd813268, 0x05a80809,
+ 0x86800000, 0x8000080e, 0x8000080f, 0x80000898, 0x80000946, 0x800009e1, 0x80000a5a, 0x04a80811,
+ 0x86800000, 0x80000815, 0x80000834, 0x8000085e, 0x8000085e, 0x04341001, 0xcf400013, 0xc4380004,
+ 0xd8400008, 0xc42d3045, 0xcec1c091, 0x31300021, 0x9700000b, 0xd84002f1, 0xd8400013, 0xc43130b8,
+ 0x27300001, 0xc4293059, 0x56a8001f, 0x7f2b000a, 0xcf800008, 0x9b000241, 0x8000084a, 0xcf400013,
+ 0xd8400008, 0xc43130b6, 0x9b000003, 0xc02f0001, 0xcec130b6, 0xc4252087, 0x5668001a, 0x26a80005,
+ 0x9a80fffd, 0xcf400013, 0xd80130b6, 0x8000084a, 0xc4380004, 0xd8400008, 0x04341001, 0xcf400013,
+ 0xc431ecaa, 0x27300080, 0x9b000010, 0xc02e0001, 0xcec130b6, 0xcf400013, 0xd80130b6, 0x31300021,
+ 0x9700000a, 0xd84002f1, 0xd8400013, 0xc43130b8, 0x27300001, 0xc4293059, 0x56a8001f, 0x7f2b000a,
+ 0xcf800008, 0x9b00021d, 0xdd410000, 0x040c0005, 0xd84802e9, 0x8c001a41, 0xc43b02f1, 0x9b800006,
+ 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0xcf800008, 0xcec80278, 0x56f00020, 0xcf080280,
+ 0x8c001608, 0xdc140000, 0xcd400013, 0xd8813247, 0xd80802e9, 0x8000085e, 0xcd400013, 0x31100011,
+ 0x950001fa, 0xc02e0001, 0x2aec0008, 0xc01c0020, 0xc0180001, 0xc00c0007, 0x11a40006, 0x7de6000a,
+ 0x10e40008, 0x7e26000a, 0x7e2e000a, 0xce000013, 0xc4113254, 0x1d10ffdf, 0x2110003e, 0xcd013254,
+ 0xd801324f, 0xd8013255, 0x1d10ff9e, 0xcd013254, 0xd8013247, 0xd801325d, 0xd801325e, 0xc0245301,
+ 0xce413249, 0xd801325f, 0xc425326c, 0xc0121fff, 0x29108eff, 0x7e524009, 0xce41326c, 0xc425325a,
+ 0xc0127ff0, 0x7e524009, 0xce41325a, 0xc425325b, 0xc0131fff, 0x7e524009, 0xce41325b, 0xd801326d,
+ 0xd801326e, 0xd8013279, 0x94c00003, 0x08cc0001, 0x80000866, 0xc00c0007, 0x95800003, 0x09980001,
+ 0x80000866, 0xc0100010, 0x7dd2400c, 0x9a400004, 0xc0180003, 0x7dd1c002, 0x80000866, 0x80000a5a,
+ 0x04a8089a, 0x86800000, 0x8000089e, 0x800008fa, 0x80000945, 0x80000945, 0x31300022, 0x97000007,
+ 0xc4380004, 0xd8400008, 0xd8400013, 0xc43130b8, 0x27300001, 0xcf800008, 0xcd400013, 0x04183000,
+ 0xcd813267, 0xc4113246, 0xc4193245, 0x51100020, 0x7d91801a, 0x459801e0, 0xc4313267, 0x2738000f,
+ 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c,
+ 0xd180001e, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8300011, 0x97000036, 0x45980008, 0xd180001e,
+ 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8340011, 0x9740002f, 0xc43c0004, 0xd8400008, 0xd8400013,
+ 0x13b80001, 0xc79d3300, 0xc7a13301, 0x96000001, 0xd8393300, 0xc0260001, 0xce793301, 0xc424005e,
+ 0x964012a4, 0x7c028009, 0x9740001c, 0x27580001, 0x99800004, 0x57740001, 0x06a80400, 0x800008d2,
+ 0xc4180006, 0x9980ffff, 0x29640001, 0xce40001a, 0x242c0000, 0x06ec0400, 0x57740001, 0x27580001,
+ 0x9980fffd, 0xc02620c0, 0xce41c078, 0xce81c080, 0xcc01c081, 0xcf01c082, 0x57240020, 0xce41c083,
+ 0xc0260400, 0x7e6e400a, 0xce41c084, 0x7eae8001, 0x7f2f0011, 0x800008d2, 0xc4180006, 0x9980ffff,
+ 0xcdf93300, 0xce393301, 0xcfc00008, 0xcd400013, 0xc43c0004, 0xd8400008, 0x04182000, 0xcd813267,
+ 0xcfc00008, 0x80000903, 0x31240022, 0x96400008, 0x04100001, 0xc4380004, 0xd8400008, 0xd8400013,
+ 0xc43130b8, 0x27300001, 0xcf800008, 0xc4af0280, 0xc4b30278, 0x52ec0020, 0x7ef2c01a, 0x7ec30011,
+ 0x32f80000, 0x9b800011, 0x043c0020, 0x04280000, 0x67180001, 0x0bfc0001, 0x57300001, 0x95800006,
+ 0x8c001628, 0x9a400003, 0xd981325d, 0x80000915, 0xd9c1325d, 0x06a80001, 0x9bc0fff6, 0x7f818001,
+ 0x8c001606, 0x7d838001, 0x94800010, 0xcd400013, 0xc41d3259, 0xc421325a, 0x16240014, 0x12640014,
+ 0x1a2801f0, 0x12a80010, 0x2620ffff, 0x7e2a000a, 0x7de1c001, 0x7e5e400a, 0x9b800002, 0x2264003f,
+ 0xce41325a, 0xd8013259, 0xc40c0007, 0xd9000010, 0x8c00075e, 0xc4af0228, 0x043c0000, 0x66d80001,
+ 0x95800010, 0x04300002, 0x1330000d, 0x13f40014, 0x7f73400a, 0xcf400013, 0x04380040, 0xcf80001b,
+ 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380060, 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff,
+ 0x07fc0001, 0x56ec0001, 0x33e80010, 0x9680ffec, 0x80000a5a, 0x80000a5a, 0x04a80948, 0x86800000,
+ 0x8000094c, 0x8000099b, 0x800009e0, 0x800009e0, 0xc43c0004, 0xd8400008, 0xcd400013, 0x04183000,
+ 0xcd813267, 0xc4113246, 0xc4193245, 0x51100020, 0x7d91801a, 0x459801e0, 0xc4313267, 0x2738000f,
+ 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c,
+ 0xd180001e, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8300011, 0x97000033, 0x45980008, 0xd180001e,
+ 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8340011, 0x9740002c, 0xd8400013, 0x13b80001, 0xc79d3300,
+ 0xc7a13301, 0x96000001, 0xd8393300, 0xc0260001, 0xce793301, 0xc424005e, 0x964011fe, 0x7c028009,
+ 0x9740001c, 0x27580001, 0x99800004, 0x57740001, 0x06a80400, 0x80000978, 0xc4180006, 0x9980ffff,
+ 0x29640001, 0xce40001a, 0x242c0000, 0x06ec0400, 0x57740001, 0x27580001, 0x9980fffd, 0xc0260010,
+ 0xce41c078, 0xcf01c080, 0x57240020, 0xce41c081, 0xce81c082, 0xcc01c083, 0xc0260800, 0x7e6e400a,
+ 0xce41c084, 0x7eae8001, 0x7f2f0011, 0x80000978, 0xc4180006, 0x9980ffff, 0xcdf93300, 0xce393301,
+ 0x04182000, 0xcd813267, 0xcfc00008, 0xcd400013, 0xc4193246, 0xc41d3245, 0x51980020, 0x7dda801a,
+ 0x7d41c001, 0x7e838011, 0xd84802e9, 0x8c001802, 0x469c0390, 0xc4313267, 0x04183000, 0xcd813267,
+ 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c,
+ 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4200011, 0x45dc0004, 0xd1c0001e,
+ 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4240011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f,
+ 0x9980ffff, 0xc4280011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc42c0011,
+ 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4300011, 0x45dc0004, 0xd1c0001e,
+ 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4340011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f,
+ 0x9980ffff, 0xc4380011, 0xcd400013, 0x04182000, 0xcd813267, 0x043c0001, 0x8c0014df, 0x80000a5a,
+ 0x80000a5a, 0x31280014, 0xce8802ef, 0x9a800062, 0x31280034, 0x9a800060, 0x04a809e8, 0x86800000,
+ 0x800009ec, 0x80000a45, 0x80000a59, 0x80000a59, 0xcd400013, 0xc4113246, 0xc4193245, 0x51100020,
+ 0x7d91801a, 0x45980400, 0xc4b30258, 0xc4a70250, 0x53300020, 0x7e72401a, 0xc4313267, 0x1b342010,
+ 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c, 0x042c0020,
+ 0x66740001, 0x97400041, 0xcd400013, 0x04383000, 0xcf813267, 0xc4393267, 0x9b800001, 0xd180001e,
+ 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4300011, 0x1b38007e, 0x33b40003, 0x9b400003, 0x4598001c,
+ 0x9740002f, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc40c0011, 0x45980004,
+ 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4100011, 0x45980004, 0xd180001e, 0xd8400021,
+ 0xc438000f, 0x9b80ffff, 0xc4340011, 0xcf4002eb, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f,
+ 0x9b80ffff, 0xc4340011, 0xcf4002ec, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff,
+ 0xc4340011, 0xcf4002ed, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4340011,
+ 0xcf4002ee, 0x45980004, 0xcd400013, 0x04382000, 0xcf813267, 0xd84802e9, 0x8c001715, 0xcd400013,
+ 0x04382000, 0xcf813267, 0x56640001, 0x0aec0001, 0x9ac0ffbc, 0xc4380004, 0xd8400008, 0x04341001,
+ 0xcf400013, 0x94800005, 0xc431ecaa, 0x27300080, 0x97000002, 0x80000a55, 0xc43130b6, 0x233c0032,
+ 0xcfc130b6, 0xcf400013, 0xcf0130b6, 0xc49302ef, 0x99000003, 0xcd400013, 0xd8413247, 0xcf800008,
+ 0x80000a5a, 0x80000a5a, 0xcd400013, 0x04180001, 0x5198001f, 0xcd813268, 0xc4193269, 0x2598000f,
+ 0x9980fffe, 0xd80002f1, 0xcd400013, 0xd8013268, 0xd800004f, 0x90000000, 0xcd400013, 0x04380001,
+ 0x53b8001f, 0x7db9801a, 0xcd813268, 0x80000a5e, 0xd8400029, 0xc40c005e, 0x94c01106, 0xd8800013,
+ 0xcc412e01, 0xcc412e02, 0xcc412e03, 0xcc412e00, 0x80000aa7, 0xd8400029, 0xc40c005e, 0x94c010fd,
+ 0x7c40c001, 0x50640020, 0x7ce4c01a, 0xd0c00072, 0xc80c0072, 0x58e801fc, 0x12a80009, 0x2aa80000,
+ 0xd0c0001e, 0xce80001c, 0xd8400021, 0xc424000f, 0x9a40ffff, 0x04240010, 0x18dc01e2, 0x7e5e4002,
+ 0x3e5c0003, 0x3e540002, 0x95c00006, 0xc8180011, 0xc8100011, 0xc8100011, 0x55140020, 0x80000aa2,
+ 0x9540000a, 0xc8180011, 0x44cc0008, 0x55900020, 0xd0c0001e, 0xd8400021, 0xc424000f, 0x9a40ffff,
+ 0xc4140011, 0x80000aa2, 0x44cc0004, 0xc4180011, 0xd0c0001e, 0xd8400021, 0xc424000f, 0x9a40ffff,
+ 0xc8100011, 0x55140020, 0xd8800013, 0xcd812e01, 0xcd012e02, 0xcd412e03, 0xcc412e00, 0xc428000e,
+ 0x2aa80008, 0xce800013, 0xc4253249, 0x2264003f, 0xce413249, 0xce800013, 0xc4253249, 0x96400001,
+ 0xd800002a, 0xc410001a, 0xc40c0021, 0xc4140028, 0x95000005, 0x1e64001f, 0xce800013, 0xce413249,
+ 0x80001b70, 0x14d00010, 0xc4180030, 0xc41c0007, 0x99000004, 0x99400009, 0x9980000c, 0x80000ab1,
+ 0xccc00037, 0x8c000190, 0xc420001c, 0xd8000032, 0x9a0010ac, 0x80000aa7, 0xd880003f, 0x95c00002,
+ 0xd8c0003f, 0x80001082, 0xd8800040, 0x95c00002, 0xd8c00040, 0x800010de, 0xc010ffff, 0x18d403f7,
+ 0x7d0cc009, 0xc41b0367, 0x7d958004, 0x7d85800a, 0xdc1e0000, 0x90000000, 0xc424000b, 0x32640002,
+ 0x7c40c001, 0x18d001fc, 0x05280adc, 0x86800000, 0x80000af1, 0x80000adf, 0x80000ae7, 0x8c000ace,
+ 0xd8c00013, 0x96400002, 0xd8400013, 0xcd8d2000, 0x99c00010, 0x7c408001, 0x88000000, 0x18d803f7,
+ 0xc010ffff, 0x7d0cc009, 0x04140000, 0x11940014, 0x29544001, 0x9a400002, 0x29544003, 0xcd400013,
+ 0x80000af4, 0xd8c00013, 0x96400002, 0xd8400013, 0xd44d2000, 0x7c408001, 0x88000000, 0xc424000b,
+ 0x32640002, 0x7c40c001, 0xd8c00013, 0x96400002, 0xd8400013, 0xd44dc000, 0x7c408001, 0x88000000,
+ 0x7c40c001, 0x18d0003c, 0x95000006, 0x8c000ace, 0xd8800013, 0xcd8d2c00, 0x99c00003, 0x80000b0a,
+ 0xd8800013, 0xd44d2c00, 0x7c408001, 0x88000000, 0x7c40c001, 0x28148004, 0x24d800ff, 0xccc00019,
+ 0xcd400013, 0xd4593240, 0x7c408001, 0x88000000, 0xd8400029, 0xc40c005e, 0x94c0105e, 0x7c410001,
+ 0x50540020, 0x7c418001, 0x2198003f, 0x199c0034, 0xc40c0007, 0x95c00028, 0xc428000e, 0x2aa80008,
+ 0xce800013, 0xc42d324f, 0xc4313255, 0x7ef3400c, 0x9b400021, 0xd800002a, 0x80001b70, 0xc40c0007,
+ 0x14e80001, 0x9a8000af, 0xd9000010, 0x041c0002, 0x042c01c8, 0x8c000d61, 0xccc00010, 0xd8400029,
+ 0xc40c005e, 0x94c01043, 0x7c410001, 0x50540020, 0x7c418001, 0x18a01fe8, 0x3620005c, 0x9a00000e,
+ 0x2464003f, 0xd8400013, 0xc6290ce7, 0x16ac001f, 0x96c00004, 0x26ac003f, 0x7ee6c00d, 0x96c00005,
+ 0x06200001, 0x2620000f, 0x9a00fff8, 0x8000016a, 0xce000367, 0xc424005e, 0x9640102e, 0xc428000e,
+ 0x199c0037, 0x19a00035, 0x2aa80008, 0xce800013, 0x95c0005d, 0xd800002a, 0xc42d3256, 0xc431325a,
+ 0x2330003f, 0x16f8001f, 0x9780000d, 0xc4253248, 0xc035f0ff, 0x7e764009, 0x19b401f8, 0x13740008,
+ 0x7e76400a, 0xce800013, 0xce413248, 0xcf01325a, 0xce800013, 0xc431325a, 0x97000001, 0x7d15001a,
+ 0xd1000072, 0xc8100072, 0x55140020, 0x199c0034, 0xd8400010, 0xd8400029, 0x9b800004, 0x1ae4003e,
+ 0xce400008, 0x80000b7c, 0xc4353254, 0x16a80008, 0x1aec003c, 0x19a4003f, 0x12a80015, 0x12ec001f,
+ 0x1374000b, 0x7eae800a, 0xc02e4000, 0x1774000d, 0x7eae800a, 0xce400008, 0x7f6b400a, 0x95c00005,
+ 0xc43d3248, 0x1bfc01e8, 0x13fc0018, 0x7dbd800a, 0x1d98ff15, 0x592c00fc, 0xcd80000a, 0x12e00016,
+ 0x7da1800a, 0x592c007e, 0x12e00015, 0x7da1800a, 0xd1000001, 0xcd800001, 0x11a0000c, 0x1264001e,
+ 0x1620000c, 0x7e26000a, 0x7e32000a, 0x12e4001b, 0x7e26000a, 0x5924007e, 0x12640017, 0x7e26000a,
+ 0x19a4003c, 0x12640018, 0x7e26000a, 0xd800002a, 0xce01325a, 0xcd013257, 0xcd413258, 0xc429325a,
+ 0xc40c005e, 0x94c00fdb, 0x96800001, 0x95c00003, 0x7c40c001, 0x7c410001, 0x9780f5ca, 0xcf400100,
+ 0xc40c0007, 0xd9000010, 0x8c00120d, 0x8c001219, 0x8c001232, 0xccc00010, 0x8c001b6d, 0x7c408001,
+ 0x88000000, 0xc42d324e, 0xc431324d, 0x52ec0020, 0x7ef2c01a, 0xc435324f, 0xc4293256, 0x52ec0008,
+ 0x07740003, 0x04240002, 0x269c003f, 0x7e5e4004, 0x7f67000f, 0x97000003, 0x7f674002, 0x0b740001,
+ 0x53740002, 0x7ef6c011, 0x1ab42010, 0x1ab8c006, 0x16a8000c, 0x26a80800, 0x2b740000, 0x7f7b400a,
+ 0x7f6b400a, 0xcf40001c, 0xd2c0001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4180011, 0x9a000003,
+ 0x8c000bec, 0x80000b47, 0xc42c001d, 0xc4313256, 0x1b34060b, 0x1b300077, 0x7f370009, 0x13300017,
+ 0x04340100, 0x26ec00ff, 0xc03a8004, 0x7ef6c00a, 0x7f3b000a, 0x7ef2c00a, 0xcec1325b, 0x80000c16,
+ 0xc40c0032, 0xc410001d, 0x28cc0008, 0xccc00013, 0xc415325b, 0x7c418001, 0x7c418001, 0x18580037,
+ 0x251000ff, 0xc421325d, 0x262001ef, 0xce01325d, 0x99800004, 0x7d15400a, 0xcd41325b, 0x80000168,
+ 0x1d54001f, 0xcd41325b, 0x7c408001, 0x88000000, 0xc428000b, 0xc42c000c, 0x12a80001, 0x26a80004,
+ 0x7eae800a, 0xc40c0021, 0xc4340028, 0x14f00010, 0xc4380030, 0xc43c0007, 0xcd280200, 0xcd680208,
+ 0xcda80210, 0x9b00000c, 0x9b400014, 0x9b800017, 0xc428000b, 0xc42c000c, 0x12a80001, 0x26a80004,
+ 0x7eae800a, 0xc6930200, 0xc6970208, 0xc69b0210, 0x90000000, 0x17300001, 0x9b000005, 0xccc00037,
+ 0x8c000190, 0xd8000032, 0x90000000, 0xd8000028, 0xd800002b, 0x80000168, 0xd900003f, 0x97c00002,
+ 0xd940003f, 0x80001082, 0xd9000040, 0x97c00002, 0xd9400040, 0x800010de, 0xc40c0021, 0x14fc0011,
+ 0x24f800ff, 0x33b80001, 0x97c0fffc, 0x9b800007, 0xccc00037, 0x8c000190, 0xd8000032, 0xd8000028,
+ 0xd800002b, 0x80001b70, 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0x04100000, 0x04140000,
+ 0xc418000e, 0x29980008, 0x7d83c001, 0xcd800013, 0xc4093249, 0x1888003e, 0x94800020, 0xd8400074,
+ 0x8c000671, 0x9a400009, 0xc418000e, 0x29980008, 0xcd800013, 0xc419324c, 0x259c0001, 0x1598001f,
+ 0x95c00016, 0x95800015, 0x99000003, 0xd8400036, 0x04100001, 0xc40c0021, 0x14d80011, 0x24e000ff,
+ 0x321c0002, 0x32200001, 0x9580ffee, 0x99c00014, 0x96000004, 0xccc00037, 0x04140001, 0x80000c30,
+ 0x9480000a, 0xd8000074, 0xc418005e, 0x95800f29, 0xcf800008, 0x80000c16, 0x94800004, 0xd8000074,
+ 0xc418005e, 0x95800f23, 0xd9c00036, 0x99400002, 0xccc00037, 0xcf800008, 0x80000c16, 0x94800004,
+ 0xd8000074, 0xc418005e, 0x95800f1a, 0xccc00037, 0xd8800036, 0x80001b70, 0x041c0003, 0x042c01c8,
+ 0x8c000d61, 0xc4200007, 0xc40c0077, 0x94c00001, 0x7c418001, 0xc428000e, 0x9600f502, 0x0a200001,
+ 0x98c0f500, 0x2aa80008, 0xce000010, 0x9a000f05, 0xce800013, 0xc431325a, 0xc42d3256, 0x1f30001f,
+ 0x16e4001f, 0xcf01325a, 0xc431325a, 0x97000001, 0x9640f4f4, 0xc434000b, 0x33740002, 0x9b40f4f1,
+ 0xc4353254, 0x16a80008, 0x1aec003c, 0x12a80015, 0x12ec001f, 0x1374000b, 0x7eae800a, 0xc02e4000,
+ 0x1774000d, 0x7eae800a, 0x7f6b400a, 0xcf400100, 0x12780001, 0x2bb80001, 0xc00ac005, 0xc00e0002,
+ 0x28cc8000, 0x28884900, 0x28cc0014, 0x80000ff3, 0xc43c0007, 0x7c40c001, 0x17fc0001, 0xd8400013,
+ 0x9bc00004, 0xd8400029, 0xc424005e, 0x96400ee1, 0xcc41c40a, 0xcc41c40c, 0xcc41c40d, 0x7c414001,
+ 0x24d0007f, 0x15580010, 0x255400ff, 0xcd01c411, 0xcd81c40f, 0xcd41c40e, 0xcc41c410, 0x7c414001,
+ 0x7c418001, 0x04200000, 0x18e80033, 0x18ec0034, 0xcc41c414, 0xcc41c415, 0xcd81c413, 0xcd41c412,
+ 0x18dc0032, 0x7c030011, 0x7c038011, 0x95c00027, 0x96c00002, 0xc431c417, 0xc435c416, 0x96800004,
+ 0x96c00002, 0xc439c419, 0xc43dc418, 0xc41c000e, 0x29dc0008, 0xcdc00013, 0xcf413261, 0x96c00002,
+ 0xcf013262, 0x96800004, 0xcfc13263, 0x96c00002, 0xcf813264, 0x18dc0030, 0xc43c0007, 0x95c00017,
+ 0x17fc0001, 0x9ac00005, 0x7d77000c, 0x9bc00015, 0x9700000a, 0x80000cd6, 0x51b80020, 0x53300020,
+ 0x7f97801a, 0x7f37001a, 0x7f3b000c, 0x9bc0000d, 0x97800002, 0x80000cd6, 0x9a000018, 0xd8400013,
+ 0x28200001, 0x80000ca7, 0x18dc0031, 0x95c00003, 0xc435c40b, 0x9740fffd, 0xd800002a, 0x80001b70,
+ 0xc4280032, 0x2aa80008, 0xce800013, 0xc40d325b, 0x97000002, 0x800012c2, 0xc438001d, 0x1bb81ff0,
+ 0x7f8cc00a, 0xccc1325b, 0xc411325d, 0x251001ef, 0xcd01325d, 0x80001b70, 0xc428000e, 0xc43c0007,
+ 0x2aa80008, 0xc438001d, 0xce800013, 0x13f4000c, 0x9bc00006, 0xc43d3256, 0x1bf0060b, 0x1bfc0077,
+ 0x7ff3c00a, 0x80000cf4, 0xc43d325a, 0x1bfc0677, 0x13fc0017, 0x04300100, 0x1bb81fe8, 0x7f73400a,
+ 0xc032800b, 0x7fb7800a, 0x7ff3c00a, 0x7ffbc00a, 0xcfc1325b, 0x80000c16, 0xc43c0007, 0x7c40c001,
+ 0x18d42011, 0x17fc0001, 0x18d001e8, 0x24cc007f, 0x7cd4c00a, 0x9bc00004, 0xd8400029, 0xc428005e,
+ 0x96800e6c, 0x7c414001, 0x50580020, 0x7d59401a, 0xd1400072, 0xc8140072, 0x596001fc, 0x12200009,
+ 0x7ce0c00a, 0x7c418001, 0x505c0020, 0x7d9d801a, 0x7c41c001, 0x50600020, 0x7de1c01a, 0x7c420001,
+ 0xccc0001b, 0xd140001d, 0xd180001f, 0xd1c00020, 0xd8400021, 0x95000010, 0x04300000, 0xc428000f,
+ 0x9a80ffff, 0xc8240010, 0x7e5e800c, 0x9bc00015, 0x9a80000c, 0x9b000024, 0x28300001, 0x122c0004,
+ 0x06ec0001, 0x0aec0001, 0x9ac0ffff, 0xd8400021, 0x80000d1f, 0xc428000f, 0x9a80ffff, 0xc8240010,
+ 0x566c0020, 0xc428000e, 0x2aa80008, 0xce800013, 0xce413261, 0xcec13262, 0xd800002a, 0x80001b70,
+ 0xc4340032, 0x2b740008, 0xcf400013, 0xc40d325b, 0x96800005, 0x566c0020, 0xce413261, 0xcec13262,
+ 0x800012c2, 0xc438001d, 0x1bb81fe8, 0x7f8cc00a, 0xccc1325b, 0xc411325d, 0x251001ef, 0xcd01325d,
+ 0x80001b70, 0xc43c0007, 0xc438001d, 0xc428000e, 0x2aa80008, 0xce800013, 0x13f4000c, 0x9bc00006,
+ 0xc43d3256, 0x1bf0060b, 0x1bfc0077, 0x7ff3c00a, 0x80000d57, 0xc43d325a, 0x1bfc0677, 0x13fc0017,
+ 0x04300100, 0x1bb81fe8, 0x7f73400a, 0xc0328009, 0x7fb7800a, 0x7ff3c00a, 0x7ffbc00a, 0xcfc1325b,
+ 0x80000c16, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0xc4253246, 0xc4113245, 0x04143000, 0xcd413267,
+ 0x52640020, 0x7e51001a, 0xc4153267, 0x7d2d0011, 0x19640057, 0x19580213, 0x19600199, 0x7da6400a,
+ 0x7e26400a, 0xd1000025, 0xce400024, 0xcdc00026, 0xd8400027, 0x04142000, 0xcfc00013, 0xcd413267,
+ 0xc4153267, 0x99400001, 0x90000000, 0x7c40c001, 0x18d001e8, 0x18d40030, 0x18d80034, 0x05280d83,
+ 0x7c420001, 0x7c424001, 0x86800000, 0x80000d8a, 0x8000016a, 0x80000d95, 0x80000db1, 0x8000016a,
+ 0x80000d95, 0x80000dbc, 0x11540010, 0x7e010001, 0x8c00187c, 0x7d75400a, 0xcd400013, 0xd4610000,
+ 0x9580f3d8, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0xd8000016, 0x526c0020, 0x18e80058,
+ 0x7e2ec01a, 0xd2c00072, 0xc82c0072, 0x5ae0073a, 0x7ea2800a, 0x9940000a, 0xce800024, 0xd2c00025,
+ 0xd4400026, 0xd8400027, 0x9580f3c6, 0xc4380012, 0x9b80ffff, 0x7c408001, 0x88000000, 0xdc3a0000,
+ 0x0bb80001, 0xce800024, 0xd2c00025, 0xcc400026, 0xd8400027, 0x9b80fffb, 0x9980fff5, 0x7c408001,
+ 0x88000000, 0xc02a0001, 0x2aa80001, 0x16200002, 0xce800013, 0xce01c405, 0xd441c406, 0x9580f3b1,
+ 0xc439c409, 0x97800001, 0x7c408001, 0x88000000, 0xc424000b, 0x32640002, 0x9a40000b, 0x11540010,
+ 0x29540002, 0xcd400013, 0xd4610000, 0x9580f3a5, 0xd8400013, 0xc439c040, 0x97800001, 0x7c408001,
+ 0x88000000, 0xd4400078, 0x80000168, 0xd8400029, 0xc40c005e, 0x94c00da7, 0x7c40c001, 0x50500020,
+ 0x7cd0c01a, 0xd0c00072, 0xc8280072, 0x5aac007e, 0x12d80017, 0x7c41c001, 0x7d9d800a, 0x56a00020,
+ 0x2620ffff, 0x7da1800a, 0x51980020, 0x7e82400a, 0x7e58c01a, 0x19d4003d, 0x28182002, 0x99400030,
+ 0x8c00104f, 0xc430000d, 0xc4340035, 0xd800002a, 0xcd800013, 0xc8140023, 0xc4180081, 0x13300005,
+ 0xc011000f, 0xc4240004, 0x11a00002, 0x7c908009, 0x12640004, 0x7d614011, 0xc4100026, 0x05980008,
+ 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x3e280008, 0x20880188, 0x54ec0020, 0x7cb4800a, 0xc4300027,
+ 0x04380008, 0xd1400025, 0xcf000024, 0x20240090, 0x7ca48001, 0xcc800026, 0xccc00026, 0xcec00026,
+ 0xcec00026, 0x28240004, 0xcc000026, 0x0a640001, 0x9a40fffe, 0x9a800005, 0x32280000, 0x9a800002,
+ 0x9a000000, 0x7c018001, 0xd8400027, 0xd8000016, 0xcf80003a, 0xd901a2a4, 0x80001037, 0xc418000e,
+ 0x29980008, 0xcd800013, 0xc421326c, 0x1624001f, 0x9a40fffe, 0xd841325f, 0xd8800033, 0xc43c0009,
+ 0x27fc0004, 0x97c0fffe, 0xd8000039, 0xd0c00038, 0xc43c0022, 0x9bc0ffff, 0xd8800034, 0xc429325f,
+ 0x26ac0001, 0x9ac0fffe, 0x26ac0002, 0x96c00003, 0xd800002a, 0x80001b70, 0xc43c0007, 0xc430001e,
+ 0xd8800033, 0x13f4000c, 0x1b301ff0, 0x2b300300, 0x2330003f, 0x7f37000a, 0x9680000b, 0xc43c0009,
+ 0x27fc0004, 0x97c0fffe, 0xd8400039, 0xd0c00038, 0xc43c0022, 0x9bc0ffff, 0xcf01325b, 0xd8800034,
+ 0x80000c16, 0xd8800034, 0x8c0001a2, 0x80001b70, 0xcc80003b, 0x24b00008, 0xc418000e, 0x1330000a,
+ 0x18ac0024, 0x2b304000, 0x7c40c001, 0xcec00008, 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a,
+ 0x29980008, 0xcd800013, 0xc4113249, 0x1910003e, 0x99000002, 0xd840003d, 0x7c410001, 0xd4400078,
+ 0x51100020, 0xcf01326c, 0x7cd0c01a, 0xc421326c, 0x12a80014, 0x2220003f, 0x7e2a000a, 0xcd800013,
+ 0xce01326c, 0xd8800033, 0xc43c0009, 0x27fc0004, 0x97c0fffe, 0xd8000039, 0xd0c00038, 0xc43c0022,
+ 0x9bc0ffff, 0xd8800034, 0x80001190, 0x7c40c001, 0x18dc003d, 0x95c00004, 0x041c0001, 0x042c01c8,
+ 0x8c000d61, 0x18d40030, 0x18d001e8, 0x18fc0034, 0x24e8000f, 0x06a80e71, 0x7c418001, 0x7c41c001,
+ 0x86800000, 0x80000edd, 0x80000e91, 0x80000e91, 0x80000ea1, 0x80000eaa, 0x80000e7c, 0x80000e7f,
+ 0x80000e7f, 0x80000e87, 0x80000e8f, 0x8000016a, 0x51dc0020, 0x7d9e001a, 0x80000ee6, 0xc420000e,
+ 0x2a200008, 0xce000013, 0xc4213262, 0xc4253261, 0x52200020, 0x7e26001a, 0x80000ee6, 0xc420000e,
+ 0x2a200008, 0xce000013, 0xc4213264, 0xc4253263, 0x52200020, 0x7e26001a, 0x80000ee6, 0xc820001f,
+ 0x80000ee6, 0x18e82005, 0x51e00020, 0x2aa80000, 0x7da1801a, 0xd1800072, 0xc8180072, 0x59a001fc,
+ 0x12200009, 0x7ea2800a, 0xce80001c, 0xd180001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc8200011,
+ 0x80000ee6, 0x15980002, 0xd8400013, 0xcd81c400, 0xc421c401, 0x95400041, 0xc425c401, 0x52640020,
+ 0x7e26001a, 0x80000ee6, 0x31ac2580, 0x9ac00011, 0x31ac260c, 0x9ac0000f, 0x31ac0800, 0x9ac0000d,
+ 0x31ac0828, 0x9ac0000b, 0x31ac2440, 0x9ac00009, 0x31ac2390, 0x9ac00007, 0x31ac0093, 0x9ac00005,
+ 0x31ac31dc, 0x9ac00003, 0x31ac31e6, 0x96c00004, 0xc4340004, 0xd8400008, 0x80000ede, 0x39ac7c06,
+ 0x3db07c00, 0x9ac00003, 0x97000002, 0x80000ebc, 0x39acc337, 0x3db0c330, 0x9ac00003, 0x97000002,
+ 0x80000ebc, 0x39acc335, 0x3db0c336, 0x9ac00003, 0x97000002, 0x80000ebc, 0x39ac9002, 0x3db09001,
+ 0x9ac00003, 0x97000002, 0x80000ebc, 0x39ac9012, 0x3db09011, 0x9ac00003, 0x97000002, 0x80000ebc,
+ 0x39acec70, 0x3db0ec6f, 0x9ac00003, 0x97000002, 0x80000ebc, 0xc4340004, 0xd8400013, 0xc5a10000,
+ 0x95400005, 0x05980001, 0xc5a50000, 0x52640020, 0x7e26001a, 0xcf400008, 0x05280eea, 0x7c418001,
+ 0x7c41c001, 0x86800000, 0x80000ef1, 0x8000016a, 0x80000efe, 0x80000f11, 0x80000f2e, 0x80000efe,
+ 0x80000f1f, 0xc4340004, 0xd8400013, 0xce190000, 0x95400005, 0x05980001, 0x56200020, 0xce190000,
+ 0xcf400008, 0x97c0f26f, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0x51ec0020, 0x18e80058,
+ 0x7daec01a, 0xd2c00072, 0xc82c0072, 0x5af8073a, 0x7eba800a, 0xd2c00025, 0xce800024, 0xce000026,
+ 0x95400003, 0x56240020, 0xce400026, 0xd8400027, 0x97c0f25c, 0xc4380012, 0x9b80ffff, 0x7c408001,
+ 0x88000000, 0xc02a0001, 0x2aa80001, 0x15980002, 0xce800013, 0xcd81c405, 0xce01c406, 0x95400003,
+ 0x56240020, 0xce41c406, 0x97c0f24e, 0xc439c409, 0x97800001, 0x7c408001, 0x88000000, 0xc424000b,
+ 0x32640002, 0x9a40f247, 0xd8800013, 0xce190000, 0x95400004, 0x05980001, 0x56200020, 0xce190000,
+ 0x97c0f240, 0xd8400013, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0x31ac2580, 0x9ac00011,
+ 0x31ac260c, 0x9ac0000f, 0x31ac0800, 0x9ac0000d, 0x31ac0828, 0x9ac0000b, 0x31ac2440, 0x9ac00009,
+ 0x31ac2390, 0x9ac00007, 0x31ac0093, 0x9ac00005, 0x31ac31dc, 0x9ac00003, 0x31ac31e6, 0x96c00004,
+ 0xc4340004, 0xd8400008, 0x80000ef2, 0x39ac7c06, 0x3db07c00, 0x9ac00003, 0x97000002, 0x80000f40,
+ 0x39acc337, 0x3db0c330, 0x9ac00003, 0x97000002, 0x80000f40, 0x39acc335, 0x3db0c336, 0x9ac00003,
+ 0x97000002, 0x80000f40, 0x39acec70, 0x3db0ec6f, 0x9ac00003, 0x97000002, 0x80000f40, 0x39ac9002,
+ 0x3db09002, 0x9ac00003, 0x97000002, 0x80000f40, 0x39ac9012, 0x3db09012, 0x9ac00003, 0x97000002,
+ 0x80000f40, 0x80000ef1, 0xc40c0006, 0x98c0ffff, 0x7c40c001, 0x7c410001, 0x7c414001, 0x7c418001,
+ 0x7c41c001, 0x7c43c001, 0x95c00001, 0xc434000e, 0x2b740008, 0x2b780001, 0xcf400013, 0xd8c1325e,
+ 0xcf80001a, 0xd8400013, 0x7c034001, 0x7c038001, 0x18e0007d, 0x32240003, 0x9a400006, 0x32240000,
+ 0x9a400004, 0xcd01c080, 0xcd41c081, 0x80000f88, 0x51640020, 0x7e52401a, 0xd2400072, 0xc8280072,
+ 0xce81c080, 0x56ac0020, 0x26f0ffff, 0xcf01c081, 0x1af000fc, 0x1334000a, 0x24e02000, 0x7f63400a,
+ 0x18e00074, 0x32240003, 0x9a400006, 0x32240000, 0x9a400004, 0xcd81c082, 0xcdc1c083, 0x80000f9d,
+ 0x51e40020, 0x7e5a401a, 0xd2400072, 0xc8280072, 0xce81c082, 0x56ac0020, 0x26f0ffff, 0xcf01c083,
+ 0x1af000fc, 0x13380016, 0x18e00039, 0x12200019, 0x7fa3800a, 0x7fb7800a, 0x18e0007d, 0x1220001d,
+ 0x7fa3800a, 0x18e00074, 0x12200014, 0x7fa3800a, 0xcf81c078, 0xcfc1c084, 0x80000c16, 0x7c40c001,
+ 0x18dc003d, 0x95c00004, 0x041c0000, 0x042c01c8, 0x8c000d61, 0x18d001e8, 0x31140005, 0x99400003,
+ 0x31140006, 0x95400002, 0x8c00104f, 0x05280fb7, 0x28140002, 0xcd400013, 0x86800000, 0x80000fbe,
+ 0x80000fbe, 0x80000fc2, 0x80000fbe, 0x80000fd1, 0x80000ff2, 0x80000ff2, 0x24cc003f, 0xccc1a2a4,
+ 0x7c408001, 0x88000000, 0x7c414001, 0x18e80039, 0x52a8003b, 0x50580020, 0x24cc003f, 0x7d59401a,
+ 0xd1400072, 0xc8140072, 0x7d69401a, 0xc41c0017, 0x99c0ffff, 0xd140004b, 0xccc1a2a4, 0x7c408001,
+ 0x88000000, 0xc414000d, 0x04180001, 0x24cc003f, 0x7d958004, 0xcd800035, 0xccc1a2a4, 0xc43c000e,
+ 0x2bfc0008, 0xcfc00013, 0xc43d3249, 0x1bfc003e, 0x97c00002, 0xd8400074, 0xc4100019, 0x7d150005,
+ 0x25100001, 0x9500000b, 0x97c0fffc, 0xc4180021, 0x159c0011, 0x259800ff, 0x31a00003, 0x31a40001,
+ 0x7e25800a, 0x95c0fff5, 0x9580fff4, 0x80000fef, 0xc411326f, 0x1d100010, 0xcd01326f, 0x97c00002,
+ 0xd8000074, 0x80001b70, 0x04380000, 0xc430000d, 0xc8140023, 0xc4180081, 0x13300005, 0xc011000f,
+ 0xc4240004, 0x33b40003, 0x97400003, 0xc0340008, 0x80000ffe, 0xc4340035, 0x11a00002, 0x7c908009,
+ 0x12640004, 0x7d614011, 0xc4100026, 0x05980008, 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x282c2002,
+ 0x208801a8, 0x3e280008, 0x7cb4800a, 0xcec00013, 0xc4300027, 0x042c0008, 0xd1400025, 0xcf000024,
+ 0x20240030, 0x7ca48001, 0xcc800026, 0xccc00026, 0x9b800013, 0xcc400026, 0x7c414001, 0x28340000,
+ 0xcf400013, 0x507c0020, 0x7d7d401a, 0xd1400072, 0xc8140072, 0x557c0020, 0x28342002, 0xcf400013,
+ 0xcd400026, 0xcfc00026, 0xd4400026, 0x9a80000e, 0x32280000, 0x9a80000b, 0x8000102f, 0xcc000026,
+ 0xcc000026, 0xcc000026, 0xcc000026, 0xcc000026, 0x9a800005, 0x32280000, 0x9a800002, 0x9a000000,
+ 0x7c018001, 0xcc000026, 0xd8400027, 0x1cccfe08, 0xd8800013, 0xcec0003a, 0xccc1a2a4, 0xc43c000e,
+ 0x2bfc0008, 0xcfc00013, 0xc43d3249, 0x1bfc003e, 0x9bc00007, 0xc428000e, 0x16a80008, 0xce800009,
+ 0xc42c005e, 0x96c00b33, 0xd840003c, 0xc4200025, 0x7da2400f, 0x7da28002, 0x7e1ac002, 0x0aec0001,
+ 0x96400002, 0x7d2ac002, 0x3ef40010, 0x9b40f11d, 0x04380030, 0xcf81325e, 0x80000c16, 0xde410000,
+ 0xdcc10000, 0xdd010000, 0xdd410000, 0xdd810000, 0xddc10000, 0xde010000, 0xc40c000e, 0x7c024001,
+ 0x28cc0008, 0xccc00013, 0xc8100086, 0x5510003f, 0xc40d3249, 0x18cc003e, 0x98c00003, 0x99000011,
+ 0x80001075, 0x9900000c, 0xc40c0026, 0xc4100081, 0xc4140025, 0x7d15800f, 0x7d15c002, 0x7d520002,
+ 0x0a200001, 0x95800002, 0x7cde0002, 0x3e20001a, 0x9a000009, 0x040c0030, 0xccc1325e, 0x80001071,
+ 0xd9c00036, 0xd8400029, 0xc40c005e, 0x94c00b01, 0x04240001, 0xdc200000, 0xdc1c0000, 0xdc180000,
+ 0xdc140000, 0xdc100000, 0xdc0c0000, 0x96400004, 0xdc240000, 0xdc0c0000, 0x80000c16, 0xdc240000,
+ 0x90000000, 0xcc40003f, 0xd8c00010, 0xc4080029, 0xcc80003b, 0xc418000e, 0x18a800e5, 0x1d980008,
+ 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013, 0x18a400e5, 0x12500009, 0x248c0008, 0x94c00006,
+ 0x200c006d, 0x7cd0c00a, 0xccc1326c, 0xc421326c, 0x96000001, 0xcd800013, 0x200c0228, 0x7cd0c00a,
+ 0xccc1326c, 0xc421326c, 0x96000001, 0xc40c002a, 0xc410002b, 0x18881fe8, 0x18d4072c, 0x18cc00d1,
+ 0x7cd4c00a, 0x3094000d, 0x38d80000, 0x311c0003, 0x99400006, 0x30940007, 0x1620001f, 0x9940001d,
+ 0x9a000023, 0x800010c4, 0x9580001a, 0x99c00019, 0xccc00041, 0x25140001, 0xc418002c, 0x9940000d,
+ 0x259c007f, 0x95c00013, 0x19a00030, 0xcdc0001b, 0xd8400021, 0xd8400022, 0xc430000f, 0x17300001,
+ 0x9b00fffe, 0x9a000012, 0xd8400023, 0x800010cb, 0x199c0fe8, 0xcdc0001b, 0xd8400021, 0xd8400023,
+ 0xc430000f, 0x17300001, 0x9b00fffe, 0x800010cb, 0xd8c00010, 0xd8000022, 0xd8000023, 0xc430005e,
+ 0x97000aac, 0x7c408001, 0x88000000, 0xc43c000e, 0xc434002e, 0x2bfc0008, 0x2020002c, 0xcfc00013,
+ 0xce01326c, 0x17780001, 0x27740001, 0x07a810d8, 0xcf400010, 0xc421326c, 0x96000001, 0x86800000,
+ 0x80000168, 0x80000aa7, 0x80000bfc, 0x800012e9, 0x8000104c, 0xcc400040, 0xd8800010, 0xc4180032,
+ 0x29980008, 0xcd800013, 0x200c007d, 0xccc1325b, 0xc411325b, 0x95000001, 0x7c408001, 0x88000000,
+ 0x28240007, 0xde430000, 0xd4400078, 0x80001190, 0xcc80003b, 0x24b00008, 0xc418000e, 0x1330000a,
+ 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013, 0xc40d3249, 0x18cc003e,
+ 0x98c00002, 0xd840003d, 0x2b304000, 0xcf01326c, 0xc431326c, 0x7c40c001, 0x7c410001, 0x7c414001,
+ 0x192400fd, 0x50580020, 0x7d59401a, 0x7c41c001, 0x06681110, 0x7c420001, 0xcc400078, 0x18ac0024,
+ 0x19180070, 0x19100078, 0xcec00008, 0x18f40058, 0x5978073a, 0x7f7b400a, 0x97000001, 0x86800000,
+ 0x80001117, 0x80001118, 0x80001122, 0x8000112d, 0x80001130, 0x80001133, 0x8000016a, 0x8000117b,
+ 0x24ec0f00, 0x32ec0600, 0x96c00003, 0xc4300006, 0x9b00ffff, 0xd1400025, 0xcf400024, 0xcdc00026,
+ 0xd8400027, 0x8000117b, 0x24ec0f00, 0x32ec0600, 0x96c00003, 0xc4300006, 0x9b00ffff, 0xd1400025,
+ 0xcf400024, 0xcdc00026, 0xce000026, 0xd8400027, 0x8000117b, 0xc81c001f, 0x55e00020, 0x80001122,
+ 0xc81c0020, 0x55e00020, 0x80001122, 0x8c00116b, 0xd8400013, 0xc02a0200, 0x7e8e8009, 0x22a8003d,
+ 0x22a80074, 0x2774001c, 0x13740014, 0x7eb6800a, 0x25ecffff, 0x55700020, 0x15f40010, 0x13740002,
+ 0x275c001f, 0x95c00027, 0x7c018001, 0x7f41c001, 0x15dc0002, 0x39e00008, 0x25dc0007, 0x7dc1c01e,
+ 0x05dc0001, 0x96000004, 0x05e40008, 0x8c00116e, 0x80001168, 0x7dc2001e, 0x06200001, 0x05e40008,
+ 0x7e62000e, 0x9a000004, 0x7da58001, 0x8c00116e, 0x80001165, 0x7dc2001e, 0x06200001, 0x7e1a0001,
+ 0x05cc0008, 0x7e0d000e, 0x95000007, 0x7e02401e, 0x06640001, 0x06640008, 0x05d80008, 0x8c00116e,
+ 0x80001168, 0x7dc2401e, 0x06640001, 0x7da58001, 0x8c00116e, 0x05e00008, 0x7da2000c, 0x9600ffe6,
+ 0x17640002, 0x8c00116e, 0x80001190, 0xc4200006, 0x9a00ffff, 0x90000000, 0x8c00116b, 0xc420000e,
+ 0x2a200001, 0xce00001a, 0xce81c078, 0xcec1c080, 0xcc01c081, 0xcd41c082, 0xcf01c083, 0x12640002,
+ 0x22640435, 0xce41c084, 0x90000000, 0x0528117e, 0x312c0003, 0x86800000, 0x80001190, 0x80001185,
+ 0x80001182, 0x80001182, 0xc4300012, 0x9b00ffff, 0x9ac0000c, 0xc03a0400, 0xc4340004, 0xd8400013,
+ 0xd8400008, 0xc418000e, 0x15980008, 0x1198001c, 0x7d81c00a, 0xcdc130b7, 0xcf8130b5, 0xcf400008,
+ 0x04240008, 0xc418000e, 0xc41c0049, 0x19a000e8, 0x29a80008, 0x7de2c00c, 0xce800013, 0xc421325e,
+ 0x26200010, 0xc415326d, 0x9a000006, 0xc420007d, 0x96000004, 0x96c00003, 0xce40003e, 0x800011a3,
+ 0x7d654001, 0xcd41326d, 0x7c020001, 0x96000005, 0xc4100026, 0xc4240081, 0xc4140025, 0x800011b6,
+ 0xc4253279, 0xc415326d, 0xc431326c, 0x2730003f, 0x3b380006, 0x97800004, 0x3f38000b, 0x9b800004,
+ 0x800011b4, 0x04300006, 0x800011b4, 0x0430000b, 0x04380002, 0x7fb10004, 0x7e57000f, 0x7e578002,
+ 0x7d67c002, 0x0be40001, 0x97000002, 0x7d3a4002, 0x202c002c, 0xc421325e, 0x04280020, 0xcec1326c,
+ 0x26200010, 0x3e640010, 0x96000003, 0x96400002, 0xce81325e, 0xc4300028, 0xc434002e, 0x17780001,
+ 0x27740001, 0x07a811cf, 0x9b00feb8, 0xcf400010, 0xc414005e, 0x954009a7, 0x86800000, 0x80000168,
+ 0x80000aa7, 0x80000bfc, 0x800012e9, 0x80000168, 0x8c00120d, 0x7c40c001, 0xccc1c07c, 0xcc41c07d,
+ 0xcc41c08c, 0x7c410001, 0xcc41c079, 0xcd01c07e, 0x7c414001, 0x18f0012f, 0x18f40612, 0x18cc00c1,
+ 0x7f73400a, 0x7cf7400a, 0x39600004, 0x9a000002, 0xc0140004, 0x11600001, 0x18fc003e, 0x9740001c,
+ 0xcf400041, 0xc425c07f, 0x97c00003, 0x166c001f, 0x800011ee, 0x1a6c003e, 0x96c00006, 0x04200002,
+ 0x0a200001, 0x9a00ffff, 0xd8400013, 0x800011e8, 0xc428002c, 0x96800010, 0x26ac007f, 0xcec0001b,
+ 0xd8400021, 0x1ab00030, 0x1aac0fe8, 0xc434000f, 0x9b40ffff, 0x97000008, 0xcec0001b, 0xd8400021,
+ 0xc434000f, 0x9b40ffff, 0x80001205, 0x0a200001, 0x9a00ffff, 0xd8400013, 0xc425c07f, 0x166c001f,
+ 0x11600001, 0x9ac0fffa, 0x8c001232, 0x7c408001, 0x88000000, 0xd8000033, 0xc438000b, 0xc43c0009,
+ 0x27fc0001, 0x97c0fffe, 0xd8400013, 0xd841c07f, 0xc43dc07f, 0x1bfc0078, 0x7ffbc00c, 0x97c0fffd,
+ 0x90000000, 0xc03a2800, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04380040,
+ 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380060, 0xcf80001b, 0xd8400021, 0xc438000f,
+ 0x9b80ffff, 0x04380002, 0x0bb80001, 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010,
+ 0x9bc0fffa, 0x90000000, 0xd8400013, 0xd801c07f, 0xd8400013, 0xc43dc07f, 0xcfc00078, 0xd8000034,
+ 0x90000000, 0xc03ae000, 0xcf81c200, 0xc03a0800, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079,
+ 0xcc01c07e, 0x04380040, 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380002, 0x0bb80001,
+ 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010, 0x9bc0fffa, 0x90000000, 0xc03ae000,
+ 0xcf81c200, 0xc03a4000, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04380002,
+ 0x0bb80001, 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010, 0x9bc0fffa, 0x90000000,
+ 0xc40c0007, 0x30d00002, 0x99000052, 0xd8400029, 0xc424005e, 0x9640090f, 0x7c410001, 0xc428000e,
+ 0x1514001f, 0x19180038, 0x2aa80008, 0x99400030, 0x30dc0001, 0xce800013, 0x99c0000a, 0xc42d324e,
+ 0xc431324d, 0x52ec0020, 0x7ef2c01a, 0xc435324f, 0xc4293256, 0x1ab0c006, 0x52ec0008, 0x8000127f,
+ 0xc42d3258, 0xc4313257, 0x52ec0020, 0x7ef2c01a, 0xc4353259, 0xc429325a, 0x1ab0c012, 0x07740001,
+ 0x04240002, 0x26a0003f, 0x7e624004, 0x7f67800f, 0x97800002, 0x04340000, 0x53740002, 0x7ef6c011,
+ 0x1ab42010, 0x16a8000c, 0x26a80800, 0x2b740000, 0x7f73400a, 0x7f6b400a, 0xcf40001c, 0xd2c0001e,
+ 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4100011, 0x1514001f, 0x99400006, 0x9980000a, 0x8c0012e1,
+ 0xc40c0007, 0x04100000, 0x80001267, 0xd800002a, 0xc424005e, 0x964008d7, 0xd9800036, 0x80000c16,
+ 0xc42c001d, 0x95c00005, 0xc431325a, 0x1b300677, 0x11dc000c, 0x800012aa, 0xc4313256, 0x1b34060b,
+ 0x1b300077, 0x7f37000a, 0x13300017, 0x04340100, 0x26ec00ff, 0xc03a8002, 0x7ef6c00a, 0x7edec00a,
+ 0x7f3b000a, 0x7ef2c00a, 0xcec1325b, 0x80000c16, 0xc4140032, 0xc410001d, 0x29540008, 0xcd400013,
+ 0xc40d325b, 0x1858003f, 0x251000ff, 0x99800007, 0x7d0cc00a, 0xccc1325b, 0xc411325d, 0x251001ef,
+ 0xcd01325d, 0x80000168, 0x18d0006c, 0x18d407f0, 0x9900000e, 0x04100002, 0xc4193256, 0xc41d324f,
+ 0x2598003f, 0x7d190004, 0x7d5d4001, 0x7d52000f, 0x9a000003, 0xcd41324f, 0x800012d8, 0x7d514002,
+ 0xcd41324f, 0x800012d8, 0xc4193259, 0xc41d325a, 0x7d958001, 0x7dd5c002, 0xcd813259, 0xcdc1325a,
+ 0xc411325d, 0x251001ef, 0xcd01325d, 0x1ccc001e, 0xccc1325b, 0xc40d325b, 0x94c00001, 0x7c408001,
+ 0x88000000, 0xc40c0021, 0xc4340028, 0x14f00010, 0xc4380030, 0xc43c0007, 0x9b000004, 0x9b40000c,
+ 0x9b80000f, 0x90000000, 0x17300001, 0x9b000005, 0xccc00037, 0x8c000190, 0xd8000032, 0x90000000,
+ 0xd8000028, 0xd800002b, 0x80000168, 0xd980003f, 0x97c00002, 0xd9c0003f, 0x80001082, 0xd9800040,
+ 0x97c00002, 0xd9c00040, 0x800010de, 0xc43c0007, 0x33f80003, 0x97800051, 0xcc80003b, 0x24b00008,
+ 0xc418000e, 0x1330000a, 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013,
+ 0xc4353249, 0x1b74003e, 0x9b400002, 0xd840003d, 0x2b304000, 0xcf01326c, 0xc431326c, 0x97000001,
+ 0x7c434001, 0x1b4c00f8, 0x7c410001, 0x7c414001, 0x50700020, 0x04e81324, 0x18ac0024, 0x7c41c001,
+ 0x50600020, 0xcc400078, 0x30e40004, 0x9a400007, 0x7d71401a, 0x596401fc, 0x12640009, 0x1b74008d,
+ 0x7e76400a, 0x2a640000, 0xcec00008, 0x86800000, 0x8000016a, 0x8000016a, 0x8000016a, 0x8000016a,
+ 0x8000132c, 0x8000133b, 0x80001344, 0x8000016a, 0xc4340004, 0xd8400013, 0xd8400008, 0xc42530b5,
+ 0x1a68003a, 0x9a80fffe, 0x2024003a, 0xc418000e, 0x25980700, 0x11980014, 0x7d19000a, 0xcd0130b7,
+ 0xce4130b5, 0xcf400008, 0x80001190, 0xce40001c, 0xd140001e, 0xd8400021, 0xc428000f, 0x9a80ffff,
+ 0xc4240011, 0x7de6800f, 0x9a80ffea, 0x80001190, 0xce40001c, 0xd140001e, 0xd8400021, 0xc428000f,
+ 0x9a80ffff, 0xc8240011, 0x7de1c01a, 0x7de6800f, 0x9a80ffe0, 0x80001190, 0x8c00104f, 0x28182002,
+ 0xc430000d, 0xc4340035, 0xcd800013, 0xc8140023, 0xc4180081, 0x13300005, 0xc4240004, 0x11a00002,
+ 0x12640004, 0x7d614011, 0xc4100026, 0x05980008, 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x3e280008,
+ 0x7cb4800a, 0xc4300027, 0x042c0008, 0xd1400025, 0xcf000024, 0x20240030, 0x7ca48001, 0xcc800026,
+ 0x7c434001, 0x1b4c00f8, 0xcf400026, 0xcc400026, 0x28340000, 0xcf400013, 0x7c414001, 0x507c0020,
+ 0x30e40004, 0x9a400005, 0x7d7d401a, 0xd1400072, 0xc8140072, 0x557c0020, 0x28342002, 0xcf400013,
+ 0xcd400026, 0xcfc00026, 0xd4400026, 0xcc000026, 0x9a800005, 0x32280000, 0x9a800002, 0x9a000000,
+ 0x7c018001, 0xd8400027, 0xd8800013, 0x04380028, 0xcec0003a, 0xcf81a2a4, 0x80001037, 0xd8400029,
+ 0xc40c005e, 0x94c007eb, 0x7c40c001, 0x50500020, 0x7d0d001a, 0xd1000072, 0xc8100072, 0x591c01fc,
+ 0x11dc0009, 0x45140210, 0x595801fc, 0x11980009, 0x29dc0000, 0xcdc0001c, 0xd140001e, 0xd8400021,
+ 0xc418000f, 0x9980ffff, 0xc4200011, 0x1624001f, 0x96400069, 0xc40c000e, 0x28cc0008, 0xccc00013,
+ 0xce013249, 0x1a307fe8, 0xcf00000a, 0x23304076, 0xd1000001, 0xcf000001, 0xc41d3254, 0xc4253256,
+ 0x18cc00e8, 0x10cc0015, 0x4514020c, 0xd140001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4200011,
+ 0xce013248, 0x1a2001e8, 0x12200014, 0x2a204001, 0xce000013, 0x1a64003c, 0x1264001f, 0x11dc0009,
+ 0x15dc000b, 0x7dcdc00a, 0x7e5dc00a, 0xcdc00100, 0xd8800013, 0xd8400010, 0xd800002a, 0xd8400008,
+ 0xcf00000d, 0xcf00000a, 0x8c001427, 0x04340022, 0x07740001, 0x04300010, 0xdf430000, 0x7c434001,
+ 0x7c408001, 0xd4412e01, 0x0434001e, 0xdf430000, 0xd4400078, 0xdf030000, 0xd4412e40, 0xd8400013,
+ 0xcc41c030, 0xcc41c031, 0x248dfffe, 0xccc12e00, 0xd8800013, 0xcc812e00, 0x7c434001, 0x7c434001,
+ 0x8c00142b, 0xd8000010, 0xc40c000e, 0x28cc0008, 0xccc00013, 0x45140248, 0xd140001e, 0xd8400021,
+ 0xc418000f, 0x9980ffff, 0xc8200011, 0xce013257, 0x56200020, 0xce013258, 0x0434000c, 0xdb000024,
+ 0xd1400025, 0xd8000026, 0xd8000026, 0xd8400027, 0x45540008, 0xd140001e, 0xd8400021, 0xc418000f,
+ 0x9980ffff, 0xc8200011, 0xce013259, 0x56200020, 0xc0337fff, 0x7f220009, 0xce01325a, 0x55300020,
+ 0x7d01c001, 0x042c01d0, 0x8c000d61, 0x06ec0004, 0x7f01c001, 0x8c000d61, 0x041c0002, 0x042c01c8,
+ 0x8c000d61, 0xc4380012, 0x9b80ffff, 0xd800002a, 0x80000aa7, 0xd800002a, 0x7c408001, 0x88000000,
+ 0xd8400029, 0x7c40c001, 0x50500020, 0x8c001427, 0x7cd0c01a, 0xc4200007, 0xd0c00072, 0xc8240072,
+ 0xd240001e, 0x7c414001, 0x19682011, 0x5a6c01fc, 0x12ec0009, 0x7eeac00a, 0x2aec0000, 0xcec0001c,
+ 0xd8400021, 0xc430000f, 0x9b00ffff, 0xc4180011, 0x7c438001, 0x99800007, 0xdf830000, 0xcfa0000c,
+ 0x8c00142b, 0xd4400078, 0xd800002a, 0x80001b70, 0x8c00142b, 0xd800002a, 0x80001b70, 0xd8000012,
+ 0xc43c0008, 0x9bc0ffff, 0x90000000, 0xd8400012, 0xc43c0008, 0x97c0ffff, 0x90000000, 0xc4380007,
+ 0x7c40c001, 0x17b80001, 0x18d40038, 0x7c410001, 0x9b800004, 0xd8400029, 0xc414005e, 0x9540073d,
+ 0x18c80066, 0x7c414001, 0x30880001, 0x7c418001, 0x94800008, 0x8c00187c, 0xcf400013, 0xc42c0004,
+ 0xd8400008, 0xcd910000, 0xcec00008, 0x7d410001, 0x043c0000, 0x7c41c001, 0x7c420001, 0x04240001,
+ 0x06200001, 0x4220000c, 0x0a640001, 0xcc000078, 0x9a40fffe, 0x24e80007, 0x24ec0010, 0xd8400013,
+ 0x9ac00006, 0xc42c0004, 0xd8400008, 0xc5310000, 0xcec00008, 0x80001465, 0x51540020, 0x7d15001a,
+ 0xd1000072, 0xc82c0072, 0xd2c0001e, 0x18f02011, 0x5aec01fc, 0x12ec0009, 0x7ef2c00a, 0x2aec0000,
+ 0xcec0001c, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc4300011, 0x96800012, 0x12a80001, 0x0aa80001,
+ 0x06a8146a, 0x7f1f0009, 0x86800000, 0x7f1b400f, 0x80001478, 0x7f1b400e, 0x80001478, 0x7f1b400c,
+ 0x8000147a, 0x7f1b400d, 0x8000147a, 0x7f1b400f, 0x8000147a, 0x7f1b400e, 0x8000147a, 0x7f334002,
+ 0x97400014, 0x8000147b, 0x9b400012, 0x9b800005, 0x9bc0001f, 0x7e024001, 0x043c0001, 0x8000144a,
+ 0xc40c0032, 0xc438001d, 0x28cc0008, 0xccc00013, 0xc43d325b, 0x1bb81ff0, 0x7fbfc00a, 0xcfc1325b,
+ 0xc411325d, 0x251001ef, 0xcd01325d, 0x80001b70, 0x94800007, 0x8c00187c, 0xcf400013, 0xc42c0004,
+ 0xd8400008, 0xcd910000, 0xcec00008, 0x9b800003, 0xd800002a, 0x80001b70, 0xc40c0032, 0x28cc0008,
+ 0xccc00013, 0xc40d325b, 0x800012c2, 0xc40c000e, 0xc43c0007, 0xc438001d, 0x28cc0008, 0xccc00013,
+ 0x13f4000c, 0x9bc00006, 0xc43d3256, 0x1bf0060b, 0x1bfc0077, 0x7ff3c00a, 0x800014a9, 0xc43d325a,
+ 0x1bfc0677, 0x04300100, 0x1bb81ff0, 0x7f73400a, 0xc0328007, 0x7fb7800a, 0x13fc0017, 0x7ff3c00a,
+ 0x7ffbc00a, 0xcfc1325b, 0xc03a0002, 0xc4340004, 0xd8400013, 0xd8400008, 0xcf8130b5, 0xcf400008,
+ 0x80000c16, 0x043c0000, 0xc414000e, 0x29540008, 0xcd400013, 0xc4193246, 0xc41d3245, 0x51980020,
+ 0x7dd9c01a, 0x45dc0390, 0xc4313267, 0x04183000, 0xcd813267, 0x1b380057, 0x1b340213, 0x1b300199,
+ 0x7f7b400a, 0x7f73400a, 0xcf400024, 0xd1c00025, 0xcc800026, 0x7c420001, 0xce000026, 0x7c424001,
+ 0xce400026, 0x7c428001, 0xce800026, 0x7c42c001, 0xcec00026, 0x7c430001, 0xcf000026, 0x7c434001,
+ 0xcf400026, 0x7c438001, 0xcf800026, 0xd8400027, 0xcd400013, 0x04182000, 0xcd813267, 0xd840004f,
+ 0x1a0800fd, 0x109c000a, 0xc4193265, 0x7dd9c00a, 0xcdc13265, 0x2620ffff, 0xce080228, 0x9880000e,
+ 0xce480250, 0xce880258, 0xd8080230, 0xd8080238, 0xd8080240, 0xd8080248, 0xd8080268, 0xd8080270,
+ 0xd8080278, 0xd8080280, 0xd800004f, 0x97c0ec75, 0x90000000, 0x040c0000, 0x041c0010, 0x26180001,
+ 0x09dc0001, 0x16200001, 0x95800002, 0x04cc0001, 0x99c0fffb, 0xccc80230, 0xd8080238, 0xd8080240,
+ 0xd8080248, 0x040c0000, 0xce480250, 0xce880258, 0x52a80020, 0x7e6a401a, 0x041c0020, 0x66580001,
+ 0x09dc0001, 0x56640001, 0x95800002, 0x04cc0001, 0x99c0fffb, 0xccc80260, 0xd8080268, 0xd8080270,
+ 0xd8080278, 0xd8080280, 0x040c0000, 0xcec80288, 0xcf080290, 0xcec80298, 0xcf0802a0, 0x040c0000,
+ 0x041c0010, 0xcf4802a8, 0x27580001, 0x09dc0001, 0x17740001, 0x95800002, 0x04cc0001, 0x99c0fffb,
+ 0xccc802b0, 0xd80802b8, 0x178c000b, 0x27b8003f, 0x7cf8c001, 0xcf8802c0, 0xccc802c8, 0xcf8802d0,
+ 0xcf8802d8, 0xd800004f, 0x97c00002, 0x90000000, 0x7c408001, 0x88000000, 0xc40c000e, 0x28cc0008,
+ 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c418001, 0x25b8ffff, 0xc4930240, 0xc48f0238, 0x04cc0001,
+ 0x24cc000f, 0x7cd2800c, 0x9a80000b, 0xc5230309, 0x2620ffff, 0x7e3a400c, 0x9a400004, 0x05100001,
+ 0x2510000f, 0x80001539, 0xcd08034b, 0xd4400078, 0x80000168, 0xc48f0230, 0xc4930240, 0x98c00004,
+ 0xcd880353, 0x8c00163f, 0xc49b0353, 0xc4930238, 0xc48f0228, 0x05100001, 0x2510000f, 0x7cd14005,
+ 0x25540001, 0x99400004, 0x05100001, 0x2510000f, 0x8000154f, 0xc48f0230, 0x7c41c001, 0xcd080238,
+ 0xcd08034b, 0x08cc0001, 0x2598ffff, 0x3d200008, 0xccc80230, 0xcd900309, 0xd8100319, 0x04340801,
+ 0x2198003f, 0xcf400013, 0xcd910ce7, 0xc4190ce6, 0x7d918005, 0x25980001, 0x9580fffd, 0x7d918004,
+ 0xcd810ce6, 0x9a000003, 0xcdd1054f, 0x8000156e, 0x090c0008, 0xcdcd050e, 0x040c0000, 0x110c0014,
+ 0x28cc4001, 0xccc00013, 0xcc41230a, 0xcc41230b, 0xcc41230c, 0xcc41230d, 0xcc480329, 0xcc48032a,
+ 0xcc4802e0, 0xd8000055, 0xc48f02e0, 0x24d8003f, 0x09940001, 0x44100001, 0x9580002c, 0x95400005,
+ 0x09540001, 0x51100001, 0x69100001, 0x8000157f, 0x24cc003f, 0xc4970290, 0xc49b0288, 0x51540020,
+ 0x7d59401a, 0xc49b02a0, 0xc49f0298, 0x51980020, 0x7d9d801a, 0x041c0040, 0x04200000, 0x7dcdc002,
+ 0x7d924019, 0x7d26400c, 0x09dc0001, 0x9a400008, 0x51100001, 0x06200001, 0x99c0fffa, 0xc48f0230,
+ 0xc4930240, 0x8c00163f, 0x80001579, 0x7d010021, 0x7d914019, 0xc4930238, 0x55580020, 0xcd480298,
+ 0xcd8802a0, 0x10d40010, 0x12180016, 0xc51f0309, 0x7d95800a, 0x7d62000a, 0x7dd9c00a, 0xd8400013,
+ 0xcdd00309, 0xce113320, 0xc48f02e0, 0xc49b02b0, 0x18dc01e8, 0x7dd9400e, 0xc48f0230, 0xc4930240,
+ 0x95c0001d, 0x95400003, 0x8c00163f, 0x800015aa, 0xc48f0238, 0xc4a302b8, 0x12240004, 0x7e5e400a,
+ 0xc4ab02a8, 0x04100000, 0xce4c0319, 0x7d9d8002, 0x7ea14005, 0x25540001, 0x99400004, 0x06200001,
+ 0x2620000f, 0x800015bc, 0x09dc0001, 0x04240001, 0x7e624004, 0x06200001, 0x7d25000a, 0x2620000f,
+ 0x99c0fff4, 0xd8400013, 0xcd0d3330, 0xce0802b8, 0xcd8802b0, 0xc4ab02e0, 0x1aa807f0, 0xc48f02d0,
+ 0xc49702d8, 0xc49b02c8, 0xc49f02c0, 0x96800028, 0x7d4e000f, 0x9600000b, 0x7d964002, 0x7e6a000f,
+ 0x96000003, 0x7d694001, 0x800015e9, 0x7cde4002, 0x7e6a000f, 0x96000008, 0x7de94001, 0x800015e9,
+ 0x7cd64002, 0x7e6a000e, 0x96000003, 0x7d694001, 0x800015e9, 0xc48f0230, 0xc4930240, 0x8c00163f,
+ 0x800015cd, 0xc4930238, 0x7d698002, 0xcd4802d8, 0x129c0008, 0xc50f0319, 0x11a0000e, 0x11140001,
+ 0xc4340004, 0xd8400008, 0xd8400013, 0x7e1e000a, 0x1198000a, 0xcd953300, 0x7e0e000a, 0x12a8000a,
+ 0xce953301, 0xce100319, 0xcf400008, 0xc4b70280, 0xc4b30278, 0x7f73800a, 0x536c0020, 0x7ef2c01a,
+ 0x9780eb68, 0x8c001608, 0xd8080278, 0xd8080280, 0x7c408001, 0x88000000, 0x043c0003, 0x80001609,
+ 0x043c0001, 0x30b40000, 0x9b400011, 0xc4b70258, 0xc4b30250, 0x53780020, 0x7fb3801a, 0x7faf8019,
+ 0x04300020, 0x04280000, 0x67b40001, 0x0b300001, 0x57b80001, 0x97400002, 0x06a80001, 0x9b00fffb,
+ 0xc4bb0260, 0x7fab8001, 0xcf880260, 0x04300020, 0x04280000, 0x66f40001, 0x0b300001, 0x56ec0001,
+ 0x97400005, 0x8c001628, 0xc4353247, 0x7f7f4009, 0x9b40fffe, 0x06a80001, 0x9b00fff7, 0x90000000,
+ 0x269c0007, 0x11dc0008, 0x29dc0008, 0x26a00018, 0x12200003, 0x7de1c00a, 0x26a00060, 0x06200020,
+ 0x16200001, 0x7de1c00a, 0xcdc00013, 0x90000000, 0x269c0018, 0x26a00007, 0x26a40060, 0x11dc0006,
+ 0x12200006, 0x16640001, 0x29dc0008, 0x7de1c00a, 0x7de5c00a, 0xcdc00013, 0x90000000, 0xc4b70228,
+ 0x05100001, 0x04cc0001, 0x2510000f, 0xccc80230, 0x7f514005, 0x25540001, 0x99400004, 0x05100001,
+ 0x2510000f, 0x80001644, 0xc4b30248, 0xcd080240, 0x7f130005, 0x27300001, 0x9b000002, 0x8c001688,
+ 0x8c00120d, 0x8c001219, 0x8c001232, 0x04300001, 0x04340801, 0x7f130004, 0xcf400013, 0xcf01051e,
+ 0xc42d051f, 0x7ed2c005, 0x26ec0001, 0x96c0fffd, 0xcf01051f, 0xd8000055, 0xc5170309, 0x195c07f0,
+ 0x196007f6, 0x04340000, 0x95c00008, 0x09dc0001, 0x04340001, 0x95c00005, 0x09dc0001, 0x53740001,
+ 0x6b740001, 0x80001665, 0xc4a702a0, 0xc4ab0298, 0x52640020, 0x7e6a401a, 0x7f634014, 0x7e76401a,
+ 0xc4300004, 0xd8400008, 0xd8400013, 0x56680020, 0xd8113320, 0xce480298, 0xce8802a0, 0xc5170319,
+ 0xc4b702b0, 0x255c000f, 0x7f5f4001, 0xd8113330, 0xcf4802b0, 0x11340001, 0x195c07e8, 0x196007ee,
+ 0xd8353300, 0x7e1e4001, 0xd8353301, 0xce4802d0, 0xd8100309, 0xd8100319, 0xcf000008, 0x90000000,
+ 0xc4970258, 0xc48f0250, 0x51540020, 0x7cd4c01a, 0xc4af0280, 0xc4b30278, 0x52ec0020, 0x7ef2c01a,
+ 0x04140020, 0x04280000, 0x64d80001, 0x09540001, 0x54cc0001, 0x95800060, 0x8c001628, 0xc4193247,
+ 0x25980001, 0x9580005c, 0x7dc24001, 0xc41d3248, 0x25dc000f, 0x7dd2000c, 0x96000057, 0xc41d3255,
+ 0xc435324f, 0x7df5c00c, 0x99c00004, 0xc4193265, 0x25980040, 0x9580fffe, 0xc439325b, 0x1bb0003f,
+ 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002, 0x9700000a, 0xc4393260, 0x1bb000e4,
+ 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x800016f1, 0xce400013, 0xc033ffff,
+ 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b, 0x27b800ff, 0x9b80fffe, 0xd8c00033,
+ 0xc4300009, 0x27300008, 0x9700fffe, 0x1a7003e6, 0x27380003, 0x13b80004, 0x27300003, 0x13300003,
+ 0x7fb38001, 0x1a7000e8, 0x7fb38001, 0x13300001, 0x7fb38001, 0x07b80002, 0xd8400013, 0x1a700064,
+ 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f,
+ 0x0b300003, 0x800016df, 0x17b00005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, 0x13300005,
+ 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf, 0xd8c00034, 0xce400013, 0xc431325d,
+ 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffca, 0xd841325d, 0x2030007b, 0xcf01325b,
+ 0x800016f2, 0xd841325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0x06a80001, 0x9940ff9c, 0x8c001608,
+ 0xd8080278, 0xd8080280, 0x90000000, 0xd840004f, 0xc414000e, 0x29540008, 0xcd400013, 0xc43d3265,
+ 0x1bc800ea, 0xd80802e9, 0x7c40c001, 0x18fc0064, 0x9bc00042, 0xc4193246, 0xc41d3245, 0x51980020,
+ 0x7dd9801a, 0x45980400, 0xc4313267, 0x043c3000, 0xcfc13267, 0xc43d3267, 0x9bc00001, 0x1b380057,
+ 0x1b340213, 0x1b300199, 0x7f7b400a, 0x7f73400a, 0xcf400024, 0x14f4001d, 0xc4bf02e9, 0x9bc0001c,
+ 0x7c410001, 0x192807fa, 0xc4bf0258, 0xc4a70250, 0x53fc0020, 0x7e7e401a, 0x042c0000, 0x04300000,
+ 0x667c0001, 0x56640001, 0x06ec0001, 0x97c0fffd, 0x07300001, 0x0aec0001, 0x7eebc00c, 0x06ec0001,
+ 0x97c0fff8, 0x0b300001, 0x43300007, 0x53300002, 0x7db30011, 0xd3000025, 0xc03ec005, 0x2bfca200,
+ 0xcfc00026, 0xccc00026, 0xcd000026, 0x192807fa, 0xc01f007f, 0x7d1d0009, 0x2110007d, 0x8c001628,
+ 0x203c003f, 0xcfc13256, 0x8c0017f5, 0xcd013254, 0x18fc01e8, 0xcfc13248, 0x8c00185b, 0xd8413247,
+ 0x0b740001, 0x9b40ffd5, 0xd800004f, 0xc4bf02e9, 0x97c0ea24, 0x90000000, 0x14d4001d, 0xc4930260,
+ 0x7d52400e, 0xc49f0258, 0xc4a30250, 0x51dc0020, 0x7de1801a, 0x96400017, 0x7d534002, 0xc4af0270,
+ 0x7dae4005, 0x26640001, 0x32e0001f, 0x9a400006, 0x06ec0001, 0x96000002, 0x042c0000, 0xcec80270,
+ 0x8000174f, 0x0b740001, 0x8c00178a, 0x05100001, 0x9b40fff3, 0xc4af0280, 0xc4b30278, 0x52ec0020,
+ 0x7ef2c01a, 0x8c001608, 0xd8080278, 0xd8080280, 0xc4ab0268, 0x7daa4005, 0x26640001, 0x32a0001f,
+ 0x9a400005, 0x06a80001, 0x96000002, 0x24280000, 0x80001765, 0x7c410001, 0xc01f007f, 0x09540001,
+ 0x7d1d0009, 0x2110007d, 0x8c001628, 0xd8013256, 0x8c0017f2, 0xcd013254, 0xc4113248, 0x15100004,
+ 0x11100004, 0xc4b3034b, 0x7f13000a, 0xcf013248, 0xc4930260, 0x8c001855, 0x32a4001f, 0xd8413247,
+ 0xd800004f, 0x09100001, 0x06a80001, 0x96400002, 0x24280000, 0xcd080260, 0xce880268, 0x9940ffc0,
+ 0x7c408001, 0x88000000, 0x7ec28001, 0x8c001628, 0x32e0001f, 0xc4253247, 0x26640001, 0x9640005e,
+ 0xc4293265, 0xc4253255, 0xc431324f, 0x7e72400c, 0x26a80040, 0x9a400002, 0x9680fff7, 0xc429325b,
+ 0x1aa4003f, 0x96400049, 0x1aa400e8, 0x32680003, 0x9a800046, 0x32640002, 0x9640000a, 0xc4293260,
+ 0x1aa400e4, 0x32640004, 0x96400040, 0xc425325d, 0x26640010, 0x9a40fffe, 0x800017e2, 0xcdc00013,
+ 0xc027ffff, 0x2e6400ff, 0xc429325b, 0x7e6a4009, 0xce41325b, 0xc429325b, 0x26a800ff, 0x9a80fffe,
+ 0xd8c00033, 0xc4240009, 0x26640008, 0x9640fffe, 0x19e403e6, 0x26680003, 0x12a80004, 0x26640003,
+ 0x12640003, 0x7ea68001, 0x19e400e8, 0x7ea68001, 0x12640001, 0x7ea68001, 0x06a80002, 0xd8400013,
+ 0x19e40064, 0x32640002, 0x96400009, 0x16a40005, 0x06640003, 0xce412082, 0xcc01203f, 0xd8400013,
+ 0xcc01203f, 0x0a640003, 0x800017d0, 0x16a40005, 0xce412082, 0xcc01203f, 0xd8400013, 0xcc01203f,
+ 0x12640005, 0x7ea64002, 0xc4292083, 0x7ea68005, 0x26a80001, 0x9a80ffdf, 0xd8c00034, 0xcdc00013,
+ 0xc425325d, 0x26640010, 0x9a40fffe, 0xc429325b, 0x26a400ff, 0x9a40ffca, 0xd841325d, 0x2024007b,
+ 0xce41325b, 0x800017e3, 0xd841325d, 0xc4a70280, 0xc4ab0278, 0x52640020, 0x7e6a401a, 0x04280001,
+ 0x7eae8014, 0x7e6a401a, 0x56680020, 0xce480278, 0xce880280, 0x06ec0001, 0x96000002, 0x042c0000,
+ 0xcec80270, 0x90000000, 0x7c438001, 0x7c420001, 0x800017fe, 0xc4bf02e9, 0x9bc00006, 0x7c438001,
+ 0x7c420001, 0xcf800026, 0xce000026, 0x800017fe, 0xc43b02eb, 0xc42302ec, 0xcf813245, 0xce013246,
+ 0x52200020, 0x7fa3801a, 0x47b8020c, 0x15e00008, 0x1220000a, 0x2a206032, 0x513c001e, 0x7e3e001a,
+ 0xc4bf02e9, 0x9bc00005, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0x8000180f, 0xcd400013, 0xc4313267,
+ 0x1b3c0077, 0x1b300199, 0x7ff3000a, 0x1330000a, 0x2b300032, 0x043c3000, 0xcfc13267, 0xc43d3267,
+ 0xd200000b, 0xc4200007, 0xd3800002, 0xcf000002, 0xd8000040, 0x96000002, 0xd8400040, 0xd8400018,
+ 0x043c2000, 0xcfc13267, 0xd8000018, 0xd8800010, 0xcdc00013, 0x7dc30001, 0xdc1e0000, 0x04380032,
+ 0xcf80000e, 0x8c001427, 0xcc413248, 0xc43d3269, 0x27fc000f, 0x33fc0003, 0x97c00011, 0x043c001f,
+ 0xdfc30000, 0xd4413249, 0x7c43c001, 0x7c43c001, 0x043c0024, 0x0bfc0021, 0xdfc30000, 0xd441326a,
+ 0x173c0008, 0x1b300303, 0x7f3f0001, 0x043c0001, 0x7ff3c004, 0xcfc13084, 0x80001842, 0x043c0024,
+ 0xdfc30000, 0xd4413249, 0x7c43c001, 0x23fc003f, 0xcfc1326d, 0x0bb80026, 0xdf830000, 0xd441326e,
+ 0x7c438001, 0x7c438001, 0xc4393265, 0x1fb8ffc6, 0xddc30000, 0xcf813265, 0x9a000003, 0xcdc0000c,
+ 0x80001852, 0xcdc0000d, 0xce000010, 0x8c00142b, 0x90000000, 0x7c41c001, 0x7c420001, 0xcdc13252,
+ 0xce013253, 0x8c001628, 0x80001878, 0xc49f02e9, 0x99c00018, 0x7c41c001, 0x7c420001, 0xcdc13252,
+ 0xce013253, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0x043c3000, 0xcfc13267, 0xc43d3267, 0x97c0ffff,
+ 0xcdc00026, 0xce000026, 0xd8400027, 0xc41c0012, 0x99c0ffff, 0xc43c000e, 0x2bfc0008, 0xcfc00013,
+ 0x043c2000, 0xcfc13267, 0x8c001628, 0x80001878, 0xc41f02ed, 0xc42302ee, 0xcdc13252, 0xce013253,
+ 0x04200001, 0x7e2a0004, 0xce013084, 0x90000000, 0x28340001, 0x313c0bcc, 0x9bc00010, 0x393c051f,
+ 0x9bc00004, 0x3d3c050e, 0x9bc0000c, 0x97c0000c, 0x393c0560, 0x9bc00004, 0x3d3c054f, 0x9bc00007,
+ 0x97c00007, 0x393c1538, 0x9bc00005, 0x3d3c1537, 0x9bc00002, 0x97c00002, 0x2b740800, 0x90000000,
+ 0xc40c000e, 0x28cc0008, 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c40c001, 0x18e8007c, 0x7c42c001,
+ 0x06a8189a, 0x86800000, 0x8000189e, 0x800018c5, 0x800018f2, 0x8000016a, 0x7c414001, 0x18d0007e,
+ 0x50580020, 0x09200001, 0x7d59401a, 0xd1400072, 0xc8140072, 0x09240002, 0x7c418001, 0x7c41c001,
+ 0x99000011, 0xc4340004, 0xd8400013, 0xd8400008, 0xc42130b5, 0x1a24002c, 0x9a40fffe, 0x2020002c,
+ 0xc418000d, 0x1198001c, 0x10cc0004, 0x14cc0004, 0x7cd8c00a, 0xccc130b7, 0xce0130b5, 0xcf400008,
+ 0x80000168, 0xd1400025, 0x5978073a, 0x2bb80002, 0xcf800024, 0xcd800026, 0xcdc00026, 0xd8400027,
+ 0x9600e8a8, 0xc4300012, 0x9b00ffff, 0x9640e8a5, 0x800018a9, 0x04140000, 0xc55b0309, 0x3d5c0010,
+ 0x05540001, 0x2598ffff, 0x09780001, 0x7dad800c, 0x99c0ffd2, 0x9580fff9, 0xc4970258, 0xc4930250,
+ 0x51540020, 0x7d15001a, 0x04140020, 0x04280000, 0x442c0000, 0x65180001, 0x09540001, 0x55100001,
+ 0x9580000b, 0x8c001628, 0xc41d3248, 0x04300001, 0x7f2b0014, 0x25dc000f, 0x7df9c00c, 0x95c00004,
+ 0x7ef2c01a, 0xd8c13260, 0xd901325d, 0x06a80001, 0x9940fff1, 0x04140020, 0x04280000, 0x66d80001,
+ 0x09540001, 0x56ec0001, 0x95800005, 0x8c001628, 0xc421325d, 0x26240007, 0x9a40fffe, 0x06a80001,
+ 0x9940fff7, 0x8000189e, 0x04140020, 0x04280000, 0x09540001, 0x8c001628, 0xc41d3254, 0xc023007f,
+ 0x19e4003e, 0x7de1c009, 0x7dee000c, 0x96400008, 0x96000007, 0xd8c13260, 0xd901325d, 0xc421325d,
+ 0x261c0007, 0x99c0fffe, 0x8000189e, 0x06a80001, 0x9940fff0, 0x8000189e, 0xc40c000e, 0x28cc0008,
+ 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c40c001, 0x18e00064, 0x06281911, 0x14f4001d, 0x24cc0003,
+ 0x86800000, 0x80001915, 0x800019af, 0x80001a2b, 0x8000016a, 0xcc48032b, 0xcc480333, 0xcc48033b,
+ 0xcc480343, 0x98800011, 0xc4213246, 0xc4253245, 0x52200020, 0x7e26401a, 0x46640400, 0xc4313267,
+ 0x04203000, 0xce013267, 0xc4213267, 0x9a000001, 0x1b3c0057, 0x1b200213, 0x1b300199, 0x7e3e000a,
+ 0x7e32000a, 0xce000024, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280, 0xc4b30278,
+ 0x52ec0020, 0x7ef2c01a, 0x04180000, 0x04140020, 0x04280000, 0x7f438001, 0x8c001628, 0xc41d3247,
+ 0x25dc0001, 0x95c00068, 0xc4213254, 0x1a1c003e, 0x95c00065, 0xc01f007f, 0x7e1e0009, 0x97800062,
+ 0x0bb80001, 0x43bc0008, 0x7fcbc001, 0xc7df032b, 0x7e1fc00c, 0x97c0fffa, 0x043c0101, 0x94c00002,
+ 0x043c0102, 0xc439325b, 0x1bb0003f, 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002,
+ 0x97000009, 0xc4393260, 0x1bb000e4, 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe,
+ 0x80001994, 0x8c001628, 0xc033ffff, 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b,
+ 0x27b800ff, 0x9b80fffe, 0xd8c00033, 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27380003,
+ 0x13b80004, 0x27300003, 0x13300003, 0x7fb38001, 0x19f000e8, 0x7fb38001, 0x13300001, 0x7fb38001,
+ 0x07b80002, 0xd8400013, 0x19f00064, 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082,
+ 0xcc01203f, 0xd8400013, 0xcc01203f, 0x0b300003, 0x80001982, 0x17b00005, 0xcf012082, 0xcc01203f,
+ 0xd8400013, 0xcc01203f, 0x13300005, 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf,
+ 0xd8c00034, 0xcdc00013, 0xc431325d, 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffcb,
+ 0xcfc1325d, 0x2030007b, 0xcf01325b, 0x80001995, 0xcfc1325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a,
+ 0x98800009, 0x41bc0007, 0x53fc0002, 0x7e7fc011, 0xd3c00025, 0xd8000026, 0xd8400027, 0xc43c0012,
+ 0x9bc0ffff, 0x653c0001, 0x7dbd8001, 0x06a80001, 0x09540001, 0x55100001, 0x9940ff8f, 0xc43c000e,
+ 0x2bfc0008, 0xcfc00013, 0x043c2000, 0xcfc13267, 0xd8080278, 0xd8080280, 0x80000168, 0x7c410001,
+ 0x04140000, 0xc55b0309, 0x3d5c0010, 0x2598ffff, 0x05540001, 0x7d91800c, 0x95c00003, 0xd4400078,
+ 0x80000168, 0x9580fff8, 0x09780001, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280,
+ 0xc4b30278, 0x52ec0020, 0x7ef2c01a, 0x04140020, 0x04280000, 0x65180001, 0x09540001, 0x55100001,
+ 0x9580005d, 0x8c001628, 0xc4253247, 0x26640001, 0x04200101, 0x96400058, 0x7dc24001, 0xc41d3248,
+ 0x25dc000f, 0x7df9c00c, 0x95c00053, 0x94c00002, 0x04200102, 0x7e41c001, 0xc425325b, 0x1a70003f,
+ 0x97000049, 0x1a7000e8, 0x33240003, 0x9a400046, 0x33300002, 0x9700000a, 0xc4253260, 0x1a7000e4,
+ 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x80001a21, 0xcdc00013, 0xc033ffff,
+ 0x2f3000ff, 0xc425325b, 0x7f270009, 0xcf01325b, 0xc425325b, 0x266400ff, 0x9a40fffe, 0xd8c00033,
+ 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27240003, 0x12640004, 0x27300003, 0x13300003,
+ 0x7e724001, 0x19f000e8, 0x7e724001, 0x13300001, 0x7e724001, 0x06640002, 0xd8400013, 0x19f00064,
+ 0x33300002, 0x97000009, 0x16700005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f,
+ 0x0b300003, 0x80001a0f, 0x16700005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, 0x13300005,
+ 0x7e730002, 0xc4252083, 0x7e724005, 0x26640001, 0x9a40ffdf, 0xd8c00034, 0xcdc00013, 0xc431325d,
+ 0x27300010, 0x9b00fffe, 0xc425325b, 0x267000ff, 0x9b00ffca, 0xce01325d, 0x2030007b, 0xcf01325b,
+ 0x80001a22, 0xce01325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0x06a80001, 0x9940ff9f, 0xd4400078,
+ 0xd8080278, 0xd8080280, 0x80000168, 0x8c001a31, 0xd4400078, 0xd8080278, 0xd8080280, 0x7c408001,
+ 0x88000000, 0xc4213246, 0xc4253245, 0x52200020, 0x7e26401a, 0x46640400, 0xc4313267, 0x04203000,
+ 0xce013267, 0xc4213267, 0x9a000001, 0x1b180057, 0x1b200213, 0x1b300199, 0x7e1a000a, 0x7e32000a,
+ 0xce000024, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280, 0xc4b30278, 0x52ec0020,
+ 0x7ef2c01a, 0x04140020, 0x04280000, 0x65180001, 0x95800060, 0x8c001628, 0xc4193247, 0x25980001,
+ 0x04200101, 0x94c00005, 0x30f00005, 0x04200005, 0x9b000002, 0x04200102, 0x95800056, 0xc439325b,
+ 0x1bb0003f, 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002, 0x9700000a, 0xc4393260,
+ 0x1bb000e4, 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x80001aa2, 0xcdc00013,
+ 0xc033ffff, 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b, 0x27b800ff, 0x9b80fffe,
+ 0xd8c00033, 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27380003, 0x13b80004, 0x27300003,
+ 0x13300003, 0x7fb38001, 0x19f000e8, 0x7fb38001, 0x13300001, 0x7fb38001, 0x07b80002, 0xd8400013,
+ 0x19f00064, 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013,
+ 0xcc01203f, 0x0b300003, 0x80001a90, 0x17b00005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f,
+ 0x13300005, 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf, 0xd8c00034, 0xcdc00013,
+ 0xc431325d, 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffca, 0xce01325d, 0x2030007b,
+ 0xcf00325b, 0x80001aa3, 0xce01325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0xc49b02e9, 0x99800005,
+ 0xd2400025, 0x4664001c, 0xd8000026, 0xd8400027, 0x06a80001, 0x09540001, 0x55100001, 0x9940ff9c,
+ 0xc49b02e9, 0x99800008, 0xc430000e, 0x2b300008, 0xcf000013, 0x04302000, 0xcf013267, 0xc4313267,
+ 0x97000001, 0x90000000, 0x244c00ff, 0xcc4c0200, 0x7c408001, 0x88000000, 0xc44f0200, 0xc410000b,
+ 0xc414000c, 0x7d158010, 0x059cc000, 0xd8400013, 0xccdd0000, 0x7c408001, 0x88000000, 0xc40c0037,
+ 0x94c0ffff, 0xcc000049, 0xc40c003a, 0x94c0ffff, 0x7c40c001, 0x24d00001, 0x9500e69a, 0x18d0003b,
+ 0x18d40021, 0x99400006, 0xd840004a, 0xc40c003c, 0x94c0ffff, 0x14cc0001, 0x94c00028, 0xd8000033,
+ 0xc438000b, 0xc43c0009, 0x27fc0001, 0x97c0fffe, 0xd8400013, 0xd841c07f, 0xc43dc07f, 0x1bfc0078,
+ 0x7ffbc00c, 0x97c0fffd, 0x99000004, 0xc0120840, 0x282c0040, 0x80001ae8, 0xc0121841, 0x282c001a,
+ 0xcd01c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04200004, 0xcec0001b, 0xd8400021,
+ 0x0a200001, 0x9a00ffff, 0xc425c07f, 0x166c001f, 0x04200004, 0x9ac0fffb, 0xc434000f, 0x9b40ffff,
+ 0xd801c07f, 0xd8400013, 0xc425c07f, 0xce400078, 0xd8000034, 0x9940e66b, 0xd800004a, 0x7c408001,
+ 0x88000000, 0xc40c0036, 0x24d00001, 0x9900fffe, 0x18cc0021, 0xccc00047, 0xcc000046, 0xc40c0039,
+ 0x94c0ffff, 0xc40c003d, 0x98c0ffff, 0x7c40c001, 0x24d003ff, 0x18d47fea, 0x18d87ff4, 0xcd00004c,
+ 0xcd40004e, 0xcd80004d, 0xd8400013, 0xcd41c405, 0xc02a0001, 0x2aa80001, 0xce800013, 0xcd01c406,
+ 0xcc01c406, 0xcc01c406, 0xc40c0006, 0x98c0ffff, 0xc414000e, 0x29540008, 0x295c0001, 0xcd400013,
+ 0xd8c1325e, 0xcdc0001a, 0x11980002, 0x4110000c, 0xc0160800, 0x7d15000a, 0xc0164010, 0xd8400013,
+ 0xcd41c078, 0xcc01c080, 0xcc01c081, 0xcd81c082, 0xcc01c083, 0xcd01c084, 0xc40c0006, 0x98c0ffff,
+ 0xd8400048, 0xc40c003b, 0x94c0ffff, 0x80000c16, 0xd8400013, 0xd801c40a, 0xd901c40d, 0xd801c410,
+ 0xd801c40e, 0xd801c40f, 0xc40c0040, 0x04140001, 0x09540001, 0x9940ffff, 0x04140096, 0xd8400013,
+ 0xccc1c400, 0xc411c401, 0x9500fffa, 0xc424003e, 0x04d00001, 0x11100002, 0xcd01c40c, 0xc0180034,
+ 0xcd81c411, 0xd841c414, 0x0a540001, 0xcd41c412, 0x2468000f, 0xc419c416, 0x41980003, 0xc41c003f,
+ 0x7dda0001, 0x12200002, 0x10cc0002, 0xccc1c40c, 0xd901c411, 0xce41c412, 0xd8800013, 0xce292e40,
+ 0xcc412e01, 0xcc412e02, 0xcc412e03, 0xcc412e00, 0x80000aa7, 0xc43c0007, 0xdc120000, 0x31144000,
+ 0x95400005, 0xdc030000, 0xd800002a, 0xcc3c000c, 0x80001b70, 0x33f80003, 0xd4400078, 0x9780e601,
+ 0x188cfff0, 0x04e40002, 0x80001190, 0x7c408001, 0x88000000, 0xc424005e, 0x96400006, 0x90000000,
+ 0xc424005e, 0x96400003, 0x7c408001, 0x88000000, 0x80001b74, 0x80000168, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307,
+ 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ },
+ .dfy_size = 7440
+};
+
+static const PWR_DFY_Section pwr_virus_section4 = {
+ .dfy_cntl = 0x80000004,
+ .dfy_addr_hi = 0x000000b4,
+ .dfy_addr_lo = 0x54106500,
+ .dfy_data = {
+ 0x7e000200, 0x7e020204, 0xc00a0505, 0x00000000, 0xbf8c007f, 0xb8900904, 0xb8911a04, 0xb8920304,
+ 0xb8930b44, 0x921c0d0c, 0x921c1c13, 0x921d0c12, 0x811c1d1c, 0x811c111c, 0x921cff1c, 0x00000400,
+ 0x921dff10, 0x00000100, 0x81181d1c, 0x7e040218, 0xe0701000, 0x80050002, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050102,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0701000, 0x80050002, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0701000, 0x80050102, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050002, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050102,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302,
+ 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ },
+ .dfy_size = 240
+};
+
+static const PWR_DFY_Section pwr_virus_section5 = {
+ .dfy_cntl = 0x80000004,
+ .dfy_addr_hi = 0x000000b4,
+ .dfy_addr_lo = 0x54106900,
+ .dfy_data = {
+ 0x7e080200, 0x7e100204, 0xbefc00ff, 0x00010000, 0x24200087, 0x262200ff, 0x000001f0, 0x20222282,
+ 0x28182111, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
+ 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
+ 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
+ 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
+ 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000,
+ 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000,
+ 0x1100000c, 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ },
+ .dfy_size = 384
+};
+
+static const PWR_DFY_Section pwr_virus_section6 = {
+ .dfy_cntl = 0x80000004,
+ .dfy_addr_hi = 0x000000b4,
+ .dfy_addr_lo = 0x54116f00,
+ .dfy_data = {
+ 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4540fe8, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000041, 0x0000000c, 0x00000000, 0x07808000, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
+ 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x54116f00, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
+ 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
+ 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb454105e, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x000000c0, 0x00000010, 0x00000000, 0x07808000, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
+ 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x54117300, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
+ 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
+ 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4541065, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000500, 0x0000001c, 0x00000000, 0x07808000, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
+ 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x54117700, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
+ 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
+ 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4541069, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000444, 0x0000008a, 0x00000000, 0x07808000, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555,
+ 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x54117b00, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000,
+ 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000,
+ 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ },
+ .dfy_size = 1024
+};
-static const PWR_Command_Table pwr_virus_table[PWR_VIRUS_TABLE_SIZE] = {
- { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
- { PwrCmdWrite, 0x00000002, mmRLC_SRM_CNTL },
- { PwrCmdWrite, 0x15000000, mmCP_ME_CNTL },
- { PwrCmdWrite, 0x50000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x0840800a, mmCP_RB0_CNTL },
- { PwrCmdWrite, 0xf30fff0f, mmTCC_CTRL },
- { PwrCmdWrite, 0x00000002, mmTCC_EXE_DISABLE },
- { PwrCmdWrite, 0x000000ff, mmTCP_ADDR_CONFIG },
- { PwrCmdWrite, 0x540ff000, mmCP_CPC_IC_BASE_LO },
- { PwrCmdWrite, 0x000000b4, mmCP_CPC_IC_BASE_HI },
- { PwrCmdWrite, 0x00010000, mmCP_HYP_MEC1_UCODE_ADDR },
- { PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00221408, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00591260, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00621387, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
- { PwrCmdWrite, 0x00010000, mmCP_HYP_MEC2_UCODE_ADDR },
- { PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00221408, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00591260, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00621387, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x540fe800, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e020201, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e040204, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e060205, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54106f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000400b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00004000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00804fac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x540fef00, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0xc0031502, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00001e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x540ff000, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000145, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080061, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ccffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3cd08000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1cd0ffcf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x050c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x84c00000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000067, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000006a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000008f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000099, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800000a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800000af, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x388c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08880002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d808001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc0700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0d000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01b10, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x280c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x280c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x280c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca88004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc00006f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28180080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd4c0380, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdcc0388, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdcc038c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0c0390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0c0394, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4c0398, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4c039c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8c03a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8c03a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcecc03a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcecc03ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0c03b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0c03b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4c03b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4c03bc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8c03c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57b80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8c03c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfcc03c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57fc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfcc03cc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05dc002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc12009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d200a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc012009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25e01c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25e40300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25e800c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25ec003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e25c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31100006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4df0388, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d7038c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d5dc01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4e30390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d70394, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d62001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4e70398, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d7039c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d66401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4eb03a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d6a801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ef03a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d6ec01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4f303b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d73001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4f703b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703bc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d77401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4fb03c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7b801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ff03c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d703cc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7fc01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4d70380, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0085, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc006a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400051, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aac0027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04080002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000367, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9880fff3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04080010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80c0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80c0319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9880fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d4001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x155c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e80180, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900091a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280196, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d4fe04, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800001b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000032b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000352, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000035f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000701, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000047c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000019f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d98001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0044, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9400036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11540015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1998003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af0007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15dc000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d65400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a38003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd5c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800045, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411326a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415326b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000056, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40005a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29988000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000073, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00005f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26f00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af07fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04343000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf413267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd1c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0160, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc810001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b4c0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f4f400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55180020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af4007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33740003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ae8003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413348, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x958000d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000315, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04303000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1714000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d77400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c01e2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e5c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1334e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01334f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813351, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd881334d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cdcc011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05900008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00006a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0006b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d594002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc12e23, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd012e24, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc12e25, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b340057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b280213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980198, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20cc003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01e0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2d540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x078c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001239, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04f80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd5c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840007c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c018a6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e22, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800007c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c018a2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540188f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc013cfff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x38d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdcc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01882, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000304, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x49980198, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000329, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16ec001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1998003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a18003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24dc00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31e00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95801827, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14dc0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a0000ad, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04080000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af4003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca88005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f4b4009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d33400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1eecffdd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800003c3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa80030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a8004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e80042, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e8e800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de9c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce8c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd30011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11e80007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd300001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1660001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e320009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0328000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e72400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0430000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ac000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d310002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa87600, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280222, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22ec003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8380018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57b00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04343108, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2374007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32a80003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800003e7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980104, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x49980104, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800003f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf41326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x254c0700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a641fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0726, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a640200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1237b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01755, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde830000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0174c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bb80040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100044, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19180024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x551c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000043d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840006c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000043f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282000f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5e124dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e624001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980158, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x49980158, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980170, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1154000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e80488, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f807f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e40077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000048e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000494, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000685, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000686, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800006ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ccc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d79400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e7a400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52a8001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec0028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004cc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419324e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26e8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d324d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d290004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f8f4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f52800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50e00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004d1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0dc002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f534002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004d7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e804e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004e7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800004f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000505, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26edf000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05a80507, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000050c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000528, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000057d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800005c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800005f3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1be00fe4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000066, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ed6c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113271, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193272, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d51401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253276, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400061, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2730000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7db1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00062, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000063, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400065, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec0188, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26e01000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c131fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192007ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x69dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de20014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x561c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013344, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13345, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2010007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2010003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013344, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013345, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180050, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280042, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813273, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13275, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00124f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec0190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2154003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bd800e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f598004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1be800e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801327a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800005f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xda000068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1be000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc63124dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fc14001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000697, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31100005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900008e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a9feff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d30b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00ac006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28880700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0006de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30d4000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41530b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19980028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800006c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8380023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa38011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3800025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202400d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000712, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e80714, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000071c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000720, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000747, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000071d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800007c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000732, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000745, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000744, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a64008c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0fff1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000723, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41f02f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000743, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffde, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195800e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dd7fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1a001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46200200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04283247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af80057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af40213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6f400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6990000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x329c325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x329c3269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x329c3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01defff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d8009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000078a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fff2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03e7ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3f0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03e4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d30b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000b80, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x203c003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19580066, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0120001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da18001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d24db, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580137b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00ee000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19080070, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x190c00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2518000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05a80809, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000080e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000080f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000898, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000946, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009e1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a80811, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000815, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000834, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3045, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c091, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000241, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02f0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4252087, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5668001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00021d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001a41, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43b02f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56f00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x950001fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aec0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a40006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de6000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10e40008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2110003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d10ff9e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0245301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801325f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0121fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29108eff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0127ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0131fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd2400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0180003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd1c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a8089a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000089e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800008fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31300022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x964012a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02620c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000903, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31240022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ec30011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32f80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x67180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bfc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd981325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000915, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c1325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fff6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f818001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001606, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d838001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16240014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a2801f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00075e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f40014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33e80010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680ffec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a80948, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000094c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000099b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x964011fe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0260800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dda801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e838011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001802, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x469c0390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0014df, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31280014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8802ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800062, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31280034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04a809e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800009ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a45, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e72401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04383000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b38007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4598001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002eb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002ed, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4002ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001715, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffbc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a55, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x233c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0130b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49302ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5198001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53b8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7db9801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000a5e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01106, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c010fd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce4c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc80c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x58e801fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc01e2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e5c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x44cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55900020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x44cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd812e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd012e02, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd412e03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1e64001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ab1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a0010ac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd880003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d403f7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41b0367, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d85800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280adc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000af1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000adf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ae7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8d2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d803f7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11940014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29544001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29544003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000af4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd44d2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd44dc000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8d2c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000b0a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd44d2c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28148004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4593240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0105e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef3400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14e80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a8000af, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c01043, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a01fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3620005c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2464003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6290ce7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16ac001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ee6c00d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000367, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640102e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a00035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16f8001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc035f0ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e764009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19b401f8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ae4003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000b7c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a4003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc01e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dbd800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d98ff15, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x592c00fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12e00016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x592c007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12e00015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1620000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12e4001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5924007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00fdb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780f5ca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x269c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e4004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f67000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f674002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab8c006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000bec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000b47, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a8004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18580037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x262001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d54001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd280200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd680208, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcda80210, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6930200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc6970208, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc69b0210, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd900003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd940003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14fc0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24f800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d83c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x321c0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580ffee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c30, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9480000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800f29, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800f23, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800f1a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600f502, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0f500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000f05, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16e4001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640f4f4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40f4f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00ac005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00e0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc8000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28884900, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ff3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400ee1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c40a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c40c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c40d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d0007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15580010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x255400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c411, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c40f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c40e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c410, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ec0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c414, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c415, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c413, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c030011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c038011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431c417, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435c416, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c419, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc418, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf413261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13263, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813264, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc0030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d77000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51b80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f97801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ca7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc0031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435c40b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4280032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000cf4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc032800b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d42011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800e6c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x596001fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ce0c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x505c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x122c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d1f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d57, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0328009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04143000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e51001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d2d0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19640057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19580213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19600199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da6400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04142000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d80034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280d83, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d8a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000db1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000dbc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e010001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d75400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x526c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2ec01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5ae0073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3c6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc3a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980fff5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01c405, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd441c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3b1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580f3a5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00da7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5aac007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12d80017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56a00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e82400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e58c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19d4003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20880188, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20240090, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b301ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0001a2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2220003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc0034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e8000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80e71, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000edd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ea1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000eaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e7c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e87, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000e8f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9e001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213262, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253261, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213264, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253263, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e82005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da1801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1800072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8180072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x59a001fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421c401, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c401, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ede, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5a10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5a50000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280eea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f11, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f2e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f1f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f26f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7daec01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5af8073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eba800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f25c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c405, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f24e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40f247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0f240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ef2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3db09012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c034001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c038001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f88, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e52401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1334000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e02000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f63400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000f9d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51e40020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13380016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1220001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31140005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31140006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05280fb7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28140002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fc2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fd1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e80039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52a8003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140004b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x159c0011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31a00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31a40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e25800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0fff5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d100010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01326f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0340008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000ffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x208801a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000102f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1cccfe08, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00b33, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da2400f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da28002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1ac002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d2ac002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3ef40010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40f11d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c024001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100086, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5510003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001075, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d520002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cde0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e20001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001071, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00b01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc40003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4080029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a400e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12500009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x248c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x200c006d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x200c0228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18881fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d4072c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc00d1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3094000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x38d80000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x311c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30940007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1620001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010c4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x259c007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a00030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x199c0fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000aac, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07a810d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000104c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x200c007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28240007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192400fd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06681110, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19180070, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19100078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f40058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001117, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001118, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000112d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001130, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001133, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc81c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e8e8009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22a8003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22a80074, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2774001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eb6800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25ecffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55700020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15f40010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x275c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15dc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc1c01e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e62000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001165, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e0d000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95000007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e02401e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05d80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc2401e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da2000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600ffe6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce00001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x22640435, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0528117e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x312c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001185, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a0400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d81c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19a000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de2c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc420007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011a3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d654001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c020001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011b6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253279, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2730003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3b380006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3f38000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0430000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb10004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e57000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e578002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d67c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0be40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d3a4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x202c002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e640010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce81325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07a811cf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00feb8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x954009a7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f0012f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f40612, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc00c1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf7400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x39600004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0140004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a6c003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800011e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ac007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab00030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aac0fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001205, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a2800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a0800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a4000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30d00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000052, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640090f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19180038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab0c006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000127f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d3258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab0c012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f67800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0012e1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x964008d7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9800036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012aa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a8002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7edec00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4140032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1858003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0cc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d407f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d5d4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d52000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d514002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d958001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd5c002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc1325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1ccc001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd980003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9800040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd9c00040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800051, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b74003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50700020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04e81324, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d71401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x596401fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b74008d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a640000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000132c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000133b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001344, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42530b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a68003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2024003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d19000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffe0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf81a2a4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c007eb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d0d001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x591c01fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45140210, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x595801fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a307fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x23304076, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc00e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0015, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4514020c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a2001e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a204001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a64003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15dc000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dcdc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5dc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45140248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013257, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0434000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdb000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013259, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0337fff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f220009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d01c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f01c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8240072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd240001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19682011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5a6c01fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eeac00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfa0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4380007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40038, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9540073d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18c80066, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30880001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4220000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24e80007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24ec0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5310000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001465, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18f02011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5aec01fc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a8146a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1f0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f334002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000147b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e024001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000144a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fbfc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94800007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800014a9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0328007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03a0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45dc0390, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c428001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c430001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a0800fd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x109c000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce080228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9880000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ec75, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52a80020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80288, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf080290, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf0802a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4802a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc802b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80802b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x178c000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cf8c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8802c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc802c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8802d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf8802d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25b8ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd2800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5230309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e3a400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001539, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd880353, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b0353, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd14005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000154f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd080238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d200008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd900309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd910ce7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4190ce6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d918005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d918004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd810ce6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdd1054f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000156e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x090c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdcd050e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x110c0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc41230d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc480329, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc48032a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc4802e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09940001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x44100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x69100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000157f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970290, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b0288, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f0298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x041c0040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dcdc002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d924019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d26400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001579, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d010021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d914019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd480298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8802a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10d40010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12180016, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc51f0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d95800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d62000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdd00309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce113320, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18dc01e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c0001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015aa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a302b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12240004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab02a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4c0319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d9d8002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea14005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015bc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d25000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd0d3330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0802b8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd8802b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab02e0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa807f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f02d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49702d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02c8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f02c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96800028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d4e000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d964002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cde4002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de94001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd64002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800015cd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d698002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd4802d8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x129c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc50f0319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11a0000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1198000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd953300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e0e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a8000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce953301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce100319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b70280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73800a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x536c0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780eb68, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001609, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30b40000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b400011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b70258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53780020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb3801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7faf8019, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x67b40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x57b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bb0260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fab8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf880260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66f40001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4353247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7f4009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fff7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x269c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a00018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a00060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x269c0018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a40060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11dc0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b70228, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f514005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001644, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd080240, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f130005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001688, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f130004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01051e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42d051f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ed2c005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01051f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5170309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195c07f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x196007f6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x6b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001665, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a702a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab0298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f634014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8113320, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480298, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce8802a0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc5170319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b702b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x255c000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f5f4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8113330, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf4802b0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x195c07e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x196007ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8353300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e4001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8353301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce4802d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8100309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc48f0250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd4c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x64d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580005c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd2000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df5c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800016f1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a700064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800016df, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800016f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc0064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00042, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dd9801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf0258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53fc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e7e401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x667c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eebc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x43300007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7db30011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3000025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc03ec005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfca200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x203c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0017f5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18fc01e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00185b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffd5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ea24, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14d4001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d52400e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f0258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a30250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400017, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d534002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dae4005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000174f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00178a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40fff3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab0268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7daa4005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32a0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001765, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8013256, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c0017f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b3034b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f13000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001855, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32a4001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd080260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880268, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ffc0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ec28001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253255, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431324f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e72400c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9680fff7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa4003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32680003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4293260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1aa400e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017e2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc027ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2e6400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a4009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4240009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e403e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26680003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12a80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e400e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e40064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a640003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017d0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea64002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4292083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ea68005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26a400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2024007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017e3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4a70280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4ab0278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7eae8014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce480278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce880280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43b02eb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42302ec, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fa3801a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x47b8020c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x15e00008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1220000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2a206032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x513c001e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e3e001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000180f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b3c0077, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd200000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3800002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc30001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04380032, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf80000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc413248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3269, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33fc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bfc0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd441326a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x173c0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300303, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3f0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ff3c004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001842, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x23fc003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1326d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd441326e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1fb8ffc6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xddc30000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001852, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49f02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c00018, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c0012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41f02ed, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42302ee, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e2a0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x313c0bcc, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x393c051f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d3c050e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x393c0560, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d3c054f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x393c1538, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d3c1537, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b740800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e8007c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a8189a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800018c5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800018f2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0007e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09240002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc42130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a24002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7cd8c00a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc130b7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce0130b5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bb80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf800024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9600e8a8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9640e8a5, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800018a9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dad800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0ffd2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x442c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940fff1, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26240007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940fff7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc023007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19e4003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7de1c009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dee000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96000007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x261c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940fff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18e00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06281911, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24cc0003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001915, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x800019af, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a2b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc48032b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc480333, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc48033b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc480343, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98800011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b3c0057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e3e000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04180000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f438001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00068, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213254, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a1c003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00065, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1e0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97800062, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x43bc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fcbc001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc7df032b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1fc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0101, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c0102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001994, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001982, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffcb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001995, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98800009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x41bc0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x53fc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e7fc011, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd3c00025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x653c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dbd8001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff8f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d91800c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580fff8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9580005d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400058, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95c00053, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e41c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a70003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a400046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1a7000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a21, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f270009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x266400ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27240003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06640002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a0f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e730002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4252083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e724005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a40ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x267000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a22, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff9f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001a31, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b180057, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e1a000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x30f00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95800056, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001aa2, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001a90, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf00325b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001aa3, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd2400025, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4664001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99800008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2b300008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf000013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x244c00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc4c0200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc44f0200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc410000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d158010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x059cc000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccdd0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0037, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000049, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500e69a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d0003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d40021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd840004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x14cc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c00028, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0120840, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c0040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001ae8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0121841, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x282c001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c07c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9ac0fffb, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940e66b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800004a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0036, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9900fffe, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18cc0021, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc00047, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc000046, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0039, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24d003ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d47fea, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x18d87ff4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd00004c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd40004e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd80004d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c405, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x295c0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcdc0001a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11980002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x4110000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0160800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7d15000a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0164010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c080, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c084, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400048, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c003b, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c40a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901c40d, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c410, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c40e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd801c40f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc40c0040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9940ffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04140096, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc411c401, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9500fffa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424003e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04d00001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x11100002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd01c40c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0180034, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd81c411, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd841c414, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0a540001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x2468000f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc419c416, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x41980003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc41c003f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7dda0001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x12200002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x10cc0002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xccc1c40c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd901c411, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce41c412, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xce292e40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc120000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x31144000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xcc3c000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x9780e601, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x188cfff0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x04e40002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x96400003, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80001b74, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x54106500, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e020204, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc00a0505, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf8c007f, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8900904, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8911a04, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8920304, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb8930b44, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921c0d0c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921c1c13, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921d0c12, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x811c1d1c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x811c111c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921cff1c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000400, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x921dff10, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000100, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x81181d1c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e040218, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x54106900, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0x7e080200, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x7e100204, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbefc00ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00010000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x24200087, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x262200ff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000001f0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x20222282, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x28182111, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
- { PwrCmdWrite, 0x54116f00, mmCP_DFY_ADDR_LO },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fe8, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000041, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000000c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54116f00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb454105e, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000c0, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54117300, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4541065, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000500, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000001c, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54117700, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4541069, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000444, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x0000008a, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x54117b00, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
- { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
- { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54116f00, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54117300, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54117700, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x54117b00, mmCP_MQD_BASE_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
- { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
- { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
- { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
- { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
- { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
- { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
- { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000104, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000204, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000304, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000404, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000504, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000604, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000704, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000105, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000205, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000305, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000405, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000505, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000605, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000705, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000106, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000206, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000306, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000406, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000506, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000606, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000706, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000107, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000207, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000307, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000407, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000507, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000607, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000707, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000008, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000108, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000208, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000308, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000408, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000508, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000608, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000708, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000009, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000109, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000209, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000309, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000409, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000509, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000609, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000709, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
- { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
- { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
- { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
- { PwrCmdWrite, 0x01010101, mmCP_PQ_WPTR_POLL_CNTL1 },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
- { PwrCmdEnd, 0x00000000, 0x00000000 },
+static const PWR_Command_Table pwr_virus_table_post[] = {
+ { 0x00000000, mmCP_MEC_CNTL },
+ { 0x00000000, mmCP_MEC_CNTL },
+ { 0x00000004, mmSRBM_GFX_CNTL },
+ { 0x54116f00, mmCP_MQD_BASE_ADDR },
+ { 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
+ { 0xb4540fef, mmCP_HQD_PQ_BASE },
+ { 0x00000000, mmCP_HQD_PQ_BASE_HI },
+ { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
+ { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
+ { 0x00005301, mmCP_HQD_PERSISTENT_STATE },
+ { 0x00010000, mmCP_HQD_VMID },
+ { 0xc8318509, mmCP_HQD_PQ_CONTROL },
+ { 0x00000005, mmSRBM_GFX_CNTL },
+ { 0x54117300, mmCP_MQD_BASE_ADDR },
+ { 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
+ { 0xb4540fef, mmCP_HQD_PQ_BASE },
+ { 0x00000000, mmCP_HQD_PQ_BASE_HI },
+ { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
+ { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
+ { 0x00005301, mmCP_HQD_PERSISTENT_STATE },
+ { 0x00010000, mmCP_HQD_VMID },
+ { 0xc8318509, mmCP_HQD_PQ_CONTROL },
+ { 0x00000006, mmSRBM_GFX_CNTL },
+ { 0x54117700, mmCP_MQD_BASE_ADDR },
+ { 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
+ { 0xb4540fef, mmCP_HQD_PQ_BASE },
+ { 0x00000000, mmCP_HQD_PQ_BASE_HI },
+ { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
+ { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
+ { 0x00005301, mmCP_HQD_PERSISTENT_STATE },
+ { 0x00010000, mmCP_HQD_VMID },
+ { 0xc8318509, mmCP_HQD_PQ_CONTROL },
+ { 0x00000007, mmSRBM_GFX_CNTL },
+ { 0x54117b00, mmCP_MQD_BASE_ADDR },
+ { 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
+ { 0xb4540fef, mmCP_HQD_PQ_BASE },
+ { 0x00000000, mmCP_HQD_PQ_BASE_HI },
+ { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
+ { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
+ { 0x00005301, mmCP_HQD_PERSISTENT_STATE },
+ { 0x00010000, mmCP_HQD_VMID },
+ { 0xc8318509, mmCP_HQD_PQ_CONTROL },
+ { 0x00000004, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000104, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000204, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000304, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000404, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000504, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000604, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000704, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000005, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000105, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000205, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000305, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000405, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000505, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000605, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000705, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000006, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000106, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000206, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000306, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000406, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000506, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000606, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000706, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000007, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000107, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000207, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000307, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000407, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000507, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000607, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000707, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000008, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000108, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000208, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000308, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000408, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000508, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000608, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000708, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000009, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000109, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000209, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000309, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000409, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000509, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000609, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000709, mmSRBM_GFX_CNTL },
+ { 0x00000000, mmCP_HQD_ACTIVE },
+ { 0x00000000, mmCP_HQD_PQ_RPTR },
+ { 0x00000000, mmCP_HQD_PQ_WPTR },
+ { 0x00000001, mmCP_HQD_ACTIVE },
+ { 0x00000004, mmSRBM_GFX_CNTL },
+ { 0x01010101, mmCP_PQ_WPTR_POLL_CNTL1 },
+ { 0x00000000, mmGRBM_STATUS },
+ { 0x00000000, mmGRBM_STATUS },
+ { 0x00000000, mmGRBM_STATUS },
+ { 0x00000000, 0xFFFFFFFF },
};
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
index 4c3b537a714f..7d1eec5d2e7a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
@@ -23,22 +23,15 @@
#ifndef _PP_INSTANCE_H_
#define _PP_INSTANCE_H_
-#include "smumgr.h"
#include "hwmgr.h"
-#include "eventmgr.h"
-
-#define PP_VALID 0x1F1F1F1F
struct pp_instance {
- uint32_t pp_valid;
uint32_t chip_family;
uint32_t chip_id;
bool pm_en;
uint32_t feature_mask;
void *device;
- struct pp_smumgr *smu_mgr;
struct pp_hwmgr *hwmgr;
- struct pp_eventmgr *eventmgr;
struct mutex pp_lock;
};
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index 901c960cfe21..2b3497135bbd 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -70,7 +70,12 @@
#define PPSMC_MSG_SetPhyclkVoltageByFreq 0x26
#define PPSMC_MSG_SetDppclkVoltageByFreq 0x27
#define PPSMC_MSG_SetSoftMinVcn 0x28
-#define PPSMC_Message_Count 0x29
+#define PPSMC_MSG_GetGfxclkFrequency 0x2A
+#define PPSMC_MSG_GetFclkFrequency 0x2B
+#define PPSMC_MSG_GetMinGfxclkFrequency 0x2C
+#define PPSMC_MSG_GetMaxGfxclkFrequency 0x2D
+#define PPSMC_MSG_SoftReset 0x2E
+#define PPSMC_Message_Count 0x2F
typedef uint16_t PPSMC_Result;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 5d61cc9d4554..b1b27b2128f6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -23,23 +23,13 @@
#ifndef _SMUMGR_H_
#define _SMUMGR_H_
#include <linux/types.h>
-#include "pp_instance.h"
#include "amd_powerplay.h"
-
-struct pp_smumgr;
-struct pp_instance;
-struct pp_hwmgr;
+#include "hwmgr.h"
#define smu_lower_32_bits(n) ((uint32_t)(n))
#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
-extern const struct pp_smumgr_func cz_smu_funcs;
-extern const struct pp_smumgr_func iceland_smu_funcs;
-extern const struct pp_smumgr_func tonga_smu_funcs;
-extern const struct pp_smumgr_func fiji_smu_funcs;
-extern const struct pp_smumgr_func polaris10_smu_funcs;
-extern const struct pp_smumgr_func vega10_smu_funcs;
-extern const struct pp_smumgr_func rv_smu_funcs;
+
enum AVFS_BTC_STATUS {
AVFS_BTC_BOOT = 0,
@@ -85,6 +75,11 @@ enum SMU_MEMBER {
VceBootLevel,
SamuBootLevel,
LowSclkInterruptThreshold,
+ DRAM_LOG_ADDR_H,
+ DRAM_LOG_ADDR_L,
+ DRAM_LOG_PHY_ADDR_H,
+ DRAM_LOG_PHY_ADDR_L,
+ DRAM_LOG_BUFF_SIZE,
};
@@ -100,216 +95,44 @@ enum SMU_MAC_DEFINITION {
SMU_UVD_MCLK_HANDSHAKE_DISABLE,
};
+extern int smum_get_argument(struct pp_hwmgr *hwmgr);
-struct pp_smumgr_func {
- int (*smu_init)(struct pp_smumgr *smumgr);
- int (*smu_fini)(struct pp_smumgr *smumgr);
- int (*start_smu)(struct pp_smumgr *smumgr);
- int (*check_fw_load_finish)(struct pp_smumgr *smumgr,
- uint32_t firmware);
- int (*request_smu_load_fw)(struct pp_smumgr *smumgr);
- int (*request_smu_load_specific_fw)(struct pp_smumgr *smumgr,
- uint32_t firmware);
- int (*get_argument)(struct pp_smumgr *smumgr);
- int (*send_msg_to_smc)(struct pp_smumgr *smumgr, uint16_t msg);
- int (*send_msg_to_smc_with_parameter)(struct pp_smumgr *smumgr,
- uint16_t msg, uint32_t parameter);
- int (*download_pptable_settings)(struct pp_smumgr *smumgr,
- void **table);
- int (*upload_pptable_settings)(struct pp_smumgr *smumgr);
- int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type);
- int (*process_firmware_header)(struct pp_hwmgr *hwmgr);
- int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr);
- int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr);
- int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr);
- int (*init_smc_table)(struct pp_hwmgr *hwmgr);
- int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr);
- int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr);
- int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr);
- uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
- uint32_t (*get_mac_definition)(uint32_t value);
- bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
- int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
- bool (*is_hw_avfs_present)(struct pp_smumgr *smumgr);
-};
-
-struct pp_smumgr {
- uint32_t chip_family;
- uint32_t chip_id;
- void *device;
- void *backend;
- uint32_t usec_timeout;
- bool reload_fw;
- const struct pp_smumgr_func *smumgr_funcs;
- bool is_kicker;
-};
-
-extern int smum_early_init(struct pp_instance *handle);
+extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
-extern int smum_get_argument(struct pp_smumgr *smumgr);
+extern int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr);
-extern int smum_download_powerplay_table(struct pp_smumgr *smumgr, void **table);
+extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
-extern int smum_upload_powerplay_table(struct pp_smumgr *smumgr);
-
-extern int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
-
-extern int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter);
-extern int smum_wait_on_register(struct pp_smumgr *smumgr,
- uint32_t index, uint32_t value, uint32_t mask);
-
-extern int smum_wait_for_register_unequal(struct pp_smumgr *smumgr,
- uint32_t index, uint32_t value, uint32_t mask);
-
-extern int smum_wait_on_indirect_register(struct pp_smumgr *smumgr,
- uint32_t indirect_port, uint32_t index,
- uint32_t value, uint32_t mask);
-
-
-extern void smum_wait_for_indirect_register_unequal(
- struct pp_smumgr *smumgr,
- uint32_t indirect_port, uint32_t index,
- uint32_t value, uint32_t mask);
-
extern int smu_allocate_memory(void *device, uint32_t size,
enum cgs_gpu_mem_type type,
uint32_t byte_align, uint64_t *mc_addr,
void **kptr, void *handle);
extern int smu_free_memory(void *device, void *handle);
-extern int vega10_smum_init(struct pp_smumgr *smumgr);
extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr);
-extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
-extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
+extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
+extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
extern int smum_init_smc_table(struct pp_hwmgr *hwmgr);
extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
-extern uint32_t smum_get_offsetof(struct pp_smumgr *smumgr,
+extern uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr,
uint32_t type, uint32_t member);
-extern uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value);
+extern uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value);
extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
extern int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request);
-extern bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr);
-
-#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
-
-#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
-
-#define SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \
- port, index, value, mask) \
- smum_wait_on_indirect_register(smumgr, \
- mm##port##_INDEX, index, value, mask)
-
-#define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \
- SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
-
-#define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
- SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field) )
-
-#define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
- index, value, mask) \
- smum_wait_for_register_unequal(smumgr, \
- index, value, mask)
-
-#define SMUM_WAIT_REGISTER_UNEQUAL(smumgr, reg, value, mask) \
- SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
- mm##reg, value, mask)
-
-#define SMUM_WAIT_FIELD_UNEQUAL(smumgr, reg, field, fieldval) \
- SMUM_WAIT_REGISTER_UNEQUAL(smumgr, reg, \
- (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field))
-
-#define SMUM_GET_FIELD(value, reg, field) \
- (((value) & SMUM_FIELD_MASK(reg, field)) \
- >> SMUM_FIELD_SHIFT(reg, field))
-
-#define SMUM_READ_FIELD(device, reg, field) \
- SMUM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
-
-#define SMUM_SET_FIELD(value, reg, field, field_val) \
- (((value) & ~SMUM_FIELD_MASK(reg, field)) | \
- (SMUM_FIELD_MASK(reg, field) & ((field_val) << \
- SMUM_FIELD_SHIFT(reg, field))))
-
-#define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \
- SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field)
-
-#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \
- port, index, value, mask) \
- smum_wait_on_indirect_register(smumgr, \
- mm##port##_INDEX_0, index, value, mask)
-
-#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
- port, index, value, mask) \
- smum_wait_for_indirect_register_unequal(smumgr, \
- mm##port##_INDEX_0, index, value, mask)
-
-
-#define SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \
- SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
-
-#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \
- SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
-
-
-/*Operations on named fields.*/
-
-#define SMUM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
- SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field)
-
-#define SMUM_WRITE_FIELD(device, reg, field, fieldval) \
- cgs_write_register(device, mm##reg, \
- SMUM_SET_FIELD(cgs_read_register(device, mm##reg), reg, field, fieldval))
-
-#define SMUM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
- cgs_write_ind_register(device, port, ix##reg, \
- SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field, fieldval))
-
-
-#define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
- cgs_write_ind_register(device, port, ix##reg, \
- SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field, fieldval))
-
-
-#define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
- SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \
- (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field))
-
-#define SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \
- SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \
- (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field))
-
-#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask) \
- smum_wait_for_indirect_register_unequal(smumgr, \
- mm##port##_INDEX, index, value, mask)
-
-#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \
- SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr);
-#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \
- SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field) )
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
index cb070ebc7de1..247c97397a27 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
@@ -124,12 +124,15 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_NumOfDisplays 0x56
#define PPSMC_MSG_ReadSerialNumTop32 0x58
#define PPSMC_MSG_ReadSerialNumBottom32 0x59
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x5A
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x5B
#define PPSMC_MSG_RunAcgBtc 0x5C
#define PPSMC_MSG_RunAcgInClosedLoop 0x5D
#define PPSMC_MSG_RunAcgInOpenLoop 0x5E
#define PPSMC_MSG_InitializeAcg 0x5F
#define PPSMC_MSG_GetCurrPkgPwr 0x61
-#define PPSMC_Message_Count 0x62
+#define PPSMC_MSG_UpdatePkgPwrPidAlpha 0x68
+#define PPSMC_Message_Count 0x69
typedef int PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index e7ad45297b1d..30d3089d7dba 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -3,9 +3,9 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \
- polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \
- smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o
+SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \
+ polaris10_smumgr.o iceland_smumgr.o \
+ smu7_smumgr.o vega10_smumgr.o rv_smumgr.o ci_smumgr.o
AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 51adf04ab4b3..4d672cd15785 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -1,16 +1,9 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
@@ -18,90 +11,231 @@
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include "linux/delay.h"
+#include <linux/types.h>
+#include "smumgr.h"
#include "pp_debug.h"
-#include "iceland_smc.h"
-#include "smu7_dyn_defaults.h"
-
+#include "ci_smumgr.h"
+#include "ppsmc.h"
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "atombios.h"
#include "pppcielanes.h"
-#include "pp_endian.h"
-#include "smu7_ppsmc.h"
-#include "smu71_discrete.h"
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
-#include "smu/smu_7_1_1_d.h"
-#include "smu/smu_7_1_1_sh_mask.h"
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "processpptables.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
-#include "iceland_smumgr.h"
+#include "processpptables.h"
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
-#define VDDC_VDDCI_DELTA 200
-
-#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
-#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
-#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
-#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
-
-static const struct iceland_pt_defaults defaults_iceland = {
- /*
- * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
- * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
- */
+#define MC_CG_ARB_FREQ_F2 0x0c
+#define MC_CG_ARB_FREQ_F3 0x0d
+
+#define SMC_RAM_END 0x40000
+
+#define VOLTAGE_SCALE 4
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define CISLAND_MINIMUM_ENGINE_CLOCK 800
+#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
+
+static const struct ci_pt_defaults defaults_hawaii_xt = {
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
+ { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
+ { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_hawaii_pro = {
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
+ { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
+ { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_bonaire_xt = {
1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
-/* 35W - XT, XTL */
-static const struct iceland_pt_defaults defaults_icelandxt = {
- /*
- * sviLoadLIneEn, SviLoadLineVddC,
- * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
- * BAPM_TEMP_GRADIENT
- */
- 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
- { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0},
- { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
-};
-/* 25W - PRO, LE */
-static const struct iceland_pt_defaults defaults_icelandpro = {
- /*
- * sviLoadLIneEn, SviLoadLineVddC,
- * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
- * BAPM_TEMP_GRADIENT
- */
- 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
- { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0},
- { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+static const struct ci_pt_defaults defaults_saturn_xt = {
+ 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
+ { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
+ { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
-static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+
+static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
+ uint32_t smc_addr, uint32_t limit)
+{
+ if ((0 != (3 & smc_addr))
+ || ((smc_addr + 3) >= limit)) {
+ pr_err("smc_addr invalid \n");
+ return -EINVAL;
+ }
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+ return 0;
+}
+
+static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
+ const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+ int result;
+ uint32_t data = 0;
+ uint32_t original_data;
+ uint32_t addr = 0;
+ uint32_t extra_shift;
+
+ if ((3 & smc_start_address)
+ || ((smc_start_address + byte_count) >= limit)) {
+ pr_err("smc_start_address invalid \n");
+ return -EINVAL;
+ }
+
+ addr = smc_start_address;
+
+ while (byte_count >= 4) {
+ /* Bytes are written into the SMC address space with the MSB first. */
+ data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+
+ result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ if (0 != byte_count) {
+
+ data = 0;
+
+ result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+
+ original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
+
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+		/* Bytes are written into the SMC address space with the MSB first. */
+ data = (0x100 * data) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+ }
+
+ return 0;
+}
+
+
+static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
+{
+ static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
+
+ ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
+
+ return 0;
+}
+
+bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+{
+ return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+ && (0x20100 <= cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixSMC_PC_C)));
+}
+
+static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
+ uint32_t *value, uint32_t limit)
+{
+ int result;
+
+ result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);
+
+ if (result)
+ return result;
+
+ *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
+ return 0;
+}
+
+static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ int ret;
+
+ if (!ci_is_smc_ram_running(hwmgr))
+ return -EINVAL;
+
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
+
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
+
+ ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
+
+ if (ret != 1)
+ pr_info("\n failed to send message %x ret is %d\n", msg, ret);
+
+ return 0;
+}
+
+static int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter)
+{
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
+ return ci_send_msg_to_smc(hwmgr, msg);
+}
+
+static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
struct cgs_system_info sys_info = {0};
uint32_t dev_id;
@@ -111,27 +245,282 @@ static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
dev_id = (uint32_t)sys_info.value;
switch (dev_id) {
- case DEVICE_ID_VI_ICELAND_M_6900:
- case DEVICE_ID_VI_ICELAND_M_6903:
- smu_data->power_tune_defaults = &defaults_icelandxt;
+ case 0x67BA:
+ case 0x66B1:
+ smu_data->power_tune_defaults = &defaults_hawaii_pro;
break;
-
- case DEVICE_ID_VI_ICELAND_M_6901:
- case DEVICE_ID_VI_ICELAND_M_6902:
- smu_data->power_tune_defaults = &defaults_icelandpro;
+ case 0x67B8:
+ case 0x66B0:
+ smu_data->power_tune_defaults = &defaults_hawaii_xt;
+ break;
+ case 0x6640:
+ case 0x6641:
+ case 0x6646:
+ case 0x6647:
+ smu_data->power_tune_defaults = &defaults_saturn_xt;
break;
+ case 0x6649:
+ case 0x6650:
+ case 0x6651:
+ case 0x6658:
+ case 0x665C:
+ case 0x665D:
+ case 0x67A0:
+ case 0x67A1:
+ case 0x67A2:
+ case 0x67A8:
+ case 0x67A9:
+ case 0x67AA:
+ case 0x67B9:
+ case 0x67BE:
default:
- smu_data->power_tune_defaults = &defaults_iceland;
- pr_warn("Unknown V.I. Device ID.\n");
+ smu_data->power_tune_defaults = &defaults_bonaire_xt;
break;
}
- return;
}
-static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, uint32_t *vol)
+{
+ uint32_t i = 0;
+
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ *vol = allowed_clock_voltage_table->entries[i].v;
+ return 0;
+ }
+ }
+
+ *vol = allowed_clock_voltage_table->entries[i - 1].v;
+ return 0;
+}
+
+static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t ref_clock;
+ uint32_t ref_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
+ ref_clock = atomctrl_get_reference_clock(hwmgr);
+ ref_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider */
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup */
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ struct pp_atomctrl_internal_ss_info ss_info;
+ uint32_t vco_freq = clock * dividers.uc_pll_post_div;
+
+ if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
+ vco_freq, &ss_info)) {
+ uint32_t clk_s = ref_clock * 5 /
+ (ref_divider * ss_info.speed_spectrum_rate);
+ uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
+ fbdiv / (clk_s * 10000);
+
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
+ CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
+ }
+ }
+
+ sclk->SclkFrequency = clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
+
+static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
+ const struct phm_phase_shedding_limits_table *pl,
+ uint32_t sclk, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ /* use the minimum phase shedding */
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (sclk < pl->entries[i].Sclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+}
+
+static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
+ uint32_t clock_insr)
+{
+ uint8_t i;
+ uint32_t temp;
+ uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);
+
+ if (clock < min) {
+ pr_info("Engine clock can't satisfy stutter requirement!\n");
+ return 0;
+ }
+ for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+ temp = clock >> i;
+
+ if (temp >= min || i == 0)
+ break;
+ }
+ return i;
+}
+
+static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU7_Discrete_GraphicsLevel *level)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+
+ result = ci_calculate_sclk_params(hwmgr, clock, level);
+
+ /* populate graphics levels */
+ result = ci_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
+ (uint32_t *)(&level->MinVddc));
+ if (result) {
+ pr_err("vdd_dep_on_sclk table is NULL\n");
+ return result;
+ }
+
+ level->SclkFrequency = clock;
+ level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control)
+ ci_populate_phase_value_based_on_sclk(hwmgr,
+ hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ clock,
+ &level->MinVddcPhases);
+
+ level->ActivityLevel = sclk_al_threshold;
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ /* this level can be used for throttling.*/
+ level->EnabledForThrottle = 1;
+ level->UpH = 0;
+ level->DownH = 0;
+ level->VoltageDownH = 0;
+ level->PowerThrottle = 0;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId =
+ ci_get_sleep_divider_id_from_clock(clock,
+ CISLAND_MINIMUM_ENGINE_CLOCK);
+
+ /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+ level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (0 == result) {
+ level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+ }
+
+ return result;
+}
+
+static int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result = 0;
+ uint32_t array = smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
+ SMU7_MAX_LEVELS_GRAPHICS;
+ struct SMU7_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = ci_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &levels[i]);
+ if (result)
+ return result;
+ if (i > 1)
+ smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ if (i == (dpm_table->sclk_table.count - 1))
+ smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+ }
+
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ result = ci_copy_bytes_to_smc(hwmgr, array,
+ (u8 *)levels, array_size,
+ SMC_RAM_END);
+
+ return result;
+
+}
+
+static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
@@ -141,11 +530,11 @@ static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
return 0;
}
-static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
uint16_t tdc_limit;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
smu_data->power_tune_table.TDC_VDDC_PkgLimit =
@@ -157,15 +546,15 @@ static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
return 0;
}
-static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
uint32_t temp;
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ if (ci_read_smc_sram_dword(hwmgr,
fuse_table_offset +
- offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
+ offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
(uint32_t *)&temp, SMC_RAM_END))
PP_ASSERT_WITH_CODE(false,
"Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
@@ -176,52 +565,29 @@ static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offs
return 0;
}
-static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
- return 0;
-}
-
-static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 8; i++)
- smu_data->power_tune_table.GnbLPML[i] = 0;
+ uint16_t tmp;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
- return 0;
-}
-
-static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+ || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+ tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
+ else
+ tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
- HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
- smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);
return 0;
}
-static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
int i;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+ uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;
PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
"The CAC Leakage table does not exist!", return -EINVAL);
@@ -230,22 +596,24 @@ static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
"CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
- for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+ for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+ hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
+ } else {
+ lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
+ hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
}
- } else {
- PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
}
return 0;
}
-static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
{
int i;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint8_t *vid = smu_data->power_tune_table.VddCVid;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -253,117 +621,154 @@ static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
"There should never be more than 8 entries for VddcVid!!!",
return -EINVAL);
- for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
+ for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+
+ return 0;
+}
+
+static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+ u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+ int i, min, max;
+
+ min = max = hi_vid[0];
+ for (i = 0; i < 8; i++) {
+ if (0 != hi_vid[i]) {
+ if (min > hi_vid[i])
+ min = hi_vid[i];
+ if (max < hi_vid[i])
+ max = hi_vid[i];
+ }
+
+ if (0 != lo_vid[i]) {
+ if (min > lo_vid[i])
+ min = lo_vid[i];
+ if (max < lo_vid[i])
+ max = lo_vid[i];
+ }
}
+ if ((min == 0) || (max == 0))
+ return -EINVAL;
+ smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
+ smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;
+
return 0;
}
+static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+
+ HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
-static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+ return 0;
+}
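(Aside: dividing usHighCACLeakage/usLowCACLeakage by 100 and multiplying by 256 produces the unsigned 8.8 fixed-point value the SMC expects, assuming the pptable stores these fields in hundredths; note the integer division drops the fractional hundredths. A tiny hypothetical helper doing the same conversion:

    /* Hypothetical helper: hundredths to unsigned 8.8 fixed point,
     * integer part only, matching the expression above. */
    static inline uint16_t hundredths_to_q8_8(uint16_t hundredths)
    {
            return (uint16_t)(hundredths / 100 * 256);
    }

The unit interpretation is an assumption; the arithmetic is exactly what the code above does.)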
+
+static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint32_t pm_fuse_table_offset;
+ int ret = 0;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed!",
- return -EINVAL);
+ if (ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END)) {
+ pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
+ return -EINVAL;
+ }
/* DW0 - DW3 */
- if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate bapm vddc vid Failed!",
- return -EINVAL);
-
+ ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
/* DW4 - DW5 */
- if (iceland_populate_vddc_vid(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate vddc vid Failed!",
- return -EINVAL);
-
+ ret |= ci_populate_vddc_vid(hwmgr);
/* DW6 */
- if (iceland_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed!",
- return -EINVAL);
+ ret |= ci_populate_svi_load_line(hwmgr);
/* DW7 */
- if (iceland_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed!", return -EINVAL);
+ ret |= ci_populate_tdc_limit(hwmgr);
/* DW8 */
- if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl, "
- "LPMLTemperature Min and Max Failed!",
- return -EINVAL);
-
- /* DW9-DW12 */
- if (0 != iceland_populate_temperature_scaler(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed!",
- return -EINVAL);
-
- /* DW13-DW16 */
- if (iceland_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed!",
- return -EINVAL);
-
- /* DW17 */
- if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
- /* DW18 */
- if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
- return -EINVAL);
-
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);
+
+ ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);
+
+ ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);
+
+ ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
+ if (ret)
+ return ret;
+
+ ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
(uint8_t *)&smu_data->power_tune_table,
- sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed!",
- return -EINVAL);
+ sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
}
- return 0;
+ return ret;
}
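(Aside on the ret |= pattern introduced above: OR-ing negative errno values together still makes the final check fail when any step failed, but the combined value is no longer a specific errno. A sketch of a variant that keeps the first failure code, shown only as an alternative, not what the patch does:

    ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
    if (!ret)
            ret = ci_populate_vddc_vid(hwmgr);
    if (!ret)
            ret = ci_populate_svi_load_line(hwmgr);
    /* ... and so on for the remaining PM fuse helpers ... */
    if (ret)
            return ret;

Since ci_copy_bytes_to_smc() is only reached when ret is 0, the observable difference is merely which error value the caller sees.)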
-static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
- struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
- uint32_t clock, uint32_t *vol)
+static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
- uint32_t i = 0;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+ struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
+ struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
+ const uint16_t *def1, *def2;
+ int i, j, k;
- /* clock - voltage dependency table is empty table */
- if (allowed_clock_voltage_table->count == 0)
- return -EINVAL;
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
- for (i = 0; i < allowed_clock_voltage_table->count; i++) {
- /* find first sclk bigger than request */
- if (allowed_clock_voltage_table->entries[i].clk >= clock) {
- *vol = allowed_clock_voltage_table->entries[i].v;
- return 0;
- }
+ dpm_table->DTETjOffset = 0;
+ dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+ if (ppm) {
+ dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
+ dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
+ } else {
+ dpm_table->PPM_PkgPwrLimit = 0;
+ dpm_table->PPM_TemperatureLimit = 0;
}
- /* sclk is bigger than max sclk in the dependence table */
- *vol = allowed_clock_voltage_table->entries[i - 1].v;
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
+
+ dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
+ def1 = defaults->bapmti_r;
+ def2 = defaults->bapmti_rc;
+
+ for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU7_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU7_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
+ dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
+ def1++;
+ def2++;
+ }
+ }
+ }
return 0;
}
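(Aside: the PP_HOST_TO_SMC_US/UL and CONVERT_FROM_HOST_TO_SMC_* macros used throughout this table exist because the SMC consumes these structures in big-endian byte order. Their real definitions live in the smumgr headers; as an assumed approximation they behave like:

    /* Assumed behaviour, for orientation only; see the smumgr headers
     * for the real definitions. */
    #define EXAMPLE_PP_HOST_TO_SMC_US(x)            cpu_to_be16(x)
    #define EXAMPLE_PP_HOST_TO_SMC_UL(x)            cpu_to_be32(x)
    #define EXAMPLE_CONVERT_FROM_HOST_TO_SMC_US(x)  ((x) = cpu_to_be16(x))

Each field must be converted exactly once before the table is copied to SMC RAM, which is also why the duplicated StdVoltageHiSidd conversion fixed earlier in this patch mattered: on a little-endian host, swapping twice restores host order.)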
-static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
+static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
uint16_t *lo)
{
@@ -372,7 +777,6 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
*hi = tab->value * VOLTAGE_SCALE;
*lo = tab->value * VOLTAGE_SCALE;
- /* SCLK/VDDC Dependency Table has to exist. */
PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
"The SCLK/VDDC Dependency Table does not exist.\n",
return -EINVAL);
@@ -382,11 +786,6 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
return 0;
}
- /*
- * Since voltage in the sclk/vddc dependency table is not
- * necessarily in ascending order because of ELB voltage
- * patching, loop through entire list to find exact voltage.
- */
for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
vol_found = true;
@@ -402,10 +801,6 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
}
}
- /*
- * If voltage is not found in the first pass, loop again to
- * find the best match, equal or higher value.
- */
if (!vol_found) {
for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
@@ -429,29 +824,29 @@ static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
pp_atomctrl_voltage_table_entry *tab,
- SMU71_Discrete_VoltageLevel *smc_voltage_tab)
+ SMU7_Discrete_VoltageLevel *smc_voltage_tab)
{
int result;
- result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
+ result = ci_get_std_voltage_value_sidd(hwmgr, tab,
&smc_voltage_tab->StdVoltageHiSidd,
&smc_voltage_tab->StdVoltageLoSidd);
- if (0 != result) {
+ if (result) {
smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
}
smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
- CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+ CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
return 0;
}
-static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
unsigned int count;
int result;
@@ -459,7 +854,7 @@ static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
table->VddcLevelCount = data->vddc_voltage_table.count;
for (count = 0; count < table->VddcLevelCount; count++) {
- result = iceland_populate_smc_voltage_table(hwmgr,
+ result = ci_populate_smc_voltage_table(hwmgr,
&(data->vddc_voltage_table.entries[count]),
&(table->VddcLevel[count]));
PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
@@ -467,7 +862,7 @@ static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
/* GPIO voltage control */
if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ else
table->VddcLevel[count].Smio = 0;
}
@@ -476,8 +871,8 @@ static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t count;
@@ -486,7 +881,7 @@ static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
table->VddciLevelCount = data->vddci_voltage_table.count;
for (count = 0; count < table->VddciLevelCount; count++) {
- result = iceland_populate_smc_voltage_table(hwmgr,
+ result = ci_populate_smc_voltage_table(hwmgr,
&(data->vddci_voltage_table.entries[count]),
&(table->VddciLevel[count]));
PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
@@ -501,8 +896,8 @@ static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t count;
@@ -510,8 +905,8 @@ static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
table->MvddLevelCount = data->mvdd_voltage_table.count;
- for (count = 0; count < table->VddciLevelCount; count++) {
- result = iceland_populate_smc_voltage_table(hwmgr,
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ result = ci_populate_smc_voltage_table(hwmgr,
&(data->mvdd_voltage_table.entries[count]),
&table->MvddLevel[count]);
PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
@@ -527,28 +922,28 @@ static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
}
-static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
int result;
- result = iceland_populate_smc_vddc_table(hwmgr, table);
+ result = ci_populate_smc_vddc_table(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
"can not populate VDDC voltage table to SMC", return -EINVAL);
- result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
+ result = ci_populate_smc_vdd_ci_table(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
"can not populate VDDCI voltage table to SMC", return -EINVAL);
- result = iceland_populate_smc_mvdd_table(hwmgr, table);
+ result = ci_populate_smc_mvdd_table(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
"can not populate MVDD voltage table to SMC", return -EINVAL);
return 0;
}
-static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU71_Discrete_Ulv *state)
+static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU7_Discrete_Ulv *state)
{
uint32_t voltage_response_time, ulv_voltage;
int result;
@@ -591,33 +986,28 @@ static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_Ulv *ulv_level)
+static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_Ulv *ulv_level)
{
- return iceland_populate_ulv_level(hwmgr, ulv_level);
+ return ci_populate_ulv_level(hwmgr, ulv_level);
}
-static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint32_t i;
- /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+/* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level.*/
for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
table->LinkLevel[i].PcieGenSpeed =
(uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
table->LinkLevel[i].PcieLaneCount =
(uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity =
- 1;
- table->LinkLevel[i].SPC =
- (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold =
- PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold =
- PP_HOST_TO_SMC_UL(30);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
}
smu_data->smc_state_table.LinkLevelCount =
@@ -628,294 +1018,15 @@ static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discret
return 0;
}
-/**
- * Calculates the SCLK dividers using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t reference_clock;
- uint32_t reference_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
- reference_clock = atomctrl_get_reference_clock(hwmgr);
-
- reference_divider = 1 + dividers.uc_pll_ref_div;
-
- /* low 14 bits is fraction and high 12 bits is divider*/
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
- /* SPLL_FUNC_CNTL setup*/
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- pp_atomctrl_internal_ss_info ss_info;
-
- uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
- if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- */
- /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
- uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
-
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
-
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 =
- PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
- }
- }
-
- sclk->SclkFrequency = engine_clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
-
-static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
- const struct phm_phase_shedding_limits_table *pl,
- uint32_t sclk, uint32_t *p_shed)
-{
- unsigned int i;
-
- /* use the minimum phase shedding */
- *p_shed = 1;
-
- for (i = 0; i < pl->count; i++) {
- if (sclk < pl->entries[i].Sclk) {
- *p_shed = i;
- break;
- }
- }
- return 0;
-}
-
-/**
- * Populates single SMC SCLK structure using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock,
- uint16_t sclk_activity_level_threshold,
- SMU71_Discrete_GraphicsLevel *graphic_level)
-{
- int result;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
-
- /* populate graphics levels*/
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
- hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
- &graphic_level->MinVddc);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for VDDC \
- engine clock dependency table", return result);
-
- /* SCLK frequency in units of 10KHz*/
- graphic_level->SclkFrequency = engine_clock;
- graphic_level->MinVddcPhases = 1;
-
- if (data->vddc_phase_shed_control)
- iceland_populate_phase_value_based_on_sclk(hwmgr,
- hwmgr->dyn_state.vddc_phase_shed_limits_table,
- engine_clock,
- &graphic_level->MinVddcPhases);
-
- /* Indicates maximum activity level for this performance level. 50% for now*/
- graphic_level->ActivityLevel = sclk_activity_level_threshold;
-
- graphic_level->CcPwrDynRm = 0;
- graphic_level->CcPwrDynRm1 = 0;
- /* this level can be used if activity is high enough.*/
- graphic_level->EnabledForActivity = 0;
- /* this level can be used for throttling.*/
- graphic_level->EnabledForThrottle = 1;
- graphic_level->UpHyst = 0;
- graphic_level->DownHyst = 100;
- graphic_level->VoltageDownHyst = 0;
- graphic_level->PowerThrottle = 0;
-
- data->display_timing.min_clock_in_sr =
- hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep))
- graphic_level->DeepSleepDivId =
- smu7_get_sleep_divider_id_from_clock(engine_clock,
- data->display_timing.min_clock_in_sr);
-
- /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
- graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- if (0 == result) {
- graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
- }
-
- return result;
-}
-
-/**
- * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
-
- uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
- SMU71_MAX_LEVELS_GRAPHICS;
-
- SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
-
- uint32_t i;
- uint8_t highest_pcie_level_enabled = 0;
- uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
- uint8_t count = 0;
- int result = 0;
-
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = iceland_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
- &(smu_data->smc_state_table.GraphicsLevel[i]));
- if (result != 0)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
- }
-
- /* Only enable level 0 for now. */
- smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-
- /* set highest level watermark to high */
- if (dpm_table->sclk_table.count > 1)
- smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- smu_data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (highest_pcie_level_enabled + 1))) != 0) {
- highest_pcie_level_enabled++;
- }
-
- while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << lowest_pcie_level_enabled)) == 0) {
- lowest_pcie_level_enabled++;
- }
-
- while ((count < highest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
- count++;
- }
-
- mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
- (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
-
-
- /* set pcieDpmLevel to highest_pcie_level_enabled*/
- for (i = 2; i < dpm_table->sclk_table.count; i++) {
- smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
- }
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled*/
- smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled*/
- smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
-
- /* level count will send to smc once at init smc table and never change*/
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress,
- (uint8_t *)levels, (uint32_t)level_array_size,
- SMC_RAM_END);
-
- return result;
-}
-
-/**
- * Populates the SMC MCLK structure using the provided memory clock
- *
- * @param hwmgr the address of the hardware manager
- * @param memory_clock the memory clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int iceland_calculate_mclk_params(
+static int ci_calculate_mclk_params(
struct pp_hwmgr *hwmgr,
uint32_t memory_clock,
- SMU71_Discrete_MemoryLevel *mclk,
+ SMU7_Discrete_MemoryLevel *mclk,
bool strobe_mode,
bool dllStateOn
)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
@@ -934,10 +1045,8 @@ static int iceland_calculate_mclk_params(
PP_ASSERT_WITH_CODE(0 == result,
"Error retrieving Memory Clock Parameters from VBIOS.", return result);
- /* MPLL_FUNC_CNTL setup*/
mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
- /* MPLL_FUNC_CNTL_1 setup*/
mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
@@ -945,12 +1054,10 @@ static int iceland_calculate_mclk_params(
mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
- /* MPLL_AD_FUNC_CNTL setup*/
mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
if (data->is_memory_gddr5) {
- /* MPLL_DQ_FUNC_CNTL setup*/
mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
@@ -959,21 +1066,6 @@ static int iceland_calculate_mclk_params(
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
- /*
- ************************************
- Fref = Reference Frequency
- NF = Feedback divider ratio
- NR = Reference divider ratio
- Fnom = Nominal VCO output frequency = Fref * NF / NR
- Fs = Spreading Rate
- D = Percentage down-spread / 2
- Fint = Reference input frequency to PFD = Fref / NR
- NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
- CLKS = NS - 1 = ISS_STEP_NUM[11:0]
- NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
- CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
- *************************************
- */
pp_atomctrl_internal_ss_info ss_info;
uint32_t freq_nom;
uint32_t tmp;
@@ -990,14 +1082,7 @@ static int iceland_calculate_mclk_params(
tmp = tmp * tmp;
if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
- /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
- /* ss.Info.speed_spectrum_rate -- in unit of khz */
- /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
- /* = reference_clock * 5 / speed_spectrum_rate */
uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
-
- /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
- /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
uint32_t clkv =
(uint32_t)((((131 * ss_info.speed_spectrum_percentage *
ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
@@ -1007,7 +1092,6 @@ static int iceland_calculate_mclk_params(
}
}
- /* MCLK_PWRMGT_CNTL setup */
mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
@@ -1016,7 +1100,6 @@ static int iceland_calculate_mclk_params(
MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
- /* Save the result data to outpupt memory level structure */
mclk->MclkFrequency = memory_clock;
mclk->MpllFuncCntl = mpll_func_cntl;
mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
@@ -1031,48 +1114,45 @@ static int iceland_calculate_mclk_params(
return 0;
}
-static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
+static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
bool strobe_mode)
{
uint8_t mc_para_index;
if (strobe_mode) {
- if (memory_clock < 12500) {
+ if (memory_clock < 12500)
mc_para_index = 0x00;
- } else if (memory_clock > 47500) {
+ else if (memory_clock > 47500)
mc_para_index = 0x0f;
- } else {
+ else
mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
- }
} else {
- if (memory_clock < 65000) {
+ if (memory_clock < 65000)
mc_para_index = 0x00;
- } else if (memory_clock > 135000) {
+ else if (memory_clock > 135000)
mc_para_index = 0x0f;
- } else {
+ else
mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
- }
}
return mc_para_index;
}
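(Aside: this quantizes the memory clock, given in 10 kHz units, into a 4-bit index. For example, in strobe mode a 200 MHz clock (memory_clock = 20000) maps to (20000 - 10000) / 2500 = 4, and 47500 (475 MHz) already evaluates to 0x0f, the clamp value used for anything higher. A quick self-check sketch, were one to drop it into this file:

    /* Sanity checks for the strobe-mode quantization above. */
    WARN_ON(ci_get_mclk_frequency_ratio(12000, true) != 0x00);
    WARN_ON(ci_get_mclk_frequency_ratio(20000, true) != 4);
    WARN_ON(ci_get_mclk_frequency_ratio(47500, true) != 0x0f);

The buckets and thresholds are taken straight from the code above; nothing here is an external assumption.)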
-static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
{
uint8_t mc_para_index;
- if (memory_clock < 10000) {
+ if (memory_clock < 10000)
mc_para_index = 0;
- } else if (memory_clock >= 80000) {
+ else if (memory_clock >= 80000)
mc_para_index = 0x0f;
- } else {
+ else
mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
- }
return mc_para_index;
}
-static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
uint32_t memory_clock, uint32_t *p_shed)
{
unsigned int i;
@@ -1089,10 +1169,10 @@ static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, co
return 0;
}
-static int iceland_populate_single_memory_level(
+static int ci_populate_single_memory_level(
struct pp_hwmgr *hwmgr,
uint32_t memory_clock,
- SMU71_Discrete_MemoryLevel *memory_level
+ SMU7_Discrete_MemoryLevel *memory_level
)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1104,16 +1184,14 @@ static int iceland_populate_single_memory_level(
uint32_t mclk_strobe_mode_threshold = 40000;
if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ result = ci_get_dependency_volt_by_clk(hwmgr,
hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
PP_ASSERT_WITH_CODE((0 == result),
"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
}
- if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
- memory_level->MinVddci = memory_level->MinVddc;
- } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+ result = ci_get_dependency_volt_by_clk(hwmgr,
hwmgr->dyn_state.vddci_dependency_on_mclk,
memory_clock,
&memory_level->MinVddci);
@@ -1121,18 +1199,27 @@ static int iceland_populate_single_memory_level(
"can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
}
+ if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
+ result = ci_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.mvdd_dependency_on_mclk,
+ memory_clock,
+ &memory_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddci voltage value from memory MVDD voltage dependency table", return result);
+ }
+
memory_level->MinVddcPhases = 1;
if (data->vddc_phase_shed_control) {
- iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
memory_clock, &memory_level->MinVddcPhases);
}
memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 0;
- memory_level->UpHyst = 0;
- memory_level->DownHyst = 100;
- memory_level->VoltageDownHyst = 0;
+ memory_level->EnabledForActivity = 1;
+ memory_level->UpH = 0;
+ memory_level->DownH = 100;
+ memory_level->VoltageDownH = 0;
/* Indicates maximum activity level for this performance level.*/
memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
@@ -1148,7 +1235,7 @@ static int iceland_populate_single_memory_level(
cgs_get_active_displays_info(hwmgr->device, &info);
data->display_timing.num_existing_displays = info.display_count;
- /* stutter mode not support on iceland */
+ /* stutter mode not supported on ci */
/* decide strobe mode*/
memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
@@ -1156,7 +1243,7 @@ static int iceland_populate_single_memory_level(
/* decide EDC mode and memory clock ratio*/
if (data->is_memory_gddr5) {
- memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
+ memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
memory_level->StrobeEnable);
if ((mclk_edc_enable_threshold != 0) &&
@@ -1170,7 +1257,7 @@ static int iceland_populate_single_memory_level(
}
if (memory_level->StrobeEnable) {
- if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
+ if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
else
@@ -1179,11 +1266,11 @@ static int iceland_populate_single_memory_level(
dll_state_on = data->dll_default_on;
} else {
memory_level->StrobeRatio =
- iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
+ ci_get_ddr3_mclk_frequency_ratio(memory_clock);
dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
}
- result = iceland_calculate_mclk_params(hwmgr,
+ result = ci_calculate_mclk_params(hwmgr,
memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
if (0 == result) {
@@ -1209,23 +1296,18 @@ static int iceland_populate_single_memory_level(
return result;
}
-/**
- * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-
-int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
int result;
+ struct cgs_system_info sys_info = {0};
+ uint32_t dev_id;
- /* populate MCLK dpm table to SMU7 */
- uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
- uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
- SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
+ uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
+ SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
uint32_t i;
memset(levels, 0x00, level_array_size);
@@ -1233,39 +1315,42 @@ int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
for (i = 0; i < dpm_table->mclk_table.count; i++) {
PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
"can not populate memory level as memory clock is zero", return -EINVAL);
- result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+ result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
&(smu_data->smc_state_table.MemoryLevel[i]));
- if (0 != result) {
+ if (0 != result)
return result;
- }
}
- /* Only enable level 0 for now.*/
smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
- /*
- * in order to prevent MC activity from stutter mode to push DPM up.
- * the UVD change complements this by putting the MCLK in a higher state
- * by default such that we are not effected by up threshold or and MCLK DPM latency.
- */
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ if ((dpm_table->mclk_table.count >= 2)
+ && ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
+ smu_data->smc_state_table.MemoryLevel[1].MinVddci =
+ smu_data->smc_state_table.MemoryLevel[0].MinVddci;
+ smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
+ smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
+ }
smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high*/
smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
- /* level count will send to smc once at init smc table and never change*/
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size,
+ result = ci_copy_bytes_to_smc(hwmgr,
+ level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
SMC_RAM_END);
return result;
}
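(Aside: the dev_id check added above special-cases PCI device IDs 0x67B0 and 0x67B1, which appear to be Hawaii variants, so that memory level 1 inherits level 0's minimum VDDCI/MVDD. If more IDs ever needed the same treatment, a table-driven form would keep the quirk readable; a hypothetical sketch, not part of this patch:

    /* Hypothetical: table-driven form of the same device-ID quirk. */
    static const uint32_t mclk_vddci_quirk_ids[] = { 0x67B0, 0x67B1 };

    static bool ci_needs_mclk1_vddci_quirk(uint32_t dev_id)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(mclk_vddci_quirk_ids); i++)
                    if (mclk_vddci_quirk_ids[i] == dev_id)
                            return true;
            return false;
    }

The "Hawaii" identification is an assumption based on the ID range; the patch itself only lists the two raw IDs.)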
-static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
- SMU71_Discrete_VoltageLevel *voltage)
+static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
+ SMU7_Discrete_VoltageLevel *voltage)
{
const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1291,15 +1376,14 @@ static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
return 0;
}
-static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
int result = 0;
const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct pp_atomctrl_clock_dividers_vi dividers;
- uint32_t vddc_phase_shed_control = 0;
- SMU71_Discrete_VoltageLevel voltage_level;
+ SMU7_Discrete_VoltageLevel voltage_level;
uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
@@ -1314,7 +1398,7 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
else
table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
- table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
+ table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
/* assign zero for now*/
table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
@@ -1346,7 +1430,6 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->ACPILevel.CcPwrDynRm = 0;
table->ACPILevel.CcPwrDynRm1 = 0;
-
/* For various features to be enabled/disabled while this level is active.*/
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
/* SCLK frequency in units of 10KHz*/
@@ -1360,6 +1443,7 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
/* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
@@ -1373,7 +1457,7 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
}
- if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
table->MemoryACPILevel.MinMvdd =
PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
else
@@ -1418,9 +1502,9 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
table->MemoryACPILevel.EnabledForThrottle = 0;
table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.UpH = 0;
+ table->MemoryACPILevel.DownH = 100;
+ table->MemoryACPILevel.VoltageDownH = 0;
/* Indicates maximum activity level for this performance level.*/
table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
@@ -1433,35 +1517,145 @@ static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
- return 0;
+ int result = 0;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+ table->UvdLevelCount = (uint8_t)(uvd_table->count);
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].VclkFrequency =
+ uvd_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency =
+ uvd_table->entries[count].dclk;
+ table->UvdLevel[count].MinVddc =
+ uvd_table->entries[count].v * VOLTAGE_SCALE;
+ table->UvdLevel[count].MinVddcPhases = 1;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
+ }
+
+ return result;
}
-static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
- return 0;
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_vce_clock_voltage_dependency_table *vce_table =
+ hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+
+ table->VceLevelCount = (uint8_t)(vce_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
+ table->VceLevel[count].MinVoltage =
+ vce_table->entries[count].v * VOLTAGE_SCALE;
+ table->VceLevel[count].MinPhases = 1;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
+ }
+ return result;
}
-static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
- return 0;
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_acp_clock_voltage_dependency_table *acp_table =
+ hwmgr->dyn_state.acp_clock_voltage_dependency_table;
+
+ table->AcpLevelCount = (uint8_t)(acp_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
+ table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
+ table->AcpLevel[count].MinPhases = 1;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for engine clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
+ }
+ return result;
}
-static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
- return 0;
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_samu_clock_voltage_dependency_table *samu_table =
+ hwmgr->dyn_state.samu_clock_voltage_dependency_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(samu_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
+ table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
+ table->SamuLevel[count].MinPhases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
}
-static int iceland_populate_memory_timing_parameters(
+static int ci_populate_memory_timing_parameters(
struct pp_hwmgr *hwmgr,
uint32_t engine_clock,
uint32_t memory_clock,
- struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
+ struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
)
{
uint32_t dramTiming;
@@ -1486,42 +1680,34 @@ static int iceland_populate_memory_timing_parameters(
return 0;
}
-/**
- * Setup parameters for the MC ARB.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- * This function is to be called from the SetPowerState table.
- */
-static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
int result = 0;
- SMU71_Discrete_MCArbDramTimingTable arb_regs;
+ SMU7_Discrete_MCArbDramTimingTable arb_regs;
uint32_t i, j;
- memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
+ memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));
for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = iceland_populate_memory_timing_parameters
+ result = ci_populate_memory_timing_parameters
(hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
data->dpm_table.mclk_table.dpm_levels[j].value,
&arb_regs.entries[i][j]);
- if (0 != result) {
+ if (0 != result)
break;
- }
}
}
if (0 == result) {
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.arb_table_start,
+ result = ci_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->arb_table_start,
(uint8_t *)&arb_regs,
- sizeof(SMU71_Discrete_MCArbDramTimingTable),
+ sizeof(SMU7_Discrete_MCArbDramTimingTable),
SMC_RAM_END
);
}
@@ -1529,12 +1715,13 @@ static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
return result;
}
-static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
+static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
int result = 0;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+
table->GraphicsBootLevel = 0;
table->MemoryBootLevel = 0;
@@ -1562,26 +1749,22 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
}
table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- table->BootVddci = table->BootVddc;
- else
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
-
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
return result;
}
-static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr,
- SMU71_Discrete_MCRegisters *mc_reg_table)
+static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_MCRegisters *mc_reg_table)
{
- const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)smumgr->backend;
+ const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;
uint32_t i, j;
for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
if (smu_data->mc_reg_table.validflag & 1<<j) {
- PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
"Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
mc_reg_table->address[i].s0 =
PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
@@ -1596,10 +1779,9 @@ static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr,
return 0;
}
-/*convert register values from driver to SMC format */
-static void iceland_convert_mc_registers(
- const struct iceland_mc_reg_entry *entry,
- SMU71_Discrete_MCRegisterSet *data,
+static void ci_convert_mc_registers(
+ const struct ci_mc_reg_entry *entry,
+ SMU7_Discrete_MCRegisterSet *data,
uint32_t num_entries, uint32_t valid_flag)
{
uint32_t i, j;
@@ -1612,13 +1794,13 @@ static void iceland_convert_mc_registers(
}
}
-static int iceland_convert_mc_reg_table_entry_to_smc(
- struct pp_smumgr *smumgr,
+static int ci_convert_mc_reg_table_entry_to_smc(
+ struct pp_hwmgr *hwmgr,
const uint32_t memory_clock,
- SMU71_Discrete_MCRegisterSet *mc_reg_table_data
+ SMU7_Discrete_MCRegisterSet *mc_reg_table_data
)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint32_t i = 0;
for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
@@ -1631,15 +1813,15 @@ static int iceland_convert_mc_reg_table_entry_to_smc(
if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
--i;
- iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+ ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
mc_reg_table_data, smu_data->mc_reg_table.last,
smu_data->mc_reg_table.validflag);
return 0;
}
-static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_MCRegisters *mc_regs)
+static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_MCRegisters *mc_regs)
{
int result = 0;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -1647,8 +1829,8 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
uint32_t i;
for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
- res = iceland_convert_mc_reg_table_entry_to_smc(
- hwmgr->smumgr,
+ res = ci_convert_mc_reg_table_entry_to_smc(
+ hwmgr,
data->dpm_table.mclk_table.dpm_levels[i].value,
&mc_regs->data[i]
);
@@ -1660,10 +1842,9 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
return result;
}
-static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t address;
int32_t result;
@@ -1672,45 +1853,43 @@ static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
return 0;
- memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));
+ memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
- result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+ result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
if (result != 0)
return result;
+ address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
- address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
-
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, address,
+ return ci_copy_bytes_to_smc(hwmgr, address,
(uint8_t *)&smu_data->mc_regs.data[0],
- sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+ sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
SMC_RAM_END);
}
-static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
{
int result;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
- memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
- result = iceland_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
+ memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
+ result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize MCRegTable for the MC register addresses!", return result;);
- result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+ result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize MCRegTable for driver state!", return result;);
- return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
- (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
+ return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
+ (uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
}
-static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
uint8_t count, level;
count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
@@ -1736,107 +1915,48 @@ static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
return 0;
}
-static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
- SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
- struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
- struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
- const uint16_t *def1, *def2;
- int i, j, k;
-
-
- /*
- * TDP number of fraction bits are changed from 8 to 7 for Iceland
- * as requested by SMC team
- */
-
- dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
- dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
-
-
- dpm_table->DTETjOffset = 0;
-
- dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
- dpm_table->GpuTjHyst = 8;
-
- dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
-
- /* The following are for new Iceland Multi-input fan/thermal control */
- if (NULL != ppm) {
- dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
- dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
- } else {
- dpm_table->PPM_PkgPwrLimit = 0;
- dpm_table->PPM_TemperatureLimit = 0;
- }
-
- CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
- CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
-
- dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
- def1 = defaults->bapmti_r;
- def2 = defaults->bapmti_rc;
-
- for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
- for (j = 0; j < SMU71_DTE_SOURCES; j++) {
- for (k = 0; k < SMU71_DTE_SINKS; k++) {
- dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
- dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
- def1++;
- def2++;
- }
- }
- }
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ table->SVI2Enable = 1;
+ else
+ table->SVI2Enable = 0;
return 0;
}
-static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *tab)
+static int ci_start_smc(struct pp_hwmgr *hwmgr)
{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	/* set smc instruction start point at 0x0 */
+ ci_program_jump_on_start(hwmgr);
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
- tab->SVI2Enable |= VDDC_ON_SVI2;
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- tab->SVI2Enable |= VDDCI_ON_SVI2;
- else
- tab->MergedVddci = 1;
+ /* enable smc clock */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
- tab->SVI2Enable |= MVDD_ON_SVI2;
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
- (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
+ PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
+ INTERRUPTS_ENABLED, 1);
return 0;
}
-/**
- * Initializes the SMC table and uploads it
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param pInput the pointer to input data (PowerState)
- * @return always 0
- */
-int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
+static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
- SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table);
-
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+ u32 i;
- iceland_initialize_power_tune_defaults(hwmgr);
+ ci_initialize_power_tune_defaults(hwmgr);
memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
- if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
- iceland_populate_smc_voltage_tables(hwmgr, table);
- }
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ ci_populate_smc_voltage_tables(hwmgr, table);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition))
@@ -1850,67 +1970,75 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
if (data->is_memory_gddr5)
table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
if (data->ulv_supported) {
- result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
+ result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result;);
+ "Failed to initialize ULV state!", return result);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixCG_ULV_PARAMETER, 0x40035);
}
- result = iceland_populate_smc_link_level(hwmgr, table);
+ result = ci_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result;);
+ "Failed to initialize Graphics Level!", return result);
- result = iceland_populate_all_graphic_levels(hwmgr);
+ result = ci_populate_all_memory_levels(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result;);
+ "Failed to initialize Memory Level!", return result);
- result = iceland_populate_all_memory_levels(hwmgr);
+ result = ci_populate_smc_link_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result;);
+ "Failed to initialize Link Level!", return result);
- result = iceland_populate_smc_acpi_level(hwmgr, table);
+ result = ci_populate_smc_acpi_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result;);
+ "Failed to initialize ACPI Level!", return result);
- result = iceland_populate_smc_vce_level(hwmgr, table);
+ result = ci_populate_smc_vce_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result;);
+ "Failed to initialize VCE Level!", return result);
- result = iceland_populate_smc_acp_level(hwmgr, table);
+ result = ci_populate_smc_acp_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACP Level!", return result;);
+ "Failed to initialize ACP Level!", return result);
- result = iceland_populate_smc_samu_level(hwmgr, table);
+ result = ci_populate_smc_samu_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result;);
+ "Failed to initialize SAMU Level!", return result);
/* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
/* need to populate the ARB settings for the initial state. */
- result = iceland_program_memory_timing_parameters(hwmgr);
+ result = ci_program_memory_timing_parameters(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result;);
+ "Failed to Write ARB settings for the initial state.", return result);
- result = iceland_populate_smc_uvd_level(hwmgr, table);
+ result = ci_populate_smc_uvd_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result;);
+ "Failed to initialize UVD Level!", return result);
+
+ table->UvdBootLevel = 0;
+ table->VceBootLevel = 0;
+ table->AcpBootLevel = 0;
+ table->SamuBootLevel = 0;
table->GraphicsBootLevel = 0;
table->MemoryBootLevel = 0;
- result = iceland_populate_smc_boot_level(hwmgr, table);
+ result = ci_populate_smc_boot_level(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result;);
+ "Failed to initialize Boot Level!", return result);
- result = iceland_populate_smc_initial_state(hwmgr);
+ result = ci_populate_smc_initial_state(hwmgr);
PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
- result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
+ result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
+ table->UVDInterval = 1;
+ table->VCEInterval = 1;
+ table->ACPInterval = 1;
+ table->SAMUInterval = 1;
table->GraphicsVoltageChangeEnable = 1;
table->GraphicsThermThrottleEnable = 1;
table->GraphicsInterval = 1;
@@ -1927,17 +2055,35 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->MemoryVoltageChangeEnable = 1;
table->MemoryInterval = 1;
table->VoltageResponseTime = 0;
+ table->VddcVddciDelta = 4000;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
+
+ PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+
+ table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
table->PCIeGenInterval = 1;
- result = iceland_populate_smc_svi2_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate SVI2 setting!", return result);
+ ci_populate_smc_svi2_config(hwmgr, table);
+
+ for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
+ CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);
table->ThermGpio = 17;
table->SclkStepSize = 0x4000;
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
@@ -1947,6 +2093,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
@@ -1955,47 +2102,32 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.dpm_table_start +
- offsetof(SMU71_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
- SMC_RAM_END);
+ result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU7_Discrete_DpmTable)-3 * sizeof(SMU7_PIDController),
+ SMC_RAM_END);
PP_ASSERT_WITH_CODE(0 == result,
"Failed to upload dpm data to SMC memory!", return result;);
- /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- smu_data->smu7_data.ulv_setting_starts,
- (uint8_t *)&(smu_data->ulv_setting),
- sizeof(SMU71_Discrete_Ulv),
- SMC_RAM_END);
-
-
- result = iceland_populate_initial_mc_reg_table(hwmgr);
+ result = ci_populate_initial_mc_reg_table(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate initialize MC Reg table!", return result);
- result = iceland_populate_pm_fuses(hwmgr);
+ result = ci_populate_pm_fuses(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
"Failed to populate PM fuses to SMC memory!", return result);
+ ci_start_smc(hwmgr);
+
return 0;
}
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+static int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
- struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
- SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
uint32_t duty100;
uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
uint16_t fdo_min, slope1, slope2;
@@ -2012,7 +2144,7 @@ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
return 0;
}
- if (0 == smu7_data->fan_table_start) {
+ if (0 == ci_data->fan_table_start) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
return 0;
}
@@ -2062,29 +2194,26 @@ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
- /* fan_table.FanControl_GL_Flag = 1; */
-
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
+ res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
return 0;
}
-
-static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return iceland_program_memory_timing_parameters(hwmgr);
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return ci_program_memory_timing_parameters(hwmgr);
return 0;
}
-int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
int result = 0;
uint32_t low_sclk_interrupt_threshold = 0;
@@ -2100,21 +2229,21 @@ int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU71_Discrete_DpmTable,
- LowSclkInterruptThreshold),
+ result = ci_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable,
+ LowSclkInterruptT),
(uint8_t *)&low_sclk_interrupt_threshold,
sizeof(uint32_t),
SMC_RAM_END);
}
- result = iceland_update_and_upload_mc_reg_table(hwmgr);
+ result = ci_update_and_upload_mc_reg_table(hwmgr);
PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
- result = iceland_program_mem_timing_parameters(hwmgr);
+ result = ci_program_mem_timing_parameters(hwmgr);
PP_ASSERT_WITH_CODE((result == 0),
"Failed to program memory timing parameters!",
);
@@ -2122,161 +2251,200 @@ int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
return result;
}
-uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
+static uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
{
switch (type) {
case SMU_SoftRegisters:
switch (member) {
case HandshakeDisables:
- return offsetof(SMU71_SoftRegisters, HandshakeDisables);
+ return offsetof(SMU7_SoftRegisters, HandshakeDisables);
case VoltageChangeTimeout:
- return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
+ return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
case AverageGraphicsActivity:
- return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
+ return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
case PreVBlankGap:
- return offsetof(SMU71_SoftRegisters, PreVBlankGap);
+ return offsetof(SMU7_SoftRegisters, PreVBlankGap);
case VBlankTimeout:
- return offsetof(SMU71_SoftRegisters, VBlankTimeout);
- case UcodeLoadStatus:
- return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
+ return offsetof(SMU7_SoftRegisters, VBlankTimeout);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE);
}
case SMU_Discrete_DpmTable:
switch (member) {
case LowSclkInterruptThreshold:
- return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
+ return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
}
}
- pr_warn("can't get the offset of type %x member %x\n", type, member);
+ pr_debug("can't get the offset of type %x member %x\n", type, member);
return 0;
}
-uint32_t iceland_get_mac_definition(uint32_t value)
+static uint32_t ci_get_mac_definition(uint32_t value)
{
switch (value) {
case SMU_MAX_LEVELS_GRAPHICS:
- return SMU71_MAX_LEVELS_GRAPHICS;
+ return SMU7_MAX_LEVELS_GRAPHICS;
case SMU_MAX_LEVELS_MEMORY:
- return SMU71_MAX_LEVELS_MEMORY;
+ return SMU7_MAX_LEVELS_MEMORY;
case SMU_MAX_LEVELS_LINK:
- return SMU71_MAX_LEVELS_LINK;
+ return SMU7_MAX_LEVELS_LINK;
case SMU_MAX_ENTRIES_SMIO:
- return SMU71_MAX_ENTRIES_SMIO;
+ return SMU7_MAX_ENTRIES_SMIO;
case SMU_MAX_LEVELS_VDDC:
- return SMU71_MAX_LEVELS_VDDC;
+ return SMU7_MAX_LEVELS_VDDC;
case SMU_MAX_LEVELS_VDDCI:
- return SMU71_MAX_LEVELS_VDDCI;
+ return SMU7_MAX_LEVELS_VDDCI;
case SMU_MAX_LEVELS_MVDD:
- return SMU71_MAX_LEVELS_MVDD;
+ return SMU7_MAX_LEVELS_MVDD;
}
- pr_warn("can't get the mac of %x\n", value);
+ pr_debug("can't get the mac of %x\n", value);
return 0;
}
-/**
- * Get the location of various tables inside the FW image.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
+static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
+{
+ uint32_t byte_count, start_addr;
+ uint8_t *src;
+ uint32_t data;
+
+ struct cgs_firmware_info info = {0};
+
+ cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
+
+ hwmgr->is_kicker = info.is_kicker;
+ byte_count = info.image_size;
+ src = (uint8_t *)info.kptr;
+ start_addr = info.ucode_start_address;
+
+ if (byte_count > SMC_RAM_END) {
+ pr_err("SMC address is beyond the SMC RAM area.\n");
+ return -EINVAL;
+ }
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+
+ for (; byte_count >= 4; byte_count -= 4) {
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+ src += 4;
+ }
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+
+ if (0 != byte_count) {
+ pr_err("SMC size must be divisible by 4\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
+{
+ if (ci_is_smc_ram_running(hwmgr)) {
+ pr_info("smc is running, no need to load smc firmware\n");
+ return 0;
+ }
+ PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
+ boot_seq_done, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
+ pre_fetcher_en, 1);
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+ return ci_load_smc_ucode(hwmgr);
+}
+
+static int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
- uint32_t tmp;
+ uint32_t tmp = 0;
int result;
bool error = false;
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, DpmTable),
+ if (ci_upload_firmware(hwmgr))
+ return -EINVAL;
+
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, DpmTable),
&tmp, SMC_RAM_END);
- if (0 == result) {
- smu7_data->dpm_table_start = tmp;
- }
+ if (0 == result)
+ ci_data->dpm_table_start = tmp;
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, SoftRegisters),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, SoftRegisters),
&tmp, SMC_RAM_END);
if (0 == result) {
data->soft_regs_start = tmp;
- smu7_data->soft_regs_start = tmp;
+ ci_data->soft_regs_start = tmp;
}
error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, mcRegisterTable),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcRegisterTable),
&tmp, SMC_RAM_END);
- if (0 == result) {
- smu7_data->mc_reg_table_start = tmp;
- }
+ if (0 == result)
+ ci_data->mc_reg_table_start = tmp;
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, FanTable),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, FanTable),
&tmp, SMC_RAM_END);
- if (0 == result) {
- smu7_data->fan_table_start = tmp;
- }
+ if (0 == result)
+ ci_data->fan_table_start = tmp;
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
&tmp, SMC_RAM_END);
- if (0 == result) {
- smu7_data->arb_table_start = tmp;
- }
+ if (0 == result)
+ ci_data->arb_table_start = tmp;
error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, Version),
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, Version),
&tmp, SMC_RAM_END);
- if (0 == result) {
+ if (0 == result)
hwmgr->microcode_version_info.SMC = tmp;
- }
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, UlvSettings),
- &tmp, SMC_RAM_END);
-
- if (0 == result) {
- smu7_data->ulv_setting_starts = tmp;
- }
error |= (0 != result);
return error ? 1 : 0;
}
-/*---------------------------MC----------------------------*/
-
-static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr)
+static uint8_t ci_get_memory_modile_index(struct pp_hwmgr *hwmgr)
{
return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
}
-static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
{
bool result = true;
@@ -2369,32 +2537,32 @@ static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
return result;
}
-static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
+static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
{
uint32_t i;
uint16_t address;
for (i = 0; i < table->last; i++) {
table->mc_reg_address[i].s0 =
- iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
+ ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
? address : table->mc_reg_address[i].s1;
}
return 0;
}
-static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
- struct iceland_mc_reg_table *ni_table)
+static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+ struct ci_mc_reg_table *ni_table)
{
uint8_t i, j;
- PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
"Invalid VramInfo table.", return -EINVAL);
- for (i = 0; i < table->last; i++) {
+ for (i = 0; i < table->last; i++)
ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
- }
+
ni_table->last = table->last;
for (i = 0; i < table->num_entries; i++) {
@@ -2411,26 +2579,15 @@ static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *tabl
return 0;
}
-/**
- * VBIOS omits some information to reduce size, we need to recover them here.
- * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0].
- * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0]
- * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0].
- * 3. need to set these data for each clock range
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param table the address of MCRegTable
- * @return always 0
- */
-static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
- struct iceland_mc_reg_table *table)
+static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct ci_mc_reg_table *table)
{
uint8_t i, j, k;
uint32_t temp_reg;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
for (i = 0, j = table->last; i < table->last; i++) {
- PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
switch (table->mc_reg_address[i].s1) {
@@ -2445,7 +2602,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
}
j++;
- PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
@@ -2456,15 +2613,14 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
(temp_reg & 0xffff0000) |
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- if (!data->is_memory_gddr5) {
+ if (!data->is_memory_gddr5)
table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
- }
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
- if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
+ if (!data->is_memory_gddr5 && j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
for (k = 0; k < table->num_entries; k++) {
@@ -2472,7 +2628,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
}
@@ -2488,7 +2644,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
}
j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
break;
@@ -2503,14 +2659,15 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
return 0;
}
-static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
+static int ci_set_valid_flag(struct ci_mc_reg_table *table)
{
uint8_t i, j;
+
for (i = 0; i < table->last; i++) {
for (j = 1; j < table->num_entries; j++) {
if (table->mc_reg_table_entry[j-1].mc_data[i] !=
table->mc_reg_table_entry[j].mc_data[i]) {
- table->validflag |= (1<<i);
+ table->validflag |= (1 << i);
break;
}
}
@@ -2519,13 +2676,13 @@ static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
return 0;
}
-int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
int result;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
pp_atomctrl_mc_reg_table *table;
- struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
- uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
+ struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+ uint8_t module_index = ci_get_memory_modile_index(hwmgr);
table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
@@ -2559,24 +2716,103 @@ int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
if (0 == result)
- result = iceland_copy_vbios_smc_reg_table(table, ni_table);
+ result = ci_copy_vbios_smc_reg_table(table, ni_table);
if (0 == result) {
- iceland_set_s0_mc_reg_index(ni_table);
- result = iceland_set_mc_special_registers(hwmgr, ni_table);
+ ci_set_s0_mc_reg_index(ni_table);
+ result = ci_set_mc_special_registers(hwmgr, ni_table);
}
if (0 == result)
- iceland_set_valid_flag(ni_table);
+ ci_set_valid_flag(ni_table);
kfree(table);
return result;
}
-bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
+static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return ci_is_smc_ram_running(hwmgr);
}
+
+static int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)
+ (hwmgr->smu_backend);
+ struct SMU7_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t array = smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
+ SMU7_MAX_LEVELS_GRAPHICS;
+ uint32_t i;
+
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ levels[i].ActivityLevel =
+ cpu_to_be16(request->activity_threshold);
+ levels[i].EnabledForActivity = 1;
+ levels[i].UpH = request->up_hyst;
+ levels[i].DownH = request->down_hyst;
+ }
+
+ return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ array_size, SMC_RAM_END);
+}
+
+
+static int ci_smu_init(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct ci_smumgr *ci_priv = NULL;
+
+ ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
+
+ if (ci_priv == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
+ ci_priv->activity_target[i] = 30;
+
+ hwmgr->smu_backend = ci_priv;
+
+ return 0;
+}
+
+static int ci_smu_fini(struct pp_hwmgr *hwmgr)
+{
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
+ cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
+ return 0;
+}
+
+static int ci_start_smu(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+const struct pp_smumgr_func ci_smu_funcs = {
+ .smu_init = ci_smu_init,
+ .smu_fini = ci_smu_fini,
+ .start_smu = ci_start_smu,
+ .check_fw_load_finish = NULL,
+ .request_smu_load_fw = NULL,
+ .request_smu_load_specific_fw = NULL,
+ .send_msg_to_smc = ci_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
+ .download_pptable_settings = NULL,
+ .upload_pptable_settings = NULL,
+ .get_offsetof = ci_get_offsetof,
+ .process_firmware_header = ci_process_firmware_header,
+ .init_smc_table = ci_init_smc_table,
+ .update_sclk_threshold = ci_update_sclk_threshold,
+ .thermal_setup_fan_table = ci_thermal_setup_fan_table,
+ .populate_all_graphic_levels = ci_populate_all_graphic_levels,
+ .populate_all_memory_levels = ci_populate_all_memory_levels,
+ .get_mac_definition = ci_get_mac_definition,
+ .initialize_mc_reg_table = ci_initialize_mc_reg_table,
+ .is_dpm_running = ci_is_dpm_running,
+ .populate_requested_graphic_levels = ci_populate_requested_graphic_levels,
+};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
new file mode 100644
index 000000000000..8189cfa17c46
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _CI_SMUMANAGER_H_
+#define _CI_SMUMANAGER_H_
+
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 6
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+
+#include "smu7_discrete.h"
+#include <pp_endian.h>
+#include "ppatomctrl.h"
+
+struct ci_pt_defaults {
+ u8 svi_load_line_en;
+ u8 svi_load_line_vddc;
+ u8 tdc_vddc_throttle_release_limit_perc;
+ u8 tdc_mawt;
+ u8 tdc_waterfall_ctl;
+ u8 dte_ambient_temp_base;
+ u32 display_cac;
+ u32 bapm_temp_gradient;
+ u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+ u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+};
+
+struct ci_mc_reg_entry {
+ uint32_t mclk_max;
+ uint32_t mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_mc_reg_table {
+ uint8_t last;
+ uint8_t num_entries;
+ uint16_t validflag;
+ struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_smumgr {
+ uint32_t soft_regs_start;
+ uint32_t dpm_table_start;
+ uint32_t mc_reg_table_start;
+ uint32_t fan_table_start;
+ uint32_t arb_table_start;
+ uint32_t ulv_setting_starts;
+ struct SMU7_Discrete_DpmTable smc_state_table;
+ struct SMU7_Discrete_PmFuses power_tune_table;
+ const struct ci_pt_defaults *power_tune_defaults;
+ SMU7_Discrete_MCRegisters mc_regs;
+ struct ci_mc_reg_table mc_reg_table;
+ uint32_t activity_target[SMU7_MAX_LEVELS_GRAPHICS];
+
+};
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 652aaa43e95c..78ab0556e48f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -52,53 +52,52 @@ static const enum cz_scratch_entry firmware_list[] = {
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};
-static int cz_smum_get_argument(struct pp_smumgr *smumgr)
+static int cz_smum_get_argument(struct pp_hwmgr *hwmgr)
{
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- return cgs_read_register(smumgr->device,
+ return cgs_read_register(hwmgr->device,
mmSMU_MP1_SRBM2P_ARG_0);
}
-static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr,
- uint16_t msg)
+static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int result = 0;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
+ result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
if (result != 0) {
pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg);
return result;
}
- cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
- cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
return 0;
}
/* Send a message to the SMC, and wait for its response.*/
-static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int result = 0;
- result = cz_send_msg_to_smc_async(smumgr, msg);
+ result = cz_send_msg_to_smc_async(hwmgr, msg);
if (result != 0)
return result;
- return SMUM_WAIT_FIELD_UNEQUAL(smumgr,
+ return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
}
-static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
+static int cz_set_smc_sram_address(struct pp_hwmgr *hwmgr,
uint32_t smc_address, uint32_t limit)
{
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
if (0 != (3 & smc_address)) {
@@ -111,39 +110,39 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
return -EINVAL;
}
- cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
SMN_MP1_SRAM_START_ADDR + smc_address);
return 0;
}
-static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr,
+static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
uint32_t smc_address, uint32_t value, uint32_t limit)
{
int result;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- result = cz_set_smc_sram_address(smumgr, smc_address, limit);
+ result = cz_set_smc_sram_address(hwmgr, smc_address, limit);
if (!result)
- cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
return result;
}
-static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
- return cz_send_msg_to_smc(smumgr, msg);
+ return cz_send_msg_to_smc(hwmgr, msg);
}
-static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
+static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr,
uint32_t firmware)
{
int i;
@@ -151,19 +150,19 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
SMU8_FIRMWARE_HEADER_LOCATION +
offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
- for (i = 0; i < smumgr->usec_timeout; i++) {
+ for (i = 0; i < hwmgr->usec_timeout; i++) {
if (firmware ==
- (cgs_read_register(smumgr->device, mmMP0PUB_IND_DATA) & firmware))
+ (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
break;
udelay(1);
}
- if (i >= smumgr->usec_timeout) {
+ if (i >= hwmgr->usec_timeout) {
pr_err("SMU check loaded firmware failed.\n");
return -EINVAL;
}
@@ -171,7 +170,7 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
return 0;
}
-static int cz_load_mec_firmware(struct pp_smumgr *smumgr)
+static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr)
{
uint32_t reg_data;
uint32_t tmp;
@@ -179,44 +178,44 @@ static int cz_load_mec_firmware(struct pp_smumgr *smumgr)
struct cgs_firmware_info info = {0};
struct cz_smumgr *cz_smu;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cz_smu = (struct cz_smumgr *)smumgr->backend;
- ret = cgs_get_firmware_info(smumgr->device,
+ cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+ ret = cgs_get_firmware_info(hwmgr->device,
CGS_UCODE_ID_CP_MEC, &info);
if (ret)
return -EINVAL;
/* Disable MEC parsing/prefetching */
- tmp = cgs_read_register(smumgr->device,
+ tmp = cgs_read_register(hwmgr->device,
mmCP_MEC_CNTL);
- tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
- tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
- cgs_write_register(smumgr->device, mmCP_MEC_CNTL, tmp);
+ tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
+ tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
+ cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
- tmp = cgs_read_register(smumgr->device,
+ tmp = cgs_read_register(hwmgr->device,
mmCP_CPC_IC_BASE_CNTL);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
- cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
reg_data = smu_lower_32_bits(info.mc_addr) &
- SMUM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
- cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
+ PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
reg_data = smu_upper_32_bits(info.mc_addr) &
- SMUM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
- cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
+ PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
return 0;
}
-static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
+static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
enum cz_scratch_entry firmware_enum)
{
uint8_t ret = 0;
@@ -226,7 +225,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
ret = UCODE_ID_SDMA0;
break;
case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
- if (smumgr->chip_id == CHIP_STONEY)
+ if (hwmgr->chip_id == CHIP_STONEY)
ret = UCODE_ID_SDMA0;
else
ret = UCODE_ID_SDMA1;
@@ -244,7 +243,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
ret = UCODE_ID_CP_MEC_JT1;
break;
case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
- if (smumgr->chip_id == CHIP_STONEY)
+ if (hwmgr->chip_id == CHIP_STONEY)
ret = UCODE_ID_CP_MEC_JT1;
else
ret = UCODE_ID_CP_MEC_JT2;
@@ -326,17 +325,17 @@ static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type)
}
static int cz_smu_populate_single_scratch_task(
- struct pp_smumgr *smumgr,
+ struct pp_hwmgr *hwmgr,
enum cz_scratch_entry fw_enum,
uint8_t type, bool is_last)
{
uint8_t i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
task->type = type;
- task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
+ task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
for (i = 0; i < cz_smu->scratch_buffer_length; i++)
@@ -363,17 +362,17 @@ static int cz_smu_populate_single_scratch_task(
}
static int cz_smu_populate_single_ucode_load_task(
- struct pp_smumgr *smumgr,
+ struct pp_hwmgr *hwmgr,
enum cz_scratch_entry fw_enum,
bool is_last)
{
uint8_t i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
task->type = TASK_TYPE_UCODE_LOAD;
- task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
+ task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
for (i = 0; i < cz_smu->driver_buffer_length; i++)
@@ -392,22 +391,22 @@ static int cz_smu_populate_single_ucode_load_task(
return 0;
}
-static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
TASK_TYPE_UCODE_SAVE, true);
return 0;
}
-static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr)
+static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
{
int i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
@@ -416,17 +415,17 @@ static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr)
return 0;
}
-static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
TASK_TYPE_UCODE_SAVE, false);
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
TASK_TYPE_UCODE_SAVE, true);
@@ -434,121 +433,120 @@ static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr)
}
-static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count;
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- if (smumgr->chip_id == CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
+ if (hwmgr->chip_id == CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
else
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
/* populate scratch */
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
TASK_TYPE_UCODE_LOAD, false);
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
TASK_TYPE_UCODE_LOAD, false);
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
TASK_TYPE_UCODE_LOAD, true);
return 0;
}
-static int cz_smu_construct_toc_for_power_profiling(
- struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
TASK_TYPE_INITIALIZE, true);
return 0;
}
-static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
- if (smumgr->chip_id != CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
+ if (hwmgr->chip_id != CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- if (smumgr->chip_id != CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
+ if (hwmgr->chip_id != CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
return 0;
}
-static int cz_smu_construct_toc_for_clock_table(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
TASK_TYPE_INITIALIZE, true);
return 0;
}
-static int cz_smu_construct_toc(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_used_count = 0;
- cz_smu_initialize_toc_empty_job_list(smumgr);
- cz_smu_construct_toc_for_rlc_aram_save(smumgr);
- cz_smu_construct_toc_for_vddgfx_enter(smumgr);
- cz_smu_construct_toc_for_vddgfx_exit(smumgr);
- cz_smu_construct_toc_for_power_profiling(smumgr);
- cz_smu_construct_toc_for_bootup(smumgr);
- cz_smu_construct_toc_for_clock_table(smumgr);
+ cz_smu_initialize_toc_empty_job_list(hwmgr);
+ cz_smu_construct_toc_for_rlc_aram_save(hwmgr);
+ cz_smu_construct_toc_for_vddgfx_enter(hwmgr);
+ cz_smu_construct_toc_for_vddgfx_exit(hwmgr);
+ cz_smu_construct_toc_for_power_profiling(hwmgr);
+ cz_smu_construct_toc_for_bootup(hwmgr);
+ cz_smu_construct_toc_for_clock_table(hwmgr);
return 0;
}
-static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
+static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
uint32_t firmware_type;
uint32_t i;
int ret;
@@ -559,12 +557,12 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
- firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
+ firmware_type = cz_translate_firmware_enum_to_arg(hwmgr,
firmware_list[i]);
ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
- ret = cgs_get_firmware_info(smumgr->device,
+ ret = cgs_get_firmware_info(hwmgr->device,
ucode_id, &info);
if (ret == 0) {
@@ -585,12 +583,12 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
}
static int cz_smu_populate_single_scratch_entry(
- struct pp_smumgr *smumgr,
+ struct pp_hwmgr *hwmgr,
enum cz_scratch_entry scratch_type,
uint32_t ulsize_byte,
struct cz_buffer_entry *entry)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
long long mc_addr =
((long long)(cz_smu->smu_buffer.mc_addr_high) << 32)
| cz_smu->smu_buffer.mc_addr_low;
@@ -611,9 +609,9 @@ static int cz_smu_populate_single_scratch_entry(
return 0;
}
-static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table)
+static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
unsigned long i;
for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
@@ -624,25 +622,25 @@ static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table)
*table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr;
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrHi,
cz_smu->scratch_buffer[i].mc_addr_high);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrLo,
cz_smu->scratch_buffer[i].mc_addr_low);
- cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
+ cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_clock_table);
- cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToDram);
+ cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
return 0;
}
-static int cz_upload_pptable_settings(struct pp_smumgr *smumgr)
+static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
unsigned long i;
for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
@@ -651,63 +649,63 @@ static int cz_upload_pptable_settings(struct pp_smumgr *smumgr)
break;
}
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrHi,
cz_smu->scratch_buffer[i].mc_addr_high);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrLo,
cz_smu->scratch_buffer[i].mc_addr_low);
- cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
+ cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_clock_table);
- cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToSmu);
+ cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
return 0;
}
-static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
+static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smu_backend);
uint32_t smc_address;
- if (!smumgr->reload_fw) {
+ if (!hwmgr->reload_fw) {
pr_info("skip reloading...\n");
return 0;
}
- cz_smu_populate_firmware_entries(smumgr);
+ cz_smu_populate_firmware_entries(hwmgr);
- cz_smu_construct_toc(smumgr);
+ cz_smu_construct_toc(hwmgr);
smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
- cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4);
+ cz_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DriverDramAddrHi,
cz_smu->toc_buffer.mc_addr_high);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DriverDramAddrLo,
cz_smu->toc_buffer.mc_addr_low);
- cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs);
+ cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_aram);
- cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
+ cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_power_profiling_index);
- return cz_send_msg_to_smc_with_parameter(smumgr,
+ return cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_initialize_index);
}
-static int cz_start_smu(struct pp_smumgr *smumgr)
+static int cz_start_smu(struct pp_hwmgr *hwmgr)
{
int ret = 0;
uint32_t fw_to_check = 0;
@@ -721,23 +719,23 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
UCODE_ID_CP_MEC_JT1_MASK |
UCODE_ID_CP_MEC_JT2_MASK;
- if (smumgr->chip_id == CHIP_STONEY)
+ if (hwmgr->chip_id == CHIP_STONEY)
fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
- ret = cz_request_smu_load_fw(smumgr);
+ ret = cz_request_smu_load_fw(hwmgr);
if (ret)
pr_err("SMU firmware load failed\n");
- cz_check_fw_load_finish(smumgr, fw_to_check);
+ cz_check_fw_load_finish(hwmgr, fw_to_check);
- ret = cz_load_mec_firmware(smumgr);
+ ret = cz_load_mec_firmware(hwmgr);
if (ret)
pr_err("Mec Firmware load failed\n");
return ret;
}
-static int cz_smu_init(struct pp_smumgr *smumgr)
+static int cz_smu_init(struct pp_hwmgr *hwmgr)
{
uint64_t mc_addr = 0;
int ret = 0;
@@ -747,7 +745,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
if (cz_smu == NULL)
return -ENOMEM;
- smumgr->backend = cz_smu;
+ hwmgr->smu_backend = cz_smu;
cz_smu->toc_buffer.data_size = 4096;
cz_smu->smu_buffer.data_size =
@@ -757,7 +755,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
- ret = smu_allocate_memory(smumgr->device,
+ ret = smu_allocate_memory(hwmgr->device,
cz_smu->toc_buffer.data_size,
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -770,7 +768,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
- ret = smu_allocate_memory(smumgr->device,
+ ret = smu_allocate_memory(hwmgr->device,
cz_smu->smu_buffer.data_size,
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -783,7 +781,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -791,14 +789,14 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
pr_err("Error when Populate Firmware Entry.\n");
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -806,7 +804,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
sizeof(struct SMU8_MultimediaPowerLogData),
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -814,7 +812,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
sizeof(struct SMU8_Fusion_ClkTable),
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -825,18 +823,18 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return 0;
}
-static int cz_smu_fini(struct pp_smumgr *smumgr)
+static int cz_smu_fini(struct pp_hwmgr *hwmgr)
{
struct cz_smumgr *cz_smu;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cz_smu = (struct cz_smumgr *)smumgr->backend;
+ cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
if (cz_smu) {
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
cz_smu->toc_buffer.handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
cz_smu->smu_buffer.handle);
kfree(cz_smu);
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
deleted file mode 100644
index 8712f093d6d9..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ /dev/null
@@ -1,2498 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "pp_debug.h"
-#include "fiji_smc.h"
-#include "smu7_dyn_defaults.h"
-
-#include "smu7_hwmgr.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "cgs_common.h"
-#include "atombios.h"
-#include "fiji_smumgr.h"
-#include "pppcielanes.h"
-#include "smu7_ppsmc.h"
-#include "smu73.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "smu7_smumgr.h"
-
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-#define VDDC_VDDCI_DELTA 300
-#define MC_CG_ARB_FREQ_F1 0x0b
-
-/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
- * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
- */
-static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
- {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and
- * [Floor Freq, Boundary Freq, VID min , VID max]
- */
-static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
- { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
- { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
- * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
- */
-static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
- {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
-
-static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
- /* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
- {1, 0xF, 0xFD,
- /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
- 0x19, 5, 45}
-};
-
-/* PPGen generates the gain setting in x * 100 units.
- * This function converts that value to the x * 4096 (0x1000) unit
- * expected by the SMC firmware.
- */
-static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
- uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
-{
- uint32_t i;
- uint16_t vddci;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- *voltage = *mvdd = 0;
-
-
- /* the clock-voltage dependency table is empty */
- if (dep_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < dep_table->count; i++) {
- /* find the first sclk bigger than the requested clock */
- if (dep_table->entries[i].clk >= clock) {
- *voltage |= (dep_table->entries[i].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i].vddci)
- *voltage |= (dep_table->entries[i].vddci *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i].vddc -
- VDDC_VDDCI_DELTA));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i].mvdd *
- VOLTAGE_SCALE;
-
- *voltage |= 1 << PHASES_SHIFT;
- return 0;
- }
- }
-
- /* sclk is bigger than the max sclk in the dependency table */
- *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i-1].vddci) {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i].vddc -
- VDDC_VDDCI_DELTA));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
- return 0;
-}
-
-
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
- uint32_t tmp;
- tmp = raw_setting * 4096 / 100;
- return (uint16_t)tmp;
-}
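A minimal worked sketch (not part of the original file) of the x * 100 to
x * 4096 (0x1000) fan-gain conversion performed by scale_fan_gain_settings()
above; the input value is purely illustrative:

	/* a gain stored as 150 (1.50 in the x * 100 unit generated by PPGen)
	 * converts to 150 * 4096 / 100 = 6144, the fixed-point unit the SMC
	 * firmware expects
	 */
	uint16_t smc_gain = scale_fan_gain_settings(150);	/* -> 6144 */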
-
-static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
-{
- switch (line) {
- case SMU7_I2CLineID_DDC1:
- *scl = SMU7_I2C_DDC1CLK;
- *sda = SMU7_I2C_DDC1DATA;
- break;
- case SMU7_I2CLineID_DDC2:
- *scl = SMU7_I2C_DDC2CLK;
- *sda = SMU7_I2C_DDC2DATA;
- break;
- case SMU7_I2CLineID_DDC3:
- *scl = SMU7_I2C_DDC3CLK;
- *sda = SMU7_I2C_DDC3DATA;
- break;
- case SMU7_I2CLineID_DDC4:
- *scl = SMU7_I2C_DDC4CLK;
- *sda = SMU7_I2C_DDC4DATA;
- break;
- case SMU7_I2CLineID_DDC5:
- *scl = SMU7_I2C_DDC5CLK;
- *sda = SMU7_I2C_DDC5DATA;
- break;
- case SMU7_I2CLineID_DDC6:
- *scl = SMU7_I2C_DDC6CLK;
- *sda = SMU7_I2C_DDC6DATA;
- break;
- case SMU7_I2CLineID_SCLSDA:
- *scl = SMU7_I2C_SCL;
- *sda = SMU7_I2C_SDA;
- break;
- case SMU7_I2CLineID_DDCVGA:
- *scl = SMU7_I2C_DDCVGACLK;
- *sda = SMU7_I2C_DDCVGADATA;
- break;
- default:
- *scl = 0;
- *sda = 0;
- break;
- }
-}
-
-static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (table_info &&
- table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
- table_info->cac_dtp_table->usPowerTuneDataSetID)
- smu_data->power_tune_defaults =
- &fiji_power_tune_data_set_array
- [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
- else
- smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
-
-}
-
-static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
-
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
-
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
- struct pp_advance_fan_control_parameters *fan_table =
- &hwmgr->thermal_controller.advanceFanControlParameters;
- uint8_t uc_scl, uc_sda;
-
- /* The number of TDP fraction bits is changed from 8 to 7 for Fiji,
- * as requested by the SMC team.
- */
- dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 128));
- dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 128));
-
- PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
- "Target Operating Temp is out of Range!",
- );
-
- dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
- dpm_table->GpuTjHyst = 8;
-
- dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
-
- /* The following are for new Fiji Multi-input fan/thermal control */
- dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTargetOperatingTemp * 256);
- dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitHotspot * 256);
- dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitLiquid1 * 256);
- dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitLiquid2 * 256);
- dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitVrVddc * 256);
- dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitVrMvdd * 256);
- dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitPlx * 256);
-
- dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainEdge));
- dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHotspot));
- dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainLiquid));
- dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainVrVddc));
- dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
- dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainPlx));
- dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHbm));
-
- dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
- dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
- dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
- dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
-
- get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Liquid_I2C_LineSCL = uc_scl;
- dpm_table->Liquid_I2C_LineSDA = uc_sda;
-
- get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Vr_I2C_LineSCL = uc_scl;
- dpm_table->Vr_I2C_LineSDA = uc_sda;
-
- get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Plx_I2C_LineSCL = uc_scl;
- dpm_table->Plx_I2C_LineSDA = uc_sda;
-
- return 0;
-}
-
-
-static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
- smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
- smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
- smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-
-
-static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
- uint16_t tdc_limit;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- /* The number of TDC fraction bits is changed from 8 to 7
- * for Fiji, as requested by the SMC team.
- */
- tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
- smu_data->power_tune_table.TDC_VDDC_PkgLimit =
- CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
- smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
- smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
- return 0;
-}
-
-static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
- uint32_t temp;
-
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- fuse_table_offset +
- offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
- return -EINVAL);
- else {
- smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
- smu_data->power_tune_table.LPMLTemperatureMin =
- (uint8_t)((temp >> 16) & 0xff);
- smu_data->power_tune_table.LPMLTemperatureMax =
- (uint8_t)((temp >> 8) & 0xff);
- smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
- }
- return 0;
-}
-
-static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
- return 0;
-}
-
-static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- if ((hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity & (1 << 15)) ||
- 0 == hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity)
- hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity = hwmgr->thermal_controller.
- advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
- smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
- PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
- advanceFanControlParameters.usFanOutputSensitivity);
- return 0;
-}
-
-static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.GnbLPML[i] = 0;
-
- return 0;
-}
-
-static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
- HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
- smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
-
- return 0;
-}
-
-static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
- uint32_t pm_fuse_table_offset;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed!",
- return -EINVAL);
-
- /* DW6 */
- if (fiji_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed!",
- return -EINVAL);
- /* DW7 */
- if (fiji_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed!", return -EINVAL);
- /* DW8 */
- if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl, "
- "LPMLTemperature Min and Max Failed!",
- return -EINVAL);
-
- /* DW9-DW12 */
- if (0 != fiji_populate_temperature_scaler(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed!",
- return -EINVAL);
-
- /* DW13-DW14 */
- if (fiji_populate_fuzzy_fan(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate Fuzzy Fan Control parameters Failed!",
- return -EINVAL);
-
- /* DW15-DW18 */
- if (fiji_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed!",
- return -EINVAL);
-
- /* DW19 */
- if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
- /* DW20 */
- if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
- "Sidd Failed!", return -EINVAL);
-
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
- (uint8_t *)&smu_data->power_tune_table,
- sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed!",
- return -EINVAL);
- }
- return 0;
-}
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *lookup_table =
- table_info->vddc_lookup_table;
- /* The table is already swapped, so in order to use values from it
- * we need to swap them back.
- * We are populating vddc CAC data into the BapmVddc table
- * in both split and merged mode.
- */
-
- for (count = 0; count < lookup_table->count; count++) {
- index = phm_get_voltage_index(lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] =
- convert_to_vid(lookup_table->entries[index].us_cac_low);
- table->BapmVddcVidHiSidd[count] =
- convert_to_vid(lookup_table->entries[index].us_cac_high);
- }
-
- return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-
-static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result;
-
- result = fiji_populate_cac_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate CAC voltage tables to SMC",
- return -EINVAL);
-
- return 0;
-}
-
-static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_Ulv *state)
-{
- int result = 0;
-
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
- state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
- state->VddcPhase = 1;
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
- }
- return result;
-}
-
-static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- return fiji_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- int i;
-
- /* Index (dpm_table->pcie_speed_table.count)
- * is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
- dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity = 1;
- table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
- }
-
- smu_data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t ref_clock;
- uint32_t ref_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
- ref_clock = atomctrl_get_reference_clock(hwmgr);
- ref_divider = 1 + dividers.uc_pll_ref_div;
-
- /* the low 14 bits are the fraction and the high 12 bits are the divider */
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
- /* SPLL_FUNC_CNTL setup */
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
- SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation */
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
- SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- struct pp_atomctrl_internal_ss_info ssInfo;
-
- uint32_t vco_freq = clock * dividers.uc_pll_post_div;
- if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
- vco_freq, &ssInfo)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- *
- * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
- */
- uint32_t clk_s = ref_clock * 5 /
- (ref_divider * ssInfo.speed_spectrum_rate);
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
- fbdiv / (clk_s * 10000);
-
- cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
- CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
- cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
- CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
- CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
- }
- }
-
- sclk->SclkFrequency = clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
-
-/**
-* Populates single SMC SCLK structure using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param level the SMU73 graphics level structure to be populated
-*/
-
-static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, uint16_t sclk_al_threshold,
- struct SMU73_Discrete_GraphicsLevel *level)
-{
- int result;
- /* PP_Clocks minClocks; */
- uint32_t threshold, mvdd;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- result = fiji_calculate_sclk_params(hwmgr, clock, level);
-
- /* populate graphics levels */
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk, clock,
- (uint32_t *)(&level->MinVoltage), &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for "
- "VDDC engine clock dependency table",
- return result);
-
- level->SclkFrequency = clock;
- level->ActivityLevel = sclk_al_threshold;
- level->CcPwrDynRm = 0;
- level->CcPwrDynRm1 = 0;
- level->EnabledForActivity = 0;
- level->EnabledForThrottle = 1;
- level->UpHyst = 10;
- level->DownHyst = 0;
- level->VoltageDownHyst = 0;
- level->PowerThrottle = 0;
-
- threshold = clock * data->fast_watermark_threshold / 100;
-
- data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
- level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
- hwmgr->display_config.min_core_set_clock_in_sr);
-
-
- /* Default to slow (low watermark); the highest DPM level will be
- * set to PPSMC_DISPLAY_WATERMARK_HIGH later.
- */
- level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
-
- return 0;
-}
-/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
- int result = 0;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
- SMU73_MAX_LEVELS_GRAPHICS;
- struct SMU73_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t i, max_entry;
- uint8_t hightest_pcie_level_enabled = 0,
- lowest_pcie_level_enabled = 0,
- mid_pcie_level_enabled = 0,
- count = 0;
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = fiji_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
- &levels[i]);
- if (result)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- levels[i].DeepSleepDivId = 0;
- }
-
- /* Only enable level 0 for now.*/
- levels[0].EnabledForActivity = 1;
-
- /* set highest level watermark to high */
- levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- smu_data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
- max_entry = pcie_entry_cnt - 1;
- for (i = 0; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel =
- (uint8_t) ((i < max_entry) ? i : max_entry);
- } else {
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (hightest_pcie_level_enabled + 1))) != 0))
- hightest_pcie_level_enabled++;
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << lowest_pcie_level_enabled)) == 0))
- lowest_pcie_level_enabled++;
-
- while ((count < hightest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
- count++;
-
- mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
- hightest_pcie_level_enabled ?
- (lowest_pcie_level_enabled + 1 + count) :
- hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to hightest_pcie_level_enabled */
- for (i = 2; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled */
- levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled */
- levels[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
- /* level count is sent to smc once at smc table init and never changes */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, SMC_RAM_END);
-
- return result;
-}
-
-
-/**
- * MCLK Frequency Ratio
- * SEQ_CG_RESP Bit[31:24] - 0x0
- * Bit[27:24] - DDR3 Frequency ratio
- * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
- * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
- * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
- * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
- * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
- * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
- * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
- * 400 < 0x7 <= 450MHz, 800 < 0xF
- */
-static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
-{
- if (mem_clock <= 10000)
- return 0x0;
- if (mem_clock <= 15000)
- return 0x1;
- if (mem_clock <= 20000)
- return 0x2;
- if (mem_clock <= 25000)
- return 0x3;
- if (mem_clock <= 30000)
- return 0x4;
- if (mem_clock <= 35000)
- return 0x5;
- if (mem_clock <= 40000)
- return 0x6;
- if (mem_clock <= 45000)
- return 0x7;
- if (mem_clock <= 50000)
- return 0x8;
- if (mem_clock <= 55000)
- return 0x9;
- if (mem_clock <= 60000)
- return 0xa;
- if (mem_clock <= 65000)
- return 0xb;
- if (mem_clock <= 70000)
- return 0xc;
- if (mem_clock <= 75000)
- return 0xd;
- if (mem_clock <= 80000)
- return 0xe;
- /* mem_clock > 800MHz */
- return 0xf;
-}
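A hedged usage sketch of the ratio encoding above, assuming memory clocks are
expressed in 10 kHz units (so 10000 corresponds to 100 MHz); the value below is
purely illustrative:

	/* 480 MHz is passed as 48000, falls into the 450 < x <= 500 MHz
	 * bracket and encodes as 0x8
	 */
	uint8_t ratio = fiji_get_mclk_frequency_ratio(48000);	/* -> 0x8 */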
-
-/**
-* Populates the SMC MCLK structure using the provided memory clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the memory clock to use to populate the structure
-* @param mclk the SMC MCLK structure to be populated
-*/
-static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
-{
- struct pp_atomctrl_memory_clock_param mem_param;
- int result;
-
- result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to get Memory PLL Dividers.",
- );
-
- /* Save the result data to the output memory level structure */
- mclk->MclkFrequency = clock;
- mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
- mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
-
- return result;
-}
-
-static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
- uint32_t mclk_stutter_mode_threshold = 60000;
-
- if (table_info->vdd_dep_on_mclk) {
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk, clock,
- (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinVddc voltage value from memory "
- "VDDC voltage dependency table", return result);
- }
-
- mem_level->EnabledForThrottle = 1;
- mem_level->EnabledForActivity = 0;
- mem_level->UpHyst = 0;
- mem_level->DownHyst = 100;
- mem_level->VoltageDownHyst = 0;
- mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- mem_level->StutterEnable = false;
-
- mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- /* Enable stutter mode if all of the following conditions apply:
- * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
- * &(data->DisplayTiming.numExistingDisplays));
- */
- data->display_timing.num_existing_displays = 1;
-
- if (mclk_stutter_mode_threshold &&
- (clock <= mclk_stutter_mode_threshold) &&
- (!data->is_uvd_enabled) &&
- (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
- STUTTER_ENABLE) & 0x1))
- mem_level->StutterEnable = true;
-
- result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
- }
- return result;
-}
-
-/**
-* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- int result;
- /* populate MCLK dpm table to SMU7 */
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
- uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
- SMU73_MAX_LEVELS_MEMORY;
- struct SMU73_Discrete_MemoryLevel *levels =
- smu_data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero",
- return -EINVAL);
- result = fiji_populate_single_memory_level(hwmgr,
- dpm_table->mclk_table.dpm_levels[i].value,
- &levels[i]);
- if (result)
- return result;
- }
-
- /* Only enable level 0 for now. */
- levels[0].EnabledForActivity = 1;
-
- /* In order to prevent MC activity in stutter mode from pushing DPM up,
- * the UVD change complements this by putting the MCLK in
- * a higher state by default, such that we are not affected by
- * the up threshold or MCLK DPM latency.
- */
- levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
- CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
- smu_data->smc_state_table.MemoryDpmLevelCount =
- (uint8_t)dpm_table->mclk_table.count;
- data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high */
- levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- /* level count is sent to smc once at smc table init and never changes */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, SMC_RAM_END);
-
- return result;
-}
-
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param hwmgr the address of the hardware manager
-* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
-* @param smio_pat the SMIO pattern structure to be populated
-*/
-static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pat)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
- /* find the mvdd entry whose clock is greater than or equal to the requested clock */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
- } else
- return -EINVAL;
-
- return 0;
-}
-
-static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = 0;
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct pp_atomctrl_clock_dividers_vi dividers;
- SMIO_Pattern vol_level;
- uint32_t mvdd;
- uint16_t us_mvdd;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
-
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- if (!data->sclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0,
- * already converted to SMC_UL */
- table->ACPILevel.SclkFrequency =
- data->dpm_table.sclk_table.dpm_levels[0].value;
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk,
- table->ACPILevel.SclkFrequency,
- (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDC voltage value " \
- "in Clock Dependency Table",
- );
- } else {
- table->ACPILevel.SclkFrequency =
- data->vbios_boot_state.sclk_bootup_value;
- table->ACPILevel.MinVoltage =
- data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
- }
-
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
- table->ACPILevel.SclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PWRON, 0);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_RESET, 1);
- spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
- SCLK_MUX_SEL, 4);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- if (!data->mclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency =
- data->dpm_table.mclk_table.dpm_levels[0].value;
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk,
- table->MemoryACPILevel.MclkFrequency,
- (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
- );
- } else {
- table->MemoryACPILevel.MclkFrequency =
- data->vbios_boot_state.mclk_bootup_value;
- table->MemoryACPILevel.MinVoltage =
- data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
- }
-
- us_mvdd = 0;
- if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!fiji_populate_mvdd_value(hwmgr,
- data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
-
- table->MemoryACPILevel.MinMvdd =
- PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = false;
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
- return result;
-}
-
-static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->VceLevelCount = (uint8_t)(mm_table->count);
- table->VceBootLevel = 0;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage = 0;
- table->VceLevel[count].MinVoltage |=
- (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
- table->VceLevel[count].MinVoltage |=
- ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for VCE engine clock",
- return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->AcpLevelCount = (uint8_t)(mm_table->count);
- table->AcpBootLevel = 0;
-
- for (count = 0; count < table->AcpLevelCount; count++) {
- table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
- table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->AcpLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for engine clock", return result);
-
- table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
- int32_t eng_clock, int32_t mem_clock,
- struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
- uint32_t dram_timing;
- uint32_t dram_timing2;
- uint32_t burstTime;
- ULONG state, trrds, trrdl;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- eng_clock, mem_clock);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
-
- state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
- trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
- trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
- arb_regs->McArbBurstTime = (uint8_t)burstTime;
- arb_regs->TRRDS = (uint8_t)trrds;
- arb_regs->TRRDL = (uint8_t)trrdl;
-
- return 0;
-}
-
-static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
- int result = 0;
-
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = fiji_populate_memory_timing_parameters(hwmgr,
- data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
- if (result)
- break;
- }
- }
-
- if (!result)
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU73_Discrete_MCArbDramTimingTable),
- SMC_RAM_END);
- return result;
-}
-
-static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- table->UvdLevelCount = (uint8_t)(mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].MinVoltage = 0;
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].VclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Vclk clock", return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Dclk clock", return result);
-
- table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-
- }
- return result;
-}
-
-static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table */
- result = phm_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(table->GraphicsBootLevel));
-
- result = phm_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(table->MemoryBootLevel));
-
- table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
- VOLTAGE_SCALE;
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE;
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return 0;
-}
-
-static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint8_t count, level;
-
- count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_sclk->entries[level].clk >=
- data->vbios_boot_state.sclk_bootup_value) {
- smu_data->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_mclk->entries[level].clk >=
- data->vbios_boot_state.mclk_bootup_value) {
- smu_data->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-
- return 0;
-}
-
-static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
- volt_with_cks, value;
- uint16_t clock_freq_u16;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
- volt_offset = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
- stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
- /* Read SMU_Eefuse to read and calculate RO and determine
- * if the part is SS or FF. if RO >= 1660MHz, part is FF.
- */
- efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (146 * 4));
- efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (148 * 4));
- efuse &= 0xFF000000;
- efuse = efuse >> 24;
- efuse2 &= 0xF;
-
- if (efuse2 == 1)
- ro = (2300 - 1350) * efuse / 255 + 1350;
- else
- ro = (2500 - 1000) * efuse / 255 + 1000;
-
- if (ro >= 1660)
- type = 0;
- else
- type = 1;
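-	/* The 8-bit efuse value is mapped linearly onto a ring-oscillator
-	 * frequency range (in MHz); efuse2 selects which of the two
-	 * calibration ranges applies. 'type' then indexes the [FF, SS]
-	 * rows of fiji_clock_stretcher_ddt_table (0 = FF, 1 = SS).
-	 */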
-
- /* Populate Stretch amount */
- smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
-
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
- for (i = 0; i < sclk_table->count; i++) {
- smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
- sclk_table->entries[i].cks_enable << i;
- volt_without_cks = (uint32_t)((14041 *
- (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
- (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
- volt_with_cks = (uint32_t)((13946 *
- (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
- (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
- if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
- smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
- }
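-	/* The two fitted expressions above estimate the voltage needed with
-	 * and without clock stretching; their difference, plus the per-entry
-	 * cks_voffset, is converted into VID steps (the *100/625 scaling,
-	 * presumably 6.25 mV per step) and stored as that level's voltage
-	 * offset.
-	 */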
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- STRETCH_ENABLE, 0x0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- staticEnable, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x0);
-
- /* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
- PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
- return -EINVAL);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL);
- value &= 0xFFC2FF87;
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
- fiji_clock_stretcher_lookup_table[stretch_amount2][0];
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
- fiji_clock_stretcher_lookup_table[stretch_amount2][1];
- clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
- GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
- SclkFrequency) / 100);
- if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
- clock_freq_u16 &&
- fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
- clock_freq_u16) {
- /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
- value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
- /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
- value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
- /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
- value |= (fiji_clock_stretch_amount_conversion
- [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
- [stretch_amount]) << 3;
- }
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].minFreq);
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].maxFreq);
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
- fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
- (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL, value);
-
- /* Populate DDT Lookup Table */
- for (i = 0; i < 4; i++) {
- /* Assign the minimum and maximum VID stored
- * in the last row of Clock Stretcher Voltage Table.
- */
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].minVID =
- (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].maxVID =
- (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
- /* Loop through each SCLK and check the frequency
- * to see if it lies within the frequency for clock stretcher.
- */
- for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
- cks_setting = 0;
- clock_freq = PP_SMC_TO_HOST_UL(
- smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
-			/* Check the allowed frequency against sclk level[j].
-			 * The sclk's endianness has already been converted,
-			 * and it is in 10 kHz units,
-			 * as opposed to the data table, which is in MHz units.
-			 */
- if (clock_freq >=
- (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
- cks_setting |= 0x2;
- if (clock_freq <
- (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
- cks_setting |= 0x1;
- }
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
- }
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
- ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
- value &= 0xFFFFFFFE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
- return 0;
-}
-
-/**
-* Populates the SMC VRConfig field in the DPM table.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint16_t config;
-
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- PP_ASSERT_WITH_CODE(false,
- "VDDC should be on SVI2 control in merged mode!",
- );
- }
- /* Set Vddci Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- }
- /* Set Mvdd Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- config = VR_SMIO_PATTERN_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
-
-static int fiji_init_arb_table_index(struct pp_smumgr *smumgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend);
- uint32_t tmp;
- int result;
-
- /* This is a read-modify-write on the first byte of the ARB table.
- * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
- * is the field 'current'.
-	 * This solution is ugly, but we never write the whole table,
-	 * only individual fields in it.
- * In reality this field should not be in that structure
- * but in a soft register.
- */
- result = smu7_read_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
-
- if (result)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return smu7_write_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
-}
-
-static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct SMU73_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- unsigned min_level = 1;
-
- hwmgr->default_gfx_power_profile.activity_threshold =
- be16_to_cpu(levels[0].ActivityLevel);
- hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
- hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
- hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
- hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
- /* Workaround compute SDMA instability: disable lowest SCLK
- * DPM level. Optimize compute power profile: Use only highest
- * 2 power levels (if more than 2 are available), Hysteresis:
- * 0ms up, 5ms down
- */
- if (data->smc_state_table.GraphicsDpmLevelCount > 2)
- min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
- else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
- min_level = 1;
- else
- min_level = 0;
- hwmgr->default_compute_power_profile.min_sclk =
- be32_to_cpu(levels[min_level].SclkFrequency);
- hwmgr->default_compute_power_profile.up_hyst = 0;
- hwmgr->default_compute_power_profile.down_hyst = 5;
-
- hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-
- return 0;
-}
-
-static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
-{
- pp_atomctrl_voltage_table param_led_dpm;
- int result = 0;
- u32 mask = 0;
-
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_LEDDPM, VOLTAGE_OBJ_GPIO_LUT,
- &param_led_dpm);
- if (result == 0) {
- int i, j;
- u32 tmp = param_led_dpm.mask_low;
-
- for (i = 0, j = 0; i < 32; i++) {
- if (tmp & 1) {
- mask |= (i << (8 * j));
- if (++j >= 3)
- break;
- }
- tmp >>= 1;
- }
- }
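-	/* 'mask' now holds the GPIO pin numbers of up to three LED-DPM pins,
-	 * one pin number per byte, extracted from the low dword of the GPIO
-	 * LUT mask.
-	 */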
- if (mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_LedConfig,
- mask);
- return 0;
-}
-
-/**
-* Initializes the SMC table and uploads it.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success, otherwise an error code
-*/
-int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- uint8_t i;
- struct pp_atomctrl_gpio_pin_assignment gpio_pin;
-
- fiji_initialize_power_tune_defaults(hwmgr);
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
- fiji_populate_smc_voltage_tables(hwmgr, table);
-
- table->SystemFlags = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (data->is_memory_gddr5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
- result = fiji_populate_ulv_state(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, 0x40035);
- }
-
- result = fiji_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result);
-
- result = fiji_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result);
-
- result = fiji_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result);
-
- result = fiji_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result);
-
- result = fiji_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result);
-
- result = fiji_populate_smc_acp_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACP Level!", return result);
-
- result = fiji_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
- /* Since only the initial state is completely set up at this point
- * (the other states are just copies of the boot state) we only
- * need to populate the ARB settings for the initial state.
- */
- result = fiji_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result);
-
- result = fiji_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result);
-
- result = fiji_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result);
-
- result = fiji_populate_smc_initailial_state(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot State!", return result);
-
- result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate BAPM Parameters!", return result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = fiji_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate Clock Stretcher Data Table!",
- return result);
- }
-
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- table_info->cac_dtp_table->usTargetOperatingTemp *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
- table->PCIeGenInterval = 1;
- table->VRConfig = 0;
-
- result = fiji_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate VRConfig setting!", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
- table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- } else {
- table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin)) {
- table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- /* Thermal Output GPIO */
- if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
- &gpio_pin)) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
-		/* For polarity, read GPIOPAD_A with the assigned GPIO pin:
-		 * since the VBIOS programs this register to the 'inactive state',
-		 * the driver can determine the 'active state' from it and
-		 * program the SMU with the correct polarity.
-		 */
- table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
- (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot) &&
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CombinePCCWithThermalSignal))
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- } else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-	/* Upload all dpm data to SMC memory (dpm level, dpm level count, etc.) */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
- SMC_RAM_END);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload dpm data to SMC memory!", return result);
-
- result = fiji_init_arb_table_index(hwmgr->smumgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload arb data to SMC memory!", return result);
-
- result = fiji_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate PM fuses to SMC memory!", return result);
-
- result = fiji_setup_dpm_led_config(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to setup dpm led config", return result);
-
- fiji_save_default_power_profile(hwmgr);
-
- return 0;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- if (smu_data->smu7_data.fan_table_start == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (duty100 == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
- usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
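-	/* usPWMMin appears to be expressed in hundredths of a percent, so
-	 * dividing by 10000 scales it to an absolute duty value relative to
-	 * the 100% duty register value read above.
-	 */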
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
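-	/* The +50 before each divide by 100 rounds to the nearest integer;
-	 * the factor of 16 presumably converts the duty-per-degree ratio
-	 * into the fixed-point format the SMC fan table expects.
-	 */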
-
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->
- thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = smu7_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
- thermal_controller.advanceFanControlParameters.ulCycleDelay *
- reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
- hwmgr->device, CGS_IND_REG__SMC,
- CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
- (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
- SMC_RAM_END);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanMinPwm,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanSclkTarget,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
- if (res)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
-
- return 0;
-}
-
-
-int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
-{
- int ret;
- struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
-
- if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
- return 0;
-
- ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs);
-
- if (!ret)
- /* If this param is not changed, this function could fire unnecessarily */
- smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
- return ret;
-}
-
-static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return fiji_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold =
- data->low_sclk_interrupt_threshold;
-
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- SMC_RAM_END);
- }
- result = fiji_program_mem_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((result == 0),
- "Failed to program memory timing parameters!",
- );
- return result;
-}
-
-uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
-{
- switch (type) {
- case SMU_SoftRegisters:
- switch (member) {
- case HandshakeDisables:
- return offsetof(SMU73_SoftRegisters, HandshakeDisables);
- case VoltageChangeTimeout:
- return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
- case AverageGraphicsActivity:
- return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
- case PreVBlankGap:
- return offsetof(SMU73_SoftRegisters, PreVBlankGap);
- case VBlankTimeout:
- return offsetof(SMU73_SoftRegisters, VBlankTimeout);
- case UcodeLoadStatus:
- return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
- }
- case SMU_Discrete_DpmTable:
- switch (member) {
- case UvdBootLevel:
- return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
- case VceBootLevel:
- return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
- case LowSclkInterruptThreshold:
- return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
- }
- }
- pr_warn("can't get the offset of type %x member %x\n", type, member);
- return 0;
-}
-
-uint32_t fiji_get_mac_definition(uint32_t value)
-{
- switch (value) {
- case SMU_MAX_LEVELS_GRAPHICS:
- return SMU73_MAX_LEVELS_GRAPHICS;
- case SMU_MAX_LEVELS_MEMORY:
- return SMU73_MAX_LEVELS_MEMORY;
- case SMU_MAX_LEVELS_LINK:
- return SMU73_MAX_LEVELS_LINK;
- case SMU_MAX_ENTRIES_SMIO:
- return SMU73_MAX_ENTRIES_SMIO;
- case SMU_MAX_LEVELS_VDDC:
- return SMU73_MAX_LEVELS_VDDC;
- case SMU_MAX_LEVELS_VDDGFX:
- return SMU73_MAX_LEVELS_VDDGFX;
- case SMU_MAX_LEVELS_VDDCI:
- return SMU73_MAX_LEVELS_VDDCI;
- case SMU_MAX_LEVELS_MVDD:
- return SMU73_MAX_LEVELS_MVDD;
- }
-
- pr_warn("can't get the mac of %x\n", value);
- return 0;
-}
-
-
-static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- smu_data->smc_state_table.UvdBootLevel = 0;
- if (table_info->mm_dep_table->count > 0)
- smu_data->smc_state_table.UvdBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
- UvdBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
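-	/* Round the byte offset down to a dword-aligned SMC address so the
-	 * 32-bit read-modify-write below can update the UvdBootLevel byte
-	 * while preserving the neighbouring fields.
-	 */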
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
- return 0;
-}
-
-static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smu_data->smc_state_table.VceBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- else
- smu_data->smc_state_table.VceBootLevel = 0;
-
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
- return 0;
-}
-
-static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
-int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
-{
- switch (type) {
- case SMU_UVD_TABLE:
- fiji_update_uvd_smc_table(hwmgr);
- break;
- case SMU_VCE_TABLE:
- fiji_update_vce_smc_table(hwmgr);
- break;
- case SMU_SAMU_TABLE:
- fiji_update_samu_smc_table(hwmgr);
- break;
- default:
- break;
- }
- return 0;
-}
-
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success, -1 if any table location could not be read
-*/
-int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, DpmTable),
- &tmp, SMC_RAM_END);
-
- if (0 == result)
- smu_data->smu7_data.dpm_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, SoftRegisters),
- &tmp, SMC_RAM_END);
-
- if (!result) {
- data->soft_regs_start = tmp;
- smu_data->smu7_data.soft_regs_start = tmp;
- }
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, mcRegisterTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.mc_reg_table_start = tmp;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, FanTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.fan_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.arb_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, Version),
- &tmp, SMC_RAM_END);
-
- if (!result)
- hwmgr->microcode_version_info.SMC = tmp;
-
- error |= (0 != result);
-
- return error ? -1 : 0;
-}
-
-int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
-
- /* Program additional LP registers
- * that are no longer programmed by VBIOS
- */
- cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
-
- return 0;
-}
-
-bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
-}
-
-int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
-{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)
- (hwmgr->smumgr->backend);
- struct SMU73_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
- SMU73_MAX_LEVELS_GRAPHICS;
- uint32_t i;
-
- for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpHyst = request->up_hyst;
- levels[i].DownHyst = request->down_hyst;
- }
-
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- array_size, SMC_RAM_END);
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
deleted file mode 100644
index d9c72d992e30..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef FIJI_SMC_H
-#define FIJI_SMC_H
-
-#include "smumgr.h"
-#include "smu73.h"
-
-struct fiji_pt_defaults {
- uint8_t SviLoadLineEn;
- uint8_t SviLoadLineVddC;
- uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
- uint8_t TDC_MAWt;
- uint8_t TdcWaterfallCtl;
- uint8_t DTEAmbientTempBase;
-};
-
-int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int fiji_init_smc_table(struct pp_hwmgr *hwmgr);
-int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
-int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t fiji_get_offsetof(uint32_t type, uint32_t member);
-uint32_t fiji_get_mac_definition(uint32_t value);
-int fiji_process_firmware_header(struct pp_hwmgr *hwmgr);
-int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
-bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr);
-int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
-int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 6ae948fc524f..f572beff197f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -23,6 +23,7 @@
#include "pp_debug.h"
#include "smumgr.h"
+#include "smu7_dyn_defaults.h"
#include "smu73.h"
#include "smu_ucode_xfer_vi.h"
#include "fiji_smumgr.h"
@@ -37,14 +38,54 @@
#include "gca/gfx_8_0_d.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
-#include "fiji_pwrvirus.h"
-#include "fiji_smc.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+#include "hardwaremanager.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+#include "hwmgr.h"
+#include "smu7_hwmgr.h"
+
#define AVFS_EN_MSB 1568
#define AVFS_EN_LSB 1568
#define FIJI_SMC_SIZE 0x20000
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define VDDC_VDDCI_DELTA 300
+#define MC_CG_ARB_FREQ_F1 0x0b
+
+/* Row selects whether the clock stretch amount is a multiple of 2.5% or not;
+ * columns are [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
+ */
+static const uint16_t fiji_clock_stretcher_lookup_table[2][4] = {
+ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
+
+/* [FF, SS] type, 4 voltage ranges, and
+ * [Floor Freq, Boundary Freq, VID min, VID max]
+ */
+static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] = {
+ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+ { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
+ * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
+ */
+static const uint8_t fiji_clock_stretch_amount_conversion[2][6] = {
+ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
+
+static const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+	/* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
+ {1, 0xF, 0xFD,
+ /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
+ 0x19, 5, 45}
+};
+
static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
/* Min Sclk pcie DeepSleep Activity CgSpll CgSpll spllSpread SpllSpread CcPwr CcPwr Sclk Display Enabled Enabled Voltage Power */
/* Voltage, Frequency, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, Spectrum, Spectrum2, DynRm, DynRm1 Did, Watermark, ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
@@ -58,147 +99,114 @@ static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
{ 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }
};
-static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
+static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* Wait for smc boot up */
- /* SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ /* PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0); */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result)
return result;
/* Clear status */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMU_STATUS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
	/* Wait for ROM firmware to initialize interrupt handler */
- /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
+ /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, SMC_IND,
SMC_INTR_CNTL_MASK_0, 0x10040, 0xFFFFFFFF); */
/* Set SMU Auto Start */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_INPUT_DATA, AUTO_START, 1);
/* Clear firmware interrupt enable flag */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
INTERRUPTS_ENABLED, 1);
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
/* Wait for done bit to be set */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
SMU_STATUS, SMU_DONE, 0);
/* Check pass/failed indicator */
- if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_STATUS, SMU_PASS) != 1) {
PP_ASSERT_WITH_CODE(false,
"SMU Firmware start failed!", return -1);
}
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
+static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* wait for smc boot up */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
/* Clear firmware interrupt enable flag */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
/* Assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result)
return result;
	/* Set SMC instruction start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
/* Enable clock */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
-{
- int i;
- int result = -EINVAL;
- uint32_t reg, data;
-
- const PWR_Command_Table *pvirus = PwrVirusTable;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
-
- for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
- switch (pvirus->command) {
- case PwrCmdWrite:
- reg = pvirus->reg;
- data = pvirus->data;
- cgs_write_register(smumgr->device, reg, data);
- break;
-
- case PwrCmdEnd:
- result = 0;
- break;
-
- default:
- pr_info("Table Exit with Invalid Command!");
- smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- result = -EINVAL;
- break;
- }
- pvirus++;
- }
-
- return result;
-}
-
-static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
+static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (0 != smu_data->avfs.avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(smumgr,
+ if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
result = -EINVAL;
@@ -206,23 +214,23 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
}
/* Soft-Reset to reset the engine before loading uCode */
/* halt */
- cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000);
+ cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000);
/* reset everything */
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
/* clear reset */
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0);
return result;
}
-static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int fiji_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
{
int32_t vr_config;
uint32_t table_start;
uint32_t level_addr, vr_config_addr;
uint32_t level_size = sizeof(avfs_graphics_level);
- PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, DpmTable),
&table_start, 0x40000),
@@ -237,7 +245,7 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
vr_config_addr = table_start +
offsetof(SMU73_Discrete_DpmTable, VRConfig);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_addr,
(uint8_t *)&vr_config, sizeof(int32_t), 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] Problems copying "
"vr_config value over to SMC",
@@ -245,7 +253,7 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, level_addr,
(uint8_t *)(&avfs_graphics_level), level_size, 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!",
return -1;);
@@ -253,9 +261,9 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
return 0;
}
-static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
+static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
switch (smu_data->avfs.avfs_btc_status) {
case AVFS_BTC_COMPLETED_PREVIOUSLY:
@@ -265,17 +273,17 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
if (!smu_started)
break;
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr),
+ PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr),
"[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
" table over to SMU",
return -EINVAL;);
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(smumgr),
+ PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
"[AVFS][fiji_avfs_event_mgr] Could not setup "
"Pwr Virus for AVFS ",
return -EINVAL;);
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(smumgr),
+ PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr),
"[AVFS][fiji_avfs_event_mgr] Failure at "
"fiji_start_avfs_btc. AVFS Disabled",
return -EINVAL;);
@@ -293,64 +301,64 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
return 0;
}
-static int fiji_start_smu(struct pp_smumgr *smumgr)
+static int fiji_start_smu(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+ struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend);
/* Only start SMC if SMC RAM is not running */
- if (!(smu7_is_smc_ram_running(smumgr)
- || cgs_is_virtualization_enabled(smumgr->device))) {
- fiji_avfs_event_mgr(smumgr, false);
+ if (!(smu7_is_smc_ram_running(hwmgr)
+ || cgs_is_virtualization_enabled(hwmgr->device))) {
+ fiji_avfs_event_mgr(hwmgr, false);
/* Check if SMU is running in protected mode */
- if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
+ if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC,
SMU_FIRMWARE, SMU_MODE)) {
- result = fiji_start_smu_in_non_protection_mode(smumgr);
+ result = fiji_start_smu_in_non_protection_mode(hwmgr);
if (result)
return result;
} else {
- result = fiji_start_smu_in_protection_mode(smumgr);
+ result = fiji_start_smu_in_protection_mode(hwmgr);
if (result)
return result;
}
- fiji_avfs_event_mgr(smumgr, true);
+ fiji_avfs_event_mgr(hwmgr, true);
}
	/* Initialize all clock gating before the RLC is loaded and running. */
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE);
/* Setup SoftRegsStart here for register lookup in case
* DummyBackEnd is used and ProcessFirmwareHeader is not executed
*/
- smu7_read_smc_sram_dword(smumgr,
+ smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, SoftRegisters),
&(priv->smu7_data.soft_regs_start), 0x40000);
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
-static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
+static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
uint32_t efuse = 0;
uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1;
- if (cgs_is_virtualization_enabled(smumgr->device))
+ if (cgs_is_virtualization_enabled(hwmgr->device))
return 0;
- if (!atomctrl_read_efuse(smumgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
+ if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
mask, &efuse)) {
if (efuse)
return true;
@@ -358,14 +366,7 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
return false;
}
-/**
-* Write a 32bit value to the SMC SRAM space.
-* ALL PARAMETERS ARE IN HOST BYTE ORDER.
-* @param smumgr the address of the powerplay hardware manager.
-* @param smc_addr the address in the SMC RAM to access.
-* @param value to write to the SMC SRAM.
-*/
-static int fiji_smu_init(struct pp_smumgr *smumgr)
+static int fiji_smu_init(struct pp_hwmgr *hwmgr)
{
int i;
struct fiji_smumgr *fiji_priv = NULL;
@@ -375,9 +376,9 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
if (fiji_priv == NULL)
return -ENOMEM;
- smumgr->backend = fiji_priv;
+ hwmgr->smu_backend = fiji_priv;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
@@ -386,6 +387,2334 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
return 0;
}
+static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+ uint32_t clock, uint32_t *voltage, uint32_t *mvdd)
+{
+ uint32_t i;
+ uint16_t vddci;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ *voltage = *mvdd = 0;
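+	/* *voltage is a packed field: VDDC and VDDCI (each scaled by
+	 * VOLTAGE_SCALE) sit in separate bit ranges, together with a phase
+	 * count, as indicated by the VDDC/VDDCI/PHASES shifts used below.
+	 */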
+
+
+	/* the clock-voltage dependency table is empty */
+ if (dep_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < dep_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (dep_table->entries[i].clk >= clock) {
+ *voltage |= (dep_table->entries[i].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i].vddci)
+ *voltage |= (dep_table->entries[i].vddci *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+ VOLTAGE_SCALE;
+
+ *voltage |= 1 << PHASES_SHIFT;
+ return 0;
+ }
+ }
+
+	/* sclk is bigger than the max sclk in the dependency table */
+ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i-1].vddci) {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+				(dep_table->entries[i - 1].vddc -
+ VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+	else if (dep_table->entries[i - 1].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+ return 0;
+}
+
+
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+ uint32_t tmp;
+ tmp = raw_setting * 4096 / 100;
+ return (uint16_t)tmp;
+}
+
+static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
+{
+ switch (line) {
+ case SMU7_I2CLineID_DDC1:
+ *scl = SMU7_I2C_DDC1CLK;
+ *sda = SMU7_I2C_DDC1DATA;
+ break;
+ case SMU7_I2CLineID_DDC2:
+ *scl = SMU7_I2C_DDC2CLK;
+ *sda = SMU7_I2C_DDC2DATA;
+ break;
+ case SMU7_I2CLineID_DDC3:
+ *scl = SMU7_I2C_DDC3CLK;
+ *sda = SMU7_I2C_DDC3DATA;
+ break;
+ case SMU7_I2CLineID_DDC4:
+ *scl = SMU7_I2C_DDC4CLK;
+ *sda = SMU7_I2C_DDC4DATA;
+ break;
+ case SMU7_I2CLineID_DDC5:
+ *scl = SMU7_I2C_DDC5CLK;
+ *sda = SMU7_I2C_DDC5DATA;
+ break;
+ case SMU7_I2CLineID_DDC6:
+ *scl = SMU7_I2C_DDC6CLK;
+ *sda = SMU7_I2C_DDC6DATA;
+ break;
+ case SMU7_I2CLineID_SCLSDA:
+ *scl = SMU7_I2C_SCL;
+ *sda = SMU7_I2C_SDA;
+ break;
+ case SMU7_I2CLineID_DDCVGA:
+ *scl = SMU7_I2C_DDCVGACLK;
+ *sda = SMU7_I2C_DDCVGADATA;
+ break;
+ default:
+ *scl = 0;
+ *sda = 0;
+ break;
+ }
+}
+
+static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &fiji_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &fiji_power_tune_data_set_array[0];
+
+}
+
+static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ struct pp_advance_fan_control_parameters *fan_table =
+ &hwmgr->thermal_controller.advanceFanControlParameters;
+ uint8_t uc_scl, uc_sda;
+
+	/* The TDP number of fraction bits is changed from 8 to 7 for Fiji,
+	 * as requested by the SMC team.
+	 */
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 128));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 128));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range!",
+ );
+
+ dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
+
+ /* The following are for new Fiji Multi-input fan/thermal control */
+ dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTargetOperatingTemp * 256);
+ dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitHotspot * 256);
+ dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitLiquid1 * 256);
+ dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitLiquid2 * 256);
+ dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitVrVddc * 256);
+ dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitVrMvdd * 256);
+ dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitPlx * 256);
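+ /* These temperature limits appear to use an 8.8 fixed-point encoding:
+ * e.g. a 90 degree limit becomes 90 * 256 = 23040 before the byte swap.
+ */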
+
+ dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainEdge));
+ dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHotspot));
+ dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainLiquid));
+ dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainVrVddc));
+ dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
+ dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainPlx));
+ dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHbm));
+
+ dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
+ dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
+ dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
+ dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
+
+ get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Liquid_I2C_LineSCL = uc_scl;
+ dpm_table->Liquid_I2C_LineSDA = uc_sda;
+
+ get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Vr_I2C_LineSCL = uc_scl;
+ dpm_table->Vr_I2C_LineSDA = uc_sda;
+
+ get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
+ dpm_table->Plx_I2C_LineSCL = uc_scl;
+ dpm_table->Plx_I2C_LineSDA = uc_sda;
+
+ return 0;
+}
+
+
+static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+
+static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ /* The number of TDC fraction bits is changed from 8 to 7
+ * for Fiji, as requested by the SMC team.
+ */
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+ return 0;
+}
+
+static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+ return -EINVAL);
+ else {
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+ smu_data->power_tune_table.LPMLTemperatureMin =
+ (uint8_t)((temp >> 16) & 0xff);
+ smu_data->power_tune_table.LPMLTemperatureMax =
+ (uint8_t)((temp >> 8) & 0xff);
+ smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+ }
+ return 0;
+}
+
+static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity & (1 << 15)) ||
+ 0 == hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity)
+ hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity = hwmgr->thermal_controller.
+ advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+ PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+ advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+ return 0;
+}
+
+static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ uint32_t pm_fuse_table_offset;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ /* DW6 */
+ if (fiji_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+ /* DW7 */
+ if (fiji_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+ /* DW8 */
+ if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (0 != fiji_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ /* DW13-DW14 */
+ if (fiji_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan Control parameters Failed!",
+ return -EINVAL);
+
+ /* DW15-DW18 */
+ if (fiji_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ /* DW20 */
+ if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+ "Sidd Failed!", return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+ /* The table is already swapped, so in order to use the values from it
+ * we need to swap them back.
+ * We are populating vddc CAC data into the BapmVddc table
+ * in both split and merged mode.
+ */
+
+ for (count = 0; count < lookup_table->count; count++) {
+ index = phm_get_voltage_index(lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] =
+ convert_to_vid(lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] =
+ convert_to_vid(lookup_table->entries[index].us_cac_high);
+ }
+
+ return 0;
+}
+
+static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = fiji_populate_cac_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate CAC voltage tables to SMC",
+ return -EINVAL);
+
+ return 0;
+}
+
+static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_Ulv *state)
+{
+ int result = 0;
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
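+ /* Assuming the usual SMU7 scale factors (VOLTAGE_VID_OFFSET_SCALE1 = 625,
+ * VOLTAGE_VID_OFFSET_SCALE2 = 100), this converts a ULV voltage offset
+ * given in mV into VID steps of 6.25 mV each.
+ */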
+
+ state->VddcPhase = 1;
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+ }
+ return result;
+}
+
+static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ return fiji_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ int i;
+
+ /* Index (dpm_table->pcie_speed_table.count)
+ * is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+ dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t ref_clock;
+ uint32_t ref_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
+ ref_clock = atomctrl_get_reference_clock(hwmgr);
+ ref_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* the low 14 bits are the fraction and the high 12 bits are the divider */
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup */
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ struct pp_atomctrl_internal_ss_info ssInfo;
+
+ uint32_t vco_freq = clock * dividers.uc_pll_post_div;
+ if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
+ vco_freq, &ssInfo)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ *
+ * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
+ */
+ uint32_t clk_s = ref_clock * 5 /
+ (ref_divider * ssInfo.speed_spectrum_rate);
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
+ fbdiv / (clk_s * 10000);
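+ /* ref_divider already includes the +1 from the formula above, so the
+ * "* 10 ... / 2" collapses into the "* 5" used for clk_s; the /10000 in
+ * clk_v accounts for speed_spectrum_percentage being in 0.01% units.
+ */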
+
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
+ CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
+ }
+ }
+
+ sclk->SclkFrequency = clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
+
+static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU73_Discrete_GraphicsLevel *level)
+{
+ int result;
+ /* PP_Clocks minClocks; */
+ uint32_t threshold, mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ result = fiji_calculate_sclk_params(hwmgr, clock, level);
+
+ /* populate graphics levels */
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk, clock,
+ (uint32_t *)(&level->MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for "
+ "VDDC engine clock dependency table",
+ return result);
+
+ level->SclkFrequency = clock;
+ level->ActivityLevel = sclk_al_threshold;
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ level->EnabledForThrottle = 1;
+ level->UpHyst = 10;
+ level->DownHyst = 0;
+ level->VoltageDownHyst = 0;
+ level->PowerThrottle = 0;
+
+ threshold = clock * data->fast_watermark_threshold / 100;
+
+ data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+ hwmgr->display_config.min_core_set_clock_in_sr);
+
+
+ /* Default to slow; the highest DPM level will be
+ * set to PPSMC_DISPLAY_WATERMARK_HIGH later.
+ */
+ level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+
+ return 0;
+}
+
+static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
+ int result = 0;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
+ SMU73_MAX_LEVELS_GRAPHICS;
+ struct SMU73_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i, max_entry;
+ uint8_t hightest_pcie_level_enabled = 0,
+ lowest_pcie_level_enabled = 0,
+ mid_pcie_level_enabled = 0,
+ count = 0;
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = fiji_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &levels[i]);
+ if (result)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ levels[i].DeepSleepDivId = 0;
+ }
+
+ /* Only enable level 0 for now.*/
+ levels[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_cnt - 1;
+ for (i = 0; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ } else {
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (hightest_pcie_level_enabled + 1))) != 0))
+ hightest_pcie_level_enabled++;
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0))
+ lowest_pcie_level_enabled++;
+
+ while ((count < hightest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+ count++;
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+ hightest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled + 1 + count) :
+ hightest_pcie_level_enabled;
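+ /* Example: with pcie_dpm_enable_mask == 0x7 (levels 0-2 enabled) the
+ * loops above yield hightest_pcie_level_enabled = 2,
+ * lowest_pcie_level_enabled = 0 and mid_pcie_level_enabled = 1.
+ */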
+
+ /* set pcieDpmLevel to hightest_pcie_level_enabled */
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled */
+ levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled */
+ levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+ /* The level count is sent to the SMC once at SMC table init and never changes. */
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+
+/**
+ * MCLK Frequency Ratio
+ * SEQ_CG_RESP Bit[31:24] - 0x0
+ * Bit[27:24] - DDR3 Frequency ratio
+ * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
+ * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
+ * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
+ * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
+ * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
+ * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
+ * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
+ * 400 < 0x7 <= 450MHz, 800 < 0xF
+ */
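+/* Note: fiji_get_mclk_frequency_ratio() below takes mem_clock in 10 kHz
+ * units, so e.g. the 45000 threshold corresponds to the 450 MHz boundary
+ * in the table above.
+ */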
+static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
+{
+ if (mem_clock <= 10000)
+ return 0x0;
+ if (mem_clock <= 15000)
+ return 0x1;
+ if (mem_clock <= 20000)
+ return 0x2;
+ if (mem_clock <= 25000)
+ return 0x3;
+ if (mem_clock <= 30000)
+ return 0x4;
+ if (mem_clock <= 35000)
+ return 0x5;
+ if (mem_clock <= 40000)
+ return 0x6;
+ if (mem_clock <= 45000)
+ return 0x7;
+ if (mem_clock <= 50000)
+ return 0x8;
+ if (mem_clock <= 55000)
+ return 0x9;
+ if (mem_clock <= 60000)
+ return 0xa;
+ if (mem_clock <= 65000)
+ return 0xb;
+ if (mem_clock <= 70000)
+ return 0xc;
+ if (mem_clock <= 75000)
+ return 0xd;
+ if (mem_clock <= 80000)
+ return 0xe;
+ /* mem_clock > 800MHz */
+ return 0xf;
+}
+
+static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
+{
+ struct pp_atomctrl_memory_clock_param mem_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to get Memory PLL Dividers.",
+ );
+
+ /* Save the result data to the output memory level structure */
+ mclk->MclkFrequency = clock;
+ mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
+ mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
+
+ return result;
+}
+
+static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ uint32_t mclk_stutter_mode_threshold = 60000;
+
+ if (table_info->vdd_dep_on_mclk) {
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk, clock,
+ (uint32_t *)(&mem_level->MinVoltage), &mem_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory "
+ "VDDC voltage dependency table", return result);
+ }
+
+ mem_level->EnabledForThrottle = 1;
+ mem_level->EnabledForActivity = 0;
+ mem_level->UpHyst = 0;
+ mem_level->DownHyst = 100;
+ mem_level->VoltageDownHyst = 0;
+ mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ mem_level->StutterEnable = false;
+
+ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ /* Enable stutter mode if all of the following conditions apply:
+ * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
+ * &(data->DisplayTiming.numExistingDisplays));
+ */
+ data->display_timing.num_existing_displays = 1;
+
+ if (mclk_stutter_mode_threshold &&
+ (clock <= mclk_stutter_mode_threshold) &&
+ (!data->is_uvd_enabled) &&
+ (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE) & 0x1))
+ mem_level->StutterEnable = true;
+
+ result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
+ uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
+ SMU73_MAX_LEVELS_MEMORY;
+ struct SMU73_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = fiji_populate_single_memory_level(hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &levels[i]);
+ if (result)
+ return result;
+ }
+
+ /* Only enable level 0 for now. */
+ levels[0].EnabledForActivity = 1;
+
+ /* In order to prevent MC activity in stutter mode from pushing DPM up,
+ * the UVD change complements this by putting the MCLK in
+ * a higher state by default, such that we are not affected by
+ * the up threshold or MCLK DPM latency.
+ */
+ levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+ CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount =
+ (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high */
+ levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ /* The level count is sent to the SMC once at SMC table init and never changes. */
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find the first MVDD entry whose clock is >= the requested clock */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = 0;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ SMIO_Pattern vol_level;
+ uint32_t mvdd;
+ uint16_t us_mvdd;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (!data->sclk_dpm_key_disabled) {
+ /* Get MinVoltage and Frequency from DPM0,
+ * already converted to SMC_UL */
+ table->ACPILevel.SclkFrequency =
+ data->dpm_table.sclk_table.dpm_levels[0].value;
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk,
+ table->ACPILevel.SclkFrequency,
+ (uint32_t *)(&table->ACPILevel.MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDC voltage value " \
+ "in Clock Dependency Table",
+ );
+ } else {
+ table->ACPILevel.SclkFrequency =
+ data->vbios_boot_state.sclk_bootup_value;
+ table->ACPILevel.MinVoltage =
+ data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
+ }
+
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
+ SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ if (!data->mclk_dpm_key_disabled) {
+ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+ table->MemoryACPILevel.MclkFrequency =
+ data->dpm_table.mclk_table.dpm_levels[0].value;
+ result = fiji_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk,
+ table->MemoryACPILevel.MclkFrequency,
+ (uint32_t *)(&table->MemoryACPILevel.MinVoltage), &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDCI voltage value in Clock Dependency Table",
+ );
+ } else {
+ table->MemoryACPILevel.MclkFrequency =
+ data->vbios_boot_state.mclk_bootup_value;
+ table->MemoryACPILevel.MinVoltage =
+ data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
+ }
+
+ us_mvdd = 0;
+ if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+ (data->mclk_dpm_key_disabled))
+ us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else {
+ if (!fiji_populate_mvdd_value(hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[0].value,
+ &vol_level))
+ us_mvdd = vol_level.Voltage;
+ }
+
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = false;
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+ return result;
+}
+
+static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->VceLevelCount = (uint8_t)(mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
+ table->VceLevel[count].MinVoltage |=
+ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->VceLevel[count].MinVoltage |=
+ ((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->AcpLevelCount = (uint8_t)(mm_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
+ table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for engine clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
+ table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ uint32_t dram_timing;
+ uint32_t dram_timing2;
+ uint32_t burstTime;
+ ULONG state, trrds, trrdl;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ eng_clock, mem_clock);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
+
+ state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
+ trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
+ trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+ arb_regs->TRRDS = (uint8_t)trrds;
+ arb_regs->TRRDL = (uint8_t)trrdl;
+
+ return 0;
+}
+
+static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+ int result = 0;
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = fiji_populate_memory_timing_parameters(hwmgr,
+ data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (result)
+ break;
+ }
+ }
+
+ if (!result)
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU73_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END);
+ return result;
+}
+
+static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ table->UvdLevelCount = (uint8_t)(mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
+ VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+
+ }
+ return result;
+}
+
+static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table */
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(table->GraphicsBootLevel));
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(table->MemoryBootLevel));
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return 0;
+}
+
+static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint8_t count, level;
+
+ count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+ data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+ data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+ volt_with_cks, value;
+ uint16_t clock_freq_u16;
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+ volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ /* Read SMU_EFUSE to calculate RO and determine
+ * whether the part is SS or FF; if RO >= 1660MHz, the part is FF.
+ */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (146 * 4));
+ efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (148 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+ efuse2 &= 0xF;
+
+ if (efuse2 == 1)
+ ro = (2300 - 1350) * efuse / 255 + 1350;
+ else
+ ro = (2500 - 1000) * efuse / 255 + 1000;
+
+ if (ro >= 1660)
+ type = 0;
+ else
+ type = 1;
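+ /* Example: with efuse2 == 1 and an efuse byte of 128,
+ * ro = (2300 - 1350) * 128 / 255 + 1350 = 1826, which is >= 1660,
+ * so the part is classified as FF (type 0).
+ */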
+
+ /* Populate Stretch amount */
+ smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ volt_without_cks = (uint32_t)((14041 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+ (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+ volt_with_cks = (uint32_t)((13946 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+ (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ STRETCH_ENABLE, 0x0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ staticEnable, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x0);
+
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFC2FF87;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][0];
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][1];
+ clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
+ GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
+ SclkFrequency) / 100);
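+ /* GraphicsLevel SclkFrequency is in 10 KHz units, so dividing by 100
+ * converts it to MHz for comparison with the (presumably MHz) bounds in
+ * fiji_clock_stretcher_lookup_table.
+ */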
+ if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
+ clock_freq_u16 &&
+ fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
+ clock_freq_u16) {
+ /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+ value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+ /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+ value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+ /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+ value |= (fiji_clock_stretch_amount_conversion
+ [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
+ [stretch_amount]) << 3;
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].minFreq);
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].maxFreq);
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+ fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+ (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ /* Populate DDT Lookup Table */
+ for (i = 0; i < 4; i++) {
+ /* Assign the minimum and maximum VID stored
+ * in the last row of Clock Stretcher Voltage Table.
+ */
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].minVID =
+ (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].maxVID =
+ (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
+ /* Loop through each SCLK and check the frequency
+ * to see if it lies within the frequency for clock stretcher.
+ */
+ for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
+ cks_setting = 0;
+ clock_freq = PP_SMC_TO_HOST_UL(
+ smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
+ /* Check the allowed frequency against sclk level[j].
+ * The sclk's endianness has already been converted,
+ * and it is in 10KHz units,
+ * as opposed to the data table, which is in MHz units.
+ */
+ if (clock_freq >=
+ (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
+ cks_setting |= 0x2;
+ if (clock_freq <
+ (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
+ cks_setting |= 0x1;
+ }
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
+ ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
+static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
+ struct SMU73_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint16_t config;
+
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ PP_ASSERT_WITH_CODE(false,
+ "VDDC should be on SVI2 control in merged mode!",
+ );
+ }
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ }
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ config = VR_SMIO_PATTERN_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t tmp;
+ int result;
+
+ /* This is a read-modify-write on the first byte of the ARB table.
+ * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
+ * is the field 'current'.
+ * This solution is ugly, but we never write the whole table, only
+ * individual fields in it.
+ * In reality this field should not be in that structure
+ * but in a soft register.
+ */
+ result = smu7_read_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct SMU73_Discrete_GraphicsLevel *levels =
+ data->smc_state_table.GraphicsLevel;
+ unsigned min_level = 1;
+
+ hwmgr->default_gfx_power_profile.activity_threshold =
+ be16_to_cpu(levels[0].ActivityLevel);
+ hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
+ hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
+ hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
+
+ hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
+
+ /* Workaround compute SDMA instability: disable lowest SCLK
+ * DPM level. Optimize compute power profile: Use only highest
+ * 2 power levels (if more than 2 are available), Hysteresis:
+ * 0ms up, 5ms down
+ */
+ if (data->smc_state_table.GraphicsDpmLevelCount > 2)
+ min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
+ else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
+ min_level = 1;
+ else
+ min_level = 0;
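+ /* Example: with 8 graphics DPM levels, min_level = 6, so the compute
+ * profile is limited to the two highest SCLK levels, as described above.
+ */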
+ hwmgr->default_compute_power_profile.min_sclk =
+ be32_to_cpu(levels[min_level].SclkFrequency);
+ hwmgr->default_compute_power_profile.up_hyst = 0;
+ hwmgr->default_compute_power_profile.down_hyst = 5;
+
+ hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
+
+ return 0;
+}
+
+static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
+{
+ pp_atomctrl_voltage_table param_led_dpm;
+ int result = 0;
+ u32 mask = 0;
+
+ result = atomctrl_get_voltage_table_v3(hwmgr,
+ VOLTAGE_TYPE_LEDDPM, VOLTAGE_OBJ_GPIO_LUT,
+ &param_led_dpm);
+ if (result == 0) {
+ int i, j;
+ u32 tmp = param_led_dpm.mask_low;
+
+ for (i = 0, j = 0; i < 32; i++) {
+ if (tmp & 1) {
+ mask |= (i << (8 * j));
+ if (++j >= 3)
+ break;
+ }
+ tmp >>= 1;
+ }
+ }
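+ /* Example: if mask_low has bits 3, 5 and 9 set, the loop above packs
+ * the pin indices into bytes, giving mask = 0x00090503.
+ */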
+ if (mask)
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_LedConfig,
+ mask);
+ return 0;
+}
+
+static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ uint8_t i;
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+
+ fiji_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ fiji_populate_smc_voltage_tables(hwmgr, table);
+
+ table->SystemFlags = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = fiji_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = fiji_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result);
+
+ result = fiji_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result);
+
+ result = fiji_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result);
+
+ result = fiji_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result);
+
+ result = fiji_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result);
+
+ result = fiji_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACP Level!", return result);
+
+ result = fiji_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result);
+
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = fiji_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result);
+
+ result = fiji_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result);
+
+ result = fiji_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result);
+
+ result = fiji_populate_smc_initailial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot State!", return result);
+
+ result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate BAPM Parameters!", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = fiji_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate Clock Stretcher Data Table!",
+ return result);
+ }
+
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
+ table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
+
+ result = fiji_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate VRConfig setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin)) {
+ table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ /* Thermal Output GPIO */
+ if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+ &gpio_pin)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+ /* For polarity, read GPIOPAD_A with the assigned GPIO pin
+ * since VBIOS will program this register to set 'inactive state',
+ * driver can then determine 'active state' from this and
+ * program SMU with correct polarity
+ */
+ table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+ (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot) &&
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CombinePCCWithThermalSignal))
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ } else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
+ SMC_RAM_END);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result);
+
+ result = fiji_init_arb_table_index(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload arb data to SMC memory!", return result);
+
+ result = fiji_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+
+ result = fiji_setup_dpm_led_config(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to setup dpm led config", return result);
+
+ fiji_save_default_power_profile(hwmgr);
+
+ return 0;
+}
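
Editorial note: the temperature limit fields above are written in Q8.8 fixed point; the target operating temperature from the cac_dtp table is multiplied by SMU7_Q88_FORMAT_CONVERSION_UNIT before being byte-swapped for the SMC. A minimal standalone sketch of that conversion, assuming the conversion unit is 256 (1.0 in Q8.8) and an illustrative 80 C target (both values are assumptions, not taken from this patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint32_t q88_unit = 256;        /* assumed value of SMU7_Q88_FORMAT_CONVERSION_UNIT */
        uint16_t target_temp = 80;            /* illustrative usTargetOperatingTemp, degrees C */
        uint16_t limit_high = target_temp * q88_unit;        /* 20480 == 80.0 in Q8.8 */
        uint16_t limit_low  = (target_temp - 1) * q88_unit;  /* 20224 == 79.0 in Q8.8 */

        printf("TemperatureLimitHigh=%u TemperatureLimitLow=%u\n", limit_high, limit_low);
        return 0;
    }
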
+
+static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ if (smu_data->smu7_data.fan_table_start == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (duty100 == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+ usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->
+ thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+ thermal_controller.advanceFanControlParameters.ulCycleDelay *
+ reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+ hwmgr->device, CGS_IND_REG__SMC,
+ CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFanMinPwm,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFanSclkTarget,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+ if (res)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ return 0;
+}
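
Editorial note: the fan table above packs several unit conversions into integer math; usPWMMin is in 0.01% units so the duty-cycle floor is pwm_min * duty100 / 10000, the "+ 50" terms round the centidegree-to-degree divisions, and the slopes are scaled by 16/100. A small worked sketch with made-up thermal-controller values (the real ones come from the VBIOS):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t duty100 = 255;                  /* illustrative FMAX_DUTY100 readback */
        uint32_t pwm_min = 1000, pwm_med = 4000; /* 0.01% units: 10% and 40% */
        uint32_t t_min = 4500, t_med = 6500;     /* centidegrees: 45 C and 65 C */

        uint16_t fdo_min = (uint16_t)((uint64_t)pwm_min * duty100 / 10000);  /* 25 */
        uint32_t t_diff1 = t_med - t_min;        /* 2000 */
        uint32_t pwm_diff1 = pwm_med - pwm_min;  /* 3000 */
        uint16_t slope1 = (uint16_t)((50 + (16 * duty100 * pwm_diff1) / t_diff1) / 100); /* 61 */
        uint16_t temp_min = (50 + t_min) / 100;  /* 45, rounded to whole degrees */

        printf("fdo_min=%u slope1=%u TempMin=%u\n", fdo_min, slope1, temp_min);
        return 0;
    }
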
+
+
+static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+
+ if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+ return 0;
+
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+
+ if (!ret)
+ /* If this param is not changed, this function could fire unnecessarily */
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+ return ret;
+}
+
+static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return fiji_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+ result = fiji_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+ return result;
+}
+
+static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU73_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU73_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU73_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU73_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU73_SoftRegisters, UcodeLoadStatus);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+static uint32_t fiji_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU73_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU73_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU73_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU73_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU73_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU73_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU73_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU73_MAX_LEVELS_MVDD;
+ }
+
+ pr_warn("can't get the mac of %x\n", value);
+ return 0;
+}
+
+
+static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable,
+ UvdBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
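
Editorial note: UvdBootLevel is a single byte inside an SMC dword, so the code above aligns the byte offset down to a 4-byte boundary and then read-modify-writes only that byte lane (bits 31:24 here; the VCE and SAMU functions below use bits 23:16 and 7:0). A standalone sketch of the masking, with invented values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t offset = 0x2001e;               /* invented byte offset of UvdBootLevel */
        uint32_t aligned = (offset / 4) * 4;     /* 0x2001c: dword containing the field */
        uint32_t dword = 0x11223344;             /* pretend value read back from SMC RAM */
        uint8_t boot_level = 3;

        dword &= 0x00FFFFFF;                     /* clear the top byte lane */
        dword |= (uint32_t)boot_level << 24;     /* insert the new boot level */

        printf("aligned=0x%x dword=0x%08x\n", aligned, dword);  /* dword=0x03223344 */
        return 0;
    }
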
+
+static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ else
+ smu_data->smc_state_table.VceBootLevel = 0;
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ fiji_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ fiji_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ fiji_update_samu_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->smu7_data.soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU73_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (0 != result);
+
+ return error ? -1 : 0;
+}
+
+static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+
+ /* Program additional LP registers
+ * that are no longer programmed by VBIOS
+ */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+
+ return 0;
+}
+
+static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+ ? true : false;
+}
+
+static int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request)
+{
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)
+ (hwmgr->smu_backend);
+ struct SMU73_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
+ SMU73_MAX_LEVELS_GRAPHICS;
+ uint32_t i;
+
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ levels[i].ActivityLevel =
+ cpu_to_be16(request->activity_threshold);
+ levels[i].EnabledForActivity = 1;
+ levels[i].UpHyst = request->up_hyst;
+ levels[i].DownHyst = request->down_hyst;
+ }
+
+ return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ array_size, SMC_RAM_END);
+}
const struct pp_smumgr_func fiji_smu_funcs = {
.smu_init = &fiji_smu_init,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index 175bf9f8ef9c..279647772578 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -28,6 +28,15 @@
#include "smu7_smumgr.h"
+struct fiji_pt_defaults {
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+ uint8_t TdcWaterfallCtl;
+ uint8_t DTEAmbientTempBase;
+};
+
struct fiji_smumgr {
struct smu7_smumgr smu7_data;
struct SMU73_Discrete_DpmTable smc_state_table;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 0bf2def3b659..34128822b8fb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -30,64 +30,133 @@
#include "smumgr.h"
#include "iceland_smumgr.h"
-#include "smu_ucode_xfer_vi.h"
+
#include "ppsmc.h"
+
+#include "cgs_common.h"
+
+#include "smu7_dyn_defaults.h"
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+#include "pp_endian.h"
+#include "processpptables.h"
+
+
#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"
-#include "cgs_common.h"
-#include "iceland_smc.h"
+#include "smu71_discrete.h"
+
+#include "smu_ucode_xfer_vi.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
#define ICELAND_SMC_SIZE 0x20000
-static int iceland_start_smc(struct pp_smumgr *smumgr)
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define VDDC_VDDCI_DELTA 200
+
+#define DEVICE_ID_VI_ICELAND_M_6900 0x6900
+#define DEVICE_ID_VI_ICELAND_M_6901 0x6901
+#define DEVICE_ID_VI_ICELAND_M_6902 0x6902
+#define DEVICE_ID_VI_ICELAND_M_6903 0x6903
+
+static const struct iceland_pt_defaults defaults_iceland = {
+ /*
+ * SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc,
+ * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+/* 35W - XT, XTL */
+static const struct iceland_pt_defaults defaults_icelandxt = {
+ /*
+ * SviLoadLineEn, SviLoadLineVddC,
+ * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+ * BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+ { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0},
+ { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+/* 25W - PRO, LE */
+static const struct iceland_pt_defaults defaults_icelandpro = {
+ /*
+ * SviLoadLineEn, SviLoadLineVddC,
+ * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac,
+ * BAPM_TEMP_GRADIENT
+ */
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0,
+ { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0},
+ { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0}
+};
+
+static int iceland_start_smc(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
return 0;
}
-static void iceland_reset_smc(struct pp_smumgr *smumgr)
+static void iceland_reset_smc(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL,
rst_reg, 1);
}
-static void iceland_stop_smc_clock(struct pp_smumgr *smumgr)
+static void iceland_stop_smc_clock(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0,
ck_disable, 1);
}
-static void iceland_start_smc_clock(struct pp_smumgr *smumgr)
+static void iceland_start_smc_clock(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0,
ck_disable, 0);
}
-static int iceland_smu_start_smc(struct pp_smumgr *smumgr)
+static int iceland_smu_start_smc(struct pp_hwmgr *hwmgr)
{
/* set smc instruct start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
/* enable smc clock */
- iceland_start_smc_clock(smumgr);
+ iceland_start_smc_clock(hwmgr);
/* de-assert reset */
- iceland_start_smc(smumgr);
+ iceland_start_smc(hwmgr);
- SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS,
+ PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
INTERRUPTS_ENABLED, 1);
return 0;
}
-static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
+static int iceland_upload_smc_firmware_data(struct pp_hwmgr *hwmgr,
uint32_t length, const uint8_t *src,
uint32_t limit, uint32_t start_addr)
{
@@ -96,34 +165,34 @@ static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
while (byte_count >= 4) {
data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
}
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);
return 0;
}
-static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
+static int iceland_smu_upload_firmware_image(struct pp_hwmgr *hwmgr)
{
uint32_t val;
struct cgs_firmware_info info = {0};
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
/* load SMC firmware */
- cgs_get_firmware_info(smumgr->device,
+ cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
if (info.image_size & 3) {
@@ -137,68 +206,61 @@ static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
}
/* wait for smc boot up */
- SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
/* clear firmware interrupt enable flag */
- val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ val = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMC_SYSCON_MISC_CNTL);
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMC_SYSCON_MISC_CNTL, val | 1);
/* stop smc clock */
- iceland_stop_smc_clock(smumgr);
+ iceland_stop_smc_clock(hwmgr);
/* reset smc */
- iceland_reset_smc(smumgr);
- iceland_upload_smc_firmware_data(smumgr, info.image_size,
+ iceland_reset_smc(hwmgr);
+ iceland_upload_smc_firmware_data(hwmgr, info.image_size,
(uint8_t *)info.kptr, ICELAND_SMC_SIZE,
info.ucode_start_address);
return 0;
}
-static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
+static int iceland_request_smu_load_specific_fw(struct pp_hwmgr *hwmgr,
uint32_t firmwareType)
{
return 0;
}
-static int iceland_start_smu(struct pp_smumgr *smumgr)
+static int iceland_start_smu(struct pp_hwmgr *hwmgr)
{
int result;
- result = iceland_smu_upload_firmware_image(smumgr);
+ result = iceland_smu_upload_firmware_image(hwmgr);
if (result)
return result;
- result = iceland_smu_start_smc(smumgr);
+ result = iceland_smu_start_smc(hwmgr);
if (result)
return result;
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(hwmgr)) {
pr_info("smu not running, upload firmware again \n");
- result = iceland_smu_upload_firmware_image(smumgr);
+ result = iceland_smu_upload_firmware_image(hwmgr);
if (result)
return result;
- result = iceland_smu_start_smc(smumgr);
+ result = iceland_smu_start_smc(hwmgr);
if (result)
return result;
}
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
-/**
- * Write a 32bit value to the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param smumgr the address of the powerplay hardware manager.
- * @param smcAddress the address in the SMC RAM to access.
- * @param value to write to the SMC SRAM.
- */
-static int iceland_smu_init(struct pp_smumgr *smumgr)
+static int iceland_smu_init(struct pp_hwmgr *hwmgr)
{
int i;
struct iceland_smumgr *iceland_priv = NULL;
@@ -208,9 +270,9 @@ static int iceland_smu_init(struct pp_smumgr *smumgr)
if (iceland_priv == NULL)
return -ENOMEM;
- smumgr->backend = iceland_priv;
+ hwmgr->smu_backend = iceland_priv;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++)
@@ -219,6 +281,2413 @@ static int iceland_smu_init(struct pp_smumgr *smumgr)
return 0;
}
+
+static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ struct cgs_system_info sys_info = {0};
+ uint32_t dev_id;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ switch (dev_id) {
+ case DEVICE_ID_VI_ICELAND_M_6900:
+ case DEVICE_ID_VI_ICELAND_M_6903:
+ smu_data->power_tune_defaults = &defaults_icelandxt;
+ break;
+
+ case DEVICE_ID_VI_ICELAND_M_6901:
+ case DEVICE_ID_VI_ICELAND_M_6902:
+ smu_data->power_tune_defaults = &defaults_icelandpro;
+ break;
+ default:
+ smu_data->power_tune_defaults = &defaults_iceland;
+ pr_warn("Unknown V.I. Device ID.\n");
+ break;
+ }
+ return;
+}
+
+static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->tdc_vddc_throttle_release_limit_perc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+ return 0;
+}
+
+static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+ return -EINVAL);
+ else
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 8; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+
+ HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+ return 0;
+}
+
+static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+ uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+
+ PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
+ "The CAC Leakage table does not exist!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
+ "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+ "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
+ for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+ lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
+ hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+ }
+ } else {
+ PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
+ }
+
+ return 0;
+}
+
+static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint8_t *vid = smu_data->power_tune_table.VddCVid;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
+ "There should never be more than 8 entries for VddcVid!!!",
+ return -EINVAL);
+
+ for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
+ vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+ }
+
+ return 0;
+}
+
+
+
+static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ /* DW0 - DW3 */
+ if (iceland_populate_bapm_vddc_vid_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate bapm vddc vid Failed!",
+ return -EINVAL);
+
+ /* DW4 - DW5 */
+ if (iceland_populate_vddc_vid(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate vddc vid Failed!",
+ return -EINVAL);
+
+ /* DW6 */
+ if (iceland_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+ /* DW7 */
+ if (iceland_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+ /* DW8 */
+ if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (0 != iceland_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ /* DW13-DW16 */
+ if (iceland_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ /* DW18 */
+ if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
+ return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int iceland_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, uint32_t *vol)
+{
+ uint32_t i = 0;
+
+ /* clock - voltage dependency table is empty table */
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ *vol = allowed_clock_voltage_table->entries[i].v;
+ return 0;
+ }
+ }
+
+ /* sclk is bigger than max sclk in the dependence table */
+ *vol = allowed_clock_voltage_table->entries[i - 1].v;
+
+ return 0;
+}
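
Editorial note: iceland_get_dependency_volt_by_clk above returns the voltage of the first dependency-table entry whose clock is at least the requested clock, and clamps to the last entry when the request exceeds the table's maximum. A tiny sketch of that lookup rule against an invented three-entry table:

    #include <stdint.h>
    #include <stdio.h>

    struct dep_entry { uint32_t clk, v; };

    /* Same rule as iceland_get_dependency_volt_by_clk: first entry with
     * clk >= request, otherwise the last entry's voltage. */
    static uint32_t lookup_volt(const struct dep_entry *tab, unsigned count, uint32_t clock)
    {
        unsigned i;

        for (i = 0; i < count; i++)
            if (tab[i].clk >= clock)
                return tab[i].v;
        return tab[count - 1].v;
    }

    int main(void)
    {
        const struct dep_entry tab[] = {          /* invented sclk/vddc pairs */
            { 30000, 800 }, { 60000, 900 }, { 90000, 1000 },
        };

        printf("%u %u\n", lookup_volt(tab, 3, 45000),    /* 900 */
                          lookup_volt(tab, 3, 120000));  /* 1000, clamped to last entry */
        return 0;
    }
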
+
+static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
+ pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
+ uint16_t *lo)
+{
+ uint16_t v_index;
+ bool vol_found = false;
+ *hi = tab->value * VOLTAGE_SCALE;
+ *lo = tab->value * VOLTAGE_SCALE;
+
+ /* SCLK/VDDC Dependency Table has to exist. */
+ PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
+ "The SCLK/VDDC Dependency Table does not exist.\n",
+ return -EINVAL);
+
+ if (NULL == hwmgr->dyn_state.cac_leakage_table) {
+ pr_warn("CAC Leakage Table does not exist, using vddc.\n");
+ return 0;
+ }
+
+ /*
+ * Since voltage in the sclk/vddc dependency table is not
+ * necessarily in ascending order because of ELB voltage
+ * patching, loop through entire list to find exact voltage.
+ */
+ for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+ if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+ vol_found = true;
+ if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
+ } else {
+ pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+ }
+ break;
+ }
+ }
+
+ /*
+ * If voltage is not found in the first pass, loop again to
+ * find the best match, equal or higher value.
+ */
+ if (!vol_found) {
+ for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+ if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+ vol_found = true;
+ if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
+ } else {
+ pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+ }
+ break;
+ }
+ }
+
+ if (!vol_found)
+ pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
+ }
+
+ return 0;
+}
+
+static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+ pp_atomctrl_voltage_table_entry *tab,
+ SMU71_Discrete_VoltageLevel *smc_voltage_tab)
+{
+ int result;
+
+ result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
+ &smc_voltage_tab->StdVoltageHiSidd,
+ &smc_voltage_tab->StdVoltageLoSidd);
+ if (0 != result) {
+ smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
+ smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
+ }
+
+ smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+ CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
+
+ return 0;
+}
+
+static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->VddcLevelCount = data->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->vddc_voltage_table.entries[count]),
+ &(table->VddcLevel[count]));
+ PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL);
+
+ /* GPIO voltage control */
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
+ table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ table->VddcLevel[count].Smio = 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+
+ return 0;
+}
+
+static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+ int result;
+
+ table->VddciLevelCount = data->vddci_voltage_table.count;
+
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->vddci_voltage_table.entries[count]),
+ &(table->VddciLevel[count]));
+ PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL);
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
+ else
+ table->VddciLevel[count].Smio |= 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+ return 0;
+}
+
+static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+ int result;
+
+ table->MvddLevelCount = data->mvdd_voltage_table.count;
+
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ result = iceland_populate_smc_voltage_table(hwmgr,
+ &(data->mvdd_voltage_table.entries[count]),
+ &table->MvddLevel[count]);
+ PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL);
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
+ table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
+ else
+ table->MvddLevel[count].Smio |= 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+
+ return 0;
+}
+
+
+static int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = iceland_populate_smc_vddc_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate VDDC voltage table to SMC", return -EINVAL);
+
+ result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate VDDCI voltage table to SMC", return -EINVAL);
+
+ result = iceland_populate_smc_mvdd_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "can not populate MVDD voltage table to SMC", return -EINVAL);
+
+ return 0;
+}
+
+static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU71_Discrete_Ulv *state)
+{
+ uint32_t voltage_response_time, ulv_voltage;
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
+ PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
+
+ if (ulv_voltage == 0) {
+ data->ulv_supported = false;
+ return 0;
+ }
+
+ if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+ if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+ state->VddcOffset = 0;
+ else
+ /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
+ state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
+ } else {
+ /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+ if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+ state->VddcOffsetVid = 0;
+ else /* used in SVI2 Mode */
+ state->VddcOffsetVid = (uint8_t)(
+ (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
+ * VOLTAGE_VID_OFFSET_SCALE2
+ / VOLTAGE_VID_OFFSET_SCALE1);
+ }
+ state->VddcPhase = 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
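
Editorial note: in the SVI2 branch above the ULV offset is converted from millivolts to VID steps with VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1, i.e. * 100 / 625, which treats one VID step as 6.25 mV. A quick sketch with illustrative voltages (not values from this patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t min_vddc = 900;     /* illustrative entries[0].v, in mV */
        uint32_t ulv_voltage = 850;  /* illustrative ULV voltage from the pptable, in mV */

        /* 50 mV difference * 100 / 625 = 8 VID steps of 6.25 mV each */
        uint8_t vid_offset = (uint8_t)((min_vddc - ulv_voltage) * 100 / 625);

        printf("VddcOffsetVid=%u\n", vid_offset);  /* 8 */
        return 0;
    }
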
+
+static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_Ulv *ulv_level)
+{
+ return iceland_populate_ulv_level(hwmgr, ulv_level);
+}
+
+static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint32_t i;
+
+ /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity =
+ 1;
+ table->LinkLevel[i].SPC =
+ (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold =
+ PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold =
+ PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+static int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t reference_clock;
+ uint32_t reference_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+ reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+ reference_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider*/
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup*/
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ pp_atomctrl_internal_ss_info ss_info;
+
+ uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+ if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ */
+ /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+ uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 =
+ PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+ }
+ }
+
+ sclk->SclkFrequency = engine_clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
+
+static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
+ const struct phm_phase_shedding_limits_table *pl,
+ uint32_t sclk, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ /* use the minimum phase shedding */
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (sclk < pl->entries[i].Sclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+ return 0;
+}
+
+static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint16_t sclk_activity_level_threshold,
+ SMU71_Discrete_GraphicsLevel *graphic_level)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+ /* populate graphics levels*/
+ result = iceland_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
+ &graphic_level->MinVddc);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for VDDC \
+ engine clock dependency table", return result);
+
+ /* SCLK frequency in units of 10KHz*/
+ graphic_level->SclkFrequency = engine_clock;
+ graphic_level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control)
+ iceland_populate_phase_value_based_on_sclk(hwmgr,
+ hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ engine_clock,
+ &graphic_level->MinVddcPhases);
+
+ /* Indicates maximum activity level for this performance level. 50% for now*/
+ graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+ graphic_level->CcPwrDynRm = 0;
+ graphic_level->CcPwrDynRm1 = 0;
+ /* this level can be used if activity is high enough.*/
+ graphic_level->EnabledForActivity = 0;
+ /* this level can be used for throttling.*/
+ graphic_level->EnabledForThrottle = 1;
+ graphic_level->UpHyst = 0;
+ graphic_level->DownHyst = 100;
+ graphic_level->VoltageDownHyst = 0;
+ graphic_level->PowerThrottle = 0;
+
+ data->display_timing.min_clock_in_sr =
+ hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ graphic_level->DeepSleepDivId =
+ smu7_get_sleep_divider_id_from_clock(engine_clock,
+ data->display_timing.min_clock_in_sr);
+
+ /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
+ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (0 == result) {
+ graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+ }
+
+ return result;
+}
+
+static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
+
+ uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) *
+ SMU71_MAX_LEVELS_GRAPHICS;
+
+ SMU71_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+ uint32_t i;
+ uint8_t highest_pcie_level_enabled = 0;
+ uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+ uint8_t count = 0;
+ int result = 0;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = iceland_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result != 0)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ }
+
+ /* Only enable level 0 for now. */
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ if (dpm_table->sclk_table.count > 1)
+ smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (highest_pcie_level_enabled + 1))) != 0) {
+ highest_pcie_level_enabled++;
+ }
+
+ while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0) {
+ lowest_pcie_level_enabled++;
+ }
+
+ while ((count < highest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
+ count++;
+ }
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
+
+
+ /* set pcieDpmLevel to highest_pcie_level_enabled*/
+ for (i = 2; i < dpm_table->sclk_table.count; i++) {
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+ }
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr, level_array_adress,
+ (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
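
Editorial note: the three while loops above derive the lowest, middle and highest enabled PCIe DPM levels from the pcie_dpm_enable_mask bit field; GraphicsLevel[0] then gets the lowest, GraphicsLevel[1] the middle, and the remaining levels the highest. A standalone sketch of that scan for a made-up mask with levels 1..3 enabled:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t mask = 0x0e;   /* made-up pcie_dpm_enable_mask: levels 1, 2, 3 */
        uint8_t highest = 0, lowest = 0, count = 0, mid;

        while (mask & (1u << (highest + 1)))
            highest++;                              /* stops at 3 */
        while (!(mask & (1u << lowest)))
            lowest++;                               /* stops at 1 */
        while (count < highest &&
               !(mask & (1u << (lowest + 1 + count))))
            count++;                                /* stays 0: bit 2 is set */
        mid = (lowest + 1 + count) < highest ?
              (uint8_t)(lowest + 1 + count) : highest;  /* 2 */

        printf("lowest=%u mid=%u highest=%u\n", lowest, mid, highest);
        return 0;
    }
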
+
+static int iceland_calculate_mclk_params(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU71_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dllStateOn
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+ uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+ uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+ uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+ uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+ uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+ uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+ uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+ pp_atomctrl_memory_clock_param mpll_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+ memory_clock, &mpll_param, strobe_mode);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Error retrieving Memory Clock Parameters from VBIOS.", return result);
+
+ /* MPLL_FUNC_CNTL setup*/
+ mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
+
+ /* MPLL_FUNC_CNTL_1 setup*/
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
+
+ /* MPLL_AD_FUNC_CNTL setup*/
+ mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+ MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+
+ if (data->is_memory_gddr5) {
+ /* MPLL_DQ_FUNC_CNTL setup*/
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+ /*
+ ************************************
+ Fref = Reference Frequency
+ NF = Feedback divider ratio
+ NR = Reference divider ratio
+ Fnom = Nominal VCO output frequency = Fref * NF / NR
+ Fs = Spreading Rate
+ D = Percentage down-spread / 2
+ Fint = Reference input frequency to PFD = Fref / NR
+ NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+ CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+ NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+ CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+ *************************************
+ */
+ pp_atomctrl_internal_ss_info ss_info;
+ uint32_t freq_nom;
+ uint32_t tmp;
+ uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+ /* for GDDR5 for all modes and DDR3 */
+ if (1 == mpll_param.qdr)
+ freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+ else
+ freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+ /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+
+ if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+ /* ss_info.speed_spectrum_percentage -- in units of 0.01% */
+ /* ss_info.speed_spectrum_rate -- in units of kHz */
+ /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+ /* = reference_clock * 5 / speed_spectrum_rate */
+ uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+ /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+ /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+ uint32_t clkv =
+ (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+ ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+ mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+ }
+ }
+
+ /* MCLK_PWRMGT_CNTL setup */
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+
+ /* Save the result data to the output memory level structure */
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
+
+static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
+ bool strobe_mode)
+{
+ uint8_t mc_para_index;
+
+ if (strobe_mode) {
+ if (memory_clock < 12500) {
+ mc_para_index = 0x00;
+ } else if (memory_clock > 47500) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+ }
+ } else {
+ if (memory_clock < 65000) {
+ mc_para_index = 0x00;
+ } else if (memory_clock > 135000) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+ }
+ }
+
+ return mc_para_index;
+}
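+
+/*
+ * Illustrative example (hypothetical value): in strobe mode a memory_clock
+ * of 30000 (300 MHz in 10 kHz units) falls inside the 12500..47500 window,
+ * so mc_para_index = (30000 - 10000) / 2500 = 8; out-of-range clocks clamp
+ * to 0x00 or 0x0f.
+ */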
+
+static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+ uint8_t mc_para_index;
+
+ if (memory_clock < 10000) {
+ mc_para_index = 0;
+ } else if (memory_clock >= 80000) {
+ mc_para_index = 0x0f;
+ } else {
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+ }
+
+ return mc_para_index;
+}
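+
+/*
+ * Illustrative example (hypothetical value): a DDR3 memory_clock of 40000
+ * (400 MHz in 10 kHz units) maps to (40000 - 10000) / 5000 + 1 = 7; clocks
+ * below 10000 yield 0 and clocks at or above 80000 clamp to 0x0f.
+ */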
+
+static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+ uint32_t memory_clock, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (memory_clock < pl->entries[i].Mclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_populate_single_memory_level(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU71_Discrete_MemoryLevel *memory_level
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int result = 0;
+ bool dll_state_on;
+ struct cgs_display_info info = {0};
+ uint32_t mclk_edc_wr_enable_threshold = 40000;
+ uint32_t mclk_edc_enable_threshold = 40000;
+ uint32_t mclk_strobe_mode_threshold = 40000;
+
+ if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
+ result = iceland_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
+ }
+
+ if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
+ memory_level->MinVddci = memory_level->MinVddc;
+ } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+ result = iceland_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddci_dependency_on_mclk,
+ memory_clock,
+ &memory_level->MinVddci);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
+ }
+
+ memory_level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control) {
+ iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ memory_clock, &memory_level->MinVddcPhases);
+ }
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 0;
+ memory_level->UpHyst = 0;
+ memory_level->DownHyst = 100;
+ memory_level->VoltageDownHyst = 0;
+
+ /* Indicates maximum activity level for this performance level.*/
+ memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ memory_level->StutterEnable = 0;
+ memory_level->StrobeEnable = 0;
+ memory_level->EdcReadEnable = 0;
+ memory_level->EdcWriteEnable = 0;
+ memory_level->RttEnable = 0;
+
+ /* default set to low watermark. Highest level will be set to high later.*/
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ data->display_timing.num_existing_displays = info.display_count;
+
+ /* stutter mode is not supported on Iceland */
+
+ /* decide strobe mode*/
+ memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+ (memory_clock <= mclk_strobe_mode_threshold);
+
+ /* decide EDC mode and memory clock ratio*/
+ if (data->is_memory_gddr5) {
+ memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
+ memory_level->StrobeEnable);
+
+ if ((mclk_edc_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_enable_threshold)) {
+ memory_level->EdcReadEnable = 1;
+ }
+
+ if ((mclk_edc_wr_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_wr_enable_threshold)) {
+ memory_level->EdcWriteEnable = 1;
+ }
+
+ if (memory_level->StrobeEnable) {
+ if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
+ ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ else
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+ } else
+ dll_state_on = data->dll_default_on;
+ } else {
+ memory_level->StrobeRatio =
+ iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ result = iceland_calculate_mclk_params(hwmgr,
+ memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+ if (0 == result) {
+ memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
+ memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
+ memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
+ /* MCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+ /* Indicates maximum activity level for this performance level.*/
+ CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+ }
+
+ return result;
+}
+
+static int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
+ SMU71_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero", return -EINVAL);
+ result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+ &(smu_data->smc_state_table.MemoryLevel[i]));
+ if (0 != result) {
+ return result;
+ }
+ }
+
+ /* Only enable level 0 for now.*/
+ smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+ /*
+ * In order to prevent MC activity in stutter mode from pushing DPM up,
+ * the UVD change complements this by putting the MCLK in a higher state
+ * by default, such that we are not affected by the up threshold or MCLK
+ * DPM latency.
+ */
+ smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high*/
+ smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
+static int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
+ SMU71_Discrete_VoltageLevel *voltage)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find the first MVDD entry whose clock is greater than or equal to the request */
+ for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
+ if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
+ /* Always round to higher voltage. */
+ voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
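+
+ /*
+ * Illustrative example (hypothetical entries): for dependency clocks
+ * {20000, 40000, 60000} a requested mclk of 35000 selects index 1,
+ * i.e. the first entry whose clock is >= the request, so the voltage
+ * is rounded up rather than down.
+ */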
+
+ PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
+ "MVDD Voltage is outside the supported range.", return -EINVAL);
+
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result = 0;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t vddc_phase_shed_control = 0;
+
+ SMU71_Discrete_VoltageLevel voltage_level;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+
+ /* The ACPI state should not do DPM on DC (or ever).*/
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (data->acpi_vddc)
+ table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
+ else
+ table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
+
+ table->ACPILevel.MinVddcPhases = vddc_phase_shed_control ? 0 : 1;
+ /* assign zero for now*/
+ table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* divider ID for required SCLK*/
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
+ CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+ /* For various features to be enabled/disabled while this level is active.*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ /* SCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+ table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+ table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
+ else {
+ if (data->acpi_vddci != 0)
+ table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
+ }
+
+ if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ /* Force reset on DLL*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+ /* Disable DLL in ACPIState*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+ /* Enable DLL bypass signal*/
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK0_BYPASS, 0);
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK1_BYPASS, 0);
+
+ table->MemoryACPILevel.DllCntl =
+ PP_HOST_TO_SMC_UL(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl =
+ PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+ table->MemoryACPILevel.MpllSs1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+ table->MemoryACPILevel.MpllSs2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ /* Indicates maximum activity level for this performance level.*/
+ table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = 0;
+ table->MemoryACPILevel.StrobeEnable = 0;
+ table->MemoryACPILevel.EdcReadEnable = 0;
+ table->MemoryACPILevel.EdcWriteEnable = 0;
+ table->MemoryACPILevel.RttEnable = 0;
+
+ return result;
+}
+
+static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ return 0;
+}
+
+static int iceland_populate_memory_timing_parameters(
+ struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint32_t memory_clock,
+ struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
+ )
+{
+ uint32_t dramTiming;
+ uint32_t dramTiming2;
+ uint32_t burstTime;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ engine_clock, memory_clock);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+ return 0;
+}
+
+static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ int result = 0;
+ SMU71_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+
+ memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = iceland_populate_memory_timing_parameters
+ (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+
+ if (0 != result) {
+ break;
+ }
+ }
+ }
+
+ if (0 == result) {
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU71_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END
+ );
+ }
+
+ return result;
+}
+
+static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table*/
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+ if (0 != result) {
+ smu_data->smc_state_table.GraphicsBootLevel = 0;
+ pr_err("VBIOS did not find boot engine clock value \
+ in dependency table. Using Graphics DPM level 0!");
+ result = 0;
+ }
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+ if (0 != result) {
+ smu_data->smc_state_table.MemoryBootLevel = 0;
+ pr_err("VBIOS did not find boot engine clock value \
+ in dependency table. Using Memory DPM level 0!");
+ result = 0;
+ }
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ table->BootVddci = table->BootVddc;
+ else
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
+
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+ return result;
+}
+
+static int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_MCRegisters *mc_reg_table)
+{
+ const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)hwmgr->smu_backend;
+
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+ if (smu_data->mc_reg_table.validflag & 1<<j) {
+ PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ "Index of mc_reg_table->address[] array out of boundary", return -EINVAL);
+ mc_reg_table->address[i].s0 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (uint8_t)i;
+
+ return 0;
+}
+
+/* convert register values from driver to SMC format */
+static void iceland_convert_mc_registers(
+ const struct iceland_mc_reg_entry *entry,
+ SMU71_Discrete_MCRegisterSet *data,
+ uint32_t num_entries, uint32_t valid_flag)
+{
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & 1<<j) {
+ data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
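+
+/*
+ * Illustrative example (hypothetical flag value): valid_flag is a bitmap
+ * over the driver-side register list, so a valid_flag of 0b101 copies
+ * entries 0 and 2 into data->value[0] and data->value[1], keeping only the
+ * registers whose values actually differ between AC timing entries (see
+ * iceland_set_valid_flag below).
+ */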
+
+static int iceland_convert_mc_reg_table_entry_to_smc(struct pp_hwmgr *hwmgr,
+ const uint32_t memory_clock,
+ SMU71_Discrete_MCRegisterSet *mc_reg_table_data
+ )
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint32_t i = 0;
+
+ for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+ if (memory_clock <=
+ smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+ break;
+ }
+ }
+
+ if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ iceland_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, smu_data->mc_reg_table.last,
+ smu_data->mc_reg_table.validflag);
+
+ return 0;
+}
+
+static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_MCRegisters *mc_regs)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int res;
+ uint32_t i;
+
+ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ res = iceland_convert_mc_reg_table_entry_to_smc(
+ hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_regs->data[i]
+ );
+
+ if (0 != res)
+ result = res;
+ }
+
+ return result;
+}
+
+static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t address;
+ int32_t result;
+
+ if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+
+ memset(&smu_data->mc_regs, 0, sizeof(SMU71_Discrete_MCRegisters));
+
+ result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+ if (result != 0)
+ return result;
+
+
+ address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
+
+ return smu7_copy_bytes_to_smc(hwmgr, address,
+ (uint8_t *)&smu_data->mc_regs.data[0],
+ sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+ SMC_RAM_END);
+}
+
+static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+
+ memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
+ result = iceland_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize MCRegTable for the MC register addresses!", return result;);
+
+ result = iceland_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize MCRegTable for driver state!", return result;);
+
+ return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
+ (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ uint8_t count, level;
+
+ count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
+ >= data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
+ >= data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+ struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
+ struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
+ const uint16_t *def1, *def2;
+ int i, j, k;
+
+
+ /*
+ * The number of TDP fraction bits was changed from 8 to 7 for Iceland,
+ * as requested by the SMC team.
+ */
+
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+
+ dpm_table->DTETjOffset = 0;
+
+ dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+ /* The following are for new Iceland Multi-input fan/thermal control */
+ if (NULL != ppm) {
+ dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
+ dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
+ } else {
+ dpm_table->PPM_PkgPwrLimit = 0;
+ dpm_table->PPM_TemperatureLimit = 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
+
+ dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
+ def1 = defaults->bapmti_r;
+ def2 = defaults->bapmti_rc;
+
+ for (i = 0; i < SMU71_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU71_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU71_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
+ dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
+ def1++;
+ def2++;
+ }
+ }
+ }
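+
+ /*
+ * Note: def1/def2 walk the flattened bapmti_r/bapmti_rc arrays (length
+ * SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS) in the
+ * same row-major order as the [i][j][k] indexing of the destination
+ * tables.
+ */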
+
+ return 0;
+}
+
+static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
+ SMU71_Discrete_DpmTable *tab)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ tab->SVI2Enable |= VDDC_ON_SVI2;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ tab->SVI2Enable |= VDDCI_ON_SVI2;
+ else
+ tab->MergedVddci = 1;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
+ tab->SVI2Enable |= MVDD_ON_SVI2;
+
+ PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
+ (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
+
+ return 0;
+}
+
+static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+
+
+ iceland_initialize_power_tune_defaults(hwmgr);
+ memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) {
+ iceland_populate_smc_voltage_tables(hwmgr, table);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+
+ if (data->ulv_supported) {
+ result = iceland_populate_ulv_state(hwmgr, &(smu_data->ulv_setting));
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result;);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = iceland_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result;);
+
+ result = iceland_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result;);
+
+ result = iceland_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result;);
+
+ result = iceland_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result;);
+
+ result = iceland_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result;);
+
+ result = iceland_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACP Level!", return result;);
+
+ result = iceland_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result;);
+
+ /*
+ * Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state), we only need
+ * to populate the ARB settings for the initial state.
+ */
+ result = iceland_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result;);
+
+ result = iceland_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result;);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ result = iceland_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result;);
+
+ result = iceland_populate_smc_initial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
+
+ result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
+
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+
+ table->TemperatureLimitHigh =
+ (data->thermal_temp_setting.temperature_high *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ table->TemperatureLimitLow =
+ (data->thermal_temp_setting.temperature_low *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0;
+ table->PCIeGenInterval = 1;
+
+ result = iceland_populate_smc_svi2_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate SVI2 setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
+ table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
+ table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
+
+ /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+ result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
+ SMC_RAM_END);
+
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result;);
+
+ /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ smu_data->smu7_data.ulv_setting_starts,
+ (uint8_t *)&(smu_data->ulv_setting),
+ sizeof(SMU71_Discrete_Ulv),
+ SMC_RAM_END);
+
+
+ result = iceland_populate_initial_mc_reg_table(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to populate initialize MC Reg table!", return result);
+
+ result = iceland_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+
+ return 0;
+}
+
+int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+ SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
+ return 0;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ if (0 == smu7_data->fan_table_start) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (0 == duty100) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
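+
+ /*
+ * Illustrative example (hypothetical numbers): with duty100 = 255 and
+ * usPWMMin = 1000 (10.00%), fdo_min = 255 * 1000 / 10000 = 25; with
+ * t_diff1 = 3000 (30 degrees in 0.01 degree units) and pwm_diff1 = 2000,
+ * slope1 = (50 + 16 * 255 * 2000 / 3000) / 100 = 27. The +50 term rounds
+ * the /100 scaling to the nearest unit.
+ */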
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ /* fan_table.FanControl_GL_Flag = 1; */
+
+ res = smu7_copy_bytes_to_smc(hwmgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
+
+ return res;
+}
+
+
+static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return iceland_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU71_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+
+ result = iceland_update_and_upload_mc_reg_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
+
+ result = iceland_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+
+ return result;
+}
+
+static uint32_t iceland_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU71_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU71_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU71_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU71_SoftRegisters, UcodeLoadStatus);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU71_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+static uint32_t iceland_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU71_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU71_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU71_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU71_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU71_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU71_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU71_MAX_LEVELS_MVDD;
+ }
+
+ pr_warn("can't get the mac of %x\n", value);
+ return 0;
+}
+
+static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->dpm_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ data->soft_regs_start = tmp;
+ smu7_data->soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->mc_reg_table_start = tmp;
+ }
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->fan_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->arb_table_start = tmp;
+ }
+
+ error |= (0 != result);
+
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ hwmgr->microcode_version_info.SMC = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU71_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU71_Firmware_Header, UlvSettings),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ smu7_data->ulv_setting_starts = tmp;
+ }
+
+ error |= (0 != result);
+
+ return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+static uint8_t iceland_get_memory_modile_index(struct pp_hwmgr *hwmgr)
+{
+ return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
+static bool iceland_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+ bool result = true;
+
+ switch (in_reg) {
+ case mmMC_SEQ_RAS_TIMING:
+ *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_DLL_STBY:
+ *out_reg = mmMC_SEQ_DLL_STBY_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD0:
+ *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD1:
+ *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CTRL:
+ *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+ break;
+
+ case mmMC_SEQ_CAS_TIMING:
+ *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING:
+ *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING2:
+ *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CMD:
+ *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CTL:
+ *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D0:
+ *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D1:
+ *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D0:
+ *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D1:
+ *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+ break;
+
+ case mmMC_PMG_CMD_EMRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS1:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ break;
+
+ case mmMC_SEQ_PMG_TIMING:
+ *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS2:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_2:
+ *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+ break;
+
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static int iceland_set_s0_mc_reg_index(struct iceland_mc_reg_table *table)
+{
+ uint32_t i;
+ uint16_t address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
+ ? address : table->mc_reg_address[i].s1;
+ }
+ return 0;
+}
+
+static int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+ struct iceland_mc_reg_table *ni_table)
+{
+ uint8_t i, j;
+
+ PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ for (i = 0; i < table->last; i++) {
+ ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+ }
+ ni_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ni_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++) {
+ ni_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ }
+
+ ni_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
+static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct iceland_mc_reg_table *table)
+{
+ uint8_t i, j, k;
+ uint32_t temp_reg;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ for (i = 0, j = table->last; i < table->last; i++) {
+ PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ switch (table->mc_reg_address[i].s1) {
+
+ case mmMC_SEQ_MISC1:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+ if (!data->is_memory_gddr5) {
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
+ table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ }
+
+ break;
+
+ case mmMC_SEQ_RESERVE_M:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
+static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
+{
+ uint8_t i, j;
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->validflag |= (1<<i);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
+ pp_atomctrl_mc_reg_table *table;
+ struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+ uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
+
+ table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+ if (NULL == table)
+ return -ENOMEM;
+
+ /* Program additional LP registers that are no longer programmed by VBIOS */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+ memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+ result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+ if (0 == result)
+ result = iceland_copy_vbios_smc_reg_table(table, ni_table);
+
+ if (0 == result) {
+ iceland_set_s0_mc_reg_index(ni_table);
+ result = iceland_set_mc_special_registers(hwmgr, ni_table);
+ }
+
+ if (0 == result)
+ iceland_set_valid_flag(ni_table);
+
+ kfree(table);
+
+ return result;
+}
+
+static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+ ? true : false;
+}
+
const struct pp_smumgr_func iceland_smu_funcs = {
.smu_init = &iceland_smu_init,
.smu_fini = &smu7_smu_fini,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
index 8eae01b37c40..802472530d34 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
@@ -39,7 +39,7 @@ struct iceland_pt_defaults {
uint8_t tdc_waterfall_ctl;
uint8_t dte_ambient_temp_base;
uint32_t display_cac;
- uint32_t bamp_temp_gradient;
+ uint32_t bapm_temp_gradient;
uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
deleted file mode 100644
index 99a00bd39256..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ /dev/null
@@ -1,2364 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "pp_debug.h"
-#include "polaris10_smc.h"
-#include "smu7_dyn_defaults.h"
-
-#include "smu7_hwmgr.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "cgs_common.h"
-#include "atombios.h"
-#include "polaris10_smumgr.h"
-#include "pppcielanes.h"
-
-#include "smu_ucode_xfer_vi.h"
-#include "smu74_discrete.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "oss/oss_3_0_d.h"
-#include "gca/gfx_8_0_d.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "polaris10_pwrvirus.h"
-#include "smu7_ppsmc.h"
-#include "smu7_smumgr.h"
-
-#define POLARIS10_SMC_SIZE 0x20000
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VDDC_VDDCI_DELTA 200
-#define MC_CG_ARB_FREQ_F1 0x0b
-
-static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
- /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
- { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
- { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
- { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
-};
-
-static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
- {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
- {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
- {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
- {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
- {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
- {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
-
-static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
- uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
- uint32_t i;
- uint16_t vddci;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- *voltage = *mvdd = 0;
-
- /* clock - voltage dependency table is empty table */
- if (dep_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < dep_table->count; i++) {
- /* find first sclk bigger than request */
- if (dep_table->entries[i].clk >= clock) {
- *voltage |= (dep_table->entries[i].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i].vddci)
- *voltage |= (dep_table->entries[i].vddci *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i].vddc -
- (uint16_t)VDDC_VDDCI_DELTA));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i].mvdd *
- VOLTAGE_SCALE;
-
- *voltage |= 1 << PHASES_SHIFT;
- return 0;
- }
- }
-
- /* sclk is bigger than max sclk in the dependence table */
- *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i-1].vddci) {
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i].vddc -
- (uint16_t)VDDC_VDDCI_DELTA));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
- return 0;
-}
-
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
- uint32_t tmp;
- tmp = raw_setting * 4096 / 100;
- return (uint16_t)tmp;
-}
-
-static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
- const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
- SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
- struct pp_advance_fan_control_parameters *fan_table =
- &hwmgr->thermal_controller.advanceFanControlParameters;
- int i, j, k;
- const uint16_t *pdef1;
- const uint16_t *pdef2;
-
- table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
- table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
-
- PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
- "Target Operating Temp is out of Range!",
- );
-
- table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTargetOperatingTemp * 256);
- table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitHotspot * 256);
- table->FanGainEdge = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainEdge));
- table->FanGainHotspot = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHotspot));
-
- pdef1 = defaults->BAPMTI_R;
- pdef2 = defaults->BAPMTI_RC;
-
- for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
- for (j = 0; j < SMU74_DTE_SOURCES; j++) {
- for (k = 0; k < SMU74_DTE_SINKS; k++) {
- table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
- table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
- pdef1++;
- pdef2++;
- }
- }
- }
-
- return 0;
-}
-
-static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
- smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
- smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
- smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-
-static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
- uint16_t tdc_limit;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
- smu_data->power_tune_table.TDC_VDDC_PkgLimit =
- CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
- smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
- smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
- return 0;
-}
-
-static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
- uint32_t temp;
-
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- fuse_table_offset +
- offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
- return -EINVAL);
- else {
- smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
- smu_data->power_tune_table.LPMLTemperatureMin =
- (uint8_t)((temp >> 16) & 0xff);
- smu_data->power_tune_table.LPMLTemperatureMax =
- (uint8_t)((temp >> 8) & 0xff);
- smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
- }
- return 0;
-}
-
-static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
- return 0;
-}
-
-static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
-/* TO DO move to hwmgr */
- if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
- || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
- hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
- smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
- hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
- return 0;
-}
-
-static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.GnbLPML[i] = 0;
-
- return 0;
-}
-
-static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
- hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
- smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
-
- return 0;
-}
-
-static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint32_t pm_fuse_table_offset;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed!",
- return -EINVAL);
-
- if (polaris10_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed!",
- return -EINVAL);
-
- if (polaris10_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed!", return -EINVAL);
-
- if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl, "
- "LPMLTemperature Min and Max Failed!",
- return -EINVAL);
-
- if (0 != polaris10_populate_temperature_scaler(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed!",
- return -EINVAL);
-
- if (polaris10_populate_fuzzy_fan(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate Fuzzy Fan Control parameters Failed!",
- return -EINVAL);
-
- if (polaris10_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed!",
- return -EINVAL);
-
- if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
- if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
- "Sidd Failed!", return -EINVAL);
-
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
- (uint8_t *)&smu_data->power_tune_table,
- (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed!",
- return -EINVAL);
- }
- return 0;
-}
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param *hwmgr The address of the hardware manager.
- * @param *table The SMC DPM table structure to be populated.
- * @return 0
- */
-static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t count, level;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- count = data->mvdd_voltage_table.count;
- if (count > SMU_MAX_SMIO_LEVELS)
- count = SMU_MAX_SMIO_LEVELS;
- for (level = 0; level < count; level++) {
- table->SmioTable2.Pattern[level].Voltage =
- PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
- table->SmioTable2.Pattern[level].Smio =
- (uint8_t) level;
- table->Smio[level] |=
- data->mvdd_voltage_table.entries[level].smio_low;
- }
- table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
- table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
- }
-
- return 0;
-}
-
-static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- uint32_t count, level;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- count = data->vddci_voltage_table.count;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- if (count > SMU_MAX_SMIO_LEVELS)
- count = SMU_MAX_SMIO_LEVELS;
- for (level = 0; level < count; ++level) {
- table->SmioTable1.Pattern[level].Voltage =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
- table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
-
- table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
- }
- }
-
- table->SmioMask1 = data->vddci_voltage_table.mask_low;
-
- return 0;
-}
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *lookup_table =
- table_info->vddc_lookup_table;
- /* tables is already swapped, so in order to use the value from it,
- * we need to swap it back.
- * We are populating vddc CAC data to BapmVddc table
- * in split and merged mode
- */
- for (count = 0; count < lookup_table->count; count++) {
- index = phm_get_voltage_index(lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
- table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
- table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
- }
-
- return 0;
-}
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-
-static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- polaris10_populate_smc_vddci_table(hwmgr, table);
- polaris10_populate_smc_mvdd_table(hwmgr, table);
- polaris10_populate_cac_table(hwmgr, table);
-
- return 0;
-}
-
-static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_Ulv *state)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct pp_smumgr *smumgr = hwmgr->smumgr;
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
- state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
- if (smumgr->chip_id == CHIP_POLARIS12 || smumgr->is_kicker)
- state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
- else
- state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-
- return 0;
-}
-
-static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- int i;
-
- /* Index (dpm_table->pcie_speed_table.count)
- * is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
- dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity = 1;
- table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
- }
-
- smu_data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
-
-/* To Do move to hwmgr */
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-
-static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- uint32_t i, ref_clk;
-
- struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
-
- ref_clk = smu7_get_xclk(hwmgr);
-
- if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
- table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
- table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
- table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
-
- table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
- table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
- }
- return;
- }
-
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
- smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
- smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
-
- table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
- table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
- table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
-
- table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
- table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
- CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
- }
-}
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, SMU_SclkSetting *sclk_setting)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- struct pp_atomctrl_clock_dividers_ai dividers;
- uint32_t ref_clock;
- uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
- uint8_t i;
- int result;
- uint64_t temp;
-
- sclk_setting->SclkFrequency = clock;
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
- if (result == 0) {
- sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
- sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
- sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
- sclk_setting->PllRange = dividers.ucSclkPllRange;
- sclk_setting->Sclk_slew_rate = 0x400;
- sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
- sclk_setting->Pcc_down_slew_rate = 0xffff;
- sclk_setting->SSc_En = dividers.ucSscEnable;
- sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
- sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
- sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
- return result;
- }
-
- ref_clock = smu7_get_xclk(hwmgr);
-
- for (i = 0; i < NUM_SCLK_RANGE; i++) {
- if (clock > smu_data->range_table[i].trans_lower_frequency
- && clock <= smu_data->range_table[i].trans_upper_frequency) {
- sclk_setting->PllRange = i;
- break;
- }
- }
-
- sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
- temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
- temp <<= 0x10;
- do_div(temp, ref_clock);
- sclk_setting->Fcw_frac = temp & 0xffff;
-
- pcc_target_percent = 10; /* Hardcode 10% for now. */
- pcc_target_freq = clock - (clock * pcc_target_percent / 100);
- sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
-
- ss_target_percent = 2; /* Hardcode 2% for now. */
- sclk_setting->SSc_En = 0;
- if (ss_target_percent) {
- sclk_setting->SSc_En = 1;
- ss_target_freq = clock - (clock * ss_target_percent / 100);
- sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
- temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
- temp <<= 0x10;
- do_div(temp, ref_clock);
- sclk_setting->Fcw1_frac = temp & 0xffff;
- }
-
- return 0;
-}
-
-/**
-* Populates single SMC SCLK structure using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-
-static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, uint16_t sclk_al_threshold,
- struct SMU74_Discrete_GraphicsLevel *level)
-{
- int result;
- /* PP_Clocks minClocks; */
- uint32_t mvdd;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- SMU_SclkSetting curr_sclk_setting = { 0 };
-
- result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
-
- /* populate graphics levels */
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk, clock,
- &level->MinVoltage, &mvdd);
-
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for "
- "VDDC engine clock dependency table",
- return result);
- level->ActivityLevel = sclk_al_threshold;
-
- level->CcPwrDynRm = 0;
- level->CcPwrDynRm1 = 0;
- level->EnabledForActivity = 0;
- level->EnabledForThrottle = 1;
- level->UpHyst = 10;
- level->DownHyst = 0;
- level->VoltageDownHyst = 0;
- level->PowerThrottle = 0;
- data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
- level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
- hwmgr->display_config.min_core_set_clock_in_sr);
-
- /* Default to slow, highest DPM level will be
- * set to PPSMC_DISPLAY_WATERMARK_LOW later.
- */
- if (data->update_up_hyst)
- level->UpHyst = (uint8_t)data->up_hyst;
- if (data->update_down_hyst)
- level->DownHyst = (uint8_t)data->down_hyst;
-
- level->SclkSetting = curr_sclk_setting;
-
- CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
- CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
- return 0;
-}
-
-/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
- int result = 0;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
- SMU74_MAX_LEVELS_GRAPHICS;
- struct SMU74_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t i, max_entry;
- uint8_t hightest_pcie_level_enabled = 0,
- lowest_pcie_level_enabled = 0,
- mid_pcie_level_enabled = 0,
- count = 0;
-
- polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
-
- result = polaris10_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
- &(smu_data->smc_state_table.GraphicsLevel[i]));
- if (result)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- levels[i].DeepSleepDivId = 0;
- }
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SPLLShutdownSupport))
- smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
-
- smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
- smu_data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
- max_entry = pcie_entry_cnt - 1;
- for (i = 0; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel =
- (uint8_t) ((i < max_entry) ? i : max_entry);
- } else {
- while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (hightest_pcie_level_enabled + 1))) != 0))
- hightest_pcie_level_enabled++;
-
- while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << lowest_pcie_level_enabled)) == 0))
- lowest_pcie_level_enabled++;
-
- while ((count < hightest_pcie_level_enabled) &&
- ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
- count++;
-
- mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
- hightest_pcie_level_enabled ?
- (lowest_pcie_level_enabled + 1 + count) :
- hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to hightest_pcie_level_enabled */
- for (i = 2; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled */
- levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled */
- levels[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
- /* level count will send to smc once at init smc table and never change */
- result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, SMC_RAM_END);
-
- return result;
-}
-
-
-static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
- struct cgs_display_info info = {0, 0, NULL};
- uint32_t mclk_stutter_mode_threshold = 40000;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (table_info->vdd_dep_on_mclk) {
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk, clock,
- &mem_level->MinVoltage, &mem_level->MinMvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinVddc voltage value from memory "
- "VDDC voltage dependency table", return result);
- }
-
- mem_level->MclkFrequency = clock;
- mem_level->EnabledForThrottle = 1;
- mem_level->EnabledForActivity = 0;
- mem_level->UpHyst = 0;
- mem_level->DownHyst = 100;
- mem_level->VoltageDownHyst = 0;
- mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- mem_level->StutterEnable = false;
- mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- data->display_timing.num_existing_displays = info.display_count;
-
- if (mclk_stutter_mode_threshold &&
- (clock <= mclk_stutter_mode_threshold) &&
- (SMUM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
- STUTTER_ENABLE) & 0x1))
- mem_level->StutterEnable = true;
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
- }
- return result;
-}
-
-/**
-* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
- int result;
- /* populate MCLK dpm table to SMU7 */
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
- uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
- SMU74_MAX_LEVELS_MEMORY;
- struct SMU74_Discrete_MemoryLevel *levels =
- smu_data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero",
- return -EINVAL);
- result = polaris10_populate_single_memory_level(hwmgr,
- dpm_table->mclk_table.dpm_levels[i].value,
- &levels[i]);
- if (i == dpm_table->mclk_table.count - 1) {
- levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
- levels[i].EnabledForActivity = 1;
- }
- if (result)
- return result;
- }
-
- /* In order to prevent MC activity from stutter mode to push DPM up,
- * the UVD change complements this by putting the MCLK in
- * a higher state by default such that we are not affected by
- * up threshold or and MCLK DPM latency.
- */
- levels[0].ActivityLevel = 0x1f;
- CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
- smu_data->smc_state_table.MemoryDpmLevelCount =
- (uint8_t)dpm_table->mclk_table.count;
- hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-
- /* level count will send to smc once at init smc table and never change */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, SMC_RAM_END);
-
- return result;
-}
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param hwmgr the address of the hardware manager
-* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
-* @param voltage the SMC VOLTAGE structure to be populated
-*/
-static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pat)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
- /* find mvdd value which clock is more than request */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
- } else
- return -EINVAL;
-
- return 0;
-}
-
-static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = 0;
- uint32_t sclk_frequency;
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- SMIO_Pattern vol_level;
- uint32_t mvdd;
- uint16_t us_mvdd;
-
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- /* Get MinVoltage and Frequency from DPM0,
- * already converted to SMC_UL */
- sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk,
- sclk_frequency,
- &table->ACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDC voltage value "
- "in Clock Dependency Table",
- );
-
- result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
- PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- table->ACPILevel.DeepSleepDivId = 0;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
- CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
-
-
- /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
- result = polaris10_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk,
- table->MemoryACPILevel.MclkFrequency,
- &table->MemoryACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDCI voltage value "
- "in Clock Dependency Table",
- );
-
- us_mvdd = 0;
- if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!polaris10_populate_mvdd_value(hwmgr,
- data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
-
- if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
- table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
- else
- table->MemoryACPILevel.MinMvdd = 0;
-
- table->MemoryACPILevel.StutterEnable = false;
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
- return result;
-}
-
-static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->VceLevelCount = (uint8_t)(mm_table->count);
- table->VceBootLevel = 0;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage = 0;
- table->VceLevel[count].MinVoltage |=
- (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-
- table->VceLevel[count].MinVoltage |=
- (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /*retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for VCE engine clock",
- return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
- }
- return result;
-}
-
-
-static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
- table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
- int32_t eng_clock, int32_t mem_clock,
- SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
- uint32_t dram_timing;
- uint32_t dram_timing2;
- uint32_t burst_time;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- eng_clock, mem_clock);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
- arb_regs->McArbBurstTime = (uint8_t)burst_time;
-
- return 0;
-}
-
-static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
- int result = 0;
-
- for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
- result = polaris10_populate_memory_timing_parameters(hwmgr,
- hw_data->dpm_table.sclk_table.dpm_levels[i].value,
- hw_data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
- if (result == 0)
- result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
- if (result != 0)
- return result;
- }
- }
-
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU74_Discrete_MCArbDramTimingTable),
- SMC_RAM_END);
- return result;
-}
-
-static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t vddci;
-
- table->UvdLevelCount = (uint8_t)(mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].MinVoltage = 0;
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
- vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
- vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
- else
- vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
- table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].VclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Vclk clock", return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Dclk clock", return result);
-
- table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
- }
-
- return result;
-}
-
-static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- int result = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table */
- result = phm_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(table->GraphicsBootLevel));
-
- result = phm_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(table->MemoryBootLevel));
-
- table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
- VOLTAGE_SCALE;
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE;
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return 0;
-}
-
-static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint8_t count, level;
-
- count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
-
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_sclk->entries[level].clk >=
- hw_data->vbios_boot_state.sclk_bootup_value) {
- smu_data->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
- for (level = 0; level < count; level++) {
- if (table_info->vdd_dep_on_mclk->entries[level].clk >=
- hw_data->vbios_boot_state.mclk_bootup_value) {
- smu_data->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-
- return 0;
-}
-
-
-static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
- uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
- stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
- /* Read SMU_Eefuse to read and calculate RO and determine
- * if the part is SS or FF. if RO >= 1660MHz, part is FF.
- */
- efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (67 * 4));
- efuse &= 0xFF000000;
- efuse = efuse >> 24;
-
- if (hwmgr->chip_id == CHIP_POLARIS10) {
- min = 1000;
- max = 2300;
- } else {
- min = 1100;
- max = 2100;
- }
-
- ro = efuse * (max - min) / 255 + min;
-
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
- for (i = 0; i < sclk_table->count; i++) {
- smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
- sclk_table->entries[i].cks_enable << i;
- if (hwmgr->chip_id == CHIP_POLARIS10) {
- volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
- (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
- volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
- (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
- } else {
- volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
- (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
- volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
- (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
- }
-
- if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
-
- smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
- }
-
- smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
- /* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
- PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
- return -EINVAL);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
- value &= 0xFFFFFFFE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
- return 0;
-}
-
-/**
-* Populates the SMC VRConfig field in DPM table.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
- struct SMU74_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint16_t config;
-
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- PP_ASSERT_WITH_CODE(false,
- "VDDC should be on SVI2 control in merged mode!",
- );
- }
- /* Set Vddci Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- }
- /* Set Mvdd Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
- offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
-
-
-static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
- SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- int result = 0;
- struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
- AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
- AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
- uint32_t tmp, i;
-
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
-
- if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
- return result;
-
- result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
-
- if (0 == result) {
- table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
- table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
- table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
- table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
- table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
- table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
- table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
- table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
- table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
- table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
- table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
- table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
- table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
- table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
- table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
- table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
- table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
- AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
- AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
- AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
- AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
- AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
- AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
- AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
-
- for (i = 0; i < NUM_VFT_COLUMNS; i++) {
- AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
- AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
- }
-
- result = smu7_read_smc_sram_dword(smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
- &tmp, SMC_RAM_END);
-
- smu7_copy_bytes_to_smc(smumgr,
- tmp,
- (uint8_t *)&AVFS_meanNsigma,
- sizeof(AVFS_meanNsigma_t),
- SMC_RAM_END);
-
- result = smu7_read_smc_sram_dword(smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
- &tmp, SMC_RAM_END);
- smu7_copy_bytes_to_smc(smumgr,
- tmp,
- (uint8_t *)&AVFS_SclkOffset,
- sizeof(AVFS_Sclk_Offset_t),
- SMC_RAM_END);
-
- data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
- (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
- data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
- }
- return result;
-}
-
-
-/**
-* Initialize the ARB DRAM timing table's index field.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- uint32_t tmp;
- int result;
-
-	/* This is a read-modify-write on the first byte of the ARB table.
-	 * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
-	 * is the field 'current'.
-	 * This solution is ugly, but we never write the whole table, only
-	 * individual fields in it.
-	 * In reality this field should not be in that structure
-	 * but in a soft register.
-	 */
- result = smu7_read_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
-
- if (result)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return smu7_write_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
-}
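The ARB index helper above only rewrites the most significant byte of the first dword of the table. A minimal, self-contained sketch of that mask-and-or pattern (stand-alone code, not the driver's; the function name is made up):

#include <stdint.h>
#include <stdio.h>

/* Replace the most significant byte of a 32-bit word, as the ARB table
 * index update does with MC_CG_ARB_FREQ_F1. */
static uint32_t set_top_byte(uint32_t word, uint8_t value)
{
	word &= 0x00FFFFFF;              /* clear bits 31:24 */
	word |= (uint32_t)value << 24;   /* install the new 'current' index */
	return word;
}

int main(void)
{
	uint32_t arb_word = 0x12345678;
	printf("0x%08X\n", set_top_byte(arb_word, 0x0B)); /* 0x0B345678 */
	return 0;
}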
-
-static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (table_info &&
- table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
- table_info->cac_dtp_table->usPowerTuneDataSetID)
- smu_data->power_tune_defaults =
- &polaris10_power_tune_data_set_array
- [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
- else
- smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
-
-}
-
-static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct SMU74_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- unsigned min_level = 1;
-
- hwmgr->default_gfx_power_profile.activity_threshold =
- be16_to_cpu(levels[0].ActivityLevel);
- hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
- hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
- hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
- hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
- /* Workaround compute SDMA instability: disable lowest SCLK
- * DPM level. Optimize compute power profile: Use only highest
- * 2 power levels (if more than 2 are available), Hysteresis:
- * 0ms up, 5ms down
- */
- if (data->smc_state_table.GraphicsDpmLevelCount > 2)
- min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
- else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
- min_level = 1;
- else
- min_level = 0;
- hwmgr->default_compute_power_profile.min_sclk =
- be32_to_cpu(levels[min_level].SclkSetting.SclkFrequency);
- hwmgr->default_compute_power_profile.up_hyst = 0;
- hwmgr->default_compute_power_profile.down_hyst = 5;
-
- hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-}
-
-/**
-* Initializes the SMC table and uploads it to the SMC.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success, otherwise an error code.
-*/
-int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- uint8_t i;
- struct pp_atomctrl_gpio_pin_assignment gpio_pin;
- pp_atomctrl_clock_dividers_vi dividers;
-
- polaris10_initialize_power_tune_defaults(hwmgr);
-
- if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
- polaris10_populate_smc_voltage_tables(hwmgr, table);
-
- table->SystemFlags = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (hw_data->is_memory_gddr5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
- result = polaris10_populate_ulv_state(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
- }
-
- result = polaris10_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result);
-
- result = polaris10_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result);
-
- result = polaris10_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result);
-
- result = polaris10_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result);
-
- result = polaris10_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result);
-
- result = polaris10_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
- /* Since only the initial state is completely set up at this point
- * (the other states are just copies of the boot state) we only
- * need to populate the ARB settings for the initial state.
- */
- result = polaris10_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result);
-
- result = polaris10_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result);
-
- result = polaris10_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result);
-
- result = polaris10_populate_smc_initailial_state(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot State!", return result);
-
- result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate BAPM Parameters!", return result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = polaris10_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate Clock Stretcher Data Table!",
- return result);
- }
-
- result = polaris10_populate_avfs_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
-
- table->CurrSclkPllRange = 0xff;
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- table_info->cac_dtp_table->usTargetOperatingTemp *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
- table->PCIeGenInterval = 1;
- table->VRConfig = 0;
-
- result = polaris10_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate VRConfig setting!", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
- table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
- } else {
- table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin)) {
- table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- /* Thermal Output GPIO */
- if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
- &gpio_pin)) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
-		/* For polarity, read GPIOPAD_A with the assigned GPIO pin
-		 * since the VBIOS programs this register to set the 'inactive state';
-		 * the driver can then derive the 'active state' from it and
-		 * program the SMU with the correct polarity.
-		 */
- table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
- & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
- && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- } else {
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- /* Populate BIF_SCLK levels into SMC DPM table */
- for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
- PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
-
- if (i == 0)
- table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
- else
- table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
- }
-
- for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
-	/* Upload all dpm data (dpm level, dpm level count, etc.) to SMC memory. */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
- SMC_RAM_END);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload dpm data to SMC memory!", return result);
-
- result = polaris10_init_arb_table_index(hwmgr->smumgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload arb data to SMC memory!", return result);
-
- result = polaris10_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate PM fuses to SMC memory!", return result);
-
- polaris10_save_default_power_profile(hwmgr);
-
- return 0;
-}
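Before the block upload at the end of polaris10_init_smc_table(), every multi-byte field is passed through the host-to-SMC conversion macros because the SMC firmware expects big-endian data. A hedged stand-alone sketch of such a conversion, written with explicit shifts rather than the kernel macros:

#include <stdint.h>
#include <stdio.h>

/* Store a host-order 32-bit value as big-endian bytes, the byte order the
 * SMC expects (a stand-in for the driver's host-to-SMC macros). */
static void put_be32(uint8_t *dst, uint32_t v)
{
	dst[0] = (uint8_t)(v >> 24);
	dst[1] = (uint8_t)(v >> 16);
	dst[2] = (uint8_t)(v >> 8);
	dst[3] = (uint8_t)v;
}

int main(void)
{
	uint8_t buf[4];
	put_be32(buf, 0x4000);          /* e.g. a SclkStepSize-like value */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;                       /* prints: 00 00 40 00 */
}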
-
-static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return polaris10_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
-{
- int ret;
- struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
- return 0;
-
- ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
-
- ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
- 0 : -1;
-
- if (!ret)
- /* If this param is not changed, this function could fire unnecessarily */
- smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
-
- return ret;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0; if the fan table cannot be set up, microcode fan
-* control is simply disabled.
-*/
-int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- if (smu_data->smu7_data.fan_table_start == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (duty100 == 0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
- usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->
- thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->
- thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = smu7_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
- thermal_controller.advanceFanControlParameters.ulCycleDelay *
- reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
- hwmgr->device, CGS_IND_REG__SMC,
- CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
- (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
- SMC_RAM_END);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanMinPwm,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
-
- if (!res && hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanSclkTarget,
- hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
-
- if (res)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
-
- return 0;
-}
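The fan-curve slopes above are integer fixed-point computations: the factor of 16 supplies fractional resolution, and the +50 added before the final division by 100 rounds to nearest instead of truncating. A small sketch with illustrative numbers (not taken from a real board):

#include <stdint.h>
#include <stdio.h>

/* Rounded integer slope, mirroring the (50 + 16*duty*d_pwm/d_t)/100 form
 * used for the fan table above (illustrative values only). */
static uint16_t fan_slope(uint32_t duty100, uint32_t pwm_diff, uint32_t t_diff)
{
	return (uint16_t)((50 + ((16 * duty100 * pwm_diff) / t_diff)) / 100);
}

int main(void)
{
	/* e.g. duty100 = 255, PWM rises 30 points over a 20 degree span */
	printf("%u\n", fan_slope(255, 30, 20));  /* prints 61 */
	return 0;
}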
-
-static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- smu_data->smc_state_table.UvdBootLevel = 0;
- if (table_info->mm_dep_table->count > 0)
- smu_data->smc_state_table.UvdBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
- UvdBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
- return 0;
-}
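The UVD, VCE and SAMU boot-level updates all follow the same pattern: the byte offset of the field is rounded down to a dword boundary (offset /= 4; offset *= 4), the containing dword is read back through the indirect register interface, the target byte is masked out, and the new value is ORed in at the matching shift. A hedged stand-alone sketch of that pattern (the helper and the buffer are made up, and byte 0 is assumed to be the least significant byte of the dword):

#include <stdint.h>
#include <stdio.h>

/* Update a single byte that lives at 'offset' inside a dword-addressed
 * buffer, the way the boot-level fields are patched in SMC RAM above. */
static void update_byte(uint32_t *ram, uint32_t offset, uint8_t value)
{
	uint32_t aligned = (offset / 4) * 4;     /* containing dword */
	uint32_t shift   = (offset % 4) * 8;     /* byte position within it */
	uint32_t word    = ram[aligned / 4];

	word &= ~(0xFFu << shift);
	word |= (uint32_t)value << shift;
	ram[aligned / 4] = word;
}

int main(void)
{
	uint32_t ram[2] = { 0x11223344, 0x55667788 };
	update_byte(ram, 7, 0xAB);               /* top byte of ram[1] */
	printf("0x%08X\n", ram[1]);              /* 0xAB667788 */
	return 0;
}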
-
-static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smu_data->smc_state_table.VceBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- else
- smu_data->smc_state_table.VceBootLevel = 0;
-
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
- return 0;
-}
-
-static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
-
-static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- int max_entry, i;
-
- max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
- SMU74_MAX_LEVELS_LINK :
- pcie_table->count;
- /* Setup BIF_SCLK levels */
- for (i = 0; i < max_entry; i++)
- smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
- return 0;
-}
-
-int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
-{
- switch (type) {
- case SMU_UVD_TABLE:
- polaris10_update_uvd_smc_table(hwmgr);
- break;
- case SMU_VCE_TABLE:
- polaris10_update_vce_smc_table(hwmgr);
- break;
- case SMU_SAMU_TABLE:
- polaris10_update_samu_smc_table(hwmgr);
- break;
- case SMU_BIF_TABLE:
-		polaris10_update_bif_smc_table(hwmgr);
-		break;
-	default:
- break;
- }
- return 0;
-}
-
-int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold =
- data->low_sclk_interrupt_threshold;
-
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- SMC_RAM_END);
- }
- PP_ASSERT_WITH_CODE((result == 0),
- "Failed to update SCLK threshold!", return result);
-
- result = polaris10_program_mem_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((result == 0),
- "Failed to program memory timing parameters!",
- );
-
- return result;
-}
-
-uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
-{
- switch (type) {
- case SMU_SoftRegisters:
- switch (member) {
- case HandshakeDisables:
- return offsetof(SMU74_SoftRegisters, HandshakeDisables);
- case VoltageChangeTimeout:
- return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
- case AverageGraphicsActivity:
- return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
- case PreVBlankGap:
- return offsetof(SMU74_SoftRegisters, PreVBlankGap);
- case VBlankTimeout:
- return offsetof(SMU74_SoftRegisters, VBlankTimeout);
- case UcodeLoadStatus:
- return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
- }
- case SMU_Discrete_DpmTable:
- switch (member) {
- case UvdBootLevel:
- return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
- case VceBootLevel:
- return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
- case LowSclkInterruptThreshold:
- return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
- }
- }
- pr_warn("can't get the offset of type %x member %x\n", type, member);
- return 0;
-}
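polaris10_get_offsetof() resolves a symbolic member name to its byte position inside the SMU74 structures via offsetof(). A minimal sketch of the same C idiom on a made-up structure (offsets are platform-dependent, so none are asserted):

#include <stddef.h>
#include <stdio.h>

/* offsetof() gives the byte position of a member within a struct, which is
 * how the driver locates fields inside the SMC tables above. */
struct example_table {
	unsigned int flags;
	unsigned short boot_level;
	unsigned short threshold;
};

int main(void)
{
	printf("boot_level at %zu, threshold at %zu\n",
	       offsetof(struct example_table, boot_level),
	       offsetof(struct example_table, threshold));
	return 0;
}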
-
-uint32_t polaris10_get_mac_definition(uint32_t value)
-{
- switch (value) {
- case SMU_MAX_LEVELS_GRAPHICS:
- return SMU74_MAX_LEVELS_GRAPHICS;
- case SMU_MAX_LEVELS_MEMORY:
- return SMU74_MAX_LEVELS_MEMORY;
- case SMU_MAX_LEVELS_LINK:
- return SMU74_MAX_LEVELS_LINK;
- case SMU_MAX_ENTRIES_SMIO:
- return SMU74_MAX_ENTRIES_SMIO;
- case SMU_MAX_LEVELS_VDDC:
- return SMU74_MAX_LEVELS_VDDC;
- case SMU_MAX_LEVELS_VDDGFX:
- return SMU74_MAX_LEVELS_VDDGFX;
- case SMU_MAX_LEVELS_VDDCI:
- return SMU74_MAX_LEVELS_VDDCI;
- case SMU_MAX_LEVELS_MVDD:
- return SMU74_MAX_LEVELS_MVDD;
- case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
- return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
- }
-
- pr_warn("can't get the mac of %x\n", value);
- return 0;
-}
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success, -1 if any table location could not be read from
-* the firmware header.
-*/
-int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, DpmTable),
- &tmp, SMC_RAM_END);
-
- if (0 == result)
- smu_data->smu7_data.dpm_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, SoftRegisters),
- &tmp, SMC_RAM_END);
-
- if (!result) {
- data->soft_regs_start = tmp;
- smu_data->smu7_data.soft_regs_start = tmp;
- }
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, mcRegisterTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.mc_reg_table_start = tmp;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, FanTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.fan_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.arb_table_start = tmp;
-
- error |= (0 != result);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU74_Firmware_Header, Version),
- &tmp, SMC_RAM_END);
-
- if (!result)
- hwmgr->microcode_version_info.SMC = tmp;
-
- error |= (0 != result);
-
- return error ? -1 : 0;
-}
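The firmware-header parser above keeps reading every table location and accumulates failures in its 'error' flag, so one failed read does not prevent the remaining offsets from being cached. A minimal sketch of that accumulate-and-report-once pattern; read_field() is a hypothetical stand-in, not a driver API:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical reader: fills *out and returns 0 on success. */
static int read_field(int which, unsigned *out)
{
	*out = 0x1000 + which;
	return (which == 2) ? -1 : 0;    /* pretend field 2 fails */
}

int main(void)
{
	unsigned v;
	bool error = false;
	int i;

	/* Try every field; remember whether any read failed, as the
	 * firmware-header parser does with its 'error' flag. */
	for (i = 0; i < 4; i++) {
		int result = read_field(i, &v);
		if (result == 0)
			printf("field %d at 0x%x\n", i, v);
		error |= (result != 0);
	}
	return error ? -1 : 0;
}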
-
-bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
-}
-
-int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
-{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)
- (hwmgr->smumgr->backend);
- struct SMU74_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
- SMU74_MAX_LEVELS_GRAPHICS;
- uint32_t i;
-
- for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpHyst = request->up_hyst;
- levels[i].DownHyst = request->down_hyst;
- }
-
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- array_size, SMC_RAM_END);
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 75f43dadc56b..bd6be7793ca7 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -35,13 +35,47 @@
#include "gca/gfx_8_0_d.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
-#include "polaris10_pwrvirus.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
-#include "polaris10_smc.h"
#include "smu7_ppsmc.h"
#include "smu7_smumgr.h"
+#include "smu7_dyn_defaults.h"
+
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#define POLARIS10_SMC_SIZE 0x20000
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VDDC_VDDCI_DELTA 200
+#define MC_CG_ARB_FREQ_F1 0x0b
+
+static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+	/* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+ { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
+static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
+ {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
+ {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
+
#define PPPOLARIS10_TARGETACTIVITY_DFLT 50
static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
@@ -60,46 +94,13 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
-static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
-{
- int i;
- int result = -EINVAL;
- uint32_t reg, data;
-
- const PWR_Command_Table *pvirus = pwr_virus_table;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
-
- for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
- switch (pvirus->command) {
- case PwrCmdWrite:
- reg = pvirus->reg;
- data = pvirus->data;
- cgs_write_register(smumgr->device, reg, data);
- break;
-
- case PwrCmdEnd:
- result = 0;
- break;
-
- default:
- pr_info("Table Exit with Invalid Command!");
- smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- result = -EINVAL;
- break;
- }
- pvirus++;
- }
-
- return result;
-}
-
-static int polaris10_perform_btc(struct pp_smumgr *smumgr)
+static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (0 != smu_data->avfs.avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+ if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
result = -1;
}
@@ -107,16 +108,16 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
if (smu_data->avfs.avfs_btc_param > 1) {
/* Soft-Reset to reset the engine before loading uCode */
/* halt */
- cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000);
+ cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000);
/* reset everything */
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0);
}
return result;
}
-static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int polaris10_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
{
uint32_t vr_config;
uint32_t dpm_table_start;
@@ -127,7 +128,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_size = sizeof(avfs_graphics_level_polaris10);
u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE);
- PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable),
&dpm_table_start, 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table",
@@ -138,14 +139,14 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_address,
(uint8_t *)&vr_config, sizeof(uint32_t), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC",
return -1);
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
(uint8_t *)(&avfs_graphics_level_polaris10),
graphics_level_size, 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!",
@@ -153,7 +154,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
(uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!",
return -1);
@@ -162,7 +163,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
(uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!",
return -1);
@@ -172,9 +173,9 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
static int
-polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
+polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool SMU_VFT_INTACT)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
switch (smu_data->avfs.avfs_btc_status) {
case AVFS_BTC_COMPLETED_PREVIOUSLY:
@@ -183,20 +184,20 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
case AVFS_BTC_BOOT: /* Cold Boot State - Post SMU Start */
smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED;
- PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(smumgr),
+ PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr),
"[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
return -EINVAL);
if (smu_data->avfs.avfs_btc_param > 1) {
 			pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be issues with this setting.");
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(smumgr),
+ PP_ASSERT_WITH_CODE(0 == smu7_setup_pwr_virus(hwmgr),
"[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
return -EINVAL);
}
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr),
+ PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr),
"[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
return -EINVAL);
smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
@@ -215,146 +216,146 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
return 0;
}
-static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
+static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* Wait for smc boot up */
- /* SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
+ /* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
/* Assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result != 0)
return result;
/* Clear status */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
/* Call Test SMU message with 0x20000 offset to trigger SMU start */
- smu7_send_msg_to_smc_offset(smumgr);
+ smu7_send_msg_to_smc_offset(hwmgr);
/* Wait done bit to be set */
/* Check pass/failed indicator */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);
- if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_STATUS, SMU_PASS))
PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1);
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
+static int polaris10_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* wait for smc boot up */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);
/* Clear firmware interrupt enable flag */
- /* SMUM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ /* PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL,
rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result != 0)
return result;
/* Set smc instruct start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int polaris10_start_smu(struct pp_smumgr *smumgr)
+static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
bool SMU_VFT_INTACT;
/* Only start SMC if SMC RAM is not running */
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(hwmgr)) {
SMU_VFT_INTACT = false;
- smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
- smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
+ smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
+ smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
/* Check if SMU is running in protected mode */
if (smu_data->protected_mode == 0) {
- result = polaris10_start_smu_in_non_protection_mode(smumgr);
+ result = polaris10_start_smu_in_non_protection_mode(hwmgr);
} else {
- result = polaris10_start_smu_in_protection_mode(smumgr);
+ result = polaris10_start_smu_in_protection_mode(hwmgr);
/* If failed, try with different security Key. */
if (result != 0) {
smu_data->smu7_data.security_hard_key ^= 1;
- cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
- result = polaris10_start_smu_in_protection_mode(smumgr);
+ cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
+ result = polaris10_start_smu_in_protection_mode(hwmgr);
}
}
if (result != 0)
PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
- polaris10_avfs_event_mgr(smumgr, true);
+ polaris10_avfs_event_mgr(hwmgr, true);
} else
SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */
- polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT);
+ polaris10_avfs_event_mgr(hwmgr, SMU_VFT_INTACT);
/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
- smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
+ smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
&(smu_data->smu7_data.soft_regs_start), 0x40000);
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
-static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
+static bool polaris10_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
uint32_t efuse;
- efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
efuse &= 0x00000001;
if (efuse)
return true;
@@ -362,7 +363,7 @@ static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
return false;
}
-static int polaris10_smu_init(struct pp_smumgr *smumgr)
+static int polaris10_smu_init(struct pp_hwmgr *hwmgr)
{
struct polaris10_smumgr *smu_data;
int i;
@@ -371,9 +372,9 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
if (smu_data == NULL)
return -ENOMEM;
- smumgr->backend = smu_data;
+ hwmgr->smu_backend = smu_data;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
@@ -382,6 +383,2195 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
return 0;
}
+static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+ uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+ uint32_t i;
+ uint16_t vddci;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ *voltage = *mvdd = 0;
+
+	/* the clock-voltage dependency table is empty */
+ if (dep_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < dep_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (dep_table->entries[i].clk >= clock) {
+ *voltage |= (dep_table->entries[i].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i].vddci)
+ *voltage |= (dep_table->entries[i].vddci *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+ VOLTAGE_SCALE;
+
+ *voltage |= 1 << PHASES_SHIFT;
+ return 0;
+ }
+ }
+
+	/* sclk is bigger than the max sclk in the dependency table */
+ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i-1].vddci) {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+				(dep_table->entries[i - 1].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+	else if (dep_table->entries[i - 1].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+ return 0;
+}
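polaris10_get_dependency_volt_by_clk() packs VDDC, VDDCI and a phase count into one 32-bit level word using the VDDC_SHIFT/VDDCI_SHIFT/PHASES_SHIFT positions defined in the smu7 headers. A stand-alone sketch of that kind of packing; the shift values below are illustrative assumptions, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative shifts only -- the real values come from the smu7 headers. */
#define EX_VDDC_SHIFT   0
#define EX_VDDCI_SHIFT  15
#define EX_PHASES_SHIFT 30

/* Pack two voltages (already scaled to SMC units) and a phase count into
 * one 32-bit level word, in the spirit of the voltage word built above. */
static uint32_t pack_level(uint32_t vddc, uint32_t vddci, uint32_t phases)
{
	return (vddc << EX_VDDC_SHIFT) |
	       (vddci << EX_VDDCI_SHIFT) |
	       (phases << EX_PHASES_SHIFT);
}

int main(void)
{
	printf("0x%08X\n", pack_level(0x320, 0x2EE, 1));
	return 0;
}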
+
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+ uint32_t tmp;
+ tmp = raw_setting * 4096 / 100;
+ return (uint16_t)tmp;
+}
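scale_fan_gain_settings() maps a percentage onto a 0..4096 fixed-point scale (100% -> 4096), multiplying before dividing so no precision is lost in the intermediate. A tiny sketch of the same conversion:

#include <stdint.h>
#include <stdio.h>

/* Same conversion as scale_fan_gain_settings(): percent -> x/4096. */
static uint16_t percent_to_q12(uint16_t percent)
{
	return (uint16_t)((uint32_t)percent * 4096 / 100);
}

int main(void)
{
	printf("%u %u %u\n", percent_to_q12(25), percent_to_q12(50),
	       percent_to_q12(100));   /* 1024 2048 4096 */
	return 0;
}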
+
+static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ struct pp_advance_fan_control_parameters *fan_table =
+ &hwmgr->thermal_controller.advanceFanControlParameters;
+ int i, j, k;
+ const uint16_t *pdef1;
+ const uint16_t *pdef2;
+
+ table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+ table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range!",
+ );
+
+ table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTargetOperatingTemp * 256);
+ table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitHotspot * 256);
+ table->FanGainEdge = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainEdge));
+ table->FanGainHotspot = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHotspot));
+
+ pdef1 = defaults->BAPMTI_R;
+ pdef2 = defaults->BAPMTI_RC;
+
+ for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU74_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU74_DTE_SINKS; k++) {
+ table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
+ table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
+ pdef1++;
+ pdef2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+ return 0;
+}
+
+static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+			"Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
+ return -EINVAL);
+ else {
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+ smu_data->power_tune_table.LPMLTemperatureMin =
+ (uint8_t)((temp >> 16) & 0xff);
+ smu_data->power_tune_table.LPMLTemperatureMax =
+ (uint8_t)((temp >> 8) & 0xff);
+ smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+ }
+ return 0;
+}
+
+static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+/* TO DO move to hwmgr */
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+ || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+ return 0;
+}
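The SIDD leakage values above are converted to an 8.8 fixed-point form by dividing by 100 and then multiplying by 256; because this is integer arithmetic, the division truncates the fractional part before scaling. A small illustration of the difference between dividing first and multiplying first (illustrative numbers only; whether the truncation matters depends on the units the VBIOS table actually uses):

#include <stdint.h>
#include <stdio.h>

/* Two ways to turn a leakage value (in hundredths) into 8.8 fixed point. */
int main(void)
{
	uint16_t leakage = 163;                          /* 1.63 in hundredths */
	uint16_t div_first = (uint16_t)(leakage / 100 * 256);
	uint16_t mul_first = (uint16_t)((uint32_t)leakage * 256 / 100);

	printf("%u %u\n", div_first, mul_first);         /* 256 417 */
	return 0;
}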
+
+static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+
+ if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ if (0 != polaris10_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan Control parameters Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+ "Sidd Failed!", return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count, level;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ count = data->mvdd_voltage_table.count;
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; level++) {
+ table->SmioTable2.Pattern[level].Voltage =
+				PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+ table->SmioTable2.Pattern[level].Smio =
+ (uint8_t) level;
+ table->Smio[level] |=
+ data->mvdd_voltage_table.entries[level].smio_low;
+ }
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+ table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ uint32_t count, level;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ count = data->vddci_voltage_table.count;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; ++level) {
+ table->SmioTable1.Pattern[level].Voltage =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
+
+ table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
+ }
+ }
+
+ table->SmioMask1 = data->vddci_voltage_table.mask_low;
+
+ return 0;
+}
+
+static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+	/* The lookup table is already swapped, so in order to use a value
+	 * from it we need to swap it back.
+	 * We are populating vddc CAC data into the BapmVddc table
+	 * in split and merged mode.
+	 */
+ for (count = 0; count < lookup_table->count; count++) {
+ index = phm_get_voltage_index(lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
+ table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ polaris10_populate_smc_vddci_table(hwmgr, table);
+ polaris10_populate_smc_mvdd_table(hwmgr, table);
+ polaris10_populate_cac_table(hwmgr, table);
+
+ return 0;
+}
+
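+ /* Fill the ULV (ultra-low voltage) state: voltage offset, VID offset
+ * and phase-shedding setting, byte-swapped for the SMC.
+ */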
+static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_Ulv *state)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker)
+ state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
+ else
+ state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
+
+static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
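+ /* Populate one PCIe link level per entry in the PCIe speed table
+ * (plus the reserved boot level) and derive the PCIe DPM enable mask.
+ */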
+static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int i;
+
+ /* Index (dpm_table->pcie_speed_table.count)
+ * is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+ dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+
+/* TODO: move to hwmgr */
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+
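+ /* Fill SclkFcwRangeTable from the VBIOS-provided range table when
+ * available; otherwise fall back to the built-in Range_Table and
+ * derive the transition frequencies from the reference clock.
+ */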
+static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t i, ref_clk;
+
+ struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
+
+ ref_clk = smu7_get_xclk(hwmgr);
+
+ if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
+ table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
+ table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
+
+ table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
+ table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+ }
+ return;
+ }
+
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ smu_data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
+ smu_data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
+
+ table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
+ table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
+ table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
+
+ table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
+ table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+ }
+}
+
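+ /* Compute the SCLK PLL settings for a clock: prefer the dividers
+ * returned by atomctrl, otherwise pick a PLL range and compute the
+ * integer/fractional FCW, PCC (10%) and spread-spectrum (2%) targets.
+ */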
+static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, SMU_SclkSetting *sclk_setting)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct pp_atomctrl_clock_dividers_ai dividers;
+ uint32_t ref_clock;
+ uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
+ uint8_t i;
+ int result;
+ uint64_t temp;
+
+ sclk_setting->SclkFrequency = clock;
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
+ if (result == 0) {
+ sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
+ sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
+ sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
+ sclk_setting->PllRange = dividers.ucSclkPllRange;
+ sclk_setting->Sclk_slew_rate = 0x400;
+ sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
+ sclk_setting->Pcc_down_slew_rate = 0xffff;
+ sclk_setting->SSc_En = dividers.ucSscEnable;
+ sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
+ sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
+ sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
+ return result;
+ }
+
+ ref_clock = smu7_get_xclk(hwmgr);
+
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ if (clock > smu_data->range_table[i].trans_lower_frequency
+ && clock <= smu_data->range_table[i].trans_upper_frequency) {
+ sclk_setting->PllRange = i;
+ break;
+ }
+ }
+
+ sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+ temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+ temp <<= 0x10;
+ do_div(temp, ref_clock);
+ sclk_setting->Fcw_frac = temp & 0xffff;
+
+ pcc_target_percent = 10; /* Hardcode 10% for now. */
+ pcc_target_freq = clock - (clock * pcc_target_percent / 100);
+ sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+
+ ss_target_percent = 2; /* Hardcode 2% for now. */
+ sclk_setting->SSc_En = 0;
+ if (ss_target_percent) {
+ sclk_setting->SSc_En = 1;
+ ss_target_freq = clock - (clock * ss_target_percent / 100);
+ sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+ temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+ temp <<= 0x10;
+ do_div(temp, ref_clock);
+ sclk_setting->Fcw1_frac = temp & 0xffff;
+ }
+
+ return 0;
+}
+
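+ /* Build a single graphics DPM level: SCLK settings, minimum voltage,
+ * activity threshold, hysteresis and deep-sleep divider, then
+ * byte-swap the fields for the SMC.
+ */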
+static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU74_Discrete_GraphicsLevel *level)
+{
+ int result;
+ /* PP_Clocks minClocks; */
+ uint32_t mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMU_SclkSetting curr_sclk_setting = { 0 };
+
+ result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
+
+ /* populate graphics levels */
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk, clock,
+ &level->MinVoltage, &mvdd);
+
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for "
+ "VDDC engine clock dependency table",
+ return result);
+ level->ActivityLevel = sclk_al_threshold;
+
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ level->EnabledForThrottle = 1;
+ level->UpHyst = 10;
+ level->DownHyst = 0;
+ level->VoltageDownHyst = 0;
+ level->PowerThrottle = 0;
+ data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock,
+ hwmgr->display_config.min_core_set_clock_in_sr);
+
+ /* Default to slow, highest DPM level will be
+ * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+ */
+ if (data->update_up_hyst)
+ level->UpHyst = (uint8_t)data->up_hyst;
+ if (data->update_down_hyst)
+ level->DownHyst = (uint8_t)data->down_hyst;
+
+ level->SclkSetting = curr_sclk_setting;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
+ return 0;
+}
+
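+ /* Build every graphics DPM level, assign a PCIe DPM level to each,
+ * and upload the whole GraphicsLevel array to SMC RAM.
+ */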
+static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count;
+ int result = 0;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
+ SMU74_MAX_LEVELS_GRAPHICS;
+ struct SMU74_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i, max_entry;
+ uint8_t hightest_pcie_level_enabled = 0,
+ lowest_pcie_level_enabled = 0,
+ mid_pcie_level_enabled = 0,
+ count = 0;
+
+ polaris10_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table));
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+
+ result = polaris10_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result)
+ return result;
+
+ /* Make sure only DPM levels 0-1 have the Deep Sleep divider ID populated. */
+ if (i > 1)
+ levels[i].DeepSleepDivId = 0;
+ }
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SPLLShutdownSupport))
+ smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
+
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_cnt - 1;
+ for (i = 0; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ } else {
+ while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (hightest_pcie_level_enabled + 1))) != 0))
+ hightest_pcie_level_enabled++;
+
+ while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0))
+ lowest_pcie_level_enabled++;
+
+ while ((count < hightest_pcie_level_enabled) &&
+ ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+ count++;
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+ hightest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled + 1 + count) :
+ hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to hightest_pcie_level_enabled */
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled */
+ levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled */
+ levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+ /* The level count is sent to the SMC once at SMC table init and never changes. */
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
+
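+ /* Build a single memory DPM level: minimum voltage/MVDD from the
+ * MCLK dependency table, activity target and stutter-mode enable.
+ */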
+static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ struct cgs_display_info info = {0, 0, NULL};
+ uint32_t mclk_stutter_mode_threshold = 40000;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (table_info->vdd_dep_on_mclk) {
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk, clock,
+ &mem_level->MinVoltage, &mem_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory "
+ "VDDC voltage dependency table", return result);
+ }
+
+ mem_level->MclkFrequency = clock;
+ mem_level->EnabledForThrottle = 1;
+ mem_level->EnabledForActivity = 0;
+ mem_level->UpHyst = 0;
+ mem_level->DownHyst = 100;
+ mem_level->VoltageDownHyst = 0;
+ mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ mem_level->StutterEnable = false;
+ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ data->display_timing.num_existing_displays = info.display_count;
+
+ if (mclk_stutter_mode_threshold &&
+ (clock <= mclk_stutter_mode_threshold) &&
+ (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE) & 0x1))
+ mem_level->StutterEnable = true;
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+ }
+ return result;
+}
+
+static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
+ int result;
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
+ uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
+ SMU74_MAX_LEVELS_MEMORY;
+ struct SMU74_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = polaris10_populate_single_memory_level(hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &levels[i]);
+ if (i == dpm_table->mclk_table.count - 1) {
+ levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+ levels[i].EnabledForActivity = 1;
+ }
+ if (result)
+ return result;
+ }
+
+ /* To prevent MC activity in stutter mode from pushing DPM up,
+ * the UVD change complements this by keeping MCLK in a higher
+ * state by default, so that we are not affected by the up
+ * threshold or by MCLK DPM latency.
+ */
+ levels[0].ActivityLevel = 0x1f;
+ CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount =
+ (uint8_t)dpm_table->mclk_table.count;
+ hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+ /* The level count is sent to the SMC once at SMC table init and never changes. */
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, SMC_RAM_END);
+
+ return result;
+}
+
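+ /* Look up the MVDD SMIO pattern for the lowest dependency-table entry
+ * whose clock satisfies the requested MCLK.
+ */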
+static int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find the first MVDD entry whose clock is at least the requested clock */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
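+ /* Fill the ACPI (lowest-power) SCLK and MCLK levels from the VBIOS
+ * boot values, including their minimum voltages and MVDD.
+ */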
+static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint32_t sclk_frequency;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMIO_Pattern vol_level;
+ uint32_t mvdd;
+ uint16_t us_mvdd;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ /* Get MinVoltage and Frequency from DPM0,
+ * already converted to SMC_UL */
+ sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk,
+ sclk_frequency,
+ &table->ACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDC voltage value "
+ "in Clock Dependency Table",
+ );
+
+ result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
+ PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ table->ACPILevel.DeepSleepDivId = 0;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
+
+
+ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+ table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk,
+ table->MemoryACPILevel.MclkFrequency,
+ &table->MemoryACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDCI voltage value "
+ "in Clock Dependency Table",
+ );
+
+ us_mvdd = 0;
+ if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+ (data->mclk_dpm_key_disabled))
+ us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else {
+ if (!polaris10_populate_mvdd_value(hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[0].value,
+ &vol_level))
+ us_mvdd = vol_level.Voltage;
+ }
+
+ if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
+ table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ table->MemoryACPILevel.StutterEnable = false;
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+ return result;
+}
+
+static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->VceLevelCount = (uint8_t)(mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
+ table->VceLevel[count].MinVoltage |=
+ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+
+ table->VceLevel[count].MinVoltage |=
+ (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+
+static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
+ table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
+}
+
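+ /* Ask the VBIOS to compute DRAM timings for an SCLK/MCLK pair and
+ * read the resulting MC_ARB registers back into one ARB table entry.
+ */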
+static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ uint32_t dram_timing;
+ uint32_t dram_timing2;
+ uint32_t burst_time;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ eng_clock, mem_clock);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+ arb_regs->McArbBurstTime = (uint8_t)burst_time;
+
+ return 0;
+}
+
+static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+ int result = 0;
+
+ for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) {
+ result = polaris10_populate_memory_timing_parameters(hwmgr,
+ hw_data->dpm_table.sclk_table.dpm_levels[i].value,
+ hw_data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (result == 0)
+ result = atomctrl_set_ac_timing_ai(hwmgr, hw_data->dpm_table.mclk_table.dpm_levels[j].value, j);
+ if (result != 0)
+ return result;
+ }
+ }
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU74_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END);
+ return result;
+}
+
+static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->UvdLevelCount = (uint8_t)(mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+ }
+
+ return result;
+}
+
+static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table */
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(table->GraphicsBootLevel));
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(table->MemoryBootLevel));
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return 0;
+}
+
+static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint8_t count, level;
+
+ count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+ hw_data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+ hw_data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
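+ /* Derive RO from the efuse, compute per-level clock-stretcher voltage
+ * offsets and program the CKS enable bits and lookup settings.
+ */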
+static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ /* Read the SMU efuse to calculate RO and determine whether
+ * the part is SS or FF; if RO >= 1660MHz, the part is FF.
+ */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (67 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ min = 1000;
+ max = 2300;
+ } else {
+ min = 1100;
+ max = 2100;
+ }
+
+ ro = efuse * (max - min) / 255 + min;
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
+ (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+ volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+ (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
+ } else {
+ volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
+ (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+ volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+ (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
+ }
+
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
+
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
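+ /* Encode the voltage-regulator configuration (VDDC, VDDCI and MVDD
+ * control methods) into the VRConfig word.
+ */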
+static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint16_t config;
+
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ PP_ASSERT_WITH_CODE(false,
+ "VDDC should be on SVI2 control in merged mode!",
+ );
+ }
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ }
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, smu_data->smu7_data.soft_regs_start +
+ offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+
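+ /* Fetch AVFS fuse data from the VBIOS, upload the mean/sigma and
+ * SCLK-offset tables to SMC RAM, and record the vdroop override settings.
+ */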
+static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ int result = 0;
+ struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+ AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+ AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+ uint32_t tmp, i;
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+
+ if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return result;
+
+ result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+ if (0 == result) {
+ table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+ table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+ table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+ table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+ table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+ table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+ table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+ table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+ table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+ table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
+ table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+ table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+ table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+ table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
+ table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+ AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+ AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+ AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+ AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+ AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+ AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+ AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+ for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+ AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+ AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
+ }
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+ &tmp, SMC_RAM_END);
+
+ smu7_copy_bytes_to_smc(hwmgr,
+ tmp,
+ (uint8_t *)&AVFS_meanNsigma,
+ sizeof(AVFS_meanNsigma_t),
+ SMC_RAM_END);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+ &tmp, SMC_RAM_END);
+ smu7_copy_bytes_to_smc(hwmgr,
+ tmp,
+ (uint8_t *)&AVFS_SclkOffset,
+ sizeof(AVFS_Sclk_Offset_t),
+ SMC_RAM_END);
+
+ data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+ data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+ }
+ return result;
+}
+
+static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t tmp;
+ int result;
+
+ /* This is a read-modify-write on the first byte of the ARB table.
+ * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
+ * is the field 'current'.
+ * This solution is ugly, but we never write the whole table, only
+ * individual fields in it.
+ * In reality this field should not be in that structure
+ * but in a soft register.
+ */
+ result = smu7_read_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &polaris10_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
+
+}
+
+static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct SMU74_Discrete_GraphicsLevel *levels =
+ data->smc_state_table.GraphicsLevel;
+ unsigned min_level = 1;
+
+ hwmgr->default_gfx_power_profile.activity_threshold =
+ be16_to_cpu(levels[0].ActivityLevel);
+ hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
+ hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
+ hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
+
+ hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
+
+ /* Workaround compute SDMA instability: disable lowest SCLK
+ * DPM level. Optimize compute power profile: Use only highest
+ * 2 power levels (if more than 2 are available), Hysteresis:
+ * 0ms up, 5ms down
+ */
+ if (data->smc_state_table.GraphicsDpmLevelCount > 2)
+ min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
+ else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
+ min_level = 1;
+ else
+ min_level = 0;
+ hwmgr->default_compute_power_profile.min_sclk =
+ be32_to_cpu(levels[min_level].SclkSetting.SclkFrequency);
+ hwmgr->default_compute_power_profile.up_hyst = 0;
+ hwmgr->default_compute_power_profile.down_hyst = 5;
+
+ hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
+}
+
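+ /* Build the complete SMU74 discrete DPM table (voltage tables, DPM
+ * levels, ACPI/UVD/VCE/SAMU levels, GPIO and thermal settings) and
+ * upload it to SMC memory.
+ */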
+static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ uint8_t i;
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+ pp_atomctrl_clock_dividers_vi dividers;
+
+ polaris10_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control)
+ polaris10_populate_smc_voltage_tables(hwmgr, table);
+
+ table->SystemFlags = 0;
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (hw_data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = polaris10_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT);
+ }
+
+ result = polaris10_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result);
+
+ result = polaris10_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result);
+
+ result = polaris10_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result);
+
+ result = polaris10_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result);
+
+ result = polaris10_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result);
+
+ result = polaris10_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result);
+
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = polaris10_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result);
+
+ result = polaris10_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result);
+
+ result = polaris10_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result);
+
+ result = polaris10_populate_smc_initailial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot State!", return result);
+
+ result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate BAPM Parameters!", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = polaris10_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate Clock Stretcher Data Table!",
+ return result);
+ }
+
+ result = polaris10_populate_avfs_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
+
+ table->CurrSclkPllRange = 0xff;
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0;
+ table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
+
+ result = polaris10_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate VRConfig setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin)) {
+ table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ /* Thermal Output GPIO */
+ if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+ &gpio_pin)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+ /* For polarity, read GPIOPAD_A with the assigned GPIO pin:
+ * the VBIOS programs this register to the 'inactive state',
+ * so the driver can determine the 'active state' from it and
+ * program the SMU with the correct polarity.
+ */
+ table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
+ & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
+ && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ } else {
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ /* Populate BIF_SCLK levels into SMC DPM table */
+ for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) {
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, smu_data->bif_sclk_table[i], &dividers);
+ PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
+
+ if (i == 0)
+ table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+ else
+ table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+ }
+
+ for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ /* Upload all DPM data to SMC memory (DPM level, DPM level count, etc.) */
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
+ SMC_RAM_END);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result);
+
+ result = polaris10_init_arb_table_index(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload arb data to SMC memory!", return result);
+
+ result = polaris10_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+
+ polaris10_save_default_power_profile(hwmgr);
+
+ return 0;
+}
+
+static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return polaris10_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return 0;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+
+ ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ?
+ 0 : -1;
+
+ if (!ret)
+ /* If this param is not changed, this function could fire unnecessarily */
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+ return ret;
+}
+
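+ /* Convert the fan-control parameters from the thermal controller into
+ * a SMU74 fan table (temperature points, slopes, FDO limits) and
+ * upload it to SMC RAM.
+ */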
+static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ if (smu_data->smu7_data.fan_table_start == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (duty100 == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+ usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->
+ thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+ thermal_controller.advanceFanControlParameters.ulCycleDelay *
+ reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+ hwmgr->device, CGS_IND_REG__SMC,
+ CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFanMinPwm,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFanSclkTarget,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+ if (res)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ return 0;
+}
+
+static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable,
+ UvdBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
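+ /* Read-modify-write the aligned dword: UvdBootLevel lives in its most significant byte */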
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
+
+static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ else
+ smu_data->smc_state_table.VceBootLevel = 0;
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+
+static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ int max_entry, i;
+
+ max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
+ SMU74_MAX_LEVELS_LINK :
+ pcie_table->count;
+ /* Setup BIF_SCLK levels */
+ for (i = 0; i < max_entry; i++)
+ smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
+ return 0;
+}
+
+static int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ polaris10_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ polaris10_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ polaris10_update_samu_smc_table(hwmgr);
+ break;
+ case SMU_BIF_TABLE:
+ polaris10_update_bif_smc_table(hwmgr);
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to update SCLK threshold!", return result);
+
+ result = polaris10_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+
+ return result;
+}
+
+static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU74_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU74_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU74_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU74_SoftRegisters, UcodeLoadStatus);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU74_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+static uint32_t polaris10_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU74_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU74_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU74_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU74_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU74_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU74_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU74_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU74_MAX_LEVELS_MVDD;
+ case SMU_UVD_MCLK_HANDSHAKE_DISABLE:
+ return SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+ }
+
+ pr_warn("can't get the mac of %x\n", value);
+ return 0;
+}
+
+static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->smu7_data.soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (0 != result);
+
+ return error ? -1 : 0;
+}
+
+static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+ ? true : false;
+}
+
+static int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)
+ (hwmgr->smu_backend);
+ struct SMU74_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
+ SMU74_MAX_LEVELS_GRAPHICS;
+ uint32_t i;
+
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ levels[i].ActivityLevel =
+ cpu_to_be16(request->activity_threshold);
+ levels[i].EnabledForActivity = 1;
+ levels[i].UpHyst = request->up_hyst;
+ levels[i].DownHyst = request->down_hyst;
+ }
+
+ return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ array_size, SMC_RAM_END);
+}
+
const struct pp_smumgr_func polaris10_smu_funcs = {
.smu_init = polaris10_smu_init,
.smu_fini = smu7_smu_fini,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
index ce0a30388ea1..b98ade676d12 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
@@ -48,20 +48,20 @@
#define smnMP1_FIRMWARE_FLAGS 0x3010028
-bool rv_is_smc_ram_running(struct pp_smumgr *smumgr)
+bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
uint32_t mp1_fw_flags, reg;
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
- cgs_write_register(smumgr->device, reg,
+ cgs_write_register(hwmgr->device, reg,
(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
- mp1_fw_flags = cgs_read_register(smumgr->device, reg);
+ mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
return true;
@@ -69,97 +69,97 @@ bool rv_is_smc_ram_running(struct pp_smumgr *smumgr)
return false;
}
-static uint32_t rv_wait_for_response(struct pp_smumgr *smumgr)
+static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr)
{
uint32_t reg;
- if (!rv_is_smc_ram_running(smumgr))
+ if (!rv_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- smum_wait_for_register_unequal(smumgr, reg,
+ phm_wait_for_register_unequal(hwmgr, reg,
0, MP1_C2PMSG_90__CONTENT_MASK);
- return cgs_read_register(smumgr->device, reg);
+ return cgs_read_register(hwmgr->device, reg);
}
-int rv_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr,
+int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg)
{
uint32_t reg;
- if (!rv_is_smc_ram_running(smumgr))
+ if (!rv_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
- cgs_write_register(smumgr->device, reg, msg);
+ cgs_write_register(hwmgr->device, reg, msg);
return 0;
}
-int rv_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg)
+int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
{
uint32_t reg;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- *arg = cgs_read_register(smumgr->device, reg);
+ *arg = cgs_read_register(hwmgr->device, reg);
return 0;
}
-int rv_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int rv_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
uint32_t reg;
- rv_wait_for_response(smumgr);
+ rv_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
- rv_send_msg_to_smc_without_waiting(smumgr, msg);
+ rv_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (rv_wait_for_response(smumgr) == 0)
+ if (rv_wait_for_response(hwmgr) == 0)
printk("Failed to send Message %x.\n", msg);
return 0;
}
-int rv_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
uint32_t reg;
- rv_wait_for_response(smumgr);
+ rv_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(smumgr->device, reg, parameter);
+ cgs_write_register(hwmgr->device, reg, parameter);
- rv_send_msg_to_smc_without_waiting(smumgr, msg);
+ rv_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (rv_wait_for_response(smumgr) == 0)
+ if (rv_wait_for_response(hwmgr) == 0)
printk("Failed to send Message %x.\n", msg);
return 0;
}
-int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
+int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct rv_smumgr *priv =
- (struct rv_smumgr *)(smumgr->backend);
+ (struct rv_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL;);
@@ -167,16 +167,16 @@ int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
"Invalid SMU Table version!", return -EINVAL;);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
@@ -188,11 +188,11 @@ int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
return 0;
}
-int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
+int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct rv_smumgr *priv =
- (struct rv_smumgr *)(smumgr->backend);
+ (struct rv_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL;);
@@ -204,17 +204,17 @@ int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
@@ -223,15 +223,15 @@ int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
return 0;
}
-static int rv_verify_smc_interface(struct pp_smumgr *smumgr)
+static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
uint32_t smc_driver_if_version;
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetDriverIfVersion),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
&smc_driver_if_version),
"Attempt to read SMC IF Version Number Failed!",
return -EINVAL);
@@ -243,9 +243,9 @@ static int rv_verify_smc_interface(struct pp_smumgr *smumgr)
}
/* sdma is disabled by default in vbios, need to re-enable in driver */
-static int rv_smc_enable_sdma(struct pp_smumgr *smumgr)
+static int rv_smc_enable_sdma(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
PPSMC_MSG_PowerUpSdma),
"Attempt to power up sdma Failed!",
return -EINVAL);
@@ -253,9 +253,9 @@ static int rv_smc_enable_sdma(struct pp_smumgr *smumgr)
return 0;
}
-static int rv_smc_disable_sdma(struct pp_smumgr *smumgr)
+static int rv_smc_disable_sdma(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
PPSMC_MSG_PowerDownSdma),
"Attempt to power down sdma Failed!",
return -EINVAL);
@@ -264,9 +264,9 @@ static int rv_smc_disable_sdma(struct pp_smumgr *smumgr)
}
/* vcn is disabled by default in vbios, need to re-enable in driver */
-static int rv_smc_enable_vcn(struct pp_smumgr *smumgr)
+static int rv_smc_enable_vcn(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerUpVcn, 0),
"Attempt to power up vcn Failed!",
return -EINVAL);
@@ -274,9 +274,9 @@ static int rv_smc_enable_vcn(struct pp_smumgr *smumgr)
return 0;
}
-static int rv_smc_disable_vcn(struct pp_smumgr *smumgr)
+static int rv_smc_disable_vcn(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerDownVcn, 0),
"Attempt to power down vcn Failed!",
return -EINVAL);
@@ -284,38 +284,38 @@ static int rv_smc_disable_vcn(struct pp_smumgr *smumgr)
return 0;
}
-static int rv_smu_fini(struct pp_smumgr *smumgr)
+static int rv_smu_fini(struct pp_hwmgr *hwmgr)
{
struct rv_smumgr *priv =
- (struct rv_smumgr *)(smumgr->backend);
+ (struct rv_smumgr *)(hwmgr->smu_backend);
if (priv) {
- rv_smc_disable_sdma(smumgr);
- rv_smc_disable_vcn(smumgr);
- cgs_free_gpu_mem(smumgr->device,
+ rv_smc_disable_sdma(hwmgr);
+ rv_smc_disable_vcn(hwmgr);
+ cgs_free_gpu_mem(hwmgr->device,
priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
priv->smu_tables.entry[CLOCKTABLE].handle);
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
}
return 0;
}
-static int rv_start_smu(struct pp_smumgr *smumgr)
+static int rv_start_smu(struct pp_hwmgr *hwmgr)
{
- if (rv_verify_smc_interface(smumgr))
+ if (rv_verify_smc_interface(hwmgr))
return -EINVAL;
- if (rv_smc_enable_sdma(smumgr))
+ if (rv_smc_enable_sdma(hwmgr))
return -EINVAL;
- if (rv_smc_enable_vcn(smumgr))
+ if (rv_smc_enable_vcn(hwmgr))
return -EINVAL;
return 0;
}
-static int rv_smu_init(struct pp_smumgr *smumgr)
+static int rv_smu_init(struct pp_hwmgr *hwmgr)
{
struct rv_smumgr *priv;
uint64_t mc_addr;
@@ -327,10 +327,10 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
if (!priv)
return -ENOMEM;
- smumgr->backend = priv;
+ hwmgr->smu_backend = priv;
/* allocate space for watermarks table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(Watermarks_t),
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -340,8 +340,8 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[rv_smu_init] Out of memory for wmtable.",
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
return -EINVAL);
priv->smu_tables.entry[WMTABLE].version = 0x01;
@@ -355,7 +355,7 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
priv->smu_tables.entry[WMTABLE].handle = handle;
/* allocate space for watermarks table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(DpmClocks_t),
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -365,10 +365,10 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[rv_smu_init] Out of memory for CLOCKTABLE.",
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
return -EINVAL);
priv->smu_tables.entry[CLOCKTABLE].version = 0x01;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
index 262c8ded87c0..58888400f1b8 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
@@ -51,11 +51,11 @@ struct rv_smumgr {
struct smu_table_array smu_tables;
};
-int rv_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg);
-bool rv_is_smc_ram_running(struct pp_smumgr *smumgr);
-int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
+int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
+bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
-int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
+int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index c49a6f22002f..7f5359a97ef2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -25,27 +25,28 @@
#include "pp_debug.h"
#include "smumgr.h"
#include "smu_ucode_xfer_vi.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "smu7_ppsmc.h"
#include "smu7_smumgr.h"
+#include "smu7_common.h"
+
+#include "polaris10_pwrvirus.h"
#define SMU7_SMC_SIZE 0x20000
-static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
+static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t limit)
{
PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, smc_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
return 0;
}
-int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
+int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
{
uint32_t data;
uint32_t addr;
@@ -59,7 +60,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres
addr = smc_start_address;
while (byte_count >= 4) {
- smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+ smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
*dest = PP_SMC_TO_HOST_UL(data);
@@ -69,7 +70,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres
}
if (byte_count) {
- smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+ smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
*pdata = PP_SMC_TO_HOST_UL(data);
/* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
dest_byte = (uint8_t *)dest;
@@ -81,7 +82,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres
}
-int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
int result;
@@ -99,12 +100,12 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
/* Bytes are written into the SMC address space with the MSB first. */
data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
- result = smu7_set_smc_sram_address(smumgr, addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, addr, limit);
if (0 != result)
return result;
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
src += 4;
byte_count -= 4;
@@ -115,13 +116,13 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
data = 0;
- result = smu7_set_smc_sram_address(smumgr, addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, addr, limit);
if (0 != result)
return result;
- original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+ original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
extra_shift = 8 * (4 - byte_count);
@@ -135,53 +136,53 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
data |= (original_data & ~((~0UL) << extra_shift));
- result = smu7_set_smc_sram_address(smumgr, addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, addr, limit);
if (0 != result)
return result;
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
}
return 0;
}
-int smu7_program_jump_on_start(struct pp_smumgr *smumgr)
+int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
- smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
+ smu7_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
return 0;
}
-bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr)
+bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
- return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
- && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
+ return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+ && (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
}
-int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int ret;
- if (!smu7_is_smc_ram_running(smumgr))
+ if (!smu7_is_smc_ram_running(hwmgr))
return -EINVAL;
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+ ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
pr_info("\n failed to send pre message %x ret is %d \n", msg, ret);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+ ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
pr_info("\n failed to send message %x ret is %d \n", msg, ret);
@@ -189,53 +190,53 @@ int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
return 0;
}
-int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
+int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
return 0;
}
-int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(hwmgr)) {
return -EINVAL;
}
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
- return smu7_send_msg_to_smc(smumgr, msg);
+ return smu7_send_msg_to_smc(hwmgr, msg);
}
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
- return smu7_send_msg_to_smc_without_waiting(smumgr, msg);
+ return smu7_send_msg_to_smc_without_waiting(hwmgr, msg);
}
-int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
+int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
{
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
+ if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP))
pr_info("Failed to send Message.\n");
return 0;
}
-int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr)
+int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr)
{
- if (!smu7_is_smc_ram_running(smumgr))
+ if (!smu7_is_smc_ram_running(hwmgr))
return -EINVAL;
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
return 0;
}
@@ -289,29 +290,29 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
}
-int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
+int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
{
int result;
- result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);
if (result)
return result;
- *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+ *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
return 0;
}
-int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
+int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
{
int result;
- result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);
if (result)
return result;
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, value);
return 0;
}
@@ -354,14 +355,14 @@ static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
return result;
}
-static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
+static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
uint32_t fw_type,
struct SMU_Entry *entry)
{
int result = 0;
struct cgs_firmware_info info = {0};
- result = cgs_get_firmware_info(smumgr->device,
+ result = cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(fw_type),
&info);
@@ -374,7 +375,7 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
entry->meta_data_addr_low = 0;
/* digest need be excluded out */
- if (cgs_is_virtualization_enabled(smumgr->device))
+ if (cgs_is_virtualization_enabled(hwmgr->device))
info.image_size -= 20;
entry->data_size_byte = info.image_size;
entry->num_register_entries = 0;
@@ -389,30 +390,30 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
return 0;
}
-int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
+int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
uint32_t fw_to_load;
int result = 0;
struct SMU_DRAMData_TOC *toc;
- if (!smumgr->reload_fw) {
+ if (!hwmgr->reload_fw) {
pr_info("skip reloading...\n");
return 0;
}
if (smu_data->soft_regs_start)
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
- smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters, UcodeLoadStatus),
0x0);
- if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
- if (!cgs_is_virtualization_enabled(smumgr->device)) {
- smu7_send_msg_to_smc_with_parameter(smumgr,
+ if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
+ if (!cgs_is_virtualization_enabled(hwmgr->device)) {
+ smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_HI,
smu_data->smu_buffer.mc_addr_high);
- smu7_send_msg_to_smc_with_parameter(smumgr,
+ smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_LO,
smu_data->smu_buffer.mc_addr_low);
}
@@ -439,122 +440,162 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
toc->num_entries = 0;
toc->structure_version = 1;
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- if (cgs_is_virtualization_enabled(smumgr->device))
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ if (cgs_is_virtualization_enabled(hwmgr->device))
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
- smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
- if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
+ if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
pr_err("Fail to Request SMU Load uCode");
return result;
}
/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
-int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
+int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
uint32_t ret;
- ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
- smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+ ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
+ smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters, UcodeLoadStatus),
fw_mask, fw_mask);
-
return ret;
}
-int smu7_reload_firmware(struct pp_smumgr *smumgr)
+int smu7_reload_firmware(struct pp_hwmgr *hwmgr)
{
- return smumgr->smumgr_funcs->start_smu(smumgr);
+ return hwmgr->smumgr_funcs->start_smu(hwmgr);
}
-static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
+static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, uint32_t *src, uint32_t limit)
{
uint32_t byte_count = length;
PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, 0x20000);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
for (; byte_count >= 4; byte_count -= 4)
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, *src++);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
- PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);
return 0;
}
-int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
+int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
struct cgs_firmware_info info = {0};
if (smu_data->security_hard_key == 1)
- cgs_get_firmware_info(smumgr->device,
+ cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
else
- cgs_get_firmware_info(smumgr->device,
+ cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
- smumgr->is_kicker = info.is_kicker;
+ hwmgr->is_kicker = info.is_kicker;
- result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
+ result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
return result;
}
-int smu7_init(struct pp_smumgr *smumgr)
+static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
+{
+ int i;
+ uint32_t reg, data;
+
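+ /* The table is a list of register/value pairs terminated by a register index of 0xffffffff */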
+ for (i = 0; i < size; i++) {
+ reg = pvirus->reg;
+ data = pvirus->data;
+ if (reg != 0xffffffff)
+ cgs_write_register(hwmgr->device, reg, data);
+ else
+ break;
+ pvirus++;
+ }
+}
+
+static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
+{
+ int i;
+
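+ /* Program the CP_DFY control and destination address, then stream the section payload through CP_DFY_DATA_0 */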
+ cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
+ cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
+ cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
+ for (i = 0; i < section->dfy_size; i++)
+ cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
+}
+
+int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
+{
+ execute_pwr_table(hwmgr, pwr_virus_table_pre, ARRAY_SIZE(pwr_virus_table_pre));
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
+ execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
+ execute_pwr_table(hwmgr, pwr_virus_table_post, ARRAY_SIZE(pwr_virus_table_post));
+
+ return 0;
+}
+
+int smu7_init(struct pp_hwmgr *hwmgr)
{
struct smu7_smumgr *smu_data;
uint8_t *internal_buf;
uint64_t mc_addr = 0;
/* Allocate memory for backend private data */
- smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
smu_data->header_buffer.data_size =
((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
/* Allocate FW image data structure and header buffer and
* send the header buffer address to SMU */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
smu_data->header_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -568,16 +609,16 @@ int smu7_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE((NULL != smu_data->header),
"Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)smu_data->header_buffer.handle);
return -EINVAL);
- if (cgs_is_virtualization_enabled(smumgr->device))
+ if (cgs_is_virtualization_enabled(hwmgr->device))
return 0;
smu_data->smu_buffer.data_size = 200*4096;
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
smu_data->smu_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -591,12 +632,12 @@ int smu7_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE((NULL != internal_buf),
"Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)smu_data->smu_buffer.handle);
return -EINVAL);
- if (smum_is_hw_avfs_present(smumgr))
+ if (smum_is_hw_avfs_present(hwmgr))
smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
else
smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
@@ -605,12 +646,10 @@ int smu7_init(struct pp_smumgr *smumgr)
}
-int smu7_smu_fini(struct pp_smumgr *smumgr)
+int smu7_smu_fini(struct pp_hwmgr *hwmgr)
{
- if (smumgr->backend) {
- kfree(smumgr->backend);
- smumgr->backend = NULL;
- }
- cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
+ cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index ee5e32d2921e..c87263bc0caa 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -60,32 +60,34 @@ struct smu7_smumgr {
};
-int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
uint32_t *dest, uint32_t byte_count, uint32_t limit);
-int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit);
-int smu7_program_jump_on_start(struct pp_smumgr *smumgr);
-bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr);
-int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
-int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg);
-int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg,
+int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr);
+bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg);
+int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg,
uint32_t parameter);
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr,
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter);
-int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr);
-int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr);
+int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr);
+int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr);
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
-int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
uint32_t *value, uint32_t limit);
-int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
uint32_t value, uint32_t limit);
-int smu7_request_smu_load_fw(struct pp_smumgr *smumgr);
-int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type);
-int smu7_reload_firmware(struct pp_smumgr *smumgr);
-int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr);
-int smu7_init(struct pp_smumgr *smumgr);
-int smu7_smu_fini(struct pp_smumgr *smumgr);
+int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr);
+int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type);
+int smu7_reload_firmware(struct pp_hwmgr *hwmgr);
+int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr);
+int smu7_init(struct pp_hwmgr *hwmgr);
+int smu7_smu_fini(struct pp_hwmgr *hwmgr);
-#endif
\ No newline at end of file
+int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 3bdf6478de7f..867388456530 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <drm/amdgpu_drm.h>
-#include "pp_instance.h"
#include "smumgr.h"
#include "cgs_common.h"
@@ -46,88 +45,18 @@ MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
-int smum_early_init(struct pp_instance *handle)
+int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
{
- struct pp_smumgr *smumgr;
-
- if (handle == NULL)
- return -EINVAL;
-
- smumgr = kzalloc(sizeof(struct pp_smumgr), GFP_KERNEL);
- if (smumgr == NULL)
- return -ENOMEM;
-
- smumgr->device = handle->device;
- smumgr->chip_family = handle->chip_family;
- smumgr->chip_id = handle->chip_id;
- smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
- smumgr->reload_fw = 1;
- handle->smu_mgr = smumgr;
-
- switch (smumgr->chip_family) {
- case AMDGPU_FAMILY_CZ:
- smumgr->smumgr_funcs = &cz_smu_funcs;
- break;
- case AMDGPU_FAMILY_VI:
- switch (smumgr->chip_id) {
- case CHIP_TOPAZ:
- smumgr->smumgr_funcs = &iceland_smu_funcs;
- break;
- case CHIP_TONGA:
- smumgr->smumgr_funcs = &tonga_smu_funcs;
- break;
- case CHIP_FIJI:
- smumgr->smumgr_funcs = &fiji_smu_funcs;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- case CHIP_POLARIS12:
- smumgr->smumgr_funcs = &polaris10_smu_funcs;
- break;
- default:
- return -EINVAL;
- }
- break;
- case AMDGPU_FAMILY_AI:
- switch (smumgr->chip_id) {
- case CHIP_VEGA10:
- smumgr->smumgr_funcs = &vega10_smu_funcs;
- break;
- default:
- return -EINVAL;
- }
- break;
- case AMDGPU_FAMILY_RV:
- switch (smumgr->chip_id) {
- case CHIP_RAVEN:
- smumgr->smumgr_funcs = &rv_smu_funcs;
- break;
- default:
- return -EINVAL;
- }
- break;
- default:
- kfree(smumgr);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable)
- return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->thermal_avfs_enable)
+ return hwmgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
return 0;
}
-int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table)
- return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->thermal_setup_fan_table)
+ return hwmgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
return 0;
}
@@ -135,8 +64,8 @@ int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
- return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->update_sclk_threshold)
+ return hwmgr->smumgr_funcs->update_sclk_threshold(hwmgr);
return 0;
}
@@ -144,163 +73,75 @@ int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table)
- return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type);
+ if (NULL != hwmgr->smumgr_funcs->update_smc_table)
+ return hwmgr->smumgr_funcs->update_smc_table(hwmgr, type);
return 0;
}
-uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member)
+uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr, uint32_t type, uint32_t member)
{
- if (NULL != smumgr->smumgr_funcs->get_offsetof)
- return smumgr->smumgr_funcs->get_offsetof(type, member);
+ if (NULL != hwmgr->smumgr_funcs->get_offsetof)
+ return hwmgr->smumgr_funcs->get_offsetof(type, member);
return 0;
}
int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header)
- return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->process_firmware_header)
+ return hwmgr->smumgr_funcs->process_firmware_header(hwmgr);
return 0;
}
-int smum_get_argument(struct pp_smumgr *smumgr)
+int smum_get_argument(struct pp_hwmgr *hwmgr)
{
- if (NULL != smumgr->smumgr_funcs->get_argument)
- return smumgr->smumgr_funcs->get_argument(smumgr);
+ if (NULL != hwmgr->smumgr_funcs->get_argument)
+ return hwmgr->smumgr_funcs->get_argument(hwmgr);
return 0;
}
-uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value)
+uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value)
{
- if (NULL != smumgr->smumgr_funcs->get_mac_definition)
- return smumgr->smumgr_funcs->get_mac_definition(value);
+ if (NULL != hwmgr->smumgr_funcs->get_mac_definition)
+ return hwmgr->smumgr_funcs->get_mac_definition(value);
return 0;
}
-int smum_download_powerplay_table(struct pp_smumgr *smumgr,
- void **table)
+int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table)
{
- if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
- return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
+ if (NULL != hwmgr->smumgr_funcs->download_pptable_settings)
+ return hwmgr->smumgr_funcs->download_pptable_settings(hwmgr,
table);
return 0;
}
-int smum_upload_powerplay_table(struct pp_smumgr *smumgr)
+int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != smumgr->smumgr_funcs->upload_pptable_settings)
- return smumgr->smumgr_funcs->upload_pptable_settings(smumgr);
+ if (NULL != hwmgr->smumgr_funcs->upload_pptable_settings)
+ return hwmgr->smumgr_funcs->upload_pptable_settings(hwmgr);
return 0;
}
-int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- if (smumgr == NULL || smumgr->smumgr_funcs->send_msg_to_smc == NULL)
+ if (hwmgr == NULL || hwmgr->smumgr_funcs->send_msg_to_smc == NULL)
return -EINVAL;
- return smumgr->smumgr_funcs->send_msg_to_smc(smumgr, msg);
+ return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
}
-int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
- if (smumgr == NULL ||
- smumgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
- return -EINVAL;
- return smumgr->smumgr_funcs->send_msg_to_smc_with_parameter(
- smumgr, msg, parameter);
-}
-
-/*
- * Returns once the part of the register indicated by the mask has
- * reached the given value.
- */
-int smum_wait_on_register(struct pp_smumgr *smumgr,
- uint32_t index,
- uint32_t value, uint32_t mask)
-{
- uint32_t i;
- uint32_t cur_value;
-
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- for (i = 0; i < smumgr->usec_timeout; i++) {
- cur_value = cgs_read_register(smumgr->device, index);
- if ((cur_value & mask) == (value & mask))
- break;
- udelay(1);
- }
-
- /* timeout means wrong logic*/
- if (i == smumgr->usec_timeout)
- return -1;
-
- return 0;
-}
-
-int smum_wait_for_register_unequal(struct pp_smumgr *smumgr,
- uint32_t index,
- uint32_t value, uint32_t mask)
-{
- uint32_t i;
- uint32_t cur_value;
-
- if (smumgr == NULL)
+ if (hwmgr == NULL ||
+ hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
return -EINVAL;
-
- for (i = 0; i < smumgr->usec_timeout; i++) {
- cur_value = cgs_read_register(smumgr->device,
- index);
- if ((cur_value & mask) != (value & mask))
- break;
- udelay(1);
- }
-
- /* timeout means wrong logic */
- if (i == smumgr->usec_timeout)
- return -1;
-
- return 0;
-}
-
-
-/*
- * Returns once the part of the register indicated by the mask
- * has reached the given value.The indirect space is described by
- * giving the memory-mapped index of the indirect index register.
- */
-int smum_wait_on_indirect_register(struct pp_smumgr *smumgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- cgs_write_register(smumgr->device, indirect_port, index);
- return smum_wait_on_register(smumgr, indirect_port + 1,
- mask, value);
-}
-
-void smum_wait_for_indirect_register_unequal(
- struct pp_smumgr *smumgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return;
- cgs_write_register(smumgr->device, indirect_port, index);
- smum_wait_for_register_unequal(smumgr, indirect_port + 1,
- value, mask);
+ return hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter(
+ hwmgr, msg, parameter);
}
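
The wrappers above all follow the same optional-callback dispatch: call the backend hook through hwmgr->smumgr_funcs when it is implemented, otherwise return a default. A minimal standalone sketch of that pattern, using invented types and names rather than the real powerplay structures:

#include <stddef.h>
#include <stdio.h>

/* Optional-callback dispatch: invoke the backend hook if present,
 * otherwise fall back to a default return value.  Hypothetical types;
 * not the powerplay structures themselves. */
struct backend_funcs {
	int (*init_table)(void *ctx);
};

struct manager {
	void *ctx;
	const struct backend_funcs *funcs;
};

static int manager_init_table(struct manager *mgr)
{
	if (mgr->funcs->init_table)
		return mgr->funcs->init_table(mgr->ctx);
	return 0;	/* hook not implemented: treat as success */
}

static int dummy_init(void *ctx)
{
	(void)ctx;
	printf("backend init_table called\n");
	return 0;
}

int main(void)
{
	const struct backend_funcs funcs = { .init_table = dummy_init };
	struct manager mgr = { .ctx = NULL, .funcs = &funcs };

	return manager_init_table(&mgr);
}
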
int smu_allocate_memory(void *device, uint32_t size,
@@ -316,7 +157,7 @@ int smu_allocate_memory(void *device, uint32_t size,
return -EINVAL;
ret = cgs_alloc_gpu_mem(device, type, size, byte_align,
- 0, 0, (cgs_handle_t *)handle);
+ (cgs_handle_t *)handle);
if (ret)
return -ENOMEM;
@@ -356,24 +197,24 @@ int smu_free_memory(void *device, void *handle)
int smum_init_smc_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table)
- return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->init_smc_table)
+ return hwmgr->smumgr_funcs->init_smc_table(hwmgr);
return 0;
}
int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels)
- return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->populate_all_graphic_levels)
+ return hwmgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
return 0;
}
int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels)
- return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->populate_all_memory_levels)
+ return hwmgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
return 0;
}
@@ -381,16 +222,16 @@ int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
/*this interface is needed by island ci/vi */
int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table)
- return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->initialize_mc_reg_table)
+ return hwmgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
return 0;
}
bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running)
- return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->is_dpm_running)
+ return hwmgr->smumgr_funcs->is_dpm_running(hwmgr);
return true;
}
@@ -398,17 +239,17 @@ bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request)
{
- if (hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels)
- return hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels(
+ if (hwmgr->smumgr_funcs->populate_requested_graphic_levels)
+ return hwmgr->smumgr_funcs->populate_requested_graphic_levels(
hwmgr, request);
return 0;
}
-bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr)
+bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
- if (smumgr->smumgr_funcs->is_hw_avfs_present)
- return smumgr->smumgr_funcs->is_hw_avfs_present(smumgr);
+ if (hwmgr->smumgr_funcs->is_hw_avfs_present)
+ return hwmgr->smumgr_funcs->is_hw_avfs_present(hwmgr);
return false;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
deleted file mode 100644
index 65d3a4893958..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
+++ /dev/null
@@ -1,3275 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "pp_debug.h"
-#include "tonga_smc.h"
-#include "smu7_dyn_defaults.h"
-
-#include "smu7_hwmgr.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "cgs_common.h"
-#include "atombios.h"
-#include "tonga_smumgr.h"
-#include "pppcielanes.h"
-#include "pp_endian.h"
-#include "smu7_ppsmc.h"
-
-#include "smu72_discrete.h"
-
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-
-
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX 1
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define VDDC_VDDCI_DELTA 200
-
-
-static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
-/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
- * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
- */
- {1, 0xF, 0xFD, 0x19,
- 5, 45, 0, 0xB0000,
- {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
- 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
- {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
- 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
- },
-};
-
-/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
-static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
- {600, 1050, 3, 0},
- {600, 1050, 6, 1}
-};
-
-/* [FF, SS] type, [] 4 voltage ranges,
- * and [Floor Freq, Boundary Freq, VID min , VID max]
- */
-static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
- { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
- { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
-};
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
-static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
- {0, 1, 3, 2, 4, 5},
- {0, 2, 4, 5, 6, 5}
-};
-
-/* PPGen has the gain setting generated in x * 100 unit
- * This function is to convert the unit to x * 4096(0x1000) unit.
- * This is the unit expected by SMC firmware
- */
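
The comment above describes converting a gain stored in x * 100 units into the x * 4096 (0x1000) fixed-point unit the SMC firmware expects; the helper it documented is no longer present in this file. A minimal standalone sketch of that conversion (the function name and the rounding choice are assumptions made for illustration):

#include <stdint.h>
#include <stdio.h>

/* Convert a gain stored as (value * 100) into (value * 4096), the 0x1000
 * fixed-point unit expected by the SMC firmware.  Hypothetical helper;
 * rounds to the nearest integer. */
static uint32_t gain_100_to_4096(uint32_t gain_x100)
{
	return (gain_x100 * 4096 + 50) / 100;
}

int main(void)
{
	/* A gain of 1.50 is stored as 150 in x*100 units -> 6144 in x*4096 units. */
	printf("150 (x100) -> %u (x4096)\n", gain_100_to_4096(150));
	return 0;
}
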
-
-
-static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
- uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
- uint32_t i = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-	/* the clock-voltage dependency table is empty */
- if (allowed_clock_voltage_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < allowed_clock_voltage_table->count; i++) {
- /* find first sclk bigger than request */
- if (allowed_clock_voltage_table->entries[i].clk >= clock) {
- voltage->VddGfx = phm_get_voltage_index(
- pptable_info->vddgfx_lookup_table,
- allowed_clock_voltage_table->entries[i].vddgfx);
- voltage->Vddc = phm_get_voltage_index(
- pptable_info->vddc_lookup_table,
- allowed_clock_voltage_table->entries[i].vddc);
-
- if (allowed_clock_voltage_table->entries[i].vddci)
- voltage->Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
- else
- voltage->Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
-
-
- if (allowed_clock_voltage_table->entries[i].mvdd)
- *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
-
- voltage->Phases = 1;
- return 0;
- }
- }
-
-	/* sclk is bigger than the max sclk in the dependency table */
- voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- allowed_clock_voltage_table->entries[i-1].vddgfx);
- voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
- allowed_clock_voltage_table->entries[i-1].vddc);
-
- if (allowed_clock_voltage_table->entries[i-1].vddci)
- voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
- allowed_clock_voltage_table->entries[i-1].vddci);
-
- if (allowed_clock_voltage_table->entries[i-1].mvdd)
- *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
-
- return 0;
-}
-
-
-/**
- * Vddc table preparation for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- unsigned int count;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- table->VddcLevelCount = data->vddc_voltage_table.count;
- for (count = 0; count < table->VddcLevelCount; count++) {
- table->VddcTable[count] =
- PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
- }
- return 0;
-}
-
-/**
- * VddGfx table preparation for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- unsigned int count;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
- table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
- for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
- table->VddGfxTable[count] =
- PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
- }
- return 0;
-}
-
-/**
- * Vddci table preparation for SMC.
- *
- * @param *hwmgr The address of the hardware manager.
- * @param *table The SMC DPM table structure to be populated.
- * @return 0
- */
-static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t count;
-
- table->VddciLevelCount = data->vddci_voltage_table.count;
- for (count = 0; count < table->VddciLevelCount; count++) {
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- table->VddciTable[count] =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- table->SmioTable1.Pattern[count].Voltage =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
- table->SmioTable1.Pattern[count].Smio =
- (uint8_t) count;
- table->Smio[count] |=
- data->vddci_voltage_table.entries[count].smio_low;
- table->VddciTable[count] =
- PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
- }
- }
-
- table->SmioMask1 = data->vddci_voltage_table.mask_low;
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
-
- return 0;
-}
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param *hwmgr The address of the hardware manager.
- * @param *table The SMC DPM table structure to be populated.
- * @return 0
- */
-static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t count;
-
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- table->MvddLevelCount = data->mvdd_voltage_table.count;
- for (count = 0; count < table->MvddLevelCount; count++) {
- table->SmioTable2.Pattern[count].Voltage =
- PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
- /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
- table->SmioTable2.Pattern[count].Smio =
- (uint8_t) count;
- table->Smio[count] |=
- data->mvdd_voltage_table.entries[count].smio_low;
- }
- table->SmioMask2 = data->mvdd_voltage_table.mask_low;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
- }
-
- return 0;
-}
-
-/**
- * Preparation of vddc and vddgfx CAC tables for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
- pptable_info->vddgfx_lookup_table;
- struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
- pptable_info->vddc_lookup_table;
-
- /* table is already swapped, so in order to use the value from it
- * we need to swap it back.
- */
- uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
- uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
-
- for (count = 0; count < vddc_level_count; count++) {
- /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
- index = phm_get_voltage_index(vddc_lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
- table->BapmVddcVidHiSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
- table->BapmVddcVidHiSidd2[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
- }
-
- if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) {
- /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
- for (count = 0; count < vddgfx_level_count; count++) {
- index = phm_get_voltage_index(vddgfx_lookup_table,
- convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid));
- table->BapmVddGfxVidHiSidd2[count] =
- convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
- }
- } else {
- for (count = 0; count < vddc_level_count; count++) {
- index = phm_get_voltage_index(vddc_lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddGfxVidLoSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
- table->BapmVddGfxVidHiSidd[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
- table->BapmVddGfxVidHiSidd2[count] =
- convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
- }
- }
-
- return 0;
-}
-
-/**
- * Preparation of voltage tables for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-
-static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result;
-
- result = tonga_populate_smc_vddc_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate VDDC voltage table to SMC",
- return -EINVAL);
-
- result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate VDDCI voltage table to SMC",
- return -EINVAL);
-
- result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate VDDGFX voltage table to SMC",
- return -EINVAL);
-
- result = tonga_populate_smc_mvdd_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate MVDD voltage table to SMC",
- return -EINVAL);
-
- result = tonga_populate_cac_tables(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "can not populate CAC voltage tables to SMC",
- return -EINVAL);
-
- return 0;
-}
-
-static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU72_Discrete_Ulv *state)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
- state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
-
- state->VddcPhase = 1;
-
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
-
- return 0;
-}
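
The VddcOffsetVid calculation above scales the millivolt offset by VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 = 100 / 625, i.e. one VID step per 6.25 mV. A standalone sketch of the same arithmetic with an example offset (the 50 mV value is invented):

#include <stdint.h>
#include <stdio.h>

#define VOLTAGE_VID_OFFSET_SCALE1 625
#define VOLTAGE_VID_OFFSET_SCALE2 100

int main(void)
{
	uint16_t ulv_voltage_offset = 50;	/* hypothetical ULV offset in mV */
	uint8_t vid = (uint8_t)(ulv_voltage_offset *
			VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);

	/* 50 mV / 6.25 mV per step = 8 VID steps */
	printf("offset %u mV -> %u VID steps\n",
	       (unsigned)ulv_voltage_offset, (unsigned)vid);
	return 0;
}
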
-
-static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU72_Discrete_DpmTable *table)
-{
- return tonga_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t i;
-
- /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount =
- (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity =
- 1;
- table->LinkLevel[i].SPC =
- (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold =
- PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold =
- PP_HOST_TO_SMC_UL(30);
- }
-
- smu_data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-/**
- * Calculates the SCLK dividers using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t reference_clock;
- uint32_t reference_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
- reference_clock = atomctrl_get_reference_clock(hwmgr);
-
- reference_divider = 1 + dividers.uc_pll_ref_div;
-
- /* low 14 bits is fraction and high 12 bits is divider*/
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
- /* SPLL_FUNC_CNTL setup*/
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- pp_atomctrl_internal_ss_info ss_info;
-
- uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
- if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- */
- /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
- uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
-
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
-
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 =
- PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
- }
- }
-
- sclk->SclkFrequency = engine_clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
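
The spread-spectrum branch above derives the CLKS and CLKV fields of CG_SPLL_SPREAD_SPECTRUM from the reference clock, reference divider, feedback divider and the VBIOS spread-spectrum info. A standalone sketch that reproduces just that arithmetic for one hypothetical operating point (all input numbers below are invented examples, not values read from a real VBIOS):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reference_clock = 2500;	/* example Fref, 10 kHz units */
	uint32_t reference_divider = 1 + 4;	/* 1 + uc_pll_ref_div */
	uint32_t ss_rate = 30;			/* speed_spectrum_rate, kHz */
	uint32_t ss_percentage = 50;		/* 0.50% expressed in 0.01% units */
	uint32_t fbdiv = 0x32000;		/* 12.5 in 14-bit fractional units */

	/* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
	uint32_t clks = reference_clock * 5 / (reference_divider * ss_rate);
	/* clkv = 2 * D * fbdiv / NS, folded into the 0.01% and fractional units */
	uint32_t clkv = 4 * ss_percentage * fbdiv / (clks * 10000);

	printf("CLKS = %u, CLKV = %u\n", clks, clkv);
	return 0;
}
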
-
-/**
- * Populates single SMC SCLK structure using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock,
- uint16_t sclk_activity_level_threshold,
- SMU72_Discrete_GraphicsLevel *graphic_level)
-{
- int result;
- uint32_t mvdd;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
-
- /* populate graphics levels*/
- result = tonga_get_dependecy_volt_by_clk(hwmgr,
- pptable_info->vdd_dep_on_sclk, engine_clock,
- &graphic_level->MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((!result),
- "can not find VDDC voltage value for VDDC "
- "engine clock dependency table", return result);
-
- /* SCLK frequency in units of 10KHz*/
- graphic_level->SclkFrequency = engine_clock;
- /* Indicates maximum activity level for this performance level. 50% for now*/
- graphic_level->ActivityLevel = sclk_activity_level_threshold;
-
- graphic_level->CcPwrDynRm = 0;
- graphic_level->CcPwrDynRm1 = 0;
- /* this level can be used if activity is high enough.*/
- graphic_level->EnabledForActivity = 0;
- /* this level can be used for throttling.*/
- graphic_level->EnabledForThrottle = 1;
- graphic_level->UpHyst = 0;
- graphic_level->DownHyst = 0;
- graphic_level->VoltageDownHyst = 0;
- graphic_level->PowerThrottle = 0;
-
- data->display_timing.min_clock_in_sr =
- hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep))
- graphic_level->DeepSleepDivId =
- smu7_get_sleep_divider_id_from_clock(engine_clock,
- data->display_timing.min_clock_in_sr);
-
- /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
- graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- if (!result) {
- /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
- /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
- }
-
- return result;
-}
-
-/**
- * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
- uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
- uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
-
- uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
- SMU72_MAX_LEVELS_GRAPHICS;
-
- SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
-
- uint32_t i, max_entry;
- uint8_t highest_pcie_level_enabled = 0;
- uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
- uint8_t count = 0;
- int result = 0;
-
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = tonga_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)smu_data->activity_target[i],
- &(smu_data->smc_state_table.GraphicsLevel[i]));
- if (result != 0)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
- }
-
- /* Only enable level 0 for now. */
- smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-
- /* set highest level watermark to high */
- if (dpm_table->sclk_table.count > 1)
- smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- smu_data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
- max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
- (uint8_t) ((i < max_entry) ? i : max_entry);
- }
- } else {
- if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
- pr_err("Pcie Dpm Enablemask is 0 !");
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<(highest_pcie_level_enabled+1))) != 0)) {
- highest_pcie_level_enabled++;
- }
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<lowest_pcie_level_enabled)) == 0)) {
- lowest_pcie_level_enabled++;
- }
-
- while ((count < highest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
- count++;
- }
- mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
- (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
-
-
- /* set pcieDpmLevel to highest_pcie_level_enabled*/
- for (i = 2; i < dpm_table->sclk_table.count; i++)
- smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled*/
- smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled*/
- smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
-	/* The level count is sent to the SMC once, at SMC table init, and never changes. */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address,
- (uint8_t *)levels, (uint32_t)level_array_size,
- SMC_RAM_END);
-
- return result;
-}
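
When no pcie_table is provided, the fallback branch above scans pcie_dpm_enable_mask bit by bit to pick the lowest, mid and highest enabled PCIe DPM levels. A standalone sketch of that scan for one example mask (the value 0x7, i.e. levels 0..2 enabled, is an assumption chosen for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0x7;
	unsigned highest = 0, lowest = 0, mid, count = 0;

	/* walk up while the next higher bit is still set */
	while (mask && (mask & (1u << (highest + 1))))
		highest++;
	/* walk up to the first set bit */
	while (mask && !(mask & (1u << lowest)))
		lowest++;
	/* first enabled level strictly between lowest and highest, clamped */
	while (count < highest && !(mask & (1u << (lowest + 1 + count))))
		count++;
	mid = (lowest + 1 + count) < highest ? (lowest + 1 + count) : highest;

	printf("lowest=%u mid=%u highest=%u\n", lowest, mid, highest);
	return 0;
}
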
-
-/**
- * Populates the SMC MCLK structure using the provided memory clock
- *
- * @param hwmgr the address of the hardware manager
- * @param memory_clock the memory clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-static int tonga_calculate_mclk_params(
- struct pp_hwmgr *hwmgr,
- uint32_t memory_clock,
- SMU72_Discrete_MemoryLevel *mclk,
- bool strobe_mode,
- bool dllStateOn
- )
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
- uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
- uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
- uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
- uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
- uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
- uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
- uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
- uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
-
- pp_atomctrl_memory_clock_param mpll_param;
- int result;
-
- result = atomctrl_get_memory_pll_dividers_si(hwmgr,
- memory_clock, &mpll_param, strobe_mode);
- PP_ASSERT_WITH_CODE(
- !result,
- "Error retrieving Memory Clock Parameters from VBIOS.",
- return result);
-
- /* MPLL_FUNC_CNTL setup*/
- mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
- mpll_param.bw_ctrl);
-
- /* MPLL_FUNC_CNTL_1 setup*/
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, CLKF,
- mpll_param.mpll_fb_divider.cl_kf);
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, CLKFRAC,
- mpll_param.mpll_fb_divider.clk_frac);
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, VCO_MODE,
- mpll_param.vco_mode);
-
- /* MPLL_AD_FUNC_CNTL setup*/
- mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
- MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
- mpll_param.mpll_post_divider);
-
- if (data->is_memory_gddr5) {
- /* MPLL_DQ_FUNC_CNTL setup*/
- mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
- MPLL_DQ_FUNC_CNTL, YCLK_SEL,
- mpll_param.yclk_sel);
- mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
- MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
- mpll_param.mpll_post_divider);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
- /*
- ************************************
- Fref = Reference Frequency
- NF = Feedback divider ratio
- NR = Reference divider ratio
- Fnom = Nominal VCO output frequency = Fref * NF / NR
- Fs = Spreading Rate
- D = Percentage down-spread / 2
- Fint = Reference input frequency to PFD = Fref / NR
- NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
- CLKS = NS - 1 = ISS_STEP_NUM[11:0]
- NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
- CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
- *************************************
- */
- pp_atomctrl_internal_ss_info ss_info;
- uint32_t freq_nom;
- uint32_t tmp;
- uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
-
- /* for GDDR5 for all modes and DDR3 */
- if (1 == mpll_param.qdr)
- freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
- else
- freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
-
- /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
- tmp = (freq_nom / reference_clock);
- tmp = tmp * tmp;
-
- if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
- /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
- /* ss.Info.speed_spectrum_rate -- in unit of khz */
- /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
- /* = reference_clock * 5 / speed_spectrum_rate */
- uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
-
- /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
- /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
- uint32_t clkv =
- (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
- ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
-
- mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
- mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
- }
- }
-
- /* MCLK_PWRMGT_CNTL setup */
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
-
-	/* Save the result data to the output memory level structure */
- mclk->MclkFrequency = memory_clock;
- mclk->MpllFuncCntl = mpll_func_cntl;
- mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
- mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
- mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
- mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
- mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
- mclk->DllCntl = dll_cntl;
- mclk->MpllSs1 = mpll_ss1;
- mclk->MpllSs2 = mpll_ss2;
-
- return 0;
-}
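
The boxed comment above spells out the MPLL spread-spectrum math (Fnom, Fint, NS, CLKS, CLKV); the code then evaluates it in integer arithmetic. A standalone sketch of the same CLKS/CLKV computation for one hypothetical GDDR5 operating point (every input value below is an invented example, not an entry from a real powerplay table):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reference_clock = 2700;	/* MPLL reference, 10 kHz units */
	uint32_t memory_clock = 15000;		/* requested MCLK, 10 kHz units */
	uint32_t post_divider = 0;
	uint32_t ss_rate = 30;			/* spreading rate Fs, kHz */
	uint32_t ss_percentage = 50;		/* 0.50% in 0.01% units */

	/* GDDR5 QDR: Fnom = memory_clock * 4 * 2^post_divider */
	uint32_t freq_nom = memory_clock * 4 * (1u << post_divider);
	/* tmp = (Fnom / Fref)^2, with reference divider NR = 1 */
	uint32_t tmp = (freq_nom / reference_clock) * (freq_nom / reference_clock);

	/* CLKS = Fref * 5 / Fs  (ISS_STEP_NUM) */
	uint32_t clks = reference_clock * 5 / ss_rate;
	/* CLKV = 131 * D * Fs / 100 * (Fnom/Fref)^2 / Fnom  (ISS_STEP_SIZE) */
	uint32_t clkv = (((131 * ss_percentage * ss_rate) / 100) * tmp) / freq_nom;

	printf("CLKS = %u, CLKV = %u\n", clks, clkv);
	return 0;
}
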
-
-static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
- bool strobe_mode)
-{
- uint8_t mc_para_index;
-
- if (strobe_mode) {
- if (memory_clock < 12500)
- mc_para_index = 0x00;
- else if (memory_clock > 47500)
- mc_para_index = 0x0f;
- else
- mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
- } else {
- if (memory_clock < 65000)
- mc_para_index = 0x00;
- else if (memory_clock > 135000)
- mc_para_index = 0x0f;
- else
- mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
- }
-
- return mc_para_index;
-}
-
-static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
-{
- uint8_t mc_para_index;
-
- if (memory_clock < 10000)
- mc_para_index = 0;
- else if (memory_clock >= 80000)
- mc_para_index = 0x0f;
- else
- mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
-
- return mc_para_index;
-}
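
The two helpers above map a memory clock onto a 4-bit MC parameter index, clamping at both ends of the range. A standalone check of the strobe-mode mapping for a few sample clocks (the sample values are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Same mapping as tonga_get_mclk_frequency_ratio() in strobe mode:
 * clamp below 12500 and above 47500, otherwise one step per 2500 units
 * starting from 10000. */
static uint8_t strobe_ratio(uint32_t memory_clock)
{
	if (memory_clock < 12500)
		return 0x00;
	if (memory_clock > 47500)
		return 0x0f;
	return (uint8_t)((memory_clock - 10000) / 2500);
}

int main(void)
{
	uint32_t clocks[] = { 10000, 12500, 25000, 47500, 60000 };
	unsigned i;

	for (i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++)
		printf("%u -> 0x%02x\n", clocks[i], (unsigned)strobe_ratio(clocks[i]));
	return 0;
}
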
-
-
-static int tonga_populate_single_memory_level(
- struct pp_hwmgr *hwmgr,
- uint32_t memory_clock,
- SMU72_Discrete_MemoryLevel *memory_level
- )
-{
- uint32_t mvdd = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
- bool dll_state_on;
- struct cgs_display_info info = {0};
- uint32_t mclk_edc_wr_enable_threshold = 40000;
- uint32_t mclk_stutter_mode_threshold = 30000;
- uint32_t mclk_edc_enable_threshold = 40000;
- uint32_t mclk_strobe_mode_threshold = 40000;
-
- if (NULL != pptable_info->vdd_dep_on_mclk) {
- result = tonga_get_dependecy_volt_by_clk(hwmgr,
- pptable_info->vdd_dep_on_mclk,
- memory_clock,
- &memory_level->MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE(
- !result,
- "can not find MinVddc voltage value from memory VDDC "
- "voltage dependency table",
- return result);
- }
-
- if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
- memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
- else
- memory_level->MinMvdd = mvdd;
-
- memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 0;
- memory_level->UpHyst = 0;
- memory_level->DownHyst = 100;
- memory_level->VoltageDownHyst = 0;
-
- /* Indicates maximum activity level for this performance level.*/
- memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- memory_level->StutterEnable = 0;
- memory_level->StrobeEnable = 0;
- memory_level->EdcReadEnable = 0;
- memory_level->EdcWriteEnable = 0;
- memory_level->RttEnable = 0;
-
- /* default set to low watermark. Highest level will be set to high later.*/
- memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- data->display_timing.num_existing_displays = info.display_count;
-
- if ((mclk_stutter_mode_threshold != 0) &&
- (memory_clock <= mclk_stutter_mode_threshold) &&
- (!data->is_uvd_enabled)
- && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
- && (data->display_timing.num_existing_displays <= 2)
- && (data->display_timing.num_existing_displays != 0))
- memory_level->StutterEnable = 1;
-
- /* decide strobe mode*/
- memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
- (memory_clock <= mclk_strobe_mode_threshold);
-
- /* decide EDC mode and memory clock ratio*/
- if (data->is_memory_gddr5) {
- memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
- memory_level->StrobeEnable);
-
- if ((mclk_edc_enable_threshold != 0) &&
- (memory_clock > mclk_edc_enable_threshold)) {
- memory_level->EdcReadEnable = 1;
- }
-
- if ((mclk_edc_wr_enable_threshold != 0) &&
- (memory_clock > mclk_edc_wr_enable_threshold)) {
- memory_level->EdcWriteEnable = 1;
- }
-
- if (memory_level->StrobeEnable) {
- if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
- ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
- dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
- } else {
- dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
- }
-
- } else {
- dll_state_on = data->dll_default_on;
- }
- } else {
- memory_level->StrobeRatio =
- tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
- dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
- }
-
- result = tonga_calculate_mclk_params(hwmgr,
- memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
- /* MCLK frequency in units of 10KHz*/
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
- /* Indicates maximum activity level for this performance level.*/
- CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
- }
-
- return result;
-}
-
-int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct smu7_dpm_table *dpm_table = &data->dpm_table;
- int result;
-
- /* populate MCLK dpm table to SMU7 */
- uint32_t level_array_address =
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
- uint32_t level_array_size =
- sizeof(SMU72_Discrete_MemoryLevel) *
- SMU72_MAX_LEVELS_MEMORY;
- SMU72_Discrete_MemoryLevel *levels =
- smu_data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero",
- return -EINVAL);
- result = tonga_populate_single_memory_level(
- hwmgr,
- dpm_table->mclk_table.dpm_levels[i].value,
- &(smu_data->smc_state_table.MemoryLevel[i]));
- if (result)
- return result;
- }
-
- /* Only enable level 0 for now.*/
- smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
-
- /*
-	 * In order to prevent MC activity in stutter mode from pushing DPM up,
-	 * the UVD change complements this by putting the MCLK in a higher state
-	 * by default, so that we are not affected by the up threshold or MCLK DPM latency.
- */
- smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
-
- smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
- data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high*/
- smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
-
-	/* The level count is sent to the SMC once, at SMC table init, and never changes. */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
- SMC_RAM_END);
-
- return result;
-}
-
-static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pattern)
-{
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
- /* find mvdd value which clock is more than request */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- /* Always round to higher voltage. */
- smio_pattern->Voltage =
- data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
-
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
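
tonga_populate_mvdd_value() above picks the first vdd_dep_on_mclk entry whose clock is at least the requested MCLK, i.e. it always rounds up to the next supported clock. A standalone sketch of that search over an invented dependency table (the clocks and voltages are examples only):

#include <stdint.h>
#include <stdio.h>

struct dep_entry {
	uint32_t clk;
	uint32_t mvdd;
};

/* Return the MVDD of the first entry whose clock is >= the request,
 * or -1 if the request is above the whole table. */
static int lookup_mvdd(const struct dep_entry *tbl, unsigned count, uint32_t mclk)
{
	unsigned i;

	for (i = 0; i < count; i++)
		if (mclk <= tbl[i].clk)
			return (int)tbl[i].mvdd;
	return -1;
}

int main(void)
{
	const struct dep_entry tbl[] = {
		{ 15000, 1000 }, { 30000, 1050 }, { 60000, 1100 },
	};

	printf("mclk 20000 -> mvdd %d\n", lookup_mvdd(tbl, 3, 20000));	/* 1050 */
	printf("mclk 70000 -> mvdd %d\n", lookup_mvdd(tbl, 3, 70000));	/* -1 */
	return 0;
}
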
-
-
-static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_clock_dividers_vi dividers;
-
- SMIO_Pattern voltage_level;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
- uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
- uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
-
- /* The ACPI state should not do DPM on DC (or ever).*/
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- table->ACPILevel.MinVoltage =
- smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;
-
- /* assign zero for now*/
- table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
- table->ACPILevel.SclkFrequency, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- /* divider ID for required SCLK*/
- table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PWRON, 0);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_RESET, 1);
- spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
- SCLK_MUX_SEL, 4);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
-
- /* For various features to be enabled/disabled while this level is active.*/
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- /* SCLK frequency in units of 10KHz*/
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
- table->MemoryACPILevel.MinVoltage =
- smu_data->smc_state_table.MemoryLevel[0].MinVoltage;
-
- /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
-
- if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
- table->MemoryACPILevel.MinMvdd =
- PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
- else
- table->MemoryACPILevel.MinMvdd = 0;
-
- /* Force reset on DLL*/
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
-
- /* Disable DLL in ACPIState*/
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
-
- /* Enable DLL bypass signal*/
- dll_cntl = PHM_SET_FIELD(dll_cntl,
- DLL_CNTL, MRDCK0_BYPASS, 0);
- dll_cntl = PHM_SET_FIELD(dll_cntl,
- DLL_CNTL, MRDCK1_BYPASS, 0);
-
- table->MemoryACPILevel.DllCntl =
- PP_HOST_TO_SMC_UL(dll_cntl);
- table->MemoryACPILevel.MclkPwrmgtCntl =
- PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
- table->MemoryACPILevel.MpllAdFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
- table->MemoryACPILevel.MpllDqFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
- table->MemoryACPILevel.MpllFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
- table->MemoryACPILevel.MpllFuncCntl_1 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
- table->MemoryACPILevel.MpllFuncCntl_2 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
- table->MemoryACPILevel.MpllSs1 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
- table->MemoryACPILevel.MpllSs2 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- /* Indicates maximum activity level for this performance level.*/
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = 0;
- table->MemoryACPILevel.StrobeEnable = 0;
- table->MemoryACPILevel.EdcReadEnable = 0;
- table->MemoryACPILevel.EdcWriteEnable = 0;
- table->MemoryACPILevel.RttEnable = 0;
-
- return result;
-}
-
-static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
-
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->UvdLevelCount = (uint8_t) (mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->UvdLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->UvdLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->UvdLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(
- hwmgr,
- table->UvdLevel[count].VclkFrequency,
- &dividers);
-
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for Vclk clock",
- return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for Dclk clock",
- return result);
-
- table->UvdLevel[count].DclkDivider =
- (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- }
-
- return result;
-
-}
-
-static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
-
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->VceLevelCount = (uint8_t) (mm_table->count);
- table->VceBootLevel = 0;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency =
- mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->VceLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->VceLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->VceLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for VCE engine clock",
- return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- }
-
- return result;
-}
-
-static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->AcpLevelCount = (uint8_t) (mm_table->count);
- table->AcpBootLevel = 0;
-
- for (count = 0; count < table->AcpLevelCount; count++) {
- table->AcpLevel[count].Frequency =
- pptable_info->mm_dep_table->entries[count].aclk;
- table->AcpLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->AcpLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->AcpLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->AcpLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->AcpLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for engine clock", return result);
-
- table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
- }
-
- return result;
-}
-
-static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- uint8_t count;
- pp_atomctrl_clock_dividers_vi dividers;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *pptable_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- pptable_info->mm_dep_table;
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t) (mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].Frequency =
- pptable_info->mm_dep_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage.Vddc =
- phm_get_voltage_index(pptable_info->vddc_lookup_table,
- mm_table->entries[count].vddc);
- table->SamuLevel[count].MinVoltage.VddGfx =
- (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
- phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
- mm_table->entries[count].vddgfx) : 0;
- table->SamuLevel[count].MinVoltage.Vddci =
- phm_get_voltage_id(&data->vddci_voltage_table,
- mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
- table->SamuLevel[count].MinVoltage.Phases = 1;
-
- /* retrieve divider value for VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((!result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- }
-
- return result;
-}
-
-static int tonga_populate_memory_timing_parameters(
- struct pp_hwmgr *hwmgr,
- uint32_t engine_clock,
- uint32_t memory_clock,
- struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
- )
-{
- uint32_t dramTiming;
- uint32_t dramTiming2;
- uint32_t burstTime;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- engine_clock, memory_clock);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
- arb_regs->McArbBurstTime = (uint8_t)burstTime;
-
- return 0;
-}
-
-/**
- * Setup parameters for the MC ARB.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- * This function is to be called from the SetPowerState table.
- */
-static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- int result = 0;
- SMU72_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
-
- memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
-
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = tonga_populate_memory_timing_parameters
- (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
-
- if (result)
- break;
- }
- }
-
- if (!result) {
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU72_Discrete_MCArbDramTimingTable),
- SMC_RAM_END
- );
- }
-
- return result;
-}
-
-static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- int result = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table*/
- result = phm_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
-
- if (result != 0) {
- smu_data->smc_state_table.GraphicsBootLevel = 0;
- pr_err("[powerplay] VBIOS did not find boot engine "
- "clock value in dependency table. "
- "Using Graphics DPM level 0 !");
- result = 0;
- }
-
- result = phm_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
-
- if (result != 0) {
- smu_data->smc_state_table.MemoryBootLevel = 0;
- pr_err("[powerplay] VBIOS did not find boot "
- "engine clock value in dependency table."
- "Using Memory DPM level 0 !");
- result = 0;
- }
-
- table->BootVoltage.Vddc =
- phm_get_voltage_id(&(data->vddc_voltage_table),
- data->vbios_boot_state.vddc_bootup_value);
- table->BootVoltage.VddGfx =
- phm_get_voltage_id(&(data->vddgfx_voltage_table),
- data->vbios_boot_state.vddgfx_bootup_value);
- table->BootVoltage.Vddci =
- phm_get_voltage_id(&(data->vddci_voltage_table),
- data->vbios_boot_state.vddci_bootup_value);
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return result;
-}
-
-static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
- volt_with_cks, value;
- uint16_t clock_freq_u16;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
- volt_offset = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
- uint32_t hw_revision, dev_id;
- struct cgs_system_info sys_info = {0};
-
- stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
- sys_info.size = sizeof(struct cgs_system_info);
-
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- hw_revision = (uint32_t)sys_info.value;
-
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
- cgs_query_system_info(hwmgr->device, &sys_info);
- dev_id = (uint32_t)sys_info.value;
-
- /* Read the SMU efuse to calculate RO and determine
- * whether the part is SS or FF. If RO >= 1660 MHz, the part is FF.
- */
- efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (146 * 4));
- efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (148 * 4));
- efuse &= 0xFF000000;
- efuse = efuse >> 24;
- efuse2 &= 0xF;
-
- if (efuse2 == 1)
- ro = (2300 - 1350) * efuse / 255 + 1350;
- else
- ro = (2500 - 1000) * efuse / 255 + 1000;
-
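- /* type selects the tonga_clock_stretcher_ddt_table rows used below: 0 for FF parts, 1 for SS parts */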
- if (ro >= 1660)
- type = 0;
- else
- type = 1;
-
- /* Populate Stretch amount */
- smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
-
-
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
- for (i = 0; i < sclk_table->count; i++) {
- smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
- sclk_table->entries[i].cks_enable << i;
- if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
- volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
- (sclk_table->entries[i].clk/100) / 10000) * 1000 /
- (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
- volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
- (sclk_table->entries[i].clk/100) / 100000) * 1000 /
- (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
- } else {
- volt_without_cks = (uint32_t)((14041 *
- (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
- (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
- volt_with_cks = (uint32_t)((13946 *
- (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
- (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
- }
- if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
- smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
- }
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- STRETCH_ENABLE, 0x0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- staticEnable, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x0);
-
- /* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
- PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
- return -EINVAL);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL);
- value &= 0xFFC2FF87;
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
- tonga_clock_stretcher_lookup_table[stretch_amount2][0];
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
- tonga_clock_stretcher_lookup_table[stretch_amount2][1];
- clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
- GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
- SclkFrequency) / 100);
- if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
- clock_freq_u16 &&
- tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
- clock_freq_u16) {
- /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
- value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
- /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
- value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
- /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
- value |= (tonga_clock_stretch_amount_conversion
- [tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
- [stretch_amount]) << 3;
- }
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].minFreq);
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].maxFreq);
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
- tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
- smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
- (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL, value);
-
- /* Populate DDT Lookup Table */
- for (i = 0; i < 4; i++) {
- /* Assign the minimum and maximum VID stored
- * in the last row of Clock Stretcher Voltage Table.
- */
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].minVID =
- (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].maxVID =
- (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
- /* Loop through each SCLK and check the frequency
- * to see if it lies within the frequency for clock stretcher.
- */
- for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
- cks_setting = 0;
- clock_freq = PP_SMC_TO_HOST_UL(
- smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
- /* Check the allowed frequency against the sclk level[j].
- * Sclk's endianness has already been converted,
- * and it is in 10 kHz units,
- * as opposed to the data table, which is in MHz units.
- */
- if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
- cks_setting |= 0x2;
- if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
- cks_setting |= 0x1;
- }
- smu_data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
- }
- CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
- ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL);
- value &= 0xFFFFFFFE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL, value);
-
- return 0;
-}
-
-/**
- * Populates the SMC VRConfig field in DPM table.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_DpmTable *table)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint16_t config;
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
- /* Split mode */
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= config;
- } else {
- pr_err("VDDC and VDDGFX should "
- "be both on SVI2 control in splitted mode !\n");
- }
- } else {
- /* Merged mode */
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- pr_err("VDDC should be on "
- "SVI2 control in merged mode !\n");
- }
- }
-
- /* Set Vddci Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
- } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
- }
-
- /* Set Mvdd Voltage Controller */
- if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- config = VR_SMIO_PATTERN_2;
- table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
-
-
-/**
- * Initialize the ARB DRAM timing table's index field.
- *
- * @param smumgr the address of the SMU manager.
- * @return 0 on success; otherwise an error code.
- */
-static int tonga_init_arb_table_index(struct pp_smumgr *smumgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
- uint32_t tmp;
- int result;
-
- /*
- * This is a read-modify-write on the first byte of the ARB table.
- * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
- * is the field 'current'.
- * This solution is ugly, but we never write the whole table, only
- * individual fields in it.
- * In reality this field should not be in that structure
- * but in a soft register.
- */
- result = smu7_read_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
-
- if (result != 0)
- return result;
-
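- /* Set the 'current' byte to MC_CG_ARB_FREQ_F1, leaving the rest of the dword untouched */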
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return smu7_write_smc_sram_dword(smumgr,
- smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
-}
-
-
-static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
- SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
- int i, j, k;
- const uint16_t *pdef1, *pdef2;
-
- dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 256));
- dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
-
- PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
- "Target Operating Temp is out of Range !",
- );
-
- dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
- dpm_table->GpuTjHyst = 8;
-
- dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
-
- dpm_table->BAPM_TEMP_GRADIENT =
- PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
- pdef1 = defaults->bapmti_r;
- pdef2 = defaults->bapmti_rc;
-
- for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
- for (j = 0; j < SMU72_DTE_SOURCES; j++) {
- for (k = 0; k < SMU72_DTE_SINKS; k++) {
- dpm_table->BAPMTI_R[i][j][k] =
- PP_HOST_TO_SMC_US(*pdef1);
- dpm_table->BAPMTI_RC[i][j][k] =
- PP_HOST_TO_SMC_US(*pdef2);
- pdef1++;
- pdef2++;
- }
- }
- }
-
- return 0;
-}
-
-static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
-
- smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
- smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
- smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
- smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-
-static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
- uint16_t tdc_limit;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- /* The number of TDC fraction bits was changed from 8 to 7
- * for Fiji, as requested by the SMC team.
- */
- tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
- smu_data->power_tune_table.TDC_VDDC_PkgLimit =
- CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
- smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- defaults->tdc_vddc_throttle_release_limit_perc;
- smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
-
- return 0;
-}
-
-static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
- uint32_t temp;
-
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- fuse_table_offset +
- offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to read PmFuses.DW6 "
- "(SviLoadLineEn) from SMC Failed !",
- return -EINVAL);
- else
- smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
-
- return 0;
-}
-
-static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
- return 0;
-}
-
-static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- if ((hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity & (1 << 15)) ||
- (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
- hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity = hwmgr->thermal_controller.
- advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
- smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
- PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
- advanceFanControlParameters.usFanOutputSensitivity);
- return 0;
-}
-
-static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- smu_data->power_tune_table.GnbLPML[i] = 0;
-
- return 0;
-}
-
-static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
-static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
- hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
- smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
-
- return 0;
-}
-
-static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t pm_fuse_table_offset;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed !",
- return -EINVAL);
-
- /* DW6 */
- if (tonga_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed !",
- return -EINVAL);
- /* DW7 */
- if (tonga_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed !",
- return -EINVAL);
- /* DW8 */
- if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl Failed !",
- return -EINVAL);
-
- /* DW9-DW12 */
- if (tonga_populate_temperature_scaler(hwmgr) != 0)
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed !",
- return -EINVAL);
-
- /* DW13-DW14 */
- if (tonga_populate_fuzzy_fan(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate Fuzzy Fan "
- "Control parameters Failed !",
- return -EINVAL);
-
- /* DW15-DW18 */
- if (tonga_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed !",
- return -EINVAL);
-
- /* DW19 */
- if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML "
- "Min and Max Vid Failed !",
- return -EINVAL);
-
- /* DW20 */
- if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(
- false,
- "Attempt to populate BapmVddCBaseLeakage "
- "Hi and Lo Sidd Failed !",
- return -EINVAL);
-
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
- (uint8_t *)&smu_data->power_tune_table,
- sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed !",
- return -EINVAL);
- }
- return 0;
-}
-
-static int tonga_populate_mc_reg_address(struct pp_smumgr *smumgr,
- SMU72_Discrete_MCRegisters *mc_reg_table)
-{
- const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend;
-
- uint32_t i, j;
-
- for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
- if (smu_data->mc_reg_table.validflag & 1<<j) {
- PP_ASSERT_WITH_CODE(
- i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
- "Index of mc_reg_table->address[] array "
- "out of boundary",
- return -EINVAL);
- mc_reg_table->address[i].s0 =
- PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
- mc_reg_table->address[i].s1 =
- PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
- i++;
- }
- }
-
- mc_reg_table->last = (uint8_t)i;
-
- return 0;
-}
-
-/*convert register values from driver to SMC format */
-static void tonga_convert_mc_registers(
- const struct tonga_mc_reg_entry *entry,
- SMU72_Discrete_MCRegisterSet *data,
- uint32_t num_entries, uint32_t valid_flag)
-{
- uint32_t i, j;
-
- for (i = 0, j = 0; j < num_entries; j++) {
- if (valid_flag & 1<<j) {
- data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
- i++;
- }
- }
-}
-
-static int tonga_convert_mc_reg_table_entry_to_smc(
- struct pp_smumgr *smumgr,
- const uint32_t memory_clock,
- SMU72_Discrete_MCRegisterSet *mc_reg_table_data
- )
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
- uint32_t i = 0;
-
- for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
- if (memory_clock <=
- smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
- break;
- }
- }
-
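- /* If memory_clock is above every entry's mclk_max, fall back to the last (highest) entry */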
- if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
- --i;
-
- tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
- mc_reg_table_data, smu_data->mc_reg_table.last,
- smu_data->mc_reg_table.validflag);
-
- return 0;
-}
-
-static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
- SMU72_Discrete_MCRegisters *mc_regs)
-{
- int result = 0;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- int res;
- uint32_t i;
-
- for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
- res = tonga_convert_mc_reg_table_entry_to_smc(
- hwmgr->smumgr,
- data->dpm_table.mclk_table.dpm_levels[i].value,
- &mc_regs->data[i]
- );
-
- if (0 != res)
- result = res;
- }
-
- return result;
-}
-
-static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- uint32_t address;
- int32_t result;
-
- if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
- return 0;
-
-
- memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
-
- result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
-
- if (result != 0)
- return result;
-
-
- address = smu_data->smu7_data.mc_reg_table_start +
- (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
-
- return smu7_copy_bytes_to_smc(
- hwmgr->smumgr, address,
- (uint8_t *)&smu_data->mc_regs.data[0],
- sizeof(SMU72_Discrete_MCRegisterSet) *
- data->dpm_table.mclk_table.count,
- SMC_RAM_END);
-}
-
-static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
-
- memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
- result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize MCRegTable for the MC register addresses !",
- return result;);
-
- result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize MCRegTable for driver state !",
- return result;);
-
- return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
- (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
-}
-
-static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (table_info &&
- table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
- table_info->cac_dtp_table->usPowerTuneDataSetID)
- smu_data->power_tune_defaults =
- &tonga_power_tune_data_set_array
- [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
- else
- smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
-}
-
-static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- struct SMU72_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- unsigned min_level = 1;
-
- hwmgr->default_gfx_power_profile.activity_threshold =
- be16_to_cpu(levels[0].ActivityLevel);
- hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
- hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
- hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
- hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
- /* Workaround compute SDMA instability: disable lowest SCLK
- * DPM level. Optimize compute power profile: Use only highest
- * 2 power levels (if more than 2 are available), Hysteresis:
- * 0ms up, 5ms down
- */
- if (data->smc_state_table.GraphicsDpmLevelCount > 2)
- min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
- else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
- min_level = 1;
- else
- min_level = 0;
- hwmgr->default_compute_power_profile.min_sclk =
- be32_to_cpu(levels[min_level].SclkFrequency);
- hwmgr->default_compute_power_profile.up_hyst = 0;
- hwmgr->default_compute_power_profile.down_hyst = 5;
-
- hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
- hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
-}
-
-/**
- * Initializes the SMC table and uploads it.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success; otherwise an error code.
- */
-int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- uint8_t i;
- pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
-
-
- memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
-
- tonga_initialize_power_tune_defaults(hwmgr);
-
- if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
- tonga_populate_smc_voltage_tables(hwmgr, table);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (data->is_memory_gddr5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
-
- if (i == 1 || i == 0)
- table->SystemFlags |= 0x40;
-
- if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
- result = tonga_populate_ulv_state(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize ULV state !",
- return result;);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, 0x40035);
- }
-
- result = tonga_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize Link Level !", return result);
-
- result = tonga_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize Graphics Level !", return result);
-
- result = tonga_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize Memory Level !", return result);
-
- result = tonga_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize ACPI Level !", return result);
-
- result = tonga_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize VCE Level !", return result);
-
- result = tonga_populate_smc_acp_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize ACP Level !", return result);
-
- result = tonga_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize SAMU Level !", return result);
-
- /* Since only the initial state is completely set up at this
- * point (the other states are just copies of the boot state) we only
- * need to populate the ARB settings for the initial state.
- */
- result = tonga_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to Write ARB settings for the initial state.",
- return result;);
-
- result = tonga_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize UVD Level !", return result);
-
- result = tonga_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to initialize Boot Level !", return result);
-
- tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to populate BAPM Parameters !", return result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = tonga_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to populate Clock Stretcher Data Table !",
- return result;);
- }
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- table_info->cac_dtp_table->usTargetOperatingTemp *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- SMU7_Q88_FORMAT_CONVERSION_UNIT;
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
-
- /*
- * CAIL reads the current link status and reports it as the cap (we cannot
- * change this due to some previous issues we had).
- * SMC drops the link status to the lowest level after PowerPlay enables
- * DPM. After PnP or toggling CF, the driver gets reloaded again, but this
- * time CAIL reads the current link status, which was set low by SMC, and
- * reports it as the cap to PowerPlay.
- * To avoid this, we set PCIeBootLinkLevel to the highest DPM level.
- */
- PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
-
- table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
-
- table->PCIeGenInterval = 1;
-
- result = tonga_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to populate VRConfig setting !", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
- &gpio_pin_assignment)) {
- table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- } else {
- table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin_assignment)) {
- table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition);
-
- if (0) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr,
- THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
-
- table->ThermOutPolarity =
- (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
- (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
-
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO*/
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot) &&
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CombinePCCWithThermalSignal)){
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- }
- } else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
- /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
- SMC_RAM_END);
-
- PP_ASSERT_WITH_CODE(!result,
- "Failed to upload dpm data to SMC memory !", return result;);
-
- result = tonga_init_arb_table_index(hwmgr->smumgr);
- PP_ASSERT_WITH_CODE(!result,
- "Failed to upload arb data to SMC memory !", return result);
-
- tonga_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE((!result),
- "Failed to populate initialize pm fuses !", return result);
-
- result = tonga_populate_initial_mc_reg_table(hwmgr);
- PP_ASSERT_WITH_CODE((!result),
- "Failed to populate initialize MC Reg table !", return result);
-
- tonga_save_default_power_profile(hwmgr);
-
- return 0;
-}
-
-/**
-* Set up the fan table to control the fan using the SMC.
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- uint32_t duty100;
- uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- uint16_t fdo_min, slope1, slope2;
- uint32_t reference_clock;
- int res;
- uint64_t tmp64;
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- return 0;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- if (0 == smu_data->smu7_data.fan_table_start) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC,
- CG_FDO_CTRL1, FMAX_DUTY100);
-
- if (0 == duty100) {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
- return 0;
- }
-
- tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
- do_div(tmp64, 10000);
- fdo_min = (uint16_t)tmp64;
-
- t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
- t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
-
- pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
- pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
-
- slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
- fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
- fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = smu7_get_xclk(hwmgr);
-
- fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
-
- fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
-
- fan_table.FanControl_GL_Flag = 1;
-
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr,
- smu_data->smu7_data.fan_table_start,
- (uint8_t *)&fan_table,
- (uint32_t)sizeof(fan_table),
- SMC_RAM_END);
-
- return 0;
-}
-
-
-static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return tonga_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold =
- data->low_sclk_interrupt_threshold;
-
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
- smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- SMC_RAM_END);
- }
-
- result = tonga_update_and_upload_mc_reg_table(hwmgr);
-
- PP_ASSERT_WITH_CODE((!result),
- "Failed to upload MC reg table !",
- return result);
-
- result = tonga_program_mem_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((result == 0),
- "Failed to program memory timing parameters !",
- );
-
- return result;
-}
-
-uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
-{
- switch (type) {
- case SMU_SoftRegisters:
- switch (member) {
- case HandshakeDisables:
- return offsetof(SMU72_SoftRegisters, HandshakeDisables);
- case VoltageChangeTimeout:
- return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
- case AverageGraphicsActivity:
- return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
- case PreVBlankGap:
- return offsetof(SMU72_SoftRegisters, PreVBlankGap);
- case VBlankTimeout:
- return offsetof(SMU72_SoftRegisters, VBlankTimeout);
- case UcodeLoadStatus:
- return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
- }
- case SMU_Discrete_DpmTable:
- switch (member) {
- case UvdBootLevel:
- return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
- case VceBootLevel:
- return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
- case SamuBootLevel:
- return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
- case LowSclkInterruptThreshold:
- return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
- }
- }
- pr_warn("can't get the offset of type %x member %x\n", type, member);
- return 0;
-}
-
-uint32_t tonga_get_mac_definition(uint32_t value)
-{
- switch (value) {
- case SMU_MAX_LEVELS_GRAPHICS:
- return SMU72_MAX_LEVELS_GRAPHICS;
- case SMU_MAX_LEVELS_MEMORY:
- return SMU72_MAX_LEVELS_MEMORY;
- case SMU_MAX_LEVELS_LINK:
- return SMU72_MAX_LEVELS_LINK;
- case SMU_MAX_ENTRIES_SMIO:
- return SMU72_MAX_ENTRIES_SMIO;
- case SMU_MAX_LEVELS_VDDC:
- return SMU72_MAX_LEVELS_VDDC;
- case SMU_MAX_LEVELS_VDDGFX:
- return SMU72_MAX_LEVELS_VDDGFX;
- case SMU_MAX_LEVELS_VDDCI:
- return SMU72_MAX_LEVELS_VDDCI;
- case SMU_MAX_LEVELS_MVDD:
- return SMU72_MAX_LEVELS_MVDD;
- }
- pr_warn("can't get the mac value %x\n", value);
-
- return 0;
-}
-
-
-static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- smu_data->smc_state_table.UvdBootLevel = 0;
- if (table_info->mm_dep_table->count > 0)
- smu_data->smc_state_table.UvdBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
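- /* UvdBootLevel is a single byte; align the offset down to its containing dword for the read-modify-write below */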
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC,
- mm_boot_level_offset, mm_boot_level_value);
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
- return 0;
-}
-
-static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
-
- smu_data->smc_state_table.VceBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
-
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
- return 0;
-}
-
-static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
-
- smu_data->smc_state_table.SamuBootLevel = 0;
- mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
-
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
- return 0;
-}
-
-int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
-{
- switch (type) {
- case SMU_UVD_TABLE:
- tonga_update_uvd_smc_table(hwmgr);
- break;
- case SMU_VCE_TABLE:
- tonga_update_vce_smc_table(hwmgr);
- break;
- case SMU_SAMU_TABLE:
- tonga_update_samu_smc_table(hwmgr);
- break;
- default:
- break;
- }
- return 0;
-}
-
-
-/**
- * Get the location of various tables inside the FW image.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success, 1 if any table location could not be read.
- */
-int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
-
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, DpmTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.dpm_table_start = tmp;
-
- error |= (result != 0);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, SoftRegisters),
- &tmp, SMC_RAM_END);
-
- if (!result) {
- data->soft_regs_start = tmp;
- smu_data->smu7_data.soft_regs_start = tmp;
- }
-
- error |= (result != 0);
-
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, mcRegisterTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.mc_reg_table_start = tmp;
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, FanTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.fan_table_start = tmp;
-
- error |= (result != 0);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
- &tmp, SMC_RAM_END);
-
- if (!result)
- smu_data->smu7_data.arb_table_start = tmp;
-
- error |= (result != 0);
-
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
- SMU72_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU72_Firmware_Header, Version),
- &tmp, SMC_RAM_END);
-
- if (!result)
- hwmgr->microcode_version_info.SMC = tmp;
-
- error |= (result != 0);
-
- return error ? 1 : 0;
-}
-
-/*---------------------------MC----------------------------*/
-
-static uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
-{
- return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
-}
-
-static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
-{
- bool result = true;
-
- switch (in_reg) {
- case mmMC_SEQ_RAS_TIMING:
- *out_reg = mmMC_SEQ_RAS_TIMING_LP;
- break;
-
- case mmMC_SEQ_DLL_STBY:
- *out_reg = mmMC_SEQ_DLL_STBY_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CMD0:
- *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CMD1:
- *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CTRL:
- *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
- break;
-
- case mmMC_SEQ_CAS_TIMING:
- *out_reg = mmMC_SEQ_CAS_TIMING_LP;
- break;
-
- case mmMC_SEQ_MISC_TIMING:
- *out_reg = mmMC_SEQ_MISC_TIMING_LP;
- break;
-
- case mmMC_SEQ_MISC_TIMING2:
- *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
- break;
-
- case mmMC_SEQ_PMG_DVS_CMD:
- *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
- break;
-
- case mmMC_SEQ_PMG_DVS_CTL:
- *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
- break;
-
- case mmMC_SEQ_RD_CTL_D0:
- *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
- break;
-
- case mmMC_SEQ_RD_CTL_D1:
- *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_D0:
- *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_D1:
- *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
- break;
-
- case mmMC_PMG_CMD_EMRS:
- *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
- break;
-
- case mmMC_PMG_CMD_MRS:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
- break;
-
- case mmMC_PMG_CMD_MRS1:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
- break;
-
- case mmMC_SEQ_PMG_TIMING:
- *out_reg = mmMC_SEQ_PMG_TIMING_LP;
- break;
-
- case mmMC_PMG_CMD_MRS2:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_2:
- *out_reg = mmMC_SEQ_WR_CTL_2_LP;
- break;
-
- default:
- result = false;
- break;
- }
-
- return result;
-}
-
-static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
-{
- uint32_t i;
- uint16_t address;
-
- for (i = 0; i < table->last; i++) {
- table->mc_reg_address[i].s0 =
- tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
- &address) ?
- address :
- table->mc_reg_address[i].s1;
- }
- return 0;
-}
-
-static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
- struct tonga_mc_reg_table *ni_table)
-{
- uint8_t i, j;
-
- PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
- PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
- "Invalid VramInfo table.", return -EINVAL);
-
- for (i = 0; i < table->last; i++)
- ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
-
- ni_table->last = table->last;
-
- for (i = 0; i < table->num_entries; i++) {
- ni_table->mc_reg_table_entry[i].mclk_max =
- table->mc_reg_table_entry[i].mclk_max;
- for (j = 0; j < table->last; j++) {
- ni_table->mc_reg_table_entry[i].mc_data[j] =
- table->mc_reg_table_entry[i].mc_data[j];
- }
- }
-
- ni_table->num_entries = table->num_entries;
-
- return 0;
-}
-
-/**
- * VBIOS omits some information to reduce size; we need to recover it here.
- * 1. When we see mmMC_SEQ_MISC1, bits [31:16] (EMRS1) need to be written to
- * mmMC_PMG_CMD_EMRS/_LP[15:0], and bits [15:0] (MRS) need to be written to
- * mmMC_PMG_CMD_MRS/_LP[15:0].
- * 2. When we see mmMC_SEQ_RESERVE_M, bits [15:0] (EMRS2) need to be written to
- * mmMC_PMG_CMD_MRS1/_LP[15:0].
- * 3. These data need to be set for each clock range.
- * @param hwmgr the address of the powerplay hardware manager.
- * @param table the address of MCRegTable
- * @return always 0
- */
-static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
- struct tonga_mc_reg_table *table)
-{
- uint8_t i, j, k;
- uint32_t temp_reg;
- struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
- for (i = 0, j = table->last; i < table->last; i++) {
- PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
-
- switch (table->mc_reg_address[i].s1) {
-
- case mmMC_SEQ_MISC1:
- temp_reg = cgs_read_register(hwmgr->device,
- mmMC_PMG_CMD_EMRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- ((temp_reg & 0xffff0000)) |
- ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
- }
- j++;
- PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
-
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-
- if (!data->is_memory_gddr5)
- table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
-
- if (!data->is_memory_gddr5) {
- table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
- table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
- for (k = 0; k < table->num_entries; k++)
- table->mc_reg_table_entry[k].mc_data[j] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
- }
-
- break;
-
- case mmMC_SEQ_RESERVE_M:
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -EINVAL);
- break;
-
- default:
- break;
- }
-
- }
-
- table->last = j;
-
- return 0;
-}
-
-static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
-{
- uint8_t i, j;
-
- for (i = 0; i < table->last; i++) {
- for (j = 1; j < table->num_entries; j++) {
- if (table->mc_reg_table_entry[j-1].mc_data[i] !=
- table->mc_reg_table_entry[j].mc_data[i]) {
- table->validflag |= (1<<i);
- break;
- }
- }
- }
-
- return 0;
-}
-
-int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
- pp_atomctrl_mc_reg_table *table;
- struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
- uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
-
- table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
-
- if (table == NULL)
- return -ENOMEM;
-
- /* Program additional LP registers that are no longer programmed by VBIOS */
- cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
- cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
- cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
- cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
- cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
-
- memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
- result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
-
- if (!result)
- result = tonga_copy_vbios_smc_reg_table(table, ni_table);
-
- if (!result) {
- tonga_set_s0_mc_reg_index(ni_table);
- result = tonga_set_mc_special_registers(hwmgr, ni_table);
- }
-
- if (!result)
- tonga_set_valid_flag(ni_table);
-
- kfree(table);
-
- return result;
-}
-
-bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
-}
-
-int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request)
-{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)
- (hwmgr->smumgr->backend);
- struct SMU72_Discrete_GraphicsLevel *levels =
- smu_data->smc_state_table.GraphicsLevel;
- uint32_t array = smu_data->smu7_data.dpm_table_start +
- offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU72_Discrete_GraphicsLevel) *
- SMU72_MAX_LEVELS_GRAPHICS;
- uint32_t i;
-
- for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpHyst = request->up_hyst;
- levels[i].DownHyst = request->down_hyst;
- }
-
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- array_size, SMC_RAM_END);
-}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
deleted file mode 100644
index 962860f13f24..000000000000
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _TONGA_SMC_H
-#define _TONGA_SMC_H
-
-#include "smumgr.h"
-#include "smu72.h"
-
-
-#define ASICID_IS_TONGA_P(wDID, bRID) \
- (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
- || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
-
-
-struct tonga_pt_defaults {
- uint8_t svi_load_line_en;
- uint8_t svi_load_line_vddC;
- uint8_t tdc_vddc_throttle_release_limit_perc;
- uint8_t tdc_mawt;
- uint8_t tdc_waterfall_ctl;
- uint8_t dte_ambient_temp_base;
- uint32_t display_cac;
- uint32_t bamp_temp_gradient;
- uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
- uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
-};
-
-int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int tonga_init_smc_table(struct pp_hwmgr *hwmgr);
-int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
-int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t tonga_get_offsetof(uint32_t type, uint32_t member);
-uint32_t tonga_get_mac_definition(uint32_t value);
-int tonga_process_firmware_header(struct pp_hwmgr *hwmgr);
-int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
-bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr);
-int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index c35f4c35c9ca..0a8e48bff219 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -33,141 +33,193 @@
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "cgs_common.h"
-#include "tonga_smc.h"
#include "smu7_smumgr.h"
+#include "smu7_dyn_defaults.h"
-static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+
+#include "atombios.h"
+
+#include "pppcielanes.h"
+#include "pp_endian.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define VDDC_VDDCI_DELTA 200
+
+
+static const struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+/* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT
+ */
+ {1, 0xF, 0xFD, 0x19,
+ 5, 45, 0, 0xB0000,
+ {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8,
+ 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+ {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203,
+ 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4}
+ },
+};
+
+/* [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
+static const uint16_t tonga_clock_stretcher_lookup_table[2][4] = {
+ {600, 1050, 3, 0},
+ {600, 1050, 6, 1}
+};
+
+/* [FF, SS] type, [] 4 voltage ranges,
+ * and [Floor Freq, Boundary Freq, VID min , VID max]
+ */
+static const uint32_t tonga_clock_stretcher_ddt_table[2][4][4] = {
+ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+ { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} }
+};
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] */
+static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
+ {0, 1, 3, 2, 4, 5},
+ {0, 2, 4, 5, 6, 5}
+};
+
+static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr)
{
int result;
/* Assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result)
return result;
/* Clear status */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMU_STATUS, 0);
/* Enable clock */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Set SMU Auto Start */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_INPUT_DATA, AUTO_START, 1);
/* Clear firmware interrupt enable flag */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
/**
* Call Test SMU message with 0x20000 offset to trigger SMU start
*/
- smu7_send_msg_to_smc_offset(smumgr);
+ smu7_send_msg_to_smc_offset(hwmgr);
/* Wait for done bit to be set */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
SMU_STATUS, SMU_DONE, 0);
/* Check pass/failed indicator */
- if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
+ if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) {
pr_err("SMU Firmware start failed\n");
return -EINVAL;
}
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return 0;
}
-
-static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr)
+static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* wait for smc boot up */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
/*Clear firmware interrupt enable flag*/
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result != 0)
return result;
/* Set smc instruct start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/*De-assert reset*/
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int tonga_start_smu(struct pp_smumgr *smumgr)
+static int tonga_start_smu(struct pp_hwmgr *hwmgr)
{
int result;
/* Only start SMC if SMC RAM is not running */
- if (!(smu7_is_smc_ram_running(smumgr) ||
- cgs_is_virtualization_enabled(smumgr->device))) {
+ if (!(smu7_is_smc_ram_running(hwmgr) ||
+ cgs_is_virtualization_enabled(hwmgr->device))) {
/*Check if SMU is running in protected mode*/
- if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_FIRMWARE, SMU_MODE)) {
- result = tonga_start_in_non_protection_mode(smumgr);
+ result = tonga_start_in_non_protection_mode(hwmgr);
if (result)
return result;
} else {
- result = tonga_start_in_protection_mode(smumgr);
+ result = tonga_start_in_protection_mode(hwmgr);
if (result)
return result;
}
}
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
-/**
- * Write a 32bit value to the SMC SRAM space.
- * ALL PARAMETERS ARE IN HOST BYTE ORDER.
- * @param smumgr the address of the powerplay hardware manager.
- * @param smcAddress the address in the SMC RAM to access.
- * @param value to write to the SMC SRAM.
- */
-static int tonga_smu_init(struct pp_smumgr *smumgr)
+static int tonga_smu_init(struct pp_hwmgr *hwmgr)
{
struct tonga_smumgr *tonga_priv = NULL;
int i;
@@ -176,9 +228,9 @@ static int tonga_smu_init(struct pp_smumgr *smumgr)
if (tonga_priv == NULL)
return -ENOMEM;
- smumgr->backend = tonga_priv;
+ hwmgr->smu_backend = tonga_priv;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++)
@@ -187,6 +239,3053 @@ static int tonga_smu_init(struct pp_smumgr *smumgr)
return 0;
}
+
+static int tonga_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+ uint32_t i = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ /* clock - voltage dependency table is empty table */
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ /* find first sclk bigger than request */
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ voltage->VddGfx = phm_get_voltage_index(
+ pptable_info->vddgfx_lookup_table,
+ allowed_clock_voltage_table->entries[i].vddgfx);
+ voltage->Vddc = phm_get_voltage_index(
+ pptable_info->vddc_lookup_table,
+ allowed_clock_voltage_table->entries[i].vddc);
+
+ if (allowed_clock_voltage_table->entries[i].vddci)
+ voltage->Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci);
+ else
+ voltage->Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA);
+
+
+ if (allowed_clock_voltage_table->entries[i].mvdd)
+ *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd;
+
+ voltage->Phases = 1;
+ return 0;
+ }
+ }
+
+ /* sclk is bigger than max sclk in the dependence table */
+ voltage->VddGfx = phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ allowed_clock_voltage_table->entries[i-1].vddgfx);
+ voltage->Vddc = phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ allowed_clock_voltage_table->entries[i-1].vddc);
+
+ if (allowed_clock_voltage_table->entries[i-1].vddci)
+ voltage->Vddci = phm_get_voltage_id(&data->vddci_voltage_table,
+ allowed_clock_voltage_table->entries[i-1].vddci);
+
+ if (allowed_clock_voltage_table->entries[i-1].mvdd)
+ *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd;
+
+ return 0;
+}
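+
+/*
+ * Illustrative walk-through of the lookup above, using assumed numbers rather
+ * than real pptable contents: with dependency entries at clk = {30000, 60000,
+ * 90000} (10 kHz units), a request of 70000 matches the first entry whose clk
+ * is >= 70000, i.e. the 90000 one, and its vddgfx/vddc lookup indices are
+ * used. A request above 90000 falls through the loop and reuses the highest
+ * entry (entries[i - 1]). Entries without an explicit vddci fall back to
+ * vddc - VDDC_VDDCI_DELTA on the in-range path.
+ */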
+
+static int tonga_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ table->VddcLevelCount = data->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ table->VddcTable[count] =
+ PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+ }
+ return 0;
+}
+
+static int tonga_populate_smc_vdd_gfx_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+ table->VddGfxLevelCount = data->vddgfx_voltage_table.count;
+ for (count = 0; count < data->vddgfx_voltage_table.count; count++) {
+ table->VddGfxTable[count] =
+ PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddGfxLevelCount);
+ }
+ return 0;
+}
+
+static int tonga_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+
+ table->VddciLevelCount = data->vddci_voltage_table.count;
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ table->VddciTable[count] =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ table->SmioTable1.Pattern[count].Voltage =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level. */
+ table->SmioTable1.Pattern[count].Smio =
+ (uint8_t) count;
+ table->Smio[count] |=
+ data->vddci_voltage_table.entries[count].smio_low;
+ table->VddciTable[count] =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ }
+ }
+
+ table->SmioMask1 = data->vddci_voltage_table.mask_low;
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+ return 0;
+}
+
+static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ table->MvddLevelCount = data->mvdd_voltage_table.count;
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ table->SmioTable2.Pattern[count].Voltage =
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+ table->SmioTable2.Pattern[count].Smio =
+ (uint8_t) count;
+ table->Smio[count] |=
+ data->mvdd_voltage_table.entries[count].smio_low;
+ }
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+ }
+
+ return 0;
+}
+
+static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table =
+ pptable_info->vddgfx_lookup_table;
+ struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table =
+ pptable_info->vddc_lookup_table;
+
+ /* table is already swapped, so in order to use the value from it
+ * we need to swap it back.
+ */
+ uint32_t vddc_level_count = PP_SMC_TO_HOST_UL(table->VddcLevelCount);
+ uint32_t vddgfx_level_count = PP_SMC_TO_HOST_UL(table->VddGfxLevelCount);
+
+ for (count = 0; count < vddc_level_count; count++) {
+ /* We are populating vddc CAC data to BapmVddc table in split and merged mode */
+ index = phm_get_voltage_index(vddc_lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+ table->BapmVddcVidHiSidd2[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+ }
+
+ if ((data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2)) {
+ /* We are populating vddgfx CAC data to BapmVddgfx table in split mode */
+ for (count = 0; count < vddgfx_level_count; count++) {
+ index = phm_get_voltage_index(vddgfx_lookup_table,
+ convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid));
+ table->BapmVddGfxVidHiSidd2[count] =
+ convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high);
+ }
+ } else {
+ for (count = 0; count < vddc_level_count; count++) {
+ index = phm_get_voltage_index(vddc_lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddGfxVidLoSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_low);
+ table->BapmVddGfxVidHiSidd[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid);
+ table->BapmVddGfxVidHiSidd2[count] =
+ convert_to_vid(vddc_lookup_table->entries[index].us_cac_high);
+ }
+ }
+
+ return 0;
+}
+
+static int tonga_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = tonga_populate_smc_vddc_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDC voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_vdd_ci_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDCI voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_vdd_gfx_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate VDDGFX voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_smc_mvdd_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate MVDD voltage table to SMC",
+ return -EINVAL);
+
+ result = tonga_populate_cac_tables(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "can not populate CAC voltage tables to SMC",
+ return -EINVAL);
+
+ return 0;
+}
+
+static int tonga_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU72_Discrete_Ulv *state)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ state->VddcPhase = 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
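+
+/*
+ * Rough arithmetic sketch for the VID conversion above, with an assumed ULV
+ * offset instead of a real pptable value: VOLTAGE_VID_OFFSET_SCALE2 /
+ * VOLTAGE_VID_OFFSET_SCALE1 = 100 / 625 corresponds to one VID step per
+ * 6.25 mV, so an us_ulv_voltage_offset of 50 would give
+ * VddcOffsetVid = 50 * 100 / 625 = 8.
+ */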
+
+static int tonga_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU72_Discrete_DpmTable *table)
+{
+ return tonga_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t i;
+
+ /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity =
+ 1;
+ table->LinkLevel[i].SPC =
+ (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold =
+ PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold =
+ PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+static int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock, SMU72_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t reference_clock;
+ uint32_t reference_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
+ reference_clock = atomctrl_get_reference_clock(hwmgr);
+
+ reference_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider*/
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup*/
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
+ CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ pp_atomctrl_internal_ss_info ss_info;
+
+ uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
+ if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
+ /*
+ * ss_info.speed_spectrum_percentage -- in unit of 0.01%
+ * ss_info.speed_spectrum_rate -- in unit of khz
+ */
+ /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
+ uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
+
+ /* clkv = 2 * D * fbdiv / NS */
+ uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
+
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
+ cg_spll_spread_spectrum =
+ PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 =
+ PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
+ }
+ }
+
+ sclk->SclkFrequency = engine_clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
+
+static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint16_t sclk_activity_level_threshold,
+ SMU72_Discrete_GraphicsLevel *graphic_level)
+{
+ int result;
+ uint32_t mvdd;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
+
+ /* populate graphics levels*/
+ result = tonga_get_dependency_volt_by_clk(hwmgr,
+ pptable_info->vdd_dep_on_sclk, engine_clock,
+ &graphic_level->MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find VDDC voltage value for VDDC "
+ "engine clock dependency table", return result);
+
+ /* SCLK frequency in units of 10KHz*/
+ graphic_level->SclkFrequency = engine_clock;
+ /* Indicates maximum activity level for this performance level. 50% for now*/
+ graphic_level->ActivityLevel = sclk_activity_level_threshold;
+
+ graphic_level->CcPwrDynRm = 0;
+ graphic_level->CcPwrDynRm1 = 0;
+ /* this level can be used if activity is high enough.*/
+ graphic_level->EnabledForActivity = 0;
+ /* this level can be used for throttling.*/
+ graphic_level->EnabledForThrottle = 1;
+ graphic_level->UpHyst = 0;
+ graphic_level->DownHyst = 0;
+ graphic_level->VoltageDownHyst = 0;
+ graphic_level->PowerThrottle = 0;
+
+ data->display_timing.min_clock_in_sr =
+ hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ graphic_level->DeepSleepDivId =
+ smu7_get_sleep_divider_id_from_clock(engine_clock,
+ data->display_timing.min_clock_in_sr);
+
+	/* Default to slow (low watermark); the highest DPM level is switched to PPSMC_DISPLAY_WATERMARK_HIGH later. */
+ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (!result) {
+ /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
+ /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);*/
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
+ }
+
+ return result;
+}
+
+static int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
+ uint8_t pcie_entry_count = (uint8_t) data->dpm_table.pcie_speed_table.count;
+ uint32_t level_array_address = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
+
+ uint32_t level_array_size = sizeof(SMU72_Discrete_GraphicsLevel) *
+ SMU72_MAX_LEVELS_GRAPHICS;
+
+ SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel;
+
+ uint32_t i, max_entry;
+ uint8_t highest_pcie_level_enabled = 0;
+ uint8_t lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0;
+ uint8_t count = 0;
+ int result = 0;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = tonga_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &(smu_data->smc_state_table.GraphicsLevel[i]));
+ if (result != 0)
+ return result;
+
+ /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
+ if (i > 1)
+ smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ }
+
+ /* Only enable level 0 for now. */
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ /* set highest level watermark to high */
+ if (dpm_table->sclk_table.count > 1)
+ smu_data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((pcie_entry_count >= 1),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_count - 1; /* for indexing, we need to decrement by 1.*/
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ }
+ } else {
+ if (0 == data->dpm_level_enable_mask.pcie_dpm_enable_mask)
+ pr_err("Pcie Dpm Enablemask is 0 !");
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<(highest_pcie_level_enabled+1))) != 0)) {
+ highest_pcie_level_enabled++;
+ }
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<lowest_pcie_level_enabled)) == 0)) {
+ lowest_pcie_level_enabled++;
+ }
+
+ while ((count < highest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1<<(lowest_pcie_level_enabled+1+count))) == 0)) {
+ count++;
+ }
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled+1+count) : highest_pcie_level_enabled;
+
+
+ /* set pcieDpmLevel to highest_pcie_level_enabled*/
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ smu_data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled*/
+ smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr, level_array_address,
+ (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
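+
+/*
+ * Sketch of the pcie_dpm_enable_mask scan above for an assumed mask of 0x7
+ * (PCIe levels 0..2 enabled): highest_pcie_level_enabled ends up as 2,
+ * lowest_pcie_level_enabled as 0 and mid_pcie_level_enabled as 1, so SCLK
+ * level 0 is tied to PCIe level 0, SCLK level 1 to PCIe level 1 and every
+ * higher SCLK level to PCIe level 2.
+ */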
+
+static int tonga_calculate_mclk_params(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU72_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dllStateOn
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+ uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+ uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+ uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+ uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+ uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+ uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+ uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+ pp_atomctrl_memory_clock_param mpll_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+ memory_clock, &mpll_param, strobe_mode);
+ PP_ASSERT_WITH_CODE(
+ !result,
+ "Error retrieving Memory Clock Parameters from VBIOS.",
+ return result);
+
+ /* MPLL_FUNC_CNTL setup*/
+ mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL,
+ mpll_param.bw_ctrl);
+
+ /* MPLL_FUNC_CNTL_1 setup*/
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKF,
+ mpll_param.mpll_fb_divider.cl_kf);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKFRAC,
+ mpll_param.mpll_fb_divider.clk_frac);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, VCO_MODE,
+ mpll_param.vco_mode);
+
+ /* MPLL_AD_FUNC_CNTL setup*/
+ mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+ MPLL_AD_FUNC_CNTL, YCLK_POST_DIV,
+ mpll_param.mpll_post_divider);
+
+ if (data->is_memory_gddr5) {
+ /* MPLL_DQ_FUNC_CNTL setup*/
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_SEL,
+ mpll_param.yclk_sel);
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV,
+ mpll_param.mpll_post_divider);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+ /*
+ ************************************
+ Fref = Reference Frequency
+ NF = Feedback divider ratio
+ NR = Reference divider ratio
+ Fnom = Nominal VCO output frequency = Fref * NF / NR
+ Fs = Spreading Rate
+ D = Percentage down-spread / 2
+ Fint = Reference input frequency to PFD = Fref / NR
+ NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
+ CLKS = NS - 1 = ISS_STEP_NUM[11:0]
+ NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
+ CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
+ *************************************
+ */
+ pp_atomctrl_internal_ss_info ss_info;
+ uint32_t freq_nom;
+ uint32_t tmp;
+ uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+ /* for GDDR5 for all modes and DDR3 */
+ if (1 == mpll_param.qdr)
+ freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+ else
+ freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+ /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+
+ if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+ /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
+			/* ss_info.speed_spectrum_rate -- in unit of khz */
+ /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
+ /* = reference_clock * 5 / speed_spectrum_rate */
+ uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+
+			/* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpectrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
+ /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
+ uint32_t clkv =
+ (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+ ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+ mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+ }
+ }
+
+ /* MCLK_PWRMGT_CNTL setup */
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+	/* Save the result data to output memory level structure */
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
+
+static uint8_t tonga_get_mclk_frequency_ratio(uint32_t memory_clock,
+ bool strobe_mode)
+{
+ uint8_t mc_para_index;
+
+ if (strobe_mode) {
+ if (memory_clock < 12500)
+ mc_para_index = 0x00;
+ else if (memory_clock > 47500)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+ } else {
+ if (memory_clock < 65000)
+ mc_para_index = 0x00;
+ else if (memory_clock > 135000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+ }
+
+ return mc_para_index;
+}
+
+static uint8_t tonga_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+ uint8_t mc_para_index;
+
+ if (memory_clock < 10000)
+ mc_para_index = 0;
+ else if (memory_clock >= 80000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+
+ return mc_para_index;
+}
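+
+/*
+ * Example ratios from the two helpers above, with made-up clocks in the same
+ * units the callers pass: in strobe mode a memory_clock of 30000 maps to
+ * (30000 - 10000) / 2500 = 8, and the DDR3 helper maps 40000 to
+ * (40000 - 10000) / 5000 + 1 = 7. Clocks outside each window are clamped to
+ * the 0x00..0x0f range.
+ */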
+
+
+static int tonga_populate_single_memory_level(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU72_Discrete_MemoryLevel *memory_level
+ )
+{
+ uint32_t mvdd = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ bool dll_state_on;
+ struct cgs_display_info info = {0};
+ uint32_t mclk_edc_wr_enable_threshold = 40000;
+ uint32_t mclk_stutter_mode_threshold = 30000;
+ uint32_t mclk_edc_enable_threshold = 40000;
+ uint32_t mclk_strobe_mode_threshold = 40000;
+
+ if (NULL != pptable_info->vdd_dep_on_mclk) {
+ result = tonga_get_dependency_volt_by_clk(hwmgr,
+ pptable_info->vdd_dep_on_mclk,
+ memory_clock,
+ &memory_level->MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE(
+ !result,
+ "can not find MinVddc voltage value from memory VDDC "
+ "voltage dependency table",
+ return result);
+ }
+
+ if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
+ memory_level->MinMvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else
+ memory_level->MinMvdd = mvdd;
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 0;
+ memory_level->UpHyst = 0;
+ memory_level->DownHyst = 100;
+ memory_level->VoltageDownHyst = 0;
+
+ /* Indicates maximum activity level for this performance level.*/
+ memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ memory_level->StutterEnable = 0;
+ memory_level->StrobeEnable = 0;
+ memory_level->EdcReadEnable = 0;
+ memory_level->EdcWriteEnable = 0;
+ memory_level->RttEnable = 0;
+
+ /* default set to low watermark. Highest level will be set to high later.*/
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ data->display_timing.num_existing_displays = info.display_count;
+
+ if ((mclk_stutter_mode_threshold != 0) &&
+ (memory_clock <= mclk_stutter_mode_threshold) &&
+ (!data->is_uvd_enabled)
+ && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
+ && (data->display_timing.num_existing_displays <= 2)
+ && (data->display_timing.num_existing_displays != 0))
+ memory_level->StutterEnable = 1;
+
+ /* decide strobe mode*/
+ memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+ (memory_clock <= mclk_strobe_mode_threshold);
+
+ /* decide EDC mode and memory clock ratio*/
+ if (data->is_memory_gddr5) {
+ memory_level->StrobeRatio = tonga_get_mclk_frequency_ratio(memory_clock,
+ memory_level->StrobeEnable);
+
+ if ((mclk_edc_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_enable_threshold)) {
+ memory_level->EdcReadEnable = 1;
+ }
+
+ if ((mclk_edc_wr_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_wr_enable_threshold)) {
+ memory_level->EdcWriteEnable = 1;
+ }
+
+ if (memory_level->StrobeEnable) {
+ if (tonga_get_mclk_frequency_ratio(memory_clock, 1) >=
+ ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ } else {
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ } else {
+ dll_state_on = data->dll_default_on;
+ }
+ } else {
+ memory_level->StrobeRatio =
+ tonga_get_ddr3_mclk_frequency_ratio(memory_clock);
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ result = tonga_calculate_mclk_params(hwmgr,
+ memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinMvdd);
+ /* MCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+ /* Indicates maximum activity level for this performance level.*/
+ CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+ }
+
+ return result;
+}
+
+int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t level_array_address =
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size =
+ sizeof(SMU72_Discrete_MemoryLevel) *
+ SMU72_MAX_LEVELS_MEMORY;
+ SMU72_Discrete_MemoryLevel *levels =
+ smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = tonga_populate_single_memory_level(
+ hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &(smu_data->smc_state_table.MemoryLevel[i]));
+ if (result)
+ return result;
+ }
+
+ /* Only enable level 0 for now.*/
+ smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+ /*
+	 * In order to prevent MC activity in stutter mode from pushing DPM up,
+	 * the UVD change complements this by putting the MCLK in a higher state
+	 * by default so that we are not affected by the up threshold or MCLK DPM latency.
+ */
+ smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ /* set highest level watermark to high*/
+ smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ /* level count will send to smc once at init smc table and never change*/
+ result = smu7_copy_bytes_to_smc(hwmgr,
+ level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
+static int tonga_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pattern)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find mvdd value which clock is more than request */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ /* Always round to higher voltage. */
+ smio_pattern->Voltage =
+ data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
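+
+/*
+ * Minimal example of the MVDD lookup above, assuming a three-entry
+ * vdd_dep_on_mclk table with clk = {20000, 40000, 60000}: a request of 30000
+ * stops at the 40000 entry (always rounding up to the higher voltage), while
+ * a request above 60000 leaves i == count and trips the out-of-range assert.
+ */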
+
+
+static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+
+ SMIO_Pattern voltage_level;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+ /* The ACPI state should not do DPM on DC (or ever).*/
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ table->ACPILevel.MinVoltage =
+ smu_data->smc_state_table.GraphicsLevel[0].MinVoltage;
+
+	/* Use the reference clock as the ACPI state SCLK frequency */
+ table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ /* divider ID for required SCLK*/
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
+ SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+
+ /* For various features to be enabled/disabled while this level is active.*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ /* SCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+ table->MemoryACPILevel.MinVoltage =
+ smu_data->smc_state_table.MemoryLevel[0].MinVoltage;
+
+ /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
+
+ if (0 == tonga_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ /* Force reset on DLL*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+ /* Disable DLL in ACPIState*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+ /* Enable DLL bypass signal*/
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK0_BYPASS, 0);
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK1_BYPASS, 0);
+
+ table->MemoryACPILevel.DllCntl =
+ PP_HOST_TO_SMC_UL(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl =
+ PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+ table->MemoryACPILevel.MpllSs1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+ table->MemoryACPILevel.MpllSs2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ /* Indicates maximum activity level for this performance level.*/
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = 0;
+ table->MemoryACPILevel.StrobeEnable = 0;
+ table->MemoryACPILevel.EdcReadEnable = 0;
+ table->MemoryACPILevel.EdcWriteEnable = 0;
+ table->MemoryACPILevel.RttEnable = 0;
+
+ return result;
+}
+
+static int tonga_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->UvdLevelCount = (uint8_t) (mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->UvdLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->UvdLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->UvdLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(
+ hwmgr,
+ table->UvdLevel[count].VclkFrequency,
+ &dividers);
+
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for Vclk clock",
+ return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for Dclk clock",
+ return result);
+
+ table->UvdLevel[count].DclkDivider =
+ (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ }
+
+ return result;
+
+}
+
+static int tonga_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->VceLevelCount = (uint8_t) (mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency =
+ mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->VceLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->VceLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->VceLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->AcpLevelCount = (uint8_t) (mm_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency =
+ pptable_info->mm_dep_table->entries[count].aclk;
+ table->AcpLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->AcpLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->AcpLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->AcpLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for engine clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint8_t count;
+ pp_atomctrl_clock_dividers_vi dividers;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *pptable_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ pptable_info->mm_dep_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t) (mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].Frequency =
+ pptable_info->mm_dep_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage.Vddc =
+ phm_get_voltage_index(pptable_info->vddc_lookup_table,
+ mm_table->entries[count].vddc);
+ table->SamuLevel[count].MinVoltage.VddGfx =
+ (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
+ phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
+ mm_table->entries[count].vddgfx) : 0;
+ table->SamuLevel[count].MinVoltage.Vddci =
+ phm_get_voltage_id(&data->vddci_voltage_table,
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ table->SamuLevel[count].MinVoltage.Phases = 1;
+
+ /* retrieve divider value for VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((!result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ }
+
+ return result;
+}
+
+static int tonga_populate_memory_timing_parameters(
+ struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint32_t memory_clock,
+ struct SMU72_Discrete_MCArbDramTimingTableEntry *arb_regs
+ )
+{
+ uint32_t dramTiming;
+ uint32_t dramTiming2;
+ uint32_t burstTime;
+ int result;
+
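+ /* Ask the VBIOS to program DRAM timings for this sclk/mclk pair,
+ * then capture the resulting arbiter register values for the SMC table.
+ */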
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ engine_clock, memory_clock);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+ return 0;
+}
+
+static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ int result = 0;
+ SMU72_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+
+ memset(&arb_regs, 0x00, sizeof(SMU72_Discrete_MCArbDramTimingTable));
+
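+ /* Build one ARB DRAM timing entry for every sclk/mclk combination
+ * in the DPM tables, then upload the whole table to SMC RAM.
+ */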
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = tonga_populate_memory_timing_parameters
+ (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+
+ if (result)
+ break;
+ }
+ }
+
+ if (!result) {
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU72_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END
+ );
+ }
+
+ return result;
+}
+
+static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table*/
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+ if (result != 0) {
+ smu_data->smc_state_table.GraphicsBootLevel = 0;
+ pr_err("[powerplay] VBIOS did not find boot engine "
+ "clock value in dependency table. "
+ "Using Graphics DPM level 0 !");
+ result = 0;
+ }
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+ if (result != 0) {
+ smu_data->smc_state_table.MemoryBootLevel = 0;
+ pr_err("[powerplay] VBIOS did not find boot "
+ "engine clock value in dependency table."
+ "Using Memory DPM level 0 !");
+ result = 0;
+ }
+
+ table->BootVoltage.Vddc =
+ phm_get_voltage_id(&(data->vddc_voltage_table),
+ data->vbios_boot_state.vddc_bootup_value);
+ table->BootVoltage.VddGfx =
+ phm_get_voltage_id(&(data->vddgfx_voltage_table),
+ data->vbios_boot_state.vddgfx_bootup_value);
+ table->BootVoltage.Vddci =
+ phm_get_voltage_id(&(data->vddci_voltage_table),
+ data->vbios_boot_state.vddci_bootup_value);
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return result;
+}
+
+static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
+ volt_with_cks, value;
+ uint16_t clock_freq_u16;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
+ volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+ uint32_t hw_revision, dev_id;
+ struct cgs_system_info sys_info = {0};
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ hw_revision = (uint32_t)sys_info.value;
+
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ /* Read the SMU efuse to calculate RO and determine whether
+ * the part is SS or FF. If RO >= 1660 MHz, the part is FF.
+ */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (146 * 4));
+ efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (148 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+ efuse2 &= 0xF;
+
+ if (efuse2 == 1)
+ ro = (2300 - 1350) * efuse / 255 + 1350;
+ else
+ ro = (2500 - 1000) * efuse / 255 + 1000;
+
+ if (ro >= 1660)
+ type = 0;
+ else
+ type = 1;
+
+ /* Populate Stretch amount */
+ smu_data->smc_state_table.ClockStretcherAmount = stretch_amount;
+
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
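+ /* Empirical fits for the voltage needed with and without clock
+ * stretching, as a function of sclk (10 kHz units) and the RO value
+ * derived from the efuse; parts matching ASICID_IS_TONGA_P use a
+ * different set of coefficients.
+ */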
+ if (ASICID_IS_TONGA_P(dev_id, hw_revision)) {
+ volt_without_cks = (uint32_t)((7732 + 60 - ro - 20838 *
+ (sclk_table->entries[i].clk/100) / 10000) * 1000 /
+ (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000)));
+ volt_with_cks = (uint32_t)((5250 + 51 - ro - 2404 *
+ (sclk_table->entries[i].clk/100) / 100000) * 1000 /
+ (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000)));
+ } else {
+ volt_without_cks = (uint32_t)((14041 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
+ (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
+ volt_with_cks = (uint32_t)((13946 *
+ (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
+ (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+ }
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+ smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ STRETCH_ENABLE, 0x0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ staticEnable, 0x1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
+ masterReset, 0x0);
+
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFC2FF87;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][0];
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][1];
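+ /* Sclk of the highest graphics DPM level, converted back to host
+ * byte order and from 10 kHz units to MHz for the lookup-table check.
+ */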
+ clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(smu_data->smc_state_table.
+ GraphicsLevel[smu_data->smc_state_table.GraphicsDpmLevelCount - 1].
+ SclkFrequency) / 100);
+ if (tonga_clock_stretcher_lookup_table[stretch_amount2][0] <
+ clock_freq_u16 &&
+ tonga_clock_stretcher_lookup_table[stretch_amount2][1] >
+ clock_freq_u16) {
+ /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
+ value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
+ /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
+ value |= (tonga_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
+ /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
+ value |= (tonga_clock_stretch_amount_conversion
+ [tonga_clock_stretcher_lookup_table[stretch_amount2][3]]
+ [stretch_amount]) << 3;
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].minFreq);
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.CKS_LOOKUPTable.
+ CKS_LOOKUPTableEntry[0].maxFreq);
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
+ tonga_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
+ smu_data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
+ (tonga_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ /* Populate DDT Lookup Table */
+ for (i = 0; i < 4; i++) {
+ /* Assign the minimum and maximum VID stored
+ * in the last row of Clock Stretcher Voltage Table.
+ */
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].minVID =
+ (uint8_t) tonga_clock_stretcher_ddt_table[type][i][2];
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].maxVID =
+ (uint8_t) tonga_clock_stretcher_ddt_table[type][i][3];
+ /* Loop through each SCLK and check the frequency
+ * to see if it lies within the frequency for clock stretcher.
+ */
+ for (j = 0; j < smu_data->smc_state_table.GraphicsDpmLevelCount; j++) {
+ cks_setting = 0;
+ clock_freq = PP_SMC_TO_HOST_UL(
+ smu_data->smc_state_table.GraphicsLevel[j].SclkFrequency);
+ /* Check the allowed frequency against the sclk level[j].
+ * Sclk's endianness has already been converted,
+ * and it's in 10 kHz units,
+ * as opposed to the data table, which is in MHz units.
+ */
+ if (clock_freq >= tonga_clock_stretcher_ddt_table[type][i][0] * 100) {
+ cks_setting |= 0x2;
+ if (clock_freq < tonga_clock_stretcher_ddt_table[type][i][1] * 100)
+ cks_setting |= 0x1;
+ }
+ smu_data->smc_state_table.ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
+ }
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.
+ ClockStretcherDataTable.
+ ClockStretcherDataTableEntry[i].setting);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
+static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint16_t config;
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
+ /* Split mode */
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= config;
+ } else {
+ pr_err("VDDC and VDDGFX should "
+ "be both on SVI2 control in splitted mode !\n");
+ }
+ } else {
+ /* Merged mode */
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config<<VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ pr_err("VDDC should be on "
+ "SVI2 control in merged mode !\n");
+ }
+ }
+
+ /* Set Vddci Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+ } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config<<VRCONF_VDDCI_SHIFT);
+ }
+
+ /* Set Mvdd Voltage Controller */
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ config = VR_SMIO_PATTERN_2;
+ table->VRConfig |= (config<<VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+static int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t tmp;
+ int result;
+
+ /*
+ * This is a read-modify-write on the first byte of the ARB table.
+ * The first byte in the SMU72_Discrete_MCArbDramTimingTable structure
+ * is the field 'current'.
+ * This solution is ugly, but we never write the whole table,
+ * only individual fields in it.
+ * In reality this field should not be in that structure
+ * but in a soft register.
+ */
+ result = smu7_read_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
+
+ if (result != 0)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return smu7_write_smc_sram_dword(hwmgr,
+ smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
+}
+
+
+static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ int i, j, k;
+ const uint16_t *pdef1, *pdef2;
+
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usTDP * 256));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
+ (uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range !",
+ );
+
+ dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+ dpm_table->BAPM_TEMP_GRADIENT =
+ PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
+ pdef1 = defaults->bapmti_r;
+ pdef2 = defaults->bapmti_rc;
+
+ for (i = 0; i < SMU72_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU72_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU72_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] =
+ PP_HOST_TO_SMC_US(*pdef1);
+ dpm_table->BAPMTI_RC[i][j][k] =
+ PP_HOST_TO_SMC_US(*pdef2);
+ pdef1++;
+ pdef2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ /* The number of TDC fraction bits was changed from 8 to 7
+ * for Fiji, as requested by the SMC team.
+ */
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->tdc_vddc_throttle_release_limit_perc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+ return 0;
+}
+
+static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (smu7_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 "
+ "(SviLoadLineEn) from SMC Failed !",
+ return -EINVAL);
+ else
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity & (1 << 15)) ||
+ (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0))
+ hwmgr->thermal_controller.advanceFanControlParameters.
+ usFanOutputSensitivity = hwmgr->thermal_controller.
+ advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta =
+ PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
+ advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+ return 0;
+}
+
+static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed !",
+ return -EINVAL);
+
+ /* DW6 */
+ if (tonga_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed !",
+ return -EINVAL);
+ /* DW7 */
+ if (tonga_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed !",
+ return -EINVAL);
+ /* DW8 */
+ if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl Failed !",
+ return -EINVAL);
+
+ /* DW9-DW12 */
+ if (tonga_populate_temperature_scaler(hwmgr) != 0)
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed !",
+ return -EINVAL);
+
+ /* DW13-DW14 */
+ if (tonga_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan "
+ "Control parameters Failed !",
+ return -EINVAL);
+
+ /* DW15-DW18 */
+ if (tonga_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed !",
+ return -EINVAL);
+
+ /* DW20 */
+ if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(
+ false,
+ "Attempt to populate BapmVddCBaseLeakage "
+ "Hi and Lo Sidd Failed !",
+ return -EINVAL);
+
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed !",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+static int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_MCRegisters *mc_reg_table)
+{
+ const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smu_backend;
+
+ uint32_t i, j;
+
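+ /* Copy only the register addresses whose values actually differ
+ * between MC timing entries (see tonga_set_valid_flag).
+ */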
+ for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+ if (smu_data->mc_reg_table.validflag & 1<<j) {
+ PP_ASSERT_WITH_CODE(
+ i < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ "Index of mc_reg_table->address[] array "
+ "out of boundary",
+ return -EINVAL);
+ mc_reg_table->address[i].s0 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (uint8_t)i;
+
+ return 0;
+}
+
+ /* Convert register values from driver to SMC format. */
+static void tonga_convert_mc_registers(
+ const struct tonga_mc_reg_entry *entry,
+ SMU72_Discrete_MCRegisterSet *data,
+ uint32_t num_entries, uint32_t valid_flag)
+{
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & 1<<j) {
+ data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
+
+static int tonga_convert_mc_reg_table_entry_to_smc(
+ struct pp_hwmgr *hwmgr,
+ const uint32_t memory_clock,
+ SMU72_Discrete_MCRegisterSet *mc_reg_table_data
+ )
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t i = 0;
+
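+ /* Pick the first MC register set whose mclk_max covers the requested
+ * memory clock; fall back to the last entry if none does.
+ */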
+ for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+ if (memory_clock <=
+ smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+ break;
+ }
+ }
+
+ if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ tonga_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, smu_data->mc_reg_table.last,
+ smu_data->mc_reg_table.validflag);
+
+ return 0;
+}
+
+static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+ SMU72_Discrete_MCRegisters *mc_regs)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int res;
+ uint32_t i;
+
+ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ res = tonga_convert_mc_reg_table_entry_to_smc(
+ hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_regs->data[i]
+ );
+
+ if (0 != res)
+ result = res;
+ }
+
+ return result;
+}
+
+static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t address;
+ int32_t result;
+
+ if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+
+ memset(&smu_data->mc_regs, 0, sizeof(SMU72_Discrete_MCRegisters));
+
+ result = tonga_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+ if (result != 0)
+ return result;
+
+
+ address = smu_data->smu7_data.mc_reg_table_start +
+ (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
+
+ return smu7_copy_bytes_to_smc(
+ hwmgr, address,
+ (uint8_t *)&smu_data->mc_regs.data[0],
+ sizeof(SMU72_Discrete_MCRegisterSet) *
+ data->dpm_table.mclk_table.count,
+ SMC_RAM_END);
+}
+
+static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
+ result = tonga_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize MCRegTable for the MC register addresses !",
+ return result;);
+
+ result = tonga_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize MCRegTable for driver state !",
+ return result;);
+
+ return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
+ (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ smu_data->power_tune_defaults =
+ &tonga_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ smu_data->power_tune_defaults = &tonga_power_tune_data_set_array[0];
+}
+
+static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ struct SMU72_Discrete_GraphicsLevel *levels =
+ data->smc_state_table.GraphicsLevel;
+ unsigned min_level = 1;
+
+ hwmgr->default_gfx_power_profile.activity_threshold =
+ be16_to_cpu(levels[0].ActivityLevel);
+ hwmgr->default_gfx_power_profile.up_hyst = levels[0].UpHyst;
+ hwmgr->default_gfx_power_profile.down_hyst = levels[0].DownHyst;
+ hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
+
+ hwmgr->default_compute_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
+
+ /* Workaround compute SDMA instability: disable lowest SCLK
+ * DPM level. Optimize compute power profile: Use only highest
+ * 2 power levels (if more than 2 are available), Hysteresis:
+ * 0ms up, 5ms down
+ */
+ if (data->smc_state_table.GraphicsDpmLevelCount > 2)
+ min_level = data->smc_state_table.GraphicsDpmLevelCount - 2;
+ else if (data->smc_state_table.GraphicsDpmLevelCount == 2)
+ min_level = 1;
+ else
+ min_level = 0;
+ hwmgr->default_compute_power_profile.min_sclk =
+ be32_to_cpu(levels[min_level].SclkFrequency);
+ hwmgr->default_compute_power_profile.up_hyst = 0;
+ hwmgr->default_compute_power_profile.down_hyst = 5;
+
+ hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
+ hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
+}
+
+static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ uint8_t i;
+ pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+
+
+ memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+ tonga_initialize_power_tune_defaults(hwmgr);
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ tonga_populate_smc_voltage_tables(hwmgr, table);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ i = PHM_READ_FIELD(hwmgr->device, CC_MC_MAX_CHANNEL, NOOFCHAN);
+
+ if (i == 1 || i == 0)
+ table->SystemFlags |= 0x40;
+
+ if (data->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = tonga_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ULV state !",
+ return result;);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = tonga_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Link Level !", return result);
+
+ result = tonga_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Graphics Level !", return result);
+
+ result = tonga_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Memory Level !", return result);
+
+ result = tonga_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ACPI Level !", return result);
+
+ result = tonga_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize VCE Level !", return result);
+
+ result = tonga_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize ACP Level !", return result);
+
+ result = tonga_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize SAMU Level !", return result);
+
+ /* Since only the initial state is completely set up at this
+ * point (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = tonga_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to Write ARB settings for the initial state.",
+ return result;);
+
+ result = tonga_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize UVD Level !", return result);
+
+ result = tonga_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to initialize Boot Level !", return result);
+
+ result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate BAPM Parameters !", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = tonga_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate Clock Stretcher Data Table !",
+ return result;);
+ }
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+
+ /*
+ * Cail reads the current link status and reports it as the cap (we
+ * cannot change this due to some previous issues we had).
+ * The SMC drops the link status to the lowest level after PowerPlay
+ * enables DPM. After PnP or toggling CF, the driver gets reloaded,
+ * but this time Cail reads the current link status, which was set
+ * low by the SMC, and reports it as the cap to PowerPlay.
+ * To avoid this, we set PCIeBootLinkLevel to the highest DPM level.
+ */
+ PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
+
+ table->PCIeGenInterval = 1;
+
+ result = tonga_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to populate VRConfig setting !", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+ &gpio_pin_assignment)) {
+ table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin_assignment)) {
+ table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_Falcon_QuickTransition);
+
+ if (0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_Falcon_QuickTransition);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr,
+ THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
+
+ table->ThermOutPolarity =
+ (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
+ (1 << gpio_pin_assignment.uc_gpio_pin_bit_shift))) ? 1 : 0;
+
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO*/
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot) &&
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CombinePCCWithThermalSignal)){
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ }
+ } else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ for (i = 0; i < SMU72_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ /* Upload all dpm data to SMC memory (dpm level, dpm level count, etc). */
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
+ SMC_RAM_END);
+
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to upload dpm data to SMC memory !", return result;);
+
+ result = tonga_init_arb_table_index(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to upload arb data to SMC memory !", return result);
+
+ result = tonga_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE((!result),
+ "Failed to populate PM fuses !", return result);
+
+ result = tonga_populate_initial_mc_reg_table(hwmgr);
+ PP_ASSERT_WITH_CODE((!result),
+ "Failed to populate initialize MC Reg table !", return result);
+
+ tonga_save_default_power_profile(hwmgr);
+
+ return 0;
+}
+
+static int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl))
+ return 0;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ if (0 == smu_data->smu7_data.fan_table_start) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (0 == duty100) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
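+ /* Fan curve slopes for the Tmin->Tmed and Tmed->Thigh segments,
+ * scaled and rounded to the representation expected by the SMC.
+ */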
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ fan_table.FanControl_GL_Flag = 1;
+
+ res = smu7_copy_bytes_to_smc(hwmgr,
+ smu_data->smu7_data.fan_table_start,
+ (uint8_t *)&fan_table,
+ (uint32_t)sizeof(fan_table),
+ SMC_RAM_END);
+
+ return res;
+}
+
+
+static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return tonga_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = smu7_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+
+ result = tonga_update_and_upload_mc_reg_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE((!result),
+ "Failed to upload MC reg table !",
+ return result);
+
+ result = tonga_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters !",
+ );
+
+ return result;
+}
+
+static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU72_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
+ case PreVBlankGap:
+ return offsetof(SMU72_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU72_SoftRegisters, VBlankTimeout);
+ case UcodeLoadStatus:
+ return offsetof(SMU72_SoftRegisters, UcodeLoadStatus);
+ case DRAM_LOG_ADDR_H:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_ADDR_H);
+ case DRAM_LOG_ADDR_L:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_ADDR_L);
+ case DRAM_LOG_PHY_ADDR_H:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_PHY_ADDR_H);
+ case DRAM_LOG_PHY_ADDR_L:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_PHY_ADDR_L);
+ case DRAM_LOG_BUFF_SIZE:
+ return offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE);
+ }
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case UvdBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
+ case VceBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+ case SamuBootLevel:
+ return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
+ }
+ }
+ pr_warn("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+static uint32_t tonga_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU72_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU72_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU72_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU72_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU72_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDGFX:
+ return SMU72_MAX_LEVELS_VDDGFX;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU72_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU72_MAX_LEVELS_MVDD;
+ }
+ pr_warn("can't get the mac value %x\n", value);
+
+ return 0;
+}
+
+static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ smu_data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ smu_data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
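+ /* Round the byte offset down to a dword boundary for the indirect
+ * register read-modify-write below.
+ */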
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC,
+ mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ return 0;
+}
+
+static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data =
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+
+ smu_data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ return 0;
+}
+
+static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+ smu_data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
+
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
+ return 0;
+}
+
+static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+ switch (type) {
+ case SMU_UVD_TABLE:
+ tonga_update_uvd_smc_table(hwmgr);
+ break;
+ case SMU_VCE_TABLE:
+ tonga_update_vce_smc_table(hwmgr);
+ break;
+ case SMU_SAMU_TABLE:
+ tonga_update_samu_smc_table(hwmgr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.dpm_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->smu7_data.soft_regs_start = tmp;
+ }
+
+ error |= (result != 0);
+
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.mc_reg_table_start = tmp;
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.fan_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ smu_data->smu7_data.arb_table_start = tmp;
+
+ error |= (result != 0);
+
+ result = smu7_read_smc_sram_dword(hwmgr,
+ SMU72_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU72_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (result != 0);
+
+ return error ? 1 : 0;
+}
+
+/*---------------------------MC----------------------------*/
+
+static uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
+{
+ return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
+static bool tonga_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+ bool result = true;
+
+ switch (in_reg) {
+ case mmMC_SEQ_RAS_TIMING:
+ *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_DLL_STBY:
+ *out_reg = mmMC_SEQ_DLL_STBY_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD0:
+ *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD1:
+ *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CTRL:
+ *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+ break;
+
+ case mmMC_SEQ_CAS_TIMING:
+ *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING:
+ *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING2:
+ *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CMD:
+ *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CTL:
+ *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D0:
+ *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D1:
+ *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D0:
+ *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D1:
+ *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+ break;
+
+ case mmMC_PMG_CMD_EMRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS1:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ break;
+
+ case mmMC_SEQ_PMG_TIMING:
+ *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS2:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_2:
+ *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+ break;
+
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static int tonga_set_s0_mc_reg_index(struct tonga_mc_reg_table *table)
+{
+ uint32_t i;
+ uint16_t address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ tonga_check_s0_mc_reg_index(table->mc_reg_address[i].s1,
+ &address) ?
+ address :
+ table->mc_reg_address[i].s1;
+ }
+ return 0;
+}
+
+static int tonga_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+ struct tonga_mc_reg_table *ni_table)
+{
+ uint8_t i, j;
+
+ PP_ASSERT_WITH_CODE((table->last <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ for (i = 0; i < table->last; i++)
+ ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+ ni_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ni_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++) {
+ ni_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ }
+
+ ni_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
+static int tonga_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct tonga_mc_reg_table *table)
+{
+ uint8_t i, j, k;
+ uint32_t temp_reg;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
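+ /* Append derived EMRS/MRS (and AUTO_CMD for non-GDDR5) entries for
+ * MC_SEQ_MISC1, and an MRS1 entry for MC_SEQ_RESERVE_M, after the
+ * last VBIOS-provided register.
+ */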
+ for (i = 0, j = table->last; i < table->last; i++) {
+ PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ switch (table->mc_reg_address[i].s1) {
+
+ case mmMC_SEQ_MISC1:
+ temp_reg = cgs_read_register(hwmgr->device,
+ mmMC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j < SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+ if (!data->is_memory_gddr5)
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ if (!data->is_memory_gddr5) {
+ table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++)
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ }
+
+ break;
+
+ case mmMC_SEQ_RESERVE_M:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU72_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
+static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
+{
+ uint8_t i, j;
+
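+ /* Flag a register for per-MCLK programming only if its value differs
+ * between at least two timing entries.
+ */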
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->validflag |= (1<<i);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
+ pp_atomctrl_mc_reg_table *table;
+ struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+ uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
+
+ table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+ if (table == NULL)
+ return -ENOMEM;
+
+ /* Program additional LP registers that are no longer programmed by VBIOS */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP,
+ cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
+ cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+ memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+ result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+ if (!result)
+ result = tonga_copy_vbios_smc_reg_table(table, ni_table);
+
+ if (!result) {
+ tonga_set_s0_mc_reg_index(ni_table);
+ result = tonga_set_mc_special_registers(hwmgr, ni_table);
+ }
+
+ if (!result)
+ tonga_set_valid_flag(ni_table);
+
+ kfree(table);
+
+ return result;
+}
+
+static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+ ? true : false;
+}
+
+static int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request)
+{
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)
+ (hwmgr->smu_backend);
+ struct SMU72_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t array = smu_data->smu7_data.dpm_table_start +
+ offsetof(SMU72_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU72_Discrete_GraphicsLevel) *
+ SMU72_MAX_LEVELS_GRAPHICS;
+ uint32_t i;
+
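+ /* Apply the requested power-profile thresholds to every graphics DPM level. */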
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ levels[i].ActivityLevel =
+ cpu_to_be16(request->activity_threshold);
+ levels[i].EnabledForActivity = 1;
+ levels[i].UpHyst = request->up_hyst;
+ levels[i].DownHyst = request->down_hyst;
+ }
+
+ return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ array_size, SMC_RAM_END);
+}
+
const struct pp_smumgr_func tonga_smu_funcs = {
.smu_init = &tonga_smu_init,
.smu_fini = &smu7_smu_fini,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
index 8c4f761d5bc8..5d70a00348e2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h
@@ -25,8 +25,26 @@
#define _TONGA_SMUMGR_H_
#include "smu72_discrete.h"
-
#include "smu7_smumgr.h"
+#include "smu72.h"
+
+
+#define ASICID_IS_TONGA_P(wDID, bRID) \
+ (((wDID == 0x6930) && ((bRID == 0xF0) || (bRID == 0xF1) || (bRID == 0xFF))) \
+ || ((wDID == 0x6920) && ((bRID == 0) || (bRID == 1))))
+
+struct tonga_pt_defaults {
+ uint8_t svi_load_line_en;
+ uint8_t svi_load_line_vddC;
+ uint8_t tdc_vddc_throttle_release_limit_perc;
+ uint8_t tdc_mawt;
+ uint8_t tdc_waterfall_ctl;
+ uint8_t dte_ambient_temp_base;
+ uint32_t display_cac;
+ uint32_t bapm_temp_gradient;
+ uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
+ uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
+};
struct tonga_mc_reg_entry {
uint32_t mclk_max;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 408514c965a0..2f979fb86824 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -53,20 +53,20 @@
#define smnMP0_FW_INTF 0x3010104
#define smnMP1_PUB_CTRL 0x3010b14
-static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr)
+static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
uint32_t mp1_fw_flags, reg;
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
- cgs_write_register(smumgr->device, reg,
+ cgs_write_register(hwmgr->device, reg,
(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
- mp1_fw_flags = cgs_read_register(smumgr->device, reg);
+ mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
return true;
@@ -80,20 +80,20 @@ static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr)
* @param smumgr the address of the powerplay hardware manager.
* @return TRUE SMC has responded, FALSE otherwise.
*/
-static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr)
+static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- smum_wait_for_register_unequal(smumgr, reg,
+ phm_wait_for_register_unequal(hwmgr, reg,
0, MP1_C2PMSG_90__CONTENT_MASK);
- return cgs_read_register(smumgr->device, reg);
+ return cgs_read_register(hwmgr->device, reg);
}
/*
@@ -102,43 +102,43 @@ static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr)
* @param msg the message to send.
* @return Always return 0.
*/
-int vega10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr,
+int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
- cgs_write_register(smumgr->device, reg, msg);
+ cgs_write_register(hwmgr->device, reg, msg);
return 0;
}
/*
* Send a message to the SMC, and wait for its response.
- * @param smumgr the address of the powerplay hardware manager.
+ * @param hwmgr the address of the powerplay hardware manager.
* @param msg the message to send.
* @return Always return 0.
*/
-int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
- vega10_wait_for_response(smumgr);
+ vega10_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
- vega10_send_msg_to_smc_without_waiting(smumgr, msg);
+ vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (vega10_wait_for_response(smumgr) != 1)
+ if (vega10_wait_for_response(hwmgr) != 1)
pr_err("Failed to send message: 0x%x\n", msg);
return 0;
@@ -146,32 +146,32 @@ int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
/*
* Send a message to the SMC with parameter
- * @param smumgr: the address of the powerplay hardware manager.
+ * @param hwmgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return Always return 0.
*/
-int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
- vega10_wait_for_response(smumgr);
+ vega10_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(smumgr->device, reg, parameter);
+ cgs_write_register(hwmgr->device, reg, parameter);
- vega10_send_msg_to_smc_without_waiting(smumgr, msg);
+ vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (vega10_wait_for_response(smumgr) != 1)
+ if (vega10_wait_for_response(hwmgr) != 1)
pr_err("Failed to send message: 0x%x\n", msg);
return 0;
@@ -180,51 +180,51 @@ int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
/*
* Send a message to the SMC with parameter, do not wait for response
- * @param smumgr: the address of the powerplay hardware manager.
+ * @param hwmgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return The response that came from the SMC.
*/
int vega10_send_msg_to_smc_with_parameter_without_waiting(
- struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+ struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
uint32_t reg;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(smumgr->device, reg, parameter);
+ cgs_write_register(hwmgr->device, reg, parameter);
- return vega10_send_msg_to_smc_without_waiting(smumgr, msg);
+ return vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
}
/*
* Retrieve an argument from SMC.
- * @param smumgr the address of the powerplay hardware manager.
+ * @param hwmgr the address of the powerplay hardware manager.
* @param arg pointer to store the argument from SMC.
* @return Always return 0.
*/
-int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg)
+int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
{
uint32_t reg;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- *arg = cgs_read_register(smumgr->device, reg);
+ *arg = cgs_read_register(hwmgr->device, reg);
return 0;
}
/*
* Copy table from SMC into driver FB
- * @param smumgr the address of the SMC manager
+ * @param hwmgr the address of the HW manager
* @param table_id the driver's table ID to copy from
*/
-int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
+int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL);
@@ -232,16 +232,16 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
@@ -255,14 +255,14 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
/*
* Copy table from Driver FB into SMC
- * @param smumgr the address of the SMC manager
+ * @param hwmgr the address of the HW manager
* @param table_id the table to copy from
*/
-int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
+int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL);
@@ -274,17 +274,17 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
@@ -293,87 +293,87 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
return 0;
}
-int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table)
+int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table)
{
PP_ASSERT_WITH_CODE(avfs_table,
"No access to SMC AVFS Table",
return -EINVAL);
- return vega10_copy_table_from_smc(smumgr, avfs_table, AVFSTABLE);
+ return vega10_copy_table_from_smc(hwmgr, avfs_table, AVFSTABLE);
}
-int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table)
+int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table)
{
PP_ASSERT_WITH_CODE(avfs_table,
"No access to SMC AVFS Table",
return -EINVAL);
- return vega10_copy_table_to_smc(smumgr, avfs_table, AVFSTABLE);
+ return vega10_copy_table_to_smc(hwmgr, avfs_table, AVFSTABLE);
}
-int vega10_enable_smc_features(struct pp_smumgr *smumgr,
+int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
bool enable, uint32_t feature_mask)
{
int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
PPSMC_MSG_DisableSmuFeatures;
- return vega10_send_msg_to_smc_with_parameter(smumgr,
+ return vega10_send_msg_to_smc_with_parameter(hwmgr,
msg, feature_mask);
}
-int vega10_get_smc_features(struct pp_smumgr *smumgr,
+int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
uint32_t *features_enabled)
{
if (features_enabled == NULL)
return -EINVAL;
- if (!vega10_send_msg_to_smc(smumgr,
+ if (!vega10_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeatures)) {
- vega10_read_arg_from_smc(smumgr, features_enabled);
+ vega10_read_arg_from_smc(hwmgr, features_enabled);
return 0;
}
return -EINVAL;
}
-int vega10_set_tools_address(struct pp_smumgr *smumgr)
+int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
if (priv->smu_tables.entry[TOOLSTABLE].table_addr_high ||
priv->smu_tables.entry[TOOLSTABLE].table_addr_low) {
- if (!vega10_send_msg_to_smc_with_parameter(smumgr,
+ if (!vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
priv->smu_tables.entry[TOOLSTABLE].table_addr_high))
- vega10_send_msg_to_smc_with_parameter(smumgr,
+ vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
priv->smu_tables.entry[TOOLSTABLE].table_addr_low);
}
return 0;
}
-static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
+static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
uint32_t smc_driver_if_version;
struct cgs_system_info sys_info = {0};
uint32_t dev_id;
uint32_t rev_id;
- PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetDriverIfVersion),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- vega10_read_arg_from_smc(smumgr, &smc_driver_if_version);
+ vega10_read_arg_from_smc(hwmgr, &smc_driver_if_version);
sys_info.size = sizeof(struct cgs_system_info);
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
- cgs_query_system_info(smumgr->device, &sys_info);
+ cgs_query_system_info(hwmgr->device, &sys_info);
dev_id = (uint32_t)sys_info.value;
sys_info.size = sizeof(struct cgs_system_info);
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
- cgs_query_system_info(smumgr->device, &sys_info);
+ cgs_query_system_info(hwmgr->device, &sys_info);
rev_id = (uint32_t)sys_info.value;
if (!((dev_id == 0x687f) &&
@@ -392,7 +392,7 @@ static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
return 0;
}
-static int vega10_smu_init(struct pp_smumgr *smumgr)
+static int vega10_smu_init(struct pp_hwmgr *hwmgr)
{
struct vega10_smumgr *priv;
uint64_t mc_addr;
@@ -401,7 +401,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
int ret;
struct cgs_firmware_info info = {0};
- ret = cgs_get_firmware_info(smumgr->device,
+ ret = cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
&info);
if (ret || !info.kptr)
@@ -412,10 +412,10 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
if (!priv)
return -ENOMEM;
- smumgr->backend = priv;
+ hwmgr->smu_backend = priv;
/* allocate space for pptable */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(PPTable_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -425,8 +425,8 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for pptable.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -441,7 +441,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
priv->smu_tables.entry[PPTABLE].handle = handle;
/* allocate space for watermarks table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(Watermarks_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -451,10 +451,10 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for wmtable.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -469,7 +469,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
priv->smu_tables.entry[WMTABLE].handle = handle;
/* allocate space for AVFS table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(AvfsTable_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -479,12 +479,12 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for avfs table.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -500,7 +500,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
tools_size = 0x19000;
if (tools_size) {
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
tools_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -522,7 +522,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
}
/* allocate space for AVFS Fuse table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(AvfsFuseOverride_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -532,16 +532,16 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for avfs fuse table.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -558,36 +558,36 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
return 0;
}
-static int vega10_smu_fini(struct pp_smumgr *smumgr)
+static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
if (priv) {
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
if (priv->smu_tables.entry[TOOLSTABLE].table)
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle);
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
}
return 0;
}
-static int vega10_start_smu(struct pp_smumgr *smumgr)
+static int vega10_start_smu(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(smumgr),
+ PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr),
"Failed to verify SMC interface!",
return -EINVAL);
- vega10_set_tools_address(smumgr);
+ vega10_set_tools_address(hwmgr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
index 821425c1e4e0..0695455b21b2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
@@ -52,19 +52,19 @@ struct vega10_smumgr {
struct smu_table_array smu_tables;
};
-int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg);
-int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
+int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
+int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
-int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
+int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
-int vega10_enable_smc_features(struct pp_smumgr *smumgr,
+int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
bool enable, uint32_t feature_mask);
-int vega10_get_smc_features(struct pp_smumgr *smumgr,
+int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
uint32_t *features_enabled);
-int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table);
-int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table);
+int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table);
+int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table);
-int vega10_set_tools_address(struct pp_smumgr *smumgr);
+int vega10_set_tools_address(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 38cea6fb25a8..92ec663fdada 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -133,6 +133,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
entity->rq = rq;
entity->sched = sched;
+ spin_lock_init(&entity->rq_lock);
spin_lock_init(&entity->queue_lock);
r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
if (r)
@@ -187,7 +188,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
if (kfifo_is_empty(&entity->job_queue))
return false;
- if (ACCESS_ONCE(entity->dependency))
+ if (READ_ONCE(entity->dependency))
return false;
return true;
@@ -204,18 +205,38 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity)
{
- struct amd_sched_rq *rq = entity->rq;
+ int r;
if (!amd_sched_entity_is_initialized(sched, entity))
return;
-
/**
* The client will not queue more IBs during this fini, consume existing
- * queued IBs
+ * queued IBs or discard them on SIGKILL
*/
- wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
+ if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
+ r = -ERESTARTSYS;
+ else
+ r = wait_event_killable(sched->job_scheduled,
+ amd_sched_entity_is_idle(entity));
+ amd_sched_entity_set_rq(entity, NULL);
+ if (r) {
+ struct amd_sched_job *job;
+
+ /* Park the scheduler thread for a moment to make sure it isn't
+ * processing our entity.
+ */
+ kthread_park(sched->thread);
+ kthread_unpark(sched->thread);
+ while (kfifo_out(&entity->job_queue, &job, sizeof(job))) {
+ struct amd_sched_fence *s_fence = job->s_fence;
+ amd_sched_fence_scheduled(s_fence);
+ dma_fence_set_error(&s_fence->finished, -ESRCH);
+ amd_sched_fence_finished(s_fence);
+ dma_fence_put(&s_fence->finished);
+ sched->ops->free_job(job);
+ }
- amd_sched_rq_remove_entity(rq, entity);
+ }
kfifo_free(&entity->job_queue);
}
@@ -236,6 +257,24 @@ static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
dma_fence_put(f);
}
+void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+ struct amd_sched_rq *rq)
+{
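+ /* Move the entity to the new run queue (or detach it) under rq_lock. */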
+ if (entity->rq == rq)
+ return;
+
+ spin_lock(&entity->rq_lock);
+
+ if (entity->rq)
+ amd_sched_rq_remove_entity(entity->rq, entity);
+
+ entity->rq = rq;
+ if (rq)
+ amd_sched_rq_add_entity(rq, entity);
+
+ spin_unlock(&entity->rq_lock);
+}
+
bool amd_sched_dependency_optimized(struct dma_fence* fence,
struct amd_sched_entity *entity)
{
@@ -293,7 +332,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
}
static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity)
+amd_sched_entity_peek_job(struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = entity->sched;
struct amd_sched_job *sched_job;
@@ -333,14 +372,15 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
/* first job wakes up scheduler */
if (first) {
/* Add the entity to the run queue */
+ spin_lock(&entity->rq_lock);
amd_sched_rq_add_entity(entity->rq, entity);
+ spin_unlock(&entity->rq_lock);
amd_sched_wakeup(sched);
}
return added;
}
-/* job_finish is called after hw fence signaled, and
- * the job had already been deleted from ring_mirror_list
+/* job_finish is called after hw fence signaled
*/
static void amd_sched_job_finish(struct work_struct *work)
{
@@ -366,6 +406,7 @@ static void amd_sched_job_finish(struct work_struct *work)
schedule_delayed_work(&next->work_tdr, sched->timeout);
}
spin_unlock(&sched->job_list_lock);
+ dma_fence_put(&s_job->s_fence->finished);
sched->ops->free_job(s_job);
}
@@ -381,6 +422,9 @@ static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
struct amd_gpu_scheduler *sched = s_job->sched;
+ dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
+ amd_sched_job_finish_cb);
+
spin_lock(&sched->job_list_lock);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
@@ -473,8 +517,6 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
struct amd_sched_entity *entity = sched_job->s_entity;
trace_amd_sched_job(sched_job);
- dma_fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
- amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
@@ -545,6 +587,7 @@ static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
container_of(cb, struct amd_sched_fence, cb);
struct amd_gpu_scheduler *sched = s_fence->sched;
+ dma_fence_get(&s_fence->finished);
atomic_dec(&sched->hw_rq_count);
amd_sched_fence_finished(s_fence);
@@ -585,7 +628,7 @@ static int amd_sched_main(void *param)
if (!entity)
continue;
- sched_job = amd_sched_entity_pop_job(entity);
+ sched_job = amd_sched_entity_peek_job(entity);
if (!sched_job)
continue;
@@ -596,6 +639,7 @@ static int amd_sched_main(void *param)
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
+
if (fence) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &s_fence->cb,
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index f9d8f28efd16..52c8e5447624 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -39,6 +39,7 @@ struct amd_sched_rq;
struct amd_sched_entity {
struct list_head list;
struct amd_sched_rq *rq;
+ spinlock_t rq_lock;
struct amd_gpu_scheduler *sched;
spinlock_t queue_lock;
@@ -115,9 +116,14 @@ struct amd_sched_backend_ops {
enum amd_sched_priority {
AMD_SCHED_PRIORITY_MIN,
- AMD_SCHED_PRIORITY_NORMAL = AMD_SCHED_PRIORITY_MIN,
+ AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
+ AMD_SCHED_PRIORITY_NORMAL,
+ AMD_SCHED_PRIORITY_HIGH_SW,
+ AMD_SCHED_PRIORITY_HIGH_HW,
AMD_SCHED_PRIORITY_KERNEL,
- AMD_SCHED_PRIORITY_MAX
+ AMD_SCHED_PRIORITY_MAX,
+ AMD_SCHED_PRIORITY_INVALID = -1,
+ AMD_SCHED_PRIORITY_UNSET = -2
};
/**
@@ -150,6 +156,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity);
void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
+ struct amd_sched_rq *rq);
int amd_sched_fence_slab_init(void);
void amd_sched_fence_slab_fini(void);
@@ -167,4 +175,11 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
bool amd_sched_dependency_optimized(struct dma_fence* fence,
struct amd_sched_entity *entity);
void amd_sched_job_kickout(struct amd_sched_job *s_job);
+
+static inline enum amd_sched_priority
+amd_sched_get_job_priority(struct amd_sched_job *job)
+{
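+ /* The priority is the index of the entity's run queue within sched_rq. */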
+ return (job->s_entity->rq - job->sched->sched_rq);
+}
+
#endif
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 289eda54e5aa..074fd4ea7ece 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -18,6 +18,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_atomic_helper.h>
#include <linux/of_reserved_mem.h>
@@ -32,7 +33,7 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
}
static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = arcpgu_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index f9bda7b0d2ec..764d0c83710c 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include "hdlcd_drv.h"
@@ -106,7 +107,7 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
}
static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = hdlcd_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 1a57cc28955e..b8944666a18f 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -26,6 +26,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include "malidp_drv.h"
@@ -249,7 +250,7 @@ static const struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
};
static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = malidp_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index 1ab4cf863bf7..ecf25cf9f9f5 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -5,5 +5,3 @@ armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
obj-$(CONFIG_DRM_ARMADA) := armada.o
-
-CFLAGS_armada_trace.o := -I$(src)
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
index ad3d2ebf95c9..41a784f5a5e6 100644
--- a/drivers/gpu/drm/armada/armada_510.c
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -9,7 +9,6 @@
*/
#include <linux/clk.h>
#include <linux/io.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2a4d163ac76f..2e065facdce7 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -298,7 +298,7 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
if (force) {
/* Display is disabled, so just drop the old fb */
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
return;
}
@@ -321,7 +321,7 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
* the best. The worst that will happen is the buffer gets
* reused before it has finished being displayed.
*/
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
}
static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
@@ -577,7 +577,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
unsigned i;
bool interlaced;
- drm_framebuffer_reference(crtc->primary->fb);
+ drm_framebuffer_get(crtc->primary->fb);
interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
@@ -718,7 +718,7 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
MAX_SCHEDULE_TIMEOUT);
/* Take a reference to the new fb as we're using it */
- drm_framebuffer_reference(crtc->primary->fb);
+ drm_framebuffer_get(crtc->primary->fb);
/* Update the base in the CRTC */
armada_drm_crtc_update_regs(dcrtc, regs);
@@ -742,7 +742,7 @@ void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
* primary plane.
*/
if (plane->fb)
- drm_framebuffer_unreference(plane->fb);
+ drm_framebuffer_put(plane->fb);
/* Power down the Y/U/V FIFOs */
sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
@@ -947,13 +947,13 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
/* Must be a kernel-mapped object */
if (!obj->addr) {
- drm_gem_object_unreference_unlocked(&obj->obj);
+ drm_gem_object_put_unlocked(&obj->obj);
return -EINVAL;
}
if (obj->obj.size < w * h * 4) {
DRM_ERROR("buffer is too small\n");
- drm_gem_object_unreference_unlocked(&obj->obj);
+ drm_gem_object_put_unlocked(&obj->obj);
return -ENOMEM;
}
}
@@ -961,7 +961,7 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
if (dcrtc->cursor_obj) {
dcrtc->cursor_obj->update = NULL;
dcrtc->cursor_obj->update_data = NULL;
- drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj);
+ drm_gem_object_put_unlocked(&dcrtc->cursor_obj->obj);
}
dcrtc->cursor_obj = obj;
dcrtc->cursor_w = w;
@@ -997,7 +997,7 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
struct armada_private *priv = crtc->dev->dev_private;
if (dcrtc->cursor_obj)
- drm_gem_object_unreference_unlocked(&dcrtc->cursor_obj->obj);
+ drm_gem_object_put_unlocked(&dcrtc->cursor_obj->obj);
priv->dcrtc[dcrtc->num] = NULL;
drm_crtc_cleanup(&dcrtc->crtc);
@@ -1045,12 +1045,12 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
* Ensure that we hold a reference on the new framebuffer.
* This has to match the behaviour in mode_set.
*/
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
if (ret) {
/* Undo our reference above */
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
kfree(work);
return ret;
}
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 0b3227c039d7..e857b88a9799 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -9,7 +9,6 @@
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_graph.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_of.h>
#include "armada_crtc.h"
@@ -26,7 +25,7 @@ static void armada_drm_unref_work(struct work_struct *work)
struct drm_framebuffer *fb;
while (kfifo_get(&priv->fb_unref, &fb))
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
}
/* Must be called with dev->event_lock held */
@@ -70,8 +69,6 @@ static struct drm_driver armada_drm_driver = {
.gem_prime_export = armada_gem_prime_export,
.gem_prime_import = armada_gem_prime_import,
.dumb_create = armada_gem_dumb_create,
- .dumb_map_offset = armada_gem_dumb_map_offset,
- .dumb_destroy = armada_gem_dumb_destroy,
.gem_vm_ops = &armada_gem_vm_ops,
.major = 1,
.minor = 0,
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index 92e6b08ea64a..a38d5a0892a9 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -5,7 +5,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "armada_drm.h"
@@ -18,7 +17,7 @@ static void armada_fb_destroy(struct drm_framebuffer *fb)
struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
drm_framebuffer_cleanup(&dfb->fb);
- drm_gem_object_unreference_unlocked(&dfb->obj->obj);
+ drm_gem_object_put_unlocked(&dfb->obj->obj);
kfree(dfb);
}
@@ -95,7 +94,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
* the above call, but the caller will drop their reference
* to it. Hence we need to take our own reference.
*/
- drm_gem_object_reference(&obj->obj);
+ drm_gem_object_get(&obj->obj);
return dfb;
}
@@ -144,12 +143,12 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
goto err;
}
- drm_gem_object_unreference_unlocked(&obj->obj);
+ drm_gem_object_put_unlocked(&obj->obj);
return &dfb->fb;
err_unref:
- drm_gem_object_unreference_unlocked(&obj->obj);
+ drm_gem_object_put_unlocked(&obj->obj);
err:
DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 29c7d047b152..a2ce83f84800 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
@@ -52,13 +51,13 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
ret = armada_gem_linear_back(dev, obj);
if (ret) {
- drm_gem_object_unreference_unlocked(&obj->obj);
+ drm_gem_object_put_unlocked(&obj->obj);
return ret;
}
ptr = armada_gem_map_object(dev, obj);
if (!ptr) {
- drm_gem_object_unreference_unlocked(&obj->obj);
+ drm_gem_object_put_unlocked(&obj->obj);
return -ENOMEM;
}
@@ -68,7 +67,7 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
* A reference is now held by the framebuffer object if
* successful, otherwise this drops the ref for the error path.
*/
- drm_gem_object_unreference_unlocked(&obj->obj);
+ drm_gem_object_put_unlocked(&obj->obj);
if (IS_ERR(dfb))
return PTR_ERR(dfb);
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index a76ca21d063b..a97f509743a5 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -8,7 +8,6 @@
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
-#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
@@ -266,46 +265,10 @@ int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
/* drop reference from allocate - handle holds it now */
DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
err:
- drm_gem_object_unreference_unlocked(&dobj->obj);
+ drm_gem_object_put_unlocked(&dobj->obj);
return ret;
}
-int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
-{
- struct armada_gem_object *obj;
- int ret = 0;
-
- obj = armada_gem_object_lookup(file, handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object\n");
- return -EINVAL;
- }
-
- /* Don't allow imported objects to be mapped */
- if (obj->obj.import_attach) {
- ret = -EINVAL;
- goto err_unref;
- }
-
- ret = drm_gem_create_mmap_offset(&obj->obj);
- if (ret == 0) {
- *offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
- DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
- }
-
- err_unref:
- drm_gem_object_unreference_unlocked(&obj->obj);
-
- return ret;
-}
-
-int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -334,7 +297,7 @@ int armada_gem_create_ioctl(struct drm_device *dev, void *data,
/* drop reference from allocate - handle holds it now */
DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
err:
- drm_gem_object_unreference_unlocked(&dobj->obj);
+ drm_gem_object_put_unlocked(&dobj->obj);
return ret;
}
@@ -351,13 +314,13 @@ int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
if (!dobj->obj.filp) {
- drm_gem_object_unreference_unlocked(&dobj->obj);
+ drm_gem_object_put_unlocked(&dobj->obj);
return -EINVAL;
}
addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
MAP_SHARED, args->offset);
- drm_gem_object_unreference_unlocked(&dobj->obj);
+ drm_gem_object_put_unlocked(&dobj->obj);
if (IS_ERR_VALUE(addr))
return addr;
@@ -412,7 +375,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
unref:
- drm_gem_object_unreference_unlocked(&dobj->obj);
+ drm_gem_object_put_unlocked(&dobj->obj);
return ret;
}
@@ -561,7 +524,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
* Importing our own dmabuf(s) increases the
* refcount on the gem object itself.
*/
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
return obj;
}
}
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
index 6e524e0676bb..1ac90792b166 100644
--- a/drivers/gpu/drm/armada/armada_gem.h
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -35,10 +35,6 @@ struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
size_t);
int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *);
-int armada_gem_dumb_map_offset(struct drm_file *, struct drm_device *,
- uint32_t, uint64_t *);
-int armada_gem_dumb_destroy(struct drm_file *, struct drm_device *,
- uint32_t);
struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags);
struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index edc44910d79f..b411b608821a 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -177,7 +177,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
* Take a reference on the new framebuffer - we want to
* hold on to it while the hardware is displaying it.
*/
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
if (plane->fb)
armada_ovl_retire_fb(dplane, plane->fb);
@@ -278,7 +278,7 @@ static int armada_ovl_plane_disable(struct drm_plane *plane,
fb = xchg(&dplane->old_fb, NULL);
if (fb)
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
return 0;
}
diff --git a/drivers/gpu/drm/armada/armada_trace.h b/drivers/gpu/drm/armada/armada_trace.h
index 1e9f55fc8735..8dbfea7a00fe 100644
--- a/drivers/gpu/drm/armada/armada_trace.h
+++ b/drivers/gpu/drm/armada/armada_trace.h
@@ -63,5 +63,5 @@ TRACE_EVENT(armada_ovl_plane_work,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/armada
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 6f3849ec0c1d..9555a3542022 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -713,7 +713,7 @@ static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connect
int enc_id = connector->encoder_ids[0];
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 74d66e11f688..c6e8061ffcfc 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -458,7 +458,7 @@ static irqreturn_t atmel_hlcdc_dc_irq_handler(int irq, void *data)
static struct drm_framebuffer *atmel_hlcdc_fb_create(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_fb_cma_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 4237b0446721..6833ee253cfa 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -34,6 +34,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_plane_helper.h>
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 6a91e62da2f4..a24a18fbd65a 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -213,7 +213,7 @@ bochs_connector_best_encoder(struct drm_connector *connector)
int enc_id = connector->encoder_ids[0];
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index adf9ae0e0b7c..3b99d5a06c16 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -71,7 +71,7 @@ config DRM_PARADE_PS8622
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
- depends on OF
+ depends on OF && RC_CORE
select DRM_KMS_HELPER
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.
@@ -84,6 +84,14 @@ config DRM_SII902X
---help---
Silicon Image sii902x bridge chip driver.
+config DRM_SII9234
+ tristate "Silicon Image SII9234 HDMI/MHL bridge"
+ depends on OF
+ ---help---
+ Say Y here if you want support for the MHL interface.
+ It is an I2C driver that detects the connection of an MHL
+ bridge and starts encapsulation of the HDMI signal.
+
config DRM_TOSHIBA_TC358767
tristate "Toshiba TC358767 eDP bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 60dab87e4783..373eb28f31ed 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
+obj-$(CONFIG_DRM_SII9234) += sii9234.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index 2fed567f9943..592b9d2ec034 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -21,3 +21,11 @@ config DRM_I2C_ADV7533
default y
help
Support for the Analog Devices ADV7533 DSI to HDMI encoder.
+
+config DRM_I2C_ADV7511_CEC
+ bool "ADV7511/33 HDMI CEC driver"
+ depends on DRM_I2C_ADV7511
+ select CEC_CORE
+ default y
+ help
+ When selected, the HDMI transmitter will support the CEC feature.
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
index 5ba675534f6e..5bb384938a71 100644
--- a/drivers/gpu/drm/bridge/adv7511/Makefile
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -1,4 +1,5 @@
adv7511-y := adv7511_drv.o
adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o
+adv7511-$(CONFIG_DRM_I2C_ADV7511_CEC) += adv7511_cec.o
adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index fe18a5d2d84b..b4efcbabf7f7 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -195,6 +195,25 @@
#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
+#define ADV7511_REG_CEC_TX_FRAME_HDR 0x00
+#define ADV7511_REG_CEC_TX_FRAME_DATA0 0x01
+#define ADV7511_REG_CEC_TX_FRAME_LEN 0x10
+#define ADV7511_REG_CEC_TX_ENABLE 0x11
+#define ADV7511_REG_CEC_TX_RETRY 0x12
+#define ADV7511_REG_CEC_TX_LOW_DRV_CNT 0x14
+#define ADV7511_REG_CEC_RX_FRAME_HDR 0x15
+#define ADV7511_REG_CEC_RX_FRAME_DATA0 0x16
+#define ADV7511_REG_CEC_RX_FRAME_LEN 0x25
+#define ADV7511_REG_CEC_RX_ENABLE 0x26
+#define ADV7511_REG_CEC_RX_BUFFERS 0x4a
+#define ADV7511_REG_CEC_LOG_ADDR_MASK 0x4b
+#define ADV7511_REG_CEC_LOG_ADDR_0_1 0x4c
+#define ADV7511_REG_CEC_LOG_ADDR_2 0x4d
+#define ADV7511_REG_CEC_CLK_DIV 0x4e
+#define ADV7511_REG_CEC_SOFT_RESET 0x50
+
+#define ADV7533_REG_CEC_OFFSET 0x70
+
enum adv7511_input_clock {
ADV7511_INPUT_CLOCK_1X,
ADV7511_INPUT_CLOCK_2X,
@@ -297,6 +316,8 @@ enum adv7511_type {
ADV7533,
};
+#define ADV7511_MAX_ADDRS 3
+
struct adv7511 {
struct i2c_client *i2c_main;
struct i2c_client *i2c_edid;
@@ -328,8 +349,6 @@ struct adv7511 {
enum adv7511_sync_polarity hsync_polarity;
bool rgb;
- struct edid *edid;
-
struct gpio_desc *gpio_pd;
struct regulator_bulk_data *supplies;
@@ -343,15 +362,27 @@ struct adv7511 {
enum adv7511_type type;
struct platform_device *audio_pdev;
+
+ struct cec_adapter *cec_adap;
+ u8 cec_addr[ADV7511_MAX_ADDRS];
+ u8 cec_valid_addrs;
+ bool cec_enabled_adap;
+ struct clk *cec_clk;
+ u32 cec_clk_freq;
};
+#ifdef CONFIG_DRM_I2C_ADV7511_CEC
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
+ unsigned int offset);
+void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
+#endif
+
#ifdef CONFIG_DRM_I2C_ADV7533
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode);
int adv7533_patch_registers(struct adv7511 *adv);
-void adv7533_uninit_cec(struct adv7511 *adv);
-int adv7533_init_cec(struct adv7511 *adv);
+int adv7533_patch_cec_registers(struct adv7511 *adv);
int adv7533_attach_dsi(struct adv7511 *adv);
void adv7533_detach_dsi(struct adv7511 *adv);
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
@@ -374,11 +405,7 @@ static inline int adv7533_patch_registers(struct adv7511 *adv)
return -ENODEV;
}
-static inline void adv7533_uninit_cec(struct adv7511 *adv)
-{
-}
-
-static inline int adv7533_init_cec(struct adv7511 *adv)
+static inline int adv7533_patch_cec_registers(struct adv7511 *adv)
{
return -ENODEV;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 67469c26bae8..1b4783d45c53 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -210,7 +210,7 @@ static const struct hdmi_codec_ops adv7511_codec_ops = {
.get_dai_id = adv7511_hdmi_i2s_get_dai_id,
};
-static struct hdmi_codec_pdata codec_data = {
+static const struct hdmi_codec_pdata codec_data = {
.ops = &adv7511_codec_ops,
.max_i2s_channels = 2,
.i2s = 1,
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
new file mode 100644
index 000000000000..b33d730e4d73
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -0,0 +1,337 @@
+/*
+ * adv7511_cec.c - Analog Devices ADV7511/33 cec driver
+ *
+ * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+
+#include <media/cec.h>
+
+#include "adv7511.h"
+
+#define ADV7511_INT1_CEC_MASK \
+ (ADV7511_INT1_CEC_TX_READY | ADV7511_INT1_CEC_TX_ARBIT_LOST | \
+ ADV7511_INT1_CEC_TX_RETRY_TIMEOUT | ADV7511_INT1_CEC_RX_READY1)
+
+static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
+{
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+ unsigned int val;
+
+ if (regmap_read(adv7511->regmap_cec,
+ ADV7511_REG_CEC_TX_ENABLE + offset, &val))
+ return;
+
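+ /* Nothing to report if the transmitter is currently disabled. */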
+ if ((val & 0x01) == 0)
+ return;
+
+ if (tx_raw_status & ADV7511_INT1_CEC_TX_ARBIT_LOST) {
+ cec_transmit_attempt_done(adv7511->cec_adap,
+ CEC_TX_STATUS_ARB_LOST);
+ return;
+ }
+ if (tx_raw_status & ADV7511_INT1_CEC_TX_RETRY_TIMEOUT) {
+ u8 status;
+ u8 err_cnt = 0;
+ u8 nack_cnt = 0;
+ u8 low_drive_cnt = 0;
+ unsigned int cnt;
+
+ /*
+ * We set this status bit since this hardware performs
+ * retransmissions.
+ */
+ status = CEC_TX_STATUS_MAX_RETRIES;
+ if (regmap_read(adv7511->regmap_cec,
+ ADV7511_REG_CEC_TX_LOW_DRV_CNT + offset, &cnt)) {
+ err_cnt = 1;
+ status |= CEC_TX_STATUS_ERROR;
+ } else {
+ nack_cnt = cnt & 0xf;
+ if (nack_cnt)
+ status |= CEC_TX_STATUS_NACK;
+ low_drive_cnt = cnt >> 4;
+ if (low_drive_cnt)
+ status |= CEC_TX_STATUS_LOW_DRIVE;
+ }
+ cec_transmit_done(adv7511->cec_adap, status,
+ 0, nack_cnt, low_drive_cnt, err_cnt);
+ return;
+ }
+ if (tx_raw_status & ADV7511_INT1_CEC_TX_READY) {
+ cec_transmit_attempt_done(adv7511->cec_adap, CEC_TX_STATUS_OK);
+ return;
+ }
+}
+
+void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
+{
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+ const u32 irq_tx_mask = ADV7511_INT1_CEC_TX_READY |
+ ADV7511_INT1_CEC_TX_ARBIT_LOST |
+ ADV7511_INT1_CEC_TX_RETRY_TIMEOUT;
+ struct cec_msg msg = {};
+ unsigned int len;
+ unsigned int val;
+ u8 i;
+
+ if (irq1 & irq_tx_mask)
+ adv_cec_tx_raw_status(adv7511, irq1);
+
+ if (!(irq1 & ADV7511_INT1_CEC_RX_READY1))
+ return;
+
+ if (regmap_read(adv7511->regmap_cec,
+ ADV7511_REG_CEC_RX_FRAME_LEN + offset, &len))
+ return;
+
+ msg.len = len & 0x1f;
+
+ if (msg.len > 16)
+ msg.len = 16;
+
+ if (!msg.len)
+ return;
+
+ for (i = 0; i < msg.len; i++) {
+ regmap_read(adv7511->regmap_cec,
+ i + ADV7511_REG_CEC_RX_FRAME_HDR + offset, &val);
+ msg.msg[i] = val;
+ }
+
+ /* toggle to re-enable rx 1 */
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_RX_BUFFERS + offset, 1);
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_RX_BUFFERS + offset, 0);
+ cec_received_msg(adv7511->cec_adap, &msg);
+}
+
+static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+
+ if (adv7511->i2c_cec == NULL)
+ return -EIO;
+
+ if (!adv7511->cec_enabled_adap && enable) {
+ /* power up cec section */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_CLK_DIV + offset,
+ 0x03, 0x01);
+ /* legacy mode and clear all rx buffers */
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_RX_BUFFERS + offset, 0x07);
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_RX_BUFFERS + offset, 0);
+ /* initially disable tx */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_TX_ENABLE + offset, 1, 0);
+ /* enabled irqs: */
+ /* tx: ready */
+ /* tx: arbitration lost */
+ /* tx: retry timeout */
+ /* rx: ready 1 */
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_INT_ENABLE(1), 0x3f,
+ ADV7511_INT1_CEC_MASK);
+ } else if (adv7511->cec_enabled_adap && !enable) {
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_INT_ENABLE(1), 0x3f, 0);
+ /* disable address mask 1-3 */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
+ 0x70, 0x00);
+ /* power down cec section */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_CLK_DIV + offset,
+ 0x03, 0x00);
+ adv7511->cec_valid_addrs = 0;
+ }
+ adv7511->cec_enabled_adap = enable;
+ return 0;
+}
+
+static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+ struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+ unsigned int i, free_idx = ADV7511_MAX_ADDRS;
+
+ if (!adv7511->cec_enabled_adap)
+ return addr == CEC_LOG_ADDR_INVALID ? 0 : -EIO;
+
+ if (addr == CEC_LOG_ADDR_INVALID) {
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
+ 0x70, 0);
+ adv7511->cec_valid_addrs = 0;
+ return 0;
+ }
+
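+ /*
+ * Scan the logical address slots: reuse a slot that already holds
+ * this address, otherwise remember the first free one.
+ */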
+ for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
+ bool is_valid = adv7511->cec_valid_addrs & (1 << i);
+
+ if (free_idx == ADV7511_MAX_ADDRS && !is_valid)
+ free_idx = i;
+ if (is_valid && adv7511->cec_addr[i] == addr)
+ return 0;
+ }
+ if (i == ADV7511_MAX_ADDRS) {
+ i = free_idx;
+ if (i == ADV7511_MAX_ADDRS)
+ return -ENXIO;
+ }
+ adv7511->cec_addr[i] = addr;
+ adv7511->cec_valid_addrs |= 1 << i;
+
+ switch (i) {
+ case 0:
+ /* enable address mask 0 */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
+ 0x10, 0x10);
+ /* set address for mask 0 */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_0_1 + offset,
+ 0x0f, addr);
+ break;
+ case 1:
+ /* enable address mask 1 */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
+ 0x20, 0x20);
+ /* set address for mask 1 */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_0_1 + offset,
+ 0xf0, addr << 4);
+ break;
+ case 2:
+ /* enable address mask 2 */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_MASK + offset,
+ 0x40, 0x40);
+ /* set address for mask 2 */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_LOG_ADDR_2 + offset,
+ 0x0f, addr);
+ break;
+ }
+ return 0;
+}
+
+static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+ u8 len = msg->len;
+ unsigned int i;
+
+ /*
+ * The number of retries is the number of attempts - 1. It's not
+ * clear whether a retry count of 0 is allowed, so program at least
+ * one retry.
+ */
+ regmap_update_bits(adv7511->regmap_cec,
+ ADV7511_REG_CEC_TX_RETRY + offset,
+ 0x70, max(1, attempts - 1) << 4);
+
+ /* blocking, clear cec tx irq status */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INT(1), 0x38, 0x38);
+
+ /* write data */
+ for (i = 0; i < len; i++)
+ regmap_write(adv7511->regmap_cec,
+ i + ADV7511_REG_CEC_TX_FRAME_HDR + offset,
+ msg->msg[i]);
+
+ /* set length (data + header) */
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_TX_FRAME_LEN + offset, len);
+ /* start transmit, enable tx */
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_TX_ENABLE + offset, 0x01);
+ return 0;
+}
+
+static const struct cec_adap_ops adv7511_cec_adap_ops = {
+ .adap_enable = adv7511_cec_adap_enable,
+ .adap_log_addr = adv7511_cec_adap_log_addr,
+ .adap_transmit = adv7511_cec_adap_transmit,
+};
+
+static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
+{
+ adv7511->cec_clk = devm_clk_get(dev, "cec");
+ if (IS_ERR(adv7511->cec_clk)) {
+ int ret = PTR_ERR(adv7511->cec_clk);
+
+ adv7511->cec_clk = NULL;
+ return ret;
+ }
+ clk_prepare_enable(adv7511->cec_clk);
+ adv7511->cec_clk_freq = clk_get_rate(adv7511->cec_clk);
+ return 0;
+}
+
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
+ unsigned int offset)
+{
+ int ret = adv7511_cec_parse_dt(dev, adv7511);
+
+ if (ret)
+ return ret;
+
+ adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
+ adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
+ if (IS_ERR(adv7511->cec_adap))
+ return PTR_ERR(adv7511->cec_adap);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
+ /* cec soft reset */
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_SOFT_RESET + offset, 0x01);
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_SOFT_RESET + offset, 0x00);
+
+ /* legacy mode */
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_RX_BUFFERS + offset, 0x00);
+
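+ /*
+ * Program the CEC clock divider (bits [7:2]) from the external clock
+ * rate; this presumably scales the clock down to the roughly 750 kHz
+ * timebase the CEC block works from.
+ */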
+ regmap_write(adv7511->regmap_cec,
+ ADV7511_REG_CEC_CLK_DIV + offset,
+ ((adv7511->cec_clk_freq / 750000) - 1) << 2);
+
+ ret = cec_register_adapter(adv7511->cec_adap, dev);
+ if (ret) {
+ cec_delete_adapter(adv7511->cec_adap);
+ adv7511->cec_adap = NULL;
+ }
+ return ret;
+}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index b2431aee7887..0e14f1572d05 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -11,12 +11,15 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <media/cec.h>
+
#include "adv7511.h"
/* ADI recommended values for proper operation. */
@@ -199,17 +202,14 @@ static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
static void adv7511_set_config_csc(struct adv7511 *adv7511,
struct drm_connector *connector,
- bool rgb)
+ bool rgb, bool hdmi_mode)
{
struct adv7511_video_config config;
bool output_format_422, output_format_ycbcr;
unsigned int mode;
uint8_t infoframe[17];
- if (adv7511->edid)
- config.hdmi_mode = drm_detect_hdmi_monitor(adv7511->edid);
- else
- config.hdmi_mode = false;
+ config.hdmi_mode = hdmi_mode;
hdmi_avi_infoframe_init(&config.avi_infoframe);
@@ -339,8 +339,10 @@ static void __adv7511_power_on(struct adv7511 *adv7511)
*/
regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
ADV7511_INT0_EDID_READY | ADV7511_INT0_HPD);
- regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
- ADV7511_INT1_DDC_ERROR);
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR,
+ ADV7511_INT1_DDC_ERROR);
}
/*
@@ -376,6 +378,9 @@ static void __adv7511_power_off(struct adv7511 *adv7511)
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN,
ADV7511_POWER_POWER_DOWN);
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR, 0);
regcache_mark_dirty(adv7511->regmap);
}
@@ -426,6 +431,8 @@ static void adv7511_hpd_work(struct work_struct *work)
if (adv7511->connector.status != status) {
adv7511->connector.status = status;
+ if (status == connector_status_disconnected)
+ cec_phys_addr_invalidate(adv7511->cec_adap);
drm_kms_helper_hotplug_event(adv7511->connector.dev);
}
}
@@ -456,6 +463,10 @@ static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
wake_up_all(&adv7511->wq);
}
+#ifdef CONFIG_DRM_I2C_ADV7511_CEC
+ adv7511_cec_irq_process(adv7511, irq1);
+#endif
+
return 0;
}
@@ -589,15 +600,16 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
if (!adv7511->powered)
__adv7511_power_off(adv7511);
- kfree(adv7511->edid);
- adv7511->edid = edid;
- if (!edid)
- return 0;
drm_mode_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
- adv7511_set_config_csc(adv7511, connector, adv7511->rgb);
+ adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
+ drm_detect_hdmi_monitor(edid));
+
+ cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);
+
+ kfree(edid);
return count;
}
@@ -833,7 +845,11 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
return -ENODEV;
}
- adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ if (adv->i2c_main->irq)
+ adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ adv->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
ret = drm_connector_init(bridge->dev, &adv->connector,
&adv7511_connector_funcs,
@@ -919,6 +935,65 @@ static void adv7511_uninit_regulators(struct adv7511 *adv)
regulator_bulk_disable(adv->num_supplies, adv->supplies);
}
+static bool adv7511_cec_register_volatile(struct device *dev, unsigned int reg)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
+ if (adv7511->type == ADV7533)
+ reg -= ADV7533_REG_CEC_OFFSET;
+
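+ /*
+ * The received-frame registers and the TX low-drive counter are
+ * updated by the hardware, so they must bypass the regmap cache.
+ */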
+ switch (reg) {
+ case ADV7511_REG_CEC_RX_FRAME_HDR:
+ case ADV7511_REG_CEC_RX_FRAME_DATA0...
+ ADV7511_REG_CEC_RX_FRAME_DATA0 + 14:
+ case ADV7511_REG_CEC_RX_FRAME_LEN:
+ case ADV7511_REG_CEC_RX_BUFFERS:
+ case ADV7511_REG_CEC_TX_LOW_DRV_CNT:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config adv7511_cec_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = adv7511_cec_register_volatile,
+};
+
+static int adv7511_init_cec_regmap(struct adv7511 *adv)
+{
+ int ret;
+
+ adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
+ adv->i2c_main->addr - 1);
+ if (!adv->i2c_cec)
+ return -ENOMEM;
+ i2c_set_clientdata(adv->i2c_cec, adv);
+
+ adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
+ &adv7511_cec_regmap_config);
+ if (IS_ERR(adv->regmap_cec)) {
+ ret = PTR_ERR(adv->regmap_cec);
+ goto err;
+ }
+
+ if (adv->type == ADV7533) {
+ ret = adv7533_patch_cec_registers(adv);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ i2c_unregister_device(adv->i2c_cec);
+ return ret;
+}
+
static int adv7511_parse_dt(struct device_node *np,
struct adv7511_link_config *config)
{
@@ -1009,6 +1084,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
struct device *dev = &i2c->dev;
unsigned int main_i2c_addr = i2c->addr << 1;
unsigned int edid_i2c_addr = main_i2c_addr + 4;
+ unsigned int offset;
unsigned int val;
int ret;
@@ -1092,11 +1168,9 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
goto uninit_regulators;
}
- if (adv7511->type == ADV7533) {
- ret = adv7533_init_cec(adv7511);
- if (ret)
- goto err_i2c_unregister_edid;
- }
+ ret = adv7511_init_cec_regmap(adv7511);
+ if (ret)
+ goto err_i2c_unregister_edid;
INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
@@ -1111,10 +1185,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
goto err_unregister_cec;
}
- /* CEC is unused for now */
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
- ADV7511_CEC_CTRL_POWER_DOWN);
-
adv7511_power_off(adv7511);
i2c_set_clientdata(i2c, adv7511);
@@ -1129,10 +1199,23 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511_audio_init(dev, adv7511);
+ offset = adv7511->type == ADV7533 ? ADV7533_REG_CEC_OFFSET : 0;
+
+#ifdef CONFIG_DRM_I2C_ADV7511_CEC
+ ret = adv7511_cec_init(dev, adv7511, offset);
+ if (ret)
+ goto err_unregister_cec;
+#else
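+ /* Without CEC support compiled in, keep the CEC block powered down. */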
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ ADV7511_CEC_CTRL_POWER_DOWN);
+#endif
+
return 0;
err_unregister_cec:
- adv7533_uninit_cec(adv7511);
+ i2c_unregister_device(adv7511->i2c_cec);
+ if (adv7511->cec_clk)
+ clk_disable_unprepare(adv7511->cec_clk);
err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
uninit_regulators:
@@ -1145,10 +1228,11 @@ static int adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- if (adv7511->type == ADV7533) {
+ if (adv7511->type == ADV7533)
adv7533_detach_dsi(adv7511);
- adv7533_uninit_cec(adv7511);
- }
+ i2c_unregister_device(adv7511->i2c_cec);
+ if (adv7511->cec_clk)
+ clk_disable_unprepare(adv7511->cec_clk);
adv7511_uninit_regulators(adv7511);
@@ -1156,9 +1240,9 @@ static int adv7511_remove(struct i2c_client *i2c)
adv7511_audio_exit(adv7511);
- i2c_unregister_device(adv7511->i2c_edid);
+ cec_unregister_adapter(adv7511->cec_adap);
- kfree(adv7511->edid);
+ i2c_unregister_device(adv7511->i2c_edid);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index ac804f81e2f6..185b6d842166 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -32,14 +32,6 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = {
{ 0x05, 0xc8 },
};
-static const struct regmap_config adv7533_cec_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-
- .max_register = 0xff,
- .cache_type = REGCACHE_RBTREE,
-};
-
static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
@@ -145,37 +137,11 @@ int adv7533_patch_registers(struct adv7511 *adv)
ARRAY_SIZE(adv7533_fixed_registers));
}
-void adv7533_uninit_cec(struct adv7511 *adv)
-{
- i2c_unregister_device(adv->i2c_cec);
-}
-
-int adv7533_init_cec(struct adv7511 *adv)
+int adv7533_patch_cec_registers(struct adv7511 *adv)
{
- int ret;
-
- adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter,
- adv->i2c_main->addr - 1);
- if (!adv->i2c_cec)
- return -ENOMEM;
-
- adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
- &adv7533_cec_regmap_config);
- if (IS_ERR(adv->regmap_cec)) {
- ret = PTR_ERR(adv->regmap_cec);
- goto err;
- }
-
- ret = regmap_register_patch(adv->regmap_cec,
+ return regmap_register_patch(adv->regmap_cec,
adv7533_cec_fixed_registers,
ARRAY_SIZE(adv7533_cec_fixed_registers));
- if (ret)
- goto err;
-
- return 0;
-err:
- adv7533_uninit_cec(adv);
- return ret;
}
int adv7533_attach_dsi(struct adv7511 *adv)
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index e0cca19b4044..6d99d4a3beb3 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -188,7 +188,15 @@ EXPORT_SYMBOL(drm_panel_bridge_add);
*/
void drm_panel_bridge_remove(struct drm_bridge *bridge)
{
- struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+ struct panel_bridge *panel_bridge;
+
+ if (!bridge)
+ return;
+
+ if (bridge->funcs != &panel_bridge_bridge_funcs)
+ return;
+
+ panel_bridge = drm_bridge_to_panel_bridge(bridge);
drm_bridge_remove(bridge);
devm_kfree(panel_bridge->panel->dev, bridge);
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
new file mode 100644
index 000000000000..c77000626c22
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -0,0 +1,994 @@
+/*
+ * Copyright (C) 2017 Samsung Electronics
+ *
+ * Authors:
+ * Tomasz Stanislawski <t.stanislaws@samsung.com>
+ * Maciej Purski <m.purski@samsung.com>
+ *
+ * Based on sii9234 driver created by:
+ * Adam Hampson <ahampson@sta.samsung.com>
+ * Erik Gilling <konkers@android.com>
+ * Shankar Bandal <shankar.b@samsung.com>
+ * Dharam Kumar <dharam.kr@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program
+ *
+ */
+#include <drm/bridge/mhl.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#define CBUS_DEVCAP_OFFSET 0x80
+
+#define SII9234_MHL_VERSION 0x11
+#define SII9234_SCRATCHPAD_SIZE 0x10
+#define SII9234_INT_STAT_SIZE 0x33
+
+#define BIT_TMDS_CCTRL_TMDS_OE BIT(4)
+#define MHL_HPD_OUT_OVR_EN BIT(4)
+#define MHL_HPD_OUT_OVR_VAL BIT(5)
+#define MHL_INIT_TIMEOUT 0x0C
+
+/* MHL Tx registers and bits */
+#define MHL_TX_SRST 0x05
+#define MHL_TX_SYSSTAT_REG 0x09
+#define MHL_TX_INTR1_REG 0x71
+#define MHL_TX_INTR4_REG 0x74
+#define MHL_TX_INTR1_ENABLE_REG 0x75
+#define MHL_TX_INTR4_ENABLE_REG 0x78
+#define MHL_TX_INT_CTRL_REG 0x79
+#define MHL_TX_TMDS_CCTRL 0x80
+#define MHL_TX_DISC_CTRL1_REG 0x90
+#define MHL_TX_DISC_CTRL2_REG 0x91
+#define MHL_TX_DISC_CTRL3_REG 0x92
+#define MHL_TX_DISC_CTRL4_REG 0x93
+#define MHL_TX_DISC_CTRL5_REG 0x94
+#define MHL_TX_DISC_CTRL6_REG 0x95
+#define MHL_TX_DISC_CTRL7_REG 0x96
+#define MHL_TX_DISC_CTRL8_REG 0x97
+#define MHL_TX_STAT2_REG 0x99
+#define MHL_TX_MHLTX_CTL1_REG 0xA0
+#define MHL_TX_MHLTX_CTL2_REG 0xA1
+#define MHL_TX_MHLTX_CTL4_REG 0xA3
+#define MHL_TX_MHLTX_CTL6_REG 0xA5
+#define MHL_TX_MHLTX_CTL7_REG 0xA6
+
+#define RSEN_STATUS BIT(2)
+#define HPD_CHANGE_INT BIT(6)
+#define RSEN_CHANGE_INT BIT(5)
+#define RGND_READY_INT BIT(6)
+#define VBUS_LOW_INT BIT(5)
+#define CBUS_LKOUT_INT BIT(4)
+#define MHL_DISC_FAIL_INT BIT(3)
+#define MHL_EST_INT BIT(2)
+#define HPD_CHANGE_INT_MASK BIT(6)
+#define RSEN_CHANGE_INT_MASK BIT(5)
+
+#define RGND_READY_MASK BIT(6)
+#define CBUS_LKOUT_MASK BIT(4)
+#define MHL_DISC_FAIL_MASK BIT(3)
+#define MHL_EST_MASK BIT(2)
+
+#define SKIP_GND BIT(6)
+
+#define ATT_THRESH_SHIFT 0x04
+#define ATT_THRESH_MASK (0x03 << ATT_THRESH_SHIFT)
+#define USB_D_OEN BIT(3)
+#define DEGLITCH_TIME_MASK 0x07
+#define DEGLITCH_TIME_2MS 0
+#define DEGLITCH_TIME_4MS 1
+#define DEGLITCH_TIME_8MS 2
+#define DEGLITCH_TIME_16MS 3
+#define DEGLITCH_TIME_40MS 4
+#define DEGLITCH_TIME_50MS 5
+#define DEGLITCH_TIME_60MS 6
+#define DEGLITCH_TIME_128MS 7
+
+#define USB_D_OVR BIT(7)
+#define USB_ID_OVR BIT(6)
+#define DVRFLT_SEL BIT(5)
+#define BLOCK_RGND_INT BIT(4)
+#define SKIP_DEG BIT(3)
+#define CI2CA_POL BIT(2)
+#define CI2CA_WKUP BIT(1)
+#define SINGLE_ATT BIT(0)
+
+#define USB_D_ODN BIT(5)
+#define VBUS_CHECK BIT(2)
+#define RGND_INTP_MASK 0x03
+#define RGND_INTP_OPEN 0
+#define RGND_INTP_2K 1
+#define RGND_INTP_1K 2
+#define RGND_INTP_SHORT 3
+
+/* HDMI registers */
+#define HDMI_RX_TMDS0_CCTRL1_REG 0x10
+#define HDMI_RX_TMDS_CLK_EN_REG 0x11
+#define HDMI_RX_TMDS_CH_EN_REG 0x12
+#define HDMI_RX_PLL_CALREFSEL_REG 0x17
+#define HDMI_RX_PLL_VCOCAL_REG 0x1A
+#define HDMI_RX_EQ_DATA0_REG 0x22
+#define HDMI_RX_EQ_DATA1_REG 0x23
+#define HDMI_RX_EQ_DATA2_REG 0x24
+#define HDMI_RX_EQ_DATA3_REG 0x25
+#define HDMI_RX_EQ_DATA4_REG 0x26
+#define HDMI_RX_TMDS_ZONE_CTRL_REG 0x4C
+#define HDMI_RX_TMDS_MODE_CTRL_REG 0x4D
+
+/* CBUS registers */
+#define CBUS_INT_STATUS_1_REG 0x08
+#define CBUS_INTR1_ENABLE_REG 0x09
+#define CBUS_MSC_REQ_ABORT_REASON_REG 0x0D
+#define CBUS_INT_STATUS_2_REG 0x1E
+#define CBUS_INTR2_ENABLE_REG 0x1F
+#define CBUS_LINK_CONTROL_2_REG 0x31
+#define CBUS_MHL_STATUS_REG_0 0xB0
+#define CBUS_MHL_STATUS_REG_1 0xB1
+
+#define BIT_CBUS_RESET BIT(3)
+#define SET_HPD_DOWNSTREAM BIT(6)
+
+/* TPI registers */
+#define TPI_DPD_REG 0x3D
+
+/* Timeouts in msec */
+#define T_SRC_VBUS_CBUS_TO_STABLE 200
+#define T_SRC_CBUS_FLOAT 100
+#define T_SRC_CBUS_DEGLITCH 2
+#define T_SRC_RXSENSE_DEGLITCH 110
+
+#define MHL1_MAX_CLK 75000 /* in kHz */
+
+#define I2C_TPI_ADDR 0x3D
+#define I2C_HDMI_ADDR 0x49
+#define I2C_CBUS_ADDR 0x64
+
+enum sii9234_state {
+ ST_OFF,
+ ST_D3,
+ ST_RGND_INIT,
+ ST_RGND_1K,
+ ST_RSEN_HIGH,
+ ST_MHL_ESTABLISHED,
+ ST_FAILURE_DISCOVERY,
+ ST_FAILURE,
+};
+
+struct sii9234 {
+ struct i2c_client *client[4];
+ struct drm_bridge bridge;
+ struct device *dev;
+ struct gpio_desc *gpio_reset;
+ int i2c_error;
+ struct regulator_bulk_data supplies[4];
+
+ struct mutex lock; /* Protects fields below and device registers */
+ enum sii9234_state state;
+};
+
+enum sii9234_client_id {
+ I2C_MHL,
+ I2C_TPI,
+ I2C_HDMI,
+ I2C_CBUS,
+};
+
+static const char * const sii9234_client_name[] = {
+ [I2C_MHL] = "MHL",
+ [I2C_TPI] = "TPI",
+ [I2C_HDMI] = "HDMI",
+ [I2C_CBUS] = "CBUS",
+};
+
+static int sii9234_writeb(struct sii9234 *ctx, int id, int offset,
+ int value)
+{
+ int ret;
+ struct i2c_client *client = ctx->client[id];
+
+ if (ctx->i2c_error)
+ return ctx->i2c_error;
+
+ ret = i2c_smbus_write_byte_data(client, offset, value);
+ if (ret < 0)
+ dev_err(ctx->dev, "writeb: %4s[0x%02x] <- 0x%02x\n",
+ sii9234_client_name[id], offset, value);
+ ctx->i2c_error = ret;
+
+ return ret;
+}
+
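+/*
+ * SMBus has no native read-modify-write, so emulate one: select the
+ * register, read back the current value, merge in the masked bits and
+ * write the result.
+ */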
+static int sii9234_writebm(struct sii9234 *ctx, int id, int offset,
+ int value, int mask)
+{
+ int ret;
+ struct i2c_client *client = ctx->client[id];
+
+ if (ctx->i2c_error)
+ return ctx->i2c_error;
+
+ ret = i2c_smbus_write_byte(client, offset);
+ if (ret < 0) {
+ dev_err(ctx->dev, "writebm: %4s[0x%02x] <- 0x%02x\n",
+ sii9234_client_name[id], offset, value);
+ ctx->i2c_error = ret;
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte(client);
+ if (ret < 0) {
+ dev_err(ctx->dev, "writebm: %4s[0x%02x] <- 0x%02x\n",
+ sii9234_client_name[id], offset, value);
+ ctx->i2c_error = ret;
+ return ret;
+ }
+
+ value = (value & mask) | (ret & ~mask);
+
+ ret = i2c_smbus_write_byte_data(client, offset, value);
+ if (ret < 0) {
+ dev_err(ctx->dev, "writebm: %4s[0x%02x] <- 0x%02x\n",
+ sii9234_client_name[id], offset, value);
+ ctx->i2c_error = ret;
+ }
+
+ return ret;
+}
+
+static int sii9234_readb(struct sii9234 *ctx, int id, int offset)
+{
+ int ret;
+ struct i2c_client *client = ctx->client[id];
+
+ if (ctx->i2c_error)
+ return ctx->i2c_error;
+
+ ret = i2c_smbus_write_byte(client, offset);
+ if (ret < 0) {
+ dev_err(ctx->dev, "readb: %4s[0x%02x]\n",
+ sii9234_client_name[id], offset);
+ ctx->i2c_error = ret;
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte(client);
+ if (ret < 0) {
+ dev_err(ctx->dev, "readb: %4s[0x%02x]\n",
+ sii9234_client_name[id], offset);
+ ctx->i2c_error = ret;
+ }
+
+ return ret;
+}
+
+static int sii9234_clear_error(struct sii9234 *ctx)
+{
+ int ret = ctx->i2c_error;
+
+ ctx->i2c_error = 0;
+
+ return ret;
+}
+
+#define mhl_tx_writeb(sii9234, offset, value) \
+ sii9234_writeb(sii9234, I2C_MHL, offset, value)
+#define mhl_tx_writebm(sii9234, offset, value, mask) \
+ sii9234_writebm(sii9234, I2C_MHL, offset, value, mask)
+#define mhl_tx_readb(sii9234, offset) \
+ sii9234_readb(sii9234, I2C_MHL, offset)
+#define cbus_writeb(sii9234, offset, value) \
+ sii9234_writeb(sii9234, I2C_CBUS, offset, value)
+#define cbus_writebm(sii9234, offset, value, mask) \
+ sii9234_writebm(sii9234, I2C_CBUS, offset, value, mask)
+#define cbus_readb(sii9234, offset) \
+ sii9234_readb(sii9234, I2C_CBUS, offset)
+#define hdmi_writeb(sii9234, offset, value) \
+ sii9234_writeb(sii9234, I2C_HDMI, offset, value)
+#define hdmi_writebm(sii9234, offset, value, mask) \
+ sii9234_writebm(sii9234, I2C_HDMI, offset, value, mask)
+#define hdmi_readb(sii9234, offset) \
+ sii9234_readb(sii9234, I2C_HDMI, offset)
+#define tpi_writeb(sii9234, offset, value) \
+ sii9234_writeb(sii9234, I2C_TPI, offset, value)
+#define tpi_writebm(sii9234, offset, value, mask) \
+ sii9234_writebm(sii9234, I2C_TPI, offset, value, mask)
+#define tpi_readb(sii9234, offset) \
+ sii9234_readb(sii9234, I2C_TPI, offset)
+
+static u8 sii9234_tmds_control(struct sii9234 *ctx, bool enable)
+{
+ mhl_tx_writebm(ctx, MHL_TX_TMDS_CCTRL, enable ? ~0 : 0,
+ BIT_TMDS_CCTRL_TMDS_OE);
+ mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, enable ? ~0 : 0,
+ MHL_HPD_OUT_OVR_EN | MHL_HPD_OUT_OVR_VAL);
+ return sii9234_clear_error(ctx);
+}
+
+static int sii9234_cbus_reset(struct sii9234 *ctx)
+{
+ int i;
+
+ mhl_tx_writebm(ctx, MHL_TX_SRST, ~0, BIT_CBUS_RESET);
+ msleep(T_SRC_CBUS_DEGLITCH);
+ mhl_tx_writebm(ctx, MHL_TX_SRST, 0, BIT_CBUS_RESET);
+
+ for (i = 0; i < 4; i++) {
+ /*
+ * Enable WRITE_STAT interrupt for writes to all
+ * 4 MSC Status registers.
+ */
+ cbus_writeb(ctx, 0xE0 + i, 0xF2);
+ /*
+ * Enable SET_INT interrupt for writes to all
+ * 4 MSC Interrupt registers.
+ */
+ cbus_writeb(ctx, 0xF0 + i, 0xF2);
+ }
+
+ return sii9234_clear_error(ctx);
+}
+
+/* Required to check Samsung MHL information in cbus_init_register */
+static int sii9234_cbus_init(struct sii9234 *ctx)
+{
+ cbus_writeb(ctx, 0x07, 0xF2);
+ cbus_writeb(ctx, 0x40, 0x03);
+ cbus_writeb(ctx, 0x42, 0x06);
+ cbus_writeb(ctx, 0x36, 0x0C);
+ cbus_writeb(ctx, 0x3D, 0xFD);
+ cbus_writeb(ctx, 0x1C, 0x01);
+ cbus_writeb(ctx, 0x1D, 0x0F);
+ cbus_writeb(ctx, 0x44, 0x02);
+ /* Setup our devcap */
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_DEV_STATE, 0x00);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_MHL_VERSION,
+ SII9234_MHL_VERSION);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_CAT,
+ MHL_DCAP_CAT_SOURCE);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_ADOPTER_ID_H, 0x01);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_ADOPTER_ID_L, 0x41);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_VID_LINK_MODE,
+ MHL_DCAP_VID_LINK_RGB444 | MHL_DCAP_VID_LINK_YCBCR444);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_VIDEO_TYPE,
+ MHL_DCAP_VT_GRAPHICS);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_LOG_DEV_MAP,
+ MHL_DCAP_LD_GUI);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_BANDWIDTH, 0x0F);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_FEATURE_FLAG,
+ MHL_DCAP_FEATURE_RCP_SUPPORT | MHL_DCAP_FEATURE_RAP_SUPPORT
+ | MHL_DCAP_FEATURE_SP_SUPPORT);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_DEVICE_ID_H, 0x0);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_DEVICE_ID_L, 0x0);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_SCRATCHPAD_SIZE,
+ SII9234_SCRATCHPAD_SIZE);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_INT_STAT_SIZE,
+ SII9234_INT_STAT_SIZE);
+ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_RESERVED, 0);
+ cbus_writebm(ctx, 0x31, 0x0C, 0x0C);
+ cbus_writeb(ctx, 0x30, 0x01);
+ cbus_writebm(ctx, 0x3C, 0x30, 0x38);
+ cbus_writebm(ctx, 0x22, 0x0D, 0x0F);
+ cbus_writebm(ctx, 0x2E, 0x15, 0x15);
+ cbus_writeb(ctx, CBUS_INTR1_ENABLE_REG, 0);
+ cbus_writeb(ctx, CBUS_INTR2_ENABLE_REG, 0);
+
+ return sii9234_clear_error(ctx);
+}
+
+static void force_usb_id_switch_open(struct sii9234 *ctx)
+{
+ /* Disable CBUS discovery */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, 0, 0x01);
+ /* Force USB ID switch to open */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, ~0, USB_ID_OVR);
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL3_REG, ~0, 0x86);
+ /* Force upstream HPD to 0 when not in MHL mode. */
+ mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, 0, 0x30);
+}
+
+static void release_usb_id_switch_open(struct sii9234 *ctx)
+{
+ msleep(T_SRC_CBUS_FLOAT);
+ /* Clear USB ID switch to open */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, 0, USB_ID_OVR);
+ /* Enable CBUS discovery */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, ~0, 0x01);
+}
+
+static int sii9234_power_init(struct sii9234 *ctx)
+{
+ /* Force the SiI9234 into the D0 state. */
+ tpi_writeb(ctx, TPI_DPD_REG, 0x3F);
+ /* Enable TxPLL Clock */
+ hdmi_writeb(ctx, HDMI_RX_TMDS_CLK_EN_REG, 0x01);
+ /* Enable Tx Clock Path & Equalizer */
+ hdmi_writeb(ctx, HDMI_RX_TMDS_CH_EN_REG, 0x15);
+ /* Power Up TMDS */
+ mhl_tx_writeb(ctx, 0x08, 0x35);
+ return sii9234_clear_error(ctx);
+}
+
+static int sii9234_hdmi_init(struct sii9234 *ctx)
+{
+ hdmi_writeb(ctx, HDMI_RX_TMDS0_CCTRL1_REG, 0xC1);
+ hdmi_writeb(ctx, HDMI_RX_PLL_CALREFSEL_REG, 0x03);
+ hdmi_writeb(ctx, HDMI_RX_PLL_VCOCAL_REG, 0x20);
+ hdmi_writeb(ctx, HDMI_RX_EQ_DATA0_REG, 0x8A);
+ hdmi_writeb(ctx, HDMI_RX_EQ_DATA1_REG, 0x6A);
+ hdmi_writeb(ctx, HDMI_RX_EQ_DATA2_REG, 0xAA);
+ hdmi_writeb(ctx, HDMI_RX_EQ_DATA3_REG, 0xCA);
+ hdmi_writeb(ctx, HDMI_RX_EQ_DATA4_REG, 0xEA);
+ hdmi_writeb(ctx, HDMI_RX_TMDS_ZONE_CTRL_REG, 0xA0);
+ hdmi_writeb(ctx, HDMI_RX_TMDS_MODE_CTRL_REG, 0x00);
+ mhl_tx_writeb(ctx, MHL_TX_TMDS_CCTRL, 0x34);
+ hdmi_writeb(ctx, 0x45, 0x44);
+ hdmi_writeb(ctx, 0x31, 0x0A);
+ hdmi_writeb(ctx, HDMI_RX_TMDS0_CCTRL1_REG, 0xC1);
+
+ return sii9234_clear_error(ctx);
+}
+
+static int sii9234_mhl_tx_ctl_int(struct sii9234 *ctx)
+{
+ mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL1_REG, 0xD0);
+ mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL2_REG, 0xFC);
+ mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL4_REG, 0xEB);
+ mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL7_REG, 0x0C);
+
+ return sii9234_clear_error(ctx);
+}
+
+static int sii9234_reset(struct sii9234 *ctx)
+{
+ int ret;
+
+ sii9234_clear_error(ctx);
+
+ ret = sii9234_power_init(ctx);
+ if (ret < 0)
+ return ret;
+ ret = sii9234_cbus_reset(ctx);
+ if (ret < 0)
+ return ret;
+ ret = sii9234_hdmi_init(ctx);
+ if (ret < 0)
+ return ret;
+ ret = sii9234_mhl_tx_ctl_int(ctx);
+ if (ret < 0)
+ return ret;
+
+ /* Enable HDCP Compliance safety */
+ mhl_tx_writeb(ctx, 0x2B, 0x01);
+ /* CBUS discovery cycle time for each drive and float = 150us */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, 0x04, 0x06);
+ /* Clear bit 6 (reg_skip_rgnd) */
+ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL2_REG, (1 << 7) /* Reserved */
+ | 2 << ATT_THRESH_SHIFT | DEGLITCH_TIME_50MS);
+ /*
+ * Changed from 66 to 65 for 94[1:0] = 01 = 5k reg_cbusmhl_pup_sel
+ * 1.8V CBUS VTH & GND threshold
+ * to meet CTS 3.3.7.2 spec
+ */
+ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL5_REG, 0x77);
+ cbus_writebm(ctx, CBUS_LINK_CONTROL_2_REG, ~0, MHL_INIT_TIMEOUT);
+ mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL6_REG, 0xA0);
+ /* RGND & single discovery attempt (RGND blocking) */
+ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL6_REG, BLOCK_RGND_INT |
+ DVRFLT_SEL | SINGLE_ATT);
+ /* Use VBUS path of discovery state machine */
+ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL8_REG, 0);
+ /* 0x92[3] sets the CBUS / ID switch */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, ~0, USB_ID_OVR);
+ /*
+ * To allow RGND engine to operate correctly.
+ * When moving the chip from D2 to D0 (power up, init regs)
+ * the values should be
+ * 94[1:0] = 01 reg_cbusmhl_pup_sel[1:0] should be set for 5k
+ * 93[7:6] = 10 reg_cbusdisc_pup_sel[1:0] should be
+ * set for 10k (default)
+ * 93[5:4] = 00 reg_cbusidle_pup_sel[1:0] = open (default)
+ */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL3_REG, ~0, 0x86);
+ /*
+ * Change from CC to 8C to match 5K
+ * to meet CTS 3.3.72 spec
+ */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL4_REG, ~0, 0x8C);
+ /* Configure the interrupt as active high */
+ mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, 0, 0x06);
+
+ msleep(25);
+
+ /* Release usb_id switch */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, 0, USB_ID_OVR);
+ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL1_REG, 0x27);
+
+ ret = sii9234_clear_error(ctx);
+ if (ret < 0)
+ return ret;
+ ret = sii9234_cbus_init(ctx);
+ if (ret < 0)
+ return ret;
+
+ /* Enable Auto soft reset on SCDT = 0 */
+ mhl_tx_writeb(ctx, 0x05, 0x04);
+ /* HDMI Transcode mode enable */
+ mhl_tx_writeb(ctx, 0x0D, 0x1C);
+ mhl_tx_writeb(ctx, MHL_TX_INTR4_ENABLE_REG,
+ RGND_READY_MASK | CBUS_LKOUT_MASK
+ | MHL_DISC_FAIL_MASK | MHL_EST_MASK);
+ mhl_tx_writeb(ctx, MHL_TX_INTR1_ENABLE_REG, 0x60);
+
+ /* This step is essential before measuring the RGND impedance */
+ force_usb_id_switch_open(ctx);
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL4_REG, 0, 0xF0);
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL5_REG, 0, 0x03);
+ release_usb_id_switch_open(ctx);
+
+ /* Force upstream HPD to 0 when not in MHL mode */
+ mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, 0, 1 << 5);
+ mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, ~0, 1 << 4);
+
+ return sii9234_clear_error(ctx);
+}
+
+static int sii9234_goto_d3(struct sii9234 *ctx)
+{
+ int ret;
+
+ dev_dbg(ctx->dev, "sii9234: detection started d3\n");
+
+ ret = sii9234_reset(ctx);
+ if (ret < 0)
+ goto exit;
+
+ hdmi_writeb(ctx, 0x01, 0x03);
+ tpi_writebm(ctx, TPI_DPD_REG, 0, 1);
+ /* I2C above is expected to fail because power goes down */
+ sii9234_clear_error(ctx);
+
+ ctx->state = ST_D3;
+
+ return 0;
+ exit:
+ dev_err(ctx->dev, "%s failed\n", __func__);
+ return -1;
+}
+
+static int sii9234_hw_on(struct sii9234 *ctx)
+{
+ return regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static void sii9234_hw_off(struct sii9234 *ctx)
+{
+ gpiod_set_value(ctx->gpio_reset, 1);
+ msleep(20);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static void sii9234_hw_reset(struct sii9234 *ctx)
+{
+ gpiod_set_value(ctx->gpio_reset, 1);
+ msleep(20);
+ gpiod_set_value(ctx->gpio_reset, 0);
+}
+
+static void sii9234_cable_in(struct sii9234 *ctx)
+{
+ int ret;
+
+ mutex_lock(&ctx->lock);
+ if (ctx->state != ST_OFF)
+ goto unlock;
+ ret = sii9234_hw_on(ctx);
+ if (ret < 0)
+ goto unlock;
+
+ sii9234_hw_reset(ctx);
+ sii9234_goto_d3(ctx);
+ /* To avoid an irq storm while the hw is in a meta state */
+ enable_irq(to_i2c_client(ctx->dev)->irq);
+
+unlock:
+ mutex_unlock(&ctx->lock);
+}
+
+static void sii9234_cable_out(struct sii9234 *ctx)
+{
+ mutex_lock(&ctx->lock);
+
+ if (ctx->state == ST_OFF)
+ goto unlock;
+
+ disable_irq(to_i2c_client(ctx->dev)->irq);
+ tpi_writeb(ctx, TPI_DPD_REG, 0);
+ /* Turn the hpd feature on/off only for QCT HDMI */
+ sii9234_hw_off(ctx);
+
+ ctx->state = ST_OFF;
+
+unlock:
+ mutex_unlock(&ctx->lock);
+}
+
+static enum sii9234_state sii9234_rgnd_ready_irq(struct sii9234 *ctx)
+{
+ int value;
+
+ if (ctx->state == ST_D3) {
+ int ret;
+
+ dev_dbg(ctx->dev, "RGND_READY_INT\n");
+ sii9234_hw_reset(ctx);
+
+ ret = sii9234_reset(ctx);
+ if (ret < 0) {
+ dev_err(ctx->dev, "sii9234_reset() failed\n");
+ return ST_FAILURE;
+ }
+
+ return ST_RGND_INIT;
+ }
+
+ /* Got interrupt in inappropriate state */
+ if (ctx->state != ST_RGND_INIT)
+ return ST_FAILURE;
+
+ value = mhl_tx_readb(ctx, MHL_TX_STAT2_REG);
+ if (sii9234_clear_error(ctx))
+ return ST_FAILURE;
+
+ if ((value & RGND_INTP_MASK) != RGND_INTP_1K) {
+ dev_warn(ctx->dev, "RGND is not 1k\n");
+ return ST_RGND_INIT;
+ }
+ dev_dbg(ctx->dev, "RGND 1K!!\n");
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL4_REG, ~0, 0x8C);
+ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL5_REG, 0x77);
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, ~0, 0x05);
+ if (sii9234_clear_error(ctx))
+ return ST_FAILURE;
+
+ msleep(T_SRC_VBUS_CBUS_TO_STABLE);
+ return ST_RGND_1K;
+}
+
+static enum sii9234_state sii9234_mhl_established(struct sii9234 *ctx)
+{
+ dev_dbg(ctx->dev, "mhl est interrupt\n");
+
+ /* Discovery override */
+ mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL1_REG, 0x10);
+ /* Increase DDC translation layer timer (byte mode) */
+ cbus_writeb(ctx, 0x07, 0x32);
+ cbus_writebm(ctx, 0x44, ~0, 1 << 1);
+ /* Keep the discovery enabled. Need RGND interrupt */
+ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, ~0, 1);
+ mhl_tx_writeb(ctx, MHL_TX_INTR1_ENABLE_REG,
+ RSEN_CHANGE_INT_MASK | HPD_CHANGE_INT_MASK);
+
+ if (sii9234_clear_error(ctx))
+ return ST_FAILURE;
+
+ return ST_MHL_ESTABLISHED;
+}
+
+static enum sii9234_state sii9234_hpd_change(struct sii9234 *ctx)
+{
+ int value;
+
+ value = cbus_readb(ctx, CBUS_MSC_REQ_ABORT_REASON_REG);
+ if (sii9234_clear_error(ctx))
+ return ST_FAILURE;
+
+ if (value & SET_HPD_DOWNSTREAM) {
+ /* Downstream HPD High, Enable TMDS */
+ sii9234_tmds_control(ctx, true);
+ } else {
+ /* Downstream HPD Low, Disable TMDS */
+ sii9234_tmds_control(ctx, false);
+ }
+
+ return ctx->state;
+}
+
+static enum sii9234_state sii9234_rsen_change(struct sii9234 *ctx)
+{
+ int value;
+
+ /* Workaround to handle an interrupt arriving in the wrong state */
+ if (ctx->state != ST_RGND_1K) {
+ dev_err(ctx->dev, "RSEN_HIGH without RGND_1K\n");
+ return ST_FAILURE;
+ }
+ value = mhl_tx_readb(ctx, MHL_TX_SYSSTAT_REG);
+ if (value < 0)
+ return ST_FAILURE;
+
+ if (value & RSEN_STATUS) {
+ dev_dbg(ctx->dev, "MHL cable connected.. RSEN High\n");
+ return ST_RSEN_HIGH;
+ }
+ dev_dbg(ctx->dev, "RSEN lost\n");
+ /*
+ * Once RSEN loss is confirmed, check the cable status and the chip
+ * power status to tell whether this is a sink loss (HDMI cable not
+ * connected, TV off) or an MHL cable disconnection.
+ * TODO: Define the below mhl_disconnection()
+ */
+ msleep(T_SRC_RXSENSE_DEGLITCH);
+ value = mhl_tx_readb(ctx, MHL_TX_SYSSTAT_REG);
+ if (value < 0)
+ return ST_FAILURE;
+ dev_dbg(ctx->dev, "sys_stat: %x\n", value);
+
+ if (value & RSEN_STATUS) {
+ dev_dbg(ctx->dev, "RSEN recovery\n");
+ return ST_RSEN_HIGH;
+ }
+ dev_dbg(ctx->dev, "RSEN Really LOW\n");
+ /* To meet CTS 3.3.22.2 spec */
+ sii9234_tmds_control(ctx, false);
+ force_usb_id_switch_open(ctx);
+ release_usb_id_switch_open(ctx);
+
+ return ST_FAILURE;
+}
+
+static irqreturn_t sii9234_irq_thread(int irq, void *data)
+{
+ struct sii9234 *ctx = data;
+ int intr1, intr4;
+ int intr1_en, intr4_en;
+ int cbus_intr1, cbus_intr2;
+
+ dev_dbg(ctx->dev, "%s\n", __func__);
+
+ mutex_lock(&ctx->lock);
+
+ intr1 = mhl_tx_readb(ctx, MHL_TX_INTR1_REG);
+ intr4 = mhl_tx_readb(ctx, MHL_TX_INTR4_REG);
+ intr1_en = mhl_tx_readb(ctx, MHL_TX_INTR1_ENABLE_REG);
+ intr4_en = mhl_tx_readb(ctx, MHL_TX_INTR4_ENABLE_REG);
+ cbus_intr1 = cbus_readb(ctx, CBUS_INT_STATUS_1_REG);
+ cbus_intr2 = cbus_readb(ctx, CBUS_INT_STATUS_2_REG);
+
+ if (sii9234_clear_error(ctx))
+ goto done;
+
+ dev_dbg(ctx->dev, "irq %02x/%02x %02x/%02x %02x/%02x\n",
+ intr1, intr1_en, intr4, intr4_en, cbus_intr1, cbus_intr2);
+
+ if (intr4 & RGND_READY_INT)
+ ctx->state = sii9234_rgnd_ready_irq(ctx);
+ if (intr1 & RSEN_CHANGE_INT)
+ ctx->state = sii9234_rsen_change(ctx);
+ if (intr4 & MHL_EST_INT)
+ ctx->state = sii9234_mhl_established(ctx);
+ if (intr1 & HPD_CHANGE_INT)
+ ctx->state = sii9234_hpd_change(ctx);
+ if (intr4 & CBUS_LKOUT_INT)
+ ctx->state = ST_FAILURE;
+ if (intr4 & MHL_DISC_FAIL_INT)
+ ctx->state = ST_FAILURE_DISCOVERY;
+
+ done:
+ /* Clear interrupt status and pending flags */
+ mhl_tx_writeb(ctx, MHL_TX_INTR1_REG, intr1);
+ mhl_tx_writeb(ctx, MHL_TX_INTR4_REG, intr4);
+ cbus_writeb(ctx, CBUS_MHL_STATUS_REG_0, 0xFF);
+ cbus_writeb(ctx, CBUS_MHL_STATUS_REG_1, 0xFF);
+ cbus_writeb(ctx, CBUS_INT_STATUS_1_REG, cbus_intr1);
+ cbus_writeb(ctx, CBUS_INT_STATUS_2_REG, cbus_intr2);
+
+ sii9234_clear_error(ctx);
+
+ if (ctx->state == ST_FAILURE) {
+ dev_dbg(ctx->dev, "try to reset after failure\n");
+ sii9234_hw_reset(ctx);
+ sii9234_goto_d3(ctx);
+ }
+
+ if (ctx->state == ST_FAILURE_DISCOVERY) {
+ dev_err(ctx->dev, "discovery failed, no power for MHL?\n");
+ tpi_writebm(ctx, TPI_DPD_REG, 0, 1);
+ ctx->state = ST_D3;
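+ /*
+ * The CEC register map sits on its own I2C address, assumed here to
+ * be one below the main map's address.
+ */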
+ }
+
+ mutex_unlock(&ctx->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int sii9234_init_resources(struct sii9234 *ctx,
+ struct i2c_client *client)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ int ret;
+
+ if (!ctx->dev->of_node) {
+ dev_err(ctx->dev, "not DT device\n");
+ return -ENODEV;
+ }
+
+ ctx->gpio_reset = devm_gpiod_get(ctx->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpio_reset)) {
+ dev_err(ctx->dev, "failed to get reset gpio from DT\n");
+ return PTR_ERR(ctx->gpio_reset);
+ }
+
+ ctx->supplies[0].supply = "avcc12";
+ ctx->supplies[1].supply = "avcc33";
+ ctx->supplies[2].supply = "iovcc18";
+ ctx->supplies[3].supply = "cvcc12";
+ ret = devm_regulator_bulk_get(ctx->dev, 4, ctx->supplies);
+ if (ret) {
+ dev_err(ctx->dev, "regulator_bulk failed\n");
+ return ret;
+ }
+
+ ctx->client[I2C_MHL] = client;
+
+ ctx->client[I2C_TPI] = i2c_new_dummy(adapter, I2C_TPI_ADDR);
+ if (!ctx->client[I2C_TPI]) {
+ dev_err(ctx->dev, "failed to create TPI client\n");
+ return -ENODEV;
+ }
+
+ ctx->client[I2C_HDMI] = i2c_new_dummy(adapter, I2C_HDMI_ADDR);
+ if (!ctx->client[I2C_HDMI]) {
+ dev_err(ctx->dev, "failed to create HDMI RX client\n");
+ goto fail_tpi;
+ }
+
+ ctx->client[I2C_CBUS] = i2c_new_dummy(adapter, I2C_CBUS_ADDR);
+ if (!ctx->client[I2C_CBUS]) {
+ dev_err(ctx->dev, "failed to create CBUS client\n");
+ goto fail_hdmi;
+ }
+
+ return 0;
+
+fail_hdmi:
+ i2c_unregister_device(ctx->client[I2C_HDMI]);
+fail_tpi:
+ i2c_unregister_device(ctx->client[I2C_TPI]);
+
+ return -ENODEV;
+}
+
+static void sii9234_deinit_resources(struct sii9234 *ctx)
+{
+ i2c_unregister_device(ctx->client[I2C_CBUS]);
+ i2c_unregister_device(ctx->client[I2C_HDMI]);
+ i2c_unregister_device(ctx->client[I2C_TPI]);
+}
+
+static inline struct sii9234 *bridge_to_sii9234(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sii9234, bridge);
+}
+
+static enum drm_mode_status sii9234_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
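+ /* MHL 1.x links top out at a 75 MHz pixel clock (MHL1_MAX_CLK, kHz). */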
+ if (mode->clock > MHL1_MAX_CLK)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static const struct drm_bridge_funcs sii9234_bridge_funcs = {
+ .mode_valid = sii9234_mode_valid,
+};
+
+static int sii9234_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct sii9234 *ctx;
+ struct device *dev = &client->dev;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+ mutex_init(&ctx->lock);
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(dev, "I2C adapter lacks SMBUS feature\n");
+ return -EIO;
+ }
+
+ if (!client->irq) {
+ dev_err(dev, "no irq provided\n");
+ return -EINVAL;
+ }
+
+ irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ sii9234_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "sii9234", ctx);
+ if (ret < 0) {
+ dev_err(dev, "failed to install IRQ handler\n");
+ return ret;
+ }
+
+ ret = sii9234_init_resources(ctx, client);
+ if (ret < 0)
+ return ret;
+
+ i2c_set_clientdata(client, ctx);
+
+ ctx->bridge.funcs = &sii9234_bridge_funcs;
+ ctx->bridge.of_node = dev->of_node;
+ drm_bridge_add(&ctx->bridge);
+
+ sii9234_cable_in(ctx);
+
+ return 0;
+}
+
+static int sii9234_remove(struct i2c_client *client)
+{
+ struct sii9234 *ctx = i2c_get_clientdata(client);
+
+ sii9234_cable_out(ctx);
+ drm_bridge_remove(&ctx->bridge);
+ sii9234_deinit_resources(ctx);
+
+ return 0;
+}
+
+static const struct of_device_id sii9234_dt_match[] = {
+ { .compatible = "sil,sii9234" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sii9234_dt_match);
+
+static const struct i2c_device_id sii9234_id[] = {
+ { "SII9234", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sii9234_id);
+
+static struct i2c_driver sii9234_driver = {
+ .driver = {
+ .name = "sii9234",
+ .of_match_table = sii9234_dt_match,
+ },
+ .probe = sii9234_probe,
+ .remove = sii9234_remove,
+ .id_table = sii9234_id,
+};
+
+module_i2c_driver(sii9234_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 5131bfb94f06..b7eb704d0a8a 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -28,6 +28,8 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <media/rc-core.h>
+
#include "sil-sii8620.h"
#define SII8620_BURST_BUF_LEN 288
@@ -58,6 +60,7 @@ enum sii8620_mt_state {
struct sii8620 {
struct drm_bridge bridge;
struct device *dev;
+ struct rc_dev *rc_dev;
struct clk *clk_xtal;
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_int;
@@ -431,6 +434,16 @@ static void sii8620_mt_rap(struct sii8620 *ctx, u8 code)
sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code);
}
+static void sii8620_mt_rcpk(struct sii8620 *ctx, u8 code)
+{
+ sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RCPK, code);
+}
+
+static void sii8620_mt_rcpe(struct sii8620 *ctx, u8 code)
+{
+ sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RCPE, code);
+}
+
static void sii8620_mt_read_devcap_send(struct sii8620 *ctx,
struct sii8620_mt_msg *msg)
{
@@ -1753,6 +1766,25 @@ static void sii8620_send_features(struct sii8620 *ctx)
sii8620_write_buf(ctx, REG_MDT_XMIT_WRITE_PORT, buf, ARRAY_SIZE(buf));
}
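+/*
+ * Forward an MHL RCP scancode to the rc-core input device; the released
+ * flag in the scancode distinguishes key-up from key-down.
+ */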
+static bool sii8620_rcp_consume(struct sii8620 *ctx, u8 scancode)
+{
+ bool pressed = !(scancode & MHL_RCP_KEY_RELEASED_MASK);
+
+ scancode &= MHL_RCP_KEY_ID_MASK;
+
+ if (!ctx->rc_dev) {
+ dev_dbg(ctx->dev, "RCP input device not initialized\n");
+ return false;
+ }
+
+ if (pressed)
+ rc_keydown(ctx->rc_dev, RC_PROTO_CEC, scancode, 0);
+ else
+ rc_keyup(ctx->rc_dev);
+
+ return true;
+}
+
static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
{
u8 ints[MHL_INT_SIZE];
@@ -1804,19 +1836,25 @@ static void sii8620_msc_mt_done(struct sii8620 *ctx)
static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx)
{
- struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx);
+ struct sii8620_mt_msg *msg;
u8 buf[2];
- if (!msg)
- return;
-
sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2);
switch (buf[0]) {
case MHL_MSC_MSG_RAPK:
+ msg = sii8620_msc_msg_first(ctx);
+ if (!msg)
+ return;
msg->ret = buf[1];
ctx->mt_state = MT_STATE_DONE;
break;
+ case MHL_MSC_MSG_RCP:
+ if (!sii8620_rcp_consume(ctx, buf[1]))
+ sii8620_mt_rcpe(ctx,
+ MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE);
+ sii8620_mt_rcpk(ctx, buf[1]);
+ break;
default:
dev_err(ctx->dev, "%s message type %d,%d not supported",
__func__, buf[0], buf[1]);
@@ -2102,11 +2140,57 @@ static void sii8620_cable_in(struct sii8620 *ctx)
enable_irq(to_i2c_client(ctx->dev)->irq);
}
+static void sii8620_init_rcp_input_dev(struct sii8620 *ctx)
+{
+ struct rc_dev *rc_dev;
+ int ret;
+
+ rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
+ if (!rc_dev) {
+ dev_err(ctx->dev, "Failed to allocate RC device\n");
+ ctx->error = -ENOMEM;
+ return;
+ }
+
+ rc_dev->input_phys = "sii8620/input0";
+ rc_dev->input_id.bustype = BUS_VIRTUAL;
+ rc_dev->map_name = RC_MAP_CEC;
+ rc_dev->allowed_protocols = RC_PROTO_BIT_CEC;
+ rc_dev->driver_name = "sii8620";
+ rc_dev->device_name = "sii8620";
+
+ ret = rc_register_device(rc_dev);
+
+ if (ret) {
+ dev_err(ctx->dev, "Failed to register RC device\n");
+ ctx->error = ret;
+ rc_free_device(rc_dev);
+ return;
+ }
+ ctx->rc_dev = rc_dev;
+}
+
static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
{
return container_of(bridge, struct sii8620, bridge);
}
+static int sii8620_attach(struct drm_bridge *bridge)
+{
+ struct sii8620 *ctx = bridge_to_sii8620(bridge);
+
+ sii8620_init_rcp_input_dev(ctx);
+
+ return sii8620_clear_error(ctx);
+}
+
+static void sii8620_detach(struct drm_bridge *bridge)
+{
+ struct sii8620 *ctx = bridge_to_sii8620(bridge);
+
+ rc_unregister_device(ctx->rc_dev);
+}
+
static bool sii8620_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -2151,6 +2235,8 @@ end:
}
static const struct drm_bridge_funcs sii8620_bridge_funcs = {
+ .attach = sii8620_attach,
+ .detach = sii8620_detach,
.mode_fixup = sii8620_mode_fixup,
};
@@ -2217,8 +2303,8 @@ static int sii8620_remove(struct i2c_client *client)
struct sii8620 *ctx = i2c_get_clientdata(client);
disable_irq(to_i2c_client(ctx->dev)->irq);
- drm_bridge_remove(&ctx->bridge);
sii8620_hw_off(ctx);
+ drm_bridge_remove(&ctx->bridge);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 63c7a01b7053..d9cca4fd66ec 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -30,19 +30,20 @@
#include <video/mipi_display.h>
#define DSI_VERSION 0x00
+
#define DSI_PWR_UP 0x04
#define RESET 0
#define POWERUP BIT(0)
#define DSI_CLKMGR_CFG 0x08
-#define TO_CLK_DIVIDSION(div) (((div) & 0xff) << 8)
-#define TX_ESC_CLK_DIVIDSION(div) (((div) & 0xff) << 0)
+#define TO_CLK_DIVISION(div) (((div) & 0xff) << 8)
+#define TX_ESC_CLK_DIVISION(div) ((div) & 0xff)
#define DSI_DPI_VCID 0x0c
-#define DPI_VID(vid) (((vid) & 0x3) << 0)
+#define DPI_VCID(vcid) ((vcid) & 0x3)
#define DSI_DPI_COLOR_CODING 0x10
-#define EN18_LOOSELY BIT(8)
+#define LOOSELY18_EN BIT(8)
#define DPI_COLOR_CODING_16BIT_1 0x0
#define DPI_COLOR_CODING_16BIT_2 0x1
#define DPI_COLOR_CODING_16BIT_3 0x2
@@ -61,22 +62,25 @@
#define OUTVACT_LPCMD_TIME(p) (((p) & 0xff) << 16)
#define INVACT_LPCMD_TIME(p) ((p) & 0xff)
+#define DSI_DBI_VCID 0x1c
#define DSI_DBI_CFG 0x20
+#define DSI_DBI_PARTITIONING_EN 0x24
#define DSI_DBI_CMDSIZE 0x28
#define DSI_PCKHDL_CFG 0x2c
-#define EN_CRC_RX BIT(4)
-#define EN_ECC_RX BIT(3)
-#define EN_BTA BIT(2)
-#define EN_EOTP_RX BIT(1)
-#define EN_EOTP_TX BIT(0)
+#define CRC_RX_EN BIT(4)
+#define ECC_RX_EN BIT(3)
+#define BTA_EN BIT(2)
+#define EOTP_RX_EN BIT(1)
+#define EOTP_TX_EN BIT(0)
+
+#define DSI_GEN_VCID 0x30
#define DSI_MODE_CFG 0x34
#define ENABLE_VIDEO_MODE 0
#define ENABLE_CMD_MODE BIT(0)
#define DSI_VID_MODE_CFG 0x38
-#define FRAME_BTA_ACK BIT(14)
#define ENABLE_LOW_POWER (0x3f << 8)
#define ENABLE_LOW_POWER_MASK (0x3f << 8)
#define VID_MODE_TYPE_NON_BURST_SYNC_PULSES 0x0
@@ -85,8 +89,13 @@
#define VID_MODE_TYPE_MASK 0x3
#define DSI_VID_PKT_SIZE 0x3c
-#define VID_PKT_SIZE(p) (((p) & 0x3fff) << 0)
-#define VID_PKT_MAX_SIZE 0x3fff
+#define VID_PKT_SIZE(p) ((p) & 0x3fff)
+
+#define DSI_VID_NUM_CHUNKS 0x40
+#define VID_NUM_CHUNKS(c) ((c) & 0x1fff)
+
+#define DSI_VID_NULL_SIZE 0x44
+#define VID_NULL_SIZE(b) ((b) & 0x1fff)
#define DSI_VID_HSA_TIME 0x48
#define DSI_VID_HBP_TIME 0x4c
@@ -95,6 +104,8 @@
#define DSI_VID_VBP_LINES 0x58
#define DSI_VID_VFP_LINES 0x5c
#define DSI_VID_VACTIVE_LINES 0x60
+#define DSI_EDPI_CMD_SIZE 0x64
+
#define DSI_CMD_MODE_CFG 0x68
#define MAX_RD_PKT_SIZE_LP BIT(24)
#define DCS_LW_TX_LP BIT(19)
@@ -108,8 +119,8 @@
#define GEN_SW_2P_TX_LP BIT(10)
#define GEN_SW_1P_TX_LP BIT(9)
#define GEN_SW_0P_TX_LP BIT(8)
-#define EN_ACK_RQST BIT(1)
-#define EN_TEAR_FX BIT(0)
+#define ACK_RQST_EN BIT(1)
+#define TEAR_FX_EN BIT(0)
#define CMD_MODE_ALL_LP (MAX_RD_PKT_SIZE_LP | \
DCS_LW_TX_LP | \
@@ -125,27 +136,31 @@
GEN_SW_0P_TX_LP)
#define DSI_GEN_HDR 0x6c
+/* TODO These 2 defines will be reworked thanks to mipi_dsi_create_packet() */
#define GEN_HDATA(data) (((data) & 0xffff) << 8)
-#define GEN_HDATA_MASK (0xffff << 8)
#define GEN_HTYPE(type) (((type) & 0xff) << 0)
-#define GEN_HTYPE_MASK 0xff
#define DSI_GEN_PLD_DATA 0x70
#define DSI_CMD_PKT_STATUS 0x74
-#define GEN_CMD_EMPTY BIT(0)
-#define GEN_CMD_FULL BIT(1)
-#define GEN_PLD_W_EMPTY BIT(2)
-#define GEN_PLD_W_FULL BIT(3)
-#define GEN_PLD_R_EMPTY BIT(4)
-#define GEN_PLD_R_FULL BIT(5)
#define GEN_RD_CMD_BUSY BIT(6)
+#define GEN_PLD_R_FULL BIT(5)
+#define GEN_PLD_R_EMPTY BIT(4)
+#define GEN_PLD_W_FULL BIT(3)
+#define GEN_PLD_W_EMPTY BIT(2)
+#define GEN_CMD_FULL BIT(1)
+#define GEN_CMD_EMPTY BIT(0)
#define DSI_TO_CNT_CFG 0x78
#define HSTX_TO_CNT(p) (((p) & 0xffff) << 16)
#define LPRX_TO_CNT(p) ((p) & 0xffff)
+#define DSI_HS_RD_TO_CNT 0x7c
+#define DSI_LP_RD_TO_CNT 0x80
+#define DSI_HS_WR_TO_CNT 0x84
+#define DSI_LP_WR_TO_CNT 0x88
#define DSI_BTA_TO_CNT 0x8c
+
#define DSI_LPCLK_CTRL 0x94
#define AUTO_CLKLANE_CTRL BIT(1)
#define PHY_TXREQUESTCLKHS BIT(0)
@@ -154,6 +169,7 @@
#define PHY_CLKHS2LP_TIME(lbcc) (((lbcc) & 0x3ff) << 16)
#define PHY_CLKLP2HS_TIME(lbcc) ((lbcc) & 0x3ff)
+/* TODO Next register is slightly different between 1.30 & 1.31 IP version */
#define DSI_PHY_TMR_CFG 0x9c
#define PHY_HS2LP_TIME(lbcc) (((lbcc) & 0xff) << 24)
#define PHY_LP2HS_TIME(lbcc) (((lbcc) & 0xff) << 16)
@@ -170,12 +186,15 @@
#define PHY_UNSHUTDOWNZ BIT(0)
#define DSI_PHY_IF_CFG 0xa4
-#define N_LANES(n) ((((n) - 1) & 0x3) << 0)
#define PHY_STOP_WAIT_TIME(cycle) (((cycle) & 0xff) << 8)
+#define N_LANES(n) (((n) - 1) & 0x3)
+
+#define DSI_PHY_ULPS_CTRL 0xa8
+#define DSI_PHY_TX_TRIGGERS 0xac
#define DSI_PHY_STATUS 0xb0
-#define LOCK BIT(0)
-#define STOP_STATE_CLK_LANE BIT(2)
+#define PHY_STOP_STATE_CLK_LANE BIT(2)
+#define PHY_LOCK BIT(0)
#define DSI_PHY_TST_CTRL0 0xb4
#define PHY_TESTCLK BIT(1)
@@ -187,12 +206,13 @@
#define PHY_TESTEN BIT(16)
#define PHY_UNTESTEN 0
#define PHY_TESTDOUT(n) (((n) & 0xff) << 8)
-#define PHY_TESTDIN(n) (((n) & 0xff) << 0)
+#define PHY_TESTDIN(n) ((n) & 0xff)
#define DSI_INT_ST0 0xbc
#define DSI_INT_ST1 0xc0
#define DSI_INT_MSK0 0xc4
#define DSI_INT_MSK1 0xc8
+#define DSI_PHY_TMR_RD_CFG 0xf4
#define PHY_STATUS_TIMEOUT_US 10000
#define CMD_PKT_STATUS_TIMEOUT_US 20000
@@ -201,7 +221,6 @@ struct dw_mipi_dsi {
struct drm_bridge bridge;
struct mipi_dsi_host dsi_host;
struct drm_bridge *panel_bridge;
- bool is_panel_bridge;
struct device *dev;
void __iomem *base;
@@ -277,7 +296,6 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
- dsi->is_panel_bridge = true;
}
dsi->panel_bridge = bridge;
@@ -292,8 +310,7 @@ static int dw_mipi_dsi_host_detach(struct mipi_dsi_host *host,
{
struct dw_mipi_dsi *dsi = host_to_dsi(host);
- if (dsi->is_panel_bridge)
- drm_panel_bridge_remove(dsi->panel_bridge);
+ drm_of_panel_bridge_remove(host->dev->of_node, 1, 0);
drm_bridge_remove(&dsi->bridge);
@@ -307,7 +324,7 @@ static void dw_mipi_message_config(struct dw_mipi_dsi *dsi,
u32 val = 0;
if (msg->flags & MIPI_DSI_MSG_REQ_ACK)
- val |= EN_ACK_RQST;
+ val |= ACK_RQST_EN;
if (lpm)
val |= CMD_MODE_ALL_LP;
@@ -506,8 +523,8 @@ static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
* timeout clock division should be computed with the
* high speed transmission counter timeout and byte lane...
*/
- dsi_write(dsi, DSI_CLKMGR_CFG, TO_CLK_DIVIDSION(10) |
- TX_ESC_CLK_DIVIDSION(esc_clk_division));
+ dsi_write(dsi, DSI_CLKMGR_CFG, TO_CLK_DIVISION(10) |
+ TX_ESC_CLK_DIVISION(esc_clk_division));
}
static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
@@ -520,7 +537,7 @@ static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
color = DPI_COLOR_CODING_24BIT;
break;
case MIPI_DSI_FMT_RGB666:
- color = DPI_COLOR_CODING_18BIT_2 | EN18_LOOSELY;
+ color = DPI_COLOR_CODING_18BIT_2 | LOOSELY18_EN;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
color = DPI_COLOR_CODING_18BIT_1;
@@ -535,7 +552,7 @@ static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
val |= HSYNC_ACTIVE_LOW;
- dsi_write(dsi, DSI_DPI_VCID, DPI_VID(dsi->channel));
+ dsi_write(dsi, DSI_DPI_VCID, DPI_VCID(dsi->channel));
dsi_write(dsi, DSI_DPI_COLOR_CODING, color);
dsi_write(dsi, DSI_DPI_CFG_POL, val);
/*
@@ -550,7 +567,7 @@ static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
{
- dsi_write(dsi, DSI_PCKHDL_CFG, EN_CRC_RX | EN_ECC_RX | EN_BTA);
+ dsi_write(dsi, DSI_PCKHDL_CFG, CRC_RX_EN | ECC_RX_EN | BTA_EN);
}
static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
@@ -571,7 +588,7 @@ static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
/*
* TODO dw drv improvements
* compute high speed transmission counter timeout according
- * to the timeout clock division (TO_CLK_DIVIDSION) and byte lane...
+ * to the timeout clock division (TO_CLK_DIVISION) and byte lane...
*/
dsi_write(dsi, DSI_TO_CNT_CFG, HSTX_TO_CNT(1000) | LPRX_TO_CNT(1000));
/*
@@ -684,13 +701,13 @@ static void dw_mipi_dsi_dphy_enable(struct dw_mipi_dsi *dsi)
dsi_write(dsi, DSI_PHY_RSTZ, PHY_ENFORCEPLL | PHY_ENABLECLK |
PHY_UNRSTZ | PHY_UNSHUTDOWNZ);
- ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
- val, val & LOCK, 1000, PHY_STATUS_TIMEOUT_US);
+ ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS, val,
+ val & PHY_LOCK, 1000, PHY_STATUS_TIMEOUT_US);
if (ret < 0)
DRM_DEBUG_DRIVER("failed to wait phy lock state\n");
ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
- val, val & STOP_STATE_CLK_LANE, 1000,
+ val, val & PHY_STOP_STATE_CLK_LANE, 1000,
PHY_STATUS_TIMEOUT_US);
if (ret < 0)
DRM_DEBUG_DRIVER("failed to wait phy clk lane stop state\n");
@@ -865,15 +882,14 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
* Note that the reset was not defined in the initial device tree, so
* we have to be prepared for it not being found.
*/
- apb_rst = devm_reset_control_get(dev, "apb");
+ apb_rst = devm_reset_control_get_optional_exclusive(dev, "apb");
if (IS_ERR(apb_rst)) {
ret = PTR_ERR(apb_rst);
- if (ret == -ENOENT) {
- apb_rst = NULL;
- } else {
+
+ if (ret != -EPROBE_DEFER)
dev_err(dev, "Unable to get reset control: %d\n", ret);
- return ERR_PTR(ret);
- }
+
+ return ERR_PTR(ret);
}
if (apb_rst) {
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index a4c4a465b385..cd23b1b28259 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -457,7 +457,7 @@ static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
int enc_id = connector->encoder_ids[0];
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index c89953449e96..737f02885c28 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -70,7 +70,6 @@ int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
return 0;
}
-
EXPORT_SYMBOL(drm_agp_info);
int drm_agp_info_ioctl(struct drm_device *dev, void *data,
@@ -95,18 +94,18 @@ int drm_agp_info_ioctl(struct drm_device *dev, void *data,
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
-int drm_agp_acquire(struct drm_device * dev)
+int drm_agp_acquire(struct drm_device *dev)
{
if (!dev->agp)
return -ENODEV;
if (dev->agp->acquired)
return -EBUSY;
- if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
+ dev->agp->bridge = agp_backend_acquire(dev->pdev);
+ if (!dev->agp->bridge)
return -ENODEV;
dev->agp->acquired = 1;
return 0;
}
-
EXPORT_SYMBOL(drm_agp_acquire);
/**
@@ -135,7 +134,7 @@ int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
*
* Verifies the AGP device has been acquired and calls \c agp_backend_release.
*/
-int drm_agp_release(struct drm_device * dev)
+int drm_agp_release(struct drm_device *dev)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -161,7 +160,7 @@ int drm_agp_release_ioctl(struct drm_device *dev, void *data,
* Verifies the AGP device has been acquired but not enabled, and calls
* \c agp_enable.
*/
-int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
+int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -171,7 +170,6 @@ int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
dev->agp->enabled = 1;
return 0;
}
-
EXPORT_SYMBOL(drm_agp_enable);
int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
@@ -203,12 +201,14 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (!(entry = kzalloc(sizeof(*entry), GFP_KERNEL)))
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
return -ENOMEM;
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
type = (u32) request->type;
- if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
+ memory = agp_allocate_memory(dev->agp->bridge, pages, type);
+ if (!memory) {
kfree(entry);
return -ENOMEM;
}
@@ -244,8 +244,8 @@ int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
*
* Walks through drm_agp_head::memory until finding a matching handle.
*/
-static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
- unsigned long handle)
+static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device *dev,
+ unsigned long handle)
{
struct drm_agp_mem *entry;
@@ -275,9 +275,8 @@ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
- return -EINVAL;
- if (!entry->bound)
+ entry = drm_agp_lookup_entry(dev, request->handle);
+ if (!entry || !entry->bound)
return -EINVAL;
ret = drm_unbind_agp(entry->memory);
if (ret == 0)
@@ -316,12 +315,12 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
- return -EINVAL;
- if (entry->bound)
+ entry = drm_agp_lookup_entry(dev, request->handle);
+ if (!entry || entry->bound)
return -EINVAL;
page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
- if ((retcode = drm_bind_agp(entry->memory, page)))
+ retcode = drm_bind_agp(entry->memory, page);
+ if (retcode)
return retcode;
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
@@ -359,7 +358,8 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+ entry = drm_agp_lookup_entry(dev, request->handle);
+ if (!entry)
return -EINVAL;
if (entry->bound)
drm_unbind_agp(entry->memory);
@@ -373,7 +373,6 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
EXPORT_SYMBOL(drm_agp_free);
-
int drm_agp_free_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -398,11 +397,13 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
struct drm_agp_head *head = NULL;
- if (!(head = kzalloc(sizeof(*head), GFP_KERNEL)))
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
return NULL;
head->bridge = agp_find_bridge(dev->pdev);
if (!head->bridge) {
- if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
+ head->bridge = agp_backend_acquire(dev->pdev);
+ if (!head->bridge) {
kfree(head);
return NULL;
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 2fd383d7253a..c2da5585e201 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -163,13 +163,6 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
crtc->funcs->atomic_destroy_state(crtc,
state->crtcs[i].state);
- if (state->crtcs[i].commit) {
- kfree(state->crtcs[i].commit->event);
- state->crtcs[i].commit->event = NULL;
- drm_crtc_commit_put(state->crtcs[i].commit);
- }
-
- state->crtcs[i].commit = NULL;
state->crtcs[i].ptr = NULL;
state->crtcs[i].state = NULL;
}
@@ -189,9 +182,6 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
for (i = 0; i < state->num_private_objs; i++) {
struct drm_private_obj *obj = state->private_objs[i].ptr;
- if (!obj)
- continue;
-
obj->funcs->atomic_destroy_state(obj,
state->private_objs[i].state);
state->private_objs[i].ptr = NULL;
@@ -199,6 +189,10 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
}
state->num_private_objs = 0;
+ if (state->fake_commit) {
+ drm_crtc_commit_put(state->fake_commit);
+ state->fake_commit = NULL;
+ }
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);
@@ -721,7 +715,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_fb_id) {
- struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
+ struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
drm_atomic_set_fb_for_plane(state, fb);
if (fb)
drm_framebuffer_put(fb);
@@ -737,7 +731,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
return -EINVAL;
} else if (property == config->prop_crtc_id) {
- struct drm_crtc *crtc = drm_crtc_find(dev, val);
+ struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
return drm_atomic_set_crtc_for_plane(state, crtc);
} else if (property == config->prop_crtc_x) {
state->crtc_x = U642I64(val);
@@ -1152,7 +1146,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_crtc_id) {
- struct drm_crtc *crtc = drm_crtc_find(dev, val);
+ struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
return drm_atomic_set_crtc_for_connector(state, crtc);
} else if (property == config->dpms_property) {
/* setting DPMS property requires special handling, which
@@ -1818,7 +1812,7 @@ int drm_atomic_debugfs_init(struct drm_minor *minor)
*/
static struct drm_pending_vblank_event *create_vblank_event(
- struct drm_device *dev, uint64_t user_data)
+ struct drm_crtc *crtc, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
@@ -1828,7 +1822,8 @@ static struct drm_pending_vblank_event *create_vblank_event(
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof(e->event);
- e->event.user_data = user_data;
+ e->event.vbl.crtc_id = crtc->base.id;
+ e->event.vbl.user_data = user_data;
return e;
}
@@ -2082,7 +2077,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
struct drm_pending_vblank_event *e;
- e = create_vblank_event(dev, arg->user_data);
+ e = create_vblank_event(crtc, arg->user_data);
if (!e)
return -ENOMEM;
@@ -2237,7 +2232,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
return -EINVAL;
- drm_modeset_acquire_init(&ctx, 0);
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
state = drm_atomic_state_alloc(dev);
if (!state)
@@ -2262,7 +2257,7 @@ retry:
goto out;
}
- obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
+ obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
if (!obj) {
ret = -ENOENT;
goto out;
@@ -2350,8 +2345,9 @@ out:
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
- goto retry;
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
}
drm_atomic_state_put(state);
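A short sketch of the locking pattern the ioctls above switch to: with DRM_MODESET_ACQUIRE_INTERRUPTIBLE the backoff itself can fail with -EINTR, so its return value gates the retry (everything except the helper calls is illustrative):

	#include <drm/drm_modeset_lock.h>

	static int example_locked_update(struct drm_device *dev)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	retry:
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret)
			goto out;

		/* ... perform the update while holding the locks ... */

	out:
		if (ret == -EDEADLK) {
			/* drm_modeset_backoff() may now return -EINTR; retry only on 0. */
			ret = drm_modeset_backoff(&ctx);
			if (!ret)
				goto retry;
		}
		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);
		return ret;
	}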
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 0028591f3f95..71d712f1b56a 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -860,6 +860,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
+ int ret;
/* Shut down everything that needs a full modeset. */
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
@@ -883,6 +884,14 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs->disable(crtc);
else
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (!(dev->irq_enabled && dev->num_crtcs))
+ continue;
+
+ ret = drm_crtc_vblank_get(crtc);
+ WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
+ if (ret == 0)
+ drm_crtc_vblank_put(crtc);
}
}
@@ -1262,12 +1271,12 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
- struct drm_crtc_state *unused;
+ struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
int i;
- for_each_new_crtc_in_state(old_state, crtc, unused, i) {
- struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
+ for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ struct drm_crtc_commit *commit = new_crtc_state->commit;
int ret;
if (!commit)
@@ -1388,35 +1397,31 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- struct drm_crtc_commit *commit;
- struct drm_plane *__plane, *plane = NULL;
- struct drm_plane_state *__plane_state, *plane_state = NULL;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
const struct drm_plane_helper_funcs *funcs;
- int i, j, n_planes = 0;
+ int i, n_planes = 0;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state))
return -EINVAL;
}
- for_each_new_plane_in_state(state, __plane, __plane_state, i) {
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
n_planes++;
- plane = __plane;
- plane_state = __plane_state;
- }
/* FIXME: we support only single plane updates for now */
- if (!plane || n_planes != 1)
+ if (n_planes != 1)
return -EINVAL;
- if (!plane_state->crtc)
+ if (!new_plane_state->crtc)
return -EINVAL;
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;
- if (plane_state->fence)
+ if (new_plane_state->fence)
return -EINVAL;
/*
@@ -1424,31 +1429,11 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
* the plane. This prevents our async update's changes from getting
* overridden by a previous synchronous update's state.
*/
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- if (plane->crtc != crtc)
- continue;
-
- spin_lock(&crtc->commit_lock);
- commit = list_first_entry_or_null(&crtc->commit_list,
- struct drm_crtc_commit,
- commit_entry);
- if (!commit) {
- spin_unlock(&crtc->commit_lock);
- continue;
- }
- spin_unlock(&crtc->commit_lock);
-
- if (!crtc->state->state)
- continue;
-
- for_each_plane_in_state(crtc->state->state, __plane,
- __plane_state, j) {
- if (__plane == plane)
- return -EINVAL;
- }
- }
+ if (old_plane_state->commit &&
+ !try_wait_for_completion(&old_plane_state->commit->hw_done))
+ return -EBUSY;
- return funcs->atomic_async_check(plane, plane_state);
+ return funcs->atomic_async_check(plane, new_plane_state);
}
EXPORT_SYMBOL(drm_atomic_helper_async_check);
@@ -1633,8 +1618,7 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock)
return -EBUSY;
}
} else if (i == 1) {
- stall_commit = commit;
- drm_crtc_commit_get(stall_commit);
+ stall_commit = drm_crtc_commit_get(commit);
break;
}
@@ -1668,6 +1652,38 @@ static void release_crtc_commit(struct completion *completion)
drm_crtc_commit_put(commit);
}
+static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc)
+{
+ init_completion(&commit->flip_done);
+ init_completion(&commit->hw_done);
+ init_completion(&commit->cleanup_done);
+ INIT_LIST_HEAD(&commit->commit_entry);
+ kref_init(&commit->ref);
+ commit->crtc = crtc;
+}
+
+static struct drm_crtc_commit *
+crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ if (crtc) {
+ struct drm_crtc_state *new_crtc_state;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ return new_crtc_state->commit;
+ }
+
+ if (!state->fake_commit) {
+ state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL);
+ if (!state->fake_commit)
+ return NULL;
+
+ init_commit(state->fake_commit, NULL);
+ }
+
+ return state->fake_commit;
+}
+
/**
* drm_atomic_helper_setup_commit - setup possibly nonblocking commit
* @state: new modeset state to be committed
@@ -1697,7 +1713,7 @@ static void release_crtc_commit(struct completion *completion)
* drm_atomic_helper_commit_cleanup_done().
*
* This is all implemented by in drm_atomic_helper_commit(), giving drivers a
- * complete and esay-to-use default implementation of the atomic_commit() hook.
+ * complete and easy-to-use default implementation of the atomic_commit() hook.
*
* The tracking of asynchronously executed and still pending commits is done
* using the core structure &drm_crtc_commit.
@@ -1716,6 +1732,10 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_crtc_commit *commit;
int i, ret;
@@ -1724,14 +1744,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
if (!commit)
return -ENOMEM;
- init_completion(&commit->flip_done);
- init_completion(&commit->hw_done);
- init_completion(&commit->cleanup_done);
- INIT_LIST_HEAD(&commit->commit_entry);
- kref_init(&commit->ref);
- commit->crtc = crtc;
+ init_commit(commit, crtc);
- state->crtcs[i].commit = commit;
+ new_crtc_state->commit = commit;
ret = stall_checks(crtc, nonblock);
if (ret)
@@ -1765,25 +1780,45 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
drm_crtc_commit_get(commit);
}
- return 0;
-}
-EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
+ for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
+ /* Userspace is not allowed to get ahead of the previous
+ * commit with nonblocking ones. */
+ if (nonblock && old_conn_state->commit &&
+ !try_wait_for_completion(&old_conn_state->commit->flip_done))
+ return -EBUSY;
+ /* commit tracked through new_crtc_state->commit, no need to do it explicitly */
+ if (new_conn_state->crtc)
+ continue;
-static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
-{
- struct drm_crtc_commit *commit;
- int i = 0;
+ commit = crtc_or_fake_commit(state, old_conn_state->crtc);
+ if (!commit)
+ return -ENOMEM;
- list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
- /* skip the first entry, that's the current commit */
- if (i == 1)
- return commit;
- i++;
+ new_conn_state->commit = drm_crtc_commit_get(commit);
}
- return NULL;
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ /* Userspace is not allowed to get ahead of the previous
+ * commit with nonblocking ones. */
+ if (nonblock && old_plane_state->commit &&
+ !try_wait_for_completion(&old_plane_state->commit->flip_done))
+ return -EBUSY;
+
+ /*
+ * Unlike connectors, always track planes explicitly for
+ * async pageflip support.
+ */
+ commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
+ if (!commit)
+ return -ENOMEM;
+
+ new_plane_state->commit = drm_crtc_commit_get(commit);
+ }
+
+ return 0;
}
+EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
/**
* drm_atomic_helper_wait_for_dependencies - wait for required preceeding commits
@@ -1792,7 +1827,7 @@ static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
* This function waits for all preceeding commits that touch the same CRTC as
* @old_state to both be committed to the hardware (as signalled by
* drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
- * by calling drm_crtc_vblank_send_event() on the &drm_crtc_state.event).
+ * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
@@ -1800,17 +1835,17 @@ static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state;
+ struct drm_connector *conn;
+ struct drm_connector_state *old_conn_state;
struct drm_crtc_commit *commit;
int i;
long ret;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
- spin_lock(&crtc->commit_lock);
- commit = preceeding_commit(crtc);
- if (commit)
- drm_crtc_commit_get(commit);
- spin_unlock(&crtc->commit_lock);
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ commit = old_crtc_state->commit;
if (!commit)
continue;
@@ -1828,8 +1863,48 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
if (ret == 0)
DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
crtc->base.id, crtc->name);
+ }
+
+ for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ commit = old_conn_state->commit;
+
+ if (!commit)
+ continue;
+
+ ret = wait_for_completion_timeout(&commit->hw_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n",
+ conn->base.id, conn->name);
+
+ /* Currently no support for overwriting flips, hence
+ * stall for previous one to execute completely. */
+ ret = wait_for_completion_timeout(&commit->flip_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n",
+ conn->base.id, conn->name);
+ }
+
+ for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
+ commit = old_plane_state->commit;
+
+ if (!commit)
+ continue;
+
+ ret = wait_for_completion_timeout(&commit->hw_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n",
+ plane->base.id, plane->name);
- drm_crtc_commit_put(commit);
+ /* Currently no support for overwriting flips, hence
+ * stall for previous one to execute completely. */
+ ret = wait_for_completion_timeout(&commit->flip_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n",
+ plane->base.id, plane->name);
}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
@@ -1852,19 +1927,34 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc_commit *commit;
int i;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
- commit = old_state->crtcs[i].commit;
+ for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
+ commit = new_crtc_state->commit;
if (!commit)
continue;
+ /*
+ * copy new_crtc_state->commit to old_crtc_state->commit,
+ * it's unsafe to touch new_crtc_state after hw_done,
+ * but we still need to do so in cleanup_done().
+ */
+ if (old_crtc_state->commit)
+ drm_crtc_commit_put(old_crtc_state->commit);
+
+ old_crtc_state->commit = drm_crtc_commit_get(commit);
+
/* backend must have consumed any event by now */
WARN_ON(new_crtc_state->event);
complete_all(&commit->hw_done);
}
+
+ if (old_state->fake_commit) {
+ complete_all(&old_state->fake_commit->hw_done);
+ complete_all(&old_state->fake_commit->flip_done);
+ }
}
EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
@@ -1882,39 +1972,25 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
+ struct drm_crtc_state *old_crtc_state;
struct drm_crtc_commit *commit;
int i;
- long ret;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
- commit = old_state->crtcs[i].commit;
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ commit = old_crtc_state->commit;
if (WARN_ON(!commit))
continue;
complete_all(&commit->cleanup_done);
WARN_ON(!try_wait_for_completion(&commit->hw_done));
- /* commit_list borrows our reference, need to remove before we
- * clean up our drm_atomic_state. But only after it actually
- * completed, otherwise subsequent commits won't stall properly. */
- if (try_wait_for_completion(&commit->flip_done))
- goto del_commit;
-
- /* We must wait for the vblank event to signal our completion
- * before releasing our reference, since the vblank work does
- * not hold a reference of its own. */
- ret = wait_for_completion_timeout(&commit->flip_done,
- 10*HZ);
- if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
- crtc->base.id, crtc->name);
-
-del_commit:
spin_lock(&crtc->commit_lock);
list_del(&commit->commit_entry);
spin_unlock(&crtc->commit_lock);
}
+
+ if (old_state->fake_commit)
+ complete_all(&old_state->fake_commit->cleanup_done);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
@@ -2294,20 +2370,44 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
struct drm_private_state *old_obj_state, *new_obj_state;
if (stall) {
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- spin_lock(&crtc->commit_lock);
- commit = list_first_entry_or_null(&crtc->commit_list,
- struct drm_crtc_commit, commit_entry);
- if (commit)
- drm_crtc_commit_get(commit);
- spin_unlock(&crtc->commit_lock);
+ /*
+ * We have to stall for hw_done here before
+ * drm_atomic_helper_wait_for_dependencies() because flip
+ * depth > 1 is not yet supported by all drivers. As long as
+ * obj->state is directly dereferenced anywhere in the drivers
+ * atomic_commit_tail function, then it's unsafe to swap state
+ * before drm_atomic_helper_commit_hw_done() is called.
+ */
+
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+ commit = old_crtc_state->commit;
if (!commit)
continue;
ret = wait_for_completion_interruptible(&commit->hw_done);
- drm_crtc_commit_put(commit);
+ if (ret)
+ return ret;
+ }
+ for_each_old_connector_in_state(state, connector, old_conn_state, i) {
+ commit = old_conn_state->commit;
+
+ if (!commit)
+ continue;
+
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (ret)
+ return ret;
+ }
+
+ for_each_old_plane_in_state(state, plane, old_plane_state, i) {
+ commit = old_plane_state->commit;
+
+ if (!commit)
+ continue;
+
+ ret = wait_for_completion_interruptible(&commit->hw_done);
if (ret)
return ret;
}
@@ -2332,13 +2432,13 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
state->crtcs[i].state = old_crtc_state;
crtc->state = new_crtc_state;
- if (state->crtcs[i].commit) {
+ if (new_crtc_state->commit) {
spin_lock(&crtc->commit_lock);
- list_add(&state->crtcs[i].commit->commit_entry,
+ list_add(&new_crtc_state->commit->commit_entry,
&crtc->commit_list);
spin_unlock(&crtc->commit_lock);
- state->crtcs[i].commit->event = NULL;
+ new_crtc_state->commit->event = NULL;
}
}
@@ -3115,7 +3215,7 @@ struct drm_encoder *
drm_atomic_helper_best_encoder(struct drm_connector *connector)
{
WARN_ON(connector->encoder_ids[1]);
- return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
+ return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
}
EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
@@ -3187,6 +3287,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
state->connectors_changed = false;
state->color_mgmt_changed = false;
state->zpos_changed = false;
+ state->commit = NULL;
state->event = NULL;
state->pageflip_flags = 0;
}
@@ -3225,6 +3326,12 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
*/
void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
{
+ if (state->commit) {
+ kfree(state->commit->event);
+ state->commit->event = NULL;
+ drm_crtc_commit_put(state->commit);
+ }
+
drm_property_blob_put(state->mode_blob);
drm_property_blob_put(state->degamma_lut);
drm_property_blob_put(state->ctm);
@@ -3287,6 +3394,7 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
drm_framebuffer_get(state->fb);
state->fence = NULL;
+ state->commit = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
@@ -3328,6 +3436,9 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
if (state->fence)
dma_fence_put(state->fence);
+
+ if (state->commit)
+ drm_crtc_commit_put(state->commit);
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
@@ -3406,6 +3517,7 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
memcpy(state, connector->state, sizeof(*state));
if (state->crtc)
drm_connector_get(connector);
+ state->commit = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
@@ -3532,6 +3644,9 @@ __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
{
if (state->crtc)
drm_connector_put(state->connector);
+
+ if (state->commit)
+ drm_crtc_commit_put(state->commit);
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
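A hedged sketch of the per-state commit tracking introduced above: instead of walking crtc->commit_list, the previous update on a plane is reachable through old_plane_state->commit, the same pattern as the async_check hunk:

	#include <linux/completion.h>
	#include <drm/drm_atomic.h>

	/* Illustrative fragment; function name is hypothetical. */
	static int example_check_plane_idle(struct drm_plane_state *old_plane_state)
	{
		struct drm_crtc_commit *commit = old_plane_state->commit;

		/* A previous nonblocking commit must have reached hw_done. */
		if (commit && !try_wait_for_completion(&commit->hw_done))
			return -EBUSY;

		return 0;
	}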
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 7ff697389d74..aad468d170a7 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -31,6 +31,7 @@
#include <drm/drmP.h>
#include "drm_internal.h"
#include "drm_legacy.h"
+#include <drm/drm_lease.h>
/**
* DOC: master and authentication
@@ -93,7 +94,7 @@ int drm_authmagic(struct drm_device *dev, void *data,
return file ? 0 : -EINVAL;
}
-static struct drm_master *drm_master_create(struct drm_device *dev)
+struct drm_master *drm_master_create(struct drm_device *dev)
{
struct drm_master *master;
@@ -107,6 +108,14 @@ static struct drm_master *drm_master_create(struct drm_device *dev)
idr_init(&master->magic_map);
master->dev = dev;
+ /* initialize the tree of output resource lessees */
+ master->lessor = NULL;
+ master->lessee_id = 0;
+ INIT_LIST_HEAD(&master->lessees);
+ INIT_LIST_HEAD(&master->lessee_list);
+ idr_init(&master->leases);
+ idr_init(&master->lessee_idr);
+
return master;
}
@@ -189,6 +198,12 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
+ if (file_priv->master->lessor != NULL) {
+ DRM_DEBUG_LEASE("Attempt to set lessee %d as master\n", file_priv->master->lessee_id);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
ret = drm_set_master(dev, file_priv, false);
out_unlock:
mutex_unlock(&dev->master_mutex);
@@ -270,6 +285,13 @@ void drm_master_release(struct drm_file *file_priv)
if (dev->master == file_priv->master)
drm_drop_master(dev, file_priv);
out:
+ if (drm_core_check_feature(dev, DRIVER_MODESET) && file_priv->is_master) {
+ /* Revoke any leases held by this or lessees, but only if
+ * this is the "real" master
+ */
+ drm_lease_revoke(master);
+ }
+
/* drop the master reference held by the file priv */
if (file_priv->master)
drm_master_put(&file_priv->master);
@@ -288,7 +310,7 @@ out:
*/
bool drm_is_current_master(struct drm_file *fpriv)
{
- return fpriv->is_master && fpriv->master == fpriv->minor->dev->master;
+ return fpriv->is_master && drm_lease_owner(fpriv->master) == fpriv->minor->dev->master;
}
EXPORT_SYMBOL(drm_is_current_master);
@@ -310,12 +332,18 @@ static void drm_master_destroy(struct kref *kref)
struct drm_master *master = container_of(kref, struct drm_master, refcount);
struct drm_device *dev = master->dev;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_lease_destroy(master);
+
if (dev->driver->master_destroy)
dev->driver->master_destroy(dev, master);
drm_legacy_master_rmmaps(dev, master);
idr_destroy(&master->magic_map);
+ idr_destroy(&master->leases);
+ idr_destroy(&master->lessee_idr);
+
kfree(master->unique);
kfree(master);
}
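A brief, hedged sketch of what the lease-aware master test above means for callers: drm_is_current_master() now follows the lease chain back to its owner, so lessees of the active master also pass the usual guard (the ioctl shell and -EACCES choice are illustrative):

	static int example_privileged_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
	{
		/* With leasing, lessees of the active master also pass this test. */
		if (!drm_is_current_master(file_priv))
			return -EACCES;

		/* ... privileged work ... */
		return 0;
	}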
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index dc8cdfe1dcac..1638bfe9627c 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -67,17 +67,12 @@ static LIST_HEAD(bridge_list);
* drm_bridge_add - add the given bridge to the global bridge list
*
* @bridge: bridge control structure
- *
- * RETURNS:
- * Unconditionally returns Zero.
*/
-int drm_bridge_add(struct drm_bridge *bridge)
+void drm_bridge_add(struct drm_bridge *bridge)
{
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
-
- return 0;
}
EXPORT_SYMBOL(drm_bridge_add);
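Since drm_bridge_add() now returns void, bridge drivers drop the error check; a minimal sketch with a hypothetical driver structure and an empty funcs table for brevity:

	#include <linux/platform_device.h>
	#include <drm/drm_bridge.h>

	struct example_bridge {
		struct drm_bridge bridge;
	};

	/* Callbacks omitted in this sketch. */
	static const struct drm_bridge_funcs example_bridge_funcs;

	static int example_bridge_probe(struct platform_device *pdev)
	{
		struct example_bridge *eb;

		eb = devm_kzalloc(&pdev->dev, sizeof(*eb), GFP_KERNEL);
		if (!eb)
			return -ENOMEM;

		eb->bridge.funcs = &example_bridge_funcs;

		drm_bridge_add(&eb->bridge);	/* no return value to check anymore */

		platform_set_drvdata(pdev, eb);
		return 0;
	}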
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index fe0982708e95..0d002b045bd2 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -230,7 +230,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, crtc_lut->crtc_id);
if (!crtc)
return -ENOENT;
@@ -308,7 +308,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, crtc_lut->crtc_id);
if (!crtc)
return -ENOENT;
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index ba9f36cef68c..704fc8934616 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -615,7 +615,6 @@ static const struct drm_prop_enum_list drm_link_status_enum_list[] = {
{ DRM_MODE_LINK_STATUS_GOOD, "Good" },
{ DRM_MODE_LINK_STATUS_BAD, "Bad" },
};
-DRM_ENUM_NAME_FN(drm_get_link_status_name, drm_link_status_enum_list)
/**
* drm_display_info_set_bus_formats - set the supported bus formats
@@ -720,6 +719,29 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
* callback. For atomic drivers the remapping to the "ACTIVE" property is
* implemented in the DRM core. This is the only standard connector
* property that userspace can change.
+ *
+ * Note that this property cannot be set through the MODE_ATOMIC ioctl,
+ * userspace must use "ACTIVE" on the CRTC instead.
+ *
+ * WARNING:
+ *
+ * For userspace also running on legacy drivers the "DPMS" semantics are a
+ * lot more complicated. First, userspace cannot rely on the "DPMS" value
+ * returned by the GETCONNECTOR actually reflecting reality, because many
+ * drivers fail to update it. For atomic drivers this is taken care of in
+ * drm_atomic_helper_update_legacy_modeset_state().
+ *
+ * The second issue is that the DPMS state is only well-defined when the
+ * connector is connected to a CRTC. In atomic the DRM core enforces that
+ * "ACTIVE" is off in such a case, no such checks exists for "DPMS".
+ *
+ * Finally, when enabling an output using the legacy SETCONFIG ioctl then
+ * "DPMS" is forced to ON. But see above, that might not be reflected in
+ * the software value on legacy drivers.
+ *
+ * Summarizing: Only set "DPMS" when the connector is known to be enabled,
+ * assume that a successful SETCONFIG call also sets "DPMS" to on, and
+ * never read back the value of "DPMS" because it can be incorrect.
* PATH:
* Connector path property to identify how this sink is physically
* connected. Used by DP MST. This should be set by calling
@@ -1288,7 +1310,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
- connector = drm_connector_lookup(dev, out_resp->connector_id);
+ connector = drm_connector_lookup(dev, file_priv, out_resp->connector_id);
if (!connector)
return -ENOENT;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5af25ce5bf7c..f0556e654116 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -402,7 +402,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, crtc_resp->crtc_id);
if (!crtc)
return -ENOENT;
@@ -569,7 +569,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
return -ERANGE;
- crtc = drm_crtc_find(dev, crtc_req->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, crtc_req->crtc_id);
if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
return -ENOENT;
@@ -577,7 +577,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
mutex_lock(&crtc->dev->mode_config.mutex);
- drm_modeset_acquire_init(&ctx, 0);
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
if (ret)
@@ -595,7 +595,7 @@ retry:
/* Make refcounting symmetric with the lookup path. */
drm_framebuffer_get(fb);
} else {
- fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
+ fb = drm_framebuffer_lookup(dev, file_priv, crtc_req->fb_id);
if (!fb) {
DRM_DEBUG_KMS("Unknown FB ID%d\n",
crtc_req->fb_id);
@@ -680,7 +680,7 @@ retry:
goto out;
}
- connector = drm_connector_lookup(dev, out_id);
+ connector = drm_connector_lookup(dev, file_priv, out_id);
if (!connector) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
@@ -717,8 +717,9 @@ out:
kfree(connector_set);
drm_mode_destroy(dev, mode);
if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index eab36a460638..5a84c3bc915d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -562,12 +562,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
* Allocate space for the backup of all (non-pointer) encoder and
* connector data.
*/
- save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder *
+ save_encoder_crtcs = kcalloc(dev->mode_config.num_encoder,
sizeof(struct drm_crtc *), GFP_KERNEL);
if (!save_encoder_crtcs)
return -ENOMEM;
- save_connector_encoders = kzalloc(dev->mode_config.num_connector *
+ save_connector_encoders = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_encoder *), GFP_KERNEL);
if (!save_connector_encoders) {
kfree(save_encoder_crtcs);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index a43582076b20..9ebb8841778c 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -106,6 +106,7 @@ int drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj,
void drm_mode_object_register(struct drm_device *dev,
struct drm_mode_object *obj);
struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t id, uint32_t type);
void drm_mode_object_unregister(struct drm_device *dev,
struct drm_mode_object *object);
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index f9e26dda56d6..9dd879589a2c 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -155,7 +155,7 @@ static int crtc_crc_open(struct inode *inode, struct file *filep)
int ret = 0;
if (drm_drv_uses_atomic_modeset(crtc->dev)) {
- ret = drm_modeset_lock_interruptible(&crtc->mutex, NULL);
+ ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index 0ef9011a1856..02a50929af67 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -410,6 +410,7 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter,
{
u8 data;
int ret = 0;
+ int retry;
if (!mode) {
DRM_ERROR("NULL input\n");
@@ -417,10 +418,19 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter,
}
/* Read Status: i2c over aux */
- ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE,
- &data, sizeof(data));
+ for (retry = 0; retry < 6; retry++) {
+ if (retry)
+ usleep_range(500, 1000);
+
+ ret = drm_dp_dual_mode_read(adapter,
+ DP_DUAL_MODE_LSPCON_CURRENT_MODE,
+ &data, sizeof(data));
+ if (!ret)
+ break;
+ }
+
if (ret < 0) {
- DRM_ERROR("LSPCON read(0x80, 0x41) failed\n");
+ DRM_DEBUG_KMS("LSPCON read(0x80, 0x41) failed\n");
return -EFAULT;
}
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 08af8d6b844b..b3d68964b407 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -137,8 +137,10 @@ EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
switch (link_rate) {
- case 162000:
default:
+ WARN(1, "unknown DP link rate %d, using %x\n", link_rate,
+ DP_LINK_BW_1_62);
+ case 162000:
return DP_LINK_BW_1_62;
case 270000:
return DP_LINK_BW_2_7;
@@ -151,8 +153,9 @@ EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
int drm_dp_bw_code_to_link_rate(u8 link_bw)
{
switch (link_bw) {
- case DP_LINK_BW_1_62:
default:
+ WARN(1, "unknown DP link BW code %x, using 162000\n", link_bw);
+ case DP_LINK_BW_1_62:
return 162000;
case DP_LINK_BW_2_7:
return 270000;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 41b492f99955..70dcfa58d3c2 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -294,6 +294,12 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
idx += req->u.i2c_write.num_bytes;
break;
+
+ case DP_POWER_DOWN_PHY:
+ case DP_POWER_UP_PHY:
+ buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
+ idx++;
+ break;
}
raw->cur_len = idx;
}
@@ -538,6 +544,21 @@ fail_len:
return false;
}
+static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+
+ repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
+ idx++;
+ if (idx > raw->curlen) {
+ DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
+ idx, raw->curlen);
+ return false;
+ }
+ return true;
+}
+
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *msg)
{
@@ -567,6 +588,9 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
case DP_ALLOCATE_PAYLOAD:
return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
+ case DP_POWER_DOWN_PHY:
+ case DP_POWER_UP_PHY:
+ return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
default:
DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
return false;
@@ -693,6 +717,22 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
return 0;
}
+static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
+ int port_num, bool power_up)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ if (power_up)
+ req.req_type = DP_POWER_UP_PHY;
+ else
+ req.req_type = DP_POWER_DOWN_PHY;
+
+ req.u.port_num.port_number = port_num;
+ drm_dp_encode_sideband_req(&req, msg);
+ msg->path_msg = true;
+ return 0;
+}
+
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_vcpi *vcpi)
{
@@ -1724,6 +1764,40 @@ fail_put:
return ret;
}
+int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, bool power_up)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int len, ret;
+
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return -EINVAL;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+ drm_dp_put_port(port);
+ return -ENOMEM;
+ }
+
+ txmsg->dst = port->parent;
+ len = build_power_updown_phy(txmsg, port->port_num, power_up);
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
+ if (ret > 0) {
+ if (txmsg->reply.reply_type == 1)
+ ret = -EINVAL;
+ else
+ ret = 0;
+ }
+ kfree(txmsg);
+ drm_dp_put_port(port);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
+
static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
int id,
struct drm_dp_payload *payload)
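A hedged usage sketch for the new drm_dp_send_power_updown_phy() export; 'mgr' and 'port' stand in for the driver's MST topology manager and the connector's validated port:

	#include <drm/drm_dp_mst_helper.h>

	/* Illustrative only; error handling kept minimal. */
	static void example_mst_port_resume(struct drm_dp_mst_topology_mgr *mgr,
					    struct drm_dp_mst_port *port)
	{
		int ret;

		/* Power the downstream phy back up before enabling the output. */
		ret = drm_dp_send_power_updown_phy(mgr, port, true);
		if (ret)
			DRM_DEBUG_KMS("failed to power up MST phy: %d\n", ret);
	}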
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index be38ac7050d4..a934fd5e7e55 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -57,7 +57,8 @@ MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug cat
"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
-"\t\tBit 5 (0x20) will enable VBL messages (vblank code)");
+"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
+"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)");
module_param_named(debug, drm_debug, int, 0600);
static DEFINE_SPINLOCK(drm_minor_lock);
@@ -286,13 +287,13 @@ struct drm_minor *drm_minor_acquire(unsigned int minor_id)
spin_lock_irqsave(&drm_minor_lock, flags);
minor = idr_find(&drm_minors_idr, minor_id);
if (minor)
- drm_dev_ref(minor->dev);
+ drm_dev_get(minor->dev);
spin_unlock_irqrestore(&drm_minor_lock, flags);
if (!minor) {
return ERR_PTR(-ENODEV);
} else if (drm_dev_is_unplugged(minor->dev)) {
- drm_dev_unref(minor->dev);
+ drm_dev_put(minor->dev);
return ERR_PTR(-ENODEV);
}
@@ -301,7 +302,7 @@ struct drm_minor *drm_minor_acquire(unsigned int minor_id)
void drm_minor_release(struct drm_minor *minor)
{
- drm_dev_unref(minor->dev);
+ drm_dev_put(minor->dev);
}
/**
@@ -326,11 +327,11 @@ void drm_minor_release(struct drm_minor *minor)
* When cleaning up a device instance everything needs to be done in reverse:
* First unpublish the device instance with drm_dev_unregister(). Then clean up
* any other resources allocated at device initialization and drop the driver's
- * reference to &drm_device using drm_dev_unref().
+ * reference to &drm_device using drm_dev_put().
*
* Note that the lifetime rules for &drm_device instance has still a lot of
* historical baggage. Hence use the reference counting provided by
- * drm_dev_ref() and drm_dev_unref() only carefully.
+ * drm_dev_get() and drm_dev_put() only carefully.
*
* It is recommended that drivers embed &struct drm_device into their own device
* structure, which is supported through drm_dev_init().
@@ -345,7 +346,7 @@ void drm_minor_release(struct drm_minor *minor)
* Cleans up all DRM device, calling drm_lastclose().
*
* Note: Use of this function is deprecated. It will eventually go away
- * completely. Please use drm_dev_unregister() and drm_dev_unref() explicitly
+ * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly
* instead to make sure that the device isn't userspace accessible any more
* while teardown is in progress, ensuring that userspace can't access an
* inconsistent state.
@@ -360,7 +361,7 @@ void drm_put_dev(struct drm_device *dev)
}
drm_dev_unregister(dev);
- drm_dev_unref(dev);
+ drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_put_dev);
@@ -386,7 +387,7 @@ void drm_dev_unplug(struct drm_device *dev)
mutex_lock(&drm_global_mutex);
drm_device_set_unplugged(dev);
if (dev->open_count == 0)
- drm_dev_unref(dev);
+ drm_dev_put(dev);
mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_dev_unplug);
@@ -475,8 +476,8 @@ static void drm_fs_inode_free(struct inode *inode)
* initialization sequence to make sure userspace can't access an inconsistent
* state.
*
- * The initial ref-count of the object is 1. Use drm_dev_ref() and
- * drm_dev_unref() to take and drop further ref-counts.
+ * The initial ref-count of the object is 1. Use drm_dev_get() and
+ * drm_dev_put() to take and drop further ref-counts.
*
* Note that for purely virtual devices @parent can be NULL.
*
@@ -626,8 +627,8 @@ EXPORT_SYMBOL(drm_dev_fini);
* initialization sequence to make sure userspace can't access an inconsistent
* state.
*
- * The initial ref-count of the object is 1. Use drm_dev_ref() and
- * drm_dev_unref() to take and drop further ref-counts.
+ * The initial ref-count of the object is 1. Use drm_dev_get() and
+ * drm_dev_put() to take and drop further ref-counts.
*
* Note that for purely virtual devices @parent can be NULL.
*
@@ -670,36 +671,49 @@ static void drm_dev_release(struct kref *ref)
}
/**
- * drm_dev_ref - Take reference of a DRM device
+ * drm_dev_get - Take reference of a DRM device
* @dev: device to take reference of or NULL
*
* This increases the ref-count of @dev by one. You *must* already own a
- * reference when calling this. Use drm_dev_unref() to drop this reference
+ * reference when calling this. Use drm_dev_put() to drop this reference
* again.
*
* This function never fails. However, this function does not provide *any*
* guarantee whether the device is alive or running. It only provides a
* reference to the object and the memory associated with it.
*/
-void drm_dev_ref(struct drm_device *dev)
+void drm_dev_get(struct drm_device *dev)
{
if (dev)
kref_get(&dev->ref);
}
-EXPORT_SYMBOL(drm_dev_ref);
+EXPORT_SYMBOL(drm_dev_get);
/**
- * drm_dev_unref - Drop reference of a DRM device
+ * drm_dev_put - Drop reference of a DRM device
* @dev: device to drop reference of or NULL
*
* This decreases the ref-count of @dev by one. The device is destroyed if the
* ref-count drops to zero.
*/
-void drm_dev_unref(struct drm_device *dev)
+void drm_dev_put(struct drm_device *dev)
{
if (dev)
kref_put(&dev->ref, drm_dev_release);
}
+EXPORT_SYMBOL(drm_dev_put);
+
+/**
+ * drm_dev_unref - Drop reference of a DRM device
+ * @dev: device to drop reference of or NULL
+ *
+ * This is a compatibility alias for drm_dev_put() and should not be used by new
+ * code.
+ */
+void drm_dev_unref(struct drm_device *dev)
+{
+ drm_dev_put(dev);
+}
EXPORT_SYMBOL(drm_dev_unref);
static int create_compat_control_link(struct drm_device *dev)
@@ -839,7 +853,7 @@ EXPORT_SYMBOL(drm_dev_register);
*
* Unregister the DRM device from the system. This does the reverse of
* drm_dev_register() but does not deallocate the device. The caller must call
- * drm_dev_unref() to drop their final reference.
+ * drm_dev_put() to drop their final reference.
*
* A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
* which can be called while there are still open users of @dev.
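A minimal sketch of the renamed reference helpers introduced above; drm_dev_unref() survives only as a compatibility alias:

	/* Borrow a drm_device the caller already holds a reference on. */
	static void example_borrow_device(struct drm_device *ddev)
	{
		drm_dev_get(ddev);	/* take an extra reference */
		/* ... hand ddev to asynchronous work ... */
		drm_dev_put(ddev);	/* drop it; the last put frees the device */
	}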
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 6bb6337be920..00ddabfbf980 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1533,6 +1533,10 @@ static void connector_bad_edid(struct drm_connector *connector,
* level, drivers must make all reasonable efforts to expose it as an I2C
* adapter and use drm_get_edid() instead of abusing this function.
*
+ * The EDID may be overridden using debugfs override_edid or firmware EDID
+ * (drm_load_edid_firmware() and drm.edid_firmware parameter), in this priority
+ * order. Having either of them bypasses actual EDID reads.
+ *
* Return: Pointer to valid EDID or NULL if we couldn't find any.
*/
struct edid *drm_do_get_edid(struct drm_connector *connector,
@@ -1542,6 +1546,17 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
{
int i, j = 0, valid_extensions = 0;
u8 *edid, *new;
+ struct edid *override = NULL;
+
+ if (connector->override_edid)
+ override = drm_edid_duplicate((const struct edid *)
+ connector->edid_blob_ptr->data);
+
+ if (!override)
+ override = drm_load_edid_firmware(connector);
+
+ if (!IS_ERR_OR_NULL(override))
+ return override;
if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 1c0495acf341..a4915099aaa9 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -31,6 +31,22 @@ module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
"from built-in data or /lib/firmware instead. ");
+/* Use only for backward compatibility with drm_kms_helper.edid_firmware */
+int __drm_set_edid_firmware_path(const char *path)
+{
+ scnprintf(edid_firmware, sizeof(edid_firmware), "%s", path);
+
+ return 0;
+}
+EXPORT_SYMBOL(__drm_set_edid_firmware_path);
+
+/* Use only for backward compatibility with drm_kms_helper.edid_firmware */
+int __drm_get_edid_firmware_path(char *buf, size_t bufsize)
+{
+ return scnprintf(buf, bufsize, "%s", edid_firmware);
+}
+EXPORT_SYMBOL(__drm_get_edid_firmware_path);
+
#define GENERIC_EDIDS 6
static const char * const generic_edid_name[GENERIC_EDIDS] = {
"edid/800x600.bin",
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 0708779840d2..59e0ebe733f8 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -220,13 +220,13 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- encoder = drm_encoder_find(dev, enc_resp->encoder_id);
+ encoder = drm_encoder_find(dev, file_priv, enc_resp->encoder_id);
if (!encoder)
return -ENOENT;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
crtc = drm_encoder_get_crtc(encoder);
- if (crtc)
+ if (crtc && drm_lease_held(file_priv, crtc->base.id))
enc_resp->crtc_id = crtc->base.id;
else
enc_resp->crtc_id = 0;
@@ -234,7 +234,8 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
enc_resp->encoder_type = encoder->encoder_type;
enc_resp->encoder_id = encoder->base.id;
- enc_resp->possible_crtcs = encoder->possible_crtcs;
+ enc_resp->possible_crtcs = drm_lease_filter_crtcs(file_priv,
+ encoder->possible_crtcs);
enc_resp->possible_clones = encoder->possible_clones;
return 0;
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index f2ee88363015..0e3c14174d08 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -38,7 +38,7 @@ struct drm_fbdev_cma {
* Provides helper functions for creating a cma (contiguous memory allocator)
* backed framebuffer.
*
- * drm_fb_cma_create() is used in the &drm_mode_config_funcs.fb_create
+ * drm_gem_fb_create() is used in the &drm_mode_config_funcs.fb_create
* callback function to create a cma backed framebuffer.
*
* An fbdev framebuffer backed by cma is also available by calling
@@ -61,8 +61,8 @@ struct drm_fbdev_cma {
* }
*
* static struct drm_framebuffer_funcs driver_fb_funcs = {
- * .destroy = drm_fb_cma_destroy,
- * .create_handle = drm_fb_cma_create_handle,
+ * .destroy = drm_gem_fb_destroy,
+ * .create_handle = drm_gem_fb_create_handle,
* .dirty = driver_fb_dirty,
* };
*
@@ -80,57 +80,6 @@ static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
return container_of(helper, struct drm_fbdev_cma, fb_helper);
}
-void drm_fb_cma_destroy(struct drm_framebuffer *fb)
-{
- drm_gem_fb_destroy(fb);
-}
-EXPORT_SYMBOL(drm_fb_cma_destroy);
-
-int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv, unsigned int *handle)
-{
- return drm_gem_fb_create_handle(fb, file_priv, handle);
-}
-EXPORT_SYMBOL(drm_fb_cma_create_handle);
-
-/**
- * drm_fb_cma_create_with_funcs() - helper function for the
- * &drm_mode_config_funcs.fb_create
- * callback
- * @dev: DRM device
- * @file_priv: drm file for the ioctl call
- * @mode_cmd: metadata from the userspace fb creation request
- * @funcs: vtable to be used for the new framebuffer object
- *
- * This can be used to set &drm_framebuffer_funcs for drivers that need the
- * &drm_framebuffer_funcs.dirty callback. Use drm_fb_cma_create() if you don't
- * need to change &drm_framebuffer_funcs.
- */
-struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
- struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
- const struct drm_framebuffer_funcs *funcs)
-{
- return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd, funcs);
-}
-EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
-
-/**
- * drm_fb_cma_create() - &drm_mode_config_funcs.fb_create callback function
- * @dev: DRM device
- * @file_priv: drm file for the ioctl call
- * @mode_cmd: metadata from the userspace fb creation request
- *
- * If your hardware has special alignment or pitch requirements these should be
- * checked before calling this function. Use drm_fb_cma_create_with_funcs() if
- * you need to set &drm_framebuffer_funcs.dirty.
- */
-struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
- struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- return drm_gem_fb_create(dev, file_priv, mode_cmd);
-}
-EXPORT_SYMBOL_GPL(drm_fb_cma_create);
-
/**
* drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
* @fb: The framebuffer
@@ -181,26 +130,6 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
-/**
- * drm_fb_cma_prepare_fb() - Prepare CMA framebuffer
- * @plane: Which plane
- * @state: Plane state attach fence to
- *
- * This should be set as the &struct drm_plane_helper_funcs.prepare_fb hook.
- *
- * This function checks if the plane FB has an dma-buf attached, extracts
- * the exclusive fence and attaches it to plane state for the atomic helper
- * to wait on.
- *
- * There is no need for cleanup_fb for CMA based framebuffer drivers.
- */
-int drm_fb_cma_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- return drm_gem_fb_prepare_fb(plane, state);
-}
-EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
-
#ifdef CONFIG_DEBUG_FS
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
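With the cma wrappers removed above, drivers point their fb_create hook at the GEM framebuffer helper directly; a short sketch (the surrounding funcs table is illustrative):

	#include <drm/drm_atomic_helper.h>
	#include <drm/drm_gem_framebuffer_helper.h>

	static const struct drm_mode_config_funcs example_mode_config_funcs = {
		.fb_create     = drm_gem_fb_create,
		.atomic_check  = drm_atomic_helper_check,
		.atomic_commit = drm_atomic_helper_commit,
	};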
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1b8f013ffa65..116d1f1337c7 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -910,6 +910,9 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
if (!drm_fbdev_emulation || !fb_helper)
return;
+ cancel_work_sync(&fb_helper->resume_work);
+ cancel_work_sync(&fb_helper->dirty_work);
+
info = fb_helper->fbdev;
if (info) {
if (info->cmap.len)
@@ -918,9 +921,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
fb_helper->fbdev = NULL;
- cancel_work_sync(&fb_helper->resume_work);
- cancel_work_sync(&fb_helper->dirty_work);
-
mutex_lock(&kernel_fb_helper_lock);
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
@@ -2266,7 +2266,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (modes[n] == NULL)
return best_score;
- crtcs = kzalloc(fb_helper->connector_count *
+ crtcs = kcalloc(fb_helper->connector_count,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
if (!crtcs)
return best_score;
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index af279844d7ce..279c1035c12d 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -381,7 +381,7 @@ int drm_mode_rmfb(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- fb = drm_framebuffer_lookup(dev, *id);
+ fb = drm_framebuffer_lookup(dev, file_priv, *id);
if (!fb)
return -ENOENT;
@@ -450,7 +450,7 @@ int drm_mode_getfb(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- fb = drm_framebuffer_lookup(dev, r->fb_id);
+ fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
if (!fb)
return -ENOENT;
@@ -515,7 +515,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- fb = drm_framebuffer_lookup(dev, r->fb_id);
+ fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
if (!fb)
return -ENOENT;
@@ -681,6 +681,7 @@ EXPORT_SYMBOL(drm_framebuffer_init);
/**
* drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
* @dev: drm device
+ * @file_priv: drm file to check for lease against.
* @id: id of the fb object
*
* If successful, this grabs an additional reference to the framebuffer -
@@ -688,12 +689,13 @@ EXPORT_SYMBOL(drm_framebuffer_init);
* again, using drm_framebuffer_put().
*/
struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t id)
{
struct drm_mode_object *obj;
struct drm_framebuffer *fb = NULL;
- obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_FB);
+ obj = __drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_FB);
if (obj)
fb = obj_to_fb(obj);
return fb;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c55f338e380b..55d6182555c7 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -282,15 +282,6 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
struct drm_gem_object *obj;
- /* This is gross. The idr system doesn't let us try a delete and
- * return an error code. It just spews if you fail at deleting.
- * So, we have to grab a lock around finding the object and then
- * doing the delete on it and dropping the refcount, or the user
- * could race us to double-decrement the refcount and cause a
- * use-after-free later. Given the frequency of our handle lookups,
- * we may want to use ida for number allocation and a hash table
- * for the pointers, anyway.
- */
spin_lock(&filp->table_lock);
/* Check if we currently have a reference on the object */
@@ -334,6 +325,12 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
if (!obj)
return -ENOENT;
+ /* Don't allow imported objects to be mapped */
+ if (obj->import_attach) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
@@ -537,7 +534,7 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset);
* Note that you are not allowed to change gfp-zones during runtime. That is,
* shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
* set during initialization. If you have special zone constraints, set them
- * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
+ * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
* to keep pages in the required zone during swap-in.
*/
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 373e33f22be4..020e7668dfab 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -112,7 +112,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
GFP_KERNEL | __GFP_NOWARN);
if (!cma_obj->vaddr) {
- dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
+ dev_dbg(drm->dev, "failed to allocate buffer with size %zu\n",
size);
ret = -ENOMEM;
goto error;
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index d54a083dc5dd..aa8cb9bfa499 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -27,19 +27,24 @@
* DOC: overview
*
* This library provides helpers for drivers that don't subclass
- * &drm_framebuffer and and use &drm_gem_object for their backing storage.
+ * &drm_framebuffer and use &drm_gem_object for their backing storage.
*
* Drivers without additional needs to validate framebuffers can simply use
- * drm_gem_fb_create() and everything is wired up automatically. But all
- * parts can be used individually.
+ * drm_gem_fb_create() and everything is wired up automatically. Other drivers
+ * can use all parts independently.
*/
/**
- * drm_gem_fb_get_obj() - Get GEM object for framebuffer
- * @fb: The framebuffer
- * @plane: Which plane
+ * drm_gem_fb_get_obj() - Get GEM object backing the framebuffer
+ * @fb: Framebuffer
+ * @plane: Plane index
*
- * Returns the GEM object for given framebuffer.
+ * No additional reference is taken beyond the one that the &drm_framebuffer
+ * already holds.
+ *
+ * Returns:
+ * Pointer to &drm_gem_object for the given framebuffer and plane index or NULL
+ * if it does not exist.
*/
struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
unsigned int plane)
@@ -82,7 +87,7 @@ drm_gem_fb_alloc(struct drm_device *dev,
/**
* drm_gem_fb_destroy - Free GEM backed framebuffer
- * @fb: DRM framebuffer
+ * @fb: Framebuffer
*
* Frees a GEM backed framebuffer with its backing buffer(s) and the structure
* itself. Drivers can use this as their &drm_framebuffer_funcs->destroy
@@ -102,12 +107,13 @@ EXPORT_SYMBOL(drm_gem_fb_destroy);
/**
* drm_gem_fb_create_handle - Create handle for GEM backed framebuffer
- * @fb: DRM framebuffer
- * @file: drm file
- * @handle: handle created
+ * @fb: Framebuffer
+ * @file: DRM file to register the handle for
+ * @handle: Pointer to return the created handle
*
+ * This function creates a handle for the GEM object backing the framebuffer.
* Drivers can use this as their &drm_framebuffer_funcs->create_handle
- * callback.
+ * callback. The GETFB IOCTL calls into this callback.
*
* Returns:
* 0 on success or a negative error code on failure.
@@ -120,18 +126,21 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
EXPORT_SYMBOL(drm_gem_fb_create_handle);
/**
- * drm_gem_fb_create_with_funcs() - helper function for the
+ * drm_gem_fb_create_with_funcs() - Helper function for the
* &drm_mode_config_funcs.fb_create
* callback
* @dev: DRM device
- * @file: drm file for the ioctl call
- * @mode_cmd: metadata from the userspace fb creation request
+ * @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
*
* This can be used to set &drm_framebuffer_funcs for drivers that need the
* &drm_framebuffer_funcs.dirty callback. Use drm_gem_fb_create() if you don't
* need to change &drm_framebuffer_funcs.
* The function does buffer size validation.
+ *
+ * Returns:
+ * Pointer to a &drm_framebuffer on success or an error pointer on failure.
*/
struct drm_framebuffer *
drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
@@ -154,7 +163,7 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
objs[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
if (!objs[i]) {
- DRM_DEV_ERROR(dev->dev, "Failed to lookup GEM\n");
+ DRM_DEBUG_KMS("Failed to lookup GEM object\n");
ret = -ENOENT;
goto err_gem_object_put;
}
@@ -192,15 +201,26 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
};
/**
- * drm_gem_fb_create() - &drm_mode_config_funcs.fb_create callback function
+ * drm_gem_fb_create() - Helper function for the
+ * &drm_mode_config_funcs.fb_create callback
* @dev: DRM device
- * @file: drm file for the ioctl call
- * @mode_cmd: metadata from the userspace fb creation request
+ * @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
+ *
+ * This function creates a new framebuffer object described by
+ * &drm_mode_fb_cmd2. This description includes handles for the buffer(s)
+ * backing the framebuffer.
*
* If your hardware has special alignment or pitch requirements these should be
* checked before calling this function. The function does buffer size
* validation. Use drm_gem_fb_create_with_funcs() if you need to set
* &drm_framebuffer_funcs.dirty.
+ *
+ * Drivers can use this as their &drm_mode_config_funcs.fb_create callback.
+ * The ADDFB2 IOCTL calls into this callback.
+ *
+ * Returns:
+ * Pointer to a &drm_framebuffer on success or an error pointer on failure.
*/
struct drm_framebuffer *
drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
@@ -212,15 +232,15 @@ drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
EXPORT_SYMBOL_GPL(drm_gem_fb_create);
/**
- * drm_gem_fb_prepare_fb() - Prepare gem framebuffer
- * @plane: Which plane
- * @state: Plane state attach fence to
+ * drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer
+ * @plane: Plane
+ * @state: Plane state the fence will be attached to
*
- * This can be used as the &drm_plane_helper_funcs.prepare_fb hook.
- *
- * This function checks if the plane FB has an dma-buf attached, extracts
- * the exclusive fence and attaches it to plane state for the atomic helper
- * to wait on.
+ * This function prepares a GEM backed framebuffer for scanout by checking if
+ * the plane framebuffer has a DMA-BUF attached. If it does, it extracts the
+ * exclusive fence and attaches it to the plane state for the atomic helper to
+ * wait on. This function can be used as the &drm_plane_helper_funcs.prepare_fb
+ * callback.
*
* There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple
* gem based framebuffer drivers which have their buffers always pinned in
@@ -232,7 +252,7 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
struct dma_buf *dma_buf;
struct dma_fence *fence;
- if ((plane->state->fb == state->fb) || !state->fb)
+ if (plane->state->fb == state->fb || !state->fb)
return 0;
dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
@@ -246,17 +266,19 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
/**
- * drm_gem_fbdev_fb_create - Create a drm_framebuffer for fbdev emulation
+ * drm_gem_fbdev_fb_create - Create a GEM backed &drm_framebuffer for fbdev
+ * emulation
* @dev: DRM device
* @sizes: fbdev size description
- * @pitch_align: optional pitch alignment
+ * @pitch_align: Optional pitch alignment
* @obj: GEM object backing the framebuffer
* @funcs: vtable to be used for the new framebuffer object
*
- * This function creates a framebuffer for use with fbdev emulation.
+ * This function creates a framebuffer from a &drm_fb_helper_surface_size
+ * description for use in the &drm_fb_helper_funcs.fb_probe callback.
*
* Returns:
- * Pointer to a drm_framebuffer on success or an error pointer on failure.
+ * Pointer to a &drm_framebuffer on success or an error pointer on failure.
*/
struct drm_framebuffer *
drm_gem_fbdev_fb_create(struct drm_device *dev,
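
The expanded kernel-doc above positions drm_gem_fb_prepare_fb() as a drop-in &drm_plane_helper_funcs.prepare_fb implementation for GEM based drivers whose buffers stay pinned. A hedged sketch of the wiring, with foo_plane_atomic_update() standing in for a driver's real update hook:

#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

/* foo_plane_atomic_update() is an assumed driver function, declared here
 * only so the sketch is self-contained. */
static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state);

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	.prepare_fb    = drm_gem_fb_prepare_fb,
	.atomic_update = foo_plane_atomic_update,
	/* no .cleanup_fb: buffers are assumed pinned for their lifetime */
};
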
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index fbc3f308fa19..c9d5a6cd4d41 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -55,7 +55,6 @@ int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);
/* drm_vblank.c */
-extern unsigned int drm_timestamp_monotonic;
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
void drm_vblank_cleanup(struct drm_device *dev);
@@ -71,6 +70,12 @@ int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
/* drm_auth.c */
int drm_getmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index a9ae6dd2d593..4aafe4802099 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -235,7 +235,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
/* Only some caps make sense with UMS/render-only drivers. */
switch (req->capability) {
case DRM_CAP_TIMESTAMP_MONOTONIC:
- req->value = drm_timestamp_monotonic;
+ req->value = 1;
return 0;
case DRM_CAP_PRIME:
req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
@@ -663,6 +663,12 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 6e35a56a6102..93e2b30fe1a5 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -26,6 +26,7 @@
*/
#include <linux/module.h>
+#include <drm/drmP.h>
#include "drm_crtc_helper_internal.h"
@@ -33,6 +34,33 @@ MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
+#if IS_ENABLED(CONFIG_DRM_LOAD_EDID_FIRMWARE)
+
+/* Backward compatibility for drm_kms_helper.edid_firmware */
+static int edid_firmware_set(const char *val, const struct kernel_param *kp)
+{
+ DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware intead.\n");
+
+ return __drm_set_edid_firmware_path(val);
+}
+
+static int edid_firmware_get(char *buffer, const struct kernel_param *kp)
+{
+ return __drm_get_edid_firmware_path(buffer, PAGE_SIZE);
+}
+
+static const struct kernel_param_ops edid_firmware_ops = {
+ .set = edid_firmware_set,
+ .get = edid_firmware_get,
+};
+
+module_param_cb(edid_firmware, &edid_firmware_ops, NULL, 0644);
+__MODULE_PARM_TYPE(edid_firmware, "charp");
+MODULE_PARM_DESC(edid_firmware,
+ "DEPRECATED. Use drm.edid_firmware module parameter instead.");
+
+#endif
+
static int __init drm_kms_helper_init(void)
{
int ret;
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
new file mode 100644
index 000000000000..d1eb56a1eff4
--- /dev/null
+++ b/drivers/gpu/drm/drm_lease.c
@@ -0,0 +1,767 @@
+/*
+ * Copyright © 2017 Keith Packard <keithp@keithp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include "drm_internal.h"
+#include "drm_legacy.h"
+#include "drm_crtc_internal.h"
+#include <drm/drm_lease.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_crtc_helper.h>
+
+#define drm_for_each_lessee(lessee, lessor) \
+ list_for_each_entry((lessee), &(lessor)->lessees, lessee_list)
+
+static uint64_t drm_lease_idr_object;
+
+/**
+ * drm_lease_owner - return ancestor owner drm_master
+ * @master: drm_master somewhere within tree of lessees and lessors
+ *
+ * RETURN:
+ *
+ * drm_master at the top of the tree (i.e., with lessor NULL)
+ */
+struct drm_master *drm_lease_owner(struct drm_master *master)
+{
+ while (master->lessor != NULL)
+ master = master->lessor;
+ return master;
+}
+EXPORT_SYMBOL(drm_lease_owner);
+
+/**
+ * _drm_find_lessee - find lessee by id (idr_mutex held)
+ * @master: drm_master of lessor
+ * @lessee_id: id of the lessee to look up
+ *
+ * RETURN:
+ *
+ * drm_master of the lessee if valid, NULL otherwise
+ */
+
+static struct drm_master*
+_drm_find_lessee(struct drm_master *master, int lessee_id)
+{
+ lockdep_assert_held(&master->dev->mode_config.idr_mutex);
+ return idr_find(&drm_lease_owner(master)->lessee_idr, lessee_id);
+}
+
+/**
+ * _drm_lease_held_master - check to see if an object is leased (or owned) by master (idr_mutex held)
+ * @master: the master to check the lease status of
+ * @id: the id to check
+ *
+ * Checks if the specified master holds a lease on the object. Return
+ * value:
+ *
+ * true 'master' holds a lease on (or owns) the object
+ * false 'master' does not hold a lease.
+ */
+static int _drm_lease_held_master(struct drm_master *master, int id)
+{
+ lockdep_assert_held(&master->dev->mode_config.idr_mutex);
+ if (master->lessor)
+ return idr_find(&master->leases, id) != NULL;
+ return true;
+}
+
+/**
+ * _drm_has_leased - check to see if an object has been leased (idr_mutex held)
+ * @master: the master to check the lease status of
+ * @id: the id to check
+ *
+ * Checks if any lessee of 'master' holds a lease on 'id'. Return
+ * value:
+ *
+ * true Some lessee holds a lease on the object.
+ * false No lessee has a lease on the object.
+ */
+static bool _drm_has_leased(struct drm_master *master, int id)
+{
+ struct drm_master *lessee;
+
+ lockdep_assert_held(&master->dev->mode_config.idr_mutex);
+ drm_for_each_lessee(lessee, master)
+ if (_drm_lease_held_master(lessee, id))
+ return true;
+ return false;
+}
+
+/**
+ * _drm_lease_held - check drm_mode_object lease status (idr_mutex held)
+ * @file_priv: the drm file to check
+ * @id: the object id
+ *
+ * Checks if the specified master holds a lease on the object. Return
+ * value:
+ *
+ * true 'master' holds a lease on (or owns) the object
+ * false 'master' does not hold a lease.
+ */
+bool _drm_lease_held(struct drm_file *file_priv, int id)
+{
+ if (file_priv == NULL || file_priv->master == NULL)
+ return true;
+
+ return _drm_lease_held_master(file_priv->master, id);
+}
+EXPORT_SYMBOL(_drm_lease_held);
+
+/**
+ * drm_lease_held - check drm_mode_object lease status (idr_mutex not held)
+ * @file_priv: the drm file to check
+ * @id: the object id
+ *
+ * Checks if the specified master holds a lease on the object. Return
+ * value:
+ *
+ * true 'master' holds a lease on (or owns) the object
+ * false 'master' does not hold a lease.
+ */
+bool drm_lease_held(struct drm_file *file_priv, int id)
+{
+ struct drm_master *master;
+ bool ret;
+
+ if (file_priv == NULL || file_priv->master == NULL)
+ return true;
+
+ master = file_priv->master;
+ mutex_lock(&master->dev->mode_config.idr_mutex);
+ ret = _drm_lease_held_master(master, id);
+ mutex_unlock(&master->dev->mode_config.idr_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_lease_held);
+
+/**
+ * drm_lease_filter_crtcs - restrict crtc set to leased values (idr_mutex not held)
+ * @file_priv: requestor file
+ * @crtcs_in: bitmask of crtcs to check
+ *
+ * Reconstructs a crtc mask based on the crtcs which are visible
+ * through the specified file.
+ */
+uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
+{
+ struct drm_master *master;
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ int count_in, count_out;
+ uint32_t crtcs_out = 0;
+
+ if (file_priv == NULL || file_priv->master == NULL)
+ return crtcs_in;
+
+ master = file_priv->master;
+ dev = master->dev;
+
+ count_in = count_out = 0;
+ mutex_lock(&master->dev->mode_config.idr_mutex);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (_drm_lease_held_master(master, crtc->base.id)) {
+ uint32_t mask_in = 1ul << count_in;
+ if ((crtcs_in & mask_in) != 0) {
+ uint32_t mask_out = 1ul << count_out;
+ crtcs_out |= mask_out;
+ }
+ count_out++;
+ }
+ count_in++;
+ }
+ mutex_unlock(&master->dev->mode_config.idr_mutex);
+ return crtcs_out;
+}
+EXPORT_SYMBOL(drm_lease_filter_crtcs);
+
+/*
+ * drm_lease_create - create a new drm_master with leased objects (idr_mutex not held)
+ * @lessor: lease holder (or owner) of objects
+ * @leases: objects to lease to the new drm_master
+ *
+ * Uses drm_master_create to allocate a new drm_master, then checks to
+ * make sure all of the desired objects can be leased, atomically
+ * leasing them to the new drm_master.
+ *
+ * ERR_PTR(-EACCES) some other master holds the title to any object
+ * ERR_PTR(-ENOENT) some object is not a valid DRM object for this device
+ * ERR_PTR(-EBUSY) some other lessee holds title to this object
+ * ERR_PTR(-EEXIST) same object specified more than once in the provided list
+ * ERR_PTR(-ENOMEM) allocation failed
+ */
+static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr *leases)
+{
+ struct drm_device *dev = lessor->dev;
+ int error;
+ struct drm_master *lessee;
+ int object;
+ int id;
+ void *entry;
+
+ DRM_DEBUG_LEASE("lessor %d\n", lessor->lessee_id);
+
+ lessee = drm_master_create(lessor->dev);
+ if (!lessee) {
+ DRM_DEBUG_LEASE("drm_master_create failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+
+ /* Insert the new lessee into the tree */
+ id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
+ if (id < 0) {
+ error = id;
+ goto out_lessee;
+ }
+
+ lessee->lessee_id = id;
+ lessee->lessor = drm_master_get(lessor);
+ list_add_tail(&lessee->lessee_list, &lessor->lessees);
+
+ idr_for_each_entry(leases, entry, object) {
+ error = 0;
+ if (!idr_find(&dev->mode_config.crtc_idr, object))
+ error = -ENOENT;
+ else if (!_drm_lease_held_master(lessor, object))
+ error = -EACCES;
+ else if (_drm_has_leased(lessor, object))
+ error = -EBUSY;
+
+ if (error != 0) {
+ DRM_DEBUG_LEASE("object %d failed %d\n", object, error);
+ goto out_lessee;
+ }
+ }
+
+ /* Move the leases over */
+ lessee->leases = *leases;
+ DRM_DEBUG_LEASE("new lessee %d %p, lessor %d %p\n", lessee->lessee_id, lessee, lessor->lessee_id, lessor);
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return lessee;
+
+out_lessee:
+ drm_master_put(&lessee);
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return ERR_PTR(error);
+}
+
+/**
+ * drm_lease_destroy - a master is going away (idr_mutex not held)
+ * @master: the drm_master being destroyed
+ *
+ * All lessees will have been destroyed as they
+ * hold a reference on their lessor. Notify any
+ * lessor for this master so that it can check
+ * the list of lessees.
+ */
+void drm_lease_destroy(struct drm_master *master)
+{
+ struct drm_device *dev = master->dev;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+
+ DRM_DEBUG_LEASE("drm_lease_destroy %d\n", master->lessee_id);
+
+ /* This master is referenced by all lessees, hence it cannot be destroyed
+ * until all of them have been destroyed.
+ */
+ WARN_ON(!list_empty(&master->lessees));
+
+ /* Remove this master from the lessee idr in the owner */
+ if (master->lessee_id != 0) {
+ DRM_DEBUG_LEASE("remove master %d from device list of lessees\n", master->lessee_id);
+ idr_remove(&(drm_lease_owner(master)->lessee_idr), master->lessee_id);
+ }
+
+ /* Remove this master from any lessee list it may be on */
+ list_del(&master->lessee_list);
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ if (master->lessor) {
+ /* Tell the master to check the lessee list */
+ drm_sysfs_hotplug_event(dev);
+ drm_master_put(&master->lessor);
+ }
+
+ DRM_DEBUG_LEASE("drm_lease_destroy done %d\n", master->lessee_id);
+}
+
+/**
+ * _drm_lease_revoke - revoke access to all leased objects (idr_mutex held)
+ * @top: the master losing its lease
+ */
+static void _drm_lease_revoke(struct drm_master *top)
+{
+ int object;
+ void *entry;
+ struct drm_master *master = top;
+
+ lockdep_assert_held(&top->dev->mode_config.idr_mutex);
+
+ /*
+ * Walk the tree starting at 'top' emptying all leases. Because
+ * the tree is fully connected, we can do this without recursing
+ */
+ for (;;) {
+ DRM_DEBUG_LEASE("revoke leases for %p %d\n", master, master->lessee_id);
+
+ /* Evacuate the lease */
+ idr_for_each_entry(&master->leases, entry, object)
+ idr_remove(&master->leases, object);
+
+ /* Depth-first list walk */
+
+ /* Down */
+ if (!list_empty(&master->lessees)) {
+ master = list_first_entry(&master->lessees, struct drm_master, lessee_list);
+ } else {
+ /* Up */
+ while (master != top && master == list_last_entry(&master->lessor->lessees, struct drm_master, lessee_list))
+ master = master->lessor;
+
+ if (master == top)
+ break;
+
+ /* Over */
+ master = list_entry(master->lessee_list.next, struct drm_master, lessee_list);
+ }
+ }
+}
+
+/**
+ * drm_lease_revoke - revoke access to all leased objects (idr_mutex not held)
+ * @top: the master losing its lease
+ */
+void drm_lease_revoke(struct drm_master *top)
+{
+ mutex_lock(&top->dev->mode_config.idr_mutex);
+ _drm_lease_revoke(top);
+ mutex_unlock(&top->dev->mode_config.idr_mutex);
+}
+
+static int validate_lease(struct drm_device *dev,
+ struct drm_file *lessor_priv,
+ int object_count,
+ struct drm_mode_object **objects)
+{
+ int o;
+ int has_crtc = -1;
+ int has_connector = -1;
+ int has_plane = -1;
+
+ /* we want to confirm that there is at least one crtc and one connector
+    object, plus a plane when universal planes are enabled. */
+
+ for (o = 0; o < object_count; o++) {
+ if (objects[o]->type == DRM_MODE_OBJECT_CRTC && has_crtc == -1) {
+ has_crtc = o;
+ }
+ if (objects[o]->type == DRM_MODE_OBJECT_CONNECTOR && has_connector == -1)
+ has_connector = o;
+
+ if (lessor_priv->universal_planes) {
+ if (objects[o]->type == DRM_MODE_OBJECT_PLANE && has_plane == -1)
+ has_plane = o;
+ }
+ }
+ if (has_crtc == -1 || has_connector == -1)
+ return -EINVAL;
+ if (lessor_priv->universal_planes && has_plane == -1)
+ return -EINVAL;
+ return 0;
+}
+
+static int fill_object_idr(struct drm_device *dev,
+ struct drm_file *lessor_priv,
+ struct idr *leases,
+ int object_count,
+ u32 *object_ids)
+{
+ struct drm_mode_object **objects;
+ u32 o;
+ int ret;
+ objects = kcalloc(object_count, sizeof(struct drm_mode_object *),
+ GFP_KERNEL);
+ if (!objects)
+ return -ENOMEM;
+
+ /* step one - get references to all the mode objects
+ and check for validity. */
+ for (o = 0; o < object_count; o++) {
+ if ((int) object_ids[o] < 0) {
+ ret = -EINVAL;
+ goto out_free_objects;
+ }
+
+ objects[o] = drm_mode_object_find(dev, lessor_priv,
+ object_ids[o],
+ DRM_MODE_OBJECT_ANY);
+ if (!objects[o]) {
+ ret = -ENOENT;
+ goto out_free_objects;
+ }
+
+ if (!drm_mode_object_lease_required(objects[o]->type)) {
+ ret = -EINVAL;
+ goto out_free_objects;
+ }
+ }
+
+ ret = validate_lease(dev, lessor_priv, object_count, objects);
+ if (ret)
+ goto out_free_objects;
+
+ /* add their IDs to the lease request - taking into account
+ universal planes */
+ for (o = 0; o < object_count; o++) {
+ struct drm_mode_object *obj = objects[o];
+ u32 object_id = objects[o]->id;
+ DRM_DEBUG_LEASE("Adding object %d to lease\n", object_id);
+
+ /*
+ * We're using an IDR to hold the set of leased
+ * objects, but we don't need to point at the object's
+ * data structure from the lease as the main crtc_idr
+ * will be used to actually find that. Instead, all we
+ * really want is a 'leased/not-leased' result, for
+ * which any non-NULL pointer will work fine.
+ */
+ ret = idr_alloc(leases, &drm_lease_idr_object, object_id, object_id + 1, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_DEBUG_LEASE("Object %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
+ goto out_free_objects;
+ }
+ if (obj->type == DRM_MODE_OBJECT_CRTC && !lessor_priv->universal_planes) {
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+ ret = idr_alloc(leases, &drm_lease_idr_object, crtc->primary->base.id, crtc->primary->base.id + 1, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_DEBUG_LEASE("Object primary plane %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
+ goto out_free_objects;
+ }
+ if (crtc->cursor) {
+ ret = idr_alloc(leases, &drm_lease_idr_object, crtc->cursor->base.id, crtc->cursor->base.id + 1, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_DEBUG_LEASE("Object cursor plane %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
+ goto out_free_objects;
+ }
+ }
+ }
+ }
+
+ ret = 0;
+out_free_objects:
+ for (o = 0; o < object_count; o++) {
+ if (objects[o])
+ drm_mode_object_put(objects[o]);
+ }
+ kfree(objects);
+ return ret;
+}
+
+/**
+ * drm_mode_create_lease_ioctl - create a new lease
+ * @dev: the drm device
+ * @data: pointer to struct drm_mode_create_lease
+ * @lessor_priv: the file being manipulated
+ *
+ * The master associated with the specified file will have a lease
+ * created containing the objects specified in the ioctl structure.
+ * A file descriptor will be allocated for that and returned to the
+ * application.
+ */
+int drm_mode_create_lease_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *lessor_priv)
+{
+ struct drm_mode_create_lease *cl = data;
+ size_t object_count;
+ int ret = 0;
+ struct idr leases;
+ struct drm_master *lessor = lessor_priv->master;
+ struct drm_master *lessee = NULL;
+ struct file *lessee_file = NULL;
+ struct file *lessor_file = lessor_priv->filp;
+ struct drm_file *lessee_priv;
+ int fd = -1;
+ uint32_t *object_ids;
+
+ /* Can't lease without MODESET */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ /* Do not allow sub-leases */
+ if (lessor->lessor)
+ return -EINVAL;
+
+ /* need some objects */
+ if (cl->object_count == 0)
+ return -EINVAL;
+
+ if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK)))
+ return -EINVAL;
+
+ object_count = cl->object_count;
+
+ object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32));
+ if (IS_ERR(object_ids))
+ return PTR_ERR(object_ids);
+
+ idr_init(&leases);
+
+ /* fill and validate the object idr */
+ ret = fill_object_idr(dev, lessor_priv, &leases,
+ object_count, object_ids);
+ kfree(object_ids);
+ if (ret) {
+ idr_destroy(&leases);
+ return ret;
+ }
+
+ /* Allocate a file descriptor for the lease */
+ fd = get_unused_fd_flags(cl->flags & (O_CLOEXEC | O_NONBLOCK));
+ if (fd < 0) {
+ idr_destroy(&leases);
+ return fd;
+ }
+
+ DRM_DEBUG_LEASE("Creating lease\n");
+ lessee = drm_lease_create(lessor, &leases);
+
+ if (IS_ERR(lessee)) {
+ ret = PTR_ERR(lessee);
+ goto out_leases;
+ }
+
+ /* Clone the lessor file to create a new file for us */
+ DRM_DEBUG_LEASE("Allocating lease file\n");
+ path_get(&lessor_file->f_path);
+ lessee_file = alloc_file(&lessor_file->f_path,
+ lessor_file->f_mode,
+ fops_get(lessor_file->f_inode->i_fop));
+
+ if (IS_ERR(lessee_file)) {
+ ret = PTR_ERR(lessee_file);
+ goto out_lessee;
+ }
+
+ /* Initialize the new file for DRM */
+ DRM_DEBUG_LEASE("Initializing the file with %p\n", lessee_file->f_op->open);
+ ret = lessee_file->f_op->open(lessee_file->f_inode, lessee_file);
+ if (ret)
+ goto out_lessee_file;
+
+ lessee_priv = lessee_file->private_data;
+
+ /* Change the file to a master one */
+ drm_master_put(&lessee_priv->master);
+ lessee_priv->master = lessee;
+ lessee_priv->is_master = 1;
+ lessee_priv->authenticated = 1;
+
+ /* Hook up the fd */
+ fd_install(fd, lessee_file);
+
+ /* Pass fd back to userspace */
+ DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
+ cl->fd = fd;
+ cl->lessee_id = lessee->lessee_id;
+
+ DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
+ return 0;
+
+out_lessee_file:
+ fput(lessee_file);
+
+out_lessee:
+ drm_master_put(&lessee);
+
+out_leases:
+ put_unused_fd(fd);
+ idr_destroy(&leases);
+
+ DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
+ return ret;
+}
+
+/**
+ * drm_mode_list_lessees_ioctl - list lessee ids
+ * @dev: the drm device
+ * @data: pointer to struct drm_mode_list_lessees
+ * @lessor_priv: the file being manipulated
+ *
+ * Starting from the master associated with the specified file,
+ * an array of the ids of its lessees (the masters holding leases
+ * granted by it) is returned.
+ */
+
+int drm_mode_list_lessees_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *lessor_priv)
+{
+ struct drm_mode_list_lessees *arg = data;
+ __u32 __user *lessee_ids = (__u32 __user *) (uintptr_t) (arg->lessees_ptr);
+ __u32 count_lessees = arg->count_lessees;
+ struct drm_master *lessor = lessor_priv->master, *lessee;
+ int count;
+ int ret = 0;
+
+ if (arg->pad)
+ return -EINVAL;
+
+ /* Can't lease without MODESET */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ DRM_DEBUG_LEASE("List lessees for %d\n", lessor->lessee_id);
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+
+ count = 0;
+ drm_for_each_lessee(lessee, lessor) {
+ /* Only list un-revoked leases */
+ if (!idr_is_empty(&lessee->leases)) {
+ if (count_lessees > count) {
+ DRM_DEBUG_LEASE("Add lessee %d\n", lessee->lessee_id);
+ ret = put_user(lessee->lessee_id, lessee_ids + count);
+ if (ret)
+ break;
+ }
+ count++;
+ }
+ }
+
+ DRM_DEBUG_LEASE("Lessor leases to %d\n", count);
+ if (ret == 0)
+ arg->count_lessees = count;
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return ret;
+}
+
+/**
+ * drm_mode_get_lease_ioctl - list leased objects
+ * @dev: the drm device
+ * @data: pointer to struct drm_mode_get_lease
+ * @lessee_priv: the file being manipulated
+ *
+ * Return the list of leased objects for the specified lessee
+ */
+
+int drm_mode_get_lease_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *lessee_priv)
+{
+ struct drm_mode_get_lease *arg = data;
+ __u32 __user *object_ids = (__u32 __user *) (uintptr_t) (arg->objects_ptr);
+ __u32 count_objects = arg->count_objects;
+ struct drm_master *lessee = lessee_priv->master;
+ struct idr *object_idr;
+ int count;
+ void *entry;
+ int object;
+ int ret = 0;
+
+ if (arg->pad)
+ return -EINVAL;
+
+ /* Can't lease without MODESET */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ DRM_DEBUG_LEASE("get lease for %d\n", lessee->lessee_id);
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+
+ if (lessee->lessor == NULL)
+ /* owner can use all objects */
+ object_idr = &lessee->dev->mode_config.crtc_idr;
+ else
+ /* lessee can only use allowed objects */
+ object_idr = &lessee->leases;
+
+ count = 0;
+ idr_for_each_entry(object_idr, entry, object) {
+ if (count_objects > count) {
+ DRM_DEBUG_LEASE("adding object %d\n", object);
+ ret = put_user(object, object_ids + count);
+ if (ret)
+ break;
+ }
+ count++;
+ }
+
+ DRM_DEBUG("lease holds %d objects\n", count);
+ if (ret == 0)
+ arg->count_objects = count;
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return ret;
+}
+
+/**
+ * drm_mode_revoke_lease_ioctl - revoke lease
+ * @dev: the drm device
+ * @data: pointer to struct drm_mode_revoke_lease
+ * @lessor_priv: the file being manipulated
+ *
+ * This removes all of the objects from the lease without
+ * actually getting rid of the lease itself; that way all
+ * references to it still work correctly.
+ */
+int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *lessor_priv)
+{
+ struct drm_mode_revoke_lease *arg = data;
+ struct drm_master *lessor = lessor_priv->master;
+ struct drm_master *lessee;
+ int ret = 0;
+
+ DRM_DEBUG_LEASE("revoke lease for %d\n", arg->lessee_id);
+
+ /* Can't lease without MODESET */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+
+ lessee = _drm_find_lessee(lessor, arg->lessee_id);
+
+ /* No such lessee */
+ if (!lessee) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ /* Lease is not held by lessor */
+ if (lessee->lessor != lessor) {
+ ret = -EACCES;
+ goto fail;
+ }
+
+ _drm_lease_revoke(lessee);
+
+fail:
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return ret;
+}
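
To show how the new drm_lease.c entry points are reached, here is a hedged userspace sketch of DRM_IOCTL_MODE_CREATE_LEASE. It assumes uapi headers that already carry the lease additions, and that drm_fd, crtc_id and connector_id come from the usual drmModeGetResources() enumeration; error handling is reduced to the bare minimum.

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <xf86drm.h>	/* drmIoctl(); pulls in the drm uapi headers */

/* Sketch: lease one CRTC and one connector to another process. The kernel
 * implicitly adds the CRTC's primary (and cursor) plane unless the
 * requestor has enabled universal planes. */
static int foo_create_lease(int drm_fd, uint32_t crtc_id, uint32_t connector_id)
{
	uint32_t objects[2] = { crtc_id, connector_id };
	struct drm_mode_create_lease cl = {
		.object_ids   = (uintptr_t)objects,
		.object_count = 2,
		.flags        = O_CLOEXEC,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_MODE_CREATE_LEASE, &cl))
		return -errno;

	/* cl.fd is a new DRM master fd restricted to the leased objects;
	 * cl.lessee_id identifies the lease for a later REVOKE_LEASE. */
	return cl.fd;
}

The returned fd is handed to the lessee, which uses it like any other DRM device fd; the drm_mode_getresources() filtering added in this series ensures the lessee only ever sees the leased objects.
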
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 74f6ff5df656..cda8bfab6d3b 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -122,10 +122,12 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
count = 0;
crtc_id = u64_to_user_ptr(card_res->crtc_id_ptr);
drm_for_each_crtc(crtc, dev) {
- if (count < card_res->count_crtcs &&
- put_user(crtc->base.id, crtc_id + count))
- return -EFAULT;
- count++;
+ if (drm_lease_held(file_priv, crtc->base.id)) {
+ if (count < card_res->count_crtcs &&
+ put_user(crtc->base.id, crtc_id + count))
+ return -EFAULT;
+ count++;
+ }
}
card_res->count_crtcs = count;
@@ -143,12 +145,14 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
drm_for_each_connector_iter(connector, &conn_iter) {
- if (count < card_res->count_connectors &&
- put_user(connector->base.id, connector_id + count)) {
- drm_connector_list_iter_end(&conn_iter);
- return -EFAULT;
+ if (drm_lease_held(file_priv, connector->base.id)) {
+ if (count < card_res->count_connectors &&
+ put_user(connector->base.id, connector_id + count)) {
+ drm_connector_list_iter_end(&conn_iter);
+ return -EFAULT;
+ }
+ count++;
}
- count++;
}
card_res->count_connectors = count;
drm_connector_list_iter_end(&conn_iter);
@@ -385,7 +389,6 @@ void drm_mode_config_init(struct drm_device *dev)
dev->mode_config.num_connector = 0;
dev->mode_config.num_crtc = 0;
dev->mode_config.num_encoder = 0;
- dev->mode_config.num_overlay_plane = 0;
dev->mode_config.num_total_plane = 0;
}
EXPORT_SYMBOL(drm_mode_config_init);
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index 1055533792f3..ce4d2fb32810 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -104,7 +104,27 @@ void drm_mode_object_unregister(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
}
+/**
+ * drm_mode_object_lease_required - check types which must be leased to be used
+ * @type: type of object
+ *
+ * Returns whether the provided type of drm_mode_object must
+ * be owned or leased to be used by a process.
+ */
+bool drm_mode_object_lease_required(uint32_t type)
+{
+ switch (type) {
+ case DRM_MODE_OBJECT_CRTC:
+ case DRM_MODE_OBJECT_CONNECTOR:
+ case DRM_MODE_OBJECT_PLANE:
+ return true;
+ default:
+ return false;
+ }
+}
+
struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
@@ -116,6 +136,10 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
if (obj && obj->id != id)
obj = NULL;
+ if (obj && drm_mode_object_lease_required(obj->type) &&
+ !_drm_lease_held(file_priv, obj->id))
+ obj = NULL;
+
if (obj && obj->free_cb) {
if (!kref_get_unless_zero(&obj->refcount))
obj = NULL;
@@ -128,6 +152,7 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
/**
* drm_mode_object_find - look up a drm object with static lifetime
* @dev: drm device
+ * @file_priv: drm file
* @id: id of the mode object
* @type: type of the mode object
*
@@ -136,11 +161,12 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
 * by calling drm_mode_object_put().
*/
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
- obj = __drm_mode_object_find(dev, id, type);
+ obj = __drm_mode_object_find(dev, file_priv, id, type);
return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);
@@ -247,8 +273,9 @@ int drm_object_property_set_value(struct drm_mode_object *obj,
}
EXPORT_SYMBOL(drm_object_property_set_value);
-int __drm_object_property_get_value(struct drm_mode_object *obj,
- struct drm_property *property, uint64_t *val)
+static int __drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t *val)
{
int i;
@@ -358,7 +385,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
if (!obj) {
ret = -ENOENT;
goto out;
@@ -480,7 +507,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ arg_obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
if (!arg_obj)
return -ENOENT;
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index af4e906c630d..963e23db0fe7 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -39,23 +39,28 @@
*
* The basic usage pattern is to::
*
- * drm_modeset_acquire_init(&ctx)
+ * drm_modeset_acquire_init(ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
* retry:
* foreach (lock in random_ordered_set_of_locks) {
- * ret = drm_modeset_lock(lock, &ctx)
+ * ret = drm_modeset_lock(lock, ctx)
* if (ret == -EDEADLK) {
- * drm_modeset_backoff(&ctx);
- * goto retry;
+ * ret = drm_modeset_backoff(ctx);
+ * if (!ret)
+ * goto retry;
* }
+ * if (ret)
+ * goto out;
* }
* ... do stuff ...
- * drm_modeset_drop_locks(&ctx);
- * drm_modeset_acquire_fini(&ctx);
+ * out:
+ * drm_modeset_drop_locks(ctx);
+ * drm_modeset_acquire_fini(ctx);
*
* If all that is needed is a single modeset lock, then the &struct
* drm_modeset_acquire_ctx is not needed and the locking can be simplified
- * by passing a NULL instead of ctx in the drm_modeset_lock()
- * call and, when done, by calling drm_modeset_unlock().
+ * by passing a NULL instead of ctx in the drm_modeset_lock() call or
+ * calling drm_modeset_lock_single_interruptible(). To unlock afterwards
+ * call drm_modeset_unlock().
*
* On top of these per-object locks using &ww_mutex there's also an overall
* &drm_mode_config.mutex, for protecting everything else. Mostly this means
@@ -88,7 +93,7 @@ void drm_modeset_lock_all(struct drm_device *dev)
struct drm_modeset_acquire_ctx *ctx;
int ret;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
if (WARN_ON(!ctx))
return;
@@ -178,7 +183,11 @@ EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
/**
* drm_modeset_acquire_init - initialize acquire context
* @ctx: the acquire context
- * @flags: for future
+ * @flags: 0 or %DRM_MODESET_ACQUIRE_INTERRUPTIBLE
+ *
+ * When passing %DRM_MODESET_ACQUIRE_INTERRUPTIBLE to @flags,
+ * all calls to drm_modeset_lock() will perform an interruptible
+ * wait.
*/
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
uint32_t flags)
@@ -186,6 +195,9 @@ void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
memset(ctx, 0, sizeof(*ctx));
ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
INIT_LIST_HEAD(&ctx->locked);
+
+ if (flags & DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
+ ctx->interruptible = true;
}
EXPORT_SYMBOL(drm_modeset_acquire_init);
@@ -261,8 +273,19 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
return ret;
}
-static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
- bool interruptible)
+/**
+ * drm_modeset_backoff - deadlock avoidance backoff
+ * @ctx: the acquire context
+ *
+ * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
+ * you must call this function to drop all currently held locks and
+ * block until the contended lock becomes available.
+ *
+ * This function returns 0 on success, or -ERESTARTSYS if this context
+ * is initialized with %DRM_MODESET_ACQUIRE_INTERRUPTIBLE and the
+ * wait has been interrupted.
+ */
+int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
struct drm_modeset_lock *contended = ctx->contended;
@@ -273,36 +296,11 @@ static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
drm_modeset_drop_locks(ctx);
- return modeset_lock(contended, ctx, interruptible, true);
-}
-
-/**
- * drm_modeset_backoff - deadlock avoidance backoff
- * @ctx: the acquire context
- *
- * If deadlock is detected (ie. drm_modeset_lock() returns -EDEADLK),
- * you must call this function to drop all currently held locks and
- * block until the contended lock becomes available.
- */
-void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
-{
- modeset_backoff(ctx, false);
+ return modeset_lock(contended, ctx, ctx->interruptible, true);
}
EXPORT_SYMBOL(drm_modeset_backoff);
/**
- * drm_modeset_backoff_interruptible - deadlock avoidance backoff
- * @ctx: the acquire context
- *
- * Interruptible version of drm_modeset_backoff()
- */
-int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
-{
- return modeset_backoff(ctx, true);
-}
-EXPORT_SYMBOL(drm_modeset_backoff_interruptible);
-
-/**
* drm_modeset_lock_init - initialize lock
* @lock: lock to init
*/
@@ -324,14 +322,18 @@ EXPORT_SYMBOL(drm_modeset_lock_init);
* deadlock scenario has been detected and it is an error to attempt
* to take any more locks without first calling drm_modeset_backoff().
*
+ * If the @ctx is not NULL and initialized with
+ * %DRM_MODESET_ACQUIRE_INTERRUPTIBLE, this function will fail with
+ * -ERESTARTSYS when interrupted.
+ *
* If @ctx is NULL then the function call behaves like a normal,
- * non-nesting mutex_lock() call.
+ * uninterruptible non-nesting mutex_lock() call.
*/
int drm_modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx)
{
if (ctx)
- return modeset_lock(lock, ctx, false, false);
+ return modeset_lock(lock, ctx, ctx->interruptible, false);
ww_mutex_lock(&lock->mutex, NULL);
return 0;
@@ -339,21 +341,19 @@ int drm_modeset_lock(struct drm_modeset_lock *lock,
EXPORT_SYMBOL(drm_modeset_lock);
/**
- * drm_modeset_lock_interruptible - take modeset lock
+ * drm_modeset_lock_single_interruptible - take a single modeset lock
* @lock: lock to take
- * @ctx: acquire ctx
*
- * Interruptible version of drm_modeset_lock()
+ * This function behaves as drm_modeset_lock() with a NULL context,
+ * but performs interruptible waits.
+ *
+ * This function returns 0 on success, or -ERESTARTSYS when interrupted.
*/
-int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
- struct drm_modeset_acquire_ctx *ctx)
+int drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock)
{
- if (ctx)
- return modeset_lock(lock, ctx, true, false);
-
return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
-EXPORT_SYMBOL(drm_modeset_lock_interruptible);
+EXPORT_SYMBOL(drm_modeset_lock_single_interruptible);
/**
* drm_modeset_unlock - drop modeset lock
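
Since drm_modeset_backoff() now returns an error when a context initialized with DRM_MODESET_ACQUIRE_INTERRUPTIBLE has its wait interrupted, callers must check its return value before retrying. A minimal sketch of the updated pattern, with foo_do_modeset() as an assumed placeholder for the locked work:

#include <drm/drmP.h>
#include <drm/drm_modeset_lock.h>

/* Assumed driver helper that does the actual work under the locks. */
static int foo_do_modeset(struct drm_device *dev,
			  struct drm_modeset_acquire_ctx *ctx);

static int foo_locked_op(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret)
		goto out;
	ret = foo_do_modeset(dev, &ctx);
out:
	if (ret == -EDEADLK) {
		/* Drops all held locks; -ERESTARTSYS if interrupted. */
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}
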
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 8dafbdfcd2ea..4c191c050e7d 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -233,6 +233,8 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
if (!panel && !bridge)
return -EINVAL;
+ if (panel)
+ *panel = NULL;
remote = of_graph_get_remote_node(np, port, endpoint);
if (!remote)
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 1235c9877d6f..4db9c515b74f 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -274,7 +274,7 @@ err_agp:
drm_pci_agp_destroy(dev);
pci_disable_device(pdev);
err_free:
- drm_dev_unref(dev);
+ drm_dev_put(dev);
return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 7a00351d5b5d..19404e34cd59 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -241,8 +241,6 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
list_add_tail(&plane->head, &config->plane_list);
plane->index = config->num_total_plane++;
- if (plane->type == DRM_PLANE_TYPE_OVERLAY)
- config->num_overlay_plane++;
drm_object_attach_property(&plane->base,
config->plane_type_property,
@@ -353,8 +351,6 @@ void drm_plane_cleanup(struct drm_plane *plane)
list_del(&plane->head);
dev->mode_config.num_total_plane--;
- if (plane->type == DRM_PLANE_TYPE_OVERLAY)
- dev->mode_config.num_overlay_plane--;
WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
if (plane->state && plane->funcs->atomic_destroy_state)
@@ -462,43 +458,35 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
struct drm_mode_config *config;
struct drm_plane *plane;
uint32_t __user *plane_ptr;
- int copied = 0;
- unsigned num_planes;
+ int count = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
config = &dev->mode_config;
-
- if (file_priv->universal_planes)
- num_planes = config->num_total_plane;
- else
- num_planes = config->num_overlay_plane;
+ plane_ptr = u64_to_user_ptr(plane_resp->plane_id_ptr);
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
- if (num_planes &&
- (plane_resp->count_planes >= num_planes)) {
- plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
-
- /* Plane lists are invariant, no locking needed. */
- drm_for_each_plane(plane, dev) {
- /*
- * Unless userspace set the 'universal planes'
- * capability bit, only advertise overlays.
- */
- if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
- !file_priv->universal_planes)
- continue;
-
- if (put_user(plane->base.id, plane_ptr + copied))
+ drm_for_each_plane(plane, dev) {
+ /*
+ * Unless userspace set the 'universal planes'
+ * capability bit, only advertise overlays.
+ */
+ if (plane->type != DRM_PLANE_TYPE_OVERLAY &&
+ !file_priv->universal_planes)
+ continue;
+
+ if (drm_lease_held(file_priv, plane->base.id)) {
+ if (count < plane_resp->count_planes &&
+ put_user(plane->base.id, plane_ptr + count))
return -EFAULT;
- copied++;
+ count++;
}
}
- plane_resp->count_planes = num_planes;
+ plane_resp->count_planes = count;
return 0;
}
@@ -513,14 +501,14 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- plane = drm_plane_find(dev, plane_resp->plane_id);
+ plane = drm_plane_find(dev, file_priv, plane_resp->plane_id);
if (!plane)
return -ENOENT;
drm_modeset_lock(&plane->mutex, NULL);
- if (plane->state && plane->state->crtc)
+ if (plane->state && plane->state->crtc && drm_lease_held(file_priv, plane->state->crtc->base.id))
plane_resp->crtc_id = plane->state->crtc->base.id;
- else if (!plane->state && plane->crtc)
+ else if (!plane->state && plane->crtc && drm_lease_held(file_priv, plane->crtc->base.id))
plane_resp->crtc_id = plane->crtc->base.id;
else
plane_resp->crtc_id = 0;
@@ -534,7 +522,9 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
drm_modeset_unlock(&plane->mutex);
plane_resp->plane_id = plane->base.id;
- plane_resp->possible_crtcs = plane->possible_crtcs;
+ plane_resp->possible_crtcs = drm_lease_filter_crtcs(file_priv,
+ plane->possible_crtcs);
+
plane_resp->gamma_size = 0;
/*
@@ -667,7 +657,7 @@ static int setplane_internal(struct drm_plane *plane,
struct drm_modeset_acquire_ctx ctx;
int ret;
- drm_modeset_acquire_init(&ctx, 0);
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
ret = drm_modeset_lock_all_ctx(plane->dev, &ctx);
if (ret)
@@ -678,8 +668,9 @@ retry:
fail:
if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@@ -702,7 +693,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
*/
- plane = drm_plane_find(dev, plane_req->plane_id);
+ plane = drm_plane_find(dev, file_priv, plane_req->plane_id);
if (!plane) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
@@ -710,14 +701,14 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
}
if (plane_req->fb_id) {
- fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+ fb = drm_framebuffer_lookup(dev, file_priv, plane_req->fb_id);
if (!fb) {
DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
plane_req->fb_id);
return -ENOENT;
}
- crtc = drm_crtc_find(dev, plane_req->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, plane_req->crtc_id);
if (!crtc) {
drm_framebuffer_put(fb);
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
@@ -828,13 +819,13 @@ static int drm_mode_cursor_common(struct drm_device *dev,
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
- crtc = drm_crtc_find(dev, req->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, req->crtc_id);
if (!crtc) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
return -ENOENT;
}
- drm_modeset_acquire_init(&ctx, 0);
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
ret = drm_modeset_lock(&crtc->mutex, &ctx);
if (ret)
@@ -876,8 +867,9 @@ retry:
}
out:
if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
}
drm_modeset_drop_locks(&ctx);
@@ -942,7 +934,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
return -EINVAL;
- crtc = drm_crtc_find(dev, page_flip->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, page_flip->crtc_id);
if (!crtc)
return -ENOENT;
@@ -985,7 +977,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
return -EINVAL;
}
- drm_modeset_acquire_init(&ctx, 0);
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
ret = drm_modeset_lock(&crtc->mutex, &ctx);
if (ret)
@@ -1003,7 +995,7 @@ retry:
goto out;
}
- fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
+ fb = drm_framebuffer_lookup(dev, file_priv, page_flip->fb_id);
if (!fb) {
ret = -ENOENT;
goto out;
@@ -1037,7 +1029,7 @@ retry:
}
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof(e->event);
- e->event.user_data = page_flip->user_data;
+ e->event.vbl.user_data = page_flip->user_data;
ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
if (ret) {
kfree(e);
@@ -1074,8 +1066,9 @@ out:
crtc->primary->old_fb = NULL;
if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
}
drm_modeset_drop_locks(&ctx);
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 06aee1741e96..759ed93f4ba8 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -354,7 +354,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
/* Find current connectors for CRTC */
num_connectors = get_connectors_for_crtc(crtc, NULL, 0);
BUG_ON(num_connectors == 0);
- connector_list = kzalloc(num_connectors * sizeof(*connector_list),
+ connector_list = kcalloc(num_connectors, sizeof(*connector_list),
GFP_KERNEL);
if (!connector_list)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 22408badc617..8de93a226c24 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -318,7 +318,7 @@ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
if (IS_ERR(dma_buf))
return dma_buf;
- drm_dev_ref(dev);
+ drm_dev_get(dev);
drm_gem_object_get(exp_info->priv);
return dma_buf;
@@ -342,7 +342,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
/* drop the reference on the export fd holds */
drm_gem_object_put_unlocked(obj);
- drm_dev_unref(dev);
+ drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 904966cde32b..6dc2dde5b672 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -99,7 +99,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
/* Step 2: Validate against encoders and crtcs */
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct drm_encoder *encoder = drm_encoder_find(dev, ids[i]);
+ struct drm_encoder *encoder = drm_encoder_find(dev, NULL, ids[i]);
struct drm_crtc *crtc;
if (!encoder)
@@ -353,8 +353,6 @@ EXPORT_SYMBOL(drm_helper_probe_detect);
* drm_mode_probed_add(). New modes start their life with status as OK.
* Modes are added from a single source using the following priority order.
*
- * - debugfs 'override_edid' (used for testing only)
- * - firmware EDID (drm_load_edid_firmware())
* - &drm_connector_helper_funcs.get_modes vfunc
* - if the connector status is connector_status_connected, standard
* VESA DMT modes up to 1024x768 are automatically added
@@ -483,22 +481,7 @@ retry:
goto prune;
}
- if (connector->override_edid) {
- struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
-
- count = drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
- } else {
- struct edid *edid = drm_load_edid_firmware(connector);
- if (!IS_ERR_OR_NULL(edid)) {
- drm_mode_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
- kfree(edid);
- }
- if (count == 0)
- count = (*connector_funcs->get_modes)(connector);
- }
+ count = (*connector_funcs->get_modes)(connector);
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index bc5128203056..bae50e6b819d 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -450,7 +450,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- property = drm_property_find(dev, out_resp->prop_id);
+ property = drm_property_find(dev, file_priv, out_resp->prop_id);
if (!property)
return -ENOENT;
@@ -634,7 +634,7 @@ struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
struct drm_mode_object *obj;
struct drm_property_blob *blob = NULL;
- obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB);
+ obj = __drm_mode_object_find(dev, NULL, id, DRM_MODE_OBJECT_BLOB);
if (obj)
blob = obj_to_blob(obj);
return blob;
@@ -897,7 +897,7 @@ bool drm_property_change_valid_get(struct drm_property *property,
if (value == 0)
return true;
- *ref = __drm_mode_object_find(property->dev, value,
+ *ref = __drm_mode_object_find(property->dev, NULL, value,
property->values[0]);
return *ref != NULL;
}
diff --git a/drivers/gpu/drm/drm_scdc_helper.c b/drivers/gpu/drm/drm_scdc_helper.c
index 935653eb3616..657ea5ab6c3f 100644
--- a/drivers/gpu/drm/drm_scdc_helper.c
+++ b/drivers/gpu/drm/drm_scdc_helper.c
@@ -134,7 +134,6 @@ EXPORT_SYMBOL(drm_scdc_write);
* Returns:
* True if the scrambling is enabled, false otherwise.
*/
-
bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter)
{
u8 status;
@@ -142,7 +141,7 @@ bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter)
ret = drm_scdc_readb(adapter, SCDC_SCRAMBLER_STATUS, &status);
if (ret < 0) {
- DRM_ERROR("Failed to read scrambling status, error %d\n", ret);
+ DRM_ERROR("Failed to read scrambling status: %d\n", ret);
return false;
}
@@ -162,7 +161,6 @@ EXPORT_SYMBOL(drm_scdc_get_scrambling_status);
* Returns:
* True if scrambling is set/reset successfully, false otherwise.
*/
-
bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
{
u8 config;
@@ -170,7 +168,7 @@ bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- DRM_ERROR("Failed to read tmds config, err=%d\n", ret);
+ DRM_ERROR("Failed to read TMDS config: %d\n", ret);
return false;
}
@@ -181,7 +179,7 @@ bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable)
ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
if (ret < 0) {
- DRM_ERROR("Failed to enable scrambling, error %d\n", ret);
+ DRM_ERROR("Failed to enable scrambling: %d\n", ret);
return false;
}
@@ -225,7 +223,7 @@ bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set)
ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
- DRM_ERROR("Failed to read tmds config, err=%d\n", ret);
+ DRM_ERROR("Failed to read TMDS config: %d\n", ret);
return false;
}
@@ -236,7 +234,7 @@ bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set)
ret = drm_scdc_writeb(adapter, SCDC_TMDS_CONFIG, config);
if (ret < 0) {
- DRM_ERROR("Failed to set TMDS clock ratio, error %d\n", ret);
+ DRM_ERROR("Failed to set TMDS clock ratio: %d\n", ret);
return false;
}
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 0422b8c2c2e7..f776fc1cc543 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -262,8 +262,14 @@ void drm_syncobj_free(struct kref *kref)
}
EXPORT_SYMBOL(drm_syncobj_free);
-static int drm_syncobj_create(struct drm_file *file_private,
- u32 *handle, uint32_t flags)
+/**
+ * drm_syncobj_create - create a new syncobj
+ * @out_syncobj: returned syncobj
+ * @flags: DRM_SYNCOBJ_* flags
+ * @fence: if non-NULL, the syncobj will represent this fence
+ */
+int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
+ struct dma_fence *fence)
{
int ret;
struct drm_syncobj *syncobj;
@@ -284,6 +290,25 @@ static int drm_syncobj_create(struct drm_file *file_private,
}
}
+ if (fence)
+ drm_syncobj_replace_fence(syncobj, fence);
+
+ *out_syncobj = syncobj;
+ return 0;
+}
+EXPORT_SYMBOL(drm_syncobj_create);
+
+/**
+ * drm_syncobj_get_handle - get a handle from a syncobj
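+ * @file_private: DRM file private in which to install the handle
+ * @syncobj: sync object to export
+ * @handle: out parameter with the new handle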
+ */
+int drm_syncobj_get_handle(struct drm_file *file_private,
+ struct drm_syncobj *syncobj, u32 *handle)
+{
+ int ret;
+
+ /* take a reference to put in the idr */
+ drm_syncobj_get(syncobj);
+
idr_preload(GFP_KERNEL);
spin_lock(&file_private->syncobj_table_lock);
ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
@@ -299,6 +324,22 @@ static int drm_syncobj_create(struct drm_file *file_private,
*handle = ret;
return 0;
}
+EXPORT_SYMBOL(drm_syncobj_get_handle);
+
+static int drm_syncobj_create_as_handle(struct drm_file *file_private,
+ u32 *handle, uint32_t flags)
+{
+ int ret;
+ struct drm_syncobj *syncobj;
+
+ ret = drm_syncobj_create(&syncobj, flags, NULL);
+ if (ret)
+ return ret;
+
+ ret = drm_syncobj_get_handle(file_private, syncobj, handle);
+ drm_syncobj_put(syncobj);
+ return ret;
+}
static int drm_syncobj_destroy(struct drm_file *file_private,
u32 handle)
@@ -345,33 +386,38 @@ static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
return 0;
}
-static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
- u32 handle, int *p_fd)
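+/**
+ * drm_syncobj_get_fd - get a file descriptor from a syncobj
+ * @syncobj: sync object to export
+ * @p_fd: out parameter with the new file descriptor
+ */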
+int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
- struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
int ret;
int fd;
- if (!syncobj)
- return -EINVAL;
-
fd = get_unused_fd_flags(O_CLOEXEC);
- if (fd < 0) {
- drm_syncobj_put(syncobj);
+ if (fd < 0)
return fd;
- }
if (!syncobj->file) {
ret = drm_syncobj_alloc_file(syncobj);
- if (ret)
- goto out_put_fd;
+ if (ret) {
+ put_unused_fd(fd);
+ return ret;
+ }
}
fd_install(fd, syncobj->file);
- drm_syncobj_put(syncobj);
*p_fd = fd;
return 0;
-out_put_fd:
- put_unused_fd(fd);
+}
+EXPORT_SYMBOL(drm_syncobj_get_fd);
+
+static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
+ u32 handle, int *p_fd)
+{
+ struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
+ int ret;
+
+ if (!syncobj)
+ return -EINVAL;
+
+ ret = drm_syncobj_get_fd(syncobj, p_fd);
drm_syncobj_put(syncobj);
return ret;
}
@@ -417,8 +463,8 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
return 0;
}
-int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
- int fd, int handle)
+static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
+ int fd, int handle)
{
struct dma_fence *fence = sync_file_get_fence(fd);
struct drm_syncobj *syncobj;
@@ -438,8 +484,8 @@ int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
return 0;
}
-int drm_syncobj_export_sync_file(struct drm_file *file_private,
- int handle, int *p_fd)
+static int drm_syncobj_export_sync_file(struct drm_file *file_private,
+ int handle, int *p_fd)
{
int ret;
struct dma_fence *fence;
@@ -522,8 +568,8 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
return -EINVAL;
- return drm_syncobj_create(file_private,
- &args->handle, args->flags);
+ return drm_syncobj_create_as_handle(file_private,
+ &args->handle, args->flags);
}
int
@@ -799,7 +845,8 @@ static int drm_syncobj_array_wait(struct drm_device *dev,
}
static int drm_syncobj_array_find(struct drm_file *file_private,
- void *user_handles, uint32_t count_handles,
+ void __user *user_handles,
+ uint32_t count_handles,
struct drm_syncobj ***syncobjs_out)
{
uint32_t i, *handles;
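The split above separates syncobj creation from handle installation, so a driver can wrap an existing fence before exposing it to userspace. A minimal sketch (not from the patch) of how a driver might combine the newly exported helpers; the wrapper function and its name are hypothetical:

	#include <drm/drm_syncobj.h>

	/* Hypothetical helper: wrap an existing fence in a syncobj and hand a
	 * handle back to userspace.  Mirrors drm_syncobj_create_as_handle()
	 * above, but starts from a caller-supplied fence. */
	static int example_fence_to_syncobj_handle(struct drm_file *file_private,
						   struct dma_fence *fence,
						   u32 *handle)
	{
		struct drm_syncobj *syncobj;
		int ret;

		/* If 'fence' is non-NULL it becomes the syncobj's current fence. */
		ret = drm_syncobj_create(&syncobj, 0, fence);
		if (ret)
			return ret;

		/* Install the syncobj in this file's handle table. */
		ret = drm_syncobj_get_handle(file_private, syncobj, handle);

		/* The handle table took its own reference; drop ours. */
		drm_syncobj_put(syncobj);
		return ret;
	}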
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
index a8370775ed50..baccc63db106 100644
--- a/drivers/gpu/drm/drm_trace.h
+++ b/drivers/gpu/drm/drm_trace.h
@@ -62,5 +62,5 @@ TRACE_EVENT(drm_vblank_event_delivered,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 70f2b9593edc..09c1c4ff93ca 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -78,28 +78,20 @@
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
- struct timeval *tvblank, bool in_vblank_irq);
+ ktime_t *tvblank, bool in_vblank_irq);
static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
-/*
- * Default to use monotonic timestamps for wait-for-vblank and page-flip
- * complete events.
- */
-unsigned int drm_timestamp_monotonic = 1;
-
static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
-module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
-MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
static void store_vblank(struct drm_device *dev, unsigned int pipe,
u32 vblank_count_inc,
- struct timeval *t_vblank, u32 last)
+ ktime_t t_vblank, u32 last)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -108,7 +100,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
vblank->last = last;
write_seqlock(&vblank->seqlock);
- vblank->time = *t_vblank;
+ vblank->time = t_vblank;
vblank->count += vblank_count_inc;
write_sequnlock(&vblank->seqlock);
}
@@ -151,7 +143,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
{
u32 cur_vblank;
bool rc;
- struct timeval t_vblank;
+ ktime_t t_vblank;
int count = DRM_TIMESTAMP_MAXRETRIES;
spin_lock(&dev->vblank_time_lock);
@@ -171,13 +163,13 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
* interrupt and assign 0 for now, to mark the vblanktimestamp as invalid.
*/
if (!rc)
- t_vblank = (struct timeval) {0, 0};
+ t_vblank = 0;
/*
* +1 to make sure user will never see the same
* vblank counter value before and after a modeset
*/
- store_vblank(dev, pipe, 1, &t_vblank, cur_vblank);
+ store_vblank(dev, pipe, 1, t_vblank, cur_vblank);
spin_unlock(&dev->vblank_time_lock);
}
@@ -200,7 +192,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u32 cur_vblank, diff;
bool rc;
- struct timeval t_vblank;
+ ktime_t t_vblank;
int count = DRM_TIMESTAMP_MAXRETRIES;
int framedur_ns = vblank->framedur_ns;
@@ -225,11 +217,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
/* trust the hw counter when it's around */
diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
} else if (rc && framedur_ns) {
- const struct timeval *t_old;
- u64 diff_ns;
-
- t_old = &vblank->time;
- diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
+ u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
/*
* Figure out how many vblanks we've missed based
@@ -263,7 +251,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
}
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
- " current=%u, diff=%u, hw=%u hw_last=%u\n",
+ " current=%llu, diff=%u, hw=%u hw_last=%u\n",
pipe, vblank->count, diff, cur_vblank, vblank->last);
if (diff == 0) {
@@ -278,9 +266,9 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
* for now, to mark the vblanktimestamp as invalid.
*/
if (!rc && !in_vblank_irq)
- t_vblank = (struct timeval) {0, 0};
+ t_vblank = 0;
- store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
+ store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
}
static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
@@ -311,8 +299,8 @@ u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
u32 vblank;
unsigned long flags;
- WARN(!dev->driver->get_vblank_timestamp,
- "This function requires support for accurate vblank timestamps.");
+ WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp,
+ "This function requires support for accurate vblank timestamps.");
spin_lock_irqsave(&dev->vblank_time_lock, flags);
@@ -556,7 +544,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* @pipe: index of CRTC whose vblank timestamp to retrieve
* @max_error: Desired maximum allowable error in timestamps (nanosecs)
* On return contains true maximum error of timestamp
- * @vblank_time: Pointer to struct timeval which should receive the timestamp
+ * @vblank_time: Pointer to time which should receive the timestamp
* @in_vblank_irq:
* True when called from drm_crtc_handle_vblank(). Some drivers
* need to apply some workarounds for gpu-specific vblank irq quirks
@@ -584,10 +572,10 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
unsigned int pipe,
int *max_error,
- struct timeval *vblank_time,
+ ktime_t *vblank_time,
bool in_vblank_irq)
{
- struct timeval tv_etime;
+ struct timespec64 ts_etime, ts_vblank_time;
ktime_t stime, etime;
bool vbl_status;
struct drm_crtc *crtc;
@@ -676,41 +664,31 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
delta_ns = div_s64(1000000LL * (vpos * mode->crtc_htotal + hpos),
mode->crtc_clock);
- if (!drm_timestamp_monotonic)
- etime = ktime_mono_to_real(etime);
-
/* save this only for debugging purposes */
- tv_etime = ktime_to_timeval(etime);
+ ts_etime = ktime_to_timespec64(etime);
+ ts_vblank_time = ktime_to_timespec64(*vblank_time);
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
etime = ktime_sub_ns(etime, delta_ns);
- *vblank_time = ktime_to_timeval(etime);
+ *vblank_time = etime;
- DRM_DEBUG_VBL("crtc %u : v p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
+ DRM_DEBUG_VBL("crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n",
pipe, hpos, vpos,
- (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
- (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
- duration_ns/1000, i);
+ (u64)ts_etime.tv_sec, ts_etime.tv_nsec / 1000,
+ (u64)ts_vblank_time.tv_sec, ts_vblank_time.tv_nsec / 1000,
+ duration_ns / 1000, i);
return true;
}
EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
-static struct timeval get_drm_timestamp(void)
-{
- ktime_t now;
-
- now = drm_timestamp_monotonic ? ktime_get() : ktime_get_real();
- return ktime_to_timeval(now);
-}
-
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
* vblank interval
* @dev: DRM device
* @pipe: index of CRTC whose vblank timestamp to retrieve
- * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @tvblank: Pointer to target time which should receive the timestamp
* @in_vblank_irq:
* True when called from drm_crtc_handle_vblank(). Some drivers
* need to apply some workarounds for gpu-specific vblank irq quirks
@@ -728,7 +706,7 @@ static struct timeval get_drm_timestamp(void)
*/
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
- struct timeval *tvblank, bool in_vblank_irq)
+ ktime_t *tvblank, bool in_vblank_irq)
{
bool ret = false;
@@ -744,7 +722,7 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
* Return current monotonic/gettimeofday timestamp as best estimate.
*/
if (!ret)
- *tvblank = get_drm_timestamp();
+ *tvblank = ktime_get();
return ret;
}
@@ -762,21 +740,35 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
* Returns:
* The software vblank counter.
*/
-u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
+u64 drm_crtc_vblank_count(struct drm_crtc *crtc)
{
return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_count);
-static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
- struct timeval *vblanktime)
+/**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the
+ * system timestamp corresponding to that vblank counter value.
+ * @dev: DRM device
+ * @pipe: index of CRTC whose counter to retrieve
+ * @vblanktime: Pointer to ktime_t to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Returns corresponding system timestamp of the time
+ * of the vblank interval that corresponds to the current vblank counter value.
+ *
+ * This is the legacy version of drm_crtc_vblank_count_and_time().
+ */
+static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
+ ktime_t *vblanktime)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- u32 vblank_count;
+ u64 vblank_count;
unsigned int seq;
if (WARN_ON(pipe >= dev->num_crtcs)) {
- *vblanktime = (struct timeval) { 0 };
+ *vblanktime = 0;
return 0;
}
@@ -793,15 +785,15 @@ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
* drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
* and the system timestamp corresponding to that vblank counter value
* @crtc: which counter to retrieve
- * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ * @vblanktime: Pointer to time to receive the vblank timestamp.
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
* of the vblank interval that corresponds to the current vblank counter value.
*/
-u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
- struct timeval *vblanktime)
+u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
+ ktime_t *vblanktime)
{
return drm_vblank_count_and_time(crtc->dev, drm_crtc_index(crtc),
vblanktime);
@@ -810,15 +802,30 @@ EXPORT_SYMBOL(drm_crtc_vblank_count_and_time);
static void send_vblank_event(struct drm_device *dev,
struct drm_pending_vblank_event *e,
- unsigned long seq, struct timeval *now)
+ u64 seq, ktime_t now)
{
- e->event.sequence = seq;
- e->event.tv_sec = now->tv_sec;
- e->event.tv_usec = now->tv_usec;
-
- trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe,
- e->event.sequence);
+ struct timespec64 tv;
+ switch (e->event.base.type) {
+ case DRM_EVENT_VBLANK:
+ case DRM_EVENT_FLIP_COMPLETE:
+ tv = ktime_to_timespec64(now);
+ e->event.vbl.sequence = seq;
+ /*
+ * e->event is a user space structure, with hardcoded unsigned
+ * 32-bit seconds/microseconds. This is safe as we always use
+ * monotonic timestamps since linux-4.15
+ */
+ e->event.vbl.tv_sec = tv.tv_sec;
+ e->event.vbl.tv_usec = tv.tv_nsec / 1000;
+ break;
+ case DRM_EVENT_CRTC_SEQUENCE:
+ if (seq)
+ e->event.seq.sequence = seq;
+ e->event.seq.time_ns = ktime_to_ns(now);
+ break;
+ }
+ trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq);
drm_send_event_locked(dev, &e->base);
}
@@ -869,8 +876,7 @@ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
assert_spin_locked(&dev->event_lock);
e->pipe = pipe;
- e->event.sequence = drm_vblank_count(dev, pipe);
- e->event.crtc_id = crtc->base.id;
+ e->sequence = drm_crtc_accurate_vblank_count(crtc) + 1;
list_add_tail(&e->base.link, &dev->vblank_event_list);
}
EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
@@ -890,19 +896,19 @@ void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e)
{
struct drm_device *dev = crtc->dev;
- unsigned int seq, pipe = drm_crtc_index(crtc);
- struct timeval now;
+ u64 seq;
+ unsigned int pipe = drm_crtc_index(crtc);
+ ktime_t now;
if (dev->num_crtcs > 0) {
seq = drm_vblank_count_and_time(dev, pipe, &now);
} else {
seq = 0;
- now = get_drm_timestamp();
+ now = ktime_get();
}
e->pipe = pipe;
- e->event.crtc_id = crtc->base.id;
- send_vblank_event(dev, e, seq, &now);
+ send_vblank_event(dev, e, seq, now);
}
EXPORT_SYMBOL(drm_crtc_send_vblank_event);
@@ -1100,9 +1106,10 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e, *t;
- struct timeval now;
+
+ ktime_t now;
unsigned long irqflags;
- unsigned int seq;
+ u64 seq;
if (WARN_ON(pipe >= dev->num_crtcs))
return;
@@ -1137,11 +1144,11 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
if (e->pipe != pipe)
continue;
DRM_DEBUG("Sending premature vblank event on disable: "
- "wanted %u, current %u\n",
- e->event.sequence, seq);
+ "wanted %llu, current %llu\n",
+ e->sequence, seq);
list_del(&e->base.link);
drm_vblank_put(dev, pipe);
- send_vblank_event(dev, e, seq, &now);
+ send_vblank_event(dev, e, seq, now);
}
spin_unlock_irqrestore(&dev->event_lock, irqflags);
@@ -1310,20 +1317,21 @@ int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static inline bool vblank_passed(u32 seq, u32 ref)
+static inline bool vblank_passed(u64 seq, u64 ref)
{
return (seq - ref) <= (1 << 23);
}
static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
+ u64 req_seq,
union drm_wait_vblank *vblwait,
struct drm_file *file_priv)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e;
- struct timeval now;
+ ktime_t now;
unsigned long flags;
- unsigned int seq;
+ u64 seq;
int ret;
e = kzalloc(sizeof(*e), GFP_KERNEL);
@@ -1334,8 +1342,14 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
e->pipe = pipe;
e->event.base.type = DRM_EVENT_VBLANK;
- e->event.base.length = sizeof(e->event);
- e->event.user_data = vblwait->request.signal;
+ e->event.base.length = sizeof(e->event.vbl);
+ e->event.vbl.user_data = vblwait->request.signal;
+ e->event.vbl.crtc_id = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+ if (crtc)
+ e->event.vbl.crtc_id = crtc->base.id;
+ }
spin_lock_irqsave(&dev->event_lock, flags);
@@ -1358,21 +1372,20 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
seq = drm_vblank_count_and_time(dev, pipe, &now);
- DRM_DEBUG("event on vblank count %u, current %u, crtc %u\n",
- vblwait->request.sequence, seq, pipe);
+ DRM_DEBUG("event on vblank count %llu, current %llu, crtc %u\n",
+ req_seq, seq, pipe);
- trace_drm_vblank_event_queued(file_priv, pipe,
- vblwait->request.sequence);
+ trace_drm_vblank_event_queued(file_priv, pipe, req_seq);
- e->event.sequence = vblwait->request.sequence;
- if (vblank_passed(seq, vblwait->request.sequence)) {
+ e->sequence = req_seq;
+ if (vblank_passed(seq, req_seq)) {
drm_vblank_put(dev, pipe);
- send_vblank_event(dev, e, seq, &now);
+ send_vblank_event(dev, e, seq, now);
vblwait->reply.sequence = seq;
} else {
/* drm_handle_vblank_events will call drm_vblank_put */
list_add_tail(&e->base.link, &dev->vblank_event_list);
- vblwait->reply.sequence = vblwait->request.sequence;
+ vblwait->reply.sequence = req_seq;
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -1398,13 +1411,49 @@ static bool drm_wait_vblank_is_query(union drm_wait_vblank *vblwait)
_DRM_VBLANK_NEXTONMISS));
}
+/*
+ * Widen a 32-bit param to 64-bits.
+ *
+ * \param narrow 32-bit value (missing upper 32 bits)
+ * \param near 64-bit value that should be 'close' to the result
+ *
+ * This function returns a 64-bit value using the lower 32-bits from
+ * 'narrow' and constructing the upper 32-bits so that the result is
+ * as close as possible to 'near'.
+ */
+
+static u64 widen_32_to_64(u32 narrow, u64 near)
+{
+ return near + (s32) (narrow - near);
+}
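As a worked illustration (not part of the patch), the widening stays correct across a 32-bit wrap of the request:

	/* Illustration only: a current count just below a 32-bit boundary and
	 * a request just past it.  (s32)(narrow - near) evaluates to +5, so
	 * the widened value lands on the far side of the wrap instead of
	 * falling back by ~2^32. */
	u64 near = 0xfffffffeULL;			/* current 64-bit count */
	u32 narrow = 3;					/* 32-bit request */
	u64 wide = near + (s32)(narrow - near);		/* == 0x100000003 */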
+
+static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe,
+ struct drm_wait_vblank_reply *reply)
+{
+ ktime_t now;
+ struct timespec64 ts;
+
+ /*
+ * drm_wait_vblank_reply is a UAPI structure that uses 'long'
+ * to store the seconds. This is safe as we always use monotonic
+ * timestamps since linux-4.15.
+ */
+ reply->sequence = drm_vblank_count_and_time(dev, pipe, &now);
+ ts = ktime_to_timespec64(now);
+ reply->tval_sec = (u32)ts.tv_sec;
+ reply->tval_usec = ts.tv_nsec / 1000;
+}
+
int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_crtc *crtc;
struct drm_vblank_crtc *vblank;
union drm_wait_vblank *vblwait = data;
int ret;
- unsigned int flags, seq, pipe, high_pipe;
+ u64 req_seq, seq;
+ unsigned int pipe_index;
+ unsigned int flags, pipe, high_pipe;
if (!dev->irq_enabled)
return -EINVAL;
@@ -1425,9 +1474,25 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
if (high_pipe)
- pipe = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
+ pipe_index = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
else
- pipe = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+ pipe_index = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
+
+ /* Convert lease-relative crtc index into global crtc index */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ pipe = 0;
+ drm_for_each_crtc(crtc, dev) {
+ if (drm_lease_held(file_priv, crtc->base.id)) {
+ if (pipe_index == 0)
+ break;
+ pipe_index--;
+ }
+ pipe++;
+ }
+ } else {
+ pipe = pipe_index;
+ }
+
if (pipe >= dev->num_crtcs)
return -EINVAL;
@@ -1439,12 +1504,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
if (dev->vblank_disable_immediate &&
drm_wait_vblank_is_query(vblwait) &&
READ_ONCE(vblank->enabled)) {
- struct timeval now;
-
- vblwait->reply.sequence =
- drm_vblank_count_and_time(dev, pipe, &now);
- vblwait->reply.tval_sec = now.tv_sec;
- vblwait->reply.tval_usec = now.tv_usec;
+ drm_wait_vblank_reply(dev, pipe, &vblwait->reply);
return 0;
}
@@ -1457,9 +1517,12 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
- vblwait->request.sequence += seq;
+ req_seq = seq + vblwait->request.sequence;
+ vblwait->request.sequence = req_seq;
vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+ break;
case _DRM_VBLANK_ABSOLUTE:
+ req_seq = widen_32_to_64(vblwait->request.sequence, seq);
break;
default:
ret = -EINVAL;
@@ -1467,31 +1530,30 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
}
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
- vblank_passed(seq, vblwait->request.sequence))
- vblwait->request.sequence = seq + 1;
+ vblank_passed(seq, req_seq)) {
+ req_seq = seq + 1;
+ vblwait->request.type &= ~_DRM_VBLANK_NEXTONMISS;
+ vblwait->request.sequence = req_seq;
+ }
if (flags & _DRM_VBLANK_EVENT) {
/* must hold on to the vblank ref until the event fires
* drm_vblank_put will be called asynchronously
*/
- return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
+ return drm_queue_vblank_event(dev, pipe, req_seq, vblwait, file_priv);
}
- if (vblwait->request.sequence != seq) {
- DRM_DEBUG("waiting on vblank count %u, crtc %u\n",
- vblwait->request.sequence, pipe);
+ if (req_seq != seq) {
+ DRM_DEBUG("waiting on vblank count %llu, crtc %u\n",
+ req_seq, pipe);
DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
vblank_passed(drm_vblank_count(dev, pipe),
- vblwait->request.sequence) ||
+ req_seq) ||
!READ_ONCE(vblank->enabled));
}
if (ret != -EINTR) {
- struct timeval now;
-
- vblwait->reply.sequence = drm_vblank_count_and_time(dev, pipe, &now);
- vblwait->reply.tval_sec = now.tv_sec;
- vblwait->reply.tval_usec = now.tv_usec;
+ drm_wait_vblank_reply(dev, pipe, &vblwait->reply);
DRM_DEBUG("crtc %d returning %u to client\n",
pipe, vblwait->reply.sequence);
@@ -1507,8 +1569,8 @@ done:
static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
{
struct drm_pending_vblank_event *e, *t;
- struct timeval now;
- unsigned int seq;
+ ktime_t now;
+ u64 seq;
assert_spin_locked(&dev->event_lock);
@@ -1517,15 +1579,15 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != pipe)
continue;
- if (!vblank_passed(seq, e->event.sequence))
+ if (!vblank_passed(seq, e->sequence))
continue;
- DRM_DEBUG("vblank event on %u, current %u\n",
- e->event.sequence, seq);
+ DRM_DEBUG("vblank event on %llu, current %llu\n",
+ e->sequence, seq);
list_del(&e->base.link);
drm_vblank_put(dev, pipe);
- send_vblank_event(dev, e, seq, &now);
+ send_vblank_event(dev, e, seq, now);
}
trace_drm_vblank_event(pipe, seq);
@@ -1611,3 +1673,166 @@ bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_handle_vblank);
+
+/*
+ * Get crtc VBLANK count.
+ *
+ * \param dev DRM device
+ * \param data user argument, pointing to a drm_crtc_get_sequence structure.
+ * \param file_priv drm file private for the user's open file descriptor
+ */
+
+int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_crtc *crtc;
+ struct drm_vblank_crtc *vblank;
+ int pipe;
+ struct drm_crtc_get_sequence *get_seq = data;
+ ktime_t now;
+ bool vblank_enabled;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if (!dev->irq_enabled)
+ return -EINVAL;
+
+ crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id);
+ if (!crtc)
+ return -ENOENT;
+
+ pipe = drm_crtc_index(crtc);
+
+ vblank = &dev->vblank[pipe];
+ vblank_enabled = dev->vblank_disable_immediate && READ_ONCE(vblank->enabled);
+
+ if (!vblank_enabled) {
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret) {
+ DRM_DEBUG("crtc %d failed to acquire vblank counter, %d\n", pipe, ret);
+ return ret;
+ }
+ }
+ drm_modeset_lock(&crtc->mutex, NULL);
+ if (crtc->state)
+ get_seq->active = crtc->state->enable;
+ else
+ get_seq->active = crtc->enabled;
+ drm_modeset_unlock(&crtc->mutex);
+ get_seq->sequence = drm_vblank_count_and_time(dev, pipe, &now);
+ get_seq->sequence_ns = ktime_to_ns(now);
+ if (!vblank_enabled)
+ drm_crtc_vblank_put(crtc);
+ return 0;
+}
+
+/*
+ * Queue an event for VBLANK sequence
+ *
+ * \param dev DRM device
+ * \param data user argument, pointing to a drm_crtc_queue_sequence structure.
+ * \param file_priv drm file private for the user's open file descriptor
+ */
+
+int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_crtc *crtc;
+ struct drm_vblank_crtc *vblank;
+ int pipe;
+ struct drm_crtc_queue_sequence *queue_seq = data;
+ ktime_t now;
+ struct drm_pending_vblank_event *e;
+ u32 flags;
+ u64 seq;
+ u64 req_seq;
+ int ret;
+ unsigned long spin_flags;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if (!dev->irq_enabled)
+ return -EINVAL;
+
+ crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id);
+ if (!crtc)
+ return -ENOENT;
+
+ flags = queue_seq->flags;
+ /* Check valid flag bits */
+ if (flags & ~(DRM_CRTC_SEQUENCE_RELATIVE|
+ DRM_CRTC_SEQUENCE_NEXT_ON_MISS))
+ return -EINVAL;
+
+ pipe = drm_crtc_index(crtc);
+
+ vblank = &dev->vblank[pipe];
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (e == NULL)
+ return -ENOMEM;
+
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret) {
+ DRM_DEBUG("crtc %d failed to acquire vblank counter, %d\n", pipe, ret);
+ goto err_free;
+ }
+
+ seq = drm_vblank_count_and_time(dev, pipe, &now);
+ req_seq = queue_seq->sequence;
+
+ if (flags & DRM_CRTC_SEQUENCE_RELATIVE)
+ req_seq += seq;
+
+ if ((flags & DRM_CRTC_SEQUENCE_NEXT_ON_MISS) && vblank_passed(seq, req_seq))
+ req_seq = seq + 1;
+
+ e->pipe = pipe;
+ e->event.base.type = DRM_EVENT_CRTC_SEQUENCE;
+ e->event.base.length = sizeof(e->event.seq);
+ e->event.seq.user_data = queue_seq->user_data;
+
+ spin_lock_irqsave(&dev->event_lock, spin_flags);
+
+ /*
+ * drm_crtc_vblank_off() might have been called after we called
+ * drm_crtc_vblank_get(). drm_crtc_vblank_off() holds event_lock around the
+ * vblank disable, so no need for further locking. The reference from
+ * drm_crtc_vblank_get() protects against vblank disable from another source.
+ */
+ if (!READ_ONCE(vblank->enabled)) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ ret = drm_event_reserve_init_locked(dev, file_priv, &e->base,
+ &e->event.base);
+
+ if (ret)
+ goto err_unlock;
+
+ e->sequence = req_seq;
+
+ if (vblank_passed(seq, req_seq)) {
+ drm_crtc_vblank_put(crtc);
+ send_vblank_event(dev, e, seq, now);
+ queue_seq->sequence = seq;
+ } else {
+ /* drm_handle_vblank_events will call drm_vblank_put */
+ list_add_tail(&e->base.link, &dev->vblank_event_list);
+ queue_seq->sequence = req_seq;
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, spin_flags);
+ return 0;
+
+err_unlock:
+ spin_unlock_irqrestore(&dev->event_lock, spin_flags);
+ drm_crtc_vblank_put(crtc);
+err_free:
+ kfree(e);
+ return ret;
+}
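The two new ioctls expose 64-bit sequence numbers and nanosecond timestamps directly to userspace. A hedged userspace sketch follows; the struct and flag names are taken from the patch, while the DRM_IOCTL_CRTC_GET_SEQUENCE / DRM_IOCTL_CRTC_QUEUE_SEQUENCE request macros and the plain ioctl() usage are assumptions about the matching uapi additions:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>	/* assumed to carry the new uapi definitions */

	/* Query the current sequence on a CRTC, then ask for an event one
	 * vblank later. */
	static int example_queue_next_vblank(int drm_fd, uint32_t crtc_id)
	{
		struct drm_crtc_get_sequence get = { .crtc_id = crtc_id };
		struct drm_crtc_queue_sequence queue = {
			.crtc_id = crtc_id,
			.flags = DRM_CRTC_SEQUENCE_RELATIVE,	/* 'sequence' is a delta */
			.sequence = 1,				/* next vblank */
			.user_data = 0,
		};

		/* get.sequence / get.sequence_ns now hold the current count
		 * and its timestamp. */
		if (ioctl(drm_fd, DRM_IOCTL_CRTC_GET_SEQUENCE, &get))
			return -1;

		/* A DRM_EVENT_CRTC_SEQUENCE event is delivered on drm_fd once
		 * the requested sequence passes; 'queue.sequence' is updated
		 * to the absolute sequence that was actually queued. */
		return ioctl(drm_fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, &queue);
	}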
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index 38b477b5fbf9..a29b8f59eb15 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -7,8 +7,6 @@ config DRM_ETNAVIV
select SHMEM
select SYNC_FILE
select TMPFS
- select IOMMU_API
- select IOMMU_SUPPORT
select WANT_DEV_COREDUMP
select CMA if HAVE_DMA_CONTIGUOUS
select DMA_CMA if HAVE_DMA_CONTIGUOUS
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
index ab3f551831d7..1281c8d4fae5 100644
--- a/drivers/gpu/drm/etnaviv/Makefile
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -11,6 +11,7 @@ etnaviv-y := \
etnaviv_gpu.o \
etnaviv_iommu_v2.o \
etnaviv_iommu.o \
- etnaviv_mmu.o
+ etnaviv_mmu.o \
+ etnaviv_perfmon.o
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index ed9588f36bc9..9e7098e3207f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -250,6 +250,42 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
}
}
+/* Append a 'sync point' to the ring buffer. */
+void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
+{
+ struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ unsigned int waitlink_offset = buffer->user_size - 16;
+ u32 dwords, target;
+
+ /*
+ * We need at most 4 dwords in the return target:
+ * 1 event + 1 end + 1 wait + 1 link.
+ */
+ dwords = 4;
+ target = etnaviv_buffer_reserve(gpu, buffer, dwords);
+
+ /* Signal sync point event */
+ CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
+ VIVS_GL_EVENT_FROM_PE);
+
+ /* Stop the FE to 'pause' the GPU */
+ CMD_END(buffer);
+
+ /* Append waitlink */
+ CMD_WAIT(buffer);
+ CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
+ buffer->user_size - 4);
+
+ /*
+ * Kick off the 'sync point' command by replacing the previous
+ * WAIT with a link to the address in the ring buffer.
+ */
+ etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+ VIV_FE_LINK_HEADER_OP_LINK |
+ VIV_FE_LINK_HEADER_PREFETCH(dwords),
+ target);
+}
+
/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
struct etnaviv_cmdbuf *cmdbuf)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
index 633e0f07cbac..66ac79558bbd 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -19,6 +19,7 @@
#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
+#include "etnaviv_perfmon.h"
#define SUBALLOC_SIZE SZ_256K
#define SUBALLOC_GRANULE SZ_4K
@@ -87,9 +88,10 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
struct etnaviv_cmdbuf *
etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
- size_t nr_bos)
+ size_t nr_bos, size_t nr_pmrs)
{
struct etnaviv_cmdbuf *cmdbuf;
+ struct etnaviv_perfmon_request *pmrs;
size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
sizeof(*cmdbuf));
int granule_offs, order, ret;
@@ -98,6 +100,12 @@ etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
if (!cmdbuf)
return NULL;
+ sz = sizeof(*pmrs) * nr_pmrs;
+ pmrs = kzalloc(sz, GFP_KERNEL);
+ if (!pmrs)
+ goto out_free_cmdbuf;
+
+ cmdbuf->pmrs = pmrs;
cmdbuf->suballoc = suballoc;
cmdbuf->size = size;
@@ -124,6 +132,10 @@ retry:
cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
return cmdbuf;
+
+out_free_cmdbuf:
+ kfree(cmdbuf);
+ return NULL;
}
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
@@ -139,6 +151,7 @@ void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
suballoc->free_space = 1;
mutex_unlock(&suballoc->lock);
wake_up_all(&suballoc->free_event);
+ kfree(cmdbuf->pmrs);
kfree(cmdbuf);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
index 80d78076c679..b6348b9f2a9d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
@@ -21,6 +21,7 @@
struct etnaviv_gpu;
struct etnaviv_cmdbuf_suballoc;
+struct etnaviv_perfmon_request;
struct etnaviv_cmdbuf {
/* suballocator this cmdbuf is allocated from */
@@ -38,6 +39,9 @@ struct etnaviv_cmdbuf {
u32 exec_state;
/* per GPU in-flight list */
struct list_head node;
+ /* perfmon requests */
+ unsigned int nr_pmrs;
+ struct etnaviv_perfmon_request *pmrs;
/* BOs attached to this command buffer */
unsigned int nr_bos;
struct etnaviv_vram_mapping *bo_map[0];
@@ -49,7 +53,7 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
struct etnaviv_cmdbuf *
etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
- size_t nr_bos);
+ size_t nr_bos, size_t nr_pmrs);
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 2cb4773823c2..491eddf9b150 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -23,6 +23,7 @@
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
+#include "etnaviv_perfmon.h"
#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
static bool reglog;
@@ -451,6 +452,46 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
return ret;
}
+static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct drm_etnaviv_pm_domain *args = data;
+ struct etnaviv_gpu *gpu;
+
+ /* reject as long as the feature isn't stable */
+ return -EINVAL;
+
+ if (args->pipe >= ETNA_MAX_PIPES)
+ return -EINVAL;
+
+ gpu = priv->gpu[args->pipe];
+ if (!gpu)
+ return -ENXIO;
+
+ return etnaviv_pm_query_dom(gpu, args);
+}
+
+static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct drm_etnaviv_pm_signal *args = data;
+ struct etnaviv_gpu *gpu;
+
+ /* reject as long as the feature isn't stable */
+ return -EINVAL;
+
+ if (args->pipe >= ETNA_MAX_PIPES)
+ return -EINVAL;
+
+ gpu = priv->gpu[args->pipe];
+ if (!gpu)
+ return -ENXIO;
+
+ return etnaviv_pm_query_sig(gpu, args);
+}
+
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
@@ -463,6 +504,8 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 058389f93b69..d249acb6da08 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -26,7 +26,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/list.h>
-#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/sizes.h>
@@ -92,15 +91,12 @@ int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
void etnaviv_gem_free_object(struct drm_gem_object *obj);
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle);
-struct drm_gem_object *etnaviv_gem_new_locked(struct drm_device *dev,
- u32 size, u32 flags);
-struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
- u32 size, u32 flags);
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
uintptr_t ptr, u32 size, u32 flags, u32 *handle);
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
+void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
struct etnaviv_cmdbuf *cmdbuf);
void etnaviv_validate_init(void);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 57881167ccd2..daee3f1196df 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -704,25 +704,6 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
return ret;
}
-struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
- u32 size, u32 flags)
-{
- struct drm_gem_object *obj;
- int ret;
-
- obj = __etnaviv_gem_new(dev, size, flags);
- if (IS_ERR(obj))
- return obj;
-
- ret = etnaviv_gem_obj_add(dev, obj);
- if (ret < 0) {
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(ret);
- }
-
- return obj;
-}
-
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
struct etnaviv_gem_object **res)
@@ -779,7 +760,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
up_read(&mm->mmap_sem);
if (ret < 0) {
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
return ERR_PTR(ret);
}
@@ -852,7 +833,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
}
}
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
work = kmalloc(sizeof(*work), GFP_KERNEL);
@@ -886,7 +867,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
if (etnaviv_obj->pages) {
int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
- release_pages(etnaviv_obj->pages, npages, 0);
+ release_pages(etnaviv_obj->pages, npages);
kvfree(etnaviv_obj->pages);
}
put_task_struct(etnaviv_obj->userptr.task);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 46dfe0737f43..ff911541a190 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -21,6 +21,7 @@
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
+#include "etnaviv_perfmon.h"
/*
* Cmdstream submission:
@@ -283,6 +284,54 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
return 0;
}
+static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
+ struct etnaviv_cmdbuf *cmdbuf,
+ const struct drm_etnaviv_gem_submit_pmr *pmrs,
+ u32 nr_pms)
+{
+ u32 i;
+
+ for (i = 0; i < nr_pms; i++) {
+ const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
+ struct etnaviv_gem_submit_bo *bo;
+ int ret;
+
+ ret = submit_bo(submit, r->read_idx, &bo);
+ if (ret)
+ return ret;
+
+ /* at offset 0 a sequence number gets stored, which is used for userspace sync */
+ if (r->read_offset == 0) {
+ DRM_ERROR("perfmon request: offset is 0");
+ return -EINVAL;
+ }
+
+ if (r->read_offset >= bo->obj->base.size - sizeof(u32)) {
+ DRM_ERROR("perfmon request: offset %u outside object", i);
+ return -EINVAL;
+ }
+
+ if (r->flags & ~(ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST)) {
+ DRM_ERROR("perfmon request: flags are not valid");
+ return -EINVAL;
+ }
+
+ if (etnaviv_pm_req_validate(r, cmdbuf->exec_state)) {
+ DRM_ERROR("perfmon request: domain or signal not valid");
+ return -EINVAL;
+ }
+
+ cmdbuf->pmrs[i].flags = r->flags;
+ cmdbuf->pmrs[i].domain = r->domain;
+ cmdbuf->pmrs[i].signal = r->signal;
+ cmdbuf->pmrs[i].sequence = r->sequence;
+ cmdbuf->pmrs[i].offset = r->read_offset;
+ cmdbuf->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
+ }
+
+ return 0;
+}
+
static void submit_cleanup(struct etnaviv_gem_submit *submit)
{
unsigned i;
@@ -306,6 +355,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_etnaviv_gem_submit *args = data;
struct drm_etnaviv_gem_submit_reloc *relocs;
+ struct drm_etnaviv_gem_submit_pmr *pmrs;
struct drm_etnaviv_gem_submit_bo *bos;
struct etnaviv_gem_submit *submit;
struct etnaviv_cmdbuf *cmdbuf;
@@ -347,11 +397,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
*/
bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
+ pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
ALIGN(args->stream_size, 8) + 8,
- args->nr_bos);
- if (!bos || !relocs || !stream || !cmdbuf) {
+ args->nr_bos, args->nr_pmrs);
+ if (!bos || !relocs || !pmrs || !stream || !cmdbuf) {
ret = -ENOMEM;
goto err_submit_cmds;
}
@@ -373,6 +424,14 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_cmds;
}
+ ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
+ args->nr_pmrs * sizeof(*pmrs));
+ if (ret) {
+ ret = -EFAULT;
+ goto err_submit_cmds;
+ }
+ cmdbuf->nr_pmrs = args->nr_pmrs;
+
ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
args->stream_size);
if (ret) {
@@ -441,6 +500,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
+ ret = submit_perfmon_validate(submit, cmdbuf, pmrs, args->nr_pmrs);
+ if (ret)
+ goto out;
+
memcpy(cmdbuf->vaddr, stream, args->stream_size);
cmdbuf->user_size = ALIGN(args->stream_size, 8);
@@ -496,6 +559,8 @@ err_submit_cmds:
kvfree(bos);
if (relocs)
kvfree(relocs);
+ if (pmrs)
+ kvfree(pmrs);
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 4b152e0d31a6..e19cbe05da2a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -25,6 +25,7 @@
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
+#include "etnaviv_perfmon.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
@@ -420,9 +421,10 @@ static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
gpu->base_rate_shader >> gpu->freq_scale);
} else {
unsigned int fscale = 1 << (6 - gpu->freq_scale);
- u32 clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
- VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+ u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+ clock &= ~VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK;
+ clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
etnaviv_gpu_load_clock(gpu, clock);
}
}
@@ -433,24 +435,14 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
unsigned long timeout;
bool failed = true;
- /* TODO
- *
- * - clock gating
- * - puls eater
- * - what about VG?
- */
-
/* We hope that the GPU resets in under one second */
timeout = jiffies + msecs_to_jiffies(1000);
while (time_is_after_jiffies(timeout)) {
/* enable clock */
- etnaviv_gpu_update_clock(gpu);
-
- control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
-
- /* Wait for stable clock. Vivante's code waited for 1ms */
- usleep_range(1000, 10000);
+ unsigned int fscale = 1 << (6 - gpu->freq_scale);
+ control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+ etnaviv_gpu_load_clock(gpu, control);
/* isolate the GPU. */
control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
@@ -461,7 +453,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
/* wait for reset. */
- msleep(1);
+ usleep_range(10, 20);
/* reset soft reset bit. */
control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
@@ -490,6 +482,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
continue;
}
+ /* disable debug registers, as they are not normally needed */
+ control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+
failed = false;
break;
}
@@ -721,7 +717,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
}
/* Create buffer: */
- gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0);
+ gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0, 0);
if (!gpu->buffer) {
ret = -ENOMEM;
dev_err(gpu->dev, "could not create command buffer\n");
@@ -739,10 +735,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
/* Setup event management */
spin_lock_init(&gpu->event_spinlock);
init_completion(&gpu->event_free);
- for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
- gpu->event[i].used = false;
+ bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
+ for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
complete(&gpu->event_free);
- }
/* Now program the hardware */
mutex_lock(&gpu->lock);
@@ -926,7 +921,7 @@ static void recover_worker(struct work_struct *work)
struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
recover_work);
unsigned long flags;
- unsigned int i;
+ unsigned int i = 0;
dev_err(gpu->dev, "hangcheck recover!\n");
@@ -945,14 +940,12 @@ static void recover_worker(struct work_struct *work)
/* complete all events, the GPU won't do it after the reset */
spin_lock_irqsave(&gpu->event_spinlock, flags);
- for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
- if (!gpu->event[i].used)
- continue;
+ for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS) {
dma_fence_signal(gpu->event[i].fence);
gpu->event[i].fence = NULL;
- gpu->event[i].used = false;
complete(&gpu->event_free);
}
+ bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
gpu->completed_fence = gpu->active_fence;
@@ -1140,30 +1133,45 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
* event management:
*/
-static unsigned int event_alloc(struct etnaviv_gpu *gpu)
+static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
+ unsigned int *events)
{
- unsigned long ret, flags;
- unsigned int i, event = ~0U;
+ unsigned long flags, timeout = msecs_to_jiffies(10 * 10000);
+ unsigned i, acquired = 0;
- ret = wait_for_completion_timeout(&gpu->event_free,
- msecs_to_jiffies(10 * 10000));
- if (!ret)
- dev_err(gpu->dev, "wait_for_completion_timeout failed");
+ for (i = 0; i < nr_events; i++) {
+ unsigned long ret;
- spin_lock_irqsave(&gpu->event_spinlock, flags);
+ ret = wait_for_completion_timeout(&gpu->event_free, timeout);
- /* find first free event */
- for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
- if (gpu->event[i].used == false) {
- gpu->event[i].used = true;
- event = i;
- break;
+ if (!ret) {
+ dev_err(gpu->dev, "wait_for_completion_timeout failed");
+ goto out;
}
+
+ acquired++;
+ timeout = ret;
+ }
+
+ spin_lock_irqsave(&gpu->event_spinlock, flags);
+
+ for (i = 0; i < nr_events; i++) {
+ int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
+
+ events[i] = event;
+ memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
+ set_bit(event, gpu->event_bitmap);
}
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
- return event;
+ return 0;
+
+out:
+ for (i = 0; i < acquired; i++)
+ complete(&gpu->event_free);
+
+ return -EBUSY;
}
static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
@@ -1172,12 +1180,12 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
spin_lock_irqsave(&gpu->event_spinlock, flags);
- if (gpu->event[event].used == false) {
+ if (!test_bit(event, gpu->event_bitmap)) {
dev_warn(gpu->dev, "event %u is already marked as free",
event);
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
} else {
- gpu->event[event].used = false;
+ clear_bit(event, gpu->event_bitmap);
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
complete(&gpu->event_free);
@@ -1311,12 +1319,71 @@ void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
pm_runtime_put_autosuspend(gpu->dev);
}
+static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
+ struct etnaviv_event *event, unsigned int flags)
+{
+ const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
+ unsigned int i;
+
+ for (i = 0; i < cmdbuf->nr_pmrs; i++) {
+ const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
+
+ if (pmr->flags == flags)
+ etnaviv_perfmon_process(gpu, pmr);
+ }
+}
+
+static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
+ struct etnaviv_event *event)
+{
+ u32 val;
+
+ /* disable clock gating */
+ val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+ val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+ gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
+
+ /* enable debug register */
+ val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+ val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
+
+ sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
+}
+
+static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
+ struct etnaviv_event *event)
+{
+ const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
+ unsigned int i;
+ u32 val;
+
+ sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
+
+ for (i = 0; i < cmdbuf->nr_pmrs; i++) {
+ const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
+
+ *pmr->bo_vma = pmr->sequence;
+ }
+
+ /* disable debug register */
+ val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+ val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
+
+ /* enable clock gating */
+ val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+ val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+ gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
+}
+
+
/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
{
struct dma_fence *fence;
- unsigned int event, i;
+ unsigned int i, nr_events = 1, event[3];
int ret;
ret = etnaviv_gpu_pm_get_sync(gpu);
@@ -1332,10 +1399,19 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
*
*/
- event = event_alloc(gpu);
- if (unlikely(event == ~0U)) {
- DRM_ERROR("no free event\n");
- ret = -EBUSY;
+ /*
+ * if there are performance monitor requests we need to have
+ * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
+ * requests.
+ * - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
+ * and update the sequence number for userspace.
+ */
+ if (cmdbuf->nr_pmrs)
+ nr_events = 3;
+
+ ret = event_alloc(gpu, nr_events, event);
+ if (ret) {
+ DRM_ERROR("no free events\n");
goto out_pm_put;
}
@@ -1343,12 +1419,14 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
fence = etnaviv_gpu_fence_alloc(gpu);
if (!fence) {
- event_free(gpu, event);
+ for (i = 0; i < nr_events; i++)
+ event_free(gpu, event[i]);
+
ret = -ENOMEM;
goto out_unlock;
}
- gpu->event[event].fence = fence;
+ gpu->event[event[0]].fence = fence;
submit->fence = dma_fence_get(fence);
gpu->active_fence = submit->fence->seqno;
@@ -1358,7 +1436,19 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
gpu->lastctx = cmdbuf->ctx;
}
- etnaviv_buffer_queue(gpu, event, cmdbuf);
+ if (cmdbuf->nr_pmrs) {
+ gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
+ gpu->event[event[1]].cmdbuf = cmdbuf;
+ etnaviv_sync_point_queue(gpu, event[1]);
+ }
+
+ etnaviv_buffer_queue(gpu, event[0], cmdbuf);
+
+ if (cmdbuf->nr_pmrs) {
+ gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
+ gpu->event[event[2]].cmdbuf = cmdbuf;
+ etnaviv_sync_point_queue(gpu, event[2]);
+ }
cmdbuf->fence = fence;
list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
@@ -1394,6 +1484,24 @@ out_pm_put:
return ret;
}
+static void etnaviv_process_sync_point(struct etnaviv_gpu *gpu,
+ struct etnaviv_event *event)
+{
+ u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+
+ event->sync_point(gpu, event);
+ etnaviv_gpu_start_fe(gpu, addr + 2, 2);
+}
+
+static void sync_point_worker(struct work_struct *work)
+{
+ struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
+ sync_point_work);
+
+ etnaviv_process_sync_point(gpu, &gpu->event[gpu->sync_point_event]);
+ event_free(gpu, gpu->sync_point_event);
+}
+
/*
* Init/Cleanup:
*/
@@ -1440,7 +1548,15 @@ static irqreturn_t irq_handler(int irq, void *data)
dev_dbg(gpu->dev, "event %u\n", event);
+ if (gpu->event[event].sync_point) {
+ gpu->sync_point_event = event;
+ etnaviv_queue_work(gpu->drm, &gpu->sync_point_work);
+ }
+
fence = gpu->event[event].fence;
+ if (!fence)
+ continue;
+
gpu->event[event].fence = NULL;
dma_fence_signal(fence);
@@ -1645,6 +1761,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
INIT_LIST_HEAD(&gpu->active_cmd_list);
INIT_WORK(&gpu->retire_work, retire_worker);
+ INIT_WORK(&gpu->sync_point_work, sync_point_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
init_waitqueue_head(&gpu->fence_event);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 689cb8f3680c..4f10f147297a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -88,13 +88,17 @@ struct etnaviv_chip_identity {
};
struct etnaviv_event {
- bool used;
struct dma_fence *fence;
+ struct etnaviv_cmdbuf *cmdbuf;
+
+ void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};
struct etnaviv_cmdbuf_suballoc;
struct etnaviv_cmdbuf;
+#define ETNA_NR_EVENTS 30
+
struct etnaviv_gpu {
struct drm_device *drm;
struct thermal_cooling_device *cooling;
@@ -112,7 +116,8 @@ struct etnaviv_gpu {
u32 memory_base;
/* event management: */
- struct etnaviv_event event[30];
+ DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
+ struct etnaviv_event event[ETNA_NR_EVENTS];
struct completion event_free;
spinlock_t event_spinlock;
@@ -133,6 +138,10 @@ struct etnaviv_gpu {
/* worker for handling active-list retiring: */
struct work_struct retire_work;
+ /* worker for handling 'sync' points: */
+ struct work_struct sync_point_work;
+ int sync_point_event;
+
void __iomem *mmio;
int irq;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 7a7c97f599d7..14e24ac6573f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -14,7 +14,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -31,174 +30,115 @@
#define GPU_MEM_START 0x80000000
-struct etnaviv_iommu_domain_pgtable {
- u32 *pgtable;
- dma_addr_t paddr;
+struct etnaviv_iommuv1_domain {
+ struct etnaviv_iommu_domain base;
+ u32 *pgtable_cpu;
+ dma_addr_t pgtable_dma;
};
-struct etnaviv_iommu_domain {
- struct iommu_domain domain;
- struct device *dev;
- void *bad_page_cpu;
- dma_addr_t bad_page_dma;
- struct etnaviv_iommu_domain_pgtable pgtable;
- spinlock_t map_lock;
-};
-
-static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
-{
- return container_of(domain, struct etnaviv_iommu_domain, domain);
-}
-
-static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
- size_t size)
-{
- pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
- if (!pgtable->pgtable)
- return -ENOMEM;
-
- return 0;
-}
-
-static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
- size_t size)
+static struct etnaviv_iommuv1_domain *
+to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
- dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
-}
-
-static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
- unsigned long iova)
-{
- /* calcuate index into page table */
- unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
- phys_addr_t paddr;
-
- paddr = pgtable->pgtable[index];
-
- return paddr;
+ return container_of(domain, struct etnaviv_iommuv1_domain, base);
}
-static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
- unsigned long iova, phys_addr_t paddr)
-{
- /* calcuate index into page table */
- unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
-
- pgtable->pgtable[index] = paddr;
-}
-
-static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
+static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
{
u32 *p;
- int ret, i;
-
- etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
- SZ_4K,
- &etnaviv_domain->bad_page_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->bad_page_cpu)
+ int i;
+
+ etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
+ etnaviv_domain->base.dev,
+ SZ_4K,
+ &etnaviv_domain->base.bad_page_dma,
+ GFP_KERNEL);
+ if (!etnaviv_domain->base.bad_page_cpu)
return -ENOMEM;
- p = etnaviv_domain->bad_page_cpu;
+ p = etnaviv_domain->base.bad_page_cpu;
for (i = 0; i < SZ_4K / 4; i++)
*p++ = 0xdead55aa;
- ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
- if (ret < 0) {
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
- etnaviv_domain->bad_page_cpu,
- etnaviv_domain->bad_page_dma);
- return ret;
+ etnaviv_domain->pgtable_cpu =
+ dma_alloc_coherent(etnaviv_domain->base.dev, PT_SIZE,
+ &etnaviv_domain->pgtable_dma,
+ GFP_KERNEL);
+ if (!etnaviv_domain->pgtable_cpu) {
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->base.bad_page_cpu,
+ etnaviv_domain->base.bad_page_dma);
+ return -ENOMEM;
}
for (i = 0; i < PT_ENTRIES; i++)
- etnaviv_domain->pgtable.pgtable[i] =
- etnaviv_domain->bad_page_dma;
-
- spin_lock_init(&etnaviv_domain->map_lock);
+ etnaviv_domain->pgtable_cpu[i] =
+ etnaviv_domain->base.bad_page_dma;
return 0;
}
-static void etnaviv_domain_free(struct iommu_domain *domain)
+static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
{
- struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_domain *etnaviv_domain =
+ to_etnaviv_domain(domain);
- pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);
+ dma_free_coherent(etnaviv_domain->base.dev, PT_SIZE,
+ etnaviv_domain->pgtable_cpu,
+ etnaviv_domain->pgtable_dma);
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
- etnaviv_domain->bad_page_cpu,
- etnaviv_domain->bad_page_dma);
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->base.bad_page_cpu,
+ etnaviv_domain->base.bad_page_dma);
kfree(etnaviv_domain);
}
-static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
{
- struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
if (size != SZ_4K)
return -EINVAL;
- spin_lock(&etnaviv_domain->map_lock);
- pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
- spin_unlock(&etnaviv_domain->map_lock);
+ etnaviv_domain->pgtable_cpu[index] = paddr;
return 0;
}
-static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
+static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
unsigned long iova, size_t size)
{
- struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_domain *etnaviv_domain =
+ to_etnaviv_domain(domain);
+ unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
if (size != SZ_4K)
return -EINVAL;
- spin_lock(&etnaviv_domain->map_lock);
- pgtable_write(&etnaviv_domain->pgtable, iova,
- etnaviv_domain->bad_page_dma);
- spin_unlock(&etnaviv_domain->map_lock);
+ etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
return SZ_4K;
}
-static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
-{
- struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
-
- return pgtable_read(&etnaviv_domain->pgtable, iova);
-}
-
-static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
+static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
{
return PT_SIZE;
}
-static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
+static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
- struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
+ struct etnaviv_iommuv1_domain *etnaviv_domain =
+ to_etnaviv_domain(domain);
- memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
+ memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
}
-static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
- .ops = {
- .domain_free = etnaviv_domain_free,
- .map = etnaviv_iommuv1_map,
- .unmap = etnaviv_iommuv1_unmap,
- .iova_to_phys = etnaviv_iommu_iova_to_phys,
- .pgsize_bitmap = SZ_4K,
- },
- .dump_size = etnaviv_iommuv1_dump_size,
- .dump = etnaviv_iommuv1_dump,
-};
-
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
{
- struct etnaviv_iommu_domain *etnaviv_domain =
+ struct etnaviv_iommuv1_domain *etnaviv_domain =
to_etnaviv_domain(gpu->mmu->domain);
u32 pgtable;
@@ -210,7 +150,7 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
/* set page table address in MC */
- pgtable = (u32)etnaviv_domain->pgtable.paddr;
+ pgtable = (u32)etnaviv_domain->pgtable_dma;
gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
@@ -219,28 +159,37 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
-struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
+const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
+ .free = etnaviv_iommuv1_domain_free,
+ .map = etnaviv_iommuv1_map,
+ .unmap = etnaviv_iommuv1_unmap,
+ .dump_size = etnaviv_iommuv1_dump_size,
+ .dump = etnaviv_iommuv1_dump,
+};
+
+struct etnaviv_iommu_domain *
+etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
{
- struct etnaviv_iommu_domain *etnaviv_domain;
+ struct etnaviv_iommuv1_domain *etnaviv_domain;
+ struct etnaviv_iommu_domain *domain;
int ret;
etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
if (!etnaviv_domain)
return NULL;
- etnaviv_domain->dev = gpu->dev;
+ domain = &etnaviv_domain->base;
- etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
- etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
- etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
- etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
- etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
+ domain->dev = gpu->dev;
+ domain->base = GPU_MEM_START;
+ domain->size = PT_ENTRIES * SZ_4K;
+ domain->ops = &etnaviv_iommuv1_ops;
ret = __etnaviv_iommu_init(etnaviv_domain);
if (ret)
goto out_free;
- return &etnaviv_domain->domain;
+ return &etnaviv_domain->base;
out_free:
kfree(etnaviv_domain);
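For the v1 MMU the page table remains a flat array indexed directly by GPU virtual address: with GPU_MEM_START at 0x80000000, an iova of 0x80042000 maps to entry (0x80042000 - 0x80000000) / 0x1000 = 0x42, which holds the DMA address of the backing page; unmapping simply points that entry back at bad_page_dma.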
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
index 8b51e7c16feb..01d59bf70d78 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
@@ -18,11 +18,14 @@
#define __ETNAVIV_IOMMU_H__
struct etnaviv_gpu;
+struct etnaviv_iommu_domain;
-struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
+struct etnaviv_iommu_domain *
+etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
-struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
+struct etnaviv_iommu_domain *
+etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
#endif /* __ETNAVIV_IOMMU_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index cbe447ac5974..fc60fc8ddbf0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -14,7 +14,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -40,10 +39,7 @@
#define MMUv2_MAX_STLB_ENTRIES 1024
struct etnaviv_iommuv2_domain {
- struct iommu_domain domain;
- struct device *dev;
- void *bad_page_cpu;
- dma_addr_t bad_page_dma;
+ struct etnaviv_iommu_domain base;
/* M(aster) TLB aka first level pagetable */
u32 *mtlb_cpu;
dma_addr_t mtlb_dma;
@@ -52,13 +48,15 @@ struct etnaviv_iommuv2_domain {
dma_addr_t stlb_dma[1024];
};
-static struct etnaviv_iommuv2_domain *to_etnaviv_domain(struct iommu_domain *domain)
+static struct etnaviv_iommuv2_domain *
+to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
{
- return container_of(domain, struct etnaviv_iommuv2_domain, domain);
+ return container_of(domain, struct etnaviv_iommuv2_domain, base);
}
-static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
@@ -68,7 +66,7 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
if (size != SZ_4K)
return -EINVAL;
- if (prot & IOMMU_WRITE)
+ if (prot & ETNAVIV_PROT_WRITE)
entry |= MMUv2_PTE_WRITEABLE;
mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
@@ -79,8 +77,8 @@ static int etnaviv_iommuv2_map(struct iommu_domain *domain, unsigned long iova,
return 0;
}
-static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size)
+static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
+ unsigned long iova, size_t size)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
@@ -97,38 +95,26 @@ static size_t etnaviv_iommuv2_unmap(struct iommu_domain *domain,
return SZ_4K;
}
-static phys_addr_t etnaviv_iommuv2_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
-{
- struct etnaviv_iommuv2_domain *etnaviv_domain =
- to_etnaviv_domain(domain);
- int mtlb_entry, stlb_entry;
-
- mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
- stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
-
- return etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] & ~(SZ_4K - 1);
-}
-
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
u32 *p;
int ret, i, j;
/* allocate scratch page */
- etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
- SZ_4K,
- &etnaviv_domain->bad_page_dma,
- GFP_KERNEL);
- if (!etnaviv_domain->bad_page_cpu) {
+ etnaviv_domain->base.bad_page_cpu = dma_alloc_coherent(
+ etnaviv_domain->base.dev,
+ SZ_4K,
+ &etnaviv_domain->base.bad_page_dma,
+ GFP_KERNEL);
+ if (!etnaviv_domain->base.bad_page_cpu) {
ret = -ENOMEM;
goto fail_mem;
}
- p = etnaviv_domain->bad_page_cpu;
+ p = etnaviv_domain->base.bad_page_cpu;
for (i = 0; i < SZ_4K / 4; i++)
*p++ = 0xdead55aa;
- etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->dev,
+ etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
SZ_4K,
&etnaviv_domain->mtlb_dma,
GFP_KERNEL);
@@ -140,7 +126,7 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
/* pre-populate STLB pages (may want to switch to on-demand later) */
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
etnaviv_domain->stlb_cpu[i] =
- dma_alloc_coherent(etnaviv_domain->dev,
+ dma_alloc_coherent(etnaviv_domain->base.dev,
SZ_4K,
&etnaviv_domain->stlb_dma[i],
GFP_KERNEL);
@@ -159,19 +145,19 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
return 0;
fail_mem:
- if (etnaviv_domain->bad_page_cpu)
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
- etnaviv_domain->bad_page_cpu,
- etnaviv_domain->bad_page_dma);
+ if (etnaviv_domain->base.bad_page_cpu)
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->base.bad_page_cpu,
+ etnaviv_domain->base.bad_page_dma);
if (etnaviv_domain->mtlb_cpu)
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->mtlb_cpu,
etnaviv_domain->mtlb_dma);
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
if (etnaviv_domain->stlb_cpu[i])
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->stlb_cpu[i],
etnaviv_domain->stlb_dma[i]);
}
@@ -179,23 +165,23 @@ fail_mem:
return ret;
}
-static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
+static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
int i;
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
- etnaviv_domain->bad_page_cpu,
- etnaviv_domain->bad_page_dma);
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->base.bad_page_cpu,
+ etnaviv_domain->base.bad_page_dma);
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->mtlb_cpu,
etnaviv_domain->mtlb_dma);
for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
if (etnaviv_domain->stlb_cpu[i])
- dma_free_coherent(etnaviv_domain->dev, SZ_4K,
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->stlb_cpu[i],
etnaviv_domain->stlb_dma[i]);
}
@@ -203,7 +189,7 @@ static void etnaviv_iommuv2_domain_free(struct iommu_domain *domain)
vfree(etnaviv_domain);
}
-static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
+static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
@@ -217,7 +203,7 @@ static size_t etnaviv_iommuv2_dump_size(struct iommu_domain *domain)
return dump_size;
}
-static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
+static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(domain);
@@ -230,18 +216,6 @@ static void etnaviv_iommuv2_dump(struct iommu_domain *domain, void *buf)
memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
}
-static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
- .ops = {
- .domain_free = etnaviv_iommuv2_domain_free,
- .map = etnaviv_iommuv2_map,
- .unmap = etnaviv_iommuv2_unmap,
- .iova_to_phys = etnaviv_iommuv2_iova_to_phys,
- .pgsize_bitmap = SZ_4K,
- },
- .dump_size = etnaviv_iommuv2_dump_size,
- .dump = etnaviv_iommuv2_dump,
-};
-
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
@@ -254,35 +228,45 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
prefetch = etnaviv_buffer_config_mmuv2(gpu,
(u32)etnaviv_domain->mtlb_dma,
- (u32)etnaviv_domain->bad_page_dma);
+ (u32)etnaviv_domain->base.bad_page_dma);
etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
prefetch);
etnaviv_gpu_wait_idle(gpu, 100);
gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
-struct iommu_domain *etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
+
+const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
+ .free = etnaviv_iommuv2_domain_free,
+ .map = etnaviv_iommuv2_map,
+ .unmap = etnaviv_iommuv2_unmap,
+ .dump_size = etnaviv_iommuv2_dump_size,
+ .dump = etnaviv_iommuv2_dump,
+};
+
+struct etnaviv_iommu_domain *
+etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommuv2_domain *etnaviv_domain;
+ struct etnaviv_iommu_domain *domain;
int ret;
etnaviv_domain = vzalloc(sizeof(*etnaviv_domain));
if (!etnaviv_domain)
return NULL;
- etnaviv_domain->dev = gpu->dev;
+ domain = &etnaviv_domain->base;
- etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
- etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
- etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
- etnaviv_domain->domain.geometry.aperture_start = 0;
- etnaviv_domain->domain.geometry.aperture_end = ~0UL & ~(SZ_4K - 1);
+ domain->dev = gpu->dev;
+ domain->base = 0;
+ domain->size = (u64)SZ_1G * 4;
+ domain->ops = &etnaviv_iommuv2_ops;
ret = etnaviv_iommuv2_init(etnaviv_domain);
if (ret)
goto out_free;
- return &etnaviv_domain->domain;
+ return &etnaviv_domain->base;
out_free:
vfree(etnaviv_domain);
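The domain->size chosen above is consistent with the table layout in this file: each second-level table is one SZ_4K allocation, i.e. 1024 32-bit PTEs covering 1024 * 4 KiB = 4 MiB, and with MMUv2_MAX_STLB_ENTRIES = 1024 such tables the domain spans 1024 * 4 MiB = 4 GiB - exactly the (u64)SZ_1G * 4 set above.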
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index f103e787de94..35074b944778 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -22,17 +22,64 @@
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"
-static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
- unsigned long iova, int flags, void *arg)
+static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
+ unsigned long iova, size_t size)
{
- DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
- return 0;
+ size_t unmapped_page, unmapped = 0;
+ size_t pgsize = SZ_4K;
+
+ if (!IS_ALIGNED(iova | size, pgsize)) {
+ pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+ iova, size, pgsize);
+ return;
+ }
+
+ while (unmapped < size) {
+ unmapped_page = domain->ops->unmap(domain, iova, pgsize);
+ if (!unmapped_page)
+ break;
+
+ iova += unmapped_page;
+ unmapped += unmapped_page;
+ }
}
-int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
- struct sg_table *sgt, unsigned len, int prot)
+static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
{
- struct iommu_domain *domain = iommu->domain;
+ unsigned long orig_iova = iova;
+ size_t pgsize = SZ_4K;
+ size_t orig_size = size;
+ int ret = 0;
+
+ if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
+ pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
+ iova, &paddr, size, pgsize);
+ return -EINVAL;
+ }
+
+ while (size) {
+ ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+ if (ret)
+ break;
+
+ iova += pgsize;
+ paddr += pgsize;
+ size -= pgsize;
+ }
+
+ /* unroll mapping in case something went wrong */
+ if (ret)
+ etnaviv_domain_unmap(domain, orig_iova, orig_size - size);
+
+ return ret;
+}
+
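Both helpers use IS_ALIGNED(iova | size, pgsize) (and its three-operand variant) to fold several alignment checks into one: OR-ing the values leaves the low bits clear only if every operand had them clear. A minimal stand-alone check of that equivalence, using a local copy of the kernel macro for illustration:

    #include <assert.h>
    #include <stdint.h>

    /* local stand-in for the kernel's IS_ALIGNED() */
    #define IS_ALIGNED(x, a)  (((x) & ((typeof(x))(a) - 1)) == 0)

    int main(void)
    {
            uint64_t pgsize = 4096;
            uint64_t iova, size;

            for (iova = 0; iova < 3 * pgsize; iova += 512)
                    for (size = 0; size < 3 * pgsize; size += 512)
                            /* combined check matches checking each value alone */
                            assert(IS_ALIGNED(iova | size, pgsize) ==
                                   (IS_ALIGNED(iova, pgsize) &&
                                    IS_ALIGNED(size, pgsize)));

            return 0;
    }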
+static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
+ struct sg_table *sgt, unsigned len, int prot)
+{
+ struct etnaviv_iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned int da = iova;
unsigned int i, j;
@@ -47,7 +94,7 @@ int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
- ret = iommu_map(domain, da, pa, bytes, prot);
+ ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
if (ret)
goto fail;
@@ -62,27 +109,24 @@ fail:
for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes = sg_dma_len(sg) + sg->offset;
- iommu_unmap(domain, da, bytes);
+ etnaviv_domain_unmap(domain, da, bytes);
da += bytes;
}
return ret;
}
-int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
- struct sg_table *sgt, unsigned len)
+static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
+ struct sg_table *sgt, unsigned len)
{
- struct iommu_domain *domain = iommu->domain;
+ struct etnaviv_iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
unsigned int da = iova;
int i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = sg_dma_len(sg) + sg->offset;
- size_t unmapped;
- unmapped = iommu_unmap(domain, da, bytes);
- if (unmapped < bytes)
- return unmapped;
+ etnaviv_domain_unmap(domain, da, bytes);
VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
@@ -90,8 +134,6 @@ int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
da += bytes;
}
-
- return 0;
}
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
@@ -237,7 +279,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
mmu->last_iova = node->start + etnaviv_obj->base.size;
mapping->iova = node->start;
ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
- IOMMU_READ | IOMMU_WRITE);
+ ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
if (ret < 0) {
drm_mm_remove_node(node);
@@ -271,7 +313,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
drm_mm_takedown(&mmu->mm);
- iommu_domain_free(mmu->domain);
+ mmu->domain->ops->free(mmu->domain);
kfree(mmu);
}
@@ -303,11 +345,7 @@ struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
mutex_init(&mmu->lock);
INIT_LIST_HEAD(&mmu->mappings);
- drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
- mmu->domain->geometry.aperture_end -
- mmu->domain->geometry.aperture_start + 1);
-
- iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);
+ drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);
return mmu;
}
@@ -338,8 +376,8 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
mutex_unlock(&mmu->lock);
return ret;
}
- ret = iommu_map(mmu->domain, vram_node->start, paddr, size,
- IOMMU_READ);
+ ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
+ size, ETNAVIV_PROT_READ);
if (ret < 0) {
drm_mm_remove_node(vram_node);
mutex_unlock(&mmu->lock);
@@ -362,25 +400,17 @@ void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
if (mmu->version == ETNAVIV_IOMMU_V2) {
mutex_lock(&mmu->lock);
- iommu_unmap(mmu->domain,iova, size);
+ etnaviv_domain_unmap(mmu->domain, iova, size);
drm_mm_remove_node(vram_node);
mutex_unlock(&mmu->lock);
}
}
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
- struct etnaviv_iommu_ops *ops;
-
- ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
-
- return ops->dump_size(iommu->domain);
+ return iommu->domain->ops->dump_size(iommu->domain);
}
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
- struct etnaviv_iommu_ops *ops;
-
- ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
-
- ops->dump(iommu->domain, buf);
+ iommu->domain->ops->dump(iommu->domain, buf);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
index 54be289e5981..ab603f5166b1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -17,7 +17,8 @@
#ifndef __ETNAVIV_MMU_H__
#define __ETNAVIV_MMU_H__
-#include <linux/iommu.h>
+#define ETNAVIV_PROT_READ (1 << 0)
+#define ETNAVIV_PROT_WRITE (1 << 1)
enum etnaviv_iommu_version {
ETNAVIV_IOMMU_V1 = 0,
@@ -26,16 +27,31 @@ enum etnaviv_iommu_version {
struct etnaviv_gpu;
struct etnaviv_vram_mapping;
+struct etnaviv_iommu_domain;
-struct etnaviv_iommu_ops {
- struct iommu_ops ops;
- size_t (*dump_size)(struct iommu_domain *);
- void (*dump)(struct iommu_domain *, void *);
+struct etnaviv_iommu_domain_ops {
+ void (*free)(struct etnaviv_iommu_domain *);
+ int (*map)(struct etnaviv_iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
+ size_t (*unmap)(struct etnaviv_iommu_domain *domain, unsigned long iova,
+ size_t size);
+ size_t (*dump_size)(struct etnaviv_iommu_domain *);
+ void (*dump)(struct etnaviv_iommu_domain *, void *);
+};
+
+struct etnaviv_iommu_domain {
+ struct device *dev;
+ void *bad_page_cpu;
+ dma_addr_t bad_page_dma;
+ u64 base;
+ u64 size;
+
+ const struct etnaviv_iommu_domain_ops *ops;
};
struct etnaviv_iommu {
struct etnaviv_gpu *gpu;
- struct iommu_domain *domain;
+ struct etnaviv_iommu_domain *domain;
enum etnaviv_iommu_version version;
@@ -49,18 +65,11 @@ struct etnaviv_iommu {
struct etnaviv_gem_object;
-int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
- int cnt);
-int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
- struct sg_table *sgt, unsigned len, int prot);
-int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
- struct sg_table *sgt, unsigned len);
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
struct etnaviv_vram_mapping *mapping);
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
struct etnaviv_vram_mapping *mapping);
-void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
struct drm_mm_node *vram_node, size_t size,
@@ -73,6 +82,7 @@ size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu);
+void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu);
#endif /* __ETNAVIV_MMU_H__ */
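With the iommu_domain plumbing gone, the per-version allocators declared in etnaviv_iommu.h are the only way to obtain a domain; etnaviv_iommu_new() (only partly visible in this diff) is expected to pick one based on the detected MMU generation, roughly along these lines (a sketch, not the patch's actual code):

    /* Sketch: instantiate the matching etnaviv_iommu_domain implementation. */
    static struct etnaviv_iommu_domain *
    etnaviv_domain_alloc(struct etnaviv_gpu *gpu, enum etnaviv_iommu_version ver)
    {
            if (ver == ETNAVIV_IOMMU_V2)
                    return etnaviv_iommuv2_domain_alloc(gpu);

            return etnaviv_iommuv1_domain_alloc(gpu);
    }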
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
new file mode 100644
index 000000000000..768f5aafdd18
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -0,0 +1,495 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "etnaviv_gpu.h"
+#include "etnaviv_perfmon.h"
+#include "state_hi.xml.h"
+
+struct etnaviv_pm_domain;
+
+struct etnaviv_pm_signal {
+ char name[64];
+ u32 data;
+
+ u32 (*sample)(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal);
+};
+
+struct etnaviv_pm_domain {
+ char name[64];
+
+ /* profile register */
+ u32 profile_read;
+ u32 profile_config;
+
+ u8 nr_signals;
+ const struct etnaviv_pm_signal *signal;
+};
+
+struct etnaviv_pm_domain_meta {
+ const struct etnaviv_pm_domain *domains;
+ u32 nr_domains;
+};
+
+static u32 simple_reg_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ return gpu_read(gpu, signal->data);
+}
+
+static u32 perf_reg_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ gpu_write(gpu, domain->profile_config, signal->data);
+
+ return gpu_read(gpu, domain->profile_read);
+}
+
+static u32 pipe_reg_read(struct etnaviv_gpu *gpu,
+ const struct etnaviv_pm_domain *domain,
+ const struct etnaviv_pm_signal *signal)
+{
+ u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
+ u32 value = 0;
+ unsigned i;
+
+ for (i = 0; i < gpu->identity.pixel_pipes; i++) {
+ clock &= ~(VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK);
+ clock |= VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(i);
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
+ gpu_write(gpu, domain->profile_config, signal->data);
+ value += gpu_read(gpu, domain->profile_read);
+ }
+
+ /* switch back to pixel pipe 0 to prevent GPU hang */
+ clock &= ~(VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK);
+ clock |= VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(0);
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
+
+ return value;
+}
+
+static const struct etnaviv_pm_domain doms_3d[] = {
+ {
+ .name = "HI",
+ .profile_read = VIVS_MC_PROFILE_HI_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG2,
+ .nr_signals = 5,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "TOTAL_CYCLES",
+ VIVS_HI_PROFILE_TOTAL_CYCLES,
+ &simple_reg_read
+ },
+ {
+ "IDLE_CYCLES",
+ VIVS_HI_PROFILE_IDLE_CYCLES,
+ &simple_reg_read
+ },
+ {
+ "AXI_CYCLES_READ_REQUEST_STALLED",
+ VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED,
+ &perf_reg_read
+ },
+ {
+ "AXI_CYCLES_WRITE_REQUEST_STALLED",
+ VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED,
+ &perf_reg_read
+ },
+ {
+ "AXI_CYCLES_WRITE_DATA_STALLED",
+ VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED,
+ &perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "PE",
+ .profile_read = VIVS_MC_PROFILE_PE_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG0,
+ .nr_signals = 5,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "PIXEL_COUNT_KILLED_BY_COLOR_PIPE",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE,
+ &pipe_reg_read
+ },
+ {
+ "PIXEL_COUNT_KILLED_BY_DEPTH_PIPE",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE,
+ &pipe_reg_read
+ },
+ {
+ "PIXEL_COUNT_DRAWN_BY_COLOR_PIPE",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE,
+ &pipe_reg_read
+ },
+ {
+ "PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE,
+ &pipe_reg_read
+ }
+ }
+ },
+ {
+ .name = "SH",
+ .profile_read = VIVS_MC_PROFILE_SH_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG0,
+ .nr_signals = 9,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "SHADER_CYCLES",
+ VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES,
+ &perf_reg_read
+ },
+ {
+ "PS_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "RENDERED_PIXEL_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "VS_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER,
+ &pipe_reg_read
+ },
+ {
+ "RENDERED_VERTICE_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER,
+ &pipe_reg_read
+ },
+ {
+ "VTX_BRANCH_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER,
+ &pipe_reg_read
+ },
+ {
+ "VTX_TEXLD_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER,
+ &pipe_reg_read
+ },
+ {
+ "PXL_BRANCH_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER,
+ &pipe_reg_read
+ },
+ {
+ "PXL_TEXLD_INST_COUNTER",
+ VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER,
+ &pipe_reg_read
+ }
+ }
+ },
+ {
+ .name = "PA",
+ .profile_read = VIVS_MC_PROFILE_PA_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG1,
+ .nr_signals = 6,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "INPUT_VTX_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "INPUT_PRIM_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "OUTPUT_PRIM_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "DEPTH_CLIPPED_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER,
+ &pipe_reg_read
+ },
+ {
+ "TRIVIAL_REJECTED_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER,
+ &pipe_reg_read
+ },
+ {
+ "CULLED_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER,
+ &pipe_reg_read
+ }
+ }
+ },
+ {
+ .name = "SE",
+ .profile_read = VIVS_MC_PROFILE_SE_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG1,
+ .nr_signals = 2,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "CULLED_TRIANGLE_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT,
+ &perf_reg_read
+ },
+ {
+ "CULLED_LINES_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT,
+ &perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "RA",
+ .profile_read = VIVS_MC_PROFILE_RA_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG1,
+ .nr_signals = 7,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "VALID_PIXEL_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_QUAD_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT,
+ &perf_reg_read
+ },
+ {
+ "VALID_QUAD_COUNT_AFTER_EARLY_Z",
+ VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_PRIMITIVE_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT,
+ &perf_reg_read
+ },
+ {
+ "PIPE_CACHE_MISS_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "PREFETCH_CACHE_MISS_COUNTER",
+ VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER,
+ &perf_reg_read
+ },
+ {
+ "CULLED_QUAD_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT,
+ &perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "TX",
+ .profile_read = VIVS_MC_PROFILE_TX_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG1,
+ .nr_signals = 9,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "TOTAL_BILINEAR_REQUESTS",
+ VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_TRILINEAR_REQUESTS",
+ VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_DISCARDED_TEXTURE_REQUESTS",
+ VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_TEXTURE_REQUESTS",
+ VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS,
+ &perf_reg_read
+ },
+ {
+ "MEM_READ_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT,
+ &perf_reg_read
+ },
+ {
+ "MEM_READ_IN_8B_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT,
+ &perf_reg_read
+ },
+ {
+ "CACHE_MISS_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT,
+ &perf_reg_read
+ },
+ {
+ "CACHE_HIT_TEXEL_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT,
+ &perf_reg_read
+ },
+ {
+ "CACHE_MISS_TEXEL_COUNT",
+ VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT,
+ &perf_reg_read
+ }
+ }
+ },
+ {
+ .name = "MC",
+ .profile_read = VIVS_MC_PROFILE_MC_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG2,
+ .nr_signals = 3,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "TOTAL_READ_REQ_8B_FROM_PIPELINE",
+ VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_READ_REQ_8B_FROM_IP",
+ VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP,
+ &perf_reg_read
+ },
+ {
+ "TOTAL_WRITE_REQ_8B_FROM_PIPELINE",
+ VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE,
+ &perf_reg_read
+ }
+ }
+ }
+};
+
+static const struct etnaviv_pm_domain doms_2d[] = {
+ {
+ .name = "PE",
+ .profile_read = VIVS_MC_PROFILE_PE_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG0,
+ .nr_signals = 1,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "PIXELS_RENDERED_2D",
+ VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D,
+ &pipe_reg_read
+ }
+ }
+ }
+};
+
+static const struct etnaviv_pm_domain doms_vg[] = {
+};
+
+static const struct etnaviv_pm_domain_meta doms_meta[] = {
+ {
+ .nr_domains = ARRAY_SIZE(doms_3d),
+ .domains = &doms_3d[0]
+ },
+ {
+ .nr_domains = ARRAY_SIZE(doms_2d),
+ .domains = &doms_2d[0]
+ },
+ {
+ .nr_domains = ARRAY_SIZE(doms_vg),
+ .domains = &doms_vg[0]
+ }
+};
+
+int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
+ struct drm_etnaviv_pm_domain *domain)
+{
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[domain->pipe];
+ const struct etnaviv_pm_domain *dom;
+
+ if (domain->iter >= meta->nr_domains)
+ return -EINVAL;
+
+ dom = meta->domains + domain->iter;
+
+ domain->id = domain->iter;
+ domain->nr_signals = dom->nr_signals;
+ strncpy(domain->name, dom->name, sizeof(domain->name));
+
+ domain->iter++;
+ if (domain->iter == meta->nr_domains)
+ domain->iter = 0xff;
+
+ return 0;
+}
+
+int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
+ struct drm_etnaviv_pm_signal *signal)
+{
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[signal->pipe];
+ const struct etnaviv_pm_domain *dom;
+ const struct etnaviv_pm_signal *sig;
+
+ if (signal->domain >= meta->nr_domains)
+ return -EINVAL;
+
+ dom = meta->domains + signal->domain;
+
+ if (signal->iter >= dom->nr_signals)
+ return -EINVAL;
+
+ sig = &dom->signal[signal->iter];
+
+ signal->id = signal->iter;
+ strncpy(signal->name, sig->name, sizeof(signal->name));
+
+ signal->iter++;
+ if (signal->iter == dom->nr_signals)
+ signal->iter = 0xffff;
+
+ return 0;
+}
+
+int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
+ u32 exec_state)
+{
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[exec_state];
+ const struct etnaviv_pm_domain *dom;
+
+ if (r->domain >= meta->nr_domains)
+ return -EINVAL;
+
+ dom = meta->domains + r->domain;
+
+ if (r->signal >= dom->nr_signals)
+ return -EINVAL;
+
+ return 0;
+}
+
+void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
+ const struct etnaviv_perfmon_request *pmr)
+{
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[gpu->exec_state];
+ const struct etnaviv_pm_domain *dom;
+ const struct etnaviv_pm_signal *sig;
+ u32 *bo = pmr->bo_vma;
+ u32 val;
+
+ dom = meta->domains + pmr->domain;
+ sig = &dom->signal[pmr->signal];
+ val = sig->sample(gpu, dom, sig);
+
+ *(bo + pmr->offset) = val;
+}
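The query helpers implement a simple cursor protocol: the caller starts with iter = 0 and the functions advance it, parking it at 0xff (domains) or 0xffff (signals) once the last entry has been handed out, after which the next call fails the range check. A hedged sketch of a caller walking one pipe's domains and signals (the drm_etnaviv_pm_* uapi structs are assumed to carry only the fields used here):

    /* Sketch: enumerate every perfmon domain and signal of one pipe. */
    static void dump_perfmon_info(struct etnaviv_gpu *gpu, u32 pipe)
    {
            struct drm_etnaviv_pm_domain dom = { .pipe = pipe, .iter = 0 };

            while (etnaviv_pm_query_dom(gpu, &dom) == 0) {
                    struct drm_etnaviv_pm_signal sig = {
                            .pipe = pipe,
                            .domain = dom.id,
                            .iter = 0,
                    };

                    /* iter parks at 0xffff after the last signal, so the
                     * next query returns -EINVAL and the loop stops. */
                    while (etnaviv_pm_query_sig(gpu, &sig) == 0)
                            pr_info("domain %s, signal %u: %s\n",
                                    dom.name, sig.id, sig.name);
            }
    }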
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
new file mode 100644
index 000000000000..35dce194cb00
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_PERFMON_H__
+#define __ETNAVIV_PERFMON_H__
+
+struct etnaviv_gpu;
+struct drm_etnaviv_pm_domain;
+struct drm_etnaviv_pm_signal;
+
+struct etnaviv_perfmon_request
+{
+ u32 flags;
+ u8 domain;
+ u8 signal;
+ u32 sequence;
+
+ /* bo to store a value */
+ u32 *bo_vma;
+ u32 offset;
+};
+
+int etnaviv_pm_query_dom(struct etnaviv_gpu *gpu,
+ struct drm_etnaviv_pm_domain *domain);
+
+int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
+ struct drm_etnaviv_pm_signal *signal);
+
+int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
+ u32 exec_state);
+
+void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
+ const struct etnaviv_perfmon_request *pmr);
+
+#endif /* __ETNAVIV_PERFMON_H__ */
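Note that bo_vma is a u32 pointer, so etnaviv_perfmon_process() stores the sampled counter at a 32-bit-word offset into the buffer object, not a byte offset. A small sketch of filling in one request (the ETNA_PM_PROCESS_POST flag comes from the uapi part of this series; everything else is illustrative):

    /* Sketch: sample domain 0 / signal 0 after the cmdbuf has executed and
     * store the value in the second u32 slot of the mapped BO.
     */
    static void setup_example_request(struct etnaviv_perfmon_request *pmr,
                                      u32 *bo_vaddr)
    {
            pmr->flags = ETNA_PM_PROCESS_POST;
            pmr->domain = 0;        /* e.g. "HI" on the 3D pipe */
            pmr->signal = 0;        /* e.g. "TOTAL_CYCLES" */
            pmr->sequence = 1;      /* completion marker for userspace */
            pmr->bo_vma = bo_vaddr;
            pmr->offset = 1;        /* u32 index, i.e. byte offset 4 */
    }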
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 305dc3d4ff77..5a7c9d8abd6b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -3,6 +3,7 @@ config DRM_EXYNOS
depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
+ select SND_SOC_HDMI_CODEC if SND_SOC
help
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 6ce0821590df..dc01342e759a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -95,8 +95,23 @@ static enum drm_mode_status exynos_crtc_mode_valid(struct drm_crtc *crtc,
return MODE_OK;
}
+static bool exynos_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ if (exynos_crtc->ops->mode_fixup)
+ return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
+ adjusted_mode);
+
+ return true;
+}
+
+
static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
.mode_valid = exynos_crtc_mode_valid,
+ .mode_fixup = exynos_crtc_mode_fixup,
.atomic_check = exynos_crtc_atomic_check,
.atomic_begin = exynos_crtc_atomic_begin,
.atomic_flush = exynos_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index f8bae4cb4823..c6847fa708fa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -136,6 +136,9 @@ struct exynos_drm_crtc_ops {
u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode);
+ bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
int (*atomic_check)(struct exynos_drm_crtc *crtc,
struct drm_crtc_state *state);
void (*atomic_begin)(struct exynos_drm_crtc *crtc);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index ba4a32b132ba..2174814273e2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -420,11 +420,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
mic->bridge.funcs = &mic_bridge_funcs;
mic->bridge.of_node = dev->of_node;
- ret = drm_bridge_add(&mic->bridge);
- if (ret) {
- DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
- return ret;
- }
+ drm_bridge_add(&mic->bridge);
pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 0109ff40b1db..82d1b7e2febe 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -40,7 +40,7 @@
#include <linux/component.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-
+#include <sound/hdmi-codec.h>
#include <drm/exynos_drm.h>
#include <media/cec-notifier.h>
@@ -111,15 +111,20 @@ struct hdmi_driver_data {
struct string_array_spec clk_muxes;
};
+struct hdmi_audio {
+ struct platform_device *pdev;
+ struct hdmi_audio_infoframe infoframe;
+ struct hdmi_codec_params params;
+ bool mute;
+};
+
struct hdmi_context {
struct drm_encoder encoder;
struct device *dev;
struct drm_device *drm_dev;
struct drm_connector connector;
- bool powered;
bool dvi_mode;
struct delayed_work hotplug_work;
- struct drm_display_mode current_mode;
struct cec_notifier *notifier;
const struct hdmi_driver_data *drv_data;
@@ -137,6 +142,11 @@ struct hdmi_context {
struct regulator *reg_hdmi_en;
struct exynos_drm_clk phy_clk;
struct drm_bridge *bridge;
+
+ /* mutex protecting subsequent fields below */
+ struct mutex mutex;
+ struct hdmi_audio audio;
+ bool powered;
};
static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
@@ -298,6 +308,15 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
},
},
{
+ .pixel_clock = 85500000,
+ .conf = {
+ 0x01, 0xd1, 0x24, 0x11, 0x40, 0x40, 0xd0, 0x08,
+ 0x84, 0xa0, 0xd6, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x90, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
+ },
+ },
+ {
.pixel_clock = 106500000,
.conf = {
0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08,
@@ -768,8 +787,25 @@ static int hdmi_clk_set_parents(struct hdmi_context *hdata, bool to_phy)
return ret;
}
+static int hdmi_audio_infoframe_apply(struct hdmi_context *hdata)
+{
+ struct hdmi_audio_infoframe *infoframe = &hdata->audio.infoframe;
+ u8 buf[HDMI_INFOFRAME_SIZE(AUDIO)];
+ int len;
+
+ len = hdmi_audio_infoframe_pack(infoframe, buf, sizeof(buf));
+ if (len < 0)
+ return len;
+
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_EVERY_VSYNC);
+ hdmi_reg_write_buf(hdata, HDMI_AUI_HEADER0, buf, len);
+
+ return 0;
+}
+
static void hdmi_reg_infoframes(struct hdmi_context *hdata)
{
+ struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
union hdmi_infoframe frm;
u8 buf[25];
int ret;
@@ -783,8 +819,7 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
return;
}
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
- &hdata->current_mode, false);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, m, false);
if (!ret)
ret = hdmi_avi_infoframe_pack(&frm.avi, buf, sizeof(buf));
if (ret > 0) {
@@ -794,8 +829,7 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
DRM_INFO("%s: invalid AVI infoframe (%d)\n", __func__, ret);
}
- ret = drm_hdmi_vendor_infoframe_from_display_mode(&frm.vendor.hdmi,
- &hdata->current_mode);
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&frm.vendor.hdmi, m);
if (!ret)
ret = hdmi_vendor_infoframe_pack(&frm.vendor.hdmi, buf,
sizeof(buf));
@@ -805,15 +839,7 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
hdmi_reg_write_buf(hdata, HDMI_VSI_DATA(0), buf + 3, ret - 3);
}
- ret = hdmi_audio_infoframe_init(&frm.audio);
- if (!ret) {
- frm.audio.channels = 2;
- ret = hdmi_audio_infoframe_pack(&frm.audio, buf, sizeof(buf));
- }
- if (ret > 0) {
- hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_EVERY_VSYNC);
- hdmi_reg_write_buf(hdata, HDMI_AUI_HEADER0, buf, ret);
- }
+ hdmi_audio_infoframe_apply(hdata);
}
static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
@@ -1003,23 +1029,18 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u32 freq)
hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
}
-static void hdmi_audio_init(struct hdmi_context *hdata)
+static void hdmi_audio_config(struct hdmi_context *hdata)
{
- u32 sample_rate, bits_per_sample;
- u32 data_num, bit_ch, sample_frq;
- u32 val;
-
- sample_rate = 44100;
- bits_per_sample = 16;
+ u32 bit_ch = 1;
+ u32 data_num, val;
+ int i;
- switch (bits_per_sample) {
+ switch (hdata->audio.params.sample_width) {
case 20:
data_num = 2;
- bit_ch = 1;
break;
case 24:
data_num = 3;
- bit_ch = 1;
break;
default:
data_num = 1;
@@ -1027,7 +1048,7 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
break;
}
- hdmi_reg_acr(hdata, sample_rate);
+ hdmi_reg_acr(hdata, hdata->audio.params.sample_rate);
hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE
| HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE
@@ -1037,12 +1058,6 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
| HDMI_I2S_CH1_EN | HDMI_I2S_CH2_EN);
hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CUV, HDMI_I2S_CUV_RL_EN);
-
- sample_frq = (sample_rate == 44100) ? 0 :
- (sample_rate == 48000) ? 2 :
- (sample_rate == 32000) ? 3 :
- (sample_rate == 96000) ? 0xa : 0x0;
-
hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_DIS);
hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_EN);
@@ -1066,39 +1081,33 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
| HDMI_I2S_SET_SDATA_BIT(data_num)
| HDMI_I2S_BASIC_FORMAT);
- /* Configure register related to CUV information */
- hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_0, HDMI_I2S_CH_STATUS_MODE_0
- | HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH
- | HDMI_I2S_COPYRIGHT
- | HDMI_I2S_LINEAR_PCM
- | HDMI_I2S_CONSUMER_FORMAT);
- hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_1, HDMI_I2S_CD_PLAYER);
- hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_2, HDMI_I2S_SET_SOURCE_NUM(0));
- hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_3, HDMI_I2S_CLK_ACCUR_LEVEL_2
- | HDMI_I2S_SET_SMP_FREQ(sample_frq));
- hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_4,
- HDMI_I2S_ORG_SMP_FREQ_44_1
- | HDMI_I2S_WORD_LEN_MAX24_24BITS
- | HDMI_I2S_WORD_LEN_MAX_24BITS);
+ /* Configuration of the audio channel status registers */
+ for (i = 0; i < HDMI_I2S_CH_ST_MAXNUM; i++)
+ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST(i),
+ hdata->audio.params.iec.status[i]);
hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_CON, HDMI_I2S_CH_STATUS_RELOAD);
}
-static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff)
+static void hdmi_audio_control(struct hdmi_context *hdata)
{
+ bool enable = !hdata->audio.mute;
+
if (hdata->dvi_mode)
return;
- hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0);
- hdmi_reg_writemask(hdata, HDMI_CON_0, onoff ?
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, enable ?
+ HDMI_AUI_CON_EVERY_VSYNC : HDMI_AUI_CON_NO_TRAN);
+ hdmi_reg_writemask(hdata, HDMI_CON_0, enable ?
HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK);
}
static void hdmi_start(struct hdmi_context *hdata, bool start)
{
+ struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
u32 val = start ? HDMI_TG_EN : 0;
- if (hdata->current_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (m->flags & DRM_MODE_FLAG_INTERLACE)
val |= HDMI_FIELD_EN;
hdmi_reg_writemask(hdata, HDMI_CON_0, val, HDMI_EN);
@@ -1168,7 +1177,7 @@ static void hdmiphy_wait_for_pll(struct hdmi_context *hdata)
static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
{
- struct drm_display_mode *m = &hdata->current_mode;
+ struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
unsigned int val;
hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
@@ -1247,7 +1256,19 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
{
- struct drm_display_mode *m = &hdata->current_mode;
+ struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
+ struct drm_display_mode *am =
+ &hdata->encoder.crtc->state->adjusted_mode;
+ int hquirk = 0;
+
+ /*
+ * If the video mode coming from the CRTC differs from the requested one,
+ * HDMI is sometimes able to perform the conversion almost properly -
+ * only the first line is distorted.
+ */
+ if ((m->vdisplay != am->vdisplay) &&
+ (m->hdisplay == 1280 || m->hdisplay == 1024 || m->hdisplay == 1366))
+ hquirk = 258;
hdmi_reg_writev(hdata, HDMI_H_BLANK_0, 2, m->htotal - m->hdisplay);
hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal);
@@ -1341,8 +1362,9 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
- hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
- hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
+ hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2,
+ m->htotal - m->hdisplay - hquirk);
+ hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay + hquirk);
hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
if (hdata->drv_data == &exynos5433_hdmi_driver_data)
hdmi_reg_writeb(hdata, HDMI_TG_DECON_EN, 1);
@@ -1380,10 +1402,11 @@ static void hdmiphy_enable_mode_set(struct hdmi_context *hdata, bool enable)
static void hdmiphy_conf_apply(struct hdmi_context *hdata)
{
+ struct drm_display_mode *m = &hdata->encoder.crtc->state->mode;
int ret;
const u8 *phy_conf;
- ret = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000);
+ ret = hdmi_find_phy_conf(hdata, m->clock * 1000);
if (ret < 0) {
DRM_ERROR("failed to find hdmiphy conf\n");
return;
@@ -1406,28 +1429,14 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
hdmiphy_wait_for_pll(hdata);
}
+/* Should be called with hdata->mutex mutex held */
static void hdmi_conf_apply(struct hdmi_context *hdata)
{
hdmi_start(hdata, false);
hdmi_conf_init(hdata);
- hdmi_audio_init(hdata);
+ hdmi_audio_config(hdata);
hdmi_mode_apply(hdata);
- hdmi_audio_control(hdata, true);
-}
-
-static void hdmi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct hdmi_context *hdata = encoder_to_hdmi(encoder);
- struct drm_display_mode *m = adjusted_mode;
-
- DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
- m->hdisplay, m->vdisplay,
- m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ?
- "INTERLACED" : "PROGRESSIVE");
-
- drm_mode_copy(&hdata->current_mode, m);
+ hdmi_audio_control(hdata);
}
static void hdmi_set_refclk(struct hdmi_context *hdata, bool on)
@@ -1439,6 +1448,7 @@ static void hdmi_set_refclk(struct hdmi_context *hdata, bool on)
SYSREG_HDMI_REFCLK_INT_CLK, on ? ~0 : 0);
}
+/* Should be called with hdata->mutex mutex held. */
static void hdmiphy_enable(struct hdmi_context *hdata)
{
if (hdata->powered)
@@ -1461,6 +1471,7 @@ static void hdmiphy_enable(struct hdmi_context *hdata)
hdata->powered = true;
}
+/* Should be called with hdata->mutex mutex held. */
static void hdmiphy_disable(struct hdmi_context *hdata)
{
if (!hdata->powered)
@@ -1486,33 +1497,42 @@ static void hdmi_enable(struct drm_encoder *encoder)
{
struct hdmi_context *hdata = encoder_to_hdmi(encoder);
+ mutex_lock(&hdata->mutex);
+
hdmiphy_enable(hdata);
hdmi_conf_apply(hdata);
+
+ mutex_unlock(&hdata->mutex);
}
static void hdmi_disable(struct drm_encoder *encoder)
{
struct hdmi_context *hdata = encoder_to_hdmi(encoder);
- if (!hdata->powered)
+ mutex_lock(&hdata->mutex);
+
+ if (hdata->powered) {
+ /*
+ * The SFRs of VP and Mixer are updated by Vertical Sync of
+ * Timing generator which is a part of HDMI so the sequence
+ * to disable TV Subsystem should be as following,
+ * VP -> Mixer -> HDMI
+ *
+ * To achieve such sequence HDMI is disabled together with
+ * HDMI PHY, via pipe clock callback.
+ */
+ mutex_unlock(&hdata->mutex);
+ cancel_delayed_work(&hdata->hotplug_work);
+ cec_notifier_set_phys_addr(hdata->notifier,
+ CEC_PHYS_ADDR_INVALID);
return;
+ }
- /*
- * The SFRs of VP and Mixer are updated by Vertical Sync of
- * Timing generator which is a part of HDMI so the sequence
- * to disable TV Subsystem should be as following,
- * VP -> Mixer -> HDMI
- *
- * To achieve such sequence HDMI is disabled together with HDMI PHY, via
- * pipe clock callback.
- */
- cancel_delayed_work(&hdata->hotplug_work);
- cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
+ mutex_unlock(&hdata->mutex);
}
static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
.mode_fixup = hdmi_mode_fixup,
- .mode_set = hdmi_mode_set,
.enable = hdmi_enable,
.disable = hdmi_disable,
};
@@ -1521,6 +1541,99 @@ static const struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static void hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct hdmi_context *hdata = dev_get_drvdata(dev);
+
+ mutex_lock(&hdata->mutex);
+
+ hdata->audio.mute = true;
+
+ if (hdata->powered)
+ hdmi_audio_control(hdata);
+
+ mutex_unlock(&hdata->mutex);
+}
+
+static int hdmi_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct hdmi_context *hdata = dev_get_drvdata(dev);
+
+ if (daifmt->fmt != HDMI_I2S || daifmt->bit_clk_inv ||
+ daifmt->frame_clk_inv || daifmt->bit_clk_master ||
+ daifmt->frame_clk_master) {
+ dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+ daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+ daifmt->bit_clk_master,
+ daifmt->frame_clk_master);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdata->mutex);
+
+ hdata->audio.params = *params;
+
+ if (hdata->powered) {
+ hdmi_audio_config(hdata);
+ hdmi_audio_infoframe_apply(hdata);
+ }
+
+ mutex_unlock(&hdata->mutex);
+
+ return 0;
+}
+
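The check above means the bridge only accepts plain I2S with the SoC driving the bit and frame clocks and no clock inversion; any other hdmi_codec_daifmt is rejected. Spelled out, the only accepted configuration looks like this (illustrative values for exactly the fields tested above):

    /* The single DAI format hdmi_audio_hw_params() will accept. */
    static const struct hdmi_codec_daifmt exynos_hdmi_daifmt = {
            .fmt = HDMI_I2S,
            .bit_clk_inv = 0,
            .frame_clk_inv = 0,
            .bit_clk_master = 0,
            .frame_clk_master = 0,
    };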
+static int hdmi_audio_digital_mute(struct device *dev, void *data, bool mute)
+{
+ struct hdmi_context *hdata = dev_get_drvdata(dev);
+
+ mutex_lock(&hdata->mutex);
+
+ hdata->audio.mute = mute;
+
+ if (hdata->powered)
+ hdmi_audio_control(hdata);
+
+ mutex_unlock(&hdata->mutex);
+
+ return 0;
+}
+
+static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
+ size_t len)
+{
+ struct hdmi_context *hdata = dev_get_drvdata(dev);
+ struct drm_connector *connector = &hdata->connector;
+
+ memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+ .hw_params = hdmi_audio_hw_params,
+ .audio_shutdown = hdmi_audio_shutdown,
+ .digital_mute = hdmi_audio_digital_mute,
+ .get_eld = hdmi_audio_get_eld,
+};
+
+static int hdmi_register_audio_device(struct hdmi_context *hdata)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &audio_codec_ops,
+ .max_i2s_channels = 6,
+ .i2s = 1,
+ };
+
+ hdata->audio.pdev = platform_device_register_data(
+ hdata->dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+ &codec_data, sizeof(codec_data));
+
+ return PTR_ERR_OR_ZERO(hdata->audio.pdev);
+}
+
static void hdmi_hotplug_work_func(struct work_struct *work)
{
struct hdmi_context *hdata;
@@ -1596,11 +1709,14 @@ static void hdmiphy_clk_enable(struct exynos_drm_clk *clk, bool enable)
{
struct hdmi_context *hdata = container_of(clk, struct hdmi_context,
phy_clk);
+ mutex_lock(&hdata->mutex);
if (enable)
hdmiphy_enable(hdata);
else
hdmiphy_disable(hdata);
+
+ mutex_unlock(&hdata->mutex);
}
static int hdmi_bridge_init(struct hdmi_context *hdata)
@@ -1811,6 +1927,7 @@ out:
static int hdmi_probe(struct platform_device *pdev)
{
+ struct hdmi_audio_infoframe *audio_infoframe;
struct device *dev = &pdev->dev;
struct hdmi_context *hdata;
struct resource *res;
@@ -1826,6 +1943,8 @@ static int hdmi_probe(struct platform_device *pdev)
hdata->dev = dev;
+ mutex_init(&hdata->mutex);
+
ret = hdmi_resources_init(hdata);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -1885,12 +2004,26 @@ static int hdmi_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- ret = component_add(&pdev->dev, &hdmi_component_ops);
+ audio_infoframe = &hdata->audio.infoframe;
+ hdmi_audio_infoframe_init(audio_infoframe);
+ audio_infoframe->coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
+ audio_infoframe->sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
+ audio_infoframe->sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
+ audio_infoframe->channels = 2;
+
+ ret = hdmi_register_audio_device(hdata);
if (ret)
goto err_notifier_put;
+ ret = component_add(&pdev->dev, &hdmi_component_ops);
+ if (ret)
+ goto err_unregister_audio;
+
return ret;
+err_unregister_audio:
+ platform_device_unregister(hdata->audio.pdev);
+
err_notifier_put:
cec_notifier_put(hdata->notifier);
pm_runtime_disable(dev);
@@ -1914,6 +2047,7 @@ static int hdmi_remove(struct platform_device *pdev)
cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
component_del(&pdev->dev, &hdmi_component_ops);
+ platform_device_unregister(hdata->audio.pdev);
cec_notifier_put(hdata->notifier);
pm_runtime_disable(&pdev->dev);
@@ -1929,6 +2063,8 @@ static int hdmi_remove(struct platform_device *pdev)
put_device(&hdata->ddc_adpt->dev);
+ mutex_destroy(&hdata->mutex);
+
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 002755415e00..dc5d79465f9b 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -67,19 +67,6 @@
#define MXR_FORMAT_ARGB4444 6
#define MXR_FORMAT_ARGB8888 7
-struct mixer_resources {
- int irq;
- void __iomem *mixer_regs;
- void __iomem *vp_regs;
- spinlock_t reg_slock;
- struct clk *mixer;
- struct clk *vp;
- struct clk *hdmi;
- struct clk *sclk_mixer;
- struct clk *sclk_hdmi;
- struct clk *mout_mixer;
-};
-
enum mixer_version_id {
MXR_VER_0_0_0_16,
MXR_VER_16_0_33_0,
@@ -117,8 +104,18 @@ struct mixer_context {
struct exynos_drm_plane planes[MIXER_WIN_NR];
unsigned long flags;
- struct mixer_resources mixer_res;
+ int irq;
+ void __iomem *mixer_regs;
+ void __iomem *vp_regs;
+ spinlock_t reg_slock;
+ struct clk *mixer;
+ struct clk *vp;
+ struct clk *hdmi;
+ struct clk *sclk_mixer;
+ struct clk *sclk_hdmi;
+ struct clk *mout_mixer;
enum mixer_version_id mxr_ver;
+ int scan_value;
};
struct mixer_drv_data {
@@ -194,44 +191,44 @@ static inline bool is_alpha_format(unsigned int pixel_format)
}
}
-static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id)
+static inline u32 vp_reg_read(struct mixer_context *ctx, u32 reg_id)
{
- return readl(res->vp_regs + reg_id);
+ return readl(ctx->vp_regs + reg_id);
}
-static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id,
+static inline void vp_reg_write(struct mixer_context *ctx, u32 reg_id,
u32 val)
{
- writel(val, res->vp_regs + reg_id);
+ writel(val, ctx->vp_regs + reg_id);
}
-static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id,
+static inline void vp_reg_writemask(struct mixer_context *ctx, u32 reg_id,
u32 val, u32 mask)
{
- u32 old = vp_reg_read(res, reg_id);
+ u32 old = vp_reg_read(ctx, reg_id);
val = (val & mask) | (old & ~mask);
- writel(val, res->vp_regs + reg_id);
+ writel(val, ctx->vp_regs + reg_id);
}
-static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id)
+static inline u32 mixer_reg_read(struct mixer_context *ctx, u32 reg_id)
{
- return readl(res->mixer_regs + reg_id);
+ return readl(ctx->mixer_regs + reg_id);
}
-static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id,
+static inline void mixer_reg_write(struct mixer_context *ctx, u32 reg_id,
u32 val)
{
- writel(val, res->mixer_regs + reg_id);
+ writel(val, ctx->mixer_regs + reg_id);
}
-static inline void mixer_reg_writemask(struct mixer_resources *res,
+static inline void mixer_reg_writemask(struct mixer_context *ctx,
u32 reg_id, u32 val, u32 mask)
{
- u32 old = mixer_reg_read(res, reg_id);
+ u32 old = mixer_reg_read(ctx, reg_id);
val = (val & mask) | (old & ~mask);
- writel(val, res->mixer_regs + reg_id);
+ writel(val, ctx->mixer_regs + reg_id);
}
static void mixer_regs_dump(struct mixer_context *ctx)
@@ -239,7 +236,7 @@ static void mixer_regs_dump(struct mixer_context *ctx)
#define DUMPREG(reg_id) \
do { \
DRM_DEBUG_KMS(#reg_id " = %08x\n", \
- (u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \
+ (u32)readl(ctx->mixer_regs + reg_id)); \
} while (0)
DUMPREG(MXR_STATUS);
@@ -271,7 +268,7 @@ static void vp_regs_dump(struct mixer_context *ctx)
#define DUMPREG(reg_id) \
do { \
DRM_DEBUG_KMS(#reg_id " = %08x\n", \
- (u32) readl(ctx->mixer_res.vp_regs + reg_id)); \
+ (u32) readl(ctx->vp_regs + reg_id)); \
} while (0)
DUMPREG(VP_ENABLE);
@@ -301,7 +298,7 @@ do { \
#undef DUMPREG
}
-static inline void vp_filter_set(struct mixer_resources *res,
+static inline void vp_filter_set(struct mixer_context *ctx,
int reg_id, const u8 *data, unsigned int size)
{
/* assure 4-byte align */
@@ -309,24 +306,23 @@ static inline void vp_filter_set(struct mixer_resources *res,
for (; size; size -= 4, reg_id += 4, data += 4) {
u32 val = (data[0] << 24) | (data[1] << 16) |
(data[2] << 8) | data[3];
- vp_reg_write(res, reg_id, val);
+ vp_reg_write(ctx, reg_id, val);
}
}
-static void vp_default_filter(struct mixer_resources *res)
+static void vp_default_filter(struct mixer_context *ctx)
{
- vp_filter_set(res, VP_POLY8_Y0_LL,
+ vp_filter_set(ctx, VP_POLY8_Y0_LL,
filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
- vp_filter_set(res, VP_POLY4_Y0_LL,
+ vp_filter_set(ctx, VP_POLY4_Y0_LL,
filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
- vp_filter_set(res, VP_POLY4_C0_LL,
+ vp_filter_set(ctx, VP_POLY4_C0_LL,
filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
}
static void mixer_cfg_gfx_blend(struct mixer_context *ctx, unsigned int win,
bool alpha)
{
- struct mixer_resources *res = &ctx->mixer_res;
u32 val;
val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
@@ -335,13 +331,12 @@ static void mixer_cfg_gfx_blend(struct mixer_context *ctx, unsigned int win,
val |= MXR_GRP_CFG_BLEND_PRE_MUL;
val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
}
- mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
+ mixer_reg_writemask(ctx, MXR_GRAPHIC_CFG(win),
val, MXR_GRP_CFG_MISC_MASK);
}
static void mixer_cfg_vp_blend(struct mixer_context *ctx)
{
- struct mixer_resources *res = &ctx->mixer_res;
u32 val;
/*
@@ -351,51 +346,39 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx)
* support blending of the video layer through this.
*/
val = 0;
- mixer_reg_write(res, MXR_VIDEO_CFG, val);
+ mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
}
static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
{
- struct mixer_resources *res = &ctx->mixer_res;
-
/* block update on vsync */
- mixer_reg_writemask(res, MXR_STATUS, enable ?
+ mixer_reg_writemask(ctx, MXR_STATUS, enable ?
MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
- vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
+ vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ?
VP_SHADOW_UPDATE_ENABLE : 0);
}
-static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
+static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
{
- struct mixer_resources *res = &ctx->mixer_res;
u32 val;
/* choosing between interlace and progressive mode */
val = test_bit(MXR_BIT_INTERLACE, &ctx->flags) ?
MXR_CFG_SCAN_INTERLACE : MXR_CFG_SCAN_PROGRESSIVE;
- if (ctx->mxr_ver != MXR_VER_128_0_0_184) {
- /* choosing between proper HD and SD mode */
- if (height <= 480)
- val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
- else if (height <= 576)
- val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
- else if (height <= 720)
- val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
- else if (height <= 1080)
- val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
- else
- val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
- }
+ if (ctx->mxr_ver == MXR_VER_128_0_0_184)
+ mixer_reg_write(ctx, MXR_RESOLUTION,
+ MXR_MXR_RES_HEIGHT(height) | MXR_MXR_RES_WIDTH(width));
+ else
+ val |= ctx->scan_value;
- mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
+ mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_SCAN_MASK);
}
static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
{
- struct mixer_resources *res = &ctx->mixer_res;
u32 val;
switch (height) {
@@ -408,45 +391,44 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
default:
val = MXR_CFG_RGB709_16_235;
/* Configure the BT.709 CSC matrix for full range RGB. */
- mixer_reg_write(res, MXR_CM_COEFF_Y,
+ mixer_reg_write(ctx, MXR_CM_COEFF_Y,
MXR_CSC_CT( 0.184, 0.614, 0.063) |
MXR_CM_COEFF_RGB_FULL);
- mixer_reg_write(res, MXR_CM_COEFF_CB,
+ mixer_reg_write(ctx, MXR_CM_COEFF_CB,
MXR_CSC_CT(-0.102, -0.338, 0.440));
- mixer_reg_write(res, MXR_CM_COEFF_CR,
+ mixer_reg_write(ctx, MXR_CM_COEFF_CR,
MXR_CSC_CT( 0.440, -0.399, -0.040));
break;
}
- mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
+ mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
}
static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win,
unsigned int priority, bool enable)
{
- struct mixer_resources *res = &ctx->mixer_res;
u32 val = enable ? ~0 : 0;
switch (win) {
case 0:
- mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
- mixer_reg_writemask(res, MXR_LAYER_CFG,
+ mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
+ mixer_reg_writemask(ctx, MXR_LAYER_CFG,
MXR_LAYER_CFG_GRP0_VAL(priority),
MXR_LAYER_CFG_GRP0_MASK);
break;
case 1:
- mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
- mixer_reg_writemask(res, MXR_LAYER_CFG,
+ mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
+ mixer_reg_writemask(ctx, MXR_LAYER_CFG,
MXR_LAYER_CFG_GRP1_VAL(priority),
MXR_LAYER_CFG_GRP1_MASK);
break;
case VP_DEFAULT_WIN:
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
- vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
- mixer_reg_writemask(res, MXR_CFG, val,
+ vp_reg_writemask(ctx, VP_ENABLE, val, VP_ENABLE_ON);
+ mixer_reg_writemask(ctx, MXR_CFG, val,
MXR_CFG_VP_ENABLE);
- mixer_reg_writemask(res, MXR_LAYER_CFG,
+ mixer_reg_writemask(ctx, MXR_LAYER_CFG,
MXR_LAYER_CFG_VP_VAL(priority),
MXR_LAYER_CFG_VP_MASK);
}
@@ -456,30 +438,34 @@ static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win,
static void mixer_run(struct mixer_context *ctx)
{
- struct mixer_resources *res = &ctx->mixer_res;
-
- mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
+ mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
}
static void mixer_stop(struct mixer_context *ctx)
{
- struct mixer_resources *res = &ctx->mixer_res;
int timeout = 20;
- mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
+ mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
- while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
+ while (!(mixer_reg_read(ctx, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
--timeout)
usleep_range(10000, 12000);
}
+static void mixer_commit(struct mixer_context *ctx)
+{
+ struct drm_display_mode *mode = &ctx->crtc->base.state->adjusted_mode;
+
+ mixer_cfg_scan(ctx, mode->hdisplay, mode->vdisplay);
+ mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
+ mixer_run(ctx);
+}
+
static void vp_video_buffer(struct mixer_context *ctx,
struct exynos_drm_plane *plane)
{
struct exynos_drm_plane_state *state =
to_exynos_plane_state(plane->base.state);
- struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
- struct mixer_resources *res = &ctx->mixer_res;
struct drm_framebuffer *fb = state->base.fb;
unsigned int priority = state->base.normalized_zpos + 1;
unsigned long flags;
@@ -493,8 +479,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
luma_addr[0] = exynos_drm_fb_dma_addr(fb, 0);
chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- __set_bit(MXR_BIT_INTERLACE, &ctx->flags);
+ if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
if (is_tiled) {
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
@@ -503,63 +488,59 @@ static void vp_video_buffer(struct mixer_context *ctx,
chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
}
} else {
- __clear_bit(MXR_BIT_INTERLACE, &ctx->flags);
luma_addr[1] = 0;
chroma_addr[1] = 0;
}
- spin_lock_irqsave(&res->reg_slock, flags);
+ spin_lock_irqsave(&ctx->reg_slock, flags);
/* interlace or progressive scan mode */
val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
- vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
+ vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
/* setup format */
val = (is_nv21 ? VP_MODE_NV21 : VP_MODE_NV12);
val |= (is_tiled ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
- vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
+ vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
- vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
+ vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height));
/* chroma plane for NV12/NV21 is half the height of the luma plane */
- vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
+ vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height / 2));
- vp_reg_write(res, VP_SRC_WIDTH, state->src.w);
- vp_reg_write(res, VP_SRC_HEIGHT, state->src.h);
- vp_reg_write(res, VP_SRC_H_POSITION,
+ vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w);
+ vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h);
+ vp_reg_write(ctx, VP_SRC_H_POSITION,
VP_SRC_H_POSITION_VAL(state->src.x));
- vp_reg_write(res, VP_SRC_V_POSITION, state->src.y);
+ vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y);
- vp_reg_write(res, VP_DST_WIDTH, state->crtc.w);
- vp_reg_write(res, VP_DST_H_POSITION, state->crtc.x);
+ vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w);
+ vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x);
if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
- vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h / 2);
- vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y / 2);
+ vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2);
+ vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2);
} else {
- vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h);
- vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y);
+ vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h);
+ vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y);
}
- vp_reg_write(res, VP_H_RATIO, state->h_ratio);
- vp_reg_write(res, VP_V_RATIO, state->v_ratio);
+ vp_reg_write(ctx, VP_H_RATIO, state->h_ratio);
+ vp_reg_write(ctx, VP_V_RATIO, state->v_ratio);
- vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
+ vp_reg_write(ctx, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
/* set buffer address to vp */
- vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]);
- vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]);
- vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
- vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
+ vp_reg_write(ctx, VP_TOP_Y_PTR, luma_addr[0]);
+ vp_reg_write(ctx, VP_BOT_Y_PTR, luma_addr[1]);
+ vp_reg_write(ctx, VP_TOP_C_PTR, chroma_addr[0]);
+ vp_reg_write(ctx, VP_BOT_C_PTR, chroma_addr[1]);
- mixer_cfg_scan(ctx, mode->vdisplay);
- mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
mixer_cfg_layer(ctx, plane->index, priority, true);
mixer_cfg_vp_blend(ctx);
- mixer_run(ctx);
- spin_unlock_irqrestore(&res->reg_slock, flags);
+ spin_unlock_irqrestore(&ctx->reg_slock, flags);
mixer_regs_dump(ctx);
vp_regs_dump(ctx);
@@ -567,9 +548,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
static void mixer_layer_update(struct mixer_context *ctx)
{
- struct mixer_resources *res = &ctx->mixer_res;
-
- mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+ mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
}
static void mixer_graph_buffer(struct mixer_context *ctx,
@@ -577,8 +556,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
{
struct exynos_drm_plane_state *state =
to_exynos_plane_state(plane->base.state);
- struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
- struct mixer_resources *res = &ctx->mixer_res;
struct drm_framebuffer *fb = state->base.fb;
unsigned int priority = state->base.normalized_zpos + 1;
unsigned long flags;
@@ -623,45 +600,30 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
+ (state->src.x * fb->format->cpp[0])
+ (state->src.y * fb->pitches[0]);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- __set_bit(MXR_BIT_INTERLACE, &ctx->flags);
- else
- __clear_bit(MXR_BIT_INTERLACE, &ctx->flags);
-
- spin_lock_irqsave(&res->reg_slock, flags);
+ spin_lock_irqsave(&ctx->reg_slock, flags);
/* setup format */
- mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
+ mixer_reg_writemask(ctx, MXR_GRAPHIC_CFG(win),
MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
/* setup geometry */
- mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
+ mixer_reg_write(ctx, MXR_GRAPHIC_SPAN(win),
fb->pitches[0] / fb->format->cpp[0]);
- /* setup display size */
- if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
- win == DEFAULT_WIN) {
- val = MXR_MXR_RES_HEIGHT(mode->vdisplay);
- val |= MXR_MXR_RES_WIDTH(mode->hdisplay);
- mixer_reg_write(res, MXR_RESOLUTION, val);
- }
-
val = MXR_GRP_WH_WIDTH(state->src.w);
val |= MXR_GRP_WH_HEIGHT(state->src.h);
val |= MXR_GRP_WH_H_SCALE(x_ratio);
val |= MXR_GRP_WH_V_SCALE(y_ratio);
- mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
+ mixer_reg_write(ctx, MXR_GRAPHIC_WH(win), val);
/* setup offsets in display image */
val = MXR_GRP_DXY_DX(dst_x_offset);
val |= MXR_GRP_DXY_DY(dst_y_offset);
- mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val);
+ mixer_reg_write(ctx, MXR_GRAPHIC_DXY(win), val);
/* set buffer address to mixer */
- mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
+ mixer_reg_write(ctx, MXR_GRAPHIC_BASE(win), dma_addr);
- mixer_cfg_scan(ctx, mode->vdisplay);
- mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
mixer_cfg_layer(ctx, win, priority, true);
mixer_cfg_gfx_blend(ctx, win, is_alpha_format(fb->format->format));
@@ -670,22 +632,19 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
ctx->mxr_ver == MXR_VER_128_0_0_184)
mixer_layer_update(ctx);
- mixer_run(ctx);
-
- spin_unlock_irqrestore(&res->reg_slock, flags);
+ spin_unlock_irqrestore(&ctx->reg_slock, flags);
mixer_regs_dump(ctx);
}
static void vp_win_reset(struct mixer_context *ctx)
{
- struct mixer_resources *res = &ctx->mixer_res;
unsigned int tries = 100;
- vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
+ vp_reg_write(ctx, VP_SRESET, VP_SRESET_PROCESSING);
while (--tries) {
/* waiting until VP_SRESET_PROCESSING is 0 */
- if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
+ if (~vp_reg_read(ctx, VP_SRESET) & VP_SRESET_PROCESSING)
break;
mdelay(10);
}
@@ -694,57 +653,55 @@ static void vp_win_reset(struct mixer_context *ctx)
static void mixer_win_reset(struct mixer_context *ctx)
{
- struct mixer_resources *res = &ctx->mixer_res;
unsigned long flags;
- spin_lock_irqsave(&res->reg_slock, flags);
+ spin_lock_irqsave(&ctx->reg_slock, flags);
- mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
+ mixer_reg_writemask(ctx, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
/* set output in RGB888 mode */
- mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
+ mixer_reg_writemask(ctx, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
/* 16 beat burst in DMA */
- mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
+ mixer_reg_writemask(ctx, MXR_STATUS, MXR_STATUS_16_BURST,
MXR_STATUS_BURST_MASK);
/* reset default layer priority */
- mixer_reg_write(res, MXR_LAYER_CFG, 0);
+ mixer_reg_write(ctx, MXR_LAYER_CFG, 0);
/* set all background colors to RGB (0,0,0) */
- mixer_reg_write(res, MXR_BG_COLOR0, MXR_YCBCR_VAL(0, 128, 128));
- mixer_reg_write(res, MXR_BG_COLOR1, MXR_YCBCR_VAL(0, 128, 128));
- mixer_reg_write(res, MXR_BG_COLOR2, MXR_YCBCR_VAL(0, 128, 128));
+ mixer_reg_write(ctx, MXR_BG_COLOR0, MXR_YCBCR_VAL(0, 128, 128));
+ mixer_reg_write(ctx, MXR_BG_COLOR1, MXR_YCBCR_VAL(0, 128, 128));
+ mixer_reg_write(ctx, MXR_BG_COLOR2, MXR_YCBCR_VAL(0, 128, 128));
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
/* configuration of Video Processor Registers */
vp_win_reset(ctx);
- vp_default_filter(res);
+ vp_default_filter(ctx);
}
/* disable all layers */
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
+ mixer_reg_writemask(ctx, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
+ mixer_reg_writemask(ctx, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
- mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+ mixer_reg_writemask(ctx, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
/* set all source image offsets to zero */
- mixer_reg_write(res, MXR_GRAPHIC_SXY(0), 0);
- mixer_reg_write(res, MXR_GRAPHIC_SXY(1), 0);
+ mixer_reg_write(ctx, MXR_GRAPHIC_SXY(0), 0);
+ mixer_reg_write(ctx, MXR_GRAPHIC_SXY(1), 0);
- spin_unlock_irqrestore(&res->reg_slock, flags);
+ spin_unlock_irqrestore(&ctx->reg_slock, flags);
}
static irqreturn_t mixer_irq_handler(int irq, void *arg)
{
struct mixer_context *ctx = arg;
- struct mixer_resources *res = &ctx->mixer_res;
u32 val, base, shadow;
- spin_lock(&res->reg_slock);
+ spin_lock(&ctx->reg_slock);
/* read interrupt status for handling and clearing flags for VSYNC */
- val = mixer_reg_read(res, MXR_INT_STATUS);
+ val = mixer_reg_read(ctx, MXR_INT_STATUS);
/* handling VSYNC */
if (val & MXR_INT_STATUS_VSYNC) {
@@ -754,13 +711,13 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
/* interlace scan need to check shadow register */
if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
- base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
- shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
+ base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
+ shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
if (base != shadow)
goto out;
- base = mixer_reg_read(res, MXR_GRAPHIC_BASE(1));
- shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
+ base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
+ shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
if (base != shadow)
goto out;
}
@@ -770,9 +727,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
out:
/* clear interrupts */
- mixer_reg_write(res, MXR_INT_STATUS, val);
+ mixer_reg_write(ctx, MXR_INT_STATUS, val);
- spin_unlock(&res->reg_slock);
+ spin_unlock(&ctx->reg_slock);
return IRQ_HANDLED;
}
@@ -780,26 +737,25 @@ out:
static int mixer_resources_init(struct mixer_context *mixer_ctx)
{
struct device *dev = &mixer_ctx->pdev->dev;
- struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
struct resource *res;
int ret;
- spin_lock_init(&mixer_res->reg_slock);
+ spin_lock_init(&mixer_ctx->reg_slock);
- mixer_res->mixer = devm_clk_get(dev, "mixer");
- if (IS_ERR(mixer_res->mixer)) {
+ mixer_ctx->mixer = devm_clk_get(dev, "mixer");
+ if (IS_ERR(mixer_ctx->mixer)) {
dev_err(dev, "failed to get clock 'mixer'\n");
return -ENODEV;
}
- mixer_res->hdmi = devm_clk_get(dev, "hdmi");
- if (IS_ERR(mixer_res->hdmi)) {
+ mixer_ctx->hdmi = devm_clk_get(dev, "hdmi");
+ if (IS_ERR(mixer_ctx->hdmi)) {
dev_err(dev, "failed to get clock 'hdmi'\n");
- return PTR_ERR(mixer_res->hdmi);
+ return PTR_ERR(mixer_ctx->hdmi);
}
- mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
- if (IS_ERR(mixer_res->sclk_hdmi)) {
+ mixer_ctx->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
+ if (IS_ERR(mixer_ctx->sclk_hdmi)) {
dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
return -ENODEV;
}
@@ -809,9 +765,9 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx)
return -ENXIO;
}
- mixer_res->mixer_regs = devm_ioremap(dev, res->start,
+ mixer_ctx->mixer_regs = devm_ioremap(dev, res->start,
resource_size(res));
- if (mixer_res->mixer_regs == NULL) {
+ if (mixer_ctx->mixer_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
return -ENXIO;
}
@@ -828,7 +784,7 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx)
dev_err(dev, "request interrupt failed.\n");
return ret;
}
- mixer_res->irq = res->start;
+ mixer_ctx->irq = res->start;
return 0;
}
@@ -836,30 +792,29 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx)
static int vp_resources_init(struct mixer_context *mixer_ctx)
{
struct device *dev = &mixer_ctx->pdev->dev;
- struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
struct resource *res;
- mixer_res->vp = devm_clk_get(dev, "vp");
- if (IS_ERR(mixer_res->vp)) {
+ mixer_ctx->vp = devm_clk_get(dev, "vp");
+ if (IS_ERR(mixer_ctx->vp)) {
dev_err(dev, "failed to get clock 'vp'\n");
return -ENODEV;
}
if (test_bit(MXR_BIT_HAS_SCLK, &mixer_ctx->flags)) {
- mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
- if (IS_ERR(mixer_res->sclk_mixer)) {
+ mixer_ctx->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
+ if (IS_ERR(mixer_ctx->sclk_mixer)) {
dev_err(dev, "failed to get clock 'sclk_mixer'\n");
return -ENODEV;
}
- mixer_res->mout_mixer = devm_clk_get(dev, "mout_mixer");
- if (IS_ERR(mixer_res->mout_mixer)) {
+ mixer_ctx->mout_mixer = devm_clk_get(dev, "mout_mixer");
+ if (IS_ERR(mixer_ctx->mout_mixer)) {
dev_err(dev, "failed to get clock 'mout_mixer'\n");
return -ENODEV;
}
- if (mixer_res->sclk_hdmi && mixer_res->mout_mixer)
- clk_set_parent(mixer_res->mout_mixer,
- mixer_res->sclk_hdmi);
+ if (mixer_ctx->sclk_hdmi && mixer_ctx->mout_mixer)
+ clk_set_parent(mixer_ctx->mout_mixer,
+ mixer_ctx->sclk_hdmi);
}
res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1);
@@ -868,9 +823,9 @@ static int vp_resources_init(struct mixer_context *mixer_ctx)
return -ENXIO;
}
- mixer_res->vp_regs = devm_ioremap(dev, res->start,
+ mixer_ctx->vp_regs = devm_ioremap(dev, res->start,
resource_size(res));
- if (mixer_res->vp_regs == NULL) {
+ if (mixer_ctx->vp_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
return -ENXIO;
}
@@ -914,15 +869,14 @@ static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct mixer_context *mixer_ctx = crtc->ctx;
- struct mixer_resources *res = &mixer_ctx->mixer_res;
__set_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return 0;
/* enable vsync interrupt */
- mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
- mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
+ mixer_reg_writemask(mixer_ctx, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+ mixer_reg_writemask(mixer_ctx, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
return 0;
}
@@ -930,7 +884,6 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
{
struct mixer_context *mixer_ctx = crtc->ctx;
- struct mixer_resources *res = &mixer_ctx->mixer_res;
__clear_bit(MXR_BIT_VSYNC, &mixer_ctx->flags);
@@ -938,8 +891,8 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
return;
/* disable vsync interrupt */
- mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
- mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
+ mixer_reg_writemask(mixer_ctx, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+ mixer_reg_writemask(mixer_ctx, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
@@ -972,7 +925,6 @@ static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
struct mixer_context *mixer_ctx = crtc->ctx;
- struct mixer_resources *res = &mixer_ctx->mixer_res;
unsigned long flags;
DRM_DEBUG_KMS("win: %d\n", plane->index);
@@ -980,9 +932,9 @@ static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
- spin_lock_irqsave(&res->reg_slock, flags);
+ spin_lock_irqsave(&mixer_ctx->reg_slock, flags);
mixer_cfg_layer(mixer_ctx, plane->index, 0, false);
- spin_unlock_irqrestore(&res->reg_slock, flags);
+ spin_unlock_irqrestore(&mixer_ctx->reg_slock, flags);
}
static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -999,7 +951,6 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
static void mixer_enable(struct exynos_drm_crtc *crtc)
{
struct mixer_context *ctx = crtc->ctx;
- struct mixer_resources *res = &ctx->mixer_res;
if (test_bit(MXR_BIT_POWERED, &ctx->flags))
return;
@@ -1010,14 +961,17 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
mixer_vsync_set_update(ctx, false);
- mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+ mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
if (test_bit(MXR_BIT_VSYNC, &ctx->flags)) {
- mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
- mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
+ mixer_reg_writemask(ctx, MXR_INT_STATUS, ~0,
+ MXR_INT_CLEAR_VSYNC);
+ mixer_reg_writemask(ctx, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
}
mixer_win_reset(ctx);
+ mixer_commit(ctx);
+
mixer_vsync_set_update(ctx, true);
set_bit(MXR_BIT_POWERED, &ctx->flags);
@@ -1044,26 +998,75 @@ static void mixer_disable(struct exynos_drm_crtc *crtc)
clear_bit(MXR_BIT_POWERED, &ctx->flags);
}
-/* Only valid for Mixer version 16.0.33.0 */
-static int mixer_atomic_check(struct exynos_drm_crtc *crtc,
- struct drm_crtc_state *state)
+static int mixer_mode_valid(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
- struct drm_display_mode *mode = &state->adjusted_mode;
- u32 w, h;
+ struct mixer_context *ctx = crtc->ctx;
+ u32 w = mode->hdisplay, h = mode->vdisplay;
- w = mode->hdisplay;
- h = mode->vdisplay;
+ DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n", w, h,
+ mode->vrefresh, !!(mode->flags & DRM_MODE_FLAG_INTERLACE));
- DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n",
- mode->hdisplay, mode->vdisplay, mode->vrefresh,
- (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
+ if (ctx->mxr_ver == MXR_VER_128_0_0_184)
+ return MODE_OK;
if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
- (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
- (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
- return 0;
+ (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
+ (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
+ return MODE_OK;
+
+ if ((w == 1024 && h == 768) ||
+ (w == 1366 && h == 768) ||
+ (w == 1280 && h == 1024))
+ return MODE_OK;
+
+ return MODE_BAD;
+}
+
+static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mixer_context *ctx = crtc->ctx;
+ int width = mode->hdisplay, height = mode->vdisplay, i;
+
+ struct {
+ int hdisplay, vdisplay, htotal, vtotal, scan_val;
+ } static const modes[] = {
+ { 720, 480, 858, 525, MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD },
+ { 720, 576, 864, 625, MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD },
+ { 1280, 720, 1650, 750, MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD },
+ { 1920, 1080, 2200, 1125, MXR_CFG_SCAN_HD_1080 |
+ MXR_CFG_SCAN_HD }
+ };
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ __set_bit(MXR_BIT_INTERLACE, &ctx->flags);
+ else
+ __clear_bit(MXR_BIT_INTERLACE, &ctx->flags);
+
+ if (ctx->mxr_ver == MXR_VER_128_0_0_184)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(modes); ++i)
+ if (width <= modes[i].hdisplay && height <= modes[i].vdisplay) {
+ ctx->scan_value = modes[i].scan_val;
+ if (width < modes[i].hdisplay ||
+ height < modes[i].vdisplay) {
+ adjusted_mode->hdisplay = modes[i].hdisplay;
+ adjusted_mode->hsync_start = modes[i].hdisplay;
+ adjusted_mode->hsync_end = modes[i].htotal;
+ adjusted_mode->htotal = modes[i].htotal;
+ adjusted_mode->vdisplay = modes[i].vdisplay;
+ adjusted_mode->vsync_start = modes[i].vdisplay;
+ adjusted_mode->vsync_end = modes[i].vtotal;
+ adjusted_mode->vtotal = modes[i].vtotal;
+ }
+
+ return true;
+ }
- return -EINVAL;
+ return false;
}
static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
@@ -1075,7 +1078,8 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
.update_plane = mixer_update_plane,
.disable_plane = mixer_disable_plane,
.atomic_flush = mixer_atomic_flush,
- .atomic_check = mixer_atomic_check,
+ .mode_valid = mixer_mode_valid,
+ .mode_fixup = mixer_mode_fixup,
};
static const struct mixer_drv_data exynos5420_mxr_drv_data = {
@@ -1217,14 +1221,13 @@ static int mixer_remove(struct platform_device *pdev)
static int __maybe_unused exynos_mixer_suspend(struct device *dev)
{
struct mixer_context *ctx = dev_get_drvdata(dev);
- struct mixer_resources *res = &ctx->mixer_res;
- clk_disable_unprepare(res->hdmi);
- clk_disable_unprepare(res->mixer);
+ clk_disable_unprepare(ctx->hdmi);
+ clk_disable_unprepare(ctx->mixer);
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
- clk_disable_unprepare(res->vp);
+ clk_disable_unprepare(ctx->vp);
if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags))
- clk_disable_unprepare(res->sclk_mixer);
+ clk_disable_unprepare(ctx->sclk_mixer);
}
return 0;
@@ -1233,28 +1236,27 @@ static int __maybe_unused exynos_mixer_suspend(struct device *dev)
static int __maybe_unused exynos_mixer_resume(struct device *dev)
{
struct mixer_context *ctx = dev_get_drvdata(dev);
- struct mixer_resources *res = &ctx->mixer_res;
int ret;
- ret = clk_prepare_enable(res->mixer);
+ ret = clk_prepare_enable(ctx->mixer);
if (ret < 0) {
DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret);
return ret;
}
- ret = clk_prepare_enable(res->hdmi);
+ ret = clk_prepare_enable(ctx->hdmi);
if (ret < 0) {
DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
return ret;
}
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
- ret = clk_prepare_enable(res->vp);
+ ret = clk_prepare_enable(ctx->vp);
if (ret < 0) {
DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
ret);
return ret;
}
if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) {
- ret = clk_prepare_enable(res->sclk_mixer);
+ ret = clk_prepare_enable(ctx->sclk_mixer);
if (ret < 0) {
DRM_ERROR("Failed to prepare_enable the " \
"sclk_mixer clk [%d]\n",
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index a0507dc18d9e..04be0f7e8193 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -419,11 +419,9 @@
#define HDMI_I2S_DSD_CON HDMI_I2S_BASE(0x01c)
#define HDMI_I2S_MUX_CON HDMI_I2S_BASE(0x020)
#define HDMI_I2S_CH_ST_CON HDMI_I2S_BASE(0x024)
-#define HDMI_I2S_CH_ST_0 HDMI_I2S_BASE(0x028)
-#define HDMI_I2S_CH_ST_1 HDMI_I2S_BASE(0x02c)
-#define HDMI_I2S_CH_ST_2 HDMI_I2S_BASE(0x030)
-#define HDMI_I2S_CH_ST_3 HDMI_I2S_BASE(0x034)
-#define HDMI_I2S_CH_ST_4 HDMI_I2S_BASE(0x038)
+/* n must be within range 0...(HDMI_I2S_CH_ST_MAXNUM - 1) */
+#define HDMI_I2S_CH_ST_MAXNUM 5
+#define HDMI_I2S_CH_ST(n) HDMI_I2S_BASE(0x028 + 4 * (n))
#define HDMI_I2S_CH_ST_SH_0 HDMI_I2S_BASE(0x03c)
#define HDMI_I2S_CH_ST_SH_1 HDMI_I2S_BASE(0x040)
#define HDMI_I2S_CH_ST_SH_2 HDMI_I2S_BASE(0x044)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
index d9d6cc1c8e39..ddc68e476a4d 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "fsl_dcu_drm_crtc.h"
#include "fsl_dcu_drm_drv.h"
@@ -20,7 +21,7 @@
static const struct drm_mode_config_funcs fsl_dcu_drm_mode_config_funcs = {
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
};
int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index c52f9adf5e04..a4bb89b7878f 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1901,10 +1901,8 @@ cdv_intel_dp_destroy(struct drm_connector *connector)
if (is_edp(gma_encoder)) {
/* cdv_intel_panel_destroy_backlight(connector->dev); */
- if (intel_dp->panel_fixed_mode) {
- kfree(intel_dp->panel_fixed_mode);
- intel_dp->panel_fixed_mode = NULL;
- }
+ kfree(intel_dp->panel_fixed_mode);
+ intel_dp->panel_fixed_mode = NULL;
}
i2c_del_adapter(&intel_dp->adapter);
drm_connector_unregister(connector);
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 531e4450c000..5c066448be5b 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -99,7 +99,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
/* Wait for for the pipe enable to take effect. */
for (count = 0; count < COUNT_MAX; count++) {
temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_PIPE_STATE) == 1)
+ if (temp & PIPEACONF_PIPE_STATE)
break;
}
}
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index d75ecb3bdee7..1fa163373a47 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -237,7 +237,7 @@ static int mid_get_vbt_data_r10(struct drm_psb_private *dev_priv, u32 addr)
gct = kmalloc(sizeof(*gct) * vbt.panel_count, GFP_KERNEL);
if (!gct)
- return -1;
+ return -ENOMEM;
gct_virtual = ioremap(addr + sizeof(vbt),
sizeof(*gct) * vbt.panel_count);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index e787d376ba67..84507912be84 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -37,6 +37,7 @@
#include "psb_drv.h"
#include "psb_intel_sdvo_regs.h"
#include "psb_intel_reg.h"
+#include <linux/kernel.h>
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
@@ -62,8 +63,6 @@ static const char *tv_format_names[] = {
"SECAM_60"
};
-#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
-
struct psb_intel_sdvo {
struct gma_encoder base;
@@ -148,7 +147,7 @@ struct psb_intel_sdvo_connector {
int force_audio;
/* This contains all current supported TV format */
- u8 tv_format_supported[TV_FORMAT_NUM];
+ u8 tv_format_supported[ARRAY_SIZE(tv_format_names)];
int format_supported_num;
struct drm_property *tv_format;
@@ -1709,7 +1708,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
}
if (property == psb_intel_sdvo_connector->tv_format) {
- if (val >= TV_FORMAT_NUM)
+ if (val >= ARRAY_SIZE(tv_format_names))
return -EINVAL;
if (psb_intel_sdvo->tv_format_index ==
@@ -2269,7 +2268,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
return false;
psb_intel_sdvo_connector->format_supported_num = 0;
- for (i = 0 ; i < TV_FORMAT_NUM; i++)
+ for (i = 0 ; i < ARRAY_SIZE(tv_format_names); i++)
if (format_map & (1 << i))
psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index ec4dd9df9150..f4eba87c96f3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -36,7 +36,7 @@ static int hibmc_connector_mode_valid(struct drm_connector *connector,
static struct drm_encoder *
hibmc_connector_best_encoder(struct drm_connector *connector)
{
- return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
+ return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
}
static const struct drm_connector_helper_funcs
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 9823477b1855..2269be91f3e1 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -534,9 +534,12 @@ static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
{
struct ade_crtc *acrtc = to_ade_crtc(crtc);
struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct drm_display_mode *mode = &crtc->state->mode;
+ struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
if (!ctx->power_on)
(void)ade_power_up(ctx);
+ ade_ldi_set_mode(acrtc, mode, adj_mode);
}
static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index e27352ca26c4..ddb0403f1975 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -22,6 +22,7 @@
#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_of.h>
@@ -56,7 +57,7 @@ static void kirin_fbdev_output_poll_changed(struct drm_device *dev)
}
static const struct drm_mode_config_funcs kirin_drm_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = kirin_fbdev_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
@@ -236,8 +237,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev)
}
remote = of_graph_get_remote_node(np, 0, 0);
- if (IS_ERR(remote))
- return PTR_ERR(remote);
+ if (!remote)
+ return -ENODEV;
drm_of_component_match_add(dev, &match, compare_of, remote);
of_node_put(remote);
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index e9e8ae2ec06b..544a8a2d3562 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -485,7 +485,7 @@ static int ch7006_encoder_init(struct i2c_client *client,
return 0;
}
-static struct i2c_device_id ch7006_ids[] = {
+static const struct i2c_device_id ch7006_ids[] = {
{ "ch7006", 0 },
{ }
};
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index db0b03fb0ff1..ecaa58757529 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -415,7 +415,7 @@ sil164_encoder_init(struct i2c_client *client,
return 0;
}
-static struct i2c_device_id sil164_ids[] = {
+static const struct i2c_device_id sil164_ids[] = {
{ "sil164", 0 },
{ }
};
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 54e3255dde13..4d1f45acf2cd 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1746,7 +1746,7 @@ static const struct of_device_id tda998x_dt_ids[] = {
MODULE_DEVICE_TABLE(of, tda998x_dt_ids);
#endif
-static struct i2c_device_id tda998x_ids[] = {
+static const struct i2c_device_id tda998x_ids[] = {
{ "tda998x", 0 },
{ }
};
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index e9e64e8e9765..dfd95889f4b7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -12,6 +12,7 @@ config DRM_I915
select DRM_PANEL
select DRM_MIPI_DSI
select RELAY
+ select IRQ_WORK
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2e034efc4d6d..2acf3b3c5f9d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -48,6 +48,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_tiling.o \
i915_gem_timeline.o \
i915_gem_userptr.o \
+ i915_gemfs.o \
i915_trace_points.o \
i915_vma.o \
intel_breadcrumbs.o \
@@ -60,9 +61,11 @@ i915-y += i915_cmd_parser.o \
# general-purpose microcontroller (GuC) support
i915-y += intel_uc.o \
+ intel_uc_fw.o \
+ intel_guc.o \
intel_guc_ct.o \
intel_guc_log.o \
- intel_guc_loader.o \
+ intel_guc_fw.o \
intel_huc.o \
i915_guc_submission.o
@@ -140,7 +143,8 @@ i915-y += i915_perf.o \
i915_oa_bxt.o \
i915_oa_kblgt2.o \
i915_oa_kblgt3.o \
- i915_oa_glk.o
+ i915_oa_glk.o \
+ i915_oa_cflgt2.o
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
@@ -151,5 +155,3 @@ endif
i915-y += intel_lpe_audio.o
obj-$(CONFIG_DRM_I915) += i915.o
-
-CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index ca3d1925beda..7c9ec4f4f36c 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -173,8 +173,8 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
- list_add_tail(&reg->link,
- &dev_priv->mm.fence_list);
+ i915_unreserve_fence(reg);
+ vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -187,24 +187,19 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct drm_i915_fence_reg *reg;
int i;
- struct list_head *pos, *q;
intel_runtime_pm_get(dev_priv);
/* Request fences from host */
mutex_lock(&dev_priv->drm.struct_mutex);
- i = 0;
- list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
- reg = list_entry(pos, struct drm_i915_fence_reg, link);
- if (reg->pin_count || reg->vma)
- continue;
- list_del(pos);
+
+ for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+ reg = i915_reserve_fence(dev_priv);
+ if (IS_ERR(reg))
+ goto out_free_fence;
+
vgpu->fence.regs[i] = reg;
- if (++i == vgpu_fence_sz(vgpu))
- break;
}
- if (i != vgpu_fence_sz(vgpu))
- goto out_free_fence;
_clear_vgpu_fence(vgpu);
@@ -212,13 +207,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_put(dev_priv);
return 0;
out_free_fence:
+ gvt_vgpu_err("Failed to alloc fences\n");
/* Return fences to host, if fail */
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
if (!reg)
continue;
- list_add_tail(&reg->link,
- &dev_priv->mm.fence_list);
+ i915_unreserve_fence(reg);
+ vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index ff3154fe6588..ab19545d59a1 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -101,7 +101,7 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
if (WARN_ON(bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
+ if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
@@ -110,13 +110,25 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
- u64 first_gfn, first_mfn;
+ phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
+ unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
+ u64 first_gfn;
u64 val;
int ret;
if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
return 0;
+ if (map) {
+ vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
+ MEMREMAP_WC);
+ if (!vgpu->gm.aperture_va)
+ return -ENOMEM;
+ } else {
+ memunmap(vgpu->gm.aperture_va);
+ vgpu->gm.aperture_va = NULL;
+ }
+
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
@@ -124,14 +136,16 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
- first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
- first_mfn,
- vgpu_aperture_sz(vgpu) >>
- PAGE_SHIFT, map);
- if (ret)
+ aperture_pa >> PAGE_SHIFT,
+ aperture_sz >> PAGE_SHIFT,
+ map);
+ if (ret) {
+ memunmap(vgpu->gm.aperture_va);
+ vgpu->gm.aperture_va = NULL;
return ret;
+ }
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
return 0;
@@ -275,7 +289,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
if (WARN_ON(bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
+ if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
/* First check if it's PCI_COMMAND */
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d4726a3358a4..701a3c6f1669 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1576,11 +1576,11 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
return 1;
}
-static uint32_t find_bb_size(struct parser_exec_state *s)
+static int find_bb_size(struct parser_exec_state *s)
{
unsigned long gma = 0;
struct cmd_info *info;
- uint32_t bb_size = 0;
+ int bb_size = 0;
uint32_t cmd_len = 0;
bool met_bb_end = false;
struct intel_vgpu *vgpu = s->vgpu;
@@ -1637,6 +1637,8 @@ static int perform_bb_shadow(struct parser_exec_state *s)
/* get the size of the batch buffer */
bb_size = find_bb_size(s);
+ if (bb_size < 0)
+ return -EINVAL;
/* allocate shadow batch buffer */
entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
@@ -2603,7 +2605,8 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
- u32 *cs;
+ void *shadow_ring_buffer_va;
+ int ring_id = workload->ring_id;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2616,34 +2619,42 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
- /* allocate shadow ring buffer */
- cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
- if (IS_ERR(cs))
- return PTR_ERR(cs);
+ if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
+ void *va = vgpu->reserve_ring_buffer_va[ring_id];
+ /* realloc the new ring buffer if needed */
+ vgpu->reserve_ring_buffer_va[ring_id] =
+ krealloc(va, workload->rb_len, GFP_KERNEL);
+ if (!vgpu->reserve_ring_buffer_va[ring_id]) {
+ gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+ return -ENOMEM;
+ }
+ vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
+ }
+
+ shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
/* get shadow ring buffer va */
- workload->shadow_ring_buffer_va = cs;
+ workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
/* head > tail --> copy head <-> top */
if (gma_head > gma_tail) {
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
- gma_head, gma_top, cs);
+ gma_head, gma_top, shadow_ring_buffer_va);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
- cs += ret / sizeof(u32);
+ shadow_ring_buffer_va += ret;
gma_head = workload->rb_start;
}
/* copy head or start <-> tail */
- ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
+ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
+ shadow_ring_buffer_va);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
- cs += ret / sizeof(u32);
- intel_ring_advance(workload->req, cs);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index e5320b4eb698..4427be18e4a9 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -368,7 +368,7 @@ static void free_workload(struct intel_vgpu_workload *workload)
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
-static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
struct intel_shadow_bb_entry *entry_obj;
@@ -379,7 +379,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
if (IS_ERR(vma)) {
- return;
+ return PTR_ERR(vma);
}
/* FIXME: we are not tracking our pinned VMA leaving it
@@ -392,6 +392,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
if (gmadr_bytes == 8)
entry_obj->bb_start_cmd_va[2] = 0;
}
+ return 0;
}
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -420,7 +421,7 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
-static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
struct i915_vma *vma;
unsigned char *per_ctx_va =
@@ -428,12 +429,12 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx->indirect_ctx.size;
if (wa_ctx->indirect_ctx.size == 0)
- return;
+ return 0;
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
0, CACHELINE_BYTES, 0);
if (IS_ERR(vma)) {
- return;
+ return PTR_ERR(vma);
}
/* FIXME: we are not tracking our pinned VMA leaving it
@@ -447,26 +448,7 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
memset(per_ctx_va, 0, CACHELINE_BYTES);
update_wa_ctx_2_shadow_ctx(wa_ctx);
-}
-
-static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
-{
- struct intel_vgpu *vgpu = workload->vgpu;
- struct execlist_ctx_descriptor_format ctx[2];
- int ring_id = workload->ring_id;
-
- intel_vgpu_pin_mm(workload->shadow_mm);
- intel_vgpu_sync_oos_pages(workload->vgpu);
- intel_vgpu_flush_post_shadow(workload->vgpu);
- prepare_shadow_batch_buffer(workload);
- prepare_shadow_wa_ctx(&workload->wa_ctx);
- if (!workload->emulate_schedule_in)
- return 0;
-
- ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
- ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
-
- return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+ return 0;
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
@@ -489,13 +471,62 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
}
}
-static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
- if (!wa_ctx->indirect_ctx.obj)
- return;
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct execlist_ctx_descriptor_format ctx[2];
+ int ring_id = workload->ring_id;
+ int ret;
+
+ ret = intel_vgpu_pin_mm(workload->shadow_mm);
+ if (ret) {
+ gvt_vgpu_err("fail to vgpu pin mm\n");
+ goto out;
+ }
+
+ ret = intel_vgpu_sync_oos_pages(workload->vgpu);
+ if (ret) {
+ gvt_vgpu_err("fail to vgpu sync oos pages\n");
+ goto err_unpin_mm;
+ }
- i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
- i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+ ret = intel_vgpu_flush_post_shadow(workload->vgpu);
+ if (ret) {
+ gvt_vgpu_err("fail to flush post shadow\n");
+ goto err_unpin_mm;
+ }
+
+ ret = prepare_shadow_batch_buffer(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
+ goto err_unpin_mm;
+ }
+
+ ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
+ if (ret) {
+ gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
+ goto err_shadow_batch;
+ }
+
+ if (!workload->emulate_schedule_in)
+ return 0;
+
+ ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
+ ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+
+ ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+ if (!ret)
+ goto out;
+ else
+ gvt_vgpu_err("fail to emulate execlist schedule in\n");
+
+ release_shadow_wa_ctx(&workload->wa_ctx);
+err_shadow_batch:
+ release_shadow_batch_buffer(workload);
+err_unpin_mm:
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+out:
+ return ret;
}
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
@@ -511,8 +542,10 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
- release_shadow_batch_buffer(workload);
- release_shadow_wa_ctx(&workload->wa_ctx);
+ if (!workload->status) {
+ release_shadow_batch_buffer(workload);
+ release_shadow_wa_ctx(&workload->wa_ctx);
+ }
if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
/* if workload->status is not successful means HW GPU
@@ -819,10 +852,21 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
{
+ enum intel_engine_id i;
+ struct intel_engine_cs *engine;
+
clean_workloads(vgpu, ALL_ENGINES);
kmem_cache_destroy(vgpu->workloads);
+
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ kfree(vgpu->reserve_ring_buffer_va[i]);
+ vgpu->reserve_ring_buffer_va[i] = NULL;
+ vgpu->reserve_ring_buffer_size[i] = 0;
+ }
+
}
+#define RESERVE_RING_BUFFER_SIZE ((1 * PAGE_SIZE)/8)
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
{
enum intel_engine_id i;
@@ -842,7 +886,26 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
if (!vgpu->workloads)
return -ENOMEM;
+ /* each ring has a shadow ring buffer until the vgpu is destroyed */
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ vgpu->reserve_ring_buffer_va[i] =
+ kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
+ if (!vgpu->reserve_ring_buffer_va[i]) {
+ gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+ goto out;
+ }
+ vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
+ }
return 0;
+out:
+ for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+ if (vgpu->reserve_ring_buffer_size[i]) {
+ kfree(vgpu->reserve_ring_buffer_va[i]);
+ vgpu->reserve_ring_buffer_va[i] = NULL;
+ vgpu->reserve_ring_buffer_size[i] = 0;
+ }
+ }
+ return -ENOMEM;
}
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index e6dfc3331f4b..2801d70579d8 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1647,14 +1647,13 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
return 0;
- atomic_inc(&mm->pincount);
-
if (!mm->shadowed) {
ret = shadow_mm(mm);
if (ret)
return ret;
}
+ atomic_inc(&mm->pincount);
list_del_init(&mm->lru_list);
list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
return 0;
@@ -1972,7 +1971,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
*/
se.val64 |= _PAGE_PRESENT | _PAGE_RW;
if (type == GTT_TYPE_PPGTT_PDE_PT)
- se.val64 |= PPAT_CACHED_INDEX;
+ se.val64 |= PPAT_CACHED;
for (i = 0; i < page_entry_num; i++)
ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index c27c6838eaca..aaa347f8620c 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -111,7 +111,7 @@ static void init_device_info(struct intel_gvt *gvt)
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
|| IS_KABYLAKE(gvt->dev_priv)) {
info->max_support_vgpus = 8;
- info->cfg_space_size = 256;
+ info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
info->mmio_size = 2 * 1024 * 1024;
info->mmio_bar = 0;
info->gtt_start_offset = 8 * 1024 * 1024;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 44b719eda8c4..9c2e7c0aa38f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -80,6 +80,7 @@ struct intel_gvt_device_info {
struct intel_vgpu_gm {
u64 aperture_sz;
u64 hidden_sz;
+ void *aperture_va;
struct drm_mm_node low_gm_node;
struct drm_mm_node high_gm_node;
};
@@ -99,7 +100,6 @@ struct intel_vgpu_mmio {
bool disable_warn_untrack;
};
-#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
#define INTEL_GVT_MAX_BAR_NUM 4
struct intel_vgpu_pci_bar {
@@ -108,7 +108,7 @@ struct intel_vgpu_pci_bar {
};
struct intel_vgpu_cfg_space {
- unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
+ unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};
@@ -165,6 +165,9 @@ struct intel_vgpu {
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
+ /* 512 bytes (1/2K) reserved ring buffer for each engine */
+ void *reserve_ring_buffer_va[I915_NUM_ENGINES];
+ int reserve_ring_buffer_size[I915_NUM_ENGINES];
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
@@ -474,6 +477,13 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
+static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
+{
+ /* This is a 64-bit BAR. */
+ return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
+ PCI_BASE_ADDRESS_MEM_MASK;
+}
+
void intel_gvt_clean_opregion(struct intel_gvt *gvt);
int intel_gvt_init_opregion(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 83e88c70272a..96060920a6fe 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -609,21 +609,20 @@ static void intel_vgpu_release_work(struct work_struct *work)
__intel_vgpu_release(vgpu);
}
-static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
+static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
u32 start_lo, start_hi;
u32 mem_type;
- int pos = PCI_BASE_ADDRESS_0;
- start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+ start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
PCI_BASE_ADDRESS_MEM_MASK;
- mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+ mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
PCI_BASE_ADDRESS_MEM_TYPE_MASK;
switch (mem_type) {
case PCI_BASE_ADDRESS_MEM_TYPE_64:
start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
- + pos + 4));
+ + bar + 4));
break;
case PCI_BASE_ADDRESS_MEM_TYPE_32:
case PCI_BASE_ADDRESS_MEM_TYPE_1M:
@@ -637,6 +636,21 @@ static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
return ((u64)start_hi << 32) | start_lo;
}
+static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
+ void *buf, unsigned int count, bool is_write)
+{
+ uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
+ int ret;
+
+ if (is_write)
+ ret = intel_gvt_ops->emulate_mmio_write(vgpu,
+ bar_start + off, buf, count);
+ else
+ ret = intel_gvt_ops->emulate_mmio_read(vgpu,
+ bar_start + off, buf, count);
+ return ret;
+}
+
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
size_t count, loff_t *ppos, bool is_write)
{
@@ -661,20 +675,14 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
buf, count);
break;
case VFIO_PCI_BAR0_REGION_INDEX:
- case VFIO_PCI_BAR1_REGION_INDEX:
- if (is_write) {
- uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
-
- ret = intel_gvt_ops->emulate_mmio_write(vgpu,
- bar0_start + pos, buf, count);
- } else {
- uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
-
- ret = intel_gvt_ops->emulate_mmio_read(vgpu,
- bar0_start + pos, buf, count);
- }
+ ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
+ buf, count, is_write);
break;
case VFIO_PCI_BAR2_REGION_INDEX:
+ ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
+ buf, count, is_write);
+ break;
+ case VFIO_PCI_BAR1_REGION_INDEX:
case VFIO_PCI_BAR3_REGION_INDEX:
case VFIO_PCI_BAR4_REGION_INDEX:
case VFIO_PCI_BAR5_REGION_INDEX:
@@ -970,7 +978,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
switch (info.index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = INTEL_GVT_MAX_CFG_SPACE_SZ;
+ info.size = vgpu->gvt->device_info.cfg_space_size;
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
break;
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 980ec8906b1e..1e1310f50289 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -45,8 +45,7 @@
*/
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
- u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
- ~GENMASK(3, 0);
+ u64 gttmmio_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
return gpa - gttmmio_gpa;
}
@@ -57,6 +56,38 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
(reg >= gvt->device_info.gtt_start_offset \
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
+static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
+{
+ u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
+ u64 aperture_sz = vgpu_aperture_sz(vgpu);
+
+ return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
+}
+
+static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
+ void *pdata, unsigned int size, bool is_read)
+{
+ u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
+ u64 offset = gpa - aperture_gpa;
+
+ if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
+ gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
+ offset, size);
+ return -EINVAL;
+ }
+
+ if (!vgpu->gm.aperture_va) {
+ gvt_vgpu_err("BAR is not enabled\n");
+ return -ENXIO;
+ }
+
+ if (is_read)
+ memcpy(pdata, vgpu->gm.aperture_va + offset, size);
+ else
+ memcpy(vgpu->gm.aperture_va + offset, pdata, size);
+ return 0;
+}
+
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
void *p_data, unsigned int bytes, bool read)
{
@@ -133,6 +164,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
}
mutex_lock(&gvt->lock);
+ if (vgpu_gpa_is_aperture(vgpu, pa)) {
+ ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
struct intel_vgpu_guest_page *gp;
@@ -224,6 +261,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
mutex_lock(&gvt->lock);
+ if (vgpu_gpa_is_aperture(vgpu, pa)) {
+ ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
+ mutex_unlock(&gvt->lock);
+ return ret;
+ }
+
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
struct intel_vgpu_guest_page *gp;
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 2ea542257f03..6d066cf35478 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -293,7 +293,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
*/
if (mmio->in_context &&
((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
- i915.enable_execlists)
+ i915_modparams.enable_execlists)
continue;
if (mmio->mask)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 391800d2067b..f6ded475bb2c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -87,7 +87,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return -EINVAL;
}
- page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+ page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
dst = kmap(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
GTT_PAGE_SIZE);
@@ -174,6 +174,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
+ case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
@@ -201,6 +202,43 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
ce->lrc_desc = desc;
}
+static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ void *shadow_ring_buffer_va;
+ u32 *cs;
+
+ /* allocate shadow ring buffer */
+ cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
+ if (IS_ERR(cs)) {
+ gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
+ workload->rb_len);
+ return PTR_ERR(cs);
+ }
+
+ shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
+
+ /* get shadow ring buffer va */
+ workload->shadow_ring_buffer_va = cs;
+
+ memcpy(cs, shadow_ring_buffer_va,
+ workload->rb_len);
+
+ cs += workload->rb_len / sizeof(u32);
+ intel_ring_advance(workload->req, cs);
+
+ return 0;
+}
+
+void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+ if (!wa_ctx->indirect_ctx.obj)
+ return;
+
+ i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+ i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+}
+
/**
* intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
* shadow it as well, including ringbuffer, wa_ctx and ctx.
@@ -214,8 +252,10 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_ring *ring;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -231,35 +271,56 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
shadow_context_descriptor_update(shadow_ctx,
dev_priv->engine[ring_id]);
- rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
- if (IS_ERR(rq)) {
- gvt_vgpu_err("fail to allocate gem request\n");
- ret = PTR_ERR(rq);
- goto out;
- }
-
- gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
-
- workload->req = i915_gem_request_get(rq);
-
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
- goto out;
+ goto err_scan;
if ((workload->ring_id == RCS) &&
(workload->wa_ctx.indirect_ctx.size != 0)) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
- goto out;
+ goto err_scan;
+ }
+
+ /* Pin the shadow context by GVT even though it will also be pinned
+ * when i915 allocates the request. This is because GVT updates the
+ * guest context from the shadow context when the workload completes,
+ * and by then i915 may already have unpinned the shadow context,
+ * leaving the shadow_ctx pages invalid. So GVT must take its own pin;
+ * after the guest context is updated, GVT can unpin shadow_ctx safely.
+ */
+ ring = engine->context_pin(engine, shadow_ctx);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
+ gvt_vgpu_err("fail to pin shadow context\n");
+ goto err_shadow;
}
ret = populate_shadow_context(workload);
if (ret)
- goto out;
+ goto err_unpin;
+ rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+ if (IS_ERR(rq)) {
+ gvt_vgpu_err("fail to allocate gem request\n");
+ ret = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
+
+ workload->req = i915_gem_request_get(rq);
+ ret = copy_workload_to_ring_buffer(workload);
+ if (ret)
+ goto err_unpin;
workload->shadowed = true;
+ return 0;
-out:
+err_unpin:
+ engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+ release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
return ret;
}
@@ -269,8 +330,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_ring *ring;
int ret = 0;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -284,22 +343,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (workload->prepare) {
ret = workload->prepare(workload);
- if (ret)
+ if (ret) {
+ engine->context_unpin(engine, shadow_ctx);
goto out;
- }
-
- /* pin shadow context by gvt even the shadow context will be pinned
- * when i915 alloc request. That is because gvt will update the guest
- * context from shadow context when workload is completed, and at that
- * moment, i915 may already unpined the shadow context to make the
- * shadow_ctx pages invalid. So gvt need to pin itself. After update
- * the guest context, gvt can unpin the shadow_ctx safely.
- */
- ring = engine->context_pin(engine, shadow_ctx);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- gvt_vgpu_err("fail to pin shadow context\n");
- goto out;
+ }
}
out:
@@ -408,7 +455,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
return;
}
- page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+ page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
src = kmap(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
GTT_PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 93a49eb0209e..2d694f6c0907 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -141,4 +141,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
+void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
#endif
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e4d4b6b41e26..c65e381b85f3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,6 +30,7 @@
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include "intel_drv.h"
+#include "i915_guc_submission.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -67,7 +68,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
#undef PRINT_FLAG
kernel_param_lock(THIS_MODULE);
-#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x);
+#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
I915_PARAMS_FOR_EACH(PRINT_PARAM);
#undef PRINT_PARAM
kernel_param_unlock(THIS_MODULE);
@@ -82,7 +83,7 @@ static char get_active_flag(struct drm_i915_gem_object *obj)
static char get_pin_flag(struct drm_i915_gem_object *obj)
{
- return obj->pin_display ? 'p' : ' ';
+ return obj->pin_global ? 'p' : ' ';
}
static char get_tiling_flag(struct drm_i915_gem_object *obj)
@@ -97,7 +98,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
static char get_global_flag(struct drm_i915_gem_object *obj)
{
- return !list_empty(&obj->userfault_link) ? 'g' : ' ';
+ return obj->userfault_count ? 'g' : ' ';
}
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
@@ -118,6 +119,36 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
return size;
}
+static const char *
+stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
+{
+ size_t x = 0;
+
+ switch (page_sizes) {
+ case 0:
+ return "";
+ case I915_GTT_PAGE_SIZE_4K:
+ return "4K";
+ case I915_GTT_PAGE_SIZE_64K:
+ return "64K";
+ case I915_GTT_PAGE_SIZE_2M:
+ return "2M";
+ default:
+ if (!buf)
+ return "M";
+
+ if (page_sizes & I915_GTT_PAGE_SIZE_2M)
+ x += snprintf(buf + x, len - x, "2M, ");
+ if (page_sizes & I915_GTT_PAGE_SIZE_64K)
+ x += snprintf(buf + x, len - x, "64K, ");
+ if (page_sizes & I915_GTT_PAGE_SIZE_4K)
+ x += snprintf(buf + x, len - x, "4K, ");
+ buf[x-2] = '\0';
+
+ return buf;
+ }
+}
+
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
@@ -149,15 +180,16 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
pin_count++;
}
seq_printf(m, " (pinned x %d)", pin_count);
- if (obj->pin_display)
- seq_printf(m, " (display)");
+ if (obj->pin_global)
+ seq_printf(m, " (global)");
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
- seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
+ seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
i915_vma_is_ggtt(vma) ? "g" : "pp",
- vma->node.start, vma->node.size);
+ vma->node.start, vma->node.size,
+ stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
if (i915_vma_is_ggtt(vma)) {
switch (vma->ggtt_view.type) {
case I915_GGTT_VIEW_NORMAL:
@@ -239,7 +271,9 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
goto out;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
if (count == total)
break;
@@ -251,7 +285,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
}
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
if (count == total)
break;
@@ -261,6 +295,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
objects[count++] = obj;
total_obj_size += obj->base.size;
}
+ spin_unlock(&dev_priv->mm.obj_lock);
sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
@@ -402,10 +437,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- u32 count, mapped_count, purgeable_count, dpy_count;
- u64 size, mapped_size, purgeable_size, dpy_size;
+ u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
+ u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
struct drm_i915_gem_object *obj;
+ unsigned int page_sizes = 0;
struct drm_file *file;
+ char buf[80];
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -419,7 +456,10 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
size = count = 0;
mapped_size = mapped_count = 0;
purgeable_size = purgeable_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+ huge_size = huge_count = 0;
+
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
size += obj->base.size;
++count;
@@ -432,15 +472,21 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
mapped_count++;
mapped_size += obj->base.size;
}
+
+ if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ huge_count++;
+ huge_size += obj->base.size;
+ page_sizes |= obj->mm.page_sizes.sg;
+ }
}
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
size = count = dpy_size = dpy_count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
size += obj->base.size;
++count;
- if (obj->pin_display) {
+ if (obj->pin_global) {
dpy_size += obj->base.size;
++dpy_count;
}
@@ -454,18 +500,33 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
mapped_count++;
mapped_size += obj->base.size;
}
+
+ if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ huge_count++;
+ huge_size += obj->base.size;
+ page_sizes |= obj->mm.page_sizes.sg;
+ }
}
+ spin_unlock(&dev_priv->mm.obj_lock);
+
seq_printf(m, "%u bound objects, %llu bytes\n",
count, size);
seq_printf(m, "%u purgeable objects, %llu bytes\n",
purgeable_count, purgeable_size);
seq_printf(m, "%u mapped objects, %llu bytes\n",
mapped_count, mapped_size);
- seq_printf(m, "%u display objects (pinned), %llu bytes\n",
+ seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
+ huge_count,
+ stringify_page_sizes(page_sizes, buf, sizeof(buf)),
+ huge_size);
+ seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
dpy_count, dpy_size);
seq_printf(m, "%llu [%llu] gtt total\n",
ggtt->base.total, ggtt->mappable_end);
+ seq_printf(m, "Supported page sizes: %s\n",
+ stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
+ buf, sizeof(buf)));
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
@@ -514,32 +575,46 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_i915_private *dev_priv = node_to_i915(node);
struct drm_device *dev = &dev_priv->drm;
- bool show_pin_display_only = !!node->info_ent->data;
+ struct drm_i915_gem_object **objects;
struct drm_i915_gem_object *obj;
u64 total_obj_size, total_gtt_size;
+ unsigned long nobject, n;
int count, ret;
+ nobject = READ_ONCE(dev_priv->mm.object_count);
+ objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
+ if (!objects)
+ return -ENOMEM;
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
- if (show_pin_display_only && !obj->pin_display)
- continue;
+ count = 0;
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
+ objects[count++] = obj;
+ if (count == nobject)
+ break;
+ }
+ spin_unlock(&dev_priv->mm.obj_lock);
+
+ total_obj_size = total_gtt_size = 0;
+ for (n = 0; n < count; n++) {
+ obj = objects[n];
seq_puts(m, " ");
describe_obj(m, obj);
seq_putc(m, '\n');
total_obj_size += obj->base.size;
total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
- count++;
}
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
count, total_obj_size, total_gtt_size);
+ kvfree(objects);
return 0;
}
@@ -589,54 +664,6 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
return 0;
}
-static void print_request(struct seq_file *m,
- struct drm_i915_gem_request *rq,
- const char *prefix)
-{
- seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
- rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
- rq->priotree.priority,
- jiffies_to_msecs(jiffies - rq->emitted_jiffies),
- rq->timeline->common->name);
-}
-
-static int i915_gem_request_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_i915_gem_request *req;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int ret, any;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- any = 0;
- for_each_engine(engine, dev_priv, id) {
- int count;
-
- count = 0;
- list_for_each_entry(req, &engine->timeline->requests, link)
- count++;
- if (count == 0)
- continue;
-
- seq_printf(m, "%s requests: %d\n", engine->name, count);
- list_for_each_entry(req, &engine->timeline->requests, link)
- print_request(m, req, " ");
-
- any++;
- }
- mutex_unlock(&dev->struct_mutex);
-
- if (any == 0)
- seq_puts(m, "No requests\n");
-
- return 0;
-}
-
static void i915_ring_seqno_info(struct seq_file *m,
struct intel_engine_cs *engine)
{
@@ -1026,6 +1053,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
int ret = 0;
intel_runtime_pm_get(dev_priv);
@@ -1041,9 +1069,19 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- u32 freq_sts;
+ u32 rpmodectl, freq_sts;
+
+ mutex_lock(&dev_priv->pcu_lock);
+
+ rpmodectl = I915_READ(GEN6_RP_CONTROL);
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
- mutex_lock(&dev_priv->rps.hw_lock);
freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
@@ -1052,21 +1090,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
seq_printf(m, "current GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+ intel_gpu_freq(dev_priv, rps->cur_freq));
seq_printf(m, "max GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ intel_gpu_freq(dev_priv, rps->max_freq));
seq_printf(m, "min GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+ intel_gpu_freq(dev_priv, rps->min_freq));
seq_printf(m, "idle GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+ intel_gpu_freq(dev_priv, rps->idle_freq));
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
- mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_gpu_freq(dev_priv, rps->efficient_freq));
+ mutex_unlock(&dev_priv->pcu_lock);
} else if (INTEL_GEN(dev_priv) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
@@ -1136,10 +1174,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
pm_iir = I915_READ(GEN8_GT_IIR(2));
pm_mask = I915_READ(GEN6_PMINTRMSK);
}
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
- dev_priv->rps.pm_intrmsk_mbz);
+ rps->pm_intrmsk_mbz);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
@@ -1159,8 +1204,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
seq_printf(m, "RP PREV UP: %d (%dus)\n",
rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
- seq_printf(m, "Up threshold: %d%%\n",
- dev_priv->rps.up_threshold);
+ seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
@@ -1168,8 +1212,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
- seq_printf(m, "Down threshold: %d%%\n",
- dev_priv->rps.down_threshold);
+ seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
@@ -1191,22 +1234,22 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ intel_gpu_freq(dev_priv, rps->max_freq));
seq_printf(m, "Current freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+ intel_gpu_freq(dev_priv, rps->cur_freq));
seq_printf(m, "Actual freq: %d MHz\n", cagf);
seq_printf(m, "Idle freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+ intel_gpu_freq(dev_priv, rps->idle_freq));
seq_printf(m, "Min freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+ intel_gpu_freq(dev_priv, rps->min_freq));
seq_printf(m, "Boost freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
+ intel_gpu_freq(dev_priv, rps->boost_freq));
seq_printf(m, "Max freq: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ intel_gpu_freq(dev_priv, rps->max_freq));
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+ intel_gpu_freq(dev_priv, rps->efficient_freq));
} else {
seq_puts(m, "no P-state info available\n");
}
@@ -1267,7 +1310,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
seq_puts(m, "struct_mutex blocked for reset\n");
- if (!i915.enable_hangcheck) {
+ if (!i915_modparams.enable_hangcheck) {
seq_puts(m, "Hangcheck disabled\n");
return 0;
}
@@ -1422,6 +1465,9 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
struct intel_uncore_forcewake_domain *fw_domain;
unsigned int tmp;
+ seq_printf(m, "user.bypass_count = %u\n",
+ i915->uncore.user_forcewake.count);
+
for_each_fw_domain(fw_domain, i915, tmp)
seq_printf(m, "%s.wake_count = %u\n",
intel_uncore_forcewake_domain_to_str(fw_domain->id),
@@ -1444,21 +1490,11 @@ static void print_rc6_res(struct seq_file *m,
static int vlv_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 rpmodectl1, rcctl1, pw_status;
+ u32 rcctl1, pw_status;
pw_status = I915_READ(VLV_GTLC_PW_STATUS);
- rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
- seq_printf(m, "Video Turbo Mode: %s\n",
- yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
- seq_printf(m, "Turbo enabled: %s\n",
- yesno(rpmodectl1 & GEN6_RP_ENABLE));
- seq_printf(m, "HW control enabled: %s\n",
- yesno(rpmodectl1 & GEN6_RP_ENABLE));
- seq_printf(m, "SW control enabled: %s\n",
- yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
seq_printf(m, "RC6 Enabled: %s\n",
yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_EI_MODE(1))));
@@ -1476,7 +1512,7 @@ static int vlv_drpc_info(struct seq_file *m)
static int gen6_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
+ u32 gt_core_status, rcctl1, rc6vids = 0;
u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
unsigned forcewake_count;
int count = 0;
@@ -1495,24 +1531,16 @@ static int gen6_drpc_info(struct seq_file *m)
gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
- rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
if (INTEL_GEN(dev_priv) >= 9) {
gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
}
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
- seq_printf(m, "Video Turbo Mode: %s\n",
- yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
- seq_printf(m, "HW control enabled: %s\n",
- yesno(rpmodectl1 & GEN6_RP_ENABLE));
- seq_printf(m, "SW control enabled: %s\n",
- yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
seq_printf(m, "RC1e Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
seq_printf(m, "RC6 Enabled: %s\n",
@@ -1699,7 +1727,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Enabled by kernel parameter: %s\n",
- yesno(i915.enable_ips));
+ yesno(i915_modparams.enable_ips));
if (INTEL_GEN(dev_priv) >= 8) {
seq_puts(m, "Currently: unknown\n");
@@ -1775,6 +1803,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
int ret = 0;
int gpu_freq, ia_freq;
unsigned int max_gpu_freq, min_gpu_freq;
@@ -1786,19 +1815,17 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
if (ret)
goto out;
if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Convert GT frequency to 50 HZ units */
- min_gpu_freq =
- dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
- max_gpu_freq =
- dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
+ min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
+ max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
} else {
- min_gpu_freq = dev_priv->rps.min_freq_softlimit;
- max_gpu_freq = dev_priv->rps.max_freq_softlimit;
+ min_gpu_freq = rps->min_freq_softlimit;
+ max_gpu_freq = rps->max_freq_softlimit;
}
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
@@ -1817,7 +1844,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
((ia_freq >> 8) & 0xff) * 100);
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
out:
intel_runtime_pm_put(dev_priv);
@@ -2014,7 +2041,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
enum intel_engine_id id;
int ret;
- if (!i915.enable_execlists) {
+ if (!i915_modparams.enable_execlists) {
seq_printf(m, "Logical Ring Contexts are disabled\n");
return 0;
}
@@ -2251,25 +2278,26 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
struct drm_file *file;
- seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
+ seq_printf(m, "RPS enabled? %d\n", rps->enabled);
seq_printf(m, "GPU busy? %s [%d requests]\n",
yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Boosts outstanding? %d\n",
- atomic_read(&dev_priv->rps.num_waiters));
+ atomic_read(&rps->num_waiters));
seq_printf(m, "Frequency requested %d\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+ intel_gpu_freq(dev_priv, rps->cur_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
- intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
- intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
- intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ intel_gpu_freq(dev_priv, rps->min_freq),
+ intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
+ intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
+ intel_gpu_freq(dev_priv, rps->max_freq));
seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
- intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
+ intel_gpu_freq(dev_priv, rps->idle_freq),
+ intel_gpu_freq(dev_priv, rps->efficient_freq),
+ intel_gpu_freq(dev_priv, rps->boost_freq));
mutex_lock(&dev->filelist_mutex);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
@@ -2281,15 +2309,15 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "%s [%d]: %d boosts\n",
task ? task->comm : "<unknown>",
task ? task->pid : -1,
- atomic_read(&file_priv->rps.boosts));
+ atomic_read(&file_priv->rps_client.boosts));
rcu_read_unlock();
}
seq_printf(m, "Kernel (anonymous) boosts: %d\n",
- atomic_read(&dev_priv->rps.boosts));
+ atomic_read(&rps->boosts));
mutex_unlock(&dev->filelist_mutex);
if (INTEL_GEN(dev_priv) >= 6 &&
- dev_priv->rps.enabled &&
+ rps->enabled &&
dev_priv->gt.active_requests) {
u32 rpup, rpupei;
u32 rpdown, rpdownei;
@@ -2302,13 +2330,13 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
- rps_power_to_str(dev_priv->rps.power));
+ rps_power_to_str(rps->power));
seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
rpup && rpupei ? 100 * rpup / rpupei : 0,
- dev_priv->rps.up_threshold);
+ rps->up_threshold);
seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
- dev_priv->rps.down_threshold);
+ rps->down_threshold);
} else {
seq_puts(m, "\nRPS Autotuning inactive\n");
}
@@ -2331,27 +2359,13 @@ static int i915_llc(struct seq_file *m, void *data)
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ struct drm_printer p;
if (!HAS_HUC_UCODE(dev_priv))
return 0;
- seq_puts(m, "HuC firmware status:\n");
- seq_printf(m, "\tpath: %s\n", huc_fw->path);
- seq_printf(m, "\tfetch: %s\n",
- intel_uc_fw_status_repr(huc_fw->fetch_status));
- seq_printf(m, "\tload: %s\n",
- intel_uc_fw_status_repr(huc_fw->load_status));
- seq_printf(m, "\tversion wanted: %d.%d\n",
- huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
- seq_printf(m, "\tversion found: %d.%d\n",
- huc_fw->major_ver_found, huc_fw->minor_ver_found);
- seq_printf(m, "\theader: offset is %d; size = %d\n",
- huc_fw->header_offset, huc_fw->header_size);
- seq_printf(m, "\tuCode: offset is %d; size = %d\n",
- huc_fw->ucode_offset, huc_fw->ucode_size);
- seq_printf(m, "\tRSA: offset is %d; size = %d\n",
- huc_fw->rsa_offset, huc_fw->rsa_size);
+ p = drm_seq_file_printer(m);
+ intel_uc_fw_dump(&dev_priv->huc.fw, &p);
intel_runtime_pm_get(dev_priv);
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
@@ -2363,29 +2377,14 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+ struct drm_printer p;
u32 tmp, i;
if (!HAS_GUC_UCODE(dev_priv))
return 0;
- seq_printf(m, "GuC firmware status:\n");
- seq_printf(m, "\tpath: %s\n",
- guc_fw->path);
- seq_printf(m, "\tfetch: %s\n",
- intel_uc_fw_status_repr(guc_fw->fetch_status));
- seq_printf(m, "\tload: %s\n",
- intel_uc_fw_status_repr(guc_fw->load_status));
- seq_printf(m, "\tversion wanted: %d.%d\n",
- guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
- seq_printf(m, "\tversion found: %d.%d\n",
- guc_fw->major_ver_found, guc_fw->minor_ver_found);
- seq_printf(m, "\theader: offset is %d; size = %d\n",
- guc_fw->header_offset, guc_fw->header_size);
- seq_printf(m, "\tuCode: offset is %d; size = %d\n",
- guc_fw->ucode_offset, guc_fw->ucode_size);
- seq_printf(m, "\tRSA: offset is %d; size = %d\n",
- guc_fw->rsa_offset, guc_fw->rsa_size);
+ p = drm_seq_file_printer(m);
+ intel_uc_fw_dump(&dev_priv->guc.fw, &p);
intel_runtime_pm_get(dev_priv);
@@ -2443,12 +2442,8 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
client->priority, client->stage_id, client->proc_desc_offset);
- seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n",
- client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
- seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
- client->wq_size, client->wq_offset, client->wq_tail);
-
- seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
+ seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
+ client->doorbell_id, client->doorbell_offset);
for_each_engine(engine, dev_priv, id) {
u64 submissions = client->submissions[id];
@@ -2594,7 +2589,7 @@ static int i915_guc_log_control_get(void *data, u64 *val)
if (!dev_priv->guc.log.vma)
return -EINVAL;
- *val = i915.guc_log_level;
+ *val = i915_modparams.guc_log_level;
return 0;
}
@@ -3239,9 +3234,9 @@ static int i915_display_info(struct seq_file *m, void *unused)
static int i915_engine_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_gpu_error *error = &dev_priv->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ struct drm_printer p;
intel_runtime_pm_get(dev_priv);
@@ -3250,146 +3245,21 @@ static int i915_engine_info(struct seq_file *m, void *unused)
seq_printf(m, "Global active requests: %d\n",
dev_priv->gt.active_requests);
- for_each_engine(engine, dev_priv, id) {
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct drm_i915_gem_request *rq;
- struct rb_node *rb;
- u64 addr;
-
- seq_printf(m, "%s\n", engine->name);
- seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
- intel_engine_get_seqno(engine),
- intel_engine_last_submit(engine),
- engine->hangcheck.seqno,
- jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
- engine->timeline->inflight_seqnos);
- seq_printf(m, "\tReset count: %d\n",
- i915_reset_engine_count(error, engine));
-
- rcu_read_lock();
-
- seq_printf(m, "\tRequests:\n");
-
- rq = list_first_entry(&engine->timeline->requests,
- struct drm_i915_gem_request, link);
- if (&rq->link != &engine->timeline->requests)
- print_request(m, rq, "\t\tfirst ");
-
- rq = list_last_entry(&engine->timeline->requests,
- struct drm_i915_gem_request, link);
- if (&rq->link != &engine->timeline->requests)
- print_request(m, rq, "\t\tlast ");
-
- rq = i915_gem_find_active_request(engine);
- if (rq) {
- print_request(m, rq, "\t\tactive ");
- seq_printf(m,
- "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
- rq->head, rq->postfix, rq->tail,
- rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
- rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
- }
-
- seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
- I915_READ(RING_START(engine->mmio_base)),
- rq ? i915_ggtt_offset(rq->ring->vma) : 0);
- seq_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
- I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
- rq ? rq->ring->head : 0);
- seq_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
- I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
- rq ? rq->ring->tail : 0);
- seq_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
- I915_READ(RING_CTL(engine->mmio_base)),
- I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
-
- rcu_read_unlock();
-
- addr = intel_engine_get_active_head(engine);
- seq_printf(m, "\tACTHD: 0x%08x_%08x\n",
- upper_32_bits(addr), lower_32_bits(addr));
- addr = intel_engine_get_last_batch_head(engine);
- seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
- upper_32_bits(addr), lower_32_bits(addr));
-
- if (i915.enable_execlists) {
- u32 ptr, read, write;
- unsigned int idx;
-
- seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
- I915_READ(RING_EXECLIST_STATUS_LO(engine)),
- I915_READ(RING_EXECLIST_STATUS_HI(engine)));
-
- ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
- read = GEN8_CSB_READ_PTR(ptr);
- write = GEN8_CSB_WRITE_PTR(ptr);
- seq_printf(m, "\tExeclist CSB read %d, write %d, interrupt posted? %s\n",
- read, write,
- yesno(test_bit(ENGINE_IRQ_EXECLIST,
- &engine->irq_posted)));
- if (read >= GEN8_CSB_ENTRIES)
- read = 0;
- if (write >= GEN8_CSB_ENTRIES)
- write = 0;
- if (read > write)
- write += GEN8_CSB_ENTRIES;
- while (read < write) {
- idx = ++read % GEN8_CSB_ENTRIES;
- seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
- idx,
- I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
- I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
- }
-
- rcu_read_lock();
- for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) {
- unsigned int count;
-
- rq = port_unpack(&engine->execlist_port[idx],
- &count);
- if (rq) {
- seq_printf(m, "\t\tELSP[%d] count=%d, ",
- idx, count);
- print_request(m, rq, "rq: ");
- } else {
- seq_printf(m, "\t\tELSP[%d] idle\n",
- idx);
- }
- }
- rcu_read_unlock();
-
- spin_lock_irq(&engine->timeline->lock);
- for (rb = engine->execlist_first; rb; rb = rb_next(rb)){
- struct i915_priolist *p =
- rb_entry(rb, typeof(*p), node);
-
- list_for_each_entry(rq, &p->requests,
- priotree.link)
- print_request(m, rq, "\t\tQ ");
- }
- spin_unlock_irq(&engine->timeline->lock);
- } else if (INTEL_GEN(dev_priv) > 6) {
- seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
- I915_READ(RING_PP_DIR_BASE(engine)));
- seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
- I915_READ(RING_PP_DIR_BASE_READ(engine)));
- seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
- I915_READ(RING_PP_DIR_DCLV(engine)));
- }
+ p = drm_seq_file_printer(m);
+ for_each_engine(engine, dev_priv, id)
+ intel_engine_dump(engine, &p);
- spin_lock_irq(&b->rb_lock);
- for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = rb_entry(rb, typeof(*w), node);
+ intel_runtime_pm_put(dev_priv);
- seq_printf(m, "\t%s [%d] waiting for %x\n",
- w->tsk->comm, w->tsk->pid, w->seqno);
- }
- spin_unlock_irq(&b->rb_lock);
+ return 0;
+}
- seq_puts(m, "\n");
- }
+static int i915_shrinker_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = node_to_i915(m->private);
- intel_runtime_pm_put(dev_priv);
+ seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
+ seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
return 0;
}
@@ -3403,7 +3273,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
enum intel_engine_id id;
int j, ret;
- if (!i915.semaphores) {
+ if (!i915_modparams.semaphores) {
seq_puts(m, "Semaphores are disabled\n");
return 0;
}
@@ -3523,6 +3393,57 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
return 0;
}
+static int i915_ipc_status_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+
+ seq_printf(m, "Isochronous Priority Control: %s\n",
+ yesno(dev_priv->ipc_enabled));
+ return 0;
+}
+
+static int i915_ipc_status_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (!HAS_IPC(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, i915_ipc_status_show, dev_priv);
+}
+
+static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ int ret;
+ bool enable;
+
+ ret = kstrtobool_from_user(ubuf, len, &enable);
+ if (ret < 0)
+ return ret;
+
+ intel_runtime_pm_get(dev_priv);
+ if (!dev_priv->ipc_enabled && enable)
+ DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
+ dev_priv->wm.distrust_bios_wm = true;
+ dev_priv->ipc_enabled = enable;
+ intel_enable_ipc(dev_priv);
+ intel_runtime_pm_put(dev_priv);
+
+ return len;
+}
+
+static const struct file_operations i915_ipc_status_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_ipc_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_ipc_status_write
+};
+
static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4203,8 +4124,7 @@ fault_irq_set(struct drm_i915_private *i915,
mutex_unlock(&i915->drm.struct_mutex);
/* Flush idle worker to disarm irq */
- while (flush_delayed_work(&i915->gt.idle_work))
- ;
+ drain_delayed_work(&i915->gt.idle_work);
return 0;
@@ -4259,18 +4179,20 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
i915_ring_test_irq_get, i915_ring_test_irq_set,
"0x%08llx\n");
-#define DROP_UNBOUND 0x1
-#define DROP_BOUND 0x2
-#define DROP_RETIRE 0x4
-#define DROP_ACTIVE 0x8
-#define DROP_FREED 0x10
-#define DROP_SHRINK_ALL 0x20
+#define DROP_UNBOUND BIT(0)
+#define DROP_BOUND BIT(1)
+#define DROP_RETIRE BIT(2)
+#define DROP_ACTIVE BIT(3)
+#define DROP_FREED BIT(4)
+#define DROP_SHRINK_ALL BIT(5)
+#define DROP_IDLE BIT(6)
#define DROP_ALL (DROP_UNBOUND | \
DROP_BOUND | \
DROP_RETIRE | \
DROP_ACTIVE | \
DROP_FREED | \
- DROP_SHRINK_ALL)
+ DROP_SHRINK_ALL |\
+ DROP_IDLE)
static int
i915_drop_caches_get(void *data, u64 *val)
{
@@ -4286,7 +4208,8 @@ i915_drop_caches_set(void *data, u64 val)
struct drm_device *dev = &dev_priv->drm;
int ret = 0;
- DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
+ DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
+ val, val & DROP_ALL);
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
@@ -4317,6 +4240,9 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_shrink_all(dev_priv);
fs_reclaim_release(GFP_KERNEL);
+ if (val & DROP_IDLE)
+ drain_delayed_work(&dev_priv->gt.idle_work);
+
if (val & DROP_FREED) {
synchronize_rcu();
i915_gem_drain_freed_objects(dev_priv);
@@ -4337,7 +4263,7 @@ i915_max_freq_get(void *data, u64 *val)
if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
- *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
+ *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit);
return 0;
}
@@ -4345,6 +4271,7 @@ static int
i915_max_freq_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 hw_max, hw_min;
int ret;
@@ -4353,7 +4280,7 @@ i915_max_freq_set(void *data, u64 val)
DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
if (ret)
return ret;
@@ -4362,20 +4289,20 @@ i915_max_freq_set(void *data, u64 val)
*/
val = intel_freq_opcode(dev_priv, val);
- hw_max = dev_priv->rps.max_freq;
- hw_min = dev_priv->rps.min_freq;
+ hw_max = rps->max_freq;
+ hw_min = rps->min_freq;
- if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
- mutex_unlock(&dev_priv->rps.hw_lock);
+ if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
+ mutex_unlock(&dev_priv->pcu_lock);
return -EINVAL;
}
- dev_priv->rps.max_freq_softlimit = val;
+ rps->max_freq_softlimit = val;
if (intel_set_rps(dev_priv, val))
DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
return 0;
}
@@ -4392,7 +4319,7 @@ i915_min_freq_get(void *data, u64 *val)
if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
- *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
+ *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit);
return 0;
}
@@ -4400,6 +4327,7 @@ static int
i915_min_freq_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 hw_max, hw_min;
int ret;
@@ -4408,7 +4336,7 @@ i915_min_freq_set(void *data, u64 val)
DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
- ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+ ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
if (ret)
return ret;
@@ -4417,21 +4345,21 @@ i915_min_freq_set(void *data, u64 val)
*/
val = intel_freq_opcode(dev_priv, val);
- hw_max = dev_priv->rps.max_freq;
- hw_min = dev_priv->rps.min_freq;
+ hw_max = rps->max_freq;
+ hw_min = rps->min_freq;
if (val < hw_min ||
- val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
- mutex_unlock(&dev_priv->rps.hw_lock);
+ val > hw_max || val > rps->max_freq_softlimit) {
+ mutex_unlock(&dev_priv->pcu_lock);
return -EINVAL;
}
- dev_priv->rps.min_freq_softlimit = val;
+ rps->min_freq_softlimit = val;
if (intel_set_rps(dev_priv, val))
DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
return 0;
}
@@ -4674,26 +4602,26 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct drm_i915_private *i915 = inode->i_private;
- if (INTEL_GEN(dev_priv) < 6)
+ if (INTEL_GEN(i915) < 6)
return 0;
- intel_runtime_pm_get(dev_priv);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_get(i915);
+ intel_uncore_forcewake_user_get(i915);
return 0;
}
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
- struct drm_i915_private *dev_priv = inode->i_private;
+ struct drm_i915_private *i915 = inode->i_private;
- if (INTEL_GEN(dev_priv) < 6)
+ if (INTEL_GEN(i915) < 6)
return 0;
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
+ intel_uncore_forcewake_user_put(i915);
+ intel_runtime_pm_put(i915);
return 0;
}
@@ -4783,9 +4711,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
- {"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
{"i915_gem_stolen", i915_gem_stolen_list_info },
- {"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
@@ -4823,6 +4749,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_dmc_info", i915_dmc_info, 0},
{"i915_display_info", i915_display_info, 0},
{"i915_engine_info", i915_engine_info, 0},
+ {"i915_shrinker_info", i915_shrinker_info, 0},
{"i915_semaphore_status", i915_semaphore_status, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
@@ -4859,7 +4786,8 @@ static const struct i915_debugfs_files {
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops},
{"i915_guc_log_control", &i915_guc_log_control_fops},
- {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}
+ {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
+ {"i915_ipc_status", &i915_ipc_status_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9f45cfeae775..960d3d8b95b8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,12 +58,12 @@ static unsigned int i915_load_fail_count;
bool __i915_inject_load_failure(const char *func, int line)
{
- if (i915_load_fail_count >= i915.inject_load_failure)
+ if (i915_load_fail_count >= i915_modparams.inject_load_failure)
return false;
- if (++i915_load_fail_count == i915.inject_load_failure) {
+ if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
- i915.inject_load_failure, func, line);
+ i915_modparams.inject_load_failure, func, line);
return true;
}
@@ -106,8 +106,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
- return i915.inject_load_failure &&
- i915_load_fail_count == i915.inject_load_failure;
+ return i915_modparams.inject_load_failure &&
+ i915_load_fail_count == i915_modparams.inject_load_failure;
}
#define i915_load_error(dev_priv, fmt, ...) \
@@ -239,7 +239,8 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
dev_priv->pch_type = PCH_KBP;
DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
- !IS_KABYLAKE(dev_priv));
+ !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CNP;
DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
@@ -320,7 +321,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = USES_PPGTT(dev_priv);
break;
case I915_PARAM_HAS_SEMAPHORES:
- value = i915.semaphores;
+ value = i915_modparams.semaphores;
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
@@ -339,7 +340,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_HAS_GPU_RESET:
- value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
+ value = i915_modparams.enable_hangcheck &&
+ intel_has_gpu_reset(dev_priv);
if (value && intel_has_reset_engine(dev_priv))
value = 2;
break;
@@ -365,9 +367,18 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = i915_gem_mmap_gtt_version();
break;
case I915_PARAM_HAS_SCHEDULER:
- value = dev_priv->engine[RCS] &&
- dev_priv->engine[RCS]->schedule;
+ value = 0;
+ if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
+ value |= I915_SCHEDULER_CAP_ENABLED;
+ value |= I915_SCHEDULER_CAP_PRIORITY;
+
+ if (INTEL_INFO(dev_priv)->has_logical_ring_preemption &&
+ i915_modparams.enable_execlists &&
+ !i915_modparams.enable_guc_submission)
+ value |= I915_SCHEDULER_CAP_PREEMPTION;
+ }
break;
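
/* Side note, not part of this patch: I915_PARAM_HAS_SCHEDULER now reports a
 * capability bitmask rather than a plain boolean.  A hedged userspace sketch
 * of reading it through the standard getparam ioctl; the header path and the
 * surrounding error handling are assumptions, only the bitmask decode follows
 * from the hunk above.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* assumed location of the uapi header */

static int query_scheduler_caps(int drm_fd)
{
	int caps = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_SCHEDULER,
		.value = &caps,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	printf("scheduler enabled=%d priority=%d preemption=%d\n",
	       !!(caps & I915_SCHEDULER_CAP_ENABLED),
	       !!(caps & I915_SCHEDULER_CAP_PRIORITY),
	       !!(caps & I915_SCHEDULER_CAP_PREEMPTION));
	return 0;
}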
+
case I915_PARAM_MMAP_VERSION:
/* Remember to bump this if the version changes! */
case I915_PARAM_HAS_GEM:
@@ -604,9 +615,10 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_uc_fini_hw(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_contexts_fini(dev_priv);
- i915_gem_cleanup_userptr(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_cleanup_userptr(dev_priv);
+
i915_gem_drain_freed_objects(dev_priv);
WARN_ON(!list_empty(&dev_priv->contexts.list));
@@ -868,6 +880,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
memcpy(device_info, match_info, sizeof(*device_info));
device_info->device_id = dev_priv->drm.pdev->device;
+ BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
+ sizeof(device_info->platform_mask) * BITS_PER_BYTE);
+ device_info->platform_mask = BIT(device_info->platform);
+
BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
device_info->gen_mask = BIT(device_info->gen - 1);
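
/* Side note, not part of this patch: platform_mask mirrors the gen_mask trick
 * (one bit per enum intel_platform value), so the IS_PLATFORM()/IS_GEN()
 * checks later in this series reduce to a single AND.  A minimal sketch,
 * assuming a Skylake part; the helper name is illustrative only.
 */
static inline bool sketch_is_skylake(const struct intel_device_info *info)
{
	/* the same test IS_PLATFORM(dev_priv, INTEL_SKYLAKE) performs below */
	return info->platform_mask & BIT(INTEL_SKYLAKE);
}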
@@ -1001,6 +1017,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
intel_uncore_init(dev_priv);
+ intel_uc_init_mmio(dev_priv);
+
ret = intel_engines_init_mmio(dev_priv);
if (ret)
goto err_uncore;
@@ -1030,9 +1048,9 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
- i915.enable_execlists =
+ i915_modparams.enable_execlists =
intel_sanitize_enable_execlists(dev_priv,
- i915.enable_execlists);
+ i915_modparams.enable_execlists);
/*
* i915.enable_ppgtt is read-only, so do an early pass to validate the
@@ -1040,12 +1058,15 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
* do this now so that we can print out any log messages once rather
* than every time we check intel_enable_ppgtt().
*/
- i915.enable_ppgtt =
- intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
- DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+ i915_modparams.enable_ppgtt =
+ intel_sanitize_enable_ppgtt(dev_priv,
+ i915_modparams.enable_ppgtt);
+ DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
- i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
- DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
+ i915_modparams.semaphores =
+ intel_sanitize_semaphores(dev_priv, i915_modparams.semaphores);
+ DRM_DEBUG_DRIVER("use GPU semaphores? %s\n",
+ yesno(i915_modparams.semaphores));
intel_uc_sanitize_options(dev_priv);
@@ -1276,7 +1297,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
/* Enable nuclear pageflip on ILK+ */
- if (!i915.nuclear_pageflip && match_info->gen < 5)
+ if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
driver.driver_features &= ~DRIVER_ATOMIC;
ret = -ENOMEM;
@@ -1304,7 +1325,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 * because the HDA driver may require us to enable the audio power
* domain during system suspend.
*/
- pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
ret = i915_driver_init_early(dev_priv, ent);
if (ret < 0)
@@ -1340,7 +1361,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_runtime_pm_enable(dev_priv);
- dev_priv->ipc_enabled = false;
+ intel_init_ipc(dev_priv);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
DRM_INFO("DRM_I915_DEBUG enabled\n");
@@ -1571,7 +1592,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
intel_display_set_init_power(dev_priv, false);
- fw_csr = !IS_GEN9_LP(dev_priv) &&
+ fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation &&
suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
* In case of firmware assisted context save/restore don't manually
@@ -2061,11 +2082,14 @@ static int i915_pm_resume(struct device *kdev)
/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
+ struct drm_device *dev = &kdev_to_i915(kdev)->drm;
int ret;
- ret = i915_pm_suspend(kdev);
- if (ret)
- return ret;
+ if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
+ ret = i915_drm_suspend(dev);
+ if (ret)
+ return ret;
+ }
ret = i915_gem_freeze(kdev_to_i915(kdev));
if (ret)
@@ -2076,11 +2100,14 @@ static int i915_pm_freeze(struct device *kdev)
static int i915_pm_freeze_late(struct device *kdev)
{
+ struct drm_device *dev = &kdev_to_i915(kdev)->drm;
int ret;
- ret = i915_pm_suspend_late(kdev);
- if (ret)
- return ret;
+ if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) {
+ ret = i915_drm_suspend_late(dev, true);
+ if (ret)
+ return ret;
+ }
ret = i915_gem_freeze_late(kdev_to_i915(kdev));
if (ret)
@@ -2476,7 +2503,7 @@ static int intel_runtime_suspend(struct device *kdev)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
+ if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && intel_rc6_enabled())))
return -ENODEV;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
@@ -2518,12 +2545,12 @@ static int intel_runtime_suspend(struct device *kdev)
intel_uncore_suspend(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
- WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+ WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
- dev_priv->pm.suspended = true;
+ dev_priv->runtime_pm.suspended = true;
/*
* FIXME: We really should find a document that references the arguments
@@ -2569,11 +2596,11 @@ static int intel_runtime_resume(struct device *kdev)
DRM_DEBUG_KMS("Resuming device\n");
- WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+ WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
disable_rpm_wakeref_asserts(dev_priv);
intel_opregion_notify_adapter(dev_priv, PCI_D0);
- dev_priv->pm.suspended = false;
+ dev_priv->runtime_pm.suspended = false;
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
@@ -2608,6 +2635,8 @@ static int intel_runtime_resume(struct device *kdev)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_init(dev_priv);
+ intel_enable_ipc(dev_priv);
+
enable_rpm_wakeref_asserts(dev_priv);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 18d9da53282b..54b5d4c582b6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -80,8 +80,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20170818"
-#define DRIVER_TIMESTAMP 1503088845
+#define DRIVER_DATE "20171023"
+#define DRIVER_TIMESTAMP 1508748913
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -93,7 +93,7 @@
#define I915_STATE_WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
- if (!WARN(i915.verbose_state_checks, format)) \
+ if (!WARN(i915_modparams.verbose_state_checks, format)) \
DRM_ERROR(format); \
unlikely(__ret_warn_on); \
})
@@ -126,7 +126,7 @@ static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
{
uint_fixed_16_16_t fp;
- WARN_ON(val >> 16);
+ WARN_ON(val > U16_MAX);
fp.val = val << 16;
return fp;
@@ -163,8 +163,8 @@ static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
{
uint_fixed_16_16_t fp;
- WARN_ON(val >> 32);
- fp.val = clamp_t(uint32_t, val, 0, ~0);
+ WARN_ON(val > U32_MAX);
+ fp.val = (uint32_t) val;
return fp;
}
@@ -181,8 +181,8 @@ static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
intermediate_val = (uint64_t) val * mul.val;
intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
- WARN_ON(intermediate_val >> 32);
- return clamp_t(uint32_t, intermediate_val, 0, ~0);
+ WARN_ON(intermediate_val > U32_MAX);
+ return (uint32_t) intermediate_val;
}
static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
@@ -211,8 +211,8 @@ static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
interm_val = (uint64_t)val << 16;
interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
- WARN_ON(interm_val >> 32);
- return clamp_t(uint32_t, interm_val, 0, ~0);
+ WARN_ON(interm_val > U32_MAX);
+ return (uint32_t) interm_val;
}
static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
@@ -569,6 +569,24 @@ struct i915_hotplug {
(__i)++) \
for_each_if (plane_state)
+#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if (crtc)
+
+
+#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
+ (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if (plane)
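
/* Side note, not part of this patch: a usage sketch for the two iterators
 * added above.  The surrounding function is invented; only the loop shape and
 * argument order come from the macro definitions.
 */
static void sketch_walk_atomic_state(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	struct intel_crtc_state *new_crtc_state;
	struct intel_plane *plane;
	struct intel_plane_state *old_plane_state, *new_plane_state;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		/* inspect the state each CRTC is about to switch to */
	}

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		/* compare old vs new plane state, e.g. for watermark updates */
	}
}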
+
struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;
@@ -591,7 +609,7 @@ struct drm_i915_file_private {
struct intel_rps_client {
atomic_t boosts;
- } rps;
+ } rps_client;
unsigned int bsd_engine;
@@ -707,8 +725,7 @@ struct drm_i915_display_funcs {
struct drm_atomic_state *old_state);
void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
struct drm_atomic_state *old_state);
- void (*update_crtcs)(struct drm_atomic_state *state,
- unsigned int *crtc_vblank_mask);
+ void (*update_crtcs)(struct drm_atomic_state *state);
void (*audio_codec_enable)(struct drm_connector *connector,
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode);
@@ -759,7 +776,6 @@ struct intel_csr {
func(has_fpga_dbg); \
func(has_full_ppgtt); \
func(has_full_48bit_ppgtt); \
- func(has_gmbus_irq); \
func(has_gmch_display); \
func(has_guc); \
func(has_guc_ct); \
@@ -767,8 +783,8 @@ struct intel_csr {
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
+ func(has_logical_ring_preemption); \
func(has_overlay); \
- func(has_pipe_cxsr); \
func(has_pooled_eu); \
func(has_psr); \
func(has_rc6); \
@@ -780,7 +796,8 @@ struct intel_csr {
func(cursor_needs_physical); \
func(hws_needs_physical); \
func(overlay_needs_physical); \
- func(supports_tv);
+ func(supports_tv); \
+ func(has_ipc);
struct sseu_dev_info {
u8 slice_mask;
@@ -834,20 +851,30 @@ enum intel_platform {
};
struct intel_device_info {
- u32 display_mmio_offset;
u16 device_id;
+ u16 gen_mask;
+
+ u8 gen;
+ u8 gt; /* GT number, 0 if undefined */
+ u8 num_rings;
+ u8 ring_mask; /* Rings supported by the HW */
+
+ enum intel_platform platform;
+ u32 platform_mask;
+
+ u32 display_mmio_offset;
+
u8 num_pipes;
u8 num_sprites[I915_MAX_PIPES];
u8 num_scalers[I915_MAX_PIPES];
- u8 gen;
- u16 gen_mask;
- enum intel_platform platform;
- u8 ring_mask; /* Rings supported by the HW */
- u8 num_rings;
+
+ unsigned int page_sizes; /* page sizes supported by the HW */
+
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
u16 ddb_size; /* in blocks */
+
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
@@ -956,6 +983,7 @@ struct i915_gpu_state {
pid_t pid;
u32 handle;
u32 hw_id;
+ int priority;
int ban_score;
int active;
int guilty;
@@ -978,11 +1006,13 @@ struct i915_gpu_state {
long jiffies;
pid_t pid;
u32 context;
+ int priority;
int ban_score;
u32 seqno;
u32 head;
u32 tail;
- } *requests, execlist[2];
+ } *requests, execlist[EXECLIST_MAX_PORTS];
+ unsigned int num_ports;
struct drm_i915_error_waiter {
char comm[TASK_COMM_LEN];
@@ -1077,6 +1107,16 @@ struct intel_fbc {
int src_w;
int src_h;
bool visible;
+ /*
+ * Display surface base address adjustment for
+ * pageflips. Note that on gen4+ this only adjusts up
+ * to a tile, offsets within a tile are handled in
+ * the hw itself (with the TILEOFF register).
+ */
+ int adjusted_x;
+ int adjusted_y;
+
+ int y;
} plane;
struct {
@@ -1107,6 +1147,7 @@ struct intel_fbc {
} fb;
int cfb_size;
+ unsigned int gen9_wa_cfb_stride;
} params;
struct intel_fbc_work {
@@ -1159,6 +1200,14 @@ struct i915_psr {
bool y_cord_support;
bool colorimetry_support;
bool alpm;
+
+ void (*enable_source)(struct intel_dp *,
+ const struct intel_crtc_state *);
+ void (*disable_source)(struct intel_dp *,
+ const struct intel_crtc_state *);
+ void (*enable_sink)(struct intel_dp *);
+ void (*activate)(struct intel_dp *);
+ void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
};
enum intel_pch {
@@ -1277,7 +1326,7 @@ struct intel_rps_ei {
u32 media_c0;
};
-struct intel_gen6_power_mgmt {
+struct intel_rps {
/*
* work, interrupts_enabled and pm_iir are protected by
* dev_priv->irq_lock
@@ -1318,20 +1367,26 @@ struct intel_gen6_power_mgmt {
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
bool enabled;
- struct delayed_work autoenable_work;
atomic_t num_waiters;
atomic_t boosts;
/* manual wa residency calculations */
struct intel_rps_ei ei;
+};
- /*
- * Protects RPS/RC6 register access and PCU communication.
- * Must be taken after struct_mutex if nested. Note that
- * this lock may be held for long periods of time when
- * talking to hw - so only take it when talking to hw!
- */
- struct mutex hw_lock;
+struct intel_rc6 {
+ bool enabled;
+};
+
+struct intel_llc_pstate {
+ bool enabled;
+};
+
+struct intel_gen6_power_mgmt {
+ struct intel_rps rps;
+ struct intel_rc6 rc6;
+ struct intel_llc_pstate llc_pstate;
+ struct delayed_work autoenable_work;
};
/* defined intel_pm.c */
@@ -1444,6 +1499,9 @@ struct i915_gem_mm {
* always the inner lock when overlapping with struct_mutex. */
struct mutex stolen_lock;
+ /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
+ spinlock_t obj_lock;
+
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head bound_list;
@@ -1464,10 +1522,21 @@ struct i915_gem_mm {
*/
struct llist_head free_list;
struct work_struct free_work;
+ spinlock_t free_lock;
+
+ /**
+ * Small stash of WC pages
+ */
+ struct pagevec wc_stash;
/** Usable portion of the GTT for GEM */
dma_addr_t stolen_base; /* limited to low memory (32-bit) */
+ /**
+ * tmpfs instance used for shmem backed objects
+ */
+ struct vfsmount *gemfs;
+
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
@@ -1709,6 +1778,8 @@ struct intel_vbt_data {
u16 panel_id;
struct mipi_config *config;
struct mipi_pps_data *pps;
+ u16 bl_ports;
+ u16 cabc_ports;
u8 seq_version;
u32 size;
u8 *data;
@@ -1718,7 +1789,7 @@ struct intel_vbt_data {
int crt_ddc_pin;
int child_dev_num;
- union child_device_config *child_dev;
+ struct child_device_config *child_dev;
struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
struct sdvo_device_mapping sdvo_mappings[2];
@@ -1812,6 +1883,20 @@ struct skl_wm_level {
uint8_t plane_res_l;
};
+/* Stores plane specific WM parameters */
+struct skl_wm_params {
+ bool x_tiled, y_tiled;
+ bool rc_surface;
+ uint32_t width;
+ uint8_t cpp;
+ uint32_t plane_pixel_rate;
+ uint32_t y_min_scanlines;
+ uint32_t plane_bytes_per_line;
+ uint_fixed_16_16_t plane_blocks_per_line;
+ uint_fixed_16_16_t y_tile_minimum;
+ uint32_t linetime_us;
+};
+
/*
* This struct helps tracking the state needed for runtime PM, which puts the
* device in PCI D3 state. Notice that when this happens, nothing on the
@@ -1890,13 +1975,7 @@ struct i915_wa_reg {
u32 mask;
};
-/*
- * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
- * allowing it for RCS as we don't foresee any requirement of having
- * a whitelist for other engines. When it is really required for
- * other engines then the limit need to be increased.
- */
-#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)
+#define I915_MAX_WA_REGS 16
struct i915_workarounds {
struct i915_wa_reg reg[I915_MAX_WA_REGS];
@@ -2197,8 +2276,11 @@ struct drm_i915_private {
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
- struct i915_gem_context *kernel_context;
struct intel_engine_cs *engine[I915_NUM_ENGINES];
+ /* Context used internally to idle the GPU and setup initial state */
+ struct i915_gem_context *kernel_context;
+ /* Context only to be used for injecting preemption commands */
+ struct i915_gem_context *preempt_context;
struct i915_vma *semaphore;
struct drm_dma_handle *status_page_dmah;
@@ -2307,6 +2389,8 @@ struct drm_i915_private {
DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock;
+ struct intel_ppat ppat;
+
/* Kernel Modesetting */
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@@ -2329,7 +2413,8 @@ struct drm_i915_private {
struct mutex dpll_lock;
unsigned int active_crtcs;
- unsigned int min_pixclk[I915_MAX_PIPES];
+ /* minimum acceptable cdclk for each pipe */
+ int min_cdclk[I915_MAX_PIPES];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
@@ -2351,8 +2436,16 @@ struct drm_i915_private {
/* Cannot be determined by PCIID. You must always read a register. */
u32 edram_cap;
- /* gen6+ rps state */
- struct intel_gen6_power_mgmt rps;
+ /*
+ * Protects RPS/RC6 register access and PCU communication.
+ * Must be taken after struct_mutex if nested. Note that
+ * this lock may be held for long periods of time when
+ * talking to hw - so only take it when talking to hw!
+ */
+ struct mutex pcu_lock;
+
+ /* gen6+ GT PM state */
+ struct intel_gen6_power_mgmt gt_pm;
/* ilk-only ips/rps state. Everything in here is protected by the global
* mchdev_lock in intel_pm.c */
@@ -2463,7 +2556,7 @@ struct drm_i915_private {
bool distrust_bios_wm;
} wm;
- struct i915_runtime_pm pm;
+ struct i915_runtime_pm runtime_pm;
struct {
bool initialized;
@@ -2786,8 +2879,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
#define for_each_sgt_dma(__dmap, __iter, __sgt) \
for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
((__dmap) = (__iter).dma + (__iter).curr); \
- (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
- ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
+ (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
+ (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
/**
* for_each_sgt_page - iterate over the pages of the given sg_table
@@ -2799,8 +2892,38 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
((__pp) = (__iter).pfn == 0 ? NULL : \
pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
- (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
- ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
+ (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
+ (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
+
+static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
+{
+ unsigned int page_sizes;
+
+ page_sizes = 0;
+ while (sg) {
+ GEM_BUG_ON(sg->offset);
+ GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
+ page_sizes |= sg->length;
+ sg = __sg_next(sg);
+ }
+
+ return page_sizes;
+}
+
+static inline unsigned int i915_sg_segment_size(void)
+{
+ unsigned int size = swiotlb_max_segment();
+
+ if (size == 0)
+ return SCATTERLIST_MAX_SEGMENT;
+
+ size = rounddown(size, PAGE_SIZE);
+ /* swiotlb_max_segment() can return 1 byte when it means one page. */
+ if (size < PAGE_SIZE)
+ size = PAGE_SIZE;
+
+ return size;
+}
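
/* Side note, not part of this patch: these two helpers centralise what
 * i915_gem_object_get_pages_gtt() used to open-code (the swiotlb segment
 * clamp) and feed the new page-size bookkeeping.  A small sketch of what they
 * return; the sg_table is assumed to come from the usual shmem backing store.
 */
static void sketch_sg_helpers(struct sg_table *st)
{
	/* largest single segment we will build: swiotlb_max_segment() rounded
	 * down to a page, or SCATTERLIST_MAX_SEGMENT when swiotlb is unused */
	unsigned int max_segment = i915_sg_segment_size();

	/* OR of every segment length: a chain of one 2MiB and one 4KiB
	 * segment gives 0x200000 | 0x1000 == 0x201000 */
	unsigned int page_sizes = i915_sg_page_sizes(st->sgl);

	(void)max_segment;
	(void)page_sizes;
}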
static inline const struct intel_device_info *
intel_info(const struct drm_i915_private *dev_priv)
@@ -2817,23 +2940,21 @@ intel_info(const struct drm_i915_private *dev_priv)
#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
#define GEN_FOREVER (0)
+
+#define INTEL_GEN_MASK(s, e) ( \
+ BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
+ BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
+ GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
+ (s) != GEN_FOREVER ? (s) - 1 : 0) \
+)
+
/*
* Returns true if Gen is in inclusive range [Start, End].
*
* Use GEN_FOREVER for unbound start and or end.
*/
-#define IS_GEN(dev_priv, s, e) ({ \
- unsigned int __s = (s), __e = (e); \
- BUILD_BUG_ON(!__builtin_constant_p(s)); \
- BUILD_BUG_ON(!__builtin_constant_p(e)); \
- if ((__s) != GEN_FOREVER) \
- __s = (s) - 1; \
- if ((__e) == GEN_FOREVER) \
- __e = BITS_PER_LONG - 1; \
- else \
- __e = (e) - 1; \
- !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
-})
+#define IS_GEN(dev_priv, s, e) \
+ (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
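
/* Side note, not part of this patch: the rewrite trades the old statement
 * expression for a constant mask, so IS_GEN() folds completely when both
 * bounds are literals.  A compile-time sanity sketch, assuming it lives in
 * any compiled function.
 */
static inline void sketch_gen_mask_selftest(void)
{
	/* IS_GEN(dev_priv, 6, 8) now tests gen_mask against bits 5..7 */
	BUILD_BUG_ON(INTEL_GEN_MASK(6, 8) != GENMASK(7, 5));

	/* GEN_FOREVER still leaves the corresponding bound open */
	BUILD_BUG_ON(INTEL_GEN_MASK(4, GEN_FOREVER) !=
		     GENMASK(BITS_PER_LONG - 1, 3));
}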
/*
* Return true if revision is in range [since,until] inclusive.
@@ -2843,38 +2964,39 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
-#define IS_I830(dev_priv) ((dev_priv)->info.platform == INTEL_I830)
-#define IS_I845G(dev_priv) ((dev_priv)->info.platform == INTEL_I845G)
-#define IS_I85X(dev_priv) ((dev_priv)->info.platform == INTEL_I85X)
-#define IS_I865G(dev_priv) ((dev_priv)->info.platform == INTEL_I865G)
-#define IS_I915G(dev_priv) ((dev_priv)->info.platform == INTEL_I915G)
-#define IS_I915GM(dev_priv) ((dev_priv)->info.platform == INTEL_I915GM)
-#define IS_I945G(dev_priv) ((dev_priv)->info.platform == INTEL_I945G)
-#define IS_I945GM(dev_priv) ((dev_priv)->info.platform == INTEL_I945GM)
-#define IS_I965G(dev_priv) ((dev_priv)->info.platform == INTEL_I965G)
-#define IS_I965GM(dev_priv) ((dev_priv)->info.platform == INTEL_I965GM)
-#define IS_G45(dev_priv) ((dev_priv)->info.platform == INTEL_G45)
-#define IS_GM45(dev_priv) ((dev_priv)->info.platform == INTEL_GM45)
+#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
+
+#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
+#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
+#define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X)
+#define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G)
+#define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G)
+#define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM)
+#define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G)
+#define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM)
+#define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G)
+#define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM)
+#define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45)
+#define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
-#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_PINEVIEW)
-#define IS_G33(dev_priv) ((dev_priv)->info.platform == INTEL_G33)
+#define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
+#define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33)
#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
-#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.platform == INTEL_IVYBRIDGE)
-#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
- INTEL_DEVID(dev_priv) == 0x0152 || \
- INTEL_DEVID(dev_priv) == 0x015a)
-#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_VALLEYVIEW)
-#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_CHERRYVIEW)
-#define IS_HASWELL(dev_priv) ((dev_priv)->info.platform == INTEL_HASWELL)
-#define IS_BROADWELL(dev_priv) ((dev_priv)->info.platform == INTEL_BROADWELL)
-#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_SKYLAKE)
-#define IS_BROXTON(dev_priv) ((dev_priv)->info.platform == INTEL_BROXTON)
-#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_KABYLAKE)
-#define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.platform == INTEL_GEMINILAKE)
-#define IS_COFFEELAKE(dev_priv) ((dev_priv)->info.platform == INTEL_COFFEELAKE)
-#define IS_CANNONLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_CANNONLAKE)
+#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
+#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
+ (dev_priv)->info.gt == 1)
+#define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
+#define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
+#define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL)
+#define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL)
+#define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
+#define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON)
+#define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
+#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
+#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
+#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
@@ -2886,11 +3008,11 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xf) == 0xe)
#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+ (dev_priv)->info.gt == 3)
#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+ (dev_priv)->info.gt == 3)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
INTEL_DEVID(dev_priv) == 0x0A1E)
@@ -2911,17 +3033,19 @@ intel_info(const struct drm_i915_private *dev_priv)
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
+ (dev_priv)->info.gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+ (dev_priv)->info.gt == 3)
#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
+ (dev_priv)->info.gt == 4)
#define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
+ (dev_priv)->info.gt == 2)
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
+ (dev_priv)->info.gt == 3)
#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
+#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
+ (dev_priv)->info.gt == 2)
#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
@@ -2962,6 +3086,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define CNL_REVID_A0 0x0
#define CNL_REVID_B0 0x1
+#define CNL_REVID_C0 0x2
#define IS_CNL_REVID(p, since, until) \
(IS_CANNONLAKE(p) && IS_REVID(p, since, until))
@@ -3012,9 +3137,13 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
((dev_priv)->info.has_logical_ring_contexts)
-#define USES_PPGTT(dev_priv) (i915.enable_ppgtt)
-#define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2)
-#define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3)
+#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
+#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
+#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)
+#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
+ GEM_BUG_ON((sizes) == 0); \
+ ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
+})
#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
@@ -3032,9 +3161,12 @@ intel_info(const struct drm_i915_private *dev_priv)
* even when in MSI mode. This results in spurious interrupt warnings if the
* legacy irq no. is shared with another device. The kernel then disables that
* interrupt source and so prevents the other device from working properly.
+ *
+ * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX
+ * interrupts.
*/
-#define HAS_AUX_IRQ(dev_priv) ((dev_priv)->info.gen >= 5)
-#define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq)
+#define HAS_AUX_IRQ(dev_priv) true
+#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
@@ -3046,7 +3178,6 @@ intel_info(const struct drm_i915_private *dev_priv)
#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug)
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
-#define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr)
#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7)
@@ -3065,6 +3196,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
+#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc)
+
/*
* For now, anything with a GuC requires uCode loading, and then supports
* command submission once loaded. But these are logically independent
@@ -3210,7 +3343,7 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
{
unsigned long delay;
- if (unlikely(!i915.enable_hangcheck))
+ if (unlikely(!i915_modparams.enable_hangcheck))
return;
/* Don't continually defer the hangcheck so that it is always run at
@@ -3243,6 +3376,8 @@ static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.active;
}
+u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 status_mask);
@@ -3424,7 +3559,8 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
unsigned long n);
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages);
+ struct sg_table *pages,
+ unsigned int sg_page_sizes);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline int __must_check
@@ -3438,10 +3574,16 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
return __i915_gem_object_get_pages(obj);
}
+static inline bool
+i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
+{
+ return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
+}
+
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- GEM_BUG_ON(!obj->mm.pages);
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
atomic_inc(&obj->mm.pages_pin_count);
}
@@ -3455,8 +3597,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- GEM_BUG_ON(!obj->mm.pages);
atomic_dec(&obj->mm.pages_pin_count);
}
@@ -3646,8 +3788,9 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
}
/* i915_gem_fence_reg.c */
-int __must_check i915_vma_get_fence(struct i915_vma *vma);
-int __must_check i915_vma_put_fence(struct i915_vma *vma);
+struct drm_i915_fence_reg *
+i915_reserve_fence(struct drm_i915_private *dev_priv);
+void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
@@ -4333,11 +4476,12 @@ int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
-static inline bool
-intel_engine_can_store_dword(struct intel_engine_cs *engine)
+static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
- return __intel_engine_can_store_dword(INTEL_GEN(engine->i915),
- engine->class);
+ if (INTEL_GEN(i915) >= 10)
+ return CNL_HWS_CSB_WRITE_INDEX;
+ else
+ return I915_HWS_CSB_WRITE_INDEX;
}
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dc1faa49687d..3a140eedfc83 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,6 +35,7 @@
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
+#include "i915_gemfs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
@@ -55,7 +56,7 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
return true;
- return obj->pin_display;
+ return obj->pin_global; /* currently in use by HW, keep flushed */
}
static int
@@ -161,8 +162,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static struct sg_table *
-i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
drm_dma_handle_t *phys;
@@ -170,19 +170,20 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
struct scatterlist *sg;
char *vaddr;
int i;
+ int err;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
/* Always aligning to the object size, allows a single allocation
* to handle all possible callers, and given typical object sizes,
* the alignment of the buddy allocation will naturally match.
*/
phys = drm_pci_alloc(obj->base.dev,
- obj->base.size,
+ roundup_pow_of_two(obj->base.size),
roundup_pow_of_two(obj->base.size));
if (!phys)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
vaddr = phys->vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
@@ -191,7 +192,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
- st = ERR_CAST(page);
+ err = PTR_ERR(page);
goto err_phys;
}
@@ -208,13 +209,13 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st) {
- st = ERR_PTR(-ENOMEM);
+ err = -ENOMEM;
goto err_phys;
}
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- st = ERR_PTR(-ENOMEM);
+ err = -ENOMEM;
goto err_phys;
}
@@ -226,11 +227,15 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
sg_dma_len(sg) = obj->base.size;
obj->phys_handle = phys;
- return st;
+
+ __i915_gem_object_set_pages(obj, st, sg->length);
+
+ return 0;
err_phys:
drm_pci_free(obj->base.dev, phys);
- return st;
+
+ return err;
}
static void __start_cpu_write(struct drm_i915_gem_object *obj)
@@ -353,7 +358,7 @@ static long
i915_gem_object_wait_fence(struct dma_fence *fence,
unsigned int flags,
long timeout,
- struct intel_rps_client *rps)
+ struct intel_rps_client *rps_client)
{
struct drm_i915_gem_request *rq;
@@ -386,11 +391,11 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
* forcing the clocks too high for the whole system, we only allow
* each client to waitboost once in a busy period.
*/
- if (rps) {
+ if (rps_client) {
if (INTEL_GEN(rq->i915) >= 6)
- gen6_rps_boost(rq, rps);
+ gen6_rps_boost(rq, rps_client);
else
- rps = NULL;
+ rps_client = NULL;
}
timeout = i915_wait_request(rq, flags, timeout);
@@ -406,7 +411,7 @@ static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
unsigned int flags,
long timeout,
- struct intel_rps_client *rps)
+ struct intel_rps_client *rps_client)
{
unsigned int seq = __read_seqcount_begin(&resv->seq);
struct dma_fence *excl;
@@ -425,7 +430,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
for (i = 0; i < count; i++) {
timeout = i915_gem_object_wait_fence(shared[i],
flags, timeout,
- rps);
+ rps_client);
if (timeout < 0)
break;
@@ -442,7 +447,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
}
if (excl && timeout >= 0) {
- timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+ timeout = i915_gem_object_wait_fence(excl, flags, timeout,
+ rps_client);
prune_fences = timeout >= 0;
}
@@ -538,7 +544,7 @@ int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
long timeout,
- struct intel_rps_client *rps)
+ struct intel_rps_client *rps_client)
{
might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
@@ -550,7 +556,7 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
timeout = i915_gem_object_wait_reservation(obj->resv,
flags, timeout,
- rps);
+ rps_client);
return timeout < 0 ? timeout : 0;
}
@@ -558,7 +564,7 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
struct drm_i915_file_private *fpriv = file->driver_priv;
- return &fpriv->rps;
+ return &fpriv->rps_client;
}
static int
@@ -694,10 +700,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
switch (obj->base.write_domain) {
case I915_GEM_DOMAIN_GTT:
- if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
+ if (!HAS_LLC(dev_priv)) {
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
- POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+ POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
spin_unlock_irq(&dev_priv->uncore.lock);
intel_runtime_pm_put(dev_priv);
}
@@ -1013,17 +1019,20 @@ gtt_user_read(struct io_mapping *mapping,
loff_t base, int offset,
char __user *user_data, int length)
{
- void *vaddr;
+ void __iomem *vaddr;
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
- vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
- unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
+ vaddr = io_mapping_map_atomic_wc(mapping, base);
+ unwritten = __copy_to_user_inatomic(user_data,
+ (void __force *)vaddr + offset,
+ length);
io_mapping_unmap_atomic(vaddr);
if (unwritten) {
- vaddr = (void __force *)
- io_mapping_map_wc(mapping, base, PAGE_SIZE);
- unwritten = copy_to_user(user_data, vaddr + offset, length);
+ vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
+ unwritten = copy_to_user(user_data,
+ (void __force *)vaddr + offset,
+ length);
io_mapping_unmap(vaddr);
}
return unwritten;
@@ -1047,7 +1056,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
intel_runtime_pm_get(i915);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE | PIN_NONBLOCK);
+ PIN_MAPPABLE |
+ PIN_NONFAULT |
+ PIN_NONBLOCK);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
@@ -1189,18 +1200,18 @@ ggtt_write(struct io_mapping *mapping,
loff_t base, int offset,
char __user *user_data, int length)
{
- void *vaddr;
+ void __iomem *vaddr;
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
- vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
- unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
+ vaddr = io_mapping_map_atomic_wc(mapping, base);
+ unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
user_data, length);
io_mapping_unmap_atomic(vaddr);
if (unwritten) {
- vaddr = (void __force *)
- io_mapping_map_wc(mapping, base, PAGE_SIZE);
- unwritten = copy_from_user(vaddr + offset, user_data, length);
+ vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
+ unwritten = copy_from_user((void __force *)vaddr + offset,
+ user_data, length);
io_mapping_unmap(vaddr);
}
@@ -1229,9 +1240,27 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- intel_runtime_pm_get(i915);
+ if (i915_gem_object_has_struct_page(obj)) {
+ /*
+ * Avoid waking the device up if we can fallback, as
+ * waking/resuming is very slow (worst-case 10-100 ms
+ * depending on PCI sleeps and our own resume time).
+ * This easily dwarfs any performance advantage from
+ * using the cache bypass of indirect GGTT access.
+ */
+ if (!intel_runtime_pm_get_if_in_use(i915)) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+ } else {
+ /* No backing pages, no fallback, we must force GGTT access */
+ intel_runtime_pm_get(i915);
+ }
+
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE | PIN_NONBLOCK);
+ PIN_MAPPABLE |
+ PIN_NONFAULT |
+ PIN_NONBLOCK);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
node.allocated = false;
@@ -1244,7 +1273,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
if (IS_ERR(vma)) {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
- goto out_unlock;
+ goto out_rpm;
GEM_BUG_ON(!node.allocated);
}
@@ -1307,8 +1336,9 @@ out_unpin:
} else {
i915_vma_unpin(vma);
}
-out_unlock:
+out_rpm:
intel_runtime_pm_put(i915);
+out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -1524,6 +1554,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
struct list_head *list;
struct i915_vma *vma;
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!i915_vma_is_ggtt(vma))
break;
@@ -1538,8 +1570,10 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
}
i915 = to_i915(obj->base.dev);
+ spin_lock(&i915->mm.obj_lock);
list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
- list_move_tail(&obj->global_link, list);
+ list_move_tail(&obj->mm.link, list);
+ spin_unlock(&i915->mm.obj_lock);
}
/**
@@ -1902,22 +1936,27 @@ int i915_gem_fault(struct vm_fault *vmf)
if (ret)
goto err_unpin;
- ret = i915_vma_get_fence(vma);
+ ret = i915_vma_pin_fence(vma);
if (ret)
goto err_unpin;
- /* Mark as being mmapped into userspace for later revocation */
- assert_rpm_wakelock_held(dev_priv);
- if (list_empty(&obj->userfault_link))
- list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
-
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
(ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->mappable);
+ if (ret)
+ goto err_fence;
+ /* Mark as being mmapped into userspace for later revocation */
+ assert_rpm_wakelock_held(dev_priv);
+ if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
+ list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+ GEM_BUG_ON(!obj->userfault_count);
+
+err_fence:
+ i915_vma_unpin_fence(vma);
err_unpin:
__i915_vma_unpin(vma);
err_unlock:
@@ -1969,6 +2008,25 @@ err:
return ret;
}
+static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!obj->userfault_count);
+
+ obj->userfault_count = 0;
+ list_del(&obj->userfault_link);
+ drm_vma_node_unmap(&obj->base.vma_node,
+ obj->base.dev->anon_inode->i_mapping);
+
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!i915_vma_is_ggtt(vma))
+ break;
+
+ i915_vma_unset_userfault(vma);
+ }
+}
+
/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
@@ -1999,12 +2057,10 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
lockdep_assert_held(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (list_empty(&obj->userfault_link))
+ if (!obj->userfault_count)
goto out;
- list_del_init(&obj->userfault_link);
- drm_vma_node_unmap(&obj->base.vma_node,
- obj->base.dev->anon_inode->i_mapping);
+ __i915_gem_object_release_mmap(obj);
/* Ensure that the CPU's PTE are revoked and there are not outstanding
* memory transactions from userspace before we return. The TLB
@@ -2032,11 +2088,8 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
*/
list_for_each_entry_safe(obj, on,
- &dev_priv->mm.userfault_list, userfault_link) {
- list_del_init(&obj->userfault_link);
- drm_vma_node_unmap(&obj->base.vma_node,
- obj->base.dev->anon_inode->i_mapping);
- }
+ &dev_priv->mm.userfault_list, userfault_link)
+ __i915_gem_object_release_mmap(obj);
/* The fence will be lost when the device powers down. If any were
* in use by hardware (i.e. they are pinned), we should not be powering
@@ -2059,7 +2112,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
if (!reg->vma)
continue;
- GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+ GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
reg->dirty = true;
}
}
@@ -2164,7 +2217,7 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
struct address_space *mapping;
lockdep_assert_held(&obj->mm.lock);
- GEM_BUG_ON(obj->mm.pages);
+ GEM_BUG_ON(i915_gem_object_has_pages(obj));
switch (obj->mm.madv) {
case I915_MADV_DONTNEED:
@@ -2223,13 +2276,14 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
enum i915_mm_subclass subclass)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages;
if (i915_gem_object_has_pinned_pages(obj))
return;
GEM_BUG_ON(obj->bind_count);
- if (!READ_ONCE(obj->mm.pages))
+ if (!i915_gem_object_has_pages(obj))
return;
/* May be called by shrinker from within get_pages() (on another bo) */
@@ -2243,6 +2297,10 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
pages = fetch_and_zero(&obj->mm.pages);
GEM_BUG_ON(!pages);
+ spin_lock(&i915->mm.obj_lock);
+ list_del(&obj->mm.link);
+ spin_unlock(&i915->mm.obj_lock);
+
if (obj->mm.mapping) {
void *ptr;
@@ -2260,6 +2318,8 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
if (!IS_ERR(pages))
obj->ops->put_pages(obj, pages);
+ obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
unlock:
mutex_unlock(&obj->mm.lock);
}
@@ -2290,8 +2350,7 @@ static bool i915_sg_trim(struct sg_table *orig_st)
return true;
}
-static struct sg_table *
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
const unsigned long page_count = obj->base.size / PAGE_SIZE;
@@ -2302,7 +2361,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
- unsigned int max_segment;
+ unsigned int max_segment = i915_sg_segment_size();
+ unsigned int sg_page_sizes;
gfp_t noreclaim;
int ret;
@@ -2313,18 +2373,14 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
- max_segment = swiotlb_max_segment();
- if (!max_segment)
- max_segment = rounddown(UINT_MAX, PAGE_SIZE);
-
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
rebuild_st:
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
kfree(st);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
/* Get the list of pages out of our struct file. They'll be pinned
@@ -2338,6 +2394,7 @@ rebuild_st:
sg = st->sgl;
st->nents = 0;
+ sg_page_sizes = 0;
for (i = 0; i < page_count; i++) {
const unsigned int shrink[] = {
I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
@@ -2390,8 +2447,10 @@ rebuild_st:
if (!i ||
sg->length >= max_segment ||
page_to_pfn(page) != last_pfn + 1) {
- if (i)
+ if (i) {
+ sg_page_sizes |= sg->length;
sg = sg_next(sg);
+ }
st->nents++;
sg_set_page(sg, page, PAGE_SIZE, 0);
} else {
@@ -2402,8 +2461,10 @@ rebuild_st:
/* Check that the i965g/gm workaround works. */
WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
- if (sg) /* loop terminated early; short sg table */
+ if (sg) { /* loop terminated early; short sg table */
+ sg_page_sizes |= sg->length;
sg_mark_end(sg);
+ }
/* Trim unused sg entries to avoid wasting memory. */
i915_sg_trim(st);
@@ -2432,7 +2493,9 @@ rebuild_st:
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj, st);
- return st;
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
err_sg:
sg_mark_end(sg);
@@ -2453,12 +2516,17 @@ err_pages:
if (ret == -ENOSPC)
ret = -ENOMEM;
- return ERR_PTR(ret);
+ return ret;
}
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
+ struct sg_table *pages,
+ unsigned int sg_page_sizes)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ int i;
+
lockdep_assert_held(&obj->mm.lock);
obj->mm.get_page.sg_pos = pages->sgl;
@@ -2467,30 +2535,48 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
obj->mm.pages = pages;
if (i915_gem_object_is_tiled(obj) &&
- to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
GEM_BUG_ON(obj->mm.quirked);
__i915_gem_object_pin_pages(obj);
obj->mm.quirked = true;
}
+
+ GEM_BUG_ON(!sg_page_sizes);
+ obj->mm.page_sizes.phys = sg_page_sizes;
+
+ /*
+ * Calculate the supported page-sizes which fit into the given
+ * sg_page_sizes. This will give us the page-sizes which we may be able
+ * to use opportunistically when later inserting into the GTT. For
+ * example if phys=2G, then in theory we should be able to use 1G, 2M,
+ * 64K or 4K pages, although in practice this will depend on a number of
+ * other factors.
+ */
+ obj->mm.page_sizes.sg = 0;
+ for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+ if (obj->mm.page_sizes.phys & ~0u << i)
+ obj->mm.page_sizes.sg |= BIT(i);
+ }
+ GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
+
+ spin_lock(&i915->mm.obj_lock);
+ list_add(&obj->mm.link, &i915->mm.unbound_list);
+ spin_unlock(&i915->mm.obj_lock);
}
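
/* Side note, not part of this patch: a worked example of the phys-to-sg
 * derivation in the loop above, assuming a platform whose info.page_sizes
 * advertises 4K, 64K and 2M GTT pages.
 *
 *   phys      = 0x201000           (one 2MiB + one 4KiB segment, ORed lengths)
 *   supported = 4K | 64K | 2M
 *
 * A supported bit i is kept when phys has any bit >= i set, i.e. at least one
 * segment is large enough to be mapped with that page size:
 *
 *   i = 12 (4K):  phys & (~0u << 12) != 0  ->  sg |= 4K
 *   i = 16 (64K): phys & (~0u << 16) != 0  ->  sg |= 64K
 *   i = 21 (2M):  phys & (~0u << 21) != 0  ->  sg |= 2M
 *
 * so sg = 4K | 64K | 2M even though no segment is exactly 64KiB long.
 */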
static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
- struct sg_table *pages;
-
- GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+ int err;
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
}
- pages = obj->ops->get_pages(obj);
- if (unlikely(IS_ERR(pages)))
- return PTR_ERR(pages);
+ err = obj->ops->get_pages(obj);
+ GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));
- __i915_gem_object_set_pages(obj, pages);
- return 0;
+ return err;
}
/* Ensure that the associated pages are gathered from the backing storage
@@ -2508,7 +2594,9 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
if (err)
return err;
- if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+ if (unlikely(!i915_gem_object_has_pages(obj))) {
+ GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
err = ____i915_gem_object_get_pages(obj);
if (err)
goto unlock;
@@ -2591,7 +2679,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
type &= ~I915_MAP_OVERRIDE;
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
- if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+ if (unlikely(!i915_gem_object_has_pages(obj))) {
+ GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
ret = ____i915_gem_object_get_pages(obj);
if (ret)
goto err_unlock;
@@ -2601,7 +2691,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
atomic_inc(&obj->mm.pages_pin_count);
pinned = false;
}
- GEM_BUG_ON(!obj->mm.pages);
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
if (ptr && has_type != type) {
@@ -2656,7 +2746,7 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
* allows it to avoid the cost of retrieving a page (either swapin
* or clearing-before-use) before it is overwritten.
*/
- if (READ_ONCE(obj->mm.pages))
+ if (i915_gem_object_has_pages(obj))
return -ENODEV;
if (obj->mm.madv != I915_MADV_WILLNEED)
@@ -2800,7 +2890,17 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request = NULL;
- /* Prevent the signaler thread from updating the request
+ /*
+ * During the reset sequence, we must prevent the engine from
+ * entering RC6. As the context state is undefined until we restart
+ * the engine, if it does enter RC6 during the reset, the state
+ * written to the powercontext is undefined and so we may lose
+ * GPU state upon resume, i.e. fail to restart after a reset.
+ */
+ intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
+
+ /*
+ * Prevent the signaler thread from updating the request
* state (by calling dma_fence_signal) as we are processing
* the reset. The write from the GPU of the seqno is
* asynchronous and the signaler thread may see a different
@@ -2811,7 +2911,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
*/
kthread_park(engine->breadcrumbs.signaler);
- /* Prevent request submission to the hardware until we have
+ /*
+ * Prevent request submission to the hardware until we have
* completed the reset in i915_gem_reset_finish(). If a request
* is completed by one engine, it may then queue a request
* to a second via its engine->irq_tasklet *just* as we are
@@ -2819,8 +2920,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
* Turning off the engine->irq_tasklet until the reset is over
* prevents the race.
*/
- tasklet_kill(&engine->irq_tasklet);
- tasklet_disable(&engine->irq_tasklet);
+ tasklet_kill(&engine->execlists.irq_tasklet);
+ tasklet_disable(&engine->execlists.irq_tasklet);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
@@ -2999,8 +3100,10 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
- tasklet_enable(&engine->irq_tasklet);
+ tasklet_enable(&engine->execlists.irq_tasklet);
kthread_unpark(engine->breadcrumbs.signaler);
+
+ intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
}
void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
@@ -3018,9 +3121,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct drm_i915_gem_request *request)
{
+ dma_fence_set_error(&request->fence, -EIO);
+
+ i915_gem_request_submit(request);
+}
+
+static void nop_complete_submit_request(struct drm_i915_gem_request *request)
+{
unsigned long flags;
- GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
dma_fence_set_error(&request->fence, -EIO);
spin_lock_irqsave(&request->engine->timeline->lock, flags);
@@ -3029,81 +3138,59 @@ static void nop_submit_request(struct drm_i915_gem_request *request)
spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
}
-static void engine_set_wedged(struct intel_engine_cs *engine)
+void i915_gem_set_wedged(struct drm_i915_private *i915)
{
- struct drm_i915_gem_request *request;
- unsigned long flags;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- /* We need to be sure that no thread is running the old callback as
- * we install the nop handler (otherwise we would submit a request
- * to hardware that will never complete). In order to prevent this
- * race, we wait until the machine is idle before making the swap
- * (using stop_machine()).
+ /*
+ * First, stop submission to hw, but do not yet complete requests by
+ * rolling the global seqno forward (since this would complete requests
+ * for which we haven't set the fence error to EIO yet).
*/
- engine->submit_request = nop_submit_request;
-
- /* Mark all executing requests as skipped */
- spin_lock_irqsave(&engine->timeline->lock, flags);
- list_for_each_entry(request, &engine->timeline->requests, link)
- if (!i915_gem_request_completed(request))
- dma_fence_set_error(&request->fence, -EIO);
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ for_each_engine(engine, i915, id)
+ engine->submit_request = nop_submit_request;
/*
- * Clear the execlists queue up before freeing the requests, as those
- * are the ones that keep the context and ringbuffer backing objects
- * pinned in place.
+ * Make sure no one is running the old callback before we proceed with
+ * cancelling requests and resetting the completion tracking. Otherwise
+ * we might submit a request to the hardware which never completes.
*/
+ synchronize_rcu();
- if (i915.enable_execlists) {
- struct execlist_port *port = engine->execlist_port;
- unsigned long flags;
- unsigned int n;
-
- spin_lock_irqsave(&engine->timeline->lock, flags);
-
- for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
- i915_gem_request_put(port_request(&port[n]));
- memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
- engine->execlist_queue = RB_ROOT;
- engine->execlist_first = NULL;
-
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ for_each_engine(engine, i915, id) {
+ /* Mark all executing requests as skipped */
+ engine->cancel_requests(engine);
- /* The port is checked prior to scheduling a tasklet, but
- * just in case we have suspended the tasklet to do the
- * wedging make sure that when it wakes, it decides there
- * is no work to do by clearing the irq_posted bit.
+ /*
+ * Only once we've force-cancelled all in-flight requests can we
+ * start to complete all requests.
*/
- clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ engine->submit_request = nop_complete_submit_request;
}
- /* Mark all pending requests as complete so that any concurrent
- * (lockless) lookup doesn't try and wait upon the request as we
- * reset it.
+ /*
+ * Make sure no request can slip through without getting completed by
+ * either this call here to intel_engine_init_global_seqno, or the one
+ * in nop_complete_submit_request.
*/
- intel_engine_init_global_seqno(engine,
- intel_engine_last_submit(engine));
-}
+ synchronize_rcu();
-static int __i915_gem_set_wedged_BKL(void *data)
-{
- struct drm_i915_private *i915 = data;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ for_each_engine(engine, i915, id) {
+ unsigned long flags;
- for_each_engine(engine, i915, id)
- engine_set_wedged(engine);
+ /* Mark all pending requests as complete so that any concurrent
+ * (lockless) lookup doesn't try and wait upon the request as we
+ * reset it.
+ */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ intel_engine_init_global_seqno(engine,
+ intel_engine_last_submit(engine));
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ }
set_bit(I915_WEDGED, &i915->gpu_error.flags);
wake_up_all(&i915->gpu_error.reset_queue);
-
- return 0;
-}
-
-void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
-{
- stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
}
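The rewrite above is a two-phase shutdown: every engine's submit_request is first swapped for a nop that only marks the fence as -EIO, synchronize_rcu() then waits out any thread still running the old callback, and only after a second swap and a second synchronize_rcu() is the global seqno advanced so requests may complete. The user-space sketch below only illustrates that handler-swap-then-quiesce ordering; stub_quiesce() stands in for synchronize_rcu(), and every type and function in it is invented for the example.

/* Illustrative only: mirrors the two-phase callback swap used above.
 * stub_quiesce() stands in for synchronize_rcu(); all types are made up. */
#include <stdio.h>

struct request { int seqno; };
struct engine {
	const char *name;
	void (*submit)(struct engine *, struct request *);
};

static void real_submit(struct engine *e, struct request *rq)
{
	printf("%s: submitting %d to hw\n", e->name, rq->seqno);
}

static void nop_submit(struct engine *e, struct request *rq)
{
	printf("%s: dropping %d (wedged, not yet completed)\n", e->name, rq->seqno);
}

static void nop_complete_submit(struct engine *e, struct request *rq)
{
	printf("%s: completing %d as -EIO\n", e->name, rq->seqno);
}

static void stub_quiesce(void)
{
	/* In the kernel this is synchronize_rcu(): wait until no CPU can
	 * still be running the old callback before moving on. */
}

int main(void)
{
	struct engine engines[2] = {
		{ "rcs", real_submit }, { "bcs", real_submit },
	};
	struct request rq = { 1 };
	int i;

	engines[0].submit(&engines[0], &rq);	/* normal path */

	/* Phase 1: stop feeding the hardware, but do not complete anything. */
	for (i = 0; i < 2; i++)
		engines[i].submit = nop_submit;
	stub_quiesce();
	engines[0].submit(&engines[0], &rq);	/* now dropped */

	/* Phase 2: in-flight work cancelled; allow completion as -EIO. */
	for (i = 0; i < 2; i++)
		engines[i].submit = nop_complete_submit;
	stub_quiesce();
	engines[0].submit(&engines[0], &rq);	/* now completed as -EIO */

	return 0;
}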
bool i915_gem_unset_wedged(struct drm_i915_private *i915)
@@ -3267,11 +3354,11 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
struct i915_gem_context *ctx = lut->ctx;
struct i915_vma *vma;
+ GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
if (ctx->file_priv != fpriv)
continue;
vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
-
GEM_BUG_ON(vma->obj != obj);
/* We allow the process to have multiple handles to the same
@@ -3385,24 +3472,12 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
return 0;
}
-static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms)
-{
- return wait_for(intel_engine_is_idle(engine), timeout_ms);
-}
-
static int wait_for_engines(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id) {
- if (GEM_WARN_ON(wait_for_engine(engine, 50))) {
- i915_gem_set_wedged(i915);
- return -EIO;
- }
-
- GEM_BUG_ON(intel_engine_get_seqno(engine) !=
- intel_engine_last_submit(engine));
+ if (wait_for(intel_engines_are_idle(i915), 50)) {
+ DRM_ERROR("Failed to idle engines, declaring wedged!\n");
+ i915_gem_set_wedged(i915);
+ return -EIO;
}
return 0;
@@ -3452,7 +3527,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
- if (!READ_ONCE(obj->pin_display))
+ if (!READ_ONCE(obj->pin_global))
return;
mutex_lock(&obj->base.dev->struct_mutex);
@@ -3819,10 +3894,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->base.dev->struct_mutex);
- /* Mark the pin_display early so that we account for the
+ /* Mark the global pin early so that we account for the
* display coherency whilst setting up the cache domains.
*/
- obj->pin_display++;
+ obj->pin_global++;
/* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
@@ -3838,7 +3913,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
I915_CACHE_WT : I915_CACHE_NONE);
if (ret) {
vma = ERR_PTR(ret);
- goto err_unpin_display;
+ goto err_unpin_global;
}
/* As the user may map the buffer once pinned in the display plane
@@ -3869,7 +3944,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
}
if (IS_ERR(vma))
- goto err_unpin_display;
+ goto err_unpin_global;
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
@@ -3884,8 +3959,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return vma;
-err_unpin_display:
- obj->pin_display--;
+err_unpin_global:
+ obj->pin_global--;
return vma;
}
@@ -3894,10 +3969,10 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- if (WARN_ON(vma->obj->pin_display == 0))
+ if (WARN_ON(vma->obj->pin_global == 0))
return;
- if (--vma->obj->pin_display == 0)
+ if (--vma->obj->pin_global == 0)
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
/* Bump the LRU to try and avoid premature eviction whilst flipping */
@@ -4016,42 +4091,47 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->base.dev->struct_mutex);
+ if (!view && flags & PIN_MAPPABLE) {
+ /* If the required space is larger than the available
+ * aperture, we will not able to find a slot for the
+ * object and unbinding the object now will be in
+ * vain. Worse, doing so may cause us to ping-pong
+ * the object in and out of the Global GTT and
+ * waste a lot of cycles under the mutex.
+ */
+ if (obj->base.size > dev_priv->ggtt.mappable_end)
+ return ERR_PTR(-E2BIG);
+
+ /* If NONBLOCK is set the caller is optimistically
+ * trying to cache the full object within the mappable
+ * aperture, and *must* have a fallback in place for
+ * situations where we cannot bind the object. We
+ * can be a little more lax here and use the fallback
+ * more often to avoid costly migrations of ourselves
+ * and other objects within the aperture.
+ *
+ * Half-the-aperture is used as a simple heuristic.
+ * More interesting would be to do a search for a free
+ * block prior to making the commitment to unbind.
+ * That caters for the self-harm case, and with a
+ * little more heuristics (e.g. NOFAULT, NOEVICT)
+ * we could try to minimise harm to others.
+ */
+ if (flags & PIN_NONBLOCK &&
+ obj->base.size > dev_priv->ggtt.mappable_end / 2)
+ return ERR_PTR(-ENOSPC);
+ }
+
vma = i915_vma_instance(obj, vm, view);
if (unlikely(IS_ERR(vma)))
return vma;
if (i915_vma_misplaced(vma, size, alignment, flags)) {
- if (flags & PIN_NONBLOCK &&
- (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
- return ERR_PTR(-ENOSPC);
+ if (flags & PIN_NONBLOCK) {
+ if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
+ return ERR_PTR(-ENOSPC);
- if (flags & PIN_MAPPABLE) {
- /* If the required space is larger than the available
- * aperture, we will not able to find a slot for the
- * object and unbinding the object now will be in
- * vain. Worse, doing so may cause us to ping-pong
- * the object in and out of the Global GTT and
- * waste a lot of cycles under the mutex.
- */
- if (vma->fence_size > dev_priv->ggtt.mappable_end)
- return ERR_PTR(-E2BIG);
-
- /* If NONBLOCK is set the caller is optimistically
- * trying to cache the full object within the mappable
- * aperture, and *must* have a fallback in place for
- * situations where we cannot bind the object. We
- * can be a little more lax here and use the fallback
- * more often to avoid costly migrations of ourselves
- * and other objects within the aperture.
- *
- * Half-the-aperture is used as a simple heuristic.
- * More interesting would to do search for a free
- * block prior to making the commitment to unbind.
- * That caters for the self-harm case, and with a
- * little more heuristics (e.g. NOFAULT, NOEVICT)
- * we could try to minimise harm to others.
- */
- if (flags & PIN_NONBLOCK &&
+ if (flags & PIN_MAPPABLE &&
vma->fence_size > dev_priv->ggtt.mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
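The block hoisted to the top of i915_gem_object_ggtt_pin() reduces to two size comparisons against the mappable aperture. As a rough sketch with user-space stand-ins (only the flag names, the errnos and the half-aperture threshold are taken from the hunk):

/* Sketch of the mappable-aperture heuristic above; not kernel code. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PIN_MAPPABLE (1u << 0)
#define PIN_NONBLOCK (1u << 1)

static int check_mappable(uint64_t obj_size, uint64_t mappable_end,
			  unsigned int flags)
{
	if (!(flags & PIN_MAPPABLE))
		return 0;

	/* Larger than the whole aperture: unbinding others cannot help. */
	if (obj_size > mappable_end)
		return -E2BIG;

	/* NONBLOCK callers must have a fallback, so refuse anything that
	 * would consume more than half the aperture rather than evicting. */
	if (flags & PIN_NONBLOCK && obj_size > mappable_end / 2)
		return -ENOSPC;

	return 0;
}

int main(void)
{
	const uint64_t aperture = 256ull << 20;	/* assume a 256MiB aperture */

	printf("%d\n", check_mappable(512ull << 20, aperture, PIN_MAPPABLE));
	printf("%d\n", check_mappable(200ull << 20, aperture,
				      PIN_MAPPABLE | PIN_NONBLOCK));
	printf("%d\n", check_mappable(64ull << 20, aperture,
				      PIN_MAPPABLE | PIN_NONBLOCK));
	return 0;
}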
@@ -4232,7 +4312,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (err)
goto out;
- if (obj->mm.pages &&
+ if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
@@ -4251,7 +4331,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
obj->mm.madv = args->madv;
/* if the object is no longer attached, discard its backing storage */
- if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
+ if (obj->mm.madv == I915_MADV_DONTNEED &&
+ !i915_gem_object_has_pages(obj))
i915_gem_object_truncate(obj);
args->retained = obj->mm.madv != __I915_MADV_PURGED;
@@ -4277,8 +4358,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
{
mutex_init(&obj->mm.lock);
- INIT_LIST_HEAD(&obj->global_link);
- INIT_LIST_HEAD(&obj->userfault_link);
INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->lut_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4308,6 +4387,30 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.pwrite = i915_gem_object_pwrite_gtt,
};
+static int i915_gem_object_create_shmem(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ size_t size)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ unsigned long flags = VM_NORESERVE;
+ struct file *filp;
+
+ drm_gem_private_object_init(dev, obj, size);
+
+ if (i915->mm.gemfs)
+ filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
+ flags);
+ else
+ filp = shmem_file_setup("i915", size, flags);
+
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
+
+ obj->filp = filp;
+
+ return 0;
+}
+
struct drm_i915_gem_object *
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
{
@@ -4332,7 +4435,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
if (obj == NULL)
return ERR_PTR(-ENOMEM);
- ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
+ ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
if (ret)
goto fail;
@@ -4409,13 +4512,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
{
struct drm_i915_gem_object *obj, *on;
- mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- llist_for_each_entry(obj, freed, freed) {
+ llist_for_each_entry_safe(obj, on, freed, freed) {
struct i915_vma *vma, *vn;
trace_i915_gem_object_destroy(obj);
+ mutex_lock(&i915->drm.struct_mutex);
+
GEM_BUG_ON(i915_gem_object_is_active(obj));
list_for_each_entry_safe(vma, vn,
&obj->vma_list, obj_link) {
@@ -4426,16 +4530,24 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(!list_empty(&obj->vma_list));
GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
- list_del(&obj->global_link);
- }
- intel_runtime_pm_put(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ /* This serializes freeing with the shrinker. Since the free
+ * is delayed, first by RCU then by the workqueue, we want the
+ * shrinker to be able to free pages of unreferenced objects,
+ * or else we may oom whilst there are plenty of deferred
+ * freed objects.
+ */
+ if (i915_gem_object_has_pages(obj)) {
+ spin_lock(&i915->mm.obj_lock);
+ list_del_init(&obj->mm.link);
+ spin_unlock(&i915->mm.obj_lock);
+ }
- cond_resched();
+ mutex_unlock(&i915->drm.struct_mutex);
- llist_for_each_entry_safe(obj, on, freed, freed) {
GEM_BUG_ON(obj->bind_count);
+ GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
+ GEM_BUG_ON(!list_empty(&obj->lut_list));
if (obj->ops->release)
obj->ops->release(obj);
@@ -4443,7 +4555,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- GEM_BUG_ON(obj->mm.pages);
+ GEM_BUG_ON(i915_gem_object_has_pages(obj));
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
@@ -4454,16 +4566,29 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
kfree(obj->bit_17);
i915_gem_object_free(obj);
+
+ if (on)
+ cond_resched();
}
+ intel_runtime_pm_put(i915);
}
static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
struct llist_node *freed;
- freed = llist_del_all(&i915->mm.free_list);
- if (unlikely(freed))
+ /* Free the oldest, most stale object to keep the free_list short */
+ freed = NULL;
+ if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
+ /* Only one consumer of llist_del_first() allowed */
+ spin_lock(&i915->mm.free_lock);
+ freed = llist_del_first(&i915->mm.free_list);
+ spin_unlock(&i915->mm.free_lock);
+ }
+ if (unlikely(freed)) {
+ freed->next = NULL;
__i915_gem_free_objects(i915, freed);
+ }
}
static void __i915_gem_free_work(struct work_struct *work)
@@ -4480,11 +4605,17 @@ static void __i915_gem_free_work(struct work_struct *work)
* unbound now.
*/
+ spin_lock(&i915->mm.free_lock);
while ((freed = llist_del_all(&i915->mm.free_list))) {
+ spin_unlock(&i915->mm.free_lock);
+
__i915_gem_free_objects(i915, freed);
if (need_resched())
- break;
+ return;
+
+ spin_lock(&i915->mm.free_lock);
}
+ spin_unlock(&i915->mm.free_lock);
}
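Two consumption patterns coexist above: the flush path pops a single stale object while holding mm.free_lock, because llist_del_first() tolerates only one consumer, whereas the worker drains the whole list but drops the lock around the actual freeing. A rough user-space analogue of the pop-one-under-the-lock, free-outside-it idea (a plain mutex and singly linked list, not the kernel llist API):

/* Illustrative stand-in for the free_list handling above (not llist). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static struct node *free_list;
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

static void defer_free(int id)
{
	struct node *n = malloc(sizeof(*n));

	n->id = id;
	pthread_mutex_lock(&free_lock);
	n->next = free_list;
	free_list = n;
	pthread_mutex_unlock(&free_lock);
}

/* Hot path: take at most one entry so the list stays short without
 * stalling the caller on a long drain. */
static void flush_one(void)
{
	struct node *n;

	pthread_mutex_lock(&free_lock);
	n = free_list;
	if (n)
		free_list = n->next;
	pthread_mutex_unlock(&free_lock);

	if (n) {
		printf("freeing %d\n", n->id);
		free(n);	/* the expensive work happens outside the lock */
	}
}

int main(void)
{
	defer_free(1);
	defer_free(2);
	flush_one();
	flush_one();
	flush_one();
	return 0;
}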
static void __i915_gem_free_object_rcu(struct rcu_head *head)
@@ -4543,6 +4674,12 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
void i915_gem_sanitize(struct drm_i915_private *i915)
{
+ if (i915_terminally_wedged(&i915->gpu_error)) {
+ mutex_lock(&i915->drm.struct_mutex);
+ i915_gem_unset_wedged(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ }
+
/*
* If we inherit context state from the BIOS or earlier occupants
* of the GPU, the GPU may be in an inconsistent state when we
@@ -4582,7 +4719,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
- if (ret)
+ if (ret && ret != -EIO)
goto err_unlock;
assert_kernel_context_is_current(dev_priv);
@@ -4597,14 +4734,14 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
/* As the idle_work is rearming if it detects a race, play safe and
* repeat the flush until it is definitely idle.
*/
- while (flush_delayed_work(&dev_priv->gt.idle_work))
- ;
+ drain_delayed_work(&dev_priv->gt.idle_work);
/* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
WARN_ON(dev_priv->gt.awake);
- WARN_ON(!intel_engines_are_idle(dev_priv));
+ if (WARN_ON(!intel_engines_are_idle(dev_priv)))
+ i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
/*
* Neither the BIOS, ourselves or any other kernel
@@ -4626,11 +4763,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* machine in an unusable condition.
*/
i915_gem_sanitize(dev_priv);
- goto out_rpm_put;
+
+ intel_runtime_pm_put(dev_priv);
+ return 0;
err_unlock:
mutex_unlock(&dev->struct_mutex);
-out_rpm_put:
intel_runtime_pm_put(dev_priv);
return ret;
}
@@ -4643,6 +4781,7 @@ void i915_gem_resume(struct drm_i915_private *dev_priv)
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev_priv);
+ i915_gem_restore_fences(dev_priv);
/* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
@@ -4756,6 +4895,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
init_unused_rings(dev_priv);
BUG_ON(!dev_priv->kernel_context);
+ if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+ ret = -EIO;
+ goto out;
+ }
ret = i915_ppgtt_init_hw(dev_priv);
if (ret) {
@@ -4786,7 +4929,7 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
return false;
/* TODO: make semaphores and Execlists play nicely together */
- if (i915.enable_execlists)
+ if (i915_modparams.enable_execlists)
return false;
if (value >= 0)
@@ -4805,9 +4948,18 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
+ /*
+ * We need to fallback to 4K pages since gvt gtt handling doesn't
+ * support huge page entries - we will need to check either hypervisor
+ * mm can support huge guest page or just do emulation in gvt.
+ */
+ if (intel_vgpu_active(dev_priv))
+ mkwrite_device_info(dev_priv)->page_sizes =
+ I915_GTT_PAGE_SIZE_4K;
+
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
- if (!i915.enable_execlists) {
+ if (!i915_modparams.enable_execlists) {
dev_priv->gt.resume = intel_legacy_submission_resume;
dev_priv->gt.cleanup_engine = intel_engine_cleanup;
} else {
@@ -4845,8 +4997,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* wedged. But we only want to do this where the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
- DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
- i915_gem_set_wedged(dev_priv);
+ if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+ DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+ i915_gem_set_wedged(dev_priv);
+ }
ret = 0;
}
@@ -4946,11 +5100,15 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
goto err_priorities;
INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
+
+ spin_lock_init(&dev_priv->mm.obj_lock);
+ spin_lock_init(&dev_priv->mm.free_lock);
init_llist_head(&dev_priv->mm.free_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
+
INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -4962,6 +5120,10 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
spin_lock_init(&dev_priv->fb_tracking.lock);
+ err = i915_gemfs_init(dev_priv);
+ if (err)
+ DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
+
return 0;
err_priorities:
@@ -5000,6 +5162,8 @@ void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
rcu_barrier();
+
+ i915_gemfs_fini(dev_priv);
}
int i915_gem_freeze(struct drm_i915_private *dev_priv)
@@ -5038,12 +5202,12 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
i915_gem_drain_freed_objects(dev_priv);
- mutex_lock(&dev_priv->drm.struct_mutex);
+ spin_lock(&dev_priv->mm.obj_lock);
for (p = phases; *p; p++) {
- list_for_each_entry(obj, *p, global_link)
+ list_for_each_entry(obj, *p, mm.link)
__start_cpu_write(obj);
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ spin_unlock(&dev_priv->mm.obj_lock);
return 0;
}
@@ -5362,7 +5526,17 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
goto err_unlock;
}
- pages = obj->mm.pages;
+ pages = fetch_and_zero(&obj->mm.pages);
+ if (pages) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ __i915_gem_object_reset_page_iter(obj);
+
+ spin_lock(&i915->mm.obj_lock);
+ list_del(&obj->mm.link);
+ spin_unlock(&i915->mm.obj_lock);
+ }
+
obj->ops = &i915_gem_phys_ops;
err = ____i915_gem_object_get_pages(obj);
@@ -5389,6 +5563,7 @@ err_unlock:
#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"
#include "selftests/huge_gem_object.c"
+#include "selftests/huge_pages.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index 8a04d33055be..f663cd919795 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -70,6 +70,7 @@ static const struct dma_fence_ops i915_clflush_ops = {
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
+ GEM_BUG_ON(!i915_gem_object_has_pages(obj));
drm_clflush_sg(obj->mm.pages);
intel_fb_obj_flush(obj, ORIGIN_CPU);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8afd2ce59b8d..f782cf2069c1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -107,14 +107,9 @@ static void lut_close(struct i915_gem_context *ctx)
rcu_read_lock();
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
struct i915_vma *vma = rcu_dereference_raw(*slot);
- struct drm_i915_gem_object *obj = vma->obj;
radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
-
- if (!i915_vma_is_ggtt(vma))
- i915_vma_close(vma);
-
- __i915_gem_object_release_unless_active(obj);
+ __i915_gem_object_release_unless_active(vma->obj);
}
rcu_read_unlock();
}
@@ -200,6 +195,11 @@ static void context_close(struct i915_gem_context *ctx)
{
i915_gem_context_set_closed(ctx);
+ /*
+ * The LUT uses the VMA as a backpointer to unref the object,
+ * so we need to clear the LUT before we close all the VMA (inside
+ * the ppgtt).
+ */
lut_close(ctx);
if (ctx->ppgtt)
i915_ppgtt_close(&ctx->ppgtt->base);
@@ -316,7 +316,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
* present or not in use we still need a small bias as ring wraparound
* at offset 0 sometimes hangs. No idea why.
*/
- if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
+ if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading)
ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
else
ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
@@ -409,7 +409,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
i915_gem_context_set_closed(ctx); /* not user accessible */
i915_gem_context_clear_bannable(ctx);
i915_gem_context_set_force_single_submission(ctx);
- if (!i915.enable_guc_submission)
+ if (!i915_modparams.enable_guc_submission)
ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
@@ -418,14 +418,43 @@ out:
return ctx;
}
+static struct i915_gem_context *
+create_kernel_context(struct drm_i915_private *i915, int prio)
+{
+ struct i915_gem_context *ctx;
+
+ ctx = i915_gem_create_context(i915, NULL);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ i915_gem_context_clear_bannable(ctx);
+ ctx->priority = prio;
+ ctx->ring_size = PAGE_SIZE;
+
+ GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
+ return ctx;
+}
+
+static void
+destroy_kernel_context(struct i915_gem_context **ctxp)
+{
+ struct i915_gem_context *ctx;
+
+ /* Keep the context ref so that we can free it immediately ourselves */
+ ctx = i915_gem_context_get(fetch_and_zero(ctxp));
+ GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
+ context_close(ctx);
+ i915_gem_context_free(ctx);
+}
+
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
+ int err;
- /* Init should only be called once per module load. Eventually the
- * restriction on the context_disabled check can be loosened. */
- if (WARN_ON(dev_priv->kernel_context))
- return 0;
+ GEM_BUG_ON(dev_priv->kernel_context);
INIT_LIST_HEAD(&dev_priv->contexts.list);
INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
@@ -433,7 +462,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
if (intel_vgpu_active(dev_priv) &&
HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- if (!i915.enable_execlists) {
+ if (!i915_modparams.enable_execlists) {
DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
return -EINVAL;
}
@@ -443,28 +472,38 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
ida_init(&dev_priv->contexts.hw_ida);
- ctx = i915_gem_create_context(dev_priv, NULL);
+ /* lowest priority; idle task */
+ ctx = create_kernel_context(dev_priv, I915_PRIORITY_MIN);
if (IS_ERR(ctx)) {
- DRM_ERROR("Failed to create default global context (error %ld)\n",
- PTR_ERR(ctx));
- return PTR_ERR(ctx);
+ DRM_ERROR("Failed to create default global context\n");
+ err = PTR_ERR(ctx);
+ goto err;
}
-
- /* For easy recognisablity, we want the kernel context to be 0 and then
+ /*
+ * For easy recognisability, we want the kernel context to be 0 and then
* all user contexts will have non-zero hw_id.
*/
GEM_BUG_ON(ctx->hw_id);
-
- i915_gem_context_clear_bannable(ctx);
- ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
dev_priv->kernel_context = ctx;
- GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+ /* highest priority; preempting task */
+ ctx = create_kernel_context(dev_priv, INT_MAX);
+ if (IS_ERR(ctx)) {
+ DRM_ERROR("Failed to create default preempt context\n");
+ err = PTR_ERR(ctx);
+ goto err_kernel_context;
+ }
+ dev_priv->preempt_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
dev_priv->engine[RCS]->context_size ? "logical" :
"fake");
return 0;
+
+err_kernel_context:
+ destroy_kernel_context(&dev_priv->kernel_context);
+err:
+ return err;
}
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
@@ -485,7 +524,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
}
/* Force the GPU state to be restored on enabling */
- if (!i915.enable_execlists) {
+ if (!i915_modparams.enable_execlists) {
struct i915_gem_context *ctx;
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
@@ -509,15 +548,10 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
- struct i915_gem_context *ctx;
-
lockdep_assert_held(&i915->drm.struct_mutex);
- /* Keep the context so that we can free it immediately ourselves */
- ctx = i915_gem_context_get(fetch_and_zero(&i915->kernel_context));
- GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
- context_close(ctx);
- i915_gem_context_free(ctx);
+ destroy_kernel_context(&i915->preempt_context);
+ destroy_kernel_context(&i915->kernel_context);
/* Must free all deferred contexts (via flush_workqueue) first */
ida_destroy(&i915->contexts.hw_ida);
@@ -570,7 +604,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 flags)
enum intel_engine_id id;
const int num_rings =
/* Use an extended w/a on gen7 if signalling from other rings */
- (i915.semaphores && INTEL_GEN(dev_priv) == 7) ?
+ (i915_modparams.semaphores && INTEL_GEN(dev_priv) == 7) ?
INTEL_INFO(dev_priv)->num_rings - 1 :
0;
int len;
@@ -839,7 +873,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
struct intel_engine_cs *engine = req->engine;
lockdep_assert_held(&req->i915->drm.struct_mutex);
- if (i915.enable_execlists)
+ if (i915_modparams.enable_execlists)
return 0;
if (!req->ctx->engine[engine->id].state) {
@@ -1038,6 +1072,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_BANNABLE:
args->value = i915_gem_context_is_bannable(ctx);
break;
+ case I915_CONTEXT_PARAM_PRIORITY:
+ args->value = ctx->priority;
+ break;
default:
ret = -EINVAL;
break;
@@ -1093,6 +1130,26 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
else
i915_gem_context_clear_bannable(ctx);
break;
+
+ case I915_CONTEXT_PARAM_PRIORITY:
+ {
+ int priority = args->value;
+
+ if (args->size)
+ ret = -EINVAL;
+ else if (!to_i915(dev)->engine[RCS]->schedule)
+ ret = -ENODEV;
+ else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
+ priority < I915_CONTEXT_MIN_USER_PRIORITY)
+ ret = -EINVAL;
+ else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
+ !capable(CAP_SYS_NICE))
+ ret = -EPERM;
+ else
+ ctx->priority = priority;
+ }
+ break;
+
default:
ret = -EINVAL;
break;
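The new PRIORITY case is a chain of sanity checks ahead of the assignment. Restated as a plain function so the error paths read top to bottom (the limits and the capability test below are assumed stand-ins for the I915_CONTEXT_*_PRIORITY constants and capable(CAP_SYS_NICE)):

/* Stand-in for the priority validation above; the limits are assumptions. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MIN_USER_PRIORITY	-1023
#define DEFAULT_PRIORITY	0
#define MAX_USER_PRIORITY	1023

static int set_priority(int *ctx_priority, long value, unsigned int size,
			bool has_scheduler, bool can_nice)
{
	if (size)
		return -EINVAL;		/* no extra payload expected */
	if (!has_scheduler)
		return -ENODEV;		/* the engine cannot reorder anyway */
	if (value > MAX_USER_PRIORITY || value < MIN_USER_PRIORITY)
		return -EINVAL;
	if (value > DEFAULT_PRIORITY && !can_nice)
		return -EPERM;		/* boosting needs privilege */

	*ctx_priority = value;
	return 0;
}

int main(void)
{
	int prio = DEFAULT_PRIORITY;

	printf("%d\n", set_priority(&prio, 500, 0, true, false));  /* -EPERM */
	printf("%d\n", set_priority(&prio, -200, 0, true, false)); /* ok */
	printf("prio=%d\n", prio);
	return 0;
}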
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 6176e589cf09..864439a214c8 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -256,11 +256,21 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return drm_gem_dmabuf_export(dev, &exp_info);
}
-static struct sg_table *
-i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
- return dma_buf_map_attachment(obj->base.import_attach,
- DMA_BIDIRECTIONAL);
+ struct sg_table *pages;
+ unsigned int sg_page_sizes;
+
+ pages = dma_buf_map_attachment(obj->base.import_attach,
+ DMA_BIDIRECTIONAL);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+ __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+
+ return 0;
}
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e161d383b526..8daa8a78cdc0 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,6 +33,10 @@
#include "intel_drv.h"
#include "i915_trace.h"
+I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
+ bool fail_if_busy:1;
+} igt_evict_ctl;)
+
static bool ggtt_is_idle(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -81,7 +85,7 @@ mark_free(struct drm_mm_scan *scan,
if (i915_vma_is_pinned(vma))
return false;
- if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
+ if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
return false;
list_add(&vma->evict_link, unwind);
@@ -205,6 +209,9 @@ search_again:
* the kernel's there is no more we can evict.
*/
if (!ggtt_is_idle(dev_priv)) {
+ if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
+ return -EBUSY;
+
ret = ggtt_flush(dev_priv);
if (ret)
return ret;
@@ -330,6 +337,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
+ if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma)) {
+ ret = -ENOSPC;
+ break;
+ }
+
/* Overlap of objects in the same batch? */
if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 83876a1c8d98..435ed95df144 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -58,6 +58,7 @@ enum {
#define __EXEC_HAS_RELOC BIT(31)
#define __EXEC_VALIDATED BIT(30)
+#define __EXEC_INTERNAL_FLAGS (~0u << 30)
#define UPDATE PIN_OFFSET_FIXED
#define BATCH_OFFSET_BIAS (256*1024)
@@ -268,6 +269,11 @@ static inline u64 gen8_noncanonical_addr(u64 address)
return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}
+static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
+{
+ return eb->engine->needs_cmd_parser && eb->batch_len;
+}
+
static int eb_create(struct i915_execbuffer *eb)
{
if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
@@ -365,12 +371,12 @@ eb_pin_vma(struct i915_execbuffer *eb,
return false;
if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
- if (unlikely(i915_vma_get_fence(vma))) {
+ if (unlikely(i915_vma_pin_fence(vma))) {
i915_vma_unpin(vma);
return false;
}
- if (i915_vma_pin_fence(vma))
+ if (vma->fence)
exec_flags |= __EXEC_OBJECT_HAS_FENCE;
}
@@ -383,7 +389,7 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
- i915_vma_unpin_fence(vma);
+ __i915_vma_unpin_fence(vma);
__i915_vma_unpin(vma);
}
@@ -561,13 +567,13 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
}
if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
- err = i915_vma_get_fence(vma);
+ err = i915_vma_pin_fence(vma);
if (unlikely(err)) {
i915_vma_unpin(vma);
return err;
}
- if (i915_vma_pin_fence(vma))
+ if (vma->fence)
exec_flags |= __EXEC_OBJECT_HAS_FENCE;
}
@@ -678,7 +684,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
- struct drm_i915_gem_object *uninitialized_var(obj);
+ struct drm_i915_gem_object *obj;
unsigned int i;
int err;
@@ -724,19 +730,17 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
goto err_obj;
}
+ /* transfer ref to ctx */
vma->open_count++;
list_add(&lut->obj_link, &obj->lut_list);
list_add(&lut->ctx_link, &eb->ctx->handles_list);
lut->ctx = eb->ctx;
lut->handle = handle;
- /* transfer ref to ctx */
- obj = NULL;
-
add_vma:
err = eb_add_vma(eb, i, vma);
if (unlikely(err))
- goto err_obj;
+ goto err_vma;
GEM_BUG_ON(vma != eb->vma[i]);
GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
@@ -765,8 +769,7 @@ add_vma:
return eb_reserve(eb);
err_obj:
- if (obj)
- i915_gem_object_put(obj);
+ i915_gem_object_put(obj);
err_vma:
eb->vma[i] = NULL;
return err;
@@ -975,7 +978,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
return ERR_PTR(err);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
- PIN_MAPPABLE | PIN_NONBLOCK);
+ PIN_MAPPABLE |
+ PIN_NONBLOCK |
+ PIN_NONFAULT);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
err = drm_mm_insert_node_in_range
@@ -1163,6 +1168,13 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
if (unlikely(!cache->rq)) {
int err;
+ /* If we need to copy for the cmdparser, we will stall anyway */
+ if (eb_use_cmdparser(eb))
+ return ERR_PTR(-EWOULDBLOCK);
+
+ if (!intel_engine_can_store_dword(eb->engine))
+ return ERR_PTR(-ENODEV);
+
err = __reloc_gpu_alloc(eb, vma, len);
if (unlikely(err))
return ERR_PTR(err);
@@ -1187,9 +1199,7 @@ relocate_entry(struct i915_vma *vma,
if (!eb->reloc_cache.vaddr &&
(DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
- !reservation_object_test_signaled_rcu(vma->resv, true)) &&
- __intel_engine_can_store_dword(eb->reloc_cache.gen,
- eb->engine->class)) {
+ !reservation_object_test_signaled_rcu(vma->resv, true))) {
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
u32 *batch;
@@ -1581,7 +1591,7 @@ static int eb_prefault_relocations(const struct i915_execbuffer *eb)
const unsigned int count = eb->buffer_count;
unsigned int i;
- if (unlikely(i915.prefault_disable))
+ if (unlikely(i915_modparams.prefault_disable))
return 0;
for (i = 0; i < count; i++) {
@@ -2190,6 +2200,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
int out_fence_fd = -1;
int err;
+ BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
~__EXEC_OBJECT_UNKNOWN_FLAGS);
@@ -2303,7 +2314,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
- if (eb.engine->needs_cmd_parser && eb.batch_len) {
+ if (eb_use_cmdparser(&eb)) {
struct i915_vma *vma;
vma = eb_parse(&eb, drm_is_current_master(file));
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 5fe2cd8c8f28..012250f25255 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -240,7 +240,8 @@ static int fence_update(struct drm_i915_fence_reg *fence,
/* Ensure that all userspace CPU access is completed before
* stealing the fence.
*/
- i915_gem_release_mmap(fence->vma->obj);
+ GEM_BUG_ON(fence->vma->fence != fence);
+ i915_vma_revoke_mmap(fence->vma);
fence->vma->fence = NULL;
fence->vma = NULL;
@@ -280,8 +281,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
*
* 0 on success, negative error code on failure.
*/
-int
-i915_vma_put_fence(struct i915_vma *vma)
+int i915_vma_put_fence(struct i915_vma *vma)
{
struct drm_i915_fence_reg *fence = vma->fence;
@@ -299,6 +299,8 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
struct drm_i915_fence_reg *fence;
list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
+ GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
+
if (fence->pin_count)
continue;
@@ -313,7 +315,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
}
/**
- * i915_vma_get_fence - set up fencing for a vma
+ * i915_vma_pin_fence - set up fencing for a vma
* @vma: vma to map through a fence reg
*
* When mapping objects through the GTT, userspace wants to be able to write
@@ -331,10 +333,11 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
* 0 on success, negative error code on failure.
*/
int
-i915_vma_get_fence(struct i915_vma *vma)
+i915_vma_pin_fence(struct i915_vma *vma)
{
struct drm_i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+ int err;
/* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
@@ -344,6 +347,8 @@ i915_vma_get_fence(struct i915_vma *vma)
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
fence = vma->fence;
+ GEM_BUG_ON(fence->vma != vma);
+ fence->pin_count++;
if (!fence->dirty) {
list_move_tail(&fence->link,
&fence->i915->mm.fence_list);
@@ -353,10 +358,76 @@ i915_vma_get_fence(struct i915_vma *vma)
fence = fence_find(vma->vm->i915);
if (IS_ERR(fence))
return PTR_ERR(fence);
+
+ GEM_BUG_ON(fence->pin_count);
+ fence->pin_count++;
} else
return 0;
- return fence_update(fence, set);
+ err = fence_update(fence, set);
+ if (err)
+ goto out_unpin;
+
+ GEM_BUG_ON(fence->vma != set);
+ GEM_BUG_ON(vma->fence != (set ? fence : NULL));
+
+ if (set)
+ return 0;
+
+out_unpin:
+ fence->pin_count--;
+ return err;
+}
+
+/**
+ * i915_reserve_fence - Reserve a fence for vGPU
+ * @dev_priv: i915 device private
+ *
+ * This function walks the fence regs looking for a free one and removes
+ * it from the fence_list. It is used to reserve a fence for vGPU to use.
+ */
+struct drm_i915_fence_reg *
+i915_reserve_fence(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_fence_reg *fence;
+ int count;
+ int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* Keep at least one fence available for the display engine. */
+ count = 0;
+ list_for_each_entry(fence, &dev_priv->mm.fence_list, link)
+ count += !fence->pin_count;
+ if (count <= 1)
+ return ERR_PTR(-ENOSPC);
+
+ fence = fence_find(dev_priv);
+ if (IS_ERR(fence))
+ return fence;
+
+ if (fence->vma) {
+ /* Force-remove fence from VMA */
+ ret = fence_update(fence, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ list_del(&fence->link);
+ return fence;
+}
+
+/**
+ * i915_unreserve_fence - Reclaim a reserved fence
+ * @fence: the fence reg
+ *
+ * This function adds a reserved fence register back from vGPU to the fence_list.
+ */
+void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
+{
+ lockdep_assert_held(&fence->i915->drm.struct_mutex);
+
+ list_add(&fence->link, &fence->i915->mm.fence_list);
}
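i915_reserve_fence() counts the unpinned fence registers and refuses to hand one out to vGPU if that would leave none spare for the display engine. A minimal sketch of just that keep-one-free rule, with an array standing in for the fence_list and an invented struct:

/* Sketch of the "keep one fence free for display" rule; types invented. */
#include <errno.h>
#include <stdio.h>

struct fence_reg { int pin_count; };

static int reserve_fence(struct fence_reg *regs, int num)
{
	int count = 0, i;

	for (i = 0; i < num; i++)
		count += !regs[i].pin_count;
	if (count <= 1)
		return -ENOSPC;		/* leave one for the display engine */

	for (i = 0; i < num; i++)
		if (!regs[i].pin_count)
			return i;	/* index of the reg handed out */
	return -ENOSPC;
}

int main(void)
{
	struct fence_reg regs[3] = { { 1 }, { 0 }, { 0 } };

	printf("reserved reg %d\n", reserve_fence(regs, 3));
	return 0;
}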
/**
@@ -378,8 +449,10 @@ void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
+ GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
+
if (fence->vma)
- i915_gem_release_mmap(fence->vma->obj);
+ i915_vma_revoke_mmap(fence->vma);
}
}
@@ -399,13 +472,15 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct i915_vma *vma = reg->vma;
+ GEM_BUG_ON(vma && vma->fence != reg);
+
/*
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
if (vma && !i915_gem_object_is_tiled(vma->obj)) {
GEM_BUG_ON(!reg->dirty);
- GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
+ GEM_BUG_ON(i915_vma_has_userfault(vma));
list_move(&reg->link, &dev_priv->mm.fence_list);
vma->fence = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ad524cb0f6fc..2af65ecf2df8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -135,11 +135,12 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt)
{
- bool has_aliasing_ppgtt;
bool has_full_ppgtt;
bool has_full_48bit_ppgtt;
- has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
+ if (!dev_priv->info.has_aliasing_ppgtt)
+ return 0;
+
has_full_ppgtt = dev_priv->info.has_full_ppgtt;
has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
@@ -149,9 +150,6 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
}
- if (!has_aliasing_ppgtt)
- return 0;
-
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work.
@@ -180,7 +178,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 0;
}
- if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) {
+ if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
if (has_full_48bit_ppgtt)
return 3;
@@ -188,7 +186,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 2;
}
- return has_aliasing_ppgtt ? 1 : 0;
+ return 1;
}
static int ppgtt_bind_vma(struct i915_vma *vma,
@@ -205,8 +203,6 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
return ret;
}
- vma->pages = vma->obj->mm.pages;
-
/* Currently applicable only to VLV */
pte_flags = 0;
if (vma->obj->gt_ro)
@@ -222,6 +218,30 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
+static int ppgtt_set_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->pages);
+
+ vma->pages = vma->obj->mm.pages;
+
+ vma->page_sizes = vma->obj->mm.page_sizes;
+
+ return 0;
+}
+
+static void clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+
+ if (vma->pages != vma->obj->mm.pages) {
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
+ }
+ vma->pages = NULL;
+
+ memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
+}
+
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
@@ -230,13 +250,13 @@ static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
switch (level) {
case I915_CACHE_NONE:
- pte |= PPAT_UNCACHED_INDEX;
+ pte |= PPAT_UNCACHED;
break;
case I915_CACHE_WT:
- pte |= PPAT_DISPLAY_ELLC_INDEX;
+ pte |= PPAT_DISPLAY_ELLC;
break;
default:
- pte |= PPAT_CACHED_INDEX;
+ pte |= PPAT_CACHED;
break;
}
@@ -249,9 +269,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
if (level != I915_CACHE_NONE)
- pde |= PPAT_CACHED_PDE_INDEX;
+ pde |= PPAT_CACHED_PDE;
else
- pde |= PPAT_UNCACHED_INDEX;
+ pde |= PPAT_UNCACHED;
return pde;
}
@@ -356,39 +376,86 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
- struct page *page;
+ struct pagevec *pvec = &vm->free_pages;
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
- if (vm->free_pages.nr)
- return vm->free_pages.pages[--vm->free_pages.nr];
+ if (likely(pvec->nr))
+ return pvec->pages[--pvec->nr];
+
+ if (!vm->pt_kmap_wc)
+ return alloc_page(gfp);
+
+ /* A placeholder for a specific mutex to guard the WC stash */
+ lockdep_assert_held(&vm->i915->drm.struct_mutex);
+
+ /* Look in our global stash of WC pages... */
+ pvec = &vm->i915->mm.wc_stash;
+ if (likely(pvec->nr))
+ return pvec->pages[--pvec->nr];
+
+ /* Otherwise batch allocate pages to amortize the cost of set_pages_wc. */
+ do {
+ struct page *page;
- page = alloc_page(gfp);
- if (!page)
+ page = alloc_page(gfp);
+ if (unlikely(!page))
+ break;
+
+ pvec->pages[pvec->nr++] = page;
+ } while (pagevec_space(pvec));
+
+ if (unlikely(!pvec->nr))
return NULL;
- if (vm->pt_kmap_wc)
- set_pages_array_wc(&page, 1);
+ set_pages_array_wc(pvec->pages, pvec->nr);
- return page;
+ return pvec->pages[--pvec->nr];
}
-static void vm_free_pages_release(struct i915_address_space *vm)
+static void vm_free_pages_release(struct i915_address_space *vm,
+ bool immediate)
{
- GEM_BUG_ON(!pagevec_count(&vm->free_pages));
+ struct pagevec *pvec = &vm->free_pages;
+
+ GEM_BUG_ON(!pagevec_count(pvec));
- if (vm->pt_kmap_wc)
- set_pages_array_wb(vm->free_pages.pages,
- pagevec_count(&vm->free_pages));
+ if (vm->pt_kmap_wc) {
+ struct pagevec *stash = &vm->i915->mm.wc_stash;
- __pagevec_release(&vm->free_pages);
+ /* When we use WC, first fill up the global stash and then
+ * only if full immediately free the overflow.
+ */
+
+ lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ if (pagevec_space(stash)) {
+ do {
+ stash->pages[stash->nr++] =
+ pvec->pages[--pvec->nr];
+ if (!pvec->nr)
+ return;
+ } while (pagevec_space(stash));
+
+ /* As we have made some room in the VM's free_pages,
+ * we can wait for it to fill again. Unless we are
+ * inside i915_address_space_fini() and must
+ * immediately release the pages!
+ */
+ if (!immediate)
+ return;
+ }
+
+ set_pages_array_wb(pvec->pages, pvec->nr);
+ }
+
+ __pagevec_release(pvec);
}
static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
if (!pagevec_add(&vm->free_pages, page))
- vm_free_pages_release(vm);
+ vm_free_pages_release(vm, false);
}
static int __setup_page_dma(struct i915_address_space *vm,
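The reworked vm_alloc_page()/vm_free_pages_release() pair amortises the cost of set_pages_array_wc() by allocating a whole pagevec at once and recycling freed pages through a per-device stash before ever giving them back. A user-space analogue of that batch-convert-and-stash idea (malloc/free and a fixed array instead of pages and pagevecs; the batched conversion is faked by a printf):

/* User-space analogue of the WC page stash above: batch the expensive
 * attribute change and recycle buffers through a small fixed stash. */
#include <stdio.h>
#include <stdlib.h>

#define STASH_SIZE 14	/* roughly a pagevec's worth */

static void *stash[STASH_SIZE];
static int stash_nr;

static void make_wc(void **pages, int nr)
{
	/* Stand-in for set_pages_array_wc(): pretend this is costly, so we
	 * want to call it once for a whole batch rather than per page. */
	printf("converting %d pages in one call\n", nr);
}

static void *stash_alloc(void)
{
	int n = 0;

	if (stash_nr)
		return stash[--stash_nr];	/* reuse before allocating */

	/* Refill the stash in one go, then convert the whole batch. */
	while (n < STASH_SIZE)
		stash[n++] = malloc(4096);
	make_wc(stash, n);
	stash_nr = n;

	return stash[--stash_nr];
}

static void stash_free(void *page)
{
	if (stash_nr < STASH_SIZE)
		stash[stash_nr++] = page;	/* keep it for reuse */
	else
		free(page);			/* stash full: release */
}

int main(void)
{
	void *a = stash_alloc(), *b = stash_alloc();

	stash_free(a);
	stash_free(b);
	free(stash_alloc());	/* comes straight back from the stash */
	while (stash_nr)
		free(stash[--stash_nr]);
	return 0;
}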
@@ -434,10 +501,8 @@ static void fill_page_dma(struct i915_address_space *vm,
const u64 val)
{
u64 * const vaddr = kmap_atomic(p->page);
- int i;
- for (i = 0; i < 512; i++)
- vaddr[i] = val;
+ memset64(vaddr, val, PAGE_SIZE / sizeof(val));
kunmap_atomic(vaddr);
}
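fill_page_dma() now replaces a 512-iteration store loop with a single memset64() call. memset64() is a kernel helper, so the sketch below uses a trivial local equivalent to show the same fill of a 4KiB page of 64-bit entries:

/* Local stand-in for the kernel's memset64(); fills a page of u64 values. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void memset64_local(uint64_t *p, uint64_t v, size_t n)
{
	while (n--)
		*p++ = v;
}

int main(void)
{
	const size_t page = 4096;
	uint64_t *vaddr = malloc(page);

	/* One call covers the whole page: 4096 / 8 = 512 entries. */
	memset64_local(vaddr, 0xdeadbeefcafef00dull, page / sizeof(*vaddr));

	printf("first=%#llx last=%#llx\n",
	       (unsigned long long)vaddr[0],
	       (unsigned long long)vaddr[page / sizeof(*vaddr) - 1]);
	free(vaddr);
	return 0;
}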
@@ -452,12 +517,73 @@ static void fill_page_dma_32(struct i915_address_space *vm,
static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
- return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
+ struct page *page = NULL;
+ dma_addr_t addr;
+ int order;
+
+ /*
+ * In order to utilize 64K pages for an object with a size < 2M, we will
+ * need to support a 64K scratch page, given that every 16th entry for a
+ * page-table operating in 64K mode must point to a properly aligned 64K
+ * region, including any PTEs which happen to point to scratch.
+ *
+ * This is only relevant for the 48b PPGTT where we support
+ * huge-gtt-pages, see also i915_vma_insert().
+ *
+ * TODO: we should really consider write-protecting the scratch-page and
+ * sharing between ppgtt
+ */
+ if (i915_vm_is_48bit(vm) &&
+ HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
+ order = get_order(I915_GTT_PAGE_SIZE_64K);
+ page = alloc_pages(gfp | __GFP_ZERO | __GFP_NOWARN, order);
+ if (page) {
+ addr = dma_map_page(vm->dma, page, 0,
+ I915_GTT_PAGE_SIZE_64K,
+ PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(vm->dma, addr))) {
+ __free_pages(page, order);
+ page = NULL;
+ }
+
+ if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
+ dma_unmap_page(vm->dma, addr,
+ I915_GTT_PAGE_SIZE_64K,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_pages(page, order);
+ page = NULL;
+ }
+ }
+ }
+
+ if (!page) {
+ order = 0;
+ page = alloc_page(gfp | __GFP_ZERO);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(vm->dma, addr))) {
+ __free_page(page);
+ return -ENOMEM;
+ }
+ }
+
+ vm->scratch_page.page = page;
+ vm->scratch_page.daddr = addr;
+ vm->scratch_page.order = order;
+
+ return 0;
}
static void cleanup_scratch_page(struct i915_address_space *vm)
{
- cleanup_page_dma(vm, &vm->scratch_page);
+ struct i915_page_dma *p = &vm->scratch_page;
+
+ dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_pages(p->page, p->order);
}
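setup_scratch_page() first attempts a 64K-aligned scratch allocation, since every 16th entry of a page table operating in 64K mode must point at properly aligned scratch, and falls back to a single 4K page if that fails. The sketch below keeps only that try-aligned-then-fall-back shape, using posix_memalign() in place of alloc_pages()/dma_map_page(); the two sizes mirror I915_GTT_PAGE_SIZE_64K and PAGE_SIZE, nothing else is real:

/* Sketch of the aligned-scratch-with-fallback pattern above. */
#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>

#define SZ_64K	(64 * 1024)
#define SZ_4K	(4 * 1024)

struct scratch { void *page; size_t size; };

static int setup_scratch(struct scratch *s, int want_64k)
{
	void *p = NULL;

	if (want_64k && posix_memalign(&p, SZ_64K, SZ_64K) == 0) {
		s->page = p;
		s->size = SZ_64K;	/* aligned 64K scratch available */
		return 0;
	}

	p = calloc(1, SZ_4K);		/* fall back to a single 4K page */
	if (!p)
		return -1;
	s->page = p;
	s->size = SZ_4K;
	return 0;
}

int main(void)
{
	struct scratch s;

	if (setup_scratch(&s, 1) == 0) {
		printf("scratch size %zu\n", s.size);
		free(s.page);
	}
	return 0;
}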
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
@@ -925,6 +1051,105 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
cache_level);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+}
+
+static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
+ struct i915_page_directory_pointer **pdps,
+ struct sgt_dma *iter,
+ enum i915_cache_level cache_level)
+{
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+ u64 start = vma->node.start;
+ dma_addr_t rem = iter->sg->length;
+
+ do {
+ struct gen8_insert_pte idx = gen8_insert_pte(start);
+ struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
+ struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
+ unsigned int page_size;
+ bool maybe_64K = false;
+ gen8_pte_t encode = pte_encode;
+ gen8_pte_t *vaddr;
+ u16 index, max;
+
+ if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
+ rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
+ index = idx.pde;
+ max = I915_PDES;
+ page_size = I915_GTT_PAGE_SIZE_2M;
+
+ encode |= GEN8_PDE_PS_2M;
+
+ vaddr = kmap_atomic_px(pd);
+ } else {
+ struct i915_page_table *pt = pd->page_table[idx.pde];
+
+ index = idx.pte;
+ max = GEN8_PTES;
+ page_size = I915_GTT_PAGE_SIZE;
+
+ if (!index &&
+ vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+ (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+ rem >= (max - index) << PAGE_SHIFT))
+ maybe_64K = true;
+
+ vaddr = kmap_atomic_px(pt);
+ }
+
+ do {
+ GEM_BUG_ON(iter->sg->length < page_size);
+ vaddr[index++] = encode | iter->dma;
+
+ start += page_size;
+ iter->dma += page_size;
+ rem -= page_size;
+ if (iter->dma >= iter->max) {
+ iter->sg = __sg_next(iter->sg);
+ if (!iter->sg)
+ break;
+
+ rem = iter->sg->length;
+ iter->dma = sg_dma_address(iter->sg);
+ iter->max = iter->dma + rem;
+
+ if (maybe_64K && index < max &&
+ !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+ (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+ rem >= (max - index) << PAGE_SHIFT)))
+ maybe_64K = false;
+
+ if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
+ break;
+ }
+ } while (rem >= page_size && index < max);
+
+ kunmap_atomic(vaddr);
+
+ /*
+ * Is it safe to mark the 2M block as 64K? -- Either we have
+ * filled whole page-table with 64K entries, or filled part of
+ * it and have reached the end of the sg table and we have
+ * enough padding.
+ */
+ if (maybe_64K &&
+ (index == max ||
+ (i915_vm_has_scratch_64K(vma->vm) &&
+ !iter->sg && IS_ALIGNED(vma->node.start +
+ vma->node.size,
+ I915_GTT_PAGE_SIZE_2M)))) {
+ vaddr = kmap_atomic_px(pd);
+ vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
+ kunmap_atomic(vaddr);
+ page_size = I915_GTT_PAGE_SIZE_64K;
+ }
+
+ vma->page_sizes.gtt |= page_size;
+ } while (iter->sg);
}
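Per chunk of the sg list, gen8_ppgtt_insert_huge_entries() decides between a 2M PDE, a run of 64K-capable PTEs, or plain 4K PTEs, based on the alignment of the DMA address and how much of the chunk remains. The sketch below isolates a simplified version of that decision (it drops the run-to-end-of-page-table case and the later maybe_64K downgrade, keeping only the alignment tests):

/* Page-size selection only; mirrors the alignment tests in the hunk. */
#include <stdint.h>
#include <stdio.h>

#define SZ_4K	(1ull << 12)
#define SZ_64K	(1ull << 16)
#define SZ_2M	(1ull << 21)

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

static uint64_t pick_page_size(uint64_t dma, uint64_t rem, int at_pde_start)
{
	/* A whole 2M PDE only works from the start of a PDE, with a
	 * 2M-aligned address and at least 2M of contiguous DMA left. */
	if (at_pde_start && IS_ALIGNED(dma, SZ_2M) && rem >= SZ_2M)
		return SZ_2M;

	/* 64K needs a 64K-aligned address and a 64K-multiple of data. */
	if (IS_ALIGNED(dma, SZ_64K) && IS_ALIGNED(rem, SZ_64K))
		return SZ_64K;

	return SZ_4K;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)pick_page_size(0x200000, SZ_2M, 1));
	printf("%#llx\n", (unsigned long long)pick_page_size(0x210000, 3 * SZ_64K, 0));
	printf("%#llx\n", (unsigned long long)pick_page_size(0x201000, SZ_64K, 0));
	return 0;
}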
static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
@@ -935,11 +1160,18 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
- struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
- while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
- &idx, cache_level))
- GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+ if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+ } else {
+ struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
+
+ while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
+ &iter, &idx, cache_level))
+ GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+ }
}
static void gen8_free_page_tables(struct i915_address_space *vm,
@@ -1098,19 +1330,22 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
unsigned int pde;
gen8_for_each_pde(pt, pd, start, length, pde) {
+ int count = gen8_pte_count(start, length);
+
if (pt == vm->scratch_pt) {
pt = alloc_pt(vm);
if (IS_ERR(pt))
goto unwind;
- gen8_initialize_pt(vm, pt);
+ if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
+ gen8_initialize_pt(vm, pt);
gen8_ppgtt_set_pde(vm, pd, pt, pde);
pd->used_pdes++;
GEM_BUG_ON(pd->used_pdes > I915_PDES);
}
- pt->used_ptes += gen8_pte_count(start, length);
+ pt->used_ptes += count;
}
return 0;
@@ -1333,18 +1568,18 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1ULL << 48 :
1ULL << 32;
- ret = gen8_init_scratch(&ppgtt->base);
- if (ret) {
- ppgtt->base.total = 0;
- return ret;
- }
-
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
*/
if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
ppgtt->base.pt_kmap_wc = true;
+ ret = gen8_init_scratch(&ppgtt->base);
+ if (ret) {
+ ppgtt->base.total = 0;
+ return ret;
+ }
+
if (use_4lvl(vm)) {
ret = setup_px(&ppgtt->base, &ppgtt->pml4);
if (ret)
@@ -1381,6 +1616,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
ppgtt->base.unbind_vma = ppgtt_unbind_vma;
ppgtt->base.bind_vma = ppgtt_bind_vma;
+ ppgtt->base.set_pages = ppgtt_set_pages;
+ ppgtt->base.clear_pages = clear_pages;
ppgtt->debug_dump = gen8_dump_ppgtt;
return 0;
@@ -1652,6 +1889,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
}
} while (1);
kunmap_atomic(vaddr);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
static int gen6_alloc_va_range(struct i915_address_space *vm,
@@ -1820,6 +2059,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.unbind_vma = ppgtt_unbind_vma;
ppgtt->base.bind_vma = ppgtt_bind_vma;
+ ppgtt->base.set_pages = ppgtt_set_pages;
+ ppgtt->base.clear_pages = clear_pages;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->debug_dump = gen6_dump_ppgtt;
@@ -1859,13 +2100,13 @@ static void i915_address_space_init(struct i915_address_space *vm,
INIT_LIST_HEAD(&vm->unbound_list);
list_add_tail(&vm->global_link, &dev_priv->vm_list);
- pagevec_init(&vm->free_pages, false);
+ pagevec_init(&vm->free_pages);
}
static void i915_address_space_fini(struct i915_address_space *vm)
{
if (pagevec_count(&vm->free_pages))
- vm_free_pages_release(vm);
+ vm_free_pages_release(vm, true);
i915_gem_timeline_fini(&vm->timeline);
drm_mm_takedown(&vm->mm);
@@ -1878,15 +2119,32 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
*/
- /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */
+ /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */
if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
- else if (IS_GEN9_BC(dev_priv))
+ else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
else if (IS_GEN9_LP(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+
+ /*
+ * To support 64K PTEs we need to first enable the use of the
+ * Intermediate-Page-Size (IPS) bit of the PDE field via some magical
+ * mmio; otherwise the page-walker will simply ignore the IPS bit. This
+ * shouldn't be needed after GEN10.
+ *
+ * 64K pages were first introduced with BDW, although technically they
+ * only *work* from gen9+. Pre-BDW hardware instead offers the option of
+ * 32K pages, but we don't currently have any support for them in our
+ * driver.
+ */
+ if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
+ INTEL_GEN(dev_priv) <= 10)
+ I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
+ I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
+ GAMW_ECO_ENABLE_64K_IPS_FIELD);
}
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
@@ -1896,7 +2154,7 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
* need to do anything here. */
- if (i915.enable_execlists)
+ if (i915_modparams.enable_execlists)
return 0;
if (!USES_PPGTT(dev_priv))
@@ -2331,12 +2589,6 @@ static int ggtt_bind_vma(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
- if (unlikely(!vma->pages)) {
- int ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
- }
-
/* Currently applicable only to VLV */
pte_flags = 0;
if (obj->gt_ro)
@@ -2346,6 +2598,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
intel_runtime_pm_put(i915);
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+
/*
* Without aliasing PPGTT there's no difference between
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
@@ -2373,12 +2627,6 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
u32 pte_flags;
int ret;
- if (unlikely(!vma->pages)) {
- ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
- }
-
/* Currently applicable only to VLV */
pte_flags = 0;
if (vma->obj->gt_ro)
@@ -2393,7 +2641,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
vma->node.start,
vma->size);
if (ret)
- goto err_pages;
+ return ret;
}
appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
@@ -2407,17 +2655,6 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
}
return 0;
-
-err_pages:
- if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
- if (vma->pages != vma->obj->mm.pages) {
- GEM_BUG_ON(!vma->pages);
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
- }
- return ret;
}
static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
@@ -2455,6 +2692,21 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
+static int ggtt_set_pages(struct i915_vma *vma)
+{
+ int ret;
+
+ GEM_BUG_ON(vma->pages);
+
+ ret = i915_get_ggtt_vma_pages(vma);
+ if (ret)
+ return ret;
+
+ vma->page_sizes = vma->obj->mm.page_sizes;
+
+ return 0;
+}
+
static void i915_gtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
@@ -2591,6 +2843,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma, *vn;
+ struct pagevec *pvec;
ggtt->base.closed = true;
@@ -2614,6 +2867,13 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
}
ggtt->base.cleanup(&ggtt->base);
+
+ pvec = &dev_priv->mm.wc_stash;
+ if (pvec->nr) {
+ set_pages_array_wb(pvec->pages, pvec->nr);
+ __pagevec_release(pvec);
+ }
+
mutex_unlock(&dev_priv->drm.struct_mutex);
arch_phys_wc_del(ggtt->mtrr);
@@ -2709,13 +2969,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
/*
- * On BXT writes larger than 64 bit to the GTT pagetable range will be
- * dropped. For WC mappings in general we have 64 byte burst writes
- * when the WC buffer is flushed, so we can't use it, but have to
+ * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
+ * will be dropped. For WC mappings in general we have 64 byte burst
+ * writes when the WC buffer is flushed, so we can't use it, but have to
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -2735,41 +2995,209 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return 0;
}
-static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
+static struct intel_ppat_entry *
+__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
+{
+ struct intel_ppat_entry *entry = &ppat->entries[index];
+
+ GEM_BUG_ON(index >= ppat->max_entries);
+ GEM_BUG_ON(test_bit(index, ppat->used));
+
+ entry->ppat = ppat;
+ entry->value = value;
+ kref_init(&entry->ref);
+ set_bit(index, ppat->used);
+ set_bit(index, ppat->dirty);
+
+ return entry;
+}
+
+static void __free_ppat_entry(struct intel_ppat_entry *entry)
+{
+ struct intel_ppat *ppat = entry->ppat;
+ unsigned int index = entry - ppat->entries;
+
+ GEM_BUG_ON(index >= ppat->max_entries);
+ GEM_BUG_ON(!test_bit(index, ppat->used));
+
+ entry->value = ppat->clear_value;
+ clear_bit(index, ppat->used);
+ set_bit(index, ppat->dirty);
+}
+
+/**
+ * intel_ppat_get - get a usable PPAT entry
+ * @i915: i915 device instance
+ * @value: the PPAT value required by the caller
+ *
+ * The function searches for an existing PPAT entry that matches the
+ * required value. On a perfect match, the existing PPAT entry is used. On a
+ * partial match, it checks whether a free PPAT index is available: if so, a
+ * new PPAT index is allocated for the required entry and the HW is updated;
+ * if not, the partially matched entry is used.
+ */
+const struct intel_ppat_entry *
+intel_ppat_get(struct drm_i915_private *i915, u8 value)
+{
+ struct intel_ppat *ppat = &i915->ppat;
+ struct intel_ppat_entry *entry;
+ unsigned int scanned, best_score;
+ int i;
+
+ GEM_BUG_ON(!ppat->max_entries);
+
+ scanned = best_score = 0;
+ for_each_set_bit(i, ppat->used, ppat->max_entries) {
+ unsigned int score;
+
+ score = ppat->match(ppat->entries[i].value, value);
+ if (score > best_score) {
+ entry = &ppat->entries[i];
+ if (score == INTEL_PPAT_PERFECT_MATCH) {
+ kref_get(&entry->ref);
+ return entry;
+ }
+ best_score = score;
+ }
+ scanned++;
+ }
+
+ if (scanned == ppat->max_entries) {
+ if (!best_score)
+ return ERR_PTR(-ENOSPC);
+
+ kref_get(&entry->ref);
+ return entry;
+ }
+
+ i = find_first_zero_bit(ppat->used, ppat->max_entries);
+ entry = __alloc_ppat_entry(ppat, i, value);
+ ppat->update_hw(i915);
+ return entry;
+}
+
+static void release_ppat(struct kref *kref)
+{
+ struct intel_ppat_entry *entry =
+ container_of(kref, struct intel_ppat_entry, ref);
+ struct drm_i915_private *i915 = entry->ppat->i915;
+
+ __free_ppat_entry(entry);
+ entry->ppat->update_hw(i915);
+}
+
+/**
+ * intel_ppat_put - put back a PPAT entry obtained from intel_ppat_get()
+ * @entry: an intel PPAT entry
+ *
+ * Put back a PPAT entry obtained from intel_ppat_get(). If the PPAT index of
+ * the entry was dynamically allocated, its reference count is decreased. Once
+ * the reference count reaches zero, the PPAT index becomes free again.
+ */
+void intel_ppat_put(const struct intel_ppat_entry *entry)
+{
+ struct intel_ppat *ppat = entry->ppat;
+ unsigned int index = entry - ppat->entries;
+
+ GEM_BUG_ON(!ppat->max_entries);
+
+ kref_put(&ppat->entries[index].ref, release_ppat);
+}
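/*
 * A minimal usage sketch of the API documented above; example_use_ppat()
 * and the particular value requested are illustrative, not taken from the
 * driver. Callers pair intel_ppat_get() with intel_ppat_put() around
 * whatever PTE/PDE programming needs the PPAT index.
 */
static int example_use_ppat(struct drm_i915_private *i915)
{
	const struct intel_ppat_entry *entry;

	/* Look up, or allocate and program, a PPAT index for a WB value. */
	entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC |
				     GEN8_PPAT_AGE(3));
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	/* The index to encode into PTEs is entry - i915->ppat.entries. */

	/* Drop the reference; a dynamically allocated index frees once unused. */
	intel_ppat_put(entry);

	return 0;
}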
+
+static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
+{
+ struct intel_ppat *ppat = &dev_priv->ppat;
+ int i;
+
+ for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
+ I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
+ clear_bit(i, ppat->dirty);
+ }
+}
+
+static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
+{
+ struct intel_ppat *ppat = &dev_priv->ppat;
+ u64 pat = 0;
+ int i;
+
+ for (i = 0; i < ppat->max_entries; i++)
+ pat |= GEN8_PPAT(i, ppat->entries[i].value);
+
+ bitmap_clear(ppat->dirty, 0, ppat->max_entries);
+
+ I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+}
+
+static unsigned int bdw_private_pat_match(u8 src, u8 dst)
+{
+ unsigned int score = 0;
+ enum {
+ AGE_MATCH = BIT(0),
+ TC_MATCH = BIT(1),
+ CA_MATCH = BIT(2),
+ };
+
+ /* Cache attribute has to be matched. */
+ if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
+ return 0;
+
+ score |= CA_MATCH;
+
+ if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
+ score |= TC_MATCH;
+
+ if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
+ score |= AGE_MATCH;
+
+ if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
+ return INTEL_PPAT_PERFECT_MATCH;
+
+ return score;
+}
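/*
 * Worked example for the scoring above (values picked for illustration):
 * comparing src = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)
 * against dst = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)
 * matches the cache attribute and target cache but not the age, so the
 * score is CA_MATCH | TC_MATCH rather than INTEL_PPAT_PERFECT_MATCH, and
 * intel_ppat_get() will prefer allocating a free index over reusing it.
 */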
+
+static unsigned int chv_private_pat_match(u8 src, u8 dst)
+{
+ return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
+ INTEL_PPAT_PERFECT_MATCH : 0;
+}
+
+static void cnl_setup_private_ppat(struct intel_ppat *ppat)
{
+ ppat->max_entries = 8;
+ ppat->update_hw = cnl_private_pat_update_hw;
+ ppat->match = bdw_private_pat_match;
+ ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
+
/* XXX: spec is unclear if this is still needed for CNL+ */
- if (!USES_PPGTT(dev_priv)) {
- I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_UC);
+ if (!USES_PPGTT(ppat->i915)) {
+ __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
return;
}
- I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
- I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
- I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
- I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
- I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+ __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
+ __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+ __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+ __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
+ __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases. */
-static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void bdw_setup_private_ppat(struct intel_ppat *ppat)
{
- u64 pat;
-
- pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
- GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
- GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
- GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
- GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
- GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
- GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
- GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+ ppat->max_entries = 8;
+ ppat->update_hw = bdw_private_pat_update_hw;
+ ppat->match = bdw_private_pat_match;
+ ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
- if (!USES_PPGTT(dev_priv))
+ if (!USES_PPGTT(ppat->i915)) {
/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
@@ -2783,17 +3211,26 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
* So we can still hold onto all our assumptions wrt cpu
* clflushing on LLC machines.
*/
- pat = GEN8_PPAT(0, GEN8_PPAT_UC);
+ __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
+ return;
+ }
- /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
- * write would work. */
- I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
- I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
+ __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */
+ __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */
+ __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */
+ __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */
+ __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
-static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void chv_setup_private_ppat(struct intel_ppat *ppat)
{
- u64 pat;
+ ppat->max_entries = 8;
+ ppat->update_hw = bdw_private_pat_update_hw;
+ ppat->match = chv_private_pat_match;
+ ppat->clear_value = CHV_PPAT_SNOOP;
/*
* Map WB on BDW to snooped on CHV.
@@ -2813,17 +3250,15 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
* Which means we must set the snoop bit in PAT entry 0
* in order to keep the global status page working.
*/
- pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
- GEN8_PPAT(1, 0) |
- GEN8_PPAT(2, 0) |
- GEN8_PPAT(3, 0) |
- GEN8_PPAT(4, CHV_PPAT_SNOOP) |
- GEN8_PPAT(5, CHV_PPAT_SNOOP) |
- GEN8_PPAT(6, CHV_PPAT_SNOOP) |
- GEN8_PPAT(7, CHV_PPAT_SNOOP);
- I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
- I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
+ __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
+ __alloc_ppat_entry(ppat, 1, 0);
+ __alloc_ppat_entry(ppat, 2, 0);
+ __alloc_ppat_entry(ppat, 3, 0);
+ __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
+ __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
+ __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
+ __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
}
static void gen6_gmch_remove(struct i915_address_space *vm)
@@ -2834,6 +3269,31 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
cleanup_scratch_page(vm);
}
+static void setup_private_pat(struct drm_i915_private *dev_priv)
+{
+ struct intel_ppat *ppat = &dev_priv->ppat;
+ int i;
+
+ ppat->i915 = dev_priv;
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ cnl_setup_private_ppat(ppat);
+ else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
+ chv_setup_private_ppat(ppat);
+ else
+ bdw_setup_private_ppat(ppat);
+
+ GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
+
+ for_each_clear_bit(i, ppat->used, ppat->max_entries) {
+ ppat->entries[i].value = ppat->clear_value;
+ ppat->entries[i].ppat = ppat;
+ set_bit(i, ppat->dirty);
+ }
+
+ ppat->update_hw(dev_priv);
+}
+
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *dev_priv = ggtt->base.i915;
@@ -2866,17 +3326,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
}
ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
-
- if (INTEL_GEN(dev_priv) >= 10)
- cnl_setup_private_ppat(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- chv_setup_private_ppat(dev_priv);
- else
- bdw_setup_private_ppat(dev_priv);
-
ggtt->base.cleanup = gen6_gmch_remove;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.set_pages = ggtt_set_pages;
+ ggtt->base.clear_pages = clear_pages;
ggtt->base.insert_page = gen8_ggtt_insert_page;
ggtt->base.clear_range = nop_clear_range;
if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
@@ -2894,6 +3348,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->invalidate = gen6_ggtt_invalidate;
+ setup_private_pat(dev_priv);
+
return ggtt_probe_common(ggtt, size);
}
@@ -2933,6 +3389,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.insert_entries = gen6_ggtt_insert_entries;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.set_pages = ggtt_set_pages;
+ ggtt->base.clear_pages = clear_pages;
ggtt->base.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -2978,6 +3436,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.clear_range = i915_ggtt_clear_range;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.set_pages = ggtt_set_pages;
+ ggtt->base.clear_pages = clear_pages;
ggtt->base.cleanup = i915_gmch_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
@@ -3014,7 +3474,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
* currently don't have any bits spare to pass in this upper
* restriction!
*/
- if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
+ if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) {
ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
}
@@ -3127,8 +3587,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry_safe(obj, on,
- &dev_priv->mm.bound_list, global_link) {
+ list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
bool ggtt_bound = false;
struct i915_vma *vma;
@@ -3151,13 +3610,10 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
ggtt->base.closed = false;
if (INTEL_GEN(dev_priv) >= 8) {
- if (INTEL_GEN(dev_priv) >= 10)
- cnl_setup_private_ppat(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- chv_setup_private_ppat(dev_priv);
- else
- bdw_setup_private_ppat(dev_priv);
+ struct intel_ppat *ppat = &dev_priv->ppat;
+ bitmap_set(ppat->dirty, 0, ppat->max_entries);
+ dev_priv->ppat.update_hw(dev_priv);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b4e3aa7c0ce1..93211a96fdad 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -42,7 +42,13 @@
#include "i915_gem_request.h"
#include "i915_selftest.h"
-#define I915_GTT_PAGE_SIZE 4096UL
+#define I915_GTT_PAGE_SIZE_4K BIT(12)
+#define I915_GTT_PAGE_SIZE_64K BIT(16)
+#define I915_GTT_PAGE_SIZE_2M BIT(21)
+
+#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
+#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
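
/*
 * For reference: BIT(12) = 4096 bytes, BIT(16) = 65536 bytes and
 * BIT(21) = 2097152 bytes. Each size is a distinct bit, so the values can
 * also be OR'ed together into page-size masks.
 */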
+
#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
#define I915_FENCE_REG_NONE -1
@@ -126,13 +132,13 @@ typedef u64 gen8_ppgtt_pml4e_t;
* tables */
#define GEN8_PDPE_MASK 0x1ff
-#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
-#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
-#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
-#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
+#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
+#define PPAT_CACHED_PDE 0 /* WB LLC */
+#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */
+#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */
#define CHV_PPAT_SNOOP (1<<6)
-#define GEN8_PPAT_AGE(x) (x<<4)
+#define GEN8_PPAT_AGE(x) ((x)<<4)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLC (1<<2)
@@ -143,6 +149,14 @@ typedef u64 gen8_ppgtt_pml4e_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
+#define GEN8_PPAT_GET_CA(x) ((x) & 3)
+#define GEN8_PPAT_GET_TC(x) ((x) & (3 << 2))
+#define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4))
+#define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6))
+
+#define GEN8_PDE_IPS_64K BIT(11)
+#define GEN8_PDE_PS_2M BIT(7)
+
struct sg_table;
struct intel_rotation_info {
@@ -202,6 +216,7 @@ struct i915_vma;
struct i915_page_dma {
struct page *page;
+ int order;
union {
dma_addr_t daddr;
@@ -324,6 +339,8 @@ struct i915_address_space {
int (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
+ int (*set_pages)(struct i915_vma *vma);
+ void (*clear_pages)(struct i915_vma *vma);
I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
};
@@ -336,6 +353,12 @@ i915_vm_is_48bit(const struct i915_address_space *vm)
return (vm->total - 1) >> 32;
}
+static inline bool
+i915_vm_has_scratch_64K(struct i915_address_space *vm)
+{
+ return vm->scratch_page.order == get_order(I915_GTT_PAGE_SIZE_64K);
+}
+
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
@@ -536,6 +559,37 @@ i915_vm_to_ggtt(struct i915_address_space *vm)
return container_of(vm, struct i915_ggtt, base);
}
+#define INTEL_MAX_PPAT_ENTRIES 8
+#define INTEL_PPAT_PERFECT_MATCH (~0U)
+
+struct intel_ppat;
+
+struct intel_ppat_entry {
+ struct intel_ppat *ppat;
+ struct kref ref;
+ u8 value;
+};
+
+struct intel_ppat {
+ struct intel_ppat_entry entries[INTEL_MAX_PPAT_ENTRIES];
+ DECLARE_BITMAP(used, INTEL_MAX_PPAT_ENTRIES);
+ DECLARE_BITMAP(dirty, INTEL_MAX_PPAT_ENTRIES);
+ unsigned int max_entries;
+ u8 clear_value;
+ /*
+ * Return a score indicating how well two PPAT values match;
+ * INTEL_PPAT_PERFECT_MATCH indicates a perfect match
+ */
+ unsigned int (*match)(u8 src, u8 dst);
+ void (*update_hw)(struct drm_i915_private *i915);
+
+ struct drm_i915_private *i915;
+};
+
+const struct intel_ppat_entry *
+intel_ppat_get(struct drm_i915_private *i915, u8 value);
+void intel_ppat_put(const struct intel_ppat_entry *entry);
+
int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915);
void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index c1f64ddaf8aa..ee83ec838ee7 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -44,12 +44,12 @@ static void internal_free_pages(struct sg_table *st)
kfree(st);
}
-static struct sg_table *
-i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
+ unsigned int sg_page_sizes;
unsigned int npages;
int max_order;
gfp_t gfp;
@@ -78,16 +78,17 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
create_st:
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
npages = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, npages, GFP_KERNEL)) {
kfree(st);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
sg = st->sgl;
st->nents = 0;
+ sg_page_sizes = 0;
do {
int order = min(fls(npages) - 1, max_order);
@@ -105,6 +106,7 @@ create_st:
} while (1);
sg_set_page(sg, page, PAGE_SIZE << order, 0);
+ sg_page_sizes |= PAGE_SIZE << order;
st->nents++;
npages -= 1 << order;
@@ -132,13 +134,17 @@ create_st:
* object are only valid whilst active and pinned.
*/
obj->mm.madv = I915_MADV_DONTNEED;
- return st;
+
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
err:
sg_set_page(sg, NULL, 0, 0);
sg_mark_end(sg);
internal_free_pages(st);
- return ERR_PTR(-ENOMEM);
+
+ return -ENOMEM;
}
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index c30d8f808185..63ce38c1cce9 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -69,7 +69,7 @@ struct drm_i915_gem_object_ops {
* being released or under memory pressure (where we attempt to
* reap pages for the shrinker).
*/
- struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+ int (*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
int (*pwrite)(struct drm_i915_gem_object *,
@@ -114,7 +114,6 @@ struct drm_i915_gem_object {
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
- struct list_head global_link;
union {
struct rcu_head rcu;
struct llist_node freed;
@@ -123,6 +122,7 @@ struct drm_i915_gem_object {
/**
* Whether the object is currently in the GGTT mmap.
*/
+ unsigned int userfault_count;
struct list_head userfault_link;
struct list_head batch_pool_link;
@@ -160,7 +160,8 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
unsigned int bind_count;
unsigned int active_count;
- unsigned int pin_display;
+ /** Count of how many global VMA are currently pinned for use by HW */
+ unsigned int pin_global;
struct {
struct mutex lock; /* protects the pages and their use */
@@ -169,6 +170,35 @@ struct drm_i915_gem_object {
struct sg_table *pages;
void *mapping;
+ /* TODO: whack some of this into the error state */
+ struct i915_page_sizes {
+ /**
+ * The sg mask of the pages sg_table, i.e. the mask
+ * of the lengths for each sg entry.
+ */
+ unsigned int phys;
+
+ /**
+ * The gtt page sizes we are allowed to use given the
+ * sg mask and the supported page sizes. This will
+ * express the smallest unit we can use for the whole
+ * object, as well as the larger sizes we may be able
+ * to use opportunistically.
+ */
+ unsigned int sg;
+
+ /**
+ * The actual gtt page size usage. Since we can have
+ * multiple vma associated with this object, we need to
+ * prevent any trampling of state; hence a copy of this
+ * struct also lives in each vma, and the gtt value here
+ * should only be read/written through the vma.
+ */
+ unsigned int gtt;
+ } page_sizes;
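/*
 * Illustrative example (assuming a platform that supports only 4K and 64K
 * GTT pages): an object whose sg entries are all exactly 64K long has
 * phys = 64K; sg then becomes 64K | 4K (64K usable opportunistically, 4K
 * always), and a vma bound entirely with 64K PTEs records gtt = 64K.
 */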
+
+ I915_SELFTEST_DECLARE(unsigned int page_mask);
+
struct i915_gem_object_page_iter {
struct scatterlist *sg_pos;
unsigned int sg_idx; /* in pages, but 32bit eek! */
@@ -178,6 +208,12 @@ struct drm_i915_gem_object {
} get_page;
/**
+ * Element within i915->mm.unbound_list or i915->mm.bound_list,
+ * locked by i915->mm.obj_lock.
+ */
+ struct list_head link;
+
+ /**
* Advice: are the backing pages purgeable?
*/
unsigned int madv:2;
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 4dd4c2159a92..3703dc91eeda 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -229,7 +229,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
return 0;
/* Recreate the page after shrinking */
- if (!so->vma->obj->mm.pages)
+ if (!i915_gem_object_has_pages(so->vma->obj))
so->batch_offset = -1;
ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 813a3b546d6e..d140fcf5c6a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -186,7 +186,7 @@ i915_priotree_init(struct i915_priotree *pt)
INIT_LIST_HEAD(&pt->signalers_list);
INIT_LIST_HEAD(&pt->waiters_list);
INIT_LIST_HEAD(&pt->link);
- pt->priority = INT_MIN;
+ pt->priority = I915_PRIORITY_INVALID;
}
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
@@ -416,7 +416,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
spin_lock_irq(&request->lock);
if (request->waitboost)
- atomic_dec(&request->i915->rps.num_waiters);
+ atomic_dec(&request->i915->gt_pm.rps.num_waiters);
dma_fence_signal_locked(&request->fence);
spin_unlock_irq(&request->lock);
@@ -556,7 +556,16 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
switch (state) {
case FENCE_COMPLETE:
trace_i915_gem_request_submit(request);
+ /*
+ * We need to serialize use of the submit_request() callback with its
+ * hotplugging performed during an emergency i915_gem_set_wedged().
+ * We use the RCU mechanism to mark the critical section in order to
+ * force i915_gem_set_wedged() to wait until the submit_request() is
+ * completed before proceeding.
+ */
+ rcu_read_lock();
request->engine->submit_request(request);
+ rcu_read_unlock();
break;
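/*
 * A minimal sketch of the other half of this serialization (assumed shape,
 * not taken from the patch): the wedge path first points
 * engine->submit_request at a no-op handler and then uses synchronize_rcu()
 * to wait for any submit_request() call still inside the rcu_read_lock()
 * section above. The helper name and nop_submit parameter are illustrative.
 */
static void example_wedge_engine(struct intel_engine_cs *engine,
				 void (*nop_submit)(struct drm_i915_gem_request *rq))
{
	/* New submissions from here on hit the no-op handler. */
	engine->submit_request = nop_submit;

	/* Wait out readers already inside submit_notify()'s critical section. */
	synchronize_rcu();
}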
case FENCE_FREE:
@@ -587,6 +596,13 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ /*
+ * Preempt contexts are reserved for exclusive use to inject a
+ * preemption context switch. They are never to be used for any trivial
+ * request!
+ */
+ GEM_BUG_ON(ctx == dev_priv->preempt_context);
+
/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged.
*/
@@ -1021,12 +1037,28 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
return this_cpu != cpu;
}
-bool __i915_spin_request(const struct drm_i915_gem_request *req,
- u32 seqno, int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct drm_i915_gem_request *req,
+ u32 seqno, int state, unsigned long timeout_us)
{
struct intel_engine_cs *engine = req->engine;
unsigned int irq, cpu;
+ GEM_BUG_ON(!seqno);
+
+ /*
+ * Only wait for the request if we know it is likely to complete.
+ *
+ * We don't track the timestamps around requests, nor the average
+ * request length, so we do not have a good indicator that this
+ * request will complete within the timeout. What we do know is the
+ * order in which requests are executed by the engine and so we can
+ * tell if the request has started. If the request hasn't started yet,
+ * it is a fair assumption that it will not complete within our
+ * relatively short timeout.
+ */
+ if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
+ return false;
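/*
 * Worked example with made-up numbers: for a request with seqno 43, the
 * check above asks whether the engine has passed seqno 42. If the engine
 * still reports 40, the request has not even started, so we return false
 * and let the caller sleep on the interrupt instead of busy-spinning.
 */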
+
/* When waiting for high frequency requests, e.g. during synchronous
* rendering split between the CPU and GPU, the finite amount of time
* required to set up the irq and wait upon it limits the response
@@ -1040,12 +1072,8 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
irq = atomic_read(&engine->irq_count);
timeout_us += local_clock_us(&cpu);
do {
- if (seqno != i915_gem_request_global_seqno(req))
- break;
-
- if (i915_seqno_passed(intel_engine_get_seqno(req->engine),
- seqno))
- return true;
+ if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
+ return seqno == i915_gem_request_global_seqno(req);
/* Seqno are meant to be ordered *before* the interrupt. If
* we see an interrupt without a corresponding seqno advance,
@@ -1156,7 +1184,7 @@ restart:
GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
/* Optimistic short spin before touching IRQs */
- if (i915_spin_request(req, state, 5))
+ if (__i915_spin_request(req, wait.seqno, state, 5))
goto complete;
set_current_state(state);
@@ -1213,7 +1241,7 @@ wakeup:
continue;
/* Only spin if we know the GPU is processing this request */
- if (i915_spin_request(req, state, 2))
+ if (__i915_spin_request(req, wait.seqno, state, 2))
break;
if (!intel_wait_check_request(&wait, req)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 49a4c8994ff0..26249f39de67 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -30,6 +30,8 @@
#include "i915_gem.h"
#include "i915_sw_fence.h"
+#include <uapi/drm/i915_drm.h>
+
struct drm_file;
struct drm_i915_gem_object;
struct drm_i915_gem_request;
@@ -69,9 +71,14 @@ struct i915_priotree {
struct list_head waiters_list; /* those after us, they depend upon us */
struct list_head link;
int priority;
-#define I915_PRIORITY_MAX 1024
-#define I915_PRIORITY_NORMAL 0
-#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX)
+};
+
+enum {
+ I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
+ I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
+ I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+ I915_PRIORITY_INVALID = INT_MIN
};
struct i915_gem_capture_list {
@@ -313,26 +320,6 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
}
static inline bool
-__i915_gem_request_started(const struct drm_i915_gem_request *req, u32 seqno)
-{
- GEM_BUG_ON(!seqno);
- return i915_seqno_passed(intel_engine_get_seqno(req->engine),
- seqno - 1);
-}
-
-static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
-{
- u32 seqno;
-
- seqno = i915_gem_request_global_seqno(req);
- if (!seqno)
- return false;
-
- return __i915_gem_request_started(req, seqno);
-}
-
-static inline bool
__i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno)
{
GEM_BUG_ON(!seqno);
@@ -352,21 +339,6 @@ i915_gem_request_completed(const struct drm_i915_gem_request *req)
return __i915_gem_request_completed(req, seqno);
}
-bool __i915_spin_request(const struct drm_i915_gem_request *request,
- u32 seqno, int state, unsigned long timeout_us);
-static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
- int state, unsigned long timeout_us)
-{
- u32 seqno;
-
- seqno = i915_gem_request_global_seqno(request);
- if (!seqno)
- return 0;
-
- return (__i915_gem_request_started(request, seqno) &&
- __i915_spin_request(request, seqno, state, timeout_us));
-}
-
/* We treat requests as fences. This is not to be confused with our
* "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
* We use the fences to synchronize access from the CPU with activity on the
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 74002b2d1b6f..3770e3323fc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -71,25 +71,6 @@ static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
mutex_unlock(&dev_priv->drm.struct_mutex);
}
-static bool any_vma_pinned(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
-
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- /* Only GGTT vma may be permanently pinned, and are always
- * at the start of the list. We can stop hunting as soon
- * as we see a ppGTT vma.
- */
- if (!i915_vma_is_ggtt(vma))
- break;
-
- if (i915_vma_is_pinned(vma))
- return true;
- }
-
- return false;
-}
-
static bool swap_available(void)
{
return get_nr_swap_pages() > 0;
@@ -97,9 +78,6 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
- if (!obj->mm.pages)
- return false;
-
/* Consider only shrinkable objects. */
if (!i915_gem_object_is_shrinkable(obj))
return false;
@@ -115,7 +93,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
return false;
- if (any_vma_pinned(obj))
+ /* If any vma is "permanently" pinned, it will prevent us from
+ * reclaiming the obj->mm.pages. We only allow scanout objects to claim
+ * a permanent pin, along with a few others like the context objects.
+ * To simplify the scan, and to avoid walking the list of vma under the
+ * object, we just check the object's count of permanent pins.
+ */
+ if (READ_ONCE(obj->pin_global))
return false;
/* We can only return physical pages to the system if we can either
@@ -129,7 +113,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
- return !READ_ONCE(obj->mm.pages);
+ return !i915_gem_object_has_pages(obj);
}
/**
@@ -178,6 +162,18 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (!shrinker_lock(dev_priv, &unlock))
return 0;
+ /*
+ * When shrinking the active list, also consider active contexts.
+ * Active contexts are pinned until they are retired, and so can
+ * not be simply unbound to retire and unpin their pages. To shrink
+ * the contexts, we must wait until the gpu is idle.
+ *
+ * We don't care about errors here; if we cannot wait upon the GPU,
+ * we will free as much as we can and hope to get a second chance.
+ */
+ if (flags & I915_SHRINK_ACTIVE)
+ i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+
trace_i915_gem_shrink(dev_priv, target, flags);
i915_gem_retire_requests(dev_priv);
@@ -217,15 +213,20 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
continue;
INIT_LIST_HEAD(&still_in_list);
+
+ /*
+ * We serialize our access to unreferenced objects through
+ * the use of the struct_mutex. While the objects are not
+ * yet freed (due to RCU then a workqueue) we still want
+ * to be able to shrink their pages, so they remain on
+ * the unbound/bound list until actually freed.
+ */
+ spin_lock(&dev_priv->mm.obj_lock);
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
- global_link))) {
- list_move_tail(&obj->global_link, &still_in_list);
- if (!obj->mm.pages) {
- list_del_init(&obj->global_link);
- continue;
- }
+ mm.link))) {
+ list_move_tail(&obj->mm.link, &still_in_list);
if (flags & I915_SHRINK_PURGEABLE &&
obj->mm.madv != I915_MADV_DONTNEED)
@@ -243,20 +244,24 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (!can_release_pages(obj))
continue;
+ spin_unlock(&dev_priv->mm.obj_lock);
+
if (unsafe_drop_pages(obj)) {
/* May arrive from get_pages on another bo */
mutex_lock_nested(&obj->mm.lock,
I915_MM_SHRINKER);
- if (!obj->mm.pages) {
+ if (!i915_gem_object_has_pages(obj)) {
__i915_gem_object_invalidate(obj);
- list_del_init(&obj->global_link);
count += obj->base.size >> PAGE_SHIFT;
}
mutex_unlock(&obj->mm.lock);
- scanned += obj->base.size >> PAGE_SHIFT;
}
+ scanned += obj->base.size >> PAGE_SHIFT;
+
+ spin_lock(&dev_priv->mm.obj_lock);
}
list_splice_tail(&still_in_list, phase->list);
+ spin_unlock(&dev_priv->mm.obj_lock);
}
if (flags & I915_SHRINK_BOUND)
@@ -302,28 +307,39 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
- struct drm_i915_private *dev_priv =
+ struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
struct drm_i915_gem_object *obj;
- unsigned long count;
- bool unlock;
-
- if (!shrinker_lock(dev_priv, &unlock))
- return 0;
-
- i915_gem_retire_requests(dev_priv);
+ unsigned long num_objects = 0;
+ unsigned long count = 0;
- count = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
- if (can_release_pages(obj))
+ spin_lock(&i915->mm.obj_lock);
+ list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
+ if (can_release_pages(obj)) {
count += obj->base.size >> PAGE_SHIFT;
+ num_objects++;
+ }
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
- if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
+ list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
+ if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
count += obj->base.size >> PAGE_SHIFT;
- }
+ num_objects++;
+ }
+ spin_unlock(&i915->mm.obj_lock);
- shrinker_unlock(dev_priv, unlock);
+ /* Update our preferred vmscan batch size for the next pass.
+ * Our rough guess for an effective batch size is roughly 2
+ * available GEM objects' worth of pages. That is, we don't want
+ * the shrinker to fire until it is worth the cost of freeing an
+ * entire GEM object.
+ */
+ if (num_objects) {
+ unsigned long avg = 2 * count / num_objects;
+
+ i915->mm.shrinker.batch =
+ max((i915->mm.shrinker.batch + avg) >> 1,
+ 128ul /* default SHRINK_BATCH */);
+ }
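/*
 * Worked example with made-up numbers: 4 releasable objects totalling
 * count = 8192 pages give avg = 2 * 8192 / 4 = 4096; if the previous
 * batch was also 4096, the new batch is (4096 + 4096) >> 1 = 4096, and
 * it can never drop below the 128-page default SHRINK_BATCH.
 */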
return count;
}
@@ -400,10 +416,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct drm_i915_gem_object *obj;
unsigned long unevictable, bound, unbound, freed_pages;
- bool unlock;
-
- if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
- return NOTIFY_DONE;
freed_pages = i915_gem_shrink_all(dev_priv);
@@ -412,26 +424,20 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
* being pointed to by hardware.
*/
unbound = bound = unevictable = 0;
- list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
- if (!obj->mm.pages)
- continue;
-
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
unbound += obj->base.size >> PAGE_SHIFT;
}
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
- if (!obj->mm.pages)
- continue;
-
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
bound += obj->base.size >> PAGE_SHIFT;
}
-
- shrinker_unlock(dev_priv, unlock);
+ spin_unlock(&dev_priv->mm.obj_lock);
if (freed_pages || unbound || bound)
pr_info("Purging GPU memory, %lu pages freed, "
@@ -498,6 +504,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+ dev_priv->mm.shrinker.batch = 4096;
WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 507c9f0d8df1..03e7abc7e043 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -539,12 +539,18 @@ i915_pages_create_for_stolen(struct drm_device *dev,
return st;
}
-static struct sg_table *
-i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
- return i915_pages_create_for_stolen(obj->base.dev,
- obj->stolen->start,
- obj->stolen->size);
+ struct sg_table *pages =
+ i915_pages_create_for_stolen(obj->base.dev,
+ obj->stolen->start,
+ obj->stolen->size);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ __i915_gem_object_set_pages(obj, pages, obj->stolen->size);
+
+ return 0;
}
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
@@ -718,8 +724,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
- list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
+
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
obj->bind_count++;
+ spin_unlock(&dev_priv->mm.obj_lock);
return obj;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index fb5231f98c0d..1294cf695df0 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -269,7 +269,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
* due to the change in swizzling.
*/
mutex_lock(&obj->mm.lock);
- if (obj->mm.pages &&
+ if (i915_gem_object_has_pages(obj) &&
obj->mm.madv == I915_MADV_WILLNEED &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (tiling == I915_TILING_NONE) {
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 709efe2357ea..135fc750a837 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -82,11 +82,11 @@ static void cancel_userptr(struct work_struct *work)
/* We are inside a kthread context and can't be interrupted */
if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- WARN_ONCE(obj->mm.pages,
- "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
+ WARN_ONCE(i915_gem_object_has_pages(obj),
+ "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
obj->bind_count,
atomic_read(&obj->mm.pages_pin_count),
- obj->pin_display);
+ obj->pin_global);
mutex_unlock(&obj->base.dev->struct_mutex);
@@ -164,7 +164,6 @@ static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
struct i915_mmu_notifier *mn;
- int ret;
mn = kmalloc(sizeof(*mn), GFP_KERNEL);
if (mn == NULL)
@@ -179,14 +178,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
return ERR_PTR(-ENOMEM);
}
- /* Protected by mmap_sem (write-lock) */
- ret = __mmu_notifier_register(&mn->mn, mm);
- if (ret) {
- destroy_workqueue(mn->wq);
- kfree(mn);
- return ERR_PTR(ret);
- }
-
return mn;
}
@@ -210,23 +201,42 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
- struct i915_mmu_notifier *mn = mm->mn;
+ struct i915_mmu_notifier *mn;
+ int err = 0;
mn = mm->mn;
if (mn)
return mn;
+ mn = i915_mmu_notifier_create(mm->mm);
+ if (IS_ERR(mn))
+ err = PTR_ERR(mn);
+
down_write(&mm->mm->mmap_sem);
mutex_lock(&mm->i915->mm_lock);
- if ((mn = mm->mn) == NULL) {
- mn = i915_mmu_notifier_create(mm->mm);
- if (!IS_ERR(mn))
- mm->mn = mn;
+ if (mm->mn == NULL && !err) {
+ /* Protected by mmap_sem (write-lock) */
+ err = __mmu_notifier_register(&mn->mn, mm->mm);
+ if (!err) {
+ /* Protected by mm_lock */
+ mm->mn = fetch_and_zero(&mn);
+ }
+ } else if (mm->mn) {
+ /*
+ * Someone else raced and successfully installed the mmu
+ * notifier, so we can cancel our own errors.
+ */
+ err = 0;
}
mutex_unlock(&mm->i915->mm_lock);
up_write(&mm->mm->mmap_sem);
- return mn;
+ if (mn && !IS_ERR(mn)) {
+ destroy_workqueue(mn->wq);
+ kfree(mn);
+ }
+
+ return err ? ERR_PTR(err) : mm->mn;
}
static int
@@ -399,64 +409,47 @@ struct get_pages_work {
struct task_struct *task;
};
-#if IS_ENABLED(CONFIG_SWIOTLB)
-#define swiotlb_active() swiotlb_nr_tbl()
-#else
-#define swiotlb_active() 0
-#endif
-
-static int
-st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
-{
- struct scatterlist *sg;
- int ret, n;
-
- *st = kmalloc(sizeof(**st), GFP_KERNEL);
- if (*st == NULL)
- return -ENOMEM;
-
- if (swiotlb_active()) {
- ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
- if (ret)
- goto err;
-
- for_each_sg((*st)->sgl, sg, num_pages, n)
- sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
- } else {
- ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
- 0, num_pages << PAGE_SHIFT,
- GFP_KERNEL);
- if (ret)
- goto err;
- }
-
- return 0;
-
-err:
- kfree(*st);
- *st = NULL;
- return ret;
-}
-
static struct sg_table *
-__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
- struct page **pvec, int num_pages)
+__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
+ struct page **pvec, int num_pages)
{
- struct sg_table *pages;
+ unsigned int max_segment = i915_sg_segment_size();
+ struct sg_table *st;
+ unsigned int sg_page_sizes;
int ret;
- ret = st_set_pages(&pages, pvec, num_pages);
- if (ret)
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+alloc_table:
+ ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
+ 0, num_pages << PAGE_SHIFT,
+ max_segment,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(st);
return ERR_PTR(ret);
+ }
- ret = i915_gem_gtt_prepare_pages(obj, pages);
+ ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret) {
- sg_free_table(pages);
- kfree(pages);
+ sg_free_table(st);
+
+ if (max_segment > PAGE_SIZE) {
+ max_segment = PAGE_SIZE;
+ goto alloc_table;
+ }
+
+ kfree(st);
return ERR_PTR(ret);
}
- return pages;
+ sg_page_sizes = i915_sg_page_sizes(st->sgl);
+
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return st;
}
static int
@@ -540,9 +533,9 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
struct sg_table *pages = ERR_PTR(ret);
if (pinned == npages) {
- pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
+ pages = __i915_gem_userptr_alloc_pages(obj, pvec,
+ npages);
if (!IS_ERR(pages)) {
- __i915_gem_object_set_pages(obj, pages);
pinned = 0;
pages = NULL;
}
@@ -554,7 +547,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
mutex_unlock(&obj->mm.lock);
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
i915_gem_object_put(obj);
@@ -603,8 +596,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
return ERR_PTR(-EAGAIN);
}
-static struct sg_table *
-i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
+static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
const int num_pages = obj->base.size >> PAGE_SHIFT;
struct mm_struct *mm = obj->userptr.mm->mm;
@@ -633,9 +625,9 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (obj->userptr.work) {
/* active flag should still be held for the pending work */
if (IS_ERR(obj->userptr.work))
- return ERR_CAST(obj->userptr.work);
+ return PTR_ERR(obj->userptr.work);
else
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
}
pvec = NULL;
@@ -661,17 +653,17 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
pages = __i915_gem_userptr_get_pages_schedule(obj);
active = pages == ERR_PTR(-EAGAIN);
} else {
- pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
+ pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
active = !IS_ERR(pages);
}
if (active)
__i915_gem_userptr_set_active(obj, true);
if (IS_ERR(pages))
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
- return pages;
+ return PTR_ERR_OR_ZERO(pages);
}
static void
@@ -834,7 +826,9 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
hash_init(dev_priv->mm_structs);
dev_priv->mm.userptr_wq =
- alloc_workqueue("i915-userptr-acquire", WQ_HIGHPRI, 0);
+ alloc_workqueue("i915-userptr-acquire",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ 0);
if (!dev_priv->mm.userptr_wq)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_gemfs.c b/drivers/gpu/drm/i915/i915_gemfs.c
new file mode 100644
index 000000000000..e2993857df37
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gemfs.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+
+#include "i915_drv.h"
+#include "i915_gemfs.h"
+
+int i915_gemfs_init(struct drm_i915_private *i915)
+{
+ struct file_system_type *type;
+ struct vfsmount *gemfs;
+
+ type = get_fs_type("tmpfs");
+ if (!type)
+ return -ENODEV;
+
+ gemfs = kern_mount(type);
+ if (IS_ERR(gemfs))
+ return PTR_ERR(gemfs);
+
+ /*
+ * Enable huge-pages for objects that are at least HPAGE_PMD_SIZE, most
+ * likely 2M. Note that within_size may overallocate huge-pages: if, say,
+ * we allocate an object of size 2M + 4K, we may get 2M + 2M, but under
+ * memory pressure shmem should split any huge-pages which can be
+ * shrunk.
+ */
+
+ if (has_transparent_hugepage()) {
+ struct super_block *sb = gemfs->mnt_sb;
+ char options[] = "huge=within_size";
+ int flags = 0;
+ int err;
+
+ err = sb->s_op->remount_fs(sb, &flags, options);
+ if (err) {
+ kern_unmount(gemfs);
+ return err;
+ }
+ }
+
+ i915->mm.gemfs = gemfs;
+
+ return 0;
+}
+
+void i915_gemfs_fini(struct drm_i915_private *i915)
+{
+ kern_unmount(i915->mm.gemfs);
+}
diff --git a/drivers/gpu/drm/i915/i915_gemfs.h b/drivers/gpu/drm/i915/i915_gemfs.h
new file mode 100644
index 000000000000..cca8bdc5b93e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gemfs.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEMFS_H__
+#define __I915_GEMFS_H__
+
+struct drm_i915_private;
+
+int i915_gemfs_init(struct drm_i915_private *i915);
+
+void i915_gemfs_fini(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0c779671fe2d..653fb69e7ecb 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -377,9 +377,9 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
if (!erq->seqno)
return;
- err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
+ err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n",
prefix, erq->pid, erq->ban_score,
- erq->context, erq->seqno,
+ erq->context, erq->seqno, erq->priority,
jiffies_to_msecs(jiffies - erq->jiffies),
erq->head, erq->tail);
}
@@ -388,14 +388,16 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct drm_i915_error_context *ctx)
{
- err_printf(m, "%s%s[%d] user_handle %d hw_id %d, ban score %d guilty %d active %d\n",
+ err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d guilty %d active %d\n",
header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
- ctx->ban_score, ctx->guilty, ctx->active);
+ ctx->priority, ctx->ban_score, ctx->guilty, ctx->active);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
const struct drm_i915_error_engine *ee)
{
+ int n;
+
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
err_printf(m, " START: 0x%08x\n", ee->start);
err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
@@ -465,8 +467,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
err_printf(m, " engine reset count: %u\n", ee->reset_count);
- error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
- error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
+ for (n = 0; n < ee->num_ports; n++) {
+ err_printf(m, " ELSP[%d]:", n);
+ error_print_request(m, " ", &ee->execlist[n]);
+ }
+
error_print_context(m, " Active context: ", &ee->context);
}
@@ -567,7 +572,7 @@ static __always_inline void err_print_param(struct drm_i915_error_state_buf *m,
static void err_print_params(struct drm_i915_error_state_buf *m,
const struct i915_params *p)
{
-#define PRINT(T, x) err_print_param(m, #x, #T, &p->x);
+#define PRINT(T, x, ...) err_print_param(m, #x, #T, &p->x);
I915_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}
@@ -861,7 +866,7 @@ void __i915_gpu_state_free(struct kref *error_ref)
kfree(error->overlay);
kfree(error->display);
-#define FREE(T, x) free_param(#T, &error->params.x);
+#define FREE(T, x, ...) free_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(FREE);
#undef FREE
@@ -1266,6 +1271,7 @@ static void record_request(struct drm_i915_gem_request *request,
struct drm_i915_error_request *erq)
{
erq->context = request->ctx->hw_id;
+ erq->priority = request->priotree.priority;
erq->ban_score = atomic_read(&request->ctx->ban_score);
erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
@@ -1327,17 +1333,19 @@ static void engine_record_requests(struct intel_engine_cs *engine,
static void error_record_engine_execlists(struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
- const struct execlist_port *port = engine->execlist_port;
+ const struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned int n;
- for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
- struct drm_i915_gem_request *rq = port_request(&port[n]);
+ for (n = 0; n < execlists_num_ports(execlists); n++) {
+ struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);
if (!rq)
break;
record_request(rq, &ee->execlist[n]);
}
+
+ ee->num_ports = n;
}
static void record_context(struct drm_i915_error_context *e,
@@ -1357,6 +1365,7 @@ static void record_context(struct drm_i915_error_context *e,
e->handle = ctx->user_handle;
e->hw_id = ctx->hw_id;
+ e->priority = ctx->priority;
e->ban_score = atomic_read(&ctx->ban_score);
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
@@ -1554,7 +1563,7 @@ static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
struct i915_gpu_state *error)
{
/* Capturing log buf contents won't be useful if logging was disabled */
- if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
+ if (!dev_priv->guc.log.vma || (i915_modparams.guc_log_level < 0))
return;
error->guc_log = i915_error_object_create(dev_priv,
@@ -1665,8 +1674,8 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
struct i915_gpu_state *error)
{
error->awake = dev_priv->gt.awake;
- error->wakelock = atomic_read(&dev_priv->pm.wakeref_count);
- error->suspended = dev_priv->pm.suspended;
+ error->wakelock = atomic_read(&dev_priv->runtime_pm.wakeref_count);
+ error->suspended = dev_priv->runtime_pm.suspended;
error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
@@ -1696,8 +1705,8 @@ static int capture(void *data)
ktime_to_timeval(ktime_sub(ktime_get(),
error->i915->gt.last_init_time));
- error->params = i915;
-#define DUP(T, x) dup_param(#T, &error->params.x);
+ error->params = i915_modparams;
+#define DUP(T, x, ...) dup_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(DUP);
#undef DUP
@@ -1751,7 +1760,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
struct i915_gpu_state *error;
unsigned long flags;
- if (!i915.error_capture)
+ if (!i915_modparams.error_capture)
return;
if (READ_ONCE(dev_priv->gpu_error.first_error))
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 48a1e9349a2c..f84c267728fd 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -21,12 +21,13 @@
* IN THE SOFTWARE.
*
*/
-#include <linux/circ_buf.h>
-#include "i915_drv.h"
-#include "intel_uc.h"
+#include <linux/circ_buf.h>
#include <trace/events/dma_fence.h>
+#include "i915_guc_submission.h"
+#include "i915_drv.h"
+
/**
* DOC: GuC-based command submission
*
@@ -192,13 +193,12 @@ static int __create_doorbell(struct i915_guc_client *client)
doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_ENABLED;
- doorbell->cookie = client->doorbell_cookie;
+ doorbell->cookie = 0;
err = __guc_allocate_doorbell(client->guc, client->stage_id);
- if (err) {
+ if (err)
doorbell->db_status = GUC_DOORBELL_DISABLED;
- doorbell->cookie = 0;
- }
+
return err;
}
@@ -306,7 +306,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
desc->db_base_addr = 0;
desc->stage_id = client->stage_id;
- desc->wq_size_bytes = client->wq_size;
+ desc->wq_size_bytes = GUC_WQ_SIZE;
desc->wq_status = WQ_STATUS_ACTIVE;
desc->priority = client->priority;
}
@@ -338,7 +338,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
struct intel_context *ce = &ctx->engine[engine->id];
- uint32_t guc_engine_id = engine->guc_id;
+ u32 guc_engine_id = engine->guc_id;
struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
/* TODO: We have a design issue to be solved here. Only when we
@@ -388,13 +388,13 @@ static void guc_stage_desc_init(struct intel_guc *guc,
gfx_addr = guc_ggtt_offset(client->vma);
desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
- desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client);
+ desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
desc->process_desc = gfx_addr + client->proc_desc_offset;
- desc->wq_addr = gfx_addr + client->wq_offset;
- desc->wq_size = client->wq_size;
+ desc->wq_addr = gfx_addr + GUC_DB_SIZE;
+ desc->wq_size = GUC_WQ_SIZE;
- desc->desc_private = (uintptr_t)client;
+ desc->desc_private = ptr_to_u64(client);
}
static void guc_stage_desc_fini(struct intel_guc *guc,
@@ -406,82 +406,23 @@ static void guc_stage_desc_fini(struct intel_guc *guc,
memset(desc, 0, sizeof(*desc));
}
-/**
- * i915_guc_wq_reserve() - reserve space in the GuC's workqueue
- * @request: request associated with the commands
- *
- * Return: 0 if space is available
- * -EAGAIN if space is not currently available
- *
- * This function must be called (and must return 0) before a request
- * is submitted to the GuC via i915_guc_submit() below. Once a result
- * of 0 has been returned, it must be balanced by a corresponding
- * call to submit().
- *
- * Reservation allows the caller to determine in advance that space
- * will be available for the next submission before committing resources
- * to it, and helps avoid late failures with complicated recovery paths.
- */
-int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
-{
- const size_t wqi_size = sizeof(struct guc_wq_item);
- struct i915_guc_client *client = request->i915->guc.execbuf_client;
- struct guc_process_desc *desc = __get_process_desc(client);
- u32 freespace;
- int ret;
-
- spin_lock_irq(&client->wq_lock);
- freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
- freespace -= client->wq_rsvd;
- if (likely(freespace >= wqi_size)) {
- client->wq_rsvd += wqi_size;
- ret = 0;
- } else {
- client->no_wq_space++;
- ret = -EAGAIN;
- }
- spin_unlock_irq(&client->wq_lock);
-
- return ret;
-}
-
-static void guc_client_update_wq_rsvd(struct i915_guc_client *client, int size)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&client->wq_lock, flags);
- client->wq_rsvd += size;
- spin_unlock_irqrestore(&client->wq_lock, flags);
-}
-
-void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
-{
- const int wqi_size = sizeof(struct guc_wq_item);
- struct i915_guc_client *client = request->i915->guc.execbuf_client;
-
- GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size);
- guc_client_update_wq_rsvd(client, -wqi_size);
-}
-
/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct i915_guc_client *client,
struct drm_i915_gem_request *rq)
{
/* wqi_len is in DWords, and does not include the one-word header */
const size_t wqi_size = sizeof(struct guc_wq_item);
- const u32 wqi_len = wqi_size/sizeof(u32) - 1;
+ const u32 wqi_len = wqi_size / sizeof(u32) - 1;
struct intel_engine_cs *engine = rq->engine;
+ struct i915_gem_context *ctx = rq->ctx;
struct guc_process_desc *desc = __get_process_desc(client);
struct guc_wq_item *wqi;
- u32 freespace, tail, wq_off;
+ u32 ring_tail, wq_off;
- /* Free space is guaranteed, see i915_guc_wq_reserve() above */
- freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
- GEM_BUG_ON(freespace < wqi_size);
+ lockdep_assert_held(&client->wq_lock);
- /* The GuC firmware wants the tail index in QWords, not bytes */
- tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
- GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
+ ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
+ GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
* should not have the case where structure wqi is across page, neither
@@ -491,29 +432,29 @@ static void guc_wq_item_append(struct i915_guc_client *client,
* workqueue buffer dw by dw.
*/
BUILD_BUG_ON(wqi_size != 16);
- GEM_BUG_ON(client->wq_rsvd < wqi_size);
- /* postincrement WQ tail for next time */
- wq_off = client->wq_tail;
+ /* Free space is guaranteed. */
+ wq_off = READ_ONCE(desc->tail);
+ GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
+ GUC_WQ_SIZE) < wqi_size);
GEM_BUG_ON(wq_off & (wqi_size - 1));
- client->wq_tail += wqi_size;
- client->wq_tail &= client->wq_size - 1;
- client->wq_rsvd -= wqi_size;
/* WQ starts from the page after doorbell / process_desc */
wqi = client->vaddr + wq_off + GUC_DB_SIZE;
/* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
- (wqi_len << WQ_LEN_SHIFT) |
- (engine->guc_id << WQ_TARGET_SHIFT) |
- WQ_NO_WCFLUSH_WAIT;
+ (wqi_len << WQ_LEN_SHIFT) |
+ (engine->guc_id << WQ_TARGET_SHIFT) |
+ WQ_NO_WCFLUSH_WAIT;
- /* The GuC wants only the low-order word of the context descriptor */
- wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
+ wqi->context_desc = lower_32_bits(intel_lr_context_descriptor(ctx, engine));
- wqi->submit_element_info = tail << WQ_RING_TAIL_SHIFT;
+ wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
wqi->fence_id = rq->global_seqno;
+
+ /* Postincrement WQ tail for next time. */
+ WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
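
To make the packing above easier to follow in isolation, here is a small userspace sketch of the same arithmetic: a 16-byte work item gives wqi_len = 16/4 - 1 = 3 DWords, and the ring tail is converted from a byte offset to QWords before being packed into the submit element. The WQ_* values and the engine/seqno numbers are illustrative placeholders, not the real intel_guc_fwif.h definitions.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative placeholders; the real values live in intel_guc_fwif.h. */
    #define WQ_TYPE_INORDER      (0x2u << 0)
    #define WQ_LEN_SHIFT         16
    #define WQ_TARGET_SHIFT      10
    #define WQ_NO_WCFLUSH_WAIT   (1u << 27)

    struct guc_wq_item {
            uint32_t header;
            uint32_t context_desc;
            uint32_t submit_element_info;
            uint32_t fence_id;
    };

    int main(void)
    {
            const size_t wqi_size = sizeof(struct guc_wq_item);        /* 16 bytes */
            const unsigned int wqi_len = wqi_size / sizeof(uint32_t) - 1; /* 3 DWords, header excluded */
            unsigned int ring_tail_bytes = 0x1a0;                      /* hypothetical byte offset */
            unsigned int ring_tail = ring_tail_bytes / sizeof(uint64_t); /* firmware wants QWords */
            unsigned int guc_engine_id = 0;                            /* hypothetical engine */
            struct guc_wq_item wqi;

            assert(wqi_size == 16);

            wqi.header = WQ_TYPE_INORDER |
                         (wqi_len << WQ_LEN_SHIFT) |
                         (guc_engine_id << WQ_TARGET_SHIFT) |
                         WQ_NO_WCFLUSH_WAIT;
            wqi.context_desc = 0;                /* lower 32 bits of the LRC descriptor */
            wqi.submit_element_info = ring_tail; /* << WQ_RING_TAIL_SHIFT in the driver */
            wqi.fence_id = 42;                   /* hypothetical global seqno */

            printf("wqi_len=%u ring_tail(qwords)=%u header=0x%08x\n",
                   wqi_len, ring_tail, (unsigned int)wqi.header);
            return 0;
    }
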
static void guc_reset_wq(struct i915_guc_client *client)
@@ -522,106 +463,64 @@ static void guc_reset_wq(struct i915_guc_client *client)
desc->head = 0;
desc->tail = 0;
-
- client->wq_tail = 0;
}
-static int guc_ring_doorbell(struct i915_guc_client *client)
+static void guc_ring_doorbell(struct i915_guc_client *client)
{
- struct guc_process_desc *desc = __get_process_desc(client);
- union guc_doorbell_qw db_cmp, db_exc, db_ret;
- union guc_doorbell_qw *db;
- int attempt = 2, ret = -EAGAIN;
+ struct guc_doorbell_info *db;
+ u32 cookie;
- /* Update the tail so it is visible to GuC */
- desc->tail = client->wq_tail;
-
- /* current cookie */
- db_cmp.db_status = GUC_DOORBELL_ENABLED;
- db_cmp.cookie = client->doorbell_cookie;
-
- /* cookie to be updated */
- db_exc.db_status = GUC_DOORBELL_ENABLED;
- db_exc.cookie = client->doorbell_cookie + 1;
- if (db_exc.cookie == 0)
- db_exc.cookie = 1;
+ lockdep_assert_held(&client->wq_lock);
/* pointer of current doorbell cacheline */
- db = (union guc_doorbell_qw *)__get_doorbell(client);
-
- while (attempt--) {
- /* lets ring the doorbell */
- db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
- db_cmp.value_qw, db_exc.value_qw);
-
- /* if the exchange was successfully executed */
- if (db_ret.value_qw == db_cmp.value_qw) {
- /* db was successfully rung */
- client->doorbell_cookie = db_exc.cookie;
- ret = 0;
- break;
- }
-
- /* XXX: doorbell was lost and need to acquire it again */
- if (db_ret.db_status == GUC_DOORBELL_DISABLED)
- break;
+ db = __get_doorbell(client);
- DRM_WARN("Cookie mismatch. Expected %d, found %d\n",
- db_cmp.cookie, db_ret.cookie);
+ /* we're not expecting the doorbell cookie to change behind our back */
+ cookie = READ_ONCE(db->cookie);
+ WARN_ON_ONCE(xchg(&db->cookie, cookie + 1) != cookie);
- /* update the cookie to newly read cookie from GuC */
- db_cmp.cookie = db_ret.cookie;
- db_exc.cookie = db_ret.cookie + 1;
- if (db_exc.cookie == 0)
- db_exc.cookie = 1;
- }
-
- return ret;
+ /* XXX: doorbell was lost and need to acquire it again */
+ GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
}
/**
- * __i915_guc_submit() - Submit commands through GuC
- * @rq: request associated with the commands
- *
- * The caller must have already called i915_guc_wq_reserve() above with
- * a result of 0 (success), guaranteeing that there is space in the work
- * queue for the new request, so enqueuing the item cannot fail.
- *
- * Bad Things Will Happen if the caller violates this protocol e.g. calls
- * submit() when _reserve() says there's no space, or calls _submit()
- * a different number of times from (successful) calls to _reserve().
+ * i915_guc_submit() - Submit commands through GuC
+ * @engine: engine associated with the commands
*
* The only error here arises if the doorbell hardware isn't functioning
 * as expected, which really shouldn't happen.
*/
-static void __i915_guc_submit(struct drm_i915_gem_request *rq)
+static void i915_guc_submit(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = rq->i915;
- struct intel_engine_cs *engine = rq->engine;
- unsigned int engine_id = engine->id;
- struct intel_guc *guc = &rq->i915->guc;
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_guc *guc = &dev_priv->guc;
struct i915_guc_client *client = guc->execbuf_client;
- unsigned long flags;
- int b_ret;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct execlist_port *port = execlists->port;
+ const unsigned int engine_id = engine->id;
+ unsigned int n;
- /* WA to flush out the pending GMADR writes to ring buffer. */
- if (i915_vma_is_map_and_fenceable(rq->ring->vma))
- POSTING_READ_FW(GUC_STATUS);
+ for (n = 0; n < execlists_num_ports(execlists); n++) {
+ struct drm_i915_gem_request *rq;
+ unsigned int count;
- spin_lock_irqsave(&client->wq_lock, flags);
+ rq = port_unpack(&port[n], &count);
+ if (rq && count == 0) {
+ port_set(&port[n], port_pack(rq, ++count));
- guc_wq_item_append(client, rq);
- b_ret = guc_ring_doorbell(client);
+ if (i915_vma_is_map_and_fenceable(rq->ring->vma))
+ POSTING_READ_FW(GUC_STATUS);
- client->submissions[engine_id] += 1;
+ spin_lock(&client->wq_lock);
- spin_unlock_irqrestore(&client->wq_lock, flags);
-}
+ guc_wq_item_append(client, rq);
+ guc_ring_doorbell(client);
-static void i915_guc_submit(struct drm_i915_gem_request *rq)
-{
- __i915_gem_request_submit(rq);
- __i915_guc_submit(rq);
+ client->submissions[engine_id] += 1;
+
+ spin_unlock(&client->wq_lock);
+ }
+ }
}
static void nested_enable_signaling(struct drm_i915_gem_request *rq)
@@ -655,27 +554,33 @@ static void port_assign(struct execlist_port *port,
if (port_isset(port))
i915_gem_request_put(port_request(port));
- port_set(port, i915_gem_request_get(rq));
+ port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
nested_enable_signaling(rq);
}
-static bool i915_guc_dequeue(struct intel_engine_cs *engine)
+static void i915_guc_dequeue(struct intel_engine_cs *engine)
{
- struct execlist_port *port = engine->execlist_port;
- struct drm_i915_gem_request *last = port_request(port);
- struct rb_node *rb;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct execlist_port *port = execlists->port;
+ struct drm_i915_gem_request *last = NULL;
+ const struct execlist_port * const last_port =
+ &execlists->port[execlists->port_mask];
bool submit = false;
+ struct rb_node *rb;
+
+ if (port_isset(port))
+ port++;
spin_lock_irq(&engine->timeline->lock);
- rb = engine->execlist_first;
- GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
+ rb = execlists->first;
+ GEM_BUG_ON(rb_first(&execlists->queue) != rb);
while (rb) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
struct drm_i915_gem_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
if (last && rq->ctx != last->ctx) {
- if (port != engine->execlist_port) {
+ if (port == last_port) {
__list_del_many(&p->requests,
&rq->priotree.link);
goto done;
@@ -689,50 +594,51 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
INIT_LIST_HEAD(&rq->priotree.link);
rq->priotree.priority = INT_MAX;
- i915_guc_submit(rq);
- trace_i915_gem_request_in(rq, port_index(port, engine));
+ __i915_gem_request_submit(rq);
+ trace_i915_gem_request_in(rq, port_index(port, execlists));
last = rq;
submit = true;
}
rb = rb_next(rb);
- rb_erase(&p->node, &engine->execlist_queue);
+ rb_erase(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
done:
- engine->execlist_first = rb;
- if (submit)
+ execlists->first = rb;
+ if (submit) {
port_assign(port, last);
+ execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
+ i915_guc_submit(engine);
+ }
spin_unlock_irq(&engine->timeline->lock);
-
- return submit;
}
static void i915_guc_irq_handler(unsigned long data)
{
- struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
- struct execlist_port *port = engine->execlist_port;
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct execlist_port *port = execlists->port;
+ const struct execlist_port * const last_port =
+ &execlists->port[execlists->port_mask];
struct drm_i915_gem_request *rq;
- bool submit;
- do {
- rq = port_request(&port[0]);
- while (rq && i915_gem_request_completed(rq)) {
- trace_i915_gem_request_out(rq);
- i915_gem_request_put(rq);
+ rq = port_request(&port[0]);
+ while (rq && i915_gem_request_completed(rq)) {
+ trace_i915_gem_request_out(rq);
+ i915_gem_request_put(rq);
- port[0] = port[1];
- memset(&port[1], 0, sizeof(port[1]));
+ execlists_port_complete(execlists, port);
- rq = port_request(&port[0]);
- }
+ rq = port_request(&port[0]);
+ }
+ if (!rq)
+ execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
- submit = false;
- if (!port_count(&port[1]))
- submit = i915_guc_dequeue(engine);
- } while (submit);
+ if (!port_isset(last_port))
+ i915_guc_dequeue(engine);
}
/*
@@ -741,48 +647,6 @@ static void i915_guc_irq_handler(unsigned long data)
* path of i915_guc_submit() above.
*/
-/**
- * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
- * @guc: the guc
- * @size: size of area to allocate (both virtual space and memory)
- *
- * This is a wrapper to create an object for use with the GuC. In order to
- * use it inside the GuC, an object needs to be pinned lifetime, so we allocate
- * both some backing storage and a range inside the Global GTT. We must pin
- * it in the GGTT somewhere other than than [0, GUC_WOPCM_TOP) because that
- * range is reserved inside GuC.
- *
- * Return: A i915_vma if successful, otherwise an ERR_PTR.
- */
-struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int ret;
-
- obj = i915_gem_object_create(dev_priv, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
- if (IS_ERR(vma))
- goto err;
-
- ret = i915_vma_pin(vma, 0, PAGE_SIZE,
- PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
-
- return vma;
-
-err:
- i915_gem_object_put(obj);
- return vma;
-}
-
/* Check that a doorbell register is in the expected state */
static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
@@ -894,8 +758,8 @@ static int guc_init_doorbell_hw(struct intel_guc *guc)
*/
static struct i915_guc_client *
guc_client_alloc(struct drm_i915_private *dev_priv,
- uint32_t engines,
- uint32_t priority,
+ u32 engines,
+ u32 priority,
struct i915_gem_context *ctx)
{
struct i915_guc_client *client;
@@ -913,8 +777,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
client->engines = engines;
client->priority = priority;
client->doorbell_id = GUC_DOORBELL_INVALID;
- client->wq_offset = GUC_DB_SIZE;
- client->wq_size = GUC_WQ_SIZE;
spin_lock_init(&client->wq_lock);
ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
@@ -996,28 +858,39 @@ static void guc_client_free(struct i915_guc_client *client)
kfree(client);
}
+static void guc_policy_init(struct guc_policy *policy)
+{
+ policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US;
+ policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US;
+ policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US;
+ policy->policy_flags = 0;
+}
+
static void guc_policies_init(struct guc_policies *policies)
{
struct guc_policy *policy;
u32 p, i;
- policies->dpc_promote_time = 500000;
+ policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US;
policies->max_num_work_items = POLICY_MAX_NUM_WI;
for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
policy = &policies->policy[p][i];
- policy->execution_quantum = 1000000;
- policy->preemption_time = 500000;
- policy->fault_time = 250000;
- policy->policy_flags = 0;
+ guc_policy_init(policy);
}
}
policies->is_valid = 1;
}
+/*
+ * The first 80 dwords of the register state context, containing the
+ * execlists and ppgtt registers.
+ */
+#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
+
static int guc_ads_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -1032,6 +905,8 @@ static int guc_ads_create(struct intel_guc *guc)
} __packed *blob;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
+ const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base;
GEM_BUG_ON(guc->ads_vma);
@@ -1062,13 +937,20 @@ static int guc_ads_create(struct intel_guc *guc)
* engines after a reset. Here we use the Render ring default
* context, which must already exist and be pinned in the GGTT,
* so its address won't change after we've told the GuC where
- * to find it.
+ * to find it. Note that we have to skip our header (1 page),
+ * because our GuC shared data is there.
*/
blob->ads.golden_context_lrca =
- dev_priv->engine[RCS]->status_page.ggtt_offset;
+ guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) + skipped_offset;
+ /*
+ * The GuC expects us to exclude the portion of the context image that
+ * it skips from the size it is to read. It starts reading from after
+ * the execlist context (so skipping the first page [PPHWSP] and 80
+ * dwords). Weird guc is weird.
+ */
for_each_engine(engine, dev_priv, id)
- blob->ads.eng_state_size[engine->guc_id] = engine->context_size;
+ blob->ads.eng_state_size[engine->guc_id] = engine->context_size - skipped_size;
base = guc_ggtt_offset(vma);
blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
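
A minimal worked example of the offset/size bookkeeping described in the comments above, assuming 4 KiB pages, one skipped header page and one PPHWSP page (what the LRC_* macros are expected to expand to here); the context size is a made-up figure.

    #include <stdio.h>

    #define PAGE_SIZE            4096u
    #define LRC_HEADER_PAGES     1u              /* assumed: GuC shared data page */
    #define LRC_PPHWSP_SZ        1u              /* assumed: per-process HWSP page */
    #define LR_HW_CONTEXT_SIZE   (80u * 4u)      /* 80 dwords of execlists + ppgtt regs */

    int main(void)
    {
            unsigned int context_size = 20u * PAGE_SIZE; /* hypothetical engine->context_size */
            unsigned int skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
            unsigned int skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;

            /* The GuC starts reading after the skipped portion ... */
            printf("golden context starts %u bytes into the context image\n",
                   skipped_offset);
            /* ... and is told only about the bytes it will actually read. */
            printf("eng_state_size = %u bytes\n", context_size - skipped_size);
            return 0;
    }
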
@@ -1149,6 +1031,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int irqs;
@@ -1185,12 +1068,13 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
* Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
* result in the register bit being left SET!
*/
- dev_priv->rps.pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
- dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+ rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
+ rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
static void guc_interrupts_release(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int irqs;
@@ -1209,8 +1093,8 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
I915_WRITE(GUC_VCS2_VCS1_IER, 0);
I915_WRITE(GUC_WD_VECS_IER, 0);
- dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
- dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+ rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+ rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
}
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
@@ -1221,6 +1105,19 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
enum intel_engine_id id;
int err;
+ /*
+ * We're using GuC work items for submitting work through GuC. Since
+ * we're coalescing multiple requests from a single context into a
+ * single work item prior to assigning it to execlist_port, we can
+ * never have more work items than the total number of ports (for all
+ * engines). The GuC firmware is controlling the HEAD of the work queue,
+ * and it is guaranteed that it will remove the work item from the
+ * queue before our request is completed.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
+ sizeof(struct guc_wq_item) *
+ I915_NUM_ENGINES > GUC_WQ_SIZE);
+
if (!client) {
client = guc_client_alloc(dev_priv,
INTEL_INFO(dev_priv)->ring_mask,
@@ -1248,24 +1145,15 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
guc_interrupts_capture(dev_priv);
for_each_engine(engine, dev_priv, id) {
- const int wqi_size = sizeof(struct guc_wq_item);
- struct drm_i915_gem_request *rq;
-
+ struct intel_engine_execlists * const execlists = &engine->execlists;
/* The tasklet was initialised by execlists, and may be in
* a state of flux (across a reset) and so we just want to
* take over the callback without changing any other state
* in the tasklet.
*/
- engine->irq_tasklet.func = i915_guc_irq_handler;
+ execlists->irq_tasklet.func = i915_guc_irq_handler;
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-
- /* Replay the current set of previously submitted requests */
- spin_lock_irq(&engine->timeline->lock);
- list_for_each_entry(rq, &engine->timeline->requests, link) {
- guc_client_update_wq_rsvd(client, wqi_size);
- __i915_guc_submit(rq);
- }
- spin_unlock_irq(&engine->timeline->lock);
+ tasklet_schedule(&execlists->irq_tasklet);
}
return 0;
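
The BUILD_BUG_ON above encodes the sizing argument from the comment. As a standalone sketch with assumed numbers (two execlist ports per engine, five engines, 16-byte work items and the two-page work queue mentioned in guc_wq_item_append()), the worst case stays far below the queue size:

    #include <stdio.h>

    /* Assumed values for illustration only. */
    #define EXECLIST_PORTS       2u              /* ports per engine */
    #define NUM_ENGINES          5u              /* I915_NUM_ENGINES at the time */
    #define WQ_ITEM_SIZE         16u             /* sizeof(struct guc_wq_item) */
    #define GUC_WQ_SIZE          (2u * 4096u)    /* two pages of work queue */

    int main(void)
    {
            unsigned int worst_case = EXECLIST_PORTS * WQ_ITEM_SIZE * NUM_ENGINES;

            /* At most one coalesced work item per port per engine can be outstanding. */
            printf("worst case %u bytes of work items vs %u bytes of queue\n",
                   worst_case, GUC_WQ_SIZE);
            return worst_case > GUC_WQ_SIZE; /* mirrors the BUILD_BUG_ON condition */
    }
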
@@ -1288,55 +1176,3 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
guc_client_free(guc->execbuf_client);
guc->execbuf_client = NULL;
}
-
-/**
- * intel_guc_suspend() - notify GuC entering suspend state
- * @dev_priv: i915 device private
- */
-int intel_guc_suspend(struct drm_i915_private *dev_priv)
-{
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_gem_context *ctx;
- u32 data[3];
-
- if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return 0;
-
- gen9_disable_guc_interrupts(dev_priv);
-
- ctx = dev_priv->kernel_context;
-
- data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
- /* any value greater than GUC_POWER_D0 */
- data[1] = GUC_POWER_D1;
- /* first page is shared data with GuC */
- data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
-
- return intel_guc_send(guc, data, ARRAY_SIZE(data));
-}
-
-/**
- * intel_guc_resume() - notify GuC resuming from suspend state
- * @dev_priv: i915 device private
- */
-int intel_guc_resume(struct drm_i915_private *dev_priv)
-{
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_gem_context *ctx;
- u32 data[3];
-
- if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return 0;
-
- if (i915.guc_log_level >= 0)
- gen9_enable_guc_interrupts(dev_priv);
-
- ctx = dev_priv->kernel_context;
-
- data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
- data[1] = GUC_POWER_D0;
- /* first page is shared data with GuC */
- data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
-
- return intel_guc_send(guc, data, ARRAY_SIZE(data));
-}
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.h b/drivers/gpu/drm/i915/i915_guc_submission.h
new file mode 100644
index 000000000000..cb4353b59059
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_guc_submission.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_GUC_SUBMISSION_H_
+#define _I915_GUC_SUBMISSION_H_
+
+#include <linux/spinlock.h>
+
+#include "i915_gem.h"
+
+struct drm_i915_private;
+
+/*
+ * This structure primarily describes the GEM object shared with the GuC.
+ * The specs sometimes refer to this object as a "GuC context", but we use
+ * the term "client" to avoid confusion with hardware contexts. This
+ * GEM object is held for the entire lifetime of our interaction with
+ * the GuC, being allocated before the GuC is loaded with its firmware.
+ * Because there's no way to update the address used by the GuC after
+ * initialisation, the shared object must stay pinned into the GGTT as
+ * long as the GuC is in use. We also keep the first page (only) mapped
+ * into kernel address space, as it includes shared data that must be
+ * updated on every request submission.
+ *
+ * The single GEM object described here is actually made up of several
+ * separate areas, as far as the GuC is concerned. The first page (kept
+ * kmap'd) includes the "process descriptor" which holds sequence data for
+ * the doorbell, and one cacheline which actually *is* the doorbell; a
+ * write to this will "ring the doorbell" (i.e. send an interrupt to the
+ * GuC). The subsequent pages of the client object constitute the work
+ * queue (a circular array of work items), again described in the process
+ * descriptor. Work queue pages are mapped momentarily as required.
+ */
+struct i915_guc_client {
+ struct i915_vma *vma;
+ void *vaddr;
+ struct i915_gem_context *owner;
+ struct intel_guc *guc;
+
+ /* bitmap of (host) engine ids */
+ u32 engines;
+ u32 priority;
+ u32 stage_id;
+ u32 proc_desc_offset;
+
+ u16 doorbell_id;
+ unsigned long doorbell_offset;
+
+ spinlock_t wq_lock;
+ /* Per-engine counts of GuC submissions */
+ u64 submissions[I915_NUM_ENGINES];
+};
+
+int i915_guc_submission_init(struct drm_i915_private *dev_priv);
+int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
+void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
+void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
+
+#endif
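
To make the layout described in the comment block concrete, here is a small standalone sketch of where the pieces of the client object sit, assuming 4 KiB pages, a one-page doorbell area (GUC_DB_SIZE) and a two-page work queue (GUC_WQ_SIZE); the process-descriptor and doorbell offsets are hypothetical stand-ins for proc_desc_offset and doorbell_offset.

    #include <stdio.h>

    #define PAGE_SIZE        4096u
    #define GUC_DB_SIZE      PAGE_SIZE           /* assumed: first page of the client */
    #define GUC_WQ_SIZE      (2u * PAGE_SIZE)    /* assumed: circular work queue */

    int main(void)
    {
            unsigned int proc_desc_offset = 0;       /* process descriptor at start of page 0 */
            unsigned int doorbell_offset = 0x900;    /* hypothetical cacheline in page 0 */

            /* Page 0: process descriptor + doorbell cacheline (kept kmap'd). */
            printf("process descriptor @ 0x%04x\n", proc_desc_offset);
            printf("doorbell cacheline @ 0x%04x\n", doorbell_offset);

            /* Pages 1..2: the circular work queue, indexed by desc->head/tail. */
            printf("work queue @ 0x%04x .. 0x%04x\n",
                   GUC_DB_SIZE, GUC_DB_SIZE + GUC_WQ_SIZE - 1);
            return 0;
    }
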
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b63893eeca73..f8205841868b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -126,7 +126,7 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)
-#define GEN5_IRQ_RESET(type) do { \
+#define GEN3_IRQ_RESET(type) do { \
I915_WRITE(type##IMR, 0xffffffff); \
POSTING_READ(type##IMR); \
I915_WRITE(type##IER, 0); \
@@ -136,10 +136,20 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
POSTING_READ(type##IIR); \
} while (0)
+#define GEN2_IRQ_RESET(type) do { \
+ I915_WRITE16(type##IMR, 0xffff); \
+ POSTING_READ16(type##IMR); \
+ I915_WRITE16(type##IER, 0); \
+ I915_WRITE16(type##IIR, 0xffff); \
+ POSTING_READ16(type##IIR); \
+ I915_WRITE16(type##IIR, 0xffff); \
+ POSTING_READ16(type##IIR); \
+} while (0)
+
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
-static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
+static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
u32 val = I915_READ(reg);
@@ -155,20 +165,43 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
POSTING_READ(reg);
}
+static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
+ i915_reg_t reg)
+{
+ u16 val = I915_READ16(reg);
+
+ if (val == 0)
+ return;
+
+ WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
+ i915_mmio_reg_offset(reg), val);
+ I915_WRITE16(reg, 0xffff);
+ POSTING_READ16(reg);
+ I915_WRITE16(reg, 0xffff);
+ POSTING_READ16(reg);
+}
+
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
- gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
+ gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)
-#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
- gen5_assert_iir_is_zero(dev_priv, type##IIR); \
+#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
+ gen3_assert_iir_is_zero(dev_priv, type##IIR); \
I915_WRITE(type##IER, (ier_val)); \
I915_WRITE(type##IMR, (imr_val)); \
POSTING_READ(type##IMR); \
} while (0)
+#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
+ gen2_assert_iir_is_zero(dev_priv, type##IIR); \
+ I915_WRITE16(type##IER, (ier_val)); \
+ I915_WRITE16(type##IMR, (imr_val)); \
+ POSTING_READ16(type##IMR); \
+} while (0)
+
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
@@ -336,7 +369,7 @@ void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
__gen6_mask_pm_irq(dev_priv, mask);
}
-void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
+static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
i915_reg_t reg = gen6_pm_iir(dev_priv);
@@ -347,7 +380,7 @@ void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
POSTING_READ(reg);
}
-void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
+static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
lockdep_assert_held(&dev_priv->irq_lock);
@@ -357,7 +390,7 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
+static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
lockdep_assert_held(&dev_priv->irq_lock);
@@ -371,19 +404,21 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
spin_lock_irq(&dev_priv->irq_lock);
gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
- dev_priv->rps.pm_iir = 0;
+ dev_priv->gt_pm.rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
}
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
- if (READ_ONCE(dev_priv->rps.interrupts_enabled))
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ if (READ_ONCE(rps->interrupts_enabled))
return;
spin_lock_irq(&dev_priv->irq_lock);
- WARN_ON_ONCE(dev_priv->rps.pm_iir);
+ WARN_ON_ONCE(rps->pm_iir);
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
- dev_priv->rps.interrupts_enabled = true;
+ rps->interrupts_enabled = true;
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -391,11 +426,13 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
- if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ if (!READ_ONCE(rps->interrupts_enabled))
return;
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->rps.interrupts_enabled = false;
+ rps->interrupts_enabled = false;
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
@@ -405,11 +442,11 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
synchronize_irq(dev_priv->drm.irq);
/* Now that we will not be generating any more work, flush any
- * outsanding tasks. As we are called on the RPS idle path,
+ * outstanding tasks. As we are called on the RPS idle path,
* we will reset the GPU to minimum frequencies, so the current
* state of the worker can be discarded.
*/
- cancel_work_sync(&dev_priv->rps.work);
+ cancel_work_sync(&rps->work);
gen6_reset_rps_interrupts(dev_priv);
}
@@ -534,62 +571,16 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
POSTING_READ(SDEIMR);
}
-static void
-__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
- u32 enable_mask, u32 status_mask)
-{
- i915_reg_t reg = PIPESTAT(pipe);
- u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
-
- lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(!intel_irqs_enabled(dev_priv));
-
- if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
- status_mask & ~PIPESTAT_INT_STATUS_MASK,
- "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
- pipe_name(pipe), enable_mask, status_mask))
- return;
-
- if ((pipestat & enable_mask) == enable_mask)
- return;
-
- dev_priv->pipestat_irq_mask[pipe] |= status_mask;
-
- /* Enable the interrupt, clear any pending status */
- pipestat |= enable_mask | status_mask;
- I915_WRITE(reg, pipestat);
- POSTING_READ(reg);
-}
-
-static void
-__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
- u32 enable_mask, u32 status_mask)
+u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- i915_reg_t reg = PIPESTAT(pipe);
- u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
+ u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
+ u32 enable_mask = status_mask << 16;
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(!intel_irqs_enabled(dev_priv));
-
- if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
- status_mask & ~PIPESTAT_INT_STATUS_MASK,
- "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
- pipe_name(pipe), enable_mask, status_mask))
- return;
-
- if ((pipestat & enable_mask) == 0)
- return;
-
- dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
- pipestat &= ~enable_mask;
- I915_WRITE(reg, pipestat);
- POSTING_READ(reg);
-}
-
-static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
-{
- u32 enable_mask = status_mask << 16;
+ if (INTEL_GEN(dev_priv) < 5)
+ goto out;
/*
* On pipe A we don't support the PSR interrupt yet,
@@ -612,35 +603,59 @@ static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
+out:
+ WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+ status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+ pipe_name(pipe), enable_mask, status_mask);
+
return enable_mask;
}
-void
-i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
- u32 status_mask)
+void i915_enable_pipestat(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 status_mask)
{
+ i915_reg_t reg = PIPESTAT(pipe);
u32 enable_mask;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
- status_mask);
- else
- enable_mask = status_mask << 16;
- __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
+ WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: status_mask=0x%x\n",
+ pipe_name(pipe), status_mask);
+
+ lockdep_assert_held(&dev_priv->irq_lock);
+ WARN_ON(!intel_irqs_enabled(dev_priv));
+
+ if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
+ return;
+
+ dev_priv->pipestat_irq_mask[pipe] |= status_mask;
+ enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+
+ I915_WRITE(reg, enable_mask | status_mask);
+ POSTING_READ(reg);
}
-void
-i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
- u32 status_mask)
+void i915_disable_pipestat(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 status_mask)
{
+ i915_reg_t reg = PIPESTAT(pipe);
u32 enable_mask;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
- status_mask);
- else
- enable_mask = status_mask << 16;
- __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
+ WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: status_mask=0x%x\n",
+ pipe_name(pipe), status_mask);
+
+ lockdep_assert_held(&dev_priv->irq_lock);
+ WARN_ON(!intel_irqs_enabled(dev_priv));
+
+ if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
+ return;
+
+ dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
+ enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+
+ I915_WRITE(reg, enable_mask | status_mask);
+ POSTING_READ(reg);
}
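
The refactor above leans on the PIPESTAT layout in which each status bit in the low 16 bits has a matching enable bit 16 positions higher (leaving aside the VLV/CHV special cases the function handles). A minimal sketch of that relationship, using a made-up status bit:

    #include <stdio.h>

    /* Hypothetical status bit in the low half of PIPESTAT. */
    #define EXAMPLE_INT_STATUS   (1u << 2)

    int main(void)
    {
            unsigned int status_mask = EXAMPLE_INT_STATUS;
            /* The matching enable bit lives 16 bits above the status bit. */
            unsigned int enable_mask = status_mask << 16;

            /* i915_enable_pipestat() writes both: enable the interrupt and
             * clear any stale status in one go. */
            printf("PIPESTAT write = 0x%08x\n", enable_mask | status_mask);
            return 0;
    }
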
/**
@@ -772,6 +787,57 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
+/*
+ * On certain encoders on certain platforms, the pipe
+ * scanline register cannot be used to get the scanline,
+ * either because the timings are driven from the PORT or
+ * because of issues with scanline register updates.
+ * This function instead uses the framestamp and current
+ * timestamp registers to calculate the scanline.
+ */
+static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_vblank_crtc *vblank =
+ &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ u32 vblank_start = mode->crtc_vblank_start;
+ u32 vtotal = mode->crtc_vtotal;
+ u32 htotal = mode->crtc_htotal;
+ u32 clock = mode->crtc_clock;
+ u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
+
+ /*
+ * To avoid the race condition where we might cross into the
+ * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
+ * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
+ * during the same frame.
+ */
+ do {
+ /*
+ * This field provides read back of the display
+ * pipe frame time stamp. The time stamp value
+ * is sampled at every start of vertical blank.
+ */
+ scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
+
+ /*
+ * The TIMESTAMP_CTR register has the current
+ * time stamp value.
+ */
+ scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
+
+ scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
+ } while (scan_post_time != scan_prev_time);
+
+ scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
+ clock), 1000 * htotal);
+ scanline = min(scanline, vtotal - 1);
+ scanline = (scanline + vblank_start) % vtotal;
+
+ return scanline;
+}
+
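
As a worked example of the conversion above (not driver code): the tick delta is scaled by the pixel clock in kHz and divided by 1000 * htotal, which yields scanlines if the timestamp counters tick once per microsecond, as the formula assumes; the result is clamped and then rotated by vblank_start modulo vtotal. All numbers below are hypothetical.

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int min_u32(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            /* 1080p-like timings (hypothetical). */
            unsigned int htotal = 2200, vtotal = 1125, vblank_start = 1080;
            unsigned int clock = 148500;             /* pixel clock in kHz */

            /* Made-up counter samples; only the delta matters. */
            uint64_t scan_prev_time = 1000000, scan_curr_time = 1007500;
            uint64_t delta = scan_curr_time - scan_prev_time;

            /* Ticks since the last start of vblank, scaled to scanlines. */
            unsigned int scanline = (unsigned int)(delta * clock / (1000ull * htotal));

            scanline = min_u32(scanline, vtotal - 1);
            scanline = (scanline + vblank_start) % vtotal;

            printf("scanline = %u\n", scanline);
            return 0;
    }
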
/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
@@ -788,6 +854,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
mode = &vblank->hwmode;
+ if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
+ return __intel_get_crtc_scanline_from_timestamp(crtc);
+
vtotal = mode->crtc_vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
@@ -1005,6 +1074,8 @@ static void notify_ring(struct intel_engine_cs *engine)
spin_lock(&engine->breadcrumbs.irq_lock);
wait = engine->breadcrumbs.irq_wait;
if (wait) {
+ bool wakeup = engine->irq_seqno_barrier;
+
/* We use a callback from the dma-fence to submit
* requests after waiting on our own requests. To
* ensure minimum delay in queuing the next request to
@@ -1017,12 +1088,18 @@ static void notify_ring(struct intel_engine_cs *engine)
* and many waiters.
*/
if (i915_seqno_passed(intel_engine_get_seqno(engine),
- wait->seqno) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &wait->request->fence.flags))
- rq = i915_gem_request_get(wait->request);
+ wait->seqno)) {
+ struct drm_i915_gem_request *waiter = wait->request;
+
+ wakeup = true;
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &waiter->fence.flags) &&
+ intel_wait_check_request(wait, waiter))
+ rq = i915_gem_request_get(waiter);
+ }
- wake_up_process(wait->tsk);
+ if (wakeup)
+ wake_up_process(wait->tsk);
} else {
__intel_engine_disarm_breadcrumbs(engine);
}
@@ -1046,12 +1123,13 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
- memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
+ memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
- const struct intel_rps_ei *prev = &dev_priv->rps.ei;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ const struct intel_rps_ei *prev = &rps->ei;
struct intel_rps_ei now;
u32 events = 0;
@@ -1078,28 +1156,29 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
c0 = max(render, media);
c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
- if (c0 > time * dev_priv->rps.up_threshold)
+ if (c0 > time * rps->up_threshold)
events = GEN6_PM_RP_UP_THRESHOLD;
- else if (c0 < time * dev_priv->rps.down_threshold)
+ else if (c0 < time * rps->down_threshold)
events = GEN6_PM_RP_DOWN_THRESHOLD;
}
- dev_priv->rps.ei = now;
+ rps->ei = now;
return events;
}
static void gen6_pm_rps_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, rps.work);
+ container_of(work, struct drm_i915_private, gt_pm.rps.work);
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
bool client_boost = false;
int new_delay, adj, min, max;
u32 pm_iir = 0;
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->rps.interrupts_enabled) {
- pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
- client_boost = atomic_read(&dev_priv->rps.num_waiters);
+ if (rps->interrupts_enabled) {
+ pm_iir = fetch_and_zero(&rps->pm_iir);
+ client_boost = atomic_read(&rps->num_waiters);
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1108,18 +1187,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
goto out;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
- adj = dev_priv->rps.last_adj;
- new_delay = dev_priv->rps.cur_freq;
- min = dev_priv->rps.min_freq_softlimit;
- max = dev_priv->rps.max_freq_softlimit;
+ adj = rps->last_adj;
+ new_delay = rps->cur_freq;
+ min = rps->min_freq_softlimit;
+ max = rps->max_freq_softlimit;
if (client_boost)
- max = dev_priv->rps.max_freq;
- if (client_boost && new_delay < dev_priv->rps.boost_freq) {
- new_delay = dev_priv->rps.boost_freq;
+ max = rps->max_freq;
+ if (client_boost && new_delay < rps->boost_freq) {
+ new_delay = rps->boost_freq;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
@@ -1127,15 +1206,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
- if (new_delay >= dev_priv->rps.max_freq_softlimit)
+ if (new_delay >= rps->max_freq_softlimit)
adj = 0;
} else if (client_boost) {
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
- if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
- new_delay = dev_priv->rps.efficient_freq;
- else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
- new_delay = dev_priv->rps.min_freq_softlimit;
+ if (rps->cur_freq > rps->efficient_freq)
+ new_delay = rps->efficient_freq;
+ else if (rps->cur_freq > rps->min_freq_softlimit)
+ new_delay = rps->min_freq_softlimit;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
@@ -1143,13 +1222,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
- if (new_delay <= dev_priv->rps.min_freq_softlimit)
+ if (new_delay <= rps->min_freq_softlimit)
adj = 0;
} else { /* unknown event */
adj = 0;
}
- dev_priv->rps.last_adj = adj;
+ rps->last_adj = adj;
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
@@ -1159,15 +1238,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
if (intel_set_rps(dev_priv, new_delay)) {
DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
- dev_priv->rps.last_adj = 0;
+ rps->last_adj = 0;
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
out:
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->rps.interrupts_enabled)
+ if (rps->interrupts_enabled)
gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -1305,10 +1384,11 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
bool tasklet = false;
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
- if (port_count(&engine->execlist_port[0])) {
+ if (READ_ONCE(engine->execlists.active)) {
__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
tasklet = true;
}
@@ -1316,11 +1396,11 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
notify_ring(engine);
- tasklet |= i915.enable_guc_submission;
+ tasklet |= i915_modparams.enable_guc_submission;
}
if (tasklet)
- tasklet_hi_schedule(&engine->irq_tasklet);
+ tasklet_hi_schedule(&execlists->irq_tasklet);
}
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
@@ -1573,11 +1653,11 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
* bonkers. So let's just wait for the next vblank and read
* out the buggy result.
*
- * On CHV sometimes the second CRC is bonkers as well, so
+ * On GEN8+ sometimes the second CRC is bonkers as well, so
* don't trust that one either.
*/
if (pipe_crc->skipped == 0 ||
- (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
+ (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
pipe_crc->skipped++;
spin_unlock(&pipe_crc->lock);
return;
@@ -1649,12 +1729,14 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
- if (dev_priv->rps.interrupts_enabled) {
- dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- schedule_work(&dev_priv->rps.work);
+ if (rps->interrupts_enabled) {
+ rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
+ schedule_work(&rps->work);
}
spin_unlock(&dev_priv->irq_lock);
}
@@ -1706,8 +1788,21 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
}
}
-static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
- u32 iir, u32 pipe_stats[I915_MAX_PIPES])
+static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe) {
+ I915_WRITE(PIPESTAT(pipe),
+ PIPESTAT_INT_STATUS_MASK |
+ PIPE_FIFO_UNDERRUN_STATUS);
+
+ dev_priv->pipestat_irq_mask[pipe] = 0;
+ }
+}
+
+static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+ u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
int pipe;
@@ -1720,7 +1815,7 @@ static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
i915_reg_t reg;
- u32 mask, iir_bit = 0;
+ u32 status_mask, enable_mask, iir_bit = 0;
/*
* PIPESTAT bits get signalled even when the interrupt is
@@ -1731,7 +1826,7 @@ static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
*/
/* fifo underruns are filtered in the underrun handler. */
- mask = PIPE_FIFO_UNDERRUN_STATUS;
+ status_mask = PIPE_FIFO_UNDERRUN_STATUS;
switch (pipe) {
case PIPE_A:
@@ -1745,25 +1840,92 @@ static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
break;
}
if (iir & iir_bit)
- mask |= dev_priv->pipestat_irq_mask[pipe];
+ status_mask |= dev_priv->pipestat_irq_mask[pipe];
- if (!mask)
+ if (!status_mask)
continue;
reg = PIPESTAT(pipe);
- mask |= PIPESTAT_INT_ENABLE_MASK;
- pipe_stats[pipe] = I915_READ(reg) & mask;
+ pipe_stats[pipe] = I915_READ(reg) & status_mask;
+ enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
/*
* Clear the PIPE*STAT regs before the IIR
*/
- if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
- PIPESTAT_INT_STATUS_MASK))
- I915_WRITE(reg, pipe_stats[pipe]);
+ if (pipe_stats[pipe])
+ I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
}
spin_unlock(&dev_priv->irq_lock);
}
+static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+ u16 iir, u32 pipe_stats[I915_MAX_PIPES])
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe) {
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(&dev_priv->drm, pipe);
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ }
+}
+
+static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+ u32 iir, u32 pipe_stats[I915_MAX_PIPES])
+{
+ bool blc_event = false;
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe) {
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(&dev_priv->drm, pipe);
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ }
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev_priv);
+}
+
+static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
+ u32 iir, u32 pipe_stats[I915_MAX_PIPES])
+{
+ bool blc_event = false;
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe) {
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(&dev_priv->drm, pipe);
+
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
+
+ if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
+
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
+ }
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
+ intel_opregion_asle_intr(dev_priv);
+
+ if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
+ gmbus_irq_handler(dev_priv);
+}
+
static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
u32 pipe_stats[I915_MAX_PIPES])
{
@@ -1879,7 +2041,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
/* Call regardless, as some status bits might not be
* signalled in iir */
- valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT))
@@ -1963,7 +2125,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
/* Call regardless, as some status bits might not be
* signalled in iir */
- valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
+ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
if (iir & (I915_LPE_PIPE_A_INTERRUPT |
I915_LPE_PIPE_B_INTERRUPT |
@@ -2100,18 +2262,14 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
u32 serr_int = I915_READ(SERR_INT);
+ enum pipe pipe;
if (serr_int & SERR_INT_POISON)
DRM_ERROR("PCH poison interrupt\n");
- if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
- intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
-
- if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
- intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
-
- if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
- intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_C);
+ for_each_pipe(dev_priv, pipe)
+ if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
+ intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
I915_WRITE(SERR_INT, serr_int);
}
@@ -2860,7 +3018,7 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
if (HAS_PCH_NOP(dev_priv))
return;
- GEN5_IRQ_RESET(SDE);
+ GEN3_IRQ_RESET(SDE);
if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
I915_WRITE(SERR_INT, 0xffffffff);
@@ -2888,15 +3046,13 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- GEN5_IRQ_RESET(GT);
+ GEN3_IRQ_RESET(GT);
if (INTEL_GEN(dev_priv) >= 6)
- GEN5_IRQ_RESET(GEN6_PM);
+ GEN3_IRQ_RESET(GEN6_PM);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
- enum pipe pipe;
-
if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
@@ -2905,14 +3061,9 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- for_each_pipe(dev_priv, pipe) {
- I915_WRITE(PIPESTAT(pipe),
- PIPE_FIFO_UNDERRUN_STATUS |
- PIPESTAT_INT_STATUS_MASK);
- dev_priv->pipestat_irq_mask[pipe] = 0;
- }
+ i9xx_pipestat_irq_reset(dev_priv);
- GEN5_IRQ_RESET(VLV_);
+ GEN3_IRQ_RESET(VLV_);
dev_priv->irq_mask = ~0;
}
@@ -2922,8 +3073,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
u32 enable_mask;
enum pipe pipe;
- pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
- PIPE_CRC_DONE_INTERRUPT_STATUS;
+ pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
for_each_pipe(dev_priv, pipe)
@@ -2943,7 +3093,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~enable_mask;
- GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}
/* drm_dma.h hooks
@@ -2952,9 +3102,10 @@ static void ironlake_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- I915_WRITE(HWSTAM, 0xffffffff);
+ if (IS_GEN5(dev_priv))
+ I915_WRITE(HWSTAM, 0xffffffff);
- GEN5_IRQ_RESET(DE);
+ GEN3_IRQ_RESET(DE);
if (IS_GEN7(dev_priv))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
@@ -2963,7 +3114,7 @@ static void ironlake_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev_priv);
}
-static void valleyview_irq_preinstall(struct drm_device *dev)
+static void valleyview_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3001,9 +3152,9 @@ static void gen8_irq_reset(struct drm_device *dev)
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
- GEN5_IRQ_RESET(GEN8_DE_PORT_);
- GEN5_IRQ_RESET(GEN8_DE_MISC_);
- GEN5_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(GEN8_DE_PORT_);
+ GEN3_IRQ_RESET(GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(GEN8_PCU_);
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);
@@ -3016,10 +3167,17 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
+
+ if (!intel_irqs_enabled(dev_priv)) {
+ spin_unlock_irq(&dev_priv->irq_lock);
+ return;
+ }
+
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
~dev_priv->de_irq_mask[pipe] | extra_ier);
+
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -3029,15 +3187,22 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
+
+ if (!intel_irqs_enabled(dev_priv)) {
+ spin_unlock_irq(&dev_priv->irq_lock);
+ return;
+ }
+
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+
spin_unlock_irq(&dev_priv->irq_lock);
/* make sure we're done processing display irqs */
synchronize_irq(dev_priv->drm.irq);
}
-static void cherryview_irq_preinstall(struct drm_device *dev)
+static void cherryview_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3046,7 +3211,7 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv);
- GEN5_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(GEN8_PCU_);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3111,7 +3276,15 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug;
+ u32 val, hotplug;
+
+ /* Display WA #1179 WaHardHangonHotPlug: cnp */
+ if (HAS_PCH_CNP(dev_priv)) {
+ val = I915_READ(SOUTH_CHICKEN1);
+ val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
+ val |= CHASSIS_CLK_REQ_DURATION(0xf);
+ I915_WRITE(SOUTH_CHICKEN1, val);
+ }
/* Enable digital hotplug on the PCH */
hotplug = I915_READ(PCH_PORT_HOTPLUG);
@@ -3238,10 +3411,12 @@ static void ibx_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_IBX(dev_priv))
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
- else
+ else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
+ else
+ mask = SDE_GMBUS_CPT;
- gen5_assert_iir_is_zero(dev_priv, SDEIIR);
+ gen3_assert_iir_is_zero(dev_priv, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
@@ -3272,7 +3447,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
}
- GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
+ GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
if (INTEL_GEN(dev_priv) >= 6) {
/*
@@ -3285,7 +3460,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
}
dev_priv->pm_imr = 0xffffffff;
- GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
+ GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
}
}
@@ -3296,18 +3471,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
if (INTEL_GEN(dev_priv) >= 7) {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
- DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
- DE_PLANEB_FLIP_DONE_IVB |
- DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
+ DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
DE_DP_A_HOTPLUG_IVB);
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
- DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
- DE_AUX_CHANNEL_A |
- DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
- DE_POISON);
+ DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
+ DE_PIPEA_CRC_DONE | DE_POISON);
extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
DE_DP_A_HOTPLUG);
@@ -3315,11 +3486,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
dev_priv->irq_mask = ~display_mask;
- I915_WRITE(HWSTAM, 0xeffe);
-
ibx_irq_pre_postinstall(dev);
- GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
+ GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
gen5_gt_irq_postinstall(dev);
@@ -3429,15 +3598,13 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
enum pipe pipe;
if (INTEL_GEN(dev_priv) >= 9) {
- de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
- GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+ de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
if (IS_GEN9_LP(dev_priv))
de_port_masked |= BXT_DE_PORT_GMBUS;
} else {
- de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
- GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+ de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
@@ -3449,19 +3616,18 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
- dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
- dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
- dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
+ for_each_pipe(dev_priv, pipe) {
+ dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
- for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
+ }
- GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
- GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
+ GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
+ GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
if (IS_GEN9_LP(dev_priv))
bxt_hpd_detection_setup(dev_priv);
@@ -3505,98 +3671,36 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void gen8_irq_uninstall(struct drm_device *dev)
+static void i8xx_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!dev_priv)
- return;
-
- gen8_irq_reset(dev);
-}
-
-static void valleyview_irq_uninstall(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (!dev_priv)
- return;
-
- I915_WRITE(VLV_MASTER_IER, 0);
- POSTING_READ(VLV_MASTER_IER);
-
- gen5_gt_irq_reset(dev_priv);
-
- I915_WRITE(HWSTAM, 0xffffffff);
-
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
- vlv_display_irq_reset(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void cherryview_irq_uninstall(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (!dev_priv)
- return;
-
- I915_WRITE(GEN8_MASTER_IRQ, 0);
- POSTING_READ(GEN8_MASTER_IRQ);
+ i9xx_pipestat_irq_reset(dev_priv);
- gen8_gt_irq_reset(dev_priv);
-
- GEN5_IRQ_RESET(GEN8_PCU_);
-
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
- vlv_display_irq_reset(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void ironlake_irq_uninstall(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (!dev_priv)
- return;
-
- ironlake_irq_reset(dev);
-}
-
-static void i8xx_irq_preinstall(struct drm_device * dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe;
+ I915_WRITE16(HWSTAM, 0xffff);
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0);
- I915_WRITE16(IMR, 0xffff);
- I915_WRITE16(IER, 0x0);
- POSTING_READ16(IER);
+ GEN2_IRQ_RESET();
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ u16 enable_mask;
- I915_WRITE16(EMR,
- ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+ I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
- I915_WRITE16(IMR, dev_priv->irq_mask);
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
- I915_WRITE16(IER,
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_USER_INTERRUPT);
- POSTING_READ16(IER);
+ enable_mask =
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_USER_INTERRUPT;
+
+ GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -3608,17 +3712,11 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}
-/*
- * Returns true when a page flip has completed.
- */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = to_i915(dev);
- u16 iir, new_iir;
- u32 pipe_stats[2];
- int pipe;
- irqreturn_t ret;
+ irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
@@ -3626,96 +3724,50 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- ret = IRQ_NONE;
- iir = I915_READ16(IIR);
- if (iir == 0)
- goto out;
+ do {
+ u32 pipe_stats[I915_MAX_PIPES] = {};
+ u16 iir;
- while (iir) {
- /* Can't rely on pipestat interrupt bit in iir as it might
- * have been cleared after the pipestat interrupt was received.
- * It doesn't set the bit in iir again, but it still produces
- * interrupts (for non-MSI).
- */
- spin_lock(&dev_priv->irq_lock);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+ iir = I915_READ16(IIR);
+ if (iir == 0)
+ break;
- for_each_pipe(dev_priv, pipe) {
- i915_reg_t reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg);
+ ret = IRQ_HANDLED;
- /*
- * Clear the PIPE*STAT regs before the IIR
- */
- if (pipe_stats[pipe] & 0x8000ffff)
- I915_WRITE(reg, pipe_stats[pipe]);
- }
- spin_unlock(&dev_priv->irq_lock);
+ /* Call regardless, as some status bits might not be
+ * signalled in iir */
+ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
I915_WRITE16(IIR, iir);
- new_iir = I915_READ16(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
notify_ring(dev_priv->engine[RCS]);
- for_each_pipe(dev_priv, pipe) {
- int plane = pipe;
- if (HAS_FBC(dev_priv))
- plane = !plane;
-
- if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
-
- if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
-
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- intel_cpu_fifo_underrun_irq_handler(dev_priv,
- pipe);
- }
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
- iir = new_iir;
- }
- ret = IRQ_HANDLED;
+ i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ } while (0);
-out:
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
-static void i8xx_irq_uninstall(struct drm_device * dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe;
-
- for_each_pipe(dev_priv, pipe) {
- /* Clear enable bits; then clear status bits */
- I915_WRITE(PIPESTAT(pipe), 0);
- I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
- }
- I915_WRITE16(IMR, 0xffff);
- I915_WRITE16(IER, 0x0);
- I915_WRITE16(IIR, I915_READ16(IIR));
-}
-
-static void i915_irq_preinstall(struct drm_device * dev)
+static void i915_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe;
if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
- I915_WRITE16(HWSTAM, 0xeffe);
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0);
- I915_WRITE(IMR, 0xffffffff);
- I915_WRITE(IER, 0x0);
- POSTING_READ(IER);
+ i9xx_pipestat_irq_reset(dev_priv);
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+
+ GEN3_IRQ_RESET();
}
static int i915_irq_postinstall(struct drm_device *dev)
@@ -3723,15 +3775,14 @@ static int i915_irq_postinstall(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
u32 enable_mask;
- I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
+ I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
+ I915_ERROR_MEMORY_REFRESH));
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
~(I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
enable_mask =
I915_ASLE_INTERRUPT |
@@ -3740,20 +3791,13 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_USER_INTERRUPT;
if (I915_HAS_HOTPLUG(dev_priv)) {
- i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- POSTING_READ(PORT_HOTPLUG_EN);
-
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
}
- I915_WRITE(IMR, dev_priv->irq_mask);
- I915_WRITE(IER, enable_mask);
- POSTING_READ(IER);
-
- i915_enable_asle_pipestat(dev_priv);
+ GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -3762,6 +3806,8 @@ static int i915_irq_postinstall(struct drm_device *dev)
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irq(&dev_priv->irq_lock);
+ i915_enable_asle_pipestat(dev_priv);
+
return 0;
}
@@ -3769,8 +3815,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = to_i915(dev);
- u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
- int pipe, ret = IRQ_NONE;
+ irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
@@ -3778,131 +3823,56 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- iir = I915_READ(IIR);
do {
- bool irq_received = (iir) != 0;
- bool blc_event = false;
-
- /* Can't rely on pipestat interrupt bit in iir as it might
- * have been cleared after the pipestat interrupt was received.
- * It doesn't set the bit in iir again, but it still produces
- * interrupts (for non-MSI).
- */
- spin_lock(&dev_priv->irq_lock);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
-
- for_each_pipe(dev_priv, pipe) {
- i915_reg_t reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg);
-
- /* Clear the PIPE*STAT regs before the IIR */
- if (pipe_stats[pipe] & 0x8000ffff) {
- I915_WRITE(reg, pipe_stats[pipe]);
- irq_received = true;
- }
- }
- spin_unlock(&dev_priv->irq_lock);
+ u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 hotplug_status = 0;
+ u32 iir;
- if (!irq_received)
+ iir = I915_READ(IIR);
+ if (iir == 0)
break;
- /* Consume port. Then clear IIR or we'll miss events */
+ ret = IRQ_HANDLED;
+
if (I915_HAS_HOTPLUG(dev_priv) &&
- iir & I915_DISPLAY_PORT_INTERRUPT) {
- u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
- if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
- }
+ iir & I915_DISPLAY_PORT_INTERRUPT)
+ hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+
+ /* Call regardless, as some status bits might not be
+ * signalled in iir */
+ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
I915_WRITE(IIR, iir);
- new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
notify_ring(dev_priv->engine[RCS]);
- for_each_pipe(dev_priv, pipe) {
- int plane = pipe;
- if (HAS_FBC(dev_priv))
- plane = !plane;
-
- if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
-
- if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
- blc_event = true;
-
- if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
-
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- intel_cpu_fifo_underrun_irq_handler(dev_priv,
- pipe);
- }
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
- if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev_priv);
+ if (hotplug_status)
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
- /* With MSI, interrupts are only generated when iir
- * transitions from zero to nonzero. If another bit got
- * set while we were handling the existing iir bits, then
- * we would never get another interrupt.
- *
- * This is fine on non-MSI as well, as if we hit this path
- * we avoid exiting the interrupt handler only to generate
- * another one.
- *
- * Note that for MSI this could cause a stray interrupt report
- * if an interrupt landed in the time between writing IIR and
- * the posting read. This should be rare enough to never
- * trigger the 99% of 100,000 interrupts test for disabling
- * stray interrupts.
- */
- ret = IRQ_HANDLED;
- iir = new_iir;
- } while (iir);
+ i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ } while (0);
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
-static void i915_irq_uninstall(struct drm_device * dev)
+static void i965_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe;
-
- if (I915_HAS_HOTPLUG(dev_priv)) {
- i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- }
-
- I915_WRITE16(HWSTAM, 0xffff);
- for_each_pipe(dev_priv, pipe) {
- /* Clear enable bits; then clear status bits */
- I915_WRITE(PIPESTAT(pipe), 0);
- I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
- }
- I915_WRITE(IMR, 0xffffffff);
- I915_WRITE(IER, 0x0);
-
- I915_WRITE(IIR, I915_READ(IIR));
-}
-
-static void i965_irq_preinstall(struct drm_device * dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- I915_WRITE(HWSTAM, 0xeffe);
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0);
- I915_WRITE(IMR, 0xffffffff);
- I915_WRITE(IER, 0x0);
- POSTING_READ(IER);
+ i9xx_pipestat_irq_reset(dev_priv);
+
+ I915_WRITE(HWSTAM, 0xffffffff);
+
+ GEN3_IRQ_RESET();
}
static int i965_irq_postinstall(struct drm_device *dev)
@@ -3911,31 +3881,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
u32 enable_mask;
u32 error_mask;
- /* Unmask the interrupts that we always want on. */
- dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
- I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
-
- enable_mask = ~dev_priv->irq_mask;
- enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
- enable_mask |= I915_USER_INTERRUPT;
-
- if (IS_G4X(dev_priv))
- enable_mask |= I915_BSD_USER_INTERRUPT;
-
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
- i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
- spin_unlock_irq(&dev_priv->irq_lock);
-
/*
* Enable some error detection, note the instruction error mask
* bit is reserved, so we leave it masked.
@@ -3951,12 +3896,34 @@ static int i965_irq_postinstall(struct drm_device *dev)
}
I915_WRITE(EMR, error_mask);
- I915_WRITE(IMR, dev_priv->irq_mask);
- I915_WRITE(IER, enable_mask);
- POSTING_READ(IER);
+ /* Unmask the interrupts that we always want on. */
+ dev_priv->irq_mask =
+ ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
- i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- POSTING_READ(PORT_HOTPLUG_EN);
+ enable_mask =
+ I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+ I915_USER_INTERRUPT;
+
+ if (IS_G4X(dev_priv))
+ enable_mask |= I915_BSD_USER_INTERRUPT;
+
+ GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
+
+ /* Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irq(&dev_priv->irq_lock);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
+ spin_unlock_irq(&dev_priv->irq_lock);
i915_enable_asle_pipestat(dev_priv);
@@ -3992,9 +3959,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = to_i915(dev);
- u32 iir, new_iir;
- u32 pipe_stats[I915_MAX_PIPES];
- int ret = IRQ_NONE, pipe;
+ irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
@@ -4002,121 +3967,46 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- iir = I915_READ(IIR);
-
- for (;;) {
- bool irq_received = (iir) != 0;
- bool blc_event = false;
-
- /* Can't rely on pipestat interrupt bit in iir as it might
- * have been cleared after the pipestat interrupt was received.
- * It doesn't set the bit in iir again, but it still produces
- * interrupts (for non-MSI).
- */
- spin_lock(&dev_priv->irq_lock);
- if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
-
- for_each_pipe(dev_priv, pipe) {
- i915_reg_t reg = PIPESTAT(pipe);
- pipe_stats[pipe] = I915_READ(reg);
-
- /*
- * Clear the PIPE*STAT regs before the IIR
- */
- if (pipe_stats[pipe] & 0x8000ffff) {
- I915_WRITE(reg, pipe_stats[pipe]);
- irq_received = true;
- }
- }
- spin_unlock(&dev_priv->irq_lock);
+ do {
+ u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 hotplug_status = 0;
+ u32 iir;
- if (!irq_received)
+ iir = I915_READ(IIR);
+ if (iir == 0)
break;
ret = IRQ_HANDLED;
- /* Consume port. Then clear IIR or we'll miss events */
- if (iir & I915_DISPLAY_PORT_INTERRUPT) {
- u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
- if (hotplug_status)
- i9xx_hpd_irq_handler(dev_priv, hotplug_status);
- }
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+
+ /* Call regardless, as some status bits might not be
+ * signalled in iir */
+ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
I915_WRITE(IIR, iir);
- new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
notify_ring(dev_priv->engine[RCS]);
+
if (iir & I915_BSD_USER_INTERRUPT)
notify_ring(dev_priv->engine[VCS]);
- for_each_pipe(dev_priv, pipe) {
- if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
-
- if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
- blc_event = true;
-
- if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev_priv, pipe);
-
- if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
- intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
- }
-
- if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev_priv);
+ if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
- if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
- gmbus_irq_handler(dev_priv);
+ if (hotplug_status)
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
- /* With MSI, interrupts are only generated when iir
- * transitions from zero to nonzero. If another bit got
- * set while we were handling the existing iir bits, then
- * we would never get another interrupt.
- *
- * This is fine on non-MSI as well, as if we hit this path
- * we avoid exiting the interrupt handler only to generate
- * another one.
- *
- * Note that for MSI this could cause a stray interrupt report
- * if an interrupt landed in the time between writing IIR and
- * the posting read. This should be rare enough to never
- * trigger the 99% of 100,000 interrupts test for disabling
- * stray interrupts.
- */
- iir = new_iir;
- }
+ i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
+ } while (0);
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
-static void i965_irq_uninstall(struct drm_device * dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe;
-
- if (!dev_priv)
- return;
-
- i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-
- I915_WRITE(HWSTAM, 0xffffffff);
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0);
- I915_WRITE(IMR, 0xffffffff);
- I915_WRITE(IER, 0x0);
-
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe),
- I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
- I915_WRITE(IIR, I915_READ(IIR));
-}
-
/**
* intel_irq_init - initializes irq support
* @dev_priv: i915 device instance
@@ -4127,11 +4017,12 @@ static void i965_irq_uninstall(struct drm_device * dev)
void intel_irq_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
int i;
intel_hpd_init_work(dev_priv);
- INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+ INIT_WORK(&rps->work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
for (i = 0; i < MAX_L3_SLICES; ++i)
@@ -4147,7 +4038,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
- dev_priv->rps.pm_intrmsk_mbz = 0;
+ rps->pm_intrmsk_mbz = 0;
/*
* SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
@@ -4156,10 +4047,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
* TODO: verify if this can be reproduced on VLV,CHV.
*/
if (INTEL_GEN(dev_priv) <= 7)
- dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
+ rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
if (INTEL_GEN(dev_priv) >= 8)
- dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+ rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
if (IS_GEN2(dev_priv)) {
/* Gen2 doesn't have a hardware frame counter */
@@ -4197,17 +4088,17 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (IS_CHERRYVIEW(dev_priv)) {
dev->driver->irq_handler = cherryview_irq_handler;
- dev->driver->irq_preinstall = cherryview_irq_preinstall;
+ dev->driver->irq_preinstall = cherryview_irq_reset;
dev->driver->irq_postinstall = cherryview_irq_postinstall;
- dev->driver->irq_uninstall = cherryview_irq_uninstall;
+ dev->driver->irq_uninstall = cherryview_irq_reset;
dev->driver->enable_vblank = i965_enable_vblank;
dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_VALLEYVIEW(dev_priv)) {
dev->driver->irq_handler = valleyview_irq_handler;
- dev->driver->irq_preinstall = valleyview_irq_preinstall;
+ dev->driver->irq_preinstall = valleyview_irq_reset;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
- dev->driver->irq_uninstall = valleyview_irq_uninstall;
+ dev->driver->irq_uninstall = valleyview_irq_reset;
dev->driver->enable_vblank = i965_enable_vblank;
dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
@@ -4215,7 +4106,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_reset;
dev->driver->irq_postinstall = gen8_irq_postinstall;
- dev->driver->irq_uninstall = gen8_irq_uninstall;
+ dev->driver->irq_uninstall = gen8_irq_reset;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_GEN9_LP(dev_priv))
@@ -4229,29 +4120,29 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_reset;
dev->driver->irq_postinstall = ironlake_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->irq_uninstall = ironlake_irq_reset;
dev->driver->enable_vblank = ironlake_enable_vblank;
dev->driver->disable_vblank = ironlake_disable_vblank;
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
} else {
if (IS_GEN2(dev_priv)) {
- dev->driver->irq_preinstall = i8xx_irq_preinstall;
+ dev->driver->irq_preinstall = i8xx_irq_reset;
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
- dev->driver->irq_uninstall = i8xx_irq_uninstall;
+ dev->driver->irq_uninstall = i8xx_irq_reset;
dev->driver->enable_vblank = i8xx_enable_vblank;
dev->driver->disable_vblank = i8xx_disable_vblank;
} else if (IS_GEN3(dev_priv)) {
- dev->driver->irq_preinstall = i915_irq_preinstall;
+ dev->driver->irq_preinstall = i915_irq_reset;
dev->driver->irq_postinstall = i915_irq_postinstall;
- dev->driver->irq_uninstall = i915_irq_uninstall;
+ dev->driver->irq_uninstall = i915_irq_reset;
dev->driver->irq_handler = i915_irq_handler;
dev->driver->enable_vblank = i8xx_enable_vblank;
dev->driver->disable_vblank = i8xx_disable_vblank;
} else {
- dev->driver->irq_preinstall = i965_irq_preinstall;
+ dev->driver->irq_preinstall = i965_irq_reset;
dev->driver->irq_postinstall = i965_irq_postinstall;
- dev->driver->irq_uninstall = i965_irq_uninstall;
+ dev->driver->irq_uninstall = i965_irq_reset;
dev->driver->irq_handler = i965_irq_handler;
dev->driver->enable_vblank = i965_enable_vblank;
dev->driver->disable_vblank = i965_disable_vblank;
@@ -4293,7 +4184,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
* interrupts as enabled _before_ actually enabling them to avoid
* special cases in our ordering checks.
*/
- dev_priv->pm.irqs_enabled = true;
+ dev_priv->runtime_pm.irqs_enabled = true;
return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}
@@ -4309,7 +4200,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
drm_irq_uninstall(&dev_priv->drm);
intel_hpd_cancel_work(dev_priv);
- dev_priv->pm.irqs_enabled = false;
+ dev_priv->runtime_pm.irqs_enabled = false;
}
/**
@@ -4322,7 +4213,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
- dev_priv->pm.irqs_enabled = false;
+ dev_priv->runtime_pm.irqs_enabled = false;
synchronize_irq(dev_priv->drm.irq);
}
@@ -4335,7 +4226,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
- dev_priv->pm.irqs_enabled = true;
+ dev_priv->runtime_pm.irqs_enabled = true;
dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
new file mode 100644
index 000000000000..368c87d7ee9a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
@@ -0,0 +1,109 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_cflgt2.h"
+
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x2744), 0x00800000 },
+ { _MMIO(0x2714), 0xf0800000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x2724), 0xf0800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2770), 0x00000004 },
+ { _MMIO(0x2774), 0x00000000 },
+ { _MMIO(0x2778), 0x00000003 },
+ { _MMIO(0x277c), 0x00000000 },
+ { _MMIO(0x2780), 0x00000007 },
+ { _MMIO(0x2784), 0x00000000 },
+ { _MMIO(0x2788), 0x00100002 },
+ { _MMIO(0x278c), 0x0000fff7 },
+ { _MMIO(0x2790), 0x00100002 },
+ { _MMIO(0x2794), 0x0000ffcf },
+ { _MMIO(0x2798), 0x00100082 },
+ { _MMIO(0x279c), 0x0000ffef },
+ { _MMIO(0x27a0), 0x001000c2 },
+ { _MMIO(0x27a4), 0x0000ffe7 },
+ { _MMIO(0x27a8), 0x00100001 },
+ { _MMIO(0x27ac), 0x0000ffe7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x9840), 0x00000080 },
+ { _MMIO(0x9888), 0x11810000 },
+ { _MMIO(0x9888), 0x07810013 },
+ { _MMIO(0x9888), 0x1f810000 },
+ { _MMIO(0x9888), 0x1d810000 },
+ { _MMIO(0x9888), 0x1b930040 },
+ { _MMIO(0x9888), 0x07e54000 },
+ { _MMIO(0x9888), 0x1f908000 },
+ { _MMIO(0x9888), 0x11900000 },
+ { _MMIO(0x9888), 0x37900000 },
+ { _MMIO(0x9888), 0x53900000 },
+ { _MMIO(0x9888), 0x45900000 },
+ { _MMIO(0x9888), 0x33900000 },
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "1\n");
+}
+
+void
+i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
+{
+ strncpy(dev_priv->perf.oa.test_config.uuid,
+ "74fb4902-d3d3-4237-9e90-cbdc68d0a446",
+ UUID_STRING_LEN);
+ dev_priv->perf.oa.test_config.id = 1;
+
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
+
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
+}
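The autogenerated file above only fills in the register tables and the single read-only "id" attribute; publishing the attribute group under sysfs happens elsewhere in the perf code. A minimal, hypothetical illustration of that registration pattern (the helper name and kobject are not from i915):

#include <linux/kobject.h>
#include <linux/sysfs.h>

/* Hypothetical helper: expose a metric's attribute_group. Because the
 * group has a .name, sysfs creates <parent>/<name>/ with one file per
 * attribute (here just "id", mode 0444).
 */
static int example_publish_metric(struct kobject *parent,
				  const struct attribute_group *group)
{
	return sysfs_create_group(parent, group);
}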
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
new file mode 100644
index 000000000000..1f3268ef2ea2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
@@ -0,0 +1,34 @@
+/*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ *
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_OA_CFLGT2_H__
+#define __I915_OA_CFLGT2_H__
+
+extern void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 8ab003dca113..b4faeb6aa2bd 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -25,235 +25,168 @@
#include "i915_params.h"
#include "i915_drv.h"
-struct i915_params i915 __read_mostly = {
- .modeset = -1,
- .panel_ignore_lid = 1,
- .semaphores = -1,
- .lvds_channel_mode = 0,
- .panel_use_ssc = -1,
- .vbt_sdvo_panel_type = -1,
- .enable_rc6 = -1,
- .enable_dc = -1,
- .enable_fbc = -1,
- .enable_execlists = -1,
- .enable_hangcheck = true,
- .enable_ppgtt = -1,
- .enable_psr = -1,
- .alpha_support = IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT),
- .disable_power_well = -1,
- .enable_ips = 1,
- .fastboot = 0,
- .prefault_disable = 0,
- .load_detect_test = 0,
- .force_reset_modeset_test = 0,
- .reset = 2,
- .error_capture = true,
- .invert_brightness = 0,
- .disable_display = 0,
- .enable_cmd_parser = true,
- .use_mmio_flip = 0,
- .mmio_debug = 0,
- .verbose_state_checks = 1,
- .nuclear_pageflip = 0,
- .edp_vswing = 0,
- .enable_guc_loading = 0,
- .enable_guc_submission = 0,
- .guc_log_level = -1,
- .guc_firmware_path = NULL,
- .huc_firmware_path = NULL,
- .enable_dp_mst = true,
- .inject_load_failure = 0,
- .enable_dpcd_backlight = false,
- .enable_gvt = false,
+#define i915_param_named(name, T, perm, desc) \
+ module_param_named(name, i915_modparams.name, T, perm); \
+ MODULE_PARM_DESC(name, desc)
+#define i915_param_named_unsafe(name, T, perm, desc) \
+ module_param_named_unsafe(name, i915_modparams.name, T, perm); \
+ MODULE_PARM_DESC(name, desc)
+
+struct i915_params i915_modparams __read_mostly = {
+#define MEMBER(T, member, value) .member = (value),
+ I915_PARAMS_FOR_EACH(MEMBER)
+#undef MEMBER
};
-module_param_named(modeset, i915.modeset, int, 0400);
-MODULE_PARM_DESC(modeset,
+i915_param_named(modeset, int, 0400,
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
-module_param_named_unsafe(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
-MODULE_PARM_DESC(panel_ignore_lid,
+i915_param_named_unsafe(panel_ignore_lid, int, 0600,
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
-module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
-MODULE_PARM_DESC(semaphores,
+i915_param_named_unsafe(semaphores, int, 0400,
"Use semaphores for inter-ring sync "
"(default: -1 (use per-chip defaults))");
-module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400);
-MODULE_PARM_DESC(enable_rc6,
+i915_param_named_unsafe(enable_rc6, int, 0400,
"Enable power-saving render C-state 6. "
"Different stages can be selected via bitmask values "
"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)");
-module_param_named_unsafe(enable_dc, i915.enable_dc, int, 0400);
-MODULE_PARM_DESC(enable_dc,
+i915_param_named_unsafe(enable_dc, int, 0400,
"Enable power-saving display C-states. "
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
-module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
-MODULE_PARM_DESC(enable_fbc,
+i915_param_named_unsafe(enable_fbc, int, 0600,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
-module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0400);
-MODULE_PARM_DESC(lvds_channel_mode,
+i915_param_named_unsafe(lvds_channel_mode, int, 0400,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
-module_param_named_unsafe(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
-MODULE_PARM_DESC(lvds_use_ssc,
+i915_param_named_unsafe(panel_use_ssc, int, 0600,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");
-module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0400);
-MODULE_PARM_DESC(vbt_sdvo_panel_type,
+i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-module_param_named_unsafe(reset, i915.reset, int, 0600);
-MODULE_PARM_DESC(reset, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
+i915_param_named_unsafe(reset, int, 0600,
+ "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
-module_param_named_unsafe(vbt_firmware, i915.vbt_firmware, charp, 0400);
-MODULE_PARM_DESC(vbt_firmware,
- "Load VBT from specified file under /lib/firmware");
+i915_param_named_unsafe(vbt_firmware, charp, 0400,
+ "Load VBT from specified file under /lib/firmware");
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-module_param_named(error_capture, i915.error_capture, bool, 0600);
-MODULE_PARM_DESC(error_capture,
+i915_param_named(error_capture, bool, 0600,
"Record the GPU state following a hang. "
"This information in /sys/class/drm/card<N>/error is vital for "
"triaging and debugging hangs.");
#endif
-module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
-MODULE_PARM_DESC(enable_hangcheck,
+i915_param_named_unsafe(enable_hangcheck, bool, 0644,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
-module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
-MODULE_PARM_DESC(enable_ppgtt,
+i915_param_named_unsafe(enable_ppgtt, int, 0400,
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
-module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400);
-MODULE_PARM_DESC(enable_execlists,
+i915_param_named_unsafe(enable_execlists, int, 0400,
"Override execlists usage. "
"(-1=auto [default], 0=disabled, 1=enabled)");
-module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR "
- "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
- "Default: -1 (use per-chip default)");
+i915_param_named_unsafe(enable_psr, int, 0600,
+ "Enable PSR "
+ "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
+ "Default: -1 (use per-chip default)");
-module_param_named_unsafe(alpha_support, i915.alpha_support, bool, 0400);
-MODULE_PARM_DESC(alpha_support,
+i915_param_named_unsafe(alpha_support, bool, 0400,
"Enable alpha quality driver support for latest hardware. "
"See also CONFIG_DRM_I915_ALPHA_SUPPORT.");
-module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400);
-MODULE_PARM_DESC(disable_power_well,
+i915_param_named_unsafe(disable_power_well, int, 0400,
"Disable display power wells when possible "
"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
-module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600);
-MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
+i915_param_named_unsafe(enable_ips, int, 0600, "Enable IPS (default: true)");
-module_param_named(fastboot, i915.fastboot, bool, 0600);
-MODULE_PARM_DESC(fastboot,
+i915_param_named(fastboot, bool, 0600,
"Try to skip unnecessary mode sets at boot time (default: false)");
-module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
-MODULE_PARM_DESC(prefault_disable,
+i915_param_named_unsafe(prefault_disable, bool, 0600,
"Disable page prefaulting for pread/pwrite/reloc (default:false). "
"For developers only.");
-module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600);
-MODULE_PARM_DESC(load_detect_test,
+i915_param_named_unsafe(load_detect_test, bool, 0600,
"Force-enable the VGA load detect code for testing (default:false). "
"For developers only.");
-module_param_named_unsafe(force_reset_modeset_test, i915.force_reset_modeset_test, bool, 0600);
-MODULE_PARM_DESC(force_reset_modeset_test,
+i915_param_named_unsafe(force_reset_modeset_test, bool, 0600,
"Force a modeset during gpu reset for testing (default:false). "
"For developers only.");
-module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600);
-MODULE_PARM_DESC(invert_brightness,
+i915_param_named_unsafe(invert_brightness, int, 0600,
"Invert backlight brightness "
"(-1 force normal, 0 machine defaults, 1 force inversion), please "
"report PCI device ID, subsystem vendor and subsystem device ID "
"to dri-devel@lists.freedesktop.org, if your machine needs it. "
"It will then be included in an upcoming module version.");
-module_param_named(disable_display, i915.disable_display, bool, 0400);
-MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
+i915_param_named(disable_display, bool, 0400,
+ "Disable display (default: false)");
-module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400);
-MODULE_PARM_DESC(enable_cmd_parser,
- "Enable command parsing (true=enabled [default], false=disabled)");
+i915_param_named_unsafe(enable_cmd_parser, bool, 0400,
+ "Enable command parsing (true=enabled [default], false=disabled)");
-module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600);
-MODULE_PARM_DESC(use_mmio_flip,
- "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
-
-module_param_named(mmio_debug, i915.mmio_debug, int, 0600);
-MODULE_PARM_DESC(mmio_debug,
+i915_param_named(mmio_debug, int, 0600,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
-module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
-MODULE_PARM_DESC(verbose_state_checks,
+i915_param_named(verbose_state_checks, bool, 0600,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
-module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0400);
-MODULE_PARM_DESC(nuclear_pageflip,
- "Force enable atomic functionality on platforms that don't have full support yet.");
+i915_param_named_unsafe(nuclear_pageflip, bool, 0400,
+ "Force enable atomic functionality on platforms that don't have full support yet.");
/* WA to get away with the default setting in VBT for early platforms.Will be removed */
-module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
-MODULE_PARM_DESC(edp_vswing,
- "Ignore/Override vswing pre-emph table selection from VBT "
- "(0=use value from vbt [default], 1=low power swing(200mV),"
- "2=default swing(400mV))");
-
-module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400);
-MODULE_PARM_DESC(enable_guc_loading,
- "Enable GuC firmware loading "
- "(-1=auto, 0=never [default], 1=if available, 2=required)");
-
-module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400);
-MODULE_PARM_DESC(enable_guc_submission,
- "Enable GuC submission "
- "(-1=auto, 0=never [default], 1=if available, 2=required)");
-
-module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
-MODULE_PARM_DESC(guc_log_level,
+i915_param_named_unsafe(edp_vswing, int, 0400,
+ "Ignore/Override vswing pre-emph table selection from VBT "
+ "(0=use value from vbt [default], 1=low power swing(200mV),"
+ "2=default swing(400mV))");
+
+i915_param_named_unsafe(enable_guc_loading, int, 0400,
+ "Enable GuC firmware loading "
+ "(-1=auto, 0=never [default], 1=if available, 2=required)");
+
+i915_param_named_unsafe(enable_guc_submission, int, 0400,
+ "Enable GuC submission "
+ "(-1=auto, 0=never [default], 1=if available, 2=required)");
+
+i915_param_named(guc_log_level, int, 0400,
"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
-module_param_named_unsafe(guc_firmware_path, i915.guc_firmware_path, charp, 0400);
-MODULE_PARM_DESC(guc_firmware_path,
+i915_param_named_unsafe(guc_firmware_path, charp, 0400,
"GuC firmware path to use instead of the default one");
-module_param_named_unsafe(huc_firmware_path, i915.huc_firmware_path, charp, 0400);
-MODULE_PARM_DESC(huc_firmware_path,
+i915_param_named_unsafe(huc_firmware_path, charp, 0400,
"HuC firmware path to use instead of the default one");
-module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600);
-MODULE_PARM_DESC(enable_dp_mst,
+i915_param_named_unsafe(enable_dp_mst, bool, 0600,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
-module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
-MODULE_PARM_DESC(inject_load_failure,
+
+i915_param_named_unsafe(inject_load_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
-module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
-MODULE_PARM_DESC(enable_dpcd_backlight,
+
+i915_param_named(enable_dpcd_backlight, bool, 0600,
"Enable support for DPCD backlight control (default:false)");
-module_param_named(enable_gvt, i915.enable_gvt, bool, 0400);
-MODULE_PARM_DESC(enable_gvt,
+i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index ac844709c97e..c7292268ed43 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -27,56 +27,55 @@
#include <linux/cache.h> /* for __read_mostly */
-#define I915_PARAMS_FOR_EACH(func) \
- func(char *, vbt_firmware); \
- func(int, modeset); \
- func(int, panel_ignore_lid); \
- func(int, semaphores); \
- func(int, lvds_channel_mode); \
- func(int, panel_use_ssc); \
- func(int, vbt_sdvo_panel_type); \
- func(int, enable_rc6); \
- func(int, enable_dc); \
- func(int, enable_fbc); \
- func(int, enable_ppgtt); \
- func(int, enable_execlists); \
- func(int, enable_psr); \
- func(int, disable_power_well); \
- func(int, enable_ips); \
- func(int, invert_brightness); \
- func(int, enable_guc_loading); \
- func(int, enable_guc_submission); \
- func(int, guc_log_level); \
- func(char *, guc_firmware_path); \
- func(char *, huc_firmware_path); \
- func(int, use_mmio_flip); \
- func(int, mmio_debug); \
- func(int, edp_vswing); \
- func(int, reset); \
- func(unsigned int, inject_load_failure); \
+#define I915_PARAMS_FOR_EACH(param) \
+ param(char *, vbt_firmware, NULL) \
+ param(int, modeset, -1) \
+ param(int, panel_ignore_lid, 1) \
+ param(int, semaphores, -1) \
+ param(int, lvds_channel_mode, 0) \
+ param(int, panel_use_ssc, -1) \
+ param(int, vbt_sdvo_panel_type, -1) \
+ param(int, enable_rc6, -1) \
+ param(int, enable_dc, -1) \
+ param(int, enable_fbc, -1) \
+ param(int, enable_ppgtt, -1) \
+ param(int, enable_execlists, -1) \
+ param(int, enable_psr, -1) \
+ param(int, disable_power_well, -1) \
+ param(int, enable_ips, 1) \
+ param(int, invert_brightness, 0) \
+ param(int, enable_guc_loading, 0) \
+ param(int, enable_guc_submission, 0) \
+ param(int, guc_log_level, -1) \
+ param(char *, guc_firmware_path, NULL) \
+ param(char *, huc_firmware_path, NULL) \
+ param(int, mmio_debug, 0) \
+ param(int, edp_vswing, 0) \
+ param(int, reset, 2) \
+ param(unsigned int, inject_load_failure, 0) \
/* leave bools at the end to not create holes */ \
- func(bool, alpha_support); \
- func(bool, enable_cmd_parser); \
- func(bool, enable_hangcheck); \
- func(bool, fastboot); \
- func(bool, prefault_disable); \
- func(bool, load_detect_test); \
- func(bool, force_reset_modeset_test); \
- func(bool, error_capture); \
- func(bool, disable_display); \
- func(bool, verbose_state_checks); \
- func(bool, nuclear_pageflip); \
- func(bool, enable_dp_mst); \
- func(bool, enable_dpcd_backlight); \
- func(bool, enable_gvt)
+ param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
+ param(bool, enable_cmd_parser, true) \
+ param(bool, enable_hangcheck, true) \
+ param(bool, fastboot, false) \
+ param(bool, prefault_disable, false) \
+ param(bool, load_detect_test, false) \
+ param(bool, force_reset_modeset_test, false) \
+ param(bool, error_capture, true) \
+ param(bool, disable_display, false) \
+ param(bool, verbose_state_checks, true) \
+ param(bool, nuclear_pageflip, false) \
+ param(bool, enable_dp_mst, true) \
+ param(bool, enable_dpcd_backlight, false) \
+ param(bool, enable_gvt, false)
-#define MEMBER(T, member) T member
+#define MEMBER(T, member, ...) T member;
struct i915_params {
I915_PARAMS_FOR_EACH(MEMBER);
};
#undef MEMBER
-extern struct i915_params i915 __read_mostly;
+extern struct i915_params i915_modparams __read_mostly;
#endif
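I915_PARAMS_FOR_EACH() is now an X-macro that carries the default value next to the type and member name, so the struct definition here and the designated-initializer table in i915_params.c are generated from the same list. A small self-contained sketch of the pattern with generic, non-i915 names:

#include <stdbool.h>

/* X-macro table: type, member, default value */
#define PARAMS(param)			\
	param(int,  modeset, -1)	\
	param(bool, fastboot, false)

/* First expansion: declare the members. */
#define MEMBER(T, member, ...) T member;
struct params { PARAMS(MEMBER) };
#undef MEMBER

/* Second expansion: build the default-initialized instance. */
#define MEMBER(T, member, value) .member = (value),
static const struct params defaults = { PARAMS(MEMBER) };
#undef MEMBER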
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 09d97e0990b7..6458c309c039 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -54,8 +54,14 @@
.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
#define CHV_COLORS \
.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+#define GLK_COLORS \
+ .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
/* Keep in gen based order, and chronological order within a gen */
+
+#define GEN_DEFAULT_PAGE_SIZES \
+ .page_sizes = I915_GTT_PAGE_SIZE_4K
+
#define GEN2_FEATURES \
.gen = 2, .num_pipes = 1, \
.has_overlay = 1, .overlay_needs_physical = 1, \
@@ -63,22 +69,24 @@
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
+ .has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \
+ GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_i830_info = {
+static const struct intel_device_info intel_i830_info __initconst = {
GEN2_FEATURES,
.platform = INTEL_I830,
.is_mobile = 1, .cursor_needs_physical = 1,
.num_pipes = 2, /* legal, last one wins */
};
-static const struct intel_device_info intel_i845g_info = {
+static const struct intel_device_info intel_i845g_info __initconst = {
GEN2_FEATURES,
.platform = INTEL_I845G,
};
-static const struct intel_device_info intel_i85x_info = {
+static const struct intel_device_info intel_i85x_info __initconst = {
GEN2_FEATURES,
.platform = INTEL_I85X, .is_mobile = 1,
.num_pipes = 2, /* legal, last one wins */
@@ -86,7 +94,7 @@ static const struct intel_device_info intel_i85x_info = {
.has_fbc = 1,
};
-static const struct intel_device_info intel_i865g_info = {
+static const struct intel_device_info intel_i865g_info __initconst = {
GEN2_FEATURES,
.platform = INTEL_I865G,
};
@@ -95,10 +103,12 @@ static const struct intel_device_info intel_i865g_info = {
.gen = 3, .num_pipes = 2, \
.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
+ .has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \
+ GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_i915g_info = {
+static const struct intel_device_info intel_i915g_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_I915G, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
@@ -106,7 +116,7 @@ static const struct intel_device_info intel_i915g_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i915gm_info = {
+static const struct intel_device_info intel_i915gm_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_I915GM,
.is_mobile = 1,
@@ -118,7 +128,7 @@ static const struct intel_device_info intel_i915gm_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i945g_info = {
+static const struct intel_device_info intel_i945g_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_I945G,
.has_hotplug = 1, .cursor_needs_physical = 1,
@@ -127,7 +137,7 @@ static const struct intel_device_info intel_i945g_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i945gm_info = {
+static const struct intel_device_info intel_i945gm_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_I945GM, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
@@ -138,14 +148,14 @@ static const struct intel_device_info intel_i945gm_info = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_g33_info = {
+static const struct intel_device_info intel_g33_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_G33,
.has_hotplug = 1,
.has_overlay = 1,
};
-static const struct intel_device_info intel_pineview_info = {
+static const struct intel_device_info intel_pineview_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_PINEVIEW, .is_mobile = 1,
.has_hotplug = 1,
@@ -157,37 +167,39 @@ static const struct intel_device_info intel_pineview_info = {
.has_hotplug = 1, \
.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
+ .has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \
+ GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_i965g_info = {
+static const struct intel_device_info intel_i965g_info __initconst = {
GEN4_FEATURES,
.platform = INTEL_I965G,
.has_overlay = 1,
.hws_needs_physical = 1,
+ .has_snoop = false,
};
-static const struct intel_device_info intel_i965gm_info = {
+static const struct intel_device_info intel_i965gm_info __initconst = {
GEN4_FEATURES,
.platform = INTEL_I965GM,
.is_mobile = 1, .has_fbc = 1,
.has_overlay = 1,
.supports_tv = 1,
.hws_needs_physical = 1,
+ .has_snoop = false,
};
-static const struct intel_device_info intel_g45_info = {
+static const struct intel_device_info intel_g45_info __initconst = {
GEN4_FEATURES,
.platform = INTEL_G45,
- .has_pipe_cxsr = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
-static const struct intel_device_info intel_gm45_info = {
+static const struct intel_device_info intel_gm45_info __initconst = {
GEN4_FEATURES,
.platform = INTEL_GM45,
.is_mobile = 1, .has_fbc = 1,
- .has_pipe_cxsr = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
@@ -195,17 +207,18 @@ static const struct intel_device_info intel_gm45_info = {
#define GEN5_FEATURES \
.gen = 5, .num_pipes = 2, \
.has_hotplug = 1, \
- .has_gmbus_irq = 1, \
.ring_mask = RENDER_RING | BSD_RING, \
+ .has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \
+ GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_ironlake_d_info = {
+static const struct intel_device_info intel_ironlake_d_info __initconst = {
GEN5_FEATURES,
.platform = INTEL_IRONLAKE,
};
-static const struct intel_device_info intel_ironlake_m_info = {
+static const struct intel_device_info intel_ironlake_m_info __initconst = {
GEN5_FEATURES,
.platform = INTEL_IRONLAKE,
.is_mobile = 1, .has_fbc = 1,
@@ -219,20 +232,39 @@ static const struct intel_device_info intel_ironlake_m_info = {
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .has_gmbus_irq = 1, \
.has_aliasing_ppgtt = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
+ GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_sandybridge_d_info = {
- GEN6_FEATURES,
- .platform = INTEL_SANDYBRIDGE,
+#define SNB_D_PLATFORM \
+ GEN6_FEATURES, \
+ .platform = INTEL_SANDYBRIDGE
+
+static const struct intel_device_info intel_sandybridge_d_gt1_info __initconst = {
+ SNB_D_PLATFORM,
+ .gt = 1,
};
-static const struct intel_device_info intel_sandybridge_m_info = {
- GEN6_FEATURES,
- .platform = INTEL_SANDYBRIDGE,
- .is_mobile = 1,
+static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst = {
+ SNB_D_PLATFORM,
+ .gt = 2,
+};
+
+#define SNB_M_PLATFORM \
+ GEN6_FEATURES, \
+ .platform = INTEL_SANDYBRIDGE, \
+ .is_mobile = 1
+
+static const struct intel_device_info intel_sandybridge_m_gt1_info __initconst = {
+ SNB_M_PLATFORM,
+ .gt = 1,
+};
+
+static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = {
+ SNB_M_PLATFORM,
+ .gt = 2,
};
#define GEN7_FEATURES \
@@ -243,33 +275,52 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .has_gmbus_irq = 1, \
.has_aliasing_ppgtt = 1, \
.has_full_ppgtt = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
+ GEN_DEFAULT_PAGE_SIZES, \
IVB_CURSOR_OFFSETS
-static const struct intel_device_info intel_ivybridge_d_info = {
- GEN7_FEATURES,
- .platform = INTEL_IVYBRIDGE,
- .has_l3_dpf = 1,
+#define IVB_D_PLATFORM \
+ GEN7_FEATURES, \
+ .platform = INTEL_IVYBRIDGE, \
+ .has_l3_dpf = 1
+
+static const struct intel_device_info intel_ivybridge_d_gt1_info __initconst = {
+ IVB_D_PLATFORM,
+ .gt = 1,
};
-static const struct intel_device_info intel_ivybridge_m_info = {
- GEN7_FEATURES,
- .platform = INTEL_IVYBRIDGE,
- .is_mobile = 1,
- .has_l3_dpf = 1,
+static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = {
+ IVB_D_PLATFORM,
+ .gt = 2,
};
-static const struct intel_device_info intel_ivybridge_q_info = {
+#define IVB_M_PLATFORM \
+ GEN7_FEATURES, \
+ .platform = INTEL_IVYBRIDGE, \
+ .is_mobile = 1, \
+ .has_l3_dpf = 1
+
+static const struct intel_device_info intel_ivybridge_m_gt1_info __initconst = {
+ IVB_M_PLATFORM,
+ .gt = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_m_gt2_info __initconst = {
+ IVB_M_PLATFORM,
+ .gt = 2,
+};
+
+static const struct intel_device_info intel_ivybridge_q_info __initconst = {
GEN7_FEATURES,
.platform = INTEL_IVYBRIDGE,
+ .gt = 2,
.num_pipes = 0, /* legal, last one wins */
.has_l3_dpf = 1,
};
-static const struct intel_device_info intel_valleyview_info = {
+static const struct intel_device_info intel_valleyview_info __initconst = {
.platform = INTEL_VALLEYVIEW,
.gen = 7,
.is_lp = 1,
@@ -277,18 +328,19 @@ static const struct intel_device_info intel_valleyview_info = {
.has_psr = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
- .has_gmbus_irq = 1,
.has_gmch_display = 1,
.has_hotplug = 1,
.has_aliasing_ppgtt = 1,
.has_full_ppgtt = 1,
+ .has_snoop = true,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.display_mmio_offset = VLV_DISPLAY_BASE,
+ GEN_DEFAULT_PAGE_SIZES,
GEN_DEFAULT_PIPEOFFSETS,
CURSOR_OFFSETS
};
-#define HSW_FEATURES \
+#define G75_FEATURES \
GEN7_FEATURES, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
.has_ddi = 1, \
@@ -299,35 +351,66 @@ static const struct intel_device_info intel_valleyview_info = {
.has_rc6p = 0 /* RC6p removed by HSW */, \
.has_runtime_pm = 1
-static const struct intel_device_info intel_haswell_info = {
- HSW_FEATURES,
- .platform = INTEL_HASWELL,
- .has_l3_dpf = 1,
+#define HSW_PLATFORM \
+ G75_FEATURES, \
+ .platform = INTEL_HASWELL, \
+ .has_l3_dpf = 1
+
+static const struct intel_device_info intel_haswell_gt1_info __initconst = {
+ HSW_PLATFORM,
+ .gt = 1,
+};
+
+static const struct intel_device_info intel_haswell_gt2_info __initconst = {
+ HSW_PLATFORM,
+ .gt = 2,
+};
+
+static const struct intel_device_info intel_haswell_gt3_info __initconst = {
+ HSW_PLATFORM,
+ .gt = 3,
};
-#define BDW_FEATURES \
- HSW_FEATURES, \
+#define GEN8_FEATURES \
+ G75_FEATURES, \
BDW_COLORS, \
+ .page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \
.has_full_48bit_ppgtt = 1, \
.has_64bit_reloc = 1, \
.has_reset_engine = 1
#define BDW_PLATFORM \
- BDW_FEATURES, \
+ GEN8_FEATURES, \
.gen = 8, \
.platform = INTEL_BROADWELL
-static const struct intel_device_info intel_broadwell_info = {
+static const struct intel_device_info intel_broadwell_gt1_info __initconst = {
+ BDW_PLATFORM,
+ .gt = 1,
+};
+
+static const struct intel_device_info intel_broadwell_gt2_info __initconst = {
BDW_PLATFORM,
+ .gt = 2,
};
-static const struct intel_device_info intel_broadwell_gt3_info = {
+static const struct intel_device_info intel_broadwell_rsvd_info __initconst = {
BDW_PLATFORM,
+ .gt = 3,
+ /* According to the device IDs these devices are GT3, but they were
+ * previously treated as not GT3; keep it like that.
+ */
+};
+
+static const struct intel_device_info intel_broadwell_gt3_info __initconst = {
+ BDW_PLATFORM,
+ .gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
-static const struct intel_device_info intel_cherryview_info = {
+static const struct intel_device_info intel_cherryview_info __initconst = {
.gen = 8, .num_pipes = 3,
.has_hotplug = 1,
.is_lp = 1,
@@ -338,33 +421,61 @@ static const struct intel_device_info intel_cherryview_info = {
.has_runtime_pm = 1,
.has_resource_streamer = 1,
.has_rc6 = 1,
- .has_gmbus_irq = 1,
.has_logical_ring_contexts = 1,
.has_gmch_display = 1,
.has_aliasing_ppgtt = 1,
.has_full_ppgtt = 1,
.has_reset_engine = 1,
+ .has_snoop = true,
.display_mmio_offset = VLV_DISPLAY_BASE,
+ GEN_DEFAULT_PAGE_SIZES,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
CHV_COLORS,
};
-#define SKL_PLATFORM \
- BDW_FEATURES, \
- .gen = 9, \
- .platform = INTEL_SKYLAKE, \
+#define GEN9_DEFAULT_PAGE_SIZES \
+ .page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_64K | \
+ I915_GTT_PAGE_SIZE_2M
+
+#define GEN9_FEATURES \
+ GEN8_FEATURES, \
+ GEN9_DEFAULT_PAGE_SIZES, \
+ .has_logical_ring_preemption = 1, \
.has_csr = 1, \
.has_guc = 1, \
+ .has_ipc = 1, \
.ddb_size = 896
-static const struct intel_device_info intel_skylake_info = {
+#define SKL_PLATFORM \
+ GEN9_FEATURES, \
+ .gen = 9, \
+ .platform = INTEL_SKYLAKE
+
+static const struct intel_device_info intel_skylake_gt1_info __initconst = {
SKL_PLATFORM,
+ .gt = 1,
};
-static const struct intel_device_info intel_skylake_gt3_info = {
+static const struct intel_device_info intel_skylake_gt2_info __initconst = {
SKL_PLATFORM,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .gt = 2,
+};
+
+#define SKL_GT3_PLUS_PLATFORM \
+ SKL_PLATFORM, \
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
+
+static const struct intel_device_info intel_skylake_gt3_info __initconst = {
+ SKL_GT3_PLUS_PLATFORM,
+ .gt = 3,
+};
+
+static const struct intel_device_info intel_skylake_gt4_info __initconst = {
+ SKL_GT3_PLUS_PLATFORM,
+ .gt = 4,
};
#define GEN9_LP_FEATURES \
@@ -377,80 +488,93 @@ static const struct intel_device_info intel_skylake_gt3_info = {
.has_ddi = 1, \
.has_fpga_dbg = 1, \
.has_fbc = 1, \
+ .has_psr = 1, \
.has_runtime_pm = 1, \
.has_pooled_eu = 0, \
.has_csr = 1, \
.has_resource_streamer = 1, \
.has_rc6 = 1, \
.has_dp_mst = 1, \
- .has_gmbus_irq = 1, \
.has_logical_ring_contexts = 1, \
+ .has_logical_ring_preemption = 1, \
.has_guc = 1, \
.has_aliasing_ppgtt = 1, \
.has_full_ppgtt = 1, \
.has_full_48bit_ppgtt = 1, \
.has_reset_engine = 1, \
+ .has_snoop = true, \
+ .has_ipc = 1, \
+ GEN9_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS, \
BDW_COLORS
-static const struct intel_device_info intel_broxton_info = {
+static const struct intel_device_info intel_broxton_info __initconst = {
GEN9_LP_FEATURES,
.platform = INTEL_BROXTON,
.ddb_size = 512,
- .has_reset_engine = false,
};
-static const struct intel_device_info intel_geminilake_info = {
+static const struct intel_device_info intel_geminilake_info __initconst = {
GEN9_LP_FEATURES,
.platform = INTEL_GEMINILAKE,
.ddb_size = 1024,
- .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
+ GLK_COLORS,
};
#define KBL_PLATFORM \
- BDW_FEATURES, \
+ GEN9_FEATURES, \
.gen = 9, \
- .platform = INTEL_KABYLAKE, \
- .has_csr = 1, \
- .has_guc = 1, \
- .ddb_size = 896
+ .platform = INTEL_KABYLAKE
-static const struct intel_device_info intel_kabylake_info = {
+static const struct intel_device_info intel_kabylake_gt1_info __initconst = {
KBL_PLATFORM,
+ .gt = 1,
};
-static const struct intel_device_info intel_kabylake_gt3_info = {
+static const struct intel_device_info intel_kabylake_gt2_info __initconst = {
KBL_PLATFORM,
+ .gt = 2,
+};
+
+static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
+ KBL_PLATFORM,
+ .gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
#define CFL_PLATFORM \
- .is_alpha_support = 1, \
- BDW_FEATURES, \
+ GEN9_FEATURES, \
.gen = 9, \
- .platform = INTEL_COFFEELAKE, \
- .has_csr = 1, \
- .has_guc = 1, \
- .ddb_size = 896
+ .platform = INTEL_COFFEELAKE
+
+static const struct intel_device_info intel_coffeelake_gt1_info __initconst = {
+ CFL_PLATFORM,
+ .gt = 1,
+};
-static const struct intel_device_info intel_coffeelake_info = {
+static const struct intel_device_info intel_coffeelake_gt2_info __initconst = {
CFL_PLATFORM,
+ .gt = 2,
};
-static const struct intel_device_info intel_coffeelake_gt3_info = {
+static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
CFL_PLATFORM,
+ .gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
-static const struct intel_device_info intel_cannonlake_info = {
- BDW_FEATURES,
+#define GEN10_FEATURES \
+ GEN9_FEATURES, \
+ .ddb_size = 1024, \
+ GLK_COLORS
+
+static const struct intel_device_info intel_cannonlake_gt2_info __initconst = {
+ GEN10_FEATURES,
.is_alpha_support = 1,
.platform = INTEL_CANNONLAKE,
.gen = 10,
- .ddb_size = 1024,
- .has_csr = 1,
- .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
+ .gt = 2,
};
/*
@@ -476,31 +600,40 @@ static const struct pci_device_id pciidlist[] = {
INTEL_PINEVIEW_IDS(&intel_pineview_info),
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
- INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
- INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
+ INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info),
+ INTEL_SNB_D_GT2_IDS(&intel_sandybridge_d_gt2_info),
+ INTEL_SNB_M_GT1_IDS(&intel_sandybridge_m_gt1_info),
+ INTEL_SNB_M_GT2_IDS(&intel_sandybridge_m_gt2_info),
INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
- INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
- INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
- INTEL_HSW_IDS(&intel_haswell_info),
+ INTEL_IVB_M_GT1_IDS(&intel_ivybridge_m_gt1_info),
+ INTEL_IVB_M_GT2_IDS(&intel_ivybridge_m_gt2_info),
+ INTEL_IVB_D_GT1_IDS(&intel_ivybridge_d_gt1_info),
+ INTEL_IVB_D_GT2_IDS(&intel_ivybridge_d_gt2_info),
+ INTEL_HSW_GT1_IDS(&intel_haswell_gt1_info),
+ INTEL_HSW_GT2_IDS(&intel_haswell_gt2_info),
+ INTEL_HSW_GT3_IDS(&intel_haswell_gt3_info),
INTEL_VLV_IDS(&intel_valleyview_info),
- INTEL_BDW_GT12_IDS(&intel_broadwell_info),
+ INTEL_BDW_GT1_IDS(&intel_broadwell_gt1_info),
+ INTEL_BDW_GT2_IDS(&intel_broadwell_gt2_info),
INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
- INTEL_BDW_RSVD_IDS(&intel_broadwell_info),
+ INTEL_BDW_RSVD_IDS(&intel_broadwell_rsvd_info),
INTEL_CHV_IDS(&intel_cherryview_info),
- INTEL_SKL_GT1_IDS(&intel_skylake_info),
- INTEL_SKL_GT2_IDS(&intel_skylake_info),
+ INTEL_SKL_GT1_IDS(&intel_skylake_gt1_info),
+ INTEL_SKL_GT2_IDS(&intel_skylake_gt2_info),
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
- INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
+ INTEL_SKL_GT4_IDS(&intel_skylake_gt4_info),
INTEL_BXT_IDS(&intel_broxton_info),
INTEL_GLK_IDS(&intel_geminilake_info),
- INTEL_KBL_GT1_IDS(&intel_kabylake_info),
- INTEL_KBL_GT2_IDS(&intel_kabylake_info),
+ INTEL_KBL_GT1_IDS(&intel_kabylake_gt1_info),
+ INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
- INTEL_CFL_S_IDS(&intel_coffeelake_info),
- INTEL_CFL_H_IDS(&intel_coffeelake_info),
- INTEL_CFL_U_IDS(&intel_coffeelake_gt3_info),
- INTEL_CNL_IDS(&intel_cannonlake_info),
+ INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
+ INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
+ INTEL_CNL_U_GT2_IDS(&intel_cannonlake_gt2_info),
+ INTEL_CNL_Y_GT2_IDS(&intel_cannonlake_gt2_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -510,7 +643,7 @@ static void i915_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
i915_driver_unload(dev);
- drm_dev_unref(dev);
+ drm_dev_put(dev);
}
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -519,7 +652,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(struct intel_device_info *) ent->driver_data;
int err;
- if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) {
+ if (IS_ALPHA_SUPPORT(intel_info) && !i915_modparams.alpha_support) {
DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n"
"See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n"
"to enable support in this kernel version, or check for kernel updates.\n");
@@ -577,10 +710,10 @@ static int __init i915_init(void)
* vga_text_mode_force boot option.
*/
- if (i915.modeset == 0)
+ if (i915_modparams.modeset == 0)
use_kms = false;
- if (vgacon_text_force() && i915.modeset == -1)
+ if (vgacon_text_force() && i915_modparams.modeset == -1)
use_kms = false;
if (!use_kms) {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 370b9d248fed..59ee808f8fd9 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -206,6 +206,7 @@
#include "i915_oa_kblgt2.h"
#include "i915_oa_kblgt3.h"
#include "i915_oa_glk.h"
+#include "i915_oa_cflgt2.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -1213,7 +1214,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- if (i915.enable_execlists)
+ if (i915_modparams.enable_execlists)
dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
else {
struct intel_engine_cs *engine = dev_priv->engine[RCS];
@@ -1259,7 +1260,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- if (i915.enable_execlists) {
+ if (i915_modparams.enable_execlists) {
dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
} else {
struct intel_engine_cs *engine = dev_priv->engine[RCS];
@@ -1850,8 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
- IS_KABYLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
+ if (IS_GEN9(dev_priv)) {
I915_WRITE(GEN8_OA_DEBUG,
_MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -2931,6 +2931,9 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
i915_perf_load_test_config_kblgt3(dev_priv);
} else if (IS_GEMINILAKE(dev_priv)) {
i915_perf_load_test_config_glk(dev_priv);
+ } else if (IS_COFFEELAKE(dev_priv)) {
+ if (IS_CFL_GT2(dev_priv))
+ i915_perf_load_test_config_cflgt2(dev_priv);
}
if (dev_priv->perf.oa.test_config.id == 0)
@@ -3409,7 +3412,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.timestamp_frequency = 12500000;
dev_priv->perf.oa.oa_formats = hsw_oa_formats;
- } else if (i915.enable_execlists) {
+ } else if (i915_modparams.enable_execlists) {
/* Note: that although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
* this driver, before upstreaming did this) it didn't seem
@@ -3457,6 +3460,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
break;
case INTEL_SKYLAKE:
case INTEL_KABYLAKE:
+ case INTEL_COFFEELAKE:
dev_priv->perf.oa.timestamp_frequency = 12000000;
break;
default:
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 0679a58cdbae..195203f298df 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -53,6 +53,7 @@ enum vgt_g2v_type {
* VGT capabilities type
*/
#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
+#define VGT_CAPS_HWSP_EMULATION BIT(3)
struct vgt_if {
u64 magic; /* VGT_MAGIC */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c9bcc6c45012..68a58cce6ab1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2336,7 +2336,7 @@ enum i915_power_well_id {
#define DONE_REG _MMIO(0x40b0)
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
-#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + index*4)
+#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index)*4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
@@ -2371,8 +2371,12 @@ enum i915_power_well_id {
#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18)
+#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
+#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
+
#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
+#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24)
#if 0
#define PRB0_TAIL _MMIO(0x2030)
@@ -2491,6 +2495,7 @@ enum i915_power_well_id {
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 _MMIO(0x2090)
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
+#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
@@ -2728,6 +2733,11 @@ enum i915_power_well_id {
#define GEN9_F2_SS_DIS_SHIFT 20
#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
+#define GEN10_F2_S_ENA_SHIFT 22
+#define GEN10_F2_S_ENA_MASK (0x3f << GEN10_F2_S_ENA_SHIFT)
+#define GEN10_F2_SS_DIS_SHIFT 18
+#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT)
+
#define GEN8_EU_DISABLE0 _MMIO(0x9134)
#define GEN8_EU_DIS0_S0_MASK 0xffffff
#define GEN8_EU_DIS0_S1_SHIFT 24
@@ -2743,6 +2753,9 @@ enum i915_power_well_id {
#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice)*0x4)
+#define GEN10_EU_DISABLE3 _MMIO(0x9140)
+#define GEN10_EU_DIS_SS_MASK 0xff
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050)
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -2938,6 +2951,9 @@ enum i915_power_well_id {
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
+#define GLK_SKIP_SEG_EN (1<<12)
+#define GLK_SKIP_SEG_COUNT_MASK (3<<10)
+#define GLK_SKIP_SEG_COUNT(x) ((x)<<10)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1<<0)
#define SNB_FBC_FRONT_BUFFER (1<<1)
@@ -3806,6 +3822,22 @@ enum {
#define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13)
+#define _CLKGATE_DIS_PSL_A 0x46520
+#define _CLKGATE_DIS_PSL_B 0x46524
+#define _CLKGATE_DIS_PSL_C 0x46528
+#define DPF_GATING_DIS (1 << 10)
+#define DPF_RAM_GATING_DIS (1 << 9)
+#define DPFR_GATING_DIS (1 << 8)
+
+#define CLKGATE_DIS_PSL(pipe) \
+ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
+
+/*
+ * GEN10 clock gating regs
+ */
+#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
+#define SARBUNIT_CLKGATE_DIS (1 << 5)
+
/*
* Display engine regs
*/
@@ -4036,7 +4068,7 @@ enum {
#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
#define EDP_PSR2_IDLE_MASK 0xf
-#define EDP_FRAMES_BEFORE_SU_ENTRY (1<<4)
+#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4)
#define EDP_PSR2_STATUS_CTL _MMIO(0x6f940)
#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28)
@@ -5210,7 +5242,7 @@ enum {
#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26)
#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26)
#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26)
-#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26)
+#define DP_AUX_CH_CTL_TIME_OUT_MAX (3 << 26) /* Varies per platform */
#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26)
#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
@@ -5652,8 +5684,7 @@ enum {
#define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
-#define CBR_DPLLBMD_PIPE_C (1<<29)
-#define CBR_DPLLBMD_PIPE_B (1<<18)
+#define CBR_DPLLBMD_PIPE(pipe) (1<<(7+(pipe)*11)) /* pipes B and C */
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -6902,7 +6933,7 @@ enum {
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
#define CHICKEN_PAR1_1 _MMIO(0x42080)
-#define SKL_RC_HASH_OUTSIDE (1 << 15)
+#define SKL_DE_COMPRESSED_HASH_MODE (1 << 15)
#define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14)
#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
@@ -6916,6 +6947,10 @@ enum {
#define GLK_CL1_PWR_DOWN (1 << 11)
#define GLK_CL0_PWR_DOWN (1 << 10)
+#define CHICKEN_MISC_4 _MMIO(0x4208c)
+#define FBC_STRIDE_OVERRIDE (1 << 13)
+#define FBC_STRIDE_MASK 0x1FFF
+
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define HSW_FBCQ_DIS (1 << 22)
@@ -6934,6 +6969,7 @@ enum {
#define DISP_FBC_WM_DIS (1<<15)
#define DISP_ARB_CTL2 _MMIO(0x45004)
#define DISP_DATA_PARTITION_5_6 (1<<6)
+#define DISP_IPC_ENABLE (1<<3)
#define DBUF_CTL _MMIO(0x45008)
#define DBUF_POWER_REQUEST (1<<31)
#define DBUF_POWER_STATE (1<<30)
@@ -6969,12 +7005,19 @@ enum {
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
+#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1<<0)
+#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
+#define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
+#define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
+#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0)
+#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1)
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+# define GEN9_PBE_COMPRESSED_HASH_SELECTION (1<<13)
# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
@@ -7018,6 +7061,7 @@ enum {
/* GEN8 chicken */
#define HDC_CHICKEN0 _MMIO(0x7300)
+#define CNL_HDC_CHICKEN0 _MMIO(0xE5F0)
#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15)
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
@@ -7139,9 +7183,6 @@ enum {
#define SERR_INT _MMIO(0xc4040)
#define SERR_INT_POISON (1<<31)
-#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
-#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
-#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3))
/* digital port hotplug */
@@ -7454,6 +7495,8 @@ enum {
#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
+#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
+#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8)
#define SPT_PWM_GRANULARITY (1<<0)
#define SOUTH_CHICKEN2 _MMIO(0xc2004)
#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
@@ -7471,6 +7514,7 @@ enum {
#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
+#define CNP_PWM_CGE_GATING_DISABLE (1<<13)
#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
/* CPU: FDI_TX */
@@ -7937,8 +7981,8 @@ enum {
#define GEN7_PCODE_TIMEOUT 0x2
#define GEN7_PCODE_ILLEGAL_DATA 0x3
#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
-#define GEN6_PCODE_WRITE_RC6VIDS 0x4
-#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_PCODE_WRITE_RC6VIDS 0x4
+#define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
@@ -7957,7 +8001,9 @@ enum {
#define GEN6_PCODE_WRITE_D_COMP 0x11
#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
#define DISPLAY_IPS_CONTROL 0x19
-#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
+ /* See also IPS_CTL */
+#define IPS_PCODE_CONTROL (1 << 30)
+#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
#define GEN9_PCODE_SAGV_CONTROL 0x21
#define GEN9_SAGV_DISABLE 0x0
#define GEN9_SAGV_IS_DISABLED 0x1
@@ -8045,10 +8091,12 @@ enum {
#define FLOW_CONTROL_ENABLE (1<<15)
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
+#define THROTTLE_12_5 (7<<2)
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
#define DOP_CLOCK_GATING_DISABLE (1<<0)
+#define PUSH_CONSTANT_DEREF_DISABLE (1<<8)
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
@@ -8060,9 +8108,11 @@ enum {
#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5)
+#define CNL_FAST_ANISO_L1_BANKING_FIX (1<<4)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
+#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1<<8)
#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
#define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2)
@@ -8575,7 +8625,7 @@ enum skl_power_gate {
#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
-#define DPLL_CFGCR0_DCO_FRAC_SHIFT (10)
+#define DPLL_CFGCR0_DCO_FRACTION_SHIFT (10)
#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
#define CNL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0)
@@ -8782,6 +8832,15 @@ enum skl_power_gate {
#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
#define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF
+/* Gen4+ Timestamp and Pipe Frame time stamp registers */
+#define GEN4_TIMESTAMP _MMIO(0x2358)
+#define ILK_TIMESTAMP_HI _MMIO(0x70070)
+#define IVB_TIMESTAMP_CTR _MMIO(0x44070)
+
+#define _PIPE_FRMTMSTMP_A 0x70048
+#define PIPE_FRMTMSTMP(pipe) \
+ _MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A)
+
/* BXT MIPI clock controls */
#define BXT_MAX_VAR_OUTPUT_KHZ 39500
@@ -9363,4 +9422,8 @@ enum skl_power_gate {
#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */
#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */
+#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */
+#define MMCD_PCLA (1 << 31)
+#define MMCD_HOTSPOT_EN (1 << 27)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5c86925a0294..8f3aa4dc0c98 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -108,8 +108,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_gem_restore_fences(dev_priv);
-
if (IS_GEN4(dev_priv))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index f29540f922af..e8ca67a129d2 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/dma-fence.h>
+#include <linux/irq_work.h>
#include <linux/reservation.h>
#include "i915_sw_fence.h"
@@ -40,6 +41,11 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
debug_object_init(fence, &i915_sw_fence_debug_descr);
}
+static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+{
+ debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
+}
+
static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
debug_object_activate(fence, &i915_sw_fence_debug_descr);
@@ -78,6 +84,10 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
{
}
+static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+{
+}
+
static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
}
@@ -356,31 +366,44 @@ struct i915_sw_dma_fence_cb {
struct i915_sw_fence *fence;
struct dma_fence *dma;
struct timer_list timer;
+ struct irq_work work;
};
-static void timer_i915_sw_fence_wake(unsigned long data)
+static void timer_i915_sw_fence_wake(struct timer_list *t)
{
- struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data;
+ struct i915_sw_dma_fence_cb *cb = from_timer(cb, t, timer);
+ struct i915_sw_fence *fence;
+
+ fence = xchg(&cb->fence, NULL);
+ if (!fence)
+ return;
pr_warn("asynchronous wait on fence %s:%s:%x timed out\n",
cb->dma->ops->get_driver_name(cb->dma),
cb->dma->ops->get_timeline_name(cb->dma),
cb->dma->seqno);
- dma_fence_put(cb->dma);
- cb->dma = NULL;
- i915_sw_fence_complete(cb->fence);
- cb->timer.function = NULL;
+ i915_sw_fence_complete(fence);
}
static void dma_i915_sw_fence_wake(struct dma_fence *dma,
struct dma_fence_cb *data)
{
struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+ struct i915_sw_fence *fence;
+
+ fence = xchg(&cb->fence, NULL);
+ if (fence)
+ i915_sw_fence_complete(fence);
+
+ irq_work_queue(&cb->work);
+}
+
+static void irq_i915_sw_fence_work(struct irq_work *wrk)
+{
+ struct i915_sw_dma_fence_cb *cb = container_of(wrk, typeof(*cb), work);
del_timer_sync(&cb->timer);
- if (cb->timer.function)
- i915_sw_fence_complete(cb->fence);
dma_fence_put(cb->dma);
kfree(cb);
@@ -411,9 +434,8 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
i915_sw_fence_await(fence);
cb->dma = NULL;
- __setup_timer(&cb->timer,
- timer_i915_sw_fence_wake, (unsigned long)cb,
- TIMER_IRQSAFE);
+ timer_setup(&cb->timer, timer_i915_sw_fence_wake, TIMER_IRQSAFE);
+ init_irq_work(&cb->work, irq_i915_sw_fence_work);
if (timeout) {
cb->dma = dma_fence_get(dma);
mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
@@ -492,5 +514,6 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/lib_sw_fence.c"
#include "selftests/i915_sw_fence.c"
#endif
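
The i915_sw_fence.c hunk above is part of the tree-wide timer API conversion:
__setup_timer() handed the callback an opaque unsigned long that had to be cast
back, while timer_setup() registers a callback that receives the struct
timer_list pointer and recovers its container with from_timer(), which is a
container_of() wrapper. A small userspace sketch of that recovery step, with
made-up names standing in for the kernel types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_list { int dummy; };	/* stand-in for the kernel type */

struct my_cb {
	int payload;
	struct timer_list timer;	/* embedded, like in i915_sw_dma_fence_cb */
};

/* New-style callback: takes the timer pointer, not an unsigned long cookie. */
static void my_timer_fn(struct timer_list *t)
{
	/* Equivalent of from_timer(cb, t, timer) in the patch above. */
	struct my_cb *cb = container_of(t, struct my_cb, timer);

	printf("payload=%d\n", cb->payload);
}

int main(void)
{
	struct my_cb cb = { .payload = 42 };

	my_timer_fn(&cb.timer);	/* simulate the timer expiring */
	return 0;
}
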
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index d61c8727f756..791759f632e1 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -49,7 +49,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
+ return snprintf(buf, PAGE_SIZE, "%x\n", intel_rc6_enabled());
}
static ssize_t
@@ -246,7 +246,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 freq;
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -261,7 +261,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
ret = intel_gpu_freq(dev_priv, ret);
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
@@ -275,7 +275,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv,
- dev_priv->rps.cur_freq));
+ dev_priv->gt_pm.rps.cur_freq));
}
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -284,7 +284,7 @@ static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribu
return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv,
- dev_priv->rps.boost_freq));
+ dev_priv->gt_pm.rps.boost_freq));
}
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -292,6 +292,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val;
ssize_t ret;
@@ -301,12 +302,12 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
/* Validate against (static) hardware limits */
val = intel_freq_opcode(dev_priv, val);
- if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
+ if (val < rps->min_freq || val > rps->max_freq)
return -EINVAL;
- mutex_lock(&dev_priv->rps.hw_lock);
- dev_priv->rps.boost_freq = val;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
+ rps->boost_freq = val;
+ mutex_unlock(&dev_priv->pcu_lock);
return count;
}
@@ -318,7 +319,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv,
- dev_priv->rps.efficient_freq));
+ dev_priv->gt_pm.rps.efficient_freq));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -327,7 +328,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv,
- dev_priv->rps.max_freq_softlimit));
+ dev_priv->gt_pm.rps.max_freq_softlimit));
}
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -335,6 +336,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val;
ssize_t ret;
@@ -344,34 +346,34 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
val = intel_freq_opcode(dev_priv, val);
- if (val < dev_priv->rps.min_freq ||
- val > dev_priv->rps.max_freq ||
- val < dev_priv->rps.min_freq_softlimit) {
- mutex_unlock(&dev_priv->rps.hw_lock);
+ if (val < rps->min_freq ||
+ val > rps->max_freq ||
+ val < rps->min_freq_softlimit) {
+ mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
return -EINVAL;
}
- if (val > dev_priv->rps.rp0_freq)
+ if (val > rps->rp0_freq)
DRM_DEBUG("User requested overclocking to %d\n",
intel_gpu_freq(dev_priv, val));
- dev_priv->rps.max_freq_softlimit = val;
+ rps->max_freq_softlimit = val;
- val = clamp_t(int, dev_priv->rps.cur_freq,
- dev_priv->rps.min_freq_softlimit,
- dev_priv->rps.max_freq_softlimit);
+ val = clamp_t(int, rps->cur_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit);
/* We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
ret = intel_set_rps(dev_priv, val);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
@@ -384,7 +386,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
return snprintf(buf, PAGE_SIZE, "%d\n",
intel_gpu_freq(dev_priv,
- dev_priv->rps.min_freq_softlimit));
+ dev_priv->gt_pm.rps.min_freq_softlimit));
}
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -392,6 +394,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val;
ssize_t ret;
@@ -401,30 +404,30 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
val = intel_freq_opcode(dev_priv, val);
- if (val < dev_priv->rps.min_freq ||
- val > dev_priv->rps.max_freq ||
- val > dev_priv->rps.max_freq_softlimit) {
- mutex_unlock(&dev_priv->rps.hw_lock);
+ if (val < rps->min_freq ||
+ val > rps->max_freq ||
+ val > rps->max_freq_softlimit) {
+ mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
return -EINVAL;
}
- dev_priv->rps.min_freq_softlimit = val;
+ rps->min_freq_softlimit = val;
- val = clamp_t(int, dev_priv->rps.cur_freq,
- dev_priv->rps.min_freq_softlimit,
- dev_priv->rps.max_freq_softlimit);
+ val = clamp_t(int, rps->cur_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit);
/* We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
ret = intel_set_rps(dev_priv, val);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
intel_runtime_pm_put(dev_priv);
@@ -448,14 +451,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val;
if (attr == &dev_attr_gt_RP0_freq_mhz)
- val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+ val = intel_gpu_freq(dev_priv, rps->rp0_freq);
else if (attr == &dev_attr_gt_RP1_freq_mhz)
- val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+ val = intel_gpu_freq(dev_priv, rps->rp1_freq);
else if (attr == &dev_attr_gt_RPn_freq_mhz)
- val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+ val = intel_gpu_freq(dev_priv, rps->min_freq);
else
BUG();
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index ef72da74b87f..4e76768ffa95 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -346,7 +346,7 @@ TRACE_EVENT(i915_gem_object_create,
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
- __field(u32, size)
+ __field(u64, size)
),
TP_fast_assign(
@@ -354,7 +354,7 @@ TRACE_EVENT(i915_gem_object_create,
__entry->size = obj->base.size;
),
- TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
+ TP_printk("obj=%p, size=0x%llx", __entry->obj, __entry->size)
);
TRACE_EVENT(i915_gem_shrink,
@@ -385,7 +385,7 @@ TRACE_EVENT(i915_vma_bind,
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
__field(u64, offset)
- __field(u32, size)
+ __field(u64, size)
__field(unsigned, flags)
),
@@ -397,7 +397,7 @@ TRACE_EVENT(i915_vma_bind,
__entry->flags = flags;
),
- TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
+ TP_printk("obj=%p, offset=0x%016llx size=0x%llx%s vm=%p",
__entry->obj, __entry->offset, __entry->size,
__entry->flags & PIN_MAPPABLE ? ", mappable" : "",
__entry->vm)
@@ -411,7 +411,7 @@ TRACE_EVENT(i915_vma_unbind,
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
__field(u64, offset)
- __field(u32, size)
+ __field(u64, size)
),
TP_fast_assign(
@@ -421,18 +421,18 @@ TRACE_EVENT(i915_vma_unbind,
__entry->size = vma->node.size;
),
- TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
+ TP_printk("obj=%p, offset=0x%016llx size=0x%llx vm=%p",
__entry->obj, __entry->offset, __entry->size, __entry->vm)
);
TRACE_EVENT(i915_gem_object_pwrite,
- TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+ TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
- __field(u32, offset)
- __field(u32, len)
+ __field(u64, offset)
+ __field(u64, len)
),
TP_fast_assign(
@@ -441,18 +441,18 @@ TRACE_EVENT(i915_gem_object_pwrite,
__entry->len = len;
),
- TP_printk("obj=%p, offset=%u, len=%u",
+ TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
__entry->obj, __entry->offset, __entry->len)
);
TRACE_EVENT(i915_gem_object_pread,
- TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+ TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
- __field(u32, offset)
- __field(u32, len)
+ __field(u64, offset)
+ __field(u64, len)
),
TP_fast_assign(
@@ -461,17 +461,17 @@ TRACE_EVENT(i915_gem_object_pread,
__entry->len = len;
),
- TP_printk("obj=%p, offset=%u, len=%u",
+ TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
__entry->obj, __entry->offset, __entry->len)
);
TRACE_EVENT(i915_gem_object_fault,
- TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
+ TP_PROTO(struct drm_i915_gem_object *obj, u64 index, bool gtt, bool write),
TP_ARGS(obj, index, gtt, write),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
- __field(u32, index)
+ __field(u64, index)
__field(bool, gtt)
__field(bool, write)
),
@@ -483,7 +483,7 @@ TRACE_EVENT(i915_gem_object_fault,
__entry->write = write;
),
- TP_printk("obj=%p, %s index=%u %s",
+ TP_printk("obj=%p, %s index=%llu %s",
__entry->obj,
__entry->gtt ? "GTT" : "CPU",
__entry->index,
@@ -516,14 +516,14 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
);
TRACE_EVENT(i915_gem_evict,
- TP_PROTO(struct i915_address_space *vm, u32 size, u32 align, unsigned int flags),
+ TP_PROTO(struct i915_address_space *vm, u64 size, u64 align, unsigned int flags),
TP_ARGS(vm, size, align, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(struct i915_address_space *, vm)
- __field(u32, size)
- __field(u32, align)
+ __field(u64, size)
+ __field(u64, align)
__field(unsigned int, flags)
),
@@ -535,43 +535,11 @@ TRACE_EVENT(i915_gem_evict,
__entry->flags = flags;
),
- TP_printk("dev=%d, vm=%p, size=%d, align=%d %s",
+ TP_printk("dev=%d, vm=%p, size=0x%llx, align=0x%llx %s",
__entry->dev, __entry->vm, __entry->size, __entry->align,
__entry->flags & PIN_MAPPABLE ? ", mappable" : "")
);
-TRACE_EVENT(i915_gem_evict_everything,
- TP_PROTO(struct drm_device *dev),
- TP_ARGS(dev),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- ),
-
- TP_fast_assign(
- __entry->dev = dev->primary->index;
- ),
-
- TP_printk("dev=%d", __entry->dev)
-);
-
-TRACE_EVENT(i915_gem_evict_vm,
- TP_PROTO(struct i915_address_space *vm),
- TP_ARGS(vm),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(struct i915_address_space *, vm)
- ),
-
- TP_fast_assign(
- __entry->dev = vm->i915->drm.primary->index;
- __entry->vm = vm;
- ),
-
- TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
-);
-
TRACE_EVENT(i915_gem_evict_node,
TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
TP_ARGS(vm, node, flags),
@@ -594,12 +562,29 @@ TRACE_EVENT(i915_gem_evict_node,
__entry->flags = flags;
),
- TP_printk("dev=%d, vm=%p, start=%llx size=%llx, color=%lx, flags=%x",
+ TP_printk("dev=%d, vm=%p, start=0x%llx size=0x%llx, color=0x%lx, flags=%x",
__entry->dev, __entry->vm,
__entry->start, __entry->size,
__entry->color, __entry->flags)
);
+TRACE_EVENT(i915_gem_evict_vm,
+ TP_PROTO(struct i915_address_space *vm),
+ TP_ARGS(vm),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(struct i915_address_space *, vm)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = vm->i915->drm.primary->index;
+ __entry->vm = vm;
+ ),
+
+ TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
+);
+
TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct drm_i915_gem_request *to,
struct drm_i915_gem_request *from),
@@ -650,29 +635,6 @@ TRACE_EVENT(i915_gem_request_queue,
__entry->flags)
);
-TRACE_EVENT(i915_gem_ring_flush,
- TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
- TP_ARGS(req, invalidate, flush),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, ring)
- __field(u32, invalidate)
- __field(u32, flush)
- ),
-
- TP_fast_assign(
- __entry->dev = req->i915->drm.primary->index;
- __entry->ring = req->engine->id;
- __entry->invalidate = invalidate;
- __entry->flush = flush;
- ),
-
- TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
- __entry->dev, __entry->ring,
- __entry->invalidate, __entry->flush)
-);
-
DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct drm_i915_gem_request *req),
TP_ARGS(req),
@@ -1032,5 +994,5 @@ TRACE_EVENT(switch_mm,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 12fc250b47b9..af3d7cc53fa1 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -99,6 +99,11 @@
__T; \
})
+static inline u64 ptr_to_u64(const void *ptr)
+{
+ return (uintptr_t)ptr;
+}
+
#define u64_to_ptr(T, x) ({ \
typecheck(u64, x); \
(T *)(uintptr_t)(x); \
@@ -119,4 +124,17 @@ static inline void __list_del_many(struct list_head *head,
WRITE_ONCE(head->next, first);
}
+/*
+ * Wait until the work is finally complete, even if it tries to postpone
+ * by requeueing itself. Note that if the worker never cancels itself,
+ * we will spin forever.
+ */
+static inline void drain_delayed_work(struct delayed_work *dw)
+{
+ do {
+ while (flush_delayed_work(dw))
+ ;
+ } while (delayed_work_pending(dw));
+}
+
#endif /* !__I915_UTILS_H */
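
The drain_delayed_work() helper added above loops flush_delayed_work() until
the work is no longer pending, because a handler that re-arms itself can
outlive a single flush; as the comment warns, the caller must first stop the
handler from re-arming or the loop never terminates. A hypothetical
kernel-context caller, purely illustrative (the struct, field and function
names are not from this patch):

#include <linux/workqueue.h>
#include "i915_utils.h"		/* for drain_delayed_work() */

struct example_engine {
	struct delayed_work retire_work;
	bool stopping;			/* checked by the handler before re-arming */
};

static void example_retire_fn(struct work_struct *work)
{
	struct example_engine *e =
		container_of(work, struct example_engine, retire_work.work);

	/* ... periodic housekeeping ... */

	if (!READ_ONCE(e->stopping))
		schedule_delayed_work(&e->retire_work, HZ);	/* re-arm */
}

static void example_teardown(struct example_engine *e)
{
	WRITE_ONCE(e->stopping, true);		/* forbid further self-rearming */
	drain_delayed_work(&e->retire_work);
}
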
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index b72bd2956b70..bb8338450dc1 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -30,6 +30,12 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv);
bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv);
+static inline bool
+intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
+}
+
int intel_vgt_balloon(struct drm_i915_private *dev_priv);
void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 02d1a5eacb00..fbfab2f33023 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -54,12 +54,21 @@ i915_vma_retire(struct i915_gem_active *active,
if (--obj->active_count)
return;
+ /* Prune the shared fence arrays iff completely idle (inc. external) */
+ if (reservation_object_trylock(obj->resv)) {
+ if (reservation_object_test_signaled_rcu(obj->resv, true))
+ reservation_object_add_excl_fence(obj->resv, NULL);
+ reservation_object_unlock(obj->resv);
+ }
+
/* Bump our place on the bound list to keep it roughly in LRU order
* so that we don't steal from recently used but inactive objects
* (unless we are forced to ofc!)
*/
+ spin_lock(&rq->i915->mm.obj_lock);
if (obj->bind_count)
- list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
+ list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
+ spin_unlock(&rq->i915->mm.obj_lock);
obj->mm.dirty = true; /* be paranoid */
@@ -266,6 +275,8 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (bind_flags == 0)
return 0;
+ GEM_BUG_ON(!vma->pages);
+
trace_i915_vma_bind(vma, bind_flags);
ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
if (ret)
@@ -278,13 +289,16 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
void __iomem *ptr;
+ int err;
/* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(vma->vm->i915);
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
- return IO_ERR_PTR(-ENODEV);
+ if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
+ err = -ENODEV;
+ goto err;
+ }
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
@@ -294,14 +308,36 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
vma->node.start,
vma->node.size);
- if (ptr == NULL)
- return IO_ERR_PTR(-ENOMEM);
+ if (ptr == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
vma->iomap = ptr;
}
__i915_vma_pin(vma);
+
+ err = i915_vma_pin_fence(vma);
+ if (err)
+ goto err_unpin;
+
return ptr;
+
+err_unpin:
+ __i915_vma_unpin(vma);
+err:
+ return IO_ERR_PTR(err);
+}
+
+void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+
+ GEM_BUG_ON(vma->iomap == NULL);
+
+ i915_vma_unpin_fence(vma);
+ i915_vma_unpin(vma);
}
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
@@ -471,25 +507,64 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (ret)
return ret;
+ GEM_BUG_ON(vma->pages);
+
+ ret = vma->vm->set_pages(vma);
+ if (ret)
+ goto err_unpin;
+
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
if (!IS_ALIGNED(offset, alignment) ||
range_overflows(offset, size, end)) {
ret = -EINVAL;
- goto err_unpin;
+ goto err_clear;
}
ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
size, offset, obj->cache_level,
flags);
if (ret)
- goto err_unpin;
+ goto err_clear;
} else {
+ /*
+ * We only support huge gtt pages through the 48b PPGTT,
+ * however we also don't want to force any alignment for
+ * objects which need to be tightly packed into the low 32bits.
+ *
+ * Note that we assume that the GGTT is limited to 4GiB for the
+ * foreseeable future. See also i915_ggtt_offset().
+ */
+ if (upper_32_bits(end - 1) &&
+ vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ /*
+ * We can't mix 64K and 4K PTEs in the same page-table
+ * (2M block), and so to avoid the ugliness and
+ * complexity of coloring we opt for just aligning 64K
+ * objects to 2M.
+ */
+ u64 page_alignment =
+ rounddown_pow_of_two(vma->page_sizes.sg |
+ I915_GTT_PAGE_SIZE_2M);
+
+ /*
+ * Check we don't expand for the limited Global GTT
+ * (mappable aperture is even more precious!). This
+ * also checks that we exclude the aliasing-ppgtt.
+ */
+ GEM_BUG_ON(i915_vma_is_ggtt(vma));
+
+ alignment = max(alignment, page_alignment);
+
+ if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+ size = round_up(size, I915_GTT_PAGE_SIZE_2M);
+ }
+
ret = i915_gem_gtt_insert(vma->vm, &vma->node,
size, alignment, obj->cache_level,
start, end, flags);
if (ret)
- goto err_unpin;
+ goto err_clear;
GEM_BUG_ON(vma->node.start < start);
GEM_BUG_ON(vma->node.start + vma->node.size > end);
@@ -497,13 +572,19 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
- list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+
+ spin_lock(&dev_priv->mm.obj_lock);
+ list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
obj->bind_count++;
+ spin_unlock(&dev_priv->mm.obj_lock);
+
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
return 0;
+err_clear:
+ vma->vm->clear_pages(vma);
err_unpin:
i915_gem_object_unpin_pages(obj);
return ret;
@@ -512,20 +593,24 @@ err_unpin:
static void
i915_vma_remove(struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+ vma->vm->clear_pages(vma);
+
drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
/* Since the unbound list is global, only move to that list if
* no more VMAs exist.
*/
+ spin_lock(&i915->mm.obj_lock);
if (--obj->bind_count == 0)
- list_move_tail(&obj->global_link,
- &to_i915(obj->base.dev)->mm.unbound_list);
+ list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+ spin_unlock(&i915->mm.obj_lock);
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
@@ -569,8 +654,8 @@ int __i915_vma_do_pin(struct i915_vma *vma,
err_remove:
if ((bound & I915_VMA_BIND_MASK) == 0) {
- GEM_BUG_ON(vma->pages);
i915_vma_remove(vma);
+ GEM_BUG_ON(vma->pages);
}
err_unpin:
__i915_vma_unpin(vma);
@@ -620,6 +705,30 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
vma->iomap = NULL;
}
+void i915_vma_revoke_mmap(struct i915_vma *vma)
+{
+ struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+ u64 vma_offset;
+
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+ if (!i915_vma_has_userfault(vma))
+ return;
+
+ GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+ GEM_BUG_ON(!vma->obj->userfault_count);
+
+ vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+ unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
+ drm_vma_node_offset_addr(node) + vma_offset,
+ vma->size,
+ 1);
+
+ i915_vma_unset_userfault(vma);
+ if (!--vma->obj->userfault_count)
+ list_del(&vma->obj->userfault_link);
+}
+
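For the per-vma revoke above, the range handed to unmap_mapping_range() is just this vma's slice of the object's fake mmap offset. A rough sketch of the arithmetic, assuming a hypothetical partial GGTT view that starts at page 3 of the object:

	/* Illustrative only: a partial view covering pages [3, 3 + vma->size / PAGE_SIZE). */
	u64 base  = drm_vma_node_offset_addr(&obj->base.vma_node);	/* object's mmap base */
	u64 start = base + ((u64)3 << PAGE_SHIFT);			/* ggtt_view.partial.offset == 3 */

	unmap_mapping_range(i915->drm.anon_inode->i_mapping, start, vma->size, 1);	/* zap only this view */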
int i915_vma_unbind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -683,11 +792,13 @@ int i915_vma_unbind(struct i915_vma *vma)
return ret;
/* Force a pagefault for domain tracking on next user access */
- i915_gem_release_mmap(obj);
+ i915_vma_revoke_mmap(vma);
__i915_vma_iounmap(vma);
vma->flags &= ~I915_VMA_CAN_FENCE;
}
+ GEM_BUG_ON(vma->fence);
+ GEM_BUG_ON(i915_vma_has_userfault(vma));
if (likely(!vma->vm->closed)) {
trace_i915_vma_unbind(vma);
@@ -695,13 +806,6 @@ int i915_vma_unbind(struct i915_vma *vma)
}
vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
- if (vma->pages != obj->mm.pages) {
- GEM_BUG_ON(!vma->pages);
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
-
i915_vma_remove(vma);
destroy:
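Together with the set_pages() call added at the top of i915_vma_insert(), these hunks hand ownership of vma->pages to the address space: every successful vm->set_pages() is balanced by exactly one vm->clear_pages(), either on insert's err_clear path or in i915_vma_remove(), which is why unbind no longer needs its open-coded sg_free_table(). A sketch of the invariant (illustrative; the node insertion is a hypothetical placeholder):

	static int example_bind(struct i915_vma *vma)
	{
		int ret = vma->vm->set_pages(vma);	/* build vma->pages for this vm */
		if (ret)
			return ret;

		ret = insert_node_somehow(vma);		/* hypothetical placeholder */
		if (ret)
			vma->vm->clear_pages(vma);	/* the only teardown path for vma->pages */
		return ret;
	}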
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index e811067c7724..1e2bc9b3c3ac 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -55,6 +55,7 @@ struct i915_vma {
void __iomem *iomap;
u64 size;
u64 display_alignment;
+ struct i915_page_sizes page_sizes;
u32 fence_size;
u32 fence_alignment;
@@ -65,7 +66,7 @@ struct i915_vma {
* that exist in the ctx->handle_vmas LUT for this vma.
*/
unsigned int open_count;
- unsigned int flags;
+ unsigned long flags;
/**
* How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, execbuffer
@@ -87,6 +88,8 @@ struct i915_vma {
#define I915_VMA_GGTT BIT(8)
#define I915_VMA_CAN_FENCE BIT(9)
#define I915_VMA_CLOSED BIT(10)
+#define I915_VMA_USERFAULT_BIT 11
+#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -145,6 +148,22 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
return vma->flags & I915_VMA_CLOSED;
}
+static inline bool i915_vma_set_userfault(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+ return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
+static inline void i915_vma_unset_userfault(struct i915_vma *vma)
+{
+ return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
+static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
+{
+ return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
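The set/clear helpers above intentionally use the non-atomic __test_and_set_bit()/__clear_bit() variants, since vma->flags is only manipulated under struct_mutex. A plausible fault-handler usage (a sketch of the expected caller pattern, not code from this diff):

	/* Sketch: record the first userspace GTT fault against this object/vma. */
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &i915->mm.userfault_list);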
static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
{
return vma->active;
@@ -243,6 +262,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+void i915_vma_revoke_mmap(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
@@ -321,12 +341,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
* Callers must hold the struct_mutex. This function is only valid to be
* called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
*/
-static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
-{
- lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
- GEM_BUG_ON(vma->iomap == NULL);
- i915_vma_unpin(vma);
-}
+void i915_vma_unpin_iomap(struct i915_vma *vma);
static inline struct page *i915_vma_first_page(struct i915_vma *vma)
{
@@ -349,15 +364,13 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
*
* True if the vma has a fence, false otherwise.
*/
-static inline bool
-i915_vma_pin_fence(struct i915_vma *vma)
+int i915_vma_pin_fence(struct i915_vma *vma);
+int __must_check i915_vma_put_fence(struct i915_vma *vma);
+
+static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
- if (vma->fence) {
- vma->fence->pin_count++;
- return true;
- } else
- return false;
+ GEM_BUG_ON(vma->fence->pin_count <= 0);
+ vma->fence->pin_count--;
}
/**
@@ -372,10 +385,8 @@ static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
- if (vma->fence) {
- GEM_BUG_ON(vma->fence->pin_count <= 0);
- vma->fence->pin_count--;
- }
+ if (vma->fence)
+ __i915_vma_unpin_fence(vma);
}
#endif
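With i915_vma_pin_fence() now returning an errno instead of a bool, callers are expected to follow the usual pin/use/unpin shape. A minimal sketch, assuming struct_mutex is already held:

	int err = i915_vma_pin_fence(vma);
	if (err)
		return err;

	/* ... access the object through its fenced GTT mapping ... */

	i915_vma_unpin_fence(vma);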
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index ee76fab7bb6f..8e6dc159f64d 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -107,7 +107,9 @@ intel_plane_destroy_state(struct drm_plane *plane,
drm_atomic_helper_plane_destroy_state(plane, state);
}
-int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
+int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state)
{
struct drm_plane *plane = intel_state->base.plane;
@@ -124,7 +126,7 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
* anything driver-specific we need to test in that case, so
* just return success.
*/
- if (!intel_state->base.crtc && !plane->state->crtc)
+ if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
/* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
@@ -194,16 +196,21 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
else
crtc_state->active_planes &= ~BIT(intel_plane->id);
- return intel_plane_atomic_calc_changes(&crtc_state->base, state);
+ return intel_plane_atomic_calc_changes(old_crtc_state,
+ &crtc_state->base,
+ old_plane_state,
+ state);
}
static int intel_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_plane_state *new_plane_state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_crtc_state *drm_crtc_state;
-
- crtc = crtc ? crtc : plane->state->crtc;
+ struct drm_atomic_state *state = new_plane_state->state;
+ const struct drm_plane_state *old_plane_state =
+ drm_atomic_get_old_plane_state(state, plane);
+ struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
+ const struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc_state *new_crtc_state;
/*
* Both crtc and plane->crtc could be NULL if we're updating a
@@ -214,29 +221,33 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
- drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
- if (WARN_ON(!drm_crtc_state))
- return -EINVAL;
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
- return intel_plane_atomic_check_with_state(to_intel_crtc_state(drm_crtc_state),
- to_intel_plane_state(state));
+ return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state),
+ to_intel_crtc_state(new_crtc_state),
+ to_intel_plane_state(old_plane_state),
+ to_intel_plane_state(new_plane_state));
}
static void intel_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
+ struct intel_atomic_state *state = to_intel_atomic_state(old_state->state);
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_plane_state *intel_state =
- to_intel_plane_state(plane->state);
- struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
+ const struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state, intel_plane);
+ struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc;
+
+ if (new_plane_state->base.visible) {
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc));
- if (intel_state->base.visible) {
trace_intel_update_plane(plane,
to_intel_crtc(crtc));
intel_plane->update_plane(intel_plane,
- to_intel_crtc_state(crtc->state),
- intel_state);
+ new_crtc_state, new_plane_state);
} else {
trace_intel_disable_plane(plane,
to_intel_crtc(crtc));
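The plane hooks are reworked to pull both old and new states from the top-level drm_atomic_state rather than peeking at plane->state or crtc->state, which can already point at the new state mid-commit. Condensed, the accessor pattern used above is (sketch only):

	struct drm_atomic_state *state = new_plane_state->state;
	const struct drm_plane_state *old_plane_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
	const struct drm_crtc_state *old_crtc_state =
		drm_atomic_get_old_crtc_state(state, crtc);
	struct drm_crtc_state *new_crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);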
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 27743be5b768..0ddba16fde1b 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -754,7 +754,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
{
struct intel_encoder *encoder;
- if (WARN_ON(pipe >= I915_MAX_PIPES))
+ if (WARN_ON(pipe >= INTEL_INFO(dev_priv)->num_pipes))
return NULL;
/* MST */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 5d4cd3d00564..fd23023df7c1 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -356,7 +356,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct drm_display_mode *panel_fixed_mode;
int index;
- index = i915.vbt_sdvo_panel_type;
+ index = i915_modparams.vbt_sdvo_panel_type;
if (index == -2) {
DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
return;
@@ -431,70 +431,31 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->vbt.fdi_rx_polarity_inverted);
}
-static void
-parse_general_definitions(struct drm_i915_private *dev_priv,
- const struct bdb_header *bdb)
+static const struct child_device_config *
+child_device_ptr(const struct bdb_general_definitions *defs, int i)
{
- const struct bdb_general_definitions *general;
-
- general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
- if (general) {
- u16 block_size = get_blocksize(general);
- if (block_size >= sizeof(*general)) {
- int bus_pin = general->crt_ddc_gmbus_pin;
- DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
- if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
- dev_priv->vbt.crt_ddc_pin = bus_pin;
- } else {
- DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
- block_size);
- }
- }
-}
-
-static const union child_device_config *
-child_device_ptr(const struct bdb_general_definitions *p_defs, int i)
-{
- return (const void *) &p_defs->devices[i * p_defs->child_dev_size];
+ return (const void *) &defs->devices[i * defs->child_dev_size];
}
static void
-parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
- const struct bdb_header *bdb)
+parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
{
- struct sdvo_device_mapping *p_mapping;
- const struct bdb_general_definitions *p_defs;
- const struct old_child_dev_config *child; /* legacy */
- int i, child_device_num, count;
- u16 block_size;
-
- p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
- if (!p_defs) {
- DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
- return;
- }
+ struct sdvo_device_mapping *mapping;
+ const struct child_device_config *child;
+ int i, count = 0;
/*
- * Only parse SDVO mappings when the general definitions block child
- * device size matches that of the *legacy* child device config
- * struct. Thus, SDVO mapping will be skipped for newer VBT.
+ * Only parse SDVO mappings on gens that could have SDVO. This isn't
+ * accurate and doesn't have to be, as long as it's not too strict.
*/
- if (p_defs->child_dev_size != sizeof(*child)) {
- DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n");
+ if (!IS_GEN(dev_priv, 3, 7)) {
+ DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
return;
}
- /* get the block size of general definitions */
- block_size = get_blocksize(p_defs);
- /* get the number of child device */
- child_device_num = (block_size - sizeof(*p_defs)) /
- p_defs->child_dev_size;
- count = 0;
- for (i = 0; i < child_device_num; i++) {
- child = &child_device_ptr(p_defs, i)->old;
- if (!child->device_type) {
- /* skip the device block if device type is invalid */
- continue;
- }
+
+ for (i = 0, count = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ child = dev_priv->vbt.child_dev + i;
+
if (child->slave_addr != SLAVE_ADDR1 &&
child->slave_addr != SLAVE_ADDR2) {
/*
@@ -514,20 +475,20 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
child->slave_addr,
(child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
- p_mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
- if (!p_mapping->initialized) {
- p_mapping->dvo_port = child->dvo_port;
- p_mapping->slave_addr = child->slave_addr;
- p_mapping->dvo_wiring = child->dvo_wiring;
- p_mapping->ddc_pin = child->ddc_pin;
- p_mapping->i2c_pin = child->i2c_pin;
- p_mapping->initialized = 1;
+ mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
+ if (!mapping->initialized) {
+ mapping->dvo_port = child->dvo_port;
+ mapping->slave_addr = child->slave_addr;
+ mapping->dvo_wiring = child->dvo_wiring;
+ mapping->ddc_pin = child->ddc_pin;
+ mapping->i2c_pin = child->i2c_pin;
+ mapping->initialized = 1;
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
- p_mapping->dvo_port,
- p_mapping->slave_addr,
- p_mapping->dvo_wiring,
- p_mapping->ddc_pin,
- p_mapping->i2c_pin);
+ mapping->dvo_port,
+ mapping->slave_addr,
+ mapping->dvo_wiring,
+ mapping->ddc_pin,
+ mapping->i2c_pin);
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
@@ -545,7 +506,6 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
/* No SDVO device info is found */
DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
}
- return;
}
static void
@@ -577,7 +537,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
{
const struct bdb_edp *edp;
const struct edp_power_seq *edp_pps;
- const struct edp_link_params *edp_link_params;
+ const struct edp_fast_link_params *edp_link_params;
int panel_type = dev_priv->vbt.panel_type;
edp = find_section(bdb, BDB_EDP);
@@ -601,7 +561,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
/* Get the eDP sequencing and link info */
edp_pps = &edp->power_seqs[panel_type];
- edp_link_params = &edp->link_params[panel_type];
+ edp_link_params = &edp->fast_link_params[panel_type];
dev_priv->vbt.edp.pps = *edp_pps;
@@ -676,8 +636,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
uint8_t vswing;
/* Don't read from VBT if module parameter has valid value*/
- if (i915.edp_vswing) {
- dev_priv->vbt.edp.low_vswing = i915.edp_vswing == 1;
+ if (i915_modparams.edp_vswing) {
+ dev_priv->vbt.edp.low_vswing =
+ i915_modparams.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
dev_priv->vbt.edp.low_vswing = vswing == 0;
@@ -730,6 +691,48 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
}
+static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
+ u16 version, enum port port)
+{
+ if (!dev_priv->vbt.dsi.config->dual_link || version < 197) {
+ dev_priv->vbt.dsi.bl_ports = BIT(port);
+ if (dev_priv->vbt.dsi.config->cabc_supported)
+ dev_priv->vbt.dsi.cabc_ports = BIT(port);
+
+ return;
+ }
+
+ switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
+ case DL_DCS_PORT_A:
+ dev_priv->vbt.dsi.bl_ports = BIT(PORT_A);
+ break;
+ case DL_DCS_PORT_C:
+ dev_priv->vbt.dsi.bl_ports = BIT(PORT_C);
+ break;
+ default:
+ case DL_DCS_PORT_A_AND_C:
+ dev_priv->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+ break;
+ }
+
+ if (!dev_priv->vbt.dsi.config->cabc_supported)
+ return;
+
+ switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
+ case DL_DCS_PORT_A:
+ dev_priv->vbt.dsi.cabc_ports = BIT(PORT_A);
+ break;
+ case DL_DCS_PORT_C:
+ dev_priv->vbt.dsi.cabc_ports = BIT(PORT_C);
+ break;
+ default:
+ case DL_DCS_PORT_A_AND_C:
+ dev_priv->vbt.dsi.cabc_ports =
+ BIT(PORT_A) | BIT(PORT_C);
+ break;
+ }
+}
+
static void
parse_mipi_config(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
@@ -738,9 +741,10 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
const struct mipi_config *config;
const struct mipi_pps_data *pps;
int panel_type = dev_priv->vbt.panel_type;
+ enum port port;
/* parse MIPI blocks only if LFP type is MIPI */
- if (!intel_bios_is_dsi_present(dev_priv, NULL))
+ if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
/* Initialize this to undefined indicating no generic MIPI support */
@@ -781,15 +785,7 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
return;
}
- /*
- * These fields are introduced from the VBT version 197 onwards,
- * so making sure that these bits are set zero in the previous
- * versions.
- */
- if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
- dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
- dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
- }
+ parse_dsi_backlight_ports(dev_priv, bdb->version, port);
/* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
@@ -1110,10 +1106,26 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
}
}
+static const u8 cnp_ddc_pin_map[] = {
+ [DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
+ [DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
+ [DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
+ [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
+};
+
+static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
+{
+ if (HAS_PCH_CNP(dev_priv) &&
+ vbt_pin > 0 && vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map))
+ return cnp_ddc_pin_map[vbt_pin];
+
+ return vbt_pin;
+}
+
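The remap above only applies on a CNP PCH and only for non-zero VBT pin values that index into the table; anything else, including 0 meaning no pin, passes through unchanged. A tiny usage sketch on an assumed CNP system:

	u8 pin = map_ddc_pin(dev_priv, DDC_BUS_DDI_B);	/* -> GMBUS_PIN_1_BXT on CNP */
	u8 raw = map_ddc_pin(dev_priv, 0);		/* 0 is left alone: no DDC pin */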
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
- const struct bdb_header *bdb)
+ u8 bdb_version)
{
- union child_device_config *it, *child = NULL;
+ struct child_device_config *it, *child = NULL;
struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
uint8_t hdmi_level_shift;
int i, j;
@@ -1141,7 +1153,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (dvo_ports[port][j] == -1)
break;
- if (it->common.dvo_port == dvo_ports[port][j]) {
+ if (it->dvo_port == dvo_ports[port][j]) {
if (child) {
DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
port_name(port));
@@ -1154,14 +1166,21 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (!child)
return;
- aux_channel = child->common.aux_channel;
- ddc_pin = child->common.ddc_pin;
+ aux_channel = child->aux_channel;
+ ddc_pin = child->ddc_pin;
+
+ is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+ is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+ is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT;
+ is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
+ is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
- is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
- is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
- is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
- is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
- is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+ if (port == PORT_A && is_dvi) {
+ DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
+ is_hdmi ? "/HDMI" : "");
+ is_dvi = false;
+ is_hdmi = false;
+ }
if (port == PORT_A && is_dvi) {
DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
@@ -1195,16 +1214,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
if (is_dvi) {
- info->alternate_ddc_pin = ddc_pin;
-
- /*
- * All VBTs that we got so far for B Stepping has this
- * information wrong for Port D. So, let's just ignore for now.
- */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0) &&
- port == PORT_D) {
- info->alternate_ddc_pin = 0;
- }
+ info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin);
sanitize_ddc_pin(dev_priv, port);
}
@@ -1215,9 +1225,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
sanitize_aux_ch(dev_priv, port);
}
- if (bdb->version >= 158) {
+ if (bdb_version >= 158) {
/* The VBT HDMI level shift values match the table we have. */
- hdmi_level_shift = child->raw[7] & 0xF;
+ hdmi_level_shift = child->hdmi_level_shifter_value;
DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
port_name(port),
hdmi_level_shift);
@@ -1225,18 +1235,17 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
}
/* Parse the I_boost config for SKL and above */
- if (bdb->version >= 196 && child->common.iboost) {
- info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
+ if (bdb_version >= 196 && child->iboost) {
+ info->dp_boost_level = translate_iboost(child->dp_iboost_level);
DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
port_name(port), info->dp_boost_level);
- info->hdmi_boost_level = translate_iboost(child->common.iboost_level >> 4);
+ info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level);
DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
port_name(port), info->hdmi_boost_level);
}
}
-static void parse_ddi_ports(struct drm_i915_private *dev_priv,
- const struct bdb_header *bdb)
+static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
{
enum port port;
@@ -1246,79 +1255,86 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
if (!dev_priv->vbt.child_dev_num)
return;
- if (bdb->version < 155)
+ if (bdb_version < 155)
return;
for (port = PORT_A; port < I915_MAX_PORTS; port++)
- parse_ddi_port(dev_priv, port, bdb);
+ parse_ddi_port(dev_priv, port, bdb_version);
}
static void
-parse_device_mapping(struct drm_i915_private *dev_priv,
- const struct bdb_header *bdb)
+parse_general_definitions(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
{
- const struct bdb_general_definitions *p_defs;
- const union child_device_config *p_child;
- union child_device_config *child_dev_ptr;
+ const struct bdb_general_definitions *defs;
+ const struct child_device_config *child;
int i, child_device_num, count;
u8 expected_size;
u16 block_size;
+ int bus_pin;
- p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
- if (!p_defs) {
+ defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!defs) {
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
+
+ block_size = get_blocksize(defs);
+ if (block_size < sizeof(*defs)) {
+ DRM_DEBUG_KMS("General definitions block too small (%u)\n",
+ block_size);
+ return;
+ }
+
+ bus_pin = defs->crt_ddc_gmbus_pin;
+ DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+ if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
+ dev_priv->vbt.crt_ddc_pin = bus_pin;
+
if (bdb->version < 106) {
expected_size = 22;
} else if (bdb->version < 111) {
expected_size = 27;
} else if (bdb->version < 195) {
- BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
- expected_size = sizeof(struct old_child_dev_config);
+ expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
} else if (bdb->version == 195) {
expected_size = 37;
} else if (bdb->version <= 197) {
expected_size = 38;
} else {
expected_size = 38;
- BUILD_BUG_ON(sizeof(*p_child) < 38);
+ BUILD_BUG_ON(sizeof(*child) < 38);
DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
bdb->version, expected_size);
}
/* Flag an error for unexpected size, but continue anyway. */
- if (p_defs->child_dev_size != expected_size)
+ if (defs->child_dev_size != expected_size)
DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
- p_defs->child_dev_size, expected_size, bdb->version);
+ defs->child_dev_size, expected_size, bdb->version);
/* The legacy sized child device config is the minimum we need. */
- if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
+ if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
DRM_DEBUG_KMS("Child device config size %u is too small.\n",
- p_defs->child_dev_size);
+ defs->child_dev_size);
return;
}
- /* get the block size of general definitions */
- block_size = get_blocksize(p_defs);
/* get the number of child device */
- child_device_num = (block_size - sizeof(*p_defs)) /
- p_defs->child_dev_size;
+ child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
count = 0;
/* get the number of child device that is present */
for (i = 0; i < child_device_num; i++) {
- p_child = child_device_ptr(p_defs, i);
- if (!p_child->common.device_type) {
- /* skip the device block if device type is invalid */
+ child = child_device_ptr(defs, i);
+ if (!child->device_type)
continue;
- }
count++;
}
if (!count) {
DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
return;
}
- dev_priv->vbt.child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
+ dev_priv->vbt.child_dev = kcalloc(count, sizeof(*child), GFP_KERNEL);
if (!dev_priv->vbt.child_dev) {
DRM_DEBUG_KMS("No memory space for child device\n");
return;
@@ -1327,37 +1343,19 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
dev_priv->vbt.child_dev_num = count;
count = 0;
for (i = 0; i < child_device_num; i++) {
- p_child = child_device_ptr(p_defs, i);
- if (!p_child->common.device_type) {
- /* skip the device block if device type is invalid */
+ child = child_device_ptr(defs, i);
+ if (!child->device_type)
continue;
- }
-
- child_dev_ptr = dev_priv->vbt.child_dev + count;
- count++;
/*
* Copy as much as we know (sizeof) and is available
* (child_dev_size) of the child device. Accessing the data must
* depend on VBT version.
*/
- memcpy(child_dev_ptr, p_child,
- min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
-
- /*
- * copied full block, now init values when they are not
- * available in current version
- */
- if (bdb->version < 196) {
- /* Set default values for bits added from v196 */
- child_dev_ptr->common.iboost = 0;
- child_dev_ptr->common.hpd_invert = 0;
- }
-
- if (bdb->version < 192)
- child_dev_ptr->common.lspcon = 0;
+ memcpy(dev_priv->vbt.child_dev + count, child,
+ min_t(size_t, defs->child_dev_size, sizeof(*child)));
+ count++;
}
- return;
}
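The copy loop above clamps every copy to min(child_dev_size, sizeof(*child)): fields that a newer VBT defines beyond the driver's struct are simply dropped, while an older, shorter VBT leaves the tail of the kcalloc'ed destination zeroed. A sketch of that rule with hypothetical sizes:

	size_t vbt_size = defs->child_dev_size;			/* e.g. 27 bytes from an old VBT */
	size_t drv_size = sizeof(struct child_device_config);	/* >= 38 bytes in the driver */
	size_t copied   = min(vbt_size, drv_size);		/* 27: the newer fields stay zero */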
/* Common defaults which may be overridden by VBT. */
@@ -1538,14 +1536,15 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
parse_lfp_panel_data(dev_priv, bdb);
parse_lfp_backlight(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
- parse_sdvo_device_mapping(dev_priv, bdb);
- parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
parse_edp(dev_priv, bdb);
parse_psr(dev_priv, bdb);
parse_mipi_config(dev_priv, bdb);
parse_mipi_sequence(dev_priv, bdb);
- parse_ddi_ports(dev_priv, bdb);
+
+ /* Further processing on pre-parsed data */
+ parse_sdvo_device_mapping(dev_priv, bdb->version);
+ parse_ddi_ports(dev_priv, bdb->version);
out:
if (!vbt) {
@@ -1566,7 +1565,7 @@ out:
*/
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
{
- union child_device_config *p_child;
+ const struct child_device_config *child;
int i;
if (!dev_priv->vbt.int_tv_support)
@@ -1576,11 +1575,11 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
return true;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- p_child = dev_priv->vbt.child_dev + i;
+ child = dev_priv->vbt.child_dev + i;
/*
* If the device type is not TV, continue.
*/
- switch (p_child->old.device_type) {
+ switch (child->device_type) {
case DEVICE_TYPE_INT_TV:
case DEVICE_TYPE_TV:
case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
@@ -1591,7 +1590,7 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
/* Only when the addin_offset is non-zero, it is regarded
* as present.
*/
- if (p_child->old.addin_offset)
+ if (child->addin_offset)
return true;
}
@@ -1608,14 +1607,14 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
*/
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
{
+ const struct child_device_config *child;
int i;
if (!dev_priv->vbt.child_dev_num)
return true;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- union child_device_config *uchild = dev_priv->vbt.child_dev + i;
- struct old_child_dev_config *child = &uchild->old;
+ child = dev_priv->vbt.child_dev + i;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@@ -1657,6 +1656,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
*/
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
{
+ const struct child_device_config *child;
static const struct {
u16 dp, hdmi;
} port_mapping[] = {
@@ -1675,12 +1675,12 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- const union child_device_config *p_child =
- &dev_priv->vbt.child_dev[i];
- if ((p_child->common.dvo_port == port_mapping[port].dp ||
- p_child->common.dvo_port == port_mapping[port].hdmi) &&
- (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
- DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+ child = dev_priv->vbt.child_dev + i;
+
+ if ((child->dvo_port == port_mapping[port].dp ||
+ child->dvo_port == port_mapping[port].hdmi) &&
+ (child->device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
return true;
}
@@ -1696,7 +1696,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
*/
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
- union child_device_config *p_child;
+ const struct child_device_config *child;
static const short port_mapping[] = {
[PORT_B] = DVO_PORT_DPB,
[PORT_C] = DVO_PORT_DPC,
@@ -1712,10 +1712,10 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- p_child = dev_priv->vbt.child_dev + i;
+ child = dev_priv->vbt.child_dev + i;
- if (p_child->common.dvo_port == port_mapping[port] &&
- (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
+ if (child->dvo_port == port_mapping[port] &&
+ (child->device_type & DEVICE_TYPE_eDP_BITS) ==
(DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
return true;
}
@@ -1723,7 +1723,7 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
}
-static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
+static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
enum port port)
{
static const struct {
@@ -1742,16 +1742,16 @@ static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
return false;
- if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
+ if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
(DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
return false;
- if (p_child->common.dvo_port == port_mapping[port].dp)
+ if (child->dvo_port == port_mapping[port].dp)
return true;
/* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
- if (p_child->common.dvo_port == port_mapping[port].hdmi &&
- p_child->common.aux_channel != 0)
+ if (child->dvo_port == port_mapping[port].hdmi &&
+ child->aux_channel != 0)
return true;
return false;
@@ -1760,13 +1760,13 @@ static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
enum port port)
{
+ const struct child_device_config *child;
int i;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- const union child_device_config *p_child =
- &dev_priv->vbt.child_dev[i];
+ child = dev_priv->vbt.child_dev + i;
- if (child_dev_is_dp_dual_mode(p_child, port))
+ if (child_dev_is_dp_dual_mode(child, port))
return true;
}
@@ -1783,17 +1783,17 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
enum port *port)
{
- union child_device_config *p_child;
+ const struct child_device_config *child;
u8 dvo_port;
int i;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- p_child = dev_priv->vbt.child_dev + i;
+ child = dev_priv->vbt.child_dev + i;
- if (!(p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT))
+ if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- dvo_port = p_child->common.dvo_port;
+ dvo_port = child->dvo_port;
switch (dvo_port) {
case DVO_PORT_MIPIA:
@@ -1823,16 +1823,19 @@ bool
intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port)
{
+ const struct child_device_config *child;
int i;
if (WARN_ON_ONCE(!IS_GEN9_LP(dev_priv)))
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- if (!dev_priv->vbt.child_dev[i].common.hpd_invert)
+ child = dev_priv->vbt.child_dev + i;
+
+ if (!child->hpd_invert)
continue;
- switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
+ switch (child->dvo_port) {
case DVO_PORT_DPA:
case DVO_PORT_HDMIA:
if (port == PORT_A)
@@ -1867,16 +1870,19 @@ bool
intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
enum port port)
{
+ const struct child_device_config *child;
int i;
if (!HAS_LSPCON(dev_priv))
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- if (!dev_priv->vbt.child_dev[i].common.lspcon)
+ child = dev_priv->vbt.child_dev + i;
+
+ if (!child->lspcon)
continue;
- switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
+ switch (child->dvo_port) {
case DVO_PORT_DPA:
case DVO_PORT_HDMIA:
if (port == PORT_A)
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 4e00e5cb9fa1..48e1ba01ccf8 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -64,7 +64,7 @@ static unsigned long wait_timeout(void)
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
- DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s, current seqno=%x, last=%x\n",
+ DRM_DEBUG_DRIVER("%s missed breadcrumb at %pS, irq posted? %s, current seqno=%x, last=%x\n",
engine->name, __builtin_return_address(0),
yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
&engine->irq_posted)),
@@ -74,9 +74,10 @@ static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
-static void intel_breadcrumbs_hangcheck(unsigned long data)
+static void intel_breadcrumbs_hangcheck(struct timer_list *t)
{
- struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+ struct intel_engine_cs *engine = from_timer(engine, t,
+ breadcrumbs.hangcheck);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
if (!b->irq_armed)
@@ -108,9 +109,10 @@ static void intel_breadcrumbs_hangcheck(unsigned long data)
}
}
-static void intel_breadcrumbs_fake_irq(unsigned long data)
+static void intel_breadcrumbs_fake_irq(struct timer_list *t)
{
- struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+ struct intel_engine_cs *engine = from_timer(engine, t,
+ breadcrumbs.fake_irq);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
/* The timer persists in case we cannot enable interrupts,
@@ -787,12 +789,8 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
spin_lock_init(&b->rb_lock);
spin_lock_init(&b->irq_lock);
- setup_timer(&b->fake_irq,
- intel_breadcrumbs_fake_irq,
- (unsigned long)engine);
- setup_timer(&b->hangcheck,
- intel_breadcrumbs_hangcheck,
- (unsigned long)engine);
+ timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
+ timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);
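Both conversions above follow the standard timer_setup()/from_timer() idiom: the callback now receives the struct timer_list pointer and recovers its owner with container_of() instead of an unsigned long cookie. The generic shape (a sketch, not i915 code):

	struct foo {
		struct timer_list poll;			/* timer embedded in its owner */
	};

	static void foo_poll(struct timer_list *t)
	{
		struct foo *f = from_timer(f, t, poll);	/* container_of() back to the owner */
		/* ... use f ... */
	}

	/* at init, for some struct foo *f, instead of setup_timer() with a cast cookie: */
	timer_setup(&f->poll, foo_poll, 0);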
/* Spawn a thread to provide a common bottom-half for all signals.
* As this is an asynchronous interface we cannot steal the current
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 1241e5891b29..b2a6d62b71c0 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -417,24 +417,21 @@ static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->cdclk = 540000;
}
-static int vlv_calc_cdclk(struct drm_i915_private *dev_priv,
- int max_pixclk)
+static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ?
333333 : 320000;
- int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
/*
* We seem to get an unstable or solid color picture at 200MHz.
* Not sure what's wrong. For now use 200MHz only when all pipes
* are off.
*/
- if (!IS_CHERRYVIEW(dev_priv) &&
- max_pixclk > freq_320*limit/100)
+ if (IS_VALLEYVIEW(dev_priv) && min_cdclk > freq_320)
return 400000;
- else if (max_pixclk > 266667*limit/100)
+ else if (min_cdclk > 266667)
return freq_320;
- else if (max_pixclk > 0)
+ else if (min_cdclk > 0)
return 266667;
else
return 200000;
@@ -506,7 +503,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
else
cmd = 0;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
@@ -516,7 +513,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
mutex_lock(&dev_priv->sb_lock);
@@ -593,7 +590,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
*/
cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
@@ -603,7 +600,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
@@ -612,13 +609,13 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
-static int bdw_calc_cdclk(int max_pixclk)
+static int bdw_calc_cdclk(int min_cdclk)
{
- if (max_pixclk > 540000)
+ if (min_cdclk > 540000)
return 675000;
- else if (max_pixclk > 450000)
+ else if (min_cdclk > 450000)
return 540000;
- else if (max_pixclk > 337500)
+ else if (min_cdclk > 337500)
return 450000;
else
return 337500;
@@ -659,10 +656,10 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
"trying to change cdclk frequency with cdclk not enabled\n"))
return;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv,
BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("failed to inform pcode about cdclk change\n");
return;
@@ -672,8 +669,12 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
val |= LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
+ /*
+ * According to the spec, it should be enough to poll for this 1 us.
+ * However, extensive testing shows that this can take longer.
+ */
if (wait_for_us(I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ LCPLL_CD_SOURCE_FCLK_DONE, 100))
DRM_ERROR("Switching to FCLK failed\n");
val = I915_READ(LCPLL_CTL);
@@ -711,9 +712,9 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
DRM_ERROR("Switching back to LCPLL failed\n");
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
@@ -724,23 +725,23 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
cdclk, dev_priv->cdclk.hw.cdclk);
}
-static int skl_calc_cdclk(int max_pixclk, int vco)
+static int skl_calc_cdclk(int min_cdclk, int vco)
{
if (vco == 8640000) {
- if (max_pixclk > 540000)
+ if (min_cdclk > 540000)
return 617143;
- else if (max_pixclk > 432000)
+ else if (min_cdclk > 432000)
return 540000;
- else if (max_pixclk > 308571)
+ else if (min_cdclk > 308571)
return 432000;
else
return 308571;
} else {
- if (max_pixclk > 540000)
+ if (min_cdclk > 540000)
return 675000;
- else if (max_pixclk > 450000)
+ else if (min_cdclk > 450000)
return 540000;
- else if (max_pixclk > 337500)
+ else if (min_cdclk > 337500)
return 450000;
else
return 337500;
@@ -927,12 +928,12 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
WARN_ON((cdclk == 24000) != (vco == 0));
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
ret);
@@ -974,9 +975,9 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
POSTING_READ(CDCLK_CTL);
/* inform PCU of the change */
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
}
@@ -1075,31 +1076,25 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
skl_set_cdclk(dev_priv, &cdclk_state);
}
-static int bxt_calc_cdclk(int max_pixclk)
+static int bxt_calc_cdclk(int min_cdclk)
{
- if (max_pixclk > 576000)
+ if (min_cdclk > 576000)
return 624000;
- else if (max_pixclk > 384000)
+ else if (min_cdclk > 384000)
return 576000;
- else if (max_pixclk > 288000)
+ else if (min_cdclk > 288000)
return 384000;
- else if (max_pixclk > 144000)
+ else if (min_cdclk > 144000)
return 288000;
else
return 144000;
}
-static int glk_calc_cdclk(int max_pixclk)
+static int glk_calc_cdclk(int min_cdclk)
{
- /*
- * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
- * as a temporary workaround. Use a higher cdclk instead. (Note that
- * intel_compute_max_dotclk() limits the max pixel clock to 99% of max
- * cdclk.)
- */
- if (max_pixclk > DIV_ROUND_UP(2 * 158400 * 99, 100))
+ if (min_cdclk > 158400)
return 316800;
- else if (max_pixclk > DIV_ROUND_UP(2 * 79200 * 99, 100))
+ else if (min_cdclk > 79200)
return 158400;
else
return 79200;
@@ -1273,10 +1268,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
}
/* Inform power controller of upcoming frequency change */
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
0x80000000);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
@@ -1305,10 +1300,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
I915_WRITE(CDCLK_CTL, val);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
DIV_ROUND_UP(cdclk, 25000));
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
@@ -1420,11 +1415,11 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
bxt_set_cdclk(dev_priv, &cdclk_state);
}
-static int cnl_calc_cdclk(int max_pixclk)
+static int cnl_calc_cdclk(int min_cdclk)
{
- if (max_pixclk > 336000)
+ if (min_cdclk > 336000)
return 528000;
- else if (max_pixclk > 168000)
+ else if (min_cdclk > 168000)
return 336000;
else
return 168000;
@@ -1523,12 +1518,12 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
u32 val, divider, pcu_ack;
int ret;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
ret);
@@ -1580,9 +1575,9 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
I915_WRITE(CDCLK_CTL, val);
/* inform PCU of the change */
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
intel_update_cdclk(dev_priv);
}
@@ -1732,104 +1727,119 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
dev_priv->display.set_cdclk(dev_priv, cdclk_state);
}
-static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
- int pixel_rate)
+static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
+ int pixel_rate)
+{
+ if (INTEL_GEN(dev_priv) >= 10)
+ /*
+ * FIXME: Switch to DIV_ROUND_UP(pixel_rate, 2)
+ * once DDI clock voltage requirements are
+ * handled correctly.
+ */
+ return pixel_rate;
+ else if (IS_GEMINILAKE(dev_priv))
+ /*
+ * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
+ * as a temporary workaround. Use a higher cdclk instead. (Note that
+ * intel_compute_max_dotclk() limits the max pixel clock to 99% of max
+ * cdclk.)
+ */
+ return DIV_ROUND_UP(pixel_rate * 100, 2 * 99);
+ else if (IS_GEN9(dev_priv) ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return pixel_rate;
+ else if (IS_CHERRYVIEW(dev_priv))
+ return DIV_ROUND_UP(pixel_rate * 100, 95);
+ else
+ return DIV_ROUND_UP(pixel_rate * 100, 90);
+}
+
+int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
to_i915(crtc_state->base.crtc->dev);
+ int min_cdclk;
+
+ if (!crtc_state->base.enable)
+ return 0;
+
+ min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
- pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+ min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);
/* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
* audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
* there may be audio corruption or screen corruption." This cdclk
- * restriction for GLK is 316.8 MHz and since GLK can output two
- * pixels per clock, the pixel rate becomes 2 * 316.8 MHz.
+ * restriction for GLK is 316.8 MHz.
*/
if (intel_crtc_has_dp_encoder(crtc_state) &&
crtc_state->has_audio &&
crtc_state->port_clock >= 540000 &&
crtc_state->lane_count == 4) {
- if (IS_CANNONLAKE(dev_priv))
- pixel_rate = max(316800, pixel_rate);
- else if (IS_GEMINILAKE(dev_priv))
- pixel_rate = max(2 * 316800, pixel_rate);
- else
- pixel_rate = max(432000, pixel_rate);
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
+ /* Display WA #1145: glk,cnl */
+ min_cdclk = max(316800, min_cdclk);
+ } else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv)) {
+ /* Display WA #1144: skl,bxt */
+ min_cdclk = max(432000, min_cdclk);
+ }
}
/* According to BSpec, "The CD clock frequency must be at least twice
* the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
- * The check for GLK has to be adjusted as the platform can output
- * two pixels per clock.
*/
- if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) {
- if (IS_GEMINILAKE(dev_priv))
- pixel_rate = max(2 * 2 * 96000, pixel_rate);
- else
- pixel_rate = max(2 * 96000, pixel_rate);
+ if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
+ min_cdclk = max(2 * 96000, min_cdclk);
+
+ if (min_cdclk > dev_priv->max_cdclk_freq) {
+ DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
+ min_cdclk, dev_priv->max_cdclk_freq);
+ return -EINVAL;
}
- return pixel_rate;
+ return min_cdclk;
}
-/* compute the max rate for new configuration */
-static int intel_max_pixel_rate(struct drm_atomic_state *state)
+static int intel_compute_min_cdclk(struct drm_atomic_state *state)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
+ struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
- unsigned int max_pixel_rate = 0, i;
+ int min_cdclk, i;
enum pipe pipe;
- memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
- sizeof(intel_state->min_pixclk));
-
- for_each_new_crtc_in_state(state, crtc, cstate, i) {
- int pixel_rate;
-
- crtc_state = to_intel_crtc_state(cstate);
- if (!crtc_state->base.enable) {
- intel_state->min_pixclk[i] = 0;
- continue;
- }
-
- pixel_rate = crtc_state->pixel_rate;
+ memcpy(intel_state->min_cdclk, dev_priv->min_cdclk,
+ sizeof(intel_state->min_cdclk));
- if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
- pixel_rate =
- bdw_adjust_min_pipe_pixel_rate(crtc_state,
- pixel_rate);
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
+ if (min_cdclk < 0)
+ return min_cdclk;
- intel_state->min_pixclk[i] = pixel_rate;
+ intel_state->min_cdclk[i] = min_cdclk;
}
+ min_cdclk = 0;
for_each_pipe(dev_priv, pipe)
- max_pixel_rate = max(intel_state->min_pixclk[pipe],
- max_pixel_rate);
+ min_cdclk = max(intel_state->min_cdclk[pipe], min_cdclk);
- return max_pixel_rate;
+ return min_cdclk;
}
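As a worked example of the two helpers above (numbers are illustrative): on Broadwell the pixel rate passes through intel_pixel_rate_to_cdclk() unchanged, the IPS guardband then inflates it, and bdw_calc_cdclk() picks the next step up:

	int min_cdclk, cdclk;

	min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, 430000);	/* BDW: 430000 kHz */
	min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);			/* IPS guardband: 452632 */
	cdclk = bdw_calc_cdclk(min_cdclk);				/* 452632 > 450000 -> 540000 */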
static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
- int max_pixclk = intel_max_pixel_rate(state);
- struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
- int cdclk;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ int min_cdclk, cdclk;
- cdclk = vlv_calc_cdclk(dev_priv, max_pixclk);
+ min_cdclk = intel_compute_min_cdclk(state);
+ if (min_cdclk < 0)
+ return min_cdclk;
- if (cdclk > dev_priv->max_cdclk_freq) {
- DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
- cdclk, dev_priv->max_cdclk_freq);
- return -EINVAL;
- }
+ cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
intel_state->cdclk.logical.cdclk = cdclk;
@@ -1847,22 +1857,18 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- int max_pixclk = intel_max_pixel_rate(state);
- int cdclk;
+ int min_cdclk, cdclk;
+
+ min_cdclk = intel_compute_min_cdclk(state);
+ if (min_cdclk < 0)
+ return min_cdclk;
/*
* FIXME should also account for plane ratio
* once 64bpp pixel formats are supported.
*/
- cdclk = bdw_calc_cdclk(max_pixclk);
-
- if (cdclk > dev_priv->max_cdclk_freq) {
- DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
- cdclk, dev_priv->max_cdclk_freq);
- return -EINVAL;
- }
+ cdclk = bdw_calc_cdclk(min_cdclk);
intel_state->cdclk.logical.cdclk = cdclk;
@@ -1880,10 +1886,13 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = to_i915(state->dev);
- const int max_pixclk = intel_max_pixel_rate(state);
- int cdclk, vco;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ int min_cdclk, cdclk, vco;
+
+ min_cdclk = intel_compute_min_cdclk(state);
+ if (min_cdclk < 0)
+ return min_cdclk;
vco = intel_state->cdclk.logical.vco;
if (!vco)
@@ -1893,13 +1902,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
* FIXME should also account for plane ratio
* once 64bpp pixel formats are supported.
*/
- cdclk = skl_calc_cdclk(max_pixclk, vco);
-
- if (cdclk > dev_priv->max_cdclk_freq) {
- DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
- cdclk, dev_priv->max_cdclk_freq);
- return -EINVAL;
- }
+ cdclk = skl_calc_cdclk(min_cdclk, vco);
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
@@ -1920,25 +1923,21 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
- int max_pixclk = intel_max_pixel_rate(state);
- struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
- int cdclk, vco;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ int min_cdclk, cdclk, vco;
+
+ min_cdclk = intel_compute_min_cdclk(state);
+ if (min_cdclk < 0)
+ return min_cdclk;
if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(max_pixclk);
+ cdclk = glk_calc_cdclk(min_cdclk);
vco = glk_de_pll_vco(dev_priv, cdclk);
} else {
- cdclk = bxt_calc_cdclk(max_pixclk);
+ cdclk = bxt_calc_cdclk(min_cdclk);
vco = bxt_de_pll_vco(dev_priv, cdclk);
}
- if (cdclk > dev_priv->max_cdclk_freq) {
- DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
- cdclk, dev_priv->max_cdclk_freq);
- return -EINVAL;
- }
-
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
@@ -1964,19 +1963,15 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
- int max_pixclk = intel_max_pixel_rate(state);
- int cdclk, vco;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ int min_cdclk, cdclk, vco;
- cdclk = cnl_calc_cdclk(max_pixclk);
- vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
+ min_cdclk = intel_compute_min_cdclk(state);
+ if (min_cdclk < 0)
+ return min_cdclk;
- if (cdclk > dev_priv->max_cdclk_freq) {
- DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
- cdclk, dev_priv->max_cdclk_freq);
- return -EINVAL;
- }
+ cdclk = cnl_calc_cdclk(min_cdclk);
+ vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
@@ -1999,14 +1994,21 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
- if (IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10)
+ /*
+ * FIXME: Allow '2 * max_cdclk_freq'
+ * once DDI clock voltage requirements are
+ * handled correctly.
+ */
+ return max_cdclk_freq;
+ else if (IS_GEMINILAKE(dev_priv))
/*
* FIXME: Limiting to 99% as a temporary workaround. See
- * glk_calc_cdclk() for details.
+ * intel_min_cdclk() for details.
*/
return 2 * max_cdclk_freq * 99 / 100;
- else if (INTEL_INFO(dev_priv)->gen >= 9 ||
- IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ else if (IS_GEN9(dev_priv) ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return max_cdclk_freq;
else if (IS_CHERRYVIEW(dev_priv))
return max_cdclk_freq*95/100;
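The 95%/90% factors here are the inverse of the scaling in intel_pixel_rate_to_cdclk(), so the maximum dotclock advertised to userspace and the minimum cdclk derived from a mode agree. For example on Cherryview (illustrative numbers):

	int max_dotclk = 400000 * 95 / 100;			/* 380000 kHz advertised */
	int min_cdclk  = DIV_ROUND_UP(380000 * 100, 95);	/* back to exactly 400000 kHz */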
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 70e0ff41070c..437339f5d098 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -143,7 +143,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
+ const struct intel_crtc_state *crtc_state,
int mode)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -194,28 +194,41 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
}
static void intel_disable_crt(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
}
static void pch_disable_crt(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
}
static void pch_post_disable_crt(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
intel_disable_crt(encoder, old_crtc_state, old_conn_state);
}
+static void hsw_disable_crt(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_crtc *crtc = old_crtc_state->base.crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ WARN_ON(!intel_crtc->config->has_pch_encoder);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+}
+
static void hsw_post_disable_crt(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -225,11 +238,63 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
lpt_disable_iclkip(dev_priv);
intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
+
+ WARN_ON(!old_crtc_state->has_pch_encoder);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+}
+
+static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_crtc *crtc = pipe_config->base.crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ WARN_ON(!intel_crtc->config->has_pch_encoder);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
+}
+
+static void hsw_pre_enable_crt(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_crtc *crtc = pipe_config->base.crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+
+ WARN_ON(!intel_crtc->config->has_pch_encoder);
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+ dev_priv->display.fdi_link_train(intel_crtc, pipe_config);
+}
+
+static void hsw_enable_crt(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_crtc *crtc = pipe_config->base.crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+
+ WARN_ON(!intel_crtc->config->has_pch_encoder);
+
+ intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
+
+ intel_wait_for_vblank(dev_priv, pipe);
+ intel_wait_for_vblank(dev_priv, pipe);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
static void intel_enable_crt(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
}
@@ -279,10 +344,25 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ return true;
+}
+
+static bool pch_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ pipe_config->has_pch_encoder = true;
+
+ return true;
+}
+
+static bool hsw_crt_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (HAS_PCH_SPLIT(dev_priv))
- pipe_config->has_pch_encoder = true;
+ pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev_priv)) {
@@ -295,8 +375,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
}
/* FDI must always be 2.7 GHz */
- if (HAS_DDI(dev_priv))
- pipe_config->port_clock = 135000 * 2;
+ pipe_config->port_clock = 135000 * 2;
return true;
}
@@ -712,7 +791,7 @@ intel_crt_detect(struct drm_connector *connector,
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
- if (I915_HAS_HOTPLUG(dev_priv) && !i915.load_detect_test) {
+ if (I915_HAS_HOTPLUG(dev_priv) && !i915_modparams.load_detect_test) {
status = connector_status_disconnected;
goto out;
}
@@ -730,7 +809,7 @@ intel_crt_detect(struct drm_connector *connector,
else if (INTEL_GEN(dev_priv) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
- else if (i915.load_detect_test)
+ else if (i915_modparams.load_detect_test)
status = connector_status_disconnected;
else
status = connector_status_unknown;
@@ -890,26 +969,33 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
- crt->base.compute_config = intel_crt_compute_config;
- if (HAS_PCH_SPLIT(dev_priv)) {
- crt->base.disable = pch_disable_crt;
- crt->base.post_disable = pch_post_disable_crt;
- } else {
- crt->base.disable = intel_disable_crt;
- }
- crt->base.enable = intel_enable_crt;
if (I915_HAS_HOTPLUG(dev_priv) &&
!dmi_check_system(intel_spurious_crt_detect))
crt->base.hpd_pin = HPD_CRT;
+
if (HAS_DDI(dev_priv)) {
crt->base.port = PORT_E;
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
+ crt->base.compute_config = hsw_crt_compute_config;
+ crt->base.pre_pll_enable = hsw_pre_pll_enable_crt;
+ crt->base.pre_enable = hsw_pre_enable_crt;
+ crt->base.enable = hsw_enable_crt;
+ crt->base.disable = hsw_disable_crt;
crt->base.post_disable = hsw_post_disable_crt;
} else {
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ crt->base.compute_config = pch_crt_compute_config;
+ crt->base.disable = pch_disable_crt;
+ crt->base.post_disable = pch_post_disable_crt;
+ } else {
+ crt->base.compute_config = intel_crt_compute_config;
+ crt->base.disable = intel_disable_crt;
+ }
crt->base.port = PORT_NONE;
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
+ crt->base.enable = intel_enable_crt;
}
intel_connector->get_hw_state = intel_connector_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 92c1f8e166dc..da9de47562b8 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -52,10 +52,6 @@ MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
-#define FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware"
-
-
-
#define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
@@ -252,8 +248,14 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
}
fw_size = dev_priv->csr.dmc_fw_size;
+ assert_rpm_wakelock_held(dev_priv);
+
+ preempt_disable();
+
for (i = 0; i < fw_size; i++)
- I915_WRITE(CSR_PROGRAM(i), payload[i]);
+ I915_WRITE_FW(CSR_PROGRAM(i), payload[i]);
+
+ preempt_enable();
for (i = 0; i < dev_priv->csr.mmio_count; i++) {
I915_WRITE(dev_priv->csr.mmioaddr[i],
@@ -285,7 +287,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
css_header = (struct intel_css_header *)fw->data;
if (sizeof(struct intel_css_header) !=
(css_header->header_len * 4)) {
- DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
+ DRM_ERROR("DMC firmware has wrong CSS header length "
+ "(%u bytes)\n",
(css_header->header_len * 4));
return NULL;
}
@@ -309,7 +312,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
if (csr->version != required_version) {
DRM_INFO("Refusing to load DMC firmware v%u.%u,"
- " please use v%u.%u [" FIRMWARE_URL "].\n",
+ " please use v%u.%u\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
CSR_VERSION_MAJOR(required_version),
@@ -324,7 +327,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
&fw->data[readcount];
if (sizeof(struct intel_package_header) !=
(package_header->header_len * 4)) {
- DRM_ERROR("Firmware has wrong package header length %u bytes\n",
+ DRM_ERROR("DMC firmware has wrong package header length "
+ "(%u bytes)\n",
(package_header->header_len * 4));
return NULL;
}
@@ -345,7 +349,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
dmc_offset = package_header->fw_info[i].offset;
}
if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
- DRM_ERROR("Firmware not supported for %c stepping\n",
+ DRM_ERROR("DMC firmware not supported for %c stepping\n",
si->stepping);
return NULL;
}
@@ -354,7 +358,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* Extract dmc_header information. */
dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
- DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
+ DRM_ERROR("DMC firmware has wrong dmc header length "
+ "(%u bytes)\n",
(dmc_header->header_len));
return NULL;
}
@@ -362,7 +367,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* Cache the dmc header info. */
if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
- DRM_ERROR("Firmware has wrong mmio count %u\n",
+ DRM_ERROR("DMC firmware has wrong mmio count %u\n",
dmc_header->mmio_count);
return NULL;
}
@@ -370,7 +375,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
for (i = 0; i < dmc_header->mmio_count; i++) {
if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
- DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
+ DRM_ERROR("DMC firmware has wrong mmio address 0x%x\n",
dmc_header->mmioaddr[i]);
return NULL;
}
@@ -381,7 +386,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
nbytes = dmc_header->fw_size * 4;
if (nbytes > CSR_MAX_FW_SIZE) {
- DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
+ DRM_ERROR("DMC firmware too big (%u bytes)\n", nbytes);
return NULL;
}
csr->dmc_fw_size = dmc_header->fw_size;
@@ -419,9 +424,11 @@ static void csr_load_work_fn(struct work_struct *work)
CSR_VERSION_MINOR(csr->version));
} else {
dev_notice(dev_priv->drm.dev,
- "Failed to load DMC firmware"
- " [" FIRMWARE_URL "],"
- " disabling runtime power management.\n");
+ "Failed to load DMC firmware %s."
+ " Disabling runtime power management.\n",
+ csr->fw_path);
+ dev_notice(dev_priv->drm.dev, "DMC firmware homepage: %s",
+ INTEL_UC_FIRMWARE_URL);
}
release_firmware(fw);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5e5fe03b638c..933c18fd4258 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -301,39 +301,38 @@ static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
};
struct bxt_ddi_buf_trans {
- u32 margin; /* swing value */
- u32 scale; /* scale value */
- u32 enable; /* scale enable */
- u32 deemphasis;
- bool default_index; /* true if the entry represents default value */
+ u8 margin; /* swing value */
+ u8 scale; /* scale value */
+ u8 enable; /* scale enable */
+ u8 deemphasis;
};
static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
/* Idx NT mV diff db */
- { 52, 0x9A, 0, 128, true }, /* 0: 400 0 */
- { 78, 0x9A, 0, 85, false }, /* 1: 400 3.5 */
- { 104, 0x9A, 0, 64, false }, /* 2: 400 6 */
- { 154, 0x9A, 0, 43, false }, /* 3: 400 9.5 */
- { 77, 0x9A, 0, 128, false }, /* 4: 600 0 */
- { 116, 0x9A, 0, 85, false }, /* 5: 600 3.5 */
- { 154, 0x9A, 0, 64, false }, /* 6: 600 6 */
- { 102, 0x9A, 0, 128, false }, /* 7: 800 0 */
- { 154, 0x9A, 0, 85, false }, /* 8: 800 3.5 */
- { 154, 0x9A, 1, 128, false }, /* 9: 1200 0 */
+ { 52, 0x9A, 0, 128, }, /* 0: 400 0 */
+ { 78, 0x9A, 0, 85, }, /* 1: 400 3.5 */
+ { 104, 0x9A, 0, 64, }, /* 2: 400 6 */
+ { 154, 0x9A, 0, 43, }, /* 3: 400 9.5 */
+ { 77, 0x9A, 0, 128, }, /* 4: 600 0 */
+ { 116, 0x9A, 0, 85, }, /* 5: 600 3.5 */
+ { 154, 0x9A, 0, 64, }, /* 6: 600 6 */
+ { 102, 0x9A, 0, 128, }, /* 7: 800 0 */
+ { 154, 0x9A, 0, 85, }, /* 8: 800 3.5 */
+ { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */
};
static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
/* Idx NT mV diff db */
- { 26, 0, 0, 128, false }, /* 0: 200 0 */
- { 38, 0, 0, 112, false }, /* 1: 200 1.5 */
- { 48, 0, 0, 96, false }, /* 2: 200 4 */
- { 54, 0, 0, 69, false }, /* 3: 200 6 */
- { 32, 0, 0, 128, false }, /* 4: 250 0 */
- { 48, 0, 0, 104, false }, /* 5: 250 1.5 */
- { 54, 0, 0, 85, false }, /* 6: 250 4 */
- { 43, 0, 0, 128, false }, /* 7: 300 0 */
- { 54, 0, 0, 101, false }, /* 8: 300 1.5 */
- { 48, 0, 0, 128, false }, /* 9: 300 0 */
+ { 26, 0, 0, 128, }, /* 0: 200 0 */
+ { 38, 0, 0, 112, }, /* 1: 200 1.5 */
+ { 48, 0, 0, 96, }, /* 2: 200 4 */
+ { 54, 0, 0, 69, }, /* 3: 200 6 */
+ { 32, 0, 0, 128, }, /* 4: 250 0 */
+ { 48, 0, 0, 104, }, /* 5: 250 1.5 */
+ { 54, 0, 0, 85, }, /* 6: 250 4 */
+ { 43, 0, 0, 128, }, /* 7: 300 0 */
+ { 54, 0, 0, 101, }, /* 8: 300 1.5 */
+ { 48, 0, 0, 128, }, /* 9: 300 0 */
};
/* BSpec has 2 recommended values - entries 0 and 8.
@@ -341,24 +340,24 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
*/
static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
/* Idx NT mV diff db */
- { 52, 0x9A, 0, 128, false }, /* 0: 400 0 */
- { 52, 0x9A, 0, 85, false }, /* 1: 400 3.5 */
- { 52, 0x9A, 0, 64, false }, /* 2: 400 6 */
- { 42, 0x9A, 0, 43, false }, /* 3: 400 9.5 */
- { 77, 0x9A, 0, 128, false }, /* 4: 600 0 */
- { 77, 0x9A, 0, 85, false }, /* 5: 600 3.5 */
- { 77, 0x9A, 0, 64, false }, /* 6: 600 6 */
- { 102, 0x9A, 0, 128, false }, /* 7: 800 0 */
- { 102, 0x9A, 0, 85, false }, /* 8: 800 3.5 */
- { 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */
+ { 52, 0x9A, 0, 128, }, /* 0: 400 0 */
+ { 52, 0x9A, 0, 85, }, /* 1: 400 3.5 */
+ { 52, 0x9A, 0, 64, }, /* 2: 400 6 */
+ { 42, 0x9A, 0, 43, }, /* 3: 400 9.5 */
+ { 77, 0x9A, 0, 128, }, /* 4: 600 0 */
+ { 77, 0x9A, 0, 85, }, /* 5: 600 3.5 */
+ { 77, 0x9A, 0, 64, }, /* 6: 600 6 */
+ { 102, 0x9A, 0, 128, }, /* 7: 800 0 */
+ { 102, 0x9A, 0, 85, }, /* 8: 800 3.5 */
+ { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */
};
struct cnl_ddi_buf_trans {
- u32 dw2_swing_sel;
- u32 dw7_n_scalar;
- u32 dw4_cursor_coeff;
- u32 dw4_post_cursor_2;
- u32 dw4_post_cursor_1;
+ u8 dw2_swing_sel;
+ u8 dw7_n_scalar;
+ u8 dw4_cursor_coeff;
+ u8 dw4_post_cursor_2;
+ u8 dw4_post_cursor_1;
};
/* Voltage Swing Programming for VccIO 0.85V for DP */
@@ -588,48 +587,29 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
}
}
-static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+static int skl_buf_trans_num_entries(enum port port, int n_entries)
{
- int n_hdmi_entries;
- int hdmi_level;
- int hdmi_default_entry;
-
- hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
-
- if (IS_GEN9_LP(dev_priv))
- return hdmi_level;
-
- if (IS_GEN9_BC(dev_priv)) {
- skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
- hdmi_default_entry = 8;
- } else if (IS_BROADWELL(dev_priv)) {
- n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_default_entry = 7;
- } else if (IS_HASWELL(dev_priv)) {
- n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
- hdmi_default_entry = 6;
- } else {
- WARN(1, "ddi translation table missing\n");
- n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- hdmi_default_entry = 7;
- }
-
- /* Choose a good default if VBT is badly populated */
- if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
- hdmi_level >= n_hdmi_entries)
- hdmi_level = hdmi_default_entry;
-
- return hdmi_level;
+ /* Only DDIA and DDIE can select the 10th register with DP */
+ if (port == PORT_A || port == PORT_E)
+ return min(n_entries, 10);
+ else
+ return min(n_entries, 9);
}
static const struct ddi_buf_trans *
intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
- int *n_entries)
+ enum port port, int *n_entries)
{
if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- return kbl_get_buf_trans_dp(dev_priv, n_entries);
+ const struct ddi_buf_trans *ddi_translations =
+ kbl_get_buf_trans_dp(dev_priv, n_entries);
+ *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+ return ddi_translations;
} else if (IS_SKYLAKE(dev_priv)) {
- return skl_get_buf_trans_dp(dev_priv, n_entries);
+ const struct ddi_buf_trans *ddi_translations =
+ skl_get_buf_trans_dp(dev_priv, n_entries);
+ *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+ return ddi_translations;
} else if (IS_BROADWELL(dev_priv)) {
*n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
return bdw_ddi_translations_dp;
@@ -644,10 +624,13 @@ intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
static const struct ddi_buf_trans *
intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv,
- int *n_entries)
+ enum port port, int *n_entries)
{
if (IS_GEN9_BC(dev_priv)) {
- return skl_get_buf_trans_edp(dev_priv, n_entries);
+ const struct ddi_buf_trans *ddi_translations =
+ skl_get_buf_trans_edp(dev_priv, n_entries);
+ *n_entries = skl_buf_trans_num_entries(port, *n_entries);
+ return ddi_translations;
} else if (IS_BROADWELL(dev_priv)) {
return bdw_get_buf_trans_edp(dev_priv, n_entries);
} else if (IS_HASWELL(dev_priv)) {
@@ -675,6 +658,154 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
return NULL;
}
+static const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
+ int *n_entries)
+{
+ if (IS_GEN9_BC(dev_priv)) {
+ return skl_get_buf_trans_hdmi(dev_priv, n_entries);
+ } else if (IS_BROADWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+ return bdw_ddi_translations_hdmi;
+ } else if (IS_HASWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+ return hsw_ddi_translations_hdmi;
+ }
+
+ *n_entries = 0;
+ return NULL;
+}
+
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
+ return bxt_ddi_translations_dp;
+}
+
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ if (dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
+ return bxt_ddi_translations_edp;
+ }
+
+ return bxt_get_buf_trans_dp(dev_priv, n_entries);
+}
+
+static const struct bxt_ddi_buf_trans *
+bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ *n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
+ return bxt_ddi_translations_hdmi;
+}
+
+static const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+
+ if (voltage == VOLTAGE_INFO_0_85V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
+ return cnl_ddi_translations_hdmi_0_85V;
+ } else if (voltage == VOLTAGE_INFO_0_95V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V);
+ return cnl_ddi_translations_hdmi_0_95V;
+ } else if (voltage == VOLTAGE_INFO_1_05V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V);
+ return cnl_ddi_translations_hdmi_1_05V;
+ } else {
+ *n_entries = 1; /* shut up gcc */
+ MISSING_CASE(voltage);
+ }
+ return NULL;
+}
+
+static const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+
+ if (voltage == VOLTAGE_INFO_0_85V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
+ return cnl_ddi_translations_dp_0_85V;
+ } else if (voltage == VOLTAGE_INFO_0_95V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V);
+ return cnl_ddi_translations_dp_0_95V;
+ } else if (voltage == VOLTAGE_INFO_1_05V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V);
+ return cnl_ddi_translations_dp_1_05V;
+ } else {
+ *n_entries = 1; /* shut up gcc */
+ MISSING_CASE(voltage);
+ }
+ return NULL;
+}
+
+static const struct cnl_ddi_buf_trans *
+cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
+{
+ u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+
+ if (dev_priv->vbt.edp.low_vswing) {
+ if (voltage == VOLTAGE_INFO_0_85V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
+ return cnl_ddi_translations_edp_0_85V;
+ } else if (voltage == VOLTAGE_INFO_0_95V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
+ return cnl_ddi_translations_edp_0_95V;
+ } else if (voltage == VOLTAGE_INFO_1_05V) {
+ *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V);
+ return cnl_ddi_translations_edp_1_05V;
+ } else {
+ *n_entries = 1; /* shut up gcc */
+ MISSING_CASE(voltage);
+ }
+ return NULL;
+ } else {
+ return cnl_get_buf_trans_dp(dev_priv, n_entries);
+ }
+}
+
+static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+{
+ int n_entries, level, default_entry;
+
+ level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+ if (IS_CANNONLAKE(dev_priv)) {
+ cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+ default_entry = n_entries - 1;
+ } else if (IS_GEN9_LP(dev_priv)) {
+ bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+ default_entry = n_entries - 1;
+ } else if (IS_GEN9_BC(dev_priv)) {
+ intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ default_entry = 8;
+ } else if (IS_BROADWELL(dev_priv)) {
+ intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ default_entry = 7;
+ } else if (IS_HASWELL(dev_priv)) {
+ intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ default_entry = 6;
+ } else {
+ WARN(1, "ddi translation table missing\n");
+ return 0;
+ }
+
+ /* Choose a good default if VBT is badly populated */
+ if (level == HDMI_LEVEL_SHIFT_UNKNOWN || level >= n_entries)
+ level = default_entry;
+
+ if (WARN_ON_ONCE(n_entries == 0))
+ return 0;
+ if (WARN_ON_ONCE(level >= n_entries))
+ level = n_entries - 1;
+
+ return level;
+}
+
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. This function programs the correct values for
@@ -688,16 +819,13 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
enum port port = intel_ddi_get_encoder_port(encoder);
const struct ddi_buf_trans *ddi_translations;
- if (IS_GEN9_LP(dev_priv))
- return;
-
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
- ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv,
+ ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port,
&n_entries);
break;
case INTEL_OUTPUT_DP:
- ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv,
+ ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port,
&n_entries);
break;
case INTEL_OUTPUT_ANALOG:
@@ -709,16 +837,10 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
return;
}
- if (IS_GEN9_BC(dev_priv)) {
- /* If we're boosting the current, set bit 31 of trans1 */
- if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
- iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
-
- if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
- port != PORT_A && port != PORT_E &&
- n_entries > 9))
- n_entries = 9;
- }
+ /* If we're boosting the current, set bit 31 of trans1 */
+ if (IS_GEN9_BC(dev_priv) &&
+ dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+ iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
I915_WRITE(DDI_BUF_TRANS_LO(port, i),
@@ -733,42 +855,32 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
* values in advance. This function programs the correct values for
* HDMI/DVI use cases.
*/
-static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder)
+static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
+ int level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
- int n_hdmi_entries, hdmi_level;
+ int n_entries;
enum port port = intel_ddi_get_encoder_port(encoder);
- const struct ddi_buf_trans *ddi_translations_hdmi;
-
- if (IS_GEN9_LP(dev_priv))
- return;
+ const struct ddi_buf_trans *ddi_translations;
- hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
+ ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
- if (IS_GEN9_BC(dev_priv)) {
- ddi_translations_hdmi = skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+ if (WARN_ON_ONCE(!ddi_translations))
+ return;
+ if (WARN_ON_ONCE(level >= n_entries))
+ level = n_entries - 1;
- /* If we're boosting the current, set bit 31 of trans1 */
- if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
- iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
- } else if (IS_BROADWELL(dev_priv)) {
- ddi_translations_hdmi = bdw_ddi_translations_hdmi;
- n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- } else if (IS_HASWELL(dev_priv)) {
- ddi_translations_hdmi = hsw_ddi_translations_hdmi;
- n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
- } else {
- WARN(1, "ddi translation table missing\n");
- ddi_translations_hdmi = bdw_ddi_translations_hdmi;
- n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
- }
+ /* If we're boosting the current, set bit 31 of trans1 */
+ if (IS_GEN9_BC(dev_priv) &&
+ dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
+ iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
I915_WRITE(DDI_BUF_TRANS_LO(port, 9),
- ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
+ ddi_translations[level].trans1 | iboost_bit);
I915_WRITE(DDI_BUF_TRANS_HI(port, 9),
- ddi_translations_hdmi[hdmi_level].trans2);
+ ddi_translations[level].trans2);
}
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
@@ -785,7 +897,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
}
-static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
+static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
{
switch (pll->id) {
case DPLL_ID_WRPLL1:
@@ -1044,14 +1156,14 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
}
static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- uint32_t dpll)
+ enum intel_dpll_id pll_id)
{
i915_reg_t cfgcr1_reg, cfgcr2_reg;
uint32_t cfgcr1_val, cfgcr2_val;
uint32_t p0, p1, p2, dco_freq;
- cfgcr1_reg = DPLL_CFGCR1(dpll);
- cfgcr2_reg = DPLL_CFGCR2(dpll);
+ cfgcr1_reg = DPLL_CFGCR1(pll_id);
+ cfgcr2_reg = DPLL_CFGCR2(pll_id);
cfgcr1_val = I915_READ(cfgcr1_reg);
cfgcr2_val = I915_READ(cfgcr2_reg);
@@ -1104,7 +1216,7 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
}
static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- uint32_t pll_id)
+ enum intel_dpll_id pll_id)
{
uint32_t cfgcr0, cfgcr1;
uint32_t p0, p1, p2, dco_freq, ref_clock;
@@ -1154,7 +1266,10 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock;
dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
- DPLL_CFGCR0_DCO_FRAC_SHIFT) * ref_clock) / 0x8000;
+ DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
+
+ if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
return dco_freq / (p0 * p1 * p2 * 5);
}
@@ -1188,7 +1303,8 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
- uint32_t cfgcr0, pll_id;
+ uint32_t cfgcr0;
+ enum intel_dpll_id pll_id;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
@@ -1241,17 +1357,18 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
- uint32_t dpll_ctl1, dpll;
+ uint32_t dpll_ctl1;
+ enum intel_dpll_id pll_id;
- dpll = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
dpll_ctl1 = I915_READ(DPLL_CTRL1);
- if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
- link_clock = skl_calc_wrpll_link(dev_priv, dpll);
+ if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(pll_id)) {
+ link_clock = skl_calc_wrpll_link(dev_priv, pll_id);
} else {
- link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(dpll);
- link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(dpll);
+ link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(pll_id);
+ link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(pll_id);
switch (link_clock) {
case DPLL_CTRL1_LINK_RATE_810:
@@ -1332,17 +1449,17 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
}
static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id dpll)
+ enum intel_dpll_id pll_id)
{
struct intel_shared_dpll *pll;
struct intel_dpll_hw_state *state;
struct dpll clock;
/* For DDI ports we always use a shared PLL. */
- if (WARN_ON(dpll == DPLL_ID_PRIVATE))
+ if (WARN_ON(pll_id == DPLL_ID_PRIVATE))
return 0;
- pll = &dev_priv->shared_dplls[dpll];
+ pll = &dev_priv->shared_dplls[pll_id];
state = &pll->state.hw_state;
clock.m1 = 2;
@@ -1361,9 +1478,9 @@ static void bxt_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
- uint32_t dpll = port;
+ enum intel_dpll_id pll_id = port;
- pipe_config->port_clock = bxt_calc_pll_link(dev_priv, dpll);
+ pipe_config->port_clock = bxt_calc_pll_link(dev_priv, pll_id);
ddi_dotclock_get(pipe_config);
}
@@ -1715,54 +1832,36 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
}
-static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
+static void skl_ddi_set_iboost(struct intel_encoder *encoder,
+ int level, enum intel_output_type type)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->port;
- int type = encoder->type;
- const struct ddi_buf_trans *ddi_translations;
uint8_t iboost;
- uint8_t dp_iboost, hdmi_iboost;
- int n_entries;
- /* VBT may override standard boost values */
- dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
- hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+ if (type == INTEL_OUTPUT_HDMI)
+ iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+ else
+ iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
+
+ if (iboost == 0) {
+ const struct ddi_buf_trans *ddi_translations;
+ int n_entries;
- if (type == INTEL_OUTPUT_DP) {
- if (dp_iboost) {
- iboost = dp_iboost;
- } else {
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
- ddi_translations = kbl_get_buf_trans_dp(dev_priv,
- &n_entries);
- else
- ddi_translations = skl_get_buf_trans_dp(dev_priv,
- &n_entries);
- iboost = ddi_translations[level].i_boost;
- }
- } else if (type == INTEL_OUTPUT_EDP) {
- if (dp_iboost) {
- iboost = dp_iboost;
- } else {
- ddi_translations = skl_get_buf_trans_edp(dev_priv, &n_entries);
+ if (type == INTEL_OUTPUT_HDMI)
+ ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
+ else if (type == INTEL_OUTPUT_EDP)
+ ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+ else
+ ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
- if (WARN_ON(port != PORT_A &&
- port != PORT_E && n_entries > 9))
- n_entries = 9;
+ if (WARN_ON_ONCE(!ddi_translations))
+ return;
+ if (WARN_ON_ONCE(level >= n_entries))
+ level = n_entries - 1;
- iboost = ddi_translations[level].i_boost;
- }
- } else if (type == INTEL_OUTPUT_HDMI) {
- if (hdmi_iboost) {
- iboost = hdmi_iboost;
- } else {
- ddi_translations = skl_get_buf_trans_hdmi(dev_priv, &n_entries);
- iboost = ddi_translations[level].i_boost;
- }
- } else {
- return;
+ iboost = ddi_translations[level].i_boost;
}
/* Make sure that the requested I_boost is valid */
@@ -1777,38 +1876,25 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
_skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
}
-static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
- u32 level, enum port port, int type)
+static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
+ int level, enum intel_output_type type)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
const struct bxt_ddi_buf_trans *ddi_translations;
- u32 n_entries, i;
-
- if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
- n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
- ddi_translations = bxt_ddi_translations_edp;
- } else if (type == INTEL_OUTPUT_DP
- || type == INTEL_OUTPUT_EDP) {
- n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
- ddi_translations = bxt_ddi_translations_dp;
- } else if (type == INTEL_OUTPUT_HDMI) {
- n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
- ddi_translations = bxt_ddi_translations_hdmi;
- } else {
- DRM_DEBUG_KMS("Vswing programming not done for encoder %d\n",
- type);
- return;
- }
+ enum port port = encoder->port;
+ int n_entries;
- /* Check if default value has to be used */
- if (level >= n_entries ||
- (type == INTEL_OUTPUT_HDMI && level == HDMI_LEVEL_SHIFT_UNKNOWN)) {
- for (i = 0; i < n_entries; i++) {
- if (ddi_translations[i].default_index) {
- level = i;
- break;
- }
- }
- }
+ if (type == INTEL_OUTPUT_HDMI)
+ ddi_translations = bxt_get_buf_trans_hdmi(dev_priv, &n_entries);
+ else if (type == INTEL_OUTPUT_EDP)
+ ddi_translations = bxt_get_buf_trans_edp(dev_priv, &n_entries);
+ else
+ ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries);
+
+ if (WARN_ON_ONCE(!ddi_translations))
+ return;
+ if (WARN_ON_ONCE(level >= n_entries))
+ level = n_entries - 1;
bxt_ddi_phy_set_signal_level(dev_priv, port,
ddi_translations[level].margin,
@@ -1820,12 +1906,25 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
int n_entries;
- if (encoder->type == INTEL_OUTPUT_EDP)
- intel_ddi_get_buf_trans_edp(dev_priv, &n_entries);
- else
- intel_ddi_get_buf_trans_dp(dev_priv, &n_entries);
+ if (IS_CANNONLAKE(dev_priv)) {
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ cnl_get_buf_trans_edp(dev_priv, &n_entries);
+ else
+ cnl_get_buf_trans_dp(dev_priv, &n_entries);
+ } else if (IS_GEN9_LP(dev_priv)) {
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ bxt_get_buf_trans_edp(dev_priv, &n_entries);
+ else
+ bxt_get_buf_trans_dp(dev_priv, &n_entries);
+ } else {
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries);
+ else
+ intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
+ }
if (WARN_ON(n_entries < 1))
n_entries = 1;
@@ -1836,95 +1935,26 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
DP_TRAIN_VOLTAGE_SWING_MASK;
}
-static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
- u32 voltage, int *n_entries)
-{
- if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
- return cnl_ddi_translations_hdmi_0_85V;
- } else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V);
- return cnl_ddi_translations_hdmi_0_95V;
- } else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V);
- return cnl_ddi_translations_hdmi_1_05V;
- }
- return NULL;
-}
-
-static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv,
- u32 voltage, int *n_entries)
+static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
+ int level, enum intel_output_type type)
{
- if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
- return cnl_ddi_translations_dp_0_85V;
- } else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V);
- return cnl_ddi_translations_dp_0_95V;
- } else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V);
- return cnl_ddi_translations_dp_1_05V;
- }
- return NULL;
-}
-
-static const struct cnl_ddi_buf_trans *
-cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv,
- u32 voltage, int *n_entries)
-{
- if (dev_priv->vbt.edp.low_vswing) {
- if (voltage == VOLTAGE_INFO_0_85V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
- return cnl_ddi_translations_edp_0_85V;
- } else if (voltage == VOLTAGE_INFO_0_95V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
- return cnl_ddi_translations_edp_0_95V;
- } else if (voltage == VOLTAGE_INFO_1_05V) {
- *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V);
- return cnl_ddi_translations_edp_1_05V;
- }
- return NULL;
- } else {
- return cnl_get_buf_trans_dp(dev_priv, voltage, n_entries);
- }
-}
-
-static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
- u32 level, enum port port, int type)
-{
- const struct cnl_ddi_buf_trans *ddi_translations = NULL;
- u32 n_entries, val, voltage;
- int ln;
-
- /*
- * Values for each port type are listed in
- * voltage swing programming tables.
- * Vccio voltage found in PORT_COMP_DW3.
- */
- voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ const struct cnl_ddi_buf_trans *ddi_translations;
+ int n_entries, ln;
+ u32 val;
- if (type == INTEL_OUTPUT_HDMI) {
- ddi_translations = cnl_get_buf_trans_hdmi(dev_priv,
- voltage, &n_entries);
- } else if (type == INTEL_OUTPUT_DP) {
- ddi_translations = cnl_get_buf_trans_dp(dev_priv,
- voltage, &n_entries);
- } else if (type == INTEL_OUTPUT_EDP) {
- ddi_translations = cnl_get_buf_trans_edp(dev_priv,
- voltage, &n_entries);
- }
+ if (type == INTEL_OUTPUT_HDMI)
+ ddi_translations = cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
+ else if (type == INTEL_OUTPUT_EDP)
+ ddi_translations = cnl_get_buf_trans_edp(dev_priv, &n_entries);
+ else
+ ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);
- if (ddi_translations == NULL) {
- MISSING_CASE(voltage);
+ if (WARN_ON_ONCE(!ddi_translations))
return;
- }
-
- if (level >= n_entries) {
- DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", level, n_entries - 1);
+ if (WARN_ON_ONCE(level >= n_entries))
level = n_entries - 1;
- }
/* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */
val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
@@ -1942,7 +1972,7 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
val |= RCOMP_SCALAR(0x98);
I915_WRITE(CNL_PORT_TX_DW2_GRP(port), val);
- /* Program PORT_TX_DW4 */
+ /* Program PORT_TX_DW4 */
	/* We cannot write to GRP. It would overwrite individual loadgen */
for (ln = 0; ln < 4; ln++) {
val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
@@ -1954,7 +1984,7 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
}
- /* Program PORT_TX_DW5 */
+ /* Program PORT_TX_DW5 */
/* All DW5 values are fixed for every table entry */
val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
val &= ~RTERM_SELECT_MASK;
@@ -1962,33 +1992,29 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
val |= TAP3_DISABLE;
I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
- /* Program PORT_TX_DW7 */
+ /* Program PORT_TX_DW7 */
val = I915_READ(CNL_PORT_TX_DW7_LN0(port));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
I915_WRITE(CNL_PORT_TX_DW7_GRP(port), val);
}
-static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
+static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
+ int level, enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = intel_ddi_get_encoder_port(encoder);
- int type = encoder->type;
- int width = 0;
- int rate = 0;
+ int width, rate, ln;
u32 val;
- int ln = 0;
- if ((intel_dp) && (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)) {
- width = intel_dp->lane_count;
- rate = intel_dp->link_rate;
- } else if (type == INTEL_OUTPUT_HDMI) {
+ if (type == INTEL_OUTPUT_HDMI) {
width = 4;
- /* Rate is always < than 6GHz for HDMI */
+ rate = 0; /* Rate is always < 6 GHz for HDMI */
} else {
- MISSING_CASE(type);
- return;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ width = intel_dp->lane_count;
+ rate = intel_dp->link_rate;
}
/*
@@ -1997,7 +2023,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
* else clear to 0b.
*/
val = I915_READ(CNL_PORT_PCS_DW1_LN0(port));
- if (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)
+ if (type != INTEL_OUTPUT_HDMI)
val |= COMMON_KEEPER_EN;
else
val &= ~COMMON_KEEPER_EN;
@@ -2032,7 +2058,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
/* 5. Program swing and de-emphasis */
- cnl_ddi_vswing_program(dev_priv, level, port, type);
+ cnl_ddi_vswing_program(encoder, level, type);
/* 6. Set training enable to trigger update */
val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
@@ -2055,33 +2081,45 @@ static uint32_t translate_signal_level(int signal_levels)
return 0;
}
-uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
+static uint32_t intel_ddi_dp_level(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
- struct intel_encoder *encoder = &dport->base;
uint8_t train_set = intel_dp->train_set[0];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
- enum port port = dport->port;
- uint32_t level;
- level = translate_signal_level(signal_levels);
+ return translate_signal_level(signal_levels);
+}
+
+u32 bxt_signal_levels(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ struct intel_encoder *encoder = &dport->base;
+ int level = intel_ddi_dp_level(intel_dp);
+
+ if (IS_CANNONLAKE(dev_priv))
+ cnl_ddi_vswing_sequence(encoder, level, encoder->type);
+ else
+ bxt_ddi_vswing_sequence(encoder, level, encoder->type);
+
+ return 0;
+}
+
+uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ struct intel_encoder *encoder = &dport->base;
+ int level = intel_ddi_dp_level(intel_dp);
if (IS_GEN9_BC(dev_priv))
- skl_ddi_set_iboost(encoder, level);
- else if (IS_GEN9_LP(dev_priv))
- bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
- else if (IS_CANNONLAKE(dev_priv)) {
- cnl_ddi_vswing_sequence(encoder, level);
- /* DDI_BUF_CTL bits 27:24 are reserved on CNL */
- return 0;
- }
+ skl_ddi_set_iboost(encoder, level, encoder->type);
+
return DDI_BUF_TRANS_SELECT(level);
}
static void intel_ddi_clk_select(struct intel_encoder *encoder,
- struct intel_shared_dpll *pll)
+ const struct intel_shared_dpll *pll)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
@@ -2120,108 +2158,113 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
}
}
+static void intel_ddi_clk_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = intel_ddi_get_encoder_port(encoder);
+
+ if (IS_CANNONLAKE(dev_priv))
+ I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
+ DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+ else if (IS_GEN9_BC(dev_priv))
+ I915_WRITE(DPLL_CTRL2, I915_READ(DPLL_CTRL2) |
+ DPLL_CTRL2_DDI_CLK_OFF(port));
+ else if (INTEL_GEN(dev_priv) < 9)
+ I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
- int link_rate, uint32_t lane_count,
- struct intel_shared_dpll *pll,
- bool link_mst)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int level = intel_ddi_dp_level(intel_dp);
+
+ WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
- WARN_ON(link_mst && (port == PORT_A || port == PORT_E));
+ intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
+ crtc_state->lane_count, is_mst);
- intel_dp_set_link_params(intel_dp, link_rate, lane_count,
- link_mst);
- if (encoder->type == INTEL_OUTPUT_EDP)
- intel_edp_panel_on(intel_dp);
+ intel_edp_panel_on(intel_dp);
- intel_ddi_clk_select(encoder, pll);
+ intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
- intel_prepare_dp_ddi_buffers(encoder);
+ if (IS_CANNONLAKE(dev_priv))
+ cnl_ddi_vswing_sequence(encoder, level, encoder->type);
+ else if (IS_GEN9_LP(dev_priv))
+ bxt_ddi_vswing_sequence(encoder, level, encoder->type);
+ else
+ intel_prepare_dp_ddi_buffers(encoder);
+
intel_ddi_init_dp_buf_reg(encoder);
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ if (!is_mst)
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
- bool has_hdmi_sink,
const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state,
- struct intel_shared_dpll *pll)
+ const struct drm_connector_state *conn_state)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct drm_encoder *drm_encoder = &encoder->base;
enum port port = intel_ddi_get_encoder_port(encoder);
int level = intel_ddi_hdmi_level(dev_priv, port);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
- intel_ddi_clk_select(encoder, pll);
+ intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
- intel_prepare_hdmi_ddi_buffers(encoder);
- if (IS_GEN9_BC(dev_priv))
- skl_ddi_set_iboost(encoder, level);
+ if (IS_CANNONLAKE(dev_priv))
+ cnl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
else if (IS_GEN9_LP(dev_priv))
- bxt_ddi_vswing_sequence(dev_priv, level, port,
- INTEL_OUTPUT_HDMI);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_ddi_vswing_sequence(encoder, level);
+ bxt_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI);
+ else
+ intel_prepare_hdmi_ddi_buffers(encoder, level);
+
+ if (IS_GEN9_BC(dev_priv))
+ skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
- intel_hdmi->set_infoframes(drm_encoder,
- has_hdmi_sink,
- crtc_state, conn_state);
+ intel_dig_port->set_infoframes(&encoder->base,
+ crtc_state->has_infoframe,
+ crtc_state, conn_state);
}
static void intel_ddi_pre_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- int type = encoder->type;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
- intel_ddi_pre_enable_dp(encoder,
- pipe_config->port_clock,
- pipe_config->lane_count,
- pipe_config->shared_dpll,
- intel_crtc_has_type(pipe_config,
- INTEL_OUTPUT_DP_MST));
- }
- if (type == INTEL_OUTPUT_HDMI) {
- intel_ddi_pre_enable_hdmi(encoder,
- pipe_config->has_hdmi_sink,
- pipe_config, conn_state,
- pipe_config->shared_dpll);
- }
+ WARN_ON(crtc_state->has_pch_encoder);
+
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
+ else
+ intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
}
-static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+static void intel_disable_ddi_buf(struct intel_encoder *encoder)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
- struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct intel_dp *intel_dp = NULL;
- int type = intel_encoder->type;
- uint32_t val;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = intel_ddi_get_encoder_port(encoder);
bool wait = false;
-
- /* old_crtc_state and old_conn_state are NULL when called from DP_MST */
-
- if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
- intel_dp = enc_to_intel_dp(encoder);
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
- }
+ u32 val;
val = I915_READ(DDI_BUF_CTL(port));
if (val & DDI_BUF_CTL_ENABLE) {
@@ -2237,34 +2280,80 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
+}
- if (intel_dp) {
- intel_edp_panel_vdd_on(intel_dp);
- intel_edp_panel_off(intel_dp);
- }
+static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_dp *intel_dp = &dig_port->dp;
+ /*
+ * old_crtc_state and old_conn_state are NULL when called from
+ * DP_MST. The main connector associated with this port is never
+ * bound to a crtc for MST.
+ */
+ bool is_mst = !old_crtc_state;
+
+ /*
+ * Power down sink before disabling the port, otherwise we end
+ * up getting interrupts from the sink on detecting link loss.
+ */
+ if (!is_mst)
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
- if (dig_port)
- intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
+ intel_disable_ddi_buf(encoder);
- if (IS_CANNONLAKE(dev_priv))
- I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
- DPCLKA_CFGCR0_DDI_CLK_OFF(port));
- else if (IS_GEN9_BC(dev_priv))
- I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
- DPLL_CTRL2_DDI_CLK_OFF(port)));
- else if (INTEL_GEN(dev_priv) < 9)
- I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ intel_edp_panel_vdd_on(intel_dp);
+ intel_edp_panel_off(intel_dp);
- if (type == INTEL_OUTPUT_HDMI) {
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
- intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
- }
+ intel_ddi_clk_disable(encoder);
+}
+
+static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
+
+ intel_disable_ddi_buf(encoder);
+
+ dig_port->set_infoframes(&encoder->base, false,
+ old_crtc_state, old_conn_state);
+
+ intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
+
+ intel_ddi_clk_disable(encoder);
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
+}
+
+static void intel_ddi_post_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ /*
+ * old_crtc_state and old_conn_state are NULL when called from
+ * DP_MST. The main connector associated with this port is never
+ * bound to a crtc for MST.
+ */
+ if (old_crtc_state &&
+ intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
+ intel_ddi_post_disable_hdmi(encoder,
+ old_crtc_state, old_conn_state);
+ else
+ intel_ddi_post_disable_dp(encoder,
+ old_crtc_state, old_conn_state);
}
void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
uint32_t val;
@@ -2279,7 +2368,8 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
- intel_ddi_post_disable(encoder, old_crtc_state, old_conn_state);
+ intel_disable_ddi_buf(encoder);
+ intel_ddi_clk_disable(encoder);
val = I915_READ(FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
@@ -2295,75 +2385,98 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
}
-static void intel_enable_ddi(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static void intel_enable_ddi_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
- int type = intel_encoder->type;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ enum port port = intel_ddi_get_encoder_port(encoder);
- if (type == INTEL_OUTPUT_HDMI) {
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(encoder);
- bool clock_ratio = pipe_config->hdmi_high_tmds_clock_ratio;
- bool scrambling = pipe_config->hdmi_scrambling;
-
- intel_hdmi_handle_sink_scrambling(intel_encoder,
- conn_state->connector,
- clock_ratio, scrambling);
-
- /* In HDMI/DVI mode, the port width, and swing/emphasis values
- * are ignored so nothing special needs to be done besides
- * enabling the port.
- */
- I915_WRITE(DDI_BUF_CTL(port),
- intel_dig_port->saved_port_bits |
- DDI_BUF_CTL_ENABLE);
- } else if (type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
+ intel_dp_stop_link_train(intel_dp);
- if (port == PORT_A && INTEL_GEN(dev_priv) < 9)
- intel_dp_stop_link_train(intel_dp);
+ intel_edp_backlight_on(crtc_state, conn_state);
+ intel_psr_enable(intel_dp, crtc_state);
+ intel_edp_drrs_enable(intel_dp, crtc_state);
- intel_edp_backlight_on(pipe_config, conn_state);
- intel_psr_enable(intel_dp);
- intel_edp_drrs_enable(intel_dp, pipe_config);
- }
+ if (crtc_state->has_audio)
+ intel_audio_codec_enable(encoder, crtc_state, conn_state);
+}
- if (pipe_config->has_audio)
- intel_audio_codec_enable(intel_encoder, pipe_config, conn_state);
+static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum port port = intel_ddi_get_encoder_port(encoder);
+
+ intel_hdmi_handle_sink_scrambling(encoder,
+ conn_state->connector,
+ crtc_state->hdmi_high_tmds_clock_ratio,
+ crtc_state->hdmi_scrambling);
+
+ /* In HDMI/DVI mode, the port width, and swing/emphasis values
+ * are ignored so nothing special needs to be done besides
+ * enabling the port.
+ */
+ I915_WRITE(DDI_BUF_CTL(port),
+ dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
+
+ if (crtc_state->has_audio)
+ intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
-static void intel_disable_ddi(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+static void intel_enable_ddi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- int type = intel_encoder->type;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ intel_enable_ddi_hdmi(encoder, crtc_state, conn_state);
+ else
+ intel_enable_ddi_dp(encoder, crtc_state, conn_state);
+}
+
+static void intel_disable_ddi_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
if (old_crtc_state->has_audio)
- intel_audio_codec_disable(intel_encoder);
+ intel_audio_codec_disable(encoder);
- if (type == INTEL_OUTPUT_HDMI) {
- intel_hdmi_handle_sink_scrambling(intel_encoder,
- old_conn_state->connector,
- false, false);
- }
+ intel_edp_drrs_disable(intel_dp, old_crtc_state);
+ intel_psr_disable(intel_dp, old_crtc_state);
+ intel_edp_backlight_off(old_conn_state);
+}
- if (type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ if (old_crtc_state->has_audio)
+ intel_audio_codec_disable(encoder);
- intel_edp_drrs_disable(intel_dp, old_crtc_state);
- intel_psr_disable(intel_dp);
- intel_edp_backlight_off(old_conn_state);
- }
+ intel_hdmi_handle_sink_scrambling(encoder,
+ old_conn_state->connector,
+ false, false);
+}
+
+static void intel_disable_ddi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
+ intel_disable_ddi_hdmi(encoder, old_crtc_state, old_conn_state);
+ else
+ intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
}
static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
uint8_t mask = pipe_config->lane_lat_optim_mask;
@@ -2435,7 +2548,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- struct intel_hdmi *intel_hdmi;
+ struct intel_digital_port *intel_dig_port;
u32 temp, flags = 0;
/* XXX: DSI transcoder paranoia */
@@ -2474,9 +2587,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
pipe_config->has_hdmi_sink = true;
- intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ intel_dig_port = enc_to_dig_port(&encoder->base);
- if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
+ if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
@@ -2729,6 +2842,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
+ intel_infoframe_init(intel_dig_port);
+
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
goto err;
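/*
 * Illustrative sketch, not part of the patch: the intel_ddi.c hunks above
 * split the old type-switching enable/disable paths into per-output helpers
 * chosen from the (now const) crtc state, so each helper only does its own
 * output's work.  A minimal standalone C rendition of that dispatch shape,
 * with made-up types and names:
 */
#include <stdbool.h>
#include <stdio.h>

struct crtc_state {
	bool is_hdmi;      /* stands in for intel_crtc_has_type(..., HDMI) */
	bool has_audio;
};

static void enable_ddi_hdmi(const struct crtc_state *s)
{
	printf("hdmi enable%s\n", s->has_audio ? " + audio" : "");
}

static void enable_ddi_dp(const struct crtc_state *s)
{
	printf("dp/edp enable%s\n", s->has_audio ? " + audio" : "");
}

static void enable_ddi(const struct crtc_state *s)
{
	/* single branch on output type; the helpers never re-check it */
	if (s->is_hdmi)
		enable_ddi_hdmi(s);
	else
		enable_ddi_dp(s);
}

int main(void)
{
	struct crtc_state hdmi = { .is_hdmi = true, .has_audio = true };
	struct crtc_state edp = { .is_hdmi = false, .has_audio = false };

	enable_ddi(&hdmi);
	enable_ddi(&edp);
	return 0;
}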
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 5f91ddc78c7a..875d428ea75f 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -82,6 +82,39 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv)
#undef PRINT_FLAG
}
+static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+ const u32 fuse2 = I915_READ(GEN8_FUSE2);
+
+ sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
+ GEN10_F2_S_ENA_SHIFT;
+ sseu->subslice_mask = (1 << 4) - 1;
+ sseu->subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
+ GEN10_F2_SS_DIS_SHIFT);
+
+ sseu->eu_total = hweight32(~I915_READ(GEN8_EU_DISABLE0));
+ sseu->eu_total += hweight32(~I915_READ(GEN8_EU_DISABLE1));
+ sseu->eu_total += hweight32(~I915_READ(GEN8_EU_DISABLE2));
+ sseu->eu_total += hweight8(~(I915_READ(GEN10_EU_DISABLE3) &
+ GEN10_EU_DIS_SS_MASK));
+
+ /*
+ * CNL is expected to always have a uniform distribution
+ * of EU across subslices with the exception that any one
+ * EU in any one subslice may be fused off for die
+ * recovery.
+ */
+ sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
+ DIV_ROUND_UP(sseu->eu_total,
+ sseu_subslice_total(sseu)) : 0;
+
+ /* No restrictions on Power Gating */
+ sseu->has_slice_pg = 1;
+ sseu->has_subslice_pg = 1;
+ sseu->has_eu_pg = 1;
+}
+
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
@@ -343,7 +376,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->num_sprites[pipe] = 1;
}
- if (i915.disable_display) {
+ if (i915_modparams.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
info->num_pipes = 0;
} else if (info->num_pipes > 0 &&
@@ -409,10 +442,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
cherryview_sseu_info_init(dev_priv);
else if (IS_BROADWELL(dev_priv))
broadwell_sseu_info_init(dev_priv);
- else if (INTEL_INFO(dev_priv)->gen >= 9)
+ else if (INTEL_GEN(dev_priv) == 9)
gen9_sseu_info_init(dev_priv);
-
- info->has_snoop = !info->has_llc;
+ else if (INTEL_GEN(dev_priv) >= 10)
+ gen10_sseu_info_init(dev_priv);
DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5ebdb63330dd..878acc432a4b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1539,7 +1539,7 @@ static void chv_enable_pll(struct intel_crtc *crtc,
* DPLLCMD is AWOL. Use chicken bits to propagate
* the value from DPLLBMD to either pipe B or C.
*/
- I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
+ I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
I915_WRITE(CBR4_VLV, 0);
dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
@@ -1568,11 +1568,12 @@ static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
return count;
}
-static void i9xx_enable_pll(struct intel_crtc *crtc)
+static void i9xx_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = DPLL(crtc->pipe);
- u32 dpll = crtc->config->dpll_hw_state.dpll;
+ u32 dpll = crtc_state->dpll_hw_state.dpll;
int i;
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -1609,7 +1610,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DPLL_MD(crtc->pipe),
- crtc->config->dpll_hw_state.dpll_md);
+ crtc_state->dpll_hw_state.dpll_md);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
@@ -1627,15 +1628,6 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
}
}
-/**
- * i9xx_disable_pll - disable a PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to disable
- *
- * Disable the PLL for @pipe, making sure the pipe is off first.
- *
- * Note! This is for pre-ILK only.
- */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -2219,8 +2211,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
* something and try to run the system in a "less than optimal"
* mode that matches the user configuration.
*/
- if (i915_vma_get_fence(vma) == 0)
- i915_vma_pin_fence(vma);
+ i915_vma_pin_fence(vma);
}
i915_vma_get(vma);
@@ -2856,7 +2847,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
if (intel_plane_ggtt_offset(state) == plane_config->base) {
fb = c->primary->fb;
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
goto valid_fb;
}
}
@@ -2887,7 +2878,7 @@ valid_fb:
intel_crtc->pipe, PTR_ERR(intel_state->vma));
intel_state->vma = NULL;
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
return;
}
@@ -2908,7 +2899,7 @@ valid_fb:
if (i915_gem_object_is_tiled(obj))
dev_priv->preserve_bios_swizzle = true;
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
primary->fb = primary->state->fb = fb;
primary->crtc = primary->state->crtc = &intel_crtc->base;
@@ -3298,7 +3289,6 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_framebuffer *fb = plane_state->base.fb;
enum plane plane = primary->plane;
u32 linear_offset;
@@ -3307,16 +3297,14 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
int x = plane_state->main.x;
int y = plane_state->main.y;
unsigned long irqflags;
+ u32 dspaddr_offset;
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
if (INTEL_GEN(dev_priv) >= 4)
- crtc->dspaddr_offset = plane_state->main.offset;
+ dspaddr_offset = plane_state->main.offset;
else
- crtc->dspaddr_offset = linear_offset;
-
- crtc->adjusted_x = x;
- crtc->adjusted_y = y;
+ dspaddr_offset = linear_offset;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -3342,18 +3330,18 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE_FW(DSPSURF(plane),
intel_plane_ggtt_offset(plane_state) +
- crtc->dspaddr_offset);
+ dspaddr_offset);
I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
} else if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE_FW(DSPSURF(plane),
intel_plane_ggtt_offset(plane_state) +
- crtc->dspaddr_offset);
+ dspaddr_offset);
I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
} else {
I915_WRITE_FW(DSPADDR(plane),
intel_plane_ggtt_offset(plane_state) +
- crtc->dspaddr_offset);
+ dspaddr_offset);
}
POSTING_READ_FW(reg);
@@ -3553,100 +3541,6 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
return plane_ctl;
}
-static void skylake_update_primary_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- const struct drm_framebuffer *fb = plane_state->base.fb;
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- u32 plane_ctl = plane_state->ctl;
- unsigned int rotation = plane_state->base.rotation;
- u32 stride = skl_plane_stride(fb, 0, rotation);
- u32 aux_stride = skl_plane_stride(fb, 1, rotation);
- u32 surf_addr = plane_state->main.offset;
- int scaler_id = plane_state->scaler_id;
- int src_x = plane_state->main.x;
- int src_y = plane_state->main.y;
- int src_w = drm_rect_width(&plane_state->base.src) >> 16;
- int src_h = drm_rect_height(&plane_state->base.src) >> 16;
- int dst_x = plane_state->base.dst.x1;
- int dst_y = plane_state->base.dst.y1;
- int dst_w = drm_rect_width(&plane_state->base.dst);
- int dst_h = drm_rect_height(&plane_state->base.dst);
- unsigned long irqflags;
-
- /* Sizes are 0 based */
- src_w--;
- src_h--;
- dst_w--;
- dst_h--;
-
- crtc->dspaddr_offset = surf_addr;
-
- crtc->adjusted_x = src_x;
- crtc->adjusted_y = src_y;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
- I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
- PLANE_COLOR_PIPE_GAMMA_ENABLE |
- PLANE_COLOR_PIPE_CSC_ENABLE |
- PLANE_COLOR_PLANE_GAMMA_DISABLE);
- }
-
- I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
- I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
- I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
- I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
- I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
- (plane_state->aux.offset - surf_addr) | aux_stride);
- I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
- (plane_state->aux.y << 16) | plane_state->aux.x);
-
- if (scaler_id >= 0) {
- uint32_t ps_ctrl = 0;
-
- WARN_ON(!dst_w || !dst_h);
- ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) |
- crtc_state->scaler_state.scalers[scaler_id].mode;
- I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
- I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
- I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
- I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
- } else {
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x);
- }
-
- I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
-
- POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
-static void skylake_disable_primary_plane(struct intel_plane *primary,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
- enum plane_id plane_id = primary->id;
- enum pipe pipe = primary->pipe;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
- I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
- POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
static int
__intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -3701,7 +3595,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
/* reset doesn't touch the display */
- if (!i915.force_reset_modeset_test &&
+ if (!i915_modparams.force_reset_modeset_test &&
!gpu_reset_clobbers_display(dev_priv))
return;
@@ -3757,7 +3651,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
int ret;
/* reset doesn't touch the display */
- if (!i915.force_reset_modeset_test &&
+ if (!i915_modparams.force_reset_modeset_test &&
!gpu_reset_clobbers_display(dev_priv))
return;
@@ -3770,8 +3664,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
if (!gpu_reset_clobbers_display(dev_priv)) {
/* for testing only restore the display */
ret = __intel_display_resume(dev, state, ctx);
- if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
} else {
/*
* The display has been reset as well,
@@ -3782,6 +3676,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
intel_pps_unlock_regs_wa(dev_priv);
intel_modeset_init_hw(dev);
+ intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
@@ -3804,15 +3699,14 @@ unlock:
clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
-static void intel_update_pipe_config(struct intel_crtc *crtc,
- struct intel_crtc_state *old_crtc_state)
+static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc->base.state);
/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
- crtc->base.mode = crtc->base.state->mode;
+ crtc->base.mode = new_crtc_state->base.mode;
/*
* Update pipe size and adjust fitter if needed: the reason for this is
@@ -3824,17 +3718,17 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
*/
I915_WRITE(PIPESRC(crtc->pipe),
- ((pipe_config->pipe_src_w - 1) << 16) |
- (pipe_config->pipe_src_h - 1));
+ ((new_crtc_state->pipe_src_w - 1) << 16) |
+ (new_crtc_state->pipe_src_h - 1));
/* on skylake this is done by detaching scalers */
if (INTEL_GEN(dev_priv) >= 9) {
skl_detach_scalers(crtc);
- if (pipe_config->pch_pfit.enabled)
+ if (new_crtc_state->pch_pfit.enabled)
skylake_pfit_enable(crtc);
} else if (HAS_PCH_SPLIT(dev_priv)) {
- if (pipe_config->pch_pfit.enabled)
+ if (new_crtc_state->pch_pfit.enabled)
ironlake_pfit_enable(crtc);
else if (old_crtc_state->pch_pfit.enabled)
ironlake_pfit_disable(crtc, true);
@@ -4956,9 +4850,10 @@ void hsw_enable_ips(struct intel_crtc *crtc)
assert_plane_enabled(dev_priv, crtc->plane);
if (IS_BROADWELL(dev_priv)) {
- mutex_lock(&dev_priv->rps.hw_lock);
- WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
+ WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
+ IPS_ENABLE | IPS_PCODE_CONTROL));
+ mutex_unlock(&dev_priv->pcu_lock);
/* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
* mailbox." Moreover, the mailbox may return a bogus state,
@@ -4988,9 +4883,9 @@ void hsw_disable_ips(struct intel_crtc *crtc)
assert_plane_enabled(dev_priv, crtc->plane);
if (IS_BROADWELL(dev_priv)) {
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
/* wait for pcode to finish disabling IPS, which may take up to 42ms */
if (intel_wait_for_register(dev_priv,
IPS_CTL, IPS_ENABLE, 0,
@@ -5118,7 +5013,8 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_atomic_state *old_state = old_crtc_state->base.state;
struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc->base.state);
+ intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
+ crtc);
struct drm_plane *primary = crtc->base.primary;
struct drm_plane_state *old_pri_state =
drm_atomic_get_existing_plane_state(old_state, primary);
@@ -5130,7 +5026,8 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
if (old_pri_state) {
struct intel_plane_state *primary_state =
- to_intel_plane_state(primary->state);
+ intel_atomic_get_new_plane_state(to_intel_atomic_state(old_state),
+ to_intel_plane(primary));
struct intel_plane_state *old_primary_state =
to_intel_plane_state(old_pri_state);
@@ -5159,7 +5056,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
if (old_pri_state) {
struct intel_plane_state *primary_state =
- to_intel_plane_state(primary->state);
+ intel_atomic_get_new_plane_state(old_intel_state,
+ to_intel_plane(primary));
struct intel_plane_state *old_primary_state =
to_intel_plane_state(old_pri_state);
@@ -5456,6 +5354,20 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}
+static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool apply)
+{
+ u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
+ u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
+
+ if (apply)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
+}
+
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
@@ -5466,13 +5378,11 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
+ bool psl_clkgate_wa;
if (WARN_ON(intel_crtc->active))
return;
- if (intel_crtc->config->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
-
intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
if (intel_crtc->config->shared_dpll)
@@ -5506,19 +5416,17 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true;
- if (intel_crtc->config->has_pch_encoder)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- else
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
intel_encoders_pre_enable(crtc, pipe_config, old_state);
- if (intel_crtc->config->has_pch_encoder)
- dev_priv->display.fdi_link_train(intel_crtc, pipe_config);
-
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_pipe_clock(pipe_config);
+ /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
+ psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
+ intel_crtc->config->pch_pfit.enabled;
+ if (psl_clkgate_wa)
+ glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
+
if (INTEL_GEN(dev_priv) >= 9)
skylake_pfit_enable(intel_crtc);
else
@@ -5552,11 +5460,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
- if (intel_crtc->config->has_pch_encoder) {
- intel_wait_for_vblank(dev_priv, pipe);
+ if (psl_clkgate_wa) {
intel_wait_for_vblank(dev_priv, pipe);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
+ glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
}
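/*
 * Illustrative sketch, not part of the patch: the haswell_crtc_enable() hunks
 * above bracket pipe-scaler setup on GLK/CNL with Display WA #1180 -- set the
 * clock-gating-disable bits before the scaler is enabled, then wait one
 * vblank after the pipe is up and clear them again.  The helper itself is a
 * plain read-modify-write on a bit mask.  Standalone sketch; the register and
 * mask values are invented:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t clkgate_reg;            /* stands in for CLKGATE_DIS_PSL(pipe) */
#define WA_GATING_DIS_MASK 0x00070000u  /* illustrative gating-disable bits */

static void scaler_clock_gating_wa(bool apply)
{
	uint32_t val = clkgate_reg;      /* I915_READ() in the driver */

	if (apply)
		val |= WA_GATING_DIS_MASK;
	else
		val &= ~WA_GATING_DIS_MASK;

	clkgate_reg = val;               /* I915_WRITE() in the driver */
}

int main(void)
{
	bool wa_needed = true;           /* GLK/CNL and the pipe fitter in use */

	if (wa_needed)
		scaler_clock_gating_wa(true);

	printf("scaler enabled with reg %#x\n", clkgate_reg);

	if (wa_needed) {
		/* intel_wait_for_vblank() sits here in the driver */
		scaler_clock_gating_wa(false);
	}
	printf("after vblank, reg %#x\n", clkgate_reg);
	return 0;
}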
/* If we change the relative order between pipe/planes enabling, we need
@@ -5652,9 +5558,6 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
- if (intel_crtc->config->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
-
intel_encoders_disable(crtc, old_crtc_state, old_state);
drm_crtc_vblank_off(crtc);
@@ -5679,9 +5582,6 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_ddi_disable_pipe_clock(intel_crtc->config);
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
-
- if (old_crtc_state->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -5891,7 +5791,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_pre_enable(crtc, pipe_config, old_state);
- i9xx_enable_pll(intel_crtc);
+ i9xx_enable_pll(intel_crtc, pipe_config);
i9xx_pfit_enable(intel_crtc);
@@ -6038,7 +5938,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
intel_crtc->enabled_power_domains = 0;
dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
- dev_priv->min_pixclk[intel_crtc->pipe] = 0;
+ dev_priv->min_cdclk[intel_crtc->pipe] = 0;
}
/*
@@ -6143,6 +6043,19 @@ struct intel_connector *intel_connector_alloc(void)
return connector;
}
+/*
+ * Free the bits allocated by intel_connector_alloc.
+ * This should only be used after intel_connector_alloc has returned
+ * successfully, and before drm_connector_init returns successfully.
+ * Otherwise the destroy callbacks for the connector and the state should
+ * take care of proper cleanup/free
+ */
+void intel_connector_free(struct intel_connector *connector)
+{
+ kfree(to_intel_digital_connector_state(connector->base.state));
+ kfree(connector);
+}
+
/* Simple connector->get_hw_state implementation for encoders that support only
* one connector and no cloning and hence the encoder state determines the state
* of the connector. */
@@ -6283,6 +6196,9 @@ retry:
static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
struct intel_crtc_state *pipe_config)
{
+ if (pipe_config->ips_force_disable)
+ return false;
+
if (pipe_config->pipe_bpp > 24)
return false;
@@ -6307,7 +6223,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- pipe_config->ips_enabled = i915.enable_ips &&
+ pipe_config->ips_enabled = i915_modparams.enable_ips &&
hsw_crtc_supports_ips(crtc) &&
pipe_config_supports_ips(dev_priv, pipe_config);
}
@@ -6488,8 +6404,8 @@ intel_link_compute_m_n(int bits_per_pixel, int nlanes,
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
- if (i915.panel_use_ssc >= 0)
- return i915.panel_use_ssc != 0;
+ if (i915_modparams.panel_use_ssc >= 0)
+ return i915_modparams.panel_use_ssc != 0;
return dev_priv->vbt.lvds_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
@@ -6523,11 +6439,9 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.fp0 = fp;
- crtc->lowfreq_avail = false;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
reduced_clock) {
crtc_state->dpll_hw_state.fp1 = fp2;
- crtc->lowfreq_avail = true;
} else {
crtc_state->dpll_hw_state.fp1 = fp;
}
@@ -7222,15 +7136,6 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
}
- if (HAS_PIPE_CXSR(dev_priv)) {
- if (intel_crtc->lowfreq_avail) {
- DRM_DEBUG_KMS("enabling CxSR downclocking\n");
- pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
- } else {
- DRM_DEBUG_KMS("disabling CxSR downclocking\n");
- }
- }
-
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_GEN(dev_priv) < 4 ||
intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
@@ -8366,8 +8271,6 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- crtc->lowfreq_avail = false;
-
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (!crtc_state->has_pch_encoder)
return 0;
@@ -8840,11 +8743,11 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
if (IS_HASWELL(dev_priv)) {
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
val))
DRM_DEBUG_KMS("Failed to write to D_COMP\n");
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
} else {
I915_WRITE(D_COMP_BDW, val);
POSTING_READ(D_COMP_BDW);
@@ -9026,8 +8929,6 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
}
}
- crtc->lowfreq_avail = false;
-
return 0;
}
@@ -9039,7 +8940,7 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
u32 temp;
temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- id = temp >> (port * 2);
+ id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
return;
@@ -9304,11 +9205,11 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
- if (IS_BROADWELL(dev_priv) || dev_priv->info.gen >= 9) {
+ if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;
- if (IS_GEMINILAKE(dev_priv) || dev_priv->info.gen >= 10) {
+ if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
bool blend_mode_420 = tmp &
PIPEMISC_YUV420_MODE_FULL_BLEND;
@@ -9753,7 +9654,7 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
/* VESA 640x480x72Hz mode to set on the pipe */
-static struct drm_display_mode load_detect_mode = {
+static const struct drm_display_mode load_detect_mode = {
DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
@@ -9788,7 +9689,7 @@ intel_framebuffer_pitch_for_width(int width, int bpp)
}
static u32
-intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
+intel_framebuffer_size_for_mode(const struct drm_display_mode *mode, int bpp)
{
u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
return PAGE_ALIGN(pitch * mode->vdisplay);
@@ -9796,7 +9697,7 @@ intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
int depth, int bpp)
{
struct drm_framebuffer *fb;
@@ -9823,7 +9724,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -9847,7 +9748,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
if (obj->base.size < mode->vdisplay * fb->pitches[0])
return NULL;
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
return fb;
#else
return NULL;
@@ -9856,7 +9757,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_framebuffer *fb,
int x, int y)
{
@@ -9890,7 +9791,7 @@ static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
}
int intel_get_load_detect_pipe(struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -10028,7 +9929,7 @@ found:
if (ret)
goto fail;
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
if (ret)
@@ -10218,7 +10119,7 @@ int intel_dotclock_calculate(int link_freq,
if (!m_n->link_n)
return 0;
- return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
+ return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
@@ -10239,62 +10140,44 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
&pipe_config->fdi_m_n);
}
-/** Returns the currently programmed mode of the given pipe. */
-struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
- struct drm_crtc *crtc)
+/* Returns the currently programmed mode of the given encoder. */
+struct drm_display_mode *
+intel_encoder_current_mode(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc_state *crtc_state;
struct drm_display_mode *mode;
- struct intel_crtc_state *pipe_config;
- u32 htot, hsync, vtot, vsync;
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc;
+ enum pipe pipe;
+
+ if (!encoder->get_hw_state(encoder, &pipe))
+ return NULL;
+
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
return NULL;
- pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
- if (!pipe_config) {
+ crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
+ if (!crtc_state) {
kfree(mode);
return NULL;
}
- /*
- * Construct a pipe_config sufficient for getting the clock info
- * back out of crtc_clock_get.
- *
- * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
- * to use a real value here instead.
- */
- pipe_config->cpu_transcoder = (enum transcoder) pipe;
- pipe_config->pixel_multiplier = 1;
- pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
- pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
- pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
- i9xx_crtc_clock_get(intel_crtc, pipe_config);
-
- mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
-
- cpu_transcoder = pipe_config->cpu_transcoder;
- htot = I915_READ(HTOTAL(cpu_transcoder));
- hsync = I915_READ(HSYNC(cpu_transcoder));
- vtot = I915_READ(VTOTAL(cpu_transcoder));
- vsync = I915_READ(VSYNC(cpu_transcoder));
-
- mode->hdisplay = (htot & 0xffff) + 1;
- mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
- mode->hsync_start = (hsync & 0xffff) + 1;
- mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
- mode->vdisplay = (vtot & 0xffff) + 1;
- mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
- mode->vsync_start = (vsync & 0xffff) + 1;
- mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+ crtc_state->base.crtc = &crtc->base;
- drm_mode_set_name(mode);
+ if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
+ kfree(crtc_state);
+ kfree(mode);
+ return NULL;
+ }
- kfree(pipe_config);
+ encoder->get_config(encoder, crtc_state);
+
+ intel_mode_from_pipe_config(mode, crtc_state);
+
+ kfree(crtc_state);
return mode;
}
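/*
 * Illustrative sketch, not part of the patch: intel_encoder_current_mode()
 * above replaces hand-decoded timing registers with the driver's own readout
 * hooks, building a throwaway crtc_state just to run the pipe/encoder config
 * readout and then converting the result into a mode.  The allocation and
 * cleanup shape, as a standalone sketch with dummy readout functions:
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mode       { int clock, hdisplay, vdisplay; };
struct crtc_state { int port_clock, pipe_src_w, pipe_src_h; };

static bool get_pipe_config(struct crtc_state *s)
{
	/* stands in for ->get_pipe_config() plus ->get_config() */
	s->port_clock = 148500;
	s->pipe_src_w = 1920;
	s->pipe_src_h = 1080;
	return true;
}

static void mode_from_state(struct mode *m, const struct crtc_state *s)
{
	m->clock = s->port_clock;
	m->hdisplay = s->pipe_src_w;
	m->vdisplay = s->pipe_src_h;
}

static struct mode *encoder_current_mode(void)
{
	struct mode *mode = calloc(1, sizeof(*mode));
	struct crtc_state *state;

	if (!mode)
		return NULL;

	state = calloc(1, sizeof(*state));
	if (!state) {
		free(mode);                /* both or neither survive */
		return NULL;
	}

	if (!get_pipe_config(state)) {     /* hw readout failed: free both */
		free(state);
		free(mode);
		return NULL;
	}

	mode_from_state(mode, state);
	free(state);                       /* the scratch state is never returned */
	return mode;
}

int main(void)
{
	struct mode *m = encoder_current_mode();

	if (m)
		printf("%dx%d @ %d kHz\n", m->hdisplay, m->vdisplay, m->clock);
	free(m);
	return 0;
}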
@@ -10341,7 +10224,7 @@ static bool intel_wm_need_update(struct drm_plane *plane,
return false;
}
-static bool needs_scaling(struct intel_plane_state *state)
+static bool needs_scaling(const struct intel_plane_state *state)
{
int src_w = drm_rect_width(&state->base.src) >> 16;
int src_h = drm_rect_height(&state->base.src) >> 16;
@@ -10351,7 +10234,9 @@ static bool needs_scaling(struct intel_plane_state *state)
return (src_w != dst_w || src_h != dst_h);
}
-int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
+ struct drm_crtc_state *crtc_state,
+ const struct intel_plane_state *old_plane_state,
struct drm_plane_state *plane_state)
{
struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
@@ -10360,10 +10245,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct intel_plane *plane = to_intel_plane(plane_state->plane);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane_state *old_plane_state =
- to_intel_plane_state(plane->base.state);
bool mode_changed = needs_modeset(crtc_state);
- bool was_crtc_enabled = crtc->state->active;
+ bool was_crtc_enabled = old_crtc_state->base.active;
bool is_crtc_enabled = crtc_state->active;
bool turn_off, turn_on, visible, was_visible;
struct drm_framebuffer *fb = plane_state->fb;
@@ -10681,6 +10564,52 @@ intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
m_n->link_m, m_n->link_n, m_n->tu);
}
+#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
+
+static const char * const output_type_str[] = {
+ OUTPUT_TYPE(UNUSED),
+ OUTPUT_TYPE(ANALOG),
+ OUTPUT_TYPE(DVO),
+ OUTPUT_TYPE(SDVO),
+ OUTPUT_TYPE(LVDS),
+ OUTPUT_TYPE(TVOUT),
+ OUTPUT_TYPE(HDMI),
+ OUTPUT_TYPE(DP),
+ OUTPUT_TYPE(EDP),
+ OUTPUT_TYPE(DSI),
+ OUTPUT_TYPE(UNKNOWN),
+ OUTPUT_TYPE(DP_MST),
+};
+
+#undef OUTPUT_TYPE
+
+static void snprintf_output_types(char *buf, size_t len,
+ unsigned int output_types)
+{
+ char *str = buf;
+ int i;
+
+ str[0] = '\0';
+
+ for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
+ int r;
+
+ if ((output_types & BIT(i)) == 0)
+ continue;
+
+ r = snprintf(str, len, "%s%s",
+ str != buf ? "," : "", output_type_str[i]);
+ if (r >= len)
+ break;
+ str += r;
+ len -= r;
+
+ output_types &= ~BIT(i);
+ }
+
+ WARN_ON_ONCE(output_types != 0);
+}
+
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
const char *context)
@@ -10691,10 +10620,15 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_plane *intel_plane;
struct intel_plane_state *state;
struct drm_framebuffer *fb;
+ char buf[64];
DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
crtc->base.base.id, crtc->base.name, context);
+ snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
+ DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
+ buf, pipe_config->output_types);
+
DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
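/*
 * Illustrative sketch, not part of the patch: snprintf_output_types() above
 * turns the output_types bitmask into a comma-separated list for the debug
 * dump, advancing through the buffer by each snprintf() return value and
 * stopping cleanly on truncation.  A standalone version of the same loop,
 * with an invented name table:
 */
#include <stdio.h>

static const char * const names[] = { "ANALOG", "HDMI", "DP", "EDP" };

static void format_bits(char *buf, size_t len, unsigned int bits)
{
	char *str = buf;
	size_t i;

	str[0] = '\0';

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		int r;

		if (!(bits & (1u << i)))
			continue;

		r = snprintf(str, len, "%s%s", str != buf ? "," : "", names[i]);
		if (r < 0 || (size_t)r >= len)
			break;            /* buffer exhausted: stop here */
		str += r;
		len -= r;
	}
}

int main(void)
{
	char buf[64];

	format_bits(buf, sizeof(buf), (1u << 1) | (1u << 3)); /* HDMI, EDP */
	puts(buf);
	return 0;
}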
@@ -10854,7 +10788,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
struct intel_dpll_hw_state dpll_hw_state;
struct intel_shared_dpll *shared_dpll;
struct intel_crtc_wm_state wm_state;
- bool force_thru;
+ bool force_thru, ips_force_disable;
/* FIXME: before the switch to atomic started, a new pipe_config was
* kzalloc'd. Code that depends on any field being zero should be
@@ -10865,6 +10799,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
shared_dpll = crtc_state->shared_dpll;
dpll_hw_state = crtc_state->dpll_hw_state;
force_thru = crtc_state->pch_pfit.force_thru;
+ ips_force_disable = crtc_state->ips_force_disable;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
wm_state = crtc_state->wm;
@@ -10878,6 +10813,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
crtc_state->shared_dpll = shared_dpll;
crtc_state->dpll_hw_state = dpll_hw_state;
crtc_state->pch_pfit.force_thru = force_thru;
+ crtc_state->ips_force_disable = ips_force_disable;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
crtc_state->wm = wm_state;
@@ -11332,6 +11268,18 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
+ PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
+ PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
@@ -12080,7 +12028,7 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
}
- if (i915.fastboot &&
+ if (i915_modparams.fastboot &&
intel_pipe_config_compare(dev_priv,
to_intel_crtc_state(old_crtc_state),
pipe_config, true)) {
@@ -12133,73 +12081,10 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
return dev->driver->get_vblank_counter(dev, crtc->pipe);
}
-static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
- struct drm_i915_private *dev_priv,
- unsigned crtc_mask)
-{
- unsigned last_vblank_count[I915_MAX_PIPES];
- enum pipe pipe;
- int ret;
-
- if (!crtc_mask)
- return;
-
- for_each_pipe(dev_priv, pipe) {
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
-
- if (!((1 << pipe) & crtc_mask))
- continue;
-
- ret = drm_crtc_vblank_get(&crtc->base);
- if (WARN_ON(ret != 0)) {
- crtc_mask &= ~(1 << pipe);
- continue;
- }
-
- last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base);
- }
-
- for_each_pipe(dev_priv, pipe) {
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
- long lret;
-
- if (!((1 << pipe) & crtc_mask))
- continue;
-
- lret = wait_event_timeout(dev->vblank[pipe].queue,
- last_vblank_count[pipe] !=
- drm_crtc_vblank_count(&crtc->base),
- msecs_to_jiffies(50));
-
- WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
-
- drm_crtc_vblank_put(&crtc->base);
- }
-}
-
-static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
-{
- /* fb updated, need to unpin old fb */
- if (crtc_state->fb_changed)
- return true;
-
- /* wm changes, need vblank before final wm's */
- if (crtc_state->update_wm_post)
- return true;
-
- if (crtc_state->wm.need_postvbl_update)
- return true;
-
- return false;
-}
-
static void intel_update_crtc(struct drm_crtc *crtc,
struct drm_atomic_state *state,
struct drm_crtc_state *old_crtc_state,
- struct drm_crtc_state *new_crtc_state,
- unsigned int *crtc_vblank_mask)
+ struct drm_crtc_state *new_crtc_state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -12222,13 +12107,9 @@ static void intel_update_crtc(struct drm_crtc *crtc,
}
drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
-
- if (needs_vblank_wait(pipe_config))
- *crtc_vblank_mask |= drm_crtc_mask(crtc);
}
-static void intel_update_crtcs(struct drm_atomic_state *state,
- unsigned int *crtc_vblank_mask)
+static void intel_update_crtcs(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -12239,12 +12120,11 @@ static void intel_update_crtcs(struct drm_atomic_state *state,
continue;
intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state, crtc_vblank_mask);
+ new_crtc_state);
}
}
-static void skl_update_crtcs(struct drm_atomic_state *state,
- unsigned int *crtc_vblank_mask)
+static void skl_update_crtcs(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
@@ -12278,13 +12158,16 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
unsigned int cmask = drm_crtc_mask(crtc);
intel_crtc = to_intel_crtc(crtc);
- cstate = to_intel_crtc_state(crtc->state);
+ cstate = to_intel_crtc_state(new_crtc_state);
pipe = intel_crtc->pipe;
if (updated & cmask || !cstate->base.active)
continue;
- if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
+ if (skl_ddb_allocation_overlaps(dev_priv,
+ entries,
+ &cstate->wm.skl.ddb,
+ i))
continue;
updated |= cmask;
@@ -12303,7 +12186,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
vbl_wait = true;
intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state, crtc_vblank_mask);
+ new_crtc_state);
if (vbl_wait)
intel_wait_for_vblank(dev_priv, pipe);
@@ -12364,7 +12247,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc *crtc;
struct intel_crtc_state *intel_cstate;
u64 put_domains[I915_MAX_PIPES] = {};
- unsigned crtc_vblank_mask = 0;
int i;
intel_atomic_commit_fence_wait(intel_state);
@@ -12405,7 +12287,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
- if (!crtc->state->active) {
+ if (!new_crtc_state->active) {
/*
* Make sure we don't call initial_watermarks
* for ILK-style watermark updates.
@@ -12414,7 +12296,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
*/
if (INTEL_GEN(dev_priv) >= 9)
dev_priv->display.initial_watermarks(intel_state,
- to_intel_crtc_state(crtc->state));
+ to_intel_crtc_state(new_crtc_state));
}
}
}
@@ -12453,7 +12335,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display.update_crtcs(state, &crtc_vblank_mask);
+ dev_priv->display.update_crtcs(state);
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
* already, but still need the state for the delayed optimization. To
@@ -12464,8 +12346,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* - switch over to the vblank wait helper in the core after that since
* we don't need out special handling any more.
*/
- if (!state->legacy_cursor_update)
- intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
+ drm_atomic_helper_wait_for_flip_done(dev, state);
/*
* Now that the vblank has passed, we can go ahead and program the
@@ -12581,21 +12462,10 @@ static int intel_atomic_commit(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- ret = drm_atomic_helper_setup_commit(state, nonblock);
- if (ret)
- return ret;
-
drm_atomic_state_get(state);
i915_sw_fence_init(&intel_state->commit_ready,
intel_atomic_commit_ready);
- ret = intel_atomic_prepare_commit(dev, state);
- if (ret) {
- DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
- i915_sw_fence_commit(&intel_state->commit_ready);
- return ret;
- }
-
/*
* The intel_legacy_cursor_update() fast path takes care
* of avoiding the vblank waits for simple cursor
@@ -12604,19 +12474,37 @@ static int intel_atomic_commit(struct drm_device *dev,
* updates happen during the correct frames. Gen9+ have
* double buffered watermarks and so shouldn't need this.
*
- * Do this after drm_atomic_helper_setup_commit() and
- * intel_atomic_prepare_commit() because we still want
- * to skip the flip and fb cleanup waits. Although that
- * does risk yanking the mapping from under the display
- * engine.
+ * Unset state->legacy_cursor_update before the call to
+ * drm_atomic_helper_setup_commit() because otherwise
+ * drm_atomic_helper_wait_for_flip_done() is a noop and
+ * we get FIFO underruns because we didn't wait
+ * for vblank.
*
* FIXME doing watermarks and fb cleanup from a vblank worker
* (assuming we had any) would solve these problems.
*/
- if (INTEL_GEN(dev_priv) < 9)
- state->legacy_cursor_update = false;
+ if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
+ if (new_crtc_state->wm.need_postvbl_update ||
+ new_crtc_state->update_wm_post)
+ state->legacy_cursor_update = false;
+ }
+
+ ret = intel_atomic_prepare_commit(dev, state);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+ i915_sw_fence_commit(&intel_state->commit_ready);
+ return ret;
+ }
+
+ ret = drm_atomic_helper_setup_commit(state, nonblock);
+ if (!ret)
+ ret = drm_atomic_helper_swap_state(state, true);
- ret = drm_atomic_helper_swap_state(state, true);
if (ret) {
i915_sw_fence_commit(&intel_state->commit_ready);
@@ -12628,8 +12516,8 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_atomic_track_fbs(state);
if (intel_state->modeset) {
- memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
- sizeof(intel_state->min_pixclk));
+ memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
+ sizeof(intel_state->min_cdclk));
dev_priv->active_crtcs = intel_state->active_crtcs;
dev_priv->cdclk.logical = intel_state->cdclk.logical;
dev_priv->cdclk.actual = intel_state->cdclk.actual;
@@ -12658,6 +12546,58 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.set_crc_source = intel_crtc_set_crc_source,
};
+struct wait_rps_boost {
+ struct wait_queue_entry wait;
+
+ struct drm_crtc *crtc;
+ struct drm_i915_gem_request *request;
+};
+
+static int do_rps_boost(struct wait_queue_entry *_wait,
+ unsigned mode, int sync, void *key)
+{
+ struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
+ struct drm_i915_gem_request *rq = wait->request;
+
+ gen6_rps_boost(rq, NULL);
+ i915_gem_request_put(rq);
+
+ drm_crtc_vblank_put(wait->crtc);
+
+ list_del(&wait->wait.entry);
+ kfree(wait);
+ return 1;
+}
+
+static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+ struct wait_rps_boost *wait;
+
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ if (INTEL_GEN(to_i915(crtc->dev)) < 6)
+ return;
+
+ if (drm_crtc_vblank_get(crtc))
+ return;
+
+ wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait) {
+ drm_crtc_vblank_put(crtc);
+ return;
+ }
+
+ wait->request = to_request(dma_fence_get(fence));
+ wait->crtc = crtc;
+
+ wait->wait.func = do_rps_boost;
+ wait->wait.flags = 0;
+
+ add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+}
+
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
@@ -12755,12 +12695,22 @@ intel_prepare_plane_fb(struct drm_plane *plane,
return ret;
if (!new_state->fence) { /* implicit fencing */
+ struct dma_fence *fence;
+
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
obj->resv, NULL,
false, I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
return ret;
+
+ fence = reservation_object_get_excl_rcu(obj->resv);
+ if (fence) {
+ add_rps_boost_after_vblank(new_state->crtc, fence);
+ dma_fence_put(fence);
+ }
+ } else {
+ add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
}
return 0;
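/*
 * Illustrative sketch, not part of the patch: add_rps_boost_after_vblank()
 * above queues a one-shot entry on the crtc's vblank waitqueue; when the
 * vblank fires, do_rps_boost() boosts the clocks for the pending request,
 * drops its references and frees itself.  A userspace sketch of that
 * self-removing one-shot callback shape (no real waitqueue, just a list):
 */
#include <stdio.h>
#include <stdlib.h>

struct waiter {
	struct waiter *next;
	int (*func)(struct waiter *w);
	int request_id;                  /* stands in for the held GEM request */
};

static struct waiter *vblank_waiters;

static int do_boost(struct waiter *w)
{
	printf("boost for request %d\n", w->request_id);
	free(w);                          /* one-shot: the callback frees itself */
	return 1;
}

static void add_boost_after_vblank(int request_id)
{
	struct waiter *w = malloc(sizeof(*w));

	if (!w)
		return;                   /* boosting is best-effort only */
	w->func = do_boost;
	w->request_id = request_id;
	w->next = vblank_waiters;
	vblank_waiters = w;
}

static void vblank_fired(void)
{
	struct waiter *w = vblank_waiters;

	vblank_waiters = NULL;
	while (w) {
		struct waiter *next = w->next;

		w->func(w);               /* may free w, so read next first */
		w = next;
	}
}

int main(void)
{
	add_boost_after_vblank(42);
	vblank_fired();
	return 0;
}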
@@ -12877,29 +12827,29 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *intel_cstate =
- to_intel_crtc_state(crtc->state);
struct intel_crtc_state *old_intel_cstate =
to_intel_crtc_state(old_crtc_state);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_crtc_state->state);
- bool modeset = needs_modeset(crtc->state);
+ struct intel_crtc_state *intel_cstate =
+ intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
+ bool modeset = needs_modeset(&intel_cstate->base);
if (!modeset &&
(intel_cstate->base.color_mgmt_changed ||
intel_cstate->update_pipe)) {
- intel_color_set_csc(crtc->state);
- intel_color_load_luts(crtc->state);
+ intel_color_set_csc(&intel_cstate->base);
+ intel_color_load_luts(&intel_cstate->base);
}
/* Perform vblank evasion around commit operation */
- intel_pipe_update_start(intel_crtc);
+ intel_pipe_update_start(intel_cstate);
if (modeset)
goto out;
if (intel_cstate->update_pipe)
- intel_update_pipe_config(intel_crtc, old_intel_cstate);
+ intel_update_pipe_config(old_intel_cstate, intel_cstate);
else if (INTEL_GEN(dev_priv) >= 9)
skl_detach_scalers(intel_crtc);
@@ -12913,8 +12863,12 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_crtc_state->state);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
- intel_pipe_update_end(intel_crtc);
+ intel_pipe_update_end(new_crtc_state);
}
/**
@@ -13063,6 +13017,14 @@ intel_legacy_cursor_update(struct drm_plane *plane,
goto slow;
old_plane_state = plane->state;
+ /*
+ * Don't do an async update if there is an outstanding commit modifying
+ * the plane. This prevents our async update's changes from getting
+ * overridden by a previous synchronous update's state.
+ */
+ if (old_plane_state->commit &&
+ !try_wait_for_completion(&old_plane_state->commit->hw_done))
+ goto slow;
/*
* If any parameters change that may affect watermarks,
@@ -13093,6 +13055,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
new_plane_state->crtc_h = crtc_h;
ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
+ to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
+ to_intel_plane_state(plane->state),
to_intel_plane_state(new_plane_state));
if (ret)
goto out_free;
@@ -13122,17 +13086,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
}
old_fb = old_plane_state->fb;
- old_vma = to_intel_plane_state(old_plane_state)->vma;
i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
intel_plane->frontbuffer_bit);
/* Swap plane state */
- new_plane_state->fence = old_plane_state->fence;
- *to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state);
- new_plane_state->fence = NULL;
- new_plane_state->fb = old_fb;
- to_intel_plane_state(new_plane_state)->vma = NULL;
+ plane->state = new_plane_state;
if (plane->state->visible) {
trace_intel_update_plane(plane, to_intel_crtc(crtc));
@@ -13144,13 +13103,17 @@ intel_legacy_cursor_update(struct drm_plane *plane,
intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
}
+ old_vma = fetch_and_zero(&to_intel_plane_state(old_plane_state)->vma);
if (old_vma)
intel_unpin_fb_vma(old_vma);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
- intel_plane_destroy_state(plane, new_plane_state);
+ if (ret)
+ intel_plane_destroy_state(plane, new_plane_state);
+ else
+ intel_plane_destroy_state(plane, old_plane_state);
return ret;
slow:
@@ -13219,8 +13182,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(skl_primary_formats);
modifiers = skl_format_modifiers_ccs;
- primary->update_plane = skylake_update_primary_plane;
- primary->disable_plane = skylake_disable_primary_plane;
+ primary->update_plane = skl_update_plane;
+ primary->disable_plane = skl_disable_plane;
} else if (INTEL_GEN(dev_priv) >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
@@ -13229,8 +13192,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
else
modifiers = skl_format_modifiers_noccs;
- primary->update_plane = skylake_update_primary_plane;
- primary->disable_plane = skylake_disable_primary_plane;
+ primary->update_plane = skl_update_plane;
+ primary->disable_plane = skl_disable_plane;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13501,7 +13464,7 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
- drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
+ drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
if (!drmmode_crtc)
return -ENOENT;
@@ -13665,7 +13628,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
- dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);
+ dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
if (has_edp_a(dev_priv))
intel_dp_init(dev_priv, DP_A, PORT_A);
@@ -13708,14 +13671,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
* trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claim are DP or eDP.
*/
- has_edp = intel_dp_is_edp(dev_priv, PORT_B);
+ has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
- has_edp = intel_dp_is_edp(dev_priv, PORT_C);
+ has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
@@ -14208,7 +14171,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
}
- if (dev_priv->info.gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
dev_priv->display.update_crtcs = skl_update_crtcs;
else
dev_priv->display.update_crtcs = intel_update_crtcs;
@@ -14388,8 +14351,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
intel_update_cdclk(dev_priv);
dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
-
- intel_init_clock_gating(dev_priv);
}
/*
@@ -14739,10 +14700,10 @@ static struct intel_connector *intel_encoder_find_connector(struct intel_encoder
}
static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
- enum transcoder pch_transcoder)
+ enum pipe pch_transcoder)
{
return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
- (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
+ (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
}
static void intel_sanitize_crtc(struct intel_crtc *crtc,
@@ -14825,7 +14786,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
* PCH transcoders B and C would prevent enabling the south
* error interrupt (see cpt_can_enable_serr_int()).
*/
- if (has_pch_trancoder(dev_priv, (enum transcoder)crtc->pipe))
+ if (has_pch_trancoder(dev_priv, crtc->pipe))
crtc->pch_fifo_underrun_disabled = true;
}
}
@@ -15032,7 +14993,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- int pixclk = 0;
+ int min_cdclk = 0;
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
if (crtc_state->base.active) {
@@ -15053,22 +15014,18 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
intel_crtc_compute_pixel_rate(crtc_state);
- if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pixclk = crtc_state->pixel_rate;
- else
- WARN_ON(dev_priv->display.modeset_calc_cdclk);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
- pixclk = DIV_ROUND_UP(pixclk * 100, 95);
+ if (dev_priv->display.modeset_calc_cdclk) {
+ min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
+ if (WARN_ON(min_cdclk < 0))
+ min_cdclk = 0;
+ }
drm_calc_timestamping_constants(&crtc->base,
&crtc_state->base.adjusted_mode);
update_scanline_offset(crtc);
}
- dev_priv->min_pixclk[crtc->pipe] = pixclk;
+ dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
intel_pipe_config_sanity_check(dev_priv, crtc_state);
}
@@ -15105,6 +15062,15 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct intel_encoder *encoder;
int i;
+ if (IS_HASWELL(dev_priv)) {
+ /*
+ * WaRsPkgCStateDisplayPMReq:hsw
+ * System hang if this isn't done before disabling all planes!
+ */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+ }
+
intel_modeset_readout_hw_state(dev);
/* HW state is read out, now we need to sanitize this mess. */
@@ -15186,6 +15152,7 @@ void intel_display_resume(struct drm_device *dev)
if (!ret)
ret = __intel_display_resume(dev, state, &ctx);
+ intel_enable_ipc(dev_priv);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@@ -15201,6 +15168,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_init_gt_powersave(dev_priv);
+ intel_init_clock_gating(dev_priv);
+
intel_setup_overlay(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 09f274419eea..158438bb0389 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -42,6 +42,7 @@
#include "i915_drv.h"
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+#define DP_DPRX_ESI_LEN 14
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
@@ -103,13 +104,13 @@ static const int cnl_rates[] = { 162000, 216000, 270000,
static const int default_rates[] = { 162000, 270000, 540000 };
/**
- * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
*
* If a CPU or PCH DP output is attached to an eDP panel, this function
* will return true, and false otherwise.
*/
-static bool is_edp(struct intel_dp *intel_dp)
+bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -136,32 +137,20 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
-static int intel_dp_num_rates(u8 link_bw_code)
-{
- switch (link_bw_code) {
- default:
- WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
- link_bw_code);
- case DP_LINK_BW_1_62:
- return 1;
- case DP_LINK_BW_2_7:
- return 2;
- case DP_LINK_BW_5_4:
- return 3;
- }
-}
-
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
- int i, num_rates;
+ int i, max_rate;
- num_rates = intel_dp_num_rates(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+ max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
- for (i = 0; i < num_rates; i++)
+ for (i = 0; i < ARRAY_SIZE(default_rates); i++) {
+ if (default_rates[i] > max_rate)
+ break;
intel_dp->sink_rates[i] = default_rates[i];
+ }
- intel_dp->num_sink_rates = num_rates;
+ intel_dp->num_sink_rates = i;
}
/* Theoretical max between source and sink */
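
The rewritten intel_dp_set_sink_rates() above no longer counts entries from the link bandwidth code; it converts DP_MAX_LINK_RATE to a link rate in kHz and copies every default rate that does not exceed it. The stand-alone sketch below models that clamping, assuming the standard DP bandwidth codes map to 162000/270000/540000 kHz (code * 27000, as drm_dp_bw_code_to_link_rate() does for these values); bw_code_to_link_rate() here is a local stand-in, not the DRM helper.

/* Minimal sketch of clamping the sink rate table by the DPCD max link rate. */
#include <stdio.h>

static const int default_rates[] = { 162000, 270000, 540000 };

/* Local stand-in: standard DP bandwidth code times 27000 kHz. */
static int bw_code_to_link_rate(unsigned char link_bw)
{
	return link_bw * 27000;
}

int main(void)
{
	unsigned char dpcd_max_link_rate = 0x0a;	/* 2.7 GHz -> 270000 kHz */
	int sink_rates[3];
	int i, max_rate;

	max_rate = bw_code_to_link_rate(dpcd_max_link_rate);
	for (i = 0; i < (int)(sizeof(default_rates) / sizeof(default_rates[0])); i++) {
		if (default_rates[i] > max_rate)
			break;
		sink_rates[i] = default_rates[i];
	}

	printf("num_sink_rates = %d (max %d kHz)\n", i, max_rate);
	return 0;
}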
@@ -253,15 +242,15 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
} else if (IS_GEN9_BC(dev_priv)) {
source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
- } else {
+ } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
+ IS_BROADWELL(dev_priv)) {
source_rates = default_rates;
size = ARRAY_SIZE(default_rates);
+ } else {
+ source_rates = default_rates;
+ size = ARRAY_SIZE(default_rates) - 1;
}
- /* This depends on the fact that 5.4 is last value in the array */
- if (!intel_dp_source_supports_hbr2(intel_dp))
- size--;
-
intel_dp->source_rates = source_rates;
intel_dp->num_source_rates = size;
}
@@ -388,7 +377,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
- if (is_edp(intel_dp) && fixed_mode) {
+ if (intel_dp_is_edp(intel_dp) && fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
@@ -597,7 +586,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
- WARN_ON(!is_edp(intel_dp));
+ WARN_ON(!intel_dp_is_edp(intel_dp));
WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
intel_dp->active_pipe != intel_dp->pps_pipe);
@@ -644,7 +633,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
- WARN_ON(!is_edp(intel_dp));
+ WARN_ON(!intel_dp_is_edp(intel_dp));
/*
* TODO: BXT has 2 PPS instances. The correct port->PPS instance
@@ -847,7 +836,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!is_edp(intel_dp) || code != SYS_RESTART)
+ if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
return 0;
pps_lock(intel_dp);
@@ -907,7 +896,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
@@ -1018,7 +1007,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
else
precharge = 5;
- if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
+ if (IS_BROADWELL(dev_priv))
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -1043,7 +1032,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_DONE |
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_TIME_OUT_1600us |
+ DP_AUX_CH_CTL_TIME_OUT_MAX |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
@@ -1481,14 +1470,9 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
- if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
- IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
- return true;
- else
- return false;
+ return max_rate >= 540000;
}
static void
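
With the source rate tables now chosen per platform in the hunk further up, intel_dp_source_supports_hbr2() reduces to checking whether the highest source rate reaches 540000 kHz. This relies on the rate tables being sorted in ascending order so that the last entry is the maximum. A tiny sketch with illustrative data:

/* Sketch: HBR2 support derived from the last (highest) source rate. */
#include <stdbool.h>
#include <stdio.h>

static bool source_supports_hbr2(const int *rates, int num)
{
	/* assumes rates[] is sorted ascending, as the tables above are */
	return rates[num - 1] >= 540000;
}

int main(void)
{
	const int hsw_rates[] = { 162000, 270000, 540000 };
	const int g4x_rates[] = { 162000, 270000 };

	printf("hsw supports HBR2: %d\n", source_supports_hbr2(hsw_rates, 3));	/* 1 */
	printf("g4x supports HBR2: %d\n", source_supports_hbr2(g4x_rates, 2));	/* 0 */
	return 0;
}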
@@ -1681,7 +1665,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
else
pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
- if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
struct drm_display_mode *panel_mode =
intel_connector->panel.alt_fixed_mode;
struct drm_display_mode *req_mode = &pipe_config->base.mode;
@@ -1736,7 +1720,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
- if (is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp)) {
/* Get bpp from vbt only for panels that don't have bpp in edid */

if (intel_connector->base.display_info.bpc == 0 &&
@@ -1829,7 +1813,7 @@ found:
* DPLL0 VCO may need to be adjusted to get the correct
* clock for eDP. This will affect cdclk as well.
*/
- if (is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
+ if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
int vco;
switch (pipe_config->port_clock / 2) {
@@ -1848,6 +1832,8 @@ found:
if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
+ intel_psr_compute_config(intel_dp, pipe_config);
+
return true;
}
@@ -1861,7 +1847,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
}
static void intel_dp_prepare(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2069,7 +2055,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return false;
cancel_delayed_work(&intel_dp->panel_vdd_work);
@@ -2119,7 +2105,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
bool vdd;
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@@ -2203,7 +2189,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
@@ -2226,7 +2212,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
@@ -2267,7 +2253,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@@ -2285,7 +2271,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
@@ -2316,7 +2302,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@@ -2360,7 +2346,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("\n");
@@ -2377,7 +2363,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
u32 pp;
i915_reg_t pp_ctrl_reg;
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@@ -2401,7 +2387,7 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("\n");
@@ -2461,7 +2447,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -2666,7 +2652,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
- if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
+ if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
/*
* This is a big fat ugly hack.
@@ -2688,33 +2674,55 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
static void intel_disable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
- if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
- intel_psr_disable(intel_dp);
-
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
intel_edp_panel_vdd_on(intel_dp);
intel_edp_backlight_off(old_conn_state);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
intel_edp_panel_off(intel_dp);
+}
+
+static void g4x_disable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ intel_disable_dp(encoder, old_crtc_state, old_conn_state);
/* disable the port before the pipe on g4x */
- if (INTEL_GEN(dev_priv) < 5)
- intel_dp_link_down(intel_dp);
+ intel_dp_link_down(intel_dp);
+}
+
+static void ilk_disable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ intel_disable_dp(encoder, old_crtc_state, old_conn_state);
+}
+
+static void vlv_disable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ intel_psr_disable(intel_dp, old_crtc_state);
+
+ intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
static void ilk_post_disable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
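
The hunk above splits intel_disable_dp() into a shared core plus thin per-platform wrappers: g4x additionally takes the link down before the pipe, ilk uses the core unchanged, and vlv/chv disable PSR first. The generic sketch below illustrates that pattern of one common helper wrapped by small platform variants selected once at init time; the platform names and operations are illustrative only, not the driver's code.

/* Generic sketch of the "common core + platform wrapper" split used above. */
#include <stdio.h>

static void common_disable(const char *name)
{
	printf("%s: audio off, backlight off, panel off\n", name);
}

static void g4x_disable(const char *name)
{
	common_disable(name);
	printf("%s: link down before the pipe (g4x ordering quirk)\n", name);
}

static void vlv_disable(const char *name)
{
	printf("%s: PSR disable first\n", name);
	common_disable(name);
}

int main(void)
{
	/* The hook is chosen once at init time, not re-checked on every call. */
	void (*disable)(const char *) = vlv_disable;

	disable("DP-B");
	(void)g4x_disable;
	return 0;
}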
@@ -2727,8 +2735,8 @@ static void ilk_post_disable_dp(struct intel_encoder *encoder,
}
static void vlv_post_disable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -2736,8 +2744,8 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder,
}
static void chv_post_disable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -2842,7 +2850,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
static void intel_dp_enable_port(struct intel_dp *intel_dp,
- struct intel_crtc_state *old_crtc_state)
+ const struct intel_crtc_state *old_crtc_state)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -2866,8 +2874,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
}
static void intel_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@@ -2914,26 +2922,26 @@ static void intel_enable_dp(struct intel_encoder *encoder,
}
static void g4x_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
intel_enable_dp(encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
}
static void vlv_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_edp_backlight_on(pipe_config, conn_state);
- intel_psr_enable(intel_dp);
+ intel_psr_enable(intel_dp, pipe_config);
}
static void g4x_pre_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
@@ -3040,7 +3048,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
intel_dp->active_pipe = crtc->pipe;
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
/* now it's all ours */
@@ -3055,8 +3063,8 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
vlv_phy_pre_encoder_enable(encoder);
@@ -3064,8 +3072,8 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder,
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
intel_dp_prepare(encoder, pipe_config);
@@ -3073,8 +3081,8 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
}
static void chv_pre_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
chv_phy_pre_encoder_enable(encoder);
@@ -3085,8 +3093,8 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder,
}
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
intel_dp_prepare(encoder, pipe_config);
@@ -3094,8 +3102,8 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
}
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
chv_phy_post_pll_disable(encoder);
}
@@ -3147,9 +3155,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_GEN9_LP(dev_priv))
- return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- else if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
return intel_ddi_dp_voltage_max(encoder);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -3506,13 +3512,11 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
- if (HAS_DDI(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ signal_levels = bxt_signal_levels(intel_dp);
+ } else if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
-
- if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv))
- signal_levels = 0;
- else
- mask = DDI_BUF_EMP_MASK;
+ mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev_priv)) {
signal_levels = chv_signal_levels(intel_dp);
} else if (IS_VALLEYVIEW(dev_priv)) {
@@ -3791,7 +3795,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return false;
/* Don't clobber cached eDP rates. */
- if (!is_edp(intel_dp)) {
+ if (!intel_dp_is_edp(intel_dp)) {
intel_dp_set_sink_rates(intel_dp);
intel_dp_set_common_rates(intel_dp);
}
@@ -3813,7 +3817,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* downstream port information. So, an early return here saves
* time from performing other operations which are not required.
*/
- if (!is_edp(intel_dp) && !intel_dp->sink_count)
+ if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
return false;
if (!drm_dp_is_branch(intel_dp->dpcd))
@@ -3835,7 +3839,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
{
u8 mstm_cap;
- if (!i915.enable_dp_mst)
+ if (!i915_modparams.enable_dp_mst)
return false;
if (!intel_dp->can_mst)
@@ -3853,7 +3857,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
- if (!i915.enable_dp_mst)
+ if (!i915_modparams.enable_dp_mst)
return;
if (!intel_dp->can_mst)
@@ -4000,15 +4004,9 @@ intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
- int ret;
-
- ret = drm_dp_dpcd_read(&intel_dp->aux,
- DP_SINK_COUNT_ESI,
- sink_irq_vector, 14);
- if (ret != 14)
- return false;
-
- return true;
+ return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
+ sink_irq_vector, DP_DPRX_ESI_LEN) ==
+ DP_DPRX_ESI_LEN;
}
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
@@ -4208,7 +4206,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
bool bret;
if (intel_dp->is_mst) {
- u8 esi[16] = { 0 };
+ u8 esi[DP_DPRX_ESI_LEN] = { 0 };
int ret = 0;
int retry;
bool handled;
@@ -4403,7 +4401,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
- if (is_edp(intel_dp))
+ if (intel_dp_is_edp(intel_dp))
return connector_status_connected;
/* if there's no downstream port, we're done */
@@ -4719,7 +4717,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain);
/* Can't disconnect eDP, but you can close the lid... */
- if (is_edp(intel_dp))
+ if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
else if (intel_digital_port_connected(to_i915(dev),
dp_to_dig_port(intel_dp)))
@@ -4745,10 +4743,6 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DP;
- DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
- yesno(intel_dp_source_supports_hbr2(intel_dp)),
- yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
-
if (intel_dp->reset_link_params) {
/* Initial max link lane count */
intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
@@ -4799,7 +4793,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
- if (is_edp(intel_dp) || intel_connector->detect_edid)
+ if (intel_dp_is_edp(intel_dp) || intel_connector->detect_edid)
status = connector_status_connected;
intel_dp->detect_done = true;
@@ -4883,7 +4877,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, fall back to fixed mode */
- if (is_edp(intel_attached_dp(connector)) &&
+ if (intel_dp_is_edp(intel_attached_dp(connector)) &&
intel_connector->panel.fixed_mode) {
struct drm_display_mode *mode;
@@ -4934,8 +4928,10 @@ intel_dp_connector_destroy(struct drm_connector *connector)
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
- /* Can't call is_edp() since the encoder may have been destroyed
- * already. */
+ /*
+ * Can't call intel_dp_is_edp() since the encoder may have been
+ * destroyed already.
+ */
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
intel_panel_fini(&intel_connector->panel);
@@ -4949,7 +4945,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
struct intel_dp *intel_dp = &intel_dig_port->dp;
intel_dp_mst_encoder_cleanup(intel_dig_port);
- if (is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
@@ -4975,7 +4971,7 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
/*
@@ -5043,7 +5039,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->active_pipe = vlv_active_pipe(intel_dp);
- if (is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp)) {
/* Reinit the power sequencer, in case BIOS did something with it. */
intel_dp_pps_init(encoder->dev, intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
@@ -5144,7 +5140,7 @@ put_power:
}
/* check the VBT to see whether the eDP is on another port */
-bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
+bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
/*
* eDP not supported on g4x. so bail out early just
@@ -5167,7 +5163,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
- if (is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp)) {
u32 allowed_scalers;
allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
@@ -5455,7 +5451,7 @@ static void intel_dp_pps_init(struct drm_device *dev,
* The caller of this function needs to take a lock on dev_priv->drrs.
*/
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state,
+ const struct intel_crtc_state *crtc_state,
int refresh_rate)
{
struct intel_encoder *encoder;
@@ -5474,11 +5470,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}
- /*
- * FIXME: This needs proper synchronization with psr state for some
- * platforms that cannot have PSR and DRRS enabled at the same time.
- */
-
dig_port = dp_to_dig_port(intel_dp);
encoder = &dig_port->base;
intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -5552,7 +5543,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
* Initializes frontbuffer_bits and drrs.dp
*/
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5562,6 +5553,11 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
return;
}
+ if (dev_priv->psr.enabled) {
+ DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
+ return;
+ }
+
mutex_lock(&dev_priv->drrs.mutex);
if (WARN_ON(dev_priv->drrs.dp)) {
DRM_ERROR("DRRS already enabled\n");
@@ -5583,7 +5579,7 @@ unlock:
*
*/
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
- struct intel_crtc_state *old_crtc_state)
+ const struct intel_crtc_state *old_crtc_state)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -5833,7 +5829,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return true;
/*
@@ -6049,7 +6045,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_edp(dev_priv, port))
+ if (intel_dp_is_port_edp(dev_priv, port))
type = DRM_MODE_CONNECTOR_eDP;
else
type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -6067,7 +6063,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
/* eDP only on port B and/or C on vlv/chv */
if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- is_edp(intel_dp) && port != PORT_B && port != PORT_C))
+ intel_dp_is_edp(intel_dp) &&
+ port != PORT_B && port != PORT_C))
return false;
DRM_DEBUG_KMS("Adding %s connector on port %c\n",
@@ -6095,7 +6092,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector->get_hw_state = intel_connector_get_hw_state;
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
+ if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
(port == PORT_B || port == PORT_C || port == PORT_D))
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
@@ -6151,7 +6148,6 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
goto err_encoder_init;
intel_encoder->compute_config = intel_dp_compute_config;
- intel_encoder->disable = intel_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;
@@ -6159,18 +6155,24 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
+ intel_encoder->disable = vlv_disable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
} else if (IS_VALLEYVIEW(dev_priv)) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
+ intel_encoder->disable = vlv_disable_dp;
intel_encoder->post_disable = vlv_post_disable_dp;
+ } else if (INTEL_GEN(dev_priv) >= 5) {
+ intel_encoder->pre_enable = g4x_pre_enable_dp;
+ intel_encoder->enable = g4x_enable_dp;
+ intel_encoder->disable = ilk_disable_dp;
+ intel_encoder->post_disable = ilk_post_disable_dp;
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
- if (INTEL_GEN(dev_priv) >= 5)
- intel_encoder->post_disable = ilk_post_disable_dp;
+ intel_encoder->disable = g4x_disable_dp;
}
intel_dig_port->port = port;
@@ -6193,6 +6195,9 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hotplug.irq_port[port] = intel_dig_port;
+ if (port != PORT_A)
+ intel_infoframe_init(intel_dig_port);
+
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index d2830ba3162e..2bb2ceb9d463 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -264,7 +264,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
- if (!i915.enable_dpcd_backlight)
+ if (!i915_modparams.enable_dpcd_backlight)
return -ENODEV;
if (!intel_dp_aux_display_control_capable(intel_connector))
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 93fc8ab9bb31..772521440a9f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -123,8 +123,8 @@ static int intel_dp_mst_atomic_check(struct drm_connector *connector,
}
static void intel_mst_disable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
@@ -133,7 +133,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
to_intel_connector(old_conn_state->connector);
int ret;
- DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+ DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
@@ -146,8 +146,8 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
}
static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
@@ -155,8 +155,6 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
-
/* this can fail */
drm_dp_check_act_status(&intel_dp->mst_mgr);
/* and this can also fail */
@@ -164,20 +162,26 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
+ /*
+ * Power down mst path before disabling the port, otherwise we end
+ * up getting interrupts from the sink upon detecting link loss.
+ */
+ drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
+ false);
+
intel_dp->active_mst_links--;
intel_mst->connector = NULL;
if (intel_dp->active_mst_links == 0) {
intel_dig_port->base.post_disable(&intel_dig_port->base,
NULL, NULL);
-
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
}
+ DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
}
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
@@ -195,8 +199,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
connector->encoder = encoder;
intel_mst->connector = connector;
- DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+ DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
if (intel_dp->active_mst_links == 0)
intel_dig_port->base.pre_enable(&intel_dig_port->base,
pipe_config, NULL);
@@ -219,8 +224,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
}
static void intel_mst_enable_dp(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
@@ -229,7 +234,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
enum port port = intel_dig_port->port;
int ret;
- DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+ DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
if (intel_wait_for_register(dev_priv,
DP_TP_STATUS(port),
@@ -449,32 +454,52 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_connector *intel_connector;
struct drm_connector *connector;
- int i;
+ enum pipe pipe;
+ int ret;
intel_connector = intel_connector_alloc();
if (!intel_connector)
return NULL;
connector = &intel_connector->base;
- drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ intel_connector_free(intel_connector);
+ return NULL;
+ }
+
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
- for (i = PIPE_A; i <= PIPE_C; i++) {
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_dp->mst_encoders[i]->base.base);
+ for_each_pipe(dev_priv, pipe) {
+ struct drm_encoder *enc =
+ &intel_dp->mst_encoders[pipe]->base.base;
+
+ ret = drm_mode_connector_attach_encoder(&intel_connector->base,
+ enc);
+ if (ret)
+ goto err;
}
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
- drm_mode_connector_set_path_property(connector, pathprop);
+ ret = drm_mode_connector_set_path_property(connector, pathprop);
+ if (ret)
+ goto err;
+
return connector;
+
+err:
+ drm_connector_cleanup(connector);
+ return NULL;
}
static void intel_dp_register_mst_connector(struct drm_connector *connector)
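
intel_dp_add_mst_connector() above now checks the return value of each initialization step (drm_connector_init(), attaching the per-pipe encoders, setting the path property) and unwinds through a single err label instead of ignoring failures. A minimal stand-alone model of that goto-based unwinding follows; the step names and cleanup below are hypothetical simplifications of the real calls.

/* Sketch of the single-exit error unwinding pattern used above. */
#include <stdio.h>
#include <stdlib.h>

struct connector { int initialized; };

static int step_init(struct connector *c)     { c->initialized = 1; return 0; }
static int step_attach(struct connector *c)   { (void)c; return 0; }
static int step_set_path(struct connector *c) { (void)c; return -1; /* simulate failure */ }
static void cleanup(struct connector *c)      { c->initialized = 0; }

static struct connector *add_connector(void)
{
	struct connector *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;

	if (step_init(c)) {
		free(c);	/* nothing else to clean up yet */
		return NULL;
	}
	if (step_attach(c))
		goto err;
	if (step_set_path(c))
		goto err;
	return c;

err:
	cleanup(c);	/* mirrors drm_connector_cleanup() in the hunk above */
	free(c);
	return NULL;
}

int main(void)
{
	printf("connector: %p\n", (void *)add_connector());	/* NULL here */
	return 0;
}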
@@ -494,6 +519,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
drm_connector_unregister(connector);
if (dev_priv->fbdev)
@@ -505,7 +531,6 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_modeset_unlock(&connector->dev->mode_config.connection_mutex);
drm_connector_unreference(connector);
- DRM_DEBUG_KMS("\n");
}
static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -564,11 +589,12 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
static bool
intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
{
- int i;
struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ enum pipe pipe;
- for (i = PIPE_A; i <= PIPE_C; i++)
- intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i);
+ for_each_pipe(dev_priv, pipe)
+ intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(intel_dig_port, pipe);
return true;
}
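
Both MST hunks above replace the hard-coded PIPE_A..PIPE_C loop with for_each_pipe(), so connector and fake-encoder setup follow the device's actual pipe count rather than assuming three pipes. A small sketch of iterating from a per-device pipe count; the structure and macro below are illustrative, not the driver's definitions.

/* Sketch: iterate pipes from a per-device count instead of a fixed range. */
#include <stdio.h>

struct device_info { int num_pipes; };

#define for_each_pipe(info, p) \
	for ((p) = 0; (p) < (info)->num_pipes; (p)++)

int main(void)
{
	struct device_info two_pipe = { .num_pipes = 2 };
	int pipe;

	for_each_pipe(&two_pipe, pipe)
		printf("create fake MST encoder for pipe %d\n", pipe);
	return 0;
}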
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 79fbaf78f604..7bc60c848940 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -220,23 +220,23 @@ struct intel_encoder {
struct intel_crtc_state *,
struct drm_connector_state *);
void (*pre_pll_enable)(struct intel_encoder *,
- struct intel_crtc_state *,
- struct drm_connector_state *);
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
void (*pre_enable)(struct intel_encoder *,
- struct intel_crtc_state *,
- struct drm_connector_state *);
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
void (*enable)(struct intel_encoder *,
- struct intel_crtc_state *,
- struct drm_connector_state *);
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
void (*disable)(struct intel_encoder *,
- struct intel_crtc_state *,
- struct drm_connector_state *);
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
void (*post_disable)(struct intel_encoder *,
- struct intel_crtc_state *,
- struct drm_connector_state *);
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
void (*post_pll_disable)(struct intel_encoder *,
- struct intel_crtc_state *,
- struct drm_connector_state *);
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
/* Read out the current hw state of this connector, returning true if
* the encoder is active. If the encoder is enabled it also sets the pipe
* it is connected to in the pipe parameter. */
@@ -384,7 +384,8 @@ struct intel_atomic_state {
unsigned int active_pipe_changes;
unsigned int active_crtcs;
- unsigned int min_pixclk[I915_MAX_PIPES];
+ /* minimum acceptable cdclk for each pipe */
+ int min_cdclk[I915_MAX_PIPES];
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
@@ -493,6 +494,8 @@ struct intel_crtc_scaler_state {
/* drm_mode->private_flags */
#define I915_MODE_FLAG_INHERITED 1
+/* Flag to get scanline using frame time stamps */
+#define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1)
struct intel_pipe_wm {
struct intel_wm_level wm[5];
@@ -714,6 +717,9 @@ struct intel_crtc_state {
struct intel_link_m_n dp_m2_n2;
bool has_drrs;
+ bool has_psr;
+ bool has_psr2;
+
/*
* Frequency the dpll for the port should run at. Differs from the
* adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
@@ -752,6 +758,7 @@ struct intel_crtc_state {
struct intel_link_m_n fdi_m_n;
bool ips_enabled;
+ bool ips_force_disable;
bool enable_fbc;
@@ -795,18 +802,10 @@ struct intel_crtc {
* some outputs connected to this crtc.
*/
bool active;
- bool lowfreq_avail;
u8 plane_ids_mask;
unsigned long long enabled_power_domains;
struct intel_overlay *overlay;
- /* Display surface base address adjustement for pageflips. Note that on
- * gen4+ this only adjusts up to a tile, offsets within a tile are
- * handled in the hw itself (with the TILEOFF register). */
- u32 dspaddr_offset;
- int adjusted_x;
- int adjusted_y;
-
struct intel_crtc_state *config;
/* global reset count when the last flip was submitted */
@@ -908,16 +907,6 @@ struct intel_hdmi {
bool has_audio;
bool rgb_quant_range_selectable;
struct intel_connector *attached_connector;
- void (*write_infoframe)(struct drm_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- enum hdmi_infoframe_type type,
- const void *frame, ssize_t len);
- void (*set_infoframes)(struct drm_encoder *encoder,
- bool enable,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
- bool (*infoframe_enabled)(struct drm_encoder *encoder,
- const struct intel_crtc_state *pipe_config);
};
struct intel_dp_mst_encoder;
@@ -1068,6 +1057,17 @@ struct intel_digital_port {
bool release_cl2_override;
uint8_t max_lanes;
enum intel_display_power_domain ddi_io_power_domain;
+
+ void (*write_infoframe)(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len);
+ void (*set_infoframes)(struct drm_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+ bool (*infoframe_enabled)(struct drm_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
};
struct intel_dp_mst_encoder {
@@ -1188,6 +1188,30 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
+static inline struct intel_plane_state *
+intel_atomic_get_new_plane_state(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ return to_intel_plane_state(drm_atomic_get_new_plane_state(&state->base,
+ &plane->base));
+}
+
+static inline struct intel_crtc_state *
+intel_atomic_get_old_crtc_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ return to_intel_crtc_state(drm_atomic_get_old_crtc_state(&state->base,
+ &crtc->base));
+}
+
+static inline struct intel_crtc_state *
+intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ return to_intel_crtc_state(drm_atomic_get_new_crtc_state(&state->base,
+ &crtc->base));
+}
+
/* intel_fifo_underrun.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
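
The new inline helpers above wrap drm_atomic_get_{old,new}_{plane,crtc}_state() and cast the result to the intel_* state types, so callers avoid open-coding the downcast. The stand-alone sketch below models only that shape (fetch the base state, downcast to the containing driver state); the structures and helper here are invented for illustration, and the cast is valid only because the base state is the first member of the wrapping structure.

/* Generic sketch of the "fetch base state, downcast to driver state" helpers. */
#include <stdio.h>

struct drm_crtc_state   { int active; };
struct intel_crtc_state { struct drm_crtc_state base; int min_cdclk; };

/* Stand-in for drm_atomic_get_new_crtc_state(): here it just returns a stored pointer. */
static struct drm_crtc_state *get_new_base_state(struct drm_crtc_state *stored)
{
	return stored;
}

/* Mirrors the shape of intel_atomic_get_new_crtc_state(): wrap and downcast. */
static struct intel_crtc_state *get_new_crtc_state(struct drm_crtc_state *stored)
{
	return (struct intel_crtc_state *)get_new_base_state(stored);
}

int main(void)
{
	struct intel_crtc_state state = { .base = { .active = 1 }, .min_cdclk = 307200 };
	struct intel_crtc_state *s = get_new_crtc_state(&state.base);

	printf("active=%d min_cdclk=%d\n", s->base.active, s->min_cdclk);
	return 0;
}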
@@ -1204,11 +1228,8 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 mask);
void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
@@ -1216,7 +1237,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
u32 mask)
{
- return mask & ~i915->rps.pm_intrmsk_mbz;
+ return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
}
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
@@ -1227,7 +1248,7 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
* We only use drm_irq_uninstall() at unload and VT switch, so
* this is the only thing we need to check.
*/
- return dev_priv->pm.irqs_enabled;
+ return dev_priv->runtime_pm.irqs_enabled;
}
int intel_get_crtc_scanline(struct intel_crtc *crtc);
@@ -1245,8 +1266,8 @@ void intel_crt_reset(struct drm_encoder *encoder);
/* intel_ddi.c */
void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state);
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
void hsw_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
@@ -1271,6 +1292,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
+u32 bxt_signal_levels(struct intel_dp *intel_dp);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
@@ -1289,6 +1311,7 @@ void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
/* intel_cdclk.c */
+int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
void cnl_init_cdclk(struct drm_i915_private *dev_priv);
@@ -1331,11 +1354,13 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
int intel_connector_init(struct intel_connector *);
struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
bool intel_connector_get_hw_state(struct intel_connector *connector);
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
-struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
- struct drm_crtc *crtc);
+struct drm_display_mode *
+intel_encoder_current_mode(struct intel_encoder *encoder);
+
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1376,7 +1401,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
unsigned int expected_mask);
int intel_get_load_detect_pipe(struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -1400,7 +1425,9 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val);
-int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
+int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
+ struct drm_crtc_state *crtc_state,
+ const struct intel_plane_state *old_plane_state,
struct drm_plane_state *plane_state);
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
@@ -1498,7 +1525,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
-bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_dp_is_edp(struct intel_dp *intel_dp);
+bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
@@ -1517,9 +1545,9 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state);
+ const struct intel_crtc_state *crtc_state);
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state);
+ const struct intel_crtc_state *crtc_state);
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
@@ -1647,6 +1675,7 @@ void intel_hdmi_handle_sink_scrambling(struct intel_encoder *intel_encoder,
bool high_tmds_clock_ratio,
bool scrambling);
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
+void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
/* intel_lvds.c */
@@ -1718,8 +1747,10 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con
/* intel_psr.c */
-void intel_psr_enable(struct intel_dp *intel_dp);
-void intel_psr_disable(struct intel_dp *intel_dp);
+void intel_psr_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits);
void intel_psr_flush(struct drm_i915_private *dev_priv,
@@ -1728,6 +1759,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
void intel_psr_init(struct drm_i915_private *dev_priv);
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits);
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
@@ -1755,7 +1788,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
static inline void
assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
{
- WARN_ONCE(dev_priv->pm.suspended,
+ WARN_ONCE(dev_priv->runtime_pm.suspended,
"Device suspended during HW access\n");
}
@@ -1763,7 +1796,7 @@ static inline void
assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
{
assert_rpm_device_not_suspended(dev_priv);
- WARN_ONCE(!atomic_read(&dev_priv->pm.wakeref_count),
+ WARN_ONCE(!atomic_read(&dev_priv->runtime_pm.wakeref_count),
"RPM wakelock ref not held during HW access");
}
@@ -1788,7 +1821,7 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
static inline void
disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
{
- atomic_inc(&dev_priv->pm.wakeref_count);
+ atomic_inc(&dev_priv->runtime_pm.wakeref_count);
}
/**
@@ -1805,7 +1838,7 @@ disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
static inline void
enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
{
- atomic_dec(&dev_priv->pm.wakeref_count);
+ atomic_dec(&dev_priv->runtime_pm.wakeref_count);
}
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
@@ -1843,7 +1876,6 @@ void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_gem_request *rq,
struct intel_rps_client *rps);
-void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
@@ -1859,16 +1891,19 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2);
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
+ const struct skl_ddb_entry **entries,
const struct skl_ddb_entry *ddb,
int ignore);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate);
-static inline int intel_enable_rc6(void)
+void intel_init_ipc(struct drm_i915_private *dev_priv);
+void intel_enable_ipc(struct drm_i915_private *dev_priv);
+static inline int intel_rc6_enabled(void)
{
- return i915.enable_rc6;
+ return i915_modparams.enable_rc6;
}
/* intel_sdvo.c */
@@ -1883,8 +1918,12 @@ struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void intel_pipe_update_start(struct intel_crtc *crtc);
-void intel_pipe_update_end(struct intel_crtc *crtc);
+void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
+void skl_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
@@ -1956,7 +1995,9 @@ struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
-int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
+int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
/* intel_color.c */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 7442891762be..83f15848098a 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -263,7 +263,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
/* XXX: old code skips write if control unchanged */
if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
- DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
+ DRM_DEBUG_KMS("Same special packet %02x twice in a row.\n", cmd);
I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
@@ -330,6 +330,10 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
adjusted_mode->flags = 0;
if (IS_GEN9_LP(dev_priv)) {
+ /* Enable Frame time stamp based scanline reporting */
+ adjusted_mode->private_flags |=
+ I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
+
/* Dual link goes to DSI transcoder A. */
if (intel_dsi->ports == BIT(PORT_C))
pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
@@ -731,7 +735,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
}
static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *pipe_config);
+ const struct intel_crtc_state *pipe_config);
static void intel_dsi_unprepare(struct intel_encoder *encoder);
static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
@@ -783,17 +787,22 @@ static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
*/
static void intel_dsi_pre_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct drm_crtc *crtc = pipe_config->base.crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
enum port port;
u32 val;
bool glk_cold_boot = false;
DRM_DEBUG_KMS("\n");
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
/*
* The BIOS may leave the PLL in a wonky state where it doesn't
* lock. It needs to be fully powered down to fix it.
@@ -878,8 +887,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
* the pre_enable hook.
*/
static void intel_dsi_enable_nop(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
DRM_DEBUG_KMS("\n");
}
@@ -889,8 +898,8 @@ static void intel_dsi_enable_nop(struct intel_encoder *encoder,
* the post_disable hook.
*/
static void intel_dsi_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -925,8 +934,8 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
}
static void intel_dsi_post_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -1066,7 +1075,7 @@ out_put_power:
}
static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1102,6 +1111,10 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
pixel_format_from_register_bits(fmt));
bpp = pipe_config->pipe_bpp;
+	/* Enable Frame time stamp based scanline reporting */
+ adjusted_mode->private_flags |=
+ I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP;
+
/* In terms of pixels */
adjusted_mode->crtc_hdisplay =
I915_READ(BXT_MIPI_TRANS_HACTIVE(port));
@@ -1370,7 +1383,7 @@ static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
}
static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
@@ -1738,42 +1751,13 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
else
intel_encoder->crtc_mask = BIT(PIPE_B);
- if (dev_priv->vbt.dsi.config->dual_link) {
+ if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
-
- switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
- case DL_DCS_PORT_A:
- intel_dsi->dcs_backlight_ports = BIT(PORT_A);
- break;
- case DL_DCS_PORT_C:
- intel_dsi->dcs_backlight_ports = BIT(PORT_C);
- break;
- default:
- case DL_DCS_PORT_A_AND_C:
- intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C);
- break;
- }
-
- switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
- case DL_DCS_PORT_A:
- intel_dsi->dcs_cabc_ports = BIT(PORT_A);
- break;
- case DL_DCS_PORT_C:
- intel_dsi->dcs_cabc_ports = BIT(PORT_C);
- break;
- default:
- case DL_DCS_PORT_A_AND_C:
- intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C);
- break;
- }
- } else {
+ else
intel_dsi->ports = BIT(port);
- intel_dsi->dcs_backlight_ports = BIT(port);
- intel_dsi->dcs_cabc_ports = BIT(port);
- }
- if (!dev_priv->vbt.dsi.config->cabc_supported)
- intel_dsi->dcs_cabc_ports = 0;
+ intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
+ intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index c0a027274c06..53c9b763f4ce 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -175,8 +175,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
}
static void intel_disable_dvo(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -189,8 +189,8 @@ static void intel_disable_dvo(struct intel_encoder *encoder,
}
static void intel_enable_dvo(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -258,8 +258,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
}
static void intel_dvo_pre_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
@@ -379,32 +379,15 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
* chip being on DVOB/C and having multiple pipes.
*/
static struct drm_display_mode *
-intel_dvo_get_current_mode(struct drm_connector *connector)
+intel_dvo_get_current_mode(struct intel_encoder *encoder)
{
- struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
- uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
- struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *mode;
- /* If the DVO port is active, that'll be the LVDS, so we can pull out
- * its timings to get how the BIOS set up the panel.
- */
- if (dvo_val & DVO_ENABLE) {
- struct intel_crtc *crtc;
- int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
-
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- if (crtc) {
- mode = intel_crtc_mode_get(dev, &crtc->base);
- if (mode) {
- mode->type |= DRM_MODE_TYPE_PREFERRED;
- if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
- mode->flags |= DRM_MODE_FLAG_PHSYNC;
- if (dvo_val & DVO_VSYNC_ACTIVE_HIGH)
- mode->flags |= DRM_MODE_FLAG_PVSYNC;
- }
- }
+ mode = intel_encoder_current_mode(encoder);
+ if (mode) {
+ DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_mode_debug_printmodeline(mode);
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
}
return mode;
@@ -551,7 +534,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
* mode being output through DVO.
*/
intel_panel_init(&intel_connector->panel,
- intel_dvo_get_current_mode(connector),
+ intel_dvo_get_current_mode(intel_encoder),
NULL, NULL);
intel_dvo->panel_wants_dither = true;
}
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 3c2d9cf22ed5..ab5bf4e2e28e 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -22,7 +22,10 @@
*
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
@@ -39,6 +42,7 @@
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
+#define GEN10_LR_CONTEXT_RENDER_SIZE (18 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
@@ -150,10 +154,11 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
default:
MISSING_CASE(INTEL_GEN(dev_priv));
case 10:
+ return GEN10_LR_CONTEXT_RENDER_SIZE;
case 9:
return GEN9_LR_CONTEXT_RENDER_SIZE;
case 8:
- return i915.enable_execlists ?
+ return i915_modparams.enable_execlists ?
GEN8_LR_CONTEXT_RENDER_SIZE :
GEN8_CXT_TOTAL_SIZE;
case 7:
@@ -301,7 +306,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
&intel_engine_classes[engine->class];
int (*init)(struct intel_engine_cs *engine);
- if (i915.enable_execlists)
+ if (i915_modparams.enable_execlists)
init = class_info->init_execlists;
else
init = class_info->init_legacy;
@@ -380,6 +385,37 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}
+static bool csb_force_mmio(struct drm_i915_private *i915)
+{
+ /*
+ * IOMMU adds unpredictable latency causing the CSB write (from the
+ * GPU into the HWSP) to only be visible some time after the interrupt
+ * (missed breadcrumb syndrome).
+ */
+ if (intel_vtd_active())
+ return true;
+
+ /* Older GVT emulation depends upon intercepting CSB mmio */
+ if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
+ return true;
+
+ return false;
+}
+
+static void intel_engine_init_execlist(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ execlists->csb_use_mmio = csb_force_mmio(engine->i915);
+
+ execlists->port_mask = 1;
+ BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
+ GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
+
+ execlists->queue = RB_ROOT;
+ execlists->first = NULL;
+}
+
/**
 * intel_engine_setup_common - set up engine state not requiring hw access
* @engine: Engine to setup.
@@ -391,8 +427,7 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
*/
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
- engine->execlist_queue = RB_ROOT;
- engine->execlist_first = NULL;
+ intel_engine_init_execlist(engine);
intel_engine_init_timeline(engine);
intel_engine_init_hangcheck(engine);
@@ -442,6 +477,116 @@ static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
i915_vma_unpin_and_release(&engine->scratch);
}
+static void cleanup_phys_status_page(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ if (!dev_priv->status_page_dmah)
+ return;
+
+ drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
+ engine->status_page.page_addr = NULL;
+}
+
+static void cleanup_status_page(struct intel_engine_cs *engine)
+{
+ struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
+
+ vma = fetch_and_zero(&engine->status_page.vma);
+ if (!vma)
+ return;
+
+ obj = vma->obj;
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_map(obj);
+ __i915_gem_object_release_unless_active(obj);
+}
+
+static int init_status_page(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned int flags;
+ void *vaddr;
+ int ret;
+
+ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate status page\n");
+ return PTR_ERR(obj);
+ }
+
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ if (ret)
+ goto err;
+
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err;
+ }
+
+ flags = PIN_GLOBAL;
+ if (!HAS_LLC(engine->i915))
+ /* On g33, we cannot place HWS above 256MiB, so
+ * restrict its pinning to the low mappable arena.
+ * Though this restriction is not documented for
+ * gen4, gen5, or byt, they also behave similarly
+ * and hang if the HWS is placed at the top of the
+ * GTT. To generalise, it appears that all !llc
+ * platforms have issues with us placing the HWS
+ * above the mappable region (even though we never
+ * actually map it).
+ */
+ flags |= PIN_MAPPABLE;
+ else
+ flags |= PIN_HIGH;
+ ret = i915_vma_pin(vma, 0, 4096, flags);
+ if (ret)
+ goto err;
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err_unpin;
+ }
+
+ engine->status_page.vma = vma;
+ engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
+ engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
+
+ DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
+ engine->name, i915_ggtt_offset(vma));
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+err:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static int init_phys_status_page(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ GEM_BUG_ON(engine->id != RCS);
+
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
+ if (!dev_priv->status_page_dmah)
+ return -ENOMEM;
+
+ engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(engine->status_page.page_addr, 0, PAGE_SIZE);
+
+ return 0;
+}
+
/**
 * intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -471,17 +616,44 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (IS_ERR(ring))
return PTR_ERR(ring);
+ /*
+ * Similarly the preempt context must always be available so that
+ * we can interrupt the engine at any time.
+ */
+ if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) {
+ ring = engine->context_pin(engine,
+ engine->i915->preempt_context);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
+ goto err_unpin_kernel;
+ }
+ }
+
ret = intel_engine_init_breadcrumbs(engine);
if (ret)
- goto err_unpin;
+ goto err_unpin_preempt;
ret = i915_gem_render_state_init(engine);
if (ret)
- goto err_unpin;
+ goto err_breadcrumbs;
+
+ if (HWS_NEEDS_PHYSICAL(engine->i915))
+ ret = init_phys_status_page(engine);
+ else
+ ret = init_status_page(engine);
+ if (ret)
+ goto err_rs_fini;
return 0;
-err_unpin:
+err_rs_fini:
+ i915_gem_render_state_fini(engine);
+err_breadcrumbs:
+ intel_engine_fini_breadcrumbs(engine);
+err_unpin_preempt:
+ if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+ engine->context_unpin(engine, engine->i915->preempt_context);
+err_unpin_kernel:
engine->context_unpin(engine, engine->i915->kernel_context);
return ret;
}
@@ -497,11 +669,18 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
intel_engine_cleanup_scratch(engine);
+ if (HWS_NEEDS_PHYSICAL(engine->i915))
+ cleanup_phys_status_page(engine);
+ else
+ cleanup_status_page(engine);
+
i915_gem_render_state_fini(engine);
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
+ if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+ engine->context_unpin(engine, engine->i915->preempt_context);
engine->context_unpin(engine, engine->i915->kernel_context);
}
@@ -672,11 +851,6 @@ static int wa_add(struct drm_i915_private *dev_priv,
#define WA_SET_FIELD_MASKED(addr, mask, value) \
WA_REG(addr, mask, _MASKED_FIELD(mask, value))
-#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
-#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
-
-#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
-
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
i915_reg_t reg)
{
@@ -687,8 +861,8 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
return -EINVAL;
- WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
- i915_mmio_reg_offset(reg));
+ I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
+ i915_mmio_reg_offset(reg));
wa->hw_whitelist_count[engine->id]++;
return 0;
@@ -812,6 +986,23 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);
+ if (HAS_LLC(dev_priv)) {
+ /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
+ *
+ * Must match Display Engine. See
+ * WaCompressedResourceDisplayNewHashMode.
+ */
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN9_PBE_COMPRESSED_HASH_SELECTION);
+ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
+
+ I915_WRITE(MMCD_MISC_CTRL,
+ I915_READ(MMCD_MISC_CTRL) |
+ MMCD_PCLA |
+ MMCD_HOTSPOT_EN);
+ }
+
/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -900,13 +1091,33 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));
+ /*
+ * Supporting preemption with fine-granularity requires changes in the
+ * batch buffer programming. Since we can't break old userspace, we
+	 * need to set our default preemption level to a safe value. Userspace is
+ * still able to use more fine-grained preemption levels, since in
+ * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
+ * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
+ * not real HW workarounds, but merely a way to start using preemption
+ * while maintaining old contract with userspace.
+ */
+
+ /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
+ WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+
+	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
if (ret)
return ret;
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl */
- ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
+ I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
if (ret)
return ret;
@@ -968,25 +1179,19 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
if (ret)
return ret;
- /*
- * Actual WA is to disable percontext preemption granularity control
- * until D0 which is the default case so this is equivalent to
- * !WaDisablePerCtxtPreemptionGranularityControl:skl
- */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
-
/* WaEnableGapsTsvCreditFix:skl */
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
/* WaDisableGafsUnitClkGating:skl */
- WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
/* WaInPlaceDecompressionHang:skl */
if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
- WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
/* WaDisableLSQCROPERFforOCL:skl */
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
@@ -1022,8 +1227,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
/* WaDisablePooledEuLoadBalancingFix:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
- WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
- GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+ I915_WRITE(FF_SLICE_CS_CHICKEN2,
+ _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
}
/* WaDisableSbeCacheDispatchPortSharing:bxt */
@@ -1062,8 +1267,65 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
/* WaInPlaceDecompressionHang:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
- WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
+
+ return 0;
+}
+
+static int cnl_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
+ if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+ I915_WRITE(GAMT_CHKN_BIT_REG,
+ (I915_READ(GAMT_CHKN_BIT_REG) |
+ GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT));
+
+ /* WaForceContextSaveRestoreNonCoherent:cnl */
+ WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
+
+ /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
+ if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
+
+ /* WaDisableReplayBufferBankArbitrationOptimization:cnl */
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
+ if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
+
+ /* WaInPlaceDecompressionHang:cnl */
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
+
+ /* WaPushConstantDereferenceHoldDisable:cnl */
+ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
+
+ /* FtrEnableFastAnisoL1BankingFix: cnl */
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
+
+ /* WaDisable3DMidCmdPreemption:cnl */
+ WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
+
+ /* WaDisableGPGPUMidCmdPreemption:cnl */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+
+ /* WaEnablePreemptionGranularityControlByUMD:cnl */
+ I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+ if (ret)
+ return ret;
return 0;
}
@@ -1083,8 +1345,9 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
/* WaDisableDynamicCreditSharing:kbl */
if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
- WA_SET_BIT(GAMT_CHKN_BIT_REG,
- GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+ I915_WRITE(GAMT_CHKN_BIT_REG,
+ (I915_READ(GAMT_CHKN_BIT_REG) |
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING));
/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
@@ -1097,7 +1360,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableGafsUnitClkGating:kbl */
- WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
/* WaDisableSbeCacheDispatchPortSharing:kbl */
WA_SET_BIT_MASKED(
@@ -1105,8 +1369,9 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
/* WaInPlaceDecompressionHang:kbl */
- WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
/* WaDisableLSQCROPERFforOCL:kbl */
ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
@@ -1150,7 +1415,8 @@ static int cfl_init_workarounds(struct intel_engine_cs *engine)
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableGafsUnitClkGating:cfl */
- WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) |
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE));
/* WaDisableSbeCacheDispatchPortSharing:cfl */
WA_SET_BIT_MASKED(
@@ -1158,8 +1424,9 @@ static int cfl_init_workarounds(struct intel_engine_cs *engine)
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
/* WaInPlaceDecompressionHang:cfl */
- WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+ (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
return 0;
}
@@ -1188,6 +1455,8 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
err = glk_init_workarounds(engine);
else if (IS_COFFEELAKE(dev_priv))
err = cfl_init_workarounds(engine);
+ else if (IS_CANNONLAKE(dev_priv))
+ err = cnl_init_workarounds(engine);
else
err = 0;
if (err)
@@ -1279,12 +1548,12 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
return false;
- /* Both ports drained, no more ELSP submission? */
- if (port_request(&engine->execlist_port[0]))
+ /* Waiting to drain ELSP? */
+ if (READ_ONCE(engine->execlists.active))
return false;
/* ELSP is empty, but there are ready requests? */
- if (READ_ONCE(engine->execlist_first))
+ if (READ_ONCE(engine->execlists.first))
return false;
/* Ring stopped? */
@@ -1333,11 +1602,188 @@ void intel_engines_mark_idle(struct drm_i915_private *i915)
for_each_engine(engine, i915, id) {
intel_engine_disarm_breadcrumbs(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
- tasklet_kill(&engine->irq_tasklet);
- engine->no_priolist = false;
+ tasklet_kill(&engine->execlists.irq_tasklet);
+ engine->execlists.no_priolist = false;
+ }
+}
+
+bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
+{
+ switch (INTEL_GEN(engine->i915)) {
+ case 2:
+ return false; /* uses physical not virtual addresses */
+ case 3:
+ /* maybe only uses physical not virtual addresses */
+ return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
+ case 6:
+ return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
+ default:
+ return true;
}
}
+static void print_request(struct drm_printer *m,
+ struct drm_i915_gem_request *rq,
+ const char *prefix)
+{
+ drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix,
+ rq->global_seqno,
+ i915_gem_request_completed(rq) ? "!" : "",
+ rq->ctx->hw_id, rq->fence.seqno,
+ rq->priotree.priority,
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies),
+ rq->timeline->common->name);
+}
+
+void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
+{
+ struct intel_breadcrumbs * const b = &engine->breadcrumbs;
+ const struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_gpu_error * const error = &engine->i915->gpu_error;
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_gem_request *rq;
+ struct rb_node *rb;
+ u64 addr;
+
+ drm_printf(m, "%s\n", engine->name);
+ drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
+ intel_engine_get_seqno(engine),
+ intel_engine_last_submit(engine),
+ engine->hangcheck.seqno,
+ jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
+ engine->timeline->inflight_seqnos);
+ drm_printf(m, "\tReset count: %d\n",
+ i915_reset_engine_count(error, engine));
+
+ rcu_read_lock();
+
+ drm_printf(m, "\tRequests:\n");
+
+ rq = list_first_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tfirst ");
+
+ rq = list_last_entry(&engine->timeline->requests,
+ struct drm_i915_gem_request, link);
+ if (&rq->link != &engine->timeline->requests)
+ print_request(m, rq, "\t\tlast ");
+
+ rq = i915_gem_find_active_request(engine);
+ if (rq) {
+ print_request(m, rq, "\t\tactive ");
+ drm_printf(m,
+ "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+ rq->head, rq->postfix, rq->tail,
+ rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+ rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+ }
+
+ drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
+ I915_READ(RING_START(engine->mmio_base)),
+ rq ? i915_ggtt_offset(rq->ring->vma) : 0);
+ drm_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n",
+ I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
+ rq ? rq->ring->head : 0);
+ drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
+ I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
+ rq ? rq->ring->tail : 0);
+ drm_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
+ I915_READ(RING_CTL(engine->mmio_base)),
+ I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
+
+ rcu_read_unlock();
+
+ addr = intel_engine_get_active_head(engine);
+ drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+ addr = intel_engine_get_last_batch_head(engine);
+ drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+
+ if (i915_modparams.enable_execlists) {
+ const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+ u32 ptr, read, write;
+ unsigned int idx;
+
+ drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
+ I915_READ(RING_EXECLIST_STATUS_LO(engine)),
+ I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+
+ ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
+ read = GEN8_CSB_READ_PTR(ptr);
+ write = GEN8_CSB_WRITE_PTR(ptr);
+ drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
+ read, execlists->csb_head,
+ write,
+ intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
+ yesno(test_bit(ENGINE_IRQ_EXECLIST,
+ &engine->irq_posted)));
+ if (read >= GEN8_CSB_ENTRIES)
+ read = 0;
+ if (write >= GEN8_CSB_ENTRIES)
+ write = 0;
+ if (read > write)
+ write += GEN8_CSB_ENTRIES;
+ while (read < write) {
+ idx = ++read % GEN8_CSB_ENTRIES;
+ drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
+ idx,
+ I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
+ hws[idx * 2],
+ I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
+ hws[idx * 2 + 1]);
+ }
+
+ rcu_read_lock();
+ for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
+ unsigned int count;
+
+ rq = port_unpack(&execlists->port[idx], &count);
+ if (rq) {
+ drm_printf(m, "\t\tELSP[%d] count=%d, ",
+ idx, count);
+ print_request(m, rq, "rq: ");
+ } else {
+ drm_printf(m, "\t\tELSP[%d] idle\n",
+ idx);
+ }
+ }
+ drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
+ rcu_read_unlock();
+ } else if (INTEL_GEN(dev_priv) > 6) {
+ drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE(engine)));
+ drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE_READ(engine)));
+ drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
+ I915_READ(RING_PP_DIR_DCLV(engine)));
+ }
+
+ spin_lock_irq(&engine->timeline->lock);
+ list_for_each_entry(rq, &engine->timeline->requests, link)
+ print_request(m, rq, "\t\tE ");
+ for (rb = execlists->first; rb; rb = rb_next(rb)) {
+ struct i915_priolist *p =
+ rb_entry(rb, typeof(*p), node);
+
+ list_for_each_entry(rq, &p->requests, priotree.link)
+ print_request(m, rq, "\t\tQ ");
+ }
+ spin_unlock_irq(&engine->timeline->lock);
+
+ spin_lock_irq(&b->rb_lock);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = rb_entry(rb, typeof(*w), node);
+
+ drm_printf(m, "\t%s [%d] waiting for %x\n",
+ w->tsk->comm, w->tsk->pid, w->seqno);
+ }
+ spin_unlock_irq(&b->rb_lock);
+
+ drm_printf(m, "\n");
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#endif
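Aside: a minimal standalone sketch (illustration only, not part of the patch) of the CSB pointer handling used by intel_engine_dump() above, showing how the read/write pointers are sanitized and the wrap-around case is unwrapped into a single forward walk. CSB_ENTRIES and the example pointer values are assumptions chosen purely for illustration.

/* Illustration only: mirrors the read/write unwrap logic in intel_engine_dump(). */
#include <stdio.h>

#define CSB_ENTRIES 6	/* assumed ring size, for illustration */

static void walk_csb(unsigned int read, unsigned int write)
{
	/* Out-of-range pointers are treated as 0. */
	if (read >= CSB_ENTRIES)
		read = 0;
	if (write >= CSB_ENTRIES)
		write = 0;

	/* Unwrap so a single "read < write" loop also covers wrap-around. */
	if (read > write)
		write += CSB_ENTRIES;

	while (read < write) {
		unsigned int idx = ++read % CSB_ENTRIES;

		printf("pending CSB entry in slot %u\n", idx);
	}
}

int main(void)
{
	walk_csb(4, 1);	/* wraps: visits slots 5, 0, 1 */
	walk_csb(2, 5);	/* no wrap: visits slots 3, 4, 5 */
	return 0;
}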
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 8c8ead2276e0..1a0f5e0c8d10 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -69,9 +69,9 @@ static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
* address we program because it starts at the real start of the buffer, so we
* have to take this into consideration here.
*/
-static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
+static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
{
- return crtc->base.y - crtc->adjusted_y;
+ return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
}
/*
@@ -291,6 +291,19 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
+ /* Display WA #0529: skl, kbl, bxt. */
+ if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
+ u32 val = I915_READ(CHICKEN_MISC_4);
+
+ val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
+
+ if (i915_gem_object_get_tiling(params->vma->obj) !=
+ I915_TILING_X)
+ val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
+
+ I915_WRITE(CHICKEN_MISC_4, val);
+ }
+
dpfc_ctl = 0;
if (IS_IVYBRIDGE(dev_priv))
dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);
@@ -714,8 +727,8 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
&effective_h);
- effective_w += crtc->adjusted_x;
- effective_h += crtc->adjusted_y;
+ effective_w += fbc->state_cache.plane.adjusted_x;
+ effective_h += fbc->state_cache.plane.adjusted_y;
return effective_w <= max_w && effective_h <= max_h;
}
@@ -744,6 +757,9 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
cache->plane.visible = plane_state->base.visible;
+ cache->plane.adjusted_x = plane_state->main.x;
+ cache->plane.adjusted_y = plane_state->main.y;
+ cache->plane.y = plane_state->base.src.y1 >> 16;
if (!cache->plane.visible)
return;
@@ -846,7 +862,7 @@ static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
return false;
}
- if (!i915.enable_fbc) {
+ if (!i915_modparams.enable_fbc) {
fbc->no_fbc_reason = "disabled per module param or by default";
return false;
}
@@ -875,12 +891,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->crtc.pipe = crtc->pipe;
params->crtc.plane = crtc->plane;
- params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
+ params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
params->fb.format = cache->fb.format;
params->fb.stride = cache->fb.stride;
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
+
+ if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
+ params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
+ 32 * fbc->threshold) * 8;
}
static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
@@ -1293,8 +1313,8 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
*/
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
- if (i915.enable_fbc >= 0)
- return !!i915.enable_fbc;
+ if (i915_modparams.enable_fbc >= 0)
+ return !!i915_modparams.enable_fbc;
if (!HAS_FBC(dev_priv))
return 0;
@@ -1338,8 +1358,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
if (need_fbc_vtd_wa(dev_priv))
mkwrite_device_info(dev_priv)->has_fbc = false;
- i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
- DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
+ i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
+ DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
+ i915_modparams.enable_fbc);
if (!HAS_FBC(dev_priv)) {
fbc->no_fbc_reason = "unsupported by this chipset";
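Aside: a small standalone sketch (illustration only) of the Display WA #0529 override stride computed above as DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8. The width and threshold below are made-up values and the units of the result are hardware-specific; the point is only the rounding arithmetic.

#include <stdio.h>

/* Same rounding helper the kernel uses. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int src_w = 1920;	/* hypothetical plane source width */
	unsigned int threshold = 1;	/* hypothetical CFB threshold */
	unsigned int gen9_wa_cfb_stride;

	gen9_wa_cfb_stride = DIV_ROUND_UP(src_w, 32 * threshold) * 8;
	printf("gen9_wa_cfb_stride = %u\n", gen9_wa_cfb_stride);	/* prints 480 */
	return 0;
}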
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 262e75c00dd2..b8af35187d22 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -189,7 +189,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
" releasing it\n",
intel_fb->base.width, intel_fb->base.height,
sizes->fb_width, sizes->fb_height);
- drm_framebuffer_unreference(&intel_fb->base);
+ drm_framebuffer_put(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
if (!intel_fb || WARN_ON(!intel_fb->obj)) {
@@ -206,6 +206,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
mutex_lock(&dev->struct_mutex);
+ intel_runtime_pm_get(dev_priv);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
@@ -269,6 +270,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
fb->width, fb->height, i915_ggtt_offset(vma));
ifbdev->vma = vma;
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
@@ -276,6 +278,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_unpin:
intel_unpin_fb_vma(vma);
out_unlock:
+ intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -624,7 +627,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
ifbdev->fb = fb;
- drm_framebuffer_reference(&ifbdev->fb->base);
+ drm_framebuffer_get(&ifbdev->fb->base);
/* Final pass to check if any active pipes don't have fbs */
for_each_crtc(dev, crtc) {
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 04689600e337..77c123cc8817 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -88,14 +88,15 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
i915_reg_t reg = PIPESTAT(crtc->pipe);
- u32 pipestat = I915_READ(reg) & 0xffff0000;
+ u32 enable_mask;
lockdep_assert_held(&dev_priv->irq_lock);
- if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+ if ((I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;
- I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe);
+ I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);
trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
@@ -108,15 +109,16 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t reg = PIPESTAT(pipe);
- u32 pipestat = I915_READ(reg) & 0xffff0000;
lockdep_assert_held(&dev_priv->irq_lock);
if (enable) {
- I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
+ u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
+
+ I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);
} else {
- if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
+ if (old && I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS)
DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
}
}
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
new file mode 100644
index 000000000000..10037c0fdf95
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -0,0 +1,369 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "intel_guc.h"
+#include "i915_drv.h"
+
+static void gen8_guc_raise_irq(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
+}
+
+static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
+{
+ GEM_BUG_ON(!guc->send_regs.base);
+ GEM_BUG_ON(!guc->send_regs.count);
+ GEM_BUG_ON(i >= guc->send_regs.count);
+
+ return _MMIO(guc->send_regs.base + 4 * i);
+}
+
+void intel_guc_init_send_regs(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ enum forcewake_domains fw_domains = 0;
+ unsigned int i;
+
+ guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
+ guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
+
+ for (i = 0; i < guc->send_regs.count; i++) {
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ guc_send_reg(guc, i),
+ FW_REG_READ | FW_REG_WRITE);
+ }
+ guc->send_regs.fw_domains = fw_domains;
+}
+
+void intel_guc_init_early(struct intel_guc *guc)
+{
+ intel_guc_ct_init_early(&guc->ct);
+
+ mutex_init(&guc->send_mutex);
+ guc->send = intel_guc_send_nop;
+ guc->notify = gen8_guc_raise_irq;
+}
+
+static u32 get_gt_type(struct drm_i915_private *dev_priv)
+{
+ /* XXX: GT type based on PCI device ID? field seems unused by fw */
+ return 0;
+}
+
+static u32 get_core_family(struct drm_i915_private *dev_priv)
+{
+ u32 gen = INTEL_GEN(dev_priv);
+
+ switch (gen) {
+ case 9:
+ return GUC_CORE_FAMILY_GEN9;
+
+ default:
+ MISSING_CASE(gen);
+ return GUC_CORE_FAMILY_UNKNOWN;
+ }
+}
+
+/*
+ * Initialise the GuC parameter block before starting the firmware
+ * transfer. These parameters are read by the firmware on startup
+ * and cannot be changed thereafter.
+ */
+void intel_guc_init_params(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 params[GUC_CTL_MAX_DWORDS];
+ int i;
+
+ memset(params, 0, sizeof(params));
+
+ params[GUC_CTL_DEVICE_INFO] |=
+ (get_gt_type(dev_priv) << GUC_CTL_GT_TYPE_SHIFT) |
+ (get_core_family(dev_priv) << GUC_CTL_CORE_FAMILY_SHIFT);
+
+ /*
+ * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
+	 * second. This ARAT value is calculated by:
+ * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
+ */
+ params[GUC_CTL_ARAT_HIGH] = 0;
+ params[GUC_CTL_ARAT_LOW] = 100000000;
+
+ params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
+
+ params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
+ GUC_CTL_VCS2_ENABLED;
+
+ params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
+
+ if (i915_modparams.guc_log_level >= 0) {
+ params[GUC_CTL_DEBUG] =
+ i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
+ } else {
+ params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
+ }
+
+ /* If GuC submission is enabled, set up additional parameters here */
+ if (i915_modparams.enable_guc_submission) {
+ u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
+ u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
+ u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
+
+ params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
+ params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
+
+ pgs >>= PAGE_SHIFT;
+ params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
+ (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
+
+ params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
+
+ /* Unmask this bit to enable the GuC's internal scheduler */
+ params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
+ }
+
+ /*
+ * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
+ * they are power context saved so it's ok to release forcewake
+ * when we are done here and take it again at xfer time.
+ */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
+
+ I915_WRITE(SOFT_SCRATCH(0), 0);
+
+ for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+ I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
+}
+
+int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
+{
+ WARN(1, "Unexpected send: action=%#x\n", *action);
+ return -ENODEV;
+}
+
+/*
+ * This function implements the MMIO based host to GuC interface.
+ */
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 status;
+ int i;
+ int ret;
+
+ GEM_BUG_ON(!len);
+ GEM_BUG_ON(len > guc->send_regs.count);
+
+ /* If CT is available, we expect to use MMIO only during init/fini */
+ GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
+ *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
+ *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
+
+ mutex_lock(&guc->send_mutex);
+ intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);
+
+ for (i = 0; i < len; i++)
+ I915_WRITE(guc_send_reg(guc, i), action[i]);
+
+ POSTING_READ(guc_send_reg(guc, i - 1));
+
+ intel_guc_notify(guc);
+
+ /*
+ * No GuC command should ever take longer than 10ms.
+ * Fast commands should still complete in 10us.
+ */
+ ret = __intel_wait_for_register_fw(dev_priv,
+ guc_send_reg(guc, 0),
+ INTEL_GUC_RECV_MASK,
+ INTEL_GUC_RECV_MASK,
+ 10, 10, &status);
+ if (status != INTEL_GUC_STATUS_SUCCESS) {
+ /*
+ * Either the GuC explicitly returned an error (which
+ * we convert to -EIO here) or no response at all was
+ * received within the timeout limit (-ETIMEDOUT)
+ */
+ if (ret != -ETIMEDOUT)
+ ret = -EIO;
+
+ DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
+ " ret=%d status=0x%08X response=0x%08X\n",
+ action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
+ }
+
+ intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
+ mutex_unlock(&guc->send_mutex);
+
+ return ret;
+}
+
+int intel_guc_sample_forcewake(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 action[2];
+
+ action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
+ /* WaRsDisableCoarsePowerGating:skl,bxt */
+ if (!intel_rc6_enabled() ||
+ NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
+ action[1] = 0;
+ else
+ /* bit 0 and 1 are for Render and Media domain separately */
+ action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+/**
+ * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
+ * @guc: intel_guc structure
+ * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
+ *
+ * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
+ * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
+ * intel_huc_auth().
+ *
+ * Return: non-zero code on error
+ */
+int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_AUTHENTICATE_HUC,
+ rsa_offset
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+/**
+ * intel_guc_suspend() - notify GuC entering suspend state
+ * @dev_priv: i915 device private
+ */
+int intel_guc_suspend(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+ struct i915_gem_context *ctx;
+ u32 data[3];
+
+ if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return 0;
+
+ gen9_disable_guc_interrupts(dev_priv);
+
+ ctx = dev_priv->kernel_context;
+
+ data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
+ /* any value greater than GUC_POWER_D0 */
+ data[1] = GUC_POWER_D1;
+ /* first page is shared data with GuC */
+ data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
+ LRC_GUCSHR_PN * PAGE_SIZE;
+
+ return intel_guc_send(guc, data, ARRAY_SIZE(data));
+}
+
+/**
+ * intel_guc_resume() - notify GuC resuming from suspend state
+ * @dev_priv: i915 device private
+ */
+int intel_guc_resume(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+ struct i915_gem_context *ctx;
+ u32 data[3];
+
+ if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return 0;
+
+ if (i915_modparams.guc_log_level >= 0)
+ gen9_enable_guc_interrupts(dev_priv);
+
+ ctx = dev_priv->kernel_context;
+
+ data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
+ data[1] = GUC_POWER_D0;
+ /* first page is shared data with GuC */
+ data[2] = guc_ggtt_offset(ctx->engine[RCS].state) +
+ LRC_GUCSHR_PN * PAGE_SIZE;
+
+ return intel_guc_send(guc, data, ARRAY_SIZE(data));
+}
+
+/**
+ * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
+ * @guc: the guc
+ * @size: size of area to allocate (both virtual space and memory)
+ *
+ * This is a wrapper to create an object for use with the GuC. In order to
+ * use it inside the GuC, an object needs to be pinned for its lifetime, so we allocate
+ * both some backing storage and a range inside the Global GTT. We must pin
+ * it in the GGTT somewhere other than [0, GUC_WOPCM_TOP) because that
+ * range is reserved inside GuC.
+ *
+ * Return: An i915_vma if successful, otherwise an ERR_PTR.
+ */
+struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ obj = i915_gem_object_create(dev_priv, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+ if (IS_ERR(vma))
+ goto err;
+
+ ret = i915_vma_pin(vma, 0, PAGE_SIZE,
+ PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return vma;
+}
+
+u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
+{
+ u32 wopcm_size = GUC_WOPCM_TOP;
+
+ /* On BXT, the top of WOPCM is reserved for RC6 context */
+ if (IS_GEN9_LP(dev_priv))
+ wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
+
+ return wopcm_size;
+}
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
new file mode 100644
index 000000000000..418450b1ae27
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_GUC_H_
+#define _INTEL_GUC_H_
+
+#include "intel_uncore.h"
+#include "intel_guc_fw.h"
+#include "intel_guc_fwif.h"
+#include "intel_guc_ct.h"
+#include "intel_guc_log.h"
+#include "intel_uc_fw.h"
+#include "i915_guc_reg.h"
+#include "i915_vma.h"
+
+/*
+ * Top level structure of GuC. It handles firmware loading and manages client
+ * pool and doorbells. intel_guc owns an i915_guc_client to replace the legacy
+ * ExecList submission.
+ */
+struct intel_guc {
+ struct intel_uc_fw fw;
+ struct intel_guc_log log;
+ struct intel_guc_ct ct;
+
+ /* Log snapshot if GuC errors during load */
+ struct drm_i915_gem_object *load_err_log;
+
+ /* intel_guc_recv interrupt related state */
+ bool interrupts_enabled;
+
+ struct i915_vma *ads_vma;
+ struct i915_vma *stage_desc_pool;
+ void *stage_desc_pool_vaddr;
+ struct ida stage_ids;
+
+ struct i915_guc_client *execbuf_client;
+
+ DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
+ /* Cyclic counter mod pagesize */
+ u32 db_cacheline;
+
+ /* GuC's FW specific registers used in MMIO send */
+ struct {
+ u32 base;
+ unsigned int count;
+ enum forcewake_domains fw_domains;
+ } send_regs;
+
+ /* To serialize the intel_guc_send actions */
+ struct mutex send_mutex;
+
+ /* GuC's FW specific send function */
+ int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
+
+ /* GuC's FW specific notify function */
+ void (*notify)(struct intel_guc *guc);
+};
+
+static
+inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+{
+ return guc->send(guc, action, len);
+}
+
+static inline void intel_guc_notify(struct intel_guc *guc)
+{
+ guc->notify(guc);
+}
+
+/*
+ * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
+ * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
+ * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
+ * used by GuC are pinned with PIN_OFFSET_BIAS along with the size of WOPCM.
+ */
+static inline u32 guc_ggtt_offset(struct i915_vma *vma)
+{
+ u32 offset = i915_ggtt_offset(vma);
+
+ GEM_BUG_ON(offset < GUC_WOPCM_TOP);
+ GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
+
+ return offset;
+}
+
+void intel_guc_init_early(struct intel_guc *guc);
+void intel_guc_init_send_regs(struct intel_guc *guc);
+void intel_guc_init_params(struct intel_guc *guc);
+int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
+int intel_guc_sample_forcewake(struct intel_guc *guc);
+int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
+int intel_guc_suspend(struct drm_i915_private *dev_priv);
+int intel_guc_resume(struct drm_i915_private *dev_priv);
+struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
+
+#endif
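Aside: a minimal standalone sketch (illustration only) of the invariant guc_ggtt_offset() asserts above: GuC-visible objects must start at or above WOPCM_TOP and must not run past the top of the GuC-addressable GGTT. WOPCM_TOP uses the 512K figure from the comment; GGTT_TOP is an assumed placeholder bound, not the driver's GUC_GGTT_TOP value.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define WOPCM_TOP	(512u << 10)	/* low region reserved for Boot ROM/SRAM/WOPCM */
#define GGTT_TOP	(1ull << 32)	/* assumed upper bound, for illustration */

static bool guc_can_use(uint64_t offset, uint64_t size)
{
	/* Reject anything overlapping the reserved low region or past the top. */
	return offset >= WOPCM_TOP && offset + size <= GGTT_TOP;
}

int main(void)
{
	assert(!guc_can_use(0, 4096));		/* inside WOPCM: rejected */
	assert(guc_can_use(WOPCM_TOP, 4096));	/* pinned above the bias: ok */
	return 0;
}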
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 8b0ae7fce7f2..ef67a36354c5 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -26,31 +26,9 @@
* Dave Gordon <david.s.gordon@intel.com>
* Alex Dai <yu.dai@intel.com>
*/
-#include "i915_drv.h"
-#include "intel_uc.h"
-/**
- * DOC: GuC-specific firmware loader
- *
- * intel_guc:
- * Top level structure of guc. It handles firmware loading and manages client
- * pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
- * ExecList submission.
- *
- * Firmware versioning:
- * The firmware build process will generate a version header file with major and
- * minor version defined. The versions are built into CSS header of firmware.
- * i915 kernel driver set the minimal firmware version required per platform.
- * The firmware installation package will install (symbolic link) proper version
- * of firmware.
- *
- * GuC address space:
- * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
- * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
- * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
- * used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
- *
- */
+#include "intel_guc_fw.h"
+#include "i915_drv.h"
#define SKL_FW_MAJOR 6
#define SKL_FW_MINOR 1
@@ -78,88 +56,45 @@ MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)
-
-static u32 get_gttype(struct drm_i915_private *dev_priv)
-{
- /* XXX: GT type based on PCI device ID? field seems unused by fw */
- return 0;
-}
-
-static u32 get_core_family(struct drm_i915_private *dev_priv)
-{
- u32 gen = INTEL_GEN(dev_priv);
-
- switch (gen) {
- case 9:
- return GUC_CORE_FAMILY_GEN9;
-
- default:
- MISSING_CASE(gen);
- return GUC_CORE_FAMILY_UNKNOWN;
- }
-}
-
-/*
- * Initialise the GuC parameter block before starting the firmware
- * transfer. These parameters are read by the firmware on startup
- * and cannot be changed thereafter.
+/**
+ * intel_guc_fw_select() - selects GuC firmware for uploading
+ *
+ * @guc: intel_guc struct
+ *
+ * Return: zero when we know which firmware to load, non-zero otherwise
*/
-static void guc_params_init(struct drm_i915_private *dev_priv)
+int intel_guc_fw_select(struct intel_guc *guc)
{
- struct intel_guc *guc = &dev_priv->guc;
- u32 params[GUC_CTL_MAX_DWORDS];
- int i;
-
- memset(&params, 0, sizeof(params));
-
- params[GUC_CTL_DEVICE_INFO] |=
- (get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
- (get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);
-
- /*
- * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
- * second. This ARAR is calculated by:
- * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
- */
- params[GUC_CTL_ARAT_HIGH] = 0;
- params[GUC_CTL_ARAT_LOW] = 100000000;
-
- params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
-
- params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
- GUC_CTL_VCS2_ENABLED;
-
- params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
-
- if (i915.guc_log_level >= 0) {
- params[GUC_CTL_DEBUG] =
- i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
- } else
- params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
-
- /* If GuC submission is enabled, set up additional parameters here */
- if (i915.enable_guc_submission) {
- u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
- u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
- u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
-
- params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
- params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
-
- pgs >>= PAGE_SHIFT;
- params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
- (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
- params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
+ intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);
- /* Unmask this bit to enable the GuC's internal scheduler */
- params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
+ if (i915_modparams.guc_firmware_path) {
+ guc->fw.path = i915_modparams.guc_firmware_path;
+ guc->fw.major_ver_wanted = 0;
+ guc->fw.minor_ver_wanted = 0;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ guc->fw.path = I915_SKL_GUC_UCODE;
+ guc->fw.major_ver_wanted = SKL_FW_MAJOR;
+ guc->fw.minor_ver_wanted = SKL_FW_MINOR;
+ } else if (IS_BROXTON(dev_priv)) {
+ guc->fw.path = I915_BXT_GUC_UCODE;
+ guc->fw.major_ver_wanted = BXT_FW_MAJOR;
+ guc->fw.minor_ver_wanted = BXT_FW_MINOR;
+ } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+ guc->fw.path = I915_KBL_GUC_UCODE;
+ guc->fw.major_ver_wanted = KBL_FW_MAJOR;
+ guc->fw.minor_ver_wanted = KBL_FW_MINOR;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ guc->fw.path = I915_GLK_GUC_UCODE;
+ guc->fw.major_ver_wanted = GLK_FW_MAJOR;
+ guc->fw.minor_ver_wanted = GLK_FW_MINOR;
+ } else {
+ DRM_ERROR("No GuC firmware known for platform with GuC!\n");
+ return -ENOENT;
}
- I915_WRITE(SOFT_SCRATCH(0), 0);
-
- for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
- I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
+ return 0;
}
/*
@@ -250,38 +185,16 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
return ret;
}
-u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
-{
- u32 wopcm_size = GUC_WOPCM_TOP;
-
- /* On BXT, the top of WOPCM is reserved for RC6 context */
- if (IS_GEN9_LP(dev_priv))
- wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
-
- return wopcm_size;
-}
-
/*
* Load the GuC firmware blob into the MinuteIA.
*/
-static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
+static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- struct i915_vma *vma;
+ struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret;
- ret = i915_gem_object_set_to_gtt_domain(guc_fw->obj, false);
- if (ret) {
- DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
- return ret;
- }
-
- vma = i915_gem_object_ggtt_pin(guc_fw->obj, NULL, 0, 0,
- PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
- if (IS_ERR(vma)) {
- DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
- return PTR_ERR(vma);
- }
+ GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -312,23 +225,15 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
}
- guc_params_init(dev_priv);
-
ret = guc_ucode_xfer_dma(dev_priv, vma);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- /*
- * We keep the object pages for reuse during resume. But we can unpin it
- * now that DMA has completed, so it doesn't continue to take up space.
- */
- i915_vma_unpin(vma);
-
return ret;
}
/**
- * intel_guc_init_hw() - finish preparing the GuC for activity
+ * intel_guc_fw_upload() - finish preparing the GuC for activity
* @guc: intel_guc structure
*
* Called during driver loading and also after a GPU reset.
@@ -340,80 +245,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
*
* Return: non-zero code on error
*/
-int intel_guc_init_hw(struct intel_guc *guc)
+int intel_guc_fw_upload(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- const char *fw_path = guc->fw.path;
- int ret;
-
- DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
- fw_path,
- intel_uc_fw_status_repr(guc->fw.fetch_status),
- intel_uc_fw_status_repr(guc->fw.load_status));
-
- if (guc->fw.fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return -EIO;
-
- guc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;
-
- DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
- intel_uc_fw_status_repr(guc->fw.fetch_status),
- intel_uc_fw_status_repr(guc->fw.load_status));
-
- ret = guc_ucode_xfer(dev_priv);
-
- if (ret)
- return -EAGAIN;
-
- guc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS;
-
- DRM_INFO("GuC %s (firmware %s [version %u.%u])\n",
- i915.enable_guc_submission ? "submission enabled" : "loaded",
- guc->fw.path,
- guc->fw.major_ver_found, guc->fw.minor_ver_found);
-
- return 0;
-}
-
-/**
- * intel_guc_select_fw() - selects GuC firmware for loading
- * @guc: intel_guc struct
- *
- * Return: zero when we know firmware, non-zero in other case
- */
-int intel_guc_select_fw(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- guc->fw.path = NULL;
- guc->fw.fetch_status = INTEL_UC_FIRMWARE_NONE;
- guc->fw.load_status = INTEL_UC_FIRMWARE_NONE;
- guc->fw.type = INTEL_UC_FW_TYPE_GUC;
-
- if (i915.guc_firmware_path) {
- guc->fw.path = i915.guc_firmware_path;
- guc->fw.major_ver_wanted = 0;
- guc->fw.minor_ver_wanted = 0;
- } else if (IS_SKYLAKE(dev_priv)) {
- guc->fw.path = I915_SKL_GUC_UCODE;
- guc->fw.major_ver_wanted = SKL_FW_MAJOR;
- guc->fw.minor_ver_wanted = SKL_FW_MINOR;
- } else if (IS_BROXTON(dev_priv)) {
- guc->fw.path = I915_BXT_GUC_UCODE;
- guc->fw.major_ver_wanted = BXT_FW_MAJOR;
- guc->fw.minor_ver_wanted = BXT_FW_MINOR;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- guc->fw.path = I915_KBL_GUC_UCODE;
- guc->fw.major_ver_wanted = KBL_FW_MAJOR;
- guc->fw.minor_ver_wanted = KBL_FW_MINOR;
- } else if (IS_GEMINILAKE(dev_priv)) {
- guc->fw.path = I915_GLK_GUC_UCODE;
- guc->fw.major_ver_wanted = GLK_FW_MAJOR;
- guc->fw.minor_ver_wanted = GLK_FW_MINOR;
- } else {
- DRM_ERROR("No GuC firmware known for platform with GuC!\n");
- return -ENOENT;
- }
-
- return 0;
+ return intel_uc_fw_upload(&guc->fw, guc_ucode_xfer);
}
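For illustration, a minimal sketch of how the split select/upload API above is meant to be driven. The caller below is hypothetical; only intel_guc_fw_select() and intel_guc_fw_upload() are defined by this patch, and the actual fetch of the blob happens in the common uC code.

/* Hypothetical caller (sketch): pick the blob for the platform first, then,
 * once the image has been fetched, hand it to the common upload helper.
 */
static int example_guc_fw_bringup(struct intel_guc *guc)
{
	int err;

	err = intel_guc_fw_select(guc);	/* sets fw.path + wanted version */
	if (err)
		return err;		/* -ENOENT: no known blob for this platform */

	/* wraps intel_uc_fw_upload(&guc->fw, guc_ucode_xfer), i.e. the DMA */
	return intel_guc_fw_upload(guc);
}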
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.h b/drivers/gpu/drm/i915/intel_guc_fw.h
new file mode 100644
index 000000000000..023f5baa9dd6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_fw.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_GUC_FW_H_
+#define _INTEL_GUC_FW_H_
+
+struct intel_guc;
+
+int intel_guc_fw_select(struct intel_guc *guc);
+int intel_guc_fw_upload(struct intel_guc *guc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 5fa286074811..80c507435458 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -56,10 +56,6 @@
#define WQ_LEN_SHIFT 16
#define WQ_NO_WCFLUSH_WAIT (1 << 27)
#define WQ_PRESENT_WORKLOAD (1 << 28)
-#define WQ_WORKLOAD_SHIFT 29
-#define WQ_WORKLOAD_GENERAL (0 << WQ_WORKLOAD_SHIFT)
-#define WQ_WORKLOAD_GPGPU (1 << WQ_WORKLOAD_SHIFT)
-#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT)
#define WQ_RING_TAIL_SHIFT 20
#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
@@ -86,8 +82,8 @@
#define GUC_CTL_ARAT_LOW 2
#define GUC_CTL_DEVICE_INFO 3
-#define GUC_CTL_GTTYPE_SHIFT 0
-#define GUC_CTL_COREFAMILY_SHIFT 7
+#define GUC_CTL_GT_TYPE_SHIFT 0
+#define GUC_CTL_CORE_FAMILY_SHIFT 7
#define GUC_CTL_LOG_PARAMS 4
#define GUC_LOG_VALID (1 << 0)
@@ -182,49 +178,49 @@
*/
struct uc_css_header {
- uint32_t module_type;
+ u32 module_type;
/* header_size includes all non-uCode bits, including css_header, rsa
* key, modulus key and exponent data. */
- uint32_t header_size_dw;
- uint32_t header_version;
- uint32_t module_id;
- uint32_t module_vendor;
+ u32 header_size_dw;
+ u32 header_version;
+ u32 module_id;
+ u32 module_vendor;
union {
struct {
- uint8_t day;
- uint8_t month;
- uint16_t year;
+ u8 day;
+ u8 month;
+ u16 year;
};
- uint32_t date;
+ u32 date;
};
- uint32_t size_dw; /* uCode plus header_size_dw */
- uint32_t key_size_dw;
- uint32_t modulus_size_dw;
- uint32_t exponent_size_dw;
+ u32 size_dw; /* uCode plus header_size_dw */
+ u32 key_size_dw;
+ u32 modulus_size_dw;
+ u32 exponent_size_dw;
union {
struct {
- uint8_t hour;
- uint8_t min;
- uint16_t sec;
+ u8 hour;
+ u8 min;
+ u16 sec;
};
- uint32_t time;
+ u32 time;
};
char username[8];
char buildnumber[12];
union {
struct {
- uint32_t branch_client_version;
- uint32_t sw_version;
+ u32 branch_client_version;
+ u32 sw_version;
} guc;
struct {
- uint32_t sw_version;
- uint32_t reserved;
+ u32 sw_version;
+ u32 reserved;
} huc;
};
- uint32_t prod_preprod_fw;
- uint32_t reserved[12];
- uint32_t header_info;
+ u32 prod_preprod_fw;
+ u32 reserved[12];
+ u32 header_info;
} __packed;
struct guc_doorbell_info {
@@ -388,7 +384,11 @@ struct guc_ct_buffer_desc {
/* Preempt to idle on quantum expiry */
#define POLICY_PREEMPT_TO_IDLE (1<<1)
-#define POLICY_MAX_NUM_WI 15
+#define POLICY_MAX_NUM_WI 15
+#define POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000
+#define POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000
+#define POLICY_DEFAULT_PREEMPTION_TIME_US 500000
+#define POLICY_DEFAULT_FAULT_TIME_US 250000
struct guc_policy {
/* Time for one workload to execute. (in micro seconds) */
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 16d3b8719cab..76d3eb1e4614 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -21,8 +21,11 @@
* IN THE SOFTWARE.
*
*/
+
#include <linux/debugfs.h>
#include <linux/relay.h>
+
+#include "intel_guc_log.h"
#include "i915_drv.h"
static void guc_log_capture_logs(struct intel_guc *guc);
@@ -144,7 +147,7 @@ static int guc_log_relay_file_create(struct intel_guc *guc)
struct dentry *log_dir;
int ret;
- if (i915.guc_log_level < 0)
+ if (i915_modparams.guc_log_level < 0)
return 0;
/* For now create the log file in /sys/kernel/debug/dri/0 dir */
@@ -480,7 +483,7 @@ err_runtime:
guc_log_runtime_destroy(guc);
err:
/* logging will remain off */
- i915.guc_log_level = -1;
+ i915_modparams.guc_log_level = -1;
return ret;
}
@@ -502,7 +505,8 @@ static void guc_flush_logs(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
+ if (!i915_modparams.enable_guc_submission ||
+ (i915_modparams.guc_log_level < 0))
return;
/* First disable the interrupts, will be re-enabled afterwards */
@@ -524,13 +528,14 @@ int intel_guc_log_create(struct intel_guc *guc)
{
struct i915_vma *vma;
unsigned long offset;
- uint32_t size, flags;
+ u32 flags;
+ u32 size;
int ret;
GEM_BUG_ON(guc->log.vma);
- if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
- i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+ if (i915_modparams.guc_log_level > GUC_LOG_VERBOSITY_MAX)
+ i915_modparams.guc_log_level = GUC_LOG_VERBOSITY_MAX;
/* The first page is to save log buffer state. Allocate one
* extra page for others in case of overlap */
@@ -555,7 +560,7 @@ int intel_guc_log_create(struct intel_guc *guc)
guc->log.vma = vma;
- if (i915.guc_log_level >= 0) {
+ if (i915_modparams.guc_log_level >= 0) {
ret = guc_log_runtime_create(guc);
if (ret < 0)
goto err_vma;
@@ -576,7 +581,7 @@ err_vma:
i915_vma_unpin_and_release(&guc->log.vma);
err:
/* logging will be off */
- i915.guc_log_level = -1;
+ i915_modparams.guc_log_level = -1;
return ret;
}
@@ -600,7 +605,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
return -EINVAL;
/* This combination doesn't make sense & won't have any effect */
- if (!log_param.logging_enabled && (i915.guc_log_level < 0))
+ if (!log_param.logging_enabled && (i915_modparams.guc_log_level < 0))
return 0;
ret = guc_log_control(guc, log_param.value);
@@ -610,7 +615,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
}
if (log_param.logging_enabled) {
- i915.guc_log_level = log_param.verbosity;
+ i915_modparams.guc_log_level = log_param.verbosity;
/* If log_level was set as -1 at boot time, then the relay channel file
* wouldn't have been created by now and interrupts also would not have
@@ -633,7 +638,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
guc_flush_logs(guc);
/* As logging is disabled, update log level to reflect that */
- i915.guc_log_level = -1;
+ i915_modparams.guc_log_level = -1;
}
return ret;
@@ -641,7 +646,8 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
void i915_guc_log_register(struct drm_i915_private *dev_priv)
{
- if (!i915.enable_guc_submission || i915.guc_log_level < 0)
+ if (!i915_modparams.enable_guc_submission ||
+ (i915_modparams.guc_log_level < 0))
return;
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -651,7 +657,7 @@ void i915_guc_log_register(struct drm_i915_private *dev_priv)
void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
{
- if (!i915.enable_guc_submission)
+ if (!i915_modparams.enable_guc_submission)
return;
mutex_lock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h
new file mode 100644
index 000000000000..f512cf79339b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_log.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_GUC_LOG_H_
+#define _INTEL_GUC_LOG_H_
+
+#include <linux/workqueue.h>
+
+#include "intel_guc_fwif.h"
+
+struct drm_i915_private;
+struct intel_guc;
+
+struct intel_guc_log {
+ u32 flags;
+ struct i915_vma *vma;
+ /* The runtime stuff gets created only when GuC logging gets enabled */
+ struct {
+ void *buf_addr;
+ struct workqueue_struct *flush_wq;
+ struct work_struct flush_work;
+ struct rchan *relay_chan;
+ } runtime;
+ /* logging related stats */
+ u32 capture_miss_count;
+ u32 flush_interrupt_count;
+ u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
+ u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
+ u32 flush_count[GUC_MAX_LOG_BUFFER];
+};
+
+int intel_guc_log_create(struct intel_guc *guc);
+void intel_guc_log_destroy(struct intel_guc *guc);
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
+void i915_guc_log_register(struct drm_i915_private *dev_priv);
+void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index c17ed0e62b67..b4a7f31f0214 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -58,7 +58,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
*/
void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
{
- if (!i915.enable_gvt)
+ if (!i915_modparams.enable_gvt)
return;
if (intel_vgpu_active(dev_priv)) {
@@ -73,7 +73,7 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
return;
bail:
- i915.enable_gvt = 0;
+ i915_modparams.enable_gvt = 0;
}
/**
@@ -90,17 +90,17 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
{
int ret;
- if (!i915.enable_gvt) {
+ if (!i915_modparams.enable_gvt) {
DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
return 0;
}
- if (!i915.enable_execlists) {
+ if (!i915_modparams.enable_execlists) {
DRM_ERROR("i915 GVT-g loading failed due to disabled execlists mode\n");
return -EIO;
}
- if (i915.enable_guc_submission) {
+ if (i915_modparams.enable_guc_submission) {
DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
return -EIO;
}
@@ -123,7 +123,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return 0;
bail:
- i915.enable_gvt = 0;
+ i915_modparams.enable_gvt = 0;
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index d9d87d96fb69..12ac270a5f93 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -428,7 +428,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
unsigned int hung = 0, stuck = 0;
int busy_count = 0;
- if (!i915.enable_hangcheck)
+ if (!i915_modparams.enable_hangcheck)
return;
if (!READ_ONCE(dev_priv->gt.awake))
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index e8abea7594ec..5132dc814788 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -70,7 +70,7 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
}
-static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
+static u32 g4x_infoframe_index(unsigned int type)
{
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
@@ -85,7 +85,7 @@ static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
}
}
-static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
+static u32 g4x_infoframe_enable(unsigned int type)
{
switch (type) {
case HDMI_INFOFRAME_TYPE_AVI:
@@ -100,9 +100,11 @@ static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
}
}
-static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
+static u32 hsw_infoframe_enable(unsigned int type)
{
switch (type) {
+ case DP_SDP_VSC:
+ return VIDEO_DIP_ENABLE_VSC_HSW;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI_HSW;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -118,10 +120,12 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
static i915_reg_t
hsw_dip_data_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder,
- enum hdmi_infoframe_type type,
+ unsigned int type,
int i)
{
switch (type) {
+ case DP_SDP_VSC:
+ return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_SPD:
@@ -136,7 +140,7 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
static void g4x_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- enum hdmi_infoframe_type type,
+ unsigned int type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
@@ -191,7 +195,7 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
static void ibx_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- enum hdmi_infoframe_type type,
+ unsigned int type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
@@ -251,7 +255,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
static void cpt_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- enum hdmi_infoframe_type type,
+ unsigned int type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
@@ -309,7 +313,7 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
static void vlv_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- enum hdmi_infoframe_type type,
+ unsigned int type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
@@ -368,7 +372,7 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
static void hsw_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- enum hdmi_infoframe_type type,
+ unsigned int type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
@@ -377,6 +381,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
i915_reg_t data_reg;
+ int data_size = type == DP_SDP_VSC ?
+ VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
int i;
u32 val = I915_READ(ctl_reg);
@@ -392,7 +398,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
- for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+ for (; i < data_size; i += 4)
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), 0);
mmiowb();
@@ -434,7 +440,7 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
union hdmi_infoframe *frame)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
uint8_t buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
@@ -450,7 +456,7 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
buffer[3] = 0;
len++;
- intel_hdmi->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len);
+ intel_dig_port->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len);
}
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
@@ -945,6 +951,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp, flags = 0;
@@ -965,7 +972,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
- if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
+ if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
if (tmp & SDVO_AUDIO_ENABLE)
@@ -991,8 +998,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
}
static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
@@ -1003,8 +1010,8 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
}
static void g4x_enable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1025,8 +1032,8 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
}
static void ibx_enable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1075,8 +1082,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
}
static void cpt_enable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1130,18 +1137,20 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
}
static void vlv_enable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
}
static void intel_disable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
u32 temp;
@@ -1184,14 +1193,15 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
- intel_hdmi->set_infoframes(&encoder->base, false, old_crtc_state, old_conn_state);
+ intel_dig_port->set_infoframes(&encoder->base, false,
+ old_crtc_state, old_conn_state);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
static void g4x_disable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
@@ -1200,16 +1210,16 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
}
static void pch_disable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
}
static void pch_post_disable_hdmi(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
}
@@ -1314,7 +1324,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
return status;
}
-static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
+static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
to_i915(crtc_state->base.crtc->dev);
@@ -1642,24 +1652,24 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
}
static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(&encoder->base);
intel_hdmi_prepare(encoder, pipe_config);
- intel_hdmi->set_infoframes(&encoder->base,
- pipe_config->has_hdmi_sink,
- pipe_config, conn_state);
+ intel_dig_port->set_infoframes(&encoder->base,
+ pipe_config->has_infoframe,
+ pipe_config, conn_state);
}
static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1669,9 +1679,9 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
0x2b247878);
- intel_hdmi->set_infoframes(&encoder->base,
- pipe_config->has_hdmi_sink,
- pipe_config, conn_state);
+ dport->set_infoframes(&encoder->base,
+ pipe_config->has_infoframe,
+ pipe_config, conn_state);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
@@ -1679,8 +1689,8 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
}
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
intel_hdmi_prepare(encoder, pipe_config);
@@ -1688,8 +1698,8 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
}
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
intel_hdmi_prepare(encoder, pipe_config);
@@ -1697,23 +1707,23 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
}
static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
chv_phy_post_pll_disable(encoder);
}
static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
vlv_phy_reset_lanes(encoder);
}
static void chv_hdmi_post_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1727,11 +1737,10 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder,
}
static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1741,9 +1750,9 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
/* Use 800mV-0dB */
chv_set_phy_signal_level(encoder, 128, 102, false);
- intel_hdmi->set_infoframes(&encoder->base,
- pipe_config->has_hdmi_sink,
- pipe_config, conn_state);
+ dport->set_infoframes(&encoder->base,
+ pipe_config->has_infoframe,
+ pipe_config, conn_state);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
@@ -1958,6 +1967,34 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
return ddc_pin;
}
+void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_dig_port->base.base.dev);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ intel_dig_port->write_infoframe = vlv_write_infoframe;
+ intel_dig_port->set_infoframes = vlv_set_infoframes;
+ intel_dig_port->infoframe_enabled = vlv_infoframe_enabled;
+ } else if (IS_G4X(dev_priv)) {
+ intel_dig_port->write_infoframe = g4x_write_infoframe;
+ intel_dig_port->set_infoframes = g4x_set_infoframes;
+ intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
+ } else if (HAS_DDI(dev_priv)) {
+ intel_dig_port->write_infoframe = hsw_write_infoframe;
+ intel_dig_port->set_infoframes = hsw_set_infoframes;
+ intel_dig_port->infoframe_enabled = hsw_infoframe_enabled;
+ } else if (HAS_PCH_IBX(dev_priv)) {
+ intel_dig_port->write_infoframe = ibx_write_infoframe;
+ intel_dig_port->set_infoframes = ibx_set_infoframes;
+ intel_dig_port->infoframe_enabled = ibx_infoframe_enabled;
+ } else {
+ intel_dig_port->write_infoframe = cpt_write_infoframe;
+ intel_dig_port->set_infoframes = cpt_set_infoframes;
+ intel_dig_port->infoframe_enabled = cpt_infoframe_enabled;
+ }
+}
+
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
{
@@ -1993,28 +2030,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
return;
intel_encoder->hpd_pin = intel_hpd_pin(port);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- intel_hdmi->write_infoframe = vlv_write_infoframe;
- intel_hdmi->set_infoframes = vlv_set_infoframes;
- intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
- } else if (IS_G4X(dev_priv)) {
- intel_hdmi->write_infoframe = g4x_write_infoframe;
- intel_hdmi->set_infoframes = g4x_set_infoframes;
- intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
- } else if (HAS_DDI(dev_priv)) {
- intel_hdmi->write_infoframe = hsw_write_infoframe;
- intel_hdmi->set_infoframes = hsw_set_infoframes;
- intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
- } else if (HAS_PCH_IBX(dev_priv)) {
- intel_hdmi->write_infoframe = ibx_write_infoframe;
- intel_hdmi->set_infoframes = ibx_set_infoframes;
- intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
- } else {
- intel_hdmi->write_infoframe = cpt_write_infoframe;
- intel_hdmi->set_infoframes = cpt_set_infoframes;
- intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
- }
-
if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
@@ -2113,5 +2128,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = 4;
+ intel_infoframe_init(intel_dig_port);
+
intel_hdmi_init_connector(intel_dig_port, intel_connector);
}
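A short illustration of why the infoframe hooks now live on intel_digital_port rather than intel_hdmi: with DP_SDP_VSC handled by the HSW paths above, a DP encoder sharing the port can reuse the same vtable. The helper below is hypothetical; only intel_infoframe_init() and the hook signatures come from this patch.

/* Hypothetical DP-side user of the shared hooks (sketch only): */
static void example_write_vsc_sdp(struct intel_digital_port *intel_dig_port,
				  const struct intel_crtc_state *crtc_state,
				  const void *sdp, ssize_t len)
{
	/* Same entry point the HDMI code uses for AVI/SPD/vendor infoframes;
	 * on HSW+ the DP_SDP_VSC case selects HSW_TVIDEO_DIP_VSC_DATA().
	 */
	intel_dig_port->write_infoframe(&intel_dig_port->base.base,
					crtc_state, DP_SDP_VSC, sdp, len);
}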
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 6145fa0d6773..c8a48cbc2b7d 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -21,9 +21,11 @@
* IN THE SOFTWARE.
*
*/
-#include <linux/firmware.h>
+
+#include <linux/types.h>
+
+#include "intel_huc.h"
#include "i915_drv.h"
-#include "intel_uc.h"
/**
* DOC: HuC Firmware
@@ -76,6 +78,42 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
GLK_HUC_FW_MINOR, GLK_BLD_NUM)
/**
+ * intel_huc_select_fw() - selects HuC firmware for loading
+ * @huc: intel_huc struct
+ */
+void intel_huc_select_fw(struct intel_huc *huc)
+{
+ struct drm_i915_private *dev_priv = huc_to_i915(huc);
+
+ intel_uc_fw_init(&huc->fw, INTEL_UC_FW_TYPE_HUC);
+
+ if (i915_modparams.huc_firmware_path) {
+ huc->fw.path = i915_modparams.huc_firmware_path;
+ huc->fw.major_ver_wanted = 0;
+ huc->fw.minor_ver_wanted = 0;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ huc->fw.path = I915_SKL_HUC_UCODE;
+ huc->fw.major_ver_wanted = SKL_HUC_FW_MAJOR;
+ huc->fw.minor_ver_wanted = SKL_HUC_FW_MINOR;
+ } else if (IS_BROXTON(dev_priv)) {
+ huc->fw.path = I915_BXT_HUC_UCODE;
+ huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
+ huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
+ } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+ huc->fw.path = I915_KBL_HUC_UCODE;
+ huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
+ huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ huc->fw.path = I915_GLK_HUC_UCODE;
+ huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR;
+ huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR;
+ } else {
+ DRM_ERROR("No HuC firmware known for platform with HuC!\n");
+ return;
+ }
+}
+
+/**
* huc_ucode_xfer() - DMAs the firmware
* @dev_priv: the drm_i915_private device
*
@@ -83,26 +121,15 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
*
* Return: 0 on success, non-zero on failure
*/
-static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
+static int huc_ucode_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
{
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
- struct i915_vma *vma;
+ struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
+ struct drm_i915_private *dev_priv = huc_to_i915(huc);
unsigned long offset = 0;
u32 size;
int ret;
- ret = i915_gem_object_set_to_gtt_domain(huc_fw->obj, false);
- if (ret) {
- DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
- return ret;
- }
-
- vma = i915_gem_object_ggtt_pin(huc_fw->obj, NULL, 0, 0,
- PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
- if (IS_ERR(vma)) {
- DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
- return PTR_ERR(vma);
- }
+ GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -133,55 +160,10 @@ static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- /*
- * We keep the object pages for reuse during resume. But we can unpin it
- * now that DMA has completed, so it doesn't continue to take up space.
- */
- i915_vma_unpin(vma);
-
return ret;
}
/**
- * intel_huc_select_fw() - selects HuC firmware for loading
- * @huc: intel_huc struct
- */
-void intel_huc_select_fw(struct intel_huc *huc)
-{
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
-
- huc->fw.path = NULL;
- huc->fw.fetch_status = INTEL_UC_FIRMWARE_NONE;
- huc->fw.load_status = INTEL_UC_FIRMWARE_NONE;
- huc->fw.type = INTEL_UC_FW_TYPE_HUC;
-
- if (i915.huc_firmware_path) {
- huc->fw.path = i915.huc_firmware_path;
- huc->fw.major_ver_wanted = 0;
- huc->fw.minor_ver_wanted = 0;
- } else if (IS_SKYLAKE(dev_priv)) {
- huc->fw.path = I915_SKL_HUC_UCODE;
- huc->fw.major_ver_wanted = SKL_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = SKL_HUC_FW_MINOR;
- } else if (IS_BROXTON(dev_priv)) {
- huc->fw.path = I915_BXT_HUC_UCODE;
- huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- huc->fw.path = I915_KBL_HUC_UCODE;
- huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
- } else if (IS_GEMINILAKE(dev_priv)) {
- huc->fw.path = I915_GLK_HUC_UCODE;
- huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR;
- } else {
- DRM_ERROR("No HuC firmware known for platform with HuC!\n");
- return;
- }
-}
-
-/**
* intel_huc_init_hw() - load HuC uCode to device
* @huc: intel_huc structure
*
@@ -195,49 +177,26 @@ void intel_huc_select_fw(struct intel_huc *huc)
*/
void intel_huc_init_hw(struct intel_huc *huc)
{
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
- int err;
-
- DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
- huc->fw.path,
- intel_uc_fw_status_repr(huc->fw.fetch_status),
- intel_uc_fw_status_repr(huc->fw.load_status));
-
- if (huc->fw.fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return;
-
- huc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;
-
- err = huc_ucode_xfer(dev_priv);
-
- huc->fw.load_status = err ?
- INTEL_UC_FIRMWARE_FAIL : INTEL_UC_FIRMWARE_SUCCESS;
-
- DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
- huc->fw.path,
- intel_uc_fw_status_repr(huc->fw.fetch_status),
- intel_uc_fw_status_repr(huc->fw.load_status));
-
- if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- DRM_ERROR("Failed to complete HuC uCode load with ret %d\n", err);
-
- return;
+ intel_uc_fw_upload(&huc->fw, huc_ucode_xfer);
}
/**
- * intel_guc_auth_huc() - authenticate ucode
- * @dev_priv: the drm_i915_device
+ * intel_huc_auth() - Authenticate HuC uCode
+ * @huc: intel_huc structure
+ *
+ * Called after HuC and GuC firmware loading during intel_uc_init_hw().
*
- * Triggers a HuC fw authentication request to the GuC via intel_guc_action_
- * authenticate_huc interface.
+ * This function pins the HuC firmware image object into the GGTT.
+ * It then invokes the GuC authentication action, passing the offset of
+ * the RSA signature via intel_guc_auth_huc(), waits up to 50ms for the
+ * firmware verification ACK, and unpins the object.
*/
-void intel_guc_auth_huc(struct drm_i915_private *dev_priv)
+void intel_huc_auth(struct intel_huc *huc)
{
- struct intel_guc *guc = &dev_priv->guc;
- struct intel_huc *huc = &dev_priv->huc;
+ struct drm_i915_private *i915 = huc_to_i915(huc);
+ struct intel_guc *guc = &i915->guc;
struct i915_vma *vma;
int ret;
- u32 data[2];
if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return;
@@ -250,23 +209,19 @@ void intel_guc_auth_huc(struct drm_i915_private *dev_priv)
return;
}
- /* Specify auth action and where public signature is. */
- data[0] = INTEL_GUC_ACTION_AUTHENTICATE_HUC;
- data[1] = guc_ggtt_offset(vma) + huc->fw.rsa_offset;
-
- ret = intel_guc_send(guc, data, ARRAY_SIZE(data));
+ ret = intel_guc_auth_huc(guc,
+ guc_ggtt_offset(vma) + huc->fw.rsa_offset);
if (ret) {
DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
goto out;
}
/* Check authentication status, it should be done by now */
- ret = intel_wait_for_register(dev_priv,
- HUC_STATUS2,
- HUC_FW_VERIFIED,
- HUC_FW_VERIFIED,
- 50);
-
+ ret = intel_wait_for_register(i915,
+ HUC_STATUS2,
+ HUC_FW_VERIFIED,
+ HUC_FW_VERIFIED,
+ 50);
if (ret) {
DRM_ERROR("HuC: Authentication failed %d\n", ret);
goto out;
@@ -275,4 +230,3 @@ void intel_guc_auth_huc(struct drm_i915_private *dev_priv)
out:
i915_vma_unpin(vma);
}
-
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
new file mode 100644
index 000000000000..aaa38b9e5817
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_HUC_H_
+#define _INTEL_HUC_H_
+
+#include "intel_uc_fw.h"
+
+struct intel_huc {
+ /* Generic uC firmware management */
+ struct intel_uc_fw fw;
+
+ /* HuC-specific additions */
+};
+
+void intel_huc_select_fw(struct intel_huc *huc);
+void intel_huc_init_hw(struct intel_huc *huc);
+void intel_huc_auth(struct intel_huc *huc);
+
+#endif
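Read together with the kernel-docs above, the intended HuC life cycle is select, load, then authenticate; the fetch of the blob itself is handled by the common uC code. A hypothetical driver-side sequence using only the three entry points declared in this header:

/* Hypothetical HuC bring-up order (sketch only): */
static void example_huc_bringup(struct intel_huc *huc)
{
	intel_huc_select_fw(huc);	/* pick blob + wanted version per platform */
	intel_huc_init_hw(huc);		/* DMA the image via huc_ucode_xfer() */
	intel_huc_auth(huc);		/* have the GuC verify the RSA signature */
}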
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6f972e6ec663..d36e25607435 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -208,8 +208,9 @@
/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
-
#define WA_TAIL_DWORDS 2
+#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
+#define PREEMPT_ID 0x1
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
@@ -243,8 +244,7 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enabl
return 0;
if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
- USES_PPGTT(dev_priv) &&
- i915.use_mmio_flip >= 0)
+ USES_PPGTT(dev_priv))
return 1;
return 0;
@@ -279,17 +279,110 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
desc = ctx->desc_template; /* bits 0-11 */
- desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE;
+ desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
/* bits 12-31 */
desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
ce->lrc_desc = desc;
}
-uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static struct i915_priolist *
+lookup_priolist(struct intel_engine_cs *engine,
+ struct i915_priotree *pt,
+ int prio)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_priolist *p;
+ struct rb_node **parent, *rb;
+ bool first = true;
+
+ if (unlikely(execlists->no_priolist))
+ prio = I915_PRIORITY_NORMAL;
+
+find_priolist:
+ /* most positive priority is scheduled first, equal priorities fifo */
+ rb = NULL;
+ parent = &execlists->queue.rb_node;
+ while (*parent) {
+ rb = *parent;
+ p = rb_entry(rb, typeof(*p), node);
+ if (prio > p->priority) {
+ parent = &rb->rb_left;
+ } else if (prio < p->priority) {
+ parent = &rb->rb_right;
+ first = false;
+ } else {
+ return p;
+ }
+ }
+
+ if (prio == I915_PRIORITY_NORMAL) {
+ p = &execlists->default_priolist;
+ } else {
+ p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+ /* Convert an allocation failure to a priority bump */
+ if (unlikely(!p)) {
+ prio = I915_PRIORITY_NORMAL; /* recurses just once */
+
+ /* To maintain ordering with all rendering, after an
+ * allocation failure we have to disable all scheduling.
+ * Requests will then be executed in fifo, and schedule
+ * will ensure that dependencies are emitted in fifo.
+ * There will still be some reordering with existing
+ * requests, so if userspace lied about their
+ * dependencies, that reordering may be visible.
+ */
+ execlists->no_priolist = true;
+ goto find_priolist;
+ }
+ }
+
+ p->priority = prio;
+ INIT_LIST_HEAD(&p->requests);
+ rb_link_node(&p->node, rb, parent);
+ rb_insert_color(&p->node, &execlists->queue);
+
+ if (first)
+ execlists->first = &p->node;
+
+ return ptr_pack_bits(p, first, 1);
+}
+
+static void unwind_wa_tail(struct drm_i915_gem_request *rq)
{
- return ctx->engine[engine->id].lrc_desc;
+ rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+}
+
+static void unwind_incomplete_requests(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_request *rq, *rn;
+ struct i915_priolist *uninitialized_var(p);
+ int last_prio = I915_PRIORITY_INVALID;
+
+ lockdep_assert_held(&engine->timeline->lock);
+
+ list_for_each_entry_safe_reverse(rq, rn,
+ &engine->timeline->requests,
+ link) {
+ if (i915_gem_request_completed(rq))
+ return;
+
+ __i915_gem_request_unsubmit(rq);
+ unwind_wa_tail(rq);
+
+ GEM_BUG_ON(rq->priotree.priority == I915_PRIORITY_INVALID);
+ if (rq->priotree.priority != last_prio) {
+ p = lookup_priolist(engine,
+ &rq->priotree,
+ rq->priotree.priority);
+ p = ptr_mask_bits(p, 1);
+
+ last_prio = rq->priotree.priority;
+ }
+
+ list_add(&rq->priotree.link, &p->requests);
+ }
}
static inline void
@@ -336,14 +429,20 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
return ce->lrc_desc;
}
+static inline void elsp_write(u64 desc, u32 __iomem *elsp)
+{
+ writel(upper_32_bits(desc), elsp);
+ writel(lower_32_bits(desc), elsp);
+}
+
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
- struct execlist_port *port = engine->execlist_port;
+ struct execlist_port *port = engine->execlists.port;
u32 __iomem *elsp =
engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
unsigned int n;
- for (n = ARRAY_SIZE(engine->execlist_port); n--; ) {
+ for (n = execlists_num_ports(&engine->execlists); n--; ) {
struct drm_i915_gem_request *rq;
unsigned int count;
u64 desc;
@@ -361,8 +460,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
desc = 0;
}
- writel(upper_32_bits(desc), elsp);
- writel(lower_32_bits(desc), elsp);
+ elsp_write(desc, elsp);
}
}
@@ -395,25 +493,43 @@ static void port_assign(struct execlist_port *port,
port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
}
+static void inject_preempt_context(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce =
+ &engine->i915->preempt_context->engine[engine->id];
+ u32 __iomem *elsp =
+ engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
+ unsigned int n;
+
+ GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID);
+ GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES));
+
+ memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES);
+ ce->ring->tail += WA_TAIL_BYTES;
+ ce->ring->tail &= (ce->ring->size - 1);
+ ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail;
+
+ for (n = execlists_num_ports(&engine->execlists); --n; )
+ elsp_write(0, elsp);
+
+ elsp_write(ce->lrc_desc, elsp);
+}
+
+static bool can_preempt(struct intel_engine_cs *engine)
+{
+ return INTEL_INFO(engine->i915)->has_logical_ring_preemption;
+}
+
static void execlists_dequeue(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *last;
- struct execlist_port *port = engine->execlist_port;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct execlist_port *port = execlists->port;
+ const struct execlist_port * const last_port =
+ &execlists->port[execlists->port_mask];
+ struct drm_i915_gem_request *last = port_request(port);
struct rb_node *rb;
bool submit = false;
- last = port_request(port);
- if (last)
- /* WaIdleLiteRestore:bdw,skl
- * Apply the wa NOOPs to prevent ring:HEAD == req:TAIL
- * as we resubmit the request. See gen8_emit_breadcrumb()
- * for where we prepare the padding after the end of the
- * request.
- */
- last->tail = last->wa_tail;
-
- GEM_BUG_ON(port_isset(&port[1]));
-
/* Hardware submission is through 2 ports. Conceptually each port
* has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
* static for a context, and unique to each, so we only execute
@@ -436,9 +552,68 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
*/
spin_lock_irq(&engine->timeline->lock);
- rb = engine->execlist_first;
- GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
- while (rb) {
+ rb = execlists->first;
+ GEM_BUG_ON(rb_first(&execlists->queue) != rb);
+ if (!rb)
+ goto unlock;
+
+ if (last) {
+ /*
+ * Don't resubmit or switch until all outstanding
+ * preemptions (lite-restore) are seen. Then we
+ * know the next preemption status we see corresponds
+ * to this ELSP update.
+ */
+ if (port_count(&port[0]) > 1)
+ goto unlock;
+
+ if (can_preempt(engine) &&
+ rb_entry(rb, struct i915_priolist, node)->priority >
+ max(last->priotree.priority, 0)) {
+ /*
+ * Switch to our empty preempt context so
+ * the state of the GPU is known (idle).
+ */
+ inject_preempt_context(engine);
+ execlists_set_active(execlists,
+ EXECLISTS_ACTIVE_PREEMPT);
+ goto unlock;
+ } else {
+ /*
+ * In theory, we could coalesce more requests onto
+ * the second port (the first port is active, with
+ * no preemptions pending). However, that means we
+ * then have to deal with the possible lite-restore
+ * of the second port (as we submit the ELSP, there
+ * may be a context-switch) but also we may complete
+ * the resubmission before the context-switch. Ergo,
+ * coalescing onto the second port will cause a
+ * preemption event, but we cannot predict whether
+ * that will affect port[0] or port[1].
+ *
+ * If the second port is already active, we can wait
+ * until the next context-switch before contemplating
+ * new requests. The GPU will be busy and we should be
+ * able to resubmit the new ELSP before it idles,
+ * avoiding pipeline bubbles (momentary pauses where
+ * the driver is unable to keep up the supply of new
+ * work).
+ */
+ if (port_count(&port[1]))
+ goto unlock;
+
+ /* WaIdleLiteRestore:bdw,skl
+ * Apply the wa NOOPs to prevent
+ * ring:HEAD == req:TAIL as we resubmit the
+ * request. See gen8_emit_breadcrumb() for
+ * where we prepare the padding after the
+ * end of the request.
+ */
+ last->tail = last->wa_tail;
+ }
+ }
+
+ do {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
struct drm_i915_gem_request *rq, *rn;
@@ -460,7 +635,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* combine this request with the last, then we
* are done.
*/
- if (port != engine->execlist_port) {
+ if (port == last_port) {
__list_del_many(&p->requests,
&rq->priotree.link);
goto done;
@@ -485,38 +660,108 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (submit)
port_assign(port, last);
port++;
+
+ GEM_BUG_ON(port_isset(port));
}
INIT_LIST_HEAD(&rq->priotree.link);
- rq->priotree.priority = INT_MAX;
-
__i915_gem_request_submit(rq);
- trace_i915_gem_request_in(rq, port_index(port, engine));
+ trace_i915_gem_request_in(rq, port_index(port, execlists));
last = rq;
submit = true;
}
rb = rb_next(rb);
- rb_erase(&p->node, &engine->execlist_queue);
+ rb_erase(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
- }
+ } while (rb);
done:
- engine->execlist_first = rb;
+ execlists->first = rb;
if (submit)
port_assign(port, last);
+unlock:
spin_unlock_irq(&engine->timeline->lock);
- if (submit)
+ if (submit) {
+ execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
execlists_submit_ports(engine);
+ }
+}
+
+static void
+execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
+{
+ struct execlist_port *port = execlists->port;
+ unsigned int num_ports = execlists_num_ports(execlists);
+
+ while (num_ports-- && port_isset(port)) {
+ struct drm_i915_gem_request *rq = port_request(port);
+
+ GEM_BUG_ON(!execlists->active);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
+ i915_gem_request_put(rq);
+
+ memset(port, 0, sizeof(*port));
+ port++;
+ }
}
-static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
+static void execlists_cancel_requests(struct intel_engine_cs *engine)
{
- const struct execlist_port *port = engine->execlist_port;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct drm_i915_gem_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
- return port_count(&port[0]) + port_count(&port[1]) < 2;
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
+ /* Cancel the requests on the HW and clear the ELSP tracker. */
+ execlist_cancel_port_requests(execlists);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &engine->timeline->requests, link) {
+ GEM_BUG_ON(!rq->global_seqno);
+ if (!i915_gem_request_completed(rq))
+ dma_fence_set_error(&rq->fence, -EIO);
+ }
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ rb = execlists->first;
+ while (rb) {
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+
+ list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
+ INIT_LIST_HEAD(&rq->priotree.link);
+
+ dma_fence_set_error(&rq->fence, -EIO);
+ __i915_gem_request_submit(rq);
+ }
+
+ rb = rb_next(rb);
+ rb_erase(&p->node, &execlists->queue);
+ INIT_LIST_HEAD(&p->requests);
+ if (p->priority != I915_PRIORITY_NORMAL)
+ kmem_cache_free(engine->i915->priorities, p);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+
+ execlists->queue = RB_ROOT;
+ execlists->first = NULL;
+ GEM_BUG_ON(port_isset(execlists->port));
+
+ /*
+ * The port is checked prior to scheduling a tasklet, but
+ * just in case we have suspended the tasklet to do the
+ * wedging, make sure that when it wakes, it decides there
+ * is no work to do by clearing the irq_posted bit.
+ */
+ clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
/*
@@ -525,8 +770,9 @@ static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
*/
static void intel_lrc_irq_handler(unsigned long data)
{
- struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
- struct execlist_port *port = engine->execlist_port;
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct execlist_port * const port = execlists->port;
struct drm_i915_private *dev_priv = engine->i915;
/* We can skip acquiring intel_runtime_pm_get() here as it was taken
@@ -538,19 +784,24 @@ static void intel_lrc_irq_handler(unsigned long data)
*/
GEM_BUG_ON(!dev_priv->gt.awake);
- intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
+ intel_uncore_forcewake_get(dev_priv, execlists->fw_domains);
/* Prefer doing test_and_clear_bit() as a two stage operation to avoid
* imposing the cost of a locked atomic transaction when submitting a
* new request (outside of the context-switch interrupt).
*/
while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
- u32 __iomem *csb_mmio =
- dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
- u32 __iomem *buf =
- dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
+ /* The HWSP contains a (cacheable) mirror of the CSB */
+ const u32 *buf =
+ &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
unsigned int head, tail;
+ if (unlikely(execlists->csb_use_mmio)) {
+ buf = (u32 * __force)
+ (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+ execlists->csb_head = -1; /* force mmio read of CSB ptrs */
+ }
+
/* The write will be ordered by the uncached read (itself
* a memory barrier), so we do not need another in the form
* of a locked instruction. The race between the interrupt
@@ -562,9 +813,20 @@ static void intel_lrc_irq_handler(unsigned long data)
* is set and we do a new loop.
*/
__clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- head = readl(csb_mmio);
- tail = GEN8_CSB_WRITE_PTR(head);
- head = GEN8_CSB_READ_PTR(head);
+ if (unlikely(execlists->csb_head == -1)) { /* following a reset */
+ head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
+ tail = GEN8_CSB_WRITE_PTR(head);
+ head = GEN8_CSB_READ_PTR(head);
+ execlists->csb_head = head;
+ } else {
+ const int write_idx =
+ intel_hws_csb_write_index(dev_priv) -
+ I915_HWS_CSB_BUF0_INDEX;
+
+ head = execlists->csb_head;
+ tail = READ_ONCE(buf[write_idx]);
+ }
+
while (head != tail) {
struct drm_i915_gem_request *rq;
unsigned int status;
@@ -590,13 +852,35 @@ static void intel_lrc_irq_handler(unsigned long data)
* status notifier.
*/
- status = readl(buf + 2 * head);
+ status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
continue;
+ if (status & GEN8_CTX_STATUS_ACTIVE_IDLE &&
+ buf[2*head + 1] == PREEMPT_ID) {
+ execlist_cancel_port_requests(execlists);
+
+ spin_lock_irq(&engine->timeline->lock);
+ unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->timeline->lock);
+
+ GEM_BUG_ON(!execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_PREEMPT));
+ execlists_clear_active(execlists,
+ EXECLISTS_ACTIVE_PREEMPT);
+ continue;
+ }
+
+ if (status & GEN8_CTX_STATUS_PREEMPTED &&
+ execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_PREEMPT))
+ continue;
+
+ GEM_BUG_ON(!execlists_is_active(execlists,
+ EXECLISTS_ACTIVE_USER));
+
/* Check the context/desc id for this event matches */
- GEM_DEBUG_BUG_ON(readl(buf + 2 * head + 1) !=
- port->context_id);
+ GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
rq = port_unpack(port, &count);
GEM_BUG_ON(count == 0);
@@ -608,8 +892,7 @@ static void intel_lrc_irq_handler(unsigned long data)
trace_i915_gem_request_out(rq);
i915_gem_request_put(rq);
- port[0] = port[1];
- memset(&port[1], 0, sizeof(port[1]));
+ execlists_port_complete(execlists, port);
} else {
port_set(port, port_pack(rq, count));
}
@@ -617,80 +900,33 @@ static void intel_lrc_irq_handler(unsigned long data)
/* After the final element, the hw should be idle */
GEM_BUG_ON(port_count(port) == 0 &&
!(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
+ if (port_count(port) == 0)
+ execlists_clear_active(execlists,
+ EXECLISTS_ACTIVE_USER);
}
- writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
- csb_mmio);
+ if (head != execlists->csb_head) {
+ execlists->csb_head = head;
+ writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
+ dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
+ }
}
- if (execlists_elsp_ready(engine))
+ if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
execlists_dequeue(engine);
- intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
+ intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
}
-static bool
-insert_request(struct intel_engine_cs *engine,
- struct i915_priotree *pt,
- int prio)
+static void insert_request(struct intel_engine_cs *engine,
+ struct i915_priotree *pt,
+ int prio)
{
- struct i915_priolist *p;
- struct rb_node **parent, *rb;
- bool first = true;
-
- if (unlikely(engine->no_priolist))
- prio = I915_PRIORITY_NORMAL;
-
-find_priolist:
- /* most positive priority is scheduled first, equal priorities fifo */
- rb = NULL;
- parent = &engine->execlist_queue.rb_node;
- while (*parent) {
- rb = *parent;
- p = rb_entry(rb, typeof(*p), node);
- if (prio > p->priority) {
- parent = &rb->rb_left;
- } else if (prio < p->priority) {
- parent = &rb->rb_right;
- first = false;
- } else {
- list_add_tail(&pt->link, &p->requests);
- return false;
- }
- }
-
- if (prio == I915_PRIORITY_NORMAL) {
- p = &engine->default_priolist;
- } else {
- p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
- /* Convert an allocation failure to a priority bump */
- if (unlikely(!p)) {
- prio = I915_PRIORITY_NORMAL; /* recurses just once */
-
- /* To maintain ordering with all rendering, after an
- * allocation failure we have to disable all scheduling.
- * Requests will then be executed in fifo, and schedule
- * will ensure that dependencies are emitted in fifo.
- * There will be still some reordering with existing
- * requests, so if userspace lied about their
- * dependencies that reordering may be visible.
- */
- engine->no_priolist = true;
- goto find_priolist;
- }
- }
+ struct i915_priolist *p = lookup_priolist(engine, pt, prio);
- p->priority = prio;
- rb_link_node(&p->node, rb, parent);
- rb_insert_color(&p->node, &engine->execlist_queue);
-
- INIT_LIST_HEAD(&p->requests);
- list_add_tail(&pt->link, &p->requests);
-
- if (first)
- engine->execlist_first = &p->node;
-
- return first;
+ list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests);
+ if (ptr_unmask_bits(p, 1))
+ tasklet_hi_schedule(&engine->execlists.irq_tasklet);
}
static void execlists_submit_request(struct drm_i915_gem_request *request)
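Note on the rewritten insert_request() above: lookup_priolist() returns a tagged pointer, with the low bit of the (suitably aligned) struct i915_priolist pointer carrying a flag -- here taken to mean the entry became the head of the queue -- which is why ptr_mask_bits() strips it before list_add_tail() and ptr_unmask_bits() decides whether to kick the tasklet. The following is a minimal stand-alone sketch of that pointer-tagging idiom, not driver code; the names and the flag's meaning are illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    #define TAG_BITS 1UL    /* number of low bits borrowed from an aligned pointer */

    static inline void *pack_ptr(void *ptr, unsigned long flag)
    {
            return (void *)((uintptr_t)ptr | (flag & ((1UL << TAG_BITS) - 1)));
    }

    static inline void *unpack_ptr(void *packed)            /* cf. ptr_mask_bits(p, 1) */
    {
            return (void *)((uintptr_t)packed & ~((1UL << TAG_BITS) - 1));
    }

    static inline unsigned long packed_flag(void *packed)   /* cf. ptr_unmask_bits(p, 1) */
    {
            return (uintptr_t)packed & ((1UL << TAG_BITS) - 1);
    }

    int main(void)
    {
            static long priolist;                   /* stands in for struct i915_priolist */
            void *p = pack_ptr(&priolist, 1);       /* flag set: queue head changed */

            printf("list %p, schedule tasklet: %lu\n", unpack_ptr(p), packed_flag(p));
            return 0;
    }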
@@ -701,24 +937,23 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->timeline->lock, flags);
- if (insert_request(engine,
- &request->priotree,
- request->priotree.priority)) {
- if (execlists_elsp_ready(engine))
- tasklet_hi_schedule(&engine->irq_tasklet);
- }
+ insert_request(engine, &request->priotree, request->priotree.priority);
- GEM_BUG_ON(!engine->execlist_first);
+ GEM_BUG_ON(!engine->execlists.first);
GEM_BUG_ON(list_empty(&request->priotree.link));
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
+static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt)
+{
+ return container_of(pt, struct drm_i915_gem_request, priotree);
+}
+
static struct intel_engine_cs *
pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
{
- struct intel_engine_cs *engine =
- container_of(pt, struct drm_i915_gem_request, priotree)->engine;
+ struct intel_engine_cs *engine = pt_to_request(pt)->engine;
GEM_BUG_ON(!locked);
@@ -737,6 +972,8 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
struct i915_dependency stack;
LIST_HEAD(dfs);
+ GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
+
if (prio <= READ_ONCE(request->priotree.priority))
return;
@@ -772,6 +1009,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
* engines.
*/
list_for_each_entry(p, &pt->signalers_list, signal_link) {
+ if (i915_gem_request_completed(pt_to_request(p->signaler)))
+ continue;
+
GEM_BUG_ON(p->signaler->priority < pt->priority);
if (prio > READ_ONCE(p->signaler->priority))
list_move_tail(&p->dfs_link, &dfs);
@@ -785,7 +1025,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
* execlists_submit_request()), we can set our own priority and skip
* acquiring the engine locks.
*/
- if (request->priotree.priority == INT_MIN) {
+ if (request->priotree.priority == I915_PRIORITY_INVALID) {
GEM_BUG_ON(!list_empty(&request->priotree.link));
request->priotree.priority = prio;
if (stack.dfs_link.next == stack.dfs_link.prev)
@@ -815,8 +1055,6 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
}
spin_unlock_irq(&engine->timeline->lock);
-
- /* XXX Do we need to preempt to make room for us and our deps? */
}
static struct intel_ring *
@@ -866,6 +1104,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
i915_ggtt_offset(ce->ring->vma);
ce->state->obj->mm.dirty = true;
+ ce->state->obj->pin_global++;
i915_gem_context_get(ctx);
out:
@@ -893,6 +1132,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
intel_ring_unpin(ce->ring);
+ ce->state->obj->pin_global--;
i915_gem_object_unpin_map(ce->state->obj);
i915_vma_unpin(ce->state);
@@ -914,27 +1154,14 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
*/
request->reserved_space += EXECLISTS_REQUEST_SIZE;
- if (i915.enable_guc_submission) {
- /*
- * Check that the GuC has space for the request before
- * going any further, as the i915_add_request() call
- * later on mustn't fail ...
- */
- ret = i915_guc_wq_reserve(request);
- if (ret)
- goto err;
- }
-
cs = intel_ring_begin(request, 0);
- if (IS_ERR(cs)) {
- ret = PTR_ERR(cs);
- goto err_unreserve;
- }
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
if (!ce->initialised) {
ret = engine->init_context(request);
if (ret)
- goto err_unreserve;
+ return ret;
ce->initialised = true;
}
@@ -948,12 +1175,6 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
request->reserved_space -= EXECLISTS_REQUEST_SIZE;
return 0;
-
-err_unreserve:
- if (i915.enable_guc_submission)
- i915_guc_wq_unreserve(request);
-err:
- return ret;
}
/*
@@ -1031,6 +1252,8 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
i915_ggtt_offset(engine->scratch) +
2 * CACHELINE_BYTES);
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
/* Pad to end of cacheline */
while ((unsigned long)batch % CACHELINE_BYTES)
*batch++ = MI_NOOP;
@@ -1044,26 +1267,10 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
-/*
- * This batch is started immediately after indirect_ctx batch. Since we ensure
- * that indirect_ctx ends on a cacheline this batch is aligned automatically.
- *
- * The number of DWORDS written are returned using this field.
- *
- * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
- * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
- */
-static u32 *gen8_init_perctx_bb(struct intel_engine_cs *engine, u32 *batch)
-{
- /* WaDisableCtxRestoreArbitration:bdw,chv */
- *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- *batch++ = MI_BATCH_BUFFER_END;
-
- return batch;
-}
-
static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
batch = gen8_emit_flush_coherentl3_wa(engine, batch);
@@ -1109,6 +1316,8 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
*batch++ = 0;
}
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
/* Pad to end of cacheline */
while ((unsigned long)batch % CACHELINE_BYTES)
*batch++ = MI_NOOP;
@@ -1116,13 +1325,6 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
-static u32 *gen9_init_perctx_bb(struct intel_engine_cs *engine, u32 *batch)
-{
- *batch++ = MI_BATCH_BUFFER_END;
-
- return batch;
-}
-
#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)
static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
@@ -1175,13 +1377,15 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return -EINVAL;
switch (INTEL_GEN(engine->i915)) {
+ case 10:
+ return 0;
case 9:
wa_bb_fn[0] = gen9_init_indirectctx_bb;
- wa_bb_fn[1] = gen9_init_perctx_bb;
+ wa_bb_fn[1] = NULL;
break;
case 8:
wa_bb_fn[0] = gen8_init_indirectctx_bb;
- wa_bb_fn[1] = gen8_init_perctx_bb;
+ wa_bb_fn[1] = NULL;
break;
default:
MISSING_CASE(INTEL_GEN(engine->i915));
@@ -1208,7 +1412,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
ret = -EINVAL;
break;
}
- batch_ptr = wa_bb_fn[i](engine, batch_ptr);
+ if (wa_bb_fn[i])
+ batch_ptr = wa_bb_fn[i](engine, batch_ptr);
wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
}
@@ -1232,9 +1437,7 @@ static u8 gtiir[] = {
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- struct execlist_port *port = engine->execlist_port;
- unsigned int n;
- bool submit;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
int ret;
ret = intel_mocs_init_engine(engine);
@@ -1267,24 +1470,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ execlists->csb_head = -1;
+ execlists->active = 0;
/* After a GPU reset, we may have requests to replay */
- submit = false;
- for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
- if (!port_isset(&port[n]))
- break;
-
- DRM_DEBUG_DRIVER("Restarting %s:%d from 0x%x\n",
- engine->name, n,
- port_request(&port[n])->global_seqno);
-
- /* Discard the current inflight count */
- port_set(&port[n], port_request(&port[n]));
- submit = true;
- }
-
- if (submit && !i915.enable_guc_submission)
- execlists_submit_ports(engine);
+ if (!i915_modparams.enable_guc_submission && execlists->first)
+ tasklet_schedule(&execlists->irq_tasklet);
return 0;
}
@@ -1325,9 +1516,11 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
static void reset_common_ring(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request)
{
- struct execlist_port *port = engine->execlist_port;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
struct intel_context *ce;
- unsigned int n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline->lock, flags);
/*
* Catch up with any missed context-switch interrupts.
@@ -1338,20 +1531,12 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* guessing the missed context-switch events by looking at what
* requests were completed.
*/
- if (!request) {
- for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
- i915_gem_request_put(port_request(&port[n]));
- memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
- return;
- }
+ execlist_cancel_port_requests(execlists);
- if (request->ctx != port_request(port)->ctx) {
- i915_gem_request_put(port_request(port));
- port[0] = port[1];
- memset(&port[1], 0, sizeof(port[1]));
- }
+ /* Push back any incomplete requests for replay after the reset. */
+ unwind_incomplete_requests(engine);
- GEM_BUG_ON(request->ctx != port_request(port)->ctx);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
/* If the request was innocent, we leave the request in the ELSP
* and will try to replay it on restarting. The context image may
@@ -1363,7 +1548,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* and have to at least restore the RING register in the context
* image back to the expected values to skip over the guilty request.
*/
- if (request->fence.error != -EIO)
+ if (!request || request->fence.error != -EIO)
return;
/* We want a simple context + ring to execute the breadcrumb update.
@@ -1386,10 +1571,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
intel_ring_update_space(request->ring);
/* Reset WaIdleLiteRestore:bdw,skl as well */
- request->tail =
- intel_ring_wrap(request->ring,
- request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
- assert_ring_tail_valid(request->ring, request->tail);
+ unwind_wa_tail(request);
}
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
@@ -1448,13 +1630,31 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
if (IS_ERR(cs))
return PTR_ERR(cs);
+ /*
+ * WaDisableCtxRestoreArbitration:bdw,chv
+ *
+ * We don't need to perform MI_ARB_ENABLE as often as we do (in
+ * particular on all the gens that do not need the w/a at all!): if we
+ * took care to make sure that arbitration was enabled on every switch
+ * into this context (both ordinary and for preemption), we would be
+ * fine. However, there doesn't seem to be a downside to
+ * being paranoid and making sure it is set before each batch and
+ * every context-switch.
+ *
+ * Note that if we fail to enable arbitration before the request
+ * is complete, then we do not see the context-switch interrupt and
+ * the engine hangs (with RING_HEAD == RING_TAIL).
+ *
+ * That satisfies both the GPGPU w/a and our heavy-handed paranoia.
+ */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
/* FIXME(BDW): Address space and security selectors. */
*cs++ = MI_BATCH_BUFFER_START_GEN8 |
(flags & I915_DISPATCH_SECURE ? 0 : BIT(8)) |
(flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
- *cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return 0;
@@ -1583,7 +1783,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
*/
static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
{
- *cs++ = MI_NOOP;
+ /* Ensure there's always at least one preemption point per-request. */
+ *cs++ = MI_ARB_CHECK;
*cs++ = MI_NOOP;
request->wa_tail = intel_ring_offset(request, cs);
}
@@ -1604,7 +1805,6 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
gen8_emit_wa_tail(request, cs);
}
-
static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
@@ -1632,7 +1832,6 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
gen8_emit_wa_tail(request, cs);
}
-
static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
@@ -1666,8 +1865,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
* Tasklet cannot be active at this point due intel_mark_active/idle
* so this is just for documentation.
*/
- if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
- tasklet_kill(&engine->irq_tasklet);
+ if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->execlists.irq_tasklet.state)))
+ tasklet_kill(&engine->execlists.irq_tasklet);
dev_priv = engine->i915;
@@ -1678,11 +1877,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
if (engine->cleanup)
engine->cleanup(engine);
- if (engine->status_page.vma) {
- i915_gem_object_unpin_map(engine->status_page.vma->obj);
- engine->status_page.vma = NULL;
- }
-
intel_engine_cleanup_common(engine);
lrc_destroy_wa_ctx(engine);
@@ -1694,8 +1888,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
static void execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
+ engine->cancel_requests = execlists_cancel_requests;
engine->schedule = execlists_schedule;
- engine->irq_tasklet.func = intel_lrc_irq_handler;
+ engine->execlists.irq_tasklet.func = intel_lrc_irq_handler;
}
static void
@@ -1729,24 +1924,6 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
-static int
-lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
-{
- const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;
- void *hws;
-
- /* The HWSP is part of the default context object in LRC mode. */
- hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(hws))
- return PTR_ERR(hws);
-
- engine->status_page.page_addr = hws + hws_offset;
- engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset;
- engine->status_page.vma = vma;
-
- return 0;
-}
-
static void
logical_ring_setup(struct intel_engine_cs *engine)
{
@@ -1770,32 +1947,23 @@ logical_ring_setup(struct intel_engine_cs *engine)
RING_CONTEXT_STATUS_BUF_BASE(engine),
FW_REG_READ);
- engine->fw_domains = fw_domains;
+ engine->execlists.fw_domains = fw_domains;
- tasklet_init(&engine->irq_tasklet,
+ tasklet_init(&engine->execlists.irq_tasklet,
intel_lrc_irq_handler, (unsigned long)engine);
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
}
-static int
-logical_ring_init(struct intel_engine_cs *engine)
+static int logical_ring_init(struct intel_engine_cs *engine)
{
- struct i915_gem_context *dctx = engine->i915->kernel_context;
int ret;
ret = intel_engine_init_common(engine);
if (ret)
goto error;
- /* And setup the hardware status page. */
- ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
- if (ret) {
- DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
- goto error;
- }
-
return 0;
error:
@@ -1953,13 +2121,12 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0);
CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0);
if (rcs) {
- CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
+ struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
+
CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0);
CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET,
RING_INDIRECT_CTX_OFFSET(base), 0);
-
- if (engine->wa_ctx.vma) {
- struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
+ if (wa_ctx->indirect_ctx.size) {
u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
regs[CTX_RCS_INDIRECT_CTX + 1] =
@@ -1968,6 +2135,11 @@ static void execlists_init_reg_state(u32 *regs,
regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
intel_lr_indirect_ctx_offset(engine) << 6;
+ }
+
+ CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
+ if (wa_ctx->per_ctx.size) {
+ u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
regs[CTX_BB_PER_CTX_PTR + 1] =
(ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
@@ -2052,8 +2224,11 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
- /* One extra page as the sharing data between driver and GuC */
- context_size += PAGE_SIZE * LRC_PPHWSP_PN;
+ /*
+ * Before the actual start of the context image, we insert a few pages
+ * for our own use and for sharing with the GuC.
+ */
+ context_size += LRC_HEADER_PAGES * PAGE_SIZE;
ctx_obj = i915_gem_object_create(ctx->i915, context_size);
if (IS_ERR(ctx_obj)) {
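For context on the interrupt-handler change above: instead of two MMIO reads per event, the handler now keeps a cached read pointer (execlists->csb_head) and walks the CSB mirror in the HWSP from that head up to the write pointer published by the hardware, wrapping modulo the number of entries. A self-contained sketch of that head/tail walk follows; the six-entry size and the status values are illustrative, not the driver's API.

    #include <stdint.h>
    #include <stdio.h>

    #define CSB_ENTRIES 6   /* assumed ring size; GEN8 exposes six status entries */

    /* Each entry is two dwords: a status word and the context ID that produced it. */
    static void consume_csb(const uint32_t buf[][2], unsigned int *head, unsigned int tail)
    {
            while (*head != tail) {
                    if (++*head == CSB_ENTRIES)
                            *head = 0;

                    printf("event %u: status=0x%08x ctx=0x%08x\n",
                           *head, buf[*head][0], buf[*head][1]);
            }
    }

    int main(void)
    {
            uint32_t csb[CSB_ENTRIES][2] = { 0 };
            unsigned int head = 4, tail = 1;        /* three new events, wrapping at 6 */

            csb[5][0] = 0x00008000;                 /* made-up status values */
            csb[0][0] = 0x00000001;
            csb[1][0] = 0x00000018;

            consume_csb(csb, &head, tail);
            return 0;
    }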
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 57ef5833c427..689fde1a63a9 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -25,6 +25,7 @@
#define _INTEL_LRC_H_
#include "intel_ringbuffer.h"
+#include "i915_gem_context.h"
#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
@@ -60,6 +61,7 @@
enum {
INTEL_CONTEXT_SCHEDULE_IN = 0,
INTEL_CONTEXT_SCHEDULE_OUT,
+ INTEL_CONTEXT_SCHEDULE_PREEMPTED,
};
/* Logical Rings */
@@ -69,17 +71,42 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
/* Logical Ring Contexts */
-/* One extra page is added before LRC for GuC as shared data */
+/*
+ * We allocate a header at the start of the context image for our own
+ * use, therefore the actual location of the logical state is offset
+ * from the start of the VMA. The layout is
+ *
+ * | [guc] | [hwsp] [logical state] |
+ * |<- our header ->|<- context image ->|
+ *
+ */
+/* The first page is used for sharing data with the GuC */
#define LRC_GUCSHR_PN (0)
-#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1)
-#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
+#define LRC_GUCSHR_SZ (1)
+/* At the start of the context image is its per-process HWS page */
+#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + LRC_GUCSHR_SZ)
+#define LRC_PPHWSP_SZ (1)
+/* Finally we have the logical state for the context */
+#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
+
+/*
+ * Currently we include the PPHWSP in __intel_engine_context_size() so
+ * the size of the header is synonymous with the start of the PPHWSP.
+ */
+#define LRC_HEADER_PAGES LRC_PPHWSP_PN
struct drm_i915_private;
struct i915_gem_context;
void intel_lr_context_resume(struct drm_i915_private *dev_priv);
-uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
+
+static inline uint64_t
+intel_lr_context_descriptor(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ return ctx->engine[engine->id].lrc_desc;
+}
+
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
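To make the new context-image layout concrete, the snippet below simply evaluates the page macros defined above into byte offsets. It is a stand-alone illustration with a hard-coded 4 KiB page size, not driver code.

    #include <stdio.h>

    #define PAGE_SIZE        4096
    #define LRC_GUCSHR_PN    0                               /* page shared with the GuC */
    #define LRC_GUCSHR_SZ    1
    #define LRC_PPHWSP_PN    (LRC_GUCSHR_PN + LRC_GUCSHR_SZ) /* per-process HWS page */
    #define LRC_PPHWSP_SZ    1
    #define LRC_STATE_PN     (LRC_PPHWSP_PN + LRC_PPHWSP_SZ) /* logical ring state */
    #define LRC_HEADER_PAGES LRC_PPHWSP_PN

    int main(void)
    {
            printf("GuC shared page : 0x%05x\n", LRC_GUCSHR_PN * PAGE_SIZE);
            printf("PPHWSP          : 0x%05x\n", LRC_PPHWSP_PN * PAGE_SIZE);
            printf("logical state   : 0x%05x\n", LRC_STATE_PN * PAGE_SIZE);
            printf("header pages    : %d\n", LRC_HEADER_PAGES);
            return 0;
    }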
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index beb9baaf2f2e..dcbc786479f9 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -56,7 +56,7 @@ static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
if (drm_lspcon_get_mode(adapter, &current_mode)) {
- DRM_ERROR("Error reading LSPCON mode\n");
+ DRM_DEBUG_KMS("Error reading LSPCON mode\n");
return DRM_LSPCON_MODE_INVALID;
}
return current_mode;
@@ -68,16 +68,15 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
enum drm_lspcon_mode current_mode;
current_mode = lspcon_get_current_mode(lspcon);
- if (current_mode == mode || current_mode == DRM_LSPCON_MODE_INVALID)
+ if (current_mode == mode)
goto out;
DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
lspcon_mode_name(mode));
- wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode ||
- current_mode == DRM_LSPCON_MODE_INVALID, 100);
+ wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100);
if (current_mode != mode)
- DRM_DEBUG_KMS("LSPCON mode hasn't settled\n");
+ DRM_ERROR("LSPCON mode hasn't settled\n");
out:
DRM_DEBUG_KMS("Current LSPCON mode %s\n",
@@ -133,6 +132,7 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
static bool lspcon_probe(struct intel_lspcon *lspcon)
{
+ int retry;
enum drm_dp_dual_mode_type adaptor_type;
struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
enum drm_lspcon_mode expected_mode;
@@ -141,10 +141,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS;
/* Lets probe the adaptor and check its type */
- adaptor_type = drm_dp_dual_mode_detect(adapter);
+ for (retry = 0; retry < 6; retry++) {
+ if (retry)
+ usleep_range(500, 1000);
+
+ adaptor_type = drm_dp_dual_mode_detect(adapter);
+ if (adaptor_type == DRM_DP_DUAL_MODE_LSPCON)
+ break;
+ }
+
if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
DRM_DEBUG_KMS("No LSPCON detected, found %s\n",
- drm_dp_get_dual_mode_type_name(adaptor_type));
+ drm_dp_get_dual_mode_type_name(adaptor_type));
return false;
}
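The lspcon_probe() change above replaces the single detection attempt with a bounded retry loop, sleeping briefly between attempts so slow adaptors have time to answer. A stand-alone sketch of the same shape, with a stand-in for drm_dp_dual_mode_detect() that pretends the adaptor answers on the fourth try:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static bool detect(int attempt)
    {
            return attempt >= 3;    /* illustrative: succeeds on the 4th attempt */
    }

    int main(void)
    {
            bool found = false;
            int retry;

            /* Up to 6 attempts, sleeping between retries as the probe loop does. */
            for (retry = 0; retry < 6; retry++) {
                    if (retry)
                            usleep(750);    /* the driver uses usleep_range(500, 1000) */

                    if (detect(retry)) {
                            found = true;
                            break;
                    }
            }

            printf("LSPCON %s after %d attempt(s)\n",
                   found ? "detected" : "not detected", retry + 1);
            return 0;
    }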
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8e215777c7f4..38572d65e46e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -229,8 +229,8 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
}
static void intel_pre_enable_lvds(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -306,8 +306,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
* Sets the power state for the panel.
*/
static void intel_enable_lvds(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
@@ -324,8 +324,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
}
static void intel_disable_lvds(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -339,8 +339,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
}
static void gmch_disable_lvds(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
@@ -349,15 +349,15 @@ static void gmch_disable_lvds(struct intel_encoder *encoder,
}
static void pch_disable_lvds(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
}
static void pch_post_disable_lvds(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
}
@@ -880,8 +880,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
struct drm_i915_private *dev_priv = to_i915(dev);
/* use the module option value if specified */
- if (i915.lvds_channel_mode > 0)
- return i915.lvds_channel_mode == 2;
+ if (i915_modparams.lvds_channel_mode > 0)
+ return i915_modparams.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
@@ -939,10 +939,8 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
- struct intel_crtc *crtc;
i915_reg_t lvds_reg;
u32 lvds;
- int pipe;
u8 pin;
u32 allowed_scalers;
@@ -1113,22 +1111,11 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
* on. If so, assume that whatever is currently programmed is the
* correct mode.
*/
-
- /* Ironlake: FIXME if still fail, not try pipe mode now */
- if (HAS_PCH_SPLIT(dev_priv))
- goto failed;
-
- pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-
- if (crtc && (lvds & LVDS_PORT_EN)) {
- fixed_mode = intel_crtc_mode_get(dev, &crtc->base);
- if (fixed_mode) {
- DRM_DEBUG_KMS("using current (BIOS) mode: ");
- drm_mode_debug_printmodeline(fixed_mode);
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- goto out;
- }
+ fixed_mode = intel_encoder_current_mode(intel_encoder);
+ if (fixed_mode) {
+ DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_mode_debug_printmodeline(fixed_mode);
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
/* If we still don't have a mode after all that, give up. */
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 98154efcb2f4..1d946240e55f 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -921,7 +921,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
const struct firmware *fw = NULL;
- const char *name = i915.vbt_firmware;
+ const char *name = i915_modparams.vbt_firmware;
int ret;
if (!name || !*name)
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index aace22e7ccac..1b397b41cb4f 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1134,7 +1134,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
if (!params)
return -ENOMEM;
- drmmode_crtc = drm_crtc_find(dev, put_image_rec->crtc_id);
+ drmmode_crtc = drm_crtc_find(dev, file_priv, put_image_rec->crtc_id);
if (!drmmode_crtc) {
ret = -ENOENT;
goto out_free;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 3b1c5d783ee7..adc51e452e3e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -379,13 +379,13 @@ enum drm_connector_status
intel_panel_detect(struct drm_i915_private *dev_priv)
{
/* Assume that the BIOS does not lie through the OpRegion... */
- if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
+ if (!i915_modparams.panel_ignore_lid && dev_priv->opregion.lid_state) {
return *dev_priv->opregion.lid_state & 0x1 ?
connector_status_connected :
connector_status_disconnected;
}
- switch (i915.panel_ignore_lid) {
+ switch (i915_modparams.panel_ignore_lid) {
case -2:
return connector_status_connected;
case -1:
@@ -465,10 +465,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
WARN_ON(panel->backlight.max == 0);
- if (i915.invert_brightness < 0)
+ if (i915_modparams.invert_brightness < 0)
return val;
- if (i915.invert_brightness > 0 ||
+ if (i915_modparams.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
return panel->backlight.max - val + panel->backlight.min;
}
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 8fbd2bd0877f..899839f2f7c6 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -206,11 +206,11 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
static int display_crc_ctl_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
- int i;
+ enum pipe pipe;
- for (i = 0; i < I915_MAX_PIPES; i++)
- seq_printf(m, "%c %s\n", pipe_name(i),
- pipe_crc_source_name(dev_priv->pipe_crc[i].source));
+ for_each_pipe(dev_priv, pipe)
+ seq_printf(m, "%c %s\n", pipe_name(pipe),
+ pipe_crc_source_name(dev_priv->pipe_crc[pipe].source));
return 0;
}
@@ -506,8 +506,8 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
- bool enable)
+static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
+ bool enable)
{
struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
@@ -533,10 +533,24 @@ retry:
goto put_state;
}
- pipe_config->pch_pfit.force_thru = enable;
- if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
- pipe_config->pch_pfit.enabled != enable)
- pipe_config->base.connectors_changed = true;
+ if (HAS_IPS(dev_priv)) {
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ pipe_config->ips_force_disable = enable;
+ if (pipe_config->ips_enabled == enable)
+ pipe_config->base.connectors_changed = true;
+ }
+
+ if (IS_HASWELL(dev_priv)) {
+ pipe_config->pch_pfit.force_thru = enable;
+ if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
+ pipe_config->pch_pfit.enabled != enable)
+ pipe_config->base.connectors_changed = true;
+ }
ret = drm_atomic_commit(state);
@@ -570,8 +584,9 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_PF:
- if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
+ if ((IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
+ hsw_pipe_A_crc_wa(dev_priv, true);
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
@@ -606,7 +621,6 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
enum intel_pipe_crc_source source)
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
@@ -643,14 +657,6 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
goto out;
}
- /*
- * When IPS gets enabled, the pipe CRC changes. Since IPS gets
- * enabled and disabled dynamically based on package C states,
- * user space can't make reliable use of the CRCs, so let's just
- * completely disable it.
- */
- hsw_disable_ips(crtc);
-
spin_lock_irq(&pipe_crc->lock);
kfree(pipe_crc->entries);
pipe_crc->entries = entries;
@@ -691,10 +697,9 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
g4x_undo_pipe_scramble_reset(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_undo_pipe_scramble_reset(dev_priv, pipe);
- else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
-
- hsw_enable_ips(crtc);
+ else if ((IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
+ hsw_pipe_A_crc_wa(dev_priv, false);
}
ret = 0;
@@ -770,11 +775,12 @@ display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
return -EINVAL;
}
-static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
+static int display_crc_ctl_parse_pipe(struct drm_i915_private *dev_priv,
+ const char *buf, enum pipe *pipe)
{
const char name = buf[0];
- if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
+ if (name < 'A' || name >= pipe_name(INTEL_INFO(dev_priv)->num_pipes))
return -EINVAL;
*pipe = name - 'A';
@@ -823,7 +829,7 @@ static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
return -EINVAL;
}
- if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
+ if (display_crc_ctl_parse_pipe(dev_priv, words[1], &pipe) < 0) {
DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
return -EINVAL;
}
@@ -914,7 +920,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
u32 val = 0; /* shut up gcc */
@@ -935,16 +940,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
if (ret != 0)
goto out;
- if (source) {
- /*
- * When IPS gets enabled, the pipe CRC changes. Since IPS gets
- * enabled and disabled dynamically based on package C states,
- * user space can't make reliable use of the CRCs, so let's just
- * completely disable it.
- */
- hsw_disable_ips(intel_crtc);
- }
-
I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
POSTING_READ(PIPE_CRC_CTL(crtc->index));
@@ -953,10 +948,9 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
- else if (IS_HASWELL(dev_priv) && crtc->index == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
-
- hsw_enable_ips(intel_crtc);
+ else if ((IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A)
+ hsw_pipe_A_crc_wa(dev_priv, false);
}
pipe_crc->skipped = 0;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index cb950752c346..f4a4e9496893 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -58,24 +58,23 @@
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ if (HAS_LLC(dev_priv)) {
+ /*
+ * WaCompressedResourceDisplayNewHashMode:skl,kbl
+ * Display WA#0390: skl,kbl
+ *
+ * Must match Sampler, Pixel Back End, and Media. See
+ * WaCompressedResourceSamplerPbeMediaNewHashMode.
+ */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) |
+ SKL_DE_COMPRESSED_HASH_MODE);
+ }
+
/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
- /*
- * Display WA#0390: skl,bxt,kbl,glk
- *
- * Must match Sampler, Pixel Back End, and Media
- * (0xE194 bit 8, 0x7014 bit 13, 0x4DDC bits 27 and 31).
- *
- * Including bits outside the page in the hash would
- * require 2 (or 4?) MiB alignment of resources. Just
- * assume the defaul hashing mode which only uses bits
- * within the page.
- */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) & ~SKL_RC_HASH_OUTSIDE);
-
I915_WRITE(GEN8_CONFIG0,
I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
@@ -125,6 +124,7 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ u32 val;
gen9_init_clock_gating(dev_priv);
/*
@@ -144,6 +144,11 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(CHICKEN_MISC_2, val);
}
+ /* Display WA #1133: WaFbcSkipSegments:glk */
+ val = I915_READ(ILK_DPFC_CHICKEN);
+ val &= ~GLK_SKIP_SEG_COUNT_MASK;
+ val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
+ I915_WRITE(ILK_DPFC_CHICKEN, val);
}
static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
@@ -317,7 +322,7 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
u32 val;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if (enable)
@@ -332,14 +337,14 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
u32 val;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
if (enable)
@@ -348,7 +353,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
val &= ~DSP_MAXFIFO_PM5_ENABLE;
vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
#define FW_WM(value, plane) \
@@ -1322,21 +1327,21 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
int num_active_planes = hweight32(crtc_state->active_planes &
~BIT(PLANE_CURSOR));
const struct g4x_pipe_wm *raw;
- struct intel_plane_state *plane_state;
+ const struct intel_plane_state *old_plane_state;
+ const struct intel_plane_state *new_plane_state;
struct intel_plane *plane;
enum plane_id plane_id;
int i, level;
unsigned int dirty = 0;
- for_each_intel_plane_in_state(state, plane, plane_state, i) {
- const struct intel_plane_state *old_plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (plane_state->base.crtc != &crtc->base &&
+ for_each_oldnew_intel_plane_in_state(state, plane,
+ old_plane_state,
+ new_plane_state, i) {
+ if (new_plane_state->base.crtc != &crtc->base &&
old_plane_state->base.crtc != &crtc->base)
continue;
- if (g4x_raw_plane_wm_compute(crtc_state, plane_state))
+ if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
dirty |= BIT(plane->id);
}
@@ -1831,21 +1836,21 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
int num_active_planes = hweight32(crtc_state->active_planes &
~BIT(PLANE_CURSOR));
bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
- struct intel_plane_state *plane_state;
+ const struct intel_plane_state *old_plane_state;
+ const struct intel_plane_state *new_plane_state;
struct intel_plane *plane;
enum plane_id plane_id;
int level, ret, i;
unsigned int dirty = 0;
- for_each_intel_plane_in_state(state, plane, plane_state, i) {
- const struct intel_plane_state *old_plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (plane_state->base.crtc != &crtc->base &&
+ for_each_oldnew_intel_plane_in_state(state, plane,
+ old_plane_state,
+ new_plane_state, i) {
+ if (new_plane_state->base.crtc != &crtc->base &&
old_plane_state->base.crtc != &crtc->base)
continue;
- if (vlv_raw_plane_wm_compute(crtc_state, plane_state))
+ if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
dirty |= BIT(plane->id);
}
@@ -1864,7 +1869,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
/* cursor changes don't warrant a FIFO recompute */
if (dirty & ~BIT(PLANE_CURSOR)) {
const struct intel_crtc_state *old_crtc_state =
- to_intel_crtc_state(crtc->base.state);
+ intel_atomic_get_old_crtc_state(state, crtc);
const struct vlv_fifo_state *old_fifo_state =
&old_crtc_state->wm.vlv.fifo_state;
@@ -2785,11 +2790,11 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_read(dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
&val);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("SKL Mailbox read error = %d\n", ret);
@@ -2806,11 +2811,11 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_read(dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
&val);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
DRM_ERROR("SKL Mailbox read error = %d\n", ret);
return;
@@ -3119,7 +3124,11 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
struct intel_crtc_state *newstate)
{
struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
- struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(newstate->base.state);
+ const struct intel_crtc_state *oldstate =
+ intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
+ const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
int level, max_level = ilk_wm_max_level(to_i915(dev));
/*
@@ -3128,6 +3137,9 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* and after the vblank.
*/
*a = newstate->wm.ilk.optimal;
+ if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base))
+ return 0;
+
a->pipe_enabled |= b->pipe_enabled;
a->sprites_enabled |= b->sprites_enabled;
a->sprites_scaled |= b->sprites_scaled;
@@ -3594,13 +3606,13 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
return 0;
DRM_DEBUG_KMS("Enabling the SAGV\n");
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
/* We don't need to wait for the SAGV when enabling */
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
/*
* Some skl systems, pre-release machines in particular,
@@ -3631,14 +3643,14 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
return 0;
DRM_DEBUG_KMS("Disabling the SAGV\n");
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
/* bspec says to keep retrying for at least 1 ms */
ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_DISABLE,
GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
1);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
/*
* Some skl systems, pre-release machines in particular,
@@ -4361,134 +4373,147 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
downscale_amount);
}
-static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
- struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
- uint16_t ddb_allocation,
- int level,
- uint16_t *out_blocks, /* out */
- uint8_t *out_lines, /* out */
- bool *enabled /* out */)
+static int
+skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *cstate,
+ const struct intel_plane_state *intel_pstate,
+ struct skl_wm_params *wp)
{
struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
const struct drm_plane_state *pstate = &intel_pstate->base;
const struct drm_framebuffer *fb = pstate->fb;
- uint32_t latency = dev_priv->wm.skl_latency[level];
- uint_fixed_16_16_t method1, method2;
- uint_fixed_16_16_t plane_blocks_per_line;
- uint_fixed_16_16_t selected_result;
uint32_t interm_pbpl;
- uint32_t plane_bytes_per_line;
- uint32_t res_blocks, res_lines;
- uint8_t cpp;
- uint32_t width = 0;
- uint32_t plane_pixel_rate;
- uint_fixed_16_16_t y_tile_minimum;
- uint32_t y_min_scanlines;
struct intel_atomic_state *state =
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
- bool y_tiled, x_tiled;
- if (latency == 0 ||
- !intel_wm_plane_visible(cstate, intel_pstate)) {
- *enabled = false;
+ if (!intel_wm_plane_visible(cstate, intel_pstate))
return 0;
- }
-
- y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
-
- /* Display WA #1141: kbl,cfl */
- if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
- dev_priv->ipc_enabled)
- latency += 4;
- if (apply_memory_bw_wa && x_tiled)
- latency += 15;
+ wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
+ wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
if (plane->id == PLANE_CURSOR) {
- width = intel_pstate->base.crtc_w;
+ wp->width = intel_pstate->base.crtc_w;
} else {
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- width = drm_rect_width(&intel_pstate->base.src) >> 16;
+ wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
}
- cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] :
- fb->format->cpp[0];
- plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
+ wp->cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] :
+ fb->format->cpp[0];
+ wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
+ intel_pstate);
if (drm_rotation_90_or_270(pstate->rotation)) {
- switch (cpp) {
+ switch (wp->cpp) {
case 1:
- y_min_scanlines = 16;
+ wp->y_min_scanlines = 16;
break;
case 2:
- y_min_scanlines = 8;
+ wp->y_min_scanlines = 8;
break;
case 4:
- y_min_scanlines = 4;
+ wp->y_min_scanlines = 4;
break;
default:
- MISSING_CASE(cpp);
+ MISSING_CASE(wp->cpp);
return -EINVAL;
}
} else {
- y_min_scanlines = 4;
+ wp->y_min_scanlines = 4;
}
if (apply_memory_bw_wa)
- y_min_scanlines *= 2;
+ wp->y_min_scanlines *= 2;
- plane_bytes_per_line = width * cpp;
- if (y_tiled) {
- interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line *
- y_min_scanlines, 512);
+ wp->plane_bytes_per_line = wp->width * wp->cpp;
+ if (wp->y_tiled) {
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
+ wp->y_min_scanlines, 512);
if (INTEL_GEN(dev_priv) >= 10)
interm_pbpl++;
- plane_blocks_per_line = div_fixed16(interm_pbpl,
- y_min_scanlines);
- } else if (x_tiled && INTEL_GEN(dev_priv) == 9) {
- interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512);
- plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
+ wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
+ wp->y_min_scanlines);
+ } else if (wp->x_tiled && IS_GEN9(dev_priv)) {
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 512);
+ wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
} else {
- interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
- plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 512) + 1;
+ wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
+ }
+
+ wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
+ wp->plane_blocks_per_line);
+ wp->linetime_us = fixed16_to_u32_round_up(
+ intel_get_linetime_us(cstate));
+
+ return 0;
+}
+
+static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *cstate,
+ const struct intel_plane_state *intel_pstate,
+ uint16_t ddb_allocation,
+ int level,
+ const struct skl_wm_params *wp,
+ uint16_t *out_blocks, /* out */
+ uint8_t *out_lines, /* out */
+ bool *enabled /* out */)
+{
+ const struct drm_plane_state *pstate = &intel_pstate->base;
+ uint32_t latency = dev_priv->wm.skl_latency[level];
+ uint_fixed_16_16_t method1, method2;
+ uint_fixed_16_16_t selected_result;
+ uint32_t res_blocks, res_lines;
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(cstate->base.state);
+ bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
+
+ if (latency == 0 ||
+ !intel_wm_plane_visible(cstate, intel_pstate)) {
+ *enabled = false;
+ return 0;
}
- method1 = skl_wm_method1(dev_priv, plane_pixel_rate, cpp, latency);
- method2 = skl_wm_method2(plane_pixel_rate,
+ /* Display WA #1141: kbl,cfl */
+ if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
+ IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) &&
+ dev_priv->ipc_enabled)
+ latency += 4;
+
+ if (apply_memory_bw_wa && wp->x_tiled)
+ latency += 15;
+
+ method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
+ wp->cpp, latency);
+ method2 = skl_wm_method2(wp->plane_pixel_rate,
cstate->base.adjusted_mode.crtc_htotal,
latency,
- plane_blocks_per_line);
-
- y_tile_minimum = mul_u32_fixed16(y_min_scanlines,
- plane_blocks_per_line);
+ wp->plane_blocks_per_line);
- if (y_tiled) {
- selected_result = max_fixed16(method2, y_tile_minimum);
+ if (wp->y_tiled) {
+ selected_result = max_fixed16(method2, wp->y_tile_minimum);
} else {
- uint32_t linetime_us;
-
- linetime_us = fixed16_to_u32_round_up(
- intel_get_linetime_us(cstate));
- if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
- (plane_bytes_per_line / 512 < 1))
+ if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
+ 512 < 1) && (wp->plane_bytes_per_line / 512 < 1))
selected_result = method2;
else if (ddb_allocation >=
- fixed16_to_u32_round_up(plane_blocks_per_line))
+ fixed16_to_u32_round_up(wp->plane_blocks_per_line))
selected_result = min_fixed16(method1, method2);
- else if (latency >= linetime_us)
+ else if (latency >= wp->linetime_us)
selected_result = min_fixed16(method1, method2);
else
selected_result = method1;
@@ -4496,19 +4521,18 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
res_lines = div_round_up_fixed16(selected_result,
- plane_blocks_per_line);
+ wp->plane_blocks_per_line);
/* Display WA #1125: skl,bxt,kbl,glk */
- if (level == 0 &&
- (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS))
- res_blocks += fixed16_to_u32_round_up(y_tile_minimum);
+ if (level == 0 && wp->rc_surface)
+ res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
/* Display WA #1126: skl,bxt,kbl,glk */
if (level >= 1 && level <= 7) {
- if (y_tiled) {
- res_blocks += fixed16_to_u32_round_up(y_tile_minimum);
- res_lines += y_min_scanlines;
+ if (wp->y_tiled) {
+ res_blocks += fixed16_to_u32_round_up(
+ wp->y_tile_minimum);
+ res_lines += wp->y_min_scanlines;
} else {
res_blocks++;
}
@@ -4546,6 +4570,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb,
struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
+ const struct skl_wm_params *wm_params,
struct skl_plane_wm *wm)
{
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
@@ -4569,6 +4594,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
intel_pstate,
ddb_blocks,
level,
+ wm_params,
&result->plane_res_b,
&result->plane_res_l,
&result->plane_en);
@@ -4594,20 +4620,65 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
- /* Display WA #1135: bxt. */
- if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled)
- linetime_wm = DIV_ROUND_UP(linetime_wm, 2);
+ /* Display WA #1135: bxt:ALL GLK:ALL */
+ if ((IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv)) &&
+ dev_priv->ipc_enabled)
+ linetime_wm /= 2;
return linetime_wm;
}
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
+ struct skl_wm_params *wp,
+ struct skl_wm_level *wm_l0,
+ uint16_t ddb_allocation,
struct skl_wm_level *trans_wm /* out */)
{
+ struct drm_device *dev = cstate->base.crtc->dev;
+ const struct drm_i915_private *dev_priv = to_i915(dev);
+ uint16_t trans_min, trans_y_tile_min;
+ const uint16_t trans_amount = 10; /* This is a configurable amount */
+ uint16_t trans_offset_b, res_blocks;
+
if (!cstate->base.active)
+ goto exit;
+
+ /* Transition WMs are not recommended by the HW team for GEN9 */
+ if (INTEL_GEN(dev_priv) <= 9)
+ goto exit;
+
+ /* Transition WMs don't make any sense if IPC is disabled */
+ if (!dev_priv->ipc_enabled)
+ goto exit;
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ trans_min = 4;
+
+ trans_offset_b = trans_min + trans_amount;
+
+ if (wp->y_tiled) {
+ trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
+ wp->y_tile_minimum);
+ res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) +
+ trans_offset_b;
+ } else {
+ res_blocks = wm_l0->plane_res_b + trans_offset_b;
+
+ /* WA BUG:1938466 add one block for non y-tile planes */
+ if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
+ res_blocks += 1;
+
+ }
+
+ res_blocks += 1;
+
+ if (res_blocks < ddb_allocation) {
+ trans_wm->plane_res_b = res_blocks;
+ trans_wm->plane_en = true;
return;
+ }
- /* Until we know more, just disable transition WMs */
+exit:
trans_wm->plane_en = false;
}
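A worked example of the transition-watermark arithmetic introduced above, using the GEN10+ trans_min of 4 blocks and the configurable trans_amount of 10 blocks; the level-0 result, y_tile_minimum and DDB allocation are made-up inputs.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint16_t trans_min = 4, trans_amount = 10;        /* GEN10+ values above */
            const uint16_t trans_offset_b = trans_min + trans_amount;

            uint16_t wm_l0_blocks   = 31;   /* level-0 plane_res_b, illustrative */
            uint16_t y_tile_min     = 20;   /* y_tile_minimum, illustrative */
            uint16_t ddb_allocation = 96;

            /* Y-tiled: max(level-0, 2 * y_tile_minimum) + offset, plus one block */
            uint16_t y = 2 * y_tile_min > wm_l0_blocks ? 2 * y_tile_min : wm_l0_blocks;
            uint16_t res_y_tiled = y + trans_offset_b + 1;

            /* Linear/X-tiled: level-0 + offset, plus one block (ignoring the CNL A0 WA) */
            uint16_t res_linear = wm_l0_blocks + trans_offset_b + 1;

            printf("y-tiled: %u blocks, linear: %u blocks, enabled: %s\n",
                   res_y_tiled, res_linear,
                   res_y_tiled < ddb_allocation ? "yes" : "no");
            return 0;
    }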
@@ -4633,14 +4704,25 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate =
to_intel_plane_state(pstate);
enum plane_id plane_id = to_intel_plane(plane)->id;
+ struct skl_wm_params wm_params;
+ enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
+ uint16_t ddb_blocks;
wm = &pipe_wm->planes[plane_id];
+ ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
+ memset(&wm_params, 0, sizeof(struct skl_wm_params));
+
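+ /* Compute the plane's WM parameters once and reuse them for all levels and the transition WM */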
+ ret = skl_compute_plane_wm_params(dev_priv, cstate,
+ intel_pstate, &wm_params);
+ if (ret)
+ return ret;
ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
- intel_pstate, wm);
+ intel_pstate, &wm_params, wm);
if (ret)
return ret;
- skl_compute_transition_wm(cstate, &wm->trans_wm);
+ skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
+ ddb_blocks, &wm->trans_wm);
}
pipe_wm->linetime = skl_compute_linetime_wm(cstate);
@@ -4736,16 +4818,18 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
return a->start < b->end && b->start < a->end;
}
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
+bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
+ const struct skl_ddb_entry **entries,
const struct skl_ddb_entry *ddb,
int ignore)
{
- int i;
+ enum pipe pipe;
- for (i = 0; i < I915_MAX_PIPES; i++)
- if (i != ignore && entries[i] &&
- skl_ddb_entries_overlap(ddb, entries[i]))
+ for_each_pipe(dev_priv, pipe) {
+ if (pipe != ignore && entries[pipe] &&
+ skl_ddb_entries_overlap(ddb, entries[pipe]))
return true;
+ }
return false;
}
@@ -5535,7 +5619,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
wm->level = VLV_WM_LEVEL_PM2;
if (IS_CHERRYVIEW(dev_priv)) {
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
if (val & DSP_MAXFIFO_PM5_ENABLE)
@@ -5565,7 +5649,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
wm->level = VLV_WM_LEVEL_DDR_DVFS;
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
for_each_intel_crtc(dev, crtc) {
@@ -5669,12 +5753,30 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->wm.wm_mutex);
}
+/*
+ * FIXME should probably kill this and improve
+ * the real watermark readout/sanitation instead
+ */
+static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
+ I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
+ I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
+
+ /*
+ * Don't touch WM1S_LP_EN here.
+ * Doing so could cause underruns.
+ */
+}
+
void ilk_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct drm_crtc *crtc;
+ ilk_init_lp_watermarks(dev_priv);
+
for_each_crtc(dev, crtc)
ilk_pipe_wm_get_hw_state(crtc);
@@ -5739,6 +5841,36 @@ void intel_update_watermarks(struct intel_crtc *crtc)
dev_priv->display.update_wm(crtc);
}
+void intel_enable_ipc(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ /* Display WA #0477 WaDisableIPC: skl */
+ if (IS_SKYLAKE(dev_priv)) {
+ dev_priv->ipc_enabled = false;
+ return;
+ }
+
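+ /* Mirror the software ipc_enabled state into the DISP_ARB_CTL2 register */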
+ val = I915_READ(DISP_ARB_CTL2);
+
+ if (dev_priv->ipc_enabled)
+ val |= DISP_IPC_ENABLE;
+ else
+ val &= ~DISP_IPC_ENABLE;
+
+ I915_WRITE(DISP_ARB_CTL2, val);
+}
+
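+/* Enable IPC by default on all platforms that have it (modulo the SKL WA above) */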
+void intel_init_ipc(struct drm_i915_private *dev_priv)
+{
+ dev_priv->ipc_enabled = false;
+ if (!HAS_IPC(dev_priv))
+ return;
+
+ dev_priv->ipc_enabled = true;
+ intel_enable_ipc(dev_priv);
+}
+
/*
* Lock protecting IPS related data structures
*/
@@ -5872,6 +6004,7 @@ static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
*/
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 limits;
/* Only set the down limit when we've reached the lowest level to avoid
@@ -5881,13 +6014,13 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
if (INTEL_GEN(dev_priv) >= 9) {
- limits = (dev_priv->rps.max_freq_softlimit) << 23;
- if (val <= dev_priv->rps.min_freq_softlimit)
- limits |= (dev_priv->rps.min_freq_softlimit) << 14;
+ limits = (rps->max_freq_softlimit) << 23;
+ if (val <= rps->min_freq_softlimit)
+ limits |= (rps->min_freq_softlimit) << 14;
} else {
- limits = dev_priv->rps.max_freq_softlimit << 24;
- if (val <= dev_priv->rps.min_freq_softlimit)
- limits |= dev_priv->rps.min_freq_softlimit << 16;
+ limits = rps->max_freq_softlimit << 24;
+ if (val <= rps->min_freq_softlimit)
+ limits |= rps->min_freq_softlimit << 16;
}
return limits;
@@ -5895,39 +6028,40 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
int new_power;
u32 threshold_up = 0, threshold_down = 0; /* in % */
u32 ei_up = 0, ei_down = 0;
- new_power = dev_priv->rps.power;
- switch (dev_priv->rps.power) {
+ new_power = rps->power;
+ switch (rps->power) {
case LOW_POWER:
- if (val > dev_priv->rps.efficient_freq + 1 &&
- val > dev_priv->rps.cur_freq)
+ if (val > rps->efficient_freq + 1 &&
+ val > rps->cur_freq)
new_power = BETWEEN;
break;
case BETWEEN:
- if (val <= dev_priv->rps.efficient_freq &&
- val < dev_priv->rps.cur_freq)
+ if (val <= rps->efficient_freq &&
+ val < rps->cur_freq)
new_power = LOW_POWER;
- else if (val >= dev_priv->rps.rp0_freq &&
- val > dev_priv->rps.cur_freq)
+ else if (val >= rps->rp0_freq &&
+ val > rps->cur_freq)
new_power = HIGH_POWER;
break;
case HIGH_POWER:
- if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
- val < dev_priv->rps.cur_freq)
+ if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+ val < rps->cur_freq)
new_power = BETWEEN;
break;
}
/* Max/min bins are special */
- if (val <= dev_priv->rps.min_freq_softlimit)
+ if (val <= rps->min_freq_softlimit)
new_power = LOW_POWER;
- if (val >= dev_priv->rps.max_freq_softlimit)
+ if (val >= rps->max_freq_softlimit)
new_power = HIGH_POWER;
- if (new_power == dev_priv->rps.power)
+ if (new_power == rps->power)
return;
/* Note the units here are not exactly 1us, but 1280ns. */
@@ -5990,20 +6124,21 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
GEN6_RP_DOWN_IDLE_AVG);
skip_hw_write:
- dev_priv->rps.power = new_power;
- dev_priv->rps.up_threshold = threshold_up;
- dev_priv->rps.down_threshold = threshold_down;
- dev_priv->rps.last_adj = 0;
+ rps->power = new_power;
+ rps->up_threshold = threshold_up;
+ rps->down_threshold = threshold_down;
+ rps->last_adj = 0;
}
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 mask = 0;
/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
- if (val > dev_priv->rps.min_freq_softlimit)
+ if (val > rps->min_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
- if (val < dev_priv->rps.max_freq_softlimit)
+ if (val < rps->max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
mask &= dev_priv->pm_rps_events;
@@ -6016,10 +6151,12 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
* update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
/* min/max delay may still have been modified so be sure to
* write the limits value.
*/
- if (val != dev_priv->rps.cur_freq) {
+ if (val != rps->cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
if (INTEL_GEN(dev_priv) >= 9)
@@ -6041,7 +6178,7 @@ static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
- dev_priv->rps.cur_freq = val;
+ rps->cur_freq = val;
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
return 0;
@@ -6057,7 +6194,7 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
- if (val != dev_priv->rps.cur_freq) {
+ if (val != dev_priv->gt_pm.rps.cur_freq) {
err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
if (err)
return err;
@@ -6065,7 +6202,7 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
gen6_set_rps_thresholds(dev_priv, val);
}
- dev_priv->rps.cur_freq = val;
+ dev_priv->gt_pm.rps.cur_freq = val;
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
return 0;
@@ -6080,10 +6217,11 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
*/
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
- u32 val = dev_priv->rps.idle_freq;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ u32 val = rps->idle_freq;
int err;
- if (dev_priv->rps.cur_freq <= val)
+ if (rps->cur_freq <= val)
return;
/* The punit delays the write of the frequency and voltage until it
@@ -6108,34 +6246,38 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->rps.hw_lock);
- if (dev_priv->rps.enabled) {
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ mutex_lock(&dev_priv->pcu_lock);
+ if (rps->enabled) {
u8 freq;
if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
- gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+ gen6_rps_pm_mask(dev_priv, rps->cur_freq));
gen6_enable_rps_interrupts(dev_priv);
/* Use the user's desired frequency as a guide, but for better
* performance, jump directly to RPe as our starting frequency.
*/
- freq = max(dev_priv->rps.cur_freq,
- dev_priv->rps.efficient_freq);
+ freq = max(rps->cur_freq,
+ rps->efficient_freq);
if (intel_set_rps(dev_priv,
clamp(freq,
- dev_priv->rps.min_freq_softlimit,
- dev_priv->rps.max_freq_softlimit)))
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit)))
DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
/* Flush our bottom-half so that it does not race with us
* setting the idle frequency and so that it is bounded by
* our rpm wakeref. And then disable the interrupts to stop any
@@ -6143,58 +6285,60 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
*/
gen6_disable_rps_interrupts(dev_priv);
- mutex_lock(&dev_priv->rps.hw_lock);
- if (dev_priv->rps.enabled) {
+ mutex_lock(&dev_priv->pcu_lock);
+ if (rps->enabled) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_set_rps_idle(dev_priv);
else
- gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
- dev_priv->rps.last_adj = 0;
+ gen6_set_rps(dev_priv, rps->idle_freq);
+ rps->last_adj = 0;
I915_WRITE(GEN6_PMINTRMSK,
gen6_sanitize_rps_pm_mask(dev_priv, ~0));
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
void gen6_rps_boost(struct drm_i915_gem_request *rq,
- struct intel_rps_client *rps)
+ struct intel_rps_client *rps_client)
{
- struct drm_i915_private *i915 = rq->i915;
+ struct intel_rps *rps = &rq->i915->gt_pm.rps;
+ unsigned long flags;
bool boost;
/* This is intentionally racy! We peek at the state here, then
* validate inside the RPS worker.
*/
- if (!i915->rps.enabled)
+ if (!rps->enabled)
return;
boost = false;
- spin_lock_irq(&rq->lock);
+ spin_lock_irqsave(&rq->lock, flags);
if (!rq->waitboost && !i915_gem_request_completed(rq)) {
- atomic_inc(&i915->rps.num_waiters);
+ atomic_inc(&rps->num_waiters);
rq->waitboost = true;
boost = true;
}
- spin_unlock_irq(&rq->lock);
+ spin_unlock_irqrestore(&rq->lock, flags);
if (!boost)
return;
- if (READ_ONCE(i915->rps.cur_freq) < i915->rps.boost_freq)
- schedule_work(&i915->rps.work);
+ if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
+ schedule_work(&rps->work);
- atomic_inc(rps ? &rps->boosts : &i915->rps.boosts);
+ atomic_inc(rps_client ? &rps_client->boosts : &rps->boosts);
}
int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
int err;
- lockdep_assert_held(&dev_priv->rps.hw_lock);
- GEM_BUG_ON(val > dev_priv->rps.max_freq);
- GEM_BUG_ON(val < dev_priv->rps.min_freq);
+ lockdep_assert_held(&dev_priv->pcu_lock);
+ GEM_BUG_ON(val > rps->max_freq);
+ GEM_BUG_ON(val < rps->min_freq);
- if (!dev_priv->rps.enabled) {
- dev_priv->rps.cur_freq = val;
+ if (!rps->enabled) {
+ rps->cur_freq = val;
return 0;
}
@@ -6217,21 +6361,30 @@ static void gen9_disable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_CONTROL, 0);
}
-static void gen6_disable_rps(struct drm_i915_private *dev_priv)
+static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
{
I915_WRITE(GEN6_RC_CONTROL, 0);
+}
+
+static void gen6_disable_rps(struct drm_i915_private *dev_priv)
+{
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_RP_CONTROL, 0);
}
-static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
+static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
{
I915_WRITE(GEN6_RC_CONTROL, 0);
}
-static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
+static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
{
- /* we're doing forcewake before Disabling RC6,
+ I915_WRITE(GEN6_RP_CONTROL, 0);
+}
+
+static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
+{
+ /* We're doing forcewake before disabling RC6,
* which is what the BIOS expects when going into suspend */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -6240,6 +6393,11 @@ static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
+static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(GEN6_RP_CONTROL, 0);
+}
+
static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
{
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -6362,24 +6520,26 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
/* All of these values are in units of 50MHz */
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
if (IS_GEN9_LP(dev_priv)) {
u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
- dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
- dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
- dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
+ rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
+ rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ rps->min_freq = (rp_state_cap >> 0) & 0xff;
} else {
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
- dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
- dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
+ rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
+ rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ rps->min_freq = (rp_state_cap >> 16) & 0xff;
}
/* hw_max = RP0 until we check for overclocking */
- dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
+ rps->max_freq = rps->rp0_freq;
- dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+ rps->efficient_freq = rps->rp1_freq;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
u32 ddcc_status = 0;
@@ -6387,33 +6547,34 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
if (sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status) == 0)
- dev_priv->rps.efficient_freq =
+ rps->efficient_freq =
clamp_t(u8,
((ddcc_status >> 8) & 0xff),
- dev_priv->rps.min_freq,
- dev_priv->rps.max_freq);
+ rps->min_freq,
+ rps->max_freq);
}
if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Store the frequency values in 16.66 MHZ units, which is
* the natural hardware unit for SKL
*/
- dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
- dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
- dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
- dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
- dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
+ rps->rp0_freq *= GEN9_FREQ_SCALER;
+ rps->rp1_freq *= GEN9_FREQ_SCALER;
+ rps->min_freq *= GEN9_FREQ_SCALER;
+ rps->max_freq *= GEN9_FREQ_SCALER;
+ rps->efficient_freq *= GEN9_FREQ_SCALER;
}
}
static void reset_rps(struct drm_i915_private *dev_priv,
int (*set)(struct drm_i915_private *, u8))
{
- u8 freq = dev_priv->rps.cur_freq;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ u8 freq = rps->cur_freq;
/* force a reset */
- dev_priv->rps.power = -1;
- dev_priv->rps.cur_freq = -1;
+ rps->power = -1;
+ rps->cur_freq = -1;
if (set(dev_priv, freq))
DRM_ERROR("Failed to reset RPS to initial values\n");
@@ -6426,7 +6587,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
/* Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
+ GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
/* 1 second timeout*/
I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
@@ -6446,7 +6607,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- uint32_t rc6_mask = 0;
+ u32 rc6_mode, rc6_mask = 0;
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
@@ -6480,12 +6641,19 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
/* 3a: Enable RC6 */
- if (intel_enable_rc6() & INTEL_RC6_ENABLE)
+ if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+
+ /* WaRsUseTimeoutMode:cnl (pre-prod) */
+ if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
+ rc6_mode = GEN7_RC_CTL_TO_MODE;
+ else
+ rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_EI_MODE(1) | rc6_mask);
+ GEN6_RC_CTL_HW_ENABLE | rc6_mode | rc6_mask);
/*
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
@@ -6500,7 +6668,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void gen8_enable_rps(struct drm_i915_private *dev_priv)
+static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -6509,7 +6677,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
- /* 1c & 1d: Get forcewake during program sequence. Although the driver
+ /* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -6523,36 +6691,38 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
- if (IS_BROADWELL(dev_priv))
- I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
- else
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+ I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
/* 3: Enable RC6 */
- if (intel_enable_rc6() & INTEL_RC6_ENABLE)
+ if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
intel_print_rc6_info(dev_priv, rc6_mask);
- if (IS_BROADWELL(dev_priv))
- I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
- GEN7_RC_CTL_TO_MODE |
- rc6_mask);
- else
- I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_EI_MODE(1) |
- rc6_mask);
- /* 4 Program defaults and thresholds for RPS*/
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN7_RC_CTL_TO_MODE |
+ rc6_mask);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void gen8_enable_rps(struct drm_i915_private *dev_priv)
+{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* 1: Program defaults and thresholds for RPS */
I915_WRITE(GEN6_RPNSWREQ,
- HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+ HSW_FREQUENCY(rps->rp1_freq));
I915_WRITE(GEN6_RC_VIDEO_FREQ,
- HSW_FREQUENCY(dev_priv->rps.rp1_freq));
+ HSW_FREQUENCY(rps->rp1_freq));
/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
/* Docs recommend 900MHz, and 300 MHz respectively */
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- dev_priv->rps.max_freq_softlimit << 24 |
- dev_priv->rps.min_freq_softlimit << 16);
+ rps->max_freq_softlimit << 24 |
+ rps->min_freq_softlimit << 16);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
@@ -6561,7 +6731,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- /* 5: Enable RPS */
+ /* 2: Enable RPS */
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
@@ -6570,14 +6740,12 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
- /* 6: Ring frequency + overclocking (our driver does this later */
-
reset_rps(dev_priv, gen6_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void gen6_enable_rps(struct drm_i915_private *dev_priv)
+static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -6586,14 +6754,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
int rc6_mode;
int ret;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-
- /* Here begins a magic sequence of register writes to enable
- * auto-downclocking.
- *
- * Perhaps there might be some value in exposing these to
- * userspace...
- */
I915_WRITE(GEN6_RC_STATE, 0);
/* Clear the DBG now so we don't confuse earlier errors */
@@ -6627,7 +6787,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
/* Check if we are enabling RC6 */
- rc6_mode = intel_enable_rc6();
+ rc6_mode = intel_rc6_enabled();
if (rc6_mode & INTEL_RC6_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
@@ -6647,12 +6807,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
GEN6_RC_CTL_EI_MODE(1) |
GEN6_RC_CTL_HW_ENABLE);
- /* Power down if completely idle for over 50ms */
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- reset_rps(dev_priv, gen6_set_rps);
-
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
if (IS_GEN6(dev_priv) && ret) {
@@ -6670,8 +6824,28 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
+static void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+ /* Here begins a magic sequence of register writes to enable
+ * auto-downclocking.
+ *
+ * Perhaps there might be some value in exposing these to
+ * userspace...
+ */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* Power down if completely idle for over 50ms */
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ reset_rps(dev_priv, gen6_set_rps);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
int min_freq = 15;
unsigned int gpu_freq;
unsigned int max_ia_freq, min_ring_freq;
@@ -6679,7 +6853,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
int scaling_factor = 180;
struct cpufreq_policy *policy;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
policy = cpufreq_cpu_get(0);
if (policy) {
@@ -6702,11 +6876,11 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Convert GT frequency to 50 HZ units */
- min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
- max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
+ min_gpu_freq = rps->min_freq / GEN9_FREQ_SCALER;
+ max_gpu_freq = rps->max_freq / GEN9_FREQ_SCALER;
} else {
- min_gpu_freq = dev_priv->rps.min_freq;
- max_gpu_freq = dev_priv->rps.max_freq;
+ min_gpu_freq = rps->min_freq;
+ max_gpu_freq = rps->max_freq;
}
/*
@@ -6957,17 +7131,18 @@ static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
{
- dev_priv->rps.gpll_ref_freq =
+ dev_priv->gt_pm.rps.gpll_ref_freq =
vlv_get_cck_clock(dev_priv, "GPLL ref",
CCK_GPLL_CLOCK_CONTROL,
dev_priv->czclk_freq);
DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
- dev_priv->rps.gpll_ref_freq);
+ dev_priv->gt_pm.rps.gpll_ref_freq);
}
static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val;
valleyview_setup_pctx(dev_priv);
@@ -6989,30 +7164,31 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
- dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
- dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+ rps->max_freq = valleyview_rps_max_freq(dev_priv);
+ rps->rp0_freq = rps->max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
- dev_priv->rps.max_freq);
+ intel_gpu_freq(dev_priv, rps->max_freq),
+ rps->max_freq);
- dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
+ rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
- dev_priv->rps.efficient_freq);
+ intel_gpu_freq(dev_priv, rps->efficient_freq),
+ rps->efficient_freq);
- dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
+ rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
- dev_priv->rps.rp1_freq);
+ intel_gpu_freq(dev_priv, rps->rp1_freq),
+ rps->rp1_freq);
- dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
+ rps->min_freq = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
- dev_priv->rps.min_freq);
+ intel_gpu_freq(dev_priv, rps->min_freq),
+ rps->min_freq);
}
static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 val;
cherryview_setup_pctx(dev_priv);
@@ -7033,31 +7209,29 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
- dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
- dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+ rps->max_freq = cherryview_rps_max_freq(dev_priv);
+ rps->rp0_freq = rps->max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
- dev_priv->rps.max_freq);
+ intel_gpu_freq(dev_priv, rps->max_freq),
+ rps->max_freq);
- dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+ rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
- dev_priv->rps.efficient_freq);
+ intel_gpu_freq(dev_priv, rps->efficient_freq),
+ rps->efficient_freq);
- dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
+ rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
- dev_priv->rps.rp1_freq);
+ intel_gpu_freq(dev_priv, rps->rp1_freq),
+ rps->rp1_freq);
- dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+ rps->min_freq = cherryview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
- dev_priv->rps.min_freq);
+ intel_gpu_freq(dev_priv, rps->min_freq),
+ rps->min_freq);
- WARN_ONCE((dev_priv->rps.max_freq |
- dev_priv->rps.efficient_freq |
- dev_priv->rps.rp1_freq |
- dev_priv->rps.min_freq) & 1,
+ WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
+ rps->min_freq) & 1,
"Odd GPU freq values\n");
}
@@ -7066,13 +7240,11 @@ static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
valleyview_cleanup_pctx(dev_priv);
}
-static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
+static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 gtfifodbg, val, rc6_mode = 0, pcbr;
-
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ u32 gtfifodbg, rc6_mode = 0, pcbr;
gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
GT_FIFO_FREE_ENTRIES_CHV);
@@ -7103,7 +7275,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
- /* allows RC6 residency counter to work */
+ /* Allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
VLV_MEDIA_RC6_COUNT_EN |
@@ -7113,13 +7285,22 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
pcbr = I915_READ(VLV_PCBR);
/* 3: Enable RC6 */
- if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
+ if ((intel_rc6_enabled() & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
- /* 4 Program defaults and thresholds for RPS*/
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* 1: Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -7128,7 +7309,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- /* 5: Enable RPS */
+ /* 2: Enable RPS */
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
@@ -7155,13 +7336,11 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
+static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 gtfifodbg, val, rc6_mode = 0;
-
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ u32 gtfifodbg, rc6_mode = 0;
valleyview_check_pctx(dev_priv);
@@ -7172,28 +7351,11 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- /* If VLV, Forcewake all wells, else re-direct to regular path */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
- I915_WRITE(GEN6_RP_UP_EI, 66000);
- I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_CONT);
-
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
@@ -7203,7 +7365,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
- /* allows RC6 residency counter to work */
+ /* Allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
VLV_MEDIA_RC0_COUNT_EN |
@@ -7211,13 +7373,38 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
- if (intel_enable_rc6() & INTEL_RC6_ENABLE)
+ if (intel_rc6_enabled() & INTEL_RC6_ENABLE)
rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
intel_print_rc6_info(dev_priv, rc6_mode);
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+ I915_WRITE(GEN6_RP_UP_EI, 66000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
+
/* Setting Fixed Bias */
val = VLV_OVERRIDE_EN |
VLV_SOC_TDP_EN |
@@ -7425,7 +7612,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
lockdep_assert_held(&mchdev_lock);
- pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
+ pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);
@@ -7712,17 +7899,19 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
/*
* RPM depends on RC6 to save restore the GT HW context, so make RC6 a
* requirement.
*/
- if (!i915.enable_rc6) {
+ if (!i915_modparams.enable_rc6) {
DRM_INFO("RC6 disabled, disabling runtime PM support\n");
intel_runtime_pm_get(dev_priv);
}
mutex_lock(&dev_priv->drm.struct_mutex);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
/* Initialize RPS limits (for userspace) */
if (IS_CHERRYVIEW(dev_priv))
@@ -7733,16 +7922,16 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
gen6_init_rps_frequencies(dev_priv);
/* Derive initial user preferences/limits from the hardware limits */
- dev_priv->rps.idle_freq = dev_priv->rps.min_freq;
- dev_priv->rps.cur_freq = dev_priv->rps.idle_freq;
+ rps->idle_freq = rps->min_freq;
+ rps->cur_freq = rps->idle_freq;
- dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
- dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+ rps->max_freq_softlimit = rps->max_freq;
+ rps->min_freq_softlimit = rps->min_freq;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- dev_priv->rps.min_freq_softlimit =
+ rps->min_freq_softlimit =
max_t(int,
- dev_priv->rps.efficient_freq,
+ rps->efficient_freq,
intel_freq_opcode(dev_priv, 450));
/* After setting max-softlimit, find the overclock max freq */
@@ -7753,16 +7942,16 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
if (params & BIT(31)) { /* OC supported */
DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
- (dev_priv->rps.max_freq & 0xff) * 50,
+ (rps->max_freq & 0xff) * 50,
(params & 0xff) * 50);
- dev_priv->rps.max_freq = params & 0xff;
+ rps->max_freq = params & 0xff;
}
}
/* Finally allow us to boost to max by default */
- dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
+ rps->boost_freq = rps->max_freq;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_autoenable_gt_powersave(dev_priv);
@@ -7773,7 +7962,7 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv))
valleyview_cleanup_gt_powersave(dev_priv);
- if (!i915.enable_rc6)
+ if (!i915_modparams.enable_rc6)
intel_runtime_pm_put(dev_priv);
}
@@ -7790,7 +7979,7 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) < 6)
return;
- if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work))
+ if (cancel_delayed_work_sync(&dev_priv->gt_pm.autoenable_work))
intel_runtime_pm_put(dev_priv);
/* gen6_rps_idle() will be called later to disable interrupts */
@@ -7798,90 +7987,168 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
- dev_priv->rps.enabled = true; /* force disabling */
+ dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
+ dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
intel_disable_gt_powersave(dev_priv);
gen6_reset_rps_interrupts(dev_priv);
}
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
+static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
{
- if (!READ_ONCE(dev_priv->rps.enabled))
+ lockdep_assert_held(&i915->pcu_lock);
+
+ if (!i915->gt_pm.llc_pstate.enabled)
return;
- mutex_lock(&dev_priv->rps.hw_lock);
+ /* Currently there is no HW configuration needed to disable this. */
- if (INTEL_GEN(dev_priv) >= 9) {
+ i915->gt_pm.llc_pstate.enabled = false;
+}
+
+static void intel_disable_rc6(struct drm_i915_private *dev_priv)
+{
+ lockdep_assert_held(&dev_priv->pcu_lock);
+
+ if (!dev_priv->gt_pm.rc6.enabled)
+ return;
+
+ if (INTEL_GEN(dev_priv) >= 9)
gen9_disable_rc6(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv))
+ cherryview_disable_rc6(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_disable_rc6(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ gen6_disable_rc6(dev_priv);
+
+ dev_priv->gt_pm.rc6.enabled = false;
+}
+
+static void intel_disable_rps(struct drm_i915_private *dev_priv)
+{
+ lockdep_assert_held(&dev_priv->pcu_lock);
+
+ if (!dev_priv->gt_pm.rps.enabled)
+ return;
+
+ if (INTEL_GEN(dev_priv) >= 9)
gen9_disable_rps(dev_priv);
- } else if (IS_CHERRYVIEW(dev_priv)) {
+ else if (IS_CHERRYVIEW(dev_priv))
cherryview_disable_rps(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ else if (IS_VALLEYVIEW(dev_priv))
valleyview_disable_rps(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ else if (INTEL_GEN(dev_priv) >= 6)
gen6_disable_rps(dev_priv);
- } else if (IS_IRONLAKE_M(dev_priv)) {
+ else if (IS_IRONLAKE_M(dev_priv))
ironlake_disable_drps(dev_priv);
- }
- dev_priv->rps.enabled = false;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ dev_priv->gt_pm.rps.enabled = false;
}
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
- /* We shouldn't be disabling as we submit, so this should be less
- * racy than it appears!
- */
- if (READ_ONCE(dev_priv->rps.enabled))
+ mutex_lock(&dev_priv->pcu_lock);
+
+ intel_disable_rc6(dev_priv);
+ intel_disable_rps(dev_priv);
+ if (HAS_LLC(dev_priv))
+ intel_disable_llc_pstate(dev_priv);
+
+ mutex_unlock(&dev_priv->pcu_lock);
+}
+
+static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
+{
+ lockdep_assert_held(&i915->pcu_lock);
+
+ if (i915->gt_pm.llc_pstate.enabled)
return;
- /* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(dev_priv))
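+ /* Program the ring frequency table; only relevant when the GT shares the LLC with the CPU */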
+ gen6_update_ring_freq(i915);
+
+ i915->gt_pm.llc_pstate.enabled = true;
+}
+
+static void intel_enable_rc6(struct drm_i915_private *dev_priv)
+{
+ lockdep_assert_held(&dev_priv->pcu_lock);
+
+ if (dev_priv->gt_pm.rc6.enabled)
return;
- mutex_lock(&dev_priv->rps.hw_lock);
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_enable_rc6(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_enable_rc6(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 9)
+ gen9_enable_rc6(dev_priv);
+ else if (IS_BROADWELL(dev_priv))
+ gen8_enable_rc6(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ gen6_enable_rc6(dev_priv);
+
+ dev_priv->gt_pm.rc6.enabled = true;
+}
+
+static void intel_enable_rps(struct drm_i915_private *dev_priv)
+{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ lockdep_assert_held(&dev_priv->pcu_lock);
+
+ if (rps->enabled)
+ return;
if (IS_CHERRYVIEW(dev_priv)) {
cherryview_enable_rps(dev_priv);
} else if (IS_VALLEYVIEW(dev_priv)) {
valleyview_enable_rps(dev_priv);
} else if (INTEL_GEN(dev_priv) >= 9) {
- gen9_enable_rc6(dev_priv);
gen9_enable_rps(dev_priv);
- if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
- gen6_update_ring_freq(dev_priv);
} else if (IS_BROADWELL(dev_priv)) {
gen8_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
} else if (INTEL_GEN(dev_priv) >= 6) {
gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
} else if (IS_IRONLAKE_M(dev_priv)) {
ironlake_enable_drps(dev_priv);
intel_init_emon(dev_priv);
}
- WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
- WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);
+ WARN_ON(rps->max_freq < rps->min_freq);
+ WARN_ON(rps->idle_freq > rps->max_freq);
+
+ WARN_ON(rps->efficient_freq < rps->min_freq);
+ WARN_ON(rps->efficient_freq > rps->max_freq);
+
+ rps->enabled = true;
+}
+
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
+{
+ /* Powersaving is controlled by the host when inside a VM */
+ if (intel_vgpu_active(dev_priv))
+ return;
+
+ mutex_lock(&dev_priv->pcu_lock);
- WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
- WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
+ intel_enable_rc6(dev_priv);
+ intel_enable_rps(dev_priv);
+ if (HAS_LLC(dev_priv))
+ intel_enable_llc_pstate(dev_priv);
- dev_priv->rps.enabled = true;
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
static void __intel_autoenable_gt_powersave(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
+ container_of(work,
+ typeof(*dev_priv),
+ gt_pm.autoenable_work.work);
struct intel_engine_cs *rcs;
struct drm_i915_gem_request *req;
- if (READ_ONCE(dev_priv->rps.enabled))
- goto out;
-
rcs = dev_priv->engine[RCS];
if (rcs->last_retired_context)
goto out;
@@ -7895,7 +8162,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
if (IS_ERR(req))
goto unlock;
- if (!i915.enable_execlists && i915_switch_context(req) == 0)
+ if (!i915_modparams.enable_execlists && i915_switch_context(req) == 0)
rcs->init_context(req);
/* Mark the device busy, calling intel_enable_gt_powersave() */
@@ -7909,9 +8176,6 @@ out:
void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
{
- if (READ_ONCE(dev_priv->rps.enabled))
- return;
-
if (IS_IRONLAKE_M(dev_priv)) {
ironlake_enable_drps(dev_priv);
intel_init_emon(dev_priv);
@@ -7929,7 +8193,7 @@ void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
* runtime resume it's necessary).
*/
if (queue_delayed_work(dev_priv->wq,
- &dev_priv->rps.autoenable_work,
+ &dev_priv->gt_pm.autoenable_work,
round_jiffies_up_relative(HZ)))
intel_runtime_pm_get_noresume(dev_priv);
}
@@ -7959,19 +8223,7 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
}
}
-static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
- I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
- I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
-
- /*
- * Don't touch WM1S_LP_EN here.
- * Doing so could cause underruns.
- */
-}
-
-static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
+static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
@@ -8004,8 +8256,6 @@ static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
- ilk_init_lp_watermarks(dev_priv);
-
/*
* Based on the document from hardware guys the following bits
* should be set unconditionally in order to enable FBC.
@@ -8118,8 +8368,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_GT_MODE,
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
- ilk_init_lp_watermarks(dev_priv);
-
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
@@ -8257,7 +8505,57 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
-static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
+static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_PCH_CNP(dev_priv))
+ return;
+
+ /* Wa #1181 */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
+ CNP_PWM_CGE_GATING_DISABLE);
+}
+
+static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+ cnp_init_clock_gating(dev_priv);
+
+ /* This is not a WA. Enable for better image quality. */
+ I915_WRITE(_3D_CHICKEN3,
+ _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+
+ /* WaEnableChickenDCPR:cnl */
+ I915_WRITE(GEN8_CHICKEN_DCPR_1,
+ I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+
+ /* WaFbcWakeMemOn:cnl */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_MEMORY_WAKE);
+
+ /* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
+ if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
+ I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
+ I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
+ SARBUNIT_CLKGATE_DIS);
+
+ /* Display WA #1133: WaFbcSkipSegments:cnl */
+ val = I915_READ(ILK_DPFC_CHICKEN);
+ val &= ~GLK_SKIP_SEG_COUNT_MASK;
+ val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
+ I915_WRITE(ILK_DPFC_CHICKEN, val);
+}
+
+static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ cnp_init_clock_gating(dev_priv);
+ gen9_init_clock_gating(dev_priv);
+
+ /* WaFbcNukeOnHostModify:cfl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
+static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
{
gen9_init_clock_gating(dev_priv);
@@ -8271,12 +8569,12 @@ static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
- /* WaFbcNukeOnHostModify:kbl,cfl */
+ /* WaFbcNukeOnHostModify:kbl */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
-static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
+static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
{
gen9_init_clock_gating(dev_priv);
@@ -8289,12 +8587,13 @@ static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
-static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
+static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
+ /* The GTT cache must be disabled if the system is using 2M pages. */
+ bool can_use_gtt_cache = !HAS_PAGE_SIZES(dev_priv,
+ I915_GTT_PAGE_SIZE_2M);
enum pipe pipe;
- ilk_init_lp_watermarks(dev_priv);
-
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -8325,12 +8624,8 @@ static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
/* WaProgramL3SqcReg1Default:bdw */
gen8_set_l3sqc_credits(dev_priv, 30, 2);
- /*
- * WaGttCachingOffByDefault:bdw
- * GTT cache may not work with big pages, so if those
- * are ever enabled GTT cache may need to be disabled.
- */
- I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
+ /* WaGttCachingOffByDefault:bdw */
+ I915_WRITE(HSW_GTT_CACHE_EN, can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
/* WaKVMNotificationOnConfigChange:bdw */
I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
@@ -8347,10 +8642,8 @@ static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
-static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
+static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
- ilk_init_lp_watermarks(dev_priv);
-
/* L3 caching of data atomics doesn't work -- disable it. */
I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
I915_WRITE(HSW_ROW_CHICKEN3,
@@ -8394,19 +8687,13 @@ static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
- /* WaRsPkgCStateDisplayPMReq:hsw */
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
-
lpt_init_clock_gating(dev_priv);
}
-static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
+static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
uint32_t snpcr;
- ilk_init_lp_watermarks(dev_priv);
-
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
/* WaDisableEarlyCull:ivb */
@@ -8498,7 +8785,7 @@ static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
gen6_check_mch_setup(dev_priv);
}
-static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
+static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
@@ -8578,7 +8865,7 @@ static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
-static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
+static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
@@ -8638,7 +8925,7 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
g4x_disable_trickle_feed(dev_priv);
}
-static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
+static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
{
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
@@ -8652,7 +8939,7 @@ static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
-static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
+static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
@@ -8737,34 +9024,38 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_SKYLAKE(dev_priv))
- dev_priv->display.init_clock_gating = skylake_init_clock_gating;
- else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
- dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
+ if (IS_CANNONLAKE(dev_priv))
+ dev_priv->display.init_clock_gating = cnl_init_clock_gating;
+ else if (IS_COFFEELAKE(dev_priv))
+ dev_priv->display.init_clock_gating = cfl_init_clock_gating;
+ else if (IS_SKYLAKE(dev_priv))
+ dev_priv->display.init_clock_gating = skl_init_clock_gating;
+ else if (IS_KABYLAKE(dev_priv))
+ dev_priv->display.init_clock_gating = kbl_init_clock_gating;
else if (IS_BROXTON(dev_priv))
dev_priv->display.init_clock_gating = bxt_init_clock_gating;
else if (IS_GEMINILAKE(dev_priv))
dev_priv->display.init_clock_gating = glk_init_clock_gating;
else if (IS_BROADWELL(dev_priv))
- dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
+ dev_priv->display.init_clock_gating = bdw_init_clock_gating;
else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
+ dev_priv->display.init_clock_gating = chv_init_clock_gating;
else if (IS_HASWELL(dev_priv))
- dev_priv->display.init_clock_gating = haswell_init_clock_gating;
+ dev_priv->display.init_clock_gating = hsw_init_clock_gating;
else if (IS_IVYBRIDGE(dev_priv))
- dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.init_clock_gating = ivb_init_clock_gating;
else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
+ dev_priv->display.init_clock_gating = vlv_init_clock_gating;
else if (IS_GEN6(dev_priv))
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
else if (IS_GEN5(dev_priv))
- dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+ dev_priv->display.init_clock_gating = ilk_init_clock_gating;
else if (IS_G4X(dev_priv))
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
else if (IS_I965GM(dev_priv))
- dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+ dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
else if (IS_I965G(dev_priv))
- dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+ dev_priv->display.init_clock_gating = i965g_init_clock_gating;
else if (IS_GEN3(dev_priv))
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
@@ -8907,7 +9198,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
{
int status;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
/* GEN6_PCODE_* are outside of the forcewake domain, we can
* use the fw I915_READ variants to reduce the amount of work
@@ -8954,7 +9245,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
{
int status;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
/* GEN6_PCODE_* are outside of the forcewake domain, we can
* use the fw I915_READ variants to reduce the amount of work
@@ -9031,7 +9322,7 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
u32 status;
int ret;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
&status)
@@ -9073,31 +9364,39 @@ out:
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
/*
* N = val - 0xb7
* Slow = Fast = GPLL ref * N
*/
- return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
+ return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+ return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
/*
* N = val / 2
* CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
*/
- return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
+ return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
+ struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
/* CHV needs even values */
- return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
+ return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
@@ -9126,53 +9425,16 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
-struct request_boost {
- struct work_struct work;
- struct drm_i915_gem_request *req;
-};
-
-static void __intel_rps_boost_work(struct work_struct *work)
-{
- struct request_boost *boost = container_of(work, struct request_boost, work);
- struct drm_i915_gem_request *req = boost->req;
-
- if (!i915_gem_request_completed(req))
- gen6_rps_boost(req, NULL);
-
- i915_gem_request_put(req);
- kfree(boost);
-}
-
-void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
-{
- struct request_boost *boost;
-
- if (req == NULL || INTEL_GEN(req->i915) < 6)
- return;
-
- if (i915_gem_request_completed(req))
- return;
-
- boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
- if (boost == NULL)
- return;
-
- boost->req = i915_gem_request_get(req);
-
- INIT_WORK(&boost->work, __intel_rps_boost_work);
- queue_work(req->i915->wq, &boost->work);
-}
-
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
- mutex_init(&dev_priv->rps.hw_lock);
+ mutex_init(&dev_priv->pcu_lock);
- INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
+ INIT_DELAYED_WORK(&dev_priv->gt_pm.autoenable_work,
__intel_autoenable_gt_powersave);
- atomic_set(&dev_priv->rps.num_waiters, 0);
+ atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
- dev_priv->pm.suspended = false;
- atomic_set(&dev_priv->pm.wakeref_count, 0);
+ dev_priv->runtime_pm.suspended = false;
+ atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}
static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
@@ -9225,7 +9487,7 @@ u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
{
u64 time_hw, units, div;
- if (!intel_enable_rc6())
+ if (!intel_rc6_enabled())
return 0;
intel_runtime_pm_get(dev_priv);
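Editor's note: the Baytrail/Cherryview opcode conversions rewritten in the hunks above are round-to-nearest integer scalings around the GPLL reference clock. The following is a minimal standalone sketch of that arithmetic only; all names and the 19.2 MHz reference value are hypothetical and not part of the patch.

#include <stdio.h>

/* Round-to-nearest division for non-negative operands, mirroring what the
 * kernel's DIV_ROUND_CLOSEST() does for this simple case. */
static long div_round_closest(long x, long divisor)
{
	return (x + divisor / 2) / divisor;
}

/* opcode -> MHz: N = opcode - 0xb7, freq = GPLL ref (kHz) * N / 1000 */
static long example_byt_gpu_freq(long gpll_ref_khz, long opcode)
{
	return div_round_closest(gpll_ref_khz * (opcode - 0xb7), 1000);
}

/* MHz -> opcode: the inverse mapping */
static long example_byt_freq_opcode(long gpll_ref_khz, long mhz)
{
	return div_round_closest(1000 * mhz, gpll_ref_khz) + 0xb7;
}

int main(void)
{
	long ref = 19200; /* hypothetical GPLL reference, in kHz */

	printf("opcode 0xc8 -> %ld MHz\n", example_byt_gpu_freq(ref, 0xc8));
	printf("326 MHz -> opcode 0x%lx\n", example_byt_freq_opcode(ref, 326));
	return 0;
}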
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 1b31ab002dae..6e3b430fccdc 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -58,6 +58,9 @@
static bool is_edp_psr(struct intel_dp *intel_dp)
{
+ if (!intel_dp_is_edp(intel_dp))
+ return false;
+
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
@@ -72,90 +75,54 @@ static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
(val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}
-static void intel_psr_write_vsc(struct intel_dp *intel_dp,
- const struct edp_vsc_psr *vsc_psr)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
- i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
- uint32_t *data = (uint32_t *) vsc_psr;
- unsigned int i;
-
- /* As per BSPec (Pipe Video Data Island Packet), we need to disable
- the video DIP being updated before program video DIP data buffer
- registers for DIP being updated. */
- I915_WRITE(ctl_reg, 0);
- POSTING_READ(ctl_reg);
-
- for (i = 0; i < sizeof(*vsc_psr); i += 4) {
- I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
- i >> 2), *data);
- data++;
- }
- for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
- I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
- i >> 2), 0);
-
- I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
- POSTING_READ(ctl_reg);
-}
-
-static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
+static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t val;
/* VLV auto-generates the VSC packet as per EDP 1.3 spec, Table 3.10 */
- val = I915_READ(VLV_VSCSDP(pipe));
+ val = I915_READ(VLV_VSCSDP(crtc->pipe));
val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
- I915_WRITE(VLV_VSCSDP(pipe), val);
+ I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
}
-static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
+static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
- struct edp_vsc_psr psr_vsc;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ struct edp_vsc_psr psr_vsc;
- /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- if (dev_priv->psr.colorimetry_support &&
- dev_priv->psr.y_cord_support) {
- psr_vsc.sdp_header.HB2 = 0x5;
- psr_vsc.sdp_header.HB3 = 0x13;
- } else if (dev_priv->psr.y_cord_support) {
- psr_vsc.sdp_header.HB2 = 0x4;
- psr_vsc.sdp_header.HB3 = 0xe;
+ if (dev_priv->psr.psr2_support) {
+ /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ if (dev_priv->psr.colorimetry_support &&
+ dev_priv->psr.y_cord_support) {
+ psr_vsc.sdp_header.HB2 = 0x5;
+ psr_vsc.sdp_header.HB3 = 0x13;
+ } else if (dev_priv->psr.y_cord_support) {
+ psr_vsc.sdp_header.HB2 = 0x4;
+ psr_vsc.sdp_header.HB3 = 0xe;
+ } else {
+ psr_vsc.sdp_header.HB2 = 0x3;
+ psr_vsc.sdp_header.HB3 = 0xc;
+ }
} else {
- psr_vsc.sdp_header.HB2 = 0x3;
- psr_vsc.sdp_header.HB3 = 0xc;
+ /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
}
- intel_psr_write_vsc(intel_dp, &psr_vsc);
-}
-
-static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
-{
- struct edp_vsc_psr psr_vsc;
-
- /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x2;
- psr_vsc.sdp_header.HB3 = 0x8;
- intel_psr_write_vsc(intel_dp, &psr_vsc);
+ intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
+ DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}
static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
@@ -233,16 +200,15 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
I915_WRITE(aux_ctl_reg, aux_ctl);
}
-static void vlv_psr_enable_source(struct intel_dp *intel_dp)
+static void vlv_psr_enable_source(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- /* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
- I915_WRITE(VLV_PSRCTL(pipe),
+ /* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
+ I915_WRITE(VLV_PSRCTL(crtc->pipe),
VLV_EDP_PSR_MODE_SW_TIMER |
VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
VLV_EDP_PSR_ENABLE);
@@ -256,16 +222,17 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
struct drm_crtc *crtc = dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
- /* Let's do the transition from PSR_state 1 to PSR_state 2
- * that is PSR transition to active - static frame transmission.
- * Then Hardware is responsible for the transition to PSR_state 3
- * that is PSR active - no Remote Frame Buffer (RFB) update.
+ /*
+ * Let's do the transition from PSR_state 1 (inactive) to
+ * PSR_state 2 (transition to active - static frame transmission).
+ * Then Hardware is responsible for the transition to
+ * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
*/
I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
VLV_EDP_PSR_ACTIVE_ENTRY);
}
-static void intel_enable_source_psr1(struct intel_dp *intel_dp)
+static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
@@ -319,7 +286,7 @@ static void intel_enable_source_psr1(struct intel_dp *intel_dp)
I915_WRITE(EDP_PSR_CTL, val);
}
-static void intel_enable_source_psr2(struct intel_dp *intel_dp)
+static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
@@ -333,6 +300,7 @@ static void intel_enable_source_psr2(struct intel_dp *intel_dp)
*/
uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
uint32_t val;
+ uint8_t sink_latency;
val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
@@ -340,8 +308,16 @@ static void intel_enable_source_psr2(struct intel_dp *intel_dp)
* mesh at all with our frontbuffer tracking. And the hw alone isn't
* good enough. */
val |= EDP_PSR2_ENABLE |
- EDP_SU_TRACK_ENABLE |
- EDP_FRAMES_BEFORE_SU_ENTRY;
+ EDP_SU_TRACK_ENABLE;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_SYNCHRONIZATION_LATENCY_IN_SINK,
+ &sink_latency) == 1) {
+ sink_latency &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
+ } else {
+ sink_latency = 0;
+ }
+ val |= EDP_PSR2_FRAME_BEFORE_SU(sink_latency + 1);
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
val |= EDP_PSR2_TP2_TIME_2500;
@@ -355,35 +331,43 @@ static void intel_enable_source_psr2(struct intel_dp *intel_dp)
I915_WRITE(EDP_PSR2_CTL, val);
}
-static void hsw_psr_enable_source(struct intel_dp *intel_dp)
+static void hsw_psr_activate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ /* On HSW+ after we enable PSR on the source it will activate
+ * as soon as it matches the configured idle_frame count. So
+ * we actually enable it here at activation time.
+ */
+
/* psr1 and psr2 are mutually exclusive.*/
if (dev_priv->psr.psr2_support)
- intel_enable_source_psr2(intel_dp);
+ hsw_activate_psr2(intel_dp);
else
- intel_enable_source_psr1(intel_dp);
+ hsw_activate_psr1(intel_dp);
}
-static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
const struct drm_display_mode *adjusted_mode =
- &intel_crtc->config->base.adjusted_mode;
+ &crtc_state->base.adjusted_mode;
int psr_setup_time;
- lockdep_assert_held(&dev_priv->psr.lock);
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ if (!HAS_PSR(dev_priv))
+ return;
+
+ if (!is_edp_psr(intel_dp))
+ return;
- dev_priv->psr.source_ok = false;
+ if (!i915_modparams.enable_psr) {
+ DRM_DEBUG_KMS("PSR disable by flag\n");
+ return;
+ }
/*
* HSW spec explicitly says PSR is tied to port A.
@@ -394,66 +378,70 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
*/
if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
- return false;
- }
-
- if (!i915.enable_psr) {
- DRM_DEBUG_KMS("PSR disable by flag\n");
- return false;
+ return;
}
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
!dev_priv->psr.link_standby) {
DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
- return false;
+ return;
}
if (IS_HASWELL(dev_priv) &&
- I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
+ I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
- return false;
+ return;
}
if (IS_HASWELL(dev_priv) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
- return false;
+ return;
}
psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
if (psr_setup_time < 0) {
DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
intel_dp->psr_dpcd[1]);
- return false;
+ return;
}
if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
psr_setup_time);
- return false;
+ return;
+ }
+
+ /*
+ * FIXME psr2_support is messed up. It's both computed
+ * dynamically during PSR enable, and extracted from sink
+ * caps during eDP detection.
+ */
+ if (!dev_priv->psr.psr2_support) {
+ crtc_state->has_psr = true;
+ return;
}
/* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
- if (dev_priv->psr.psr2_support &&
- (intel_crtc->config->pipe_src_w > 3200 ||
- intel_crtc->config->pipe_src_h > 2000)) {
- dev_priv->psr.psr2_support = false;
- return false;
+ if (adjusted_mode->crtc_hdisplay > 3200 ||
+ adjusted_mode->crtc_vdisplay > 2000) {
+ DRM_DEBUG_KMS("PSR2 disabled, panel resolution too big\n");
+ return;
}
/*
* FIXME: enable psr2 only for y-coordinate psr2 panels
* After gtc implementation, remove this restriction.
*/
- if (!dev_priv->psr.y_cord_support && dev_priv->psr.psr2_support) {
+ if (!dev_priv->psr.y_cord_support) {
DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
- return false;
+ return;
}
- dev_priv->psr.source_ok = true;
- return true;
+ crtc_state->has_psr = true;
+ crtc_state->has_psr2 = true;
}
static void intel_psr_activate(struct intel_dp *intel_dp)
@@ -469,153 +457,133 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
- /* Enable/Re-enable PSR on the host */
- if (HAS_DDI(dev_priv))
- /* On HSW+ after we enable PSR on source it will activate it
- * as soon as it match configure idle_frame count. So
- * we just actually enable it here on activation time.
- */
- hsw_psr_enable_source(intel_dp);
- else
- vlv_psr_activate(intel_dp);
-
+ dev_priv->psr.activate(intel_dp);
dev_priv->psr.active = true;
}
+static void hsw_psr_enable_source(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 chicken;
+
+ if (dev_priv->psr.psr2_support) {
+ chicken = PSR2_VSC_ENABLE_PROG_HEADER;
+ if (dev_priv->psr.y_cord_support)
+ chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
+ I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
+
+ I915_WRITE(EDP_PSR_DEBUG_CTL,
+ EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+ EDP_PSR_DEBUG_MASK_LPSP |
+ EDP_PSR_DEBUG_MASK_MAX_SLEEP |
+ EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
+ } else {
+ /*
+ * Per Spec: Avoid continuous PSR exit by masking MEMUP
+ * and HPD. Also mask LPSP to avoid a dependency on other
+ * drivers that might block runtime_pm, besides
+ * preventing other hw tracking issues now that we can rely
+ * on frontbuffer tracking.
+ */
+ I915_WRITE(EDP_PSR_DEBUG_CTL,
+ EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+ EDP_PSR_DEBUG_MASK_LPSP);
+ }
+}
+
/**
* intel_psr_enable - Enable PSR
* @intel_dp: Intel DP
+ * @crtc_state: new CRTC state
*
* This function can only be called after the pipe is fully trained and enabled.
*/
-void intel_psr_enable(struct intel_dp *intel_dp)
+void intel_psr_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
- u32 chicken;
- if (!HAS_PSR(dev_priv)) {
- DRM_DEBUG_KMS("PSR not supported on this platform\n");
+ if (!crtc_state->has_psr)
return;
- }
-
- if (!is_edp_psr(intel_dp)) {
- DRM_DEBUG_KMS("PSR not supported by this panel\n");
- return;
- }
+ WARN_ON(dev_priv->drrs.dp);
mutex_lock(&dev_priv->psr.lock);
if (dev_priv->psr.enabled) {
DRM_DEBUG_KMS("PSR already in use\n");
goto unlock;
}
- if (!intel_psr_match_conditions(intel_dp))
- goto unlock;
+ dev_priv->psr.psr2_support = crtc_state->has_psr2;
+ dev_priv->psr.source_ok = true;
dev_priv->psr.busy_frontbuffer_bits = 0;
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_support) {
- skl_psr_setup_su_vsc(intel_dp);
- chicken = PSR2_VSC_ENABLE_PROG_HEADER;
- if (dev_priv->psr.y_cord_support)
- chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
- I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
- I915_WRITE(EDP_PSR_DEBUG_CTL,
- EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP |
- EDP_PSR_DEBUG_MASK_MAX_SLEEP |
- EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
- } else {
- /* set up vsc header for psr1 */
- hsw_psr_setup_vsc(intel_dp);
- /*
- * Per Spec: Avoid continuous PSR exit by masking MEMUP
- * and HPD. also mask LPSP to avoid dependency on other
- * drivers that might block runtime_pm besides
- * preventing other hw tracking issues now we can rely
- * on frontbuffer tracking.
- */
- I915_WRITE(EDP_PSR_DEBUG_CTL,
- EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP);
- }
-
- /* Enable PSR on the panel */
- hsw_psr_enable_sink(intel_dp);
+ dev_priv->psr.setup_vsc(intel_dp, crtc_state);
+ dev_priv->psr.enable_sink(intel_dp);
+ dev_priv->psr.enable_source(intel_dp, crtc_state);
+ dev_priv->psr.enabled = intel_dp;
- if (INTEL_GEN(dev_priv) >= 9)
- intel_psr_activate(intel_dp);
+ if (INTEL_GEN(dev_priv) >= 9) {
+ intel_psr_activate(intel_dp);
} else {
- vlv_psr_setup_vsc(intel_dp);
-
- /* Enable PSR on the panel */
- vlv_psr_enable_sink(intel_dp);
-
- /* On HSW+ enable_source also means go to PSR entry/active
- * state as soon as idle_frame achieved and here would be
- * to soon. However on VLV enable_source just enable PSR
- * but let it on inactive state. So we might do this prior
- * to active transition, i.e. here.
+ /*
+ * FIXME: Activation should happen immediately since this
+ * function is just called after pipe is fully trained and
+ * enabled.
+ * However on some platforms we face issues when first
+ * activation follows a modeset so quickly.
+ * - On VLV/CHV we get a blank screen on first activation
+ * - On HSW/BDW we get a recoverable frozen screen until
+ * next exit-activate sequence.
*/
- vlv_psr_enable_source(intel_dp);
- }
-
- /*
- * FIXME: Activation should happen immediately since this function
- * is just called after pipe is fully trained and enabled.
- * However on every platform we face issues when first activation
- * follows a modeset so quickly.
- * - On VLV/CHV we get bank screen on first activation
- * - On HSW/BDW we get a recoverable frozen screen until next
- * exit-activate sequence.
- */
- if (INTEL_GEN(dev_priv) < 9)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
+ }
- dev_priv->psr.enabled = intel_dp;
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-static void vlv_psr_disable(struct intel_dp *intel_dp)
+static void vlv_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(intel_dig_port->base.base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
uint32_t val;
if (dev_priv->psr.active) {
- /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
+ /* Put VLV PSR back to PSR_state 0 (disabled). */
if (intel_wait_for_register(dev_priv,
- VLV_PSRSTAT(intel_crtc->pipe),
+ VLV_PSRSTAT(crtc->pipe),
VLV_EDP_PSR_IN_TRANS,
0,
1))
WARN(1, "PSR transition took longer than expected\n");
- val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
+ val = I915_READ(VLV_PSRCTL(crtc->pipe));
val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
val &= ~VLV_EDP_PSR_ENABLE;
val &= ~VLV_EDP_PSR_MODE_MASK;
- I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
+ I915_WRITE(VLV_PSRCTL(crtc->pipe), val);
dev_priv->psr.active = false;
} else {
- WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
+ WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
}
}
-static void hsw_psr_disable(struct intel_dp *intel_dp)
+static void hsw_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -664,26 +632,27 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
/**
* intel_psr_disable - Disable PSR
* @intel_dp: Intel DP
+ * @old_crtc_state: old CRTC state
*
* This function needs to be called before disabling pipe.
*/
-void intel_psr_disable(struct intel_dp *intel_dp)
+void intel_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ if (!old_crtc_state->has_psr)
+ return;
+
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
- /* Disable PSR on Source */
- if (HAS_DDI(dev_priv))
- hsw_psr_disable(intel_dp);
- else
- vlv_psr_disable(intel_dp);
+ dev_priv->psr.disable_source(intel_dp, old_crtc_state);
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -783,17 +752,20 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
} else {
val = I915_READ(VLV_PSRCTL(pipe));
- /* Here we do the transition from PSR_state 3 to PSR_state 5
- * directly once PSR State 4 that is active with single frame
- * update can be skipped. PSR_state 5 that is PSR exit then
- * Hardware is responsible to transition back to PSR_state 1
- * that is PSR inactive. Same state after
- * vlv_edp_psr_enable_source.
+ /*
+ * Here we do the transition directly from
+ * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
+ * PSR_state 5 (exit).
+ * PSR State 4 (active with single frame update) can be skipped.
+ * On PSR_state 5 (exit) Hardware is responsible to transition
+ * back to PSR_state 1 (inactive).
+ * Now we are in the same state as after vlv_psr_enable_source.
*/
val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
I915_WRITE(VLV_PSRCTL(pipe), val);
- /* Send AUX wake up - Spec says after transitioning to PSR
+ /*
+ * Send AUX wake up - Spec says after transitioning to PSR
* active we have to send AUX wake up by writing 01h in DPCD
* 600h of sink device.
* XXX: This might slow down the transition, but without this
@@ -824,6 +796,9 @@ void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
enum pipe pipe;
u32 val;
+ if (!HAS_PSR(dev_priv))
+ return;
+
/*
* Single frame update is already supported on BDW+ but it requires
* many W/A and it isn't really needed.
@@ -870,6 +845,9 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
struct drm_crtc *crtc;
enum pipe pipe;
+ if (!HAS_PSR(dev_priv))
+ return;
+
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
@@ -907,6 +885,9 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
struct drm_crtc *crtc;
enum pipe pipe;
+ if (!HAS_PSR(dev_priv))
+ return;
+
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
@@ -939,12 +920,15 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
*/
void intel_psr_init(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PSR(dev_priv))
+ return;
+
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
/* Per platform default: all disabled. */
- if (i915.enable_psr == -1)
- i915.enable_psr = 0;
+ if (i915_modparams.enable_psr == -1)
+ i915_modparams.enable_psr = 0;
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -958,15 +942,29 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
/* Override link_standby x link_off defaults */
- if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
+ if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
DRM_DEBUG_KMS("PSR: Forcing link standby\n");
dev_priv->psr.link_standby = true;
}
- if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
+ if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
DRM_DEBUG_KMS("PSR: Forcing main link off\n");
dev_priv->psr.link_standby = false;
}
INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
mutex_init(&dev_priv->psr.lock);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->psr.enable_source = vlv_psr_enable_source;
+ dev_priv->psr.disable_source = vlv_psr_disable;
+ dev_priv->psr.enable_sink = vlv_psr_enable_sink;
+ dev_priv->psr.activate = vlv_psr_activate;
+ dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
+ } else {
+ dev_priv->psr.enable_source = hsw_psr_enable_source;
+ dev_priv->psr.disable_source = hsw_psr_disable;
+ dev_priv->psr.enable_sink = hsw_psr_enable_sink;
+ dev_priv->psr.activate = hsw_psr_activate;
+ dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
+ }
}
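Editor's note: with the per-platform hooks installed in intel_psr_init() above, the enable path reduces to indirect calls through a small ops table. Below is a simplified, self-contained sketch of that dispatch pattern; the types, names, and printf bodies are illustrative stand-ins, not the driver's actual structures.

#include <stdio.h>

struct example_dp { const char *name; };

struct example_psr_ops {
	void (*setup_vsc)(struct example_dp *dp);
	void (*enable_sink)(struct example_dp *dp);
	void (*enable_source)(struct example_dp *dp);
	void (*activate)(struct example_dp *dp);
};

static void hsw_setup_vsc(struct example_dp *dp)      { printf("%s: hsw setup_vsc\n", dp->name); }
static void hsw_enable_sink(struct example_dp *dp)    { printf("%s: hsw enable_sink\n", dp->name); }
static void hsw_enable_source(struct example_dp *dp)  { printf("%s: hsw enable_source\n", dp->name); }
static void hsw_activate(struct example_dp *dp)       { printf("%s: hsw activate\n", dp->name); }

static const struct example_psr_ops hsw_ops = {
	.setup_vsc     = hsw_setup_vsc,
	.enable_sink   = hsw_enable_sink,
	.enable_source = hsw_enable_source,
	.activate      = hsw_activate,
};

/* Platform-independent enable sequence: the caller never needs to know
 * whether VLV/CHV or HSW+ variants sit behind the pointers. */
static void example_psr_enable(const struct example_psr_ops *ops,
			       struct example_dp *dp)
{
	ops->setup_vsc(dp);
	ops->enable_sink(dp);
	ops->enable_source(dp);
	ops->activate(dp);
}

int main(void)
{
	struct example_dp dp = { .name = "eDP-1" };

	example_psr_enable(&hsw_ops, &dp);
	return 0;
}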
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cdf084ef5aae..8da1bde442dd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -402,17 +402,18 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
*/
if (IS_GEN7(dev_priv)) {
switch (engine->id) {
+ /*
+ * No more rings exist on Gen7. The default case is only here to
+ * shut up the gcc switch check warning.
+ */
+ default:
+ GEM_BUG_ON(engine->id);
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
break;
case BCS:
mmio = BLT_HWS_PGA_GEN7;
break;
- /*
- * VCS2 actually doesn't exist on Gen7. Only shut up
- * gcc switch check warning
- */
- case VCS2:
case VCS:
mmio = BSD_HWS_PGA_GEN7;
break;
@@ -427,6 +428,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
mmio = RING_HWS_PGA(engine->mmio_base);
}
+ if (INTEL_GEN(dev_priv) >= 6)
+ I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
+
I915_WRITE(mmio, engine->status_page.ggtt_offset);
POSTING_READ(mmio);
@@ -480,11 +484,6 @@ static bool stop_ring(struct intel_engine_cs *engine)
I915_WRITE_HEAD(engine, 0);
I915_WRITE_TAIL(engine, 0);
- if (INTEL_GEN(dev_priv) > 2) {
- (void)I915_READ_CTL(engine);
- I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
- }
-
return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
@@ -566,6 +565,9 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_engine_init_hangcheck(engine);
+ if (INTEL_GEN(dev_priv) > 2)
+ I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
+
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -575,7 +577,16 @@ out:
static void reset_ring_common(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request)
{
- /* Try to restore the logical GPU state to match the continuation
+ /*
+ * RC6 must be prevented until the reset is complete and the engine
+ * reinitialised. If it occurs in the middle of this sequence, the
+ * state written to/loaded from the power context is ill-defined (e.g.
+ * the PP_BASE_DIR may be lost).
+ */
+ assert_forcewakes_active(engine->i915, FORCEWAKE_ALL);
+
+ /*
+ * Try to restore the logical GPU state to match the continuation
* of the request queue. If we skip the context/PD restore, then
* the next request may try to execute assuming that its context
* is valid and loaded on the GPU and so may try to access invalid
@@ -778,6 +789,24 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
return cs;
}
+static void cancel_requests(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_request *request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
+ /* Mark all submitted requests as skipped. */
+ list_for_each_entry(request, &engine->timeline->requests, link) {
+ GEM_BUG_ON(!request->global_seqno);
+ if (!i915_gem_request_completed(request))
+ dma_fence_set_error(&request->fence, -EIO);
+ }
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
struct drm_i915_private *dev_priv = request->i915;
@@ -1174,113 +1203,7 @@ i915_emit_bb_start(struct drm_i915_gem_request *req,
return 0;
}
-static void cleanup_phys_status_page(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- if (!dev_priv->status_page_dmah)
- return;
-
- drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
- engine->status_page.page_addr = NULL;
-}
-
-static void cleanup_status_page(struct intel_engine_cs *engine)
-{
- struct i915_vma *vma;
- struct drm_i915_gem_object *obj;
-
- vma = fetch_and_zero(&engine->status_page.vma);
- if (!vma)
- return;
-
- obj = vma->obj;
-
- i915_vma_unpin(vma);
- i915_vma_close(vma);
-
- i915_gem_object_unpin_map(obj);
- __i915_gem_object_release_unless_active(obj);
-}
-
-static int init_status_page(struct intel_engine_cs *engine)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- unsigned int flags;
- void *vaddr;
- int ret;
-
- obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate status page\n");
- return PTR_ERR(obj);
- }
-
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- if (ret)
- goto err;
-
- vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err;
- }
-
- flags = PIN_GLOBAL;
- if (!HAS_LLC(engine->i915))
- /* On g33, we cannot place HWS above 256MiB, so
- * restrict its pinning to the low mappable arena.
- * Though this restriction is not documented for
- * gen4, gen5, or byt, they also behave similarly
- * and hang if the HWS is placed at the top of the
- * GTT. To generalise, it appears that all !llc
- * platforms have issues with us placing the HWS
- * above the mappable region (even though we never
- * actualy map it).
- */
- flags |= PIN_MAPPABLE;
- ret = i915_vma_pin(vma, 0, 4096, flags);
- if (ret)
- goto err;
-
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto err_unpin;
- }
-
- engine->status_page.vma = vma;
- engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
- engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
-
- DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
- engine->name, i915_ggtt_offset(vma));
- return 0;
-
-err_unpin:
- i915_vma_unpin(vma);
-err:
- i915_gem_object_put(obj);
- return ret;
-}
-
-static int init_phys_status_page(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- GEM_BUG_ON(engine->id != RCS);
-
- dev_priv->status_page_dmah =
- drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
- if (!dev_priv->status_page_dmah)
- return -ENOMEM;
-
- engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
- memset(engine->status_page.page_addr, 0, PAGE_SIZE);
- return 0;
-}
int intel_ring_pin(struct intel_ring *ring,
struct drm_i915_private *i915,
@@ -1321,6 +1244,8 @@ int intel_ring_pin(struct intel_ring *ring,
if (IS_ERR(addr))
goto err;
+ vma->obj->pin_global++;
+
ring->vaddr = addr;
return 0;
@@ -1352,6 +1277,7 @@ void intel_ring_unpin(struct intel_ring *ring)
i915_gem_object_unpin_map(ring->vma->obj);
ring->vaddr = NULL;
+ ring->vma->obj->pin_global--;
i915_vma_unpin(ring->vma);
}
@@ -1516,6 +1442,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
goto err;
ce->state->obj->mm.dirty = true;
+ ce->state->obj->pin_global++;
}
/* The kernel context is only used as a placeholder for flushing the
@@ -1550,8 +1477,10 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine,
if (--ce->pin_count)
return;
- if (ce->state)
+ if (ce->state) {
+ ce->state->obj->pin_global--;
i915_vma_unpin(ce->state);
+ }
i915_gem_context_put(ctx);
}
@@ -1567,17 +1496,10 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
if (err)
goto err;
- if (HWS_NEEDS_PHYSICAL(engine->i915))
- err = init_phys_status_page(engine);
- else
- err = init_status_page(engine);
- if (err)
- goto err;
-
ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
- goto err_hws;
+ goto err;
}
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
@@ -1592,11 +1514,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
err_ring:
intel_ring_free(ring);
-err_hws:
- if (HWS_NEEDS_PHYSICAL(engine->i915))
- cleanup_phys_status_page(engine);
- else
- cleanup_status_page(engine);
err:
intel_engine_cleanup_common(engine);
return err;
@@ -1615,11 +1532,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
if (engine->cleanup)
engine->cleanup(engine);
- if (HWS_NEEDS_PHYSICAL(dev_priv))
- cleanup_phys_status_page(engine);
- else
- cleanup_status_page(engine);
-
intel_engine_cleanup_common(engine);
dev_priv->engine[engine->id] = NULL;
@@ -1983,7 +1895,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *obj;
int ret, i;
- if (!i915.semaphores)
+ if (!i915_modparams.semaphores)
return;
if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
@@ -2083,7 +1995,7 @@ err_obj:
i915_gem_object_put(obj);
err:
DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
- i915.semaphores = 0;
+ i915_modparams.semaphores = 0;
}
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
@@ -2115,11 +2027,13 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
+ engine->cancel_requests = cancel_requests;
}
static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = gen6_bsd_submit_request;
+ engine->cancel_requests = cancel_requests;
}
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
@@ -2138,7 +2052,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->emit_breadcrumb = i9xx_emit_breadcrumb;
engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
- if (i915.semaphores) {
+ if (i915_modparams.semaphores) {
int num_rings;
engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
@@ -2182,7 +2096,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
engine->emit_flush = gen8_render_ring_flush;
- if (i915.semaphores) {
+ if (i915_modparams.semaphores) {
int num_rings;
engine->semaphore.signal = gen8_rcs_signal;
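Editor's note: the new cancel_requests() hook above walks the engine timeline under its spinlock and flags every incomplete submitted request with -EIO via dma_fence_set_error(). Below is a minimal userspace-style sketch of the same "mark everything pending as failed under a lock" pattern, assuming a plain linked list and a pthread mutex; all names are hypothetical.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct example_request {
	struct example_request *next;
	int completed;
	int error;	/* 0 = none, otherwise -errno, set at most once */
};

struct example_engine {
	pthread_mutex_t lock;
	struct example_request *requests;	/* submitted, oldest first */
};

/* Mark all submitted-but-incomplete requests as cancelled. */
static void example_cancel_requests(struct example_engine *engine)
{
	struct example_request *rq;

	pthread_mutex_lock(&engine->lock);
	for (rq = engine->requests; rq; rq = rq->next) {
		if (!rq->completed && !rq->error)
			rq->error = -EIO;
	}
	pthread_mutex_unlock(&engine->lock);
}

int main(void)
{
	struct example_request r2 = { NULL, 0, 0 };
	struct example_request r1 = { &r2, 1, 0 };
	struct example_engine engine = { PTHREAD_MUTEX_INITIALIZER, &r1 };

	example_cancel_requests(&engine);
	printf("r1 error=%d r2 error=%d\n", r1.error, r2.error);
	return 0;
}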
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6b2067f10824..2863d5a65187 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -8,6 +8,8 @@
#include "i915_gem_timeline.h"
#include "i915_selftest.h"
+struct drm_printer;
+
#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
@@ -185,6 +187,104 @@ struct i915_priolist {
int priority;
};
+/**
+ * struct intel_engine_execlists - execlist submission queue and port state
+ *
+ * The struct intel_engine_execlists represents the combined logical state of
+ * driver and the hardware state for execlist mode of submission.
+ */
+struct intel_engine_execlists {
+ /**
+ * @irq_tasklet: softirq tasklet for bottom handler
+ */
+ struct tasklet_struct irq_tasklet;
+
+ /**
+ * @default_priolist: priority list for I915_PRIORITY_NORMAL
+ */
+ struct i915_priolist default_priolist;
+
+ /**
+ * @no_priolist: priority lists disabled
+ */
+ bool no_priolist;
+
+ /**
+ * @port: execlist port states
+ *
+ * For each hardware ELSP (ExecList Submission Port) we keep
+ * track of the last request and the number of times we submitted
+ * that port to hw. We then count the number of times the hw reports
+ * a context completion or preemption. As only one context can
+ * be active on hw, we limit resubmission of a context to port[0]. This
+ * is called a Lite Restore of the context.
+ */
+ struct execlist_port {
+ /**
+ * @request_count: combined request and submission count
+ */
+ struct drm_i915_gem_request *request_count;
+#define EXECLIST_COUNT_BITS 2
+#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
+#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
+#define port_set(p, packed) ((p)->request_count = (packed))
+#define port_isset(p) ((p)->request_count)
+#define port_index(p, execlists) ((p) - (execlists)->port)
+
+ /**
+ * @context_id: context ID for port
+ */
+ GEM_DEBUG_DECL(u32 context_id);
+
+#define EXECLIST_MAX_PORTS 2
+ } port[EXECLIST_MAX_PORTS];
+
+ /**
+ * @active: is the HW active? We consider the HW as active after
+ * submitting any context for execution and until we have seen the
+ * last context completion event. After that, we do not expect any
+ * more events until we submit, and so can park the HW.
+ *
+ * As we have a small number of different sources from which we feed
+ * the HW, we track the state of each inside a single bitfield.
+ */
+ unsigned int active;
+#define EXECLISTS_ACTIVE_USER 0
+#define EXECLISTS_ACTIVE_PREEMPT 1
+
+ /**
+ * @port_mask: number of execlist ports - 1
+ */
+ unsigned int port_mask;
+
+ /**
+ * @queue: queue of requests, in priority lists
+ */
+ struct rb_root queue;
+
+ /**
+ * @first: leftmost level in priority @queue
+ */
+ struct rb_node *first;
+
+ /**
+ * @fw_domains: forcewake domains for irq tasklet
+ */
+ unsigned int fw_domains;
+
+ /**
+ * @csb_head: context status buffer head
+ */
+ unsigned int csb_head;
+
+ /**
+ * @csb_use_mmio: access csb through mmio, instead of hwsp
+ */
+ bool csb_use_mmio;
+};
+
#define INTEL_ENGINE_CS_MAX_NAME 8
struct intel_engine_cs {
@@ -307,6 +407,14 @@ struct intel_engine_cs {
void (*schedule)(struct drm_i915_gem_request *request,
int priority);
+ /*
+ * Cancel all requests on the hardware, or queued for execution.
+ * This should only cancel the ready requests that have been
+ * submitted to the engine (via the engine->submit_request callback).
+ * This is called when marking the device as wedged.
+ */
+ void (*cancel_requests)(struct intel_engine_cs *engine);
+
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
@@ -373,25 +481,7 @@ struct intel_engine_cs {
u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
} semaphore;
- /* Execlists */
- struct tasklet_struct irq_tasklet;
- struct i915_priolist default_priolist;
- bool no_priolist;
- struct execlist_port {
- struct drm_i915_gem_request *request_count;
-#define EXECLIST_COUNT_BITS 2
-#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
-#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
-#define port_set(p, packed) ((p)->request_count = (packed))
-#define port_isset(p) ((p)->request_count)
-#define port_index(p, e) ((p) - (e)->execlist_port)
- GEM_DEBUG_DECL(u32 context_id);
- } execlist_port[2];
- struct rb_root execlist_queue;
- struct rb_node *execlist_first;
- unsigned int fw_domains;
+ struct intel_engine_execlists execlists;
/* Contexts are pinned whilst they are active on the GPU. The last
* context executed remains active whilst the GPU is idle - the
@@ -444,6 +534,46 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
+static inline void
+execlists_set_active(struct intel_engine_execlists *execlists,
+ unsigned int bit)
+{
+ __set_bit(bit, (unsigned long *)&execlists->active);
+}
+
+static inline void
+execlists_clear_active(struct intel_engine_execlists *execlists,
+ unsigned int bit)
+{
+ __clear_bit(bit, (unsigned long *)&execlists->active);
+}
+
+static inline bool
+execlists_is_active(const struct intel_engine_execlists *execlists,
+ unsigned int bit)
+{
+ return test_bit(bit, (unsigned long *)&execlists->active);
+}
+
+static inline unsigned int
+execlists_num_ports(const struct intel_engine_execlists * const execlists)
+{
+ return execlists->port_mask + 1;
+}
+
+static inline void
+execlists_port_complete(struct intel_engine_execlists * const execlists,
+ struct execlist_port * const port)
+{
+ const unsigned int m = execlists->port_mask;
+
+ GEM_BUG_ON(port_index(port, execlists) != 0);
+ GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
+
+ memmove(port, port + 1, m * sizeof(struct execlist_port));
+ memset(port + m, 0, sizeof(struct execlist_port));
+}
+
static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
@@ -497,6 +627,10 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
+#define I915_HWS_CSB_BUF0_INDEX 0x10
+#define I915_HWS_CSB_WRITE_INDEX 0x1f
+#define CNL_HWS_CSB_WRITE_INDEX 0x2f
+
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
@@ -736,16 +870,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
void intel_engines_mark_idle(struct drm_i915_private *i915);
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
-static inline bool
-__intel_engine_can_store_dword(unsigned int gen, unsigned int class)
-{
- if (gen <= 2)
- return false; /* uses physical not virtual addresses */
+bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
- if (gen == 6 && class == VIDEO_DECODE_CLASS)
- return false; /* b0rked */
-
- return true;
-}
+void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
#endif /* _INTEL_RINGBUFFER_H_ */
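Editor's note: the port_pack()/port_unpack() macros moved into struct intel_engine_execlists above stash a small submission count in the otherwise-unused low bits of an aligned request pointer. The following is a standalone sketch of that packing trick; the names and helpers are illustrative, not the kernel's ptr_pack_bits() family.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define COUNT_BITS 2
#define COUNT_MASK ((uintptr_t)((1u << COUNT_BITS) - 1))

/* Pack a small count into the low bits of a pointer that is known to be
 * at least 4-byte aligned (so those bits are always zero). */
static void *example_pack(void *ptr, unsigned int count)
{
	assert(((uintptr_t)ptr & COUNT_MASK) == 0);
	assert(count <= COUNT_MASK);
	return (void *)((uintptr_t)ptr | count);
}

static void *example_unpack(void *packed, unsigned int *count)
{
	*count = (unsigned int)((uintptr_t)packed & COUNT_MASK);
	return (void *)((uintptr_t)packed & ~COUNT_MASK);
}

int main(void)
{
	static int object;	/* ints are at least 4-byte aligned on common ABIs */
	unsigned int count;
	void *packed = example_pack(&object, 3);

	assert(example_unpack(packed, &count) == &object && count == 3);
	printf("packed pointer round-trips with count=%u\n", count);
	return 0;
}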
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 49577eba8e7e..8af286c63d3b 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -187,7 +187,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well;
bool is_enabled;
- if (dev_priv->pm.suspended)
+ if (dev_priv->runtime_pm.suspended)
return false;
is_enabled = true;
@@ -785,7 +785,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
PUNIT_PWRGT_PWR_GATE(power_well_id);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
#define COND \
((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
@@ -806,7 +806,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
#undef COND
out:
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
@@ -833,7 +833,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
mask = PUNIT_PWRGT_MASK(power_well_id);
ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
/*
@@ -852,7 +852,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
WARN_ON(ctrl != state);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
return enabled;
}
@@ -1364,7 +1364,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
bool enabled;
u32 state, ctrl;
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
/*
@@ -1381,7 +1381,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
WARN_ON(ctrl << 16 != state);
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
return enabled;
}
@@ -1396,7 +1396,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
- mutex_lock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->pcu_lock);
#define COND \
((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
@@ -1417,7 +1417,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
#undef COND
out:
- mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_unlock(&dev_priv->pcu_lock);
}
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
@@ -2413,7 +2413,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
mask = 0;
}
- if (!i915.disable_power_well)
+ if (!i915_modparams.disable_power_well)
max_dc = 0;
if (enable_dc >= 0 && enable_dc <= max_dc) {
@@ -2471,10 +2471,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
- i915.disable_power_well);
- dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
- i915.enable_dc);
+ i915_modparams.disable_power_well =
+ sanitize_disable_power_well_option(dev_priv,
+ i915_modparams.disable_power_well);
+ dev_priv->csr.allowed_dc_mask =
+ get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
@@ -2535,7 +2536,7 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv)
intel_display_set_init_power(dev_priv, true);
/* Remove the refcount we took to keep power well support disabled. */
- if (!i915.disable_power_well)
+ if (!i915_modparams.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
/*
@@ -2707,30 +2708,67 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
usleep_range(10, 30); /* 10 us delay per Bspec */
}
-#define CNL_PROCMON_IDX(val) \
- (((val) & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) >> VOLTAGE_INFO_SHIFT)
-#define NUM_CNL_PROCMON \
- (CNL_PROCMON_IDX(VOLTAGE_INFO_MASK | PROCESS_INFO_MASK) + 1)
+enum {
+ PROCMON_0_85V_DOT_0,
+ PROCMON_0_95V_DOT_0,
+ PROCMON_0_95V_DOT_1,
+ PROCMON_1_05V_DOT_0,
+ PROCMON_1_05V_DOT_1,
+};
static const struct cnl_procmon {
u32 dw1, dw9, dw10;
-} cnl_procmon_values[NUM_CNL_PROCMON] = {
- [CNL_PROCMON_IDX(VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0)] =
- { .dw1 = 0x00 << 16, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
- [CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0)] =
- { .dw1 = 0x00 << 16, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
- [CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1)] =
- { .dw1 = 0x00 << 16, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
- [CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0)] =
- { .dw1 = 0x00 << 16, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
- [CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1)] =
- { .dw1 = 0x44 << 16, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
+} cnl_procmon_values[] = {
+ [PROCMON_0_85V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
+ [PROCMON_0_95V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
+ [PROCMON_0_95V_DOT_1] =
+ { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
+ [PROCMON_1_05V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
+ [PROCMON_1_05V_DOT_1] =
+ { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
};
+static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv)
+{
+ const struct cnl_procmon *procmon;
+ u32 val;
+
+ val = I915_READ(CNL_PORT_COMP_DW3);
+ switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
+ default:
+ MISSING_CASE(val);
+ case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
+ break;
+ case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
+ break;
+ case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
+ procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
+ break;
+ case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
+ break;
+ case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
+ procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
+ break;
+ }
+
+ val = I915_READ(CNL_PORT_COMP_DW1);
+ val &= ~((0xff << 16) | 0xff);
+ val |= procmon->dw1;
+ I915_WRITE(CNL_PORT_COMP_DW1, val);
+
+ I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9);
+ I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10);
+}
+
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- const struct cnl_procmon *procmon;
struct i915_power_well *well;
u32 val;
@@ -2746,18 +2784,7 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
val &= ~CNL_COMP_PWR_DOWN;
I915_WRITE(CHICKEN_MISC_2, val);
- val = I915_READ(CNL_PORT_COMP_DW3);
- procmon = &cnl_procmon_values[CNL_PROCMON_IDX(val)];
-
- WARN_ON(procmon->dw10 == 0);
-
- val = I915_READ(CNL_PORT_COMP_DW1);
- val &= ~((0xff << 16) | 0xff);
- val |= procmon->dw1;
- I915_WRITE(CNL_PORT_COMP_DW1, val);
-
- I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9);
- I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10);
+ cnl_set_procmon_ref_values(dev_priv);
val = I915_READ(CNL_PORT_COMP_DW0);
val |= COMP_INIT;
@@ -2787,9 +2814,6 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
intel_csr_load_program(dev_priv);
}
-#undef CNL_PROCMON_IDX
-#undef NUM_CNL_PROCMON
-
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
@@ -2975,7 +2999,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev_priv, true);
/* Disable power support if the user asked so. */
- if (!i915.disable_power_well)
+ if (!i915_modparams.disable_power_well)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
intel_power_domains_sync_hw(dev_priv);
power_domains->initializing = false;
@@ -2994,7 +3018,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
* Even if power well support was disabled we still want to disable
* power wells while we are system suspended.
*/
- if (!i915.disable_power_well)
+ if (!i915_modparams.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
if (IS_CANNONLAKE(dev_priv))
@@ -3104,7 +3128,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
ret = pm_runtime_get_sync(kdev);
WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
- atomic_inc(&dev_priv->pm.wakeref_count);
+ atomic_inc(&dev_priv->runtime_pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
}
@@ -3138,7 +3162,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
return false;
}
- atomic_inc(&dev_priv->pm.wakeref_count);
+ atomic_inc(&dev_priv->runtime_pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
return true;
@@ -3169,7 +3193,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
assert_rpm_wakelock_held(dev_priv);
pm_runtime_get_noresume(kdev);
- atomic_inc(&dev_priv->pm.wakeref_count);
+ atomic_inc(&dev_priv->runtime_pm.wakeref_count);
}
/**
@@ -3186,7 +3210,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
struct device *kdev = &pdev->dev;
assert_rpm_wakelock_held(dev_priv);
- atomic_dec(&dev_priv->pm.wakeref_count);
+ atomic_dec(&dev_priv->runtime_pm.wakeref_count);
pm_runtime_mark_last_busy(kdev);
pm_runtime_put_autosuspend(kdev);
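Editor's note: cnl_set_procmon_ref_values() above replaces the old register-bits-as-array-index scheme with an explicit switch, so an unexpected voltage/process combination lands in a visible default case instead of silently indexing a sparse table. Below is a simplified sketch of that lookup shape; the fuse encodings and identifiers are hypothetical, only the dw9/dw10 constants are taken from the patch.

#include <stdio.h>

enum example_procmon_id {
	EX_PROCMON_0_85V_DOT_0,
	EX_PROCMON_0_95V_DOT_0,
	EX_PROCMON_1_05V_DOT_0,
};

struct example_procmon { unsigned int dw1, dw9, dw10; };

static const struct example_procmon example_values[] = {
	[EX_PROCMON_0_85V_DOT_0] = { 0x00000000, 0x62AB67BB, 0x51914F96 },
	[EX_PROCMON_0_95V_DOT_0] = { 0x00000000, 0x86E172C7, 0x77CA5EAB },
	[EX_PROCMON_1_05V_DOT_0] = { 0x00000000, 0x98FA82DD, 0x89E46DC1 },
};

/* Hypothetical encodings for the voltage/process fuse bits. */
#define EX_INFO_0_85V_DOT_0 0x0
#define EX_INFO_0_95V_DOT_0 0x1
#define EX_INFO_1_05V_DOT_0 0x2

static const struct example_procmon *example_lookup(unsigned int fuse_bits)
{
	switch (fuse_bits) {
	default:
		fprintf(stderr, "unexpected procmon fuse value 0x%x\n", fuse_bits);
		/* fall through to a safe default entry */
	case EX_INFO_0_85V_DOT_0:
		return &example_values[EX_PROCMON_0_85V_DOT_0];
	case EX_INFO_0_95V_DOT_0:
		return &example_values[EX_PROCMON_0_95V_DOT_0];
	case EX_INFO_1_05V_DOT_0:
		return &example_values[EX_PROCMON_1_05V_DOT_0];
	}
}

int main(void)
{
	const struct example_procmon *p = example_lookup(EX_INFO_0_95V_DOT_0);

	printf("dw9=0x%08X dw10=0x%08X\n", p->dw9, p->dw10);
	return 0;
}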
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 29a3b0f5bec7..7437944b388f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -201,11 +201,8 @@ to_intel_sdvo_connector(struct drm_connector *connector)
return container_of(connector, struct intel_sdvo_connector, base.base);
}
-static struct intel_sdvo_connector_state *
-to_intel_sdvo_connector_state(struct drm_connector_state *conn_state)
-{
- return container_of(conn_state, struct intel_sdvo_connector_state, base.base);
-}
+#define to_intel_sdvo_connector_state(conn_state) \
+ container_of((conn_state), struct intel_sdvo_connector_state, base.base)
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
@@ -998,7 +995,7 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
}
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
- struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *pipe_config)
{
uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
union hdmi_infoframe frame;
@@ -1032,7 +1029,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
}
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
- struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state)
{
struct intel_sdvo_tv_format format;
uint32_t format_map;
@@ -1202,9 +1199,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
} while (0)
static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
- struct intel_sdvo_connector_state *sdvo_state)
+ const struct intel_sdvo_connector_state *sdvo_state)
{
- struct drm_connector_state *conn_state = &sdvo_state->base.base;
+ const struct drm_connector_state *conn_state = &sdvo_state->base.base;
struct intel_sdvo_connector *intel_sdvo_conn =
to_intel_sdvo_connector(conn_state->connector);
uint16_t val;
@@ -1258,14 +1255,15 @@ static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
}
static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
- struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(conn_state);
- struct drm_display_mode *mode = &crtc_state->base.mode;
+ const struct intel_sdvo_connector_state *sdvo_state =
+ to_intel_sdvo_connector_state(conn_state);
+ const struct drm_display_mode *mode = &crtc_state->base.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
@@ -1507,8 +1505,8 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
static void intel_disable_sdvo(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
@@ -1552,21 +1550,21 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
}
static void pch_disable_sdvo(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
}
static void pch_post_disable_sdvo(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
intel_disable_sdvo(encoder, old_crtc_state, old_conn_state);
}
static void intel_enable_sdvo(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
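Editor's note: the to_intel_sdvo_connector_state() change above swaps a static inline for a container_of()-style macro, so the same helper can be used from the new const connector-state paths without casts. Below is a small sketch of why the macro form is more flexible; the types are invented purely for illustration.

#include <stddef.h>
#include <stdio.h>

struct base_state { int id; };

struct derived_state {
	struct base_state base;
	int extra;
};

/* Macro form: works for any pointer expression, const or not, because the
 * cast back to the outer type is written out at each expansion site. */
#define to_derived_state(p) \
	((struct derived_state *)((char *)(p) - offsetof(struct derived_state, base)))

int main(void)
{
	struct derived_state d = { .base = { .id = 7 }, .extra = 42 };
	const struct base_state *b = &d.base;

	/* An inline helper taking a non-const 'struct base_state *' would
	 * force the caller to cast away const here. */
	printf("extra = %d\n", to_derived_state(b)->extra);
	return 0;
}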
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 7d971cb56116..75c872bb8cc9 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -81,7 +81,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
{
u32 val = 0;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
mutex_lock(&dev_priv->sb_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
@@ -95,7 +95,7 @@ int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
{
int err;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
mutex_lock(&dev_priv->sb_lock);
err = vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
@@ -125,7 +125,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
{
u32 val = 0;
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
mutex_lock(&dev_priv->sb_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
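A minimal caller sketch (not part of this patch): with the rename above, the punit sideband helpers assert dev_priv->pcu_lock rather than rps.hw_lock, so a hypothetical caller now looks like this.

static u32 example_punit_read(struct drm_i915_private *dev_priv, u32 addr)
{
        u32 val;

        mutex_lock(&dev_priv->pcu_lock);        /* the lock the WARN_ON()s above check */
        val = vlv_punit_read(dev_priv, addr);
        mutex_unlock(&dev_priv->pcu_lock);

        return val;
}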
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 524933b01483..4fcf80ca91dd 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -66,12 +66,17 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
1000 * adjusted_mode->crtc_htotal);
}
+/* FIXME: We should take the uncore spinlock only once for the entire update
+ * instead of once per mmio. */
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+#define VBLANK_EVASION_TIME_US 250
+#else
#define VBLANK_EVASION_TIME_US 100
+#endif
/**
* intel_pipe_update_start() - start update of a set of display registers
- * @crtc: the crtc of which the registers are going to be updated
- * @start_vbl_count: vblank counter return pointer used for error checking
+ * @new_crtc_state: the new crtc state
*
* Mark the start of an update to pipe registers that should be updated
* atomically regarding vblank. If the next vblank happens within
@@ -79,18 +84,18 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
*
* After a successful call to this function, interrupts will be disabled
* until a subsequent call to intel_pipe_update_end(). That is done to
- * avoid random delays. The value written to @start_vbl_count should be
- * supplied to intel_pipe_update_end() for error checking.
+ * avoid random delays.
*/
-void intel_pipe_update_start(struct intel_crtc *crtc)
+void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &new_crtc_state->base.adjusted_mode;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
+ intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
DEFINE_WAIT(wait);
vblank_start = adjusted_mode->crtc_vblank_start;
@@ -170,15 +175,15 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
/**
* intel_pipe_update_end() - end update of a set of display registers
- * @crtc: the crtc of which the registers were updated
- * @start_vbl_count: start vblank counter (used for error checking)
+ * @new_crtc_state: the new crtc state
*
* Mark the end of an update started with intel_pipe_update_start(). This
* re-enables interrupts and verifies the update was actually completed
- * before a vblank using the value of @start_vbl_count.
+ * before a vblank.
*/
-void intel_pipe_update_end(struct intel_crtc *crtc)
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
@@ -191,14 +196,14 @@ void intel_pipe_update_end(struct intel_crtc *crtc)
* Would be slightly nice to just grab the vblank count and arm the
* event outside of the critical section - the spinlock might spin for a
* while ... */
- if (crtc->base.state->event) {
+ if (new_crtc_state->base.event) {
WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
spin_lock(&crtc->base.dev->event_lock);
- drm_crtc_arm_vblank_event(&crtc->base, crtc->base.state->event);
+ drm_crtc_arm_vblank_event(&crtc->base, new_crtc_state->base.event);
spin_unlock(&crtc->base.dev->event_lock);
- crtc->base.state->event = NULL;
+ new_crtc_state->base.event = NULL;
}
local_irq_enable();
@@ -225,7 +230,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc)
#endif
}
-static void
+void
skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
@@ -306,7 +311,7 @@ skl_update_plane(struct intel_plane *plane,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void
+void
skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -995,7 +1000,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
set->flags & I915_SET_COLORKEY_DESTINATION)
return -EINVAL;
- plane = drm_plane_find(dev, set->plane_id);
+ plane = drm_plane_find(dev, file_priv, set->plane_id);
if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
return -ENOENT;
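A minimal usage sketch (not part of this patch) of the vblank-evasion helpers reworked above: intel_pipe_update_start()/intel_pipe_update_end() bracket the mmio writes of a plane update and, with this patch, take the new crtc state rather than the crtc. The caller and its state pointers below are hypothetical.

static void example_plane_commit(struct intel_plane *plane,
                                 struct intel_crtc_state *new_crtc_state,
                                 const struct intel_plane_state *new_plane_state)
{
        /* Waits for a safe scanline window and disables interrupts. */
        intel_pipe_update_start(new_crtc_state);

        /* Register writes done here land within a single frame. */
        skl_update_plane(plane, new_crtc_state, new_plane_state);

        /* Re-enables interrupts and arms the pending vblank event, if any. */
        intel_pipe_update_end(new_crtc_state);
}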
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 906893c006d8..a79a7591b2cf 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -814,8 +814,8 @@ intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
static void
intel_enable_tv(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -829,8 +829,8 @@ intel_enable_tv(struct intel_encoder *encoder,
static void
intel_disable_tv(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -838,7 +838,7 @@ intel_disable_tv(struct intel_encoder *encoder,
I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
}
-static const struct tv_mode *intel_tv_mode_find(struct drm_connector_state *conn_state)
+static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
{
int format = conn_state->tv.mode;
@@ -976,8 +976,8 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
}
static void intel_tv_pre_enable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1385,7 +1385,7 @@ intel_tv_get_modes(struct drm_connector *connector)
mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
mode_ptr->vtotal = vactive_s + 33;
- tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
+ tmp = mul_u32_u32(tv_mode->refresh, mode_ptr->vtotal);
tmp *= mode_ptr->htotal;
tmp = div_u64(tmp, 1000000);
mode_ptr->clock = (int) tmp;
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 0178ba42a0e5..25bd162f38d2 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -22,22 +22,9 @@
*
*/
-#include "i915_drv.h"
#include "intel_uc.h"
-#include <linux/firmware.h>
-
-/* Cleans up uC firmware by releasing the firmware GEM obj.
- */
-static void __intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
-{
- struct drm_i915_gem_object *obj;
-
- obj = fetch_and_zero(&uc_fw->obj);
- if (obj)
- i915_gem_object_put(obj);
-
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-}
+#include "i915_drv.h"
+#include "i915_guc_submission.h"
/* Reset GuC providing us with fresh state for both GuC and HuC.
*/
@@ -63,234 +50,70 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
{
if (!HAS_GUC(dev_priv)) {
- if (i915.enable_guc_loading > 0 ||
- i915.enable_guc_submission > 0)
+ if (i915_modparams.enable_guc_loading > 0 ||
+ i915_modparams.enable_guc_submission > 0)
DRM_INFO("Ignoring GuC options, no hardware\n");
- i915.enable_guc_loading = 0;
- i915.enable_guc_submission = 0;
+ i915_modparams.enable_guc_loading = 0;
+ i915_modparams.enable_guc_submission = 0;
return;
}
/* A negative value means "use platform default" */
- if (i915.enable_guc_loading < 0)
- i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
+ if (i915_modparams.enable_guc_loading < 0)
+ i915_modparams.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
/* Verify firmware version */
- if (i915.enable_guc_loading) {
+ if (i915_modparams.enable_guc_loading) {
if (HAS_HUC_UCODE(dev_priv))
intel_huc_select_fw(&dev_priv->huc);
- if (intel_guc_select_fw(&dev_priv->guc))
- i915.enable_guc_loading = 0;
+ if (intel_guc_fw_select(&dev_priv->guc))
+ i915_modparams.enable_guc_loading = 0;
}
/* Can't enable guc submission without guc loaded */
- if (!i915.enable_guc_loading)
- i915.enable_guc_submission = 0;
+ if (!i915_modparams.enable_guc_loading)
+ i915_modparams.enable_guc_submission = 0;
/* A negative value means "use platform default" */
- if (i915.enable_guc_submission < 0)
- i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
-}
-
-static void gen8_guc_raise_irq(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
+ if (i915_modparams.enable_guc_submission < 0)
+ i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
}
void intel_uc_init_early(struct drm_i915_private *dev_priv)
{
- struct intel_guc *guc = &dev_priv->guc;
-
- intel_guc_ct_init_early(&guc->ct);
-
- mutex_init(&guc->send_mutex);
- guc->send = intel_guc_send_nop;
- guc->notify = gen8_guc_raise_irq;
-}
-
-static void fetch_uc_fw(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_i915_gem_object *obj;
- const struct firmware *fw = NULL;
- struct uc_css_header *css;
- size_t size;
- int err;
-
- if (!uc_fw->path)
- return;
-
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
-
- DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- err = request_firmware(&fw, uc_fw->path, &pdev->dev);
- if (err)
- goto fail;
- if (!fw)
- goto fail;
-
- DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
- uc_fw->path, fw);
-
- /* Check the size of the blob before examining buffer contents */
- if (fw->size < sizeof(struct uc_css_header)) {
- DRM_NOTE("Firmware header is missing\n");
- goto fail;
- }
-
- css = (struct uc_css_header *)fw->data;
-
- /* Firmware bits always start from header */
- uc_fw->header_offset = 0;
- uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
- css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
-
- if (uc_fw->header_size != sizeof(struct uc_css_header)) {
- DRM_NOTE("CSS header definition mismatch\n");
- goto fail;
- }
-
- /* then, uCode */
- uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
- uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
-
- /* now RSA */
- if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
- DRM_NOTE("RSA key size is bad\n");
- goto fail;
- }
- uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
- uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
-
- /* At least, it should have header, uCode and RSA. Size of all three. */
- size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
- if (fw->size < size) {
- DRM_NOTE("Missing firmware components\n");
- goto fail;
- }
-
- /*
- * The GuC firmware image has the version number embedded at a
- * well-known offset within the firmware blob; note that major / minor
- * version are TWO bytes each (i.e. u16), although all pointers and
- * offsets are defined in terms of bytes (u8).
- */
- switch (uc_fw->type) {
- case INTEL_UC_FW_TYPE_GUC:
- /* Header and uCode will be loaded to WOPCM. Size of the two. */
- size = uc_fw->header_size + uc_fw->ucode_size;
-
- /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
- if (size > intel_guc_wopcm_size(dev_priv)) {
- DRM_ERROR("Firmware is too large to fit in WOPCM\n");
- goto fail;
- }
- uc_fw->major_ver_found = css->guc.sw_version >> 16;
- uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
- break;
-
- case INTEL_UC_FW_TYPE_HUC:
- uc_fw->major_ver_found = css->huc.sw_version >> 16;
- uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
- break;
-
- default:
- DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
- err = -ENOEXEC;
- goto fail;
- }
-
- if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
- DRM_NOTE("Skipping %s firmware version check\n",
- intel_uc_fw_type_repr(uc_fw->type));
- } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
- uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- DRM_NOTE("%s firmware version %d.%d, required %d.%d\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- err = -ENOEXEC;
- goto fail;
- }
-
- DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-
- obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto fail;
- }
-
- uc_fw->obj = obj;
- uc_fw->size = fw->size;
-
- DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
- uc_fw->obj);
-
- release_firmware(fw);
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
- return;
-
-fail:
- DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
- uc_fw->path, err);
- DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
- err, fw, uc_fw->obj);
-
- release_firmware(fw); /* OK even if fw is NULL */
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
+ intel_guc_init_early(&dev_priv->guc);
}
void intel_uc_init_fw(struct drm_i915_private *dev_priv)
{
- fetch_uc_fw(dev_priv, &dev_priv->huc.fw);
- fetch_uc_fw(dev_priv, &dev_priv->guc.fw);
+ intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
+ intel_uc_fw_fetch(dev_priv, &dev_priv->guc.fw);
}
void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
{
- __intel_uc_fw_fini(&dev_priv->guc.fw);
- __intel_uc_fw_fini(&dev_priv->huc.fw);
+ intel_uc_fw_fini(&dev_priv->guc.fw);
+ intel_uc_fw_fini(&dev_priv->huc.fw);
}
-static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
-{
- GEM_BUG_ON(!guc->send_regs.base);
- GEM_BUG_ON(!guc->send_regs.count);
- GEM_BUG_ON(i >= guc->send_regs.count);
-
- return _MMIO(guc->send_regs.base + 4 * i);
-}
-
-static void guc_init_send_regs(struct intel_guc *guc)
+/**
+ * intel_uc_init_mmio - setup uC MMIO access
+ *
+ * @dev_priv: device private
+ *
+ * Set up the minimal state necessary for MMIO accesses later in the
+ * initialization sequence.
+ */
+void intel_uc_init_mmio(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- enum forcewake_domains fw_domains = 0;
- unsigned int i;
-
- guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
- guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
-
- for (i = 0; i < guc->send_regs.count; i++) {
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
- guc_send_reg(guc, i),
- FW_REG_READ | FW_REG_WRITE);
- }
- guc->send_regs.fw_domains = fw_domains;
+ intel_guc_init_send_regs(&dev_priv->guc);
}
static void guc_capture_load_err_log(struct intel_guc *guc)
{
- if (!guc->log.vma || i915.guc_log_level < 0)
+ if (!guc->log.vma || i915_modparams.guc_log_level < 0)
return;
if (!guc->load_err_log)
@@ -309,8 +132,6 @@ static int guc_enable_communication(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- guc_init_send_regs(guc);
-
if (HAS_GUC_CT(dev_priv))
return intel_guc_enable_ct(guc);
@@ -333,7 +154,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
struct intel_guc *guc = &dev_priv->guc;
int ret, attempts;
- if (!i915.enable_guc_loading)
+ if (!i915_modparams.enable_guc_loading)
return 0;
guc_disable_communication(guc);
@@ -342,7 +163,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
/* We need to notify the guc whenever we change the GGTT */
i915_ggtt_enable_guc(dev_priv);
- if (i915.enable_guc_submission) {
+ if (i915_modparams.enable_guc_submission) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
@@ -374,7 +195,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
goto err_submission;
intel_huc_init_hw(&dev_priv->huc);
- ret = intel_guc_init_hw(&dev_priv->guc);
+ intel_guc_init_params(guc);
+ ret = intel_guc_fw_upload(guc);
if (ret == 0 || ret != -EAGAIN)
break;
@@ -390,9 +212,9 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto err_log_capture;
- intel_guc_auth_huc(dev_priv);
- if (i915.enable_guc_submission) {
- if (i915.guc_log_level >= 0)
+ intel_huc_auth(&dev_priv->huc);
+ if (i915_modparams.enable_guc_submission) {
+ if (i915_modparams.guc_log_level >= 0)
gen9_enable_guc_interrupts(dev_priv);
ret = i915_guc_submission_enable(dev_priv);
@@ -400,6 +222,12 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
goto err_interrupts;
}
+ dev_info(dev_priv->drm.dev, "GuC %s (firmware %s [version %u.%u])\n",
+ i915_modparams.enable_guc_submission ? "submission enabled" :
+ "loaded",
+ guc->fw.path,
+ guc->fw.major_ver_found, guc->fw.minor_ver_found);
+
return 0;
/*
@@ -417,24 +245,26 @@ err_interrupts:
err_log_capture:
guc_capture_load_err_log(guc);
err_submission:
- if (i915.enable_guc_submission)
+ if (i915_modparams.enable_guc_submission)
i915_guc_submission_fini(dev_priv);
err_guc:
i915_ggtt_disable_guc(dev_priv);
- DRM_ERROR("GuC init failed\n");
- if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1)
+ if (i915_modparams.enable_guc_loading > 1 ||
+ i915_modparams.enable_guc_submission > 1) {
+ DRM_ERROR("GuC init failed. Firmware loading disabled.\n");
ret = -EIO;
- else
+ } else {
+ DRM_NOTE("GuC init failed. Firmware loading disabled.\n");
ret = 0;
+ }
- if (i915.enable_guc_submission) {
- i915.enable_guc_submission = 0;
+ if (i915_modparams.enable_guc_submission) {
+ i915_modparams.enable_guc_submission = 0;
DRM_NOTE("Falling back from GuC submission to execlist mode\n");
}
- i915.enable_guc_loading = 0;
- DRM_NOTE("GuC firmware loading disabled\n");
+ i915_modparams.enable_guc_loading = 0;
return ret;
}
@@ -443,97 +273,18 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
{
guc_free_load_err_log(&dev_priv->guc);
- if (!i915.enable_guc_loading)
+ if (!i915_modparams.enable_guc_loading)
return;
- if (i915.enable_guc_submission)
+ if (i915_modparams.enable_guc_submission)
i915_guc_submission_disable(dev_priv);
guc_disable_communication(&dev_priv->guc);
- if (i915.enable_guc_submission) {
+ if (i915_modparams.enable_guc_submission) {
gen9_disable_guc_interrupts(dev_priv);
i915_guc_submission_fini(dev_priv);
}
i915_ggtt_disable_guc(dev_priv);
}
-
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
-{
- WARN(1, "Unexpected send: action=%#x\n", *action);
- return -ENODEV;
-}
-
-/*
- * This function implements the MMIO based host to GuC interface.
- */
-int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 status;
- int i;
- int ret;
-
- GEM_BUG_ON(!len);
- GEM_BUG_ON(len > guc->send_regs.count);
-
- /* If CT is available, we expect to use MMIO only during init/fini */
- GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
- *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
- *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
-
- mutex_lock(&guc->send_mutex);
- intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);
-
- for (i = 0; i < len; i++)
- I915_WRITE(guc_send_reg(guc, i), action[i]);
-
- POSTING_READ(guc_send_reg(guc, i - 1));
-
- intel_guc_notify(guc);
-
- /*
- * No GuC command should ever take longer than 10ms.
- * Fast commands should still complete in 10us.
- */
- ret = __intel_wait_for_register_fw(dev_priv,
- guc_send_reg(guc, 0),
- INTEL_GUC_RECV_MASK,
- INTEL_GUC_RECV_MASK,
- 10, 10, &status);
- if (status != INTEL_GUC_STATUS_SUCCESS) {
- /*
- * Either the GuC explicitly returned an error (which
- * we convert to -EIO here) or no response at all was
- * received within the timeout limit (-ETIMEDOUT)
- */
- if (ret != -ETIMEDOUT)
- ret = -EIO;
-
- DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
- " ret=%d status=0x%08X response=0x%08X\n",
- action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
- }
-
- intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
- mutex_unlock(&guc->send_mutex);
-
- return ret;
-}
-
-int intel_guc_sample_forcewake(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 action[2];
-
- action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
- /* WaRsDisableCoarsePowerGating:skl,bxt */
- if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
- action[1] = 0;
- else
- /* bit 0 and 1 are for Render and Media domain separately */
- action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
-
- return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
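A rough ordering sketch (not part of this patch) of where the uC entry points touched above are assumed to sit during driver bring-up; the actual call sites are elsewhere in the driver and not shown in this diff.

static void example_uc_bringup(struct drm_i915_private *dev_priv)
{
        intel_uc_init_early(dev_priv);  /* before any MMIO; sets up GuC early state */
        intel_uc_init_mmio(dev_priv);   /* once the MMIO BAR is mapped */
        intel_uc_init_fw(dev_priv);     /* fetch the HuC/GuC blobs */

        /* Uploads firmware and, optionally, enables GuC submission;
         * on failure the error path above falls back to execlists. */
        intel_uc_init_hw(dev_priv);
}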
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 22ae52b17b0f..e18d3bb02088 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -24,256 +24,15 @@
#ifndef _INTEL_UC_H_
#define _INTEL_UC_H_
-#include "intel_guc_fwif.h"
-#include "i915_guc_reg.h"
-#include "intel_ringbuffer.h"
-#include "intel_guc_ct.h"
-#include "i915_vma.h"
+#include "intel_guc.h"
+#include "intel_huc.h"
-struct drm_i915_gem_request;
-
-/*
- * This structure primarily describes the GEM object shared with the GuC.
- * The specs sometimes refer to this object as a "GuC context", but we use
- * the term "client" to avoid confusion with hardware contexts. This
- * GEM object is held for the entire lifetime of our interaction with
- * the GuC, being allocated before the GuC is loaded with its firmware.
- * Because there's no way to update the address used by the GuC after
- * initialisation, the shared object must stay pinned into the GGTT as
- * long as the GuC is in use. We also keep the first page (only) mapped
- * into kernel address space, as it includes shared data that must be
- * updated on every request submission.
- *
- * The single GEM object described here is actually made up of several
- * separate areas, as far as the GuC is concerned. The first page (kept
- * kmap'd) includes the "process descriptor" which holds sequence data for
- * the doorbell, and one cacheline which actually *is* the doorbell; a
- * write to this will "ring the doorbell" (i.e. send an interrupt to the
- * GuC). The subsequent pages of the client object constitute the work
- * queue (a circular array of work items), again described in the process
- * descriptor. Work queue pages are mapped momentarily as required.
- *
- * We also keep a few statistics on failures. Ideally, these should all
- * be zero!
- * no_wq_space: times that the submission pre-check found no space was
- * available in the work queue (note, the queue is shared,
- * not per-engine). It is OK for this to be nonzero, but
- * it should not be huge!
- * b_fail: failed to ring the doorbell. This should never happen, unless
- * somehow the hardware misbehaves, or maybe if the GuC firmware
- * crashes? We probably need to reset the GPU to recover.
- * retcode: errno from last guc_submit()
- */
-struct i915_guc_client {
- struct i915_vma *vma;
- void *vaddr;
- struct i915_gem_context *owner;
- struct intel_guc *guc;
-
- uint32_t engines; /* bitmap of (host) engine ids */
- uint32_t priority;
- u32 stage_id;
- uint32_t proc_desc_offset;
-
- u16 doorbell_id;
- unsigned long doorbell_offset;
- u32 doorbell_cookie;
-
- spinlock_t wq_lock;
- uint32_t wq_offset;
- uint32_t wq_size;
- uint32_t wq_tail;
- uint32_t wq_rsvd;
- uint32_t no_wq_space;
-
- /* Per-engine counts of GuC submissions */
- uint64_t submissions[I915_NUM_ENGINES];
-};
-
-enum intel_uc_fw_status {
- INTEL_UC_FIRMWARE_FAIL = -1,
- INTEL_UC_FIRMWARE_NONE = 0,
- INTEL_UC_FIRMWARE_PENDING,
- INTEL_UC_FIRMWARE_SUCCESS
-};
-
-/* User-friendly representation of an enum */
-static inline
-const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
-{
- switch (status) {
- case INTEL_UC_FIRMWARE_FAIL:
- return "FAIL";
- case INTEL_UC_FIRMWARE_NONE:
- return "NONE";
- case INTEL_UC_FIRMWARE_PENDING:
- return "PENDING";
- case INTEL_UC_FIRMWARE_SUCCESS:
- return "SUCCESS";
- }
- return "<invalid>";
-}
-
-enum intel_uc_fw_type {
- INTEL_UC_FW_TYPE_GUC,
- INTEL_UC_FW_TYPE_HUC
-};
-
-/* User-friendly representation of an enum */
-static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
-{
- switch (type) {
- case INTEL_UC_FW_TYPE_GUC:
- return "GuC";
- case INTEL_UC_FW_TYPE_HUC:
- return "HuC";
- }
- return "uC";
-}
-
-/*
- * This structure encapsulates all the data needed during the process
- * of fetching, caching, and loading the firmware image into the GuC.
- */
-struct intel_uc_fw {
- const char *path;
- size_t size;
- struct drm_i915_gem_object *obj;
- enum intel_uc_fw_status fetch_status;
- enum intel_uc_fw_status load_status;
-
- uint16_t major_ver_wanted;
- uint16_t minor_ver_wanted;
- uint16_t major_ver_found;
- uint16_t minor_ver_found;
-
- enum intel_uc_fw_type type;
- uint32_t header_size;
- uint32_t header_offset;
- uint32_t rsa_size;
- uint32_t rsa_offset;
- uint32_t ucode_size;
- uint32_t ucode_offset;
-};
-
-struct intel_guc_log {
- uint32_t flags;
- struct i915_vma *vma;
- /* The runtime stuff gets created only when GuC logging gets enabled */
- struct {
- void *buf_addr;
- struct workqueue_struct *flush_wq;
- struct work_struct flush_work;
- struct rchan *relay_chan;
- } runtime;
- /* logging related stats */
- u32 capture_miss_count;
- u32 flush_interrupt_count;
- u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
- u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
- u32 flush_count[GUC_MAX_LOG_BUFFER];
-};
-
-struct intel_guc {
- struct intel_uc_fw fw;
- struct intel_guc_log log;
- struct intel_guc_ct ct;
-
- /* Log snapshot if GuC errors during load */
- struct drm_i915_gem_object *load_err_log;
-
- /* intel_guc_recv interrupt related state */
- bool interrupts_enabled;
-
- struct i915_vma *ads_vma;
- struct i915_vma *stage_desc_pool;
- void *stage_desc_pool_vaddr;
- struct ida stage_ids;
-
- struct i915_guc_client *execbuf_client;
-
- DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
- uint32_t db_cacheline; /* Cyclic counter mod pagesize */
-
- /* GuC's FW specific registers used in MMIO send */
- struct {
- u32 base;
- unsigned int count;
- enum forcewake_domains fw_domains;
- } send_regs;
-
- /* To serialize the intel_guc_send actions */
- struct mutex send_mutex;
-
- /* GuC's FW specific send function */
- int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
-
- /* GuC's FW specific notify function */
- void (*notify)(struct intel_guc *guc);
-};
-
-struct intel_huc {
- /* Generic uC firmware management */
- struct intel_uc_fw fw;
-
- /* HuC-specific additions */
-};
-
-/* intel_uc.c */
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
+void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
void intel_uc_init_fw(struct drm_i915_private *dev_priv);
void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
-int intel_guc_sample_forcewake(struct intel_guc *guc);
-int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
-int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
-
-static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
-{
- return guc->send(guc, action, len);
-}
-
-static inline void intel_guc_notify(struct intel_guc *guc)
-{
- guc->notify(guc);
-}
-
-/* intel_guc_loader.c */
-int intel_guc_select_fw(struct intel_guc *guc);
-int intel_guc_init_hw(struct intel_guc *guc);
-int intel_guc_suspend(struct drm_i915_private *dev_priv);
-int intel_guc_resume(struct drm_i915_private *dev_priv);
-u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
-
-/* i915_guc_submission.c */
-int i915_guc_submission_init(struct drm_i915_private *dev_priv);
-int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
-int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
-void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
-void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
-struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
-
-/* intel_guc_log.c */
-int intel_guc_log_create(struct intel_guc *guc);
-void intel_guc_log_destroy(struct intel_guc *guc);
-int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
-void i915_guc_log_register(struct drm_i915_private *dev_priv);
-void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
-
-static inline u32 guc_ggtt_offset(struct i915_vma *vma)
-{
- u32 offset = i915_ggtt_offset(vma);
- GEM_BUG_ON(offset < GUC_WOPCM_TOP);
- GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
- return offset;
-}
-
-/* intel_huc.c */
-void intel_huc_select_fw(struct intel_huc *huc);
-void intel_huc_init_hw(struct intel_huc *huc);
-void intel_guc_auth_huc(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
new file mode 100644
index 000000000000..973888e94cba
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright © 2016-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <drm/drm_print.h>
+
+#include "intel_uc_fw.h"
+#include "i915_drv.h"
+
+/**
+ * intel_uc_fw_fetch - fetch uC firmware
+ *
+ * @dev_priv: device private
+ * @uc_fw: uC firmware
+ *
+ * Fetch uC firmware into GEM obj.
+ */
+void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
+ struct intel_uc_fw *uc_fw)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct drm_i915_gem_object *obj;
+ const struct firmware *fw = NULL;
+ struct uc_css_header *css;
+ size_t size;
+ int err;
+
+ DRM_DEBUG_DRIVER("%s fw fetch %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+
+ if (!uc_fw->path)
+ return;
+
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
+ DRM_DEBUG_DRIVER("%s fw fetch %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uc_fw_status_repr(uc_fw->fetch_status));
+
+ err = request_firmware(&fw, uc_fw->path, &pdev->dev);
+ if (err) {
+ DRM_DEBUG_DRIVER("%s fw request_firmware err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ goto fail;
+ }
+
+ DRM_DEBUG_DRIVER("%s fw size %zu ptr %p\n",
+ intel_uc_fw_type_repr(uc_fw->type), fw->size, fw);
+
+ /* Check the size of the blob before examining buffer contents */
+ if (fw->size < sizeof(struct uc_css_header)) {
+ DRM_WARN("%s: Unexpected firmware size (%zu, min %zu)\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ fw->size, sizeof(struct uc_css_header));
+ err = -ENODATA;
+ goto fail;
+ }
+
+ css = (struct uc_css_header *)fw->data;
+
+ /* Firmware bits always start from header */
+ uc_fw->header_offset = 0;
+ uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
+ css->key_size_dw - css->exponent_size_dw) *
+ sizeof(u32);
+
+ if (uc_fw->header_size != sizeof(struct uc_css_header)) {
+ DRM_WARN("%s: Mismatched firmware header definition\n",
+ intel_uc_fw_type_repr(uc_fw->type));
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ /* then, uCode */
+ uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
+ uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+
+ /* Header and uCode will be loaded to WOPCM */
+ size = uc_fw->header_size + uc_fw->ucode_size;
+ if (size > intel_guc_wopcm_size(dev_priv)) {
+ DRM_WARN("%s: Firmware is too large to fit in WOPCM\n",
+ intel_uc_fw_type_repr(uc_fw->type));
+ err = -E2BIG;
+ goto fail;
+ }
+
+ /* now RSA */
+ if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
+ DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n",
+ intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw);
+ err = -ENOEXEC;
+ goto fail;
+ }
+ uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
+ uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+
+ /* The firmware should contain at least the header, uCode and RSA; check the combined size. */
+ size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
+ if (fw->size < size) {
+ DRM_WARN("%s: Truncated firmware (%zu, expected %zu)\n",
+ intel_uc_fw_type_repr(uc_fw->type), fw->size, size);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ /*
+ * The GuC firmware image has the version number embedded at a
+ * well-known offset within the firmware blob; note that major / minor
+ * version are TWO bytes each (i.e. u16), although all pointers and
+ * offsets are defined in terms of bytes (u8).
+ */
+ switch (uc_fw->type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ uc_fw->major_ver_found = css->guc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
+ break;
+
+ case INTEL_UC_FW_TYPE_HUC:
+ uc_fw->major_ver_found = css->huc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
+ break;
+
+ default:
+ MISSING_CASE(uc_fw->type);
+ break;
+ }
+
+ DRM_DEBUG_DRIVER("%s fw version %u.%u (wanted %u.%u)\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+
+ if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
+ DRM_NOTE("%s: Skipping firmware version check\n",
+ intel_uc_fw_type_repr(uc_fw->type));
+ } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+ uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+ DRM_NOTE("%s: Wrong firmware version (%u.%u, required %u.%u)\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ DRM_DEBUG_DRIVER("%s fw object_create err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ goto fail;
+ }
+
+ uc_fw->obj = obj;
+ uc_fw->size = fw->size;
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
+ DRM_DEBUG_DRIVER("%s fw fetch %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uc_fw_status_repr(uc_fw->fetch_status));
+
+ release_firmware(fw);
+ return;
+
+fail:
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
+ DRM_DEBUG_DRIVER("%s fw fetch %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uc_fw_status_repr(uc_fw->fetch_status));
+
+ DRM_WARN("%s: Failed to fetch firmware %s (error %d)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+ DRM_INFO("%s: Firmware can be downloaded from %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
+
+ release_firmware(fw); /* OK even if fw is NULL */
+}
+
+/**
+ * intel_uc_fw_upload - load uC firmware using custom loader
+ *
+ * @uc_fw: uC firmware
+ * @xfer: custom uC firmware loader function
+ *
+ * Loads uC firmware using custom loader and updates internal flags.
+ */
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
+ int (*xfer)(struct intel_uc_fw *uc_fw,
+ struct i915_vma *vma))
+{
+ struct i915_vma *vma;
+ int err;
+
+ DRM_DEBUG_DRIVER("%s fw load %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+
+ if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return -EIO;
+
+ uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
+ DRM_DEBUG_DRIVER("%s fw load %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uc_fw_status_repr(uc_fw->load_status));
+
+ /* Pin object with firmware */
+ err = i915_gem_object_set_to_gtt_domain(uc_fw->obj, false);
+ if (err) {
+ DRM_DEBUG_DRIVER("%s fw set-domain err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ goto fail;
+ }
+
+ vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ DRM_DEBUG_DRIVER("%s fw ggtt-pin err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ goto fail;
+ }
+
+ /* Call custom loader */
+ err = xfer(uc_fw, vma);
+
+ /*
+ * We keep the object pages for reuse during resume. But we can unpin it
+ * now that DMA has completed, so it doesn't continue to take up space.
+ */
+ i915_vma_unpin(vma);
+
+ if (err)
+ goto fail;
+
+ uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
+ DRM_DEBUG_DRIVER("%s fw load %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uc_fw_status_repr(uc_fw->load_status));
+
+ DRM_INFO("%s: Loaded firmware %s (version %u.%u)\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->path,
+ uc_fw->major_ver_found, uc_fw->minor_ver_found);
+
+ return 0;
+
+fail:
+ uc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
+ DRM_DEBUG_DRIVER("%s fw load %s\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uc_fw_status_repr(uc_fw->load_status));
+
+ DRM_WARN("%s: Failed to load firmware %s (error %d)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+
+ return err;
+}
+
+/**
+ * intel_uc_fw_fini - cleanup uC firmware
+ *
+ * @uc_fw: uC firmware
+ *
+ * Cleans up uC firmware by releasing the firmware GEM obj.
+ */
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = fetch_and_zero(&uc_fw->obj);
+ if (obj)
+ i915_gem_object_put(obj);
+
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+}
+
+/**
+ * intel_uc_fw_dump - dump information about uC firmware
+ * @uc_fw: uC firmware
+ * @p: the &drm_printer
+ *
+ * Pretty printer for uC firmware.
+ */
+void intel_uc_fw_dump(struct intel_uc_fw *uc_fw, struct drm_printer *p)
+{
+ drm_printf(p, "%s firmware: %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+ drm_printf(p, "\tstatus: fetch %s, load %s\n",
+ intel_uc_fw_status_repr(uc_fw->fetch_status),
+ intel_uc_fw_status_repr(uc_fw->load_status));
+ drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
+ uc_fw->major_ver_found, uc_fw->minor_ver_found);
+ drm_printf(p, "\theader: offset %u, size %u\n",
+ uc_fw->header_offset, uc_fw->header_size);
+ drm_printf(p, "\tuCode: offset %u, size %u\n",
+ uc_fw->ucode_offset, uc_fw->ucode_size);
+ drm_printf(p, "\tRSA: offset %u, size %u\n",
+ uc_fw->rsa_offset, uc_fw->rsa_size);
+}
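A minimal sketch (not part of this patch) of how the new intel_uc_fw_upload() is meant to be driven: the caller passes an xfer() callback that only programs the hardware DMA, because the helper has already pinned the firmware object into the GGTT. example_hw_xfer() and example_fw_load() are made-up names.

static int example_hw_xfer(struct intel_uc_fw *uc_fw, struct i915_vma *vma)
{
        /*
         * Program the uC DMA engine here: the source is the pinned vma,
         * and the header/uCode/RSA offsets and sizes come from uc_fw.
         */
        return 0;
}

static int example_fw_load(struct intel_uc_fw *uc_fw)
{
        /* Fails with -EIO if the earlier fetch did not succeed. */
        return intel_uc_fw_upload(uc_fw, example_hw_xfer);
}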
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
new file mode 100644
index 000000000000..132903669391
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_UC_FW_H_
+#define _INTEL_UC_FW_H_
+
+struct drm_printer;
+struct drm_i915_private;
+struct i915_vma;
+
+/* Home of GuC, HuC and DMC firmwares */
+#define INTEL_UC_FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware"
+
+enum intel_uc_fw_status {
+ INTEL_UC_FIRMWARE_FAIL = -1,
+ INTEL_UC_FIRMWARE_NONE = 0,
+ INTEL_UC_FIRMWARE_PENDING,
+ INTEL_UC_FIRMWARE_SUCCESS
+};
+
+enum intel_uc_fw_type {
+ INTEL_UC_FW_TYPE_GUC,
+ INTEL_UC_FW_TYPE_HUC
+};
+
+/*
+ * This structure encapsulates all the data needed during the process
+ * of fetching, caching, and loading the firmware image into the uC.
+ */
+struct intel_uc_fw {
+ const char *path;
+ size_t size;
+ struct drm_i915_gem_object *obj;
+ enum intel_uc_fw_status fetch_status;
+ enum intel_uc_fw_status load_status;
+
+ /*
+ * The firmware build process generates a version header file with the major
+ * and minor versions defined. These versions are built into the CSS header
+ * of the firmware blob. The i915 driver sets the minimal firmware version
+ * required per platform.
+ */
+ u16 major_ver_wanted;
+ u16 minor_ver_wanted;
+ u16 major_ver_found;
+ u16 minor_ver_found;
+
+ enum intel_uc_fw_type type;
+ u32 header_size;
+ u32 header_offset;
+ u32 rsa_size;
+ u32 rsa_offset;
+ u32 ucode_size;
+ u32 ucode_offset;
+};
+
+static inline
+const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
+{
+ switch (status) {
+ case INTEL_UC_FIRMWARE_FAIL:
+ return "FAIL";
+ case INTEL_UC_FIRMWARE_NONE:
+ return "NONE";
+ case INTEL_UC_FIRMWARE_PENDING:
+ return "PENDING";
+ case INTEL_UC_FIRMWARE_SUCCESS:
+ return "SUCCESS";
+ }
+ return "<invalid>";
+}
+
+static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
+{
+ switch (type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ return "GuC";
+ case INTEL_UC_FW_TYPE_HUC:
+ return "HuC";
+ }
+ return "uC";
+}
+
+static inline
+void intel_uc_fw_init(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
+{
+ uc_fw->path = NULL;
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+ uc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
+ uc_fw->type = type;
+}
+
+void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
+ struct intel_uc_fw *uc_fw);
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
+ int (*xfer)(struct intel_uc_fw *uc_fw,
+ struct i915_vma *vma));
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
+void intel_uc_fw_dump(struct intel_uc_fw *uc_fw, struct drm_printer *p);
+
+#endif
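A minimal lifecycle sketch (not part of this patch) for a struct intel_uc_fw as exposed by this header; the firmware path below is a made-up example.

static void example_uc_fw_lifecycle(struct drm_i915_private *dev_priv)
{
        struct intel_uc_fw fw;

        /* Clear the status fields and record the firmware type. */
        intel_uc_fw_init(&fw, INTEL_UC_FW_TYPE_HUC);
        fw.path = "i915/example_huc.bin";       /* hypothetical blob name */

        /* request_firmware(), CSS header checks, copy into a GEM object. */
        intel_uc_fw_fetch(dev_priv, &fw);

        /* ... intel_uc_fw_upload(&fw, xfer) runs later, during HW init ... */

        /* Release the backing GEM object again. */
        intel_uc_fw_fini(&fw);
}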
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 1d7b879cc68c..20e3c65c0999 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -436,7 +436,8 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
- i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
+ i915_modparams.enable_rc6 =
+ sanitize_rc6_option(dev_priv, i915_modparams.enable_rc6);
/* BIOS often leaves RC6 enabled, but disable it for hw init */
intel_sanitize_gt_powersave(dev_priv);
@@ -490,6 +491,57 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
}
/**
+ * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
+ * @dev_priv: i915 device instance
+ *
+ * This function is a wrapper around intel_uncore_forcewake_get() to acquire
+ * the GT powerwell and in the process disable our debugging for the
+ * duration of userspace's bypass.
+ */
+void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->uncore.lock);
+ if (!dev_priv->uncore.user_forcewake.count++) {
+ intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
+
+ /* Save and disable mmio debugging for the user bypass */
+ dev_priv->uncore.user_forcewake.saved_mmio_check =
+ dev_priv->uncore.unclaimed_mmio_check;
+ dev_priv->uncore.user_forcewake.saved_mmio_debug =
+ i915_modparams.mmio_debug;
+
+ dev_priv->uncore.unclaimed_mmio_check = 0;
+ i915_modparams.mmio_debug = 0;
+ }
+ spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
+/**
+ * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
+ * @dev_priv: i915 device instance
+ *
+ * This function complements intel_uncore_forcewake_user_get() and releases
+ * the GT powerwell taken on behalf of the userspace bypass.
+ */
+void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->uncore.lock);
+ if (!--dev_priv->uncore.user_forcewake.count) {
+ if (intel_uncore_unclaimed_mmio(dev_priv))
+ dev_info(dev_priv->drm.dev,
+ "Invalid mmio detected during user access\n");
+
+ dev_priv->uncore.unclaimed_mmio_check =
+ dev_priv->uncore.user_forcewake.saved_mmio_check;
+ i915_modparams.mmio_debug =
+ dev_priv->uncore.user_forcewake.saved_mmio_debug;
+
+ intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+ }
+ spin_unlock_irq(&dev_priv->uncore.lock);
+}
+
+/**
* intel_uncore_forcewake_get__locked - grab forcewake domain references
* @dev_priv: i915 device instance
* @fw_domains: forcewake domains to get reference on
@@ -574,7 +626,23 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
if (!dev_priv->uncore.funcs.force_wake_get)
return;
- WARN_ON(dev_priv->uncore.fw_domains_active);
+ WARN(dev_priv->uncore.fw_domains_active,
+ "Expected all fw_domains to be inactive, but %08x are still on\n",
+ dev_priv->uncore.fw_domains_active);
+}
+
+void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
+ if (!dev_priv->uncore.funcs.force_wake_get)
+ return;
+
+ assert_rpm_wakelock_held(dev_priv);
+
+ fw_domains &= dev_priv->uncore.fw_domains;
+ WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
+ "Expected %08x fw_domains to be active, but %08x are off\n",
+ fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
}
/* We give fast paths for the really cool registers */
@@ -790,7 +858,8 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
"Unclaimed %s register 0x%x\n",
read ? "read from" : "write to",
i915_mmio_reg_offset(reg)))
- i915.mmio_debug--; /* Only report the first N failures */
+ /* Only report the first N failures */
+ i915_modparams.mmio_debug--;
}
static inline void
@@ -799,7 +868,7 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
const bool read,
const bool before)
{
- if (likely(!i915.mmio_debug))
+ if (likely(!i915_modparams.mmio_debug))
return;
__unclaimed_reg_debug(dev_priv, reg, read, before);
@@ -1241,102 +1310,104 @@ void intel_uncore_fini(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_reset(dev_priv, false);
}
-#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
-
-static const struct register_whitelist {
- i915_reg_t offset_ldw, offset_udw;
- uint32_t size;
- /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
- uint32_t gen_bitmask;
-} whitelist[] = {
- { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
- .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
- .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
-};
+static const struct reg_whitelist {
+ i915_reg_t offset_ldw;
+ i915_reg_t offset_udw;
+ u16 gen_mask;
+ u8 size;
+} reg_read_whitelist[] = { {
+ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
+ .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
+ .gen_mask = INTEL_GEN_MASK(4, 10),
+ .size = 8
+} };
int i915_reg_read_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_reg_read *reg = data;
- struct register_whitelist const *entry = whitelist;
- unsigned size;
- i915_reg_t offset_ldw, offset_udw;
- int i, ret = 0;
-
- for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
- if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
- (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
+ struct reg_whitelist const *entry;
+ unsigned int flags;
+ int remain;
+ int ret = 0;
+
+ entry = reg_read_whitelist;
+ remain = ARRAY_SIZE(reg_read_whitelist);
+ while (remain) {
+ u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);
+
+ GEM_BUG_ON(!is_power_of_2(entry->size));
+ GEM_BUG_ON(entry->size > 8);
+ GEM_BUG_ON(entry_offset & (entry->size - 1));
+
+ if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
+ entry_offset == (reg->offset & -entry->size))
break;
+ entry++;
+ remain--;
}
- if (i == ARRAY_SIZE(whitelist))
+ if (!remain)
return -EINVAL;
- /* We use the low bits to encode extra flags as the register should
- * be naturally aligned (and those that are not so aligned merely
- * limit the available flags for that register).
- */
- offset_ldw = entry->offset_ldw;
- offset_udw = entry->offset_udw;
- size = entry->size;
- size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);
+ flags = reg->offset & (entry->size - 1);
intel_runtime_pm_get(dev_priv);
-
- switch (size) {
- case 8 | 1:
- reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
- break;
- case 8:
- reg->val = I915_READ64(offset_ldw);
- break;
- case 4:
- reg->val = I915_READ(offset_ldw);
- break;
- case 2:
- reg->val = I915_READ16(offset_ldw);
- break;
- case 1:
- reg->val = I915_READ8(offset_ldw);
- break;
- default:
+ if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
+ reg->val = I915_READ64_2x32(entry->offset_ldw,
+ entry->offset_udw);
+ else if (entry->size == 8 && flags == 0)
+ reg->val = I915_READ64(entry->offset_ldw);
+ else if (entry->size == 4 && flags == 0)
+ reg->val = I915_READ(entry->offset_ldw);
+ else if (entry->size == 2 && flags == 0)
+ reg->val = I915_READ16(entry->offset_ldw);
+ else if (entry->size == 1 && flags == 0)
+ reg->val = I915_READ8(entry->offset_ldw);
+ else
ret = -EINVAL;
- goto out;
- }
-
-out:
intel_runtime_pm_put(dev_priv);
+
return ret;
}
-static void gen3_stop_rings(struct drm_i915_private *dev_priv)
+static void gen3_stop_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ const u32 base = engine->mmio_base;
+ const i915_reg_t mode = RING_MI_MODE(base);
+
+ I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
+ if (intel_wait_for_register_fw(dev_priv,
+ mode,
+ MODE_IDLE,
+ MODE_IDLE,
+ 500))
+ DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
+ engine->name);
+
+ I915_WRITE_FW(RING_CTL(base), 0);
+ I915_WRITE_FW(RING_HEAD(base), 0);
+ I915_WRITE_FW(RING_TAIL(base), 0);
+
+ /* Check acts as a post */
+ if (I915_READ_FW(RING_HEAD(base)) != 0)
+ DRM_DEBUG_DRIVER("%s: ring head not parked\n",
+ engine->name);
+}
+
+static void i915_stop_engines(struct drm_i915_private *dev_priv,
+ unsigned engine_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id) {
- const u32 base = engine->mmio_base;
- const i915_reg_t mode = RING_MI_MODE(base);
-
- I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
- if (intel_wait_for_register_fw(dev_priv,
- mode,
- MODE_IDLE,
- MODE_IDLE,
- 500))
- DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
- engine->name);
-
- I915_WRITE_FW(RING_CTL(base), 0);
- I915_WRITE_FW(RING_HEAD(base), 0);
- I915_WRITE_FW(RING_TAIL(base), 0);
-
- /* Check acts as a post */
- if (I915_READ_FW(RING_HEAD(base)) != 0)
- DRM_DEBUG_DRIVER("%s: ring head not parked\n",
- engine->name);
- }
+ if (INTEL_GEN(dev_priv) < 3)
+ return;
+
+ for_each_engine_masked(engine, dev_priv, engine_mask, id)
+ gen3_stop_engine(engine);
}
static bool i915_reset_complete(struct pci_dev *pdev)
@@ -1371,9 +1442,6 @@ static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- /* Stop engines before we reset; see g4x_do_reset() below for why. */
- gen3_stop_rings(dev_priv);
-
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
return wait_for(g4x_reset_complete(pdev), 500);
}
@@ -1388,12 +1456,6 @@ static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
- /* We stop engines, otherwise we might get failed reset and a
- * dead gpu (on elk).
- * WaMediaResetMainRingCleanup:ctg,elk (presumably)
- */
- gen3_stop_rings(dev_priv);
-
pci_write_config_byte(pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for(g4x_reset_complete(pdev), 500);
@@ -1662,7 +1724,7 @@ typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
- if (!i915.reset)
+ if (!i915_modparams.reset)
return NULL;
if (INTEL_INFO(dev_priv)->gen >= 8)
@@ -1683,22 +1745,34 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
- reset_func reset;
+ reset_func reset = intel_get_gpu_reset(dev_priv);
int retry;
int ret;
might_sleep();
- reset = intel_get_gpu_reset(dev_priv);
- if (reset == NULL)
- return -ENODEV;
-
/* If the power well sleeps during the reset, the reset
* request may be dropped and never completes (causing -EIO).
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
for (retry = 0; retry < 3; retry++) {
- ret = reset(dev_priv, engine_mask);
+
+ /* We stop engines, otherwise we might get failed reset and a
+ * dead gpu (on elk). Also, a gpu as modern as kbl can suffer
+ * from a system hang if a batchbuffer is in progress when
+ * the reset is issued, regardless of the READY_TO_RESET ack.
+ * Thus assume it is best to stop engines on all gens
+ * where we have a gpu reset.
+ *
+ * WaMediaResetMainRingCleanup:ctg,elk (presumably)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ i915_stop_engines(dev_priv, engine_mask);
+
+ ret = -ENODEV;
+ if (reset)
+ ret = reset(dev_priv, engine_mask);
if (ret != -ETIMEDOUT)
break;
@@ -1722,7 +1796,7 @@ bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
{
return (dev_priv->info.has_reset_engine &&
!dev_priv->guc.execbuf_client &&
- i915.reset >= 2);
+ i915_modparams.reset >= 2);
}
int intel_guc_reset(struct drm_i915_private *dev_priv)
@@ -1747,7 +1821,7 @@ bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
- if (unlikely(i915.mmio_debug ||
+ if (unlikely(i915_modparams.mmio_debug ||
dev_priv->uncore.unclaimed_mmio_check <= 0))
return false;
@@ -1755,7 +1829,7 @@ intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
DRM_DEBUG("Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
- i915.mmio_debug++;
+ i915_modparams.mmio_debug++;
dev_priv->uncore.unclaimed_mmio_check--;
return true;
}
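A minimal sketch (not part of this patch) of the user-forcewake bracket added above; the debugfs-style caller below is hypothetical.

static void example_user_register_access(struct drm_i915_private *dev_priv)
{
        /* Wake all GT power wells and mute unclaimed-mmio debugging. */
        intel_uncore_forcewake_user_get(dev_priv);

        /* ... userspace pokes registers directly for a while ... */

        /* Restore mmio debugging and drop the forcewake references. */
        intel_uncore_forcewake_user_put(dev_priv);
}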
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 5f90278da461..582771251b57 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -25,6 +25,12 @@
#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/hrtimer.h>
+
+#include "i915_reg.h"
+
struct drm_i915_private;
enum forcewake_domain_id {
@@ -102,6 +108,13 @@ struct intel_uncore {
i915_reg_t reg_ack;
} fw_domain[FW_DOMAIN_ID_COUNT];
+ struct {
+ unsigned int count;
+
+ int saved_mmio_check;
+ int saved_mmio_debug;
+ } user_forcewake;
+
int unclaimed_mmio_check;
};
@@ -124,6 +137,8 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
+void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
enum forcewake_domains
@@ -144,6 +159,9 @@ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
+void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv);
+void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv);
+
int intel_wait_for_register(struct drm_i915_private *dev_priv,
i915_reg_t reg,
u32 mask,
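The new user_forcewake counter with its saved_mmio_check/saved_mmio_debug fields, together with the intel_uncore_forcewake_user_get()/put() declarations above, imply a save/restore pairing: while userspace holds forcewake, the unclaimed-mmio checks are disarmed, and they are restored on release. Below is a minimal sketch of that pairing, assuming the locking and field names visible in this header; the patch's actual bodies live in intel_uncore.c and may differ.

/* Sketch only -- not the patch's actual implementation. */
void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!dev_priv->uncore.user_forcewake.count++) {
		intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

		/* Squelch the mmio debug checks while userspace holds forcewake */
		dev_priv->uncore.user_forcewake.saved_mmio_check =
			dev_priv->uncore.unclaimed_mmio_check;
		dev_priv->uncore.user_forcewake.saved_mmio_debug =
			i915_modparams.mmio_debug;
		dev_priv->uncore.unclaimed_mmio_check = 0;
		i915_modparams.mmio_debug = 0;
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}

void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!--dev_priv->uncore.user_forcewake.count) {
		/* Restore the checks saved in the matching _get() */
		dev_priv->uncore.unclaimed_mmio_check =
			dev_priv->uncore.user_forcewake.saved_mmio_check;
		i915_modparams.mmio_debug =
			dev_priv->uncore.user_forcewake.saved_mmio_debug;

		intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}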
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index a92e7762f596..f225c288a121 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -149,16 +149,19 @@ struct bdb_general_features {
u8 ssc_freq:1;
u8 enable_lfp_on_override:1;
u8 disable_ssc_ddt:1;
- u8 rsvd7:1;
+ u8 underscan_vga_timings:1;
u8 display_clock_mode:1;
- u8 rsvd8:1; /* finish byte */
+ u8 vbios_hotplug_support:1;
/* bits 3 */
u8 disable_smooth_vision:1;
u8 single_dvi:1;
- u8 rsvd9:1;
+ u8 rotate_180:1; /* 181 */
u8 fdi_rx_polarity_inverted:1;
- u8 rsvd10:4; /* finish byte */
+ u8 vbios_extended_mode:1; /* 160 */
+ u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160 */
+ u8 panel_best_fit_timing:1; /* 160 */
+ u8 ignore_strap_state:1; /* 160 */
/* bits 4 */
u8 legacy_monitor_detect;
@@ -167,9 +170,10 @@ struct bdb_general_features {
u8 int_crt_support:1;
u8 int_tv_support:1;
u8 int_efp_support:1;
- u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_enable:1; /* PCH attached eDP supports SSC */
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
- u8 rsvd11:3; /* finish byte */
+ u8 dp_ssc_dongle_supported:1;
+ u8 rsvd11:2; /* finish byte */
} __packed;
/* pre-915 */
@@ -206,6 +210,56 @@ struct bdb_general_features {
#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6
+#define DEVICE_TYPE_eDP 0x78C6
+
+#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
+#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
+#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
+#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
+#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
+#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
+#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
+#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
+#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
+#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
+#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
+#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
+#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
+#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
+#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
+
+/*
+ * Bits we care about when checking for DEVICE_TYPE_eDP. Depending on the
+ * system, the other bits may or may not be set for eDP outputs.
+ */
+#define DEVICE_TYPE_eDP_BITS \
+ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
+ DEVICE_TYPE_MIPI_OUTPUT | \
+ DEVICE_TYPE_COMPOSITE_OUTPUT | \
+ DEVICE_TYPE_DUAL_CHANNEL | \
+ DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+ DEVICE_TYPE_VIDEO_SIGNALING | \
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+ DEVICE_TYPE_ANALOG_OUTPUT)
+
+#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
+ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
+ DEVICE_TYPE_MIPI_OUTPUT | \
+ DEVICE_TYPE_COMPOSITE_OUTPUT | \
+ DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+ DEVICE_TYPE_VIDEO_SIGNALING | \
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+ DEVICE_TYPE_DIGITAL_OUTPUT | \
+ DEVICE_TYPE_ANALOG_OUTPUT)
+
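As a hedged usage sketch (the helper name below is made up for illustration, not part of this patch), the masks above are intended to be applied to both sides of the comparison so that the "don't care" bits of a VBT device_type are ignored when classifying a port:

/* Illustration only: classify a child device as eDP using the mask above. */
static bool child_dev_is_edp(const struct child_device_config *child)
{
	return (child->device_type & DEVICE_TYPE_eDP_BITS) ==
	       (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
}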
#define DEVICE_CFG_NONE 0x00
#define DEVICE_CFG_12BIT_DVOB 0x01
#define DEVICE_CFG_12BIT_DVOC 0x02
@@ -226,77 +280,134 @@ struct bdb_general_features {
#define DEVICE_WIRE_DVOB_MASTER 0x0d
#define DEVICE_WIRE_DVOC_MASTER 0x0e
+/* dvo_port pre BDB 155 */
#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
#define DEVICE_PORT_DVOB 0x01
#define DEVICE_PORT_DVOC 0x02
+/* dvo_port BDB 155+ */
+#define DVO_PORT_HDMIA 0
+#define DVO_PORT_HDMIB 1
+#define DVO_PORT_HDMIC 2
+#define DVO_PORT_HDMID 3
+#define DVO_PORT_LVDS 4
+#define DVO_PORT_TV 5
+#define DVO_PORT_CRT 6
+#define DVO_PORT_DPB 7
+#define DVO_PORT_DPC 8
+#define DVO_PORT_DPD 9
+#define DVO_PORT_DPA 10
+#define DVO_PORT_DPE 11 /* 193 */
+#define DVO_PORT_HDMIE 12 /* 193 */
+#define DVO_PORT_MIPIA 21 /* 171 */
+#define DVO_PORT_MIPIB 22 /* 171 */
+#define DVO_PORT_MIPIC 23 /* 171 */
+#define DVO_PORT_MIPID 24 /* 171 */
+
+#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33
+
+/* DDC Bus DDI Type 155+ */
+enum vbt_gmbus_ddi {
+ DDC_BUS_DDI_B = 0x1,
+ DDC_BUS_DDI_C,
+ DDC_BUS_DDI_D,
+ DDC_BUS_DDI_F,
+};
+
/*
- * We used to keep this struct but without any version control. We should avoid
- * using it in the future, but it should be safe to keep using it in the old
- * code. Do not change; we rely on its size.
+ * The child device config, aka the display device data structure, provides a
+ * description of a port and its configuration on the platform.
+ *
+ * The child device config size has been increased, fields have been added,
+ * and their meaning has changed over time. Care must be taken when accessing
+ * basically any of the fields to ensure the correct interpretation for the BDB
+ * version in question.
+ *
+ * When we copy the child device configs to dev_priv->vbt.child_dev, we reserve
+ * space for the full structure below, and initialize the tail not actually
+ * present in VBT to zeros. Accessing those fields is fine, as long as the
+ * default zero is taken into account, again according to the BDB version.
+ *
+ * BDB versions 155 and below are considered legacy, and version 155 seems to be
+ * a baseline for some of the VBT documentation. When adding new fields, please
+ * include the BDB version when the field was added, if it's above that.
*/
-struct old_child_dev_config {
+struct child_device_config {
u16 handle;
- u16 device_type;
- u8 device_id[10]; /* ascii string */
- u16 addin_offset;
- u8 dvo_port; /* See Device_PORT_* above */
- u8 i2c_pin;
- u8 slave_addr;
- u8 ddc_pin;
- u16 edid_ptr;
- u8 dvo_cfg; /* See DEVICE_CFG_* above */
- u8 dvo2_port;
- u8 i2c2_pin;
- u8 slave2_addr;
- u8 ddc2_pin;
- u8 capabilities;
- u8 dvo_wiring;/* See DEVICE_WIRE_* above */
- u8 dvo2_wiring;
- u16 extended_type;
- u8 dvo_function;
-} __packed;
+ u16 device_type; /* See DEVICE_TYPE_* above */
+
+ union {
+ u8 device_id[10]; /* ascii string */
+ struct {
+ u8 i2c_speed;
+ u8 dp_onboard_redriver; /* 158 */
+ u8 dp_ondock_redriver; /* 158 */
+ u8 hdmi_level_shifter_value:4; /* 169 */
+ u8 hdmi_max_data_rate:4; /* 204 */
+ u16 dtd_buf_ptr; /* 161 */
+ u8 edidless_efp:1; /* 161 */
+ u8 compression_enable:1; /* 198 */
+ u8 compression_method:1; /* 198 */
+ u8 ganged_edp:1; /* 202 */
+ u8 reserved0:4;
+ u8 compression_structure_index:4; /* 198 */
+ u8 reserved1:4;
+ u8 slave_port; /* 202 */
+ u8 reserved2;
+ } __packed;
+ } __packed;
-/* This one contains field offsets that are known to be common for all BDB
- * versions. Notice that the meaning of the contents contents may still change,
- * but at least the offsets are consistent. */
-
-struct common_child_dev_config {
- u16 handle;
- u16 device_type;
- u8 not_common1[12];
- u8 dvo_port;
- u8 not_common2[2];
+ u16 addin_offset;
+ u8 dvo_port; /* See DEVICE_PORT_* and DVO_PORT_* above */
+ u8 i2c_pin;
+ u8 slave_addr;
u8 ddc_pin;
u16 edid_ptr;
u8 dvo_cfg; /* See DEVICE_CFG_* above */
- u8 efp_routed:1;
- u8 lane_reversal:1;
- u8 lspcon:1;
- u8 iboost:1;
- u8 hpd_invert:1;
- u8 flag_reserved:3;
- u8 hdmi_support:1;
- u8 dp_support:1;
- u8 tmds_support:1;
- u8 support_reserved:5;
- u8 aux_channel;
- u8 not_common3[11];
- u8 iboost_level;
-} __packed;
+ union {
+ struct {
+ u8 dvo2_port;
+ u8 i2c2_pin;
+ u8 slave2_addr;
+ u8 ddc2_pin;
+ } __packed;
+ struct {
+ u8 efp_routed:1; /* 158 */
+ u8 lane_reversal:1; /* 184 */
+ u8 lspcon:1; /* 192 */
+ u8 iboost:1; /* 196 */
+ u8 hpd_invert:1; /* 196 */
+ u8 flag_reserved:3;
+ u8 hdmi_support:1; /* 158 */
+ u8 dp_support:1; /* 158 */
+ u8 tmds_support:1; /* 158 */
+ u8 support_reserved:5;
+ u8 aux_channel;
+ u8 dongle_detect;
+ } __packed;
+ } __packed;
+
+ u8 pipe_cap:2;
+ u8 sdvo_stall:1; /* 158 */
+ u8 hpd_status:2;
+ u8 integrated_encoder:1;
+ u8 capabilities_reserved:2;
+ u8 dvo_wiring; /* See DEVICE_WIRE_* above */
+
+ union {
+ u8 dvo2_wiring;
+ u8 mipi_bridge_type; /* 171 */
+ } __packed;
-/* This field changes depending on the BDB version, so the most reliable way to
- * read it is by checking the BDB version and reading the raw pointer. */
-union child_device_config {
- /* This one is safe to be used anywhere, but the code should still check
- * the BDB version. */
- u8 raw[33];
- /* This one should only be kept for legacy code. */
- struct old_child_dev_config old;
- /* This one should also be safe to use anywhere, even without version
- * checks. */
- struct common_child_dev_config common;
+ u16 extended_type;
+ u8 dvo_function;
+ u8 dp_usb_type_c:1; /* 195 */
+ u8 flags2_reserved:7; /* 195 */
+ u8 dp_gpio_index; /* 195 */
+ u16 dp_gpio_pin_num; /* 195 */
+ u8 dp_iboost_level:4; /* 196 */
+ u8 hdmi_iboost_level:4; /* 196 */
} __packed;
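To make the zero-filled tail behaviour described in the comment above concrete, a parser following this scheme would copy only the bytes the VBT actually provides for each entry and leave the remainder of the struct zeroed. A minimal sketch, with hypothetical helper and parameter names:

static void copy_child_device_config(struct child_device_config *dst,
				     const void *vbt_entry,
				     size_t vbt_entry_size)
{
	/* Fields newer than the reported BDB version must read back as 0 */
	memset(dst, 0, sizeof(*dst));
	memcpy(dst, vbt_entry, min(vbt_entry_size, sizeof(*dst)));
}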
struct bdb_general_definitions {
@@ -585,23 +696,38 @@ struct bdb_driver_features {
#define EDP_VSWING_1_2V 3
-struct edp_link_params {
+struct edp_fast_link_params {
u8 rate:4;
u8 lanes:4;
u8 preemphasis:4;
u8 vswing:4;
} __packed;
+struct edp_pwm_delays {
+ u16 pwm_on_to_backlight_enable;
+ u16 backlight_disable_to_pwm_off;
+} __packed;
+
+struct edp_full_link_params {
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __packed;
+
struct bdb_edp {
struct edp_power_seq power_seqs[16];
u32 color_depth;
- struct edp_link_params link_params[16];
+ struct edp_fast_link_params fast_link_params[16];
u32 sdrrs_msa_timing_delay;
/* ith bit indicates enabled/disabled for (i+1)th panel */
- u16 edp_s3d_feature;
- u16 edp_t3_optimization;
- u64 edp_vswing_preemph; /* v173 */
+ u16 edp_s3d_feature; /* 162 */
+ u16 edp_t3_optimization; /* 165 */
+ u64 edp_vswing_preemph; /* 173 */
+ u16 fast_link_training; /* 182 */
+ u16 dpcd_600h_write_required; /* 185 */
+ struct edp_pwm_delays pwm_delays[16]; /* 186 */
+ u16 full_link_params_provided; /* 199 */
+ struct edp_full_link_params full_link_params[16]; /* 199 */
} __packed;
struct psr_table {
@@ -745,81 +871,6 @@ struct bdb_psr {
#define SWF14_APM_STANDBY 0x1
#define SWF14_APM_RESTORE 0x0
-/* Add the device class for LFP, TV, HDMI */
-#define DEVICE_TYPE_INT_LFP 0x1022
-#define DEVICE_TYPE_INT_TV 0x1009
-#define DEVICE_TYPE_HDMI 0x60D2
-#define DEVICE_TYPE_DP 0x68C6
-#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6
-#define DEVICE_TYPE_eDP 0x78C6
-
-#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
-#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
-#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
-#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
-#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
-#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
-#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
-#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
-#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
-#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
-#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
-#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
-#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
-#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
-#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
-
-/*
- * Bits we care about when checking for DEVICE_TYPE_eDP
- * Depending on the system, the other bits may or may not
- * be set for eDP outputs.
- */
-#define DEVICE_TYPE_eDP_BITS \
- (DEVICE_TYPE_INTERNAL_CONNECTOR | \
- DEVICE_TYPE_MIPI_OUTPUT | \
- DEVICE_TYPE_COMPOSITE_OUTPUT | \
- DEVICE_TYPE_DUAL_CHANNEL | \
- DEVICE_TYPE_LVDS_SINGALING | \
- DEVICE_TYPE_TMDS_DVI_SIGNALING | \
- DEVICE_TYPE_VIDEO_SIGNALING | \
- DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
- DEVICE_TYPE_ANALOG_OUTPUT)
-
-#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
- (DEVICE_TYPE_INTERNAL_CONNECTOR | \
- DEVICE_TYPE_MIPI_OUTPUT | \
- DEVICE_TYPE_COMPOSITE_OUTPUT | \
- DEVICE_TYPE_LVDS_SINGALING | \
- DEVICE_TYPE_TMDS_DVI_SIGNALING | \
- DEVICE_TYPE_VIDEO_SIGNALING | \
- DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
- DEVICE_TYPE_DIGITAL_OUTPUT | \
- DEVICE_TYPE_ANALOG_OUTPUT)
-
-/* define the DVO port for HDMI output type */
-#define DVO_B 1
-#define DVO_C 2
-#define DVO_D 3
-
-/* Possible values for the "DVO Port" field for versions >= 155: */
-#define DVO_PORT_HDMIA 0
-#define DVO_PORT_HDMIB 1
-#define DVO_PORT_HDMIC 2
-#define DVO_PORT_HDMID 3
-#define DVO_PORT_LVDS 4
-#define DVO_PORT_TV 5
-#define DVO_PORT_CRT 6
-#define DVO_PORT_DPB 7
-#define DVO_PORT_DPC 8
-#define DVO_PORT_DPD 9
-#define DVO_PORT_DPA 10
-#define DVO_PORT_DPE 11
-#define DVO_PORT_HDMIE 12
-#define DVO_PORT_MIPIA 21
-#define DVO_PORT_MIPIB 22
-#define DVO_PORT_MIPIC 23
-#define DVO_PORT_MIPID 24
-
/* Block 52 contains MIPI configuration block
* 6 * bdb_mipi_config, followed by 6 pps data block
* block below
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index c5c7e8efbdd3..a2632df39173 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -37,8 +37,7 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
kfree(pages);
}
-static struct sg_table *
-huge_get_pages(struct drm_i915_gem_object *obj)
+static int huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
const unsigned long nreal = obj->scratch / PAGE_SIZE;
@@ -49,11 +48,11 @@ huge_get_pages(struct drm_i915_gem_object *obj)
pages = kmalloc(sizeof(*pages), GFP);
if (!pages)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
if (sg_alloc_table(pages, npages, GFP)) {
kfree(pages);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
sg = pages->sgl;
@@ -81,11 +80,14 @@ huge_get_pages(struct drm_i915_gem_object *obj)
if (i915_gem_gtt_prepare_pages(obj, pages))
goto err;
- return pages;
+ __i915_gem_object_set_pages(obj, pages, PAGE_SIZE);
+
+ return 0;
err:
huge_free_pages(obj, pages);
- return ERR_PTR(-ENOMEM);
+
+ return -ENOMEM;
#undef GFP
}
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
new file mode 100644
index 000000000000..5cc8101bb2b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -0,0 +1,1734 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+#include <linux/prime_numbers.h>
+
+#include "mock_drm.h"
+
+static const unsigned int page_sizes[] = {
+ I915_GTT_PAGE_SIZE_2M,
+ I915_GTT_PAGE_SIZE_64K,
+ I915_GTT_PAGE_SIZE_4K,
+};
+
+static unsigned int get_largest_page_size(struct drm_i915_private *i915,
+ u64 rem)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
+ unsigned int page_size = page_sizes[i];
+
+ if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
+ return page_size;
+ }
+
+ return 0;
+}
+
+static void huge_pages_free_pages(struct sg_table *st)
+{
+ struct scatterlist *sg;
+
+ for (sg = st->sgl; sg; sg = __sg_next(sg)) {
+ if (sg_page(sg))
+ __free_pages(sg_page(sg), get_order(sg->length));
+ }
+
+ sg_free_table(st);
+ kfree(st);
+}
+
+static int get_huge_pages(struct drm_i915_gem_object *obj)
+{
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+ unsigned int page_mask = obj->mm.page_mask;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int sg_page_sizes;
+ u64 rem;
+
+ st = kmalloc(sizeof(*st), GFP);
+ if (!st)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ rem = obj->base.size;
+ sg = st->sgl;
+ st->nents = 0;
+ sg_page_sizes = 0;
+
+ /*
+ * Our goal here is simple: we want to greedily fill the object from
+ * largest to smallest page-size, while ensuring that we use *every*
+ * page-size as per the given page-mask.
+ */
+ do {
+ unsigned int bit = ilog2(page_mask);
+ unsigned int page_size = BIT(bit);
+ int order = get_order(page_size);
+
+ do {
+ struct page *page;
+
+ GEM_BUG_ON(order >= MAX_ORDER);
+ page = alloc_pages(GFP | __GFP_ZERO, order);
+ if (!page)
+ goto err;
+
+ sg_set_page(sg, page, page_size, 0);
+ sg_page_sizes |= page_size;
+ st->nents++;
+
+ rem -= page_size;
+ if (!rem) {
+ sg_mark_end(sg);
+ break;
+ }
+
+ sg = __sg_next(sg);
+ } while ((rem - ((page_size-1) & page_mask)) >= page_size);
+
+ page_mask &= (page_size-1);
+ } while (page_mask);
+
+ if (i915_gem_gtt_prepare_pages(obj, st))
+ goto err;
+
+ obj->mm.madv = I915_MADV_DONTNEED;
+
+ GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
+
+err:
+ sg_set_page(sg, NULL, 0, 0);
+ sg_mark_end(sg);
+ huge_pages_free_pages(st);
+
+ return -ENOMEM;
+}
+
+static void put_huge_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ i915_gem_gtt_finish_pages(obj, pages);
+ huge_pages_free_pages(pages);
+
+ obj->mm.dirty = false;
+ obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops huge_page_ops = {
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = get_huge_pages,
+ .put_pages = put_huge_pages,
+};
+
+static struct drm_i915_gem_object *
+huge_pages_object(struct drm_i915_private *i915,
+ u64 size,
+ unsigned int page_mask)
+{
+ struct drm_i915_gem_object *obj;
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));
+
+ if (size >> PAGE_SHIFT > INT_MAX)
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc(i915);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &huge_page_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = I915_CACHE_NONE;
+
+ obj->mm.page_mask = page_mask;
+
+ return obj;
+}
+
+static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ const u64 max_len = rounddown_pow_of_two(UINT_MAX);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int sg_page_sizes;
+ u64 rem;
+
+ st = kmalloc(sizeof(*st), GFP);
+ if (!st)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ /* Use optimal page sized chunks to fill in the sg table */
+ rem = obj->base.size;
+ sg = st->sgl;
+ st->nents = 0;
+ sg_page_sizes = 0;
+ do {
+ unsigned int page_size = get_largest_page_size(i915, rem);
+ unsigned int len = min(page_size * div_u64(rem, page_size),
+ max_len);
+
+ GEM_BUG_ON(!page_size);
+
+ sg->offset = 0;
+ sg->length = len;
+ sg_dma_len(sg) = len;
+ sg_dma_address(sg) = page_size;
+
+ sg_page_sizes |= len;
+
+ st->nents++;
+
+ rem -= len;
+ if (!rem) {
+ sg_mark_end(sg);
+ break;
+ }
+
+ sg = sg_next(sg);
+ } while (1);
+
+ obj->mm.madv = I915_MADV_DONTNEED;
+
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
+}
+
+static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int page_size;
+
+ st = kmalloc(sizeof(*st), GFP);
+ if (!st)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, 1, GFP)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ sg = st->sgl;
+ st->nents = 1;
+
+ page_size = get_largest_page_size(i915, obj->base.size);
+ GEM_BUG_ON(!page_size);
+
+ sg->offset = 0;
+ sg->length = obj->base.size;
+ sg_dma_len(sg) = obj->base.size;
+ sg_dma_address(sg) = page_size;
+
+ obj->mm.madv = I915_MADV_DONTNEED;
+
+ __i915_gem_object_set_pages(obj, st, sg->length);
+
+ return 0;
+#undef GFP
+}
+
+static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ fake_free_huge_pages(obj, pages);
+ obj->mm.dirty = false;
+ obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops fake_ops = {
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = fake_get_huge_pages,
+ .put_pages = fake_put_huge_pages,
+};
+
+static const struct drm_i915_gem_object_ops fake_ops_single = {
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = fake_get_huge_pages_single,
+ .put_pages = fake_put_huge_pages,
+};
+
+static struct drm_i915_gem_object *
+fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
+{
+ struct drm_i915_gem_object *obj;
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+
+ if (size >> PAGE_SHIFT > UINT_MAX)
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc(i915);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+
+ if (single)
+ i915_gem_object_init(obj, &fake_ops_single);
+ else
+ i915_gem_object_init(obj, &fake_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = I915_CACHE_NONE;
+
+ return obj;
+}
+
+static int igt_check_page_sizes(struct i915_vma *vma)
+{
+ struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
+ unsigned int supported = INTEL_INFO(i915)->page_sizes;
+ struct drm_i915_gem_object *obj = vma->obj;
+ int err = 0;
+
+ if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
+ pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
+ vma->page_sizes.sg & ~supported, supported);
+ err = -EINVAL;
+ }
+
+ if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
+ pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
+ vma->page_sizes.gtt & ~supported, supported);
+ err = -EINVAL;
+ }
+
+ if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
+ pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
+ vma->page_sizes.phys, obj->mm.page_sizes.phys);
+ err = -EINVAL;
+ }
+
+ if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
+ pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
+ vma->page_sizes.sg, obj->mm.page_sizes.sg);
+ err = -EINVAL;
+ }
+
+ if (obj->mm.page_sizes.gtt) {
+ pr_err("obj->page_sizes.gtt(%u) should never be set\n",
+ obj->mm.page_sizes.gtt);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int igt_mock_exhaust_device_supported_pages(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int i, j, single;
+ int err;
+
+ /*
+ * Sanity check creating objects with every valid page support
+ * combination for our mock device.
+ */
+
+ for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
+ unsigned int combination = 0;
+
+ for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
+ if (i & BIT(j))
+ combination |= page_sizes[j];
+ }
+
+ mkwrite_device_info(i915)->page_sizes = combination;
+
+ for (single = 0; single <= 1; ++single) {
+ obj = fake_huge_pages_object(i915, combination, !!single);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_device;
+ }
+
+ if (obj->base.size != combination) {
+ pr_err("obj->base.size=%zu, expected=%u\n",
+ obj->base.size, combination);
+ err = -EINVAL;
+ goto out_put;
+ }
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_close;
+
+ err = igt_check_page_sizes(vma);
+
+ if (vma->page_sizes.sg != combination) {
+ pr_err("page_sizes.sg=%u, expected=%u\n",
+ vma->page_sizes.sg, combination);
+ err = -EINVAL;
+ }
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_put(obj);
+
+ if (err)
+ goto out_device;
+ }
+ }
+
+ goto out_device;
+
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+out_device:
+ mkwrite_device_info(i915)->page_sizes = saved_mask;
+
+ return err;
+}
+
+static int igt_mock_ppgtt_misaligned_dma(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ struct drm_i915_gem_object *obj;
+ int bit;
+ int err;
+
+ /*
+ * Sanity check dma misalignment for huge pages -- the dma addresses we
+ * insert into the paging structures need to always respect the page
+ * size alignment.
+ */
+
+ bit = ilog2(I915_GTT_PAGE_SIZE_64K);
+
+ for_each_set_bit_from(bit, &supported,
+ ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+ IGT_TIMEOUT(end_time);
+ unsigned int page_size = BIT(bit);
+ unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ unsigned int offset;
+ unsigned int size =
+ round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
+ struct i915_vma *vma;
+
+ obj = fake_huge_pages_object(i915, size, true);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->base.size != size) {
+ pr_err("obj->base.size=%zu, expected=%u\n",
+ obj->base.size, size);
+ err = -EINVAL;
+ goto out_put;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ /* Force the page size for this object */
+ obj->mm.page_sizes.sg = page_size;
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ err = igt_check_page_sizes(vma);
+
+ if (vma->page_sizes.gtt != page_size) {
+ pr_err("page_sizes.gtt=%u, expected %u\n",
+ vma->page_sizes.gtt, page_size);
+ err = -EINVAL;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ /*
+ * Try all the other valid offsets until the next
+ * boundary -- should always fall back to using 4K
+ * pages.
+ */
+ for (offset = 4096; offset < page_size; offset += 4096) {
+ err = i915_vma_unbind(vma);
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags | offset);
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ err = igt_check_page_sizes(vma);
+
+ if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
+ pr_err("page_sizes.gtt=%u, expected %lu\n",
+ vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
+ err = -EINVAL;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (err) {
+ i915_vma_close(vma);
+ goto out_unpin;
+ }
+
+ if (igt_timeout(end_time,
+ "%s timed out at offset %x with page-size %x\n",
+ __func__, offset, page_size))
+ break;
+ }
+
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static void close_object_list(struct list_head *objects,
+ struct i915_hw_ppgtt *ppgtt)
+{
+ struct drm_i915_gem_object *obj, *on;
+
+ list_for_each_entry_safe(obj, on, objects, st_link) {
+ struct i915_vma *vma;
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (!IS_ERR(vma))
+ i915_vma_close(vma);
+
+ list_del(&obj->st_link);
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+}
+
+static int igt_mock_ppgtt_huge_fill(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT;
+ unsigned long page_num;
+ bool single = false;
+ LIST_HEAD(objects);
+ IGT_TIMEOUT(end_time);
+ int err = -ENODEV;
+
+ for_each_prime_number_from(page_num, 1, max_pages) {
+ struct drm_i915_gem_object *obj;
+ u64 size = page_num << PAGE_SHIFT;
+ struct i915_vma *vma;
+ unsigned int expected_gtt = 0;
+ int i;
+
+ obj = fake_huge_pages_object(i915, size, single);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+
+ if (obj->base.size != size) {
+ pr_err("obj->base.size=%zd, expected=%llu\n",
+ obj->base.size, size);
+ i915_gem_object_put(obj);
+ err = -EINVAL;
+ break;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ break;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ break;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ break;
+
+ err = igt_check_page_sizes(vma);
+ if (err) {
+ i915_vma_unpin(vma);
+ break;
+ }
+
+ /*
+ * Figure out the expected gtt page size knowing that we go from
+ * largest to smallest page size sg chunks, and that we align to
+ * the largest page size.
+ */
+ for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
+ unsigned int page_size = page_sizes[i];
+
+ if (HAS_PAGE_SIZES(i915, page_size) &&
+ size >= page_size) {
+ expected_gtt |= page_size;
+ size &= page_size-1;
+ }
+ }
+
+ GEM_BUG_ON(!expected_gtt);
+ GEM_BUG_ON(size);
+
+ if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
+ expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;
+
+ i915_vma_unpin(vma);
+
+ if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
+ if (!IS_ALIGNED(vma->node.start,
+ I915_GTT_PAGE_SIZE_2M)) {
+ pr_err("node.start(%llx) not aligned to 2M\n",
+ vma->node.start);
+ err = -EINVAL;
+ break;
+ }
+
+ if (!IS_ALIGNED(vma->node.size,
+ I915_GTT_PAGE_SIZE_2M)) {
+ pr_err("node.size(%llx) not aligned to 2M\n",
+ vma->node.size);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ if (vma->page_sizes.gtt != expected_gtt) {
+ pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
+ vma->page_sizes.gtt, expected_gtt,
+ obj->base.size, yesno(!!single));
+ err = -EINVAL;
+ break;
+ }
+
+ if (igt_timeout(end_time,
+ "%s timed out at size %zd\n",
+ __func__, obj->base.size))
+ break;
+
+ single = !single;
+ }
+
+ close_object_list(&objects, ppgtt);
+
+ if (err == -ENOMEM || err == -ENOSPC)
+ err = 0;
+
+ return err;
+}
+
+static int igt_mock_ppgtt_64K(void *arg)
+{
+ struct i915_hw_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->base.i915;
+ struct drm_i915_gem_object *obj;
+ const struct object_info {
+ unsigned int size;
+ unsigned int gtt;
+ unsigned int offset;
+ } objects[] = {
+ /* Cases with forced padding/alignment */
+ {
+ .size = SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_64K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_64K + SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_64K - SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M,
+ .gtt = I915_GTT_PAGE_SIZE_64K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M - SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M + SZ_4K,
+ .gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M + SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_64K,
+ .offset = 0,
+ },
+ {
+ .size = SZ_2M - SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_64K,
+ .offset = 0,
+ },
+ /* Try without any forced padding/alignment */
+ {
+ .size = SZ_64K,
+ .offset = SZ_2M,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ },
+ {
+ .size = SZ_128K,
+ .offset = SZ_2M - SZ_64K,
+ .gtt = I915_GTT_PAGE_SIZE_4K,
+ },
+ };
+ struct i915_vma *vma;
+ int i, single;
+ int err;
+
+ /*
+ * Sanity check some of the trickiness with 64K pages -- either we can
+ * safely mark the whole page-table (2M block) as 64K, or we have to
+ * always fall back to 4K.
+ */
+
+ if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(objects); ++i) {
+ unsigned int size = objects[i].size;
+ unsigned int expected_gtt = objects[i].gtt;
+ unsigned int offset = objects[i].offset;
+ unsigned int flags = PIN_USER;
+
+ for (single = 0; single <= 1; single++) {
+ obj = fake_huge_pages_object(i915, size, !!single);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_object_put;
+
+ /*
+ * Disable 2M pages -- we only want to use 64K/4K pages
+ * for this test.
+ */
+ obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_object_unpin;
+ }
+
+ if (offset)
+ flags |= PIN_OFFSET_FIXED | offset;
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ goto out_vma_close;
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_vma_unpin;
+
+ if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
+ if (!IS_ALIGNED(vma->node.start,
+ I915_GTT_PAGE_SIZE_2M)) {
+ pr_err("node.start(%llx) not aligned to 2M\n",
+ vma->node.start);
+ err = -EINVAL;
+ goto out_vma_unpin;
+ }
+
+ if (!IS_ALIGNED(vma->node.size,
+ I915_GTT_PAGE_SIZE_2M)) {
+ pr_err("node.size(%llx) not aligned to 2M\n",
+ vma->node.size);
+ err = -EINVAL;
+ goto out_vma_unpin;
+ }
+ }
+
+ if (vma->page_sizes.gtt != expected_gtt) {
+ pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
+ vma->page_sizes.gtt, expected_gtt, i,
+ yesno(!!single));
+ err = -EINVAL;
+ goto out_vma_unpin;
+ }
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+ }
+
+ return 0;
+
+out_vma_unpin:
+ i915_vma_unpin(vma);
+out_vma_close:
+ i915_vma_close(vma);
+out_object_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_object_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static struct i915_vma *
+gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
+{
+ struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
+ const int gen = INTEL_GEN(vma->vm->i915);
+ unsigned int count = vma->size >> PAGE_SHIFT;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *batch;
+ unsigned int size;
+ u32 *cmd;
+ int n;
+ int err;
+
+ size = (1 + 4 * count) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ offset += vma->node.start;
+
+ for (n = 0; n < count; n++) {
+ if (gen >= 8) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = val;
+ } else if (gen >= 4) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
+ (gen < 6 ? 1 << 22 : 0);
+ *cmd++ = 0;
+ *cmd++ = offset;
+ *cmd++ = val;
+ } else {
+ *cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cmd++ = offset;
+ *cmd++ = val;
+ }
+
+ offset += PAGE_SIZE;
+ }
+
+ *cmd = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_unpin_map(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ goto err;
+
+ batch = i915_vma_instance(obj, vma->vm, NULL);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto err;
+ }
+
+ err = i915_vma_pin(batch, 0, 0, PIN_USER);
+ if (err)
+ goto err;
+
+ return batch;
+
+err:
+ i915_gem_object_put(obj);
+
+ return ERR_PTR(err);
+}
+
+static int gpu_write(struct i915_vma *vma,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 dword,
+ u32 value)
+{
+ struct drm_i915_gem_request *rq;
+ struct i915_vma *batch;
+ int flags = 0;
+ int err;
+
+ GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+
+ err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+ if (err)
+ return err;
+
+ rq = i915_gem_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ batch = gpu_write_dw(vma, dword * sizeof(u32), value);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto err_request;
+ }
+
+ i915_vma_move_to_active(batch, rq, 0);
+ i915_gem_object_set_active_reference(batch->obj);
+ i915_vma_unpin(batch);
+ i915_vma_close(batch);
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto err_request;
+
+ err = i915_switch_context(rq);
+ if (err)
+ goto err_request;
+
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
+ if (err)
+ goto err_request;
+
+ i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+
+ reservation_object_lock(vma->resv, NULL);
+ reservation_object_add_excl_fence(vma->resv, &rq->fence);
+ reservation_object_unlock(vma->resv);
+
+err_request:
+ __i915_add_request(rq, err == 0);
+
+ return err;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ unsigned int needs_flush;
+ unsigned long n;
+ int err;
+
+ err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
+ if (err)
+ return err;
+
+ for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+ u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
+
+ if (needs_flush & CLFLUSH_BEFORE)
+ drm_clflush_virt_range(ptr, PAGE_SIZE);
+
+ if (ptr[dword] != val) {
+ pr_err("n=%lu ptr[%u]=%u, val=%u\n",
+ n, dword, ptr[dword], val);
+ kunmap_atomic(ptr);
+ err = -EINVAL;
+ break;
+ }
+
+ kunmap_atomic(ptr);
+ }
+
+ i915_gem_obj_finish_shmem_access(obj);
+
+ return err;
+}
+
+static int igt_write_huge(struct i915_gem_context *ctx,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct intel_engine_cs *engine;
+ struct i915_vma *vma;
+ unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ unsigned int max_page_size;
+ unsigned int id;
+ u64 max;
+ u64 num;
+ u64 size;
+ int err = 0;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ size = obj->base.size;
+ if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+ size = round_up(size, I915_GTT_PAGE_SIZE_2M);
+
+ max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
+ max = div_u64((vm->total - size), max_page_size);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ for_each_engine(engine, i915, id) {
+ IGT_TIMEOUT(end_time);
+
+ if (!intel_engine_can_store_dword(engine)) {
+ pr_info("store-dword-imm not supported on engine=%u\n",
+ id);
+ continue;
+ }
+
+ /*
+ * Try various offsets until we time out -- we want to avoid
+ * issues hidden by effectively always using offset = 0.
+ */
+ for_each_prime_number_from(num, 0, max) {
+ u64 offset = num * max_page_size;
+ u32 dword;
+
+ err = i915_vma_unbind(vma);
+ if (err)
+ goto out_vma_close;
+
+ err = i915_vma_pin(vma, size, max_page_size, flags | offset);
+ if (err) {
+ /*
+ * The ggtt may have some pages reserved so
+ * refrain from erroring out.
+ */
+ if (err == -ENOSPC && i915_is_ggtt(vm)) {
+ err = 0;
+ continue;
+ }
+
+ goto out_vma_close;
+ }
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_vma_unpin;
+
+ dword = offset_in_page(num) / 4;
+
+ err = gpu_write(vma, ctx, engine, dword, num + 1);
+ if (err) {
+ pr_err("gpu-write failed at offset=%llx", offset);
+ goto out_vma_unpin;
+ }
+
+ err = cpu_check(obj, dword, num + 1);
+ if (err) {
+ pr_err("cpu-check failed at offset=%llx", offset);
+ goto out_vma_unpin;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (num > 0 &&
+ igt_timeout(end_time,
+ "%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
+ __func__, id, offset, max_page_size))
+ break;
+ }
+ }
+
+out_vma_unpin:
+ if (i915_vma_is_pinned(vma))
+ i915_vma_unpin(vma);
+out_vma_close:
+ i915_vma_close(vma);
+
+ return err;
+}
+
+static int igt_ppgtt_exhaust_huge(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ static unsigned int pages[ARRAY_SIZE(page_sizes)];
+ struct drm_i915_gem_object *obj;
+ unsigned int size_mask;
+ unsigned int page_mask;
+ int n, i;
+ int err = -ENODEV;
+
+ /*
+ * Sanity check creating objects with a varying mix of page sizes --
+ * ensuring that our writes land in the right place.
+ */
+
+ n = 0;
+ for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
+ pages[n++] = BIT(i);
+
+ for (size_mask = 2; size_mask < BIT(n); size_mask++) {
+ unsigned int size = 0;
+
+ for (i = 0; i < n; i++) {
+ if (size_mask & BIT(i))
+ size |= pages[i];
+ }
+
+ /*
+ * For our page mask we want to enumerate all the page-size
+ * combinations which will fit into our chosen object size.
+ */
+ for (page_mask = 2; page_mask <= size_mask; page_mask++) {
+ unsigned int page_sizes = 0;
+
+ for (i = 0; i < n; i++) {
+ if (page_mask & BIT(i))
+ page_sizes |= pages[i];
+ }
+
+ /*
+ * Ensure that we can actually fill the given object
+ * with our chosen page mask.
+ */
+ if (!IS_ALIGNED(size, BIT(__ffs(page_sizes))))
+ continue;
+
+ obj = huge_pages_object(i915, size, page_sizes);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_device;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+
+ if (err == -ENOMEM) {
+ pr_info("unable to get pages, size=%u, pages=%u\n",
+ size, page_sizes);
+ err = 0;
+ break;
+ }
+
+ pr_err("pin_pages failed, size=%u, pages=%u\n",
+ size_mask, page_mask);
+
+ goto out_device;
+ }
+
+ /* Force the page-size for the gtt insertion */
+ obj->mm.page_sizes.sg = page_sizes;
+
+ err = igt_write_huge(ctx, obj);
+ if (err) {
+ pr_err("exhaust write-huge failed with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+ }
+
+ goto out_device;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+out_device:
+ mkwrite_device_info(i915)->page_sizes = supported;
+
+ return err;
+}
+
+static int igt_ppgtt_internal_huge(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_gem_object *obj;
+ static const unsigned int sizes[] = {
+ SZ_64K,
+ SZ_128K,
+ SZ_256K,
+ SZ_512K,
+ SZ_1M,
+ SZ_2M,
+ };
+ int i;
+ int err;
+
+ /*
+ * Sanity check that the HW uses huge pages correctly through the internal
+ * backend -- ensure that our writes land in the right place.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+ unsigned int size = sizes[i];
+
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+ pr_info("internal unable to allocate huge-page(s) with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ err = igt_write_huge(ctx, obj);
+ if (err) {
+ pr_err("internal write-huge failed with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
+{
+ return i915->mm.gemfs && has_transparent_hugepage();
+}
+
+static int igt_ppgtt_gemfs_huge(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_gem_object *obj;
+ static const unsigned int sizes[] = {
+ SZ_2M,
+ SZ_4M,
+ SZ_8M,
+ SZ_16M,
+ SZ_32M,
+ };
+ int i;
+ int err;
+
+ /*
+ * Sanity check that the HW uses huge pages correctly through gemfs --
+ * ensure that our writes land in the right place.
+ */
+
+ if (!igt_can_allocate_thp(i915)) {
+ pr_info("missing THP support, skipping\n");
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+ unsigned int size = sizes[i];
+
+ obj = i915_gem_object_create(i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
+ pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ err = igt_write_huge(ctx, obj);
+ if (err) {
+ pr_err("gemfs write-huge failed with size=%u\n",
+ size);
+ goto out_unpin;
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static int igt_ppgtt_pin_update(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *dev_priv = ctx->i915;
+ unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
+ struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ int first, last;
+ int err;
+
+ /*
+ * Make sure there's no funny business when doing a PIN_UPDATE -- in the
+ * past we had a subtle issue where we could incorrectly perform multiple
+ * alloc va ranges on the same object when doing a PIN_UPDATE, which
+ * resulted in some pretty nasty bugs, though only when using
+ * huge-gtt-pages.
+ */
+
+ if (!USES_FULL_48BIT_PPGTT(dev_priv)) {
+ pr_info("48b PPGTT not supported, skipping\n");
+ return 0;
+ }
+
+ first = ilog2(I915_GTT_PAGE_SIZE_64K);
+ last = ilog2(I915_GTT_PAGE_SIZE_2M);
+
+ for_each_set_bit_from(first, &supported, last + 1) {
+ unsigned int page_size = BIT(first);
+
+ obj = i915_gem_object_create_internal(dev_priv, page_size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, SZ_2M, 0, flags);
+ if (err)
+ goto out_close;
+
+ if (vma->page_sizes.sg < page_size) {
+ pr_info("Unable to allocate page-size %x, finishing test early\n",
+ page_size);
+ goto out_unpin;
+ }
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_unpin;
+
+ if (vma->page_sizes.gtt != page_size) {
+ dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0);
+
+ /*
+ * The only valid reason for this to ever fail would be
+ * if the dma-mapper screwed us over when we did the
+ * dma_map_sg(), since it has the final say over the dma
+ * address.
+ */
+ if (IS_ALIGNED(addr, page_size)) {
+ pr_err("page_sizes.gtt=%u, expected=%u\n",
+ vma->page_sizes.gtt, page_size);
+ err = -EINVAL;
+ } else {
+ pr_info("dma address misaligned, finishing test early\n");
+ }
+
+ goto out_unpin;
+ }
+
+ err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
+ if (err)
+ goto out_unpin;
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_put(obj);
+ }
+
+ obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ goto out_close;
+
+ /*
+ * Make sure we don't end up in a situation where the pde is still
+ * pointing to the 2M page while the pt we just filled in is dangling --
+ * we can check this by writing to the first page, where the write would
+ * then land in the now stale 2M page.
+ */
+
+ err = gpu_write(vma, ctx, dev_priv->engine[RCS], 0, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+
+ err = cpu_check(obj, 0, 0xdeadbeaf);
+
+out_unpin:
+ i915_vma_unpin(vma);
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static int igt_tmpfs_fallback(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct vfsmount *gemfs = i915->mm.gemfs;
+ struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *vaddr;
+ int err = 0;
+
+ /*
+ * Make sure that we don't burst into a ball of flames upon falling back
+ * to tmpfs, which we rely on if, on the off chance, we encounter a failure
+ * when setting up gemfs.
+ */
+
+ i915->mm.gemfs = NULL;
+
+ obj = i915_gem_object_create(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_restore;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_put;
+ }
+ *vaddr = 0xdeadbeaf;
+
+ i915_gem_object_unpin_map(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_close;
+
+ err = igt_check_page_sizes(vma);
+
+ i915_vma_unpin(vma);
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+out_restore:
+ i915->mm.gemfs = gemfs;
+
+ return err;
+}
+
+static int igt_shrink_thp(void *arg)
+{
+ struct i915_gem_context *ctx = arg;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned int flags = PIN_USER;
+ int err;
+
+ /*
+ * Sanity check shrinking huge-paged object -- make sure nothing blows
+ * up.
+ */
+
+ if (!igt_can_allocate_thp(i915)) {
+ pr_info("missing THP support, skipping\n");
+ return 0;
+ }
+
+ obj = i915_gem_object_create(i915, SZ_2M);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ goto out_close;
+
+ if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
+ pr_info("failed to allocate THP, finishing test early\n");
+ goto out_unpin;
+ }
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_unpin;
+
+ err = gpu_write(vma, ctx, i915->engine[RCS], 0, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+
+ i915_vma_unpin(vma);
+
+ /*
+ * Now that the pages are *unpinned*, shrink-all should invoke
+ * shmem to truncate our pages.
+ */
+ i915_gem_shrink_all(i915);
+ if (!IS_ERR_OR_NULL(obj->mm.pages)) {
+ pr_err("shrink-all didn't truncate the pages\n");
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
+ pr_err("residual page-size bits left\n");
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ goto out_close;
+
+ err = cpu_check(obj, 0, 0xdeadbeaf);
+
+out_unpin:
+ i915_vma_unpin(vma);
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+int i915_gem_huge_page_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_mock_exhaust_device_supported_pages),
+ SUBTEST(igt_mock_ppgtt_misaligned_dma),
+ SUBTEST(igt_mock_ppgtt_huge_fill),
+ SUBTEST(igt_mock_ppgtt_64K),
+ };
+ int saved_ppgtt = i915_modparams.enable_ppgtt;
+ struct drm_i915_private *dev_priv;
+ struct pci_dev *pdev;
+ struct i915_hw_ppgtt *ppgtt;
+ int err;
+
+ dev_priv = mock_gem_device();
+ if (!dev_priv)
+ return -ENOMEM;
+
+ /* Pretend to be a device which supports the 48b PPGTT */
+ i915_modparams.enable_ppgtt = 3;
+
+ pdev = dev_priv->drm.pdev;
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV), "mock");
+ if (IS_ERR(ppgtt)) {
+ err = PTR_ERR(ppgtt);
+ goto out_unlock;
+ }
+
+ if (!i915_vm_is_48bit(&ppgtt->base)) {
+ pr_err("failed to create 48b PPGTT\n");
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ /* If we ever hit this then it's time to mock the 64K scratch */
+ if (!i915_vm_has_scratch_64K(&ppgtt->base)) {
+ pr_err("PPGTT missing 64K scratch page\n");
+ err = -EINVAL;
+ goto out_close;
+ }
+
+ err = i915_subtests(tests, ppgtt);
+
+out_close:
+ i915_ppgtt_close(&ppgtt->base);
+ i915_ppgtt_put(ppgtt);
+
+out_unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ i915_modparams.enable_ppgtt = saved_ppgtt;
+
+ drm_dev_unref(&dev_priv->drm);
+
+ return err;
+}
+
+int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_shrink_thp),
+ SUBTEST(igt_ppgtt_pin_update),
+ SUBTEST(igt_tmpfs_fallback),
+ SUBTEST(igt_ppgtt_exhaust_huge),
+ SUBTEST(igt_ppgtt_gemfs_huge),
+ SUBTEST(igt_ppgtt_internal_huge),
+ };
+ struct drm_file *file;
+ struct i915_gem_context *ctx;
+ int err;
+
+ if (!USES_PPGTT(dev_priv)) {
+ pr_info("PPGTT not supported, skipping live-selftests\n");
+ return 0;
+ }
+
+ file = mock_file(dev_priv);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ ctx = live_context(dev_priv, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ err = i915_subtests(tests, ctx);
+
+out_unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ mock_file_free(dev_priv, file);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index fb0a58fc8348..def5052862ae 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -417,7 +417,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
if (err)
return err;
- list_for_each_entry(obj, &i915->mm.bound_list, global_link) {
+ list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
struct i915_vma *vma;
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 5ea373221f49..f463105ff48d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -24,6 +24,9 @@
#include "../i915_selftest.h"
+#include "lib_sw_fence.h"
+#include "mock_context.h"
+#include "mock_drm.h"
#include "mock_gem_device.h"
static int populate_ggtt(struct drm_i915_private *i915)
@@ -47,7 +50,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
if (!list_empty(&i915->mm.unbound_list)) {
size = 0;
- list_for_each_entry(obj, &i915->mm.unbound_list, global_link)
+ list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
size++;
pr_err("Found %lld objects unbound!\n", size);
@@ -74,10 +77,10 @@ static void cleanup_objects(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj, *on;
- list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, global_link)
+ list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, mm.link)
i915_gem_object_put(obj);
- list_for_each_entry_safe(obj, on, &i915->mm.bound_list, global_link)
+ list_for_each_entry_safe(obj, on, &i915->mm.bound_list, mm.link)
i915_gem_object_put(obj);
mutex_unlock(&i915->drm.struct_mutex);
@@ -149,8 +152,6 @@ static int igt_overcommit(void *arg)
goto cleanup;
}
- list_move(&obj->global_link, &i915->mm.unbound_list);
-
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
@@ -325,6 +326,148 @@ cleanup:
return err;
}
+static int igt_evict_contexts(void *arg)
+{
+ const u64 PRETEND_GGTT_SIZE = 16ull << 20;
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct reserved {
+ struct drm_mm_node node;
+ struct reserved *next;
+ } *reserved = NULL;
+ struct drm_mm_node hole;
+ unsigned long count;
+ int err;
+
+ /*
+ * The purpose of this test is to verify that we will trigger an
+ * eviction in the GGTT when constructing a request that requires
+ * additional space in the GGTT for pinning the context. This space
+ * is not directly tied to the request so reclaiming it requires
+ * extra work.
+ *
+ * As such this test is only meaningful for full-ppgtt environments
+ * where the GTT space of the request is separate from the GGTT
+ * allocation required to build the request.
+ */
+ if (!USES_FULL_PPGTT(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ /* Reserve a block so that we know we have enough to fit a few rq */
+ memset(&hole, 0, sizeof(hole));
+ err = i915_gem_gtt_insert(&i915->ggtt.base, &hole,
+ PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
+ 0, i915->ggtt.base.total,
+ PIN_NOEVICT);
+ if (err)
+ goto out_locked;
+
+ /* Make the GGTT appear small by filling it with unevictable nodes */
+ count = 0;
+ do {
+ struct reserved *r;
+
+ r = kcalloc(1, sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ err = -ENOMEM;
+ goto out_locked;
+ }
+
+ if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node,
+ 1ul << 20, 0, I915_COLOR_UNEVICTABLE,
+ 0, i915->ggtt.base.total,
+ PIN_NOEVICT)) {
+ kfree(r);
+ break;
+ }
+
+ r->next = reserved;
+ reserved = r;
+
+ count++;
+ } while (1);
+ drm_mm_remove_node(&hole);
+ mutex_unlock(&i915->drm.struct_mutex);
+ pr_info("Filled GGTT with %lu 1MiB nodes\n", count);
+
+ /* Overfill the GGTT with context objects and so try to evict one. */
+ for_each_engine(engine, i915, id) {
+ struct i915_sw_fence fence;
+ struct drm_file *file;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ count = 0;
+ mutex_lock(&i915->drm.struct_mutex);
+ onstack_fence_init(&fence);
+ do {
+ struct drm_i915_gem_request *rq;
+ struct i915_gem_context *ctx;
+
+ ctx = live_context(i915, file);
+ if (!ctx)
+ break;
+
+ /* We will need some GGTT space for the rq's context */
+ igt_evict_ctl.fail_if_busy = true;
+ rq = i915_gem_request_alloc(engine, ctx);
+ igt_evict_ctl.fail_if_busy = false;
+
+ if (IS_ERR(rq)) {
+ /* When full, fail_if_busy will trigger EBUSY */
+ if (PTR_ERR(rq) != -EBUSY) {
+ pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
+ ctx->hw_id, engine->name,
+ (int)PTR_ERR(rq));
+ err = PTR_ERR(rq);
+ }
+ break;
+ }
+
+ /* Keep every request/ctx pinned until we are full */
+ err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+ &fence,
+ GFP_KERNEL);
+ if (err < 0)
+ break;
+
+ i915_add_request(rq);
+ count++;
+ err = 0;
+ } while (1);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ onstack_fence_fini(&fence);
+ pr_info("Submitted %lu contexts/requests on %s\n",
+ count, engine->name);
+
+ mock_file_free(i915, file);
+ if (err)
+ break;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+out_locked:
+ while (reserved) {
+ struct reserved *next = reserved->next;
+
+ drm_mm_remove_node(&reserved->node);
+ kfree(reserved);
+
+ reserved = next;
+ }
+ if (drm_mm_node_allocated(&hole))
+ drm_mm_remove_node(&hole);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
int i915_gem_evict_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -348,3 +491,12 @@ int i915_gem_evict_mock_selftests(void)
drm_dev_unref(&i915->drm);
return err;
}
+
+int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_evict_contexts),
+ };
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 6b132caffa18..9da0c9f99916 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -39,25 +39,26 @@ static void fake_free_pages(struct drm_i915_gem_object *obj,
kfree(pages);
}
-static struct sg_table *
-fake_get_pages(struct drm_i915_gem_object *obj)
+static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
struct sg_table *pages;
struct scatterlist *sg;
+ unsigned int sg_page_sizes;
typeof(obj->base.size) rem;
pages = kmalloc(sizeof(*pages), GFP);
if (!pages)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
rem = round_up(obj->base.size, BIT(31)) >> 31;
if (sg_alloc_table(pages, rem, GFP)) {
kfree(pages);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
+ sg_page_sizes = 0;
rem = obj->base.size;
for (sg = pages->sgl; sg; sg = sg_next(sg)) {
unsigned long len = min_t(typeof(rem), rem, BIT(31));
@@ -66,13 +67,17 @@ fake_get_pages(struct drm_i915_gem_object *obj)
sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
sg_dma_address(sg) = page_to_phys(sg_page(sg));
sg_dma_len(sg) = len;
+ sg_page_sizes |= len;
rem -= len;
}
GEM_BUG_ON(rem);
obj->mm.madv = I915_MADV_DONTNEED;
- return pages;
+
+ __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+
+ return 0;
#undef GFP
}
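The reworked fake_get_pages() builds sg_page_sizes by OR-ing each scatterlist chunk length and hands the mask to __i915_gem_object_set_pages(). A standalone sketch (illustrative values only, not part of the patch) of how 4K/64K/2M chunks combine into that mask:

	#include <stdio.h>

	int main(void)
	{
		unsigned int chunks[] = { 4096, 65536, 2097152 };	/* 4K, 64K, 2M */
		unsigned int sg_page_sizes = 0;
		unsigned int i;

		/* mirror the "sg_page_sizes |= len" accumulation in fake_get_pages() */
		for (i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++)
			sg_page_sizes |= chunks[i];

		printf("page size mask: %#x\n", sg_page_sizes);	/* prints 0x211000 */
		return 0;
	}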
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 8f011c447e41..1b8774a42e48 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -251,14 +251,6 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
return PTR_ERR(io);
}
- err = i915_vma_get_fence(vma);
- if (err) {
- pr_err("Failed to get fence for partial view: offset=%lu\n",
- page);
- i915_vma_unpin_iomap(vma);
- return err;
- }
-
iowrite32(page, io + n * PAGE_SIZE/sizeof(*io));
i915_vma_unpin_iomap(vma);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index 6664cb2eb0b8..a999161e8db1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -215,7 +215,9 @@ static int igt_request_rewind(void *arg)
}
i915_gem_request_get(vip);
i915_add_request(vip);
+ rcu_read_lock();
request->engine->submit_request(request);
+ rcu_read_unlock();
mutex_unlock(&i915->drm.struct_mutex);
@@ -418,7 +420,10 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
err = PTR_ERR(cmd);
goto err;
}
+
*cmd = MI_BATCH_BUFFER_END;
+ i915_gem_chipset_flush(i915);
+
i915_gem_object_unpin_map(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -605,8 +610,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
*cmd++ = lower_32_bits(vma->node.start);
}
*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
+ i915_gem_chipset_flush(i915);
- wmb();
i915_gem_object_unpin_map(obj);
return vma;
@@ -625,7 +630,7 @@ static int recursive_batch_resolve(struct i915_vma *batch)
return PTR_ERR(cmd);
*cmd = MI_BATCH_BUFFER_END;
- wmb();
+ i915_gem_chipset_flush(batch->vm->i915);
i915_gem_object_unpin_map(batch->obj);
@@ -858,7 +863,8 @@ out_request:
I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
- wmb();
+ i915_gem_chipset_flush(i915);
+
i915_gem_object_unpin_map(request[id]->batch->obj);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
index 7a44dab631b8..4795877abe56 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c
@@ -121,7 +121,7 @@ out:
static unsigned int random_engine(struct rnd_state *rnd)
{
- return ((u64)prandom_u32_state(rnd) * I915_NUM_ENGINES) >> 32;
+ return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd);
}
static int bench_sync(void *arg)
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 1519f1b7841b..d7dd98a6acad 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -16,5 +16,7 @@ selftest(objects, i915_gem_object_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
selftest(gtt, i915_gem_gtt_live_selftests)
+selftest(evict, i915_gem_evict_live_selftests)
+selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index e5a9e5dcf2f3..19c6fce837df 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -22,3 +22,4 @@ selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
selftest(vma, i915_vma_mock_selftests)
selftest(evict, i915_gem_evict_mock_selftests)
selftest(gtt, i915_gem_gtt_mock_selftests)
+selftest(hugepages, i915_gem_huge_page_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
index 222c511bea49..b85872cc7fbe 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.c
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -41,11 +41,6 @@ u64 i915_prandom_u64_state(struct rnd_state *rnd)
return x;
}
-static inline u32 i915_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
-{
- return upper_32_bits((u64)prandom_u32_state(state) * ep_ro);
-}
-
void i915_random_reorder(unsigned int *order, unsigned int count,
struct rnd_state *state)
{
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index 6c9379871384..7dffedc501ca 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -43,6 +43,11 @@
u64 i915_prandom_u64_state(struct rnd_state *rnd);
+static inline u32 i915_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
+{
+ return upper_32_bits(mul_u32_u32(prandom_u32_state(state), ep_ro));
+}
+
unsigned int *i915_random_order(unsigned int count,
struct rnd_state *state);
void i915_random_reorder(unsigned int *order,
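i915_prandom_u32_max_state(), now shared from this header, bounds a PRNG output to [0, ep_ro) with a multiply-high instead of a modulo, the same trick prandom_u32_max() uses. A standalone sketch of the mapping (illustrative, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* take the high 32 bits of a 32x32 -> 64-bit product, as
	 * upper_32_bits(mul_u32_u32(prandom_u32_state(state), ep_ro)) does */
	static uint32_t bounded(uint32_t x, uint32_t n)
	{
		return (uint32_t)(((uint64_t)x * n) >> 32);
	}

	int main(void)
	{
		printf("%u %u\n", bounded(0u, 5), bounded(0xffffffffu, 5));	/* 0 4 */
		return 0;
	}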
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index 19d145d6bf52..ea01d0fe3ace 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -24,6 +24,7 @@
#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/prime_numbers.h>
#include "../i915_selftest.h"
@@ -565,6 +566,46 @@ err_in:
return ret;
}
+static int test_timer(void *arg)
+{
+ unsigned long target, delay;
+ struct timed_fence tf;
+
+ timed_fence_init(&tf, target = jiffies);
+ if (!i915_sw_fence_done(&tf.fence)) {
+ pr_err("Fence with immediate expiration not signaled\n");
+ goto err;
+ }
+ timed_fence_fini(&tf);
+
+ for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
+ timed_fence_init(&tf, target = jiffies + delay);
+ if (i915_sw_fence_done(&tf.fence)) {
+ pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
+ goto err;
+ }
+
+ i915_sw_fence_wait(&tf.fence);
+ if (!i915_sw_fence_done(&tf.fence)) {
+ pr_err("Fence not signaled after wait\n");
+ goto err;
+ }
+ if (time_before(jiffies, target)) {
+ pr_err("Fence signaled too early, target=%lu, now=%lu\n",
+ target, jiffies);
+ goto err;
+ }
+
+ timed_fence_fini(&tf);
+ }
+
+ return 0;
+
+err:
+ timed_fence_fini(&tf);
+ return -EINVAL;
+}
+
int i915_sw_fence_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -576,6 +617,7 @@ int i915_sw_fence_mock_selftests(void)
SUBTEST(test_C_AB),
SUBTEST(test_chain),
SUBTEST(test_ipc),
+ SUBTEST(test_timer),
};
return i915_subtests(tests, NULL);
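test_timer() sweeps fence expirations over prime-numbered jiffy delays up to half the selftest timeout, covering the range without visiting every value. A rough sketch of the iterator from <linux/prime_numbers.h>, assuming its usual semantics:

	unsigned long p;

	for_each_prime_number(p, 20)
		pr_info("delay %lu jiffies\n", p);	/* visits 2, 3, 5, 7, 11, 13, 17, 19 */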
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 02e52a146ed8..71ce06680d66 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -165,6 +165,7 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = lower_32_bits(vma->node.start);
}
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
+ i915_gem_chipset_flush(h->i915);
flags = 0;
if (INTEL_GEN(vm->i915) <= 5)
@@ -231,7 +232,7 @@ static u32 hws_seqno(const struct hang *h,
static void hang_fini(struct hang *h)
{
*h->batch = MI_BATCH_BUFFER_END;
- wmb();
+ i915_gem_chipset_flush(h->i915);
i915_gem_object_unpin_map(h->obj);
i915_gem_object_put(h->obj);
@@ -275,6 +276,8 @@ static int igt_hang_sanitycheck(void *arg)
i915_gem_request_get(rq);
*h.batch = MI_BATCH_BUFFER_END;
+ i915_gem_chipset_flush(i915);
+
__i915_add_request(rq, true);
timeout = i915_wait_request(rq,
@@ -621,7 +624,15 @@ static int igt_wait_reset(void *arg)
__i915_add_request(rq, true);
if (!wait_for_hang(&h, rq)) {
- pr_err("Failed to start request %x\n", rq->fence.seqno);
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("Failed to start request %x, at %x\n",
+ rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p);
+
+ i915_reset(i915, 0);
+ i915_gem_set_wedged(i915);
+
err = -EIO;
goto out_rq;
}
@@ -708,10 +719,18 @@ static int igt_reset_queue(void *arg)
__i915_add_request(rq, true);
if (!wait_for_hang(&h, prev)) {
- pr_err("Failed to start request %x\n",
- prev->fence.seqno);
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("Failed to start request %x, at %x\n",
+ prev->fence.seqno, hws_seqno(&h, prev));
+ intel_engine_dump(rq->engine, &p);
+
i915_gem_request_put(rq);
i915_gem_request_put(prev);
+
+ i915_reset(i915, 0);
+ i915_gem_set_wedged(i915);
+
err = -EIO;
goto fini;
}
@@ -756,7 +775,7 @@ static int igt_reset_queue(void *arg)
pr_info("%s: Completed %d resets\n", engine->name, count);
*h.batch = MI_BATCH_BUFFER_END;
- wmb();
+ i915_gem_chipset_flush(i915);
i915_gem_request_put(prev);
}
@@ -806,7 +825,15 @@ static int igt_handle_error(void *arg)
__i915_add_request(rq, true);
if (!wait_for_hang(&h, rq)) {
- pr_err("Failed to start request %x\n", rq->fence.seqno);
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("Failed to start request %x, at %x\n",
+ rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p);
+
+ i915_reset(i915, 0);
+ i915_gem_set_wedged(i915);
+
err = -EIO;
goto err_request;
}
@@ -843,17 +870,24 @@ err_unlock:
int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_hang_sanitycheck),
- SUBTEST(igt_global_reset),
SUBTEST(igt_reset_engine),
SUBTEST(igt_reset_active_engines),
SUBTEST(igt_wait_reset),
SUBTEST(igt_reset_queue),
SUBTEST(igt_handle_error),
};
+ int err;
if (!intel_has_gpu_reset(i915))
return 0;
- return i915_subtests(tests, i915);
+ intel_runtime_pm_get(i915);
+
+ err = i915_subtests(tests, i915);
+
+ intel_runtime_pm_put(i915);
+
+ return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
new file mode 100644
index 000000000000..3790fdf44a1a
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "lib_sw_fence.h"
+
+/* Small library of different fence types useful for writing tests */
+
+static int __i915_sw_fence_call
+nop_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ return NOTIFY_DONE;
+}
+
+void __onstack_fence_init(struct i915_sw_fence *fence,
+ const char *name,
+ struct lock_class_key *key)
+{
+ debug_fence_init_onstack(fence);
+
+ __init_waitqueue_head(&fence->wait, name, key);
+ atomic_set(&fence->pending, 1);
+ fence->flags = (unsigned long)nop_fence_notify;
+}
+
+void onstack_fence_fini(struct i915_sw_fence *fence)
+{
+ i915_sw_fence_commit(fence);
+ i915_sw_fence_fini(fence);
+}
+
+static void timed_fence_wake(unsigned long data)
+{
+ struct timed_fence *tf = (struct timed_fence *)data;
+
+ i915_sw_fence_commit(&tf->fence);
+}
+
+void timed_fence_init(struct timed_fence *tf, unsigned long expires)
+{
+ onstack_fence_init(&tf->fence);
+
+ setup_timer_on_stack(&tf->timer, timed_fence_wake, (unsigned long)tf);
+
+ if (time_after(expires, jiffies))
+ mod_timer(&tf->timer, expires);
+ else
+ i915_sw_fence_commit(&tf->fence);
+}
+
+void timed_fence_fini(struct timed_fence *tf)
+{
+ if (del_timer_sync(&tf->timer))
+ i915_sw_fence_commit(&tf->fence);
+
+ destroy_timer_on_stack(&tf->timer);
+ i915_sw_fence_fini(&tf->fence);
+}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.h b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h
new file mode 100644
index 000000000000..474aafb92ae1
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h
@@ -0,0 +1,42 @@
+/*
+ * lib_sw_fence.h - library routines for testing N:M synchronisation points
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#ifndef _LIB_SW_FENCE_H_
+#define _LIB_SW_FENCE_H_
+
+#include <linux/timer.h>
+
+#include "../i915_sw_fence.h"
+
+#ifdef CONFIG_LOCKDEP
+#define onstack_fence_init(fence) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __onstack_fence_init((fence), #fence, &__key); \
+} while (0)
+#else
+#define onstack_fence_init(fence) \
+ __onstack_fence_init((fence), NULL, NULL)
+#endif
+
+void __onstack_fence_init(struct i915_sw_fence *fence,
+ const char *name,
+ struct lock_class_key *key);
+void onstack_fence_fini(struct i915_sw_fence *fence);
+
+struct timed_fence {
+ struct i915_sw_fence fence;
+ struct timer_list timer;
+};
+
+void timed_fence_init(struct timed_fence *tf, unsigned long expires);
+void timed_fence_fini(struct timed_fence *tf);
+
+#endif /* _LIB_SW_FENCE_H_ */
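A minimal usage sketch of the two fence types declared here, mirroring igt_evict_contexts() and test_timer() above (error handling omitted):

	struct i915_sw_fence fence;
	struct timed_fence tf;

	onstack_fence_init(&fence);
	/* ... queue work that awaits &fence, e.g. via
	 * i915_sw_fence_await_sw_fence_gfp(&rq->submit, &fence, GFP_KERNEL) ... */
	onstack_fence_fini(&fence);		/* commits and releases the waiters */

	timed_fence_init(&tf, jiffies + HZ);	/* self-signals after roughly one second */
	i915_sw_fence_wait(&tf.fence);
	timed_fence_fini(&tf);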
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index 098ce643ad07..bbf80d42e793 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -73,11 +73,7 @@ err_put:
void mock_context_close(struct i915_gem_context *ctx)
{
- i915_gem_context_set_closed(ctx);
-
- i915_ppgtt_close(&ctx->ppgtt->base);
-
- i915_gem_context_put(ctx);
+ context_close(ctx);
}
void mock_init_contexts(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index fc0fd7498689..331c2b09869e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -32,9 +32,9 @@ static struct mock_request *first_request(struct mock_engine *engine)
link);
}
-static void hw_delay_complete(unsigned long data)
+static void hw_delay_complete(struct timer_list *t)
{
- struct mock_engine *engine = (typeof(engine))data;
+ struct mock_engine *engine = from_timer(engine, t, hw_delay);
struct mock_request *request;
spin_lock(&engine->hw_lock);
@@ -161,9 +161,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
/* fake hw queue */
spin_lock_init(&engine->hw_lock);
- setup_timer(&engine->hw_delay,
- hw_delay_complete,
- (unsigned long)engine);
+ timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
return &engine->base;
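The mock engine switches from setup_timer() with an unsigned long cookie to the newer timer_setup()/from_timer() pattern. A small sketch of that pattern with a hypothetical struct foo (names are illustrative only):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct foo {
		struct timer_list timer;
		int fired;
	};

	static void foo_timer_fn(struct timer_list *t)
	{
		struct foo *f = from_timer(f, t, timer);	/* container_of on the timer */

		f->fired = 1;
	}

	static void foo_start(struct foo *f)
	{
		timer_setup(&f->timer, foo_timer_fn, 0);
		mod_timer(&f->timer, jiffies + HZ);
	}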
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 678723430d78..04eb9362f4f8 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -83,6 +83,8 @@ static void mock_device_release(struct drm_device *dev)
kmem_cache_destroy(i915->vmas);
kmem_cache_destroy(i915->objects);
+ i915_gemfs_fini(i915);
+
drm_dev_fini(&i915->drm);
put_device(&i915->drm.pdev->dev);
}
@@ -146,6 +148,11 @@ struct drm_i915_private *mock_gem_device(void)
dev_set_name(&pdev->dev, "mock");
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
+ /* hack to disable iommu for the fake device; force identity mapping */
+ pdev->dev.archdata.iommu = (void *)-1;
+#endif
+
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
@@ -167,6 +174,11 @@ struct drm_i915_private *mock_gem_device(void)
mkwrite_device_info(i915)->gen = -1;
+ mkwrite_device_info(i915)->page_sizes =
+ I915_GTT_PAGE_SIZE_4K |
+ I915_GTT_PAGE_SIZE_64K |
+ I915_GTT_PAGE_SIZE_2M;
+
spin_lock_init(&i915->mm.object_stat_lock);
mock_uncore_init(i915);
@@ -234,8 +246,16 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->kernel_context)
goto err_engine;
+ i915->preempt_context = mock_context(i915, NULL);
+ if (!i915->preempt_context)
+ goto err_kernel_context;
+
+ WARN_ON(i915_gemfs_init(i915));
+
return i915;
+err_kernel_context:
+ i915_gem_context_put(i915->kernel_context);
err_engine:
for_each_engine(engine, i915, id)
mock_engine_free(engine);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index f2118cf535a0..336e1afb250f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -43,7 +43,6 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
u32 flags)
{
GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
- vma->pages = vma->obj->mm.pages;
vma->flags |= I915_VMA_LOCAL_BIND;
return 0;
}
@@ -84,6 +83,8 @@ mock_ppgtt(struct drm_i915_private *i915,
ppgtt->base.insert_entries = mock_insert_entries;
ppgtt->base.bind_vma = mock_bind_ppgtt;
ppgtt->base.unbind_vma = mock_unbind_ppgtt;
+ ppgtt->base.set_pages = ppgtt_set_pages;
+ ppgtt->base.clear_pages = clear_pages;
ppgtt->base.cleanup = mock_cleanup;
return ppgtt;
@@ -93,12 +94,6 @@ static int mock_bind_ggtt(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- int err;
-
- err = i915_get_ggtt_vma_pages(vma);
- if (err)
- return err;
-
vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
return 0;
}
@@ -124,6 +119,8 @@ void mock_init_ggtt(struct drm_i915_private *i915)
ggtt->base.insert_entries = mock_insert_entries;
ggtt->base.bind_vma = mock_bind_ggtt;
ggtt->base.unbind_vma = mock_unbind_ggtt;
+ ggtt->base.set_pages = ggtt_set_pages;
+ ggtt->base.clear_pages = clear_pages;
ggtt->base.cleanup = mock_cleanup;
i915_address_space_init(&ggtt->base, i915, "global");
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
index 1cc5d2931753..cd6d2a16071f 100644
--- a/drivers/gpu/drm/i915/selftests/scatterlist.c
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -189,6 +189,20 @@ static unsigned int random(unsigned long n,
return 1 + (prandom_u32_state(rnd) % 1024);
}
+static unsigned int random_page_size_pages(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ /* 4K, 64K, 2M */
+ static unsigned int page_count[] = {
+ BIT(12) >> PAGE_SHIFT,
+ BIT(16) >> PAGE_SHIFT,
+ BIT(21) >> PAGE_SHIFT,
+ };
+
+ return page_count[(prandom_u32_state(rnd) % 3)];
+}
+
static inline bool page_contiguous(struct page *first,
struct page *last,
unsigned long npages)
@@ -252,6 +266,7 @@ static const npages_fn_t npages_funcs[] = {
grow,
shrink,
random,
+ random_page_size_pages,
NULL,
};
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index f91cb72d0830..93c7e3f9b4a8 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -24,6 +24,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_of.h>
@@ -105,7 +106,7 @@ static int imx_drm_atomic_check(struct drm_device *dev,
}
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = imx_drm_output_poll_changed,
.atomic_check = imx_drm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index cf98596c7ce1..247c60e6bed2 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -18,6 +18,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include "video/imx-ipu-v3.h"
@@ -690,7 +691,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
- .prepare_fb = drm_fb_cma_prepare_fb,
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = ipu_plane_atomic_check,
.atomic_disable = ipu_plane_atomic_disable,
.atomic_update = ipu_plane_atomic_update,
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 690c67507cbc..3ff502771ba2 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1696,11 +1696,7 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
hdmi->bridge.of_node = pdev->dev.of_node;
- ret = drm_bridge_add(&hdmi->bridge);
- if (ret) {
- dev_err(dev, "failed to add bridge, ret = %d\n", ret);
- return ret;
- }
+ drm_bridge_add(&hdmi->bridge);
ret = mtk_hdmi_clk_enable_audio(hdmi);
if (ret) {
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 7742c7d81ed8..3b804fdaf7a0 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -34,6 +34,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_fb_helper.h>
@@ -78,7 +79,7 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = {
.output_poll_changed = meson_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
};
static irqreturn_t meson_irq(int irq, void *arg)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 5e9cd4c0e8b6..68e5d9c94475 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1670,7 +1670,7 @@ static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
int enc_id = connector->encoder_ids[0];
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index ced70783b44e..92b3844202d2 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -9,6 +9,7 @@ msm-y := \
adreno/a4xx_gpu.o \
adreno/a5xx_gpu.o \
adreno/a5xx_power.o \
+ adreno/a5xx_preempt.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -58,7 +59,8 @@ msm-y := \
msm_iommu.o \
msm_perf.o \
msm_rd.o \
- msm_ringbuffer.o
+ msm_ringbuffer.o \
+ msm_submitqueue.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 7791313405b5..4baef2738178 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -44,7 +44,7 @@ static bool a3xx_idle(struct msm_gpu *gpu);
static bool a3xx_me_init(struct msm_gpu *gpu)
{
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
@@ -65,7 +65,7 @@ static bool a3xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu);
+ gpu->funcs->flush(gpu, ring);
return a3xx_idle(gpu);
}
@@ -339,7 +339,7 @@ static void a3xx_destroy(struct msm_gpu *gpu)
static bool a3xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- if (!adreno_idle(gpu))
+ if (!adreno_idle(gpu, gpu->rb[0]))
return false;
/* then wait for GPU to finish: */
@@ -444,9 +444,9 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
.recover = a3xx_recover,
- .last_fence = adreno_last_fence,
.submit = adreno_submit,
.flush = adreno_flush,
+ .active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
#ifdef CONFIG_DEBUG_FS
@@ -492,7 +492,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a3xx_registers;
adreno_gpu->reg_offsets = a3xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 58341ef6f15b..8199a4b9f2fa 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -116,7 +116,7 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
static bool a4xx_me_init(struct msm_gpu *gpu)
{
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
@@ -137,7 +137,7 @@ static bool a4xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu);
+ gpu->funcs->flush(gpu, ring);
return a4xx_idle(gpu);
}
@@ -337,7 +337,7 @@ static void a4xx_destroy(struct msm_gpu *gpu)
static bool a4xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- if (!adreno_idle(gpu))
+ if (!adreno_idle(gpu, gpu->rb[0]))
return false;
/* then wait for GPU to finish: */
@@ -532,9 +532,9 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = a4xx_pm_suspend,
.pm_resume = a4xx_pm_resume,
.recover = a4xx_recover,
- .last_fence = adreno_last_fence,
.submit = adreno_submit,
.flush = adreno_flush,
+ .active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
#ifdef CONFIG_DEBUG_FS
@@ -574,7 +574,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a4xx_registers;
adreno_gpu->reg_offsets = a4xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 17c59d839e6f..a1f4eeeb73e2 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -26,8 +26,9 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
-static int zap_shader_load_mdt(struct device *dev, const char *fwname)
+static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
{
+ struct device *dev = &gpu->pdev->dev;
const struct firmware *fw;
struct device_node *np;
struct resource r;
@@ -55,10 +56,10 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
mem_size = resource_size(&r);
/* Request the MDT file for the firmware */
- ret = request_firmware(&fw, fwname, dev);
- if (ret) {
+ fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+ if (IS_ERR(fw)) {
DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
- return ret;
+ return PTR_ERR(fw);
}
/* Figure out how much memory we need */
@@ -75,9 +76,26 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
goto out;
}
- /* Load the rest of the MDT */
- ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID, mem_region, mem_phys,
- mem_size);
+ /*
+ * Load the rest of the MDT
+ *
+ * Note that we could be dealing with two different paths, since
+ * with upstream linux-firmware it would be in a qcom/ subdirectory.
+ * adreno_request_fw() handles this, but qcom_mdt_load() does
+ * not. But since we've already gotten through adreno_request_fw()
+ * we know which of the two cases it is:
+ */
+ if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
+ ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
+ mem_region, mem_phys, mem_size);
+ } else {
+ char newname[strlen("qcom/") + strlen(fwname) + 1];
+
+ sprintf(newname, "qcom/%s", fwname);
+
+ ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
+ mem_region, mem_phys, mem_size);
+ }
if (ret)
goto out;
@@ -95,14 +113,65 @@ out:
return ret;
}
+static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ uint32_t wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Make sure everything is posted before making a decision */
+ mb();
+
+ /* Update HW if this is the current ring and we are not in preempt */
+ if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
+ gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_drm_private *priv = gpu->dev->dev_private;
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ /* Enable local preemption for fine-grained preemption */
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Submit the commands */
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
@@ -120,16 +189,54 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
}
+ /*
+ * Write the render mode to NULL (0) to indicate to the CP that the IBs
+ * are done rendering - otherwise a lucky preemption would start
+ * replaying from the last checkpoint
+ */
+ OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+
+ /* Turn off IB level preemptions */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
- OUT_RING(ring, submit->fence->seqno);
+ OUT_RING(ring, submit->seqno);
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+ * timestamp is written to memory and then triggers the interrupt
+ */
OUT_PKT7(ring, CP_EVENT_WRITE, 4);
OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
- OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence)));
- OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence)));
- OUT_RING(ring, submit->fence->seqno);
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, submit->seqno);
- gpu->funcs->flush(gpu);
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ /*
+ * If dword[2:1] are non-zero, they specify an address for the CP to
+ * write the value of dword[3] to on preemption complete. Write 0 to
+ * skip the write
+ */
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ /* Data value - not used if the address above is 0 */
+ OUT_RING(ring, 0x01);
+ /* Set bit 0 to trigger an interrupt on preempt complete */
+ OUT_RING(ring, 0x01);
+
+ a5xx_flush(gpu, ring);
+
+ /* Check to see if we need to start preemption */
+ a5xx_preempt_trigger(gpu);
}
static const struct {
@@ -245,7 +352,7 @@ void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
static int a5xx_me_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT7(ring, CP_ME_INIT, 8);
@@ -276,11 +383,54 @@ static int a5xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu);
+ gpu->funcs->flush(gpu, ring);
+ return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static int a5xx_preempt_start(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ if (gpu->nr_rings == 1)
+ return 0;
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x00);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x01);
- return a5xx_idle(gpu) ? 0 : -EINVAL;
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x01);
+ OUT_RING(ring, 0x01);
+
+ gpu->funcs->flush(gpu, ring);
+
+ return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
+
static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
const struct firmware *fw, u64 *iova)
{
@@ -381,7 +531,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
return -ENODEV;
}
- ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
+ ret = zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw);
loaded = !ret;
@@ -396,6 +546,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
+ A5XX_RBBM_INT_0_MASK_CP_SW | \
A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
@@ -536,13 +687,14 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
- /* Load the GPMU firmware before starting the HW init */
- a5xx_gpmu_ucode_init(gpu);
-
ret = adreno_hw_init(gpu);
if (ret)
return ret;
+ a5xx_preempt_hw_init(gpu);
+
+ a5xx_gpmu_ucode_init(gpu);
+
ret = a5xx_ucode_init(gpu);
if (ret)
return ret;
@@ -565,11 +717,11 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
* ticking correctly
*/
if (adreno_is_a530(adreno_gpu)) {
- OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1);
- OUT_RING(gpu->rb, 0x0F);
+ OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
+ OUT_RING(gpu->rb[0], 0x0F);
- gpu->funcs->flush(gpu);
- if (!a5xx_idle(gpu))
+ gpu->funcs->flush(gpu, gpu->rb[0]);
+ if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
}
@@ -582,11 +734,11 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
*/
ret = a5xx_zap_shader_init(gpu);
if (!ret) {
- OUT_PKT7(gpu->rb, CP_SET_SECURE_MODE, 1);
- OUT_RING(gpu->rb, 0x00000000);
+ OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
+ OUT_RING(gpu->rb[0], 0x00000000);
- gpu->funcs->flush(gpu);
- if (!a5xx_idle(gpu))
+ gpu->funcs->flush(gpu, gpu->rb[0]);
+ if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
} else {
/* Print a warning so if we die, we know why */
@@ -595,6 +747,9 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
}
+ /* Last step - yield the ringbuffer */
+ a5xx_preempt_start(gpu);
+
return 0;
}
@@ -625,6 +780,8 @@ static void a5xx_destroy(struct msm_gpu *gpu)
DBG("%s", gpu->name);
+ a5xx_preempt_fini(gpu);
+
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -660,18 +817,27 @@ static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
}
-bool a5xx_idle(struct msm_gpu *gpu)
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (ring != a5xx_gpu->cur_ring) {
+ WARN(1, "Tried to idle a non-current ringbuffer\n");
+ return false;
+ }
+
/* wait for CP to drain ringbuffer: */
- if (!adreno_idle(gpu))
+ if (!adreno_idle(gpu, ring))
return false;
if (spin_until(_a5xx_check_idle(gpu))) {
- DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n",
+ DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
gpu->name, __builtin_return_address(0),
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
- gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
-
+ gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR));
return false;
}
@@ -802,9 +968,10 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
- dev_err(dev->dev, "gpu fault fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
- gpu->funcs->last_fence(gpu),
+ dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ ring ? ring->id : -1, ring ? ring->seqno : 0,
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
@@ -854,8 +1021,13 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
a5xx_gpmu_err_irq(gpu);
- if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
+ a5xx_preempt_trigger(gpu);
msm_gpu_retire(gpu);
+ }
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
+ a5xx_preempt_irq(gpu);
return IRQ_HANDLED;
}
@@ -985,6 +1157,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
}
#endif
+static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ return a5xx_gpu->cur_ring;
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@@ -992,9 +1172,9 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = a5xx_pm_suspend,
.pm_resume = a5xx_pm_resume,
.recover = a5xx_recover,
- .last_fence = adreno_last_fence,
.submit = a5xx_submit,
- .flush = adreno_flush,
+ .flush = a5xx_flush,
+ .active_ring = a5xx_active_ring,
.irq = a5xx_irq,
.destroy = a5xx_destroy,
#ifdef CONFIG_DEBUG_FS
@@ -1030,7 +1210,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
a5xx_gpu->lm_leakage = 0x4E001A;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
@@ -1039,5 +1219,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
if (gpu->aspace)
msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+ /* Set up the preemption specific bits and pieces for each ringbuffer */
+ a5xx_preempt_init(gpu);
+
return gpu;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index e94451685bf8..6fb8c2f9b9e4 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,10 +35,100 @@ struct a5xx_gpu {
uint32_t gpmu_dwords;
uint32_t lm_leakage;
+
+ struct msm_ringbuffer *cur_ring;
+ struct msm_ringbuffer *next_ring;
+
+ struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+ struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
+ uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+
+ atomic_t preempt_state;
+ struct timer_list preempt_timer;
};
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+/*
+ * In order to do lockless preemption we use a simple state machine to progress
+ * through the process.
+ *
+ * PREEMPT_NONE - no preemption in progress. Next state START.
+ * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
+ * states: TRIGGERED, NONE
+ * PREEMPT_ABORT - An intermediate state before moving back to NONE. Next
+ * state: NONE.
+ * PREEMPT_TRIGGERED - A preemption has been executed on the hardware. Next
+ * states: FAULTED, PENDING
+ * PREEMPT_FAULTED - A preemption timed out (never completed). This will trigger
+ * recovery. Next state: N/A
+ * PREEMPT_PENDING - Preemption complete interrupt fired; the callback is
+ * checking the success of the operation. Next states: FAULTED, NONE.
+ */
+
+enum preempt_state {
+ PREEMPT_NONE = 0,
+ PREEMPT_START,
+ PREEMPT_ABORT,
+ PREEMPT_TRIGGERED,
+ PREEMPT_FAULTED,
+ PREEMPT_PENDING,
+};
+
+/*
+ * struct a5xx_preempt_record is a shared buffer between the microcode and the
+ * CPU to store the state for preemption. The record itself is much larger
+ * (64k) but most of that is used by the CP for storage.
+ *
+ * There is a preemption record assigned per ringbuffer. When the CPU triggers a
+ * preemption, it fills out the record with the useful information (wptr, ring
+ * base, etc) and the microcode uses that information to set up the CP following
+ * the preemption. When a ring is switched out, the CP will save the ringbuffer
+ * state back to the record. In this way, once the records are properly set up
+ * the CPU can quickly switch back and forth between ringbuffers by only
+ * updating a few registers (often only the wptr).
+ *
+ * These are the CPU aware registers in the record:
+ * @magic: Must always be 0x27C4BAFC
+ * @info: Type of the record - written 0 by the CPU, updated by the CP
+ * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
+ * the CP
+ * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
+ * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
+ * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
+ * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
+ * @rbase: Value of RB_BASE written by CPU, save/restored by CP
+ * @counter: GPU address of the storage area for the performance counters
+ */
+struct a5xx_preempt_record {
+ uint32_t magic;
+ uint32_t info;
+ uint32_t data;
+ uint32_t cntl;
+ uint32_t rptr;
+ uint32_t wptr;
+ uint64_t rptr_addr;
+ uint64_t rbase;
+ uint64_t counter;
+};
+
+/* Magic identifier for the preemption record */
+#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL
+
+/*
+ * Even though the structure above is only a few bytes, we need a full 64k to
+ * store the entire preemption record from the CP
+ */
+#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)
+
+/*
+ * The preemption counter block is a storage area for the value of the
+ * preemption counters that are saved immediately before context switch. We
+ * append it to the end of the allocation for the preemption record.
+ */
+#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
+
+
int a5xx_power_init(struct msm_gpu *gpu);
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
@@ -55,7 +145,22 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
return -ETIMEDOUT;
}
-bool a5xx_idle(struct msm_gpu *gpu);
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
+void a5xx_preempt_init(struct msm_gpu *gpu);
+void a5xx_preempt_hw_init(struct msm_gpu *gpu);
+void a5xx_preempt_trigger(struct msm_gpu *gpu);
+void a5xx_preempt_irq(struct msm_gpu *gpu);
+void a5xx_preempt_fini(struct msm_gpu *gpu);
+
+/* Return true if we are in a preempt state */
+static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
+{
+ int preempt_state = atomic_read(&a5xx_gpu->preempt_state);
+
+ return !(preempt_state == PREEMPT_NONE ||
+ preempt_state == PREEMPT_ABORT);
+}
+
#endif /* __A5XX_GPU_H__ */
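The sizes defined above imply the following per-ring buffer layout, set up later in this patch by preempt_init_ring() (sketch only):

	/*
	 *  iova                               +64K                       +64K+64B
	 *   +----------------------------------+---------------------------+
	 *   | a5xx_preempt_record + CP scratch | saved preemption counters |
	 *   +----------------------------------+---------------------------+
	 *      A5XX_PREEMPT_RECORD_SIZE           A5XX_PREEMPT_COUNTER_SIZE
	 */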
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 04aab1dcae2b..e5700bbf09dd 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -173,7 +173,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
if (!a5xx_gpu->gpmu_dwords)
return 0;
@@ -192,9 +192,9 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
- gpu->funcs->flush(gpu);
+ gpu->funcs->flush(gpu, ring);
- if (!a5xx_idle(gpu)) {
+ if (!a5xx_idle(gpu, ring)) {
DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
gpu->name);
return -EINVAL;
@@ -264,7 +264,8 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
return;
/* Get the firmware */
- if (request_firmware(&fw, adreno_gpu->info->gpmufw, drm->dev)) {
+ fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->gpmufw);
+ if (IS_ERR(fw)) {
DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
gpu->name);
return;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
new file mode 100644
index 000000000000..40f4840ef98e
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -0,0 +1,305 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "a5xx_gpu.h"
+
+/*
+ * Try to transition the preemption state from old to new. Return
+ * true on success or false if the original state wasn't 'old'
+ */
+static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
+ enum preempt_state old, enum preempt_state new)
+{
+ enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
+ old, new);
+
+ return (cur == old);
+}
+
+/*
+ * Force the preemption state to the specified state. This is used in cases
+ * where the current state is known and won't change
+ */
+static inline void set_preempt_state(struct a5xx_gpu *gpu,
+ enum preempt_state new)
+{
+ /*
+ * preempt_state may be read by other cores trying to trigger a
+ * preemption or in the interrupt handler, so barriers are needed
+ * before...
+ */
+ smp_mb__before_atomic();
+ atomic_set(&gpu->preempt_state, new);
+ /* ... and after */
+ smp_mb__after_atomic();
+}
+
+/* Write the most recent wptr for the given ring into the hardware */
+static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ unsigned long flags;
+ uint32_t wptr;
+
+ if (!ring)
+ return;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ wptr = get_wptr(ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+/* Return the highest priority ringbuffer with something in it */
+static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+{
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ bool empty;
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ spin_lock_irqsave(&ring->lock, flags);
+ empty = (get_wptr(ring) == ring->memptrs->rptr);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ if (!empty)
+ return ring;
+ }
+
+ return NULL;
+}
+
+static void a5xx_preempt_timer(unsigned long data)
+{
+ struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
+ struct msm_gpu *gpu = &a5xx_gpu->base.base;
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
+ return;
+
+ dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
+/* Try to trigger a preemption switch */
+void a5xx_preempt_trigger(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned long flags;
+ struct msm_ringbuffer *ring;
+
+ if (gpu->nr_rings == 1)
+ return;
+
+ /*
+ * Try to start preemption by moving from NONE to START. If
+ * unsuccessful, a preemption is already in flight
+ */
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
+ return;
+
+ /* Get the next ring to preempt to */
+ ring = get_next_ring(gpu);
+
+ /*
+ * If no ring is populated or the highest priority ring is the current
+ * one, do nothing except update the wptr to the latest and greatest
+ */
+ if (!ring || (a5xx_gpu->cur_ring == ring)) {
+ /*
+ * It's possible that while a preemption request is in progress
+ * from an irq context, a user context trying to submit might
+ * fail to update the write pointer, because it determines
+ * that the preempt state is not PREEMPT_NONE.
+ *
+ * Close the race by introducing an intermediate
+ * state PREEMPT_ABORT to let the submit path
+ * know that the ringbuffer is not going to change
+ * and can safely update the write pointer.
+ */
+
+ set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+ return;
+ }
+
+ /* Make sure the wptr doesn't update while we're in motion */
+ spin_lock_irqsave(&ring->lock, flags);
+ a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Set the address of the incoming preemption record */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
+ REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
+ a5xx_gpu->preempt_iova[ring->id]);
+
+ a5xx_gpu->next_ring = ring;
+
+ /* Start a timer to catch a stuck preemption */
+ mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
+
+ /* Set the preemption state to triggered */
+ set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);
+
+ /* Make sure everything is written before hitting the button */
+ wmb();
+
+ /* And actually start the preemption */
+ gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
+}
+
+void a5xx_preempt_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
+ return;
+
+ /* Delete the preemption watchdog timer */
+ del_timer(&a5xx_gpu->preempt_timer);
+
+ /*
+ * The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
+ * firing the interrupt, but there is a non-zero chance of a hardware
+ * condition or a software race that could set it again before we have a
+ * chance to finish. If that happens, log and go for recovery
+ */
+ status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
+ if (unlikely(status)) {
+ set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
+ dev_err(dev->dev, "%s: Preemption failed to complete\n",
+ gpu->name);
+ queue_work(priv->wq, &gpu->recover_work);
+ return;
+ }
+
+ a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
+ a5xx_gpu->next_ring = NULL;
+
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+}
+
+void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ a5xx_gpu->preempt[i]->wptr = 0;
+ a5xx_gpu->preempt[i]->rptr = 0;
+ a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
+ }
+
+ /* Write a 0 to signal that we aren't switching pagetables */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
+ REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI, 0);
+
+ /* Reset the preemption state */
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+
+ /* Always come up on rb 0 */
+ a5xx_gpu->cur_ring = gpu->rb[0];
+}
+
+static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
+ struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct a5xx_preempt_record *ptr;
+ struct drm_gem_object *bo = NULL;
+ u64 iova = 0;
+
+ ptr = msm_gem_kernel_new(gpu->dev,
+ A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
+ MSM_BO_UNCACHED, gpu->aspace, &bo, &iova);
+
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ a5xx_gpu->preempt_bo[ring->id] = bo;
+ a5xx_gpu->preempt_iova[ring->id] = iova;
+ a5xx_gpu->preempt[ring->id] = ptr;
+
+ /* Set up the defaults on the preemption record */
+
+ ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
+ ptr->info = 0;
+ ptr->data = 0;
+ ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
+ ptr->rptr_addr = rbmemptr(ring, rptr);
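+ /* The per-ring counter block lives immediately after the preemption record */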
+ ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
+
+ return 0;
+}
+
+void a5xx_preempt_fini(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ if (!a5xx_gpu->preempt_bo[i])
+ continue;
+
+ msm_gem_put_vaddr(a5xx_gpu->preempt_bo[i]);
+
+ if (a5xx_gpu->preempt_iova[i])
+ msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
+
+ drm_gem_object_unreference(a5xx_gpu->preempt_bo[i]);
+ a5xx_gpu->preempt_bo[i] = NULL;
+ }
+}
+
+void a5xx_preempt_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int i;
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings <= 1)
+ return;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ if (preempt_init_ring(a5xx_gpu, gpu->rb[i])) {
+ /*
+ * On any failure our adventure is over. Clean up and
+ * set nr_rings to 1 to force preemption off
+ */
+ a5xx_preempt_fini(gpu);
+ gpu->nr_rings = 1;
+
+ return;
+ }
+ }
+
+ setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
+ (unsigned long) a5xx_gpu);
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index c75c4df4bc39..05022ea2a007 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -125,51 +125,24 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
- struct adreno_platform_config *config;
- struct adreno_rev rev;
- const struct adreno_info *info;
- struct msm_gpu *gpu = NULL;
+ struct msm_gpu *gpu = platform_get_drvdata(priv->gpu_pdev);
+ int ret;
- if (!pdev) {
+ if (!gpu) {
dev_err(dev->dev, "no adreno device\n");
return NULL;
}
- config = pdev->dev.platform_data;
- rev = config->rev;
- info = adreno_info(config->rev);
-
- if (!info) {
- dev_warn(dev->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
- rev.core, rev.major, rev.minor, rev.patchid);
+ pm_runtime_get_sync(&pdev->dev);
+ mutex_lock(&dev->struct_mutex);
+ ret = msm_gpu_hw_init(gpu);
+ mutex_unlock(&dev->struct_mutex);
+ pm_runtime_put_sync(&pdev->dev);
+ if (ret) {
+ dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
return NULL;
}
- DBG("Found GPU: %u.%u.%u.%u", rev.core, rev.major,
- rev.minor, rev.patchid);
-
- gpu = info->init(dev);
- if (IS_ERR(gpu)) {
- dev_warn(dev->dev, "failed to load adreno gpu\n");
- gpu = NULL;
- /* not fatal */
- }
-
- if (gpu) {
- int ret;
-
- pm_runtime_get_sync(&pdev->dev);
- mutex_lock(&dev->struct_mutex);
- ret = msm_gpu_hw_init(gpu);
- mutex_unlock(&dev->struct_mutex);
- pm_runtime_put_sync(&pdev->dev);
- if (ret) {
- dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
- gpu->funcs->destroy(gpu);
- gpu = NULL;
- }
- }
-
return gpu;
}
@@ -282,6 +255,9 @@ static int adreno_get_pwrlevels(struct device *dev,
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
+ const struct adreno_info *info;
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_gpu *gpu;
u32 val;
int ret;
@@ -302,13 +278,39 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
return ret;
dev->platform_data = &config;
- set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
+ set_gpu_pdev(drm, to_platform_device(dev));
+
+ info = adreno_info(config.rev);
+
+ if (!info) {
+ dev_warn(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
+ config.rev.core, config.rev.major,
+ config.rev.minor, config.rev.patchid);
+ return -ENXIO;
+ }
+
+ DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
+ config.rev.minor, config.rev.patchid);
+
+ gpu = info->init(drm);
+ if (IS_ERR(gpu)) {
+ dev_warn(drm->dev, "failed to load adreno gpu\n");
+ return PTR_ERR(gpu);
+ }
+
+ dev_set_drvdata(dev, gpu);
+
return 0;
}
static void adreno_unbind(struct device *dev, struct device *master,
void *data)
{
+ struct msm_gpu *gpu = dev_get_drvdata(dev);
+
+ gpu->funcs->pm_suspend(gpu);
+ gpu->funcs->destroy(gpu);
+
set_gpu_pdev(dev_get_drvdata(master), NULL);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index c8b4ac254bb5..e2ffecce59a3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -21,8 +21,6 @@
#include "msm_gem.h"
#include "msm_mmu.h"
-#define RB_SIZE SZ_32K
-#define RB_BLKSIZE 32
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
@@ -58,72 +56,181 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
return ret;
}
return -EINVAL;
+ case MSM_PARAM_NR_RINGS:
+ *value = gpu->nr_rings;
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
}
}
+const struct firmware *
+adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
+{
+ struct drm_device *drm = adreno_gpu->base.dev;
+ const struct firmware *fw = NULL;
+ char newname[strlen("qcom/") + strlen(fwname) + 1];
+ int ret;
+
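+ /* Build the "qcom/"-prefixed name used by the new linux-firmware layout */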
+ sprintf(newname, "qcom/%s", fwname);
+
+ /*
+ * Try first to load from qcom/$fwfile using a direct load (to avoid
+ * a potential timeout waiting for usermode helper)
+ */
+ if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
+ (adreno_gpu->fwloc == FW_LOCATION_NEW)) {
+
+ ret = request_firmware_direct(&fw, newname, drm->dev);
+ if (!ret) {
+ dev_info(drm->dev, "loaded %s from new location\n",
+ newname);
+ adreno_gpu->fwloc = FW_LOCATION_NEW;
+ return fw;
+ } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
+ dev_err(drm->dev, "failed to load %s: %d\n",
+ newname, ret);
+ return ERR_PTR(ret);
+ }
+ }
+
+ /*
+ * Then try the legacy location without qcom/ prefix
+ */
+ if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
+ (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {
+
+ ret = request_firmware_direct(&fw, fwname, drm->dev);
+ if (!ret) {
+ dev_info(drm->dev, "loaded %s from legacy location\n",
+ fwname);
+ adreno_gpu->fwloc = FW_LOCATION_LEGACY;
+ return fw;
+ } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
+ dev_err(drm->dev, "failed to load %s: %d\n",
+ fwname, ret);
+ return ERR_PTR(ret);
+ }
+ }
+
+ /*
+ * Finally fall back to request_firmware() for cases where the
+ * usermode helper is needed (I think mainly android)
+ */
+ if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
+ (adreno_gpu->fwloc == FW_LOCATION_HELPER)) {
+
+ ret = request_firmware(&fw, newname, drm->dev);
+ if (!ret) {
+ dev_info(drm->dev, "loaded %s with helper\n",
+ newname);
+ adreno_gpu->fwloc = FW_LOCATION_HELPER;
+ return fw;
+ } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
+ dev_err(drm->dev, "failed to load %s: %d\n",
+ newname, ret);
+ return ERR_PTR(ret);
+ }
+ }
+
+ dev_err(drm->dev, "failed to load %s\n", fwname);
+ return ERR_PTR(-ENOENT);
+}
+
+static int adreno_load_fw(struct adreno_gpu *adreno_gpu)
+{
+ const struct firmware *fw;
+
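+ /* Firmware only needs to be loaded once; pm4 doubles as the "loaded" flag */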
+ if (adreno_gpu->pm4)
+ return 0;
+
+ fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pm4fw);
+ if (IS_ERR(fw))
+ return PTR_ERR(fw);
+ adreno_gpu->pm4 = fw;
+
+ fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pfpfw);
+ if (IS_ERR(fw)) {
+ release_firmware(adreno_gpu->pm4);
+ adreno_gpu->pm4 = NULL;
+ return PTR_ERR(fw);
+ }
+ adreno_gpu->pfp = fw;
+
+ return 0;
+}
+
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- int ret;
+ int ret, i;
DBG("%s", gpu->name);
- ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
- if (ret) {
- gpu->rb_iova = 0;
- dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
+ ret = adreno_load_fw(adreno_gpu);
+ if (ret)
return ret;
- }
- /* reset ringbuffer: */
- gpu->rb->cur = gpu->rb->start;
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ if (!ring)
+ continue;
+
+ ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
+ if (ret) {
+ ring->iova = 0;
+ dev_err(gpu->dev->dev,
+ "could not map ringbuffer %d: %d\n", i, ret);
+ return ret;
+ }
+
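+ /* Rewind both the flushed (cur) and pending (next) write positions */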
+ ring->cur = ring->start;
+ ring->next = ring->start;
- /* reset completed fence seqno: */
- adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
- adreno_gpu->memptrs->rptr = 0;
+ /* reset completed fence seqno: */
+ ring->memptrs->fence = ring->seqno;
+ ring->memptrs->rptr = 0;
+ }
- /* Setup REG_CP_RB_CNTL: */
+ /*
+ * Setup REG_CP_RB_CNTL. The same value is used across targets (with
+ * the exception of A430, which disables the RPTR shadow) - the calculation
+ * for the ringbuffer size and block size is moved to msm_gpu.h for the
+ * pre-processor to deal with and the A430 variant is ORed in here
+ */
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
- /* size is log2(quad-words): */
- AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
- AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) |
- (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
+ MSM_GPU_RB_CNTL_DEFAULT |
+ (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
- /* Setup ringbuffer address: */
+ /* Setup ringbuffer address - use ringbuffer[0] for GPU init */
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
- REG_ADRENO_CP_RB_BASE_HI, gpu->rb_iova);
+ REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
if (!adreno_is_a430(adreno_gpu)) {
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
REG_ADRENO_CP_RB_RPTR_ADDR_HI,
- rbmemptr(adreno_gpu, rptr));
+ rbmemptr(gpu->rb[0], rptr));
}
return 0;
}
-static uint32_t get_wptr(struct msm_ringbuffer *ring)
-{
- return ring->cur - ring->start;
-}
-
/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
-static uint32_t get_rptr(struct adreno_gpu *adreno_gpu)
+static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
+ struct msm_ringbuffer *ring)
{
if (adreno_is_a430(adreno_gpu))
- return adreno_gpu->memptrs->rptr = adreno_gpu_read(
+ return ring->memptrs->rptr = adreno_gpu_read(
adreno_gpu, REG_ADRENO_CP_RB_RPTR);
else
- return adreno_gpu->memptrs->rptr;
+ return ring->memptrs->rptr;
}
-uint32_t adreno_last_fence(struct msm_gpu *gpu)
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- return adreno_gpu->memptrs->fence;
+ return gpu->rb[0];
}
void adreno_recover(struct msm_gpu *gpu)
@@ -149,7 +256,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_drm_private *priv = gpu->dev->dev_private;
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = submit->ring;
unsigned i;
for (i = 0; i < submit->nr_cmds; i++) {
@@ -164,7 +271,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
- OUT_RING(ring, submit->cmd[i].iova);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
OUT_PKT2(ring);
break;
@@ -172,7 +279,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
- OUT_RING(ring, submit->fence->seqno);
+ OUT_RING(ring, submit->seqno);
if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
/* Flush HLSQ lazy updates to make sure there is nothing
@@ -188,8 +295,8 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
- OUT_RING(ring, rbmemptr(adreno_gpu, fence));
- OUT_RING(ring, submit->fence->seqno);
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
OUT_PKT3(ring, CP_INTERRUPT, 1);
@@ -215,20 +322,23 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
#endif
- gpu->funcs->flush(gpu);
+ gpu->funcs->flush(gpu, ring);
}
-void adreno_flush(struct msm_gpu *gpu)
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr;
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
/*
* Mask wptr value that we calculate to fit in the HW range. This is
* to account for the possibility that the last command fit exactly into
* the ringbuffer and rb->next hasn't wrapped to zero yet
*/
- wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);
+ wptr = get_wptr(ring);
/* ensure writes to ringbuffer have hit system memory: */
mb();
@@ -236,17 +346,19 @@ void adreno_flush(struct msm_gpu *gpu)
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
-bool adreno_idle(struct msm_gpu *gpu)
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t wptr = get_wptr(ring);
/* wait for CP to drain ringbuffer: */
- if (!spin_until(get_rptr(adreno_gpu) == wptr))
+ if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
return true;
/* TODO maybe we need to reset GPU here to recover from hang? */
- DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
+ gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
+
return false;
}
@@ -261,10 +373,16 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->fctx->last_fence);
- seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu));
- seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ seq_printf(m, "rb %d: fence: %d/%d\n", i,
+ ring->memptrs->fence, ring->seqno);
+
+ seq_printf(m, " rptr: %d\n",
+ get_rptr(adreno_gpu, ring));
+ seq_printf(m, "rb wptr: %d\n", get_wptr(ring));
+ }
/* dump these out in a form that can be parsed by demsm: */
seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
@@ -290,16 +408,23 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
void adreno_dump_info(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i;
printk("revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->fctx->last_fence);
- printk("rptr: %d\n", get_rptr(adreno_gpu));
- printk("rb wptr: %d\n", get_wptr(gpu->rb));
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ printk("rb %d: fence: %d/%d\n", i,
+ ring->memptrs->fence,
+ ring->seqno);
+
+ printk("rptr: %d\n", get_rptr(adreno_gpu, ring));
+ printk("rb wptr: %d\n", get_wptr(ring));
+ }
}
/* would be nice to not have to duplicate the _show() stuff with printk(): */
@@ -322,28 +447,31 @@ void adreno_dump(struct msm_gpu *gpu)
}
}
-static uint32_t ring_freewords(struct msm_gpu *gpu)
+static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t size = gpu->rb->size / 4;
- uint32_t wptr = get_wptr(gpu->rb);
- uint32_t rptr = get_rptr(adreno_gpu);
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
+ uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
+ /* Use ring->next to calculate free size */
+ uint32_t wptr = ring->next - ring->start;
+ uint32_t rptr = get_rptr(adreno_gpu, ring);
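+ /* Leave one dword of slack so the write pointer never catches the read pointer */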
return (rptr + (size - 1) - wptr) % size;
}
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
- if (spin_until(ring_freewords(gpu) >= ndwords))
- DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+ if (spin_until(ring_freewords(ring) >= ndwords))
+ DRM_DEV_ERROR(ring->gpu->dev->dev,
+ "timeout waiting for space in ringubffer %d\n",
+ ring->id);
}
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
- struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
+ struct adreno_gpu *adreno_gpu,
+ const struct adreno_gpu_funcs *funcs, int nr_rings)
{
struct adreno_platform_config *config = pdev->dev.platform_data;
struct msm_gpu_config adreno_gpu_config = { 0 };
struct msm_gpu *gpu = &adreno_gpu->base;
- int ret;
adreno_gpu->funcs = funcs;
adreno_gpu->info = adreno_info(config->rev);
@@ -366,59 +494,20 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.va_start = SZ_16M;
adreno_gpu_config.va_end = 0xffffffff;
- adreno_gpu_config.ringsz = RB_SIZE;
+ adreno_gpu_config.nr_rings = nr_rings;
pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
+ return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, &adreno_gpu_config);
- if (ret)
- return ret;
-
- ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
- if (ret) {
- dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
- adreno_gpu->info->pm4fw, ret);
- return ret;
- }
-
- ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
- if (ret) {
- dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
- adreno_gpu->info->pfpfw, ret);
- return ret;
- }
-
- adreno_gpu->memptrs = msm_gem_kernel_new(drm,
- sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
- &adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
-
- if (IS_ERR(adreno_gpu->memptrs)) {
- ret = PTR_ERR(adreno_gpu->memptrs);
- adreno_gpu->memptrs = NULL;
- dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
- }
-
- return ret;
}
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
- struct msm_gpu *gpu = &adreno_gpu->base;
-
- if (adreno_gpu->memptrs_bo) {
- if (adreno_gpu->memptrs)
- msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
-
- if (adreno_gpu->memptrs_iova)
- msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->aspace);
-
- drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
- }
release_firmware(adreno_gpu->pm4);
release_firmware(adreno_gpu->pfp);
- msm_gpu_cleanup(gpu);
+ msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 4d9165f29f43..28e3de6e5f94 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -2,7 +2,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -82,14 +82,6 @@ struct adreno_info {
const struct adreno_info *adreno_info(struct adreno_rev rev);
-#define rbmemptr(adreno_gpu, member) \
- ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
-struct adreno_rbmemptrs {
- volatile uint32_t rptr;
- volatile uint32_t fence;
-};
-
struct adreno_gpu {
struct msm_gpu base;
struct adreno_rev rev;
@@ -101,16 +93,30 @@ struct adreno_gpu {
/* interesting register offsets to dump: */
const unsigned int *registers;
+ /*
+ * Are we loading fw from legacy path? Prior to addition
+ * of gpu firmware to linux-firmware, the fw files were
+ * placed in the toplevel firmware directory, following qcom's
+ * android kernel. But linux-firmware preferred they be
+ * placed in a 'qcom' subdirectory.
+ *
+ * For backwards compatibility, we try first to load from
+ * the new path, using request_firmware_direct() to avoid
+ * any potential timeout waiting for usermode helper, then
+ * fall back to the old path (with direct load). And
+ * finally fall back to request_firmware() with the new
+ * path to allow the usermode helper.
+ */
+ enum {
+ FW_LOCATION_UNKNOWN = 0,
+ FW_LOCATION_NEW, /* /lib/firmware/qcom/$fwfile */
+ FW_LOCATION_LEGACY, /* /lib/firmware/$fwfile */
+ FW_LOCATION_HELPER,
+ } fwloc;
+
/* firmware: */
const struct firmware *pm4, *pfp;
- /* ringbuffer rptr/wptr: */
- // TODO should this be in msm_ringbuffer? I think it would be
- // different for z180..
- struct adreno_rbmemptrs *memptrs;
- struct drm_gem_object *memptrs_bo;
- uint64_t memptrs_iova;
-
/*
* Register offsets are different between some GPUs.
* GPU specific offsets will be exported by GPU specific
@@ -196,22 +202,25 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu)
}
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
+const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
+ const char *fwname);
int adreno_hw_init(struct msm_gpu *gpu);
-uint32_t adreno_last_fence(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
-void adreno_flush(struct msm_gpu *gpu);
-bool adreno_idle(struct msm_gpu *gpu);
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
- struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs);
+ struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+ int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
@@ -220,7 +229,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu);
static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt+1);
+ adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}
@@ -228,14 +237,14 @@ OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
- adreno_wait_ring(ring->gpu, 1);
+ adreno_wait_ring(ring, 1);
OUT_RING(ring, CP_TYPE2_PKT);
}
static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt+1);
+ adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
@@ -257,14 +266,14 @@ static inline u32 PM4_PARITY(u32 val)
static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt + 1);
+ adreno_wait_ring(ring, cnt + 1);
OUT_RING(ring, PKT4(regindx, cnt));
}
static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt + 1);
+ adreno_wait_ring(ring, cnt + 1);
OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}
@@ -323,6 +332,11 @@ static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
adreno_gpu_write(gpu, hi, upper_32_bits(data));
}
+static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
+{
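+ /* Report the write offset in dwords, wrapped to the fixed ringbuffer size */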
+ return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
+}
+
/*
* Given a register and a count, return a value to program into
* REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index a5d75c9b3a73..65c1dfbbe019 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -14,7 +14,7 @@
#include "dsi_cfg.h"
static const char * const dsi_v2_bus_clk_names[] = {
- "core_mmss_clk", "iface_clk", "bus_clk",
+ "core_mmss", "iface", "bus",
};
static const struct msm_dsi_config apq8064_dsi_cfg = {
@@ -34,7 +34,7 @@ static const struct msm_dsi_config apq8064_dsi_cfg = {
};
static const char * const dsi_6g_bus_clk_names[] = {
- "mdp_core_clk", "iface_clk", "bus_clk", "core_mmss_clk",
+ "mdp_core", "iface", "bus", "core_mmss",
};
static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
@@ -55,7 +55,7 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
};
static const char * const dsi_8916_bus_clk_names[] = {
- "mdp_core_clk", "iface_clk", "bus_clk",
+ "mdp_core", "iface", "bus",
};
static const struct msm_dsi_config msm8916_dsi_cfg = {
@@ -99,7 +99,7 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
* without it too. Figure out why it doesn't enable and uncomment below
*/
static const char * const dsi_8996_bus_clk_names[] = {
- "mdp_core_clk", "iface_clk", "bus_clk", /* "core_mmss_clk", */
+ "mdp_core", "iface", "bus", /* "core_mmss", */
};
static const struct msm_dsi_config msm8996_dsi_cfg = {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index deaf869374ea..0f7324a686ca 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -334,46 +334,46 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
- struct device *dev = &msm_host->pdev->dev;
+ struct platform_device *pdev = msm_host->pdev;
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
const struct msm_dsi_config *cfg = cfg_hnd->cfg;
int i, ret = 0;
/* get bus clocks */
for (i = 0; i < cfg->num_bus_clks; i++) {
- msm_host->bus_clks[i] = devm_clk_get(dev,
+ msm_host->bus_clks[i] = msm_clk_get(pdev,
cfg->bus_clk_names[i]);
if (IS_ERR(msm_host->bus_clks[i])) {
ret = PTR_ERR(msm_host->bus_clks[i]);
- pr_err("%s: Unable to get %s, ret = %d\n",
+ pr_err("%s: Unable to get %s clock, ret = %d\n",
__func__, cfg->bus_clk_names[i], ret);
goto exit;
}
}
/* get link and source clocks */
- msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
+ msm_host->byte_clk = msm_clk_get(pdev, "byte");
if (IS_ERR(msm_host->byte_clk)) {
ret = PTR_ERR(msm_host->byte_clk);
- pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
+ pr_err("%s: can't find dsi_byte clock. ret=%d\n",
__func__, ret);
msm_host->byte_clk = NULL;
goto exit;
}
- msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
+ msm_host->pixel_clk = msm_clk_get(pdev, "pixel");
if (IS_ERR(msm_host->pixel_clk)) {
ret = PTR_ERR(msm_host->pixel_clk);
- pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
+ pr_err("%s: can't find dsi_pixel clock. ret=%d\n",
__func__, ret);
msm_host->pixel_clk = NULL;
goto exit;
}
- msm_host->esc_clk = devm_clk_get(dev, "core_clk");
+ msm_host->esc_clk = msm_clk_get(pdev, "core");
if (IS_ERR(msm_host->esc_clk)) {
ret = PTR_ERR(msm_host->esc_clk);
- pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
+ pr_err("%s: can't find dsi_esc clock. ret=%d\n",
__func__, ret);
msm_host->esc_clk = NULL;
goto exit;
@@ -382,22 +382,22 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
if (!msm_host->byte_clk_src) {
ret = -ENODEV;
- pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
+ pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret);
goto exit;
}
msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
if (!msm_host->pixel_clk_src) {
ret = -ENODEV;
- pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
+ pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret);
goto exit;
}
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
- msm_host->src_clk = devm_clk_get(dev, "src_clk");
+ msm_host->src_clk = msm_clk_get(pdev, "src");
if (IS_ERR(msm_host->src_clk)) {
ret = PTR_ERR(msm_host->src_clk);
- pr_err("%s: can't find dsi_src_clk. ret=%d\n",
+ pr_err("%s: can't find src clock. ret=%d\n",
__func__, ret);
msm_host->src_clk = NULL;
goto exit;
@@ -406,7 +406,7 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
if (!msm_host->esc_clk_src) {
ret = -ENODEV;
- pr_err("%s: can't get esc_clk_src. ret=%d\n",
+ pr_err("%s: can't get esc clock parent. ret=%d\n",
__func__, ret);
goto exit;
}
@@ -414,7 +414,7 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
if (!msm_host->dsi_clk_src) {
ret = -ENODEV;
- pr_err("%s: can't get dsi_clk_src. ret=%d\n",
+ pr_err("%s: can't get src clock parent. ret=%d\n",
__func__, ret);
}
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 7c9bf91bc22b..790ca280cbfd 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -482,7 +482,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail;
}
- phy->ahb_clk = devm_clk_get(dev, "iface_clk");
+ phy->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(phy->ahb_clk)) {
dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
ret = PTR_ERR(phy->ahb_clk);
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index e32a4a4f3797..7c72264101ff 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -150,46 +150,46 @@ static const struct edp_pixel_clk_div clk_divs[2][EDP_PIXEL_CLK_NUM] = {
static int edp_clk_init(struct edp_ctrl *ctrl)
{
- struct device *dev = &ctrl->pdev->dev;
+ struct platform_device *pdev = ctrl->pdev;
int ret;
- ctrl->aux_clk = devm_clk_get(dev, "core_clk");
+ ctrl->aux_clk = msm_clk_get(pdev, "core");
if (IS_ERR(ctrl->aux_clk)) {
ret = PTR_ERR(ctrl->aux_clk);
- pr_err("%s: Can't find aux_clk, %d\n", __func__, ret);
+ pr_err("%s: Can't find core clock, %d\n", __func__, ret);
ctrl->aux_clk = NULL;
return ret;
}
- ctrl->pixel_clk = devm_clk_get(dev, "pixel_clk");
+ ctrl->pixel_clk = msm_clk_get(pdev, "pixel");
if (IS_ERR(ctrl->pixel_clk)) {
ret = PTR_ERR(ctrl->pixel_clk);
- pr_err("%s: Can't find pixel_clk, %d\n", __func__, ret);
+ pr_err("%s: Can't find pixel clock, %d\n", __func__, ret);
ctrl->pixel_clk = NULL;
return ret;
}
- ctrl->ahb_clk = devm_clk_get(dev, "iface_clk");
+ ctrl->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(ctrl->ahb_clk)) {
ret = PTR_ERR(ctrl->ahb_clk);
- pr_err("%s: Can't find ahb_clk, %d\n", __func__, ret);
+ pr_err("%s: Can't find iface clock, %d\n", __func__, ret);
ctrl->ahb_clk = NULL;
return ret;
}
- ctrl->link_clk = devm_clk_get(dev, "link_clk");
+ ctrl->link_clk = msm_clk_get(pdev, "link");
if (IS_ERR(ctrl->link_clk)) {
ret = PTR_ERR(ctrl->link_clk);
- pr_err("%s: Can't find link_clk, %d\n", __func__, ret);
+ pr_err("%s: Can't find link clock, %d\n", __func__, ret);
ctrl->link_clk = NULL;
return ret;
}
/* need mdp core clock to receive irq */
- ctrl->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk");
+ ctrl->mdp_core_clk = msm_clk_get(pdev, "mdp_core");
if (IS_ERR(ctrl->mdp_core_clk)) {
ret = PTR_ERR(ctrl->mdp_core_clk);
- pr_err("%s: Can't find mdp_core_clk, %d\n", __func__, ret);
+ pr_err("%s: Can't find mdp_core clock, %d\n", __func__, ret);
ctrl->mdp_core_clk = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 17e069a133a4..e63dc0fb55f8 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -208,7 +208,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
for (i = 0; i < config->hpd_clk_cnt; i++) {
struct clk *clk;
- clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
+ clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
@@ -228,7 +228,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
for (i = 0; i < config->pwr_clk_cnt; i++) {
struct clk *clk;
- clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
+ clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
@@ -361,7 +361,7 @@ static const char *hpd_reg_names_none[] = {};
static struct hdmi_platform_config hdmi_tx_8660_config;
static const char *hpd_reg_names_8960[] = {"core-vdda", "hdmi-mux"};
-static const char *hpd_clk_names_8960[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
+static const char *hpd_clk_names_8960[] = {"core", "master_iface", "slave_iface"};
static struct hdmi_platform_config hdmi_tx_8960_config = {
HDMI_CFG(hpd_reg, 8960),
@@ -370,8 +370,8 @@ static struct hdmi_platform_config hdmi_tx_8960_config = {
static const char *pwr_reg_names_8x74[] = {"core-vdda", "core-vcc"};
static const char *hpd_reg_names_8x74[] = {"hpd-gdsc", "hpd-5v"};
-static const char *pwr_clk_names_8x74[] = {"extp_clk", "alt_iface_clk"};
-static const char *hpd_clk_names_8x74[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+static const char *pwr_clk_names_8x74[] = {"extp", "alt_iface"};
+static const char *hpd_clk_names_8x74[] = {"iface", "core", "mdp_core"};
static unsigned long hpd_clk_freq_8x74[] = {0, 19200000, 0};
static struct hdmi_platform_config hdmi_tx_8974_config = {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 534ce5b49781..5e631392dc85 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -48,7 +48,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
for (i = 0; i < cfg->num_clks; i++) {
struct clk *clk;
- clk = devm_clk_get(dev, cfg->clk_names[i]);
+ clk = msm_clk_get(phy->pdev, cfg->clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(dev, "failed to get phy clock: %s (%d)\n",
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index e6ee6b745ab7..0980da8ec966 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -48,7 +48,7 @@ static const char * const hdmi_phy_8960_reg_names[] = {
};
static const char * const hdmi_phy_8960_clk_names[] = {
- "slave_iface_clk",
+ "slave_iface",
};
const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg = {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index 1fb7645cc721..0df504c61833 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -758,9 +758,7 @@ static const char * const hdmi_phy_8996_reg_names[] = {
};
static const char * const hdmi_phy_8996_clk_names[] = {
- "mmagic_iface_clk",
- "iface_clk",
- "ref_clk",
+ "iface", "ref",
};
const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg = {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
index c4a61e537851..4a8b8468586a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x74.c
@@ -41,8 +41,7 @@ static const char * const hdmi_phy_8x74_reg_names[] = {
};
static const char * const hdmi_phy_8x74_clk_names[] = {
- "iface_clk",
- "alt_iface_clk"
+ "iface", "alt_iface"
};
const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg = {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 47fa2aba1983..14bd3bd3e040 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -290,6 +290,9 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
if (WARN_ON(!mdp4_crtc->enabled))
return;
+ /* Disable/save vblank irq handling before power is disabled */
+ drm_crtc_vblank_off(crtc);
+
mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
mdp4_disable(mdp4_kms);
@@ -308,6 +311,10 @@ static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
return;
mdp4_enable(mdp4_kms);
+
+ /* Restore vblank irq handling after power is enabled */
+ drm_crtc_vblank_on(crtc);
+
mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);
crtc_flush(crtc);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index 60790df91bfa..1abc7f5c345c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -224,7 +224,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 440977677001..e414850dbbda 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -55,18 +55,23 @@ struct mdp5_crtc {
struct completion pp_completion;
+ bool lm_cursor_enabled;
+
struct {
/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
spinlock_t lock;
/* current cursor being scanned out: */
struct drm_gem_object *scanout_bo;
+ uint64_t iova;
uint32_t width, height;
uint32_t x, y;
} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
+static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);
+
static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
@@ -114,6 +119,8 @@ static u32 crtc_flush_all(struct drm_crtc *crtc)
return 0;
drm_atomic_crtc_for_each_plane(plane, crtc) {
+ if (!plane->state->visible)
+ continue;
flush_mask |= mdp5_plane_get_flush(plane);
}
@@ -242,6 +249,9 @@ static void blend_setup(struct drm_crtc *crtc)
drm_atomic_crtc_for_each_plane(plane, crtc) {
enum mdp5_pipe right_pipe;
+ if (!plane->state->visible)
+ continue;
+
pstate = to_mdp5_plane_state(plane->state);
pstates[pstate->stage] = pstate;
stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
@@ -422,11 +432,14 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
if (WARN_ON(!mdp5_crtc->enabled))
return;
+ /* Disable/save vblank irq handling before power is disabled */
+ drm_crtc_vblank_off(crtc);
+
if (mdp5_cstate->cmd_mode)
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
mdp5_crtc->enabled = false;
}
@@ -446,6 +459,29 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
pm_runtime_get_sync(dev);
+ if (mdp5_crtc->lm_cursor_enabled) {
+ /*
+ * Restore LM cursor state, as it might have been lost
+ * with suspend:
+ */
+ if (mdp5_crtc->cursor.iova) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
+ mdp5_crtc_restore_cursor(crtc);
+ spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
+
+ mdp5_ctl_set_cursor(mdp5_cstate->ctl,
+ &mdp5_cstate->pipeline, 0, true);
+ } else {
+ mdp5_ctl_set_cursor(mdp5_cstate->ctl,
+ &mdp5_cstate->pipeline, 0, false);
+ }
+ }
+
+ /* Restore vblank irq handling after power is enabled */
+ drm_crtc_vblank_on(crtc);
+
mdp5_crtc_mode_set_nofb(crtc);
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
@@ -580,6 +616,9 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
DBG("%s: check", crtc->name);
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ if (!pstate->visible)
+ continue;
+
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
@@ -723,6 +762,50 @@ static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
mdp5_crtc->cursor.y);
}
+static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
+ uint32_t blendcfg, stride;
+ uint32_t x, y, width, height;
+ uint32_t roi_w, roi_h;
+ int lm;
+
+ assert_spin_locked(&mdp5_crtc->cursor.lock);
+
+ lm = mdp5_cstate->pipeline.mixer->lm;
+
+ x = mdp5_crtc->cursor.x;
+ y = mdp5_crtc->cursor.y;
+ width = mdp5_crtc->cursor.width;
+ height = mdp5_crtc->cursor.height;
+
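+ /* LM cursors are always ARGB8888, so the stride is width * 4 bytes */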
+ stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
+
+ get_roi(crtc, &roi_w, &roi_h);
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
+ MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
+ MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
+ MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
+ MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
+ MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
+ MDP5_LM_CURSOR_START_XY_Y_START(y) |
+ MDP5_LM_CURSOR_START_XY_X_START(x));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
+ mdp5_crtc->cursor.iova);
+
+ blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
+ blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
+}
+
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file, uint32_t handle,
uint32_t width, uint32_t height)
@@ -735,16 +818,18 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct platform_device *pdev = mdp5_kms->pdev;
struct msm_kms *kms = &mdp5_kms->base.base;
struct drm_gem_object *cursor_bo, *old_bo = NULL;
- uint32_t blendcfg, stride;
- uint64_t cursor_addr;
struct mdp5_ctl *ctl;
- int ret, lm;
- enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
+ int ret;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
- uint32_t roi_w, roi_h;
bool cursor_enable = true;
unsigned long flags;
+ if (!mdp5_crtc->lm_cursor_enabled) {
+ dev_warn(dev->dev,
+ "cursor_set is deprecated with cursor planes\n");
+ return -EINVAL;
+ }
+
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
@@ -761,6 +846,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DBG("Cursor off");
cursor_enable = false;
+ mdp5_crtc->cursor.iova = 0;
pm_runtime_get_sync(&pdev->dev);
goto set_cursor;
}
@@ -769,13 +855,11 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_iova(cursor_bo, kms->aspace, &cursor_addr);
+ ret = msm_gem_get_iova(cursor_bo, kms->aspace,
+ &mdp5_crtc->cursor.iova);
if (ret)
return -EINVAL;
- lm = mdp5_cstate->pipeline.mixer->lm;
- stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
-
pm_runtime_get_sync(&pdev->dev);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
@@ -785,22 +869,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
mdp5_crtc->cursor.width = width;
mdp5_crtc->cursor.height = height;
- get_roi(crtc, &roi_w, &roi_h);
-
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
- MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
- MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
- MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
- MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
- MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
-
- blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
- blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
+ mdp5_crtc_restore_cursor(crtc);
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
@@ -815,7 +884,7 @@ set_cursor:
crtc_flush(crtc, flush_mask);
end:
- pm_runtime_put_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
if (old_bo) {
drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
/* enable vblank to complete cursor work: */
@@ -829,12 +898,18 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
- uint32_t lm = mdp5_cstate->pipeline.mixer->lm;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
+ struct drm_device *dev = crtc->dev;
uint32_t roi_w;
uint32_t roi_h;
unsigned long flags;
+ if (!mdp5_crtc->lm_cursor_enabled) {
+ dev_warn(dev->dev,
+ "cursor_move is deprecated with cursor planes\n");
+ return -EINVAL;
+ }
+
/* don't support LM cursors when we have source split enabled */
if (mdp5_cstate->pipeline.r_mixer)
return -EINVAL;
@@ -851,17 +926,12 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
pm_runtime_get_sync(&mdp5_kms->pdev->dev);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
- MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
- MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
- MDP5_LM_CURSOR_START_XY_Y_START(y) |
- MDP5_LM_CURSOR_START_XY_X_START(x));
+ mdp5_crtc_restore_cursor(crtc);
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
crtc_flush(crtc, flush_mask);
- pm_runtime_put_autosuspend(&mdp5_kms->pdev->dev);
+ pm_runtime_put_sync(&mdp5_kms->pdev->dev);
return 0;
}
@@ -941,16 +1011,6 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.atomic_print_state = mdp5_crtc_atomic_print_state,
};
-static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
- .set_config = drm_atomic_helper_set_config,
- .destroy = mdp5_crtc_destroy,
- .page_flip = drm_atomic_helper_page_flip,
- .reset = mdp5_crtc_reset,
- .atomic_duplicate_state = mdp5_crtc_duplicate_state,
- .atomic_destroy_state = mdp5_crtc_destroy_state,
- .atomic_print_state = mdp5_crtc_atomic_print_state,
-};
-
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.mode_set_nofb = mdp5_crtc_mode_set_nofb,
.atomic_check = mdp5_crtc_atomic_check,
@@ -1119,12 +1179,10 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
- if (cursor_plane)
- drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
- &mdp5_crtc_no_lm_cursor_funcs, NULL);
- else
- drm_crtc_init_with_planes(dev, crtc, plane, NULL,
- &mdp5_crtc_funcs, NULL);
+ mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;
+
+ drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
+ &mdp5_crtc_funcs, NULL);
drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
"unref cursor", unref_cursor_worker);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 5b851380d3f2..36ad3cbe5f79 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -384,7 +384,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index bb5deb00c899..280e368bc9bb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -54,7 +54,7 @@ void mdp5_irq_preinstall(struct msm_kms *kms)
pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
}
int mdp5_irq_postinstall(struct msm_kms *kms)
@@ -72,7 +72,7 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
pm_runtime_get_sync(dev);
mdp_irq_register(mdp_kms, error_handler);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
return 0;
}
@@ -84,7 +84,7 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
}
irqreturn_t mdp5_irq(struct msm_kms *kms)
@@ -119,7 +119,7 @@ int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), true);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
return 0;
}
@@ -132,5 +132,5 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index f7c0698fec40..3e9bba4d6624 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -125,7 +125,7 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
if (mdp5_kms->smp)
mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
}
static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
@@ -496,12 +496,12 @@ static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
pm_runtime_get_sync(dev);
version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
- pm_runtime_put_autosuspend(dev);
+ pm_runtime_put_sync(dev);
*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
- DBG("MDP5 version v%d.%d", *major, *minor);
+ dev_info(dev, "MDP5 version v%d.%d", *major, *minor);
}
static int get_clk(struct platform_device *pdev, struct clk **clkp,
@@ -599,7 +599,7 @@ static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
struct drm_crtc *crtc;
struct drm_encoder *encoder;
- if (pipe < 0 || pipe >= priv->num_crtcs)
+ if (pipe >= priv->num_crtcs)
return 0;
crtc = priv->crtcs[pipe];
@@ -683,7 +683,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
aspace = NULL;
}
- pm_runtime_put_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
ret = modeset_init(mdp5_kms);
if (ret) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
index 2bfac3712685..ff52c49095f9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
@@ -17,19 +17,20 @@
#include "mdp5_kms.h"
-struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s,
- struct drm_plane *plane, uint32_t caps, uint32_t blkcfg)
+int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ uint32_t caps, uint32_t blkcfg,
+ struct mdp5_hw_pipe **hwpipe,
+ struct mdp5_hw_pipe **r_hwpipe)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct mdp5_state *state;
struct mdp5_hw_pipe_state *old_state, *new_state;
- struct mdp5_hw_pipe *hwpipe = NULL;
- int i;
+ int i, j;
state = mdp5_get_state(s);
if (IS_ERR(state))
- return ERR_CAST(state);
+ return PTR_ERR(state);
/* grab old_state after mdp5_get_state(), since now we hold lock: */
old_state = &mdp5_kms->state->hwpipe;
@@ -64,31 +65,67 @@ struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s,
/* possible candidate, take the one with the
* fewest unneeded caps bits set:
*/
- if (!hwpipe || (hweight_long(cur->caps & ~caps) <
- hweight_long(hwpipe->caps & ~caps)))
- hwpipe = cur;
+ if (!(*hwpipe) || (hweight_long(cur->caps & ~caps) <
+ hweight_long((*hwpipe)->caps & ~caps))) {
+ bool r_found = false;
+
+ if (r_hwpipe) {
+ for (j = i + 1; j < mdp5_kms->num_hwpipes;
+ j++) {
+ struct mdp5_hw_pipe *r_cur =
+ mdp5_kms->hwpipes[j];
+
+ /* reject different types of hwpipes */
+ if (r_cur->caps != cur->caps)
+ continue;
+
+ /* respect priority, e.g. VIG0 > VIG1 */
+ if (cur->pipe > r_cur->pipe)
+ continue;
+
+ *r_hwpipe = r_cur;
+ r_found = true;
+ break;
+ }
+ }
+
+ if (!r_hwpipe || r_found)
+ *hwpipe = cur;
+ }
}
- if (!hwpipe)
- return ERR_PTR(-ENOMEM);
+ if (!(*hwpipe))
+ return -ENOMEM;
+
+ if (r_hwpipe && !(*r_hwpipe))
+ return -ENOMEM;
if (mdp5_kms->smp) {
int ret;
- DBG("%s: alloc SMP blocks", hwpipe->name);
+ /* We don't support SMP and 2 hwpipes/plane together */
+ WARN_ON(r_hwpipe);
+
+ DBG("%s: alloc SMP blocks", (*hwpipe)->name);
ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp,
- hwpipe->pipe, blkcfg);
+ (*hwpipe)->pipe, blkcfg);
if (ret)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- hwpipe->blkcfg = blkcfg;
+ (*hwpipe)->blkcfg = blkcfg;
}
DBG("%s: assign to plane %s for caps %x",
- hwpipe->name, plane->name, caps);
- new_state->hwpipe_to_plane[hwpipe->idx] = plane;
+ (*hwpipe)->name, plane->name, caps);
+ new_state->hwpipe_to_plane[(*hwpipe)->idx] = plane;
- return hwpipe;
+ if (r_hwpipe) {
+ DBG("%s: assign to right of plane %s for caps %x",
+ (*r_hwpipe)->name, plane->name, caps);
+ new_state->hwpipe_to_plane[(*r_hwpipe)->idx] = plane;
+ }
+
+ return 0;
}
void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
index 924c3e6f9517..bb2b0ac7aa2b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
@@ -44,9 +44,10 @@ struct mdp5_hw_pipe_state {
struct drm_plane *hwpipe_to_plane[SSPP_MAX];
};
-struct mdp5_hw_pipe *__must_check
-mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
- uint32_t caps, uint32_t blkcfg);
+int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
+ uint32_t caps, uint32_t blkcfg,
+ struct mdp5_hw_pipe **hwpipe,
+ struct mdp5_hw_pipe **r_hwpipe);
void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 4b22ac3413a1..be50445f9901 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -31,15 +31,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_rect *src, struct drm_rect *dest);
-static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx);
-
static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
@@ -254,18 +245,6 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.atomic_print_state = mdp5_plane_atomic_print_state,
};
-static const struct drm_plane_funcs mdp5_cursor_plane_funcs = {
- .update_plane = mdp5_update_cursor_plane_legacy,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = mdp5_plane_destroy,
- .atomic_set_property = mdp5_plane_atomic_set_property,
- .atomic_get_property = mdp5_plane_atomic_get_property,
- .reset = mdp5_plane_reset,
- .atomic_duplicate_state = mdp5_plane_duplicate_state,
- .atomic_destroy_state = mdp5_plane_destroy_state,
- .atomic_print_state = mdp5_plane_atomic_print_state,
-};
-
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -414,31 +393,30 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
struct mdp5_hw_pipe *old_right_hwpipe =
mdp5_state->r_hwpipe;
-
- mdp5_state->hwpipe = mdp5_pipe_assign(state->state,
- plane, caps, blkcfg);
- if (IS_ERR(mdp5_state->hwpipe)) {
- DBG("%s: failed to assign hwpipe!", plane->name);
- return PTR_ERR(mdp5_state->hwpipe);
+ struct mdp5_hw_pipe *new_hwpipe = NULL;
+ struct mdp5_hw_pipe *new_right_hwpipe = NULL;
+
+ ret = mdp5_pipe_assign(state->state, plane, caps,
+ blkcfg, &new_hwpipe,
+ need_right_hwpipe ?
+ &new_right_hwpipe : NULL);
+ if (ret) {
+ DBG("%s: failed to assign hwpipe(s)!",
+ plane->name);
+ return ret;
}
- if (need_right_hwpipe) {
- mdp5_state->r_hwpipe =
- mdp5_pipe_assign(state->state, plane,
- caps, blkcfg);
- if (IS_ERR(mdp5_state->r_hwpipe)) {
- DBG("%s: failed to assign right hwpipe",
- plane->name);
- return PTR_ERR(mdp5_state->r_hwpipe);
- }
- } else {
+ mdp5_state->hwpipe = new_hwpipe;
+ if (need_right_hwpipe)
+ mdp5_state->r_hwpipe = new_right_hwpipe;
+ else
/*
* set it to NULL so that the driver knows we
* don't have a right hwpipe when committing a
* new state
*/
mdp5_state->r_hwpipe = NULL;
- }
+
mdp5_pipe_release(state->state, old_hwpipe);
mdp5_pipe_release(state->state, old_right_hwpipe);
@@ -487,11 +465,98 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
}
}
+static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
+ struct drm_crtc_state *crtc_state;
+ struct drm_rect clip;
+ int min_scale, max_scale;
+ int ret;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ if (!crtc_state->active)
+ return -EINVAL;
+
+ mdp5_state = to_mdp5_plane_state(state);
+
+ /* don't use fast path if we don't have a hwpipe allocated yet */
+ if (!mdp5_state->hwpipe)
+ return -EINVAL;
+
+	/* only allow changing of position (crtc x/y or src x/y) in fast path */
+ if (plane->state->crtc != state->crtc ||
+ plane->state->src_w != state->src_w ||
+ plane->state->src_h != state->src_h ||
+ plane->state->crtc_w != state->crtc_w ||
+ plane->state->crtc_h != state->crtc_h ||
+ !plane->state->fb ||
+ plane->state->fb != state->fb)
+ return -EINVAL;
+
+ clip.x1 = 0;
+ clip.y1 = 0;
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+ min_scale = FRAC_16_16(1, 8);
+ max_scale = FRAC_16_16(8, 1);
+
+ ret = drm_plane_helper_check_state(state, &clip, min_scale,
+ max_scale, true, true);
+ if (ret)
+ return ret;
+
+ /*
+	 * if the visibility of the plane changes (i.e., if the cursor is
+	 * clipped out completely), we can't take the async path because
+ * we need to stage/unstage the plane from the Layer Mixer(s). We
+ * also assign/unassign the hwpipe(s) tied to the plane. We avoid
+ * taking the fast path for both these reasons.
+ */
+ if (state->visible != plane->state->visible)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+
+ if (plane_enabled(new_state)) {
+ struct mdp5_ctl *ctl;
+ struct mdp5_pipeline *pipeline =
+ mdp5_crtc_get_pipeline(plane->crtc);
+ int ret;
+
+ ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
+ &new_state->src, &new_state->dst);
+ WARN_ON(ret < 0);
+
+ ctl = mdp5_crtc_get_ctl(new_state->crtc);
+
+ mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
+ }
+
+ *to_mdp5_plane_state(plane->state) =
+ *to_mdp5_plane_state(new_state);
+}
+
static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
.prepare_fb = mdp5_plane_prepare_fb,
.cleanup_fb = mdp5_plane_cleanup_fb,
.atomic_check = mdp5_plane_atomic_check,
.atomic_update = mdp5_plane_atomic_update,
+ .atomic_async_check = mdp5_plane_atomic_async_check,
+ .atomic_async_update = mdp5_plane_atomic_async_update,
};
static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
@@ -996,84 +1061,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
return ret;
}
-static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_plane_state *plane_state, *new_plane_state;
- struct mdp5_plane_state *mdp5_pstate;
- struct drm_crtc_state *crtc_state = crtc->state;
- int ret;
-
- if (!crtc_state->active || drm_atomic_crtc_needs_modeset(crtc_state))
- goto slow;
-
- plane_state = plane->state;
- mdp5_pstate = to_mdp5_plane_state(plane_state);
-
- /* don't use fast path if we don't have a hwpipe allocated yet */
- if (!mdp5_pstate->hwpipe)
- goto slow;
-
- /* only allow changing of position(crtc x/y or src x/y) in fast path */
- if (plane_state->crtc != crtc ||
- plane_state->src_w != src_w ||
- plane_state->src_h != src_h ||
- plane_state->crtc_w != crtc_w ||
- plane_state->crtc_h != crtc_h ||
- !plane_state->fb ||
- plane_state->fb != fb)
- goto slow;
-
- new_plane_state = mdp5_plane_duplicate_state(plane);
- if (!new_plane_state)
- return -ENOMEM;
-
- new_plane_state->src_x = src_x;
- new_plane_state->src_y = src_y;
- new_plane_state->src_w = src_w;
- new_plane_state->src_h = src_h;
- new_plane_state->crtc_x = crtc_x;
- new_plane_state->crtc_y = crtc_y;
- new_plane_state->crtc_w = crtc_w;
- new_plane_state->crtc_h = crtc_h;
-
- ret = mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
- if (ret)
- goto slow_free;
-
- if (new_plane_state->visible) {
- struct mdp5_ctl *ctl;
- struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(crtc);
-
- ret = mdp5_plane_mode_set(plane, crtc, fb,
- &new_plane_state->src,
- &new_plane_state->dst);
- WARN_ON(ret < 0);
-
- ctl = mdp5_crtc_get_ctl(crtc);
-
- mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
- }
-
- *to_mdp5_plane_state(plane_state) =
- *to_mdp5_plane_state(new_plane_state);
-
- mdp5_plane_destroy_state(plane, new_plane_state);
-
- return 0;
-slow_free:
- mdp5_plane_destroy_state(plane, new_plane_state);
-slow:
- return drm_atomic_helper_update_plane(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h, ctx);
-}
-
/*
* Use this func and the one below only after the atomic state has been
* successfully swapped
@@ -1133,16 +1120,9 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats), false);
- if (type == DRM_PLANE_TYPE_CURSOR)
- ret = drm_universal_plane_init(dev, plane, 0xff,
- &mdp5_cursor_plane_funcs,
- mdp5_plane->formats, mdp5_plane->nformats,
- NULL, type, NULL);
- else
- ret = drm_universal_plane_init(dev, plane, 0xff,
- &mdp5_plane_funcs,
- mdp5_plane->formats, mdp5_plane->nformats,
- NULL, type, NULL);
+ ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+ mdp5_plane->formats, mdp5_plane->nformats,
+ NULL, type, NULL);
if (ret)
goto fail;
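
For illustration only -- not part of this patch -- the cursor "fast path" rules enforced by mdp5_plane_atomic_async_check() above can be modelled as a small standalone predicate. The struct below is a simplified stand-in for drm_plane_state (field names chosen for the sketch, not the real DRM type):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the relevant drm_plane_state fields (illustrative). */
struct plane_state {
	void *crtc;
	void *fb;
	unsigned int crtc_w, crtc_h;
	unsigned int src_w, src_h;	/* 16.16 fixed point in the real driver */
	bool visible;
	bool has_hwpipe;
};

/*
 * Mirrors the checks in mdp5_plane_atomic_async_check(): only a pure
 * position change (crtc x/y or src x/y) on an already-staged plane may
 * take the async path; anything touching size, fb, crtc, visibility or
 * hwpipe assignment has to go through a full atomic commit.
 */
static bool can_async_update(const struct plane_state *old_st,
			     const struct plane_state *new_st)
{
	if (!old_st->has_hwpipe)
		return false;
	if (old_st->crtc != new_st->crtc || old_st->fb != new_st->fb || !old_st->fb)
		return false;
	if (old_st->src_w != new_st->src_w || old_st->src_h != new_st->src_h ||
	    old_st->crtc_w != new_st->crtc_w || old_st->crtc_h != new_st->crtc_h)
		return false;
	if (old_st->visible != new_st->visible)
		return false;
	return true;
}

int main(void)
{
	static int dummy_crtc, dummy_fb;
	struct plane_state old_st = { &dummy_crtc, &dummy_fb,
				      64, 64, 64 << 16, 64 << 16, true, true };
	struct plane_state new_st = old_st;	/* pure cursor move */

	printf("cursor move -> async: %d\n", can_async_update(&old_st, &new_st));
	new_st.crtc_w = 128;			/* resize forces the slow path */
	printf("resize      -> async: %d\n", can_async_update(&old_st, &new_st));
	return 0;
}
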
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 025d454163b0..bf5f8c39f34d 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -146,35 +146,6 @@ static void commit_worker(struct work_struct *work)
complete_commit(container_of(work, struct msm_commit, work), true);
}
-/*
- * this func is identical to the drm_atomic_helper_check, but we keep this
- * because we might eventually need to have a more finegrained check
- * sequence without using the atomic helpers.
- *
- * In the past, we first called drm_atomic_helper_check_planes, and then
- * drm_atomic_helper_check_modeset. We needed this because the MDP5 plane's
- * ->atomic_check could update ->mode_changed for pixel format changes.
- * This, however isn't needed now because if there is a pixel format change,
- * we just assign a new hwpipe for it with a new SMP allocation. We might
- * eventually hit a condition where we would need to do a full modeset if
- * we run out of planes. There, we'd probably need to set mode_changed.
- */
-int msm_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *state)
-{
- int ret;
-
- ret = drm_atomic_helper_check_modeset(dev, state);
- if (ret)
- return ret;
-
- ret = drm_atomic_helper_check_planes(dev, state);
- if (ret)
- return ret;
-
- return ret;
-}
-
/**
* drm_atomic_helper_commit - commit validated state object
* @dev: DRM device
@@ -202,6 +173,18 @@ int msm_atomic_commit(struct drm_device *dev,
if (ret)
return ret;
+ /*
+ * Note that plane->atomic_async_check() should fail if we need
+ * to re-assign hwpipe or anything that touches global atomic
+ * state, so we'll never go down the async update path in those
+ * cases.
+ */
+ if (state->async_update) {
+ drm_atomic_helper_async_commit(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+ return 0;
+ }
+
c = commit_init(state);
if (!c) {
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 606df7bea97b..0a3ea3034e39 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -29,9 +29,12 @@
* - 1.0.0 - initial interface
* - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
* - 1.2.0 - adds explicit fence support for submit ioctl
+ * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
+ * SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
+ * MSM_GEM_INFO ioctl.
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 2
+#define MSM_VERSION_MINOR 3
#define MSM_VERSION_PATCHLEVEL 0
static void msm_fb_output_poll_changed(struct drm_device *dev)
@@ -44,7 +47,7 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.output_poll_changed = msm_fb_output_poll_changed,
- .atomic_check = msm_atomic_check,
+ .atomic_check = drm_atomic_helper_check,
.atomic_commit = msm_atomic_commit,
.atomic_state_alloc = msm_atomic_state_alloc,
.atomic_state_clear = msm_atomic_state_clear,
@@ -211,7 +214,6 @@ static int msm_drm_uninit(struct device *dev)
struct drm_device *ddev = platform_get_drvdata(pdev);
struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
- struct msm_gpu *gpu = priv->gpu;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev, *tmp;
@@ -253,15 +255,6 @@ static int msm_drm_uninit(struct device *dev)
if (kms && kms->funcs)
kms->funcs->destroy(kms);
- if (gpu) {
- mutex_lock(&ddev->struct_mutex);
- // XXX what do we do here?
- //pm_runtime_enable(&pdev->dev);
- gpu->funcs->pm_suspend(gpu);
- mutex_unlock(&ddev->struct_mutex);
- gpu->funcs->destroy(gpu);
- }
-
if (priv->vram.paddr) {
unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
drm_mm_takedown(&priv->vram.mm);
@@ -514,24 +507,37 @@ static void load_gpu(struct drm_device *dev)
mutex_unlock(&init_lock);
}
-static int msm_open(struct drm_device *dev, struct drm_file *file)
+static int context_init(struct drm_device *dev, struct drm_file *file)
{
struct msm_file_private *ctx;
- /* For now, load gpu on open.. to avoid the requirement of having
- * firmware in the initrd.
- */
- load_gpu(dev);
-
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ msm_submitqueue_init(dev, ctx);
+
file->driver_priv = ctx;
return 0;
}
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+ /* For now, load gpu on open.. to avoid the requirement of having
+ * firmware in the initrd.
+ */
+ load_gpu(dev);
+
+ return context_init(dev, file);
+}
+
+static void context_close(struct msm_file_private *ctx)
+{
+ msm_submitqueue_close(ctx);
+ kfree(ctx);
+}
+
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -542,7 +548,7 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
priv->lastctx = NULL;
mutex_unlock(&dev->struct_mutex);
- kfree(ctx);
+ context_close(ctx);
}
static void msm_lastclose(struct drm_device *dev)
@@ -737,16 +743,27 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_wait_fence *args = data;
ktime_t timeout = to_ktime(args->timeout);
+ struct msm_gpu_submitqueue *queue;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret;
if (args->pad) {
DRM_ERROR("invalid pad: %08x\n", args->pad);
return -EINVAL;
}
- if (!priv->gpu)
+ if (!gpu)
return 0;
- return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
+ queue = msm_submitqueue_get(file->driver_priv, args->queueid);
+ if (!queue)
+ return -ENOENT;
+
+ ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
+ true);
+
+ msm_submitqueue_put(queue);
+ return ret;
}
static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
@@ -787,6 +804,28 @@ unlock:
return ret;
}
+
+static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_submitqueue *args = data;
+
+ if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
+ return -EINVAL;
+
+ return msm_submitqueue_create(dev, file->driver_priv, args->prio,
+ args->flags, &args->id);
+}
+
+
+static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ u32 id = *(u32 *) data;
+
+ return msm_submitqueue_remove(file->driver_priv, id);
+}
+
static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -796,6 +835,8 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 5e8109c07560..c646843d8822 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -56,11 +56,9 @@ struct msm_gem_address_space;
struct msm_gem_vma;
struct msm_file_private {
- /* currently we don't do anything useful with this.. but when
- * per-context address spaces are supported we'd keep track of
- * the context's page-tables here.
- */
- int dummy;
+ rwlock_t queuelock;
+ struct list_head submitqueues;
+ int queueid;
};
enum msm_mdp_plane_property {
@@ -76,6 +74,8 @@ struct msm_vblank_ctrl {
spinlock_t lock;
};
+#define MSM_GPU_MAX_RINGS 4
+
struct msm_drm_private {
struct drm_device *dev;
@@ -108,7 +108,8 @@ struct msm_drm_private {
struct drm_fb_helper *fbdev;
- struct msm_rd_state *rd;
+ struct msm_rd_state *rd; /* debugfs to dump all submits */
+ struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */
struct msm_perf_state *perf;
/* list of GEM objects: */
@@ -154,20 +155,12 @@ struct msm_drm_private {
struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl;
-
- /* task holding struct_mutex.. currently only used in submit path
- * to detect and reject faults from copy_from_user() for submit
- * ioctl.
- */
- struct task_struct *struct_mutex_task;
};
struct msm_format {
uint32_t pixel_format;
};
-int msm_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *state);
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
@@ -219,6 +212,7 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
void *msm_gem_get_vaddr(struct drm_gem_object *obj);
+void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
int msm_gem_sync_object(struct drm_gem_object *obj,
@@ -303,7 +297,8 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
int msm_debugfs_late_init(struct drm_device *dev);
int msm_rd_debugfs_init(struct drm_minor *minor);
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
-void msm_rd_dump_submit(struct msm_gem_submit *submit);
+void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
+ const char *fmt, ...);
int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else
@@ -319,6 +314,18 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
+struct msm_gpu_submitqueue;
+int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+ u32 id);
+int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
+ u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
+void msm_submitqueue_close(struct msm_file_private *ctx);
+
+void msm_submitqueue_destroy(struct kref *kref);
+
+
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index a2f89bac9c16..349c12f670eb 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -31,7 +31,7 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
return ERR_PTR(-ENOMEM);
fctx->dev = dev;
- fctx->name = name;
+ strncpy(fctx->name, name, sizeof(fctx->name));
fctx->context = dma_fence_context_alloc(1);
init_waitqueue_head(&fctx->event);
spin_lock_init(&fctx->spinlock);
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index 56061aa1959d..1aa6a4c6530c 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -22,7 +22,7 @@
struct msm_fence_context {
struct drm_device *dev;
- const char *name;
+ char name[32];
unsigned context;
/* last_fence == completed_fence --> no pending work */
uint32_t last_fence; /* last assigned fence */
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index ea5bb0e1632c..81fe6d6740ce 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -470,14 +470,16 @@ fail:
return ret;
}
-void *msm_gem_get_vaddr(struct drm_gem_object *obj)
+static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
mutex_lock(&msm_obj->lock);
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+ if (WARN_ON(msm_obj->madv > madv)) {
+ dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+ msm_obj->madv, madv);
mutex_unlock(&msm_obj->lock);
return ERR_PTR(-EBUSY);
}
@@ -513,6 +515,22 @@ fail:
return ERR_PTR(ret);
}
+void *msm_gem_get_vaddr(struct drm_gem_object *obj)
+{
+ return get_vaddr(obj, MSM_MADV_WILLNEED);
+}
+
+/*
+ * Don't use this! It is for the very special case of dumping
+ * submits from GPU hangs or faults, where the bo may already
+ * be MSM_MADV_DONTNEED, but we know the buffer is still on the
+ * active list.
+ */
+void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
+{
+ return get_vaddr(obj, __MSM_MADV_PURGED);
+}
+
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 91c210d2359c..9320e184b48d 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -138,12 +138,15 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
struct msm_gem_submit {
struct drm_device *dev;
struct msm_gpu *gpu;
- struct list_head node; /* node in gpu submit_list */
+ struct list_head node; /* node in ring submit list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
+ uint32_t seqno; /* Sequence number of the submit on the ring */
struct dma_fence *fence;
+ struct msm_gpu_submitqueue *queue;
struct pid *pid; /* submitting process */
bool valid; /* true if no cmdstream patching needed */
+ struct msm_ringbuffer *ring;
unsigned int nr_cmds;
unsigned int nr_bos;
struct {
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 93535cac0676..b8dc8f96caf2 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -31,7 +31,8 @@
#define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev,
- struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
+ struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue,
+ uint32_t nr_bos, uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
@@ -49,6 +50,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->fence = NULL;
submit->pid = get_pid(task_pid(current));
submit->cmd = (void *)&submit->bos[nr_bos];
+ submit->queue = queue;
+ submit->ring = gpu->rb[queue->prio];
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
@@ -66,6 +69,8 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
dma_fence_put(submit->fence);
list_del(&submit->node);
put_pid(submit->pid);
+ msm_submitqueue_put(submit->queue);
+
kfree(submit);
}
@@ -156,7 +161,8 @@ out:
return ret;
}
-static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
+static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
+ int i, bool backoff)
{
struct msm_gem_object *msm_obj = submit->bos[i].obj;
@@ -166,7 +172,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
- if (!(submit->bos[i].flags & BO_VALID))
+ if (backoff && !(submit->bos[i].flags & BO_VALID))
submit->bos[i].iova = 0;
submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
@@ -201,10 +207,10 @@ retry:
fail:
for (; i >= 0; i--)
- submit_unlock_unpin_bo(submit, i);
+ submit_unlock_unpin_bo(submit, i, true);
if (slow_locked > 0)
- submit_unlock_unpin_bo(submit, slow_locked);
+ submit_unlock_unpin_bo(submit, slow_locked, true);
if (ret == -EDEADLK) {
struct msm_gem_object *msm_obj = submit->bos[contended].obj;
@@ -243,7 +249,8 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
if (no_implicit)
continue;
- ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
+ ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
+ write);
if (ret)
break;
}
@@ -387,7 +394,7 @@ static void submit_cleanup(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- submit_unlock_unpin_bo(submit, i);
+ submit_unlock_unpin_bo(submit, i, false);
list_del_init(&msm_obj->submit_entry);
drm_gem_object_unreference(&msm_obj->base);
}
@@ -405,6 +412,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_gpu *gpu = priv->gpu;
struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
+ struct msm_gpu_submitqueue *queue;
+ struct msm_ringbuffer *ring;
int out_fence_fd = -1;
unsigned i;
int ret;
@@ -421,6 +430,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
return -EINVAL;
+ queue = msm_submitqueue_get(ctx, args->queueid);
+ if (!queue)
+ return -ENOENT;
+
+ ring = gpu->rb[queue->prio];
+
if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
in_fence = sync_file_get_fence(args->fence_fd);
@@ -431,7 +446,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* Wait if the fence is from a foreign context, or if the fence
* array contains any fence from a foreign context.
*/
- if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
+ if (!dma_fence_match_context(in_fence, ring->fctx->context)) {
ret = dma_fence_wait(in_fence, true);
if (ret)
return ret;
@@ -449,9 +464,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out_unlock;
}
}
- priv->struct_mutex_task = current;
- submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
+ submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
if (!submit) {
ret = -ENOMEM;
goto out_unlock;
@@ -534,7 +548,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
- submit->fence = msm_fence_alloc(gpu->fctx);
+ submit->fence = msm_fence_alloc(ring->fctx);
if (IS_ERR(submit->fence)) {
ret = PTR_ERR(submit->fence);
submit->fence = NULL;
@@ -567,7 +581,6 @@ out:
out_unlock:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
- priv->struct_mutex_task = NULL;
mutex_unlock(&dev->struct_mutex);
return ret;
}
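
The ownership pattern introduced above -- a submit takes a reference on its submitqueue, binds to the ring selected by the queue's priority, and drops the reference only when the submit is freed -- is spread across msm_submitqueue_get(), submit_create() and msm_gem_submit_free(). A rough, self-contained userspace model of that lifecycle (names and refcounting are illustrative stand-ins, not the kernel's kref machinery):

#include <stdio.h>
#include <stdlib.h>

#define NR_RINGS 4

struct ring { int id; unsigned int seqno; };

struct submitqueue {
	unsigned int prio;	/* ring index, validated against nr_rings at create */
	int refcount;		/* kref in the driver */
};

struct submit {
	struct submitqueue *queue;	/* reference held for the submit's lifetime */
	struct ring *ring;		/* gpu->rb[queue->prio] */
	unsigned int seqno;		/* per-ring sequence number */
};

static struct ring rings[NR_RINGS] = { {0, 0}, {1, 0}, {2, 0}, {3, 0} };

static struct submitqueue *queue_get(struct submitqueue *q) { q->refcount++; return q; }

static void queue_put(struct submitqueue *q)
{
	if (--q->refcount == 0)		/* msm_submitqueue_destroy() in the driver */
		free(q);
}

static struct submit *submit_create(struct submitqueue *q)
{
	struct submit *s = calloc(1, sizeof(*s));

	s->queue = q;			/* keeps the reference taken by the caller */
	s->ring = &rings[q->prio];	/* ring chosen by queue priority */
	s->seqno = ++s->ring->seqno;
	return s;
}

static void submit_free(struct submit *s)
{
	queue_put(s->queue);		/* dropped only when the submit goes away */
	free(s);
}

int main(void)
{
	struct submitqueue *q = calloc(1, sizeof(*q));

	q->prio = 2;
	q->refcount = 1;		/* reference held by the file's queue list */

	struct submit *s = submit_create(queue_get(q));
	printf("submit on ring %d, seqno %u\n", s->ring->id, s->seqno);
	submit_free(s);
	queue_put(q);			/* e.g. submitqueue close / file close */
	return 0;
}
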
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 6a887032c66a..8d4477818ec2 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -20,6 +20,8 @@
#include "msm_mmu.h"
#include "msm_fence.h"
+#include <linux/string_helpers.h>
+
/*
* Power Management:
@@ -221,33 +223,102 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
* Hangcheck detection for locked gpu:
*/
+static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ uint32_t fence)
+{
+ struct msm_gem_submit *submit;
+
+ list_for_each_entry(submit, &ring->submits, node) {
+ if (submit->seqno > fence)
+ break;
+
+ msm_update_fence(submit->ring->fctx,
+ submit->fence->seqno);
+ }
+}
+
+static struct msm_gem_submit *
+find_submit(struct msm_ringbuffer *ring, uint32_t fence)
+{
+ struct msm_gem_submit *submit;
+
+ WARN_ON(!mutex_is_locked(&ring->gpu->dev->struct_mutex));
+
+ list_for_each_entry(submit, &ring->submits, node)
+ if (submit->seqno == fence)
+ return submit;
+
+ return NULL;
+}
+
static void retire_submits(struct msm_gpu *gpu);
static void recover_worker(struct work_struct *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_submit *submit;
- uint32_t fence = gpu->funcs->last_fence(gpu);
-
- msm_update_fence(gpu->fctx, fence + 1);
+ struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
+ int i;
mutex_lock(&dev->struct_mutex);
dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
- list_for_each_entry(submit, &gpu->submit_list, node) {
- if (submit->fence->seqno == (fence + 1)) {
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid_task(submit->pid, PIDTYPE_PID);
- if (task) {
- dev_err(dev->dev, "%s: offending task: %s\n",
- gpu->name, task->comm);
- }
- rcu_read_unlock();
- break;
+
+ submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
+ if (submit) {
+ struct task_struct *task;
+
+ rcu_read_lock();
+ task = pid_task(submit->pid, PIDTYPE_PID);
+ if (task) {
+ char *cmd;
+
+ /*
+ * So slightly annoying, in other paths like
+ * mmap'ing gem buffers, mmap_sem is acquired
+ * before struct_mutex, which means we can't
+ * hold struct_mutex across the call to
+ * get_cmdline(). But submits are retired
+ * from the same in-order workqueue, so we can
+ * safely drop the lock here without worrying
+ * about the submit going away.
+ */
+ mutex_unlock(&dev->struct_mutex);
+ cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+ mutex_lock(&dev->struct_mutex);
+
+ dev_err(dev->dev, "%s: offending task: %s (%s)\n",
+ gpu->name, task->comm, cmd);
+
+ msm_rd_dump_submit(priv->hangrd, submit,
+ "offending task: %s (%s)", task->comm, cmd);
+ } else {
+ msm_rd_dump_submit(priv->hangrd, submit, NULL);
}
+ rcu_read_unlock();
+ }
+
+
+ /*
+ * Update all the rings with the latest and greatest fence.. this
+ * needs to happen after msm_rd_dump_submit() to ensure that the
+ * bo's referenced by the offending submit are still around.
+ */
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ uint32_t fence = ring->memptrs->fence;
+
+ /*
+ * For the current (faulting?) ring/submit advance the fence by
+ * one more to clear the faulting submit
+ */
+ if (ring == cur_ring)
+ fence++;
+
+ update_fences(gpu, ring, fence);
}
if (msm_gpu_active(gpu)) {
@@ -258,9 +329,15 @@ static void recover_worker(struct work_struct *work)
gpu->funcs->recover(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
- /* replay the remaining submits after the one that hung: */
- list_for_each_entry(submit, &gpu->submit_list, node) {
- gpu->funcs->submit(gpu, submit, NULL);
+ /*
+ * Replay all remaining submits starting with highest priority
+ * ring
+ */
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ list_for_each_entry(submit, &ring->submits, node)
+ gpu->funcs->submit(gpu, submit, NULL);
}
}
@@ -281,25 +358,27 @@ static void hangcheck_handler(unsigned long data)
struct msm_gpu *gpu = (struct msm_gpu *)data;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+ uint32_t fence = ring->memptrs->fence;
- if (fence != gpu->hangcheck_fence) {
+ if (fence != ring->hangcheck_fence) {
/* some progress has been made.. ya! */
- gpu->hangcheck_fence = fence;
- } else if (fence < gpu->fctx->last_fence) {
+ ring->hangcheck_fence = fence;
+ } else if (fence < ring->seqno) {
/* no progress and not done.. hung! */
- gpu->hangcheck_fence = fence;
- dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
- gpu->name);
+ ring->hangcheck_fence = fence;
+ dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+ gpu->name, ring->id);
dev_err(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
dev_err(dev->dev, "%s: submitted fence: %u\n",
- gpu->name, gpu->fctx->last_fence);
+ gpu->name, ring->seqno);
+
queue_work(priv->wq, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
- if (gpu->fctx->last_fence > gpu->hangcheck_fence)
+ if (ring->seqno > ring->hangcheck_fence)
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
@@ -428,19 +507,18 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
static void retire_submits(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
+ struct msm_gem_submit *submit, *tmp;
+ int i;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- while (!list_empty(&gpu->submit_list)) {
- struct msm_gem_submit *submit;
-
- submit = list_first_entry(&gpu->submit_list,
- struct msm_gem_submit, node);
+ /* Retire the commits starting with highest priority */
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
- if (dma_fence_is_signaled(submit->fence)) {
- retire_submit(gpu, submit);
- } else {
- break;
+ list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
+ if (dma_fence_is_signaled(submit->fence))
+ retire_submit(gpu, submit);
}
}
}
@@ -449,9 +527,10 @@ static void retire_worker(struct work_struct *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
struct drm_device *dev = gpu->dev;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ int i;
- msm_update_fence(gpu->fctx, fence);
+ for (i = 0; i < gpu->nr_rings; i++)
+ update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
mutex_lock(&dev->struct_mutex);
retire_submits(gpu);
@@ -472,6 +551,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
{
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = submit->ring;
int i;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -480,9 +560,11 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gpu_hw_init(gpu);
- list_add_tail(&submit->node, &gpu->submit_list);
+ submit->seqno = ++ring->seqno;
+
+ list_add_tail(&submit->node, &ring->submits);
- msm_rd_dump_submit(submit);
+ msm_rd_dump_submit(priv->rd, submit, NULL);
update_sw_cntrs(gpu);
@@ -605,7 +687,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
{
- int ret;
+ int i, ret, nr_rings = config->nr_rings;
+ void *memptrs;
+ uint64_t memptrs_iova;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@@ -613,18 +697,11 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->dev = drm;
gpu->funcs = funcs;
gpu->name = name;
- gpu->fctx = msm_fence_context_alloc(drm, name);
- if (IS_ERR(gpu->fctx)) {
- ret = PTR_ERR(gpu->fctx);
- gpu->fctx = NULL;
- goto fail;
- }
INIT_LIST_HEAD(&gpu->active_list);
INIT_WORK(&gpu->retire_work, retire_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
- INIT_LIST_HEAD(&gpu->submit_list);
setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
(unsigned long)gpu);
@@ -689,34 +766,76 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- /* Create ringbuffer: */
- gpu->rb = msm_ringbuffer_new(gpu, config->ringsz);
- if (IS_ERR(gpu->rb)) {
- ret = PTR_ERR(gpu->rb);
- gpu->rb = NULL;
- dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
+ memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
+ MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
+ &memptrs_iova);
+
+ if (IS_ERR(memptrs)) {
+ ret = PTR_ERR(memptrs);
+ dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
goto fail;
}
+ if (nr_rings > ARRAY_SIZE(gpu->rb)) {
+ DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
+ ARRAY_SIZE(gpu->rb));
+ nr_rings = ARRAY_SIZE(gpu->rb);
+ }
+
+ /* Create ringbuffer(s): */
+ for (i = 0; i < nr_rings; i++) {
+ gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
+
+ if (IS_ERR(gpu->rb[i])) {
+ ret = PTR_ERR(gpu->rb[i]);
+ dev_err(drm->dev,
+ "could not create ringbuffer %d: %d\n", i, ret);
+ goto fail;
+ }
+
+ memptrs += sizeof(struct msm_rbmemptrs);
+ memptrs_iova += sizeof(struct msm_rbmemptrs);
+ }
+
+ gpu->nr_rings = nr_rings;
+
return 0;
fail:
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ msm_ringbuffer_destroy(gpu->rb[i]);
+ gpu->rb[i] = NULL;
+ }
+
+ if (gpu->memptrs_bo) {
+ msm_gem_put_vaddr(gpu->memptrs_bo);
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ }
+
platform_set_drvdata(pdev, NULL);
return ret;
}
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
+ int i;
+
DBG("%s", gpu->name);
WARN_ON(!list_empty(&gpu->active_list));
bs_fini(gpu);
- if (gpu->rb) {
- if (gpu->rb_iova)
- msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
- msm_ringbuffer_destroy(gpu->rb);
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ msm_ringbuffer_destroy(gpu->rb[i]);
+ gpu->rb[i] = NULL;
+ }
+
+ if (gpu->memptrs_bo) {
+ msm_gem_put_vaddr(gpu->memptrs_bo);
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
if (!IS_ERR_OR_NULL(gpu->aspace)) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index df4e2771fb85..e113d64574d3 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -33,7 +33,7 @@ struct msm_gpu_config {
const char *irqname;
uint64_t va_start;
uint64_t va_end;
- unsigned int ringsz;
+ unsigned int nr_rings;
};
/* So far, with hardware that I've seen to date, we can have:
@@ -57,9 +57,9 @@ struct msm_gpu_funcs {
int (*pm_resume)(struct msm_gpu *gpu);
void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
- void (*flush)(struct msm_gpu *gpu);
+ void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
irqreturn_t (*irq)(struct msm_gpu *irq);
- uint32_t (*last_fence)(struct msm_gpu *gpu);
+ struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
void (*destroy)(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
@@ -86,16 +86,12 @@ struct msm_gpu {
const struct msm_gpu_perfcntr *perfcntrs;
uint32_t num_perfcntrs;
- /* ringbuffer: */
- struct msm_ringbuffer *rb;
- uint64_t rb_iova;
+ struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
+ int nr_rings;
/* list of GEM active objects: */
struct list_head active_list;
- /* fencing: */
- struct msm_fence_context *fctx;
-
/* does gpu need hw_init? */
bool needs_hw_init;
@@ -126,15 +122,31 @@ struct msm_gpu {
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
- uint32_t hangcheck_fence;
struct work_struct recover_work;
- struct list_head submit_list;
+ struct drm_gem_object *memptrs_bo;
};
+/* It turns out that all targets use the same ringbuffer size */
+#define MSM_GPU_RINGBUFFER_SZ SZ_32K
+#define MSM_GPU_RINGBUFFER_BLKSIZE 32
+
+#define MSM_GPU_RB_CNTL_DEFAULT \
+ (AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
+ AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
+
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
- return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu);
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ if (ring->seqno > ring->memptrs->fence)
+ return true;
+ }
+
+ return false;
}
/* Perf-Counters:
@@ -150,6 +162,15 @@ struct msm_gpu_perfcntr {
const char *name;
};
+struct msm_gpu_submitqueue {
+ int id;
+ u32 flags;
+ u32 prio;
+ int faults;
+ struct list_head node;
+ struct kref ref;
+};
+
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
msm_writel(data, gpu->mmio + (reg << 2));
@@ -223,4 +244,10 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);
+static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
+{
+ if (queue)
+ kref_put(&queue->ref, msm_submitqueue_destroy);
+}
+
#endif /* __MSM_GPU_H__ */
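
A quick sanity check of the constants above (illustrative, standalone -- not driver code): with a 32 KiB ring and a 32-byte block size, the BUFSZ and BLKSZ fields of MSM_GPU_RB_CNTL_DEFAULT come out to ilog2(32768/8) = 12 and ilog2(32/8) = 2. The sketch below recomputes them and also models the per-ring msm_gpu_active() test, where "completed_fence" stands in for ring->memptrs->fence:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RINGBUFFER_SZ      (32 * 1024)	/* MSM_GPU_RINGBUFFER_SZ */
#define RINGBUFFER_BLKSIZE 32		/* MSM_GPU_RINGBUFFER_BLKSIZE */

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

struct ring { uint32_t seqno; uint32_t completed_fence; };

/* Mirrors msm_gpu_active(): busy if any ring has submits past its fence. */
static bool gpu_active(const struct ring *rb, int nr_rings)
{
	for (int i = 0; i < nr_rings; i++)
		if (rb[i].seqno > rb[i].completed_fence)
			return true;
	return false;
}

int main(void)
{
	printf("BUFSZ = %u, BLKSZ = %u\n",
	       ilog2_u32(RINGBUFFER_SZ / 8),
	       ilog2_u32(RINGBUFFER_BLKSIZE / 8));	/* 12 and 2 */

	struct ring rb[2] = { { 5, 5 }, { 7, 6 } };
	printf("active = %d\n", gpu_active(rb, 2));	/* ring 1 still busy */
	return 0;
}
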
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index ec56794ad039..3aa8a8576abe 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -19,11 +19,17 @@
*
* tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
*
- * To log the cmdstream in a format that is understood by freedreno/cffdump
+ * to log the cmdstream in a format that is understood by freedreno/cffdump
* utility. By comparing the last successfully completed fence #, to the
* cmdstream for the next fence, you can narrow down which process and submit
* caused the gpu crash/lockup.
*
+ * Additionally:
+ *
+ * tail -f /sys/kernel/debug/dri/<minor>/hangrd > logfile.rd
+ *
+ * will capture just the cmdstream from submits which triggered a GPU hang.
+ *
* This bypasses drm_debugfs_create_files() mainly because we need to use
* our own fops for a bit more control. In particular, we don't want to
* do anything if userspace doesn't have the debugfs file open.
@@ -220,53 +226,89 @@ static const struct file_operations rd_debugfs_fops = {
.release = rd_release,
};
-int msm_rd_debugfs_init(struct drm_minor *minor)
+
+static void rd_cleanup(struct msm_rd_state *rd)
+{
+ if (!rd)
+ return;
+
+ mutex_destroy(&rd->read_lock);
+ kfree(rd);
+}
+
+static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name)
{
- struct msm_drm_private *priv = minor->dev->dev_private;
struct msm_rd_state *rd;
struct dentry *ent;
-
- /* only create on first minor: */
- if (priv->rd)
- return 0;
+ int ret = 0;
rd = kzalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
rd->dev = minor->dev;
rd->fifo.buf = rd->buf;
mutex_init(&rd->read_lock);
- priv->rd = rd;
init_waitqueue_head(&rd->fifo_event);
- ent = debugfs_create_file("rd", S_IFREG | S_IRUGO,
+ ent = debugfs_create_file(name, S_IFREG | S_IRUGO,
minor->debugfs_root, rd, &rd_debugfs_fops);
if (!ent) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/rd\n",
- minor->debugfs_root);
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri/%pd/%s\n",
+ minor->debugfs_root, name);
+ ret = -ENOMEM;
goto fail;
}
+ return rd;
+
+fail:
+ rd_cleanup(rd);
+ return ERR_PTR(ret);
+}
+
+int msm_rd_debugfs_init(struct drm_minor *minor)
+{
+ struct msm_drm_private *priv = minor->dev->dev_private;
+ struct msm_rd_state *rd;
+ int ret;
+
+ /* only create on first minor: */
+ if (priv->rd)
+ return 0;
+
+ rd = rd_init(minor, "rd");
+ if (IS_ERR(rd)) {
+ ret = PTR_ERR(rd);
+ goto fail;
+ }
+
+ priv->rd = rd;
+
+ rd = rd_init(minor, "hangrd");
+ if (IS_ERR(rd)) {
+ ret = PTR_ERR(rd);
+ goto fail;
+ }
+
+ priv->hangrd = rd;
+
return 0;
fail:
msm_rd_debugfs_cleanup(priv);
- return -1;
+ return ret;
}
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
{
- struct msm_rd_state *rd = priv->rd;
-
- if (!rd)
- return;
-
+ rd_cleanup(priv->rd);
priv->rd = NULL;
- mutex_destroy(&rd->read_lock);
- kfree(rd);
+
+ rd_cleanup(priv->hangrd);
+ priv->hangrd = NULL;
}
static void snapshot_buf(struct msm_rd_state *rd,
@@ -276,10 +318,6 @@ static void snapshot_buf(struct msm_rd_state *rd,
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf;
- buf = msm_gem_get_vaddr(&obj->base);
- if (IS_ERR(buf))
- return;
-
if (iova) {
buf += iova - submit->bos[idx].iova;
} else {
@@ -287,20 +325,33 @@ static void snapshot_buf(struct msm_rd_state *rd,
size = obj->base.size;
}
+ /*
+	 * Always write the GPUADDR header so we can get a complete list of all the
+ * buffers in the cmd
+ */
rd_write_section(rd, RD_GPUADDR,
(uint32_t[3]){ iova, size, iova >> 32 }, 12);
+
+ /* But only dump the contents of buffers marked READ */
+ if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
+ return;
+
+ buf = msm_gem_get_vaddr_active(&obj->base);
+ if (IS_ERR(buf))
+ return;
+
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr(&obj->base);
}
/* called under struct_mutex */
-void msm_rd_dump_submit(struct msm_gem_submit *submit)
+void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
+ const char *fmt, ...)
{
struct drm_device *dev = submit->dev;
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_rd_state *rd = priv->rd;
- char msg[128];
+ struct task_struct *task;
+ char msg[256];
int i, n;
if (!rd->open)
@@ -311,23 +362,32 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
*/
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
- TASK_COMM_LEN, current->comm, task_pid_nr(current),
- submit->fence->seqno);
+ if (fmt) {
+ va_list args;
- rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
+ va_start(args, fmt);
+ n = vsnprintf(msg, sizeof(msg), fmt, args);
+ va_end(args);
- if (rd_full) {
- for (i = 0; i < submit->nr_bos; i++) {
- /* buffers that are written to probably don't start out
- * with anything interesting:
- */
- if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
- continue;
+ rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
+ }
- snapshot_buf(rd, submit, i, 0, 0);
- }
+ rcu_read_lock();
+ task = pid_task(submit->pid, PIDTYPE_PID);
+ if (task) {
+ n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
+ TASK_COMM_LEN, task->comm,
+ pid_nr(submit->pid), submit->seqno);
+ } else {
+ n = snprintf(msg, sizeof(msg), "???/%d: fence=%u",
+ pid_nr(submit->pid), submit->seqno);
}
+ rcu_read_unlock();
+
+ rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
+
+ for (i = 0; rd_full && i < submit->nr_bos; i++)
+ snapshot_buf(rd, submit, i, 0, 0);
for (i = 0; i < submit->nr_cmds; i++) {
uint64_t iova = submit->cmd[i].iova;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index bf065a540130..6ca98da35f63 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -18,13 +18,15 @@
#include "msm_ringbuffer.h"
#include "msm_gpu.h"
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+ void *memptrs, uint64_t memptrs_iova)
{
struct msm_ringbuffer *ring;
+ char name[32];
int ret;
- if (WARN_ON(!is_power_of_2(size)))
- return ERR_PTR(-EINVAL);
+	/* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
+ BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
@@ -33,32 +35,46 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->gpu = gpu;
-
+ ring->id = id;
/* Pass NULL for the iova pointer - we will map it later */
- ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC,
- gpu->aspace, &ring->bo, NULL);
+ ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
+ MSM_BO_WC, gpu->aspace, &ring->bo, NULL);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
ring->start = 0;
goto fail;
}
- ring->end = ring->start + (size / 4);
+ ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
+ ring->next = ring->start;
ring->cur = ring->start;
- ring->size = size;
+ ring->memptrs = memptrs;
+ ring->memptrs_iova = memptrs_iova;
+
+ INIT_LIST_HEAD(&ring->submits);
+ spin_lock_init(&ring->lock);
+
+ snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);
+
+ ring->fctx = msm_fence_context_alloc(gpu->dev, name);
return ring;
fail:
- if (ring)
- msm_ringbuffer_destroy(ring);
+ msm_ringbuffer_destroy(ring);
return ERR_PTR(ret);
}
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{
+ if (IS_ERR_OR_NULL(ring))
+ return;
+
+ msm_fence_context_free(ring->fctx);
+
if (ring->bo) {
+ msm_gem_put_iova(ring->bo, ring->gpu->aspace);
msm_gem_put_vaddr(ring->bo);
drm_gem_object_unreference_unlocked(ring->bo);
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 6e0e1049fa4f..cffce094aecb 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -20,14 +20,31 @@
#include "msm_drv.h"
+#define rbmemptr(ring, member) \
+ ((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
+
+struct msm_rbmemptrs {
+ volatile uint32_t rptr;
+ volatile uint32_t fence;
+};
+
struct msm_ringbuffer {
struct msm_gpu *gpu;
- int size;
+ int id;
struct drm_gem_object *bo;
- uint32_t *start, *end, *cur;
+ uint32_t *start, *end, *cur, *next;
+ struct list_head submits;
+ uint64_t iova;
+ uint32_t seqno;
+ uint32_t hangcheck_fence;
+ struct msm_rbmemptrs *memptrs;
+ uint64_t memptrs_iova;
+ struct msm_fence_context *fctx;
+ spinlock_t lock;
};
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+ void *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
@@ -35,9 +52,13 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{
- if (ring->cur == ring->end)
- ring->cur = ring->start;
- *(ring->cur++) = data;
+ /*
+ * ring->next points to the current command being written - it won't be
+ * committed as ring->cur until the flush
+ */
+ if (ring->next == ring->end)
+ ring->next = ring->start;
+ *(ring->next++) = data;
}
#endif /* __MSM_RINGBUFFER_H__ */
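
The shared-memory layout above is small enough to check by hand: rbmemptr(ring, fence) is simply the ring's memptrs_iova plus offsetof(struct msm_rbmemptrs, fence). The standalone sketch below (illustrative, not driver code) recomputes those offsets and shows the write-to-next / commit-to-cur split that the new OUT_RING() relies on:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rbmemptrs {			/* mirrors struct msm_rbmemptrs */
	volatile uint32_t rptr;
	volatile uint32_t fence;
};

#define rbmemptr(iova, member) ((iova) + offsetof(struct rbmemptrs, member))

struct ring {
	uint32_t buf[8];
	uint32_t *start, *end, *cur, *next;
};

/* Like OUT_RING(): the write goes to ->next; ->cur only advances on flush. */
static void out_ring(struct ring *r, uint32_t data)
{
	if (r->next == r->end)
		r->next = r->start;
	*(r->next++) = data;
}

static void flush(struct ring *r)
{
	r->cur = r->next;	/* the hardware write pointer is derived from cur */
}

int main(void)
{
	uint64_t memptrs_iova = 0x1000;

	printf("rptr  iova: 0x%llx\n", (unsigned long long)rbmemptr(memptrs_iova, rptr));
	printf("fence iova: 0x%llx\n", (unsigned long long)rbmemptr(memptrs_iova, fence));

	struct ring r;
	r.start = r.buf;
	r.end = r.buf + 8;
	r.cur = r.next = r.start;

	out_ring(&r, 0xdeadbeef);
	out_ring(&r, 0xcafef00d);
	printf("pending dwords before flush: %ld\n", (long)(r.next - r.cur));
	flush(&r);
	printf("pending dwords after  flush: %ld\n", (long)(r.next - r.cur));
	return 0;
}
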
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
new file mode 100644
index 000000000000..5115f75b5b7f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -0,0 +1,152 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kref.h>
+#include "msm_gpu.h"
+
+void msm_submitqueue_destroy(struct kref *kref)
+{
+ struct msm_gpu_submitqueue *queue = container_of(kref,
+ struct msm_gpu_submitqueue, ref);
+
+ kfree(queue);
+}
+
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+ u32 id)
+{
+ struct msm_gpu_submitqueue *entry;
+
+ if (!ctx)
+ return NULL;
+
+ read_lock(&ctx->queuelock);
+
+ list_for_each_entry(entry, &ctx->submitqueues, node) {
+ if (entry->id == id) {
+ kref_get(&entry->ref);
+ read_unlock(&ctx->queuelock);
+
+ return entry;
+ }
+ }
+
+ read_unlock(&ctx->queuelock);
+ return NULL;
+}
+
+void msm_submitqueue_close(struct msm_file_private *ctx)
+{
+ struct msm_gpu_submitqueue *entry, *tmp;
+
+ if (!ctx)
+ return;
+
+ /*
+ * No lock needed in close and there won't
+ * be any more user ioctls coming our way
+ */
+ list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
+ msm_submitqueue_put(entry);
+}
+
+int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
+ u32 prio, u32 flags, u32 *id)
+{
+ struct msm_drm_private *priv = drm->dev_private;
+ struct msm_gpu_submitqueue *queue;
+
+ if (!ctx)
+ return -ENODEV;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+
+ if (!queue)
+ return -ENOMEM;
+
+ kref_init(&queue->ref);
+ queue->flags = flags;
+
+ if (priv->gpu) {
+ if (prio >= priv->gpu->nr_rings)
+ return -EINVAL;
+
+ queue->prio = prio;
+ }
+
+ write_lock(&ctx->queuelock);
+
+ queue->id = ctx->queueid++;
+
+ if (id)
+ *id = queue->id;
+
+ list_add_tail(&queue->node, &ctx->submitqueues);
+
+ write_unlock(&ctx->queuelock);
+
+ return 0;
+}
+
+int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
+{
+ struct msm_drm_private *priv = drm->dev_private;
+ int default_prio;
+
+ if (!ctx)
+ return 0;
+
+ /*
+ * Select priority 2 as the "default priority" unless nr_rings is less
+	 * than 2, in which case pick the lowest priority
+ */
+ default_prio = priv->gpu ?
+ clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0;
+
+ INIT_LIST_HEAD(&ctx->submitqueues);
+
+ rwlock_init(&ctx->queuelock);
+
+ return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
+}
+
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
+{
+ struct msm_gpu_submitqueue *entry;
+
+ if (!ctx)
+ return 0;
+
+ /*
+ * id 0 is the "default" queue and can't be destroyed
+ * by the user
+ */
+ if (!id)
+ return -ENOENT;
+
+ write_lock(&ctx->queuelock);
+
+ list_for_each_entry(entry, &ctx->submitqueues, node) {
+ if (entry->id == id) {
+ list_del(&entry->node);
+ write_unlock(&ctx->queuelock);
+
+ msm_submitqueue_put(entry);
+ return 0;
+ }
+ }
+
+ write_unlock(&ctx->queuelock);
+ return -ENOENT;
+}
+
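
One worked example for the default-queue priority selection above (purely illustrative): clamp_t(uint32_t, 2, 0, nr_rings - 1) yields 0 for a single-ring GPU, 1 for two rings, and 2 once three or more rings exist. The queue created with that priority gets id 0, which msm_submitqueue_remove() refuses to delete, so every file always keeps a usable default queue. A standalone version of the clamp computation:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's clamp_t(uint32_t, val, lo, hi). */
static uint32_t clamp_u32(uint32_t val, uint32_t lo, uint32_t hi)
{
	if (val < lo)
		return lo;
	if (val > hi)
		return hi;
	return val;
}

int main(void)
{
	for (int nr_rings = 1; nr_rings <= 4; nr_rings++)
		printf("nr_rings=%d -> default prio %u\n",
		       nr_rings, clamp_u32(2, 0, (uint32_t)(nr_rings - 1)));
	return 0;
}
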
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 7fbad9cb656e..1207ffe36250 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -35,6 +35,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
@@ -92,7 +93,7 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
}
static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -127,7 +128,7 @@ static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
static int mxsfb_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
- return drm_fb_cma_prepare_fb(&pipe->plane, plane_state);
+ return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
}
static struct drm_simple_display_pipe_funcs mxsfb_funcs = {
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 2e9ce53ae3a8..9c0c650655e9 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -30,9 +30,11 @@ nouveau-y += nouveau_vga.o
# DRM - memory management
nouveau-y += nouveau_bo.o
nouveau-y += nouveau_gem.o
+nouveau-y += nouveau_mem.o
nouveau-y += nouveau_prime.o
nouveau-y += nouveau_sgdma.o
nouveau-y += nouveau_ttm.o
+nouveau-y += nouveau_vmm.o
# DRM - modesetting
nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index c02a13406a81..4b75ad40dd80 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -56,6 +56,13 @@ config NOUVEAU_DEBUG_DEFAULT
help
Selects the default debug level
+config NOUVEAU_DEBUG_MMU
+ bool "Enable additional MMU debugging"
+ depends on DRM_NOUVEAU
+ default n
+ help
+ Say Y here if you want to enable verbose MMU debug output.
+
config DRM_NOUVEAU_BACKLIGHT
bool "Support for backlight control"
depends on DRM_NOUVEAU
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 5b9d549aa791..501d2d290e9c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -48,7 +48,7 @@ nv04_display_create(struct drm_device *dev)
if (!disp)
return -ENOMEM;
- nvif_object_map(&drm->client.device.object);
+ nvif_object_map(&drm->client.device.object, NULL, 0);
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv04_display_destroy;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl506e.h b/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
index b4cd58093300..989690fe3cd8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
@@ -6,7 +6,7 @@ struct nv50_channel_dma_v0 {
__u8 version;
__u8 chid;
__u8 pad02[6];
- __u64 vm;
+ __u64 vmm;
__u64 pushbuf;
__u64 offset;
};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl506f.h b/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
index 14d20c813cdb..5137b6879abd 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
@@ -9,6 +9,6 @@ struct nv50_channel_gpfifo_v0 {
__u32 ilength;
__u64 ioffset;
__u64 pushbuf;
- __u64 vm;
+ __u64 vmm;
};
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
index 36944ff09e3c..1a875090b251 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
@@ -6,7 +6,7 @@ struct g82_channel_dma_v0 {
__u8 version;
__u8 chid;
__u8 pad02[6];
- __u64 vm;
+ __u64 vmm;
__u64 pushbuf;
__u64 offset;
};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
index df09a50817eb..e4e50cfe88f1 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
@@ -9,7 +9,7 @@ struct g82_channel_gpfifo_v0 {
__u32 ilength;
__u64 ioffset;
__u64 pushbuf;
- __u64 vm;
+ __u64 vmm;
};
#define NV826F_V0_NTFY_NON_STALL_INTERRUPT 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
index 6d16a3a2ec02..ab0fa8adb756 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
@@ -8,7 +8,7 @@ struct fermi_channel_gpfifo_v0 {
__u8 pad02[2];
__u32 ilength;
__u64 ioffset;
- __u64 vm;
+ __u64 vmm;
};
#define NV906F_V0_NTFY_NON_STALL_INTERRUPT 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index 597ebb52d5f9..56f5bd81e480 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -23,7 +23,7 @@ struct kepler_channel_gpfifo_a_v0 {
__u32 engines;
__u32 ilength;
__u64 ioffset;
- __u64 vm;
+ __u64 vmm;
};
#define NVA06F_V0_NTFY_NON_STALL_INTERRUPT 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index e3a2ea8bde70..a7c5bf572788 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -15,6 +15,23 @@
#define NVIF_CLASS_SW_NV50 /* if0005.h */ -0x00000006
#define NVIF_CLASS_SW_GF100 /* if0005.h */ -0x00000007
+#define NVIF_CLASS_MMU /* if0008.h */ 0x80000008
+#define NVIF_CLASS_MMU_NV04 /* if0008.h */ 0x80000009
+#define NVIF_CLASS_MMU_NV50 /* if0008.h */ 0x80005009
+#define NVIF_CLASS_MMU_GF100 /* if0008.h */ 0x80009009
+
+#define NVIF_CLASS_MEM /* if000a.h */ 0x8000000a
+#define NVIF_CLASS_MEM_NV04 /* if000b.h */ 0x8000000b
+#define NVIF_CLASS_MEM_NV50 /* if500b.h */ 0x8000500b
+#define NVIF_CLASS_MEM_GF100 /* if900b.h */ 0x8000900b
+
+#define NVIF_CLASS_VMM /* if000c.h */ 0x8000000c
+#define NVIF_CLASS_VMM_NV04 /* if000d.h */ 0x8000000d
+#define NVIF_CLASS_VMM_NV50 /* if500d.h */ 0x8000500d
+#define NVIF_CLASS_VMM_GF100 /* if900d.h */ 0x8000900d
+#define NVIF_CLASS_VMM_GM200 /* ifb00d.h */ 0x8000b00d
+#define NVIF_CLASS_VMM_GP100 /* ifc00d.h */ 0x8000c00d
+
/* the below match nvidia-assigned (either in hw, or sw) class numbers */
#define NV_NULL_CLASS 0x00000030
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 09439b037870..6edb6266857e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -39,7 +39,6 @@ u64 nvif_device_time(struct nvif_device *);
/*XXX*/
#include <subdev/bios.h>
#include <subdev/fb.h>
-#include <subdev/mmu.h>
#include <subdev/bar.h>
#include <subdev/gpio.h>
#include <subdev/clk.h>
@@ -58,8 +57,6 @@ u64 nvif_device_time(struct nvif_device *);
})
#define nvxx_bios(a) nvxx_device(a)->bios
#define nvxx_fb(a) nvxx_device(a)->fb
-#define nvxx_mmu(a) nvxx_device(a)->mmu
-#define nvxx_bar(a) nvxx_device(a)->bar
#define nvxx_gpio(a) nvxx_device(a)->gpio
#define nvxx_clk(a) nvxx_device(a)->clk
#define nvxx_i2c(a) nvxx_device(a)->i2c
@@ -67,10 +64,8 @@ u64 nvif_device_time(struct nvif_device *);
#define nvxx_therm(a) nvxx_device(a)->therm
#define nvxx_volt(a) nvxx_device(a)->volt
-#include <core/device.h>
#include <engine/fifo.h>
#include <engine/gr.h>
-#include <engine/sw.h>
#define nvxx_fifo(a) nvxx_device(a)->fifo
#define nvxx_gr(a) nvxx_device(a)->gr
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0008.h b/drivers/gpu/drm/nouveau/include/nvif/if0008.h
new file mode 100644
index 000000000000..8450127420f5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0008.h
@@ -0,0 +1,42 @@
+#ifndef __NVIF_IF0008_H__
+#define __NVIF_IF0008_H__
+struct nvif_mmu_v0 {
+ __u8 version;
+ __u8 dmabits;
+ __u8 heap_nr;
+ __u8 type_nr;
+ __u16 kind_nr;
+};
+
+#define NVIF_MMU_V0_HEAP 0x00
+#define NVIF_MMU_V0_TYPE 0x01
+#define NVIF_MMU_V0_KIND 0x02
+
+struct nvif_mmu_heap_v0 {
+ __u8 version;
+ __u8 index;
+ __u8 pad02[6];
+ __u64 size;
+};
+
+struct nvif_mmu_type_v0 {
+ __u8 version;
+ __u8 index;
+ __u8 heap;
+ __u8 vram;
+ __u8 host;
+ __u8 comp;
+ __u8 disp;
+ __u8 kind;
+ __u8 mappable;
+ __u8 coherent;
+ __u8 uncached;
+};
+
+struct nvif_mmu_kind_v0 {
+ __u8 version;
+ __u8 pad01[1];
+ __u16 count;
+ __u8 data[];
+};
+#endif
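
The three NVIF_MMU_V0_* method numbers above pair with the nvif_mmu_heap_v0/type_v0/kind_v0 argument structures that follow them. A minimal sketch of how a heap query might look, assuming nvif_object_mthd() (declared in nvif/object.h) as the transport; illustration only, not part of the patch:

/* Illustration only: query the size of heap 0 via NVIF_MMU_V0_HEAP. */
static int example_mmu_heap_size(struct nvif_object *mmu_object, u64 *size)
{
	struct nvif_mmu_heap_v0 args = { .index = 0 };
	int ret = nvif_object_mthd(mmu_object, NVIF_MMU_V0_HEAP,
				   &args, sizeof(args));
	if (ret == 0)
		*size = args.size;	/* size of the requested heap */
	return ret;
}
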
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000a.h b/drivers/gpu/drm/nouveau/include/nvif/if000a.h
new file mode 100644
index 000000000000..88d0938fbd5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000a.h
@@ -0,0 +1,22 @@
+#ifndef __NVIF_IF000A_H__
+#define __NVIF_IF000A_H__
+struct nvif_mem_v0 {
+ __u8 version;
+ __u8 type;
+ __u8 page;
+ __u8 pad03[5];
+ __u64 size;
+ __u64 addr;
+ __u8 data[];
+};
+
+struct nvif_mem_ram_vn {
+};
+
+struct nvif_mem_ram_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ dma_addr_t *dma;
+ struct scatterlist *sgl;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000b.h b/drivers/gpu/drm/nouveau/include/nvif/if000b.h
new file mode 100644
index 000000000000..c677fb0293da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000b.h
@@ -0,0 +1,11 @@
+#ifndef __NVIF_IF000B_H__
+#define __NVIF_IF000B_H__
+#include "if000a.h"
+
+struct nv04_mem_vn {
+ /* nvkm_mem_vX ... */
+};
+
+struct nv04_mem_map_vn {
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000c.h b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
new file mode 100644
index 000000000000..2928ecd989ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
@@ -0,0 +1,64 @@
+#ifndef __NVIF_IF000C_H__
+#define __NVIF_IF000C_H__
+struct nvif_vmm_v0 {
+ __u8 version;
+ __u8 page_nr;
+ __u8 pad02[6];
+ __u64 addr;
+ __u64 size;
+ __u8 data[];
+};
+
+#define NVIF_VMM_V0_PAGE 0x00
+#define NVIF_VMM_V0_GET 0x01
+#define NVIF_VMM_V0_PUT 0x02
+#define NVIF_VMM_V0_MAP 0x03
+#define NVIF_VMM_V0_UNMAP 0x04
+
+struct nvif_vmm_page_v0 {
+ __u8 version;
+ __u8 index;
+ __u8 shift;
+ __u8 sparse;
+ __u8 vram;
+ __u8 host;
+ __u8 comp;
+ __u8 pad07[1];
+};
+
+struct nvif_vmm_get_v0 {
+ __u8 version;
+#define NVIF_VMM_GET_V0_ADDR 0x00
+#define NVIF_VMM_GET_V0_PTES 0x01
+#define NVIF_VMM_GET_V0_LAZY 0x02
+ __u8 type;
+ __u8 sparse;
+ __u8 page;
+ __u8 align;
+ __u8 pad05[3];
+ __u64 size;
+ __u64 addr;
+};
+
+struct nvif_vmm_put_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 addr;
+};
+
+struct nvif_vmm_map_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 addr;
+ __u64 size;
+ __u64 memory;
+ __u64 offset;
+ __u8 data[];
+};
+
+struct nvif_vmm_unmap_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 addr;
+};
+#endif
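
The nvif_vmm_v0 object above exposes the PAGE/GET/PUT/MAP/UNMAP methods whose argument layouts follow. A hedged sketch of a GET request; the notes on type/page/align are inferred from the field names and from later users in this series, and the call itself is illustration only:

/* Illustration only: request 1MiB of lazily-referenced address space. */
static int example_vmm_get(struct nvif_object *vmm_object, u64 *addr)
{
	struct nvif_vmm_get_v0 args = {
		.type = NVIF_VMM_GET_V0_LAZY,	/* reference PTEs at map time */
		.sparse = 0,
		.page = 0,	/* page-size index, as enumerated via NVIF_VMM_V0_PAGE */
		.align = 0,	/* 0 = default alignment for the chosen page size */
		.size = 1ULL << 20,
	};
	int ret = nvif_object_mthd(vmm_object, NVIF_VMM_V0_GET,
				   &args, sizeof(args));
	if (ret == 0)
		*addr = args.addr;	/* allocated virtual address */
	return ret;
}
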
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000d.h b/drivers/gpu/drm/nouveau/include/nvif/if000d.h
new file mode 100644
index 000000000000..516ec9401401
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000d.h
@@ -0,0 +1,12 @@
+#ifndef __NVIF_IF000D_H__
+#define __NVIF_IF000D_H__
+#include "if000c.h"
+
+struct nv04_vmm_vn {
+ /* nvif_vmm_vX ... */
+};
+
+struct nv04_vmm_map_vn {
+ /* nvif_vmm_map_vX ... */
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if500b.h b/drivers/gpu/drm/nouveau/include/nvif/if500b.h
new file mode 100644
index 000000000000..c7c8431fb2ce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if500b.h
@@ -0,0 +1,25 @@
+#ifndef __NVIF_IF500B_H__
+#define __NVIF_IF500B_H__
+#include "if000a.h"
+
+struct nv50_mem_vn {
+ /* nvif_mem_vX ... */
+};
+
+struct nv50_mem_v0 {
+ /* nvif_mem_vX ... */
+ __u8 version;
+ __u8 bankswz;
+ __u8 contig;
+};
+
+struct nv50_mem_map_vn {
+};
+
+struct nv50_mem_map_v0 {
+ __u8 version;
+ __u8 ro;
+ __u8 kind;
+ __u8 comp;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if500d.h b/drivers/gpu/drm/nouveau/include/nvif/if500d.h
new file mode 100644
index 000000000000..c29a7822b363
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if500d.h
@@ -0,0 +1,21 @@
+#ifndef __NVIF_IF500D_H__
+#define __NVIF_IF500D_H__
+#include "if000c.h"
+
+struct nv50_vmm_vn {
+ /* nvif_vmm_vX ... */
+};
+
+struct nv50_vmm_map_vn {
+ /* nvif_vmm_map_vX ... */
+};
+
+struct nv50_vmm_map_v0 {
+ /* nvif_vmm_map_vX ... */
+ __u8 version;
+ __u8 ro;
+ __u8 priv;
+ __u8 kind;
+ __u8 comp;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if900b.h b/drivers/gpu/drm/nouveau/include/nvif/if900b.h
new file mode 100644
index 000000000000..9b164548eea8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if900b.h
@@ -0,0 +1,23 @@
+#ifndef __NVIF_IF900B_H__
+#define __NVIF_IF900B_H__
+#include "if000a.h"
+
+struct gf100_mem_vn {
+ /* nvif_mem_vX ... */
+};
+
+struct gf100_mem_v0 {
+ /* nvif_mem_vX ... */
+ __u8 version;
+ __u8 contig;
+};
+
+struct gf100_mem_map_vn {
+};
+
+struct gf100_mem_map_v0 {
+ __u8 version;
+ __u8 ro;
+ __u8 kind;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if900d.h b/drivers/gpu/drm/nouveau/include/nvif/if900d.h
new file mode 100644
index 000000000000..49aa50583c3d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/if900d.h
@@ -0,0 +1,21 @@
+#ifndef __NVIF_IF900D_H__
+#define __NVIF_IF900D_H__
+#include "if000c.h"
+
+struct gf100_vmm_vn {
+ /* nvif_vmm_vX ... */
+};
+
+struct gf100_vmm_map_vn {
+ /* nvif_vmm_map_vX ... */
+};
+
+struct gf100_vmm_map_v0 {
+ /* nvif_vmm_map_vX ... */
+ __u8 version;
+ __u8 vol;
+ __u8 ro;
+ __u8 priv;
+ __u8 kind;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ifb00d.h b/drivers/gpu/drm/nouveau/include/nvif/ifb00d.h
new file mode 100644
index 000000000000..a0e419830595
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/ifb00d.h
@@ -0,0 +1,27 @@
+#ifndef __NVIF_IFB00D_H__
+#define __NVIF_IFB00D_H__
+#include "if000c.h"
+
+struct gm200_vmm_vn {
+ /* nvif_vmm_vX ... */
+};
+
+struct gm200_vmm_v0 {
+ /* nvif_vmm_vX ... */
+ __u8 version;
+ __u8 bigpage;
+};
+
+struct gm200_vmm_map_vn {
+ /* nvif_vmm_map_vX ... */
+};
+
+struct gm200_vmm_map_v0 {
+ /* nvif_vmm_map_vX ... */
+ __u8 version;
+ __u8 vol;
+ __u8 ro;
+ __u8 priv;
+ __u8 kind;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ifc00d.h b/drivers/gpu/drm/nouveau/include/nvif/ifc00d.h
new file mode 100644
index 000000000000..1d9c637859f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/ifc00d.h
@@ -0,0 +1,21 @@
+#ifndef __NVIF_IFC00D_H__
+#define __NVIF_IFC00D_H__
+#include "if000c.h"
+
+struct gp100_vmm_vn {
+ /* nvif_vmm_vX ... */
+};
+
+struct gp100_vmm_map_vn {
+ /* nvif_vmm_map_vX ... */
+};
+
+struct gp100_vmm_map_v0 {
+ /* nvif_vmm_map_vX ... */
+ __u8 version;
+ __u8 vol;
+ __u8 ro;
+ __u8 priv;
+ __u8 kind;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index 688c4bcd9c64..b93d586a2304 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -2,7 +2,7 @@
#ifndef __NVIF_IOCTL_H__
#define __NVIF_IOCTL_H__
-#define NVIF_VERSION_LATEST 0x0000000000000000ULL
+#define NVIF_VERSION_LATEST 0x0000000000000100ULL
struct nvif_ioctl_v0 {
__u8 version;
@@ -84,9 +84,13 @@ struct nvif_ioctl_wr_v0 {
struct nvif_ioctl_map_v0 {
/* nvif_ioctl ... */
__u8 version;
- __u8 pad01[3];
- __u32 length;
+#define NVIF_IOCTL_MAP_V0_IO 0x00
+#define NVIF_IOCTL_MAP_V0_VA 0x01
+ __u8 type;
+ __u8 pad02[6];
__u64 handle;
+ __u64 length;
+ __u8 data[];
};
struct nvif_ioctl_unmap {
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mem.h b/drivers/gpu/drm/nouveau/include/nvif/mem.h
new file mode 100644
index 000000000000..b542fe38398e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/mem.h
@@ -0,0 +1,18 @@
+#ifndef __NVIF_MEM_H__
+#define __NVIF_MEM_H__
+#include "mmu.h"
+
+struct nvif_mem {
+ struct nvif_object object;
+ u8 type;
+ u8 page;
+ u64 addr;
+ u64 size;
+};
+
+int nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
+ u64 size, void *argv, u32 argc, struct nvif_mem *);
+int nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
+ u64 size, void *argv, u32 argc, struct nvif_mem *);
+void nvif_mem_fini(struct nvif_mem *);
+#endif
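
A hypothetical allocation through the interface declared above; the class and page-shift values are assumptions for illustration, and real callers select the class from those the parent MMU exposes (possibly passing per-class arguments via argv/argc):

/* Illustration only: allocate a 64KiB mappable host memory object. */
static int example_mem_new(struct nvif_mmu *mmu, struct nvif_mem *mem)
{
	int type = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
	if (type < 0)
		return type;	/* no memory type satisfies the requirements */
	return nvif_mem_init_type(mmu, NVIF_CLASS_MEM_GF100, type, 12,
				  0x10000, NULL, 0, mem);
}
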
diff --git a/drivers/gpu/drm/nouveau/include/nvif/mmu.h b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
new file mode 100644
index 000000000000..c8cd5b5b0688
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/mmu.h
@@ -0,0 +1,56 @@
+#ifndef __NVIF_MMU_H__
+#define __NVIF_MMU_H__
+#include <nvif/object.h>
+
+struct nvif_mmu {
+ struct nvif_object object;
+ u8 dmabits;
+ u8 heap_nr;
+ u8 type_nr;
+ u16 kind_nr;
+
+ struct {
+ u64 size;
+ } *heap;
+
+ struct {
+#define NVIF_MEM_VRAM 0x01
+#define NVIF_MEM_HOST 0x02
+#define NVIF_MEM_COMP 0x04
+#define NVIF_MEM_DISP 0x08
+#define NVIF_MEM_KIND 0x10
+#define NVIF_MEM_MAPPABLE 0x20
+#define NVIF_MEM_COHERENT 0x40
+#define NVIF_MEM_UNCACHED 0x80
+ u8 type;
+ u8 heap;
+ } *type;
+
+ u8 *kind;
+};
+
+int nvif_mmu_init(struct nvif_object *, s32 oclass, struct nvif_mmu *);
+void nvif_mmu_fini(struct nvif_mmu *);
+
+static inline bool
+nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind)
+{
+ const u8 invalid = mmu->kind_nr - 1;
+ if (kind) {
+ if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid)
+ return false;
+ }
+ return true;
+}
+
+static inline int
+nvif_mmu_type(struct nvif_mmu *mmu, u8 mask)
+{
+ int i;
+ for (i = 0; i < mmu->type_nr; i++) {
+ if ((mmu->type[i].type & mask) == mask)
+ return i;
+ }
+ return -EINVAL;
+}
+#endif
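
Intended use of the two inline helpers above, as a sketch only: translate a set of NVIF_MEM_* requirements into a memory type index, and validate a PTE kind against the device's kind table before relying on it. The particular flag combination is an assumption:

/* Illustration only, not part of the patch. */
static int example_pick_vram_type(struct nvif_mmu *mmu, u8 kind)
{
	if (!nvif_mmu_kind_valid(mmu, kind))
		return -EINVAL;	/* kind out of range or marked invalid */
	return nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE);
}
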
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 6912b8cffc98..a2d5244ff2b7 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -17,7 +17,7 @@ struct nvif_object {
void *priv; /*XXX: hack */
struct {
void __iomem *ptr;
- u32 size;
+ u64 size;
} map;
};
@@ -30,7 +30,10 @@ void nvif_object_sclass_put(struct nvif_sclass **);
u32 nvif_object_rd(struct nvif_object *, int, u64);
void nvif_object_wr(struct nvif_object *, int, u64, u32);
int nvif_object_mthd(struct nvif_object *, u32, void *, u32);
-int nvif_object_map(struct nvif_object *);
+int nvif_object_map_handle(struct nvif_object *, void *, u32,
+ u64 *handle, u64 *length);
+void nvif_object_unmap_handle(struct nvif_object *);
+int nvif_object_map(struct nvif_object *, void *, u32);
void nvif_object_unmap(struct nvif_object *);
#define nvif_handle(a) (unsigned long)(void *)(a)
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index 6b16ab6b26d5..fd09b2842972 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -34,18 +34,4 @@
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
-
-#ifndef ioread32_native
-#ifdef __BIG_ENDIAN
-#define ioread16_native ioread16be
-#define iowrite16_native iowrite16be
-#define ioread32_native ioread32be
-#define iowrite32_native iowrite32be
-#else /* def __BIG_ENDIAN */
-#define ioread16_native ioread16
-#define iowrite16_native iowrite16
-#define ioread32_native ioread32
-#define iowrite32_native iowrite32
-#endif /* def __BIG_ENDIAN else */
-#endif /* !ioread32_native */
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/vmm.h b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
new file mode 100644
index 000000000000..c5db8a2e82df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
@@ -0,0 +1,42 @@
+#ifndef __NVIF_VMM_H__
+#define __NVIF_VMM_H__
+#include <nvif/object.h>
+struct nvif_mem;
+struct nvif_mmu;
+
+enum nvif_vmm_get {
+ ADDR,
+ PTES,
+ LAZY
+};
+
+struct nvif_vma {
+ u64 addr;
+ u64 size;
+};
+
+struct nvif_vmm {
+ struct nvif_object object;
+ u64 start;
+ u64 limit;
+
+ struct {
+ u8 shift;
+ bool sparse:1;
+ bool vram:1;
+ bool host:1;
+ bool comp:1;
+ } *page;
+ int page_nr;
+};
+
+int nvif_vmm_init(struct nvif_mmu *, s32 oclass, u64 addr, u64 size,
+ void *argv, u32 argc, struct nvif_vmm *);
+void nvif_vmm_fini(struct nvif_vmm *);
+int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
+ u8 page, u8 align, u64 size, struct nvif_vma *);
+void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
+int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
+ struct nvif_mem *, u64 offset);
+int nvif_vmm_unmap(struct nvif_vmm *, u64);
+#endif
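
A hedged sketch combining the helpers above to carve address space out of a VMM and map an nvif_mem into it; passing NULL/0 for the per-class map arguments is an assumption for the example:

/* Illustration only: allocate a VMA sized to the memory object and map it. */
static int example_vmm_map(struct nvif_vmm *vmm, struct nvif_mem *mem,
			   struct nvif_vma *vma)
{
	int ret = nvif_vmm_get(vmm, LAZY, false, 0, 0, mem->size, vma);
	if (ret)
		return ret;
	ret = nvif_vmm_map(vmm, vma->addr, mem->size, NULL, 0, mem, 0);
	if (ret)
		nvif_vmm_put(vmm, vma);	/* release the VMA if mapping failed */
	return ret;
}
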
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index ca23230d5743..757fac823a10 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -17,7 +17,8 @@ struct nvkm_client {
void *data;
int (*ntfy)(const void *, u32, const void *, u32);
- struct nvkm_vm *vm;
+ struct list_head umem;
+ spinlock_t lock;
};
int nvkm_client_new(const char *name, u64 device, const char *cfg,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index d7ecb65ba19f..560265b15ec2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVICE_H__
#define __NVKM_DEVICE_H__
+#include <core/oclass.h>
#include <core/event.h>
-#include <core/object.h>
enum nvkm_devidx {
NVKM_SUBDEV_PCI,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index c6bcd8a64cae..ebf8473a39fe 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -16,6 +16,7 @@ struct nvkm_engine {
struct nvkm_engine_func {
void *(*dtor)(struct nvkm_engine *);
+ void (*preinit)(struct nvkm_engine *);
int (*oneinit)(struct nvkm_engine *);
int (*init)(struct nvkm_engine *);
int (*fini)(struct nvkm_engine *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index 473ba0b9a368..10eeaeebc242 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -1,18 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GPUOBJ_H__
#define __NVKM_GPUOBJ_H__
-#include <core/object.h>
#include <core/memory.h>
#include <core/mm.h>
-struct nvkm_vma;
-struct nvkm_vm;
#define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
#define NVOBJ_FLAG_HEAP 0x00000004
struct nvkm_gpuobj {
- struct nvkm_object object;
- const struct nvkm_gpuobj_func *func;
+ union {
+ const struct nvkm_gpuobj_func *func;
+ const struct nvkm_gpuobj_func *ptrs;
+ };
struct nvkm_gpuobj *parent;
struct nvkm_memory *memory;
struct nvkm_mm_node *node;
@@ -29,15 +28,14 @@ struct nvkm_gpuobj_func {
void (*release)(struct nvkm_gpuobj *);
u32 (*rd32)(struct nvkm_gpuobj *, u32 offset);
void (*wr32)(struct nvkm_gpuobj *, u32 offset, u32 data);
+ int (*map)(struct nvkm_gpuobj *, u64 offset, struct nvkm_vmm *,
+ struct nvkm_vma *, void *argv, u32 argc);
};
int nvkm_gpuobj_new(struct nvkm_device *, u32 size, int align, bool zero,
struct nvkm_gpuobj *parent, struct nvkm_gpuobj **);
void nvkm_gpuobj_del(struct nvkm_gpuobj **);
int nvkm_gpuobj_wrap(struct nvkm_memory *, struct nvkm_gpuobj **);
-int nvkm_gpuobj_map(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
- struct nvkm_vma *);
-void nvkm_gpuobj_unmap(struct nvkm_vma *);
void nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src,
u32 length);
void nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index affba21fcbad..05f505de0075 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -4,7 +4,12 @@
#include <core/os.h>
struct nvkm_device;
struct nvkm_vma;
-struct nvkm_vm;
+struct nvkm_vmm;
+
+struct nvkm_tags {
+ struct nvkm_mm_node *mn;
+ refcount_t refcount;
+};
enum nvkm_memory_target {
NVKM_MEM_TARGET_INST, /* instance memory */
@@ -15,41 +20,84 @@ enum nvkm_memory_target {
struct nvkm_memory {
const struct nvkm_memory_func *func;
+ const struct nvkm_memory_ptrs *ptrs;
+ struct kref kref;
+ struct nvkm_tags *tags;
};
struct nvkm_memory_func {
void *(*dtor)(struct nvkm_memory *);
enum nvkm_memory_target (*target)(struct nvkm_memory *);
+ u8 (*page)(struct nvkm_memory *);
u64 (*addr)(struct nvkm_memory *);
u64 (*size)(struct nvkm_memory *);
- void (*boot)(struct nvkm_memory *, struct nvkm_vm *);
+ void (*boot)(struct nvkm_memory *, struct nvkm_vmm *);
void __iomem *(*acquire)(struct nvkm_memory *);
void (*release)(struct nvkm_memory *);
+ int (*map)(struct nvkm_memory *, u64 offset, struct nvkm_vmm *,
+ struct nvkm_vma *, void *argv, u32 argc);
+};
+
+struct nvkm_memory_ptrs {
u32 (*rd32)(struct nvkm_memory *, u64 offset);
void (*wr32)(struct nvkm_memory *, u64 offset, u32 data);
- void (*map)(struct nvkm_memory *, struct nvkm_vma *, u64 offset);
};
void nvkm_memory_ctor(const struct nvkm_memory_func *, struct nvkm_memory *);
int nvkm_memory_new(struct nvkm_device *, enum nvkm_memory_target,
u64 size, u32 align, bool zero, struct nvkm_memory **);
-void nvkm_memory_del(struct nvkm_memory **);
+struct nvkm_memory *nvkm_memory_ref(struct nvkm_memory *);
+void nvkm_memory_unref(struct nvkm_memory **);
+int nvkm_memory_tags_get(struct nvkm_memory *, struct nvkm_device *, u32 tags,
+ void (*clear)(struct nvkm_device *, u32, u32),
+ struct nvkm_tags **);
+void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *,
+ struct nvkm_tags **);
+
#define nvkm_memory_target(p) (p)->func->target(p)
+#define nvkm_memory_page(p) (p)->func->page(p)
#define nvkm_memory_addr(p) (p)->func->addr(p)
#define nvkm_memory_size(p) (p)->func->size(p)
#define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
-#define nvkm_memory_map(p,v,o) (p)->func->map((p),(v),(o))
+#define nvkm_memory_map(p,o,vm,va,av,ac) \
+ (p)->func->map((p),(o),(vm),(va),(av),(ac))
/* accessor macros - kmap()/done() must bracket use of the other accessor
* macros to guarantee correct behaviour across all chipsets
*/
#define nvkm_kmap(o) (o)->func->acquire(o)
-#define nvkm_ro32(o,a) (o)->func->rd32((o), (a))
-#define nvkm_wo32(o,a,d) (o)->func->wr32((o), (a), (d))
+#define nvkm_done(o) (o)->func->release(o)
+
+#define nvkm_ro32(o,a) (o)->ptrs->rd32((o), (a))
+#define nvkm_wo32(o,a,d) (o)->ptrs->wr32((o), (a), (d))
#define nvkm_mo32(o,a,m,d) ({ \
u32 _addr = (a), _data = nvkm_ro32((o), _addr); \
nvkm_wo32((o), _addr, (_data & ~(m)) | (d)); \
_data; \
})
-#define nvkm_done(o) (o)->func->release(o)
+
+#define nvkm_wo64(o,a,d) do { \
+ u64 __a = (a), __d = (d); \
+ nvkm_wo32((o), __a + 0, lower_32_bits(__d)); \
+ nvkm_wo32((o), __a + 4, upper_32_bits(__d)); \
+} while(0)
+
+#define nvkm_fill(t,s,o,a,d,c) do { \
+ u64 _a = (a), _c = (c), _d = (d), _o = _a >> s, _s = _c << s; \
+ u##t __iomem *_m = nvkm_kmap(o); \
+ if (likely(_m)) { \
+ if (_d) { \
+ while (_c--) \
+ iowrite##t##_native(_d, &_m[_o++]); \
+ } else { \
+ memset_io(&_m[_o], _d, _s); \
+ } \
+ } else { \
+ for (; _c; _c--, _a += BIT(s)) \
+ nvkm_wo##t((o), _a, _d); \
+ } \
+ nvkm_done(o); \
+} while(0)
+#define nvkm_fo32(o,a,d,c) nvkm_fill(32, 2, (o), (a), (d), (c))
+#define nvkm_fo64(o,a,d,c) nvkm_fill(64, 3, (o), (a), (d), (c))
#endif
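
The nvkm_fill() macro above either writes through a kmap'd CPU mapping (memset_io() for zero fills, iowrite*_native() otherwise) or falls back to per-element nvkm_wo*() writes when no mapping is available. A short usage sketch, illustration only:

/* Illustration only: nvkm_fo32() brackets itself with nvkm_kmap()/nvkm_done();
 * other accessors must be bracketed explicitly, per the comment above.
 */
static void example_init_object(struct nvkm_memory *memory)
{
	nvkm_fo32(memory, 0x000, 0x00000000, 0x100 >> 2);	/* zero 64 dwords */

	nvkm_kmap(memory);
	nvkm_wo64(memory, 0x100, 0x0000000123456789ULL);	/* one 64-bit write */
	nvkm_done(memory);
}
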
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index 2002a4da9999..b0726c39429e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -31,7 +31,7 @@ nvkm_mm_initialised(struct nvkm_mm *mm)
return mm->heap_nodes;
}
-int nvkm_mm_init(struct nvkm_mm *, u32 offset, u32 length, u32 block);
+int nvkm_mm_init(struct nvkm_mm *, u8 heap, u32 offset, u32 length, u32 block);
int nvkm_mm_fini(struct nvkm_mm *);
int nvkm_mm_head(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
u32 size_min, u32 align, struct nvkm_mm_node **);
@@ -40,9 +40,39 @@ int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
void nvkm_mm_dump(struct nvkm_mm *, const char *);
+static inline u32
+nvkm_mm_heap_size(struct nvkm_mm *mm, u8 heap)
+{
+ struct nvkm_mm_node *node;
+ u32 size = 0;
+ list_for_each_entry(node, &mm->nodes, nl_entry) {
+ if (node->heap == heap)
+ size += node->length;
+ }
+ return size;
+}
+
static inline bool
nvkm_mm_contiguous(struct nvkm_mm_node *node)
{
return !node->next;
}
+
+static inline u32
+nvkm_mm_addr(struct nvkm_mm_node *node)
+{
+ if (WARN_ON(!nvkm_mm_contiguous(node)))
+ return 0;
+ return node->offset;
+}
+
+static inline u32
+nvkm_mm_size(struct nvkm_mm_node *node)
+{
+ u32 size = 0;
+ do {
+ size += node->length;
+ } while ((node = node->next));
+ return size;
+}
#endif
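
The helpers added above operate on allocations that may span several chained nvkm_mm_node fragments: nvkm_mm_size() sums the ->next chain, while nvkm_mm_addr() only makes sense for contiguous allocations (hence its WARN_ON). A sketch, with the unit of offset/size left to the allocator's configuration:

/* Illustration only, not part of the patch. */
static void example_report(struct nvkm_mm_node *node)
{
	u32 size = nvkm_mm_size(node);	/* total across all fragments */
	if (nvkm_mm_contiguous(node))
		pr_info("contiguous: offset %08x size %08x\n",
			nvkm_mm_addr(node), size);
	else
		pr_info("fragmented: total size %08x\n", size);
}
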
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index 3f13ff1d4ee4..270f893cc154 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -1,11 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_OBJECT_H__
#define __NVKM_OBJECT_H__
-#include <core/os.h>
-#include <core/debug.h>
+#include <core/oclass.h>
struct nvkm_event;
struct nvkm_gpuobj;
-struct nvkm_oclass;
struct nvkm_object {
const struct nvkm_object_func *func;
@@ -22,13 +20,20 @@ struct nvkm_object {
struct rb_node node;
};
+enum nvkm_object_map {
+ NVKM_OBJECT_MAP_IO,
+ NVKM_OBJECT_MAP_VA
+};
+
struct nvkm_object_func {
void *(*dtor)(struct nvkm_object *);
int (*init)(struct nvkm_object *);
int (*fini)(struct nvkm_object *, bool suspend);
int (*mthd)(struct nvkm_object *, u32 mthd, void *data, u32 size);
int (*ntfy)(struct nvkm_object *, u32 mthd, struct nvkm_event **);
- int (*map)(struct nvkm_object *, u64 *addr, u32 *size);
+ int (*map)(struct nvkm_object *, void *argv, u32 argc,
+ enum nvkm_object_map *, u64 *addr, u64 *size);
+ int (*unmap)(struct nvkm_object *);
int (*rd08)(struct nvkm_object *, u64 addr, u8 *data);
int (*rd16)(struct nvkm_object *, u64 addr, u16 *data);
int (*rd32)(struct nvkm_object *, u64 addr, u32 *data);
@@ -53,7 +58,9 @@ int nvkm_object_init(struct nvkm_object *);
int nvkm_object_fini(struct nvkm_object *, bool suspend);
int nvkm_object_mthd(struct nvkm_object *, u32 mthd, void *data, u32 size);
int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **);
-int nvkm_object_map(struct nvkm_object *, u64 *addr, u32 *size);
+int nvkm_object_map(struct nvkm_object *, void *argv, u32 argc,
+ enum nvkm_object_map *, u64 *addr, u64 *size);
+int nvkm_object_unmap(struct nvkm_object *);
int nvkm_object_rd08(struct nvkm_object *, u64 addr, u8 *data);
int nvkm_object_rd16(struct nvkm_object *, u64 addr, u16 *data);
int nvkm_object_rd32(struct nvkm_object *, u64 addr, u32 *data);
@@ -67,28 +74,4 @@ bool nvkm_object_insert(struct nvkm_object *);
void nvkm_object_remove(struct nvkm_object *);
struct nvkm_object *nvkm_object_search(struct nvkm_client *, u64 object,
const struct nvkm_object_func *);
-
-struct nvkm_sclass {
- int minver;
- int maxver;
- s32 oclass;
- const struct nvkm_object_func *func;
- int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
- struct nvkm_object **);
-};
-
-struct nvkm_oclass {
- int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
- struct nvkm_object **);
- struct nvkm_sclass base;
- const void *priv;
- const void *engn;
- u32 handle;
- u8 route;
- u64 token;
- u64 object;
- struct nvkm_client *client;
- struct nvkm_object *parent;
- struct nvkm_engine *engine;
-};
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
new file mode 100644
index 000000000000..8e1b945d38f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h
@@ -0,0 +1,31 @@
+#ifndef __NVKM_OCLASS_H__
+#define __NVKM_OCLASS_H__
+#include <core/os.h>
+#include <core/debug.h>
+struct nvkm_oclass;
+struct nvkm_object;
+
+struct nvkm_sclass {
+ int minver;
+ int maxver;
+ s32 oclass;
+ const struct nvkm_object_func *func;
+ int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+};
+
+struct nvkm_oclass {
+ int (*ctor)(const struct nvkm_oclass *, void *data, u32 size,
+ struct nvkm_object **);
+ struct nvkm_sclass base;
+ const void *priv;
+ const void *engn;
+ u32 handle;
+ u8 route;
+ u64 token;
+ u64 object;
+ struct nvkm_client *client;
+ struct nvkm_object *parent;
+ struct nvkm_engine *engine;
+};
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
index fc9e8cd36087..445602d1e8d3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -2,4 +2,23 @@
#ifndef __NVKM_OS_H__
#define __NVKM_OS_H__
#include <nvif/os.h>
+
+#ifdef __BIG_ENDIAN
+#define ioread16_native ioread16be
+#define iowrite16_native iowrite16be
+#define ioread32_native ioread32be
+#define iowrite32_native iowrite32be
+#else
+#define ioread16_native ioread16
+#define iowrite16_native iowrite16
+#define ioread32_native ioread32
+#define iowrite32_native iowrite32
+#endif
+
+#define iowrite64_native(v,p) do { \
+ u32 __iomem *_p = (u32 __iomem *)(p); \
+ u64 _v = (v); \
+ iowrite32_native(lower_32_bits(_v), &_p[0]); \
+ iowrite32_native(upper_32_bits(_v), &_p[1]); \
+} while(0)
#endif
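
iowrite64_native() above is built from two 32-bit native-endian writes (low word first, then high word), so it is not a single atomic bus transaction. Illustration only; the register offset is hypothetical:

static void example_write64(void __iomem *regs, u64 value)
{
	iowrite64_native(value, regs + 0x10);
}
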
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
index 674a38408240..d5d789663aca 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
@@ -2,6 +2,7 @@
#ifndef __NVKM_RAMHT_H__
#define __NVKM_RAMHT_H__
#include <core/gpuobj.h>
+struct nvkm_object;
struct nvkm_ramht_data {
struct nvkm_gpuobj *inst;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 38f51ff7ab40..63df2290177f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -34,7 +34,7 @@ void nvkm_subdev_intr(struct nvkm_subdev *);
/* subdev logging */
#define nvkm_printk_(s,l,p,f,a...) do { \
const struct nvkm_subdev *_subdev = (s); \
- if (_subdev->debug >= (l)) { \
+ if (CONFIG_NOUVEAU_DEBUG >= (l) && _subdev->debug >= (l)) { \
dev_##p(_subdev->device->dev, "%s: "f, \
nvkm_subdev_name[_subdev->index], ##a); \
} \
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
index 5f5cae7c474e..0f9c1c702ed6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -2,6 +2,7 @@
#ifndef __NVKM_DMA_H__
#define __NVKM_DMA_H__
#include <core/engine.h>
+#include <core/object.h>
struct nvkm_client;
struct nvkm_dmaobj {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 5a51842bc241..6427747b6f77 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -4,6 +4,7 @@
#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
#include <core/engine.h>
struct nvkm_fifo_chan;
+struct nvkm_gpuobj;
enum nvkm_falcon_dmaidx {
FALCON_DMAIDX_UCODE = 0,
@@ -78,7 +79,7 @@ struct nvkm_falcon_func {
void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool);
void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8);
void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *);
- void (*bind_context)(struct nvkm_falcon *, struct nvkm_gpuobj *);
+ void (*bind_context)(struct nvkm_falcon *, struct nvkm_memory *);
int (*wait_for_halt)(struct nvkm_falcon *, u32);
int (*clear_interrupt)(struct nvkm_falcon *, u32);
void (*set_start_addr)(struct nvkm_falcon *, u32 start_addr);
@@ -113,7 +114,7 @@ void nvkm_falcon_load_imem(struct nvkm_falcon *, void *, u32, u32, u16, u8,
bool);
void nvkm_falcon_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8);
void nvkm_falcon_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *);
-void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_gpuobj *);
+void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
void nvkm_falcon_set_start_addr(struct nvkm_falcon *, u32);
void nvkm_falcon_start(struct nvkm_falcon *);
int nvkm_falcon_wait_for_halt(struct nvkm_falcon *, u32);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 025f400c9f5d..c17b3a9bf8fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -2,6 +2,7 @@
#ifndef __NVKM_FIFO_H__
#define __NVKM_FIFO_H__
#include <core/engine.h>
+#include <core/object.h>
#include <core/event.h>
#define NVKM_FIFO_CHID_NR 4096
@@ -22,7 +23,7 @@ struct nvkm_fifo_chan {
u16 chid;
struct nvkm_gpuobj *inst;
struct nvkm_gpuobj *push;
- struct nvkm_vm *vm;
+ struct nvkm_vmm *vmm;
void __iomem *user;
u64 addr;
u32 size;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index 91f1e0efe061..f6bd94c7e0f7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -9,17 +9,22 @@ struct nvkm_bar {
struct nvkm_subdev subdev;
spinlock_t lock;
+ bool bar2;
/* whether the BAR supports to be ioremapped WC or should be uncached */
bool iomap_uncached;
};
+struct nvkm_vmm *nvkm_bar_bar1_vmm(struct nvkm_device *);
+void nvkm_bar_bar2_init(struct nvkm_device *);
+void nvkm_bar_bar2_fini(struct nvkm_device *);
+struct nvkm_vmm *nvkm_bar_bar2_vmm(struct nvkm_device *);
void nvkm_bar_flush(struct nvkm_bar *);
-struct nvkm_vm *nvkm_bar_kmap(struct nvkm_bar *);
-int nvkm_bar_umap(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
int nv50_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int g84_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gm107_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int gm20b_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 4da68dd52619..adb78f7d083a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -2,8 +2,7 @@
#ifndef __NVKM_FB_H__
#define __NVKM_FB_H__
#include <core/subdev.h>
-
-#include <subdev/mmu.h>
+#include <core/mm.h>
/* memory type/access flags, do not match hardware values */
#define NV_MEM_ACCESS_RO 1
@@ -22,22 +21,6 @@
#define NVKM_RAM_TYPE_VM 0x7f
#define NV_MEM_COMP_VM 0x03
-struct nvkm_mem {
- struct drm_device *dev;
-
- struct nvkm_vma bar_vma;
- struct nvkm_vma vma[2];
- u8 page_shift;
-
- struct nvkm_mm_node *tag;
- struct nvkm_mm_node *mem;
- dma_addr_t *pages;
- u32 memtype;
- u64 offset;
- u64 size;
- struct sg_table *sg;
-};
-
struct nvkm_fb_tile {
struct nvkm_mm_node *tag;
u32 addr;
@@ -51,6 +34,7 @@ struct nvkm_fb {
struct nvkm_subdev subdev;
struct nvkm_ram *ram;
+ struct nvkm_mm tags;
struct {
struct nvkm_fb_tile region[16];
@@ -63,7 +47,6 @@ struct nvkm_fb {
struct nvkm_memory *mmu_wr;
};
-bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
void nvkm_fb_tile_init(struct nvkm_fb *, int region, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
void nvkm_fb_tile_fini(struct nvkm_fb *, int region, struct nvkm_fb_tile *);
@@ -130,8 +113,11 @@ struct nvkm_ram {
u64 size;
#define NVKM_RAM_MM_SHIFT 12
+#define NVKM_RAM_MM_ANY (NVKM_MM_HEAP_ANY + 0)
+#define NVKM_RAM_MM_NORMAL (NVKM_MM_HEAP_ANY + 1)
+#define NVKM_RAM_MM_NOMAP (NVKM_MM_HEAP_ANY + 2)
+#define NVKM_RAM_MM_MIXED (NVKM_MM_HEAP_ANY + 3)
struct nvkm_mm vram;
- struct nvkm_mm tags;
u64 stolen;
int ranks;
@@ -148,6 +134,10 @@ struct nvkm_ram {
struct nvkm_ram_data target;
};
+int
+nvkm_ram_get(struct nvkm_device *, u8 heap, u8 type, u8 page, u64 size,
+ bool contig, bool back, struct nvkm_memory **);
+
struct nvkm_ram_func {
u64 upper;
u32 (*probe_fbp)(const struct nvkm_ram_func *, struct nvkm_device *,
@@ -158,14 +148,8 @@ struct nvkm_ram_func {
void *(*dtor)(struct nvkm_ram *);
int (*init)(struct nvkm_ram *);
- int (*get)(struct nvkm_ram *, u64 size, u32 align, u32 size_nc,
- u32 type, struct nvkm_mem **);
- void (*put)(struct nvkm_ram *, struct nvkm_mem **);
-
int (*calc)(struct nvkm_ram *, u32 freq);
int (*prog)(struct nvkm_ram *);
void (*tidy)(struct nvkm_ram *);
};
-
-extern const u8 gf100_pte_storage_type_map[256];
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 91126fd29222..36ed520ed2d0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -10,6 +10,7 @@ struct nvkm_instmem {
spinlock_t lock;
struct list_head list;
+ struct list_head boot;
u32 reserved;
struct nvkm_memory *vbios;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index 63b7ad1f9ce2..95b611554d53 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -15,8 +15,7 @@ struct nvkm_ltc {
u32 num_tags;
u32 tag_base;
- struct nvkm_mm tags;
- struct nvkm_mm_node *tag_ram;
+ struct nvkm_memory *tag_ram;
int zbc_min;
int zbc_max;
@@ -24,9 +23,7 @@ struct nvkm_ltc {
u32 zbc_depth[NVKM_LTC_MAX_ZBC_CNT];
};
-int nvkm_ltc_tags_alloc(struct nvkm_ltc *, u32 count, struct nvkm_mm_node **);
-void nvkm_ltc_tags_free(struct nvkm_ltc *, struct nvkm_mm_node **);
-void nvkm_ltc_tags_clear(struct nvkm_ltc *, u32 first, u32 count);
+void nvkm_ltc_tags_clear(struct nvkm_device *, u32 first, u32 count);
int nvkm_ltc_zbc_color_get(struct nvkm_ltc *, int index, const u32[4]);
int nvkm_ltc_zbc_depth_get(struct nvkm_ltc *, int index, const u32);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 0fdfc610ceb3..0760b93e9d1f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -2,68 +2,130 @@
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>
-#include <core/mm.h>
-struct nvkm_device;
-struct nvkm_mem;
-
-struct nvkm_vm_pgt {
- struct nvkm_memory *mem[2];
- u32 refcount[2];
-};
-
-struct nvkm_vm_pgd {
- struct list_head head;
- struct nvkm_gpuobj *obj;
-};
struct nvkm_vma {
struct list_head head;
- int refcount;
- struct nvkm_vm *vm;
- struct nvkm_mm_node *node;
- u64 offset;
- u32 access;
+ struct rb_node tree;
+ u64 addr;
+ u64 size:50;
+ bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
+ bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
+#define NVKM_VMA_PAGE_NONE 7
+ u8 page:3; /* Requested page type (index, or NONE for automatic). */
+ u8 refd:3; /* Current page type (index, or NONE for unreferenced). */
+ bool used:1; /* Region allocated. */
+ bool part:1; /* Region was split from an allocated region by map(). */
+ bool user:1; /* Region user-allocated. */
+ bool busy:1; /* Region busy (for temporarily preventing user access). */
+ struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
+ struct nvkm_tags *tags; /* Compression tag reference. */
};
-struct nvkm_vm {
+struct nvkm_vmm {
+ const struct nvkm_vmm_func *func;
struct nvkm_mmu *mmu;
-
+ const char *name;
+ u32 debug;
+ struct kref kref;
struct mutex mutex;
- struct nvkm_mm mm;
- struct kref refcount;
- struct list_head pgd_list;
+ u64 start;
+ u64 limit;
+
+ struct nvkm_vmm_pt *pd;
+ struct list_head join;
+
+ struct list_head list;
+ struct rb_root free;
+ struct rb_root root;
+
+ bool bootstrapped;
atomic_t engref[NVKM_SUBDEV_NR];
- struct nvkm_vm_pgt *pgt;
- u32 fpde;
- u32 lpde;
+ dma_addr_t null;
+ void *nullp;
};
-int nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
- struct lock_class_key *, struct nvkm_vm **);
-int nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
-int nvkm_vm_boot(struct nvkm_vm *, u64 size);
-int nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
- struct nvkm_vma *);
-void nvkm_vm_put(struct nvkm_vma *);
-void nvkm_vm_map(struct nvkm_vma *, struct nvkm_mem *);
-void nvkm_vm_map_at(struct nvkm_vma *, u64 offset, struct nvkm_mem *);
-void nvkm_vm_unmap(struct nvkm_vma *);
-void nvkm_vm_unmap_at(struct nvkm_vma *, u64 offset, u64 length);
+int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *, const char *name, struct nvkm_vmm **);
+struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
+void nvkm_vmm_unref(struct nvkm_vmm **);
+int nvkm_vmm_boot(struct nvkm_vmm *);
+int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
+void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
+int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
+void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
+
+struct nvkm_vmm_map {
+ struct nvkm_memory *memory;
+ u64 offset;
+
+ struct nvkm_mm_node *mem;
+ struct scatterlist *sgl;
+ dma_addr_t *dma;
+ u64 off;
+
+ const struct nvkm_vmm_page *page;
+
+ struct nvkm_tags *tags;
+ u64 next;
+ u64 type;
+ u64 ctag;
+};
+
+int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
+ struct nvkm_vmm_map *);
+void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);
+
+struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
+struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);
struct nvkm_mmu {
const struct nvkm_mmu_func *func;
struct nvkm_subdev subdev;
- u64 limit;
u8 dma_bits;
- u8 lpg_shift;
+
+ int heap_nr;
+ struct {
+#define NVKM_MEM_VRAM 0x01
+#define NVKM_MEM_HOST 0x02
+#define NVKM_MEM_COMP 0x04
+#define NVKM_MEM_DISP 0x08
+ u8 type;
+ u64 size;
+ } heap[4];
+
+ int type_nr;
+ struct {
+#define NVKM_MEM_KIND 0x10
+#define NVKM_MEM_MAPPABLE 0x20
+#define NVKM_MEM_COHERENT 0x40
+#define NVKM_MEM_UNCACHED 0x80
+ u8 type;
+ u8 heap;
+ } type[16];
+
+ struct nvkm_vmm *vmm;
+
+ struct {
+ struct mutex mutex;
+ struct list_head list;
+ } ptc, ptp;
+
+ struct nvkm_device_oclass user;
};
int nv04_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gm200_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index 98fe1d0fd592..b1ac47eb786e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -98,4 +98,5 @@ int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gp100_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index f98f800cc011..ece650a0c5f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -34,6 +34,7 @@
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
+#include "nouveau_vmm.h"
static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv)
@@ -134,7 +135,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
}
if (chan->ntfy) {
- nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
+ nouveau_vma_del(&chan->ntfy_vma);
nouveau_bo_unpin(chan->ntfy);
drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
}
@@ -184,29 +185,34 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = device->info.chipset;
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
- if (nvxx_device(device)->func->pci)
+ if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
getparam->value = dev->pdev->vendor;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
- if (nvxx_device(device)->func->pci)
+ if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
getparam->value = dev->pdev->device;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
- if (!nvxx_device(device)->func->pci)
- getparam->value = 3;
- else
- if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
- getparam->value = 0;
- else
- if (!pci_is_pcie(dev->pdev))
- getparam->value = 1;
- else
- getparam->value = 2;
- break;
+ switch (device->info.platform) {
+ case NV_DEVICE_INFO_V0_AGP : getparam->value = 0; break;
+ case NV_DEVICE_INFO_V0_PCI : getparam->value = 1; break;
+ case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
+ case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
+ case NV_DEVICE_INFO_V0_IGP :
+ if (!pci_is_pcie(dev->pdev))
+ getparam->value = 1;
+ else
+ getparam->value = 2;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+		}
+		break;
case NOUVEAU_GETPARAM_FB_SIZE:
getparam->value = drm->gem.vram_available;
break;
@@ -329,8 +334,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
goto done;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_bo_vma_add(chan->ntfy, cli->vm,
- &chan->ntfy_vma);
+ ret = nouveau_vma_new(chan->ntfy, &cli->vmm, &chan->ntfy_vma);
if (ret)
goto done;
}
@@ -340,7 +344,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
- ret = nvkm_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
+ ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
done:
if (ret)
nouveau_abi16_chan_fini(abi16, chan);
@@ -548,8 +552,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
- args.start += chan->ntfy_vma.offset;
- args.limit += chan->ntfy_vma.offset;
+ args.start += chan->ntfy_vma->addr;
+ args.limit += chan->ntfy_vma->addr;
} else
if (drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 441100aa2320..36fde1ff3ad5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -24,7 +24,7 @@ struct nouveau_abi16_chan {
struct nouveau_channel *chan;
struct list_head notifiers;
struct nouveau_bo *ntfy;
- struct nvkm_vma ntfy_vma;
+ struct nouveau_vma *ntfy_vma;
struct nvkm_mm heap;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index dd6fba55ad5d..66bf2aff4a3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1478,9 +1478,13 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
case 1:
entry->dpconf.link_bw = 270000;
break;
- default:
+ case 2:
entry->dpconf.link_bw = 540000;
break;
+ case 3:
+ default:
+ entry->dpconf.link_bw = 810000;
+ break;
}
switch ((conf & 0x0f000000) >> 24) {
case 0xf:
@@ -1964,7 +1968,7 @@ static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
* The microcode entries are found by the "HWSQ" signature.
*/
- const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' };
+ static const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' };
const int sz = sizeof(hwsq_signature);
int hwsq_offset;
@@ -1980,7 +1984,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvbios *bios = &drm->vbios;
- const uint8_t edid_sig[] = {
+ static const uint8_t edid_sig[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
uint16_t offset = 0;
uint16_t newoffset;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e427f80344c4..2615912430cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -37,6 +37,12 @@
#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_vmm.h"
+
+#include <nvif/class.h>
+#include <nvif/if500b.h>
+#include <nvif/if900b.h>
/*
* NV10-NV40 tiling helpers
@@ -48,8 +54,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
- struct nvkm_device *device = nvxx_device(&drm->client.device);
- struct nvkm_fb *fb = device->fb;
+ struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
struct nvkm_fb_tile *tile = &fb->tile.region[i];
nouveau_fence_unref(&reg->fence);
@@ -97,7 +102,7 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
- u32 size, u32 pitch, u32 flags)
+ u32 size, u32 pitch, u32 zeta)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
@@ -120,8 +125,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
}
if (found)
- nv10_bo_update_tile_region(dev, found, addr, size,
- pitch, flags);
+ nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
return found;
}
@@ -155,27 +159,27 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
struct nvif_device *device = &drm->client.device;
if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
- if (nvbo->tile_mode) {
+ if (nvbo->mode) {
if (device->info.chipset >= 0x40) {
*align = 65536;
- *size = roundup_64(*size, 64 * nvbo->tile_mode);
+ *size = roundup_64(*size, 64 * nvbo->mode);
} else if (device->info.chipset >= 0x30) {
*align = 32768;
- *size = roundup_64(*size, 64 * nvbo->tile_mode);
+ *size = roundup_64(*size, 64 * nvbo->mode);
} else if (device->info.chipset >= 0x20) {
*align = 16384;
- *size = roundup_64(*size, 64 * nvbo->tile_mode);
+ *size = roundup_64(*size, 64 * nvbo->mode);
} else if (device->info.chipset >= 0x10) {
*align = 16384;
- *size = roundup_64(*size, 32 * nvbo->tile_mode);
+ *size = roundup_64(*size, 32 * nvbo->mode);
}
}
} else {
- *size = roundup_64(*size, (1 << nvbo->page_shift));
- *align = max((1 << nvbo->page_shift), *align);
+ *size = roundup_64(*size, (1 << nvbo->page));
+ *align = max((1 << nvbo->page), *align);
}
*size = roundup_64(*size, PAGE_SIZE);
@@ -187,11 +191,13 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
struct sg_table *sg, struct reservation_object *robj,
struct nouveau_bo **pnvbo)
{
- struct nouveau_drm *drm = nouveau_drm(cli->dev);
+ struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
+ struct nvif_mmu *mmu = &cli->mmu;
+ struct nvif_vmm *vmm = &cli->vmm.vmm;
size_t acc_size;
- int ret;
int type = ttm_bo_type_device;
+ int ret, i, pi = -1;
if (!size) {
NV_WARN(drm, "skipped size %016llx\n", size);
@@ -207,19 +213,82 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
INIT_LIST_HEAD(&nvbo->vma_list);
- nvbo->tile_mode = tile_mode;
- nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &drm->ttm.bdev;
nvbo->cli = cli;
- if (!nvxx_device(&drm->client.device)->func->cpu_coherent)
- nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+ /* This is confusing, and doesn't actually mean we want an uncached
+ * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
+ * into in nouveau_gem_new().
+ */
+ if (flags & TTM_PL_FLAG_UNCACHED) {
+ /* Determine if we can get a cache-coherent map, forcing
+ * uncached mapping if we can't.
+ */
+ if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+ nvbo->force_coherent = true;
+ }
+
+ if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+ nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return -EINVAL;
+ }
+
+ nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+ } else
+ if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+ nvbo->comp = (tile_flags & 0x00030000) >> 16;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return -EINVAL;
+ }
+ } else {
+ nvbo->zeta = (tile_flags & 0x00000007);
+ }
+ nvbo->mode = tile_mode;
+ nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
+
+ /* Determine the desirable target GPU page size for the buffer. */
+ for (i = 0; i < vmm->page_nr; i++) {
+ /* Because we cannot currently allow VMM maps to fail
+ * during buffer migration, we need to determine page
+ * size for the buffer up-front, and pre-allocate its
+ * page tables.
+ *
+ * Skip page sizes that can't support needed domains.
+ */
+ if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
+ (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
+ continue;
+ if ((flags & TTM_PL_FLAG_TT ) && !vmm->page[i].host)
+ continue;
+
+ /* Select this page size if it's the first that supports
+ * the potential memory domains, or when it's compatible
+ * with the requested compression settings.
+ */
+ if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
+ pi = i;
+
+ /* Stop once the buffer is larger than the current page size. */
+ if (size >= 1ULL << vmm->page[i].shift)
+ break;
+ }
+
+	if (WARN_ON(pi < 0)) {
+		kfree(nvbo);
+		return -EINVAL;
+	}
- nvbo->page_shift = 12;
- if (drm->client.vm) {
- if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
- nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
+ /* Disable compression if suitable settings couldn't be found. */
+ if (nvbo->comp && !vmm->page[pi].comp) {
+ if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+ nvbo->kind = mmu->kind[nvbo->kind];
+ nvbo->comp = 0;
}
+ nvbo->page = vmm->page[pi].shift;
nouveau_bo_fixup_align(nvbo, flags, &align, &size);
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
@@ -262,7 +329,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
unsigned i, fpfn, lpfn;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
- nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+ nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
* Make sure that the color and depth buffers are handled
@@ -270,7 +337,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
* speed up when alpha-blending and depth-test are enabled
* at the same time.
*/
- if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
+ if (nvbo->zeta) {
fpfn = vram_pages / 2;
lpfn = ~0;
} else {
@@ -321,14 +388,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
memtype == TTM_PL_FLAG_VRAM && contig) {
- if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
- if (bo->mem.mem_type == TTM_PL_VRAM) {
- struct nvkm_mem *mem = bo->mem.mm_node;
- if (!nvkm_mm_contiguous(mem->mem))
- evict = true;
- }
- nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
+ if (!nvbo->contig) {
+ nvbo->contig = true;
force = true;
+ evict = true;
}
}
@@ -376,7 +439,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
out:
if (force && ret)
- nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
+ nvbo->contig = false;
ttm_bo_unreserve(bo);
return ret;
}
@@ -446,7 +509,6 @@ void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
int i;
@@ -458,7 +520,8 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
return;
for (i = 0; i < ttm_dma->ttm.num_pages; i++)
- dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
+ dma_sync_single_for_device(drm->dev->dev,
+ ttm_dma->dma_address[i],
PAGE_SIZE, DMA_TO_DEVICE);
}
@@ -466,7 +529,6 @@ void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
int i;
@@ -478,7 +540,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
return;
for (i = 0; i < ttm_dma->ttm.num_pages; i++)
- dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
+ dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
PAGE_SIZE, DMA_FROM_DEVICE);
}
@@ -568,6 +630,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct nvif_mmu *mmu = &drm->client.mmu;
switch (type) {
case TTM_PL_SYSTEM:
@@ -584,7 +647,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
/* Some BARs do not support being ioremapped WC */
- if (nvxx_bar(&drm->client.device)->iomap_uncached) {
+ const u8 type = mmu->type[drm->ttm.type_vram].type;
+ if (type & NVIF_MEM_UNCACHED) {
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
}
@@ -659,14 +723,14 @@ static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *mem = old_reg->mm_node;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
int ret = RING_SPACE(chan, 10);
if (ret == 0) {
BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
- OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
- OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
- OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
- OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+ OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+ OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+ OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+ OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
OUT_RING (chan, PAGE_SIZE);
OUT_RING (chan, PAGE_SIZE);
OUT_RING (chan, PAGE_SIZE);
@@ -691,9 +755,9 @@ static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *mem = old_reg->mm_node;
- u64 src_offset = mem->vma[0].offset;
- u64 dst_offset = mem->vma[1].offset;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
+ u64 src_offset = mem->vma[0].addr;
+ u64 dst_offset = mem->vma[1].addr;
u32 page_count = new_reg->num_pages;
int ret;
@@ -729,9 +793,9 @@ static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *mem = old_reg->mm_node;
- u64 src_offset = mem->vma[0].offset;
- u64 dst_offset = mem->vma[1].offset;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
+ u64 src_offset = mem->vma[0].addr;
+ u64 dst_offset = mem->vma[1].addr;
u32 page_count = new_reg->num_pages;
int ret;
@@ -768,9 +832,9 @@ static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *mem = old_reg->mm_node;
- u64 src_offset = mem->vma[0].offset;
- u64 dst_offset = mem->vma[1].offset;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
+ u64 src_offset = mem->vma[0].addr;
+ u64 dst_offset = mem->vma[1].addr;
u32 page_count = new_reg->num_pages;
int ret;
@@ -806,14 +870,14 @@ static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *mem = old_reg->mm_node;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
- OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
- OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
- OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
- OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+ OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+ OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+ OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+ OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
OUT_RING (chan, 0x00000000 /* COPY */);
OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
}
@@ -824,15 +888,15 @@ static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *mem = old_reg->mm_node;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
- OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
- OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
- OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
- OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+ OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+ OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+ OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+ OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
}
return ret;
@@ -858,12 +922,12 @@ static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
- struct nvkm_mem *mem = old_reg->mm_node;
+ struct nouveau_mem *mem = nouveau_mem(old_reg);
u64 length = (new_reg->num_pages << PAGE_SHIFT);
- u64 src_offset = mem->vma[0].offset;
- u64 dst_offset = mem->vma[1].offset;
- int src_tiled = !!mem->memtype;
- int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
+ u64 src_offset = mem->vma[0].addr;
+ u64 dst_offset = mem->vma[1].addr;
+ int src_tiled = !!mem->kind;
+ int dst_tiled = !!nouveau_mem(new_reg)->kind;
int ret;
while (length) {
@@ -1000,25 +1064,31 @@ static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
struct ttm_mem_reg *reg)
{
- struct nvkm_mem *old_mem = bo->mem.mm_node;
- struct nvkm_mem *new_mem = reg->mm_node;
- u64 size = (u64)reg->num_pages << PAGE_SHIFT;
+ struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+ struct nouveau_mem *new_mem = nouveau_mem(reg);
+ struct nvif_vmm *vmm = &drm->client.vmm.vmm;
int ret;
- ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
- NV_MEM_ACCESS_RW, &old_mem->vma[0]);
+ ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
+ old_mem->mem.size, &old_mem->vma[0]);
if (ret)
return ret;
- ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
- NV_MEM_ACCESS_RW, &old_mem->vma[1]);
+ ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
+ new_mem->mem.size, &old_mem->vma[1]);
+ if (ret)
+ goto done;
+
+ ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
+ if (ret)
+ goto done;
+
+ ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
+done:
if (ret) {
- nvkm_vm_put(&old_mem->vma[0]);
- return ret;
+ nvif_vmm_put(vmm, &old_mem->vma[1]);
+ nvif_vmm_put(vmm, &old_mem->vma[0]);
}
-
- nvkm_vm_map(&old_mem->vma[0], old_mem);
- nvkm_vm_map(&old_mem->vma[1], new_mem);
return 0;
}
@@ -1200,21 +1270,23 @@ static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
struct ttm_mem_reg *new_reg)
{
+ struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nvkm_vma *vma;
+ struct nouveau_vma *vma;
/* ttm can now (stupidly) pass the driver bos it didn't create... */
if (bo->destroy != nouveau_bo_del_ttm)
return;
- list_for_each_entry(vma, &nvbo->vma_list, head) {
- if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM &&
- (new_reg->mem_type == TTM_PL_VRAM ||
- nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
- nvkm_vm_map(vma, new_reg->mm_node);
- } else {
+ if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
+ mem->mem.page == nvbo->page) {
+ list_for_each_entry(vma, &nvbo->vma_list, head) {
+ nouveau_vma_map(vma, mem);
+ }
+ } else {
+ list_for_each_entry(vma, &nvbo->vma_list, head) {
WARN_ON(ttm_bo_wait(bo, false, false));
- nvkm_vm_unmap(vma);
+ nouveau_vma_unmap(vma);
}
}
}
@@ -1234,8 +1306,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
- nvbo->tile_mode,
- nvbo->tile_flags);
+ nvbo->mode, nvbo->zeta);
}
return 0;
@@ -1331,8 +1402,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nvkm_device *device = nvxx_device(&drm->client.device);
- struct nvkm_mem *mem = reg->mm_node;
- int ret;
+ struct nouveau_mem *mem = nouveau_mem(reg);
reg->bus.addr = NULL;
reg->bus.offset = 0;
@@ -1353,7 +1423,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
reg->bus.is_iomem = !drm->agp.cma;
}
#endif
- if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype)
+ if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
/* untiled */
break;
/* fallthrough, tiled memory */
@@ -1361,19 +1431,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
reg->bus.offset = reg->start << PAGE_SHIFT;
reg->bus.base = device->func->resource_addr(device, 1);
reg->bus.is_iomem = true;
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- struct nvkm_bar *bar = nvxx_bar(&drm->client.device);
- int page_shift = 12;
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
- page_shift = mem->page_shift;
+ if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+ union {
+ struct nv50_mem_map_v0 nv50;
+ struct gf100_mem_map_v0 gf100;
+ } args;
+ u64 handle, length;
+ u32 argc = 0;
+ int ret;
+
+ switch (mem->mem.object.oclass) {
+ case NVIF_CLASS_MEM_NV50:
+ args.nv50.version = 0;
+ args.nv50.ro = 0;
+ args.nv50.kind = mem->kind;
+ args.nv50.comp = mem->comp;
+ break;
+ case NVIF_CLASS_MEM_GF100:
+ args.gf100.version = 0;
+ args.gf100.ro = 0;
+ args.gf100.kind = mem->kind;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
- ret = nvkm_bar_umap(bar, mem->size << 12, page_shift,
- &mem->bar_vma);
- if (ret)
- return ret;
+ ret = nvif_object_map_handle(&mem->mem.object,
+ &argc, argc,
+ &handle, &length);
+ if (ret != 1)
+ return ret ? ret : -EINVAL;
- nvkm_vm_map(&mem->bar_vma, mem);
- reg->bus.offset = mem->bar_vma.offset;
+ reg->bus.base = 0;
+ reg->bus.offset = handle;
}
break;
default:
@@ -1385,13 +1476,22 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
- struct nvkm_mem *mem = reg->mm_node;
-
- if (!mem->bar_vma.node)
- return;
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct nouveau_mem *mem = nouveau_mem(reg);
- nvkm_vm_unmap(&mem->bar_vma);
- nvkm_vm_put(&mem->bar_vma);
+ if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+ switch (reg->mem_type) {
+ case TTM_PL_TT:
+ if (mem->kind)
+ nvif_object_unmap_handle(&mem->mem.object);
+ break;
+ case TTM_PL_VRAM:
+ nvif_object_unmap_handle(&mem->mem.object);
+ break;
+ default:
+ break;
+ }
+ }
}
static int
@@ -1408,7 +1508,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
*/
if (bo->mem.mem_type != TTM_PL_VRAM) {
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
- !nouveau_bo_tile_layout(nvbo))
+ !nvbo->kind)
return 0;
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
@@ -1445,9 +1545,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
- struct nvkm_device *device;
- struct drm_device *dev;
- struct device *pdev;
+ struct device *dev;
unsigned i;
int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1464,9 +1562,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
drm = nouveau_bdev(ttm->bdev);
- device = nvxx_device(&drm->client.device);
- dev = drm->dev;
- pdev = device->dev;
+ dev = drm->dev->dev;
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
@@ -1476,7 +1572,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
if (swiotlb_nr_tbl()) {
- return ttm_dma_populate((void *)ttm, dev->dev);
+ return ttm_dma_populate((void *)ttm, dev);
}
#endif
@@ -1488,12 +1584,12 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; i++) {
dma_addr_t addr;
- addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
+ addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(pdev, addr)) {
+ if (dma_mapping_error(dev, addr)) {
while (i--) {
- dma_unmap_page(pdev, ttm_dma->dma_address[i],
+ dma_unmap_page(dev, ttm_dma->dma_address[i],
PAGE_SIZE, DMA_BIDIRECTIONAL);
ttm_dma->dma_address[i] = 0;
}
@@ -1511,9 +1607,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
- struct nvkm_device *device;
- struct drm_device *dev;
- struct device *pdev;
+ struct device *dev;
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1521,9 +1615,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
drm = nouveau_bdev(ttm->bdev);
- device = nvxx_device(&drm->client.device);
- dev = drm->dev;
- pdev = device->dev;
+ dev = drm->dev->dev;
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
@@ -1534,14 +1626,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
if (swiotlb_nr_tbl()) {
- ttm_dma_unpopulate((void *)ttm, dev->dev);
+ ttm_dma_unpopulate((void *)ttm, dev);
return;
}
#endif
for (i = 0; i < ttm->num_pages; i++) {
if (ttm_dma->dma_address[i]) {
- dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
+ dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE,
DMA_BIDIRECTIONAL);
}
}
@@ -1576,48 +1668,3 @@ struct ttm_bo_driver nouveau_bo_driver = {
.io_mem_free = &nouveau_ttm_io_mem_free,
.io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
-
-struct nvkm_vma *
-nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
-{
- struct nvkm_vma *vma;
- list_for_each_entry(vma, &nvbo->vma_list, head) {
- if (vma->vm == vm)
- return vma;
- }
-
- return NULL;
-}
-
-int
-nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
- struct nvkm_vma *vma)
-{
- const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- int ret;
-
- ret = nvkm_vm_get(vm, size, nvbo->page_shift,
- NV_MEM_ACCESS_RW, vma);
- if (ret)
- return ret;
-
- if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
- (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
- nvbo->page_shift != vma->vm->mmu->lpg_shift))
- nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
-
- list_add_tail(&vma->head, &nvbo->vma_list);
- vma->refcount = 1;
- return 0;
-}
-
-void
-nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
-{
- if (vma->node) {
- if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
- nvkm_vm_unmap(vma);
- nvkm_vm_put(vma);
- list_del(&vma->head);
- }
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 4caade5dee50..7b5cc5c73d20 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -25,12 +25,16 @@ struct nouveau_bo {
bool validate_mapped;
struct list_head vma_list;
- unsigned page_shift;
struct nouveau_cli *cli;
- u32 tile_mode;
- u32 tile_flags;
+ unsigned contig:1;
+ unsigned page:5;
+ unsigned kind:8;
+ unsigned comp:3;
+ unsigned zeta:3;
+ unsigned mode;
+
struct nouveau_drm_tile *tile;
/* Only valid if allocated via nouveau_gem_new() and iff you hold a
@@ -90,13 +94,6 @@ int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
-struct nvkm_vma *
-nouveau_bo_vma_find(struct nouveau_bo *, struct nvkm_vm *);
-
-int nouveau_bo_vma_add(struct nouveau_bo *, struct nvkm_vm *,
- struct nvkm_vma *);
-void nouveau_bo_vma_del(struct nouveau_bo *, struct nvkm_vma *);
-
/* TODO: submit equivalent to TTM generic API upstream? */
static inline void __iomem *
nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index dbc41fa86ee8..af1116655910 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -40,6 +40,7 @@
#include "nouveau_chan.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"
+#include "nouveau_vmm.h"
MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf;
@@ -83,6 +84,14 @@ nouveau_channel_del(struct nouveau_channel **pchan)
{
struct nouveau_channel *chan = *pchan;
if (chan) {
+ struct nouveau_cli *cli = (void *)chan->user.client;
+ bool super;
+
+ if (cli) {
+ super = cli->base.super;
+ cli->base.super = true;
+ }
+
if (chan->fence)
nouveau_fence(chan->drm)->context_del(chan);
nvif_object_fini(&chan->nvsw);
@@ -91,12 +100,15 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nvif_notify_fini(&chan->kill);
nvif_object_fini(&chan->user);
nvif_object_fini(&chan->push.ctxdma);
- nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
+ nouveau_vma_del(&chan->push.vma);
nouveau_bo_unmap(chan->push.buffer);
if (chan->push.buffer && chan->push.buffer->pin_refcnt)
nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
kfree(chan);
+
+ if (cli)
+ cli->base.super = super;
}
*pchan = NULL;
}
@@ -106,7 +118,6 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
u32 size, struct nouveau_channel **pchan)
{
struct nouveau_cli *cli = (void *)device->object.client;
- struct nvkm_mmu *mmu = nvxx_mmu(device);
struct nv_dma_v0 args = {};
struct nouveau_channel *chan;
u32 target;
@@ -142,11 +153,11 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
* pushbuf lives in, this is because the GEM code requires that
* we be able to call out to other (indirect) push buffers
*/
- chan->push.vma.offset = chan->push.buffer->bo.offset;
+ chan->push.addr = chan->push.buffer->bo.offset;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
- &chan->push.vma);
+ ret = nouveau_vma_new(chan->push.buffer, &cli->vmm,
+ &chan->push.vma);
if (ret) {
nouveau_channel_del(pchan);
return ret;
@@ -155,7 +166,9 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = cli->vm->mmu->limit - 1;
+ args.limit = cli->vmm.vmm.limit - 1;
+
+ chan->push.addr = chan->push.vma->addr;
} else
if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
@@ -185,7 +198,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
- args.limit = mmu->limit - 1;
+ args.limit = cli->vmm.vmm.limit - 1;
}
}
@@ -203,6 +216,7 @@ static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
u32 engine, struct nouveau_channel **pchan)
{
+ struct nouveau_cli *cli = (void *)device->object.client;
static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A,
MAXWELL_CHANNEL_GPFIFO_A,
KEPLER_CHANNEL_GPFIFO_B,
@@ -233,22 +247,22 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
args.kepler.version = 0;
args.kepler.engines = engine;
args.kepler.ilength = 0x02000;
- args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
- args.kepler.vm = 0;
+ args.kepler.ioffset = 0x10000 + chan->push.addr;
+ args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object);
size = sizeof(args.kepler);
} else
if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
args.fermi.version = 0;
args.fermi.ilength = 0x02000;
- args.fermi.ioffset = 0x10000 + chan->push.vma.offset;
- args.fermi.vm = 0;
+ args.fermi.ioffset = 0x10000 + chan->push.addr;
+ args.fermi.vmm = nvif_handle(&cli->vmm.vmm.object);
size = sizeof(args.fermi);
} else {
args.nv50.version = 0;
args.nv50.ilength = 0x02000;
- args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
+ args.nv50.ioffset = 0x10000 + chan->push.addr;
args.nv50.pushbuf = nvif_handle(&chan->push.ctxdma);
- args.nv50.vm = 0;
+ args.nv50.vmm = nvif_handle(&cli->vmm.vmm.object);
size = sizeof(args.nv50);
}
@@ -293,7 +307,7 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
/* create channel object */
args.version = 0;
args.pushbuf = nvif_handle(&chan->push.ctxdma);
- args.offset = chan->push.vma.offset;
+ args.offset = chan->push.addr;
do {
ret = nvif_object_init(&device->object, 0, *oclass++,
@@ -314,11 +328,10 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
struct nvif_device *device = chan->device;
struct nouveau_cli *cli = (void *)chan->user.client;
struct nouveau_drm *drm = chan->drm;
- struct nvkm_mmu *mmu = nvxx_mmu(device);
struct nv_dma_v0 args = {};
int ret, i;
- nvif_object_map(&chan->user);
+ nvif_object_map(&chan->user, NULL, 0);
if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
ret = nvif_notify_init(&chan->user, nouveau_channel_killed,
@@ -339,7 +352,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = cli->vm->mmu->limit - 1;
+ args.limit = cli->vmm.vmm.limit - 1;
} else {
args.target = NV_DMA_V0_TARGET_VRAM;
args.access = NV_DMA_V0_ACCESS_RDWR;
@@ -356,7 +369,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = cli->vm->mmu->limit - 1;
+ args.limit = cli->vmm.vmm.limit - 1;
} else
if (chan->drm->agp.bridge) {
args.target = NV_DMA_V0_TARGET_AGP;
@@ -368,7 +381,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
args.target = NV_DMA_V0_TARGET_VM;
args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
- args.limit = mmu->limit - 1;
+ args.limit = cli->vmm.vmm.limit - 1;
}
ret = nvif_object_init(&chan->user, gart, NV_DMA_IN_MEMORY,
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 9463a78613cb..14607c16a2bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -17,8 +17,9 @@ struct nouveau_channel {
struct {
struct nouveau_bo *buffer;
- struct nvkm_vma vma;
+ struct nouveau_vma *vma;
struct nvif_object ctxdma;
+ u64 addr;
} push;
/* TODO: this will be reworked in the near future */
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 70d8e0d69ad5..69d6e61a01ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -373,7 +373,7 @@ find_encoder(struct drm_connector *connector, int type)
if (!id)
break;
- enc = drm_encoder_find(dev, id);
+ enc = drm_encoder_find(dev, NULL, id);
if (!enc)
continue;
nv_encoder = nouveau_encoder(enc);
@@ -441,7 +441,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
if (id == 0)
break;
- encoder = drm_encoder_find(dev, id);
+ encoder = drm_encoder_find(dev, NULL, id);
if (!encoder)
continue;
nv_encoder = nouveau_encoder(encoder);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 34cd144681b9..270ba56f2756 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -1,15 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_DISPLAY_H__
#define __NOUVEAU_DISPLAY_H__
-
-#include <subdev/mmu.h>
-
#include "nouveau_drv.h"
struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
- struct nvkm_vma vma;
+ struct nouveau_vma *vma;
u32 r_handle;
u32 r_format;
u32 r_pitch;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 2634a1a79888..10e84f6ca2b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -26,6 +26,7 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include "nouveau_vmm.h"
void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
@@ -71,11 +72,11 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
return -EBUSY;
}
- if (val < chan->push.vma.offset ||
- val > chan->push.vma.offset + (chan->dma.max << 2))
+ if (val < chan->push.addr ||
+ val > chan->push.addr + (chan->dma.max << 2))
return -EINVAL;
- return (val - chan->push.vma.offset) >> 2;
+ return (val - chan->push.addr) >> 2;
}
void
@@ -84,13 +85,13 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
{
struct nouveau_cli *cli = (void *)chan->user.client;
struct nouveau_bo *pb = chan->push.buffer;
- struct nvkm_vma *vma;
+ struct nouveau_vma *vma;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
u64 offset;
- vma = nouveau_bo_vma_find(bo, cli->vm);
+ vma = nouveau_vma_find(bo, &cli->vmm);
BUG_ON(!vma);
- offset = vma->offset + delta;
+ offset = vma->addr + delta;
BUG_ON(chan->dma.ib_free < 1);
@@ -224,7 +225,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
* instruct the GPU to jump back to the start right
* after processing the currently pending commands.
*/
- OUT_RING(chan, chan->push.vma.offset | 0x20000000);
+ OUT_RING(chan, chan->push.addr | 0x20000000);
/* wait for GET to depart from the skips area.
* prevents writing GET==PUT and causing a race
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index aff3a9d0a1fc..74e10b14a7da 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -140,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
#define WRITE_PUT(val) do { \
mb(); \
nouveau_bo_rd32(chan->push.buffer, 0); \
- nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
+ nvif_wr32(&chan->user, chan->user_put, ((val) << 2) + chan->push.addr);\
} while (0)
static inline void
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 595630d1fb9e..8d4a5be3b913 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -111,33 +111,119 @@ nouveau_name(struct drm_device *dev)
return nouveau_platform_name(to_platform_device(dev->dev));
}
+static inline bool
+nouveau_cli_work_ready(struct dma_fence *fence, bool wait)
+{
+ if (!dma_fence_is_signaled(fence)) {
+ if (!wait)
+ return false;
+ WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
+ }
+ dma_fence_put(fence);
+ return true;
+}
+
+static void
+nouveau_cli_work_flush(struct nouveau_cli *cli, bool wait)
+{
+ struct nouveau_cli_work *work, *wtmp;
+ mutex_lock(&cli->lock);
+ list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
+ if (!work->fence || nouveau_cli_work_ready(work->fence, wait)) {
+ list_del(&work->head);
+ work->func(work);
+ }
+ }
+ mutex_unlock(&cli->lock);
+}
+
+static void
+nouveau_cli_work_fence(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct nouveau_cli_work *work = container_of(cb, typeof(*work), cb);
+ schedule_work(&work->cli->work);
+}
+
+void
+nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
+ struct nouveau_cli_work *work)
+{
+ work->fence = dma_fence_get(fence);
+ work->cli = cli;
+ mutex_lock(&cli->lock);
+ list_add_tail(&work->head, &cli->worker);
+ mutex_unlock(&cli->lock);
+ if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
+ nouveau_cli_work_fence(fence, &work->cb);
+}
+
+static void
+nouveau_cli_work(struct work_struct *w)
+{
+ struct nouveau_cli *cli = container_of(w, typeof(*cli), work);
+ nouveau_cli_work_flush(cli, false);
+}
+
static void
nouveau_cli_fini(struct nouveau_cli *cli)
{
- nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
+ nouveau_cli_work_flush(cli, true);
usif_client_fini(cli);
+ nouveau_vmm_fini(&cli->vmm);
+ nvif_mmu_fini(&cli->mmu);
nvif_device_fini(&cli->device);
+ mutex_lock(&cli->drm->master.lock);
nvif_client_fini(&cli->base);
+ mutex_unlock(&cli->drm->master.lock);
}
static int
nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
struct nouveau_cli *cli)
{
+ static const struct nvif_mclass
+ mems[] = {
+ { NVIF_CLASS_MEM_GF100, -1 },
+ { NVIF_CLASS_MEM_NV50 , -1 },
+ { NVIF_CLASS_MEM_NV04 , -1 },
+ {}
+ };
+ static const struct nvif_mclass
+ mmus[] = {
+ { NVIF_CLASS_MMU_GF100, -1 },
+ { NVIF_CLASS_MMU_NV50 , -1 },
+ { NVIF_CLASS_MMU_NV04 , -1 },
+ {}
+ };
+ static const struct nvif_mclass
+ vmms[] = {
+ { NVIF_CLASS_VMM_GP100, -1 },
+ { NVIF_CLASS_VMM_GM200, -1 },
+ { NVIF_CLASS_VMM_GF100, -1 },
+ { NVIF_CLASS_VMM_NV50 , -1 },
+ { NVIF_CLASS_VMM_NV04 , -1 },
+ {}
+ };
u64 device = nouveau_name(drm->dev);
int ret;
snprintf(cli->name, sizeof(cli->name), "%s", sname);
- cli->dev = drm->dev;
+ cli->drm = drm;
mutex_init(&cli->mutex);
usif_client_init(cli);
- if (cli == &drm->client) {
+ INIT_WORK(&cli->work, nouveau_cli_work);
+ INIT_LIST_HEAD(&cli->worker);
+ mutex_init(&cli->lock);
+
+ if (cli == &drm->master) {
ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
cli->name, device, &cli->base);
} else {
- ret = nvif_client_init(&drm->client.base, cli->name, device,
+ mutex_lock(&drm->master.lock);
+ ret = nvif_client_init(&drm->master.base, cli->name, device,
&cli->base);
+ mutex_unlock(&drm->master.lock);
}
if (ret) {
NV_ERROR(drm, "Client allocation failed: %d\n", ret);
@@ -154,6 +240,38 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
goto done;
}
+ ret = nvif_mclass(&cli->device.object, mmus);
+ if (ret < 0) {
+ NV_ERROR(drm, "No supported MMU class\n");
+ goto done;
+ }
+
+ ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
+ if (ret) {
+ NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
+ goto done;
+ }
+
+ ret = nvif_mclass(&cli->mmu.object, vmms);
+ if (ret < 0) {
+ NV_ERROR(drm, "No supported VMM class\n");
+ goto done;
+ }
+
+ ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
+ if (ret) {
+ NV_ERROR(drm, "VMM allocation failed: %d\n", ret);
+ goto done;
+ }
+
+ ret = nvif_mclass(&cli->mmu.object, mems);
+ if (ret < 0) {
+ NV_ERROR(drm, "No supported MEM class\n");
+ goto done;
+ }
+
+ cli->mem = &mems[ret];
+ return 0;
done:
if (ret)
nouveau_cli_fini(cli);
@@ -433,6 +551,10 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = drm;
drm->dev = dev;
+ ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
+ if (ret)
+ return ret;
+
ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
return ret;
@@ -456,21 +578,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_vga_init(drm);
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- if (!nvxx_device(&drm->client.device)->mmu) {
- ret = -ENOSYS;
- goto fail_device;
- }
-
- ret = nvkm_vm_new(nvxx_device(&drm->client.device),
- 0, (1ULL << 40), 0x1000, NULL,
- &drm->client.vm);
- if (ret)
- goto fail_device;
-
- nvxx_client(&drm->client.base)->vm = drm->client.vm;
- }
-
ret = nouveau_ttm_init(drm);
if (ret)
goto fail_ttm;
@@ -516,8 +623,8 @@ fail_bios:
nouveau_ttm_fini(drm);
fail_ttm:
nouveau_vga_fini(drm);
-fail_device:
nouveau_cli_fini(&drm->client);
+ nouveau_cli_fini(&drm->master);
kfree(drm);
return ret;
}
@@ -550,6 +657,7 @@ nouveau_drm_unload(struct drm_device *dev)
if (drm->hdmi_device)
pci_dev_put(drm->hdmi_device);
nouveau_cli_fini(&drm->client);
+ nouveau_cli_fini(&drm->master);
kfree(drm);
}
@@ -618,7 +726,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
}
NV_DEBUG(drm, "suspending object tree...\n");
- ret = nvif_client_suspend(&drm->client.base);
+ ret = nvif_client_suspend(&drm->master.base);
if (ret)
goto fail_client;
@@ -642,7 +750,7 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
struct nouveau_drm *drm = nouveau_drm(dev);
NV_DEBUG(drm, "resuming object tree...\n");
- nvif_client_resume(&drm->client.base);
+ nvif_client_resume(&drm->master.base);
NV_DEBUG(drm, "resuming fence...\n");
if (drm->fence && nouveau_fence(drm)->resume)
@@ -850,15 +958,6 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
cli->base.super = false;
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nvkm_vm_new(nvxx_device(&drm->client.device), 0,
- (1ULL << 40), 0x1000, NULL, &cli->vm);
- if (ret)
- goto done;
-
- nvxx_client(&cli->base)->vm = cli->vm;
- }
-
fpriv->driver_priv = cli;
mutex_lock(&drm->client.mutex);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 77dea95c1bf1..3331e82ae9e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -6,7 +6,7 @@
#define DRIVER_EMAIL "nouveau@lists.freedesktop.org"
#define DRIVER_NAME "nouveau"
-#define DRIVER_DESC "nVidia Riva/TNT/GeForce/Quadro/Tesla"
+#define DRIVER_DESC "nVidia Riva/TNT/GeForce/Quadro/Tesla/Tegra K1+"
#define DRIVER_DATE "20120801"
#define DRIVER_MAJOR 1
@@ -43,6 +43,8 @@
#include <nvif/client.h>
#include <nvif/device.h>
#include <nvif/ioctl.h>
+#include <nvif/mmu.h>
+#include <nvif/vmm.h>
#include <drm/drmP.h>
@@ -62,6 +64,7 @@ struct platform_device;
#include "nouveau_fence.h"
#include "nouveau_bios.h"
+#include "nouveau_vmm.h"
struct nouveau_drm_tile {
struct nouveau_fence *fence;
@@ -87,19 +90,37 @@ enum nouveau_drm_handle {
struct nouveau_cli {
struct nvif_client base;
- struct drm_device *dev;
+ struct nouveau_drm *drm;
struct mutex mutex;
struct nvif_device device;
+ struct nvif_mmu mmu;
+ struct nouveau_vmm vmm;
+ const struct nvif_mclass *mem;
- struct nvkm_vm *vm; /*XXX*/
struct list_head head;
void *abi16;
struct list_head objects;
struct list_head notifys;
char name[32];
+
+ struct work_struct work;
+ struct list_head worker;
+ struct mutex lock;
};
+struct nouveau_cli_work {
+ void (*func)(struct nouveau_cli_work *);
+ struct nouveau_cli *cli;
+ struct list_head head;
+
+ struct dma_fence *fence;
+ struct dma_fence_cb cb;
+};
+
+void nouveau_cli_work_queue(struct nouveau_cli *, struct dma_fence *,
+ struct nouveau_cli_work *);
+
static inline struct nouveau_cli *
nouveau_cli(struct drm_file *fpriv)
{
@@ -110,6 +131,7 @@ nouveau_cli(struct drm_file *fpriv)
#include <nvif/device.h>
struct nouveau_drm {
+ struct nouveau_cli master;
struct nouveau_cli client;
struct drm_device *dev;
@@ -134,6 +156,9 @@ struct nouveau_drm {
struct nouveau_channel *chan;
struct nvif_object copy;
int mtrr;
+ int type_vram;
+ int type_host;
+ int type_ncoh;
} ttm;
/* GEM interface support */
@@ -205,7 +230,7 @@ void nouveau_drm_device_remove(struct drm_device *dev);
#define NV_PRINTK(l,c,f,a...) do { \
struct nouveau_cli *_cli = (c); \
- dev_##l(_cli->dev->dev, "%s: "f, _cli->name, ##a); \
+ dev_##l(_cli->drm->dev->dev, "%s: "f, _cli->name, ##a); \
} while(0)
#define NV_FATAL(drm,f,a...) NV_PRINTK(crit, &(drm)->client, f, ##a)
#define NV_ERROR(drm,f,a...) NV_PRINTK(err, &(drm)->client, f, ##a)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 2b12d82aac15..c533d8e04afc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -48,6 +48,7 @@
#include "nouveau_bo.h"
#include "nouveau_fbcon.h"
#include "nouveau_chan.h"
+#include "nouveau_vmm.h"
#include "nouveau_crtc.h"
@@ -348,7 +349,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
chan = nouveau_nofbaccel ? NULL : drm->channel;
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_bo_vma_add(nvbo, drm->client.vm, &fb->vma);
+ ret = nouveau_vma_new(nvbo, &drm->client.vmm, &fb->vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL;
@@ -402,7 +403,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
out_unlock:
if (chan)
- nouveau_bo_vma_del(fb->nvbo, &fb->vma);
+ nouveau_vma_del(&fb->vma);
nouveau_bo_unmap(fb->nvbo);
out_unpin:
nouveau_bo_unpin(fb->nvbo);
@@ -429,7 +430,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
drm_fb_helper_fini(&fbcon->helper);
if (nouveau_fb->nvbo) {
- nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
+ nouveau_vma_del(&nouveau_fb->vma);
nouveau_bo_unmap(nouveau_fb->nvbo);
nouveau_bo_unpin(nouveau_fb->nvbo);
drm_framebuffer_unreference(&nouveau_fb->base);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 99e14e3e0fe4..503fa94dc06d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -199,62 +199,6 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
WARN_ON(ret);
}
-struct nouveau_fence_work {
- struct work_struct work;
- struct dma_fence_cb cb;
- void (*func)(void *);
- void *data;
-};
-
-static void
-nouveau_fence_work_handler(struct work_struct *kwork)
-{
- struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);
- work->func(work->data);
- kfree(work);
-}
-
-static void nouveau_fence_work_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
- struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
-
- schedule_work(&work->work);
-}
-
-void
-nouveau_fence_work(struct dma_fence *fence,
- void (*func)(void *), void *data)
-{
- struct nouveau_fence_work *work;
-
- if (dma_fence_is_signaled(fence))
- goto err;
-
- work = kmalloc(sizeof(*work), GFP_KERNEL);
- if (!work) {
- /*
- * this might not be a nouveau fence any more,
- * so force a lazy wait here
- */
- WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
- true, false));
- goto err;
- }
-
- INIT_WORK(&work->work, nouveau_fence_work_handler);
- work->func = func;
- work->data = data;
-
- if (dma_fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
- goto err_free;
- return;
-
-err_free:
- kfree(work);
-err:
- func(data);
-}
-
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
@@ -474,8 +418,6 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
if (!fence)
return -ENOMEM;
- fence->sysmem = sysmem;
-
ret = nouveau_fence_emit(fence, chan);
if (ret)
nouveau_fence_unref(&fence);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index c9b399ad89e6..5bd8d30d1657 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -13,8 +13,6 @@ struct nouveau_fence {
struct list_head head;
- bool sysmem;
-
struct nouveau_channel __rcu *channel;
unsigned long timeout;
};
@@ -25,7 +23,6 @@ void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct dma_fence *, void (*)(void *), void *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
@@ -91,14 +88,12 @@ int nouveau_flip_complete(struct nvif_notify *);
struct nv84_fence_chan {
struct nouveau_fence_chan base;
- struct nvkm_vma vma;
- struct nvkm_vma vma_gart;
+ struct nouveau_vma *vma;
};
struct nv84_fence_priv {
struct nouveau_fence_priv base;
struct nouveau_bo *bo;
- struct nouveau_bo *bo_gart;
u32 *suspend;
struct mutex mutex;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 2170534101ca..efc89aaef66a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -31,6 +31,10 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_vmm.h"
+
+#include <nvif/class.h>
void
nouveau_gem_object_del(struct drm_gem_object *gem)
@@ -64,66 +68,61 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nvkm_vma *vma;
struct device *dev = drm->dev->dev;
+ struct nouveau_vma *vma;
int ret;
- if (!cli->vm)
+ if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return 0;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return ret;
- vma = nouveau_bo_vma_find(nvbo, cli->vm);
- if (!vma) {
- vma = kzalloc(sizeof(*vma), GFP_KERNEL);
- if (!vma) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = pm_runtime_get_sync(dev);
- if (ret < 0 && ret != -EACCES) {
- kfree(vma);
- goto out;
- }
-
- ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
- if (ret)
- kfree(vma);
-
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
- } else {
- vma->refcount++;
- }
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0 && ret != -EACCES)
+ goto out;
+ ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
out:
ttm_bo_unreserve(&nvbo->bo);
return ret;
}
+struct nouveau_gem_object_unmap {
+ struct nouveau_cli_work work;
+ struct nouveau_vma *vma;
+};
+
static void
-nouveau_gem_object_delete(void *data)
+nouveau_gem_object_delete(struct nouveau_vma *vma)
{
- struct nvkm_vma *vma = data;
- nvkm_vm_unmap(vma);
- nvkm_vm_put(vma);
- kfree(vma);
+ nouveau_vma_del(&vma);
}
static void
-nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
+nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
+{
+ struct nouveau_gem_object_unmap *work =
+ container_of(w, typeof(*work), work);
+ nouveau_gem_object_delete(work->vma);
+ kfree(work);
+}
+
+static void
+nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj;
+ struct nouveau_gem_object_unmap *work;
struct dma_fence *fence = NULL;
fobj = reservation_object_get_list(resv);
- list_del(&vma->head);
+ list_del_init(&vma->head);
if (fobj && fobj->shared_count > 1)
ttm_bo_wait(&nvbo->bo, false, false);
@@ -133,14 +132,20 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
else
fence = reservation_object_get_excl(nvbo->bo.resv);
- if (fence && mapped) {
- nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
- } else {
- if (mapped)
- nvkm_vm_unmap(vma);
- nvkm_vm_put(vma);
- kfree(vma);
+ if (!fence || !mapped) {
+ nouveau_gem_object_delete(vma);
+ return;
+ }
+
+ if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
+ WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
+ nouveau_gem_object_delete(vma);
+ return;
}
+
+ work->work.func = nouveau_gem_object_delete_work;
+ work->vma = vma;
+ nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}
void
@@ -150,19 +155,19 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
- struct nvkm_vma *vma;
+ struct nouveau_vma *vma;
int ret;
- if (!cli->vm)
+ if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return;
- vma = nouveau_bo_vma_find(nvbo, cli->vm);
+ vma = nouveau_vma_find(nvbo, &cli->vmm);
if (vma) {
- if (--vma->refcount == 0) {
+ if (--vma->refs == 0) {
ret = pm_runtime_get_sync(dev);
if (!WARN_ON(ret < 0 && ret != -EACCES)) {
nouveau_gem_object_unmap(nvbo, vma);
@@ -179,7 +184,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags,
struct nouveau_bo **pnvbo)
{
- struct nouveau_drm *drm = nouveau_drm(cli->dev);
+ struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
u32 flags = 0;
int ret;
@@ -227,7 +232,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
- struct nvkm_vma *vma;
+ struct nouveau_vma *vma;
if (is_power_of_2(nvbo->valid_domains))
rep->domain = nvbo->valid_domains;
@@ -236,18 +241,25 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->offset = nvbo->bo.offset;
- if (cli->vm) {
- vma = nouveau_bo_vma_find(nvbo, cli->vm);
+ if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+ vma = nouveau_vma_find(nvbo, &cli->vmm);
if (!vma)
return -EINVAL;
- rep->offset = vma->offset;
+ rep->offset = vma->addr;
}
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
- rep->tile_mode = nvbo->tile_mode;
- rep->tile_flags = nvbo->tile_flags;
+ rep->tile_mode = nvbo->mode;
+ rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
+ if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+ rep->tile_flags |= nvbo->kind << 8;
+ else
+ if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
+ rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
+ else
+ rep->tile_flags |= nvbo->zeta;
return 0;
}
@@ -255,18 +267,11 @@ int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli = nouveau_cli(file_priv);
- struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
- if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
- NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
- return -EINVAL;
- }
-
ret = nouveau_gem_new(cli, req->info.size, req->align,
req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo);
@@ -791,7 +796,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
bo[push[i].bo_index].user_priv;
uint32_t cmd;
- cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
+ cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
cmd |= 0x20000000;
if (unlikely(cmd != req->suffix0)) {
if (!nvbo->kmap.virtual) {
@@ -843,7 +848,7 @@ out_next:
req->suffix1 = 0x00000000;
} else {
req->suffix0 = 0x20000000 |
- (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
+ (chan->push.addr + ((chan->dma.cur + 2) << 2));
req->suffix1 = 0x00000000;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 0456c94a5d4d..fe39998f65cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -7,9 +7,6 @@
#include "nouveau_drv.h"
#include "nouveau_bo.h"
-#define nouveau_bo_tile_layout(nvbo) \
- ((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
-
static inline struct nouveau_bo *
nouveau_gem_object(struct drm_gem_object *gem)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
new file mode 100644
index 000000000000..589a9621db76
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_mem.h"
+#include "nouveau_drv.h"
+#include "nouveau_bo.h"
+
+#include <drm/ttm/ttm_bo_driver.h>
+
+#include <nvif/class.h>
+#include <nvif/if000a.h>
+#include <nvif/if500b.h>
+#include <nvif/if500d.h>
+#include <nvif/if900b.h>
+#include <nvif/if900d.h>
+
+int
+nouveau_mem_map(struct nouveau_mem *mem,
+ struct nvif_vmm *vmm, struct nvif_vma *vma)
+{
+ union {
+ struct nv50_vmm_map_v0 nv50;
+ struct gf100_vmm_map_v0 gf100;
+ } args;
+ u32 argc = 0;
+ bool super;
+ int ret;
+
+ switch (vmm->object.oclass) {
+ case NVIF_CLASS_VMM_NV04:
+ break;
+ case NVIF_CLASS_VMM_NV50:
+ args.nv50.version = 0;
+ args.nv50.ro = 0;
+ args.nv50.priv = 0;
+ args.nv50.kind = mem->kind;
+ args.nv50.comp = mem->comp;
+ argc = sizeof(args.nv50);
+ break;
+ case NVIF_CLASS_VMM_GF100:
+ case NVIF_CLASS_VMM_GM200:
+ case NVIF_CLASS_VMM_GP100:
+ args.gf100.version = 0;
+ if (mem->mem.type & NVIF_MEM_VRAM)
+ args.gf100.vol = 0;
+ else
+ args.gf100.vol = 1;
+ args.gf100.ro = 0;
+ args.gf100.priv = 0;
+ args.gf100.kind = mem->kind;
+ argc = sizeof(args.gf100);
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOSYS;
+ }
+
+ super = vmm->object.client->super;
+ vmm->object.client->super = true;
+ ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc,
+ &mem->mem, 0);
+ vmm->object.client->super = super;
+ return ret;
+}
+
+void
+nouveau_mem_fini(struct nouveau_mem *mem)
+{
+ nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[1]);
+ nvif_vmm_put(&mem->cli->drm->client.vmm.vmm, &mem->vma[0]);
+ mutex_lock(&mem->cli->drm->master.lock);
+ nvif_mem_fini(&mem->mem);
+ mutex_unlock(&mem->cli->drm->master.lock);
+}
+
+int
+nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
+{
+ struct nouveau_mem *mem = nouveau_mem(reg);
+ struct nouveau_cli *cli = mem->cli;
+ struct nouveau_drm *drm = cli->drm;
+ struct nvif_mmu *mmu = &cli->mmu;
+ struct nvif_mem_ram_v0 args = {};
+ bool super = cli->base.super;
+ u8 type;
+ int ret;
+
+ if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+ type = drm->ttm.type_ncoh;
+ else
+ type = drm->ttm.type_host;
+
+ if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
+ mem->comp = mem->kind = 0;
+ if (mem->comp && !(mmu->type[type].type & NVIF_MEM_COMP)) {
+ if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+ mem->kind = mmu->kind[mem->kind];
+ mem->comp = 0;
+ }
+
+ if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl;
+ else args.dma = tt->dma_address;
+
+ mutex_lock(&drm->master.lock);
+ cli->base.super = true;
+ ret = nvif_mem_init_type(mmu, cli->mem->oclass, type, PAGE_SHIFT,
+ reg->num_pages << PAGE_SHIFT,
+ &args, sizeof(args), &mem->mem);
+ cli->base.super = super;
+ mutex_unlock(&drm->master.lock);
+ return ret;
+}
+
+int
+nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
+{
+ struct nouveau_mem *mem = nouveau_mem(reg);
+ struct nouveau_cli *cli = mem->cli;
+ struct nouveau_drm *drm = cli->drm;
+ struct nvif_mmu *mmu = &cli->mmu;
+ bool super = cli->base.super;
+ u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
+ int ret;
+
+ mutex_lock(&drm->master.lock);
+ cli->base.super = true;
+ switch (cli->mem->oclass) {
+ case NVIF_CLASS_MEM_GF100:
+ ret = nvif_mem_init_type(mmu, cli->mem->oclass,
+ drm->ttm.type_vram, page, size,
+ &(struct gf100_mem_v0) {
+ .contig = contig,
+ }, sizeof(struct gf100_mem_v0),
+ &mem->mem);
+ break;
+ case NVIF_CLASS_MEM_NV50:
+ ret = nvif_mem_init_type(mmu, cli->mem->oclass,
+ drm->ttm.type_vram, page, size,
+ &(struct nv50_mem_v0) {
+ .bankswz = mmu->kind[mem->kind] == 2,
+ .contig = contig,
+ }, sizeof(struct nv50_mem_v0),
+ &mem->mem);
+ break;
+ default:
+ ret = -ENOSYS;
+ WARN_ON(1);
+ break;
+ }
+ cli->base.super = super;
+ mutex_unlock(&drm->master.lock);
+
+ reg->start = mem->mem.addr >> PAGE_SHIFT;
+ return ret;
+}
+
+void
+nouveau_mem_del(struct ttm_mem_reg *reg)
+{
+ struct nouveau_mem *mem = nouveau_mem(reg);
+ nouveau_mem_fini(mem);
+ kfree(reg->mm_node);
+ reg->mm_node = NULL;
+}
+
+int
+nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
+ struct ttm_mem_reg *reg)
+{
+ struct nouveau_mem *mem;
+
+ if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
+ return -ENOMEM;
+ mem->cli = cli;
+ mem->kind = kind;
+ mem->comp = comp;
+
+ reg->mm_node = mem;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
new file mode 100644
index 000000000000..f6d039e73812
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -0,0 +1,30 @@
+#ifndef __NOUVEAU_MEM_H__
+#define __NOUVEAU_MEM_H__
+#include <drm/ttm/ttm_bo_api.h>
+struct ttm_dma_tt;
+
+#include <nvif/mem.h>
+#include <nvif/vmm.h>
+
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_mem_reg *reg)
+{
+ return reg->mm_node;
+}
+
+struct nouveau_mem {
+ struct nouveau_cli *cli;
+ u8 kind;
+ u8 comp;
+ struct nvif_mem mem;
+ struct nvif_vma vma[2];
+};
+
+int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
+ struct ttm_mem_reg *);
+void nouveau_mem_del(struct ttm_mem_reg *);
+int nouveau_mem_vram(struct ttm_mem_reg *, bool contig, u8 page);
+int nouveau_mem_host(struct ttm_mem_reg *, struct ttm_dma_tt *);
+void nouveau_mem_fini(struct nouveau_mem *);
+int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index fde11ce466e4..11f6ca89769b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include "nouveau_drv.h"
+#include "nouveau_mem.h"
#include "nouveau_ttm.h"
struct nouveau_sgdma_be {
@@ -10,7 +11,7 @@ struct nouveau_sgdma_be {
* nouveau_bo.c works properly, otherwise have to move them here
*/
struct ttm_dma_tt ttm;
- struct nvkm_mem *node;
+ struct nouveau_mem *mem;
};
static void
@@ -28,19 +29,20 @@ static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- struct nvkm_mem *node = reg->mm_node;
-
- if (ttm->sg) {
- node->sg = ttm->sg;
- node->pages = NULL;
- } else {
- node->sg = NULL;
- node->pages = nvbe->ttm.dma_address;
+ struct nouveau_mem *mem = nouveau_mem(reg);
+ int ret;
+
+ ret = nouveau_mem_host(reg, &nvbe->ttm);
+ if (ret)
+ return ret;
+
+ ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
+ if (ret) {
+ nouveau_mem_fini(mem);
+ return ret;
}
- node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
- nvkm_vm_map(&node->vma[0], node);
- nvbe->node = node;
+ nvbe->mem = mem;
return 0;
}
@@ -48,7 +50,7 @@ static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- nvkm_vm_unmap(&nvbe->node->vma[0]);
+ nouveau_mem_fini(nvbe->mem);
return 0;
}
@@ -62,30 +64,20 @@ static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- struct nvkm_mem *node = reg->mm_node;
-
- /* noop: bound in move_notify() */
- if (ttm->sg) {
- node->sg = ttm->sg;
- node->pages = NULL;
- } else {
- node->sg = NULL;
- node->pages = nvbe->ttm.dma_address;
- }
- node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
- return 0;
-}
+ struct nouveau_mem *mem = nouveau_mem(reg);
+ int ret;
-static int
-nv50_sgdma_unbind(struct ttm_tt *ttm)
-{
- /* noop: unbound in move_notify() */
+ ret = nouveau_mem_host(reg, &nvbe->ttm);
+ if (ret)
+ return ret;
+
+ nvbe->mem = mem;
return 0;
}
static struct ttm_backend_func nv50_sgdma_backend = {
.bind = nv50_sgdma_bind,
- .unbind = nv50_sgdma_unbind,
+ .unbind = nv04_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index b0ad7fcefcf5..08b974b30482 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -23,53 +23,37 @@
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-
#include "nouveau_drv.h"
-#include "nouveau_ttm.h"
#include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_ttm.h"
#include <drm/drm_legacy.h>
#include <core/tegra.h>
static int
-nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+nouveau_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
- struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
- man->priv = fb;
return 0;
}
static int
-nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+nouveau_manager_fini(struct ttm_mem_type_manager *man)
{
- man->priv = NULL;
return 0;
}
-static inline void
-nvkm_mem_node_cleanup(struct nvkm_mem *node)
+static void
+nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
{
- if (node->vma[0].node) {
- nvkm_vm_unmap(&node->vma[0]);
- nvkm_vm_put(&node->vma[0]);
- }
-
- if (node->vma[1].node) {
- nvkm_vm_unmap(&node->vma[1]);
- nvkm_vm_put(&node->vma[1]);
- }
+ nouveau_mem_del(reg);
}
static void
-nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *reg)
+nouveau_manager_debug(struct ttm_mem_type_manager *man,
+ struct drm_printer *printer)
{
- struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
- nvkm_mem_node_cleanup(reg->mm_node);
- ram->func->put(ram, (struct nvkm_mem **)&reg->mm_node);
}
static int
@@ -78,192 +62,105 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
const struct ttm_place *place,
struct ttm_mem_reg *reg)
{
- struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nvkm_mem *node;
- u32 size_nc = 0;
+ struct nouveau_drm *drm = nvbo->cli->drm;
+ struct nouveau_mem *mem;
int ret;
if (drm->client.device.info.ram_size == 0)
return -ENOMEM;
- if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
- size_nc = 1 << nvbo->page_shift;
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+ mem = nouveau_mem(reg);
+ if (ret)
+ return ret;
- ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT,
- reg->page_alignment << PAGE_SHIFT, size_nc,
- (nvbo->tile_flags >> 8) & 0x3ff, &node);
+ ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
if (ret) {
- reg->mm_node = NULL;
- return (ret == -ENOSPC) ? 0 : ret;
+ nouveau_mem_del(reg);
+ if (ret == -ENOSPC) {
+ reg->mm_node = NULL;
+ return 0;
+ }
+ return ret;
}
- node->page_shift = nvbo->page_shift;
-
- reg->mm_node = node;
- reg->start = node->offset >> PAGE_SHIFT;
return 0;
}
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
- .init = nouveau_vram_manager_init,
- .takedown = nouveau_vram_manager_fini,
+ .init = nouveau_manager_init,
+ .takedown = nouveau_manager_fini,
.get_node = nouveau_vram_manager_new,
- .put_node = nouveau_vram_manager_del,
+ .put_node = nouveau_manager_del,
+ .debug = nouveau_manager_debug,
};
static int
-nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
- return 0;
-}
-
-static int
-nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
-{
- return 0;
-}
-
-static void
-nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *reg)
-{
- nvkm_mem_node_cleanup(reg->mm_node);
- kfree(reg->mm_node);
- reg->mm_node = NULL;
-}
-
-static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_mem_reg *reg)
{
- struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nvkm_mem *node;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
+ struct nouveau_drm *drm = nvbo->cli->drm;
+ struct nouveau_mem *mem;
+ int ret;
- node->page_shift = 12;
-
- switch (drm->client.device.info.family) {
- case NV_DEVICE_INFO_V0_TNT:
- case NV_DEVICE_INFO_V0_CELSIUS:
- case NV_DEVICE_INFO_V0_KELVIN:
- case NV_DEVICE_INFO_V0_RANKINE:
- case NV_DEVICE_INFO_V0_CURIE:
- break;
- case NV_DEVICE_INFO_V0_TESLA:
- if (drm->client.device.info.chipset != 0x50)
- node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
- break;
- case NV_DEVICE_INFO_V0_FERMI:
- case NV_DEVICE_INFO_V0_KEPLER:
- case NV_DEVICE_INFO_V0_MAXWELL:
- case NV_DEVICE_INFO_V0_PASCAL:
- node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
- break;
- default:
- NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
- drm->client.device.info.family);
- break;
- }
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+ mem = nouveau_mem(reg);
+ if (ret)
+ return ret;
- reg->mm_node = node;
- reg->start = 0;
+ reg->start = 0;
return 0;
}
-static void
-nouveau_gart_manager_debug(struct ttm_mem_type_manager *man,
- struct drm_printer *printer)
-{
-}
-
const struct ttm_mem_type_manager_func nouveau_gart_manager = {
- .init = nouveau_gart_manager_init,
- .takedown = nouveau_gart_manager_fini,
+ .init = nouveau_manager_init,
+ .takedown = nouveau_manager_fini,
.get_node = nouveau_gart_manager_new,
- .put_node = nouveau_gart_manager_del,
- .debug = nouveau_gart_manager_debug
+ .put_node = nouveau_manager_del,
+ .debug = nouveau_manager_debug
};
-/*XXX*/
-#include <subdev/mmu/nv04.h>
-static int
-nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
-{
- struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device);
- struct nv04_mmu *priv = (void *)mmu;
- struct nvkm_vm *vm = NULL;
- nvkm_vm_ref(priv->vm, &vm, NULL);
- man->priv = vm;
- return 0;
-}
-
-static int
-nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
-{
- struct nvkm_vm *vm = man->priv;
- nvkm_vm_ref(NULL, &vm, NULL);
- man->priv = NULL;
- return 0;
-}
-
-static void
-nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
-{
- struct nvkm_mem *node = reg->mm_node;
- if (node->vma[0].node)
- nvkm_vm_put(&node->vma[0]);
- kfree(reg->mm_node);
- reg->mm_node = NULL;
-}
-
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_mem_reg *reg)
{
- struct nvkm_mem *node;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_drm *drm = nvbo->cli->drm;
+ struct nouveau_mem *mem;
int ret;
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
- node->page_shift = 12;
+ ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+ mem = nouveau_mem(reg);
+ if (ret)
+ return ret;
- ret = nvkm_vm_get(man->priv, reg->num_pages << 12, node->page_shift,
- NV_MEM_ACCESS_RW, &node->vma[0]);
+ ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
+ reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
if (ret) {
- kfree(node);
+ nouveau_mem_del(reg);
+ if (ret == -ENOSPC) {
+ reg->mm_node = NULL;
+ return 0;
+ }
return ret;
}
- reg->mm_node = node;
- reg->start = node->vma[0].offset >> PAGE_SHIFT;
+ reg->start = mem->vma[0].addr >> PAGE_SHIFT;
return 0;
}
-static void
-nv04_gart_manager_debug(struct ttm_mem_type_manager *man,
- struct drm_printer *printer)
-{
-}
-
const struct ttm_mem_type_manager_func nv04_gart_manager = {
- .init = nv04_gart_manager_init,
- .takedown = nv04_gart_manager_fini,
+ .init = nouveau_manager_init,
+ .takedown = nouveau_manager_fini,
.get_node = nv04_gart_manager_new,
- .put_node = nv04_gart_manager_del,
- .debug = nv04_gart_manager_debug
+ .put_node = nouveau_manager_del,
+ .debug = nouveau_manager_debug
};
int
@@ -343,44 +240,43 @@ nouveau_ttm_init(struct nouveau_drm *drm)
{
struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nvkm_pci *pci = device->pci;
+ struct nvif_mmu *mmu = &drm->client.mmu;
struct drm_device *dev = drm->dev;
- u8 bits;
- int ret;
+ int typei, ret;
- if (pci && pci->agp.bridge) {
- drm->agp.bridge = pci->agp.bridge;
- drm->agp.base = pci->agp.base;
- drm->agp.size = pci->agp.size;
- drm->agp.cma = pci->agp.cma;
- }
+ typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
+ NVIF_MEM_COHERENT);
+ if (typei < 0)
+ return -ENOSYS;
- bits = nvxx_mmu(&drm->client.device)->dma_bits;
- if (nvxx_device(&drm->client.device)->func->pci) {
- if (drm->agp.bridge)
- bits = 32;
- } else if (device->func->tegra) {
- struct nvkm_device_tegra *tegra = device->func->tegra(device);
+ drm->ttm.type_host = typei;
- /*
- * If the platform can use a IOMMU, then the addressable DMA
- * space is constrained by the IOMMU bit
- */
- if (tegra->func->iommu_bit)
- bits = min(bits, tegra->func->iommu_bit);
+ typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
+ if (typei < 0)
+ return -ENOSYS;
- }
+ drm->ttm.type_ncoh = typei;
- ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
- if (ret && bits != 32) {
- bits = 32;
- ret = dma_set_mask(dev->dev, DMA_BIT_MASK(bits));
+ if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
+ drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
+ NVIF_MEM_KIND |
+ NVIF_MEM_COMP |
+ NVIF_MEM_DISP);
+ if (typei < 0)
+ return -ENOSYS;
+
+ drm->ttm.type_vram = typei;
+ } else {
+ drm->ttm.type_vram = -1;
}
- if (ret)
- return ret;
- ret = dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(bits));
- if (ret)
- dma_set_coherent_mask(dev->dev, DMA_BIT_MASK(32));
+ if (pci && pci->agp.bridge) {
+ drm->agp.bridge = pci->agp.bridge;
+ drm->agp.base = pci->agp.base;
+ drm->agp.size = pci->agp.size;
+ drm->agp.cma = pci->agp.cma;
+ }
ret = nouveau_ttm_global_init(drm);
if (ret)
@@ -391,7 +287,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
&nouveau_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
- bits <= 32 ? true : false);
+ drm->client.mmu.dmabits <= 32 ? true : false);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
return ret;
@@ -415,7 +311,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* GART init */
if (!drm->agp.bridge) {
- drm->gem.gart_available = nvxx_mmu(&drm->client.device)->limit;
+ drm->gem.gart_available = drm->client.vmm.vmm.limit;
} else {
drm->gem.gart_available = drm->agp.size;
}
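
The memory-type probing above relies on nvif_mmu_type(), whose body is not visible in this series' hunks; the sketch below shows the selection it is assumed to perform, mirroring the loop nvif_mem_init() uses further down — return the index of the first MMU memory type whose capability bits cover every requested NVIF_MEM_* flag.

/* Assumed behaviour of nvif_mmu_type() (helper not shown in this diff):
 * pick the first reported memory type that satisfies all bits in `type`.
 */
#include <nvif/mmu.h>

static inline int example_mmu_type(struct nvif_mmu *mmu, u8 type)
{
	int i;
	for (i = 0; i < mmu->type_nr; i++) {
		if ((mmu->type[i].type & type) == type)
			return i;
	}
	return -EINVAL;
}
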
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
new file mode 100644
index 000000000000..9e2628dd8e4d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_vmm.h"
+#include "nouveau_drv.h"
+#include "nouveau_bo.h"
+#include "nouveau_mem.h"
+
+void
+nouveau_vma_unmap(struct nouveau_vma *vma)
+{
+ if (vma->mem) {
+ nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);
+ vma->mem = NULL;
+ }
+}
+
+int
+nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)
+{
+ struct nvif_vma tmp = { .addr = vma->addr };
+ int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);
+ if (ret)
+ return ret;
+ vma->mem = mem;
+ return 0;
+}
+
+struct nouveau_vma *
+nouveau_vma_find(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm)
+{
+ struct nouveau_vma *vma;
+
+ list_for_each_entry(vma, &nvbo->vma_list, head) {
+ if (vma->vmm == vmm)
+ return vma;
+ }
+
+ return NULL;
+}
+
+void
+nouveau_vma_del(struct nouveau_vma **pvma)
+{
+ struct nouveau_vma *vma = *pvma;
+ if (vma && --vma->refs <= 0) {
+ if (likely(vma->addr != ~0ULL)) {
+ struct nvif_vma tmp = { .addr = vma->addr, .size = 1 };
+ nvif_vmm_put(&vma->vmm->vmm, &tmp);
+ }
+ list_del(&vma->head);
+ kfree(vma);
+ *pvma = NULL;
+ }
+}
+
+int
+nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
+ struct nouveau_vma **pvma)
+{
+ struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
+ struct nouveau_vma *vma;
+ struct nvif_vma tmp;
+ int ret;
+
+ if ((vma = *pvma = nouveau_vma_find(nvbo, vmm))) {
+ vma->refs++;
+ return 0;
+ }
+
+ if (!(vma = *pvma = kmalloc(sizeof(*vma), GFP_KERNEL)))
+ return -ENOMEM;
+ vma->vmm = vmm;
+ vma->refs = 1;
+ vma->addr = ~0ULL;
+ vma->mem = NULL;
+ list_add_tail(&vma->head, &nvbo->vma_list);
+
+ if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+ mem->mem.page == nvbo->page) {
+ ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
+ mem->mem.size, &tmp);
+ if (ret)
+ goto done;
+
+ vma->addr = tmp.addr;
+ ret = nouveau_vma_map(vma, mem);
+ } else {
+ ret = nvif_vmm_get(&vmm->vmm, PTES, false, mem->mem.page, 0,
+ mem->mem.size, &tmp);
+ vma->addr = tmp.addr;
+ }
+
+done:
+ if (ret)
+ nouveau_vma_del(pvma);
+ return ret;
+}
+
+void
+nouveau_vmm_fini(struct nouveau_vmm *vmm)
+{
+ nvif_vmm_fini(&vmm->vmm);
+ vmm->cli = NULL;
+}
+
+int
+nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
+{
+ int ret = nvif_vmm_init(&cli->mmu, oclass, PAGE_SIZE, 0, NULL, 0,
+ &vmm->vmm);
+ if (ret)
+ return ret;
+
+ vmm->cli = cli;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.h b/drivers/gpu/drm/nouveau/nouveau_vmm.h
new file mode 100644
index 000000000000..5c31f43678d3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.h
@@ -0,0 +1,31 @@
+#ifndef __NOUVEAU_VMA_H__
+#define __NOUVEAU_VMA_H__
+#include <nvif/vmm.h>
+struct nouveau_bo;
+struct nouveau_mem;
+
+struct nouveau_vma {
+ struct nouveau_vmm *vmm;
+ int refs;
+ struct list_head head;
+ u64 addr;
+
+ struct nouveau_mem *mem;
+};
+
+struct nouveau_vma *nouveau_vma_find(struct nouveau_bo *, struct nouveau_vmm *);
+int nouveau_vma_new(struct nouveau_bo *, struct nouveau_vmm *,
+ struct nouveau_vma **);
+void nouveau_vma_del(struct nouveau_vma **);
+int nouveau_vma_map(struct nouveau_vma *, struct nouveau_mem *);
+void nouveau_vma_unmap(struct nouveau_vma *);
+
+struct nouveau_vmm {
+ struct nouveau_cli *cli;
+ struct nvif_vmm vmm;
+ struct nvkm_vm *vm;
+};
+
+int nouveau_vmm_init(struct nouveau_cli *, s32 oclass, struct nouveau_vmm *);
+void nouveau_vmm_fini(struct nouveau_vmm *);
+#endif
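
Taken together, nouveau_vmm.c and this header replace the old per-BO nvkm_vma handling with a ref-counted, per-client VMA; the nv84_fence and fbcon hunks below use it roughly as in this minimal sketch (do_something_with() and the surrounding setup are hypothetical):

#include "nouveau_vmm.h"

static int example_bo_vma(struct nouveau_cli *cli, struct nouveau_bo *nvbo)
{
	struct nouveau_vma *vma;
	int ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);	/* takes a ref */
	if (ret)
		return ret;

	do_something_with(vma->addr);	/* hypothetical: GPU VA of the BO */

	nouveau_vma_del(&vma);		/* drops the ref; freed on last put */
	return 0;
}
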
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index fb47d46050ec..584466ef688f 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -318,7 +318,7 @@ nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
ret = nvif_object_init(disp, 0, oclass[0],
data, size, &chan->user);
if (ret == 0)
- nvif_object_map(&chan->user);
+ nvif_object_map(&chan->user, NULL, 0);
nvif_object_sclass_put(&sclass);
return ret;
}
@@ -424,7 +424,7 @@ nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
{
struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
struct nv50_dmac_ctxdma *ctxdma;
- const u8 kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+ const u8 kind = fb->nvbo->kind;
const u32 handle = 0xfb000000 | kind;
struct {
struct nv_dma_v0 base;
@@ -510,6 +510,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
int ret;
mutex_init(&dmac->lock);
+ INIT_LIST_HEAD(&dmac->ctxdma);
dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
&dmac->handle, GFP_KERNEL);
@@ -556,7 +557,6 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (ret)
return ret;
- INIT_LIST_HEAD(&dmac->ctxdma);
return ret;
}
@@ -847,7 +847,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
asyw->image.w = fb->base.width;
asyw->image.h = fb->base.height;
- asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
+ asyw->image.kind = fb->nvbo->kind;
if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
asyw->interval = 0;
@@ -857,9 +857,9 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
if (asyw->image.kind) {
asyw->image.layout = 0;
if (drm->client.device.info.chipset >= 0xc0)
- asyw->image.block = fb->nvbo->tile_mode >> 4;
+ asyw->image.block = fb->nvbo->mode >> 4;
else
- asyw->image.block = fb->nvbo->tile_mode;
+ asyw->image.block = fb->nvbo->mode;
asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
} else {
asyw->image.layout = 1;
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 327dcd7901ed..facd18564e0d 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -25,6 +25,7 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
+#include "nouveau_vmm.h"
int
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
@@ -239,8 +240,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb->vma.offset));
- OUT_RING(chan, lower_32_bits(fb->vma.offset));
+ OUT_RING(chan, upper_32_bits(fb->vma->addr));
+ OUT_RING(chan, lower_32_bits(fb->vma->addr));
BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -248,8 +249,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb->vma.offset));
- OUT_RING(chan, lower_32_bits(fb->vma.offset));
+ OUT_RING(chan, upper_32_bits(fb->vma->addr));
+ OUT_RING(chan, lower_32_bits(fb->vma->addr));
FIRE_RING(chan);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index bd7a8a1e4ad9..5f0c0c27d5dc 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -25,6 +25,7 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
+#include "nouveau_vmm.h"
#include "nv50_display.h"
@@ -68,12 +69,7 @@ nv84_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = fence->channel;
struct nv84_fence_chan *fctx = chan->fence;
- u64 addr = chan->chid * 16;
-
- if (fence->sysmem)
- addr += fctx->vma_gart.offset;
- else
- addr += fctx->vma.offset;
+ u64 addr = fctx->vma->addr + chan->chid * 16;
return fctx->base.emit32(chan, addr, fence->base.seqno);
}
@@ -83,12 +79,7 @@ nv84_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
struct nv84_fence_chan *fctx = chan->fence;
- u64 addr = prev->chid * 16;
-
- if (fence->sysmem)
- addr += fctx->vma_gart.offset;
- else
- addr += fctx->vma.offset;
+ u64 addr = fctx->vma->addr + prev->chid * 16;
return fctx->base.sync32(chan, addr, fence->base.seqno);
}
@@ -108,8 +99,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
mutex_lock(&priv->mutex);
- nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
- nouveau_bo_vma_del(priv->bo, &fctx->vma);
+ nouveau_vma_del(&fctx->vma);
mutex_unlock(&priv->mutex);
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
@@ -137,11 +127,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
fctx->base.sequence = nv84_fence_read(chan);
mutex_lock(&priv->mutex);
- ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
- if (ret == 0) {
- ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
- &fctx->vma_gart);
- }
+ ret = nouveau_vma_new(priv->bo, &cli->vmm, &fctx->vma);
mutex_unlock(&priv->mutex);
if (ret)
@@ -182,10 +168,6 @@ static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;
- nouveau_bo_unmap(priv->bo_gart);
- if (priv->bo_gart)
- nouveau_bo_unpin(priv->bo_gart);
- nouveau_bo_ref(NULL, &priv->bo_gart);
nouveau_bo_unmap(priv->bo);
if (priv->bo)
nouveau_bo_unpin(priv->bo);
@@ -238,21 +220,6 @@ nv84_fence_create(struct nouveau_drm *drm)
nouveau_bo_ref(NULL, &priv->bo);
}
- if (ret == 0)
- ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0,
- TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0,
- 0, NULL, NULL, &priv->bo_gart);
- if (ret == 0) {
- ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT, false);
- if (ret == 0) {
- ret = nouveau_bo_map(priv->bo_gart);
- if (ret)
- nouveau_bo_unpin(priv->bo_gart);
- }
- if (ret)
- nouveau_bo_ref(NULL, &priv->bo_gart);
- }
-
if (ret)
nv84_fence_destroy(drm);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 90f27bfa381f..c0deef4fe727 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -25,6 +25,7 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
+#include "nouveau_vmm.h"
int
nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
@@ -239,8 +240,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(fb->vma.offset));
- OUT_RING (chan, lower_32_bits(fb->vma.offset));
+ OUT_RING (chan, upper_32_bits(fb->vma->addr));
+ OUT_RING (chan, lower_32_bits(fb->vma->addr));
BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
@@ -250,8 +251,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(fb->vma.offset));
- OUT_RING (chan, lower_32_bits(fb->vma.offset));
+ OUT_RING (chan, upper_32_bits(fb->vma->addr));
+ OUT_RING (chan, lower_32_bits(fb->vma->addr));
FIRE_RING (chan);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index 067b5e9f5ec1..f1675a4ab6fa 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -2,4 +2,7 @@ nvif-y := nvif/object.o
nvif-y += nvif/client.o
nvif-y += nvif/device.o
nvif-y += nvif/driver.o
+nvif-y += nvif/mem.o
+nvif-y += nvif/mmu.o
nvif-y += nvif/notify.o
+nvif-y += nvif/vmm.o
diff --git a/drivers/gpu/drm/nouveau/nvif/mem.c b/drivers/gpu/drm/nouveau/nvif/mem.c
new file mode 100644
index 000000000000..0f9382c60145
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/mem.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/mem.h>
+#include <nvif/client.h>
+
+#include <nvif/if000a.h>
+
+void
+nvif_mem_fini(struct nvif_mem *mem)
+{
+ nvif_object_fini(&mem->object);
+}
+
+int
+nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page,
+ u64 size, void *argv, u32 argc, struct nvif_mem *mem)
+{
+ struct nvif_mem_v0 *args;
+ u8 stack[128];
+ int ret;
+
+ mem->object.client = NULL;
+ if (type < 0)
+ return -EINVAL;
+
+ if (sizeof(*args) + argc > sizeof(stack)) {
+ if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
+ return -ENOMEM;
+ } else {
+ args = (void *)stack;
+ }
+ args->version = 0;
+ args->type = type;
+ args->page = page;
+ args->size = size;
+ memcpy(args->data, argv, argc);
+
+ ret = nvif_object_init(&mmu->object, 0, oclass, args,
+ sizeof(*args) + argc, &mem->object);
+ if (ret == 0) {
+ mem->type = mmu->type[type].type;
+ mem->page = args->page;
+ mem->addr = args->addr;
+ mem->size = args->size;
+ }
+
+ if (args != (void *)stack)
+ kfree(args);
+ return ret;
+
+}
+
+int
+nvif_mem_init(struct nvif_mmu *mmu, s32 oclass, u8 type, u8 page,
+ u64 size, void *argv, u32 argc, struct nvif_mem *mem)
+{
+ int ret = -EINVAL, i;
+
+ mem->object.client = NULL;
+
+ for (i = 0; ret && i < mmu->type_nr; i++) {
+ if ((mmu->type[i].type & type) == type) {
+ ret = nvif_mem_init_type(mmu, oclass, i, page, size,
+ argv, argc, mem);
+ }
+ }
+
+ return ret;
+}
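
nvif_mem_init() walks the MMU's advertised memory types and allocates from the first one whose capability bits include every flag the caller requested; a minimal sketch of a call follows (the oclass value and the meaning of page == 0 as "let the server choose" are assumptions):

struct nvif_mem mem;
int ret = nvif_mem_init(mmu, oclass /* negotiated memory class */,
			NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | NVIF_MEM_COHERENT,
			0 /* page size: assumed "any" */, 1 << 20,
			NULL, 0, &mem);
if (ret == 0)
	nvif_mem_fini(&mem);
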
diff --git a/drivers/gpu/drm/nouveau/nvif/mmu.c b/drivers/gpu/drm/nouveau/nvif/mmu.c
new file mode 100644
index 000000000000..15d0dcbf7ab4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/mmu.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/mmu.h>
+
+#include <nvif/class.h>
+#include <nvif/if0008.h>
+
+void
+nvif_mmu_fini(struct nvif_mmu *mmu)
+{
+ kfree(mmu->kind);
+ kfree(mmu->type);
+ kfree(mmu->heap);
+ nvif_object_fini(&mmu->object);
+}
+
+int
+nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu)
+{
+ struct nvif_mmu_v0 args;
+ int ret, i;
+
+ args.version = 0;
+ mmu->heap = NULL;
+ mmu->type = NULL;
+ mmu->kind = NULL;
+
+ ret = nvif_object_init(parent, 0, oclass, &args, sizeof(args),
+ &mmu->object);
+ if (ret)
+ goto done;
+
+ mmu->dmabits = args.dmabits;
+ mmu->heap_nr = args.heap_nr;
+ mmu->type_nr = args.type_nr;
+ mmu->kind_nr = args.kind_nr;
+
+ mmu->heap = kmalloc(sizeof(*mmu->heap) * mmu->heap_nr, GFP_KERNEL);
+ mmu->type = kmalloc(sizeof(*mmu->type) * mmu->type_nr, GFP_KERNEL);
+ if (ret = -ENOMEM, !mmu->heap || !mmu->type)
+ goto done;
+
+ mmu->kind = kmalloc(sizeof(*mmu->kind) * mmu->kind_nr, GFP_KERNEL);
+ if (!mmu->kind && mmu->kind_nr)
+ goto done;
+
+ for (i = 0; i < mmu->heap_nr; i++) {
+ struct nvif_mmu_heap_v0 args = { .index = i };
+
+ ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_HEAP,
+ &args, sizeof(args));
+ if (ret)
+ goto done;
+
+ mmu->heap[i].size = args.size;
+ }
+
+ for (i = 0; i < mmu->type_nr; i++) {
+ struct nvif_mmu_type_v0 args = { .index = i };
+
+ ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_TYPE,
+ &args, sizeof(args));
+ if (ret)
+ goto done;
+
+ mmu->type[i].type = 0;
+ if (args.vram) mmu->type[i].type |= NVIF_MEM_VRAM;
+ if (args.host) mmu->type[i].type |= NVIF_MEM_HOST;
+ if (args.comp) mmu->type[i].type |= NVIF_MEM_COMP;
+ if (args.disp) mmu->type[i].type |= NVIF_MEM_DISP;
+ if (args.kind ) mmu->type[i].type |= NVIF_MEM_KIND;
+ if (args.mappable) mmu->type[i].type |= NVIF_MEM_MAPPABLE;
+ if (args.coherent) mmu->type[i].type |= NVIF_MEM_COHERENT;
+ if (args.uncached) mmu->type[i].type |= NVIF_MEM_UNCACHED;
+ mmu->type[i].heap = args.heap;
+ }
+
+ if (mmu->kind_nr) {
+ struct nvif_mmu_kind_v0 *kind;
+ u32 argc = sizeof(*kind) + sizeof(*kind->data) * mmu->kind_nr;
+
+ if (ret = -ENOMEM, !(kind = kmalloc(argc, GFP_KERNEL)))
+ goto done;
+ kind->version = 0;
+ kind->count = mmu->kind_nr;
+
+ ret = nvif_object_mthd(&mmu->object, NVIF_MMU_V0_KIND,
+ kind, argc);
+ if (ret == 0)
+ memcpy(mmu->kind, kind->data, kind->count);
+ kfree(kind);
+ }
+
+done:
+ if (ret)
+ nvif_mmu_fini(mmu);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index c3fb6a20f567..40adfe9b334b 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -166,46 +166,77 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
}
void
-nvif_object_unmap(struct nvif_object *object)
+nvif_object_unmap_handle(struct nvif_object *object)
+{
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_unmap unmap;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_UNMAP,
+ };
+
+ nvif_object_ioctl(object, &args, sizeof(args), NULL);
+}
+
+int
+nvif_object_map_handle(struct nvif_object *object, void *argv, u32 argc,
+ u64 *handle, u64 *length)
{
- if (object->map.size) {
- struct nvif_client *client = object->client;
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_unmap unmap;
- } args = {
- .ioctl.type = NVIF_IOCTL_V0_UNMAP,
- };
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_map_v0 map;
+ } *args;
+ u32 argn = sizeof(*args) + argc;
+ int ret, maptype;
+
+ if (!(args = kzalloc(argn, GFP_KERNEL)))
+ return -ENOMEM;
+ args->ioctl.type = NVIF_IOCTL_V0_MAP;
+ memcpy(args->map.data, argv, argc);
- if (object->map.ptr) {
+ ret = nvif_object_ioctl(object, args, argn, NULL);
+ *handle = args->map.handle;
+ *length = args->map.length;
+ maptype = args->map.type;
+ kfree(args);
+ return ret ? ret : (maptype == NVIF_IOCTL_MAP_V0_IO);
+}
+
+void
+nvif_object_unmap(struct nvif_object *object)
+{
+ struct nvif_client *client = object->client;
+ if (object->map.ptr) {
+ if (object->map.size) {
client->driver->unmap(client, object->map.ptr,
object->map.size);
- object->map.ptr = NULL;
+ object->map.size = 0;
}
-
- nvif_object_ioctl(object, &args, sizeof(args), NULL);
- object->map.size = 0;
+ object->map.ptr = NULL;
+ nvif_object_unmap_handle(object);
}
}
int
-nvif_object_map(struct nvif_object *object)
+nvif_object_map(struct nvif_object *object, void *argv, u32 argc)
{
struct nvif_client *client = object->client;
- struct {
- struct nvif_ioctl_v0 ioctl;
- struct nvif_ioctl_map_v0 map;
- } args = {
- .ioctl.type = NVIF_IOCTL_V0_MAP,
- };
- int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
- if (ret == 0) {
- object->map.size = args.map.length;
- object->map.ptr = client->driver->map(client, args.map.handle,
- object->map.size);
- if (ret = -ENOMEM, object->map.ptr)
+ u64 handle, length;
+ int ret = nvif_object_map_handle(object, argv, argc, &handle, &length);
+ if (ret >= 0) {
+ if (ret) {
+ object->map.ptr = client->driver->map(client,
+ handle,
+ length);
+ if (ret = -ENOMEM, object->map.ptr) {
+ object->map.size = length;
+ return 0;
+ }
+ } else {
+ object->map.ptr = (void *)(unsigned long)handle;
return 0;
- nvif_object_unmap(object);
+ }
+ nvif_object_unmap_handle(object);
}
return ret;
}
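
The map path is now split in two: nvif_object_map_handle() returns 1 on success when the handle is an IO region that still needs the driver's map() callback and 0 when it is already a CPU-addressable pointer, while nvif_object_map() keeps the old convenience behaviour but takes optional per-map arguments. Callers with nothing extra to pass use NULL/0, as the nv50_display hunk earlier does:

	ret = nvif_object_map(&chan->user, NULL, 0);	/* no map arguments */
	/* ... use object->map.ptr ... */
	nvif_object_unmap(&chan->user);
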
diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
new file mode 100644
index 000000000000..31cdb2d2e1ff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/vmm.h>
+#include <nvif/mem.h>
+
+#include <nvif/if000c.h>
+
+int
+nvif_vmm_unmap(struct nvif_vmm *vmm, u64 addr)
+{
+ return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_UNMAP,
+ &(struct nvif_vmm_unmap_v0) { .addr = addr },
+ sizeof(struct nvif_vmm_unmap_v0));
+}
+
+int
+nvif_vmm_map(struct nvif_vmm *vmm, u64 addr, u64 size, void *argv, u32 argc,
+ struct nvif_mem *mem, u64 offset)
+{
+ struct nvif_vmm_map_v0 *args;
+ u8 stack[16];
+ int ret;
+
+ if (sizeof(*args) + argc > sizeof(stack)) {
+ if (!(args = kmalloc(sizeof(*args) + argc, GFP_KERNEL)))
+ return -ENOMEM;
+ } else {
+ args = (void *)stack;
+ }
+
+ args->version = 0;
+ args->addr = addr;
+ args->size = size;
+ args->memory = nvif_handle(&mem->object);
+ args->offset = offset;
+ memcpy(args->data, argv, argc);
+
+ ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_MAP,
+ args, sizeof(*args) + argc);
+ if (args != (void *)stack)
+ kfree(args);
+ return ret;
+}
+
+void
+nvif_vmm_put(struct nvif_vmm *vmm, struct nvif_vma *vma)
+{
+ if (vma->size) {
+ WARN_ON(nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PUT,
+ &(struct nvif_vmm_put_v0) {
+ .addr = vma->addr,
+ }, sizeof(struct nvif_vmm_put_v0)));
+ vma->size = 0;
+ }
+}
+
+int
+nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
+ u8 page, u8 align, u64 size, struct nvif_vma *vma)
+{
+ struct nvif_vmm_get_v0 args;
+ int ret;
+
+ args.version = vma->size = 0;
+ args.sparse = sparse;
+ args.page = page;
+ args.align = align;
+ args.size = size;
+
+ switch (type) {
+ case ADDR: args.type = NVIF_VMM_GET_V0_ADDR; break;
+ case PTES: args.type = NVIF_VMM_GET_V0_PTES; break;
+ case LAZY: args.type = NVIF_VMM_GET_V0_LAZY; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_GET,
+ &args, sizeof(args));
+ if (ret == 0) {
+ vma->addr = args.addr;
+ vma->size = args.size;
+ }
+ return ret;
+}
+
+void
+nvif_vmm_fini(struct nvif_vmm *vmm)
+{
+ kfree(vmm->page);
+ nvif_object_fini(&vmm->object);
+}
+
+int
+nvif_vmm_init(struct nvif_mmu *mmu, s32 oclass, u64 addr, u64 size,
+ void *argv, u32 argc, struct nvif_vmm *vmm)
+{
+ struct nvif_vmm_v0 *args;
+ u32 argn = sizeof(*args) + argc;
+ int ret = -ENOSYS, i;
+
+ vmm->object.client = NULL;
+ vmm->page = NULL;
+
+ if (!(args = kmalloc(argn, GFP_KERNEL)))
+ return -ENOMEM;
+ args->version = 0;
+ args->addr = addr;
+ args->size = size;
+ memcpy(args->data, argv, argc);
+
+ ret = nvif_object_init(&mmu->object, 0, oclass, args, argn,
+ &vmm->object);
+ if (ret)
+ goto done;
+
+ vmm->start = args->addr;
+ vmm->limit = args->size;
+
+ vmm->page_nr = args->page_nr;
+ vmm->page = kmalloc(sizeof(*vmm->page) * vmm->page_nr, GFP_KERNEL);
+ if (!vmm->page) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < vmm->page_nr; i++) {
+ struct nvif_vmm_page_v0 args = { .index = i };
+
+ ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PAGE,
+ &args, sizeof(args));
+ if (ret)
+ break;
+
+ vmm->page[i].shift = args.shift;
+ vmm->page[i].sparse = args.sparse;
+ vmm->page[i].vram = args.vram;
+ vmm->page[i].host = args.host;
+ vmm->page[i].comp = args.comp;
+ }
+
+done:
+ if (ret)
+ nvif_vmm_fini(vmm);
+ kfree(args);
+ return ret;
+}
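
A sketch of the intended get/map/unmap/put flow for the new VMM interface, assuming vmm and mem were set up with nvif_vmm_init()/nvif_mem_init() and that no extra map arguments are needed (the in-tree callers pass real argv/argc where required):

struct nvif_vma vma;
int ret = nvif_vmm_get(vmm, PTES, false, 12 /* 4KiB pages */, 0,
		       mem->size, &vma);
if (ret == 0) {
	ret = nvif_vmm_map(vmm, vma.addr, mem->size, NULL, 0, mem, 0);
	if (ret == 0)
		nvif_vmm_unmap(vmm, vma.addr);
	nvif_vmm_put(vmm, &vma);	/* releases the range; clears vma.size */
}
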
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index 0d3a896892b4..ac671202919e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -301,5 +301,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg,
client->debug = nvkm_dbgopt(dbg, "CLIENT");
client->objroot = RB_ROOT;
client->ntfy = ntfy;
+ INIT_LIST_HEAD(&client->umem);
+ spin_lock_init(&client->lock);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index b6c916954a10..657231c3c098 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -126,6 +126,15 @@ nvkm_engine_init(struct nvkm_subdev *subdev)
return ret;
}
+static int
+nvkm_engine_preinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_engine *engine = nvkm_engine(subdev);
+ if (engine->func->preinit)
+ engine->func->preinit(engine);
+ return 0;
+}
+
static void *
nvkm_engine_dtor(struct nvkm_subdev *subdev)
{
@@ -138,6 +147,7 @@ nvkm_engine_dtor(struct nvkm_subdev *subdev)
static const struct nvkm_subdev_func
nvkm_engine_func = {
.dtor = nvkm_engine_dtor,
+ .preinit = nvkm_engine_preinit,
.init = nvkm_engine_init,
.fini = nvkm_engine_fini,
.intr = nvkm_engine_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
index a7bd22706b2a..d6de2b3ed2c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
@@ -42,6 +42,14 @@ nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
}
/* accessor functions for gpuobjs allocated directly from instmem */
+static int
+nvkm_gpuobj_heap_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+ struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc)
+{
+ return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);
+}
+
static u32
nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
@@ -67,6 +75,7 @@ nvkm_gpuobj_heap_fast = {
.release = nvkm_gpuobj_heap_release,
.rd32 = nvkm_gpuobj_rd32_fast,
.wr32 = nvkm_gpuobj_wr32_fast,
+ .map = nvkm_gpuobj_heap_map,
};
static const struct nvkm_gpuobj_func
@@ -74,6 +83,7 @@ nvkm_gpuobj_heap_slow = {
.release = nvkm_gpuobj_heap_release,
.rd32 = nvkm_gpuobj_heap_rd32,
.wr32 = nvkm_gpuobj_heap_wr32,
+ .map = nvkm_gpuobj_heap_map,
};
static void *
@@ -90,9 +100,19 @@ nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap = {
.acquire = nvkm_gpuobj_heap_acquire,
+ .map = nvkm_gpuobj_heap_map,
};
/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
+static int
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+ struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc)
+{
+ return nvkm_memory_map(gpuobj->parent, gpuobj->node->offset + offset,
+ vmm, vma, argv, argc);
+}
+
static u32
nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
@@ -118,6 +138,7 @@ nvkm_gpuobj_fast = {
.release = nvkm_gpuobj_release,
.rd32 = nvkm_gpuobj_rd32_fast,
.wr32 = nvkm_gpuobj_wr32_fast,
+ .map = nvkm_gpuobj_map,
};
static const struct nvkm_gpuobj_func
@@ -125,6 +146,7 @@ nvkm_gpuobj_slow = {
.release = nvkm_gpuobj_release,
.rd32 = nvkm_gpuobj_rd32,
.wr32 = nvkm_gpuobj_wr32,
+ .map = nvkm_gpuobj_map,
};
static void *
@@ -143,6 +165,7 @@ nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
static const struct nvkm_gpuobj_func
nvkm_gpuobj_func = {
.acquire = nvkm_gpuobj_acquire,
+ .map = nvkm_gpuobj_map,
};
static int
@@ -185,7 +208,7 @@ nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
gpuobj->size = nvkm_memory_size(gpuobj->memory);
}
- return nvkm_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
+ return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1);
}
void
@@ -196,7 +219,7 @@ nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj)
if (gpuobj->parent)
nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node);
nvkm_mm_fini(&gpuobj->heap);
- nvkm_memory_del(&gpuobj->memory);
+ nvkm_memory_unref(&gpuobj->memory);
kfree(*pgpuobj);
*pgpuobj = NULL;
}
@@ -218,26 +241,6 @@ nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
return ret;
}
-int
-nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
- u32 access, struct nvkm_vma *vma)
-{
- struct nvkm_memory *memory = gpuobj->memory;
- int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
- if (ret == 0)
- nvkm_memory_map(memory, vma, 0);
- return ret;
-}
-
-void
-nvkm_gpuobj_unmap(struct nvkm_vma *vma)
-{
- if (vma->node) {
- nvkm_vm_unmap(vma);
- nvkm_vm_put(vma);
- }
-}
-
/* the below is basically only here to support sharing the paged dma object
* for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
* anywhere else.
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
index be19bbe56bba..d777df5a64e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
@@ -53,7 +53,7 @@ nvkm_ioctl_sclass(struct nvkm_client *client,
union {
struct nvif_ioctl_sclass_v0 v0;
} *args = data;
- struct nvkm_oclass oclass;
+ struct nvkm_oclass oclass = { .client = client };
int ret = -ENOSYS, i = 0;
nvif_ioctl(object, "sclass size %d\n", size);
@@ -257,13 +257,19 @@ nvkm_ioctl_map(struct nvkm_client *client,
union {
struct nvif_ioctl_map_v0 v0;
} *args = data;
+ enum nvkm_object_map type;
int ret = -ENOSYS;
nvif_ioctl(object, "map size %d\n", size);
- if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
nvif_ioctl(object, "map vers %d\n", args->v0.version);
- ret = nvkm_object_map(object, &args->v0.handle,
- &args->v0.length);
+ ret = nvkm_object_map(object, data, size, &type,
+ &args->v0.handle,
+ &args->v0.length);
+ if (type == NVKM_OBJECT_MAP_IO)
+ args->v0.type = NVIF_IOCTL_MAP_V0_IO;
+ else
+ args->v0.type = NVIF_IOCTL_MAP_V0_VA;
}
return ret;
@@ -281,6 +287,7 @@ nvkm_ioctl_unmap(struct nvkm_client *client,
nvif_ioctl(object, "unmap size %d\n", size);
if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
nvif_ioctl(object, "unmap\n");
+ ret = nvkm_object_unmap(object);
}
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index 8903c04c977e..e85a08ecd9da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -22,27 +22,117 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include <core/memory.h>
+#include <core/mm.h>
+#include <subdev/fb.h>
#include <subdev/instmem.h>
void
+nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device,
+ struct nvkm_tags **ptags)
+{
+ struct nvkm_fb *fb = device->fb;
+ struct nvkm_tags *tags = *ptags;
+ if (tags) {
+ mutex_lock(&fb->subdev.mutex);
+ if (refcount_dec_and_test(&tags->refcount)) {
+ nvkm_mm_free(&fb->tags, &tags->mn);
+ kfree(memory->tags);
+ memory->tags = NULL;
+ }
+ mutex_unlock(&fb->subdev.mutex);
+ *ptags = NULL;
+ }
+}
+
+int
+nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
+ u32 nr, void (*clr)(struct nvkm_device *, u32, u32),
+ struct nvkm_tags **ptags)
+{
+ struct nvkm_fb *fb = device->fb;
+ struct nvkm_tags *tags;
+
+ mutex_lock(&fb->subdev.mutex);
+ if ((tags = memory->tags)) {
+ /* If comptags exist for the memory, but a different amount
+ * than requested, the buffer is being mapped with settings
+ * that are incompatible with existing mappings.
+ */
+ if (tags->mn && tags->mn->length != nr) {
+ mutex_unlock(&fb->subdev.mutex);
+ return -EINVAL;
+ }
+
+ refcount_inc(&tags->refcount);
+ mutex_unlock(&fb->subdev.mutex);
+ *ptags = tags;
+ return 0;
+ }
+
+ if (!(tags = kmalloc(sizeof(*tags), GFP_KERNEL))) {
+ mutex_unlock(&fb->subdev.mutex);
+ return -ENOMEM;
+ }
+
+ if (!nvkm_mm_head(&fb->tags, 0, 1, nr, nr, 1, &tags->mn)) {
+ if (clr)
+ clr(device, tags->mn->offset, tags->mn->length);
+ } else {
+ /* Failure to allocate HW comptags is not an error, the
+ * caller should fall back to an uncompressed map.
+ *
+ * As memory can be mapped in multiple places, we still
+ * need to track the allocation failure and ensure that
+ * any additional mappings remain uncompressed.
+ *
+ * This is handled by returning an empty nvkm_tags.
+ */
+ tags->mn = NULL;
+ }
+
+ refcount_set(&tags->refcount, 1);
+ mutex_unlock(&fb->subdev.mutex);
+ *ptags = tags;
+ return 0;
+}
+
+void
nvkm_memory_ctor(const struct nvkm_memory_func *func,
struct nvkm_memory *memory)
{
memory->func = func;
+ kref_init(&memory->kref);
+}
+
+static void
+nvkm_memory_del(struct kref *kref)
+{
+ struct nvkm_memory *memory = container_of(kref, typeof(*memory), kref);
+ if (!WARN_ON(!memory->func)) {
+ if (memory->func->dtor)
+ memory = memory->func->dtor(memory);
+ kfree(memory);
+ }
}
void
-nvkm_memory_del(struct nvkm_memory **pmemory)
+nvkm_memory_unref(struct nvkm_memory **pmemory)
{
struct nvkm_memory *memory = *pmemory;
- if (memory && !WARN_ON(!memory->func)) {
- if (memory->func->dtor)
- *pmemory = memory->func->dtor(memory);
- kfree(*pmemory);
+ if (memory) {
+ kref_put(&memory->kref, nvkm_memory_del);
*pmemory = NULL;
}
}
+struct nvkm_memory *
+nvkm_memory_ref(struct nvkm_memory *memory)
+{
+ if (memory)
+ kref_get(&memory->kref);
+ return memory;
+}
+
int
nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
u64 size, u32 align, bool zero,
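
The comptag helpers are ref-counted per memory object: a second mapping of the same memory re-uses the existing allocation (and must request the same tag count), and an allocation failure hands back an empty nvkm_tags so later mappings also stay uncompressed. A minimal sketch of a caller, with nr_tags standing in for whatever the mapping code computes:

struct nvkm_tags *tags;
int ret = nvkm_memory_tags_get(memory, device, nr_tags, NULL /* clr */, &tags);
if (ret == 0) {
	if (!tags->mn) {
		/* No HW comptags available: fall back to an uncompressed map. */
	}
	nvkm_memory_tags_put(memory, device, &tags);
}
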
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
index 5c7891234eea..f78a06a6b2f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/mm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c
@@ -237,7 +237,7 @@ nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
}
int
-nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
+nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
{
struct nvkm_mm_node *node, *prev;
u32 next;
@@ -274,7 +274,8 @@ nvkm_mm_init(struct nvkm_mm *mm, u32 offset, u32 length, u32 block)
list_add_tail(&node->nl_entry, &mm->nodes);
list_add_tail(&node->fl_entry, &mm->free);
- node->heap = ++mm->heap_nodes;
+ node->heap = heap;
+ mm->heap_nodes++;
return 0;
}
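
nvkm_mm_init() now takes an explicit heap identifier as its second argument instead of numbering heap nodes implicitly; the callers touched by this series simply pass 0, for example (from the gpuobj hunk above):

	nvkm_mm_init(&gpuobj->heap, 0 /* heap */, 0, gpuobj->size, 1);
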
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index acd76fd4f6d8..301a5e5b5f7f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -102,10 +102,19 @@ nvkm_object_ntfy(struct nvkm_object *object, u32 mthd,
}
int
-nvkm_object_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_object_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
{
if (likely(object->func->map))
- return object->func->map(object, addr, size);
+ return object->func->map(object, argv, argc, type, addr, size);
+ return -ENODEV;
+}
+
+int
+nvkm_object_unmap(struct nvkm_object *object)
+{
+ if (likely(object->func->unmap))
+ return object->func->unmap(object);
return -ENODEV;
}
@@ -259,6 +268,7 @@ nvkm_object_dtor(struct nvkm_object *object)
}
nvif_debug(object, "destroy running...\n");
+ nvkm_object_unmap(object);
if (object->func->dtor)
data = object->func->dtor(object);
nvkm_engine_unref(&object->engine);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
index e31a0479add0..16299837a296 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
@@ -37,9 +37,17 @@ nvkm_oproxy_ntfy(struct nvkm_object *object, u32 mthd,
}
static int
-nvkm_oproxy_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_oproxy_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
{
- return nvkm_object_map(nvkm_oproxy(object)->object, addr, size);
+ struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
+ return nvkm_object_map(oproxy->object, argv, argc, type, addr, size);
+}
+
+static int
+nvkm_oproxy_unmap(struct nvkm_object *object)
+{
+ return nvkm_object_unmap(nvkm_oproxy(object)->object);
}
static int
@@ -171,6 +179,7 @@ nvkm_oproxy_func = {
.mthd = nvkm_oproxy_mthd,
.ntfy = nvkm_oproxy_ntfy,
.map = nvkm_oproxy_map,
+ .unmap = nvkm_oproxy_unmap,
.rd08 = nvkm_oproxy_rd08,
.rd16 = nvkm_oproxy_rd16,
.rd32 = nvkm_oproxy_rd32,
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
index 89da47234016..ccba4ae73cc5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
@@ -21,6 +21,7 @@
*/
#include <core/ramht.h>
#include <core/engine.h>
+#include <core/object.h>
static u32
nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index e096a5d9c292..e14643615698 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -927,7 +927,7 @@ nv84_chipset = {
.i2c = nv50_i2c_new,
.imem = nv50_instmem_new,
.mc = g84_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g84_pci_new,
.therm = g84_therm_new,
@@ -959,7 +959,7 @@ nv86_chipset = {
.i2c = nv50_i2c_new,
.imem = nv50_instmem_new,
.mc = g84_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g84_pci_new,
.therm = g84_therm_new,
@@ -991,7 +991,7 @@ nv92_chipset = {
.i2c = nv50_i2c_new,
.imem = nv50_instmem_new,
.mc = g84_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g92_pci_new,
.therm = g84_therm_new,
@@ -1023,7 +1023,7 @@ nv94_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g84_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1055,7 +1055,7 @@ nv96_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g84_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1087,7 +1087,7 @@ nv98_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g98_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1119,7 +1119,7 @@ nva0_chipset = {
.i2c = nv50_i2c_new,
.imem = nv50_instmem_new,
.mc = g84_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1151,7 +1151,7 @@ nva3_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = gt215_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.pmu = gt215_pmu_new,
@@ -1185,7 +1185,7 @@ nva5_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = gt215_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.pmu = gt215_pmu_new,
@@ -1218,7 +1218,7 @@ nva8_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = gt215_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.pmu = gt215_pmu_new,
@@ -1251,7 +1251,7 @@ nvaa_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g98_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1283,7 +1283,7 @@ nvac_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = g98_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.therm = g84_therm_new,
@@ -1315,7 +1315,7 @@ nvaf_chipset = {
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
.mc = gt215_mc_new,
- .mmu = nv50_mmu_new,
+ .mmu = g84_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
.pmu = gt215_pmu_new,
@@ -1678,7 +1678,7 @@ nve4_chipset = {
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk104_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
@@ -1717,7 +1717,7 @@ nve6_chipset = {
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk104_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
@@ -1756,7 +1756,7 @@ nve7_chipset = {
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk104_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
@@ -1790,7 +1790,7 @@ nvea_chipset = {
.imem = gk20a_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk20a_mmu_new,
.pmu = gk20a_pmu_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
@@ -1820,7 +1820,7 @@ nvf0_chipset = {
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk104_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk110_pmu_new,
@@ -1858,7 +1858,7 @@ nvf1_chipset = {
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk104_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk110_pmu_new,
@@ -1896,7 +1896,7 @@ nv106_chipset = {
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk208_pmu_new,
@@ -1934,7 +1934,7 @@ nv108_chipset = {
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk208_pmu_new,
@@ -1958,7 +1958,7 @@ nv108_chipset = {
static const struct nvkm_device_chip
nv117_chipset = {
.name = "GM107",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.clk = gk104_clk_new,
@@ -1972,7 +1972,7 @@ nv117_chipset = {
.imem = nv50_instmem_new,
.ltc = gm107_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
@@ -1992,7 +1992,7 @@ nv117_chipset = {
static const struct nvkm_device_chip
nv118_chipset = {
.name = "GM108",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.clk = gk104_clk_new,
@@ -2006,7 +2006,7 @@ nv118_chipset = {
.imem = nv50_instmem_new,
.ltc = gm107_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gk104_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
@@ -2026,7 +2026,7 @@ nv118_chipset = {
static const struct nvkm_device_chip
nv120_chipset = {
.name = "GM200",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2039,7 +2039,7 @@ nv120_chipset = {
.imem = nv50_instmem_new,
.ltc = gm200_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gm200_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
@@ -2061,7 +2061,7 @@ nv120_chipset = {
static const struct nvkm_device_chip
nv124_chipset = {
.name = "GM204",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2074,7 +2074,7 @@ nv124_chipset = {
.imem = nv50_instmem_new,
.ltc = gm200_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gm200_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
@@ -2096,7 +2096,7 @@ nv124_chipset = {
static const struct nvkm_device_chip
nv126_chipset = {
.name = "GM206",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2109,7 +2109,7 @@ nv126_chipset = {
.imem = nv50_instmem_new,
.ltc = gm200_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gm200_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
@@ -2131,7 +2131,7 @@ nv126_chipset = {
static const struct nvkm_device_chip
nv12b_chipset = {
.name = "GM20B",
- .bar = gk20a_bar_new,
+ .bar = gm20b_bar_new,
.bus = gf100_bus_new,
.clk = gm20b_clk_new,
.fb = gm20b_fb_new,
@@ -2140,7 +2140,7 @@ nv12b_chipset = {
.imem = gk20a_instmem_new,
.ltc = gm200_ltc_new,
.mc = gk20a_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gm20b_mmu_new,
.pmu = gm20b_pmu_new,
.secboot = gm20b_secboot_new,
.timer = gk20a_timer_new,
@@ -2156,7 +2156,7 @@ nv12b_chipset = {
static const struct nvkm_device_chip
nv130_chipset = {
.name = "GP100",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2168,7 +2168,8 @@ nv130_chipset = {
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gp100_mmu_new,
+ .therm = gp100_therm_new,
.secboot = gm200_secboot_new,
.pci = gp100_pci_new,
.pmu = gp100_pmu_new,
@@ -2190,7 +2191,7 @@ nv130_chipset = {
static const struct nvkm_device_chip
nv132_chipset = {
.name = "GP102",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2202,7 +2203,8 @@ nv132_chipset = {
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gp100_mmu_new,
+ .therm = gp100_therm_new,
.secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
@@ -2224,7 +2226,7 @@ nv132_chipset = {
static const struct nvkm_device_chip
nv134_chipset = {
.name = "GP104",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2236,7 +2238,8 @@ nv134_chipset = {
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gp100_mmu_new,
+ .therm = gp100_therm_new,
.secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
@@ -2258,7 +2261,7 @@ nv134_chipset = {
static const struct nvkm_device_chip
nv136_chipset = {
.name = "GP106",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2270,7 +2273,8 @@ nv136_chipset = {
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gp100_mmu_new,
+ .therm = gp100_therm_new,
.secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
@@ -2292,7 +2296,7 @@ nv136_chipset = {
static const struct nvkm_device_chip
nv137_chipset = {
.name = "GP107",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2304,7 +2308,8 @@ nv137_chipset = {
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gp100_mmu_new,
+ .therm = gp100_therm_new,
.secboot = gp102_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
@@ -2326,7 +2331,7 @@ nv137_chipset = {
static const struct nvkm_device_chip
nv138_chipset = {
.name = "GP108",
- .bar = gf100_bar_new,
+ .bar = gm107_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
@@ -2338,7 +2343,8 @@ nv138_chipset = {
.imem = nv50_instmem_new,
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
- .mmu = gf100_mmu_new,
+ .mmu = gp100_mmu_new,
+ .therm = gp100_therm_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2355,7 +2361,7 @@ nv138_chipset = {
static const struct nvkm_device_chip
nv13b_chipset = {
.name = "GP10B",
- .bar = gk20a_bar_new,
+ .bar = gm20b_bar_new,
.bus = gf100_bus_new,
.fb = gp10b_fb_new,
.fuse = gm107_fuse_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
index f279162f48c6..ebcc5c52fbd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
@@ -2,7 +2,7 @@
#ifndef __NVKM_DEVICE_CTRL_H__
#define __NVKM_DEVICE_CTRL_H__
#define nvkm_control(p) container_of((p), struct nvkm_control, object)
-#include <core/device.h>
+#include <core/object.h>
struct nvkm_control {
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 74a1ffa425f7..f302d2b5782a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1627,7 +1627,7 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
const struct nvkm_device_pci_vendor *pciv;
const char *name = NULL;
struct nvkm_device_pci *pdev;
- int ret;
+ int ret, bits;
ret = pci_enable_device(pci_dev);
if (ret)
@@ -1679,17 +1679,17 @@ nvkm_device_pci_new(struct pci_dev *pci_dev, const char *cfg, const char *dbg,
if (ret)
return ret;
- /*
- * Set a preliminary DMA mask based on the .dma_bits member of the
- * MMU subdevice. This allows other subdevices to create DMA mappings
- * in their init() or oneinit() methods, which may be called before the
- * TTM layer sets the DMA mask definitively.
- * This is necessary for platforms where the default DMA mask of 32
- * does not cover any system memory, i.e., when all RAM is > 4 GB.
- */
- if (pdev->device.mmu)
- dma_set_mask_and_coherent(&pci_dev->dev,
- DMA_BIT_MASK(pdev->device.mmu->dma_bits));
+ /* Set DMA mask based on capabilities reported by the MMU subdev. */
+ if (pdev->device.mmu && !pdev->device.pci->agp.bridge)
+ bits = pdev->device.mmu->dma_bits;
+ else
+ bits = 32;
+
+ ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(bits));
+ if (ret && bits != 32) {
+ dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+ pdev->device.mmu->dma_bits = 32;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 189ed80e21ff..78597da6313a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -136,7 +136,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
if (ret)
goto free_domain;
- ret = nvkm_mm_init(&tdev->iommu.mm, 0,
+ ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
(1ULL << tdev->func->iommu_bit) >>
tdev->iommu.pgshift, 1);
if (ret)
@@ -216,7 +216,7 @@ nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
if (tdev->irq) {
free_irq(tdev->irq, tdev);
tdev->irq = 0;
- };
+ }
}
static int
@@ -309,8 +309,6 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
/**
* The IOMMU bit defines the upper limit of the GPU-addressable space.
- * This will be refined in nouveau_ttm_init but we need to do it early
- * for instmem to behave properly
*/
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 513ee6b79553..17adcb4e8854 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -206,10 +206,12 @@ nvkm_udevice_wr32(struct nvkm_object *object, u64 addr, u32 data)
}
static int
-nvkm_udevice_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
{
struct nvkm_udevice *udev = nvkm_udevice(object);
struct nvkm_device *device = udev->device;
+ *type = NVKM_OBJECT_MAP_IO;
*addr = device->func->resource_addr(device, 0);
*size = device->func->resource_size(device, 0);
return 0;
@@ -292,6 +294,11 @@ nvkm_udevice_child_get(struct nvkm_object *object, int index,
if (!sclass) {
switch (index) {
case 0: sclass = &nvkm_control_oclass; break;
+ case 1:
+ if (!device->mmu)
+ return -EINVAL;
+ sclass = &device->mmu->user;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index 0c0310498afd..723dcbde2ac2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -191,11 +191,13 @@ nv50_disp_chan_ntfy(struct nvkm_object *object, u32 type,
}
static int
-nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nv50_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
{
struct nv50_disp_chan *chan = nv50_disp_chan(object);
struct nv50_disp *disp = chan->root->disp;
struct nvkm_device *device = disp->base.engine.subdev.device;
+ *type = NVKM_OBJECT_MAP_IO;
*addr = device->func->resource_addr(device, 0) +
0x640000 + (chan->chid.user * 0x1000);
*size = 0x001000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index 5701b3221a54..40681db91a02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -2,6 +2,7 @@
#ifndef __NV50_DISP_CHAN_H__
#define __NV50_DISP_CHAN_H__
#define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
+#include <core/object.h>
#include "nv50.h"
struct nv50_disp_chan {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index cd6dd8742dc6..4548c031b937 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -148,7 +148,7 @@ void gf119_hda_eld(struct nvkm_ior *, u8 *, u8);
#define IOR_MSG(i,l,f,a...) do { \
struct nvkm_ior *_ior = (i); \
- nvkm_##l(&_ior->disp->engine.subdev, "%s: "f, _ior->name, ##a); \
+ nvkm_##l(&_ior->disp->engine.subdev, "%s: "f"\n", _ior->name, ##a); \
} while(0)
#define IOR_WARN(i,f,a...) IOR_MSG((i), warn, f, ##a)
#define IOR_DBG(i,f,a...) IOR_MSG((i), debug, f, ##a)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
index c95942ef8216..49ef7e57aad4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
@@ -26,7 +26,7 @@
#include <core/gpuobj.h>
#include <subdev/fb.h>
-#include <subdev/mmu/nv04.h>
+#include <subdev/mmu/vmm.h>
#include <nvif/class.h>
@@ -49,8 +49,8 @@ nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
int ret;
if (dmaobj->clone) {
- struct nv04_mmu *mmu = nv04_mmu(device->mmu);
- struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
+ struct nvkm_memory *pgt =
+ device->mmu->vmm->pd->pt[0]->memory;
if (!dmaobj->base.start)
return nvkm_gpuobj_wrap(pgt, pgpuobj);
nvkm_kmap(pgt);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 2e7b4e2105ef..816ccaedfc73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -99,7 +99,7 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
const u32 base = falcon->addr;
if (!suspend) {
- nvkm_memory_del(&falcon->core);
+ nvkm_memory_unref(&falcon->core);
if (falcon->external) {
vfree(falcon->data.data);
vfree(falcon->code.data);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index 660ca7aa95ea..64f6b7654a08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -27,6 +27,7 @@
#include <core/client.h>
#include <core/gpuobj.h>
#include <core/notify.h>
+#include <subdev/mc.h>
#include <nvif/event.h>
#include <nvif/unpack.h>
@@ -278,6 +279,12 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
return 0;
}
+static void
+nvkm_fifo_preinit(struct nvkm_engine *engine)
+{
+ nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO);
+}
+
static int
nvkm_fifo_init(struct nvkm_engine *engine)
{
@@ -302,6 +309,7 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
static const struct nvkm_engine_func
nvkm_fifo = {
.dtor = nvkm_fifo_dtor,
+ .preinit = nvkm_fifo_preinit,
.oneinit = nvkm_fifo_oneinit,
.init = nvkm_fifo_init,
.fini = nvkm_fifo_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index fab760ae922f..d83485385934 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -117,8 +117,8 @@ nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
if (chan->func->engine_dtor)
chan->func->engine_dtor(chan, engine);
nvkm_object_del(&engn->object);
- if (chan->vm)
- atomic_dec(&chan->vm->engref[engine->subdev.index]);
+ if (chan->vmm)
+ atomic_dec(&chan->vmm->engref[engine->subdev.index]);
}
}
@@ -151,8 +151,8 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
.engine = oclass->engine,
};
- if (chan->vm)
- atomic_inc(&chan->vm->engref[engine->subdev.index]);
+ if (chan->vmm)
+ atomic_inc(&chan->vmm->engref[engine->subdev.index]);
if (engine->func->fifo.cclass) {
ret = engine->func->fifo.cclass(chan, &cclass,
@@ -253,9 +253,11 @@ nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
}
static int
-nvkm_fifo_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
+nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
{
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+ *type = NVKM_OBJECT_MAP_IO;
*addr = chan->addr;
*size = chan->size;
return 0;
@@ -325,7 +327,10 @@ nvkm_fifo_chan_dtor(struct nvkm_object *object)
if (chan->user)
iounmap(chan->user);
- nvkm_vm_ref(NULL, &chan->vm, NULL);
+ if (chan->vmm) {
+ nvkm_vmm_part(chan->vmm, chan->inst->memory);
+ nvkm_vmm_unref(&chan->vmm);
+ }
nvkm_gpuobj_del(&chan->push);
nvkm_gpuobj_del(&chan->inst);
@@ -347,13 +352,12 @@ nvkm_fifo_chan_func = {
int
nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
- u64 vm, u64 push, u64 engines, int bar, u32 base, u32 user,
- const struct nvkm_oclass *oclass,
+ u64 hvmm, u64 push, u64 engines, int bar, u32 base,
+ u32 user, const struct nvkm_oclass *oclass,
struct nvkm_fifo_chan *chan)
{
struct nvkm_client *client = oclass->client;
struct nvkm_device *device = fifo->engine.subdev.device;
- struct nvkm_mmu *mmu = device->mmu;
struct nvkm_dmaobj *dmaobj;
unsigned long flags;
int ret;
@@ -382,16 +386,19 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
}
/* channel address space */
- if (!vm && mmu) {
- if (!client->vm || client->vm->mmu == mmu) {
- ret = nvkm_vm_ref(client->vm, &chan->vm, NULL);
- if (ret)
- return ret;
- } else {
+ if (hvmm) {
+ struct nvkm_vmm *vmm = nvkm_uvmm_search(client, hvmm);
+ if (IS_ERR(vmm))
+ return PTR_ERR(vmm);
+
+ if (vmm->mmu != device->mmu)
return -EINVAL;
- }
- } else {
- return -ENOENT;
+
+ ret = nvkm_vmm_join(vmm, chan->inst->memory);
+ if (ret)
+ return ret;
+
+ chan->vmm = nvkm_vmm_ref(vmm);
}
/* allocate channel id */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
index 61797c4dd07a..a5c998fe4485 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c
@@ -229,15 +229,18 @@ g84_fifo_chan_func = {
};
int
-g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
+g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
const struct nvkm_oclass *oclass,
struct nv50_fifo_chan *chan)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
int ret;
+ if (!vmm)
+ return -EINVAL;
+
ret = nvkm_fifo_chan_ctor(&g84_fifo_chan_func, &fifo->base,
- 0x10000, 0x1000, false, vm, push,
+ 0x10000, 0x1000, false, vmm, push,
(1ULL << NVKM_ENGINE_BSP) |
(1ULL << NVKM_ENGINE_CE0) |
(1ULL << NVKM_ENGINE_CIPHER) |
@@ -277,9 +280,5 @@ g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
if (ret)
return ret;
- ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
- if (ret)
- return ret;
-
- return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+ return nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
index 27002caba420..b653664e081b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -12,12 +12,9 @@ struct gf100_fifo_chan {
struct list_head head;
bool killed;
- struct nvkm_gpuobj *pgd;
- struct nvkm_vm *vm;
-
struct {
struct nvkm_gpuobj *inst;
- struct nvkm_vma vma;
+ struct nvkm_vma *vma;
} engn[NVKM_SUBDEV_NR];
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index ec10be2984a9..1208e3d9dbe2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -13,12 +13,9 @@ struct gk104_fifo_chan {
struct list_head head;
bool killed;
- struct nvkm_gpuobj *pgd;
- struct nvkm_vm *vm;
-
struct {
struct nvkm_gpuobj *inst;
- struct nvkm_vma vma;
+ struct nvkm_vma *vma;
} engn[NVKM_SUBDEV_NR];
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
index 25b60aff40e4..85f7dbf53c99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c
@@ -206,7 +206,6 @@ void *
nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
- nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
nvkm_ramht_del(&chan->ramht);
nvkm_gpuobj_del(&chan->pgd);
nvkm_gpuobj_del(&chan->eng);
@@ -229,15 +228,18 @@ nv50_fifo_chan_func = {
};
int
-nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
+nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vmm, u64 push,
const struct nvkm_oclass *oclass,
struct nv50_fifo_chan *chan)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
int ret;
+ if (!vmm)
+ return -EINVAL;
+
ret = nvkm_fifo_chan_ctor(&nv50_fifo_chan_func, &fifo->base,
- 0x10000, 0x1000, false, vm, push,
+ 0x10000, 0x1000, false, vmm, push,
(1ULL << NVKM_ENGINE_DMAOBJ) |
(1ULL << NVKM_ENGINE_SW) |
(1ULL << NVKM_ENGINE_GR) |
@@ -262,9 +264,5 @@ nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
if (ret)
return ret;
- ret = nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
- if (ret)
- return ret;
-
- return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
+ return nvkm_ramht_new(device, 0x8000, 16, chan->base.inst, &chan->ramht);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
index ad9aa157e078..2e3c4005b874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -14,19 +14,18 @@ struct nv50_fifo_chan {
struct nvkm_gpuobj *eng;
struct nvkm_gpuobj *pgd;
struct nvkm_ramht *ramht;
- struct nvkm_vm *vm;
struct nvkm_gpuobj *engn[NVKM_SUBDEV_NR];
};
-int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
+int nv50_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
const struct nvkm_oclass *, struct nv50_fifo_chan *);
void *nv50_fifo_chan_dtor(struct nvkm_fifo_chan *);
void nv50_fifo_chan_fini(struct nvkm_fifo_chan *);
void nv50_fifo_chan_engine_dtor(struct nvkm_fifo_chan *, struct nvkm_engine *);
void nv50_fifo_chan_object_dtor(struct nvkm_fifo_chan *, int);
-int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vm, u64 push,
+int g84_fifo_chan_ctor(struct nv50_fifo *, u64 vmm, u64 push,
const struct nvkm_oclass *, struct nv50_fifo_chan *);
extern const struct nvkm_fifo_chan_oclass nv50_fifo_dma_oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
index caa914074752..fc34cddcd2f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c
@@ -44,9 +44,9 @@ g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
nvif_ioctl(parent, "create channel dma size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel dma vers %d vm %llx "
+ nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
"pushbuf %llx offset %016llx\n",
- args->v0.version, args->v0.vm, args->v0.pushbuf,
+ args->v0.version, args->v0.vmm, args->v0.pushbuf,
args->v0.offset);
if (!args->v0.pushbuf)
return -EINVAL;
@@ -57,7 +57,7 @@ g84_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return -ENOMEM;
*pobject = &chan->base.object;
- ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+ ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
oclass, chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
index 0a7b6ed5ed28..c213122cf088 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -95,6 +95,7 @@ nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
c = fifo->ramfc;
+ nvkm_kmap(fctx);
do {
u32 rm = ((1ULL << c->bits) - 1) << c->regs;
u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
@@ -102,6 +103,7 @@ nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
} while ((++c)->bits);
+ nvkm_done(fctx);
c = fifo->ramfc;
do {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
index 480bc3777be5..8043718ad150 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv50.c
@@ -44,9 +44,9 @@ nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
nvif_ioctl(parent, "create channel dma size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel dma vers %d vm %llx "
+ nvif_ioctl(parent, "create channel dma vers %d vmm %llx "
"pushbuf %llx offset %016llx\n",
- args->v0.version, args->v0.vm, args->v0.pushbuf,
+ args->v0.version, args->v0.vmm, args->v0.pushbuf,
args->v0.offset);
if (!args->v0.pushbuf)
return -EINVAL;
@@ -57,7 +57,7 @@ nv50_fifo_dma_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return -ENOMEM;
*pobject = &chan->base.object;
- ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+ ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
oclass, chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index cd468ab1db12..f69576868164 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -559,6 +559,7 @@ gf100_fifo_oneinit(struct nvkm_fifo *base)
struct gf100_fifo *fifo = gf100_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
int ret;
/* Determine number of PBDMAs by checking valid enable bits. */
@@ -584,12 +585,12 @@ gf100_fifo_oneinit(struct nvkm_fifo *base)
if (ret)
return ret;
- ret = nvkm_bar_umap(device->bar, 128 * 0x1000, 12, &fifo->user.bar);
+ ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
+ &fifo->user.bar);
if (ret)
return ret;
- nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
- return 0;
+ return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}
static void
@@ -628,7 +629,7 @@ gf100_fifo_init(struct nvkm_fifo *base)
}
nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
- nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
+ nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
nvkm_wr32(device, 0x002100, 0xffffffff);
nvkm_wr32(device, 0x002140, 0x7fffffff);
@@ -639,10 +640,11 @@ static void *
gf100_fifo_dtor(struct nvkm_fifo *base)
{
struct gf100_fifo *fifo = gf100_fifo(base);
- nvkm_vm_put(&fifo->user.bar);
- nvkm_memory_del(&fifo->user.mem);
- nvkm_memory_del(&fifo->runlist.mem[0]);
- nvkm_memory_del(&fifo->runlist.mem[1]);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
+ nvkm_memory_unref(&fifo->user.mem);
+ nvkm_memory_unref(&fifo->runlist.mem[0]);
+ nvkm_memory_unref(&fifo->runlist.mem[1]);
return fifo;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
index 571a6edb3f97..68f97ba03df6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -27,7 +27,7 @@ struct gf100_fifo {
struct {
struct nvkm_memory *mem;
- struct nvkm_vma bar;
+ struct nvkm_vma *bar;
} user;
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index a7e55c422501..84bd703dd897 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -771,6 +771,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
struct gk104_fifo *fifo = gk104_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
int engn, runl, pbid, ret, i, j;
enum nvkm_devidx engidx;
u32 *map;
@@ -834,13 +835,12 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
if (ret)
return ret;
- ret = nvkm_bar_umap(device->bar, fifo->base.nr * 0x200, 12,
- &fifo->user.bar);
+ ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
+ &fifo->user.bar);
if (ret)
return ret;
- nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
- return 0;
+ return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}
static void
@@ -866,7 +866,7 @@ gk104_fifo_init(struct nvkm_fifo *base)
nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
}
- nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);
+ nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
nvkm_wr32(device, 0x002100, 0xffffffff);
nvkm_wr32(device, 0x002140, 0x7fffffff);
@@ -876,14 +876,15 @@ static void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
int i;
- nvkm_vm_put(&fifo->user.bar);
- nvkm_memory_del(&fifo->user.mem);
+ nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
+ nvkm_memory_unref(&fifo->user.mem);
for (i = 0; i < fifo->runlist_nr; i++) {
- nvkm_memory_del(&fifo->runlist[i].mem[1]);
- nvkm_memory_del(&fifo->runlist[i].mem[0]);
+ nvkm_memory_unref(&fifo->runlist[i].mem[1]);
+ nvkm_memory_unref(&fifo->runlist[i].mem[0]);
}
return fifo;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 0506c5290936..1579785cf941 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -38,7 +38,7 @@ struct gk104_fifo {
struct {
struct nvkm_memory *mem;
- struct nvkm_vma bar;
+ struct nvkm_vma *bar;
} user;
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
index 77c2f2a28bf3..2121f517b1dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c
@@ -45,10 +45,10 @@ g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
"pushbuf %llx ioffset %016llx "
"ilength %08x\n",
- args->v0.version, args->v0.vm, args->v0.pushbuf,
+ args->v0.version, args->v0.vmm, args->v0.pushbuf,
args->v0.ioffset, args->v0.ilength);
if (!args->v0.pushbuf)
return -EINVAL;
@@ -59,7 +59,7 @@ g84_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return -ENOMEM;
*pobject = &chan->base.object;
- ret = g84_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+ ret = g84_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
oclass, chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
index f9e0377d3d24..75f9632789b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -111,7 +111,7 @@ gf100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_gpuobj *inst = chan->base.inst;
if (offset) {
- u64 addr = chan->engn[engine->subdev.index].vma.offset;
+ u64 addr = chan->engn[engine->subdev.index].vma->addr;
nvkm_kmap(inst);
nvkm_wo32(inst, offset + 0x00, lower_32_bits(addr) | 4);
nvkm_wo32(inst, offset + 0x04, upper_32_bits(addr));
@@ -126,7 +126,7 @@ gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
- nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+ nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
}
@@ -146,8 +146,13 @@ gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
if (ret)
return ret;
- return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
- NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+ ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
+ &chan->engn[engn].vma);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
+ chan->engn[engn].vma, NULL, 0);
}
static void
@@ -190,10 +195,7 @@ gf100_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
static void *
gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
- struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
- nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
- nvkm_gpuobj_del(&chan->pgd);
- return chan;
+ return gf100_fifo_chan(base);
}
static const struct nvkm_fifo_chan_func
@@ -216,7 +218,6 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
struct fermi_channel_gpfifo_v0 v0;
} *args = data;
struct gf100_fifo *fifo = gf100_fifo(base);
- struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_object *parent = oclass->parent;
struct gf100_fifo_chan *chan;
u64 usermem, ioffset, ilength;
@@ -224,10 +225,12 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
"ioffset %016llx ilength %08x\n",
- args->v0.version, args->v0.vm, args->v0.ioffset,
+ args->v0.version, args->v0.vmm, args->v0.ioffset,
args->v0.ilength);
+ if (!args->v0.vmm)
+ return -EINVAL;
} else
return ret;
@@ -239,7 +242,7 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
INIT_LIST_HEAD(&chan->head);
ret = nvkm_fifo_chan_ctor(&gf100_fifo_gpfifo_func, &fifo->base,
- 0x1000, 0x1000, true, args->v0.vm, 0,
+ 0x1000, 0x1000, true, args->v0.vmm, 0,
(1ULL << NVKM_ENGINE_CE0) |
(1ULL << NVKM_ENGINE_CE1) |
(1ULL << NVKM_ENGINE_GR) |
@@ -247,29 +250,13 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
(1ULL << NVKM_ENGINE_MSPPP) |
(1ULL << NVKM_ENGINE_MSVLD) |
(1ULL << NVKM_ENGINE_SW),
- 1, fifo->user.bar.offset, 0x1000,
+ 1, fifo->user.bar->addr, 0x1000,
oclass, &chan->base);
if (ret)
return ret;
args->v0.chid = chan->base.chid;
- /* page directory */
- ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
- if (ret)
- return ret;
-
- nvkm_kmap(chan->base.inst);
- nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
- nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
- nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
- nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
- nvkm_done(chan->base.inst);
-
- ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
- if (ret)
- return ret;
-
/* clear channel control registers */
usermem = chan->base.chid * 0x1000;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 8abf6f8ef445..80c87521bebe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -117,7 +117,7 @@ gk104_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
if (offset) {
- u64 addr = chan->engn[engine->subdev.index].vma.offset;
+ u64 addr = chan->engn[engine->subdev.index].vma->addr;
u32 datalo = lower_32_bits(addr) | 0x00000004;
u32 datahi = upper_32_bits(addr);
nvkm_kmap(inst);
@@ -138,7 +138,7 @@ gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
- nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+ nvkm_vmm_put(chan->base.vmm, &chan->engn[engine->subdev.index].vma);
nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
}
@@ -158,8 +158,13 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
if (ret)
return ret;
- return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
- NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+ ret = nvkm_vmm_get(chan->base.vmm, 12, chan->engn[engn].inst->size,
+ &chan->engn[engn].vma);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_map(chan->engn[engn].inst, 0, chan->base.vmm,
+ chan->engn[engn].vma, NULL, 0);
}
static void
@@ -203,10 +208,7 @@ gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
static void *
gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
- struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
- nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
- nvkm_gpuobj_del(&chan->pgd);
- return chan;
+ return gk104_fifo_chan(base);
}
static const struct nvkm_fifo_chan_func
@@ -229,17 +231,19 @@ struct gk104_fifo_chan_func {
static int
gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
struct gk104_fifo *fifo, u32 *engmask, u16 *chid,
- u64 vm, u64 ioffset, u64 ilength,
+ u64 vmm, u64 ioffset, u64 ilength,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
- struct nvkm_device *device = fifo->base.engine.subdev.device;
struct gk104_fifo_chan *chan;
int runlist = -1, ret = -ENOSYS, i, j;
u32 engines = 0, present = 0;
u64 subdevs = 0;
u64 usermem;
+ if (!vmm)
+ return -EINVAL;
+
/* Determine which downstream engines are present */
for (i = 0; i < fifo->engine_nr; i++) {
struct nvkm_engine *engine = fifo->engine[i].engine;
@@ -285,30 +289,14 @@ gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
INIT_LIST_HEAD(&chan->head);
ret = nvkm_fifo_chan_ctor(&gk104_fifo_gpfifo_func, &fifo->base,
- 0x1000, 0x1000, true, vm, 0, subdevs,
- 1, fifo->user.bar.offset, 0x200,
+ 0x1000, 0x1000, true, vmm, 0, subdevs,
+ 1, fifo->user.bar->addr, 0x200,
oclass, &chan->base);
if (ret)
return ret;
*chid = chan->base.chid;
- /* Page directory. */
- ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
- if (ret)
- return ret;
-
- nvkm_kmap(chan->base.inst);
- nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
- nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
- nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
- nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
- nvkm_done(chan->base.inst);
-
- ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
- if (ret)
- return ret;
-
/* Clear channel control registers. */
usermem = chan->base.chid * 0x200;
ilength = order_base_2(ilength / 8);
@@ -373,18 +361,17 @@ gk104_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
"ioffset %016llx ilength %08x engine %08x\n",
- args->v0.version, args->v0.vm, args->v0.ioffset,
+ args->v0.version, args->v0.vmm, args->v0.ioffset,
args->v0.ilength, args->v0.engines);
return gk104_fifo_gpfifo_new_(gk104_fifo_gpfifo, fifo,
&args->v0.engines,
&args->v0.chid,
- args->v0.vm,
+ args->v0.vmm,
args->v0.ioffset,
args->v0.ilength,
oclass, pobject);
-
}
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
index c5a7de9db259..d8f28ec1e4a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifonv50.c
@@ -45,10 +45,10 @@ nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- nvif_ioctl(parent, "create channel gpfifo vers %d vm %llx "
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
"pushbuf %llx ioffset %016llx "
"ilength %08x\n",
- args->v0.version, args->v0.vm, args->v0.pushbuf,
+ args->v0.version, args->v0.vmm, args->v0.pushbuf,
args->v0.ioffset, args->v0.ilength);
if (!args->v0.pushbuf)
return -EINVAL;
@@ -59,7 +59,7 @@ nv50_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
return -ENOMEM;
*pobject = &chan->base.object;
- ret = nv50_fifo_chan_ctor(fifo, args->v0.vm, args->v0.pushbuf,
+ ret = nv50_fifo_chan_ctor(fifo, args->v0.vmm, args->v0.pushbuf,
oclass, chan);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
index 66eb12c2b5ba..fa6e094d8068 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -100,8 +100,8 @@ void *
nv50_fifo_dtor(struct nvkm_fifo *base)
{
struct nv50_fifo *fifo = nv50_fifo(base);
- nvkm_memory_del(&fifo->runlist[1]);
- nvkm_memory_del(&fifo->runlist[0]);
+ nvkm_memory_unref(&fifo->runlist[1]);
+ nvkm_memory_unref(&fifo->runlist[0]);
return fifo;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index bc77eea351a5..881015080d83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -986,14 +986,14 @@ gf100_grctx_pack_tpc[] = {
******************************************************************************/
int
-gf100_grctx_mmio_data(struct gf100_grctx *info, u32 size, u32 align, u32 access)
+gf100_grctx_mmio_data(struct gf100_grctx *info, u32 size, u32 align, bool priv)
{
if (info->data) {
info->buffer[info->buffer_nr] = round_up(info->addr, align);
info->addr = info->buffer[info->buffer_nr] + size;
info->data->size = size;
info->data->align = align;
- info->data->access = access;
+ info->data->priv = priv;
info->data++;
return info->buffer_nr++;
}
@@ -1028,9 +1028,8 @@ void
gf100_grctx_generate_bundle(struct gf100_grctx *info)
{
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
- const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418808, 0x00000000, s, b);
@@ -1041,9 +1040,8 @@ void
gf100_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
- const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -1057,9 +1055,8 @@ gf100_grctx_generate_attrib(struct gf100_grctx *info)
const struct gf100_grctx_func *grctx = gr->func->grctx;
const u32 attrib = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
- const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+ const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
int gpc, tpc;
u32 bo = 0;
@@ -1267,85 +1264,87 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_mc_unk260(device, 1);
}
+#define CB_RESERVED 0x80000
+
int
gf100_grctx_generate(struct gf100_gr *gr)
{
const struct gf100_grctx_func *grctx = gr->func->grctx;
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_memory *chan;
+ struct nvkm_memory *inst = NULL;
+ struct nvkm_memory *data = NULL;
+ struct nvkm_vmm *vmm = NULL;
+ struct nvkm_vma *ctx = NULL;
struct gf100_grctx info;
int ret, i;
u64 addr;
- /* allocate memory to for a "channel", which we'll use to generate
- * the default context values
+ /* Allocate memory for a "channel", which we'll use to generate
+ * the default context values.
*/
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x80000 + gr->size,
- 0x1000, true, &chan);
- if (ret) {
- nvkm_error(subdev, "failed to allocate chan memory, %d\n", ret);
- return ret;
- }
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ 0x1000, 0x1000, true, &inst);
+ if (ret)
+ goto done;
- addr = nvkm_memory_addr(chan);
+ ret = nvkm_vmm_new(device, 0, 0, NULL, 0, NULL, "grctx", &vmm);
+ if (ret)
+ goto done;
- /* PGD pointer */
- nvkm_kmap(chan);
- nvkm_wo32(chan, 0x0200, lower_32_bits(addr + 0x1000));
- nvkm_wo32(chan, 0x0204, upper_32_bits(addr + 0x1000));
- nvkm_wo32(chan, 0x0208, 0xffffffff);
- nvkm_wo32(chan, 0x020c, 0x000000ff);
+ vmm->debug = subdev->debug;
- /* PGT[0] pointer */
- nvkm_wo32(chan, 0x1000, 0x00000000);
- nvkm_wo32(chan, 0x1004, 0x00000001 | (addr + 0x2000) >> 8);
+ ret = nvkm_vmm_join(vmm, inst);
+ if (ret)
+ goto done;
- /* identity-map the whole "channel" into its own vm */
- for (i = 0; i < nvkm_memory_size(chan) / 4096; i++) {
- u64 addr = ((nvkm_memory_addr(chan) + (i * 4096)) >> 8) | 1;
- nvkm_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
- nvkm_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
- }
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ CB_RESERVED + gr->size, 0, true, &data);
+ if (ret)
+ goto done;
- /* context pointer (virt) */
- nvkm_wo32(chan, 0x0210, 0x00080004);
- nvkm_wo32(chan, 0x0214, 0x00000000);
- nvkm_done(chan);
+ ret = nvkm_vmm_get(vmm, 0, nvkm_memory_size(data), &ctx);
+ if (ret)
+ goto done;
- nvkm_wr32(device, 0x100cb8, (addr + 0x1000) >> 8);
- nvkm_wr32(device, 0x100cbc, 0x80000001);
- nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x100c80) & 0x00008000)
- break;
- );
+ ret = nvkm_memory_map(data, 0, vmm, ctx, NULL, 0);
+ if (ret)
+ goto done;
+
+
+ /* Setup context pointer. */
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, 0x0210, lower_32_bits(ctx->addr + CB_RESERVED) | 4);
+ nvkm_wo32(inst, 0x0214, upper_32_bits(ctx->addr + CB_RESERVED));
+ nvkm_done(inst);
- /* setup default state for mmio list construction */
+ /* Setup default state for mmio list construction. */
info.gr = gr;
info.data = gr->mmio_data;
info.mmio = gr->mmio_list;
- info.addr = 0x2000 + (i * 8);
+ info.addr = ctx->addr;
info.buffer_nr = 0;
- /* make channel current */
+ /* Make channel current. */
+ addr = nvkm_memory_addr(inst) >> 12;
if (gr->firmware) {
nvkm_wr32(device, 0x409840, 0x00000030);
- nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+ nvkm_wr32(device, 0x409500, 0x80000000 | addr);
nvkm_wr32(device, 0x409504, 0x00000003);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x409800) & 0x00000010)
break;
);
- nvkm_kmap(chan);
- nvkm_wo32(chan, 0x8001c, 1);
- nvkm_wo32(chan, 0x80020, 0);
- nvkm_wo32(chan, 0x80028, 0);
- nvkm_wo32(chan, 0x8002c, 0);
- nvkm_done(chan);
+ nvkm_kmap(data);
+ nvkm_wo32(data, 0x1c, 1);
+ nvkm_wo32(data, 0x20, 0);
+ nvkm_wo32(data, 0x28, 0);
+ nvkm_wo32(data, 0x2c, 0);
+ nvkm_done(data);
} else {
nvkm_wr32(device, 0x409840, 0x80000000);
- nvkm_wr32(device, 0x409500, 0x80000000 | addr >> 12);
+ nvkm_wr32(device, 0x409500, 0x80000000 | addr);
nvkm_wr32(device, 0x409504, 0x00000001);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x409800) & 0x80000000)
@@ -1355,8 +1354,8 @@ gf100_grctx_generate(struct gf100_gr *gr)
grctx->main(gr, &info);
- /* trigger a context unload by unsetting the "next channel valid" bit
- * and faking a context switch interrupt
+ /* Trigger a context unload by unsetting the "next channel valid" bit
+ * and faking a context switch interrupt.
*/
nvkm_mask(device, 0x409b04, 0x80000000, 0x00000000);
nvkm_wr32(device, 0x409000, 0x00000100);
@@ -1370,17 +1369,21 @@ gf100_grctx_generate(struct gf100_gr *gr)
gr->data = kmalloc(gr->size, GFP_KERNEL);
if (gr->data) {
- nvkm_kmap(chan);
+ nvkm_kmap(data);
for (i = 0; i < gr->size; i += 4)
- gr->data[i / 4] = nvkm_ro32(chan, 0x80000 + i);
- nvkm_done(chan);
+ gr->data[i / 4] = nvkm_ro32(data, CB_RESERVED + i);
+ nvkm_done(data);
ret = 0;
} else {
ret = -ENOMEM;
}
done:
- nvkm_memory_del(&chan);
+ nvkm_vmm_put(vmm, &ctx);
+ nvkm_memory_unref(&data);
+ nvkm_vmm_part(vmm, inst);
+ nvkm_vmm_unref(&vmm);
+ nvkm_memory_unref(&inst);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 2812ca511c9c..5199e5aa0cb7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -12,7 +12,7 @@ struct gf100_grctx {
u64 addr;
};
-int gf100_grctx_mmio_data(struct gf100_grctx *, u32 size, u32 align, u32 access);
+int gf100_grctx_mmio_data(struct gf100_grctx *, u32 size, u32 align, bool priv);
void gf100_grctx_mmio_item(struct gf100_grctx *, u32 addr, u32 data, int s, int);
#define mmio_vram(a,b,c,d) gf100_grctx_mmio_data((a), (b), (c), (d))
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
index 505cdcbfc085..82f71b10c06e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf108.c
@@ -735,9 +735,8 @@ gf108_grctx_generate_attrib(struct gf100_grctx *info)
const u32 alpha = grctx->alpha_nr;
const u32 beta = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
- const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+ const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
const int timeslice_mode = 1;
const int max_batches = 0xffff;
u32 bo = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 74a64e3fd59a..19301d88577d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -187,9 +187,8 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
const u32 alpha = grctx->alpha_nr;
const u32 beta = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
- const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+ const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
const int timeslice_mode = 1;
const int max_batches = 0xffff;
u32 bo = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index c46b3fdf7203..825c8fd500bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -847,9 +847,8 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info)
const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
grctx->bundle_size / 0x20);
const u32 token_limit = grctx->bundle_token_limit;
- const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418808, 0x00000000, s, b);
@@ -861,9 +860,8 @@ void
gk104_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
- const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 4c4b5ab6e46d..9b43d4ce3eaa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -867,9 +867,8 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info)
const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
grctx->bundle_size / 0x20);
const u32 token_limit = grctx->bundle_token_limit;
- const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418e24, 0x00000000, s, b);
@@ -881,9 +880,8 @@ void
gm107_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
- const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -900,9 +898,8 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
const u32 alpha = grctx->alpha_nr;
const u32 attrib = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
- const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+ const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
const int max_batches = 0xffff;
u32 bo = 0;
u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index 7833bc777a29..88ea322d956c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -33,9 +33,8 @@ void
gp100_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
- const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
- const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+ const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -51,9 +50,8 @@ gp100_grctx_generate_attrib(struct gf100_grctx *info)
const u32 attrib = grctx->attrib_nr;
const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
- const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size, (1 << s), access);
+ const int b = mmio_vram(info, size, (1 << s), false);
const int max_batches = 0xffff;
u32 ao = 0;
u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index 80b7ab0bee3a..7a66b4c2eb18 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -38,9 +38,8 @@ gp102_grctx_generate_attrib(struct gf100_grctx *info)
const u32 attrib = grctx->attrib_nr;
const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
- const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
- const int b = mmio_vram(info, size, (1 << s), access);
+ const int b = mmio_vram(info, size, (1 << s), false);
const int max_batches = 0xffff;
u32 ao = 0;
u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 99689f4de502..2f8dc107047d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -37,6 +37,7 @@
#include <nvif/class.h>
#include <nvif/cl9097.h>
+#include <nvif/if900d.h>
#include <nvif/unpack.h>
/*******************************************************************************
@@ -327,13 +328,13 @@ gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
if (!gr->firmware) {
nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2);
- nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma.offset >> 8);
+ nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma->addr >> 8);
} else {
nvkm_wo32(*pgpuobj, 0xf4, 0);
nvkm_wo32(*pgpuobj, 0xf8, 0);
nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2);
- nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma.offset));
- nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma.offset));
+ nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma->addr));
+ nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma->addr));
nvkm_wo32(*pgpuobj, 0x1c, 1);
nvkm_wo32(*pgpuobj, 0x20, 0);
nvkm_wo32(*pgpuobj, 0x28, 0);
@@ -350,18 +351,13 @@ gf100_gr_chan_dtor(struct nvkm_object *object)
int i;
for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
- if (chan->data[i].vma.node) {
- nvkm_vm_unmap(&chan->data[i].vma);
- nvkm_vm_put(&chan->data[i].vma);
- }
- nvkm_memory_del(&chan->data[i].mem);
+ nvkm_vmm_put(chan->vmm, &chan->data[i].vma);
+ nvkm_memory_unref(&chan->data[i].mem);
}
- if (chan->mmio_vma.node) {
- nvkm_vm_unmap(&chan->mmio_vma);
- nvkm_vm_put(&chan->mmio_vma);
- }
- nvkm_memory_del(&chan->mmio);
+ nvkm_vmm_put(chan->vmm, &chan->mmio_vma);
+ nvkm_memory_unref(&chan->mmio);
+ nvkm_vmm_unref(&chan->vmm);
return chan;
}
@@ -380,6 +376,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
struct gf100_gr_data *data = gr->mmio_data;
struct gf100_gr_mmio *mmio = gr->mmio_list;
struct gf100_gr_chan *chan;
+ struct gf100_vmm_map_v0 args = { .priv = 1 };
struct nvkm_device *device = gr->base.engine.subdev.device;
int ret, i;
@@ -387,6 +384,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
return -ENOMEM;
nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object);
chan->gr = gr;
+ chan->vmm = nvkm_vmm_ref(fifoch->vmm);
*pobject = &chan->object;
/* allocate memory for a "mmio list" buffer that's used by the HUB
@@ -398,12 +396,14 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
if (ret)
return ret;
- ret = nvkm_vm_get(fifoch->vm, 0x1000, 12, NV_MEM_ACCESS_RW |
- NV_MEM_ACCESS_SYS, &chan->mmio_vma);
+ ret = nvkm_vmm_get(fifoch->vmm, 12, 0x1000, &chan->mmio_vma);
if (ret)
return ret;
- nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0);
+ ret = nvkm_memory_map(chan->mmio, 0, fifoch->vmm,
+ chan->mmio_vma, &args, sizeof(args));
+ if (ret)
+ return ret;
/* allocate buffers referenced by mmio list */
for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
@@ -413,13 +413,19 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
if (ret)
return ret;
- ret = nvkm_vm_get(fifoch->vm,
- nvkm_memory_size(chan->data[i].mem), 12,
- data->access, &chan->data[i].vma);
+ ret = nvkm_vmm_get(fifoch->vmm, 12,
+ nvkm_memory_size(chan->data[i].mem),
+ &chan->data[i].vma);
+ if (ret)
+ return ret;
+
+ args.priv = data->priv;
+
+ ret = nvkm_memory_map(chan->data[i].mem, 0, chan->vmm,
+ chan->data[i].vma, &args, sizeof(args));
if (ret)
return ret;
- nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0);
data++;
}
@@ -430,7 +436,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
u32 data = mmio->data;
if (mmio->buffer >= 0) {
- u64 info = chan->data[mmio->buffer].vma.offset;
+ u64 info = chan->data[mmio->buffer].vma->addr;
data |= info >> mmio->shift;
}
@@ -1855,8 +1861,12 @@ gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
int ret;
ret = nvkm_firmware_get(device, fwname, &fw);
- if (ret)
- return gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
+ if (ret) {
+ ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
+ if (ret)
+ return -ENODEV;
+ return 0;
+ }
fuc->size = fw->size;
fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
@@ -1903,25 +1913,33 @@ gf100_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
return 0;
}
+void
+gf100_gr_init_gpc_mmu(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ struct nvkm_fb *fb = device->fb;
+
+ nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0x00000001);
+ nvkm_wr32(device, 0x4188a4, 0x00000000);
+ nvkm_wr32(device, 0x418888, 0x00000000);
+ nvkm_wr32(device, 0x41888c, 0x00000000);
+ nvkm_wr32(device, 0x418890, 0x00000000);
+ nvkm_wr32(device, 0x418894, 0x00000000);
+ nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(fb->mmu_wr) >> 8);
+ nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(fb->mmu_rd) >> 8);
+}
+
int
gf100_gr_init(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
- struct nvkm_fb *fb = device->fb;
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
int gpc, tpc, rop;
int i;
- nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
- nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
+ gr->func->init_gpc_mmu(gr);
gf100_gr_mmio(gr, gr->func->mmio);
@@ -2036,6 +2054,7 @@ gf100_gr_gpccs_ucode = {
static const struct gf100_gr_func
gf100_gr = {
.init = gf100_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.mmio = gf100_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index a36e45a4a635..d7c2adb9b543 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -45,7 +45,7 @@
struct gf100_gr_data {
u32 size;
u32 align;
- u32 access;
+ bool priv;
};
struct gf100_gr_mmio {
@@ -156,18 +156,20 @@ int gp100_gr_init(struct gf100_gr *);
void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
+#include <core/object.h>
struct gf100_gr_chan {
struct nvkm_object object;
struct gf100_gr *gr;
+ struct nvkm_vmm *vmm;
struct nvkm_memory *mmio;
- struct nvkm_vma mmio_vma;
+ struct nvkm_vma *mmio_vma;
int mmio_nr;
struct {
struct nvkm_memory *mem;
- struct nvkm_vma vma;
+ struct nvkm_vma *vma;
} data[4];
};
@@ -253,6 +255,7 @@ extern const struct gf100_gr_init gf100_gr_init_mpc_0[];
extern const struct gf100_gr_init gf100_gr_init_be_0[];
extern const struct gf100_gr_init gf100_gr_init_fe_1[];
extern const struct gf100_gr_init gf100_gr_init_pe_1[];
+void gf100_gr_init_gpc_mmu(struct gf100_gr *);
extern const struct gf100_gr_init gf104_gr_init_ds_0[];
extern const struct gf100_gr_init gf104_gr_init_tex_0[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index d736dcd55ea2..ec0f11983b23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -115,6 +115,7 @@ gf104_gr_pack_mmio[] = {
static const struct gf100_gr_func
gf104_gr = {
.init = gf100_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.mmio = gf104_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 2f0d24498427..cc152eb74123 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -106,6 +106,7 @@ gf108_gr_pack_mmio[] = {
static const struct gf100_gr_func
gf108_gr = {
.init = gf100_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.mmio = gf108_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index d1d942eb86af..10d2d73ca8c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -87,6 +87,7 @@ gf110_gr_pack_mmio[] = {
static const struct gf100_gr_func
gf110_gr = {
.init = gf100_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.mmio = gf110_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index 0124e468086e..ac09a07c4150 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -123,6 +123,7 @@ gf117_gr_gpccs_ucode = {
static const struct gf100_gr_func
gf117_gr = {
.init = gf100_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.mmio = gf117_gr_pack_mmio,
.fecs.ucode = &gf117_gr_fecs_ucode,
.gpccs.ucode = &gf117_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 8d8e4cafe28f..7f449ec6f760 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -178,6 +178,7 @@ gf119_gr_pack_mmio[] = {
static const struct gf100_gr_func
gf119_gr = {
.init = gf100_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.mmio = gf119_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index ec22da6c99fc..5e82f94c2245 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -24,8 +24,6 @@
#include "gf100.h"
#include "ctxgf100.h"
-#include <subdev/fb.h>
-
#include <nvif/class.h>
/*******************************************************************************
@@ -207,21 +205,13 @@ int
gk104_gr_init(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
- struct nvkm_fb *fb = device->fb;
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
int gpc, tpc, rop;
int i;
- nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x08a4), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x0888), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
- nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
+ gr->func->init_gpc_mmu(gr);
gf100_gr_mmio(gr, gr->func->mmio);
@@ -339,6 +329,7 @@ gk104_gr_gpccs_ucode = {
static const struct gf100_gr_func
gk104_gr = {
.init = gk104_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.mmio = gk104_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index f31b171a4102..a38e19b61c1d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -183,6 +183,7 @@ gk110_gr_gpccs_ucode = {
static const struct gf100_gr_func
gk110_gr = {
.init = gk104_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.mmio = gk110_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index d76dd178007f..1912c0bfd7ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -103,6 +103,7 @@ gk110b_gr_pack_mmio[] = {
static const struct gf100_gr_func
gk110b_gr = {
.init = gk104_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.mmio = gk110b_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index 14bbe6ed02a9..1fc258163f25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -162,6 +162,7 @@ gk208_gr_gpccs_ucode = {
static const struct gf100_gr_func
gk208_gr = {
.init = gk104_gr_init,
+ .init_gpc_mmu = gf100_gr_init_gpc_mmu,
.init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.mmio = gk208_gr_pack_mmio,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index df2cd864147c..111c8bb4497b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -60,7 +60,7 @@ void *
nv20_gr_chan_dtor(struct nvkm_object *object)
{
struct nv20_gr_chan *chan = nv20_gr_chan(object);
- nvkm_memory_del(&chan->inst);
+ nvkm_memory_unref(&chan->inst);
return chan;
}
@@ -324,7 +324,7 @@ void *
nv20_gr_dtor(struct nvkm_gr *base)
{
struct nv20_gr *gr = nv20_gr(base);
- nvkm_memory_del(&gr->ctxtab);
+ nvkm_memory_unref(&gr->ctxtab);
return gr;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
index ad7e53bb7c23..979dc5f7b32e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -20,6 +20,7 @@ void nv20_gr_tile(struct nvkm_gr *, int, struct nvkm_fb_tile *);
int nv30_gr_init(struct nvkm_gr *);
#define nv20_gr_chan(p) container_of((p), struct nv20_gr_chan, object)
+#include <core/object.h>
struct nv20_gr_chan {
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index 89b773233ac5..731400937edd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -17,6 +17,7 @@ void nv40_gr_intr(struct nvkm_gr *);
u64 nv40_gr_units(struct nvkm_gr *);
#define nv40_gr_chan(p) container_of((p), struct nv40_gr_chan, object)
+#include <core/object.h>
struct nv40_gr_chan {
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index 567fa4f3e518..5b9d99bee207 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -20,6 +20,7 @@ u64 nv50_gr_units(struct nvkm_gr *);
int g84_gr_tlb_flush(struct nvkm_gr *);
#define nv50_gr_chan(p) container_of((p), struct nv50_gr_chan, object)
+#include <core/object.h>
struct nv50_gr_chan {
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index 1ac2b4558bec..b31fad8bdaad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -19,6 +19,7 @@ struct nv31_mpeg_func {
};
#define nv31_mpeg_chan(p) container_of((p), struct nv31_mpeg_chan, object)
+#include <core/object.h>
struct nv31_mpeg_chan {
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
index 4e528851e9c0..6df880a39019 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
@@ -24,6 +24,7 @@
#include "priv.h"
#include <core/gpuobj.h>
+#include <core/object.h>
#include <subdev/timer.h>
#include <nvif/class.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
index 17240d54b1eb..9fad3611a843 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -68,6 +68,7 @@ struct nvkm_specdom {
};
#define nvkm_perfdom(p) container_of((p), struct nvkm_perfdom, object)
+#include <core/object.h>
struct nvkm_perfdom {
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
index b1fa69314e4a..d42862fc43fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
@@ -2,9 +2,11 @@
#ifndef __NVKM_SW_CHAN_H__
#define __NVKM_SW_CHAN_H__
#define nvkm_sw_chan(p) container_of((p), struct nvkm_sw_chan, object)
-#include "priv.h"
+#include <core/object.h>
#include <core/event.h>
+#include "priv.h"
+
struct nvkm_sw_chan {
const struct nvkm_sw_chan_func *func;
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
index 7050a9e49db1..d7034950ba87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
@@ -2,7 +2,7 @@
#ifndef __NVKM_NVSW_H__
#define __NVKM_NVSW_H__
#define nvkm_nvsw(p) container_of((p), struct nvkm_nvsw, object)
-#include "priv.h"
+#include <core/object.h>
struct nvkm_nvsw {
struct nvkm_object object;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
index 06bdb67a0205..70549381e082 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
@@ -86,7 +86,7 @@ nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend)
nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */
if (!suspend)
- nvkm_memory_del(&xtensa->gpu_fw);
+ nvkm_memory_unref(&xtensa->gpu_fw);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 1b7f48efd8b1..14be41f24155 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -60,7 +60,7 @@ nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
}
void
-nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *inst)
+nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *inst)
{
if (!falcon->func->bind_context) {
nvkm_error(falcon->user,
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
index 669c24028470..9def926f24d4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -180,7 +180,7 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
}
static void
-nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
+nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
{
u32 inst_loc;
u32 fbif;
@@ -216,7 +216,7 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);
/* Set context */
- switch (nvkm_memory_target(ctx->memory)) {
+ switch (nvkm_memory_target(ctx)) {
case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
@@ -228,7 +228,7 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
/* Enable context */
nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
nvkm_falcon_wr32(falcon, 0x054,
- ((ctx->addr >> 12) & 0xfffffff) |
+ ((nvkm_memory_addr(ctx) >> 12) & 0xfffffff) |
(inst_loc << 28) | (1 << 30));
nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index 1e138b337955..e5830453813d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -3,3 +3,5 @@ nvkm-y += nvkm/subdev/bar/nv50.o
nvkm-y += nvkm/subdev/bar/g84.o
nvkm-y += nvkm/subdev/bar/gf100.o
nvkm-y += nvkm/subdev/bar/gk20a.o
+nvkm-y += nvkm/subdev/bar/gm107.o
+nvkm-y += nvkm/subdev/bar/gm20b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index c561d148cebc..9646adec57cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -30,39 +30,76 @@ nvkm_bar_flush(struct nvkm_bar *bar)
bar->func->flush(bar);
}
-struct nvkm_vm *
-nvkm_bar_kmap(struct nvkm_bar *bar)
+struct nvkm_vmm *
+nvkm_bar_bar1_vmm(struct nvkm_device *device)
{
- /* disallow kmap() until after vm has been bootstrapped */
- if (bar && bar->func->kmap && bar->subdev.oneinit)
- return bar->func->kmap(bar);
+ return device->bar->func->bar1.vmm(device->bar);
+}
+
+struct nvkm_vmm *
+nvkm_bar_bar2_vmm(struct nvkm_device *device)
+{
+ /* Denies access to BAR2 when it's not initialised, used by INSTMEM
+ * to know when object access needs to go through the BAR0 window.
+ */
+ struct nvkm_bar *bar = device->bar;
+ if (bar && bar->bar2)
+ return bar->func->bar2.vmm(bar);
return NULL;
}
-int
-nvkm_bar_umap(struct nvkm_bar *bar, u64 size, int type, struct nvkm_vma *vma)
+void
+nvkm_bar_bar2_fini(struct nvkm_device *device)
{
- return bar->func->umap(bar, size, type, vma);
+ struct nvkm_bar *bar = device->bar;
+ if (bar && bar->bar2) {
+ bar->func->bar2.fini(bar);
+ bar->bar2 = false;
+ }
+}
+
+void
+nvkm_bar_bar2_init(struct nvkm_device *device)
+{
+ struct nvkm_bar *bar = device->bar;
+ if (bar && bar->subdev.oneinit && !bar->bar2 && bar->func->bar2.init) {
+ bar->func->bar2.init(bar);
+ bar->func->bar2.wait(bar);
+ bar->bar2 = true;
+ }
}
static int
-nvkm_bar_oneinit(struct nvkm_subdev *subdev)
+nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_bar *bar = nvkm_bar(subdev);
- return bar->func->oneinit(bar);
+ bar->func->bar1.fini(bar);
+ return 0;
}
static int
nvkm_bar_init(struct nvkm_subdev *subdev)
{
struct nvkm_bar *bar = nvkm_bar(subdev);
- return bar->func->init(bar);
+ bar->func->bar1.init(bar);
+ bar->func->bar1.wait(bar);
+ if (bar->func->init)
+ bar->func->init(bar);
+ return 0;
+}
+
+static int
+nvkm_bar_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_bar *bar = nvkm_bar(subdev);
+ return bar->func->oneinit(bar);
}
static void *
nvkm_bar_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_bar *bar = nvkm_bar(subdev);
+ nvkm_bar_bar2_fini(subdev->device);
return bar->func->dtor(bar);
}
@@ -71,6 +108,7 @@ nvkm_bar = {
.dtor = nvkm_bar_dtor,
.oneinit = nvkm_bar_oneinit,
.init = nvkm_bar_init,
+ .fini = nvkm_bar_fini,
};
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
index ef717136c838..87f26f54b481 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/g84.c
@@ -44,8 +44,14 @@ g84_bar_func = {
.dtor = nv50_bar_dtor,
.oneinit = nv50_bar_oneinit,
.init = nv50_bar_init,
- .kmap = nv50_bar_kmap,
- .umap = nv50_bar_umap,
+ .bar1.init = nv50_bar_bar1_init,
+ .bar1.fini = nv50_bar_bar1_fini,
+ .bar1.wait = nv50_bar_bar1_wait,
+ .bar1.vmm = nv50_bar_bar1_vmm,
+ .bar2.init = nv50_bar_bar2_init,
+ .bar2.fini = nv50_bar_bar2_fini,
+ .bar2.wait = nv50_bar_bar1_wait,
+ .bar2.vmm = nv50_bar_bar2_vmm,
.flush = g84_bar_flush,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index 676c167c95b9..a3ba7f50198b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -23,39 +23,73 @@
*/
#include "gf100.h"
-#include <core/gpuobj.h>
+#include <core/memory.h>
#include <core/option.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
-static struct nvkm_vm *
-gf100_bar_kmap(struct nvkm_bar *base)
+struct nvkm_vmm *
+gf100_bar_bar1_vmm(struct nvkm_bar *base)
{
- return gf100_bar(base)->bar[0].vm;
+ return gf100_bar(base)->bar[1].vmm;
}
-int
-gf100_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
+void
+gf100_bar_bar1_wait(struct nvkm_bar *base)
+{
+ /* NFI why it's twice. */
+ nvkm_bar_flush(base);
+ nvkm_bar_flush(base);
+}
+
+void
+gf100_bar_bar1_fini(struct nvkm_bar *bar)
{
+ nvkm_mask(bar->subdev.device, 0x001704, 0x80000000, 0x00000000);
+}
+
+void
+gf100_bar_bar1_init(struct nvkm_bar *base)
+{
+ struct nvkm_device *device = base->subdev.device;
struct gf100_bar *bar = gf100_bar(base);
- return nvkm_vm_get(bar->bar[1].vm, size, type, NV_MEM_ACCESS_RW, vma);
+ const u32 addr = nvkm_memory_addr(bar->bar[1].inst) >> 12;
+ nvkm_wr32(device, 0x001704, 0x80000000 | addr);
+}
+
+struct nvkm_vmm *
+gf100_bar_bar2_vmm(struct nvkm_bar *base)
+{
+ return gf100_bar(base)->bar[0].vmm;
+}
+
+void
+gf100_bar_bar2_fini(struct nvkm_bar *bar)
+{
+ nvkm_mask(bar->subdev.device, 0x001714, 0x80000000, 0x00000000);
+}
+
+void
+gf100_bar_bar2_init(struct nvkm_bar *base)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct gf100_bar *bar = gf100_bar(base);
+ u32 addr = nvkm_memory_addr(bar->bar[0].inst) >> 12;
+ if (bar->bar2_halve)
+ addr |= 0x40000000;
+ nvkm_wr32(device, 0x001714, 0x80000000 | addr);
}
static int
-gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
- struct lock_class_key *key, int bar_nr)
+gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
+ struct lock_class_key *key, int bar_nr)
{
struct nvkm_device *device = bar->base.subdev.device;
- struct nvkm_vm *vm;
resource_size_t bar_len;
int ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, false,
- &bar_vm->mem);
- if (ret)
- return ret;
-
- ret = nvkm_gpuobj_new(device, 0x8000, 0, false, NULL, &bar_vm->pgd);
+ &bar_vm->inst);
if (ret)
return ret;
@@ -63,98 +97,64 @@ gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
if (bar_nr == 3 && bar->bar2_halve)
bar_len >>= 1;
- ret = nvkm_vm_new(device, 0, bar_len, 0, key, &vm);
+ ret = nvkm_vmm_new(device, 0, bar_len, NULL, 0, key,
+ (bar_nr == 3) ? "bar2" : "bar1", &bar_vm->vmm);
if (ret)
return ret;
- atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
+ atomic_inc(&bar_vm->vmm->engref[NVKM_SUBDEV_BAR]);
+ bar_vm->vmm->debug = bar->base.subdev.debug;
/*
* Bootstrap page table lookup.
*/
if (bar_nr == 3) {
- ret = nvkm_vm_boot(vm, bar_len);
- if (ret) {
- nvkm_vm_ref(NULL, &vm, NULL);
+ ret = nvkm_vmm_boot(bar_vm->vmm);
+ if (ret)
return ret;
- }
}
- ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
- nvkm_vm_ref(NULL, &vm, NULL);
- if (ret)
- return ret;
-
- nvkm_kmap(bar_vm->mem);
- nvkm_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
- nvkm_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
- nvkm_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
- nvkm_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
- nvkm_done(bar_vm->mem);
- return 0;
+ return nvkm_vmm_join(bar_vm->vmm, bar_vm->inst);
}
int
gf100_bar_oneinit(struct nvkm_bar *base)
{
static struct lock_class_key bar1_lock;
- static struct lock_class_key bar3_lock;
+ static struct lock_class_key bar2_lock;
struct gf100_bar *bar = gf100_bar(base);
int ret;
- /* BAR3 */
- if (bar->base.func->kmap) {
- ret = gf100_bar_ctor_vm(bar, &bar->bar[0], &bar3_lock, 3);
+ /* BAR2 */
+ if (bar->base.func->bar2.init) {
+ ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, 3);
if (ret)
return ret;
+
+ bar->base.subdev.oneinit = true;
+ nvkm_bar_bar2_init(bar->base.subdev.device);
}
/* BAR1 */
- ret = gf100_bar_ctor_vm(bar, &bar->bar[1], &bar1_lock, 1);
+ ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, 1);
if (ret)
return ret;
return 0;
}
-int
-gf100_bar_init(struct nvkm_bar *base)
-{
- struct gf100_bar *bar = gf100_bar(base);
- struct nvkm_device *device = bar->base.subdev.device;
- u32 addr;
-
- nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
- nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
-
- addr = nvkm_memory_addr(bar->bar[1].mem) >> 12;
- nvkm_wr32(device, 0x001704, 0x80000000 | addr);
-
- if (bar->bar[0].mem) {
- addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
- if (bar->bar2_halve)
- addr |= 0x40000000;
- nvkm_wr32(device, 0x001714, 0x80000000 | addr);
- }
-
- return 0;
-}
-
void *
gf100_bar_dtor(struct nvkm_bar *base)
{
struct gf100_bar *bar = gf100_bar(base);
- nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].pgd);
- nvkm_gpuobj_del(&bar->bar[1].pgd);
- nvkm_memory_del(&bar->bar[1].mem);
+ nvkm_vmm_part(bar->bar[1].vmm, bar->bar[1].inst);
+ nvkm_vmm_unref(&bar->bar[1].vmm);
+ nvkm_memory_unref(&bar->bar[1].inst);
- if (bar->bar[0].vm) {
- nvkm_memory_del(&bar->bar[0].vm->pgt[0].mem[0]);
- nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
- }
- nvkm_gpuobj_del(&bar->bar[0].pgd);
- nvkm_memory_del(&bar->bar[0].mem);
+ nvkm_vmm_part(bar->bar[0].vmm, bar->bar[0].inst);
+ nvkm_vmm_unref(&bar->bar[0].vmm);
+ nvkm_memory_unref(&bar->bar[0].inst);
return bar;
}
@@ -175,9 +175,14 @@ static const struct nvkm_bar_func
gf100_bar_func = {
.dtor = gf100_bar_dtor,
.oneinit = gf100_bar_oneinit,
- .init = gf100_bar_init,
- .kmap = gf100_bar_kmap,
- .umap = gf100_bar_umap,
+ .bar1.init = gf100_bar_bar1_init,
+ .bar1.fini = gf100_bar_bar1_fini,
+ .bar1.wait = gf100_bar_bar1_wait,
+ .bar1.vmm = gf100_bar_bar1_vmm,
+ .bar2.init = gf100_bar_bar2_init,
+ .bar2.fini = gf100_bar_bar2_fini,
+ .bar2.wait = gf100_bar_bar1_wait,
+ .bar2.vmm = gf100_bar_bar2_vmm,
.flush = g84_bar_flush,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
index 9accd7923788..4f2b66e8d795 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -4,22 +4,24 @@
#define gf100_bar(p) container_of((p), struct gf100_bar, base)
#include "priv.h"
-struct gf100_bar_vm {
- struct nvkm_memory *mem;
- struct nvkm_gpuobj *pgd;
- struct nvkm_vm *vm;
+struct gf100_barN {
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
};
struct gf100_bar {
struct nvkm_bar base;
bool bar2_halve;
- struct gf100_bar_vm bar[2];
+ struct gf100_barN bar[2];
};
int gf100_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
int, struct nvkm_bar **);
void *gf100_bar_dtor(struct nvkm_bar *);
int gf100_bar_oneinit(struct nvkm_bar *);
-int gf100_bar_init(struct nvkm_bar *);
-int gf100_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
+void gf100_bar_bar1_init(struct nvkm_bar *);
+void gf100_bar_bar1_wait(struct nvkm_bar *);
+struct nvkm_vmm *gf100_bar_bar1_vmm(struct nvkm_bar *);
+void gf100_bar_bar2_init(struct nvkm_bar *);
+struct nvkm_vmm *gf100_bar_bar2_vmm(struct nvkm_bar *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
index 9232fab4274c..b10077d38839 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c
@@ -25,8 +25,10 @@ static const struct nvkm_bar_func
gk20a_bar_func = {
.dtor = gf100_bar_dtor,
.oneinit = gf100_bar_oneinit,
- .init = gf100_bar_init,
- .umap = gf100_bar_umap,
+ .bar1.init = gf100_bar_bar1_init,
+ .bar1.fini = gf100_bar_bar1_fini,
+ .bar1.wait = gf100_bar_bar1_wait,
+ .bar1.vmm = gf100_bar_bar1_vmm,
.flush = g84_bar_flush,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
new file mode 100644
index 000000000000..3ddf9222d935
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm107.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <subdev/timer.h>
+
+void
+gm107_bar_bar1_wait(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x001710) & 0x00000003))
+ break;
+ );
+}
+
+static void
+gm107_bar_bar2_wait(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x001710) & 0x0000000c))
+ break;
+ );
+}
+
+static const struct nvkm_bar_func
+gm107_bar_func = {
+ .dtor = gf100_bar_dtor,
+ .oneinit = gf100_bar_oneinit,
+ .bar1.init = gf100_bar_bar1_init,
+ .bar1.fini = gf100_bar_bar1_fini,
+ .bar1.wait = gm107_bar_bar1_wait,
+ .bar1.vmm = gf100_bar_bar1_vmm,
+ .bar2.init = gf100_bar_bar2_init,
+ .bar2.fini = gf100_bar_bar2_fini,
+ .bar2.wait = gm107_bar_bar2_wait,
+ .bar2.vmm = gf100_bar_bar2_vmm,
+ .flush = g84_bar_flush,
+};
+
+int
+gm107_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+ return gf100_bar_new_(&gm107_bar_func, device, index, pbar);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
new file mode 100644
index 000000000000..950bff1955ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gm20b.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+static const struct nvkm_bar_func
+gm20b_bar_func = {
+ .dtor = gf100_bar_dtor,
+ .oneinit = gf100_bar_oneinit,
+ .bar1.init = gf100_bar_bar1_init,
+ .bar1.fini = gf100_bar_bar1_fini,
+ .bar1.wait = gm107_bar_bar1_wait,
+ .bar1.vmm = gf100_bar_bar1_vmm,
+ .flush = g84_bar_flush,
+};
+
+int
+gm20b_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+ int ret = gf100_bar_new_(&gm20b_bar_func, device, index, pbar);
+ if (ret == 0)
+ (*pbar)->iomap_uncached = true;
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 6eff637ac301..157b076a1272 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -28,19 +28,6 @@
#include <subdev/mmu.h>
#include <subdev/timer.h>
-struct nvkm_vm *
-nv50_bar_kmap(struct nvkm_bar *base)
-{
- return nv50_bar(base)->bar3_vm;
-}
-
-int
-nv50_bar_umap(struct nvkm_bar *base, u64 size, int type, struct nvkm_vma *vma)
-{
- struct nv50_bar *bar = nv50_bar(base);
- return nvkm_vm_get(bar->bar1_vm, size, type, NV_MEM_ACCESS_RW, vma);
-}
-
static void
nv50_bar_flush(struct nvkm_bar *base)
{
@@ -56,14 +43,72 @@ nv50_bar_flush(struct nvkm_bar *base)
spin_unlock_irqrestore(&bar->base.lock, flags);
}
+struct nvkm_vmm *
+nv50_bar_bar1_vmm(struct nvkm_bar *base)
+{
+ return nv50_bar(base)->bar1_vmm;
+}
+
+void
+nv50_bar_bar1_wait(struct nvkm_bar *base)
+{
+ nvkm_bar_flush(base);
+}
+
+void
+nv50_bar_bar1_fini(struct nvkm_bar *bar)
+{
+ nvkm_wr32(bar->subdev.device, 0x001708, 0x00000000);
+}
+
+void
+nv50_bar_bar1_init(struct nvkm_bar *base)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct nv50_bar *bar = nv50_bar(base);
+ nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
+}
+
+struct nvkm_vmm *
+nv50_bar_bar2_vmm(struct nvkm_bar *base)
+{
+ return nv50_bar(base)->bar2_vmm;
+}
+
+void
+nv50_bar_bar2_fini(struct nvkm_bar *bar)
+{
+ nvkm_wr32(bar->subdev.device, 0x00170c, 0x00000000);
+}
+
+void
+nv50_bar_bar2_init(struct nvkm_bar *base)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct nv50_bar *bar = nv50_bar(base);
+ nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
+ nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
+ nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar2->node->offset >> 4);
+}
+
+void
+nv50_bar_init(struct nvkm_bar *base)
+{
+ struct nv50_bar *bar = nv50_bar(base);
+ struct nvkm_device *device = bar->base.subdev.device;
+ int i;
+
+ for (i = 0; i < 8; i++)
+ nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
+}
+
int
nv50_bar_oneinit(struct nvkm_bar *base)
{
struct nv50_bar *bar = nv50_bar(base);
struct nvkm_device *device = bar->base.subdev.device;
static struct lock_class_key bar1_lock;
- static struct lock_class_key bar3_lock;
- struct nvkm_vm *vm;
+ static struct lock_class_key bar2_lock;
u64 start, limit;
int ret;
@@ -80,51 +125,54 @@ nv50_bar_oneinit(struct nvkm_bar *base)
if (ret)
return ret;
- /* BAR3 */
+ /* BAR2 */
start = 0x0100000000ULL;
limit = start + device->func->resource_size(device, 3);
- ret = nvkm_vm_new(device, start, limit - start, start, &bar3_lock, &vm);
+ ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
+ &bar2_lock, "bar2", &bar->bar2_vmm);
if (ret)
return ret;
- atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
+ atomic_inc(&bar->bar2_vmm->engref[NVKM_SUBDEV_BAR]);
+ bar->bar2_vmm->debug = bar->base.subdev.debug;
- ret = nvkm_vm_boot(vm, limit-- - start);
+ ret = nvkm_vmm_boot(bar->bar2_vmm);
if (ret)
return ret;
- ret = nvkm_vm_ref(vm, &bar->bar3_vm, bar->pgd);
- nvkm_vm_ref(NULL, &vm, NULL);
+ ret = nvkm_vmm_join(bar->bar2_vmm, bar->mem->memory);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar3);
+ ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar2);
if (ret)
return ret;
- nvkm_kmap(bar->bar3);
- nvkm_wo32(bar->bar3, 0x00, 0x7fc00000);
- nvkm_wo32(bar->bar3, 0x04, lower_32_bits(limit));
- nvkm_wo32(bar->bar3, 0x08, lower_32_bits(start));
- nvkm_wo32(bar->bar3, 0x0c, upper_32_bits(limit) << 24 |
+ nvkm_kmap(bar->bar2);
+ nvkm_wo32(bar->bar2, 0x00, 0x7fc00000);
+ nvkm_wo32(bar->bar2, 0x04, lower_32_bits(limit));
+ nvkm_wo32(bar->bar2, 0x08, lower_32_bits(start));
+ nvkm_wo32(bar->bar2, 0x0c, upper_32_bits(limit) << 24 |
upper_32_bits(start));
- nvkm_wo32(bar->bar3, 0x10, 0x00000000);
- nvkm_wo32(bar->bar3, 0x14, 0x00000000);
- nvkm_done(bar->bar3);
+ nvkm_wo32(bar->bar2, 0x10, 0x00000000);
+ nvkm_wo32(bar->bar2, 0x14, 0x00000000);
+ nvkm_done(bar->bar2);
+
+ bar->base.subdev.oneinit = true;
+ nvkm_bar_bar2_init(device);
/* BAR1 */
start = 0x0000000000ULL;
limit = start + device->func->resource_size(device, 1);
- ret = nvkm_vm_new(device, start, limit-- - start, start, &bar1_lock, &vm);
- if (ret)
- return ret;
+ ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
+ &bar1_lock, "bar1", &bar->bar1_vmm);
- atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
+ atomic_inc(&bar->bar1_vmm->engref[NVKM_SUBDEV_BAR]);
+ bar->bar1_vmm->debug = bar->base.subdev.debug;
- ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
- nvkm_vm_ref(NULL, &vm, NULL);
+ ret = nvkm_vmm_join(bar->bar1_vmm, bar->mem->memory);
if (ret)
return ret;
@@ -144,45 +192,21 @@ nv50_bar_oneinit(struct nvkm_bar *base)
return 0;
}
-int
-nv50_bar_init(struct nvkm_bar *base)
-{
- struct nv50_bar *bar = nv50_bar(base);
- struct nvkm_device *device = bar->base.subdev.device;
- int i;
-
- nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
- nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
- nvkm_wr32(device, 0x100c80, 0x00060001);
- if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
- break;
- ) < 0)
- return -EBUSY;
-
- nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
- nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
- nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
- nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar3->node->offset >> 4);
- for (i = 0; i < 8; i++)
- nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
- return 0;
-}
-
void *
nv50_bar_dtor(struct nvkm_bar *base)
{
struct nv50_bar *bar = nv50_bar(base);
- nvkm_gpuobj_del(&bar->bar1);
- nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
- nvkm_gpuobj_del(&bar->bar3);
- if (bar->bar3_vm) {
- nvkm_memory_del(&bar->bar3_vm->pgt[0].mem[0]);
- nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
+ if (bar->mem) {
+ nvkm_gpuobj_del(&bar->bar1);
+ nvkm_vmm_part(bar->bar1_vmm, bar->mem->memory);
+ nvkm_vmm_unref(&bar->bar1_vmm);
+ nvkm_gpuobj_del(&bar->bar2);
+ nvkm_vmm_part(bar->bar2_vmm, bar->mem->memory);
+ nvkm_vmm_unref(&bar->bar2_vmm);
+ nvkm_gpuobj_del(&bar->pgd);
+ nvkm_gpuobj_del(&bar->pad);
+ nvkm_gpuobj_del(&bar->mem);
}
- nvkm_gpuobj_del(&bar->pgd);
- nvkm_gpuobj_del(&bar->pad);
- nvkm_gpuobj_del(&bar->mem);
return bar;
}
@@ -204,8 +228,14 @@ nv50_bar_func = {
.dtor = nv50_bar_dtor,
.oneinit = nv50_bar_oneinit,
.init = nv50_bar_init,
- .kmap = nv50_bar_kmap,
- .umap = nv50_bar_umap,
+ .bar1.init = nv50_bar_bar1_init,
+ .bar1.fini = nv50_bar_bar1_fini,
+ .bar1.wait = nv50_bar_bar1_wait,
+ .bar1.vmm = nv50_bar_bar1_vmm,
+ .bar2.init = nv50_bar_bar2_init,
+ .bar2.fini = nv50_bar_bar2_fini,
+ .bar2.wait = nv50_bar_bar1_wait,
+ .bar2.vmm = nv50_bar_bar2_vmm,
.flush = nv50_bar_flush,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
index ce9ab9110b08..2fe833f6d9f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
@@ -10,18 +10,20 @@ struct nv50_bar {
struct nvkm_gpuobj *mem;
struct nvkm_gpuobj *pad;
struct nvkm_gpuobj *pgd;
- struct nvkm_vm *bar1_vm;
+ struct nvkm_vmm *bar1_vmm;
struct nvkm_gpuobj *bar1;
- struct nvkm_vm *bar3_vm;
- struct nvkm_gpuobj *bar3;
+ struct nvkm_vmm *bar2_vmm;
+ struct nvkm_gpuobj *bar2;
};
int nv50_bar_new_(const struct nvkm_bar_func *, struct nvkm_device *,
int, u32 pgd_addr, struct nvkm_bar **);
void *nv50_bar_dtor(struct nvkm_bar *);
int nv50_bar_oneinit(struct nvkm_bar *);
-int nv50_bar_init(struct nvkm_bar *);
-struct nvkm_vm *nv50_bar_kmap(struct nvkm_bar *);
-int nv50_bar_umap(struct nvkm_bar *, u64, int, struct nvkm_vma *);
-void nv50_bar_unmap(struct nvkm_bar *, struct nvkm_vma *);
+void nv50_bar_init(struct nvkm_bar *);
+void nv50_bar_bar1_init(struct nvkm_bar *);
+void nv50_bar_bar1_wait(struct nvkm_bar *);
+struct nvkm_vmm *nv50_bar_bar1_vmm(struct nvkm_bar *);
+void nv50_bar_bar2_init(struct nvkm_bar *);
+struct nvkm_vmm *nv50_bar_bar2_vmm(struct nvkm_bar *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index 63d111c8afd4..01ba5b26666e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -10,11 +10,25 @@ void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
struct nvkm_bar_func {
void *(*dtor)(struct nvkm_bar *);
int (*oneinit)(struct nvkm_bar *);
- int (*init)(struct nvkm_bar *);
- struct nvkm_vm *(*kmap)(struct nvkm_bar *);
- int (*umap)(struct nvkm_bar *, u64 size, int type, struct nvkm_vma *);
+ void (*init)(struct nvkm_bar *);
+
+ struct {
+ void (*init)(struct nvkm_bar *);
+ void (*fini)(struct nvkm_bar *);
+ void (*wait)(struct nvkm_bar *);
+ struct nvkm_vmm *(*vmm)(struct nvkm_bar *);
+ } bar1, bar2;
+
void (*flush)(struct nvkm_bar *);
};
+void nv50_bar_bar1_fini(struct nvkm_bar *);
+void nv50_bar_bar2_fini(struct nvkm_bar *);
+
void g84_bar_flush(struct nvkm_bar *);
+
+void gf100_bar_bar1_fini(struct nvkm_bar *);
+void gf100_bar_bar2_fini(struct nvkm_bar *);
+
+void gm107_bar_bar1_wait(struct nvkm_bar *);
#endif
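
Note: with kmap()/umap() removed, BAR handling is now expressed as per-BAR init/fini/wait/vmm ops plus the nvkm_bar_bar1_vmm()/nvkm_bar_bar2_*() helpers added in base.c earlier in this patch. A rough usage sketch from a hypothetical caller (not code from this patch):

	struct nvkm_vmm *vmm;

	nvkm_bar_bar2_init(device);        /* program BAR2, wait, set bar->bar2 */
	vmm = nvkm_bar_bar2_vmm(device);   /* NULL until BAR2 has been set up */
	if (vmm) {
		/* map instance objects through the BAR2 VMM here */
	}
	nvkm_bar_bar2_fini(device);        /* e.g. on suspend */
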
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
index 23caef8df17f..73e463ed55c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/iccsense.c
@@ -99,7 +99,7 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
rail->extdev_id = nvbios_rd08(bios, entry + 0x1);
res_start = 0x5;
break;
- };
+ }
if (nvbios_extdev_parse(bios, rail->extdev_id, &extdev))
continue;
@@ -115,7 +115,7 @@ nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense)
default:
rail->resistor_count = 0;
break;
- };
+ }
for (r = 0; r < rail->resistor_count; ++r) {
rail->resistors[r].mohm = nvbios_rd08(bios, entry + res_start + r * 2);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index b58ee99f7bfc..9cc10e438b3d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -36,6 +36,8 @@
#include <subdev/i2c.h>
#include <subdev/vga.h>
+#include <linux/kernel.h>
+
#define bioslog(lvl, fmt, args...) do { \
nvkm_printk(init->subdev, lvl, info, "0x%08x[%c]: "fmt, \
init->offset, init_exec(init) ? \
@@ -2271,8 +2273,6 @@ static struct nvbios_init_opcode {
[0xaa] = { init_reserved },
};
-#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
-
int
nvbios_exec(struct nvbios_init *init)
{
@@ -2281,7 +2281,8 @@ nvbios_exec(struct nvbios_init *init)
init->nested++;
while (init->offset) {
u8 opcode = nvbios_rd08(bios, init->offset);
- if (opcode >= init_opcode_nr || !init_opcode[opcode].exec) {
+ if (opcode >= ARRAY_SIZE(init_opcode) ||
+ !init_opcode[opcode].exec) {
error("unknown opcode 0x%02x\n", opcode);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
index 7e83c3985020..20ff5173cf8f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/timing.c
@@ -115,16 +115,21 @@ nvbios_timingEp(struct nvkm_bios *bios, int idx,
switch (min_t(u8, *hdr, 25)) {
case 25:
p->timing_10_24 = nvbios_rd08(bios, data + 0x18);
+ /* fall through */
case 24:
case 23:
case 22:
p->timing_10_21 = nvbios_rd08(bios, data + 0x15);
+ /* fall through */
case 21:
p->timing_10_20 = nvbios_rd08(bios, data + 0x14);
+ /* fall through */
case 20:
p->timing_10_CWL = nvbios_rd08(bios, data + 0x13);
+ /* fall through */
case 19:
p->timing_10_18 = nvbios_rd08(bios, data + 0x12);
+ /* fall through */
case 18:
case 17:
p->timing_10_16 = nvbios_rd08(bios, data + 0x10);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
index 158977f8a6e6..c3dae05348eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.c
@@ -119,11 +119,11 @@ powerctrl_1_shift(int chip_version, int reg)
switch (reg) {
case 0x680520:
- shift += 4;
+ shift += 4; /* fall through */
case 0x680508:
- shift += 4;
+ shift += 4; /* fall through */
case 0x680504:
- shift += 4;
+ shift += 4; /* fall through */
case 0x680500:
shift += 4;
}
@@ -245,11 +245,11 @@ setPLL_double_highregs(struct nvkm_devinit *init, u32 reg1,
switch (reg1) {
case 0x680504:
- shift_c040 += 2;
+ shift_c040 += 2; /* fall through */
case 0x680500:
- shift_c040 += 2;
+ shift_c040 += 2; /* fall through */
case 0x680520:
- shift_c040 += 2;
+ shift_c040 += 2; /* fall through */
case 0x680508:
shift_c040 += 2;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index a7049c041594..73b5d46104bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -31,12 +31,6 @@
#include <engine/gr.h>
#include <engine/mpeg.h>
-bool
-nvkm_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
-{
- return fb->func->memtype_valid(fb, memtype);
-}
-
void
nvkm_fb_tile_fini(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
{
@@ -100,6 +94,7 @@ static int
nvkm_fb_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_fb *fb = nvkm_fb(subdev);
+ u32 tags = 0;
if (fb->func->ram_new) {
int ret = fb->func->ram_new(fb, &fb->ram);
@@ -115,7 +110,16 @@ nvkm_fb_oneinit(struct nvkm_subdev *subdev)
return ret;
}
- return 0;
+ /* Initialise compression tag allocator.
+ *
+ * LTC oneinit() will override this on Fermi and newer.
+ */
+ if (fb->func->tags) {
+ tags = fb->func->tags(fb);
+ nvkm_debug(subdev, "%d comptags\n", tags);
+ }
+
+ return nvkm_mm_init(&fb->tags, 0, 0, tags, 1);
}
static int
@@ -135,8 +139,13 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
if (fb->func->init)
fb->func->init(fb);
- if (fb->func->init_page)
- fb->func->init_page(fb);
+
+ if (fb->func->init_page) {
+ ret = fb->func->init_page(fb);
+ if (WARN_ON(ret))
+ return ret;
+ }
+
if (fb->func->init_unkn)
fb->func->init_unkn(fb);
return 0;
@@ -148,12 +157,13 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
struct nvkm_fb *fb = nvkm_fb(subdev);
int i;
- nvkm_memory_del(&fb->mmu_wr);
- nvkm_memory_del(&fb->mmu_rd);
+ nvkm_memory_unref(&fb->mmu_wr);
+ nvkm_memory_unref(&fb->mmu_rd);
for (i = 0; i < fb->tile.regions; i++)
fb->func->tile.fini(fb, i, &fb->tile.region[i]);
+ nvkm_mm_fini(&fb->tags);
nvkm_ram_del(&fb->ram);
if (fb->func->dtor)
@@ -176,7 +186,8 @@ nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
fb->func = func;
fb->tile.regions = fb->func->tile.regions;
- fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", 0);
+ fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage",
+ fb->func->default_bigpage);
}
int
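
Note: nvkm_fb now owns the compression-tag allocator (fb->tags), sized at oneinit time from the new func->tags() hook, instead of each ram implementation keeping ram->tags. A sketch of how a tile/comp path allocates from it, following the nv20-nv40 hunks later in this patch:

	struct nvkm_mm_node *tag = NULL;

	/* grab a contiguous, 'tags'-aligned run of compression tags */
	if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tag)) {
		/* program tag->offset into the chipset's zcomp registers */
	}

	/* on teardown */
	nvkm_mm_free(&fb->tags, &tag);
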
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
index 9c28392d07e4..06bf95c0c549 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/g84.c
@@ -27,6 +27,7 @@
static const struct nv50_fb_func
g84_fb = {
.ram_new = nv50_ram_new,
+ .tags = nv20_fb_tags,
.trap = 0x001d07ff,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index a239e73562c8..47d28c279707 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -27,15 +27,6 @@
#include <core/memory.h>
#include <core/option.h>
-extern const u8 gf100_pte_storage_type_map[256];
-
-bool
-gf100_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
-{
- u8 memtype = (tile_flags & 0x0000ff00) >> 8;
- return likely((gf100_pte_storage_type_map[memtype] != 0xff));
-}
-
void
gf100_fb_intr(struct nvkm_fb *base)
{
@@ -80,20 +71,17 @@ gf100_fb_oneinit(struct nvkm_fb *base)
return 0;
}
-void
+int
gf100_fb_init_page(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
switch (fb->page) {
- case 16:
- nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001);
- break;
- case 17:
+ case 16: nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001); break;
+ case 17: nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); break;
default:
- nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000);
- fb->page = 17;
- break;
+ return -EINVAL;
}
+ return 0;
}
void
@@ -143,7 +131,7 @@ gf100_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gf100_ram_new,
- .memtype_valid = gf100_fb_memtype_valid,
+ .default_bigpage = 17,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 1756f7b02858..ab261310753a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -18,7 +18,5 @@ void gf100_fb_intr(struct nvkm_fb *);
void gp100_fb_init(struct nvkm_fb *);
-void gm200_fb_init_page(struct nvkm_fb *fb);
void gm200_fb_init(struct nvkm_fb *base);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
index 56af84aa333b..4a9f463745b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
@@ -32,7 +32,7 @@ gf108_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gf108_ram_new,
- .memtype_valid = gf100_fb_memtype_valid,
+ .default_bigpage = 17,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 4245e2e6e604..0a6e8eaad42c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -32,7 +32,7 @@ gk104_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gk104_ram_new,
- .memtype_valid = gf100_fb_memtype_valid,
+ .default_bigpage = 17,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index 5d34d6136616..a7e29b125094 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -30,7 +30,7 @@ gk20a_fb = {
.init = gf100_fb_init,
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
- .memtype_valid = gf100_fb_memtype_valid,
+ .default_bigpage = 17,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index db699025f546..69c876d5d1c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -32,7 +32,7 @@ gm107_fb = {
.init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm107_ram_new,
- .memtype_valid = gf100_fb_memtype_valid,
+ .default_bigpage = 17,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index d83da5ddbc1e..8137e19d3292 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -26,22 +26,18 @@
#include <core/memory.h>
-void
+int
gm200_fb_init_page(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
switch (fb->page) {
- case 16:
- nvkm_mask(device, 0x100c80, 0x00000801, 0x00000001);
- break;
- case 17:
- nvkm_mask(device, 0x100c80, 0x00000801, 0x00000000);
- break;
+ case 16: nvkm_mask(device, 0x100c80, 0x00001801, 0x00001001); break;
+ case 17: nvkm_mask(device, 0x100c80, 0x00001801, 0x00000000); break;
+ case 0: nvkm_mask(device, 0x100c80, 0x00001800, 0x00001800); break;
default:
- nvkm_mask(device, 0x100c80, 0x00000800, 0x00000800);
- fb->page = 0;
- break;
+ return -EINVAL;
}
+ return 0;
}
void
@@ -69,7 +65,7 @@ gm200_fb = {
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm200_ram_new,
- .memtype_valid = gf100_fb_memtype_valid,
+ .default_bigpage = 0 /* per-instance. */,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
index b87c233bcd6d..12db61e31128 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm20b.c
@@ -30,7 +30,7 @@ gm20b_fb = {
.init = gm200_fb_init,
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
- .memtype_valid = gf100_fb_memtype_valid,
+ .default_bigpage = 0 /* per-instance. */,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
index 98474aec1921..147f69b30cd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -59,7 +59,6 @@ gp100_fb = {
.init_page = gm200_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.ram_new = gp100_ram_new,
- .memtype_valid = gf100_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index 73b4ae1c73dc..b84b9861ef26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -33,7 +33,6 @@ gp102_fb = {
.init = gp100_fb_init,
.init_page = gm200_fb_init_page,
.ram_new = gp100_ram_new,
- .memtype_valid = gf100_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
index f2b1fbf428d5..af8e43979dc1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
@@ -28,7 +28,6 @@ gp10b_fb = {
.init = gm200_fb_init,
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
- .memtype_valid = gf100_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
index ebb30608d5ef..9266559b45f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gt215.c
@@ -27,6 +27,7 @@
static const struct nv50_fb_func
gt215_fb = {
.ram_new = gt215_ram_new,
+ .tags = nv20_fb_tags,
.trap = 0x000d0fff,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
index 8ff2e5db4571..c886664533c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv04.c
@@ -25,14 +25,6 @@
#include "ram.h"
#include "regsnv04.h"
-bool
-nv04_fb_memtype_valid(struct nvkm_fb *fb, u32 tile_flags)
-{
- if (!(tile_flags & 0xff00))
- return true;
- return false;
-}
-
static void
nv04_fb_init(struct nvkm_fb *fb)
{
@@ -49,7 +41,6 @@ static const struct nvkm_fb_func
nv04_fb = {
.init = nv04_fb_init,
.ram_new = nv04_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
index e8c44f5a3d84..c998b7e96aa3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv10.c
@@ -61,7 +61,6 @@ nv10_fb = {
.tile.fini = nv10_fb_tile_fini,
.tile.prog = nv10_fb_tile_prog,
.ram_new = nv10_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
index 2ae0beb87567..7b9f04f44af8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv1a.c
@@ -33,7 +33,6 @@ nv1a_fb = {
.tile.fini = nv10_fb_tile_fini,
.tile.prog = nv10_fb_tile_prog,
.ram_new = nv1a_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
index 126865dfe777..a021d21ff153 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv20.c
@@ -45,7 +45,7 @@ nv20_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
else tile->zcomp = 0x04000000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
@@ -63,7 +63,7 @@ nv20_fb_tile_fini(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
tile->limit = 0;
tile->pitch = 0;
tile->zcomp = 0;
- nvkm_mm_free(&fb->ram->tags, &tile->tag);
+ nvkm_mm_free(&fb->tags, &tile->tag);
}
void
@@ -77,15 +77,22 @@ nv20_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile)
nvkm_wr32(device, 0x100300 + (i * 0x04), tile->zcomp);
}
+u32
+nv20_fb_tags(struct nvkm_fb *fb)
+{
+ const u32 tags = nvkm_rd32(fb->subdev.device, 0x100320);
+ return tags ? tags + 1 : 0;
+}
+
static const struct nvkm_fb_func
nv20_fb = {
+ .tags = nv20_fb_tags,
.tile.regions = 8,
.tile.init = nv20_fb_tile_init,
.tile.comp = nv20_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
index c56746d2a502..7709f5fe9a45 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv25.c
@@ -32,7 +32,7 @@ nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
else tile->zcomp = 0x00200000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
@@ -44,13 +44,13 @@ nv25_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
static const struct nvkm_fb_func
nv25_fb = {
+ .tags = nv20_fb_tags,
.tile.regions = 8,
.tile.init = nv20_fb_tile_init,
.tile.comp = nv25_fb_tile_comp,
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
index 2a7c4831b821..8aa782666507 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv30.c
@@ -51,7 +51,7 @@ nv30_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
else tile->zcomp |= 0x02000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -116,6 +116,7 @@ nv30_fb_init(struct nvkm_fb *fb)
static const struct nvkm_fb_func
nv30_fb = {
+ .tags = nv20_fb_tags,
.init = nv30_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
@@ -123,7 +124,6 @@ nv30_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
index 1604b3789ad1..6e83dcff72e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv35.c
@@ -32,7 +32,7 @@ nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
else tile->zcomp |= 0x08000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -45,6 +45,7 @@ nv35_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
static const struct nvkm_fb_func
nv35_fb = {
+ .tags = nv20_fb_tags,
.init = nv30_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
@@ -52,7 +53,6 @@ nv35_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
index 80cc0a6e3416..2a07617bb44c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv36.c
@@ -32,7 +32,7 @@ nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
else tile->zcomp |= 0x20000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
@@ -45,6 +45,7 @@ nv36_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
static const struct nvkm_fb_func
nv36_fb = {
+ .tags = nv20_fb_tags,
.init = nv30_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
@@ -52,7 +53,6 @@ nv36_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv20_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
index deec46a310f8..955160778b5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv40.c
@@ -33,7 +33,7 @@ nv40_fb_tile_comp(struct nvkm_fb *fb, int i, u32 size, u32 flags,
u32 tiles = DIV_ROUND_UP(size, 0x80);
u32 tags = round_up(tiles / fb->ram->parts, 0x100);
if ( (flags & 2) &&
- !nvkm_mm_head(&fb->ram->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ !nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
tile->zcomp |= ((tile->tag->offset ) >> 8);
tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
@@ -51,6 +51,7 @@ nv40_fb_init(struct nvkm_fb *fb)
static const struct nvkm_fb_func
nv40_fb = {
+ .tags = nv20_fb_tags,
.init = nv40_fb_init,
.tile.regions = 8,
.tile.init = nv30_fb_tile_init,
@@ -58,7 +59,6 @@ nv40_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv20_fb_tile_prog,
.ram_new = nv40_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
index 79e57dd5a00f..b77f08d34cc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv41.c
@@ -45,6 +45,7 @@ nv41_fb_init(struct nvkm_fb *fb)
static const struct nvkm_fb_func
nv41_fb = {
+ .tags = nv20_fb_tags,
.init = nv41_fb_init,
.tile.regions = 12,
.tile.init = nv30_fb_tile_init,
@@ -52,7 +53,6 @@ nv41_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv41_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
index 06246cce5ec4..b59dc486083d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv44.c
@@ -62,7 +62,6 @@ nv44_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
index 3598a1aa65be..cab7d20fa039 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv46.c
@@ -48,7 +48,6 @@ nv46_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
index c505e4429314..a8b0ad4c871d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv47.c
@@ -28,6 +28,7 @@
static const struct nvkm_fb_func
nv47_fb = {
+ .tags = nv20_fb_tags,
.init = nv41_fb_init,
.tile.regions = 15,
.tile.init = nv30_fb_tile_init,
@@ -35,7 +36,6 @@ nv47_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv41_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
index 7b91b9f170e5..d0b317bb0252 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv49.c
@@ -28,6 +28,7 @@
static const struct nvkm_fb_func
nv49_fb = {
+ .tags = nv20_fb_tags,
.init = nv41_fb_init,
.tile.regions = 15,
.tile.init = nv30_fb_tile_init,
@@ -35,7 +36,6 @@ nv49_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv41_fb_tile_prog,
.ram_new = nv49_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
index 4e98210c1b1c..6a6f0c086071 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv4e.c
@@ -34,7 +34,6 @@ nv4e_fb = {
.tile.fini = nv20_fb_tile_fini,
.tile.prog = nv44_fb_tile_prog,
.ram_new = nv44_ram_new,
- .memtype_valid = nv04_fb_memtype_valid,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 0595e0722bfc..b2f5bf8144ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -28,18 +28,6 @@
#include <core/enum.h>
#include <engine/fifo.h>
-int
-nv50_fb_memtype[0x80] = {
- 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
- 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
- 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
-};
-
static int
nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
{
@@ -47,12 +35,6 @@ nv50_fb_ram_new(struct nvkm_fb *base, struct nvkm_ram **pram)
return fb->func->ram_new(&fb->base, pram);
}
-static bool
-nv50_fb_memtype_valid(struct nvkm_fb *fb, u32 memtype)
-{
- return nv50_fb_memtype[(memtype & 0xff00) >> 8] != 0;
-}
-
static const struct nvkm_enum vm_dispatch_subclients[] = {
{ 0x00000000, "GRCTX" },
{ 0x00000001, "NOTIFY" },
@@ -244,6 +226,15 @@ nv50_fb_init(struct nvkm_fb *base)
nvkm_wr32(device, 0x100c90, fb->func->trap);
}
+static u32
+nv50_fb_tags(struct nvkm_fb *base)
+{
+ struct nv50_fb *fb = nv50_fb(base);
+ if (fb->func->tags)
+ return fb->func->tags(&fb->base);
+ return 0;
+}
+
static void *
nv50_fb_dtor(struct nvkm_fb *base)
{
@@ -262,11 +253,11 @@ nv50_fb_dtor(struct nvkm_fb *base)
static const struct nvkm_fb_func
nv50_fb_ = {
.dtor = nv50_fb_dtor,
+ .tags = nv50_fb_tags,
.oneinit = nv50_fb_oneinit,
.init = nv50_fb_init,
.intr = nv50_fb_intr,
.ram_new = nv50_fb_ram_new,
- .memtype_valid = nv50_fb_memtype_valid,
};
int
@@ -287,6 +278,7 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
static const struct nv50_fb_func
nv50_fb = {
.ram_new = nv50_ram_new,
+ .tags = nv20_fb_tags,
.trap = 0x000707ff,
};
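
Illustrative note (not part of the patch): the nv50.c and priv.h hunks above replace the old per-ram tag pool with an optional ->tags() hook on the fb function tables, with nv50_fb_tags() forwarding to the chipset-level nv50_fb_func and falling back to zero when no hook is wired up. The following is a minimal, self-contained sketch of that forwarding pattern; the struct names and the 0x4000 value are invented stand-ins, not the kernel's types.

/* sketch only: two-level ->tags() forwarding with an optional hook */
#include <stdio.h>

typedef unsigned int u32;

struct fb;

struct fb_func {
	u32 (*tags)(struct fb *);       /* optional per-chipset hook */
};

struct fb {
	const struct fb_func *func;
};

/* chipset-specific implementation; a real one would read a register */
static u32 chip_tags(struct fb *fb)
{
	(void)fb;
	return 0x4000;                  /* pretend tag count */
}

/* generic wrapper: forward if the hook exists, otherwise no tags */
static u32 fb_tags(struct fb *fb)
{
	if (fb->func->tags)
		return fb->func->tags(fb);
	return 0;
}

int main(void)
{
	static const struct fb_func funcs = { .tags = chip_tags };
	struct fb fb = { .func = &funcs };

	printf("compression tags: %u\n", fb_tags(&fb));
	return 0;
}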
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
index a37758c76268..dacc696387b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -13,10 +13,10 @@ struct nv50_fb {
struct nv50_fb_func {
int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
+ u32 (*tags)(struct nvkm_fb *);
u32 trap;
};
int nv50_fb_new_(const struct nv50_fb_func *, struct nvkm_device *, int index,
struct nvkm_fb **pfb);
-extern int nv50_fb_memtype[0x80];
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 8e87b887d4f5..9351188d5d76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -7,9 +7,10 @@ struct nvkm_bios;
struct nvkm_fb_func {
void *(*dtor)(struct nvkm_fb *);
+ u32 (*tags)(struct nvkm_fb *);
int (*oneinit)(struct nvkm_fb *);
void (*init)(struct nvkm_fb *);
- void (*init_page)(struct nvkm_fb *);
+ int (*init_page)(struct nvkm_fb *);
void (*init_unkn)(struct nvkm_fb *);
void (*intr)(struct nvkm_fb *);
@@ -25,7 +26,7 @@ struct nvkm_fb_func {
int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
- bool (*memtype_valid)(struct nvkm_fb *, u32 memtype);
+ u8 default_bigpage;
};
void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
@@ -34,13 +35,12 @@ int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device,
int index, struct nvkm_fb **);
int nvkm_fb_bios_memtype(struct nvkm_bios *);
-bool nv04_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
-
void nv10_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
void nv10_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
void nv10_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
+u32 nv20_fb_tags(struct nvkm_fb *);
void nv20_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
void nv20_fb_tile_fini(struct nvkm_fb *, int i, struct nvkm_fb_tile *);
@@ -63,8 +63,7 @@ void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
int gf100_fb_oneinit(struct nvkm_fb *);
-void gf100_fb_init_page(struct nvkm_fb *);
-bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
+int gf100_fb_init_page(struct nvkm_fb *);
-void gm200_fb_init_page(struct nvkm_fb *);
+int gm200_fb_init_page(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
index c17d559dbfbe..24c7bd505731 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -21,8 +21,132 @@
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#define nvkm_vram(p) container_of((p), struct nvkm_vram, memory)
#include "ram.h"
+#include <core/memory.h>
+#include <subdev/mmu.h>
+
+struct nvkm_vram {
+ struct nvkm_memory memory;
+ struct nvkm_ram *ram;
+ u8 page;
+ struct nvkm_mm_node *mn;
+};
+
+static int
+nvkm_vram_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
+{
+ struct nvkm_vram *vram = nvkm_vram(memory);
+ struct nvkm_vmm_map map = {
+ .memory = &vram->memory,
+ .offset = offset,
+ .mem = vram->mn,
+ };
+
+ return nvkm_vmm_map(vmm, vma, argv, argc, &map);
+}
+
+static u64
+nvkm_vram_size(struct nvkm_memory *memory)
+{
+ return (u64)nvkm_mm_size(nvkm_vram(memory)->mn) << NVKM_RAM_MM_SHIFT;
+}
+
+static u64
+nvkm_vram_addr(struct nvkm_memory *memory)
+{
+ struct nvkm_vram *vram = nvkm_vram(memory);
+ if (!nvkm_mm_contiguous(vram->mn))
+ return ~0ULL;
+ return (u64)nvkm_mm_addr(vram->mn) << NVKM_RAM_MM_SHIFT;
+}
+
+static u8
+nvkm_vram_page(struct nvkm_memory *memory)
+{
+ return nvkm_vram(memory)->page;
+}
+
+static enum nvkm_memory_target
+nvkm_vram_target(struct nvkm_memory *memory)
+{
+ return NVKM_MEM_TARGET_VRAM;
+}
+
+static void *
+nvkm_vram_dtor(struct nvkm_memory *memory)
+{
+ struct nvkm_vram *vram = nvkm_vram(memory);
+ struct nvkm_mm_node *next = vram->mn;
+ struct nvkm_mm_node *node;
+ mutex_lock(&vram->ram->fb->subdev.mutex);
+ while ((node = next)) {
+ next = node->next;
+ nvkm_mm_free(&vram->ram->vram, &node);
+ }
+ mutex_unlock(&vram->ram->fb->subdev.mutex);
+ return vram;
+}
+
+static const struct nvkm_memory_func
+nvkm_vram = {
+ .dtor = nvkm_vram_dtor,
+ .target = nvkm_vram_target,
+ .page = nvkm_vram_page,
+ .addr = nvkm_vram_addr,
+ .size = nvkm_vram_size,
+ .map = nvkm_vram_map,
+};
+
+int
+nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
+ bool contig, bool back, struct nvkm_memory **pmemory)
+{
+ struct nvkm_ram *ram;
+ struct nvkm_mm *mm;
+ struct nvkm_mm_node **node, *r;
+ struct nvkm_vram *vram;
+ u8 page = max(rpage, (u8)NVKM_RAM_MM_SHIFT);
+ u32 align = (1 << page) >> NVKM_RAM_MM_SHIFT;
+ u32 max = ALIGN(size, 1 << page) >> NVKM_RAM_MM_SHIFT;
+ u32 min = contig ? max : align;
+ int ret;
+
+ if (!device->fb || !(ram = device->fb->ram))
+ return -ENODEV;
+ ram = device->fb->ram;
+ mm = &ram->vram;
+
+ if (!(vram = kzalloc(sizeof(*vram), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_memory_ctor(&nvkm_vram, &vram->memory);
+ vram->ram = ram;
+ vram->page = page;
+ *pmemory = &vram->memory;
+
+ mutex_lock(&ram->fb->subdev.mutex);
+ node = &vram->mn;
+ do {
+ if (back)
+ ret = nvkm_mm_tail(mm, heap, type, max, min, align, &r);
+ else
+ ret = nvkm_mm_head(mm, heap, type, max, min, align, &r);
+ if (ret) {
+ mutex_unlock(&ram->fb->subdev.mutex);
+ nvkm_memory_unref(pmemory);
+ return ret;
+ }
+
+ *node = r;
+ node = &r->next;
+ max -= r->length;
+ } while (max);
+ mutex_unlock(&ram->fb->subdev.mutex);
+ return 0;
+}
+
int
nvkm_ram_init(struct nvkm_ram *ram)
{
@@ -38,7 +162,6 @@ nvkm_ram_del(struct nvkm_ram **pram)
if (ram && !WARN_ON(!ram->func)) {
if (ram->func->dtor)
*pram = ram->func->dtor(ram);
- nvkm_mm_fini(&ram->tags);
nvkm_mm_fini(&ram->vram);
kfree(*pram);
*pram = NULL;
@@ -47,8 +170,7 @@ nvkm_ram_del(struct nvkm_ram **pram)
int
nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
- enum nvkm_ram_type type, u64 size, u32 tags,
- struct nvkm_ram *ram)
+ enum nvkm_ram_type type, u64 size, struct nvkm_ram *ram)
{
static const char *name[] = {
[NVKM_RAM_TYPE_UNKNOWN] = "of unknown memory type",
@@ -73,28 +195,20 @@ nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
ram->size = size;
if (!nvkm_mm_initialised(&ram->vram)) {
- ret = nvkm_mm_init(&ram->vram, 0, size >> NVKM_RAM_MM_SHIFT, 1);
+ ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL, 0,
+ size >> NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
}
- if (!nvkm_mm_initialised(&ram->tags)) {
- ret = nvkm_mm_init(&ram->tags, 0, tags ? ++tags : 0, 1);
- if (ret)
- return ret;
-
- nvkm_debug(subdev, "%d compression tags\n", tags);
- }
-
return 0;
}
int
nvkm_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
- enum nvkm_ram_type type, u64 size, u32 tags,
- struct nvkm_ram **pram)
+ enum nvkm_ram_type type, u64 size, struct nvkm_ram **pram)
{
if (!(*pram = kzalloc(sizeof(**pram), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_ram_ctor(func, fb, type, size, tags, *pram);
+ return nvkm_ram_ctor(func, fb, type, size, *pram);
}
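
Illustrative note (not part of the patch): nvkm_ram_get(), added above, first rounds the request into allocator units before walking the VRAM mm: the page size is clamped to at least NVKM_RAM_MM_SHIFT, the unit count is the byte size rounded up to that page, and a contiguous request forces the minimum node size to equal the total so a single node must satisfy it. Below is a small standalone model of just that arithmetic, assuming the shift is 12 and using made-up request values; names are stand-ins.

/* sketch only: size/alignment arithmetic before the allocation loop */
#include <stdint.h>
#include <stdio.h>

#define MM_SHIFT 12                     /* allocator units assumed 4 KiB */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint8_t  rpage  = 16;           /* caller asked for 64 KiB pages */
	uint64_t size   = 0x30000;      /* 192 KiB request */
	int      contig = 1;

	uint8_t  page  = rpage > MM_SHIFT ? rpage : MM_SHIFT;
	uint32_t align = (1u << page) >> MM_SHIFT;
	uint32_t max   = ALIGN_UP(size, 1ull << page) >> MM_SHIFT;
	uint32_t min   = contig ? max : align;

	/* contiguous requests force min == max: one node or failure */
	printf("page=%u align=%u max=%u min=%u\n",
	       (unsigned)page, align, max, min);
	return 0;
}

The gt215 link-training hunks further down show a caller of this helper: the old per-chipset get()/put() pair is replaced by a single nvkm_ram_get() request for a contiguous block placed at the tail of VRAM, released later with nvkm_memory_unref().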
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index b2122d261f8d..330132e95b6f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -4,11 +4,9 @@
#include "priv.h"
int nvkm_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
- enum nvkm_ram_type, u64 size, u32 tags,
- struct nvkm_ram *);
+ enum nvkm_ram_type, u64 size, struct nvkm_ram *);
int nvkm_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
- enum nvkm_ram_type, u64 size, u32 tags,
- struct nvkm_ram **);
+ enum nvkm_ram_type, u64 size, struct nvkm_ram **);
void nvkm_ram_del(struct nvkm_ram **);
int nvkm_ram_init(struct nvkm_ram *);
@@ -16,9 +14,6 @@ extern const struct nvkm_ram_func nv04_ram_func;
int nv50_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
struct nvkm_ram *);
-int nv50_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
-void nv50_ram_put(struct nvkm_ram *, struct nvkm_mem **);
-void __nv50_ram_put(struct nvkm_ram *, struct nvkm_mem *);
int gf100_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
struct nvkm_ram **);
@@ -29,8 +24,6 @@ u32 gf100_ram_probe_fbp(const struct nvkm_ram_func *,
u32 gf100_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
struct nvkm_device *, int, int *);
u32 gf100_ram_probe_fbpa_amount(struct nvkm_device *, int);
-int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
-void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
int gf100_ram_init(struct nvkm_ram *);
int gf100_ram_calc(struct nvkm_ram *, u32);
int gf100_ram_prog(struct nvkm_ram *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index 4a9bd4f1cb93..ac87a3b6b7c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -32,7 +32,6 @@
#include <subdev/bios/timing.h>
#include <subdev/clk.h>
#include <subdev/clk/pll.h>
-#include <subdev/ltc.h>
struct gf100_ramfuc {
struct ramfuc base;
@@ -420,86 +419,6 @@ gf100_ram_tidy(struct nvkm_ram *base)
ram_exec(&ram->fuc, false);
}
-void
-gf100_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
-{
- struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
- struct nvkm_mem *mem = *pmem;
-
- *pmem = NULL;
- if (unlikely(mem == NULL))
- return;
-
- mutex_lock(&ram->fb->subdev.mutex);
- if (mem->tag)
- nvkm_ltc_tags_free(ltc, &mem->tag);
- __nv50_ram_put(ram, mem);
- mutex_unlock(&ram->fb->subdev.mutex);
-
- kfree(mem);
-}
-
-int
-gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nvkm_mem **pmem)
-{
- struct nvkm_ltc *ltc = ram->fb->subdev.device->ltc;
- struct nvkm_mm *mm = &ram->vram;
- struct nvkm_mm_node **node, *r;
- struct nvkm_mem *mem;
- int type = (memtype & 0x0ff);
- int back = (memtype & 0x800);
- const bool comp = gf100_pte_storage_type_map[type] != type;
- int ret;
-
- size >>= NVKM_RAM_MM_SHIFT;
- align >>= NVKM_RAM_MM_SHIFT;
- ncmin >>= NVKM_RAM_MM_SHIFT;
- if (!ncmin)
- ncmin = size;
-
- mem = kzalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- mem->size = size;
-
- mutex_lock(&ram->fb->subdev.mutex);
- if (comp) {
- /* compression only works with lpages */
- if (align == (1 << (17 - NVKM_RAM_MM_SHIFT))) {
- int n = size >> 5;
- nvkm_ltc_tags_alloc(ltc, n, &mem->tag);
- }
-
- if (unlikely(!mem->tag))
- type = gf100_pte_storage_type_map[type];
- }
- mem->memtype = type;
-
- node = &mem->mem;
- do {
- if (back)
- ret = nvkm_mm_tail(mm, 0, 1, size, ncmin, align, &r);
- else
- ret = nvkm_mm_head(mm, 0, 1, size, ncmin, align, &r);
- if (ret) {
- mutex_unlock(&ram->fb->subdev.mutex);
- ram->func->put(ram, &mem);
- return ret;
- }
-
- *node = r;
- node = &r->next;
- size -= r->length;
- } while (size);
- mutex_unlock(&ram->fb->subdev.mutex);
-
- mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
- *pmem = mem;
- return 0;
-}
-
int
gf100_ram_init(struct nvkm_ram *base)
{
@@ -604,7 +523,7 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
nvkm_debug(subdev, "Upper: %4lld MiB @ %010llx\n", usize >> 20, ubase);
nvkm_debug(subdev, "Total: %4lld MiB\n", total >> 20);
- ret = nvkm_ram_ctor(func, fb, type, total, 0, ram);
+ ret = nvkm_ram_ctor(func, fb, type, total, ram);
if (ret)
return ret;
@@ -617,7 +536,8 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
*/
if (lower != total) {
/* The common memory amount is addressed normally. */
- ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+ rsvd_head >> NVKM_RAM_MM_SHIFT,
(lower - rsvd_head) >> NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
@@ -625,13 +545,15 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
/* And the rest is much higher in the physical address
* space, and may not be usable for certain operations.
*/
- ret = nvkm_mm_init(&ram->vram, ubase >> NVKM_RAM_MM_SHIFT,
+ ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_MIXED,
+ ubase >> NVKM_RAM_MM_SHIFT,
(usize - rsvd_tail) >> NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
} else {
/* GPUs without mixed-memory are a lot nicer... */
- ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+ rsvd_head >> NVKM_RAM_MM_SHIFT,
(total - rsvd_head - rsvd_tail) >>
NVKM_RAM_MM_SHIFT, 1);
if (ret)
@@ -738,8 +660,6 @@ gf100_ram = {
.probe_fbp_amount = gf100_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.init = gf100_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
.calc = gf100_ram_calc,
.prog = gf100_ram_prog,
.tidy = gf100_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
index 985ec64cf369..70a06e3cd55a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
@@ -48,8 +48,6 @@ gf108_ram = {
.probe_fbp_amount = gf108_ram_probe_fbp_amount,
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.init = gf100_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
.calc = gf100_ram_calc,
.prog = gf100_ram_prog,
.tidy = gf100_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 75814f15eb53..8bcb7e79a0cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -1704,8 +1704,6 @@ gk104_ram = {
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.dtor = gk104_ram_dtor,
.init = gk104_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
.calc = gk104_ram_calc,
.prog = gk104_ram_prog,
.tidy = gk104_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index 3f0b56347291..27c68e3f9772 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -39,8 +39,6 @@ gm107_ram = {
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.dtor = gk104_ram_dtor,
.init = gk104_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
.calc = gk104_ram_calc,
.prog = gk104_ram_prog,
.tidy = gk104_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
index fd8facf90476..6b0cac1fe7b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
@@ -54,8 +54,6 @@ gm200_ram = {
.probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
.dtor = gk104_ram_dtor,
.init = gk104_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
.calc = gk104_ram_calc,
.prog = gk104_ram_prog,
.tidy = gk104_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
index df8a87333b67..adb62a6beb63 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -84,8 +84,6 @@ gp100_ram = {
.probe_fbp_amount = gm200_ram_probe_fbp_amount,
.probe_fbpa_amount = gp100_ram_probe_fbpa,
.init = gp100_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
index f10664372161..920b3d347803 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c
@@ -26,6 +26,7 @@
#include "ram.h"
#include "ramfuc.h"
+#include <core/memory.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/M0205.h>
@@ -86,7 +87,7 @@ struct gt215_ltrain {
u32 r_100720;
u32 r_1111e0;
u32 r_111400;
- struct nvkm_mem *mem;
+ struct nvkm_memory *memory;
};
struct gt215_ram {
@@ -279,10 +280,10 @@ gt215_link_train_init(struct gt215_ram *ram)
struct gt215_ltrain *train = &ram->ltrain;
struct nvkm_device *device = ram->base.fb->subdev.device;
struct nvkm_bios *bios = device->bios;
- struct nvkm_mem *mem;
struct nvbios_M0205E M0205E;
u8 ver, hdr, cnt, len;
u32 r001700;
+ u64 addr;
int ret, i = 0;
train->state = NVA3_TRAIN_UNSUPPORTED;
@@ -297,14 +298,14 @@ gt215_link_train_init(struct gt215_ram *ram)
train->state = NVA3_TRAIN_ONCE;
- ret = ram->base.func->get(&ram->base, 0x8000, 0x10000, 0, 0x800,
- &ram->ltrain.mem);
+ ret = nvkm_ram_get(device, NVKM_RAM_MM_NORMAL, 0x01, 16, 0x8000,
+ true, true, &ram->ltrain.memory);
if (ret)
return ret;
- mem = ram->ltrain.mem;
+ addr = nvkm_memory_addr(ram->ltrain.memory);
- nvkm_wr32(device, 0x100538, 0x10000000 | (mem->offset >> 16));
+ nvkm_wr32(device, 0x100538, 0x10000000 | (addr >> 16));
nvkm_wr32(device, 0x1005a8, 0x0000ffff);
nvkm_mask(device, 0x10f800, 0x00000001, 0x00000001);
@@ -320,7 +321,7 @@ gt215_link_train_init(struct gt215_ram *ram)
/* And upload the pattern */
r001700 = nvkm_rd32(device, 0x1700);
- nvkm_wr32(device, 0x1700, mem->offset >> 16);
+ nvkm_wr32(device, 0x1700, addr >> 16);
for (i = 0; i < 16; i++)
nvkm_wr32(device, 0x700000 + (i << 2), pattern[i]);
for (i = 0; i < 16; i++)
@@ -336,8 +337,7 @@ gt215_link_train_init(struct gt215_ram *ram)
static void
gt215_link_train_fini(struct gt215_ram *ram)
{
- if (ram->ltrain.mem)
- ram->base.func->put(&ram->base, &ram->ltrain.mem);
+ nvkm_memory_unref(&ram->ltrain.memory);
}
/*
@@ -931,8 +931,6 @@ static const struct nvkm_ram_func
gt215_ram_func = {
.dtor = gt215_ram_dtor,
.init = gt215_ram_init,
- .get = nv50_ram_get,
- .put = nv50_ram_put,
.calc = gt215_ram_calc,
.prog = gt215_ram_prog,
.tidy = gt215_ram_tidy,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
index 017a91de74a0..7de18e53ef45 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/rammcp77.c
@@ -53,8 +53,6 @@ mcp77_ram_init(struct nvkm_ram *base)
static const struct nvkm_ram_func
mcp77_ram_func = {
.init = mcp77_ram_init,
- .get = nv50_ram_get,
- .put = nv50_ram_put,
};
int
@@ -73,7 +71,7 @@ mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
*pram = &ram->base;
ret = nvkm_ram_ctor(&mcp77_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
- size, 0, &ram->base);
+ size, &ram->base);
if (ret)
return ret;
@@ -81,7 +79,8 @@ mcp77_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
ram->base.stolen = base;
nvkm_mm_fini(&ram->base.vram);
- return nvkm_mm_init(&ram->base.vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ return nvkm_mm_init(&ram->base.vram, NVKM_RAM_MM_NORMAL,
+ rsvd_head >> NVKM_RAM_MM_SHIFT,
(size - rsvd_head - rsvd_tail) >>
NVKM_RAM_MM_SHIFT, 1);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
index 6f053a03d61c..cc764a93f1a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv04.c
@@ -61,5 +61,5 @@ nv04_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
else
type = NVKM_RAM_TYPE_SDRAM;
- return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
+ return nvkm_ram_new_(&nv04_ram_func, fb, type, size, pram);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
index dfd155c98dbb..afe54e323b18 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv10.c
@@ -36,5 +36,5 @@ nv10_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
else
type = NVKM_RAM_TYPE_SDRAM;
- return nvkm_ram_new_(&nv04_ram_func, fb, type, size, 0, pram);
+ return nvkm_ram_new_(&nv04_ram_func, fb, type, size, pram);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
index 3c6a8710e812..4c07d10bb976 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c
@@ -44,5 +44,5 @@ nv1a_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
}
return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_STOLEN,
- mib * 1024 * 1024, 0, pram);
+ mib * 1024 * 1024, pram);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
index 747e47c10cc7..71d63d7daa75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv20.c
@@ -29,7 +29,6 @@ nv20_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
struct nvkm_device *device = fb->subdev.device;
u32 pbus1218 = nvkm_rd32(device, 0x001218);
u32 size = (nvkm_rd32(device, 0x10020c) & 0xff000000);
- u32 tags = nvkm_rd32(device, 0x100320);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
@@ -40,7 +39,7 @@ nv20_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
case 0x00000300: type = NVKM_RAM_TYPE_GDDR2; break;
}
- ret = nvkm_ram_new_(&nv04_ram_func, fb, type, size, tags, pram);
+ ret = nvkm_ram_new_(&nv04_ram_func, fb, type, size, pram);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
index 70c63535d56b..2b12e388f47a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
@@ -187,13 +187,13 @@ nv40_ram_func = {
int
nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type type, u64 size,
- u32 tags, struct nvkm_ram **pram)
+ struct nvkm_ram **pram)
{
struct nv40_ram *ram;
if (!(ram = kzalloc(sizeof(*ram), GFP_KERNEL)))
return -ENOMEM;
*pram = &ram->base;
- return nvkm_ram_ctor(&nv40_ram_func, fb, type, size, tags, &ram->base);
+ return nvkm_ram_ctor(&nv40_ram_func, fb, type, size, &ram->base);
}
int
@@ -202,7 +202,6 @@ nv40_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
struct nvkm_device *device = fb->subdev.device;
u32 pbus1218 = nvkm_rd32(device, 0x001218);
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
- u32 tags = nvkm_rd32(device, 0x100320);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
@@ -213,7 +212,7 @@ nv40_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
case 0x00000300: type = NVKM_RAM_TYPE_DDR2 ; break;
}
- ret = nv40_ram_new_(fb, type, size, tags, pram);
+ ret = nv40_ram_new_(fb, type, size, pram);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
index 8549fdf2437c..11f6bb2936b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
@@ -10,6 +10,6 @@ struct nv40_ram {
u32 coef;
};
-int nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type, u64, u32,
+int nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type, u64,
struct nvkm_ram **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
index 114828be292e..d3fea3726461 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv41.c
@@ -28,7 +28,6 @@ nv41_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_device *device = fb->subdev.device;
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
- u32 tags = nvkm_rd32(device, 0x100320);
u32 fb474 = nvkm_rd32(device, 0x100474);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
@@ -40,7 +39,7 @@ nv41_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
if (fb474 & 0x00000001)
type = NVKM_RAM_TYPE_DDR1;
- ret = nv40_ram_new_(fb, type, size, tags, pram);
+ ret = nv40_ram_new_(fb, type, size, pram);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
index bc56fbf1c788..ab2630e5e6fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv44.c
@@ -38,5 +38,5 @@ nv44_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
if (fb474 & 0x00000001)
type = NVKM_RAM_TYPE_DDR1;
- return nv40_ram_new_(fb, type, size, 0, pram);
+ return nv40_ram_new_(fb, type, size, pram);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
index c01f4b1022b8..946ca7c2e0b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv49.c
@@ -28,7 +28,6 @@ nv49_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_device *device = fb->subdev.device;
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
- u32 tags = nvkm_rd32(device, 0x100320);
u32 fb914 = nvkm_rd32(device, 0x100914);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
@@ -40,7 +39,7 @@ nv49_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
case 0x00000003: break;
}
- ret = nv40_ram_new_(fb, type, size, tags, pram);
+ ret = nv40_ram_new_(fb, type, size, pram);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
index fa3c2e06203d..02b8bdbc819f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv4e.c
@@ -29,5 +29,5 @@ nv4e_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
struct nvkm_device *device = fb->subdev.device;
u32 size = nvkm_rd32(device, 0x10020c) & 0xff000000;
return nvkm_ram_new_(&nv04_ram_func, fb, NVKM_RAM_TYPE_UNKNOWN,
- size, 0, pram);
+ size, pram);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
index 6549b0588309..2ccb4b6be153 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c
@@ -493,100 +493,8 @@ nv50_ram_tidy(struct nvkm_ram *base)
ram_exec(&ram->hwsq, false);
}
-void
-__nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem *mem)
-{
- struct nvkm_mm_node *next = mem->mem;
- struct nvkm_mm_node *node;
- while ((node = next)) {
- next = node->next;
- nvkm_mm_free(&ram->vram, &node);
- }
- nvkm_mm_free(&ram->tags, &mem->tag);
-}
-
-void
-nv50_ram_put(struct nvkm_ram *ram, struct nvkm_mem **pmem)
-{
- struct nvkm_mem *mem = *pmem;
-
- *pmem = NULL;
- if (unlikely(mem == NULL))
- return;
-
- mutex_lock(&ram->fb->subdev.mutex);
- __nv50_ram_put(ram, mem);
- mutex_unlock(&ram->fb->subdev.mutex);
-
- kfree(mem);
-}
-
-int
-nv50_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nvkm_mem **pmem)
-{
- struct nvkm_mm *heap = &ram->vram;
- struct nvkm_mm *tags = &ram->tags;
- struct nvkm_mm_node **node, *r;
- struct nvkm_mem *mem;
- int comp = (memtype & 0x300) >> 8;
- int type = (memtype & 0x07f);
- int back = (memtype & 0x800);
- int min, max, ret;
-
- max = (size >> NVKM_RAM_MM_SHIFT);
- min = ncmin ? (ncmin >> NVKM_RAM_MM_SHIFT) : max;
- align >>= NVKM_RAM_MM_SHIFT;
-
- mem = kzalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- mutex_lock(&ram->fb->subdev.mutex);
- if (comp) {
- if (align == (1 << (16 - NVKM_RAM_MM_SHIFT))) {
- int n = (max >> 4) * comp;
-
- ret = nvkm_mm_head(tags, 0, 1, n, n, 1, &mem->tag);
- if (ret)
- mem->tag = NULL;
- }
-
- if (unlikely(!mem->tag))
- comp = 0;
- }
-
- mem->memtype = (comp << 7) | type;
- mem->size = max;
-
- type = nv50_fb_memtype[type];
- node = &mem->mem;
- do {
- if (back)
- ret = nvkm_mm_tail(heap, 0, type, max, min, align, &r);
- else
- ret = nvkm_mm_head(heap, 0, type, max, min, align, &r);
- if (ret) {
- mutex_unlock(&ram->fb->subdev.mutex);
- ram->func->put(ram, &mem);
- return ret;
- }
-
- *node = r;
- node = &r->next;
- max -= r->length;
- } while (max);
- mutex_unlock(&ram->fb->subdev.mutex);
-
- mem->offset = (u64)mem->mem->offset << NVKM_RAM_MM_SHIFT;
- *pmem = mem;
- return 0;
-}
-
static const struct nvkm_ram_func
nv50_ram_func = {
- .get = nv50_ram_get,
- .put = nv50_ram_put,
.calc = nv50_ram_calc,
.prog = nv50_ram_prog,
.tidy = nv50_ram_tidy,
@@ -639,7 +547,6 @@ nv50_ram_ctor(const struct nvkm_ram_func *func,
const u32 rsvd_head = ( 256 * 1024); /* vga memory */
const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
u64 size = nvkm_rd32(device, 0x10020c);
- u32 tags = nvkm_rd32(device, 0x100320);
enum nvkm_ram_type type = NVKM_RAM_TYPE_UNKNOWN;
int ret;
@@ -660,7 +567,7 @@ nv50_ram_ctor(const struct nvkm_ram_func *func,
size = (size & 0x000000ff) << 32 | (size & 0xffffff00);
- ret = nvkm_ram_ctor(func, fb, type, size, tags, ram);
+ ret = nvkm_ram_ctor(func, fb, type, size, ram);
if (ret)
return ret;
@@ -669,7 +576,8 @@ nv50_ram_ctor(const struct nvkm_ram_func *func,
ram->ranks = (nvkm_rd32(device, 0x100200) & 0x4) ? 2 : 1;
nvkm_mm_fini(&ram->vram);
- return nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ return nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+ rsvd_head >> NVKM_RAM_MM_SHIFT,
(size - rsvd_head - rsvd_tail) >> NVKM_RAM_MM_SHIFT,
nv50_fb_vram_rblock(ram) >> NVKM_RAM_MM_SHIFT);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 10c987a654ec..364ea4492acc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -23,181 +23,90 @@
*/
#include "priv.h"
-#include <core/memory.h>
#include <subdev/bar.h>
/******************************************************************************
* instmem object base implementation
*****************************************************************************/
-#define nvkm_instobj(p) container_of((p), struct nvkm_instobj, memory)
-
-struct nvkm_instobj {
- struct nvkm_memory memory;
- struct nvkm_memory *parent;
- struct nvkm_instmem *imem;
- struct list_head head;
- u32 *suspend;
- void __iomem *map;
-};
-
-static enum nvkm_memory_target
-nvkm_instobj_target(struct nvkm_memory *memory)
-{
- memory = nvkm_instobj(memory)->parent;
- return nvkm_memory_target(memory);
-}
-
-static u64
-nvkm_instobj_addr(struct nvkm_memory *memory)
-{
- memory = nvkm_instobj(memory)->parent;
- return nvkm_memory_addr(memory);
-}
-
-static u64
-nvkm_instobj_size(struct nvkm_memory *memory)
-{
- memory = nvkm_instobj(memory)->parent;
- return nvkm_memory_size(memory);
-}
-
static void
-nvkm_instobj_release(struct nvkm_memory *memory)
+nvkm_instobj_load(struct nvkm_instobj *iobj)
{
- struct nvkm_instobj *iobj = nvkm_instobj(memory);
- nvkm_bar_flush(iobj->imem->subdev.device->bar);
-}
-
-static void __iomem *
-nvkm_instobj_acquire(struct nvkm_memory *memory)
-{
- return nvkm_instobj(memory)->map;
-}
-
-static u32
-nvkm_instobj_rd32(struct nvkm_memory *memory, u64 offset)
-{
- return ioread32_native(nvkm_instobj(memory)->map + offset);
-}
-
-static void
-nvkm_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
-{
- iowrite32_native(data, nvkm_instobj(memory)->map + offset);
-}
+ struct nvkm_memory *memory = &iobj->memory;
+ const u64 size = nvkm_memory_size(memory);
+ void __iomem *map;
+ int i;
-static void
-nvkm_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
-{
- memory = nvkm_instobj(memory)->parent;
- nvkm_memory_map(memory, vma, offset);
-}
+ if (!(map = nvkm_kmap(memory))) {
+ for (i = 0; i < size; i += 4)
+ nvkm_wo32(memory, i, iobj->suspend[i / 4]);
+ } else {
+ memcpy_toio(map, iobj->suspend, size);
+ }
+ nvkm_done(memory);
-static void *
-nvkm_instobj_dtor(struct nvkm_memory *memory)
-{
- struct nvkm_instobj *iobj = nvkm_instobj(memory);
- spin_lock(&iobj->imem->lock);
- list_del(&iobj->head);
- spin_unlock(&iobj->imem->lock);
- nvkm_memory_del(&iobj->parent);
- return iobj;
+ kvfree(iobj->suspend);
+ iobj->suspend = NULL;
}
-static const struct nvkm_memory_func
-nvkm_instobj_func = {
- .dtor = nvkm_instobj_dtor,
- .target = nvkm_instobj_target,
- .addr = nvkm_instobj_addr,
- .size = nvkm_instobj_size,
- .acquire = nvkm_instobj_acquire,
- .release = nvkm_instobj_release,
- .rd32 = nvkm_instobj_rd32,
- .wr32 = nvkm_instobj_wr32,
- .map = nvkm_instobj_map,
-};
-
-static void
-nvkm_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+static int
+nvkm_instobj_save(struct nvkm_instobj *iobj)
{
- memory = nvkm_instobj(memory)->parent;
- nvkm_memory_boot(memory, vm);
-}
+ struct nvkm_memory *memory = &iobj->memory;
+ const u64 size = nvkm_memory_size(memory);
+ void __iomem *map;
+ int i;
-static void
-nvkm_instobj_release_slow(struct nvkm_memory *memory)
-{
- struct nvkm_instobj *iobj = nvkm_instobj(memory);
- nvkm_instobj_release(memory);
- nvkm_done(iobj->parent);
-}
+ iobj->suspend = kvmalloc(size, GFP_KERNEL);
+ if (!iobj->suspend)
+ return -ENOMEM;
-static void __iomem *
-nvkm_instobj_acquire_slow(struct nvkm_memory *memory)
-{
- struct nvkm_instobj *iobj = nvkm_instobj(memory);
- iobj->map = nvkm_kmap(iobj->parent);
- if (iobj->map)
- memory->func = &nvkm_instobj_func;
- return iobj->map;
+ if (!(map = nvkm_kmap(memory))) {
+ for (i = 0; i < size; i += 4)
+ iobj->suspend[i / 4] = nvkm_ro32(memory, i);
+ } else {
+ memcpy_fromio(iobj->suspend, map, size);
+ }
+ nvkm_done(memory);
+ return 0;
}
-static u32
-nvkm_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
+void
+nvkm_instobj_dtor(struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
- struct nvkm_instobj *iobj = nvkm_instobj(memory);
- return nvkm_ro32(iobj->parent, offset);
+ spin_lock(&imem->lock);
+ list_del(&iobj->head);
+ spin_unlock(&imem->lock);
}
-static void
-nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
+void
+nvkm_instobj_ctor(const struct nvkm_memory_func *func,
+ struct nvkm_instmem *imem, struct nvkm_instobj *iobj)
{
- struct nvkm_instobj *iobj = nvkm_instobj(memory);
- return nvkm_wo32(iobj->parent, offset, data);
+ nvkm_memory_ctor(func, &iobj->memory);
+ iobj->suspend = NULL;
+ spin_lock(&imem->lock);
+ list_add_tail(&iobj->head, &imem->list);
+ spin_unlock(&imem->lock);
}
-static const struct nvkm_memory_func
-nvkm_instobj_func_slow = {
- .dtor = nvkm_instobj_dtor,
- .target = nvkm_instobj_target,
- .addr = nvkm_instobj_addr,
- .size = nvkm_instobj_size,
- .boot = nvkm_instobj_boot,
- .acquire = nvkm_instobj_acquire_slow,
- .release = nvkm_instobj_release_slow,
- .rd32 = nvkm_instobj_rd32_slow,
- .wr32 = nvkm_instobj_wr32_slow,
- .map = nvkm_instobj_map,
-};
-
int
nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
struct nvkm_memory **pmemory)
{
+ struct nvkm_subdev *subdev = &imem->subdev;
struct nvkm_memory *memory = NULL;
- struct nvkm_instobj *iobj;
u32 offset;
int ret;
ret = imem->func->memory_new(imem, size, align, zero, &memory);
- if (ret)
+ if (ret) {
+ nvkm_error(subdev, "OOM: %08x %08x %d\n", size, align, ret);
goto done;
-
- if (!imem->func->persistent) {
- if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL))) {
- ret = -ENOMEM;
- goto done;
- }
-
- nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
- iobj->parent = memory;
- iobj->imem = imem;
- spin_lock(&iobj->imem->lock);
- list_add_tail(&iobj->head, &imem->list);
- spin_unlock(&iobj->imem->lock);
- memory = &iobj->memory;
}
+ nvkm_trace(subdev, "new %08x %08x %d: %010llx %010llx\n", size, align,
+ zero, nvkm_memory_addr(memory), nvkm_memory_size(memory));
+
if (!imem->func->zero && zero) {
void __iomem *map = nvkm_kmap(memory);
if (unlikely(!map)) {
@@ -211,7 +120,7 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
done:
if (ret)
- nvkm_memory_del(&memory);
+ nvkm_memory_unref(&memory);
*pmemory = memory;
return ret;
}
@@ -232,39 +141,46 @@ nvkm_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
return imem->func->wr32(imem, addr, data);
}
+void
+nvkm_instmem_boot(struct nvkm_instmem *imem)
+{
+ /* Separate bootstrapped objects from normal list, as we need
+ * to make sure they're accessed with the slowpath on suspend
+ * and resume.
+ */
+ struct nvkm_instobj *iobj, *itmp;
+ spin_lock(&imem->lock);
+ list_for_each_entry_safe(iobj, itmp, &imem->list, head) {
+ list_move_tail(&iobj->head, &imem->boot);
+ }
+ spin_unlock(&imem->lock);
+}
+
static int
nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_instmem *imem = nvkm_instmem(subdev);
struct nvkm_instobj *iobj;
- int i;
-
- if (imem->func->fini)
- imem->func->fini(imem);
if (suspend) {
list_for_each_entry(iobj, &imem->list, head) {
- struct nvkm_memory *memory = iobj->parent;
- u64 size = nvkm_memory_size(memory);
+ int ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
- iobj->suspend = vmalloc(size);
- if (!iobj->suspend)
- return -ENOMEM;
+ nvkm_bar_bar2_fini(subdev->device);
- for (i = 0; i < size; i += 4)
- iobj->suspend[i / 4] = nvkm_ro32(memory, i);
+ list_for_each_entry(iobj, &imem->boot, head) {
+ int ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
}
}
- return 0;
-}
+ if (imem->func->fini)
+ imem->func->fini(imem);
-static int
-nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
-{
- struct nvkm_instmem *imem = nvkm_instmem(subdev);
- if (imem->func->oneinit)
- return imem->func->oneinit(imem);
return 0;
}
@@ -273,22 +189,31 @@ nvkm_instmem_init(struct nvkm_subdev *subdev)
{
struct nvkm_instmem *imem = nvkm_instmem(subdev);
struct nvkm_instobj *iobj;
- int i;
+
+ list_for_each_entry(iobj, &imem->boot, head) {
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
+ }
+
+ nvkm_bar_bar2_init(subdev->device);
list_for_each_entry(iobj, &imem->list, head) {
- if (iobj->suspend) {
- struct nvkm_memory *memory = iobj->parent;
- u64 size = nvkm_memory_size(memory);
- for (i = 0; i < size; i += 4)
- nvkm_wo32(memory, i, iobj->suspend[i / 4]);
- vfree(iobj->suspend);
- iobj->suspend = NULL;
- }
+ if (iobj->suspend)
+ nvkm_instobj_load(iobj);
}
return 0;
}
+static int
+nvkm_instmem_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_instmem *imem = nvkm_instmem(subdev);
+ if (imem->func->oneinit)
+ return imem->func->oneinit(imem);
+ return 0;
+}
+
static void *
nvkm_instmem_dtor(struct nvkm_subdev *subdev)
{
@@ -315,4 +240,5 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
imem->func = func;
spin_lock_init(&imem->lock);
INIT_LIST_HEAD(&imem->list);
+ INIT_LIST_HEAD(&imem->boot);
}
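
Illustrative note (not part of the patch): the rewritten suspend path above snapshots every instance object into a CPU buffer before the device state is lost and writes it back on resume, preferring a bulk copy when the object can be mapped and falling back to 32-bit accessors otherwise. Below is a minimal host-memory sketch of that save/restore shape; the struct, the helpers, and the use of malloc in place of kvmalloc are simplifications for illustration only.

/* sketch only: save/restore with a fast mapped path and a slow word path */
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct obj {
	uint32_t *backing;      /* stands in for the instance memory */
	size_t size;            /* bytes, multiple of 4 */
	int mappable;           /* can we copy it in bulk? */
	uint32_t *suspend;      /* CPU-side copy held across suspend */
};

static uint32_t obj_ro32(struct obj *o, size_t off) { return o->backing[off / 4]; }
static void obj_wo32(struct obj *o, size_t off, uint32_t v) { o->backing[off / 4] = v; }

static int obj_save(struct obj *o)
{
	size_t i;

	o->suspend = malloc(o->size);
	if (!o->suspend)
		return -1;

	if (o->mappable) {
		memcpy(o->suspend, o->backing, o->size);
	} else {
		for (i = 0; i < o->size; i += 4)
			o->suspend[i / 4] = obj_ro32(o, i);
	}
	return 0;
}

static void obj_load(struct obj *o)
{
	size_t i;

	if (o->mappable) {
		memcpy(o->backing, o->suspend, o->size);
	} else {
		for (i = 0; i < o->size; i += 4)
			obj_wo32(o, i, o->suspend[i / 4]);
	}
	free(o->suspend);
	o->suspend = NULL;
}

int main(void)
{
	uint32_t store[4] = { 1, 2, 3, 4 };
	struct obj o = { .backing = store, .size = sizeof(store), .mappable = 0 };

	if (obj_save(&o))
		return 1;
	store[0] = 0;           /* pretend contents were lost over suspend */
	obj_load(&o);           /* restored from the CPU-side copy */
	return store[0] == 1 ? 0 : 1;
}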
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index cd5adbec5e57..985f2990ab0d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -44,14 +44,13 @@
#include "priv.h"
#include <core/memory.h>
-#include <core/mm.h>
#include <core/tegra.h>
-#include <subdev/fb.h>
#include <subdev/ltc.h>
+#include <subdev/mmu.h>
struct gk20a_instobj {
struct nvkm_memory memory;
- struct nvkm_mem mem;
+ struct nvkm_mm_node *mn;
struct gk20a_instmem *imem;
/* CPU mapping */
@@ -119,16 +118,22 @@ gk20a_instobj_target(struct nvkm_memory *memory)
return NVKM_MEM_TARGET_NCOH;
}
+static u8
+gk20a_instobj_page(struct nvkm_memory *memory)
+{
+ return 12;
+}
+
static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{
- return gk20a_instobj(memory)->mem.offset;
+ return (u64)gk20a_instobj(memory)->mn->offset << 12;
}
static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{
- return (u64)gk20a_instobj(memory)->mem.size << 12;
+ return (u64)gk20a_instobj(memory)->mn->length << 12;
}
/*
@@ -272,12 +277,18 @@ gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
node->vaddr[offset / 4] = data;
}
-static void
-gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+static int
+gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
{
struct gk20a_instobj *node = gk20a_instobj(memory);
+ struct nvkm_vmm_map map = {
+ .memory = &node->memory,
+ .offset = offset,
+ .mem = node->mn,
+ };
- nvkm_vm_map_at(vma, offset, &node->mem);
+ return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}
static void *
@@ -290,8 +301,8 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
if (unlikely(!node->base.vaddr))
goto out;
- dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
- node->handle, imem->attrs);
+ dma_free_attrs(dev, (u64)node->base.mn->length << PAGE_SHIFT,
+ node->base.vaddr, node->handle, imem->attrs);
out:
return node;
@@ -303,7 +314,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
struct gk20a_instmem *imem = node->base.imem;
struct device *dev = imem->base.subdev.device->dev;
- struct nvkm_mm_node *r = node->base.mem.mem;
+ struct nvkm_mm_node *r = node->base.mn;
int i;
if (unlikely(!r))
@@ -321,7 +332,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
/* Unmap pages from GPU address space and free them */
- for (i = 0; i < node->base.mem.size; i++) {
+ for (i = 0; i < node->base.mn->length; i++) {
iommu_unmap(imem->domain,
(r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
@@ -342,12 +353,11 @@ static const struct nvkm_memory_func
gk20a_instobj_func_dma = {
.dtor = gk20a_instobj_dtor_dma,
.target = gk20a_instobj_target,
+ .page = gk20a_instobj_page,
.addr = gk20a_instobj_addr,
.size = gk20a_instobj_size,
.acquire = gk20a_instobj_acquire_dma,
.release = gk20a_instobj_release_dma,
- .rd32 = gk20a_instobj_rd32,
- .wr32 = gk20a_instobj_wr32,
.map = gk20a_instobj_map,
};
@@ -355,13 +365,18 @@ static const struct nvkm_memory_func
gk20a_instobj_func_iommu = {
.dtor = gk20a_instobj_dtor_iommu,
.target = gk20a_instobj_target,
+ .page = gk20a_instobj_page,
.addr = gk20a_instobj_addr,
.size = gk20a_instobj_size,
.acquire = gk20a_instobj_acquire_iommu,
.release = gk20a_instobj_release_iommu,
+ .map = gk20a_instobj_map,
+};
+
+static const struct nvkm_memory_ptrs
+gk20a_instobj_ptrs = {
.rd32 = gk20a_instobj_rd32,
.wr32 = gk20a_instobj_wr32,
- .map = gk20a_instobj_map,
};
static int
@@ -377,6 +392,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
*_node = &node->base;
nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
+ node->base.memory.ptrs = &gk20a_instobj_ptrs;
node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
&node->handle, GFP_KERNEL,
@@ -397,8 +413,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
node->r.offset = node->handle >> 12;
node->r.length = (npages << PAGE_SHIFT) >> 12;
- node->base.mem.offset = node->handle;
- node->base.mem.mem = &node->r;
+ node->base.mn = &node->r;
return 0;
}
@@ -424,6 +439,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
node->dma_addrs = (void *)(node->pages + npages);
nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
+ node->base.memory.ptrs = &gk20a_instobj_ptrs;
/* Allocate backing memory */
for (i = 0; i < npages; i++) {
@@ -474,8 +490,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
/* IOMMU bit tells that an address is to be resolved through the IOMMU */
r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
- node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
- node->base.mem.mem = r;
+ node->base.mn = r;
return 0;
release_area:
@@ -523,13 +538,8 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
node->imem = imem;
- /* present memory for being mapped using small pages */
- node->mem.size = size >> 12;
- node->mem.memtype = 0;
- node->mem.page_shift = 12;
-
nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
- size, align, node->mem.offset);
+ size, align, (u64)node->mn->offset << 12);
return 0;
}
@@ -554,7 +564,6 @@ static const struct nvkm_instmem_func
gk20a_instmem = {
.dtor = gk20a_instmem_dtor,
.memory_new = gk20a_instobj_new,
- .persistent = true,
.zero = false,
};
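
Illustrative note (not part of the patch): in the gk20a conversion above, the rd32/wr32 accessors leave the main memory function table and move into a separate "ptrs" table that is attached to the object right after its constructor runs, so direct word access is only offered where it is actually available. A compact standalone sketch of that two-table arrangement follows; all names are invented and a plain array stands in for the backing store.

/* sketch only: splitting direct accessors out of the main ops table */
#include <stdio.h>
#include <stdint.h>

struct memory;

struct memory_func {                    /* lifecycle/mapping ops */
	uint64_t (*size)(struct memory *);
};

struct memory_ptrs {                    /* optional direct accessors */
	uint32_t (*rd32)(struct memory *, uint64_t);
	void (*wr32)(struct memory *, uint64_t, uint32_t);
};

struct memory {
	const struct memory_func *func;
	const struct memory_ptrs *ptrs; /* NULL if not CPU-accessible */
	uint32_t store[16];
};

static uint64_t mem_size(struct memory *m) { return sizeof(m->store); }
static uint32_t mem_rd32(struct memory *m, uint64_t off) { return m->store[off / 4]; }
static void mem_wr32(struct memory *m, uint64_t off, uint32_t v) { m->store[off / 4] = v; }

static const struct memory_func func = { .size = mem_size };
static const struct memory_ptrs ptrs = { .rd32 = mem_rd32, .wr32 = mem_wr32 };

int main(void)
{
	struct memory m = { .func = &func, .ptrs = &ptrs };

	if (m.ptrs)                     /* callers check before poking words */
		m.ptrs->wr32(&m, 0, 0xcafe);
	printf("size=%llu word0=%x\n",
	       (unsigned long long)m.func->size(&m),
	       m.ptrs ? m.ptrs->rd32(&m, 0) : 0);
	return 0;
}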
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 6133c8bb2d42..6bf0dad46919 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -24,7 +24,6 @@
#define nv04_instmem(p) container_of((p), struct nv04_instmem, base)
#include "priv.h"
-#include <core/memory.h>
#include <core/ramht.h>
struct nv04_instmem {
@@ -35,30 +34,39 @@ struct nv04_instmem {
/******************************************************************************
* instmem object implementation
*****************************************************************************/
-#define nv04_instobj(p) container_of((p), struct nv04_instobj, memory)
+#define nv04_instobj(p) container_of((p), struct nv04_instobj, base.memory)
struct nv04_instobj {
- struct nvkm_memory memory;
+ struct nvkm_instobj base;
struct nv04_instmem *imem;
struct nvkm_mm_node *node;
};
-static enum nvkm_memory_target
-nv04_instobj_target(struct nvkm_memory *memory)
+static void
+nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
- return NVKM_MEM_TARGET_INST;
+ struct nv04_instobj *iobj = nv04_instobj(memory);
+ struct nvkm_device *device = iobj->imem->base.subdev.device;
+ nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
}
-static u64
-nv04_instobj_addr(struct nvkm_memory *memory)
+static u32
+nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
- return nv04_instobj(memory)->node->offset;
+ struct nv04_instobj *iobj = nv04_instobj(memory);
+ struct nvkm_device *device = iobj->imem->base.subdev.device;
+ return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
}
-static u64
-nv04_instobj_size(struct nvkm_memory *memory)
+static const struct nvkm_memory_ptrs
+nv04_instobj_ptrs = {
+ .rd32 = nv04_instobj_rd32,
+ .wr32 = nv04_instobj_wr32,
+};
+
+static void
+nv04_instobj_release(struct nvkm_memory *memory)
{
- return nv04_instobj(memory)->node->length;
}
static void __iomem *
@@ -69,25 +77,22 @@ nv04_instobj_acquire(struct nvkm_memory *memory)
return device->pri + 0x700000 + iobj->node->offset;
}
-static void
-nv04_instobj_release(struct nvkm_memory *memory)
+static u64
+nv04_instobj_size(struct nvkm_memory *memory)
{
+ return nv04_instobj(memory)->node->length;
}
-static u32
-nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+static u64
+nv04_instobj_addr(struct nvkm_memory *memory)
{
- struct nv04_instobj *iobj = nv04_instobj(memory);
- struct nvkm_device *device = iobj->imem->base.subdev.device;
- return nvkm_rd32(device, 0x700000 + iobj->node->offset + offset);
+ return nv04_instobj(memory)->node->offset;
}
-static void
-nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+static enum nvkm_memory_target
+nv04_instobj_target(struct nvkm_memory *memory)
{
- struct nv04_instobj *iobj = nv04_instobj(memory);
- struct nvkm_device *device = iobj->imem->base.subdev.device;
- nvkm_wr32(device, 0x700000 + iobj->node->offset + offset, data);
+ return NVKM_MEM_TARGET_INST;
}
static void *
@@ -97,6 +102,7 @@ nv04_instobj_dtor(struct nvkm_memory *memory)
mutex_lock(&iobj->imem->base.subdev.mutex);
nvkm_mm_free(&iobj->imem->heap, &iobj->node);
mutex_unlock(&iobj->imem->base.subdev.mutex);
+ nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
return iobj;
}
@@ -108,8 +114,6 @@ nv04_instobj_func = {
.addr = nv04_instobj_addr,
.acquire = nv04_instobj_acquire,
.release = nv04_instobj_release,
- .rd32 = nv04_instobj_rd32,
- .wr32 = nv04_instobj_wr32,
};
static int
@@ -122,9 +126,10 @@ nv04_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
return -ENOMEM;
- *pmemory = &iobj->memory;
+ *pmemory = &iobj->base.memory;
- nvkm_memory_ctor(&nv04_instobj_func, &iobj->memory);
+ nvkm_instobj_ctor(&nv04_instobj_func, &imem->base, &iobj->base);
+ iobj->base.memory.ptrs = &nv04_instobj_ptrs;
iobj->imem = imem;
mutex_lock(&imem->base.subdev.mutex);
@@ -160,7 +165,7 @@ nv04_instmem_oneinit(struct nvkm_instmem *base)
/* PRAMIN aperture maps over the end of VRAM, reserve it */
imem->base.reserved = 512 * 1024;
- ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
+ ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
if (ret)
return ret;
@@ -194,10 +199,10 @@ static void *
nv04_instmem_dtor(struct nvkm_instmem *base)
{
struct nv04_instmem *imem = nv04_instmem(base);
- nvkm_memory_del(&imem->base.ramfc);
- nvkm_memory_del(&imem->base.ramro);
+ nvkm_memory_unref(&imem->base.ramfc);
+ nvkm_memory_unref(&imem->base.ramro);
nvkm_ramht_del(&imem->base.ramht);
- nvkm_memory_del(&imem->base.vbios);
+ nvkm_memory_unref(&imem->base.vbios);
nvkm_mm_fini(&imem->heap);
return imem;
}
@@ -209,7 +214,6 @@ nv04_instmem = {
.rd32 = nv04_instmem_rd32,
.wr32 = nv04_instmem_wr32,
.memory_new = nv04_instobj_new,
- .persistent = false,
.zero = false,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
index c0543875e490..086c118488ef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv40.c
@@ -24,7 +24,6 @@
#define nv40_instmem(p) container_of((p), struct nv40_instmem, base)
#include "priv.h"
-#include <core/memory.h>
#include <core/ramht.h>
#include <engine/gr/nv40.h>
@@ -37,30 +36,38 @@ struct nv40_instmem {
/******************************************************************************
* instmem object implementation
*****************************************************************************/
-#define nv40_instobj(p) container_of((p), struct nv40_instobj, memory)
+#define nv40_instobj(p) container_of((p), struct nv40_instobj, base.memory)
struct nv40_instobj {
- struct nvkm_memory memory;
+ struct nvkm_instobj base;
struct nv40_instmem *imem;
struct nvkm_mm_node *node;
};
-static enum nvkm_memory_target
-nv40_instobj_target(struct nvkm_memory *memory)
+static void
+nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
- return NVKM_MEM_TARGET_INST;
+ struct nv40_instobj *iobj = nv40_instobj(memory);
+ iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
}
-static u64
-nv40_instobj_addr(struct nvkm_memory *memory)
+static u32
+nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
- return nv40_instobj(memory)->node->offset;
+ struct nv40_instobj *iobj = nv40_instobj(memory);
+ return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
}
-static u64
-nv40_instobj_size(struct nvkm_memory *memory)
+static const struct nvkm_memory_ptrs
+nv40_instobj_ptrs = {
+ .rd32 = nv40_instobj_rd32,
+ .wr32 = nv40_instobj_wr32,
+};
+
+static void
+nv40_instobj_release(struct nvkm_memory *memory)
{
- return nv40_instobj(memory)->node->length;
+ wmb();
}
static void __iomem *
@@ -70,23 +77,22 @@ nv40_instobj_acquire(struct nvkm_memory *memory)
return iobj->imem->iomem + iobj->node->offset;
}
-static void
-nv40_instobj_release(struct nvkm_memory *memory)
+static u64
+nv40_instobj_size(struct nvkm_memory *memory)
{
+ return nv40_instobj(memory)->node->length;
}
-static u32
-nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+static u64
+nv40_instobj_addr(struct nvkm_memory *memory)
{
- struct nv40_instobj *iobj = nv40_instobj(memory);
- return ioread32_native(iobj->imem->iomem + iobj->node->offset + offset);
+ return nv40_instobj(memory)->node->offset;
}
-static void
-nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+static enum nvkm_memory_target
+nv40_instobj_target(struct nvkm_memory *memory)
{
- struct nv40_instobj *iobj = nv40_instobj(memory);
- iowrite32_native(data, iobj->imem->iomem + iobj->node->offset + offset);
+ return NVKM_MEM_TARGET_INST;
}
static void *
@@ -96,6 +102,7 @@ nv40_instobj_dtor(struct nvkm_memory *memory)
mutex_lock(&iobj->imem->base.subdev.mutex);
nvkm_mm_free(&iobj->imem->heap, &iobj->node);
mutex_unlock(&iobj->imem->base.subdev.mutex);
+ nvkm_instobj_dtor(&iobj->imem->base, &iobj->base);
return iobj;
}
@@ -107,8 +114,6 @@ nv40_instobj_func = {
.addr = nv40_instobj_addr,
.acquire = nv40_instobj_acquire,
.release = nv40_instobj_release,
- .rd32 = nv40_instobj_rd32,
- .wr32 = nv40_instobj_wr32,
};
static int
@@ -121,9 +126,10 @@ nv40_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
return -ENOMEM;
- *pmemory = &iobj->memory;
+ *pmemory = &iobj->base.memory;
- nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory);
+ nvkm_instobj_ctor(&nv40_instobj_func, &imem->base, &iobj->base);
+ iobj->base.memory.ptrs = &nv40_instobj_ptrs;
iobj->imem = imem;
mutex_lock(&imem->base.subdev.mutex);
@@ -171,7 +177,7 @@ nv40_instmem_oneinit(struct nvkm_instmem *base)
imem->base.reserved += 512 * 1024; /* object storage */
imem->base.reserved = round_up(imem->base.reserved, 4096);
- ret = nvkm_mm_init(&imem->heap, 0, imem->base.reserved, 1);
+ ret = nvkm_mm_init(&imem->heap, 0, 0, imem->base.reserved, 1);
if (ret)
return ret;
@@ -209,10 +215,10 @@ static void *
nv40_instmem_dtor(struct nvkm_instmem *base)
{
struct nv40_instmem *imem = nv40_instmem(base);
- nvkm_memory_del(&imem->base.ramfc);
- nvkm_memory_del(&imem->base.ramro);
+ nvkm_memory_unref(&imem->base.ramfc);
+ nvkm_memory_unref(&imem->base.ramro);
nvkm_ramht_del(&imem->base.ramht);
- nvkm_memory_del(&imem->base.vbios);
+ nvkm_memory_unref(&imem->base.vbios);
nvkm_mm_fini(&imem->heap);
if (imem->iomem)
iounmap(imem->iomem);
@@ -226,7 +232,6 @@ nv40_instmem = {
.rd32 = nv40_instmem_rd32,
.wr32 = nv40_instmem_wr32,
.memory_new = nv40_instobj_new,
- .persistent = false,
.zero = false,
};
@@ -248,8 +253,8 @@ nv40_instmem_new(struct nvkm_device *device, int index,
else
bar = 3;
- imem->iomem = ioremap(device->func->resource_addr(device, bar),
- device->func->resource_size(device, bar));
+ imem->iomem = ioremap_wc(device->func->resource_addr(device, bar),
+ device->func->resource_size(device, bar));
if (!imem->iomem) {
nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
return -EFAULT;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 6d512c062ae3..1ba7289684aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -31,147 +31,293 @@
struct nv50_instmem {
struct nvkm_instmem base;
- unsigned long lock_flags;
- spinlock_t lock;
u64 addr;
+
+ /* Mappings that can be evicted when BAR2 space has been exhausted. */
+ struct list_head lru;
};
/******************************************************************************
* instmem object implementation
*****************************************************************************/
-#define nv50_instobj(p) container_of((p), struct nv50_instobj, memory)
+#define nv50_instobj(p) container_of((p), struct nv50_instobj, base.memory)
struct nv50_instobj {
- struct nvkm_memory memory;
+ struct nvkm_instobj base;
struct nv50_instmem *imem;
- struct nvkm_mem *mem;
- struct nvkm_vma bar;
+ struct nvkm_memory *ram;
+ struct nvkm_vma *bar;
+ refcount_t maps;
void *map;
+ struct list_head lru;
};
-static enum nvkm_memory_target
-nv50_instobj_target(struct nvkm_memory *memory)
+static void
+nv50_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data)
{
- return NVKM_MEM_TARGET_VRAM;
+ struct nv50_instobj *iobj = nv50_instobj(memory);
+ struct nv50_instmem *imem = iobj->imem;
+ struct nvkm_device *device = imem->base.subdev.device;
+ u64 base = (nvkm_memory_addr(iobj->ram) + offset) & 0xffffff00000ULL;
+ u64 addr = (nvkm_memory_addr(iobj->ram) + offset) & 0x000000fffffULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imem->base.lock, flags);
+ if (unlikely(imem->addr != base)) {
+ nvkm_wr32(device, 0x001700, base >> 16);
+ imem->addr = base;
+ }
+ nvkm_wr32(device, 0x700000 + addr, data);
+ spin_unlock_irqrestore(&imem->base.lock, flags);
}
-static u64
-nv50_instobj_addr(struct nvkm_memory *memory)
+static u32
+nv50_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset)
{
- return nv50_instobj(memory)->mem->offset;
+ struct nv50_instobj *iobj = nv50_instobj(memory);
+ struct nv50_instmem *imem = iobj->imem;
+ struct nvkm_device *device = imem->base.subdev.device;
+ u64 base = (nvkm_memory_addr(iobj->ram) + offset) & 0xffffff00000ULL;
+ u64 addr = (nvkm_memory_addr(iobj->ram) + offset) & 0x000000fffffULL;
+ u32 data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imem->base.lock, flags);
+ if (unlikely(imem->addr != base)) {
+ nvkm_wr32(device, 0x001700, base >> 16);
+ imem->addr = base;
+ }
+ data = nvkm_rd32(device, 0x700000 + addr);
+ spin_unlock_irqrestore(&imem->base.lock, flags);
+ return data;
}
-static u64
-nv50_instobj_size(struct nvkm_memory *memory)
+static const struct nvkm_memory_ptrs
+nv50_instobj_slow = {
+ .rd32 = nv50_instobj_rd32_slow,
+ .wr32 = nv50_instobj_wr32_slow,
+};
+
+static void
+nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
- return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT;
+ iowrite32_native(data, nv50_instobj(memory)->map + offset);
}
+static u32
+nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
+{
+ return ioread32_native(nv50_instobj(memory)->map + offset);
+}
+
+static const struct nvkm_memory_ptrs
+nv50_instobj_fast = {
+ .rd32 = nv50_instobj_rd32,
+ .wr32 = nv50_instobj_wr32,
+};
+
static void
-nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
+nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
{
- struct nv50_instobj *iobj = nv50_instobj(memory);
- struct nvkm_subdev *subdev = &iobj->imem->base.subdev;
+ struct nv50_instmem *imem = iobj->imem;
+ struct nv50_instobj *eobj;
+ struct nvkm_memory *memory = &iobj->base.memory;
+ struct nvkm_subdev *subdev = &imem->base.subdev;
struct nvkm_device *device = subdev->device;
+ struct nvkm_vma *bar = NULL, *ebar;
u64 size = nvkm_memory_size(memory);
- void __iomem *map;
+ void *emap;
int ret;
- iobj->map = ERR_PTR(-ENOMEM);
-
- ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
- if (ret == 0) {
- map = ioremap(device->func->resource_addr(device, 3) +
- (u32)iobj->bar.offset, size);
- if (map) {
- nvkm_memory_map(memory, &iobj->bar, 0);
- iobj->map = map;
- } else {
- nvkm_warn(subdev, "PRAMIN ioremap failed\n");
- nvkm_vm_put(&iobj->bar);
+ /* Attempt to allocate BAR2 address-space and map the object
+ * into it. The lock has to be dropped while doing this due
+ * to the possibility of recursion for page table allocation.
+ */
+ mutex_unlock(&subdev->mutex);
+ while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
+ /* Evict unused mappings, and keep retrying until we either
+ * succeed, or there are no more objects left on the LRU.
+ */
+ mutex_lock(&subdev->mutex);
+ eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
+ if (eobj) {
+ nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
+ nvkm_memory_addr(&eobj->base.memory),
+ nvkm_memory_size(&eobj->base.memory),
+ eobj->bar->addr);
+ list_del_init(&eobj->lru);
+ ebar = eobj->bar;
+ eobj->bar = NULL;
+ emap = eobj->map;
+ eobj->map = NULL;
}
- } else {
- nvkm_warn(subdev, "PRAMIN exhausted\n");
+ mutex_unlock(&subdev->mutex);
+ if (!eobj)
+ break;
+ iounmap(emap);
+ nvkm_vmm_put(vmm, &ebar);
}
+
+ if (ret == 0)
+ ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
+ mutex_lock(&subdev->mutex);
+ if (ret || iobj->bar) {
+ /* We either failed, or another thread beat us. */
+ mutex_unlock(&subdev->mutex);
+ nvkm_vmm_put(vmm, &bar);
+ mutex_lock(&subdev->mutex);
+ return;
+ }
+
+ /* Make the mapping visible to the host. */
+ iobj->bar = bar;
+ iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
+ (u32)iobj->bar->addr, size);
+ if (!iobj->map) {
+ nvkm_warn(subdev, "PRAMIN ioremap failed\n");
+ nvkm_vmm_put(vmm, &iobj->bar);
+ }
+}
+
+static int
+nv50_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
+{
+ memory = nv50_instobj(memory)->ram;
+ return nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
}
static void
nv50_instobj_release(struct nvkm_memory *memory)
{
- struct nv50_instmem *imem = nv50_instobj(memory)->imem;
- spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
+ struct nv50_instobj *iobj = nv50_instobj(memory);
+ struct nv50_instmem *imem = iobj->imem;
+ struct nvkm_subdev *subdev = &imem->base.subdev;
+
+ wmb();
+ nvkm_bar_flush(subdev->device->bar);
+
+ if (refcount_dec_and_mutex_lock(&iobj->maps, &subdev->mutex)) {
+ /* Add the now-unused mapping to the LRU instead of directly
+ * unmapping it here, in case we need to map it again later.
+ */
+ if (likely(iobj->lru.next) && iobj->map) {
+ BUG_ON(!list_empty(&iobj->lru));
+ list_add_tail(&iobj->lru, &imem->lru);
+ }
+
+ /* Switch back to NULL accessors when last map is gone. */
+ iobj->base.memory.ptrs = NULL;
+ mutex_unlock(&subdev->mutex);
+ }
}
static void __iomem *
nv50_instobj_acquire(struct nvkm_memory *memory)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
- struct nv50_instmem *imem = iobj->imem;
- struct nvkm_bar *bar = imem->base.subdev.device->bar;
- struct nvkm_vm *vm;
- unsigned long flags;
+ struct nvkm_instmem *imem = &iobj->imem->base;
+ struct nvkm_vmm *vmm;
+ void __iomem *map = NULL;
- if (!iobj->map && (vm = nvkm_bar_kmap(bar)))
- nvkm_memory_boot(memory, vm);
- if (!IS_ERR_OR_NULL(iobj->map))
+ /* Already mapped? */
+ if (refcount_inc_not_zero(&iobj->maps))
return iobj->map;
- spin_lock_irqsave(&imem->lock, flags);
- imem->lock_flags = flags;
- return NULL;
-}
+ /* Take the lock, and re-check that another thread hasn't
+ * already mapped the object in the meantime.
+ */
+ mutex_lock(&imem->subdev.mutex);
+ if (refcount_inc_not_zero(&iobj->maps)) {
+ mutex_unlock(&imem->subdev.mutex);
+ return iobj->map;
+ }
-static u32
-nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset)
-{
- struct nv50_instobj *iobj = nv50_instobj(memory);
- struct nv50_instmem *imem = iobj->imem;
- struct nvkm_device *device = imem->base.subdev.device;
- u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
- u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
- u32 data;
+ /* Attempt to get a direct CPU mapping of the object. */
+ if ((vmm = nvkm_bar_bar2_vmm(imem->subdev.device))) {
+ if (!iobj->map)
+ nv50_instobj_kmap(iobj, vmm);
+ map = iobj->map;
+ }
- if (unlikely(imem->addr != base)) {
- nvkm_wr32(device, 0x001700, base >> 16);
- imem->addr = base;
+ if (!refcount_inc_not_zero(&iobj->maps)) {
+ /* Exclude object from eviction while it's being accessed. */
+ if (likely(iobj->lru.next))
+ list_del_init(&iobj->lru);
+
+ if (map)
+ iobj->base.memory.ptrs = &nv50_instobj_fast;
+ else
+ iobj->base.memory.ptrs = &nv50_instobj_slow;
+ refcount_inc(&iobj->maps);
}
- data = nvkm_rd32(device, 0x700000 + addr);
- return data;
+
+ mutex_unlock(&imem->subdev.mutex);
+ return map;
}
static void
-nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
+nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vmm *vmm)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
- struct nv50_instmem *imem = iobj->imem;
- struct nvkm_device *device = imem->base.subdev.device;
- u64 base = (iobj->mem->offset + offset) & 0xffffff00000ULL;
- u64 addr = (iobj->mem->offset + offset) & 0x000000fffffULL;
-
- if (unlikely(imem->addr != base)) {
- nvkm_wr32(device, 0x001700, base >> 16);
- imem->addr = base;
+ struct nvkm_instmem *imem = &iobj->imem->base;
+
+ /* Exclude bootstrapped objects (ie. the page tables for the
+ * instmem BAR itself) from eviction.
+ */
+ mutex_lock(&imem->subdev.mutex);
+ if (likely(iobj->lru.next)) {
+ list_del_init(&iobj->lru);
+ iobj->lru.next = NULL;
}
- nvkm_wr32(device, 0x700000 + addr, data);
+
+ nv50_instobj_kmap(iobj, vmm);
+ nvkm_instmem_boot(imem);
+ mutex_unlock(&imem->subdev.mutex);
}
-static void
-nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+static u64
+nv50_instobj_size(struct nvkm_memory *memory)
{
- struct nv50_instobj *iobj = nv50_instobj(memory);
- nvkm_vm_map_at(vma, offset, iobj->mem);
+ return nvkm_memory_size(nv50_instobj(memory)->ram);
+}
+
+static u64
+nv50_instobj_addr(struct nvkm_memory *memory)
+{
+ return nvkm_memory_addr(nv50_instobj(memory)->ram);
+}
+
+static enum nvkm_memory_target
+nv50_instobj_target(struct nvkm_memory *memory)
+{
+ return nvkm_memory_target(nv50_instobj(memory)->ram);
}
static void *
nv50_instobj_dtor(struct nvkm_memory *memory)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
- struct nvkm_ram *ram = iobj->imem->base.subdev.device->fb->ram;
- if (!IS_ERR_OR_NULL(iobj->map)) {
- nvkm_vm_put(&iobj->bar);
- iounmap(iobj->map);
+ struct nvkm_instmem *imem = &iobj->imem->base;
+ struct nvkm_vma *bar;
+ void *map = map;
+
+ mutex_lock(&imem->subdev.mutex);
+ if (likely(iobj->lru.next))
+ list_del(&iobj->lru);
+ map = iobj->map;
+ bar = iobj->bar;
+ mutex_unlock(&imem->subdev.mutex);
+
+ if (map) {
+ struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
+ iounmap(map);
+ if (likely(vmm)) /* Can be NULL during BAR destructor. */
+ nvkm_vmm_put(vmm, &bar);
}
- ram->func->put(ram, &iobj->mem);
+
+ nvkm_memory_unref(&iobj->ram);
+ nvkm_instobj_dtor(imem, &iobj->base);
return iobj;
}
@@ -184,8 +330,6 @@ nv50_instobj_func = {
.boot = nv50_instobj_boot,
.acquire = nv50_instobj_acquire,
.release = nv50_instobj_release,
- .rd32 = nv50_instobj_rd32,
- .wr32 = nv50_instobj_wr32,
.map = nv50_instobj_map,
};
@@ -195,25 +339,19 @@ nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
{
struct nv50_instmem *imem = nv50_instmem(base);
struct nv50_instobj *iobj;
- struct nvkm_ram *ram = imem->base.subdev.device->fb->ram;
- int ret;
+ struct nvkm_device *device = imem->base.subdev.device;
+ u8 page = max(order_base_2(align), 12);
if (!(iobj = kzalloc(sizeof(*iobj), GFP_KERNEL)))
return -ENOMEM;
- *pmemory = &iobj->memory;
+ *pmemory = &iobj->base.memory;
- nvkm_memory_ctor(&nv50_instobj_func, &iobj->memory);
+ nvkm_instobj_ctor(&nv50_instobj_func, &imem->base, &iobj->base);
iobj->imem = imem;
+ refcount_set(&iobj->maps, 0);
+ INIT_LIST_HEAD(&iobj->lru);
- size = max((size + 4095) & ~4095, (u32)4096);
- align = max((align + 4095) & ~4095, (u32)4096);
-
- ret = ram->func->get(ram, size, align, 0, 0x800, &iobj->mem);
- if (ret)
- return ret;
-
- iobj->mem->page_shift = 12;
- return 0;
+ return nvkm_ram_get(device, 0, 1, page, size, true, true, &iobj->ram);
}
/******************************************************************************
@@ -230,7 +368,6 @@ static const struct nvkm_instmem_func
nv50_instmem = {
.fini = nv50_instmem_fini,
.memory_new = nv50_instobj_new,
- .persistent = false,
.zero = false,
};
@@ -243,7 +380,7 @@ nv50_instmem_new(struct nvkm_device *device, int index,
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
nvkm_instmem_ctor(&nv50_instmem, device, index, &imem->base);
- spin_lock_init(&imem->lock);
+ INIT_LIST_HEAD(&imem->lru);
*pimem = &imem->base;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index 021e7a1f39a1..b9e4751b9921 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -12,10 +12,22 @@ struct nvkm_instmem_func {
void (*wr32)(struct nvkm_instmem *, u32 addr, u32 data);
int (*memory_new)(struct nvkm_instmem *, u32 size, u32 align,
bool zero, struct nvkm_memory **);
- bool persistent;
bool zero;
};
void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
int index, struct nvkm_instmem *);
+void nvkm_instmem_boot(struct nvkm_instmem *);
+
+#include <core/memory.h>
+
+struct nvkm_instobj {
+ struct nvkm_memory memory;
+ struct list_head head;
+ u32 *suspend;
+};
+
+void nvkm_instobj_ctor(const struct nvkm_memory_func *func,
+ struct nvkm_instmem *, struct nvkm_instobj *);
+void nvkm_instobj_dtor(struct nvkm_instmem *, struct nvkm_instobj *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 0c7ef250dcaf..1f185274d3e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -23,26 +23,12 @@
*/
#include "priv.h"
-#include <subdev/fb.h>
-
-int
-nvkm_ltc_tags_alloc(struct nvkm_ltc *ltc, u32 n, struct nvkm_mm_node **pnode)
-{
- int ret = nvkm_mm_head(&ltc->tags, 0, 1, n, n, 1, pnode);
- if (ret)
- *pnode = NULL;
- return ret;
-}
-
-void
-nvkm_ltc_tags_free(struct nvkm_ltc *ltc, struct nvkm_mm_node **pnode)
-{
- nvkm_mm_free(&ltc->tags, pnode);
-}
+#include <core/memory.h>
void
-nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count)
+nvkm_ltc_tags_clear(struct nvkm_device *device, u32 first, u32 count)
{
+ struct nvkm_ltc *ltc = device->ltc;
const u32 limit = first + count - 1;
BUG_ON((first > limit) || (limit >= ltc->num_tags));
@@ -116,10 +102,7 @@ static void *
nvkm_ltc_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_ltc *ltc = nvkm_ltc(subdev);
- struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
- nvkm_mm_fini(&ltc->tags);
- if (ram)
- nvkm_mm_free(&ram->vram, &ltc->tag_ram);
+ nvkm_memory_unref(&ltc->tag_ram);
return ltc;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index 4a0fa0a9b802..a21ef45b8572 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -23,6 +23,7 @@
*/
#include "priv.h"
+#include <core/memory.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
@@ -152,7 +153,10 @@ gf100_ltc_flush(struct nvkm_ltc *ltc)
int
gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
{
- struct nvkm_ram *ram = ltc->subdev.device->fb->ram;
+ struct nvkm_device *device = ltc->subdev.device;
+ struct nvkm_fb *fb = device->fb;
+ struct nvkm_ram *ram = fb->ram;
+ u32 bits = (nvkm_rd32(device, 0x100c80) & 0x00001000) ? 16 : 17;
u32 tag_size, tag_margin, tag_align;
int ret;
@@ -164,8 +168,8 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
ltc->num_tags = (ram->size >> 17) / 4;
- if (ltc->num_tags > (1 << 17))
- ltc->num_tags = 1 << 17; /* we have 17 bits in PTE */
+ if (ltc->num_tags > (1 << bits))
+ ltc->num_tags = 1 << bits; /* we have 16/17 bits in PTE */
ltc->num_tags = (ltc->num_tags + 63) & ~63; /* round up to 64 */
tag_align = ltc->ltc_nr * 0x800;
@@ -181,14 +185,13 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
*/
tag_size = (ltc->num_tags / 64) * 0x6000 + tag_margin;
tag_size += tag_align;
- tag_size = (tag_size + 0xfff) >> 12; /* round up */
- ret = nvkm_mm_tail(&ram->vram, 1, 1, tag_size, tag_size, 1,
- &ltc->tag_ram);
+ ret = nvkm_ram_get(device, NVKM_RAM_MM_NORMAL, 0x01, 12, tag_size,
+ true, true, &ltc->tag_ram);
if (ret) {
ltc->num_tags = 0;
} else {
- u64 tag_base = ((u64)ltc->tag_ram->offset << 12) + tag_margin;
+ u64 tag_base = nvkm_memory_addr(ltc->tag_ram) + tag_margin;
tag_base += tag_align - 1;
do_div(tag_base, tag_align);
@@ -197,7 +200,8 @@ gf100_ltc_oneinit_tag_ram(struct nvkm_ltc *ltc)
}
mm_init:
- return nvkm_mm_init(&ltc->tags, 0, ltc->num_tags, 1);
+ nvkm_mm_fini(&fb->tags);
+ return nvkm_mm_init(&fb->tags, 0, 0, ltc->num_tags, 1);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
index 0bdfb2f40266..e34d42108019 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
@@ -45,7 +45,7 @@ gp100_ltc_oneinit(struct nvkm_ltc *ltc)
ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
/*XXX: tagram allocation - TBD */
- return nvkm_mm_init(&ltc->tags, 0, 0, 1);
+ return 0;
}
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 012c9db687b2..352a65f9371c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -3,4 +3,33 @@ nvkm-y += nvkm/subdev/mmu/nv04.o
nvkm-y += nvkm/subdev/mmu/nv41.o
nvkm-y += nvkm/subdev/mmu/nv44.o
nvkm-y += nvkm/subdev/mmu/nv50.o
+nvkm-y += nvkm/subdev/mmu/g84.o
nvkm-y += nvkm/subdev/mmu/gf100.o
+nvkm-y += nvkm/subdev/mmu/gk104.o
+nvkm-y += nvkm/subdev/mmu/gk20a.o
+nvkm-y += nvkm/subdev/mmu/gm200.o
+nvkm-y += nvkm/subdev/mmu/gm20b.o
+nvkm-y += nvkm/subdev/mmu/gp100.o
+nvkm-y += nvkm/subdev/mmu/gp10b.o
+
+nvkm-y += nvkm/subdev/mmu/mem.o
+nvkm-y += nvkm/subdev/mmu/memnv04.o
+nvkm-y += nvkm/subdev/mmu/memnv50.o
+nvkm-y += nvkm/subdev/mmu/memgf100.o
+
+nvkm-y += nvkm/subdev/mmu/vmm.o
+nvkm-y += nvkm/subdev/mmu/vmmnv04.o
+nvkm-y += nvkm/subdev/mmu/vmmnv41.o
+nvkm-y += nvkm/subdev/mmu/vmmnv44.o
+nvkm-y += nvkm/subdev/mmu/vmmnv50.o
+nvkm-y += nvkm/subdev/mmu/vmmgf100.o
+nvkm-y += nvkm/subdev/mmu/vmmgk104.o
+nvkm-y += nvkm/subdev/mmu/vmmgk20a.o
+nvkm-y += nvkm/subdev/mmu/vmmgm200.o
+nvkm-y += nvkm/subdev/mmu/vmmgm20b.o
+nvkm-y += nvkm/subdev/mmu/vmmgp100.o
+nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
+
+nvkm-y += nvkm/subdev/mmu/umem.o
+nvkm-y += nvkm/subdev/mmu/ummu.o
+nvkm-y += nvkm/subdev/mmu/uvmm.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index 455da298227f..ee11ccaf0563 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -21,480 +21,367 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "ummu.h"
+#include "vmm.h"
-#include <core/gpuobj.h>
+#include <subdev/bar.h>
#include <subdev/fb.h>
-void
-nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
-{
- struct nvkm_vm *vm = vma->vm;
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_mm_node *r = node->mem;
- int big = vma->node->type != mmu->func->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->func->pgt_bits - bits);
- u32 end, len;
-
- delta = 0;
- while (r) {
- u64 phys = (u64)r->offset << 12;
- u32 num = r->length >> bits;
-
- while (num) {
- struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- mmu->func->map(vma, pgt, node, pte, len, phys, delta);
-
- num -= len;
- pte += len;
- if (unlikely(end >= max)) {
- phys += len << (bits + 12);
- pde++;
- pte = 0;
- }
-
- delta += (u64)len << vma->node->type;
- }
- r = r->next;
- };
-
- mmu->func->flush(vm);
-}
+#include <nvif/if500d.h>
+#include <nvif/if900d.h>
-static void
-nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
- struct nvkm_mem *mem)
-{
- struct nvkm_vm *vm = vma->vm;
- struct nvkm_mmu *mmu = vm->mmu;
- int big = vma->node->type != mmu->func->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->func->pgt_bits - bits);
- unsigned m, sglen;
- u32 end, len;
- int i;
- struct scatterlist *sg;
-
- for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
- struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
- sglen = sg_dma_len(sg) >> PAGE_SHIFT;
-
- end = pte + sglen;
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- for (m = 0; m < len; m++) {
- dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-
- mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
- num--;
- pte++;
-
- if (num == 0)
- goto finish;
- }
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
- }
- if (m < sglen) {
- for (; m < sglen; m++) {
- dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-
- mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
- num--;
- pte++;
- if (num == 0)
- goto finish;
- }
- }
-
- }
-finish:
- mmu->func->flush(vm);
-}
+struct nvkm_mmu_ptp {
+ struct nvkm_mmu_pt *pt;
+ struct list_head head;
+ u8 shift;
+ u16 mask;
+ u16 free;
+};
static void
-nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
- struct nvkm_mem *mem)
+nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)
{
- struct nvkm_vm *vm = vma->vm;
- struct nvkm_mmu *mmu = vm->mmu;
- dma_addr_t *list = mem->pages;
- int big = vma->node->type != mmu->func->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->func->pgt_bits - bits);
- u32 end, len;
-
- while (num) {
- struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- mmu->func->map_sg(vma, pgt, mem, pte, len, list);
-
- num -= len;
- pte += len;
- list += len;
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
- }
+ const int slot = pt->base >> pt->ptp->shift;
+ struct nvkm_mmu_ptp *ptp = pt->ptp;
+
+ /* If there were no free slots in the parent allocation before,
+ * there will be now, so return PTP to the cache.
+ */
+ if (!ptp->free)
+ list_add(&ptp->head, &mmu->ptp.list);
+ ptp->free |= BIT(slot);
+
+ /* If there are no more sub-allocations, destroy PTP. */
+ if (ptp->free == ptp->mask) {
+ nvkm_mmu_ptc_put(mmu, force, &ptp->pt);
+ list_del(&ptp->head);
+ kfree(ptp);
}
- mmu->func->flush(vm);
+ kfree(pt);
}
-void
-nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
+struct nvkm_mmu_pt *
+nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero)
{
- if (node->sg)
- nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
- else
- if (node->pages)
- nvkm_vm_map_sg(vma, 0, node->size << 12, node);
- else
- nvkm_vm_map_at(vma, 0, node);
-}
+ struct nvkm_mmu_pt *pt;
+ struct nvkm_mmu_ptp *ptp;
+ int slot;
+
+ if (!(pt = kzalloc(sizeof(*pt), GFP_KERNEL)))
+ return NULL;
+
+ ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
+ if (!ptp) {
+ /* Need to allocate a new parent to sub-allocate from. */
+ if (!(ptp = kmalloc(sizeof(*ptp), GFP_KERNEL))) {
+ kfree(pt);
+ return NULL;
+ }
-void
-nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
-{
- struct nvkm_vm *vm = vma->vm;
- struct nvkm_mmu *mmu = vm->mmu;
- int big = vma->node->type != mmu->func->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> mmu->func->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (mmu->func->pgt_bits - bits);
- u32 end, len;
-
- while (num) {
- struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- mmu->func->unmap(vma, pgt, pte, len);
-
- num -= len;
- pte += len;
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
+ ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false);
+ if (!ptp->pt) {
+ kfree(ptp);
+ kfree(pt);
+ return NULL;
}
- }
- mmu->func->flush(vm);
+ ptp->shift = order_base_2(size);
+ slot = nvkm_memory_size(ptp->pt->memory) >> ptp->shift;
+ ptp->mask = (1 << slot) - 1;
+ ptp->free = ptp->mask;
+ list_add(&ptp->head, &mmu->ptp.list);
+ }
+ pt->ptp = ptp;
+ pt->sub = true;
+
+ /* Sub-allocate from parent object, removing PTP from cache
+ * if there are no more free slots left.
+ */
+ slot = __ffs(ptp->free);
+ ptp->free &= ~BIT(slot);
+ if (!ptp->free)
+ list_del(&ptp->head);
+
+ pt->memory = pt->ptp->pt->memory;
+ pt->base = slot << ptp->shift;
+ pt->addr = pt->ptp->pt->addr + pt->base;
+ return pt;
}
-void
-nvkm_vm_unmap(struct nvkm_vma *vma)
-{
- nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
-}
+struct nvkm_mmu_ptc {
+ struct list_head head;
+ struct list_head item;
+ u32 size;
+ u32 refs;
+};
-static void
-nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
+static inline struct nvkm_mmu_ptc *
+nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size)
{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_vm_pgd *vpgd;
- struct nvkm_vm_pgt *vpgt;
- struct nvkm_memory *pgt;
- u32 pde;
-
- for (pde = fpde; pde <= lpde; pde++) {
- vpgt = &vm->pgt[pde - vm->fpde];
- if (--vpgt->refcount[big])
- continue;
-
- pgt = vpgt->mem[big];
- vpgt->mem[big] = NULL;
-
- list_for_each_entry(vpgd, &vm->pgd_list, head) {
- mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
- }
+ struct nvkm_mmu_ptc *ptc;
- mmu->func->flush(vm);
+ list_for_each_entry(ptc, &mmu->ptc.list, head) {
+ if (ptc->size == size)
+ return ptc;
+ }
- nvkm_memory_del(&pgt);
+ ptc = kmalloc(sizeof(*ptc), GFP_KERNEL);
+ if (ptc) {
+ INIT_LIST_HEAD(&ptc->item);
+ ptc->size = size;
+ ptc->refs = 0;
+ list_add(&ptc->head, &mmu->ptc.list);
}
+
+ return ptc;
}
-static int
-nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
+void
+nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt)
{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
- struct nvkm_vm_pgd *vpgd;
- int big = (type != mmu->func->spg_shift);
- u32 pgt_size;
- int ret;
-
- pgt_size = (1 << (mmu->func->pgt_bits + 12)) >> type;
- pgt_size *= 8;
-
- ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
- pgt_size, 0x1000, true, &vpgt->mem[big]);
- if (unlikely(ret))
- return ret;
+ struct nvkm_mmu_pt *pt = *ppt;
+ if (pt) {
+ /* Handle sub-allocated page tables. */
+ if (pt->sub) {
+ mutex_lock(&mmu->ptp.mutex);
+ nvkm_mmu_ptp_put(mmu, force, pt);
+ mutex_unlock(&mmu->ptp.mutex);
+ return;
+ }
- list_for_each_entry(vpgd, &vm->pgd_list, head) {
- mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
+ /* Either cache or free the object. */
+ mutex_lock(&mmu->ptc.mutex);
+ if (pt->ptc->refs < 8 /* Heuristic. */ && !force) {
+ list_add_tail(&pt->head, &pt->ptc->item);
+ pt->ptc->refs++;
+ } else {
+ nvkm_memory_unref(&pt->memory);
+ kfree(pt);
+ }
+ mutex_unlock(&mmu->ptc.mutex);
}
-
- vpgt->refcount[big]++;
- return 0;
}
-int
-nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
- struct nvkm_vma *vma)
+struct nvkm_mmu_pt *
+nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero)
{
- struct nvkm_mmu *mmu = vm->mmu;
- u32 align = (1 << page_shift) >> 12;
- u32 msize = size >> 12;
- u32 fpde, lpde, pde;
+ struct nvkm_mmu_ptc *ptc;
+ struct nvkm_mmu_pt *pt;
int ret;
- mutex_lock(&vm->mutex);
- ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
- &vma->node);
- if (unlikely(ret != 0)) {
- mutex_unlock(&vm->mutex);
- return ret;
+ /* Sub-allocated page table (ie. GP100 LPT). */
+ if (align < 0x1000) {
+ mutex_lock(&mmu->ptp.mutex);
+ pt = nvkm_mmu_ptp_get(mmu, align, zero);
+ mutex_unlock(&mmu->ptp.mutex);
+ return pt;
}
- fpde = (vma->node->offset >> mmu->func->pgt_bits);
- lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
+ /* Lookup cache for this page table size. */
+ mutex_lock(&mmu->ptc.mutex);
+ ptc = nvkm_mmu_ptc_find(mmu, size);
+ if (!ptc) {
+ mutex_unlock(&mmu->ptc.mutex);
+ return NULL;
+ }
- for (pde = fpde; pde <= lpde; pde++) {
- struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
- int big = (vma->node->type != mmu->func->spg_shift);
+ /* If there's a free PT in the cache, reuse it. */
+ pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
+ if (pt) {
+ if (zero)
+ nvkm_fo64(pt->memory, 0, 0, size >> 3);
+ list_del(&pt->head);
+ ptc->refs--;
+ mutex_unlock(&mmu->ptc.mutex);
+ return pt;
+ }
+ mutex_unlock(&mmu->ptc.mutex);
- if (likely(vpgt->refcount[big])) {
- vpgt->refcount[big]++;
- continue;
- }
+ /* No such luck, we need to allocate. */
+ if (!(pt = kmalloc(sizeof(*pt), GFP_KERNEL)))
+ return NULL;
+ pt->ptc = ptc;
+ pt->sub = false;
- ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
- if (ret) {
- if (pde != fpde)
- nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
- nvkm_mm_free(&vm->mm, &vma->node);
- mutex_unlock(&vm->mutex);
- return ret;
- }
+ ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+ size, align, zero, &pt->memory);
+ if (ret) {
+ kfree(pt);
+ return NULL;
}
- mutex_unlock(&vm->mutex);
- vma->vm = NULL;
- nvkm_vm_ref(vm, &vma->vm, NULL);
- vma->offset = (u64)vma->node->offset << 12;
- vma->access = access;
- return 0;
+ pt->base = 0;
+ pt->addr = nvkm_memory_addr(pt->memory);
+ return pt;
}
void
-nvkm_vm_put(struct nvkm_vma *vma)
-{
- struct nvkm_mmu *mmu;
- struct nvkm_vm *vm;
- u32 fpde, lpde;
-
- if (unlikely(vma->node == NULL))
- return;
- vm = vma->vm;
- mmu = vm->mmu;
-
- fpde = (vma->node->offset >> mmu->func->pgt_bits);
- lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;
-
- mutex_lock(&vm->mutex);
- nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
- nvkm_mm_free(&vm->mm, &vma->node);
- mutex_unlock(&vm->mutex);
-
- nvkm_vm_ref(NULL, &vma->vm, NULL);
-}
-
-int
-nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
+nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu)
{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_memory *pgt;
- int ret;
-
- ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
- (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
- if (ret == 0) {
- vm->pgt[0].refcount[0] = 1;
- vm->pgt[0].mem[0] = pgt;
- nvkm_memory_boot(pgt, vm);
+ struct nvkm_mmu_ptc *ptc;
+ list_for_each_entry(ptc, &mmu->ptc.list, head) {
+ struct nvkm_mmu_pt *pt, *tt;
+ list_for_each_entry_safe(pt, tt, &ptc->item, head) {
+ nvkm_memory_unref(&pt->memory);
+ list_del(&pt->head);
+ kfree(pt);
+ }
}
-
- return ret;
}
-int
-nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
- u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
+static void
+nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu)
{
- static struct lock_class_key _key;
- struct nvkm_vm *vm;
- u64 mm_length = (offset + length) - mm_offset;
- int ret;
-
- vm = kzalloc(sizeof(*vm), GFP_KERNEL);
- if (!vm)
- return -ENOMEM;
+ struct nvkm_mmu_ptc *ptc, *ptct;
- __mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
- INIT_LIST_HEAD(&vm->pgd_list);
- vm->mmu = mmu;
- kref_init(&vm->refcount);
- vm->fpde = offset >> (mmu->func->pgt_bits + 12);
- vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);
-
- vm->pgt = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
- if (!vm->pgt) {
- kfree(vm);
- return -ENOMEM;
+ list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
+ WARN_ON(!list_empty(&ptc->item));
+ list_del(&ptc->head);
+ kfree(ptc);
}
-
- ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
- block >> 12);
- if (ret) {
- vfree(vm->pgt);
- kfree(vm);
- return ret;
- }
-
- *pvm = vm;
-
- return 0;
}
-int
-nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
- struct lock_class_key *key, struct nvkm_vm **pvm)
+static void
+nvkm_mmu_ptc_init(struct nvkm_mmu *mmu)
{
- struct nvkm_mmu *mmu = device->mmu;
- if (!mmu->func->create)
- return -EINVAL;
- return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
+ mutex_init(&mmu->ptc.mutex);
+ INIT_LIST_HEAD(&mmu->ptc.list);
+ mutex_init(&mmu->ptp.mutex);
+ INIT_LIST_HEAD(&mmu->ptp.list);
}
-static int
-nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
+static void
+nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type)
{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_vm_pgd *vpgd;
- int i;
-
- if (!pgd)
- return 0;
-
- vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
- if (!vpgd)
- return -ENOMEM;
-
- vpgd->obj = pgd;
-
- mutex_lock(&vm->mutex);
- for (i = vm->fpde; i <= vm->lpde; i++)
- mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
- list_add(&vpgd->head, &vm->pgd_list);
- mutex_unlock(&vm->mutex);
- return 0;
+ if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) {
+ mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type;
+ mmu->type[mmu->type_nr].heap = heap;
+ mmu->type_nr++;
+ }
}
-static void
-nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
+static int
+nvkm_mmu_heap(struct nvkm_mmu *mmu, u8 type, u64 size)
{
- struct nvkm_vm_pgd *vpgd, *tmp;
-
- if (!mpgd)
- return;
-
- mutex_lock(&vm->mutex);
- list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
- if (vpgd->obj == mpgd) {
- list_del(&vpgd->head);
- kfree(vpgd);
- break;
+ if (size) {
+ if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) {
+ mmu->heap[mmu->heap_nr].type = type;
+ mmu->heap[mmu->heap_nr].size = size;
+ return mmu->heap_nr++;
}
}
- mutex_unlock(&vm->mutex);
+ return -EINVAL;
}
static void
-nvkm_vm_del(struct kref *kref)
+nvkm_mmu_host(struct nvkm_mmu *mmu)
{
- struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
- struct nvkm_vm_pgd *vpgd, *tmp;
-
- list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
- nvkm_vm_unlink(vm, vpgd->obj);
- }
-
- nvkm_mm_fini(&vm->mm);
- vfree(vm->pgt);
- kfree(vm);
+ struct nvkm_device *device = mmu->subdev.device;
+ u8 type = NVKM_MEM_KIND * !!mmu->func->kind_sys;
+ int heap;
+
+ /* Non-mappable system memory. */
+ heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL);
+ nvkm_mmu_type(mmu, heap, type);
+
+ /* Non-coherent, cached, system memory.
+ *
+ * Block-linear mappings of system memory must be done through
+ * BAR1, and cannot be supported on systems where we're unable
+ * to map BAR1 with write-combining.
+ */
+ type |= NVKM_MEM_MAPPABLE;
+ if (!device->bar || device->bar->iomap_uncached)
+ nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+ else
+ nvkm_mmu_type(mmu, heap, type);
+
+ /* Coherent, cached, system memory.
+ *
+ * Unsupported on systems that aren't able to support snooped
+ * mappings, and also for block-linear mappings which must be
+ * done through BAR1.
+ */
+ type |= NVKM_MEM_COHERENT;
+ if (device->func->cpu_coherent)
+ nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND);
+
+ /* Uncached system memory. */
+ nvkm_mmu_type(mmu, heap, type |= NVKM_MEM_UNCACHED);
}
-int
-nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
+static void
+nvkm_mmu_vram(struct nvkm_mmu *mmu)
{
- if (ref) {
- int ret = nvkm_vm_link(ref, pgd);
- if (ret)
- return ret;
-
- kref_get(&ref->refcount);
- }
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_mm *mm = &device->fb->ram->vram;
+ const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+ const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+ const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+ u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
+ u8 heap = NVKM_MEM_VRAM;
+ int heapM, heapN, heapU;
+
+ /* Mixed-memory doesn't support compression or display. */
+ heapM = nvkm_mmu_heap(mmu, heap, sizeM << NVKM_RAM_MM_SHIFT);
+
+ heap |= NVKM_MEM_COMP;
+ heap |= NVKM_MEM_DISP;
+ heapN = nvkm_mmu_heap(mmu, heap, sizeN << NVKM_RAM_MM_SHIFT);
+ heapU = nvkm_mmu_heap(mmu, heap, sizeU << NVKM_RAM_MM_SHIFT);
+
+ /* Add non-mappable VRAM types first so that they're preferred
+ * over anything else. Mixed-memory will be slower than other
+ * heaps, so it's prioritised last.
+ */
+ nvkm_mmu_type(mmu, heapU, type);
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
+
+ /* Add host memory types next, under the assumption that users
+ * wanting mappable memory want to use them as staging buffers
+ * or the like.
+ */
+ nvkm_mmu_host(mmu);
+
+ /* Mappable VRAM types go last, as they're basically the worst
+ * possible type to ask for unless there's no other choice.
+ */
+ if (device->bar) {
+ /* Write-combined BAR1 access. */
+ type |= NVKM_MEM_MAPPABLE;
+ if (!device->bar->iomap_uncached) {
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
+ }
- if (*ptr) {
- nvkm_vm_unlink(*ptr, pgd);
- kref_put(&(*ptr)->refcount, nvkm_vm_del);
+ /* Uncached BAR1 access. */
+ type |= NVKM_MEM_COHERENT;
+ type |= NVKM_MEM_UNCACHED;
+ nvkm_mmu_type(mmu, heapN, type);
+ nvkm_mmu_type(mmu, heapM, type);
}
-
- *ptr = ref;
- return 0;
}
static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_mmu *mmu = nvkm_mmu(subdev);
- if (mmu->func->oneinit)
- return mmu->func->oneinit(mmu);
+
+ /* Determine available memory types. */
+ if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram)
+ nvkm_mmu_vram(mmu);
+ else
+ nvkm_mmu_host(mmu);
+
+ if (mmu->func->vmm.global) {
+ int ret = nvkm_vmm_new(subdev->device, 0, 0, NULL, 0, NULL,
+ "gart", &mmu->vmm);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -511,8 +398,10 @@ static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_mmu *mmu = nvkm_mmu(subdev);
- if (mmu->func->dtor)
- return mmu->func->dtor(mmu);
+
+ nvkm_vmm_unref(&mmu->vmm);
+
+ nvkm_mmu_ptc_fini(mmu);
return mmu;
}
@@ -529,9 +418,10 @@ nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
{
nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
mmu->func = func;
- mmu->limit = func->limit;
mmu->dma_bits = func->dma_bits;
- mmu->lpg_shift = func->lpg_shift;
+ nvkm_mmu_ptc_init(mmu);
+ mmu->user.ctor = nvkm_ummu_new;
+ mmu->user.base = func->mmu.user;
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
new file mode 100644
index 000000000000..8accda5a772b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/g84.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+g84_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x0200 },
+ .kind = nv50_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+g84_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&g84_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
index 7ac507c927bb..2d075246dc46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gf100.c
@@ -21,197 +21,65 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "mem.h"
+#include "vmm.h"
-#include <subdev/fb.h>
-#include <subdev/ltc.h>
-#include <subdev/timer.h>
-
-#include <core/gpuobj.h>
+#include <nvif/class.h>
/* Map from compressed to corresponding uncompressed storage type.
* The value 0xff represents an invalid storage type.
*/
-const u8 gf100_pte_storage_type_map[256] =
-{
- 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
- 0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
- 0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
- 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
- 0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
- 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
- 0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
- 0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
- 0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
- 0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
- 0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
- 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
- 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
- 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
- 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
- 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
-};
-
-
-static void
-gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_memory *pgt[2])
-{
- u32 pde[2] = { 0, 0 };
-
- if (pgt[0])
- pde[1] = 0x00000001 | (nvkm_memory_addr(pgt[0]) >> 8);
- if (pgt[1])
- pde[0] = 0x00000001 | (nvkm_memory_addr(pgt[1]) >> 8);
-
- nvkm_kmap(pgd);
- nvkm_wo32(pgd, (index * 8) + 0, pde[0]);
- nvkm_wo32(pgd, (index * 8) + 4, pde[1]);
- nvkm_done(pgd);
-}
-
-static inline u64
-gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
-{
- phys >>= 8;
-
- phys |= 0x00000001; /* present */
- if (vma->access & NV_MEM_ACCESS_SYS)
- phys |= 0x00000002;
-
- phys |= ((u64)target << 32);
- phys |= ((u64)memtype << 36);
- return phys;
-}
-
-static void
-gf100_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
-{
- u64 next = 1 << (vma->node->type - 8);
-
- phys = gf100_vm_addr(vma, phys, mem->memtype, 0);
- pte <<= 3;
-
- if (mem->tag) {
- struct nvkm_ltc *ltc = vma->vm->mmu->subdev.device->ltc;
- u32 tag = mem->tag->offset + (delta >> 17);
- phys |= (u64)tag << (32 + 12);
- next |= (u64)1 << (32 + 12);
- nvkm_ltc_tags_clear(ltc, tag, cnt);
- }
-
- nvkm_kmap(pgt);
- while (cnt--) {
- nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
- nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
- phys += next;
- pte += 8;
- }
- nvkm_done(pgt);
-}
-
-static void
-gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
- u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
- /* compressed storage types are invalid for system memory */
- u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff];
-
- nvkm_kmap(pgt);
- pte <<= 3;
- while (cnt--) {
- u64 phys = gf100_vm_addr(vma, *list++, memtype, target);
- nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
- nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
- pte += 8;
- }
- nvkm_done(pgt);
-}
-
-static void
-gf100_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
- nvkm_kmap(pgt);
- pte <<= 3;
- while (cnt--) {
- nvkm_wo32(pgt, pte + 0, 0x00000000);
- nvkm_wo32(pgt, pte + 4, 0x00000000);
- pte += 8;
- }
- nvkm_done(pgt);
-}
-
-static void
-gf100_vm_flush(struct nvkm_vm *vm)
-{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_device *device = mmu->subdev.device;
- struct nvkm_vm_pgd *vpgd;
- u32 type;
-
- type = 0x00000001; /* PAGE_ALL */
- if (atomic_read(&vm->engref[NVKM_SUBDEV_BAR]))
- type |= 0x00000004; /* HUB_ONLY */
-
- mutex_lock(&mmu->subdev.mutex);
- list_for_each_entry(vpgd, &vm->pgd_list, head) {
- /* looks like maybe a "free flush slots" counter, the
- * faster you write to 0x100cbc to more it decreases
- */
- nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
- break;
- );
-
- nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
- nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
-
- /* wait for flush to be queued? */
- nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x100c80) & 0x00008000)
- break;
- );
- }
- mutex_unlock(&mmu->subdev.mutex);
-}
-
-static int
-gf100_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
- struct lock_class_key *key, struct nvkm_vm **pvm)
+const u8 *
+gf100_mmu_kind(struct nvkm_mmu *mmu, int *count)
{
- return nvkm_vm_create(mmu, offset, length, mm_offset, 4096, key, pvm);
+ static const u8
+ kind[256] = {
+ 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
+ 0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
+ 0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
+ 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
+ 0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
+ 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
+ 0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
+ 0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
+ 0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
+ 0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
+ 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
+ };
+
+ *count = ARRAY_SIZE(kind);
+ return kind;
}
static const struct nvkm_mmu_func
gf100_mmu = {
- .limit = (1ULL << 40),
.dma_bits = 40,
- .pgt_bits = 27 - 12,
- .spg_shift = 12,
- .lpg_shift = 17,
- .create = gf100_vm_create,
- .map_pgt = gf100_vm_map_pgt,
- .map = gf100_vm_map,
- .map_sg = gf100_vm_map_sg,
- .unmap = gf100_vm_unmap,
- .flush = gf100_vm_flush,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gf100_vmm_new },
+ .kind = gf100_mmu_kind,
+ .kind_sys = true,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
new file mode 100644
index 000000000000..3d7d1eb1cff9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk104.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gk104_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk104_vmm_new },
+ .kind = gf100_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gk104_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&gk104_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
new file mode 100644
index 000000000000..ac74965a60d4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gk20a.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gk20a_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GF100}, gk20a_vmm_new },
+ .kind = gf100_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gk20a_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&gk20a_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
new file mode 100644
index 000000000000..dbf644ebac97
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm200.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+
+const u8 *
+gm200_mmu_kind(struct nvkm_mmu *mmu, int *count)
+{
+ static const u8
+ kind[256] = {
+ 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, /* 0x00 */
+ 0x01, 0x01, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff, 0x11, /* 0x10 */
+ 0x11, 0x11, 0x11, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x26, 0x27, /* 0x20 */
+ 0x28, 0x29, 0x2a, 0x2b, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30 */
+ 0xff, 0xff, 0x26, 0x27, 0x28, 0x29, 0x26, 0x27,
+ 0x28, 0x29, 0xff, 0xff, 0xff, 0xff, 0x46, 0xff, /* 0x40 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x46, 0x46, 0x46, 0x46, 0xff, 0xff, 0xff, /* 0x50 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70 */
+ 0xff, 0xff, 0xff, 0x7b, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7b, 0x7b, /* 0x80 */
+ 0x7b, 0x7b, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x8b, 0x8c, 0x8d, 0x8e, 0xa7, /* 0xa0 */
+ 0xa8, 0xa9, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xc3, 0xff, 0xff, 0xff, 0xff, /* 0xc0 */
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xc3, 0xc3,
+ 0xc3, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0 */
+ 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, /* 0xe0 */
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff,
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0 */
+ 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfd, 0xfe, 0xff
+ };
+ *count = ARRAY_SIZE(kind);
+ return kind;
+}
+
+static const struct nvkm_mmu_func
+gm200_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm200_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+static const struct nvkm_mmu_func
+gm200_mmu_fixed = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm200_vmm_new_fixed },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gm200_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ if (device->fb->page)
+ return nvkm_mmu_new_(&gm200_mmu_fixed, device, index, pmmu);
+ return nvkm_mmu_new_(&gm200_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
index 1df8154d0626..7353a94b4091 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gm20b.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,27 +18,38 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
*/
-#ifndef POLARIS10_SMC_H
-#define POLARIS10_SMC_H
+#include "mem.h"
+#include "vmm.h"
-#include "smumgr.h"
+#include <subdev/fb.h>
+#include <nvif/class.h>
-int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int polaris10_init_smc_table(struct pp_hwmgr *hwmgr);
-int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
-int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
-int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member);
-uint32_t polaris10_get_mac_definition(uint32_t value);
-int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr);
-bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr);
-int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
+static const struct nvkm_mmu_func
+gm20b_mmu = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};

-#endif
+static const struct nvkm_mmu_func
+gm20b_mmu_fixed = {
+ .dma_bits = 40,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GM200}, gm20b_vmm_new_fixed },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};

+int
+gm20b_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ if (device->fb->page)
+ return nvkm_mmu_new_(&gm20b_mmu_fixed, device, index, pmmu);
+ return nvkm_mmu_new_(&gm20b_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
index 13c8dbbccaf2..651b8805c67c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp100.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,23 +18,28 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
*/
-#ifndef _ICELAND_SMC_H
-#define _ICELAND_SMC_H
+#include "mem.h"
+#include "vmm.h"
-#include "smumgr.h"
+#include <core/option.h>
+#include <nvif/class.h>
-int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
-int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
-int iceland_init_smc_table(struct pp_hwmgr *hwmgr);
-int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
-int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr);
-uint32_t iceland_get_offsetof(uint32_t type, uint32_t member);
-uint32_t iceland_get_mac_definition(uint32_t value);
-int iceland_process_firmware_header(struct pp_hwmgr *hwmgr);
-int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
-bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr);
-#endif
+static const struct nvkm_mmu_func
+gp100_mmu = {
+ .dma_bits = 47,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};

+int
+gp100_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
+ return gm200_mmu_new(device, index, pmmu);
+ return nvkm_mmu_new_(&gp100_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b. b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
new file mode 100644
index 000000000000..3bd3db31e0bb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+gp10b_mmu = {
+ .dma_bits = 47,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+gp10b_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ if (!nvkm_boolopt(device->cfgopt, "GP100MmuLayout", true))
+ return gm20b_mmu_new(device, index, pmmu);
+ return nvkm_mmu_new_(&gp10b_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
new file mode 100644
index 000000000000..39808489f21d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define nvkm_mem(p) container_of((p), struct nvkm_mem, memory)
+#include "mem.h"
+
+#include <core/memory.h>
+
+#include <nvif/if000a.h>
+#include <nvif/unpack.h>
+
+struct nvkm_mem {
+ struct nvkm_memory memory;
+ enum nvkm_memory_target target;
+ struct nvkm_mmu *mmu;
+ u64 pages;
+ struct page **mem;
+ union {
+ struct scatterlist *sgl;
+ dma_addr_t *dma;
+ };
+};
+
+static enum nvkm_memory_target
+nvkm_mem_target(struct nvkm_memory *memory)
+{
+ return nvkm_mem(memory)->target;
+}
+
+static u8
+nvkm_mem_page(struct nvkm_memory *memory)
+{
+ return PAGE_SHIFT;
+}
+
+static u64
+nvkm_mem_addr(struct nvkm_memory *memory)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ if (mem->pages == 1 && mem->mem)
+ return mem->dma[0];
+ return ~0ULL;
+}
+
+static u64
+nvkm_mem_size(struct nvkm_memory *memory)
+{
+ return nvkm_mem(memory)->pages << PAGE_SHIFT;
+}
+
+static int
+nvkm_mem_map_dma(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ struct nvkm_vmm_map map = {
+ .memory = &mem->memory,
+ .offset = offset,
+ .dma = mem->dma,
+ };
+ return nvkm_vmm_map(vmm, vma, argv, argc, &map);
+}
+
+static void *
+nvkm_mem_dtor(struct nvkm_memory *memory)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ if (mem->mem) {
+ while (mem->pages--) {
+ dma_unmap_page(mem->mmu->subdev.device->dev,
+ mem->dma[mem->pages], PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(mem->mem[mem->pages]);
+ }
+ kvfree(mem->dma);
+ kvfree(mem->mem);
+ }
+ return mem;
+}
+
+static const struct nvkm_memory_func
+nvkm_mem_dma = {
+ .dtor = nvkm_mem_dtor,
+ .target = nvkm_mem_target,
+ .page = nvkm_mem_page,
+ .addr = nvkm_mem_addr,
+ .size = nvkm_mem_size,
+ .map = nvkm_mem_map_dma,
+};
+
+static int
+nvkm_mem_map_sgl(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ struct nvkm_vmm_map map = {
+ .memory = &mem->memory,
+ .offset = offset,
+ .sgl = mem->sgl,
+ };
+ return nvkm_vmm_map(vmm, vma, argv, argc, &map);
+}
+
+static const struct nvkm_memory_func
+nvkm_mem_sgl = {
+ .dtor = nvkm_mem_dtor,
+ .target = nvkm_mem_target,
+ .page = nvkm_mem_page,
+ .addr = nvkm_mem_addr,
+ .size = nvkm_mem_size,
+ .map = nvkm_mem_map_sgl,
+};
+
+int
+nvkm_mem_map_host(struct nvkm_memory *memory, void **pmap)
+{
+ struct nvkm_mem *mem = nvkm_mem(memory);
+ if (mem->mem) {
+ *pmap = vmap(mem->mem, mem->pages, VM_MAP, PAGE_KERNEL);
+ return *pmap ? 0 : -EFAULT;
+ }
+ return -EINVAL;
+}
+
+static int
+nvkm_mem_new_host(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ struct device *dev = mmu->subdev.device->dev;
+ union {
+ struct nvif_mem_ram_vn vn;
+ struct nvif_mem_ram_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ enum nvkm_memory_target target;
+ struct nvkm_mem *mem;
+ gfp_t gfp = GFP_USER | __GFP_ZERO;
+
+ if ( (mmu->type[type].type & NVKM_MEM_COHERENT) &&
+ !(mmu->type[type].type & NVKM_MEM_UNCACHED))
+ target = NVKM_MEM_TARGET_HOST;
+ else
+ target = NVKM_MEM_TARGET_NCOH;
+
+ if (page != PAGE_SHIFT)
+ return -EINVAL;
+
+ if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
+ return -ENOMEM;
+ mem->target = target;
+ mem->mmu = mmu;
+ *pmemory = &mem->memory;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ if (args->v0.dma) {
+ nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
+ mem->dma = args->v0.dma;
+ } else {
+ nvkm_memory_ctor(&nvkm_mem_sgl, &mem->memory);
+ mem->sgl = args->v0.sgl;
+ }
+
+ if (!IS_ALIGNED(size, PAGE_SIZE))
+ return -EINVAL;
+ mem->pages = size >> PAGE_SHIFT;
+ return 0;
+ } else
+ if ( (ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ kfree(mem);
+ return ret;
+ }
+
+ nvkm_memory_ctor(&nvkm_mem_dma, &mem->memory);
+ size = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;
+
+ if (!(mem->mem = kvmalloc(sizeof(*mem->mem) * size, GFP_KERNEL)))
+ return -ENOMEM;
+ if (!(mem->dma = kvmalloc(sizeof(*mem->dma) * size, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (mmu->dma_bits > 32)
+ gfp |= GFP_HIGHUSER;
+ else
+ gfp |= GFP_DMA32;
+
+ for (mem->pages = 0; size; size--, mem->pages++) {
+ struct page *p = alloc_page(gfp);
+ if (!p)
+ return -ENOMEM;
+
+ mem->dma[mem->pages] = dma_map_page(mmu->subdev.device->dev,
+ p, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, mem->dma[mem->pages])) {
+ __free_page(p);
+ return -ENOMEM;
+ }
+
+ mem->mem[mem->pages] = p;
+ }
+
+ return 0;
+}
+
+int
+nvkm_mem_new_type(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ struct nvkm_memory *memory = NULL;
+ int ret;
+
+ if (mmu->type[type].type & NVKM_MEM_VRAM) {
+ ret = mmu->func->mem.vram(mmu, type, page, size,
+ argv, argc, &memory);
+ } else {
+ ret = nvkm_mem_new_host(mmu, type, page, size,
+ argv, argc, &memory);
+ }
+
+ if (ret)
+ nvkm_memory_unref(&memory);
+ *pmemory = memory;
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h
new file mode 100644
index 000000000000..234267e1b215
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.h
@@ -0,0 +1,23 @@
+#ifndef __NVKM_MEM_H__
+#define __NVKM_MEM_H__
+#include "priv.h"
+
+int nvkm_mem_new_type(struct nvkm_mmu *, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **);
+int nvkm_mem_map_host(struct nvkm_memory *, void **pmap);
+
+int nv04_mem_new(struct nvkm_mmu *, int, u8, u64, void *, u32,
+ struct nvkm_memory **);
+int nv04_mem_map(struct nvkm_mmu *, struct nvkm_memory *, void *, u32,
+ u64 *, u64 *, struct nvkm_vma **);
+
+int nv50_mem_new(struct nvkm_mmu *, int, u8, u64, void *, u32,
+ struct nvkm_memory **);
+int nv50_mem_map(struct nvkm_mmu *, struct nvkm_memory *, void *, u32,
+ u64 *, u64 *, struct nvkm_vma **);
+
+int gf100_mem_new(struct nvkm_mmu *, int, u8, u64, void *, u32,
+ struct nvkm_memory **);
+int gf100_mem_map(struct nvkm_mmu *, struct nvkm_memory *, void *, u32,
+ u64 *, u64 *, struct nvkm_vma **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
new file mode 100644
index 000000000000..d9c9bee45222
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memgf100.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+
+#include <core/memory.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/if900b.h>
+#include <nvif/if900d.h>
+#include <nvif/unpack.h>
+
+int
+gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
+ u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
+{
+ struct gf100_vmm_map_v0 uvmm = {};
+ union {
+ struct gf100_mem_map_vn vn;
+ struct gf100_mem_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ uvmm.ro = args->v0.ro;
+ uvmm.kind = args->v0.kind;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ } else
+ return ret;
+
+ ret = nvkm_vmm_get(bar, nvkm_memory_page(memory),
+ nvkm_memory_size(memory), pvma);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
+ if (ret)
+ return ret;
+
+ *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+ *psize = (*pvma)->size;
+ return 0;
+}
+
+int
+gf100_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ union {
+ struct gf100_mem_vn vn;
+ struct gf100_mem_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ bool contig;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ contig = args->v0.contig;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ contig = false;
+ } else
+ return ret;
+
+ if (mmu->type[type].type & (NVKM_MEM_DISP | NVKM_MEM_COMP))
+ type = NVKM_RAM_MM_NORMAL;
+ else
+ type = NVKM_RAM_MM_MIXED;
+
+ return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
+ size, contig, false, pmemory);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
new file mode 100644
index 000000000000..79a3b0cc9f5b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv04.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+
+#include <core/memory.h>
+#include <subdev/fb.h>
+
+#include <nvif/if000b.h>
+#include <nvif/unpack.h>
+
+int
+nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
+ u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
+{
+ union {
+ struct nv04_mem_map_vn vn;
+ } *args = argv;
+ struct nvkm_device *device = mmu->subdev.device;
+ const u64 addr = nvkm_memory_addr(memory);
+ int ret = -ENOSYS;
+
+ if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
+ return ret;
+
+ *paddr = device->func->resource_addr(device, 1) + addr;
+ *psize = nvkm_memory_size(memory);
+ *pvma = ERR_PTR(-ENODEV);
+ return 0;
+}
+
+int
+nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ union {
+ struct nv04_mem_vn vn;
+ } *args = argv;
+ int ret = -ENOSYS;
+
+ if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
+ return ret;
+
+ if (mmu->type[type].type & NVKM_MEM_MAPPABLE)
+ type = NVKM_RAM_MM_NORMAL;
+ else
+ type = NVKM_RAM_MM_NOMAP;
+
+ return nvkm_ram_get(mmu->subdev.device, type, 0x01, page,
+ size, true, false, pmemory);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
new file mode 100644
index 000000000000..46759b89fc1f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/memnv50.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+
+#include <core/memory.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#include <nvif/class.h>
+#include <nvif/if500b.h>
+#include <nvif/if500d.h>
+#include <nvif/unpack.h>
+
+int
+nv50_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
+ u32 argc, u64 *paddr, u64 *psize, struct nvkm_vma **pvma)
+{
+ struct nv50_vmm_map_v0 uvmm = {};
+ union {
+ struct nv50_mem_map_vn vn;
+ struct nv50_mem_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
+ u64 size = nvkm_memory_size(memory);
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ uvmm.ro = args->v0.ro;
+ uvmm.kind = args->v0.kind;
+ uvmm.comp = args->v0.comp;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ } else
+ return ret;
+
+ ret = nvkm_vmm_get(bar, 12, size, pvma);
+ if (ret)
+ return ret;
+
+ *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+ *psize = (*pvma)->size;
+ return nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
+}
+
+int
+nv50_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **pmemory)
+{
+ union {
+ struct nv50_mem_vn vn;
+ struct nv50_mem_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ bool contig;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ type = args->v0.bankswz ? 0x02 : 0x01;
+ contig = args->v0.contig;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ type = 0x01;
+ contig = false;
+ } else
+ return -ENOSYS;
+
+ return nvkm_ram_get(mmu->subdev.device, NVKM_RAM_MM_NORMAL, type,
+ page, size, contig, false, pmemory);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
index 37927c3fdc3e..d201c887c2cd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -21,129 +21,21 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "mem.h"
+#include "vmm.h"
-#include <core/gpuobj.h>
-
-#define NV04_PDMA_SIZE (128 * 1024 * 1024)
-#define NV04_PDMA_PAGE ( 4 * 1024)
-
-/*******************************************************************************
- * VM map/unmap callbacks
- ******************************************************************************/
-
-static void
-nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
- pte = 0x00008 + (pte * 4);
- nvkm_kmap(pgt);
- while (cnt) {
- u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
- u32 phys = (u32)*list++;
- while (cnt && page--) {
- nvkm_wo32(pgt, pte, phys | 3);
- phys += NV04_PDMA_PAGE;
- pte += 4;
- cnt -= 1;
- }
- }
- nvkm_done(pgt);
-}
-
-static void
-nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
- pte = 0x00008 + (pte * 4);
- nvkm_kmap(pgt);
- while (cnt--) {
- nvkm_wo32(pgt, pte, 0x00000000);
- pte += 4;
- }
- nvkm_done(pgt);
-}
-
-static void
-nv04_vm_flush(struct nvkm_vm *vm)
-{
-}
-
-/*******************************************************************************
- * MMU subdev
- ******************************************************************************/
-
-static int
-nv04_mmu_oneinit(struct nvkm_mmu *base)
-{
- struct nv04_mmu *mmu = nv04_mmu(base);
- struct nvkm_device *device = mmu->base.subdev.device;
- struct nvkm_memory *dma;
- int ret;
-
- ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
- &mmu->vm);
- if (ret)
- return ret;
-
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
- (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
- 16, true, &dma);
- mmu->vm->pgt[0].mem[0] = dma;
- mmu->vm->pgt[0].refcount[0] = 1;
- if (ret)
- return ret;
-
- nvkm_kmap(dma);
- nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
- nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
- nvkm_done(dma);
- return 0;
-}
-
-void *
-nv04_mmu_dtor(struct nvkm_mmu *base)
-{
- struct nv04_mmu *mmu = nv04_mmu(base);
- struct nvkm_device *device = mmu->base.subdev.device;
- if (mmu->vm) {
- nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);
- nvkm_vm_ref(NULL, &mmu->vm, NULL);
- }
- if (mmu->nullp) {
- dma_free_coherent(device->dev, 16 * 1024,
- mmu->nullp, mmu->null);
- }
- return mmu;
-}
-
-int
-nv04_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
- int index, struct nvkm_mmu **pmmu)
-{
- struct nv04_mmu *mmu;
- if (!(mmu = kzalloc(sizeof(*mmu), GFP_KERNEL)))
- return -ENOMEM;
- *pmmu = &mmu->base;
- nvkm_mmu_ctor(func, device, index, &mmu->base);
- return 0;
-}
+#include <nvif/class.h>

 const struct nvkm_mmu_func
nv04_mmu = {
- .oneinit = nv04_mmu_oneinit,
- .dtor = nv04_mmu_dtor,
- .limit = NV04_PDMA_SIZE,
.dma_bits = 32,
- .pgt_bits = 32 - 12,
- .spg_shift = 12,
- .lpg_shift = 12,
- .map_sg = nv04_vm_map_sg,
- .unmap = nv04_vm_unmap,
- .flush = nv04_vm_flush,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv04_vmm_new, true },
 };

 int
nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
- return nv04_mmu_new_(&nv04_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&nv04_mmu, device, index, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
deleted file mode 100644
index 9c35c43635c2..000000000000
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NV04_MMU_PRIV__
-#define __NV04_MMU_PRIV__
-#define nv04_mmu(p) container_of((p), struct nv04_mmu, base)
-#include "priv.h"
-
-struct nv04_mmu {
- struct nvkm_mmu base;
- struct nvkm_vm *vm;
- dma_addr_t null;
- void *nullp;
-};
-
-int nv04_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
- int index, struct nvkm_mmu **);
-void *nv04_mmu_dtor(struct nvkm_mmu *);
-
-extern const struct nvkm_mmu_func nv04_mmu;
-#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
index c6a26f907009..adca81895c09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -21,113 +21,29 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "mem.h"
+#include "vmm.h"
-#include <core/gpuobj.h>
#include <core/option.h>
-#include <subdev/timer.h>
-#define NV41_GART_SIZE (512 * 1024 * 1024)
-#define NV41_GART_PAGE ( 4 * 1024)
-
-/*******************************************************************************
- * VM map/unmap callbacks
- ******************************************************************************/
-
-static void
-nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
- pte = pte * 4;
- nvkm_kmap(pgt);
- while (cnt) {
- u32 page = PAGE_SIZE / NV41_GART_PAGE;
- u64 phys = (u64)*list++;
- while (cnt && page--) {
- nvkm_wo32(pgt, pte, (phys >> 7) | 1);
- phys += NV41_GART_PAGE;
- pte += 4;
- cnt -= 1;
- }
- }
- nvkm_done(pgt);
-}
-
-static void
-nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
- pte = pte * 4;
- nvkm_kmap(pgt);
- while (cnt--) {
- nvkm_wo32(pgt, pte, 0x00000000);
- pte += 4;
- }
- nvkm_done(pgt);
-}
-
-static void
-nv41_vm_flush(struct nvkm_vm *vm)
-{
- struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
- struct nvkm_device *device = mmu->base.subdev.device;
-
- mutex_lock(&mmu->base.subdev.mutex);
- nvkm_wr32(device, 0x100810, 0x00000022);
- nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x100810) & 0x00000020)
- break;
- );
- nvkm_wr32(device, 0x100810, 0x00000000);
- mutex_unlock(&mmu->base.subdev.mutex);
-}
-
-/*******************************************************************************
- * MMU subdev
- ******************************************************************************/
-
-static int
-nv41_mmu_oneinit(struct nvkm_mmu *base)
-{
- struct nv04_mmu *mmu = nv04_mmu(base);
- struct nvkm_device *device = mmu->base.subdev.device;
- int ret;
-
- ret = nvkm_vm_create(&mmu->base, 0, NV41_GART_SIZE, 0, 4096, NULL,
- &mmu->vm);
- if (ret)
- return ret;
-
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
- (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
- &mmu->vm->pgt[0].mem[0]);
- mmu->vm->pgt[0].refcount[0] = 1;
- return ret;
-}
+#include <nvif/class.h>

 static void
-nv41_mmu_init(struct nvkm_mmu *base)
+nv41_mmu_init(struct nvkm_mmu *mmu)
{
- struct nv04_mmu *mmu = nv04_mmu(base);
- struct nvkm_device *device = mmu->base.subdev.device;
- struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
- nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
+ struct nvkm_device *device = mmu->subdev.device;
+ nvkm_wr32(device, 0x100800, 0x00000002 | mmu->vmm->pd->pt[0]->addr);
nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
nvkm_wr32(device, 0x100820, 0x00000000);
 }

 static const struct nvkm_mmu_func
nv41_mmu = {
- .dtor = nv04_mmu_dtor,
- .oneinit = nv41_mmu_oneinit,
.init = nv41_mmu_init,
- .limit = NV41_GART_SIZE,
.dma_bits = 39,
- .pgt_bits = 32 - 12,
- .spg_shift = 12,
- .lpg_shift = 12,
- .map_sg = nv41_vm_map_sg,
- .unmap = nv41_vm_unmap,
- .flush = nv41_vm_flush,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv41_vmm_new, true },
 };

 int
@@ -137,5 +53,5 @@ nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
!nvkm_boolopt(device->cfgopt, "NvPCIE", true))
 return nv04_mmu_new(device, index, pmmu);

- return nv04_mmu_new_(&nv41_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&nv41_mmu, device, index, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index a648c2395545..598c53a27bde 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
@@ -21,176 +21,18 @@
*
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include "mem.h"
+#include "vmm.h"
-#include <core/gpuobj.h>
#include <core/option.h>
-#include <subdev/timer.h>
-#define NV44_GART_SIZE (512 * 1024 * 1024)
-#define NV44_GART_PAGE ( 4 * 1024)
-
-/*******************************************************************************
- * VM map/unmap callbacks
- ******************************************************************************/
-
-static void
-nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
- dma_addr_t *list, u32 pte, u32 cnt)
-{
- u32 base = (pte << 2) & ~0x0000000f;
- u32 tmp[4];
-
- tmp[0] = nvkm_ro32(pgt, base + 0x0);
- tmp[1] = nvkm_ro32(pgt, base + 0x4);
- tmp[2] = nvkm_ro32(pgt, base + 0x8);
- tmp[3] = nvkm_ro32(pgt, base + 0xc);
-
- while (cnt--) {
- u32 addr = list ? (*list++ >> 12) : (null >> 12);
- switch (pte++ & 0x3) {
- case 0:
- tmp[0] &= ~0x07ffffff;
- tmp[0] |= addr;
- break;
- case 1:
- tmp[0] &= ~0xf8000000;
- tmp[0] |= addr << 27;
- tmp[1] &= ~0x003fffff;
- tmp[1] |= addr >> 5;
- break;
- case 2:
- tmp[1] &= ~0xffc00000;
- tmp[1] |= addr << 22;
- tmp[2] &= ~0x0001ffff;
- tmp[2] |= addr >> 10;
- break;
- case 3:
- tmp[2] &= ~0xfffe0000;
- tmp[2] |= addr << 17;
- tmp[3] &= ~0x00000fff;
- tmp[3] |= addr >> 15;
- break;
- }
- }
-
- nvkm_wo32(pgt, base + 0x0, tmp[0]);
- nvkm_wo32(pgt, base + 0x4, tmp[1]);
- nvkm_wo32(pgt, base + 0x8, tmp[2]);
- nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
-}
-
-static void
-nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
- struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
- u32 tmp[4];
- int i;
-
- nvkm_kmap(pgt);
- if (pte & 3) {
- u32 max = 4 - (pte & 3);
- u32 part = (cnt > max) ? max : cnt;
- nv44_vm_fill(pgt, mmu->null, list, pte, part);
- pte += part;
- list += part;
- cnt -= part;
- }
-
- while (cnt >= 4) {
- for (i = 0; i < 4; i++)
- tmp[i] = *list++ >> 12;
- nvkm_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27);
- nvkm_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22);
- nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
- nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
- cnt -= 4;
- }
-
- if (cnt)
- nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
- nvkm_done(pgt);
-}
-
-static void
-nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
- struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
-
- nvkm_kmap(pgt);
- if (pte & 3) {
- u32 max = 4 - (pte & 3);
- u32 part = (cnt > max) ? max : cnt;
- nv44_vm_fill(pgt, mmu->null, NULL, pte, part);
- pte += part;
- cnt -= part;
- }
-
- while (cnt >= 4) {
- nvkm_wo32(pgt, pte++ * 4, 0x00000000);
- nvkm_wo32(pgt, pte++ * 4, 0x00000000);
- nvkm_wo32(pgt, pte++ * 4, 0x00000000);
- nvkm_wo32(pgt, pte++ * 4, 0x00000000);
- cnt -= 4;
- }
-
- if (cnt)
- nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
- nvkm_done(pgt);
-}
-
-static void
-nv44_vm_flush(struct nvkm_vm *vm)
-{
- struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
- struct nvkm_device *device = mmu->base.subdev.device;
- nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
- nvkm_wr32(device, 0x100808, 0x00000020);
- nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x100808) & 0x00000001)
- break;
- );
- nvkm_wr32(device, 0x100808, 0x00000000);
-}
-
-/*******************************************************************************
- * MMU subdev
- ******************************************************************************/
-
-static int
-nv44_mmu_oneinit(struct nvkm_mmu *base)
-{
- struct nv04_mmu *mmu = nv04_mmu(base);
- struct nvkm_device *device = mmu->base.subdev.device;
- int ret;
-
- mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024,
- &mmu->null, GFP_KERNEL);
- if (!mmu->nullp) {
- nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
- mmu->null = 0;
- }
-
- ret = nvkm_vm_create(&mmu->base, 0, NV44_GART_SIZE, 0, 4096, NULL,
- &mmu->vm);
- if (ret)
- return ret;
-
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
- (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
- 512 * 1024, true,
- &mmu->vm->pgt[0].mem[0]);
- mmu->vm->pgt[0].refcount[0] = 1;
- return ret;
-}
+#include <nvif/class.h>

 static void
-nv44_mmu_init(struct nvkm_mmu *base)
+nv44_mmu_init(struct nvkm_mmu *mmu)
{
- struct nv04_mmu *mmu = nv04_mmu(base);
- struct nvkm_device *device = mmu->base.subdev.device;
- struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0];
+ struct nvkm_device *device = mmu->subdev.device;
+ struct nvkm_memory *pt = mmu->vmm->pd->pt[0]->memory;
 u32 addr;

 /* calculate vram address of this PRAMIN block, object must be
@@ -198,11 +40,11 @@ nv44_mmu_init(struct nvkm_mmu *base)
* of 512KiB for this to work correctly
*/
addr = nvkm_rd32(device, 0x10020c);
- addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;
+ addr -= ((nvkm_memory_addr(pt) >> 19) + 1) << 19;

 nvkm_wr32(device, 0x100850, 0x80000000);
- nvkm_wr32(device, 0x100818, mmu->null);
- nvkm_wr32(device, 0x100804, NV44_GART_SIZE);
+ nvkm_wr32(device, 0x100818, mmu->vmm->null);
+ nvkm_wr32(device, 0x100804, (nvkm_memory_size(pt) / 4) * 4096);
nvkm_wr32(device, 0x100850, 0x00008000);
nvkm_mask(device, 0x10008c, 0x00000200, 0x00000200);
nvkm_wr32(device, 0x100820, 0x00000000);
@@ -212,17 +54,11 @@ nv44_mmu_init(struct nvkm_mmu *base)

 static const struct nvkm_mmu_func
nv44_mmu = {
- .dtor = nv04_mmu_dtor,
- .oneinit = nv44_mmu_oneinit,
.init = nv44_mmu_init,
- .limit = NV44_GART_SIZE,
.dma_bits = 39,
- .pgt_bits = 32 - 12,
- .spg_shift = 12,
- .lpg_shift = 12,
- .map_sg = nv44_vm_map_sg,
- .unmap = nv44_vm_unmap,
- .flush = nv44_vm_flush,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV04}},
+ .mem = {{ -1, -1, NVIF_CLASS_MEM_NV04}, nv04_mem_new, nv04_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv44_vmm_new, true },
 };

 int
@@ -232,5 +68,5 @@ nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
!nvkm_boolopt(device->cfgopt, "NvPCIE", true))
 return nv04_mmu_new(device, index, pmmu);

- return nv04_mmu_new_(&nv44_mmu, device, index, pmmu);
+ return nvkm_mmu_new_(&nv44_mmu, device, index, pmmu);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index a1f8d65f0276..db3dfbbb2aa0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
@@ -21,207 +21,52 @@
*
* Authors: Ben Skeggs
*/
-#include "priv.h"
+#include "mem.h"
+#include "vmm.h"
-#include <core/gpuobj.h>
-#include <subdev/fb.h>
-#include <subdev/timer.h>
-#include <engine/gr.h>
+#include <nvif/class.h>
-static void
-nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_memory *pgt[2])
+const u8 *
+nv50_mmu_kind(struct nvkm_mmu *base, int *count)
{
- u64 phys = 0xdeadcafe00000000ULL;
- u32 coverage = 0;
-
- if (pgt[0]) {
- /* present, 4KiB pages */
- phys = 0x00000003 | nvkm_memory_addr(pgt[0]);
- coverage = (nvkm_memory_size(pgt[0]) >> 3) << 12;
- } else
- if (pgt[1]) {
- /* present, 64KiB pages */
- phys = 0x00000001 | nvkm_memory_addr(pgt[1]);
- coverage = (nvkm_memory_size(pgt[1]) >> 3) << 16;
- }
-
- if (phys & 1) {
- if (coverage <= 32 * 1024 * 1024)
- phys |= 0x60;
- else if (coverage <= 64 * 1024 * 1024)
- phys |= 0x40;
- else if (coverage <= 128 * 1024 * 1024)
- phys |= 0x20;
- }
-
- nvkm_kmap(pgd);
- nvkm_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
- nvkm_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
- nvkm_done(pgd);
-}
-
-static inline u64
-vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target)
-{
- phys |= 1; /* present */
- phys |= (u64)memtype << 40;
- phys |= target << 4;
- if (vma->access & NV_MEM_ACCESS_SYS)
- phys |= (1 << 6);
- if (!(vma->access & NV_MEM_ACCESS_WO))
- phys |= (1 << 3);
- return phys;
-}
-
-static void
-nv50_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
-{
- struct nvkm_ram *ram = vma->vm->mmu->subdev.device->fb->ram;
- u32 comp = (mem->memtype & 0x180) >> 7;
- u32 block, target;
- int i;
-
- /* IGPs don't have real VRAM, re-target to stolen system memory */
- target = 0;
- if (ram->stolen) {
- phys += ram->stolen;
- target = 3;
- }
-
- phys = vm_addr(vma, phys, mem->memtype, target);
- pte <<= 3;
- cnt <<= 3;
-
- nvkm_kmap(pgt);
- while (cnt) {
- u32 offset_h = upper_32_bits(phys);
- u32 offset_l = lower_32_bits(phys);
-
- for (i = 7; i >= 0; i--) {
- block = 1 << (i + 3);
- if (cnt >= block && !(pte & (block - 1)))
- break;
- }
- offset_l |= (i << 7);
-
- phys += block << (vma->node->type - 3);
- cnt -= block;
- if (comp) {
- u32 tag = mem->tag->offset + ((delta >> 16) * comp);
- offset_h |= (tag << 17);
- delta += block << (vma->node->type - 3);
- }
-
- while (block) {
- nvkm_wo32(pgt, pte + 0, offset_l);
- nvkm_wo32(pgt, pte + 4, offset_h);
- pte += 8;
- block -= 8;
- }
- }
- nvkm_done(pgt);
-}
-
-static void
-nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
- u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
- pte <<= 3;
- nvkm_kmap(pgt);
- while (cnt--) {
- u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
- nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
- nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
- pte += 8;
- }
- nvkm_done(pgt);
-}
-
-static void
-nv50_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
- pte <<= 3;
- nvkm_kmap(pgt);
- while (cnt--) {
- nvkm_wo32(pgt, pte + 0, 0x00000000);
- nvkm_wo32(pgt, pte + 4, 0x00000000);
- pte += 8;
- }
- nvkm_done(pgt);
-}
-
-static void
-nv50_vm_flush(struct nvkm_vm *vm)
-{
- struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_subdev *subdev = &mmu->subdev;
- struct nvkm_device *device = subdev->device;
- int i, vme;
-
- mutex_lock(&subdev->mutex);
- for (i = 0; i < NVKM_SUBDEV_NR; i++) {
- if (!atomic_read(&vm->engref[i]))
- continue;
-
- /* unfortunate hw bug workaround... */
- if (i == NVKM_ENGINE_GR && device->gr) {
- int ret = nvkm_gr_tlb_flush(device->gr);
- if (ret != -ENODEV)
- continue;
- }
-
- switch (i) {
- case NVKM_ENGINE_GR : vme = 0x00; break;
- case NVKM_ENGINE_VP :
- case NVKM_ENGINE_MSPDEC: vme = 0x01; break;
- case NVKM_SUBDEV_BAR : vme = 0x06; break;
- case NVKM_ENGINE_MSPPP :
- case NVKM_ENGINE_MPEG : vme = 0x08; break;
- case NVKM_ENGINE_BSP :
- case NVKM_ENGINE_MSVLD : vme = 0x09; break;
- case NVKM_ENGINE_CIPHER:
- case NVKM_ENGINE_SEC : vme = 0x0a; break;
- case NVKM_ENGINE_CE0 : vme = 0x0d; break;
- default:
- continue;
- }
-
- nvkm_wr32(device, 0x100c80, (vme << 16) | 1);
- if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
- break;
- ) < 0)
- nvkm_error(subdev, "vm flush timeout: engine %d\n", vme);
- }
- mutex_unlock(&subdev->mutex);
-}
-
-static int
-nv50_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
- struct lock_class_key *key, struct nvkm_vm **pvm)
-{
- u32 block = (1 << (mmu->func->pgt_bits + 12));
- if (block > length)
- block = length;
-
- return nvkm_vm_create(mmu, offset, length, mm_offset, block, key, pvm);
+ /* 0x01: no bank swizzle
+ * 0x02: bank swizzled
+ * 0x7f: invalid
+ *
+ * 0x01/0x02 are values understood by the VRAM allocator,
+ * and are required to avoid mixing the two types within
+ * a certain range.
+ */
+ static const u8
+ kind[128] = {
+ 0x01, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x00 */
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x01, 0x01, 0x01, 0x01, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x10 */
+ 0x02, 0x02, 0x02, 0x02, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x7f, /* 0x20 */
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, /* 0x30 */
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, /* 0x40 */
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x01, 0x01, 0x01, 0x7f, /* 0x50 */
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x7f, /* 0x60 */
+ 0x01, 0x01, 0x01, 0x01, 0x02, 0x02, 0x02, 0x02,
+ 0x01, 0x7f, 0x02, 0x7f, 0x01, 0x7f, 0x02, 0x7f, /* 0x70 */
+ 0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x7f, 0x7f
+ };
+ *count = ARRAY_SIZE(kind);
+ return kind;
 }

 static const struct nvkm_mmu_func
nv50_mmu = {
- .limit = (1ULL << 40),
.dma_bits = 40,
- .pgt_bits = 29 - 12,
- .spg_shift = 12,
- .lpg_shift = 16,
- .create = nv50_vm_create,
- .map_pgt = nv50_vm_map_pgt,
- .map = nv50_vm_map,
- .map_sg = nv50_vm_map_sg,
- .unmap = nv50_vm_unmap,
- .flush = nv50_vm_flush,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map },
+ .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, nv50_vmm_new, false, 0x1400 },
+ .kind = nv50_mmu_kind,
 };

 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index bf37f313b5bb..948a48c21be4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -10,31 +10,57 @@ int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *,
 int index, struct nvkm_mmu **);

 struct nvkm_mmu_func {
- void *(*dtor)(struct nvkm_mmu *);
- int (*oneinit)(struct nvkm_mmu *);
 void (*init)(struct nvkm_mmu *);

- u64 limit;
u8 dma_bits;
- u32 pgt_bits;
- u8 spg_shift;
- u8 lpg_shift;
-
- int (*create)(struct nvkm_mmu *, u64 offset, u64 length, u64 mm_offset,
- struct lock_class_key *, struct nvkm_vm **);
-
- void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
- struct nvkm_memory *pgt[2]);
- void (*map)(struct nvkm_vma *, struct nvkm_memory *,
- struct nvkm_mem *, u32 pte, u32 cnt,
- u64 phys, u64 delta);
- void (*map_sg)(struct nvkm_vma *, struct nvkm_memory *,
- struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
- void (*unmap)(struct nvkm_vma *, struct nvkm_memory *pgt,
- u32 pte, u32 cnt);
- void (*flush)(struct nvkm_vm *);
+
+ struct {
+ struct nvkm_sclass user;
+ } mmu;
+
+ struct {
+ struct nvkm_sclass user;
+ int (*vram)(struct nvkm_mmu *, int type, u8 page, u64 size,
+ void *argv, u32 argc, struct nvkm_memory **);
+ int (*umap)(struct nvkm_mmu *, struct nvkm_memory *, void *argv,
+ u32 argc, u64 *addr, u64 *size, struct nvkm_vma **);
+ } mem;
+
+ struct {
+ struct nvkm_sclass user;
+ int (*ctor)(struct nvkm_mmu *, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *,
+ const char *name, struct nvkm_vmm **);
+ bool global;
+ u32 pd_offset;
+ } vmm;
+
+ const u8 *(*kind)(struct nvkm_mmu *, int *count);
+ bool kind_sys;
+};
+
+extern const struct nvkm_mmu_func nv04_mmu;
+
+const u8 *nv50_mmu_kind(struct nvkm_mmu *, int *count);
+
+const u8 *gf100_mmu_kind(struct nvkm_mmu *, int *count);
+
+const u8 *gm200_mmu_kind(struct nvkm_mmu *, int *);
+
+struct nvkm_mmu_pt {
+ union {
+ struct nvkm_mmu_ptc *ptc;
+ struct nvkm_mmu_ptp *ptp;
+ };
+ struct nvkm_memory *memory;
+ bool sub;
+ u16 base;
+ u64 addr;
+ struct list_head head;
 };

-int nvkm_vm_create(struct nvkm_mmu *, u64, u64, u64, u32,
- struct lock_class_key *, struct nvkm_vm **);
+void nvkm_mmu_ptc_dump(struct nvkm_mmu *);
+struct nvkm_mmu_pt *
+nvkm_mmu_ptc_get(struct nvkm_mmu *, u32 size, u32 align, bool zero);
+void nvkm_mmu_ptc_put(struct nvkm_mmu *, bool force, struct nvkm_mmu_pt **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
new file mode 100644
index 000000000000..fac2f9a45ea6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "umem.h"
+#include "ummu.h"
+
+#include <core/client.h>
+#include <core/memory.h>
+#include <subdev/bar.h>
+
+#include <nvif/class.h>
+#include <nvif/if000a.h>
+#include <nvif/unpack.h>
+
+static const struct nvkm_object_func nvkm_umem;
+struct nvkm_memory *
+nvkm_umem_search(struct nvkm_client *client, u64 handle)
+{
+ struct nvkm_client *master = client->object.client;
+ struct nvkm_memory *memory = NULL;
+ struct nvkm_object *object;
+ struct nvkm_umem *umem;
+
+ object = nvkm_object_search(client, handle, &nvkm_umem);
+ if (IS_ERR(object)) {
+ if (client->super && client != master) {
+ spin_lock(&master->lock);
+ list_for_each_entry(umem, &master->umem, head) {
+ if (umem->object.object == handle) {
+ memory = nvkm_memory_ref(umem->memory);
+ break;
+ }
+ }
+ spin_unlock(&master->lock);
+ }
+ } else {
+ umem = nvkm_umem(object);
+ if (!umem->priv || client->super)
+ memory = nvkm_memory_ref(umem->memory);
+ }
+
+ return memory ? memory : ERR_PTR(-ENOENT);
+}
+
+static int
+nvkm_umem_unmap(struct nvkm_object *object)
+{
+ struct nvkm_umem *umem = nvkm_umem(object);
+
+ if (!umem->map)
+ return -EEXIST;
+
+ if (umem->io) {
+ if (!IS_ERR(umem->bar)) {
+ struct nvkm_device *device = umem->mmu->subdev.device;
+ nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &umem->bar);
+ } else {
+ umem->bar = NULL;
+ }
+ } else {
+ vunmap(umem->map);
+ umem->map = NULL;
+ }
+
+ return 0;
+}
+
+static int
+nvkm_umem_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *handle, u64 *length)
+{
+ struct nvkm_umem *umem = nvkm_umem(object);
+ struct nvkm_mmu *mmu = umem->mmu;
+
+ if (!umem->mappable)
+ return -EINVAL;
+ if (umem->map)
+ return -EEXIST;
+
+ if ((umem->type & NVKM_MEM_HOST) && !argc) {
+ int ret = nvkm_mem_map_host(umem->memory, &umem->map);
+ if (ret)
+ return ret;
+
+ *handle = (unsigned long)(void *)umem->map;
+ *length = nvkm_memory_size(umem->memory);
+ *type = NVKM_OBJECT_MAP_VA;
+ return 0;
+ } else
+ if ((umem->type & NVKM_MEM_VRAM) ||
+ (umem->type & NVKM_MEM_KIND)) {
+ int ret = mmu->func->mem.umap(mmu, umem->memory, argv, argc,
+ handle, length, &umem->bar);
+ if (ret)
+ return ret;
+
+ *type = NVKM_OBJECT_MAP_IO;
+ } else {
+ return -EINVAL;
+ }
+
+ umem->io = (*type == NVKM_OBJECT_MAP_IO);
+ return 0;
+}
+
+static void *
+nvkm_umem_dtor(struct nvkm_object *object)
+{
+ struct nvkm_umem *umem = nvkm_umem(object);
+ spin_lock(&umem->object.client->lock);
+ list_del_init(&umem->head);
+ spin_unlock(&umem->object.client->lock);
+ nvkm_memory_unref(&umem->memory);
+ return umem;
+}
+
+static const struct nvkm_object_func
+nvkm_umem = {
+ .dtor = nvkm_umem_dtor,
+ .map = nvkm_umem_map,
+ .unmap = nvkm_umem_unmap,
+};
+
+int
+nvkm_umem_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
+ union {
+ struct nvif_mem_v0 v0;
+ } *args = argv;
+ struct nvkm_umem *umem;
+ int type, ret = -ENOSYS;
+ u8 page;
+ u64 size;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+ type = args->v0.type;
+ page = args->v0.page;
+ size = args->v0.size;
+ } else
+ return ret;
+
+ if (type >= mmu->type_nr)
+ return -EINVAL;
+
+ if (!(umem = kzalloc(sizeof(*umem), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nvkm_umem, oclass, &umem->object);
+ umem->mmu = mmu;
+ umem->type = mmu->type[type].type;
+ umem->priv = oclass->client->super;
+ INIT_LIST_HEAD(&umem->head);
+ *pobject = &umem->object;
+
+ if (mmu->type[type].type & NVKM_MEM_MAPPABLE) {
+ page = max_t(u8, page, PAGE_SHIFT);
+ umem->mappable = true;
+ }
+
+ ret = nvkm_mem_new_type(mmu, type, page, size, argv, argc,
+ &umem->memory);
+ if (ret)
+ return ret;
+
+ spin_lock(&umem->object.client->lock);
+ list_add(&umem->head, &umem->object.client->umem);
+ spin_unlock(&umem->object.client->lock);
+
+ args->v0.page = nvkm_memory_page(umem->memory);
+ args->v0.addr = nvkm_memory_addr(umem->memory);
+ args->v0.size = nvkm_memory_size(umem->memory);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
new file mode 100644
index 000000000000..85cf692d620a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h
@@ -0,0 +1,26 @@
+#ifndef __NVKM_UMEM_H__
+#define __NVKM_UMEM_H__
+#define nvkm_umem(p) container_of((p), struct nvkm_umem, object)
+#include <core/object.h>
+#include "mem.h"
+
+struct nvkm_umem {
+ struct nvkm_object object;
+ struct nvkm_mmu *mmu;
+ u8 type:8;
+ bool priv:1;
+ bool mappable:1;
+ bool io:1;
+
+ struct nvkm_memory *memory;
+ struct list_head head;
+
+ union {
+ struct nvkm_vma *bar;
+ void *map;
+ };
+};
+
+int nvkm_umem_new(const struct nvkm_oclass *, void *argv, u32 argc,
+ struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
new file mode 100644
index 000000000000..353f10f92b77
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ummu.h"
+#include "umem.h"
+#include "uvmm.h"
+
+#include <core/client.h>
+
+#include <nvif/if0008.h>
+#include <nvif/unpack.h>
+
+static int
+nvkm_ummu_sclass(struct nvkm_object *object, int index,
+ struct nvkm_oclass *oclass)
+{
+ struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu;
+
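+ /* Enumerate available child classes in a fixed order, skipping
+ * the memory class when it's absent or the client lacks privileges.
+ */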
+ if (mmu->func->mem.user.oclass && oclass->client->super) {
+ if (index-- == 0) {
+ oclass->base = mmu->func->mem.user;
+ oclass->ctor = nvkm_umem_new;
+ return 0;
+ }
+ }
+
+ if (mmu->func->vmm.user.oclass) {
+ if (index-- == 0) {
+ oclass->base = mmu->func->vmm.user;
+ oclass->ctor = nvkm_uvmm_new;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int
+nvkm_ummu_heap(struct nvkm_ummu *ummu, void *argv, u32 argc)
+{
+ struct nvkm_mmu *mmu = ummu->mmu;
+ union {
+ struct nvif_mmu_heap_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ u8 index;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ if ((index = args->v0.index) >= mmu->heap_nr)
+ return -EINVAL;
+ args->v0.size = mmu->heap[index].size;
+ } else
+ return ret;
+
+ return 0;
+}
+
+static int
+nvkm_ummu_type(struct nvkm_ummu *ummu, void *argv, u32 argc)
+{
+ struct nvkm_mmu *mmu = ummu->mmu;
+ union {
+ struct nvif_mmu_type_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+ u8 type, index;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ if ((index = args->v0.index) >= mmu->type_nr)
+ return -EINVAL;
+ type = mmu->type[index].type;
+ args->v0.heap = mmu->type[index].heap;
+ args->v0.vram = !!(type & NVKM_MEM_VRAM);
+ args->v0.host = !!(type & NVKM_MEM_HOST);
+ args->v0.comp = !!(type & NVKM_MEM_COMP);
+ args->v0.disp = !!(type & NVKM_MEM_DISP);
+ args->v0.kind = !!(type & NVKM_MEM_KIND);
+ args->v0.mappable = !!(type & NVKM_MEM_MAPPABLE);
+ args->v0.coherent = !!(type & NVKM_MEM_COHERENT);
+ args->v0.uncached = !!(type & NVKM_MEM_UNCACHED);
+ } else
+ return ret;
+
+ return 0;
+}
+
+static int
+nvkm_ummu_kind(struct nvkm_ummu *ummu, void *argv, u32 argc)
+{
+ struct nvkm_mmu *mmu = ummu->mmu;
+ union {
+ struct nvif_mmu_kind_v0 v0;
+ } *args = argv;
+ const u8 *kind = NULL;
+ int ret = -ENOSYS, count = 0;
+
+ if (mmu->func->kind)
+ kind = mmu->func->kind(mmu, &count);
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+ if (argc != args->v0.count * sizeof(*args->v0.data))
+ return -EINVAL;
+ if (args->v0.count > count)
+ return -EINVAL;
+ memcpy(args->v0.data, kind, args->v0.count);
+ } else
+ return ret;
+
+ return 0;
+}
+
+static int
+nvkm_ummu_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
+{
+ struct nvkm_ummu *ummu = nvkm_ummu(object);
+ switch (mthd) {
+ case NVIF_MMU_V0_HEAP: return nvkm_ummu_heap(ummu, argv, argc);
+ case NVIF_MMU_V0_TYPE: return nvkm_ummu_type(ummu, argv, argc);
+ case NVIF_MMU_V0_KIND: return nvkm_ummu_kind(ummu, argv, argc);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static const struct nvkm_object_func
+nvkm_ummu = {
+ .mthd = nvkm_ummu_mthd,
+ .sclass = nvkm_ummu_sclass,
+};
+
+int
+nvkm_ummu_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+ void *argv, u32 argc, struct nvkm_object **pobject)
+{
+ union {
+ struct nvif_mmu_v0 v0;
+ } *args = argv;
+ struct nvkm_mmu *mmu = device->mmu;
+ struct nvkm_ummu *ummu;
+ int ret = -ENOSYS, kinds = 0;
+
+ if (mmu->func->kind)
+ mmu->func->kind(mmu, &kinds);
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ args->v0.dmabits = mmu->dma_bits;
+ args->v0.heap_nr = mmu->heap_nr;
+ args->v0.type_nr = mmu->type_nr;
+ args->v0.kind_nr = kinds;
+ } else
+ return ret;
+
+ if (!(ummu = kzalloc(sizeof(*ummu), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nvkm_ummu, oclass, &ummu->object);
+ ummu->mmu = mmu;
+ *pobject = &ummu->object;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h
new file mode 100644
index 000000000000..0cd510dcfc68
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ummu.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_UMMU_H__
+#define __NVKM_UMMU_H__
+#define nvkm_ummu(p) container_of((p), struct nvkm_ummu, object)
+#include <core/object.h>
+#include "priv.h"
+
+struct nvkm_ummu {
+ struct nvkm_object object;
+ struct nvkm_mmu *mmu;
+};
+
+int nvkm_ummu_new(struct nvkm_device *, const struct nvkm_oclass *,
+ void *argv, u32 argc, struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
new file mode 100644
index 000000000000..fa81d0c1ba41
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "uvmm.h"
+#include "umem.h"
+#include "ummu.h"
+
+#include <core/client.h>
+#include <core/memory.h>
+
+#include <nvif/if000c.h>
+#include <nvif/unpack.h>
+
+static const struct nvkm_object_func nvkm_uvmm;
+struct nvkm_vmm *
+nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
+{
+ struct nvkm_object *object;
+
+ object = nvkm_object_search(client, handle, &nvkm_uvmm);
+ if (IS_ERR(object))
+ return (void *)object;
+
+ return nvkm_uvmm(object)->vmm;
+}
+
+static int
+nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ struct nvkm_client *client = uvmm->object.client;
+ union {
+ struct nvif_vmm_unmap_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma *vma;
+ int ret = -ENOSYS;
+ u64 addr;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ addr = args->v0.addr;
+ } else
+ return ret;
+
+ mutex_lock(&vmm->mutex);
+ vma = nvkm_vmm_node_search(vmm, addr);
+ if (ret = -ENOENT, !vma || vma->addr != addr) {
+ VMM_DEBUG(vmm, "lookup %016llx: %016llx",
+ addr, vma ? vma->addr : ~0ULL);
+ goto done;
+ }
+
+ if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
+ vma->user, !client->super, vma->busy);
+ goto done;
+ }
+
+ if (ret = -EINVAL, !vma->memory) {
+ VMM_DEBUG(vmm, "unmapped");
+ goto done;
+ }
+
+ nvkm_vmm_unmap_locked(vmm, vma);
+ ret = 0;
+done:
+ mutex_unlock(&vmm->mutex);
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ struct nvkm_client *client = uvmm->object.client;
+ union {
+ struct nvif_vmm_map_v0 v0;
+ } *args = argv;
+ u64 addr, size, handle, offset;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma *vma;
+ struct nvkm_memory *memory;
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
+ addr = args->v0.addr;
+ size = args->v0.size;
+ handle = args->v0.memory;
+ offset = args->v0.offset;
+ } else
+ return ret;
+
+ if (IS_ERR((memory = nvkm_umem_search(client, handle)))) {
+ VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
+ return PTR_ERR(memory);
+ }
+
+ mutex_lock(&vmm->mutex);
+ if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
+ VMM_DEBUG(vmm, "lookup %016llx", addr);
+ goto fail;
+ }
+
+ if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
+ vma->user, !client->super, vma->busy);
+ goto fail;
+ }
+
+ if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
+ if (addr + size > vma->addr + vma->size || vma->memory ||
+ (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
+ VMM_DEBUG(vmm, "split %d %d %d "
+ "%016llx %016llx %016llx %016llx",
+ !!vma->memory, vma->refd, vma->mapref,
+ addr, size, vma->addr, (u64)vma->size);
+ goto fail;
+ }
+
+ if (vma->addr != addr) {
+ const u64 tail = vma->size + vma->addr - addr;
+ if (ret = -ENOMEM, !(vma = nvkm_vma_tail(vma, tail)))
+ goto fail;
+ vma->part = true;
+ nvkm_vmm_node_insert(vmm, vma);
+ }
+
+ if (vma->size != size) {
+ const u64 tail = vma->size - size;
+ struct nvkm_vma *tmp;
+ if (ret = -ENOMEM, !(tmp = nvkm_vma_tail(vma, tail))) {
+ nvkm_vmm_unmap_region(vmm, vma);
+ goto fail;
+ }
+ tmp->part = true;
+ nvkm_vmm_node_insert(vmm, tmp);
+ }
+ }
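+ /* Mark the VMA busy so it can't be modified while the mutex is
+ * dropped for the backend mapping call below.
+ */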
+ vma->busy = true;
+ mutex_unlock(&vmm->mutex);
+
+ ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
+ if (ret == 0) {
+ /* Successful map will clear vma->busy. */
+ nvkm_memory_unref(&memory);
+ return 0;
+ }
+
+ mutex_lock(&vmm->mutex);
+ vma->busy = false;
+ nvkm_vmm_unmap_region(vmm, vma);
+fail:
+ mutex_unlock(&vmm->mutex);
+ nvkm_memory_unref(&memory);
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ struct nvkm_client *client = uvmm->object.client;
+ union {
+ struct nvif_vmm_put_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma *vma;
+ int ret = -ENOSYS;
+ u64 addr;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ addr = args->v0.addr;
+ } else
+ return ret;
+
+ mutex_lock(&vmm->mutex);
+ vma = nvkm_vmm_node_search(vmm, args->v0.addr);
+ if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
+ VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
+ vma ? vma->addr : ~0ULL, vma ? vma->part : 0);
+ goto done;
+ }
+
+ if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
+ VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
+ vma->user, !client->super, vma->busy);
+ goto done;
+ }
+
+ nvkm_vmm_put_locked(vmm, vma);
+ ret = 0;
+done:
+ mutex_unlock(&vmm->mutex);
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ struct nvkm_client *client = uvmm->object.client;
+ union {
+ struct nvif_vmm_get_v0 v0;
+ } *args = argv;
+ struct nvkm_vmm *vmm = uvmm->vmm;
+ struct nvkm_vma *vma;
+ int ret = -ENOSYS;
+ bool getref, mapref, sparse;
+ u8 page, align;
+ u64 size;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ getref = args->v0.type == NVIF_VMM_GET_V0_PTES;
+ mapref = args->v0.type == NVIF_VMM_GET_V0_ADDR;
+ sparse = args->v0.sparse;
+ page = args->v0.page;
+ align = args->v0.align;
+ size = args->v0.size;
+ } else
+ return ret;
+
+ mutex_lock(&vmm->mutex);
+ ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
+ page, align, size, &vma);
+ mutex_unlock(&vmm->mutex);
+ if (ret)
+ return ret;
+
+ args->v0.addr = vma->addr;
+ vma->user = !client->super;
+ return ret;
+}
+
+static int
+nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
+{
+ union {
+ struct nvif_vmm_page_v0 v0;
+ } *args = argv;
+ const struct nvkm_vmm_page *page;
+ int ret = -ENOSYS;
+ u8 type, index, nr;
+
+ page = uvmm->vmm->func->page;
+ for (nr = 0; page[nr].shift; nr++);
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ if ((index = args->v0.index) >= nr)
+ return -EINVAL;
+ type = page[index].type;
+ args->v0.shift = page[index].shift;
+ args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
+ args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
+ args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
+ args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
+ } else
+ return -ENOSYS;
+
+ return 0;
+}
+
+static int
+nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
+{
+ struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
+ switch (mthd) {
+ case NVIF_VMM_V0_PAGE : return nvkm_uvmm_mthd_page (uvmm, argv, argc);
+ case NVIF_VMM_V0_GET : return nvkm_uvmm_mthd_get (uvmm, argv, argc);
+ case NVIF_VMM_V0_PUT : return nvkm_uvmm_mthd_put (uvmm, argv, argc);
+ case NVIF_VMM_V0_MAP : return nvkm_uvmm_mthd_map (uvmm, argv, argc);
+ case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static void *
+nvkm_uvmm_dtor(struct nvkm_object *object)
+{
+ struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
+ nvkm_vmm_unref(&uvmm->vmm);
+ return uvmm;
+}
+
+static const struct nvkm_object_func
+nvkm_uvmm = {
+ .dtor = nvkm_uvmm_dtor,
+ .mthd = nvkm_uvmm_mthd,
+};
+
+int
+nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
+ const bool more = oclass->base.maxver >= 0;
+ union {
+ struct nvif_vmm_v0 v0;
+ } *args = argv;
+ const struct nvkm_vmm_page *page;
+ struct nvkm_uvmm *uvmm;
+ int ret = -ENOSYS;
+ u64 addr, size;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
+ addr = args->v0.addr;
+ size = args->v0.size;
+ } else
+ return ret;
+
+ if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
+ *pobject = &uvmm->object;
+
+ if (!mmu->vmm) {
+ ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc,
+ NULL, "user", &uvmm->vmm);
+ if (ret)
+ return ret;
+
+ uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
+ } else {
+ if (size)
+ return -EINVAL;
+
+ uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
+ }
+
+ page = uvmm->vmm->func->page;
+ args->v0.page_nr = 0;
+ while (page && (page++)->shift)
+ args->v0.page_nr++;
+ args->v0.addr = uvmm->vmm->start;
+ args->v0.size = uvmm->vmm->limit;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h
new file mode 100644
index 000000000000..71dab55e18a9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.h
@@ -0,0 +1,14 @@
+#ifndef __NVKM_UVMM_H__
+#define __NVKM_UVMM_H__
+#define nvkm_uvmm(p) container_of((p), struct nvkm_uvmm, object)
+#include <core/object.h>
+#include "vmm.h"
+
+struct nvkm_uvmm {
+ struct nvkm_object object;
+ struct nvkm_vmm *vmm;
+};
+
+int nvkm_uvmm_new(const struct nvkm_oclass *, void *argv, u32 argc,
+ struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
new file mode 100644
index 000000000000..e35d3e17cd7c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -0,0 +1,1513 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define NVKM_VMM_LEVELS_MAX 5
+#include "vmm.h"
+
+#include <subdev/fb.h>
+
+static void
+nvkm_vmm_pt_del(struct nvkm_vmm_pt **ppgt)
+{
+ struct nvkm_vmm_pt *pgt = *ppgt;
+ if (pgt) {
+ kvfree(pgt->pde);
+ kfree(pgt);
+ *ppgt = NULL;
+ }
+}
+
+static struct nvkm_vmm_pt *
+nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
+ const struct nvkm_vmm_page *page)
+{
+ const u32 pten = 1 << desc->bits;
+ struct nvkm_vmm_pt *pgt;
+ u32 lpte = 0;
+
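+ /* Size the per-LPTE state array: an SPT needs one byte for each
+ * LPTE of the paired LPT covering the same range, an LPT needs one
+ * byte per PTE, and PGDs/PGTs track nothing here.
+ */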
+ if (desc->type > PGT) {
+ if (desc->type == SPT) {
+ const struct nvkm_vmm_desc *pair = page[-1].desc;
+ lpte = pten >> (desc->bits - pair->bits);
+ } else {
+ lpte = pten;
+ }
+ }
+
+ if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL)))
+ return NULL;
+ pgt->page = page ? page->shift : 0;
+ pgt->sparse = sparse;
+
+ if (desc->type == PGD) {
+ pgt->pde = kvzalloc(sizeof(*pgt->pde) * pten, GFP_KERNEL);
+ if (!pgt->pde) {
+ kfree(pgt);
+ return NULL;
+ }
+ }
+
+ return pgt;
+}
+
+struct nvkm_vmm_iter {
+ const struct nvkm_vmm_page *page;
+ const struct nvkm_vmm_desc *desc;
+ struct nvkm_vmm *vmm;
+ u64 cnt;
+ u16 max, lvl;
+ u32 pte[NVKM_VMM_LEVELS_MAX];
+ struct nvkm_vmm_pt *pt[NVKM_VMM_LEVELS_MAX];
+ int flush;
+};
+
+#ifdef CONFIG_NOUVEAU_DEBUG_MMU
+static const char *
+nvkm_vmm_desc_type(const struct nvkm_vmm_desc *desc)
+{
+ switch (desc->type) {
+ case PGD: return "PGD";
+ case PGT: return "PGT";
+ case SPT: return "SPT";
+ case LPT: return "LPT";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void
+nvkm_vmm_trace(struct nvkm_vmm_iter *it, char *buf)
+{
+ int lvl;
+ for (lvl = it->max; lvl >= 0; lvl--) {
+ if (lvl >= it->lvl)
+ buf += sprintf(buf, "%05x:", it->pte[lvl]);
+ else
+ buf += sprintf(buf, "xxxxx:");
+ }
+}
+
+#define TRA(i,f,a...) do { \
+ char _buf[NVKM_VMM_LEVELS_MAX * 7]; \
+ struct nvkm_vmm_iter *_it = (i); \
+ nvkm_vmm_trace(_it, _buf); \
+ VMM_TRACE(_it->vmm, "%s "f, _buf, ##a); \
+} while(0)
+#else
+#define TRA(i,f,a...)
+#endif
+
+static inline void
+nvkm_vmm_flush_mark(struct nvkm_vmm_iter *it)
+{
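+ /* Remember the shallowest (closest to the root) level modified so
+ * far; the backend flush is issued with this depth.
+ */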
+ it->flush = min(it->flush, it->max - it->lvl);
+}
+
+static inline void
+nvkm_vmm_flush(struct nvkm_vmm_iter *it)
+{
+ if (it->flush != NVKM_VMM_LEVELS_MAX) {
+ if (it->vmm->func->flush) {
+ TRA(it, "flush: %d", it->flush);
+ it->vmm->func->flush(it->vmm, it->flush);
+ }
+ it->flush = NVKM_VMM_LEVELS_MAX;
+ }
+}
+
+static void
+nvkm_vmm_unref_pdes(struct nvkm_vmm_iter *it)
+{
+ const struct nvkm_vmm_desc *desc = it->desc;
+ const int type = desc[it->lvl].type == SPT;
+ struct nvkm_vmm_pt *pgd = it->pt[it->lvl + 1];
+ struct nvkm_vmm_pt *pgt = it->pt[it->lvl];
+ struct nvkm_mmu_pt *pt = pgt->pt[type];
+ struct nvkm_vmm *vmm = it->vmm;
+ u32 pdei = it->pte[it->lvl + 1];
+
+ /* Recurse up the tree, unreferencing/destroying unneeded PDs. */
+ it->lvl++;
+ if (--pgd->refs[0]) {
+ const struct nvkm_vmm_desc_func *func = desc[it->lvl].func;
+ /* PD has other valid PDEs, so we need a proper update. */
+ TRA(it, "PDE unmap %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
+ pgt->pt[type] = NULL;
+ if (!pgt->refs[!type]) {
+ /* PDE no longer required. */
+ if (pgd->pt[0]) {
+ if (pgt->sparse) {
+ func->sparse(vmm, pgd->pt[0], pdei, 1);
+ pgd->pde[pdei] = NVKM_VMM_PDE_SPARSE;
+ } else {
+ func->unmap(vmm, pgd->pt[0], pdei, 1);
+ pgd->pde[pdei] = NULL;
+ }
+ } else {
+ /* Special handling for Tesla-class GPUs,
+ * where there's no central PD, but each
+ * instance has its own embedded PD.
+ */
+ func->pde(vmm, pgd, pdei);
+ pgd->pde[pdei] = NULL;
+ }
+ } else {
+ /* PDE was pointing at dual-PTs and we're removing
+ * one of them, leaving the other in place.
+ */
+ func->pde(vmm, pgd, pdei);
+ }
+
+ /* GPU may have cached the PTs, flush before freeing. */
+ nvkm_vmm_flush_mark(it);
+ nvkm_vmm_flush(it);
+ } else {
+ /* PD has no valid PDEs left, so we can just destroy it. */
+ nvkm_vmm_unref_pdes(it);
+ }
+
+ /* Destroy PD/PT. */
+ TRA(it, "PDE free %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
+ nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt);
+ if (!pgt->refs[!type])
+ nvkm_vmm_pt_del(&pgt);
+ it->lvl--;
+}
+
+static void
+nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
+ const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *pair = it->page[-1].desc;
+ const u32 sptb = desc->bits - pair->bits;
+ const u32 sptn = 1 << sptb;
+ struct nvkm_vmm *vmm = it->vmm;
+ u32 spti = ptei & (sptn - 1), lpti, pteb;
+
+ /* Determine how many SPTEs are being touched under each LPTE,
+ * and drop reference counts.
+ */
+ for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
+ const u32 pten = min(sptn - spti, ptes);
+ pgt->pte[lpti] -= pten;
+ ptes -= pten;
+ }
+
+ /* We're done here if there's no corresponding LPT. */
+ if (!pgt->refs[0])
+ return;
+
+ for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
+ /* Skip over any LPTEs that still have valid SPTEs. */
+ if (pgt->pte[pteb] & NVKM_VMM_PTE_SPTES) {
+ for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+ if (!(pgt->pte[ptei] & NVKM_VMM_PTE_SPTES))
+ break;
+ }
+ continue;
+ }
+
+ /* As there are no more non-UNMAPPED SPTEs left in the range
+ * covered by a number of LPTEs, the LPTEs once again take
+ * control over their address range.
+ *
+ * Determine how many LPTEs need to transition state.
+ */
+ pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
+ for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+ if (pgt->pte[ptei] & NVKM_VMM_PTE_SPTES)
+ break;
+ pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
+ }
+
+ if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
+ TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);
+ pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);
+ } else
+ if (pair->func->invalid) {
+ /* If the MMU supports it, restore the LPTE to the
+ * INVALID state to tell the MMU there is no point
+ * trying to fetch the corresponding SPTEs.
+ */
+ TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);
+ pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);
+ }
+ }
+}
+
+static bool
+nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *desc = it->desc;
+ const int type = desc->type == SPT;
+ struct nvkm_vmm_pt *pgt = it->pt[0];
+
+ /* Drop PTE references. */
+ pgt->refs[type] -= ptes;
+
+ /* Dual-PTs need special handling, unless the PDE is becoming invalid. */
+ if (desc->type == SPT && (pgt->refs[0] || pgt->refs[1]))
+ nvkm_vmm_unref_sptes(it, pgt, desc, ptei, ptes);
+
+ /* PT no longer needed? Destroy it. */
+ if (!pgt->refs[type]) {
+ it->lvl++;
+ TRA(it, "%s empty", nvkm_vmm_desc_type(desc));
+ it->lvl--;
+ nvkm_vmm_unref_pdes(it);
+ return false; /* PTE writes for unmap() not necessary. */
+ }
+
+ return true;
+}
+
+static void
+nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
+ const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *pair = it->page[-1].desc;
+ const u32 sptb = desc->bits - pair->bits;
+ const u32 sptn = 1 << sptb;
+ struct nvkm_vmm *vmm = it->vmm;
+ u32 spti = ptei & (sptn - 1), lpti, pteb;
+
+ /* Determine how many SPTEs are being touched under each LPTE,
+ * and increase reference counts.
+ */
+ for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
+ const u32 pten = min(sptn - spti, ptes);
+ pgt->pte[lpti] += pten;
+ ptes -= pten;
+ }
+
+ /* We're done here if there's no corresponding LPT. */
+ if (!pgt->refs[0])
+ return;
+
+ for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
+ /* Skip over any LPTEs that already have valid SPTEs. */
+ if (pgt->pte[pteb] & NVKM_VMM_PTE_VALID) {
+ for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+ if (!(pgt->pte[ptei] & NVKM_VMM_PTE_VALID))
+ break;
+ }
+ continue;
+ }
+
+ /* As there are now non-UNMAPPED SPTEs in the range covered
+ * by a number of LPTEs, we need to transfer control of the
+ * address range to the SPTEs.
+ *
+ * Determine how many LPTEs need to transition state.
+ */
+ pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
+ for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
+ if (pgt->pte[ptei] & NVKM_VMM_PTE_VALID)
+ break;
+ pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
+ }
+
+ if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
+ const u32 spti = pteb * sptn;
+ const u32 sptc = ptes * sptn;
+ /* The entire LPTE is marked as sparse, we need
+ * to make sure that the SPTEs are too.
+ */
+ TRA(it, "SPTE %05x: U -> S %d PTEs", spti, sptc);
+ desc->func->sparse(vmm, pgt->pt[1], spti, sptc);
+ /* Sparse LPTEs prevent SPTEs from being accessed. */
+ TRA(it, "LPTE %05x: S -> U %d PTEs", pteb, ptes);
+ pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
+ } else
+ if (pair->func->invalid) {
+ /* MMU supports blocking SPTEs by marking an LPTE
+ * as INVALID. We need to reverse that here.
+ */
+ TRA(it, "LPTE %05x: I -> U %d PTEs", pteb, ptes);
+ pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
+ }
+ }
+}
+
+static bool
+nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *desc = it->desc;
+ const int type = desc->type == SPT;
+ struct nvkm_vmm_pt *pgt = it->pt[0];
+
+ /* Take PTE references. */
+ pgt->refs[type] += ptes;
+
+ /* Dual-PTs need special handling. */
+ if (desc->type == SPT)
+ nvkm_vmm_ref_sptes(it, pgt, desc, ptei, ptes);
+
+ return true;
+}
+
+static void
+nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
+ struct nvkm_vmm_pt *pgt, u32 ptei, u32 ptes)
+{
+ if (desc->type == PGD) {
+ while (ptes--)
+ pgt->pde[ptei++] = NVKM_VMM_PDE_SPARSE;
+ } else
+ if (desc->type == LPT) {
+ memset(&pgt->pte[ptei], NVKM_VMM_PTE_SPARSE, ptes);
+ }
+}
+
+static bool
+nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+ struct nvkm_vmm_pt *pt = it->pt[0];
+ if (it->desc->type == PGD)
+ memset(&pt->pde[ptei], 0x00, sizeof(pt->pde[0]) * ptes);
+ else
+ if (it->desc->type == LPT)
+ memset(&pt->pte[ptei], 0x00, sizeof(pt->pte[0]) * ptes);
+ return nvkm_vmm_unref_ptes(it, ptei, ptes);
+}
+
+static bool
+nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+ nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
+ return nvkm_vmm_ref_ptes(it, ptei, ptes);
+}
+
+static bool
+nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
+ const int type = desc->type == SPT;
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ const bool zero = !pgt->sparse && !desc->func->invalid;
+ struct nvkm_vmm *vmm = it->vmm;
+ struct nvkm_mmu *mmu = vmm->mmu;
+ struct nvkm_mmu_pt *pt;
+ u32 pten = 1 << desc->bits;
+ u32 pteb, ptei, ptes;
+ u32 size = desc->size * pten;
+
+ pgd->refs[0]++;
+
+ pgt->pt[type] = nvkm_mmu_ptc_get(mmu, size, desc->align, zero);
+ if (!pgt->pt[type]) {
+ it->lvl--;
+ nvkm_vmm_unref_pdes(it);
+ return false;
+ }
+
+ if (zero)
+ goto done;
+
+ pt = pgt->pt[type];
+
+ if (desc->type == LPT && pgt->refs[1]) {
+ /* SPT already exists covering the same range as this LPT,
+ * which means we need to be careful that any LPTEs which
+ * overlap valid SPTEs are unmapped as opposed to invalid
+ * or sparse, which would prevent the MMU from looking at
+ * the SPTEs on some GPUs.
+ */
+ for (ptei = pteb = 0; ptei < pten; pteb = ptei) {
+ bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
+ for (ptes = 1, ptei++; ptei < pten; ptes++, ptei++) {
+ bool next = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
+ if (spte != next)
+ break;
+ }
+
+ if (!spte) {
+ if (pgt->sparse)
+ desc->func->sparse(vmm, pt, pteb, ptes);
+ else
+ desc->func->invalid(vmm, pt, pteb, ptes);
+ memset(&pgt->pte[pteb], 0x00, ptes);
+ } else {
+ desc->func->unmap(vmm, pt, pteb, ptes);
+ while (ptes--)
+ pgt->pte[pteb++] |= NVKM_VMM_PTE_VALID;
+ }
+ }
+ } else {
+ if (pgt->sparse) {
+ nvkm_vmm_sparse_ptes(desc, pgt, 0, pten);
+ desc->func->sparse(vmm, pt, 0, pten);
+ } else {
+ desc->func->invalid(vmm, pt, 0, pten);
+ }
+ }
+
+done:
+ TRA(it, "PDE write %s", nvkm_vmm_desc_type(desc));
+ it->desc[it->lvl].func->pde(it->vmm, pgd, pdei);
+ nvkm_vmm_flush_mark(it);
+ return true;
+}
+
+static bool
+nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+
+ pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page);
+ if (!pgt) {
+ if (!pgd->refs[0])
+ nvkm_vmm_unref_pdes(it);
+ return false;
+ }
+
+ pgd->pde[pdei] = pgt;
+ return true;
+}
+
+static inline u64
+nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, const char *name, bool ref,
+ bool (*REF_PTES)(struct nvkm_vmm_iter *, u32, u32),
+ nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
+ nvkm_vmm_pxe_func CLR_PTES)
+{
+ const struct nvkm_vmm_desc *desc = page->desc;
+ struct nvkm_vmm_iter it;
+ u64 bits = addr >> page->shift;
+
+ it.page = page;
+ it.desc = desc;
+ it.vmm = vmm;
+ it.cnt = size >> page->shift;
+ it.flush = NVKM_VMM_LEVELS_MAX;
+
+ /* Deconstruct address into PTE indices for each mapping level. */
+ for (it.lvl = 0; desc[it.lvl].bits; it.lvl++) {
+ it.pte[it.lvl] = bits & ((1 << desc[it.lvl].bits) - 1);
+ bits >>= desc[it.lvl].bits;
+ }
+ it.max = --it.lvl;
+ it.pt[it.max] = vmm->pd;
+
+ it.lvl = 0;
+ TRA(&it, "%s: %016llx %016llx %d %lld PTEs", name,
+ addr, size, page->shift, it.cnt);
+ it.lvl = it.max;
+
+ /* Depth-first traversal of page tables. */
+ while (it.cnt) {
+ struct nvkm_vmm_pt *pgt = it.pt[it.lvl];
+ const int type = desc->type == SPT;
+ const u32 pten = 1 << desc->bits;
+ const u32 ptei = it.pte[0];
+ const u32 ptes = min_t(u64, it.cnt, pten - ptei);
+
+ /* Walk down the tree, finding page tables for each level. */
+ for (; it.lvl; it.lvl--) {
+ const u32 pdei = it.pte[it.lvl];
+ struct nvkm_vmm_pt *pgd = pgt;
+
+ /* Software PT. */
+ if (ref && NVKM_VMM_PDE_INVALID(pgd->pde[pdei])) {
+ if (!nvkm_vmm_ref_swpt(&it, pgd, pdei))
+ goto fail;
+ }
+ it.pt[it.lvl - 1] = pgt = pgd->pde[pdei];
+
+ /* Hardware PT.
+ *
+ * This is a separate step from above due to GF100 and
+ * newer having dual page tables at some levels, which
+ * are refcounted independently.
+ */
+ if (ref && !pgt->refs[desc[it.lvl - 1].type == SPT]) {
+ if (!nvkm_vmm_ref_hwpt(&it, pgd, pdei))
+ goto fail;
+ }
+ }
+
+ /* Handle PTE updates. */
+ if (!REF_PTES || REF_PTES(&it, ptei, ptes)) {
+ struct nvkm_mmu_pt *pt = pgt->pt[type];
+ if (MAP_PTES || CLR_PTES) {
+ if (MAP_PTES)
+ MAP_PTES(vmm, pt, ptei, ptes, map);
+ else
+ CLR_PTES(vmm, pt, ptei, ptes);
+ nvkm_vmm_flush_mark(&it);
+ }
+ }
+
+ /* Walk back up the tree to the next position. */
+ it.pte[it.lvl] += ptes;
+ it.cnt -= ptes;
+ if (it.cnt) {
+ while (it.pte[it.lvl] == (1 << desc[it.lvl].bits)) {
+ it.pte[it.lvl++] = 0;
+ it.pte[it.lvl]++;
+ }
+ }
+ }
+
+ nvkm_vmm_flush(&it);
+ return ~0ULL;
+
+fail:
+ /* Reconstruct the failure address so the caller is able to
+ * reverse any partially completed operations.
+ */
+ addr = it.pte[it.max--];
+ do {
+ addr = addr << desc[it.max].bits;
+ addr |= it.pte[it.max];
+ } while (it.max--);
+
+ return addr << page->shift;
+}
+
+static void
+nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false,
+ nvkm_vmm_sparse_unref_ptes, NULL, NULL,
+ page->desc->func->invalid ?
+ page->desc->func->invalid : page->desc->func->unmap);
+}
+
+static int
+nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
+ u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
+ true, nvkm_vmm_sparse_ref_ptes, NULL,
+ NULL, page->desc->func->sparse);
+ if (fail != ~0ULL) {
+ if ((size = fail - addr))
+ nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
+ return -ENOMEM;
+ }
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int
+nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+ int m = 0, i;
+ u64 start = addr;
+ u64 block;
+
+ while (size) {
+ /* Limit maximum page size based on remaining size. */
+ while (size < (1ULL << page[m].shift))
+ m++;
+ i = m;
+
+ /* Find largest page size suitable for alignment. */
+ while (!IS_ALIGNED(addr, 1ULL << page[i].shift))
+ i++;
+
+ /* Determine number of PTEs at this page size. */
+ if (i != m) {
+ /* Limited to alignment boundary of next page size. */
+ u64 next = 1ULL << page[i - 1].shift;
+ u64 part = ALIGN(addr, next) - addr;
+ if (size - part >= next)
+ block = (part >> page[i].shift) << page[i].shift;
+ else
+ block = (size >> page[i].shift) << page[i].shift;
+ } else {
+ block = (size >> page[i].shift) << page[i].shift;
+ }
+
+ /* Perform operation. */
+ if (ref) {
+ int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block);
+ if (ret) {
+ if ((size = addr - start))
+ nvkm_vmm_ptes_sparse(vmm, start, size, false);
+ return ret;
+ }
+ } else {
+ nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block);
+ }
+
+ size -= block;
+ addr += block;
+ }
+
+ return 0;
+}
+
+static void
+nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, bool sparse)
+{
+ const struct nvkm_vmm_desc_func *func = page->desc->func;
+ nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
+ false, nvkm_vmm_unref_ptes, NULL, NULL,
+ sparse ? func->sparse : func->invalid ? func->invalid :
+ func->unmap);
+}
+
+static int
+nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, struct nvkm_vmm_map *map,
+ nvkm_vmm_pte_func func)
+{
+ u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
+ nvkm_vmm_ref_ptes, func, map, NULL);
+ if (fail != ~0ULL) {
+ if ((size = fail - addr))
+ nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void
+nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, bool sparse)
+{
+ const struct nvkm_vmm_desc_func *func = page->desc->func;
+ nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, NULL, NULL, NULL,
+ sparse ? func->sparse : func->invalid ? func->invalid :
+ func->unmap);
+}
+
+static void
+nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size, struct nvkm_vmm_map *map,
+ nvkm_vmm_pte_func func)
+{
+ nvkm_vmm_iter(vmm, page, addr, size, "map", false,
+ NULL, func, map, NULL);
+}
+
+static void
+nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ nvkm_vmm_iter(vmm, page, addr, size, "unref", false,
+ nvkm_vmm_unref_ptes, NULL, NULL, NULL);
+}
+
+static int
+nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+ u64 addr, u64 size)
+{
+ u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true,
+ nvkm_vmm_ref_ptes, NULL, NULL, NULL);
+ if (fail != ~0ULL) {
+ if (fail != addr)
+ nvkm_vmm_ptes_put(vmm, page, addr, fail - addr);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static inline struct nvkm_vma *
+nvkm_vma_new(u64 addr, u64 size)
+{
+ struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (vma) {
+ vma->addr = addr;
+ vma->size = size;
+ vma->page = NVKM_VMA_PAGE_NONE;
+ vma->refd = NVKM_VMA_PAGE_NONE;
+ }
+ return vma;
+}
+
+struct nvkm_vma *
+nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
+{
+ struct nvkm_vma *new;
+
+ BUG_ON(vma->size == tail);
+
+ if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
+ return NULL;
+ vma->size -= tail;
+
+ new->mapref = vma->mapref;
+ new->sparse = vma->sparse;
+ new->page = vma->page;
+ new->refd = vma->refd;
+ new->used = vma->used;
+ new->part = vma->part;
+ new->user = vma->user;
+ new->busy = vma->busy;
+ list_add(&new->head, &vma->head);
+ return new;
+}
+
+static void
+nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ struct rb_node **ptr = &vmm->free.rb_node;
+ struct rb_node *parent = NULL;
+
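+ /* The free tree is keyed by size, then address, so the allocator
+ * can find the smallest block able to satisfy a request.
+ */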
+ while (*ptr) {
+ struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
+ parent = *ptr;
+ if (vma->size < this->size)
+ ptr = &parent->rb_left;
+ else
+ if (vma->size > this->size)
+ ptr = &parent->rb_right;
+ else
+ if (vma->addr < this->addr)
+ ptr = &parent->rb_left;
+ else
+ if (vma->addr > this->addr)
+ ptr = &parent->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&vma->tree, parent, ptr);
+ rb_insert_color(&vma->tree, &vmm->free);
+}
+
+void
+nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ struct rb_node **ptr = &vmm->root.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*ptr) {
+ struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
+ parent = *ptr;
+ if (vma->addr < this->addr)
+ ptr = &parent->rb_left;
+ else
+ if (vma->addr > this->addr)
+ ptr = &parent->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&vma->tree, parent, ptr);
+ rb_insert_color(&vma->tree, &vmm->root);
+}
+
+struct nvkm_vma *
+nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
+{
+ struct rb_node *node = vmm->root.rb_node;
+ while (node) {
+ struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
+ if (addr < vma->addr)
+ node = node->rb_left;
+ else
+ if (addr >= vma->addr + vma->size)
+ node = node->rb_right;
+ else
+ return vma;
+ }
+ return NULL;
+}
+
+static void
+nvkm_vmm_dtor(struct nvkm_vmm *vmm)
+{
+ struct nvkm_vma *vma;
+ struct rb_node *node;
+
+ while ((node = rb_first(&vmm->root))) {
+ struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
+ nvkm_vmm_put(vmm, &vma);
+ }
+
+ if (vmm->bootstrapped) {
+ const struct nvkm_vmm_page *page = vmm->func->page;
+ const u64 limit = vmm->limit - vmm->start;
+
+ while (page[1].shift)
+ page++;
+
+ nvkm_mmu_ptc_dump(vmm->mmu);
+ nvkm_vmm_ptes_put(vmm, page, vmm->start, limit);
+ }
+
+ vma = list_first_entry(&vmm->list, typeof(*vma), head);
+ list_del(&vma->head);
+ kfree(vma);
+ WARN_ON(!list_empty(&vmm->list));
+
+ if (vmm->nullp) {
+ dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
+ vmm->nullp, vmm->null);
+ }
+
+ if (vmm->pd) {
+ nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
+ nvkm_vmm_pt_del(&vmm->pd);
+ }
+}
+
+int
+nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
+ u32 pd_header, u64 addr, u64 size, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm *vmm)
+{
+ static struct lock_class_key _key;
+ const struct nvkm_vmm_page *page = func->page;
+ const struct nvkm_vmm_desc *desc;
+ struct nvkm_vma *vma;
+ int levels, bits = 0;
+
+ vmm->func = func;
+ vmm->mmu = mmu;
+ vmm->name = name;
+ vmm->debug = mmu->subdev.debug;
+ kref_init(&vmm->kref);
+
+ __mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key);
+
+ /* Locate the smallest page size supported by the backend, it will
+ * have the deepest nesting of page tables.
+ */
+ while (page[1].shift)
+ page++;
+
+ /* Locate the structure that describes the layout of the top-level
+ * page table, and determine the number of valid bits in a virtual
+ * address.
+ */
+ for (levels = 0, desc = page->desc; desc->bits; desc++, levels++)
+ bits += desc->bits;
+ bits += page->shift;
+ desc--;
+
+ if (WARN_ON(levels > NVKM_VMM_LEVELS_MAX))
+ return -EINVAL;
+
+ vmm->start = addr;
+ vmm->limit = size ? (addr + size) : (1ULL << bits);
+ if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
+ return -EINVAL;
+
+ /* Allocate top-level page table. */
+ vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
+ if (!vmm->pd)
+ return -ENOMEM;
+ vmm->pd->refs[0] = 1;
+ INIT_LIST_HEAD(&vmm->join);
+
+ /* ... and the GPU storage for it, except on Tesla-class GPUs that
+ * have the PD embedded in the instance structure.
+ */
+ if (desc->size) {
+ const u32 size = pd_header + desc->size * (1 << desc->bits);
+ vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
+ if (!vmm->pd->pt[0])
+ return -ENOMEM;
+ }
+
+ /* Initialise address-space MM. */
+ INIT_LIST_HEAD(&vmm->list);
+ vmm->free = RB_ROOT;
+ vmm->root = RB_ROOT;
+
+ if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
+ return -ENOMEM;
+
+ nvkm_vmm_free_insert(vmm, vma);
+ list_add(&vma->head, &vmm->list);
+ return 0;
+}
+
+int
+nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
+ u32 hdr, u64 addr, u64 size, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ if (!(*pvmm = kzalloc(sizeof(**pvmm), GFP_KERNEL)))
+ return -ENOMEM;
+ return nvkm_vmm_ctor(func, mmu, hdr, addr, size, key, name, *pvmm);
+}
+
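+/* Return the previous/next VMA in address order, or NULL when at either
+ * end of the VMM's list.
+ */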
+#define node(root, dir) ((root)->head.dir == &vmm->list) ? NULL : \
+ list_entry((root)->head.dir, struct nvkm_vma, head)
+
+void
+nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ struct nvkm_vma *next;
+
+ nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
+ nvkm_memory_unref(&vma->memory);
+
+ if (vma->part) {
+ struct nvkm_vma *prev = node(vma, prev);
+ if (!prev->memory) {
+ prev->size += vma->size;
+ rb_erase(&vma->tree, &vmm->root);
+ list_del(&vma->head);
+ kfree(vma);
+ vma = prev;
+ }
+ }
+
+ next = node(vma, next);
+ if (next && next->part) {
+ if (!next->memory) {
+ vma->size += next->size;
+ rb_erase(&next->tree, &vmm->root);
+ list_del(&next->head);
+ kfree(next);
+ }
+ }
+}
+
+void
+nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
+
+ if (vma->mapref) {
+ nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse);
+ vma->refd = NVKM_VMA_PAGE_NONE;
+ } else {
+ nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse);
+ }
+
+ nvkm_vmm_unmap_region(vmm, vma);
+}
+
+void
+nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ if (vma->memory) {
+ mutex_lock(&vmm->mutex);
+ nvkm_vmm_unmap_locked(vmm, vma);
+ mutex_unlock(&vmm->mutex);
+ }
+}
+
+static int
+nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc, struct nvkm_vmm_map *map)
+{
+ switch (nvkm_memory_target(map->memory)) {
+ case NVKM_MEM_TARGET_VRAM:
+ if (!(map->page->type & NVKM_VMM_PAGE_VRAM)) {
+ VMM_DEBUG(vmm, "%d !VRAM", map->page->shift);
+ return -EINVAL;
+ }
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ case NVKM_MEM_TARGET_NCOH:
+ if (!(map->page->type & NVKM_VMM_PAGE_HOST)) {
+ VMM_DEBUG(vmm, "%d !HOST", map->page->shift);
+ return -EINVAL;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOSYS;
+ }
+
+ if (!IS_ALIGNED( vma->addr, 1ULL << map->page->shift) ||
+ !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
+ !IS_ALIGNED( map->offset, 1ULL << map->page->shift) ||
+ nvkm_memory_page(map->memory) < map->page->shift) {
+ VMM_DEBUG(vmm, "alignment %016llx %016llx %016llx %d %d",
+ vma->addr, (u64)vma->size, map->offset, map->page->shift,
+ nvkm_memory_page(map->memory));
+ return -EINVAL;
+ }
+
+ return vmm->func->valid(vmm, argv, argc, map);
+}
+
+static int
+nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc, struct nvkm_vmm_map *map)
+{
+ for (map->page = vmm->func->page; map->page->shift; map->page++) {
+ VMM_DEBUG(vmm, "trying %d", map->page->shift);
+ if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int
+nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc, struct nvkm_vmm_map *map)
+{
+ nvkm_vmm_pte_func func;
+ int ret;
+
+ /* Make sure we won't overrun the end of the memory object. */
+ if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
+ VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
+ nvkm_memory_size(map->memory),
+ map->offset, (u64)vma->size);
+ return -EINVAL;
+ }
+
+ /* Check remaining arguments for validity. */
+ if (vma->page == NVKM_VMA_PAGE_NONE &&
+ vma->refd == NVKM_VMA_PAGE_NONE) {
+ /* Find the largest page size we can perform the mapping at. */
+ const u32 debug = vmm->debug;
+ vmm->debug = 0;
+ ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
+ vmm->debug = debug;
+ if (ret) {
+ VMM_DEBUG(vmm, "invalid at any page size");
+ nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
+ return -EINVAL;
+ }
+ } else {
+ /* Page size of the VMA is already pre-determined. */
+ if (vma->refd != NVKM_VMA_PAGE_NONE)
+ map->page = &vmm->func->page[vma->refd];
+ else
+ map->page = &vmm->func->page[vma->page];
+
+ ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
+ if (ret) {
+ VMM_DEBUG(vmm, "invalid %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* Deal with the 'offset' argument, and fetch the backend function. */
+ map->off = map->offset;
+ if (map->mem) {
+ for (; map->off; map->mem = map->mem->next) {
+ u64 size = (u64)map->mem->length << NVKM_RAM_MM_SHIFT;
+ if (size > map->off)
+ break;
+ map->off -= size;
+ }
+ func = map->page->desc->func->mem;
+ } else
+ if (map->sgl) {
+ for (; map->off; map->sgl = sg_next(map->sgl)) {
+ u64 size = sg_dma_len(map->sgl);
+ if (size > map->off)
+ break;
+ map->off -= size;
+ }
+ func = map->page->desc->func->sgl;
+ } else {
+ map->dma += map->offset >> PAGE_SHIFT;
+ map->off = map->offset & PAGE_MASK;
+ func = map->page->desc->func->dma;
+ }
+
+ /* Perform the map. */
+ if (vma->refd == NVKM_VMA_PAGE_NONE) {
+ ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
+ if (ret)
+ return ret;
+
+ vma->refd = map->page - vmm->func->page;
+ } else {
+ nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
+ }
+
+ nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
+ nvkm_memory_unref(&vma->memory);
+ vma->memory = nvkm_memory_ref(map->memory);
+ vma->tags = map->tags;
+ return 0;
+}
+
+int
+nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ int ret;
+ mutex_lock(&vmm->mutex);
+ ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
+ vma->busy = false;
+ mutex_unlock(&vmm->mutex);
+ return ret;
+}
+
+static void
+nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ struct nvkm_vma *prev, *next;
+
+ if ((prev = node(vma, prev)) && !prev->used) {
+ rb_erase(&prev->tree, &vmm->free);
+ list_del(&prev->head);
+ vma->addr = prev->addr;
+ vma->size += prev->size;
+ kfree(prev);
+ }
+
+ if ((next = node(vma, next)) && !next->used) {
+ rb_erase(&next->tree, &vmm->free);
+ list_del(&next->head);
+ vma->size += next->size;
+ kfree(next);
+ }
+
+ nvkm_vmm_free_insert(vmm, vma);
+}
+
+void
+nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+ struct nvkm_vma *next = vma;
+
+ BUG_ON(vma->part);
+
+ if (vma->mapref || !vma->sparse) {
+ do {
+ const bool map = next->memory != NULL;
+ const u8 refd = next->refd;
+ const u64 addr = next->addr;
+ u64 size = next->size;
+
+ /* Merge regions that are in the same state. */
+ while ((next = node(next, next)) && next->part &&
+ (next->memory != NULL) == map &&
+ (next->refd == refd))
+ size += next->size;
+
+ if (map) {
+ /* Region(s) are mapped, merge the unmap
+ * and dereference into a single walk of
+ * the page tree.
+ */
+ nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
+ size, vma->sparse);
+ } else
+ if (refd != NVKM_VMA_PAGE_NONE) {
+ /* Drop allocation-time PTE references. */
+ nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
+ }
+ } while (next && next->part);
+ }
+
+ /* Merge any mapped regions that were split from the initial
+ * address-space allocation back into the allocated VMA, and
+ * release memory/compression resources.
+ */
+ next = vma;
+ do {
+ if (next->memory)
+ nvkm_vmm_unmap_region(vmm, next);
+ } while ((next = node(vma, next)) && next->part);
+
+ if (vma->sparse && !vma->mapref) {
+ /* Sparse region that was allocated with a fixed page size,
+ * meaning all relevant PTEs were referenced once when the
+ * region was allocated, and remained that way, regardless
+ * of whether memory was mapped into it afterwards.
+ *
+ * The process of unmapping, unsparsing, and dereferencing
+ * PTEs can be done in a single page tree walk.
+ */
+ nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
+ } else
+ if (vma->sparse) {
+ /* Sparse region that wasn't allocated with a fixed page size,
+ * PTE references were taken both at allocation time (to make
+ * the GPU see the region as sparse), and when mapping memory
+ * into the region.
+ *
+ * The latter was handled above, and the remaining references
+ * are dealt with here.
+ */
+ nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
+ }
+
+ /* Remove VMA from the list of allocated nodes. */
+ rb_erase(&vma->tree, &vmm->root);
+
+ /* Merge VMA back into the free list. */
+ vma->page = NVKM_VMA_PAGE_NONE;
+ vma->refd = NVKM_VMA_PAGE_NONE;
+ vma->used = false;
+ vma->user = false;
+ nvkm_vmm_put_region(vmm, vma);
+}
+
+void
+nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
+{
+ struct nvkm_vma *vma = *pvma;
+ if (vma) {
+ mutex_lock(&vmm->mutex);
+ nvkm_vmm_put_locked(vmm, vma);
+ mutex_unlock(&vmm->mutex);
+ *pvma = NULL;
+ }
+}
+
+int
+nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
+ u8 shift, u8 align, u64 size, struct nvkm_vma **pvma)
+{
+ const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE];
+ struct rb_node *node = NULL, *temp;
+ struct nvkm_vma *vma = NULL, *tmp;
+ u64 addr, tail;
+ int ret;
+
+ VMM_TRACE(vmm, "getref %d mapref %d sparse %d "
+ "shift: %d align: %d size: %016llx",
+ getref, mapref, sparse, shift, align, size);
+
+ /* Zero-sized or lazily-allocated sparse VMAs make no sense. */
+ if (unlikely(!size || (!getref && !mapref && sparse))) {
+ VMM_DEBUG(vmm, "args %016llx %d %d %d",
+ size, getref, mapref, sparse);
+ return -EINVAL;
+ }
+
+ /* Tesla-class GPUs can only select page size per-PDE, which means
+ * we're required to know the mapping granularity up-front to find
+ * a suitable region of address-space.
+ *
+ * The same goes if we're requesting up-front allocation of PTEs.
+ */
+ if (unlikely((getref || vmm->func->page_block) && !shift)) {
+ VMM_DEBUG(vmm, "page size required: %d %016llx",
+ getref, vmm->func->page_block);
+ return -EINVAL;
+ }
+
+ /* If a specific page size was requested, determine its index and
+ * make sure the requested size is a multiple of the page size.
+ */
+ if (shift) {
+ for (page = vmm->func->page; page->shift; page++) {
+ if (shift == page->shift)
+ break;
+ }
+
+ if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
+ VMM_DEBUG(vmm, "page %d %016llx", shift, size);
+ return -EINVAL;
+ }
+ align = max_t(u8, align, shift);
+ } else {
+ align = max_t(u8, align, 12);
+ }
+
+ /* Locate smallest block that can possibly satisfy the allocation. */
+ temp = vmm->free.rb_node;
+ while (temp) {
+ struct nvkm_vma *this = rb_entry(temp, typeof(*this), tree);
+ if (this->size < size) {
+ temp = temp->rb_right;
+ } else {
+ node = temp;
+ temp = temp->rb_left;
+ }
+ }
+
+ if (unlikely(!node))
+ return -ENOSPC;
+
+ /* Take into account alignment restrictions, trying larger blocks
+ * in turn until we find a suitable free block.
+ */
+ do {
+ struct nvkm_vma *this = rb_entry(node, typeof(*this), tree);
+ struct nvkm_vma *prev = node(this, prev);
+ struct nvkm_vma *next = node(this, next);
+ const int p = page - vmm->func->page;
+
+ addr = this->addr;
+ if (vmm->func->page_block && prev && prev->page != p)
+ addr = ALIGN(addr, vmm->func->page_block);
+ addr = ALIGN(addr, 1ULL << align);
+
+ tail = this->addr + this->size;
+ if (vmm->func->page_block && next && next->page != p)
+ tail = ALIGN_DOWN(tail, vmm->func->page_block);
+
+ if (addr <= tail && tail - addr >= size) {
+ rb_erase(&this->tree, &vmm->free);
+ vma = this;
+ break;
+ }
+ } while ((node = rb_next(node)));
+
+ if (unlikely(!vma))
+ return -ENOSPC;
+
+ /* If the VMA we found isn't already exactly the requested size,
+ * it needs to be split, and the remaining free blocks returned.
+ */
+ if (addr != vma->addr) {
+ if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) {
+ nvkm_vmm_put_region(vmm, vma);
+ return -ENOMEM;
+ }
+ nvkm_vmm_free_insert(vmm, vma);
+ vma = tmp;
+ }
+
+ if (size != vma->size) {
+ if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
+ nvkm_vmm_put_region(vmm, vma);
+ return -ENOMEM;
+ }
+ nvkm_vmm_free_insert(vmm, tmp);
+ }
+
+ /* Pre-allocate page tables and/or setup sparse mappings. */
+ if (sparse && getref)
+ ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
+ else if (sparse)
+ ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
+ else if (getref)
+ ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
+ else
+ ret = 0;
+ if (ret) {
+ nvkm_vmm_put_region(vmm, vma);
+ return ret;
+ }
+
+ vma->mapref = mapref && !getref;
+ vma->sparse = sparse;
+ vma->page = page - vmm->func->page;
+ vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE;
+ vma->used = true;
+ nvkm_vmm_node_insert(vmm, vma);
+ *pvma = vma;
+ return 0;
+}
+
+int
+nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
+{
+ int ret;
+ mutex_lock(&vmm->mutex);
+ ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
+ mutex_unlock(&vmm->mutex);
+ return ret;
+}
+
+void
+nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ if (vmm->func->part && inst) {
+ mutex_lock(&vmm->mutex);
+ vmm->func->part(vmm, inst);
+ mutex_unlock(&vmm->mutex);
+ }
+}
+
+int
+nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ int ret = 0;
+ if (vmm->func->join) {
+ mutex_lock(&vmm->mutex);
+ ret = vmm->func->join(vmm, inst);
+ mutex_unlock(&vmm->mutex);
+ }
+ return ret;
+}
+
+static bool
+nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, u32 ptei, u32 ptes)
+{
+ const struct nvkm_vmm_desc *desc = it->desc;
+ const int type = desc->type == SPT;
+ nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm);
+ return false;
+}
+
+int
+nvkm_vmm_boot(struct nvkm_vmm *vmm)
+{
+ const struct nvkm_vmm_page *page = vmm->func->page;
+ const u64 limit = vmm->limit - vmm->start;
+ int ret;
+
+ while (page[1].shift)
+ page++;
+
+ ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit);
+ if (ret)
+ return ret;
+
+ nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false,
+ nvkm_vmm_boot_ptes, NULL, NULL, NULL);
+ vmm->bootstrapped = true;
+ return 0;
+}
+
+static void
+nvkm_vmm_del(struct kref *kref)
+{
+ struct nvkm_vmm *vmm = container_of(kref, typeof(*vmm), kref);
+ nvkm_vmm_dtor(vmm);
+ kfree(vmm);
+}
+
+void
+nvkm_vmm_unref(struct nvkm_vmm **pvmm)
+{
+ struct nvkm_vmm *vmm = *pvmm;
+ if (vmm) {
+ kref_put(&vmm->kref, nvkm_vmm_del);
+ *pvmm = NULL;
+ }
+}
+
+struct nvkm_vmm *
+nvkm_vmm_ref(struct nvkm_vmm *vmm)
+{
+ if (vmm)
+ kref_get(&vmm->kref);
+ return vmm;
+}
+
+int
+nvkm_vmm_new(struct nvkm_device *device, u64 addr, u64 size, void *argv,
+ u32 argc, struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ struct nvkm_mmu *mmu = device->mmu;
+ struct nvkm_vmm *vmm = NULL;
+ int ret;
+ ret = mmu->func->vmm.ctor(mmu, addr, size, argv, argc, key, name, &vmm);
+ if (ret)
+ nvkm_vmm_unref(&vmm);
+ *pvmm = vmm;
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
new file mode 100644
index 000000000000..6d8f61ea467a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -0,0 +1,310 @@
+#ifndef __NVKM_VMM_H__
+#define __NVKM_VMM_H__
+#include "priv.h"
+#include <core/memory.h>
+enum nvkm_memory_target;
+
+struct nvkm_vmm_pt {
+ /* Some GPUs have a mapping level with dual page tables to
+ * support large and small pages in the same address-range.
+ *
+ * We track the state of both page tables in one place, which
+ * is why there's multiple PT pointers/refcounts here.
+ */
+ struct nvkm_mmu_pt *pt[2];
+ u32 refs[2];
+
+ /* Page size handled by this PT.
+ *
+ * Tesla backend needs to know this when writing PDEs,
+ * otherwise unnecessary.
+ */
+ u8 page;
+
+ /* Entire page table sparse.
+ *
+ * Used to propagate sparseness to child page tables.
+ */
+ bool sparse:1;
+
+ /* Tracking for page directories.
+ *
+ * The array is indexed by PDE, and will either point to the
+ * child page table, or indicate the PDE is marked as sparse.
+ **/
+#define NVKM_VMM_PDE_INVALID(pde) IS_ERR_OR_NULL(pde)
+#define NVKM_VMM_PDE_SPARSED(pde) IS_ERR(pde)
+#define NVKM_VMM_PDE_SPARSE ERR_PTR(-EBUSY)
+ struct nvkm_vmm_pt **pde;
+
+ /* Tracking for dual page tables.
+ *
+ * There's one entry for each LPTE, keeping track of whether
+ * there are valid SPTEs in the same address-range.
+ *
+ * This information is used to manage LPTE state transitions.
+ */
+#define NVKM_VMM_PTE_SPARSE 0x80
+#define NVKM_VMM_PTE_VALID 0x40
+#define NVKM_VMM_PTE_SPTES 0x3f
+ u8 pte[];
+};
+
+typedef void (*nvkm_vmm_pxe_func)(struct nvkm_vmm *,
+ struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
+typedef void (*nvkm_vmm_pde_func)(struct nvkm_vmm *,
+ struct nvkm_vmm_pt *, u32 pdei);
+typedef void (*nvkm_vmm_pte_func)(struct nvkm_vmm *, struct nvkm_mmu_pt *,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *);
+
+struct nvkm_vmm_desc_func {
+ nvkm_vmm_pxe_func invalid;
+ nvkm_vmm_pxe_func unmap;
+ nvkm_vmm_pxe_func sparse;
+
+ nvkm_vmm_pde_func pde;
+
+ nvkm_vmm_pte_func mem;
+ nvkm_vmm_pte_func dma;
+ nvkm_vmm_pte_func sgl;
+};
+
+extern const struct nvkm_vmm_desc_func gf100_vmm_pgd;
+void gf100_vmm_pgd_pde(struct nvkm_vmm *, struct nvkm_vmm_pt *, u32);
+extern const struct nvkm_vmm_desc_func gf100_vmm_pgt;
+void gf100_vmm_pgt_unmap(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);
+void gf100_vmm_pgt_mem(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
+ struct nvkm_vmm_map *);
+void gf100_vmm_pgt_dma(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
+ struct nvkm_vmm_map *);
+void gf100_vmm_pgt_sgl(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
+ struct nvkm_vmm_map *);
+
+void gk104_vmm_lpt_invalid(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);
+
+struct nvkm_vmm_desc {
+ enum {
+ PGD,
+ PGT,
+ SPT,
+ LPT,
+ } type;
+ u8 bits; /* VMA bits covered by PT. */
+ u8 size; /* Bytes-per-PTE. */
+ u32 align; /* PT address alignment. */
+ const struct nvkm_vmm_desc_func *func;
+};
+
+extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
+extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
+extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
+extern const struct nvkm_vmm_desc gk104_vmm_desc_17_17[];
+
+extern const struct nvkm_vmm_desc gm200_vmm_desc_16_12[];
+extern const struct nvkm_vmm_desc gm200_vmm_desc_16_16[];
+extern const struct nvkm_vmm_desc gm200_vmm_desc_17_12[];
+extern const struct nvkm_vmm_desc gm200_vmm_desc_17_17[];
+
+extern const struct nvkm_vmm_desc gp100_vmm_desc_12[];
+extern const struct nvkm_vmm_desc gp100_vmm_desc_16[];
+
+struct nvkm_vmm_page {
+ u8 shift;
+ const struct nvkm_vmm_desc *desc;
+#define NVKM_VMM_PAGE_SPARSE 0x01
+#define NVKM_VMM_PAGE_VRAM 0x02
+#define NVKM_VMM_PAGE_HOST 0x04
+#define NVKM_VMM_PAGE_COMP 0x08
+#define NVKM_VMM_PAGE_Sxxx (NVKM_VMM_PAGE_SPARSE)
+#define NVKM_VMM_PAGE_xVxx (NVKM_VMM_PAGE_VRAM)
+#define NVKM_VMM_PAGE_SVxx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_VRAM)
+#define NVKM_VMM_PAGE_xxHx (NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_SxHx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_xVHx (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_SVHx (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_HOST)
+#define NVKM_VMM_PAGE_xVxC (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_COMP)
+#define NVKM_VMM_PAGE_SVxC (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_COMP)
+#define NVKM_VMM_PAGE_xxHC (NVKM_VMM_PAGE_xxHx | NVKM_VMM_PAGE_COMP)
+#define NVKM_VMM_PAGE_SxHC (NVKM_VMM_PAGE_SxHx | NVKM_VMM_PAGE_COMP)
+ u8 type;
+};
+
+struct nvkm_vmm_func {
+ int (*join)(struct nvkm_vmm *, struct nvkm_memory *inst);
+ void (*part)(struct nvkm_vmm *, struct nvkm_memory *inst);
+
+ int (*aper)(enum nvkm_memory_target);
+ int (*valid)(struct nvkm_vmm *, void *argv, u32 argc,
+ struct nvkm_vmm_map *);
+ void (*flush)(struct nvkm_vmm *, int depth);
+
+ u64 page_block;
+ const struct nvkm_vmm_page page[];
+};
+
+struct nvkm_vmm_join {
+ struct nvkm_memory *inst;
+ struct list_head head;
+};
+
+int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
+ u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
+ const char *name, struct nvkm_vmm **);
+int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
+ u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
+ const char *name, struct nvkm_vmm *);
+struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
+int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
+ bool sparse, u8 page, u8 align, u64 size,
+ struct nvkm_vma **pvma);
+void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
+void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
+void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
+
+struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
+void nvkm_vmm_node_insert(struct nvkm_vmm *, struct nvkm_vma *);
+
+int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
+ u64, u64, void *, u32, struct lock_class_key *,
+ const char *, struct nvkm_vmm **);
+int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+
+int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
+ struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gf100_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
+int gf100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+void gf100_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
+int gf100_vmm_aper(enum nvkm_memory_target);
+int gf100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+void gf100_vmm_flush_(struct nvkm_vmm *, int);
+void gf100_vmm_flush(struct nvkm_vmm *, int);
+
+int gk20a_vmm_aper(enum nvkm_memory_target);
+
+int gm200_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
+ struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gm200_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
+int gm200_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+
+int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
+void gp100_vmm_flush(struct nvkm_vmm *, int);
+
+int nv04_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int nv41_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gk104_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gk20a_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *, struct nvkm_vmm **);
+int gm200_vmm_new_fixed(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gm200_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gm20b_vmm_new_fixed(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gm20b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gp100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+int gp10b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
+
+#define VMM_PRINT(l,v,p,f,a...) do { \
+ struct nvkm_vmm *_vmm = (v); \
+ if (CONFIG_NOUVEAU_DEBUG >= (l) && _vmm->debug >= (l)) { \
+ nvkm_printk_(&_vmm->mmu->subdev, 0, p, "%s: "f"\n", \
+ _vmm->name, ##a); \
+ } \
+} while(0)
+#define VMM_DEBUG(v,f,a...) VMM_PRINT(NV_DBG_DEBUG, (v), info, f, ##a)
+#define VMM_TRACE(v,f,a...) VMM_PRINT(NV_DBG_TRACE, (v), info, f, ##a)
+#define VMM_SPAM(v,f,a...) VMM_PRINT(NV_DBG_SPAM , (v), dbg, f, ##a)
+
+#define VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,BASE,SIZE,NEXT) do { \
+ nvkm_kmap((PT)->memory); \
+ while (PTEN) { \
+ u64 _ptes = ((SIZE) - MAP->off) >> MAP->page->shift; \
+ u64 _addr = ((BASE) + MAP->off); \
+ \
+ if (_ptes > PTEN) { \
+ MAP->off += PTEN << MAP->page->shift; \
+ _ptes = PTEN; \
+ } else { \
+ MAP->off = 0; \
+ NEXT; \
+ } \
+ \
+ VMM_SPAM(VMM, "ITER %08x %08x PTE(s)", PTEI, (u32)_ptes); \
+ \
+ FILL(VMM, PT, PTEI, _ptes, MAP, _addr); \
+ PTEI += _ptes; \
+ PTEN -= _ptes; \
+ }; \
+ nvkm_done((PT)->memory); \
+} while(0)
+
+#define VMM_MAP_ITER_MEM(VMM,PT,PTEI,PTEN,MAP,FILL) \
+ VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
+ ((u64)MAP->mem->offset << NVKM_RAM_MM_SHIFT), \
+ ((u64)MAP->mem->length << NVKM_RAM_MM_SHIFT), \
+ (MAP->mem = MAP->mem->next))
+#define VMM_MAP_ITER_DMA(VMM,PT,PTEI,PTEN,MAP,FILL) \
+ VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
+ *MAP->dma, PAGE_SIZE, MAP->dma++)
+#define VMM_MAP_ITER_SGL(VMM,PT,PTEI,PTEN,MAP,FILL) \
+ VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
+ sg_dma_address(MAP->sgl), sg_dma_len(MAP->sgl), \
+ (MAP->sgl = sg_next(MAP->sgl)))
+
+#define VMM_FO(m,o,d,c,b) nvkm_fo##b((m)->memory, (o), (d), (c))
+#define VMM_WO(m,o,d,c,b) nvkm_wo##b((m)->memory, (o), (d))
+#define VMM_XO(m,v,o,d,c,b,fn,f,a...) do { \
+ const u32 _pteo = (o); u##b _data = (d); \
+ VMM_SPAM((v), " %010llx "f, (m)->addr + _pteo, _data, ##a); \
+ VMM_##fn((m), (m)->base + _pteo, _data, (c), b); \
+} while(0)
+
+#define VMM_WO032(m,v,o,d) VMM_XO((m),(v),(o),(d), 1, 32, WO, "%08x")
+#define VMM_FO032(m,v,o,d,c) \
+ VMM_XO((m),(v),(o),(d),(c), 32, FO, "%08x %08x", (c))
+
+#define VMM_WO064(m,v,o,d) VMM_XO((m),(v),(o),(d), 1, 64, WO, "%016llx")
+#define VMM_FO064(m,v,o,d,c) \
+ VMM_XO((m),(v),(o),(d),(c), 64, FO, "%016llx %08x", (c))
+
+#define VMM_XO128(m,v,o,lo,hi,c,f,a...) do { \
+ u32 _pteo = (o), _ptes = (c); \
+ const u64 _addr = (m)->addr + _pteo; \
+ VMM_SPAM((v), " %010llx %016llx%016llx"f, _addr, (hi), (lo), ##a); \
+ while (_ptes--) { \
+ nvkm_wo64((m)->memory, (m)->base + _pteo + 0, (lo)); \
+ nvkm_wo64((m)->memory, (m)->base + _pteo + 8, (hi)); \
+ _pteo += 0x10; \
+ } \
+} while(0)
+
+#define VMM_WO128(m,v,o,lo,hi) VMM_XO128((m),(v),(o),(lo),(hi), 1, "")
+#define VMM_FO128(m,v,o,lo,hi,c) do { \
+ nvkm_kmap((m)->memory); \
+ VMM_XO128((m),(v),(o),(lo),(hi),(c), " %08x", (c)); \
+ nvkm_done((m)->memory); \
+} while(0)
+#endif
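The NVKM_VMM_PAGE_* combinations above encode page capabilities positionally (Sparse / VRAM / Host / Comp). A hypothetical decoder, shown only to make the naming scheme concrete; it is not added by the patch.

        /* Renders an nvkm_vmm_page.type byte with the same positional naming as
         * the NVKM_VMM_PAGE_* macros, e.g. NVKM_VMM_PAGE_SVHx becomes "SVH-".
         */
        static void
        example_page_type_name(u8 type, char name[5])
        {
                name[0] = (type & NVKM_VMM_PAGE_SPARSE) ? 'S' : '-';
                name[1] = (type & NVKM_VMM_PAGE_VRAM)   ? 'V' : '-';
                name[2] = (type & NVKM_VMM_PAGE_HOST)   ? 'H' : '-';
                name[3] = (type & NVKM_VMM_PAGE_COMP)   ? 'C' : '-';
                name[4] = '\0';
        }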
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
new file mode 100644
index 000000000000..faf5a7e9265e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+#include <subdev/ltc.h>
+#include <subdev/timer.h>
+
+#include <nvif/if900d.h>
+#include <nvif/unpack.h>
+
+static inline void
+gf100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 base = (addr >> 8) | map->type;
+ u64 data = base;
+
+ if (map->ctag && !(map->next & (1ULL << 44))) {
+ while (ptes--) {
+ data = base | ((map->ctag >> 1) << 44);
+ if (!(map->ctag++ & 1))
+ data |= BIT_ULL(60);
+
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ base += map->next;
+ }
+ } else {
+ map->type += ptes * map->ctag;
+
+ while (ptes--) {
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ data += map->next;
+ }
+ }
+}
+
+void
+gf100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
+}
+
+void
+gf100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ if (map->page->shift == PAGE_SHIFT) {
+ VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u64 data = (*map->dma++ >> 8) | map->type;
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ map->type += map->ctag;
+ }
+ nvkm_done(pt->memory);
+ return;
+ }
+
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
+}
+
+void
+gf100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte);
+}
+
+void
+gf100_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
+}
+
+const struct nvkm_vmm_desc_func
+gf100_vmm_pgt = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .mem = gf100_vmm_pgt_mem,
+ .dma = gf100_vmm_pgt_dma,
+ .sgl = gf100_vmm_pgt_sgl,
+};
+
+void
+gf100_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ struct nvkm_mmu_pt *pt;
+ u64 data = 0;
+
+ if ((pt = pgt->pt[0])) {
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 0; break;
+ case NVKM_MEM_TARGET_HOST: data |= 2ULL << 0;
+ data |= BIT_ULL(35); /* VOL */
+ break;
+ case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 0; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ data |= pt->addr >> 8;
+ }
+
+ if ((pt = pgt->pt[1])) {
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM: data |= 1ULL << 32; break;
+ case NVKM_MEM_TARGET_HOST: data |= 2ULL << 32;
+ data |= BIT_ULL(34); /* VOL */
+ break;
+ case NVKM_MEM_TARGET_NCOH: data |= 3ULL << 32; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ data |= pt->addr << 24;
+ }
+
+ nvkm_kmap(pd->memory);
+ VMM_WO064(pd, vmm, pdei * 8, data);
+ nvkm_done(pd->memory);
+}
+
+const struct nvkm_vmm_desc_func
+gf100_vmm_pgd = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .pde = gf100_vmm_pgd_pde,
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_17_12[] = {
+ { SPT, 15, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_17_17[] = {
+ { LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_16_12[] = {
+ { SPT, 14, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+gf100_vmm_desc_16_16[] = {
+ { LPT, 10, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+void
+gf100_vmm_flush_(struct nvkm_vmm *vmm, int depth)
+{
+ struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 type = depth << 24;
+
+ type = 0x00000001; /* PAGE_ALL */
+ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+ type |= 0x00000004; /* HUB_ONLY */
+
+ mutex_lock(&subdev->mutex);
+ /* Looks like maybe a "free flush slots" counter; the
+ * faster you write to 0x100cbc, the more it decreases.
+ */
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100c80) & 0x00ff0000)
+ break;
+ );
+
+ nvkm_wr32(device, 0x100cb8, vmm->pd->pt[0]->addr >> 8);
+ nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
+
+ /* Wait for flush to be queued? */
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+ break;
+ );
+ mutex_unlock(&subdev->mutex);
+}
+
+void
+gf100_vmm_flush(struct nvkm_vmm *vmm, int depth)
+{
+ gf100_vmm_flush_(vmm, 0);
+}
+
+int
+gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
+ const struct nvkm_vmm_page *page = map->page;
+ const bool gm20x = page->desc->func->sparse != NULL;
+ union {
+ struct gf100_vmm_map_vn vn;
+ struct gf100_vmm_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ struct nvkm_memory *memory = map->memory;
+ u8 kind, priv, ro, vol;
+ int kindn, aper, ret = -ENOSYS;
+ const u8 *kindm;
+
+ map->next = (1 << page->shift) >> 8;
+ map->type = map->ctag = 0;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ vol = !!args->v0.vol;
+ ro = !!args->v0.ro;
+ priv = !!args->v0.priv;
+ kind = args->v0.kind;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ vol = target == NVKM_MEM_TARGET_HOST;
+ ro = 0;
+ priv = 0;
+ kind = 0x00;
+ } else {
+ VMM_DEBUG(vmm, "args");
+ return ret;
+ }
+
+ aper = vmm->func->aper(target);
+ if (WARN_ON(aper < 0))
+ return aper;
+
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
+ if (kind >= kindn || kindm[kind] == 0xff) {
+ VMM_DEBUG(vmm, "kind %02x", kind);
+ return -EINVAL;
+ }
+
+ if (kindm[kind] != kind) {
+ u32 comp = (page->shift == 16 && !gm20x) ? 16 : 17;
+ u32 tags = ALIGN(nvkm_memory_size(memory), 1 << 17) >> comp;
+ if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
+ VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
+ return -EINVAL;
+ }
+
+ ret = nvkm_memory_tags_get(memory, device, tags,
+ nvkm_ltc_tags_clear,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
+
+ if (map->tags->mn) {
+ u64 tags = map->tags->mn->offset + (map->offset >> 17);
+ if (page->shift == 17 || !gm20x) {
+ map->type |= tags << 44;
+ map->ctag |= 1ULL << 44;
+ map->next |= 1ULL << 44;
+ } else {
+ map->ctag |= tags << 1 | 1;
+ }
+ } else {
+ kind = kindm[kind];
+ }
+ }
+
+ map->type |= BIT(0);
+ map->type |= (u64)priv << 1;
+ map->type |= (u64) ro << 2;
+ map->type |= (u64) vol << 32;
+ map->type |= (u64)aper << 33;
+ map->type |= (u64)kind << 36;
+ return 0;
+}
+
+int
+gf100_vmm_aper(enum nvkm_memory_target target)
+{
+ switch (target) {
+ case NVKM_MEM_TARGET_VRAM: return 0;
+ case NVKM_MEM_TARGET_HOST: return 2;
+ case NVKM_MEM_TARGET_NCOH: return 3;
+ default:
+ return -EINVAL;
+ }
+}
+
+void
+gf100_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ nvkm_fo64(inst, 0x0200, 0x00000000, 2);
+}
+
+int
+gf100_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
+{
+ struct nvkm_mmu_pt *pd = vmm->pd->pt[0];
+
+ switch (nvkm_memory_target(pd->memory)) {
+ case NVKM_MEM_TARGET_VRAM: base |= 0ULL << 0; break;
+ case NVKM_MEM_TARGET_HOST: base |= 2ULL << 0;
+ base |= BIT_ULL(2) /* VOL. */;
+ break;
+ case NVKM_MEM_TARGET_NCOH: base |= 3ULL << 0; break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ base |= pd->addr;
+
+ nvkm_kmap(inst);
+ nvkm_wo64(inst, 0x0200, base);
+ nvkm_wo64(inst, 0x0208, vmm->limit - 1);
+ nvkm_done(inst);
+ return 0;
+}
+
+int
+gf100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ return gf100_vmm_join_(vmm, inst, 0);
+}
+
+static const struct nvkm_vmm_func
+gf100_vmm_17 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 17, &gf100_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &gf100_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gf100_vmm_16 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 16, &gf100_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &gf100_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+int
+gf100_vmm_new_(const struct nvkm_vmm_func *func_16,
+ const struct nvkm_vmm_func *func_17,
+ struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ switch (mmu->subdev.device->fb->page) {
+ case 16: return nv04_vmm_new_(func_16, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+ case 17: return nv04_vmm_new_(func_17, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
+int
+gf100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, addr,
+ size, argv, argc, key, name, pvmm);
+}
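For reference, the PTE word that gf100_vmm_valid() and gf100_vmm_pgt_pte() assemble in this file can be summarized by a hypothetical helper like the one below; it only restates the bit layout visible above and is not a function the patch adds.

        static u64
        example_gf100_pte(u64 addr, bool priv, bool ro, bool vol, u8 aper, u8 kind)
        {
                u64 pte = addr >> 8;    /* page address, as written by gf100_vmm_pgt_pte() */

                pte |= BIT_ULL(0);      /* VALID */
                pte |= (u64)priv << 1;
                pte |= (u64)ro   << 2;
                pte |= (u64)vol  << 32;
                pte |= (u64)aper << 33;
                pte |= (u64)kind << 36;
                return pte;             /* compression tags (bits 44+) omitted */
        }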
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c
new file mode 100644
index 000000000000..0ebb7bccfcd2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk104.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+void
+gk104_vmm_lpt_invalid(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ /* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
+ VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(1) /* PRIV. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gk104_vmm_lpt = {
+ .invalid = gk104_vmm_lpt_invalid,
+ .unmap = gf100_vmm_pgt_unmap,
+ .mem = gf100_vmm_pgt_mem,
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_17_12[] = {
+ { SPT, 15, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_17_17[] = {
+ { LPT, 10, 8, 0x1000, &gk104_vmm_lpt },
+ { PGD, 13, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_16_12[] = {
+ { SPT, 14, 8, 0x1000, &gf100_vmm_pgt },
+ { PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gk104_vmm_desc_16_16[] = {
+ { LPT, 10, 8, 0x1000, &gk104_vmm_lpt },
+ { PGD, 14, 8, 0x1000, &gf100_vmm_pgd },
+ {}
+};
+
+static const struct nvkm_vmm_func
+gk104_vmm_17 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gk104_vmm_16 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 16, &gk104_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &gk104_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+int
+gk104_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gk104_vmm_16, &gk104_vmm_17, mmu, addr,
+ size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c
new file mode 100644
index 000000000000..8086994a0446
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgk20a.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <core/memory.h>
+
+int
+gk20a_vmm_aper(enum nvkm_memory_target target)
+{
+ switch (target) {
+ case NVKM_MEM_TARGET_NCOH: return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct nvkm_vmm_func
+gk20a_vmm_17 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 17, &gk104_vmm_desc_17_17[0], NVKM_VMM_PAGE_xxHC },
+ { 12, &gk104_vmm_desc_17_12[0], NVKM_VMM_PAGE_xxHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gk20a_vmm_16 = {
+ .join = gf100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 16, &gk104_vmm_desc_16_16[0], NVKM_VMM_PAGE_xxHC },
+ { 12, &gk104_vmm_desc_16_12[0], NVKM_VMM_PAGE_xxHx },
+ {}
+ }
+};
+
+int
+gk20a_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gk20a_vmm_16, &gk20a_vmm_17, mmu, addr,
+ size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
new file mode 100644
index 000000000000..a1676a4644fe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm200.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <nvif/ifb00d.h>
+#include <nvif/unpack.h>
+
+static void
+gm200_vmm_pgt_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ /* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
+ VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(32) /* VOL. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gm200_vmm_spt = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gm200_vmm_pgt_sparse,
+ .mem = gf100_vmm_pgt_mem,
+ .dma = gf100_vmm_pgt_dma,
+ .sgl = gf100_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc_func
+gm200_vmm_lpt = {
+ .invalid = gk104_vmm_lpt_invalid,
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gm200_vmm_pgt_sparse,
+ .mem = gf100_vmm_pgt_mem,
+};
+
+static void
+gm200_vmm_pgd_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ /* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
+ VMM_FO064(pt, vmm, pdei * 8, BIT_ULL(35) /* VOL_BIG. */, pdes);
+}
+
+static const struct nvkm_vmm_desc_func
+gm200_vmm_pgd = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gm200_vmm_pgd_sparse,
+ .pde = gf100_vmm_pgd_pde,
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_17_12[] = {
+ { SPT, 15, 8, 0x1000, &gm200_vmm_spt },
+ { PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_17_17[] = {
+ { LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
+ { PGD, 13, 8, 0x1000, &gm200_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_16_12[] = {
+ { SPT, 14, 8, 0x1000, &gm200_vmm_spt },
+ { PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gm200_vmm_desc_16_16[] = {
+ { LPT, 10, 8, 0x1000, &gm200_vmm_lpt },
+ { PGD, 14, 8, 0x1000, &gm200_vmm_pgd },
+ {}
+};
+
+int
+gm200_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base)
+{
+ if (vmm->func->page[1].shift == 16)
+ base |= BIT_ULL(11);
+ return gf100_vmm_join_(vmm, inst, base);
+}
+
+int
+gm200_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ return gm200_vmm_join_(vmm, inst, 0);
+}
+
+static const struct nvkm_vmm_func
+gm200_vmm_17 = {
+ .join = gm200_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
+ { 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gm200_vmm_desc_17_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gm200_vmm_16 = {
+ .join = gm200_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
+ { 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gm200_vmm_desc_16_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+int
+gm200_vmm_new_(const struct nvkm_vmm_func *func_16,
+ const struct nvkm_vmm_func *func_17,
+ struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ const struct nvkm_vmm_func *func;
+ union {
+ struct gm200_vmm_vn vn;
+ struct gm200_vmm_v0 v0;
+ } *args = argv;
+ int ret = -ENOSYS;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ switch (args->v0.bigpage) {
+ case 16: func = func_16; break;
+ case 17: func = func_17; break;
+ default:
+ return -EINVAL;
+ }
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ func = func_17;
+ } else
+ return ret;
+
+ return nvkm_vmm_new_(func, mmu, 0, addr, size, key, name, pvmm);
+}
+
+int
+gm200_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return gm200_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, addr,
+ size, argv, argc, key, name, pvmm);
+}
+
+int
+gm200_vmm_new_fixed(struct nvkm_mmu *mmu, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gm200_vmm_16, &gm200_vmm_17, mmu, addr,
+ size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
new file mode 100644
index 000000000000..64d4b6cff8dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgm20b.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+static const struct nvkm_vmm_func
+gm20b_vmm_17 = {
+ .join = gm200_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gk20a_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 27, &gm200_vmm_desc_17_17[1], NVKM_VMM_PAGE_Sxxx },
+ { 17, &gm200_vmm_desc_17_17[0], NVKM_VMM_PAGE_SxHC },
+ { 12, &gm200_vmm_desc_17_12[0], NVKM_VMM_PAGE_SxHx },
+ {}
+ }
+};
+
+static const struct nvkm_vmm_func
+gm20b_vmm_16 = {
+ .join = gm200_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gk20a_vmm_aper,
+ .valid = gf100_vmm_valid,
+ .flush = gf100_vmm_flush,
+ .page = {
+ { 27, &gm200_vmm_desc_16_16[1], NVKM_VMM_PAGE_Sxxx },
+ { 16, &gm200_vmm_desc_16_16[0], NVKM_VMM_PAGE_SxHC },
+ { 12, &gm200_vmm_desc_16_12[0], NVKM_VMM_PAGE_SxHx },
+ {}
+ }
+};
+
+int
+gm20b_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return gm200_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, addr,
+ size, argv, argc, key, name, pvmm);
+}
+
+int
+gm20b_vmm_new_fixed(struct nvkm_mmu *mmu, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return gf100_vmm_new_(&gm20b_vmm_16, &gm20b_vmm_17, mmu, addr,
+ size, argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
new file mode 100644
index 000000000000..059fafe0e771
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -0,0 +1,347 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+#include <subdev/ltc.h>
+
+#include <nvif/ifc00d.h>
+#include <nvif/unpack.h>
+
+static inline void
+gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 data = (addr >> 4) | map->type;
+
+ map->type += ptes * map->ctag;
+
+ while (ptes--) {
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ data += map->next;
+ }
+}
+
+static void
+gp100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
+}
+
+static void
+gp100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ if (map->page->shift == PAGE_SHIFT) {
+ VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u64 data = (*map->dma++ >> 4) | map->type;
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ map->type += map->ctag;
+ }
+ nvkm_done(pt->memory);
+ return;
+ }
+
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
+}
+
+static void
+gp100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte);
+}
+
+static void
+gp100_vmm_pgt_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ /* VALID_FALSE + VOL tells the MMU to treat the PTE as sparse. */
+ VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(3) /* VOL. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_spt = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gp100_vmm_pgt_sparse,
+ .mem = gp100_vmm_pgt_mem,
+ .dma = gp100_vmm_pgt_dma,
+ .sgl = gp100_vmm_pgt_sgl,
+};
+
+static void
+gp100_vmm_lpt_invalid(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ /* VALID_FALSE + PRIV tells the MMU to ignore corresponding SPTEs. */
+ VMM_FO064(pt, vmm, ptei * 8, BIT_ULL(5) /* PRIV. */, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_lpt = {
+ .invalid = gp100_vmm_lpt_invalid,
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gp100_vmm_pgt_sparse,
+ .mem = gp100_vmm_pgt_mem,
+};
+
+static inline void
+gp100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 data = (addr >> 4) | map->type;
+
+ map->type += ptes * map->ctag;
+
+ while (ptes--) {
+ VMM_WO128(pt, vmm, ptei++ * 0x10, data, 0ULL);
+ data += map->next;
+ }
+}
+
+static void
+gp100_vmm_pd0_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pd0_pte);
+}
+
+static inline bool
+gp100_vmm_pde(struct nvkm_mmu_pt *pt, u64 *data)
+{
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM: *data |= 1ULL << 1; break;
+ case NVKM_MEM_TARGET_HOST: *data |= 2ULL << 1;
+ *data |= BIT_ULL(3); /* VOL. */
+ break;
+ case NVKM_MEM_TARGET_NCOH: *data |= 3ULL << 1; break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+ *data |= pt->addr >> 4;
+ return true;
+}
+
+static void
+gp100_vmm_pd0_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ u64 data[2] = {};
+
+ if (pgt->pt[0] && !gp100_vmm_pde(pgt->pt[0], &data[0]))
+ return;
+ if (pgt->pt[1] && !gp100_vmm_pde(pgt->pt[1], &data[1]))
+ return;
+
+ nvkm_kmap(pd->memory);
+ VMM_WO128(pd, vmm, pdei * 0x10, data[0], data[1]);
+ nvkm_done(pd->memory);
+}
+
+static void
+gp100_vmm_pd0_sparse(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ /* VALID_FALSE + VOL_BIG tells the MMU to treat the PDE as sparse. */
+ VMM_FO128(pt, vmm, pdei * 0x10, BIT_ULL(3) /* VOL_BIG. */, 0ULL, pdes);
+}
+
+static void
+gp100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 pdei, u32 pdes)
+{
+ VMM_FO128(pt, vmm, pdei * 0x10, 0ULL, 0ULL, pdes);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_pd0 = {
+ .unmap = gp100_vmm_pd0_unmap,
+ .sparse = gp100_vmm_pd0_sparse,
+ .pde = gp100_vmm_pd0_pde,
+ .mem = gp100_vmm_pd0_mem,
+};
+
+static void
+gp100_vmm_pd1_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
+ struct nvkm_mmu_pt *pd = pgd->pt[0];
+ u64 data = 0;
+
+ if (!gp100_vmm_pde(pgt->pt[0], &data))
+ return;
+
+ nvkm_kmap(pd->memory);
+ VMM_WO064(pd, vmm, pdei * 8, data);
+ nvkm_done(pd->memory);
+}
+
+static const struct nvkm_vmm_desc_func
+gp100_vmm_desc_pd1 = {
+ .unmap = gf100_vmm_pgt_unmap,
+ .sparse = gp100_vmm_pgt_sparse,
+ .pde = gp100_vmm_pd1_pde,
+};
+
+const struct nvkm_vmm_desc
+gp100_vmm_desc_16[] = {
+ { LPT, 5, 8, 0x0100, &gp100_vmm_desc_lpt },
+ { PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
+ { PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ { PGD, 2, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ {}
+};
+
+const struct nvkm_vmm_desc
+gp100_vmm_desc_12[] = {
+ { SPT, 9, 8, 0x1000, &gp100_vmm_desc_spt },
+ { PGD, 8, 16, 0x1000, &gp100_vmm_desc_pd0 },
+ { PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ { PGD, 9, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ { PGD, 2, 8, 0x1000, &gp100_vmm_desc_pd1 },
+ {}
+};
+
+int
+gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ const enum nvkm_memory_target target = nvkm_memory_target(map->memory);
+ const struct nvkm_vmm_page *page = map->page;
+ union {
+ struct gp100_vmm_map_vn vn;
+ struct gp100_vmm_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ struct nvkm_memory *memory = map->memory;
+ u8 kind, priv, ro, vol;
+ int kindn, aper, ret = -ENOSYS;
+ const u8 *kindm;
+
+ map->next = (1ULL << page->shift) >> 4;
+ map->type = 0;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ vol = !!args->v0.vol;
+ ro = !!args->v0.ro;
+ priv = !!args->v0.priv;
+ kind = args->v0.kind;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ vol = target == NVKM_MEM_TARGET_HOST;
+ ro = 0;
+ priv = 0;
+ kind = 0x00;
+ } else {
+ VMM_DEBUG(vmm, "args");
+ return ret;
+ }
+
+ aper = vmm->func->aper(target);
+ if (WARN_ON(aper < 0))
+ return aper;
+
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
+ if (kind >= kindn || kindm[kind] == 0xff) {
+ VMM_DEBUG(vmm, "kind %02x", kind);
+ return -EINVAL;
+ }
+
+ if (kindm[kind] != kind) {
+ u64 tags = nvkm_memory_size(memory) >> 16;
+ if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
+ VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
+ return -EINVAL;
+ }
+
+ ret = nvkm_memory_tags_get(memory, device, tags,
+ nvkm_ltc_tags_clear,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
+
+ if (map->tags->mn) {
+ tags = map->tags->mn->offset + (map->offset >> 16);
+ map->ctag |= ((1ULL << page->shift) >> 16) << 36;
+ map->type |= tags << 36;
+ map->next |= map->ctag;
+ } else {
+ kind = kindm[kind];
+ }
+ }
+
+ map->type |= BIT(0);
+ map->type |= (u64)aper << 1;
+ map->type |= (u64) vol << 3;
+ map->type |= (u64)priv << 5;
+ map->type |= (u64) ro << 6;
+ map->type |= (u64)kind << 56;
+ return 0;
+}
+
+void
+gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
+{
+ gf100_vmm_flush_(vmm, 5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth);
+}
+
+int
+gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ const u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11); /* 64KiB */
+ return gf100_vmm_join_(vmm, inst, base);
+}
+
+static const struct nvkm_vmm_func
+gp100_vmm = {
+ .join = gp100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gp100_vmm_valid,
+ .flush = gp100_vmm_flush,
+ .page = {
+ { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+ { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+ { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+ { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+ { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+int
+gp100_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&gp100_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+}
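The five-level gp100_vmm_desc_* arrays above, together with the leaf page shift, cover Pascal's 49-bit virtual address space (12+9+8+9+9+2 for the 4KiB path, 16+5+8+9+9+2 for the 64KiB path). A hypothetical sanity check that sums the levels, for illustration only:

        static u8
        example_vmm_va_bits(const struct nvkm_vmm_desc *desc, u8 page_shift)
        {
                u8 bits = page_shift;

                /* Each level translates desc->bits of the address; the array is
                 * terminated by an all-zero entry.
                 */
                for (; desc->bits; desc++)
                        bits += desc->bits;
                return bits;            /* 49 for both gp100_vmm_desc_12/16 */
        }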
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
new file mode 100644
index 000000000000..3dcc6bddb32f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+static const struct nvkm_vmm_func
+gp10b_vmm = {
+ .join = gp100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gk20a_vmm_aper,
+ .valid = gp100_vmm_valid,
+ .flush = gp100_vmm_flush,
+ .page = {
+ { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+ { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+ { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+ { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SxHC },
+ { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SxHC },
+ { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SxHx },
+ {}
+ }
+};
+
+int
+gp10b_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&gp10b_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
new file mode 100644
index 000000000000..0cab1ffc9f64
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <nvif/if000d.h>
+#include <nvif/unpack.h>
+
+static inline void
+nv04_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u32 data = addr | 0x00000003; /* PRESENT, RW. */
+ while (ptes--) {
+ VMM_WO032(pt, vmm, 8 + ptei++ * 4, data);
+ data += 0x00001000;
+ }
+}
+
+static void
+nv04_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
+}
+
+static void
+nv04_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+ nvkm_kmap(pt->memory);
+ while (ptes--)
+ VMM_WO032(pt, vmm, 8 + (ptei++ * 4), *map->dma++ | 0x00000003);
+ nvkm_done(pt->memory);
+#else
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
+#endif
+}
+
+static void
+nv04_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ VMM_FO032(pt, vmm, 8 + (ptei * 4), 0, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+nv04_vmm_desc_pgt = {
+ .unmap = nv04_vmm_pgt_unmap,
+ .dma = nv04_vmm_pgt_dma,
+ .sgl = nv04_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc
+nv04_vmm_desc_12[] = {
+ { PGT, 15, 4, 0x1000, &nv04_vmm_desc_pgt },
+ {}
+};
+
+int
+nv04_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ union {
+ struct nv04_vmm_map_vn vn;
+ } *args = argv;
+ int ret = -ENOSYS;
+ if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
+ VMM_DEBUG(vmm, "args");
+ return ret;
+}
+
+static const struct nvkm_vmm_func
+nv04_vmm = {
+ .valid = nv04_vmm_valid,
+ .page = {
+ { 12, &nv04_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
+ {}
+ }
+};
+
+int
+nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
+ u32 pd_header, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ union {
+ struct nv04_vmm_vn vn;
+ } *args = argv;
+ int ret;
+
+ ret = nvkm_vmm_new_(func, mmu, pd_header, addr, size, key, name, pvmm);
+ if (ret)
+ return ret;
+
+ return nvif_unvers(-ENOSYS, &argv, &argc, args->vn);
+}
+
+int
+nv04_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ struct nvkm_memory *mem;
+ struct nvkm_vmm *vmm;
+ int ret;
+
+ ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, addr, size,
+ argv, argc, key, name, &vmm);
+ *pvmm = vmm;
+ if (ret)
+ return ret;
+
+ mem = vmm->pd->pt[0]->memory;
+ nvkm_kmap(mem);
+ nvkm_wo32(mem, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
+ nvkm_wo32(mem, 0x00004, vmm->limit - 1);
+ nvkm_done(mem);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
new file mode 100644
index 000000000000..b595f130e573
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/timer.h>
+
+static void
+nv41_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u32 data = (addr >> 7) | 0x00000001; /* VALID. */
+ while (ptes--) {
+ VMM_WO032(pt, vmm, ptei++ * 4, data);
+ data += 0x00000020;
+ }
+}
+
+static void
+nv41_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
+}
+
+static void
+nv41_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u32 data = (*map->dma++ >> 7) | 0x00000001;
+ VMM_WO032(pt, vmm, ptei++ * 4, data);
+ }
+ nvkm_done(pt->memory);
+#else
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte);
+#endif
+}
+
+static void
+nv41_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ VMM_FO032(pt, vmm, ptei * 4, 0, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+nv41_vmm_desc_pgt = {
+ .unmap = nv41_vmm_pgt_unmap,
+ .dma = nv41_vmm_pgt_dma,
+ .sgl = nv41_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc
+nv41_vmm_desc_12[] = {
+ { PGT, 17, 4, 0x1000, &nv41_vmm_desc_pgt },
+ {}
+};
+
+static void
+nv41_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+ struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+ struct nvkm_device *device = subdev->device;
+
+ mutex_lock(&subdev->mutex);
+ nvkm_wr32(device, 0x100810, 0x00000022);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100810) & 0x00000020)
+ break;
+ );
+ nvkm_wr32(device, 0x100810, 0x00000000);
+ mutex_unlock(&subdev->mutex);
+}
+
+static const struct nvkm_vmm_func
+nv41_vmm = {
+ .valid = nv04_vmm_valid,
+ .flush = nv41_vmm_flush,
+ .page = {
+ { 12, &nv41_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
+ {}
+ }
+};
+
+int
+nv41_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&nv41_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
new file mode 100644
index 000000000000..b834e4352334
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/timer.h>
+
+static void
+nv44_vmm_pgt_fill(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ dma_addr_t *list, u32 ptei, u32 ptes)
+{
+ u32 pteo = (ptei << 2) & ~0x0000000f;
+ u32 tmp[4];
+
+ tmp[0] = nvkm_ro32(pt->memory, pteo + 0x0);
+ tmp[1] = nvkm_ro32(pt->memory, pteo + 0x4);
+ tmp[2] = nvkm_ro32(pt->memory, pteo + 0x8);
+ tmp[3] = nvkm_ro32(pt->memory, pteo + 0xc);
+
+ while (ptes--) {
+ u32 addr = (list ? *list++ : vmm->null) >> 12;
+ switch (ptei++ & 0x3) {
+ case 0:
+ tmp[0] &= ~0x07ffffff;
+ tmp[0] |= addr;
+ break;
+ case 1:
+ tmp[0] &= ~0xf8000000;
+ tmp[0] |= addr << 27;
+ tmp[1] &= ~0x003fffff;
+ tmp[1] |= addr >> 5;
+ break;
+ case 2:
+ tmp[1] &= ~0xffc00000;
+ tmp[1] |= addr << 22;
+ tmp[2] &= ~0x0001ffff;
+ tmp[2] |= addr >> 10;
+ break;
+ case 3:
+ tmp[2] &= ~0xfffe0000;
+ tmp[2] |= addr << 17;
+ tmp[3] &= ~0x00000fff;
+ tmp[3] |= addr >> 15;
+ break;
+ }
+ }
+
+ VMM_WO032(pt, vmm, pteo + 0x0, tmp[0]);
+ VMM_WO032(pt, vmm, pteo + 0x4, tmp[1]);
+ VMM_WO032(pt, vmm, pteo + 0x8, tmp[2]);
+ VMM_WO032(pt, vmm, pteo + 0xc, tmp[3] | 0x40000000);
+}
+
+static void
+nv44_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ dma_addr_t tmp[4], i;
+
+ if (ptei & 3) {
+ const u32 pten = min(ptes, 4 - (ptei & 3));
+ for (i = 0; i < pten; i++, addr += 0x1000)
+ tmp[i] = addr;
+ nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, pten);
+ ptei += pten;
+ ptes -= pten;
+ }
+
+ while (ptes >= 4) {
+ for (i = 0; i < 4; i++, addr += 0x1000)
+ tmp[i] = addr >> 12;
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >> 0 | tmp[1] << 27);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >> 5 | tmp[2] << 22);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
+ ptes -= 4;
+ }
+
+ if (ptes) {
+ for (i = 0; i < ptes; i++, addr += 0x1000)
+ tmp[i] = addr;
+ nv44_vmm_pgt_fill(vmm, pt, tmp, ptei, ptes);
+ }
+}
+
+static void
+nv44_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
+}
+
+static void
+nv44_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+ nvkm_kmap(pt->memory);
+ if (ptei & 3) {
+ const u32 pten = min(ptes, 4 - (ptei & 3));
+ nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, pten);
+ ptei += pten;
+ ptes -= pten;
+ map->dma += pten;
+ }
+
+ while (ptes >= 4) {
+ u32 tmp[4], i;
+ for (i = 0; i < 4; i++)
+ tmp[i] = *map->dma++ >> 12;
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[0] >> 0 | tmp[1] << 27);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[1] >> 5 | tmp[2] << 22);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+ VMM_WO032(pt, vmm, ptei++ * 4, tmp[3] >> 15 | 0x40000000);
+ ptes -= 4;
+ }
+
+ if (ptes) {
+ nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, ptes);
+ map->dma += ptes;
+ }
+ nvkm_done(pt->memory);
+#else
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte);
+#endif
+}
+
+static void
+nv44_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ nvkm_kmap(pt->memory);
+ if (ptei & 3) {
+ const u32 pten = min(ptes, 4 - (ptei & 3));
+ nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, pten);
+ ptei += pten;
+ ptes -= pten;
+ }
+
+ while (ptes > 4) {
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ VMM_WO032(pt, vmm, ptei++ * 4, 0x00000000);
+ ptes -= 4;
+ }
+
+ if (ptes)
+ nv44_vmm_pgt_fill(vmm, pt, NULL, ptei, ptes);
+ nvkm_done(pt->memory);
+}
+
+static const struct nvkm_vmm_desc_func
+nv44_vmm_desc_pgt = {
+ .unmap = nv44_vmm_pgt_unmap,
+ .dma = nv44_vmm_pgt_dma,
+ .sgl = nv44_vmm_pgt_sgl,
+};
+
+static const struct nvkm_vmm_desc
+nv44_vmm_desc_12[] = {
+ { PGT, 17, 4, 0x80000, &nv44_vmm_desc_pgt },
+ {}
+};
+
+static void
+nv44_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ nvkm_wr32(device, 0x100814, vmm->limit - 4096);
+ nvkm_wr32(device, 0x100808, 0x000000020);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x100808) & 0x00000001)
+ break;
+ );
+ nvkm_wr32(device, 0x100808, 0x00000000);
+}
+
+static const struct nvkm_vmm_func
+nv44_vmm = {
+ .valid = nv04_vmm_valid,
+ .flush = nv44_vmm_flush,
+ .page = {
+ { 12, &nv44_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
+ {}
+ }
+};
+
+int
+nv44_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ struct nvkm_subdev *subdev = &mmu->subdev;
+ struct nvkm_vmm *vmm;
+ int ret;
+
+ ret = nv04_vmm_new_(&nv44_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, &vmm);
+ *pvmm = vmm;
+ if (ret)
+ return ret;
+
+ vmm->nullp = dma_alloc_coherent(subdev->device->dev, 16 * 1024,
+ &vmm->null, GFP_KERNEL);
+ if (!vmm->nullp) {
+ nvkm_warn(subdev, "unable to allocate dummy pages\n");
+ vmm->null = 0;
+ }
+
+ return 0;
+}
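
The nv44 fast path above packs four 27-bit page frame numbers (addr >> 12) into each naturally aligned group of four 32-bit PTE words, and ORs 0x40000000 into the last word of the group; partial groups go through nv44_vmm_pgt_fill() instead. A standalone sketch of that packing and its inverse, illustrative only and not taken from the driver:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack four 27-bit page frame numbers into four 32-bit words using the
 * same shifts as the nv44 code above. */
static void nv44_pack(uint32_t out[4], const uint32_t pfn[4])
{
	out[0] = (pfn[0] >>  0) | (pfn[1] << 27);
	out[1] = (pfn[1] >>  5) | (pfn[2] << 22);
	out[2] = (pfn[2] >> 10) | (pfn[3] << 17);
	out[3] = (pfn[3] >> 15) | 0x40000000;
}

/* Recover the four page frame numbers from the packed words. */
static void nv44_unpack(uint32_t pfn[4], const uint32_t in[4])
{
	const uint32_t mask = (1u << 27) - 1;

	pfn[0] =  in[0]                          & mask;
	pfn[1] = ((in[0] >> 27) | (in[1] <<  5)) & mask;
	pfn[2] = ((in[1] >> 22) | (in[2] << 10)) & mask;
	pfn[3] = ((in[2] >> 17) | (in[3] << 15)) & mask;
}

int main(void)
{
	const uint32_t pfn[4] = { 0x0012345, 0x07fffff, 0x0000001, 0x0654321 };
	uint32_t words[4], back[4];
	int i;

	nv44_pack(words, pfn);
	nv44_unpack(back, words);
	for (i = 0; i < 4; i++)
		assert(back[i] == pfn[i]);
	printf("round-trip ok: %08x %08x %08x %08x\n",
	       (unsigned)words[0], (unsigned)words[1],
	       (unsigned)words[2], (unsigned)words[3]);
	return 0;
}
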
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
new file mode 100644
index 000000000000..863a2edd9861
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <engine/gr.h>
+
+#include <nvif/if500d.h>
+#include <nvif/unpack.h>
+
+static inline void
+nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u64 next = addr | map->type, data;
+ u32 pten;
+ int log2blk;
+
+ map->type += ptes * map->ctag;
+
+ while (ptes) {
+ for (log2blk = 7; log2blk >= 0; log2blk--) {
+ pten = 1 << log2blk;
+ if (ptes >= pten && IS_ALIGNED(ptei, pten))
+ break;
+ }
+
+ data = next | (log2blk << 7);
+ next += pten * map->next;
+ ptes -= pten;
+
+ while (pten--)
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ }
+}
+
+static void
+nv50_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
+}
+
+static void
+nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ if (map->page->shift == PAGE_SHIFT) {
+ VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
+ nvkm_kmap(pt->memory);
+ while (ptes--) {
+ const u64 data = *map->dma++ | map->type;
+ VMM_WO064(pt, vmm, ptei++ * 8, data);
+ map->type += map->ctag;
+ }
+ nvkm_done(pt->memory);
+ return;
+ }
+
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
+}
+
+static void
+nv50_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
+}
+
+static void
+nv50_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
+}
+
+static const struct nvkm_vmm_desc_func
+nv50_vmm_pgt = {
+ .unmap = nv50_vmm_pgt_unmap,
+ .mem = nv50_vmm_pgt_mem,
+ .dma = nv50_vmm_pgt_dma,
+ .sgl = nv50_vmm_pgt_sgl,
+};
+
+static bool
+nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata)
+{
+ struct nvkm_mmu_pt *pt;
+ u64 data = 0xdeadcafe00000000ULL;
+ if (pgt && (pt = pgt->pt[0])) {
+ switch (pgt->page) {
+ case 16: data = 0x00000001; break;
+ case 12: data = 0x00000003;
+ switch (nvkm_memory_size(pt->memory)) {
+ case 0x100000: data |= 0x00000000; break;
+ case 0x040000: data |= 0x00000020; break;
+ case 0x020000: data |= 0x00000040; break;
+ case 0x010000: data |= 0x00000060; break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ switch (nvkm_memory_target(pt->memory)) {
+ case NVKM_MEM_TARGET_VRAM: data |= 0x00000000; break;
+ case NVKM_MEM_TARGET_HOST: data |= 0x00000008; break;
+ case NVKM_MEM_TARGET_NCOH: data |= 0x0000000c; break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ data |= pt->addr;
+ }
+ *pdata = data;
+ return true;
+}
+
+static void
+nv50_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
+{
+ struct nvkm_vmm_join *join;
+ u32 pdeo = vmm->mmu->func->vmm.pd_offset + (pdei * 8);
+ u64 data;
+
+ if (!nv50_vmm_pde(vmm, pgd->pde[pdei], &data))
+ return;
+
+ list_for_each_entry(join, &vmm->join, head) {
+ nvkm_kmap(join->inst);
+ nvkm_wo64(join->inst, pdeo, data);
+ nvkm_done(join->inst);
+ }
+}
+
+static const struct nvkm_vmm_desc_func
+nv50_vmm_pgd = {
+ .pde = nv50_vmm_pgd_pde,
+};
+
+static const struct nvkm_vmm_desc
+nv50_vmm_desc_12[] = {
+ { PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
+ { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
+ {}
+};
+
+static const struct nvkm_vmm_desc
+nv50_vmm_desc_16[] = {
+ { PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
+ { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
+ {}
+};
+
+static void
+nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
+{
+ struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ int i, id;
+
+ mutex_lock(&subdev->mutex);
+ for (i = 0; i < NVKM_SUBDEV_NR; i++) {
+ if (!atomic_read(&vmm->engref[i]))
+ continue;
+
+ /* unfortunate hw bug workaround... */
+ if (i == NVKM_ENGINE_GR && device->gr) {
+ int ret = nvkm_gr_tlb_flush(device->gr);
+ if (ret != -ENODEV)
+ continue;
+ }
+
+ switch (i) {
+ case NVKM_ENGINE_GR : id = 0x00; break;
+ case NVKM_ENGINE_VP :
+ case NVKM_ENGINE_MSPDEC: id = 0x01; break;
+ case NVKM_SUBDEV_BAR : id = 0x06; break;
+ case NVKM_ENGINE_MSPPP :
+ case NVKM_ENGINE_MPEG : id = 0x08; break;
+ case NVKM_ENGINE_BSP :
+ case NVKM_ENGINE_MSVLD : id = 0x09; break;
+ case NVKM_ENGINE_CIPHER:
+ case NVKM_ENGINE_SEC : id = 0x0a; break;
+ case NVKM_ENGINE_CE0 : id = 0x0d; break;
+ default:
+ continue;
+ }
+
+ nvkm_wr32(device, 0x100c80, (id << 16) | 1);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+ break;
+ ) < 0)
+ nvkm_error(subdev, "%s mmu invalidate timeout\n",
+ nvkm_subdev_name[i]);
+ }
+ mutex_unlock(&subdev->mutex);
+}
+
+static int
+nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ const struct nvkm_vmm_page *page = map->page;
+ union {
+ struct nv50_vmm_map_vn vn;
+ struct nv50_vmm_map_v0 v0;
+ } *args = argv;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+ struct nvkm_ram *ram = device->fb->ram;
+ struct nvkm_memory *memory = map->memory;
+ u8 aper, kind, comp, priv, ro;
+ int kindn, ret = -ENOSYS;
+ const u8 *kindm;
+
+ map->type = map->ctag = 0;
+ map->next = 1 << page->shift;
+
+ if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+ ro = !!args->v0.ro;
+ priv = !!args->v0.priv;
+ kind = args->v0.kind & 0x7f;
+ comp = args->v0.comp & 0x03;
+ } else
+ if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+ ro = 0;
+ priv = 0;
+ kind = 0x00;
+ comp = 0;
+ } else {
+ VMM_DEBUG(vmm, "args");
+ return ret;
+ }
+
+ switch (nvkm_memory_target(memory)) {
+ case NVKM_MEM_TARGET_VRAM:
+ if (ram->stolen) {
+ map->type |= ram->stolen;
+ aper = 3;
+ } else {
+ aper = 0;
+ }
+ break;
+ case NVKM_MEM_TARGET_HOST:
+ aper = 2;
+ break;
+ case NVKM_MEM_TARGET_NCOH:
+ aper = 3;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
+ if (kind >= kindn || kindm[kind] == 0x7f) {
+ VMM_DEBUG(vmm, "kind %02x", kind);
+ return -EINVAL;
+ }
+
+ if (map->mem && map->mem->type != kindm[kind]) {
+ VMM_DEBUG(vmm, "kind %02x bankswz: %d %d", kind,
+ kindm[kind], map->mem->type);
+ return -EINVAL;
+ }
+
+ if (comp) {
+ u32 tags = (nvkm_memory_size(memory) >> 16) * comp;
+ if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
+ VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
+ return -EINVAL;
+ }
+
+ ret = nvkm_memory_tags_get(memory, device, tags, NULL,
+ &map->tags);
+ if (ret) {
+ VMM_DEBUG(vmm, "comp %d", ret);
+ return ret;
+ }
+
+ if (map->tags->mn) {
+ u32 tags = map->tags->mn->offset + (map->offset >> 16);
+ map->ctag |= (u64)comp << 49;
+ map->type |= (u64)comp << 47;
+ map->type |= (u64)tags << 49;
+ map->next |= map->ctag;
+ }
+ }
+
+ map->type |= BIT(0); /* Valid. */
+ map->type |= (u64)ro << 3;
+ map->type |= (u64)aper << 4;
+ map->type |= (u64)priv << 6;
+ map->type |= (u64)kind << 40;
+ return 0;
+}
+
+static void
+nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ struct nvkm_vmm_join *join;
+
+ list_for_each_entry(join, &vmm->join, head) {
+ if (join->inst == inst) {
+ list_del(&join->head);
+ kfree(join);
+ break;
+ }
+ }
+}
+
+static int
+nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
+{
+ const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
+ struct nvkm_vmm_join *join;
+ int ret = 0;
+ u64 data;
+ u32 pdei;
+
+ if (!(join = kmalloc(sizeof(*join), GFP_KERNEL)))
+ return -ENOMEM;
+ join->inst = inst;
+ list_add_tail(&join->head, &vmm->join);
+
+ nvkm_kmap(join->inst);
+ for (pdei = vmm->start >> 29; pdei <= (vmm->limit - 1) >> 29; pdei++) {
+ if (!nv50_vmm_pde(vmm, vmm->pd->pde[pdei], &data)) {
+ ret = -EINVAL;
+ break;
+ }
+ nvkm_wo64(join->inst, pd_offset + (pdei * 8), data);
+ }
+ nvkm_done(join->inst);
+ return ret;
+}
+
+static const struct nvkm_vmm_func
+nv50_vmm = {
+ .join = nv50_vmm_join,
+ .part = nv50_vmm_part,
+ .valid = nv50_vmm_valid,
+ .flush = nv50_vmm_flush,
+ .page_block = 1 << 29,
+ .page = {
+ { 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxC },
+ { 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
+ {}
+ }
+};
+
+int
+nv50_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
+ struct lock_class_key *key, const char *name,
+ struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&nv50_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+}
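
The inner loop of nv50_vmm_pgt_pte() above repeatedly selects the largest power-of-two run of PTEs (up to 128) that both fits in the remaining count and starts on an index aligned to the run size, then stores log2 of the run length in bits 9:7 of every PTE it writes so the hardware can treat the run as one larger block. A standalone sketch of just the run-selection step, for illustration:

#include <stdint.h>
#include <stdio.h>

/* Same selection as the log2blk loop above: largest 2^k (k <= 7) such
 * that 2^k <= ptes and ptei is a multiple of 2^k. */
static int nv50_block_log2(uint32_t ptei, uint32_t ptes)
{
	int log2blk;

	for (log2blk = 7; log2blk >= 0; log2blk--) {
		uint32_t pten = 1u << log2blk;
		if (ptes >= pten && (ptei & (pten - 1)) == 0)
			break;
	}
	return log2blk;
}

int main(void)
{
	/* e.g. mapping 200 PTEs starting at index 3 */
	uint32_t ptei = 3, ptes = 200;

	while (ptes) {
		int log2blk = nv50_block_log2(ptei, ptes);
		uint32_t pten = 1u << log2blk;

		printf("ptei %3u: run of %3u PTEs, data |= 0x%x\n",
		       (unsigned)ptei, (unsigned)pten,
		       (unsigned)log2blk << 7);
		ptei += pten;
		ptes -= pten;
	}
	return 0;
}
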
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index a4cb82495cee..b1b1f3626b96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -87,7 +87,7 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
if (pci->irq >= 0) {
free_irq(pci->irq, pci);
pci->irq = -1;
- };
+ }
if (pci->agp.bridge)
nvkm_agp_fini(pci);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index 73ca1203281d..5e91b3f90065 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -39,7 +39,7 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
{
struct gm200_secboot *gsb = gm200_secboot(sb);
struct nvkm_subdev *subdev = &gsb->base.subdev;
- struct nvkm_vma vma;
+ struct nvkm_vma *vma = NULL;
u32 start_address;
int ret;
@@ -48,12 +48,16 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
return ret;
/* Map the HS firmware so the HS bootloader can see it */
- ret = nvkm_gpuobj_map(blob, gsb->vm, NV_MEM_ACCESS_RW, &vma);
+ ret = nvkm_vmm_get(gsb->vmm, 12, blob->size, &vma);
if (ret) {
nvkm_falcon_put(falcon, subdev);
return ret;
}
+ ret = nvkm_memory_map(blob, 0, gsb->vmm, vma, NULL, 0);
+ if (ret)
+ goto end;
+
/* Reset and set the falcon up */
ret = nvkm_falcon_reset(falcon);
if (ret)
@@ -61,7 +65,7 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
nvkm_falcon_bind_context(falcon, gsb->inst);
/* Load the HS bootloader into the falcon's IMEM/DMEM */
- ret = sb->acr->func->load(sb->acr, falcon, blob, vma.offset);
+ ret = sb->acr->func->load(sb->acr, falcon, blob, vma->addr);
if (ret < 0)
goto end;
@@ -91,7 +95,7 @@ end:
nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true);
/* We don't need the ACR firmware anymore */
- nvkm_gpuobj_unmap(&vma);
+ nvkm_vmm_put(gsb->vmm, &vma);
nvkm_falcon_put(falcon, subdev);
return ret;
@@ -102,37 +106,26 @@ gm200_secboot_oneinit(struct nvkm_secboot *sb)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
struct nvkm_device *device = sb->subdev.device;
- struct nvkm_vm *vm;
- const u64 vm_area_len = 600 * 1024;
int ret;
/* Allocate instance block and VM */
- ret = nvkm_gpuobj_new(device, 0x1000, 0, true, NULL, &gsb->inst);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
+ &gsb->inst);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(device, 0x8000, 0, true, NULL, &gsb->pgd);
+ ret = nvkm_vmm_new(device, 0, 600 * 1024, NULL, 0, NULL, "acr",
+ &gsb->vmm);
if (ret)
return ret;
- ret = nvkm_vm_new(device, 0, vm_area_len, 0, NULL, &vm);
- if (ret)
- return ret;
-
- atomic_inc(&vm->engref[NVKM_SUBDEV_PMU]);
+ atomic_inc(&gsb->vmm->engref[NVKM_SUBDEV_PMU]);
+ gsb->vmm->debug = gsb->base.subdev.debug;
- ret = nvkm_vm_ref(vm, &gsb->vm, gsb->pgd);
- nvkm_vm_ref(NULL, &vm, NULL);
+ ret = nvkm_vmm_join(gsb->vmm, gsb->inst);
if (ret)
return ret;
- nvkm_kmap(gsb->inst);
- nvkm_wo32(gsb->inst, 0x200, lower_32_bits(gsb->pgd->addr));
- nvkm_wo32(gsb->inst, 0x204, upper_32_bits(gsb->pgd->addr));
- nvkm_wo32(gsb->inst, 0x208, lower_32_bits(vm_area_len - 1));
- nvkm_wo32(gsb->inst, 0x20c, upper_32_bits(vm_area_len - 1));
- nvkm_done(gsb->inst);
-
if (sb->acr->func->oneinit) {
ret = sb->acr->func->oneinit(sb->acr, sb);
if (ret)
@@ -160,9 +153,9 @@ gm200_secboot_dtor(struct nvkm_secboot *sb)
sb->acr->func->dtor(sb->acr);
- nvkm_vm_ref(NULL, &gsb->vm, gsb->pgd);
- nvkm_gpuobj_del(&gsb->pgd);
- nvkm_gpuobj_del(&gsb->inst);
+ nvkm_vmm_part(gsb->vmm, gsb->inst);
+ nvkm_vmm_unref(&gsb->vmm);
+ nvkm_memory_unref(&gsb->inst);
return gsb;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
index c8ab3d76bdef..62c5e162099a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
@@ -29,9 +29,8 @@ struct gm200_secboot {
struct nvkm_secboot base;
/* Instance block & address space used for HS FW execution */
- struct nvkm_gpuobj *inst;
- struct nvkm_gpuobj *pgd;
- struct nvkm_vm *vm;
+ struct nvkm_memory *inst;
+ struct nvkm_vmm *vmm;
};
#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
index ee989210725e..6f10b098676c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
@@ -183,7 +183,7 @@ acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
break;
);
if (reg & BIT(4)) {
- nvkm_debug(subdev, "applying workaround for start bug...");
+ nvkm_debug(subdev, "applying workaround for start bug...\n");
nvkm_falcon_start(sb->boot_falcon);
nvkm_msec(subdev->device, 1,
if ((reg = nvkm_rd32(subdev->device,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
index 885e919a8720..d9091f029506 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -25,6 +25,7 @@
#include <subdev/secboot.h>
#include <subdev/mmu.h>
+struct nvkm_gpuobj;
struct nvkm_secboot_func {
int (*oneinit)(struct nvkm_secboot *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
index 2bafcc1d1818..7ba56b12badd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -12,3 +12,4 @@ nvkm-y += nvkm/subdev/therm/gt215.o
nvkm-y += nvkm/subdev/therm/gf119.o
nvkm-y += nvkm/subdev/therm/gm107.o
nvkm-y += nvkm/subdev/therm/gm200.o
+nvkm-y += nvkm/subdev/therm/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 952a7cb0a59a..f27fc6d0d4c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -341,7 +341,8 @@ nvkm_therm_init(struct nvkm_subdev *subdev)
{
struct nvkm_therm *therm = nvkm_therm(subdev);
- therm->func->init(therm);
+ if (therm->func->init)
+ therm->func->init(therm);
if (therm->suspend >= 0) {
/* restore the pwm value only when on manual or auto mode */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
new file mode 100644
index 000000000000..9f0dea3f61dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 Rhys Kidd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rhys Kidd
+ */
+#include "priv.h"
+
+static int
+gp100_temp_get(struct nvkm_therm *therm)
+{
+ struct nvkm_device *device = therm->subdev.device;
+ struct nvkm_subdev *subdev = &therm->subdev;
+ u32 tsensor = nvkm_rd32(device, 0x020460);
+ u32 inttemp = (tsensor & 0x0001fff8);
+
+ /* device SHADOWed */
+ if (tsensor & 0x40000000)
+ nvkm_trace(subdev, "reading temperature from SHADOWed sensor\n");
+
+ /* device valid */
+ if (tsensor & 0x20000000)
+ return (inttemp >> 8);
+ else
+ return -ENODEV;
+}
+
+static const struct nvkm_therm_func
+gp100_therm = {
+ .temp_get = gp100_temp_get,
+ .program_alarms = nvkm_therm_program_alarms_polling,
+};
+
+int
+gp100_therm_new(struct nvkm_device *device, int index,
+ struct nvkm_therm **ptherm)
+{
+ return nvkm_therm_new_(&gp100_therm, device, index, ptherm);
+}
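
The gp100 sensor readout above masks bits 16:3 of register 0x020460 and returns them shifted right by 8, which implies a fixed-point temperature with what looks like 5 fractional bits below the whole-degree field; bit 29 marks the reading valid and bit 30 marks it shadowed. The field widths are inferred from the masks used, so treat this standalone decode sketch as illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Decode a raw 0x020460-style sample into millidegrees, keeping the
 * (assumed) 5 fractional bits the driver above simply discards. */
static int gp100_decode_temp(uint32_t tsensor, int *millidegrees)
{
	uint32_t inttemp = tsensor & 0x0001fff8;

	if (!(tsensor & 0x20000000))	/* valid bit clear: no reading */
		return -1;

	/* bits 16:3, 5 fractional bits -> scale by 1000/32 for m-degC */
	*millidegrees = (int)(inttemp >> 3) * 1000 / 32;
	return 0;
}

int main(void)
{
	uint32_t sample = 0x20000000 | (45u << 8) | (16u << 3); /* 45.5 C */
	int mdeg;

	if (!gp100_decode_temp(sample, &mdeg))
		printf("%d.%03d degC\n", mdeg / 1000, mdeg % 1000);
	return 0;
}
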
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index d9d25df6fc1b..4600d3841c25 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -165,11 +165,15 @@ static bool hdmic_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
+ bool connected;
if (gpio_is_valid(ddata->hpd_gpio))
- return gpio_get_value_cansleep(ddata->hpd_gpio);
+ connected = gpio_get_value_cansleep(ddata->hpd_gpio);
else
- return in->ops.hdmi->detect(in);
+ connected = in->ops.hdmi->detect(in);
+ if (!connected && in->ops.hdmi->lost_hotplug)
+ in->ops.hdmi->lost_hotplug(in);
+ return connected;
}
static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index a9e9d667c55e..e3d98d78fc40 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -51,6 +51,8 @@ static int tpd_connect(struct omap_dss_device *dssdev,
dssdev->dst = dst;
gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
+ gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1);
+
/* DC-DC converter needs at max 300us to get to 90% of 5V */
udelay(300);
@@ -69,6 +71,7 @@ static void tpd_disconnect(struct omap_dss_device *dssdev,
return;
gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
+ gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
dst->src = NULL;
dssdev->dst = NULL;
@@ -146,25 +149,22 @@ static int tpd_read_edid(struct omap_dss_device *dssdev,
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
if (!gpiod_get_value_cansleep(ddata->hpd_gpio))
return -ENODEV;
- gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1);
-
- r = in->ops.hdmi->read_edid(in, edid, len);
-
- gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
-
- return r;
+ return in->ops.hdmi->read_edid(in, edid, len);
}
static bool tpd_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+ bool connected = gpiod_get_value_cansleep(ddata->hpd_gpio);
- return gpiod_get_value_cansleep(ddata->hpd_gpio);
+ if (!connected && in->ops.hdmi->lost_hotplug)
+ in->ops.hdmi->lost_hotplug(in);
+ return connected;
}
static int tpd_register_hpd_cb(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig
index 8b87d5cf45fc..f24ebf7f61dd 100644
--- a/drivers/gpu/drm/omapdrm/dss/Kconfig
+++ b/drivers/gpu/drm/omapdrm/dss/Kconfig
@@ -65,6 +65,14 @@ config OMAP4_DSS_HDMI
help
HDMI support for OMAP4 based SoCs.
+config OMAP4_DSS_HDMI_CEC
+ bool "Enable HDMI CEC support for OMAP4"
+ depends on OMAP4_DSS_HDMI
+ select CEC_CORE
+ default y
+ ---help---
+	  When selected, the HDMI transmitter will support the CEC feature.

+
config OMAP5_DSS_HDMI
bool "HDMI support for OMAP5"
default n
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 62d5b4f45420..904101c5e79d 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -15,5 +15,6 @@ omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
omapdss-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += hdmi_common.o hdmi_wp.o hdmi_pll.o \
hdmi_phy.o
omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi4.o hdmi4_core.o
+omapdss-$(CONFIG_OMAP4_DSS_HDMI_CEC) += hdmi4_cec.o
omapdss-$(CONFIG_OMAP5_DSS_HDMI) += hdmi5.o hdmi5_core.o
ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index a820b394af09..c2609c448ddc 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/hdmi.h>
#include <sound/omap-hdmi-audio.h>
+#include <media/cec.h>
#include "omapdss.h"
#include "dss.h"
@@ -264,6 +265,10 @@ struct hdmi_core_data {
void __iomem *base;
bool cts_swmode;
bool audio_use_mclk;
+
+ struct hdmi_wp_data *wp;
+ unsigned int core_pwr_cnt;
+ struct cec_adapter *adap;
};
static inline void hdmi_write_reg(void __iomem *base_addr, const u32 idx,
@@ -373,7 +378,7 @@ struct omap_hdmi {
bool audio_configured;
struct omap_dss_audio audio_config;
- /* This lock should be taken when booleans bellow are touched. */
+ /* This lock should be taken when booleans below are touched. */
spinlock_t audio_playing_lock;
bool audio_playing;
bool display_enabled;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index f169348da377..a598dfdeb585 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -36,9 +36,11 @@
#include <linux/of.h>
#include <linux/of_graph.h>
#include <sound/omap-hdmi-audio.h>
+#include <media/cec.h>
#include "omapdss.h"
#include "hdmi4_core.h"
+#include "hdmi4_cec.h"
#include "dss.h"
#include "hdmi.h"
@@ -70,7 +72,8 @@ static void hdmi_runtime_put(void)
static irqreturn_t hdmi_irq_handler(int irq, void *data)
{
- struct hdmi_wp_data *wp = data;
+ struct omap_hdmi *hdmi = data;
+ struct hdmi_wp_data *wp = &hdmi->wp;
u32 irqstatus;
irqstatus = hdmi_wp_get_irqstatus(wp);
@@ -95,6 +98,13 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
} else if (irqstatus & HDMI_IRQ_LINK_DISCONNECT) {
hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
}
+ if (irqstatus & HDMI_IRQ_CORE) {
+ u32 intr4 = hdmi_read_reg(hdmi->core.base, HDMI_CORE_SYS_INTR4);
+
+ hdmi_write_reg(hdmi->core.base, HDMI_CORE_SYS_INTR4, intr4);
+ if (intr4 & 8)
+ hdmi4_cec_irq(&hdmi->core);
+ }
return IRQ_HANDLED;
}
@@ -123,14 +133,19 @@ static int hdmi_power_on_core(struct omap_dss_device *dssdev)
{
int r;
+ if (hdmi.core.core_pwr_cnt++)
+ return 0;
+
r = regulator_enable(hdmi.vdda_reg);
if (r)
- return r;
+ goto err_reg_enable;
r = hdmi_runtime_get();
if (r)
goto err_runtime_get;
+ hdmi4_core_powerdown_disable(&hdmi.core);
+
/* Make selection of HDMI in DSS */
dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
@@ -140,12 +155,17 @@ static int hdmi_power_on_core(struct omap_dss_device *dssdev)
err_runtime_get:
regulator_disable(hdmi.vdda_reg);
+err_reg_enable:
+ hdmi.core.core_pwr_cnt--;
return r;
}
static void hdmi_power_off_core(struct omap_dss_device *dssdev)
{
+ if (--hdmi.core.core_pwr_cnt)
+ return;
+
hdmi.core_enabled = false;
hdmi_runtime_put();
@@ -166,8 +186,8 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
return r;
/* disable and clear irqs */
- hdmi_wp_clear_irqenable(wp, 0xffffffff);
- hdmi_wp_set_irqstatus(wp, 0xffffffff);
+ hdmi_wp_clear_irqenable(wp, ~HDMI_IRQ_CORE);
+ hdmi_wp_set_irqstatus(wp, ~HDMI_IRQ_CORE);
vm = &hdmi.cfg.vm;
@@ -242,7 +262,7 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
{
enum omap_channel channel = dssdev->dispc_channel;
- hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
+ hdmi_wp_clear_irqenable(&hdmi.wp, ~HDMI_IRQ_CORE);
hdmi_wp_video_stop(&hdmi.wp);
@@ -393,11 +413,11 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi.lock);
}
-static int hdmi_core_enable(struct omap_dss_device *dssdev)
+int hdmi4_core_enable(struct omap_dss_device *dssdev)
{
int r = 0;
- DSSDBG("ENTER omapdss_hdmi_core_enable\n");
+ DSSDBG("ENTER omapdss_hdmi4_core_enable\n");
mutex_lock(&hdmi.lock);
@@ -415,9 +435,9 @@ err0:
return r;
}
-static void hdmi_core_disable(struct omap_dss_device *dssdev)
+void hdmi4_core_disable(struct omap_dss_device *dssdev)
{
- DSSDBG("Enter omapdss_hdmi_core_disable\n");
+ DSSDBG("Enter omapdss_hdmi4_core_disable\n");
mutex_lock(&hdmi.lock);
@@ -475,19 +495,28 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
need_enable = hdmi.core_enabled == false;
if (need_enable) {
- r = hdmi_core_enable(dssdev);
+ r = hdmi4_core_enable(dssdev);
if (r)
return r;
}
r = read_edid(edid, len);
-
+ if (r >= 256)
+ hdmi4_cec_set_phys_addr(&hdmi.core,
+ cec_get_edid_phys_addr(edid, r, NULL));
+ else
+ hdmi4_cec_set_phys_addr(&hdmi.core, CEC_PHYS_ADDR_INVALID);
if (need_enable)
- hdmi_core_disable(dssdev);
+ hdmi4_core_disable(dssdev);
return r;
}
+static void hdmi_lost_hotplug(struct omap_dss_device *dssdev)
+{
+ hdmi4_cec_set_phys_addr(&hdmi.core, CEC_PHYS_ADDR_INVALID);
+}
+
static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi)
{
@@ -514,6 +543,7 @@ static const struct omapdss_hdmi_ops hdmi_ops = {
.get_timings = hdmi_display_get_timings,
.read_edid = hdmi_read_edid,
+ .lost_hotplug = hdmi_lost_hotplug,
.set_infoframe = hdmi_set_infoframe,
.set_hdmi_mode = hdmi_set_hdmi_mode,
};
@@ -715,6 +745,10 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
if (r)
goto err;
+ r = hdmi4_cec_init(pdev, &hdmi.core, &hdmi.wp);
+ if (r)
+ goto err;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
@@ -724,7 +758,7 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
r = devm_request_threaded_irq(&pdev->dev, irq,
NULL, hdmi_irq_handler,
- IRQF_ONESHOT, "OMAP HDMI", &hdmi.wp);
+ IRQF_ONESHOT, "OMAP HDMI", &hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
goto err;
@@ -759,6 +793,8 @@ static void hdmi4_unbind(struct device *dev, struct device *master, void *data)
hdmi_uninit_output(pdev);
+ hdmi4_cec_uninit(&hdmi.core);
+
hdmi_pll_uninit(&hdmi.pll);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
new file mode 100644
index 000000000000..d86873f2abe6
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -0,0 +1,381 @@
+/*
+ * HDMI CEC
+ *
+ * Based on the CEC code from hdmi_ti_4xxx_ip.c from Android.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Authors: Yong Zhi
+ * Mythri pk <mythripk@ti.com>
+ *
+ * Heavily modified to use the linux CEC framework:
+ *
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dss.h"
+#include "hdmi.h"
+#include "hdmi4_core.h"
+#include "hdmi4_cec.h"
+
+/* HDMI CEC */
+#define HDMI_CEC_DEV_ID 0x900
+#define HDMI_CEC_SPEC 0x904
+
+/* Not really a debug register, more a low-level control register */
+#define HDMI_CEC_DBG_3 0x91C
+#define HDMI_CEC_TX_INIT 0x920
+#define HDMI_CEC_TX_DEST 0x924
+#define HDMI_CEC_SETUP 0x938
+#define HDMI_CEC_TX_COMMAND 0x93C
+#define HDMI_CEC_TX_OPERAND 0x940
+#define HDMI_CEC_TRANSMIT_DATA 0x97C
+#define HDMI_CEC_CA_7_0 0x988
+#define HDMI_CEC_CA_15_8 0x98C
+#define HDMI_CEC_INT_STATUS_0 0x998
+#define HDMI_CEC_INT_STATUS_1 0x99C
+#define HDMI_CEC_INT_ENABLE_0 0x990
+#define HDMI_CEC_INT_ENABLE_1 0x994
+#define HDMI_CEC_RX_CONTROL 0x9B0
+#define HDMI_CEC_RX_COUNT 0x9B4
+#define HDMI_CEC_RX_CMD_HEADER 0x9B8
+#define HDMI_CEC_RX_COMMAND 0x9BC
+#define HDMI_CEC_RX_OPERAND 0x9C0
+
+#define HDMI_CEC_TX_FIFO_INT_MASK 0x64
+#define HDMI_CEC_RETRANSMIT_CNT_INT_MASK 0x2
+
+#define HDMI_CORE_CEC_RETRY 200
+
+static void hdmi_cec_received_msg(struct hdmi_core_data *core)
+{
+ u32 cnt = hdmi_read_reg(core->base, HDMI_CEC_RX_COUNT) & 0xff;
+
+ /* While there are CEC frames in the FIFO */
+ while (cnt & 0x70) {
+ /* and the frame doesn't have an error */
+ if (!(cnt & 0x80)) {
+ struct cec_msg msg = {};
+ unsigned int i;
+
+ /* then read the message */
+ msg.len = cnt & 0xf;
+ msg.msg[0] = hdmi_read_reg(core->base,
+ HDMI_CEC_RX_CMD_HEADER);
+ msg.msg[1] = hdmi_read_reg(core->base,
+ HDMI_CEC_RX_COMMAND);
+ for (i = 0; i < msg.len; i++) {
+ unsigned int reg = HDMI_CEC_RX_OPERAND + i * 4;
+
+ msg.msg[2 + i] =
+ hdmi_read_reg(core->base, reg);
+ }
+ msg.len += 2;
+ cec_received_msg(core->adap, &msg);
+ }
+ /* Clear the current frame from the FIFO */
+ hdmi_write_reg(core->base, HDMI_CEC_RX_CONTROL, 1);
+ /* Wait until the current frame is cleared */
+ while (hdmi_read_reg(core->base, HDMI_CEC_RX_CONTROL) & 1)
+ udelay(1);
+ /*
+ * Re-read the count register and loop to see if there are
+ * more messages in the FIFO.
+ */
+ cnt = hdmi_read_reg(core->base, HDMI_CEC_RX_COUNT) & 0xff;
+ }
+}
+
+static void hdmi_cec_transmit_fifo_empty(struct hdmi_core_data *core, u32 stat1)
+{
+ if (stat1 & 2) {
+ u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
+
+ cec_transmit_done(core->adap,
+ CEC_TX_STATUS_NACK |
+ CEC_TX_STATUS_MAX_RETRIES,
+ 0, (dbg3 >> 4) & 7, 0, 0);
+ } else if (stat1 & 1) {
+ cec_transmit_done(core->adap,
+ CEC_TX_STATUS_ARB_LOST |
+ CEC_TX_STATUS_MAX_RETRIES,
+ 0, 0, 0, 0);
+ } else if (stat1 == 0) {
+ cec_transmit_done(core->adap, CEC_TX_STATUS_OK,
+ 0, 0, 0, 0);
+ }
+}
+
+void hdmi4_cec_irq(struct hdmi_core_data *core)
+{
+ u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0);
+ u32 stat1 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_1);
+
+ hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0);
+ hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1);
+
+ if (stat0 & 0x40)
+ REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
+ else if (stat0 & 0x24)
+ hdmi_cec_transmit_fifo_empty(core, stat1);
+ if (stat1 & 2) {
+ u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
+
+ cec_transmit_done(core->adap,
+ CEC_TX_STATUS_NACK |
+ CEC_TX_STATUS_MAX_RETRIES,
+ 0, (dbg3 >> 4) & 7, 0, 0);
+ } else if (stat1 & 1) {
+ cec_transmit_done(core->adap,
+ CEC_TX_STATUS_ARB_LOST |
+ CEC_TX_STATUS_MAX_RETRIES,
+ 0, 0, 0, 0);
+ }
+ if (stat0 & 0x02)
+ hdmi_cec_received_msg(core);
+ if (stat1 & 0x3)
+ REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
+}
+
+static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap)
+{
+ struct hdmi_core_data *core = cec_get_drvdata(adap);
+ int retry = HDMI_CORE_CEC_RETRY;
+ int temp;
+
+ REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7);
+ while (retry) {
+ temp = hdmi_read_reg(core->base, HDMI_CEC_DBG_3);
+ if (FLD_GET(temp, 7, 7) == 0)
+ break;
+ retry--;
+ }
+ return retry != 0;
+}
+
+static bool hdmi_cec_clear_rx_fifo(struct cec_adapter *adap)
+{
+ struct hdmi_core_data *core = cec_get_drvdata(adap);
+ int retry = HDMI_CORE_CEC_RETRY;
+ int temp;
+
+ hdmi_write_reg(core->base, HDMI_CEC_RX_CONTROL, 0x3);
+ retry = HDMI_CORE_CEC_RETRY;
+ while (retry) {
+ temp = hdmi_read_reg(core->base, HDMI_CEC_RX_CONTROL);
+ if (FLD_GET(temp, 1, 0) == 0)
+ break;
+ retry--;
+ }
+ return retry != 0;
+}
+
+static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct hdmi_core_data *core = cec_get_drvdata(adap);
+ int temp, err;
+
+ if (!enable) {
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0);
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0);
+ REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
+ hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
+ hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
+ hdmi4_core_disable(NULL);
+ return 0;
+ }
+ err = hdmi4_core_enable(NULL);
+ if (err)
+ return err;
+
+ /* Clear TX FIFO */
+ if (!hdmi_cec_clear_tx_fifo(adap)) {
+ pr_err("cec-%s: could not clear TX FIFO\n", adap->name);
+ return -EIO;
+ }
+
+ /* Clear RX FIFO */
+ if (!hdmi_cec_clear_rx_fifo(adap)) {
+ pr_err("cec-%s: could not clear RX FIFO\n", adap->name);
+ return -EIO;
+ }
+
+ /* Clear CEC interrupts */
+ hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1,
+ hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_1));
+ hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0,
+ hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0));
+
+ /* Enable HDMI core interrupts */
+ hdmi_wp_set_irqenable(core->wp, HDMI_IRQ_CORE);
+ /* Unmask CEC interrupt */
+ REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0x1, 3, 3);
+ /*
+ * Enable CEC interrupts:
+ * Transmit Buffer Full/Empty Change event
+ * Transmitter FIFO Empty event
+ * Receiver FIFO Not Empty event
+ */
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x26);
+ /*
+ * Enable CEC interrupts:
+ * RX FIFO Overrun Error event
+ * Short Pulse Detected event
+ * Frame Retransmit Count Exceeded event
+ * Start Bit Irregularity event
+ */
+ hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x0f);
+
+ /* cec calibration enable (self clearing) */
+ hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03);
+ msleep(20);
+ hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x04);
+
+ temp = hdmi_read_reg(core->base, HDMI_CEC_SETUP);
+ if (FLD_GET(temp, 4, 4) != 0) {
+ temp = FLD_MOD(temp, 0, 4, 4);
+ hdmi_write_reg(core->base, HDMI_CEC_SETUP, temp);
+
+ /*
+		 * If we enabled CEC in the middle of a CEC message on the bus,
+		 * we could have a start bit irregularity and/or a short
+		 * pulse event. Clear them now.
+ */
+ temp = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_1);
+ temp = FLD_MOD(0x0, 0x5, 2, 0);
+ hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, temp);
+ }
+ return 0;
+}
+
+static int hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+ struct hdmi_core_data *core = cec_get_drvdata(adap);
+ u32 v;
+
+ if (log_addr == CEC_LOG_ADDR_INVALID) {
+ hdmi_write_reg(core->base, HDMI_CEC_CA_7_0, 0);
+ hdmi_write_reg(core->base, HDMI_CEC_CA_15_8, 0);
+ return 0;
+ }
+ if (log_addr <= 7) {
+ v = hdmi_read_reg(core->base, HDMI_CEC_CA_7_0);
+ v |= 1 << log_addr;
+ hdmi_write_reg(core->base, HDMI_CEC_CA_7_0, v);
+ } else {
+ v = hdmi_read_reg(core->base, HDMI_CEC_CA_15_8);
+ v |= 1 << (log_addr - 8);
+ hdmi_write_reg(core->base, HDMI_CEC_CA_15_8, v);
+ }
+ return 0;
+}
+
+static int hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct hdmi_core_data *core = cec_get_drvdata(adap);
+ int temp;
+ u32 i;
+
+ /* Clear TX FIFO */
+ if (!hdmi_cec_clear_tx_fifo(adap)) {
+ pr_err("cec-%s: could not clear TX FIFO for transmit\n",
+ adap->name);
+ return -EIO;
+ }
+
+ /* Clear TX interrupts */
+ hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0,
+ HDMI_CEC_TX_FIFO_INT_MASK);
+
+ hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1,
+ HDMI_CEC_RETRANSMIT_CNT_INT_MASK);
+
+ /* Set the retry count */
+ REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, attempts - 1, 6, 4);
+
+ /* Set the initiator addresses */
+ hdmi_write_reg(core->base, HDMI_CEC_TX_INIT, cec_msg_initiator(msg));
+
+ /* Set destination id */
+ temp = cec_msg_destination(msg);
+ if (msg->len == 1)
+ temp |= 0x80;
+ hdmi_write_reg(core->base, HDMI_CEC_TX_DEST, temp);
+ if (msg->len == 1)
+ return 0;
+
+ /* Setup command and arguments for the command */
+ hdmi_write_reg(core->base, HDMI_CEC_TX_COMMAND, msg->msg[1]);
+
+ for (i = 0; i < msg->len - 2; i++)
+ hdmi_write_reg(core->base, HDMI_CEC_TX_OPERAND + i * 4,
+ msg->msg[2 + i]);
+
+ /* Operand count */
+ hdmi_write_reg(core->base, HDMI_CEC_TRANSMIT_DATA,
+ (msg->len - 2) | 0x10);
+ return 0;
+}
+
+static const struct cec_adap_ops hdmi_cec_adap_ops = {
+ .adap_enable = hdmi_cec_adap_enable,
+ .adap_log_addr = hdmi_cec_adap_log_addr,
+ .adap_transmit = hdmi_cec_adap_transmit,
+};
+
+void hdmi4_cec_set_phys_addr(struct hdmi_core_data *core, u16 pa)
+{
+ cec_s_phys_addr(core->adap, pa, false);
+}
+
+int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
+ struct hdmi_wp_data *wp)
+{
+ const u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC;
+	int ret;
+
+ core->adap = cec_allocate_adapter(&hdmi_cec_adap_ops, core,
+ "omap4", caps, CEC_MAX_LOG_ADDRS);
+ ret = PTR_ERR_OR_ZERO(core->adap);
+ if (ret < 0)
+ return ret;
+ core->wp = wp;
+
+ /*
+	 * Initialize the CEC clock divider: CEC needs a 2 MHz clock, hence
+	 * set the divider to 24 to get 48/24 = 2 MHz.
+ */
+ REG_FLD_MOD(core->wp->base, HDMI_WP_CLK, 0x18, 5, 0);
+
+ ret = cec_register_adapter(core->adap, &pdev->dev);
+ if (ret < 0) {
+ cec_delete_adapter(core->adap);
+ return ret;
+ }
+ return 0;
+}
+
+void hdmi4_cec_uninit(struct hdmi_core_data *core)
+{
+ cec_unregister_adapter(core->adap);
+}
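
hdmi_cec_received_msg() above keys everything off HDMI_CEC_RX_COUNT: judging by the masks, bits 6:4 count the frames waiting in the RX FIFO, bit 7 flags an error on the frame at the head of the FIFO, and bits 3:0 give the operand count, to which the two header/opcode bytes are added for the final message length. A standalone decode sketch under that (inferred) reading:

#include <stdint.h>
#include <stdio.h>

struct rx_count {
	unsigned frames_in_fifo;	/* bits 6:4 */
	unsigned current_frame_error;	/* bit 7 */
	unsigned msg_len;		/* (bits 3:0) + 2 header/opcode bytes */
};

static struct rx_count decode_rx_count(uint8_t cnt)
{
	struct rx_count rc = {
		.frames_in_fifo      = (cnt >> 4) & 0x7,
		.current_frame_error = (cnt >> 7) & 0x1,
		.msg_len             = (cnt & 0xf) + 2,
	};
	return rc;
}

int main(void)
{
	struct rx_count rc = decode_rx_count(0x12);	/* 1 frame, 2 operands */

	printf("frames=%u error=%u len=%u\n",
	       rc.frames_in_fifo, rc.current_frame_error, rc.msg_len);
	return 0;
}
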
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h
new file mode 100644
index 000000000000..0292337c97cc
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h
@@ -0,0 +1,55 @@
+/*
+ * HDMI header definition for OMAP4 HDMI CEC IP
+ *
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HDMI4_CEC_H_
+#define _HDMI4_CEC_H_
+
+struct hdmi_core_data;
+struct hdmi_wp_data;
+struct platform_device;
+
+/* HDMI CEC funcs */
+#ifdef CONFIG_OMAP4_DSS_HDMI_CEC
+void hdmi4_cec_set_phys_addr(struct hdmi_core_data *core, u16 pa);
+void hdmi4_cec_irq(struct hdmi_core_data *core);
+int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
+ struct hdmi_wp_data *wp);
+void hdmi4_cec_uninit(struct hdmi_core_data *core);
+#else
+static inline void hdmi4_cec_set_phys_addr(struct hdmi_core_data *core, u16 pa)
+{
+}
+
+static inline void hdmi4_cec_irq(struct hdmi_core_data *core)
+{
+}
+
+static inline int hdmi4_cec_init(struct platform_device *pdev,
+ struct hdmi_core_data *core,
+ struct hdmi_wp_data *wp)
+{
+ return 0;
+}
+
+static inline void hdmi4_cec_uninit(struct hdmi_core_data *core)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 365cf07daa01..62e451162d96 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -208,9 +208,9 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK;
}
-static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
+void hdmi4_core_powerdown_disable(struct hdmi_core_data *core)
{
- DSSDBG("Enter hdmi_core_powerdown_disable\n");
+ DSSDBG("Enter hdmi4_core_powerdown_disable\n");
REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0);
}
@@ -335,9 +335,6 @@ void hdmi4_configure(struct hdmi_core_data *core,
*/
hdmi_core_swreset_assert(core);
- /* power down off */
- hdmi_core_powerdown_disable(core);
-
v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
v_core_cfg.hdmi_dvi = cfg->hdmi_dvi_mode;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
index a069f96ec6f6..b6ab579e44d2 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
@@ -266,6 +266,10 @@ void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
+int hdmi4_core_enable(struct omap_dss_device *dssdev);
+void hdmi4_core_disable(struct omap_dss_device *dssdev);
+void hdmi4_core_powerdown_disable(struct hdmi_core_data *core);
+
int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 47a331670963..990422b35784 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -395,6 +395,7 @@ struct omapdss_hdmi_ops {
struct videomode *vm);
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+ void (*lost_hotplug)(struct omap_dss_device *dssdev);
bool (*detect)(struct omap_dss_device *dssdev);
int (*register_hpd_cb)(struct omap_dss_device *dssdev,
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d84a031fae24..726f3fb3312d 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -63,6 +63,15 @@ config DRM_PANEL_LG_LG4573
Say Y here if you want to enable support for LG4573 RGB panel.
To compile this driver as a module, choose M here.
+config DRM_PANEL_ORISETECH_OTM8009A
+ tristate "Orise Technology otm8009a 480x800 dsi 2dl panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Orise Technology
+ otm8009a 480x800 dsi 2dl panel.
+
config DRM_PANEL_PANASONIC_VVX10F034N00
tristate "Panasonic VVX10F034N00 1920x1200 video mode panel"
depends on OF
@@ -73,6 +82,14 @@ config DRM_PANEL_PANASONIC_VVX10F034N00
WUXGA (1920x1200) Novatek NT1397-based DSI panel as found in some
Xperia Z2 tablets
+config DRM_PANEL_RASPBERRYPI_TOUCHSCREEN
+ tristate "Raspberry Pi 7-inch touchscreen panel"
+ depends on DRM_MIPI_DSI
+ help
+ Say Y here if you want to enable support for the Raspberry
+ Pi 7" Touchscreen. To compile this driver as a module,
+ choose M here.
+
config DRM_PANEL_SAMSUNG_S6E3HA2
tristate "Samsung S6E3HA2 DSI video mode panel"
depends on OF
@@ -80,12 +97,28 @@ config DRM_PANEL_SAMSUNG_S6E3HA2
depends on BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
+config DRM_PANEL_SAMSUNG_S6E63J0X03
+ tristate "Samsung S6E63J0X03 DSI command mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+
config DRM_PANEL_SAMSUNG_S6E8AA0
tristate "Samsung S6E8AA0 DSI video mode panel"
depends on OF
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
+config DRM_PANEL_SEIKO_43WVF1G
+ tristate "Seiko 43WVF1G panel"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+ help
+ Say Y here if you want to enable support for the Seiko
+	  43WVF1G controller for 800x480 LCD panels.
+
config DRM_PANEL_SHARP_LQ101R1SX01
tristate "Sharp LQ101R1SX01 panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index d73d3e661cec..2c4e1a93e05f 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -4,10 +4,14 @@ obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
+obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
+obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
new file mode 100644
index 000000000000..c189cd6329c8
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ *
+ * Authors: Philippe Cornu <philippe.cornu@st.com>
+ * Yannick Fertre <yannick.fertre@st.com>
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <video/mipi_display.h>
+
+#define DRV_NAME "orisetech_otm8009a"
+
+#define OTM8009A_BACKLIGHT_DEFAULT 240
+#define OTM8009A_BACKLIGHT_MAX 255
+
+/* Manufacturer Command Set */
+#define MCS_ADRSFT 0x0000 /* Address Shift Function */
+#define MCS_PANSET 0xB3A6 /* Panel Type Setting */
+#define MCS_SD_CTRL 0xC0A2 /* Source Driver Timing Setting */
+#define MCS_P_DRV_M 0xC0B4 /* Panel Driving Mode */
+#define MCS_OSC_ADJ 0xC181 /* Oscillator Adjustment for Idle/Normal mode */
+#define MCS_RGB_VID_SET 0xC1A1 /* RGB Video Mode Setting */
+#define MCS_SD_PCH_CTRL 0xC480 /* Source Driver Precharge Control */
+#define MCS_NO_DOC1 0xC48A /* Command not documented */
+#define MCS_PWR_CTRL1 0xC580 /* Power Control Setting 1 */
+#define MCS_PWR_CTRL2 0xC590 /* Power Control Setting 2 for Normal Mode */
+#define MCS_PWR_CTRL4 0xC5B0 /* Power Control Setting 4 for DC Voltage */
+#define MCS_PANCTRLSET1 0xCB80 /* Panel Control Setting 1 */
+#define MCS_PANCTRLSET2 0xCB90 /* Panel Control Setting 2 */
+#define MCS_PANCTRLSET3 0xCBA0 /* Panel Control Setting 3 */
+#define MCS_PANCTRLSET4 0xCBB0 /* Panel Control Setting 4 */
+#define MCS_PANCTRLSET5 0xCBC0 /* Panel Control Setting 5 */
+#define MCS_PANCTRLSET6 0xCBD0 /* Panel Control Setting 6 */
+#define MCS_PANCTRLSET7 0xCBE0 /* Panel Control Setting 7 */
+#define MCS_PANCTRLSET8 0xCBF0 /* Panel Control Setting 8 */
+#define MCS_PANU2D1 0xCC80 /* Panel U2D Setting 1 */
+#define MCS_PANU2D2 0xCC90 /* Panel U2D Setting 2 */
+#define MCS_PANU2D3 0xCCA0 /* Panel U2D Setting 3 */
+#define MCS_PAND2U1 0xCCB0 /* Panel D2U Setting 1 */
+#define MCS_PAND2U2 0xCCC0 /* Panel D2U Setting 2 */
+#define MCS_PAND2U3 0xCCD0 /* Panel D2U Setting 3 */
+#define MCS_GOAVST 0xCE80 /* GOA VST Setting */
+#define MCS_GOACLKA1 0xCEA0 /* GOA CLKA1 Setting */
+#define MCS_GOACLKA3 0xCEB0 /* GOA CLKA3 Setting */
+#define MCS_GOAECLK 0xCFC0 /* GOA ECLK Setting */
+#define MCS_NO_DOC2 0xCFD0 /* Command not documented */
+#define MCS_GVDDSET 0xD800 /* GVDD/NGVDD */
+#define MCS_VCOMDC 0xD900 /* VCOM Voltage Setting */
+#define MCS_GMCT2_2P 0xE100 /* Gamma Correction 2.2+ Setting */
+#define MCS_GMCT2_2N 0xE200 /* Gamma Correction 2.2- Setting */
+#define MCS_NO_DOC3 0xF5B6 /* Command not documented */
+#define MCS_CMD2_ENA1 0xFF00 /* Enable Access Command2 "CMD2" */
+#define MCS_CMD2_ENA2 0xFF80 /* Enable Access Orise Command2 */
+
+struct otm8009a {
+ struct device *dev;
+ struct drm_panel panel;
+ struct backlight_device *bl_dev;
+ struct gpio_desc *reset_gpio;
+ bool prepared;
+ bool enabled;
+};
+
+static const struct drm_display_mode default_mode = {
+ .clock = 32729,
+ .hdisplay = 480,
+ .hsync_start = 480 + 120,
+ .hsync_end = 480 + 120 + 63,
+ .htotal = 480 + 120 + 63 + 120,
+ .vdisplay = 800,
+ .vsync_start = 800 + 12,
+ .vsync_end = 800 + 12 + 12,
+ .vtotal = 800 + 12 + 12 + 12,
+ .vrefresh = 50,
+ .flags = 0,
+ .width_mm = 52,
+ .height_mm = 86,
+};
+
+static inline struct otm8009a *panel_to_otm8009a(struct drm_panel *panel)
+{
+ return container_of(panel, struct otm8009a, panel);
+}
+
+static void otm8009a_dcs_write_buf(struct otm8009a *ctx, const void *data,
+ size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ if (mipi_dsi_dcs_write_buffer(dsi, data, len) < 0)
+ DRM_WARN("mipi dsi dcs write buffer failed\n");
+}
+
+#define dcs_write_seq(ctx, seq...) \
+({ \
+ static const u8 d[] = { seq }; \
+ otm8009a_dcs_write_buf(ctx, d, ARRAY_SIZE(d)); \
+})
+
+#define dcs_write_cmd_at(ctx, cmd, seq...) \
+({ \
+ dcs_write_seq(ctx, MCS_ADRSFT, (cmd) & 0xFF); \
+ dcs_write_seq(ctx, (cmd) >> 8, seq); \
+})
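
The MCS_* constants above are 16-bit values in which the high byte is the DCS command actually sent and the low byte is a register offset that dcs_write_cmd_at() first programs through the address-shift command MCS_ADRSFT (0x0000); MCS_PWR_CTRL2 + 1 (0xC591), for example, addresses offset 0x91 under command 0xC5. A tiny standalone sketch of that split, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Split a 16-bit MCS "command" the way dcs_write_cmd_at() uses it. */
static void split_mcs(uint16_t cmd, uint8_t *dcs_cmd, uint8_t *offset)
{
	*dcs_cmd = cmd >> 8;	/* byte sent as the DCS command */
	*offset  = cmd & 0xff;	/* programmed first via the address shift */
}

int main(void)
{
	uint8_t dcs_cmd, offset;

	split_mcs(0xC590 + 1, &dcs_cmd, &offset);	/* MCS_PWR_CTRL2 + 1 */
	printf("DCS command 0x%02x, address-shift offset 0x%02x\n",
	       dcs_cmd, offset);
	return 0;
}
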
+
+static int otm8009a_init_sequence(struct otm8009a *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ /* Enter CMD2 */
+ dcs_write_cmd_at(ctx, MCS_CMD2_ENA1, 0x80, 0x09, 0x01);
+
+ /* Enter Orise Command2 */
+ dcs_write_cmd_at(ctx, MCS_CMD2_ENA2, 0x80, 0x09);
+
+ dcs_write_cmd_at(ctx, MCS_SD_PCH_CTRL, 0x30);
+ mdelay(10);
+
+ dcs_write_cmd_at(ctx, MCS_NO_DOC1, 0x40);
+ mdelay(10);
+
+ dcs_write_cmd_at(ctx, MCS_PWR_CTRL4 + 1, 0xA9);
+ dcs_write_cmd_at(ctx, MCS_PWR_CTRL2 + 1, 0x34);
+ dcs_write_cmd_at(ctx, MCS_P_DRV_M, 0x50);
+ dcs_write_cmd_at(ctx, MCS_VCOMDC, 0x4E);
+ dcs_write_cmd_at(ctx, MCS_OSC_ADJ, 0x66); /* 65Hz */
+ dcs_write_cmd_at(ctx, MCS_PWR_CTRL2 + 2, 0x01);
+ dcs_write_cmd_at(ctx, MCS_PWR_CTRL2 + 5, 0x34);
+ dcs_write_cmd_at(ctx, MCS_PWR_CTRL2 + 4, 0x33);
+ dcs_write_cmd_at(ctx, MCS_GVDDSET, 0x79, 0x79);
+ dcs_write_cmd_at(ctx, MCS_SD_CTRL + 1, 0x1B);
+ dcs_write_cmd_at(ctx, MCS_PWR_CTRL1 + 2, 0x83);
+ dcs_write_cmd_at(ctx, MCS_SD_PCH_CTRL + 1, 0x83);
+ dcs_write_cmd_at(ctx, MCS_RGB_VID_SET, 0x0E);
+ dcs_write_cmd_at(ctx, MCS_PANSET, 0x00, 0x01);
+
+ dcs_write_cmd_at(ctx, MCS_GOAVST, 0x85, 0x01, 0x00, 0x84, 0x01, 0x00);
+ dcs_write_cmd_at(ctx, MCS_GOACLKA1, 0x18, 0x04, 0x03, 0x39, 0x00, 0x00,
+ 0x00, 0x18, 0x03, 0x03, 0x3A, 0x00, 0x00, 0x00);
+ dcs_write_cmd_at(ctx, MCS_GOACLKA3, 0x18, 0x02, 0x03, 0x3B, 0x00, 0x00,
+ 0x00, 0x18, 0x01, 0x03, 0x3C, 0x00, 0x00, 0x00);
+ dcs_write_cmd_at(ctx, MCS_GOAECLK, 0x01, 0x01, 0x20, 0x20, 0x00, 0x00,
+ 0x01, 0x02, 0x00, 0x00);
+
+ dcs_write_cmd_at(ctx, MCS_NO_DOC2, 0x00);
+
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0);
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0);
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET5, 0, 4, 4, 4, 4, 4, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0);
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET6, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4,
+ 4, 0, 0, 0, 0);
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ dcs_write_cmd_at(ctx, MCS_PANCTRLSET8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);
+
+ dcs_write_cmd_at(ctx, MCS_PANU2D1, 0x00, 0x26, 0x09, 0x0B, 0x01, 0x25,
+ 0x00, 0x00, 0x00, 0x00);
+ dcs_write_cmd_at(ctx, MCS_PANU2D2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x0A, 0x0C, 0x02);
+ dcs_write_cmd_at(ctx, MCS_PANU2D3, 0x25, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dcs_write_cmd_at(ctx, MCS_PAND2U1, 0x00, 0x25, 0x0C, 0x0A, 0x02, 0x26,
+ 0x00, 0x00, 0x00, 0x00);
+ dcs_write_cmd_at(ctx, MCS_PAND2U2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x0B, 0x09, 0x01);
+ dcs_write_cmd_at(ctx, MCS_PAND2U3, 0x26, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+
+ dcs_write_cmd_at(ctx, MCS_PWR_CTRL1 + 1, 0x66);
+
+ dcs_write_cmd_at(ctx, MCS_NO_DOC3, 0x06);
+
+ dcs_write_cmd_at(ctx, MCS_GMCT2_2P, 0x00, 0x09, 0x0F, 0x0E, 0x07, 0x10,
+ 0x0B, 0x0A, 0x04, 0x07, 0x0B, 0x08, 0x0F, 0x10, 0x0A,
+ 0x01);
+ dcs_write_cmd_at(ctx, MCS_GMCT2_2N, 0x00, 0x09, 0x0F, 0x0E, 0x07, 0x10,
+ 0x0B, 0x0A, 0x04, 0x07, 0x0B, 0x08, 0x0F, 0x10, 0x0A,
+ 0x01);
+
+ /* Exit CMD2 */
+ dcs_write_cmd_at(ctx, MCS_CMD2_ENA1, 0xFF, 0xFF, 0xFF);
+
+ ret = mipi_dsi_dcs_nop(dsi);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret)
+ return ret;
+
+ /* Wait for sleep out exit */
+ mdelay(120);
+
+ /* Default portrait 480x800 rgb24 */
+ dcs_write_seq(ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+
+ ret = mipi_dsi_dcs_set_column_address(dsi, 0,
+ default_mode.hdisplay - 1);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_set_page_address(dsi, 0, default_mode.vdisplay - 1);
+ if (ret)
+ return ret;
+
+ /* See otm8009a driver documentation for pixel format descriptions */
+ ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT |
+ MIPI_DCS_PIXEL_FMT_24BIT << 4);
+ if (ret)
+ return ret;
+
+ /* Disable CABC feature */
+ dcs_write_seq(ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_nop(dsi);
+ if (ret)
+ return ret;
+
+ /* Send Command GRAM memory write (no parameters) */
+ dcs_write_seq(ctx, MIPI_DCS_WRITE_MEMORY_START);
+
+ return 0;
+}
+
+static int otm8009a_disable(struct drm_panel *panel)
+{
+ struct otm8009a *ctx = panel_to_otm8009a(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->enabled)
+ return 0; /* This is not an issue so we return 0 here */
+
+ /* Power off the backlight. Note: end-user still controls brightness */
+ ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ ret = backlight_update_status(ctx->bl_dev);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret)
+ return ret;
+
+ msleep(120);
+
+ ctx->enabled = false;
+
+ return 0;
+}
+
+static int otm8009a_unprepare(struct drm_panel *panel)
+{
+ struct otm8009a *ctx = panel_to_otm8009a(panel);
+
+ if (!ctx->prepared)
+ return 0;
+
+ if (ctx->reset_gpio) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(20);
+ }
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int otm8009a_prepare(struct drm_panel *panel)
+{
+ struct otm8009a *ctx = panel_to_otm8009a(panel);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ if (ctx->reset_gpio) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(100);
+ }
+
+ ret = otm8009a_init_sequence(ctx);
+ if (ret)
+ return ret;
+
+ ctx->prepared = true;
+
+ /*
+ * Power on the backlight. Note: end-user still controls brightness
+ * Note: ctx->prepared must be true before updating the backlight.
+ */
+ ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(ctx->bl_dev);
+
+ return 0;
+}
+
+static int otm8009a_enable(struct drm_panel *panel)
+{
+ struct otm8009a *ctx = panel_to_otm8009a(panel);
+
+ ctx->enabled = true;
+
+ return 0;
+}
+
+static int otm8009a_get_modes(struct drm_panel *panel)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_ERROR("failed to add mode %ux%ux@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(panel->connector, mode);
+
+ panel->connector->display_info.width_mm = mode->width_mm;
+ panel->connector->display_info.height_mm = mode->height_mm;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs otm8009a_drm_funcs = {
+ .disable = otm8009a_disable,
+ .unprepare = otm8009a_unprepare,
+ .prepare = otm8009a_prepare,
+ .enable = otm8009a_enable,
+ .get_modes = otm8009a_get_modes,
+};
+
+/*
+ * DSI-BASED BACKLIGHT
+ */
+
+static int otm8009a_backlight_update_status(struct backlight_device *bd)
+{
+ struct otm8009a *ctx = bl_get_data(bd);
+ u8 data[2];
+
+ if (!ctx->prepared) {
+ DRM_DEBUG("lcd not ready yet for setting its backlight!\n");
+ return -ENXIO;
+ }
+
+ if (bd->props.power <= FB_BLANK_NORMAL) {
+ /* Power on the backlight with the requested brightness
+ * Note: we cannot use mipi_dsi_dcs_set_display_brightness() here,
+ * as the otm8009a supports only 8-bit brightness (one parameter).
+ */
+ data[0] = MIPI_DCS_SET_DISPLAY_BRIGHTNESS;
+ data[1] = bd->props.brightness;
+ otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+
+ /* set Brightness Control & Backlight on */
+ data[1] = 0x24;
+
+ } else {
+ /* Power off the backlight: set Brightness Control & Bl off */
+ data[1] = 0;
+ }
+
+ /* Update Brightness Control & Backlight */
+ data[0] = MIPI_DCS_WRITE_CONTROL_DISPLAY;
+ otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+
+ return 0;
+}
+
+static const struct backlight_ops otm8009a_backlight_ops = {
+ .update_status = otm8009a_backlight_update_status,
+};
+
+static int otm8009a_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct otm8009a *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ dev_err(dev, "cannot get reset-gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &otm8009a_drm_funcs;
+
+ ctx->bl_dev = backlight_device_register(DRV_NAME "_backlight", dev, ctx,
+ &otm8009a_backlight_ops, NULL);
+ if (IS_ERR(ctx->bl_dev)) {
+ dev_err(dev, "failed to register backlight device\n");
+ return PTR_ERR(ctx->bl_dev);
+ }
+
+ ctx->bl_dev->props.max_brightness = OTM8009A_BACKLIGHT_MAX;
+ ctx->bl_dev->props.brightness = OTM8009A_BACKLIGHT_DEFAULT;
+ ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+ ctx->bl_dev->props.type = BACKLIGHT_RAW;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "mipi_dsi_attach failed. Is host ready?\n");
+ drm_panel_remove(&ctx->panel);
+ backlight_device_unregister(ctx->bl_dev);
+ return ret;
+ }
+
+ DRM_INFO(DRV_NAME "_panel %ux%u@%u %ubpp dsi %udl - ready\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh,
+ mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
+
+ return 0;
+}
+
+static int otm8009a_remove(struct mipi_dsi_device *dsi)
+{
+ struct otm8009a *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ backlight_device_unregister(ctx->bl_dev);
+
+ return 0;
+}
+
+static const struct of_device_id orisetech_otm8009a_of_match[] = {
+ { .compatible = "orisetech,otm8009a" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, orisetech_otm8009a_of_match);
+
+static struct mipi_dsi_driver orisetech_otm8009a_driver = {
+ .probe = otm8009a_probe,
+ .remove = otm8009a_remove,
+ .driver = {
+ .name = DRV_NAME "_panel",
+ .of_match_table = orisetech_otm8009a_of_match,
+ },
+};
+module_mipi_dsi_driver(orisetech_otm8009a_driver);
+
+MODULE_AUTHOR("Philippe Cornu <philippe.cornu@st.com>");
+MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
+MODULE_DESCRIPTION("DRM driver for Orise Tech OTM8009A MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
new file mode 100644
index 000000000000..890fd6ff397c
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -0,0 +1,514 @@
+/*
+ * Copyright © 2016-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Portions of this file (derived from panel-simple.c) are:
+ *
+ * Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Raspberry Pi 7" touchscreen panel driver.
+ *
+ * The 7" touchscreen consists of a DPI LCD panel, a Toshiba
+ * TC358762XBG DSI-DPI bridge, and an I2C-connected Atmel ATTINY88-MUR
+ * controlling power management, the LCD PWM, and initial register
+ * setup of the Toshiba.
+ *
+ * This driver controls the TC358762 and ATTINY88, presenting a DSI
+ * device with a drm_panel.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/pm.h>
+
+#include <drm/drm_panel.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#define RPI_DSI_DRIVER_NAME "rpi-ts-dsi"
+
+/* I2C registers of the Atmel microcontroller. */
+enum REG_ADDR {
+ REG_ID = 0x80,
+ REG_PORTA, /* BIT(2) for horizontal flip, BIT(3) for vertical flip */
+ REG_PORTB,
+ REG_PORTC,
+ REG_PORTD,
+ REG_POWERON,
+ REG_PWM,
+ REG_DDRA,
+ REG_DDRB,
+ REG_DDRC,
+ REG_DDRD,
+ REG_TEST,
+ REG_WR_ADDRL,
+ REG_WR_ADDRH,
+ REG_READH,
+ REG_READL,
+ REG_WRITEH,
+ REG_WRITEL,
+ REG_ID2,
+};
+
+/* DSI D-PHY Layer Registers */
+#define D0W_DPHYCONTTX 0x0004
+#define CLW_DPHYCONTRX 0x0020
+#define D0W_DPHYCONTRX 0x0024
+#define D1W_DPHYCONTRX 0x0028
+#define COM_DPHYCONTRX 0x0038
+#define CLW_CNTRL 0x0040
+#define D0W_CNTRL 0x0044
+#define D1W_CNTRL 0x0048
+#define DFTMODE_CNTRL 0x0054
+
+/* DSI PPI Layer Registers */
+#define PPI_STARTPPI 0x0104
+#define PPI_BUSYPPI 0x0108
+#define PPI_LINEINITCNT 0x0110
+#define PPI_LPTXTIMECNT 0x0114
+#define PPI_CLS_ATMR 0x0140
+#define PPI_D0S_ATMR 0x0144
+#define PPI_D1S_ATMR 0x0148
+#define PPI_D0S_CLRSIPOCOUNT 0x0164
+#define PPI_D1S_CLRSIPOCOUNT 0x0168
+#define CLS_PRE 0x0180
+#define D0S_PRE 0x0184
+#define D1S_PRE 0x0188
+#define CLS_PREP 0x01A0
+#define D0S_PREP 0x01A4
+#define D1S_PREP 0x01A8
+#define CLS_ZERO 0x01C0
+#define D0S_ZERO 0x01C4
+#define D1S_ZERO 0x01C8
+#define PPI_CLRFLG 0x01E0
+#define PPI_CLRSIPO 0x01E4
+#define HSTIMEOUT 0x01F0
+#define HSTIMEOUTENABLE 0x01F4
+
+/* DSI Protocol Layer Registers */
+#define DSI_STARTDSI 0x0204
+#define DSI_BUSYDSI 0x0208
+#define DSI_LANEENABLE 0x0210
+# define DSI_LANEENABLE_CLOCK BIT(0)
+# define DSI_LANEENABLE_D0 BIT(1)
+# define DSI_LANEENABLE_D1 BIT(2)
+
+#define DSI_LANESTATUS0 0x0214
+#define DSI_LANESTATUS1 0x0218
+#define DSI_INTSTATUS 0x0220
+#define DSI_INTMASK 0x0224
+#define DSI_INTCLR 0x0228
+#define DSI_LPTXTO 0x0230
+#define DSI_MODE 0x0260
+#define DSI_PAYLOAD0 0x0268
+#define DSI_PAYLOAD1 0x026C
+#define DSI_SHORTPKTDAT 0x0270
+#define DSI_SHORTPKTREQ 0x0274
+#define DSI_BTASTA 0x0278
+#define DSI_BTACLR 0x027C
+
+/* DSI General Registers */
+#define DSIERRCNT 0x0300
+#define DSISIGMOD 0x0304
+
+/* DSI Application Layer Registers */
+#define APLCTRL 0x0400
+#define APLSTAT 0x0404
+#define APLERR 0x0408
+#define PWRMOD 0x040C
+#define RDPKTLN 0x0410
+#define PXLFMT 0x0414
+#define MEMWRCMD 0x0418
+
+/* LCDC/DPI Host Registers */
+#define LCDCTRL 0x0420
+#define HSR 0x0424
+#define HDISPR 0x0428
+#define VSR 0x042C
+#define VDISPR 0x0430
+#define VFUEN 0x0434
+
+/* DBI-B Host Registers */
+#define DBIBCTRL 0x0440
+
+/* SPI Master Registers */
+#define SPICMR 0x0450
+#define SPITCR 0x0454
+
+/* System Controller Registers */
+#define SYSSTAT 0x0460
+#define SYSCTRL 0x0464
+#define SYSPLL1 0x0468
+#define SYSPLL2 0x046C
+#define SYSPLL3 0x0470
+#define SYSPMCTRL 0x047C
+
+/* GPIO Registers */
+#define GPIOC 0x0480
+#define GPIOO 0x0484
+#define GPIOI 0x0488
+
+/* I2C Registers */
+#define I2CCLKCTRL 0x0490
+
+/* Chip/Rev Registers */
+#define IDREG 0x04A0
+
+/* Debug Registers */
+#define WCMDQUEUE 0x0500
+#define RCMDQUEUE 0x0504
+
+struct rpi_touchscreen {
+ struct drm_panel base;
+ struct mipi_dsi_device *dsi;
+ struct i2c_client *i2c;
+};
+
+static const struct drm_display_mode rpi_touchscreen_modes[] = {
+ {
+ /* Modeline comes from the Raspberry Pi firmware, with HFP=1
+ * plugged in and clock re-computed from that.
+ */
+ .clock = 25979400 / 1000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 1,
+ .hsync_end = 800 + 1 + 2,
+ .htotal = 800 + 1 + 2 + 46,
+ .vdisplay = 480,
+ .vsync_start = 480 + 7,
+ .vsync_end = 480 + 7 + 2,
+ .vtotal = 480 + 7 + 2 + 21,
+ .vrefresh = 60,
+ },
+};
+
+static struct rpi_touchscreen *panel_to_ts(struct drm_panel *panel)
+{
+ return container_of(panel, struct rpi_touchscreen, base);
+}
+
+static u8 rpi_touchscreen_i2c_read(struct rpi_touchscreen *ts, u8 reg)
+{
+ return i2c_smbus_read_byte_data(ts->i2c, reg);
+}
+
+static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
+ u8 reg, u8 val)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
+ if (ret)
+ dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
+}
+
+static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
+{
+#if 0
+ /* The firmware uses LP DSI transactions like this to bring up
+ * the hardware, which should be faster than using I2C to then
+ * pass to the Toshiba. However, I was unable to get it to
+ * work.
+ */
+ u8 msg[] = {
+ reg,
+ reg >> 8,
+ val,
+ val >> 8,
+ val >> 16,
+ val >> 24,
+ };
+
+ mipi_dsi_dcs_write_buffer(ts->dsi, msg, sizeof(msg));
+#else
+ rpi_touchscreen_i2c_write(ts, REG_WR_ADDRH, reg >> 8);
+ rpi_touchscreen_i2c_write(ts, REG_WR_ADDRL, reg);
+ rpi_touchscreen_i2c_write(ts, REG_WRITEH, val >> 8);
+ rpi_touchscreen_i2c_write(ts, REG_WRITEL, val);
+#endif
+
+ return 0;
+}
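+
+/*
+ * Illustration: a bridge register write goes out as four single-byte I2C
+ * transfers to the Atmel. For example, rpi_touchscreen_write(ts,
+ * DSI_LANEENABLE, DSI_LANEENABLE_CLOCK | DSI_LANEENABLE_D0), with
+ * DSI_LANEENABLE = 0x0210 and the value 0x03, writes REG_WR_ADDRH = 0x02,
+ * REG_WR_ADDRL = 0x10, REG_WRITEH = 0x00 and REG_WRITEL = 0x03.
+ */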
+
+static int rpi_touchscreen_disable(struct drm_panel *panel)
+{
+ struct rpi_touchscreen *ts = panel_to_ts(panel);
+
+ rpi_touchscreen_i2c_write(ts, REG_PWM, 0);
+
+ rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);
+ udelay(1);
+
+ return 0;
+}
+
+static int rpi_touchscreen_noop(struct drm_panel *panel)
+{
+ return 0;
+}
+
+static int rpi_touchscreen_enable(struct drm_panel *panel)
+{
+ struct rpi_touchscreen *ts = panel_to_ts(panel);
+ int i;
+
+ rpi_touchscreen_i2c_write(ts, REG_POWERON, 1);
+ /* Wait for nPWRDWN to go low to indicate poweron is done. */
+ for (i = 0; i < 100; i++) {
+ if (rpi_touchscreen_i2c_read(ts, REG_PORTB) & 1)
+ break;
+ }
+
+ rpi_touchscreen_write(ts, DSI_LANEENABLE,
+ DSI_LANEENABLE_CLOCK |
+ DSI_LANEENABLE_D0);
+ rpi_touchscreen_write(ts, PPI_D0S_CLRSIPOCOUNT, 0x05);
+ rpi_touchscreen_write(ts, PPI_D1S_CLRSIPOCOUNT, 0x05);
+ rpi_touchscreen_write(ts, PPI_D0S_ATMR, 0x00);
+ rpi_touchscreen_write(ts, PPI_D1S_ATMR, 0x00);
+ rpi_touchscreen_write(ts, PPI_LPTXTIMECNT, 0x03);
+
+ rpi_touchscreen_write(ts, SPICMR, 0x00);
+ rpi_touchscreen_write(ts, LCDCTRL, 0x00100150);
+ rpi_touchscreen_write(ts, SYSCTRL, 0x040f);
+ msleep(100);
+
+ rpi_touchscreen_write(ts, PPI_STARTPPI, 0x01);
+ rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
+ msleep(100);
+
+ /* Turn on the backlight. */
+ rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
+
+ /* Default to the same orientation as the closed source
+ * firmware used for the panel. Runtime rotation
+ * configuration will be supported using VC4's plane
+ * orientation bits.
+ */
+ rpi_touchscreen_i2c_write(ts, REG_PORTA, BIT(2));
+
+ return 0;
+}
+
+static int rpi_touchscreen_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_device *drm = panel->drm;
+ unsigned int i, num = 0;
+ static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ for (i = 0; i < ARRAY_SIZE(rpi_touchscreen_modes); i++) {
+ const struct drm_display_mode *m = &rpi_touchscreen_modes[i];
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(drm, m);
+ if (!mode) {
+ dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+ continue;
+ }
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (i == 0)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = 154;
+ connector->display_info.height_mm = 86;
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+
+ return num;
+}
+
+static const struct drm_panel_funcs rpi_touchscreen_funcs = {
+ .disable = rpi_touchscreen_disable,
+ .unprepare = rpi_touchscreen_noop,
+ .prepare = rpi_touchscreen_noop,
+ .enable = rpi_touchscreen_enable,
+ .get_modes = rpi_touchscreen_get_modes,
+};
+
+static int rpi_touchscreen_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c->dev;
+ struct rpi_touchscreen *ts;
+ struct device_node *endpoint, *dsi_host_node;
+ struct mipi_dsi_host *host;
+ int ret, ver;
+ struct mipi_dsi_device_info info = {
+ .type = RPI_DSI_DRIVER_NAME,
+ .channel = 0,
+ .node = NULL,
+ };
+
+ ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, ts);
+
+ ts->i2c = i2c;
+
+ ver = rpi_touchscreen_i2c_read(ts, REG_ID);
+ if (ver < 0) {
+ dev_err(dev, "Atmel I2C read failed: %d\n", ver);
+ return -ENODEV;
+ }
+
+ switch (ver) {
+ case 0xde: /* ver 1 */
+ case 0xc3: /* ver 2 */
+ break;
+ default:
+ dev_err(dev, "Unknown Atmel firmware revision: 0x%02x\n", ver);
+ return -ENODEV;
+ }
+
+ /* Turn off at boot, so we can cleanly sequence powering on. */
+ rpi_touchscreen_i2c_write(ts, REG_POWERON, 0);
+
+ /* Look up the DSI host. It needs to probe before we do. */
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ dsi_host_node = of_graph_get_remote_port_parent(endpoint);
+ host = of_find_mipi_dsi_host_by_node(dsi_host_node);
+ of_node_put(dsi_host_node);
+ if (!host) {
+ of_node_put(endpoint);
+ return -EPROBE_DEFER;
+ }
+
+ info.node = of_graph_get_remote_port(endpoint);
+ of_node_put(endpoint);
+
+ ts->dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(ts->dsi)) {
+ dev_err(dev, "DSI device registration failed: %ld\n",
+ PTR_ERR(ts->dsi));
+ return PTR_ERR(ts->dsi);
+ }
+
+ ts->base.dev = dev;
+ ts->base.funcs = &rpi_touchscreen_funcs;
+
+ /* This appears last, as it's what will unblock the DSI host
+ * driver's component bind function.
+ */
+ ret = drm_panel_add(&ts->base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rpi_touchscreen_remove(struct i2c_client *i2c)
+{
+ struct rpi_touchscreen *ts = i2c_get_clientdata(i2c);
+
+ mipi_dsi_detach(ts->dsi);
+
+ drm_panel_remove(&ts->base);
+
+ mipi_dsi_device_unregister(ts->dsi);
+ kfree(ts->dsi);
+
+ return 0;
+}
+
+static int rpi_touchscreen_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ int ret;
+
+ dsi->mode_flags = (MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM);
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 1;
+
+ ret = mipi_dsi_attach(dsi);
+
+ if (ret)
+ dev_err(&dsi->dev, "failed to attach dsi to host: %d\n", ret);
+
+ return ret;
+}
+
+static struct mipi_dsi_driver rpi_touchscreen_dsi_driver = {
+ .driver.name = RPI_DSI_DRIVER_NAME,
+ .probe = rpi_touchscreen_dsi_probe,
+};
+
+static const struct of_device_id rpi_touchscreen_of_ids[] = {
+ { .compatible = "raspberrypi,7inch-touchscreen-panel" },
+ { } /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, rpi_touchscreen_of_ids);
+
+static struct i2c_driver rpi_touchscreen_driver = {
+ .driver = {
+ .name = "rpi_touchscreen",
+ .of_match_table = rpi_touchscreen_of_ids,
+ },
+ .probe = rpi_touchscreen_probe,
+ .remove = rpi_touchscreen_remove,
+};
+
+static int __init rpi_touchscreen_init(void)
+{
+ mipi_dsi_driver_register(&rpi_touchscreen_dsi_driver);
+ return i2c_add_driver(&rpi_touchscreen_driver);
+}
+module_init(rpi_touchscreen_init);
+
+static void __exit rpi_touchscreen_exit(void)
+{
+ i2c_del_driver(&rpi_touchscreen_driver);
+ mipi_dsi_driver_unregister(&rpi_touchscreen_dsi_driver);
+}
+module_exit(rpi_touchscreen_exit);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Raspberry Pi 7-inch touchscreen driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
new file mode 100644
index 000000000000..aeb32aa58899
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -0,0 +1,532 @@
+/*
+ * MIPI-DSI based S6E63J0X03 AMOLED LCD 1.63 inch panel driver.
+ *
+ * Copyright (c) 2014-2017 Samsung Electronics Co., Ltd
+ *
+ * Inki Dae <inki.dae@samsung.com>
+ * Hoegeun Kwon <hoegeun.kwon@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <video/mipi_display.h>
+
+#define MCS_LEVEL2_KEY 0xf0
+#define MCS_MTP_KEY 0xf1
+#define MCS_MTP_SET3 0xd4
+
+#define MAX_BRIGHTNESS 100
+#define DEFAULT_BRIGHTNESS 80
+
+#define NUM_GAMMA_STEPS 9
+#define GAMMA_CMD_CNT 28
+
+#define FIRST_COLUMN 20
+
+struct s6e63j0x03 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct backlight_device *bl_dev;
+
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+};
+
+static const struct drm_display_mode default_mode = {
+ .clock = 4649,
+ .hdisplay = 320,
+ .hsync_start = 320 + 1,
+ .hsync_end = 320 + 1 + 1,
+ .htotal = 320 + 1 + 1 + 1,
+ .vdisplay = 320,
+ .vsync_start = 320 + 150,
+ .vsync_end = 320 + 150 + 1,
+ .vtotal = 320 + 150 + 1 + 2,
+ .vrefresh = 30,
+ .flags = 0,
+};
+
+static const unsigned char gamma_tbl[NUM_GAMMA_STEPS][GAMMA_CMD_CNT] = {
+ { /* Gamma 10 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x7f, 0x7f, 0x7f, 0x52, 0x6b, 0x6f, 0x26,
+ 0x28, 0x2d, 0x28, 0x26, 0x27, 0x33, 0x34, 0x32, 0x36, 0x36,
+ 0x35, 0x00, 0xab, 0x00, 0xae, 0x00, 0xbf
+ },
+ { /* gamma 30 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x70, 0x7f, 0x7f, 0x4e, 0x64, 0x69, 0x26,
+ 0x27, 0x2a, 0x28, 0x29, 0x27, 0x31, 0x32, 0x31, 0x35, 0x34,
+ 0x35, 0x00, 0xc4, 0x00, 0xca, 0x00, 0xdc
+ },
+ { /* gamma 60 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x65, 0x7b, 0x7d, 0x5f, 0x67, 0x68, 0x2a,
+ 0x28, 0x29, 0x28, 0x2a, 0x27, 0x31, 0x2f, 0x30, 0x34, 0x33,
+ 0x34, 0x00, 0xd9, 0x00, 0xe4, 0x00, 0xf5
+ },
+ { /* gamma 90 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x4d, 0x6f, 0x71, 0x67, 0x6a, 0x6c, 0x29,
+ 0x28, 0x28, 0x28, 0x29, 0x27, 0x30, 0x2e, 0x30, 0x32, 0x31,
+ 0x31, 0x00, 0xea, 0x00, 0xf6, 0x01, 0x09
+ },
+ { /* gamma 120 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x3d, 0x66, 0x68, 0x69, 0x69, 0x69, 0x28,
+ 0x28, 0x27, 0x28, 0x28, 0x27, 0x30, 0x2e, 0x2f, 0x31, 0x31,
+ 0x30, 0x00, 0xf9, 0x01, 0x05, 0x01, 0x1b
+ },
+ { /* gamma 150 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x31, 0x51, 0x53, 0x66, 0x66, 0x67, 0x28,
+ 0x29, 0x27, 0x28, 0x27, 0x27, 0x2e, 0x2d, 0x2e, 0x31, 0x31,
+ 0x30, 0x01, 0x04, 0x01, 0x11, 0x01, 0x29
+ },
+ { /* gamma 200 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x2f, 0x4f, 0x51, 0x67, 0x65, 0x65, 0x29,
+ 0x2a, 0x28, 0x27, 0x25, 0x26, 0x2d, 0x2c, 0x2c, 0x30, 0x30,
+ 0x30, 0x01, 0x14, 0x01, 0x23, 0x01, 0x3b
+ },
+ { /* gamma 240 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x2c, 0x4d, 0x50, 0x65, 0x63, 0x64, 0x2a,
+ 0x2c, 0x29, 0x26, 0x24, 0x25, 0x2c, 0x2b, 0x2b, 0x30, 0x30,
+ 0x30, 0x01, 0x1e, 0x01, 0x2f, 0x01, 0x47
+ },
+ { /* gamma 300 */
+ MCS_MTP_SET3,
+ 0x00, 0x00, 0x00, 0x38, 0x61, 0x64, 0x65, 0x63, 0x64, 0x28,
+ 0x2a, 0x27, 0x26, 0x23, 0x25, 0x2b, 0x2b, 0x2a, 0x30, 0x2f,
+ 0x30, 0x01, 0x2d, 0x01, 0x3f, 0x01, 0x57
+ }
+};
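+
+/*
+ * Each row above is one complete DCS payload: the MCS_MTP_SET3 command
+ * byte followed by 27 gamma parameters (GAMMA_CMD_CNT = 28 bytes in
+ * total), one row per brightness band from "Gamma 10" up to "gamma 300".
+ */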
+
+static inline struct s6e63j0x03 *panel_to_s6e63j0x03(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e63j0x03, panel);
+}
+
+static inline ssize_t s6e63j0x03_dcs_write_seq(struct s6e63j0x03 *ctx,
+ const void *seq, size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ return mipi_dsi_dcs_write_buffer(dsi, seq, len);
+}
+
+#define s6e63j0x03_dcs_write_seq_static(ctx, seq...) \
+ ({ \
+ static const u8 d[] = { seq }; \
+ s6e63j0x03_dcs_write_seq(ctx, d, ARRAY_SIZE(d)); \
+ })
+
+static inline int s6e63j0x03_enable_lv2_command(struct s6e63j0x03 *ctx)
+{
+ return s6e63j0x03_dcs_write_seq_static(ctx, MCS_LEVEL2_KEY, 0x5a, 0x5a);
+}
+
+static inline int s6e63j0x03_apply_mtp_key(struct s6e63j0x03 *ctx, bool on)
+{
+ if (on)
+ return s6e63j0x03_dcs_write_seq_static(ctx,
+ MCS_MTP_KEY, 0x5a, 0x5a);
+
+ return s6e63j0x03_dcs_write_seq_static(ctx, MCS_MTP_KEY, 0xa5, 0xa5);
+}
+
+static int s6e63j0x03_power_on(struct s6e63j0x03 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ msleep(30);
+
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(5000, 6000);
+
+ return 0;
+}
+
+static int s6e63j0x03_power_off(struct s6e63j0x03 *ctx)
+{
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static unsigned int s6e63j0x03_get_brightness_index(unsigned int brightness)
+{
+ unsigned int index;
+
+ index = brightness / (MAX_BRIGHTNESS / NUM_GAMMA_STEPS);
+
+ if (index >= NUM_GAMMA_STEPS)
+ index = NUM_GAMMA_STEPS - 1;
+
+ return index;
+}
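+
+/*
+ * Worked example (illustrative): with MAX_BRIGHTNESS = 100 and
+ * NUM_GAMMA_STEPS = 9 the integer step size is 100 / 9 = 11, so a
+ * brightness of 80 maps to index 80 / 11 = 7 ("gamma 240"), while the
+ * maximum brightness of 100 gives 100 / 11 = 9 and is clamped to the
+ * last entry, index 8 ("gamma 300").
+ */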
+
+static int s6e63j0x03_update_gamma(struct s6e63j0x03 *ctx,
+ unsigned int brightness)
+{
+ struct backlight_device *bl_dev = ctx->bl_dev;
+ unsigned int index = s6e63j0x03_get_brightness_index(brightness);
+ int ret;
+
+ ret = s6e63j0x03_apply_mtp_key(ctx, true);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e63j0x03_dcs_write_seq(ctx, gamma_tbl[index], GAMMA_CMD_CNT);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e63j0x03_apply_mtp_key(ctx, false);
+ if (ret < 0)
+ return ret;
+
+ bl_dev->props.brightness = brightness;
+
+ return 0;
+}
+
+static int s6e63j0x03_set_brightness(struct backlight_device *bl_dev)
+{
+ struct s6e63j0x03 *ctx = bl_get_data(bl_dev);
+ unsigned int brightness = bl_dev->props.brightness;
+
+ return s6e63j0x03_update_gamma(ctx, brightness);
+}
+
+static const struct backlight_ops s6e63j0x03_bl_ops = {
+ .update_status = s6e63j0x03_set_brightness,
+};
+
+static int s6e63j0x03_disable(struct drm_panel *panel)
+{
+ struct s6e63j0x03 *ctx = panel_to_s6e63j0x03(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ return ret;
+
+ ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0)
+ return ret;
+
+ msleep(120);
+
+ return 0;
+}
+
+static int s6e63j0x03_unprepare(struct drm_panel *panel)
+{
+ struct s6e63j0x03 *ctx = panel_to_s6e63j0x03(panel);
+ int ret;
+
+ ret = s6e63j0x03_power_off(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+
+ return 0;
+}
+
+static int s6e63j0x03_panel_init(struct s6e63j0x03 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ ret = s6e63j0x03_enable_lv2_command(ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e63j0x03_apply_mtp_key(ctx, true);
+ if (ret < 0)
+ return ret;
+
+ /* set porch adjustment */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xf2, 0x1c, 0x28);
+ if (ret < 0)
+ return ret;
+
+ /* set frame freq */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xb5, 0x00, 0x02, 0x00);
+ if (ret < 0)
+ return ret;
+
+ /* set caset, paset */
+ ret = mipi_dsi_dcs_set_column_address(dsi, FIRST_COLUMN,
+ default_mode.hdisplay - 1 + FIRST_COLUMN);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_set_page_address(dsi, 0, default_mode.vdisplay - 1);
+ if (ret < 0)
+ return ret;
+
+ /* set ltps timing 0, 1 */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xf8, 0x08, 0x08, 0x08, 0x17,
+ 0x00, 0x2a, 0x02, 0x26, 0x00, 0x00, 0x02, 0x00, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xf7, 0x02);
+ if (ret < 0)
+ return ret;
+
+ /* set param pos te_edge */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xb0, 0x01);
+ if (ret < 0)
+ return ret;
+
+ /* set te rising edge */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xe2, 0x0f);
+ if (ret < 0)
+ return ret;
+
+ /* set param pos default */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xb0, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e63j0x03_apply_mtp_key(ctx, false);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int s6e63j0x03_prepare(struct drm_panel *panel)
+{
+ struct s6e63j0x03 *ctx = panel_to_s6e63j0x03(panel);
+ int ret;
+
+ ret = s6e63j0x03_power_on(ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e63j0x03_panel_init(ctx);
+ if (ret < 0)
+ goto err;
+
+ ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+
+ return 0;
+
+err:
+ s6e63j0x03_power_off(ctx);
+ return ret;
+}
+
+static int s6e63j0x03_enable(struct drm_panel *panel)
+{
+ struct s6e63j0x03 *ctx = panel_to_s6e63j0x03(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ msleep(120);
+
+ ret = s6e63j0x03_apply_mtp_key(ctx, true);
+ if (ret < 0)
+ return ret;
+
+ /* set elvss_cond */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xb1, 0x00, 0x09);
+ if (ret < 0)
+ return ret;
+
+ /* set pos */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx,
+ MIPI_DCS_SET_ADDRESS_MODE, 0x40);
+ if (ret < 0)
+ return ret;
+
+ /* set default white brightness */
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff);
+ if (ret < 0)
+ return ret;
+
+ /* set white ctrl */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx,
+ MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ if (ret < 0)
+ return ret;
+
+ /* set acl off */
+ ret = s6e63j0x03_dcs_write_seq_static(ctx,
+ MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e63j0x03_apply_mtp_key(ctx, false);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0)
+ return ret;
+
+ ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
+
+ return 0;
+}
+
+static int s6e63j0x03_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_ERROR("failed to add mode %ux%ux@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = 29;
+ connector->display_info.height_mm = 29;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6e63j0x03_funcs = {
+ .disable = s6e63j0x03_disable,
+ .unprepare = s6e63j0x03_unprepare,
+ .prepare = s6e63j0x03_prepare,
+ .enable = s6e63j0x03_enable,
+ .get_modes = s6e63j0x03_get_modes,
+};
+
+static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e63j0x03 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(struct s6e63j0x03), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 1;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_EOT_PACKET;
+
+ ctx->supplies[0].supply = "vdd3";
+ ctx->supplies[1].supply = "vci";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ dev_err(dev, "cannot get reset-gpio: %ld\n",
+ PTR_ERR(ctx->reset_gpio));
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &s6e63j0x03_funcs;
+
+ ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx,
+ &s6e63j0x03_bl_ops, NULL);
+ if (IS_ERR(ctx->bl_dev)) {
+ dev_err(dev, "failed to register backlight device\n");
+ return PTR_ERR(ctx->bl_dev);
+ }
+
+ ctx->bl_dev->props.max_brightness = MAX_BRIGHTNESS;
+ ctx->bl_dev->props.brightness = DEFAULT_BRIGHTNESS;
+ ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ goto unregister_backlight;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0)
+ goto remove_panel;
+
+ return ret;
+
+remove_panel:
+ drm_panel_remove(&ctx->panel);
+
+unregister_backlight:
+ backlight_device_unregister(ctx->bl_dev);
+
+ return ret;
+}
+
+static int s6e63j0x03_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e63j0x03 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ backlight_device_unregister(ctx->bl_dev);
+
+ return 0;
+}
+
+static const struct of_device_id s6e63j0x03_of_match[] = {
+ { .compatible = "samsung,s6e63j0x03" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s6e63j0x03_of_match);
+
+static struct mipi_dsi_driver s6e63j0x03_driver = {
+ .probe = s6e63j0x03_probe,
+ .remove = s6e63j0x03_remove,
+ .driver = {
+ .name = "panel_samsung_s6e63j0x03",
+ .of_match_table = s6e63j0x03_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e63j0x03_driver);
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Hoegeun Kwon <hoegeun.kwon@samsung.com>");
+MODULE_DESCRIPTION("MIPI-DSI based s6e63j0x03 AMOLED LCD Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
new file mode 100644
index 000000000000..71c09ed436ae
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2017 NXP Semiconductors.
+ * Author: Marco Franchi <marco.franchi@nxp.com>
+ *
+ * Based on Panel Simple driver by Thierry Reding <treding@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
+
+#include <video/display_timing.h>
+#include <video/videomode.h>
+
+struct seiko_panel_desc {
+ const struct drm_display_mode *modes;
+ unsigned int num_modes;
+ const struct display_timing *timings;
+ unsigned int num_timings;
+
+ unsigned int bpc;
+
+ /**
+ * @width: width (in millimeters) of the panel's active display area
+ * @height: height (in millimeters) of the panel's active display area
+ */
+ struct {
+ unsigned int width;
+ unsigned int height;
+ } size;
+
+ u32 bus_format;
+ u32 bus_flags;
+};
+
+struct seiko_panel {
+ struct drm_panel base;
+ bool prepared;
+ bool enabled;
+ const struct seiko_panel_desc *desc;
+ struct backlight_device *backlight;
+ struct regulator *dvdd;
+ struct regulator *avdd;
+};
+
+static inline struct seiko_panel *to_seiko_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct seiko_panel, base);
+}
+
+static int seiko_panel_get_fixed_modes(struct seiko_panel *panel)
+{
+ struct drm_connector *connector = panel->base.connector;
+ struct drm_device *drm = panel->base.drm;
+ struct drm_display_mode *mode;
+ unsigned int i, num = 0;
+
+ if (!panel->desc)
+ return 0;
+
+ for (i = 0; i < panel->desc->num_timings; i++) {
+ const struct display_timing *dt = &panel->desc->timings[i];
+ struct videomode vm;
+
+ videomode_from_timing(dt, &vm);
+ mode = drm_mode_create(drm);
+ if (!mode) {
+ dev_err(drm->dev, "failed to add mode %ux%u\n",
+ dt->hactive.typ, dt->vactive.typ);
+ continue;
+ }
+
+ drm_display_mode_from_videomode(&vm, mode);
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (panel->desc->num_timings == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
+ for (i = 0; i < panel->desc->num_modes; i++) {
+ const struct drm_display_mode *m = &panel->desc->modes[i];
+
+ mode = drm_mode_duplicate(drm, m);
+ if (!mode) {
+ dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+ continue;
+ }
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (panel->desc->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
+ connector->display_info.bpc = panel->desc->bpc;
+ connector->display_info.width_mm = panel->desc->size.width;
+ connector->display_info.height_mm = panel->desc->size.height;
+ if (panel->desc->bus_format)
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &panel->desc->bus_format, 1);
+ connector->display_info.bus_flags = panel->desc->bus_flags;
+
+ return num;
+}
+
+static int seiko_panel_disable(struct drm_panel *panel)
+{
+ struct seiko_panel *p = to_seiko_panel(panel);
+
+ if (!p->enabled)
+ return 0;
+
+ if (p->backlight) {
+ p->backlight->props.power = FB_BLANK_POWERDOWN;
+ p->backlight->props.state |= BL_CORE_FBBLANK;
+ backlight_update_status(p->backlight);
+ }
+
+ p->enabled = false;
+
+ return 0;
+}
+
+static int seiko_panel_unprepare(struct drm_panel *panel)
+{
+ struct seiko_panel *p = to_seiko_panel(panel);
+
+ if (!p->prepared)
+ return 0;
+
+ regulator_disable(p->avdd);
+
+ /* Add a 100ms delay as per the panel datasheet */
+ msleep(100);
+
+ regulator_disable(p->dvdd);
+
+ p->prepared = false;
+
+ return 0;
+}
+
+static int seiko_panel_prepare(struct drm_panel *panel)
+{
+ struct seiko_panel *p = to_seiko_panel(panel);
+ int err;
+
+ if (p->prepared)
+ return 0;
+
+ err = regulator_enable(p->dvdd);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to enable dvdd: %d\n", err);
+ return err;
+ }
+
+ /* Add a 100ms delay as per the panel datasheet */
+ msleep(100);
+
+ err = regulator_enable(p->avdd);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to enable avdd: %d\n", err);
+ goto disable_dvdd;
+ }
+
+ p->prepared = true;
+
+ return 0;
+
+disable_dvdd:
+ regulator_disable(p->dvdd);
+ return err;
+}
+
+static int seiko_panel_enable(struct drm_panel *panel)
+{
+ struct seiko_panel *p = to_seiko_panel(panel);
+
+ if (p->enabled)
+ return 0;
+
+ if (p->backlight) {
+ p->backlight->props.state &= ~BL_CORE_FBBLANK;
+ p->backlight->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(p->backlight);
+ }
+
+ p->enabled = true;
+
+ return 0;
+}
+
+static int seiko_panel_get_modes(struct drm_panel *panel)
+{
+ struct seiko_panel *p = to_seiko_panel(panel);
+
+ /* add hard-coded panel modes */
+ return seiko_panel_get_fixed_modes(p);
+}
+
+static int seiko_panel_get_timings(struct drm_panel *panel,
+ unsigned int num_timings,
+ struct display_timing *timings)
+{
+ struct seiko_panel *p = to_seiko_panel(panel);
+ unsigned int i;
+
+ if (p->desc->num_timings < num_timings)
+ num_timings = p->desc->num_timings;
+
+ if (timings)
+ for (i = 0; i < num_timings; i++)
+ timings[i] = p->desc->timings[i];
+
+ return p->desc->num_timings;
+}
+
+static const struct drm_panel_funcs seiko_panel_funcs = {
+ .disable = seiko_panel_disable,
+ .unprepare = seiko_panel_unprepare,
+ .prepare = seiko_panel_prepare,
+ .enable = seiko_panel_enable,
+ .get_modes = seiko_panel_get_modes,
+ .get_timings = seiko_panel_get_timings,
+};
+
+static int seiko_panel_probe(struct device *dev,
+ const struct seiko_panel_desc *desc)
+{
+ struct device_node *backlight;
+ struct seiko_panel *panel;
+ int err;
+
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ panel->enabled = false;
+ panel->prepared = false;
+ panel->desc = desc;
+
+ panel->dvdd = devm_regulator_get(dev, "dvdd");
+ if (IS_ERR(panel->dvdd))
+ return PTR_ERR(panel->dvdd);
+
+ panel->avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(panel->avdd))
+ return PTR_ERR(panel->avdd);
+
+ backlight = of_parse_phandle(dev->of_node, "backlight", 0);
+ if (backlight) {
+ panel->backlight = of_find_backlight_by_node(backlight);
+ of_node_put(backlight);
+
+ if (!panel->backlight)
+ return -EPROBE_DEFER;
+ }
+
+ drm_panel_init(&panel->base);
+ panel->base.dev = dev;
+ panel->base.funcs = &seiko_panel_funcs;
+
+ err = drm_panel_add(&panel->base);
+ if (err < 0)
+ return err;
+
+ dev_set_drvdata(dev, panel);
+
+ return 0;
+}
+
+static int seiko_panel_remove(struct platform_device *pdev)
+{
+ struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
+
+ drm_panel_detach(&panel->base);
+ drm_panel_remove(&panel->base);
+
+ seiko_panel_disable(&panel->base);
+
+ if (panel->backlight)
+ put_device(&panel->backlight->dev);
+
+ return 0;
+}
+
+static void seiko_panel_shutdown(struct platform_device *pdev)
+{
+ struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
+
+ seiko_panel_disable(&panel->base);
+}
+
+static const struct display_timing seiko_43wvf1g_timing = {
+ .pixelclock = { 33500000, 33500000, 33500000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 164, 164, 164 },
+ .hback_porch = { 89, 89, 89 },
+ .hsync_len = { 10, 10, 10 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 10, 10, 10 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 10, 10, 10 },
+ .flags = DISPLAY_FLAGS_DE_LOW,
+};
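+
+/*
+ * Rough check of the fixed timing above (illustrative): one line is
+ * 800 + 164 + 89 + 10 = 1063 pixel clocks and one frame is
+ * 480 + 10 + 23 + 10 = 523 lines, so 33.5 MHz / (1063 * 523) ~= 60 Hz.
+ */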
+
+static const struct seiko_panel_desc seiko_43wvf1g = {
+ .timings = &seiko_43wvf1g_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 93,
+ .height = 57,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+};
+
+static const struct of_device_id platform_of_match[] = {
+ {
+ .compatible = "sii,43wvf1g",
+ .data = &seiko_43wvf1g,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, platform_of_match);
+
+static int seiko_panel_platform_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+
+ id = of_match_node(platform_of_match, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ return seiko_panel_probe(&pdev->dev, id->data);
+}
+
+static struct platform_driver seiko_panel_platform_driver = {
+ .driver = {
+ .name = "seiko_panel",
+ .of_match_table = platform_of_match,
+ },
+ .probe = seiko_panel_platform_probe,
+ .remove = seiko_panel_remove,
+ .shutdown = seiko_panel_shutdown,
+};
+module_platform_driver(seiko_panel_platform_driver);
+
+MODULE_AUTHOR("Marco Franchi <marco.franchi@nxp.com");
+MODULE_DESCRIPTION("Seiko 43WVF1G panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 474fa759e06e..b7c4709f7b34 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -187,8 +187,7 @@ static int panel_simple_unprepare(struct drm_panel *panel)
if (!p->prepared)
return 0;
- if (p->enable_gpio)
- gpiod_set_value_cansleep(p->enable_gpio, 0);
+ gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
@@ -214,8 +213,7 @@ static int panel_simple_prepare(struct drm_panel *panel)
return err;
}
- if (p->enable_gpio)
- gpiod_set_value_cansleep(p->enable_gpio, 1);
+ gpiod_set_value_cansleep(p->enable_gpio, 1);
if (p->desc->delay.prepare)
msleep(p->desc->delay.prepare);
@@ -315,7 +313,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
GPIOD_OUT_LOW);
if (IS_ERR(panel->enable_gpio)) {
err = PTR_ERR(panel->enable_gpio);
- dev_err(dev, "failed to request GPIO: %d\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to request GPIO: %d\n", err);
return err;
}
@@ -369,6 +368,7 @@ static int panel_simple_remove(struct device *dev)
drm_panel_remove(&panel->base);
panel_simple_disable(&panel->base);
+ panel_simple_unprepare(&panel->base);
if (panel->ddc)
put_device(&panel->ddc->dev);
@@ -384,6 +384,7 @@ static void panel_simple_shutdown(struct device *dev)
struct panel_simple *panel = dev_get_drvdata(dev);
panel_simple_disable(&panel->base);
+ panel_simple_unprepare(&panel->base);
}
static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {
@@ -1007,6 +1008,10 @@ static const struct panel_desc hitachi_tx23d38vm0caa = {
.width = 195,
.height = 117,
},
+ .delay = {
+ .enable = 160,
+ .disable = 160,
+ },
};
static const struct drm_display_mode innolux_at043tn24_mode = {
@@ -1017,8 +1022,8 @@ static const struct drm_display_mode innolux_at043tn24_mode = {
.htotal = 480 + 2 + 41 + 2,
.vdisplay = 272,
.vsync_start = 272 + 2,
- .vsync_end = 272 + 2 + 11,
- .vtotal = 272 + 2 + 11 + 2,
+ .vsync_end = 272 + 2 + 10,
+ .vtotal = 272 + 2 + 10 + 2,
.vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
@@ -1032,6 +1037,7 @@ static const struct panel_desc innolux_at043tn24 = {
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
};
static const struct drm_display_mode innolux_at070tn92_mode = {
@@ -1522,8 +1528,8 @@ static const struct panel_desc olimex_lcd_olinuxino_43ts = {
.modes = &olimex_lcd_olinuxino_43ts_mode,
.num_modes = 1,
.size = {
- .width = 105,
- .height = 67,
+ .width = 95,
+ .height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
@@ -1831,6 +1837,30 @@ static const struct panel_desc tianma_tm070jdhg30 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
+static const struct drm_display_mode toshiba_lt089ac29000_mode = {
+ .clock = 79500,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 192,
+ .hsync_end = 1280 + 192 + 128,
+ .htotal = 1280 + 192 + 128 + 64,
+ .vdisplay = 768,
+ .vsync_start = 768 + 20,
+ .vsync_end = 768 + 20 + 7,
+ .vtotal = 768 + 20 + 7 + 3,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc toshiba_lt089ac29000 = {
+ .modes = &toshiba_lt089ac29000_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 194,
+ .height = 116,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
static const struct drm_display_mode tpk_f07a_0102_mode = {
.clock = 33260,
.hdisplay = 800,
@@ -2113,6 +2143,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "tianma,tm070jdhg30",
.data = &tianma_tm070jdhg30,
}, {
+ .compatible = "toshiba,lt089ac29000",
+ .data = &toshiba_lt089ac29000,
+ }, {
.compatible = "tpk,f07a-0102",
.data = &tpk_f07a_0102,
}, {
diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
index bbfba87cd1a8..e5e2abd66491 100644
--- a/drivers/gpu/drm/pl111/Kconfig
+++ b/drivers/gpu/drm/pl111/Kconfig
@@ -6,7 +6,8 @@ config DRM_PL111
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
- select DRM_PANEL
+ select DRM_BRIDGE
+ select DRM_PANEL_BRIDGE
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the PL111 CLCD controller.
diff --git a/drivers/gpu/drm/pl111/Makefile b/drivers/gpu/drm/pl111/Makefile
index f2874bbdaa14..9c5e8dba8ac6 100644
--- a/drivers/gpu/drm/pl111/Makefile
+++ b/drivers/gpu/drm/pl111/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-pl111_drm-y += pl111_connector.o \
- pl111_display.o \
+pl111_drm-y += pl111_display.o \
+ pl111_versatile.o \
pl111_drv.o
pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o
diff --git a/drivers/gpu/drm/pl111/pl111_connector.c b/drivers/gpu/drm/pl111/pl111_connector.c
deleted file mode 100644
index d335f9a29ce4..000000000000
--- a/drivers/gpu/drm/pl111/pl111_connector.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved.
- *
- * Parts of this file were based on sources as follows:
- *
- * Copyright (c) 2006-2008 Intel Corporation
- * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- * Copyright (C) 2011 Texas Instruments
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms of
- * such GNU licence.
- *
- */
-
-/**
- * pl111_drm_connector.c
- * Implementation of the connector functions for PL111 DRM
- */
-#include <linux/amba/clcd-regs.h>
-#include <linux/version.h>
-#include <linux/shmem_fs.h>
-#include <linux/dma-buf.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-
-#include "pl111_drm.h"
-
-static void pl111_connector_destroy(struct drm_connector *connector)
-{
- struct pl111_drm_connector *pl111_connector =
- to_pl111_connector(connector);
-
- if (pl111_connector->panel)
- drm_panel_detach(pl111_connector->panel);
-
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static enum drm_connector_status pl111_connector_detect(struct drm_connector
- *connector, bool force)
-{
- struct pl111_drm_connector *pl111_connector =
- to_pl111_connector(connector);
-
- return (pl111_connector->panel ?
- connector_status_connected :
- connector_status_disconnected);
-}
-
-static int pl111_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct pl111_drm_connector *pl111_connector =
- to_pl111_connector(connector);
-
- if (!pl111_connector->panel)
- return 0;
-
- return drm_panel_get_modes(pl111_connector->panel);
-}
-
-const struct drm_connector_funcs connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = pl111_connector_destroy,
- .detect = pl111_connector_detect,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-const struct drm_connector_helper_funcs connector_helper_funcs = {
- .get_modes = pl111_connector_helper_get_modes,
-};
-
-/* Walks the OF graph to find the panel node and then asks DRM to look
- * up the panel.
- */
-static struct drm_panel *pl111_get_panel(struct device *dev)
-{
- struct device_node *endpoint, *panel_node;
- struct device_node *np = dev->of_node;
- struct drm_panel *panel;
-
- endpoint = of_graph_get_next_endpoint(np, NULL);
- if (!endpoint) {
- dev_err(dev, "no endpoint to fetch panel\n");
- return NULL;
- }
-
- /* don't proceed if we have an endpoint but no panel_node tied to it */
- panel_node = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
- if (!panel_node) {
- dev_err(dev, "no valid panel node\n");
- return NULL;
- }
-
- panel = of_drm_find_panel(panel_node);
- of_node_put(panel_node);
-
- return panel;
-}
-
-int pl111_connector_init(struct drm_device *dev)
-{
- struct pl111_drm_dev_private *priv = dev->dev_private;
- struct pl111_drm_connector *pl111_connector = &priv->connector;
- struct drm_connector *connector = &pl111_connector->connector;
-
- drm_connector_init(dev, connector, &connector_funcs,
- DRM_MODE_CONNECTOR_DPI);
- drm_connector_helper_add(connector, &connector_helper_funcs);
-
- pl111_connector->panel = pl111_get_panel(dev->dev);
- if (pl111_connector->panel)
- drm_panel_attach(pl111_connector->panel, connector);
-
- return 0;
-}
-
diff --git a/drivers/gpu/drm/pl111/pl111_debugfs.c b/drivers/gpu/drm/pl111/pl111_debugfs.c
index 0d9dee199b2c..7ddc7e3b9e7d 100644
--- a/drivers/gpu/drm/pl111/pl111_debugfs.c
+++ b/drivers/gpu/drm/pl111/pl111_debugfs.c
@@ -22,8 +22,14 @@ static const struct {
REGDEF(CLCD_TIM2),
REGDEF(CLCD_TIM3),
REGDEF(CLCD_UBAS),
+ REGDEF(CLCD_LBAS),
REGDEF(CLCD_PL111_CNTL),
REGDEF(CLCD_PL111_IENB),
+ REGDEF(CLCD_PL111_RIS),
+ REGDEF(CLCD_PL111_MIS),
+ REGDEF(CLCD_PL111_ICR),
+ REGDEF(CLCD_PL111_UCUR),
+ REGDEF(CLCD_PL111_LCUR),
};
int pl111_debugfs_regs(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index b58c988d9da0..06c4bf756b69 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -21,7 +21,6 @@
#include <linux/of_graph.h>
#include <drm/drmP.h>
-#include <drm/drm_panel.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
@@ -94,7 +93,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
struct pl111_drm_dev_private *priv = drm->dev_private;
const struct drm_display_mode *mode = &cstate->mode;
struct drm_framebuffer *fb = plane->state->fb;
- struct drm_connector *connector = &priv->connector.connector;
+ struct drm_connector *connector = priv->connector;
u32 cntl;
u32 ppl, hsw, hfp, hbp;
u32 lpp, vsw, vfp, vbp;
@@ -156,10 +155,8 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
writel(0, priv->regs + CLCD_TIM3);
- drm_panel_prepare(priv->connector.panel);
-
- /* Enable and Power Up */
- cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDPWR | CNTL_LCDVCOMP(1);
+ /* Hard-code TFT panel */
+ cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1);
/* Note that the hardware's format reader takes 'r' from
* the low bit, while DRM formats list channels from high bit
@@ -202,9 +199,21 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
break;
}
- writel(cntl, priv->regs + CLCD_PL111_CNTL);
+ /* Power sequence: first enable and chill */
+ writel(cntl, priv->regs + priv->ctrl);
+
+ /*
+ * We expect this delay to stabilize the contrast
+ * voltage Vee as stipulated by the manual
+ */
+ msleep(20);
+
+ if (priv->variant_display_enable)
+ priv->variant_display_enable(drm, fb->format->format);
- drm_panel_enable(priv->connector.panel);
+ /* Power Up */
+ cntl |= CNTL_LCDPWR;
+ writel(cntl, priv->regs + priv->ctrl);
drm_crtc_vblank_on(crtc);
}
@@ -214,15 +223,28 @@ void pl111_display_disable(struct drm_simple_display_pipe *pipe)
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
struct pl111_drm_dev_private *priv = drm->dev_private;
+ u32 cntl;
drm_crtc_vblank_off(crtc);
- drm_panel_disable(priv->connector.panel);
+ /* Power Down */
+ cntl = readl(priv->regs + priv->ctrl);
+ if (cntl & CNTL_LCDPWR) {
+ cntl &= ~CNTL_LCDPWR;
+ writel(cntl, priv->regs + priv->ctrl);
+ }
+
+ /*
+ * We expect this delay to stabilize the contrast voltage Vee as
+ * stipulated by the manual
+ */
+ msleep(20);
- /* Disable and Power Down */
- writel(0, priv->regs + CLCD_PL111_CNTL);
+ if (priv->variant_display_disable)
+ priv->variant_display_disable(drm);
- drm_panel_unprepare(priv->connector.panel);
+ /* Disable */
+ writel(0, priv->regs + priv->ctrl);
clk_disable_unprepare(priv->clk);
}
@@ -260,7 +282,7 @@ int pl111_enable_vblank(struct drm_device *drm, unsigned int crtc)
{
struct pl111_drm_dev_private *priv = drm->dev_private;
- writel(CLCD_IRQ_NEXTBASE_UPDATE, priv->regs + CLCD_PL111_IENB);
+ writel(CLCD_IRQ_NEXTBASE_UPDATE, priv->regs + priv->ienb);
return 0;
}
@@ -269,7 +291,7 @@ void pl111_disable_vblank(struct drm_device *drm, unsigned int crtc)
{
struct pl111_drm_dev_private *priv = drm->dev_private;
- writel(0, priv->regs + CLCD_PL111_IENB);
+ writel(0, priv->regs + priv->ienb);
}
static int pl111_display_prepare_fb(struct drm_simple_display_pipe *pipe,
@@ -413,22 +435,6 @@ int pl111_display_init(struct drm_device *drm)
struct device_node *endpoint;
u32 tft_r0b0g0[3];
int ret;
- static const u32 formats[] = {
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_BGR565,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_ABGR1555,
- DRM_FORMAT_XBGR1555,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ABGR4444,
- DRM_FORMAT_XBGR4444,
- DRM_FORMAT_ARGB4444,
- DRM_FORMAT_XRGB4444,
- };
endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
if (!endpoint)
@@ -444,21 +450,16 @@ int pl111_display_init(struct drm_device *drm)
}
of_node_put(endpoint);
- if (tft_r0b0g0[0] != 0 ||
- tft_r0b0g0[1] != 8 ||
- tft_r0b0g0[2] != 16) {
- dev_err(dev, "arm,pl11x,tft-r0g0b0-pads != [0,8,16] not yet supported\n");
- return -EINVAL;
- }
-
ret = pl111_init_clock_divider(drm);
if (ret)
return ret;
ret = drm_simple_display_pipe_init(drm, &priv->pipe,
&pl111_display_funcs,
- formats, ARRAY_SIZE(formats),
- NULL, &priv->connector.connector);
+ priv->variant->formats,
+ priv->variant->nformats,
+ NULL,
+ priv->connector);
if (ret)
return ret;
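
The enable path above follows a fixed power-up order: raise LCDEN, give the contrast voltage Vee time to settle, run any board-specific hook, and only then raise LCDPWR. A stand-alone sketch of that ordering, where the CNTL_* bit positions, write_ctrl() and board_enable() are illustrative stand-ins rather than the driver's real definitions, might look like this:

/* Illustrative sketch of the PL11x power sequencing above; only the
 * ordering and the 20 ms settle time mirror the driver, everything
 * else is made up for demonstration. */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define CNTL_LCDEN	(1u << 0)	/* assumed bit position */
#define CNTL_LCDPWR	(1u << 11)	/* assumed bit position */

static uint32_t ctrl_reg;		/* stands in for priv->regs + priv->ctrl */

static void write_ctrl(uint32_t v) { ctrl_reg = v; }

static void board_enable(void)
{
	/* per-board/variant hook, e.g. a syscon mux setting */
}

int main(void)
{
	uint32_t cntl = CNTL_LCDEN;	/* 1. enable the controller only */

	write_ctrl(cntl);
	usleep(20 * 1000);		/* 2. let the contrast voltage Vee settle */
	board_enable();			/* 3. optional per-board enable */
	cntl |= CNTL_LCDPWR;		/* 4. finally apply panel power */
	write_ctrl(cntl);

	printf("final CNTL = %#x\n", (unsigned)ctrl_reg);
	return 0;
}
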
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index 5c685bfc8fdc..440f53ebee8c 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -21,25 +21,43 @@
#include <drm/drm_gem.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_bridge.h>
#include <linux/clk-provider.h>
+#include <linux/interrupt.h>
#define CLCD_IRQ_NEXTBASE_UPDATE BIT(2)
struct drm_minor;
-struct pl111_drm_connector {
- struct drm_connector connector;
- struct drm_panel *panel;
+/**
+ * struct pl111_variant_data - encodes IP differences
+ * @name: the name of this variant
+ * @is_pl110: this is the early PL110 variant
+ * @formats: array of supported pixel formats on this variant
+ * @nformats: the length of the array of supported pixel formats
+ */
+struct pl111_variant_data {
+ const char *name;
+ bool is_pl110;
+ const u32 *formats;
+ unsigned int nformats;
};
struct pl111_drm_dev_private {
struct drm_device *drm;
- struct pl111_drm_connector connector;
+ struct drm_connector *connector;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
struct drm_simple_display_pipe pipe;
struct drm_fbdev_cma *fbdev;
void *regs;
+ u32 ienb;
+ u32 ctrl;
/* The pixel clock (a reference to our clock divider off of CLCDCLK). */
struct clk *clk;
/* pl111's internal clock divider. */
@@ -48,20 +66,15 @@ struct pl111_drm_dev_private {
* subsystem and pl111_display_enable().
*/
spinlock_t tim2_lock;
+ const struct pl111_variant_data *variant;
+ void (*variant_display_enable) (struct drm_device *drm, u32 format);
+ void (*variant_display_disable) (struct drm_device *drm);
};
-#define to_pl111_connector(x) \
- container_of(x, struct pl111_drm_connector, connector)
-
int pl111_display_init(struct drm_device *dev);
int pl111_enable_vblank(struct drm_device *drm, unsigned int crtc);
void pl111_disable_vblank(struct drm_device *drm, unsigned int crtc);
irqreturn_t pl111_irq(int irq, void *data);
-int pl111_connector_init(struct drm_device *dev);
-int pl111_encoder_init(struct drm_device *dev);
-int pl111_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
int pl111_debugfs_init(struct drm_minor *minor);
#endif /* _PL111_DRM_H_ */
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 581c452cede1..201d57d5cb54 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -41,9 +41,6 @@
* - Fix race between setting plane base address and getting IRQ for
* vsync firing the pageflip completion.
*
- * - Expose the correct set of formats we can support based on the
- * "arm,pl11x,tft-r0g0b0-pads" DT property.
- *
* - Use the "max-memory-bandwidth" DT property to filter the
* supported formats.
*
@@ -68,8 +65,12 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_panel.h>
#include "pl111_drm.h"
+#include "pl111_versatile.h"
#define DRIVER_DESC "DRM module for PL111"
@@ -83,6 +84,8 @@ static int pl111_modeset_init(struct drm_device *dev)
{
struct drm_mode_config *mode_config;
struct pl111_drm_dev_private *priv = dev->dev_private;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
int ret = 0;
drm_mode_config_init(dev);
@@ -93,34 +96,43 @@ static int pl111_modeset_init(struct drm_device *dev)
mode_config->min_height = 1;
mode_config->max_height = 768;
- ret = pl111_connector_init(dev);
- if (ret) {
- dev_err(dev->dev, "Failed to create pl111_drm_connector\n");
- goto out_config;
- }
-
- /* Don't actually attach if we didn't find a drm_panel
- * attached to us. This will allow a kernel to include both
- * the fbdev pl111 driver and this one, and choose between
- * them based on which subsystem has support for the panel.
- */
- if (!priv->connector.panel) {
- dev_info(dev->dev,
- "Disabling due to lack of DRM panel device.\n");
- ret = -ENODEV;
- goto out_config;
+ ret = drm_of_find_panel_or_bridge(dev->dev->of_node,
+ 0, 0, &panel, &bridge);
+ if (ret && ret != -ENODEV)
+ return ret;
+ if (panel) {
+ bridge = drm_panel_bridge_add(panel,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
+ goto out_config;
+ }
+ /*
+ * TODO: when we are using a different bridge than a panel
+ * (such as a dumb VGA connector) we need to devise a different
+ * method to get the connector out of the bridge.
+ */
}
ret = pl111_display_init(dev);
if (ret != 0) {
dev_err(dev->dev, "Failed to init display\n");
- goto out_config;
+ goto out_bridge;
}
+ ret = drm_simple_display_pipe_attach_bridge(&priv->pipe,
+ bridge);
+ if (ret)
+ return ret;
+
+ priv->bridge = bridge;
+ priv->panel = panel;
+ priv->connector = panel->connector;
+
ret = drm_vblank_init(dev, 1);
if (ret != 0) {
dev_err(dev->dev, "Failed to init vblank\n");
- goto out_config;
+ goto out_bridge;
}
drm_mode_config_reset(dev);
@@ -132,6 +144,9 @@ static int pl111_modeset_init(struct drm_device *dev)
goto finish;
+out_bridge:
+ if (panel)
+ drm_panel_bridge_remove(bridge);
out_config:
drm_mode_config_cleanup(dev);
finish:
@@ -183,6 +198,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
{
struct device *dev = &amba_dev->dev;
struct pl111_drm_dev_private *priv;
+ struct pl111_variant_data *variant = id->data;
struct drm_device *drm;
int ret;
@@ -196,6 +212,33 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
amba_set_drvdata(amba_dev, drm);
priv->drm = drm;
drm->dev_private = priv;
+ priv->variant = variant;
+
+ /*
+ * The PL110 and PL111 variants have two registers
+ * swapped: interrupt enable and control. For this reason
+ * we use offsets that we can change per variant.
+ */
+ if (variant->is_pl110) {
+ /*
+ * The ARM Versatile boards are even more special:
+ * their PrimeCell ID says they are PL110, but their
+ * control and interrupt enable registers are nevertheless
+ * swapped to the PL111 order, so they do not follow
+ * the PL110 datasheet.
+ */
+ if (of_machine_is_compatible("arm,versatile-ab") ||
+ of_machine_is_compatible("arm,versatile-pb")) {
+ priv->ienb = CLCD_PL111_IENB;
+ priv->ctrl = CLCD_PL111_CNTL;
+ } else {
+ priv->ienb = CLCD_PL110_IENB;
+ priv->ctrl = CLCD_PL110_CNTL;
+ }
+ } else {
+ priv->ienb = CLCD_PL111_IENB;
+ priv->ctrl = CLCD_PL111_CNTL;
+ }
priv->regs = devm_ioremap_resource(dev, &amba_dev->res);
if (IS_ERR(priv->regs)) {
@@ -204,15 +247,19 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
}
/* turn off interrupts before requesting the irq */
- writel(0, priv->regs + CLCD_PL111_IENB);
+ writel(0, priv->regs + priv->ienb);
ret = devm_request_irq(dev, amba_dev->irq[0], pl111_irq, 0,
- "pl111", priv);
+ variant->name, priv);
if (ret != 0) {
dev_err(dev, "%s failed irq %d\n", __func__, ret);
return ret;
}
+ ret = pl111_versatile_init(dev, priv);
+ if (ret)
+ goto dev_unref;
+
ret = pl111_modeset_init(drm);
if (ret != 0)
goto dev_unref;
@@ -236,16 +283,70 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
drm_dev_unregister(drm);
if (priv->fbdev)
drm_fbdev_cma_fini(priv->fbdev);
+ if (priv->panel)
+ drm_panel_bridge_remove(priv->bridge);
drm_mode_config_cleanup(drm);
drm_dev_unref(drm);
return 0;
}
-static struct amba_id pl111_id_table[] = {
+/*
+ * This variant exists in early designs such as the ARM Integrator
+ * and lacks the 565 and 444 pixel formats.
+ */
+static const u32 pl110_pixel_formats[] = {
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+};
+
+static const struct pl111_variant_data pl110_variant = {
+ .name = "PL110",
+ .is_pl110 = true,
+ .formats = pl110_pixel_formats,
+ .nformats = ARRAY_SIZE(pl110_pixel_formats),
+};
+
+/* RealView, Versatile Express, etc. use this modern variant */
+static const u32 pl111_pixel_formats[] = {
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+};
+
+static const struct pl111_variant_data pl111_variant = {
+ .name = "PL111",
+ .formats = pl111_pixel_formats,
+ .nformats = ARRAY_SIZE(pl111_pixel_formats),
+};
+
+static const struct amba_id pl111_id_table[] = {
+ {
+ .id = 0x00041110,
+ .mask = 0x000fffff,
+		.data = (void *)&pl110_variant,
+ },
{
.id = 0x00041111,
.mask = 0x000fffff,
+		.data = (void *)&pl111_variant,
},
{0, 0},
};
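
The ID table above ties each PrimeCell ID to a pl111_variant_data descriptor; the probe path then derives the interrupt-enable and control register offsets from that descriptor so the rest of the driver stays variant-agnostic. A small stand-alone sketch of that selection step, with hypothetical offset values and simplified types, could be:

/* Sketch of the per-variant register-offset scheme used above.
 * The offsets below are hypothetical stand-ins for the swapped
 * PL110/PL111 IENB/CNTL registers. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct variant {
	const char *name;
	bool is_pl110;
};

struct priv {
	uint32_t ienb;	/* interrupt enable register offset */
	uint32_t ctrl;	/* control register offset */
};

#define PL110_IENB 0x18
#define PL110_CNTL 0x1c
#define PL111_IENB 0x1c
#define PL111_CNTL 0x18

static void pick_offsets(const struct variant *v, struct priv *p)
{
	if (v->is_pl110) {
		p->ienb = PL110_IENB;
		p->ctrl = PL110_CNTL;
	} else {
		p->ienb = PL111_IENB;
		p->ctrl = PL111_CNTL;
	}
}

int main(void)
{
	static const struct variant pl110 = { "PL110", true };
	struct priv p;

	pick_offsets(&pl110, &p);
	printf("%s: ienb=%#x ctrl=%#x\n", pl110.name,
	       (unsigned)p.ienb, (unsigned)p.ctrl);
	return 0;
}
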
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
new file mode 100644
index 000000000000..97d4af6925a3
--- /dev/null
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -0,0 +1,270 @@
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "pl111_versatile.h"
+#include "pl111_drm.h"
+
+static struct regmap *versatile_syscon_map;
+
+/*
+ * We detect the different syscon types from the compatible strings.
+ */
+enum versatile_clcd {
+ INTEGRATOR_CLCD_CM,
+ VERSATILE_CLCD,
+ REALVIEW_CLCD_EB,
+ REALVIEW_CLCD_PB1176,
+ REALVIEW_CLCD_PB11MP,
+ REALVIEW_CLCD_PBA8,
+ REALVIEW_CLCD_PBX,
+};
+
+static const struct of_device_id versatile_clcd_of_match[] = {
+ {
+ .compatible = "arm,core-module-integrator",
+ .data = (void *)INTEGRATOR_CLCD_CM,
+ },
+ {
+ .compatible = "arm,versatile-sysreg",
+ .data = (void *)VERSATILE_CLCD,
+ },
+ {
+ .compatible = "arm,realview-eb-syscon",
+ .data = (void *)REALVIEW_CLCD_EB,
+ },
+ {
+ .compatible = "arm,realview-pb1176-syscon",
+ .data = (void *)REALVIEW_CLCD_PB1176,
+ },
+ {
+ .compatible = "arm,realview-pb11mp-syscon",
+ .data = (void *)REALVIEW_CLCD_PB11MP,
+ },
+ {
+ .compatible = "arm,realview-pba8-syscon",
+ .data = (void *)REALVIEW_CLCD_PBA8,
+ },
+ {
+ .compatible = "arm,realview-pbx-syscon",
+ .data = (void *)REALVIEW_CLCD_PBX,
+ },
+ {},
+};
+
+/*
+ * Core module CLCD control on the Integrator/CP: bits
+ * 8 through 19 of the CM_CONTROL register control a number
+ * of CLCD settings.
+ */
+#define INTEGRATOR_HDR_CTRL_OFFSET 0x0C
+#define INTEGRATOR_CLCD_LCDBIASEN BIT(8)
+#define INTEGRATOR_CLCD_LCDBIASUP BIT(9)
+#define INTEGRATOR_CLCD_LCDBIASDN BIT(10)
+/* Bits 11, 12 and 13 control the LCD type */
+#define INTEGRATOR_CLCD_LCDMUX_MASK (BIT(11)|BIT(12)|BIT(13))
+#define INTEGRATOR_CLCD_LCDMUX_LCD24 BIT(11)
+#define INTEGRATOR_CLCD_LCDMUX_VGA565 BIT(12)
+#define INTEGRATOR_CLCD_LCDMUX_SHARP (BIT(11)|BIT(12))
+#define INTEGRATOR_CLCD_LCDMUX_VGA555 BIT(13)
+#define INTEGRATOR_CLCD_LCDMUX_VGA24 (BIT(11)|BIT(12)|BIT(13))
+#define INTEGRATOR_CLCD_LCD0_EN BIT(14)
+#define INTEGRATOR_CLCD_LCD1_EN BIT(15)
+/* R/L flip on Sharp */
+#define INTEGRATOR_CLCD_LCD_STATIC1 BIT(16)
+/* U/D flip on Sharp */
+#define INTEGRATOR_CLCD_LCD_STATIC2 BIT(17)
+/* No connection on Sharp */
+#define INTEGRATOR_CLCD_LCD_STATIC BIT(18)
+/* 0 = 24bit VGA, 1 = 18bit VGA */
+#define INTEGRATOR_CLCD_LCD_N24BITEN BIT(19)
+
+#define INTEGRATOR_CLCD_MASK (INTEGRATOR_CLCD_LCDBIASEN | \
+ INTEGRATOR_CLCD_LCDBIASUP | \
+ INTEGRATOR_CLCD_LCDBIASDN | \
+ INTEGRATOR_CLCD_LCDMUX_MASK | \
+ INTEGRATOR_CLCD_LCD0_EN | \
+ INTEGRATOR_CLCD_LCD1_EN | \
+ INTEGRATOR_CLCD_LCD_STATIC1 | \
+ INTEGRATOR_CLCD_LCD_STATIC2 | \
+ INTEGRATOR_CLCD_LCD_STATIC | \
+ INTEGRATOR_CLCD_LCD_N24BITEN)
+
+static void pl111_integrator_enable(struct drm_device *drm, u32 format)
+{
+ u32 val;
+
+ dev_info(drm->dev, "enable Integrator CLCD connectors\n");
+
+ /* FIXME: really needed? */
+ val = INTEGRATOR_CLCD_LCD_STATIC1 | INTEGRATOR_CLCD_LCD_STATIC2 |
+ INTEGRATOR_CLCD_LCD0_EN | INTEGRATOR_CLCD_LCD1_EN;
+
+ switch (format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
+ break;
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB565:
+ /* truecolor RGB565 */
+ val |= INTEGRATOR_CLCD_LCDMUX_VGA565;
+ break;
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_XRGB1555:
+ /* Pseudocolor, RGB555, BGR555 */
+ val |= INTEGRATOR_CLCD_LCDMUX_VGA555;
+ break;
+ default:
+ dev_err(drm->dev, "unhandled format on Integrator 0x%08x\n",
+ format);
+ break;
+ }
+
+ regmap_update_bits(versatile_syscon_map,
+ INTEGRATOR_HDR_CTRL_OFFSET,
+ INTEGRATOR_CLCD_MASK,
+ val);
+}
+
+/*
+ * This configuration register is present throughout the Versatile
+ * and RealView family, but becomes increasingly unused starting
+ * with the RealView series.
+ */
+#define SYS_CLCD 0x50
+#define SYS_CLCD_MODE_MASK (BIT(0)|BIT(1))
+#define SYS_CLCD_MODE_888 0
+#define SYS_CLCD_MODE_5551 BIT(0)
+#define SYS_CLCD_MODE_565_R_LSB BIT(1)
+#define SYS_CLCD_MODE_565_B_LSB (BIT(0)|BIT(1))
+#define SYS_CLCD_CONNECTOR_MASK (BIT(2)|BIT(3)|BIT(4)|BIT(5))
+#define SYS_CLCD_NLCDIOON BIT(2)
+#define SYS_CLCD_VDDPOSSWITCH BIT(3)
+#define SYS_CLCD_PWR3V5SWITCH BIT(4)
+#define SYS_CLCD_VDDNEGSWITCH BIT(5)
+
+static void pl111_versatile_disable(struct drm_device *drm)
+{
+ dev_info(drm->dev, "disable Versatile CLCD connectors\n");
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_CONNECTOR_MASK,
+ 0);
+}
+
+static void pl111_versatile_enable(struct drm_device *drm, u32 format)
+{
+ u32 val = 0;
+
+ dev_info(drm->dev, "enable Versatile CLCD connectors\n");
+
+ switch (format) {
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB8888:
+ val |= SYS_CLCD_MODE_888;
+ break;
+ case DRM_FORMAT_BGR565:
+ val |= SYS_CLCD_MODE_565_R_LSB;
+ break;
+ case DRM_FORMAT_RGB565:
+ val |= SYS_CLCD_MODE_565_B_LSB;
+ break;
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_XRGB1555:
+ val |= SYS_CLCD_MODE_5551;
+ break;
+ default:
+ dev_err(drm->dev, "unhandled format on Versatile 0x%08x\n",
+ format);
+ break;
+ }
+
+ /* Set up the MUX */
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_MODE_MASK,
+ val);
+
+ /* Then enable the display */
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_CONNECTOR_MASK,
+ SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
+}
+
+static void pl111_realview_clcd_disable(struct drm_device *drm)
+{
+ dev_info(drm->dev, "disable RealView CLCD connectors\n");
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_CONNECTOR_MASK,
+ 0);
+}
+
+static void pl111_realview_clcd_enable(struct drm_device *drm, u32 format)
+{
+ dev_info(drm->dev, "enable RealView CLCD connectors\n");
+ regmap_update_bits(versatile_syscon_map,
+ SYS_CLCD,
+ SYS_CLCD_CONNECTOR_MASK,
+ SYS_CLCD_NLCDIOON | SYS_CLCD_PWR3V5SWITCH);
+}
+
+int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
+{
+ const struct of_device_id *clcd_id;
+ enum versatile_clcd versatile_clcd_type;
+ struct device_node *np;
+ struct regmap *map;
+
+ np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
+ &clcd_id);
+ if (!np) {
+ /* Non-ARM reference designs, just bail out */
+ return 0;
+ }
+ versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+
+ map = syscon_node_to_regmap(np);
+ if (IS_ERR(map)) {
+ dev_err(dev, "no Versatile syscon regmap\n");
+ return PTR_ERR(map);
+ }
+
+ switch (versatile_clcd_type) {
+ case INTEGRATOR_CLCD_CM:
+ versatile_syscon_map = map;
+ priv->variant_display_enable = pl111_integrator_enable;
+ dev_info(dev, "set up callbacks for Integrator PL110\n");
+ break;
+ case VERSATILE_CLCD:
+ versatile_syscon_map = map;
+ priv->variant_display_enable = pl111_versatile_enable;
+ priv->variant_display_disable = pl111_versatile_disable;
+ dev_info(dev, "set up callbacks for Versatile PL110+\n");
+ break;
+ case REALVIEW_CLCD_EB:
+ case REALVIEW_CLCD_PB1176:
+ case REALVIEW_CLCD_PB11MP:
+ case REALVIEW_CLCD_PBA8:
+ case REALVIEW_CLCD_PBX:
+ versatile_syscon_map = map;
+ priv->variant_display_enable = pl111_realview_clcd_enable;
+ priv->variant_display_disable = pl111_realview_clcd_disable;
+ dev_info(dev, "set up callbacks for RealView PL111\n");
+ break;
+ default:
+ dev_info(dev, "unknown Versatile system controller\n");
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pl111_versatile_init);
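
All of the board callbacks above lean on regmap_update_bits(), which performs a read-modify-write restricted to the supplied mask. A minimal stand-alone illustration of that mask/value rule (no regmap involved; the register is just a plain variable and the values are made up) might be:

#include <stdint.h>
#include <stdio.h>

/* The same rule regmap_update_bits() applies to a hardware register:
 * only the bits covered by 'mask' are replaced by the matching bits
 * of 'val'; everything else is preserved. */
static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t sys_clcd = 0xA5;	/* made-up current register value */
	uint32_t mode_mask = 0x3;	/* like SYS_CLCD_MODE_MASK above */

	sys_clcd = update_bits(sys_clcd, mode_mask, 0x1);	/* e.g. MODE_5551 */
	printf("SYS_CLCD = %#x\n", (unsigned)sys_clcd);
	return 0;
}
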
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.h b/drivers/gpu/drm/pl111/pl111_versatile.h
new file mode 100644
index 000000000000..41aa6d969dc6
--- /dev/null
+++ b/drivers/gpu/drm/pl111/pl111_versatile.h
@@ -0,0 +1,9 @@
+#include <linux/device.h>
+#include "pl111_drm.h"
+
+#ifndef PL111_VERSATILE_H
+#define PL111_VERSATILE_H
+
+int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv);
+
+#endif
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 74fc9362ecf9..c0fb52c6d4ca 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -219,7 +219,7 @@ int qxl_garbage_collect(struct qxl_device *qdev)
union qxl_release_info *info;
while (qxl_ring_pop(qdev->release_ring, &id)) {
- QXL_INFO(qdev, "popped %lld\n", id);
+ DRM_DEBUG_DRIVER("popped %lld\n", id);
while (id) {
release = qxl_release_from_id_locked(qdev, id);
if (release == NULL)
@@ -229,8 +229,8 @@ int qxl_garbage_collect(struct qxl_device *qdev)
next_id = info->next;
qxl_release_unmap(qdev, release, info);
- QXL_INFO(qdev, "popped %lld, next %lld\n", id,
- next_id);
+ DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id,
+ next_id);
switch (release->type) {
case QXL_RELEASE_DRAWABLE:
@@ -248,7 +248,7 @@ int qxl_garbage_collect(struct qxl_device *qdev)
}
}
- QXL_INFO(qdev, "%s: %d\n", __func__, i);
+ DRM_DEBUG_DRIVER("%d\n", i);
return i;
}
@@ -381,17 +381,19 @@ void qxl_io_create_primary(struct qxl_device *qdev,
{
struct qxl_surface_create *create;
- QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
- qdev->ram_header);
+ DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
create = &qdev->ram_header->create_surface;
create->format = bo->surf.format;
create->width = bo->surf.width;
create->height = bo->surf.height;
create->stride = bo->surf.stride;
- create->mem = qxl_bo_physical_address(qdev, bo, offset);
+ if (bo->shadow) {
+ create->mem = qxl_bo_physical_address(qdev, bo->shadow, offset);
+ } else {
+ create->mem = qxl_bo_physical_address(qdev, bo, offset);
+ }
- QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
- bo->kptr);
+ DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr);
create->flags = QXL_SURF_FLAG_KEEP_DATA;
create->type = QXL_SURF_TYPE_PRIMARY;
@@ -401,7 +403,7 @@ void qxl_io_create_primary(struct qxl_device *qdev,
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
- QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
+ DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id);
wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index afbf50d0c08f..4756b3c9bf2c 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -305,7 +305,9 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
+ struct qxl_bo *bo = gem_to_qxl_bo(qxl_fb->obj);
+ WARN_ON(bo->shadow);
drm_gem_object_unreference_unlocked(qxl_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(qxl_fb);
@@ -508,6 +510,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
.x2 = qfb->base.width,
.y2 = qfb->base.height
};
+ bool same_shadow = false;
if (old_state->fb) {
qfb_old = to_qxl_framebuffer(old_state->fb);
@@ -519,15 +522,23 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
if (bo == bo_old)
return;
+ if (bo_old && bo_old->shadow && bo->shadow &&
+ bo_old->shadow == bo->shadow) {
+ same_shadow = true;
+ }
+
if (bo_old && bo_old->is_primary) {
- qxl_io_destroy_primary(qdev);
+ if (!same_shadow)
+ qxl_io_destroy_primary(qdev);
bo_old->is_primary = false;
}
if (!bo->is_primary) {
- qxl_io_create_primary(qdev, 0, bo);
+ if (!same_shadow)
+ qxl_io_create_primary(qdev, 0, bo);
bo->is_primary = true;
}
+
qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1);
}
@@ -679,8 +690,9 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
static int qxl_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
+ struct qxl_device *qdev = plane->dev->dev_private;
struct drm_gem_object *obj;
- struct qxl_bo *user_bo;
+ struct qxl_bo *user_bo, *old_bo = NULL;
int ret;
if (!new_state->fb)
@@ -689,6 +701,32 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
obj = to_qxl_framebuffer(new_state->fb)->obj;
user_bo = gem_to_qxl_bo(obj);
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
+ user_bo->is_dumb && !user_bo->shadow) {
+ if (plane->state->fb) {
+ obj = to_qxl_framebuffer(plane->state->fb)->obj;
+ old_bo = gem_to_qxl_bo(obj);
+ }
+ if (old_bo && old_bo->shadow &&
+ user_bo->gem_base.size == old_bo->gem_base.size &&
+ plane->state->crtc == new_state->crtc &&
+ plane->state->crtc_w == new_state->crtc_w &&
+ plane->state->crtc_h == new_state->crtc_h &&
+ plane->state->src_x == new_state->src_x &&
+ plane->state->src_y == new_state->src_y &&
+ plane->state->src_w == new_state->src_w &&
+ plane->state->src_h == new_state->src_h &&
+ plane->state->rotation == new_state->rotation &&
+ plane->state->zpos == new_state->zpos) {
+ drm_gem_object_get(&old_bo->shadow->gem_base);
+ user_bo->shadow = old_bo->shadow;
+ } else {
+ qxl_bo_create(qdev, user_bo->gem_base.size,
+ true, true, QXL_GEM_DOMAIN_VRAM, NULL,
+ &user_bo->shadow);
+ }
+ }
+
ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
if (ret)
return ret;
@@ -713,6 +751,11 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
obj = to_qxl_framebuffer(old_state->fb)->obj;
user_bo = gem_to_qxl_bo(obj);
qxl_bo_unpin(user_bo);
+
+ if (user_bo->shadow && !user_bo->is_primary) {
+ drm_gem_object_put_unlocked(&user_bo->shadow->gem_base);
+ user_bo->shadow = NULL;
+ }
}
static const uint32_t qxl_cursor_plane_formats[] = {
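
The prepare_fb change above only allocates a new shadow buffer object when it cannot reuse the old one: if the dumb buffer size and the entire scanout geometry are unchanged, the previous shadow is simply re-referenced. A simplified sketch of that decision, using stand-in types rather than the driver's real state structures, could read:

/* Sketch of the "can we keep the old shadow?" decision made in
 * qxl_plane_prepare_fb() above; field and type names are simplified
 * stand-ins, not the driver's real structures. */
#include <stdbool.h>
#include <stddef.h>

struct plane_state {
	size_t size;
	int crtc_id, crtc_w, crtc_h;
	int src_x, src_y, src_w, src_h;
	unsigned rotation, zpos;
};

static bool can_reuse_shadow(const struct plane_state *old,
			     const struct plane_state *new)
{
	/* same backing size and identical scanout geometry */
	return old && old->size == new->size &&
	       old->crtc_id == new->crtc_id &&
	       old->crtc_w == new->crtc_w && old->crtc_h == new->crtc_h &&
	       old->src_x == new->src_x && old->src_y == new->src_y &&
	       old->src_w == new->src_w && old->src_h == new->src_h &&
	       old->rotation == new->rotation && old->zpos == new->zpos;
}

int main(void)
{
	struct plane_state a = { 4096, 1, 640, 480, 0, 0, 640, 480, 0, 0 };
	struct plane_state b = a;

	return can_reuse_shadow(&a, &b) ? 0 : 1;
}
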
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 3397a1907336..08752c0ffb35 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -62,33 +62,9 @@
#define QXL_DEBUGFS_MAX_COMPONENTS 32
-extern int qxl_log_level;
extern int qxl_num_crtc;
extern int qxl_max_ioctls;
-enum {
- QXL_INFO_LEVEL = 1,
- QXL_DEBUG_LEVEL = 2,
-};
-
-#define QXL_INFO(qdev, fmt, ...) do { \
- if (qxl_log_level >= QXL_INFO_LEVEL) { \
- qxl_io_log(qdev, fmt, __VA_ARGS__); \
- } \
- } while (0)
-#define QXL_DEBUG(qdev, fmt, ...) do { \
- if (qxl_log_level >= QXL_DEBUG_LEVEL) { \
- qxl_io_log(qdev, fmt, __VA_ARGS__); \
- } \
- } while (0)
-#define QXL_INFO_ONCE(qdev, fmt, ...) do { \
- static int done; \
- if (!done) { \
- done = 1; \
- QXL_INFO(qdev, fmt, __VA_ARGS__); \
- } \
- } while (0)
-
#define DRM_FILE_OFFSET 0x100000000ULL
#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
@@ -113,6 +89,8 @@ struct qxl_bo {
/* Constant after initialization */
struct drm_gem_object gem_base;
bool is_primary; /* is this now a primary surface */
+ bool is_dumb;
+ struct qxl_bo *shadow;
bool hw_surf_alloc;
struct qxl_surface surf;
uint32_t surface_id;
@@ -351,7 +329,7 @@ int qxl_check_idle(struct qxl_ring *ring);
static inline void *
qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical)
{
- QXL_INFO(qdev, "not implemented (%lu)\n", physical);
+ DRM_DEBUG_DRIVER("not implemented (%lu)\n", physical);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 5e65d5d2d937..11085ab01374 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -63,6 +63,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
&handle);
if (r)
return r;
+ qobj->is_dumb = true;
args->pitch = pitch;
args->handle = handle;
return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 844c4a31ca13..23af3e352673 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -240,18 +240,15 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
return ret;
qbo = gem_to_qxl_bo(gobj);
- QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
- mode_cmd.height, mode_cmd.pitches[0]);
+ DRM_DEBUG_DRIVER("%dx%d %d\n", mode_cmd.width,
+ mode_cmd.height, mode_cmd.pitches[0]);
shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
/* TODO: what's the usual response to memory allocation errors? */
BUG_ON(!shadow);
- QXL_INFO(qdev,
- "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
- qxl_bo_gpu_offset(qbo),
- qxl_bo_mmap_offset(qbo),
- qbo->kptr,
- shadow);
+ DRM_DEBUG_DRIVER("surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
+ qxl_bo_gpu_offset(qbo), qxl_bo_mmap_offset(qbo),
+ qbo->kptr, shadow);
size = mode_cmd.pitches[0] * mode_cmd.height;
info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index e6ec845b5be0..a6da6fa6ad58 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -154,7 +154,7 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
return handle;
}
*ret = release;
- QXL_INFO(qdev, "allocated release %d\n", handle);
+ DRM_DEBUG_DRIVER("allocated release %d\n", handle);
release->id = handle;
return handle;
}
@@ -179,8 +179,7 @@ void
qxl_release_free(struct qxl_device *qdev,
struct qxl_release *release)
{
- QXL_INFO(qdev, "release %d, type %d\n", release->id,
- release->type);
+ DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);
if (release->surface_release_id)
qxl_surface_id_dealloc(qdev, release->surface_release_id);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 7ecf8a4b9fe6..ab4823875311 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -136,8 +136,8 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
"filp->private_data->minor->dev->dev_private == NULL\n");
return -EINVAL;
}
- QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
- __func__, filp->private_data, vma->vm_pgoff);
+ DRM_DEBUG_DRIVER("filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
+ filp->private_data, vma->vm_pgoff);
r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
if (unlikely(r != 0))
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 0ad8244b5ccf..92ccd7aed0d4 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -103,12 +103,9 @@ radeon-y += \
radeon-y += \
radeon_vce.o \
vce_v1_0.o \
- vce_v2_0.o \
- radeon_kfd.o
+ vce_v2_0.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
radeon-$(CONFIG_ACPI) += radeon_acpi.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
-
-CFLAGS_radeon_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 432cb46f6a34..3e798593e042 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -45,34 +45,32 @@ static char *pre_emph_names[] = {
/***** radeon AUX functions *****/
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * bytes in the src buffer.
*/
void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
- u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
- u32 *dst32, *src32;
+ u32 src_tmp[5], dst_tmp[5];
int i;
+ u8 align_num_bytes = ALIGN(num_bytes, 4);
- memcpy(src_tmp, src, num_bytes);
- src32 = (u32 *)src_tmp;
- dst32 = (u32 *)dst_tmp;
if (to_le) {
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = cpu_to_le32(src32[i]);
- memcpy(dst, dst_tmp, num_bytes);
+ memcpy(src_tmp, src, num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+ memcpy(dst, dst_tmp, align_num_bytes);
} else {
- u8 dws = num_bytes & ~3;
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = le32_to_cpu(src32[i]);
- memcpy(dst, dst_tmp, dws);
- if (num_bytes % 4) {
- for (i = 0; i < (num_bytes % 4); i++)
- dst[dws+i] = dst_tmp[dws+i];
- }
+ memcpy(src_tmp, src, align_num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+ memcpy(dst, dst_tmp, num_bytes);
}
#else
memcpy(dst, src, num_bytes);
@@ -304,10 +302,10 @@ static int convert_bpc_to_bpp(int bpc)
/***** radeon specific DP functions *****/
-int radeon_dp_get_dp_link_config(struct drm_connector *connector,
- const u8 dpcd[DP_DPCD_SIZE],
- unsigned pix_clock,
- unsigned *dp_lanes, unsigned *dp_rate)
+static int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+ const u8 dpcd[DP_DPCD_SIZE],
+ unsigned pix_clock,
+ unsigned *dp_lanes, unsigned *dp_rate)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
static const unsigned link_rates[3] = { 162000, 270000, 540000 };
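
The rewritten big-endian path above copies whole 32-bit words, which is why the comment now requires callers to provide buffers padded to ALIGN(num_bytes, 4). A stand-alone illustration of the same idea, using local stand-ins for the kernel's ALIGN() and byte-swap helpers, could be:

/* Stand-alone illustration of the big-endian path above: pad the copy
 * length up to a whole number of 32-bit words, byte-swap each word,
 * then copy back only as many bytes as the caller asked for. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0xff00) |
	       ((v << 8) & 0xff0000) | (v << 24);
}

static void copy_swap(uint8_t *dst, const uint8_t *src, uint8_t num_bytes)
{
	uint32_t tmp[5];			/* enough for 20 bytes, as above */
	uint8_t aligned = ALIGN(num_bytes, 4);
	int i;

	memcpy(tmp, src, aligned);		/* caller must pad src to 'aligned' */
	for (i = 0; i < aligned / 4; i++)
		tmp[i] = bswap32(tmp[i]);
	memcpy(dst, tmp, num_bytes);		/* only the requested bytes go back */
}

int main(void)
{
	uint8_t src[8] = { 1, 2, 3, 4, 5, 6, 0, 0 };	/* 6 payload bytes, padded */
	uint8_t dst[8] = { 0 };

	copy_swap(dst, src, 6);
	printf("%02x %02x %02x %02x %02x %02x\n",
	       dst[0], dst[1], dst[2], dst[3], dst[4], dst[5]);
	return 0;
}
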
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index c97fbb2ab48b..7e1b04dc5593 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -184,6 +184,7 @@ static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
+static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
PPSMC_Msg msg, u32 parameter);
@@ -1651,6 +1652,27 @@ static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
}
#endif
+static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
+{
+ u32 tmp;
+ int i;
+
+ if (!ci_is_smc_running(rdev))
+ return PPSMC_Result_Failed;
+
+ WREG32(SMC_MESSAGE_0, msg);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(SMC_RESP_0);
+ if (tmp != 0)
+ break;
+ udelay(1);
+ }
+ tmp = RREG32(SMC_RESP_0);
+
+ return (PPSMC_Result)tmp;
+}
+
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
PPSMC_Msg msg, u32 parameter)
{
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
index 723220ffbea2..dff2a63df38f 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.h
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -330,7 +330,6 @@ int ci_program_jump_on_start(struct radeon_device *rdev);
void ci_stop_smc_clock(struct radeon_device *rdev);
void ci_start_smc_clock(struct radeon_device *rdev);
bool ci_is_smc_running(struct radeon_device *rdev);
-PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev);
int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit);
int ci_read_smc_sram_dword(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 3356a21d97ec..371121913756 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -163,27 +163,6 @@ bool ci_is_smc_running(struct radeon_device *rdev)
return false;
}
-PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
-{
- u32 tmp;
- int i;
-
- if (!ci_is_smc_running(rdev))
- return PPSMC_Result_Failed;
-
- WREG32(SMC_MESSAGE_0, msg);
-
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(SMC_RESP_0);
- if (tmp != 0)
- break;
- udelay(1);
- }
- tmp = RREG32(SMC_RESP_0);
-
- return (PPSMC_Result)tmp;
-}
-
#if 0
PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
{
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 3cb6c55b268d..898f9a078830 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -33,7 +33,6 @@
#include "cik_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_ci.h"
-#include "radeon_kfd.h"
#define SH_MEM_CONFIG_GFX_DEFAULT \
ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
@@ -5684,10 +5683,9 @@ int cik_vm_init(struct radeon_device *rdev)
/*
* number of VMs
* VMID 0 is reserved for System
- * radeon graphics/compute will use VMIDs 1-7
- * amdkfd will use VMIDs 8-15
+ * radeon graphics/compute will use VMIDs 1-15
*/
- rdev->vm_manager.nvm = RADEON_NUM_OF_VMIDS;
+ rdev->vm_manager.nvm = 16;
/* base offset of vram pages */
if (rdev->flags & RADEON_IS_IGP) {
u64 tmp = RREG32(MC_VM_FB_OFFSET);
@@ -7589,9 +7587,6 @@ restart_ih:
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
- radeon_kfd_interrupt(rdev,
- (const void *) &rdev->ih.ring[ring_index]);
-
src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
@@ -8486,10 +8481,6 @@ static int cik_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_kfd_resume(rdev);
- if (r)
- return r;
-
return 0;
}
@@ -8538,7 +8529,6 @@ int cik_resume(struct radeon_device *rdev)
*/
int cik_suspend(struct radeon_device *rdev)
{
- radeon_kfd_suspend(rdev);
radeon_pm_suspend(rdev);
radeon_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index e21015475ed5..cda16fcd43bb 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -30,8 +30,6 @@
#define CIK_RB_BITMAP_WIDTH_PER_SH 2
#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4
-#define RADEON_NUM_OF_VMIDS 8
-
/* DIDT IND registers */
#define DIDT_SQ_CTRL0 0x0
# define DIDT_CTRL_EN (1 << 0)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c31e660e35db..7d39ed63e5be 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1456,7 +1456,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
reg = R100_CP_PACKET0_GET_REG(header);
- crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
+ crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
if (!crtc) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 97fd58e97043..c96b31950ca7 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -887,7 +887,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
reg = R600_CP_PACKET0_GET_REG(header);
- crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
+ crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
if (!crtc) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e82a99cb2459..ab32830c4e23 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -58,7 +58,7 @@ enum r600_hdmi_iec_status_bits {
static struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
{
- struct r600_audio_pin status;
+ struct r600_audio_pin status = {};
uint32_t value;
value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8cbaeec090c9..a8e546569858 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2456,9 +2456,6 @@ struct radeon_device {
u64 vram_pin_size;
u64 gart_pin_size;
- /* amdkfd interface */
- struct kfd_dev *kfd;
-
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
};
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2f642cbefd8e..59dcefb2df3b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -263,7 +263,7 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -290,7 +290,7 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector,
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -404,7 +404,7 @@ static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *conn
int enc_id = connector->encoder_ids[0];
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@@ -1368,7 +1368,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
+ encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1454,7 +1454,7 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1473,7 +1473,7 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
 	/* then check use digital */
/* pick the first one */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@@ -1620,7 +1620,7 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -1649,7 +1649,7 @@ static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index f4becad0a78c..31dd04f6baa1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -43,7 +43,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
-#include "radeon_kfd.h"
/*
* KMS wrapper.
@@ -338,14 +337,6 @@ static int radeon_pci_probe(struct pci_dev *pdev,
{
int ret;
- /*
- * Initialize amdkfd before starting radeon. If it was not loaded yet,
- * defer radeon probing
- */
- ret = radeon_kfd_init();
- if (ret == -EPROBE_DEFER)
- return ret;
-
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
@@ -645,7 +636,6 @@ static int __init radeon_init(void)
static void __exit radeon_exit(void)
{
- radeon_kfd_fini();
pci_unregister_driver(pdriver);
radeon_unregister_atpx_handler();
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fd25361ac681..2fcf805d3a16 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -322,10 +322,10 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
if (rfb->obj) {
radeonfb_destroy_pinned_object(rfb->obj);
rfb->obj = NULL;
+ drm_framebuffer_unregister_private(&rfb->base);
+ drm_framebuffer_cleanup(&rfb->base);
}
drm_fb_helper_fini(&rfbdev->helper);
- drm_framebuffer_unregister_private(&rfb->base);
- drm_framebuffer_cleanup(&rfb->base);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3386452bd2f0..cf3deb283da5 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -451,7 +451,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
else
r = 0;
- cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_put_unlocked(gobj);
return r;
@@ -481,7 +481,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
r = ret;
/* Flush HDP cache via MMIO if necessary */
- cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
deleted file mode 100644
index f6578c96925c..000000000000
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ /dev/null
@@ -1,870 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
-#include <drm/drmP.h>
-#include "radeon.h"
-#include "cikd.h"
-#include "cik_reg.h"
-#include "radeon_kfd.h"
-#include "radeon_ucode.h"
-#include <linux/firmware.h>
-#include "cik_structs.h"
-
-#define CIK_PIPE_PER_MEC (4)
-
-static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
- TCP_WATCH0_ADDR_H, TCP_WATCH0_ADDR_L, TCP_WATCH0_CNTL,
- TCP_WATCH1_ADDR_H, TCP_WATCH1_ADDR_L, TCP_WATCH1_CNTL,
- TCP_WATCH2_ADDR_H, TCP_WATCH2_ADDR_L, TCP_WATCH2_CNTL,
- TCP_WATCH3_ADDR_H, TCP_WATCH3_ADDR_L, TCP_WATCH3_CNTL
-};
-
-struct kgd_mem {
- struct radeon_bo *bo;
- uint64_t gpu_addr;
- void *cpu_ptr;
-};
-
-
-static int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
- void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr);
-
-static void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-
-static uint64_t get_vmem_size(struct kgd_dev *kgd);
-static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
-
-static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
-
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
- uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
-
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-
-static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
- uint32_t hpd_size, uint64_t hpd_gpu_addr);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
- unsigned int timeout, uint32_t pipe_id,
- uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int timeout);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
-
-static const struct kfd2kgd_calls kfd2kgd = {
- .init_gtt_mem_allocation = alloc_gtt_mem,
- .free_gtt_mem = free_gtt_mem,
- .get_vmem_size = get_vmem_size,
- .get_gpu_clock_counter = get_gpu_clock_counter,
- .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_pipeline = kgd_init_pipeline,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
- .write_vmid_invalidate_request = write_vmid_invalidate_request,
- .get_fw_version = get_fw_version
-};
-
-static const struct kgd2kfd_calls *kgd2kfd;
-
-int radeon_kfd_init(void)
-{
- int ret;
-
-#if defined(CONFIG_HSA_AMD_MODULE)
- int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
-
- kgd2kfd_init_p = symbol_request(kgd2kfd_init);
-
- if (kgd2kfd_init_p == NULL)
- return -ENOENT;
-
- ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
- if (ret) {
- symbol_put(kgd2kfd_init);
- kgd2kfd = NULL;
- }
-
-#elif defined(CONFIG_HSA_AMD)
- ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
- if (ret)
- kgd2kfd = NULL;
-
-#else
- ret = -ENOENT;
-#endif
-
- return ret;
-}
-
-void radeon_kfd_fini(void)
-{
- if (kgd2kfd) {
- kgd2kfd->exit();
- symbol_put(kgd2kfd_init);
- }
-}
-
-void radeon_kfd_device_probe(struct radeon_device *rdev)
-{
- if (kgd2kfd)
- rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
- rdev->pdev, &kfd2kgd);
-}
-
-void radeon_kfd_device_init(struct radeon_device *rdev)
-{
- int i, queue, pipe, mec;
-
- if (rdev->kfd) {
- struct kgd2kfd_shared_resources gpu_resources = {
- .compute_vmid_bitmap = 0xFF00,
- .num_pipe_per_mec = 4,
- .num_queue_per_pipe = 8
- };
-
- bitmap_zero(gpu_resources.queue_bitmap, KGD_MAX_QUEUES);
-
- for (i = 0; i < KGD_MAX_QUEUES; ++i) {
- queue = i % gpu_resources.num_queue_per_pipe;
- pipe = (i / gpu_resources.num_queue_per_pipe)
- % gpu_resources.num_pipe_per_mec;
- mec = (i / gpu_resources.num_queue_per_pipe)
- / gpu_resources.num_pipe_per_mec;
-
- if (mec == 0 && pipe > 0)
- set_bit(i, gpu_resources.queue_bitmap);
- }
-
- radeon_doorbell_get_kfd_info(rdev,
- &gpu_resources.doorbell_physical_address,
- &gpu_resources.doorbell_aperture_size,
- &gpu_resources.doorbell_start_offset);
-
- kgd2kfd->device_init(rdev->kfd, &gpu_resources);
- }
-}
-
-void radeon_kfd_device_fini(struct radeon_device *rdev)
-{
- if (rdev->kfd) {
- kgd2kfd->device_exit(rdev->kfd);
- rdev->kfd = NULL;
- }
-}
-
-void radeon_kfd_interrupt(struct radeon_device *rdev, const void *ih_ring_entry)
-{
- if (rdev->kfd)
- kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
-}
-
-void radeon_kfd_suspend(struct radeon_device *rdev)
-{
- if (rdev->kfd)
- kgd2kfd->suspend(rdev->kfd);
-}
-
-int radeon_kfd_resume(struct radeon_device *rdev)
-{
- int r = 0;
-
- if (rdev->kfd)
- r = kgd2kfd->resume(rdev->kfd);
-
- return r;
-}
-
-static int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
- void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr)
-{
- struct radeon_device *rdev = (struct radeon_device *)kgd;
- struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
- int r;
-
- BUG_ON(kgd == NULL);
- BUG_ON(gpu_addr == NULL);
- BUG_ON(cpu_ptr == NULL);
-
- *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if ((*mem) == NULL)
- return -ENOMEM;
-
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
- RADEON_GEM_GTT_WC, NULL, NULL, &(*mem)->bo);
- if (r) {
- dev_err(rdev->dev,
- "failed to allocate BO for amdkfd (%d)\n", r);
- return r;
- }
-
- /* map the buffer */
- r = radeon_bo_reserve((*mem)->bo, true);
- if (r) {
- dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
- goto allocate_mem_reserve_bo_failed;
- }
-
- r = radeon_bo_pin((*mem)->bo, RADEON_GEM_DOMAIN_GTT,
- &(*mem)->gpu_addr);
- if (r) {
- dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
- goto allocate_mem_pin_bo_failed;
- }
- *gpu_addr = (*mem)->gpu_addr;
-
- r = radeon_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
- if (r) {
- dev_err(rdev->dev,
- "(%d) failed to map bo to kernel for amdkfd\n", r);
- goto allocate_mem_kmap_bo_failed;
- }
- *cpu_ptr = (*mem)->cpu_ptr;
-
- radeon_bo_unreserve((*mem)->bo);
-
- return 0;
-
-allocate_mem_kmap_bo_failed:
- radeon_bo_unpin((*mem)->bo);
-allocate_mem_pin_bo_failed:
- radeon_bo_unreserve((*mem)->bo);
-allocate_mem_reserve_bo_failed:
- radeon_bo_unref(&(*mem)->bo);
-
- return r;
-}
-
-static void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
-{
- struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
-
- BUG_ON(mem == NULL);
-
- radeon_bo_reserve(mem->bo, true);
- radeon_bo_kunmap(mem->bo);
- radeon_bo_unpin(mem->bo);
- radeon_bo_unreserve(mem->bo);
- radeon_bo_unref(&(mem->bo));
- kfree(mem);
-}
-
-static uint64_t get_vmem_size(struct kgd_dev *kgd)
-{
- struct radeon_device *rdev = (struct radeon_device *)kgd;
-
- BUG_ON(kgd == NULL);
-
- return rdev->mc.real_vram_size;
-}
-
-static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
-{
- struct radeon_device *rdev = (struct radeon_device *)kgd;
-
- return rdev->asic->get_gpu_clock_counter(rdev);
-}
-
-static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
-{
- struct radeon_device *rdev = (struct radeon_device *)kgd;
-
- /* The sclk is in quantas of 10kHz */
- return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
-}
-
-static inline struct radeon_device *get_radeon_device(struct kgd_dev *kgd)
-{
- return (struct radeon_device *)kgd;
-}
-
-static void write_register(struct kgd_dev *kgd, uint32_t offset, uint32_t value)
-{
- struct radeon_device *rdev = get_radeon_device(kgd);
-
- writel(value, (void __iomem *)(rdev->rmmio + offset));
-}
-
-static uint32_t read_register(struct kgd_dev *kgd, uint32_t offset)
-{
- struct radeon_device *rdev = get_radeon_device(kgd);
-
- return readl((void __iomem *)(rdev->rmmio + offset));
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
- uint32_t queue, uint32_t vmid)
-{
- struct radeon_device *rdev = get_radeon_device(kgd);
- uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
-
- mutex_lock(&rdev->srbm_mutex);
- write_register(kgd, SRBM_GFX_CNTL, value);
-}
-
-static void unlock_srbm(struct kgd_dev *kgd)
-{
- struct radeon_device *rdev = get_radeon_device(kgd);
-
- write_register(kgd, SRBM_GFX_CNTL, 0);
- mutex_unlock(&rdev->srbm_mutex);
-}
-
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
- uint32_t queue_id)
-{
- uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
- uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
-
- lock_srbm(kgd, mec, pipe, queue_id, 0);
-}
-
-static void release_queue(struct kgd_dev *kgd)
-{
- unlock_srbm(kgd);
-}
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config,
- uint32_t sh_mem_ape1_base,
- uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases)
-{
- lock_srbm(kgd, 0, 0, 0, vmid);
-
- write_register(kgd, SH_MEM_CONFIG, sh_mem_config);
- write_register(kgd, SH_MEM_APE1_BASE, sh_mem_ape1_base);
- write_register(kgd, SH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
- write_register(kgd, SH_MEM_BASES, sh_mem_bases);
-
- unlock_srbm(kgd);
-}
-
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid)
-{
- /*
- * We have to assume that there is no outstanding mapping.
- * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0
- * because a mapping is in progress or because a mapping finished and
- * the SW cleared it.
- * So the protocol is to always wait & clear.
- */
- uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
- ATC_VMID_PASID_MAPPING_VALID_MASK;
-
- write_register(kgd, ATC_VMID0_PASID_MAPPING + vmid*sizeof(uint32_t),
- pasid_mapping);
-
- while (!(read_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS) &
- (1U << vmid)))
- cpu_relax();
- write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
-
- /* Mapping vmid to pasid also for IH block */
- write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t),
- pasid_mapping);
-
- return 0;
-}
-
-static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
- uint32_t hpd_size, uint64_t hpd_gpu_addr)
-{
- /* nothing to do here */
- return 0;
-}
-
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
-{
- uint32_t mec;
- uint32_t pipe;
-
- mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
- pipe = (pipe_id % CIK_PIPE_PER_MEC);
-
- lock_srbm(kgd, mec, pipe, 0, 0);
-
- write_register(kgd, CPC_INT_CNTL,
- TIME_STAMP_INT_ENABLE | OPCODE_ERROR_INT_ENABLE);
-
- unlock_srbm(kgd);
-
- return 0;
-}
-
-static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
-{
- uint32_t retval;
-
- retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
- m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
-
- pr_debug("kfd: sdma base address: 0x%x\n", retval);
-
- return retval;
-}
-
-static inline struct cik_mqd *get_mqd(void *mqd)
-{
- return (struct cik_mqd *)mqd;
-}
-
-static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
-{
- return (struct cik_sdma_rlc_registers *)mqd;
-}
-
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm)
-{
- uint32_t wptr_shadow, is_wptr_shadow_valid;
- struct cik_mqd *m;
-
- m = get_mqd(mqd);
-
- is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
-
- acquire_queue(kgd, pipe_id, queue_id);
- write_register(kgd, CP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
- write_register(kgd, CP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
- write_register(kgd, CP_MQD_CONTROL, m->cp_mqd_control);
-
- write_register(kgd, CP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
- write_register(kgd, CP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
- write_register(kgd, CP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
-
- write_register(kgd, CP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
- write_register(kgd, CP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
- write_register(kgd, CP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
-
- write_register(kgd, CP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
-
- write_register(kgd, CP_HQD_PERSISTENT_STATE,
- m->cp_hqd_persistent_state);
- write_register(kgd, CP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
- write_register(kgd, CP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
-
- write_register(kgd, CP_HQD_ATOMIC0_PREOP_LO,
- m->cp_hqd_atomic0_preop_lo);
-
- write_register(kgd, CP_HQD_ATOMIC0_PREOP_HI,
- m->cp_hqd_atomic0_preop_hi);
-
- write_register(kgd, CP_HQD_ATOMIC1_PREOP_LO,
- m->cp_hqd_atomic1_preop_lo);
-
- write_register(kgd, CP_HQD_ATOMIC1_PREOP_HI,
- m->cp_hqd_atomic1_preop_hi);
-
- write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR,
- m->cp_hqd_pq_rptr_report_addr_lo);
-
- write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
- m->cp_hqd_pq_rptr_report_addr_hi);
-
- write_register(kgd, CP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
-
- write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR,
- m->cp_hqd_pq_wptr_poll_addr_lo);
-
- write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR_HI,
- m->cp_hqd_pq_wptr_poll_addr_hi);
-
- write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL,
- m->cp_hqd_pq_doorbell_control);
-
- write_register(kgd, CP_HQD_VMID, m->cp_hqd_vmid);
-
- write_register(kgd, CP_HQD_QUANTUM, m->cp_hqd_quantum);
-
- write_register(kgd, CP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
- write_register(kgd, CP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
-
- write_register(kgd, CP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
-
- if (is_wptr_shadow_valid)
- write_register(kgd, CP_HQD_PQ_WPTR, wptr_shadow);
-
- write_register(kgd, CP_HQD_ACTIVE, m->cp_hqd_active);
- release_queue(kgd);
-
- return 0;
-}
-
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
-{
- struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
-
- m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
-
- write_register(kgd,
- sdma_base_addr + SDMA0_RLC0_VIRTUAL_ADDR,
- m->sdma_rlc_virtual_addr);
-
- write_register(kgd,
- sdma_base_addr + SDMA0_RLC0_RB_BASE,
- m->sdma_rlc_rb_base);
-
- write_register(kgd,
- sdma_base_addr + SDMA0_RLC0_RB_BASE_HI,
- m->sdma_rlc_rb_base_hi);
-
- write_register(kgd,
- sdma_base_addr + SDMA0_RLC0_RB_RPTR_ADDR_LO,
- m->sdma_rlc_rb_rptr_addr_lo);
-
- write_register(kgd,
- sdma_base_addr + SDMA0_RLC0_RB_RPTR_ADDR_HI,
- m->sdma_rlc_rb_rptr_addr_hi);
-
- write_register(kgd,
- sdma_base_addr + SDMA0_RLC0_DOORBELL,
- m->sdma_rlc_doorbell);
-
- write_register(kgd,
- sdma_base_addr + SDMA0_RLC0_RB_CNTL,
- m->sdma_rlc_rb_cntl);
-
- return 0;
-}
-
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
-{
- uint32_t act;
- bool retval = false;
- uint32_t low, high;
-
- acquire_queue(kgd, pipe_id, queue_id);
- act = read_register(kgd, CP_HQD_ACTIVE);
- if (act) {
- low = lower_32_bits(queue_address >> 8);
- high = upper_32_bits(queue_address >> 8);
-
- if (low == read_register(kgd, CP_HQD_PQ_BASE) &&
- high == read_register(kgd, CP_HQD_PQ_BASE_HI))
- retval = true;
- }
- release_queue(kgd);
- return retval;
-}
-
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
-{
- struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
- uint32_t sdma_rlc_rb_cntl;
-
- m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
-
- sdma_rlc_rb_cntl = read_register(kgd,
- sdma_base_addr + SDMA0_RLC0_RB_CNTL);
-
- if (sdma_rlc_rb_cntl & SDMA_RB_ENABLE)
- return true;
-
- return false;
-}
-
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
- unsigned int timeout, uint32_t pipe_id,
- uint32_t queue_id)
-{
- uint32_t temp;
-
- acquire_queue(kgd, pipe_id, queue_id);
- write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL, 0);
-
- write_register(kgd, CP_HQD_DEQUEUE_REQUEST, reset_type);
-
- while (true) {
- temp = read_register(kgd, CP_HQD_ACTIVE);
- if (temp & 0x1)
- break;
- if (timeout == 0) {
- pr_err("kfd: cp queue preemption time out (%dms)\n",
- temp);
- release_queue(kgd);
- return -ETIME;
- }
- msleep(20);
- timeout -= 20;
- }
-
- release_queue(kgd);
- return 0;
-}
-
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int timeout)
-{
- struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
- uint32_t temp;
-
- m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
-
- temp = read_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_CNTL);
- temp = temp & ~SDMA_RB_ENABLE;
- write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_CNTL, temp);
-
- while (true) {
- temp = read_register(kgd, sdma_base_addr +
- SDMA0_RLC0_CONTEXT_STATUS);
- if (temp & SDMA_RLC_IDLE)
- break;
- if (timeout == 0)
- return -ETIME;
- msleep(20);
- timeout -= 20;
- }
-
- write_register(kgd, sdma_base_addr + SDMA0_RLC0_DOORBELL, 0);
- write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_RPTR, 0);
- write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_WPTR, 0);
- write_register(kgd, sdma_base_addr + SDMA0_RLC0_RB_BASE, 0);
-
- return 0;
-}
-
-static int kgd_address_watch_disable(struct kgd_dev *kgd)
-{
- union TCP_WATCH_CNTL_BITS cntl;
- unsigned int i;
-
- cntl.u32All = 0;
-
- cntl.bitfields.valid = 0;
- cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
- cntl.bitfields.atc = 1;
-
- /* Turning off this address until we set all the registers */
- for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
- write_register(kgd,
- watchRegs[i * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_CNTL],
- cntl.u32All);
-
- return 0;
-}
-
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo)
-{
- union TCP_WATCH_CNTL_BITS cntl;
-
- cntl.u32All = cntl_val;
-
- /* Turning off this watch point until we set all the registers */
- cntl.bitfields.valid = 0;
- write_register(kgd,
- watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_CNTL],
- cntl.u32All);
-
- write_register(kgd,
- watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_ADDR_HI],
- addr_hi);
-
- write_register(kgd,
- watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_ADDR_LO],
- addr_lo);
-
- /* Enable the watch point */
- cntl.bitfields.valid = 1;
-
- write_register(kgd,
- watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_CNTL],
- cntl.u32All);
-
- return 0;
-}
-
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd)
-{
- struct radeon_device *rdev = get_radeon_device(kgd);
- uint32_t data;
-
- mutex_lock(&rdev->grbm_idx_mutex);
-
- write_register(kgd, GRBM_GFX_INDEX, gfx_index_val);
- write_register(kgd, SQ_CMD, sq_cmd);
-
- /* Restore the GRBM_GFX_INDEX register */
-
- data = INSTANCE_BROADCAST_WRITES | SH_BROADCAST_WRITES |
- SE_BROADCAST_WRITES;
-
- write_register(kgd, GRBM_GFX_INDEX, data);
-
- mutex_unlock(&rdev->grbm_idx_mutex);
-
- return 0;
-}
-
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset)
-{
- return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset]
- / 4;
-}
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid)
-{
- uint32_t reg;
- struct radeon_device *rdev = (struct radeon_device *) kgd;
-
- reg = RREG32(ATC_VMID0_PASID_MAPPING + vmid*4);
- return reg & ATC_VMID_PASID_MAPPING_VALID_MASK;
-}
-
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct radeon_device *rdev = (struct radeon_device *) kgd;
-
- reg = RREG32(ATC_VMID0_PASID_MAPPING + vmid*4);
- return reg & ATC_VMID_PASID_MAPPING_PASID_MASK;
-}
-
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
-{
- struct radeon_device *rdev = (struct radeon_device *) kgd;
-
- return WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
-}
-
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
-{
- struct radeon_device *rdev = (struct radeon_device *) kgd;
- const union radeon_firmware_header *hdr;
-
- BUG_ON(kgd == NULL || rdev->mec_fw == NULL);
-
- switch (type) {
- case KGD_ENGINE_PFP:
- hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data;
- break;
-
- case KGD_ENGINE_ME:
- hdr = (const union radeon_firmware_header *) rdev->me_fw->data;
- break;
-
- case KGD_ENGINE_CE:
- hdr = (const union radeon_firmware_header *) rdev->ce_fw->data;
- break;
-
- case KGD_ENGINE_MEC1:
- hdr = (const union radeon_firmware_header *) rdev->mec_fw->data;
- break;
-
- case KGD_ENGINE_MEC2:
- hdr = (const union radeon_firmware_header *)
- rdev->mec2_fw->data;
- break;
-
- case KGD_ENGINE_RLC:
- hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
- break;
-
- case KGD_ENGINE_SDMA1:
- case KGD_ENGINE_SDMA2:
- hdr = (const union radeon_firmware_header *)
- rdev->sdma_fw->data;
- break;
-
- default:
- return 0;
- }
-
- if (hdr == NULL)
- return 0;
-
- /* Only 12 bit in use*/
- return hdr->common.ucode_version;
-}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dfee8f7d94ae..cde037f213d7 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -34,8 +34,6 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
-#include "radeon_kfd.h"
-
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx(void);
#else
@@ -68,8 +66,6 @@ void radeon_driver_unload_kms(struct drm_device *dev)
pm_runtime_forbid(dev->dev);
}
- radeon_kfd_device_fini(rdev);
-
radeon_acpi_fini(rdev);
radeon_modeset_fini(rdev);
@@ -174,9 +170,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
- radeon_kfd_device_probe(rdev);
- radeon_kfd_device_init(rdev);
-
if (radeon_is_px(dev)) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index da44ac234f64..ca0a7ed28c9b 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -762,10 +762,6 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
-extern int radeon_dp_get_dp_link_config(struct drm_connector *connector,
- const u8 *dpcd,
- unsigned pix_clock,
- unsigned *dp_lanes, unsigned *dp_rate);
extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index db8f079e441e..bc26efd1793e 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -205,5 +205,5 @@ DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/radeon
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index bf69bf9086bf..6ada64db00e9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -597,7 +597,7 @@ release_sg:
kfree(ttm->sg);
release_pages:
- release_pages(ttm->pages, pinned, 0);
+ release_pages(ttm->pages, pinned);
return r;
}
@@ -725,8 +725,6 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
struct radeon_device *rdev;
- unsigned i;
- int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
@@ -762,33 +760,13 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
}
#endif
- r = ttm_pool_populate(ttm);
- if (r) {
- return r;
- }
-
- for (i = 0; i < ttm->num_pages; i++) {
- gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
- while (i--) {
- pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- gtt->ttm.dma_address[i] = 0;
- }
- ttm_pool_unpopulate(ttm);
- return -EFAULT;
- }
- }
- return 0;
+ return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm);
}
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
- unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (gtt && gtt->userptr) {
@@ -815,14 +793,7 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
#endif
- for (i = 0; i < ttm->num_pages; i++) {
- if (gtt->ttm.dma_address[i]) {
- pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- }
-
- ttm_pool_unpopulate(ttm);
+ ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
}
int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 7278b9703c15..566d1a948c8f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -18,6 +18,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/of_graph.h>
#include <linux/wait.h>
@@ -213,7 +214,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
}
- return drm_fb_cma_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
static void rcar_du_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index dcc539ba85d6..0ccc76217ee4 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -57,4 +57,13 @@ config ROCKCHIP_INNO_HDMI
for the Innosilicon HDMI driver. If you want to enable
HDMI on RK3036 based SoC, you should select this option.
+config ROCKCHIP_LVDS
+ bool "Rockchip LVDS support"
+ depends on DRM_ROCKCHIP
+ depends on PINCTRL && OF
+ help
+ Choose this option to enable support for Rockchip LVDS controllers.
+ The Rockchip RK3288 SoC has an LVDS TX controller which can
+ drive LVDS, RGB, or dual LVDS output modes. Say Y to enable
+ its driver.
endif
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 305409818ffb..a314e2109e76 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -13,5 +13,6 @@ rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
+rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 9606121fa185..93b7102dd008 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -72,7 +72,7 @@ struct rockchip_dp_device {
struct reset_control *rst;
struct work_struct psr_work;
- spinlock_t psr_lock;
+ struct mutex psr_lock;
unsigned int psr_state;
const struct rockchip_dp_chip_data *data;
@@ -83,21 +83,20 @@ struct rockchip_dp_device {
static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
{
struct rockchip_dp_device *dp = to_dp(encoder);
- unsigned long flags;
if (!analogix_dp_psr_supported(dp->dev))
return;
- dev_dbg(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
+ DRM_DEV_DEBUG(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
- spin_lock_irqsave(&dp->psr_lock, flags);
+ mutex_lock(&dp->psr_lock);
if (enabled)
dp->psr_state = EDP_VSC_PSR_STATE_ACTIVE;
else
dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
schedule_work(&dp->psr_work);
- spin_unlock_irqrestore(&dp->psr_lock, flags);
+ mutex_unlock(&dp->psr_lock);
}
static void analogix_dp_psr_work(struct work_struct *work)
@@ -105,21 +104,20 @@ static void analogix_dp_psr_work(struct work_struct *work)
struct rockchip_dp_device *dp =
container_of(work, typeof(*dp), psr_work);
int ret;
- unsigned long flags;
ret = rockchip_drm_wait_vact_end(dp->encoder.crtc,
PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
if (ret) {
- dev_err(dp->dev, "line flag interrupt did not arrive\n");
+ DRM_DEV_ERROR(dp->dev, "line flag interrupt did not arrive\n");
return;
}
- spin_lock_irqsave(&dp->psr_lock, flags);
+ mutex_lock(&dp->psr_lock);
if (dp->psr_state == EDP_VSC_PSR_STATE_ACTIVE)
analogix_dp_enable_psr(dp->dev);
else
analogix_dp_disable_psr(dp->dev);
- spin_unlock_irqrestore(&dp->psr_lock, flags);
+ mutex_unlock(&dp->psr_lock);
}
static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
@@ -140,13 +138,13 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
ret = clk_prepare_enable(dp->pclk);
if (ret < 0) {
- dev_err(dp->dev, "failed to enable pclk %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "failed to enable pclk %d\n", ret);
return ret;
}
ret = rockchip_dp_pre_init(dp);
if (ret < 0) {
- dev_err(dp->dev, "failed to dp pre init %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "failed to dp pre init %d\n", ret);
clk_disable_unprepare(dp->pclk);
return ret;
}
@@ -211,17 +209,17 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
else
val = dp->data->lcdsel_big;
- dev_dbg(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
+ DRM_DEV_DEBUG(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
ret = clk_prepare_enable(dp->grfclk);
if (ret < 0) {
- dev_err(dp->dev, "failed to enable grfclk %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "failed to enable grfclk %d\n", ret);
return;
}
ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val);
if (ret != 0)
- dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret);
clk_disable_unprepare(dp->grfclk);
}
@@ -277,7 +275,7 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(dp->grf)) {
- dev_err(dev, "failed to get rockchip,grf property\n");
+ DRM_DEV_ERROR(dev, "failed to get rockchip,grf property\n");
return PTR_ERR(dp->grf);
}
@@ -287,31 +285,31 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
} else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (IS_ERR(dp->grfclk)) {
- dev_err(dev, "failed to get grf clock\n");
+ DRM_DEV_ERROR(dev, "failed to get grf clock\n");
return PTR_ERR(dp->grfclk);
}
dp->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(dp->pclk)) {
- dev_err(dev, "failed to get pclk property\n");
+ DRM_DEV_ERROR(dev, "failed to get pclk property\n");
return PTR_ERR(dp->pclk);
}
dp->rst = devm_reset_control_get(dev, "dp");
if (IS_ERR(dp->rst)) {
- dev_err(dev, "failed to get dp reset control\n");
+ DRM_DEV_ERROR(dev, "failed to get dp reset control\n");
return PTR_ERR(dp->rst);
}
ret = clk_prepare_enable(dp->pclk);
if (ret < 0) {
- dev_err(dp->dev, "failed to enable pclk %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "failed to enable pclk %d\n", ret);
return ret;
}
ret = rockchip_dp_pre_init(dp);
if (ret < 0) {
- dev_err(dp->dev, "failed to pre init %d\n", ret);
+ DRM_DEV_ERROR(dp->dev, "failed to pre init %d\n", ret);
clk_disable_unprepare(dp->pclk);
return ret;
}
@@ -381,7 +379,7 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
dp->plat_data.power_off = rockchip_dp_powerdown;
dp->plat_data.get_modes = rockchip_dp_get_modes;
- spin_lock_init(&dp->psr_lock);
+ mutex_init(&dp->psr_lock);
dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
INIT_WORK(&dp->psr_work, analogix_dp_psr_work);
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index a57da051f516..275844d0d0ec 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -287,14 +287,6 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static struct drm_encoder *
-cdn_dp_connector_best_encoder(struct drm_connector *connector)
-{
- struct cdn_dp_device *dp = connector_to_dp(connector);
-
- return &dp->encoder;
-}
-
static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
@@ -346,7 +338,6 @@ static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = {
.get_modes = cdn_dp_connector_get_modes,
- .best_encoder = cdn_dp_connector_best_encoder,
.mode_valid = cdn_dp_connector_mode_valid,
};
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index b14d211f6c21..eb3042c6d1b2 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -323,7 +323,7 @@ int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem,
reg = readl(dp->regs + VER_LIB_H_ADDR) & 0xff;
dp->fw_version |= reg << 24;
- dev_dbg(dp->dev, "firmware version: %x\n", dp->fw_version);
+ DRM_DEV_DEBUG(dp->dev, "firmware version: %x\n", dp->fw_version);
return 0;
}
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index 9a20b9dc27c8..b15755b6129c 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -430,9 +430,9 @@ static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
testdin = max_mbps_to_testdin(dsi->lane_mbps);
if (testdin < 0) {
- dev_err(dsi->dev,
- "failed to get testdin for %dmbps lane clock\n",
- dsi->lane_mbps);
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to get testdin for %dmbps lane clock\n",
+ dsi->lane_mbps);
return testdin;
}
@@ -443,7 +443,7 @@ static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
ret = clk_prepare_enable(dsi->phy_cfg_clk);
if (ret) {
- dev_err(dsi->dev, "Failed to enable phy_cfg_clk\n");
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable phy_cfg_clk\n");
return ret;
}
@@ -501,7 +501,7 @@ static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
val, val & LOCK, 1000, PHY_STATUS_TIMEOUT_US);
if (ret < 0) {
- dev_err(dsi->dev, "failed to wait for phy lock state\n");
+ DRM_DEV_ERROR(dsi->dev, "failed to wait for phy lock state\n");
goto phy_init_end;
}
@@ -509,8 +509,8 @@ static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
val, val & STOP_STATE_CLK_LANE, 1000,
PHY_STATUS_TIMEOUT_US);
if (ret < 0)
- dev_err(dsi->dev,
- "failed to wait for phy clk lane stop state\n");
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to wait for phy clk lane stop state\n");
phy_init_end:
clk_disable_unprepare(dsi->phy_cfg_clk);
@@ -529,8 +529,9 @@ static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi,
bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
if (bpp < 0) {
- dev_err(dsi->dev, "failed to get bpp for pixel format %d\n",
- dsi->format);
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to get bpp for pixel format %d\n",
+ dsi->format);
return bpp;
}
@@ -541,7 +542,8 @@ static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi,
if (tmp < max_mbps)
target_mbps = tmp;
else
- dev_err(dsi->dev, "DPHY clock frequency is out of range\n");
+ DRM_DEV_ERROR(dsi->dev,
+ "DPHY clock frequency is out of range\n");
}
pllref = DIV_ROUND_UP(clk_get_rate(dsi->pllref_clk), USEC_PER_SEC);
@@ -582,8 +584,9 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
struct dw_mipi_dsi *dsi = host_to_dsi(host);
if (device->lanes > dsi->pdata->max_data_lanes) {
- dev_err(dsi->dev, "the number of data lanes(%u) is too many\n",
- device->lanes);
+ DRM_DEV_ERROR(dsi->dev,
+ "the number of data lanes(%u) is too many\n",
+ device->lanes);
return -EINVAL;
}
@@ -632,7 +635,8 @@ static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)
val, !(val & GEN_CMD_FULL), 1000,
CMD_PKT_STATUS_TIMEOUT_US);
if (ret < 0) {
- dev_err(dsi->dev, "failed to get available command FIFO\n");
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to get available command FIFO\n");
return ret;
}
@@ -643,7 +647,7 @@ static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)
val, (val & mask) == mask,
1000, CMD_PKT_STATUS_TIMEOUT_US);
if (ret < 0) {
- dev_err(dsi->dev, "failed to write command FIFO\n");
+ DRM_DEV_ERROR(dsi->dev, "failed to write command FIFO\n");
return ret;
}
@@ -663,8 +667,9 @@ static int dw_mipi_dsi_dcs_short_write(struct dw_mipi_dsi *dsi,
data |= tx_buf[1] << 8;
if (msg->tx_len > 2) {
- dev_err(dsi->dev, "too long tx buf length %zu for short write\n",
- msg->tx_len);
+ DRM_DEV_ERROR(dsi->dev,
+ "too long tx buf length %zu for short write\n",
+ msg->tx_len);
return -EINVAL;
}
@@ -682,8 +687,9 @@ static int dw_mipi_dsi_dcs_long_write(struct dw_mipi_dsi *dsi,
u32 val;
if (msg->tx_len < 3) {
- dev_err(dsi->dev, "wrong tx buf length %zu for long write\n",
- msg->tx_len);
+ DRM_DEV_ERROR(dsi->dev,
+ "wrong tx buf length %zu for long write\n",
+ msg->tx_len);
return -EINVAL;
}
@@ -704,8 +710,8 @@ static int dw_mipi_dsi_dcs_long_write(struct dw_mipi_dsi *dsi,
val, !(val & GEN_PLD_W_FULL), 1000,
CMD_PKT_STATUS_TIMEOUT_US);
if (ret < 0) {
- dev_err(dsi->dev,
- "failed to get available write payload FIFO\n");
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to get available write payload FIFO\n");
return ret;
}
}
@@ -731,8 +737,8 @@ static ssize_t dw_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
ret = dw_mipi_dsi_dcs_long_write(dsi, msg);
break;
default:
- dev_err(dsi->dev, "unsupported message type 0x%02x\n",
- msg->type);
+ DRM_DEV_ERROR(dsi->dev, "unsupported message type 0x%02x\n",
+ msg->type);
ret = -EINVAL;
}
@@ -935,7 +941,7 @@ static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder)
return;
if (clk_prepare_enable(dsi->pclk)) {
- dev_err(dsi->dev, "%s: Failed to enable pclk\n", __func__);
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable pclk\n");
return;
}
@@ -967,7 +973,7 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
return;
if (clk_prepare_enable(dsi->pclk)) {
- dev_err(dsi->dev, "%s: Failed to enable pclk\n", __func__);
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable pclk\n");
return;
}
@@ -991,7 +997,7 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
*/
ret = clk_prepare_enable(dsi->grf_clk);
if (ret) {
- dev_err(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
return;
}
@@ -1004,7 +1010,7 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_CMD_MODE);
if (drm_panel_prepare(dsi->panel))
- dev_err(dsi->dev, "failed to prepare panel\n");
+ DRM_DEV_ERROR(dsi->dev, "failed to prepare panel\n");
dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_VID_MODE);
drm_panel_enable(dsi->panel);
@@ -1017,7 +1023,8 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
val = pdata->dsi0_en_bit << 16;
regmap_write(dsi->grf_regmap, pdata->grf_switch_reg, val);
- dev_dbg(dsi->dev, "vop %s output to dsi0\n", (mux) ? "LIT" : "BIG");
+ DRM_DEV_DEBUG(dsi->dev,
+ "vop %s output to dsi0\n", (mux) ? "LIT" : "BIG");
dsi->dpms_mode = DRM_MODE_DPMS_ON;
clk_disable_unprepare(dsi->grf_clk);
@@ -1111,7 +1118,7 @@ static int dw_mipi_dsi_register(struct drm_device *drm,
ret = drm_encoder_init(drm, &dsi->encoder, &dw_mipi_dsi_encoder_funcs,
DRM_MODE_ENCODER_DSI, NULL);
if (ret) {
- dev_err(dev, "Failed to initialize encoder with drm\n");
+ DRM_DEV_ERROR(dev, "Failed to initialize encoder with drm\n");
return ret;
}
@@ -1133,7 +1140,7 @@ static int rockchip_mipi_parse_dt(struct dw_mipi_dsi *dsi)
dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(dsi->grf_regmap)) {
- dev_err(dsi->dev, "Unable to get rockchip,grf\n");
+ DRM_DEV_ERROR(dsi->dev, "Unable to get rockchip,grf\n");
return PTR_ERR(dsi->grf_regmap);
}
@@ -1205,14 +1212,15 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
dsi->pllref_clk = devm_clk_get(dev, "ref");
if (IS_ERR(dsi->pllref_clk)) {
ret = PTR_ERR(dsi->pllref_clk);
- dev_err(dev, "Unable to get pll reference clock: %d\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Unable to get pll reference clock: %d\n", ret);
return ret;
}
dsi->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(dsi->pclk)) {
ret = PTR_ERR(dsi->pclk);
- dev_err(dev, "Unable to get pclk: %d\n", ret);
+ DRM_DEV_ERROR(dev, "Unable to get pclk: %d\n", ret);
return ret;
}
@@ -1226,7 +1234,8 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
if (ret == -ENOENT) {
apb_rst = NULL;
} else {
- dev_err(dev, "Unable to get reset control: %d\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Unable to get reset control: %d\n", ret);
return ret;
}
}
@@ -1234,7 +1243,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
if (apb_rst) {
ret = clk_prepare_enable(dsi->pclk);
if (ret) {
- dev_err(dev, "%s: Failed to enable pclk\n", __func__);
+ DRM_DEV_ERROR(dev, "Failed to enable pclk\n");
return ret;
}
@@ -1249,7 +1258,8 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
dsi->phy_cfg_clk = devm_clk_get(dev, "phy_cfg");
if (IS_ERR(dsi->phy_cfg_clk)) {
ret = PTR_ERR(dsi->phy_cfg_clk);
- dev_err(dev, "Unable to get phy_cfg_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Unable to get phy_cfg_clk: %d\n", ret);
return ret;
}
}
@@ -1258,20 +1268,20 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
dsi->grf_clk = devm_clk_get(dev, "grf");
if (IS_ERR(dsi->grf_clk)) {
ret = PTR_ERR(dsi->grf_clk);
- dev_err(dev, "Unable to get grf_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev, "Unable to get grf_clk: %d\n", ret);
return ret;
}
}
ret = clk_prepare_enable(dsi->pllref_clk);
if (ret) {
- dev_err(dev, "%s: Failed to enable pllref_clk\n", __func__);
+ DRM_DEV_ERROR(dev, "Failed to enable pllref_clk\n");
return ret;
}
ret = dw_mipi_dsi_register(drm, dsi);
if (ret) {
- dev_err(dev, "Failed to register mipi_dsi: %d\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to register mipi_dsi: %d\n", ret);
goto err_pllref;
}
@@ -1281,7 +1291,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
dsi->dsi_host.dev = dev;
ret = mipi_dsi_host_register(&dsi->dsi_host);
if (ret) {
- dev_err(dev, "Failed to register MIPI host: %d\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret);
goto err_cleanup;
}
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index ccd5d595ada7..1eb02a82fd91 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -168,7 +168,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(hdmi->regmap)) {
- dev_err(hdmi->dev, "Unable to get rockchip,grf\n");
+ DRM_DEV_ERROR(hdmi->dev, "Unable to get rockchip,grf\n");
return PTR_ERR(hdmi->regmap);
}
@@ -178,7 +178,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
} else if (PTR_ERR(hdmi->vpll_clk) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (IS_ERR(hdmi->vpll_clk)) {
- dev_err(hdmi->dev, "failed to get grf clock\n");
+ DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n");
return PTR_ERR(hdmi->vpll_clk);
}
@@ -188,13 +188,14 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
} else if (PTR_ERR(hdmi->grf_clk) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (IS_ERR(hdmi->grf_clk)) {
- dev_err(hdmi->dev, "failed to get grf clock\n");
+ DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n");
return PTR_ERR(hdmi->grf_clk);
}
ret = clk_prepare_enable(hdmi->vpll_clk);
if (ret) {
- dev_err(hdmi->dev, "Failed to enable HDMI vpll: %d\n", ret);
+ DRM_DEV_ERROR(hdmi->dev,
+ "Failed to enable HDMI vpll: %d\n", ret);
return ret;
}
@@ -259,17 +260,17 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
ret = clk_prepare_enable(hdmi->grf_clk);
if (ret < 0) {
- dev_err(hdmi->dev, "failed to enable grfclk %d\n", ret);
+ DRM_DEV_ERROR(hdmi->dev, "failed to enable grfclk %d\n", ret);
return;
}
ret = regmap_write(hdmi->regmap, hdmi->chip_data->lcdsel_grf_reg, val);
if (ret != 0)
- dev_err(hdmi->dev, "Could not write to GRF: %d\n", ret);
+ DRM_DEV_ERROR(hdmi->dev, "Could not write to GRF: %d\n", ret);
clk_disable_unprepare(hdmi->grf_clk);
- dev_dbg(hdmi->dev, "vop %s output to hdmi\n",
- ret ? "LIT" : "BIG");
+ DRM_DEV_DEBUG(hdmi->dev, "vop %s output to hdmi\n",
+ ret ? "LIT" : "BIG");
}
static int
@@ -368,7 +369,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
ret = rockchip_hdmi_parse_dt(hdmi);
if (ret) {
- dev_err(hdmi->dev, "Unable to parse OF data\n");
+ DRM_DEV_ERROR(hdmi->dev, "Unable to parse OF data\n");
return ret;
}
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 7a251a54e792..ee584d87111f 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -224,7 +224,7 @@ static void inno_hdmi_set_pwr_mode(struct inno_hdmi *hdmi, int mode)
break;
default:
- dev_err(hdmi->dev, "Unknown power mode %d\n", mode);
+ DRM_DEV_ERROR(hdmi->dev, "Unknown power mode %d\n", mode);
}
}
@@ -742,8 +742,9 @@ static int inno_hdmi_i2c_xfer(struct i2c_adapter *adap,
hdmi_writeb(hdmi, HDMI_INTERRUPT_STATUS1, m_INT_EDID_READY);
for (i = 0; i < num; i++) {
- dev_dbg(hdmi->dev, "xfer: num: %d/%d, len: %d, flags: %#x\n",
- i + 1, num, msgs[i].len, msgs[i].flags);
+ DRM_DEV_DEBUG(hdmi->dev,
+ "xfer: num: %d/%d, len: %d, flags: %#x\n",
+ i + 1, num, msgs[i].len, msgs[i].flags);
if (msgs[i].flags & I2C_M_RD)
ret = inno_hdmi_i2c_read(hdmi, &msgs[i]);
@@ -806,7 +807,7 @@ static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi)
hdmi->i2c = i2c;
- dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
+ DRM_DEV_INFO(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
return adap;
}
@@ -838,13 +839,14 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
hdmi->pclk = devm_clk_get(hdmi->dev, "pclk");
if (IS_ERR(hdmi->pclk)) {
- dev_err(hdmi->dev, "Unable to get HDMI pclk clk\n");
+ DRM_DEV_ERROR(hdmi->dev, "Unable to get HDMI pclk clk\n");
return PTR_ERR(hdmi->pclk);
}
ret = clk_prepare_enable(hdmi->pclk);
if (ret) {
- dev_err(hdmi->dev, "Cannot enable HDMI pclk clock: %d\n", ret);
+ DRM_DEV_ERROR(hdmi->dev,
+ "Cannot enable HDMI pclk clock: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index ff3d0f5efbb1..76d63de5921d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -58,7 +58,7 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
ret = iommu_attach_device(private->domain, dev);
if (ret) {
- dev_err(dev, "Failed to attach iommu device\n");
+ DRM_DEV_ERROR(dev, "Failed to attach iommu device\n");
return ret;
}
@@ -373,8 +373,9 @@ static int rockchip_drm_platform_of_probe(struct device *dev)
iommu = of_parse_phandle(port->parent, "iommus", 0);
if (!iommu || !of_device_is_available(iommu->parent)) {
- dev_dbg(dev, "no iommu attached for %pOF, using non-iommu buffers\n",
- port->parent);
+ DRM_DEV_DEBUG(dev,
+ "no iommu attached for %pOF, using non-iommu buffers\n",
+ port->parent);
/*
* if there is a crtc not support iommu, force set all
* crtc use non-iommu buffer.
@@ -389,12 +390,13 @@ static int rockchip_drm_platform_of_probe(struct device *dev)
}
if (i == 0) {
- dev_err(dev, "missing 'ports' property\n");
+ DRM_DEV_ERROR(dev, "missing 'ports' property\n");
return -ENODEV;
}
if (!found) {
- dev_err(dev, "No available vop found for display-subsystem.\n");
+ DRM_DEV_ERROR(dev,
+ "No available vop found for display-subsystem.\n");
return -ENODEV;
}
@@ -453,6 +455,8 @@ static int __init rockchip_drm_init(void)
num_rockchip_sub_drivers = 0;
ADD_ROCKCHIP_SUB_DRIVER(vop_platform_driver, CONFIG_DRM_ROCKCHIP);
+ ADD_ROCKCHIP_SUB_DRIVER(rockchip_lvds_driver,
+ CONFIG_ROCKCHIP_LVDS);
ADD_ROCKCHIP_SUB_DRIVER(rockchip_dp_driver,
CONFIG_ROCKCHIP_ANALOGIX_DP);
ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index c7e96b82cf63..498dfbc52cec 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -69,5 +69,6 @@ extern struct platform_driver dw_hdmi_rockchip_pltfm_driver;
extern struct platform_driver dw_mipi_dsi_driver;
extern struct platform_driver inno_hdmi_driver;
extern struct platform_driver rockchip_dp_driver;
+extern struct platform_driver rockchip_lvds_driver;
extern struct platform_driver vop_platform_driver;
#endif /* _ROCKCHIP_DRM_DRV_H_ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 70773041785b..cd2ace0c3caa 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -100,8 +100,9 @@ rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cm
ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
&rockchip_drm_fb_funcs);
if (ret) {
- dev_err(dev->dev, "Failed to initialize framebuffer: %d\n",
- ret);
+ DRM_DEV_ERROR(dev->dev,
+ "Failed to initialize framebuffer: %d\n",
+ ret);
kfree(rockchip_fb);
return ERR_PTR(ret);
}
@@ -134,7 +135,8 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
if (!obj) {
- dev_err(dev->dev, "Failed to lookup GEM object\n");
+ DRM_DEV_ERROR(dev->dev,
+ "Failed to lookup GEM object\n");
ret = -ENXIO;
goto err_gem_object_unreference;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 724579ebf947..e6650553f5d6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -76,7 +76,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
- dev_err(dev->dev, "Failed to create framebuffer info.\n");
+ DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info.\n");
ret = PTR_ERR(fbi);
goto out;
}
@@ -84,7 +84,8 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd,
private->fbdev_bo);
if (IS_ERR(helper->fb)) {
- dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
+ DRM_DEV_ERROR(dev->dev,
+ "Failed to allocate DRM framebuffer.\n");
ret = PTR_ERR(helper->fb);
goto out;
}
@@ -138,21 +139,24 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, helper, ROCKCHIP_MAX_CONNECTOR);
if (ret < 0) {
- dev_err(dev->dev, "Failed to initialize drm fb helper - %d.\n",
- ret);
+ DRM_DEV_ERROR(dev->dev,
+ "Failed to initialize drm fb helper - %d.\n",
+ ret);
return ret;
}
ret = drm_fb_helper_single_add_all_connectors(helper);
if (ret < 0) {
- dev_err(dev->dev, "Failed to add connectors - %d.\n", ret);
+ DRM_DEV_ERROR(dev->dev,
+ "Failed to add connectors - %d.\n", ret);
goto err_drm_fb_helper_fini;
}
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
- dev_err(dev->dev, "Failed to set initial hw config - %d.\n",
- ret);
+ DRM_DEV_ERROR(dev->dev,
+ "Failed to set initial hw config - %d.\n",
+ ret);
goto err_drm_fb_helper_fini;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 1869c8bb76c8..1d9655576b6e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -220,7 +220,7 @@ static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
unsigned int i, count = obj->size >> PAGE_SHIFT;
- unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long user_count = vma_pages(vma);
unsigned long uaddr = vma->vm_start;
unsigned long offset = vma->vm_pgoff;
unsigned long end = user_count + offset;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index bf9ed0e63973..19128b4dea54 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -160,7 +160,7 @@ static void vop_reg_set(struct vop *vop, const struct vop_reg *reg,
int offset, mask, shift;
if (!reg || !reg->mask) {
- dev_dbg(vop->dev, "Warning: not support %s\n", reg_name);
+ DRM_DEV_DEBUG(vop->dev, "Warning: not support %s\n", reg_name);
return;
}
@@ -499,7 +499,7 @@ static int vop_enable(struct drm_crtc *crtc)
ret = pm_runtime_get_sync(vop->dev);
if (ret < 0) {
- dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
+ DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
return ret;
}
@@ -523,7 +523,8 @@ static int vop_enable(struct drm_crtc *crtc)
*/
ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
if (ret) {
- dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
+ DRM_DEV_ERROR(vop->dev,
+ "failed to attach dma mapping, %d\n", ret);
goto err_disable_aclk;
}
@@ -1361,42 +1362,42 @@ static int vop_initial(struct vop *vop)
vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
if (IS_ERR(vop->hclk)) {
- dev_err(vop->dev, "failed to get hclk source\n");
+ DRM_DEV_ERROR(vop->dev, "failed to get hclk source\n");
return PTR_ERR(vop->hclk);
}
vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
if (IS_ERR(vop->aclk)) {
- dev_err(vop->dev, "failed to get aclk source\n");
+ DRM_DEV_ERROR(vop->dev, "failed to get aclk source\n");
return PTR_ERR(vop->aclk);
}
vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
if (IS_ERR(vop->dclk)) {
- dev_err(vop->dev, "failed to get dclk source\n");
+ DRM_DEV_ERROR(vop->dev, "failed to get dclk source\n");
return PTR_ERR(vop->dclk);
}
ret = pm_runtime_get_sync(vop->dev);
if (ret < 0) {
- dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
+ DRM_DEV_ERROR(vop->dev, "failed to get pm runtime: %d\n", ret);
return ret;
}
ret = clk_prepare(vop->dclk);
if (ret < 0) {
- dev_err(vop->dev, "failed to prepare dclk\n");
+ DRM_DEV_ERROR(vop->dev, "failed to prepare dclk\n");
goto err_put_pm_runtime;
}
/* Enable both the hclk and aclk to setup the vop */
ret = clk_prepare_enable(vop->hclk);
if (ret < 0) {
- dev_err(vop->dev, "failed to prepare/enable hclk\n");
+ DRM_DEV_ERROR(vop->dev, "failed to prepare/enable hclk\n");
goto err_unprepare_dclk;
}
ret = clk_prepare_enable(vop->aclk);
if (ret < 0) {
- dev_err(vop->dev, "failed to prepare/enable aclk\n");
+ DRM_DEV_ERROR(vop->dev, "failed to prepare/enable aclk\n");
goto err_disable_hclk;
}
@@ -1405,7 +1406,7 @@ static int vop_initial(struct vop *vop)
*/
ahb_rst = devm_reset_control_get(vop->dev, "ahb");
if (IS_ERR(ahb_rst)) {
- dev_err(vop->dev, "failed to get ahb reset\n");
+ DRM_DEV_ERROR(vop->dev, "failed to get ahb reset\n");
ret = PTR_ERR(ahb_rst);
goto err_disable_aclk;
}
@@ -1434,7 +1435,7 @@ static int vop_initial(struct vop *vop)
*/
vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
if (IS_ERR(vop->dclk_rst)) {
- dev_err(vop->dev, "failed to get dclk reset\n");
+ DRM_DEV_ERROR(vop->dev, "failed to get dclk reset\n");
ret = PTR_ERR(vop->dclk_rst);
goto err_disable_aclk;
}
@@ -1511,7 +1512,7 @@ int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout)
vop_line_flag_irq_disable(vop);
if (jiffies_left == 0) {
- dev_err(vop->dev, "Timeout waiting for IRQ\n");
+ DRM_DEV_ERROR(vop->dev, "Timeout waiting for IRQ\n");
return -ETIMEDOUT;
}
@@ -1558,7 +1559,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "cannot find irq for vop\n");
+ DRM_DEV_ERROR(dev, "cannot find irq for vop\n");
return irq;
}
vop->irq = (unsigned int)irq;
@@ -1584,7 +1585,8 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
ret = vop_initial(vop);
if (ret < 0) {
- dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev,
+ "cannot initial vop dev - err %d\n", ret);
goto err_disable_pm_runtime;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
new file mode 100644
index 000000000000..84911bdc27d1
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -0,0 +1,586 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:
+ * Mark Yao <mark.yao@rock-chips.com>
+ * Sandy Huang <hjc@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
+
+#include <linux/component.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+#include "rockchip_lvds.h"
+
+#define DISPLAY_OUTPUT_RGB 0
+#define DISPLAY_OUTPUT_LVDS 1
+#define DISPLAY_OUTPUT_DUAL_LVDS 2
+
+#define connector_to_lvds(c) \
+ container_of(c, struct rockchip_lvds, connector)
+
+#define encoder_to_lvds(c) \
+ container_of(c, struct rockchip_lvds, encoder)
+
+/**
+ * struct rockchip_lvds_soc_data - Rockchip LVDS SoC private data
+ * @ch1_offset: LVDS channel 1 register offset
+ * @grf_soc_con6: general register file offset for LVDS control
+ * @grf_soc_con7: general register file offset for LVDS control
+ * @has_vop_sel: indicates whether a source VOP needs to be selected
+ */
+struct rockchip_lvds_soc_data {
+ u32 ch1_offset;
+ int grf_soc_con6;
+ int grf_soc_con7;
+ bool has_vop_sel;
+};
+
+struct rockchip_lvds {
+ struct device *dev;
+ void __iomem *regs;
+ struct regmap *grf;
+ struct clk *pclk;
+ const struct rockchip_lvds_soc_data *soc_data;
+ int output; /* rgb lvds or dual lvds output */
+ int format; /* vesa or jeida format */
+ struct drm_device *drm_dev;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+ struct dev_pin_info *pins;
+};
+
+static inline void lvds_writel(struct rockchip_lvds *lvds, u32 offset, u32 val)
+{
+ writel_relaxed(val, lvds->regs + offset);
+ if (lvds->output == DISPLAY_OUTPUT_LVDS)
+ return;
+ writel_relaxed(val, lvds->regs + offset + lvds->soc_data->ch1_offset);
+}
+
+static inline int lvds_name_to_format(const char *s)
+{
+ if (strncmp(s, "jeida-18", 8) == 0)
+ return LVDS_JEIDA_18;
+ else if (strncmp(s, "jeida-24", 8) == 0)
+ return LVDS_JEIDA_24;
+ else if (strncmp(s, "vesa-24", 7) == 0)
+ return LVDS_VESA_24;
+
+ return -EINVAL;
+}
+
+static inline int lvds_name_to_output(const char *s)
+{
+ if (strncmp(s, "rgb", 3) == 0)
+ return DISPLAY_OUTPUT_RGB;
+ else if (strncmp(s, "lvds", 4) == 0)
+ return DISPLAY_OUTPUT_LVDS;
+ else if (strncmp(s, "duallvds", 8) == 0)
+ return DISPLAY_OUTPUT_DUAL_LVDS;
+
+ return -EINVAL;
+}
+
+static int rockchip_lvds_poweron(struct rockchip_lvds *lvds)
+{
+ int ret;
+ u32 val;
+
+ ret = clk_enable(lvds->pclk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(lvds->dev, "failed to enable lvds pclk %d\n", ret);
+ return ret;
+ }
+ ret = pm_runtime_get_sync(lvds->dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(lvds->dev, "failed to get pm runtime: %d\n", ret);
+ clk_disable(lvds->pclk);
+ return ret;
+ }
+ val = RK3288_LVDS_CH0_REG0_LANE4_EN | RK3288_LVDS_CH0_REG0_LANE3_EN |
+ RK3288_LVDS_CH0_REG0_LANE2_EN | RK3288_LVDS_CH0_REG0_LANE1_EN |
+ RK3288_LVDS_CH0_REG0_LANE0_EN;
+ if (lvds->output == DISPLAY_OUTPUT_RGB) {
+ val |= RK3288_LVDS_CH0_REG0_TTL_EN |
+ RK3288_LVDS_CH0_REG0_LANECK_EN;
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG0, val);
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG2,
+ RK3288_LVDS_PLL_FBDIV_REG2(0x46));
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG4,
+ RK3288_LVDS_CH0_REG4_LANECK_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE4_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE3_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE2_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE1_TTL_MODE |
+ RK3288_LVDS_CH0_REG4_LANE0_TTL_MODE);
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG5,
+ RK3288_LVDS_CH0_REG5_LANECK_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE4_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE3_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE2_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE1_TTL_DATA |
+ RK3288_LVDS_CH0_REG5_LANE0_TTL_DATA);
+ } else {
+ val |= RK3288_LVDS_CH0_REG0_LVDS_EN |
+ RK3288_LVDS_CH0_REG0_LANECK_EN;
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG0, val);
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG1,
+ RK3288_LVDS_CH0_REG1_LANECK_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE4_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE3_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE2_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE1_BIAS |
+ RK3288_LVDS_CH0_REG1_LANE0_BIAS);
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG2,
+ RK3288_LVDS_CH0_REG2_RESERVE_ON |
+ RK3288_LVDS_CH0_REG2_LANECK_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE4_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE3_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE2_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE1_LVDS_MODE |
+ RK3288_LVDS_CH0_REG2_LANE0_LVDS_MODE |
+ RK3288_LVDS_PLL_FBDIV_REG2(0x46));
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG4, 0x00);
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG5, 0x00);
+ }
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG3, RK3288_LVDS_PLL_FBDIV_REG3(0x46));
+ lvds_writel(lvds, RK3288_LVDS_CH0_REGD, RK3288_LVDS_PLL_PREDIV_REGD(0x0a));
+ lvds_writel(lvds, RK3288_LVDS_CH0_REG20, RK3288_LVDS_CH0_REG20_LSB);
+
+ lvds_writel(lvds, RK3288_LVDS_CFG_REGC, RK3288_LVDS_CFG_REGC_PLL_ENABLE);
+ lvds_writel(lvds, RK3288_LVDS_CFG_REG21, RK3288_LVDS_CFG_REG21_TX_ENABLE);
+
+ return 0;
+}
+
+static void rockchip_lvds_poweroff(struct rockchip_lvds *lvds)
+{
+ int ret;
+ u32 val;
+
+ lvds_writel(lvds, RK3288_LVDS_CFG_REG21, RK3288_LVDS_CFG_REG21_TX_ENABLE);
+ lvds_writel(lvds, RK3288_LVDS_CFG_REGC, RK3288_LVDS_CFG_REGC_PLL_ENABLE);
+ val = LVDS_DUAL | LVDS_TTL_EN | LVDS_CH0_EN | LVDS_CH1_EN | LVDS_PWRDN;
+ val |= val << 16;
+ ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con7, val);
+ if (ret != 0)
+ DRM_DEV_ERROR(lvds->dev, "Could not write to GRF: %d\n", ret);
+
+ pm_runtime_put(lvds->dev);
+ clk_disable(lvds->pclk);
+}
+
+static const struct drm_connector_funcs rockchip_lvds_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int rockchip_lvds_connector_get_modes(struct drm_connector *connector)
+{
+ struct rockchip_lvds *lvds = connector_to_lvds(connector);
+ struct drm_panel *panel = lvds->panel;
+
+ return drm_panel_get_modes(panel);
+}
+
+static const
+struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = {
+ .get_modes = rockchip_lvds_connector_get_modes,
+};
+
+static void rockchip_lvds_grf_config(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+ u8 pin_hsync = (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 1 : 0;
+ u8 pin_dclk = (mode->flags & DRM_MODE_FLAG_PCSYNC) ? 1 : 0;
+ u32 val;
+ int ret;
+
+ /* iomux to LCD data/sync mode */
+ if (lvds->output == DISPLAY_OUTPUT_RGB)
+ if (lvds->pins && !IS_ERR(lvds->pins->default_state))
+ pinctrl_select_state(lvds->pins->p,
+ lvds->pins->default_state);
+ val = lvds->format | LVDS_CH0_EN;
+ if (lvds->output == DISPLAY_OUTPUT_RGB)
+ val |= LVDS_TTL_EN | LVDS_CH1_EN;
+ else if (lvds->output == DISPLAY_OUTPUT_DUAL_LVDS)
+ val |= LVDS_DUAL | LVDS_CH1_EN;
+
+ if ((mode->htotal - mode->hsync_start) & 0x01)
+ val |= LVDS_START_PHASE_RST_1;
+
+ val |= (pin_dclk << 8) | (pin_hsync << 9);
+ val |= (0xffff << 16);
+ ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con7, val);
+ if (ret != 0) {
+ DRM_DEV_ERROR(lvds->dev, "Could not write to GRF: %d\n", ret);
+ return;
+ }
+}
+
+static int rockchip_lvds_set_vop_source(struct rockchip_lvds *lvds,
+ struct drm_encoder *encoder)
+{
+ u32 val;
+ int ret;
+
+ if (!lvds->soc_data->has_vop_sel)
+ return 0;
+
+ ret = drm_of_encoder_active_endpoint_id(lvds->dev->of_node, encoder);
+ if (ret < 0)
+ return ret;
+
+ val = RK3288_LVDS_SOC_CON6_SEL_VOP_LIT << 16;
+ if (ret)
+ val |= RK3288_LVDS_SOC_CON6_SEL_VOP_LIT;
+
+ ret = regmap_write(lvds->grf, lvds->soc_data->grf_soc_con6, val);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+rockchip_lvds_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ s->output_type = DRM_MODE_CONNECTOR_LVDS;
+
+ return 0;
+}
+
+static void rockchip_lvds_encoder_enable(struct drm_encoder *encoder)
+{
+ struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
+ int ret;
+
+ drm_panel_prepare(lvds->panel);
+ ret = rockchip_lvds_poweron(lvds);
+ if (ret < 0) {
+ DRM_DEV_ERROR(lvds->dev, "failed to power on lvds: %d\n", ret);
+ drm_panel_unprepare(lvds->panel);
+ }
+ rockchip_lvds_grf_config(encoder, mode);
+ rockchip_lvds_set_vop_source(lvds, encoder);
+ drm_panel_enable(lvds->panel);
+}
+
+static void rockchip_lvds_encoder_disable(struct drm_encoder *encoder)
+{
+ struct rockchip_lvds *lvds = encoder_to_lvds(encoder);
+
+ drm_panel_disable(lvds->panel);
+ rockchip_lvds_poweroff(lvds);
+ drm_panel_unprepare(lvds->panel);
+}
+
+static const
+struct drm_encoder_helper_funcs rockchip_lvds_encoder_helper_funcs = {
+ .enable = rockchip_lvds_encoder_enable,
+ .disable = rockchip_lvds_encoder_disable,
+ .atomic_check = rockchip_lvds_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs rockchip_lvds_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct rockchip_lvds_soc_data rk3288_lvds_data = {
+ .ch1_offset = 0x100,
+ .grf_soc_con6 = 0x025c,
+ .grf_soc_con7 = 0x0260,
+ .has_vop_sel = true,
+};
+
+static const struct of_device_id rockchip_lvds_dt_ids[] = {
+ {
+ .compatible = "rockchip,rk3288-lvds",
+ .data = &rk3288_lvds_data
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rockchip_lvds_dt_ids);
+
+static int rockchip_lvds_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct rockchip_lvds *lvds = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct device_node *remote = NULL;
+ struct device_node *port, *endpoint;
+ int ret = 0, child_count = 0;
+ const char *name;
+ u32 endpoint_id;
+
+ lvds->drm_dev = drm_dev;
+ port = of_graph_get_port_by_id(dev->of_node, 1);
+ if (!port) {
+ DRM_DEV_ERROR(dev,
+ "can't found port point, please init lvds panel port!\n");
+ return -EINVAL;
+ }
+ for_each_child_of_node(port, endpoint) {
+ child_count++;
+ of_property_read_u32(endpoint, "reg", &endpoint_id);
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, endpoint_id,
+ &lvds->panel, &lvds->bridge);
+ if (!ret)
+ break;
+ }
+ if (!child_count) {
+ DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
+ ret = -EINVAL;
+ goto err_put_port;
+ } else if (ret) {
+ DRM_DEV_ERROR(dev, "failed to find panel and bridge node\n");
+ ret = -EPROBE_DEFER;
+ goto err_put_port;
+ }
+ if (lvds->panel)
+ remote = lvds->panel->dev->of_node;
+ else
+ remote = lvds->bridge->of_node;
+ if (of_property_read_string(dev->of_node, "rockchip,output", &name))
+ /* default to RGB output */
+ lvds->output = DISPLAY_OUTPUT_RGB;
+ else
+ lvds->output = lvds_name_to_output(name);
+
+ if (lvds->output < 0) {
+ DRM_DEV_ERROR(dev, "invalid output type [%s]\n", name);
+ ret = lvds->output;
+ goto err_put_remote;
+ }
+
+ if (of_property_read_string(remote, "data-mapping", &name))
+ /* default to VESA 18-bit format */
+ lvds->format = LVDS_VESA_18;
+ else
+ lvds->format = lvds_name_to_format(name);
+
+ if (lvds->format < 0) {
+ DRM_DEV_ERROR(dev, "invalid data-mapping format [%s]\n", name);
+ ret = lvds->format;
+ goto err_put_remote;
+ }
+
+ encoder = &lvds->encoder;
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
+ dev->of_node);
+
+ ret = drm_encoder_init(drm_dev, encoder, &rockchip_lvds_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS, NULL);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to initialize encoder: %d\n", ret);
+ goto err_put_remote;
+ }
+
+ drm_encoder_helper_add(encoder, &rockchip_lvds_encoder_helper_funcs);
+
+ if (lvds->panel) {
+ connector = &lvds->connector;
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ ret = drm_connector_init(drm_dev, connector,
+ &rockchip_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to initialize connector: %d\n", ret);
+ goto err_free_encoder;
+ }
+
+ drm_connector_helper_add(connector,
+ &rockchip_lvds_connector_helper_funcs);
+
+ ret = drm_mode_connector_attach_encoder(connector, encoder);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to attach encoder: %d\n", ret);
+ goto err_free_connector;
+ }
+
+ ret = drm_panel_attach(lvds->panel, connector);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to attach panel: %d\n", ret);
+ goto err_free_connector;
+ }
+ } else {
+ lvds->bridge->encoder = encoder;
+ ret = drm_bridge_attach(encoder, lvds->bridge, NULL);
+ if (ret) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to attach bridge: %d\n", ret);
+ goto err_free_encoder;
+ }
+ encoder->bridge = lvds->bridge;
+ }
+
+ pm_runtime_enable(dev);
+ of_node_put(remote);
+ of_node_put(port);
+
+ return 0;
+
+err_free_connector:
+ drm_connector_cleanup(connector);
+err_free_encoder:
+ drm_encoder_cleanup(encoder);
+err_put_remote:
+ of_node_put(remote);
+err_put_port:
+ of_node_put(port);
+
+ return ret;
+}
+
+static void rockchip_lvds_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct rockchip_lvds *lvds = dev_get_drvdata(dev);
+
+ rockchip_lvds_encoder_disable(&lvds->encoder);
+ if (lvds->panel)
+ drm_panel_detach(lvds->panel);
+ pm_runtime_disable(dev);
+ drm_connector_cleanup(&lvds->connector);
+ drm_encoder_cleanup(&lvds->encoder);
+}
+
+static const struct component_ops rockchip_lvds_component_ops = {
+ .bind = rockchip_lvds_bind,
+ .unbind = rockchip_lvds_unbind,
+};
+
+static int rockchip_lvds_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_lvds *lvds;
+ const struct of_device_id *match;
+ struct resource *res;
+ int ret;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
+ if (!lvds)
+ return -ENOMEM;
+
+ lvds->dev = dev;
+ match = of_match_node(rockchip_lvds_dt_ids, dev->of_node);
+ if (!match)
+ return -ENODEV;
+ lvds->soc_data = match->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lvds->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(lvds->regs))
+ return PTR_ERR(lvds->regs);
+
+ lvds->pclk = devm_clk_get(&pdev->dev, "pclk_lvds");
+ if (IS_ERR(lvds->pclk)) {
+ DRM_DEV_ERROR(dev, "could not get pclk_lvds\n");
+ return PTR_ERR(lvds->pclk);
+ }
+
+ lvds->pins = devm_kzalloc(lvds->dev, sizeof(*lvds->pins),
+ GFP_KERNEL);
+ if (!lvds->pins)
+ return -ENOMEM;
+
+ lvds->pins->p = devm_pinctrl_get(lvds->dev);
+ if (IS_ERR(lvds->pins->p)) {
+ DRM_DEV_ERROR(dev, "no pinctrl handle\n");
+ devm_kfree(lvds->dev, lvds->pins);
+ lvds->pins = NULL;
+ } else {
+ lvds->pins->default_state =
+ pinctrl_lookup_state(lvds->pins->p, "lcdc");
+ if (IS_ERR(lvds->pins->default_state)) {
+ DRM_DEV_ERROR(dev, "no default pinctrl state\n");
+ devm_kfree(lvds->dev, lvds->pins);
+ lvds->pins = NULL;
+ }
+ }
+
+ lvds->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
+ if (IS_ERR(lvds->grf)) {
+ DRM_DEV_ERROR(dev, "missing rockchip,grf property\n");
+ return PTR_ERR(lvds->grf);
+ }
+
+ dev_set_drvdata(dev, lvds);
+
+ ret = clk_prepare(lvds->pclk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to prepare pclk_lvds\n");
+ return ret;
+ }
+ ret = component_add(&pdev->dev, &rockchip_lvds_component_ops);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "failed to add component\n");
+ clk_unprepare(lvds->pclk);
+ }
+
+ return ret;
+}
+
+static int rockchip_lvds_remove(struct platform_device *pdev)
+{
+ struct rockchip_lvds *lvds = dev_get_drvdata(&pdev->dev);
+
+ component_del(&pdev->dev, &rockchip_lvds_component_ops);
+ clk_unprepare(lvds->pclk);
+
+ return 0;
+}
+
+struct platform_driver rockchip_lvds_driver = {
+ .probe = rockchip_lvds_probe,
+ .remove = rockchip_lvds_remove,
+ .driver = {
+ .name = "rockchip-lvds",
+ .of_match_table = of_match_ptr(rockchip_lvds_dt_ids),
+ },
+};
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.h b/drivers/gpu/drm/rockchip/rockchip_lvds.h
new file mode 100644
index 000000000000..15810b737809
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:
+ * Sandy Huang <hjc@rock-chips.com>
+ * Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_LVDS_
+#define _ROCKCHIP_LVDS_
+
+#define RK3288_LVDS_CH0_REG0 0x00
+#define RK3288_LVDS_CH0_REG0_LVDS_EN BIT(7)
+#define RK3288_LVDS_CH0_REG0_TTL_EN BIT(6)
+#define RK3288_LVDS_CH0_REG0_LANECK_EN BIT(5)
+#define RK3288_LVDS_CH0_REG0_LANE4_EN BIT(4)
+#define RK3288_LVDS_CH0_REG0_LANE3_EN BIT(3)
+#define RK3288_LVDS_CH0_REG0_LANE2_EN BIT(2)
+#define RK3288_LVDS_CH0_REG0_LANE1_EN BIT(1)
+#define RK3288_LVDS_CH0_REG0_LANE0_EN BIT(0)
+
+#define RK3288_LVDS_CH0_REG1 0x04
+#define RK3288_LVDS_CH0_REG1_LANECK_BIAS BIT(5)
+#define RK3288_LVDS_CH0_REG1_LANE4_BIAS BIT(4)
+#define RK3288_LVDS_CH0_REG1_LANE3_BIAS BIT(3)
+#define RK3288_LVDS_CH0_REG1_LANE2_BIAS BIT(2)
+#define RK3288_LVDS_CH0_REG1_LANE1_BIAS BIT(1)
+#define RK3288_LVDS_CH0_REG1_LANE0_BIAS BIT(0)
+
+#define RK3288_LVDS_CH0_REG2 0x08
+#define RK3288_LVDS_CH0_REG2_RESERVE_ON BIT(7)
+#define RK3288_LVDS_CH0_REG2_LANECK_LVDS_MODE BIT(6)
+#define RK3288_LVDS_CH0_REG2_LANE4_LVDS_MODE BIT(5)
+#define RK3288_LVDS_CH0_REG2_LANE3_LVDS_MODE BIT(4)
+#define RK3288_LVDS_CH0_REG2_LANE2_LVDS_MODE BIT(3)
+#define RK3288_LVDS_CH0_REG2_LANE1_LVDS_MODE BIT(2)
+#define RK3288_LVDS_CH0_REG2_LANE0_LVDS_MODE BIT(1)
+#define RK3288_LVDS_CH0_REG2_PLL_FBDIV8 BIT(0)
+
+#define RK3288_LVDS_CH0_REG3 0x0c
+#define RK3288_LVDS_CH0_REG3_PLL_FBDIV_MASK 0xff
+
+#define RK3288_LVDS_CH0_REG4 0x10
+#define RK3288_LVDS_CH0_REG4_LANECK_TTL_MODE BIT(5)
+#define RK3288_LVDS_CH0_REG4_LANE4_TTL_MODE BIT(4)
+#define RK3288_LVDS_CH0_REG4_LANE3_TTL_MODE BIT(3)
+#define RK3288_LVDS_CH0_REG4_LANE2_TTL_MODE BIT(2)
+#define RK3288_LVDS_CH0_REG4_LANE1_TTL_MODE BIT(1)
+#define RK3288_LVDS_CH0_REG4_LANE0_TTL_MODE BIT(0)
+
+#define RK3288_LVDS_CH0_REG5 0x14
+#define RK3288_LVDS_CH0_REG5_LANECK_TTL_DATA BIT(5)
+#define RK3288_LVDS_CH0_REG5_LANE4_TTL_DATA BIT(4)
+#define RK3288_LVDS_CH0_REG5_LANE3_TTL_DATA BIT(3)
+#define RK3288_LVDS_CH0_REG5_LANE2_TTL_DATA BIT(2)
+#define RK3288_LVDS_CH0_REG5_LANE1_TTL_DATA BIT(1)
+#define RK3288_LVDS_CH0_REG5_LANE0_TTL_DATA BIT(0)
+
+#define RK3288_LVDS_CFG_REGC 0x30
+#define RK3288_LVDS_CFG_REGC_PLL_ENABLE 0x00
+#define RK3288_LVDS_CFG_REGC_PLL_DISABLE 0xff
+
+#define RK3288_LVDS_CH0_REGD 0x34
+#define RK3288_LVDS_CH0_REGD_PLL_PREDIV_MASK 0x1f
+
+#define RK3288_LVDS_CH0_REG20 0x80
+#define RK3288_LVDS_CH0_REG20_MSB 0x45
+#define RK3288_LVDS_CH0_REG20_LSB 0x44
+
+#define RK3288_LVDS_CFG_REG21 0x84
+#define RK3288_LVDS_CFG_REG21_TX_ENABLE 0x92
+#define RK3288_LVDS_CFG_REG21_TX_DISABLE 0x00
+#define RK3288_LVDS_CH1_OFFSET 0x100
+
+/* fbdiv value is split over 2 registers, with bit8 in reg2 */
+#define RK3288_LVDS_PLL_FBDIV_REG2(_fbd) \
+ (_fbd & BIT(8) ? RK3288_LVDS_CH0_REG2_PLL_FBDIV8 : 0)
+#define RK3288_LVDS_PLL_FBDIV_REG3(_fbd) \
+ (_fbd & RK3288_LVDS_CH0_REG3_PLL_FBDIV_MASK)
+#define RK3288_LVDS_PLL_PREDIV_REGD(_pd) \
+ (_pd & RK3288_LVDS_CH0_REGD_PLL_PREDIV_MASK)
+
+#define RK3288_LVDS_SOC_CON6_SEL_VOP_LIT BIT(3)
+
+#define LVDS_FMT_MASK (0x07 << 16)
+#define LVDS_MSB BIT(3)
+#define LVDS_DUAL BIT(4)
+#define LVDS_FMT_1 BIT(5)
+#define LVDS_TTL_EN BIT(6)
+#define LVDS_START_PHASE_RST_1 BIT(7)
+#define LVDS_DCLK_INV BIT(8)
+#define LVDS_CH0_EN BIT(11)
+#define LVDS_CH1_EN BIT(12)
+#define LVDS_PWRDN BIT(15)
+
+#define LVDS_24BIT (0 << 1)
+#define LVDS_18BIT (1 << 1)
+#define LVDS_FORMAT_VESA (0 << 0)
+#define LVDS_FORMAT_JEIDA (1 << 0)
+
+#define LVDS_VESA_24 0
+#define LVDS_JEIDA_24 1
+#define LVDS_VESA_18 2
+#define LVDS_JEIDA_18 3
+
+#endif /* _ROCKCHIP_LVDS_ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 94de7b9f6fde..4a39049e901a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -533,7 +533,7 @@ static int vop_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
if (!dev->of_node) {
- dev_err(dev, "can't find vop devices\n");
+ DRM_DEV_ERROR(dev, "can't find vop devices\n");
return -ENODEV;
}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index 388a0fc13564..d36919b14da7 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -16,6 +16,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <video/sh_mobile_meram.h>
@@ -131,7 +132,7 @@ shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
}
- return drm_fb_cma_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 1700c542cd93..9e9343101738 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -16,6 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_of.h>
@@ -145,7 +146,7 @@ static void sti_output_poll_changed(struct drm_device *ddev)
}
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = sti_output_poll_changed,
.atomic_check = sti_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 852bf2293b05..83314aee65cb 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -463,11 +463,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
bridge->driver_private = dvo;
bridge->funcs = &sti_dvo_bridge_funcs;
bridge->of_node = dvo->dev.of_node;
- err = drm_bridge_add(bridge);
- if (err) {
- DRM_ERROR("Failed to add bridge\n");
- return err;
- }
+ drm_bridge_add(bridge);
err = drm_bridge_attach(encoder, bridge, NULL);
if (err) {
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index b333b37f3f89..c857663eafc2 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "ltdc.h"
@@ -31,7 +32,7 @@ static void drv_output_poll_changed(struct drm_device *ddev)
}
static const struct drm_mode_config_funcs drv_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = drv_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 568c5d0461ea..e5b6310240fe 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -113,11 +113,13 @@ static enum dsi_color dsi_color_from_mipi(enum mipi_dsi_pixel_format fmt)
static int dsi_pll_get_clkout_khz(int clkin_khz, int idf, int ndiv, int odf)
{
+ int divisor = idf * odf;
+
/* prevent from division by 0 */
- if (idf * odf)
- return DIV_ROUND_CLOSEST(clkin_khz * ndiv, idf * odf);
+ if (!divisor)
+ return 0;
- return 0;
+ return DIV_ROUND_CLOSEST(clkin_khz * ndiv, divisor);
}
static int dsi_pll_get_params(int clkin_khz, int clkout_khz,
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index d394a03632c4..735c9081202a 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -791,9 +791,8 @@ static const struct drm_encoder_funcs ltdc_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static int ltdc_encoder_init(struct drm_device *ddev)
+static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
{
- struct ltdc_device *ldev = ddev->dev_private;
struct drm_encoder *encoder;
int ret;
@@ -807,7 +806,7 @@ static int ltdc_encoder_init(struct drm_device *ddev)
drm_encoder_init(ddev, encoder, &ltdc_encoder_funcs,
DRM_MODE_ENCODER_DPI, NULL);
- ret = drm_bridge_attach(encoder, ldev->bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret) {
drm_encoder_cleanup(encoder);
return -EINVAL;
@@ -936,12 +935,9 @@ int ltdc_load(struct drm_device *ddev)
ret = PTR_ERR(bridge);
goto err;
}
- ldev->is_panel_bridge = true;
}
- ldev->bridge = bridge;
-
- ret = ltdc_encoder_init(ddev);
+ ret = ltdc_encoder_init(ddev, bridge);
if (ret) {
DRM_ERROR("Failed to init encoder\n");
goto err;
@@ -972,8 +968,7 @@ int ltdc_load(struct drm_device *ddev)
return 0;
err:
- if (ldev->is_panel_bridge)
- drm_panel_bridge_remove(bridge);
+ drm_panel_bridge_remove(bridge);
clk_disable_unprepare(ldev->pixel_clk);
@@ -986,8 +981,7 @@ void ltdc_unload(struct drm_device *ddev)
DRM_DEBUG_DRIVER("\n");
- if (ldev->is_panel_bridge)
- drm_panel_bridge_remove(ldev->bridge);
+ drm_of_panel_bridge_remove(ddev->dev->of_node, 0, 0);
clk_disable_unprepare(ldev->pixel_clk);
}
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index bc6d6f6419a9..ae437557d715 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -24,8 +24,6 @@ struct ltdc_device {
struct drm_fbdev_cma *fbdev;
void __iomem *regs;
struct clk *pixel_clk; /* lcd pixel clock */
- struct drm_bridge *bridge;
- bool is_panel_bridge;
struct mutex err_lock; /* protecting error_status */
struct ltdc_caps caps;
u32 error_status;
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 55b32368f8fb..0c2f8c7facae 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -1,25 +1,26 @@
# SPDX-License-Identifier: GPL-2.0
-sun4i-drm-y += sun4i_drv.o
-sun4i-drm-y += sun4i_framebuffer.o
+sun4i-backend-y += sun4i_backend.o sun4i_layer.o
-sun4i-drm-hdmi-y += sun4i_hdmi_enc.o
-sun4i-drm-hdmi-y += sun4i_hdmi_i2c.o
-sun4i-drm-hdmi-y += sun4i_hdmi_ddc_clk.o
-sun4i-drm-hdmi-y += sun4i_hdmi_tmds_clk.o
+sun4i-drm-y += sun4i_drv.o
+sun4i-drm-y += sun4i_framebuffer.o
-sun4i-tcon-y += sun4i_tcon.o
-sun4i-tcon-y += sun4i_rgb.o
-sun4i-tcon-y += sun4i_dotclock.o
-sun4i-tcon-y += sun4i_crtc.o
+sun4i-drm-hdmi-y += sun4i_hdmi_ddc_clk.o
+sun4i-drm-hdmi-y += sun4i_hdmi_enc.o
+sun4i-drm-hdmi-y += sun4i_hdmi_i2c.o
+sun4i-drm-hdmi-y += sun4i_hdmi_tmds_clk.o
-sun4i-backend-y += sun4i_backend.o sun4i_layer.o
+sun8i-mixer-y += sun8i_mixer.o sun8i_layer.o
-sun8i-mixer-y += sun8i_mixer.o sun8i_layer.o
+sun4i-tcon-y += sun4i_crtc.o
+sun4i-tcon-y += sun4i_dotclock.o
+sun4i-tcon-y += sun4i_tcon.o
+sun4i-tcon-y += sun4i_rgb.o
-obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o sun4i-tcon.o
-obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o
+obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o
+obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o
obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o
+obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o
-obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o
+obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o
obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o
-obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o
+obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index ec5943627aa5..847eecbe4d14 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -20,6 +20,7 @@
#include <linux/component.h>
#include <linux/list.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/reset.h>
@@ -28,6 +29,11 @@
#include "sun4i_layer.h"
#include "sunxi_engine.h"
+struct sun4i_backend_quirks {
+ /* backend <-> TCON muxing selection done in backend */
+ bool needs_output_muxing;
+};
+
static const u32 sunxi_rgb2yuv_coef[12] = {
0x00000107, 0x00000204, 0x00000064, 0x00000108,
0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
@@ -209,24 +215,20 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *gem;
u32 lo_paddr, hi_paddr;
dma_addr_t paddr;
- int bpp;
-
- /* Get the physical address of the buffer in memory */
- gem = drm_fb_cma_get_gem_obj(fb, 0);
-
- DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
-
- /* Compute the start of the displayed memory */
- bpp = fb->format->cpp[0];
- paddr = gem->paddr + fb->offsets[0];
- paddr += (state->src_x >> 16) * bpp;
- paddr += (state->src_y >> 16) * fb->pitches[0];
+ /* Get the start of the displayed memory */
+ paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
+ /*
+ * backend DMA accesses DRAM directly, bypassing the system
+ * bus. As such, the address range is different and the buffer
+ * address needs to be corrected.
+ */
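+ /* On these SoCs DRAM is assumed to start at PHYS_OFFSET (0x40000000) */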
+ paddr -= PHYS_OFFSET;
+
/* Write the 32 lower bits of the address (in bits) */
lo_paddr = paddr << 3;
DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
@@ -349,6 +351,7 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_backend *backend;
+ const struct sun4i_backend_quirks *quirks;
struct resource *res;
void __iomem *regs;
int i, ret;
@@ -369,13 +372,6 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
if (IS_ERR(regs))
return PTR_ERR(regs);
- backend->engine.regs = devm_regmap_init_mmio(dev, regs,
- &sun4i_backend_regmap_config);
- if (IS_ERR(backend->engine.regs)) {
- dev_err(dev, "Couldn't create the backend regmap\n");
- return PTR_ERR(backend->engine.regs);
- }
-
backend->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(backend->reset)) {
dev_err(dev, "Couldn't get our reset line\n");
@@ -421,9 +417,23 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
}
}
+ backend->engine.regs = devm_regmap_init_mmio(dev, regs,
+ &sun4i_backend_regmap_config);
+ if (IS_ERR(backend->engine.regs)) {
+ dev_err(dev, "Couldn't create the backend regmap\n");
+ return PTR_ERR(backend->engine.regs);
+ }
+
list_add_tail(&backend->engine.list, &drv->engine_list);
- /* Reset the registers */
+ /*
+ * Many of the backend's layer configuration registers have
+ * undefined default values. This poses a risk as we use
+ * regmap_update_bits in some places, and don't overwrite
+ * the whole register.
+ *
+ * Clear the registers here to have something predictable.
+ */
for (i = 0x800; i < 0x1000; i += 4)
regmap_write(backend->engine.regs, i, 0);
@@ -436,6 +446,27 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
SUN4I_BACKEND_MODCTL_DEBE_EN |
SUN4I_BACKEND_MODCTL_START_CTL);
+ /* Set output selection if needed */
+ quirks = of_device_get_match_data(dev);
+ if (quirks->needs_output_muxing) {
+ /*
+ * We assume there is no dynamic muxing of backends
+ * and TCONs, so we select the backend with the same ID.
+ *
+ * While dynamic selection might be interesting, the CRTC is
+ * tied to the TCON while the layers are tied to the backends,
+ * which means we would need to switch between groups of
+ * layers. There might not be a way to represent this
+ * constraint in DRM.
+ */
+ regmap_update_bits(backend->engine.regs,
+ SUN4I_BACKEND_MODCTL_REG,
+ SUN4I_BACKEND_MODCTL_OUT_SEL,
+ (backend->engine.id
+ ? SUN4I_BACKEND_MODCTL_OUT_LCD1
+ : SUN4I_BACKEND_MODCTL_OUT_LCD0));
+ }
+
return 0;
err_disable_ram_clk:
@@ -483,10 +514,44 @@ static int sun4i_backend_remove(struct platform_device *pdev)
return 0;
}
+static const struct sun4i_backend_quirks sun4i_backend_quirks = {
+ .needs_output_muxing = true,
+};
+
+static const struct sun4i_backend_quirks sun5i_backend_quirks = {
+};
+
+static const struct sun4i_backend_quirks sun6i_backend_quirks = {
+};
+
+static const struct sun4i_backend_quirks sun7i_backend_quirks = {
+ .needs_output_muxing = true,
+};
+
+static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
+};
+
static const struct of_device_id sun4i_backend_of_table[] = {
- { .compatible = "allwinner,sun5i-a13-display-backend" },
- { .compatible = "allwinner,sun6i-a31-display-backend" },
- { .compatible = "allwinner,sun8i-a33-display-backend" },
+ {
+ .compatible = "allwinner,sun4i-a10-display-backend",
+ .data = &sun4i_backend_quirks,
+ },
+ {
+ .compatible = "allwinner,sun5i-a13-display-backend",
+ .data = &sun5i_backend_quirks,
+ },
+ {
+ .compatible = "allwinner,sun6i-a31-display-backend",
+ .data = &sun6i_backend_quirks,
+ },
+ {
+ .compatible = "allwinner,sun7i-a20-display-backend",
+ .data = &sun7i_backend_quirks,
+ },
+ {
+ .compatible = "allwinner,sun8i-a33-display-backend",
+ .data = &sun8i_a33_backend_quirks,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index 21945af67a9d..ac3cc029f5cd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -25,7 +25,8 @@
#define SUN4I_BACKEND_MODCTL_LINE_SEL BIT(29)
#define SUN4I_BACKEND_MODCTL_ITLMOD_EN BIT(28)
#define SUN4I_BACKEND_MODCTL_OUT_SEL GENMASK(22, 20)
-#define SUN4I_BACKEND_MODCTL_OUT_LCD (0 << 20)
+#define SUN4I_BACKEND_MODCTL_OUT_LCD0 (0 << 20)
+#define SUN4I_BACKEND_MODCTL_OUT_LCD1 (1 << 20)
#define SUN4I_BACKEND_MODCTL_OUT_FE0 (6 << 20)
#define SUN4I_BACKEND_MODCTL_OUT_FE1 (7 << 20)
#define SUN4I_BACKEND_MODCTL_HWC_EN BIT(16)
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index d097c6f93ad0..5decae0069d0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -30,6 +30,22 @@
#include "sunxi_engine.h"
#include "sun4i_tcon.h"
+/*
+ * While this doesn't strictly fit the DRM model, in practice we can
+ * only ever have one encoder per TCON since we have a mux in our
+ * TCON.
+ */
+static struct drm_encoder *sun4i_crtc_get_encoder(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, crtc->dev)
+ if (encoder->crtc == crtc)
+ return encoder;
+
+ return NULL;
+}
+
static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -72,11 +88,12 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
DRM_DEBUG_DRIVER("Disabling the CRTC\n");
- sun4i_tcon_disable(scrtc->tcon);
+ sun4i_tcon_set_status(scrtc->tcon, encoder, false);
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
@@ -90,11 +107,21 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
DRM_DEBUG_DRIVER("Enabling the CRTC\n");
- sun4i_tcon_enable(scrtc->tcon);
+ sun4i_tcon_set_status(scrtc->tcon, encoder, true);
+}
+
+static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
+ struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
+
+ sun4i_tcon_mode_set(scrtc->tcon, encoder, mode);
}
static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
@@ -102,6 +129,7 @@ static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
.atomic_flush = sun4i_crtc_atomic_flush,
.atomic_enable = sun4i_crtc_atomic_enable,
.atomic_disable = sun4i_crtc_atomic_disable,
+ .mode_set_nofb = sun4i_crtc_mode_set_nofb,
};
static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index ace59651892f..75c76cdd82bc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -11,6 +11,7 @@
*/
#include <linux/component.h>
+#include <linux/kfifo.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
@@ -106,11 +107,6 @@ static int sun4i_drv_bind(struct device *dev)
goto free_drm;
}
- /* drm_vblank_init calls kcalloc, which can fail */
- ret = drm_vblank_init(drm, 1);
- if (ret)
- goto free_mem_region;
-
drm_mode_config_init(drm);
ret = component_bind_all(drm->dev, drm);
@@ -119,6 +115,11 @@ static int sun4i_drv_bind(struct device *dev)
goto cleanup_mode_config;
}
+ /* drm_vblank_init calls kcalloc, which can fail */
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret)
+ goto free_mem_region;
+
drm->irq_enabled = true;
/* Remove early framebuffers (ie. simplefb) */
@@ -177,16 +178,20 @@ static bool sun4i_drv_node_is_connector(struct device_node *node)
static bool sun4i_drv_node_is_frontend(struct device_node *node)
{
- return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
+ return of_device_is_compatible(node, "allwinner,sun4i-a10-display-frontend") ||
+ of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") ||
+ of_device_is_compatible(node, "allwinner,sun7i-a20-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
}
static bool sun4i_drv_node_is_tcon(struct device_node *node)
{
- return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
+ return of_device_is_compatible(node, "allwinner,sun4i-a10-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun5i-a13-tcon") ||
of_device_is_compatible(node, "allwinner,sun6i-a31-tcon") ||
of_device_is_compatible(node, "allwinner,sun6i-a31s-tcon") ||
+ of_device_is_compatible(node, "allwinner,sun7i-a20-tcon") ||
of_device_is_compatible(node, "allwinner,sun8i-a33-tcon") ||
of_device_is_compatible(node, "allwinner,sun8i-v3s-tcon");
}
@@ -200,7 +205,33 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
+/*
+ * The encoder drivers use drm_of_find_possible_crtcs to get upstream
+ * crtcs from the device tree using of_graph. For the results to be
+ * correct, encoders must be probed/bound after _all_ crtcs have been
+ * created. The existing code uses a depth first recursive traversal
+ * of the of_graph, which means the encoders downstream of the TCON
+ * get added right after the first TCON. The second TCON or CRTC will
+ * never be properly associated with encoders connected to it.
+ *
+ * Also, in a dual display pipeline setup, both frontends can feed
+ * either backend, and both backends can feed either TCON, so we want
+ * all components of the same type to be added before the next type
+ * in the pipeline. Fortunately, the pipelines are perfectly symmetric,
+ * i.e. components of the same type are at the same depth when counted
+ * from the frontend. The only exception is the third pipeline in
+ * the A80 SoC, which we do not support anyway.
+ *
+ * Hence we can use a breadth first search traversal order to add
+ * components. We do not need to check for duplicates. The component
+ * matching system handles this for us.
+ */
+struct endpoint_list {
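+ /*
+ * Assumed deep enough for any supported pipeline; kfifo_put()
+ * silently drops the node if the fifo is ever full.
+ */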
+ DECLARE_KFIFO(fifo, struct device_node *, 16);
+};
+
static int sun4i_drv_add_endpoints(struct device *dev,
+ struct endpoint_list *list,
struct component_match **match,
struct device_node *node)
{
@@ -264,10 +295,7 @@ static int sun4i_drv_add_endpoints(struct device *dev,
}
}
- /* Walk down our tree */
- count += sun4i_drv_add_endpoints(dev, match, remote);
-
- of_node_put(remote);
+ kfifo_put(&list->fifo, remote);
}
return count;
@@ -276,8 +304,11 @@ static int sun4i_drv_add_endpoints(struct device *dev,
static int sun4i_drv_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
- struct device_node *np = pdev->dev.of_node;
- int i, count = 0;
+ struct device_node *np = pdev->dev.of_node, *endpoint;
+ struct endpoint_list list;
+ int i, ret, count = 0;
+
+ INIT_KFIFO(list.fifo);
for (i = 0;; i++) {
struct device_node *pipeline = of_parse_phandle(np,
@@ -286,12 +317,19 @@ static int sun4i_drv_probe(struct platform_device *pdev)
if (!pipeline)
break;
- count += sun4i_drv_add_endpoints(&pdev->dev, &match,
- pipeline);
- of_node_put(pipeline);
+ kfifo_put(&list.fifo, pipeline);
+ }
+
+ while (kfifo_get(&list.fifo, &endpoint)) {
+ /* process this endpoint */
+ ret = sun4i_drv_add_endpoints(&pdev->dev, &list, &match,
+ endpoint);
+
+ /* sun4i_drv_add_endpoints can fail to allocate memory */
+ if (ret < 0)
+ return ret;
- DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n",
- count, i);
+ count += ret;
}
if (count)
@@ -308,10 +346,12 @@ static int sun4i_drv_remove(struct platform_device *pdev)
}
static const struct of_device_id sun4i_drv_of_table[] = {
+ { .compatible = "allwinner,sun4i-a10-display-engine" },
{ .compatible = "allwinner,sun5i-a10s-display-engine" },
{ .compatible = "allwinner,sun5i-a13-display-engine" },
{ .compatible = "allwinner,sun6i-a31-display-engine" },
{ .compatible = "allwinner,sun6i-a31s-display-engine" },
+ { .compatible = "allwinner,sun7i-a20-display-engine" },
{ .compatible = "allwinner,sun8i-a33-display-engine" },
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ }
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 9872e0fc03b0..2992f0a6b349 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drmP.h>
#include "sun4i_drv.h"
@@ -28,7 +29,7 @@ static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
.output_poll_changed = sun4i_de_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
};
struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm)
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index a1f8cba251a2..b685ee11623d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -14,6 +14,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
+#include <linux/regmap.h>
#include <media/cec-pin.h>
@@ -58,16 +59,24 @@
#define SUN4I_HDMI_PAD_CTRL0_TXEN BIT(23)
#define SUN4I_HDMI_PAD_CTRL1_REG 0x204
+#define SUN4I_HDMI_PAD_CTRL1_UNKNOWN BIT(24) /* set on A31 */
#define SUN4I_HDMI_PAD_CTRL1_AMP_OPT BIT(23)
#define SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT BIT(22)
#define SUN4I_HDMI_PAD_CTRL1_EMP_OPT BIT(20)
#define SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT BIT(19)
+#define SUN4I_HDMI_PAD_CTRL1_PWSCK BIT(18)
+#define SUN4I_HDMI_PAD_CTRL1_PWSDT BIT(17)
#define SUN4I_HDMI_PAD_CTRL1_REG_DEN BIT(15)
#define SUN4I_HDMI_PAD_CTRL1_REG_DENCK BIT(14)
#define SUN4I_HDMI_PAD_CTRL1_REG_EMP(n) (((n) & 7) << 10)
#define SUN4I_HDMI_PAD_CTRL1_HALVE_CLK BIT(6)
#define SUN4I_HDMI_PAD_CTRL1_REG_AMP(n) (((n) & 7) << 3)
+/* These bits seem to invert the TMDS data channels */
+#define SUN4I_HDMI_PAD_CTRL1_INVERT_R BIT(2)
+#define SUN4I_HDMI_PAD_CTRL1_INVERT_G BIT(1)
+#define SUN4I_HDMI_PAD_CTRL1_INVERT_B BIT(0)
+
#define SUN4I_HDMI_PLL_CTRL_REG 0x208
#define SUN4I_HDMI_PLL_CTRL_PLL_EN BIT(31)
#define SUN4I_HDMI_PLL_CTRL_BWS BIT(30)
@@ -152,21 +161,106 @@
#define SUN4I_HDMI_DDC_FIFO_SIZE 16
+/* A31 specific */
+#define SUN6I_HDMI_DDC_CTRL_REG 0x500
+#define SUN6I_HDMI_DDC_CTRL_RESET BIT(31)
+#define SUN6I_HDMI_DDC_CTRL_START_CMD BIT(27)
+#define SUN6I_HDMI_DDC_CTRL_SDA_ENABLE BIT(6)
+#define SUN6I_HDMI_DDC_CTRL_SCL_ENABLE BIT(4)
+#define SUN6I_HDMI_DDC_CTRL_ENABLE BIT(0)
+
+#define SUN6I_HDMI_DDC_CMD_REG 0x508
+#define SUN6I_HDMI_DDC_CMD_BYTE_COUNT(count) ((count) << 16)
+/* command types in lower 3 bits are the same as sun4i */
+
+#define SUN6I_HDMI_DDC_ADDR_REG 0x50c
+#define SUN6I_HDMI_DDC_ADDR_SEGMENT(seg) (((seg) & 0xff) << 24)
+#define SUN6I_HDMI_DDC_ADDR_EDDC(addr) (((addr) & 0xff) << 16)
+#define SUN6I_HDMI_DDC_ADDR_OFFSET(off) (((off) & 0xff) << 8)
+#define SUN6I_HDMI_DDC_ADDR_SLAVE(addr) (((addr) & 0xff) << 1)
+
+#define SUN6I_HDMI_DDC_INT_STATUS_REG 0x514
+#define SUN6I_HDMI_DDC_INT_STATUS_TIMEOUT BIT(8)
+/* lower 8 bits are the same as sun4i */
+
+#define SUN6I_HDMI_DDC_FIFO_CTRL_REG 0x518
+#define SUN6I_HDMI_DDC_FIFO_CTRL_CLEAR BIT(15)
+/* lower 9 bits are the same as sun4i */
+
+#define SUN6I_HDMI_DDC_CLK_REG 0x520
+/* DDC CLK bit fields are the same, but the formula is not */
+
+#define SUN6I_HDMI_DDC_FIFO_DATA_REG 0x580
+
enum sun4i_hdmi_pkt_type {
SUN4I_HDMI_PKT_AVI = 2,
SUN4I_HDMI_PKT_END = 15,
};
+struct sun4i_hdmi_variant {
+ bool has_ddc_parent_clk;
+ bool has_reset_control;
+
+ u32 pad_ctrl0_init_val;
+ u32 pad_ctrl1_init_val;
+ u32 pll_ctrl_init_val;
+
+ struct reg_field ddc_clk_reg;
+ u8 ddc_clk_pre_divider;
+ u8 ddc_clk_m_offset;
+
+ u8 tmds_clk_div_offset;
+
+ /* Register fields for I2C adapter */
+ struct reg_field field_ddc_en;
+ struct reg_field field_ddc_start;
+ struct reg_field field_ddc_reset;
+ struct reg_field field_ddc_addr_reg;
+ struct reg_field field_ddc_slave_addr;
+ struct reg_field field_ddc_int_mask;
+ struct reg_field field_ddc_int_status;
+ struct reg_field field_ddc_fifo_clear;
+ struct reg_field field_ddc_fifo_rx_thres;
+ struct reg_field field_ddc_fifo_tx_thres;
+ struct reg_field field_ddc_byte_count;
+ struct reg_field field_ddc_cmd;
+ struct reg_field field_ddc_sda_en;
+ struct reg_field field_ddc_sck_en;
+
+ /* DDC FIFO register offset */
+ u32 ddc_fifo_reg;
+
+ /*
+ * DDC FIFO threshold boundary conditions
+ *
+ * This is used to cope with the threshold boundary condition
+ * being slightly different on sun5i and sun6i.
+ *
+ * On sun5i the threshold is exclusive, i.e. it does not include
+ * the value of the threshold (> for RX; < for TX).
+ * On sun6i the threshold is inclusive, i.e. it includes the
+ * value of the threshold (>= for RX; <= for TX).
+ */
+ bool ddc_fifo_thres_incl;
+
+ bool ddc_fifo_has_dir;
+};
+
struct sun4i_hdmi {
struct drm_connector connector;
struct drm_encoder encoder;
struct device *dev;
void __iomem *base;
+ struct regmap *regmap;
+
+ /* Reset control */
+ struct reset_control *reset;
/* Parent clocks */
struct clk *bus_clk;
struct clk *mod_clk;
+ struct clk *ddc_parent_clk;
struct clk *pll0_clk;
struct clk *pll1_clk;
@@ -176,10 +270,28 @@ struct sun4i_hdmi {
struct i2c_adapter *i2c;
+ /* Regmap fields for I2C adapter */
+ struct regmap_field *field_ddc_en;
+ struct regmap_field *field_ddc_start;
+ struct regmap_field *field_ddc_reset;
+ struct regmap_field *field_ddc_addr_reg;
+ struct regmap_field *field_ddc_slave_addr;
+ struct regmap_field *field_ddc_int_mask;
+ struct regmap_field *field_ddc_int_status;
+ struct regmap_field *field_ddc_fifo_clear;
+ struct regmap_field *field_ddc_fifo_rx_thres;
+ struct regmap_field *field_ddc_fifo_tx_thres;
+ struct regmap_field *field_ddc_byte_count;
+ struct regmap_field *field_ddc_cmd;
+ struct regmap_field *field_ddc_sda_en;
+ struct regmap_field *field_ddc_sck_en;
+
struct sun4i_drv *drv;
bool hdmi_monitor;
struct cec_adapter *cec_adap;
+
+ const struct sun4i_hdmi_variant *variant;
};
int sun4i_ddc_create(struct sun4i_hdmi *hdmi, struct clk *clk);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
index 4692e8c345ed..e826da34e919 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
@@ -11,13 +11,16 @@
*/
#include <linux/clk-provider.h>
+#include <linux/regmap.h>
-#include "sun4i_tcon.h"
#include "sun4i_hdmi.h"
struct sun4i_ddc {
struct clk_hw hw;
struct sun4i_hdmi *hdmi;
+ struct regmap_field *reg;
+ u8 pre_div;
+ u8 m_offset;
};
static inline struct sun4i_ddc *hw_to_ddc(struct clk_hw *hw)
@@ -27,6 +30,8 @@ static inline struct sun4i_ddc *hw_to_ddc(struct clk_hw *hw)
static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
unsigned long parent_rate,
+ const u8 pre_div,
+ const u8 m_offset,
u8 *m, u8 *n)
{
unsigned long best_rate = 0;
@@ -36,7 +41,8 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
for (_n = 0; _n < 8; _n++) {
unsigned long tmp_rate;
- tmp_rate = (((parent_rate / 2) / 10) >> _n) / (_m + 1);
+ tmp_rate = (((parent_rate / pre_div) / 10) >> _n) /
+ (_m + m_offset);
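+ /*
+ * Example, assuming a 24 MHz parent clock: with the sun4i/sun5i
+ * parameters (pre_div = 2, m_offset = 1), _n = 2 and _m = 2 give
+ * (((24000000 / 2) / 10) >> 2) / (2 + 1) = 100 kHz, the usual
+ * DDC bus rate.
+ */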
if (tmp_rate > rate)
continue;
@@ -60,21 +66,25 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
static long sun4i_ddc_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
- return sun4i_ddc_calc_divider(rate, *prate, NULL, NULL);
+ struct sun4i_ddc *ddc = hw_to_ddc(hw);
+
+ return sun4i_ddc_calc_divider(rate, *prate, ddc->pre_div,
+ ddc->m_offset, NULL, NULL);
}
static unsigned long sun4i_ddc_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct sun4i_ddc *ddc = hw_to_ddc(hw);
- u32 reg;
+ unsigned int reg;
u8 m, n;
- reg = readl(ddc->hdmi->base + SUN4I_HDMI_DDC_CLK_REG);
- m = (reg >> 3) & 0x7;
+ regmap_field_read(ddc->reg, &reg);
+ m = (reg >> 3) & 0xf;
n = reg & 0x7;
- return (((parent_rate / 2) / 10) >> n) / (m + 1);
+ return (((parent_rate / ddc->pre_div) / 10) >> n) /
+ (m + ddc->m_offset);
}
static int sun4i_ddc_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -83,10 +93,12 @@ static int sun4i_ddc_set_rate(struct clk_hw *hw, unsigned long rate,
struct sun4i_ddc *ddc = hw_to_ddc(hw);
u8 div_m, div_n;
- sun4i_ddc_calc_divider(rate, parent_rate, &div_m, &div_n);
+ sun4i_ddc_calc_divider(rate, parent_rate, ddc->pre_div,
+ ddc->m_offset, &div_m, &div_n);
- writel(SUN4I_HDMI_DDC_CLK_M(div_m) | SUN4I_HDMI_DDC_CLK_N(div_n),
- ddc->hdmi->base + SUN4I_HDMI_DDC_CLK_REG);
+ regmap_field_write(ddc->reg,
+ SUN4I_HDMI_DDC_CLK_M(div_m) |
+ SUN4I_HDMI_DDC_CLK_N(div_n));
return 0;
}
@@ -111,6 +123,11 @@ int sun4i_ddc_create(struct sun4i_hdmi *hdmi, struct clk *parent)
if (!ddc)
return -ENOMEM;
+ ddc->reg = devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->ddc_clk_reg);
+ if (IS_ERR(ddc->reg))
+ return PTR_ERR(ddc->reg);
+
init.name = "hdmi-ddc";
init.ops = &sun4i_ddc_ops;
init.parent_names = &parent_name;
@@ -118,6 +135,8 @@ int sun4i_ddc_create(struct sun4i_hdmi *hdmi, struct clk *parent)
ddc->hdmi = hdmi;
ddc->hw.init = &init;
+ ddc->pre_div = hdmi->variant->ddc_clk_pre_divider;
+ ddc->m_offset = hdmi->variant->ddc_clk_m_offset;
hdmi->ddc_clk = devm_clk_register(hdmi->dev, &ddc->hw);
if (IS_ERR(hdmi->ddc_clk))
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 3cf1a6932fac..dda904ec0534 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -20,14 +20,16 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
#include "sun4i_backend.h"
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
#include "sun4i_hdmi.h"
-#include "sun4i_tcon.h"
static inline struct sun4i_hdmi *
drm_encoder_to_sun4i_hdmi(struct drm_encoder *encoder)
@@ -83,8 +85,6 @@ static int sun4i_hdmi_atomic_check(struct drm_encoder *encoder,
static void sun4i_hdmi_disable(struct drm_encoder *encoder)
{
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
- struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
- struct sun4i_tcon *tcon = crtc->tcon;
u32 val;
DRM_DEBUG_DRIVER("Disabling the HDMI Output\n");
@@ -92,22 +92,16 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder)
val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
-
- sun4i_tcon_channel_disable(tcon, 1);
}
static void sun4i_hdmi_enable(struct drm_encoder *encoder)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
- struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
- struct sun4i_tcon *tcon = crtc->tcon;
u32 val = 0;
DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
- sun4i_tcon_channel_enable(tcon, 1);
-
sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
@@ -125,15 +119,9 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
- struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
- struct sun4i_tcon *tcon = crtc->tcon;
unsigned int x, y;
u32 val;
- sun4i_tcon1_mode_set(tcon, mode);
- sun4i_tcon_set_mux(tcon, 1, encoder);
-
- clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
clk_set_rate(hdmi->mod_clk, mode->crtc_clock * 1000);
clk_set_rate(hdmi->tmds_clk, mode->crtc_clock * 1000);
@@ -141,6 +129,22 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
writel(SUN4I_HDMI_UNKNOWN_INPUT_SYNC,
hdmi->base + SUN4I_HDMI_UNKNOWN_REG);
+ /*
+ * Setup output pad (?) controls
+ *
+ * This is done here instead of at probe/bind time because
+ * the controller seems to toggle some of the bits on its own.
+ *
+ * We can't just initialize the register there; we need to
+ * protect the clock bits that have already been read out and
+ * cached by the clock framework.
+ */
+ val = readl(hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
+ val &= SUN4I_HDMI_PAD_CTRL1_HALVE_CLK;
+ val |= hdmi->variant->pad_ctrl1_init_val;
+ writel(val, hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
+ val = readl(hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
+
/* Setup timing registers */
writel(SUN4I_HDMI_VID_TIMING_X(mode->hdisplay) |
SUN4I_HDMI_VID_TIMING_Y(mode->vdisplay),
@@ -267,6 +271,176 @@ static const struct cec_pin_ops sun4i_hdmi_cec_pin_ops = {
};
#endif
+#define SUN4I_HDMI_PAD_CTRL1_MASK (GENMASK(24, 7) | GENMASK(5, 0))
+#define SUN4I_HDMI_PLL_CTRL_MASK (GENMASK(31, 8) | GENMASK(3, 0))
+
+/* Only difference from sun5i is AMP is 4 instead of 6 */
+static const struct sun4i_hdmi_variant sun4i_variant = {
+ .pad_ctrl0_init_val = SUN4I_HDMI_PAD_CTRL0_TXEN |
+ SUN4I_HDMI_PAD_CTRL0_CKEN |
+ SUN4I_HDMI_PAD_CTRL0_PWENG |
+ SUN4I_HDMI_PAD_CTRL0_PWEND |
+ SUN4I_HDMI_PAD_CTRL0_PWENC |
+ SUN4I_HDMI_PAD_CTRL0_LDODEN |
+ SUN4I_HDMI_PAD_CTRL0_LDOCEN |
+ SUN4I_HDMI_PAD_CTRL0_BIASEN,
+ .pad_ctrl1_init_val = SUN4I_HDMI_PAD_CTRL1_REG_AMP(4) |
+ SUN4I_HDMI_PAD_CTRL1_REG_EMP(2) |
+ SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
+ SUN4I_HDMI_PAD_CTRL1_REG_DEN |
+ SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
+ SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
+ SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
+ SUN4I_HDMI_PAD_CTRL1_AMP_OPT,
+ .pll_ctrl_init_val = SUN4I_HDMI_PLL_CTRL_VCO_S(8) |
+ SUN4I_HDMI_PLL_CTRL_CS(7) |
+ SUN4I_HDMI_PLL_CTRL_CP_S(15) |
+ SUN4I_HDMI_PLL_CTRL_S(7) |
+ SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) |
+ SUN4I_HDMI_PLL_CTRL_SDIV2 |
+ SUN4I_HDMI_PLL_CTRL_LDO2_EN |
+ SUN4I_HDMI_PLL_CTRL_LDO1_EN |
+ SUN4I_HDMI_PLL_CTRL_HV_IS_33 |
+ SUN4I_HDMI_PLL_CTRL_BWS |
+ SUN4I_HDMI_PLL_CTRL_PLL_EN,
+
+ .ddc_clk_reg = REG_FIELD(SUN4I_HDMI_DDC_CLK_REG, 0, 6),
+ .ddc_clk_pre_divider = 2,
+ .ddc_clk_m_offset = 1,
+
+ .field_ddc_en = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 31, 31),
+ .field_ddc_start = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 30, 30),
+ .field_ddc_reset = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 0, 0),
+ .field_ddc_addr_reg = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 31),
+ .field_ddc_slave_addr = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 6),
+ .field_ddc_int_status = REG_FIELD(SUN4I_HDMI_DDC_INT_STATUS_REG, 0, 8),
+ .field_ddc_fifo_clear = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 31, 31),
+ .field_ddc_fifo_rx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 4, 7),
+ .field_ddc_fifo_tx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 0, 3),
+ .field_ddc_byte_count = REG_FIELD(SUN4I_HDMI_DDC_BYTE_COUNT_REG, 0, 9),
+ .field_ddc_cmd = REG_FIELD(SUN4I_HDMI_DDC_CMD_REG, 0, 2),
+ .field_ddc_sda_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 9, 9),
+ .field_ddc_sck_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 8, 8),
+
+ .ddc_fifo_reg = SUN4I_HDMI_DDC_FIFO_DATA_REG,
+ .ddc_fifo_has_dir = true,
+};
+
+static const struct sun4i_hdmi_variant sun5i_variant = {
+ .pad_ctrl0_init_val = SUN4I_HDMI_PAD_CTRL0_TXEN |
+ SUN4I_HDMI_PAD_CTRL0_CKEN |
+ SUN4I_HDMI_PAD_CTRL0_PWENG |
+ SUN4I_HDMI_PAD_CTRL0_PWEND |
+ SUN4I_HDMI_PAD_CTRL0_PWENC |
+ SUN4I_HDMI_PAD_CTRL0_LDODEN |
+ SUN4I_HDMI_PAD_CTRL0_LDOCEN |
+ SUN4I_HDMI_PAD_CTRL0_BIASEN,
+ .pad_ctrl1_init_val = SUN4I_HDMI_PAD_CTRL1_REG_AMP(6) |
+ SUN4I_HDMI_PAD_CTRL1_REG_EMP(2) |
+ SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
+ SUN4I_HDMI_PAD_CTRL1_REG_DEN |
+ SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
+ SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
+ SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
+ SUN4I_HDMI_PAD_CTRL1_AMP_OPT,
+ .pll_ctrl_init_val = SUN4I_HDMI_PLL_CTRL_VCO_S(8) |
+ SUN4I_HDMI_PLL_CTRL_CS(7) |
+ SUN4I_HDMI_PLL_CTRL_CP_S(15) |
+ SUN4I_HDMI_PLL_CTRL_S(7) |
+ SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) |
+ SUN4I_HDMI_PLL_CTRL_SDIV2 |
+ SUN4I_HDMI_PLL_CTRL_LDO2_EN |
+ SUN4I_HDMI_PLL_CTRL_LDO1_EN |
+ SUN4I_HDMI_PLL_CTRL_HV_IS_33 |
+ SUN4I_HDMI_PLL_CTRL_BWS |
+ SUN4I_HDMI_PLL_CTRL_PLL_EN,
+
+ .ddc_clk_reg = REG_FIELD(SUN4I_HDMI_DDC_CLK_REG, 0, 6),
+ .ddc_clk_pre_divider = 2,
+ .ddc_clk_m_offset = 1,
+
+ .field_ddc_en = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 31, 31),
+ .field_ddc_start = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 30, 30),
+ .field_ddc_reset = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 0, 0),
+ .field_ddc_addr_reg = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 31),
+ .field_ddc_slave_addr = REG_FIELD(SUN4I_HDMI_DDC_ADDR_REG, 0, 6),
+ .field_ddc_int_status = REG_FIELD(SUN4I_HDMI_DDC_INT_STATUS_REG, 0, 8),
+ .field_ddc_fifo_clear = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 31, 31),
+ .field_ddc_fifo_rx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 4, 7),
+ .field_ddc_fifo_tx_thres = REG_FIELD(SUN4I_HDMI_DDC_FIFO_CTRL_REG, 0, 3),
+ .field_ddc_byte_count = REG_FIELD(SUN4I_HDMI_DDC_BYTE_COUNT_REG, 0, 9),
+ .field_ddc_cmd = REG_FIELD(SUN4I_HDMI_DDC_CMD_REG, 0, 2),
+ .field_ddc_sda_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 9, 9),
+ .field_ddc_sck_en = REG_FIELD(SUN4I_HDMI_DDC_LINE_CTRL_REG, 8, 8),
+
+ .ddc_fifo_reg = SUN4I_HDMI_DDC_FIFO_DATA_REG,
+ .ddc_fifo_has_dir = true,
+};
+
+static const struct sun4i_hdmi_variant sun6i_variant = {
+ .has_ddc_parent_clk = true,
+ .has_reset_control = true,
+ .pad_ctrl0_init_val = 0xff |
+ SUN4I_HDMI_PAD_CTRL0_TXEN |
+ SUN4I_HDMI_PAD_CTRL0_CKEN |
+ SUN4I_HDMI_PAD_CTRL0_PWENG |
+ SUN4I_HDMI_PAD_CTRL0_PWEND |
+ SUN4I_HDMI_PAD_CTRL0_PWENC |
+ SUN4I_HDMI_PAD_CTRL0_LDODEN |
+ SUN4I_HDMI_PAD_CTRL0_LDOCEN,
+ .pad_ctrl1_init_val = SUN4I_HDMI_PAD_CTRL1_REG_AMP(6) |
+ SUN4I_HDMI_PAD_CTRL1_REG_EMP(4) |
+ SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
+ SUN4I_HDMI_PAD_CTRL1_REG_DEN |
+ SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
+ SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
+ SUN4I_HDMI_PAD_CTRL1_PWSDT |
+ SUN4I_HDMI_PAD_CTRL1_PWSCK |
+ SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
+ SUN4I_HDMI_PAD_CTRL1_AMP_OPT |
+ SUN4I_HDMI_PAD_CTRL1_UNKNOWN,
+ .pll_ctrl_init_val = SUN4I_HDMI_PLL_CTRL_VCO_S(8) |
+ SUN4I_HDMI_PLL_CTRL_CS(3) |
+ SUN4I_HDMI_PLL_CTRL_CP_S(10) |
+ SUN4I_HDMI_PLL_CTRL_S(4) |
+ SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) |
+ SUN4I_HDMI_PLL_CTRL_SDIV2 |
+ SUN4I_HDMI_PLL_CTRL_LDO2_EN |
+ SUN4I_HDMI_PLL_CTRL_LDO1_EN |
+ SUN4I_HDMI_PLL_CTRL_HV_IS_33 |
+ SUN4I_HDMI_PLL_CTRL_PLL_EN,
+
+ .ddc_clk_reg = REG_FIELD(SUN6I_HDMI_DDC_CLK_REG, 0, 6),
+ .ddc_clk_pre_divider = 1,
+ .ddc_clk_m_offset = 2,
+
+ .tmds_clk_div_offset = 1,
+
+ .field_ddc_en = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 0, 0),
+ .field_ddc_start = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 27, 27),
+ .field_ddc_reset = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 31, 31),
+ .field_ddc_addr_reg = REG_FIELD(SUN6I_HDMI_DDC_ADDR_REG, 1, 31),
+ .field_ddc_slave_addr = REG_FIELD(SUN6I_HDMI_DDC_ADDR_REG, 1, 7),
+ .field_ddc_int_status = REG_FIELD(SUN6I_HDMI_DDC_INT_STATUS_REG, 0, 8),
+ .field_ddc_fifo_clear = REG_FIELD(SUN6I_HDMI_DDC_FIFO_CTRL_REG, 18, 18),
+ .field_ddc_fifo_rx_thres = REG_FIELD(SUN6I_HDMI_DDC_FIFO_CTRL_REG, 4, 7),
+ .field_ddc_fifo_tx_thres = REG_FIELD(SUN6I_HDMI_DDC_FIFO_CTRL_REG, 0, 3),
+ .field_ddc_byte_count = REG_FIELD(SUN6I_HDMI_DDC_CMD_REG, 16, 25),
+ .field_ddc_cmd = REG_FIELD(SUN6I_HDMI_DDC_CMD_REG, 0, 2),
+ .field_ddc_sda_en = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 6, 6),
+ .field_ddc_sck_en = REG_FIELD(SUN6I_HDMI_DDC_CTRL_REG, 4, 4),
+
+ .ddc_fifo_reg = SUN6I_HDMI_DDC_FIFO_DATA_REG,
+ .ddc_fifo_thres_incl = true,
+};
+
+static const struct regmap_config sun4i_hdmi_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x580,
+};
+
static int sun4i_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -285,6 +459,10 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = dev;
hdmi->drv = drv;
+ hdmi->variant = of_device_get_match_data(dev);
+ if (!hdmi->variant)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hdmi->base = devm_ioremap_resource(dev, res);
if (IS_ERR(hdmi->base)) {
@@ -292,10 +470,25 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(hdmi->base);
}
+ if (hdmi->variant->has_reset_control) {
+ hdmi->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(hdmi->reset)) {
+ dev_err(dev, "Couldn't get the HDMI reset control\n");
+ return PTR_ERR(hdmi->reset);
+ }
+
+ ret = reset_control_deassert(hdmi->reset);
+ if (ret) {
+ dev_err(dev, "Couldn't deassert HDMI reset\n");
+ return ret;
+ }
+ }
+
hdmi->bus_clk = devm_clk_get(dev, "ahb");
if (IS_ERR(hdmi->bus_clk)) {
dev_err(dev, "Couldn't get the HDMI bus clock\n");
- return PTR_ERR(hdmi->bus_clk);
+ ret = PTR_ERR(hdmi->bus_clk);
+ goto err_assert_reset;
}
clk_prepare_enable(hdmi->bus_clk);
@@ -321,45 +514,37 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
goto err_disable_mod_clk;
}
+ hdmi->regmap = devm_regmap_init_mmio(dev, hdmi->base,
+ &sun4i_hdmi_regmap_config);
+ if (IS_ERR(hdmi->regmap)) {
+ dev_err(dev, "Couldn't create HDMI encoder regmap\n");
+ return PTR_ERR(hdmi->regmap);
+ }
+
ret = sun4i_tmds_create(hdmi);
if (ret) {
dev_err(dev, "Couldn't create the TMDS clock\n");
goto err_disable_mod_clk;
}
+ if (hdmi->variant->has_ddc_parent_clk) {
+ hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc");
+ if (IS_ERR(hdmi->ddc_parent_clk)) {
+ dev_err(dev, "Couldn't get the HDMI DDC clock\n");
+ return PTR_ERR(hdmi->ddc_parent_clk);
+ }
+ } else {
+ hdmi->ddc_parent_clk = hdmi->tmds_clk;
+ }
+
writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG);
- writel(SUN4I_HDMI_PAD_CTRL0_TXEN | SUN4I_HDMI_PAD_CTRL0_CKEN |
- SUN4I_HDMI_PAD_CTRL0_PWENG | SUN4I_HDMI_PAD_CTRL0_PWEND |
- SUN4I_HDMI_PAD_CTRL0_PWENC | SUN4I_HDMI_PAD_CTRL0_LDODEN |
- SUN4I_HDMI_PAD_CTRL0_LDOCEN | SUN4I_HDMI_PAD_CTRL0_BIASEN,
+ writel(hdmi->variant->pad_ctrl0_init_val,
hdmi->base + SUN4I_HDMI_PAD_CTRL0_REG);
- /*
- * We can't just initialize the register there, we need to
- * protect the clock bits that have already been read out and
- * cached by the clock framework.
- */
- reg = readl(hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
- reg &= SUN4I_HDMI_PAD_CTRL1_HALVE_CLK;
- reg |= SUN4I_HDMI_PAD_CTRL1_REG_AMP(6) |
- SUN4I_HDMI_PAD_CTRL1_REG_EMP(2) |
- SUN4I_HDMI_PAD_CTRL1_REG_DENCK |
- SUN4I_HDMI_PAD_CTRL1_REG_DEN |
- SUN4I_HDMI_PAD_CTRL1_EMPCK_OPT |
- SUN4I_HDMI_PAD_CTRL1_EMP_OPT |
- SUN4I_HDMI_PAD_CTRL1_AMPCK_OPT |
- SUN4I_HDMI_PAD_CTRL1_AMP_OPT;
- writel(reg, hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
-
reg = readl(hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
reg &= SUN4I_HDMI_PLL_CTRL_DIV_MASK;
- reg |= SUN4I_HDMI_PLL_CTRL_VCO_S(8) | SUN4I_HDMI_PLL_CTRL_CS(7) |
- SUN4I_HDMI_PLL_CTRL_CP_S(15) | SUN4I_HDMI_PLL_CTRL_S(7) |
- SUN4I_HDMI_PLL_CTRL_VCO_GAIN(4) | SUN4I_HDMI_PLL_CTRL_SDIV2 |
- SUN4I_HDMI_PLL_CTRL_LDO2_EN | SUN4I_HDMI_PLL_CTRL_LDO1_EN |
- SUN4I_HDMI_PLL_CTRL_HV_IS_33 | SUN4I_HDMI_PLL_CTRL_BWS |
- SUN4I_HDMI_PLL_CTRL_PLL_EN;
+ reg |= hdmi->variant->pll_ctrl_init_val;
writel(reg, hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
ret = sun4i_hdmi_i2c_create(dev, hdmi);
@@ -429,6 +614,8 @@ err_disable_mod_clk:
clk_disable_unprepare(hdmi->mod_clk);
err_disable_bus_clk:
clk_disable_unprepare(hdmi->bus_clk);
+err_assert_reset:
+ reset_control_assert(hdmi->reset);
return ret;
}
@@ -463,7 +650,9 @@ static int sun4i_hdmi_remove(struct platform_device *pdev)
}
static const struct of_device_id sun4i_hdmi_of_table[] = {
- { .compatible = "allwinner,sun5i-a10s-hdmi" },
+ { .compatible = "allwinner,sun4i-a10-hdmi", .data = &sun4i_variant, },
+ { .compatible = "allwinner,sun5i-a10s-hdmi", .data = &sun5i_variant, },
+ { .compatible = "allwinner,sun6i-a31-hdmi", .data = &sun6i_variant, },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_hdmi_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
index 2e42d09ab42e..58e9d37e8c17 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
@@ -25,8 +25,6 @@
/* FIFO request bit is set when FIFO level is above RX_THRESHOLD during read */
#define RX_THRESHOLD SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MAX
-/* FIFO request bit is set when FIFO level is below TX_THRESHOLD during write */
-#define TX_THRESHOLD 1
static int fifo_transfer(struct sun4i_hdmi *hdmi, u8 *buf, int len, bool read)
{
@@ -39,27 +37,36 @@ static int fifo_transfer(struct sun4i_hdmi *hdmi, u8 *buf, int len, bool read)
SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST |
SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE;
u32 reg;
+ /*
+ * If the threshold is inclusive, the FIFO may only hold
+ * RX_THRESHOLD bytes, instead of RX_THRESHOLD + 1.
+ */
+ int read_len = RX_THRESHOLD +
+ (hdmi->variant->ddc_fifo_thres_incl ? 0 : 1);
- /* Limit transfer length by FIFO threshold */
- len = min_t(int, len, read ? (RX_THRESHOLD + 1) :
- (SUN4I_HDMI_DDC_FIFO_SIZE - TX_THRESHOLD + 1));
+ /*
+ * Limit transfer length by FIFO threshold or FIFO size.
+ * For TX the threshold is for an empty FIFO.
+ */
+ len = min_t(int, len, read ? read_len : SUN4I_HDMI_DDC_FIFO_SIZE);
/* Wait until error, FIFO request bit set or transfer complete */
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG, reg,
- reg & mask, len * byte_time_ns, 100000))
+ if (regmap_field_read_poll_timeout(hdmi->field_ddc_int_status, reg,
+ reg & mask, len * byte_time_ns,
+ 100000))
return -ETIMEDOUT;
if (reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK)
return -EIO;
if (read)
- readsb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG, buf, len);
+ readsb(hdmi->base + hdmi->variant->ddc_fifo_reg, buf, len);
else
- writesb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG, buf, len);
+ writesb(hdmi->base + hdmi->variant->ddc_fifo_reg, buf, len);
- /* Clear FIFO request bit */
- writel(SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST,
- hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG);
+ /* Clear FIFO request bit by forcing a write to that bit */
+ regmap_field_force_write(hdmi->field_ddc_int_status,
+ SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST);
return len;
}
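The inclusive-threshold quirk changes how many bytes can be drained per FIFO request. As a hedged illustration (assuming, for the numbers only, that RX_THRESHOLD were 15): on a variant with ddc_fifo_thres_incl set, the request bit fires once 15 bytes are present, so each read chunk is capped at 15 bytes; on the other variants the cap is RX_THRESHOLD + 1 = 16 bytes, matching the read_len computation above.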
@@ -70,50 +77,52 @@ static int xfer_msg(struct sun4i_hdmi *hdmi, struct i2c_msg *msg)
u32 reg;
/* Set FIFO direction */
- reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
- reg &= ~SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK;
- reg |= (msg->flags & I2C_M_RD) ?
- SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ :
- SUN4I_HDMI_DDC_CTRL_FIFO_DIR_WRITE;
- writel(reg, hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+ if (hdmi->variant->ddc_fifo_has_dir) {
+ reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+ reg &= ~SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK;
+ reg |= (msg->flags & I2C_M_RD) ?
+ SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ :
+ SUN4I_HDMI_DDC_CTRL_FIFO_DIR_WRITE;
+ writel(reg, hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+ }
+
+ /* Clear address register (not cleared by soft reset) */
+ regmap_field_write(hdmi->field_ddc_addr_reg, 0);
/* Set I2C address */
- writel(SUN4I_HDMI_DDC_ADDR_SLAVE(msg->addr),
- hdmi->base + SUN4I_HDMI_DDC_ADDR_REG);
-
- /* Set FIFO RX/TX thresholds and clear FIFO */
- reg = readl(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG);
- reg |= SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR;
- reg &= ~SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MASK;
- reg |= SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES(RX_THRESHOLD);
- reg &= ~SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES_MASK;
- reg |= SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES(TX_THRESHOLD);
- writel(reg, hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG);
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG,
- reg,
- !(reg & SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR),
- 100, 2000))
+ regmap_field_write(hdmi->field_ddc_slave_addr, msg->addr);
+
+ /*
+ * Set FIFO RX/TX thresholds and clear FIFO
+ *
+ * If threshold is inclusive, we can set the TX threshold to
+ * 0 instead of 1.
+ */
+ regmap_field_write(hdmi->field_ddc_fifo_tx_thres,
+ hdmi->variant->ddc_fifo_thres_incl ? 0 : 1);
+ regmap_field_write(hdmi->field_ddc_fifo_rx_thres, RX_THRESHOLD);
+ regmap_field_write(hdmi->field_ddc_fifo_clear, 1);
+ if (regmap_field_read_poll_timeout(hdmi->field_ddc_fifo_clear,
+ reg, !reg, 100, 2000))
return -EIO;
/* Set transfer length */
- writel(msg->len, hdmi->base + SUN4I_HDMI_DDC_BYTE_COUNT_REG);
+ regmap_field_write(hdmi->field_ddc_byte_count, msg->len);
/* Set command */
- writel(msg->flags & I2C_M_RD ?
- SUN4I_HDMI_DDC_CMD_IMPLICIT_READ :
- SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE,
- hdmi->base + SUN4I_HDMI_DDC_CMD_REG);
+ regmap_field_write(hdmi->field_ddc_cmd,
+ msg->flags & I2C_M_RD ?
+ SUN4I_HDMI_DDC_CMD_IMPLICIT_READ :
+ SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE);
- /* Clear interrupt status bits */
- writel(SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK |
- SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST |
- SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE,
- hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG);
+ /* Clear interrupt status bits by forcing a write */
+ regmap_field_force_write(hdmi->field_ddc_int_status,
+ SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK |
+ SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST |
+ SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE);
/* Start command */
- reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
- writel(reg | SUN4I_HDMI_DDC_CTRL_START_CMD,
- hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+ regmap_field_write(hdmi->field_ddc_start, 1);
/* Transfer bytes */
for (i = 0; i < msg->len; i += len) {
@@ -124,14 +133,12 @@ static int xfer_msg(struct sun4i_hdmi *hdmi, struct i2c_msg *msg)
}
/* Wait for command to finish */
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG,
- reg,
- !(reg & SUN4I_HDMI_DDC_CTRL_START_CMD),
- 100, 100000))
+ if (regmap_field_read_poll_timeout(hdmi->field_ddc_start,
+ reg, !reg, 100, 100000))
return -EIO;
/* Check for errors */
- reg = readl(hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG);
+ regmap_field_read(hdmi->field_ddc_int_status, &reg);
if ((reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK) ||
!(reg & SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE)) {
return -EIO;
@@ -154,20 +161,21 @@ static int sun4i_hdmi_i2c_xfer(struct i2c_adapter *adap,
return -EINVAL;
}
+ /* DDC clock needs to be enabled for the module to work */
+ clk_prepare_enable(hdmi->ddc_clk);
+ clk_set_rate(hdmi->ddc_clk, 100000);
+
/* Reset I2C controller */
- writel(SUN4I_HDMI_DDC_CTRL_ENABLE | SUN4I_HDMI_DDC_CTRL_RESET,
- hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG, reg,
- !(reg & SUN4I_HDMI_DDC_CTRL_RESET),
- 100, 2000))
+ regmap_field_write(hdmi->field_ddc_en, 1);
+ regmap_field_write(hdmi->field_ddc_reset, 1);
+ if (regmap_field_read_poll_timeout(hdmi->field_ddc_reset,
+ reg, !reg, 100, 2000)) {
+ clk_disable_unprepare(hdmi->ddc_clk);
return -EIO;
+ }
- writel(SUN4I_HDMI_DDC_LINE_CTRL_SDA_ENABLE |
- SUN4I_HDMI_DDC_LINE_CTRL_SCL_ENABLE,
- hdmi->base + SUN4I_HDMI_DDC_LINE_CTRL_REG);
-
- clk_prepare_enable(hdmi->ddc_clk);
- clk_set_rate(hdmi->ddc_clk, 100000);
+ regmap_field_write(hdmi->field_ddc_sck_en, 1);
+ regmap_field_write(hdmi->field_ddc_sda_en, 1);
for (i = 0; i < num; i++) {
err = xfer_msg(hdmi, &msgs[i]);
@@ -191,12 +199,105 @@ static const struct i2c_algorithm sun4i_hdmi_i2c_algorithm = {
.functionality = sun4i_hdmi_i2c_func,
};
+static int sun4i_hdmi_init_regmap_fields(struct sun4i_hdmi *hdmi)
+{
+ hdmi->field_ddc_en =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_en);
+ if (IS_ERR(hdmi->field_ddc_en))
+ return PTR_ERR(hdmi->field_ddc_en);
+
+ hdmi->field_ddc_start =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_start);
+ if (IS_ERR(hdmi->field_ddc_start))
+ return PTR_ERR(hdmi->field_ddc_start);
+
+ hdmi->field_ddc_reset =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_reset);
+ if (IS_ERR(hdmi->field_ddc_reset))
+ return PTR_ERR(hdmi->field_ddc_reset);
+
+ hdmi->field_ddc_addr_reg =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_addr_reg);
+ if (IS_ERR(hdmi->field_ddc_addr_reg))
+ return PTR_ERR(hdmi->field_ddc_addr_reg);
+
+ hdmi->field_ddc_slave_addr =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_slave_addr);
+ if (IS_ERR(hdmi->field_ddc_slave_addr))
+ return PTR_ERR(hdmi->field_ddc_slave_addr);
+
+ hdmi->field_ddc_int_mask =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_int_mask);
+ if (IS_ERR(hdmi->field_ddc_int_mask))
+ return PTR_ERR(hdmi->field_ddc_int_mask);
+
+ hdmi->field_ddc_int_status =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_int_status);
+ if (IS_ERR(hdmi->field_ddc_int_status))
+ return PTR_ERR(hdmi->field_ddc_int_status);
+
+ hdmi->field_ddc_fifo_clear =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_fifo_clear);
+ if (IS_ERR(hdmi->field_ddc_fifo_clear))
+ return PTR_ERR(hdmi->field_ddc_fifo_clear);
+
+ hdmi->field_ddc_fifo_rx_thres =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_fifo_rx_thres);
+ if (IS_ERR(hdmi->field_ddc_fifo_rx_thres))
+ return PTR_ERR(hdmi->field_ddc_fifo_rx_thres);
+
+ hdmi->field_ddc_fifo_tx_thres =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_fifo_tx_thres);
+ if (IS_ERR(hdmi->field_ddc_fifo_tx_thres))
+ return PTR_ERR(hdmi->field_ddc_fifo_tx_thres);
+
+ hdmi->field_ddc_byte_count =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_byte_count);
+ if (IS_ERR(hdmi->field_ddc_byte_count))
+ return PTR_ERR(hdmi->field_ddc_byte_count);
+
+ hdmi->field_ddc_cmd =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_cmd);
+ if (IS_ERR(hdmi->field_ddc_cmd))
+ return PTR_ERR(hdmi->field_ddc_cmd);
+
+ hdmi->field_ddc_sda_en =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_sda_en);
+ if (IS_ERR(hdmi->field_ddc_sda_en))
+ return PTR_ERR(hdmi->field_ddc_sda_en);
+
+ hdmi->field_ddc_sck_en =
+ devm_regmap_field_alloc(hdmi->dev, hdmi->regmap,
+ hdmi->variant->field_ddc_sck_en);
+ if (IS_ERR(hdmi->field_ddc_sck_en))
+ return PTR_ERR(hdmi->field_ddc_sck_en);
+
+ return 0;
+}
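Each field_ddc_* descriptor consumed above is expected to come from the variant as a struct reg_field. A minimal sketch of how a variant could describe two of them with the regmap REG_FIELD() helper (the register name and bit positions below are illustrative placeholders, not verified offsets):

	static const struct sun4i_hdmi_variant example_variant = {
		/* DDC enable bit, assumed to sit in the DDC control register */
		.field_ddc_en    = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 31, 31),
		/* start-command bit, assumed to be in the same register */
		.field_ddc_start = REG_FIELD(SUN4I_HDMI_DDC_CTRL_REG, 30, 30),
		/* ... remaining field_ddc_* descriptors ... */
	};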
+
int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi)
{
struct i2c_adapter *adap;
int ret = 0;
- ret = sun4i_ddc_create(hdmi, hdmi->tmds_clk);
+ ret = sun4i_ddc_create(hdmi, hdmi->ddc_parent_clk);
+ if (ret)
+ return ret;
+
+ ret = sun4i_hdmi_init_regmap_fields(hdmi);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index 5cf2527bffc8..dc332ea56f6c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -12,12 +12,13 @@
#include <linux/clk-provider.h>
-#include "sun4i_tcon.h"
#include "sun4i_hdmi.h"
struct sun4i_tmds {
struct clk_hw hw;
struct sun4i_hdmi *hdmi;
+
+ u8 div_offset;
};
static inline struct sun4i_tmds *hw_to_tmds(struct clk_hw *hw)
@@ -28,6 +29,7 @@ static inline struct sun4i_tmds *hw_to_tmds(struct clk_hw *hw)
static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
unsigned long parent_rate,
+ u8 div_offset,
u8 *div,
bool *half)
{
@@ -35,7 +37,7 @@ static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
u8 best_m = 0, m;
bool is_double;
- for (m = 1; m < 16; m++) {
+ for (m = div_offset ?: 1; m < (16 + div_offset); m++) {
u8 d;
for (d = 1; d < 3; d++) {
@@ -67,11 +69,12 @@ static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
static int sun4i_tmds_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct clk_hw *parent;
+ struct sun4i_tmds *tmds = hw_to_tmds(hw);
+ struct clk_hw *parent = NULL;
unsigned long best_parent = 0;
unsigned long rate = req->rate;
int best_div = 1, best_half = 1;
- int i, j;
+ int i, j, p;
/*
* We only consider PLL3, since the TCON is very likely to be
@@ -79,32 +82,38 @@ static int sun4i_tmds_determine_rate(struct clk_hw *hw,
* clock, so we should not need to do anything.
*/
- parent = clk_hw_get_parent_by_index(hw, 0);
- if (!parent)
- return -EINVAL;
-
- for (i = 1; i < 3; i++) {
- for (j = 1; j < 16; j++) {
- unsigned long ideal = rate * i * j;
- unsigned long rounded;
-
- rounded = clk_hw_round_rate(parent, ideal);
-
- if (rounded == ideal) {
- best_parent = rounded;
- best_half = i;
- best_div = j;
- goto out;
- }
-
- if (abs(rate - rounded / i) <
- abs(rate - best_parent / best_div)) {
- best_parent = rounded;
- best_div = i;
+ for (p = 0; p < clk_hw_get_num_parents(hw); p++) {
+ parent = clk_hw_get_parent_by_index(hw, p);
+ if (!parent)
+ continue;
+
+ for (i = 1; i < 3; i++) {
+ for (j = tmds->div_offset ?: 1;
+ j < (16 + tmds->div_offset); j++) {
+ unsigned long ideal = rate * i * j;
+ unsigned long rounded;
+
+ rounded = clk_hw_round_rate(parent, ideal);
+
+ if (rounded == ideal) {
+ best_parent = rounded;
+ best_half = i;
+ best_div = j;
+ goto out;
+ }
+
+ if (abs(rate - rounded / i) <
+ abs(rate - best_parent / best_div)) {
+ best_parent = rounded;
+ best_div = i;
+ }
}
}
}
+ if (!parent)
+ return -EINVAL;
+
out:
req->rate = best_parent / best_half / best_div;
req->best_parent_rate = best_parent;
@@ -124,7 +133,7 @@ static unsigned long sun4i_tmds_recalc_rate(struct clk_hw *hw,
parent_rate /= 2;
reg = readl(tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
- reg = (reg >> 4) & 0xf;
+ reg = ((reg >> 4) & 0xf) + tmds->div_offset;
if (!reg)
reg = 1;
@@ -139,7 +148,8 @@ static int sun4i_tmds_set_rate(struct clk_hw *hw, unsigned long rate,
u32 reg;
u8 div;
- sun4i_tmds_calc_divider(rate, parent_rate, &div, &half);
+ sun4i_tmds_calc_divider(rate, parent_rate, tmds->div_offset,
+ &div, &half);
reg = readl(tmds->hdmi->base + SUN4I_HDMI_PAD_CTRL1_REG);
reg &= ~SUN4I_HDMI_PAD_CTRL1_HALVE_CLK;
@@ -149,7 +159,7 @@ static int sun4i_tmds_set_rate(struct clk_hw *hw, unsigned long rate,
reg = readl(tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
reg &= ~SUN4I_HDMI_PLL_CTRL_DIV_MASK;
- writel(reg | SUN4I_HDMI_PLL_CTRL_DIV(div),
+ writel(reg | SUN4I_HDMI_PLL_CTRL_DIV(div - tmds->div_offset),
tmds->hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
return 0;
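The offset handling is symmetric between the two paths: sun4i_tmds_set_rate() subtracts div_offset before writing the divider field, and sun4i_tmds_recalc_rate() adds it back when reading. As a worked example with an illustrative tmds_clk_div_offset of 1, a computed divider of 2 is written to the register as 2 - 1 = 1 and recalculated as 1 + 1 = 2, so the round trip is lossless.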
@@ -216,6 +226,7 @@ int sun4i_tmds_create(struct sun4i_hdmi *hdmi)
tmds->hdmi = hdmi;
tmds->hw.init = &init;
+ tmds->div_offset = hdmi->variant->tmds_clk_div_offset;
hdmi->tmds_clk = devm_clk_register(hdmi->dev, &tmds->hw);
if (IS_ERR(hdmi->tmds_clk))
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 7cd7090ad63a..832f8f9bc47f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -134,13 +134,10 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Enabling RGB output\n");
- if (!IS_ERR(tcon->panel))
+ if (!IS_ERR(tcon->panel)) {
drm_panel_prepare(tcon->panel);
-
- sun4i_tcon_channel_enable(tcon, 0);
-
- if (!IS_ERR(tcon->panel))
drm_panel_enable(tcon->panel);
+ }
}
static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
@@ -150,31 +147,13 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling RGB output\n");
- if (!IS_ERR(tcon->panel))
+ if (!IS_ERR(tcon->panel)) {
drm_panel_disable(tcon->panel);
-
- sun4i_tcon_channel_disable(tcon, 0);
-
- if (!IS_ERR(tcon->panel))
drm_panel_unprepare(tcon->panel);
-}
-
-static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
- struct sun4i_tcon *tcon = rgb->tcon;
-
- sun4i_tcon0_mode_set(tcon, mode);
- sun4i_tcon_set_mux(tcon, 0, encoder);
-
- /* FIXME: This seems to be board specific */
- clk_set_phase(tcon->dclk, 120);
+ }
}
static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
- .mode_set = sun4i_rgb_encoder_mode_set,
.disable = sun4i_rgb_encoder_disable,
.enable = sun4i_rgb_encoder_enable,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index d9791292553e..e122f5b2a395 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -14,9 +14,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
+#include <uapi/drm/drm_mode.h>
+
#include <linux/component.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
@@ -32,66 +35,61 @@
#include "sun4i_tcon.h"
#include "sunxi_engine.h"
-void sun4i_tcon_disable(struct sun4i_tcon *tcon)
-{
- DRM_DEBUG_DRIVER("Disabling TCON\n");
-
- /* Disable the TCON */
- regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
- SUN4I_TCON_GCTL_TCON_ENABLE, 0);
-}
-EXPORT_SYMBOL(sun4i_tcon_disable);
-
-void sun4i_tcon_enable(struct sun4i_tcon *tcon)
+static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
+ bool enabled)
{
- DRM_DEBUG_DRIVER("Enabling TCON\n");
-
- /* Enable the TCON */
- regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
- SUN4I_TCON_GCTL_TCON_ENABLE,
- SUN4I_TCON_GCTL_TCON_ENABLE);
-}
-EXPORT_SYMBOL(sun4i_tcon_enable);
+ struct clk *clk;
-void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel)
-{
- DRM_DEBUG_DRIVER("Disabling TCON channel %d\n", channel);
-
- /* Disable the TCON's channel */
- if (channel == 0) {
+ switch (channel) {
+ case 0:
regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
- SUN4I_TCON0_CTL_TCON_ENABLE, 0);
- clk_disable_unprepare(tcon->dclk);
+ SUN4I_TCON0_CTL_TCON_ENABLE,
+ enabled ? SUN4I_TCON0_CTL_TCON_ENABLE : 0);
+ clk = tcon->dclk;
+ break;
+ case 1:
+ WARN_ON(!tcon->quirks->has_channel_1);
+ regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+ SUN4I_TCON1_CTL_TCON_ENABLE,
+ enabled ? SUN4I_TCON1_CTL_TCON_ENABLE : 0);
+ clk = tcon->sclk1;
+ break;
+ default:
+ DRM_WARN("Unknown channel... doing nothing\n");
return;
}
- WARN_ON(!tcon->quirks->has_channel_1);
- regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
- SUN4I_TCON1_CTL_TCON_ENABLE, 0);
- clk_disable_unprepare(tcon->sclk1);
+ if (enabled)
+ clk_prepare_enable(clk);
+ else
+ clk_disable_unprepare(clk);
}
-EXPORT_SYMBOL(sun4i_tcon_channel_disable);
-void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel)
+void sun4i_tcon_set_status(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder,
+ bool enabled)
{
- DRM_DEBUG_DRIVER("Enabling TCON channel %d\n", channel);
-
- /* Enable the TCON's channel */
- if (channel == 0) {
- regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
- SUN4I_TCON0_CTL_TCON_ENABLE,
- SUN4I_TCON0_CTL_TCON_ENABLE);
- clk_prepare_enable(tcon->dclk);
+ int channel;
+
+ switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_NONE:
+ channel = 0;
+ break;
+ case DRM_MODE_ENCODER_TMDS:
+ case DRM_MODE_ENCODER_TVDAC:
+ channel = 1;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unknown encoder type, doing nothing...\n");
return;
}
- WARN_ON(!tcon->quirks->has_channel_1);
- regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
- SUN4I_TCON1_CTL_TCON_ENABLE,
- SUN4I_TCON1_CTL_TCON_ENABLE);
- clk_prepare_enable(tcon->sclk1);
+ regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
+ SUN4I_TCON_GCTL_TCON_ENABLE,
+ enabled ? SUN4I_TCON_GCTL_TCON_ENABLE : 0);
+
+ sun4i_tcon_channel_set_status(tcon, channel, enabled);
}
-EXPORT_SYMBOL(sun4i_tcon_channel_enable);
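With the per-channel enable/disable helpers gone, callers are expected to go through the single sun4i_tcon_set_status() entry point, which gates the global TCON enable bit as well as the channel enable bit and its clock. A hypothetical call site, shown only to illustrate the new interface:

	/* from an enable path elsewhere in the driver (illustrative) */
	sun4i_tcon_set_status(tcon, encoder, true);

	/* ... and the matching disable */
	sun4i_tcon_set_status(tcon, encoder, false);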
void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable)
{
@@ -109,30 +107,40 @@ void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable)
}
EXPORT_SYMBOL(sun4i_tcon_enable_vblank);
-void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
- struct drm_encoder *encoder)
+/*
+ * This function is a helper for TCON output muxing. The TCON output
+ * muxing control register in earlier SoCs (without the TCON TOP block)
+ * is located in TCON0. This helper returns a pointer to TCON0's
+ * sun4i_tcon structure, or NULL if not found.
+ */
+static struct sun4i_tcon *sun4i_get_tcon0(struct drm_device *drm)
{
- u32 val;
+ struct sun4i_drv *drv = drm->dev_private;
+ struct sun4i_tcon *tcon;
- if (!tcon->quirks->has_unknown_mux)
- return;
+ list_for_each_entry(tcon, &drv->tcon_list, list)
+ if (tcon->id == 0)
+ return tcon;
- if (channel != 1)
- return;
+ dev_warn(drm->dev,
+ "TCON0 not found, display output muxing may not work\n");
- if (encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
- val = 1;
- else
- val = 0;
+ return NULL;
+}
- /*
- * FIXME: Undocumented bits
- */
- regmap_write(tcon->regs, SUN4I_TCON_MUX_CTRL_REG, val);
+void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
+ const struct drm_encoder *encoder)
+{
+ int ret = -ENOTSUPP;
+
+ if (tcon->quirks->set_mux)
+ ret = tcon->quirks->set_mux(tcon, encoder);
+
+ DRM_DEBUG_DRIVER("Muxing encoder %s to CRTC %s: %d\n",
+ encoder->name, encoder->crtc->name, ret);
}
-EXPORT_SYMBOL(sun4i_tcon_set_mux);
-static int sun4i_tcon_get_clk_delay(struct drm_display_mode *mode,
+static int sun4i_tcon_get_clk_delay(const struct drm_display_mode *mode,
int channel)
{
int delay = mode->vtotal - mode->vdisplay;
@@ -150,15 +158,26 @@ static int sun4i_tcon_get_clk_delay(struct drm_display_mode *mode,
return delay;
}
-void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
- struct drm_display_mode *mode)
+static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
+ const struct drm_display_mode *mode)
+{
+ /* Configure the dot clock */
+ clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
+
+ /* Set the resolution */
+ regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
+ SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) |
+ SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
+}
+
+static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
+ const struct drm_display_mode *mode)
{
unsigned int bp, hsync, vsync;
u8 clk_delay;
u32 val = 0;
- /* Configure the dot clock */
- clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
+ sun4i_tcon0_mode_set_common(tcon, mode);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
@@ -166,11 +185,6 @@ void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
SUN4I_TCON0_CTL_CLK_DELAY_MASK,
SUN4I_TCON0_CTL_CLK_DELAY(clk_delay));
- /* Set the resolution */
- regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
- SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) |
- SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
-
/*
* This is called a backporch in the register documentation,
* but it really is the back porch + hsync
@@ -224,10 +238,9 @@ void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
/* Enable the output on the pins */
regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0);
}
-EXPORT_SYMBOL(sun4i_tcon0_mode_set);
-void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
- struct drm_display_mode *mode)
+static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
+ const struct drm_display_mode *mode)
{
unsigned int bp, hsync, vsync, vtotal;
u8 clk_delay;
@@ -315,7 +328,26 @@ void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
SUN4I_TCON_GCTL_IOMAP_MASK,
SUN4I_TCON_GCTL_IOMAP_TCON1);
}
-EXPORT_SYMBOL(sun4i_tcon1_mode_set);
+
+void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_NONE:
+ sun4i_tcon0_mode_set_rgb(tcon, mode);
+ sun4i_tcon_set_mux(tcon, 0, encoder);
+ break;
+ case DRM_MODE_ENCODER_TVDAC:
+ case DRM_MODE_ENCODER_TMDS:
+ sun4i_tcon1_mode_set(tcon, mode);
+ sun4i_tcon_set_mux(tcon, 1, encoder);
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unknown encoder type, doing nothing...\n");
+ }
+}
+EXPORT_SYMBOL(sun4i_tcon_mode_set);
static void sun4i_tcon_finish_page_flip(struct drm_device *dev,
struct sun4i_crtc *scrtc)
@@ -463,42 +495,170 @@ static int sun4i_tcon_init_regmap(struct device *dev,
* function in fact searches the corresponding engine, and the ID is
* requested via the get_id function of the engine.
*/
-static struct sunxi_engine *sun4i_tcon_find_engine(struct sun4i_drv *drv,
- struct device_node *node)
+static struct sunxi_engine *
+sun4i_tcon_find_engine_traverse(struct sun4i_drv *drv,
+ struct device_node *node)
{
struct device_node *port, *ep, *remote;
- struct sunxi_engine *engine;
+ struct sunxi_engine *engine = ERR_PTR(-EINVAL);
port = of_graph_get_port_by_id(node, 0);
if (!port)
return ERR_PTR(-EINVAL);
+ /*
+ * This only works if there is only one path from the TCON
+ * to any display engine. Otherwise the probe order of the
+ * TCONs and display engines is not guaranteed. They may
+ * either bind to the wrong one, or worse, bind to the same
+ * one if additional checks are not done.
+ *
+ * Bail out if there are multiple input connections.
+ */
+ if (of_get_available_child_count(port) != 1)
+ goto out_put_port;
+
+ /* Get the first connection without specifying an ID */
+ ep = of_get_next_available_child(port, NULL);
+ if (!ep)
+ goto out_put_port;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote)
+ goto out_put_ep;
+
+ /* does this node match any registered engines? */
+ list_for_each_entry(engine, &drv->engine_list, list)
+ if (remote == engine->node)
+ goto out_put_remote;
+
+ /* keep looking through upstream ports */
+ engine = sun4i_tcon_find_engine_traverse(drv, remote);
+
+out_put_remote:
+ of_node_put(remote);
+out_put_ep:
+ of_node_put(ep);
+out_put_port:
+ of_node_put(port);
+
+ return engine;
+}
+
+/*
+ * The device tree binding says that the remote endpoint ID of any
+ * connection between components, up to and including the TCON, of
+ * the display pipeline should be equal to the actual ID of the local
+ * component. Thus we can look at any one of the input connections of
+ * the TCONs, and use that connection's remote endpoint ID as our own.
+ *
+ * Since the user of this function already finds the input port,
+ * the port is passed in directly without further checks.
+ */
+static int sun4i_tcon_of_get_id_from_port(struct device_node *port)
+{
+ struct device_node *ep;
+ int ret = -EINVAL;
+
+ /* try finding an upstream endpoint */
for_each_available_child_of_node(port, ep) {
- remote = of_graph_get_remote_port_parent(ep);
+ struct device_node *remote;
+ u32 reg;
+
+ remote = of_graph_get_remote_endpoint(ep);
if (!remote)
continue;
- /* does this node match any registered engines? */
- list_for_each_entry(engine, &drv->engine_list, list) {
- if (remote == engine->node) {
- of_node_put(remote);
- of_node_put(port);
- return engine;
- }
- }
+ ret = of_property_read_u32(remote, "reg", &reg);
+ if (ret)
+ continue;
- /* keep looking through upstream ports */
- engine = sun4i_tcon_find_engine(drv, remote);
- if (!IS_ERR(engine)) {
- of_node_put(remote);
- of_node_put(port);
- return engine;
- }
+ ret = reg;
}
+ return ret;
+}
+
+/*
+ * Once we know the TCON's id, we can look through the list of
+ * engines to find a matching one. We assume all engines have
+ * been probed and added to the list.
+ */
+static struct sunxi_engine *sun4i_tcon_get_engine_by_id(struct sun4i_drv *drv,
+ int id)
+{
+ struct sunxi_engine *engine;
+
+ list_for_each_entry(engine, &drv->engine_list, list)
+ if (engine->id == id)
+ return engine;
+
return ERR_PTR(-EINVAL);
}
+/*
+ * On SoCs with the old display pipeline design (Display Engine 1.0),
+ * we assumed the TCON was always tied to just one backend. However
+ * this proved not to be the case. On the A31, the TCON can select
+ * either backend as its source. On the A20 (and likely on the A10),
+ * the backend can choose which TCON to output to.
+ *
+ * The device tree binding says that the remote endpoint ID of any
+ * connection between components, up to and including the TCON, of
+ * the display pipeline should be equal to the actual ID of the local
+ * component. Thus we should be able to look at any one of the input
+ * connections of the TCONs, and use that connection's remote endpoint
+ * ID as our own.
+ *
+ * However the connections between the backend and TCON were assumed
+ * to be always singular, and their endpoint IDs were all incorrectly
+ * set to 0. This means for these old device trees, we cannot just look
+ * up the remote endpoint ID of a TCON input endpoint. TCON1 would be
+ * incorrectly identified as TCON0.
+ *
+ * This function first checks if the TCON node has 2 input endpoints.
+ * If so, then the device tree is a corrected version, and it will use
+ * sun4i_tcon_of_get_id_from_port() and sun4i_tcon_get_engine_by_id()
+ * from above to fetch the ID and engine directly. If not, it is likely
+ * an old device tree in which the endpoint IDs were incorrect, but
+ * which did not have endpoint connections between the backend and TCON
+ * across different display pipelines. It will fall back to the old method of
+ * traversing the of_graph to try and find a matching engine by device
+ * node.
+ *
+ * In the case of single display pipeline device trees, either method
+ * works.
+ */
+static struct sunxi_engine *sun4i_tcon_find_engine(struct sun4i_drv *drv,
+ struct device_node *node)
+{
+ struct device_node *port;
+ struct sunxi_engine *engine;
+
+ port = of_graph_get_port_by_id(node, 0);
+ if (!port)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Is this a corrected device tree with cross pipeline
+ * connections between the backend and TCON?
+ */
+ if (of_get_child_count(port) > 1) {
+ /* Get our ID directly from an upstream endpoint */
+ int id = sun4i_tcon_of_get_id_from_port(port);
+
+ /* Get our engine by matching our ID */
+ engine = sun4i_tcon_get_engine_by_id(drv, id);
+
+ of_node_put(port);
+ return engine;
+ }
+
+ /* Fallback to old method by traversing input endpoints */
+ of_node_put(port);
+ return sun4i_tcon_find_engine_traverse(drv, node);
+}
+
static int sun4i_tcon_bind(struct device *dev, struct device *master,
void *data)
{
@@ -530,10 +690,7 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
}
/* Make sure our TCON is reset */
- if (!reset_control_status(tcon->lcd_rst))
- reset_control_assert(tcon->lcd_rst);
-
- ret = reset_control_deassert(tcon->lcd_rst);
+ ret = reset_control_reset(tcon->lcd_rst);
if (ret) {
dev_err(dev, "Couldn't deassert our reset line\n");
return ret;
@@ -574,6 +731,25 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
if (ret < 0)
goto err_free_clocks;
+ if (tcon->quirks->needs_de_be_mux) {
+ /*
+ * We assume there is no dynamic muxing of backends
+ * and TCONs, so we select the backend with the same ID.
+ *
+ * While dynamic selection might be interesting, the CRTC
+ * is tied to the TCON and the layers are tied to the
+ * backends, which means we would need to
+ * switch between groups of layers. There might not be
+ * a way to represent this constraint in DRM.
+ */
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
+ SUN4I_TCON0_CTL_SRC_SEL_MASK,
+ tcon->id);
+ regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+ SUN4I_TCON1_CTL_SRC_SEL_MASK,
+ tcon->id);
+ }
+
list_add_tail(&tcon->list, &drv->tcon_list);
return 0;
@@ -623,17 +799,97 @@ static int sun4i_tcon_remove(struct platform_device *pdev)
return 0;
}
+/* platform specific TCON muxing callbacks */
+static int sun4i_a10_tcon_set_mux(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ struct sun4i_tcon *tcon0 = sun4i_get_tcon0(encoder->dev);
+ u32 shift;
+
+ if (!tcon0)
+ return -EINVAL;
+
+ switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_TMDS:
+ /* HDMI */
+ shift = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(tcon0->regs, SUN4I_TCON_MUX_CTRL_REG,
+ 0x3 << shift, tcon->id << shift);
+
+ return 0;
+}
+
+static int sun5i_a13_tcon_set_mux(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ u32 val;
+
+ if (encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
+ val = 1;
+ else
+ val = 0;
+
+ /*
+ * FIXME: Undocumented bits
+ */
+ return regmap_write(tcon->regs, SUN4I_TCON_MUX_CTRL_REG, val);
+}
+
+static int sun6i_tcon_set_mux(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ struct sun4i_tcon *tcon0 = sun4i_get_tcon0(encoder->dev);
+ u32 shift;
+
+ if (!tcon0)
+ return -EINVAL;
+
+ switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_TMDS:
+ /* HDMI */
+ shift = 8;
+ break;
+ default:
+ /* TODO A31 has MIPI DSI but A31s does not */
+ return -EINVAL;
+ }
+
+ regmap_update_bits(tcon0->regs, SUN4I_TCON_MUX_CTRL_REG,
+ 0x3 << shift, tcon->id << shift);
+
+ return 0;
+}
+
+static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
+ .has_channel_1 = true,
+ .set_mux = sun4i_a10_tcon_set_mux,
+};
+
static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
- .has_unknown_mux = true,
- .has_channel_1 = true,
+ .has_channel_1 = true,
+ .set_mux = sun5i_a13_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
- .has_channel_1 = true,
+ .has_channel_1 = true,
+ .needs_de_be_mux = true,
+ .set_mux = sun6i_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
- .has_channel_1 = true,
+ .has_channel_1 = true,
+ .needs_de_be_mux = true,
+};
+
+static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
+ .has_channel_1 = true,
+ /* Same display pipeline structure as A10 */
+ .set_mux = sun4i_a10_tcon_set_mux,
};
static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
@@ -645,9 +901,11 @@ static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
};
static const struct of_device_id sun4i_tcon_of_table[] = {
+ { .compatible = "allwinner,sun4i-a10-tcon", .data = &sun4i_a10_quirks },
{ .compatible = "allwinner,sun5i-a13-tcon", .data = &sun5i_a13_quirks },
{ .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
{ .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
+ { .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks },
{ .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
{ .compatible = "allwinner,sun8i-v3s-tcon", .data = &sun8i_v3s_quirks },
{ }
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 552c88ec16be..f61bf6d83b4a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -37,6 +37,7 @@
#define SUN4I_TCON0_CTL_TCON_ENABLE BIT(31)
#define SUN4I_TCON0_CTL_CLK_DELAY_MASK GENMASK(8, 4)
#define SUN4I_TCON0_CTL_CLK_DELAY(delay) ((delay << 4) & SUN4I_TCON0_CTL_CLK_DELAY_MASK)
+#define SUN4I_TCON0_CTL_SRC_SEL_MASK GENMASK(2, 0)
#define SUN4I_TCON0_DCLK_REG 0x44
#define SUN4I_TCON0_DCLK_GATE_BIT (31)
@@ -85,6 +86,7 @@
#define SUN4I_TCON1_CTL_INTERLACE_ENABLE BIT(20)
#define SUN4I_TCON1_CTL_CLK_DELAY_MASK GENMASK(8, 4)
#define SUN4I_TCON1_CTL_CLK_DELAY(delay) ((delay << 4) & SUN4I_TCON1_CTL_CLK_DELAY_MASK)
+#define SUN4I_TCON1_CTL_SRC_SEL_MASK GENMASK(1, 0)
#define SUN4I_TCON1_BASIC0_REG 0x94
#define SUN4I_TCON1_BASIC0_X(width) ((((width) - 1) & 0xfff) << 16)
@@ -143,9 +145,14 @@
#define SUN4I_TCON_MAX_CHANNELS 2
+struct sun4i_tcon;
+
struct sun4i_tcon_quirks {
- bool has_unknown_mux; /* sun5i has undocumented mux */
bool has_channel_1; /* a33 does not have channel 1 */
+ bool needs_de_be_mux; /* sun6i needs mux to select backend */
+
+ /* callback to handle tcon muxing options */
+ int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
};
struct sun4i_tcon {
@@ -183,22 +190,11 @@ struct sun4i_tcon {
struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
struct drm_panel *sun4i_tcon_find_panel(struct device_node *node);
-/* Global Control */
-void sun4i_tcon_disable(struct sun4i_tcon *tcon);
-void sun4i_tcon_enable(struct sun4i_tcon *tcon);
-
-/* Channel Control */
-void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel);
-void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel);
-
void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable);
-
-/* Mode Related Controls */
-void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
- struct drm_encoder *encoder);
-void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
- struct drm_display_mode *mode);
-void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
- struct drm_display_mode *mode);
+void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder,
+ const struct drm_display_mode *mode);
+void sun4i_tcon_set_status(struct sun4i_tcon *crtc,
+ const struct drm_encoder *encoder, bool enable);
#endif /* __SUN4I_TCON_H__ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 050cfd43c7a0..b070d522ed8d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -24,7 +24,6 @@
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
-#include "sun4i_tcon.h"
#include "sunxi_engine.h"
#define SUN4I_TVE_EN_REG 0x000
@@ -345,12 +344,9 @@ static void sun4i_tv_disable(struct drm_encoder *encoder)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
- struct sun4i_tcon *tcon = crtc->tcon;
DRM_DEBUG_DRIVER("Disabling the TV Output\n");
- sun4i_tcon_channel_disable(tcon, 1);
-
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_ENABLE,
0);
@@ -362,7 +358,6 @@ static void sun4i_tv_enable(struct drm_encoder *encoder)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
- struct sun4i_tcon *tcon = crtc->tcon;
DRM_DEBUG_DRIVER("Enabling the TV Output\n");
@@ -371,8 +366,6 @@ static void sun4i_tv_enable(struct drm_encoder *encoder)
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_ENABLE,
SUN4I_TVE_EN_ENABLE);
-
- sun4i_tcon_channel_enable(tcon, 1);
}
static void sun4i_tv_mode_set(struct drm_encoder *encoder,
@@ -380,13 +373,8 @@ static void sun4i_tv_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
- struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
- struct sun4i_tcon *tcon = crtc->tcon;
const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
- sun4i_tcon1_mode_set(tcon, mode);
- sun4i_tcon_set_mux(tcon, 1, encoder);
-
/* Enable and map the DAC to the output */
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_DAC_MAP_MASK,
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index dc58ab140151..cf54847a8bd1 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -9,6 +9,7 @@ config DRM_TEGRA
select DRM_PANEL
select TEGRA_HOST1X
select IOMMU_IOVA if IOMMU_SUPPORT
+ select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you have an NVIDIA Tegra SoC.
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 4df39112e38e..24a5ef4f5bb8 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -23,16 +24,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
-struct tegra_dc_soc_info {
- bool supports_border_color;
- bool supports_interlacing;
- bool supports_cursor;
- bool supports_block_linear;
- unsigned int pitch_align;
- bool has_powergate;
- bool broken_reset;
-};
-
struct tegra_plane {
struct drm_plane base;
unsigned int index;
@@ -559,14 +550,21 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
return 0;
}
-static void tegra_dc_disable_window(struct tegra_dc *dc, int index)
+static void tegra_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
+ struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
+ struct tegra_plane *p = to_tegra_plane(plane);
unsigned long flags;
u32 value;
+ /* rien ne va plus -- nothing to do without a CRTC */
+ if (!old_state || !old_state->crtc)
+ return;
+
spin_lock_irqsave(&dc->lock, flags);
- value = WINDOW_A_SELECT << index;
+ value = WINDOW_A_SELECT << p->index;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
@@ -591,7 +589,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
return;
if (!plane->state->visible)
- return tegra_dc_disable_window(dc, p->index);
+ return tegra_plane_atomic_disable(plane, old_state);
memset(&window, 0, sizeof(window));
window.src.x = plane->state->src.x1 >> 16;
@@ -627,25 +625,10 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
tegra_dc_setup_window(dc, p->index, &window);
}
-static void tegra_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct tegra_plane *p = to_tegra_plane(plane);
- struct tegra_dc *dc;
-
- /* rien ne va plus */
- if (!old_state || !old_state->crtc)
- return;
-
- dc = to_tegra_dc(old_state->crtc);
-
- tegra_dc_disable_window(dc, p->index);
-}
-
-static const struct drm_plane_helper_funcs tegra_primary_plane_helper_funcs = {
+static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = {
.atomic_check = tegra_plane_atomic_check,
- .atomic_update = tegra_plane_atomic_update,
.atomic_disable = tegra_plane_atomic_disable,
+ .atomic_update = tegra_plane_atomic_update,
};
static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
@@ -685,7 +668,7 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
return ERR_PTR(err);
}
- drm_plane_helper_add(&plane->base, &tegra_primary_plane_helper_funcs);
+ drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs);
return &plane->base;
}
@@ -880,12 +863,6 @@ static const uint32_t tegra_overlay_plane_formats[] = {
DRM_FORMAT_YUV422,
};
-static const struct drm_plane_helper_funcs tegra_overlay_plane_helper_funcs = {
- .atomic_check = tegra_plane_atomic_check,
- .atomic_update = tegra_plane_atomic_update,
- .atomic_disable = tegra_plane_atomic_disable,
-};
-
static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int index)
@@ -913,7 +890,7 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
return ERR_PTR(err);
}
- drm_plane_helper_add(&plane->base, &tegra_overlay_plane_helper_funcs);
+ drm_plane_helper_add(&plane->base, &tegra_plane_helper_funcs);
return &plane->base;
}
@@ -1161,6 +1138,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+
+ err = clk_set_rate(dc->clk, state->pclk);
+ if (err < 0)
+ dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
+ dc->clk, state->pclk, err);
}
static void tegra_dc_stop(struct tegra_dc *dc)
@@ -1756,7 +1738,7 @@ static int tegra_dc_init(struct host1x_client *client)
struct drm_plane *cursor = NULL;
int err;
- dc->syncpt = host1x_syncpt_request(dc->dev, flags);
+ dc->syncpt = host1x_syncpt_request(client, flags);
if (!dc->syncpt)
dev_warn(dc->dev, "failed to allocate syncpoint\n");
@@ -1985,7 +1967,6 @@ static int tegra_dc_parse_dt(struct tegra_dc *dc)
static int tegra_dc_probe(struct platform_device *pdev)
{
- const struct of_device_id *id;
struct resource *regs;
struct tegra_dc *dc;
int err;
@@ -1994,14 +1975,11 @@ static int tegra_dc_probe(struct platform_device *pdev)
if (!dc)
return -ENOMEM;
- id = of_match_node(tegra_dc_of_match, pdev->dev.of_node);
- if (!id)
- return -ENODEV;
+ dc->soc = of_device_get_match_data(&pdev->dev);
spin_lock_init(&dc->lock);
INIT_LIST_HEAD(&dc->list);
dc->dev = &pdev->dev;
- dc->soc = id->data;
err = tegra_dc_parse_dt(dc);
if (err < 0)
@@ -2019,8 +1997,22 @@ static int tegra_dc_probe(struct platform_device *pdev)
return PTR_ERR(dc->rst);
}
- if (!dc->soc->broken_reset)
- reset_control_assert(dc->rst);
+ /* assert reset and disable clock */
+ if (!dc->soc->broken_reset) {
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0)
+ return err;
+
+ usleep_range(2000, 4000);
+
+ err = reset_control_assert(dc->rst);
+ if (err < 0)
+ return err;
+
+ usleep_range(2000, 4000);
+
+ clk_disable_unprepare(dc->clk);
+ }
if (dc->soc->has_powergate) {
if (dc->pipe == 0)
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 4a268635749b..cb100b6e3282 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -10,6 +10,126 @@
#ifndef TEGRA_DC_H
#define TEGRA_DC_H 1
+#include <linux/host1x.h>
+
+#include <drm/drm_crtc.h>
+
+#include "drm.h"
+
+struct tegra_output;
+
+struct tegra_dc_stats {
+ unsigned long frames;
+ unsigned long vblank;
+ unsigned long underflow;
+ unsigned long overflow;
+};
+
+struct tegra_dc_soc_info {
+ bool supports_border_color;
+ bool supports_interlacing;
+ bool supports_cursor;
+ bool supports_block_linear;
+ unsigned int pitch_align;
+ bool has_powergate;
+ bool broken_reset;
+};
+
+struct tegra_dc {
+ struct host1x_client client;
+ struct host1x_syncpt *syncpt;
+ struct device *dev;
+ spinlock_t lock;
+
+ struct drm_crtc base;
+ unsigned int powergate;
+ int pipe;
+
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *regs;
+ int irq;
+
+ struct tegra_output *rgb;
+
+ struct tegra_dc_stats stats;
+ struct list_head list;
+
+ struct drm_info_list *debugfs_files;
+ struct drm_minor *minor;
+ struct dentry *debugfs;
+
+ /* page-flip handling */
+ struct drm_pending_vblank_event *event;
+
+ const struct tegra_dc_soc_info *soc;
+
+ struct iommu_domain *domain;
+};
+
+static inline struct tegra_dc *
+host1x_client_to_dc(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_dc, client);
+}
+
+static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
+{
+ return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
+}
+
+static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value,
+ unsigned int offset)
+{
+ trace_dc_writel(dc->dev, offset, value);
+ writel(value, dc->regs + (offset << 2));
+}
+
+static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned int offset)
+{
+ u32 value = readl(dc->regs + (offset << 2));
+
+ trace_dc_readl(dc->dev, offset, value);
+
+ return value;
+}
+
+struct tegra_dc_window {
+ struct {
+ unsigned int x;
+ unsigned int y;
+ unsigned int w;
+ unsigned int h;
+ } src;
+ struct {
+ unsigned int x;
+ unsigned int y;
+ unsigned int w;
+ unsigned int h;
+ } dst;
+ unsigned int bits_per_pixel;
+ unsigned int stride[2];
+ unsigned long base[3];
+ bool bottom_up;
+
+ struct tegra_bo_tiling tiling;
+ u32 format;
+ u32 swap;
+};
+
+/* from dc.c */
+void tegra_dc_commit(struct tegra_dc *dc);
+int tegra_dc_state_setup_clock(struct tegra_dc *dc,
+ struct drm_crtc_state *crtc_state,
+ struct clk *clk, unsigned long pclk,
+ unsigned int div);
+
+/* from rgb.c */
+int tegra_dc_rgb_probe(struct tegra_dc *dc);
+int tegra_dc_rgb_remove(struct tegra_dc *dc);
+int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
+int tegra_dc_rgb_exit(struct tegra_dc *dc);
+
#define DC_CMD_GENERAL_INCR_SYNCPT 0x000
#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001
#define SYNCPT_CNTRL_NO_STALL (1 << 8)
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 597d563d636a..52552b9b89ef 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -155,8 +155,7 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
order = __ffs(tegra->domain->pgsize_bitmap);
init_iova_domain(&tegra->carveout.domain, 1UL << order,
- carveout_start >> order,
- carveout_end >> order);
+ carveout_start >> order);
tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
@@ -386,12 +385,10 @@ int tegra_drm_submit(struct tegra_drm_context *context,
unsigned int num_cmdbufs = args->num_cmdbufs;
unsigned int num_relocs = args->num_relocs;
unsigned int num_waitchks = args->num_waitchks;
- struct drm_tegra_cmdbuf __user *cmdbufs =
- (void __user *)(uintptr_t)args->cmdbufs;
- struct drm_tegra_reloc __user *relocs =
- (void __user *)(uintptr_t)args->relocs;
- struct drm_tegra_waitchk __user *waitchks =
- (void __user *)(uintptr_t)args->waitchks;
+ struct drm_tegra_cmdbuf __user *user_cmdbufs;
+ struct drm_tegra_reloc __user *user_relocs;
+ struct drm_tegra_waitchk __user *user_waitchks;
+ struct drm_tegra_syncpt __user *user_syncpt;
struct drm_tegra_syncpt syncpt;
struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
struct drm_gem_object **refs;
@@ -400,6 +397,11 @@ int tegra_drm_submit(struct tegra_drm_context *context,
unsigned int num_refs;
int err;
+ user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
+ user_relocs = u64_to_user_ptr(args->relocs);
+ user_waitchks = u64_to_user_ptr(args->waitchks);
+ user_syncpt = u64_to_user_ptr(args->syncpts);
+
/* We don't yet support other than one syncpt_incr struct per submit */
if (args->num_syncpts != 1)
return -EINVAL;
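u64_to_user_ptr() replaces the open-coded (void __user *)(uintptr_t) casts removed above. In essence it is the same cast with an added compile-time type check; a simplified sketch of the helper (not a verbatim copy of the upstream macro):

	/* simplified: the real macro also typechecks that x is a u64 */
	#define u64_to_user_ptr(x)	((void __user *)(uintptr_t)(x))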
@@ -440,7 +442,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
struct tegra_bo *obj;
u64 offset;
- if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
+ if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
err = -EFAULT;
goto fail;
}
@@ -476,7 +478,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
num_cmdbufs--;
- cmdbufs++;
+ user_cmdbufs++;
}
/* copy and resolve relocations from submit */
@@ -485,7 +487,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
struct tegra_bo *obj;
err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
- &relocs[num_relocs], drm,
+ &user_relocs[num_relocs], drm,
file);
if (err < 0)
goto fail;
@@ -519,9 +521,8 @@ int tegra_drm_submit(struct tegra_drm_context *context,
struct host1x_waitchk *wait = &job->waitchk[num_waitchks];
struct tegra_bo *obj;
- err = host1x_waitchk_copy_from_user(wait,
- &waitchks[num_waitchks],
- file);
+ err = host1x_waitchk_copy_from_user(
+ wait, &user_waitchks[num_waitchks], file);
if (err < 0)
goto fail;
@@ -539,8 +540,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
}
}
- if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
- sizeof(syncpt))) {
+ if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
err = -EFAULT;
goto fail;
}
@@ -1317,6 +1317,7 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra210-sor", },
{ .compatible = "nvidia,tegra210-sor1", },
{ .compatible = "nvidia,tegra210-vic", },
+ { .compatible = "nvidia,tegra186-vic", },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 063f5d397526..ddae331ad8b6 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -119,104 +119,7 @@ void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *iova);
void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
dma_addr_t iova);
-struct tegra_dc_soc_info;
-struct tegra_output;
-
-struct tegra_dc_stats {
- unsigned long frames;
- unsigned long vblank;
- unsigned long underflow;
- unsigned long overflow;
-};
-
-struct tegra_dc {
- struct host1x_client client;
- struct host1x_syncpt *syncpt;
- struct device *dev;
- spinlock_t lock;
-
- struct drm_crtc base;
- unsigned int powergate;
- int pipe;
-
- struct clk *clk;
- struct reset_control *rst;
- void __iomem *regs;
- int irq;
-
- struct tegra_output *rgb;
-
- struct tegra_dc_stats stats;
- struct list_head list;
-
- struct drm_info_list *debugfs_files;
- struct drm_minor *minor;
- struct dentry *debugfs;
-
- /* page-flip handling */
- struct drm_pending_vblank_event *event;
-
- const struct tegra_dc_soc_info *soc;
-
- struct iommu_domain *domain;
-};
-
-static inline struct tegra_dc *
-host1x_client_to_dc(struct host1x_client *client)
-{
- return container_of(client, struct tegra_dc, client);
-}
-
-static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
-{
- return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
-}
-
-static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value,
- unsigned int offset)
-{
- trace_dc_writel(dc->dev, offset, value);
- writel(value, dc->regs + (offset << 2));
-}
-
-static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned int offset)
-{
- u32 value = readl(dc->regs + (offset << 2));
-
- trace_dc_readl(dc->dev, offset, value);
-
- return value;
-}
-
-struct tegra_dc_window {
- struct {
- unsigned int x;
- unsigned int y;
- unsigned int w;
- unsigned int h;
- } src;
- struct {
- unsigned int x;
- unsigned int y;
- unsigned int w;
- unsigned int h;
- } dst;
- unsigned int bits_per_pixel;
- unsigned int stride[2];
- unsigned long base[3];
- bool bottom_up;
-
- struct tegra_bo_tiling tiling;
- u32 format;
- u32 swap;
-};
-
-/* from dc.c */
-void tegra_dc_commit(struct tegra_dc *dc);
-int tegra_dc_state_setup_clock(struct tegra_dc *dc,
- struct drm_crtc_state *crtc_state,
- struct clk *clk, unsigned long pclk,
- unsigned int div);
+struct cec_notifier;
struct tegra_output {
struct device_node *of_node;
@@ -225,6 +128,7 @@ struct tegra_output {
struct drm_panel *panel;
struct i2c_adapter *ddc;
const struct edid *edid;
+ struct cec_notifier *notifier;
unsigned int hpd_irq;
int hpd_gpio;
enum of_gpio_flags hpd_gpio_flags;
@@ -243,12 +147,6 @@ static inline struct tegra_output *connector_to_output(struct drm_connector *c)
return container_of(c, struct tegra_output, connector);
}
-/* from rgb.c */
-int tegra_dc_rgb_probe(struct tegra_dc *dc);
-int tegra_dc_rgb_remove(struct tegra_dc *dc);
-int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
-int tegra_dc_rgb_exit(struct tegra_dc *dc);
-
/* from output.c */
int tegra_output_probe(struct tegra_output *output);
void tegra_output_remove(struct tegra_output *output);
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 6ea070da7718..9a8ea93016a9 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -36,7 +36,7 @@ static int gr2d_init(struct host1x_client *client)
if (!gr2d->channel)
return -ENOMEM;
- client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
+ client->syncpts[0] = host1x_syncpt_request(client, flags);
if (!client->syncpts[0]) {
host1x_channel_put(gr2d->channel);
return -ENOMEM;
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index cee2ab645cde..28c4ef63065b 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -46,7 +46,7 @@ static int gr3d_init(struct host1x_client *client)
if (!gr3d->channel)
return -ENOMEM;
- client->syncpts[0] = host1x_syncpt_request(client->dev, flags);
+ client->syncpts[0] = host1x_syncpt_request(client, flags);
if (!client->syncpts[0]) {
host1x_channel_put(gr3d->channel);
return -ENOMEM;
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 5b9d83b71943..6434b3d3d1ba 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -11,6 +11,7 @@
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/hdmi.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -21,6 +22,8 @@
#include <sound/hda_verbs.h>
+#include <media/cec-notifier.h>
+
#include "hdmi.h"
#include "drm.h"
#include "dc.h"
@@ -1663,20 +1666,15 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
static int tegra_hdmi_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct tegra_hdmi *hdmi;
struct resource *regs;
int err;
- match = of_match_node(tegra_hdmi_of_match, pdev->dev.of_node);
- if (!match)
- return -ENODEV;
-
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
- hdmi->config = match->data;
+ hdmi->config = of_device_get_match_data(&pdev->dev);
hdmi->dev = &pdev->dev;
hdmi->audio_source = AUTO;
@@ -1725,6 +1723,10 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
return PTR_ERR(hdmi->vdd);
}
+ hdmi->output.notifier = cec_notifier_get(&pdev->dev);
+ if (hdmi->output.notifier == NULL)
+ return -ENOMEM;
+
hdmi->output.dev = &pdev->dev;
err = tegra_output_probe(&hdmi->output);
@@ -1783,6 +1785,9 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
tegra_output_remove(&hdmi->output);
+ if (hdmi->output.notifier)
+ cec_notifier_put(hdmi->output.notifier);
+
return 0;
}
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 595d1ec3e02e..1cfbacea8113 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -11,6 +11,8 @@
#include <drm/drm_panel.h>
#include "drm.h"
+#include <media/cec-notifier.h>
+
int tegra_output_connector_get_modes(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
@@ -32,6 +34,7 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
else if (output->ddc)
edid = drm_get_edid(connector, output->ddc);
+ cec_notifier_set_phys_addr_from_edid(output->notifier, edid);
drm_mode_connector_update_edid_property(connector, edid);
if (edid) {
@@ -68,6 +71,9 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force)
status = connector_status_connected;
}
+ if (status != connector_status_connected)
+ cec_notifier_phys_addr_invalidate(output->notifier);
+
return status;
}
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7ab1d1dc7cd7..4bcacd3f4861 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2536,20 +2536,17 @@ MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
static int tegra_sor_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device_node *np;
struct tegra_sor *sor;
struct resource *regs;
int err;
- match = of_match_device(tegra_sor_of_match, &pdev->dev);
-
sor = devm_kzalloc(&pdev->dev, sizeof(*sor), GFP_KERNEL);
if (!sor)
return -ENOMEM;
+ sor->soc = of_device_get_match_data(&pdev->dev);
sor->output.dev = sor->dev = &pdev->dev;
- sor->soc = match->data;
sor->settings = devm_kmemdup(&pdev->dev, sor->soc->settings,
sor->soc->num_settings *
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 2448229fa653..18024183aa2b 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -167,7 +167,7 @@ static int vic_init(struct host1x_client *client)
goto detach_device;
}
- client->syncpts[0] = host1x_syncpt_request(client->dev, 0);
+ client->syncpts[0] = host1x_syncpt_request(client, 0);
if (!client->syncpts[0]) {
err = -ENOMEM;
goto free_channel;
@@ -270,29 +270,33 @@ static const struct vic_config vic_t210_config = {
.firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
};
+#define NVIDIA_TEGRA_186_VIC_FIRMWARE "nvidia/tegra186/vic04_ucode.bin"
+
+static const struct vic_config vic_t186_config = {
+ .firmware = NVIDIA_TEGRA_186_VIC_FIRMWARE,
+};
+
static const struct of_device_id vic_match[] = {
{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
+ { .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
{ },
};
static int vic_probe(struct platform_device *pdev)
{
- struct vic_config *vic_config = NULL;
struct device *dev = &pdev->dev;
struct host1x_syncpt **syncpts;
struct resource *regs;
- const struct of_device_id *match;
struct vic *vic;
int err;
- match = of_match_device(vic_match, dev);
- vic_config = (struct vic_config *)match->data;
-
vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
if (!vic)
return -ENOMEM;
+ vic->config = of_device_get_match_data(dev);
+
syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
if (!syncpts)
return -ENOMEM;
@@ -321,7 +325,7 @@ static int vic_probe(struct platform_device *pdev)
if (err < 0)
return err;
- err = falcon_read_firmware(&vic->falcon, vic_config->firmware);
+ err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
if (err < 0)
goto exit_falcon;
@@ -334,7 +338,6 @@ static int vic_probe(struct platform_device *pdev)
vic->client.base.syncpts = syncpts;
vic->client.base.num_syncpts = 1;
vic->dev = dev;
- vic->config = vic_config;
INIT_LIST_HEAD(&vic->client.list);
vic->client.ops = &vic_ops;
@@ -405,3 +408,6 @@ MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
+#endif
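
Both the sor.c hunk and the vic.c hunk above replace the two-step of_match_device()/match->data lookup with of_device_get_match_data(), which returns the .data pointer of the matching OF table entry (or NULL) in one call. A minimal sketch of the pattern, using a hypothetical driver config structure:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_config {
        const char *firmware;
};

static int example_probe(struct platform_device *pdev)
{
        const struct example_config *cfg;

        /* one call replaces of_match_device() followed by match->data */
        cfg = of_device_get_match_data(&pdev->dev);
        if (!cfg)
                return -ENODEV; /* no matching compatible entry */

        /* ... use cfg->firmware ... */
        return 0;
}
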
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 406fe4544b83..6ef4d1a1e3a9 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -24,6 +24,7 @@
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/of_graph.h>
+#include <linux/math64.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
@@ -48,6 +49,7 @@ struct tilcdc_crtc {
unsigned int lcd_fck_rate;
ktime_t last_vblank;
+ unsigned int hvtotal_us;
struct drm_framebuffer *curr_fb;
struct drm_framebuffer *next_fb;
@@ -75,7 +77,7 @@ static void unref_worker(struct drm_flip_work *work, void *val)
struct drm_device *dev = tilcdc_crtc->base.dev;
mutex_lock(&dev->mode_config.mutex);
- drm_framebuffer_unreference(val);
+ drm_framebuffer_put(val);
mutex_unlock(&dev->mode_config.mutex);
}
@@ -292,6 +294,12 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
LCDC_V2_CORE_CLK_EN);
}
+uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
+{
+ return (uint) div_u64(1000llu * mode->htotal * mode->vtotal,
+ mode->clock);
+}
+
static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
@@ -456,9 +464,12 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
set_scanout(crtc, fb);
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
crtc->hwmode = crtc->state->adjusted_mode;
+
+ tilcdc_crtc->hvtotal_us =
+ tilcdc_mode_hvtotal(&crtc->hwmode);
}
static void tilcdc_crtc_enable(struct drm_crtc *crtc)
@@ -467,7 +478,6 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
unsigned long flags;
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
mutex_lock(&tilcdc_crtc->enable_lock);
if (tilcdc_crtc->enabled || tilcdc_crtc->shutdown) {
mutex_unlock(&tilcdc_crtc->enable_lock);
@@ -564,7 +574,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
static void tilcdc_crtc_disable(struct drm_crtc *crtc)
{
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
tilcdc_crtc_off(crtc, false);
}
@@ -608,9 +617,7 @@ static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct tilcdc_drm_private *priv = crtc->dev->dev_private;
- drm_modeset_lock(&crtc->mutex, NULL);
- tilcdc_crtc_disable(crtc);
- drm_modeset_unlock(&crtc->mutex);
+ tilcdc_crtc_shutdown(crtc);
flush_workqueue(priv->wq);
@@ -626,14 +633,12 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
if (tilcdc_crtc->event) {
dev_err(dev->dev, "already pending page flip!\n");
return -EBUSY;
}
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
crtc->primary->fb = fb;
tilcdc_crtc->event = event;
@@ -648,7 +653,7 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
- 1000000 / crtc->hwmode.vrefresh);
+ tilcdc_crtc->hvtotal_us);
tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
@@ -728,11 +733,39 @@ static void tilcdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
}
+static void tilcdc_crtc_reset(struct drm_crtc *crtc)
+{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ int ret;
+
+ drm_atomic_helper_crtc_reset(crtc);
+
+ /* Turn the raster off if for some reason it is on. */

+ pm_runtime_get_sync(dev->dev);
+ if (tilcdc_read(dev, LCDC_RASTER_CTRL_REG) & LCDC_RASTER_ENABLE) {
+ /* Enable DMA Frame Done Interrupt */
+ tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_FRAME_DONE);
+ tilcdc_clear_irqstatus(dev, 0xffffffff);
+
+ tilcdc_crtc->frame_done = false;
+ tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+
+ ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
+ tilcdc_crtc->frame_done,
+ msecs_to_jiffies(500));
+ if (ret == 0)
+ dev_err(dev->dev, "%s: timeout waiting for framedone\n",
+ __func__);
+ }
+ pm_runtime_put_sync(dev->dev);
+}
+
static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
.destroy = tilcdc_crtc_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
- .reset = drm_atomic_helper_crtc_reset,
+ .reset = tilcdc_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = tilcdc_crtc_enable_vblank,
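
For reference, the new tilcdc_mode_hvtotal() helper converts a mode into its frame period in microseconds: htotal * vtotal is the number of pixel-clock cycles per frame, and dividing by the pixel clock (stored by DRM in kHz) with a factor of 1000 yields microseconds. tilcdc_crtc_update_fb() then uses that value instead of the coarser 1000000 / vrefresh estimate. As an illustration with made-up but typical 800x480 panel timings (htotal 1056, vtotal 525, 33300 kHz pixel clock), the same arithmetic gives:

#include <linux/math64.h>

/* hypothetical timings, same math as tilcdc_mode_hvtotal() */
unsigned int frame_us = (unsigned int)div_u64(1000llu * 1056 * 525, 33300);
/* 554400000 / 33300 = 16648 us per frame, i.e. roughly 60.07 Hz */
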
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index b0d70f943cec..72ce063aa0d8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -23,6 +23,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
@@ -65,7 +66,7 @@ static struct of_device_id tilcdc_of_match[];
static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_fb_cma_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
@@ -225,7 +226,7 @@ static void tilcdc_fini(struct drm_device *dev)
pm_runtime_disable(dev->dev);
- drm_dev_unref(dev);
+ drm_dev_put(dev);
}
static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 1813a3623ce6..8eebb5f826a6 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -418,7 +418,7 @@ static int panel_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id panel_of_match[] = {
+static const struct of_device_id panel_of_match[] = {
{ .compatible = "ti,tilcdc,panel", },
{ },
};
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
index 623a9140493c..d2b9e5f04724 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
@@ -145,7 +145,6 @@ static struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
__dtb_tilcdc_slave_compat_begin;
static void *overlay_data;
struct device_node *overlay;
- int ret;
if (!size) {
pr_warn("%s: No overlay data\n", __func__);
@@ -163,13 +162,6 @@ static struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
return NULL;
}
- of_node_set_flag(overlay, OF_DETACHED);
- ret = of_resolve_phandles(overlay);
- if (ret) {
- pr_err("%s: Failed to resolve phandles: %d\n", __func__, ret);
- return NULL;
- }
-
return overlay;
}
@@ -204,7 +196,7 @@ static void __init tilcdc_convert_slave_node(void)
/* For all memory needed for the overlay tree. This memory can
be freed after the overlay has been applied. */
struct kfree_table kft;
- int ret;
+ int ovcs_id, ret;
if (kfree_table_init(&kft))
return;
@@ -247,9 +239,11 @@ static void __init tilcdc_convert_slave_node(void)
tilcdc_node_disable(slave);
- ret = of_overlay_create(overlay);
+ ovcs_id = 0;
+ ret = of_overlay_apply(overlay, &ovcs_id);
if (ret)
- pr_err("%s: Creating overlay failed: %d\n", __func__, ret);
+ pr_err("%s: Applying overlay changeset failed: %d\n",
+ __func__, ret);
else
pr_info("%s: ti,tilcdc,slave node successfully converted\n",
__func__);
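
The overlay hunks above move from of_overlay_create() to of_overlay_apply(), which takes a caller-provided changeset id and handles phandle resolution itself, which is presumably why the explicit OF_DETACHED/of_resolve_phandles() handling is dropped earlier in this file. A minimal sketch of the new call sequence (example_apply_overlay is a hypothetical wrapper):

#include <linux/of.h>

/* 'overlay' is an unflattened overlay tree, as produced earlier in this file */
static int example_apply_overlay(struct device_node *overlay)
{
        int ovcs_id = 0;        /* filled in with the changeset id on success */
        int ret;

        ret = of_overlay_apply(overlay, &ovcs_id);
        if (ret)
                pr_err("applying overlay changeset failed: %d\n", ret);
        /* on success, ovcs_id could later be handed to of_overlay_remove() */
        return ret;
}
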
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 1e2dfb1b1d6b..7e3643462a08 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -289,8 +289,6 @@ static const struct tilcdc_module_ops tfp410_module_ops = {
* Device:
*/
-static struct of_device_id tfp410_of_match[];
-
static int tfp410_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -375,7 +373,7 @@ static int tfp410_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id tfp410_of_match[] = {
+static const struct of_device_id tfp410_of_match[] = {
{ .compatible = "ti,tilcdc,tfp410", },
{ },
};
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 551709e6b114..1a8a57cad431 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/tinydrm.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
@@ -128,7 +129,7 @@ tinydrm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
{
struct tinydrm_device *tdev = drm->dev_private;
- return drm_fb_cma_create_with_funcs(drm, file_priv, mode_cmd,
+ return drm_gem_fb_create_with_funcs(drm, file_priv, mode_cmd,
tdev->fb_funcs);
}
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index 177e9d861001..f41fc506ff87 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -9,6 +9,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modes.h>
#include <drm/tinydrm/tinydrm.h>
@@ -50,7 +51,6 @@ static int tinydrm_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs tinydrm_connector_hfuncs = {
.get_modes = tinydrm_connector_get_modes,
- .best_encoder = drm_atomic_helper_best_encoder,
};
static enum drm_connector_status
@@ -144,7 +144,7 @@ EXPORT_SYMBOL(tinydrm_display_pipe_update);
* @pipe: Simple display pipe
* @plane_state: Plane state
*
- * This function uses drm_fb_cma_prepare_fb() to check if the plane FB has an
+ * This function uses drm_gem_fb_prepare_fb() to check if the plane FB has a
* dma-buf attached, extracts the exclusive fence and attaches it to plane
* state for the atomic helper to wait on. Drivers can use this as their
* &drm_simple_display_pipe_funcs->prepare_fb callback.
@@ -152,7 +152,7 @@ EXPORT_SYMBOL(tinydrm_display_pipe_update);
int tinydrm_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
- return drm_fb_cma_prepare_fb(&pipe->plane, plane_state);
+ return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(tinydrm_display_pipe_prepare_fb);
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 7e5bb7d6f655..6a83b3093254 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -31,7 +31,7 @@ static int mi0283qt_init(struct mipi_dbi *mipi)
ret = regulator_enable(mipi->regulator);
if (ret) {
- dev_err(dev, "Failed to enable regulator %d\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to enable regulator %d\n", ret);
return ret;
}
@@ -42,7 +42,7 @@ static int mi0283qt_init(struct mipi_dbi *mipi)
mipi_dbi_hw_reset(mipi);
ret = mipi_dbi_command(mipi, MIPI_DCS_SOFT_RESET);
if (ret) {
- dev_err(dev, "Error sending command %d\n", ret);
+ DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
regulator_disable(mipi->regulator);
return ret;
}
@@ -163,7 +163,6 @@ MODULE_DEVICE_TABLE(spi, mi0283qt_id);
static int mi0283qt_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
- struct tinydrm_device *tdev;
struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
@@ -175,13 +174,13 @@ static int mi0283qt_probe(struct spi_device *spi)
mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
- dev_err(dev, "Failed to get gpio 'reset'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
return PTR_ERR(mipi->reset);
}
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc)) {
- dev_err(dev, "Failed to get gpio 'dc'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
return PTR_ERR(dc);
}
@@ -215,20 +214,9 @@ static int mi0283qt_probe(struct spi_device *spi)
return ret;
}
- tdev = &mipi->tinydrm;
-
- ret = devm_tinydrm_register(tdev);
- if (ret)
- return ret;
-
spi_set_drvdata(spi, mipi);
- DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n",
- tdev->drm->driver->name, dev_name(dev),
- spi->max_speed_hz / 1000000,
- tdev->drm->primary->index);
-
- return 0;
+ return devm_tinydrm_register(&mipi->tinydrm);
}
static void mi0283qt_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index 2caeabcd3458..d43e992ab432 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -9,6 +9,7 @@
* (at your option) any later version.
*/
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
#include <linux/debugfs.h>
@@ -253,8 +254,8 @@ out_unlock:
}
static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
- .destroy = drm_fb_cma_destroy,
- .create_handle = drm_fb_cma_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
.dirty = mipi_dbi_fb_dirty,
};
@@ -842,6 +843,8 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
return -ENOMEM;
}
+ DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000);
+
return 0;
}
EXPORT_SYMBOL(mipi_dbi_spi_init);
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 30dc97b3ff21..75740630c410 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -26,6 +26,7 @@
#include <linux/spi/spi.h>
#include <linux/thermal.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/tinydrm.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -473,8 +474,7 @@ static void repaper_get_temperature(struct repaper_epd *epd)
ret = thermal_zone_get_temp(epd->thermal, &temperature);
if (ret) {
- dev_err(&epd->spi->dev, "Failed to get temperature (%d)\n",
- ret);
+ DRM_DEV_ERROR(&epd->spi->dev, "Failed to get temperature (%d)\n", ret);
return;
}
@@ -629,15 +629,15 @@ out_unlock:
mutex_unlock(&tdev->dirty_lock);
if (ret)
- dev_err(fb->dev->dev, "Failed to update display (%d)\n", ret);
+ DRM_DEV_ERROR(fb->dev->dev, "Failed to update display (%d)\n", ret);
kfree(buf);
return ret;
}
static const struct drm_framebuffer_funcs repaper_fb_funcs = {
- .destroy = drm_fb_cma_destroy,
- .create_handle = drm_fb_cma_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
.dirty = repaper_fb_dirty,
};
@@ -703,7 +703,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
}
if (!i) {
- dev_err(dev, "timeout waiting for panel to become ready.\n");
+ DRM_DEV_ERROR(dev, "timeout waiting for panel to become ready.\n");
power_off(epd);
return;
}
@@ -725,9 +725,9 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
ret = repaper_read_val(spi, 0x0f);
if (ret < 0 || !(ret & 0x80)) {
if (ret < 0)
- dev_err(dev, "failed to read chip (%d)\n", ret);
+ DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret);
else
- dev_err(dev, "panel is reported broken\n");
+ DRM_DEV_ERROR(dev, "panel is reported broken\n");
power_off(epd);
return;
}
@@ -767,7 +767,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
/* check DC/DC */
ret = repaper_read_val(spi, 0x0f);
if (ret < 0) {
- dev_err(dev, "failed to read chip (%d)\n", ret);
+ DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret);
power_off(epd);
return;
}
@@ -779,7 +779,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
}
if (!dc_ok) {
- dev_err(dev, "dc/dc failed\n");
+ DRM_DEV_ERROR(dev, "dc/dc failed\n");
power_off(epd);
return;
}
@@ -959,7 +959,7 @@ static int repaper_probe(struct spi_device *spi)
if (IS_ERR(epd->panel_on)) {
ret = PTR_ERR(epd->panel_on);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get gpio 'panel-on'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'panel-on'\n");
return ret;
}
@@ -967,7 +967,7 @@ static int repaper_probe(struct spi_device *spi)
if (IS_ERR(epd->discharge)) {
ret = PTR_ERR(epd->discharge);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get gpio 'discharge'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'discharge'\n");
return ret;
}
@@ -975,7 +975,7 @@ static int repaper_probe(struct spi_device *spi)
if (IS_ERR(epd->reset)) {
ret = PTR_ERR(epd->reset);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get gpio 'reset'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
return ret;
}
@@ -983,7 +983,7 @@ static int repaper_probe(struct spi_device *spi)
if (IS_ERR(epd->busy)) {
ret = PTR_ERR(epd->busy);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get gpio 'busy'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'busy'\n");
return ret;
}
@@ -991,8 +991,7 @@ static int repaper_probe(struct spi_device *spi)
&thermal_zone)) {
epd->thermal = thermal_zone_get_zone_by_name(thermal_zone);
if (IS_ERR(epd->thermal)) {
- dev_err(dev, "Failed to get thermal zone: %s\n",
- thermal_zone);
+ DRM_DEV_ERROR(dev, "Failed to get thermal zone: %s\n", thermal_zone);
return PTR_ERR(epd->thermal);
}
}
@@ -1033,7 +1032,7 @@ static int repaper_probe(struct spi_device *spi)
if (IS_ERR(epd->border)) {
ret = PTR_ERR(epd->border);
if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get gpio 'border'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'border'\n");
return ret;
}
@@ -1078,19 +1077,11 @@ static int repaper_probe(struct spi_device *spi)
return ret;
drm_mode_config_reset(tdev->drm);
-
- ret = devm_tinydrm_register(tdev);
- if (ret)
- return ret;
-
spi_set_drvdata(spi, tdev);
- DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n",
- tdev->drm->driver->name, dev_name(dev),
- spi->max_speed_hz / 1000000,
- tdev->drm->primary->index);
+ DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000);
- return 0;
+ return devm_tinydrm_register(tdev);
}
static void repaper_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index b439956a07f4..0a2c60da5c0e 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -17,6 +17,7 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -167,8 +168,8 @@ out_unlock:
}
static const struct drm_framebuffer_funcs st7586_fb_funcs = {
- .destroy = drm_fb_cma_destroy,
- .create_handle = drm_fb_cma_create_handle,
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
.dirty = st7586_fb_dirty,
};
@@ -187,7 +188,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
mipi_dbi_hw_reset(mipi);
ret = mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f);
if (ret) {
- dev_err(dev, "Error sending command %d\n", ret);
+ DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
return;
}
@@ -343,7 +344,6 @@ MODULE_DEVICE_TABLE(spi, st7586_id);
static int st7586_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
- struct tinydrm_device *tdev;
struct mipi_dbi *mipi;
struct gpio_desc *a0;
u32 rotation = 0;
@@ -355,13 +355,13 @@ static int st7586_probe(struct spi_device *spi)
mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
- dev_err(dev, "Failed to get gpio 'reset'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
return PTR_ERR(mipi->reset);
}
a0 = devm_gpiod_get(dev, "a0", GPIOD_OUT_LOW);
if (IS_ERR(a0)) {
- dev_err(dev, "Failed to get gpio 'a0'\n");
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'a0'\n");
return PTR_ERR(a0);
}
@@ -388,20 +388,9 @@ static int st7586_probe(struct spi_device *spi)
if (ret)
return ret;
- tdev = &mipi->tinydrm;
-
- ret = devm_tinydrm_register(tdev);
- if (ret)
- return ret;
-
spi_set_drvdata(spi, mipi);
- DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n",
- tdev->drm->driver->name, dev_name(dev),
- spi->max_speed_hz / 1000000,
- tdev->drm->primary->index);
-
- return 0;
+ return devm_tinydrm_register(&mipi->tinydrm);
}
static void st7586_shutdown(struct spi_device *spi)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 180ce6296416..c088703777e2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -150,8 +150,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
dma_fence_put(bo->moving);
- if (bo->resv == &bo->ttm_resv)
- reservation_object_fini(&bo->ttm_resv);
+ reservation_object_fini(&bo->ttm_resv);
mutex_destroy(&bo->wu_mutex);
if (bo->destroy)
bo->destroy(bo);
@@ -402,14 +401,11 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
if (bo->resv == &bo->ttm_resv)
return 0;
- reservation_object_init(&bo->ttm_resv);
BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
- if (r) {
+ if (r)
reservation_object_unlock(&bo->ttm_resv);
- reservation_object_fini(&bo->ttm_resv);
- }
return r;
}
@@ -440,28 +436,30 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
struct ttm_bo_global *glob = bo->glob;
int ret;
+ ret = ttm_bo_individualize_resv(bo);
+ if (ret) {
+ /* Last resort: if we fail to allocate memory for the
+ * fences, block for the BO to become idle
+ */
+ reservation_object_wait_timeout_rcu(bo->resv, true, false,
+ 30 * HZ);
+ spin_lock(&glob->lru_lock);
+ goto error;
+ }
+
spin_lock(&glob->lru_lock);
ret = __ttm_bo_reserve(bo, false, true, NULL);
-
if (!ret) {
- if (!ttm_bo_wait(bo, false, true)) {
+ if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- ttm_bo_cleanup_memtype_use(bo);
+ if (bo->resv != &bo->ttm_resv)
+ reservation_object_unlock(&bo->ttm_resv);
- return;
- }
-
- ret = ttm_bo_individualize_resv(bo);
- if (ret) {
- /* Last resort, if we fail to allocate memory for the
- * fences block for the BO to become idle and free it.
- */
- spin_unlock(&glob->lru_lock);
- ttm_bo_wait(bo, true, true);
ttm_bo_cleanup_memtype_use(bo);
return;
}
+
ttm_bo_flush_all_fences(bo);
/*
@@ -474,11 +472,12 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_add_to_lru(bo);
}
- if (bo->resv != &bo->ttm_resv)
- reservation_object_unlock(&bo->ttm_resv);
__ttm_bo_unreserve(bo);
}
+ if (bo->resv != &bo->ttm_resv)
+ reservation_object_unlock(&bo->ttm_resv);
+error:
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
@@ -1203,8 +1202,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
lockdep_assert_held(&bo->resv->lock.base);
} else {
bo->resv = &bo->ttm_resv;
- reservation_object_init(&bo->ttm_resv);
}
+ reservation_object_init(&bo->ttm_resv);
atomic_inc(&bo->glob->bo_count);
drm_vma_node_reset(&bo->vma_node);
bo->priority = 0;
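
As a reading aid, the reworked ttm_bo_cleanup_refs_or_queue() above now individualizes the reservation object before anything else and tests idleness on it, instead of calling ttm_bo_wait() under the LRU lock. A condensed outline of the new flow (comments only, locking and error paths abbreviated):

/*
 * 1. ttm_bo_individualize_resv(bo)
 *      - copy the fences of a shared resv into bo->ttm_resv
 *      - on failure (-ENOMEM): wait up to 30s on bo->resv as a last resort
 *        and queue the BO on bdev->ddestroy
 * 2. take glob->lru_lock and try __ttm_bo_reserve(bo)
 * 3. if reserved and reservation_object_test_signaled_rcu(&bo->ttm_resv, true)
 *      - the BO is idle: drop it from the LRU and release its memtype
 *        resources immediately via ttm_bo_cleanup_memtype_use()
 * 4. otherwise flush fences, unreserve, and queue the BO on bdev->ddestroy
 *    for the delayed-destroy worker to retry later
 */
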
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index c934ad5b3903..e7a519f1849b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -474,6 +474,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
INIT_LIST_HEAD(&fbo->io_reserve_lru);
+ mutex_init(&fbo->wu_mutex);
fbo->moving = NULL;
drm_vma_node_reset(&fbo->vma_node);
atomic_set(&fbo->cpu_writers, 0);
@@ -587,7 +588,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long offset, size;
int ret;
- BUG_ON(!list_empty(&bo->swap));
map->virtual = NULL;
map->bo = bo;
if (num_pages > bo->num_pages)
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 29855be96be0..e96374990398 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -546,8 +546,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
EXPORT_SYMBOL(ttm_mem_global_alloc);
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page,
- bool no_wait, bool interruptible)
+ struct page *page, uint64_t size)
{
struct ttm_mem_zone *zone = NULL;
@@ -564,11 +563,11 @@ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
#endif
- return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
- interruptible);
+ return ttm_mem_global_alloc_zone(glob, zone, size, false, false);
}
-void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
+void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
+ uint64_t size)
{
struct ttm_mem_zone *zone = NULL;
@@ -579,10 +578,9 @@ void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
#endif
- ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+ ttm_mem_global_free_zone(glob, zone, size);
}
-
size_t ttm_round_pot(size_t size)
{
if ((size & (size - 1)) == 0)
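
The ttm_memory.c hunk above gives ttm_mem_global_alloc_page()/ttm_mem_global_free_page() an explicit size argument, so a caller can account a whole huge page in one call instead of the previous implicit PAGE_SIZE. A small sketch of the caller side, with a hypothetical example_account() wrapper standing in for the real populate/unpopulate paths:

#include <drm/ttm/ttm_memory.h>

static int example_account(struct ttm_mem_global *mem_glob, struct page *page,
                           uint64_t size)
{
        int ret;

        /* size is PAGE_SIZE for an ordinary page, or e.g. HPAGE_PMD_SIZE
         * when a whole huge page is accounted in a single call */
        ret = ttm_mem_global_alloc_page(mem_glob, page, size);
        if (ret)
                return ret;

        /* ... later, release exactly the same amount ... */
        ttm_mem_global_free_page(mem_glob, page, size);
        return 0;
}
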
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 871599826773..316f831ad5f0 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -95,7 +95,7 @@ struct ttm_pool_opts {
unsigned small;
};
-#define NUM_POOLS 4
+#define NUM_POOLS 6
/**
* struct ttm_pool_manager - Holds memory pools for fast allocation
@@ -122,6 +122,8 @@ struct ttm_pool_manager {
struct ttm_page_pool uc_pool;
struct ttm_page_pool wc_pool_dma32;
struct ttm_page_pool uc_pool_dma32;
+ struct ttm_page_pool wc_pool_huge;
+ struct ttm_page_pool uc_pool_huge;
} ;
};
};
@@ -256,8 +258,8 @@ static int set_pages_array_uc(struct page **pages, int addrinarray)
/**
* Select the right pool or requested caching state and ttm flags. */
-static struct ttm_page_pool *ttm_get_pool(int flags,
- enum ttm_caching_state cstate)
+static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
+ enum ttm_caching_state cstate)
{
int pool_index;
@@ -269,9 +271,15 @@ static struct ttm_page_pool *ttm_get_pool(int flags,
else
pool_index = 0x1;
- if (flags & TTM_PAGE_FLAG_DMA32)
+ if (flags & TTM_PAGE_FLAG_DMA32) {
+ if (huge)
+ return NULL;
pool_index |= 0x2;
+ } else if (huge) {
+ pool_index |= 0x4;
+ }
+
return &_manager->pools[pool_index];
}
@@ -321,7 +329,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
- pr_err("Failed to allocate memory for pool free operation\n");
+ pr_debug("Failed to allocate memory for pool free operation\n");
return 0;
}
@@ -494,12 +502,14 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
* pages returned in pages array.
*/
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
- int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+ int ttm_flags, enum ttm_caching_state cstate,
+ unsigned count, unsigned order)
{
struct page **caching_array;
struct page *p;
int r = 0;
- unsigned i, cpages;
+ unsigned i, j, cpages;
+ unsigned npages = 1 << order;
unsigned max_cpages = min(count,
(unsigned)(PAGE_SIZE/sizeof(struct page *)));
@@ -507,15 +517,15 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
- pr_err("Unable to allocate table for new pages\n");
+ pr_debug("Unable to allocate table for new pages\n");
return -ENOMEM;
}
for (i = 0, cpages = 0; i < count; ++i) {
- p = alloc_page(gfp_flags);
+ p = alloc_pages(gfp_flags, order);
if (!p) {
- pr_err("Unable to get page %u\n", i);
+ pr_debug("Unable to get page %u\n", i);
/* store already allocated pages in the pool after
* setting the caching state */
@@ -531,14 +541,18 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
goto out;
}
+ list_add(&p->lru, pages);
+
#ifdef CONFIG_HIGHMEM
/* gfp flags of highmem page should never be dma32 so we
* should be fine in such case
*/
- if (!PageHighMem(p))
+ if (PageHighMem(p))
+ continue;
+
#endif
- {
- caching_array[cpages++] = p;
+ for (j = 0; j < npages; ++j) {
+ caching_array[cpages++] = p++;
if (cpages == max_cpages) {
r = ttm_set_pages_caching(caching_array,
@@ -552,8 +566,6 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
cpages = 0;
}
}
-
- list_add(&p->lru, pages);
}
if (cpages) {
@@ -573,9 +585,9 @@ out:
* Fill the given pool if there aren't enough pages and the requested number of
* pages is small.
*/
-static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
- int ttm_flags, enum ttm_caching_state cstate, unsigned count,
- unsigned long *irq_flags)
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
+ enum ttm_caching_state cstate,
+ unsigned count, unsigned long *irq_flags)
{
struct page *p;
int r;
@@ -605,7 +617,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
INIT_LIST_HEAD(&new_pages);
r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
- cstate, alloc_size);
+ cstate, alloc_size, 0);
spin_lock_irqsave(&pool->lock, *irq_flags);
if (!r) {
@@ -613,7 +625,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
++pool->nrefills;
pool->npages += alloc_size;
} else {
- pr_err("Failed to fill pool (%p)\n", pool);
+ pr_debug("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &new_pages, lru) {
++cpages;
@@ -627,22 +639,25 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
}
/**
- * Cut 'count' number of pages from the pool and put them on the return list.
+ * Allocate pages from the pool and put them on the return list.
*
- * @return count of pages still required to fulfill the request.
+ * @return zero for success or negative error code.
*/
-static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
- struct list_head *pages,
- int ttm_flags,
- enum ttm_caching_state cstate,
- unsigned count)
+static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+ struct list_head *pages,
+ int ttm_flags,
+ enum ttm_caching_state cstate,
+ unsigned count, unsigned order)
{
unsigned long irq_flags;
struct list_head *p;
unsigned i;
+ int r = 0;
spin_lock_irqsave(&pool->lock, irq_flags);
- ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+ if (!order)
+ ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
+ &irq_flags);
if (count >= pool->npages) {
/* take all pages from the pool */
@@ -672,32 +687,126 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
count = 0;
out:
spin_unlock_irqrestore(&pool->lock, irq_flags);
- return count;
+
+ /* clear the pages coming from the pool if requested */
+ if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+ struct page *page;
+
+ list_for_each_entry(page, pages, lru) {
+ if (PageHighMem(page))
+ clear_highpage(page);
+ else
+ clear_page(page_address(page));
+ }
+ }
+
+ /* If pool didn't have enough pages allocate new one. */
+ if (count) {
+ gfp_t gfp_flags = pool->gfp_flags;
+
+ /* set zero flag for page allocation if required */
+ if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ /* ttm_alloc_new_pages doesn't reference pool so we can run
+ * multiple requests in parallel.
+ **/
+ r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
+ count, order);
+ }
+
+ return r;
}
/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
enum ttm_caching_state cstate)
{
+ struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
+#endif
unsigned long irq_flags;
- struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
unsigned i;
if (pool == NULL) {
/* No pool for this memory type so free the pages */
- for (i = 0; i < npages; i++) {
- if (pages[i]) {
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
- __free_page(pages[i]);
- pages[i] = NULL;
+ i = 0;
+ while (i < npages) {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct page *p = pages[i];
+#endif
+ unsigned order = 0, j;
+
+ if (!pages[i]) {
+ ++i;
+ continue;
+ }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ if (p++ != pages[i + j])
+ break;
+
+ if (j == HPAGE_PMD_NR)
+ order = HPAGE_PMD_ORDER;
+#endif
+
+ if (page_count(pages[i]) != 1)
+ pr_err("Erroneous page count. Leaking pages.\n");
+ __free_pages(pages[i], order);
+
+ j = 1 << order;
+ while (j) {
+ pages[i++] = NULL;
+ --j;
}
}
return;
}
+ i = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (huge) {
+ unsigned max_size, n2free;
+
+ spin_lock_irqsave(&huge->lock, irq_flags);
+ while (i < npages) {
+ struct page *p = pages[i];
+ unsigned j;
+
+ if (!p)
+ break;
+
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ if (p++ != pages[i + j])
+ break;
+
+ if (j != HPAGE_PMD_NR)
+ break;
+
+ list_add_tail(&pages[i]->lru, &huge->list);
+
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ pages[i++] = NULL;
+ huge->npages++;
+ }
+
+ /* Check that we don't go over the pool limit */
+ max_size = _manager->options.max_size;
+ max_size /= HPAGE_PMD_NR;
+ if (huge->npages > max_size)
+ n2free = huge->npages - max_size;
+ else
+ n2free = 0;
+ spin_unlock_irqrestore(&huge->lock, irq_flags);
+ if (n2free)
+ ttm_page_pool_free(huge, n2free, false);
+ }
+#endif
+
spin_lock_irqsave(&pool->lock, irq_flags);
- for (i = 0; i < npages; i++) {
+ while (i < npages) {
if (pages[i]) {
if (page_count(pages[i]) != 1)
pr_err("Erroneous page count. Leaking pages.\n");
@@ -705,6 +814,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
pages[i] = NULL;
pool->npages++;
}
+ ++i;
}
/* Check that we don't go over the pool limit */
npages = 0;
@@ -727,75 +837,96 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
enum ttm_caching_state cstate)
{
- struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
+#endif
struct list_head plist;
struct page *p = NULL;
- gfp_t gfp_flags = GFP_USER;
unsigned count;
int r;
- /* set zero flag for page allocation if required */
- if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
/* No pool for cached pages */
if (pool == NULL) {
+ gfp_t gfp_flags = GFP_USER;
+ unsigned i;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ unsigned j;
+#endif
+
+ /* set zero flag for page allocation if required */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
if (flags & TTM_PAGE_FLAG_DMA32)
gfp_flags |= GFP_DMA32;
else
gfp_flags |= GFP_HIGHUSER;
- for (r = 0; r < npages; ++r) {
+ i = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ while (npages >= HPAGE_PMD_NR) {
+ gfp_t huge_flags = gfp_flags;
+
+ huge_flags |= GFP_TRANSHUGE;
+ huge_flags &= ~__GFP_MOVABLE;
+ huge_flags &= ~__GFP_COMP;
+ p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+ if (!p)
+ break;
+
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ pages[i++] = p++;
+
+ npages -= HPAGE_PMD_NR;
+ }
+#endif
+
+ while (npages) {
p = alloc_page(gfp_flags);
if (!p) {
-
- pr_err("Unable to allocate page\n");
+ pr_debug("Unable to allocate page\n");
return -ENOMEM;
}
- pages[r] = p;
+ pages[i++] = p;
+ --npages;
}
return 0;
}
- /* combine zero flag to pool flags */
- gfp_flags |= pool->gfp_flags;
-
- /* First we take pages from the pool */
- INIT_LIST_HEAD(&plist);
- npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
count = 0;
- list_for_each_entry(p, &plist, lru) {
- pages[count++] = p;
- }
- /* clear the pages coming from the pool if requested */
- if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (huge && npages >= HPAGE_PMD_NR) {
+ INIT_LIST_HEAD(&plist);
+ ttm_page_pool_get_pages(huge, &plist, flags, cstate,
+ npages / HPAGE_PMD_NR,
+ HPAGE_PMD_ORDER);
+
list_for_each_entry(p, &plist, lru) {
- if (PageHighMem(p))
- clear_highpage(p);
- else
- clear_page(page_address(p));
+ unsigned j;
+
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ pages[count++] = &p[j];
}
}
+#endif
- /* If pool didn't have enough pages allocate new one. */
- if (npages > 0) {
- /* ttm_alloc_new_pages doesn't reference pool so we can run
- * multiple requests in parallel.
- **/
- INIT_LIST_HEAD(&plist);
- r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
- list_for_each_entry(p, &plist, lru) {
- pages[count++] = p;
- }
- if (r) {
- /* If there is any pages in the list put them back to
- * the pool. */
- pr_err("Failed to allocate extra pages for large request\n");
- ttm_put_pages(pages, count, flags, cstate);
- return r;
- }
+ INIT_LIST_HEAD(&plist);
+ r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
+ npages - count, 0);
+
+ list_for_each_entry(p, &plist, lru)
+ pages[count++] = p;
+
+ if (r) {
+ /* If there are any pages in the list, put them back to
+ * the pool.
+ */
+ pr_debug("Failed to allocate extra pages for large request\n");
+ ttm_put_pages(pages, count, flags, cstate);
+ return r;
}
return 0;
@@ -832,6 +963,14 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
GFP_USER | GFP_DMA32, "uc dma");
+ ttm_page_pool_init_locked(&_manager->wc_pool_huge,
+ GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
+ "wc huge");
+
+ ttm_page_pool_init_locked(&_manager->uc_pool_huge,
+ GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
+ , "uc huge");
+
_manager->options.max_size = max_pages;
_manager->options.small = SMALL_ALLOCATION;
_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
@@ -873,17 +1012,16 @@ int ttm_pool_populate(struct ttm_tt *ttm)
if (ttm->state != tt_unpopulated)
return 0;
- for (i = 0; i < ttm->num_pages; ++i) {
- ret = ttm_get_pages(&ttm->pages[i], 1,
- ttm->page_flags,
- ttm->caching_state);
- if (ret != 0) {
- ttm_pool_unpopulate(ttm);
- return -ENOMEM;
- }
+ ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+ ttm->caching_state);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return ret;
+ }
+ for (i = 0; i < ttm->num_pages; ++i) {
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- false, false);
+ PAGE_SIZE);
if (unlikely(ret != 0)) {
ttm_pool_unpopulate(ttm);
return -ENOMEM;
@@ -908,18 +1046,91 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
unsigned i;
for (i = 0; i < ttm->num_pages; ++i) {
- if (ttm->pages[i]) {
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- ttm->pages[i]);
- ttm_put_pages(&ttm->pages[i], 1,
- ttm->page_flags,
- ttm->caching_state);
- }
+ if (!ttm->pages[i])
+ continue;
+
+ ttm_mem_global_free_page(ttm->glob->mem_glob, ttm->pages[i],
+ PAGE_SIZE);
}
+ ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
+ ttm->caching_state);
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
+#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
+{
+ unsigned i, j;
+ int r;
+
+ r = ttm_pool_populate(&tt->ttm);
+ if (r)
+ return r;
+
+ for (i = 0; i < tt->ttm.num_pages; ++i) {
+ struct page *p = tt->ttm.pages[i];
+ size_t num_pages = 1;
+
+ for (j = i + 1; j < tt->ttm.num_pages; ++j) {
+ if (++p != tt->ttm.pages[j])
+ break;
+
+ ++num_pages;
+ }
+
+ tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
+ 0, num_pages * PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, tt->dma_address[i])) {
+ while (i--) {
+ dma_unmap_page(dev, tt->dma_address[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ tt->dma_address[i] = 0;
+ }
+ ttm_pool_unpopulate(&tt->ttm);
+ return -EFAULT;
+ }
+
+ for (j = 1; j < num_pages; ++j) {
+ tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
+ ++i;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_populate_and_map_pages);
+
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
+{
+ unsigned i, j;
+
+ for (i = 0; i < tt->ttm.num_pages;) {
+ struct page *p = tt->ttm.pages[i];
+ size_t num_pages = 1;
+
+ if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
+ ++i;
+ continue;
+ }
+
+ for (j = i + 1; j < tt->ttm.num_pages; ++j) {
+ if (++p != tt->ttm.pages[j])
+ break;
+
+ ++num_pages;
+ }
+
+ dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ i += num_pages;
+ }
+ ttm_pool_unpopulate(&tt->ttm);
+}
+EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
+#endif
+
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct ttm_page_pool *p;
@@ -929,12 +1140,12 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
seq_printf(m, "No pool allocator running.\n");
return 0;
}
- seq_printf(m, "%6s %12s %13s %8s\n",
+ seq_printf(m, "%7s %12s %13s %8s\n",
h[0], h[1], h[2], h[3]);
for (i = 0; i < NUM_POOLS; ++i) {
p = &_manager->pools[i];
- seq_printf(m, "%6s %12ld %13ld %8d\n",
+ seq_printf(m, "%7s %12ld %13ld %8d\n",
p->name, p->nrefills,
p->nfrees, p->npages);
}
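
The transparent-hugepage support added above relies on one recurring idiom: checking whether the next HPAGE_PMD_NR entries of the pages array are physically consecutive struct page pointers, which is what the repeated `p++ != pages[i + j]` loops in ttm_put_pages()/ttm_get_pages() test. The helper below is an illustrative restatement of that check, not part of the patch:

#include <linux/mm.h>

/* Returns true if pages[i..i+n-1] are n consecutive struct pages, i.e. the
 * run came from (and can be freed as) a single higher-order allocation. */
static bool example_pages_contiguous(struct page **pages, unsigned int i,
                                     unsigned int n)
{
        struct page *p = pages[i];
        unsigned int j;

        for (j = 0; j < n; ++j)
                if (p++ != pages[i + j])
                        return false;

        return true;
}
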
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 90ddbdca93bd..6b2627fe9bc1 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -60,37 +60,32 @@
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 4
#define FREE_ALL_PAGES (~0U)
-/* times are in msecs */
-#define IS_UNDEFINED (0)
-#define IS_WC (1<<1)
-#define IS_UC (1<<2)
-#define IS_CACHED (1<<3)
-#define IS_DMA32 (1<<4)
+#define VADDR_FLAG_HUGE_POOL 1UL
enum pool_type {
- POOL_IS_UNDEFINED,
- POOL_IS_WC = IS_WC,
- POOL_IS_UC = IS_UC,
- POOL_IS_CACHED = IS_CACHED,
- POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
- POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
- POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
+ IS_UNDEFINED = 0,
+ IS_WC = 1 << 1,
+ IS_UC = 1 << 2,
+ IS_CACHED = 1 << 3,
+ IS_DMA32 = 1 << 4,
+ IS_HUGE = 1 << 5
};
+
/*
- * The pool structure. There are usually six pools:
+ * The pool structure. There are up to nine pools:
* - generic (not restricted to DMA32):
* - write combined, uncached, cached.
* - dma32 (up to 2^32 - so up 4GB):
* - write combined, uncached, cached.
+ * - huge (not restricted to DMA32):
+ * - write combined, uncached, cached.
* for each 'struct device'. The 'cached' is for pages that are actively used.
* The other ones can be shrunk by the shrinker API if necessary.
* @pools: The 'struct device->dma_pools' link.
* @type: Type of the pool
- * @lock: Protects the inuse_list and free_list from concurrnet access. Must be
+ * @lock: Protects the free_list from concurrent access. Must be
* used with irqsave/irqrestore variants because pool allocator may be called
* from delayed work.
- * @inuse_list: Pool of pages that are in use. The order is very important and
- * it is in the order that the TTM pages that are put back are in.
* @free_list: Pool of pages that are free to be used. No order requirements.
* @dev: The device that is associated with these pools.
* @size: Size used during DMA allocation.
@@ -107,7 +102,6 @@ struct dma_pool {
struct list_head pools; /* The 'struct device->dma_pools link */
enum pool_type type;
spinlock_t lock;
- struct list_head inuse_list;
struct list_head free_list;
struct device *dev;
unsigned size;
@@ -124,13 +118,14 @@ struct dma_pool {
* The accounting page keeping track of the allocated page along with
* the DMA address.
* @page_list: The link to the 'page_list' in 'struct dma_pool'.
- * @vaddr: The virtual address of the page
+ * @vaddr: The virtual address of the page and a flag if the page belongs to a
+ * huge pool
* @dma: The bus address of the page. If the page is not allocated
* via the DMA API, it will be -1.
*/
struct dma_page {
struct list_head page_list;
- void *vaddr;
+ unsigned long vaddr;
struct page *p;
dma_addr_t dma;
};
@@ -329,7 +324,8 @@ static int ttm_set_pages_caching(struct dma_pool *pool,
static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
dma_addr_t dma = d_page->dma;
- dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
+ d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
+ dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma);
kfree(d_page);
d_page = NULL;
@@ -337,19 +333,22 @@ static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
struct dma_page *d_page;
+ void *vaddr;
d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
if (!d_page)
return NULL;
- d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
- &d_page->dma,
- pool->gfp_flags);
- if (d_page->vaddr) {
- if (is_vmalloc_addr(d_page->vaddr))
- d_page->p = vmalloc_to_page(d_page->vaddr);
+ vaddr = dma_alloc_coherent(pool->dev, pool->size, &d_page->dma,
+ pool->gfp_flags);
+ if (vaddr) {
+ if (is_vmalloc_addr(vaddr))
+ d_page->p = vmalloc_to_page(vaddr);
else
- d_page->p = virt_to_page(d_page->vaddr);
+ d_page->p = virt_to_page(vaddr);
+ d_page->vaddr = (unsigned long)vaddr;
+ if (pool->type & IS_HUGE)
+ d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
} else {
kfree(d_page);
d_page = NULL;
@@ -381,11 +380,40 @@ static void ttm_pool_update_free_locked(struct dma_pool *pool,
}
/* set memory back to wb and free the pages. */
+static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+{
+ struct page *page = d_page->p;
+ unsigned i, num_pages;
+ int ret;
+
+ /* Don't set WB on WB page pool. */
+ if (!(pool->type & IS_CACHED)) {
+ num_pages = pool->size / PAGE_SIZE;
+ for (i = 0; i < num_pages; ++i, ++page) {
+ ret = set_pages_array_wb(&page, 1);
+ if (ret) {
+ pr_err("%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, 1);
+ }
+ }
+ }
+
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+}
+
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
struct page *pages[], unsigned npages)
{
struct dma_page *d_page, *tmp;
+ if (pool->type & IS_HUGE) {
+ list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
+ ttm_dma_page_put(pool, d_page);
+
+ return;
+ }
+
/* Don't set WB on WB page pool. */
if (npages && !(pool->type & IS_CACHED) &&
set_pages_array_wb(pages, npages))
@@ -398,17 +426,6 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
}
}
-static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
-{
- /* Don't set WB on WB page pool. */
- if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
- pr_err("%s: Failed to set %d pages to wb!\n",
- pool->dev_name, 1);
-
- list_del(&d_page->page_list);
- __ttm_dma_free_page(pool, d_page);
-}
-
/*
* Free pages from pool.
*
@@ -446,7 +463,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
GFP_KERNEL);
if (!pages_to_free) {
- pr_err("%s: Failed to allocate memory for pool free operation\n",
+ pr_debug("%s: Failed to allocate memory for pool free operation\n",
pool->dev_name);
return 0;
}
@@ -577,8 +594,8 @@ static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
enum pool_type type)
{
- char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
- enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
+ const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
+ enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
struct device_pools *sec_pool = NULL;
struct dma_pool *pool = NULL, **ptr;
unsigned i;
@@ -609,18 +626,24 @@ static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
sec_pool->pool = pool;
INIT_LIST_HEAD(&pool->free_list);
- INIT_LIST_HEAD(&pool->inuse_list);
INIT_LIST_HEAD(&pool->pools);
spin_lock_init(&pool->lock);
pool->dev = dev;
pool->npages_free = pool->npages_in_use = 0;
pool->nfrees = 0;
pool->gfp_flags = flags;
- pool->size = PAGE_SIZE;
+ if (type & IS_HUGE)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ pool->size = HPAGE_PMD_SIZE;
+#else
+ BUG();
+#endif
+ else
+ pool->size = PAGE_SIZE;
pool->type = type;
pool->nrefills = 0;
p = pool->name;
- for (i = 0; i < 5; i++) {
+ for (i = 0; i < ARRAY_SIZE(t); i++) {
if (type & t[i]) {
p += snprintf(p, sizeof(pool->name) - (p - pool->name),
"%s", n[i]);
@@ -724,7 +747,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
struct dma_page *dma_p;
struct page *p;
int r = 0;
- unsigned i, cpages;
+ unsigned i, j, npages, cpages;
unsigned max_cpages = min(count,
(unsigned)(PAGE_SIZE/sizeof(struct page *)));
@@ -732,7 +755,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
- pr_err("%s: Unable to allocate table for new pages\n",
+ pr_debug("%s: Unable to allocate table for new pages\n",
pool->dev_name);
return -ENOMEM;
}
@@ -745,8 +768,8 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
for (i = 0, cpages = 0; i < count; ++i) {
dma_p = __ttm_dma_alloc_page(pool);
if (!dma_p) {
- pr_err("%s: Unable to get page %u\n",
- pool->dev_name, i);
+ pr_debug("%s: Unable to get page %u\n",
+ pool->dev_name, i);
/* store already allocated pages in the pool after
* setting the caching state */
@@ -762,28 +785,32 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
goto out;
}
p = dma_p->p;
+ list_add(&dma_p->page_list, d_pages);
+
#ifdef CONFIG_HIGHMEM
/* gfp flags of highmem page should never be dma32 so we
* should be fine in such case
*/
- if (!PageHighMem(p))
+ if (PageHighMem(p))
+ continue;
#endif
- {
- caching_array[cpages++] = p;
+
+ npages = pool->size / PAGE_SIZE;
+ for (j = 0; j < npages; ++j) {
+ caching_array[cpages++] = p + j;
if (cpages == max_cpages) {
/* Note: Cannot hold the spinlock */
r = ttm_set_pages_caching(pool, caching_array,
- cpages);
+ cpages);
if (r) {
ttm_dma_handle_caching_state_failure(
- pool, d_pages, caching_array,
- cpages);
+ pool, d_pages, caching_array,
+ cpages);
goto out;
}
cpages = 0;
}
}
- list_add(&dma_p->page_list, d_pages);
}
if (cpages) {
@@ -828,8 +855,8 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
struct dma_page *d_page;
unsigned cpages = 0;
- pr_err("%s: Failed to fill %s pool (r:%d)!\n",
- pool->dev_name, pool->name, r);
+ pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
+ pool->dev_name, pool->name, r);
list_for_each_entry(d_page, &d_pages, page_list) {
cpages++;
@@ -871,6 +898,27 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
return r;
}
+static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ gfp_t gfp_flags;
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
+ gfp_flags = GFP_USER | GFP_DMA32;
+ else
+ gfp_flags = GFP_HIGHUSER;
+ if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ if (huge) {
+ gfp_flags |= GFP_TRANSHUGE;
+ gfp_flags &= ~__GFP_MOVABLE;
+ gfp_flags &= ~__GFP_COMP;
+ }
+
+ return gfp_flags;
+}
+
/*
* On success pages list will hold count number of correctly
* cached pages. On failure will hold the negative return value (-ENOMEM, etc).
@@ -879,33 +927,70 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ unsigned long num_pages = ttm->num_pages;
struct dma_pool *pool;
enum pool_type type;
unsigned i;
- gfp_t gfp_flags;
int ret;
if (ttm->state != tt_unpopulated)
return 0;
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ i = 0;
+
type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags = GFP_USER | GFP_DMA32;
- else
- gfp_flags = GFP_HIGHUSER;
- if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
+ goto skip_huge;
+
+ pool = ttm_dma_find_pool(dev, type | IS_HUGE);
+ if (!pool) {
+ gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);
+
+ pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
+ if (IS_ERR_OR_NULL(pool))
+ goto skip_huge;
+ }
+
+ while (num_pages >= HPAGE_PMD_NR) {
+ unsigned j;
+
+ ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+ if (ret != 0)
+ break;
+
+ ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+ pool->size);
+ if (unlikely(ret != 0)) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return -ENOMEM;
+ }
+
+ for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
+ ttm->pages[j] = ttm->pages[j - 1] + 1;
+ ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
+ PAGE_SIZE;
+ }
+
+ i += HPAGE_PMD_NR;
+ num_pages -= HPAGE_PMD_NR;
+ }
+
+skip_huge:
+#endif
pool = ttm_dma_find_pool(dev, type);
if (!pool) {
+ gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);
+
pool = ttm_dma_pool_init(dev, gfp_flags, type);
- if (IS_ERR_OR_NULL(pool)) {
+ if (IS_ERR_OR_NULL(pool))
return -ENOMEM;
- }
}
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- for (i = 0; i < ttm->num_pages; ++i) {
+ while (num_pages) {
ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
if (ret != 0) {
ttm_dma_unpopulate(ttm_dma, dev);
@@ -913,11 +998,14 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
}
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- false, false);
+ pool->size);
if (unlikely(ret != 0)) {
ttm_dma_unpopulate(ttm_dma, dev);
return -ENOMEM;
}
+
+ ++i;
+ --num_pages;
}
if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
@@ -941,10 +1029,33 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
struct dma_page *d_page, *next;
enum pool_type type;
bool is_cached = false;
- unsigned count = 0, i, npages = 0;
+ unsigned count, i, npages = 0;
unsigned long irq_flags;
type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ pool = ttm_dma_find_pool(dev, type | IS_HUGE);
+ if (pool) {
+ count = 0;
+ list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+ page_list) {
+ if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
+ continue;
+
+ count++;
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ d_page->p, pool->size);
+ ttm_dma_page_put(pool, d_page);
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ pool->npages_in_use -= count;
+ pool->nfrees += count;
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ }
+#endif
+
pool = ttm_dma_find_pool(dev, type);
if (!pool)
return;
@@ -953,6 +1064,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
ttm_to_type(ttm->page_flags, tt_cached)) == pool);
/* make sure pages array match list and count number of pages */
+ count = 0;
list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
ttm->pages[count] = d_page->p;
count++;
@@ -978,13 +1090,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
if (is_cached) {
list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
ttm_mem_global_free_page(ttm->glob->mem_glob,
- d_page->p);
+ d_page->p, pool->size);
ttm_dma_page_put(pool, d_page);
}
} else {
for (i = 0; i < count; i++) {
ttm_mem_global_free_page(ttm->glob->mem_glob,
- ttm->pages[i]);
+ ttm->pages[i], pool->size);
}
}
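
One detail worth calling out in the DMA pool changes above: struct dma_page now stores vaddr as an unsigned long and tags its low bit with VADDR_FLAG_HUGE_POOL, so unpopulate can tell huge-pool pages from ordinary ones without extra storage. This is safe because dma_alloc_coherent() returns at least page-aligned addresses, leaving the low bit unused. The fragment below restates the tag/untag idiom from these hunks (names taken from the patch, lightly condensed for reading):

#define VADDR_FLAG_HUGE_POOL 1UL

/* store: tag the kernel virtual address if it came from a huge pool */
d_page->vaddr = (unsigned long)vaddr;
if (pool->type & IS_HUGE)
        d_page->vaddr |= VADDR_FLAG_HUGE_POOL;

/* test: does this page belong to the huge pool? */
if (d_page->vaddr & VADDR_FLAG_HUGE_POOL)
        /* account/free pool->size bytes instead of PAGE_SIZE */;

/* use: strip the flag before handing the pointer back to the DMA API */
dma_free_coherent(pool->dev, pool->size,
                  (void *)(d_page->vaddr & ~VADDR_FLAG_HUGE_POOL), d_page->dma);
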
diff --git a/drivers/gpu/drm/tve200/Kconfig b/drivers/gpu/drm/tve200/Kconfig
new file mode 100644
index 000000000000..c5f03bf4570c
--- /dev/null
+++ b/drivers/gpu/drm/tve200/Kconfig
@@ -0,0 +1,16 @@
+config DRM_TVE200
+ tristate "DRM Support for Faraday TV Encoder TVE200"
+ depends on DRM
+ depends on CMA
+ depends on ARM || COMPILE_TEST
+ depends on OF
+ select DRM_BRIDGE
+ select DRM_PANEL_BRIDGE
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+ help
+ Choose this option for DRM support for the Faraday TV Encoder
+ TVE200 Controller.
+ If M is selected the module will be called tve200_drm.
diff --git a/drivers/gpu/drm/tve200/Makefile b/drivers/gpu/drm/tve200/Makefile
new file mode 100644
index 000000000000..6b7a6a1dcbf8
--- /dev/null
+++ b/drivers/gpu/drm/tve200/Makefile
@@ -0,0 +1,4 @@
+tve200_drm-y += tve200_display.o \
+ tve200_drv.o
+
+obj-$(CONFIG_DRM_TVE200) += tve200_drm.o
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
new file mode 100644
index 000000000000..2c668bd6d997
--- /dev/null
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Parts of this file were based on sources as follows:
+ *
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Copyright (C) 2007 Amos Lee <amos_lee@storlinksemi.com>
+ * Copyright (C) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2017 Eric Anholt
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms of
+ * such GNU licence.
+ */
+#include <linux/clk.h>
+#include <linux/version.h>
+#include <linux/dma-buf.h>
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "tve200_drm.h"
+
+irqreturn_t tve200_irq(int irq, void *data)
+{
+ struct tve200_drm_dev_private *priv = data;
+ u32 stat;
+ u32 val;
+
+ stat = readl(priv->regs + TVE200_INT_STAT);
+
+ if (!stat)
+ return IRQ_NONE;
+
+ /*
+ * Vblank IRQ
+ *
+ * The hardware is a bit quirky: the IRQ line stays high after clearing
+ * the vblank IRQ, firing many more interrupts. We counter this by
+ * toggling the IRQ trigger back and forth between firing at vblank and
+ * firing at the start of the active image. Since those occur strictly
+ * in sequence, we get two IRQs per frame: one at the start of vblank
+ * (which we forward to the CRTC vblank handling) and one at the start
+ * of the active image (which we discard).
+ */
+ if (stat & TVE200_INT_V_STATUS) {
+ val = readl(priv->regs + TVE200_CTRL);
+ /* We have an actual start of vsync */
+ if (!(val & TVE200_VSTSTYPE_BITS)) {
+ drm_crtc_handle_vblank(&priv->pipe.crtc);
+ /* Toggle trigger to start of active image */
+ val |= TVE200_VSTSTYPE_VAI;
+ } else {
+ /* Toggle trigger back to start of vsync */
+ val &= ~TVE200_VSTSTYPE_BITS;
+ }
+ writel(val, priv->regs + TVE200_CTRL);
+ } else
+ dev_err(priv->drm->dev, "stray IRQ %08x\n", stat);
+
+ /* Clear the interrupt once done */
+ writel(stat, priv->regs + TVE200_INT_CLR);
+
+ return IRQ_HANDLED;
+}
+
+static int tve200_display_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *pstate,
+ struct drm_crtc_state *cstate)
+{
+ const struct drm_display_mode *mode = &cstate->mode;
+ struct drm_framebuffer *old_fb = pipe->plane.state->fb;
+ struct drm_framebuffer *fb = pstate->fb;
+
+ /*
+ * We support these specific resolutions and nothing else.
+ */
+ if (!(mode->hdisplay == 352 && mode->vdisplay == 240) && /* SIF(525) */
+ !(mode->hdisplay == 352 && mode->vdisplay == 288) && /* CIF(625) */
+ !(mode->hdisplay == 640 && mode->vdisplay == 480) && /* VGA */
+ !(mode->hdisplay == 720 && mode->vdisplay == 480) && /* D1 */
+ !(mode->hdisplay == 720 && mode->vdisplay == 576)) { /* D1 */
+ DRM_DEBUG_KMS("unsupported display mode (%u x %u)\n",
+ mode->hdisplay, mode->vdisplay);
+ return -EINVAL;
+ }
+
+ if (fb) {
+ u32 offset = drm_fb_cma_get_gem_addr(fb, pstate, 0);
+
+ /* FB base address must be dword aligned. */
+ if (offset & 3) {
+ DRM_DEBUG_KMS("FB not 32-bit aligned\n");
+ return -EINVAL;
+ }
+
+ /*
+ * There's no pitch register, the mode's hdisplay
+ * controls this.
+ */
+ if (fb->pitches[0] != mode->hdisplay * fb->format->cpp[0]) {
+ DRM_DEBUG_KMS("can't handle pitches\n");
+ return -EINVAL;
+ }
+
+ /*
+ * We can't change the FB format in a flicker-free
+ * manner (and only update it during CRTC enable).
+ */
+ if (old_fb && old_fb->format != fb->format)
+ cstate->mode_changed = true;
+ }
+
+ return 0;
+}
+
+static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *cstate)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_plane *plane = &pipe->plane;
+ struct drm_device *drm = crtc->dev;
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+ const struct drm_display_mode *mode = &cstate->mode;
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_connector *connector = priv->connector;
+ u32 format = fb->format->format;
+ u32 ctrl1 = 0;
+
+ clk_prepare_enable(priv->clk);
+
+ /* Function 1 */
+ ctrl1 |= TVE200_CTRL_CSMODE;
+ /* Interlace mode for CCIR656: parameterize? */
+ ctrl1 |= TVE200_CTRL_NONINTERLACE;
+ /* 32 words per burst */
+ ctrl1 |= TVE200_CTRL_BURST_32_WORDS;
+ /* 16 retries */
+ ctrl1 |= TVE200_CTRL_RETRYCNT_16;
+ /* NTSC mode: parameterize? */
+ ctrl1 |= TVE200_CTRL_NTSC;
+
+ /* Vsync IRQ at start of Vsync at first */
+ ctrl1 |= TVE200_VSTSTYPE_VSYNC;
+
+ if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ ctrl1 |= TVE200_CTRL_TVCLKP;
+
+ if ((mode->hdisplay == 352 && mode->vdisplay == 240) || /* SIF(525) */
+ (mode->hdisplay == 352 && mode->vdisplay == 288)) { /* CIF(625) */
+ ctrl1 |= TVE200_CTRL_IPRESOL_CIF;
+ dev_info(drm->dev, "CIF mode\n");
+ } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
+ ctrl1 |= TVE200_CTRL_IPRESOL_VGA;
+ dev_info(drm->dev, "VGA mode\n");
+ } else if ((mode->hdisplay == 720 && mode->vdisplay == 480) ||
+ (mode->hdisplay == 720 && mode->vdisplay == 576)) {
+ ctrl1 |= TVE200_CTRL_IPRESOL_D1;
+ dev_info(drm->dev, "D1 mode\n");
+ }
+
+ if (format & DRM_FORMAT_BIG_ENDIAN) {
+ ctrl1 |= TVE200_CTRL_BBBP;
+ format &= ~DRM_FORMAT_BIG_ENDIAN;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ ctrl1 |= TVE200_IPDMOD_RGB888;
+ break;
+ case DRM_FORMAT_RGB565:
+ ctrl1 |= TVE200_IPDMOD_RGB565;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ ctrl1 |= TVE200_IPDMOD_RGB555;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ ctrl1 |= TVE200_IPDMOD_RGB888 | TVE200_BGR;
+ break;
+ case DRM_FORMAT_BGR565:
+ ctrl1 |= TVE200_IPDMOD_RGB565 | TVE200_BGR;
+ break;
+ case DRM_FORMAT_XBGR1555:
+ ctrl1 |= TVE200_IPDMOD_RGB555 | TVE200_BGR;
+ break;
+ case DRM_FORMAT_YUYV:
+ ctrl1 |= TVE200_IPDMOD_YUV422;
+ ctrl1 |= TVE200_CTRL_YCBCRODR_CR0Y1CB0Y0;
+ break;
+ case DRM_FORMAT_YVYU:
+ ctrl1 |= TVE200_IPDMOD_YUV422;
+ ctrl1 |= TVE200_CTRL_YCBCRODR_CB0Y1CR0Y0;
+ break;
+ case DRM_FORMAT_UYVY:
+ ctrl1 |= TVE200_IPDMOD_YUV422;
+ ctrl1 |= TVE200_CTRL_YCBCRODR_Y1CR0Y0CB0;
+ break;
+ case DRM_FORMAT_VYUY:
+ ctrl1 |= TVE200_IPDMOD_YUV422;
+ ctrl1 |= TVE200_CTRL_YCBCRODR_Y1CB0Y0CR0;
+ break;
+ case DRM_FORMAT_YUV420:
+ ctrl1 |= TVE200_CTRL_YUV420;
+ ctrl1 |= TVE200_IPDMOD_YUV420;
+ break;
+ default:
+ dev_err(drm->dev, "Unknown FB format 0x%08x\n",
+ fb->format->format);
+ break;
+ }
+
+ ctrl1 |= TVE200_TVEEN;
+
+ /* Turn it on */
+ writel(ctrl1, priv->regs + TVE200_CTRL);
+
+ drm_crtc_vblank_on(crtc);
+}
+
+static void tve200_display_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_device *drm = crtc->dev;
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+
+ drm_crtc_vblank_off(crtc);
+
+ /* Disable and Power Down */
+ writel(0, priv->regs + TVE200_CTRL);
+
+ clk_disable_unprepare(priv->clk);
+}
+
+static void tve200_display_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_pstate)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_device *drm = crtc->dev;
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+ struct drm_pending_vblank_event *event = crtc->state->event;
+ struct drm_plane *plane = &pipe->plane;
+ struct drm_plane_state *pstate = plane->state;
+ struct drm_framebuffer *fb = pstate->fb;
+
+ if (fb) {
+ /* For RGB, the Y component is used as base address */
+ writel(drm_fb_cma_get_gem_addr(fb, pstate, 0),
+ priv->regs + TVE200_Y_FRAME_BASE_ADDR);
+
+ /* For three plane YUV we need two more addresses */
+ if (fb->format->format == DRM_FORMAT_YUV420) {
+ writel(drm_fb_cma_get_gem_addr(fb, pstate, 1),
+ priv->regs + TVE200_U_FRAME_BASE_ADDR);
+ writel(drm_fb_cma_get_gem_addr(fb, pstate, 2),
+ priv->regs + TVE200_V_FRAME_BASE_ADDR);
+ }
+ }
+
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->active && drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+}
+
+int tve200_enable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+
+ writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN);
+ return 0;
+}
+
+void tve200_disable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+
+ writel(0, priv->regs + TVE200_INT_EN);
+}
+
+static int tve200_display_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
+}
+
+static const struct drm_simple_display_pipe_funcs tve200_display_funcs = {
+ .check = tve200_display_check,
+ .enable = tve200_display_enable,
+ .disable = tve200_display_disable,
+ .update = tve200_display_update,
+ .prepare_fb = tve200_display_prepare_fb,
+};
+
+int tve200_display_init(struct drm_device *drm)
+{
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+ int ret;
+ static const u32 formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ /*
+ * The controller actually supports any YCbCr ordering,
+ * for packed YCbCr. This just lists the orderings that
+ * DRM supports.
+ */
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ /* This uses three planes */
+ DRM_FORMAT_YUV420,
+ };
+
+ ret = drm_simple_display_pipe_init(drm, &priv->pipe,
+ &tve200_display_funcs,
+ formats, ARRAY_SIZE(formats),
+ NULL,
+ priv->connector);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tve200/tve200_drm.h b/drivers/gpu/drm/tve200/tve200_drm.h
new file mode 100644
index 000000000000..628b79324c48
--- /dev/null
+++ b/drivers/gpu/drm/tve200/tve200_drm.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Parts of this file were based on sources as follows:
+ *
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Copyright (C) 2007 Amos Lee <amos_lee@storlinksemi.com>
+ * Copyright (C) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2017 Eric Anholt
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms of
+ * such GNU licence.
+ */
+
+#ifndef _TVE200_DRM_H_
+#define _TVE200_DRM_H_
+
+/* Bits 2-31 are valid physical base addresses */
+#define TVE200_Y_FRAME_BASE_ADDR 0x00
+#define TVE200_U_FRAME_BASE_ADDR 0x04
+#define TVE200_V_FRAME_BASE_ADDR 0x08
+
+#define TVE200_INT_EN 0x0C
+#define TVE200_INT_CLR 0x10
+#define TVE200_INT_STAT 0x14
+#define TVE200_INT_BUS_ERR BIT(7)
+#define TVE200_INT_V_STATUS BIT(6) /* vertical blank */
+#define TVE200_INT_V_NEXT_FRAME BIT(5)
+#define TVE200_INT_U_NEXT_FRAME BIT(4)
+#define TVE200_INT_Y_NEXT_FRAME BIT(3)
+#define TVE200_INT_V_FIFO_UNDERRUN BIT(2)
+#define TVE200_INT_U_FIFO_UNDERRUN BIT(1)
+#define TVE200_INT_Y_FIFO_UNDERRUN BIT(0)
+#define TVE200_FIFO_UNDERRUNS (TVE200_INT_V_FIFO_UNDERRUN | \
+ TVE200_INT_U_FIFO_UNDERRUN | \
+ TVE200_INT_Y_FIFO_UNDERRUN)
+
+#define TVE200_CTRL 0x18
+#define TVE200_CTRL_YUV420 BIT(31)
+#define TVE200_CTRL_CSMODE BIT(30)
+#define TVE200_CTRL_NONINTERLACE BIT(28) /* 0 = non-interlace CCIR656 */
+#define TVE200_CTRL_TVCLKP BIT(27) /* Inverted clock phase */
+/* Bits 24..26 define the burst size after arbitration on the bus */
+#define TVE200_CTRL_BURST_4_WORDS (0 << 24)
+#define TVE200_CTRL_BURST_8_WORDS (1 << 24)
+#define TVE200_CTRL_BURST_16_WORDS (2 << 24)
+#define TVE200_CTRL_BURST_32_WORDS (3 << 24)
+#define TVE200_CTRL_BURST_64_WORDS (4 << 24)
+#define TVE200_CTRL_BURST_128_WORDS (5 << 24)
+#define TVE200_CTRL_BURST_256_WORDS (6 << 24)
+#define TVE200_CTRL_BURST_0_WORDS (7 << 24) /* ? */
+/*
+ * Bits 16..23 are the retry count*16 before issuing a new AHB transfer
+ * on the AHB bus.
+ */
+#define TVE200_CTRL_RETRYCNT_MASK GENMASK(23, 16)
+#define TVE200_CTRL_RETRYCNT_16 (1 << 16)
+#define TVE200_CTRL_BBBP BIT(15) /* 0 = little-endian */
+/* Bits 12..14 define the YCbCr ordering */
+#define TVE200_CTRL_YCBCRODR_CB0Y0CR0Y1 (0 << 12)
+#define TVE200_CTRL_YCBCRODR_Y0CB0Y1CR0 (1 << 12)
+#define TVE200_CTRL_YCBCRODR_CR0Y0CB0Y1 (2 << 12)
+#define TVE200_CTRL_YCBCRODR_Y1CB0Y0CR0 (3 << 12)
+#define TVE200_CTRL_YCBCRODR_CR0Y1CB0Y0 (4 << 12)
+#define TVE200_CTRL_YCBCRODR_Y1CR0Y0CB0 (5 << 12)
+#define TVE200_CTRL_YCBCRODR_CB0Y1CR0Y0 (6 << 12)
+#define TVE200_CTRL_YCBCRODR_Y0CR0Y1CB0 (7 << 12)
+/* Bits 10..11 define the input resolution (framebuffer size) */
+#define TVE200_CTRL_IPRESOL_CIF (0 << 10)
+#define TVE200_CTRL_IPRESOL_VGA (1 << 10)
+#define TVE200_CTRL_IPRESOL_D1 (2 << 10)
+#define TVE200_CTRL_NTSC BIT(9) /* 0 = PAL, 1 = NTSC */
+#define TVE200_CTRL_INTERLACE BIT(8) /* 1 = interlace, only for D1 */
+#define TVE200_IPDMOD_RGB555 (0 << 6) /* TVE200_CTRL_YUV420 = 0 */
+#define TVE200_IPDMOD_RGB565 (1 << 6)
+#define TVE200_IPDMOD_RGB888 (2 << 6)
+#define TVE200_IPDMOD_YUV420 (2 << 6) /* TVE200_CTRL_YUV420 = 1 */
+#define TVE200_IPDMOD_YUV422 (3 << 6)
+/* Bits 4 & 5 define when to fire the vblank IRQ */
+#define TVE200_VSTSTYPE_VSYNC (0 << 4) /* start of vsync */
+#define TVE200_VSTSTYPE_VBP (1 << 4) /* start of v back porch */
+#define TVE200_VSTSTYPE_VAI (2 << 4) /* start of v active image */
+#define TVE200_VSTSTYPE_VFP (3 << 4) /* start of v front porch */
+#define TVE200_VSTSTYPE_BITS (BIT(4) | BIT(5))
+#define TVE200_BGR BIT(1) /* 0 = RGB, 1 = BGR */
+#define TVE200_TVEEN BIT(0) /* Enable TVE block */
+
+#define TVE200_CTRL_2 0x1c
+#define TVE200_CTRL_3 0x20
+
+#define TVE200_CTRL_4 0x24
+#define TVE200_CTRL_4_RESET BIT(0) /* triggers reset of TVE200 */
+
+#include <drm/drm_gem.h>
+#include <drm/drm_simple_kms_helper.h>
+
+struct tve200_drm_dev_private {
+ struct drm_device *drm;
+
+ struct drm_connector *connector;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ struct drm_simple_display_pipe pipe;
+ struct drm_fbdev_cma *fbdev;
+
+ void *regs;
+ struct clk *pclk;
+ struct clk *clk;
+};
+
+#define to_tve200_connector(x) \
+ container_of(x, struct tve200_drm_connector, connector)
+
+int tve200_display_init(struct drm_device *dev);
+int tve200_enable_vblank(struct drm_device *drm, unsigned int crtc);
+void tve200_disable_vblank(struct drm_device *drm, unsigned int crtc);
+irqreturn_t tve200_irq(int irq, void *data);
+int tve200_connector_init(struct drm_device *dev);
+int tve200_encoder_init(struct drm_device *dev);
+int tve200_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+#endif /* _TVE200_DRM_H_ */
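For reference, these control bits are OR'd together into a single TVE200_CTRL write when the display pipe is enabled. A hedged sketch of a 640x480 XRGB8888 configuration, mirroring what tve200_display_enable() computes (illustration only, not part of the patch):

	u32 ctrl = TVE200_CTRL_CSMODE |		/* function 1 */
		   TVE200_CTRL_NONINTERLACE |
		   TVE200_CTRL_BURST_32_WORDS |
		   TVE200_CTRL_RETRYCNT_16 |
		   TVE200_CTRL_NTSC |
		   TVE200_VSTSTYPE_VSYNC |	/* vblank IRQ at start of vsync */
		   TVE200_CTRL_IPRESOL_VGA |	/* 640x480 input resolution */
		   TVE200_IPDMOD_RGB888 |	/* XRGB8888 scan-out */
		   TVE200_TVEEN;		/* enable the encoder */

	writel(ctrl, priv->regs + TVE200_CTRL);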
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
new file mode 100644
index 000000000000..bd6c9454d767
--- /dev/null
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Parts of this file were based on sources as follows:
+ *
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Copyright (C) 2007 Amos Lee <amos_lee@storlinksemi.com>
+ * Copyright (C) 2007 Dave Airlie <airlied@linux.ie>
+ * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2017 Eric Anholt
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms of
+ * such GNU licence.
+ */
+
+/**
+ * DOC: Faraday TV Encoder TVE200 DRM Driver
+ *
+ * The Faraday TV Encoder TVE200 is also known as the Gemini TV Interface
+ * Controller (TVC) and is found in the Gemini Chipset from Storlink
+ * Semiconductor (later Storm Semiconductor, later Cortina Systems)
+ * but also in the Grain Media GM8180 chipset. On the Gemini the module
+ * is connected to 8 data lines and a single clock line, comprising an
+ * 8-bit BT.656 interface.
+ *
+ * This is a very basic YUV display driver. The datasheet specifies that
+ * it supports the ITU BT.656 standard. It requires a 27 MHz clock which is
+ * the hallmark of any TV encoder supporting both PAL and NTSC.
+ *
+ * This driver exposes a standard KMS interface for this TV encoder.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-buf.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
+#include <drm/drm_bridge.h>
+
+#include "tve200_drm.h"
+
+#define DRIVER_DESC "DRM module for Faraday TVE200"
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int tve200_modeset_init(struct drm_device *dev)
+{
+ struct drm_mode_config *mode_config;
+ struct tve200_drm_dev_private *priv = dev->dev_private;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ int ret = 0;
+
+ drm_mode_config_init(dev);
+ mode_config = &dev->mode_config;
+ mode_config->funcs = &mode_config_funcs;
+ mode_config->min_width = 352;
+ mode_config->max_width = 720;
+ mode_config->min_height = 240;
+ mode_config->max_height = 576;
+
+ ret = drm_of_find_panel_or_bridge(dev->dev->of_node,
+ 0, 0, &panel, &bridge);
+ if (ret && ret != -ENODEV)
+ return ret;
+ if (panel) {
+ bridge = drm_panel_bridge_add(panel,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
+ goto out_bridge;
+ }
+ } else {
+ /*
+ * TODO: when we are using a different bridge than a panel
+ * (such as a dumb VGA connector) we need to devise a different
+ * method to get the connector out of the bridge.
+ */
+ dev_err(dev->dev, "the bridge is not a panel\n");
+ goto out_bridge;
+ }
+
+ ret = tve200_display_init(dev);
+ if (ret) {
+ dev_err(dev->dev, "failed to init display\n");
+ goto out_bridge;
+ }
+
+ ret = drm_simple_display_pipe_attach_bridge(&priv->pipe,
+ bridge);
+ if (ret) {
+ dev_err(dev->dev, "failed to attach bridge\n");
+ goto out_bridge;
+ }
+
+ priv->panel = panel;
+ priv->connector = panel->connector;
+ priv->bridge = bridge;
+
+ dev_info(dev->dev, "attached to panel %s\n",
+ dev_name(panel->dev));
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret) {
+ dev_err(dev->dev, "failed to init vblank\n");
+ goto out_bridge;
+ }
+
+ drm_mode_config_reset(dev);
+
+ /*
+ * Passing in 16 here will make the RGB565 mode the default
+ * Passing in 32 will use XRGB8888 mode
+ */
+ priv->fbdev = drm_fbdev_cma_init(dev, 16,
+ dev->mode_config.num_connector);
+ drm_kms_helper_poll_init(dev);
+
+ goto finish;
+
+out_bridge:
+ if (panel)
+ drm_panel_bridge_remove(bridge);
+ drm_mode_config_cleanup(dev);
+finish:
+ return ret;
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
+
+static void tve200_lastclose(struct drm_device *dev)
+{
+ struct tve200_drm_dev_private *priv = dev->dev_private;
+
+ drm_fbdev_cma_restore_mode(priv->fbdev);
+}
+
+static struct drm_driver tve200_drm_driver = {
+ .driver_features =
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
+ .lastclose = tve200_lastclose,
+ .ioctls = NULL,
+ .fops = &drm_fops,
+ .name = "tve200",
+ .desc = DRIVER_DESC,
+ .date = "20170703",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+
+ .enable_vblank = tve200_enable_vblank,
+ .disable_vblank = tve200_disable_vblank,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+};
+
+static int tve200_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tve200_drm_dev_private *priv;
+ struct drm_device *drm;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ drm = drm_dev_alloc(&tve200_drm_driver, dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+ platform_set_drvdata(pdev, drm);
+ priv->drm = drm;
+ drm->dev_private = priv;
+
+ /* Clock the silicon so we can access the registers */
+ priv->pclk = devm_clk_get(dev, "PCLK");
+ if (IS_ERR(priv->pclk)) {
+ dev_err(dev, "unable to get PCLK\n");
+ ret = PTR_ERR(priv->pclk);
+ goto dev_unref;
+ }
+ ret = clk_prepare_enable(priv->pclk);
+ if (ret) {
+ dev_err(dev, "failed to enable PCLK\n");
+ goto dev_unref;
+ }
+
+ /* This clock is for the pixels (27MHz) */
+ priv->clk = devm_clk_get(dev, "TVE");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "unable to get TVE clock\n");
+ ret = PTR_ERR(priv->clk);
+ goto clk_disable;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "%s failed mmio\n", __func__);
+ ret = -EINVAL;
+ goto clk_disable;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ ret = -EINVAL;
+ goto clk_disable;
+ }
+
+ /* turn off interrupts before requesting the irq */
+ writel(0, priv->regs + TVE200_INT_EN);
+
+ ret = devm_request_irq(dev, irq, tve200_irq, 0, "tve200", priv);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", ret);
+ goto clk_disable;
+ }
+
+ ret = tve200_modeset_init(drm);
+ if (ret)
+ goto clk_disable;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto clk_disable;
+
+ return 0;
+
+clk_disable:
+ clk_disable_unprepare(priv->pclk);
+dev_unref:
+ drm_dev_unref(drm);
+ return ret;
+}
+
+static int tve200_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+ struct tve200_drm_dev_private *priv = drm->dev_private;
+
+ drm_dev_unregister(drm);
+ if (priv->fbdev)
+ drm_fbdev_cma_fini(priv->fbdev);
+ if (priv->panel)
+ drm_panel_bridge_remove(priv->bridge);
+ drm_mode_config_cleanup(drm);
+ clk_disable_unprepare(priv->pclk);
+ drm_dev_unref(drm);
+
+ return 0;
+}
+
+static const struct of_device_id tve200_of_match[] = {
+ {
+ .compatible = "faraday,tve200",
+ },
+ {},
+};
+
+static struct platform_driver tve200_driver = {
+ .driver = {
+ .name = "tve200",
+ .of_match_table = of_match_ptr(tve200_of_match),
+ },
+ .probe = tve200_probe,
+ .remove = tve200_remove,
+};
+module_platform_driver(tve200_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 9f9a49748d17..c3dc1fd20cb4 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -14,70 +14,95 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
+#include "udl_connector.h"
#include "udl_drv.h"
-/* dummy connector to just get EDID,
- all UDL appear to have a DVI-D */
-
-static u8 *udl_get_edid(struct udl_device *udl)
+static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
+ u8 *buff)
{
- u8 *block;
- char *rbuf;
int ret, i;
+ u8 *read_buff;
- block = kmalloc(EDID_LENGTH, GFP_KERNEL);
- if (block == NULL)
- return NULL;
-
- rbuf = kmalloc(2, GFP_KERNEL);
- if (rbuf == NULL)
- goto error;
+ read_buff = kmalloc(2, GFP_KERNEL);
+ if (!read_buff)
+ return false;
for (i = 0; i < EDID_LENGTH; i++) {
+ int bval = (i + block_idx * EDID_LENGTH) << 8;
ret = usb_control_msg(udl->udev,
- usb_rcvctrlpipe(udl->udev, 0), (0x02),
- (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
- HZ);
+ usb_rcvctrlpipe(udl->udev, 0),
+ (0x02), (0x80 | (0x02 << 5)), bval,
+ 0xA1, read_buff, 2, HZ);
if (ret < 1) {
DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
- goto error;
+ kfree(read_buff);
+ return false;
}
- block[i] = rbuf[1];
+ buff[i] = read_buff[1];
}
- kfree(rbuf);
- return block;
-
-error:
- kfree(block);
- kfree(rbuf);
- return NULL;
+ kfree(read_buff);
+ return true;
}
-static int udl_get_modes(struct drm_connector *connector)
+static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
+ int *result_buff_size)
{
- struct udl_device *udl = connector->dev->dev_private;
- struct edid *edid;
- int ret;
-
- edid = (struct edid *)udl_get_edid(udl);
- if (!edid) {
- drm_mode_connector_update_edid_property(connector, NULL);
- return 0;
+ int i, extensions;
+ u8 *block_buff = NULL, *buff_ptr;
+
+ block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (block_buff == NULL)
+ return false;
+
+ if (udl_get_edid_block(udl, 0, block_buff) &&
+ memchr_inv(block_buff, 0, EDID_LENGTH)) {
+ extensions = ((struct edid *)block_buff)->extensions;
+ if (extensions > 0) {
+ /* we have to read all extensions one by one */
+ *result_buff_size = EDID_LENGTH * (extensions + 1);
+ *result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
+ buff_ptr = *result_buff;
+ if (buff_ptr == NULL) {
+ kfree(block_buff);
+ return false;
+ }
+ memcpy(buff_ptr, block_buff, EDID_LENGTH);
+ kfree(block_buff);
+ buff_ptr += EDID_LENGTH;
+ for (i = 1; i < extensions; ++i) {
+ if (udl_get_edid_block(udl, i, buff_ptr)) {
+ buff_ptr += EDID_LENGTH;
+ } else {
+ kfree(*result_buff);
+ *result_buff = NULL;
+ return false;
+ }
+ }
+ return true;
+ }
+ /* we only have the base EDID block */
+ *result_buff = block_buff;
+ *result_buff_size = EDID_LENGTH;
+ return true;
}
- /*
- * We only read the main block, but if the monitor reports extension
- * blocks then the drm edid code expects them to be present, so patch
- * the extension count to 0.
- */
- edid->checksum += edid->extensions;
- edid->extensions = 0;
-
- drm_mode_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- return ret;
+ kfree(block_buff);
+
+ return false;
+}
+
+static int udl_get_modes(struct drm_connector *connector)
+{
+ struct udl_drm_connector *udl_connector =
+ container_of(connector,
+ struct udl_drm_connector,
+ connector);
+
+ drm_mode_connector_update_edid_property(connector, udl_connector->edid);
+ if (udl_connector->edid)
+ return drm_add_edid_modes(connector, udl_connector->edid);
+ return 0;
}
static int udl_mode_valid(struct drm_connector *connector,
@@ -96,8 +121,26 @@ static int udl_mode_valid(struct drm_connector *connector,
static enum drm_connector_status
udl_detect(struct drm_connector *connector, bool force)
{
- if (drm_dev_is_unplugged(connector->dev))
+ u8 *edid_buff = NULL;
+ int edid_buff_size = 0;
+ struct udl_device *udl = connector->dev->dev_private;
+ struct udl_drm_connector *udl_connector =
+ container_of(connector,
+ struct udl_drm_connector,
+ connector);
+
+ /* cleanup previous edid */
+ if (udl_connector->edid != NULL) {
+ kfree(udl_connector->edid);
+ udl_connector->edid = NULL;
+ }
+
+
+ if (!udl_get_edid(udl, &edid_buff, &edid_buff_size))
return connector_status_disconnected;
+
+ udl_connector->edid = (struct edid *)edid_buff;
+
return connector_status_connected;
}
@@ -105,7 +148,7 @@ static struct drm_encoder*
udl_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
}
static int udl_connector_set_property(struct drm_connector *connector,
@@ -117,8 +160,14 @@ static int udl_connector_set_property(struct drm_connector *connector,
static void udl_connector_destroy(struct drm_connector *connector)
{
+ struct udl_drm_connector *udl_connector =
+ container_of(connector,
+ struct udl_drm_connector,
+ connector);
+
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
+ kfree(udl_connector->edid);
kfree(connector);
}
@@ -138,17 +187,22 @@ static const struct drm_connector_funcs udl_connector_funcs = {
int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
{
+ struct udl_drm_connector *udl_connector;
struct drm_connector *connector;
- connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
- if (!connector)
+ udl_connector = kzalloc(sizeof(struct udl_drm_connector), GFP_KERNEL);
+ if (!udl_connector)
return -ENOMEM;
- drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII);
+ connector = &udl_connector->connector;
+ drm_connector_init(dev, connector, &udl_connector_funcs,
+ DRM_MODE_CONNECTOR_DVII);
drm_connector_helper_add(connector, &udl_connector_helper_funcs);
drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
+ connector->polled = DRM_CONNECTOR_POLL_HPD |
+ DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_connector.h b/drivers/gpu/drm/udl/udl_connector.h
new file mode 100644
index 000000000000..0fb0db5c4612
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_connector.h
@@ -0,0 +1,13 @@
+#ifndef __UDL_CONNECTOR_H__
+#define __UDL_CONNECTOR_H__
+
+#include <drm/drm_crtc.h>
+
+struct udl_drm_connector {
+ struct drm_connector connector;
+ /* last udl_detect edid */
+ struct edid *edid;
+};
+
+
+#endif //__UDL_CONNECTOR_H__
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 31421b6b586e..3c45a3064726 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -14,6 +14,9 @@
static int udl_usb_suspend(struct usb_interface *interface,
pm_message_t message)
{
+ struct drm_device *dev = usb_get_intfdata(interface);
+
+ drm_kms_helper_poll_disable(dev);
return 0;
}
@@ -21,6 +24,7 @@ static int udl_usb_resume(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
+ drm_kms_helper_poll_enable(dev);
udl_modeset_restore(dev);
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 0328b2c7b210..f1ec4528a73e 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -11,6 +11,7 @@
* more details.
*/
#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
#include "udl_drv.h"
/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
@@ -350,6 +351,8 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_fb;
+ drm_kms_helper_poll_init(dev);
+
return 0;
err_fb:
udl_fbdev_cleanup(dev);
@@ -371,6 +374,8 @@ void udl_driver_unload(struct drm_device *dev)
{
struct udl_device *udl = dev->dev_private;
+ drm_kms_helper_poll_fini(dev);
+
if (udl->urbs.count)
udl_free_urb_list(dev);
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index 837c82757339..f5500df51686 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -25,5 +25,3 @@ vc4-y := \
vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o
obj-$(CONFIG_DRM_VC4) += vc4.o
-
-CFLAGS_vc4_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 3afdbf4bc10b..98a6cb9f44fc 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -53,6 +53,17 @@ static void vc4_bo_stats_dump(struct vc4_dev *vc4)
vc4->bo_labels[i].size_allocated / 1024,
vc4->bo_labels[i].num_allocated);
}
+
+ mutex_lock(&vc4->purgeable.lock);
+ if (vc4->purgeable.num)
+ DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
+ vc4->purgeable.size / 1024, vc4->purgeable.num);
+
+ if (vc4->purgeable.purged_num)
+ DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO",
+ vc4->purgeable.purged_size / 1024,
+ vc4->purgeable.purged_num);
+ mutex_unlock(&vc4->purgeable.lock);
}
#ifdef CONFIG_DEBUG_FS
@@ -75,6 +86,17 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
}
mutex_unlock(&vc4->bo_lock);
+ mutex_lock(&vc4->purgeable.lock);
+ if (vc4->purgeable.num)
+ seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
+ vc4->purgeable.size / 1024, vc4->purgeable.num);
+
+ if (vc4->purgeable.purged_num)
+ seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
+ vc4->purgeable.purged_size / 1024,
+ vc4->purgeable.purged_num);
+ mutex_unlock(&vc4->purgeable.lock);
+
return 0;
}
#endif
@@ -247,6 +269,109 @@ static void vc4_bo_cache_purge(struct drm_device *dev)
mutex_unlock(&vc4->bo_lock);
}
+void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+
+ mutex_lock(&vc4->purgeable.lock);
+ list_add_tail(&bo->size_head, &vc4->purgeable.list);
+ vc4->purgeable.num++;
+ vc4->purgeable.size += bo->base.base.size;
+ mutex_unlock(&vc4->purgeable.lock);
+}
+
+static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+
+ /* list_del_init() is used here because the caller might release
+ * the purgeable lock in order to acquire the madv one and update the
+ * madv status.
+ * During this short period of time a user might decide to mark
+ * the BO as unpurgeable, and if bo->madv is set to
+ * VC4_MADV_DONTNEED it will try to remove the BO from the
+ * purgeable list which will fail if the ->next/prev fields
+ * are set to LIST_POISON1/LIST_POISON2 (which is what
+ * list_del() does).
+ * Re-initializing the list element guarantees that list_del()
+ * will work correctly even if it's a NOP.
+ */
+ list_del_init(&bo->size_head);
+ vc4->purgeable.num--;
+ vc4->purgeable.size -= bo->base.base.size;
+}
+
+void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+
+ mutex_lock(&vc4->purgeable.lock);
+ vc4_bo_remove_from_purgeable_pool_locked(bo);
+ mutex_unlock(&vc4->purgeable.lock);
+}
+
+static void vc4_bo_purge(struct drm_gem_object *obj)
+{
+ struct vc4_bo *bo = to_vc4_bo(obj);
+ struct drm_device *dev = obj->dev;
+
+ WARN_ON(!mutex_is_locked(&bo->madv_lock));
+ WARN_ON(bo->madv != VC4_MADV_DONTNEED);
+
+ drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+
+ dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
+ bo->base.vaddr = NULL;
+ bo->madv = __VC4_MADV_PURGED;
+}
+
+static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ mutex_lock(&vc4->purgeable.lock);
+ while (!list_empty(&vc4->purgeable.list)) {
+ struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
+ struct vc4_bo, size_head);
+ struct drm_gem_object *obj = &bo->base.base;
+ size_t purged_size = 0;
+
+ vc4_bo_remove_from_purgeable_pool_locked(bo);
+
+ /* Release the purgeable lock while we're purging the BO so
+ * that other people can continue inserting things in the
+ * purgeable pool without having to wait for all BOs to be
+ * purged.
+ */
+ mutex_unlock(&vc4->purgeable.lock);
+ mutex_lock(&bo->madv_lock);
+
+ /* Since we released the purgeable pool lock before acquiring
+ * the BO madv one, the user may have marked the BO as WILLNEED
+ * and re-used it in the meantime.
+ * Before purging the BO we need to make sure
+ * - it is still marked as DONTNEED
+ * - it has not been re-inserted in the purgeable list
+ * - it is not used by HW blocks
+ * If one of these conditions is not met, just skip the entry.
+ */
+ if (bo->madv == VC4_MADV_DONTNEED &&
+ list_empty(&bo->size_head) &&
+ !refcount_read(&bo->usecnt)) {
+ purged_size = bo->base.base.size;
+ vc4_bo_purge(obj);
+ }
+ mutex_unlock(&bo->madv_lock);
+ mutex_lock(&vc4->purgeable.lock);
+
+ if (purged_size) {
+ vc4->purgeable.purged_size += purged_size;
+ vc4->purgeable.purged_num++;
+ }
+ }
+ mutex_unlock(&vc4->purgeable.lock);
+}
+
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
uint32_t size,
enum vc4_kernel_bo_type type)
@@ -293,6 +418,9 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
if (!bo)
return ERR_PTR(-ENOMEM);
+ bo->madv = VC4_MADV_WILLNEED;
+ refcount_set(&bo->usecnt, 0);
+ mutex_init(&bo->madv_lock);
mutex_lock(&vc4->bo_lock);
bo->label = VC4_BO_TYPE_KERNEL;
vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
@@ -330,16 +458,38 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
* CMA allocations we've got laying around and try again.
*/
vc4_bo_cache_purge(dev);
+ cma_obj = drm_gem_cma_create(dev, size);
+ }
+ if (IS_ERR(cma_obj)) {
+ /*
+ * Still not enough CMA memory, purge the userspace BO
+ * cache and retry.
+ * This is sub-optimal since we purge the whole userspace
+ * BO cache, which forces users that want to re-use a purged
+ * BO to restore its initial content.
+ * Ideally, we should purge entries one by one and retry
+ * after each to see if CMA allocation succeeds. Or even
+ * better, try to find an entry with at least the same
+ * size.
+ */
+ vc4_bo_userspace_cache_purge(dev);
cma_obj = drm_gem_cma_create(dev, size);
- if (IS_ERR(cma_obj)) {
- DRM_ERROR("Failed to allocate from CMA:\n");
- vc4_bo_stats_dump(vc4);
- return ERR_PTR(-ENOMEM);
- }
+ }
+
+ if (IS_ERR(cma_obj)) {
+ DRM_ERROR("Failed to allocate from CMA:\n");
+ vc4_bo_stats_dump(vc4);
+ return ERR_PTR(-ENOMEM);
}
bo = to_vc4_bo(&cma_obj->base);
+ /* By default, BOs do not support the MADV ioctl. This will be enabled
+ * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
+ * BOs).
+ */
+ bo->madv = __VC4_MADV_NOTSUPP;
+
mutex_lock(&vc4->bo_lock);
vc4_bo_set_label(&cma_obj->base, type);
mutex_unlock(&vc4->bo_lock);
@@ -365,6 +515,8 @@ int vc4_dumb_create(struct drm_file *file_priv,
if (IS_ERR(bo))
return PTR_ERR(bo);
+ bo->madv = VC4_MADV_WILLNEED;
+
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
drm_gem_object_put_unlocked(&bo->base.base);
@@ -403,6 +555,12 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
struct vc4_bo *bo = to_vc4_bo(gem_bo);
struct list_head *cache_list;
+ /* Remove the BO from the purgeable list. */
+ mutex_lock(&bo->madv_lock);
+ if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
+ vc4_bo_remove_from_purgeable_pool(bo);
+ mutex_unlock(&bo->madv_lock);
+
mutex_lock(&vc4->bo_lock);
/* If the object references someone else's memory, we can't cache it.
*/
@@ -418,7 +576,8 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
}
/* If this object was partially constructed but CMA allocation
- * had failed, just free it.
+ * had failed, just free it. Can also happen when the BO has been
+ * purged.
*/
if (!bo->base.vaddr) {
vc4_bo_destroy(bo);
@@ -437,6 +596,10 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
bo->validated_shader = NULL;
}
+ /* Reset madv and usecnt before adding the BO to the cache. */
+ bo->madv = __VC4_MADV_NOTSUPP;
+ refcount_set(&bo->usecnt, 0);
+
bo->t_format = false;
bo->free_time = jiffies;
list_add(&bo->size_head, cache_list);
@@ -461,6 +624,56 @@ static void vc4_bo_cache_time_work(struct work_struct *work)
mutex_unlock(&vc4->bo_lock);
}
+int vc4_bo_inc_usecnt(struct vc4_bo *bo)
+{
+ int ret;
+
+ /* Fast path: if the BO is already retained by someone, no need to
+ * check the madv status.
+ */
+ if (refcount_inc_not_zero(&bo->usecnt))
+ return 0;
+
+ mutex_lock(&bo->madv_lock);
+ switch (bo->madv) {
+ case VC4_MADV_WILLNEED:
+ refcount_inc(&bo->usecnt);
+ ret = 0;
+ break;
+ case VC4_MADV_DONTNEED:
+ /* We shouldn't use a BO marked as purgeable if at least
+ * someone else retained its content by incrementing usecnt.
+ * Luckily the BO hasn't been purged yet, but something wrong
+ * is happening here. Just throw an error instead of
+ * authorizing this use case.
+ */
+ case __VC4_MADV_PURGED:
+ /* We can't use a purged BO. */
+ default:
+ /* Invalid madv value. */
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&bo->madv_lock);
+
+ return ret;
+}
+
+void vc4_bo_dec_usecnt(struct vc4_bo *bo)
+{
+ /* Fast path: if the BO is still retained by someone, no need to test
+ * the madv value.
+ */
+ if (refcount_dec_not_one(&bo->usecnt))
+ return;
+
+ mutex_lock(&bo->madv_lock);
+ if (refcount_dec_and_test(&bo->usecnt) &&
+ bo->madv == VC4_MADV_DONTNEED)
+ vc4_bo_add_to_purgeable_pool(bo);
+ mutex_unlock(&bo->madv_lock);
+}
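/*
 * Editor's sketch, not part of the patch: the intended pairing for the two
 * helpers above. Code that hands a BO to the hardware pins it first and
 * unpins it when done, which is what allows a DONTNEED BO to return to the
 * purgeable list only once it is idle. The function name is hypothetical;
 * the real callers in this series live in vc4_gem.c and vc4_plane.c.
 */
static int example_use_bo_on_hw(struct vc4_bo *bo)
{
	int ret = vc4_bo_inc_usecnt(bo);	/* fails if the BO was purged */

	if (ret)
		return ret;

	/* ... program the hardware with bo->base.paddr ... */

	vc4_bo_dec_usecnt(bo);	/* may put the BO back on the purgeable list */
	return 0;
}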
+
static void vc4_bo_cache_time_timer(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
@@ -480,18 +693,52 @@ struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
struct vc4_bo *bo = to_vc4_bo(obj);
+ struct dma_buf *dmabuf;
+ int ret;
if (bo->validated_shader) {
DRM_DEBUG("Attempting to export shader BO\n");
return ERR_PTR(-EINVAL);
}
- return drm_gem_prime_export(dev, obj, flags);
+ /* Note: as soon as the BO is exported it becomes unpurgeable, because
+ * no one ever decrements the usecnt even if the reference held by the
+ * exported BO is released. This shouldn't be a problem since we don't
+ * expect exported BOs to be marked as purgeable.
+ */
+ ret = vc4_bo_inc_usecnt(bo);
+ if (ret) {
+ DRM_ERROR("Failed to increment BO usecnt\n");
+ return ERR_PTR(ret);
+ }
+
+ dmabuf = drm_gem_prime_export(dev, obj, flags);
+ if (IS_ERR(dmabuf))
+ vc4_bo_dec_usecnt(bo);
+
+ return dmabuf;
+}
+
+int vc4_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct vc4_bo *bo = to_vc4_bo(obj);
+
+ /* The only reason we would end up here is when user-space accesses
+ * the BO's memory after it's been purged.
+ */
+ mutex_lock(&bo->madv_lock);
+ WARN_ON(bo->madv != __VC4_MADV_PURGED);
+ mutex_unlock(&bo->madv_lock);
+
+ return VM_FAULT_SIGBUS;
}
int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_gem_object *gem_obj;
+ unsigned long vm_pgoff;
struct vc4_bo *bo;
int ret;
@@ -507,16 +754,36 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
}
+ if (bo->madv != VC4_MADV_WILLNEED) {
+ DRM_DEBUG("mmaping of %s BO not allowed\n",
+ bo->madv == VC4_MADV_DONTNEED ?
+ "purgeable" : "purged");
+ return -EINVAL;
+ }
+
/*
* Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
* vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
* the whole buffer.
*/
vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_pgoff = 0;
+ /* This ->vm_pgoff dance is needed to make all parties happy:
+ * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
+ * mem-region, hence the need to set it to zero (the value set by
+ * the DRM core is a virtual offset encoding the GEM object-id)
+ * - the mmap() core logic needs ->vm_pgoff to be restored to its
+ * initial value before returning from this function because it
+ * encodes the offset of this GEM in the dev->anon_inode pseudo-file
+ * and this information will be used when we invalidate userspace
+ * mappings with drm_vma_node_unmap() (called from vc4_gem_purge()).
+ */
+ vm_pgoff = vma->vm_pgoff;
+ vma->vm_pgoff = 0;
ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
bo->base.paddr, vma->vm_end - vma->vm_start);
+ vma->vm_pgoff = vm_pgoff;
+
if (ret)
drm_gem_vm_close(vma);
@@ -580,6 +847,8 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(bo))
return PTR_ERR(bo);
+ bo->madv = VC4_MADV_WILLNEED;
+
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
drm_gem_object_put_unlocked(&bo->base.base);
@@ -633,6 +902,8 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(bo))
return PTR_ERR(bo);
+ bo->madv = VC4_MADV_WILLNEED;
+
if (copy_from_user(bo->base.vaddr,
(void __user *)(uintptr_t)args->data,
args->size)) {
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 519cefef800d..72c9dbd81d7f 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -97,8 +97,6 @@ struct vc4_dpi {
struct drm_encoder *encoder;
struct drm_connector *connector;
- struct drm_bridge *bridge;
- bool is_panel_bridge;
void __iomem *regs;
@@ -251,10 +249,11 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
{
struct device *dev = &dpi->pdev->dev;
struct drm_panel *panel;
+ struct drm_bridge *bridge;
int ret;
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- &panel, &dpi->bridge);
+ &panel, &bridge);
if (ret) {
/* If nothing was connected in the DT, that's not an
* error.
@@ -265,13 +264,10 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
return ret;
}
- if (panel) {
- dpi->bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_DPI);
- dpi->is_panel_bridge = true;
- }
+ if (panel)
+ bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DPI);
- return drm_bridge_attach(dpi->encoder, dpi->bridge, NULL);
+ return drm_bridge_attach(dpi->encoder, bridge, NULL);
}
static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
@@ -352,8 +348,7 @@ static void vc4_dpi_unbind(struct device *dev, struct device *master,
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dpi *dpi = dev_get_drvdata(dev);
- if (dpi->is_panel_bridge)
- drm_panel_bridge_remove(dpi->bridge);
+ drm_of_panel_bridge_remove(dev->of_node, 0, 0);
drm_encoder_cleanup(dpi->encoder);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 1c96edcb302b..e3c29729da2e 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -100,6 +100,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_VC4_PARAM_SUPPORTS_ETC1:
case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
case DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER:
+ case DRM_VC4_PARAM_SUPPORTS_MADVISE:
args->value = true;
break;
default:
@@ -117,6 +118,12 @@ static void vc4_lastclose(struct drm_device *dev)
drm_fbdev_cma_restore_mode(vc4->fbdev);
}
+static const struct vm_operations_struct vc4_vm_ops = {
+ .fault = vc4_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
static const struct file_operations vc4_drm_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -142,6 +149,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(VC4_SET_TILING, vc4_set_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_GET_TILING, vc4_get_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_LABEL_BO, vc4_label_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_GEM_MADVISE, vc4_gem_madvise_ioctl, DRM_RENDER_ALLOW),
};
static struct drm_driver vc4_drm_driver = {
@@ -166,7 +174,7 @@ static struct drm_driver vc4_drm_driver = {
.gem_create_object = vc4_create_object,
.gem_free_object_unlocked = vc4_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .gem_vm_ops = &vc4_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 87f2d8e5c134..9c0d380c96f2 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -74,6 +74,19 @@ struct vc4_dev {
/* Protects bo_cache and bo_labels. */
struct mutex bo_lock;
+ /* Purgeable BO pool. All BOs in this pool can have their memory
+ * reclaimed if the driver is unable to allocate new BOs. We also
+ * keep stats related to the purge mechanism here.
+ */
+ struct {
+ struct list_head list;
+ unsigned int num;
+ size_t size;
+ unsigned int purged_num;
+ size_t purged_size;
+ struct mutex lock;
+ } purgeable;
+
uint64_t dma_fence_context;
/* Sequence number for the last job queued in bin_job_list.
@@ -192,6 +205,16 @@ struct vc4_bo {
* for user-allocated labels.
*/
int label;
+
+ /* Count the number of active users. This is needed to determine
+ * whether we can move the BO to the purgeable list or not (when the BO
+ * is used by the GPU or the display engine we can't purge it).
+ */
+ refcount_t usecnt;
+
+ /* Store purgeable/purged state here */
+ u32 madv;
+ struct mutex madv_lock;
};
static inline struct vc4_bo *
@@ -503,6 +526,7 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
@@ -513,6 +537,10 @@ void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
+int vc4_bo_inc_usecnt(struct vc4_bo *bo);
+void vc4_bo_dec_usecnt(struct vc4_bo *bo);
+void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
+void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
@@ -557,6 +585,8 @@ void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
struct vc4_seqno_cb *cb, uint64_t seqno,
void (*func)(struct vc4_seqno_cb *cb));
+int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
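The madvise ioctl declared above is how userspace moves BOs in and out of the purgeable pool. A hedged usage sketch, assuming the DRM_VC4_GEM_MADVISE uapi added by this series (field and macro names as in the uapi header; illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/vc4_drm.h>

/* Mark an idle BO purgeable; the kernel may reclaim its backing pages. */
static int bo_mark_purgeable(int drm_fd, uint32_t handle)
{
	struct drm_vc4_gem_madvise arg = {
		.handle = handle,
		.madv = VC4_MADV_DONTNEED,
	};

	return ioctl(drm_fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);
}

/* Reclaim a BO before reuse; *retained reports whether its contents survived. */
static int bo_reclaim(int drm_fd, uint32_t handle, bool *retained)
{
	struct drm_vc4_gem_madvise arg = {
		.handle = handle,
		.madv = VC4_MADV_WILLNEED,
	};
	int ret = ioctl(drm_fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);

	if (!ret)
		*retained = arg.retained;
	return ret;
}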
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index d1e0dc908048..94085f8bcd68 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -33,6 +33,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
@@ -504,7 +505,6 @@ struct vc4_dsi {
struct mipi_dsi_host dsi_host;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
- bool is_panel_bridge;
void __iomem *regs;
@@ -859,14 +859,11 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
pll_clock = parent_rate / divider;
pixel_clock_hz = pll_clock / dsi->divider;
- /* Round up the clk_set_rate() request slightly, since
- * PLLD_DSI1 is an integer divider and its rate selection will
- * never round up.
- */
- adjusted_mode->clock = pixel_clock_hz / 1000 + 1;
+ adjusted_mode->clock = pixel_clock_hz / 1000;
/* Given the new pixel clock, adjust HFP to keep vrefresh the same. */
- adjusted_mode->htotal = pixel_clock_hz / (mode->vrefresh * mode->vtotal);
+ adjusted_mode->htotal = adjusted_mode->clock * mode->htotal /
+ mode->clock;
adjusted_mode->hsync_end += adjusted_mode->htotal - mode->htotal;
adjusted_mode->hsync_start += adjusted_mode->htotal - mode->htotal;
@@ -900,7 +897,11 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
vc4_dsi_dump_regs(dsi);
}
- phy_clock = pixel_clock_hz * dsi->divider;
+ /* Round up the clk_set_rate() request slightly, since
+ * PLLD_DSI1 is an integer divider and its rate selection will
+ * never round up.
+ */
+ phy_clock = (pixel_clock_hz + 1000) * dsi->divider;
ret = clk_set_rate(dsi->pll_phy_clock, phy_clock);
if (ret) {
dev_err(&dsi->pdev->dev,
@@ -1288,7 +1289,6 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct vc4_dsi *dsi = host_to_dsi(host);
- int ret = 0;
dsi->lanes = device->lanes;
dsi->channel = device->channel;
@@ -1323,34 +1323,12 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
return 0;
}
- dsi->bridge = of_drm_find_bridge(device->dev.of_node);
- if (!dsi->bridge) {
- struct drm_panel *panel =
- of_drm_find_panel(device->dev.of_node);
-
- dsi->bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_DSI);
- if (IS_ERR(dsi->bridge)) {
- ret = PTR_ERR(dsi->bridge);
- dsi->bridge = NULL;
- return ret;
- }
- dsi->is_panel_bridge = true;
- }
-
- return drm_bridge_attach(dsi->encoder, dsi->bridge, NULL);
+ return 0;
}
static int vc4_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
- struct vc4_dsi *dsi = host_to_dsi(host);
-
- if (dsi->is_panel_bridge) {
- drm_panel_bridge_remove(dsi->bridge);
- dsi->bridge = NULL;
- }
-
return 0;
}
@@ -1382,6 +1360,27 @@ static void dsi_handle_error(struct vc4_dsi *dsi,
*ret = IRQ_HANDLED;
}
+/*
+ * Initial handler for port 1 where we need the reg_dma workaround.
+ * The register DMA writes sleep, so we can't do it in the top half.
+ * Instead we use IRQF_ONESHOT so that the IRQ gets disabled in the
+ * parent interrupt controller until our interrupt thread is done.
+ */
+static irqreturn_t vc4_dsi_irq_defer_to_thread_handler(int irq, void *data)
+{
+ struct vc4_dsi *dsi = data;
+ u32 stat = DSI_PORT_READ(INT_STAT);
+
+ if (!stat)
+ return IRQ_NONE;
+
+ return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Normal IRQ handler for port 0, or the threaded IRQ handler for port
+ * 1 where we need the reg_dma workaround.
+ */
static irqreturn_t vc4_dsi_irq_handler(int irq, void *data)
{
struct vc4_dsi *dsi = data;
@@ -1492,16 +1491,13 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = to_vc4_dev(drm);
- struct vc4_dsi *dsi;
+ struct vc4_dsi *dsi = dev_get_drvdata(dev);
struct vc4_dsi_encoder *vc4_dsi_encoder;
+ struct drm_panel *panel;
const struct of_device_id *match;
dma_cap_mask_t dma_mask;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
-
match = of_match_device(vc4_dsi_dt_match, dev);
if (!match)
return -ENODEV;
@@ -1516,7 +1512,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
vc4_dsi_encoder->dsi = dsi;
dsi->encoder = &vc4_dsi_encoder->base.base;
- dsi->pdev = pdev;
dsi->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(dsi->regs))
return PTR_ERR(dsi->regs);
@@ -1565,8 +1560,15 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
/* Clear any existing interrupt state. */
DSI_PORT_WRITE(INT_STAT, DSI_PORT_READ(INT_STAT));
- ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
- vc4_dsi_irq_handler, 0, "vc4 dsi", dsi);
+ if (dsi->reg_dma_mem)
+ ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0),
+ vc4_dsi_irq_defer_to_thread_handler,
+ vc4_dsi_irq_handler,
+ IRQF_ONESHOT,
+ "vc4 dsi", dsi);
+ else
+ ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
+ vc4_dsi_irq_handler, 0, "vc4 dsi", dsi);
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get interrupt: %d\n", ret);
@@ -1597,6 +1599,18 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
+ &panel, &dsi->bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ dsi->bridge = devm_drm_panel_bridge_add(dev, panel,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(dsi->bridge))
+ return PTR_ERR(dsi->bridge);
+ }
+
/* The esc clock rate is supposed to always be 100Mhz. */
ret = clk_set_rate(dsi->escape_clock, 100 * 1000000);
if (ret) {
@@ -1615,12 +1629,11 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
DRM_MODE_ENCODER_DSI, NULL);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
- dsi->dsi_host.ops = &vc4_dsi_host_ops;
- dsi->dsi_host.dev = dev;
-
- mipi_dsi_host_register(&dsi->dsi_host);
-
- dev_set_drvdata(dev, dsi);
+ ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL);
+ if (ret) {
+ dev_err(dev, "bridge attach failed: %d\n", ret);
+ return ret;
+ }
pm_runtime_enable(dev);
@@ -1638,8 +1651,6 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
vc4_dsi_encoder_destroy(dsi->encoder);
- mipi_dsi_host_unregister(&dsi->dsi_host);
-
if (dsi->port == 1)
vc4->dsi1 = NULL;
}
@@ -1651,12 +1662,47 @@ static const struct component_ops vc4_dsi_ops = {
static int vc4_dsi_dev_probe(struct platform_device *pdev)
{
- return component_add(&pdev->dev, &vc4_dsi_ops);
+ struct device *dev = &pdev->dev;
+ struct vc4_dsi *dsi;
+ int ret;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+ dev_set_drvdata(dev, dsi);
+
+ dsi->pdev = pdev;
+
+ /* Note, the initialization sequence for DSI and panels is
+ * tricky. The component bind above won't get past its
+ * -EPROBE_DEFER until the panel/bridge probes. The
+ * panel/bridge will return -EPROBE_DEFER until it has a
+ * mipi_dsi_host to register its device to. So we register
+ * the host at pdev probe time, which lets vc4 as a whole
+ * -EPROBE_DEFER its component bind process until the panel
+ * successfully attaches.
+ */
+ dsi->dsi_host.ops = &vc4_dsi_host_ops;
+ dsi->dsi_host.dev = dev;
+ mipi_dsi_host_register(&dsi->dsi_host);
+
+ ret = component_add(&pdev->dev, &vc4_dsi_ops);
+ if (ret) {
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+ return ret;
+ }
+
+ return 0;
}
static int vc4_dsi_dev_remove(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct vc4_dsi *dsi = dev_get_drvdata(dev);
+
component_del(&pdev->dev, &vc4_dsi_ops);
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index d0c6bfb68c4e..e00ac2f3a264 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -188,11 +188,22 @@ vc4_save_hang_state(struct drm_device *dev)
continue;
for (j = 0; j < exec[i]->bo_count; j++) {
+ bo = to_vc4_bo(&exec[i]->bo[j]->base);
+
+ /* Retain BOs just in case they were marked purgeable.
+ * This prevents the BO from being purged before
+ * someone has a chance to dump the hang state.
+ */
+ WARN_ON(!refcount_read(&bo->usecnt));
+ refcount_inc(&bo->usecnt);
drm_gem_object_get(&exec[i]->bo[j]->base);
kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
}
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
+ /* No need to retain BOs coming from the ->unref_list
+ * because they are naturally unpurgeable.
+ */
drm_gem_object_get(&bo->base.base);
kernel_state->bo[j + prev_idx] = &bo->base.base;
j++;
@@ -233,6 +244,26 @@ vc4_save_hang_state(struct drm_device *dev)
state->fdbgs = V3D_READ(V3D_FDBGS);
state->errstat = V3D_READ(V3D_ERRSTAT);
+ /* We need to turn purgeable BOs into unpurgeable ones so that
+ * userspace has a chance to dump the hang state before the kernel
+ * decides to purge those BOs.
+ * Note that BO consistency at dump time cannot be guaranteed. For
+ * example, if the owner of these BOs decides to re-use them or mark
+ * them purgeable again, there's nothing we can do to prevent it.
+ */
+ for (i = 0; i < kernel_state->user_state.bo_count; i++) {
+ struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]);
+
+ if (bo->madv == __VC4_MADV_NOTSUPP)
+ continue;
+
+ mutex_lock(&bo->madv_lock);
+ if (!WARN_ON(bo->madv == __VC4_MADV_PURGED))
+ bo->madv = VC4_MADV_WILLNEED;
+ refcount_dec(&bo->usecnt);
+ mutex_unlock(&bo->madv_lock);
+ }
+
spin_lock_irqsave(&vc4->job_lock, irqflags);
if (vc4->hang_state) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
@@ -639,9 +670,6 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
* The command validator needs to reference BOs by their index within
* the submitted job's BO list. This does the validation of the job's
* BO list and reference counting for the lifetime of the job.
- *
- * Note that this function doesn't need to unreference the BOs on
- * failure, because that will happen at vc4_complete_exec() time.
*/
static int
vc4_cl_lookup_bos(struct drm_device *dev,
@@ -693,16 +721,47 @@ vc4_cl_lookup_bos(struct drm_device *dev,
DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
i, handles[i]);
ret = -EINVAL;
- spin_unlock(&file_priv->table_lock);
- goto fail;
+ break;
}
+
drm_gem_object_get(bo);
exec->bo[i] = (struct drm_gem_cma_object *)bo;
}
spin_unlock(&file_priv->table_lock);
+ if (ret)
+ goto fail_put_bo;
+
+ for (i = 0; i < exec->bo_count; i++) {
+ ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
+ if (ret)
+ goto fail_dec_usecnt;
+ }
+
+ kvfree(handles);
+ return 0;
+
+fail_dec_usecnt:
+ /* Decrease usecnt on acquired objects.
+ * We cannot rely on vc4_complete_exec() to release resources here,
+ * because vc4_complete_exec() has no information about which BO has
+ * had its ->usecnt incremented.
+ * To make things easier, we just free everything explicitly and set
+ * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
+ * step.
+ */
+ for (i--; i >= 0; i--)
+ vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));
+
+fail_put_bo:
+ /* Release any reference to acquired objects. */
+ for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
+ drm_gem_object_put_unlocked(&exec->bo[i]->base);
+
fail:
kvfree(handles);
+ kvfree(exec->bo);
+ exec->bo = NULL;
return ret;
}
@@ -833,8 +892,12 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
dma_fence_signal(exec->fence);
if (exec->bo) {
- for (i = 0; i < exec->bo_count; i++)
+ for (i = 0; i < exec->bo_count; i++) {
+ struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
+
+ vc4_bo_dec_usecnt(bo);
drm_gem_object_put_unlocked(&exec->bo[i]->base);
+ }
kvfree(exec->bo);
}
@@ -1098,6 +1161,9 @@ vc4_gem_init(struct drm_device *dev)
INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
mutex_init(&vc4->power_lock);
+
+ INIT_LIST_HEAD(&vc4->purgeable.list);
+ mutex_init(&vc4->purgeable.lock);
}
void
@@ -1121,3 +1187,81 @@ vc4_gem_destroy(struct drm_device *dev)
if (vc4->hang_state)
vc4_free_hang_state(dev, vc4->hang_state);
}
+
+int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vc4_gem_madvise *args = data;
+ struct drm_gem_object *gem_obj;
+ struct vc4_bo *bo;
+ int ret;
+
+ switch (args->madv) {
+ case VC4_MADV_DONTNEED:
+ case VC4_MADV_WILLNEED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+ return -ENOENT;
+ }
+
+ bo = to_vc4_bo(gem_obj);
+
+ /* Only BOs exposed to userspace can be purged. */
+ if (bo->madv == __VC4_MADV_NOTSUPP) {
+ DRM_DEBUG("madvise not supported on this BO\n");
+ ret = -EINVAL;
+ goto out_put_gem;
+ }
+
+ /* Not sure it's safe to purge imported BOs. Let's just assume it's
+ * not until proven otherwise.
+ */
+ if (gem_obj->import_attach) {
+ DRM_DEBUG("madvise not supported on imported BOs\n");
+ ret = -EINVAL;
+ goto out_put_gem;
+ }
+
+ mutex_lock(&bo->madv_lock);
+
+ if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED &&
+ !refcount_read(&bo->usecnt)) {
+ /* If the BO is about to be marked as purgeable, is not used
+ * and is not already purgeable or purged, add it to the
+ * purgeable list.
+ */
+ vc4_bo_add_to_purgeable_pool(bo);
+ } else if (args->madv == VC4_MADV_WILLNEED &&
+ bo->madv == VC4_MADV_DONTNEED &&
+ !refcount_read(&bo->usecnt)) {
+ /* The BO has not been purged yet, just remove it from
+ * the purgeable list.
+ */
+ vc4_bo_remove_from_purgeable_pool(bo);
+ }
+
+ /* Save the purged state. */
+ args->retained = bo->madv != __VC4_MADV_PURGED;
+
+ /* Update internal madv state only if the bo was not purged. */
+ if (bo->madv != __VC4_MADV_PURGED)
+ bo->madv = args->madv;
+
+ mutex_unlock(&bo->madv_lock);
+
+ ret = 0;
+
+out_put_gem:
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 937da8dd65b8..fa37a1c07cf6 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -309,16 +309,13 @@ static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs =
static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
struct drm_encoder *encoder)
{
- struct drm_connector *connector = NULL;
+ struct drm_connector *connector;
struct vc4_hdmi_connector *hdmi_connector;
- int ret = 0;
hdmi_connector = devm_kzalloc(dev->dev, sizeof(*hdmi_connector),
GFP_KERNEL);
- if (!hdmi_connector) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!hdmi_connector)
+ return ERR_PTR(-ENOMEM);
connector = &hdmi_connector->base;
hdmi_connector->encoder = encoder;
@@ -336,12 +333,6 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
drm_mode_connector_attach_encoder(connector, encoder);
return connector;
-
- fail:
- if (connector)
- vc4_hdmi_connector_destroy(connector);
-
- return ERR_PTR(ret);
}
static void vc4_hdmi_encoder_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 2968b3ebb895..423a23ed8fc2 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -23,6 +23,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>
+#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -547,14 +548,24 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
tiling = SCALER_CTL0_TILING_LINEAR;
pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
break;
- case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
+
+ case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: {
+ /* For T-tiled, the FB pitch is "how many bytes from
+ * one row to the next, such that pitch * tile_h ==
+ * tile_size * tiles_per_row."
+ */
+ u32 tile_size_shift = 12; /* T tiles are 4KB */
+ u32 tile_h_shift = 5; /* 16 and 32bpp are 32 pixels high */
+ u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);
+
tiling = SCALER_CTL0_TILING_256B_OR_T;
- pitch0 = (VC4_SET_FIELD(0, SCALER_PITCH0_TILE_Y_OFFSET),
- VC4_SET_FIELD(0, SCALER_PITCH0_TILE_WIDTH_L),
- VC4_SET_FIELD((vc4_state->src_w[0] + 31) >> 5,
- SCALER_PITCH0_TILE_WIDTH_R));
+ pitch0 = (VC4_SET_FIELD(0, SCALER_PITCH0_TILE_Y_OFFSET) |
+ VC4_SET_FIELD(0, SCALER_PITCH0_TILE_WIDTH_L) |
+ VC4_SET_FIELD(tiles_w, SCALER_PITCH0_TILE_WIDTH_R));
break;
+ }
+
default:
DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
(long long)fb->modifier);
@@ -764,21 +775,40 @@ static int vc4_prepare_fb(struct drm_plane *plane,
{
struct vc4_bo *bo;
struct dma_fence *fence;
+ int ret;
if ((plane->state->fb == state->fb) || !state->fb)
return 0;
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+
+ ret = vc4_bo_inc_usecnt(bo);
+ if (ret)
+ return ret;
+
fence = reservation_object_get_excl_rcu(bo->resv);
drm_atomic_set_fence_for_plane(state, fence);
return 0;
}
+static void vc4_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct vc4_bo *bo;
+
+ if (plane->state->fb == state->fb || !state->fb)
+ return;
+
+ bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+ vc4_bo_dec_usecnt(bo);
+}
+
static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
.atomic_check = vc4_plane_atomic_check,
.atomic_update = vc4_plane_atomic_update,
.prepare_fb = vc4_prepare_fb,
+ .cleanup_fb = vc4_cleanup_fb,
};
static void vc4_plane_destroy(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/vc4/vc4_trace.h b/drivers/gpu/drm/vc4/vc4_trace.h
index ad7b1ea720c2..deafb32923e1 100644
--- a/drivers/gpu/drm/vc4/vc4_trace.h
+++ b/drivers/gpu/drm/vc4/vc4_trace.h
@@ -59,5 +59,5 @@ TRACE_EVENT(vc4_wait_for_seqno_end,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/vc4
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
index 0677bbf4ec7e..fb2609434df7 100644
--- a/drivers/gpu/drm/via/via_verifier.c
+++ b/drivers/gpu/drm/via/via_verifier.c
@@ -34,6 +34,7 @@
#include <drm/drm_legacy.h>
#include "via_verifier.h"
#include "via_drv.h"
+#include <linux/kernel.h>
typedef enum {
state_command,
@@ -1102,10 +1103,7 @@ setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
void via_init_command_verifier(void)
{
- setup_hazard_table(init_table1, table1,
- sizeof(init_table1) / sizeof(hz_init_t));
- setup_hazard_table(init_table2, table2,
- sizeof(init_table2) / sizeof(hz_init_t));
- setup_hazard_table(init_table3, table3,
- sizeof(init_table3) / sizeof(hz_init_t));
+ setup_hazard_table(init_table1, table1, ARRAY_SIZE(init_table1));
+ setup_hazard_table(init_table2, table2, ARRAY_SIZE(init_table2));
+ setup_hazard_table(init_table3, table3, ARRAY_SIZE(init_table3));
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index b6d52055a11f..41b0930f7968 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -53,7 +53,7 @@ static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct virtio_gpu_framebuffer *virtio_gpu_fb
= to_virtio_gpu_framebuffer(fb);
- drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
+ drm_gem_object_put_unlocked(virtio_gpu_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(virtio_gpu_fb);
}
@@ -327,7 +327,7 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
if (ret) {
kfree(virtio_gpu_fb);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return NULL;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 72ad7b103448..92fb27753b9e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -72,7 +72,7 @@ int virtio_gpu_gem_create(struct drm_file *file,
*obj_p = &obj->gem_base;
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(&obj->gem_base);
+ drm_gem_object_put_unlocked(&obj->gem_base);
*handle_p = handle;
return 0;
@@ -130,7 +130,7 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
return -ENOENT;
obj = gem_to_virtio_gpu_obj(gobj);
*offset_p = virtio_gpu_object_mmap_offset(obj);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index b94bd5440e57..0528edb4a2bf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -86,7 +86,7 @@ static void virtio_gpu_unref_list(struct list_head *head)
bo = buf->bo;
qobj = container_of(bo, struct virtio_gpu_object, tbo);
- drm_gem_object_unreference_unlocked(&qobj->gem_base);
+ drm_gem_object_put_unlocked(&qobj->gem_base);
}
}
@@ -304,7 +304,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
}
return ret;
}
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
rc->res_handle = res_id; /* similar to a VM address */
rc->bo_handle = handle;
@@ -341,7 +341,7 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
ri->size = qobj->gem_base.size;
ri->res_handle = qobj->hw_res_handle;
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return 0;
}
@@ -389,7 +389,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
out_unres:
virtio_gpu_object_unreserve(qobj);
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
@@ -439,7 +439,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
out_unres:
virtio_gpu_object_unreserve(qobj);
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
@@ -462,7 +462,7 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
nowait = true;
ret = virtio_gpu_object_wait(qobj, nowait);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 5ec24fd801cd..01be355525e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -286,7 +286,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
- fb = drm_framebuffer_lookup(dev, arg->fb_id);
+ fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
if (!fb) {
DRM_ERROR("Invalid framebuffer id.\n");
ret = -ENOENT;
@@ -369,7 +369,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
- fb = drm_framebuffer_lookup(dev, arg->fb_id);
+ fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
if (!fb) {
DRM_ERROR("Invalid framebuffer id.\n");
ret = -ENOENT;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b850562fbdd6..0545740b3724 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1726,7 +1726,7 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
return 0;
}
- crtc = drm_crtc_find(dev, arg->crtc_id);
+ crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
if (!crtc) {
ret = -ENOENT;
goto out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index d1552d3e0652..bc5f6026573d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -360,8 +360,8 @@ static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
ret = vmw_event_fence_action_queue(file_priv, fence,
&event->base,
- &event->event.tv_sec,
- &event->event.tv_usec,
+ &event->event.vbl.tv_sec,
+ &event->event.vbl.tv_usec,
true);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index ca3afae2db1f..90b5437fd787 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -549,8 +549,8 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
ret = vmw_event_fence_action_queue(file_priv, fence,
&event->base,
- &event->event.tv_sec,
- &event->event.tv_usec,
+ &event->event.vbl.tv_sec,
+ &event->event.vbl.tv_usec,
true);
vmw_fence_obj_unreference(&fence);
} else {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index a552e4ea5440..6ac094ee8983 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -904,7 +904,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
if (unlikely(drm_is_render_client(file_priv)))
require_exist = true;
- if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+ if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) {
DRM_ERROR("Locked master refused legacy "
"surface reference.\n");
return -EACCES;
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index 45244828fc1f..e8b8266c0cde 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -22,6 +22,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drmP.h>
@@ -40,7 +41,7 @@ static void zx_drm_fb_output_poll_changed(struct drm_device *drm)
}
static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+ .fb_create = drm_gem_fb_create,
.output_poll_changed = zx_drm_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index c0b80244158d..b92016ce09b7 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -12,6 +12,7 @@ host1x-y = \
hw/host1x01.o \
hw/host1x02.o \
hw/host1x04.o \
- hw/host1x05.o
+ hw/host1x05.o \
+ hw/host1x06.o
obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index f9cde03030fd..2e57c9cea696 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -320,6 +320,7 @@ struct bus_type host1x_bus_type = {
.name = "host1x",
.match = host1x_device_match,
.pm = &host1x_device_pm_ops,
+ .force_dma = true,
};
static void __host1x_device_del(struct host1x_device *device)
@@ -403,12 +404,13 @@ static int host1x_device_add(struct host1x *host1x,
device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
device->dev.dma_mask = &device->dev.coherent_dma_mask;
dev_set_name(&device->dev, "%s", driver->driver.name);
- of_dma_configure(&device->dev, host1x->dev->of_node);
device->dev.release = host1x_device_release;
device->dev.of_node = host1x->dev->of_node;
device->dev.bus = &host1x_bus_type;
device->dev.parent = host1x->dev;
+ of_dma_configure(&device->dev, host1x->dev->of_node);
+
err = host1x_device_parse_dt(device, driver);
if (err < 0) {
kfree(device);
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
index db9b91d1384c..2fb93c27c1d9 100644
--- a/drivers/gpu/host1x/channel.c
+++ b/drivers/gpu/host1x/channel.c
@@ -128,8 +128,7 @@ static struct host1x_channel *acquire_unused_channel(struct host1x *host)
* host1x_channel_request() - Allocate a channel
* @device: Host1x unit this channel will be used to send commands to
*
- * Allocates a new host1x channel for @device. If there are no free channels,
- * this will sleep until one becomes available. May return NULL if CDMA
+ * Allocates a new host1x channel for @device. May return NULL if CDMA
* initialization fails.
*/
struct host1x_channel *host1x_channel_request(struct device *dev)
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index 2aae0e63214c..dc77ec452ffc 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -40,7 +40,19 @@ void host1x_debug_output(struct output *o, const char *fmt, ...)
len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
va_end(args);
- o->fn(o->ctx, o->buf, len);
+ o->fn(o->ctx, o->buf, len, false);
+}
+
+void host1x_debug_cont(struct output *o, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+
+ va_start(args, fmt);
+ len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
+ va_end(args);
+
+ o->fn(o->ctx, o->buf, len, true);
}
static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
diff --git a/drivers/gpu/host1x/debug.h b/drivers/gpu/host1x/debug.h
index 4595b2e0799f..990cce47e737 100644
--- a/drivers/gpu/host1x/debug.h
+++ b/drivers/gpu/host1x/debug.h
@@ -24,22 +24,28 @@
struct host1x;
struct output {
- void (*fn)(void *ctx, const char *str, size_t len);
+ void (*fn)(void *ctx, const char *str, size_t len, bool cont);
void *ctx;
char buf[256];
};
-static inline void write_to_seqfile(void *ctx, const char *str, size_t len)
+static inline void write_to_seqfile(void *ctx, const char *str, size_t len,
+ bool cont)
{
seq_write((struct seq_file *)ctx, str, len);
}
-static inline void write_to_printk(void *ctx, const char *str, size_t len)
+static inline void write_to_printk(void *ctx, const char *str, size_t len,
+ bool cont)
{
- pr_info("%s", str);
+ if (cont)
+ pr_cont("%s", str);
+ else
+ pr_info("%s", str);
}
void __printf(2, 3) host1x_debug_output(struct output *o, const char *fmt, ...);
+void __printf(2, 3) host1x_debug_cont(struct output *o, const char *fmt, ...);
extern unsigned int host1x_debug_trace_cmdbuf;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 7f22c5c37660..bf67c3aeb634 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -39,6 +39,17 @@
#include "hw/host1x02.h"
#include "hw/host1x04.h"
#include "hw/host1x05.h"
+#include "hw/host1x06.h"
+
+void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
+{
+ writel(v, host1x->hv_regs + r);
+}
+
+u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r)
+{
+ return readl(host1x->hv_regs + r);
+}
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
{
@@ -104,7 +115,19 @@ static const struct host1x_info host1x05_info = {
.dma_mask = DMA_BIT_MASK(34),
};
+static const struct host1x_info host1x06_info = {
+ .nb_channels = 63,
+ .nb_pts = 576,
+ .nb_mlocks = 24,
+ .nb_bases = 16,
+ .init = host1x06_init,
+ .sync_offset = 0x0,
+ .dma_mask = DMA_BIT_MASK(34),
+ .has_hypervisor = true,
+};
+
static const struct of_device_id host1x_of_match[] = {
+ { .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
@@ -116,20 +139,37 @@ MODULE_DEVICE_TABLE(of, host1x_of_match);
static int host1x_probe(struct platform_device *pdev)
{
- const struct of_device_id *id;
struct host1x *host;
- struct resource *regs;
+ struct resource *regs, *hv_regs = NULL;
int syncpt_irq;
int err;
- id = of_match_device(host1x_of_match, &pdev->dev);
- if (!id)
- return -EINVAL;
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs) {
- dev_err(&pdev->dev, "failed to get registers\n");
- return -ENXIO;
+ host->info = of_device_get_match_data(&pdev->dev);
+
+ if (host->info->has_hypervisor) {
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vm");
+ if (!regs) {
+ dev_err(&pdev->dev, "failed to get vm registers\n");
+ return -ENXIO;
+ }
+
+ hv_regs = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "hypervisor");
+ if (!hv_regs) {
+ dev_err(&pdev->dev,
+ "failed to get hypervisor registers\n");
+ return -ENXIO;
+ }
+ } else {
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "failed to get registers\n");
+ return -ENXIO;
+ }
}
syncpt_irq = platform_get_irq(pdev, 0);
@@ -138,15 +178,10 @@ static int host1x_probe(struct platform_device *pdev)
return syncpt_irq;
}
- host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
- if (!host)
- return -ENOMEM;
-
mutex_init(&host->devices_lock);
INIT_LIST_HEAD(&host->devices);
INIT_LIST_HEAD(&host->list);
host->dev = &pdev->dev;
- host->info = id->data;
/* set common host1x device data */
platform_set_drvdata(pdev, host);
@@ -155,6 +190,12 @@ static int host1x_probe(struct platform_device *pdev)
if (IS_ERR(host->regs))
return PTR_ERR(host->regs);
+ if (host->info->has_hypervisor) {
+ host->hv_regs = devm_ioremap_resource(&pdev->dev, hv_regs);
+ if (IS_ERR(host->hv_regs))
+ return PTR_ERR(host->hv_regs);
+ }
+
dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
if (host->info->init) {
@@ -198,8 +239,7 @@ static int host1x_probe(struct platform_device *pdev)
order = __ffs(host->domain->pgsize_bitmap);
init_iova_domain(&host->iova, 1UL << order,
- geometry->aperture_start >> order,
- geometry->aperture_end >> order);
+ geometry->aperture_start >> order);
host->iova_end = geometry->aperture_end;
}
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index ffdbc15b749b..502769726480 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -79,6 +79,9 @@ struct host1x_syncpt_ops {
u32 (*load)(struct host1x_syncpt *syncpt);
int (*cpu_incr)(struct host1x_syncpt *syncpt);
int (*patch_wait)(struct host1x_syncpt *syncpt, void *patch_addr);
+ void (*assign_to_channel)(struct host1x_syncpt *syncpt,
+ struct host1x_channel *channel);
+ void (*enable_protection)(struct host1x *host);
};
struct host1x_intr_ops {
@@ -100,12 +103,14 @@ struct host1x_info {
int (*init)(struct host1x *host1x); /* initialize per SoC ops */
unsigned int sync_offset; /* offset of syncpoint registers */
u64 dma_mask; /* mask of addressable memory */
+ bool has_hypervisor; /* has hypervisor registers */
};
struct host1x {
const struct host1x_info *info;
void __iomem *regs;
+ void __iomem *hv_regs; /* hypervisor region */
struct host1x_syncpt *syncpt;
struct host1x_syncpt_base *bases;
struct device *dev;
@@ -140,6 +145,8 @@ struct host1x {
struct list_head list;
};
+void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v);
+u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r);
void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
u32 host1x_sync_readl(struct host1x *host1x, u32 r);
void host1x_ch_writel(struct host1x_channel *ch, u32 r, u32 v);
@@ -182,6 +189,18 @@ static inline int host1x_hw_syncpt_patch_wait(struct host1x *host,
return host->syncpt_op->patch_wait(sp, patch_addr);
}
+static inline void host1x_hw_syncpt_assign_to_channel(
+ struct host1x *host, struct host1x_syncpt *sp,
+ struct host1x_channel *ch)
+{
+ return host->syncpt_op->assign_to_channel(sp, ch);
+}
+
+static inline void host1x_hw_syncpt_enable_protection(struct host1x *host)
+{
+ return host->syncpt_op->enable_protection(host);
+}
+
static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
void (*syncpt_thresh_work)(struct work_struct *))
{
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 6b231119193e..ce320534cbed 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -172,6 +172,30 @@ static void cdma_stop(struct host1x_cdma *cdma)
mutex_unlock(&cdma->lock);
}
+static void cdma_hw_cmdproc_stop(struct host1x *host, struct host1x_channel *ch,
+ bool stop)
+{
+#if HOST1X_HW >= 6
+ host1x_ch_writel(ch, stop ? 0x1 : 0x0, HOST1X_CHANNEL_CMDPROC_STOP);
+#else
+ u32 cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP);
+ if (stop)
+ cmdproc_stop |= BIT(ch->id);
+ else
+ cmdproc_stop &= ~BIT(ch->id);
+ host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+#endif
+}
+
+static void cdma_hw_teardown(struct host1x *host, struct host1x_channel *ch)
+{
+#if HOST1X_HW >= 6
+ host1x_ch_writel(ch, 0x1, HOST1X_CHANNEL_TEARDOWN);
+#else
+ host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN);
+#endif
+}
+
/*
* Stops both channel's command processor and CDMA immediately.
* Also, tears down the channel and resets corresponding module.
@@ -180,7 +204,6 @@ static void cdma_freeze(struct host1x_cdma *cdma)
{
struct host1x *host = cdma_to_host1x(cdma);
struct host1x_channel *ch = cdma_to_channel(cdma);
- u32 cmdproc_stop;
if (cdma->torndown && !cdma->running) {
dev_warn(host->dev, "Already torn down\n");
@@ -189,9 +212,7 @@ static void cdma_freeze(struct host1x_cdma *cdma)
dev_dbg(host->dev, "freezing channel (id %d)\n", ch->id);
- cmdproc_stop = host1x_sync_readl(host, HOST1X_SYNC_CMDPROC_STOP);
- cmdproc_stop |= BIT(ch->id);
- host1x_sync_writel(host, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+ cdma_hw_cmdproc_stop(host, ch, true);
dev_dbg(host->dev, "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
__func__, host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET),
@@ -201,7 +222,7 @@ static void cdma_freeze(struct host1x_cdma *cdma)
host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
HOST1X_CHANNEL_DMACTRL);
- host1x_sync_writel(host, BIT(ch->id), HOST1X_SYNC_CH_TEARDOWN);
+ cdma_hw_teardown(host, ch);
cdma->running = false;
cdma->torndown = true;
@@ -211,15 +232,12 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
{
struct host1x *host1x = cdma_to_host1x(cdma);
struct host1x_channel *ch = cdma_to_channel(cdma);
- u32 cmdproc_stop;
dev_dbg(host1x->dev,
"resuming channel (id %u, DMAGET restart = 0x%x)\n",
ch->id, getptr);
- cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
- cmdproc_stop &= ~BIT(ch->id);
- host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
+ cdma_hw_cmdproc_stop(host1x, ch, false);
cdma->torndown = false;
cdma_timeout_restart(cdma, getptr);
@@ -232,7 +250,7 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
*/
static void cdma_timeout_handler(struct work_struct *work)
{
- u32 prev_cmdproc, cmdproc_stop, syncpt_val;
+ u32 syncpt_val;
struct host1x_cdma *cdma;
struct host1x *host1x;
struct host1x_channel *ch;
@@ -254,12 +272,7 @@ static void cdma_timeout_handler(struct work_struct *work)
}
/* stop processing to get a clean snapshot */
- prev_cmdproc = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
- cmdproc_stop = prev_cmdproc | BIT(ch->id);
- host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
-
- dev_dbg(host1x->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n",
- prev_cmdproc, cmdproc_stop);
+ cdma_hw_cmdproc_stop(host1x, ch, true);
syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
@@ -268,9 +281,7 @@ static void cdma_timeout_handler(struct work_struct *work)
dev_dbg(host1x->dev,
"cdma_timeout: expired, but buffer had completed\n");
/* restore */
- cmdproc_stop = prev_cmdproc & ~(BIT(ch->id));
- host1x_sync_writel(host1x, cmdproc_stop,
- HOST1X_SYNC_CMDPROC_STOP);
+ cdma_hw_cmdproc_stop(host1x, ch, false);
mutex_unlock(&cdma->lock);
return;
}
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 8447a56c41ca..9af758785a11 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -147,6 +147,8 @@ static int channel_submit(struct host1x_job *job)
syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
+ host1x_hw_syncpt_assign_to_channel(host, sp, ch);
+
job->syncpt_end = syncval;
/* add a setclass for modules that require it */
@@ -178,10 +180,32 @@ error:
return err;
}
+static void enable_gather_filter(struct host1x *host,
+ struct host1x_channel *ch)
+{
+#if HOST1X_HW >= 6
+ u32 val;
+
+ if (!host->hv_regs)
+ return;
+
+ val = host1x_hypervisor_readl(
+ host, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
+ val |= BIT(ch->id % 32);
+ host1x_hypervisor_writel(
+ host, val, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
+#elif HOST1X_HW >= 4
+ host1x_ch_writel(ch,
+ HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(1),
+ HOST1X_CHANNEL_CHANNELCTRL);
+#endif
+}
+
static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
unsigned int index)
{
ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE;
+ enable_gather_filter(dev, ch);
return 0;
}
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 7a4a3286e4a7..989476801f9d 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -30,6 +30,13 @@ enum {
HOST1X_OPCODE_IMM = 0x04,
HOST1X_OPCODE_RESTART = 0x05,
HOST1X_OPCODE_GATHER = 0x06,
+ HOST1X_OPCODE_SETSTRMID = 0x07,
+ HOST1X_OPCODE_SETAPPID = 0x08,
+ HOST1X_OPCODE_SETPYLD = 0x09,
+ HOST1X_OPCODE_INCR_W = 0x0a,
+ HOST1X_OPCODE_NONINCR_W = 0x0b,
+ HOST1X_OPCODE_GATHER_W = 0x0c,
+ HOST1X_OPCODE_RESTART_W = 0x0d,
HOST1X_OPCODE_EXTEND = 0x0e,
};
@@ -38,67 +45,122 @@ enum {
HOST1X_OPCODE_EXTEND_RELEASE_MLOCK = 0x01,
};
-static unsigned int show_channel_command(struct output *o, u32 val)
+#define INVALID_PAYLOAD 0xffffffff
+
+static unsigned int show_channel_command(struct output *o, u32 val,
+ u32 *payload)
{
- unsigned int mask, subop;
+ unsigned int mask, subop, num, opcode;
+
+ opcode = val >> 28;
- switch (val >> 28) {
+ switch (opcode) {
case HOST1X_OPCODE_SETCLASS:
mask = val & 0x3f;
if (mask) {
- host1x_debug_output(o, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
+ host1x_debug_cont(o, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
val >> 6 & 0x3ff,
val >> 16 & 0xfff, mask);
return hweight8(mask);
}
- host1x_debug_output(o, "SETCL(class=%03x)\n", val >> 6 & 0x3ff);
+ host1x_debug_cont(o, "SETCL(class=%03x)\n", val >> 6 & 0x3ff);
return 0;
case HOST1X_OPCODE_INCR:
- host1x_debug_output(o, "INCR(offset=%03x, [",
+ num = val & 0xffff;
+ host1x_debug_cont(o, "INCR(offset=%03x, [",
val >> 16 & 0xfff);
- return val & 0xffff;
+ if (!num)
+ host1x_debug_cont(o, "])\n");
+
+ return num;
case HOST1X_OPCODE_NONINCR:
- host1x_debug_output(o, "NONINCR(offset=%03x, [",
+ num = val & 0xffff;
+ host1x_debug_cont(o, "NONINCR(offset=%03x, [",
val >> 16 & 0xfff);
- return val & 0xffff;
+ if (!num)
+ host1x_debug_cont(o, "])\n");
+
+ return num;
case HOST1X_OPCODE_MASK:
mask = val & 0xffff;
- host1x_debug_output(o, "MASK(offset=%03x, mask=%03x, [",
+ host1x_debug_cont(o, "MASK(offset=%03x, mask=%03x, [",
val >> 16 & 0xfff, mask);
+ if (!mask)
+ host1x_debug_cont(o, "])\n");
+
return hweight16(mask);
case HOST1X_OPCODE_IMM:
- host1x_debug_output(o, "IMM(offset=%03x, data=%03x)\n",
+ host1x_debug_cont(o, "IMM(offset=%03x, data=%03x)\n",
val >> 16 & 0xfff, val & 0xffff);
return 0;
case HOST1X_OPCODE_RESTART:
- host1x_debug_output(o, "RESTART(offset=%08x)\n", val << 4);
+ host1x_debug_cont(o, "RESTART(offset=%08x)\n", val << 4);
return 0;
case HOST1X_OPCODE_GATHER:
- host1x_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
+ host1x_debug_cont(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
val >> 16 & 0xfff, val >> 15 & 0x1,
val >> 14 & 0x1, val & 0x3fff);
return 1;
+#if HOST1X_HW >= 6
+ case HOST1X_OPCODE_SETSTRMID:
+ host1x_debug_cont(o, "SETSTRMID(offset=%06x)\n",
+ val & 0x3fffff);
+ return 0;
+
+ case HOST1X_OPCODE_SETAPPID:
+ host1x_debug_cont(o, "SETAPPID(appid=%02x)\n", val & 0xff);
+ return 0;
+
+ case HOST1X_OPCODE_SETPYLD:
+ *payload = val & 0xffff;
+ host1x_debug_cont(o, "SETPYLD(data=%04x)\n", *payload);
+ return 0;
+
+ case HOST1X_OPCODE_INCR_W:
+ case HOST1X_OPCODE_NONINCR_W:
+ host1x_debug_cont(o, "%s(offset=%06x, ",
+ opcode == HOST1X_OPCODE_INCR_W ?
+ "INCR_W" : "NONINCR_W",
+ val & 0x3fffff);
+ if (*payload == 0) {
+ host1x_debug_cont(o, "[])\n");
+ return 0;
+ } else if (*payload == INVALID_PAYLOAD) {
+ host1x_debug_cont(o, "unknown)\n");
+ return 0;
+ } else {
+ host1x_debug_cont(o, "[");
+ return *payload;
+ }
+
+ case HOST1X_OPCODE_GATHER_W:
+ host1x_debug_cont(o, "GATHER_W(count=%04x, addr=[",
+ val & 0x3fff);
+ return 2;
+#endif
+
case HOST1X_OPCODE_EXTEND:
subop = val >> 24 & 0xf;
if (subop == HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK)
- host1x_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n",
+ host1x_debug_cont(o, "ACQUIRE_MLOCK(index=%d)\n",
val & 0xff);
else if (subop == HOST1X_OPCODE_EXTEND_RELEASE_MLOCK)
- host1x_debug_output(o, "RELEASE_MLOCK(index=%d)\n",
+ host1x_debug_cont(o, "RELEASE_MLOCK(index=%d)\n",
val & 0xff);
else
- host1x_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val);
+ host1x_debug_cont(o, "EXTEND_UNKNOWN(%08x)\n", val);
return 0;
default:
+ host1x_debug_cont(o, "UNKNOWN\n");
return 0;
}
}
@@ -110,6 +172,7 @@ static void show_gather(struct output *o, phys_addr_t phys_addr,
/* Map dmaget cursor to corresponding mem handle */
u32 offset = phys_addr - pin_addr;
unsigned int data_count = 0, i;
+ u32 payload = INVALID_PAYLOAD;
/*
* Sometimes we're given different hardware address to the same
@@ -126,11 +189,11 @@ static void show_gather(struct output *o, phys_addr_t phys_addr,
u32 val = *(map_addr + offset / 4 + i);
if (!data_count) {
- host1x_debug_output(o, "%08x: %08x:", addr, val);
- data_count = show_channel_command(o, val);
+ host1x_debug_output(o, "%08x: %08x: ", addr, val);
+ data_count = show_channel_command(o, val, &payload);
} else {
- host1x_debug_output(o, "%08x%s", val,
- data_count > 0 ? ", " : "])\n");
+ host1x_debug_cont(o, "%08x%s", val,
+ data_count > 1 ? ", " : "])\n");
data_count--;
}
}
@@ -174,138 +237,11 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
}
}
-static void host1x_debug_show_channel_cdma(struct host1x *host,
- struct host1x_channel *ch,
- struct output *o)
-{
- struct host1x_cdma *cdma = &ch->cdma;
- u32 dmaput, dmaget, dmactrl;
- u32 cbstat, cbread;
- u32 val, base, baseval;
-
- dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
- dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
- dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
- cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
- cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
-
- host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev));
-
- if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
- !ch->cdma.push_buffer.mapped) {
- host1x_debug_output(o, "inactive\n\n");
- return;
- }
-
- if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
- HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
- HOST1X_UCLASS_WAIT_SYNCPT)
- host1x_debug_output(o, "waiting on syncpt %d val %d\n",
- cbread >> 24, cbread & 0xffffff);
- else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
- HOST1X_CLASS_HOST1X &&
- HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
- HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
- base = (cbread >> 16) & 0xff;
- baseval =
- host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
- val = cbread & 0xffff;
- host1x_debug_output(o, "waiting on syncpt %d val %d (base %d = %d; offset = %d)\n",
- cbread >> 24, baseval + val, base,
- baseval, val);
- } else
- host1x_debug_output(o, "active class %02x, offset %04x, val %08x\n",
- HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat),
- HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat),
- cbread);
-
- host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
- dmaput, dmaget, dmactrl);
- host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
-
- show_channel_gathers(o, cdma);
- host1x_debug_output(o, "\n");
-}
-
-static void host1x_debug_show_channel_fifo(struct host1x *host,
- struct host1x_channel *ch,
- struct output *o)
-{
- u32 val, rd_ptr, wr_ptr, start, end;
- unsigned int data_count = 0;
-
- host1x_debug_output(o, "%u: fifo:\n", ch->id);
-
- val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
- host1x_debug_output(o, "FIFOSTAT %08x\n", val);
- if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(val)) {
- host1x_debug_output(o, "[empty]\n");
- return;
- }
-
- host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
- host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
- HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id),
- HOST1X_SYNC_CFPEEK_CTRL);
-
- val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS);
- rd_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(val);
- wr_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(val);
-
- val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id));
- start = HOST1X_SYNC_CF_SETUP_BASE_V(val);
- end = HOST1X_SYNC_CF_SETUP_LIMIT_V(val);
-
- do {
- host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
- host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
- HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) |
- HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(rd_ptr),
- HOST1X_SYNC_CFPEEK_CTRL);
- val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ);
-
- if (!data_count) {
- host1x_debug_output(o, "%08x:", val);
- data_count = show_channel_command(o, val);
- } else {
- host1x_debug_output(o, "%08x%s", val,
- data_count > 0 ? ", " : "])\n");
- data_count--;
- }
-
- if (rd_ptr == end)
- rd_ptr = start;
- else
- rd_ptr++;
- } while (rd_ptr != wr_ptr);
-
- if (data_count)
- host1x_debug_output(o, ", ...])\n");
- host1x_debug_output(o, "\n");
-
- host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
-}
-
-static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
-{
- unsigned int i;
-
- host1x_debug_output(o, "---- mlocks ----\n");
-
- for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
- u32 owner =
- host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
- if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
- host1x_debug_output(o, "%u: locked by channel %u\n",
- i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner));
- else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
- host1x_debug_output(o, "%u: locked by cpu\n", i);
- else
- host1x_debug_output(o, "%u: unlocked\n", i);
- }
-
- host1x_debug_output(o, "\n");
-}
+#if HOST1X_HW >= 6
+#include "debug_hw_1x06.c"
+#else
+#include "debug_hw_1x01.c"
+#endif
static const struct host1x_debug_ops host1x_debug_ops = {
.show_channel_cdma = host1x_debug_show_channel_cdma,
diff --git a/drivers/gpu/host1x/hw/debug_hw_1x01.c b/drivers/gpu/host1x/hw/debug_hw_1x01.c
new file mode 100644
index 000000000000..8790d5fd5f20
--- /dev/null
+++ b/drivers/gpu/host1x/hw/debug_hw_1x01.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011-2013 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "../dev.h"
+#include "../debug.h"
+#include "../cdma.h"
+#include "../channel.h"
+
+static void host1x_debug_show_channel_cdma(struct host1x *host,
+ struct host1x_channel *ch,
+ struct output *o)
+{
+ struct host1x_cdma *cdma = &ch->cdma;
+ u32 dmaput, dmaget, dmactrl;
+ u32 cbstat, cbread;
+ u32 val, base, baseval;
+
+ dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
+ dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
+ dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
+ cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
+ cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
+
+ host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev));
+
+ if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
+ !ch->cdma.push_buffer.mapped) {
+ host1x_debug_output(o, "inactive\n\n");
+ return;
+ }
+
+ if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
+ HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+ HOST1X_UCLASS_WAIT_SYNCPT)
+ host1x_debug_output(o, "waiting on syncpt %d val %d\n",
+ cbread >> 24, cbread & 0xffffff);
+ else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
+ HOST1X_CLASS_HOST1X &&
+ HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+ HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
+ base = (cbread >> 16) & 0xff;
+ baseval =
+ host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
+ val = cbread & 0xffff;
+ host1x_debug_output(o, "waiting on syncpt %d val %d (base %d = %d; offset = %d)\n",
+ cbread >> 24, baseval + val, base,
+ baseval, val);
+ } else
+ host1x_debug_output(o, "active class %02x, offset %04x, val %08x\n",
+ HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat),
+ HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat),
+ cbread);
+
+ host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
+ dmaput, dmaget, dmactrl);
+ host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);
+
+ show_channel_gathers(o, cdma);
+ host1x_debug_output(o, "\n");
+}
+
+static void host1x_debug_show_channel_fifo(struct host1x *host,
+ struct host1x_channel *ch,
+ struct output *o)
+{
+ u32 val, rd_ptr, wr_ptr, start, end;
+ unsigned int data_count = 0;
+
+ host1x_debug_output(o, "%u: fifo:\n", ch->id);
+
+ val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
+ host1x_debug_output(o, "FIFOSTAT %08x\n", val);
+ if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(val)) {
+ host1x_debug_output(o, "[empty]\n");
+ return;
+ }
+
+ host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+ host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
+ HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id),
+ HOST1X_SYNC_CFPEEK_CTRL);
+
+ val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS);
+ rd_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(val);
+ wr_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(val);
+
+ val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id));
+ start = HOST1X_SYNC_CF_SETUP_BASE_V(val);
+ end = HOST1X_SYNC_CF_SETUP_LIMIT_V(val);
+
+ do {
+ host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+ host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
+ HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) |
+ HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(rd_ptr),
+ HOST1X_SYNC_CFPEEK_CTRL);
+ val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ);
+
+ if (!data_count) {
+ host1x_debug_output(o, "%08x: ", val);
+ data_count = show_channel_command(o, val, NULL);
+ } else {
+ host1x_debug_cont(o, "%08x%s", val,
+ data_count > 1 ? ", " : "])\n");
+ data_count--;
+ }
+
+ if (rd_ptr == end)
+ rd_ptr = start;
+ else
+ rd_ptr++;
+ } while (rd_ptr != wr_ptr);
+
+ if (data_count)
+ host1x_debug_cont(o, ", ...])\n");
+ host1x_debug_output(o, "\n");
+
+ host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
+}
+
+static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
+{
+ unsigned int i;
+
+ host1x_debug_output(o, "---- mlocks ----\n");
+
+ for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
+ u32 owner =
+ host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
+ if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
+ host1x_debug_output(o, "%u: locked by channel %u\n",
+ i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner));
+ else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
+ host1x_debug_output(o, "%u: locked by cpu\n", i);
+ else
+ host1x_debug_output(o, "%u: unlocked\n", i);
+ }
+
+ host1x_debug_output(o, "\n");
+}
diff --git a/drivers/gpu/host1x/hw/debug_hw_1x06.c b/drivers/gpu/host1x/hw/debug_hw_1x06.c
new file mode 100644
index 000000000000..b503c740c022
--- /dev/null
+++ b/drivers/gpu/host1x/hw/debug_hw_1x06.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@android.com>
+ *
+ * Copyright (C) 2011-2017 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "../dev.h"
+#include "../debug.h"
+#include "../cdma.h"
+#include "../channel.h"
+
+static void host1x_debug_show_channel_cdma(struct host1x *host,
+ struct host1x_channel *ch,
+ struct output *o)
+{
+ struct host1x_cdma *cdma = &ch->cdma;
+ u32 dmaput, dmaget, dmactrl;
+ u32 offset, class;
+ u32 ch_stat;
+
+ dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
+ dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
+ dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
+ offset = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDP_OFFSET);
+ class = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDP_CLASS);
+ ch_stat = host1x_ch_readl(ch, HOST1X_CHANNEL_CHANNELSTAT);
+
+ host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev));
+
+ if (dmactrl & HOST1X_CHANNEL_DMACTRL_DMASTOP ||
+ !ch->cdma.push_buffer.mapped) {
+ host1x_debug_output(o, "inactive\n\n");
+ return;
+ }
+
+ if (class == HOST1X_CLASS_HOST1X && offset == HOST1X_UCLASS_WAIT_SYNCPT)
+ host1x_debug_output(o, "waiting on syncpt\n");
+ else
+ host1x_debug_output(o, "active class %02x, offset %04x\n",
+ class, offset);
+
+ host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
+ dmaput, dmaget, dmactrl);
+ host1x_debug_output(o, "CHANNELSTAT %02x\n", ch_stat);
+
+ show_channel_gathers(o, cdma);
+ host1x_debug_output(o, "\n");
+}
+
+static void host1x_debug_show_channel_fifo(struct host1x *host,
+ struct host1x_channel *ch,
+ struct output *o)
+{
+ u32 val, rd_ptr, wr_ptr, start, end;
+ u32 payload = INVALID_PAYLOAD;
+ unsigned int data_count = 0;
+
+ host1x_debug_output(o, "%u: fifo:\n", ch->id);
+
+ val = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDFIFO_STAT);
+ host1x_debug_output(o, "CMDFIFO_STAT %08x\n", val);
+ if (val & HOST1X_CHANNEL_CMDFIFO_STAT_EMPTY) {
+ host1x_debug_output(o, "[empty]\n");
+ return;
+ }
+
+ val = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDFIFO_RDATA);
+ host1x_debug_output(o, "CMDFIFO_RDATA %08x\n", val);
+
+ /* Peek pointer values are invalid during SLCG, so disable it */
+ host1x_hypervisor_writel(host, 0x1, HOST1X_HV_ICG_EN_OVERRIDE);
+
+ val = 0;
+ val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE;
+ val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(ch->id);
+ host1x_hypervisor_writel(host, val, HOST1X_HV_CMDFIFO_PEEK_CTRL);
+
+ val = host1x_hypervisor_readl(host, HOST1X_HV_CMDFIFO_PEEK_PTRS);
+ rd_ptr = HOST1X_HV_CMDFIFO_PEEK_PTRS_RD_PTR_V(val);
+ wr_ptr = HOST1X_HV_CMDFIFO_PEEK_PTRS_WR_PTR_V(val);
+
+ val = host1x_hypervisor_readl(host, HOST1X_HV_CMDFIFO_SETUP(ch->id));
+ start = HOST1X_HV_CMDFIFO_SETUP_BASE_V(val);
+ end = HOST1X_HV_CMDFIFO_SETUP_LIMIT_V(val);
+
+ do {
+ val = 0;
+ val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE;
+ val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(ch->id);
+ val |= HOST1X_HV_CMDFIFO_PEEK_CTRL_ADDR(rd_ptr);
+ host1x_hypervisor_writel(host, val,
+ HOST1X_HV_CMDFIFO_PEEK_CTRL);
+
+ val = host1x_hypervisor_readl(host,
+ HOST1X_HV_CMDFIFO_PEEK_READ);
+
+ if (!data_count) {
+ host1x_debug_output(o, "%03x 0x%08x: ",
+ rd_ptr - start, val);
+ data_count = show_channel_command(o, val, &payload);
+ } else {
+ host1x_debug_cont(o, "%08x%s", val,
+ data_count > 1 ? ", " : "])\n");
+ data_count--;
+ }
+
+ if (rd_ptr == end)
+ rd_ptr = start;
+ else
+ rd_ptr++;
+ } while (rd_ptr != wr_ptr);
+
+ if (data_count)
+ host1x_debug_cont(o, ", ...])\n");
+ host1x_debug_output(o, "\n");
+
+ host1x_hypervisor_writel(host, 0x0, HOST1X_HV_CMDFIFO_PEEK_CTRL);
+ host1x_hypervisor_writel(host, 0x0, HOST1X_HV_ICG_EN_OVERRIDE);
+}
+
+static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
+{
+ /* TODO */
+}
diff --git a/drivers/gpu/host1x/hw/host1x01.c b/drivers/gpu/host1x/hw/host1x01.c
index 859b73beb4d0..bb124f8b4af8 100644
--- a/drivers/gpu/host1x/hw/host1x01.c
+++ b/drivers/gpu/host1x/hw/host1x01.c
@@ -21,6 +21,8 @@
#include "host1x01_hardware.h"
/* include code */
+#define HOST1X_HW 1
+
#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
diff --git a/drivers/gpu/host1x/hw/host1x02.c b/drivers/gpu/host1x/hw/host1x02.c
index 928946c2144b..c5f85dbedb98 100644
--- a/drivers/gpu/host1x/hw/host1x02.c
+++ b/drivers/gpu/host1x/hw/host1x02.c
@@ -21,6 +21,8 @@
#include "host1x02_hardware.h"
/* include code */
+#define HOST1X_HW 2
+
#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
diff --git a/drivers/gpu/host1x/hw/host1x04.c b/drivers/gpu/host1x/hw/host1x04.c
index 8007c70fa9c4..f102a1a7743f 100644
--- a/drivers/gpu/host1x/hw/host1x04.c
+++ b/drivers/gpu/host1x/hw/host1x04.c
@@ -21,6 +21,8 @@
#include "host1x04_hardware.h"
/* include code */
+#define HOST1X_HW 4
+
#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
diff --git a/drivers/gpu/host1x/hw/host1x05.c b/drivers/gpu/host1x/hw/host1x05.c
index 047097ce3bad..2b1239d6ec67 100644
--- a/drivers/gpu/host1x/hw/host1x05.c
+++ b/drivers/gpu/host1x/hw/host1x05.c
@@ -21,6 +21,8 @@
#include "host1x05_hardware.h"
/* include code */
+#define HOST1X_HW 5
+
#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
diff --git a/drivers/gpu/host1x/hw/host1x06.c b/drivers/gpu/host1x/hw/host1x06.c
new file mode 100644
index 000000000000..a66230827c59
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x06.c
@@ -0,0 +1,44 @@
+/*
+ * Host1x init for Tegra186 SoCs
+ *
+ * Copyright (c) 2017 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x06.h"
+#include "host1x06_hardware.h"
+
+/* include code */
+#define HOST1X_HW 6
+
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x06_init(struct host1x *host)
+{
+ host->channel_op = &host1x_channel_ops;
+ host->cdma_op = &host1x_cdma_ops;
+ host->cdma_pb_op = &host1x_pushbuffer_ops;
+ host->syncpt_op = &host1x_syncpt_ops;
+ host->intr_op = &host1x_intr_ops;
+ host->debug_op = &host1x_debug_ops;
+
+ return 0;
+}
diff --git a/drivers/gpu/host1x/hw/host1x06.h b/drivers/gpu/host1x/hw/host1x06.h
new file mode 100644
index 000000000000..d9abe1489241
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x06.h
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra186 SoCs
+ *
+ * Copyright (c) 2017 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X06_H
+#define HOST1X_HOST1X06_H
+
+struct host1x;
+
+int host1x06_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/host1x06_hardware.h b/drivers/gpu/host1x/hw/host1x06_hardware.h
new file mode 100644
index 000000000000..3039c92ea605
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x06_hardware.h
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra186
+ *
+ * Copyright (c) 2017 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X06_HARDWARE_H
+#define __HOST1X_HOST1X06_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x06_uclass.h"
+#include "hw_host1x06_vm.h"
+#include "hw_host1x06_hypervisor.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_wait_syncpt_indx_f(indx)
+ | host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+ | host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+ unsigned indx, unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_wait_syncpt_base_indx_f(indx)
+ | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+ unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+ unsigned cond, unsigned indx)
+{
+ return host1x_uclass_incr_syncpt_cond_f(cond)
+ | host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indbe_f(0xf)
+ | host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset);
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset)
+ | host1x_uclass_indoff_rwn_read_v();
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+ unsigned class_id, unsigned offset, unsigned mask)
+{
+ return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+ return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+ return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+ return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+ host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+ return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+ return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
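
For illustration only (not part of the patch): a minimal sketch of how the opcode helpers defined above could be combined into a small command stream. The class ID (0x01), register offset (0x500), condition code (1) and syncpoint index (3) are made-up placeholders, not values taken from this patch.

	static void example_fill_pushbuffer(u32 *words)
	{
		unsigned int i = 0;

		/* select a (hypothetical) client class, no immediate register writes */
		words[i++] = host1x_opcode_setclass(0x01, 0, 0);

		/* write one register at a (hypothetical) offset 0x500 */
		words[i++] = host1x_opcode_incr(0x500, 1);
		words[i++] = 0x12345678;	/* value written to that register */

		/* increment (hypothetical) syncpoint 3 with condition code 1 */
		words[i++] = host1x_opcode_imm_incr_syncpt(1, 3);

		/* pad with a no-op */
		words[i] = HOST1X_OPCODE_NOP;
	}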
diff --git a/drivers/gpu/host1x/hw/hw_host1x04_channel.h b/drivers/gpu/host1x/hw/hw_host1x04_channel.h
index 95e6f96142b9..2e8b635aa660 100644
--- a/drivers/gpu/host1x/hw/hw_host1x04_channel.h
+++ b/drivers/gpu/host1x/hw/hw_host1x04_channel.h
@@ -117,5 +117,17 @@ static inline u32 host1x_channel_dmactrl_dmainitget(void)
}
#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
host1x_channel_dmactrl_dmainitget()
+static inline u32 host1x_channel_channelctrl_r(void)
+{
+ return 0x98;
+}
+#define HOST1X_CHANNEL_CHANNELCTRL \
+ host1x_channel_channelctrl_r()
+static inline u32 host1x_channel_channelctrl_kernel_filter_gbuffer_f(u32 v)
+{
+ return (v & 0x1) << 2;
+}
+#define HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(v) \
+ host1x_channel_channelctrl_kernel_filter_gbuffer_f(v)
#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x05_channel.h b/drivers/gpu/host1x/hw/hw_host1x05_channel.h
index fce6e2c1ff4c..abbbc2641ce6 100644
--- a/drivers/gpu/host1x/hw/hw_host1x05_channel.h
+++ b/drivers/gpu/host1x/hw/hw_host1x05_channel.h
@@ -117,5 +117,17 @@ static inline u32 host1x_channel_dmactrl_dmainitget(void)
}
#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
host1x_channel_dmactrl_dmainitget()
+static inline u32 host1x_channel_channelctrl_r(void)
+{
+ return 0x98;
+}
+#define HOST1X_CHANNEL_CHANNELCTRL \
+ host1x_channel_channelctrl_r()
+static inline u32 host1x_channel_channelctrl_kernel_filter_gbuffer_f(u32 v)
+{
+ return (v & 0x1) << 2;
+}
+#define HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(v) \
+ host1x_channel_channelctrl_kernel_filter_gbuffer_f(v)
#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x06_hypervisor.h b/drivers/gpu/host1x/hw/hw_host1x06_hypervisor.h
new file mode 100644
index 000000000000..c05dab8a178b
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x06_hypervisor.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2017 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#define HOST1X_HV_SYNCPT_PROT_EN 0x1ac4
+#define HOST1X_HV_SYNCPT_PROT_EN_CH_EN BIT(1)
+#define HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(x) (0x2020 + (x * 4))
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL 0x233c
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_ADDR(x) (x)
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(x) ((x) << 16)
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE BIT(31)
+#define HOST1X_HV_CMDFIFO_PEEK_READ 0x2340
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS 0x2344
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS_WR_PTR_V(x) (((x) >> 16) & 0xfff)
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS_RD_PTR_V(x) ((x) & 0xfff)
+#define HOST1X_HV_CMDFIFO_SETUP(x) (0x2588 + (x * 4))
+#define HOST1X_HV_CMDFIFO_SETUP_LIMIT_V(x) (((x) >> 16) & 0xfff)
+#define HOST1X_HV_CMDFIFO_SETUP_BASE_V(x) ((x) & 0xfff)
+#define HOST1X_HV_ICG_EN_OVERRIDE 0x2aa8
diff --git a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
new file mode 100644
index 000000000000..4457486c72b0
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2017 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef HOST1X_HW_HOST1X06_UCLASS_H
+#define HOST1X_HW_HOST1X06_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+ return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+ host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+ return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+ host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+ return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+ host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+ host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+ host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+ return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+ host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+ return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+ host1x_uclass_load_syncpt_base_r()
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+ host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+ return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+ host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+ return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+ host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+ return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+ host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+ return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+ host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+ return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+ host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+ return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+ host1x_uclass_indoff_rwn_read_v()
+
+#endif
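
For illustration only (not part of the patch): a small sketch of the accessor naming scheme documented at the top of this header, using helpers that the header actually defines. The index and threshold arguments are placeholders.

	static u32 example_wait_syncpt_value(unsigned int id, u32 threshold)
	{
		/*
		 * host1x_uclass_wait_syncpt_r() (0x8) is the register this value
		 * would be written to; the _f() helpers shift each argument into
		 * its bit field so the results can be OR'd into one register value.
		 */
		return host1x_uclass_wait_syncpt_indx_f(id) |
		       host1x_uclass_wait_syncpt_thresh_f(threshold);
	}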
diff --git a/drivers/gpu/host1x/hw/hw_host1x06_vm.h b/drivers/gpu/host1x/hw/hw_host1x06_vm.h
new file mode 100644
index 000000000000..e54b33902332
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x06_vm.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#define HOST1X_CHANNEL_DMASTART 0x0000
+#define HOST1X_CHANNEL_DMASTART_HI 0x0004
+#define HOST1X_CHANNEL_DMAPUT 0x0008
+#define HOST1X_CHANNEL_DMAPUT_HI 0x000c
+#define HOST1X_CHANNEL_DMAGET 0x0010
+#define HOST1X_CHANNEL_DMAGET_HI 0x0014
+#define HOST1X_CHANNEL_DMAEND 0x0018
+#define HOST1X_CHANNEL_DMAEND_HI 0x001c
+#define HOST1X_CHANNEL_DMACTRL 0x0020
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP BIT(0)
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST BIT(1)
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET BIT(2)
+#define HOST1X_CHANNEL_CMDFIFO_STAT 0x0024
+#define HOST1X_CHANNEL_CMDFIFO_STAT_EMPTY BIT(13)
+#define HOST1X_CHANNEL_CMDFIFO_RDATA 0x0028
+#define HOST1X_CHANNEL_CMDP_OFFSET 0x0030
+#define HOST1X_CHANNEL_CMDP_CLASS 0x0034
+#define HOST1X_CHANNEL_CHANNELSTAT 0x0038
+#define HOST1X_CHANNEL_CMDPROC_STOP 0x0048
+#define HOST1X_CHANNEL_TEARDOWN 0x004c
+
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(x) (0x6400 + 4*(x))
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(x) (0x6464 + 4*(x))
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(x) (0x652c + 4*(x))
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(x) (0x6590 + 4*(x))
+#define HOST1X_SYNC_SYNCPT_BASE(x) (0x8000 + 4*(x))
+#define HOST1X_SYNC_SYNCPT(x) (0x8080 + 4*(x))
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(x) (0x8a00 + 4*(x))
+#define HOST1X_SYNC_SYNCPT_CH_APP(x) (0x9384 + 4*(x))
+#define HOST1X_SYNC_SYNCPT_CH_APP_CH(v) (((v) & 0x3f) << 8)
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index 37ebb51703fa..329239237090 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -72,6 +72,23 @@ static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
}
}
+static void intr_hw_init(struct host1x *host, u32 cpm)
+{
+#if HOST1X_HW < 6
+ /* disable the ip_busy_timeout. this prevents write drops */
+ host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT);
+
+ /*
+ * increase the auto-ack timeout to the maximum value. 2d will hang
+ * otherwise on Tegra2.
+ */
+ host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
+
+ /* update host clocks per usec */
+ host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);
+#endif
+}
+
static int
_host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
void (*syncpt_thresh_work)(struct work_struct *))
@@ -92,17 +109,7 @@ _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
return err;
}
- /* disable the ip_busy_timeout. this prevents write drops */
- host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT);
-
- /*
- * increase the auto-ack timout to the maximum value. 2d will hang
- * otherwise on Tegra2.
- */
- host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
-
- /* update host clocks per usec */
- host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);
+ intr_hw_init(host, cpm);
return 0;
}
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index 7b0270d60742..7dfd47d74f89 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -106,6 +106,50 @@ static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
return 0;
}
+/**
+ * syncpt_assign_to_channel() - Assign syncpoint to channel
+ * @sp: syncpoint
+ * @ch: channel
+ *
+ * On chips with the syncpoint protection feature (Tegra186+), assign @sp to
+ * @ch, preventing other channels from incrementing the syncpoints. If @ch is
+ * NULL, unassigns the syncpoint.
+ *
+ * On older chips, do nothing.
+ */
+static void syncpt_assign_to_channel(struct host1x_syncpt *sp,
+ struct host1x_channel *ch)
+{
+#if HOST1X_HW >= 6
+ struct host1x *host = sp->host;
+
+ if (!host->hv_regs)
+ return;
+
+ host1x_sync_writel(host,
+ HOST1X_SYNC_SYNCPT_CH_APP_CH(ch ? ch->id : 0xff),
+ HOST1X_SYNC_SYNCPT_CH_APP(sp->id));
+#endif
+}
+
+/**
+ * syncpt_enable_protection() - Enable syncpoint protection
+ * @host: host1x instance
+ *
+ * On chips with the syncpoint protection feature (Tegra186+), enable this
+ * feature. On older chips, do nothing.
+ */
+static void syncpt_enable_protection(struct host1x *host)
+{
+#if HOST1X_HW >= 6
+ if (!host->hv_regs)
+ return;
+
+ host1x_hypervisor_writel(host, HOST1X_HV_SYNCPT_PROT_EN_CH_EN,
+ HOST1X_HV_SYNCPT_PROT_EN);
+#endif
+}
+
static const struct host1x_syncpt_ops host1x_syncpt_ops = {
.restore = syncpt_restore,
.restore_wait_base = syncpt_restore_wait_base,
@@ -113,4 +157,6 @@ static const struct host1x_syncpt_ops host1x_syncpt_ops = {
.load = syncpt_load,
.cpu_incr = syncpt_cpu_incr,
.patch_wait = syncpt_patch_wait,
+ .assign_to_channel = syncpt_assign_to_channel,
+ .enable_protection = syncpt_enable_protection,
};
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 048ac9e344ce..a2a952adc136 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -54,7 +54,7 @@ static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
}
static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
- struct device *dev,
+ struct host1x_client *client,
unsigned long flags)
{
int i;
@@ -76,11 +76,11 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
}
name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
- dev ? dev_name(dev) : NULL);
+ client ? dev_name(client->dev) : NULL);
if (!name)
goto free_base;
- sp->dev = dev;
+ sp->client = client;
sp->name = name;
if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
@@ -398,6 +398,13 @@ int host1x_syncpt_init(struct host1x *host)
for (i = 0; i < host->info->nb_pts; i++) {
syncpt[i].id = i;
syncpt[i].host = host;
+
+ /*
+ * Unassign syncpt from channels for purposes of Tegra186
+ * syncpoint protection. This prevents any channel from
+ * accessing it until it is reassigned.
+ */
+ host1x_hw_syncpt_assign_to_channel(host, &syncpt[i], NULL);
}
for (i = 0; i < host->info->nb_bases; i++)
@@ -408,6 +415,7 @@ int host1x_syncpt_init(struct host1x *host)
host->bases = bases;
host1x_syncpt_restore(host);
+ host1x_hw_syncpt_enable_protection(host);
/* Allocate sync point to use for clearing waits for expired fences */
host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
@@ -419,7 +427,7 @@ int host1x_syncpt_init(struct host1x *host)
/**
* host1x_syncpt_request() - request a syncpoint
- * @dev: device requesting the syncpoint
+ * @client: client requesting the syncpoint
* @flags: flags
*
* host1x client drivers can use this function to allocate a syncpoint for
@@ -427,12 +435,12 @@ int host1x_syncpt_init(struct host1x *host)
* use by the client exclusively. When no longer using a syncpoint, a host1x
* client driver needs to release it using host1x_syncpt_free().
*/
-struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
unsigned long flags)
{
- struct host1x *host = dev_get_drvdata(dev->parent);
+ struct host1x *host = dev_get_drvdata(client->parent->parent);
- return host1x_syncpt_alloc(host, dev, flags);
+ return host1x_syncpt_alloc(host, client, flags);
}
EXPORT_SYMBOL(host1x_syncpt_request);
@@ -456,7 +464,7 @@ void host1x_syncpt_free(struct host1x_syncpt *sp)
host1x_syncpt_base_free(sp->base);
kfree(sp->name);
sp->base = NULL;
- sp->dev = NULL;
+ sp->client = NULL;
sp->name = NULL;
sp->client_managed = false;
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index f719205105ac..9d88d37c2397 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -44,7 +44,7 @@ struct host1x_syncpt {
const char *name;
bool client_managed;
struct host1x *host;
- struct device *dev;
+ struct host1x_client *client;
struct host1x_syncpt_base *base;
/* interrupt data */
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 76875f6299b8..d35d6d271f3f 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1402,29 +1402,14 @@ static struct miscdevice vga_arb_device = {
MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops
};
-static int __init vga_arb_device_init(void)
+static void __init vga_arb_select_default_device(void)
{
- int rc;
struct pci_dev *pdev;
struct vga_device *vgadev;
- rc = misc_register(&vga_arb_device);
- if (rc < 0)
- pr_err("error %d registering device\n", rc);
-
- bus_register_notifier(&pci_bus_type, &pci_notifier);
-
- /* We add all pci devices satisfying vga class in the arbiter by
- * default */
- pdev = NULL;
- while ((pdev =
- pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_ANY_ID, pdev)) != NULL)
- vga_arbiter_add_pci_device(pdev);
-
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
list_for_each_entry(vgadev, &vga_list, list) {
struct device *dev = &vgadev->pdev->dev;
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
/*
* Override vga_arbiter_add_pci_device()'s I/O based detection
* as it may take the wrong device (e.g. on Apple system under
@@ -1461,13 +1446,66 @@ static int __init vga_arb_device_init(void)
vgaarb_info(dev, "overriding boot device\n");
vga_set_default_device(vgadev->pdev);
}
+ }
#endif
+
+ if (!vga_default_device()) {
+ list_for_each_entry(vgadev, &vga_list, list) {
+ struct device *dev = &vgadev->pdev->dev;
+ u16 cmd;
+
+ pdev = vgadev->pdev;
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+ vgaarb_info(dev, "setting as boot device (VGA legacy resources not available)\n");
+ vga_set_default_device(pdev);
+ break;
+ }
+ }
+ }
+
+ if (!vga_default_device()) {
+ vgadev = list_first_entry_or_null(&vga_list,
+ struct vga_device, list);
+ if (vgadev) {
+ struct device *dev = &vgadev->pdev->dev;
+ vgaarb_info(dev, "setting as boot device (VGA legacy resources not available)\n");
+ vga_set_default_device(vgadev->pdev);
+ }
+ }
+}
+
+static int __init vga_arb_device_init(void)
+{
+ int rc;
+ struct pci_dev *pdev;
+ struct vga_device *vgadev;
+
+ rc = misc_register(&vga_arb_device);
+ if (rc < 0)
+ pr_err("error %d registering device\n", rc);
+
+ bus_register_notifier(&pci_bus_type, &pci_notifier);
+
+ /* We add all PCI devices satisfying VGA class in the arbiter by
+ * default */
+ pdev = NULL;
+ while ((pdev =
+ pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_ANY_ID, pdev)) != NULL)
+ vga_arbiter_add_pci_device(pdev);
+
+ list_for_each_entry(vgadev, &vga_list, list) {
+ struct device *dev = &vgadev->pdev->dev;
+
if (vgadev->bridge_has_one_vga)
vgaarb_info(dev, "bridge control possible\n");
else
vgaarb_info(dev, "no bridge control possible\n");
}
+ vga_arb_select_default_device();
+
pr_info("loaded\n");
return rc;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 374301fcbc86..779c5ae47f36 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -230,7 +230,7 @@ config HID_CMEDIA
config HID_CP2112
tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support"
- depends on USB_HID && I2C && GPIOLIB
+ depends on USB_HID && HIDRAW && I2C && GPIOLIB
select GPIOLIB_IRQCHIP
---help---
Support for Silicon Labs CP2112 HID USB to SMBus Master Bridge.
@@ -750,11 +750,10 @@ config HID_PRIMAX
HID standard.
config HID_RETRODE
- tristate "Retrode"
+ tristate "Retrode 2 USB adapter for vintage video games"
depends on USB_HID
---help---
Support for
-
* Retrode 2 cartridge and controller adapter
config HID_ROCCAT
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index ed9c0ea5b026..b1eeb4839bfc 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -52,8 +52,30 @@
#define ADDRESS_U1_PAD_BTN 0x00800052
#define ADDRESS_U1_SP_BTN 0x0080009F
+#define T4_INPUT_REPORT_LEN sizeof(struct t4_input_report)
+#define T4_FEATURE_REPORT_LEN T4_INPUT_REPORT_LEN
+#define T4_FEATURE_REPORT_ID 7
+#define T4_CMD_REGISTER_READ 0x08
+#define T4_CMD_REGISTER_WRITE 0x07
+
+#define T4_ADDRESS_BASE 0xC2C0
+#define PRM_SYS_CONFIG_1 (T4_ADDRESS_BASE + 0x0002)
+#define T4_PRM_FEED_CONFIG_1 (T4_ADDRESS_BASE + 0x0004)
+#define T4_PRM_FEED_CONFIG_4 (T4_ADDRESS_BASE + 0x001A)
+#define T4_PRM_ID_CONFIG_3 (T4_ADDRESS_BASE + 0x00B0)
+
+
+#define T4_FEEDCFG4_ADVANCED_ABS_ENABLE 0x01
+#define T4_I2C_ABS 0x78
+
+#define T4_COUNT_PER_ELECTRODE 256
#define MAX_TOUCHES 5
+enum dev_num {
+ U1,
+ T4,
+ UNKNOWN,
+};
/**
* struct u1_data
*
@@ -61,43 +83,173 @@
* @input2: pointer to the kernel input2 device
* @hdev: pointer to the struct hid_device
*
- * @dev_ctrl: device control parameter
* @dev_type: device type
- * @sen_line_num_x: number of sensor line of X
- * @sen_line_num_y: number of sensor line of Y
- * @pitch_x: sensor pitch of X
- * @pitch_y: sensor pitch of Y
- * @resolution: resolution
- * @btn_info: button information
+ * @max_fingers: total number of fingers
+ * @has_sp: boolean of sp existence
+ * @sp_btn_info: button information
* @x_active_len_mm: active area length of X (mm)
* @y_active_len_mm: active area length of Y (mm)
* @x_max: maximum x coordinate value
* @y_max: maximum y coordinate value
+ * @x_min: minimum x coordinate value
+ * @y_min: minimum y coordinate value
* @btn_cnt: number of buttons
* @sp_btn_cnt: number of stick buttons
*/
-struct u1_dev {
+struct alps_dev {
struct input_dev *input;
struct input_dev *input2;
struct hid_device *hdev;
- u8 dev_ctrl;
- u8 dev_type;
- u8 sen_line_num_x;
- u8 sen_line_num_y;
- u8 pitch_x;
- u8 pitch_y;
- u8 resolution;
- u8 btn_info;
+ enum dev_num dev_type;
+ u8 max_fingers;
+ u8 has_sp;
u8 sp_btn_info;
u32 x_active_len_mm;
u32 y_active_len_mm;
u32 x_max;
u32 y_max;
+ u32 x_min;
+ u32 y_min;
u32 btn_cnt;
u32 sp_btn_cnt;
};
+struct t4_contact_data {
+ u8 palm;
+ u8 x_lo;
+ u8 x_hi;
+ u8 y_lo;
+ u8 y_hi;
+};
+
+struct t4_input_report {
+ u8 reportID;
+ u8 numContacts;
+ struct t4_contact_data contact[5];
+ u8 button;
+ u8 track[5];
+ u8 zx[5], zy[5];
+ u8 palmTime[5];
+ u8 kilroy;
+ u16 timeStamp;
+};
+
+static u16 t4_calc_check_sum(u8 *buffer,
+ unsigned long offset, unsigned long length)
+{
+ u16 sum1 = 0xFF, sum2 = 0xFF;
+ unsigned long i = 0;
+
+ if (offset + length >= 50)
+ return 0;
+
+ while (length > 0) {
+ u32 tlen = length > 20 ? 20 : length;
+
+ length -= tlen;
+
+ do {
+ sum1 += buffer[offset + i];
+ sum2 += sum1;
+ i++;
+ } while (--tlen > 0);
+
+ sum1 = (sum1 & 0xFF) + (sum1 >> 8);
+ sum2 = (sum2 & 0xFF) + (sum2 >> 8);
+ }
+
+ sum1 = (sum1 & 0xFF) + (sum1 >> 8);
+ sum2 = (sum2 & 0xFF) + (sum2 >> 8);
+
+ return(sum2 << 8 | sum1);
+}
+
+static int t4_read_write_register(struct hid_device *hdev, u32 address,
+ u8 *read_val, u8 write_val, bool read_flag)
+{
+ int ret;
+ u16 check_sum;
+ u8 *input;
+ u8 *readbuf;
+
+ input = kzalloc(T4_FEATURE_REPORT_LEN, GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ input[0] = T4_FEATURE_REPORT_ID;
+ if (read_flag) {
+ input[1] = T4_CMD_REGISTER_READ;
+ input[8] = 0x00;
+ } else {
+ input[1] = T4_CMD_REGISTER_WRITE;
+ input[8] = write_val;
+ }
+ put_unaligned_le32(address, input + 2);
+ input[6] = 1;
+ input[7] = 0;
+
+ /* Calculate the checksum */
+ check_sum = t4_calc_check_sum(input, 1, 8);
+ input[9] = (u8)check_sum;
+ input[10] = (u8)(check_sum >> 8);
+ input[11] = 0;
+
+ ret = hid_hw_raw_request(hdev, T4_FEATURE_REPORT_ID, input,
+ T4_FEATURE_REPORT_LEN,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed to read command (%d)\n", ret);
+ goto exit;
+ }
+
+ readbuf = kzalloc(T4_FEATURE_REPORT_LEN, GFP_KERNEL);
+ if (read_flag) {
+ if (!readbuf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ret = hid_hw_raw_request(hdev, T4_FEATURE_REPORT_ID, readbuf,
+ T4_FEATURE_REPORT_LEN,
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed read register (%d)\n", ret);
+ goto exit_readbuf;
+ }
+
+ if (*(u32 *)&readbuf[6] != address) {
+ dev_err(&hdev->dev, "read register address error (%x,%x)\n",
+ *(u32 *)&readbuf[6], address);
+ goto exit_readbuf;
+ }
+
+ if (*(u16 *)&readbuf[10] != 1) {
+ dev_err(&hdev->dev, "read register size error (%x)\n",
+ *(u16 *)&readbuf[10]);
+ goto exit_readbuf;
+ }
+
+ check_sum = t4_calc_check_sum(readbuf, 6, 7);
+ if (*(u16 *)&readbuf[13] != check_sum) {
+ dev_err(&hdev->dev, "read register checksum error (%x,%x)\n",
+ *(u16 *)&readbuf[13], check_sum);
+ goto exit_readbuf;
+ }
+
+ *read_val = readbuf[12];
+ }
+
+ ret = 0;
+
+exit_readbuf:
+ kfree(readbuf);
+exit:
+ kfree(input);
+ return ret;
+}
+
static int u1_read_write_register(struct hid_device *hdev, u32 address,
u8 *read_val, u8 write_val, bool read_flag)
{
@@ -165,21 +317,60 @@ exit:
return ret;
}
-static int alps_raw_event(struct hid_device *hdev,
- struct hid_report *report, u8 *data, int size)
+static int t4_raw_event(struct alps_dev *hdata, u8 *data, int size)
+{
+ unsigned int x, y, z;
+ int i;
+ struct t4_input_report *p_report = (struct t4_input_report *)data;
+
+ if (!data)
+ return 0;
+ for (i = 0; i < hdata->max_fingers; i++) {
+ x = p_report->contact[i].x_hi << 8 | p_report->contact[i].x_lo;
+ y = p_report->contact[i].y_hi << 8 | p_report->contact[i].y_lo;
+ y = hdata->y_max - y + hdata->y_min;
+ z = (p_report->contact[i].palm < 0x80 &&
+ p_report->contact[i].palm > 0) * 62;
+ if (x == 0xffff) {
+ x = 0;
+ y = 0;
+ z = 0;
+ }
+ input_mt_slot(hdata->input, i);
+
+ input_mt_report_slot_state(hdata->input,
+ MT_TOOL_FINGER, z != 0);
+
+ if (!z)
+ continue;
+
+ input_report_abs(hdata->input, ABS_MT_POSITION_X, x);
+ input_report_abs(hdata->input, ABS_MT_POSITION_Y, y);
+ input_report_abs(hdata->input, ABS_MT_PRESSURE, z);
+ }
+ input_mt_sync_frame(hdata->input);
+
+ input_report_key(hdata->input, BTN_LEFT, p_report->button);
+
+ input_sync(hdata->input);
+ return 1;
+}
+
+static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size)
{
unsigned int x, y, z;
int i;
short sp_x, sp_y;
- struct u1_dev *hdata = hid_get_drvdata(hdev);
+ if (!data)
+ return 0;
switch (data[0]) {
case U1_MOUSE_REPORT_ID:
break;
case U1_FEATURE_REPORT_ID:
break;
case U1_ABSOLUTE_REPORT_ID:
- for (i = 0; i < MAX_TOUCHES; i++) {
+ for (i = 0; i < hdata->max_fingers; i++) {
u8 *contact = &data[i * 5];
x = get_unaligned_le16(contact + 3);
@@ -241,122 +432,253 @@ static int alps_raw_event(struct hid_device *hdev,
return 0;
}
-#ifdef CONFIG_PM
-static int alps_post_reset(struct hid_device *hdev)
+static int alps_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
{
- return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
- NULL, U1_TP_ABS_MODE | U1_SP_ABS_MODE, false);
+ int ret = 0;
+ struct alps_dev *hdata = hid_get_drvdata(hdev);
+
+ switch (hdev->product) {
+ case HID_PRODUCT_ID_T4_BTNLESS:
+ ret = t4_raw_event(hdata, data, size);
+ break;
+ default:
+ ret = u1_raw_event(hdata, data, size);
+ break;
+ }
+ return ret;
}
-static int alps_post_resume(struct hid_device *hdev)
+static int __maybe_unused alps_post_reset(struct hid_device *hdev)
{
- return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
- NULL, U1_TP_ABS_MODE | U1_SP_ABS_MODE, false);
+ int ret = -1;
+ struct alps_dev *data = hid_get_drvdata(hdev);
+
+ switch (data->dev_type) {
+ case T4:
+ ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_1,
+ NULL, T4_I2C_ABS, false);
+ ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_4,
+ NULL, T4_FEEDCFG4_ADVANCED_ABS_ENABLE, false);
+ break;
+ case U1:
+ ret = u1_read_write_register(hdev,
+ ADDRESS_U1_DEV_CTRL_1, NULL,
+ U1_TP_ABS_MODE | U1_SP_ABS_MODE, false);
+ break;
+ default:
+ break;
+ }
+ return ret;
}
-#endif /* CONFIG_PM */
-static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
+static int __maybe_unused alps_post_resume(struct hid_device *hdev)
{
- struct u1_dev *data = hid_get_drvdata(hdev);
- struct input_dev *input = hi->input, *input2;
- struct u1_dev devInfo;
- int ret;
- int res_x, res_y, i;
-
- data->input = input;
-
- hid_dbg(hdev, "Opening low level driver\n");
- ret = hid_hw_open(hdev);
- if (ret)
- return ret;
+ return alps_post_reset(hdev);
+}
- /* Allow incoming hid reports */
- hid_device_io_start(hdev);
+static int u1_init(struct hid_device *hdev, struct alps_dev *pri_data)
+{
+ int ret;
+ u8 tmp, dev_ctrl, sen_line_num_x, sen_line_num_y;
+ u8 pitch_x, pitch_y, resolution;
/* Device initialization */
ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
- &devInfo.dev_ctrl, 0, true);
+ &dev_ctrl, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_DEV_CTRL_1 (%d)\n", ret);
goto exit;
}
- devInfo.dev_ctrl &= ~U1_DISABLE_DEV;
- devInfo.dev_ctrl |= U1_TP_ABS_MODE;
+ dev_ctrl &= ~U1_DISABLE_DEV;
+ dev_ctrl |= U1_TP_ABS_MODE;
ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
- NULL, devInfo.dev_ctrl, false);
+ NULL, dev_ctrl, false);
if (ret < 0) {
dev_err(&hdev->dev, "failed to change TP mode (%d)\n", ret);
goto exit;
}
ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_X,
- &devInfo.sen_line_num_x, 0, true);
+ &sen_line_num_x, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_NUM_SENS_X (%d)\n", ret);
goto exit;
}
ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_Y,
- &devInfo.sen_line_num_y, 0, true);
+ &sen_line_num_y, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_NUM_SENS_Y (%d)\n", ret);
goto exit;
}
ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_X,
- &devInfo.pitch_x, 0, true);
+ &pitch_x, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_PITCH_SENS_X (%d)\n", ret);
goto exit;
}
ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_Y,
- &devInfo.pitch_y, 0, true);
+ &pitch_y, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_PITCH_SENS_Y (%d)\n", ret);
goto exit;
}
ret = u1_read_write_register(hdev, ADDRESS_U1_RESO_DWN_ABS,
- &devInfo.resolution, 0, true);
+ &resolution, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_RESO_DWN_ABS (%d)\n", ret);
goto exit;
}
+ pri_data->x_active_len_mm =
+ (pitch_x * (sen_line_num_x - 1)) / 10;
+ pri_data->y_active_len_mm =
+ (pitch_y * (sen_line_num_y - 1)) / 10;
+
+ pri_data->x_max =
+ (resolution << 2) * (sen_line_num_x - 1);
+ pri_data->x_min = 1;
+ pri_data->y_max =
+ (resolution << 2) * (sen_line_num_y - 1);
+ pri_data->y_min = 1;
ret = u1_read_write_register(hdev, ADDRESS_U1_PAD_BTN,
- &devInfo.btn_info, 0, true);
+ &tmp, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_PAD_BTN (%d)\n", ret);
goto exit;
}
+ if ((tmp & 0x0F) == (tmp & 0xF0) >> 4) {
+ pri_data->btn_cnt = (tmp & 0x0F);
+ } else {
+ /* Button pad */
+ pri_data->btn_cnt = 1;
+ }
+ pri_data->has_sp = 0;
/* Check StickPointer device */
ret = u1_read_write_register(hdev, ADDRESS_U1_DEVICE_TYP,
- &devInfo.dev_type, 0, true);
+ &tmp, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_DEVICE_TYP (%d)\n", ret);
goto exit;
}
+ if (tmp & U1_DEVTYPE_SP_SUPPORT) {
+ dev_ctrl |= U1_SP_ABS_MODE;
+ ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
+ NULL, dev_ctrl, false);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed SP mode (%d)\n", ret);
+ goto exit;
+ }
- devInfo.x_active_len_mm =
- (devInfo.pitch_x * (devInfo.sen_line_num_x - 1)) / 10;
- devInfo.y_active_len_mm =
- (devInfo.pitch_y * (devInfo.sen_line_num_y - 1)) / 10;
+ ret = u1_read_write_register(hdev, ADDRESS_U1_SP_BTN,
+ &pri_data->sp_btn_info, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed U1_SP_BTN (%d)\n", ret);
+ goto exit;
+ }
+ pri_data->has_sp = 1;
+ }
+ pri_data->max_fingers = 5;
+exit:
+ return ret;
+}
- devInfo.x_max =
- (devInfo.resolution << 2) * (devInfo.sen_line_num_x - 1);
- devInfo.y_max =
- (devInfo.resolution << 2) * (devInfo.sen_line_num_y - 1);
+static int T4_init(struct hid_device *hdev, struct alps_dev *pri_data)
+{
+ int ret;
+ u8 tmp, sen_line_num_x, sen_line_num_y;
+
+ ret = t4_read_write_register(hdev, T4_PRM_ID_CONFIG_3, &tmp, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed T4_PRM_ID_CONFIG_3 (%d)\n", ret);
+ goto exit;
+ }
+ sen_line_num_x = 16 + ((tmp & 0x0F) | (tmp & 0x08 ? 0xF0 : 0));
+ sen_line_num_y = 12 + (((tmp & 0xF0) >> 4) | (tmp & 0x80 ? 0xF0 : 0));
+
+ pri_data->x_max = sen_line_num_x * T4_COUNT_PER_ELECTRODE;
+ pri_data->x_min = T4_COUNT_PER_ELECTRODE;
+ pri_data->y_max = sen_line_num_y * T4_COUNT_PER_ELECTRODE;
+ pri_data->y_min = T4_COUNT_PER_ELECTRODE;
+ pri_data->x_active_len_mm = pri_data->y_active_len_mm = 0;
+ pri_data->btn_cnt = 1;
+
+ ret = t4_read_write_register(hdev, PRM_SYS_CONFIG_1, &tmp, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed PRM_SYS_CONFIG_1 (%d)\n", ret);
+ goto exit;
+ }
+ tmp |= 0x02;
+ ret = t4_read_write_register(hdev, PRM_SYS_CONFIG_1, NULL, tmp, false);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed PRM_SYS_CONFIG_1 (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_1,
+ NULL, T4_I2C_ABS, false);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed T4_PRM_FEED_CONFIG_1 (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_4, NULL,
+ T4_FEEDCFG4_ADVANCED_ABS_ENABLE, false);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed T4_PRM_FEED_CONFIG_4 (%d)\n", ret);
+ goto exit;
+ }
+ pri_data->max_fingers = 5;
+ pri_data->has_sp = 0;
+exit:
+ return ret;
+}
+
+static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
+{
+ struct alps_dev *data = hid_get_drvdata(hdev);
+ struct input_dev *input = hi->input, *input2;
+ int ret;
+ int res_x, res_y, i;
+
+ data->input = input;
+
+ hid_dbg(hdev, "Opening low level driver\n");
+ ret = hid_hw_open(hdev);
+ if (ret)
+ return ret;
+
+ /* Allow incoming hid reports */
+ hid_device_io_start(hdev);
+ switch (data->dev_type) {
+ case T4:
+ ret = T4_init(hdev, data);
+ break;
+ case U1:
+ ret = u1_init(hdev, data);
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ goto exit;
__set_bit(EV_ABS, input->evbit);
- input_set_abs_params(input, ABS_MT_POSITION_X, 1, devInfo.x_max, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 1, devInfo.y_max, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ data->x_min, data->x_max, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ data->y_min, data->y_max, 0, 0);
- if (devInfo.x_active_len_mm && devInfo.y_active_len_mm) {
- res_x = (devInfo.x_max - 1) / devInfo.x_active_len_mm;
- res_y = (devInfo.y_max - 1) / devInfo.y_active_len_mm;
+ if (data->x_active_len_mm && data->y_active_len_mm) {
+ res_x = (data->x_max - 1) / data->x_active_len_mm;
+ res_y = (data->y_max - 1) / data->y_active_len_mm;
input_abs_set_res(input, ABS_MT_POSITION_X, res_x);
input_abs_set_res(input, ABS_MT_POSITION_Y, res_y);
@@ -364,49 +686,25 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
input_set_abs_params(input, ABS_MT_PRESSURE, 0, 64, 0, 0);
- input_mt_init_slots(input, MAX_TOUCHES, INPUT_MT_POINTER);
+ input_mt_init_slots(input, data->max_fingers, INPUT_MT_POINTER);
__set_bit(EV_KEY, input->evbit);
- if ((devInfo.btn_info & 0x0F) == (devInfo.btn_info & 0xF0) >> 4) {
- devInfo.btn_cnt = (devInfo.btn_info & 0x0F);
- } else {
- /* Button pad */
- devInfo.btn_cnt = 1;
+
+ if (data->btn_cnt == 1)
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
- }
- for (i = 0; i < devInfo.btn_cnt; i++)
+ for (i = 0; i < data->btn_cnt; i++)
__set_bit(BTN_LEFT + i, input->keybit);
-
/* Stick device initialization */
- if (devInfo.dev_type & U1_DEVTYPE_SP_SUPPORT) {
-
+ if (data->has_sp) {
input2 = input_allocate_device();
if (!input2) {
- ret = -ENOMEM;
- goto exit;
- }
-
- data->input2 = input2;
-
- devInfo.dev_ctrl |= U1_SP_ABS_MODE;
- ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
- NULL, devInfo.dev_ctrl, false);
- if (ret < 0) {
- dev_err(&hdev->dev, "failed SP mode (%d)\n", ret);
- input_free_device(input2);
- goto exit;
- }
-
- ret = u1_read_write_register(hdev, ADDRESS_U1_SP_BTN,
- &devInfo.sp_btn_info, 0, true);
- if (ret < 0) {
- dev_err(&hdev->dev, "failed U1_SP_BTN (%d)\n", ret);
input_free_device(input2);
goto exit;
}
+ data->input2 = input2;
input2->phys = input->phys;
input2->name = "DualPoint Stick";
input2->id.bustype = BUS_I2C;
@@ -416,8 +714,8 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
input2->dev.parent = input->dev.parent;
__set_bit(EV_KEY, input2->evbit);
- devInfo.sp_btn_cnt = (devInfo.sp_btn_info & 0x0F);
- for (i = 0; i < devInfo.sp_btn_cnt; i++)
+ data->sp_btn_cnt = (data->sp_btn_info & 0x0F);
+ for (i = 0; i < data->sp_btn_cnt; i++)
__set_bit(BTN_LEFT + i, input2->keybit);
__set_bit(EV_REL, input2->evbit);
@@ -426,8 +724,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
__set_bit(INPUT_PROP_POINTER, input2->propbit);
__set_bit(INPUT_PROP_POINTING_STICK, input2->propbit);
- ret = input_register_device(data->input2);
- if (ret) {
+ if (input_register_device(data->input2)) {
input_free_device(input2);
goto exit;
}
@@ -448,10 +745,9 @@ static int alps_input_mapping(struct hid_device *hdev,
static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
- struct u1_dev *data = NULL;
+ struct alps_dev *data = NULL;
int ret;
-
- data = devm_kzalloc(&hdev->dev, sizeof(struct u1_dev), GFP_KERNEL);
+ data = devm_kzalloc(&hdev->dev, sizeof(struct alps_dev), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -466,6 +762,18 @@ static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
}
+ switch (hdev->product) {
+ case HID_DEVICE_ID_ALPS_T4_BTNLESS:
+ data->dev_type = T4;
+ break;
+ case HID_DEVICE_ID_ALPS_U1_DUAL:
+ case HID_DEVICE_ID_ALPS_U1:
+ data->dev_type = U1;
+ break;
+ default:
+ data->dev_type = UNKNOWN;
+ }
+
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
@@ -483,6 +791,10 @@ static void alps_remove(struct hid_device *hdev)
static const struct hid_device_id alps_id[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
+ USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
+ USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) },
{ }
};
MODULE_DEVICE_TABLE(hid, alps_id);
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 50c294be8324..1bb7b63b3150 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -67,6 +67,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_USE_KBD_BACKLIGHT BIT(5)
#define QUIRK_T100_KEYBOARD BIT(6)
#define QUIRK_T100CHI BIT(7)
+#define QUIRK_G752_KEYBOARD BIT(8)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@@ -670,6 +671,11 @@ static void asus_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
+static const __u8 asus_g752_fixed_rdesc[] = {
+ 0x19, 0x00, /* Usage Minimum (0x00) */
+ 0x2A, 0xFF, 0x00, /* Usage Maximum (0xFF) */
+};
+
static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -708,6 +714,27 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[391] = 0xff;
rdesc[402] = 0x00;
}
+ if (drvdata->quirks & QUIRK_G752_KEYBOARD &&
+ *rsize == 75 && rdesc[61] == 0x15 && rdesc[62] == 0x00) {
+ /* report is missing usage minimum and maximum */
+ __u8 *new_rdesc;
+ size_t new_size = *rsize + sizeof(asus_g752_fixed_rdesc);
+
+ new_rdesc = devm_kzalloc(&hdev->dev, new_size, GFP_KERNEL);
+ if (new_rdesc == NULL)
+ return rdesc;
+
+ hid_info(hdev, "Fixing up Asus G752 keyb report descriptor\n");
+ /* copy the valid part */
+ memcpy(new_rdesc, rdesc, 61);
+ /* insert missing part */
+ memcpy(new_rdesc + 61, asus_g752_fixed_rdesc, sizeof(asus_g752_fixed_rdesc));
+ /* copy remaining data */
+ memcpy(new_rdesc + 61 + sizeof(asus_g752_fixed_rdesc), rdesc + 61, *rsize - 61);
+
+ *rsize = new_size;
+ rdesc = new_rdesc;
+ }
return rdesc;
}
@@ -718,10 +745,12 @@ static const struct hid_device_id asus_devices[] = {
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD), I2C_TOUCHPAD_QUIRKS },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
- USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
+ USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1), QUIRK_USE_KBD_BACKLIGHT },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3), QUIRK_G752_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 330ca983828b..f3fcb836a1f9 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1662,7 +1662,7 @@ static struct bin_attribute dev_bin_attr_report_desc = {
.size = HID_MAX_DESCRIPTOR_SIZE,
};
-static struct device_attribute dev_attr_country = {
+static const struct device_attribute dev_attr_country = {
.attr = { .name = "country", .mode = 0444 },
.show = show_country,
};
@@ -1889,6 +1889,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
#endif
#if IS_ENABLED(CONFIG_HID_ALPS)
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) },
#endif
#if IS_ENABLED(CONFIG_HID_APPLE)
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
@@ -1979,6 +1982,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
@@ -2329,6 +2333,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
@@ -3121,4 +3126,3 @@ MODULE_AUTHOR("Andreas Gal");
MODULE_AUTHOR("Vojtech Pavlik");
MODULE_AUTHOR("Jiri Kosina");
MODULE_LICENSE("GPL");
-
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 078026f63b6f..68cdc962265b 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -21,7 +21,7 @@
* Data Sheet:
* http://www.silabs.com/Support%20Documents/TechnicalDocs/CP2112.pdf
* Programming Interface Specification:
- * http://www.silabs.com/Support%20Documents/TechnicalDocs/AN495.pdf
+ * https://www.silabs.com/documents/public/application-notes/an495-cp2112-interface-specification.pdf
*/
#include <linux/gpio.h>
@@ -196,6 +196,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
HID_REQ_GET_REPORT);
if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
+ if (ret >= 0)
+ ret = -EIO;
goto exit;
}
@@ -205,8 +207,10 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
- if (ret < 0) {
+ if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
+ if (ret >= 0)
+ ret = -EIO;
goto exit;
}
@@ -214,7 +218,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
exit:
mutex_unlock(&dev->lock);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 6039f071fab1..3aa2bb9f0f81 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -313,7 +313,7 @@ static void mousevsc_on_receive(struct hv_device *device,
break;
default:
- pr_err("unsupported hid msg type - type %d len %d",
+ pr_err("unsupported hid msg type - type %d len %d\n",
hid_msg->header.type, hid_msg->header.size);
break;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index be2e005c3c51..5da3d6256d25 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -77,6 +77,9 @@
#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
#define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F
#define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220
+#define HID_DEVICE_ID_ALPS_U1 0x1215
+#define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C
+
#define USB_VENDOR_ID_AMI 0x046b
#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
@@ -182,6 +185,7 @@
#define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD 0x0101
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837
+#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
#define USB_VENDOR_ID_ATEN 0x0557
#define USB_DEVICE_ID_ATEN_UC100KM 0x2004
@@ -509,6 +513,9 @@
#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
+#define I2C_VENDOR_ID_HANTICK 0x0911
+#define I2C_PRODUCT_ID_HANTICK_5288 0x5288
+
#define USB_VENDOR_ID_HANWANG 0x0b57
#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
@@ -729,6 +736,9 @@
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
+#define USB_VENDOR_ID_MCS 0x16d0
+#define USB_DEVICE_ID_MCS_GAMEPADBLOCK 0x0bcc
+
#define USB_VENDOR_ID_MGE 0x0463
#define USB_DEVICE_ID_MGE_UPS 0xffff
#define USB_DEVICE_ID_MGE_UPS1 0x0001
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 199f6a01fc62..04d01b57d94c 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -797,6 +797,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
map_key_clear(BTN_STYLUS);
break;
+ case 0x45: /* ERASER */
+ /*
+ * This event is reported when eraser tip touches the surface.
+ * Actual eraser (BTN_TOOL_RUBBER) is set by Invert usage when
+ * tool gets in proximity.
+ */
+ map_key_clear(BTN_TOUCH);
+ break;
+
case 0x46: /* TabletPick */
case 0x5a: /* SecondaryBarrelSwitch */
map_key_clear(BTN_STYLUS2);
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 52026dc94d5c..596227ddb6e0 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -756,7 +756,9 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
/* Setup wireless link with Logitech Wii wheel */
if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
- const unsigned char cbuf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ static const unsigned char cbuf[] = {
+ 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL);
if (!buf) {
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 1fc12e357035..512d67e1aae3 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -474,9 +474,7 @@ static int lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effec
static void lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude)
{
struct hid_device *hid = input_get_drvdata(dev);
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
- s32 *value = report->field[0]->value;
+ s32 *value;
u32 expand_a, expand_b;
struct lg4ff_device_entry *entry;
struct lg_drv_data *drv_data;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 614054af904a..19cc980eebce 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -1957,7 +1957,8 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
/* initialize with zero autocenter to get wheel in usable state */
hidpp_ff_set_autocenter(dev, 0);
- hid_info(hid, "Force feeback support loaded (firmware release %d).\n", version);
+ hid_info(hid, "Force feedback support loaded (firmware release %d).\n",
+ version);
return 0;
}
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 20b40ad26325..42ed887ba0be 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -34,7 +34,8 @@ module_param(emulate_scroll_wheel, bool, 0644);
MODULE_PARM_DESC(emulate_scroll_wheel, "Emulate a scroll wheel");
static unsigned int scroll_speed = 32;
-static int param_set_scroll_speed(const char *val, struct kernel_param *kp) {
+static int param_set_scroll_speed(const char *val,
+ const struct kernel_param *kp) {
unsigned long speed;
if (!val || kstrtoul(val, 0, &speed) || speed > 63)
return -EINVAL;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 9e8c4d2ba11d..65ea23be9677 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -43,6 +43,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input/mt.h>
+#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/timer.h>
@@ -112,6 +113,7 @@ struct mt_device {
struct mt_slot curdata; /* placeholder of incoming data */
struct mt_class mtclass; /* our mt device class */
struct timer_list release_timer; /* to release sticky fingers */
+ struct hid_device *hdev; /* hid_device we're attached to */
struct mt_fields *fields; /* temporary placeholder for storing the
multitouch fields */
unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */
@@ -136,6 +138,9 @@ struct mt_device {
bool serial_maybe; /* need to check for serial protocol */
bool curvalid; /* is the current contact valid? */
unsigned mt_flags; /* flags to pass to input-mt */
+ __s32 dev_time; /* the scan time provided by the device */
+ unsigned long jiffies; /* the frame's jiffies */
+ int timestamp; /* the timestamp to be sent */
};
static void mt_post_parse_default_settings(struct mt_device *td);
@@ -177,6 +182,12 @@ static void mt_post_parse(struct mt_device *td);
#define MT_DEFAULT_MAXCONTACT 10
#define MT_MAX_MAXCONTACT 250
+/*
+ * Resync device and local timestamps after that many microseconds without
+ * receiving data.
+ */
+#define MAX_TIMESTAMP_INTERVAL 1000000
+
#define MT_USB_DEVICE(v, p) HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH, v, p)
#define MT_BT_DEVICE(v, p) HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH, v, p)
@@ -583,6 +594,12 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
cls->sn_pressure);
mt_store_field(usage, td, hi);
return 1;
+ case HID_DG_SCANTIME:
+ hid_map_usage(hi, usage, bit, max,
+ EV_MSC, MSC_TIMESTAMP);
+ input_set_capability(hi->input, EV_MSC, MSC_TIMESTAMP);
+ mt_store_field(usage, td, hi);
+ return 1;
case HID_DG_CONTACTCOUNT:
/* Ignore if indexes are out of bounds. */
if (field->index >= field->report->maxfield ||
@@ -718,6 +735,7 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
{
input_mt_sync_frame(input);
+ input_event(input, EV_MSC, MSC_TIMESTAMP, td->timestamp);
input_sync(input);
td->num_received = 0;
if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags))
@@ -727,6 +745,28 @@ static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
clear_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
}
+static int mt_compute_timestamp(struct mt_device *td, struct hid_field *field,
+ __s32 value)
+{
+ long delta = value - td->dev_time;
+ unsigned long jdelta = jiffies_to_usecs(jiffies - td->jiffies);
+
+ td->jiffies = jiffies;
+ td->dev_time = value;
+
+ if (delta < 0)
+ delta += field->logical_maximum;
+
+	/* HID_DG_SCANTIME is expressed in 100us; we want it in us. */
+ delta *= 100;
+
+ if (jdelta > MAX_TIMESTAMP_INTERVAL)
+ /* No data received for a while, resync the timestamp. */
+ return 0;
+ else
+ return td->timestamp + delta;
+}
+
static int mt_touch_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
@@ -787,6 +827,9 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
case HID_DG_HEIGHT:
td->curdata.h = value;
break;
+ case HID_DG_SCANTIME:
+ td->timestamp = mt_compute_timestamp(td, field, value);
+ break;
case HID_DG_CONTACTCOUNT:
break;
case HID_DG_TOUCH:
@@ -1246,10 +1289,10 @@ static void mt_release_contacts(struct hid_device *hid)
td->num_received = 0;
}
-static void mt_expired_timeout(unsigned long arg)
+static void mt_expired_timeout(struct timer_list *t)
{
- struct hid_device *hdev = (void *)arg;
- struct mt_device *td = hid_get_drvdata(hdev);
+ struct mt_device *td = from_timer(td, t, release_timer);
+ struct hid_device *hdev = td->hdev;
/*
* An input report came in just before we release the sticky fingers,
@@ -1280,6 +1323,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev_err(&hdev->dev, "cannot allocate multitouch data\n");
return -ENOMEM;
}
+ td->hdev = hdev;
td->mtclass = *mtclass;
td->inputmode = -1;
td->maxcontact_report_id = -1;
@@ -1331,7 +1375,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
*/
hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
- setup_timer(&td->release_timer, mt_expired_timeout, (long)hdev);
+ timer_setup(&td->release_timer, mt_expired_timeout, 0);
ret = hid_parse(hdev);
if (ret != 0)
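
The hid-multitouch change above derives MSC_TIMESTAMP from HID_DG_SCANTIME by accumulating wrapped 100-microsecond deltas and resyncing after a long idle gap. Below is a minimal user-space sketch of that arithmetic, assuming a scan-time counter that wraps at logical_maximum; the constants and helper name are illustrative, not kernel APIs.

#include <stdio.h>

#define LOGICAL_MAXIMUM		65535	/* assumed wrap point of the scan-time field */
#define MAX_TIMESTAMP_INTERVAL	1000000	/* resync after 1 s without reports, in us */

static int compute_timestamp(int prev_timestamp, long dev_delta,
			     unsigned long usec_since_last_report)
{
	if (dev_delta < 0)
		dev_delta += LOGICAL_MAXIMUM;	/* the device counter wrapped */

	dev_delta *= 100;			/* 100 us ticks -> microseconds */

	if (usec_since_last_report > MAX_TIMESTAMP_INTERVAL)
		return 0;			/* long gap: restart from zero */

	return prev_timestamp + dev_delta;
}

int main(void)
{
	/* device counter wrapped from 65530 to 4, so the raw delta is negative */
	long raw_delta = 4 - 65530;

	printf("%d\n", compute_timestamp(500, raw_delta, 8000));	/* prints 1400 */
	return 0;
}
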
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index ef241d66562e..0f43c4292685 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -368,6 +368,11 @@ static int rmi_check_sanity(struct hid_device *hdev, u8 *data, int size)
static int rmi_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
+ struct rmi_data *hdata = hid_get_drvdata(hdev);
+
+ if (!(hdata->device_flags & RMI_DEVICE))
+ return 0;
+
size = rmi_check_sanity(hdev, data, size);
if (size < 2)
return 0;
@@ -713,9 +718,11 @@ static void rmi_remove(struct hid_device *hdev)
{
struct rmi_data *hdata = hid_get_drvdata(hdev);
- clear_bit(RMI_STARTED, &hdata->flags);
- cancel_work_sync(&hdata->reset_work);
- rmi_unregister_transport_device(&hdata->xport);
+ if (hdata->device_flags & RMI_DEVICE) {
+ clear_bit(RMI_STARTED, &hdata->flags);
+ cancel_work_sync(&hdata->reset_work);
+ rmi_unregister_transport_device(&hdata->xport);
+ }
hid_hw_stop(hdev);
}
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index d03203a82e8f..b9dc3ac4d4aa 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1439,10 +1439,16 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
goto out;
}
- ret = hid_hw_output_report(hdev, buf, 1);
- if (ret < 0) {
- hid_info(hdev, "can't set operational mode: step 3, ignoring\n");
- ret = 0;
+ /*
+ * But the USB interrupt would cause SHANWAN controllers to
+ * start rumbling non-stop.
+ */
+ if (strcmp(hdev->name, "SHANWAN PS3 GamePad")) {
+ ret = hid_hw_output_report(hdev, buf, 1);
+ if (ret < 0) {
+ hid_info(hdev, "can't set operational mode: step 3, ignoring\n");
+ ret = 0;
+ }
}
out:
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index b83376077d72..bea8def64f43 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -242,6 +242,8 @@ static const struct hid_device_id tm_devices[] = {
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
.driver_data = (unsigned long)ff_rumble },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605), /* NASCAR PRO FF2 Wheel */
+ .driver_data = (unsigned long)ff_joystick },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651), /* FGT Rumble Force Wheel */
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653), /* RGT Force Feedback CLUTCH Raging Wheel */
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 9145c2129a96..e054ee43c1e2 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -46,6 +46,7 @@
/* quirks to control the device */
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
+#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
/* flags */
#define I2C_HID_STARTED 0
@@ -168,6 +169,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
+ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
{ 0, 0 }
};
@@ -252,7 +255,9 @@ static int __i2c_hid_command(struct i2c_client *client,
ret = 0;
- if (wait) {
+ if (wait && (ihid->quirks & I2C_HID_QUIRK_NO_IRQ_AFTER_RESET)) {
+ msleep(100);
+ } else if (wait) {
i2c_hid_dbg(ihid, "%s: waiting...\n", __func__);
if (!wait_event_timeout(ihid->wait,
!test_bit(I2C_HID_RESET_PENDING, &ihid->flags),
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 045b5da9b992..640dfb937c69 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -101,10 +101,10 @@ static int hid_start_in(struct hid_device *hid)
}
/* I/O retry timer routine */
-static void hid_retry_timeout(unsigned long _hid)
+static void hid_retry_timeout(struct timer_list *t)
{
- struct hid_device *hid = (struct hid_device *) _hid;
- struct usbhid_device *usbhid = hid->driver_data;
+ struct usbhid_device *usbhid = from_timer(usbhid, t, io_retry);
+ struct hid_device *hid = usbhid->hid;
dev_dbg(&usbhid->intf->dev, "retrying intr urb\n");
if (hid_start_in(hid))
@@ -1373,7 +1373,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
init_waitqueue_head(&usbhid->wait);
INIT_WORK(&usbhid->reset_work, hid_reset);
- setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
+ timer_setup(&usbhid->io_retry, hid_retry_timeout, 0);
spin_lock_init(&usbhid->lock);
ret = hid_add_device(hid);
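
The two conversions in this file follow the new timer API pattern: the callback receives the struct timer_list pointer and recovers its containing structure with from_timer(), which is container_of() on the embedded member. A small user-space sketch of that recovery step follows, using a stand-in timer type rather than the real struct timer_list.

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_timer { int pending; };		/* stand-in for struct timer_list */

struct usbhid_device_sketch {
	const char *name;
	struct fake_timer io_retry;		/* embedded timer, as in the driver */
};

static void hid_retry_timeout(struct fake_timer *t)
{
	struct usbhid_device_sketch *usbhid =
		container_of(t, struct usbhid_device_sketch, io_retry);

	printf("retrying intr urb for %s\n", usbhid->name);
}

int main(void)
{
	struct usbhid_device_sketch dev = { .name = "usb-hid-0" };

	/* the timer core hands the callback a pointer to the embedded member */
	hid_retry_timeout(&dev.io_retry);
	return 0;
}
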
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index f489a5cfcb48..331f7f34ec14 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -170,6 +170,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK, HID_QUIRK_MULTI_INPUT },
{ 0, 0 }
};
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 906e654fb0ba..ee71ad9b6cc1 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -196,6 +196,13 @@ static void wacom_feature_mapping(struct hid_device *hdev,
kfree(data);
break;
}
+
+ if (hdev->vendor == USB_VENDOR_ID_WACOM &&
+ hdev->product == 0x4200 /* Dell Canvas 27 */ &&
+ field->application == HID_UP_MSVENDOR) {
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 2;
+ }
}
/*
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index aa692e28b2cd..16af6886e828 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2140,6 +2140,12 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
case HID_DG_TIPSWITCH:
wacom_wac->hid_data.tipswitch |= value;
return;
+ case HID_DG_BARRELSWITCH:
+ wacom_wac->hid_data.barrelswitch = value;
+ return;
+ case HID_DG_BARRELSWITCH2:
+ wacom_wac->hid_data.barrelswitch2 = value;
+ return;
case HID_DG_TOOLSERIALNUMBER:
if (value) {
wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
@@ -2217,11 +2223,11 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
if (!usage->type || delay_pen_events(wacom_wac))
return;
- /* send pen events only when the pen is in/entering/leaving proximity */
- if (!wacom_wac->hid_data.inrange_state && !wacom_wac->tool[0])
- return;
-
- input_event(input, usage->type, usage->code, value);
+ /* send pen events only when the pen is in range */
+ if (wacom_wac->hid_data.inrange_state)
+ input_event(input, usage->type, usage->code, value);
+ else if (wacom_wac->shared->stylus_in_proximity && !wacom_wac->hid_data.sense_state)
+ input_event(input, usage->type, usage->code, 0);
}
static void wacom_wac_pen_pre_report(struct hid_device *hdev,
@@ -2236,11 +2242,11 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct input_dev *input = wacom_wac->pen_input;
- bool prox = wacom_wac->hid_data.inrange_state;
- bool range = wacom_wac->hid_data.sense_state;
+ bool range = wacom_wac->hid_data.inrange_state;
+ bool sense = wacom_wac->hid_data.sense_state;
- if (!wacom_wac->tool[0] && prox) { /* first in prox */
- /* Going into proximity select tool */
+ if (!wacom_wac->tool[0] && range) { /* first in range */
+ /* Going into range select tool */
if (wacom_wac->hid_data.invert_state)
wacom_wac->tool[0] = BTN_TOOL_RUBBER;
else if (wacom_wac->id[0])
@@ -2250,10 +2256,16 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
}
/* keep pen state for touch events */
- wacom_wac->shared->stylus_in_proximity = range;
+ wacom_wac->shared->stylus_in_proximity = sense;
if (!delay_pen_events(wacom_wac) && wacom_wac->tool[0]) {
int id = wacom_wac->id[0];
+ int sw_state = wacom_wac->hid_data.barrelswitch |
+ (wacom_wac->hid_data.barrelswitch2 << 1);
+
+ input_report_key(input, BTN_STYLUS, sw_state == 1);
+ input_report_key(input, BTN_STYLUS2, sw_state == 2);
+ input_report_key(input, BTN_STYLUS3, sw_state == 3);
/*
* Non-USI EMR tools should have their IDs mangled to
@@ -2269,10 +2281,10 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
*/
input_report_key(input, BTN_TOUCH,
wacom_wac->hid_data.tipswitch);
- input_report_key(input, wacom_wac->tool[0], prox);
+ input_report_key(input, wacom_wac->tool[0], sense);
if (wacom_wac->serial[0]) {
input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
- input_report_abs(input, ABS_MISC, prox ? id : 0);
+ input_report_abs(input, ABS_MISC, sense ? id : 0);
}
wacom_wac->hid_data.tipswitch = false;
@@ -2280,7 +2292,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
input_sync(input);
}
- if (!prox) {
+ if (!sense) {
wacom_wac->tool[0] = 0;
wacom_wac->id[0] = 0;
wacom_wac->serial[0] = 0;
@@ -3300,9 +3312,11 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
else
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
- if (features->type == HID_GENERIC)
- /* setup has already been done */
+ if (features->type == HID_GENERIC) {
+		/* setup has already been done; apply otherwise-undetectable quirks */
+ input_set_capability(input_dev, EV_KEY, BTN_STYLUS3);
return 0;
+ }
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 8a03654048bf..64d8f014602e 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -166,6 +166,7 @@
((f)->physical == HID_DG_PEN) || \
((f)->application == HID_DG_PEN) || \
((f)->application == HID_DG_DIGITIZER) || \
+ ((f)->application == WACOM_HID_WD_PEN) || \
((f)->application == WACOM_HID_WD_DIGITIZER) || \
((f)->application == WACOM_HID_G9_PEN) || \
((f)->application == WACOM_HID_G11_PEN))
@@ -291,6 +292,8 @@ struct hid_data {
bool inrange_state;
bool invert_state;
bool tipswitch;
+ bool barrelswitch;
+ bool barrelswitch2;
int x;
int y;
int pressure;
diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c
index 57f70c28fa38..cf9c2a332ad8 100644
--- a/drivers/hsi/clients/hsi_char.c
+++ b/drivers/hsi/clients/hsi_char.c
@@ -773,13 +773,13 @@ static int __init hsc_init(void)
if ((max_data_size < 4) || (max_data_size > 0x10000) ||
(max_data_size & (max_data_size - 1))) {
- pr_err("Invalid max read/write data size");
+ pr_err("Invalid max read/write data size\n");
return -EINVAL;
}
ret = hsi_register_client_driver(&hsc_driver);
if (ret) {
- pr_err("Error while registering HSI/SSI driver %d", ret);
+ pr_err("Error while registering HSI/SSI driver %d\n", ret);
return ret;
}
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 88e48b346916..41a09f506803 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -334,7 +334,7 @@ static int ssi_clk_event(struct notifier_block *nb, unsigned long event,
case POST_RATE_CHANGE:
dev_dbg(&ssi->device, "post rate change (%lu -> %lu)\n",
clk_data->old_rate, clk_data->new_rate);
- omap_ssi->fck_rate = DIV_ROUND_CLOSEST(clk_data->new_rate, 1000); /* KHz */
+ omap_ssi->fck_rate = DIV_ROUND_CLOSEST(clk_data->new_rate, 1000); /* kHz */
for (i = 0; i < ssi->num_ports; i++) {
omap_port = omap_ssi->port[i];
@@ -467,9 +467,9 @@ static int ssi_hw_init(struct hsi_controller *ssi)
}
/* Resetting GDD */
writel_relaxed(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG);
- /* Get FCK rate in KHz */
+ /* Get FCK rate in kHz */
omap_ssi->fck_rate = DIV_ROUND_CLOSEST(ssi_get_clk_rate(ssi), 1000);
- dev_dbg(&ssi->device, "SSI fck rate %lu KHz\n", omap_ssi->fck_rate);
+ dev_dbg(&ssi->device, "SSI fck rate %lu kHz\n", omap_ssi->fck_rate);
writel_relaxed(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG);
omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON;
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index e7b1d796ba2e..14c22786b519 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -3,7 +3,9 @@ obj-$(CONFIG_HYPERV) += hv_vmbus.o
obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
+CFLAGS_hv_trace.o = -I$(src)
+
hv_vmbus-y := vmbus_drv.o \
hv.o connection.o channel.o \
- channel_mgmt.o ring_buffer.o
+ channel_mgmt.o ring_buffer.o hv_trace.o
hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o hv_utils_transport.o
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 894b67ac2cae..19f0cf37e0ed 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -43,6 +43,8 @@ void vmbus_setevent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorpage;
+ trace_vmbus_setevent(channel);
+
/*
* For channels marked as in "low latency" mode
* bypass the monitor page mechanism.
@@ -185,6 +187,8 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
ret = vmbus_post_msg(open_msg,
sizeof(struct vmbus_channel_open_channel), true);
+ trace_vmbus_open(open_msg, ret);
+
if (ret != 0) {
err = ret;
goto error_clean_msglist;
@@ -234,13 +238,18 @@ int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
const uuid_le *shv_host_servie_id)
{
struct vmbus_channel_tl_connect_request conn_msg;
+ int ret;
memset(&conn_msg, 0, sizeof(conn_msg));
conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
conn_msg.guest_endpoint_id = *shv_guest_servie_id;
conn_msg.host_service_id = *shv_host_servie_id;
- return vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
+ ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
+
+ trace_vmbus_send_tl_connect_request(&conn_msg, ret);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
@@ -433,6 +442,9 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
sizeof(*msginfo), true);
+
+ trace_vmbus_establish_gpadl_header(gpadlmsg, ret);
+
if (ret != 0)
goto cleanup;
@@ -448,6 +460,9 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
ret = vmbus_post_msg(gpadl_body,
submsginfo->msgsize - sizeof(*submsginfo),
true);
+
+ trace_vmbus_establish_gpadl_body(gpadl_body, ret);
+
if (ret != 0)
goto cleanup;
@@ -511,6 +526,8 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
true);
+ trace_vmbus_teardown_gpadl(msg, ret);
+
if (ret)
goto post_msg_err;
@@ -589,6 +606,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
true);
+ trace_vmbus_close_internal(msg, ret);
+
if (ret) {
pr_err("Close failed: close post msg return is %d\n", ret);
/*
@@ -745,6 +764,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
desc.transactionid = requestid;
+ desc.reserved = 0;
desc.rangecount = pagecount;
for (i = 0; i < pagecount; i++) {
@@ -788,6 +808,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
desc->length8 = (u16)(packetlen_aligned >> 3);
desc->transactionid = requestid;
+ desc->reserved = 0;
desc->rangecount = 1;
bufferlist[0].iov_base = desc;
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 379b0df123be..ec5454f3f4a6 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -350,7 +350,7 @@ static void free_channel(struct vmbus_channel *channel)
{
tasklet_kill(&channel->callback_event);
- kfree_rcu(channel, rcu);
+ kobject_put(&channel->kobj);
}
static void percpu_channel_enq(void *arg)
@@ -373,12 +373,15 @@ static void percpu_channel_deq(void *arg)
static void vmbus_release_relid(u32 relid)
{
struct vmbus_channel_relid_released msg;
+ int ret;
memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
msg.child_relid = relid;
msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
- vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
- true);
+ ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
+ true);
+
+ trace_vmbus_release_relid(&msg, ret);
}
void hv_process_channel_removal(u32 relid)
@@ -520,6 +523,14 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
newchannel->state = CHANNEL_OPEN_STATE;
if (!fnew) {
+ struct hv_device *dev
+ = newchannel->primary_channel->device_obj;
+
+ if (vmbus_add_channel_kobj(dev, newchannel)) {
+ atomic_dec(&vmbus_connection.offer_in_progress);
+ goto err_free_chan;
+ }
+
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
newchannel->probe_done = true;
@@ -805,6 +816,8 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
offer = (struct vmbus_channel_offer_channel *)hdr;
+ trace_vmbus_onoffer(offer);
+
/* Allocate the channel object and save this offer. */
newchannel = alloc_channel();
if (!newchannel) {
@@ -846,6 +859,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
rescind = (struct vmbus_channel_rescind_offer *)hdr;
+ trace_vmbus_onoffer_rescind(rescind);
+
/*
* The offer msg and the corresponding rescind msg
	 * from the host are guaranteed to be ordered -
@@ -974,6 +989,8 @@ static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
result = (struct vmbus_channel_open_result *)hdr;
+ trace_vmbus_onopen_result(result);
+
/*
* Find the open msg, copy the result and signal/unblock the wait event
*/
@@ -1018,6 +1035,8 @@ static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
+ trace_vmbus_ongpadl_created(gpadlcreated);
+
/*
* Find the establish msg, copy the result and signal/unblock the wait
* event
@@ -1066,6 +1085,8 @@ static void vmbus_ongpadl_torndown(
gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;
+ trace_vmbus_ongpadl_torndown(gpadl_torndown);
+
/*
* Find the open msg, copy the result and signal/unblock the wait event
*/
@@ -1109,6 +1130,9 @@ static void vmbus_onversion_response(
unsigned long flags;
version_response = (struct vmbus_channel_version_response *)hdr;
+
+ trace_vmbus_onversion_response(version_response);
+
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
@@ -1168,6 +1192,8 @@ void vmbus_onmessage(void *context)
hdr = (struct vmbus_channel_message_header *)msg->u.payload;
size = msg->header.payload_size;
+ trace_vmbus_on_message(hdr);
+
if (hdr->msgtype >= CHANNELMSG_COUNT) {
pr_err("Received invalid channel message type %d size %d\n",
hdr->msgtype, size);
@@ -1201,9 +1227,11 @@ int vmbus_request_offers(void)
msg->msgtype = CHANNELMSG_REQUESTOFFERS;
-
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
true);
+
+ trace_vmbus_request_offers(ret);
+
if (ret != 0) {
pr_err("Unable to request offers - %d\n", ret);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index f41901f80b64..447371f4de56 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -117,6 +117,9 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
ret = vmbus_post_msg(msg,
sizeof(struct vmbus_channel_initiate_contact),
true);
+
+ trace_vmbus_negotiate_version(msg, ret);
+
if (ret != 0) {
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
@@ -319,6 +322,8 @@ void vmbus_on_event(unsigned long data)
struct vmbus_channel *channel = (void *) data;
unsigned long time_limit = jiffies + 2;
+ trace_vmbus_on_event(channel);
+
do {
void (*callback_fn)(void *);
@@ -409,6 +414,8 @@ void vmbus_set_event(struct vmbus_channel *channel)
if (!channel->is_dedicated_interrupt)
vmbus_send_interrupt(child_relid);
+ ++channel->sig_events;
+
hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
}
EXPORT_SYMBOL_GPL(vmbus_set_event);
diff --git a/drivers/hv/hv_trace.c b/drivers/hv/hv_trace.c
new file mode 100644
index 000000000000..df47acd01a81
--- /dev/null
+++ b/drivers/hv/hv_trace.c
@@ -0,0 +1,4 @@
+#include "hyperv_vmbus.h"
+
+#define CREATE_TRACE_POINTS
+#include "hv_trace.h"
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
new file mode 100644
index 000000000000..d635ee95b20d
--- /dev/null
+++ b/drivers/hv/hv_trace.h
@@ -0,0 +1,327 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hyperv
+
+#if !defined(_HV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _HV_TRACE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(vmbus_hdr_msg,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr),
+ TP_STRUCT__entry(__field(unsigned int, msgtype)),
+ TP_fast_assign(__entry->msgtype = hdr->msgtype;),
+ TP_printk("msgtype=%u", __entry->msgtype)
+);
+
+DEFINE_EVENT(vmbus_hdr_msg, vmbus_on_msg_dpc,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr)
+);
+
+DEFINE_EVENT(vmbus_hdr_msg, vmbus_on_message,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr)
+);
+
+TRACE_EVENT(vmbus_onoffer,
+ TP_PROTO(const struct vmbus_channel_offer_channel *offer),
+ TP_ARGS(offer),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u8, monitorid)
+ __field(u16, is_ddc_int)
+ __field(u32, connection_id)
+ __array(char, if_type, 16)
+ __array(char, if_instance, 16)
+ __field(u16, chn_flags)
+ __field(u16, mmio_mb)
+ __field(u16, sub_idx)
+ ),
+ TP_fast_assign(__entry->child_relid = offer->child_relid;
+ __entry->monitorid = offer->monitorid;
+ __entry->is_ddc_int = offer->is_dedicated_interrupt;
+ __entry->connection_id = offer->connection_id;
+ memcpy(__entry->if_type,
+ &offer->offer.if_type.b, 16);
+ memcpy(__entry->if_instance,
+ &offer->offer.if_instance.b, 16);
+ __entry->chn_flags = offer->offer.chn_flags;
+ __entry->mmio_mb = offer->offer.mmio_megabytes;
+ __entry->sub_idx = offer->offer.sub_channel_index;
+ ),
+ TP_printk("child_relid 0x%x, monitorid 0x%x, is_dedicated %d, "
+ "connection_id 0x%x, if_type %pUl, if_instance %pUl, "
+ "chn_flags 0x%x, mmio_megabytes %d, sub_channel_index %d",
+ __entry->child_relid, __entry->monitorid,
+ __entry->is_ddc_int, __entry->connection_id,
+ __entry->if_type, __entry->if_instance,
+ __entry->chn_flags, __entry->mmio_mb,
+ __entry->sub_idx
+ )
+ );
+
+TRACE_EVENT(vmbus_onoffer_rescind,
+ TP_PROTO(const struct vmbus_channel_rescind_offer *offer),
+ TP_ARGS(offer),
+ TP_STRUCT__entry(__field(u32, child_relid)),
+ TP_fast_assign(__entry->child_relid = offer->child_relid),
+ TP_printk("child_relid 0x%x", __entry->child_relid)
+ );
+
+TRACE_EVENT(vmbus_onopen_result,
+ TP_PROTO(const struct vmbus_channel_open_result *result),
+ TP_ARGS(result),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, openid)
+ __field(u32, status)
+ ),
+ TP_fast_assign(__entry->child_relid = result->child_relid;
+ __entry->openid = result->openid;
+ __entry->status = result->status;
+ ),
+ TP_printk("child_relid 0x%x, openid %d, status %d",
+ __entry->child_relid, __entry->openid, __entry->status
+ )
+ );
+
+TRACE_EVENT(vmbus_ongpadl_created,
+ TP_PROTO(const struct vmbus_channel_gpadl_created *gpadlcreated),
+ TP_ARGS(gpadlcreated),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(u32, status)
+ ),
+ TP_fast_assign(__entry->child_relid = gpadlcreated->child_relid;
+ __entry->gpadl = gpadlcreated->gpadl;
+ __entry->status = gpadlcreated->creation_status;
+ ),
+ TP_printk("child_relid 0x%x, gpadl 0x%x, creation_status %d",
+ __entry->child_relid, __entry->gpadl, __entry->status
+ )
+ );
+
+TRACE_EVENT(vmbus_ongpadl_torndown,
+ TP_PROTO(const struct vmbus_channel_gpadl_torndown *gpadltorndown),
+ TP_ARGS(gpadltorndown),
+ TP_STRUCT__entry(__field(u32, gpadl)),
+ TP_fast_assign(__entry->gpadl = gpadltorndown->gpadl),
+ TP_printk("gpadl 0x%x", __entry->gpadl)
+ );
+
+TRACE_EVENT(vmbus_onversion_response,
+ TP_PROTO(const struct vmbus_channel_version_response *response),
+ TP_ARGS(response),
+ TP_STRUCT__entry(
+ __field(u8, ver)
+ ),
+ TP_fast_assign(__entry->ver = response->version_supported;
+ ),
+ TP_printk("version_supported %d", __entry->ver)
+ );
+
+TRACE_EVENT(vmbus_request_offers,
+ TP_PROTO(int ret),
+ TP_ARGS(ret),
+ TP_STRUCT__entry(__field(int, ret)),
+ TP_fast_assign(__entry->ret = ret),
+ TP_printk("sending ret %d", __entry->ret)
+ );
+
+TRACE_EVENT(vmbus_open,
+ TP_PROTO(const struct vmbus_channel_open_channel *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, openid)
+ __field(u32, gpadlhandle)
+ __field(u32, target_vp)
+ __field(u32, offset)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->openid = msg->openid;
+ __entry->gpadlhandle = msg->ringbuffer_gpadlhandle;
+ __entry->target_vp = msg->target_vp;
+ __entry->offset = msg->downstream_ringbuffer_pageoffset;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, openid %d, "
+ "gpadlhandle 0x%x, target_vp 0x%x, offset 0x%x, ret %d",
+ __entry->child_relid, __entry->openid,
+ __entry->gpadlhandle, __entry->target_vp,
+ __entry->offset, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_close_internal,
+ TP_PROTO(const struct vmbus_channel_close_channel *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, ret %d", __entry->child_relid,
+ __entry->ret)
+ );
+
+TRACE_EVENT(vmbus_establish_gpadl_header,
+ TP_PROTO(const struct vmbus_channel_gpadl_header *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(u16, range_buflen)
+ __field(u16, rangecount)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->gpadl = msg->gpadl;
+ __entry->range_buflen = msg->range_buflen;
+ __entry->rangecount = msg->rangecount;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, gpadl 0x%x, range_buflen %d "
+ "rangecount %d, ret %d",
+ __entry->child_relid, __entry->gpadl,
+ __entry->range_buflen, __entry->rangecount, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_establish_gpadl_body,
+ TP_PROTO(const struct vmbus_channel_gpadl_body *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, msgnumber)
+ __field(u32, gpadl)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->msgnumber = msg->msgnumber;
+ __entry->gpadl = msg->gpadl;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending msgnumber %d, gpadl 0x%x, ret %d",
+ __entry->msgnumber, __entry->gpadl, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_teardown_gpadl,
+ TP_PROTO(const struct vmbus_channel_gpadl_teardown *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->gpadl = msg->gpadl;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, gpadl 0x%x, ret %d",
+ __entry->child_relid, __entry->gpadl, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_negotiate_version,
+ TP_PROTO(const struct vmbus_channel_initiate_contact *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, ver)
+ __field(u32, target_vcpu)
+ __field(int, ret)
+ __field(u64, int_page)
+ __field(u64, mon_page1)
+ __field(u64, mon_page2)
+ ),
+ TP_fast_assign(
+ __entry->ver = msg->vmbus_version_requested;
+ __entry->target_vcpu = msg->target_vcpu;
+ __entry->int_page = msg->interrupt_page;
+ __entry->mon_page1 = msg->monitor_page1;
+ __entry->mon_page2 = msg->monitor_page2;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending vmbus_version_requested %d, target_vcpu 0x%x, "
+ "pages %llx:%llx:%llx, ret %d",
+ __entry->ver, __entry->target_vcpu, __entry->int_page,
+ __entry->mon_page1, __entry->mon_page2, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_release_relid,
+ TP_PROTO(const struct vmbus_channel_relid_released *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, ret %d",
+ __entry->child_relid, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_send_tl_connect_request,
+ TP_PROTO(const struct vmbus_channel_tl_connect_request *msg,
+ int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __array(char, guest_id, 16)
+ __array(char, host_id, 16)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->guest_id, &msg->guest_endpoint_id.b, 16);
+ memcpy(__entry->host_id, &msg->host_service_id.b, 16);
+ __entry->ret = ret;
+ ),
+ TP_printk("sending guest_endpoint_id %pUl, host_service_id %pUl, "
+ "ret %d",
+ __entry->guest_id, __entry->host_id, __entry->ret
+ )
+ );
+
+DECLARE_EVENT_CLASS(vmbus_channel,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel),
+ TP_STRUCT__entry(__field(u32, relid)),
+ TP_fast_assign(__entry->relid = channel->offermsg.child_relid),
+ TP_printk("relid 0x%x", __entry->relid)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_chan_sched,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_setevent,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_on_event,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hv_trace
+#endif /* _HV_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
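
Once this header is compiled in (hv_trace.c defines CREATE_TRACE_POINTS before including it), the events appear under the "hyperv" trace system in tracefs. A sketch of enabling them from user space, assuming the conventional debugfs mount point; this is standard ftrace usage, not something the patch itself adds.

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/events/hyperv/enable";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* enable every event in the hyperv trace system */
	fclose(f);

	puts("hyperv events enabled; read trace_pipe to see vmbus traffic");
	return 0;
}
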
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 49569f8fe038..22300ec7b556 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -31,6 +31,8 @@
#include <linux/hyperv.h>
#include <linux/interrupt.h>
+#include "hv_trace.h"
+
/*
* Timeout for services such as KVP and fcopy.
*/
@@ -373,6 +375,8 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
+int vmbus_add_channel_kobj(struct hv_device *device_obj,
+ struct vmbus_channel *channel);
struct vmbus_channel *relid2channel(u32 relid);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 937801ac2fe0..76ed9a216f10 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -65,7 +65,7 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
regs = current_pt_regs();
- hyperv_report_panic(regs);
+ hyperv_report_panic(regs, val);
return NOTIFY_DONE;
}
@@ -75,7 +75,7 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
struct die_args *die = (struct die_args *)args;
struct pt_regs *regs = die->regs;
- hyperv_report_panic(regs);
+ hyperv_report_panic(regs, val);
return NOTIFY_DONE;
}
@@ -107,28 +107,30 @@ static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
-static u8 channel_monitor_group(struct vmbus_channel *channel)
+static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
return (u8)channel->offermsg.monitorid / 32;
}
-static u8 channel_monitor_offset(struct vmbus_channel *channel)
+static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
return (u8)channel->offermsg.monitorid % 32;
}
-static u32 channel_pending(struct vmbus_channel *channel,
- struct hv_monitor_page *monitor_page)
+static u32 channel_pending(const struct vmbus_channel *channel,
+ const struct hv_monitor_page *monitor_page)
{
u8 monitor_group = channel_monitor_group(channel);
+
return monitor_page->trigger_group[monitor_group].pending;
}
-static u32 channel_latency(struct vmbus_channel *channel,
- struct hv_monitor_page *monitor_page)
+static u32 channel_latency(const struct vmbus_channel *channel,
+ const struct hv_monitor_page *monitor_page)
{
u8 monitor_group = channel_monitor_group(channel);
u8 monitor_offset = channel_monitor_offset(channel);
+
return monitor_page->latency[monitor_group][monitor_offset];
}
@@ -833,6 +835,8 @@ void vmbus_on_msg_dpc(unsigned long data)
hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+ trace_vmbus_on_msg_dpc(hdr);
+
if (hdr->msgtype >= CHANNELMSG_COUNT) {
WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
goto msg_handled;
@@ -942,6 +946,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
if (channel->rescind)
continue;
+ trace_vmbus_chan_sched(channel);
+
+ ++channel->interrupts;
+
switch (channel->callback_mode) {
case HV_CALL_ISR:
vmbus_channel_isr(channel);
@@ -1133,6 +1141,159 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver)
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
+
+/*
+ * Called when last reference to channel is gone.
+ */
+static void vmbus_chan_release(struct kobject *kobj)
+{
+ struct vmbus_channel *channel
+ = container_of(kobj, struct vmbus_channel, kobj);
+
+ kfree_rcu(channel, rcu);
+}
+
+struct vmbus_chan_attribute {
+ struct attribute attr;
+ ssize_t (*show)(const struct vmbus_channel *chan, char *buf);
+ ssize_t (*store)(struct vmbus_channel *chan,
+ const char *buf, size_t count);
+};
+#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
+ struct vmbus_chan_attribute chan_attr_##_name \
+ = __ATTR(_name, _mode, _show, _store)
+#define VMBUS_CHAN_ATTR_RW(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
+#define VMBUS_CHAN_ATTR_RO(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
+#define VMBUS_CHAN_ATTR_WO(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
+
+static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ const struct vmbus_chan_attribute *attribute
+ = container_of(attr, struct vmbus_chan_attribute, attr);
+ const struct vmbus_channel *chan
+ = container_of(kobj, struct vmbus_channel, kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(chan, buf);
+}
+
+static const struct sysfs_ops vmbus_chan_sysfs_ops = {
+ .show = vmbus_chan_attr_show,
+};
+
+static ssize_t out_mask_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->outbound;
+
+ return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+}
+VMBUS_CHAN_ATTR_RO(out_mask);
+
+static ssize_t in_mask_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+ return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+}
+VMBUS_CHAN_ATTR_RO(in_mask);
+
+static ssize_t read_avail_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+ return sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
+}
+VMBUS_CHAN_ATTR_RO(read_avail);
+
+static ssize_t write_avail_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->outbound;
+
+ return sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
+}
+VMBUS_CHAN_ATTR_RO(write_avail);
+
+static ssize_t show_target_cpu(const struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%u\n", channel->target_cpu);
+}
+VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);
+
+static ssize_t channel_pending_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%d\n",
+ channel_pending(channel,
+ vmbus_connection.monitor_pages[1]));
+}
+VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);
+
+static ssize_t channel_latency_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%d\n",
+ channel_latency(channel,
+ vmbus_connection.monitor_pages[1]));
+}
+VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);
+
+static ssize_t channel_interrupts_show(const struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%llu\n", channel->interrupts);
+}
+VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);
+
+static ssize_t channel_events_show(const struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%llu\n", channel->sig_events);
+}
+VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
+
+static struct attribute *vmbus_chan_attrs[] = {
+ &chan_attr_out_mask.attr,
+ &chan_attr_in_mask.attr,
+ &chan_attr_read_avail.attr,
+ &chan_attr_write_avail.attr,
+ &chan_attr_cpu.attr,
+ &chan_attr_pending.attr,
+ &chan_attr_latency.attr,
+ &chan_attr_interrupts.attr,
+ &chan_attr_events.attr,
+ NULL
+};
+
+static struct kobj_type vmbus_chan_ktype = {
+ .sysfs_ops = &vmbus_chan_sysfs_ops,
+ .release = vmbus_chan_release,
+ .default_attrs = vmbus_chan_attrs,
+};
+
+/*
+ * vmbus_add_channel_kobj - setup a sub-directory under device/channels
+ */
+int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
+{
+ struct kobject *kobj = &channel->kobj;
+ u32 relid = channel->offermsg.child_relid;
+ int ret;
+
+ kobj->kset = dev->channels_kset;
+ ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
+ "%u", relid);
+ if (ret)
+ return ret;
+
+ kobject_uevent(kobj, KOBJ_ADD);
+
+ return 0;
+}
+
/*
* vmbus_device_create - Creates and registers a new child device
* on the vmbus.
@@ -1164,7 +1325,8 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
*/
int vmbus_device_register(struct hv_device *child_device_obj)
{
- int ret = 0;
+ struct kobject *kobj = &child_device_obj->device.kobj;
+ int ret;
dev_set_name(&child_device_obj->device, "%pUl",
child_device_obj->channel->offermsg.offer.if_instance.b);
@@ -1178,13 +1340,32 @@ int vmbus_device_register(struct hv_device *child_device_obj)
* binding...which will eventually call vmbus_match() and vmbus_probe()
*/
ret = device_register(&child_device_obj->device);
-
- if (ret)
+ if (ret) {
pr_err("Unable to register child device\n");
- else
- pr_debug("child device %s registered\n",
- dev_name(&child_device_obj->device));
+ return ret;
+ }
+
+ child_device_obj->channels_kset = kset_create_and_add("channels",
+ NULL, kobj);
+ if (!child_device_obj->channels_kset) {
+ ret = -ENOMEM;
+ goto err_dev_unregister;
+ }
+
+ ret = vmbus_add_channel_kobj(child_device_obj,
+ child_device_obj->channel);
+ if (ret) {
+		pr_err("Unable to register primary channel\n");
+ goto err_kset_unregister;
+ }
+
+ return 0;
+
+err_kset_unregister:
+ kset_unregister(child_device_obj->channels_kset);
+err_dev_unregister:
+ device_unregister(&child_device_obj->device);
return ret;
}
@@ -1534,7 +1715,7 @@ static int __init hv_acpi_init(void)
{
int ret, t;
- if (x86_hyper != &x86_hyper_ms_hyperv)
+ if (x86_hyper_type != X86_HYPER_MS_HYPERV)
return -ENODEV;
init_completion(&probe_event);
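
With the channels kset added above, every vmbus device gains a channels/<relid> directory whose files (cpu, in_mask, out_mask, read_avail, write_avail, pending, latency, interrupts, events) are served by the vmbus_chan_attribute show callbacks. A sketch of reading one of them, with a made-up device GUID and relid standing in for real values:

#include <stdio.h>

int main(void)
{
	/* the GUID and relid below are placeholders for a real device/channel */
	const char *path =
		"/sys/bus/vmbus/devices/fd1d2cbd-ce7c-535c-966b-eb5f811c95f0/channels/14/interrupts";
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("channel interrupts: %s", buf);
	fclose(f);
	return 0;
}
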
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index d65431417b17..7ad017690e3a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -552,6 +552,7 @@ config SENSORS_G762
config SENSORS_GPIO_FAN
tristate "GPIO fan"
+ depends on OF_GPIO
depends on GPIOLIB || COMPILE_TEST
depends on THERMAL || THERMAL=n
help
@@ -862,6 +863,20 @@ tristate "MAX31722 temperature sensor"
This driver can also be built as a module. If so, the module
will be called max31722.
+config SENSORS_MAX6621
+ tristate "Maxim MAX6621 sensor chip"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for MAX6621 sensor chip.
+	  MAX6621 is a PECI-to-I2C translator that provides an efficient,
+ low-cost solution for PECI-to-SMBus/I2C protocol conversion.
+ It allows reading the temperature from the PECI-compliant
+ host directly from up to four PECI-enabled CPUs.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6621.
+
config SENSORS_MAX6639
tristate "Maxim MAX6639 sensor chip"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 23e195a5a2f3..0fe489fab663 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -118,6 +118,7 @@ obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX197) += max197.o
obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
+obj-$(CONFIG_SENSORS_MAX6621) += max6621.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 4875e99b59c9..6d34c05a4f83 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -579,7 +579,6 @@ static ssize_t show_pwm_enable(struct device *dev,
mutex_unlock(&data->update_lock);
val = config | (altbit << 3);
- newval = 0;
if (val == 3 || val >= 10)
newval = 255;
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 69b97d45e3cb..63a95e23ca81 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -7,19 +7,19 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
-#include <linux/delay.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/sysfs.h>
#include <linux/regmap.h>
+#include <linux/sysfs.h>
#include <linux/thermal.h>
/* ASPEED PWM & FAN Tach Register Definition */
@@ -161,7 +161,7 @@
* 11: reserved.
*/
#define M_TACH_MODE 0x02 /* 10b */
-#define M_TACH_UNIT 0x00c0
+#define M_TACH_UNIT 0x0210
#define INIT_FAN_CTRL 0xFF
/* How long we sleep in us while waiting for an RPM result. */
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 9c355b9d31c5..5c9a52599cf6 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -29,21 +29,24 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/hwmon.h>
-#include <linux/gpio.h>
-#include <linux/gpio-fan.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/thermal.h>
+struct gpio_fan_speed {
+ int rpm;
+ int ctrl_val;
+};
+
struct gpio_fan_data {
- struct platform_device *pdev;
+ struct device *dev;
struct device *hwmon_dev;
/* Cooling device if any */
struct thermal_cooling_device *cdev;
struct mutex lock; /* lock GPIOs operations. */
- int num_ctrl;
- unsigned *ctrl;
+ int num_gpios;
+ struct gpio_desc **gpios;
int num_speed;
struct gpio_fan_speed *speed;
int speed_index;
@@ -51,7 +54,7 @@ struct gpio_fan_data {
int resume_speed;
#endif
bool pwm_enable;
- struct gpio_fan_alarm *alarm;
+ struct gpio_desc *alarm_gpio;
struct work_struct alarm_work;
};
@@ -64,8 +67,8 @@ static void fan_alarm_notify(struct work_struct *ws)
struct gpio_fan_data *fan_data =
container_of(ws, struct gpio_fan_data, alarm_work);
- sysfs_notify(&fan_data->pdev->dev.kobj, NULL, "fan1_alarm");
- kobject_uevent(&fan_data->pdev->dev.kobj, KOBJ_CHANGE);
+ sysfs_notify(&fan_data->dev->kobj, NULL, "fan1_alarm");
+ kobject_uevent(&fan_data->dev->kobj, KOBJ_CHANGE);
}
static irqreturn_t fan_alarm_irq_handler(int irq, void *dev_id)
@@ -81,47 +84,30 @@ static ssize_t fan1_alarm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- struct gpio_fan_alarm *alarm = fan_data->alarm;
- int value = gpio_get_value_cansleep(alarm->gpio);
- if (alarm->active_low)
- value = !value;
-
- return sprintf(buf, "%d\n", value);
+ return sprintf(buf, "%d\n",
+ gpiod_get_value_cansleep(fan_data->alarm_gpio));
}
static DEVICE_ATTR_RO(fan1_alarm);
-static int fan_alarm_init(struct gpio_fan_data *fan_data,
- struct gpio_fan_alarm *alarm)
+static int fan_alarm_init(struct gpio_fan_data *fan_data)
{
- int err;
int alarm_irq;
- struct platform_device *pdev = fan_data->pdev;
-
- fan_data->alarm = alarm;
-
- err = devm_gpio_request(&pdev->dev, alarm->gpio, "GPIO fan alarm");
- if (err)
- return err;
-
- err = gpio_direction_input(alarm->gpio);
- if (err)
- return err;
+ struct device *dev = fan_data->dev;
/*
	 * If the alarm GPIO doesn't support interrupts, just leave
* without initializing the fail notification support.
*/
- alarm_irq = gpio_to_irq(alarm->gpio);
- if (alarm_irq < 0)
+ alarm_irq = gpiod_to_irq(fan_data->alarm_gpio);
+ if (alarm_irq <= 0)
return 0;
INIT_WORK(&fan_data->alarm_work, fan_alarm_notify);
irq_set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH);
- err = devm_request_irq(&pdev->dev, alarm_irq, fan_alarm_irq_handler,
- IRQF_SHARED, "GPIO fan alarm", fan_data);
- return err;
+ return devm_request_irq(dev, alarm_irq, fan_alarm_irq_handler,
+ IRQF_SHARED, "GPIO fan alarm", fan_data);
}
/*
@@ -133,8 +119,9 @@ static void __set_fan_ctrl(struct gpio_fan_data *fan_data, int ctrl_val)
{
int i;
- for (i = 0; i < fan_data->num_ctrl; i++)
- gpio_set_value_cansleep(fan_data->ctrl[i], (ctrl_val >> i) & 1);
+ for (i = 0; i < fan_data->num_gpios; i++)
+ gpiod_set_value_cansleep(fan_data->gpios[i],
+ (ctrl_val >> i) & 1);
}
static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
@@ -142,10 +129,10 @@ static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
int i;
int ctrl_val = 0;
- for (i = 0; i < fan_data->num_ctrl; i++) {
+ for (i = 0; i < fan_data->num_gpios; i++) {
int value;
- value = gpio_get_value_cansleep(fan_data->ctrl[i]);
+ value = gpiod_get_value_cansleep(fan_data->gpios[i]);
ctrl_val |= (value << i);
}
return ctrl_val;
@@ -170,7 +157,7 @@ static int get_fan_speed_index(struct gpio_fan_data *fan_data)
if (fan_data->speed[i].ctrl_val == ctrl_val)
return i;
- dev_warn(&fan_data->pdev->dev,
+ dev_warn(fan_data->dev,
"missing speed array entry for GPIO value 0x%x\n", ctrl_val);
return -ENODEV;
@@ -328,9 +315,9 @@ static umode_t gpio_fan_is_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct gpio_fan_data *data = dev_get_drvdata(dev);
- if (index == 0 && !data->alarm)
+ if (index == 0 && !data->alarm_gpio)
return 0;
- if (index > 0 && !data->ctrl)
+ if (index > 0 && !data->gpios)
return 0;
return attr->mode;
@@ -358,30 +345,25 @@ static const struct attribute_group *gpio_fan_groups[] = {
NULL
};
-static int fan_ctrl_init(struct gpio_fan_data *fan_data,
- struct gpio_fan_platform_data *pdata)
+static int fan_ctrl_init(struct gpio_fan_data *fan_data)
{
- struct platform_device *pdev = fan_data->pdev;
- int num_ctrl = pdata->num_ctrl;
- unsigned *ctrl = pdata->ctrl;
+ int num_gpios = fan_data->num_gpios;
+ struct gpio_desc **gpios = fan_data->gpios;
int i, err;
- for (i = 0; i < num_ctrl; i++) {
- err = devm_gpio_request(&pdev->dev, ctrl[i],
- "GPIO fan control");
- if (err)
- return err;
-
- err = gpio_direction_output(ctrl[i],
- gpio_get_value_cansleep(ctrl[i]));
+ for (i = 0; i < num_gpios; i++) {
+ /*
+ * The GPIO descriptors were retrieved with GPIOD_ASIS so here
+ * we set the GPIO into output mode, carefully preserving the
+		 * current value by setting it to whatever it is already set to
+ * (no surprise changes in default fan speed).
+ */
+ err = gpiod_direction_output(gpios[i],
+ gpiod_get_value_cansleep(gpios[i]));
if (err)
return err;
}
- fan_data->num_ctrl = num_ctrl;
- fan_data->ctrl = ctrl;
- fan_data->num_speed = pdata->num_speed;
- fan_data->speed = pdata->speed;
fan_data->pwm_enable = true; /* Enable manual fan speed control. */
fan_data->speed_index = get_fan_speed_index(fan_data);
if (fan_data->speed_index < 0)
@@ -432,67 +414,47 @@ static const struct thermal_cooling_device_ops gpio_fan_cool_ops = {
.set_cur_state = gpio_fan_set_cur_state,
};
-#ifdef CONFIG_OF_GPIO
/*
* Translate OpenFirmware node properties into platform_data
*/
-static int gpio_fan_get_of_pdata(struct device *dev,
- struct gpio_fan_platform_data *pdata)
+static int gpio_fan_get_of_data(struct gpio_fan_data *fan_data)
{
- struct device_node *node;
struct gpio_fan_speed *speed;
- unsigned *ctrl;
+ struct device *dev = fan_data->dev;
+ struct device_node *np = dev->of_node;
+ struct gpio_desc **gpios;
unsigned i;
u32 u;
struct property *prop;
const __be32 *p;
- node = dev->of_node;
-
/* Alarm GPIO if one exists */
- if (of_gpio_named_count(node, "alarm-gpios") > 0) {
- struct gpio_fan_alarm *alarm;
- int val;
- enum of_gpio_flags flags;
-
- alarm = devm_kzalloc(dev, sizeof(struct gpio_fan_alarm),
- GFP_KERNEL);
- if (!alarm)
- return -ENOMEM;
-
- val = of_get_named_gpio_flags(node, "alarm-gpios", 0, &flags);
- if (val < 0)
- return val;
- alarm->gpio = val;
- alarm->active_low = flags & OF_GPIO_ACTIVE_LOW;
-
- pdata->alarm = alarm;
- }
+ fan_data->alarm_gpio = devm_gpiod_get_optional(dev, "alarm", GPIOD_IN);
+ if (IS_ERR(fan_data->alarm_gpio))
+ return PTR_ERR(fan_data->alarm_gpio);
/* Fill GPIO pin array */
- pdata->num_ctrl = of_gpio_count(node);
- if (pdata->num_ctrl <= 0) {
- if (pdata->alarm)
+ fan_data->num_gpios = gpiod_count(dev, NULL);
+ if (fan_data->num_gpios <= 0) {
+ if (fan_data->alarm_gpio)
return 0;
dev_err(dev, "DT properties empty / missing");
return -ENODEV;
}
- ctrl = devm_kzalloc(dev, pdata->num_ctrl * sizeof(unsigned),
- GFP_KERNEL);
- if (!ctrl)
+ gpios = devm_kzalloc(dev,
+ fan_data->num_gpios * sizeof(struct gpio_desc *),
+ GFP_KERNEL);
+ if (!gpios)
return -ENOMEM;
- for (i = 0; i < pdata->num_ctrl; i++) {
- int val;
-
- val = of_get_gpio(node, i);
- if (val < 0)
- return val;
- ctrl[i] = val;
+ for (i = 0; i < fan_data->num_gpios; i++) {
+ gpios[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
+ if (IS_ERR(gpios[i]))
+ return PTR_ERR(gpios[i]);
}
- pdata->ctrl = ctrl;
+ fan_data->gpios = gpios;
/* Get number of RPM/ctrl_val pairs in speed map */
- prop = of_find_property(node, "gpio-fan,speed-map", &i);
+ prop = of_find_property(np, "gpio-fan,speed-map", &i);
if (!prop) {
dev_err(dev, "gpio-fan,speed-map DT property missing");
return -ENODEV;
@@ -502,7 +464,7 @@ static int gpio_fan_get_of_pdata(struct device *dev,
dev_err(dev, "gpio-fan,speed-map contains zero/odd number of entries");
return -ENODEV;
}
- pdata->num_speed = i / 2;
+ fan_data->num_speed = i / 2;
/*
* Populate speed map
@@ -510,12 +472,12 @@ static int gpio_fan_get_of_pdata(struct device *dev,
* this needs splitting into pairs to create gpio_fan_speed structs
*/
speed = devm_kzalloc(dev,
- pdata->num_speed * sizeof(struct gpio_fan_speed),
+ fan_data->num_speed * sizeof(struct gpio_fan_speed),
GFP_KERNEL);
if (!speed)
return -ENOMEM;
p = NULL;
- for (i = 0; i < pdata->num_speed; i++) {
+ for (i = 0; i < fan_data->num_speed; i++) {
p = of_prop_next_u32(prop, p, &u);
if (!p)
return -ENODEV;
@@ -525,7 +487,7 @@ static int gpio_fan_get_of_pdata(struct device *dev,
return -ENODEV;
speed[i].ctrl_val = u;
}
- pdata->speed = speed;
+ fan_data->speed = speed;
return 0;
}
@@ -535,76 +497,58 @@ static const struct of_device_id of_gpio_fan_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
-#endif /* CONFIG_OF_GPIO */
static int gpio_fan_probe(struct platform_device *pdev)
{
int err;
struct gpio_fan_data *fan_data;
- struct gpio_fan_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
- fan_data = devm_kzalloc(&pdev->dev, sizeof(struct gpio_fan_data),
+ fan_data = devm_kzalloc(dev, sizeof(struct gpio_fan_data),
GFP_KERNEL);
if (!fan_data)
return -ENOMEM;
-#ifdef CONFIG_OF_GPIO
- if (!pdata) {
- pdata = devm_kzalloc(&pdev->dev,
- sizeof(struct gpio_fan_platform_data),
- GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- err = gpio_fan_get_of_pdata(&pdev->dev, pdata);
- if (err)
- return err;
- }
-#else /* CONFIG_OF_GPIO */
- if (!pdata)
- return -EINVAL;
-#endif /* CONFIG_OF_GPIO */
+ fan_data->dev = dev;
+ err = gpio_fan_get_of_data(fan_data);
+ if (err)
+ return err;
- fan_data->pdev = pdev;
platform_set_drvdata(pdev, fan_data);
mutex_init(&fan_data->lock);
/* Configure alarm GPIO if available. */
- if (pdata->alarm) {
- err = fan_alarm_init(fan_data, pdata->alarm);
+ if (fan_data->alarm_gpio) {
+ err = fan_alarm_init(fan_data);
if (err)
return err;
}
/* Configure control GPIOs if available. */
- if (pdata->ctrl && pdata->num_ctrl > 0) {
- if (!pdata->speed || pdata->num_speed <= 1)
+ if (fan_data->gpios && fan_data->num_gpios > 0) {
+ if (!fan_data->speed || fan_data->num_speed <= 1)
return -EINVAL;
- err = fan_ctrl_init(fan_data, pdata);
+ err = fan_ctrl_init(fan_data);
if (err)
return err;
}
/* Make this driver part of hwmon class. */
fan_data->hwmon_dev =
- devm_hwmon_device_register_with_groups(&pdev->dev,
+ devm_hwmon_device_register_with_groups(dev,
"gpio_fan", fan_data,
gpio_fan_groups);
if (IS_ERR(fan_data->hwmon_dev))
return PTR_ERR(fan_data->hwmon_dev);
-#ifdef CONFIG_OF_GPIO
+
/* Optional cooling device register for Device tree platforms */
- fan_data->cdev = thermal_of_cooling_device_register(pdev->dev.of_node,
+ fan_data->cdev = thermal_of_cooling_device_register(np,
"gpio-fan",
fan_data,
&gpio_fan_cool_ops);
-#else /* CONFIG_OF_GPIO */
- /* Optional cooling device register for non Device tree platforms */
- fan_data->cdev = thermal_cooling_device_register("gpio-fan", fan_data,
- &gpio_fan_cool_ops);
-#endif /* CONFIG_OF_GPIO */
- dev_info(&pdev->dev, "GPIO fan initialized\n");
+ dev_info(dev, "GPIO fan initialized\n");
return 0;
}
@@ -616,7 +560,7 @@ static int gpio_fan_remove(struct platform_device *pdev)
if (!IS_ERR(fan_data->cdev))
thermal_cooling_device_unregister(fan_data->cdev);
- if (fan_data->ctrl)
+ if (fan_data->gpios)
set_fan_speed(fan_data, 0);
return 0;
@@ -632,7 +576,7 @@ static int gpio_fan_suspend(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- if (fan_data->ctrl) {
+ if (fan_data->gpios) {
fan_data->resume_speed = fan_data->speed_index;
set_fan_speed(fan_data, 0);
}
@@ -644,7 +588,7 @@ static int gpio_fan_resume(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- if (fan_data->ctrl)
+ if (fan_data->gpios)
set_fan_speed(fan_data, fan_data->resume_speed);
return 0;
@@ -663,9 +607,7 @@ static struct platform_driver gpio_fan_driver = {
.driver = {
.name = "gpio-fan",
.pm = GPIO_FAN_PM,
-#ifdef CONFIG_OF_GPIO
.of_match_table = of_match_ptr(of_gpio_fan_match),
-#endif
},
};
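
The reworked gpio-fan driver still drives speed through a binary ctrl_val: bit i of the chosen value is written to GPIO i, and the value read back is matched against the gpio-fan,speed-map pairs. A small stand-alone sketch of that lookup, with a made-up speed map:

#include <stdio.h>

struct gpio_fan_speed_sketch { int rpm; int ctrl_val; };

/* made-up (rpm, ctrl_val) pairs in the style of gpio-fan,speed-map */
static const struct gpio_fan_speed_sketch speed_map[] = {
	{    0, 0 },
	{ 3000, 1 },
	{ 6000, 2 },
	{ 9000, 3 },
};

static int get_fan_speed_index(int ctrl_val)
{
	unsigned int i;

	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++)
		if (speed_map[i].ctrl_val == ctrl_val)
			return i;
	return -1;	/* no matching entry; the driver warns in this case */
}

int main(void)
{
	int idx = get_fan_speed_index(2);	/* e.g. GPIO1 high, GPIO0 low */

	if (idx >= 0)
		printf("speed index %d -> %d rpm\n", idx, speed_map[idx].rpm);
	return 0;
}
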
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index ce3b91f22e30..46a54ed23410 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -36,6 +36,10 @@ MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
/* Provide lock for writing to NB_SMU_IND_ADDR */
static DEFINE_MUTEX(nb_smu_ind_mutex);
+#ifndef PCI_DEVICE_ID_AMD_17H_DF_F3
+#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+#endif
+
/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK 0xf0000000
#define CPUID_PKGTYPE_F 0x00000000
@@ -61,31 +65,72 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
*/
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4
-static void amd_nb_smu_index_read(struct pci_dev *pdev, unsigned int devfn,
- int offset, u32 *val)
+/* F17h M01h Access through SMN */
+#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800
+
+struct k10temp_data {
+ struct pci_dev *pdev;
+ void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
+ int temp_offset;
+};
+
+struct tctl_offset {
+ u8 model;
+ char const *id;
+ int offset;
+};
+
+static const struct tctl_offset tctl_offset_table[] = {
+ { 0x17, "AMD Ryzen 7 1600X", 20000 },
+ { 0x17, "AMD Ryzen 7 1700X", 20000 },
+ { 0x17, "AMD Ryzen 7 1800X", 20000 },
+ { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
+ { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
+ { 0x17, "AMD Ryzen Threadripper 1950", 10000 },
+ { 0x17, "AMD Ryzen Threadripper 1920", 10000 },
+ { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
+};
+
+static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
+{
+ pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
+}
+
+static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
+ unsigned int base, int offset, u32 *val)
{
mutex_lock(&nb_smu_ind_mutex);
pci_bus_write_config_dword(pdev->bus, devfn,
- 0xb8, offset);
+ base, offset);
pci_bus_read_config_dword(pdev->bus, devfn,
- 0xbc, val);
+ base + 4, val);
mutex_unlock(&nb_smu_ind_mutex);
}
+static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
+{
+ amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
+ F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
+}
+
+static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
+{
+ amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60,
+ F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
+}
+
static ssize_t temp1_input_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct k10temp_data *data = dev_get_drvdata(dev);
u32 regval;
- struct pci_dev *pdev = dev_get_drvdata(dev);
-
- if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model == 0x60) {
- amd_nb_smu_index_read(pdev, PCI_DEVFN(0, 0),
- F15H_M60H_REPORTED_TEMP_CTRL_OFFSET,
- &regval);
- } else {
- pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, &regval);
- }
- return sprintf(buf, "%u\n", (regval >> 21) * 125);
+ unsigned int temp;
+
+ data->read_tempreg(data->pdev, &regval);
+ temp = (regval >> 21) * 125;
+ temp -= data->temp_offset;
+
+ return sprintf(buf, "%u\n", temp);
}
static ssize_t temp1_max_show(struct device *dev,
@@ -98,11 +143,12 @@ static ssize_t show_temp_crit(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct k10temp_data *data = dev_get_drvdata(dev);
int show_hyst = attr->index;
u32 regval;
int value;
- pci_read_config_dword(dev_get_drvdata(dev),
+ pci_read_config_dword(data->pdev,
REG_HARDWARE_THERMAL_CONTROL, &regval);
value = ((regval >> 16) & 0x7f) * 500 + 52000;
if (show_hyst)
@@ -119,7 +165,8 @@ static umode_t k10temp_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct pci_dev *pdev = dev_get_drvdata(dev);
+ struct k10temp_data *data = dev_get_drvdata(dev);
+ struct pci_dev *pdev = data->pdev;
if (index >= 2) {
u32 reg_caps, reg_htc;
@@ -187,7 +234,9 @@ static int k10temp_probe(struct pci_dev *pdev,
{
int unreliable = has_erratum_319(pdev);
struct device *dev = &pdev->dev;
+ struct k10temp_data *data;
struct device *hwmon_dev;
+ int i;
if (unreliable) {
if (!force) {
@@ -199,7 +248,31 @@ static int k10temp_probe(struct pci_dev *pdev,
"unreliable CPU thermal sensor; check erratum 319\n");
}
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", pdev,
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pdev = pdev;
+
+ if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
+ boot_cpu_data.x86_model == 0x70))
+ data->read_tempreg = read_tempreg_nb_f15;
+ else if (boot_cpu_data.x86 == 0x17)
+ data->read_tempreg = read_tempreg_nb_f17;
+ else
+ data->read_tempreg = read_tempreg_pci;
+
+ for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
+ const struct tctl_offset *entry = &tctl_offset_table[i];
+
+ if (boot_cpu_data.x86 == entry->model &&
+ strstr(boot_cpu_data.x86_model_id, entry->id)) {
+ data->temp_offset = entry->offset;
+ break;
+ }
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
k10temp_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
@@ -214,6 +287,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
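
The decoding used by temp1_input_show() above is plain fixed-point arithmetic: bits 31:21 of the reported-temperature register hold the temperature in 0.125 degC steps, and family 17h parts get a per-SKU Tctl offset from tctl_offset_table subtracted. A small userspace sketch of that math (the register value is made up for illustration):

/*
 * Userspace sketch of the temp1_input decoding shown above: bits 31:21 in
 * 0.125 degC steps, minus the per-SKU Tctl offset.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int decode_temp(uint32_t regval, unsigned int tctl_offset_mdeg)
{
	unsigned int temp = (regval >> 21) * 125;	/* millidegrees C */

	return temp - tctl_offset_mdeg;
}

int main(void)
{
	uint32_t regval = 0x4b000000;	/* 600 * 0.125 degC = 75.0 degC */

	/* e.g. a 20 degC Tctl offset (Ryzen 7 1800X) -> 55.0 degC */
	printf("temp1_input = %u\n", decode_temp(regval, 20000));
	return 0;
}
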
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index a18278938494..76d966932941 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -303,10 +303,20 @@ static const struct i2c_device_id max1619_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max1619_id);
+#ifdef CONFIG_OF
+static const struct of_device_id max1619_of_match[] = {
+ { .compatible = "maxim,max1619", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, max1619_of_match);
+#endif
+
static struct i2c_driver max1619_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "max1619",
+ .of_match_table = of_match_ptr(max1619_of_match),
},
.probe = max1619_probe,
.id_table = max1619_id,
diff --git a/drivers/hwmon/max6621.c b/drivers/hwmon/max6621.c
new file mode 100644
index 000000000000..35555f0eefb9
--- /dev/null
+++ b/drivers/hwmon/max6621.c
@@ -0,0 +1,593 @@
+/*
+ * Hardware monitoring driver for Maxim MAX6621
+ *
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define MAX6621_DRV_NAME "max6621"
+#define MAX6621_TEMP_INPUT_REG_NUM 9
+#define MAX6621_TEMP_INPUT_MIN -127000
+#define MAX6621_TEMP_INPUT_MAX 128000
+#define MAX6621_TEMP_ALERT_CHAN_SHIFT 1
+
+#define MAX6621_TEMP_S0D0_REG 0x00
+#define MAX6621_TEMP_S0D1_REG 0x01
+#define MAX6621_TEMP_S1D0_REG 0x02
+#define MAX6621_TEMP_S1D1_REG 0x03
+#define MAX6621_TEMP_S2D0_REG 0x04
+#define MAX6621_TEMP_S2D1_REG 0x05
+#define MAX6621_TEMP_S3D0_REG 0x06
+#define MAX6621_TEMP_S3D1_REG 0x07
+#define MAX6621_TEMP_MAX_REG 0x08
+#define MAX6621_TEMP_MAX_ADDR_REG 0x0a
+#define MAX6621_TEMP_ALERT_CAUSE_REG 0x0b
+#define MAX6621_CONFIG0_REG 0x0c
+#define MAX6621_CONFIG1_REG 0x0d
+#define MAX6621_CONFIG2_REG 0x0e
+#define MAX6621_CONFIG3_REG 0x0f
+#define MAX6621_TEMP_S0_ALERT_REG 0x10
+#define MAX6621_TEMP_S1_ALERT_REG 0x11
+#define MAX6621_TEMP_S2_ALERT_REG 0x12
+#define MAX6621_TEMP_S3_ALERT_REG 0x13
+#define MAX6621_CLEAR_ALERT_REG 0x15
+#define MAX6621_REG_MAX (MAX6621_CLEAR_ALERT_REG + 1)
+#define MAX6621_REG_TEMP_SHIFT 0x06
+
+#define MAX6621_ENABLE_TEMP_ALERTS_BIT 4
+#define MAX6621_ENABLE_I2C_CRC_BIT 5
+#define MAX6621_ENABLE_ALTERNATE_DATA 6
+#define MAX6621_ENABLE_LOCKUP_TO 7
+#define MAX6621_ENABLE_S0D0_BIT 8
+#define MAX6621_ENABLE_S3D1_BIT 15
+#define MAX6621_ENABLE_TEMP_ALL GENMASK(MAX6621_ENABLE_S3D1_BIT, \
+ MAX6621_ENABLE_S0D0_BIT)
+#define MAX6621_POLL_DELAY_MASK 0x5
+#define MAX6621_CONFIG0_INIT (MAX6621_ENABLE_TEMP_ALL | \
+ BIT(MAX6621_ENABLE_LOCKUP_TO) | \
+ BIT(MAX6621_ENABLE_I2C_CRC_BIT) | \
+ MAX6621_POLL_DELAY_MASK)
+#define MAX6621_PECI_BIT_TIME 0x2
+#define MAX6621_PECI_RETRY_NUM 0x3
+#define MAX6621_CONFIG1_INIT ((MAX6621_PECI_BIT_TIME << 8) | \
+ MAX6621_PECI_RETRY_NUM)
+
+/* Error codes */
+#define MAX6621_TRAN_FAILED 0x8100 /*
+ * PECI transaction failed for more
+ * than the configured number of
+ * consecutive retries.
+ */
+#define MAX6621_POOL_DIS 0x8101 /*
+ * Polling disabled for requested
+ * socket/domain.
+ */
+#define MAX6621_POOL_UNCOMPLETE 0x8102 /*
+ * First poll not yet completed for
+ * requested socket/domain (on
+ * startup).
+ */
+#define MAX6621_SD_DIS 0x8103 /*
+ * Read maximum temperature requested,
+ * but no sockets/domains enabled or
+ * all enabled sockets/domains have
+ * errors; or read maximum temperature
+ * address requested, but read maximum
+ * temperature was not called.
+ */
+#define MAX6621_ALERT_DIS 0x8104 /*
+ * Get alert socket/domain requested,
+ * but no alert active.
+ */
+#define MAX6621_PECI_ERR_MIN 0x8000 /* Intel spec PECI error min value. */
+#define MAX6621_PECI_ERR_MAX 0x80ff /* Intel spec PECI error max value. */
+
+static const u32 max6621_temp_regs[] = {
+ MAX6621_TEMP_MAX_REG, MAX6621_TEMP_S0D0_REG, MAX6621_TEMP_S1D0_REG,
+ MAX6621_TEMP_S2D0_REG, MAX6621_TEMP_S3D0_REG, MAX6621_TEMP_S0D1_REG,
+ MAX6621_TEMP_S1D1_REG, MAX6621_TEMP_S2D1_REG, MAX6621_TEMP_S3D1_REG,
+};
+
+static const char *const max6621_temp_labels[] = {
+ "maximum",
+ "socket0_0",
+ "socket1_0",
+ "socket2_0",
+ "socket3_0",
+ "socket0_1",
+ "socket1_1",
+ "socket2_1",
+ "socket3_1",
+};
+
+static const int max6621_temp_alert_chan2reg[] = {
+ MAX6621_TEMP_S0_ALERT_REG,
+ MAX6621_TEMP_S1_ALERT_REG,
+ MAX6621_TEMP_S2_ALERT_REG,
+ MAX6621_TEMP_S3_ALERT_REG,
+};
+
+/**
+ * struct max6621_data - private data:
+ *
+ * @client: I2C client;
+ * @regmap: register map handle;
+ * @input_chan2reg: mapping from channel to register;
+ */
+struct max6621_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ int input_chan2reg[MAX6621_TEMP_INPUT_REG_NUM + 1];
+};
+
+static long max6621_temp_mc2reg(long val)
+{
+ return (val / 1000L) << MAX6621_REG_TEMP_SHIFT;
+}
+
+static umode_t
+max6621_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+	/* Skip channels which are not physically connected. */
+ if (((struct max6621_data *)data)->input_chan2reg[channel] < 0)
+ return 0;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ case hwmon_temp_crit_alarm:
+ return 0444;
+ case hwmon_temp_offset:
+ case hwmon_temp_crit:
+ return 0644;
+ default:
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int max6621_verify_reg_data(struct device *dev, int regval)
+{
+ if (regval >= MAX6621_PECI_ERR_MIN &&
+ regval <= MAX6621_PECI_ERR_MAX) {
+ dev_dbg(dev, "PECI error code - err 0x%04x.\n",
+ regval);
+
+ return -EIO;
+ }
+
+ switch (regval) {
+ case MAX6621_TRAN_FAILED:
+ dev_dbg(dev, "PECI transaction failed - err 0x%04x.\n",
+ regval);
+ return -EIO;
+ case MAX6621_POOL_DIS:
+ dev_dbg(dev, "Polling disabled - err 0x%04x.\n", regval);
+ return -EOPNOTSUPP;
+ case MAX6621_POOL_UNCOMPLETE:
+ dev_dbg(dev, "First poll not completed on startup - err 0x%04x.\n",
+ regval);
+ return -EIO;
+ case MAX6621_SD_DIS:
+ dev_dbg(dev, "Resource is disabled - err 0x%04x.\n", regval);
+ return -EOPNOTSUPP;
+ case MAX6621_ALERT_DIS:
+ dev_dbg(dev, "No alert active - err 0x%04x.\n", regval);
+ return -EOPNOTSUPP;
+ default:
+ return 0;
+ }
+}
+
+static int
+max6621_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct max6621_data *data = dev_get_drvdata(dev);
+ u32 regval;
+ int reg;
+ s8 temp;
+ int ret;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = data->input_chan2reg[channel];
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret)
+ return ret;
+
+ /*
+ * Bit MAX6621_REG_TEMP_SHIFT represents a 1 degree step.
+ * The temperature is given in two's complement, and 8 bits
+ * are used for the register conversion.
+ */
+ temp = (regval >> MAX6621_REG_TEMP_SHIFT);
+ *val = temp * 1000L;
+
+ break;
+ case hwmon_temp_offset:
+ ret = regmap_read(data->regmap, MAX6621_CONFIG2_REG,
+ &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret)
+ return ret;
+
+ *val = (regval >> MAX6621_REG_TEMP_SHIFT) *
+ 1000L;
+
+ break;
+ case hwmon_temp_crit:
+ channel -= MAX6621_TEMP_ALERT_CHAN_SHIFT;
+ reg = max6621_temp_alert_chan2reg[channel];
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret)
+ return ret;
+
+ *val = regval * 1000L;
+
+ break;
+ case hwmon_temp_crit_alarm:
+ /*
+ * Set val to zero to cover the case where reading
+ * MAX6621_TEMP_ALERT_CAUSE_REG returns, for example,
+ * MAX6621_ALERT_DIS. The read then fails with an error,
+ * but in that case the alarm should be reported as 0.
+ */
+ *val = 0;
+ ret = regmap_read(data->regmap,
+ MAX6621_TEMP_ALERT_CAUSE_REG,
+ &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret) {
+ /* Do not report error if alert is disabled. */
+ if (regval == MAX6621_ALERT_DIS)
+ return 0;
+ else
+ return ret;
+ }
+
+ /*
+ * Clear the alert automatically, using send-byte
+ * smbus protocol for clearing alert.
+ */
+ if (regval) {
+ ret = i2c_smbus_write_byte(data->client,
+ MAX6621_CLEAR_ALERT_REG);
+ if (ret)
+ return ret;
+ }
+
+ *val = !!regval;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+max6621_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct max6621_data *data = dev_get_drvdata(dev);
+ u32 reg;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_offset:
+ /* Clamp to allowed range to prevent overflow. */
+ val = clamp_val(val, MAX6621_TEMP_INPUT_MIN,
+ MAX6621_TEMP_INPUT_MAX);
+ val = max6621_temp_mc2reg(val);
+
+ return regmap_write(data->regmap,
+ MAX6621_CONFIG2_REG, val);
+ case hwmon_temp_crit:
+ channel -= MAX6621_TEMP_ALERT_CHAN_SHIFT;
+ reg = max6621_temp_alert_chan2reg[channel];
+ /* Clamp to allowed range to prevent overflow. */
+ val = clamp_val(val, MAX6621_TEMP_INPUT_MIN,
+ MAX6621_TEMP_INPUT_MAX);
+ val = val / 1000L;
+
+ return regmap_write(data->regmap, reg, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+max6621_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_label:
+ *str = max6621_temp_labels[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static bool max6621_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX6621_CONFIG0_REG:
+ case MAX6621_CONFIG1_REG:
+ case MAX6621_CONFIG2_REG:
+ case MAX6621_CONFIG3_REG:
+ case MAX6621_TEMP_S0_ALERT_REG:
+ case MAX6621_TEMP_S1_ALERT_REG:
+ case MAX6621_TEMP_S2_ALERT_REG:
+ case MAX6621_TEMP_S3_ALERT_REG:
+ case MAX6621_TEMP_ALERT_CAUSE_REG:
+ return true;
+ }
+ return false;
+}
+
+static bool max6621_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX6621_TEMP_S0D0_REG:
+ case MAX6621_TEMP_S0D1_REG:
+ case MAX6621_TEMP_S1D0_REG:
+ case MAX6621_TEMP_S1D1_REG:
+ case MAX6621_TEMP_S2D0_REG:
+ case MAX6621_TEMP_S2D1_REG:
+ case MAX6621_TEMP_S3D0_REG:
+ case MAX6621_TEMP_S3D1_REG:
+ case MAX6621_TEMP_MAX_REG:
+ case MAX6621_TEMP_MAX_ADDR_REG:
+ case MAX6621_CONFIG0_REG:
+ case MAX6621_CONFIG1_REG:
+ case MAX6621_CONFIG2_REG:
+ case MAX6621_CONFIG3_REG:
+ case MAX6621_TEMP_S0_ALERT_REG:
+ case MAX6621_TEMP_S1_ALERT_REG:
+ case MAX6621_TEMP_S2_ALERT_REG:
+ case MAX6621_TEMP_S3_ALERT_REG:
+ return true;
+ }
+ return false;
+}
+
+static bool max6621_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX6621_TEMP_S0D0_REG:
+ case MAX6621_TEMP_S0D1_REG:
+ case MAX6621_TEMP_S1D0_REG:
+ case MAX6621_TEMP_S1D1_REG:
+ case MAX6621_TEMP_S2D0_REG:
+ case MAX6621_TEMP_S2D1_REG:
+ case MAX6621_TEMP_S3D0_REG:
+ case MAX6621_TEMP_S3D1_REG:
+ case MAX6621_TEMP_MAX_REG:
+ case MAX6621_TEMP_S0_ALERT_REG:
+ case MAX6621_TEMP_S1_ALERT_REG:
+ case MAX6621_TEMP_S2_ALERT_REG:
+ case MAX6621_TEMP_S3_ALERT_REG:
+ case MAX6621_TEMP_ALERT_CAUSE_REG:
+ return true;
+ }
+ return false;
+}
+
+static const struct reg_default max6621_regmap_default[] = {
+ { MAX6621_CONFIG0_REG, MAX6621_CONFIG0_INIT },
+ { MAX6621_CONFIG1_REG, MAX6621_CONFIG1_INIT },
+};
+
+static const struct regmap_config max6621_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = MAX6621_REG_MAX,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = max6621_writeable_reg,
+ .readable_reg = max6621_readable_reg,
+ .volatile_reg = max6621_volatile_reg,
+ .reg_defaults = max6621_regmap_default,
+ .num_reg_defaults = ARRAY_SIZE(max6621_regmap_default),
+};
+
+static u32 max6621_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0
+};
+
+static const struct hwmon_channel_info max6621_chip = {
+ .type = hwmon_chip,
+ .config = max6621_chip_config,
+};
+
+static const u32 max6621_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ 0
+};
+
+static const struct hwmon_channel_info max6621_temp = {
+ .type = hwmon_temp,
+ .config = max6621_temp_config,
+};
+
+static const struct hwmon_channel_info *max6621_info[] = {
+ &max6621_chip,
+ &max6621_temp,
+ NULL
+};
+
+static const struct hwmon_ops max6621_hwmon_ops = {
+ .read = max6621_read,
+ .write = max6621_write,
+ .read_string = max6621_read_string,
+ .is_visible = max6621_is_visible,
+};
+
+static const struct hwmon_chip_info max6621_chip_info = {
+ .ops = &max6621_hwmon_ops,
+ .info = max6621_info,
+};
+
+static int max6621_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct max6621_data *data;
+ struct device *hwmon_dev;
+ int i;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regmap = devm_regmap_init_i2c(client, &max6621_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ i2c_set_clientdata(client, data);
+ data->client = client;
+
+ /* Set CONFIG0 register masking temperature alerts and PEC. */
+ ret = regmap_write(data->regmap, MAX6621_CONFIG0_REG,
+ MAX6621_CONFIG0_INIT);
+ if (ret)
+ return ret;
+
+ /* Set CONFIG1 register for PEC access retry number. */
+ ret = regmap_write(data->regmap, MAX6621_CONFIG1_REG,
+ MAX6621_CONFIG1_INIT);
+ if (ret)
+ return ret;
+
+ /* Sync registers with hardware. */
+ regcache_mark_dirty(data->regmap);
+ ret = regcache_sync(data->regmap);
+ if (ret)
+ return ret;
+
+ /* Verify which temperature input registers are enabled. */
+ for (i = 0; i < MAX6621_TEMP_INPUT_REG_NUM; i++) {
+ ret = i2c_smbus_read_word_data(client, max6621_temp_regs[i]);
+ if (ret < 0)
+ return ret;
+ ret = max6621_verify_reg_data(dev, ret);
+ if (ret) {
+ data->input_chan2reg[i] = -1;
+ continue;
+ }
+
+ data->input_chan2reg[i] = max6621_temp_regs[i];
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &max6621_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id max6621_id[] = {
+ { MAX6621_DRV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max6621_id);
+
+static const struct of_device_id max6621_of_match[] = {
+ { .compatible = "maxim,max6621" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max6621_of_match);
+
+static struct i2c_driver max6621_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = MAX6621_DRV_NAME,
+ .of_match_table = of_match_ptr(max6621_of_match),
+ },
+ .probe = max6621_probe,
+ .id_table = max6621_id,
+};
+
+module_i2c_driver(max6621_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Driver for Maxim MAX6621");
+MODULE_LICENSE("GPL");
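
The register format handled by max6621_read() and max6621_temp_mc2reg() above keeps the temperature left-shifted by MAX6621_REG_TEMP_SHIFT in the 16-bit register, as a two's complement byte with bit 6 worth 1 degC. A standalone sketch of the same conversion (register values are examples):

/*
 * Userspace sketch of the MAX6621 temperature encoding: a signed byte of
 * whole degrees C, left-shifted by 6 bits, scaled to millidegrees.
 */
#include <stdio.h>
#include <stdint.h>

#define REG_TEMP_SHIFT	6

static long reg_to_millic(uint16_t regval)
{
	/* Same truncation to a signed byte as the driver's s8 cast. */
	int8_t temp = regval >> REG_TEMP_SHIFT;

	return temp * 1000L;
}

static uint16_t millic_to_reg(long millic)
{
	return (uint16_t)((millic / 1000L) << REG_TEMP_SHIFT);
}

int main(void)
{
	printf("%ld\n", reg_to_millic(0x0c80));		/*  50 << 6 ->  50000 mC */
	printf("%ld\n", reg_to_millic(0xfd80));		/* -10 << 6 -> -10000 mC */
	printf("0x%04x\n", millic_to_reg(85000));	/* 85 degC  ->  0x1540  */
	return 0;
}
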
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 40019325b517..08479006c7f9 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -114,6 +114,16 @@ config SENSORS_MAX20751
This driver can also be built as a module. If so, the module will
be called max20751.
+config SENSORS_MAX31785
+ tristate "Maxim MAX31785 and compatibles"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX31785.
+
+ This driver can also be built as a module. If so, the module will
+ be called max31785.
+
config SENSORS_MAX34440
tristate "Maxim MAX34440 and compatibles"
default n
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index e9364420a512..ea0e39518c21 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
+obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
new file mode 100644
index 000000000000..9313849d5160
--- /dev/null
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+enum max31785_regs {
+ MFR_REVISION = 0x9b,
+};
+
+#define MAX31785_NR_PAGES 23
+
+#define MAX31785_FAN_FUNCS \
+ (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12)
+
+#define MAX31785_TEMP_FUNCS \
+ (PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP)
+
+#define MAX31785_VOUT_FUNCS \
+ (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT)
+
+static const struct pmbus_driver_info max31785_info = {
+ .pages = MAX31785_NR_PAGES,
+
+ /* RPM */
+ .format[PSC_FAN] = direct,
+ .m[PSC_FAN] = 1,
+ .b[PSC_FAN] = 0,
+ .R[PSC_FAN] = 0,
+ .func[0] = MAX31785_FAN_FUNCS,
+ .func[1] = MAX31785_FAN_FUNCS,
+ .func[2] = MAX31785_FAN_FUNCS,
+ .func[3] = MAX31785_FAN_FUNCS,
+ .func[4] = MAX31785_FAN_FUNCS,
+ .func[5] = MAX31785_FAN_FUNCS,
+
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .func[6] = MAX31785_TEMP_FUNCS,
+ .func[7] = MAX31785_TEMP_FUNCS,
+ .func[8] = MAX31785_TEMP_FUNCS,
+ .func[9] = MAX31785_TEMP_FUNCS,
+ .func[10] = MAX31785_TEMP_FUNCS,
+ .func[11] = MAX31785_TEMP_FUNCS,
+ .func[12] = MAX31785_TEMP_FUNCS,
+ .func[13] = MAX31785_TEMP_FUNCS,
+ .func[14] = MAX31785_TEMP_FUNCS,
+ .func[15] = MAX31785_TEMP_FUNCS,
+ .func[16] = MAX31785_TEMP_FUNCS,
+
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .func[17] = MAX31785_VOUT_FUNCS,
+ .func[18] = MAX31785_VOUT_FUNCS,
+ .func[19] = MAX31785_VOUT_FUNCS,
+ .func[20] = MAX31785_VOUT_FUNCS,
+ .func[21] = MAX31785_VOUT_FUNCS,
+ .func[22] = MAX31785_VOUT_FUNCS,
+};
+
+static int max31785_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct pmbus_driver_info *info;
+ s64 ret;
+
+ info = devm_kzalloc(dev, sizeof(struct pmbus_driver_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ *info = max31785_info;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 255);
+ if (ret < 0)
+ return ret;
+
+ return pmbus_do_probe(client, id, info);
+}
+
+static const struct i2c_device_id max31785_id[] = {
+ { "max31785", 0 },
+ { "max31785a", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max31785_id);
+
+static struct i2c_driver max31785_driver = {
+ .driver = {
+ .name = "max31785",
+ },
+ .probe = max31785_probe,
+ .remove = pmbus_do_remove,
+ .id_table = max31785_id,
+};
+
+module_i2c_driver(max31785_driver);
+
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("PMBus driver for the Maxim MAX31785");
+MODULE_LICENSE("GPL");
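
The m/b/R coefficients above follow the standard PMBus "direct" data format, Y = (m * X + b) * 10^R, so a reading decodes as X = (Y * 10^-R - b) / m; with m=1 and b=0 this reduces to a pure power-of-ten scaling (R=0 for fan RPM, R=2 for hundredths of a degree). A userspace sketch of that decode, with made-up register readings:

/*
 * Userspace sketch of PMBus direct-format decoding, X = (Y * 10^-R - b) / m.
 * The readings passed in main() are examples only.
 */
#include <stdio.h>

static double direct_decode(long y, long m, long b, int R)
{
	double x = (double)y;
	int i;

	for (i = 0; i < R; i++)		/* apply the 10^-R scaling */
		x /= 10.0;
	for (i = 0; i > R; i--)
		x *= 10.0;

	return (x - b) / (double)m;
}

int main(void)
{
	/* Fan speed: m=1, b=0, R=0 -> the reading is the RPM directly. */
	printf("fan1_input:  %.0f RPM\n", direct_decode(5200, 1, 0, 0));

	/* Temperature: m=1, b=0, R=2 -> the reading is hundredths of degC. */
	printf("temp1_input: %.2f degC\n", direct_decode(4150, 1, 0, 2));
	return 0;
}
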
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 4efa2bd4f6d8..fa613bd209e3 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -404,9 +404,9 @@ extern const struct regulator_ops pmbus_regulator_ops;
/* Function declarations */
void pmbus_clear_cache(struct i2c_client *client);
-int pmbus_set_page(struct i2c_client *client, u8 page);
-int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
-int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word);
+int pmbus_set_page(struct i2c_client *client, int page);
+int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg);
+int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, u16 word);
int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg,
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 302f0aef59de..52a58b8b6e1b 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -136,13 +136,13 @@ void pmbus_clear_cache(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_clear_cache);
-int pmbus_set_page(struct i2c_client *client, u8 page)
+int pmbus_set_page(struct i2c_client *client, int page)
{
struct pmbus_data *data = i2c_get_clientdata(client);
int rv = 0;
int newpage;
- if (page != data->currpage) {
+ if (page >= 0 && page != data->currpage) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
if (newpage != page)
@@ -158,11 +158,9 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
{
int rv;
- if (page >= 0) {
- rv = pmbus_set_page(client, page);
- if (rv < 0)
- return rv;
- }
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
return i2c_smbus_write_byte(client, value);
}
@@ -186,7 +184,8 @@ static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
return pmbus_write_byte(client, page, value);
}
-int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word)
+int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
+ u16 word)
{
int rv;
@@ -219,7 +218,7 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
return pmbus_write_word_data(client, page, reg, word);
}
-int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
+int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
@@ -255,11 +254,9 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
- if (page >= 0) {
- rv = pmbus_set_page(client, page);
- if (rv < 0)
- return rv;
- }
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
return i2c_smbus_read_byte_data(client, reg);
}
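
The pmbus_set_page() change above makes a negative page mean "no page switch", so callers can drop their open-coded page >= 0 guards. A tiny userspace model of the new calling convention (set_page() and write_byte() are stand-ins, not the real PMBus core API):

/*
 * Userspace model only: negative page -> no PMBUS_PAGE write, so callers
 * pass the page through unconditionally.
 */
#include <stdio.h>

static int currpage = -1;

static int set_page(int page)
{
	if (page >= 0 && page != currpage) {
		printf("PMBUS_PAGE <= %d\n", page);
		currpage = page;
	}
	return 0;
}

static int write_byte(int page, unsigned char value)
{
	int rv = set_page(page);	/* harmless no-op for page < 0 */

	if (rv < 0)
		return rv;
	printf("write 0x%02x\n", value);
	return 0;
}

int main(void)
{
	write_byte(3, 0x01);	/* paged device: switches to page 3 first */
	write_byte(-1, 0x02);	/* unpaged device: no PMBUS_PAGE write */
	return 0;
}
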
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index e4d642b673c6..25d28343ba93 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -18,13 +18,11 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/mutex.h>
-#include <linux/platform_data/sht15.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -34,7 +32,8 @@
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/bitrev.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
/* Commands */
#define SHT15_MEASURE_TEMP 0x03
@@ -122,7 +121,8 @@ static const u8 sht15_crc8_table[] = {
/**
* struct sht15_data - device instance specific data
- * @pdata: platform data (gpio's etc).
+ * @sck: clock GPIO line
+ * @data: data GPIO line
* @read_work: bh of interrupt handler.
* @wait_queue: wait queue for getting values from device.
* @val_temp: last temperature value read from device.
@@ -150,7 +150,8 @@ static const u8 sht15_crc8_table[] = {
* @interrupt_handled: flag used to indicate a handler has been scheduled.
*/
struct sht15_data {
- struct sht15_platform_data *pdata;
+ struct gpio_desc *sck;
+ struct gpio_desc *data;
struct work_struct read_work;
wait_queue_head_t wait_queue;
uint16_t val_temp;
@@ -205,16 +206,16 @@ static int sht15_connection_reset(struct sht15_data *data)
{
int i, err;
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSCKL);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
for (i = 0; i < 9; ++i) {
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
}
return 0;
@@ -227,11 +228,11 @@ static int sht15_connection_reset(struct sht15_data *data)
*/
static inline void sht15_send_bit(struct sht15_data *data, int val)
{
- gpio_set_value(data->pdata->gpio_data, val);
+ gpiod_set_value(data->data, val);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL); /* clock low time */
}
@@ -248,23 +249,23 @@ static int sht15_transmission_start(struct sht15_data *data)
int err;
/* ensure data is high and output */
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_data, 0);
+ gpiod_set_value(data->data, 0);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_data, 1);
+ gpiod_set_value(data->data, 1);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
return 0;
}
@@ -292,20 +293,20 @@ static int sht15_wait_for_response(struct sht15_data *data)
{
int err;
- err = gpio_direction_input(data->pdata->gpio_data);
+ err = gpiod_direction_input(data->data);
if (err)
return err;
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- if (gpio_get_value(data->pdata->gpio_data)) {
- gpio_set_value(data->pdata->gpio_sck, 0);
+ if (gpiod_get_value(data->data)) {
+ gpiod_set_value(data->sck, 0);
dev_err(data->dev, "Command not acknowledged\n");
err = sht15_connection_reset(data);
if (err)
return err;
return -EIO;
}
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
return 0;
}
@@ -360,17 +361,17 @@ static int sht15_ack(struct sht15_data *data)
{
int err;
- err = gpio_direction_output(data->pdata->gpio_data, 0);
+ err = gpiod_direction_output(data->data, 0);
if (err)
return err;
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_data, 1);
+ gpiod_set_value(data->data, 1);
- return gpio_direction_input(data->pdata->gpio_data);
+ return gpiod_direction_input(data->data);
}
/**
@@ -383,13 +384,13 @@ static int sht15_end_transmission(struct sht15_data *data)
{
int err;
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
return 0;
}
@@ -405,10 +406,10 @@ static u8 sht15_read_byte(struct sht15_data *data)
for (i = 0; i < 8; ++i) {
byte <<= 1;
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- byte |= !!gpio_get_value(data->pdata->gpio_data);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ byte |= !!gpiod_get_value(data->data);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
}
return byte;
@@ -428,7 +429,7 @@ static int sht15_send_status(struct sht15_data *data, u8 status)
err = sht15_send_cmd(data, SHT15_WRITE_STATUS);
if (err)
return err;
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSU);
@@ -528,14 +529,14 @@ static int sht15_measurement(struct sht15_data *data,
if (ret)
return ret;
- ret = gpio_direction_input(data->pdata->gpio_data);
+ ret = gpiod_direction_input(data->data);
if (ret)
return ret;
atomic_set(&data->interrupt_handled, 0);
- enable_irq(gpio_to_irq(data->pdata->gpio_data));
- if (gpio_get_value(data->pdata->gpio_data) == 0) {
- disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ enable_irq(gpiod_to_irq(data->data));
+ if (gpiod_get_value(data->data) == 0) {
+ disable_irq_nosync(gpiod_to_irq(data->data));
/* Only relevant if the interrupt hasn't occurred. */
if (!atomic_read(&data->interrupt_handled))
schedule_work(&data->read_work);
@@ -547,7 +548,7 @@ static int sht15_measurement(struct sht15_data *data,
data->state = SHT15_READING_NOTHING;
return -EIO;
} else if (ret == 0) { /* timeout occurred */
- disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ disable_irq_nosync(gpiod_to_irq(data->data));
ret = sht15_connection_reset(data);
if (ret)
return ret;
@@ -826,15 +827,15 @@ static void sht15_bh_read_data(struct work_struct *work_s)
read_work);
/* Firstly, verify the line is low */
- if (gpio_get_value(data->pdata->gpio_data)) {
+ if (gpiod_get_value(data->data)) {
/*
* If not, then start the interrupt again - care here as could
* have gone low in meantime so verify it hasn't!
*/
atomic_set(&data->interrupt_handled, 0);
- enable_irq(gpio_to_irq(data->pdata->gpio_data));
+ enable_irq(gpiod_to_irq(data->data));
/* If still not occurred or another handler was scheduled */
- if (gpio_get_value(data->pdata->gpio_data)
+ if (gpiod_get_value(data->data)
|| atomic_read(&data->interrupt_handled))
return;
}
@@ -918,53 +919,12 @@ static const struct of_device_id sht15_dt_match[] = {
{ },
};
MODULE_DEVICE_TABLE(of, sht15_dt_match);
-
-/*
- * This function returns NULL if pdev isn't a device instatiated by dt,
- * a pointer to pdata if it could successfully get all information
- * from dt or a negative ERR_PTR() on error.
- */
-static struct sht15_platform_data *sht15_probe_dt(struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct sht15_platform_data *pdata;
-
- /* no device tree device */
- if (!np)
- return NULL;
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->gpio_data = of_get_named_gpio(np, "data-gpios", 0);
- if (pdata->gpio_data < 0) {
- if (pdata->gpio_data != -EPROBE_DEFER)
- dev_err(dev, "data-gpios not found\n");
- return ERR_PTR(pdata->gpio_data);
- }
-
- pdata->gpio_sck = of_get_named_gpio(np, "clk-gpios", 0);
- if (pdata->gpio_sck < 0) {
- if (pdata->gpio_sck != -EPROBE_DEFER)
- dev_err(dev, "clk-gpios not found\n");
- return ERR_PTR(pdata->gpio_sck);
- }
-
- return pdata;
-}
-#else
-static inline struct sht15_platform_data *sht15_probe_dt(struct device *dev)
-{
- return NULL;
-}
#endif
static int sht15_probe(struct platform_device *pdev)
{
int ret;
struct sht15_data *data;
- u8 status = 0;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -977,25 +937,6 @@ static int sht15_probe(struct platform_device *pdev)
data->dev = &pdev->dev;
init_waitqueue_head(&data->wait_queue);
- data->pdata = sht15_probe_dt(&pdev->dev);
- if (IS_ERR(data->pdata))
- return PTR_ERR(data->pdata);
- if (data->pdata == NULL) {
- data->pdata = dev_get_platdata(&pdev->dev);
- if (data->pdata == NULL) {
- dev_err(&pdev->dev, "no platform data supplied\n");
- return -EINVAL;
- }
- }
-
- data->supply_uv = data->pdata->supply_mv * 1000;
- if (data->pdata->checksum)
- data->checksumming = true;
- if (data->pdata->no_otp_reload)
- status |= SHT15_STATUS_NO_OTP_RELOAD;
- if (data->pdata->low_resolution)
- status |= SHT15_STATUS_LOW_RESOLUTION;
-
/*
* If a regulator is available,
* query what the supply voltage actually is!
@@ -1030,21 +971,20 @@ static int sht15_probe(struct platform_device *pdev)
}
/* Try requesting the GPIOs */
- ret = devm_gpio_request_one(&pdev->dev, data->pdata->gpio_sck,
- GPIOF_OUT_INIT_LOW, "SHT15 sck");
- if (ret) {
+ data->sck = devm_gpiod_get(&pdev->dev, "clk", GPIOD_OUT_LOW);
+ if (IS_ERR(data->sck)) {
+ ret = PTR_ERR(data->sck);
dev_err(&pdev->dev, "clock line GPIO request failed\n");
goto err_release_reg;
}
-
- ret = devm_gpio_request(&pdev->dev, data->pdata->gpio_data,
- "SHT15 data");
- if (ret) {
+ data->data = devm_gpiod_get(&pdev->dev, "data", GPIOD_IN);
+ if (IS_ERR(data->data)) {
+ ret = PTR_ERR(data->data);
dev_err(&pdev->dev, "data line GPIO request failed\n");
goto err_release_reg;
}
- ret = devm_request_irq(&pdev->dev, gpio_to_irq(data->pdata->gpio_data),
+ ret = devm_request_irq(&pdev->dev, gpiod_to_irq(data->data),
sht15_interrupt_fired,
IRQF_TRIGGER_FALLING,
"sht15 data",
@@ -1053,7 +993,7 @@ static int sht15_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get irq for data line\n");
goto err_release_reg;
}
- disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ disable_irq_nosync(gpiod_to_irq(data->data));
ret = sht15_connection_reset(data);
if (ret)
goto err_release_reg;
@@ -1061,13 +1001,6 @@ static int sht15_probe(struct platform_device *pdev)
if (ret)
goto err_release_reg;
- /* write status with platform data options */
- if (status) {
- ret = sht15_send_status(data, status);
- if (ret)
- goto err_release_reg;
- }
-
ret = sysfs_create_group(&pdev->dev.kobj, &sht15_attr_group);
if (ret) {
dev_err(&pdev->dev, "sysfs create failed\n");
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index 3f940fb67dc6..7fe152d92350 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -396,7 +396,7 @@ static ssize_t show_max_alarm(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->max_alert);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->max_alert);
}
static ssize_t show_min_alarm(struct device *dev, struct device_attribute *attr,
@@ -413,7 +413,7 @@ static ssize_t show_min_alarm(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->min_alert);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->min_alert);
}
static ssize_t show_input(struct device *dev, struct device_attribute *attr,
@@ -428,7 +428,7 @@ static ssize_t show_input(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->temp);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->temp);
}
static ssize_t show_therm(struct device *dev, struct device_attribute *attr,
@@ -436,7 +436,7 @@ static ssize_t show_therm(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->therm);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->therm);
}
static ssize_t set_therm(struct device *dev, struct device_attribute *attr,
@@ -478,7 +478,7 @@ static ssize_t show_hyst(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->hyst);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->hyst);
}
static ssize_t set_hyst(struct device *dev, struct device_attribute *attr,
@@ -518,7 +518,7 @@ static ssize_t show_therm_trip(struct device *dev,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->therm_trip);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->therm_trip);
}
static ssize_t show_max(struct device *dev, struct device_attribute *attr,
@@ -526,7 +526,7 @@ static ssize_t show_max(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->event_max);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->event_max);
}
static ssize_t set_max(struct device *dev, struct device_attribute *attr,
@@ -560,7 +560,7 @@ static ssize_t show_min(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->event_min);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->event_min);
}
static ssize_t set_min(struct device *dev, struct device_attribute *attr,
@@ -594,7 +594,7 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n",
+ return snprintf(buf, PAGE_SIZE, "%d\n",
stts751_intervals[priv->interval]);
}
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index dab5c515d5a3..5ba9d9f1daa1 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1676,7 +1676,9 @@ static int w83793_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
+ static const int watchdog_minors[] = {
+ WATCHDOG_MINOR, 212, 213, 214, 215
+ };
struct w83793_data *data;
int i, tmp, val, err;
int files_fan = ARRAY_SIZE(w83793_left_fan) / 7;
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index e1be61095532..a3cd91f23267 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -91,6 +91,11 @@
#define to_xgene_hwmon_dev(cl) \
container_of(cl, struct xgene_hwmon_dev, mbox_client)
+enum xgene_hwmon_version {
+ XGENE_HWMON_V1 = 0,
+ XGENE_HWMON_V2 = 1,
+};
+
struct slimpro_resp_msg {
u32 msg;
u32 param1;
@@ -609,6 +614,15 @@ static void xgene_hwmon_tx_done(struct mbox_client *cl, void *msg, int ret)
}
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_hwmon_acpi_match[] = {
+ {"APMC0D29", XGENE_HWMON_V1},
+ {"APMC0D8A", XGENE_HWMON_V2},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_hwmon_acpi_match);
+#endif
+
static int xgene_hwmon_probe(struct platform_device *pdev)
{
struct xgene_hwmon_dev *ctx;
@@ -651,6 +665,15 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
}
} else {
struct acpi_pcct_hw_reduced *cppc_ss;
+ const struct acpi_device_id *acpi_id;
+ int version;
+
+ acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+ &pdev->dev);
+ if (!acpi_id)
+ return -EINVAL;
+
+ version = (int)acpi_id->driver_data;
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx)) {
@@ -693,7 +716,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
*/
ctx->comm_base_addr = cppc_ss->base_address;
if (ctx->comm_base_addr) {
- ctx->pcc_comm_addr = memremap(ctx->comm_base_addr,
+ if (version == XGENE_HWMON_V2)
+ ctx->pcc_comm_addr = (void __force *)ioremap(
+ ctx->comm_base_addr,
+ cppc_ss->length);
+ else
+ ctx->pcc_comm_addr = memremap(
+ ctx->comm_base_addr,
cppc_ss->length,
MEMREMAP_WB);
} else {
@@ -761,14 +790,6 @@ static int xgene_hwmon_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_hwmon_acpi_match[] = {
- {"APMC0D29", 0},
- {},
-};
-MODULE_DEVICE_TABLE(acpi, xgene_hwmon_acpi_match);
-#endif
-
static const struct of_device_id xgene_hwmon_of_match[] = {
{.compatible = "apm,xgene-slimpro-hwmon"},
{}
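
The probe change above keys the PCC shared-memory mapping off the ACPI match: the _HID selects a version through driver_data, and V2 parts use an uncached ioremap() while V1 keeps the cached memremap(). A userspace sketch of just the lookup side (the mapping choice is shown as a string, since the mapping calls themselves are kernel-only):

/*
 * Userspace sketch: map an ACPI _HID to a hardware version, mirroring the
 * xgene_hwmon_acpi_match table in the diff above.
 */
#include <stdio.h>
#include <string.h>

struct acpi_id { const char *hid; int version; };

static const struct acpi_id ids[] = {
	{ "APMC0D29", 1 },	/* XGENE_HWMON_V1 */
	{ "APMC0D8A", 2 },	/* XGENE_HWMON_V2 */
};

static int match_version(const char *hid)
{
	unsigned int i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		if (!strcmp(ids[i].hid, hid))
			return ids[i].version;
	return -1;
}

int main(void)
{
	const char *hid = "APMC0D8A";
	int version = match_version(hid);

	printf("%s -> %s\n", hid,
	       version == 2 ? "ioremap (uncached)" : "memremap WB (cached)");
	return 0;
}
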
diff --git a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
index accc2056f7c6..8f4357e2626c 100644
--- a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
@@ -199,8 +199,8 @@ static const struct dev_pm_ops replicator_dev_pm_ops = {
static const struct amba_id replicator_ids[] = {
{
- .id = 0x0003b909,
- .mask = 0x0003ffff,
+ .id = 0x000bb909,
+ .mask = 0x000fffff,
},
{
/* Coresight SoC-600 */
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 56ecd7aff5eb..e03e58933141 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -748,8 +748,8 @@ static const struct dev_pm_ops etb_dev_pm_ops = {
static const struct amba_id etb_ids[] = {
{
- .id = 0x0003b907,
- .mask = 0x0003ffff,
+ .id = 0x000bb907,
+ .mask = 0x000fffff,
},
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index e5b1ec57dbde..39f42fdd503d 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -901,33 +901,33 @@ static const struct dev_pm_ops etm_dev_pm_ops = {
static const struct amba_id etm_ids[] = {
{ /* ETM 3.3 */
- .id = 0x0003b921,
- .mask = 0x0003ffff,
+ .id = 0x000bb921,
+ .mask = 0x000fffff,
.data = "ETM 3.3",
},
{ /* ETM 3.5 - Cortex-A5 */
- .id = 0x0003b955,
- .mask = 0x0003ffff,
+ .id = 0x000bb955,
+ .mask = 0x000fffff,
.data = "ETM 3.5",
},
{ /* ETM 3.5 */
- .id = 0x0003b956,
- .mask = 0x0003ffff,
+ .id = 0x000bb956,
+ .mask = 0x000fffff,
.data = "ETM 3.5",
},
{ /* PTM 1.0 */
- .id = 0x0003b950,
- .mask = 0x0003ffff,
+ .id = 0x000bb950,
+ .mask = 0x000fffff,
.data = "PTM 1.0",
},
{ /* PTM 1.1 */
- .id = 0x0003b95f,
- .mask = 0x0003ffff,
+ .id = 0x000bb95f,
+ .mask = 0x000fffff,
.data = "PTM 1.1",
},
{ /* PTM 1.1 Qualcomm */
- .id = 0x0003006f,
- .mask = 0x0003ffff,
+ .id = 0x000b006f,
+ .mask = 0x000fffff,
.data = "PTM 1.1",
},
{ 0, 0},
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 77642e0e955b..fd3c396717f6 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -248,8 +248,8 @@ static const struct dev_pm_ops funnel_dev_pm_ops = {
static const struct amba_id funnel_ids[] = {
{
- .id = 0x0003b908,
- .mask = 0x0003ffff,
+ .id = 0x000bb908,
+ .mask = 0x000fffff,
},
{
/* Coresight SoC-600 */
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 92a780a6df1d..15e7ef3891f5 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -917,13 +917,13 @@ static const struct dev_pm_ops stm_dev_pm_ops = {
static const struct amba_id stm_ids[] = {
{
- .id = 0x0003b962,
- .mask = 0x0003ffff,
+ .id = 0x000bb962,
+ .mask = 0x000fffff,
.data = "STM32",
},
{
- .id = 0x0003b963,
- .mask = 0x0003ffff,
+ .id = 0x000bb963,
+ .mask = 0x000fffff,
.data = "STM500",
},
{ 0, 0},
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 2ff4a66a3caa..0ea04f588de0 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -439,8 +439,8 @@ out:
static const struct amba_id tmc_ids[] = {
{
- .id = 0x0003b961,
- .mask = 0x0003ffff,
+ .id = 0x000bb961,
+ .mask = 0x000fffff,
},
{
/* Coresight SoC 600 TMC-ETR/ETS */
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index d7a3e453016d..bef49a3a5ca7 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -194,8 +194,8 @@ static const struct dev_pm_ops tpiu_dev_pm_ops = {
static const struct amba_id tpiu_ids[] = {
{
- .id = 0x0003b912,
- .mask = 0x0003ffff,
+ .id = 0x000bb912,
+ .mask = 0x000fffff,
},
{
.id = 0x0004b912,
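
All of the coresight .id/.mask updates above rely on the usual AMBA matching rule, (periphid & mask) == id; widening the mask from 0x0003ffff to 0x000fffff simply makes the compare cover the full 20-bit part-number plus designer field. A standalone sketch of that compare (the periphid value is an example):

/*
 * Userspace sketch of AMBA ID matching: match when (periphid & mask) == id.
 */
#include <stdio.h>
#include <stdint.h>

struct amba_id_entry { uint32_t id; uint32_t mask; };

static int amba_match(uint32_t periphid, const struct amba_id_entry *e)
{
	return (periphid & e->mask) == e->id;
}

int main(void)
{
	const struct amba_id_entry etm33 = { 0x000bb921, 0x000fffff };
	uint32_t periphid = 0x000bb921;	/* designer 0xbb, part 0x921 */

	printf("ETM 3.3 match: %d\n", amba_match(periphid, &etm33));
	return 0;
}
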
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 6c0ae2996326..33e9a1b6ea7c 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -187,8 +187,8 @@ static struct configfs_attribute *stp_policy_node_attrs[] = {
NULL,
};
-static struct config_item_type stp_policy_type;
-static struct config_item_type stp_policy_node_type;
+static const struct config_item_type stp_policy_type;
+static const struct config_item_type stp_policy_node_type;
static struct config_group *
stp_policy_node_make(struct config_group *group, const char *name)
@@ -236,7 +236,7 @@ static struct configfs_group_operations stp_policy_node_group_ops = {
.drop_item = stp_policy_node_drop,
};
-static struct config_item_type stp_policy_node_type = {
+static const struct config_item_type stp_policy_node_type = {
.ct_item_ops = &stp_policy_node_item_ops,
.ct_group_ops = &stp_policy_node_group_ops,
.ct_attrs = stp_policy_node_attrs,
@@ -311,7 +311,7 @@ static struct configfs_group_operations stp_policy_group_ops = {
.make_group = stp_policy_node_make,
};
-static struct config_item_type stp_policy_type = {
+static const struct config_item_type stp_policy_type = {
.ct_item_ops = &stp_policy_item_ops,
.ct_group_ops = &stp_policy_group_ops,
.ct_attrs = stp_policy_attrs,
@@ -380,7 +380,7 @@ static struct configfs_group_operations stp_policies_group_ops = {
.make_group = stp_policies_make,
};
-static struct config_item_type stp_policies_type = {
+static const struct config_item_type stp_policies_type = {
.ct_group_ops = &stp_policies_group_ops,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 45a3f3ca29b3..009345d8f49d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -198,6 +198,11 @@ config I2C_CHT_WC
SMBus controller found in the Intel Cherry Trail Whiskey Cove PMIC
found on some Intel Cherry Trail systems.
+ Note this controller is hooked up to a TI bq24292i charger-IC,
+ combined with a FUSB302 Type-C port-controller; as such it is advised
+ to also select CONFIG_CHARGER_BQ24190=m and CONFIG_TYPEC_FUSB302=m
+ (the fusb302 driver is currently in drivers/staging).
+
config I2C_NFORCE2
tristate "Nvidia nForce2, nForce3 and nForce4"
depends on PCI
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 284f8670dbeb..7d4aeb4465b3 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -27,6 +27,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/slab.h>
/* I2C Register */
@@ -132,6 +133,7 @@ struct aspeed_i2c_bus {
struct i2c_adapter adap;
struct device *dev;
void __iomem *base;
+ struct reset_control *rst;
/* Synchronizes I/O mem access to base. */
spinlock_t lock;
struct completion cmd_complete;
@@ -847,6 +849,14 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
/* We just need the clock rate, we don't actually use the clk object. */
devm_clk_put(&pdev->dev, parent_clk);
+ bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
+ if (IS_ERR(bus->rst)) {
+ dev_err(&pdev->dev,
+ "missing or invalid reset controller device tree entry");
+ return PTR_ERR(bus->rst);
+ }
+ reset_control_deassert(bus->rst);
+
ret = of_property_read_u32(pdev->dev.of_node,
"bus-frequency", &bus->bus_frequency);
if (ret < 0) {
@@ -917,6 +927,8 @@ static int aspeed_i2c_remove_bus(struct platform_device *pdev)
spin_unlock_irqrestore(&bus->lock, flags);
+ reset_control_assert(bus->rst);
+
i2c_del_adapter(&bus->adap);
return 0;
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 190bbbc7bfee..0d05dadb2dc5 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -16,6 +16,7 @@
* GNU General Public License for more details.
*/
+#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -25,6 +26,7 @@
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/power/bq24190_charger.h>
#include <linux/slab.h>
#define CHT_WC_I2C_CTRL 0x5e24
@@ -232,13 +234,35 @@ static const struct irq_chip cht_wc_i2c_irq_chip = {
.name = "cht_wc_ext_chrg_irq_chip",
};
+static const char * const bq24190_suppliers[] = { "fusb302-typec-source" };
+
static const struct property_entry bq24190_props[] = {
- PROPERTY_ENTRY_STRING("extcon-name", "cht_wcove_pwrsrc"),
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers),
PROPERTY_ENTRY_BOOL("omit-battery-class"),
PROPERTY_ENTRY_BOOL("disable-reset"),
{ }
};
+static struct regulator_consumer_supply fusb302_consumer = {
+ .supply = "vbus",
+ /* Must match fusb302 dev_name in intel_cht_int33fe.c */
+ .dev_name = "i2c-fusb302",
+};
+
+static const struct regulator_init_data bq24190_vbus_init_data = {
+ .constraints = {
+ /* The name is used in intel_cht_int33fe.c, do not change. */
+ .name = "cht_wc_usb_typec_vbus",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = &fusb302_consumer,
+ .num_consumer_supplies = 1,
+};
+
+static struct bq24190_platform_data bq24190_pdata = {
+ .regulator_init_data = &bq24190_vbus_init_data,
+};
+
static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
@@ -246,7 +270,9 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
struct i2c_board_info board_info = {
.type = "bq24190",
.addr = 0x6b,
+ .dev_name = "bq24190",
.properties = bq24190_props,
+ .platform_data = &bq24190_pdata,
};
int ret, reg, irq;
@@ -314,11 +340,21 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
if (ret)
goto remove_irq_domain;
- board_info.irq = adap->client_irq;
- adap->client = i2c_new_device(&adap->adapter, &board_info);
- if (!adap->client) {
- ret = -ENOMEM;
- goto del_adapter;
+ /*
+ * Normally the Whiskey Cove PMIC is paired with a TI bq24292i charger,
+ * connected to this i2c bus, and a max17047 fuel-gauge and a fusb302
+ * USB Type-C controller connected to another i2c bus. In this setup
+ * the max17047 and fusb302 devices are enumerated through an INT33FE
+ * ACPI device. If this device is present, register an i2c-client for
+ * the TI bq24292i charger.
+ */
+ if (acpi_dev_present("INT33FE", NULL, -1)) {
+ board_info.irq = adap->client_irq;
+ adap->client = i2c_new_device(&adap->adapter, &board_info);
+ if (!adap->client) {
+ ret = -ENOMEM;
+ goto del_adapter;
+ }
}
platform_set_drvdata(pdev, adap);
@@ -335,7 +371,8 @@ static int cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev)
{
struct cht_wc_i2c_adap *adap = platform_get_drvdata(pdev);
- i2c_unregister_device(adap->client);
+ if (adap->client)
+ i2c_unregister_device(adap->client);
i2c_del_adapter(&adap->adapter);
irq_domain_remove(adap->irq_domain);
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index b8c43535f16c..2ead9b9eebb7 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -36,6 +36,7 @@
#include <linux/gpio.h>
#include <linux/of_device.h>
#include <linux/platform_data/i2c-davinci.h>
+#include <linux/pm_runtime.h>
/* ----- global defines ----------------------------------------------- */
@@ -122,6 +123,9 @@
/* set the SDA GPIO low */
#define DAVINCI_I2C_DCLR_PDCLR1 BIT(1)
+/* timeout for pm runtime autosuspend */
+#define DAVINCI_I2C_PM_TIMEOUT 1000 /* ms */
+
struct davinci_i2c_dev {
struct device *dev;
void __iomem *base;
@@ -500,7 +504,7 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
/* This should be 0 if all bytes were transferred
* or dev->cmd_err denotes an error.
*/
- dev_err(dev->dev, "abnormal termination buf_len=%i\n",
+ dev_err(dev->dev, "abnormal termination buf_len=%zu\n",
dev->buf_len);
dev->terminate = 1;
wmb();
@@ -541,10 +545,17 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to runtime_get device: %d\n", ret);
+ pm_runtime_put_noidle(dev->dev);
+ return ret;
+ }
+
ret = i2c_davinci_wait_bus_not_busy(dev);
if (ret < 0) {
dev_warn(dev->dev, "timeout waiting for bus ready\n");
- return ret;
+ goto out;
}
for (i = 0; i < num; i++) {
@@ -552,14 +563,19 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
dev_dbg(dev->dev, "%s [%d/%d] ret: %d\n", __func__, i + 1, num,
ret);
if (ret < 0)
- return ret;
+ goto out;
}
+ ret = num;
#ifdef CONFIG_CPU_FREQ
complete(&dev->xfr_complete);
#endif
- return num;
+out:
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return ret;
}
static u32 i2c_davinci_func(struct i2c_adapter *adap)
@@ -599,6 +615,9 @@ static irqreturn_t i2c_davinci_isr(int this_irq, void *dev_id)
int count = 0;
u16 w;
+ if (pm_runtime_suspended(dev->dev))
+ return IRQ_NONE;
+
while ((stat = davinci_i2c_read_reg(dev, DAVINCI_I2C_IVR_REG))) {
dev_dbg(dev->dev, "%s: stat=0x%x\n", __func__, stat);
if (count++ == 100) {
@@ -802,13 +821,24 @@ static int davinci_i2c_probe(struct platform_device *pdev)
dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dev->clk))
return PTR_ERR(dev->clk);
- clk_prepare_enable(dev->clk);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dev->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(dev->base)) {
- r = PTR_ERR(dev->base);
- goto err_unuse_clocks;
+ return PTR_ERR(dev->base);
+ }
+
+ pm_runtime_set_autosuspend_delay(dev->dev,
+ DAVINCI_I2C_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(dev->dev);
+
+ pm_runtime_enable(dev->dev);
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0) {
+ dev_err(dev->dev, "failed to runtime_get device: %d\n", r);
+ pm_runtime_put_noidle(dev->dev);
+ return r;
}
i2c_davinci_init(dev);
@@ -849,27 +879,40 @@ static int davinci_i2c_probe(struct platform_device *pdev)
if (r)
goto err_unuse_clocks;
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
return 0;
err_unuse_clocks:
- clk_disable_unprepare(dev->clk);
- dev->clk = NULL;
+ pm_runtime_dont_use_autosuspend(dev->dev);
+ pm_runtime_put_sync(dev->dev);
+ pm_runtime_disable(dev->dev);
+
return r;
}
static int davinci_i2c_remove(struct platform_device *pdev)
{
struct davinci_i2c_dev *dev = platform_get_drvdata(pdev);
+ int ret;
i2c_davinci_cpufreq_deregister(dev);
i2c_del_adapter(&dev->adapter);
- clk_disable_unprepare(dev->clk);
- dev->clk = NULL;
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return ret;
+ }
davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
+ pm_runtime_dont_use_autosuspend(dev->dev);
+ pm_runtime_put_sync(dev->dev);
+ pm_runtime_disable(dev->dev);
+
return 0;
}
@@ -880,7 +923,6 @@ static int davinci_i2c_suspend(struct device *dev)
/* put I2C into reset */
davinci_i2c_reset_ctrl(i2c_dev, 0);
- clk_disable_unprepare(i2c_dev->clk);
return 0;
}
@@ -889,7 +931,6 @@ static int davinci_i2c_resume(struct device *dev)
{
struct davinci_i2c_dev *i2c_dev = dev_get_drvdata(dev);
- clk_prepare_enable(i2c_dev->clk);
/* take I2C out of reset */
davinci_i2c_reset_ctrl(i2c_dev, 1);
@@ -899,6 +940,8 @@ static int davinci_i2c_resume(struct device *dev)
static const struct dev_pm_ops davinci_i2c_pm = {
.suspend = davinci_i2c_suspend,
.resume = davinci_i2c_resume,
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
#define davinci_i2c_pm_ops (&davinci_i2c_pm)
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 9fee4c054d3d..21bf619a86c5 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -280,6 +280,8 @@ struct dw_i2c_dev {
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_disabled;
+ bool suspended;
+ bool skip_resume;
void (*disable)(struct dw_i2c_dev *dev);
void (*disable_int)(struct dw_i2c_dev *dev);
int (*init)(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0e65b97842b4..58add69a441c 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -249,6 +249,14 @@ static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id)
}
}
+static void dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *dev)
+{
+ pm_runtime_disable(dev->dev);
+
+ if (dev->pm_disabled)
+ pm_runtime_put_noidle(dev->dev);
+}
+
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -257,7 +265,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
u32 acpi_speed, ht = 0;
struct resource *mem;
int i, irq, ret;
- const int supported_speeds[] = { 0, 100000, 400000, 1000000, 3400000 };
+ static const int supported_speeds[] = {
+ 0, 100000, 400000, 1000000, 3400000
+ };
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -362,14 +372,17 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
adap->dev.of_node = pdev->dev.of_node;
- if (dev->pm_disabled) {
- pm_runtime_forbid(&pdev->dev);
- } else {
- pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- }
+ /* The code below assumes runtime PM to be disabled. */
+ WARN_ON(pm_runtime_enabled(&pdev->dev));
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+
+ if (dev->pm_disabled)
+ pm_runtime_get_noresume(&pdev->dev);
+
+ pm_runtime_enable(&pdev->dev);
if (dev->mode == DW_IC_SLAVE)
ret = i2c_dw_probe_slave(dev);
@@ -382,8 +395,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
return ret;
exit_probe:
- if (!dev->pm_disabled)
- pm_runtime_disable(&pdev->dev);
+ dw_i2c_plat_pm_cleanup(dev);
exit_reset:
if (!IS_ERR_OR_NULL(dev->rst))
reset_control_assert(dev->rst);
@@ -402,8 +414,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
- if (!dev->pm_disabled)
- pm_runtime_disable(&pdev->dev);
+ dw_i2c_plat_pm_cleanup(dev);
+
if (!IS_ERR_OR_NULL(dev->rst))
reset_control_assert(dev->rst);
@@ -437,13 +449,20 @@ static void dw_i2c_plat_complete(struct device *dev)
#endif
#ifdef CONFIG_PM
-static int dw_i2c_plat_runtime_suspend(struct device *dev)
+static int dw_i2c_plat_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+ if (i_dev->suspended) {
+ i_dev->skip_resume = true;
+ return 0;
+ }
+
i_dev->disable(i_dev);
i2c_dw_plat_prepare_clk(i_dev, false);
+ i_dev->suspended = true;
+
return 0;
}
@@ -451,27 +470,27 @@ static int dw_i2c_plat_resume(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+ if (!i_dev->suspended)
+ return 0;
+
+ if (i_dev->skip_resume) {
+ i_dev->skip_resume = false;
+ return 0;
+ }
+
i2c_dw_plat_prepare_clk(i_dev, true);
i_dev->init(i_dev);
- return 0;
-}
+ i_dev->suspended = false;
-#ifdef CONFIG_PM_SLEEP
-static int dw_i2c_plat_suspend(struct device *dev)
-{
- pm_runtime_resume(dev);
- return dw_i2c_plat_runtime_suspend(dev);
+ return 0;
}
-#endif
static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
.prepare = dw_i2c_plat_prepare,
.complete = dw_i2c_plat_complete,
- SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
- SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
- dw_i2c_plat_resume,
- NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
+ SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
};
#define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 0ef8fcc6ac3a..d80ea6ce91bb 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -14,27 +14,17 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
struct i2c_gpio_private_data {
+ struct gpio_desc *sda;
+ struct gpio_desc *scl;
struct i2c_adapter adap;
struct i2c_algo_bit_data bit_data;
struct i2c_gpio_platform_data pdata;
};
-/* Toggle SDA by changing the direction of the pin */
-static void i2c_gpio_setsda_dir(void *data, int state)
-{
- struct i2c_gpio_platform_data *pdata = data;
-
- if (state)
- gpio_direction_input(pdata->sda_pin);
- else
- gpio_direction_output(pdata->sda_pin, 0);
-}
-
/*
* Toggle SDA by changing the output value of the pin. This is only
* valid for pins configured as open drain (i.e. setting the value
@@ -42,20 +32,9 @@ static void i2c_gpio_setsda_dir(void *data, int state)
*/
static void i2c_gpio_setsda_val(void *data, int state)
{
- struct i2c_gpio_platform_data *pdata = data;
-
- gpio_set_value(pdata->sda_pin, state);
-}
-
-/* Toggle SCL by changing the direction of the pin. */
-static void i2c_gpio_setscl_dir(void *data, int state)
-{
- struct i2c_gpio_platform_data *pdata = data;
+ struct i2c_gpio_private_data *priv = data;
- if (state)
- gpio_direction_input(pdata->scl_pin);
- else
- gpio_direction_output(pdata->scl_pin, 0);
+ gpiod_set_value(priv->sda, state);
}
/*
@@ -66,44 +45,23 @@ static void i2c_gpio_setscl_dir(void *data, int state)
*/
static void i2c_gpio_setscl_val(void *data, int state)
{
- struct i2c_gpio_platform_data *pdata = data;
+ struct i2c_gpio_private_data *priv = data;
- gpio_set_value(pdata->scl_pin, state);
+ gpiod_set_value(priv->scl, state);
}
static int i2c_gpio_getsda(void *data)
{
- struct i2c_gpio_platform_data *pdata = data;
+ struct i2c_gpio_private_data *priv = data;
- return gpio_get_value(pdata->sda_pin);
+ return gpiod_get_value(priv->sda);
}
static int i2c_gpio_getscl(void *data)
{
- struct i2c_gpio_platform_data *pdata = data;
-
- return gpio_get_value(pdata->scl_pin);
-}
-
-static int of_i2c_gpio_get_pins(struct device_node *np,
- unsigned int *sda_pin, unsigned int *scl_pin)
-{
- if (of_gpio_count(np) < 2)
- return -ENODEV;
-
- *sda_pin = of_get_gpio(np, 0);
- *scl_pin = of_get_gpio(np, 1);
-
- if (*sda_pin == -EPROBE_DEFER || *scl_pin == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- if (!gpio_is_valid(*sda_pin) || !gpio_is_valid(*scl_pin)) {
- pr_err("%pOF: invalid GPIO pins, sda=%d/scl=%d\n",
- np, *sda_pin, *scl_pin);
- return -ENODEV;
- }
+ struct i2c_gpio_private_data *priv = data;
- return 0;
+ return gpiod_get_value(priv->scl);
}
static void of_i2c_gpio_get_props(struct device_node *np,
@@ -124,72 +82,105 @@ static void of_i2c_gpio_get_props(struct device_node *np,
of_property_read_bool(np, "i2c-gpio,scl-output-only");
}
+static struct gpio_desc *i2c_gpio_get_desc(struct device *dev,
+ const char *con_id,
+ unsigned int index,
+ enum gpiod_flags gflags)
+{
+ struct gpio_desc *retdesc;
+ int ret;
+
+ retdesc = devm_gpiod_get(dev, con_id, gflags);
+ if (!IS_ERR(retdesc)) {
+ dev_dbg(dev, "got GPIO from name %s\n", con_id);
+ return retdesc;
+ }
+
+ retdesc = devm_gpiod_get_index(dev, NULL, index, gflags);
+ if (!IS_ERR(retdesc)) {
+ dev_dbg(dev, "got GPIO from index %u\n", index);
+ return retdesc;
+ }
+
+ ret = PTR_ERR(retdesc);
+
+ /* FIXME: hack in the old code, is this really necessary? */
+ if (ret == -EINVAL)
+ retdesc = ERR_PTR(-EPROBE_DEFER);
+
+ /* This happens if the GPIO driver is not yet probed, let's defer */
+ if (ret == -ENOENT)
+ retdesc = ERR_PTR(-EPROBE_DEFER);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "error trying to get descriptor: %d\n", ret);
+
+ return retdesc;
+}
+
static int i2c_gpio_probe(struct platform_device *pdev)
{
struct i2c_gpio_private_data *priv;
struct i2c_gpio_platform_data *pdata;
struct i2c_algo_bit_data *bit_data;
struct i2c_adapter *adap;
- unsigned int sda_pin, scl_pin;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ enum gpiod_flags gflags;
int ret;
- /* First get the GPIO pins; if it fails, we'll defer the probe. */
- if (pdev->dev.of_node) {
- ret = of_i2c_gpio_get_pins(pdev->dev.of_node,
- &sda_pin, &scl_pin);
- if (ret)
- return ret;
- } else {
- if (!dev_get_platdata(&pdev->dev))
- return -ENXIO;
- pdata = dev_get_platdata(&pdev->dev);
- sda_pin = pdata->sda_pin;
- scl_pin = pdata->scl_pin;
- }
-
- ret = devm_gpio_request(&pdev->dev, sda_pin, "sda");
- if (ret) {
- if (ret == -EINVAL)
- ret = -EPROBE_DEFER; /* Try again later */
- return ret;
- }
- ret = devm_gpio_request(&pdev->dev, scl_pin, "scl");
- if (ret) {
- if (ret == -EINVAL)
- ret = -EPROBE_DEFER; /* Try again later */
- return ret;
- }
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+
adap = &priv->adap;
bit_data = &priv->bit_data;
pdata = &priv->pdata;
- if (pdev->dev.of_node) {
- pdata->sda_pin = sda_pin;
- pdata->scl_pin = scl_pin;
- of_i2c_gpio_get_props(pdev->dev.of_node, pdata);
+ if (np) {
+ of_i2c_gpio_get_props(np, pdata);
} else {
- memcpy(pdata, dev_get_platdata(&pdev->dev), sizeof(*pdata));
+ /*
+ * If all platform data settings are zero, it is OK
+ * to not provide any platform data from the board.
+ */
+ if (dev_get_platdata(dev))
+ memcpy(pdata, dev_get_platdata(dev), sizeof(*pdata));
}
- if (pdata->sda_is_open_drain) {
- gpio_direction_output(pdata->sda_pin, 1);
- bit_data->setsda = i2c_gpio_setsda_val;
- } else {
- gpio_direction_input(pdata->sda_pin);
- bit_data->setsda = i2c_gpio_setsda_dir;
- }
+ /*
+ * First get the GPIO pins; if it fails, we'll defer the probe.
+ * If the SDA line is marked from platform data or device tree as
+ * "open drain" it means something outside of our control is making
+ * this line being handled as open drain, and we should just handle
+ * it as any other output. Else we enforce open drain as this is
+ * required for an I2C bus.
+ */
+ if (pdata->sda_is_open_drain)
+ gflags = GPIOD_OUT_HIGH;
+ else
+ gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
+ priv->sda = i2c_gpio_get_desc(dev, "sda", 0, gflags);
+ if (IS_ERR(priv->sda))
+ return PTR_ERR(priv->sda);
+
+ /*
+ * If the SCL line is marked from platform data or device tree as
+ * "open drain" it means something outside of our control is making
+ * this line being handled as open drain, and we should just handle
+ * it as any other output. Else we enforce open drain as this is
+ * required for an I2C bus.
+ */
+ if (pdata->scl_is_open_drain)
+ gflags = GPIOD_OUT_LOW;
+ else
+ gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
+ priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags);
+ if (IS_ERR(priv->scl))
+ return PTR_ERR(priv->scl);
- if (pdata->scl_is_open_drain || pdata->scl_is_output_only) {
- gpio_direction_output(pdata->scl_pin, 1);
- bit_data->setscl = i2c_gpio_setscl_val;
- } else {
- gpio_direction_input(pdata->scl_pin);
- bit_data->setscl = i2c_gpio_setscl_dir;
- }
+ bit_data->setsda = i2c_gpio_setsda_val;
+ bit_data->setscl = i2c_gpio_setscl_val;
if (!pdata->scl_is_output_only)
bit_data->getscl = i2c_gpio_getscl;
@@ -207,18 +198,18 @@ static int i2c_gpio_probe(struct platform_device *pdev)
else
bit_data->timeout = HZ / 10; /* 100 ms */
- bit_data->data = pdata;
+ bit_data->data = priv;
adap->owner = THIS_MODULE;
- if (pdev->dev.of_node)
- strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
+ if (np)
+ strlcpy(adap->name, dev_name(dev), sizeof(adap->name));
else
snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
adap->algo_data = bit_data;
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
- adap->dev.parent = &pdev->dev;
- adap->dev.of_node = pdev->dev.of_node;
+ adap->dev.parent = dev;
+ adap->dev.of_node = np;
adap->nr = pdev->id;
ret = i2c_bit_add_numbered_bus(adap);
@@ -227,8 +218,13 @@ static int i2c_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- dev_info(&pdev->dev, "using pins %u (SDA) and %u (SCL%s)\n",
- pdata->sda_pin, pdata->scl_pin,
+ /*
+ * FIXME: using global GPIO numbers is not helpful. If/when we
+ * get accessors for the actual name of the GPIO line from the
+ * descriptor, provide that instead.
+ */
+ dev_info(dev, "using lines %u (SDA) and %u (SCL%s)\n",
+ desc_to_gpio(priv->sda), desc_to_gpio(priv->scl),
pdata->scl_is_output_only
? ", no clock stretching" : "");
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index eb1d91b986fd..f038858b6c54 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -82,6 +82,7 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/timer.h>
@@ -280,6 +281,8 @@
#define ISR_COMPLETE(err) (ISR_COMPLETE_M | (ISR_STATUS_M & (err)))
#define ISR_FATAL(err) (ISR_COMPLETE(err) | ISR_FATAL_M)
+#define IMG_I2C_PM_TIMEOUT 1000 /* ms */
+
enum img_i2c_mode {
MODE_INACTIVE,
MODE_RAW,
@@ -408,6 +411,9 @@ struct img_i2c {
unsigned int raw_timeout;
};
+static int img_i2c_runtime_suspend(struct device *dev);
+static int img_i2c_runtime_resume(struct device *dev);
+
static void img_i2c_writel(struct img_i2c *i2c, u32 offset, u32 value)
{
writel(value, i2c->base + offset);
@@ -826,9 +832,9 @@ next_atomic_cmd:
* Timer function to check if something has gone wrong in automatic mode (so we
* don't have to handle so many interrupts just to catch an exception).
*/
-static void img_i2c_check_timer(unsigned long arg)
+static void img_i2c_check_timer(struct timer_list *t)
{
- struct img_i2c *i2c = (struct img_i2c *)arg;
+ struct img_i2c *i2c = from_timer(i2c, t, check_timer);
unsigned long flags;
unsigned int line_status;
@@ -1054,8 +1060,8 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
atomic = true;
}
- ret = clk_prepare_enable(i2c->scb_clk);
- if (ret)
+ ret = pm_runtime_get_sync(adap->dev.parent);
+ if (ret < 0)
return ret;
for (i = 0; i < num; i++) {
@@ -1131,7 +1137,8 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
break;
}
- clk_disable_unprepare(i2c->scb_clk);
+ pm_runtime_mark_last_busy(adap->dev.parent);
+ pm_runtime_put_autosuspend(adap->dev.parent);
return i2c->msg_status ? i2c->msg_status : num;
}
@@ -1149,12 +1156,13 @@ static const struct i2c_algorithm img_i2c_algo = {
static int img_i2c_init(struct img_i2c *i2c)
{
unsigned int clk_khz, bitrate_khz, clk_period, tckh, tckl, tsdh;
- unsigned int i, ret, data, prescale, inc, int_bitrate, filt;
+ unsigned int i, data, prescale, inc, int_bitrate, filt;
struct img_i2c_timings timing;
u32 rev;
+ int ret;
- ret = clk_prepare_enable(i2c->scb_clk);
- if (ret)
+ ret = pm_runtime_get_sync(i2c->adap.dev.parent);
+ if (ret < 0)
return ret;
rev = img_i2c_readl(i2c, SCB_CORE_REV_REG);
@@ -1163,7 +1171,8 @@ static int img_i2c_init(struct img_i2c *i2c)
"Unknown hardware revision (%d.%d.%d.%d)\n",
(rev >> 24) & 0xff, (rev >> 16) & 0xff,
(rev >> 8) & 0xff, rev & 0xff);
- clk_disable_unprepare(i2c->scb_clk);
+ pm_runtime_mark_last_busy(i2c->adap.dev.parent);
+ pm_runtime_put_autosuspend(i2c->adap.dev.parent);
return -EINVAL;
}
@@ -1314,7 +1323,8 @@ static int img_i2c_init(struct img_i2c *i2c)
/* Perform a synchronous sequence to reset the bus */
ret = img_i2c_reset_bus(i2c);
- clk_disable_unprepare(i2c->scb_clk);
+ pm_runtime_mark_last_busy(i2c->adap.dev.parent);
+ pm_runtime_put_autosuspend(i2c->adap.dev.parent);
return ret;
}
@@ -1362,8 +1372,7 @@ static int img_i2c_probe(struct platform_device *pdev)
}
/* Set up the exception check timer */
- setup_timer(&i2c->check_timer, img_i2c_check_timer,
- (unsigned long)i2c);
+ timer_setup(&i2c->check_timer, img_i2c_check_timer, 0);
i2c->bitrate = timings[0].max_bitrate;
if (!of_property_read_u32(node, "clock-frequency", &val))
@@ -1384,22 +1393,30 @@ static int img_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
- ret = clk_prepare_enable(i2c->sys_clk);
- if (ret)
- return ret;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_I2C_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = img_i2c_runtime_resume(&pdev->dev);
+ if (ret)
+ return ret;
+ }
ret = img_i2c_init(i2c);
if (ret)
- goto disable_clk;
+ goto rpm_disable;
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0)
- goto disable_clk;
+ goto rpm_disable;
return 0;
-disable_clk:
- clk_disable_unprepare(i2c->sys_clk);
+rpm_disable:
+ if (!pm_runtime_enabled(&pdev->dev))
+ img_i2c_runtime_suspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
return ret;
}
@@ -1408,19 +1425,55 @@ static int img_i2c_remove(struct platform_device *dev)
struct img_i2c *i2c = platform_get_drvdata(dev);
i2c_del_adapter(&i2c->adap);
+ pm_runtime_disable(&dev->dev);
+ if (!pm_runtime_status_suspended(&dev->dev))
+ img_i2c_runtime_suspend(&dev->dev);
+
+ return 0;
+}
+
+static int img_i2c_runtime_suspend(struct device *dev)
+{
+ struct img_i2c *i2c = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(i2c->scb_clk);
clk_disable_unprepare(i2c->sys_clk);
return 0;
}
+static int img_i2c_runtime_resume(struct device *dev)
+{
+ struct img_i2c *i2c = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(i2c->sys_clk);
+ if (ret) {
+ dev_err(dev, "Unable to enable sys clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(i2c->scb_clk);
+ if (ret) {
+ dev_err(dev, "Unable to enable scb clock\n");
+ clk_disable_unprepare(i2c->sys_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
static int img_i2c_suspend(struct device *dev)
{
struct img_i2c *i2c = dev_get_drvdata(dev);
+ int ret;
- img_i2c_switch_mode(i2c, MODE_SUSPEND);
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
- clk_disable_unprepare(i2c->sys_clk);
+ img_i2c_switch_mode(i2c, MODE_SUSPEND);
return 0;
}
@@ -1430,7 +1483,7 @@ static int img_i2c_resume(struct device *dev)
struct img_i2c *i2c = dev_get_drvdata(dev);
int ret;
- ret = clk_prepare_enable(i2c->sys_clk);
+ ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
@@ -1440,7 +1493,12 @@ static int img_i2c_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(img_i2c_pm, img_i2c_suspend, img_i2c_resume);
+static const struct dev_pm_ops img_i2c_pm = {
+ SET_RUNTIME_PM_OPS(img_i2c_runtime_suspend,
+ img_i2c_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(img_i2c_suspend, img_i2c_resume)
+};
static const struct of_device_id img_scb_i2c_match[] = {
{ .compatible = "img,scb-i2c" },
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 96caf378b1dc..950a9d74f54d 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -322,7 +322,7 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] = {
static u32 mpc_i2c_get_sec_cfg_8xxx(void)
{
- struct device_node *node = NULL;
+ struct device_node *node;
u32 __iomem *reg;
u32 val = 0;
@@ -700,7 +700,7 @@ static int fsl_i2c_probe(struct platform_device *op)
}
}
- if (of_get_property(op->dev.of_node, "fsl,preserve-clocking", NULL)) {
+ if (of_property_read_bool(op->dev.of_node, "fsl,preserve-clocking")) {
clock = MPC_I2C_CLOCK_PRESERVE;
} else {
prop = of_get_property(op->dev.of_node, "clock-frequency",
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 23c2ea2baedc..b9172f08fd05 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -487,6 +487,22 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
}
/*
+ * Try bus recovery, but only if SDA is actually low.
+ */
+static int omap_i2c_recover_bus(struct omap_i2c_dev *omap)
+{
+ u16 systest;
+
+ systest = omap_i2c_read_reg(omap, OMAP_I2C_SYSTEST_REG);
+ if ((systest & OMAP_I2C_SYSTEST_SCL_I_FUNC) &&
+ (systest & OMAP_I2C_SYSTEST_SDA_I_FUNC))
+ return 0; /* bus seems to already be fine */
+ if (!(systest & OMAP_I2C_SYSTEST_SCL_I_FUNC))
+ return -EBUSY; /* recovery would not fix SCL */
+ return i2c_recover_bus(&omap->adapter);
+}
+
+/*
* Waiting on Bus Busy
*/
static int omap_i2c_wait_for_bb(struct omap_i2c_dev *omap)
@@ -496,7 +512,7 @@ static int omap_i2c_wait_for_bb(struct omap_i2c_dev *omap)
timeout = jiffies + OMAP_I2C_TIMEOUT;
while (omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB) {
if (time_after(jiffies, timeout))
- return i2c_recover_bus(&omap->adapter);
+ return omap_i2c_recover_bus(omap);
msleep(1);
}
@@ -577,8 +593,13 @@ static int omap_i2c_wait_for_bb_valid(struct omap_i2c_dev *omap)
}
if (time_after(jiffies, timeout)) {
+ /*
+ * SDA or SCL were low for the entire timeout without
+ * any activity detected. Most likely, a slave is
+ * locking up the bus with no master driving the clock.
+ */
dev_warn(omap->dev, "timeout waiting for bus ready\n");
- return -ETIMEDOUT;
+ return omap_i2c_recover_bus(omap);
}
msleep(1);
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index faa8fb8f2b8f..fa41ff799533 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -123,7 +123,6 @@ static struct i2c_adapter parport_adapter = {
/* SMBus alert support */
static struct i2c_smbus_alert_setup alert_data = {
- .alert_edge_triggered = 1,
};
static struct i2c_client *ara;
static struct lineop parport_ctrl_irq = {
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index a8e54df4aed6..319209a07353 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -237,7 +237,6 @@ static void i2c_parport_attach(struct parport *port)
/* Setup SMBus alert if supported */
if (adapter_parm[type].smbus_alert) {
- adapter->alert_data.alert_edge_triggered = 1;
adapter->ara = i2c_setup_smbus_alert(&adapter->adapter,
&adapter->alert_data);
if (adapter->ara)
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 42d6b3a226f8..a542041df0cd 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -112,7 +112,6 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
jiffies, expires);
timer->expires = jiffies + expires;
- timer->data = (unsigned long)alg_data;
add_timer(timer);
}
@@ -435,9 +434,9 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void i2c_pnx_timeout(unsigned long data)
+static void i2c_pnx_timeout(struct timer_list *t)
{
- struct i2c_pnx_algo_data *alg_data = (struct i2c_pnx_algo_data *)data;
+ struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer);
u32 ctl;
dev_err(&alg_data->adapter.dev,
@@ -659,8 +658,7 @@ static int i2c_pnx_probe(struct platform_device *pdev)
if (IS_ERR(alg_data->clk))
return PTR_ERR(alg_data->clk);
- setup_timer(&alg_data->mif.timer, i2c_pnx_timeout,
- (unsigned long)alg_data);
+ timer_setup(&alg_data->mif.timer, i2c_pnx_timeout, 0);
snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name),
"%s", pdev->name);
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index c811af4c8d81..95c2f1ce3cad 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -84,12 +84,7 @@
#define ICSR2_NACKF 0x10
-/* ICBRx (@ PCLK 33MHz) */
#define ICBR_RESERVED 0xe0 /* Should be 1 on writes */
-#define ICBRL_SP100K (19 | ICBR_RESERVED)
-#define ICBRH_SP100K (16 | ICBR_RESERVED)
-#define ICBRL_SP400K (21 | ICBR_RESERVED)
-#define ICBRH_SP400K (9 | ICBR_RESERVED)
#define RIIC_INIT_MSG -1
@@ -288,48 +283,99 @@ static const struct i2c_algorithm riic_algo = {
.functionality = riic_func,
};
-static int riic_init_hw(struct riic_dev *riic, u32 spd)
+static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
{
int ret;
unsigned long rate;
+ int total_ticks, cks, brl, brh;
ret = clk_prepare_enable(riic->clk);
if (ret)
return ret;
+ if (t->bus_freq_hz > 400000) {
+ dev_err(&riic->adapter.dev,
+ "unsupported bus speed (%dHz). 400000 max\n",
+ t->bus_freq_hz);
+ clk_disable_unprepare(riic->clk);
+ return -EINVAL;
+ }
+
+ rate = clk_get_rate(riic->clk);
+
/*
- * TODO: Implement formula to calculate the timing values depending on
- * variable parent clock rate and arbitrary bus speed
+ * Assume the default register settings:
+ * FER.SCLE = 1 (SCL sync circuit enabled, adds 2 or 3 cycles)
+ * FER.NFE = 1 (noise circuit enabled)
+ * MR3.NF = 0 (1 cycle of noise filtered out)
+ *
+ * Freq (CKS=000) = (I2CCLK + tr + tf)/ (BRH + 3 + 1) + (BRL + 3 + 1)
+ * Freq (CKS!=000) = (I2CCLK + tr + tf)/ (BRH + 2 + 1) + (BRL + 2 + 1)
*/
- rate = clk_get_rate(riic->clk);
- if (rate != 33325000) {
- dev_err(&riic->adapter.dev,
- "invalid parent clk (%lu). Must be 33325000Hz\n", rate);
+
+ /*
+ * Determine reference clock rate. We must be able to get the desired
+ * frequency with only 62 clock ticks max (31 high, 31 low).
+ * Aim for a duty of 60% LOW, 40% HIGH.
+ */
+ total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz);
+
+ for (cks = 0; cks < 7; cks++) {
+ /*
+ * 60% low time must be less than BRL + 2 + 1
+ * BRL max register value is 0x1F.
+ */
+ brl = ((total_ticks * 6) / 10);
+ if (brl <= (0x1F + 3))
+ break;
+
+ total_ticks /= 2;
+ rate /= 2;
+ }
+
+ if (brl > (0x1F + 3)) {
+ dev_err(&riic->adapter.dev, "invalid speed (%lu). Too slow.\n",
+ (unsigned long)t->bus_freq_hz);
clk_disable_unprepare(riic->clk);
return -EINVAL;
}
+ brh = total_ticks - brl;
+
+ /* Remove automatic clock ticks for sync circuit and NF */
+ if (cks == 0) {
+ brl -= 4;
+ brh -= 4;
+ } else {
+ brl -= 3;
+ brh -= 3;
+ }
+
+ /*
+ * Remove clock ticks for rise and fall times. Convert ns to clock
+ * ticks.
+ */
+ brl -= t->scl_fall_ns / (1000000000 / rate);
+ brh -= t->scl_rise_ns / (1000000000 / rate);
+
+ /* Adjust for min register values for when SCLE=1 and NFE=1 */
+ if (brl < 1)
+ brl = 1;
+ if (brh < 1)
+ brh = 1;
+
+ pr_debug("i2c-riic: freq=%lu, duty=%d, fall=%lu, rise=%lu, cks=%d, brl=%d, brh=%d\n",
+ rate / total_ticks, ((brl + 3) * 100) / (brl + brh + 6),
+ t->scl_fall_ns / (1000000000 / rate),
+ t->scl_rise_ns / (1000000000 / rate), cks, brl, brh);
+
/* Changing the order of accessing IICRST and ICE may break things! */
writeb(ICCR1_IICRST | ICCR1_SOWP, riic->base + RIIC_ICCR1);
riic_clear_set_bit(riic, 0, ICCR1_ICE, RIIC_ICCR1);
- switch (spd) {
- case 100000:
- writeb(ICMR1_CKS(3), riic->base + RIIC_ICMR1);
- writeb(ICBRH_SP100K, riic->base + RIIC_ICBRH);
- writeb(ICBRL_SP100K, riic->base + RIIC_ICBRL);
- break;
- case 400000:
- writeb(ICMR1_CKS(1), riic->base + RIIC_ICMR1);
- writeb(ICBRH_SP400K, riic->base + RIIC_ICBRH);
- writeb(ICBRL_SP400K, riic->base + RIIC_ICBRL);
- break;
- default:
- dev_err(&riic->adapter.dev,
- "unsupported bus speed (%dHz). Use 100000 or 400000\n", spd);
- clk_disable_unprepare(riic->clk);
- return -EINVAL;
- }
+ writeb(ICMR1_CKS(cks), riic->base + RIIC_ICMR1);
+ writeb(brh | ICBR_RESERVED, riic->base + RIIC_ICBRH);
+ writeb(brl | ICBR_RESERVED, riic->base + RIIC_ICBRL);
writeb(0, riic->base + RIIC_ICSER);
writeb(ICMR3_ACKWP | ICMR3_RDRFS, riic->base + RIIC_ICMR3);
@@ -351,11 +397,10 @@ static struct riic_irq_desc riic_irqs[] = {
static int riic_i2c_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct riic_dev *riic;
struct i2c_adapter *adap;
struct resource *res;
- u32 bus_rate = 0;
+ struct i2c_timings i2c_t;
int i, ret;
riic = devm_kzalloc(&pdev->dev, sizeof(*riic), GFP_KERNEL);
@@ -396,8 +441,9 @@ static int riic_i2c_probe(struct platform_device *pdev)
init_completion(&riic->msg_done);
- of_property_read_u32(np, "clock-frequency", &bus_rate);
- ret = riic_init_hw(riic, bus_rate);
+ i2c_parse_fw_timings(&pdev->dev, &i2c_t, true);
+
+ ret = riic_init_hw(riic, &i2c_t);
if (ret)
return ret;
@@ -408,7 +454,8 @@ static int riic_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, riic);
- dev_info(&pdev->dev, "registered with %dHz bus speed\n", bus_rate);
+ dev_info(&pdev->dev, "registered with %dHz bus speed\n",
+ i2c_t.bus_freq_hz);
return 0;
}
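As a quick sanity check of the BRL/BRH math introduced above, the same steps can be re-run outside the kernel. The sketch below is illustrative only: the 33.325 MHz parent clock is simply the rate the old code required, and the 300 ns rise/fall times are assumed Fast-mode defaults such as i2c_parse_fw_timings() would supply when the device tree gives none.

/* Illustrative, user-space re-run of the BRL/BRH calculation above. */
#include <stdio.h>

int main(void)
{
	unsigned long rate = 33325000;		/* parent clock in Hz (assumed) */
	unsigned long bus_freq = 400000;	/* requested bus speed in Hz */
	unsigned long fall_ns = 300, rise_ns = 300;	/* assumed defaults */
	long total_ticks, cks, brl, brh;

	total_ticks = (rate + bus_freq - 1) / bus_freq;	/* DIV_ROUND_UP */

	/* Halve the reference clock until 60% low time fits in BRL (max 0x1F) */
	for (cks = 0; cks < 7; cks++) {
		brl = (total_ticks * 6) / 10;
		if (brl <= 0x1F + 3)
			break;
		total_ticks /= 2;
		rate /= 2;
	}

	brh = total_ticks - brl;

	/* remove the ticks added by the SCL sync circuit and noise filter */
	brl -= (cks == 0) ? 4 : 3;
	brh -= (cks == 0) ? 4 : 3;

	/* remove rise/fall times, converted from ns to reference clock ticks */
	brl -= fall_ns / (1000000000UL / rate);
	brh -= rise_ns / (1000000000UL / rate);

	if (brl < 1)
		brl = 1;
	if (brh < 1)
		brh = 1;

	/* With these inputs: cks=1, brl=17, brh=9, roughly 397 kHz */
	printf("cks=%ld brl=%ld brh=%ld (~%lu Hz)\n",
	       cks, brl, brh, rate / total_ticks);
	return 0;
}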
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 6f2aaeb7c4fa..c03acdf71397 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -881,7 +881,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
struct sh_mobile_i2c_data *pd;
struct i2c_adapter *adap;
struct resource *res;
- const struct of_device_id *match;
+ const struct sh_mobile_dt_config *config;
int ret;
u32 bus_speed;
@@ -913,10 +913,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
pd->bus_speed = ret ? STANDARD_MODE : bus_speed;
pd->clks_per_count = 1;
- match = of_match_device(sh_mobile_i2c_dt_ids, &dev->dev);
- if (match) {
- const struct sh_mobile_dt_config *config = match->data;
-
+ config = of_device_get_match_data(&dev->dev);
+ if (config) {
pd->clks_per_count = config->clks_per_count;
if (config->setup)
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index addd90a8cb59..7c7fc01116a1 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -282,8 +282,7 @@ static void taos_disconnect(struct serio *serio)
{
struct taos_data *taos = serio_get_drvdata(serio);
- if (taos->client)
- i2c_unregister_device(taos->client);
+ i2c_unregister_device(taos->client);
i2c_del_adapter(&taos->adapter);
serio_close(serio);
kfree(taos);
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
index df0976f4432a..19f8eec38717 100644
--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -118,8 +118,6 @@ static void thunder_i2c_clock_disable(struct device *dev, struct clk *clk)
static int thunder_i2c_smbus_setup_of(struct octeon_i2c *i2c,
struct device_node *node)
{
- u32 type;
-
if (!node)
return -EINVAL;
@@ -127,10 +125,6 @@ static int thunder_i2c_smbus_setup_of(struct octeon_i2c *i2c,
if (!i2c->alert_data.irq)
return -EINVAL;
- type = irqd_get_trigger_type(irq_get_irq_data(i2c->alert_data.irq));
- i2c->alert_data.alert_edge_triggered =
- (type & IRQ_TYPE_LEVEL_MASK) ? 1 : 0;
-
i2c->ara = i2c_setup_smbus_alert(&i2c->adap, &i2c->alert_data);
if (!i2c->ara)
return -ENODEV;
@@ -149,8 +143,7 @@ static int thunder_i2c_smbus_setup(struct octeon_i2c *i2c,
static void thunder_i2c_smbus_remove(struct octeon_i2c *i2c)
{
- if (i2c->ara)
- i2c_unregister_device(i2c->ara);
+ i2c_unregister_device(i2c->ara);
}
static int thunder_i2c_probe_pci(struct pci_dev *pdev,
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 7e89ba6fcf6f..a7ac746018ad 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -129,6 +129,11 @@ struct slimpro_i2c_dev {
#define to_slimpro_i2c_dev(cl) \
container_of(cl, struct slimpro_i2c_dev, mbox_client)
+enum slimpro_i2c_version {
+ XGENE_SLIMPRO_I2C_V1 = 0,
+ XGENE_SLIMPRO_I2C_V2 = 1,
+};
+
/*
* This function tests and clears a bitmask then returns its old value
*/
@@ -476,6 +481,15 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
}
} else {
struct acpi_pcct_hw_reduced *cppc_ss;
+ const struct acpi_device_id *acpi_id;
+ int version = XGENE_SLIMPRO_I2C_V1;
+
+ acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+ &pdev->dev);
+ if (!acpi_id)
+ return -EINVAL;
+
+ version = (int)acpi_id->driver_data;
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx))
@@ -514,9 +528,16 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
*/
ctx->comm_base_addr = cppc_ss->base_address;
if (ctx->comm_base_addr) {
- ctx->pcc_comm_addr = memremap(ctx->comm_base_addr,
- cppc_ss->length,
- MEMREMAP_WB);
+ if (version == XGENE_SLIMPRO_I2C_V2)
+ ctx->pcc_comm_addr = memremap(
+ ctx->comm_base_addr,
+ cppc_ss->length,
+ MEMREMAP_WT);
+ else
+ ctx->pcc_comm_addr = memremap(
+ ctx->comm_base_addr,
+ cppc_ss->length,
+ MEMREMAP_WB);
} else {
dev_err(&pdev->dev, "Failed to get PCC comm region\n");
rc = -ENOENT;
@@ -581,7 +602,8 @@ MODULE_DEVICE_TABLE(of, xgene_slimpro_i2c_dt_ids);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_slimpro_i2c_acpi_ids[] = {
- {"APMC0D40", 0},
+ {"APMC0D40", XGENE_SLIMPRO_I2C_V1},
+ {"APMC0D8B", XGENE_SLIMPRO_I2C_V2},
{}
};
MODULE_DEVICE_TABLE(acpi, xgene_slimpro_i2c_acpi_ids);
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 6b106e94bc09..b970bf8f38e5 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -7,6 +7,7 @@
*/
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -81,9 +82,12 @@ struct xlp9xx_i2c_dev {
struct completion msg_complete;
int irq;
bool msg_read;
+ bool len_recv;
+ bool client_pec;
u32 __iomem *base;
u32 msg_buf_remaining;
u32 msg_len;
+ u32 ip_clk_hz;
u32 clk_hz;
u32 msg_err;
u8 *msg_buf;
@@ -141,10 +145,25 @@ static void xlp9xx_i2c_fill_tx_fifo(struct xlp9xx_i2c_dev *priv)
static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv)
{
u32 len, i;
- u8 *buf = priv->msg_buf;
+ u8 rlen, *buf = priv->msg_buf;
len = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_FIFOWCNT) &
XLP9XX_I2C_FIFO_WCNT_MASK;
+ if (!len)
+ return;
+ if (priv->len_recv) {
+ /* read length byte */
+ rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+ *buf++ = rlen;
+ len--;
+ if (priv->client_pec)
+ ++rlen;
+ /* update remaining bytes and message length */
+ priv->msg_buf_remaining = rlen;
+ priv->msg_len = rlen + 1;
+ priv->len_recv = false;
+ }
+
len = min(priv->msg_buf_remaining, len);
for (i = 0; i < len; i++, buf++)
*buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
@@ -213,7 +232,7 @@ static int xlp9xx_i2c_init(struct xlp9xx_i2c_dev *priv)
* The controller uses 5 * SCL clock internally.
* So prescale value should be divided by 5.
*/
- prescale = DIV_ROUND_UP(XLP9XX_I2C_IP_CLK_FREQ, priv->clk_hz);
+ prescale = DIV_ROUND_UP(priv->ip_clk_hz, priv->clk_hz);
prescale = ((prescale - 8) / 5) - 1;
xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, XLP9XX_I2C_CTRL_RST);
xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, XLP9XX_I2C_CTRL_EN |
@@ -228,7 +247,7 @@ static int xlp9xx_i2c_xfer_msg(struct xlp9xx_i2c_dev *priv, struct i2c_msg *msg,
int last_msg)
{
unsigned long timeleft;
- u32 intr_mask, cmd, val;
+ u32 intr_mask, cmd, val, len;
priv->msg_buf = msg->buf;
priv->msg_buf_remaining = priv->msg_len = msg->len;
@@ -261,9 +280,13 @@ static int xlp9xx_i2c_xfer_msg(struct xlp9xx_i2c_dev *priv, struct i2c_msg *msg,
else
val &= ~XLP9XX_I2C_CTRL_ADDMODE;
+ priv->len_recv = msg->flags & I2C_M_RECV_LEN;
+ len = priv->len_recv ? XLP9XX_I2C_FIFO_SIZE : msg->len;
+ priv->client_pec = msg->flags & I2C_CLIENT_PEC;
+
/* set data length to be transferred */
val = (val & ~XLP9XX_I2C_CTRL_MCTLEN_MASK) |
- (msg->len << XLP9XX_I2C_CTRL_MCTLEN_SHIFT);
+ (len << XLP9XX_I2C_CTRL_MCTLEN_SHIFT);
xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, val);
/* fill fifo during tx */
@@ -310,6 +333,9 @@ static int xlp9xx_i2c_xfer_msg(struct xlp9xx_i2c_dev *priv, struct i2c_msg *msg,
return -ETIMEDOUT;
}
+ /* update msg->len with actual received length */
+ if (msg->flags & I2C_M_RECV_LEN)
+ msg->len = priv->msg_len;
return 0;
}
@@ -342,9 +368,19 @@ static const struct i2c_algorithm xlp9xx_i2c_algo = {
static int xlp9xx_i2c_get_frequency(struct platform_device *pdev,
struct xlp9xx_i2c_dev *priv)
{
+ struct clk *clk;
u32 freq;
int err;
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ priv->ip_clk_hz = XLP9XX_I2C_IP_CLK_FREQ;
+ dev_dbg(&pdev->dev, "using default input frequency %u\n",
+ priv->ip_clk_hz);
+ } else {
+ priv->ip_clk_hz = clk_get_rate(clk);
+ }
+
err = device_property_read_u32(&pdev->dev, "clock-frequency", &freq);
if (err) {
freq = XLP9XX_I2C_DEFAULT_FREQ;
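The prescale change above keeps the controller's existing divider formula but feeds it the rate of the optional common clock instead of the fixed XLP9XX_I2C_IP_CLK_FREQ. A worked example of that arithmetic, assuming a 133 MHz input clock and a 100 kHz bus purely for illustration:

/* Illustrative only; the 133 MHz input clock is an assumed figure. */
#include <stdio.h>

int main(void)
{
	unsigned int ip_clk_hz = 133000000;	/* assumed input clock */
	unsigned int clk_hz = 100000;		/* requested SCL frequency */
	unsigned int prescale;

	/* DIV_ROUND_UP(ip_clk_hz, clk_hz) */
	prescale = (ip_clk_hz + clk_hz - 1) / clk_hz;
	/* the controller uses 5 * SCL internally, hence the divide by 5 */
	prescale = ((prescale - 8) / 5) - 1;

	printf("prescale = %u\n", prescale);	/* 263 with these inputs */
	return 0;
}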
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 56e46581b84b..706164b4c5be 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -29,6 +29,7 @@
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/irqflags.h>
@@ -205,9 +206,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
*/
while (i++ < RECOVERY_CLK_CNT * 2) {
if (val) {
- /* Break if SDA is high */
- if (bri->get_sda && bri->get_sda(adap))
- break;
/* SCL shouldn't be low here */
if (!bri->get_scl(adap)) {
dev_err(&adap->dev,
@@ -215,6 +213,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
ret = -EBUSY;
break;
}
+ /* Break if SDA is high */
+ if (bri->get_sda && bri->get_sda(adap))
+ break;
}
val = !val;
@@ -222,6 +223,10 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
ndelay(RECOVERY_NDELAY);
}
+ /* check if recovery actually succeeded */
+ if (bri->get_sda && !bri->get_sda(adap))
+ ret = -EBUSY;
+
if (bri->unprepare_recovery)
bri->unprepare_recovery(adap);
@@ -666,10 +671,16 @@ static void i2c_adapter_unlock_bus(struct i2c_adapter *adapter,
}
static void i2c_dev_set_name(struct i2c_adapter *adap,
- struct i2c_client *client)
+ struct i2c_client *client,
+ struct i2c_board_info const *info)
{
struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+ if (info && info->dev_name) {
+ dev_set_name(&client->dev, "i2c-%s", info->dev_name);
+ return;
+ }
+
if (adev) {
dev_set_name(&client->dev, "i2c-%s", acpi_dev_name(adev));
return;
@@ -766,7 +777,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.of_node = info->of_node;
client->dev.fwnode = info->fwnode;
- i2c_dev_set_name(adap, client);
+ i2c_dev_set_name(adap, client, info);
if (info->properties) {
status = device_add_properties(&client->dev, info->properties);
@@ -808,6 +819,8 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
*/
void i2c_unregister_device(struct i2c_client *client)
{
+ if (!client)
+ return;
if (client->dev.of_node)
of_node_clear_flag(client->dev.of_node, OF_POPULATED);
if (ACPI_COMPANION(&client->dev))
@@ -1259,6 +1272,10 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
goto out_list;
}
+ res = of_i2c_setup_smbus_alert(adap);
+ if (res)
+ goto out_reg;
+
dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
pm_runtime_no_callbacks(&adap->dev);
@@ -1290,6 +1307,10 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
return 0;
+out_reg:
+ init_completion(&adap->dev_released);
+ device_unregister(&adap->dev);
+ wait_for_completion(&adap->dev_released);
out_list:
mutex_lock(&core_lock);
idr_remove(&i2c_adapter_idr, adap->nr);
@@ -1417,8 +1438,7 @@ static int __unregister_client(struct device *dev, void *dummy)
static int __unregister_dummy(struct device *dev, void *dummy)
{
struct i2c_client *client = i2c_verify_client(dev);
- if (client)
- i2c_unregister_device(client);
+ i2c_unregister_device(client);
return 0;
}
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index 10f00a82ec9d..4bb9927afd01 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
#define CREATE_TRACE_POINTS
#include <trace/events/smbus.h>
@@ -592,3 +593,57 @@ s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
return i;
}
EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated);
+
+/**
+ * i2c_setup_smbus_alert - Setup SMBus alert support
+ * @adapter: the target adapter
+ * @setup: setup data for the SMBus alert handler
+ * Context: can sleep
+ *
+ * Setup handling of the SMBus alert protocol on a given I2C bus segment.
+ *
+ * Handling can be done either through our IRQ handler, or by the
+ * adapter (from its handler, periodic polling, or whatever).
+ *
+ * NOTE that if we manage the IRQ, we *MUST* know if it's level or
+ * edge triggered in order to hand it to the workqueue correctly.
+ * If triggering the alert seems to wedge the system, you probably
+ * should have said it's level triggered.
+ *
+ * This returns the ara client, which should be saved for later use with
+ * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or NULL
+ * to indicate an error.
+ */
+struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
+ struct i2c_smbus_alert_setup *setup)
+{
+ struct i2c_board_info ara_board_info = {
+ I2C_BOARD_INFO("smbus_alert", 0x0c),
+ .platform_data = setup,
+ };
+
+ return i2c_new_device(adapter, &ara_board_info);
+}
+EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert);
+
+#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF)
+int of_i2c_setup_smbus_alert(struct i2c_adapter *adapter)
+{
+ struct i2c_client *client;
+ int irq;
+
+ irq = of_property_match_string(adapter->dev.of_node, "interrupt-names",
+ "smbus_alert");
+ if (irq == -EINVAL || irq == -ENODATA)
+ return 0;
+ else if (irq < 0)
+ return irq;
+
+ client = i2c_setup_smbus_alert(adapter, NULL);
+ if (!client)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_i2c_setup_smbus_alert);
+#endif
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index f9271c713d20..5a1dd7f13bac 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -21,12 +21,11 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
struct i2c_smbus_alert {
- unsigned int alert_edge_triggered:1;
- int irq;
struct work_struct alert;
struct i2c_client *ara; /* Alert response address */
};
@@ -72,13 +71,12 @@ static int smbus_do_alert(struct device *dev, void *addrp)
* The alert IRQ handler needs to hand work off to a task which can issue
* SMBus calls, because those sleeping calls can't be made in IRQ context.
*/
-static void smbus_alert(struct work_struct *work)
+static irqreturn_t smbus_alert(int irq, void *d)
{
- struct i2c_smbus_alert *alert;
+ struct i2c_smbus_alert *alert = d;
struct i2c_client *ara;
unsigned short prev_addr = 0; /* Not a valid address */
- alert = container_of(work, struct i2c_smbus_alert, alert);
ara = alert->ara;
for (;;) {
@@ -115,21 +113,17 @@ static void smbus_alert(struct work_struct *work)
prev_addr = data.addr;
}
- /* We handled all alerts; re-enable level-triggered IRQs */
- if (!alert->alert_edge_triggered)
- enable_irq(alert->irq);
+ return IRQ_HANDLED;
}
-static irqreturn_t smbalert_irq(int irq, void *d)
+static void smbalert_work(struct work_struct *work)
{
- struct i2c_smbus_alert *alert = d;
+ struct i2c_smbus_alert *alert;
- /* Disable level-triggered IRQs until we handle them */
- if (!alert->alert_edge_triggered)
- disable_irq_nosync(irq);
+ alert = container_of(work, struct i2c_smbus_alert, alert);
+
+ smbus_alert(0, alert);
- schedule_work(&alert->alert);
- return IRQ_HANDLED;
}
/* Setup SMBALERT# infrastructure */
@@ -139,28 +133,35 @@ static int smbalert_probe(struct i2c_client *ara,
struct i2c_smbus_alert_setup *setup = dev_get_platdata(&ara->dev);
struct i2c_smbus_alert *alert;
struct i2c_adapter *adapter = ara->adapter;
- int res;
+ int res, irq;
alert = devm_kzalloc(&ara->dev, sizeof(struct i2c_smbus_alert),
GFP_KERNEL);
if (!alert)
return -ENOMEM;
- alert->alert_edge_triggered = setup->alert_edge_triggered;
- alert->irq = setup->irq;
- INIT_WORK(&alert->alert, smbus_alert);
+ if (setup) {
+ irq = setup->irq;
+ } else {
+ irq = of_irq_get_byname(adapter->dev.of_node, "smbus_alert");
+ if (irq <= 0)
+ return irq;
+ }
+
+ INIT_WORK(&alert->alert, smbalert_work);
alert->ara = ara;
- if (setup->irq > 0) {
- res = devm_request_irq(&ara->dev, setup->irq, smbalert_irq,
- 0, "smbus_alert", alert);
+ if (irq > 0) {
+ res = devm_request_threaded_irq(&ara->dev, irq,
+ NULL, smbus_alert,
+ IRQF_SHARED | IRQF_ONESHOT,
+ "smbus_alert", alert);
if (res)
return res;
}
i2c_set_clientdata(ara, alert);
- dev_info(&adapter->dev, "supports SMBALERT#, %s trigger\n",
- setup->alert_edge_triggered ? "edge" : "level");
+ dev_info(&adapter->dev, "supports SMBALERT#\n");
return 0;
}
@@ -190,38 +191,6 @@ static struct i2c_driver smbalert_driver = {
};
/**
- * i2c_setup_smbus_alert - Setup SMBus alert support
- * @adapter: the target adapter
- * @setup: setup data for the SMBus alert handler
- * Context: can sleep
- *
- * Setup handling of the SMBus alert protocol on a given I2C bus segment.
- *
- * Handling can be done either through our IRQ handler, or by the
- * adapter (from its handler, periodic polling, or whatever).
- *
- * NOTE that if we manage the IRQ, we *MUST* know if it's level or
- * edge triggered in order to hand it to the workqueue correctly.
- * If triggering the alert seems to wedge the system, you probably
- * should have said it's level triggered.
- *
- * This returns the ara client, which should be saved for later use with
- * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or NULL
- * to indicate an error.
- */
-struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
- struct i2c_smbus_alert_setup *setup)
-{
- struct i2c_board_info ara_board_info = {
- I2C_BOARD_INFO("smbus_alert", 0x0c),
- .platform_data = setup,
- };
-
- return i2c_new_device(adapter, &ara_board_info);
-}
-EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert);
-
-/**
* i2c_handle_smbus_alert - Handle an SMBus alert
* @ara: the ARA client on the relevant adapter
* Context: can't sleep
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 7b992db38021..2ca068d8b92d 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -246,36 +246,6 @@ static irqreturn_t pca954x_irq_handler(int irq, void *dev_id)
return handled ? IRQ_HANDLED : IRQ_NONE;
}
-static void pca954x_irq_mask(struct irq_data *idata)
-{
- struct pca954x *data = irq_data_get_irq_chip_data(idata);
- unsigned int pos = idata->hwirq;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&data->lock, flags);
-
- data->irq_mask &= ~BIT(pos);
- if (!data->irq_mask)
- disable_irq(data->client->irq);
-
- raw_spin_unlock_irqrestore(&data->lock, flags);
-}
-
-static void pca954x_irq_unmask(struct irq_data *idata)
-{
- struct pca954x *data = irq_data_get_irq_chip_data(idata);
- unsigned int pos = idata->hwirq;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&data->lock, flags);
-
- if (!data->irq_mask)
- enable_irq(data->client->irq);
- data->irq_mask |= BIT(pos);
-
- raw_spin_unlock_irqrestore(&data->lock, flags);
-}
-
static int pca954x_irq_set_type(struct irq_data *idata, unsigned int type)
{
if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_LOW)
@@ -285,8 +255,6 @@ static int pca954x_irq_set_type(struct irq_data *idata, unsigned int type)
static struct irq_chip pca954x_irq_chip = {
.name = "i2c-mux-pca954x",
- .irq_mask = pca954x_irq_mask,
- .irq_unmask = pca954x_irq_unmask,
.irq_set_type = pca954x_irq_set_type,
};
@@ -294,7 +262,7 @@ static int pca954x_irq_setup(struct i2c_mux_core *muxc)
{
struct pca954x *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
- int c, err, irq;
+ int c, irq;
if (!data->chip->has_irq || client->irq <= 0)
return 0;
@@ -309,29 +277,31 @@ static int pca954x_irq_setup(struct i2c_mux_core *muxc)
for (c = 0; c < data->chip->nchans; c++) {
irq = irq_create_mapping(data->irq, c);
+ if (!irq) {
+ dev_err(&client->dev, "failed irq create map\n");
+ return -EINVAL;
+ }
irq_set_chip_data(irq, data);
irq_set_chip_and_handler(irq, &pca954x_irq_chip,
handle_simple_irq);
}
- err = devm_request_threaded_irq(&client->dev, data->client->irq, NULL,
- pca954x_irq_handler,
- IRQF_ONESHOT | IRQF_SHARED,
- "pca954x", data);
- if (err)
- goto err_req_irq;
+ return 0;
+}
- disable_irq(data->client->irq);
+static void pca954x_cleanup(struct i2c_mux_core *muxc)
+{
+ struct pca954x *data = i2c_mux_priv(muxc);
+ int c, irq;
- return 0;
-err_req_irq:
- for (c = 0; c < data->chip->nchans; c++) {
- irq = irq_find_mapping(data->irq, c);
- irq_dispose_mapping(irq);
+ if (data->irq) {
+ for (c = 0; c < data->chip->nchans; c++) {
+ irq = irq_find_mapping(data->irq, c);
+ irq_dispose_mapping(irq);
+ }
+ irq_domain_remove(data->irq);
}
- irq_domain_remove(data->irq);
-
- return err;
+ i2c_mux_del_adapters(muxc);
}
/*
@@ -391,7 +361,7 @@ static int pca954x_probe(struct i2c_client *client,
ret = pca954x_irq_setup(muxc);
if (ret)
- goto fail_del_adapters;
+ goto fail_cleanup;
/* Now create an adapter for each channel */
for (num = 0; num < data->chip->nchans; num++) {
@@ -414,7 +384,16 @@ static int pca954x_probe(struct i2c_client *client,
ret = i2c_mux_add_adapter(muxc, force, num, class);
if (ret)
- goto fail_del_adapters;
+ goto fail_cleanup;
+ }
+
+ if (data->irq) {
+ ret = devm_request_threaded_irq(&client->dev, data->client->irq,
+ NULL, pca954x_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "pca954x", data);
+ if (ret)
+ goto fail_cleanup;
}
dev_info(&client->dev,
@@ -424,26 +403,16 @@ static int pca954x_probe(struct i2c_client *client,
return 0;
-fail_del_adapters:
- i2c_mux_del_adapters(muxc);
+fail_cleanup:
+ pca954x_cleanup(muxc);
return ret;
}
static int pca954x_remove(struct i2c_client *client)
{
struct i2c_mux_core *muxc = i2c_get_clientdata(client);
- struct pca954x *data = i2c_mux_priv(muxc);
- int c, irq;
- if (data->irq) {
- for (c = 0; c < data->chip->nchans; c++) {
- irq = irq_find_mapping(data->irq, c);
- irq_dispose_mapping(irq);
- }
- irq_domain_remove(data->irq);
- }
-
- i2c_mux_del_adapters(muxc);
+ pca954x_cleanup(muxc);
return 0;
}
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index d97031804de8..f6c9c3dc6cad 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -107,9 +107,9 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
put_device(&adapter->dev);
mux->data.n_values = of_get_child_count(np);
- if (of_find_property(np, "little-endian", NULL)) {
+ if (of_property_read_bool(np, "little-endian")) {
mux->data.little_endian = true;
- } else if (of_find_property(np, "big-endian", NULL)) {
+ } else if (of_property_read_bool(np, "big-endian")) {
mux->data.little_endian = false;
} else {
#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : \
@@ -122,10 +122,7 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
#error Endianness not defined?
#endif
}
- if (of_find_property(np, "write-only", NULL))
- mux->data.write_only = true;
- else
- mux->data.write_only = false;
+ mux->data.write_only = of_property_read_bool(np, "write-only");
values = devm_kzalloc(&pdev->dev,
sizeof(*mux->data.values) * mux->data.n_values,
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index c99a25c075bc..cf1fb3fb5d26 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -117,7 +117,9 @@ config BLK_DEV_DELKIN
config BLK_DEV_IDECD
tristate "Include IDE/ATAPI CDROM support"
+ depends on BLK_DEV
select IDE_ATAPI
+ select CDROM
---help---
If you have a CD-ROM drive using the ATAPI protocol, say Y. ATAPI is
a newer protocol used by IDE CD-ROM and TAPE drives, similar to the
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 14d1e7d9a1d6..0e6bc631a1ca 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -282,7 +282,7 @@ int ide_cd_expiry(ide_drive_t *drive)
struct request *rq = drive->hwif->rq;
unsigned long wait = 0;
- debug_log("%s: rq->cmd[0]: 0x%x\n", __func__, rq->cmd[0]);
+ debug_log("%s: scsi_req(rq)->cmd[0]: 0x%x\n", __func__, scsi_req(rq)->cmd[0]);
/*
* Some commands are *slow* and normally take a long time to complete.
@@ -463,7 +463,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
return ide_do_reset(drive);
}
- debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
+ debug_log("[cmd %x]: check condition\n", scsi_req(rq)->cmd[0]);
/* Retry operation */
ide_retry_pc(drive);
@@ -531,7 +531,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
ide_pad_transfer(drive, write, bcount);
debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n",
- rq->cmd[0], done, bcount, scsi_req(rq)->resid_len);
+ scsi_req(rq)->cmd[0], done, bcount, scsi_req(rq)->resid_len);
/* And set the interrupt handler again */
ide_set_handler(drive, ide_pc_intr, timeout);
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index dccdca9eda38..ad8a125defdd 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -90,9 +90,9 @@ int generic_ide_resume(struct device *dev)
}
memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ rq = blk_get_request_flags(drive->queue, REQ_OP_DRV_IN,
+ BLK_MQ_REQ_PREEMPT);
ide_req(rq)->type = ATA_PRIV_PM_RESUME;
- rq->rq_flags |= RQF_PREEMPT;
rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
rqpm.pm_state = PM_EVENT_ON;
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index d127ace6aa57..6ee866fcc5dd 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -244,7 +244,7 @@ struct chs_geom {
static unsigned int ide_disks;
static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES];
-static int ide_set_disk_chs(const char *str, struct kernel_param *kp)
+static int ide_set_disk_chs(const char *str, const struct kernel_param *kp)
{
unsigned int a, b, c = 0, h = 0, s = 0, i, j = 1;
@@ -328,7 +328,7 @@ static void ide_dev_apply_params(ide_drive_t *drive, u8 unit)
static unsigned int ide_ignore_cable;
-static int ide_set_ignore_cable(const char *s, struct kernel_param *kp)
+static int ide_set_ignore_cable(const char *s, const struct kernel_param *kp)
{
int i, j = 1;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index f0b06b14e782..b2ccce5fb071 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -913,10 +913,9 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
struct cpuidle_state *state = &drv->states[index];
unsigned long eax = flg2MWAIT(state->flags);
unsigned int cstate;
+ bool uninitialized_var(tick);
int cpu = smp_processor_id();
- cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
-
/*
* leave_mm() to avoid costly and often unnecessary wakeups
* for flushing the user TLB's associated with the active mm.
@@ -924,12 +923,19 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
leave_mm(cpu);
- if (!(lapic_timer_reliable_states & (1 << (cstate))))
- tick_broadcast_enter();
+ if (!static_cpu_has(X86_FEATURE_ARAT)) {
+ cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) &
+ MWAIT_CSTATE_MASK) + 1;
+ tick = false;
+ if (!(lapic_timer_reliable_states & (1 << (cstate)))) {
+ tick = true;
+ tick_broadcast_enter();
+ }
+ }
mwait_idle_with_hints(eax, ecx);
- if (!(lapic_timer_reliable_states & (1 << (cstate))))
+ if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
tick_broadcast_exit();
return index;
@@ -1061,7 +1067,7 @@ static const struct idle_cpu idle_cpu_dnv = {
};
#define ICPU(model, cpu) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu }
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem),
@@ -1125,6 +1131,11 @@ static int __init intel_idle_probe(void)
return -ENODEV;
}
+ if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+ pr_debug("Please enable MWAIT in BIOS SETUP\n");
+ return -ENODEV;
+ }
+
if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
return -ENODEV;
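Context for the intel_idle change above: on CPUs advertising ARAT (Always Running APIC Timer) the local APIC timer keeps counting in deep C-states, so both the broadcast-timer bookkeeping and the C-state computation that feeds it can be skipped; the tick flag only records whether broadcast mode was actually entered so the exit path stays balanced. A standalone sketch of that control flow, where cpu_has_arat(), timer_stops_in_cstate() and broadcast_enter()/broadcast_exit() are illustrative stand-ins rather than kernel APIs:

#include <stdbool.h>
#include <stdio.h>

static bool cpu_has_arat(void)           { return true; }   /* most modern CPUs */
static bool timer_stops_in_cstate(int c) { return c >= 3; }
static void broadcast_enter(void)        { puts("broadcast enter"); }
static void broadcast_exit(void)         { puts("broadcast exit"); }
static void mwait_idle(void)             { puts("mwait"); }

static void idle_enter(int cstate)
{
	bool tick = false;

	/* Only bother with the broadcast timer when the APIC timer may stop. */
	if (!cpu_has_arat()) {
		if (timer_stops_in_cstate(cstate)) {
			tick = true;
			broadcast_enter();
		}
	}

	mwait_idle();

	/* Leave broadcast mode only if we actually entered it. */
	if (!cpu_has_arat() && tick)
		broadcast_exit();
}

int main(void)
{
	idle_enter(6);
	return 0;
}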
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 15de262015df..c6d9517d7611 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -148,6 +148,17 @@ config HID_SENSOR_ACCEL_3D
To compile this driver as a module, choose M here: the
module will be called hid-sensor-accel-3d.
+config IIO_CROS_EC_ACCEL_LEGACY
+ tristate "ChromeOS EC Legacy Accelerometer Sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select CROS_EC_LPC_REGISTER_DEVICE
+ help
+ Say yes here to get support for accelerometers on Chromebooks using
+ legacy EC firmware.
+ Sensor data is retrieved through IO memory.
+ Newer devices should use IIO_CROS_EC_SENSORS.
+
config IIO_ST_ACCEL_3AXIS
tristate "STMicroelectronics accelerometers 3-Axis Driver"
depends on (I2C || SPI_MASTER) && SYSFS
@@ -219,8 +230,8 @@ config KXCJK1013
select IIO_TRIGGERED_BUFFER
help
Say Y here if you want to build a driver for the Kionix KXCJK-1013
- triaxial acceleration sensor. This driver also supports KXCJ9-1008
- and KXTJ2-1009.
+ triaxial acceleration sensor. This driver also supports KXCJ9-1008,
+ KXTJ2-1009 and KXTF9.
To compile this driver as a module, choose M here: the module will
be called kxcjk-1013.
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 703e7c21f547..368aedb6377a 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -44,6 +44,8 @@ obj-$(CONFIG_SCA3000) += sca3000.o
obj-$(CONFIG_STK8312) += stk8312.o
obj-$(CONFIG_STK8BA50) += stk8ba50.o
+obj-$(CONFIG_IIO_CROS_EC_ACCEL_LEGACY) += cros_ec_accel_legacy.o
+
obj-$(CONFIG_IIO_SSP_SENSORS_COMMONS) += ssp_accel_sensor.o
obj-$(CONFIG_IIO_ST_ACCEL_3AXIS) += st_accel.o
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index 9ccb5828db98..7251d0e63d74 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -95,7 +95,6 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info adxl345_info = {
- .driver_module = THIS_MODULE,
.read_raw = adxl345_read_raw,
};
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 3dec972ca672..cb9765a3de60 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -536,7 +536,6 @@ static const struct iio_info bma180_info = {
.attrs = &bma180_attrs_group,
.read_raw = bma180_read_raw,
.write_raw = bma180_write_raw,
- .driver_module = THIS_MODULE,
};
static const char * const bma180_power_modes[] = { "low_noise", "low_power" };
@@ -700,7 +699,6 @@ static int bma180_trig_try_reen(struct iio_trigger *trig)
static const struct iio_trigger_ops bma180_trigger_ops = {
.set_trigger_state = bma180_data_rdy_trigger_set_state,
.try_reenable = bma180_trig_try_reen,
- .owner = THIS_MODULE,
};
static int bma180_probe(struct i2c_client *client,
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
index 5099f295dd37..e25d91c017ed 100644
--- a/drivers/iio/accel/bma220_spi.c
+++ b/drivers/iio/accel/bma220_spi.c
@@ -186,7 +186,6 @@ static int bma220_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info bma220_info = {
- .driver_module = THIS_MODULE,
.read_raw = bma220_read_raw,
.write_raw = bma220_write_raw,
.attrs = &bma220_attribute_group,
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 807299dd45eb..870f92ef61c2 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -1094,7 +1094,6 @@ static const struct iio_info bmc150_accel_info = {
.write_event_value = bmc150_accel_write_event,
.write_event_config = bmc150_accel_write_event_config,
.read_event_config = bmc150_accel_read_event_config,
- .driver_module = THIS_MODULE,
};
static const struct iio_info bmc150_accel_info_fifo = {
@@ -1108,7 +1107,6 @@ static const struct iio_info bmc150_accel_info_fifo = {
.validate_trigger = bmc150_accel_validate_trigger,
.hwfifo_set_watermark = bmc150_accel_set_watermark,
.hwfifo_flush_to_buffer = bmc150_accel_fifo_flush,
- .driver_module = THIS_MODULE,
};
static const unsigned long bmc150_accel_scan_masks[] = {
@@ -1200,7 +1198,6 @@ static int bmc150_accel_trigger_set_state(struct iio_trigger *trig,
static const struct iio_trigger_ops bmc150_accel_trigger_ops = {
.set_trigger_state = bmc150_accel_trigger_set_state,
.try_reenable = bmc150_accel_trig_try_reen,
- .owner = THIS_MODULE,
};
static int bmc150_accel_handle_roc_event(struct iio_dev *indio_dev)
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
new file mode 100644
index 000000000000..063e89eff791
--- /dev/null
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -0,0 +1,423 @@
+/*
+ * Driver for older Chrome OS EC accelerometer
+ *
+ * Copyright 2017 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver uses the memory mapped cros-ec interface to communicate
+ * with the Chrome OS EC about accelerometer data.
+ * Accelerometer access is presented through iio sysfs.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "cros-ec-accel-legacy"
+
+/*
+ * Sensor scale hard coded at 10 bits per g, computed as:
+ * g / (2^10 - 1) = 0.009586168; with g = 9.80665 m.s^-2
+ */
+#define ACCEL_LEGACY_NSCALE 9586168
+
+/* Indices for EC sensor values. */
+enum {
+ X,
+ Y,
+ Z,
+ MAX_AXIS,
+};
+
+/* State data for cros_ec_accel_legacy iio driver. */
+struct cros_ec_accel_legacy_state {
+ struct cros_ec_device *ec;
+
+ /*
+ * Array holding data from a single capture. 2 bytes per channel
+ * for the 3 channels plus the timestamp which is always last and
+ * 8-bytes aligned.
+ */
+ s16 capture_data[8];
+ s8 sign[MAX_AXIS];
+ u8 sensor_num;
+};
+
+static int ec_cmd_read_u8(struct cros_ec_device *ec, unsigned int offset,
+ u8 *dest)
+{
+ return ec->cmd_readmem(ec, offset, 1, dest);
+}
+
+static int ec_cmd_read_u16(struct cros_ec_device *ec, unsigned int offset,
+ u16 *dest)
+{
+ __le16 tmp;
+ int ret = ec->cmd_readmem(ec, offset, 2, &tmp);
+
+ *dest = le16_to_cpu(tmp);
+
+ return ret;
+}
+
+/**
+ * read_ec_until_not_busy() - Read from EC status byte until it reads not busy.
+ * @st: Pointer to state information for device.
+ *
+ * This function reads EC status until its busy bit gets cleared. It does not
+ * wait indefinitely and returns -EIO if the EC status is still busy after a
+ * few hundreds milliseconds.
+ *
+ * Return: 8-bit status if ok, -EIO on error
+ */
+static int read_ec_until_not_busy(struct cros_ec_accel_legacy_state *st)
+{
+ struct cros_ec_device *ec = st->ec;
+ u8 status;
+ int attempts = 0;
+
+ ec_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS, &status);
+ while (status & EC_MEMMAP_ACC_STATUS_BUSY_BIT) {
+ /* Give up after enough attempts, return error. */
+ if (attempts++ >= 50)
+ return -EIO;
+
+ /* Small delay every so often. */
+ if (attempts % 5 == 0)
+ msleep(25);
+
+ ec_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS, &status);
+ }
+
+ return status;
+}
+
+/**
+ * read_ec_accel_data_unsafe() - Read acceleration data from EC shared memory.
+ * @st: Pointer to state information for device.
+ * @scan_mask: Bitmap of the sensor indices to scan.
+ * @data: Location to store data.
+ *
+ * This is the unsafe function for reading the EC data. It does not guarantee
+ * that the EC will not modify the data as it is being read in.
+ */
+static void read_ec_accel_data_unsafe(struct cros_ec_accel_legacy_state *st,
+ unsigned long scan_mask, s16 *data)
+{
+ int i = 0;
+ int num_enabled = bitmap_weight(&scan_mask, MAX_AXIS);
+
+ /* Read all sensors enabled in scan_mask. Each value is 2 bytes. */
+ while (num_enabled--) {
+ i = find_next_bit(&scan_mask, MAX_AXIS, i);
+ ec_cmd_read_u16(st->ec,
+ EC_MEMMAP_ACC_DATA +
+ sizeof(s16) *
+ (1 + i + st->sensor_num * MAX_AXIS),
+ data);
+ *data *= st->sign[i];
+ i++;
+ data++;
+ }
+}
+
+/**
+ * read_ec_accel_data() - Read acceleration data from EC shared memory.
+ * @st: Pointer to state information for device.
+ * @scan_mask: Bitmap of the sensor indices to scan.
+ * @data: Location to store data.
+ *
+ * This is the safe function for reading the EC data. It guarantees that
+ * the data sampled was not modified by the EC while being read.
+ *
+ * Return: 0 if ok, -ve on error
+ */
+static int read_ec_accel_data(struct cros_ec_accel_legacy_state *st,
+ unsigned long scan_mask, s16 *data)
+{
+ u8 samp_id = 0xff;
+ u8 status = 0;
+ int ret;
+ int attempts = 0;
+
+ /*
+ * Repeatedly read the data until, after a full pass, the status byte
+ * shows the EC is not busy and the sample id still matches the one
+ * captured before the pass. This guarantees that the data was not
+ * modified by the EC while it was being read.
+ */
+ while ((status & (EC_MEMMAP_ACC_STATUS_BUSY_BIT |
+ EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK)) != samp_id) {
+ /* If we have tried to read too many times, return error. */
+ if (attempts++ >= 5)
+ return -EIO;
+
+ /* Read status byte until EC is not busy. */
+ ret = read_ec_until_not_busy(st);
+ if (ret < 0)
+ return ret;
+ status = ret;
+
+ /*
+ * Store the current sample id so that we can compare to the
+ * sample id after reading the data.
+ */
+ samp_id = status & EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK;
+
+ /* Read all EC data, format it, and store it into data. */
+ read_ec_accel_data_unsafe(st, scan_mask, data);
+
+ /* Read status byte. */
+ ec_cmd_read_u8(st->ec, EC_MEMMAP_ACC_STATUS, &status);
+ }
+
+ return 0;
+}
+
+static int cros_ec_accel_legacy_read(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cros_ec_accel_legacy_state *st = iio_priv(indio_dev);
+ s16 data = 0;
+ int ret = IIO_VAL_INT;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = read_ec_accel_data(st, (1 << chan->scan_index), &data);
+ if (ret)
+ return ret;
+ *val = data;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = ACCEL_LEGACY_NSCALE;
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ /* Calibration not supported. */
+ *val = 0;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int cros_ec_accel_legacy_write(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ /*
+ * Do nothing, but do not return an error code either, so that the
+ * calibration script keeps working.
+ */
+ if (mask == IIO_CHAN_INFO_CALIBBIAS)
+ return 0;
+
+ return -EINVAL;
+}
+
+static const struct iio_info cros_ec_accel_legacy_info = {
+ .read_raw = &cros_ec_accel_legacy_read,
+ .write_raw = &cros_ec_accel_legacy_write,
+};
+
+/**
+ * cros_ec_accel_legacy_capture() - The trigger handler function
+ * @irq: The interrupt number.
+ * @p: Private data - always a pointer to the poll func.
+ *
+ * On a trigger event occurring, if the pollfunc is attached then this
+ * handler is called as a threaded interrupt (and hence may sleep). It
+ * is responsible for grabbing data from the device and pushing it into
+ * the associated buffer.
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t cros_ec_accel_legacy_capture(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct cros_ec_accel_legacy_state *st = iio_priv(indio_dev);
+
+ /* Clear capture data. */
+ memset(st->capture_data, 0, sizeof(st->capture_data));
+
+ /*
+ * Read data based on which channels are enabled in scan mask. Note
+ * that on a capture we are always reading the calibrated data.
+ */
+ read_ec_accel_data(st, *indio_dev->active_scan_mask, st->capture_data);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, (void *)st->capture_data,
+ iio_get_time_ns(indio_dev));
+
+ /*
+ * Tell the core we are done with this trigger and ready for the
+ * next one.
+ */
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static char *cros_ec_accel_legacy_loc_strings[] = {
+ [MOTIONSENSE_LOC_BASE] = "base",
+ [MOTIONSENSE_LOC_LID] = "lid",
+ [MOTIONSENSE_LOC_MAX] = "unknown",
+};
+
+static ssize_t cros_ec_accel_legacy_loc(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct cros_ec_accel_legacy_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%s\n",
+ cros_ec_accel_legacy_loc_strings[st->sensor_num +
+ MOTIONSENSE_LOC_BASE]);
+}
+
+static ssize_t cros_ec_accel_legacy_id(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct cros_ec_accel_legacy_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", st->sensor_num);
+}
+
+static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = {
+ {
+ .name = "id",
+ .shared = IIO_SHARED_BY_ALL,
+ .read = cros_ec_accel_legacy_id,
+ },
+ {
+ .name = "location",
+ .shared = IIO_SHARED_BY_ALL,
+ .read = cros_ec_accel_legacy_loc,
+ },
+ { }
+};
+
+#define CROS_EC_ACCEL_LEGACY_CHAN(_axis) \
+ { \
+ .type = IIO_ACCEL, \
+ .channel2 = IIO_MOD_X + (_axis), \
+ .modified = 1, \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = cros_ec_accel_legacy_ext_info, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ }, \
+ } \
+
+static struct iio_chan_spec ec_accel_channels[] = {
+ CROS_EC_ACCEL_LEGACY_CHAN(X),
+ CROS_EC_ACCEL_LEGACY_CHAN(Y),
+ CROS_EC_ACCEL_LEGACY_CHAN(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(MAX_AXIS)
+};
+
+static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec = dev_get_drvdata(dev->parent);
+ struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
+ struct iio_dev *indio_dev;
+ struct cros_ec_accel_legacy_state *state;
+ int ret, i;
+
+ if (!ec || !ec->ec_dev) {
+ dev_warn(&pdev->dev, "No EC device found.\n");
+ return -EINVAL;
+ }
+
+ if (!ec->ec_dev->cmd_readmem) {
+ dev_warn(&pdev->dev, "EC does not support direct reads.\n");
+ return -EINVAL;
+ }
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ state = iio_priv(indio_dev);
+ state->ec = ec->ec_dev;
+ state->sensor_num = sensor_platform->sensor_num;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->name = pdev->name;
+ indio_dev->channels = ec_accel_channels;
+ /*
+ * Present the channels using the HTML5 convention:
+ * X and Y must be swapped, and some axes of the lid sensor inverted.
+ */
+ for (i = X ; i < MAX_AXIS; i++) {
+ switch (i) {
+ case X:
+ ec_accel_channels[X].scan_index = Y;
+ case Y:
+ ec_accel_channels[Y].scan_index = X;
+ case Z:
+ ec_accel_channels[Z].scan_index = Z;
+ }
+ if (state->sensor_num == MOTIONSENSE_LOC_LID && i != Y)
+ state->sign[i] = -1;
+ else
+ state->sign[i] = 1;
+ }
+ indio_dev->num_channels = ARRAY_SIZE(ec_accel_channels);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &cros_ec_accel_legacy_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ cros_ec_accel_legacy_capture,
+ NULL);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static struct platform_driver cros_ec_accel_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = cros_ec_accel_legacy_probe,
+};
+module_platform_driver(cros_ec_accel_platform_driver);
+
+MODULE_DESCRIPTION("ChromeOS EC legacy accelerometer driver");
+MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
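The new driver's read_ec_accel_data() uses a seqlock-like consistency protocol against the EC's shared memory window: capture the sample id, read the samples, then re-read the status and retry if the EC was busy or the sample id has moved on. A self-contained sketch of that retry loop, with read_status() and read_samples() as hypothetical stand-ins for the EC memmap reads:

#include <stdint.h>
#include <stdio.h>

#define BUSY_BIT        0x80
#define SAMPLE_ID_MASK  0x0f

static uint8_t fake_status = 0x01;          /* not busy, sample id 1 */

static uint8_t read_status(void)            { return fake_status; }
static void read_samples(int16_t d[3])      { d[0] = 1; d[1] = 2; d[2] = 3; }

static int read_consistent(int16_t data[3])
{
	uint8_t status = 0, samp_id = 0xff;
	int attempts = 0;

	/* Retry until the EC is idle and the sample id did not change mid-read. */
	while ((status & (BUSY_BIT | SAMPLE_ID_MASK)) != samp_id) {
		if (attempts++ >= 5)
			return -1;

		status = read_status();
		if (status & BUSY_BIT)
			continue;

		samp_id = status & SAMPLE_ID_MASK;
		read_samples(data);
		status = read_status();
	}

	return 0;
}

int main(void)
{
	int16_t d[3];

	if (read_consistent(d) == 0)
		printf("%d %d %d\n", d[0], d[1], d[2]);
	return 0;
}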
diff --git a/drivers/iio/accel/da280.c b/drivers/iio/accel/da280.c
index ed8343aeac9c..6c214783241c 100644
--- a/drivers/iio/accel/da280.c
+++ b/drivers/iio/accel/da280.c
@@ -88,7 +88,6 @@ static int da280_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info da280_info = {
- .driver_module = THIS_MODULE,
.read_raw = da280_read_raw,
};
diff --git a/drivers/iio/accel/da311.c b/drivers/iio/accel/da311.c
index c0c1620d2a2f..aa64bca00955 100644
--- a/drivers/iio/accel/da311.c
+++ b/drivers/iio/accel/da311.c
@@ -212,7 +212,6 @@ static int da311_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info da311_info = {
- .driver_module = THIS_MODULE,
.read_raw = da311_read_raw,
};
diff --git a/drivers/iio/accel/dmard06.c b/drivers/iio/accel/dmard06.c
index 656ca8e1927f..d87e2c751475 100644
--- a/drivers/iio/accel/dmard06.c
+++ b/drivers/iio/accel/dmard06.c
@@ -124,7 +124,6 @@ static int dmard06_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info dmard06_info = {
- .driver_module = THIS_MODULE,
.read_raw = dmard06_read_raw,
};
diff --git a/drivers/iio/accel/dmard09.c b/drivers/iio/accel/dmard09.c
index d3a28f96565c..16a7e74f5e9a 100644
--- a/drivers/iio/accel/dmard09.c
+++ b/drivers/iio/accel/dmard09.c
@@ -93,7 +93,6 @@ static int dmard09_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info dmard09_info = {
- .driver_module = THIS_MODULE,
.read_raw = dmard09_read_raw,
};
diff --git a/drivers/iio/accel/dmard10.c b/drivers/iio/accel/dmard10.c
index b8736cc75656..9518ea00167e 100644
--- a/drivers/iio/accel/dmard10.c
+++ b/drivers/iio/accel/dmard10.c
@@ -170,7 +170,6 @@ static int dmard10_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info dmard10_info = {
- .driver_module = THIS_MODULE,
.read_raw = dmard10_read_raw,
};
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 2238a26aba63..c066a3bdbff7 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -225,7 +225,6 @@ static int accel_3d_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info accel_3d_info = {
- .driver_module = THIS_MODULE,
.read_raw = &accel_3d_read_raw,
.write_raw = &accel_3d_write_raw,
};
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 3f968c46e667..af53a1084ee5 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -34,6 +34,13 @@
#define KXCJK1013_DRV_NAME "kxcjk1013"
#define KXCJK1013_IRQ_NAME "kxcjk1013_event"
+#define KXTF9_REG_HP_XOUT_L 0x00
+#define KXTF9_REG_HP_XOUT_H 0x01
+#define KXTF9_REG_HP_YOUT_L 0x02
+#define KXTF9_REG_HP_YOUT_H 0x03
+#define KXTF9_REG_HP_ZOUT_L 0x04
+#define KXTF9_REG_HP_ZOUT_H 0x05
+
#define KXCJK1013_REG_XOUT_L 0x06
/*
* From low byte X axis register, all the other addresses of Y and Z can be
@@ -48,17 +55,33 @@
#define KXCJK1013_REG_DCST_RESP 0x0C
#define KXCJK1013_REG_WHO_AM_I 0x0F
-#define KXCJK1013_REG_INT_SRC1 0x16
+#define KXTF9_REG_TILT_POS_CUR 0x10
+#define KXTF9_REG_TILT_POS_PREV 0x11
+#define KXTF9_REG_INT_SRC1 0x15
+#define KXCJK1013_REG_INT_SRC1 0x16 /* compatible, but called INT_SRC2 in KXTF9 ds */
#define KXCJK1013_REG_INT_SRC2 0x17
#define KXCJK1013_REG_STATUS_REG 0x18
#define KXCJK1013_REG_INT_REL 0x1A
#define KXCJK1013_REG_CTRL1 0x1B
-#define KXCJK1013_REG_CTRL2 0x1D
+#define KXTF9_REG_CTRL2 0x1C
+#define KXCJK1013_REG_CTRL2 0x1D /* mostly compatible, CTRL_REG3 in KXTF9 ds */
#define KXCJK1013_REG_INT_CTRL1 0x1E
#define KXCJK1013_REG_INT_CTRL2 0x1F
+#define KXTF9_REG_INT_CTRL3 0x20
#define KXCJK1013_REG_DATA_CTRL 0x21
+#define KXTF9_REG_TILT_TIMER 0x28
#define KXCJK1013_REG_WAKE_TIMER 0x29
+#define KXTF9_REG_TDT_TIMER 0x2B
+#define KXTF9_REG_TDT_THRESH_H 0x2C
+#define KXTF9_REG_TDT_THRESH_L 0x2D
+#define KXTF9_REG_TDT_TAP_TIMER 0x2E
+#define KXTF9_REG_TDT_TOTAL_TIMER 0x2F
+#define KXTF9_REG_TDT_LATENCY_TIMER 0x30
+#define KXTF9_REG_TDT_WINDOW_TIMER 0x31
#define KXCJK1013_REG_SELF_TEST 0x3A
+#define KXTF9_REG_WAKE_THRESH 0x5A
+#define KXTF9_REG_TILT_ANGLE 0x5C
+#define KXTF9_REG_HYST_SET 0x5F
#define KXCJK1013_REG_WAKE_THRES 0x6A
#define KXCJK1013_REG_CTRL1_BIT_PC1 BIT(7)
@@ -67,14 +90,33 @@
#define KXCJK1013_REG_CTRL1_BIT_GSEL1 BIT(4)
#define KXCJK1013_REG_CTRL1_BIT_GSEL0 BIT(3)
#define KXCJK1013_REG_CTRL1_BIT_WUFE BIT(1)
-#define KXCJK1013_REG_INT_REG1_BIT_IEA BIT(4)
-#define KXCJK1013_REG_INT_REG1_BIT_IEN BIT(5)
+
+#define KXCJK1013_REG_INT_CTRL1_BIT_IEU BIT(2) /* KXTF9 */
+#define KXCJK1013_REG_INT_CTRL1_BIT_IEL BIT(3)
+#define KXCJK1013_REG_INT_CTRL1_BIT_IEA BIT(4)
+#define KXCJK1013_REG_INT_CTRL1_BIT_IEN BIT(5)
+
+#define KXTF9_REG_TILT_BIT_LEFT_EDGE BIT(5)
+#define KXTF9_REG_TILT_BIT_RIGHT_EDGE BIT(4)
+#define KXTF9_REG_TILT_BIT_LOWER_EDGE BIT(3)
+#define KXTF9_REG_TILT_BIT_UPPER_EDGE BIT(2)
+#define KXTF9_REG_TILT_BIT_FACE_DOWN BIT(1)
+#define KXTF9_REG_TILT_BIT_FACE_UP BIT(0)
#define KXCJK1013_DATA_MASK_12_BIT 0x0FFF
#define KXCJK1013_MAX_STARTUP_TIME_US 100000
#define KXCJK1013_SLEEP_DELAY_MS 2000
+#define KXCJK1013_REG_INT_SRC1_BIT_TPS BIT(0) /* KXTF9 */
+#define KXCJK1013_REG_INT_SRC1_BIT_WUFS BIT(1)
+#define KXCJK1013_REG_INT_SRC1_MASK_TDTS (BIT(2) | BIT(3)) /* KXTF9 */
+#define KXCJK1013_REG_INT_SRC1_TAP_NONE 0
+#define KXCJK1013_REG_INT_SRC1_TAP_SINGLE BIT(2)
+#define KXCJK1013_REG_INT_SRC1_TAP_DOUBLE BIT(3)
+#define KXCJK1013_REG_INT_SRC1_BIT_DRDY BIT(4)
+
+/* KXCJK: INT_SOURCE2: motion detect, KXTF9: INT_SRC_REG1: tap detect */
#define KXCJK1013_REG_INT_SRC2_BIT_ZP BIT(0)
#define KXCJK1013_REG_INT_SRC2_BIT_ZN BIT(1)
#define KXCJK1013_REG_INT_SRC2_BIT_YP BIT(2)
@@ -88,6 +130,7 @@ enum kx_chipset {
KXCJK1013,
KXCJ91008,
KXTJ21009,
+ KXTF9,
KX_MAX_CHIPS /* this must be last */
};
@@ -128,15 +171,42 @@ enum kxcjk1013_range {
KXCJK1013_RANGE_8G,
};
-static const struct {
+struct kx_odr_map {
int val;
int val2;
int odr_bits;
-} samp_freq_table[] = { {0, 781000, 0x08}, {1, 563000, 0x09},
- {3, 125000, 0x0A}, {6, 250000, 0x0B}, {12, 500000, 0},
- {25, 0, 0x01}, {50, 0, 0x02}, {100, 0, 0x03},
- {200, 0, 0x04}, {400, 0, 0x05}, {800, 0, 0x06},
- {1600, 0, 0x07} };
+ int wuf_bits;
+};
+
+static const struct kx_odr_map samp_freq_table[] = {
+ { 0, 781000, 0x08, 0x00 },
+ { 1, 563000, 0x09, 0x01 },
+ { 3, 125000, 0x0A, 0x02 },
+ { 6, 250000, 0x0B, 0x03 },
+ { 12, 500000, 0x00, 0x04 },
+ { 25, 0, 0x01, 0x05 },
+ { 50, 0, 0x02, 0x06 },
+ { 100, 0, 0x03, 0x06 },
+ { 200, 0, 0x04, 0x06 },
+ { 400, 0, 0x05, 0x06 },
+ { 800, 0, 0x06, 0x06 },
+ { 1600, 0, 0x07, 0x06 },
+};
+
+static const char *const kxcjk1013_samp_freq_avail =
+ "0.781000 1.563000 3.125000 6.250000 12.500000 25 50 100 200 400 800 1600";
+
+static const struct kx_odr_map kxtf9_samp_freq_table[] = {
+ { 25, 0, 0x01, 0x00 },
+ { 50, 0, 0x02, 0x01 },
+ { 100, 0, 0x03, 0x01 },
+ { 200, 0, 0x04, 0x01 },
+ { 400, 0, 0x05, 0x01 },
+ { 800, 0, 0x06, 0x01 },
+};
+
+static const char *const kxtf9_samp_freq_avail =
+ "25 50 100 200 400 800";
/* Refer to section 4 of the specification */
static const struct {
@@ -188,6 +258,15 @@ static const struct {
{0x06, 3000},
{0x07, 2000},
},
+ /* KXTF9 */
+ {
+ {0x01, 81000},
+ {0x02, 41000},
+ {0x03, 21000},
+ {0x04, 11000},
+ {0x05, 5100},
+ {0x06, 2700},
+ },
};
static const struct {
@@ -198,23 +277,6 @@ static const struct {
{19163, 1, 0},
{38326, 0, 1} };
-static const struct {
- int val;
- int val2;
- int odr_bits;
-} wake_odr_data_rate_table[] = { {0, 781000, 0x00},
- {1, 563000, 0x01},
- {3, 125000, 0x02},
- {6, 250000, 0x03},
- {12, 500000, 0x04},
- {25, 0, 0x05},
- {50, 0, 0x06},
- {100, 0, 0x06},
- {200, 0, 0x06},
- {400, 0, 0x06},
- {800, 0, 0x06},
- {1600, 0, 0x06} };
-
static int kxcjk1013_set_mode(struct kxcjk1013_data *data,
enum kxcjk1013_mode mode)
{
@@ -341,9 +403,9 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data)
}
if (data->active_high_intr)
- ret |= KXCJK1013_REG_INT_REG1_BIT_IEA;
+ ret |= KXCJK1013_REG_INT_CTRL1_BIT_IEA;
else
- ret &= ~KXCJK1013_REG_INT_REG1_BIT_IEA;
+ ret &= ~KXCJK1013_REG_INT_CTRL1_BIT_IEA;
ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_INT_CTRL1,
ret);
@@ -401,7 +463,7 @@ static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
static int kxcjk1013_chip_update_thresholds(struct kxcjk1013_data *data)
{
- int ret;
+ int waketh_reg, ret;
ret = i2c_smbus_write_byte_data(data->client,
KXCJK1013_REG_WAKE_TIMER,
@@ -412,8 +474,9 @@ static int kxcjk1013_chip_update_thresholds(struct kxcjk1013_data *data)
return ret;
}
- ret = i2c_smbus_write_byte_data(data->client,
- KXCJK1013_REG_WAKE_THRES,
+ waketh_reg = data->chipset == KXTF9 ?
+ KXTF9_REG_WAKE_THRESH : KXCJK1013_REG_WAKE_THRES;
+ ret = i2c_smbus_write_byte_data(data->client, waketh_reg,
data->wake_thres);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing reg_wake_thres\n");
@@ -449,9 +512,9 @@ static int kxcjk1013_setup_any_motion_interrupt(struct kxcjk1013_data *data,
}
if (status)
- ret |= KXCJK1013_REG_INT_REG1_BIT_IEN;
+ ret |= KXCJK1013_REG_INT_CTRL1_BIT_IEN;
else
- ret &= ~KXCJK1013_REG_INT_REG1_BIT_IEN;
+ ret &= ~KXCJK1013_REG_INT_CTRL1_BIT_IEN;
ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_INT_CTRL1,
ret);
@@ -509,9 +572,9 @@ static int kxcjk1013_setup_new_data_interrupt(struct kxcjk1013_data *data,
}
if (status)
- ret |= KXCJK1013_REG_INT_REG1_BIT_IEN;
+ ret |= KXCJK1013_REG_INT_CTRL1_BIT_IEN;
else
- ret &= ~KXCJK1013_REG_INT_REG1_BIT_IEN;
+ ret &= ~KXCJK1013_REG_INT_CTRL1_BIT_IEN;
ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_INT_CTRL1,
ret);
@@ -547,28 +610,30 @@ static int kxcjk1013_setup_new_data_interrupt(struct kxcjk1013_data *data,
return 0;
}
-static int kxcjk1013_convert_freq_to_bit(int val, int val2)
+static const struct kx_odr_map *kxcjk1013_find_odr_value(
+ const struct kx_odr_map *map, size_t map_size, int val, int val2)
{
int i;
- for (i = 0; i < ARRAY_SIZE(samp_freq_table); ++i) {
- if (samp_freq_table[i].val == val &&
- samp_freq_table[i].val2 == val2) {
- return samp_freq_table[i].odr_bits;
- }
+ for (i = 0; i < map_size; ++i) {
+ if (map[i].val == val && map[i].val2 == val2)
+ return &map[i];
}
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
-static int kxcjk1013_convert_wake_odr_to_bit(int val, int val2)
+static int kxcjk1013_convert_odr_value(const struct kx_odr_map *map,
+ size_t map_size, int odr_bits,
+ int *val, int *val2)
{
int i;
- for (i = 0; i < ARRAY_SIZE(wake_odr_data_rate_table); ++i) {
- if (wake_odr_data_rate_table[i].val == val &&
- wake_odr_data_rate_table[i].val2 == val2) {
- return wake_odr_data_rate_table[i].odr_bits;
+ for (i = 0; i < map_size; ++i) {
+ if (map[i].odr_bits == odr_bits) {
+ *val = map[i].val;
+ *val2 = map[i].val2;
+ return IIO_VAL_INT_PLUS_MICRO;
}
}
@@ -578,16 +643,24 @@ static int kxcjk1013_convert_wake_odr_to_bit(int val, int val2)
static int kxcjk1013_set_odr(struct kxcjk1013_data *data, int val, int val2)
{
int ret;
- int odr_bits;
enum kxcjk1013_mode store_mode;
+ const struct kx_odr_map *odr_setting;
ret = kxcjk1013_get_mode(data, &store_mode);
if (ret < 0)
return ret;
- odr_bits = kxcjk1013_convert_freq_to_bit(val, val2);
- if (odr_bits < 0)
- return odr_bits;
+ if (data->chipset == KXTF9)
+ odr_setting = kxcjk1013_find_odr_value(kxtf9_samp_freq_table,
+ ARRAY_SIZE(kxtf9_samp_freq_table),
+ val, val2);
+ else
+ odr_setting = kxcjk1013_find_odr_value(samp_freq_table,
+ ARRAY_SIZE(samp_freq_table),
+ val, val2);
+
+ if (IS_ERR(odr_setting))
+ return PTR_ERR(odr_setting);
/* To change ODR, the chip must be set to STANDBY as per spec */
ret = kxcjk1013_set_mode(data, STANDBY);
@@ -595,20 +668,16 @@ static int kxcjk1013_set_odr(struct kxcjk1013_data *data, int val, int val2)
return ret;
ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_DATA_CTRL,
- odr_bits);
+ odr_setting->odr_bits);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing data_ctrl\n");
return ret;
}
- data->odr_bits = odr_bits;
-
- odr_bits = kxcjk1013_convert_wake_odr_to_bit(val, val2);
- if (odr_bits < 0)
- return odr_bits;
+ data->odr_bits = odr_setting->odr_bits;
ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_CTRL2,
- odr_bits);
+ odr_setting->wuf_bits);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing reg_ctrl2\n");
return ret;
@@ -625,17 +694,14 @@ static int kxcjk1013_set_odr(struct kxcjk1013_data *data, int val, int val2)
static int kxcjk1013_get_odr(struct kxcjk1013_data *data, int *val, int *val2)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(samp_freq_table); ++i) {
- if (samp_freq_table[i].odr_bits == data->odr_bits) {
- *val = samp_freq_table[i].val;
- *val2 = samp_freq_table[i].val2;
- return IIO_VAL_INT_PLUS_MICRO;
- }
- }
-
- return -EINVAL;
+ if (data->chipset == KXTF9)
+ return kxcjk1013_convert_odr_value(kxtf9_samp_freq_table,
+ ARRAY_SIZE(kxtf9_samp_freq_table),
+ data->odr_bits, val, val2);
+ else
+ return kxcjk1013_convert_odr_value(samp_freq_table,
+ ARRAY_SIZE(samp_freq_table),
+ data->odr_bits, val, val2);
}
static int kxcjk1013_get_acc_reg(struct kxcjk1013_data *data, int axis)
@@ -886,13 +952,29 @@ static int kxcjk1013_buffer_postdisable(struct iio_dev *indio_dev)
return kxcjk1013_set_power_state(data, false);
}
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
- "0.781000 1.563000 3.125000 6.250000 12.500000 25 50 100 200 400 800 1600");
+static ssize_t kxcjk1013_get_samp_freq_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct kxcjk1013_data *data = iio_priv(indio_dev);
+ const char *str;
+
+ if (data->chipset == KXTF9)
+ str = kxtf9_samp_freq_avail;
+ else
+ str = kxcjk1013_samp_freq_avail;
+
+ return sprintf(buf, "%s\n", str);
+}
+
+static IIO_DEVICE_ATTR(in_accel_sampling_frequency_available, S_IRUGO,
+ kxcjk1013_get_samp_freq_avail, NULL, 0);
static IIO_CONST_ATTR(in_accel_scale_available, "0.009582 0.019163 0.038326");
static struct attribute *kxcjk1013_attributes[] = {
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_accel_sampling_frequency_available.dev_attr.attr,
&iio_const_attr_in_accel_scale_available.dev_attr.attr,
NULL,
};
@@ -950,7 +1032,6 @@ static const struct iio_info kxcjk1013_info = {
.write_event_value = kxcjk1013_write_event,
.write_event_config = kxcjk1013_write_event_config,
.read_event_config = kxcjk1013_read_event_config,
- .driver_module = THIS_MODULE,
};
static const unsigned long kxcjk1013_scan_masks[] = {0x7, 0};
@@ -1036,9 +1117,74 @@ static int kxcjk1013_data_rdy_trigger_set_state(struct iio_trigger *trig,
static const struct iio_trigger_ops kxcjk1013_trigger_ops = {
.set_trigger_state = kxcjk1013_data_rdy_trigger_set_state,
.try_reenable = kxcjk1013_trig_try_reen,
- .owner = THIS_MODULE,
};
+static void kxcjk1013_report_motion_event(struct iio_dev *indio_dev)
+{
+ struct kxcjk1013_data *data = iio_priv(indio_dev);
+
+ int ret = i2c_smbus_read_byte_data(data->client,
+ KXCJK1013_REG_INT_SRC2);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_int_src2\n");
+ return;
+ }
+
+ if (ret & KXCJK1013_REG_INT_SRC2_BIT_XN)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ data->timestamp);
+
+ if (ret & KXCJK1013_REG_INT_SRC2_BIT_XP)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ data->timestamp);
+
+ if (ret & KXCJK1013_REG_INT_SRC2_BIT_YN)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ data->timestamp);
+
+ if (ret & KXCJK1013_REG_INT_SRC2_BIT_YP)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ data->timestamp);
+
+ if (ret & KXCJK1013_REG_INT_SRC2_BIT_ZN)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ data->timestamp);
+
+ if (ret & KXCJK1013_REG_INT_SRC2_BIT_ZP)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ data->timestamp);
+}
+
static irqreturn_t kxcjk1013_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
@@ -1051,66 +1197,17 @@ static irqreturn_t kxcjk1013_event_handler(int irq, void *private)
goto ack_intr;
}
- if (ret & 0x02) {
- ret = i2c_smbus_read_byte_data(data->client,
- KXCJK1013_REG_INT_SRC2);
- if (ret < 0) {
- dev_err(&data->client->dev,
- "Error reading reg_int_src2\n");
- goto ack_intr;
- }
-
- if (ret & KXCJK1013_REG_INT_SRC2_BIT_XN)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- data->timestamp);
- if (ret & KXCJK1013_REG_INT_SRC2_BIT_XP)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- data->timestamp);
-
-
- if (ret & KXCJK1013_REG_INT_SRC2_BIT_YN)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Y,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- data->timestamp);
- if (ret & KXCJK1013_REG_INT_SRC2_BIT_YP)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Y,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- data->timestamp);
-
- if (ret & KXCJK1013_REG_INT_SRC2_BIT_ZN)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Z,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- data->timestamp);
- if (ret & KXCJK1013_REG_INT_SRC2_BIT_ZP)
+ if (ret & KXCJK1013_REG_INT_SRC1_BIT_WUFS) {
+ if (data->chipset == KXTF9)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL,
0,
- IIO_MOD_Z,
+ IIO_MOD_X_AND_Y_AND_Z,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
data->timestamp);
+ else
+ kxcjk1013_report_motion_event(indio_dev);
}
ack_intr:
@@ -1403,6 +1500,7 @@ static const struct i2c_device_id kxcjk1013_id[] = {
{"kxcjk1013", KXCJK1013},
{"kxcj91008", KXCJ91008},
{"kxtj21009", KXTJ21009},
+ {"kxtf9", KXTF9},
{"SMO8500", KXCJ91008},
{}
};
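The kxcjk-1013 rework above folds the two parallel frequency tables into one struct kx_odr_map carrying both the data-rate bits and the matching wake-up (WUF) rate, and makes the lookup generic so the KXTF9 can bring its own table. A minimal sketch of that table-driven lookup, returning NULL instead of the kernel's ERR_PTR() on a miss:

#include <stddef.h>
#include <stdio.h>

struct odr_map {
	int val;      /* integer part of the rate in Hz      */
	int val2;     /* fractional part in microhertz       */
	int odr_bits; /* value for the data control register */
	int wuf_bits; /* matching wake-up function rate      */
};

static const struct odr_map samp_freq_table[] = {
	{  25, 0, 0x01, 0x05 },
	{  50, 0, 0x02, 0x06 },
	{ 100, 0, 0x03, 0x06 },
};

static const struct odr_map *find_odr(const struct odr_map *map, size_t n,
				      int val, int val2)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (map[i].val == val && map[i].val2 == val2)
			return &map[i];
	return NULL;	/* the driver returns ERR_PTR(-EINVAL) here */
}

int main(void)
{
	const struct odr_map *m;

	m = find_odr(samp_freq_table,
		     sizeof(samp_freq_table) / sizeof(samp_freq_table[0]),
		     50, 0);
	if (m)
		printf("odr_bits=0x%02x wuf_bits=0x%02x\n", m->odr_bits, m->wuf_bits);
	return 0;
}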
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 9af60ac70738..0c0df4fce420 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -390,7 +390,6 @@ static const struct iio_info kxsd9_info = {
.read_raw = &kxsd9_read_raw,
.write_raw = &kxsd9_write_raw,
.attrs = &kxsd9_attribute_group,
- .driver_module = THIS_MODULE,
};
/* Four channels apart from timestamp, scan mask = 0x0f */
diff --git a/drivers/iio/accel/mc3230.c b/drivers/iio/accel/mc3230.c
index 4ea2ff623a6d..8b11604eed63 100644
--- a/drivers/iio/accel/mc3230.c
+++ b/drivers/iio/accel/mc3230.c
@@ -107,7 +107,6 @@ static int mc3230_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info mc3230_info = {
- .driver_module = THIS_MODULE,
.read_raw = mc3230_read_raw,
};
diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
index 6551085bedd7..da0ceaac46b5 100644
--- a/drivers/iio/accel/mma7455_core.c
+++ b/drivers/iio/accel/mma7455_core.c
@@ -199,7 +199,6 @@ static const struct iio_info mma7455_info = {
.attrs = &mma7455_group,
.read_raw = mma7455_read_raw,
.write_raw = mma7455_write_raw,
- .driver_module = THIS_MODULE,
};
#define MMA7455_CHANNEL(axis, idx) { \
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 42fa57e41bdd..f1a13724efb3 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -168,7 +168,6 @@ static int mma7660_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info mma7660_info = {
- .driver_module = THIS_MODULE,
.read_raw = mma7660_read_raw,
.attrs = &mma7660_attribute_group,
};
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index eb6e3dc789b2..bfd4bc806fc2 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -59,7 +59,9 @@
#define MMA8452_FF_MT_THS 0x17
#define MMA8452_FF_MT_THS_MASK 0x7f
#define MMA8452_FF_MT_COUNT 0x18
+#define MMA8452_FF_MT_CHAN_SHIFT 3
#define MMA8452_TRANSIENT_CFG 0x1d
+#define MMA8452_TRANSIENT_CFG_CHAN(chan) BIT((chan) + 1)
#define MMA8452_TRANSIENT_CFG_HPF_BYP BIT(0)
#define MMA8452_TRANSIENT_CFG_ELE BIT(4)
#define MMA8452_TRANSIENT_SRC 0x1e
@@ -69,6 +71,7 @@
#define MMA8452_TRANSIENT_THS 0x1f
#define MMA8452_TRANSIENT_THS_MASK GENMASK(6, 0)
#define MMA8452_TRANSIENT_COUNT 0x20
+#define MMA8452_TRANSIENT_CHAN_SHIFT 1
#define MMA8452_CTRL_REG1 0x2a
#define MMA8452_CTRL_ACTIVE BIT(0)
#define MMA8452_CTRL_DR_MASK GENMASK(5, 3)
@@ -107,6 +110,51 @@ struct mma8452_data {
const struct mma_chip_info *chip_info;
};
+ /**
+ * struct mma8452_event_regs - chip specific data related to events
+ * @ev_cfg: event config register address
+ * @ev_cfg_ele: latch bit in event config register
+ * @ev_cfg_chan_shift: bit position of the X-direction event enable
+ * in the event config register
+ * @ev_src: event source register address
+ * @ev_ths: event threshold register address
+ * @ev_ths_mask: mask for the threshold value
+ * @ev_count: event count (period) register address
+ *
+ * Since not all chips supported by the driver support comparing high pass
+ * filtered data for events (interrupts), different interrupt sources are
+ * used for different chips and the relevant registers are included here.
+ */
+struct mma8452_event_regs {
+ u8 ev_cfg;
+ u8 ev_cfg_ele;
+ u8 ev_cfg_chan_shift;
+ u8 ev_src;
+ u8 ev_ths;
+ u8 ev_ths_mask;
+ u8 ev_count;
+};
+
+static const struct mma8452_event_regs ev_regs_accel_falling = {
+ .ev_cfg = MMA8452_FF_MT_CFG,
+ .ev_cfg_ele = MMA8452_FF_MT_CFG_ELE,
+ .ev_cfg_chan_shift = MMA8452_FF_MT_CHAN_SHIFT,
+ .ev_src = MMA8452_FF_MT_SRC,
+ .ev_ths = MMA8452_FF_MT_THS,
+ .ev_ths_mask = MMA8452_FF_MT_THS_MASK,
+ .ev_count = MMA8452_FF_MT_COUNT
+};
+
+static const struct mma8452_event_regs ev_regs_accel_rising = {
+ .ev_cfg = MMA8452_TRANSIENT_CFG,
+ .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
+ .ev_cfg_chan_shift = MMA8452_TRANSIENT_CHAN_SHIFT,
+ .ev_src = MMA8452_TRANSIENT_SRC,
+ .ev_ths = MMA8452_TRANSIENT_THS,
+ .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
+ .ev_count = MMA8452_TRANSIENT_COUNT,
+};
+
/**
* struct mma_chip_info - chip specific data
* @chip_id: WHO_AM_I register's value
@@ -116,40 +164,16 @@ struct mma8452_data {
* @mma_scales: scale factors for converting register values
* to m/s^2; 3 modes: 2g, 4g, 8g; 2 integers
* per mode: m/s^2 and micro m/s^2
- * @ev_cfg: event config register address
- * @ev_cfg_ele: latch bit in event config register
- * @ev_cfg_chan_shift: number of the bit to enable events in X
- * direction; in event config register
- * @ev_src: event source register address
- * @ev_src_xe: bit in event source register that indicates
- * an event in X direction
- * @ev_src_ye: bit in event source register that indicates
- * an event in Y direction
- * @ev_src_ze: bit in event source register that indicates
- * an event in Z direction
- * @ev_ths: event threshold register address
- * @ev_ths_mask: mask for the threshold value
- * @ev_count: event count (period) register address
- *
- * Since not all chips supported by the driver support comparing high pass
- * filtered data for events (interrupts), different interrupt sources are
- * used for different chips and the relevant registers are included here.
+ * @all_events: all events supported by this chip
+ * @enabled_events: event flags enabled and handled by this driver
*/
struct mma_chip_info {
u8 chip_id;
const struct iio_chan_spec *channels;
int num_channels;
const int mma_scales[3][2];
- u8 ev_cfg;
- u8 ev_cfg_ele;
- u8 ev_cfg_chan_shift;
- u8 ev_src;
- u8 ev_src_xe;
- u8 ev_src_ye;
- u8 ev_src_ze;
- u8 ev_ths;
- u8 ev_ths_mask;
- u8 ev_count;
+ int all_events;
+ int enabled_events;
};
enum {
@@ -394,11 +418,11 @@ static ssize_t mma8452_show_os_ratio_avail(struct device *dev,
}
static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(mma8452_show_samp_freq_avail);
-static IIO_DEVICE_ATTR(in_accel_scale_available, S_IRUGO,
+static IIO_DEVICE_ATTR(in_accel_scale_available, 0444,
mma8452_show_scale_avail, NULL, 0);
static IIO_DEVICE_ATTR(in_accel_filter_high_pass_3db_frequency_available,
- S_IRUGO, mma8452_show_hp_cutoff_avail, NULL, 0);
-static IIO_DEVICE_ATTR(in_accel_oversampling_ratio_available, S_IRUGO,
+ 0444, mma8452_show_hp_cutoff_avail, NULL, 0);
+static IIO_DEVICE_ATTR(in_accel_oversampling_ratio_available, 0444,
mma8452_show_os_ratio_avail, NULL, 0);
static int mma8452_get_samp_freq_index(struct mma8452_data *data,
@@ -602,9 +626,8 @@ static int mma8452_set_power_mode(struct mma8452_data *data, u8 mode)
static int mma8452_freefall_mode_enabled(struct mma8452_data *data)
{
int val;
- const struct mma_chip_info *chip = data->chip_info;
- val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+ val = i2c_smbus_read_byte_data(data->client, MMA8452_FF_MT_CFG);
if (val < 0)
return val;
@@ -614,29 +637,28 @@ static int mma8452_freefall_mode_enabled(struct mma8452_data *data)
static int mma8452_set_freefall_mode(struct mma8452_data *data, bool state)
{
int val;
- const struct mma_chip_info *chip = data->chip_info;
if ((state && mma8452_freefall_mode_enabled(data)) ||
(!state && !(mma8452_freefall_mode_enabled(data))))
return 0;
- val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+ val = i2c_smbus_read_byte_data(data->client, MMA8452_FF_MT_CFG);
if (val < 0)
return val;
if (state) {
- val |= BIT(idx_x + chip->ev_cfg_chan_shift);
- val |= BIT(idx_y + chip->ev_cfg_chan_shift);
- val |= BIT(idx_z + chip->ev_cfg_chan_shift);
+ val |= BIT(idx_x + MMA8452_FF_MT_CHAN_SHIFT);
+ val |= BIT(idx_y + MMA8452_FF_MT_CHAN_SHIFT);
+ val |= BIT(idx_z + MMA8452_FF_MT_CHAN_SHIFT);
val &= ~MMA8452_FF_MT_CFG_OAE;
} else {
- val &= ~BIT(idx_x + chip->ev_cfg_chan_shift);
- val &= ~BIT(idx_y + chip->ev_cfg_chan_shift);
- val &= ~BIT(idx_z + chip->ev_cfg_chan_shift);
+ val &= ~BIT(idx_x + MMA8452_FF_MT_CHAN_SHIFT);
+ val &= ~BIT(idx_y + MMA8452_FF_MT_CHAN_SHIFT);
+ val &= ~BIT(idx_z + MMA8452_FF_MT_CHAN_SHIFT);
val |= MMA8452_FF_MT_CFG_OAE;
}
- return mma8452_change_config(data, chip->ev_cfg, val);
+ return mma8452_change_config(data, MMA8452_FF_MT_CFG, val);
}
static int mma8452_set_hp_filter_frequency(struct mma8452_data *data,
@@ -740,7 +762,37 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
return ret;
}
-static int mma8452_read_thresh(struct iio_dev *indio_dev,
+static int mma8452_get_event_regs(struct mma8452_data *data,
+ const struct iio_chan_spec *chan, enum iio_event_direction dir,
+ const struct mma8452_event_regs **ev_reg)
+{
+ if (!chan)
+ return -EINVAL;
+
+ switch (chan->type) {
+ case IIO_ACCEL:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ if ((data->chip_info->all_events
+ & MMA8452_INT_TRANS) &&
+ (data->chip_info->enabled_events
+ & MMA8452_INT_TRANS))
+ *ev_reg = &ev_regs_accel_rising;
+ else
+ *ev_reg = &ev_regs_accel_falling;
+ return 0;
+ case IIO_EV_DIR_FALLING:
+ *ev_reg = &ev_regs_accel_falling;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mma8452_read_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
@@ -749,21 +801,24 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev,
{
struct mma8452_data *data = iio_priv(indio_dev);
int ret, us, power_mode;
+ const struct mma8452_event_regs *ev_regs;
+
+ ret = mma8452_get_event_regs(data, chan, dir, &ev_regs);
+ if (ret)
+ return ret;
switch (info) {
case IIO_EV_INFO_VALUE:
- ret = i2c_smbus_read_byte_data(data->client,
- data->chip_info->ev_ths);
+ ret = i2c_smbus_read_byte_data(data->client, ev_regs->ev_ths);
if (ret < 0)
return ret;
- *val = ret & data->chip_info->ev_ths_mask;
+ *val = ret & ev_regs->ev_ths_mask;
return IIO_VAL_INT;
case IIO_EV_INFO_PERIOD:
- ret = i2c_smbus_read_byte_data(data->client,
- data->chip_info->ev_count);
+ ret = i2c_smbus_read_byte_data(data->client, ev_regs->ev_count);
if (ret < 0)
return ret;
@@ -800,7 +855,7 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev,
}
}
-static int mma8452_write_thresh(struct iio_dev *indio_dev,
+static int mma8452_write_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
@@ -809,14 +864,18 @@ static int mma8452_write_thresh(struct iio_dev *indio_dev,
{
struct mma8452_data *data = iio_priv(indio_dev);
int ret, reg, steps;
+ const struct mma8452_event_regs *ev_regs;
+
+ ret = mma8452_get_event_regs(data, chan, dir, &ev_regs);
+ if (ret)
+ return ret;
switch (info) {
case IIO_EV_INFO_VALUE:
- if (val < 0 || val > MMA8452_TRANSIENT_THS_MASK)
+ if (val < 0 || val > ev_regs->ev_ths_mask)
return -EINVAL;
- return mma8452_change_config(data, data->chip_info->ev_ths,
- val);
+ return mma8452_change_config(data, ev_regs->ev_ths, val);
case IIO_EV_INFO_PERIOD:
ret = mma8452_get_power_mode(data);
@@ -830,8 +889,7 @@ static int mma8452_write_thresh(struct iio_dev *indio_dev,
if (steps < 0 || steps > 0xff)
return -EINVAL;
- return mma8452_change_config(data, data->chip_info->ev_count,
- steps);
+ return mma8452_change_config(data, ev_regs->ev_count, steps);
case IIO_EV_INFO_HIGH_PASS_FILTER_3DB:
reg = i2c_smbus_read_byte_data(data->client,
@@ -861,23 +919,24 @@ static int mma8452_read_event_config(struct iio_dev *indio_dev,
enum iio_event_direction dir)
{
struct mma8452_data *data = iio_priv(indio_dev);
- const struct mma_chip_info *chip = data->chip_info;
int ret;
+ const struct mma8452_event_regs *ev_regs;
+
+ ret = mma8452_get_event_regs(data, chan, dir, &ev_regs);
+ if (ret)
+ return ret;
switch (dir) {
case IIO_EV_DIR_FALLING:
return mma8452_freefall_mode_enabled(data);
case IIO_EV_DIR_RISING:
- if (mma8452_freefall_mode_enabled(data))
- return 0;
-
ret = i2c_smbus_read_byte_data(data->client,
- data->chip_info->ev_cfg);
+ ev_regs->ev_cfg);
if (ret < 0)
return ret;
return !!(ret & BIT(chan->scan_index +
- chip->ev_cfg_chan_shift));
+ ev_regs->ev_cfg_chan_shift));
default:
return -EINVAL;
}
@@ -890,8 +949,12 @@ static int mma8452_write_event_config(struct iio_dev *indio_dev,
int state)
{
struct mma8452_data *data = iio_priv(indio_dev);
- const struct mma_chip_info *chip = data->chip_info;
int val, ret;
+ const struct mma8452_event_regs *ev_regs;
+
+ ret = mma8452_get_event_regs(data, chan, dir, &ev_regs);
+ if (ret)
+ return ret;
ret = mma8452_set_runtime_pm_state(data->client, state);
if (ret)
@@ -901,28 +964,30 @@ static int mma8452_write_event_config(struct iio_dev *indio_dev,
case IIO_EV_DIR_FALLING:
return mma8452_set_freefall_mode(data, state);
case IIO_EV_DIR_RISING:
- val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+ val = i2c_smbus_read_byte_data(data->client, ev_regs->ev_cfg);
if (val < 0)
return val;
if (state) {
if (mma8452_freefall_mode_enabled(data)) {
- val &= ~BIT(idx_x + chip->ev_cfg_chan_shift);
- val &= ~BIT(idx_y + chip->ev_cfg_chan_shift);
- val &= ~BIT(idx_z + chip->ev_cfg_chan_shift);
+ val &= ~BIT(idx_x + ev_regs->ev_cfg_chan_shift);
+ val &= ~BIT(idx_y + ev_regs->ev_cfg_chan_shift);
+ val &= ~BIT(idx_z + ev_regs->ev_cfg_chan_shift);
val |= MMA8452_FF_MT_CFG_OAE;
}
- val |= BIT(chan->scan_index + chip->ev_cfg_chan_shift);
+ val |= BIT(chan->scan_index +
+ ev_regs->ev_cfg_chan_shift);
} else {
if (mma8452_freefall_mode_enabled(data))
return 0;
- val &= ~BIT(chan->scan_index + chip->ev_cfg_chan_shift);
+ val &= ~BIT(chan->scan_index +
+ ev_regs->ev_cfg_chan_shift);
}
- val |= chip->ev_cfg_ele;
+ val |= ev_regs->ev_cfg_ele;
- return mma8452_change_config(data, chip->ev_cfg, val);
+ return mma8452_change_config(data, ev_regs->ev_cfg, val);
default:
return -EINVAL;
}
@@ -934,35 +999,25 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
s64 ts = iio_get_time_ns(indio_dev);
int src;
- src = i2c_smbus_read_byte_data(data->client, data->chip_info->ev_src);
+ src = i2c_smbus_read_byte_data(data->client, MMA8452_TRANSIENT_SRC);
if (src < 0)
return;
- if (mma8452_freefall_mode_enabled(data)) {
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
- IIO_MOD_X_AND_Y_AND_Z,
- IIO_EV_TYPE_MAG,
- IIO_EV_DIR_FALLING),
- ts);
- return;
- }
-
- if (src & data->chip_info->ev_src_xe)
+ if (src & MMA8452_TRANSIENT_SRC_XTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
ts);
- if (src & data->chip_info->ev_src_ye)
+ if (src & MMA8452_TRANSIENT_SRC_YTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y,
IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
ts);
- if (src & data->chip_info->ev_src_ze)
+ if (src & MMA8452_TRANSIENT_SRC_ZTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z,
IIO_EV_TYPE_MAG,
@@ -974,7 +1029,6 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
{
struct iio_dev *indio_dev = p;
struct mma8452_data *data = iio_priv(indio_dev);
- const struct mma_chip_info *chip = data->chip_info;
int ret = IRQ_NONE;
int src;
@@ -982,15 +1036,29 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
if (src < 0)
return IRQ_NONE;
+ if (!(src & data->chip_info->enabled_events))
+ return IRQ_NONE;
+
if (src & MMA8452_INT_DRDY) {
iio_trigger_poll_chained(indio_dev->trig);
ret = IRQ_HANDLED;
}
- if ((src & MMA8452_INT_TRANS &&
- chip->ev_src == MMA8452_TRANSIENT_SRC) ||
- (src & MMA8452_INT_FF_MT &&
- chip->ev_src == MMA8452_FF_MT_SRC)) {
+ if (src & MMA8452_INT_FF_MT) {
+ if (mma8452_freefall_mode_enabled(data)) {
+ s64 ts = iio_get_time_ns(indio_dev);
+
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_FALLING),
+ ts);
+ }
+ ret = IRQ_HANDLED;
+ }
+
+ if (src & MMA8452_INT_TRANS) {
mma8452_transient_interrupt(indio_dev);
ret = IRQ_HANDLED;
}
@@ -1020,8 +1088,8 @@ done:
}
static int mma8452_reg_access_dbg(struct iio_dev *indio_dev,
- unsigned reg, unsigned writeval,
- unsigned *readval)
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
{
int ret;
struct mma8452_data *data = iio_priv(indio_dev);
@@ -1222,96 +1290,87 @@ static const struct mma_chip_info mma_chip_info_table[] = {
* g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
*/
.mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} },
- .ev_cfg = MMA8452_TRANSIENT_CFG,
- .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
- .ev_cfg_chan_shift = 1,
- .ev_src = MMA8452_TRANSIENT_SRC,
- .ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE,
- .ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE,
- .ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE,
- .ev_ths = MMA8452_TRANSIENT_THS,
- .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
- .ev_count = MMA8452_TRANSIENT_COUNT,
+ /*
+ * Although we enable the interrupt sources once and for
+ * all here, the event detection itself is not enabled until
+ * userspace asks for it via mma8452_write_event_config().
+ */
+ .all_events = MMA8452_INT_DRDY |
+ MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
+ .enabled_events = MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
},
[mma8452] = {
.chip_id = MMA8452_DEVICE_ID,
.channels = mma8452_channels,
.num_channels = ARRAY_SIZE(mma8452_channels),
.mma_scales = { {0, 9577}, {0, 19154}, {0, 38307} },
- .ev_cfg = MMA8452_TRANSIENT_CFG,
- .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
- .ev_cfg_chan_shift = 1,
- .ev_src = MMA8452_TRANSIENT_SRC,
- .ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE,
- .ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE,
- .ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE,
- .ev_ths = MMA8452_TRANSIENT_THS,
- .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
- .ev_count = MMA8452_TRANSIENT_COUNT,
+ /*
+ * Although we enable the interrupt sources once and for
+ * all here, the event detection itself is not enabled until
+ * userspace asks for it via mma8452_write_event_config().
+ */
+ .all_events = MMA8452_INT_DRDY |
+ MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
+ .enabled_events = MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
},
[mma8453] = {
.chip_id = MMA8453_DEVICE_ID,
.channels = mma8453_channels,
.num_channels = ARRAY_SIZE(mma8453_channels),
.mma_scales = { {0, 38307}, {0, 76614}, {0, 153228} },
- .ev_cfg = MMA8452_TRANSIENT_CFG,
- .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
- .ev_cfg_chan_shift = 1,
- .ev_src = MMA8452_TRANSIENT_SRC,
- .ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE,
- .ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE,
- .ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE,
- .ev_ths = MMA8452_TRANSIENT_THS,
- .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
- .ev_count = MMA8452_TRANSIENT_COUNT,
+ /*
+ * Although we enable the interrupt sources once and for
+ * all here, the event detection itself is not enabled until
+ * userspace asks for it via mma8452_write_event_config().
+ */
+ .all_events = MMA8452_INT_DRDY |
+ MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
+ .enabled_events = MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
},
[mma8652] = {
.chip_id = MMA8652_DEVICE_ID,
.channels = mma8652_channels,
.num_channels = ARRAY_SIZE(mma8652_channels),
.mma_scales = { {0, 9577}, {0, 19154}, {0, 38307} },
- .ev_cfg = MMA8452_FF_MT_CFG,
- .ev_cfg_ele = MMA8452_FF_MT_CFG_ELE,
- .ev_cfg_chan_shift = 3,
- .ev_src = MMA8452_FF_MT_SRC,
- .ev_src_xe = MMA8452_FF_MT_SRC_XHE,
- .ev_src_ye = MMA8452_FF_MT_SRC_YHE,
- .ev_src_ze = MMA8452_FF_MT_SRC_ZHE,
- .ev_ths = MMA8452_FF_MT_THS,
- .ev_ths_mask = MMA8452_FF_MT_THS_MASK,
- .ev_count = MMA8452_FF_MT_COUNT,
+ .all_events = MMA8452_INT_DRDY |
+ MMA8452_INT_FF_MT,
+ .enabled_events = MMA8452_INT_FF_MT,
},
[mma8653] = {
.chip_id = MMA8653_DEVICE_ID,
.channels = mma8653_channels,
.num_channels = ARRAY_SIZE(mma8653_channels),
.mma_scales = { {0, 38307}, {0, 76614}, {0, 153228} },
- .ev_cfg = MMA8452_FF_MT_CFG,
- .ev_cfg_ele = MMA8452_FF_MT_CFG_ELE,
- .ev_cfg_chan_shift = 3,
- .ev_src = MMA8452_FF_MT_SRC,
- .ev_src_xe = MMA8452_FF_MT_SRC_XHE,
- .ev_src_ye = MMA8452_FF_MT_SRC_YHE,
- .ev_src_ze = MMA8452_FF_MT_SRC_ZHE,
- .ev_ths = MMA8452_FF_MT_THS,
- .ev_ths_mask = MMA8452_FF_MT_THS_MASK,
- .ev_count = MMA8452_FF_MT_COUNT,
+ /*
+	 * Although we enable the interrupt sources here once and for
+	 * all, the event detection itself is not enabled until
+	 * userspace asks for it via mma8452_write_event_config().
+ */
+ .all_events = MMA8452_INT_DRDY |
+ MMA8452_INT_FF_MT,
+ .enabled_events = MMA8452_INT_FF_MT,
},
[fxls8471] = {
.chip_id = FXLS8471_DEVICE_ID,
.channels = mma8451_channels,
.num_channels = ARRAY_SIZE(mma8451_channels),
.mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} },
- .ev_cfg = MMA8452_TRANSIENT_CFG,
- .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
- .ev_cfg_chan_shift = 1,
- .ev_src = MMA8452_TRANSIENT_SRC,
- .ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE,
- .ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE,
- .ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE,
- .ev_ths = MMA8452_TRANSIENT_THS,
- .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
- .ev_count = MMA8452_TRANSIENT_COUNT,
+ /*
+	 * Although we enable the interrupt sources here once and for
+	 * all, the event detection itself is not enabled until
+	 * userspace asks for it via mma8452_write_event_config().
+ */
+ .all_events = MMA8452_INT_DRDY |
+ MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
+ .enabled_events = MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
},
};
@@ -1332,12 +1391,11 @@ static const struct iio_info mma8452_info = {
.read_raw = &mma8452_read_raw,
.write_raw = &mma8452_write_raw,
.event_attrs = &mma8452_event_attribute_group,
- .read_event_value = &mma8452_read_thresh,
- .write_event_value = &mma8452_write_thresh,
+ .read_event_value = &mma8452_read_event_value,
+ .write_event_value = &mma8452_write_event_value,
.read_event_config = &mma8452_read_event_config,
.write_event_config = &mma8452_write_event_config,
.debugfs_reg_access = &mma8452_reg_access_dbg,
- .driver_module = THIS_MODULE,
};
static const unsigned long mma8452_scan_masks[] = {0x7, 0};
@@ -1368,7 +1426,6 @@ static int mma8452_data_rdy_trigger_set_state(struct iio_trigger *trig,
static const struct iio_trigger_ops mma8452_trigger_ops = {
.set_trigger_state = mma8452_data_rdy_trigger_set_state,
.validate_device = iio_trigger_validate_own_device,
- .owner = THIS_MODULE,
};
static int mma8452_trigger_setup(struct iio_dev *indio_dev)
@@ -1509,16 +1566,6 @@ static int mma8452_probe(struct i2c_client *client,
return ret;
if (client->irq) {
- /*
-	 * Although we enable the interrupt sources here once and for
-	 * all, the event detection itself is not enabled until
-	 * userspace asks for it via mma8452_write_event_config().
- */
- int supported_interrupts = MMA8452_INT_DRDY |
- MMA8452_INT_TRANS |
- MMA8452_INT_FF_MT;
- int enabled_interrupts = MMA8452_INT_TRANS |
- MMA8452_INT_FF_MT;
int irq2;
irq2 = of_irq_get_byname(client->dev.of_node, "INT2");
@@ -1527,8 +1574,8 @@ static int mma8452_probe(struct i2c_client *client,
dev_dbg(&client->dev, "using interrupt line INT2\n");
} else {
ret = i2c_smbus_write_byte_data(client,
- MMA8452_CTRL_REG5,
- supported_interrupts);
+ MMA8452_CTRL_REG5,
+ data->chip_info->all_events);
if (ret < 0)
return ret;
@@ -1536,8 +1583,8 @@ static int mma8452_probe(struct i2c_client *client,
}
ret = i2c_smbus_write_byte_data(client,
- MMA8452_CTRL_REG4,
- enabled_interrupts);
+ MMA8452_CTRL_REG4,
+ data->chip_info->enabled_events);
if (ret < 0)
return ret;
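The mma8452 hunks above fold the per-event register description into two interrupt bitmasks per chip variant (all_events routed to the pin, enabled_events unmasked at probe), so probe can write CTRL_REG5/CTRL_REG4 straight from chip_info. A minimal host-side sketch of that flow, assuming simplified bit positions and a stand-in write_reg() in place of the driver's I2C helper:

/*
 * Illustrative sketch only, not the driver: chip_info carries the
 * interrupt bitmasks and probe merely writes them out. Bit positions
 * and write_reg() are assumptions made to keep the example self-contained.
 */
#include <stdio.h>
#include <stdint.h>

#define MMA8452_INT_DRDY	(1 << 0)	/* data ready */
#define MMA8452_INT_FF_MT	(1 << 2)	/* freefall / motion */
#define MMA8452_INT_TRANS	(1 << 5)	/* transient */

struct mma_chip_info {
	uint8_t all_events;	/* sources routed via CTRL_REG5 */
	uint8_t enabled_events;	/* sources unmasked in CTRL_REG4 at probe */
};

static void write_reg(const char *reg, uint8_t val)
{
	printf("%s <- 0x%02x\n", reg, val);	/* stand-in for i2c_smbus_write_byte_data() */
}

int main(void)
{
	/* an mma8652-like part: no transient block, so FF_MT only */
	const struct mma_chip_info info = {
		.all_events	= MMA8452_INT_DRDY | MMA8452_INT_FF_MT,
		.enabled_events	= MMA8452_INT_FF_MT,
	};

	write_reg("CTRL_REG5", info.all_events);
	write_reg("CTRL_REG4", info.enabled_events);
	return 0;
}

As the moved comment notes, these writes only route and unmask the interrupt sources; the actual event detection stays off until userspace enables it through the event config callback.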
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index 1f53f08476f5..da7c21504f38 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -332,7 +332,6 @@ static const struct iio_chan_spec mma9551_channels[] = {
};
static const struct iio_info mma9551_info = {
- .driver_module = THIS_MODULE,
.read_raw = mma9551_read_raw,
.read_event_config = mma9551_read_event_config,
.write_event_config = mma9551_write_event_config,
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index 36bf19733be0..b52a3f182190 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -987,7 +987,6 @@ static const struct iio_chan_spec mma9553_channels[] = {
};
static const struct iio_info mma9553_info = {
- .driver_module = THIS_MODULE,
.read_raw = mma9553_read_raw,
.write_raw = mma9553_write_raw,
.read_event_config = mma9553_read_event_config,
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index c23f47af7256..58099e40d717 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -264,7 +264,6 @@ static int mxc4005_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info mxc4005_info = {
- .driver_module = THIS_MODULE,
.read_raw = mxc4005_read_raw,
.write_raw = mxc4005_write_raw,
.attrs = &mxc4005_attrs_group,
@@ -376,7 +375,6 @@ static int mxc4005_trigger_try_reen(struct iio_trigger *trig)
static const struct iio_trigger_ops mxc4005_trigger_ops = {
.set_trigger_state = mxc4005_set_trigger_state,
.try_reenable = mxc4005_trigger_try_reen,
- .owner = THIS_MODULE,
};
static int mxc4005_chip_init(struct mxc4005_data *data)
diff --git a/drivers/iio/accel/mxc6255.c b/drivers/iio/accel/mxc6255.c
index 0abad6948201..ddd50d1781c5 100644
--- a/drivers/iio/accel/mxc6255.c
+++ b/drivers/iio/accel/mxc6255.c
@@ -78,7 +78,6 @@ static int mxc6255_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info mxc6255_info = {
- .driver_module = THIS_MODULE,
.read_raw = mxc6255_read_raw,
};
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index 39ab210c44f6..f33dadf7b262 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -1454,7 +1454,6 @@ static const struct iio_info sca3000_info = {
.write_event_value = &sca3000_write_event_value,
.read_event_config = &sca3000_read_event_config,
.write_event_config = &sca3000_write_event_config,
- .driver_module = THIS_MODULE,
};
static int sca3000_probe(struct spi_device *spi)
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 0fe521609a3a..2f931e4837e5 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -32,6 +32,8 @@ enum st_accel_type {
H3LIS331DL,
LIS331DL,
LIS3LV02DL,
+ LIS2DW12,
+ LIS3DHH,
ST_ACCEL_MAX,
};
@@ -52,6 +54,8 @@ enum st_accel_type {
#define LIS2DH12_ACCEL_DEV_NAME "lis2dh12_accel"
#define LIS3L02DQ_ACCEL_DEV_NAME "lis3l02dq"
#define LNG2DM_ACCEL_DEV_NAME "lng2dm"
+#define LIS2DW12_ACCEL_DEV_NAME "lis2dw12"
+#define LIS3DHH_ACCEL_DEV_NAME "lis3dhh"
/**
* struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 752856b3a849..460aa58e0159 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -159,12 +159,16 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = 0x80,
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int1 = 0x10,
- .mask_int2 = 0x00,
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x10,
+ },
.addr_ihl = 0x25,
.mask_ihl = 0x02,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
},
.sim = {
.addr = 0x23,
@@ -229,14 +233,24 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = 0x80,
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int1 = 0x02,
- .mask_int2 = 0x10,
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x02,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
+ },
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x10,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
+ },
.addr_ihl = 0x22,
.mask_ihl = 0x80,
- .addr_od = 0x22,
- .mask_od = 0x40,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
},
.sim = {
.addr = 0x23,
@@ -313,12 +327,16 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = 0x08,
},
.drdy_irq = {
- .addr = 0x23,
- .mask_int1 = 0x80,
- .mask_int2 = 0x00,
+ .int1 = {
+ .addr = 0x23,
+ .mask = 0x80,
+ },
.addr_ihl = 0x23,
.mask_ihl = 0x40,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
.ig1 = {
.en_addr = 0x23,
.en_mask = 0x08,
@@ -387,9 +405,14 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = 0x01,
},
.drdy_irq = {
- .addr = 0x21,
- .mask_int1 = 0x04,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .int1 = {
+ .addr = 0x21,
+ .mask = 0x04,
+ },
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
},
.sim = {
.addr = 0x21,
@@ -444,14 +467,24 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int1 = 0x04,
- .mask_int2 = 0x20,
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x04,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
+ },
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x20,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
+ },
.addr_ihl = 0x22,
.mask_ihl = 0x80,
- .addr_od = 0x22,
- .mask_od = 0x40,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
},
.sim = {
.addr = 0x21,
@@ -513,9 +546,14 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = 0x80,
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int1 = 0x02,
- .mask_int2 = 0x10,
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x02,
+ },
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x10,
+ },
.addr_ihl = 0x22,
.mask_ihl = 0x80,
},
@@ -567,9 +605,14 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.bdu = {
},
.drdy_irq = {
- .addr = 0x21,
- .mask_int1 = 0x04,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .int1 = {
+ .addr = 0x21,
+ .mask = 0x04,
+ },
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
},
.sim = {
.addr = 0x21,
@@ -635,12 +678,16 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int1 = 0x10,
- .mask_int2 = 0x00,
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x10,
+ },
.addr_ihl = 0x25,
.mask_ihl = 0x02,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
},
.sim = {
.addr = 0x23,
@@ -649,6 +696,139 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.multi_read_bit = true,
.bootime = 2,
},
+ {
+ .wai = 0x44,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LIS2DW12_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
+ .odr = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .odr_avl = {
+ { .hz = 1, .value = 0x01, },
+ { .hz = 12, .value = 0x02, },
+ { .hz = 25, .value = 0x03, },
+ { .hz = 50, .value = 0x04, },
+ { .hz = 100, .value = 0x05, },
+ { .hz = 200, .value = 0x06, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .addr = 0x25,
+ .mask = 0x30,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(976),
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(1952),
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(3904),
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(7808),
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x21,
+ .mask = 0x08,
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x23,
+ .mask = 0x01,
+ .addr_od = 0x22,
+ .mask_od = 0x20,
+ },
+ .int2 = {
+ .addr = 0x24,
+ .mask = 0x01,
+ .addr_od = 0x22,
+ .mask_od = 0x20,
+ },
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x08,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x01,
+ },
+ },
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(0),
+ },
+ .multi_read_bit = false,
+ .bootime = 2,
+ },
+ {
+ .wai = 0x11,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LIS3DHH_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_16bit_channels,
+ .odr = {
+ /* just ODR = 1100Hz available */
+ .odr_avl = {
+ { .hz = 1100, .value = 0x00, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = 0x80,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .gain = IIO_G_TO_M_S_2(76),
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x20,
+ .mask = 0x01,
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x21,
+ .mask = 0x80,
+ .addr_od = 0x23,
+ .mask_od = 0x04,
+ },
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x80,
+ .addr_od = 0x23,
+ .mask_od = 0x08,
+ },
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .multi_read_bit = false,
+ .bootime = 2,
+ },
};
static int st_accel_read_raw(struct iio_dev *indio_dev,
@@ -721,7 +901,6 @@ static const struct attribute_group st_accel_attribute_group = {
};
static const struct iio_info accel_info = {
- .driver_module = THIS_MODULE,
.attrs = &st_accel_attribute_group,
.read_raw = &st_accel_read_raw,
.write_raw = &st_accel_write_raw,
@@ -730,7 +909,6 @@ static const struct iio_info accel_info = {
#ifdef CONFIG_IIO_TRIGGER
static const struct iio_trigger_ops st_accel_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE,
.validate_device = st_sensors_validate_device,
};
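The st_accel_core hunks above convert the flat drdy_irq description (mask_int1/mask_int2/addr_stat_drdy) into per-pin int1/int2 sub-structs plus a stat_drdy register/mask pair, which is what lets the new LIS2DW12 and LIS3DHH entries describe different open-drain and status layouts per pin. A self-contained sketch of the shape implied by those initializers; the struct and field names are inferred from the hunks rather than copied from the st_sensors headers, and 0x27 stands in for ST_SENSORS_DEFAULT_STAT_ADDR:

#include <stdio.h>
#include <stdint.h>

struct st_sensors_int_pin {
	uint8_t addr;		/* interrupt enable register */
	uint8_t mask;		/* DRDY enable bit on this pin */
	uint8_t addr_od;	/* open-drain configuration (optional) */
	uint8_t mask_od;
};

struct st_sensors_drdy {
	struct st_sensors_int_pin int1;
	struct st_sensors_int_pin int2;
	uint8_t addr_ihl;	/* interrupt active-low register/bit */
	uint8_t mask_ihl;
	struct { uint8_t addr; uint8_t mask; } stat_drdy;
};

int main(void)
{
	/* LIS2DW12-style routing, mirroring the new table entry above */
	const struct st_sensors_drdy drdy = {
		.int1 = { .addr = 0x23, .mask = 0x01, .addr_od = 0x22, .mask_od = 0x20 },
		.int2 = { .addr = 0x24, .mask = 0x01, .addr_od = 0x22, .mask_od = 0x20 },
		.addr_ihl = 0x22,
		.mask_ihl = 0x08,
		.stat_drdy = { .addr = 0x27, .mask = 0x01 },
	};

	printf("INT1: reg 0x%02x, mask 0x%02x\n", drdy.int1.addr, drdy.int1.mask);
	printf("status: reg 0x%02x, mask 0x%02x\n", drdy.stat_drdy.addr, drdy.stat_drdy.mask);
	return 0;
}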
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 18cafb9f2468..363429b5686c 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -94,6 +94,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lng2dm-accel",
.data = LNG2DM_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lis2dw12",
+ .data = LIS2DW12_ACCEL_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -129,6 +133,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ H3LIS331DL_ACCEL_DEV_NAME, H3LIS331DL },
{ LIS331DL_ACCEL_DEV_NAME, LIS331DL },
{ LIS3LV02DL_ACCEL_DEV_NAME, LIS3LV02DL },
+ { LIS2DW12_ACCEL_DEV_NAME, LIS2DW12 },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 915fa49085f7..dcc9bd243a52 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -82,6 +82,14 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis331dl-accel",
.data = LIS331DL_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lis2dw12",
+ .data = LIS2DW12_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lis3dhh",
+ .data = LIS3DHH_ACCEL_DEV_NAME,
+ },
{}
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -133,6 +141,8 @@ static const struct spi_device_id st_accel_id_table[] = {
{ H3LIS331DL_ACCEL_DEV_NAME },
{ LIS331DL_ACCEL_DEV_NAME },
{ LIS3LV02DL_ACCEL_DEV_NAME },
+ { LIS2DW12_ACCEL_DEV_NAME },
+ { LIS3DHH_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index e31023dc5f1b..cacc0da2f874 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -237,7 +237,6 @@ static int stk8312_data_rdy_trigger_set_state(struct iio_trigger *trig,
static const struct iio_trigger_ops stk8312_trigger_ops = {
.set_trigger_state = stk8312_data_rdy_trigger_set_state,
- .owner = THIS_MODULE,
};
static int stk8312_set_sample_rate(struct stk8312_data *data, u8 rate)
@@ -421,7 +420,6 @@ static int stk8312_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info stk8312_info = {
- .driver_module = THIS_MODULE,
.read_raw = stk8312_read_raw,
.write_raw = stk8312_write_raw,
.attrs = &stk8312_attribute_group,
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 300d955bad00..576b6b140f08 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -179,7 +179,6 @@ static int stk8ba50_data_rdy_trigger_set_state(struct iio_trigger *trig,
static const struct iio_trigger_ops stk8ba50_trigger_ops = {
.set_trigger_state = stk8ba50_data_rdy_trigger_set_state,
- .owner = THIS_MODULE,
};
static int stk8ba50_set_power(struct stk8ba50_data *data, bool mode)
@@ -307,7 +306,6 @@ static int stk8ba50_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info stk8ba50_info = {
- .driver_module = THIS_MODULE,
.read_raw = stk8ba50_read_raw,
.write_raw = stk8ba50_write_raw,
.attrs = &stk8ba50_attribute_group,
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 1d13bf03c758..ef86296b8b0d 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -477,12 +477,13 @@ config MAX9611
called max9611.
config MCP320X
- tristate "Microchip Technology MCP3x01/02/04/08"
+ tristate "Microchip Technology MCP3x01/02/04/08 and MCP3550/1/3"
depends on SPI
help
Say yes here to build support for Microchip Technology's
MCP3001, MCP3002, MCP3004, MCP3008, MCP3201, MCP3202, MCP3204,
- MCP3208 or MCP3301 analog to digital converter.
+ MCP3208, MCP3301, MCP3550, MCP3551 and MCP3553 analog to digital
+ converters.
This driver can also be built as a module. If so, the module will be
called mcp320x.
@@ -595,7 +596,7 @@ config QCOM_SPMI_VADC
config RCAR_GYRO_ADC
tristate "Renesas R-Car GyroADC driver"
- depends on ARCH_RCAR_GEN2 || (ARM && COMPILE_TEST)
+ depends on ARCH_RCAR_GEN2 || COMPILE_TEST
help
Say yes here to build support for the GyroADC found in Renesas
R-Car Gen2 SoCs. This block is a simple SPI offload engine for
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index b8d5cfd57ec4..605eb5e7e829 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -280,7 +280,6 @@ static AD7266_DECLARE_DIFF_CHANNELS_FIXED(u, 'u');
static const struct iio_info ad7266_info = {
.read_raw = &ad7266_read_raw,
.update_scan_mode = &ad7266_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static const unsigned long ad7266_available_scan_masks[] = {
diff --git a/drivers/iio/adc/ad7291.c b/drivers/iio/adc/ad7291.c
index 1d90b02732bb..a862b5d8fb4b 100644
--- a/drivers/iio/adc/ad7291.c
+++ b/drivers/iio/adc/ad7291.c
@@ -461,7 +461,6 @@ static const struct iio_info ad7291_info = {
.write_event_config = &ad7291_write_event_config,
.read_event_value = &ad7291_read_event_value,
.write_event_value = &ad7291_write_event_value,
- .driver_module = THIS_MODULE,
};
static int ad7291_probe(struct i2c_client *client,
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index e399bf04c73a..2b20c6c8ec7f 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -280,7 +280,6 @@ static int ad7298_read_raw(struct iio_dev *indio_dev,
static const struct iio_info ad7298_info = {
.read_raw = &ad7298_read_raw,
.update_scan_mode = ad7298_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static int ad7298_probe(struct spi_device *spi)
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index b7ecf9aab90f..b7706bf10ffe 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -195,7 +195,6 @@ static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
};
static const struct iio_info ad7476_info = {
- .driver_module = THIS_MODULE,
.read_raw = &ad7476_read_raw,
};
diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c
index ce45037295d8..3ae14fc8c649 100644
--- a/drivers/iio/adc/ad7766.c
+++ b/drivers/iio/adc/ad7766.c
@@ -185,7 +185,6 @@ static const struct iio_buffer_setup_ops ad7766_buffer_setup_ops = {
};
static const struct iio_info ad7766_info = {
- .driver_module = THIS_MODULE,
.read_raw = &ad7766_read_raw,
};
@@ -208,7 +207,6 @@ static int ad7766_set_trigger_state(struct iio_trigger *trig, bool enable)
}
static const struct iio_trigger_ops ad7766_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = ad7766_set_trigger_state,
.validate_device = iio_trigger_validate_own_device,
};
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index 34e353c43ac8..70fbf92f9827 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -308,13 +308,11 @@ static const struct iio_info ad7791_info = {
.read_raw = &ad7791_read_raw,
.attrs = &ad7791_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ad7791_no_filter_info = {
.read_raw = &ad7791_read_raw,
.validate_trigger = ad_sd_validate_trigger,
- .driver_module = THIS_MODULE,
};
static int ad7791_setup(struct ad7791_state *st,
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 47c3d7f32900..801afb61310b 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -563,7 +563,6 @@ static const struct iio_info ad7793_info = {
.write_raw_get_fmt = &ad7793_write_raw_get_fmt,
.attrs = &ad7793_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ad7797_info = {
@@ -572,7 +571,6 @@ static const struct iio_info ad7797_info = {
.write_raw_get_fmt = &ad7793_write_raw_get_fmt,
.attrs = &ad7793_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
- .driver_module = THIS_MODULE,
};
#define DECLARE_AD7793_CHANNELS(_name, _b, _sb, _s) \
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 7a483bfbd70c..205c0f1761aa 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -229,7 +229,6 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
static const struct iio_info ad7887_info = {
.read_raw = &ad7887_read_raw,
- .driver_module = THIS_MODULE,
};
static int ad7887_probe(struct spi_device *spi)
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 77a675e11ebb..ffb7e089969c 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -262,7 +262,6 @@ static int ad7923_read_raw(struct iio_dev *indio_dev,
static const struct iio_info ad7923_info = {
.read_raw = &ad7923_read_raw,
.update_scan_mode = ad7923_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static int ad7923_probe(struct spi_device *spi)
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 22426ae4af97..e1da67d5ee22 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -526,13 +526,11 @@ static const struct attribute_group ad799x_event_attrs_group = {
static const struct iio_info ad7991_info = {
.read_raw = &ad799x_read_raw,
- .driver_module = THIS_MODULE,
.update_scan_mode = ad799x_update_scan_mode,
};
static const struct iio_info ad7993_4_7_8_noirq_info = {
.read_raw = &ad799x_read_raw,
- .driver_module = THIS_MODULE,
.update_scan_mode = ad799x_update_scan_mode,
};
@@ -543,7 +541,6 @@ static const struct iio_info ad7993_4_7_8_irq_info = {
.write_event_config = &ad799x_write_event_config,
.read_event_value = &ad799x_read_event_value,
.write_event_value = &ad799x_write_event_value,
- .driver_module = THIS_MODULE,
.update_scan_mode = ad799x_update_scan_mode,
};
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 22c4c17cd996..cf1b048b0665 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -463,7 +463,6 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig)
EXPORT_SYMBOL_GPL(ad_sd_validate_trigger);
static const struct iio_trigger_ops ad_sd_trigger_ops = {
- .owner = THIS_MODULE,
};
static int ad_sd_probe_trigger(struct iio_dev *indio_dev)
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index c02b23d675cb..8a958d5f1905 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -165,7 +165,6 @@ static int aspeed_adc_reg_access(struct iio_dev *indio_dev,
}
static const struct iio_info aspeed_adc_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = aspeed_adc_read_raw,
.write_raw = aspeed_adc_write_raw,
.debugfs_reg_access = aspeed_adc_reg_access,
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index a70ef7fec95f..755a493c2a2c 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -348,7 +348,6 @@ static int at91_adc_reenable_trigger(struct iio_trigger *trig)
}
static const struct iio_trigger_ops at91_adc_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &at91_adc_configure_trigger,
.try_reenable = &at91_adc_reenable_trigger,
};
@@ -584,7 +583,6 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev,
static const struct iio_info at91_adc_info = {
.read_raw = &at91_adc_read_raw,
.write_raw = &at91_adc_write_raw,
- .driver_module = THIS_MODULE,
};
static void at91_adc_hw_init(struct at91_adc_state *st)
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 15109728cae7..3836d4222a3e 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -594,7 +594,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
}
static const struct iio_trigger_ops at91_adc_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &at91_adc_configure_trigger,
};
@@ -976,7 +975,6 @@ static int at91_adc_probe_pdata(struct at91_adc_state *st,
}
static const struct iio_info at91_adc_info = {
- .driver_module = THIS_MODULE,
.read_raw = &at91_adc_read_raw,
};
diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
index 11e177180ea0..a30a97245e91 100644
--- a/drivers/iio/adc/axp20x_adc.c
+++ b/drivers/iio/adc/axp20x_adc.c
@@ -464,12 +464,10 @@ static int axp20x_write_raw(struct iio_dev *indio_dev,
static const struct iio_info axp20x_adc_iio_info = {
.read_raw = axp20x_read_raw,
.write_raw = axp20x_write_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_info axp22x_adc_iio_info = {
.read_raw = axp22x_read_raw,
- .driver_module = THIS_MODULE,
};
static int axp20x_adc_rate(int rate)
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 462a99c13e7a..60c9e853dd81 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -183,7 +183,6 @@ static int axp288_adc_set_state(struct regmap *regmap)
static const struct iio_info axp288_adc_iio_info = {
.read_raw = &axp288_adc_read_raw,
- .driver_module = THIS_MODULE,
};
static int axp288_adc_probe(struct platform_device *pdev)
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 7f4f9c4150e3..7af59a4bbd8d 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -492,7 +492,6 @@ static int iproc_adc_read_raw(struct iio_dev *indio_dev,
static const struct iio_info iproc_adc_iio_info = {
.read_raw = &iproc_adc_read_raw,
- .driver_module = THIS_MODULE,
};
#define IPROC_ADC_CHANNEL(_index, _id) { \
diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c
index 71c806ecc722..72d8fa94ab31 100644
--- a/drivers/iio/adc/berlin2-adc.c
+++ b/drivers/iio/adc/berlin2-adc.c
@@ -277,7 +277,6 @@ static irqreturn_t berlin2_adc_tsen_irq(int irq, void *private)
}
static const struct iio_info berlin2_adc_info = {
- .driver_module = THIS_MODULE,
.read_raw = berlin2_adc_read_raw,
};
diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
index 91636c0ba5b5..707d8b24b072 100644
--- a/drivers/iio/adc/cc10001_adc.c
+++ b/drivers/iio/adc/cc10001_adc.c
@@ -262,7 +262,6 @@ static int cc10001_update_scan_mode(struct iio_dev *indio_dev,
}
static const struct iio_info cc10001_adc_info = {
- .driver_module = THIS_MODULE,
.read_raw = &cc10001_adc_read_raw,
.update_scan_mode = &cc10001_update_scan_mode,
};
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
index 6e419d5a7c14..3576ec73ec23 100644
--- a/drivers/iio/adc/cpcap-adc.c
+++ b/drivers/iio/adc/cpcap-adc.c
@@ -932,7 +932,6 @@ err_unlock:
static const struct iio_info cpcap_adc_info = {
.read_raw = &cpcap_adc_read,
- .driver_module = THIS_MODULE,
};
/*
diff --git a/drivers/iio/adc/da9150-gpadc.c b/drivers/iio/adc/da9150-gpadc.c
index 3445107e10b7..0a5d9ce79164 100644
--- a/drivers/iio/adc/da9150-gpadc.c
+++ b/drivers/iio/adc/da9150-gpadc.c
@@ -249,7 +249,6 @@ static int da9150_gpadc_read_raw(struct iio_dev *indio_dev,
static const struct iio_info da9150_gpadc_info = {
.read_raw = &da9150_gpadc_read_raw,
- .driver_module = THIS_MODULE,
};
#define DA9150_GPADC_CHANNEL(_id, _hw_id, _type, chan_info, \
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index ab8d6aed5085..c64c6675cae6 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -479,7 +479,6 @@ static const struct iio_info dln2_adc_info = {
.read_raw = dln2_adc_read_raw,
.write_raw = dln2_adc_write_raw,
.update_scan_mode = dln2_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
@@ -604,10 +603,6 @@ static void dln2_adc_event(struct platform_device *pdev, u16 echo,
iio_trigger_poll(dln2->trig);
}
-static const struct iio_trigger_ops dln2_adc_trigger_ops = {
- .owner = THIS_MODULE,
-};
-
static int dln2_adc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -665,7 +660,6 @@ static int dln2_adc_probe(struct platform_device *pdev)
dev_err(dev, "failed to allocate trigger\n");
return -ENOMEM;
}
- dln2->trig->ops = &dln2_adc_trigger_ops;
iio_trigger_set_drvdata(dln2->trig, dln2);
devm_iio_trigger_register(dev, dln2->trig);
iio_trigger_set_immutable(indio_dev, dln2->trig);
diff --git a/drivers/iio/adc/envelope-detector.c b/drivers/iio/adc/envelope-detector.c
index fef15c0d7c9c..4ebda8ab54fe 100644
--- a/drivers/iio/adc/envelope-detector.c
+++ b/drivers/iio/adc/envelope-detector.c
@@ -322,7 +322,6 @@ static const struct iio_chan_spec envelope_detector_iio_channel = {
static const struct iio_info envelope_detector_info = {
.read_raw = &envelope_detector_read_raw,
- .driver_module = THIS_MODULE,
};
static int envelope_detector_probe(struct platform_device *pdev)
diff --git a/drivers/iio/adc/ep93xx_adc.c b/drivers/iio/adc/ep93xx_adc.c
index a179ac476c6d..81c901507ad2 100644
--- a/drivers/iio/adc/ep93xx_adc.c
+++ b/drivers/iio/adc/ep93xx_adc.c
@@ -150,7 +150,6 @@ static int ep93xx_read_raw(struct iio_dev *iiodev,
}
static const struct iio_info ep93xx_adc_info = {
- .driver_module = THIS_MODULE,
.read_raw = ep93xx_read_raw,
};
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 6c5a7be9f8c1..f10443f92e4c 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -657,7 +657,6 @@ static int exynos_adc_reg_access(struct iio_dev *indio_dev,
static const struct iio_info exynos_adc_iio_info = {
.read_raw = &exynos_read_raw,
.debugfs_reg_access = &exynos_adc_reg_access,
- .driver_module = THIS_MODULE,
};
#define ADC_CHANNEL(_index, _id) { \
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index adf7dc712937..6f6c9a348158 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -408,7 +408,6 @@ static const struct iio_chan_spec hi8435_channels[] = {
};
static const struct iio_info hi8435_info = {
- .driver_module = THIS_MODULE,
.read_raw = hi8435_read_raw,
.read_event_config = hi8435_read_event_config,
.write_event_config = hi8435_write_event_config,
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 27005d84ed73..d10b9f13d557 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -374,7 +374,6 @@ static const struct attribute_group hx711_attribute_group = {
};
static const struct iio_info hx711_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = hx711_read_raw,
.write_raw = hx711_write_raw,
.write_raw_get_fmt = hx711_write_raw_get_fmt,
diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c
index 254b29a68b9d..cfab31162845 100644
--- a/drivers/iio/adc/imx7d_adc.c
+++ b/drivers/iio/adc/imx7d_adc.c
@@ -412,7 +412,6 @@ static int imx7d_adc_reg_access(struct iio_dev *indio_dev,
}
static const struct iio_info imx7d_adc_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = &imx7d_adc_read_raw,
.debugfs_reg_access = &imx7d_adc_reg_access,
};
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index f387b972e4f4..84a43871f7dc 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -123,7 +123,7 @@ struct ina2xx_chip_info {
struct task_struct *task;
const struct ina2xx_config *config;
struct mutex state_lock;
- unsigned int shunt_resistor;
+ unsigned int shunt_resistor_uohm;
int avg;
int int_time_vbus; /* Bus voltage integration time uS */
int int_time_vshunt; /* Shunt voltage integration time uS */
@@ -436,7 +436,7 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
/*
* Set current LSB to 1mA, shunt is in uOhms
* (equation 13 in datasheet). We hardcode a Current_LSB
- * of 1.0 x10-6. The only remaining parameter is RShunt.
+ * of 1.0 x10-3. The only remaining parameter is RShunt.
* There is no need to expose the CALIBRATION register
* to the user for now. But we need to reset this register
* if the user updates RShunt after driver init, e.g upon
@@ -445,7 +445,7 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
static int ina2xx_set_calibration(struct ina2xx_chip_info *chip)
{
u16 regval = DIV_ROUND_CLOSEST(chip->config->calibration_factor,
- chip->shunt_resistor);
+ chip->shunt_resistor_uohm);
return regmap_write(chip->regmap, INA2XX_CALIBRATION, regval);
}
@@ -455,7 +455,7 @@ static int set_shunt_resistor(struct ina2xx_chip_info *chip, unsigned int val)
if (val <= 0 || val > chip->config->calibration_factor)
return -EINVAL;
- chip->shunt_resistor = val;
+ chip->shunt_resistor_uohm = val;
return 0;
}
@@ -465,8 +465,9 @@ static ssize_t ina2xx_shunt_resistor_show(struct device *dev,
char *buf)
{
struct ina2xx_chip_info *chip = iio_priv(dev_to_iio_dev(dev));
+ int vals[2] = { chip->shunt_resistor_uohm, 1000000 };
- return sprintf(buf, "%d\n", chip->shunt_resistor);
+ return iio_format_value(buf, IIO_VAL_FRACTIONAL, 1, vals);
}
static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
@@ -474,14 +475,13 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
const char *buf, size_t len)
{
struct ina2xx_chip_info *chip = iio_priv(dev_to_iio_dev(dev));
- unsigned long val;
- int ret;
+ int val, val_fract, ret;
- ret = kstrtoul((const char *) buf, 10, &val);
+ ret = iio_str_to_fixpoint(buf, 100000, &val, &val_fract);
if (ret)
return ret;
- ret = set_shunt_resistor(chip, val);
+ ret = set_shunt_resistor(chip, val * 1000000 + val_fract);
if (ret)
return ret;
@@ -778,7 +778,6 @@ static const struct attribute_group ina226_attribute_group = {
};
static const struct iio_info ina219_info = {
- .driver_module = THIS_MODULE,
.attrs = &ina219_attribute_group,
.read_raw = ina2xx_read_raw,
.write_raw = ina2xx_write_raw,
@@ -786,7 +785,6 @@ static const struct iio_info ina219_info = {
};
static const struct iio_info ina226_info = {
- .driver_module = THIS_MODULE,
.attrs = &ina226_attribute_group,
.read_raw = ina2xx_read_raw,
.write_raw = ina2xx_write_raw,
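The ina2xx change above renames the field to shunt_resistor_uohm and switches the sysfs attribute from a raw micro-ohm integer to a fractional ohm value (iio_format_value() with IIO_VAL_FRACTIONAL on read, iio_str_to_fixpoint() on write), while the calibration word is still derived from micro-ohms. A small host-side sketch of the same arithmetic; the printed format, helpers and the calibration factor are illustrative stand-ins, not the kernel API:

#include <stdio.h>

int main(void)
{
	unsigned int shunt_uohm = 2000;		/* a 2 milliohm shunt */

	/* show: micro-ohms rendered as fractional ohms, as the patch does by
	 * handing {uohm, 1000000} to iio_format_value() */
	printf("in_shunt_resistor: %u.%06u\n",
	       shunt_uohm / 1000000, shunt_uohm % 1000000);

	/* store: "0.002" parsed into integer and micro-unit fractional parts,
	 * then recombined into micro-ohms as in set_shunt_resistor() */
	unsigned int val = 0, val_fract = 2000;
	shunt_uohm = val * 1000000 + val_fract;

	/* the calibration register still divides a per-chip factor by the
	 * shunt value in micro-ohms; 5120000 is an illustrative number only */
	unsigned int calibration_factor = 5120000;
	unsigned int regval = (calibration_factor + shunt_uohm / 2) / shunt_uohm;

	printf("stored %u uohm, calibration word 0x%04x\n", shunt_uohm, regval);
	return 0;
}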
diff --git a/drivers/iio/adc/lp8788_adc.c b/drivers/iio/adc/lp8788_adc.c
index 152cfc8e1c7b..3bc4df916420 100644
--- a/drivers/iio/adc/lp8788_adc.c
+++ b/drivers/iio/adc/lp8788_adc.c
@@ -125,7 +125,6 @@ static int lp8788_adc_read_raw(struct iio_dev *indio_dev,
static const struct iio_info lp8788_adc_info = {
.read_raw = &lp8788_adc_read_raw,
- .driver_module = THIS_MODULE,
};
#define LP8788_CHAN(_id, _type) { \
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
index 3ef18f4b27f0..041dc4a3f66c 100644
--- a/drivers/iio/adc/lpc18xx_adc.c
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -116,7 +116,6 @@ static int lpc18xx_adc_read_raw(struct iio_dev *indio_dev,
static const struct iio_info lpc18xx_adc_info = {
.read_raw = lpc18xx_adc_read_raw,
- .driver_module = THIS_MODULE,
};
static int lpc18xx_adc_probe(struct platform_device *pdev)
diff --git a/drivers/iio/adc/lpc32xx_adc.c b/drivers/iio/adc/lpc32xx_adc.c
index 6a5b9a9bc662..20b36690fa4f 100644
--- a/drivers/iio/adc/lpc32xx_adc.c
+++ b/drivers/iio/adc/lpc32xx_adc.c
@@ -104,7 +104,6 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
static const struct iio_info lpc32xx_adc_iio_info = {
.read_raw = &lpc32xx_read_raw,
- .driver_module = THIS_MODULE,
};
#define LPC32XX_ADC_CHANNEL(_index) { \
diff --git a/drivers/iio/adc/ltc2471.c b/drivers/iio/adc/ltc2471.c
index 29b7ed60cdb0..b88102b751cf 100644
--- a/drivers/iio/adc/ltc2471.c
+++ b/drivers/iio/adc/ltc2471.c
@@ -98,7 +98,6 @@ static const struct iio_chan_spec ltc2473_channel[] = {
static const struct iio_info ltc2471_info = {
.read_raw = ltc2471_read_raw,
- .driver_module = THIS_MODULE,
};
static int ltc2471_i2c_probe(struct i2c_client *client,
diff --git a/drivers/iio/adc/ltc2485.c b/drivers/iio/adc/ltc2485.c
index eab91f12454a..b24c14037fd4 100644
--- a/drivers/iio/adc/ltc2485.c
+++ b/drivers/iio/adc/ltc2485.c
@@ -90,7 +90,6 @@ static const struct iio_chan_spec ltc2485_channel[] = {
static const struct iio_info ltc2485_info = {
.read_raw = ltc2485_read_raw,
- .driver_module = THIS_MODULE,
};
static int ltc2485_probe(struct i2c_client *client,
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index 5bf8011dcde9..f1f7cdf66fbd 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -186,7 +186,6 @@ static const struct iio_chan_spec ltc2497_channel[] = {
static const struct iio_info ltc2497_info = {
.read_raw = ltc2497_read_raw,
- .driver_module = THIS_MODULE,
};
static int ltc2497_probe(struct i2c_client *client,
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index ebc715927e63..375da6491499 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -381,13 +381,11 @@ static irqreturn_t max1027_trigger_handler(int irq, void *private)
}
static const struct iio_trigger_ops max1027_trigger_ops = {
- .owner = THIS_MODULE,
.validate_device = &iio_trigger_validate_own_device,
.set_trigger_state = &max1027_set_trigger_state,
};
static const struct iio_info max1027_info = {
- .driver_module = THIS_MODULE,
.read_raw = &max1027_read_raw,
.validate_trigger = &max1027_validate_trigger,
.debugfs_reg_access = &max1027_debugfs_reg_access,
diff --git a/drivers/iio/adc/max11100.c b/drivers/iio/adc/max11100.c
index 1180bcc22ff1..af59ab2e650c 100644
--- a/drivers/iio/adc/max11100.c
+++ b/drivers/iio/adc/max11100.c
@@ -100,7 +100,6 @@ static int max11100_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info max11100_info = {
- .driver_module = THIS_MODULE,
.read_raw = max11100_read_raw,
};
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
index 2e9648a078c4..49db9e9ae625 100644
--- a/drivers/iio/adc/max1118.c
+++ b/drivers/iio/adc/max1118.c
@@ -155,7 +155,6 @@ static int max1118_read_raw(struct iio_dev *indio_dev,
static const struct iio_info max1118_info = {
.read_raw = max1118_read_raw,
- .driver_module = THIS_MODULE,
};
static irqreturn_t max1118_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 80eada4886b3..7f1848dac9bf 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1029,7 +1029,6 @@ static int max1363_update_scan_mode(struct iio_dev *indio_dev,
static const struct iio_info max1238_info = {
.read_raw = &max1363_read_raw,
- .driver_module = THIS_MODULE,
.update_scan_mode = &max1363_update_scan_mode,
};
@@ -1040,7 +1039,6 @@ static const struct iio_info max1363_info = {
.write_event_config = &max1363_write_event_config,
.read_raw = &max1363_read_raw,
.update_scan_mode = &max1363_update_scan_mode,
- .driver_module = THIS_MODULE,
.event_attrs = &max1363_event_attribute_group,
};
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index b1dd17cbce58..0538ff8c4ac1 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -460,7 +460,6 @@ static const struct attribute_group max9611_attribute_group = {
};
static const struct iio_info indio_info = {
- .driver_module = THIS_MODULE,
.read_raw = max9611_read_raw,
.attrs = &max9611_attribute_group,
};
@@ -573,7 +572,6 @@ static int max9611_probe(struct i2c_client *client,
static struct i2c_driver max9611_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = max9611_of_table,
},
.probe = max9611_probe,
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 071dd23a33d9..a04856d8afdb 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -19,6 +19,11 @@
* ------------
* 13 bit converter
* MCP3301
+ * ------------
+ * 22 bit converter
+ * MCP3550
+ * MCP3551
+ * MCP3553
*
* Datasheet can be found here:
* http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001
@@ -28,6 +33,7 @@
* http://ww1.microchip.com/downloads/en/DeviceDoc/21034D.pdf mcp3202
* http://ww1.microchip.com/downloads/en/DeviceDoc/21298c.pdf mcp3204/08
* http://ww1.microchip.com/downloads/en/DeviceDoc/21700E.pdf mcp3301
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21950D.pdf mcp3550/1/3
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -51,25 +57,45 @@ enum {
mcp3204,
mcp3208,
mcp3301,
+ mcp3550_50,
+ mcp3550_60,
+ mcp3551,
+ mcp3553,
};
struct mcp320x_chip_info {
const struct iio_chan_spec *channels;
unsigned int num_channels;
unsigned int resolution;
+ unsigned int conv_time; /* usec */
};
+/**
+ * struct mcp320x - Microchip SPI ADC instance
+ * @spi: SPI slave (parent of the IIO device)
+ * @msg: SPI message to select a channel and receive a value from the ADC
+ * @transfer: SPI transfers used by @msg
+ * @start_conv_msg: SPI message to start a conversion by briefly asserting CS
+ * @start_conv_transfer: SPI transfer used by @start_conv_msg
+ * @reg: regulator generating Vref
+ * @lock: protects read sequences
+ * @chip_info: ADC properties
+ * @tx_buf: buffer for @transfer[0] (not used on single-channel converters)
+ * @rx_buf: buffer for @transfer[1]
+ */
struct mcp320x {
struct spi_device *spi;
struct spi_message msg;
struct spi_transfer transfer[2];
+ struct spi_message start_conv_msg;
+ struct spi_transfer start_conv_transfer;
struct regulator *reg;
struct mutex lock;
const struct mcp320x_chip_info *chip_info;
u8 tx_buf ____cacheline_aligned;
- u8 rx_buf[2];
+ u8 rx_buf[4];
};
static int mcp320x_channel_to_tx_data(int device_index,
@@ -78,10 +104,6 @@ static int mcp320x_channel_to_tx_data(int device_index,
int start_bit = 1;
switch (device_index) {
- case mcp3001:
- case mcp3201:
- case mcp3301:
- return 0;
case mcp3002:
case mcp3202:
return ((start_bit << 4) | (!differential << 3) |
@@ -102,21 +124,24 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
{
int ret;
- adc->rx_buf[0] = 0;
- adc->rx_buf[1] = 0;
- adc->tx_buf = mcp320x_channel_to_tx_data(device_index,
- channel, differential);
-
- if (device_index != mcp3001 && device_index != mcp3201 && device_index != mcp3301) {
- ret = spi_sync(adc->spi, &adc->msg);
- if (ret < 0)
- return ret;
- } else {
- ret = spi_read(adc->spi, &adc->rx_buf, sizeof(adc->rx_buf));
+ if (adc->chip_info->conv_time) {
+ ret = spi_sync(adc->spi, &adc->start_conv_msg);
if (ret < 0)
return ret;
+
+ usleep_range(adc->chip_info->conv_time,
+ adc->chip_info->conv_time + 100);
}
+ memset(&adc->rx_buf, 0, sizeof(adc->rx_buf));
+ if (adc->chip_info->num_channels > 1)
+ adc->tx_buf = mcp320x_channel_to_tx_data(device_index, channel,
+ differential);
+
+ ret = spi_sync(adc->spi, &adc->msg);
+ if (ret < 0)
+ return ret;
+
switch (device_index) {
case mcp3001:
*val = (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
@@ -138,6 +163,31 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
*val = sign_extend32((adc->rx_buf[0] & 0x1f) << 8
| adc->rx_buf[1], 12);
return 0;
+ case mcp3550_50:
+ case mcp3550_60:
+ case mcp3551:
+ case mcp3553: {
+ u32 raw = be32_to_cpup((u32 *)adc->rx_buf);
+
+ if (!(adc->spi->mode & SPI_CPOL))
+ raw <<= 1; /* strip Data Ready bit in SPI mode 0,0 */
+
+ /*
+ * If the input is within -vref and vref, bit 21 is the sign.
+ * Up to 12% overrange or underrange are allowed, in which case
+ * bit 23 is the sign and bit 0 to 21 is the value.
+ */
+ raw >>= 8;
+ if (raw & BIT(22) && raw & BIT(23))
+ return -EIO; /* cannot have overrange AND underrange */
+ else if (raw & BIT(22))
+ raw &= ~BIT(22); /* overrange */
+ else if (raw & BIT(23) || raw & BIT(21))
+ raw |= GENMASK(31, 22); /* underrange or negative */
+
+ *val = (s32)raw;
+ return 0;
+ }
default:
return -EINVAL;
}
@@ -248,7 +298,6 @@ static const struct iio_chan_spec mcp3208_channels[] = {
static const struct iio_info mcp320x_info = {
.read_raw = mcp320x_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct mcp320x_chip_info mcp320x_chip_infos[] = {
@@ -297,6 +346,31 @@ static const struct mcp320x_chip_info mcp320x_chip_infos[] = {
.num_channels = ARRAY_SIZE(mcp3201_channels),
.resolution = 13
},
+ [mcp3550_50] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 21,
+ /* 2% max deviation + 144 clock periods to exit shutdown */
+ .conv_time = 80000 * 1.02 + 144000 / 102.4,
+ },
+ [mcp3550_60] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 21,
+ .conv_time = 66670 * 1.02 + 144000 / 122.88,
+ },
+ [mcp3551] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 21,
+ .conv_time = 73100 * 1.02 + 144000 / 112.64,
+ },
+ [mcp3553] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 21,
+ .conv_time = 16670 * 1.02 + 144000 / 122.88,
+ },
};
static int mcp320x_probe(struct spi_device *spi)
@@ -304,7 +378,7 @@ static int mcp320x_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
struct mcp320x *adc;
const struct mcp320x_chip_info *chip_info;
- int ret;
+ int ret, device_index;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
if (!indio_dev)
@@ -320,7 +394,8 @@ static int mcp320x_probe(struct spi_device *spi)
indio_dev->info = &mcp320x_info;
spi_set_drvdata(spi, indio_dev);
- chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
+ device_index = spi_get_device_id(spi)->driver_data;
+ chip_info = &mcp320x_chip_infos[device_index];
indio_dev->channels = chip_info->channels;
indio_dev->num_channels = chip_info->num_channels;
@@ -329,10 +404,41 @@ static int mcp320x_probe(struct spi_device *spi)
adc->transfer[0].tx_buf = &adc->tx_buf;
adc->transfer[0].len = sizeof(adc->tx_buf);
adc->transfer[1].rx_buf = adc->rx_buf;
- adc->transfer[1].len = sizeof(adc->rx_buf);
+ adc->transfer[1].len = DIV_ROUND_UP(chip_info->resolution, 8);
+
+ if (chip_info->num_channels == 1)
+ /* single-channel converters are rx only (no MOSI pin) */
+ spi_message_init_with_transfers(&adc->msg,
+ &adc->transfer[1], 1);
+ else
+ spi_message_init_with_transfers(&adc->msg, adc->transfer,
+ ARRAY_SIZE(adc->transfer));
- spi_message_init_with_transfers(&adc->msg, adc->transfer,
- ARRAY_SIZE(adc->transfer));
+ switch (device_index) {
+ case mcp3550_50:
+ case mcp3550_60:
+ case mcp3551:
+ case mcp3553:
+ /* rx len increases from 24 to 25 bit in SPI mode 0,0 */
+ if (!(spi->mode & SPI_CPOL))
+ adc->transfer[1].len++;
+
+ /* conversions are started by asserting CS pin for 8 usec */
+ adc->start_conv_transfer.delay_usecs = 8;
+ spi_message_init_with_transfers(&adc->start_conv_msg,
+ &adc->start_conv_transfer, 1);
+
+ /*
+ * If CS was previously kept low (continuous conversion mode)
+ * and then changed to high, the chip is in shutdown.
+ * Sometimes it fails to wake from shutdown and clocks out
+ * only 0xffffff. The magic sequence of performing two
+ * conversions without delay between them resets the chip
+ * and ensures all subsequent conversions succeed.
+ */
+ mcp320x_adc_conversion(adc, 0, 1, device_index, &ret);
+ mcp320x_adc_conversion(adc, 0, 1, device_index, &ret);
+ }
adc->reg = devm_regulator_get(&spi->dev, "vref");
if (IS_ERR(adc->reg))
@@ -370,62 +476,29 @@ static int mcp320x_remove(struct spi_device *spi)
#if defined(CONFIG_OF)
static const struct of_device_id mcp320x_dt_ids[] = {
/* NOTE: The use of compatibles with no vendor prefix is deprecated. */
- {
- .compatible = "mcp3001",
- .data = &mcp320x_chip_infos[mcp3001],
- }, {
- .compatible = "mcp3002",
- .data = &mcp320x_chip_infos[mcp3002],
- }, {
- .compatible = "mcp3004",
- .data = &mcp320x_chip_infos[mcp3004],
- }, {
- .compatible = "mcp3008",
- .data = &mcp320x_chip_infos[mcp3008],
- }, {
- .compatible = "mcp3201",
- .data = &mcp320x_chip_infos[mcp3201],
- }, {
- .compatible = "mcp3202",
- .data = &mcp320x_chip_infos[mcp3202],
- }, {
- .compatible = "mcp3204",
- .data = &mcp320x_chip_infos[mcp3204],
- }, {
- .compatible = "mcp3208",
- .data = &mcp320x_chip_infos[mcp3208],
- }, {
- .compatible = "mcp3301",
- .data = &mcp320x_chip_infos[mcp3301],
- }, {
- .compatible = "microchip,mcp3001",
- .data = &mcp320x_chip_infos[mcp3001],
- }, {
- .compatible = "microchip,mcp3002",
- .data = &mcp320x_chip_infos[mcp3002],
- }, {
- .compatible = "microchip,mcp3004",
- .data = &mcp320x_chip_infos[mcp3004],
- }, {
- .compatible = "microchip,mcp3008",
- .data = &mcp320x_chip_infos[mcp3008],
- }, {
- .compatible = "microchip,mcp3201",
- .data = &mcp320x_chip_infos[mcp3201],
- }, {
- .compatible = "microchip,mcp3202",
- .data = &mcp320x_chip_infos[mcp3202],
- }, {
- .compatible = "microchip,mcp3204",
- .data = &mcp320x_chip_infos[mcp3204],
- }, {
- .compatible = "microchip,mcp3208",
- .data = &mcp320x_chip_infos[mcp3208],
- }, {
- .compatible = "microchip,mcp3301",
- .data = &mcp320x_chip_infos[mcp3301],
- }, {
- }
+ { .compatible = "mcp3001" },
+ { .compatible = "mcp3002" },
+ { .compatible = "mcp3004" },
+ { .compatible = "mcp3008" },
+ { .compatible = "mcp3201" },
+ { .compatible = "mcp3202" },
+ { .compatible = "mcp3204" },
+ { .compatible = "mcp3208" },
+ { .compatible = "mcp3301" },
+ { .compatible = "microchip,mcp3001" },
+ { .compatible = "microchip,mcp3002" },
+ { .compatible = "microchip,mcp3004" },
+ { .compatible = "microchip,mcp3008" },
+ { .compatible = "microchip,mcp3201" },
+ { .compatible = "microchip,mcp3202" },
+ { .compatible = "microchip,mcp3204" },
+ { .compatible = "microchip,mcp3208" },
+ { .compatible = "microchip,mcp3301" },
+ { .compatible = "microchip,mcp3550-50" },
+ { .compatible = "microchip,mcp3550-60" },
+ { .compatible = "microchip,mcp3551" },
+ { .compatible = "microchip,mcp3553" },
+ { }
};
MODULE_DEVICE_TABLE(of, mcp320x_dt_ids);
#endif
@@ -440,6 +513,10 @@ static const struct spi_device_id mcp320x_id[] = {
{ "mcp3204", mcp3204 },
{ "mcp3208", mcp3208 },
{ "mcp3301", mcp3301 },
+ { "mcp3550-50", mcp3550_50 },
+ { "mcp3550-60", mcp3550_60 },
+ { "mcp3551", mcp3551 },
+ { "mcp3553", mcp3553 },
{ }
};
MODULE_DEVICE_TABLE(spi, mcp320x_id);
@@ -456,5 +533,5 @@ static struct spi_driver mcp320x_driver = {
module_spi_driver(mcp320x_driver);
MODULE_AUTHOR("Oskar Andero <oskar.andero@gmail.com>");
-MODULE_DESCRIPTION("Microchip Technology MCP3x01/02/04/08");
+MODULE_DESCRIPTION("Microchip Technology MCP3x01/02/04/08 and MCP3550/1/3");
MODULE_LICENSE("GPL v2");
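Beyond the chip table and OF-id additions, the interesting part of the mcp320x change is the MCP3550/1/3 read path: a conversion is kicked off by a short CS pulse, the driver sleeps for the per-chip conv_time, and the 24- or 25-bit result is decoded with the sign/overrange handling shown in the hunk. A self-contained sketch of just that decoding step, operating on the already-assembled big-endian word (endianness handling and the driver's error codes are simplified):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

/* decode one MCP3550/1/3 sample; 'raw' is the 32-bit word assembled from
 * rx_buf, 'mode_0_0' selects the extra Data Ready bit handling */
static int mcp355x_decode(uint32_t raw, int mode_0_0, int32_t *val)
{
	if (mode_0_0)
		raw <<= 1;			/* strip the Data Ready bit */

	raw >>= 8;				/* 24 data bits, right-justified now */

	if ((raw & BIT(22)) && (raw & BIT(23)))
		return -1;			/* over- and underrange at once: bogus */
	else if (raw & BIT(22))
		raw &= ~BIT(22);		/* overrange: value is positive */
	else if ((raw & BIT(23)) || (raw & BIT(21)))
		raw |= GENMASK(31, 22);		/* underrange or negative: sign-extend */

	*val = (int32_t)raw;
	return 0;
}

int main(void)
{
	int32_t val;

	/* an in-range reading of -5 (22-bit two's complement, left-justified),
	 * captured in SPI mode 1,1 so there is no Data Ready bit to strip */
	if (!mcp355x_decode(0x3FFFFB00u, 0, &val))
		printf("value = %d\n", val);	/* prints -5 */
	return 0;
}

The conv_time entries in the new chip table follow the same comment in the hunk: the datasheet conversion time padded by the 2% clock tolerance plus the 144 clock periods needed to leave shutdown, folded into a constant at compile time.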
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 63de705086ed..819f26011500 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -327,7 +327,6 @@ static const struct iio_info mcp3422_info = {
.write_raw = mcp3422_write_raw,
.write_raw_get_fmt = mcp3422_write_raw_get_fmt,
.attrs = &mcp3422_attribute_group,
- .driver_module = THIS_MODULE,
};
static int mcp3422_probe(struct i2c_client *client,
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index 8f3606de4eaf..c80261748d8f 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -80,7 +80,6 @@ static int z188_iio_read_raw(struct iio_dev *iio_dev,
static const struct iio_info z188_adc_info = {
.read_raw = &z188_iio_read_raw,
- .driver_module = THIS_MODULE,
};
static void men_z188_config_channels(void __iomem *addr)
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 2e8dbb89c8c9..9c6932ffc0af 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -840,7 +840,6 @@ out:
static const struct iio_info meson_sar_adc_iio_info = {
.read_raw = meson_sar_adc_iio_info_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
index 414cf44bf19d..95d76abb64ec 100644
--- a/drivers/iio/adc/mt6577_auxadc.c
+++ b/drivers/iio/adc/mt6577_auxadc.c
@@ -180,7 +180,6 @@ static int mt6577_auxadc_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info mt6577_auxadc_info = {
- .driver_module = THIS_MODULE,
.read_raw = &mt6577_auxadc_read_raw,
};
@@ -306,6 +305,7 @@ static SIMPLE_DEV_PM_OPS(mt6577_auxadc_pm_ops,
static const struct of_device_id mt6577_auxadc_of_match[] = {
{ .compatible = "mediatek,mt2701-auxadc", },
+ { .compatible = "mediatek,mt2712-auxadc", },
{ .compatible = "mediatek,mt7622-auxadc", },
{ .compatible = "mediatek,mt8173-auxadc", },
{ }
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index d32b34638c2f..c627513d9f0f 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -382,7 +382,6 @@ static const struct attribute_group mxs_lradc_adc_attribute_group = {
};
static const struct iio_info mxs_lradc_adc_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = mxs_lradc_adc_read_raw,
.write_raw = mxs_lradc_adc_write_raw,
.write_raw_get_fmt = mxs_lradc_adc_write_raw_get_fmt,
@@ -455,7 +454,6 @@ static int mxs_lradc_adc_configure_trigger(struct iio_trigger *trig, bool state)
}
static const struct iio_trigger_ops mxs_lradc_adc_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &mxs_lradc_adc_configure_trigger,
};
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
index 08f446695f97..8997e74a8847 100644
--- a/drivers/iio/adc/nau7802.c
+++ b/drivers/iio/adc/nau7802.c
@@ -402,7 +402,6 @@ static int nau7802_write_raw_get_fmt(struct iio_dev *indio_dev,
}
static const struct iio_info nau7802_info = {
- .driver_module = THIS_MODULE,
.read_raw = &nau7802_read_raw,
.write_raw = &nau7802_write_raw,
.write_raw_get_fmt = nau7802_write_raw_get_fmt,
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 7d61b566e148..69b9affeef1e 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -430,7 +430,6 @@ out:
static const struct iio_info palmas_gpadc_iio_info = {
.read_raw = palmas_gpadc_read_raw,
- .driver_module = THIS_MODULE,
};
#define PALMAS_ADC_CHAN_IIO(chan, _type, chan_info) \
diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
index cea8f1fb444a..b093ecddf1a8 100644
--- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
+++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
@@ -728,7 +728,6 @@ static int pm8xxx_of_xlate(struct iio_dev *indio_dev,
}
static const struct iio_info pm8xxx_xoadc_info = {
- .driver_module = THIS_MODULE,
.of_xlate = pm8xxx_of_xlate,
.read_raw = pm8xxx_read_raw,
};
diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c
index fabd24edc2a1..3f062cd61aba 100644
--- a/drivers/iio/adc/qcom-spmi-iadc.c
+++ b/drivers/iio/adc/qcom-spmi-iadc.c
@@ -356,7 +356,6 @@ static int iadc_read_raw(struct iio_dev *indio_dev,
static const struct iio_info iadc_info = {
.read_raw = iadc_read_raw,
- .driver_module = THIS_MODULE,
};
static irqreturn_t iadc_isr(int irq, void *dev_id)
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
index 9e600bfd1765..3680e0d47412 100644
--- a/drivers/iio/adc/qcom-spmi-vadc.c
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -506,7 +506,6 @@ static int vadc_of_xlate(struct iio_dev *indio_dev,
static const struct iio_info vadc_info = {
.read_raw = vadc_read_raw,
.of_xlate = vadc_of_xlate,
- .driver_module = THIS_MODULE,
};
struct vadc_channels {
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index 27a318164619..dcb50172186f 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -277,7 +277,6 @@ static int rcar_gyroadc_reg_access(struct iio_dev *indio_dev,
}
static const struct iio_info rcar_gyroadc_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = rcar_gyroadc_read_raw,
.debugfs_reg_access = rcar_gyroadc_reg_access,
};
@@ -349,7 +348,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
continue;
}
- childmode = (unsigned int)of_id->data;
+ childmode = (uintptr_t)of_id->data;
switch (childmode) {
case RCAR_GYROADC_MODE_SELECT_1_MB88101A:
sample_width = 12;
@@ -488,8 +487,6 @@ err:
static int rcar_gyroadc_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(rcar_gyroadc_match, &pdev->dev);
struct device *dev = &pdev->dev;
struct rcar_gyroadc *priv;
struct iio_dev *indio_dev;
@@ -526,7 +523,8 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->model = (enum rcar_gyroadc_model)of_id->data;
+ priv->model = (enum rcar_gyroadc_model)
+ of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, indio_dev);
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 5f612d694b33..1f98566d5b3c 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -125,7 +125,6 @@ static irqreturn_t rockchip_saradc_isr(int irq, void *dev_id)
static const struct iio_info rockchip_saradc_iio_info = {
.read_raw = rockchip_saradc_read_raw,
- .driver_module = THIS_MODULE,
};
#define ADC_CHANNEL(_index, _id) { \
diff --git a/drivers/iio/adc/spear_adc.c b/drivers/iio/adc/spear_adc.c
index 5dd61f6a57b9..b1da2c46107c 100644
--- a/drivers/iio/adc/spear_adc.c
+++ b/drivers/iio/adc/spear_adc.c
@@ -254,7 +254,6 @@ static int spear_adc_configure(struct spear_adc_state *st)
static const struct iio_info spear_adc_info = {
.read_raw = &spear_adc_read_raw,
.write_raw = &spear_adc_write_raw,
- .driver_module = THIS_MODULE,
};
static int spear_adc_probe(struct platform_device *pdev)
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 804198eb0eef..6aefef99f935 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -139,6 +139,11 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
}
rate = clk_get_rate(priv->aclk);
+ if (!rate) {
+ dev_err(&pdev->dev, "Invalid clock rate: 0\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < ARRAY_SIZE(stm32f4_pclk_div); i++) {
if ((rate / stm32f4_pclk_div[i]) <= STM32F4_ADC_MAX_CLK_RATE)
break;
@@ -216,6 +221,10 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
 * From spec: PLL output mustn't exceed max rate
*/
rate = clk_get_rate(priv->aclk);
+ if (!rate) {
+ dev_err(&pdev->dev, "Invalid adc clock rate: 0\n");
+ return -EINVAL;
+ }
for (i = 0; i < ARRAY_SIZE(stm32h7_adc_ckmodes_spec); i++) {
ckmode = stm32h7_adc_ckmodes_spec[i].ckmode;
@@ -232,6 +241,10 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
/* Synchronous clock modes (e.g. ckmode is 1, 2 or 3) */
rate = clk_get_rate(priv->bclk);
+ if (!rate) {
+ dev_err(&pdev->dev, "Invalid bus clock rate: 0\n");
+ return -EINVAL;
+ }
for (i = 0; i < ARRAY_SIZE(stm32h7_adc_ckmodes_spec); i++) {
ckmode = stm32h7_adc_ckmodes_spec[i].ckmode;
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 4df32cf1650e..c9d96f935dba 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -531,6 +531,7 @@ static struct stm32_adc_trig_info stm32h7_adc_trigs[] = {
{ TIM2_TRGO, STM32_EXT11 },
{ TIM4_TRGO, STM32_EXT12 },
{ TIM6_TRGO, STM32_EXT13 },
+ { TIM15_TRGO, STM32_EXT14 },
{ TIM3_CH4, STM32_EXT15 },
{ LPTIM1_OUT, STM32_EXT18 },
{ LPTIM2_OUT, STM32_EXT19 },
@@ -1385,7 +1386,6 @@ static const struct iio_info stm32_adc_iio_info = {
.update_scan_mode = stm32_adc_update_scan_mode,
.debugfs_reg_access = stm32_adc_debugfs_reg_access,
.of_xlate = stm32_adc_of_xlate,
- .driver_module = THIS_MODULE,
};
static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc)
diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/adc/stx104.c
index 2da741d27540..17b021f33180 100644
--- a/drivers/iio/adc/stx104.c
+++ b/drivers/iio/adc/stx104.c
@@ -172,7 +172,6 @@ static int stx104_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info stx104_info = {
- .driver_module = THIS_MODULE,
.read_raw = stx104_read_raw,
.write_raw = stx104_write_raw
};
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 137f577d9432..04d7147e0110 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -352,7 +352,6 @@ static int sun4i_gpadc_read_raw(struct iio_dev *indio_dev,
static const struct iio_info sun4i_gpadc_iio_info = {
.read_raw = sun4i_gpadc_read_raw,
- .driver_module = THIS_MODULE,
};
static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id)
@@ -502,17 +501,15 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
struct iio_dev *indio_dev)
{
struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
- const struct of_device_id *of_dev;
struct resource *mem;
void __iomem *base;
int ret;
- of_dev = of_match_device(sun4i_gpadc_of_id, &pdev->dev);
- if (!of_dev)
+ info->data = of_device_get_match_data(&pdev->dev);
+ if (!info->data)
return -ENODEV;
info->no_irq = true;
- info->data = (struct gpadc_data *)of_dev->data;
indio_dev->num_channels = ARRAY_SIZE(sun8i_a33_gpadc_channels);
indio_dev->channels = sun8i_a33_gpadc_channels;
@@ -529,17 +526,10 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
return ret;
}
- if (!IS_ENABLED(CONFIG_THERMAL_OF))
- return 0;
+ if (IS_ENABLED(CONFIG_THERMAL_OF))
+ info->sensor_device = &pdev->dev;
- info->sensor_device = &pdev->dev;
- info->tzd = thermal_zone_of_sensor_register(info->sensor_device, 0,
- info, &sun4i_ts_tz_ops);
- if (IS_ERR(info->tzd))
- dev_err(&pdev->dev, "could not register thermal sensor: %ld\n",
- PTR_ERR(info->tzd));
-
- return PTR_ERR_OR_ZERO(info->tzd);
+ return 0;
}
static int sun4i_gpadc_probe_mfd(struct platform_device *pdev,
@@ -586,15 +576,6 @@ static int sun4i_gpadc_probe_mfd(struct platform_device *pdev,
* return the temperature.
*/
info->sensor_device = pdev->dev.parent;
- info->tzd = thermal_zone_of_sensor_register(info->sensor_device,
- 0, info,
- &sun4i_ts_tz_ops);
- if (IS_ERR(info->tzd)) {
- dev_err(&pdev->dev,
- "could not register thermal sensor: %ld\n",
- PTR_ERR(info->tzd));
- return PTR_ERR(info->tzd);
- }
} else {
indio_dev->num_channels =
ARRAY_SIZE(sun4i_gpadc_channels_no_temp);
@@ -664,6 +645,22 @@ static int sun4i_gpadc_probe(struct platform_device *pdev)
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_enable(&pdev->dev);
+ if (IS_ENABLED(CONFIG_THERMAL_OF)) {
+ info->tzd = thermal_zone_of_sensor_register(info->sensor_device,
+ 0, info,
+ &sun4i_ts_tz_ops);
+ /*
+	 * Do not fail driver probing when thermal registration fails
+	 * only because no thermal DT node is found (-ENODEV).
+ */
+ if (IS_ERR(info->tzd) && PTR_ERR(info->tzd) != -ENODEV) {
+ dev_err(&pdev->dev,
+ "could not register thermal sensor: %ld\n",
+ PTR_ERR(info->tzd));
+ return PTR_ERR(info->tzd);
+ }
+ }
+
ret = devm_iio_device_register(&pdev->dev, indio_dev);
if (ret < 0) {
dev_err(&pdev->dev, "could not register the device\n");
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 319172cf7da8..405e3779c0c5 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -124,7 +124,6 @@ static struct adcxx1c_model adcxx1c_models[] = {
static const struct iio_info adc081c_info = {
.read_raw = adc081c_read_raw,
- .driver_module = THIS_MODULE,
};
static irqreturn_t adc081c_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index e952e94a14af..188dae705bf7 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -195,7 +195,6 @@ static int adc0832_read_raw(struct iio_dev *iio,
static const struct iio_info adc0832_info = {
.read_raw = adc0832_read_raw,
- .driver_module = THIS_MODULE,
};
static irqreturn_t adc0832_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index a355121c11a4..25504640e126 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -186,7 +186,6 @@ static int adc084s021_buffer_postdisable(struct iio_dev *indio_dev)
static const struct iio_info adc084s021_info = {
.read_raw = adc084s021_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_buffer_setup_ops adc084s021_buffer_setup_ops = {
diff --git a/drivers/iio/adc/ti-adc108s102.c b/drivers/iio/adc/ti-adc108s102.c
index de4e5ac98c6e..841203edaac5 100644
--- a/drivers/iio/adc/ti-adc108s102.c
+++ b/drivers/iio/adc/ti-adc108s102.c
@@ -220,7 +220,6 @@ static int adc108s102_read_raw(struct iio_dev *indio_dev,
static const struct iio_info adc108s102_info = {
.read_raw = &adc108s102_read_raw,
.update_scan_mode = &adc108s102_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static int adc108s102_probe(struct spi_device *spi)
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
index 072f03bfe6a0..703d68ae96b7 100644
--- a/drivers/iio/adc/ti-adc12138.c
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -164,7 +164,7 @@ static int __adc12138_start_conv(struct adc12138 *adc,
void *data, int len)
{
- const u8 ch_to_mux[] = { 0, 4, 1, 5, 2, 6, 3, 7 };
+ static const u8 ch_to_mux[] = { 0, 4, 1, 5, 2, 6, 3, 7 };
u8 mode = (ch_to_mux[channel->channel] << 4) |
(channel->differential ? 0 : 0x80);
@@ -277,7 +277,6 @@ static int adc12138_read_raw(struct iio_dev *iio,
static const struct iio_info adc12138_info = {
.read_raw = adc12138_read_raw,
- .driver_module = THIS_MODULE,
};
static int adc12138_init(struct adc12138 *adc)
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
index 89dfbd31be5c..7cf39b3e2416 100644
--- a/drivers/iio/adc/ti-adc128s052.c
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -130,7 +130,6 @@ static const struct adc128_configuration adc128_config[] = {
static const struct iio_info adc128_info = {
.read_raw = adc128_read_raw,
- .driver_module = THIS_MODULE,
};
static int adc128_probe(struct spi_device *spi)
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
index 4836a0d7aef5..10fa7677ac4b 100644
--- a/drivers/iio/adc/ti-adc161s626.c
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -173,7 +173,6 @@ static int ti_adc_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info ti_adc_info = {
- .driver_module = THIS_MODULE,
.read_raw = ti_adc_read_raw,
};
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index e0dc20488335..6a114dcb4a3a 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -332,7 +332,7 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
static
int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
{
- int ret, pga, dr, conv_time;
+ int ret, pga, dr, dr_old, conv_time;
unsigned int old, mask, cfg;
if (chan < 0 || chan >= ADS1015_CHANNELS)
@@ -358,17 +358,17 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
}
cfg = (old & ~mask) | (cfg & mask);
-
- ret = regmap_write(data->regmap, ADS1015_CFG_REG, cfg);
- if (ret)
- return ret;
-
- if (old != cfg || data->conv_invalid) {
- int dr_old = (old & ADS1015_CFG_DR_MASK) >>
- ADS1015_CFG_DR_SHIFT;
-
+ if (old != cfg) {
+ ret = regmap_write(data->regmap, ADS1015_CFG_REG, cfg);
+ if (ret)
+ return ret;
+ data->conv_invalid = true;
+ }
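+	/*
+	 * Wait one conversion period at the previous data rate plus one at
+	 * the current rate (see the 10% margin below) before reading.
+	 */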
+ if (data->conv_invalid) {
+ dr_old = (old & ADS1015_CFG_DR_MASK) >> ADS1015_CFG_DR_SHIFT;
conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]);
conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
+ conv_time += conv_time / 10; /* 10% internal clock inaccuracy */
usleep_range(conv_time, conv_time + 1);
data->conv_invalid = false;
}
@@ -821,7 +821,6 @@ static const struct attribute_group ads1115_attribute_group = {
};
static const struct iio_info ads1015_info = {
- .driver_module = THIS_MODULE,
.read_raw = ads1015_read_raw,
.write_raw = ads1015_write_raw,
.read_event_value = ads1015_read_event,
@@ -832,7 +831,6 @@ static const struct iio_info ads1015_info = {
};
static const struct iio_info ads1115_info = {
- .driver_module = THIS_MODULE,
.read_raw = ads1015_read_raw,
.write_raw = ads1015_write_raw,
.read_event_value = ads1015_read_event,
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index a376190914ad..0225c1b333ab 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -372,7 +372,6 @@ static int ti_ads7950_read_raw(struct iio_dev *indio_dev,
static const struct iio_info ti_ads7950_info = {
.read_raw = &ti_ads7950_read_raw,
.update_scan_mode = ti_ads7950_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static int ti_ads7950_probe(struct spi_device *spi)
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 4a163496d9e4..079f133144b0 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -369,7 +369,6 @@ static const struct iio_info ads8688_info = {
.write_raw = &ads8688_write_raw,
.write_raw_get_fmt = &ads8688_write_raw_get_fmt,
.attrs = &ads8688_attribute_group,
- .driver_module = THIS_MODULE,
};
static const struct ads8688_chip_info ads8688_chip_info_tbl[] = {
@@ -474,7 +473,6 @@ MODULE_DEVICE_TABLE(of, ads8688_of_match);
static struct spi_driver ads8688_driver = {
.driver = {
.name = "ads8688",
- .owner = THIS_MODULE,
},
.probe = ads8688_probe,
.remove = ads8688_remove,
diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c
index 78d91a069ea4..2290024c89fc 100644
--- a/drivers/iio/adc/ti-tlc4541.c
+++ b/drivers/iio/adc/ti-tlc4541.c
@@ -157,7 +157,6 @@ static int tlc4541_read_raw(struct iio_dev *indio_dev,
static const struct iio_info tlc4541_info = {
.read_raw = &tlc4541_read_raw,
- .driver_module = THIS_MODULE,
};
static int tlc4541_probe(struct spi_device *spi)
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 6cbed7eb118a..b3e573cc6f5f 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -533,7 +533,6 @@ err_unlock:
static const struct iio_info tiadc_info = {
.read_raw = &tiadc_read_raw,
- .driver_module = THIS_MODULE,
};
static int tiadc_request_dma(struct platform_device *pdev,
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index e3cfb91bffc6..8c019bb6625f 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -212,7 +212,6 @@ static int twl4030_madc_read(struct iio_dev *iio_dev,
static const struct iio_info twl4030_madc_iio_info = {
.read_raw = &twl4030_madc_read,
- .driver_module = THIS_MODULE,
};
#define TWL4030_ADC_CHANNEL(_channel, _type, _name) { \
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index bc0e60b9da45..dc83f8f6c3d3 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -843,7 +843,6 @@ static const struct iio_chan_spec twl6032_gpadc_iio_channels[] = {
static const struct iio_info twl6030_gpadc_iio_info = {
.read_raw = &twl6030_gpadc_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct twl6030_gpadc_platform_data twl6030_pdata = {
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index c168e0db329a..bbcb7a4d7edf 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -799,7 +799,6 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev,
}
static const struct iio_info vf610_adc_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = &vf610_read_raw,
.write_raw = &vf610_write_raw,
.debugfs_reg_access = &vf610_adc_reg_access,
diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c
index 3be2e35721cc..53eb5a4136fe 100644
--- a/drivers/iio/adc/viperboard_adc.c
+++ b/drivers/iio/adc/viperboard_adc.c
@@ -107,7 +107,6 @@ error:
static const struct iio_info vprbrd_adc_iio_info = {
.read_raw = &vprbrd_iio_read_raw,
- .driver_module = THIS_MODULE,
};
static int vprbrd_adc_probe(struct platform_device *pdev)
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 4a60497a1f19..d4f21d1be6c8 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -675,7 +675,6 @@ err_out:
}
static const struct iio_trigger_ops xadc_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &xadc_trigger_set_state,
};
@@ -1028,7 +1027,6 @@ static const struct iio_info xadc_info = {
.read_event_value = &xadc_read_event_value,
.write_event_value = &xadc_write_event_value,
.update_scan_mode = &xadc_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static const struct of_device_id xadc_of_match_table[] = {
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index 102c7174da5b..43667866321e 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -117,7 +117,6 @@ static int ad8366_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad8366_info = {
.read_raw = &ad8366_read_raw,
.write_raw = &ad8366_write_raw,
- .driver_module = THIS_MODULE,
};
#define AD8366_CHAN(_channel) { \
diff --git a/drivers/iio/chemical/ams-iaq-core.c b/drivers/iio/chemical/ams-iaq-core.c
index c948ad2ee9ad..d9e5950ad24a 100644
--- a/drivers/iio/chemical/ams-iaq-core.c
+++ b/drivers/iio/chemical/ams-iaq-core.c
@@ -141,7 +141,6 @@ err_out:
static const struct iio_info ams_iaqcore_info = {
.read_raw = ams_iaqcore_read_raw,
- .driver_module = THIS_MODULE,
};
static int ams_iaqcore_probe(struct i2c_client *client,
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index ef761a508630..8c4e05580091 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -344,7 +344,6 @@ static int atlas_buffer_predisable(struct iio_dev *indio_dev)
}
static const struct iio_trigger_ops atlas_interrupt_trigger_ops = {
- .owner = THIS_MODULE,
};
static const struct iio_buffer_setup_ops atlas_buffer_setup_ops = {
@@ -499,7 +498,6 @@ static int atlas_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info atlas_info = {
- .driver_module = THIS_MODULE,
.read_raw = atlas_read_raw,
.write_raw = atlas_write_raw,
};
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 840a6cbd5f0f..97bce8345c6a 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -22,6 +22,7 @@
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/module.h>
@@ -59,6 +60,8 @@
#define CCS811_MODE_IAQ_60SEC 0x30
#define CCS811_MODE_RAW_DATA 0x40
+#define CCS811_MEAS_MODE_INTERRUPT BIT(3)
+
#define CCS811_VOLTAGE_MASK 0x3FF
struct ccs811_reading {
@@ -73,6 +76,8 @@ struct ccs811_data {
struct i2c_client *client;
struct mutex lock; /* Protect readings */
struct ccs811_reading buffer;
+ struct iio_trigger *drdy_trig;
+ bool drdy_trig_on;
};
static const struct iio_chan_spec ccs811_channels[] = {
@@ -193,10 +198,14 @@ static int ccs811_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
mutex_lock(&data->lock);
ret = ccs811_get_measurement(data);
if (ret < 0) {
mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
@@ -228,6 +237,7 @@ static int ccs811_read_raw(struct iio_dev *indio_dev,
ret = -EINVAL;
}
mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
@@ -270,7 +280,31 @@ static int ccs811_read_raw(struct iio_dev *indio_dev,
static const struct iio_info ccs811_info = {
.read_raw = ccs811_read_raw,
- .driver_module = THIS_MODULE,
+};
+
+static int ccs811_set_trigger_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct ccs811_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, CCS811_MEAS_MODE);
+ if (ret < 0)
+ return ret;
+
+ if (state)
+ ret |= CCS811_MEAS_MODE_INTERRUPT;
+ else
+ ret &= ~CCS811_MEAS_MODE_INTERRUPT;
+
+ data->drdy_trig_on = state;
+
+ return i2c_smbus_write_byte_data(data->client, CCS811_MEAS_MODE, ret);
+}
+
+static const struct iio_trigger_ops ccs811_trigger_ops = {
+ .set_trigger_state = ccs811_set_trigger_state,
};
static irqreturn_t ccs811_trigger_handler(int irq, void *p)
@@ -298,6 +332,17 @@ err:
return IRQ_HANDLED;
}
+static irqreturn_t ccs811_data_rdy_trigger_poll(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct ccs811_data *data = iio_priv(indio_dev);
+
+ if (data->drdy_trig_on)
+ iio_trigger_poll(data->drdy_trig);
+
+ return IRQ_HANDLED;
+}
+
static int ccs811_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -346,16 +391,48 @@ static int ccs811_probe(struct i2c_client *client,
indio_dev->dev.parent = &client->dev;
indio_dev->name = id->name;
indio_dev->info = &ccs811_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = ccs811_channels;
indio_dev->num_channels = ARRAY_SIZE(ccs811_channels);
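+	/* Expose the data-ready interrupt, when wired up, as an IIO trigger */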
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ ccs811_data_rdy_trigger_poll,
+ NULL,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "ccs811_irq", indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "irq request error %d\n", -ret);
+ goto err_poweroff;
+ }
+
+ data->drdy_trig = devm_iio_trigger_alloc(&client->dev,
+ "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!data->drdy_trig) {
+ ret = -ENOMEM;
+ goto err_poweroff;
+ }
+
+ data->drdy_trig->dev.parent = &client->dev;
+ data->drdy_trig->ops = &ccs811_trigger_ops;
+ iio_trigger_set_drvdata(data->drdy_trig, indio_dev);
+ indio_dev->trig = data->drdy_trig;
+ iio_trigger_get(indio_dev->trig);
+ ret = iio_trigger_register(data->drdy_trig);
+ if (ret)
+ goto err_poweroff;
+ }
+
ret = iio_triggered_buffer_setup(indio_dev, NULL,
ccs811_trigger_handler, NULL);
if (ret < 0) {
dev_err(&client->dev, "triggered buffer setup failed\n");
- goto err_poweroff;
+ goto err_trigger_unregister;
}
ret = iio_device_register(indio_dev);
@@ -367,6 +444,9 @@ static int ccs811_probe(struct i2c_client *client,
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
+err_trigger_unregister:
+ if (data->drdy_trig)
+ iio_trigger_unregister(data->drdy_trig);
err_poweroff:
i2c_smbus_write_byte_data(client, CCS811_MEAS_MODE, CCS811_MODE_IDLE);
@@ -376,9 +456,12 @@ err_poweroff:
static int ccs811_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ccs811_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
+ if (data->drdy_trig)
+ iio_trigger_unregister(data->drdy_trig);
return i2c_smbus_write_byte_data(client, CCS811_MEAS_MODE,
CCS811_MODE_IDLE);
diff --git a/drivers/iio/chemical/vz89x.c b/drivers/iio/chemical/vz89x.c
index f75eea6822f2..9c9095ba4227 100644
--- a/drivers/iio/chemical/vz89x.c
+++ b/drivers/iio/chemical/vz89x.c
@@ -326,7 +326,6 @@ static int vz89x_read_raw(struct iio_dev *indio_dev,
static const struct iio_info vz89x_info = {
.attrs = &vz89x_attrs_group,
.read_raw = vz89x_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct vz89x_chip_data vz89x_chips[] = {
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index 38e8783e4b05..ed8063f2da99 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -185,7 +185,6 @@ static int cros_ec_sensors_write(struct iio_dev *indio_dev,
static const struct iio_info ec_sensors_info = {
.read_raw = &cros_ec_sensors_read,
.write_raw = &cros_ec_sensors_write,
- .driver_module = THIS_MODULE,
};
static int cros_ec_sensors_probe(struct platform_device *pdev)
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 0e4b379ada45..cfb6588565ba 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -179,6 +179,10 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
int ret;
atomic_set(&st->user_requested_state, state);
+
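+	/* Lazily enable runtime PM the first time a power state is requested */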
+ if (atomic_add_unless(&st->runtime_pm_enable, 1, 1))
+ pm_runtime_enable(&st->pdev->dev);
+
if (state)
ret = pm_runtime_get_sync(&st->pdev->dev);
else {
@@ -221,7 +225,8 @@ static void hid_sensor_set_power_work(struct work_struct *work)
if (attrb->latency_ms > 0)
hid_sensor_set_report_latency(attrb, attrb->latency_ms);
- _hid_sensor_power_state(attrb, true);
+ if (atomic_read(&attrb->user_requested_state))
+ _hid_sensor_power_state(attrb, true);
}
static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
@@ -232,7 +237,9 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
{
- pm_runtime_disable(&attrb->pdev->dev);
+ if (atomic_read(&attrb->runtime_pm_enable))
+ pm_runtime_disable(&attrb->pdev->dev);
+
pm_runtime_set_suspended(&attrb->pdev->dev);
pm_runtime_put_noidle(&attrb->pdev->dev);
@@ -243,7 +250,6 @@ void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
EXPORT_SYMBOL(hid_sensor_remove_trigger);
static const struct iio_trigger_ops hid_sensor_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &hid_sensor_data_rdy_trigger_set_state,
};
@@ -283,7 +289,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
INIT_WORK(&attrb->work, hid_sensor_set_power_work);
pm_suspend_ignore_children(&attrb->pdev->dev, true);
- pm_runtime_enable(&attrb->pdev->dev);
/* Default to 3 seconds, but can be changed from sysfs */
pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
3000);
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 02e833b14db0..57db19182e95 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -93,6 +93,9 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ if (!sdata->sensor_settings->odr.addr)
+ return 0;
+
err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
if (err < 0)
goto st_sensors_match_odr_error;
@@ -221,11 +224,14 @@ EXPORT_SYMBOL(st_sensors_set_enable);
int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable)
{
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ int err = 0;
- return st_sensors_write_data_with_mask(indio_dev,
+ if (sdata->sensor_settings->enable_axis.addr)
+ err = st_sensors_write_data_with_mask(indio_dev,
sdata->sensor_settings->enable_axis.addr,
sdata->sensor_settings->enable_axis.mask,
axis_enable);
+ return err;
}
EXPORT_SYMBOL(st_sensors_set_axis_enable);
@@ -283,7 +289,8 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
struct st_sensor_data *sdata = iio_priv(indio_dev);
/* Sensor does not support interrupts */
- if (sdata->sensor_settings->drdy_irq.addr == 0) {
+ if (!sdata->sensor_settings->drdy_irq.int1.addr &&
+ !sdata->sensor_settings->drdy_irq.int2.addr) {
if (pdata->drdy_int_pin)
dev_info(&indio_dev->dev,
"DRDY on pin INT%d specified, but sensor "
@@ -294,7 +301,7 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
switch (pdata->drdy_int_pin) {
case 1:
- if (sdata->sensor_settings->drdy_irq.mask_int1 == 0) {
+ if (!sdata->sensor_settings->drdy_irq.int1.mask) {
dev_err(&indio_dev->dev,
"DRDY on INT1 not available.\n");
return -EINVAL;
@@ -302,7 +309,7 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
sdata->drdy_int_pin = 1;
break;
case 2:
- if (sdata->sensor_settings->drdy_irq.mask_int2 == 0) {
+ if (!sdata->sensor_settings->drdy_irq.int2.mask) {
dev_err(&indio_dev->dev,
"DRDY on INT2 not available.\n");
return -EINVAL;
@@ -315,7 +322,8 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
}
if (pdata->open_drain) {
- if (!sdata->sensor_settings->drdy_irq.addr_od)
+ if (!sdata->sensor_settings->drdy_irq.int1.addr_od &&
+ !sdata->sensor_settings->drdy_irq.int2.addr_od)
dev_err(&indio_dev->dev,
"open drain requested but unsupported.\n");
else
@@ -442,11 +450,21 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
}
if (sdata->int_pin_open_drain) {
+ u8 addr, mask;
+
+ if (sdata->drdy_int_pin == 1) {
+ addr = sdata->sensor_settings->drdy_irq.int1.addr_od;
+ mask = sdata->sensor_settings->drdy_irq.int1.mask_od;
+ } else {
+ addr = sdata->sensor_settings->drdy_irq.int2.addr_od;
+ mask = sdata->sensor_settings->drdy_irq.int2.mask_od;
+ }
+
dev_info(&indio_dev->dev,
- "set interrupt line to open drain mode\n");
- err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor_settings->drdy_irq.addr_od,
- sdata->sensor_settings->drdy_irq.mask_od, 1);
+ "set interrupt line to open drain mode on pin %d\n",
+ sdata->drdy_int_pin);
+ err = st_sensors_write_data_with_mask(indio_dev, addr,
+ mask, 1);
if (err < 0)
return err;
}
@@ -460,17 +478,18 @@ EXPORT_SYMBOL(st_sensors_init_sensor);
int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
{
int err;
- u8 drdy_mask;
+ u8 drdy_addr, drdy_mask;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- if (!sdata->sensor_settings->drdy_irq.addr) {
+ if (!sdata->sensor_settings->drdy_irq.int1.addr &&
+ !sdata->sensor_settings->drdy_irq.int2.addr) {
/*
* there are some devices (e.g. LIS3MDL) where drdy line is
* routed to a given pin and it is not possible to select a
* different one. Take into account irq status register
* to understand if irq trigger can be properly supported
*/
- if (sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+ if (sdata->sensor_settings->drdy_irq.stat_drdy.addr)
sdata->hw_irq_trigger = enable;
return 0;
}
@@ -485,18 +504,20 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
goto st_accel_set_dataready_irq_error;
}
- if (sdata->drdy_int_pin == 1)
- drdy_mask = sdata->sensor_settings->drdy_irq.mask_int1;
- else
- drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2;
+ if (sdata->drdy_int_pin == 1) {
+ drdy_addr = sdata->sensor_settings->drdy_irq.int1.addr;
+ drdy_mask = sdata->sensor_settings->drdy_irq.int1.mask;
+ } else {
+ drdy_addr = sdata->sensor_settings->drdy_irq.int2.addr;
+ drdy_mask = sdata->sensor_settings->drdy_irq.int2.mask;
+ }
/* Flag to the poll function that the hardware trigger is in use */
sdata->hw_irq_trigger = enable;
/* Enable/Disable the interrupt generator for data ready. */
- err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor_settings->drdy_irq.addr,
- drdy_mask, (int)enable);
+ err = st_sensors_write_data_with_mask(indio_dev, drdy_addr,
+ drdy_mask, (int)enable);
st_accel_set_dataready_irq_error:
return err;
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index fa73e6795359..fdcc5a891958 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -31,7 +31,7 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
int ret;
/* How would I know if I can't check it? */
- if (!sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+ if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
return -EINVAL;
/* No scan mask, no interrupt */
@@ -39,23 +39,15 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
return 0;
ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
- sdata->sensor_settings->drdy_irq.addr_stat_drdy,
+ sdata->sensor_settings->drdy_irq.stat_drdy.addr,
&status);
if (ret < 0) {
dev_err(sdata->dev,
"error checking samples available\n");
return ret;
}
- /*
- * the lower bits of .active_scan_mask[0] is directly mapped
- * to the channels on the sensor: either bit 0 for
- * one-dimensional sensors, or e.g. x,y,z for accelerometers,
- * gyroscopes or magnetometers. No sensor use more than 3
- * channels, so cut the other status bits here.
- */
- status &= 0x07;
- if (status & (u8)indio_dev->active_scan_mask[0])
+ if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask)
return 1;
return 0;
@@ -212,7 +204,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
* it was "our" interrupt.
*/
if (sdata->int_pin_open_drain &&
- sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+ sdata->sensor_settings->drdy_irq.stat_drdy.addr)
irq_trig |= IRQF_SHARED;
err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c
index ba3d9030cd51..b56985078d8c 100644
--- a/drivers/iio/counter/104-quad-8.c
+++ b/drivers/iio/counter/104-quad-8.c
@@ -185,7 +185,6 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info quad8_info = {
- .driver_module = THIS_MODULE,
.read_raw = quad8_read_raw,
.write_raw = quad8_write_raw
};
diff --git a/drivers/iio/counter/stm32-lptimer-cnt.c b/drivers/iio/counter/stm32-lptimer-cnt.c
index 1c5909bb1605..81ae5f74216d 100644
--- a/drivers/iio/counter/stm32-lptimer-cnt.c
+++ b/drivers/iio/counter/stm32-lptimer-cnt.c
@@ -178,7 +178,6 @@ static int stm32_lptim_read_raw(struct iio_dev *indio_dev,
static const struct iio_info stm32_lptim_cnt_iio_info = {
.read_raw = stm32_lptim_read_raw,
.write_raw = stm32_lptim_write_raw,
- .driver_module = THIS_MODULE,
};
static const char *const stm32_lptim_quadrature_modes[] = {
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 25bed2d7d2b9..965d5c0d2468 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -60,7 +60,8 @@ config AD5446
Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5601, AD5602, AD5611, AD5612,
- AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs.
+ AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
+ as well as Texas Instruments DAC081S101, DAC101S101, DAC121S101.
To compile this driver as a module, choose M here: the
module will be called ad5446.
@@ -221,6 +222,15 @@ config DPOT_DAC
To compile this driver as a module, choose M here: the module will be
called dpot-dac.
+config DS4424
+ tristate "Maxim Integrated DS4422/DS4424 DAC driver"
+ depends on I2C
+ help
+ If you say yes here you get support for Maxim chips DS4422, DS4424.
+
+ This driver can also be built as a module. If so, the module
+ will be called ds4424.
+
config LPC18XX_DAC
tristate "NXP LPC18xx DAC driver"
depends on ARCH_LPC18XX || COMPILE_TEST
@@ -300,6 +310,16 @@ config STM32_DAC
config STM32_DAC_CORE
tristate
+config TI_DAC082S085
+ tristate "Texas Instruments 8/10/12-bit 2/4-channel DAC driver"
+ depends on SPI_MASTER
+ help
+ Driver for the Texas Instruments (formerly National Semiconductor)
+ DAC082S085, DAC102S085, DAC122S085, DAC084S085, DAC104S085 and
+ DAC124S085.
+
+ If compiled as a module, it will be called ti-dac082s085.
+
config VF610_DAC
tristate "Vybrid vf610 DAC driver"
depends on OF
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 993511ba4b17..81e710ed7491 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_AD7303) += ad7303.o
obj-$(CONFIG_AD8801) += ad8801.o
obj-$(CONFIG_CIO_DAC) += cio-dac.o
obj-$(CONFIG_DPOT_DAC) += dpot-dac.o
+obj-$(CONFIG_DS4424) += ds4424.o
obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o
obj-$(CONFIG_LTC2632) += ltc2632.o
obj-$(CONFIG_M62332) += m62332.o
@@ -33,4 +34,5 @@ obj-$(CONFIG_MCP4725) += mcp4725.o
obj-$(CONFIG_MCP4922) += mcp4922.o
obj-$(CONFIG_STM32_DAC_CORE) += stm32-dac-core.o
obj-$(CONFIG_STM32_DAC) += stm32-dac.o
+obj-$(CONFIG_TI_DAC082S085) += ti-dac082s085.o
obj-$(CONFIG_VF610_DAC) += vf610_dac.o
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 3f9399c27869..bf4fc40ec84d 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -366,7 +366,6 @@ static int ad5064_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5064_info = {
.read_raw = ad5064_read_raw,
.write_raw = ad5064_write_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index 8ba0e9c50176..0209316d5566 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -425,7 +425,6 @@ static const struct iio_info ad5360_info = {
.read_raw = ad5360_read_raw,
.write_raw = ad5360_write_raw,
.attrs = &ad5360_attribute_group,
- .driver_module = THIS_MODULE,
};
static const char * const ad5360_vref_name[] = {
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 97d2c5111f43..845fd1c0fd9d 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -237,7 +237,6 @@ static int ad5380_read_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5380_info = {
.read_raw = ad5380_read_raw,
.write_raw = ad5380_write_raw,
- .driver_module = THIS_MODULE,
};
static struct iio_chan_spec_ext_info ad5380_ext_info[] = {
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 559061ab1982..8e9633d8de67 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -465,7 +465,6 @@ static const struct iio_info ad5421_info = {
.read_event_config = ad5421_read_event_config,
.write_event_config = ad5421_write_event_config,
.read_event_value = ad5421_read_event_value,
- .driver_module = THIS_MODULE,
};
static int ad5421_probe(struct spi_device *spi)
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index b555552a0d80..fd26a4272fc5 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -212,7 +212,6 @@ static int ad5446_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5446_info = {
.read_raw = ad5446_read_raw,
.write_raw = ad5446_write_raw,
- .driver_module = THIS_MODULE,
};
static int ad5446_probe(struct device *dev, const char *name,
@@ -461,10 +460,22 @@ static const struct spi_device_id ad5446_spi_ids[] = {
{"ad5660-2500", ID_AD5660_2500},
{"ad5660-1250", ID_AD5660_1250},
{"ad5662", ID_AD5662},
+ {"dac081s101", ID_AD5300}, /* compatible Texas Instruments chips */
+ {"dac101s101", ID_AD5310},
+ {"dac121s101", ID_AD5320},
+ {"dac7512", ID_AD5320},
{}
};
MODULE_DEVICE_TABLE(spi, ad5446_spi_ids);
+#ifdef CONFIG_OF
+static const struct of_device_id ad5446_of_ids[] = {
+ { .compatible = "ti,dac7512" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad5446_of_ids);
+#endif
+
static int ad5446_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -481,6 +492,7 @@ static int ad5446_spi_remove(struct spi_device *spi)
static struct spi_driver ad5446_spi_driver = {
.driver = {
.name = "ad5446",
+ .of_match_table = of_match_ptr(ad5446_of_ids),
},
.probe = ad5446_spi_probe,
.remove = ad5446_spi_remove,
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index 5f3202339420..317a74129932 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -193,7 +193,6 @@ static int ad5449_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5449_info = {
.read_raw = ad5449_read_raw,
.write_raw = ad5449_write_raw,
- .driver_module = THIS_MODULE,
};
#define AD5449_CHANNEL(chan, bits) { \
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 712d86b4be09..d9037ea59168 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -232,7 +232,6 @@ static const struct iio_info ad5504_info = {
.write_raw = ad5504_write_raw,
.read_raw = ad5504_read_raw,
.event_attrs = &ad5504_ev_attribute_group,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ad5504_ext_info[] = {
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 69bde5909854..9234c6a09a93 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -474,7 +474,6 @@ static const struct iio_info ad5592r_info = {
.read_raw = ad5592r_read_raw,
.write_raw = ad5592r_write_raw,
.write_raw_get_fmt = ad5592r_write_raw_get_fmt,
- .driver_module = THIS_MODULE,
};
static ssize_t ad5592r_show_scale_available(struct iio_dev *iio_dev,
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 5489ec43b95d..13fdb4dfe356 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -149,7 +149,6 @@ static ssize_t ad5624r_write_dac_powerdown(struct iio_dev *indio_dev,
static const struct iio_info ad5624r_info = {
.write_raw = ad5624r_write_raw,
.read_raw = ad5624r_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ad5624r_ext_info[] = {
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index d1d8450c19f6..20254df7f9c7 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -252,7 +252,6 @@ static int ad5686_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5686_info = {
.read_raw = ad5686_read_raw,
.write_raw = ad5686_write_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ad5686_ext_info[] = {
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 5f7968232564..2d03cc89ba50 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -417,7 +417,6 @@ static ssize_t ad5755_write_powerdown(struct iio_dev *indio_dev, uintptr_t priv,
static const struct iio_info ad5755_info = {
.read_raw = ad5755_read_raw,
.write_raw = ad5755_write_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ad5755_ext_info[] = {
diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c
index d6510d6928b3..05017c8bbd00 100644
--- a/drivers/iio/dac/ad5761.c
+++ b/drivers/iio/dac/ad5761.c
@@ -251,7 +251,6 @@ static int ad5761_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5761_info = {
.read_raw = &ad5761_read_raw,
.write_raw = &ad5761_write_raw,
- .driver_module = THIS_MODULE,
};
#define AD5761_CHAN(_bits) { \
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index 9a547bbf7d2b..033f20eca616 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -268,7 +268,6 @@ static int ad5764_read_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5764_info = {
.read_raw = ad5764_read_raw,
.write_raw = ad5764_write_raw,
- .driver_module = THIS_MODULE,
};
static int ad5764_probe(struct spi_device *spi)
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 33e4ae5c42f8..7569bf6868c2 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -340,7 +340,6 @@ static int ad5791_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5791_info = {
.read_raw = &ad5791_read_raw,
.write_raw = &ad5791_write_raw,
- .driver_module = THIS_MODULE,
};
static int ad5791_probe(struct spi_device *spi)
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index 4b0f942b8914..8f3bd19b6dc3 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -161,7 +161,6 @@ static int ad7303_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad7303_info = {
.read_raw = ad7303_read_raw,
.write_raw = ad7303_write_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ad7303_ext_info[] = {
diff --git a/drivers/iio/dac/ad8801.c b/drivers/iio/dac/ad8801.c
index f06faa1aec09..aef5808c9865 100644
--- a/drivers/iio/dac/ad8801.c
+++ b/drivers/iio/dac/ad8801.c
@@ -92,7 +92,6 @@ static int ad8801_read_raw(struct iio_dev *indio_dev,
static const struct iio_info ad8801_info = {
.read_raw = ad8801_read_raw,
.write_raw = ad8801_write_raw,
- .driver_module = THIS_MODULE,
};
#define AD8801_CHANNEL(chan) { \
diff --git a/drivers/iio/dac/cio-dac.c b/drivers/iio/dac/cio-dac.c
index a8dffd938615..6898b0c79013 100644
--- a/drivers/iio/dac/cio-dac.c
+++ b/drivers/iio/dac/cio-dac.c
@@ -85,7 +85,6 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info cio_dac_info = {
- .driver_module = THIS_MODULE,
.read_raw = cio_dac_read_raw,
.write_raw = cio_dac_write_raw
};
diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c
index 960a2b430480..aaa2103d7c2b 100644
--- a/drivers/iio/dac/dpot-dac.c
+++ b/drivers/iio/dac/dpot-dac.c
@@ -128,7 +128,6 @@ static const struct iio_info dpot_dac_info = {
.read_raw = dpot_dac_read_raw,
.read_avail = dpot_dac_read_avail,
.write_raw = dpot_dac_write_raw,
- .driver_module = THIS_MODULE,
};
static int dpot_dac_channel_max_ohms(struct iio_dev *indio_dev)
diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c
new file mode 100644
index 000000000000..883a47562055
--- /dev/null
+++ b/drivers/iio/dac/ds4424.c
@@ -0,0 +1,341 @@
+/*
+ * Maxim Integrated
+ * 7-bit, Multi-Channel Sink/Source Current DAC Driver
+ * Copyright (C) 2017 Maxim Integrated
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/consumer.h>
+
+#define DS4422_MAX_DAC_CHANNELS 2
+#define DS4424_MAX_DAC_CHANNELS 4
+
+#define DS4424_DAC_ADDR(chan) ((chan) + 0xf8)
+#define DS4424_SOURCE_I 1
+#define DS4424_SINK_I 0
+
+#define DS4424_CHANNEL(chan) { \
+ .type = IIO_CURRENT, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+}
+
+/*
+ * DS4424 DAC control register, 8 bits:
+ * [7] 0: to sink; 1: to source
+ * [6:0] steps to sink/source
+ * bit[7] looks like a sign bit, but the register value is not a two's
+ * complement code: bit[6:0] is an absolute distance from the zero point.
+ */
+union ds4424_raw_data {
+ struct {
+ u8 dx:7;
+ u8 source_bit:1;
+ };
+ u8 bits;
+};
+
+enum ds4424_device_ids {
+ ID_DS4422,
+ ID_DS4424,
+};
+
+struct ds4424_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ uint8_t save[DS4424_MAX_DAC_CHANNELS];
+ struct regulator *vcc_reg;
+ uint8_t raw[DS4424_MAX_DAC_CHANNELS];
+};
+
+static const struct iio_chan_spec ds4424_channels[] = {
+ DS4424_CHANNEL(0),
+ DS4424_CHANNEL(1),
+ DS4424_CHANNEL(2),
+ DS4424_CHANNEL(3),
+};
+
+static int ds4424_get_value(struct iio_dev *indio_dev,
+ int *val, int channel)
+{
+ struct ds4424_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = i2c_smbus_read_byte_data(data->client, DS4424_DAC_ADDR(channel));
+ if (ret < 0)
+ goto fail;
+
+ *val = ret;
+
+fail:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int ds4424_set_value(struct iio_dev *indio_dev,
+ int val, struct iio_chan_spec const *chan)
+{
+ struct ds4424_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = i2c_smbus_write_byte_data(data->client,
+ DS4424_DAC_ADDR(chan->channel), val);
+ if (ret < 0)
+ goto fail;
+
+ data->raw[chan->channel] = val;
+
+fail:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int ds4424_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ union ds4424_raw_data raw;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ds4424_get_value(indio_dev, val, chan->channel);
+ if (ret < 0) {
+ pr_err("%s : ds4424_get_value returned %d\n",
+ __func__, ret);
+ return ret;
+ }
+ raw.bits = *val;
+ *val = raw.dx;
+ if (raw.source_bit == DS4424_SINK_I)
+ *val = -*val;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ds4424_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ union ds4424_raw_data raw;
+
+ if (val2 != 0)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (val < S8_MIN || val > S8_MAX)
+ return -EINVAL;
+
+ if (val > 0) {
+ raw.source_bit = DS4424_SOURCE_I;
+ raw.dx = val;
+ } else {
+ raw.source_bit = DS4424_SINK_I;
+ raw.dx = -val;
+ }
+
+ return ds4424_set_value(indio_dev, raw.bits, chan);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ds4424_verify_chip(struct iio_dev *indio_dev)
+{
+ int ret, val;
+
+	ret = ds4424_get_value(indio_dev, &val, 0);
+ if (ret < 0)
+ dev_err(&indio_dev->dev,
+ "%s failed. ret: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int __maybe_unused ds4424_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ds4424_data *data = iio_priv(indio_dev);
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ data->save[i] = data->raw[i];
+ ret = ds4424_set_value(indio_dev, 0,
+ &indio_dev->channels[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return ret;
+}
+
+static int __maybe_unused ds4424_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ds4424_data *data = iio_priv(indio_dev);
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ ret = ds4424_set_value(indio_dev, data->save[i],
+ &indio_dev->channels[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(ds4424_pm_ops, ds4424_suspend, ds4424_resume);
+
+static const struct iio_info ds4424_info = {
+ .read_raw = ds4424_read_raw,
+ .write_raw = ds4424_write_raw,
+};
+
+static int ds4424_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ds4424_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio dev alloc failed.\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ indio_dev->name = id->name;
+ indio_dev->dev.of_node = client->dev.of_node;
+ indio_dev->dev.parent = &client->dev;
+
+ if (!client->dev.of_node) {
+ dev_err(&client->dev,
+ "Not found DT.\n");
+ return -ENODEV;
+ }
+
+ data->vcc_reg = devm_regulator_get(&client->dev, "vcc");
+ if (IS_ERR(data->vcc_reg)) {
+ dev_err(&client->dev,
+ "Failed to get vcc-supply regulator. err: %ld\n",
+ PTR_ERR(data->vcc_reg));
+ return PTR_ERR(data->vcc_reg);
+ }
+
+ mutex_init(&data->lock);
+ ret = regulator_enable(data->vcc_reg);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Unable to enable the regulator.\n");
+ return ret;
+ }
+
+ usleep_range(1000, 1200);
+ ret = ds4424_verify_chip(indio_dev);
+ if (ret < 0)
+ goto fail;
+
+ switch (id->driver_data) {
+ case ID_DS4422:
+ indio_dev->num_channels = DS4422_MAX_DAC_CHANNELS;
+ break;
+ case ID_DS4424:
+ indio_dev->num_channels = DS4424_MAX_DAC_CHANNELS;
+ break;
+ default:
+ dev_err(&client->dev,
+ "ds4424: Invalid chip id.\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ indio_dev->channels = ds4424_channels;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ds4424_info;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "iio_device_register failed. ret: %d\n", ret);
+ goto fail;
+ }
+
+ return ret;
+
+fail:
+ regulator_disable(data->vcc_reg);
+ return ret;
+}
+
+static int ds4424_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ds4424_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(data->vcc_reg);
+
+ return 0;
+}
+
+static const struct i2c_device_id ds4424_id[] = {
+ { "ds4422", ID_DS4422 },
+ { "ds4424", ID_DS4424 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, ds4424_id);
+
+static const struct of_device_id ds4424_of_match[] = {
+ { .compatible = "maxim,ds4422" },
+ { .compatible = "maxim,ds4424" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, ds4424_of_match);
+
+static struct i2c_driver ds4424_driver = {
+ .driver = {
+ .name = "ds4424",
+ .of_match_table = ds4424_of_match,
+ .pm = &ds4424_pm_ops,
+ },
+ .probe = ds4424_probe,
+ .remove = ds4424_remove,
+ .id_table = ds4424_id,
+};
+module_i2c_driver(ds4424_driver);
+
+MODULE_DESCRIPTION("Maxim DS4424 DAC Driver");
+MODULE_AUTHOR("Ismail H. Kose <ismail.kose@maximintegrated.com>");
+MODULE_AUTHOR("Vishal Sood <vishal.sood@maximintegrated.com>");
+MODULE_AUTHOR("David Jung <david.jung@maximintegrated.com>");
+MODULE_LICENSE("GPL v2");
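
As an aside on the sign-magnitude encoding documented in the control-register comment above: a minimal, standalone C sketch of the conversion that ds4424_read_raw()/ds4424_write_raw() perform (helper names here are illustrative only, not part of the driver) could look like:

	#include <stdint.h>
	#include <stdlib.h>

	/*
	 * Encode a signed step count (-127..127) into the DS4424 register
	 * layout: bit 7 set means source current, clear means sink; bits 6:0
	 * hold the absolute number of steps.
	 */
	uint8_t ds4424_encode(int steps)
	{
		uint8_t mag = abs(steps) & 0x7f;

		return steps > 0 ? (uint8_t)(0x80 | mag) : mag;
	}

	/* Decode a register value back into a signed step count. */
	int ds4424_decode(uint8_t reg)
	{
		int mag = reg & 0x7f;

		return (reg & 0x80) ? mag : -mag;
	}

So, for example, +5 steps of source current encode to 0x85 and 5 steps of sink current to 0x05, which is why the comment stresses that bit 7 is not a two's complement sign bit.
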
diff --git a/drivers/iio/dac/lpc18xx_dac.c b/drivers/iio/dac/lpc18xx_dac.c
index 55d1456a059d..7036f77fdf23 100644
--- a/drivers/iio/dac/lpc18xx_dac.c
+++ b/drivers/iio/dac/lpc18xx_dac.c
@@ -103,7 +103,6 @@ static int lpc18xx_dac_write_raw(struct iio_dev *indio_dev,
static const struct iio_info lpc18xx_dac_info = {
.read_raw = lpc18xx_dac_read_raw,
.write_raw = lpc18xx_dac_write_raw,
- .driver_module = THIS_MODULE,
};
static int lpc18xx_dac_probe(struct platform_device *pdev)
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index ac5e05f6eb8b..af2ddd0dd341 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -159,7 +159,6 @@ static ssize_t ltc2632_write_dac_powerdown(struct iio_dev *indio_dev,
static const struct iio_info ltc2632_info = {
.write_raw = ltc2632_write_raw,
.read_raw = ltc2632_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ltc2632_ext_info[] = {
diff --git a/drivers/iio/dac/m62332.c b/drivers/iio/dac/m62332.c
index 76e8b044b979..19031943dabe 100644
--- a/drivers/iio/dac/m62332.c
+++ b/drivers/iio/dac/m62332.c
@@ -174,7 +174,6 @@ static SIMPLE_DEV_PM_OPS(m62332_pm_ops, m62332_suspend, m62332_resume);
static const struct iio_info m62332_info = {
.read_raw = m62332_read_raw,
.write_raw = m62332_write_raw,
- .driver_module = THIS_MODULE,
};
#define M62332_CHANNEL(chan) { \
diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c
index 5507b3970b4b..1d853247a205 100644
--- a/drivers/iio/dac/max517.c
+++ b/drivers/iio/dac/max517.c
@@ -137,7 +137,6 @@ static SIMPLE_DEV_PM_OPS(max517_pm_ops, max517_suspend, max517_resume);
static const struct iio_info max517_info = {
.read_raw = max517_read_raw,
.write_raw = max517_write_raw,
- .driver_module = THIS_MODULE,
};
#define MAX517_CHANNEL(chan) { \
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index 193fac3059a3..d0ecc1fdd8fc 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -300,7 +300,6 @@ static SIMPLE_DEV_PM_OPS(max5821_pm_ops, max5821_suspend, max5821_resume);
static const struct iio_info max5821_info = {
.read_raw = max5821_read_raw,
.write_raw = max5821_write_raw,
- .driver_module = THIS_MODULE,
};
static int max5821_probe(struct i2c_client *client,
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 6ab1f23e5a79..afa856d10c26 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -363,7 +363,6 @@ static const struct iio_info mcp4725_info = {
.read_raw = mcp4725_read_raw,
.write_raw = mcp4725_write_raw,
.attrs = &mcp4725_attribute_group,
- .driver_module = THIS_MODULE,
};
#ifdef CONFIG_OF
diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c
index 3854d201a5d6..bf9aa3fc0534 100644
--- a/drivers/iio/dac/mcp4922.c
+++ b/drivers/iio/dac/mcp4922.c
@@ -119,7 +119,6 @@ static const struct iio_chan_spec mcp4922_channels[3][MCP4922_NUM_CHANNELS] = {
static const struct iio_info mcp4922_info = {
.read_raw = &mcp4922_read_raw,
.write_raw = &mcp4922_write_raw,
- .driver_module = THIS_MODULE,
};
static int mcp4922_probe(struct spi_device *spi)
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index c1864e8aa851..9ffab02bf9f9 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -156,7 +156,6 @@ static const struct iio_info stm32_dac_iio_info = {
.read_raw = stm32_dac_read_raw,
.write_raw = stm32_dac_write_raw,
.debugfs_reg_access = stm32_dac_debugfs_reg_access,
- .driver_module = THIS_MODULE,
};
static const char * const stm32_dac_powerdown_modes[] = {
diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c
new file mode 100644
index 000000000000..4e1e28339c84
--- /dev/null
+++ b/drivers/iio/dac/ti-dac082s085.c
@@ -0,0 +1,368 @@
+/*
+ * ti-dac082s085.c - Texas Instruments 8/10/12-bit 2/4-channel DAC driver
+ *
+ * Copyright (C) 2017 KUNBUS GmbH
+ *
+ * http://www.ti.com/lit/ds/symlink/dac082s085.pdf
+ * http://www.ti.com/lit/ds/symlink/dac102s085.pdf
+ * http://www.ti.com/lit/ds/symlink/dac122s085.pdf
+ * http://www.ti.com/lit/ds/symlink/dac084s085.pdf
+ * http://www.ti.com/lit/ds/symlink/dac104s085.pdf
+ * http://www.ti.com/lit/ds/symlink/dac124s085.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+enum { dual_8bit, dual_10bit, dual_12bit, quad_8bit, quad_10bit, quad_12bit };
+
+struct ti_dac_spec {
+ u8 num_channels;
+ u8 resolution;
+};
+
+static const struct ti_dac_spec ti_dac_spec[] = {
+ [dual_8bit] = { .num_channels = 2, .resolution = 8 },
+ [dual_10bit] = { .num_channels = 2, .resolution = 10 },
+ [dual_12bit] = { .num_channels = 2, .resolution = 12 },
+ [quad_8bit] = { .num_channels = 4, .resolution = 8 },
+ [quad_10bit] = { .num_channels = 4, .resolution = 10 },
+ [quad_12bit] = { .num_channels = 4, .resolution = 12 },
+};
+
+/**
+ * struct ti_dac_chip - TI DAC chip
+ * @lock: protects write sequences
+ * @vref: regulator generating Vref
+ * @mesg: SPI message to perform a write
+ * @xfer: SPI transfer used by @mesg
+ * @val: cached value of each output
+ * @powerdown: whether the chip is powered down
+ * @powerdown_mode: selected by the user
+ * @resolution: resolution of the chip
+ * @buf: buffer for @xfer
+ */
+struct ti_dac_chip {
+ struct mutex lock;
+ struct regulator *vref;
+ struct spi_message mesg;
+ struct spi_transfer xfer;
+ u16 val[4];
+ bool powerdown;
+ u8 powerdown_mode;
+ u8 resolution;
+ u8 buf[2] ____cacheline_aligned;
+};
+
+#define WRITE_NOT_UPDATE(chan) (0x00 | (chan) << 6)
+#define WRITE_AND_UPDATE(chan) (0x10 | (chan) << 6)
+#define WRITE_ALL_UPDATE 0x20
+#define POWERDOWN(mode) (0x30 | ((mode) + 1) << 6)
+
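+/*
+ * Build one 16-bit frame: the command/channel bits sit in the upper nibble
+ * and the sample is left-justified so its MSB lands at bit 11 regardless of
+ * the part's resolution (8, 10 or 12 bits).
+ */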
+static int ti_dac_cmd(struct ti_dac_chip *ti_dac, u8 cmd, u16 val)
+{
+ u8 shift = 12 - ti_dac->resolution;
+
+ ti_dac->buf[0] = cmd | (val >> (8 - shift));
+ ti_dac->buf[1] = (val << shift) & 0xff;
+ return spi_sync(ti_dac->mesg.spi, &ti_dac->mesg);
+}
+
+static const char * const ti_dac_powerdown_modes[] = {
+ "2.5kohm_to_gnd", "100kohm_to_gnd", "three_state",
+};
+
+static int ti_dac_get_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
+
+ return ti_dac->powerdown_mode;
+}
+
+static int ti_dac_set_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (ti_dac->powerdown_mode == mode)
+ return 0;
+
+ mutex_lock(&ti_dac->lock);
+ if (ti_dac->powerdown) {
+ ret = ti_dac_cmd(ti_dac, POWERDOWN(mode), 0);
+ if (ret)
+ goto out;
+ }
+ ti_dac->powerdown_mode = mode;
+
+out:
+ mutex_unlock(&ti_dac->lock);
+ return ret;
+}
+
+static const struct iio_enum ti_dac_powerdown_mode = {
+ .items = ti_dac_powerdown_modes,
+ .num_items = ARRAY_SIZE(ti_dac_powerdown_modes),
+ .get = ti_dac_get_powerdown_mode,
+ .set = ti_dac_set_powerdown_mode,
+};
+
+static ssize_t ti_dac_read_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", ti_dac->powerdown);
+}
+
+static ssize_t ti_dac_write_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
+ bool powerdown;
+ int ret;
+
+ ret = strtobool(buf, &powerdown);
+ if (ret)
+ return ret;
+
+ if (ti_dac->powerdown == powerdown)
+ return len;
+
+ mutex_lock(&ti_dac->lock);
+ if (powerdown)
+ ret = ti_dac_cmd(ti_dac, POWERDOWN(ti_dac->powerdown_mode), 0);
+ else
+ ret = ti_dac_cmd(ti_dac, WRITE_AND_UPDATE(0), ti_dac->val[0]);
+ if (!ret)
+ ti_dac->powerdown = powerdown;
+ mutex_unlock(&ti_dac->lock);
+
+ return ret ? ret : len;
+}
+
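+/* powerdown and powerdown_mode are shared by type: the driver handles power-down chip-wide. */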
+static const struct iio_chan_spec_ext_info ti_dac_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ti_dac_read_powerdown,
+ .write = ti_dac_write_powerdown,
+ .shared = IIO_SHARED_BY_TYPE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &ti_dac_powerdown_mode),
+ { },
+};
+
+#define TI_DAC_CHANNEL(chan) { \
+ .type = IIO_VOLTAGE, \
+ .channel = (chan), \
+ .address = (chan), \
+ .indexed = true, \
+ .output = true, \
+ .datasheet_name = (const char[]){ 'A' + (chan), 0 }, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = ti_dac_ext_info, \
+}
+
+static const struct iio_chan_spec ti_dac_channels[] = {
+ TI_DAC_CHANNEL(0),
+ TI_DAC_CHANNEL(1),
+ TI_DAC_CHANNEL(2),
+ TI_DAC_CHANNEL(3),
+};
+
+static int ti_dac_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *val = ti_dac->val[chan->channel];
+ ret = IIO_VAL_INT;
+ break;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = regulator_get_voltage(ti_dac->vref);
+ if (ret < 0)
+ return ret;
+
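+ /* scale = Vref in mV divided by 2^resolution (IIO_VAL_FRACTIONAL_LOG2) */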
+ *val = ret / 1000;
+ *val2 = ti_dac->resolution;
+ ret = IIO_VAL_FRACTIONAL_LOG2;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ti_dac_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (ti_dac->val[chan->channel] == val)
+ return 0;
+
+ if (val >= (1 << ti_dac->resolution) || val < 0)
+ return -EINVAL;
+
+ if (ti_dac->powerdown)
+ return -EBUSY;
+
+ mutex_lock(&ti_dac->lock);
+ ret = ti_dac_cmd(ti_dac, WRITE_AND_UPDATE(chan->channel), val);
+ if (!ret)
+ ti_dac->val[chan->channel] = val;
+ mutex_unlock(&ti_dac->lock);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ti_dac_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long mask)
+{
+ return IIO_VAL_INT;
+}
+
+static const struct iio_info ti_dac_info = {
+ .read_raw = ti_dac_read_raw,
+ .write_raw = ti_dac_write_raw,
+ .write_raw_get_fmt = ti_dac_write_raw_get_fmt,
+};
+
+static int ti_dac_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ const struct ti_dac_spec *spec;
+ struct ti_dac_chip *ti_dac;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*ti_dac));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &ti_dac_info;
+ indio_dev->name = spi->modalias;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ti_dac_channels;
+ spi_set_drvdata(spi, indio_dev);
+
+ ti_dac = iio_priv(indio_dev);
+ ti_dac->xfer.tx_buf = &ti_dac->buf;
+ ti_dac->xfer.len = sizeof(ti_dac->buf);
+ spi_message_init_with_transfers(&ti_dac->mesg, &ti_dac->xfer, 1);
+ ti_dac->mesg.spi = spi;
+
+ spec = &ti_dac_spec[spi_get_device_id(spi)->driver_data];
+ indio_dev->num_channels = spec->num_channels;
+ ti_dac->resolution = spec->resolution;
+
+ ti_dac->vref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(ti_dac->vref))
+ return PTR_ERR(ti_dac->vref);
+
+ ret = regulator_enable(ti_dac->vref);
+ if (ret < 0)
+ return ret;
+
+ mutex_init(&ti_dac->lock);
+
+ ret = ti_dac_cmd(ti_dac, WRITE_ALL_UPDATE, 0);
+ if (ret) {
+ dev_err(dev, "failed to initialize outputs to 0\n");
+ goto err;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ mutex_destroy(&ti_dac->lock);
+ regulator_disable(ti_dac->vref);
+ return ret;
+}
+
+static int ti_dac_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ mutex_destroy(&ti_dac->lock);
+ regulator_disable(ti_dac->vref);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ti_dac_of_id[] = {
+ { .compatible = "ti,dac082s085" },
+ { .compatible = "ti,dac102s085" },
+ { .compatible = "ti,dac122s085" },
+ { .compatible = "ti,dac084s085" },
+ { .compatible = "ti,dac104s085" },
+ { .compatible = "ti,dac124s085" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ti_dac_of_id);
+#endif
+
+static const struct spi_device_id ti_dac_spi_id[] = {
+ { "dac082s085", dual_8bit },
+ { "dac102s085", dual_10bit },
+ { "dac122s085", dual_12bit },
+ { "dac084s085", quad_8bit },
+ { "dac104s085", quad_10bit },
+ { "dac124s085", quad_12bit },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ti_dac_spi_id);
+
+static struct spi_driver ti_dac_driver = {
+ .driver = {
+ .name = "ti-dac082s085",
+ .of_match_table = of_match_ptr(ti_dac_of_id),
+ },
+ .probe = ti_dac_probe,
+ .remove = ti_dac_remove,
+ .id_table = ti_dac_spi_id,
+};
+module_spi_driver(ti_dac_driver);
+
+MODULE_AUTHOR("Lukas Wunner <lukas@wunner.de>");
+MODULE_DESCRIPTION("Texas Instruments 8/10/12-bit 2/4-channel DAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
index c4ec7779b394..5dccdd16cab3 100644
--- a/drivers/iio/dac/vf610_dac.c
+++ b/drivers/iio/dac/vf610_dac.c
@@ -167,7 +167,6 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info vf610_dac_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = &vf610_read_raw,
.write_raw = &vf610_write_raw,
};
diff --git a/drivers/iio/dummy/Kconfig b/drivers/iio/dummy/Kconfig
index aa5824d96a43..5a29fbd3c531 100644
--- a/drivers/iio/dummy/Kconfig
+++ b/drivers/iio/dummy/Kconfig
@@ -5,7 +5,7 @@ menu "IIO dummy driver"
depends on IIO
config IIO_DUMMY_EVGEN
- select IRQ_WORK
+ select IRQ_SIM
tristate
config IIO_SIMPLE_DUMMY
diff --git a/drivers/iio/dummy/iio_dummy_evgen.c b/drivers/iio/dummy/iio_dummy_evgen.c
index 9e83f348df51..fe8884543da0 100644
--- a/drivers/iio/dummy/iio_dummy_evgen.c
+++ b/drivers/iio/dummy/iio_dummy_evgen.c
@@ -24,97 +24,46 @@
#include "iio_dummy_evgen.h"
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include <linux/irq_work.h>
+#include <linux/irq_sim.h>
/* Fiddly bit of faking an irq without hardware */
#define IIO_EVENTGEN_NO 10
/**
- * struct iio_dummy_handle_irq - helper struct to simulate interrupt generation
- * @work: irq_work used to run handlers from hardirq context
- * @irq: fake irq line number to trigger an interrupt
- */
-struct iio_dummy_handle_irq {
- struct irq_work work;
- int irq;
-};
-
-/**
- * struct iio_dummy_evgen - evgen state
- * @chip: irq chip we are faking
- * @base: base of irq range
- * @enabled: mask of which irqs are enabled
- * @inuse: mask of which irqs are connected
* @regs: irq regs we are faking
* @lock: protect the evgen state
- * @handler: helper for a 'hardware-like' interrupt simulation
+ * @inuse: mask of which irqs are connected
+ * @irq_sim: interrupt simulator
+ * @base: base of irq range
*/
struct iio_dummy_eventgen {
- struct irq_chip chip;
- int base;
- bool enabled[IIO_EVENTGEN_NO];
- bool inuse[IIO_EVENTGEN_NO];
struct iio_dummy_regs regs[IIO_EVENTGEN_NO];
struct mutex lock;
- struct iio_dummy_handle_irq handler;
+ bool inuse[IIO_EVENTGEN_NO];
+ struct irq_sim irq_sim;
+ int base;
};
/* We can only ever have one instance of this 'device' */
static struct iio_dummy_eventgen *iio_evgen;
-static const char *iio_evgen_name = "iio_dummy_evgen";
-
-static void iio_dummy_event_irqmask(struct irq_data *d)
-{
- struct irq_chip *chip = irq_data_get_irq_chip(d);
- struct iio_dummy_eventgen *evgen =
- container_of(chip, struct iio_dummy_eventgen, chip);
-
- evgen->enabled[d->irq - evgen->base] = false;
-}
-
-static void iio_dummy_event_irqunmask(struct irq_data *d)
-{
- struct irq_chip *chip = irq_data_get_irq_chip(d);
- struct iio_dummy_eventgen *evgen =
- container_of(chip, struct iio_dummy_eventgen, chip);
-
- evgen->enabled[d->irq - evgen->base] = true;
-}
-
-static void iio_dummy_work_handler(struct irq_work *work)
-{
- struct iio_dummy_handle_irq *irq_handler;
-
- irq_handler = container_of(work, struct iio_dummy_handle_irq, work);
- handle_simple_irq(irq_to_desc(irq_handler->irq));
-}
static int iio_dummy_evgen_create(void)
{
- int ret, i;
+ int ret;
iio_evgen = kzalloc(sizeof(*iio_evgen), GFP_KERNEL);
if (!iio_evgen)
return -ENOMEM;
- iio_evgen->base = irq_alloc_descs(-1, 0, IIO_EVENTGEN_NO, 0);
- if (iio_evgen->base < 0) {
- ret = iio_evgen->base;
+ ret = irq_sim_init(&iio_evgen->irq_sim, IIO_EVENTGEN_NO);
+ if (ret) {
kfree(iio_evgen);
return ret;
}
- iio_evgen->chip.name = iio_evgen_name;
- iio_evgen->chip.irq_mask = &iio_dummy_event_irqmask;
- iio_evgen->chip.irq_unmask = &iio_dummy_event_irqunmask;
- for (i = 0; i < IIO_EVENTGEN_NO; i++) {
- irq_set_chip(iio_evgen->base + i, &iio_evgen->chip);
- irq_set_handler(iio_evgen->base + i, &handle_simple_irq);
- irq_modify_status(iio_evgen->base + i,
- IRQ_NOREQUEST | IRQ_NOAUTOEN,
- IRQ_NOPROBE);
- }
- init_irq_work(&iio_evgen->handler.work, iio_dummy_work_handler);
+
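+ /* remember the first simulated irq as the base of the evgen's irq range */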
+ iio_evgen->base = irq_sim_irqnum(&iio_evgen->irq_sim, 0);
mutex_init(&iio_evgen->lock);
+
return 0;
}
@@ -132,15 +81,17 @@ int iio_dummy_evgen_get_irq(void)
return -ENODEV;
mutex_lock(&iio_evgen->lock);
- for (i = 0; i < IIO_EVENTGEN_NO; i++)
+ for (i = 0; i < IIO_EVENTGEN_NO; i++) {
if (!iio_evgen->inuse[i]) {
- ret = iio_evgen->base + i;
+ ret = irq_sim_irqnum(&iio_evgen->irq_sim, i);
iio_evgen->inuse[i] = true;
break;
}
+ }
mutex_unlock(&iio_evgen->lock);
if (i == IIO_EVENTGEN_NO)
return -ENOMEM;
+
return ret;
}
EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_irq);
@@ -167,7 +118,7 @@ EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_regs);
static void iio_dummy_evgen_free(void)
{
- irq_free_descs(iio_evgen->base, IIO_EVENTGEN_NO);
+ irq_sim_fini(&iio_evgen->irq_sim);
kfree(iio_evgen);
}
@@ -192,9 +143,7 @@ static ssize_t iio_evgen_poke(struct device *dev,
iio_evgen->regs[this_attr->address].reg_id = this_attr->address;
iio_evgen->regs[this_attr->address].reg_data = event;
- iio_evgen->handler.irq = iio_evgen->base + this_attr->address;
- if (iio_evgen->enabled[this_attr->address])
- irq_work_queue(&iio_evgen->handler.work);
+ irq_sim_fire(&iio_evgen->irq_sim, this_attr->address);
return len;
}
diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c
index ad3410e528b6..62052479c349 100644
--- a/drivers/iio/dummy/iio_simple_dummy.c
+++ b/drivers/iio/dummy/iio_simple_dummy.c
@@ -26,7 +26,7 @@
#include <linux/iio/sw_device.h>
#include "iio_simple_dummy.h"
-static struct config_item_type iio_dummy_type = {
+static const struct config_item_type iio_dummy_type = {
.ct_owner = THIS_MODULE,
};
@@ -519,7 +519,6 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
* Device type specific information.
*/
static const struct iio_info iio_dummy_info = {
- .driver_module = THIS_MODULE,
.read_raw = &iio_dummy_read_raw,
.write_raw = &iio_dummy_write_raw,
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 99eba524f6dd..ddb6a334ae68 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -738,7 +738,6 @@ static const struct iio_info ad9523_info = {
.write_raw = &ad9523_write_raw,
.debugfs_reg_access = &ad9523_reg_access,
.attrs = &ad9523_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ad9523_setup(struct iio_dev *indio_dev)
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index d2d824b446f5..6d768431d90e 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -374,7 +374,6 @@ static const struct iio_chan_spec adf4350_chan = {
static const struct iio_info adf4350_info = {
.debugfs_reg_access = &adf4350_reg_access,
- .driver_module = THIS_MODULE,
};
#ifdef CONFIG_OF
diff --git a/drivers/iio/gyro/adis16080.c b/drivers/iio/gyro/adis16080.c
index ad31a1372a04..a551ebde4762 100644
--- a/drivers/iio/gyro/adis16080.c
+++ b/drivers/iio/gyro/adis16080.c
@@ -163,7 +163,6 @@ static const struct iio_chan_spec adis16080_channels[] = {
static const struct iio_info adis16080_info = {
.read_raw = &adis16080_read_raw,
- .driver_module = THIS_MODULE,
};
enum {
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
index e5241f41e65e..aea80ab04122 100644
--- a/drivers/iio/gyro/adis16130.c
+++ b/drivers/iio/gyro/adis16130.c
@@ -137,7 +137,6 @@ static const struct iio_chan_spec adis16130_channels[] = {
static const struct iio_info adis16130_info = {
.read_raw = &adis16130_read_raw,
- .driver_module = THIS_MODULE,
};
static int adis16130_probe(struct spi_device *spi)
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index b04faf93e1bc..90ec4bed62b7 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -398,7 +398,6 @@ static const struct attribute_group adis16136_attribute_group = {
};
static const struct iio_info adis16136_info = {
- .driver_module = THIS_MODULE,
.attrs = &adis16136_attribute_group,
.read_raw = &adis16136_read_raw,
.write_raw = &adis16136_write_raw,
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
index 7da8825f4791..a8cb1ca349d9 100644
--- a/drivers/iio/gyro/adis16260.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -321,7 +321,6 @@ static const struct iio_info adis16260_info = {
.read_raw = &adis16260_read_raw,
.write_raw = &adis16260_write_raw,
.update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static const char * const adis1620_status_error_msgs[] = {
diff --git a/drivers/iio/gyro/adxrs450.c b/drivers/iio/gyro/adxrs450.c
index a330d4288bb0..5d39fd008378 100644
--- a/drivers/iio/gyro/adxrs450.c
+++ b/drivers/iio/gyro/adxrs450.c
@@ -405,7 +405,6 @@ static const struct iio_chan_spec adxrs450_channels[2][2] = {
};
static const struct iio_info adxrs450_info = {
- .driver_module = THIS_MODULE,
.read_raw = &adxrs450_read_raw,
.write_raw = &adxrs450_write_raw,
};
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 821919dd245b..15046172e437 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -858,7 +858,6 @@ static const struct iio_info bmg160_info = {
.write_event_value = bmg160_write_event,
.write_event_config = bmg160_write_event_config,
.read_event_config = bmg160_read_event_config,
- .driver_module = THIS_MODULE,
};
static const unsigned long bmg160_accel_scan_masks[] = {
@@ -956,7 +955,6 @@ static int bmg160_data_rdy_trigger_set_state(struct iio_trigger *trig,
static const struct iio_trigger_ops bmg160_trigger_ops = {
.set_trigger_state = bmg160_data_rdy_trigger_set_state,
.try_reenable = bmg160_trig_try_reen,
- .owner = THIS_MODULE,
};
static irqreturn_t bmg160_event_handler(int irq, void *private)
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index c67ce2ac4715..f59995a90387 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -186,7 +186,6 @@ static int gyro_3d_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info gyro_3d_info = {
- .driver_module = THIS_MODULE,
.read_raw = &gyro_3d_read_raw,
.write_raw = &gyro_3d_write_raw,
};
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
index eef50e91f17c..59770e5b6660 100644
--- a/drivers/iio/gyro/itg3200_buffer.c
+++ b/drivers/iio/gyro/itg3200_buffer.c
@@ -101,7 +101,6 @@ error_ret:
}
static const struct iio_trigger_ops itg3200_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &itg3200_data_rdy_trigger_set_state,
};
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index cfa2db04a8ab..7adecb562c81 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -278,7 +278,6 @@ static const struct iio_chan_spec itg3200_channels[] = {
static const struct iio_info itg3200_info = {
.read_raw = &itg3200_read_raw,
.write_raw = &itg3200_write_raw,
- .driver_module = THIS_MODULE,
};
static const unsigned long itg3200_available_scan_masks[] = { 0xffffffff, 0x0 };
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index e0d241a9aa30..77fac81a3adc 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -742,7 +742,6 @@ static const struct attribute_group mpu3050_attribute_group = {
};
static const struct iio_info mpu3050_info = {
- .driver_module = THIS_MODULE,
.read_raw = mpu3050_read_raw,
.write_raw = mpu3050_write_raw,
.attrs = &mpu3050_attribute_group,
@@ -1032,7 +1031,6 @@ static int mpu3050_drdy_trigger_set_state(struct iio_trigger *trig,
}
static const struct iio_trigger_ops mpu3050_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = mpu3050_drdy_trigger_set_state,
};
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index e366422e8512..b31064ba37b9 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -111,14 +111,23 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = 0x80,
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int2 = 0x08,
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x08,
+ },
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
* for the DRDY line on INT2.
*/
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
},
.multi_read_bit = true,
.bootime = 2,
@@ -181,14 +190,23 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = 0x80,
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int2 = 0x08,
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x08,
+ },
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
* for the DRDY line on INT2.
*/
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
},
.multi_read_bit = true,
.bootime = 2,
@@ -246,14 +264,23 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = 0x80,
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int2 = 0x08,
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x08,
+ },
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
* for the DRDY line on INT2.
*/
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
},
.multi_read_bit = true,
.bootime = 2,
@@ -326,7 +353,6 @@ static const struct attribute_group st_gyro_attribute_group = {
};
static const struct iio_info gyro_info = {
- .driver_module = THIS_MODULE,
.attrs = &st_gyro_attribute_group,
.read_raw = &st_gyro_read_raw,
.write_raw = &st_gyro_write_raw,
@@ -335,7 +361,6 @@ static const struct iio_info gyro_info = {
#ifdef CONFIG_IIO_TRIGGER
static const struct iio_trigger_ops st_gyro_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = ST_GYRO_TRIGGER_SET_STATE,
.validate_device = st_sensors_validate_device,
};
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 6bb23a49e81e..a739fff01c6b 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -309,7 +309,6 @@ static const struct iio_info afe4403_iio_info = {
.attrs = &afe440x_attribute_group,
.read_raw = afe4403_read_raw,
.write_raw = afe4403_write_raw,
- .driver_module = THIS_MODULE,
};
static irqreturn_t afe4403_trigger_handler(int irq, void *private)
@@ -354,7 +353,6 @@ err:
}
static const struct iio_trigger_ops afe4403_trigger_ops = {
- .owner = THIS_MODULE,
};
#define AFE4403_TIMING_PAIRS \
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 964f5231a831..11910922e655 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -328,7 +328,6 @@ static const struct iio_info afe4404_iio_info = {
.attrs = &afe440x_attribute_group,
.read_raw = afe4404_read_raw,
.write_raw = afe4404_write_raw,
- .driver_module = THIS_MODULE,
};
static irqreturn_t afe4404_trigger_handler(int irq, void *private)
@@ -355,7 +354,6 @@ err:
}
static const struct iio_trigger_ops afe4404_trigger_ops = {
- .owner = THIS_MODULE,
};
/* Default timings from data-sheet */
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 849d71747f9f..91aef5df24a1 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -420,7 +420,6 @@ static int max30100_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info max30100_info = {
- .driver_module = THIS_MODULE,
.read_raw = max30100_read_raw,
};
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index 839b875c29b9..203ffb9cad6a 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -381,7 +381,6 @@ static int max30102_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info max30102_info = {
- .driver_module = THIS_MODULE,
.read_raw = max30102_read_raw,
};
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index ff96b6d0fdae..7d8669dc6547 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -215,7 +215,6 @@ static int am2315_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info am2315_info = {
- .driver_module = THIS_MODULE,
.read_raw = am2315_read_raw,
};
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 2a22ad920333..df6bab40d6fa 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -284,7 +284,6 @@ err:
}
static const struct iio_info dht11_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = dht11_read_raw,
};
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index 7851bd90ef64..d8438310b6d4 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -357,7 +357,6 @@ static const struct iio_info hdc100x_info = {
.read_raw = hdc100x_read_raw,
.write_raw = hdc100x_write_raw,
.attrs = &hdc100x_attribute_group,
- .driver_module = THIS_MODULE,
};
static int hdc100x_probe(struct i2c_client *client,
diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
index 6e09c1acfe51..beab6d6fd6e1 100644
--- a/drivers/iio/humidity/hid-sensor-humidity.c
+++ b/drivers/iio/humidity/hid-sensor-humidity.c
@@ -125,7 +125,6 @@ static int humidity_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info humidity_info = {
- .driver_module = THIS_MODULE,
.read_raw = &humidity_read_raw,
.write_raw = &humidity_write_raw,
};
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
index 9690dfe9a844..e971ea425268 100644
--- a/drivers/iio/humidity/hts221_buffer.c
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -47,7 +47,6 @@ static int hts221_trig_set_state(struct iio_trigger *trig, bool state)
}
static const struct iio_trigger_ops hts221_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = hts221_trig_set_state,
};
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
index 32524a8dc66f..daef177219b6 100644
--- a/drivers/iio/humidity/hts221_core.c
+++ b/drivers/iio/humidity/hts221_core.c
@@ -573,7 +573,6 @@ static const struct attribute_group hts221_attribute_group = {
};
static const struct iio_info hts221_info = {
- .driver_module = THIS_MODULE,
.attrs = &hts221_attribute_group,
.read_raw = hts221_read_raw,
.write_raw = hts221_write_raw,
diff --git a/drivers/iio/humidity/htu21.c b/drivers/iio/humidity/htu21.c
index 2c4b9be85a05..f5a2701ba6dd 100644
--- a/drivers/iio/humidity/htu21.c
+++ b/drivers/iio/humidity/htu21.c
@@ -175,7 +175,6 @@ static const struct iio_info htu21_info = {
.read_raw = htu21_read_raw,
.write_raw = htu21_write_raw,
.attrs = &htu21_attribute_group,
- .driver_module = THIS_MODULE,
};
static int htu21_probe(struct i2c_client *client,
diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c
index 6297766e93d0..1fd19f035a5d 100644
--- a/drivers/iio/humidity/si7005.c
+++ b/drivers/iio/humidity/si7005.c
@@ -124,7 +124,6 @@ static const struct iio_chan_spec si7005_channels[] = {
static const struct iio_info si7005_info = {
.read_raw = si7005_read_raw,
- .driver_module = THIS_MODULE,
};
static int si7005_probe(struct i2c_client *client,
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index 345a7656c5ef..1b2ec8df1a72 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -108,7 +108,6 @@ static const struct iio_chan_spec si7020_channels[] = {
static const struct iio_info si7020_info = {
.read_raw = si7020_read_raw,
- .driver_module = THIS_MODULE,
};
static int si7020_probe(struct i2c_client *client,
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 9b697d35dbef..46a569005a13 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -833,7 +833,6 @@ static struct adis16400_chip_info adis16400_chips[] = {
};
static const struct iio_info adis16400_info = {
- .driver_module = THIS_MODULE,
.read_raw = &adis16400_read_raw,
.write_raw = &adis16400_write_raw,
.update_scan_mode = adis16400_update_scan_mode,
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 12898424d838..7a33d6bd60e0 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -720,7 +720,6 @@ static const struct iio_info adis16480_info = {
.read_raw = &adis16480_read_raw,
.write_raw = &adis16480_write_raw,
.update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static int adis16480_stop_device(struct iio_dev *indio_dev)
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index f53e9a803a0e..0dd5a381be64 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -25,7 +25,6 @@ static int adis_data_rdy_trigger_set_state(struct iio_trigger *trig,
}
static const struct iio_trigger_ops adis_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &adis_data_rdy_trigger_set_state,
};
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index cfd225ed1c8d..c85659ca9507 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -482,7 +482,6 @@ static const struct attribute_group bmi160_attrs_group = {
};
static const struct iio_info bmi160_info = {
- .driver_module = THIS_MODULE,
.read_raw = bmi160_read_raw,
.write_raw = bmi160_write_raw,
.attrs = &bmi160_attrs_group,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 44830bce13df..7d64be353403 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -542,7 +542,9 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
break;
default:
result = -EINVAL;
+ break;
}
+ break;
default:
result = -EINVAL;
break;
@@ -570,10 +572,12 @@ error_write_raw_unlock:
*/
static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
{
- const int hz[] = {188, 98, 42, 20, 10, 5};
- const int d[] = {INV_MPU6050_FILTER_188HZ, INV_MPU6050_FILTER_98HZ,
- INV_MPU6050_FILTER_42HZ, INV_MPU6050_FILTER_20HZ,
- INV_MPU6050_FILTER_10HZ, INV_MPU6050_FILTER_5HZ};
+ static const int hz[] = {188, 98, 42, 20, 10, 5};
+ static const int d[] = {
+ INV_MPU6050_FILTER_188HZ, INV_MPU6050_FILTER_98HZ,
+ INV_MPU6050_FILTER_42HZ, INV_MPU6050_FILTER_20HZ,
+ INV_MPU6050_FILTER_10HZ, INV_MPU6050_FILTER_5HZ
+ };
int i, h, result;
u8 data;
@@ -795,7 +799,6 @@ static const struct attribute_group inv_attribute_group = {
};
static const struct iio_info mpu_info = {
- .driver_module = THIS_MODULE,
.read_raw = &inv_mpu6050_read_raw,
.write_raw = &inv_mpu6050_write_raw,
.write_raw_get_fmt = &inv_write_raw_get_fmt,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 540070f0a230..f963f9fc98c0 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -114,7 +114,6 @@ static int inv_mpu_data_rdy_trigger_set_state(struct iio_trigger *trig,
}
static const struct iio_trigger_ops inv_mpu_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &inv_mpu_data_rdy_trigger_set_state,
};
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 2e7dd5754a56..44b3f5397343 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1003,7 +1003,6 @@ static int kmx61_mag_validate_trigger(struct iio_dev *indio_dev,
}
static const struct iio_info kmx61_acc_info = {
- .driver_module = THIS_MODULE,
.read_raw = kmx61_read_raw,
.write_raw = kmx61_write_raw,
.attrs = &kmx61_acc_attribute_group,
@@ -1015,7 +1014,6 @@ static const struct iio_info kmx61_acc_info = {
};
static const struct iio_info kmx61_mag_info = {
- .driver_module = THIS_MODULE,
.read_raw = kmx61_read_raw,
.write_raw = kmx61_write_raw,
.attrs = &kmx61_mag_attribute_group,
@@ -1087,7 +1085,6 @@ static int kmx61_trig_try_reenable(struct iio_trigger *trig)
static const struct iio_trigger_ops kmx61_trigger_ops = {
.set_trigger_state = kmx61_data_rdy_trigger_set_state,
.try_reenable = kmx61_trig_try_reenable,
- .owner = THIS_MODULE,
};
static irqreturn_t kmx61_event_handler(int irq, void *private)
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 46352c7bff43..4fdb7fcc3ea8 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -29,8 +29,6 @@ enum st_lsm6dsx_hw_id {
#define ST_LSM6DSX_CHAN_SIZE 2
#define ST_LSM6DSX_SAMPLE_SIZE 6
-#define ST_LSM6DSX_SAMPLE_DEPTH (ST_LSM6DSX_SAMPLE_SIZE / \
- ST_LSM6DSX_CHAN_SIZE)
#if defined(CONFIG_SPI_MASTER)
#define ST_LSM6DSX_RX_MAX_LENGTH 256
@@ -52,10 +50,38 @@ struct st_lsm6dsx_reg {
u8 mask;
};
+/**
+ * struct st_lsm6dsx_fifo_ops - ST IMU FIFO settings
+ * @fifo_th: FIFO threshold register info (addr + mask).
+ * @fifo_diff: FIFO diff status register info (addr + mask).
+ * @th_wl: FIFO threshold word length.
+ */
+struct st_lsm6dsx_fifo_ops {
+ struct {
+ u8 addr;
+ u16 mask;
+ } fifo_th;
+ struct {
+ u8 addr;
+ u16 mask;
+ } fifo_diff;
+ u8 th_wl;
+};
+
+/**
+ * struct st_lsm6dsx_settings - ST IMU sensor settings
+ * @wai: Sensor WhoAmI default value.
+ * @max_fifo_size: Sensor max fifo length in FIFO words.
+ * @id: List of hw id supported by the driver configuration.
+ * @decimator: List of decimator register info (addr + mask).
+ * @fifo_ops: Sensor hw FIFO parameters.
+ */
struct st_lsm6dsx_settings {
u8 wai;
u16 max_fifo_size;
enum st_lsm6dsx_hw_id id[ST_LSM6DSX_MAX_ID];
+ struct st_lsm6dsx_reg decimator[ST_LSM6DSX_MAX_ID];
+ struct st_lsm6dsx_fifo_ops fifo_ops;
};
enum st_lsm6dsx_sensor_id {
@@ -79,7 +105,6 @@ enum st_lsm6dsx_fifo_mode {
* @watermark: Sensor watermark level.
* @sip: Number of samples in a given pattern.
* @decimator: FIFO decimation factor.
- * @decimator_mask: Sensor mask for decimation register.
* @delta_ts: Delta time between two consecutive interrupts.
* @ts: Latest timestamp from the interrupt handler.
*/
@@ -94,7 +119,6 @@ struct st_lsm6dsx_sensor {
u16 watermark;
u8 sip;
u8 decimator;
- u8 decimator_mask;
s64 delta_ts;
s64 ts;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index e2737dc71b67..755c472e8a05 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -35,10 +35,6 @@
#include "st_lsm6dsx.h"
-#define ST_LSM6DSX_REG_FIFO_THL_ADDR 0x06
-#define ST_LSM6DSX_REG_FIFO_THH_ADDR 0x07
-#define ST_LSM6DSX_FIFO_TH_MASK GENMASK(11, 0)
-#define ST_LSM6DSX_REG_FIFO_DEC_GXL_ADDR 0x08
#define ST_LSM6DSX_REG_HLACTIVE_ADDR 0x12
#define ST_LSM6DSX_REG_HLACTIVE_MASK BIT(5)
#define ST_LSM6DSX_REG_PP_OD_ADDR 0x12
@@ -46,8 +42,6 @@
#define ST_LSM6DSX_REG_FIFO_MODE_ADDR 0x0a
#define ST_LSM6DSX_FIFO_MODE_MASK GENMASK(2, 0)
#define ST_LSM6DSX_FIFO_ODR_MASK GENMASK(6, 3)
-#define ST_LSM6DSX_REG_FIFO_DIFFL_ADDR 0x3a
-#define ST_LSM6DSX_FIFO_DIFF_MASK GENMASK(11, 0)
#define ST_LSM6DSX_FIFO_EMPTY_MASK BIT(12)
#define ST_LSM6DSX_REG_FIFO_OUTL_ADDR 0x3e
@@ -110,8 +104,9 @@ static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
st_lsm6dsx_get_max_min_odr(hw, &max_odr, &min_odr);
for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
- sensor = iio_priv(hw->iio_devs[i]);
+ const struct st_lsm6dsx_reg *dec_reg;
+ sensor = iio_priv(hw->iio_devs[i]);
/* update fifo decimators and sample in pattern */
if (hw->enable_mask & BIT(sensor->id)) {
sensor->sip = sensor->odr / min_odr;
@@ -123,12 +118,13 @@ static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
data = 0;
}
- err = st_lsm6dsx_write_with_mask(hw,
- ST_LSM6DSX_REG_FIFO_DEC_GXL_ADDR,
- sensor->decimator_mask, data);
- if (err < 0)
- return err;
-
+ dec_reg = &hw->settings->decimator[sensor->id];
+ if (dec_reg->addr) {
+ err = st_lsm6dsx_write_with_mask(hw, dec_reg->addr,
+ dec_reg->mask, data);
+ if (err < 0)
+ return err;
+ }
sip += sensor->sip;
}
hw->sip = sip;
@@ -139,23 +135,10 @@ static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
enum st_lsm6dsx_fifo_mode fifo_mode)
{
- u8 data;
int err;
- switch (fifo_mode) {
- case ST_LSM6DSX_FIFO_BYPASS:
- data = fifo_mode;
- break;
- case ST_LSM6DSX_FIFO_CONT:
- data = (ST_LSM6DSX_MAX_FIFO_ODR_VAL <<
- __ffs(ST_LSM6DSX_FIFO_ODR_MASK)) | fifo_mode;
- break;
- default:
- return -EINVAL;
- }
-
- err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
- sizeof(data), &data);
+ err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
+ ST_LSM6DSX_FIFO_MODE_MASK, fifo_mode);
if (err < 0)
return err;
@@ -164,9 +147,20 @@ int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
return 0;
}
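+/* Drive the FIFO ODR field: maximum ODR whenever at least one sensor is enabled, zero otherwise. */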
+static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
+ bool enable)
+{
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+ u8 data;
+
+ data = hw->enable_mask ? ST_LSM6DSX_MAX_FIFO_ODR_VAL : 0;
+ return st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
+ ST_LSM6DSX_FIFO_ODR_MASK, data);
+}
+
int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
{
- u16 fifo_watermark = ~0, cur_watermark, sip = 0;
+ u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask;
struct st_lsm6dsx_hw *hw = sensor->hw;
struct st_lsm6dsx_sensor *cur_sensor;
__le16 wdata;
@@ -191,20 +185,21 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
fifo_watermark = max_t(u16, fifo_watermark, sip);
fifo_watermark = (fifo_watermark / sip) * sip;
- fifo_watermark = fifo_watermark * ST_LSM6DSX_SAMPLE_DEPTH;
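+ /* convert the watermark from samples to FIFO threshold words using the per-chip word length */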
+ fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl;
mutex_lock(&hw->lock);
- err = hw->tf->read(hw->dev, ST_LSM6DSX_REG_FIFO_THH_ADDR,
+ err = hw->tf->read(hw->dev, hw->settings->fifo_ops.fifo_th.addr + 1,
sizeof(data), &data);
if (err < 0)
goto out;
- fifo_watermark = ((data << 8) & ~ST_LSM6DSX_FIFO_TH_MASK) |
- (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK);
+ fifo_th_mask = hw->settings->fifo_ops.fifo_th.mask;
+ fifo_watermark = ((data << 8) & ~fifo_th_mask) |
+ (fifo_watermark & fifo_th_mask);
wdata = cpu_to_le16(fifo_watermark);
- err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_THL_ADDR,
+ err = hw->tf->write(hw->dev, hw->settings->fifo_ops.fifo_th.addr,
sizeof(wdata), (u8 *)&wdata);
out:
mutex_unlock(&hw->lock);
@@ -223,6 +218,7 @@ out:
static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
{
u16 fifo_len, pattern_len = hw->sip * ST_LSM6DSX_SAMPLE_SIZE;
+ u16 fifo_diff_mask = hw->settings->fifo_ops.fifo_diff.mask;
int err, acc_sip, gyro_sip, read_len, samples, offset;
struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor;
s64 acc_ts, acc_delta_ts, gyro_ts, gyro_delta_ts;
@@ -230,7 +226,7 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
u8 buff[pattern_len];
__le16 fifo_status;
- err = hw->tf->read(hw->dev, ST_LSM6DSX_REG_FIFO_DIFFL_ADDR,
+ err = hw->tf->read(hw->dev, hw->settings->fifo_ops.fifo_diff.addr,
sizeof(fifo_status), (u8 *)&fifo_status);
if (err < 0)
return err;
@@ -238,7 +234,7 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
if (fifo_status & cpu_to_le16(ST_LSM6DSX_FIFO_EMPTY_MASK))
return 0;
- fifo_len = (le16_to_cpu(fifo_status) & ST_LSM6DSX_FIFO_DIFF_MASK) *
+ fifo_len = (le16_to_cpu(fifo_status) & fifo_diff_mask) *
ST_LSM6DSX_CHAN_SIZE;
samples = fifo_len / ST_LSM6DSX_SAMPLE_SIZE;
fifo_len = (fifo_len / pattern_len) * pattern_len;
@@ -345,6 +341,10 @@ static int st_lsm6dsx_update_fifo(struct iio_dev *iio_dev, bool enable)
return err;
}
+ err = st_lsm6dsx_set_fifo_odr(sensor, enable);
+ if (err < 0)
+ return err;
+
err = st_lsm6dsx_update_decimators(hw);
if (err < 0)
return err;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index b485540da89e..239c735242be 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -42,8 +42,6 @@
#include "st_lsm6dsx.h"
-#define ST_LSM6DSX_REG_ACC_DEC_MASK GENMASK(2, 0)
-#define ST_LSM6DSX_REG_GYRO_DEC_MASK GENMASK(5, 3)
#define ST_LSM6DSX_REG_INT1_ADDR 0x0d
#define ST_LSM6DSX_REG_INT2_ADDR 0x0e
#define ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK BIT(3)
@@ -54,10 +52,6 @@
#define ST_LSM6DSX_REG_BDU_MASK BIT(6)
#define ST_LSM6DSX_REG_INT2_ON_INT1_ADDR 0x13
#define ST_LSM6DSX_REG_INT2_ON_INT1_MASK BIT(5)
-#define ST_LSM6DSX_REG_ROUNDING_ADDR 0x16
-#define ST_LSM6DSX_REG_ROUNDING_MASK BIT(2)
-#define ST_LSM6DSX_REG_LIR_ADDR 0x58
-#define ST_LSM6DSX_REG_LIR_MASK BIT(0)
#define ST_LSM6DSX_REG_ACC_ODR_ADDR 0x10
#define ST_LSM6DSX_REG_ACC_ODR_MASK GENMASK(7, 4)
@@ -160,25 +154,88 @@ static const struct st_lsm6dsx_fs_table_entry st_lsm6dsx_fs_table[] = {
static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
{
.wai = 0x69,
- .max_fifo_size = 8192,
+ .max_fifo_size = 1365,
.id = {
[0] = ST_LSM6DS3_ID,
},
+ .decimator = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .addr = 0x08,
+ .mask = GENMASK(2, 0),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .addr = 0x08,
+ .mask = GENMASK(5, 3),
+ },
+ },
+ .fifo_ops = {
+ .fifo_th = {
+ .addr = 0x06,
+ .mask = GENMASK(11, 0),
+ },
+ .fifo_diff = {
+ .addr = 0x3a,
+ .mask = GENMASK(11, 0),
+ },
+ .th_wl = 3, /* 1LSB = 2B */
+ },
},
{
.wai = 0x69,
- .max_fifo_size = 4096,
+ .max_fifo_size = 682,
.id = {
[0] = ST_LSM6DS3H_ID,
},
+ .decimator = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .addr = 0x08,
+ .mask = GENMASK(2, 0),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .addr = 0x08,
+ .mask = GENMASK(5, 3),
+ },
+ },
+ .fifo_ops = {
+ .fifo_th = {
+ .addr = 0x06,
+ .mask = GENMASK(11, 0),
+ },
+ .fifo_diff = {
+ .addr = 0x3a,
+ .mask = GENMASK(11, 0),
+ },
+ .th_wl = 3, /* 1LSB = 2B */
+ },
},
{
.wai = 0x6a,
- .max_fifo_size = 4096,
+ .max_fifo_size = 682,
.id = {
[0] = ST_LSM6DSL_ID,
[1] = ST_LSM6DSM_ID,
},
+ .decimator = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .addr = 0x08,
+ .mask = GENMASK(2, 0),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .addr = 0x08,
+ .mask = GENMASK(5, 3),
+ },
+ },
+ .fifo_ops = {
+ .fifo_th = {
+ .addr = 0x06,
+ .mask = GENMASK(11, 0),
+ },
+ .fifo_diff = {
+ .addr = 0x3a,
+ .mask = GENMASK(11, 0),
+ },
+ .th_wl = 3, /* 1LSB = 2B */
+ },
},
};
@@ -322,7 +379,6 @@ static int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u16 odr,
return -EINVAL;
*val = st_lsm6dsx_odr_table[sensor->id].odr_avl[i].val;
- sensor->odr = odr;
return 0;
}
@@ -449,6 +505,8 @@ static int st_lsm6dsx_write_raw(struct iio_dev *iio_dev,
u8 data;
err = st_lsm6dsx_check_odr(sensor, val, &data);
+ if (!err)
+ sensor->odr = val;
break;
}
default:
@@ -465,10 +523,9 @@ static int st_lsm6dsx_set_watermark(struct iio_dev *iio_dev, unsigned int val)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
struct st_lsm6dsx_hw *hw = sensor->hw;
- int err, max_fifo_len;
+ int err;
- max_fifo_len = hw->settings->max_fifo_size / ST_LSM6DSX_SAMPLE_SIZE;
- if (val < 1 || val > max_fifo_len)
+ if (val < 1 || val > hw->settings->max_fifo_size)
return -EINVAL;
err = st_lsm6dsx_update_watermark(sensor, val);
@@ -530,7 +587,6 @@ static const struct attribute_group st_lsm6dsx_acc_attribute_group = {
};
static const struct iio_info st_lsm6dsx_acc_info = {
- .driver_module = THIS_MODULE,
.attrs = &st_lsm6dsx_acc_attribute_group,
.read_raw = st_lsm6dsx_read_raw,
.write_raw = st_lsm6dsx_write_raw,
@@ -548,7 +604,6 @@ static const struct attribute_group st_lsm6dsx_gyro_attribute_group = {
};
static const struct iio_info st_lsm6dsx_gyro_info = {
- .driver_module = THIS_MODULE,
.attrs = &st_lsm6dsx_gyro_attribute_group,
.read_raw = st_lsm6dsx_read_raw,
.write_raw = st_lsm6dsx_write_raw,
@@ -608,23 +663,12 @@ static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
msleep(200);
- /* latch interrupts */
- err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_LIR_ADDR,
- ST_LSM6DSX_REG_LIR_MASK, 1);
- if (err < 0)
- return err;
-
/* enable Block Data Update */
err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_BDU_ADDR,
ST_LSM6DSX_REG_BDU_MASK, 1);
if (err < 0)
return err;
- err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_ROUNDING_ADDR,
- ST_LSM6DSX_REG_ROUNDING_MASK, 1);
- if (err < 0)
- return err;
-
/* enable FIFO watermark interrupt */
err = st_lsm6dsx_get_drdy_reg(hw, &drdy_int_reg);
if (err < 0)
@@ -662,7 +706,6 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
iio_dev->num_channels = ARRAY_SIZE(st_lsm6dsx_acc_channels);
iio_dev->info = &st_lsm6dsx_acc_info;
- sensor->decimator_mask = ST_LSM6DSX_REG_ACC_DEC_MASK;
scnprintf(sensor->name, sizeof(sensor->name), "%s_accel",
name);
break;
@@ -671,7 +714,6 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
iio_dev->num_channels = ARRAY_SIZE(st_lsm6dsx_gyro_channels);
iio_dev->info = &st_lsm6dsx_gyro_info;
- sensor->decimator_mask = ST_LSM6DSX_REG_GYRO_DEC_MASK;
scnprintf(sensor->name, sizeof(sensor->name), "%s_gyro",
name);
break;
diff --git a/drivers/iio/industrialio-configfs.c b/drivers/iio/industrialio-configfs.c
index 45ce2bc47180..5a0aae119369 100644
--- a/drivers/iio/industrialio-configfs.c
+++ b/drivers/iio/industrialio-configfs.c
@@ -17,7 +17,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/configfs.h>
-static struct config_item_type iio_root_group_type = {
+static const struct config_item_type iio_root_group_type = {
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index a47428b4d31b..9c4cfd19b739 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1662,14 +1662,11 @@ static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
static const struct iio_buffer_setup_ops noop_ring_setup_ops;
-/**
- * iio_device_register() - register a device with the IIO subsystem
- * @indio_dev: Device structure filled by the device driver
- **/
-int iio_device_register(struct iio_dev *indio_dev)
+int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
int ret;
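+ /* stash the registering driver's module so the core no longer reaches into iio_info for it */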
+ indio_dev->driver_module = this_mod;
/* If the calling driver did not initialize of_node, do it here */
if (!indio_dev->dev.of_node && indio_dev->dev.parent)
indio_dev->dev.of_node = indio_dev->dev.parent->of_node;
@@ -1715,7 +1712,8 @@ int iio_device_register(struct iio_dev *indio_dev)
indio_dev->setup_ops = &noop_ring_setup_ops;
cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
- indio_dev->chrdev.owner = indio_dev->info->driver_module;
+
+ indio_dev->chrdev.owner = this_mod;
ret = cdev_device_add(&indio_dev->chrdev, &indio_dev->dev);
if (ret < 0)
@@ -1733,7 +1731,7 @@ error_unreg_debugfs:
iio_device_unregister_debugfs(indio_dev);
return ret;
}
-EXPORT_SYMBOL(iio_device_register);
+EXPORT_SYMBOL(__iio_device_register);
/**
* iio_device_unregister() - unregister a device from the IIO subsystem
@@ -1765,23 +1763,8 @@ static void devm_iio_device_unreg(struct device *dev, void *res)
iio_device_unregister(*(struct iio_dev **)res);
}
-/**
- * devm_iio_device_register - Resource-managed iio_device_register()
- * @dev: Device to allocate iio_dev for
- * @indio_dev: Device structure filled by the device driver
- *
- * Managed iio_device_register. The IIO device registered with this
- * function is automatically unregistered on driver detach. This function
- * calls iio_device_register() internally. Refer to that function for more
- * information.
- *
- * If an iio_dev registered with this function needs to be unregistered
- * separately, devm_iio_device_unregister() must be used.
- *
- * RETURNS:
- * 0 on success, negative error number on failure.
- */
-int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev)
+int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
+ struct module *this_mod)
{
struct iio_dev **ptr;
int ret;
@@ -1791,7 +1774,7 @@ int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev)
return -ENOMEM;
*ptr = indio_dev;
- ret = iio_device_register(indio_dev);
+ ret = __iio_device_register(indio_dev, this_mod);
if (!ret)
devres_add(dev, ptr);
else
@@ -1799,7 +1782,7 @@ int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev)
return ret;
}
-EXPORT_SYMBOL_GPL(devm_iio_device_register);
+EXPORT_SYMBOL_GPL(__devm_iio_device_register);
/**
* devm_iio_device_unregister - Resource-managed iio_device_unregister()
diff --git a/drivers/iio/industrialio-sw-device.c b/drivers/iio/industrialio-sw-device.c
index 81b49cfca452..90df97c542f6 100644
--- a/drivers/iio/industrialio-sw-device.c
+++ b/drivers/iio/industrialio-sw-device.c
@@ -19,9 +19,9 @@
#include <linux/configfs.h>
static struct config_group *iio_devices_group;
-static struct config_item_type iio_device_type_group_type;
+static const struct config_item_type iio_device_type_group_type;
-static struct config_item_type iio_devices_group_type = {
+static const struct config_item_type iio_devices_group_type = {
.ct_owner = THIS_MODULE,
};
@@ -156,7 +156,7 @@ static struct configfs_group_operations device_ops = {
.drop_item = &device_drop_group,
};
-static struct config_item_type iio_device_type_group_type = {
+static const struct config_item_type iio_device_type_group_type = {
.ct_group_ops = &device_ops,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/iio/industrialio-sw-trigger.c b/drivers/iio/industrialio-sw-trigger.c
index 8d24fb159cc9..bc6b7fb43e3a 100644
--- a/drivers/iio/industrialio-sw-trigger.c
+++ b/drivers/iio/industrialio-sw-trigger.c
@@ -19,9 +19,9 @@
#include <linux/configfs.h>
static struct config_group *iio_triggers_group;
-static struct config_item_type iio_trigger_type_group_type;
+static const struct config_item_type iio_trigger_type_group_type;
-static struct config_item_type iio_triggers_group_type = {
+static const struct config_item_type iio_triggers_group_type = {
.ct_owner = THIS_MODULE,
};
@@ -156,7 +156,7 @@ static struct configfs_group_operations trigger_ops = {
.drop_item = &trigger_drop_group,
};
-static struct config_item_type iio_trigger_type_group_type = {
+static const struct config_item_type iio_trigger_type_group_type = {
.ct_group_ops = &trigger_ops,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 4061fed93f1f..ce66699c7fcc 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -66,13 +66,12 @@ ATTRIBUTE_GROUPS(iio_trig_dev);
static struct iio_trigger *__iio_trigger_find_by_name(const char *name);
-int iio_trigger_register(struct iio_trigger *trig_info)
+int __iio_trigger_register(struct iio_trigger *trig_info,
+ struct module *this_mod)
{
int ret;
- /* trig_info->ops is required for the module member */
- if (!trig_info->ops)
- return -EINVAL;
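+ /* trig->ops is optional now that the owning module is passed in explicitly */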
+ trig_info->owner = this_mod;
trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
if (trig_info->id < 0)
@@ -105,7 +104,7 @@ error_unregister_id:
ida_simple_remove(&iio_trigger_ida, trig_info->id);
return ret;
}
-EXPORT_SYMBOL(iio_trigger_register);
+EXPORT_SYMBOL(__iio_trigger_register);
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
@@ -206,7 +205,8 @@ EXPORT_SYMBOL(iio_trigger_poll_chained);
void iio_trigger_notify_done(struct iio_trigger *trig)
{
- if (atomic_dec_and_test(&trig->use_count) && trig->ops->try_reenable)
+ if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
+ trig->ops->try_reenable)
if (trig->ops->try_reenable(trig))
/* Missed an interrupt so launch new poll now */
iio_trigger_poll(trig);
@@ -250,7 +250,7 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
= bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
/* Prevent the module from being removed whilst attached to a trigger */
- __module_get(pf->indio_dev->info->driver_module);
+ __module_get(pf->indio_dev->driver_module);
/* Get irq number */
pf->irq = iio_trigger_get_irq(trig);
@@ -265,7 +265,7 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
goto out_put_irq;
/* Enable trigger in driver */
- if (trig->ops->set_trigger_state && notinuse) {
+ if (trig->ops && trig->ops->set_trigger_state && notinuse) {
ret = trig->ops->set_trigger_state(trig, true);
if (ret < 0)
goto out_free_irq;
@@ -286,7 +286,7 @@ out_free_irq:
out_put_irq:
iio_trigger_put_irq(trig, pf->irq);
out_put_module:
- module_put(pf->indio_dev->info->driver_module);
+ module_put(pf->indio_dev->driver_module);
return ret;
}
@@ -298,7 +298,7 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
= (bitmap_weight(trig->pool,
CONFIG_IIO_CONSUMERS_PER_TRIGGER)
== 1);
- if (trig->ops->set_trigger_state && no_other_users) {
+ if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
ret = trig->ops->set_trigger_state(trig, false);
if (ret)
return ret;
@@ -307,7 +307,7 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
trig->attached_own_device = false;
iio_trigger_put_irq(trig, pf->irq);
free_irq(pf->irq, pf);
- module_put(pf->indio_dev->info->driver_module);
+ module_put(pf->indio_dev->driver_module);
return ret;
}
@@ -428,7 +428,7 @@ static ssize_t iio_trigger_write_current(struct device *dev,
goto out_trigger_put;
}
- if (trig && trig->ops->validate_device) {
+ if (trig && trig->ops && trig->ops->validate_device) {
ret = trig->ops->validate_device(trig, indio_dev);
if (ret)
goto out_trigger_put;
@@ -663,9 +663,10 @@ static void devm_iio_trigger_unreg(struct device *dev, void *res)
}
/**
- * devm_iio_trigger_register - Resource-managed iio_trigger_register()
+ * __devm_iio_trigger_register - Resource-managed iio_trigger_register()
* @dev: device this trigger was allocated for
* @trig_info: trigger to register
+ * @this_mod: module registering the trigger
*
* Managed iio_trigger_register(). The IIO trigger registered with this
* function is automatically unregistered on driver detach. This function
@@ -678,7 +679,9 @@ static void devm_iio_trigger_unreg(struct device *dev, void *res)
* RETURNS:
* 0 on success, negative error number on failure.
*/
-int devm_iio_trigger_register(struct device *dev, struct iio_trigger *trig_info)
+int __devm_iio_trigger_register(struct device *dev,
+ struct iio_trigger *trig_info,
+ struct module *this_mod)
{
struct iio_trigger **ptr;
int ret;
@@ -688,7 +691,7 @@ int devm_iio_trigger_register(struct device *dev, struct iio_trigger *trig_info)
return -ENOMEM;
*ptr = trig_info;
- ret = iio_trigger_register(trig_info);
+ ret = __iio_trigger_register(trig_info, this_mod);
if (!ret)
devres_add(dev, ptr);
else
@@ -696,7 +699,7 @@ int devm_iio_trigger_register(struct device *dev, struct iio_trigger *trig_info)
return ret;
}
-EXPORT_SYMBOL_GPL(devm_iio_trigger_register);
+EXPORT_SYMBOL_GPL(__devm_iio_trigger_register);
/**
* devm_iio_trigger_unregister - Resource-managed iio_trigger_unregister()
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
index f0b47c501f4e..c35e2f8df339 100644
--- a/drivers/iio/light/acpi-als.c
+++ b/drivers/iio/light/acpi-als.c
@@ -171,7 +171,6 @@ static int acpi_als_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info acpi_als_info = {
- .driver_module = THIS_MODULE,
.read_raw = acpi_als_read_raw,
};
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 0113fc843a81..e45bb6a277c2 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -245,7 +245,6 @@ static const struct iio_info adjd_s311_info = {
.read_raw = adjd_s311_read_raw,
.write_raw = adjd_s311_write_raw,
.update_scan_mode = adjd_s311_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static int adjd_s311_probe(struct i2c_client *client,
diff --git a/drivers/iio/light/al3320a.c b/drivers/iio/light/al3320a.c
index 6aac6513fd41..66623facea9a 100644
--- a/drivers/iio/light/al3320a.c
+++ b/drivers/iio/light/al3320a.c
@@ -168,7 +168,6 @@ static int al3320a_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info al3320a_info = {
- .driver_module = THIS_MODULE,
.read_raw = al3320a_read_raw,
.write_raw = al3320a_write_raw,
.attrs = &al3320a_attribute_group,
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
index 05eacd1ee40f..5c15736fb93e 100644
--- a/drivers/iio/light/apds9300.c
+++ b/drivers/iio/light/apds9300.c
@@ -337,12 +337,10 @@ static int apds9300_write_interrupt_config(struct iio_dev *indio_dev,
}
static const struct iio_info apds9300_info_no_irq = {
- .driver_module = THIS_MODULE,
.read_raw = apds9300_read_raw,
};
static const struct iio_info apds9300_info = {
- .driver_module = THIS_MODULE,
.read_raw = apds9300_read_raw,
.read_event_value = apds9300_read_thresh,
.write_event_value = apds9300_write_thresh,
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 518a47e9377b..a8fa00e31c39 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -744,7 +744,6 @@ static int apds9960_write_event_config(struct iio_dev *indio_dev,
}
static const struct iio_info apds9960_info = {
- .driver_module = THIS_MODULE,
.attrs = &apds9960_attribute_group,
.read_raw = apds9960_read_raw,
.write_raw = apds9960_write_raw,
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 6c61187e630f..a814828e69f5 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -217,7 +217,6 @@ static const struct attribute_group bh1750_attribute_group = {
};
static const struct iio_info bh1750_info = {
- .driver_module = THIS_MODULE,
.attrs = &bh1750_attribute_group,
.read_raw = bh1750_read_raw,
.write_raw = bh1750_write_raw,
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index b54dcba05a82..036f3bbe323c 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -128,7 +128,6 @@ static int bh1780_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info bh1780_info = {
- .driver_module = THIS_MODULE,
.read_raw = bh1780_read_raw,
.debugfs_reg_access = bh1780_debugfs_reg_access,
};
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index d6fd0dace74f..aebf7dd071af 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -292,7 +292,6 @@ static const struct attribute_group cm32181_attribute_group = {
};
static const struct iio_info cm32181_info = {
- .driver_module = THIS_MODULE,
.read_raw = &cm32181_read_raw,
.write_raw = &cm32181_write_raw,
.attrs = &cm32181_attribute_group,
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index 263e97235ea0..c639cf276ee6 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -322,7 +322,6 @@ static const struct attribute_group cm3232_attribute_group = {
};
static const struct iio_info cm3232_info = {
- .driver_module = THIS_MODULE,
.read_raw = &cm3232_read_raw,
.write_raw = &cm3232_write_raw,
.attrs = &cm3232_attribute_group,
diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c
index d823c112d54b..83b08b6dc60f 100644
--- a/drivers/iio/light/cm3323.c
+++ b/drivers/iio/light/cm3323.c
@@ -211,7 +211,6 @@ static int cm3323_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info cm3323_info = {
- .driver_module = THIS_MODULE,
.read_raw = cm3323_read_raw,
.write_raw = cm3323_write_raw,
.attrs = &cm3323_attribute_group,
diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c
index 980624e9ffb5..e454bc6a33c6 100644
--- a/drivers/iio/light/cm3605.c
+++ b/drivers/iio/light/cm3605.c
@@ -126,7 +126,6 @@ static int cm3605_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info cm3605_info = {
- .driver_module = THIS_MODULE,
.read_raw = cm3605_read_raw,
};
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 9d66e89c57ef..1dd8ed0121b3 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -612,7 +612,6 @@ static const struct attribute_group cm36651_attribute_group = {
};
static const struct iio_info cm36651_info = {
- .driver_module = THIS_MODULE,
.read_raw = &cm36651_read_raw,
.write_raw = &cm36651_write_raw,
.read_event_value = &cm36651_read_prox_thresh,
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index 721722376fd0..b2a46b390d5c 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -175,7 +175,6 @@ static int cros_ec_light_prox_write(struct iio_dev *indio_dev,
static const struct iio_info cros_ec_light_prox_info = {
.read_raw = &cros_ec_light_prox_read,
.write_raw = &cros_ec_light_prox_write,
- .driver_module = THIS_MODULE,
};
static int cros_ec_light_prox_probe(struct platform_device *pdev)
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index 6ada9149f142..44b13fbcd093 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -1384,7 +1384,6 @@ static const struct iio_info gp2ap020a00f_info = {
.read_event_config = &gp2ap020a00f_read_event_config,
.write_event_value = &gp2ap020a00f_write_event_val,
.write_event_config = &gp2ap020a00f_write_event_config,
- .driver_module = THIS_MODULE,
};
static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev)
@@ -1481,7 +1480,6 @@ static const struct iio_buffer_setup_ops gp2ap020a00f_buffer_setup_ops = {
};
static const struct iio_trigger_ops gp2ap020a00f_trigger_ops = {
- .owner = THIS_MODULE,
};
static int gp2ap020a00f_probe(struct i2c_client *client,
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 059d964772c7..befd693a4a31 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -177,7 +177,6 @@ static int als_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info als_info = {
- .driver_module = THIS_MODULE,
.read_raw = &als_read_raw,
.write_raw = &als_write_raw,
};
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 73fced8a63b7..45107f7537b5 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -156,7 +156,6 @@ static int prox_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info prox_info = {
- .driver_module = THIS_MODULE,
.read_raw = &prox_read_raw,
.write_raw = &prox_write_raw,
};
diff --git a/drivers/iio/light/isl29018.c b/drivers/iio/light/isl29018.c
index 61f5924b472d..b45400f8fef4 100644
--- a/drivers/iio/light/isl29018.c
+++ b/drivers/iio/light/isl29018.c
@@ -624,14 +624,12 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
static const struct iio_info isl29018_info = {
.attrs = &isl29018_group,
- .driver_module = THIS_MODULE,
.read_raw = isl29018_read_raw,
.write_raw = isl29018_write_raw,
};
static const struct iio_info isl29023_info = {
.attrs = &isl29023_group,
- .driver_module = THIS_MODULE,
.read_raw = isl29018_read_raw,
.write_raw = isl29018_write_raw,
};
diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c
index 3d09c1fc4dad..f9912ab4f65c 100644
--- a/drivers/iio/light/isl29028.c
+++ b/drivers/iio/light/isl29028.c
@@ -536,7 +536,6 @@ static const struct iio_chan_spec isl29028_channels[] = {
static const struct iio_info isl29028_info = {
.attrs = &isl29108_group,
- .driver_module = THIS_MODULE,
.read_raw = isl29028_read_raw,
.write_raw = isl29028_write_raw,
};
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index 1d2c0c8a1d4f..ed38edcd5efe 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -214,7 +214,6 @@ static const struct iio_info isl29125_info = {
.read_raw = isl29125_read_raw,
.write_raw = isl29125_write_raw,
.attrs = &isl29125_attribute_group,
- .driver_module = THIS_MODULE,
};
static int isl29125_buffer_preenable(struct iio_dev *indio_dev)
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index e8a8931b4f50..811505d925b3 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -271,7 +271,6 @@ static const struct iio_chan_spec jsa1212_channels[] = {
};
static const struct iio_info jsa1212_info = {
- .driver_module = THIS_MODULE,
.read_raw = &jsa1212_read_raw,
};
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index 0443fd2e8757..36208a3652e9 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -827,7 +827,6 @@ static int lm3533_als_disable(struct lm3533_als *als)
static const struct iio_info lm3533_als_info = {
.attrs = &lm3533_als_attribute_group,
.event_attrs = &lm3533_als_event_attribute_group,
- .driver_module = THIS_MODULE,
.read_raw = &lm3533_als_read_raw,
};
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 67838edd8b37..830a2d45aa4d 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1158,7 +1158,6 @@ static const struct iio_info ltr501_info_no_irq = {
.read_raw = ltr501_read_raw,
.write_raw = ltr501_write_raw,
.attrs = &ltr501_attribute_group,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ltr501_info = {
@@ -1169,14 +1168,12 @@ static const struct iio_info ltr501_info = {
.write_event_value = &ltr501_write_event,
.read_event_config = &ltr501_read_event_config,
.write_event_config = &ltr501_write_event_config,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ltr301_info_no_irq = {
.read_raw = ltr501_read_raw,
.write_raw = ltr501_write_raw,
.attrs = &ltr301_attribute_group,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ltr301_info = {
@@ -1187,7 +1184,6 @@ static const struct iio_info ltr301_info = {
.write_event_value = &ltr501_write_event,
.read_event_config = &ltr501_read_event_config,
.write_event_config = &ltr501_write_event_config,
- .driver_module = THIS_MODULE,
};
static struct ltr501_chip_info ltr501_chip_info_tbl[] = {
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index 81bd8e8da4a6..bcdb0eb9e537 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -402,7 +402,6 @@ static const struct attribute_group max44000_attribute_group = {
};
static const struct iio_info max44000_info = {
- .driver_module = THIS_MODULE,
.read_raw = max44000_read_raw,
.write_raw = max44000_write_raw,
.write_raw_get_fmt = max44000_write_raw_get_fmt,
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index b91ebc3483ce..54d88b60e303 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -585,7 +585,6 @@ err:
}
static const struct iio_info opt3001_info = {
- .driver_module = THIS_MODULE,
.attrs = &opt3001_attribute_group,
.read_raw = opt3001_read_raw,
.write_raw = opt3001_write_raw,
diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c
index 76a9e12b46bc..30ea1a088dd9 100644
--- a/drivers/iio/light/pa12203001.c
+++ b/drivers/iio/light/pa12203001.c
@@ -306,7 +306,6 @@ static int pa12203001_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info pa12203001_info = {
- .driver_module = THIS_MODULE,
.read_raw = pa12203001_read_raw,
.write_raw = pa12203001_write_raw,
.attrs = &pa12203001_attr_group,
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index a6efa12613a2..ffe9ce798ea2 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -538,7 +538,6 @@ static int rpr0521_pxs_drdy_set_state(struct iio_trigger *trigger,
static const struct iio_trigger_ops rpr0521_trigger_ops = {
.set_trigger_state = rpr0521_pxs_drdy_set_state,
- .owner = THIS_MODULE,
};
@@ -830,7 +829,6 @@ static int rpr0521_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info rpr0521_info = {
- .driver_module = THIS_MODULE,
.read_raw = rpr0521_read_raw,
.write_raw = rpr0521_write_raw,
.attrs = &rpr0521_attribute_group,
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 096034c126a4..76f16f9c7616 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -989,14 +989,12 @@ static const struct attribute_group si114x_attribute_group = {
static const struct iio_info si1132_info = {
.read_raw = si1145_read_raw,
.write_raw = si1145_write_raw,
- .driver_module = THIS_MODULE,
.attrs = &si1132_attribute_group,
};
static const struct iio_info si114x_info = {
.read_raw = si1145_read_raw,
.write_raw = si1145_write_raw,
- .driver_module = THIS_MODULE,
.attrs = &si114x_attribute_group,
};
@@ -1237,7 +1235,6 @@ disable:
}
static const struct iio_trigger_ops si1145_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = si1145_trigger_set_state,
};
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 45cf8b0a4363..6e2a169da950 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -409,7 +409,6 @@ static int stk3310_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info stk3310_info = {
- .driver_module = THIS_MODULE,
.read_raw = stk3310_read_raw,
.write_raw = stk3310_write_raw,
.attrs = &stk3310_attribute_group,
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index a795afb7667b..205e5659ce6b 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -241,7 +241,6 @@ static const struct iio_info tcs3414_info = {
.read_raw = tcs3414_read_raw,
.write_raw = tcs3414_write_raw,
.attrs = &tcs3414_attribute_group,
- .driver_module = THIS_MODULE,
};
static int tcs3414_buffer_preenable(struct iio_dev *indio_dev)
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 09e6ca5e332e..e7923b514d7a 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -13,7 +13,7 @@
*
* Datasheet: http://ams.com/eng/content/download/319364/1117183/file/TCS3472_Datasheet_EN_v2.pdf
*
- * TODO: interrupt support, thresholds, wait time
+ * TODO: wait time
*/
#include <linux/module.h>
@@ -23,6 +23,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/triggered_buffer.h>
@@ -31,12 +32,15 @@
#define TCS3472_COMMAND BIT(7)
#define TCS3472_AUTO_INCR BIT(5)
+#define TCS3472_SPECIAL_FUNC (BIT(5) | BIT(6))
+
+#define TCS3472_INTR_CLEAR (TCS3472_COMMAND | TCS3472_SPECIAL_FUNC | 0x06)
#define TCS3472_ENABLE (TCS3472_COMMAND | 0x00)
#define TCS3472_ATIME (TCS3472_COMMAND | 0x01)
#define TCS3472_WTIME (TCS3472_COMMAND | 0x03)
-#define TCS3472_AILT (TCS3472_COMMAND | 0x04)
-#define TCS3472_AIHT (TCS3472_COMMAND | 0x06)
+#define TCS3472_AILT (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x04)
+#define TCS3472_AIHT (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x06)
#define TCS3472_PERS (TCS3472_COMMAND | 0x0c)
#define TCS3472_CONFIG (TCS3472_COMMAND | 0x0d)
#define TCS3472_CONTROL (TCS3472_COMMAND | 0x0f)
@@ -47,19 +51,42 @@
#define TCS3472_GDATA (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x18)
#define TCS3472_BDATA (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x1a)
+#define TCS3472_STATUS_AINT BIT(4)
#define TCS3472_STATUS_AVALID BIT(0)
+#define TCS3472_ENABLE_AIEN BIT(4)
#define TCS3472_ENABLE_AEN BIT(1)
#define TCS3472_ENABLE_PON BIT(0)
#define TCS3472_CONTROL_AGAIN_MASK (BIT(0) | BIT(1))
struct tcs3472_data {
struct i2c_client *client;
+ struct mutex lock;
+ u16 low_thresh;
+ u16 high_thresh;
u8 enable;
u8 control;
u8 atime;
+ u8 apers;
u16 buffer[8]; /* 4 16-bit channels + 64-bit timestamp */
};
+static const struct iio_event_spec tcs3472_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_PERIOD),
+ },
+};
+
#define TCS3472_CHANNEL(_color, _si, _addr) { \
.type = IIO_INTENSITY, \
.modified = 1, \
@@ -75,6 +102,8 @@ struct tcs3472_data {
.storagebits = 16, \
.endianness = IIO_CPU, \
}, \
+ .event_spec = _si ? NULL : tcs3472_events, \
+ .num_event_specs = _si ? 0 : ARRAY_SIZE(tcs3472_events), \
}
static const int tcs3472_agains[] = { 1, 4, 16, 60 };
@@ -182,6 +211,166 @@ static int tcs3472_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+/*
+ * Translation from APERS field value to the number of consecutive out-of-range
+ * clear channel values before an interrupt is generated
+ */
+static const int tcs3472_intr_pers[] = {
+ 0, 1, 2, 3, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60
+};
+
+static int tcs3472_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int *val,
+ int *val2)
+{
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int ret;
+ unsigned int period;
+
+ mutex_lock(&data->lock);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = (dir == IIO_EV_DIR_RISING) ?
+ data->high_thresh : data->low_thresh;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_EV_INFO_PERIOD:
+ period = (256 - data->atime) * 2400 *
+ tcs3472_intr_pers[data->apers];
+ *val = period / USEC_PER_SEC;
+ *val2 = period % USEC_PER_SEC;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int tcs3472_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int val,
+ int val2)
+{
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int ret;
+ u8 command;
+ int period;
+ int i;
+
+ mutex_lock(&data->lock);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ command = TCS3472_AIHT;
+ break;
+ case IIO_EV_DIR_FALLING:
+ command = TCS3472_AILT;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error;
+ }
+ ret = i2c_smbus_write_word_data(data->client, command, val);
+ if (ret)
+ goto error;
+
+ if (dir == IIO_EV_DIR_RISING)
+ data->high_thresh = val;
+ else
+ data->low_thresh = val;
+ break;
+ case IIO_EV_INFO_PERIOD:
+ period = val * USEC_PER_SEC + val2;
+ for (i = 1; i < ARRAY_SIZE(tcs3472_intr_pers) - 1; i++) {
+ if (period <= (256 - data->atime) * 2400 *
+ tcs3472_intr_pers[i])
+ break;
+ }
+ ret = i2c_smbus_write_byte_data(data->client, TCS3472_PERS, i);
+ if (ret)
+ goto error;
+
+ data->apers = i;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+error:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int tcs3472_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = !!(data->enable & TCS3472_ENABLE_AIEN);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int tcs3472_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int ret = 0;
+ u8 enable_old;
+
+ mutex_lock(&data->lock);
+
+ enable_old = data->enable;
+
+ if (state)
+ data->enable |= TCS3472_ENABLE_AIEN;
+ else
+ data->enable &= ~TCS3472_ENABLE_AIEN;
+
+ if (enable_old != data->enable) {
+ ret = i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
+ data->enable);
+ if (ret)
+ data->enable = enable_old;
+ }
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static irqreturn_t tcs3472_event_handler(int irq, void *priv)
+{
+ struct iio_dev *indio_dev = priv;
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, TCS3472_STATUS);
+ if (ret >= 0 && (ret & TCS3472_STATUS_AINT)) {
+ iio_push_event(indio_dev, IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+
+ i2c_smbus_read_byte_data(data->client, TCS3472_INTR_CLEAR);
+ }
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
@@ -245,8 +434,11 @@ static const struct attribute_group tcs3472_attribute_group = {
static const struct iio_info tcs3472_info = {
.read_raw = tcs3472_read_raw,
.write_raw = tcs3472_write_raw,
+ .read_event_value = tcs3472_read_event,
+ .write_event_value = tcs3472_write_event,
+ .read_event_config = tcs3472_read_event_config,
+ .write_event_config = tcs3472_write_event_config,
.attrs = &tcs3472_attribute_group,
- .driver_module = THIS_MODULE,
};
static int tcs3472_probe(struct i2c_client *client,
@@ -263,6 +455,7 @@ static int tcs3472_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &tcs3472_info;
@@ -292,12 +485,29 @@ static int tcs3472_probe(struct i2c_client *client,
return ret;
data->atime = ret;
+ ret = i2c_smbus_read_word_data(data->client, TCS3472_AILT);
+ if (ret < 0)
+ return ret;
+ data->low_thresh = ret;
+
+ ret = i2c_smbus_read_word_data(data->client, TCS3472_AIHT);
+ if (ret < 0)
+ return ret;
+ data->high_thresh = ret;
+
+ data->apers = 1;
+ ret = i2c_smbus_write_byte_data(data->client, TCS3472_PERS,
+ data->apers);
+ if (ret < 0)
+ return ret;
+
ret = i2c_smbus_read_byte_data(data->client, TCS3472_ENABLE);
if (ret < 0)
return ret;
/* enable device */
data->enable = ret | TCS3472_ENABLE_PON | TCS3472_ENABLE_AEN;
+ data->enable &= ~TCS3472_ENABLE_AIEN;
ret = i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
data->enable);
if (ret < 0)
@@ -308,12 +518,24 @@ static int tcs3472_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ if (client->irq) {
+ ret = request_threaded_irq(client->irq, NULL,
+ tcs3472_event_handler,
+ IRQF_TRIGGER_FALLING | IRQF_SHARED |
+ IRQF_ONESHOT,
+ client->name, indio_dev);
+ if (ret)
+ goto buffer_cleanup;
+ }
+
ret = iio_device_register(indio_dev);
if (ret < 0)
- goto buffer_cleanup;
+ goto free_irq;
return 0;
+free_irq:
+ free_irq(client->irq, indio_dev);
buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
return ret;
@@ -321,8 +543,19 @@ buffer_cleanup:
static int tcs3472_powerdown(struct tcs3472_data *data)
{
- return i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
- data->enable & ~(TCS3472_ENABLE_AEN | TCS3472_ENABLE_PON));
+ int ret;
+ u8 enable_mask = TCS3472_ENABLE_AEN | TCS3472_ENABLE_PON;
+
+ mutex_lock(&data->lock);
+
+ ret = i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
+ data->enable & ~enable_mask);
+ if (!ret)
+ data->enable &= ~enable_mask;
+
+ mutex_unlock(&data->lock);
+
+ return ret;
}
static int tcs3472_remove(struct i2c_client *client)
@@ -330,6 +563,7 @@ static int tcs3472_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
+ free_irq(client->irq, indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
tcs3472_powerdown(iio_priv(indio_dev));
@@ -348,8 +582,19 @@ static int tcs3472_resume(struct device *dev)
{
struct tcs3472_data *data = iio_priv(i2c_get_clientdata(
to_i2c_client(dev)));
- return i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
- data->enable | (TCS3472_ENABLE_AEN | TCS3472_ENABLE_PON));
+ int ret;
+ u8 enable_mask = TCS3472_ENABLE_AEN | TCS3472_ENABLE_PON;
+
+ mutex_lock(&data->lock);
+
+ ret = i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
+ data->enable | enable_mask);
+ if (!ret)
+ data->enable |= enable_mask;
+
+ mutex_unlock(&data->lock);
+
+ return ret;
}
#endif
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 7599693f7fe9..6bbb0b1e6032 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -697,13 +697,11 @@ static int tsl2563_read_interrupt_config(struct iio_dev *indio_dev,
}
static const struct iio_info tsl2563_info_no_irq = {
- .driver_module = THIS_MODULE,
.read_raw = &tsl2563_read_raw,
.write_raw = &tsl2563_write_raw,
};
static const struct iio_info tsl2563_info = {
- .driver_module = THIS_MODULE,
.read_raw = &tsl2563_read_raw,
.write_raw = &tsl2563_write_raw,
.read_event_value = &tsl2563_read_thresh,
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index fb711ed4862e..f2e50edaa242 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -804,7 +804,6 @@ static int tsl2583_write_raw(struct iio_dev *indio_dev,
static const struct iio_info tsl2583_info = {
.attrs = &tsl2583_attribute_group,
- .driver_module = THIS_MODULE,
.read_raw = tsl2583_read_raw,
.write_raw = tsl2583_write_raw,
};
diff --git a/drivers/iio/light/tsl4531.c b/drivers/iio/light/tsl4531.c
index cf94ec72b181..06171cb76e23 100644
--- a/drivers/iio/light/tsl4531.c
+++ b/drivers/iio/light/tsl4531.c
@@ -144,7 +144,6 @@ static const struct iio_info tsl4531_info = {
.read_raw = tsl4531_read_raw,
.write_raw = tsl4531_write_raw,
.attrs = &tsl4531_attribute_group,
- .driver_module = THIS_MODULE,
};
static int tsl4531_check_id(struct i2c_client *client)
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index d571ad7291ed..68e52943879a 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -714,7 +714,6 @@ err:
}
static const struct iio_info us5182d_info = {
- .driver_module = THIS_MODULE,
.read_raw = us5182d_read_raw,
.write_raw = us5182d_write_raw,
.attrs = &us5182d_attr_group,
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 360b6e98137a..c599a90506ad 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -155,7 +155,6 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
static const struct iio_info vcnl4000_info = {
.read_raw = vcnl4000_read_raw,
- .driver_module = THIS_MODULE,
};
static int vcnl4000_probe(struct i2c_client *client,
diff --git a/drivers/iio/light/veml6070.c b/drivers/iio/light/veml6070.c
index bc1c4cb782cd..f4bf3c5b5eda 100644
--- a/drivers/iio/light/veml6070.c
+++ b/drivers/iio/light/veml6070.c
@@ -136,7 +136,6 @@ static int veml6070_read_raw(struct iio_dev *indio_dev,
static const struct iio_info veml6070_info = {
.read_raw = veml6070_read_raw,
- .driver_module = THIS_MODULE,
};
static int veml6070_probe(struct i2c_client *client,
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
index 6e25b724d941..192c77ef3608 100644
--- a/drivers/iio/light/vl6180.c
+++ b/drivers/iio/light/vl6180.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/delay.h>
+#include <linux/util_macros.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -86,6 +87,8 @@
struct vl6180_data {
struct i2c_client *client;
struct mutex lock;
+ unsigned int als_gain_milli;
+ unsigned int als_it_ms;
};
enum { VL6180_ALS, VL6180_RANGE, VL6180_PROX };
@@ -275,19 +278,17 @@ static const struct iio_chan_spec vl6180_channels[] = {
};
/*
- * Columns 3 & 4 represent the same value in decimal and hex notations.
- * Kept in order to avoid the datatype conversion while reading the
- * hardware_gain.
+ * Available Ambient Light Sensor gain settings, 1/1000th, and
+ * corresponding setting for the VL6180_ALS_GAIN register
*/
-static const int vl6180_als_gain[8][4] = {
- { 1, 0, 70, VL6180_ALS_GAIN_1 },
- { 1, 250000, 69, VL6180_ALS_GAIN_1_25 },
- { 1, 670000, 68, VL6180_ALS_GAIN_1_67 },
- { 2, 500000, 67, VL6180_ALS_GAIN_2_5 },
- { 5, 0, 66, VL6180_ALS_GAIN_5 },
- { 10, 0, 65, VL6180_ALS_GAIN_10 },
- { 20, 0, 64, VL6180_ALS_GAIN_20 },
- { 40, 0, 71, VL6180_ALS_GAIN_40 }
+static const int vl6180_als_gain_tab[8] = {
+ 1000, 1250, 1670, 2500, 5000, 10000, 20000, 40000
+};
+static const u8 vl6180_als_gain_tab_bits[8] = {
+ VL6180_ALS_GAIN_1, VL6180_ALS_GAIN_1_25,
+ VL6180_ALS_GAIN_1_67, VL6180_ALS_GAIN_2_5,
+ VL6180_ALS_GAIN_5, VL6180_ALS_GAIN_10,
+ VL6180_ALS_GAIN_20, VL6180_ALS_GAIN_40
};
static int vl6180_read_raw(struct iio_dev *indio_dev,
@@ -295,7 +296,7 @@ static int vl6180_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct vl6180_data *data = iio_priv(indio_dev);
- int ret, i;
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -306,19 +307,20 @@ static int vl6180_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_INT_TIME:
- ret = vl6180_read_word(data->client, VL6180_ALS_IT);
- if (ret < 0)
- return ret;
- *val = 0; /* 1 count = 1ms (0 = 1ms) */
- *val2 = (ret + 1) * 1000; /* convert to seconds */
+ *val = data->als_it_ms;
+ *val2 = 1000;
+
+ return IIO_VAL_FRACTIONAL;
- return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_LIGHT:
- *val = 0; /* one ALS count is 0.32 Lux */
- *val2 = 320000;
- break;
+ /* one ALS count is 0.32 Lux @ gain 1, IT 100 ms */
+ *val = 32000; /* 0.32 * 1000 * 100 */
+ *val2 = data->als_gain_milli * data->als_it_ms;
+
+ return IIO_VAL_FRACTIONAL;
+
case IIO_DISTANCE:
*val = 0; /* sensor reports mm, scale to meter */
*val2 = 1000;
@@ -329,17 +331,11 @@ static int vl6180_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_HARDWAREGAIN:
- ret = vl6180_read_byte(data->client, VL6180_ALS_GAIN);
- if (ret < 0)
- return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(vl6180_als_gain); i++) {
- if (ret == vl6180_als_gain[i][2]) {
- *val = vl6180_als_gain[i][0];
- *val2 = vl6180_als_gain[i][1];
- }
- }
+ *val = data->als_gain_milli;
+ *val2 = 1000;
+
+ return IIO_VAL_FRACTIONAL;
- return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -365,37 +361,53 @@ static int vl6180_hold(struct vl6180_data *data, bool hold)
static int vl6180_set_als_gain(struct vl6180_data *data, int val, int val2)
{
- int i, ret;
-
- for (i = 0; i < ARRAY_SIZE(vl6180_als_gain); i++) {
- if (val == vl6180_als_gain[i][0] &&
- val2 == vl6180_als_gain[i][1]) {
- mutex_lock(&data->lock);
- ret = vl6180_hold(data, true);
- if (ret < 0)
- goto fail;
- ret = vl6180_write_byte(data->client, VL6180_ALS_GAIN,
- vl6180_als_gain[i][3]);
-fail:
- vl6180_hold(data, false);
- mutex_unlock(&data->lock);
- return ret;
- }
- }
+ int i, ret, gain;
- return -EINVAL;
+ if (val < 1 || val > 40)
+ return -EINVAL;
+
+ gain = (val * 1000000 + val2) / 1000;
+ if (gain < 1 || gain > 40000)
+ return -EINVAL;
+
+ i = find_closest(gain, vl6180_als_gain_tab,
+ ARRAY_SIZE(vl6180_als_gain_tab));
+
+ mutex_lock(&data->lock);
+ ret = vl6180_hold(data, true);
+ if (ret < 0)
+ goto fail;
+
+ ret = vl6180_write_byte(data->client, VL6180_ALS_GAIN,
+ vl6180_als_gain_tab_bits[i]);
+
+ if (ret >= 0)
+ data->als_gain_milli = vl6180_als_gain_tab[i];
+
+fail:
+ vl6180_hold(data, false);
+ mutex_unlock(&data->lock);
+ return ret;
}
-static int vl6180_set_it(struct vl6180_data *data, int val2)
+static int vl6180_set_it(struct vl6180_data *data, int val, int val2)
{
- int ret;
+ int ret, it_ms;
+
+ it_ms = (val2 + 500) / 1000; /* round to ms */
+ if (val != 0 || it_ms < 1 || it_ms > 512)
+ return -EINVAL;
mutex_lock(&data->lock);
ret = vl6180_hold(data, true);
if (ret < 0)
goto fail;
- ret = vl6180_write_word(data->client, VL6180_ALS_IT,
- (val2 - 500) / 1000); /* write value in ms */
+
+ ret = vl6180_write_word(data->client, VL6180_ALS_IT, it_ms - 1);
+
+ if (ret >= 0)
+ data->als_it_ms = it_ms;
+
fail:
vl6180_hold(data, false);
mutex_unlock(&data->lock);
@@ -411,10 +423,8 @@ static int vl6180_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_INT_TIME:
- if (val != 0 || val2 < 500 || val2 >= 512500)
- return -EINVAL;
+ return vl6180_set_it(data, val, val2);
- return vl6180_set_it(data, val2);
case IIO_CHAN_INFO_HARDWAREGAIN:
if (chan->type != IIO_LIGHT)
return -EINVAL;
@@ -429,7 +439,6 @@ static const struct iio_info vl6180_info = {
.read_raw = vl6180_read_raw,
.write_raw = vl6180_write_raw,
.attrs = &vl6180_attribute_group,
- .driver_module = THIS_MODULE,
};
static int vl6180_init(struct vl6180_data *data)
@@ -468,11 +477,13 @@ static int vl6180_init(struct vl6180_data *data)
return ret;
/* ALS integration time: 100ms */
+ data->als_it_ms = 100;
ret = vl6180_write_word(client, VL6180_ALS_IT, VL6180_ALS_IT_100);
if (ret < 0)
return ret;
/* ALS gain: 1 */
+ data->als_gain_milli = 1000;
ret = vl6180_write_byte(client, VL6180_ALS_GAIN, VL6180_ALS_GAIN_1);
if (ret < 0)
return ret;
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 0bff76e96950..93be1f4c0f27 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -658,7 +658,6 @@ static const unsigned long ak8974_scan_masks[] = { 0x7, 0 };
static const struct iio_info ak8974_info = {
.read_raw = &ak8974_read_raw,
- .driver_module = THIS_MODULE,
};
static bool ak8974_writeable_reg(struct device *dev, unsigned int reg)
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 4ff883942f7b..c09329069d0a 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -781,7 +781,6 @@ static const unsigned long ak8975_scan_masks[] = { 0x7, 0 };
static const struct iio_info ak8975_info = {
.read_raw = &ak8975_read_raw,
- .driver_module = THIS_MODULE,
};
#ifdef CONFIG_ACPI
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index d104fb8d9379..d91cb845e3d6 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -651,7 +651,6 @@ static const struct iio_info bmc150_magn_info = {
.attrs = &bmc150_magn_attrs_group,
.read_raw = bmc150_magn_read_raw,
.write_raw = bmc150_magn_write_raw,
- .driver_module = THIS_MODULE,
};
static const unsigned long bmc150_magn_scan_masks[] = {
@@ -811,7 +810,6 @@ err_unlock:
static const struct iio_trigger_ops bmc150_magn_trigger_ops = {
.set_trigger_state = bmc150_magn_data_rdy_trigger_set_state,
.try_reenable = bmc150_magn_trig_try_reen,
- .owner = THIS_MODULE,
};
static int bmc150_magn_buffer_preenable(struct iio_dev *indio_dev)
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 0e791b02ed4a..a1fd9d591818 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -282,7 +282,6 @@ static int magn_3d_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info magn_3d_info = {
- .driver_module = THIS_MODULE,
.read_raw = &magn_3d_read_raw,
.write_raw = &magn_3d_write_raw,
};
diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index ba3e2a374ee5..ada142fb7aa3 100644
--- a/drivers/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -597,7 +597,6 @@ static const struct iio_info hmc5843_info = {
.read_raw = &hmc5843_read_raw,
.write_raw = &hmc5843_write_raw,
.write_raw_get_fmt = &hmc5843_write_raw_get_fmt,
- .driver_module = THIS_MODULE,
};
static const unsigned long hmc5843_scan_masks[] = {0x7, 0};
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index dad8d57f7402..b34ace76d31b 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -333,7 +333,6 @@ static const struct iio_info mag3110_info = {
.attrs = &mag3110_group,
.read_raw = &mag3110_read_raw,
.write_raw = &mag3110_write_raw,
- .driver_module = THIS_MODULE,
};
static const unsigned long mag3110_scan_masks[] = {0x7, 0xf, 0};
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index 176e14a61558..6b640c6338c9 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -418,7 +418,6 @@ static int mmc35240_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info mmc35240_info = {
- .driver_module = THIS_MODULE,
.read_raw = mmc35240_read_raw,
.write_raw = mmc35240_write_raw,
.attrs = &mmc35240_attribute_group,
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 08aafba4481c..72f6d1335a04 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -317,7 +317,14 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
},
.drdy_irq = {
/* drdy line is routed drdy pin */
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .sim = {
+ .addr = 0x22,
+ .value = BIT(2),
},
.multi_read_bit = true,
.bootime = 2,
@@ -359,9 +366,14 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.mask = 0x10,
},
.drdy_irq = {
- .addr = 0x62,
- .mask_int1 = 0x01,
- .addr_stat_drdy = 0x67,
+ .int1 = {
+ .addr = 0x62,
+ .mask = 0x01,
+ },
+ .stat_drdy = {
+ .addr = 0x67,
+ .mask = 0x07,
+ },
},
.multi_read_bit = false,
.bootime = 2,
@@ -438,7 +450,6 @@ static const struct attribute_group st_magn_attribute_group = {
};
static const struct iio_info magn_info = {
- .driver_module = THIS_MODULE,
.attrs = &st_magn_attribute_group,
.read_raw = &st_magn_read_raw,
.write_raw = &st_magn_write_raw,
@@ -447,7 +458,6 @@ static const struct iio_info magn_info = {
#ifdef CONFIG_IIO_TRIGGER
static const struct iio_trigger_ops st_magn_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = ST_MAGN_TRIGGER_SET_STATE,
.validate_device = st_sensors_validate_device,
};
diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c
index 37ba007f8dca..60621ccd67e4 100644
--- a/drivers/iio/multiplexer/iio-mux.c
+++ b/drivers/iio/multiplexer/iio-mux.c
@@ -173,7 +173,6 @@ static const struct iio_info mux_info = {
.read_raw = mux_read_raw,
.read_avail = mux_read_avail,
.write_raw = mux_write_raw,
- .driver_module = THIS_MODULE,
};
static ssize_t mux_read_ext_info(struct iio_dev *indio_dev, uintptr_t private,
@@ -285,6 +284,9 @@ static int mux_configure_channel(struct device *dev, struct mux *mux,
child->ext_info_cache = devm_kzalloc(dev,
sizeof(*child->ext_info_cache) *
num_ext_info, GFP_KERNEL);
+ if (!child->ext_info_cache)
+ return -ENOMEM;
+
for (i = 0; i < num_ext_info; ++i) {
child->ext_info_cache[i].size = -1;
@@ -309,6 +311,9 @@ static int mux_configure_channel(struct device *dev, struct mux *mux,
child->ext_info_cache[i].data = devm_kmemdup(dev, page, ret + 1,
GFP_KERNEL);
+ if (!child->ext_info_cache[i].data)
+ return -ENOMEM;
+
child->ext_info_cache[i].data[ret] = 0;
child->ext_info_cache[i].size = ret;
}
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
index fd1b3696ee42..1e5451d1ff88 100644
--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -186,7 +186,6 @@ static int incl_3d_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info incl_3d_info = {
- .driver_module = THIS_MODULE,
.read_raw = &incl_3d_read_raw,
.write_raw = &incl_3d_write_raw,
};
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index 98fe0c5df380..a69db2002414 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -138,7 +138,6 @@ static int dev_rot_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info dev_rot_info = {
- .driver_module = THIS_MODULE,
.read_raw_multi = &dev_rot_read_raw,
.write_raw = &dev_rot_write_raw,
};
diff --git a/drivers/iio/potentiometer/ds1803.c b/drivers/iio/potentiometer/ds1803.c
index fb9e2a337dc2..9b0ff4ab2f9c 100644
--- a/drivers/iio/potentiometer/ds1803.c
+++ b/drivers/iio/potentiometer/ds1803.c
@@ -110,7 +110,6 @@ static int ds1803_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ds1803_info = {
.read_raw = ds1803_read_raw,
.write_raw = ds1803_write_raw,
- .driver_module = THIS_MODULE,
};
static int ds1803_probe(struct i2c_client *client,
diff --git a/drivers/iio/potentiometer/max5481.c b/drivers/iio/potentiometer/max5481.c
index 926554991244..ffe2761333a2 100644
--- a/drivers/iio/potentiometer/max5481.c
+++ b/drivers/iio/potentiometer/max5481.c
@@ -119,7 +119,6 @@ static int max5481_write_raw(struct iio_dev *indio_dev,
static const struct iio_info max5481_info = {
.read_raw = max5481_read_raw,
.write_raw = max5481_write_raw,
- .driver_module = THIS_MODULE,
};
#if defined(CONFIG_OF)
@@ -207,7 +206,6 @@ MODULE_DEVICE_TABLE(acpi, max5481_acpi_match);
static struct spi_driver max5481_driver = {
.driver = {
.name = "max5481",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(max5481_match),
.acpi_match_table = ACPI_PTR(max5481_acpi_match),
},
diff --git a/drivers/iio/potentiometer/max5487.c b/drivers/iio/potentiometer/max5487.c
index 6c50939a2e83..5042d3e09b12 100644
--- a/drivers/iio/potentiometer/max5487.c
+++ b/drivers/iio/potentiometer/max5487.c
@@ -83,7 +83,6 @@ static int max5487_write_raw(struct iio_dev *indio_dev,
static const struct iio_info max5487_info = {
.read_raw = max5487_read_raw,
.write_raw = max5487_write_raw,
- .driver_module = THIS_MODULE,
};
static int max5487_spi_probe(struct spi_device *spi)
@@ -147,7 +146,6 @@ MODULE_DEVICE_TABLE(acpi, max5487_acpi_match);
static struct spi_driver max5487_driver = {
.driver = {
.name = "max5487",
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(max5487_acpi_match),
},
.id_table = max5487_id,
diff --git a/drivers/iio/potentiometer/mcp4131.c b/drivers/iio/potentiometer/mcp4131.c
index 4e7e2c6c522c..b3e30db246cc 100644
--- a/drivers/iio/potentiometer/mcp4131.c
+++ b/drivers/iio/potentiometer/mcp4131.c
@@ -237,7 +237,6 @@ static int mcp4131_write_raw(struct iio_dev *indio_dev,
static const struct iio_info mcp4131_info = {
.read_raw = mcp4131_read_raw,
.write_raw = mcp4131_write_raw,
- .driver_module = THIS_MODULE,
};
static int mcp4131_probe(struct spi_device *spi)
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index 314353d7ab59..114ab876fcc6 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -207,7 +207,6 @@ static const struct iio_info mcp4531_info = {
.read_raw = mcp4531_read_raw,
.read_avail = mcp4531_read_avail,
.write_raw = mcp4531_write_raw,
- .driver_module = THIS_MODULE,
};
#ifdef CONFIG_OF
diff --git a/drivers/iio/potentiometer/tpl0102.c b/drivers/iio/potentiometer/tpl0102.c
index 7b6b54531ea2..93f9d4a8c9aa 100644
--- a/drivers/iio/potentiometer/tpl0102.c
+++ b/drivers/iio/potentiometer/tpl0102.c
@@ -106,7 +106,6 @@ static int tpl0102_write_raw(struct iio_dev *indio_dev,
static const struct iio_info tpl0102_info = {
.read_raw = tpl0102_read_raw,
.write_raw = tpl0102_write_raw,
- .driver_module = THIS_MODULE,
};
static int tpl0102_probe(struct i2c_client *client,
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index afa8de3418d0..007710991f15 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -207,7 +207,6 @@ static int lmp91000_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info lmp91000_info = {
- .driver_module = THIS_MODULE,
.read_raw = lmp91000_read_raw,
};
@@ -283,7 +282,6 @@ static int lmp91000_buffer_cb(const void *val, void *private)
}
static const struct iio_trigger_ops lmp91000_trigger_ops = {
- .owner = THIS_MODULE,
};
diff --git a/drivers/iio/pressure/abp060mg.c b/drivers/iio/pressure/abp060mg.c
index 43bdd0b9155f..46a220c70d6a 100644
--- a/drivers/iio/pressure/abp060mg.c
+++ b/drivers/iio/pressure/abp060mg.c
@@ -168,7 +168,6 @@ static int abp060mg_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info abp060mg_info = {
- .driver_module = THIS_MODULE,
.read_raw = abp060mg_read_raw,
};
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 8f26428804a2..fd1da26a62e4 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -561,7 +561,6 @@ static const struct attribute_group bmp280_attrs_group = {
};
static const struct iio_info bmp280_info = {
- .driver_module = THIS_MODULE,
.read_raw = &bmp280_read_raw,
.write_raw = &bmp280_write_raw,
.attrs = &bmp280_attrs_group,
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index 48b2a30f57ae..4599fde4dd25 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -120,14 +120,12 @@ static int cros_ec_baro_write(struct iio_dev *indio_dev,
static const struct iio_info cros_ec_baro_info = {
.read_raw = &cros_ec_baro_read,
.write_raw = &cros_ec_baro_write,
- .driver_module = THIS_MODULE,
};
static int cros_ec_baro_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
- struct cros_ec_device *ec_device;
struct iio_dev *indio_dev;
struct cros_ec_baro_state *state;
struct iio_chan_spec *channel;
@@ -137,7 +135,6 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
dev_warn(dev, "No CROS EC device found.\n");
return -EINVAL;
}
- ec_device = ec_dev->ec_dev;
indio_dev = devm_iio_device_alloc(dev, sizeof(*state));
if (!indio_dev)
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index 6848d8c80eff..4c437918f1d2 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -160,7 +160,6 @@ static int press_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info press_info = {
- .driver_module = THIS_MODULE,
.read_raw = &press_read_raw,
.write_raw = &press_write_raw,
};
diff --git a/drivers/iio/pressure/hp03.c b/drivers/iio/pressure/hp03.c
index 8c7b3ec3d84a..406934ea6228 100644
--- a/drivers/iio/pressure/hp03.c
+++ b/drivers/iio/pressure/hp03.c
@@ -208,7 +208,6 @@ static int hp03_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info hp03_info = {
- .driver_module = THIS_MODULE,
.read_raw = &hp03_read_raw,
};
diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c
index 12f769e86355..c38c19678cf6 100644
--- a/drivers/iio/pressure/hp206c.c
+++ b/drivers/iio/pressure/hp206c.c
@@ -351,7 +351,6 @@ static const struct iio_info hp206c_info = {
.attrs = &hp206c_attribute_group,
.read_raw = hp206c_read_raw,
.write_raw = hp206c_write_raw,
- .driver_module = THIS_MODULE,
};
static int hp206c_probe(struct i2c_client *client,
diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
index 8f2bce213248..ab4786d0102b 100644
--- a/drivers/iio/pressure/mpl115.c
+++ b/drivers/iio/pressure/mpl115.c
@@ -144,7 +144,6 @@ static const struct iio_chan_spec mpl115_channels[] = {
static const struct iio_info mpl115_info = {
.read_raw = &mpl115_read_raw,
- .driver_module = THIS_MODULE,
};
int mpl115_probe(struct device *dev, const char *name,
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 619b963714c7..7537547fb7ee 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -218,7 +218,6 @@ static const struct iio_chan_spec mpl3115_channels[] = {
static const struct iio_info mpl3115_info = {
.read_raw = &mpl3115_read_raw,
- .driver_module = THIS_MODULE,
};
static int mpl3115_probe(struct i2c_client *client,
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 2a77a2f15752..f950cfde5db9 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -384,7 +384,6 @@ static const struct iio_info ms5611_info = {
.read_raw = &ms5611_read_raw,
.write_raw = &ms5611_write_raw,
.attrs = &ms5611_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ms5611_init(struct iio_dev *indio_dev)
diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c
index c413f8a84a63..e2f73e6dc58f 100644
--- a/drivers/iio/pressure/ms5637.c
+++ b/drivers/iio/pressure/ms5637.c
@@ -124,7 +124,6 @@ static const struct iio_info ms5637_info = {
.read_raw = ms5637_read_raw,
.write_raw = ms5637_write_raw,
.attrs = &ms5637_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ms5637_probe(struct i2c_client *client,
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index 7d995937adba..e67eb0d971bf 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -19,6 +19,8 @@ enum st_press_type {
LPS25H,
LPS331AP,
LPS22HB,
+ LPS33HW,
+ LPS35HW,
ST_PRESS_MAX,
};
@@ -26,6 +28,8 @@ enum st_press_type {
#define LPS25H_PRESS_DEV_NAME "lps25h"
#define LPS331AP_PRESS_DEV_NAME "lps331ap"
#define LPS22HB_PRESS_DEV_NAME "lps22hb"
+#define LPS33HW_PRESS_DEV_NAME "lps33hw"
+#define LPS35HW_PRESS_DEV_NAME "lps35hw"
/**
* struct st_sensors_platform_data - default press platform data
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 34611a8ea2ce..349e5c713c03 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -280,14 +280,28 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.mask = 0x04,
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int1 = 0x04,
- .mask_int2 = 0x20,
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x04,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
+ },
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x20,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
+ },
.addr_ihl = 0x22,
.mask_ihl = 0x80,
- .addr_od = 0x22,
- .mask_od = 0x40,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x03,
+ },
+ },
+ .sim = {
+ .addr = 0x20,
+ .value = BIT(0),
},
.multi_read_bit = true,
.bootime = 2,
@@ -335,8 +349,9 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.addr = 0x20,
.mask = 0x04,
},
- .drdy_irq = {
- .addr = 0,
+ .sim = {
+ .addr = 0x20,
+ .value = BIT(0),
},
.multi_read_bit = true,
.bootime = 2,
@@ -388,14 +403,22 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.mask = 0x04,
},
.drdy_irq = {
- .addr = 0x23,
- .mask_int1 = 0x01,
- .mask_int2 = 0x00,
+ .int1 = {
+ .addr = 0x23,
+ .mask = 0x01,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
+ },
.addr_ihl = 0x22,
.mask_ihl = 0x80,
- .addr_od = 0x22,
- .mask_od = 0x40,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x03,
+ },
+ },
+ .sim = {
+ .addr = 0x20,
+ .value = BIT(0),
},
.multi_read_bit = true,
.bootime = 2,
@@ -410,6 +433,8 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS22HB_PRESS_DEV_NAME,
+ [1] = LPS33HW_PRESS_DEV_NAME,
+ [2] = LPS35HW_PRESS_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_press_lps22hb_channels,
.num_ch = ARRAY_SIZE(st_press_lps22hb_channels),
@@ -447,14 +472,22 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.mask = 0x02,
},
.drdy_irq = {
- .addr = 0x12,
- .mask_int1 = 0x04,
- .mask_int2 = 0x00,
+ .int1 = {
+ .addr = 0x12,
+ .mask = 0x04,
+ .addr_od = 0x12,
+ .mask_od = 0x40,
+ },
.addr_ihl = 0x12,
.mask_ihl = 0x80,
- .addr_od = 0x12,
- .mask_od = 0x40,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x03,
+ },
+ },
+ .sim = {
+ .addr = 0x10,
+ .value = BIT(0),
},
.multi_read_bit = false,
.bootime = 2,
@@ -547,7 +580,6 @@ static const struct attribute_group st_press_attribute_group = {
};
static const struct iio_info press_info = {
- .driver_module = THIS_MODULE,
.attrs = &st_press_attribute_group,
.read_raw = &st_press_read_raw,
.write_raw = &st_press_write_raw,
@@ -556,7 +588,6 @@ static const struct iio_info press_info = {
#ifdef CONFIG_IIO_TRIGGER
static const struct iio_trigger_ops st_press_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = ST_PRESS_TRIGGER_SET_STATE,
.validate_device = st_sensors_validate_device,
};
@@ -605,7 +636,8 @@ int st_press_common_probe(struct iio_dev *indio_dev)
press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
/* Some devices don't support a data ready pin. */
- if (!pdata && press_data->sensor_settings->drdy_irq.addr)
+ if (!pdata && (press_data->sensor_settings->drdy_irq.int1.addr ||
+ press_data->sensor_settings->drdy_irq.int2.addr))
pdata = (struct st_sensors_platform_data *)&default_press_pdata;
err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data);
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 7f15e927fa2b..fbb59059e942 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -37,6 +37,14 @@ static const struct of_device_id st_press_of_match[] = {
.compatible = "st,lps22hb-press",
.data = LPS22HB_PRESS_DEV_NAME,
},
+ {
+ .compatible = "st,lps33hw",
+ .data = LPS33HW_PRESS_DEV_NAME,
+ },
+ {
+ .compatible = "st,lps35hw",
+ .data = LPS35HW_PRESS_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
@@ -59,6 +67,8 @@ static const struct i2c_device_id st_press_id_table[] = {
{ LPS25H_PRESS_DEV_NAME, LPS25H },
{ LPS331AP_PRESS_DEV_NAME, LPS331AP },
{ LPS22HB_PRESS_DEV_NAME, LPS22HB },
+ { LPS33HW_PRESS_DEV_NAME, LPS33HW },
+ { LPS35HW_PRESS_DEV_NAME, LPS35HW },
{},
};
MODULE_DEVICE_TABLE(i2c, st_press_id_table);
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index f5ebd36bb4bf..9a3441b128e7 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -41,6 +41,14 @@ static const struct of_device_id st_press_of_match[] = {
.compatible = "st,lps22hb-press",
.data = LPS22HB_PRESS_DEV_NAME,
},
+ {
+ .compatible = "st,lps33hw",
+ .data = LPS33HW_PRESS_DEV_NAME,
+ },
+ {
+ .compatible = "st,lps35hw",
+ .data = LPS35HW_PRESS_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
@@ -83,6 +91,8 @@ static const struct spi_device_id st_press_id_table[] = {
{ LPS25H_PRESS_DEV_NAME },
{ LPS331AP_PRESS_DEV_NAME },
{ LPS22HB_PRESS_DEV_NAME },
+ { LPS33HW_PRESS_DEV_NAME },
+ { LPS35HW_PRESS_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_press_id_table);
diff --git a/drivers/iio/pressure/t5403.c b/drivers/iio/pressure/t5403.c
index 2667e71721f5..92c00f603b1d 100644
--- a/drivers/iio/pressure/t5403.c
+++ b/drivers/iio/pressure/t5403.c
@@ -209,7 +209,6 @@ static const struct iio_info t5403_info = {
.read_raw = &t5403_read_raw,
.write_raw = &t5403_write_raw,
.attrs = &t5403_attribute_group,
- .driver_module = THIS_MODULE,
};
static int t5403_probe(struct i2c_client *client,
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 91431454eb85..81d8f24eaeb4 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -1390,7 +1390,6 @@ static int zpa2326_set_trigger_state(struct iio_trigger *trig, bool state)
}
static const struct iio_trigger_ops zpa2326_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = zpa2326_set_trigger_state,
};
@@ -1590,7 +1589,6 @@ static const struct iio_chan_spec zpa2326_channels[] = {
};
static const struct iio_info zpa2326_info = {
- .driver_module = THIS_MODULE,
.attrs = &zpa2326_attribute_group,
.read_raw = zpa2326_read_raw,
.write_raw = zpa2326_write_raw,
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index ae070950f920..fcb1c4ba5e41 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -32,6 +32,16 @@ config LIDAR_LITE_V2
To compile this driver as a module, choose M here: the
module will be called pulsedlight-lite-v2
+config RFD77402
+ tristate "RFD77402 ToF sensor"
+ depends on I2C
+ help
+ Say Y to build a driver for the RFD77402 Time-of-Flight (distance)
+ sensor module with I2C interface.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rfd77402.
+
config SRF04
tristate "Devantech SRF04 ultrasonic ranger sensor"
depends on GPIOLIB
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index ad6686391f08..4f4ed45e87ef 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -6,6 +6,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AS3935) += as3935.o
obj-$(CONFIG_LIDAR_LITE_V2) += pulsedlight-lidar-lite-v2.o
+obj-$(CONFIG_RFD77402) += rfd77402.o
obj-$(CONFIG_SRF04) += srf04.o
obj-$(CONFIG_SRF08) += srf08.o
obj-$(CONFIG_SX9500) += sx9500.o
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 4a48b7ba3a1c..b6249af48014 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -221,7 +221,6 @@ static int as3935_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info as3935_info = {
- .driver_module = THIS_MODULE,
.attrs = &as3935_attribute_group,
.read_raw = &as3935_read_raw,
};
@@ -247,7 +246,6 @@ err_read:
}
static const struct iio_trigger_ops iio_interrupt_trigger_ops = {
- .owner = THIS_MODULE,
};
static void as3935_event_work(struct work_struct *work)
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 36c1ddc251aa..4d56f67b24c6 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -249,7 +249,6 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
}
static const struct iio_info lidar_info = {
- .driver_module = THIS_MODULE,
.read_raw = lidar_read_raw,
};
diff --git a/drivers/iio/proximity/rfd77402.c b/drivers/iio/proximity/rfd77402.c
new file mode 100644
index 000000000000..fe29fb1a19a6
--- /dev/null
+++ b/drivers/iio/proximity/rfd77402.c
@@ -0,0 +1,352 @@
+/*
+ * rfd77402.c - Support for RF Digital RFD77402 Time-of-Flight (distance) sensor
+ *
+ * Copyright 2017 Peter Meerwald-Stadler <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * 7-bit I2C slave address 0x4c
+ *
+ * TODO: interrupt
+ * https://media.digikey.com/pdf/Data%20Sheets/RF%20Digital%20PDFs/RFD77402.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+
+#define RFD77402_DRV_NAME "rfd77402"
+
+#define RFD77402_ICSR 0x00 /* Interrupt Control Status Register */
+#define RFD77402_ICSR_INT_MODE BIT(2)
+#define RFD77402_ICSR_INT_POL BIT(3)
+#define RFD77402_ICSR_RESULT BIT(4)
+#define RFD77402_ICSR_M2H_MSG BIT(5)
+#define RFD77402_ICSR_H2M_MSG BIT(6)
+#define RFD77402_ICSR_RESET BIT(7)
+
+#define RFD77402_CMD_R 0x04
+#define RFD77402_CMD_SINGLE 0x01
+#define RFD77402_CMD_STANDBY 0x10
+#define RFD77402_CMD_MCPU_OFF 0x11
+#define RFD77402_CMD_MCPU_ON 0x12
+#define RFD77402_CMD_RESET BIT(6)
+#define RFD77402_CMD_VALID BIT(7)
+
+#define RFD77402_STATUS_R 0x06
+#define RFD77402_STATUS_PM_MASK GENMASK(4, 0)
+#define RFD77402_STATUS_STANDBY 0x00
+#define RFD77402_STATUS_MCPU_OFF 0x10
+#define RFD77402_STATUS_MCPU_ON 0x18
+
+#define RFD77402_RESULT_R 0x08
+#define RFD77402_RESULT_DIST_MASK GENMASK(12, 2)
+#define RFD77402_RESULT_ERR_MASK GENMASK(14, 13)
+#define RFD77402_RESULT_VALID BIT(15)
+
+#define RFD77402_PMU_CFG 0x14
+#define RFD77402_PMU_MCPU_INIT BIT(9)
+
+#define RFD77402_I2C_INIT_CFG 0x1c
+#define RFD77402_I2C_ADDR_INCR BIT(0)
+#define RFD77402_I2C_DATA_INCR BIT(2)
+#define RFD77402_I2C_HOST_DEBUG BIT(5)
+#define RFD77402_I2C_MCPU_DEBUG BIT(6)
+
+#define RFD77402_CMD_CFGR_A 0x0c
+#define RFD77402_CMD_CFGR_B 0x0e
+#define RFD77402_HFCFG_0 0x20
+#define RFD77402_HFCFG_1 0x22
+#define RFD77402_HFCFG_2 0x24
+#define RFD77402_HFCFG_3 0x26
+
+#define RFD77402_MOD_CHIP_ID 0x28
+
+/* magic configuration values from datasheet */
+static const struct {
+ u8 reg;
+ u16 val;
+} rf77402_tof_config[] = {
+ {RFD77402_CMD_CFGR_A, 0xe100},
+ {RFD77402_CMD_CFGR_B, 0x10ff},
+ {RFD77402_HFCFG_0, 0x07d0},
+ {RFD77402_HFCFG_1, 0x5008},
+ {RFD77402_HFCFG_2, 0xa041},
+ {RFD77402_HFCFG_3, 0x45d4},
+};
+
+struct rfd77402_data {
+ struct i2c_client *client;
+ /* Serialize reads from the sensor */
+ struct mutex lock;
+};
+
+static const struct iio_chan_spec rfd77402_channels[] = {
+ {
+ .type = IIO_DISTANCE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int rfd77402_set_state(struct rfd77402_data *data, u8 state, u16 check)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, RFD77402_CMD_R,
+ state | RFD77402_CMD_VALID);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(10000, 20000);
+
+ ret = i2c_smbus_read_word_data(data->client, RFD77402_STATUS_R);
+ if (ret < 0)
+ return ret;
+ if ((ret & RFD77402_STATUS_PM_MASK) != check)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int rfd77402_measure(struct rfd77402_data *data)
+{
+ int ret;
+ int tries = 10;
+
+ ret = rfd77402_set_state(data, RFD77402_CMD_MCPU_ON,
+ RFD77402_STATUS_MCPU_ON);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, RFD77402_CMD_R,
+ RFD77402_CMD_SINGLE |
+ RFD77402_CMD_VALID);
+ if (ret < 0)
+ goto err;
+
+ while (tries-- > 0) {
+ ret = i2c_smbus_read_byte_data(data->client, RFD77402_ICSR);
+ if (ret < 0)
+ goto err;
+ if (ret & RFD77402_ICSR_RESULT)
+ break;
+ msleep(20);
+ }
+
+ if (tries < 0) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ ret = i2c_smbus_read_word_data(data->client, RFD77402_RESULT_R);
+ if (ret < 0)
+ goto err;
+
+ if ((ret & RFD77402_RESULT_ERR_MASK) ||
+ !(ret & RFD77402_RESULT_VALID)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ return (ret & RFD77402_RESULT_DIST_MASK) >> 2;
+
+err:
+ rfd77402_set_state(data, RFD77402_CMD_MCPU_OFF,
+ RFD77402_STATUS_MCPU_OFF);
+ return ret;
+}
+
+static int rfd77402_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct rfd77402_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->lock);
+ ret = rfd77402_measure(data);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /* 1 LSB is 1 mm */
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info rfd77402_info = {
+ .read_raw = rfd77402_read_raw,
+};
+
+static int rfd77402_init(struct rfd77402_data *data)
+{
+ int ret, i;
+
+ ret = rfd77402_set_state(data, RFD77402_CMD_STANDBY,
+ RFD77402_STATUS_STANDBY);
+ if (ret < 0)
+ return ret;
+
+ /* configure INT pad as push-pull, active low */
+ ret = i2c_smbus_write_byte_data(data->client, RFD77402_ICSR,
+ RFD77402_ICSR_INT_MODE);
+ if (ret < 0)
+ return ret;
+
+ /* I2C configuration */
+ ret = i2c_smbus_write_word_data(data->client, RFD77402_I2C_INIT_CFG,
+ RFD77402_I2C_ADDR_INCR |
+ RFD77402_I2C_DATA_INCR |
+ RFD77402_I2C_HOST_DEBUG |
+ RFD77402_I2C_MCPU_DEBUG);
+ if (ret < 0)
+ return ret;
+
+ /* set initialization */
+ ret = i2c_smbus_write_word_data(data->client, RFD77402_PMU_CFG, 0x0500);
+ if (ret < 0)
+ return ret;
+
+ ret = rfd77402_set_state(data, RFD77402_CMD_MCPU_OFF,
+ RFD77402_STATUS_MCPU_OFF);
+ if (ret < 0)
+ return ret;
+
+ /* set initialization */
+ ret = i2c_smbus_write_word_data(data->client, RFD77402_PMU_CFG, 0x0600);
+ if (ret < 0)
+ return ret;
+
+ ret = rfd77402_set_state(data, RFD77402_CMD_MCPU_ON,
+ RFD77402_STATUS_MCPU_ON);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(rf77402_tof_config); i++) {
+ ret = i2c_smbus_write_word_data(data->client,
+ rf77402_tof_config[i].reg,
+ rf77402_tof_config[i].val);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = rfd77402_set_state(data, RFD77402_CMD_STANDBY,
+ RFD77402_STATUS_STANDBY);
+
+ return ret;
+}
+
+static int rfd77402_powerdown(struct rfd77402_data *data)
+{
+ return rfd77402_set_state(data, RFD77402_CMD_STANDBY,
+ RFD77402_STATUS_STANDBY);
+}
+
+static int rfd77402_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct rfd77402_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ ret = i2c_smbus_read_word_data(client, RFD77402_MOD_CHIP_ID);
+ if (ret < 0)
+ return ret;
+ if (ret != 0xad01 && ret != 0xad02) /* known chip ids */
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &rfd77402_info;
+ indio_dev->channels = rfd77402_channels;
+ indio_dev->num_channels = ARRAY_SIZE(rfd77402_channels);
+ indio_dev->name = RFD77402_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = rfd77402_init(data);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_powerdown;
+
+ return 0;
+
+err_powerdown:
+ rfd77402_powerdown(data);
+ return ret;
+}
+
+static int rfd77402_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ rfd77402_powerdown(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rfd77402_suspend(struct device *dev)
+{
+ struct rfd77402_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+
+ return rfd77402_powerdown(data);
+}
+
+static int rfd77402_resume(struct device *dev)
+{
+ struct rfd77402_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+
+ return rfd77402_init(data);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(rfd77402_pm_ops, rfd77402_suspend, rfd77402_resume);
+
+static const struct i2c_device_id rfd77402_id[] = {
+ { "rfd77402", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rfd77402_id);
+
+static struct i2c_driver rfd77402_driver = {
+ .driver = {
+ .name = RFD77402_DRV_NAME,
+ .pm = &rfd77402_pm_ops,
+ },
+ .probe = rfd77402_probe,
+ .remove = rfd77402_remove,
+ .id_table = rfd77402_id,
+};
+
+module_i2c_driver(rfd77402_driver);
+
+MODULE_AUTHOR("Peter Meerwald-Stadler <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("RFD77402 Time-of-Flight sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
index e37667f933b3..09c7b9c095b0 100644
--- a/drivers/iio/proximity/srf04.c
+++ b/drivers/iio/proximity/srf04.c
@@ -203,7 +203,6 @@ static int srf04_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info srf04_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = srf04_read_raw,
};
diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c
index 9380d545aab1..f2bf783f829a 100644
--- a/drivers/iio/proximity/srf08.c
+++ b/drivers/iio/proximity/srf08.c
@@ -436,7 +436,6 @@ static const struct iio_chan_spec srf08_channels[] = {
static const struct iio_info srf08_info = {
.read_raw = srf08_read_raw,
.attrs = &srf08_attribute_group,
- .driver_module = THIS_MODULE,
};
/*
@@ -445,7 +444,6 @@ static const struct iio_info srf08_info = {
*/
static const struct iio_info srf02_info = {
.read_raw = srf08_read_raw,
- .driver_module = THIS_MODULE,
};
static int srf08_probe(struct i2c_client *client,
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index f42b3a1c75ff..53c5d653e780 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -615,7 +615,6 @@ static const struct attribute_group sx9500_attribute_group = {
};
static const struct iio_info sx9500_info = {
- .driver_module = THIS_MODULE,
.attrs = &sx9500_attribute_group,
.read_raw = &sx9500_read_raw,
.write_raw = &sx9500_write_raw,
@@ -650,7 +649,6 @@ out:
static const struct iio_trigger_ops sx9500_trigger_ops = {
.set_trigger_state = sx9500_set_trigger_state,
- .owner = THIS_MODULE,
};
static irqreturn_t sx9500_trigger_handler(int irq, void *private)
diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
index c01efeca4002..beaf6fd3e337 100644
--- a/drivers/iio/temperature/hid-sensor-temperature.c
+++ b/drivers/iio/temperature/hid-sensor-temperature.c
@@ -123,7 +123,6 @@ static int temperature_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info temperature_info = {
- .driver_module = THIS_MODULE,
.read_raw = &temperature_read_raw,
.write_raw = &temperature_write_raw,
};
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index d70e2e53d6a7..e8b7e0b6c8ad 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -208,7 +208,6 @@ static int maxim_thermocouple_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info maxim_thermocouple_info = {
- .driver_module = THIS_MODULE,
.read_raw = maxim_thermocouple_read_raw,
};
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index 2077eef4095c..d619e8634a00 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -400,7 +400,6 @@ static const struct iio_info mlx90614_info = {
.write_raw = mlx90614_write_raw,
.write_raw_get_fmt = mlx90614_write_raw_get_fmt,
.attrs = &mlx90614_attr_group,
- .driver_module = THIS_MODULE,
};
#ifdef CONFIG_PM
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index 18c9b43c02cb..a9b5b7cc7836 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -179,7 +179,6 @@ static const struct iio_info tmp006_info = {
.read_raw = tmp006_read_raw,
.write_raw = tmp006_write_raw,
.attrs = &tmp006_attribute_group,
- .driver_module = THIS_MODULE,
};
static bool tmp006_check_identification(struct i2c_client *client)
diff --git a/drivers/iio/temperature/tmp007.c b/drivers/iio/temperature/tmp007.c
index 0615324d054c..0e3f2d432e10 100644
--- a/drivers/iio/temperature/tmp007.c
+++ b/drivers/iio/temperature/tmp007.c
@@ -426,7 +426,6 @@ static const struct iio_info tmp007_info = {
.read_event_value = tmp007_read_thresh,
.write_event_value = tmp007_write_thresh,
.attrs = &tmp007_attribute_group,
- .driver_module = THIS_MODULE,
};
static bool tmp007_identify(struct i2c_client *client)
diff --git a/drivers/iio/temperature/tsys01.c b/drivers/iio/temperature/tsys01.c
index d8aa211d76e4..3799d007c8e7 100644
--- a/drivers/iio/temperature/tsys01.c
+++ b/drivers/iio/temperature/tsys01.c
@@ -111,7 +111,6 @@ static const struct iio_chan_spec tsys01_channels[] = {
static const struct iio_info tsys01_info = {
.read_raw = tsys01_read_raw,
- .driver_module = THIS_MODULE,
};
static bool tsys01_crc_valid(u16 *n_prom)
diff --git a/drivers/iio/temperature/tsys02d.c b/drivers/iio/temperature/tsys02d.c
index c0a19a000387..9b2e56fa5fd5 100644
--- a/drivers/iio/temperature/tsys02d.c
+++ b/drivers/iio/temperature/tsys02d.c
@@ -120,7 +120,6 @@ static const struct iio_info tsys02d_info = {
.read_raw = tsys02d_read_raw,
.write_raw = tsys02d_write_raw,
.attrs = &tsys02d_attribute_group,
- .driver_module = THIS_MODULE,
};
static int tsys02d_probe(struct i2c_client *client,
diff --git a/drivers/iio/trigger/iio-trig-hrtimer.c b/drivers/iio/trigger/iio-trig-hrtimer.c
index a1cad6cc2e0f..7accd0187ba1 100644
--- a/drivers/iio/trigger/iio-trig-hrtimer.c
+++ b/drivers/iio/trigger/iio-trig-hrtimer.c
@@ -30,7 +30,7 @@ struct iio_hrtimer_info {
ktime_t period;
};
-static struct config_item_type iio_hrtimer_type = {
+static const struct config_item_type iio_hrtimer_type = {
.ct_owner = THIS_MODULE,
};
@@ -114,7 +114,6 @@ static int iio_trig_hrtimer_set_state(struct iio_trigger *trig, bool state)
}
static const struct iio_trigger_ops iio_hrtimer_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = iio_trig_hrtimer_set_state,
};
diff --git a/drivers/iio/trigger/iio-trig-interrupt.c b/drivers/iio/trigger/iio-trig-interrupt.c
index e18f12b74610..171c4ed03543 100644
--- a/drivers/iio/trigger/iio-trig-interrupt.c
+++ b/drivers/iio/trigger/iio-trig-interrupt.c
@@ -29,7 +29,6 @@ static irqreturn_t iio_interrupt_trigger_poll(int irq, void *private)
}
static const struct iio_trigger_ops iio_interrupt_trigger_ops = {
- .owner = THIS_MODULE,
};
static int iio_interrupt_trigger_probe(struct platform_device *pdev)
diff --git a/drivers/iio/trigger/iio-trig-loop.c b/drivers/iio/trigger/iio-trig-loop.c
index dc6be28f96fe..94a90e0a3fdb 100644
--- a/drivers/iio/trigger/iio-trig-loop.c
+++ b/drivers/iio/trigger/iio-trig-loop.c
@@ -36,7 +36,7 @@ struct iio_loop_info {
struct task_struct *task;
};
-static struct config_item_type iio_loop_type = {
+static const struct config_item_type iio_loop_type = {
.ct_owner = THIS_MODULE,
};
@@ -74,7 +74,6 @@ static int iio_loop_trigger_set_state(struct iio_trigger *trig, bool state)
static const struct iio_trigger_ops iio_loop_trigger_ops = {
.set_trigger_state = iio_loop_trigger_set_state,
- .owner = THIS_MODULE,
};
static struct iio_sw_trigger *iio_trig_loop_probe(const char *name)
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index 202e8b89caf2..3f0dc9a1a514 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -127,7 +127,6 @@ static const struct attribute_group *iio_sysfs_trigger_attr_groups[] = {
};
static const struct iio_trigger_ops iio_sysfs_trigger_ops = {
- .owner = THIS_MODULE,
};
static int iio_sysfs_trigger_probe(int id)
diff --git a/drivers/iio/trigger/stm32-lptimer-trigger.c b/drivers/iio/trigger/stm32-lptimer-trigger.c
index 241eae6a4306..de361d879929 100644
--- a/drivers/iio/trigger/stm32-lptimer-trigger.c
+++ b/drivers/iio/trigger/stm32-lptimer-trigger.c
@@ -37,7 +37,6 @@ static int stm32_lptim_validate_device(struct iio_trigger *trig,
}
static const struct iio_trigger_ops stm32_lptim_trigger_ops = {
- .owner = THIS_MODULE,
.validate_device = stm32_lptim_validate_device,
};
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index eb212f8c8879..b542dc484969 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -355,7 +355,6 @@ static const struct attribute_group *stm32_trigger_attr_groups[] = {
};
static const struct iio_trigger_ops timer_trigger_ops = {
- .owner = THIS_MODULE,
};
static int stm32_setup_iio_triggers(struct stm32_timer_trigger *priv)
@@ -493,7 +492,6 @@ static int stm32_counter_validate_trigger(struct iio_dev *indio_dev,
}
static const struct iio_info stm32_trigger_info = {
- .driver_module = THIS_MODULE,
.validate_trigger = stm32_counter_validate_trigger,
.read_raw = stm32_counter_read_raw,
.write_raw = stm32_counter_write_raw
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 3726205c8704..98ac46ed7214 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -1,6 +1,5 @@
menuconfig INFINIBAND
tristate "InfiniBand support"
- depends on PCI || BROKEN
depends on HAS_IOMEM
depends on NET
depends on INET
@@ -46,6 +45,7 @@ config INFINIBAND_EXP_USER_ACCESS
config INFINIBAND_USER_MEM
bool
depends on INFINIBAND_USER_ACCESS != n
+ depends on MMU
default y
config INFINIBAND_ON_DEMAND_PAGING
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 9c0a2b5c834e..504b926552c6 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -15,7 +15,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
security.o nldev.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
-ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
+ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
ib_cm-y := cm.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 12523f630b61..f4e8185bccd3 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -229,8 +229,9 @@ void rdma_addr_unregister_client(struct rdma_addr_client *client)
}
EXPORT_SYMBOL(rdma_addr_unregister_client);
-int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
- const unsigned char *dst_dev_addr)
+void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
+ const struct net_device *dev,
+ const unsigned char *dst_dev_addr)
{
dev_addr->dev_type = dev->type;
memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
@@ -238,7 +239,6 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
if (dst_dev_addr)
memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
dev_addr->bound_dev_if = dev->ifindex;
- return 0;
}
EXPORT_SYMBOL(rdma_copy_addr);
@@ -247,15 +247,14 @@ int rdma_translate_ip(const struct sockaddr *addr,
u16 *vlan_id)
{
struct net_device *dev;
- int ret = -EADDRNOTAVAIL;
if (dev_addr->bound_dev_if) {
dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
if (!dev)
return -ENODEV;
- ret = rdma_copy_addr(dev_addr, dev, NULL);
+ rdma_copy_addr(dev_addr, dev, NULL);
dev_put(dev);
- return ret;
+ return 0;
}
switch (addr->sa_family) {
@@ -264,9 +263,9 @@ int rdma_translate_ip(const struct sockaddr *addr,
((const struct sockaddr_in *)addr)->sin_addr.s_addr);
if (!dev)
- return ret;
+ return -EADDRNOTAVAIL;
- ret = rdma_copy_addr(dev_addr, dev, NULL);
+ rdma_copy_addr(dev_addr, dev, NULL);
dev_addr->bound_dev_if = dev->ifindex;
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(dev);
@@ -279,7 +278,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
if (ipv6_chk_addr(dev_addr->net,
&((const struct sockaddr_in6 *)addr)->sin6_addr,
dev, 1)) {
- ret = rdma_copy_addr(dev_addr, dev, NULL);
+ rdma_copy_addr(dev_addr, dev, NULL);
dev_addr->bound_dev_if = dev->ifindex;
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(dev);
@@ -290,7 +289,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
break;
#endif
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL(rdma_translate_ip);
@@ -336,7 +335,7 @@ static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
const void *daddr)
{
struct neighbour *n;
- int ret;
+ int ret = 0;
n = dst_neigh_lookup(dst, daddr);
@@ -346,7 +345,7 @@ static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
neigh_event_send(n, NULL);
ret = -ENODATA;
} else {
- ret = rdma_copy_addr(dev_addr, dst->dev, n->ha);
+ rdma_copy_addr(dev_addr, dst->dev, n->ha);
}
rcu_read_unlock();
@@ -494,7 +493,9 @@ static int addr_resolve_neigh(struct dst_entry *dst,
if (!(dst->dev->flags & IFF_NOARP))
return fetch_ha(dst, addr, dst_in, seq);
- return rdma_copy_addr(addr, dst->dev, NULL);
+ rdma_copy_addr(addr, dst->dev, NULL);
+
+ return 0;
}
static int addr_resolve(struct sockaddr *src_in,
@@ -852,7 +853,7 @@ static struct notifier_block nb = {
int addr_init(void)
{
- addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM);
+ addr_wq = alloc_ordered_workqueue("ib_addr", 0);
if (!addr_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 4c4b46586af2..f6b159d79977 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1472,31 +1472,29 @@ static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
sa_path_set_dlid(primary_path,
- htonl(ntohs(req_msg->primary_local_lid)));
+ ntohs(req_msg->primary_local_lid));
sa_path_set_slid(primary_path,
- htonl(ntohs(req_msg->primary_remote_lid)));
+ ntohs(req_msg->primary_remote_lid));
} else {
lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
- sa_path_set_dlid(primary_path, cpu_to_be32(lid));
+ sa_path_set_dlid(primary_path, lid);
lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
- sa_path_set_slid(primary_path, cpu_to_be32(lid));
+ sa_path_set_slid(primary_path, lid);
}
if (!cm_req_has_alt_path(req_msg))
return;
if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
- sa_path_set_dlid(alt_path,
- htonl(ntohs(req_msg->alt_local_lid)));
- sa_path_set_slid(alt_path,
- htonl(ntohs(req_msg->alt_remote_lid)));
+ sa_path_set_dlid(alt_path, ntohs(req_msg->alt_local_lid));
+ sa_path_set_slid(alt_path, ntohs(req_msg->alt_remote_lid));
} else {
lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
- sa_path_set_dlid(alt_path, cpu_to_be32(lid));
+ sa_path_set_dlid(alt_path, lid);
lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
- sa_path_set_slid(alt_path, cpu_to_be32(lid));
+ sa_path_set_slid(alt_path, lid);
}
}
@@ -1575,7 +1573,7 @@ static void cm_format_req_event(struct cm_work *work,
param->bth_pkey = cm_get_bth_pkey(work);
param->port = cm_id_priv->av.port->port_num;
param->primary_path = &work->path[0];
- if (req_msg->alt_local_lid)
+ if (cm_req_has_alt_path(req_msg))
param->alternate_path = &work->path[1];
else
param->alternate_path = NULL;
@@ -1856,7 +1854,8 @@ static int cm_req_handler(struct cm_work *work)
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
memset(&work->path[0], 0, sizeof(work->path[0]));
- memset(&work->path[1], 0, sizeof(work->path[1]));
+ if (cm_req_has_alt_path(req_msg))
+ memset(&work->path[1], 0, sizeof(work->path[1]));
grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
work->port->port_num,
@@ -2810,6 +2809,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
msg_response = CM_MSG_RESPONSE_OTHER;
break;
}
+ /* fall through */
default:
ret = -EINVAL;
goto error1;
@@ -3037,14 +3037,14 @@ static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
u32 lid;
if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
- sa_path_set_dlid(path, htonl(ntohs(lap_msg->alt_local_lid)));
- sa_path_set_slid(path, htonl(ntohs(lap_msg->alt_remote_lid)));
+ sa_path_set_dlid(path, ntohs(lap_msg->alt_local_lid));
+ sa_path_set_slid(path, ntohs(lap_msg->alt_remote_lid));
} else {
lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid);
- sa_path_set_dlid(path, cpu_to_be32(lid));
+ sa_path_set_dlid(path, lid);
lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid);
- sa_path_set_slid(path, cpu_to_be32(lid));
+ sa_path_set_slid(path, lid);
}
}
@@ -3817,14 +3817,16 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
struct cm_port *port = mad_agent->context;
struct cm_work *work;
enum ib_cm_event_type event;
+ bool alt_path = false;
u16 attr_id;
int paths = 0;
int going_down = 0;
switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
case CM_REQ_ATTR_ID:
- paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
- alt_local_lid != 0);
+ alt_path = cm_req_has_alt_path((struct cm_req_msg *)
+ mad_recv_wc->recv_buf.mad);
+ paths = 1 + (alt_path != 0);
event = IB_CM_REQ_RECEIVED;
break;
case CM_MRA_ATTR_ID:
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 852c8fec8088..1fdb473b5df7 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1540,7 +1540,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
return id_priv;
}
-static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
+static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
{
return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}
@@ -1846,9 +1846,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
if (net_dev) {
- ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
- if (ret)
- goto err;
+ rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
} else {
if (!cma_protocol_roce(listen_id) &&
cma_any_addr(cma_src_addr(id_priv))) {
@@ -1894,9 +1892,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
goto err;
if (net_dev) {
- ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
- if (ret)
- goto err;
+ rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
} else {
if (!cma_any_addr(cma_src_addr(id_priv))) {
ret = cma_translate_addr(cma_src_addr(id_priv),
@@ -1942,7 +1938,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
struct rdma_id_private *listen_id, *conn_id = NULL;
struct rdma_cm_event event;
struct net_device *net_dev;
- int offset, ret;
+ u8 offset;
+ int ret;
listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
if (IS_ERR(listen_id))
@@ -3440,7 +3437,8 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
struct ib_cm_sidr_req_param req;
struct ib_cm_id *id;
void *private_data;
- int offset, ret;
+ u8 offset;
+ int ret;
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
@@ -3497,7 +3495,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
struct rdma_route *route;
void *private_data;
struct ib_cm_id *id;
- int offset, ret;
+ u8 offset;
+ int ret;
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 54076a3e8007..31dfee0c8295 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -186,7 +186,7 @@ static struct configfs_attribute *cma_configfs_attributes[] = {
NULL,
};
-static struct config_item_type cma_port_group_type = {
+static const struct config_item_type cma_port_group_type = {
.ct_attrs = cma_configfs_attributes,
.ct_owner = THIS_MODULE
};
@@ -263,7 +263,7 @@ static struct configfs_item_operations cma_ports_item_ops = {
.release = release_cma_ports_group
};
-static struct config_item_type cma_ports_group_type = {
+static const struct config_item_type cma_ports_group_type = {
.ct_item_ops = &cma_ports_item_ops,
.ct_owner = THIS_MODULE
};
@@ -272,7 +272,7 @@ static struct configfs_item_operations cma_device_item_ops = {
.release = release_cma_dev
};
-static struct config_item_type cma_device_group_type = {
+static const struct config_item_type cma_device_group_type = {
.ct_item_ops = &cma_device_item_ops,
.ct_owner = THIS_MODULE
};
@@ -323,7 +323,7 @@ static struct configfs_group_operations cma_subsys_group_ops = {
.make_group = make_cma_dev,
};
-static struct config_item_type cma_subsys_type = {
+static const struct config_item_type cma_subsys_type = {
.ct_group_ops = &cma_subsys_group_ops,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index fcf42f6bb82a..e9e189ec7502 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -447,9 +447,6 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
*/
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
- struct iwcm_id_private *cm_id_priv;
-
- cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
destroy_cm_id(cm_id);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index f8f53bb90837..cb91245e9163 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1974,14 +1974,15 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
unsigned long flags;
int ret;
+ INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
ret = ib_mad_enforce_security(mad_agent_priv,
mad_recv_wc->wc->pkey_index);
if (ret) {
ib_free_recv_mad(mad_recv_wc);
deref_mad_agent(mad_agent_priv);
+ return;
}
- INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 6ca607e8e293..c8963e91f92a 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -384,21 +384,17 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
count += ret;
prev_wr = &ctx->sig->data.reg_wr.wr;
- if (prot_sg_cnt) {
- ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
- prot_sg, prot_sg_cnt, 0);
- if (ret < 0)
- goto out_destroy_data_mr;
- count += ret;
+ ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
+ prot_sg, prot_sg_cnt, 0);
+ if (ret < 0)
+ goto out_destroy_data_mr;
+ count += ret;
- if (ctx->sig->prot.inv_wr.next)
- prev_wr->next = &ctx->sig->prot.inv_wr;
- else
- prev_wr->next = &ctx->sig->prot.reg_wr.wr;
- prev_wr = &ctx->sig->prot.reg_wr.wr;
- } else {
- ctx->sig->prot.mr = NULL;
- }
+ if (ctx->sig->prot.inv_wr.next)
+ prev_wr->next = &ctx->sig->prot.inv_wr;
+ else
+ prev_wr->next = &ctx->sig->prot.reg_wr.wr;
+ prev_wr = &ctx->sig->prot.reg_wr.wr;
ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
if (!ctx->sig->sig_mr) {
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 88bdafb297f5..23278ed5be45 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -87,16 +87,14 @@ static int enforce_qp_pkey_security(u16 pkey,
if (ret)
return ret;
- if (qp_sec->qp == qp_sec->qp->real_qp) {
- list_for_each_entry(shared_qp_sec,
- &qp_sec->shared_qp_list,
- shared_qp_list) {
- ret = security_ib_pkey_access(shared_qp_sec->security,
- subnet_prefix,
- pkey);
- if (ret)
- return ret;
- }
+ list_for_each_entry(shared_qp_sec,
+ &qp_sec->shared_qp_list,
+ shared_qp_list) {
+ ret = security_ib_pkey_access(shared_qp_sec->security,
+ subnet_prefix,
+ pkey);
+ if (ret)
+ return ret;
}
return 0;
}
@@ -560,15 +558,22 @@ int ib_security_modify_qp(struct ib_qp *qp,
int ret = 0;
struct ib_ports_pkeys *tmp_pps;
struct ib_ports_pkeys *new_pps;
- bool special_qp = (qp->qp_type == IB_QPT_SMI ||
- qp->qp_type == IB_QPT_GSI ||
- qp->qp_type >= IB_QPT_RESERVED1);
+ struct ib_qp *real_qp = qp->real_qp;
+ bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
+ real_qp->qp_type == IB_QPT_GSI ||
+ real_qp->qp_type >= IB_QPT_RESERVED1);
bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
(qp_attr_mask & IB_QP_ALT_PATH));
+ /* The port/pkey settings are maintained only for the real QP. Open
+ * handles on the real QP will be in the shared_qp_list. When
+ * enforcing security on the real QP all the shared QPs will be
+ * checked as well.
+ */
+
if (pps_change && !special_qp) {
- mutex_lock(&qp->qp_sec->mutex);
- new_pps = get_new_pps(qp,
+ mutex_lock(&real_qp->qp_sec->mutex);
+ new_pps = get_new_pps(real_qp,
qp_attr,
qp_attr_mask);
@@ -586,14 +591,14 @@ int ib_security_modify_qp(struct ib_qp *qp,
if (!ret)
ret = check_qp_port_pkey_settings(new_pps,
- qp->qp_sec);
+ real_qp->qp_sec);
}
if (!ret)
- ret = qp->device->modify_qp(qp->real_qp,
- qp_attr,
- qp_attr_mask,
- udata);
+ ret = real_qp->device->modify_qp(real_qp,
+ qp_attr,
+ qp_attr_mask,
+ udata);
if (pps_change && !special_qp) {
/* Clean up the lists and free the appropriate
@@ -602,8 +607,8 @@ int ib_security_modify_qp(struct ib_qp *qp,
if (ret) {
tmp_pps = new_pps;
} else {
- tmp_pps = qp->qp_sec->ports_pkeys;
- qp->qp_sec->ports_pkeys = new_pps;
+ tmp_pps = real_qp->qp_sec->ports_pkeys;
+ real_qp->qp_sec->ports_pkeys = new_pps;
}
if (tmp_pps) {
@@ -611,7 +616,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
port_pkey_list_remove(&tmp_pps->alt);
}
kfree(tmp_pps);
- mutex_unlock(&qp->qp_sec->mutex);
+ mutex_unlock(&real_qp->qp_sec->mutex);
}
return ret;
}
@@ -692,20 +697,13 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
- int ret;
-
if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
return -EACCES;
- ret = ib_security_pkey_access(map->agent.device,
- map->agent.port_num,
- pkey_index,
- map->agent.security);
-
- if (ret)
- return ret;
-
- return 0;
+ return ib_security_pkey_access(map->agent.device,
+ map->agent.port_num,
+ pkey_index,
+ map->agent.security);
}
#endif /* CONFIG_SECURITY_INFINIBAND */
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index abc5ab581f82..e30d86fa1855 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -108,8 +108,22 @@ static ssize_t port_attr_show(struct kobject *kobj,
return port_attr->show(p, port_attr, buf);
}
+static ssize_t port_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct port_attribute *port_attr =
+ container_of(attr, struct port_attribute, attr);
+ struct ib_port *p = container_of(kobj, struct ib_port, kobj);
+
+ if (!port_attr->store)
+ return -EIO;
+ return port_attr->store(p, port_attr, buf, count);
+}
+
static const struct sysfs_ops port_sysfs_ops = {
- .show = port_attr_show
+ .show = port_attr_show,
+ .store = port_attr_store
};
static ssize_t gid_attr_show(struct kobject *kobj,
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 55e8f5ed8b3c..2aadf5813a40 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -39,11 +39,44 @@
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
+#include <linux/interval_tree_generic.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
+/*
+ * The ib_umem list keeps track of memory regions for which the HW
+ * device request to receive notification when the related memory
+ * mapping is changed.
+ *
+ * ib_umem_lock protects the list.
+ */
+
+static u64 node_start(struct umem_odp_node *n)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(n, struct ib_umem_odp, interval_tree);
+
+ return ib_umem_start(umem_odp->umem);
+}
+
+/* Note that the representation of the intervals in the interval tree
+ * considers the ending point as contained in the interval, while the
+ * function ib_umem_end returns the first address which is not contained
+ * in the umem.
+ */
+static u64 node_last(struct umem_odp_node *n)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(n, struct ib_umem_odp, interval_tree);
+
+ return ib_umem_end(umem_odp->umem) - 1;
+}
+
+INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
+ node_start, node_last, static, rbt_ib_umem)
+
static void ib_umem_notifier_start_account(struct ib_umem *item)
{
mutex_lock(&item->odp_data->umem_mutex);
@@ -754,3 +787,42 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
+
+/* @last is not a part of the interval. See comment for function
+ * node_last.
+ */
+int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
+ u64 start, u64 last,
+ umem_call_back cb,
+ void *cookie)
+{
+ int ret_val = 0;
+ struct umem_odp_node *node, *next;
+ struct ib_umem_odp *umem;
+
+ if (unlikely(start == last))
+ return ret_val;
+
+ for (node = rbt_ib_umem_iter_first(root, start, last - 1);
+ node; node = next) {
+ next = rbt_ib_umem_iter_next(node, start, last - 1);
+ umem = container_of(node, struct ib_umem_odp, interval_tree);
+ ret_val = cb(umem->umem, start, last, cookie) || ret_val;
+ }
+
+ return ret_val;
+}
+EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);
+
+struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
+ u64 addr, u64 length)
+{
+ struct umem_odp_node *node;
+
+ node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
+ if (node)
+ return container_of(node, struct ib_umem_odp, interval_tree);
+ return NULL;
+
+}
+EXPORT_SYMBOL(rbt_ib_umem_lookup);
diff --git a/drivers/infiniband/core/umem_rbtree.c b/drivers/infiniband/core/umem_rbtree.c
deleted file mode 100644
index fc801920e341..000000000000
--- a/drivers/infiniband/core/umem_rbtree.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interval_tree_generic.h>
-#include <linux/sched.h>
-#include <linux/gfp.h>
-#include <rdma/ib_umem_odp.h>
-
-/*
- * The ib_umem list keeps track of memory regions for which the HW
- * device request to receive notification when the related memory
- * mapping is changed.
- *
- * ib_umem_lock protects the list.
- */
-
-static inline u64 node_start(struct umem_odp_node *n)
-{
- struct ib_umem_odp *umem_odp =
- container_of(n, struct ib_umem_odp, interval_tree);
-
- return ib_umem_start(umem_odp->umem);
-}
-
-/* Note that the representation of the intervals in the interval tree
- * considers the ending point as contained in the interval, while the
- * function ib_umem_end returns the first address which is not contained
- * in the umem.
- */
-static inline u64 node_last(struct umem_odp_node *n)
-{
- struct ib_umem_odp *umem_odp =
- container_of(n, struct ib_umem_odp, interval_tree);
-
- return ib_umem_end(umem_odp->umem) - 1;
-}
-
-INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
- node_start, node_last, , rbt_ib_umem)
-
-/* @last is not a part of the interval. See comment for function
- * node_last.
- */
-int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
- u64 start, u64 last,
- umem_call_back cb,
- void *cookie)
-{
- int ret_val = 0;
- struct umem_odp_node *node, *next;
- struct ib_umem_odp *umem;
-
- if (unlikely(start == last))
- return ret_val;
-
- for (node = rbt_ib_umem_iter_first(root, start, last - 1);
- node; node = next) {
- next = rbt_ib_umem_iter_next(node, start, last - 1);
- umem = container_of(node, struct ib_umem_odp, interval_tree);
- ret_val = cb(umem->umem, start, last, cookie) || ret_val;
- }
-
- return ret_val;
-}
-EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);
-
-struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
- u64 addr, u64 length)
-{
- struct umem_odp_node *node;
-
- node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
- if (node)
- return container_of(node, struct ib_umem_odp, interval_tree);
- return NULL;
-
-}
-EXPORT_SYMBOL(rbt_ib_umem_lookup);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index c1696e6084b2..4b64dd02e090 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -229,7 +229,16 @@ static void recv_handler(struct ib_mad_agent *agent,
packet->mad.hdr.status = 0;
packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len;
packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
- packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
+ /*
+ * On OPA devices it is okay to lose the upper 16 bits of LID as this
+ * information is obtained elsewhere. Mask off the upper 16 bits.
+ */
+ if (agent->device->port_immutable[agent->port_num].core_cap_flags &
+ RDMA_CORE_PORT_INTEL_OPA)
+ packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
+ mad_recv_wc->wc->slid);
+ else
+ packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
packet->mad.hdr.sl = mad_recv_wc->wc->sl;
packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
@@ -506,7 +515,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
rdma_ah_set_dgid_raw(&ah_attr, packet->mad.hdr.gid);
}
- ah = rdma_create_ah(agent->qp->pd, &ah_attr);
+ ah = rdma_create_user_ah(agent->qp->pd, &ah_attr, NULL);
if (IS_ERR(ah)) {
ret = PTR_ERR(ah);
goto err_up;
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 37c8903e7fd0..deccefb71a6b 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -47,21 +47,28 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
-#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
- do { \
- (udata)->inbuf = (const void __user *) (ibuf); \
- (udata)->outbuf = (void __user *) (obuf); \
- (udata)->inlen = (ilen); \
- (udata)->outlen = (olen); \
- } while (0)
+static inline void
+ib_uverbs_init_udata(struct ib_udata *udata,
+ const void __user *ibuf,
+ void __user *obuf,
+ size_t ilen, size_t olen)
+{
+ udata->inbuf = ibuf;
+ udata->outbuf = obuf;
+ udata->inlen = ilen;
+ udata->outlen = olen;
+}
-#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen) \
- do { \
- (udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL; \
- (udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL; \
- (udata)->inlen = (ilen); \
- (udata)->outlen = (olen); \
- } while (0)
+static inline void
+ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
+ const void __user *ibuf,
+ void __user *obuf,
+ size_t ilen, size_t olen)
+{
+ ib_uverbs_init_udata(udata,
+ ilen ? ibuf : NULL, olen ? obuf : NULL,
+ ilen, olen);
+}
/*
* Our lifetime rules for these structs are the following:
@@ -299,5 +306,6 @@ IB_UVERBS_DECLARE_EX_CMD(destroy_wq);
IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table);
IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table);
IB_UVERBS_DECLARE_EX_CMD(modify_qp);
+IB_UVERBS_DECLARE_EX_CMD(modify_cq);
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 52a2cf2d83aa..16d55710b116 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -91,8 +91,8 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
goto err;
}
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -141,8 +141,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
goto err_fd;
}
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
goto err_file;
}
@@ -238,8 +237,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
memset(&resp, 0, sizeof resp);
copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
return in_len;
@@ -295,8 +293,7 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
resp.link_layer = rdma_port_get_link_layer(ib_dev,
cmd.port_num);
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
return in_len;
@@ -320,8 +317,8 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -344,8 +341,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
memset(&resp, 0, sizeof resp);
resp.pd_handle = uobj->id;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
goto err_copy;
}
@@ -490,8 +486,8 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -556,8 +552,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
atomic_inc(&xrcd->usecnt);
}
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
goto err_copy;
}
@@ -655,8 +650,8 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -705,8 +700,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
resp.rkey = mr->rkey;
resp.mr_handle = uobj->id;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
goto err_copy;
}
@@ -748,8 +742,8 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof(cmd)))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -800,8 +794,7 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
resp.lkey = mr->lkey;
resp.rkey = mr->rkey;
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &resp, sizeof(resp)))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
ret = -EFAULT;
else
ret = in_len;
@@ -867,8 +860,8 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
goto err_free;
}
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long)cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -889,8 +882,7 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
resp.rkey = mw->rkey;
resp.mw_handle = uobj->id;
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &resp, sizeof(resp))) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
ret = -EFAULT;
goto err_copy;
}
@@ -956,8 +948,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
uobj_file.uobj);
ib_uverbs_init_event_queue(&ev_file->ev_queue);
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
uobj_alloc_abort(uobj);
return -EFAULT;
}
@@ -1087,10 +1078,11 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof(cmd)))
return -EFAULT;
- INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));
+ ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
+ sizeof(cmd), sizeof(resp));
- INIT_UDATA(&uhw, buf + sizeof(cmd),
- (unsigned long)cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -1173,8 +1165,8 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -1188,8 +1180,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
resp.cqe = cq->cqe;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp.cqe))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
ret = -EFAULT;
out:
@@ -1249,7 +1240,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
return -EINVAL;
/* we copy a struct ib_uverbs_poll_cq_resp to user space */
- header_ptr = (void __user *)(unsigned long) cmd.response;
+ header_ptr = u64_to_user_ptr(cmd.response);
data_ptr = header_ptr + sizeof resp;
memset(&resp, 0, sizeof resp);
@@ -1343,8 +1334,7 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
resp.async_events_reported = obj->async_events_reported;
uverbs_uobject_put(uobj);
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
return in_len;
@@ -1501,7 +1491,8 @@ static int create_qp(struct ib_uverbs_file *file,
IB_QP_CREATE_MANAGED_RECV |
IB_QP_CREATE_SCATTER_FCS |
IB_QP_CREATE_CVLAN_STRIPPING |
- IB_QP_CREATE_SOURCE_QPN)) {
+ IB_QP_CREATE_SOURCE_QPN |
+ IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
ret = -EINVAL;
goto err_put;
}
@@ -1650,10 +1641,10 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof(cmd)))
return -EFAULT;
- INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
- resp_size);
- INIT_UDATA(&uhw, buf + sizeof(cmd),
- (unsigned long)cmd.response + resp_size,
+ ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
+ sizeof(cmd), resp_size);
+ ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + resp_size,
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - resp_size);
@@ -1750,8 +1741,8 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -1795,8 +1786,7 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
resp.qpn = qp->qp_num;
resp.qp_handle = obj->uevent.uobject.id;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
goto err_destroy;
}
@@ -1911,8 +1901,7 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
resp.max_inline_data = init_attr->cap.max_inline_data;
resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
ret = -EFAULT;
out:
@@ -2042,7 +2031,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
return -EOPNOTSUPP;
- INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
out_len);
@@ -2126,8 +2115,7 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
resp.events_reported = obj->uevent.events_reported;
uverbs_uobject_put(uobj);
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
return in_len;
@@ -2311,8 +2299,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
break;
}
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
ret = -EFAULT;
out_put:
@@ -2460,8 +2447,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
}
}
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
ret = -EFAULT;
out:
@@ -2510,8 +2496,7 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
break;
}
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
ret = -EFAULT;
out:
@@ -2537,7 +2522,6 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
struct rdma_ah_attr attr;
int ret;
struct ib_udata udata;
- u8 *dmac;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -2548,8 +2532,8 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
return -EINVAL;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long)cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -2580,28 +2564,20 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
} else {
rdma_ah_set_ah_flags(&attr, 0);
}
- dmac = rdma_ah_retrieve_dmac(&attr);
- if (dmac)
- memset(dmac, 0, ETH_ALEN);
-
- ah = pd->device->create_ah(pd, &attr, &udata);
+ ah = rdma_create_user_ah(pd, &attr, &udata);
if (IS_ERR(ah)) {
ret = PTR_ERR(ah);
goto err_put;
}
- ah->device = pd->device;
- ah->pd = pd;
- atomic_inc(&pd->usecnt);
ah->uobject = uobj;
uobj->user_handle = cmd.user_handle;
uobj->object = ah;
resp.ah_handle = uobj->id;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
goto err_copy;
}
@@ -3627,8 +3603,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
xcmd.max_sge = cmd.max_sge;
xcmd.srq_limit = cmd.srq_limit;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -3654,8 +3630,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -3680,7 +3656,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
+ ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
out_len);
srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
@@ -3731,8 +3707,7 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
resp.max_sge = attr.max_sge;
resp.srq_limit = attr.srq_limit;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
return in_len;
@@ -3773,8 +3748,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
}
resp.events_reported = obj->events_reported;
uverbs_uobject_put(uobj);
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &resp, sizeof(resp)))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
return -EFAULT;
return in_len;
@@ -3878,7 +3852,58 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
resp.tm_caps.max_sge = attr.tm_caps.max_sge;
resp.tm_caps.flags = attr.tm_caps.flags;
resp.response_length += sizeof(resp.tm_caps);
+
+ if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps))
+ goto end;
+
+ resp.cq_moderation_caps.max_cq_moderation_count =
+ attr.cq_caps.max_cq_moderation_count;
+ resp.cq_moderation_caps.max_cq_moderation_period =
+ attr.cq_caps.max_cq_moderation_period;
+ resp.response_length += sizeof(resp.cq_moderation_caps);
end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
return err;
}
+
+int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_modify_cq cmd = {};
+ struct ib_cq *cq;
+ size_t required_cmd_sz;
+ int ret;
+
+ required_cmd_sz = offsetof(typeof(cmd), reserved) +
+ sizeof(cmd.reserved);
+ if (ucore->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ /* sanity checks */
+ if (ucore->inlen > sizeof(cmd) &&
+ !ib_is_udata_cleared(ucore, sizeof(cmd),
+ ucore->inlen - sizeof(cmd)))
+ return -EOPNOTSUPP;
+
+ ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+ if (ret)
+ return ret;
+
+ if (!cmd.attr_mask || cmd.reserved)
+ return -EINVAL;
+
+ if (cmd.attr_mask > IB_CQ_MODERATE)
+ return -EOPNOTSUPP;
+
+ cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
+ if (!cq)
+ return -EINVAL;
+
+ ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
+
+ uobj_put_obj_read(cq);
+
+ return ret;
+}
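
Note: the uverbs_cmd.c hunks above replace the open-coded (void __user *)(unsigned long) double cast with u64_to_user_ptr() when turning the 64-bit response ABI field into a user pointer. A minimal sketch of that pattern follows; the helper name is hypothetical and not taken from the patch.

/* Hypothetical helper illustrating the u64_to_user_ptr() conversion. */
#include <linux/kernel.h>	/* u64_to_user_ptr() */
#include <linux/uaccess.h>	/* copy_to_user() */

static int copy_resp_to_user(u64 response, const void *resp, size_t len)
{
	/* "response" carries a user virtual address in a fixed-width u64 field */
	if (copy_to_user(u64_to_user_ptr(response), resp, len))
		return -EFAULT;
	return 0;
}
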
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 5286ad57d903..71ff2644e053 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -241,9 +241,7 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
struct uverbs_attr *curr_attr;
unsigned long *curr_bitmap;
size_t ctx_size;
-#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
uintptr_t data[UVERBS_OPTIMIZE_USING_STACK_SZ / sizeof(uintptr_t)];
-#endif
if (hdr->reserved)
return -EINVAL;
@@ -269,13 +267,10 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
(method_spec->num_child_attrs / BITS_PER_LONG +
method_spec->num_buckets);
-#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
if (ctx_size <= UVERBS_OPTIMIZE_USING_STACK_SZ)
ctx = (void *)data;
-
if (!ctx)
-#endif
- ctx = kmalloc(ctx_size, GFP_KERNEL);
+ ctx = kmalloc(ctx_size, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -311,10 +306,8 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
err = uverbs_handle_method(buf, ctx->uattrs, hdr->num_attrs, ib_dev,
file, method_spec, ctx->uverbs_attr_bundle);
out:
-#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
- if (ctx_size > UVERBS_OPTIMIZE_USING_STACK_SZ)
-#endif
- kfree(ctx);
+ if (ctx != (void *)data)
+ kfree(ctx);
return err;
}
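
Note: the uverbs_ioctl.c change above drops the #ifdef around the on-stack buffer and keys the free on whether the heap path was actually taken. A minimal standalone sketch of that stack-or-heap pattern, assuming a hypothetical STACK_SZ and caller:

/* Sketch only; STACK_SZ and run_with_ctx() are stand-ins, not driver code. */
#include <linux/slab.h>

#define STACK_SZ 256

static int run_with_ctx(size_t ctx_size)
{
	unsigned char stack_buf[STACK_SZ];
	void *ctx = NULL;
	int err = 0;

	if (ctx_size <= STACK_SZ)
		ctx = stack_buf;
	if (!ctx)
		ctx = kmalloc(ctx_size, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* ... fill and use ctx ... */

	/* Free only when the heap fallback was used. */
	if (ctx != (void *)stack_buf)
		kfree(ctx);
	return err;
}
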
diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c
index 76ddb6564578..062485f9300d 100644
--- a/drivers/infiniband/core/uverbs_ioctl_merge.c
+++ b/drivers/infiniband/core/uverbs_ioctl_merge.c
@@ -376,7 +376,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
min_id) ||
WARN(attr_obj_with_special_access &&
!(attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY),
- "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy aceess but isn't mandatory\n",
+ "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy access but isn't mandatory\n",
min_id) ||
WARN(IS_ATTR_OBJECT(attr) &&
attr->flags & UVERBS_ATTR_SPEC_F_MIN_SZ,
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index dc2aed6fb21b..381fd9c096ae 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -128,6 +128,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
[IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
[IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
[IB_USER_VERBS_EX_CMD_MODIFY_QP] = ib_uverbs_ex_modify_qp,
+ [IB_USER_VERBS_EX_CMD_MODIFY_CQ] = ib_uverbs_ex_modify_cq,
};
static void ib_uverbs_add_one(struct ib_device *device);
@@ -763,7 +764,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
}
if (!access_ok(VERIFY_WRITE,
- (void __user *) (unsigned long) ex_hdr.response,
+ u64_to_user_ptr(ex_hdr.response),
(hdr.out_words + ex_hdr.provider_out_words) * 8)) {
ret = -EFAULT;
goto out;
@@ -775,19 +776,17 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
}
}
- INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
- hdr.in_words * 8, hdr.out_words * 8);
+ ib_uverbs_init_udata_buf_or_null(&ucore, buf,
+ u64_to_user_ptr(ex_hdr.response),
+ hdr.in_words * 8, hdr.out_words * 8);
- INIT_UDATA_BUF_OR_NULL(&uhw,
- buf + ucore.inlen,
- (unsigned long) ex_hdr.response + ucore.outlen,
- ex_hdr.provider_in_words * 8,
- ex_hdr.provider_out_words * 8);
+ ib_uverbs_init_udata_buf_or_null(&uhw,
+ buf + ucore.inlen,
+ u64_to_user_ptr(ex_hdr.response) + ucore.outlen,
+ ex_hdr.provider_in_words * 8,
+ ex_hdr.provider_out_words * 8);
- ret = uverbs_ex_cmd_table[command](file,
- ib_dev,
- &ucore,
- &uhw);
+ ret = uverbs_ex_cmd_table[command](file, ib_dev, &ucore, &uhw);
if (!ret)
ret = written_count;
} else {
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index bd0acf376af0..bb372b4713a4 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -69,8 +69,7 @@ void ib_copy_ah_attr_to_user(struct ib_device *device,
memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) &&
- (rdma_ah_get_dlid(ah_attr) >=
- be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ (rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) &&
(!rdma_ah_conv_opa_to_ib(device, &conv_ah, ah_attr)))
src = &conv_ah;
@@ -176,18 +175,18 @@ EXPORT_SYMBOL(ib_copy_path_rec_to_user);
void ib_copy_path_rec_from_user(struct sa_path_rec *dst,
struct ib_user_path_rec *src)
{
- __be32 slid, dlid;
+ u32 slid, dlid;
memset(dst, 0, sizeof(*dst));
if ((ib_is_opa_gid((union ib_gid *)src->sgid)) ||
(ib_is_opa_gid((union ib_gid *)src->dgid))) {
dst->rec_type = SA_PATH_REC_TYPE_OPA;
- slid = htonl(opa_get_lid_from_gid((union ib_gid *)src->sgid));
- dlid = htonl(opa_get_lid_from_gid((union ib_gid *)src->dgid));
+ slid = opa_get_lid_from_gid((union ib_gid *)src->sgid);
+ dlid = opa_get_lid_from_gid((union ib_gid *)src->dgid);
} else {
dst->rec_type = SA_PATH_REC_TYPE_IB;
- slid = htonl(ntohs(src->slid));
- dlid = htonl(ntohs(src->dlid));
+ slid = ntohs(src->slid);
+ dlid = ntohs(src->dlid);
}
memcpy(dst->dgid.raw, src->dgid, sizeof dst->dgid);
memcpy(dst->sgid.raw, src->sgid, sizeof dst->sgid);
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 0a98579700ec..c3ee5d9b336d 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -227,26 +227,26 @@ static void create_udata(struct uverbs_attr_bundle *ctx,
* to use uverbs_attr_bundle instead of ib_udata.
* Assume attr == 0 is input and attr == 1 is output.
*/
- void __user *inbuf;
- size_t inbuf_len = 0;
- void __user *outbuf;
- size_t outbuf_len = 0;
const struct uverbs_attr *uhw_in =
uverbs_attr_get(ctx, UVERBS_UHW_IN);
const struct uverbs_attr *uhw_out =
uverbs_attr_get(ctx, UVERBS_UHW_OUT);
if (!IS_ERR(uhw_in)) {
- inbuf = uhw_in->ptr_attr.ptr;
- inbuf_len = uhw_in->ptr_attr.len;
+ udata->inbuf = uhw_in->ptr_attr.ptr;
+ udata->inlen = uhw_in->ptr_attr.len;
+ } else {
+ udata->inbuf = NULL;
+ udata->inlen = 0;
}
if (!IS_ERR(uhw_out)) {
- outbuf = uhw_out->ptr_attr.ptr;
- outbuf_len = uhw_out->ptr_attr.len;
+ udata->outbuf = uhw_out->ptr_attr.ptr;
+ udata->outlen = uhw_out->ptr_attr.len;
+ } else {
+ udata->outbuf = NULL;
+ udata->outlen = 0;
}
-
- INIT_UDATA_BUF_OR_NULL(udata, inbuf, outbuf, inbuf_len, outbuf_len);
}
static int uverbs_create_cq_handler(struct ib_device *ib_dev,
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index de57d6c11a25..3fb8fb6cc824 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -53,6 +53,9 @@
#include "core_priv.h"
+static int ib_resolve_eth_dmac(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr);
+
static const char * const ib_events[] = {
[IB_EVENT_CQ_ERR] = "CQ error",
[IB_EVENT_QP_FATAL] = "QP fatal error",
@@ -302,11 +305,13 @@ EXPORT_SYMBOL(ib_dealloc_pd);
/* Address handles */
-struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
+static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
+ struct rdma_ah_attr *ah_attr,
+ struct ib_udata *udata)
{
struct ib_ah *ah;
- ah = pd->device->create_ah(pd, ah_attr, NULL);
+ ah = pd->device->create_ah(pd, ah_attr, udata);
if (!IS_ERR(ah)) {
ah->device = pd->device;
@@ -318,8 +323,42 @@ struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
return ah;
}
+
+struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
+{
+ return _rdma_create_ah(pd, ah_attr, NULL);
+}
EXPORT_SYMBOL(rdma_create_ah);
+/**
+ * rdma_create_user_ah - Creates an address handle for the
+ * given address vector.
+ * It resolves the destination MAC address for ah attributes of RoCE type.
+ * @pd: The protection domain associated with the address handle.
+ * @ah_attr: The attributes of the address vector.
+ * @udata: pointer to the user's input/output buffer information needed by
+ * the provider driver.
+ *
+ * It returns a valid address handle on success and an ERR_PTR-encoded error
+ * code on failure.
+ * The address handle is used to reference a local or global destination
+ * in all UD QP post sends.
+ */
+struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
+ struct rdma_ah_attr *ah_attr,
+ struct ib_udata *udata)
+{
+ int err;
+
+ if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ err = ib_resolve_eth_dmac(pd->device, ah_attr);
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ return _rdma_create_ah(pd, ah_attr, udata);
+}
+EXPORT_SYMBOL(rdma_create_user_ah);
+
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
@@ -1221,8 +1260,8 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
-int ib_resolve_eth_dmac(struct ib_device *device,
- struct rdma_ah_attr *ah_attr)
+static int ib_resolve_eth_dmac(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr)
{
int ret = 0;
struct ib_global_route *grh;
@@ -1281,7 +1320,6 @@ int ib_resolve_eth_dmac(struct ib_device *device,
out:
return ret;
}
-EXPORT_SYMBOL(ib_resolve_eth_dmac);
/**
* ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
@@ -1512,12 +1550,12 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
}
EXPORT_SYMBOL(ib_create_cq);
-int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
return cq->device->modify_cq ?
cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
-EXPORT_SYMBOL(ib_modify_cq);
+EXPORT_SYMBOL(rdma_set_cq_moderation);
int ib_destroy_cq(struct ib_cq *cq)
{
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 0d89621d9fe8..2032db7db766 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -394,6 +394,7 @@ int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
ctx->idx = tbl_idx;
ctx->refcnt = 1;
ctx_tbl[tbl_idx] = ctx;
+ *context = ctx;
return rc;
}
@@ -665,7 +666,6 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
struct bnxt_re_ah *ah;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
int rc;
- u16 vlan_tag;
u8 nw_type;
struct ib_gid_attr sgid_attr;
@@ -711,11 +711,8 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
grh->sgid_index);
goto fail;
}
- if (sgid_attr.ndev) {
- if (is_vlan_dev(sgid_attr.ndev))
- vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
+ if (sgid_attr.ndev)
dev_put(sgid_attr.ndev);
- }
/* Get network header type for this GID */
nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
switch (nw_type) {
@@ -729,14 +726,6 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
break;
}
- rc = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
- ah_attr->roce.dmac, &vlan_tag,
- &sgid_attr.ndev->ifindex,
- NULL);
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to get dmac\n");
- goto fail;
- }
}
memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
@@ -796,6 +785,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
struct bnxt_re_dev *rdev = qp->rdev;
int rc;
+ bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
bnxt_qplib_del_flush_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
@@ -1643,7 +1633,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
u8 ip_version = 0;
u16 vlan_id = 0xFFFF;
void *buf;
- int i, rc = 0, size;
+ int i, rc = 0;
memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
@@ -1760,7 +1750,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
/* Pack the QP1 to the transmit buffer */
buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
if (buf) {
- size = ib_ud_header_pack(&qp->qp1_hdr, buf);
+ ib_ud_header_pack(&qp->qp1_hdr, buf);
for (i = wqe->num_sge; i; i--) {
wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
@@ -2216,7 +2206,7 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
struct ib_recv_wr *wr)
{
struct bnxt_qplib_swqe wqe;
- int rc = 0, payload_sz = 0;
+ int rc = 0;
memset(&wqe, 0, sizeof(wqe));
while (wr) {
@@ -2231,8 +2221,7 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
rc = -EINVAL;
break;
}
- payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
- wr->num_sge);
+ bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
wqe.wr_id = wr->wr_id;
wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
@@ -2569,7 +2558,7 @@ static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
u16 raweth_qp1_flags2)
{
- bool is_udp = false, is_ipv6 = false, is_ipv4 = false;
+ bool is_ipv6 = false, is_ipv4 = false;
/* raweth_qp1_flags Bit 9-6 indicates itype */
if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
@@ -2580,7 +2569,6 @@ static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
raweth_qp1_flags2 &
CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
- is_udp = true;
/* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
(raweth_qp1_flags2 &
CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
@@ -2781,6 +2769,32 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
wc->wc_flags |= IB_WC_GRH;
}
+static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
+ u16 *vid, u8 *sl)
+{
+ bool ret = false;
+ u32 metadata;
+ u16 tpid;
+
+ metadata = orig_cqe->raweth_qp1_metadata;
+ if (orig_cqe->raweth_qp1_flags2 &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
+ tpid = ((metadata &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
+ if (tpid == ETH_P_8021Q) {
+ *vid = metadata &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
+ *sl = (metadata &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
struct bnxt_qplib_cqe *cqe)
{
@@ -2800,12 +2814,14 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
struct ib_wc *wc,
struct bnxt_qplib_cqe *cqe)
{
- u32 tbl_idx;
struct bnxt_re_dev *rdev = qp->rdev;
struct bnxt_re_qp *qp1_qp = NULL;
struct bnxt_qplib_cqe *orig_cqe = NULL;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
int nw_type;
+ u32 tbl_idx;
+ u16 vlan_id;
+ u8 sl;
tbl_idx = cqe->wr_id;
@@ -2820,6 +2836,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
wc->ex.imm_data = orig_cqe->immdata;
wc->src_qp = orig_cqe->src_qp;
memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
+ if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
+ wc->vlan_id = vlan_id;
+ wc->sl = sl;
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ }
wc->port_num = 1;
wc->vendor_err = orig_cqe->status;
@@ -3008,8 +3029,10 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
enum ib_cq_notify_flags ib_cqn_flags)
{
struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
- int type = 0;
+ int type = 0, rc = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&cq->cq_lock, flags);
/* Trigger on the very next completion */
if (ib_cqn_flags & IB_CQ_NEXT_COMP)
type = DBR_DBR_TYPE_CQ_ARMALL;
@@ -3019,12 +3042,15 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
/* Poll to see if there are missed events */
if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
- !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
- return 1;
-
+ !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
+ rc = 1;
+ goto exit;
+ }
bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
- return 0;
+exit:
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+ return rc;
}
/* Memory Regions */
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index e7450ea92aa9..aafc19aa5de1 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -78,6 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
@@ -92,11 +93,22 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
{
}
+static void bnxt_re_shutdown(void *p)
+{
+ struct bnxt_re_dev *rdev = p;
+
+ if (!rdev)
+ return;
+
+ bnxt_re_ib_unreg(rdev, false);
+}
+
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
.ulp_async_notifier = NULL,
.ulp_stop = bnxt_re_stop,
.ulp_start = bnxt_re_start,
- .ulp_sriov_config = bnxt_re_sriov_config
+ .ulp_sriov_config = bnxt_re_sriov_config,
+ .ulp_shutdown = bnxt_re_shutdown
};
/* RoCE -> Net driver */
@@ -1071,9 +1083,10 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
*/
rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
BNXT_RE_MAX_QPC_COUNT);
- if (rc)
+ if (rc) {
+ pr_err("Failed to allocate RCFW Channel: %#x\n", rc);
goto fail;
-
+ }
rc = bnxt_re_net_ring_alloc
(rdev, rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr,
rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index e8afc47f8949..61764f7aa79b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -160,11 +160,6 @@ void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
- struct bnxt_qplib_cq *scq, *rcq;
-
- scq = qp->scq;
- rcq = qp->rcq;
-
if (qp->sq.flushed) {
qp->sq.flushed = false;
list_del(&qp->sq_flush);
@@ -297,6 +292,12 @@ static void bnxt_qplib_service_nq(unsigned long data)
if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
break;
+ /*
+ * The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+
type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
switch (type) {
case NQ_BASE_TYPE_CQ_NOTIFICATION:
@@ -1118,6 +1119,11 @@ static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
continue;
+ /*
+ * The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
case CQ_BASE_CQE_TYPE_REQ:
case CQ_BASE_CQE_TYPE_TERMINAL:
@@ -1360,7 +1366,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
break;
}
- /* else, just fall thru */
+ /* fall thru */
case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
{
@@ -1901,6 +1907,11 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
/* If the next hwcqe is VALID */
if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
cq->hwq.max_elements)) {
+ /*
+ * The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
/* If the next hwcqe is a REQ */
if ((peek_hwcqe->cqe_type_toggle &
CQ_BASE_CQE_TYPE_MASK) ==
@@ -2107,6 +2118,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
/* Add qp to flush list of the CQ */
bnxt_qplib_lock_buddy_cq(qp, cq);
__bnxt_qplib_add_flush_qp(qp);
@@ -2170,6 +2182,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
/* Add qp to flush list of the CQ */
bnxt_qplib_lock_buddy_cq(qp, cq);
__bnxt_qplib_add_flush_qp(qp);
@@ -2241,6 +2254,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
+ cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
rq = &qp->rq;
if (wr_id_idx > rq->hwq.max_elements) {
@@ -2257,6 +2271,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
/* Add qp to flush list of the CQ */
bnxt_qplib_lock_buddy_cq(qp, cq);
__bnxt_qplib_add_flush_qp(qp);
@@ -2445,6 +2460,11 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
break;
+ /*
+ * The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
/* From the device's respective CQE format to qplib_wc*/
switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
case CQ_BASE_CQE_TYPE_REQ:
@@ -2518,3 +2538,10 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
atomic_set(&cq->arm_state, 1);
spin_unlock_irqrestore(&cq->hwq.lock, flags);
}
+
+void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
+{
+ flush_workqueue(qp->scq->nq->cqn_wq);
+ if (qp->scq != qp->rcq)
+ flush_workqueue(qp->rcq->nq->cqn_wq);
+}
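
Note: several qplib_fp.c hunks above insert dma_rmb() right after the CQE/NQE valid test, so the CPU cannot speculatively read the rest of a DMA'd entry before the valid bit is confirmed. A minimal sketch of the ordering idea, using a hypothetical entry layout rather than the driver's real structures:

/* Hypothetical queue entry; the real CQE/NQE layouts differ. */
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct hw_entry {
	__le32 flags;		/* bit 0: valid, written last by the device */
	__le32 payload;
};

static bool consume_entry(struct hw_entry *e, u32 *out)
{
	if (!(le32_to_cpu(e->flags) & 1))
		return false;

	/* Order the valid-bit load before loads of the other fields. */
	dma_rmb();

	*out = le32_to_cpu(e->payload);
	return true;
}
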
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 8ead70ca1c1d..c582d4ec8173 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -478,4 +478,5 @@ void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe *cqe,
int num_cqes);
+void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
#endif /* __BNXT_QPLIB_FP_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 2bdb1562bd21..bb5574adf195 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -88,7 +88,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
unsigned long flags;
u32 size, opcode;
u16 cookie, cbit;
- int pg, idx;
u8 *preq;
opcode = req->opcode;
@@ -149,9 +148,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
preq = (u8 *)req;
size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
do {
- pg = 0;
- idx = 0;
-
/* Locate the next cmdq slot */
sw_prod = HWQ_CMP(cmdq->prod, cmdq);
cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
@@ -172,14 +168,14 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
rcfw->seq_num++;
cmdq_prod = cmdq->prod;
- if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
+ if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
/* The very first doorbell write
* is required to set this flag
* which prompts the FW to reset
* its internal pointers
*/
- cmdq_prod |= FIRMWARE_FIRST_FLAG;
- rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
+ cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
+ clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
}
/* ring CMDQ DB */
@@ -306,6 +302,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
"QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
qp_id, err_event->req_err_state_reason,
err_event->res_err_state_reason);
+ if (!qp)
+ break;
bnxt_qplib_acquire_cq_locks(qp, &flags);
bnxt_qplib_mark_qp_error(qp);
bnxt_qplib_release_cq_locks(qp, &flags);
@@ -361,6 +359,10 @@ static void bnxt_qplib_service_creq(unsigned long data)
creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
break;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
type = creqe->type & CREQ_BASE_TYPE_MASK;
switch (type) {
@@ -622,7 +624,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
/* General */
rcfw->seq_num = 0;
- rcfw->flags = FIRMWARE_FIRST_FLAG;
+ set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
sizeof(unsigned long));
rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 85b16da287f9..2946a7cfae82 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -162,9 +162,9 @@ struct bnxt_qplib_rcfw {
unsigned long *cmdq_bitmap;
u32 bmap_size;
unsigned long flags;
-#define FIRMWARE_INITIALIZED_FLAG BIT(0)
-#define FIRMWARE_FIRST_FLAG BIT(31)
-#define FIRMWARE_TIMED_OUT BIT(3)
+#define FIRMWARE_INITIALIZED_FLAG 0
+#define FIRMWARE_FIRST_FLAG 31
+#define FIRMWARE_TIMED_OUT 3
wait_queue_head_t waitq;
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
struct creq_func_event *);
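
Note: the qplib_rcfw.c and qplib_rcfw.h hunks above turn the FIRMWARE_* macros from BIT() masks into bit numbers, so the atomic bitops can operate directly on the unsigned long flags word (with BIT() applied explicitly where a mask value is still needed, as in the doorbell write). A small sketch of that convention, with stand-in flag names:

/* Stand-in flag names; the driver's are the FIRMWARE_*_FLAG macros. */
#include <linux/bitops.h>

#define FW_INITIALIZED	0
#define FW_TIMED_OUT	3
#define FW_FIRST	31

static void fw_flags_example(unsigned long *flags)
{
	set_bit(FW_FIRST, flags);		/* was: *flags |= BIT(31) */
	if (test_bit(FW_FIRST, flags))
		clear_bit(FW_FIRST, flags);
}
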
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index e87207526d2c..2e5c052da5a9 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -169,7 +169,7 @@ struct bnxt_qplib_ctx {
u32 cq_count;
struct bnxt_qplib_hwq cq_tbl;
struct bnxt_qplib_hwq tim_tbl;
-#define MAX_TQM_ALLOC_REQ 32
+#define MAX_TQM_ALLOC_REQ 48
#define MAX_TQM_ALLOC_BLK_SIZE 8
u8 tqm_count[MAX_TQM_ALLOC_REQ];
struct bnxt_qplib_hwq tqm_pde;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index e277e54a05eb..9543ce51a28a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -720,13 +720,12 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
struct cmdq_map_tc_to_cos req;
struct creq_map_tc_to_cos_resp resp;
u16 cmd_flags = 0;
- int rc = 0;
RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
req.cos0 = cpu_to_le16(cids[0]);
req.cos1 = cpu_to_le16(cids[1]);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
+ bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
+ 0);
return 0;
}
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index eeb55b2db57e..c3cba6063a03 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -2644,7 +2644,7 @@ struct creq_query_func_resp_sb {
u8 l2_db_space_size;
__le16 max_srq;
__le32 max_gid;
- __le32 tqm_alloc_reqs[8];
+ __le32 tqm_alloc_reqs[12];
};
/* Set resources command response (16 bytes) */
diff --git a/drivers/infiniband/hw/cxgb3/Kconfig b/drivers/infiniband/hw/cxgb3/Kconfig
index 2b6352b85485..431be733fbbe 100644
--- a/drivers/infiniband/hw/cxgb3/Kconfig
+++ b/drivers/infiniband/hw/cxgb3/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_CXGB3
tristate "Chelsio RDMA Driver"
- depends on CHELSIO_T3 && INET
+ depends on CHELSIO_T3
select GENERIC_ALLOCATOR
---help---
This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 3eff6541bd6f..3328acc53c2a 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -404,12 +404,10 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
- __u32 ptr;
+ __u32 ptr = wq->sq_rptr + count;
int flushed = 0;
- struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
+ struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- ptr = wq->sq_rptr + count;
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
while (ptr != wq->sq_wptr) {
sqp->signaled = 0;
insert_sq_cqe(wq, cq, sqp);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 86975370a4c0..1c90c86fc8b8 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -107,7 +107,7 @@ static struct workqueue_struct *workq;
static struct sk_buff_head rxq;
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
-static void ep_timeout(unsigned long arg);
+static void ep_timeout(struct timer_list *t);
static void connect_reply_upcall(struct iwch_ep *ep, int status);
static void start_ep_timer(struct iwch_ep *ep)
@@ -119,8 +119,6 @@ static void start_ep_timer(struct iwch_ep *ep)
} else
get_ep(&ep->com);
ep->timer.expires = jiffies + ep_timeout_secs * HZ;
- ep->timer.data = (unsigned long)ep;
- ep->timer.function = ep_timeout;
add_timer(&ep->timer);
}
@@ -1399,7 +1397,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
child_ep->l2t = l2t;
child_ep->dst = dst;
child_ep->hwtid = hwtid;
- init_timer(&child_ep->timer);
+ timer_setup(&child_ep->timer, ep_timeout, 0);
cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
accept_cr(child_ep, req->peer_ip, skb);
goto out;
@@ -1719,9 +1717,9 @@ static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
return CPL_RET_BUF_DONE;
}
-static void ep_timeout(unsigned long arg)
+static void ep_timeout(struct timer_list *t)
{
- struct iwch_ep *ep = (struct iwch_ep *)arg;
+ struct iwch_ep *ep = from_timer(ep, t, timer);
struct iwch_qp_attributes attrs;
unsigned long flags;
int abort = 1;
@@ -1760,8 +1758,8 @@ static void ep_timeout(unsigned long arg)
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
- int err;
struct iwch_ep *ep = to_ep(cm_id);
+
pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
if (state_read(&ep->com) == DEAD) {
@@ -1772,8 +1770,8 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
if (mpa_rev == 0)
abort_connection(ep, NULL, GFP_KERNEL);
else {
- err = send_mpa_reject(ep, pdata, pdata_len);
- err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
+ send_mpa_reject(ep, pdata, pdata_len);
+ iwch_ep_disconnect(ep, 0, GFP_KERNEL);
}
put_ep(&ep->com);
return 0;
@@ -1899,7 +1897,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = -ENOMEM;
goto out;
}
- init_timer(&ep->timer);
+ timer_setup(&ep->timer, ep_timeout, 0);
ep->plen = conn_param->private_data_len;
if (ep->plen)
memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
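
Note: the iwch_cm.c hunks above are part of the tree-wide timer API conversion: timer_setup() replaces init_timer() plus the manual .data/.function assignments, and from_timer() recovers the containing object inside the callback. A minimal sketch with a hypothetical endpoint struct, not the driver's:

/* Hypothetical endpoint; illustrates timer_setup()/from_timer() only. */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_ep {
	struct timer_list timer;
	int state;
};

static void my_ep_timeout(struct timer_list *t)
{
	/* Recover the enclosing object from the embedded timer_list. */
	struct my_ep *ep = from_timer(ep, t, timer);

	ep->state = -1;		/* handle the timeout */
}

static void my_ep_start_timer(struct my_ep *ep)
{
	timer_setup(&ep->timer, my_ep_timeout, 0);
	ep->timer.expires = jiffies + 10 * HZ;
	add_timer(&ep->timer);
}
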
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 099e76f3758a..a578ca559e11 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -969,7 +969,6 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
insert_mmap(ucontext, mm2);
}
qhp->ibqp.qp_num = qhp->wq.qpid;
- init_timer(&(qhp->timer));
pr_debug("%s sq_num_entries %d, rq_num_entries %d qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
__func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
qhp->wq.qpid, qhp, (unsigned long long)qhp->wq.dma_addr,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 9e216edec4c0..2e38ddefea8a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -168,7 +168,6 @@ struct iwch_qp {
atomic_t refcnt;
wait_queue_head_t wait;
enum IWCH_QP_FLAGS flags;
- struct timer_list timer;
};
static inline int qp_quiesced(struct iwch_qp *qhp)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 7f633da0185d..3871e1fd8395 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -722,10 +722,13 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
*/
static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
struct iwch_cq *schp)
+ __releases(&qhp->lock)
+ __acquires(&qhp->lock)
{
int count;
int flushed;
+ lockdep_assert_held(&qhp->lock);
pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
/* take a ref on the qhp since we must release the lock */
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index afe8b28e0878..0a671a61fc92 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_CXGB4
tristate "Chelsio T4/T5 RDMA Driver"
- depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
+ depends on CHELSIO_T4 && INET
select CHELSIO_LIB
select GENERIC_ALLOCATOR
---help---
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index daf7a56e5d7e..21db3b48a617 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -99,10 +99,6 @@ module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
"Enable tcp window scaling (default=1)");
-int c4iw_debug;
-module_param(c4iw_debug, int, 0644);
-MODULE_PARM_DESC(c4iw_debug, "obsolete");
-
static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");
@@ -144,7 +140,7 @@ static struct workqueue_struct *workq;
static struct sk_buff_head rxq;
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
-static void ep_timeout(unsigned long arg);
+static void ep_timeout(struct timer_list *t);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);
@@ -180,7 +176,7 @@ static void ref_qp(struct c4iw_ep *ep)
static void start_ep_timer(struct c4iw_ep *ep)
{
- pr_debug("%s ep %p\n", __func__, ep);
+ pr_debug("ep %p\n", ep);
if (timer_pending(&ep->timer)) {
pr_err("%s timer already started! ep %p\n",
__func__, ep);
@@ -189,14 +185,12 @@ static void start_ep_timer(struct c4iw_ep *ep)
clear_bit(TIMEOUT, &ep->com.flags);
c4iw_get_ep(&ep->com);
ep->timer.expires = jiffies + ep_timeout_secs * HZ;
- ep->timer.data = (unsigned long)ep;
- ep->timer.function = ep_timeout;
add_timer(&ep->timer);
}
static int stop_ep_timer(struct c4iw_ep *ep)
{
- pr_debug("%s ep %p stopping\n", __func__, ep);
+ pr_debug("ep %p stopping\n", ep);
del_timer_sync(&ep->timer);
if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
c4iw_put_ep(&ep->com);
@@ -212,7 +206,7 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
if (c4iw_fatal_error(rdev)) {
kfree_skb(skb);
- pr_debug("%s - device in error state - dropping\n", __func__);
+ pr_err("%s - device in error state - dropping\n", __func__);
return -EIO;
}
error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
@@ -229,7 +223,7 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
if (c4iw_fatal_error(rdev)) {
kfree_skb(skb);
- pr_debug("%s - device in error state - dropping\n", __func__);
+ pr_err("%s - device in error state - dropping\n", __func__);
return -EIO;
}
error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
@@ -263,10 +257,10 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
if (ep->emss < 128)
ep->emss = 128;
if (ep->emss & 7)
- pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
- TCPOPT_MSS_G(opt), ep->mss, ep->emss);
- pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
- ep->mss, ep->emss);
+ pr_warn("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+ TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+ pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
+ ep->emss);
}
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
@@ -287,7 +281,7 @@ static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
mutex_lock(&epc->mutex);
- pr_debug("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
+ pr_debug("%s -> %s\n", states[epc->state], states[new]);
__state_set(epc, new);
mutex_unlock(&epc->mutex);
return;
@@ -318,11 +312,18 @@ static void *alloc_ep(int size, gfp_t gfp)
epc = kzalloc(size, gfp);
if (epc) {
+ epc->wr_waitp = c4iw_alloc_wr_wait(gfp);
+ if (!epc->wr_waitp) {
+ kfree(epc);
+ epc = NULL;
+ goto out;
+ }
kref_init(&epc->kref);
mutex_init(&epc->mutex);
- c4iw_init_wr_wait(&epc->wr_wait);
+ c4iw_init_wr_wait(epc->wr_waitp);
}
- pr_debug("%s alloc ep %p\n", __func__, epc);
+ pr_debug("alloc ep %p\n", epc);
+out:
return epc;
}
@@ -384,7 +385,7 @@ void _c4iw_free_ep(struct kref *kref)
struct c4iw_ep *ep;
ep = container_of(kref, struct c4iw_ep, com.kref);
- pr_debug("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
+ pr_debug("ep %p state %s\n", ep, states[ep->com.state]);
if (test_bit(QP_REFERENCED, &ep->com.flags))
deref_qp(ep);
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
@@ -407,6 +408,7 @@ void _c4iw_free_ep(struct kref *kref)
}
if (!skb_queue_empty(&ep->com.ep_skb_list))
skb_queue_purge(&ep->com.ep_skb_list);
+ c4iw_put_wr_wait(ep->com.wr_waitp);
kfree(ep);
}
@@ -570,7 +572,7 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
struct c4iw_rdev *rdev = &ep->com.dev->rdev;
struct cpl_abort_req *req = cplhdr(skb);
- pr_debug("%s rdev %p\n", __func__, rdev);
+ pr_debug("rdev %p\n", rdev);
req->cmd = CPL_ABORT_NO_RST;
skb_get(skb);
ret = c4iw_ofld_send(rdev, skb);
@@ -647,7 +649,7 @@ static int send_halfclose(struct c4iw_ep *ep)
struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
if (WARN_ON(!skb))
return -ENOMEM;
@@ -662,7 +664,7 @@ static int send_abort(struct c4iw_ep *ep)
u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
if (WARN_ON(!req_skb))
return -ENOMEM;
@@ -725,7 +727,7 @@ static int send_connect(struct c4iw_ep *ep)
roundup(sizev4, 16) :
roundup(sizev6, 16);
- pr_debug("%s ep %p atid %u\n", __func__, ep, ep->atid);
+ pr_debug("ep %p atid %u\n", ep, ep->atid);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
@@ -824,13 +826,13 @@ static int send_connect(struct c4iw_ep *ep)
t5req->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t5req->rsvd = cpu_to_be32(isn);
- pr_debug("%s snd_isn %u\n", __func__, t5req->rsvd);
+ pr_debug("snd_isn %u\n", t5req->rsvd);
t5req->opt2 = cpu_to_be32(opt2);
} else {
t6req->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t6req->rsvd = cpu_to_be32(isn);
- pr_debug("%s snd_isn %u\n", __func__, t6req->rsvd);
+ pr_debug("snd_isn %u\n", t6req->rsvd);
t6req->opt2 = cpu_to_be32(opt2);
}
}
@@ -877,13 +879,13 @@ static int send_connect(struct c4iw_ep *ep)
t5req6->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t5req6->rsvd = cpu_to_be32(isn);
- pr_debug("%s snd_isn %u\n", __func__, t5req6->rsvd);
+ pr_debug("snd_isn %u\n", t5req6->rsvd);
t5req6->opt2 = cpu_to_be32(opt2);
} else {
t6req6->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t6req6->rsvd = cpu_to_be32(isn);
- pr_debug("%s snd_isn %u\n", __func__, t6req6->rsvd);
+ pr_debug("snd_isn %u\n", t6req6->rsvd);
t6req6->opt2 = cpu_to_be32(opt2);
}
@@ -907,10 +909,8 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
struct mpa_message *mpa;
struct mpa_v2_conn_params mpa_v2_params;
- pr_debug("%s ep %p tid %u pd_len %d\n",
- __func__, ep, ep->hwtid, ep->plen);
-
- BUG_ON(skb_cloned(skb));
+ pr_debug("ep %p tid %u pd_len %d\n",
+ ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + ep->plen;
if (mpa_rev_to_use == 2)
@@ -961,7 +961,7 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
if (mpa_rev_to_use == 2) {
mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
sizeof (struct mpa_v2_conn_params));
- pr_debug("%s initiator ird %u ord %u\n", __func__, ep->ird,
+ pr_debug("initiator ird %u ord %u\n", ep->ird,
ep->ord);
mpa_v2_params.ird = htons((u16)ep->ird);
mpa_v2_params.ord = htons((u16)ep->ord);
@@ -994,7 +994,6 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
*/
skb_get(skb);
t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
- BUG_ON(ep->mpa_skb);
ep->mpa_skb = skb;
ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
if (ret)
@@ -1014,8 +1013,8 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
struct sk_buff *skb;
struct mpa_v2_conn_params mpa_v2_params;
- pr_debug("%s ep %p tid %u pd_len %d\n",
- __func__, ep, ep->hwtid, ep->plen);
+ pr_debug("ep %p tid %u pd_len %d\n",
+ ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + plen;
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
@@ -1080,7 +1079,6 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
skb_get(skb);
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
- BUG_ON(ep->mpa_skb);
ep->mpa_skb = skb;
ep->snd_seq += mpalen;
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
@@ -1094,8 +1092,8 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
struct sk_buff *skb;
struct mpa_v2_conn_params mpa_v2_params;
- pr_debug("%s ep %p tid %u pd_len %d\n",
- __func__, ep, ep->hwtid, ep->plen);
+ pr_debug("ep %p tid %u pd_len %d\n",
+ ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + plen;
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
@@ -1185,7 +1183,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
ep = lookup_atid(t, atid);
- pr_debug("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
+ pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
mutex_lock(&ep->com.mutex);
@@ -1229,7 +1227,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CLOSE;
event.status = status;
@@ -1246,7 +1244,7 @@ static void peer_close_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_DISCONNECT;
if (ep->com.cm_id) {
@@ -1261,7 +1259,7 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CLOSE;
event.status = -ECONNRESET;
@@ -1278,8 +1276,8 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u status %d\n",
- __func__, ep, ep->hwtid, status);
+ pr_debug("ep %p tid %u status %d\n",
+ ep, ep->hwtid, status);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REPLY;
event.status = status;
@@ -1308,7 +1306,7 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
}
}
- pr_debug("%s ep %p tid %u status %d\n", __func__, ep,
+ pr_debug("ep %p tid %u status %d\n", ep,
ep->hwtid, status);
set_bit(CONN_RPL_UPCALL, &ep->com.history);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
@@ -1322,7 +1320,7 @@ static int connect_request_upcall(struct c4iw_ep *ep)
struct iw_cm_event event;
int ret;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REQUEST;
memcpy(&event.local_addr, &ep->com.local_addr,
@@ -1359,13 +1357,13 @@ static void established_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_ESTABLISHED;
event.ird = ep->ord;
event.ord = ep->ird;
if (ep->com.cm_id) {
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
set_bit(ESTAB_UPCALL, &ep->com.history);
}
@@ -1377,8 +1375,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
u32 credit_dack;
- pr_debug("%s ep %p tid %u credits %u\n",
- __func__, ep, ep->hwtid, credits);
+ pr_debug("ep %p tid %u credits %u\n",
+ ep, ep->hwtid, credits);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
pr_err("update_rx_credits - cannot alloc skb!\n");
@@ -1429,7 +1427,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
int err;
int disconnect = 0;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
/*
* If we get more than the supported amount of private data
@@ -1527,8 +1525,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
MPA_V2_IRD_ORD_MASK;
resp_ord = ntohs(mpa_v2_params->ord) &
MPA_V2_IRD_ORD_MASK;
- pr_debug("%s responder ird %u ord %u ep ird %u ord %u\n",
- __func__,
+ pr_debug("responder ird %u ord %u ep ird %u ord %u\n",
resp_ird, resp_ord, ep->ird, ep->ord);
/*
@@ -1573,8 +1570,8 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
if (peer2peer)
ep->mpa_attr.p2p_type = p2p_type;
- pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
- __func__, ep->mpa_attr.crc_enabled,
+ pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
+ ep->mpa_attr.crc_enabled,
ep->mpa_attr.recv_marker_enabled,
ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
ep->mpa_attr.p2p_type, p2p_type);
@@ -1670,7 +1667,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
struct mpa_v2_conn_params *mpa_v2_params;
u16 plen;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
/*
* If we get more than the supported amount of private data
@@ -1679,7 +1676,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
goto err_stop_timer;
- pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+ pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
/*
* Copy the new data into our accumulation buffer.
@@ -1695,7 +1692,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (ep->mpa_pkt_len < sizeof(*mpa))
return 0;
- pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+ pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
mpa = (struct mpa_message *) ep->mpa_pkt;
/*
@@ -1758,8 +1755,8 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
MPA_V2_IRD_ORD_MASK;
ep->ord = min_t(u32, ep->ord,
cur_max_read_depth(ep->com.dev));
- pr_debug("%s initiator ird %u ord %u\n",
- __func__, ep->ird, ep->ord);
+ pr_debug("initiator ird %u ord %u\n",
+ ep->ird, ep->ord);
if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
if (peer2peer) {
if (ntohs(mpa_v2_params->ord) &
@@ -1776,8 +1773,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (peer2peer)
ep->mpa_attr.p2p_type = p2p_type;
- pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
- __func__,
+ pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
ep->mpa_attr.p2p_type);
@@ -1816,7 +1812,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
- pr_debug("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
+ pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen);
skb_pull(skb, sizeof(*hdr));
skb_trim(skb, dlen);
mutex_lock(&ep->com.mutex);
@@ -1836,7 +1832,6 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
update_rx_credits(ep, dlen);
- BUG_ON(!ep->com.qp);
if (status)
pr_err("%s Unexpected streaming data." \
" qpid %u ep %p state %d tid %u status %d\n",
@@ -1870,11 +1865,11 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
pr_warn("Abort rpl to freed endpoint\n");
return 0;
}
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case ABORTING:
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
__state_set(&ep->com, DEAD);
release = 1;
break;
@@ -1994,8 +1989,8 @@ static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
ep->snd_win = snd_win;
ep->rcv_win = rcv_win;
- pr_debug("%s snd_win %d rcv_win %d\n",
- __func__, ep->snd_win, ep->rcv_win);
+ pr_debug("snd_win %d rcv_win %d\n",
+ ep->snd_win, ep->rcv_win);
}
#define ACT_OPEN_RETRY_COUNT 2
@@ -2100,9 +2095,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
int iptype;
__u8 *ra;
- pr_debug("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
- init_timer(&ep->timer);
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
/* When MPA revision is different on nodes, the node with MPA_rev=2
* tries to reconnect with MPA_rev 1 for the same EP through
@@ -2110,7 +2104,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
* further connection establishment. As we are using the same EP pointer
* for reconnect, few skbs are used during the previous c4iw_connect(),
* which leaves the EP with inadequate skbs for further
- * c4iw_reconnect(), Further causing an assert BUG_ON() due to empty
+ * c4iw_reconnect(), Further causing a crash due to an empty
* skb_list() during peer_abort(). Allocate skbs which is already used.
*/
size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
@@ -2163,8 +2157,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
goto fail4;
}
- pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
- __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
ep->l2t->idx);
state_set(&ep->com, CONNECTING);
@@ -2215,12 +2209,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
- pr_debug("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
+ pr_debug("ep %p atid %u status %u errno %d\n", ep, atid,
status, status2errno(status));
if (cxgb_is_neg_adv(status)) {
- pr_debug("%s Connection problems for atid %u status %u (%s)\n",
- __func__, atid, status, neg_adv_str(status));
+ pr_debug("Connection problems for atid %u status %u (%s)\n",
+ atid, status, neg_adv_str(status));
ep->stats.connect_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
@@ -2316,12 +2310,12 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
if (!ep) {
- pr_debug("%s stid %d lookup failure!\n", __func__, stid);
+ pr_warn("%s stid %d lookup failure!\n", __func__, stid);
goto out;
}
- pr_debug("%s ep %p status %d error %d\n", __func__, ep,
+ pr_debug("ep %p status %d error %d\n", ep,
rpl->status, status2errno(rpl->status));
- c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+ c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
c4iw_put_ep(&ep->com);
out:
return 0;
@@ -2334,11 +2328,11 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
if (!ep) {
- pr_debug("%s stid %d lookup failure!\n", __func__, stid);
+ pr_warn("%s stid %d lookup failure!\n", __func__, stid);
goto out;
}
- pr_debug("%s ep %p\n", __func__, ep);
- c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+ pr_debug("ep %p\n", ep);
+ c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
c4iw_put_ep(&ep->com);
out:
return 0;
@@ -2356,8 +2350,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
int win;
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- BUG_ON(skb_cloned(skb));
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
skb_get(skb);
rpl = cplhdr(skb);
@@ -2427,7 +2420,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
- pr_debug("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
+ pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
}
rpl->opt0 = cpu_to_be64(opt0);
@@ -2440,8 +2433,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
- pr_debug("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
- BUG_ON(skb_cloned(skb));
+ pr_debug("c4iw_dev %p tid %u\n", dev, hwtid);
skb_trim(skb, sizeof(struct cpl_tid_release));
release_tid(&dev->rdev, hwtid, skb);
return;
@@ -2466,13 +2458,13 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!parent_ep) {
- pr_debug("%s connect request on invalid stid %d\n",
- __func__, stid);
+ pr_err("%s connect request on invalid stid %d\n",
+ __func__, stid);
goto reject;
}
if (state_read(&parent_ep->com) != LISTEN) {
- pr_debug("%s - listening ep not in LISTEN\n", __func__);
+ pr_err("%s - listening ep not in LISTEN\n", __func__);
goto reject;
}
@@ -2481,16 +2473,16 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
/* Find output route */
if (iptype == 4) {
- pr_debug("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
- , __func__, parent_ep, hwtid,
+ pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
+ , parent_ep, hwtid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
*(__be32 *)local_ip, *(__be32 *)peer_ip,
local_port, peer_port, tos);
} else {
- pr_debug("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
- , __func__, parent_ep, hwtid,
+ pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
+ , parent_ep, hwtid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
@@ -2576,10 +2568,10 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
child_ep->dst = dst;
child_ep->hwtid = hwtid;
- pr_debug("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
+ pr_debug("tx_chan %u smac_idx %u rss_qid %u\n",
child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
- init_timer(&child_ep->timer);
+ timer_setup(&child_ep->timer, ep_timeout, 0);
cxgb4_insert_tid(t, child_ep, hwtid,
child_ep->com.local_addr.ss_family);
insert_ep_tid(child_ep);
@@ -2613,11 +2605,11 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
int ret;
ep = get_ep_from_tid(dev, tid);
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
- pr_debug("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
+ pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid,
ntohs(req->tcp_opt));
set_emss(ep, ntohs(req->tcp_opt));
@@ -2650,7 +2642,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
if (!ep)
return 0;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
dst_confirm(ep->dst);
set_bit(PEER_CLOSE, &ep->com.history);
@@ -2673,12 +2665,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
*/
__state_set(&ep->com, CLOSING);
pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
break;
case MPA_REP_SENT:
__state_set(&ep->com, CLOSING);
pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
break;
case FPDU_MODE:
start_ep_timer(ep);
@@ -2714,7 +2706,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
disconnect = 0;
break;
default:
- BUG_ON(1);
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
}
mutex_unlock(&ep->com.mutex);
if (disconnect)
@@ -2741,16 +2733,16 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
if (cxgb_is_neg_adv(req->status)) {
- pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
- neg_adv_str(req->status));
+ pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
+ __func__, ep->hwtid, req->status,
+ neg_adv_str(req->status));
ep->stats.abort_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
mutex_unlock(&dev->rdev.stats.lock);
goto deref_ep;
}
- pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+ pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
ep->com.state);
set_bit(PEER_ABORT, &ep->com.history);
@@ -2760,7 +2752,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
* MPA_REQ_SENT
*/
if (ep->com.state != MPA_REQ_SENT)
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
@@ -2783,8 +2775,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
* do some housekeeping so as to re-initiate the
* connection
*/
- pr_debug("%s: mpa_rev=%d. Retrying with mpav1\n",
- __func__, mpa_rev);
+ pr_info("%s: mpa_rev=%d. Retrying with mpav1\n",
+ __func__, mpa_rev);
ep->retry_with_mpa_v1 = 1;
}
break;
@@ -2810,11 +2802,11 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
case ABORTING:
break;
case DEAD:
- pr_debug("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
+ pr_warn("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
mutex_unlock(&ep->com.mutex);
goto deref_ep;
default:
- BUG_ON(1);
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
break;
}
dst_confirm(ep->dst);
@@ -2875,7 +2867,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (!ep)
return 0;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
/* The cm_id may be null if we failed to connect */
mutex_lock(&ep->com.mutex);
@@ -2901,7 +2893,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
case DEAD:
break;
default:
- BUG_ON(1);
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
break;
}
mutex_unlock(&ep->com.mutex);
@@ -2919,7 +2911,6 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
ep = get_ep_from_tid(dev, tid);
- BUG_ON(!ep);
if (ep && ep->com.qp) {
pr_warn("TERM received tid %u qpid %u\n",
@@ -2950,19 +2941,19 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
- pr_debug("%s ep %p tid %u credits %u\n",
- __func__, ep, ep->hwtid, credits);
+ pr_debug("ep %p tid %u credits %u\n",
+ ep, ep->hwtid, credits);
if (credits == 0) {
- pr_debug("%s 0 credit ack ep %p tid %u state %u\n",
- __func__, ep, ep->hwtid, state_read(&ep->com));
+ pr_debug("0 credit ack ep %p tid %u state %u\n",
+ ep, ep->hwtid, state_read(&ep->com));
goto out;
}
dst_confirm(ep->dst);
if (ep->mpa_skb) {
- pr_debug("%s last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
- __func__, ep, ep->hwtid,
- state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
+ pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
+ ep, ep->hwtid, state_read(&ep->com),
+ ep->mpa_attr.initiator ? 1 : 0);
mutex_lock(&ep->com.mutex);
kfree_skb(ep->mpa_skb);
ep->mpa_skb = NULL;
@@ -2980,7 +2971,7 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
int abort;
struct c4iw_ep *ep = to_ep(cm_id);
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
if (ep->com.state != MPA_REQ_RCVD) {
@@ -3011,7 +3002,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
int abort = 0;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
if (ep->com.state != MPA_REQ_RCVD) {
@@ -3019,7 +3010,10 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto err_out;
}
- BUG_ON(!qp);
+ if (!qp) {
+ err = -EINVAL;
+ goto err_out;
+ }
set_bit(ULP_ACCEPT, &ep->com.history);
if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
@@ -3064,7 +3058,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->ird = 1;
}
- pr_debug("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
+ pr_debug("ird %d ord %d\n", ep->ird, ep->ord);
ep->com.cm_id = cm_id;
ref_cm_id(&ep->com);
@@ -3204,7 +3198,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto fail1;
}
- init_timer(&ep->timer);
+ timer_setup(&ep->timer, ep_timeout, 0);
ep->plen = conn_param->private_data_len;
if (ep->plen)
memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
@@ -3220,12 +3214,12 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->com.dev = dev;
ep->com.qp = get_qhp(dev, conn_param->qpn);
if (!ep->com.qp) {
- pr_debug("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
+ pr_warn("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
err = -EINVAL;
goto fail2;
}
ref_qp(ep);
- pr_debug("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
+ pr_debug("qpn 0x%x qp %p cm_id %p\n", conn_param->qpn,
ep->com.qp, cm_id);
/*
@@ -3263,8 +3257,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
/* find a route */
- pr_debug("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
- __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
+ pr_debug("saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
+ &laddr->sin_addr, ntohs(laddr->sin_port),
ra, ntohs(raddr->sin_port));
ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
laddr->sin_addr.s_addr,
@@ -3285,8 +3279,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
/* find a route */
- pr_debug("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
- __func__, laddr6->sin6_addr.s6_addr,
+ pr_debug("saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
+ laddr6->sin6_addr.s6_addr,
ntohs(laddr6->sin6_port),
raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
@@ -3309,8 +3303,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto fail4;
}
- pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
- __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
ep->l2t->idx);
state_set(&ep->com, CONNECTING);
@@ -3348,14 +3342,14 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
if (err)
return err;
}
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
ep->stid, &sin6->sin6_addr,
sin6->sin6_port,
ep->com.dev->rdev.lldi.rxq_ids[0]);
if (!err)
err = c4iw_wait_for_reply(&ep->com.dev->rdev,
- &ep->com.wr_wait,
+ ep->com.wr_waitp,
0, 0, __func__);
else if (err > 0)
err = net_xmit_errno(err);
@@ -3391,13 +3385,13 @@ static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
}
} while (err == -EBUSY);
} else {
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
ep->stid, sin->sin_addr.s_addr, sin->sin_port,
0, ep->com.dev->rdev.lldi.rxq_ids[0]);
if (!err)
err = c4iw_wait_for_reply(&ep->com.dev->rdev,
- &ep->com.wr_wait,
+ ep->com.wr_waitp,
0, 0, __func__);
else if (err > 0)
err = net_xmit_errno(err);
@@ -3424,7 +3418,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
goto fail1;
}
skb_queue_head_init(&ep->com.ep_skb_list);
- pr_debug("%s ep %p\n", __func__, ep);
+ pr_debug("ep %p\n", ep);
ep->com.cm_id = cm_id;
ref_cm_id(&ep->com);
ep->com.dev = dev;
@@ -3478,7 +3472,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
int err;
struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
- pr_debug("%s ep %p\n", __func__, ep);
+ pr_debug("ep %p\n", ep);
might_sleep();
state_set(&ep->com, DEAD);
@@ -3489,13 +3483,13 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
ep->com.dev->rdev.lldi.rxq_ids[0], 0);
} else {
struct sockaddr_in6 *sin6;
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_remove_server(
ep->com.dev->rdev.lldi.ports[0], ep->stid,
ep->com.dev->rdev.lldi.rxq_ids[0], 0);
if (err)
goto done;
- err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
0, 0, __func__);
sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
@@ -3519,7 +3513,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
mutex_lock(&ep->com.mutex);
- pr_debug("%s ep %p state %s, abrupt %d\n", __func__, ep,
+ pr_debug("ep %p state %s, abrupt %d\n", ep,
states[ep->com.state], abrupt);
/*
@@ -3573,11 +3567,11 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
case MORIBUND:
case ABORTING:
case DEAD:
- pr_debug("%s ignoring disconnect ep %p state %u\n",
- __func__, ep, ep->com.state);
+ pr_info("%s ignoring disconnect ep %p state %u\n",
+ __func__, ep, ep->com.state);
break;
default:
- BUG();
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
break;
}
@@ -3636,6 +3630,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
send_fw_act_open_req(ep, atid);
return;
}
+ /* fall through */
case FW_EADDRINUSE:
set_bit(ACT_RETRY_INUSE, &ep->com.history);
if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
@@ -3676,9 +3671,8 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
int ret;
rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
- BUG_ON(!rpl_skb);
if (req->retval) {
- pr_debug("%s passive open failure %d\n", __func__, req->retval);
+ pr_err("%s passive open failure %d\n", __func__, req->retval);
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.pas_ofld_conn_fails++;
mutex_unlock(&dev->rdev.stats.lock);
@@ -3874,7 +3868,6 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
struct net_device *pdev;
u16 rss_qid, eth_hdr_len;
int step;
- u32 tx_chan;
struct neighbour *neigh;
/* Drop all non-SYN packets */
@@ -3895,8 +3888,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!lep) {
- pr_debug("%s connect request on invalid stid %d\n",
- __func__, stid);
+ pr_warn("%s connect request on invalid stid %d\n",
+ __func__, stid);
goto reject;
}
@@ -3933,7 +3926,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
skb_set_transport_header(skb, (void *)tcph - (void *)rss);
skb_get(skb);
- pr_debug("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
+ pr_debug("lip 0x%x lport %u pip 0x%x pport %u tos %d\n",
ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
ntohs(tcph->source), iph->tos);
@@ -3941,15 +3934,13 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
iph->daddr, iph->saddr, tcph->dest,
tcph->source, iph->tos);
if (!dst) {
- pr_err("%s - failed to find dst entry!\n",
- __func__);
+ pr_err("%s - failed to find dst entry!\n", __func__);
goto reject;
}
neigh = dst_neigh_lookup_skb(dst, skb);
if (!neigh) {
- pr_err("%s - failed to allocate neigh!\n",
- __func__);
+ pr_err("%s - failed to allocate neigh!\n", __func__);
goto free_dst;
}
@@ -3958,14 +3949,12 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
pdev, 0);
pi = (struct port_info *)netdev_priv(pdev);
- tx_chan = cxgb4_port_chan(pdev);
dev_put(pdev);
} else {
pdev = get_real_dev(neigh->dev);
e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
pdev, 0);
pi = (struct port_info *)netdev_priv(pdev);
- tx_chan = cxgb4_port_chan(pdev);
}
neigh_release(neigh);
if (!e) {
@@ -4032,8 +4021,7 @@ static void process_timeout(struct c4iw_ep *ep)
int abort = 1;
mutex_lock(&ep->com.mutex);
- pr_debug("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
- ep->com.state);
+ pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state);
set_bit(TIMEDOUT, &ep->com.history);
switch (ep->com.state) {
case MPA_REQ_SENT:
@@ -4109,7 +4097,6 @@ static void process_work(struct work_struct *work)
dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
opcode = rpl->ot.opcode;
- BUG_ON(!work_handlers[opcode]);
ret = work_handlers[opcode](dev, skb);
if (!ret)
kfree_skb(skb);
@@ -4119,9 +4106,9 @@ static void process_work(struct work_struct *work)
static DECLARE_WORK(skb_work, process_work);
-static void ep_timeout(unsigned long arg)
+static void ep_timeout(struct timer_list *t)
{
- struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+ struct c4iw_ep *ep = from_timer(ep, t, timer);
int kickit = 0;
spin_lock(&timeout_lock);
@@ -4176,15 +4163,15 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_wr_wait *wr_waitp;
int ret;
- pr_debug("%s type %u\n", __func__, rpl->type);
+ pr_debug("type %u\n", rpl->type);
switch (rpl->type) {
case FW6_TYPE_WR_RPL:
ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
- pr_debug("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
+ pr_debug("wr_waitp %p ret %u\n", wr_waitp, ret);
if (wr_waitp)
- c4iw_wake_up(wr_waitp, ret ? -ret : 0);
+ c4iw_wake_up_deref(wr_waitp, ret ? -ret : 0);
kfree_skb(skb);
break;
case FW6_TYPE_CQE:
@@ -4214,15 +4201,14 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
if (cxgb_is_neg_adv(req->status)) {
- pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
+ pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
+ __func__, ep->hwtid, req->status,
neg_adv_str(req->status));
goto out;
}
- pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
- ep->com.state);
+ pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state);
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
out:
sched(dev, skb);
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index be07da1997e6..ea55e95cd2c5 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -33,12 +33,12 @@
#include "iw_cxgb4.h"
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
- struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
+ struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
struct fw_ri_res_wr *res_wr;
struct fw_ri_res *res;
int wr_len;
- struct c4iw_wr_wait wr_wait;
int ret;
wr_len = sizeof *res_wr + sizeof *res;
@@ -50,17 +50,14 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (uintptr_t)&wr_wait;
+ res_wr->cookie = (uintptr_t)wr_waitp;
res = res_wr->res;
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
res->u.cq.op = FW_RI_RES_OP_RESET;
res->u.cq.iqid = cpu_to_be32(cq->cqid);
- c4iw_init_wr_wait(&wr_wait);
- ret = c4iw_ofld_send(rdev, skb);
- if (!ret) {
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
- }
+ c4iw_init_wr_wait(wr_waitp);
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
kfree(cq->sw_queue);
dma_free_coherent(&(rdev->lldi.pdev->dev),
@@ -71,13 +68,13 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
}
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
- struct c4iw_dev_ucontext *uctx)
+ struct c4iw_dev_ucontext *uctx,
+ struct c4iw_wr_wait *wr_waitp)
{
struct fw_ri_res_wr *res_wr;
struct fw_ri_res *res;
int wr_len;
int user = (uctx != &rdev->uctx);
- struct c4iw_wr_wait wr_wait;
int ret;
struct sk_buff *skb;
@@ -119,7 +116,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (uintptr_t)&wr_wait;
+ res_wr->cookie = (uintptr_t)wr_waitp;
res = res_wr->res;
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -139,13 +136,8 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
res->u.cq.iqsize = cpu_to_be16(cq->size);
res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
- c4iw_init_wr_wait(&wr_wait);
-
- ret = c4iw_ofld_send(rdev, skb);
- if (ret)
- goto err4;
- pr_debug("%s wait_event wr_wait %p\n", __func__, &wr_wait);
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+ c4iw_init_wr_wait(wr_waitp);
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
if (ret)
goto err4;
@@ -178,7 +170,7 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
struct t4_cqe cqe;
- pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+ pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
wq, cq, cq->sw_cidx, cq->sw_pidx);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
@@ -196,8 +188,7 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
int flushed = 0;
int in_use = wq->rq.in_use - count;
- BUG_ON(in_use < 0);
- pr_debug("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
+ pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
wq, cq, wq->rq.in_use, count);
while (in_use--) {
insert_recv_cqe(wq, cq);
@@ -211,7 +202,7 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
{
struct t4_cqe cqe;
- pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+ pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
wq, cq, cq->sw_cidx, cq->sw_pidx);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
@@ -239,14 +230,11 @@ int c4iw_flush_sq(struct c4iw_qp *qhp)
if (wq->sq.flush_cidx == -1)
wq->sq.flush_cidx = wq->sq.cidx;
idx = wq->sq.flush_cidx;
- BUG_ON(idx >= wq->sq.size);
while (idx != wq->sq.pidx) {
swsqe = &wq->sq.sw_sq[idx];
- BUG_ON(swsqe->flushed);
swsqe->flushed = 1;
insert_sq_cqe(wq, cq, swsqe);
if (wq->sq.oldest_read == swsqe) {
- BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
advance_oldest_read(wq);
}
flushed++;
@@ -267,7 +255,6 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
if (wq->sq.flush_cidx == -1)
wq->sq.flush_cidx = wq->sq.cidx;
cidx = wq->sq.flush_cidx;
- BUG_ON(cidx > wq->sq.size);
while (cidx != wq->sq.pidx) {
swsqe = &wq->sq.sw_sq[cidx];
@@ -276,13 +263,11 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
cidx = 0;
} else if (swsqe->complete) {
- BUG_ON(swsqe->flushed);
-
/*
* Insert this completed cqe into the swcq.
*/
- pr_debug("%s moving cqe into swcq sq idx %u cq idx %u\n",
- __func__, cidx, cq->sw_pidx);
+ pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
+ cidx, cq->sw_pidx);
swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
t4_swcq_produce(cq);
@@ -337,7 +322,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
struct t4_swsqe *swsqe;
int ret;
- pr_debug("%s cqid 0x%x\n", __func__, chp->cq.cqid);
+ pr_debug("cqid 0x%x\n", chp->cq.cqid);
ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
/*
@@ -430,7 +415,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
u32 ptr;
*count = 0;
- pr_debug("%s count zero %d\n", __func__, *count);
+ pr_debug("count zero %d\n", *count);
ptr = cq->sw_cidx;
while (ptr != cq->sw_pidx) {
cqe = &cq->sw_queue[ptr];
@@ -440,7 +425,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
if (++ptr == cq->size)
ptr = 0;
}
- pr_debug("%s cq %p count %d\n", __func__, cq, *count);
+ pr_debug("cq %p count %d\n", cq, *count);
}
/*
@@ -471,8 +456,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
if (ret)
return ret;
- pr_debug("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
- __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
+ pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
+ CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
CQE_WRID_LOW(hw_cqe));
@@ -603,8 +588,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
struct t4_swsqe *swsqe;
- pr_debug("%s out of order completion going in sw_sq at idx %u\n",
- __func__, CQE_WRID_SQ_IDX(hw_cqe));
+ pr_debug("out of order completion going in sw_sq at idx %u\n",
+ CQE_WRID_SQ_IDX(hw_cqe));
swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
swsqe->cqe = *hw_cqe;
swsqe->complete = 1;
@@ -621,7 +606,6 @@ proc_cqe:
*/
if (SQ_TYPE(hw_cqe)) {
int idx = CQE_WRID_SQ_IDX(hw_cqe);
- BUG_ON(idx >= wq->sq.size);
/*
* Account for any unsignaled completions completed by
@@ -635,18 +619,16 @@ proc_cqe:
wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
else
wq->sq.in_use -= idx - wq->sq.cidx;
- BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
wq->sq.cidx = (uint16_t)idx;
- pr_debug("%s completing sq idx %u\n", __func__, wq->sq.cidx);
+ pr_debug("completing sq idx %u\n", wq->sq.cidx);
*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
if (c4iw_wr_log)
c4iw_log_wr_stats(wq, hw_cqe);
t4_sq_consume(wq);
} else {
- pr_debug("%s completing rq idx %u\n", __func__, wq->rq.cidx);
+ pr_debug("completing rq idx %u\n", wq->rq.cidx);
*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
- BUG_ON(t4_rq_empty(wq));
if (c4iw_wr_log)
c4iw_log_wr_stats(wq, hw_cqe);
t4_rq_consume(wq);
@@ -661,12 +643,12 @@ flush_wq:
skip_cqe:
if (SW_CQE(hw_cqe)) {
- pr_debug("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
- __func__, cq, cq->cqid, cq->sw_cidx);
+ pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
+ cq, cq->cqid, cq->sw_cidx);
t4_swcq_consume(cq);
} else {
- pr_debug("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
- __func__, cq, cq->cqid, cq->cidx);
+ pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
+ cq, cq->cqid, cq->cidx);
t4_hwcq_consume(cq);
}
return ret;
@@ -712,8 +694,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
wc->vendor_err = CQE_STATUS(&cqe);
wc->wc_flags = 0;
- pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
- __func__, CQE_QPID(&cqe),
+ pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
+ CQE_QPID(&cqe),
CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
CQE_STATUS(&cqe), CQE_LEN(&cqe),
CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
@@ -857,7 +839,7 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
struct c4iw_cq *chp;
struct c4iw_ucontext *ucontext;
- pr_debug("%s ib_cq %p\n", __func__, ib_cq);
+ pr_debug("ib_cq %p\n", ib_cq);
chp = to_c4iw_cq(ib_cq);
remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
@@ -868,8 +850,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
: NULL;
destroy_cq(&chp->rhp->rdev, &chp->cq,
ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
- chp->destroy_skb);
- chp->destroy_skb = NULL;
+ chp->destroy_skb, chp->wr_waitp);
+ c4iw_put_wr_wait(chp->wr_waitp);
kfree(chp);
return 0;
}
@@ -889,7 +871,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
size_t memsize, hwentries;
struct c4iw_mm_entry *mm, *mm2;
- pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
+ pr_debug("ib_dev %p entries %d\n", ibdev, entries);
if (attr->flags)
return ERR_PTR(-EINVAL);
@@ -901,12 +883,18 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
chp = kzalloc(sizeof(*chp), GFP_KERNEL);
if (!chp)
return ERR_PTR(-ENOMEM);
+ chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!chp->wr_waitp) {
+ ret = -ENOMEM;
+ goto err_free_chp;
+ }
+ c4iw_init_wr_wait(chp->wr_waitp);
wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
if (!chp->destroy_skb) {
ret = -ENOMEM;
- goto err1;
+ goto err_free_wr_wait;
}
if (ib_context)
@@ -947,9 +935,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
chp->cq.vector = vector;
ret = create_cq(&rhp->rdev, &chp->cq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ chp->wr_waitp);
if (ret)
- goto err2;
+ goto err_free_skb;
chp->rhp = rhp;
chp->cq.size--; /* status page */
@@ -960,16 +949,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
init_waitqueue_head(&chp->wait);
ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
if (ret)
- goto err3;
+ goto err_destroy_cq;
if (ucontext) {
ret = -ENOMEM;
mm = kmalloc(sizeof *mm, GFP_KERNEL);
if (!mm)
- goto err4;
+ goto err_remove_handle;
mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
if (!mm2)
- goto err5;
+ goto err_free_mm;
uresp.qid_mask = rhp->rdev.cqmask;
uresp.cqid = chp->cq.cqid;
@@ -984,7 +973,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
ret = ib_copy_to_udata(udata, &uresp,
sizeof(uresp) - sizeof(uresp.reserved));
if (ret)
- goto err6;
+ goto err_free_mm2;
mm->key = uresp.key;
mm->addr = virt_to_phys(chp->cq.queue);
@@ -996,23 +985,25 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
mm2->len = PAGE_SIZE;
insert_mmap(ucontext, mm2);
}
- pr_debug("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
- __func__, chp->cq.cqid, chp, chp->cq.size,
+ pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
+ chp->cq.cqid, chp, chp->cq.size,
chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
return &chp->ibcq;
-err6:
+err_free_mm2:
kfree(mm2);
-err5:
+err_free_mm:
kfree(mm);
-err4:
+err_remove_handle:
remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
-err3:
+err_destroy_cq:
destroy_cq(&chp->rhp->rdev, &chp->cq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
- chp->destroy_skb);
-err2:
+ chp->destroy_skb, chp->wr_waitp);
+err_free_skb:
kfree_skb(chp->destroy_skb);
-err1:
+err_free_wr_wait:
+ c4iw_put_wr_wait(chp->wr_waitp);
+err_free_chp:
kfree(chp);
return ERR_PTR(ret);
}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index fc886f81b885..af77d128d242 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -64,14 +64,9 @@ module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
"Number of entries (log2) in the work request timing log.");
-struct uld_ctx {
- struct list_head entry;
- struct cxgb4_lld_info lldi;
- struct c4iw_dev *dev;
-};
-
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
+struct workqueue_struct *reg_workq;
#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
@@ -811,8 +806,8 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->qpmask = rdev->lldi.udb_density - 1;
rdev->cqmask = rdev->lldi.ucq_density - 1;
- pr_debug("%s dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
- __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
+ pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
+ pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
rdev->lldi.vr->pbl.start,
rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
@@ -912,7 +907,7 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
c4iw_destroy_resource(&rdev->resource);
}
-static void c4iw_dealloc(struct uld_ctx *ctx)
+void c4iw_dealloc(struct uld_ctx *ctx)
{
c4iw_rdev_close(&ctx->dev->rdev);
WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
@@ -935,7 +930,7 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
static void c4iw_remove(struct uld_ctx *ctx)
{
- pr_debug("%s c4iw_dev %p\n", __func__, ctx->dev);
+ pr_debug("c4iw_dev %p\n", ctx->dev);
c4iw_unregister_device(ctx->dev);
c4iw_dealloc(ctx);
}
@@ -969,8 +964,8 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
devp->rdev.lldi = *infop;
/* init various hw-queue params based on lld info */
- pr_debug("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
- __func__, devp->rdev.lldi.sge_ingpadboundary,
+ pr_debug("Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
+ devp->rdev.lldi.sge_ingpadboundary,
devp->rdev.lldi.sge_egrstatuspagesize);
devp->rdev.hw_queue.t4_eq_status_entries =
@@ -1069,8 +1064,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
}
ctx->lldi = *infop;
- pr_debug("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
- __func__, pci_name(ctx->lldi.pdev),
+ pr_debug("found device %s nchan %u nrxq %u ntxq %u nports %u\n",
+ pci_name(ctx->lldi.pdev),
ctx->lldi.nchan, ctx->lldi.nrxq,
ctx->lldi.ntxq, ctx->lldi.nports);
@@ -1102,8 +1097,8 @@ static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
if (unlikely(!skb))
return NULL;
- __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
- sizeof(struct rss_header) - pktshift);
+ __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header) - pktshift);
/*
* This skb will contain:
@@ -1203,13 +1198,11 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
struct uld_ctx *ctx = handle;
- pr_debug("%s new_state %u\n", __func__, new_state);
+ pr_debug("new_state %u\n", new_state);
switch (new_state) {
case CXGB4_STATE_UP:
pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
if (!ctx->dev) {
- int ret;
-
ctx->dev = c4iw_alloc(&ctx->lldi);
if (IS_ERR(ctx->dev)) {
pr_err("%s: initialization failed: %ld\n",
@@ -1218,12 +1211,9 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
ctx->dev = NULL;
break;
}
- ret = c4iw_register_device(ctx->dev);
- if (ret) {
- pr_err("%s: RDMA registration failed: %d\n",
- pci_name(ctx->lldi.pdev), ret);
- c4iw_dealloc(ctx);
- }
+
+ INIT_WORK(&ctx->reg_work, c4iw_register_device);
+ queue_work(reg_workq, &ctx->reg_work);
}
break;
case CXGB4_STATE_DOWN:
@@ -1518,6 +1508,27 @@ static struct cxgb4_uld_info c4iw_uld_info = {
.control = c4iw_uld_control,
};
+void _c4iw_free_wr_wait(struct kref *kref)
+{
+ struct c4iw_wr_wait *wr_waitp;
+
+ wr_waitp = container_of(kref, struct c4iw_wr_wait, kref);
+ pr_debug("Free wr_wait %p\n", wr_waitp);
+ kfree(wr_waitp);
+}
+
+struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp)
+{
+ struct c4iw_wr_wait *wr_waitp;
+
+ wr_waitp = kzalloc(sizeof(*wr_waitp), gfp);
+ if (wr_waitp) {
+ kref_init(&wr_waitp->kref);
+ pr_debug("wr_wait %p\n", wr_waitp);
+ }
+ return wr_waitp;
+}
+
static int __init c4iw_init_module(void)
{
int err;
@@ -1530,6 +1541,12 @@ static int __init c4iw_init_module(void)
if (!c4iw_debugfs_root)
pr_warn("could not create debugfs entry, continuing\n");
+ reg_workq = create_singlethread_workqueue("Register_iWARP_device");
+ if (!reg_workq) {
+ pr_err("Failed creating workqueue to register iwarp device\n");
+ return -ENOMEM;
+ }
+
cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
return 0;
@@ -1546,6 +1563,8 @@ static void __exit c4iw_exit_module(void)
kfree(ctx);
}
mutex_unlock(&dev_mutex);
+ flush_workqueue(reg_workq);
+ destroy_workqueue(reg_workq);
cxgb4_unregister_uld(CXGB4_ULD_RDMA);
c4iw_cm_term();
debugfs_remove_recursive(c4iw_debugfs_root);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 8f963df0bffc..a252d5c40ae3 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -109,9 +109,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
if (qhp->ibqp.event_handler)
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
- spin_lock_irqsave(&chp->comp_handler_lock, flag);
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+ if (t4_clear_cq_armed(&chp->cq)) {
+ spin_lock_irqsave(&chp->comp_handler_lock, flag);
+ (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+ }
}
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
@@ -234,7 +236,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
if (atomic_dec_and_test(&chp->refcnt))
wake_up(&chp->wait);
} else {
- pr_debug("%s unknown cqid 0x%x\n", __func__, qid);
+ pr_warn("%s unknown cqid 0x%x\n", __func__, qid);
spin_unlock_irqrestore(&dev->lock, flag);
}
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
index 0161ae6ad629..5c2cfdea06ad 100644
--- a/drivers/infiniband/hw/cxgb4/id_table.c
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -73,7 +73,6 @@ void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
unsigned long flags;
obj -= alloc->start;
- BUG_ON((int)obj < 0);
spin_lock_irqsave(&alloc->lock, flags);
clear_bit(obj, alloc->table);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 819a30635d53..470f97a79ebb 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -202,18 +202,50 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
struct c4iw_wr_wait {
struct completion completion;
int ret;
+ struct kref kref;
};
+void _c4iw_free_wr_wait(struct kref *kref);
+
+static inline void c4iw_put_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+ pr_debug("wr_wait %p ref before put %u\n", wr_waitp,
+ kref_read(&wr_waitp->kref));
+ WARN_ON(kref_read(&wr_waitp->kref) == 0);
+ kref_put(&wr_waitp->kref, _c4iw_free_wr_wait);
+}
+
+static inline void c4iw_get_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+ pr_debug("wr_wait %p ref before get %u\n", wr_waitp,
+ kref_read(&wr_waitp->kref));
+ WARN_ON(kref_read(&wr_waitp->kref) == 0);
+ kref_get(&wr_waitp->kref);
+}
+
static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
wr_waitp->ret = 0;
init_completion(&wr_waitp->completion);
}
-static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
+static inline void _c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret,
+ bool deref)
{
wr_waitp->ret = ret;
complete(&wr_waitp->completion);
+ if (deref)
+ c4iw_put_wr_wait(wr_waitp);
+}
+
+static inline void c4iw_wake_up_noref(struct c4iw_wr_wait *wr_waitp, int ret)
+{
+ _c4iw_wake_up(wr_waitp, ret, false);
+}
+
+static inline void c4iw_wake_up_deref(struct c4iw_wr_wait *wr_waitp, int ret)
+{
+ _c4iw_wake_up(wr_waitp, ret, true);
}
static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
@@ -230,18 +262,40 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
if (!ret) {
- pr_debug("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
- func, pci_name(rdev->lldi.pdev), hwtid, qpid);
+ pr_err("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+ func, pci_name(rdev->lldi.pdev), hwtid, qpid);
rdev->flags |= T4_FATAL_ERROR;
wr_waitp->ret = -EIO;
+ goto out;
}
-out:
if (wr_waitp->ret)
pr_debug("%s: FW reply %d tid %u qpid %u\n",
pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
+out:
return wr_waitp->ret;
}
+int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
+
+static inline int c4iw_ref_send_wait(struct c4iw_rdev *rdev,
+ struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp,
+ u32 hwtid, u32 qpid,
+ const char *func)
+{
+ int ret;
+
+ pr_debug("%s wr_wait %p hwtid %u qpid %u\n", func, wr_waitp, hwtid,
+ qpid);
+ c4iw_get_wr_wait(wr_waitp);
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret) {
+ c4iw_put_wr_wait(wr_waitp);
+ return ret;
+ }
+ return c4iw_wait_for_reply(rdev, wr_waitp, hwtid, qpid, func);
+}
+
enum db_state {
NORMAL = 0,
FLOW_CONTROL = 1,
@@ -268,6 +322,13 @@ struct c4iw_dev {
wait_queue_head_t wait;
};
+struct uld_ctx {
+ struct list_head entry;
+ struct cxgb4_lld_info lldi;
+ struct c4iw_dev *dev;
+ struct work_struct reg_work;
+};
+
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
return container_of(ibdev, struct c4iw_dev, ibdev);
@@ -310,7 +371,6 @@ static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
idr_preload_end();
}
- BUG_ON(ret == -ENOSPC);
return ret < 0 ? ret : 0;
}
@@ -394,6 +454,7 @@ struct c4iw_mr {
dma_addr_t mpl_addr;
u32 max_mpl_len;
u32 mpl_len;
+ struct c4iw_wr_wait *wr_waitp;
};
static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
@@ -407,6 +468,7 @@ struct c4iw_mw {
struct sk_buff *dereg_skb;
u64 kva;
struct tpt_attributes attr;
+ struct c4iw_wr_wait *wr_waitp;
};
static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
@@ -423,6 +485,7 @@ struct c4iw_cq {
spinlock_t comp_handler_lock;
atomic_t refcnt;
wait_queue_head_t wait;
+ struct c4iw_wr_wait *wr_waitp;
};
static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
@@ -480,10 +543,10 @@ struct c4iw_qp {
struct mutex mutex;
struct kref kref;
wait_queue_head_t wait;
- struct timer_list timer;
int sq_sig_all;
struct work_struct free_work;
struct c4iw_ucontext *ucontext;
+ struct c4iw_wr_wait *wr_waitp;
};
static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -537,8 +600,7 @@ static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
if (mm->key == key && mm->len == len) {
list_del_init(&mm->entry);
spin_unlock(&ucontext->mmap_lock);
- pr_debug("%s key 0x%x addr 0x%llx len %d\n",
- __func__, key,
+ pr_debug("key 0x%x addr 0x%llx len %d\n", key,
(unsigned long long)mm->addr, mm->len);
return mm;
}
@@ -551,8 +613,8 @@ static inline void insert_mmap(struct c4iw_ucontext *ucontext,
struct c4iw_mm_entry *mm)
{
spin_lock(&ucontext->mmap_lock);
- pr_debug("%s key 0x%x addr 0x%llx len %d\n",
- __func__, mm->key, (unsigned long long)mm->addr, mm->len);
+ pr_debug("key 0x%x addr 0x%llx len %d\n",
+ mm->key, (unsigned long long)mm->addr, mm->len);
list_add_tail(&mm->entry, &ucontext->mmaps);
spin_unlock(&ucontext->mmap_lock);
}
@@ -671,16 +733,14 @@ enum c4iw_mmid_state {
#define MPA_V2_IRD_ORD_MASK 0x3FFF
#define c4iw_put_ep(ep) { \
- pr_debug("put_ep (via %s:%u) ep %p refcnt %d\n", \
- __func__, __LINE__, \
+ pr_debug("put_ep ep %p refcnt %d\n", \
ep, kref_read(&((ep)->kref))); \
WARN_ON(kref_read(&((ep)->kref)) < 1); \
kref_put(&((ep)->kref), _c4iw_free_ep); \
}
#define c4iw_get_ep(ep) { \
- pr_debug("get_ep (via %s:%u) ep %p, refcnt %d\n", \
- __func__, __LINE__, \
+ pr_debug("get_ep ep %p, refcnt %d\n", \
ep, kref_read(&((ep)->kref))); \
kref_get(&((ep)->kref)); \
}
@@ -841,7 +901,7 @@ struct c4iw_ep_common {
struct mutex mutex;
struct sockaddr_storage local_addr;
struct sockaddr_storage remote_addr;
- struct c4iw_wr_wait wr_wait;
+ struct c4iw_wr_wait *wr_waitp;
unsigned long flags;
unsigned long history;
};
@@ -935,7 +995,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
-int c4iw_register_device(struct c4iw_dev *dev);
+void c4iw_register_device(struct work_struct *work);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void c4iw_cm_term(void);
@@ -961,6 +1021,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int c4iw_dealloc_mw(struct ib_mw *mw);
+void c4iw_dealloc(struct uld_ctx *ctx);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
@@ -990,7 +1051,6 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
-int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
void c4iw_flush_hw_cq(struct c4iw_cq *chp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
@@ -1018,5 +1078,6 @@ extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
+struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index c2fba76becd4..7e0eb201cc26 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -60,18 +60,18 @@ static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
u32 len, dma_addr_t data,
- int wait, struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
struct ulp_mem_io *req;
struct ulptx_sgl *sgl;
u8 wr_len;
int ret = 0;
- struct c4iw_wr_wait wr_wait;
addr &= 0x7FFFFFF;
- if (wait)
- c4iw_init_wr_wait(&wr_wait);
+ if (wr_waitp)
+ c4iw_init_wr_wait(wr_waitp);
wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
if (!skb) {
@@ -84,8 +84,8 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
req = __skb_put_zero(skb, wr_len);
INIT_ULPTX_WR(req, wr_len, 0, 0);
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
- (wait ? FW_WR_COMPL_F : 0));
- req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
+ (wr_waitp ? FW_WR_COMPL_F : 0));
+ req->wr.wr_lo = wr_waitp ? (__force __be64)(unsigned long)wr_waitp : 0L;
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
T5_ULP_MEMIO_ORDER_V(1) |
@@ -100,22 +100,21 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
sgl->len0 = cpu_to_be32(len);
sgl->addr0 = cpu_to_be64(data);
- ret = c4iw_ofld_send(rdev, skb);
- if (ret)
- return ret;
- if (wait)
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+ if (wr_waitp)
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
+ else
+ ret = c4iw_ofld_send(rdev, skb);
return ret;
}
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data, struct sk_buff *skb)
+ void *data, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
struct ulp_mem_io *req;
struct ulptx_idata *sc;
u8 wr_len, *to_dp, *from_dp;
int copy_len, num_wqe, i, ret = 0;
- struct c4iw_wr_wait wr_wait;
__be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
if (is_t4(rdev->lldi.adapter_type))
@@ -124,9 +123,9 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);
addr &= 0x7FFFFFF;
- pr_debug("%s addr 0x%x len %u\n", __func__, addr, len);
+ pr_debug("addr 0x%x len %u\n", addr, len);
num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
- c4iw_init_wr_wait(&wr_wait);
+ c4iw_init_wr_wait(wr_waitp);
for (i = 0; i < num_wqe; i++) {
copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
@@ -147,7 +146,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
if (i == (num_wqe-1)) {
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
FW_WR_COMPL_F);
- req->wr.wr_lo = (__force __be64)(unsigned long)&wr_wait;
+ req->wr.wr_lo = (__force __be64)(unsigned long)wr_waitp;
} else
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
req->wr.wr_mid = cpu_to_be32(
@@ -173,19 +172,23 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
if (copy_len % T4_ULPTX_MIN_IO)
memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
(copy_len % T4_ULPTX_MIN_IO));
- ret = c4iw_ofld_send(rdev, skb);
- skb = NULL;
+ if (i == (num_wqe-1))
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0,
+ __func__);
+ else
+ ret = c4iw_ofld_send(rdev, skb);
if (ret)
- return ret;
+ break;
+ skb = NULL;
len -= C4IW_MAX_INLINE_SIZE;
}
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
return ret;
}
static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data, struct sk_buff *skb)
+ void *data, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
u32 remain = len;
u32 dmalen;
@@ -208,7 +211,7 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
dmalen = T4_ULPTX_MAX_DMA;
remain -= dmalen;
ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
- !remain, skb);
+ skb, remain ? NULL : wr_waitp);
if (ret)
goto out;
addr += dmalen >> 5;
@@ -216,7 +219,8 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
daddr += dmalen;
}
if (remain)
- ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb);
+ ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb,
+ wr_waitp);
out:
dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
return ret;
@@ -227,23 +231,33 @@ out:
* If data is NULL, clear len byte of memory to zero.
*/
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data, struct sk_buff *skb)
+ void *data, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
- if (rdev->lldi.ulptx_memwrite_dsgl && use_dsgl) {
- if (len > inline_threshold) {
- if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) {
- pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
- pci_name(rdev->lldi.pdev));
- return _c4iw_write_mem_inline(rdev, addr, len,
- data, skb);
- } else {
- return 0;
- }
- } else
- return _c4iw_write_mem_inline(rdev, addr,
- len, data, skb);
- } else
- return _c4iw_write_mem_inline(rdev, addr, len, data, skb);
+ int ret;
+
+ if (!rdev->lldi.ulptx_memwrite_dsgl || !use_dsgl) {
+ ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
+ wr_waitp);
+ goto out;
+ }
+
+ if (len <= inline_threshold) {
+ ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
+ wr_waitp);
+ goto out;
+ }
+
+ ret = _c4iw_write_mem_dma(rdev, addr, len, data, skb, wr_waitp);
+ if (ret) {
+ pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
+ pci_name(rdev->lldi.pdev));
+ ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
+ wr_waitp);
+ }
+out:
+ return ret;
+
}
/*
@@ -257,7 +271,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
int bind_enabled, u32 zbva, u64 to,
u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
- struct sk_buff *skb)
+ struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
{
int err;
struct fw_ri_tpte tpt;
@@ -285,8 +299,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
mutex_unlock(&rdev->stats.lock);
*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
}
- pr_debug("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
- __func__, stag_state, type, pdid, stag_idx);
+ pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+ stag_state, type, pdid, stag_idx);
/* write TPT entry */
if (reset_tpt_entry)
@@ -311,7 +325,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
}
err = write_adapter_mem(rdev, stag_idx +
(rdev->lldi.vr->stag.start >> 5),
- sizeof(tpt), &tpt, skb);
+ sizeof(tpt), &tpt, skb, wr_waitp);
if (reset_tpt_entry) {
c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -323,45 +337,50 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
}
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
- u32 pbl_addr, u32 pbl_size)
+ u32 pbl_addr, u32 pbl_size, struct c4iw_wr_wait *wr_waitp)
{
int err;
- pr_debug("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
- __func__, pbl_addr, rdev->lldi.vr->pbl.start,
+ pr_debug("*pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+ pbl_addr, rdev->lldi.vr->pbl.start,
pbl_size);
- err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL);
+ err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL,
+ wr_waitp);
return err;
}
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
- u32 pbl_addr, struct sk_buff *skb)
+ u32 pbl_addr, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
- pbl_size, pbl_addr, skb);
+ pbl_size, pbl_addr, skb, wr_waitp);
}
-static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid)
+static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
+ struct c4iw_wr_wait *wr_waitp)
{
*stag = T4_STAG_UNSET;
return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
- 0UL, 0, 0, 0, 0, NULL);
+ 0UL, 0, 0, 0, 0, NULL, wr_waitp);
}
static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
- 0, skb);
+ 0, skb, wr_waitp);
}
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
- u32 pbl_size, u32 pbl_addr)
+ u32 pbl_size, u32 pbl_addr,
+ struct c4iw_wr_wait *wr_waitp)
{
*stag = T4_STAG_UNSET;
return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
- 0UL, 0, 0, pbl_size, pbl_addr, NULL);
+ 0UL, 0, 0, pbl_size, pbl_addr, NULL, wr_waitp);
}
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
@@ -372,7 +391,7 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
mhp->attr.stag = stag;
mmid = stag >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- pr_debug("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+ pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}
@@ -388,14 +407,15 @@ static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
mhp->attr.mw_bind_enable, mhp->attr.zbva,
mhp->attr.va_fbo, mhp->attr.len ?
mhp->attr.len : -1, shift - 12,
- mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL);
+ mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL,
+ mhp->wr_waitp);
if (ret)
return ret;
ret = finish_mem_reg(mhp, stag);
if (ret) {
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr, mhp->dereg_skb);
+ mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
mhp->dereg_skb = NULL;
}
return ret;
@@ -422,18 +442,24 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
int ret;
u32 stag = T4_STAG_UNSET;
- pr_debug("%s ib_pd %p\n", __func__, pd);
+ pr_debug("ib_pd %p\n", pd);
php = to_c4iw_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!mhp->wr_waitp) {
+ ret = -ENOMEM;
+ goto err_free_mhp;
+ }
+ c4iw_init_wr_wait(mhp->wr_waitp);
mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
if (!mhp->dereg_skb) {
ret = -ENOMEM;
- goto err0;
+ goto err_free_wr_wait;
}
mhp->rhp = rhp;
@@ -449,20 +475,22 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
FW_RI_STAG_NSMR, mhp->attr.perms,
mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
- NULL);
+ NULL, mhp->wr_waitp);
if (ret)
- goto err1;
+ goto err_free_skb;
ret = finish_mem_reg(mhp, stag);
if (ret)
- goto err2;
+ goto err_dereg_mem;
return &mhp->ibmr;
-err2:
+err_dereg_mem:
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr, mhp->dereg_skb);
-err1:
+ mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
+err_free_wr_wait:
+ c4iw_put_wr_wait(mhp->wr_waitp);
+err_free_skb:
kfree_skb(mhp->dereg_skb);
-err0:
+err_free_mhp:
kfree(mhp);
return ERR_PTR(ret);
}
@@ -473,13 +501,13 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
__be64 *pages;
int shift, n, len;
int i, k, entry;
- int err = 0;
+ int err = -ENOMEM;
struct scatterlist *sg;
struct c4iw_dev *rhp;
struct c4iw_pd *php;
struct c4iw_mr *mhp;
- pr_debug("%s ib_pd %p\n", __func__, pd);
+ pr_debug("ib_pd %p\n", pd);
if (length == ~0ULL)
return ERR_PTR(-EINVAL);
@@ -496,34 +524,31 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!mhp->wr_waitp)
+ goto err_free_mhp;
mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
- if (!mhp->dereg_skb) {
- kfree(mhp);
- return ERR_PTR(-ENOMEM);
- }
+ if (!mhp->dereg_skb)
+ goto err_free_wr_wait;
mhp->rhp = rhp;
mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
- if (IS_ERR(mhp->umem)) {
- err = PTR_ERR(mhp->umem);
- kfree_skb(mhp->dereg_skb);
- kfree(mhp);
- return ERR_PTR(err);
- }
+ if (IS_ERR(mhp->umem))
+ goto err_free_skb;
shift = mhp->umem->page_shift;
n = mhp->umem->nmap;
err = alloc_pbl(mhp, n);
if (err)
- goto err;
+ goto err_umem_release;
pages = (__be64 *) __get_free_page(GFP_KERNEL);
if (!pages) {
err = -ENOMEM;
- goto err_pbl;
+ goto err_pbl_free;
}
i = n = 0;
@@ -536,7 +561,8 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (i == PAGE_SIZE / sizeof *pages) {
err = write_pbl(&mhp->rhp->rdev,
pages,
- mhp->attr.pbl_addr + (n << 3), i);
+ mhp->attr.pbl_addr + (n << 3), i,
+ mhp->wr_waitp);
if (err)
goto pbl_done;
n += i;
@@ -547,12 +573,13 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (i)
err = write_pbl(&mhp->rhp->rdev, pages,
- mhp->attr.pbl_addr + (n << 3), i);
+ mhp->attr.pbl_addr + (n << 3), i,
+ mhp->wr_waitp);
pbl_done:
free_page((unsigned long) pages);
if (err)
- goto err_pbl;
+ goto err_pbl_free;
mhp->attr.pdid = php->pdid;
mhp->attr.zbva = 0;
@@ -563,17 +590,20 @@ pbl_done:
err = register_mem(rhp, php, mhp, shift);
if (err)
- goto err_pbl;
+ goto err_pbl_free;
return &mhp->ibmr;
-err_pbl:
+err_pbl_free:
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
-
-err:
+err_umem_release:
ib_umem_release(mhp->umem);
+err_free_skb:
kfree_skb(mhp->dereg_skb);
+err_free_wr_wait:
+ c4iw_put_wr_wait(mhp->wr_waitp);
+err_free_mhp:
kfree(mhp);
return ERR_PTR(err);
}
@@ -597,13 +627,19 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
if (!mhp)
return ERR_PTR(-ENOMEM);
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!mhp->wr_waitp) {
+ ret = -ENOMEM;
+ goto free_mhp;
+ }
+
mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
if (!mhp->dereg_skb) {
ret = -ENOMEM;
- goto free_mhp;
+ goto free_wr_wait;
}
- ret = allocate_window(&rhp->rdev, &stag, php->pdid);
+ ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
if (ret)
goto free_skb;
mhp->rhp = rhp;
@@ -616,13 +652,16 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
ret = -ENOMEM;
goto dealloc_win;
}
- pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+ pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
return &(mhp->ibmw);
dealloc_win:
- deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+ deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
+ mhp->wr_waitp);
free_skb:
kfree_skb(mhp->dereg_skb);
+free_wr_wait:
+ c4iw_put_wr_wait(mhp->wr_waitp);
free_mhp:
kfree(mhp);
return ERR_PTR(ret);
@@ -638,10 +677,12 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
rhp = mhp->rhp;
mmid = (mw->rkey) >> 8;
remove_handle(rhp, &rhp->mmidr, mmid);
- deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+ deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
+ mhp->wr_waitp);
kfree_skb(mhp->dereg_skb);
+ c4iw_put_wr_wait(mhp->wr_waitp);
kfree(mhp);
- pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+ pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
return 0;
}
@@ -671,23 +712,31 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
goto err;
}
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!mhp->wr_waitp) {
+ ret = -ENOMEM;
+ goto err_free_mhp;
+ }
+ c4iw_init_wr_wait(mhp->wr_waitp);
+
mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
length, &mhp->mpl_addr, GFP_KERNEL);
if (!mhp->mpl) {
ret = -ENOMEM;
- goto err_mpl;
+ goto err_free_wr_wait;
}
mhp->max_mpl_len = length;
mhp->rhp = rhp;
ret = alloc_pbl(mhp, max_num_sg);
if (ret)
- goto err1;
+ goto err_free_dma;
mhp->attr.pbl_size = max_num_sg;
ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
- mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ mhp->attr.pbl_size, mhp->attr.pbl_addr,
+ mhp->wr_waitp);
if (ret)
- goto err2;
+ goto err_free_pbl;
mhp->attr.pdid = php->pdid;
mhp->attr.type = FW_RI_STAG_NSMR;
mhp->attr.stag = stag;
@@ -696,21 +745,23 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
ret = -ENOMEM;
- goto err3;
+ goto err_dereg;
}
- pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+ pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
return &(mhp->ibmr);
-err3:
+err_dereg:
dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr, mhp->dereg_skb);
-err2:
+ mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
+err_free_pbl:
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
-err1:
+err_free_dma:
dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
-err_mpl:
+err_free_wr_wait:
+ c4iw_put_wr_wait(mhp->wr_waitp);
+err_free_mhp:
kfree(mhp);
err:
return ERR_PTR(ret);
@@ -744,7 +795,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
struct c4iw_mr *mhp;
u32 mmid;
- pr_debug("%s ib_mr %p\n", __func__, ib_mr);
+ pr_debug("ib_mr %p\n", ib_mr);
mhp = to_c4iw_mr(ib_mr);
rhp = mhp->rhp;
@@ -754,7 +805,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr, mhp->dereg_skb);
+ mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
if (mhp->attr.pbl_size)
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
@@ -762,7 +813,8 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
kfree((void *) (unsigned long) mhp->kva);
if (mhp->umem)
ib_umem_release(mhp->umem);
- pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
+ pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
+ c4iw_put_wr_wait(mhp->wr_waitp);
kfree(mhp);
return 0;
}
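
The mem.c hunks above rename the bare numbered error labels (err0/err1/err2/...) to labels that say what they undo (err_free_mhp, err_free_wr_wait, err_free_skb, ...) while threading the newly pre-allocated wr_waitp through the unwind path. A minimal sketch of that convention follows, using hypothetical foo names rather than the driver's real structures:

	#include <linux/err.h>
	#include <linux/skbuff.h>
	#include <linux/slab.h>

	struct foo {
		struct sk_buff *dereg_skb;
		void *wr_waitp;
	};

	static struct foo *foo_create(gfp_t gfp)
	{
		struct foo *f;
		int ret = -ENOMEM;

		f = kzalloc(sizeof(*f), gfp);
		if (!f)
			return ERR_PTR(-ENOMEM);

		f->wr_waitp = kzalloc(32, gfp);	/* stand-in for c4iw_alloc_wr_wait() */
		if (!f->wr_waitp)
			goto err_free_foo;

		f->dereg_skb = alloc_skb(128, gfp);
		if (!f->dereg_skb)
			goto err_free_wr_wait;

		return f;

		/* Each label names the allocation it releases, newest first. */
	err_free_wr_wait:
		kfree(f->wr_waitp);
	err_free_foo:
		kfree(f);
		return ERR_PTR(ret);
	}

Naming each label after the resource it releases means adding a new allocation (here the wr_waitp) only adds one label instead of forcing a renumber of every later one, which is exactly the churn visible in the hunks above.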
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 346e8334279a..1b5c6cd2ac4d 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -102,7 +102,7 @@ void _c4iw_free_ucontext(struct kref *kref)
ucontext = container_of(kref, struct c4iw_ucontext, kref);
rhp = to_c4iw_dev(ucontext->ibucontext.device);
- pr_debug("%s ucontext %p\n", __func__, ucontext);
+ pr_debug("ucontext %p\n", ucontext);
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm);
c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
@@ -113,7 +113,7 @@ static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
- pr_debug("%s context %p\n", __func__, context);
+ pr_debug("context %p\n", context);
c4iw_put_ucontext(ucontext);
return 0;
}
@@ -127,7 +127,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
int ret = 0;
struct c4iw_mm_entry *mm = NULL;
- pr_debug("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("ibdev %p\n", ibdev);
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context) {
ret = -ENOMEM;
@@ -185,7 +185,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
struct c4iw_ucontext *ucontext;
u64 addr;
- pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
+ pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
key, len);
if (vma->vm_start & (PAGE_SIZE-1))
@@ -251,7 +251,7 @@ static int c4iw_deallocate_pd(struct ib_pd *pd)
php = to_c4iw_pd(pd);
rhp = php->rhp;
- pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
+ pr_debug("ibpd %p pdid 0x%x\n", pd, php->pdid);
c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
mutex_lock(&rhp->rdev.stats.lock);
rhp->rdev.stats.pd.cur--;
@@ -268,7 +268,7 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
u32 pdid;
struct c4iw_dev *rhp;
- pr_debug("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("ibdev %p\n", ibdev);
rhp = (struct c4iw_dev *) ibdev;
pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
if (!pdid)
@@ -291,14 +291,14 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
mutex_unlock(&rhp->rdev.stats.lock);
- pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
+ pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
return &php->ibpd;
}
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
- pr_debug("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("ibdev %p\n", ibdev);
*pkey = 0;
return 0;
}
@@ -308,10 +308,11 @@ static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
{
struct c4iw_dev *dev;
- pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
- __func__, ibdev, port, index, gid);
+ pr_debug("ibdev %p, port %d, index %d, gid %p\n",
+ ibdev, port, index, gid);
+ if (!port)
+ return -EINVAL;
dev = to_c4iw_dev(ibdev);
- BUG_ON(port == 0);
memset(&(gid->raw[0]), 0, sizeof(gid->raw));
memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
return 0;
@@ -323,7 +324,7 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
struct c4iw_dev *dev;
- pr_debug("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("ibdev %p\n", ibdev);
if (uhw->inlen || uhw->outlen)
return -EINVAL;
@@ -364,7 +365,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
struct net_device *netdev;
struct in_device *inetdev;
- pr_debug("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("ibdev %p\n", ibdev);
dev = to_c4iw_dev(ibdev);
netdev = dev->rdev.lldi.ports[port-1];
@@ -406,7 +407,7 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
- pr_debug("%s dev 0x%p\n", __func__, dev);
+ pr_debug("dev 0x%p\n", dev);
return sprintf(buf, "%d\n",
CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
@@ -419,7 +420,7 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
struct ethtool_drvinfo info;
struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
- pr_debug("%s dev 0x%p\n", __func__, dev);
+ pr_debug("dev 0x%p\n", dev);
lldev->ethtool_ops->get_drvinfo(lldev, &info);
return sprintf(buf, "%s\n", info.driver);
}
@@ -429,7 +430,7 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
- pr_debug("%s dev 0x%p\n", __func__, dev);
+ pr_debug("dev 0x%p\n", dev);
return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
c4iw_dev->rdev.lldi.pdev->device);
}
@@ -521,7 +522,7 @@ static void get_dev_fw_str(struct ib_device *dev, char *str)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev);
- pr_debug("%s dev 0x%p\n", __func__, dev);
+ pr_debug("dev 0x%p\n", dev);
snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
@@ -530,13 +531,14 @@ static void get_dev_fw_str(struct ib_device *dev, char *str)
FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
}
-int c4iw_register_device(struct c4iw_dev *dev)
+void c4iw_register_device(struct work_struct *work)
{
int ret;
int i;
+ struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);
+ struct c4iw_dev *dev = ctx->dev;
- pr_debug("%s c4iw_dev %p\n", __func__, dev);
- BUG_ON(!dev->rdev.lldi.ports[0]);
+ pr_debug("c4iw_dev %p\n", dev);
strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
@@ -609,8 +611,10 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.get_dev_fw_str = get_dev_fw_str;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
- if (!dev->ibdev.iwcm)
- return -ENOMEM;
+ if (!dev->ibdev.iwcm) {
+ ret = -ENOMEM;
+ goto err_dealloc_ctx;
+ }
dev->ibdev.iwcm->connect = c4iw_connect;
dev->ibdev.iwcm->accept = c4iw_accept_cr;
@@ -625,27 +629,31 @@ int c4iw_register_device(struct c4iw_dev *dev)
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
- goto bail1;
+ goto err_kfree_iwcm;
for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
ret = device_create_file(&dev->ibdev.dev,
c4iw_class_attributes[i]);
if (ret)
- goto bail2;
+ goto err_unregister_device;
}
- return 0;
-bail2:
+ return;
+err_unregister_device:
ib_unregister_device(&dev->ibdev);
-bail1:
+err_kfree_iwcm:
kfree(dev->ibdev.iwcm);
- return ret;
+err_dealloc_ctx:
+ pr_err("%s - Failed registering iwarp device: %d\n",
+ pci_name(ctx->lldi.pdev), ret);
+ c4iw_dealloc(ctx);
+ return;
}
void c4iw_unregister_device(struct c4iw_dev *dev)
{
int i;
- pr_debug("%s c4iw_dev %p\n", __func__, dev);
+ pr_debug("c4iw_dev %p\n", dev);
for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
device_remove_file(&dev->ibdev.dev,
c4iw_class_attributes[i]);
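
c4iw_register_device() above now runs as a work item: it takes a work_struct, recovers the owning uld_ctx with container_of(), and on failure logs the error and tears the context down itself instead of returning an error code. A generic sketch of that deferred-registration shape (my_ctx, do_registration and cleanup_ctx are placeholders, not driver symbols):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct my_ctx {
		struct work_struct reg_work;
		/* ... device state ... */
	};

	static int do_registration(struct my_ctx *ctx) { return 0; }	/* stub */
	static void cleanup_ctx(struct my_ctx *ctx) { }		/* stub */

	static void my_register_work(struct work_struct *work)
	{
		/* Recover the owning context from the embedded work_struct. */
		struct my_ctx *ctx = container_of(work, struct my_ctx, reg_work);

		/* The worker has no return value, so it must clean up on failure. */
		if (do_registration(ctx))
			cleanup_ctx(ctx);
	}

	/* Caller side: arm the work once the low-level device reports "up". */
	static void my_device_up(struct my_ctx *ctx)
	{
		INIT_WORK(&ctx->reg_work, my_register_work);
		queue_work(system_long_wq, &ctx->reg_work);
	}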
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cb7fc0d35d1d..5ee7fe433136 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -194,13 +194,13 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
struct t4_cq *rcq, struct t4_cq *scq,
- struct c4iw_dev_ucontext *uctx)
+ struct c4iw_dev_ucontext *uctx,
+ struct c4iw_wr_wait *wr_waitp)
{
int user = (uctx != &rdev->uctx);
struct fw_ri_res_wr *res_wr;
struct fw_ri_res *res;
int wr_len;
- struct c4iw_wr_wait wr_wait;
struct sk_buff *skb;
int ret = 0;
int eqsize;
@@ -254,8 +254,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
ret = -ENOMEM;
goto free_sq;
}
- pr_debug("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
- __func__, wq->sq.queue,
+ pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
+ wq->sq.queue,
(unsigned long long)virt_to_phys(wq->sq.queue),
wq->rq.queue,
(unsigned long long)virt_to_phys(wq->rq.queue));
@@ -299,7 +299,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
FW_RI_RES_WR_NRES_V(2) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (uintptr_t)&wr_wait;
+ res_wr->cookie = (uintptr_t)wr_waitp;
res = res_wr->res;
res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
res->u.sqrq.op = FW_RI_RES_OP_WRITE;
@@ -352,17 +352,13 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
- c4iw_init_wr_wait(&wr_wait);
-
- ret = c4iw_ofld_send(rdev, skb);
- if (ret)
- goto free_dma;
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
+ c4iw_init_wr_wait(wr_waitp);
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
if (ret)
goto free_dma;
- pr_debug("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
- __func__, wq->sq.qid, wq->rq.qid, wq->db,
+ pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
+ wq->sq.qid, wq->rq.qid, wq->db,
wq->sq.bar2_va, wq->rq.bar2_va);
return 0;
@@ -693,7 +689,6 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
if (++p == (__be64 *)&sq->queue[sq->size])
p = (__be64 *)sq->queue;
}
- BUG_ON(rem < 0);
while (rem) {
*p = 0;
rem -= sizeof(*p);
@@ -724,12 +719,13 @@ static void free_qp_work(struct work_struct *work)
ucontext = qhp->ucontext;
rhp = qhp->rhp;
- pr_debug("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+ pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
if (ucontext)
c4iw_put_ucontext(ucontext);
+ c4iw_put_wr_wait(qhp->wr_waitp);
kfree(qhp);
}
@@ -738,19 +734,19 @@ static void queue_qp_free(struct kref *kref)
struct c4iw_qp *qhp;
qhp = container_of(kref, struct c4iw_qp, kref);
- pr_debug("%s qhp %p\n", __func__, qhp);
+ pr_debug("qhp %p\n", qhp);
queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
}
void c4iw_qp_add_ref(struct ib_qp *qp)
{
- pr_debug("%s ib_qp %p\n", __func__, qp);
+ pr_debug("ib_qp %p\n", qp);
kref_get(&to_c4iw_qp(qp)->kref);
}
void c4iw_qp_rem_ref(struct ib_qp *qp)
{
- pr_debug("%s ib_qp %p\n", __func__, qp);
+ pr_debug("ib_qp %p\n", qp);
kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
}
@@ -817,10 +813,12 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
t4_swcq_produce(cq);
spin_unlock_irqrestore(&schp->lock, flag);
- spin_lock_irqsave(&schp->comp_handler_lock, flag);
- (*schp->ibcq.comp_handler)(&schp->ibcq,
- schp->ibcq.cq_context);
- spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+ if (t4_clear_cq_armed(&schp->cq)) {
+ spin_lock_irqsave(&schp->comp_handler_lock, flag);
+ (*schp->ibcq.comp_handler)(&schp->ibcq,
+ schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+ }
}
static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
@@ -846,10 +844,12 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
t4_swcq_produce(cq);
spin_unlock_irqrestore(&rchp->lock, flag);
- spin_lock_irqsave(&rchp->comp_handler_lock, flag);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq,
- rchp->ibcq.cq_context);
- spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+ if (t4_clear_cq_armed(&rchp->cq)) {
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+ rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+ }
}
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
@@ -958,8 +958,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
break;
default:
- pr_debug("%s post of type=%d TBD!\n", __func__,
- wr->opcode);
+ pr_warn("%s post of type=%d TBD!\n", __func__,
+ wr->opcode);
err = -EINVAL;
}
if (err) {
@@ -980,8 +980,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
- pr_debug("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
- __func__,
+ pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
(unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
swsqe->opcode, swsqe->read_len);
wr = wr->next;
@@ -1057,8 +1056,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
wqe->recv.r2[1] = 0;
wqe->recv.r2[2] = 0;
wqe->recv.len16 = len16;
- pr_debug("%s cookie 0x%llx pidx %u\n",
- __func__,
+ pr_debug("cookie 0x%llx pidx %u\n",
(unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
t4_rq_produce(&qhp->wq, len16);
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
@@ -1218,7 +1216,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
struct sk_buff *skb;
struct terminate_message *term;
- pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+ pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
qhp->ep->hwtid);
skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
@@ -1255,33 +1253,36 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
int rq_flushed, sq_flushed;
unsigned long flag;
- pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
+ pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
- /* locking hierarchy: cq lock first, then qp lock. */
+ /* locking hierarchy: cqs lock first, then qp lock. */
spin_lock_irqsave(&rchp->lock, flag);
+ if (schp != rchp)
+ spin_lock(&schp->lock);
spin_lock(&qhp->lock);
if (qhp->wq.flushed) {
spin_unlock(&qhp->lock);
+ if (schp != rchp)
+ spin_unlock(&schp->lock);
spin_unlock_irqrestore(&rchp->lock, flag);
return;
}
qhp->wq.flushed = 1;
+ t4_set_wq_in_error(&qhp->wq);
c4iw_flush_hw_cq(rchp);
c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
- spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&rchp->lock, flag);
- /* locking hierarchy: cq lock first, then qp lock. */
- spin_lock_irqsave(&schp->lock, flag);
- spin_lock(&qhp->lock);
if (schp != rchp)
c4iw_flush_hw_cq(schp);
sq_flushed = c4iw_flush_sq(qhp);
+
spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&schp->lock, flag);
+ if (schp != rchp)
+ spin_unlock(&schp->lock);
+ spin_unlock_irqrestore(&rchp->lock, flag);
if (schp == rchp) {
if (t4_clear_cq_armed(&rchp->cq) &&
@@ -1315,8 +1316,8 @@ static void flush_qp(struct c4iw_qp *qhp)
rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
schp = to_c4iw_cq(qhp->ibqp.send_cq);
- t4_set_wq_in_error(&qhp->wq);
if (qhp->ibqp.uobject) {
+ t4_set_wq_in_error(&qhp->wq);
t4_set_cq_in_error(&rchp->cq);
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
@@ -1340,8 +1341,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
int ret;
struct sk_buff *skb;
- pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
- ep->hwtid);
+ pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
skb = skb_dequeue(&ep->com.ep_skb_list);
if (WARN_ON(!skb))
@@ -1357,23 +1357,20 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
wqe->flowid_len16 = cpu_to_be32(
FW_WR_FLOWID_V(ep->hwtid) |
FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
- wqe->cookie = (uintptr_t)&ep->com.wr_wait;
+ wqe->cookie = (uintptr_t)ep->com.wr_waitp;
wqe->u.fini.type = FW_RI_TYPE_FINI;
- ret = c4iw_ofld_send(&rhp->rdev, skb);
- if (ret)
- goto out;
- ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
- qhp->wq.sq.qid, __func__);
-out:
- pr_debug("%s ret %d\n", __func__, ret);
+ ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
+ qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
+
+ pr_debug("ret %d\n", ret);
return ret;
}
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
- pr_debug("%s p2p_type = %d\n", __func__, p2p_type);
+ pr_debug("p2p_type = %d\n", p2p_type);
memset(&init->u, 0, sizeof init->u);
switch (p2p_type) {
case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
@@ -1402,7 +1399,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
int ret;
struct sk_buff *skb;
- pr_debug("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
+ pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
@@ -1427,7 +1424,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
FW_WR_FLOWID_V(qhp->ep->hwtid) |
FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
- wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
+ wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp;
wqe->u.init.type = FW_RI_TYPE_INIT;
wqe->u.init.mpareqbit_p2ptype =
@@ -1464,18 +1461,14 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
if (qhp->attr.mpa_attr.initiator)
build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
- ret = c4iw_ofld_send(&rhp->rdev, skb);
- if (ret)
- goto err1;
-
- ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
- qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
+ ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
+ qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
if (!ret)
goto out;
-err1:
+
free_ird(rhp, qhp->attr.max_ird);
out:
- pr_debug("%s ret %d\n", __func__, ret);
+ pr_debug("ret %d\n", ret);
return ret;
}
@@ -1492,8 +1485,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
int free = 0;
struct c4iw_ep *ep = NULL;
- pr_debug("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
- __func__,
+ pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
(mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
@@ -1582,7 +1574,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
case C4IW_QP_STATE_RTS:
switch (attrs->next_state) {
case C4IW_QP_STATE_CLOSING:
- BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
t4_set_wq_in_error(&qhp->wq);
set_state(qhp, C4IW_QP_STATE_CLOSING);
ep = qhp->ep;
@@ -1680,7 +1671,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
goto out;
err:
- pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
+ pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
qhp->wq.sq.qid);
/* disassociate the LLP connection */
@@ -1691,7 +1682,6 @@ err:
set_state(qhp, C4IW_QP_STATE_ERROR);
free = 1;
abort = 1;
- BUG_ON(!ep);
flush_qp(qhp);
wake_up(&qhp->wait);
out:
@@ -1717,7 +1707,7 @@ out:
*/
if (free)
c4iw_put_ep(&ep->com);
- pr_debug("%s exit state %d\n", __func__, qhp->attr.state);
+ pr_debug("exit state %d\n", qhp->attr.state);
return ret;
}
@@ -1747,7 +1737,7 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
c4iw_qp_rem_ref(ib_qp);
- pr_debug("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
+ pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
return 0;
}
@@ -1766,7 +1756,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
- pr_debug("%s ib_pd %p\n", __func__, pd);
+ pr_debug("ib_pd %p\n", pd);
if (attrs->qp_type != IB_QPT_RC)
return ERR_PTR(-EINVAL);
@@ -1798,6 +1788,13 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
if (!qhp)
return ERR_PTR(-ENOMEM);
+
+ qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!qhp->wr_waitp) {
+ ret = -ENOMEM;
+ goto err_free_qhp;
+ }
+
qhp->wq.sq.size = sqsize;
qhp->wq.sq.memsize =
(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
@@ -1814,9 +1811,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
}
ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ qhp->wr_waitp);
if (ret)
- goto err1;
+ goto err_free_wr_wait;
attrs->cap.max_recv_wr = rqsize - 1;
attrs->cap.max_send_wr = sqsize - 1;
@@ -1847,35 +1845,35 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
if (ret)
- goto err2;
+ goto err_destroy_qp;
- if (udata) {
+ if (udata && ucontext) {
sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
if (!sq_key_mm) {
ret = -ENOMEM;
- goto err3;
+ goto err_remove_handle;
}
rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
if (!rq_key_mm) {
ret = -ENOMEM;
- goto err4;
+ goto err_free_sq_key;
}
sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
if (!sq_db_key_mm) {
ret = -ENOMEM;
- goto err5;
+ goto err_free_rq_key;
}
rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
if (!rq_db_key_mm) {
ret = -ENOMEM;
- goto err6;
+ goto err_free_sq_db_key;
}
if (t4_sq_onchip(&qhp->wq.sq)) {
ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
GFP_KERNEL);
if (!ma_sync_key_mm) {
ret = -ENOMEM;
- goto err7;
+ goto err_free_rq_db_key;
}
uresp.flags = C4IW_QPF_ONCHIP;
} else
@@ -1905,7 +1903,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
spin_unlock(&ucontext->mmap_lock);
ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
if (ret)
- goto err8;
+ goto err_free_ma_sync_key;
sq_key_mm->key = uresp.sq_key;
sq_key_mm->addr = qhp->wq.sq.phys_addr;
sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
@@ -1935,30 +1933,30 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->ucontext = ucontext;
}
qhp->ibqp.qp_num = qhp->wq.sq.qid;
- init_timer(&(qhp->timer));
INIT_LIST_HEAD(&qhp->db_fc_entry);
- pr_debug("%s sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
- __func__,
+ pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
return &qhp->ibqp;
-err8:
+err_free_ma_sync_key:
kfree(ma_sync_key_mm);
-err7:
+err_free_rq_db_key:
kfree(rq_db_key_mm);
-err6:
+err_free_sq_db_key:
kfree(sq_db_key_mm);
-err5:
+err_free_rq_key:
kfree(rq_key_mm);
-err4:
+err_free_sq_key:
kfree(sq_key_mm);
-err3:
+err_remove_handle:
remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-err2:
+err_destroy_qp:
destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-err1:
+err_free_wr_wait:
+ c4iw_put_wr_wait(qhp->wr_waitp);
+err_free_qhp:
kfree(qhp);
return ERR_PTR(ret);
}
@@ -1971,7 +1969,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
enum c4iw_qp_attr_mask mask = 0;
struct c4iw_qp_attributes attrs;
- pr_debug("%s ib_qp %p\n", __func__, ibqp);
+ pr_debug("ib_qp %p\n", ibqp);
/* iwarp does not support the RTR state */
if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
@@ -2017,7 +2015,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
- pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
+ pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}
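
The qp.c changes drop the on-stack struct c4iw_wr_wait in favour of a reference-counted object that create_qp()/rdma_init()/rdma_fini() pass down as wr_waitp and that c4iw_ref_send_wait() uses while the firmware request is outstanding. The fragment below is only a sketch of such a refcounted wait object, assuming a kref-plus-completion layout; it is not the driver's actual implementation.

	#include <linux/completion.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct wr_wait {
		struct completion done;
		struct kref kref;
		int ret;
	};

	static struct wr_wait *wr_wait_alloc(gfp_t gfp)
	{
		struct wr_wait *w = kmalloc(sizeof(*w), gfp);

		if (w) {
			kref_init(&w->kref);		/* the owner's reference */
			init_completion(&w->done);
			w->ret = 0;
		}
		return w;
	}

	static void wr_wait_release(struct kref *kref)
	{
		kfree(container_of(kref, struct wr_wait, kref));
	}

	static void wr_wait_put(struct wr_wait *w)
	{
		kref_put(&w->kref, wr_wait_release);
	}

	/* Reply path: record the result, wake the waiter, drop its reference. */
	static void handle_reply(struct wr_wait *w, int ret)
	{
		w->ret = ret;
		complete(&w->done);
		wr_wait_put(w);
	}

	/* Posting path: pin the object for the reply handler, then wait. */
	static int send_and_wait(struct wr_wait *w)
	{
		kref_get(&w->kref);	/* reference owned by handle_reply() */
		reinit_completion(&w->done);
		/* ... post the work request with (uintptr_t)w as the cookie ... */
		wait_for_completion(&w->done);
		return w->ret;
	}

Because the wait object is heap-allocated and only freed when the last reference drops, a reply that arrives after the posting side has given up still has valid memory to complete against, which an on-stack wait structure cannot guarantee.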
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 8ff0cbe5cb16..3cf25997ed2b 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -90,7 +90,7 @@ u32 c4iw_get_resource(struct c4iw_id_table *id_table)
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
- pr_debug("%s entry 0x%x\n", __func__, entry);
+ pr_debug("entry 0x%x\n", entry);
c4iw_id_free(id_table, entry);
}
@@ -141,7 +141,7 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
}
out:
mutex_unlock(&uctx->lock);
- pr_debug("%s qid 0x%x\n", __func__, qid);
+ pr_debug("qid 0x%x\n", qid);
mutex_lock(&rdev->stats.lock);
if (rdev->stats.qid.cur > rdev->stats.qid.max)
rdev->stats.qid.max = rdev->stats.qid.cur;
@@ -157,7 +157,7 @@ void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
return;
- pr_debug("%s qid 0x%x\n", __func__, qid);
+ pr_debug("qid 0x%x\n", qid);
entry->qid = qid;
mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->cqids);
@@ -215,7 +215,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
}
out:
mutex_unlock(&uctx->lock);
- pr_debug("%s qid 0x%x\n", __func__, qid);
+ pr_debug("qid 0x%x\n", qid);
mutex_lock(&rdev->stats.lock);
if (rdev->stats.qid.cur > rdev->stats.qid.max)
rdev->stats.qid.max = rdev->stats.qid.cur;
@@ -231,7 +231,7 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
return;
- pr_debug("%s qid 0x%x\n", __func__, qid);
+ pr_debug("qid 0x%x\n", qid);
entry->qid = qid;
mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->qpids);
@@ -254,7 +254,7 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp)
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
- pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ pr_debug("addr 0x%x size %d\n", (u32)addr, size);
mutex_lock(&rdev->stats.lock);
if (addr) {
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
@@ -268,7 +268,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
+ pr_debug("addr 0x%x size %d\n", addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock);
@@ -290,8 +290,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
while (pbl_start < pbl_top) {
pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
- pr_debug("%s failed to add PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
+ pr_debug("failed to add PBL chunk (%x/%x)\n",
+ pbl_start, pbl_chunk);
if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
pr_warn("Failed to add all PBL chunks (%x/%x)\n",
pbl_start, pbl_top - pbl_start);
@@ -299,8 +299,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
}
pbl_chunk >>= 1;
} else {
- pr_debug("%s added PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
+ pr_debug("added PBL chunk (%x/%x)\n",
+ pbl_start, pbl_chunk);
pbl_start += pbl_chunk;
}
}
@@ -322,7 +322,7 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
- pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
+ pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6);
if (!addr)
pr_warn_ratelimited("%s: Out of RQT memory\n",
pci_name(rdev->lldi.pdev));
@@ -339,7 +339,7 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6);
+ pr_debug("addr 0x%x size %d\n", addr, size << 6);
mutex_lock(&rdev->stats.lock);
rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
mutex_unlock(&rdev->stats.lock);
@@ -361,8 +361,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
while (rqt_start < rqt_top) {
rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
- pr_debug("%s failed to add RQT chunk (%x/%x)\n",
- __func__, rqt_start, rqt_chunk);
+ pr_debug("failed to add RQT chunk (%x/%x)\n",
+ rqt_start, rqt_chunk);
if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
pr_warn("Failed to add all RQT chunks (%x/%x)\n",
rqt_start, rqt_top - rqt_start);
@@ -370,8 +370,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
}
rqt_chunk >>= 1;
} else {
- pr_debug("%s added RQT chunk (%x/%x)\n",
- __func__, rqt_start, rqt_chunk);
+ pr_debug("added RQT chunk (%x/%x)\n",
+ rqt_start, rqt_chunk);
rqt_start += rqt_chunk;
}
}
@@ -391,7 +391,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
- pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ pr_debug("addr 0x%x size %d\n", (u32)addr, size);
if (addr) {
mutex_lock(&rdev->stats.lock);
rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
@@ -404,7 +404,7 @@ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
+ pr_debug("addr 0x%x size %d\n", addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
mutex_unlock(&rdev->stats.lock);
@@ -426,8 +426,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
while (start < top) {
chunk = min(top - start + 1, chunk);
if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
- pr_debug("%s failed to add OCQP chunk (%x/%x)\n",
- __func__, start, chunk);
+ pr_debug("failed to add OCQP chunk (%x/%x)\n",
+ start, chunk);
if (chunk <= 1024 << MIN_OCQP_SHIFT) {
pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
start, top - start);
@@ -435,8 +435,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
}
chunk >>= 1;
} else {
- pr_debug("%s added OCQP chunk (%x/%x)\n",
- __func__, start, chunk);
+ pr_debug("added OCQP chunk (%x/%x)\n",
+ start, chunk);
start += chunk;
}
}
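
Nearly every hunk in resource.c (and in the earlier cxgb4 files) drops the explicit "%s ..., __func__" prefix from pr_debug(). With dynamic debug enabled, the function name can be added back at run time via the control file's 'f' decorator flag, so hard-coding it in every format string is redundant; a module-wide prefix, where wanted, is conventionally handled once with pr_fmt(). A small, assumed example of the pr_fmt() approach (not necessarily what this driver defines):

	/*
	 * Defined before the first #include of the .c file so every pr_*()
	 * call in that file picks up the same prefix automatically.
	 */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/printk.h>
	#include <linux/types.h>

	static void example(u32 qid)
	{
		pr_debug("qid 0x%x\n", qid);	/* prefixed with the module name */
	}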
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index e765c00303cd..e9ea94268d51 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -171,7 +171,7 @@ struct t4_cqe {
__be32 msn;
} rcqe;
struct {
- u32 stag;
+ __be32 stag;
u16 nada2;
u16 cidx;
} scqe;
@@ -425,7 +425,6 @@ static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
static inline void t4_sq_consume(struct t4_wq *wq)
{
- BUG_ON(wq->sq.in_use < 1);
if (wq->sq.cidx == wq->sq.flush_cidx)
wq->sq.flush_cidx = -1;
wq->sq.in_use--;
@@ -466,14 +465,12 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
wmb();
if (wq->sq.bar2_va) {
if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
- pr_debug("%s: WC wq->sq.pidx = %d\n",
- __func__, wq->sq.pidx);
+ pr_debug("WC wq->sq.pidx = %d\n", wq->sq.pidx);
pio_copy((u64 __iomem *)
(wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
(u64 *)wqe);
} else {
- pr_debug("%s: DB wq->sq.pidx = %d\n",
- __func__, wq->sq.pidx);
+ pr_debug("DB wq->sq.pidx = %d\n", wq->sq.pidx);
writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
wq->sq.bar2_va + SGE_UDB_KDOORBELL);
}
@@ -493,14 +490,12 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
wmb();
if (wq->rq.bar2_va) {
if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
- pr_debug("%s: WC wq->rq.pidx = %d\n",
- __func__, wq->rq.pidx);
+ pr_debug("WC wq->rq.pidx = %d\n", wq->rq.pidx);
pio_copy((u64 __iomem *)
(wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
(void *)wqe);
} else {
- pr_debug("%s: DB wq->rq.pidx = %d\n",
- __func__, wq->rq.pidx);
+ pr_debug("DB wq->rq.pidx = %d\n", wq->rq.pidx);
writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
wq->rq.bar2_va + SGE_UDB_KDOORBELL);
}
@@ -601,10 +596,11 @@ static inline void t4_swcq_produce(struct t4_cq *cq)
{
cq->sw_in_use++;
if (cq->sw_in_use == cq->size) {
- pr_debug("%s cxgb4 sw cq overflow cqid %u\n",
- __func__, cq->cqid);
+ pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
+ __func__, cq->cqid);
cq->error = 1;
- BUG_ON(1);
+ cq->sw_in_use--;
+ return;
}
if (++cq->sw_pidx == cq->size)
cq->sw_pidx = 0;
@@ -612,7 +608,6 @@ static inline void t4_swcq_produce(struct t4_cq *cq)
static inline void t4_swcq_consume(struct t4_cq *cq)
{
- BUG_ON(cq->sw_in_use < 1);
cq->sw_in_use--;
if (++cq->sw_cidx == cq->size)
cq->sw_cidx = 0;
@@ -658,7 +653,6 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
ret = -EOVERFLOW;
cq->error = 1;
pr_err("cq overflow cqid %u\n", cq->cqid);
- BUG_ON(1);
} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
/* Ensure CQE is flushed to memory */
@@ -673,10 +667,9 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
if (cq->sw_in_use == cq->size) {
- pr_debug("%s cxgb4 sw cq overflow cqid %u\n",
- __func__, cq->cqid);
+ pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
+ __func__, cq->cqid);
cq->error = 1;
- BUG_ON(1);
return NULL;
}
if (cq->sw_in_use)
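
The t4.h changes remove BUG_ON() calls and turn the software-CQ overflow case into a recoverable error: warn, mark the CQ in error, and back out the produce rather than halting the machine. A generic sketch of that conversion on a hypothetical ring (not the driver's t4_cq):

	#include <linux/printk.h>
	#include <linux/types.h>

	struct ring {
		u32 size;
		u32 in_use;
		u32 pidx;
		int error;
	};

	/* Before: BUG_ON(r->in_use == r->size) would panic the whole host. */
	static inline void ring_produce(struct ring *r)
	{
		r->in_use++;
		if (r->in_use == r->size) {
			/* Recoverable path: flag the error and refuse the slot. */
			pr_warn("ring overflow, size %u\n", r->size);
			r->error = 1;
			r->in_use--;
			return;
		}
		if (++r->pidx == r->size)
			r->pidx = 0;
	}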
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 010c709ba3bb..58c531db4f4a 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -675,8 +675,8 @@ struct fw_ri_fr_nsmr_tpte_wr {
__u16 wrid;
__u8 r1[3];
__u8 len16;
- __u32 r2;
- __u32 stag;
+ __be32 r2;
+ __be32 stag;
struct fw_ri_tpte tpte;
__u64 pbl[2];
};
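
Switching stag (and the adjacent r2 field) from u32/__u32 to __be32 documents that these words are carried big-endian, so sparse can flag any access that skips the byte swap. A short illustration of what the annotation buys, using an assumed structure name:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct hw_msg {
		__be32 stag;		/* big-endian, as the hardware expects */
	};

	static void fill_msg(struct hw_msg *msg, u32 stag)
	{
		/* Correct: explicit CPU -> big-endian conversion. */
		msg->stag = cpu_to_be32(stag);

		/*
		 * "msg->stag = stag;" would still compile, but with the __be32
		 * annotation "make C=1" (sparse) reports a restricted-type
		 * warning that a plain u32 field would silently hide.
		 */
	}

	static u32 read_stag(const struct hw_msg *msg)
	{
		return be32_to_cpu(msg->stag);	/* big-endian -> CPU order */
	}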
diff --git a/drivers/infiniband/hw/hfi1/aspm.h b/drivers/infiniband/hw/hfi1/aspm.h
index 522b40ed9937..e8133870ee87 100644
--- a/drivers/infiniband/hw/hfi1/aspm.h
+++ b/drivers/infiniband/hw/hfi1/aspm.h
@@ -218,9 +218,9 @@ unlock:
}
/* Timer function for re-enabling ASPM in the absence of interrupt activity */
-static inline void aspm_ctx_timer_function(unsigned long data)
+static inline void aspm_ctx_timer_function(struct timer_list *t)
{
- struct hfi1_ctxtdata *rcd = (struct hfi1_ctxtdata *)data;
+ struct hfi1_ctxtdata *rcd = from_timer(rcd, t, aspm_timer);
unsigned long flags;
spin_lock_irqsave(&rcd->aspm_lock, flags);
@@ -281,8 +281,7 @@ static inline void aspm_enable_all(struct hfi1_devdata *dd)
static inline void aspm_ctx_init(struct hfi1_ctxtdata *rcd)
{
spin_lock_init(&rcd->aspm_lock);
- setup_timer(&rcd->aspm_timer, aspm_ctx_timer_function,
- (unsigned long)rcd);
+ timer_setup(&rcd->aspm_timer, aspm_ctx_timer_function, 0);
rcd->aspm_intr_supported = rcd->dd->aspm_supported &&
aspm_mode == ASPM_MODE_DYNAMIC &&
rcd->ctxt < rcd->dd->first_dyn_alloc_ctxt;
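
The aspm.h hunk, like several chip.c hunks below, converts from setup_timer() with an unsigned long data cookie to the timer_setup()/from_timer() API, where the callback receives the timer itself and derives its container. A minimal sketch with a hypothetical struct my_dev:

	#include <linux/jiffies.h>
	#include <linux/timer.h>

	struct my_dev {
		struct timer_list poll_timer;
		int counter;
	};

	/* New-style callback: gets the timer, recovers the owner via from_timer(). */
	static void my_poll(struct timer_list *t)
	{
		struct my_dev *dev = from_timer(dev, t, poll_timer);

		dev->counter++;
		mod_timer(&dev->poll_timer, jiffies + HZ);
	}

	static void my_dev_start(struct my_dev *dev)
	{
		/*
		 * Replaces setup_timer(&dev->poll_timer, my_poll, (unsigned long)dev).
		 * No opaque data pointer is stored, which is why the ".data" checks
		 * in the hfi1 diff become ".function" checks.
		 */
		timer_setup(&dev->poll_timer, my_poll, 0);
		mod_timer(&dev->poll_timer, jiffies + HZ);
	}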
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 0be42787759f..4f057e8ffe50 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1036,7 +1036,6 @@ static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
u8 *device_rev);
-static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
u8 *tx_polarity_inversion,
@@ -5538,9 +5537,9 @@ static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
* associated with them.
*/
#define RCVERR_CHECK_TIME 10
-static void update_rcverr_timer(unsigned long opaque)
+static void update_rcverr_timer(struct timer_list *t)
{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
+ struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
struct hfi1_pportdata *ppd = dd->pport;
u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
@@ -5559,7 +5558,7 @@ static void update_rcverr_timer(unsigned long opaque)
static int init_rcverr(struct hfi1_devdata *dd)
{
- setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
+ timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
/* Assume the hardware counter has been reset */
dd->rcv_ovfl_cnt = 0;
return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
@@ -5567,9 +5566,8 @@ static int init_rcverr(struct hfi1_devdata *dd)
static void free_rcverr(struct hfi1_devdata *dd)
{
- if (dd->rcverr_timer.data)
+ if (dd->rcverr_timer.function)
del_timer_sync(&dd->rcverr_timer);
- dd->rcverr_timer.data = 0;
}
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
@@ -6520,12 +6518,11 @@ static void _dc_start(struct hfi1_devdata *dd)
if (!dd->dc_shutdown)
return;
- /* Take the 8051 out of reset */
- write_csr(dd, DC_DC8051_CFG_RST, 0ull);
- /* Wait until 8051 is ready */
- if (wait_fm_ready(dd, TIMEOUT_8051_START))
- dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
- __func__);
+ /*
+ * Take the 8051 out of reset, wait until 8051 is ready, and set host
+ * version bit.
+ */
+ release_and_wait_ready_8051_firmware(dd);
/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
write_csr(dd, DCC_CFG_RESET, 0x10);
@@ -6819,7 +6816,8 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
rcd = hfi1_rcd_get_by_index(dd, i);
/* Ensure all non-user contexts(including vnic) are enabled */
- if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER)) {
+ if (!rcd ||
+ (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
hfi1_rcd_put(rcd);
continue;
}
@@ -7199,27 +7197,6 @@ static int lcb_to_port_ltp(int lcb_crc)
return port_ltp;
}
-/*
- * Our neighbor has indicated that we are allowed to act as a fabric
- * manager, so place the full management partition key in the second
- * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
- * that we should already have the limited management partition key in
- * array element 1, and also that the port is not yet up when
- * add_full_mgmt_pkey() is invoked.
- */
-static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
-
- /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
- if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
- dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
- __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
- ppd->pkeys[2] = FULL_MGMT_P_KEY;
- (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
- hfi1_event_pkey_change(ppd->dd, ppd->port);
-}
-
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
if (ppd->pkeys[2] != 0) {
@@ -7416,11 +7393,7 @@ void handle_verify_cap(struct work_struct *work)
&partner_supported_crc);
read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
read_remote_device_id(dd, &device_id, &device_rev);
- /*
- * And the 'MgmtAllowed' information, which is exchanged during
- * LNI, is also be available at this point.
- */
- read_mgmt_allowed(dd, &ppd->mgmt_allowed);
+
/* print the active widths */
get_link_widths(dd, &active_tx, &active_rx);
dd_dev_info(dd,
@@ -7548,9 +7521,6 @@ void handle_verify_cap(struct work_struct *work)
write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
set_8051_lcb_access(dd);
- if (ppd->mgmt_allowed)
- add_full_mgmt_pkey(ppd);
-
/* tell the 8051 to go to LinkUp */
set_link_state(ppd, HLS_GOING_UP);
}
@@ -8124,8 +8094,7 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
rcd = hfi1_rcd_get_by_index(dd, source);
if (rcd) {
/* Check for non-user contexts, including vnic */
- if ((source < dd->first_dyn_alloc_ctxt) ||
- (rcd->sc && (rcd->sc->type == SC_KERNEL)))
+ if (source < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
rcd->do_interrupt(rcd, 0);
else
handle_user_interrupt(rcd);
@@ -8155,8 +8124,8 @@ static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
rcd = hfi1_rcd_get_by_index(dd, source);
if (rcd) {
/* only pay attention to user urgent interrupts */
- if ((source >= dd->first_dyn_alloc_ctxt) &&
- (!rcd->sc || (rcd->sc->type == SC_USER)))
+ if (source >= dd->first_dyn_alloc_ctxt &&
+ !rcd->is_vnic)
handle_user_interrupt(rcd);
hfi1_rcd_put(rcd);
@@ -8595,30 +8564,23 @@ int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
}
/*
+ * If the 8051 is in reset mode (dd->dc_shutdown == 1), this function
+ * will still continue executing.
+ *
* Returns:
* < 0 = Linux error, not able to get access
* > 0 = 8051 command RETURN_CODE
*/
-static int do_8051_command(
- struct hfi1_devdata *dd,
- u32 type,
- u64 in_data,
- u64 *out_data)
+static int _do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
+ u64 *out_data)
{
u64 reg, completed;
int return_code;
unsigned long timeout;
+ lockdep_assert_held(&dd->dc8051_lock);
hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
- mutex_lock(&dd->dc8051_lock);
-
- /* We can't send any commands to the 8051 if it's in reset */
- if (dd->dc_shutdown) {
- return_code = -ENODEV;
- goto fail;
- }
-
/*
* If an 8051 host command timed out previously, then the 8051 is
* stuck.
@@ -8719,6 +8681,29 @@ static int do_8051_command(
write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
fail:
+ return return_code;
+}
+
+/*
+ * Returns:
+ * < 0 = Linux error, not able to get access
+ * > 0 = 8051 command RETURN_CODE
+ */
+static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
+ u64 *out_data)
+{
+ int return_code;
+
+ mutex_lock(&dd->dc8051_lock);
+ /* We can't send any commands to the 8051 if it's in reset */
+ if (dd->dc_shutdown) {
+ return_code = -ENODEV;
+ goto fail;
+ }
+
+ return_code = _do_8051_command(dd, type, in_data, out_data);
+
+fail:
mutex_unlock(&dd->dc8051_lock);
return return_code;
}
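
do_8051_command() is split into _do_8051_command(), which asserts dc8051_lock via lockdep_assert_held(), and a thin wrapper that takes the mutex, so callers already holding the lock (the new _load_8051_config() and write_host_interface_version() paths) can reuse the body without recursive locking. The general shape of that split, reduced to placeholder names:

	#include <linux/lockdep.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(foo_lock);

	/* Core helper: caller must hold foo_lock; lockdep verifies it in debug builds. */
	static int __foo_do(void)
	{
		lockdep_assert_held(&foo_lock);
		/* ... the actual command sequence runs here ... */
		return 0;
	}

	/* Public entry point: takes the lock, then calls the locked core. */
	static int foo_do(void)
	{
		int ret;

		mutex_lock(&foo_lock);
		ret = __foo_do();
		mutex_unlock(&foo_lock);
		return ret;
	}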
@@ -8728,16 +8713,17 @@ static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
}
-int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
- u8 lane_id, u32 config_data)
+static int _load_8051_config(struct hfi1_devdata *dd, u8 field_id,
+ u8 lane_id, u32 config_data)
{
u64 data;
int ret;
+ lockdep_assert_held(&dd->dc8051_lock);
data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
| (u64)config_data << LOAD_DATA_DATA_SHIFT;
- ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
+ ret = _do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd,
"load 8051 config: field id %d, lane %d, err %d\n",
@@ -8746,6 +8732,18 @@ int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
return ret;
}
+int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
+ u8 lane_id, u32 config_data)
+{
+ int return_code;
+
+ mutex_lock(&dd->dc8051_lock);
+ return_code = _load_8051_config(dd, field_id, lane_id, config_data);
+ mutex_unlock(&dd->dc8051_lock);
+
+ return return_code;
+}
+
/*
* Read the 8051 firmware "registers". Use the RAM directly. Always
* set the result, even on error.
@@ -8861,13 +8859,14 @@ int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
u32 frame;
u32 mask;
+ lockdep_assert_held(&dd->dc8051_lock);
mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
/* Clear, then set field */
frame &= ~mask;
frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
- return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
- frame);
+ return _load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
+ frame);
}
void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
@@ -8932,14 +8931,6 @@ static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
*enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
}
-static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
-{
- u32 frame;
-
- read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
- *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
-}
-
static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
{
read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
@@ -9161,25 +9152,6 @@ static int do_quick_linkup(struct hfi1_devdata *dd)
}
/*
- * Set the SerDes to internal loopback mode.
- * Returns 0 on success, -errno on error.
- */
-static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
-{
- int ret;
-
- ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
- if (ret == HCMD_SUCCESS)
- return 0;
- dd_dev_err(dd,
- "Set physical link state to SerDes Loopback failed with return %d\n",
- ret);
- if (ret >= 0)
- ret = -EINVAL;
- return ret;
-}
-
-/*
* Do all special steps to set up loopback.
*/
static int init_loopback(struct hfi1_devdata *dd)
@@ -9204,13 +9176,11 @@ static int init_loopback(struct hfi1_devdata *dd)
return 0;
}
- /* handle serdes loopback */
- if (loopback == LOOPBACK_SERDES) {
- /* internal serdes loopack needs quick linkup on RTL */
- if (dd->icode == ICODE_RTL_SILICON)
- quick_linkup = 1;
- return set_serdes_loopback_mode(dd);
- }
+ /*
+ * SerDes loopback init sequence is handled in set_local_link_attributes
+ */
+ if (loopback == LOOPBACK_SERDES)
+ return 0;
/* LCB loopback - handled at poll time */
if (loopback == LOOPBACK_LCB) {
@@ -9269,7 +9239,7 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
u8 tx_polarity_inversion;
u8 rx_polarity_inversion;
int ret;
-
+ u32 misc_bits = 0;
/* reset our fabric serdes to clear any lingering problems */
fabric_serdes_reset(dd);
@@ -9315,7 +9285,14 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
if (ret != HCMD_SUCCESS)
goto set_local_link_attributes_fail;
- ret = write_vc_local_link_width(dd, 0, 0,
+ /*
+ * SerDes loopback init sequence requires
+ * setting bit 0 of MISC_CONFIG_BITS
+ */
+ if (loopback == LOOPBACK_SERDES)
+ misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
+
+ ret = write_vc_local_link_width(dd, misc_bits, 0,
opa_to_vc_link_widths(
ppd->link_width_enabled));
if (ret != HCMD_SUCCESS)
@@ -9809,9 +9786,9 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
cancel_delayed_work_sync(&ppd->start_link_work);
ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
- set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
- OPA_LINKDOWN_REASON_SMA_DISABLED);
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
+ set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
+ OPA_LINKDOWN_REASON_REBOOT);
set_link_state(ppd, HLS_DN_OFFLINE);
/* disable the port */
@@ -9952,7 +9929,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
goto unimplemented;
case HFI1_IB_CFG_OP_VLS:
- val = ppd->vls_operational;
+ val = ppd->actual_vls_operational;
break;
case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
@@ -9967,7 +9944,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
val = ppd->phy_error_threshold;
break;
case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- val = dd->link_default;
+ val = HLS_DEFAULT;
break;
case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
@@ -10170,6 +10147,10 @@ static const char * const state_complete_reasons[] = {
[0x33] =
"Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
[0x34] = tx_out_of_policy,
+ [0x35] = "Negotiated link width is mutually exclusive",
+ [0x36] =
+ "Timed out before receiving verifycap frames in VerifyCap.Exchange",
+ [0x37] = "Unable to resolve secure data exchange",
};
static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
@@ -10298,9 +10279,6 @@ static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
- /* adjust ppd->statusp, if needed */
- update_statusp(ppd, IB_PORT_DOWN);
-
dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
}
@@ -10382,6 +10360,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
force_logical_link_state_down(ppd);
ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+ update_statusp(ppd, IB_PORT_DOWN);
/*
* The LNI has a mandatory wait time after the physical state
@@ -10569,7 +10548,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
orig_new_state = state;
if (state == HLS_DN_DOWNDEF)
- state = dd->link_default;
+ state = HLS_DEFAULT;
/* interpret poll -> poll as a link bounce */
poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
@@ -10643,6 +10622,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
handle_linkup_change(dd, 1);
ppd->host_link_state = HLS_UP_INIT;
+ update_statusp(ppd, IB_PORT_INIT);
break;
case HLS_UP_ARMED:
if (ppd->host_link_state != HLS_UP_INIT)
@@ -10664,6 +10644,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
break;
}
ppd->host_link_state = HLS_UP_ARMED;
+ update_statusp(ppd, IB_PORT_ARMED);
/*
* The simulator does not currently implement SMA messages,
* so neighbor_normal is not set. Set it here when we first
@@ -10686,6 +10667,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
/* tell all engines to go running */
sdma_all_running(dd);
ppd->host_link_state = HLS_UP_ACTIVE;
+ update_statusp(ppd, IB_PORT_ACTIVE);
/* Signal the IB layer that the port has went active */
event.device = &dd->verbs_dev.rdi.ibdev;
@@ -12089,9 +12071,8 @@ static void free_cntrs(struct hfi1_devdata *dd)
struct hfi1_pportdata *ppd;
int i;
- if (dd->synth_stats_timer.data)
+ if (dd->synth_stats_timer.function)
del_timer_sync(&dd->synth_stats_timer);
- dd->synth_stats_timer.data = 0;
ppd = (struct hfi1_pportdata *)(dd + 1);
for (i = 0; i < dd->num_pports; i++, ppd++) {
kfree(ppd->cntrs);
@@ -12367,9 +12348,9 @@ static void do_update_synth_timer(struct work_struct *work)
}
}
-static void update_synth_timer(unsigned long opaque)
+static void update_synth_timer(struct timer_list *t)
{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
+ struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
@@ -12387,8 +12368,7 @@ static int init_cntrs(struct hfi1_devdata *dd)
const int bit_type_32_sz = strlen(bit_type_32);
/* set up the stats timer; the add_timer is done at the end */
- setup_timer(&dd->synth_stats_timer, update_synth_timer,
- (unsigned long)dd);
+ timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
/***********************/
/* per device counters */
@@ -12701,6 +12681,17 @@ const char *opa_pstate_name(u32 pstate)
return "unknown";
}
+/**
+ * update_statusp - Update userspace status flag
+ * @ppd: Port data structure
+ * @state: port state information
+ *
+ * Actual port status is determined by the host_link_state value
+ * in the ppd.
+ *
+ * host_link_state MUST be updated before updating the user space
+ * statusp.
+ */
static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
{
/*
@@ -12726,9 +12717,11 @@ static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
break;
}
}
+ dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
+ opa_lstate_name(state), state);
}
-/*
+/**
* wait_logical_linkstate - wait for an IB link state change to occur
* @ppd: port device
* @state: the state to wait for
@@ -12759,11 +12752,6 @@ static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
msleep(20);
}
- update_statusp(ppd, state);
- dd_dev_info(ppd->dd,
- "logical state changed to %s (0x%x)\n",
- opa_lstate_name(state),
- state);
return 0;
}
@@ -12910,6 +12898,32 @@ int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
return ret;
}
+/**
+ * get_int_mask - get 64 bit int mask
+ * @dd - the devdata
+ * @i - the csr (relative to CCE_INT_MASK)
+ *
+ * Returns the mask with the urgent interrupt mask
+ * bit clear for kernel receive contexts.
+ */
+static u64 get_int_mask(struct hfi1_devdata *dd, u32 i)
+{
+ u64 mask = U64_MAX; /* default to no change */
+
+ if (i >= (IS_RCVURGENT_START / 64) && i < (IS_RCVURGENT_END / 64)) {
+ int j = (i - (IS_RCVURGENT_START / 64)) * 64;
+ int k = !j ? IS_RCVURGENT_START % 64 : 0;
+
+ if (j)
+ j -= IS_RCVURGENT_START % 64;
+ /* j = 0..dd->first_dyn_alloc_ctxt - 1,k = 0..63 */
+ for (; j < dd->first_dyn_alloc_ctxt && k < 64; j++, k++)
+ /* convert to bit in mask and clear */
+ mask &= ~BIT_ULL(k);
+ }
+ return mask;
+}
+
/* ========================================================================= */
/*
@@ -12923,9 +12937,12 @@ void set_intr_state(struct hfi1_devdata *dd, u32 enable)
* In HFI, the mask needs to be 1 to allow interrupts.
*/
if (enable) {
- /* enable all interrupts */
- for (i = 0; i < CCE_NUM_INT_CSRS; i++)
- write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
+ /* enable all interrupts but urgent on kernel contexts */
+ for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
+ u64 mask = get_int_mask(dd, i);
+
+ write_csr(dd, CCE_INT_MASK + (8 * i), mask);
+ }
init_qsfp_int(dd);
} else {
@@ -12980,7 +12997,7 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
if (!me->arg) /* => no irq, no affinity */
continue;
hfi1_put_irq_affinity(dd, me);
- free_irq(me->irq, me->arg);
+ pci_free_irq(dd->pcidev, i, me->arg);
}
/* clean structures */
@@ -12990,7 +13007,7 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
} else {
/* INTx */
if (dd->requested_intx_irq) {
- free_irq(dd->pcidev->irq, dd);
+ pci_free_irq(dd->pcidev, 0, dd);
dd->requested_intx_irq = 0;
}
disable_intx(dd->pcidev);
@@ -13049,10 +13066,8 @@ static int request_intx_irq(struct hfi1_devdata *dd)
{
int ret;
- snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
- dd->unit);
- ret = request_irq(dd->pcidev->irq, general_interrupt,
- IRQF_SHARED, dd->intx_name, dd);
+ ret = pci_request_irq(dd->pcidev, 0, general_interrupt, NULL, dd,
+ DRIVER_NAME "_%d", dd->unit);
if (ret)
dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
ret);
@@ -13074,7 +13089,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
first_sdma = last_general;
last_sdma = first_sdma + dd->num_sdma;
first_rx = last_sdma;
- last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
+ last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
/* VNIC MSIx interrupts get mapped when VNIC contexts are created */
dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
@@ -13095,13 +13110,14 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
int idx;
struct hfi1_ctxtdata *rcd = NULL;
struct sdma_engine *sde = NULL;
+ char name[MAX_NAME_SIZE];
- /* obtain the arguments to request_irq */
+ /* obtain the arguments to pci_request_irq */
if (first_general <= i && i < last_general) {
idx = i - first_general;
handler = general_interrupt;
arg = dd;
- snprintf(me->name, sizeof(me->name),
+ snprintf(name, sizeof(name),
DRIVER_NAME "_%d", dd->unit);
err_info = "general";
me->type = IRQ_GENERAL;
@@ -13110,14 +13126,14 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
sde = &dd->per_sdma[idx];
handler = sdma_interrupt;
arg = sde;
- snprintf(me->name, sizeof(me->name),
+ snprintf(name, sizeof(name),
DRIVER_NAME "_%d sdma%d", dd->unit, idx);
err_info = "sdma";
remap_sdma_interrupts(dd, idx, i);
me->type = IRQ_SDMA;
} else if (first_rx <= i && i < last_rx) {
idx = i - first_rx;
- rcd = hfi1_rcd_get_by_index(dd, idx);
+ rcd = hfi1_rcd_get_by_index_safe(dd, idx);
if (rcd) {
/*
* Set the interrupt register and mask for this
@@ -13129,7 +13145,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
handler = receive_context_interrupt;
thread = receive_context_thread;
arg = rcd;
- snprintf(me->name, sizeof(me->name),
+ snprintf(name, sizeof(name),
DRIVER_NAME "_%d kctxt%d",
dd->unit, idx);
err_info = "receive context";
@@ -13150,18 +13166,10 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
if (!arg)
continue;
/* make sure the name is terminated */
- me->name[sizeof(me->name) - 1] = 0;
+ name[sizeof(name) - 1] = 0;
me->irq = pci_irq_vector(dd->pcidev, i);
- /*
- * On err return me->irq. Don't need to clear this
- * because 'arg' has not been set, and cleanup will
- * do the right thing.
- */
- if (me->irq < 0)
- return me->irq;
-
- ret = request_threaded_irq(me->irq, handler, thread, 0,
- me->name, arg);
+ ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
+ name);
if (ret) {
dd_dev_err(dd,
"unable to allocate %s interrupt, irq %d, index %d, err %d\n",
@@ -13169,7 +13177,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
return ret;
}
/*
- * assign arg after request_irq call, so it will be
+ * assign arg after pci_request_irq call, so it will be
* cleaned up
*/
me->arg = arg;
@@ -13187,7 +13195,7 @@ void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
int i;
if (!dd->num_msix_entries) {
- synchronize_irq(dd->pcidev->irq);
+ synchronize_irq(pci_irq_vector(dd->pcidev, 0));
return;
}
@@ -13208,7 +13216,7 @@ void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
return;
hfi1_put_irq_affinity(dd, me);
- free_irq(me->irq, me->arg);
+ pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
me->arg = NULL;
}
@@ -13231,28 +13239,21 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
rcd->imask = ((u64)1) <<
((IS_RCVAVAIL_START + idx) % 64);
-
- snprintf(me->name, sizeof(me->name),
- DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
- me->name[sizeof(me->name) - 1] = 0;
me->type = IRQ_RCVCTXT;
me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
- if (me->irq < 0) {
- dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
- idx, me->irq);
- return;
- }
remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
- ret = request_threaded_irq(me->irq, receive_context_interrupt,
- receive_context_thread, 0, me->name, arg);
+ ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
+ receive_context_interrupt,
+ receive_context_thread, arg,
+ DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
if (ret) {
dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
me->irq, idx, ret);
return;
}
/*
- * assign arg after request_irq call, so it will be
+ * assign arg after pci_request_irq call, so it will be
* cleaned up
*/
me->arg = arg;
@@ -13261,7 +13262,7 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
if (ret) {
dd_dev_err(dd,
"unable to pin IRQ %d\n", ret);
- free_irq(me->irq, me->arg);
+ pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
}
}
@@ -13294,8 +13295,9 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
* slow source, SDMACleanupDone)
* N interrupts - one per used SDMA engine
* M interrupt - one per kernel receive context
+ * V interrupt - one for each VNIC context
*/
- total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
+ total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
/* ask for MSI-X interrupts */
request = request_msix(dd, total);
@@ -13356,15 +13358,18 @@ fail:
* in array of contexts
* freectxts - number of free user contexts
* num_send_contexts - number of PIO send contexts being used
+ * num_vnic_contexts - number of contexts reserved for VNIC
*/
static int set_up_context_variables(struct hfi1_devdata *dd)
{
unsigned long num_kernel_contexts;
+ u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
int total_contexts;
int ret;
unsigned ngroups;
int qos_rmt_count;
int user_rmt_reduced;
+ u32 n_usr_ctxts;
/*
* Kernel receive contexts:
@@ -13393,59 +13398,63 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
num_kernel_contexts);
num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
}
+
+ /* Accommodate VNIC contexts if possible */
+ if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) {
+ dd_dev_err(dd, "No receive contexts available for VNIC\n");
+ num_vnic_contexts = 0;
+ }
+ total_contexts = num_kernel_contexts + num_vnic_contexts;
+
/*
* User contexts:
* - default to 1 user context per real (non-HT) CPU core if
* num_user_contexts is negative
*/
if (num_user_contexts < 0)
- num_user_contexts =
- cpumask_weight(&node_affinity.real_cpu_mask);
-
- total_contexts = num_kernel_contexts + num_user_contexts;
-
+ n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
+ else
+ n_usr_ctxts = num_user_contexts;
/*
* Adjust the counts given a global max.
*/
- if (total_contexts > dd->chip_rcv_contexts) {
+ if (total_contexts + n_usr_ctxts > dd->chip_rcv_contexts) {
dd_dev_err(dd,
- "Reducing # user receive contexts to: %d, from %d\n",
- (int)(dd->chip_rcv_contexts - num_kernel_contexts),
- (int)num_user_contexts);
- num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
+ "Reducing # user receive contexts to: %d, from %u\n",
+ (int)(dd->chip_rcv_contexts - total_contexts),
+ n_usr_ctxts);
/* recalculate */
- total_contexts = num_kernel_contexts + num_user_contexts;
+ n_usr_ctxts = dd->chip_rcv_contexts - total_contexts;
}
/* each user context requires an entry in the RMT */
qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
- if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
+ if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
dd_dev_err(dd,
- "RMT size is reducing the number of user receive contexts from %d to %d\n",
- (int)num_user_contexts,
+ "RMT size is reducing the number of user receive contexts from %u to %d\n",
+ n_usr_ctxts,
user_rmt_reduced);
/* recalculate */
- num_user_contexts = user_rmt_reduced;
- total_contexts = num_kernel_contexts + num_user_contexts;
+ n_usr_ctxts = user_rmt_reduced;
}
- /* Accommodate VNIC contexts */
- if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
- total_contexts += HFI1_NUM_VNIC_CTXT;
+ total_contexts += n_usr_ctxts;
/* the first N are kernel contexts, the rest are user/vnic contexts */
dd->num_rcv_contexts = total_contexts;
dd->n_krcv_queues = num_kernel_contexts;
dd->first_dyn_alloc_ctxt = num_kernel_contexts;
- dd->num_user_contexts = num_user_contexts;
- dd->freectxts = num_user_contexts;
+ dd->num_vnic_contexts = num_vnic_contexts;
+ dd->num_user_contexts = n_usr_ctxts;
+ dd->freectxts = n_usr_ctxts;
dd_dev_info(dd,
- "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
+ "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
(int)dd->chip_rcv_contexts,
(int)dd->num_rcv_contexts,
(int)dd->n_krcv_queues,
- (int)dd->num_rcv_contexts - dd->n_krcv_queues);
+ dd->num_vnic_contexts,
+ dd->num_user_contexts);
/*
* Receive array allocation:
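Pulling the accounting above together: kernel and VNIC contexts are reserved first, and user contexts take whatever remains under both the chip receive-context limit and the RMT size. A small sketch of that arithmetic, with illustrative numbers standing in for the real chip limits:

#include <stdio.h>

#define CHIP_RCV_CONTEXTS 160	/* illustrative, not the real chip limit */
#define NUM_MAP_ENTRIES   256	/* illustrative RMT size */

int main(void)
{
	unsigned int kernel = 17, vnic = 8, qos_rmt = 32;
	unsigned int usr = 144;			/* e.g. one per real CPU core */
	unsigned int total = kernel + vnic;	/* reserved before user contexts */

	if (total + usr > CHIP_RCV_CONTEXTS)	/* global receive-context cap */
		usr = CHIP_RCV_CONTEXTS - total;
	if (qos_rmt + usr > NUM_MAP_ENTRIES)	/* each user ctxt needs an RMT entry */
		usr = NUM_MAP_ENTRIES - qos_rmt;
	total += usr;

	printf("kernel %u, vnic %u, user %u, total %u\n", kernel, vnic, usr, total);
	return 0;
}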
@@ -14962,8 +14971,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
init_vl_arb_caches(ppd);
}
- dd->link_default = HLS_DN_POLL;
-
/*
* Do remaining PCIe setup and save PCIe values in dd.
* Any error printing is already done by the init code.
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 50b8645d0b87..133e313feca4 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -560,7 +560,7 @@ enum {
/* timeouts */
#define LINK_RESTART_DELAY 1000 /* link restart delay, in ms */
#define TIMEOUT_8051_START 5000 /* 8051 start timeout, in ms */
-#define DC8051_COMMAND_TIMEOUT 20000 /* DC8051 command timeout, in ms */
+#define DC8051_COMMAND_TIMEOUT 1000 /* DC8051 command timeout, in ms */
#define FREEZE_STATUS_TIMEOUT 20 /* wait for freeze indicators, in ms */
#define VL_STATUS_CLEAR_TIMEOUT 5000 /* per-VL status clear, in ms */
#define CCE_STATUS_TIMEOUT 10 /* time to clear CCE Status, in ms */
@@ -583,6 +583,9 @@ enum {
#define LOOPBACK_LCB 2
#define LOOPBACK_CABLE 3 /* external cable */
+/* set up serdes bit in MISC_CONFIG_BITS */
+#define LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT 0
+
/* read and write hardware registers */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset);
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value);
@@ -710,6 +713,7 @@ void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
u8 *ver_patch);
int write_host_interface_version(struct hfi1_devdata *dd, u8 version);
void read_guid(struct hfi1_devdata *dd);
+int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd);
int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout);
void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
u8 neigh_reason, u8 rem_reason);
diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index 3e27794ec750..7108d4d92259 100644
--- a/drivers/infiniband/hw/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -328,6 +328,7 @@ struct diag_pkt {
#define SC15_PACKET 0xF
#define SIZE_OF_CRC 1
#define SIZE_OF_LT 1
+#define MAX_16B_PADDING 12 /* CRC = 4, LT = 1, Pad = 0 to 7 bytes */
#define LIM_MGMT_P_KEY 0x7FFF
#define FULL_MGMT_P_KEY 0xFFFF
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 36ae1fd86502..2e6e0c516041 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -71,13 +71,13 @@ static ssize_t hfi1_seq_read(
loff_t *ppos)
{
struct dentry *d = file->f_path.dentry;
- int srcu_idx;
ssize_t r;
- r = debugfs_use_file_start(d, &srcu_idx);
- if (likely(!r))
- r = seq_read(file, buf, size, ppos);
- debugfs_use_file_finish(srcu_idx);
+ r = debugfs_file_get(d);
+ if (unlikely(r))
+ return r;
+ r = seq_read(file, buf, size, ppos);
+ debugfs_file_put(d);
return r;
}
@@ -87,13 +87,13 @@ static loff_t hfi1_seq_lseek(
int whence)
{
struct dentry *d = file->f_path.dentry;
- int srcu_idx;
loff_t r;
- r = debugfs_use_file_start(d, &srcu_idx);
- if (likely(!r))
- r = seq_lseek(file, offset, whence);
- debugfs_use_file_finish(srcu_idx);
+ r = debugfs_file_get(d);
+ if (unlikely(r))
+ return r;
+ r = seq_lseek(file, offset, whence);
+ debugfs_file_put(d);
return r;
}
@@ -165,6 +165,17 @@ static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
{
}
+static int opcode_stats_show(struct seq_file *s, u8 i, u64 packets, u64 bytes)
+{
+ if (!packets && !bytes)
+ return SEQ_SKIP;
+ seq_printf(s, "%02x %llu/%llu\n", i,
+ (unsigned long long)packets,
+ (unsigned long long)bytes);
+
+ return 0;
+}
+
static int _opcode_stats_seq_show(struct seq_file *s, void *v)
{
loff_t *spos = v;
@@ -182,19 +193,49 @@ static int _opcode_stats_seq_show(struct seq_file *s, void *v)
}
hfi1_rcd_put(rcd);
}
- if (!n_packets && !n_bytes)
- return SEQ_SKIP;
- seq_printf(s, "%02llx %llu/%llu\n", i,
- (unsigned long long)n_packets,
- (unsigned long long)n_bytes);
-
- return 0;
+ return opcode_stats_show(s, i, n_packets, n_bytes);
}
DEBUGFS_SEQ_FILE_OPS(opcode_stats);
DEBUGFS_SEQ_FILE_OPEN(opcode_stats)
DEBUGFS_FILE_OPS(opcode_stats);
+static void *_tx_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
+{
+ return _opcode_stats_seq_start(s, pos);
+}
+
+static void *_tx_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ return _opcode_stats_seq_next(s, v, pos);
+}
+
+static void _tx_opcode_stats_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int _tx_opcode_stats_seq_show(struct seq_file *s, void *v)
+{
+ loff_t *spos = v;
+ loff_t i = *spos;
+ int j;
+ u64 n_packets = 0, n_bytes = 0;
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ for_each_possible_cpu(j) {
+ struct hfi1_opcode_stats_perctx *s =
+ per_cpu_ptr(dd->tx_opstats, j);
+ n_packets += s->stats[i].n_packets;
+ n_bytes += s->stats[i].n_bytes;
+ }
+ return opcode_stats_show(s, i, n_packets, n_bytes);
+}
+
+DEBUGFS_SEQ_FILE_OPS(tx_opcode_stats);
+DEBUGFS_SEQ_FILE_OPEN(tx_opcode_stats)
+DEBUGFS_FILE_OPS(tx_opcode_stats);
+
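The new tx_opcode_stats file sums a per-CPU counter array at read time instead of maintaining a locked global counter on the transmit path. The underlying per-CPU pattern, sketched with hypothetical names:

#include <linux/percpu.h>
#include <linux/types.h>

struct my_stats {
	u64 n_packets;
	u64 n_bytes;
};

static struct my_stats __percpu *my_stats_alloc(void)
{
	return alloc_percpu(struct my_stats);	/* one instance per possible CPU */
}

static void my_stats_sum(struct my_stats __percpu *stats, struct my_stats *out)
{
	int cpu;

	out->n_packets = 0;
	out->n_bytes = 0;
	for_each_possible_cpu(cpu) {
		struct my_stats *s = per_cpu_ptr(stats, cpu);

		out->n_packets += s->n_packets;
		out->n_bytes += s->n_bytes;
	}
}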
static void *_ctx_stats_seq_start(struct seq_file *s, loff_t *pos)
{
struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
@@ -243,7 +284,7 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
spos = v;
i = *spos;
- rcd = hfi1_rcd_get_by_index(dd, i);
+ rcd = hfi1_rcd_get_by_index_safe(dd, i);
if (!rcd)
return SEQ_SKIP;
@@ -402,7 +443,7 @@ static int _rcds_seq_show(struct seq_file *s, void *v)
loff_t *spos = v;
loff_t i = *spos;
- rcd = hfi1_rcd_get_by_index(dd, i);
+ rcd = hfi1_rcd_get_by_index_safe(dd, i);
if (rcd)
seqfile_dump_rcd(s, rcd);
hfi1_rcd_put(rcd);
@@ -1363,6 +1404,7 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
return;
}
DEBUGFS_SEQ_FILE_CREATE(opcode_stats, ibd->hfi1_ibdev_dbg, ibd);
+ DEBUGFS_SEQ_FILE_CREATE(tx_opcode_stats, ibd->hfi1_ibdev_dbg, ibd);
DEBUGFS_SEQ_FILE_CREATE(ctx_stats, ibd->hfi1_ibdev_dbg, ibd);
DEBUGFS_SEQ_FILE_CREATE(qp_stats, ibd->hfi1_ibdev_dbg, ibd);
DEBUGFS_SEQ_FILE_CREATE(sdes, ibd->hfi1_ibdev_dbg, ibd);
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 7372cc00cb2d..4f65ac671044 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -433,6 +433,12 @@ static inline void init_packet(struct hfi1_ctxtdata *rcd,
packet->numpkt = 0;
}
+/* We support only two types - 9B and 16B for now */
+static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
+ [HFI1_PKT_TYPE_9B] = &return_cnp,
+ [HFI1_PKT_TYPE_16B] = &return_cnp_16B
+};
+
void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
bool do_cnp)
{
@@ -866,7 +872,7 @@ static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
* interrupt handler for all statically allocated kernel contexts.
*/
if (ctxt >= dd->first_dyn_alloc_ctxt) {
- rcd = hfi1_rcd_get_by_index(dd, ctxt);
+ rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
if (rcd) {
rcd->do_interrupt =
&handle_receive_interrupt_nodma_rtail;
@@ -895,7 +901,7 @@ static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
* interrupt handler for all statically allocated kernel contexts.
*/
if (ctxt >= dd->first_dyn_alloc_ctxt) {
- rcd = hfi1_rcd_get_by_index(dd, ctxt);
+ rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
if (rcd) {
rcd->do_interrupt =
&handle_receive_interrupt_dma_rtail;
@@ -923,10 +929,9 @@ void set_all_slowpath(struct hfi1_devdata *dd)
rcd = hfi1_rcd_get_by_index(dd, i);
if (!rcd)
continue;
- if ((i < dd->first_dyn_alloc_ctxt) ||
- (rcd->sc && (rcd->sc->type == SC_KERNEL))) {
+ if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
rcd->do_interrupt = &handle_receive_interrupt;
- }
+
hfi1_rcd_put(rcd);
}
}
@@ -1252,9 +1257,9 @@ void shutdown_led_override(struct hfi1_pportdata *ppd)
write_csr(dd, DCC_CFG_LED_CNTRL, 0);
}
-static void run_led_override(unsigned long opaque)
+static void run_led_override(struct timer_list *t)
{
- struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque;
+ struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer);
struct hfi1_devdata *dd = ppd->dd;
unsigned long timeout;
int phase_idx;
@@ -1298,8 +1303,7 @@ void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
* timeout so the handler will be called soon to look at our request.
*/
if (!timer_pending(&ppd->led_override_timer)) {
- setup_timer(&ppd->led_override_timer, run_led_override,
- (unsigned long)ppd);
+ timer_setup(&ppd->led_override_timer, run_led_override, 0);
ppd->led_override_timer.expires = jiffies + 1;
add_timer(&ppd->led_override_timer);
atomic_set(&ppd->led_override_timer_active, 1);
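The run_led_override() change follows the kernel-wide timer API conversion: the callback now receives the timer_list pointer and recovers its container with from_timer(), and setup_timer() with an opaque argument becomes timer_setup(). A minimal sketch of the pattern with hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_port {
	struct timer_list led_timer;
	int phase;
};

static void my_led_timeout(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the timer_list member. */
	struct my_port *port = from_timer(port, t, led_timer);

	port->phase ^= 1;			/* the periodic work */
	mod_timer(&port->led_timer, jiffies + HZ);
}

static void my_port_init(struct my_port *port)
{
	timer_setup(&port->led_timer, my_led_timeout, 0);
	mod_timer(&port->led_timer, jiffies + 1);
}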
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index d9a1e9893136..7750a9c38b06 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -78,16 +78,20 @@ static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);
static u64 kvirt_to_phys(void *addr);
-static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo);
+static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static void init_subctxts(struct hfi1_ctxtdata *uctxt,
const struct hfi1_user_info *uinfo);
static int init_user_ctxt(struct hfi1_filedata *fd,
struct hfi1_ctxtdata *uctxt);
static void user_init(struct hfi1_ctxtdata *uctxt);
-static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
- __u32 len);
-static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
- __u32 len);
+static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
+static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
+static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len);
+static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len);
+static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len);
static int setup_base_ctxt(struct hfi1_filedata *fd,
struct hfi1_ctxtdata *uctxt);
static int setup_subctxt(struct hfi1_ctxtdata *uctxt);
@@ -101,10 +105,11 @@ static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
- unsigned long events);
-static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey);
+ unsigned long arg);
+static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
+static int ctxt_reset(struct hfi1_ctxtdata *uctxt);
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
- int start_stop);
+ unsigned long arg);
static int vma_fault(struct vm_fault *vmf);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg);
@@ -221,13 +226,8 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
{
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_user_info uinfo;
- struct hfi1_tid_info tinfo;
int ret = 0;
- unsigned long addr;
int uval = 0;
- unsigned long ul_uval = 0;
- u16 uval16 = 0;
hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
@@ -237,171 +237,55 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
switch (cmd) {
case HFI1_IOCTL_ASSIGN_CTXT:
- if (uctxt)
- return -EINVAL;
-
- if (copy_from_user(&uinfo,
- (struct hfi1_user_info __user *)arg,
- sizeof(uinfo)))
- return -EFAULT;
-
- ret = assign_ctxt(fd, &uinfo);
+ ret = assign_ctxt(fd, arg, _IOC_SIZE(cmd));
break;
+
case HFI1_IOCTL_CTXT_INFO:
- ret = get_ctxt_info(fd, (void __user *)(unsigned long)arg,
- sizeof(struct hfi1_ctxt_info));
+ ret = get_ctxt_info(fd, arg, _IOC_SIZE(cmd));
break;
+
case HFI1_IOCTL_USER_INFO:
- ret = get_base_info(fd, (void __user *)(unsigned long)arg,
- sizeof(struct hfi1_base_info));
+ ret = get_base_info(fd, arg, _IOC_SIZE(cmd));
break;
+
case HFI1_IOCTL_CREDIT_UPD:
if (uctxt)
sc_return_credits(uctxt->sc);
break;
case HFI1_IOCTL_TID_UPDATE:
- if (copy_from_user(&tinfo,
- (struct hfi11_tid_info __user *)arg,
- sizeof(tinfo)))
- return -EFAULT;
-
- ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
- if (!ret) {
- /*
- * Copy the number of tidlist entries we used
- * and the length of the buffer we registered.
- */
- addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
- if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
- sizeof(tinfo.tidcnt)))
- return -EFAULT;
-
- addr = arg + offsetof(struct hfi1_tid_info, length);
- if (copy_to_user((void __user *)addr, &tinfo.length,
- sizeof(tinfo.length)))
- ret = -EFAULT;
- }
+ ret = user_exp_rcv_setup(fd, arg, _IOC_SIZE(cmd));
break;
case HFI1_IOCTL_TID_FREE:
- if (copy_from_user(&tinfo,
- (struct hfi11_tid_info __user *)arg,
- sizeof(tinfo)))
- return -EFAULT;
-
- ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
- if (ret)
- break;
- addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
- if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
- sizeof(tinfo.tidcnt)))
- ret = -EFAULT;
+ ret = user_exp_rcv_clear(fd, arg, _IOC_SIZE(cmd));
break;
case HFI1_IOCTL_TID_INVAL_READ:
- if (copy_from_user(&tinfo,
- (struct hfi11_tid_info __user *)arg,
- sizeof(tinfo)))
- return -EFAULT;
-
- ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
- if (ret)
- break;
- addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
- if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
- sizeof(tinfo.tidcnt)))
- ret = -EFAULT;
+ ret = user_exp_rcv_invalid(fd, arg, _IOC_SIZE(cmd));
break;
case HFI1_IOCTL_RECV_CTRL:
- ret = get_user(uval, (int __user *)arg);
- if (ret != 0)
- return -EFAULT;
- ret = manage_rcvq(uctxt, fd->subctxt, uval);
+ ret = manage_rcvq(uctxt, fd->subctxt, arg);
break;
case HFI1_IOCTL_POLL_TYPE:
- ret = get_user(uval, (int __user *)arg);
- if (ret != 0)
+ if (get_user(uval, (int __user *)arg))
return -EFAULT;
uctxt->poll_type = (typeof(uctxt->poll_type))uval;
break;
case HFI1_IOCTL_ACK_EVENT:
- ret = get_user(ul_uval, (unsigned long __user *)arg);
- if (ret != 0)
- return -EFAULT;
- ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
+ ret = user_event_ack(uctxt, fd->subctxt, arg);
break;
case HFI1_IOCTL_SET_PKEY:
- ret = get_user(uval16, (u16 __user *)arg);
- if (ret != 0)
- return -EFAULT;
- if (HFI1_CAP_IS_USET(PKEY_CHECK))
- ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
- else
- return -EPERM;
+ ret = set_ctxt_pkey(uctxt, arg);
break;
- case HFI1_IOCTL_CTXT_RESET: {
- struct send_context *sc;
- struct hfi1_devdata *dd;
-
- if (!uctxt || !uctxt->dd || !uctxt->sc)
- return -EINVAL;
-
- /*
- * There is no protection here. User level has to
- * guarantee that no one will be writing to the send
- * context while it is being re-initialized.
- * If user level breaks that guarantee, it will break
- * it's own context and no one else's.
- */
- dd = uctxt->dd;
- sc = uctxt->sc;
- /*
- * Wait until the interrupt handler has marked the
- * context as halted or frozen. Report error if we time
- * out.
- */
- wait_event_interruptible_timeout(
- sc->halt_wait, (sc->flags & SCF_HALTED),
- msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
- if (!(sc->flags & SCF_HALTED))
- return -ENOLCK;
-
- /*
- * If the send context was halted due to a Freeze,
- * wait until the device has been "unfrozen" before
- * resetting the context.
- */
- if (sc->flags & SCF_FROZEN) {
- wait_event_interruptible_timeout(
- dd->event_queue,
- !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
- msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
- if (dd->flags & HFI1_FROZEN)
- return -ENOLCK;
-
- if (dd->flags & HFI1_FORCED_FREEZE)
- /*
- * Don't allow context reset if we are into
- * forced freeze
- */
- return -ENODEV;
-
- sc_disable(sc);
- ret = sc_enable(sc);
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
- } else {
- ret = sc_restart(sc);
- }
- if (!ret)
- sc_return_credits(sc);
+ case HFI1_IOCTL_CTXT_RESET:
+ ret = ctxt_reset(uctxt);
break;
- }
case HFI1_IOCTL_GET_VERS:
uval = HFI1_USER_SWVERSION;
@@ -595,9 +479,8 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
* Use the page where this context's flags are. User level
* knows where it's own bitmap is within the page.
*/
- memaddr = (unsigned long)(dd->events +
- ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
- HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
+ memaddr = (unsigned long)
+ (dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
memlen = PAGE_SIZE;
/*
* v3.7 removes VM_RESERVED but the effect is kept by
@@ -779,8 +662,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
* Clear any left over, unhandled events so the next process that
* gets this context doesn't get confused.
*/
- ev = dd->events + ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
+ ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
*ev = 0;
spin_lock_irqsave(&dd->uctxt_lock, flags);
@@ -891,21 +773,29 @@ static int complete_subctxt(struct hfi1_filedata *fd)
return ret;
}
-static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
+static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
int ret;
- unsigned int swmajor, swminor;
+ unsigned int swmajor;
struct hfi1_ctxtdata *uctxt = NULL;
+ struct hfi1_user_info uinfo;
+
+ if (fd->uctxt)
+ return -EINVAL;
+
+ if (sizeof(uinfo) != len)
+ return -EINVAL;
- swmajor = uinfo->userversion >> 16;
+ if (copy_from_user(&uinfo, (void __user *)arg, sizeof(uinfo)))
+ return -EFAULT;
+
+ swmajor = uinfo.userversion >> 16;
if (swmajor != HFI1_USER_SWMAJOR)
return -ENODEV;
- if (uinfo->subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
+ if (uinfo.subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
return -EINVAL;
- swminor = uinfo->userversion & 0xffff;
-
/*
* Acquire the mutex to protect against multiple creations of what
* could be a shared base context.
@@ -915,14 +805,14 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
* Get a sub context if available (fd->uctxt will be set).
* ret < 0 error, 0 no context, 1 sub-context found
*/
- ret = find_sub_ctxt(fd, uinfo);
+ ret = find_sub_ctxt(fd, &uinfo);
/*
* Allocate a base context if context sharing is not required or a
* sub context wasn't found.
*/
if (!ret)
- ret = allocate_ctxt(fd, fd->dd, uinfo, &uctxt);
+ ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);
mutex_unlock(&hfi1_mutex);
@@ -1230,12 +1120,13 @@ static void user_init(struct hfi1_ctxtdata *uctxt)
hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
}
-static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
- __u32 len)
+static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
struct hfi1_ctxt_info cinfo;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
- int ret = 0;
+
+ if (sizeof(cinfo) != len)
+ return -EINVAL;
memset(&cinfo, 0, sizeof(cinfo));
cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
@@ -1265,10 +1156,10 @@ static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
- if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
- ret = -EFAULT;
+ if (copy_to_user((void __user *)arg, &cinfo, len))
+ return -EFAULT;
- return ret;
+ return 0;
}
static int init_user_ctxt(struct hfi1_filedata *fd,
@@ -1344,18 +1235,18 @@ done:
return ret;
}
-static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
- __u32 len)
+static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
struct hfi1_base_info binfo;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
- ssize_t sz;
unsigned offset;
- int ret = 0;
trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);
+ if (sizeof(binfo) != len)
+ return -EINVAL;
+
memset(&binfo, 0, sizeof(binfo));
binfo.hw_version = dd->revision;
binfo.sw_version = HFI1_KERN_SWVERSION;
@@ -1385,39 +1276,152 @@ static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
fd->subctxt,
uctxt->egrbufs.rcvtids[0].dma);
binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
- fd->subctxt, 0);
+ fd->subctxt, 0);
/*
* user regs are at
* (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
*/
binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
- fd->subctxt, 0);
- offset = offset_in_page((((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
- sizeof(*dd->events));
+ fd->subctxt, 0);
+ offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) *
+ sizeof(*dd->events));
binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
- fd->subctxt,
- offset);
+ fd->subctxt,
+ offset);
binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
- fd->subctxt,
- dd->status);
+ fd->subctxt,
+ dd->status);
if (HFI1_CAP_IS_USET(DMA_RTAIL))
binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
- fd->subctxt, 0);
+ fd->subctxt, 0);
if (uctxt->subctxt_cnt) {
binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
- uctxt->ctxt,
- fd->subctxt, 0);
- binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
uctxt->ctxt,
fd->subctxt, 0);
+ binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
+ uctxt->ctxt,
+ fd->subctxt, 0);
binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
- uctxt->ctxt,
- fd->subctxt, 0);
+ uctxt->ctxt,
+ fd->subctxt, 0);
}
- sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
- if (copy_to_user(ubase, &binfo, sz))
+
+ if (copy_to_user((void __user *)arg, &binfo, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * user_exp_rcv_setup - Set up the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argument for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * Wrapper to validate ioctl information before doing _rcv_setup.
+ *
+ */
+static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len)
+{
+ int ret;
+ unsigned long addr;
+ struct hfi1_tid_info tinfo;
+
+ if (sizeof(tinfo) != len)
+ return -EINVAL;
+
+ if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+ return -EFAULT;
+
+ ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
+ if (!ret) {
+ /*
+ * Copy the number of tidlist entries we used
+ * and the length of the buffer we registered.
+ */
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+ if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+ sizeof(tinfo.tidcnt)))
+ return -EFAULT;
+
+ addr = arg + offsetof(struct hfi1_tid_info, length);
+ if (copy_to_user((void __user *)addr, &tinfo.length,
+ sizeof(tinfo.length)))
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+/**
+ * user_exp_rcv_clear - Clear the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argument for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * The hfi1_user_exp_rcv_clear() can be called from the error path. Because
+ * of this, we need to use this wrapper to copy the user space information
+ * before doing the clear.
+ */
+static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len)
+{
+ int ret;
+ unsigned long addr;
+ struct hfi1_tid_info tinfo;
+
+ if (sizeof(tinfo) != len)
+ return -EINVAL;
+
+ if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+ return -EFAULT;
+
+ ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
+ if (!ret) {
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+ if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+ sizeof(tinfo.tidcnt)))
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+/**
+ * user_exp_rcv_invalid - Invalidate the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argument for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * Wrapper to validate ioctl information before doing _rcv_invalid.
+ *
+ */
+static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len)
+{
+ int ret;
+ unsigned long addr;
+ struct hfi1_tid_info tinfo;
+
+ if (sizeof(tinfo) != len)
+ return -EINVAL;
+
+ if (!fd->invalid_tids)
+ return -EINVAL;
+
+ if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+ return -EFAULT;
+
+ ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
+ if (ret)
+ return ret;
+
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+ if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+ sizeof(tinfo.tidcnt)))
ret = -EFAULT;
+
return ret;
}
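All three wrappers above reject a request whose length does not match the structure they expect. That length comes from _IOC_SIZE(cmd): an ioctl command number encodes the size of its argument struct, so the dispatcher can pass it down for validation. A sketch with a hypothetical command definition (the real commands and struct live in the hfi1 uapi header):

#include <linux/ioctl.h>
#include <linux/types.h>

struct example_tid_info {		/* stand-in for the real uapi struct */
	__u64 vaddr;
	__u64 tidlist;
	__u32 tidcnt;
	__u32 length;
};

/* 'H' and 0x01 are made up for illustration. */
#define EXAMPLE_IOCTL_TID_UPDATE _IOWR('H', 0x01, struct example_tid_info)

/* _IOC_SIZE(EXAMPLE_IOCTL_TID_UPDATE) == sizeof(struct example_tid_info),
 * so a wrapper can refuse callers built against a mismatched structure
 * before it ever calls copy_from_user(). */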
@@ -1485,14 +1489,13 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
ctxt++) {
uctxt = hfi1_rcd_get_by_index(dd, ctxt);
if (uctxt) {
- unsigned long *evs = dd->events +
- (uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
- HFI1_MAX_SHARED_CTXTS;
+ unsigned long *evs;
int i;
/*
* subctxt_cnt is 0 if not shared, so do base
* separately, first, then remaining subctxt, if any
*/
+ evs = dd->events + uctxt_offset(uctxt);
set_bit(evtbit, evs);
for (i = 1; i < uctxt->subctxt_cnt; i++)
set_bit(evtbit, evs + i);
@@ -1514,13 +1517,18 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
* re-init the software copy of the head register
*/
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
- int start_stop)
+ unsigned long arg)
{
struct hfi1_devdata *dd = uctxt->dd;
unsigned int rcvctrl_op;
+ int start_stop;
if (subctxt)
- goto bail;
+ return 0;
+
+ if (get_user(start_stop, (int __user *)arg))
+ return -EFAULT;
+
/* atomically clear receive enable ctxt. */
if (start_stop) {
/*
@@ -1539,7 +1547,7 @@ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
}
hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
/* always; new head should be equal to new tail; see above */
-bail:
+
return 0;
}
@@ -1549,17 +1557,20 @@ bail:
* set, if desired, and checks again in future.
*/
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
- unsigned long events)
+ unsigned long arg)
{
int i;
struct hfi1_devdata *dd = uctxt->dd;
unsigned long *evs;
+ unsigned long events;
if (!dd->events)
return 0;
- evs = dd->events + ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + subctxt;
+ if (get_user(events, (unsigned long __user *)arg))
+ return -EFAULT;
+
+ evs = dd->events + uctxt_offset(uctxt) + subctxt;
for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
if (!test_bit(i, &events))
@@ -1569,26 +1580,89 @@ static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
return 0;
}
-static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey)
+static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg)
{
- int ret = -ENOENT, i, intable = 0;
+ int i;
struct hfi1_pportdata *ppd = uctxt->ppd;
struct hfi1_devdata *dd = uctxt->dd;
+ u16 pkey;
- if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
- ret = -EINVAL;
- goto done;
- }
+ if (!HFI1_CAP_IS_USET(PKEY_CHECK))
+ return -EPERM;
+
+ if (get_user(pkey, (u16 __user *)arg))
+ return -EFAULT;
+
+ if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
+ return -EINVAL;
for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
- if (pkey == ppd->pkeys[i]) {
- intable = 1;
- break;
- }
+ if (pkey == ppd->pkeys[i])
+ return hfi1_set_ctxt_pkey(dd, uctxt, pkey);
+
+ return -ENOENT;
+}
+
+/**
+ * ctxt_reset - Reset the user context
+ * @uctxt: valid user context
+ */
+static int ctxt_reset(struct hfi1_ctxtdata *uctxt)
+{
+ struct send_context *sc;
+ struct hfi1_devdata *dd;
+ int ret = 0;
+
+ if (!uctxt || !uctxt->dd || !uctxt->sc)
+ return -EINVAL;
+
+ /*
+ * There is no protection here. User level has to guarantee that
+ * no one will be writing to the send context while it is being
+ * re-initialized. If user level breaks that guarantee, it will
+ * break its own context and no one else's.
+ */
+ dd = uctxt->dd;
+ sc = uctxt->sc;
+
+ /*
+ * Wait until the interrupt handler has marked the context as
+ * halted or frozen. Report error if we time out.
+ */
+ wait_event_interruptible_timeout(
+ sc->halt_wait, (sc->flags & SCF_HALTED),
+ msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
+ if (!(sc->flags & SCF_HALTED))
+ return -ENOLCK;
+
+ /*
+ * If the send context was halted due to a Freeze, wait until the
+ * device has been "unfrozen" before resetting the context.
+ */
+ if (sc->flags & SCF_FROZEN) {
+ wait_event_interruptible_timeout(
+ dd->event_queue,
+ !(READ_ONCE(dd->flags) & HFI1_FROZEN),
+ msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
+ if (dd->flags & HFI1_FROZEN)
+ return -ENOLCK;
+
+ if (dd->flags & HFI1_FORCED_FREEZE)
+ /*
+ * Don't allow context reset if we are into
+ * forced freeze
+ */
+ return -ENODEV;
+
+ sc_disable(sc);
+ ret = sc_enable(sc);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
+ } else {
+ ret = sc_restart(sc);
+ }
+ if (!ret)
+ sc_return_credits(sc);
- if (intable)
- ret = hfi1_set_ctxt_pkey(dd, uctxt, pkey);
-done:
return ret;
}
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 5aea8f47e670..98868df78a7e 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -70,6 +70,11 @@
#define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"
#define HOST_INTERFACE_VERSION 1
+MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC);
+MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME);
+MODULE_FIRMWARE(DEFAULT_FW_SBUS_NAME);
+MODULE_FIRMWARE(DEFAULT_FW_PCIE_NAME);
+
static uint fw_8051_load = 1;
static uint fw_fabric_serdes_load = 1;
static uint fw_pcie_serdes_load = 1;
@@ -113,6 +118,12 @@ struct css_header {
#define MU_SIZE 8
#define EXPONENT_SIZE 4
+/* size of platform configuration partition */
+#define MAX_PLATFORM_CONFIG_FILE_SIZE 4096
+
+/* size of a platform configuration file encoded in format version 4 */
+#define PLATFORM_CONFIG_FORMAT_4_FILE_SIZE 528
+
/* the file itself */
struct firmware_file {
struct css_header css_header;
@@ -965,6 +976,46 @@ int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
}
/*
+ * Clear all reset bits, releasing the 8051.
+ * Wait for firmware to be ready to accept host requests.
+ * Then, set host version bit.
+ *
+ * This function executes even if the 8051 is in reset mode when
+ * dd->dc_shutdown == 1.
+ *
+ * Expects dd->dc8051_lock to be held.
+ */
+int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ lockdep_assert_held(&dd->dc8051_lock);
+ /* clear all reset bits, releasing the 8051 */
+ write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+
+ /*
+ * Wait for firmware to be ready to accept host
+ * requests.
+ */
+ ret = wait_fm_ready(dd, TIMEOUT_8051_START);
+ if (ret) {
+ dd_dev_err(dd, "8051 start timeout, current FW state 0x%x\n",
+ get_firmware_state(dd));
+ return ret;
+ }
+
+ ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to set host interface version, return 0x%x\n",
+ ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
* Load the 8051 firmware.
*/
static int load_8051_firmware(struct hfi1_devdata *dd,
@@ -1029,31 +1080,22 @@ static int load_8051_firmware(struct hfi1_devdata *dd,
if (ret)
return ret;
- /* clear all reset bits, releasing the 8051 */
- write_csr(dd, DC_DC8051_CFG_RST, 0ull);
-
/*
+ * Clear all reset bits, releasing the 8051.
* DC reset step 5. Wait for firmware to be ready to accept host
* requests.
+ * Then, set host version bit.
*/
- ret = wait_fm_ready(dd, TIMEOUT_8051_START);
- if (ret) { /* timed out */
- dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
- get_firmware_state(dd));
- return -ETIMEDOUT;
- }
+ mutex_lock(&dd->dc8051_lock);
+ ret = release_and_wait_ready_8051_firmware(dd);
+ mutex_unlock(&dd->dc8051_lock);
+ if (ret)
+ return ret;
read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
(int)ver_major, (int)ver_minor, (int)ver_patch);
dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
- ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(dd,
- "Failed to set host interface version, return 0x%x\n",
- ret);
- return -EIO;
- }
return 0;
}
@@ -1387,7 +1429,14 @@ int acquire_hw_mutex(struct hfi1_devdata *dd)
unsigned long timeout;
int try = 0;
u8 mask = 1 << dd->hfi1_id;
- u8 user;
+ u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
+
+ if (user == mask) {
+ dd_dev_info(dd,
+ "Hardware mutex already acquired, mutex mask %u\n",
+ (u32)mask);
+ return 0;
+ }
retry:
timeout = msecs_to_jiffies(HM_TIMEOUT) + jiffies;
@@ -1418,7 +1467,15 @@ retry:
void release_hw_mutex(struct hfi1_devdata *dd)
{
- write_csr(dd, ASIC_CFG_MUTEX, 0);
+ u8 mask = 1 << dd->hfi1_id;
+ u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
+
+ if (user != mask)
+ dd_dev_warn(dd,
+ "Unable to release hardware mutex, mutex mask %u, my mask %u\n",
+ (u32)user, (u32)mask);
+ else
+ write_csr(dd, ASIC_CFG_MUTEX, 0);
}
/* return the given resource bit(s) as a mask for the given HFI */
@@ -1733,7 +1790,7 @@ static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
ver_start /= 8;
meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1);
- if (meta_ver < 5) {
+ if (meta_ver < 4) {
dd_dev_info(
dd, "%s:Please update platform config\n", __func__);
return -EINVAL;
@@ -1774,7 +1831,20 @@ int parse_platform_config(struct hfi1_devdata *dd)
/* Field is file size in DWORDs */
file_length = (*ptr) * 4;
- ptr++;
+
+ /*
+ * Length can't be larger than partition size. Assume platform
+ * config format version 4 is being used. Interpret the file size
+ * field as header instead by not moving the pointer.
+ */
+ if (file_length > MAX_PLATFORM_CONFIG_FILE_SIZE) {
+ dd_dev_info(dd,
+ "%s:File length out of bounds, using alternative format\n",
+ __func__);
+ file_length = PLATFORM_CONFIG_FORMAT_4_FILE_SIZE;
+ } else {
+ ptr++;
+ }
if (file_length > dd->platform_config.size) {
dd_dev_info(dd, "%s:File claims to be larger than read size\n",
@@ -1789,7 +1859,8 @@ int parse_platform_config(struct hfi1_devdata *dd)
/*
* In both cases where we proceed, using the self-reported file length
- * is the safer option
+ * is the safer option. For the old format, a predefined value is
+ * used instead.
*/
while (ptr < (u32 *)(dd->platform_config.data + file_length)) {
header1 = *ptr;
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 3ac9c307a285..4a9b4d7efe63 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -95,6 +95,9 @@
#define DROP_PACKET_OFF 0
#define DROP_PACKET_ON 1
+#define NEIGHBOR_TYPE_HFI 0
+#define NEIGHBOR_TYPE_SWITCH 1
+
extern unsigned long hfi1_cap_mask;
#define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
#define HFI1_CAP_UGET_MASK(mask, cap) \
@@ -164,9 +167,7 @@ extern const struct pci_error_handlers hfi1_pci_err_handler;
* Below contains all data related to a single context (formerly called port).
*/
-#ifdef CONFIG_DEBUG_FS
struct hfi1_opcode_stats_perctx;
-#endif
struct ctxt_eager_bufs {
ssize_t size; /* total size of eager buffers */
@@ -283,7 +284,7 @@ struct hfi1_ctxtdata {
u64 imask; /* clear interrupt mask */
int ireg; /* clear interrupt register */
unsigned numa_id; /* numa node of this context */
- /* verbs stats per CTX */
+ /* verbs rx_stats per rcd */
struct hfi1_opcode_stats_perctx *opstats;
/* Is ASPM interrupt supported for this context */
@@ -390,6 +391,7 @@ struct hfi1_packet {
/*
* OPA 16B L2/L4 Encodings
*/
+#define OPA_16B_L4_9B 0x00
#define OPA_16B_L2_TYPE 0x02
#define OPA_16B_L4_IB_LOCAL 0x09
#define OPA_16B_L4_IB_GLOBAL 0x0A
@@ -535,6 +537,8 @@ struct rvt_sge_state;
#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
#define HLS_DOWN ~(HLS_UP)
+#define HLS_DEFAULT HLS_DN_POLL
+
/* use this MTU size if none other is given */
#define HFI1_DEFAULT_ACTIVE_MTU 10240
/* use this MTU size as the default maximum */
@@ -616,7 +620,6 @@ struct hfi1_msix_entry {
enum irq_type type;
int irq;
void *arg;
- char name[MAX_NAME_SIZE];
cpumask_t mask;
struct irq_affinity_notify notify;
};
@@ -1047,6 +1050,8 @@ struct hfi1_devdata {
u64 z_send_schedule;
u64 __percpu *send_schedule;
+ /* number of reserved contexts for VNIC usage */
+ u16 num_vnic_contexts;
/* number of receive contexts in use by the driver */
u32 num_rcv_contexts;
/* number of pio send contexts in use by the driver */
@@ -1109,8 +1114,7 @@ struct hfi1_devdata {
u16 rcvegrbufsize_shift;
/* both sides of the PCIe link are gen3 capable */
u8 link_gen3_capable;
- /* default link down value (poll/sleep) */
- u8 link_default;
+ u8 dc_shutdown;
/* localbus width (1, 2,4,8,16,32) from config space */
u32 lbus_width;
/* localbus speed in MHz */
@@ -1183,7 +1187,6 @@ struct hfi1_devdata {
/* INTx information */
u32 requested_intx_irq; /* did we request one? */
- char intx_name[MAX_NAME_SIZE]; /* INTx name */
/* general interrupt: mask of handled interrupts */
u64 gi_mask[CCE_NUM_INT_CSRS];
@@ -1274,6 +1277,8 @@ struct hfi1_devdata {
/* receive context data */
struct hfi1_ctxtdata **rcd;
u64 __percpu *int_counter;
+ /* verbs tx opcode stats */
+ struct hfi1_opcode_stats_perctx __percpu *tx_opstats;
/* device (not port) flags, basically device capabilities */
u16 flags;
/* Number of physical ports available */
@@ -1295,7 +1300,6 @@ struct hfi1_devdata {
u8 oui1;
u8 oui2;
u8 oui3;
- u8 dc_shutdown;
/* Timer and counter used to detect RcvBufOvflCnt changes */
struct timer_list rcverr_timer;
@@ -1373,8 +1377,12 @@ struct hfi1_filedata {
extern struct list_head hfi1_dev_list;
extern spinlock_t hfi1_devs_lock;
struct hfi1_devdata *hfi1_lookup(int unit);
-extern u32 hfi1_cpulist_count;
-extern unsigned long *hfi1_cpulist;
+
+static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
+{
+ return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
+ HFI1_MAX_SHARED_CTXTS;
+}
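uctxt_offset() centralizes the event-array indexing that several callers above previously open-coded. As a quick worked example with assumed values (the real constants come from the driver headers): with first_dyn_alloc_ctxt == 16 and HFI1_MAX_SHARED_CTXTS == 8, user context 19 with sub-context 2 maps to dd->events + (19 - 16) * 8 + 2, that is, event slot 26.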
int hfi1_init(struct hfi1_devdata *dd, int reinit);
int hfi1_count_active_units(void);
@@ -1396,6 +1404,8 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
+struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
+ u16 ctxt);
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
@@ -1531,11 +1541,6 @@ typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
u8 sc5, const struct ib_grh *old_grh);
-/* We support only two types - 9B and 16B for now */
-static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
- [HFI1_PKT_TYPE_9B] = &return_cnp,
- [HFI1_PKT_TYPE_16B] = &return_cnp_16B
-};
#define PKEY_CHECK_INVALID -1
int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
u8 sc5, int8_t s_pkey_index);
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index fba77001c3a7..8e3b3e7d829a 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -123,8 +123,6 @@ MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user
static inline u64 encode_rcv_header_entry_size(u16 size);
static struct idr hfi1_unit_table;
-u32 hfi1_cpulist_count;
-unsigned long *hfi1_cpulist;
static int hfi1_create_kctxt(struct hfi1_devdata *dd,
struct hfi1_pportdata *ppd)
@@ -286,6 +284,27 @@ static int allocate_rcd_index(struct hfi1_devdata *dd,
}
/**
+ * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
+ * array
+ * @dd: pointer to a valid devdata structure
+ * @ctxt: the index of a possible rcd
+ *
+ * This is a wrapper for hfi1_rcd_get_by_index() that first validates the
+ * given ctxt index.
+ *
+ * The caller is responsible for calling the matching _put().
+ *
+ */
+struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
+ u16 ctxt)
+{
+ if (ctxt < dd->num_rcv_contexts)
+ return hfi1_rcd_get_by_index(dd, ctxt);
+
+ return NULL;
+}
+
+/**
* hfi1_rcd_get_by_index
* @dd: pointer to a valid devdata structure
* @ctxt: the index of a possible rcd
@@ -1006,7 +1025,7 @@ static void stop_timers(struct hfi1_devdata *dd)
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
- if (ppd->led_override_timer.data) {
+ if (ppd->led_override_timer.function) {
del_timer_sync(&ppd->led_override_timer);
atomic_set(&ppd->led_override_timer_active, 0);
}
@@ -1198,6 +1217,7 @@ static void __hfi1_free_devdata(struct kobject *kobj)
free_percpu(dd->int_counter);
free_percpu(dd->rcv_limit);
free_percpu(dd->send_schedule);
+ free_percpu(dd->tx_opstats);
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
@@ -1272,39 +1292,27 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
dd->int_counter = alloc_percpu(u64);
if (!dd->int_counter) {
ret = -ENOMEM;
- hfi1_early_err(&pdev->dev,
- "Could not allocate per-cpu int_counter\n");
goto bail;
}
dd->rcv_limit = alloc_percpu(u64);
if (!dd->rcv_limit) {
ret = -ENOMEM;
- hfi1_early_err(&pdev->dev,
- "Could not allocate per-cpu rcv_limit\n");
goto bail;
}
dd->send_schedule = alloc_percpu(u64);
if (!dd->send_schedule) {
ret = -ENOMEM;
- hfi1_early_err(&pdev->dev,
- "Could not allocate per-cpu int_counter\n");
goto bail;
}
- if (!hfi1_cpulist_count) {
- u32 count = num_online_cpus();
-
- hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
- GFP_KERNEL);
- if (hfi1_cpulist)
- hfi1_cpulist_count = count;
- else
- hfi1_early_err(
- &pdev->dev,
- "Could not alloc cpulist info, cpu affinity might be wrong\n");
+ dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
+ if (!dd->tx_opstats) {
+ ret = -ENOMEM;
+ goto bail;
}
+
kobject_init(&dd->kobj, &hfi1_devdata_type);
return dd;
@@ -1477,8 +1485,6 @@ static void __exit hfi1_mod_cleanup(void)
node_affinity_destroy();
hfi1_wss_exit();
hfi1_dbg_exit();
- hfi1_cpulist_count = 0;
- kfree(hfi1_cpulist);
idr_destroy(&hfi1_unit_table);
dispose_firmware(); /* asymmetric with obtain_firmware() */
@@ -1801,8 +1807,7 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
sizeof(u32));
- if ((rcd->ctxt < dd->first_dyn_alloc_ctxt) ||
- (rcd->sc && (rcd->sc->type == SC_KERNEL)))
+ if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
gfp_flags = GFP_KERNEL;
else
gfp_flags = GFP_USER;
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index 96845dfed5c5..387305b768e9 100644
--- a/drivers/infiniband/hw/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
@@ -53,6 +53,42 @@
#include "common.h"
#include "sdma.h"
+#define LINK_UP_DELAY 500 /* in microseconds */
+
+static void set_mgmt_allowed(struct hfi1_pportdata *ppd)
+{
+ u32 frame;
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if (ppd->neighbor_type == NEIGHBOR_TYPE_HFI) {
+ ppd->mgmt_allowed = 1;
+ } else {
+ read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
+ ppd->mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT)
+ & MGMT_ALLOWED_MASK;
+ }
+}
+
+/*
+ * Our neighbor has indicated that we are allowed to act as a fabric
+ * manager, so place the full management partition key in the second
+ * (0-based) pkey array position. Note that we should already have
+ * the limited management partition key in array element 1, and also
+ * that the port is not yet up when add_full_mgmt_pkey() is invoked.
+ */
+static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
+ if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
+ dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
+ __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
+ ppd->pkeys[2] = FULL_MGMT_P_KEY;
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+ hfi1_event_pkey_change(ppd->dd, ppd->port);
+}
+
/**
* format_hwmsg - format a single hwerror message
* @msg message buffer
@@ -102,9 +138,16 @@ static void signal_ib_event(struct hfi1_pportdata *ppd, enum ib_event_type ev)
ib_dispatch_event(&event);
}
-/*
+/**
+ * handle_linkup_change - finish linkup/down state changes
+ * @dd: valid device
+ * @linkup: link state information
+ *
* Handle a linkup or link down notification.
+ * The HW needs time to finish its link up state change. Give it that chance.
+ *
* This is called outside an interrupt.
+ *
*/
void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
{
@@ -151,6 +194,18 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
ppd->neighbor_guid, ppd->neighbor_type,
ppd->neighbor_port_number);
+ /* HW needs LINK_UP_DELAY to settle, give it that chance */
+ udelay(LINK_UP_DELAY);
+
+ /*
+ * 'MgmtAllowed' information, which is exchanged during
+ * LNI, is available at this point.
+ */
+ set_mgmt_allowed(ppd);
+
+ if (ppd->mgmt_allowed)
+ add_full_mgmt_pkey(ppd);
+
/* physical link went up */
ppd->linkup = 1;
ppd->offline_disabled_reason =
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index f4c0ffc040cc..cf8dba34fe30 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -98,6 +98,16 @@ static inline void clear_opa_smp_data(struct opa_smp *smp)
memset(data, 0, size);
}
+static u16 hfi1_lookup_pkey_value(struct hfi1_ibport *ibp, int pkey_idx)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ if (pkey_idx < ARRAY_SIZE(ppd->pkeys))
+ return ppd->pkeys[pkey_idx];
+
+ return 0;
+}
+
void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
{
struct ib_event event;
@@ -399,9 +409,9 @@ static void send_trap(struct hfi1_ibport *ibp, struct trap_node *trap)
ib_free_send_mad(send_buf);
}
-void hfi1_handle_trap_timer(unsigned long data)
+void hfi1_handle_trap_timer(struct timer_list *t)
{
- struct hfi1_ibport *ibp = (struct hfi1_ibport *)data;
+ struct hfi1_ibport *ibp = from_timer(ibp, t, rvp.trap_timer);
struct trap_node *trap = NULL;
unsigned long flags;
int i;
@@ -711,6 +721,7 @@ static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
/* Bad mkey not a violation below level 2 */
if (ibp->rvp.mkeyprot < 2)
break;
+ /* fall through */
case IB_MGMT_METHOD_SET:
case IB_MGMT_METHOD_TRAP_REPRESS:
if (ibp->rvp.mkey_violations != 0xFFFF)
@@ -1227,8 +1238,7 @@ static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
}
static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
- u32 logical_state, u32 phys_state,
- int suppress_idle_sma)
+ u32 logical_state, u32 phys_state)
{
struct hfi1_devdata *dd = ppd->dd;
u32 link_state;
@@ -1309,7 +1319,7 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
break;
case IB_PORT_ARMED:
ret = set_link_state(ppd, HLS_UP_ARMED);
- if ((ret == 0) && (suppress_idle_sma == 0))
+ if (!ret)
send_idle_sma(dd, SMA_IDLE_ARM);
break;
case IB_PORT_ACTIVE:
@@ -1603,8 +1613,10 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
ppd->is_sm_config_started = 1;
} else if (ls_new == IB_PORT_ARMED) {
- if (ppd->is_sm_config_started == 0)
+ if (ppd->is_sm_config_started == 0) {
invalid = 1;
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
}
}
@@ -1621,9 +1633,11 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
* is down or is being set to down.
*/
- ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
- if (ret)
- return ret;
+ if (!invalid) {
+ ret = set_port_states(ppd, smp, ls_new, ps_new);
+ if (ret)
+ return ret;
+ }
ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
max_len);
@@ -2100,17 +2114,18 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
ppd->is_sm_config_started = 1;
} else if (ls_new == IB_PORT_ARMED) {
- if (ppd->is_sm_config_started == 0)
+ if (ppd->is_sm_config_started == 0) {
invalid = 1;
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
}
}
- ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
- if (ret)
- return ret;
-
- if (invalid)
- smp->status |= IB_SMP_INVALID_FIELD;
+ if (!invalid) {
+ ret = set_port_states(ppd, smp, ls_new, ps_new);
+ if (ret)
+ return ret;
+ }
return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len,
max_len);
@@ -2888,7 +2903,6 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
struct _vls_dctrs *vlinfo;
size_t response_data_size;
u32 num_ports;
- u8 num_pslm;
u8 lq, num_vls;
u8 res_lli, res_ler;
u64 port_mask;
@@ -2898,7 +2912,6 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
int vfi;
num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
- num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
vl_select_mask = be32_to_cpu(req->vl_select_mask);
res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
@@ -3688,7 +3701,11 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
*new_cc_state = *old_cc_state;
- new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
+ if (ppd->total_cct_entry)
+ new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
+ else
+ new_cc_state->cct.ccti_limit = 0;
+
memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
@@ -3751,7 +3768,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
- s64 ts;
+ u64 ts;
int i;
if (am || smp_length_check(sizeof(*cong_log), max_len)) {
@@ -3769,7 +3786,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
ppd->threshold_cong_event_map,
sizeof(cong_log->threshold_cong_event_map));
/* keep timestamp in units of 1.024 usec */
- ts = ktime_to_ns(ktime_get()) / 1024;
+ ts = ktime_get_ns() / 1024;
cong_log->current_time_stamp = cpu_to_be32(ts);
for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
struct opa_hfi1_cong_log_event_internal *cce =
@@ -3781,7 +3798,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
* required to wrap the counter are supposed to
* be zeroed (CA10-49 IBTA, release 1.2.1, V1).
*/
- if ((u64)(ts - cce->timestamp) > (2 * UINT_MAX))
+ if ((ts - cce->timestamp) / 2 > U32_MAX)
continue;
memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
memcpy(cong_log->events[i].remote_qp_number_cn_entry,
@@ -4260,6 +4277,18 @@ void clear_linkup_counters(struct hfi1_devdata *dd)
dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
}
+static int is_full_mgmt_pkey_in_table(struct hfi1_ibport *ibp)
+{
+ unsigned int i;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
+ if (ppd->pkeys[i] == FULL_MGMT_P_KEY)
+ return 1;
+
+ return 0;
+}
+
/*
* is_local_mad() returns 1 if 'mad' is sent from, and destined to the
* local node, 0 otherwise.
@@ -4293,7 +4322,6 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
const struct ib_wc *in_wc)
{
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u16 slid = ib_lid_cpu16(in_wc->slid);
u16 pkey;
if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
@@ -4320,10 +4348,71 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
*/
if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
return 0;
- ingress_pkey_table_fail(ppd, pkey, slid);
+ /*
+ * On OPA devices it is okay to lose the upper 16 bits of LID as this
+ * information is obtained elsewhere. Mask off the upper 16 bits.
+ */
+ ingress_pkey_table_fail(ppd, pkey, ib_lid_cpu16(0xFFFF & in_wc->slid));
return 1;
}
+/**
+ * hfi1_pkey_validation_pma - validates PKEYs for incoming PMA MAD packets.
+ * @ibp: IB port data
+ * @in_mad: MAD packet with header and data
+ * @in_wc: Work completion data such as source LID, port number, etc.
+ *
+ * These are all the possible logic rules for validating a pkey:
+ *
+ * a) If pkey neither FULL_MGMT_P_KEY nor LIM_MGMT_P_KEY,
+ * and NOT self-originated packet:
+ * Drop MAD packet as it should always be part of the
+ * management partition unless it's a self-originated packet.
+ *
+ * b) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY in pkey table:
+ * The packet is coming from a management node and the receiving node
+ * is also a management node, so it is safe for the packet to go through.
+ *
+ * c) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY is NOT in pkey table:
+ * Drop the packet as LIM_MGMT_P_KEY should always be in the pkey table.
+ * It could be an FM misconfiguration.
+ *
+ * d) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY is NOT in pkey table:
+ * It is safe for the packet to go through since a non-management node is
+ * talking to another non-management node.
+ *
+ * e) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY in pkey table:
+ * Drop the packet because a non-management node is talking to a
+ * management node, and it could be an attack.
+ *
+ * For the implementation, these rules can be simplified to only checking
+ * for (a) and (e). There's no need to check for rule (b) as
+ * the packet doesn't need to be dropped. Rule (c) is not possible in
+ * the driver as LIM_MGMT_P_KEY is always in the pkey table.
+ *
+ * Return:
+ * 0 - pkey is okay, -EINVAL - it's a bad pkey
+ */
+static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp,
+ const struct opa_mad *in_mad,
+ const struct ib_wc *in_wc)
+{
+ u16 pkey_value = hfi1_lookup_pkey_value(ibp, in_wc->pkey_index);
+
+ /* Rule (a) from above */
+ if (!is_local_mad(ibp, in_mad, in_wc) &&
+ pkey_value != LIM_MGMT_P_KEY &&
+ pkey_value != FULL_MGMT_P_KEY)
+ return -EINVAL;
+
+ /* Rule (e) from above */
+ if (pkey_value == LIM_MGMT_P_KEY &&
+ is_full_mgmt_pkey_in_table(ibp))
+ return -EINVAL;
+
+ return 0;
+}
+
static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
u8 port, const struct opa_mad *in_mad,
struct opa_mad *out_mad,
@@ -4663,8 +4752,11 @@ static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
out_mad, &resp_len);
goto bail;
case IB_MGMT_CLASS_PERF_MGMT:
- ret = process_perf_opa(ibdev, port, in_mad, out_mad,
- &resp_len);
+ ret = hfi1_pkey_validation_pma(ibp, in_mad, in_wc);
+ if (ret)
+ return IB_MAD_RESULT_FAILURE;
+
+ ret = process_perf_opa(ibdev, port, in_mad, out_mad, &resp_len);
goto bail;
default:
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 4c1245072093..c4938f3d97c8 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -239,7 +239,7 @@ struct opa_hfi1_cong_log_event_internal {
u8 sl;
u8 svc_type;
u32 rlid;
- s64 timestamp; /* wider than 32 bits to detect 32 bit rollover */
+ u64 timestamp; /* wider than 32 bits to detect 32 bit rollover */
};
struct opa_hfi1_cong_log_event {
@@ -428,6 +428,6 @@ struct sc2vlnt {
COUNTER_MASK(1, 4))
void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
-void hfi1_handle_trap_timer(unsigned long data);
+void hfi1_handle_trap_timer(struct timer_list *t);
#endif /* _HFI1_MAD_H */
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 175002c046ed..e7b3ce123da6 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -67,12 +67,9 @@ struct mmu_rb_handler {
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
-static inline void mmu_notifier_range_start(struct mmu_notifier *,
- struct mm_struct *,
- unsigned long, unsigned long);
-static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
- struct mm_struct *,
- unsigned long, unsigned long);
+static void mmu_notifier_range_start(struct mmu_notifier *,
+ struct mm_struct *,
+ unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
@@ -286,17 +283,10 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
handler->ops->remove(handler->ops_arg, node);
}
-static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
-{
- mmu_notifier_mem_invalidate(mn, mm, start, end);
-}
-
-static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static void mmu_notifier_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
{
struct mmu_rb_handler *handler =
container_of(mn, struct mmu_rb_handler, mn);
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 7108a4b5e94c..4c1198bc5e70 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -703,7 +703,6 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
{
struct send_context_info *sci;
struct send_context *sc = NULL;
- int req_type = type;
dma_addr_t dma;
unsigned long flags;
u64 reg;
@@ -730,13 +729,6 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
return NULL;
}
- /*
- * VNIC contexts are dynamically allocated.
- * Hence, pick a user context for VNIC.
- */
- if (type == SC_VNIC)
- type = SC_USER;
-
spin_lock_irqsave(&dd->sc_lock, flags);
ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
if (ret) {
@@ -746,15 +738,6 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
return NULL;
}
- /*
- * VNIC contexts are used by kernel driver.
- * Hence, mark them as kernel contexts.
- */
- if (req_type == SC_VNIC) {
- dd->send_contexts[sw_index].type = SC_KERNEL;
- type = SC_KERNEL;
- }
-
sci = &dd->send_contexts[sw_index];
sci->sc = sc;
@@ -1423,14 +1406,14 @@ retry:
goto done;
}
/* copy from receiver cache line and recalculate */
- sc->alloc_free = ACCESS_ONCE(sc->free);
+ sc->alloc_free = READ_ONCE(sc->free);
avail =
(unsigned long)sc->credits -
(sc->fill - sc->alloc_free);
if (blocks > avail) {
/* still no room, actively update */
sc_release_update(sc);
- sc->alloc_free = ACCESS_ONCE(sc->free);
+ sc->alloc_free = READ_ONCE(sc->free);
trycount++;
goto retry;
}
@@ -1667,7 +1650,7 @@ void sc_release_update(struct send_context *sc)
/* call sent buffer callbacks */
code = -1; /* code not yet set */
- head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */
+ head = READ_ONCE(sc->sr_head); /* snapshot the head */
tail = sc->sr_tail;
while (head != tail) {
pbuf = &sc->sr[tail].pbuf;
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index 99ca5edb0b43..058b08f459ab 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -54,12 +54,6 @@
#define SC_USER 3 /* must be the last one: it may take all left */
#define SC_MAX 4 /* count of send context types */
-/*
- * SC_VNIC types are allocated (dynamically) from the user context pool,
- * (SC_USER) and used by kernel driver as kernel contexts (SC_KERNEL).
- */
-#define SC_VNIC SC_MAX
-
/* invalid send context index */
#define INVALID_SCI 0xff
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index e1cf0c08ca6f..fd01a760259f 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -276,7 +276,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (IS_ERR(ps->s_txreq))
goto bail_no_tx;
- ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
@@ -1966,7 +1965,7 @@ static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
cc_event->svc_type = svc_type;
cc_event->rlid = rlid;
/* keep timestamp in units of 1.024 usec */
- cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;
+ cc_event->timestamp = ktime_get_ns() / 1024;
spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}
@@ -2175,7 +2174,7 @@ send_middle:
goto no_immediate_data;
if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
goto send_last_inv;
- /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
+ /* FALLTHROUGH -- for SEND_ONLY_WITH_IMMEDIATE */
case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
@@ -2220,7 +2219,7 @@ send_last:
wc.opcode = IB_WC_RECV;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
/*
* It seems that IB mandates the presence of an SL in a
* work completion only for the UD transport (see section
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index b3291f0fde9a..2c7fc6e331ea 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -363,7 +363,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
again:
smp_read_barrier_depends(); /* see post_one_send() */
- if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
+ if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
@@ -560,7 +560,7 @@ do_write:
wc.byte_len = wqe->length;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
wc.port_num = 1;
/* Signal completion event if the solicited bit is set. */
@@ -825,11 +825,9 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ibport *ibp = ps->ibp;
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 bth1 = 0;
u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
u16 lrh0 = HFI1_LRH_BTH;
- u16 slid;
u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
extra_bytes) >> 2);
@@ -866,13 +864,6 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
}
hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
-
- if (!ppd->lid)
- slid = be16_to_cpu(IB_LID_PERMISSIVE);
- else
- slid = ppd->lid |
- (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
- ((1 << ppd->lmc) - 1));
hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
lrh0,
qp->s_hdrwords + nwords,
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 6781bcdb10b3..31c8f89b5fc8 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -491,10 +491,10 @@ static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
}
}
-static void sdma_err_progress_check(unsigned long data)
+static void sdma_err_progress_check(struct timer_list *t)
{
unsigned index;
- struct sdma_engine *sde = (struct sdma_engine *)data;
+ struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer);
dd_dev_err(sde->dd, "SDE progress check event\n");
for (index = 0; index < sde->dd->num_sdma; index++) {
@@ -1392,6 +1392,13 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
return ret;
idle_cnt = ns_to_cclock(dd, idle_cnt);
+ if (idle_cnt)
+ dd->default_desc1 =
+ SDMA_DESC1_HEAD_TO_HOST_FLAG;
+ else
+ dd->default_desc1 =
+ SDMA_DESC1_INT_REQ_FLAG;
+
if (!sdma_desct_intr)
sdma_desct_intr = SDMA_DESC_INTR;
@@ -1436,13 +1443,6 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
sde->tail_csr =
get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
- if (idle_cnt)
- dd->default_desc1 =
- SDMA_DESC1_HEAD_TO_HOST_FLAG;
- else
- dd->default_desc1 =
- SDMA_DESC1_INT_REQ_FLAG;
-
tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
(unsigned long)sde);
@@ -1453,8 +1453,8 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
sde->progress_check_head = 0;
- setup_timer(&sde->err_progress_check_timer,
- sdma_err_progress_check, (unsigned long)sde);
+ timer_setup(&sde->err_progress_check_timer,
+ sdma_err_progress_check, 0);
sde->descq = dma_zalloc_coherent(
&dd->pcidev->dev,
@@ -1465,13 +1465,8 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
if (!sde->descq)
goto bail;
sde->tx_ring =
- kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
- GFP_KERNEL);
- if (!sde->tx_ring)
- sde->tx_ring =
- vzalloc(
- sizeof(struct sdma_txreq *) *
- descq_cnt);
+ kvzalloc_node(sizeof(struct sdma_txreq *) * descq_cnt,
+ GFP_KERNEL, dd->node);
if (!sde->tx_ring)
goto bail;
}
@@ -1725,7 +1720,7 @@ retry:
swhead = sde->descq_head & sde->sdma_mask;
/* this code is really bad for cache line trading */
- swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
cnt = sde->descq_cnt;
if (swhead < swtail)
@@ -1872,7 +1867,7 @@ retry:
if ((status & sde->idle_mask) && !idle_check_done) {
u16 swtail;
- swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
if (swtail != hwhead) {
hwhead = (u16)read_sde_csr(sde, SD(HEAD));
idle_check_done = 1;
@@ -2144,7 +2139,6 @@ void sdma_dumpstate(struct sdma_engine *sde)
static void dump_sdma_state(struct sdma_engine *sde)
{
- struct hw_sdma_desc *descq;
struct hw_sdma_desc *descqp;
u64 desc[2];
u64 addr;
@@ -2155,7 +2149,6 @@ static void dump_sdma_state(struct sdma_engine *sde)
head = sde->descq_head & sde->sdma_mask;
tail = sde->descq_tail & sde->sdma_mask;
cnt = sdma_descq_freecnt(sde);
- descq = sde->descq;
dd_dev_err(sde->dd,
"SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
@@ -2222,7 +2215,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
u16 len;
head = sde->descq_head & sde->sdma_mask;
- tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
seq_printf(s, SDE_FMT, sde->this_idx,
sde->cpu,
sdma_state_name(sde->state.current_state),
@@ -2593,7 +2586,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
* 7220, e.g.
*/
ss->go_s99_running = 1;
- /* fall through and start dma engine */
+ /* fall through -- and start dma engine */
case sdma_event_e10_go_hw_start:
/* This reference means the state machine is started */
sdma_get(&sde->state);
@@ -3016,6 +3009,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
case sdma_event_e60_hw_halted:
need_progress = 1;
sdma_err_progress_check_schedule(sde);
+ /* fall through */
case sdma_event_e90_sw_halted:
/*
* SW initiated halt does not perform engines
@@ -3305,7 +3299,7 @@ int sdma_ahg_alloc(struct sdma_engine *sde)
return -EINVAL;
}
while (1) {
- nr = ffz(ACCESS_ONCE(sde->ahg_bits));
+ nr = ffz(READ_ONCE(sde->ahg_bits));
if (nr > 31) {
trace_hfi1_ahg_allocate(sde, -ENOSPC);
return -ENOSPC;
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 107011d8613b..374c59784950 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -445,7 +445,7 @@ static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
return sde->descq_cnt -
(sde->descq_tail -
- ACCESS_ONCE(sde->descq_head)) - 1;
+ READ_ONCE(sde->descq_head)) - 1;
}
static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 6d2702ef34ac..25e867393463 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -543,7 +543,7 @@ static ssize_t show_nctxts(struct device *device,
* give a more accurate picture of total contexts available.
*/
return scnprintf(buf, PAGE_SIZE, "%u\n",
- min(dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt,
+ min(dd->num_user_contexts,
(u32)dd->sc_sizes[SC_USER].count));
}
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 9938bb983ce6..959a80429ee9 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -91,12 +91,17 @@ u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opa_hdr)
return __get_16b_hdr_len(&opa_hdr->opah);
}
-const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet)
+const char *hfi1_trace_get_packet_l4_str(u8 l4)
{
- if (packet->etype != RHF_RCV_TYPE_BYPASS)
- return "IB";
+ if (l4)
+ return "16B";
+ else
+ return "9B";
+}
- switch (hfi1_16B_get_l2(packet->hdr)) {
+const char *hfi1_trace_get_packet_l2_str(u8 l2)
+{
+ switch (l2) {
case 0:
return "0";
case 1:
@@ -109,14 +114,6 @@ const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet)
return "";
}
-const char *hfi1_trace_get_packet_type_str(u8 l4)
-{
- if (l4)
- return "16B";
- else
- return "9B";
-}
-
#define IMM_PRN "imm:%d"
#define RETH_PRN "reth vaddr:0x%.16llx rkey:0x%.8x dlen:0x%.8x"
#define AETH_PRN "aeth syn:0x%.2x %s msn:0x%.8x"
@@ -154,7 +151,7 @@ void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
*opcode = ib_bth_get_opcode(ohdr);
*tver = ib_bth_get_tver(ohdr);
*pkey = ib_bth_get_pkey(ohdr);
- *psn = ib_bth_get_psn(ohdr);
+ *psn = mask_psn(ib_bth_get_psn(ohdr));
*qpn = ib_bth_get_qpn(ohdr);
}
@@ -169,7 +166,7 @@ void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr,
*pad = ib_bth_get_pad(ohdr);
*se = ib_bth_get_se(ohdr);
*tver = ib_bth_get_tver(ohdr);
- *psn = ib_bth_get_psn(ohdr);
+ *psn = mask_psn(ib_bth_get_psn(ohdr));
*qpn = ib_bth_get_qpn(ohdr);
}
diff --git a/drivers/infiniband/hw/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
index af50c0793450..8540463ef3f7 100644
--- a/drivers/infiniband/hw/hfi1/trace.h
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -44,6 +44,16 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
+
+#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
+#define show_packettype(etype) \
+__print_symbolic(etype, \
+ packettype_name(EXPECTED), \
+ packettype_name(EAGER), \
+ packettype_name(IB), \
+ packettype_name(ERROR), \
+ packettype_name(BYPASS))
+
#include "trace_dbg.h"
#include "trace_misc.h"
#include "trace_ctxts.h"
diff --git a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
index 6721f84dafa5..fb631278eccd 100644
--- a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
+++ b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -99,8 +99,7 @@ u8 ibhdr_exhdr_len(struct ib_header *hdr);
const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opah);
u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet);
-const char *hfi1_trace_get_packet_type_str(u8 l4);
-const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet);
+const char *hfi1_trace_get_packet_l4_str(u8 l4);
void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
u8 *ack, u8 *becn, u8 *fecn, u8 *mig,
u8 *se, u8 *pad, u8 *opcode, u8 *tver,
@@ -129,6 +128,8 @@ const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
u8 se, u8 pad, u8 opcode, const char *opname,
u8 tver, u16 pkey, u32 psn, u32 qpn);
+const char *hfi1_trace_get_packet_l2_str(u8 l2);
+
#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
#define lrh_name(lrh) { HFI1_##lrh, #lrh }
@@ -136,8 +137,6 @@ const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
__print_symbolic(lrh, \
lrh_name(LRH_BTH), \
lrh_name(LRH_GRH))
-#define PKT_ENTRY(pkt) __string(ptype, hfi1_trace_get_packet_str(packet))
-#define PKT_ASSIGN(pkt) __assign_str(ptype, hfi1_trace_get_packet_str(packet))
DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
TP_PROTO(struct hfi1_devdata *dd,
@@ -146,12 +145,12 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
TP_ARGS(dd, packet, sc5),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
- PKT_ENTRY(packet)
- __field(bool, bypass)
+ __field(u8, etype)
__field(u8, ack)
__field(u8, age)
__field(u8, becn)
__field(u8, fecn)
+ __field(u8, l2)
__field(u8, l4)
__field(u8, lnh)
__field(u8, lver)
@@ -176,10 +175,10 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
),
TP_fast_assign(
DD_DEV_ASSIGN(dd);
- PKT_ASSIGN(packet);
- if (packet->etype == RHF_RCV_TYPE_BYPASS) {
- __entry->bypass = true;
+ __entry->etype = packet->etype;
+ __entry->l2 = hfi1_16B_get_l2(packet->hdr);
+ if (__entry->etype == RHF_RCV_TYPE_BYPASS) {
hfi1_trace_parse_16b_hdr(packet->hdr,
&__entry->age,
&__entry->becn,
@@ -203,7 +202,6 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
&__entry->psn,
&__entry->qpn);
} else {
- __entry->bypass = false;
hfi1_trace_parse_9b_hdr(packet->hdr, sc5,
&__entry->lnh,
&__entry->lver,
@@ -233,9 +231,13 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
),
TP_printk("[%s] (%s) %s %s hlen:%d %s",
__get_str(dev),
- __get_str(ptype),
+ __entry->etype != RHF_RCV_TYPE_BYPASS ?
+ show_packettype(__entry->etype) :
+ hfi1_trace_get_packet_l2_str(
+ __entry->l2),
hfi1_trace_fmt_lrh(p,
- __entry->bypass,
+ __entry->etype ==
+ RHF_RCV_TYPE_BYPASS,
__entry->age,
__entry->becn,
__entry->fecn,
@@ -252,7 +254,8 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
__entry->dlid,
__entry->slid),
hfi1_trace_fmt_bth(p,
- __entry->bypass,
+ __entry->etype ==
+ RHF_RCV_TYPE_BYPASS,
__entry->ack,
__entry->becn,
__entry->fecn,
@@ -284,7 +287,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
TP_ARGS(dd, opah, sc5),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
- __field(bool, bypass)
+ __field(u8, hdr_type)
__field(u8, ack)
__field(u8, age)
__field(u8, becn)
@@ -316,8 +319,8 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
DD_DEV_ASSIGN(dd);
- if (opah->hdr_type) {
- __entry->bypass = true;
+ __entry->hdr_type = opah->hdr_type;
+ if (__entry->hdr_type) {
hfi1_trace_parse_16b_hdr(&opah->opah,
&__entry->age,
&__entry->becn,
@@ -331,7 +334,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
&__entry->dlid,
&__entry->slid);
- if (entry->l4 == OPA_16B_L4_IB_LOCAL)
+ if (__entry->l4 == OPA_16B_L4_IB_LOCAL)
ohdr = &opah->opah.u.oth;
else
ohdr = &opah->opah.u.l.oth;
@@ -345,7 +348,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
&__entry->psn,
&__entry->qpn);
} else {
- __entry->bypass = false;
+ __entry->l4 = OPA_16B_L4_9B;
hfi1_trace_parse_9b_hdr(&opah->ibh, sc5,
&__entry->lnh,
&__entry->lver,
@@ -354,7 +357,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
&__entry->len,
&__entry->dlid,
&__entry->slid);
- if (entry->lnh == HFI1_LRH_BTH)
+ if (__entry->lnh == HFI1_LRH_BTH)
ohdr = &opah->ibh.u.oth;
else
ohdr = &opah->ibh.u.l.oth;
@@ -378,9 +381,9 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
),
TP_printk("[%s] (%s) %s %s hlen:%d %s",
__get_str(dev),
- hfi1_trace_get_packet_type_str(__entry->l4),
+ hfi1_trace_get_packet_l4_str(__entry->l4),
hfi1_trace_fmt_lrh(p,
- __entry->bypass,
+ !!__entry->hdr_type,
__entry->age,
__entry->becn,
__entry->fecn,
@@ -397,7 +400,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
__entry->dlid,
__entry->slid),
hfi1_trace_fmt_bth(p,
- __entry->bypass,
+ !!__entry->hdr_type,
__entry->ack,
__entry->becn,
__entry->fecn,
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
index f9909d240dcc..4d487fee105d 100644
--- a/drivers/infiniband/hw/hfi1/trace_rx.h
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -62,15 +62,6 @@ __print_symbolic(type, \
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_rx
-#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
-#define show_packettype(etype) \
-__print_symbolic(etype, \
- packettype_name(EXPECTED), \
- packettype_name(EAGER), \
- packettype_name(IB), \
- packettype_name(ERROR), \
- packettype_name(BYPASS))
-
TRACE_EVENT(hfi1_rcvhdr,
TP_PROTO(struct hfi1_devdata *dd,
u32 ctxt,
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 0b646173ca22..991bbee04821 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -80,7 +80,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (iowait_sdma_pending(&priv->s_iowait)) {
@@ -93,7 +93,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto done_free_tx;
}
- ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
@@ -121,7 +120,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
/* Check if send work queue is empty. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_cur == ACCESS_ONCE(qp->s_head)) {
+ if (qp->s_cur == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
}
@@ -463,7 +462,7 @@ last_imm:
wc.status = IB_WC_SUCCESS;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
/*
* It seems that IB mandates the presence of an SL in a
* work completion only for the UD transport (see section
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 2ba74fdd6f15..beb5091eccca 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -265,8 +265,8 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
} else {
wc.pkey_index = 0;
}
- wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
- ((1 << ppd->lmc) - 1));
+ wc.slid = (ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1))) & U16_MAX;
/* Check for loopback when the port lid is not set */
if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
@@ -487,7 +487,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (iowait_sdma_pending(&priv->s_iowait)) {
@@ -501,7 +501,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
/* see post_one_send() */
smp_read_barrier_depends();
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
@@ -854,7 +854,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
int mgmt_pkey_idx = -1;
struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct ib_header *hdr = packet->hdr;
void *data = packet->payload;
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
@@ -880,7 +879,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
dlid_is_permissive = (dlid == permissive_lid);
slid_is_permissive = (slid == permissive_lid);
} else {
- hdr = packet->hdr;
pkey = ib_bth_get_pkey(ohdr);
dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));
@@ -1039,7 +1037,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
}
if (slid_is_permissive)
slid = be32_to_cpu(OPA_LID_PERMISSIVE);
- wc.slid = slid;
+ wc.slid = slid & U16_MAX;
wc.sl = sl_from_sc;
/*
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 6f6c14df383e..c1c596adcd01 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -542,14 +542,10 @@ int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
unsigned long *ev = uctxt->dd->events +
- (((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + fd->subctxt);
+ (uctxt_offset(uctxt) + fd->subctxt);
u32 *array;
int ret = 0;
- if (!fd->invalid_tids)
- return -EINVAL;
-
/*
* copy_to_user() can sleep, which will leave the invalid_lock
* locked and cause the MMU notifier to be blocked on the lock
@@ -942,8 +938,7 @@ static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
* process in question.
*/
ev = uctxt->dd->events +
- (((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + fdata->subctxt);
+ (uctxt_offset(uctxt) + fdata->subctxt);
set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
}
fdata->invalid_tid_idx++;
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index c0c0e0445cbf..a3a7b33196d6 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -276,7 +276,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
/* Wait until all requests have been freed. */
wait_event_interruptible(
pq->wait,
- (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+ (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
kfree(pq->reqs);
kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
@@ -591,7 +591,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
if (ret != -EBUSY) {
req->status = ret;
WRITE_ONCE(req->has_error, 1);
- if (ACCESS_ONCE(req->seqcomp) ==
+ if (READ_ONCE(req->seqcomp) ==
req->seqsubmitted - 1)
goto free_req;
return ret;
@@ -825,7 +825,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
*/
if (req->data_len) {
iovec = &req->iovs[req->iov_idx];
- if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
+ if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
if (++req->iov_idx == req->data_iovs) {
ret = -EFAULT;
goto free_txreq;
@@ -956,10 +956,8 @@ static int pin_sdma_pages(struct user_sdma_request *req,
struct hfi1_user_sdma_pkt_q *pq = req->pq;
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- SDMA_DBG(req, "Failed page array alloc");
+ if (!pages)
return -ENOMEM;
- }
memcpy(pages, node->pages, node->npages * sizeof(*pages));
npages -= node->npages;
@@ -1254,20 +1252,25 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
struct user_sdma_txreq *tx, u32 datalen)
{
u32 ahg[AHG_KDETH_ARRAY_SIZE];
- int diff = 0;
+ int idx = 0;
u8 omfactor; /* KDETH.OM */
struct hfi1_user_sdma_pkt_q *pq = req->pq;
struct hfi1_pkt_header *hdr = &req->hdr;
u16 pbclen = le16_to_cpu(hdr->pbc[0]);
u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
+ size_t array_size = ARRAY_SIZE(ahg);
if (PBC2LRH(pbclen) != lrhlen) {
/* PBC.PbcLengthDWs */
- AHG_HEADER_SET(ahg, diff, 0, 0, 12,
- cpu_to_le16(LRH2PBC(lrhlen)));
+ idx = ahg_header_set(ahg, idx, array_size, 0, 0, 12,
+ (__force u16)cpu_to_le16(LRH2PBC(lrhlen)));
+ if (idx < 0)
+ return idx;
/* LRH.PktLen (we need the full 16 bits due to byte swap) */
- AHG_HEADER_SET(ahg, diff, 3, 0, 16,
- cpu_to_be16(lrhlen >> 2));
+ idx = ahg_header_set(ahg, idx, array_size, 3, 0, 16,
+ (__force u16)cpu_to_be16(lrhlen >> 2));
+ if (idx < 0)
+ return idx;
}
/*
@@ -1278,12 +1281,23 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
val32 |= 1UL << 31;
- AHG_HEADER_SET(ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
- AHG_HEADER_SET(ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
+ idx = ahg_header_set(ahg, idx, array_size, 6, 0, 16,
+ (__force u16)cpu_to_be16(val32 >> 16));
+ if (idx < 0)
+ return idx;
+ idx = ahg_header_set(ahg, idx, array_size, 6, 16, 16,
+ (__force u16)cpu_to_be16(val32 & 0xffff));
+ if (idx < 0)
+ return idx;
/* KDETH.Offset */
- AHG_HEADER_SET(ahg, diff, 15, 0, 16,
- cpu_to_le16(req->koffset & 0xffff));
- AHG_HEADER_SET(ahg, diff, 15, 16, 16, cpu_to_le16(req->koffset >> 16));
+ idx = ahg_header_set(ahg, idx, array_size, 15, 0, 16,
+ (__force u16)cpu_to_le16(req->koffset & 0xffff));
+ if (idx < 0)
+ return idx;
+ idx = ahg_header_set(ahg, idx, array_size, 15, 16, 16,
+ (__force u16)cpu_to_le16(req->koffset >> 16));
+ if (idx < 0)
+ return idx;
if (req_opcode(req->info.ctrl) == EXPECTED) {
__le16 val;
@@ -1310,10 +1324,13 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
KDETH_OM_SMALL_SHIFT;
/* KDETH.OM and KDETH.OFFSET (TID) */
- AHG_HEADER_SET(ahg, diff, 7, 0, 16,
- ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
+ idx = ahg_header_set(
+ ahg, idx, array_size, 7, 0, 16,
+ ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
((req->tidoffset >> omfactor)
- & 0x7fff)));
+ & 0x7fff)));
+ if (idx < 0)
+ return idx;
/* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
(EXP_TID_GET(tidval, IDX) & 0x3ff));
@@ -1330,21 +1347,22 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
AHG_KDETH_INTR_SHIFT));
}
- AHG_HEADER_SET(ahg, diff, 7, 16, 14, val);
+ idx = ahg_header_set(ahg, idx, array_size,
+ 7, 16, 14, (__force u16)val);
+ if (idx < 0)
+ return idx;
}
- if (diff < 0)
- return diff;
trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
req->info.comp_idx, req->sde->this_idx,
- req->ahg_idx, ahg, diff, tidval);
+ req->ahg_idx, ahg, idx, tidval);
sdma_txinit_ahg(&tx->txreq,
SDMA_TXREQ_F_USE_AHG,
- datalen, req->ahg_idx, diff,
+ datalen, req->ahg_idx, idx,
ahg, sizeof(req->hdr),
user_sdma_txreq_cb);
- return diff;
+ return idx;
}
/*
@@ -1390,7 +1408,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
} else {
if (status != SDMA_TXREQ_S_OK)
req->status = status;
- if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
+ if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
(READ_ONCE(req->done) ||
READ_ONCE(req->has_error))) {
user_sdma_free_request(req, false);
@@ -1410,6 +1428,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
{
+ int i;
+
if (!list_empty(&req->txps)) {
struct sdma_txreq *t, *p;
@@ -1421,22 +1441,20 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
kmem_cache_free(req->pq->txreq_cache, tx);
}
}
- if (req->data_iovs) {
- struct sdma_mmu_node *node;
- int i;
-
- for (i = 0; i < req->data_iovs; i++) {
- node = req->iovs[i].node;
- if (!node)
- continue;
-
- if (unpin)
- hfi1_mmu_rb_remove(req->pq->handler,
- &node->rb);
- else
- atomic_dec(&node->refcount);
- }
+
+ for (i = 0; i < req->data_iovs; i++) {
+ struct sdma_mmu_node *node = req->iovs[i].node;
+
+ if (!node)
+ continue;
+
+ if (unpin)
+ hfi1_mmu_rb_remove(req->pq->handler,
+ &node->rb);
+ else
+ atomic_dec(&node->refcount);
}
+
kfree(req->tids);
clear_bit(req->info.comp_idx, req->pq->req_in_use);
}
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index 9b8bb5634c0d..a3d192424344 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -80,15 +80,26 @@
#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
-#define AHG_HEADER_SET(arr, idx, dw, bit, width, value) \
- do { \
- if ((idx) < ARRAY_SIZE((arr))) \
- (arr)[(idx++)] = sdma_build_ahg_descriptor( \
- (__force u16)(value), (dw), (bit), \
- (width)); \
- else \
- return -ERANGE; \
- } while (0)
+/**
+ * Build an SDMA AHG header update descriptor and save it to an array.
+ * @arr - Array to save the descriptor to.
+ * @idx - Index of the array at which the descriptor will be saved.
+ * @array_size - Size of the array arr.
+ * @dw - Update index into the header in DWs.
+ * @bit - Start bit.
+ * @width - Field width.
+ * @value - 16 bits of immediate data to write into the field.
+ * Returns -ERANGE if idx is invalid. If successful, returns the next index
+ * (idx + 1) of the array to be used for the next descriptor.
+ */
+static inline int ahg_header_set(u32 *arr, int idx, size_t array_size,
+ u8 dw, u8 bit, u8 width, u16 value)
+{
+ if ((size_t)idx >= array_size)
+ return -ERANGE;
+ arr[idx++] = sdma_build_ahg_descriptor(value, dw, bit, width);
+ return idx;
+}
/* Tx request flag bits */
#define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index e232f3c608b4..a38785e224cc 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -146,6 +146,9 @@ static int pio_wait(struct rvt_qp *qp,
/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24
+/* 16B trailing buffer */
+static const u8 trail_buf[MAX_16B_PADDING];
+
static uint wss_threshold;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
@@ -667,9 +670,9 @@ void hfi1_16B_rcv(struct hfi1_packet *packet)
* This is called from a timer to check for QPs
* which need kernel memory in order to send a packet.
*/
-static void mem_timer(unsigned long data)
+static void mem_timer(struct timer_list *t)
{
- struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
+ struct hfi1_ibdev *dev = from_timer(dev, t, mem_timer);
struct list_head *list = &dev->memwait;
struct rvt_qp *qp = NULL;
struct iowait *wait;
@@ -793,6 +796,27 @@ bail_txadd:
return ret;
}
+/**
+ * update_tx_opstats - record stats by opcode
+ * @qp: the qp
+ * @ps: transmit packet state
+ * @plen: the plen in dwords
+ *
+ * This is a routine to record the tx opstats after a
+ * packet has been presented to the egress mechanism.
+ */
+static void update_tx_opstats(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ u32 plen)
+{
+#ifdef CONFIG_DEBUG_FS
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_opcode_stats_perctx *s = get_cpu_ptr(dd->tx_opstats);
+
+ inc_opstats(plen * 4, &s->stats[ps->opcode]);
+ put_cpu_ptr(s);
+#endif
+}
+
/*
* Build the number of DMA descriptors needed to send length bytes of data.
*
@@ -812,9 +836,7 @@ static int build_verbs_tx_desc(
int ret = 0;
struct hfi1_sdma_header *phdr = &tx->phdr;
u16 hdrbytes = tx->hdr_dwords << 2;
- u32 *hdr;
u8 extra_bytes = 0;
- static char trail_buf[12]; /* CRC = 4, LT = 1, Pad = 0 to 7 bytes */
if (tx->phdr.hdr.hdr_type) {
/*
@@ -823,9 +845,6 @@ static int build_verbs_tx_desc(
*/
extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) +
(SIZE_OF_CRC << 2) + SIZE_OF_LT;
- hdr = (u32 *)&phdr->hdr.opah;
- } else {
- hdr = (u32 *)&phdr->hdr.ibh;
}
if (!ahg_info->ahgcount) {
ret = sdma_txinit_ahg(
@@ -869,9 +888,9 @@ static int build_verbs_tx_desc(
}
/* add icrc, lt byte, and padding to flit */
- if (extra_bytes != 0)
+ if (extra_bytes)
ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
- trail_buf, extra_bytes);
+ (void *)trail_buf, extra_bytes);
bail_txadd:
return ret;
@@ -891,14 +910,12 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u8 sc5 = priv->s_sc;
int ret;
u32 dwords;
- bool bypass = false;
if (ps->s_txreq->phdr.hdr.hdr_type) {
u8 extra_bytes = hfi1_get_16b_padding((hdrwords << 2), len);
dwords = (len + extra_bytes + (SIZE_OF_CRC << 2) +
SIZE_OF_LT) >> 2;
- bypass = true;
} else {
dwords = (len + 3) >> 2;
}
@@ -938,6 +955,8 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
goto bail_ecomm;
return ret;
}
+
+ update_tx_opstats(qp, ps, plen);
trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
&ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
return ret;
@@ -1033,8 +1052,6 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
int wc_status = IB_WC_SUCCESS;
int ret = 0;
pio_release_cb cb = NULL;
- u32 lrh0_16b;
- bool bypass = false;
u8 extra_bytes = 0;
if (ps->s_txreq->phdr.hdr.hdr_type) {
@@ -1043,8 +1060,6 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
extra_bytes = pad_size + (SIZE_OF_CRC << 2) + SIZE_OF_LT;
dwords = (len + extra_bytes) >> 2;
hdr = (u32 *)&ps->s_txreq->phdr.hdr.opah;
- lrh0_16b = ps->s_txreq->phdr.hdr.opah.lrh[0];
- bypass = true;
} else {
dwords = (len + 3) >> 2;
hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh;
@@ -1128,21 +1143,14 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
len -= slen;
}
}
- /*
- * Bypass packet will need to copy additional
- * bytes to accommodate for CRC and LT bytes
- */
- if (extra_bytes) {
- u8 *empty_buf;
+ /* add icrc, lt byte, and padding to flit */
+ if (extra_bytes)
+ seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
- empty_buf = kcalloc(extra_bytes, sizeof(u8),
- GFP_KERNEL);
- seg_pio_copy_mid(pbuf, empty_buf, extra_bytes);
- kfree(empty_buf);
- }
seg_pio_copy_end(pbuf);
}
+ update_tx_opstats(qp, ps, plen);
trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
&ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
@@ -1636,8 +1644,7 @@ static void init_ibport(struct hfi1_pportdata *ppd)
for (i = 0; i < RVT_MAX_TRAP_LISTS ; i++)
INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list);
- setup_timer(&ibp->rvp.trap_timer, hfi1_handle_trap_timer,
- (unsigned long)ibp);
+ timer_setup(&ibp->rvp.trap_timer, hfi1_handle_trap_timer, 0);
spin_lock_init(&ibp->rvp.lock);
/* Set the prefix to the default value (see ch. 4.1.1) */
@@ -1844,7 +1851,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
/* Only need to initialize non-zero fields. */
- setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
+ timer_setup(&dev->mem_timer, mem_timer, 0);
seqlock_init(&dev->iowait_lock);
seqlock_init(&dev->txwait_lock);
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 76216f2ef35a..cec7a4b34d16 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -92,6 +92,8 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
tx->psc = priv->s_sendcontext;
/* so that we can test if the sdma descriptors are there */
tx->txreq.num_desc = 0;
+ /* Set the header type */
+ tx->phdr.hdr.hdr_type = priv->hdr_type;
return tx;
}
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index f419cbb05928..5d65582fe4d9 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -67,8 +67,6 @@ static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
unsigned int rcvctrl_ops = 0;
int ret;
- hfi1_init_ctxt(uctxt->sc);
-
uctxt->do_interrupt = &handle_receive_interrupt;
/* Now allocate the RcvHdr queue and eager buffers. */
@@ -96,8 +94,6 @@ static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
-
- uctxt->is_vnic = true;
done:
return ret;
}
@@ -122,20 +118,7 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
HFI1_CAP_KGET(NODROP_EGR_FULL) |
HFI1_CAP_KGET(DMA_RTAIL);
uctxt->seq_cnt = 1;
-
- /* Allocate and enable a PIO send context */
- uctxt->sc = sc_alloc(dd, SC_VNIC, uctxt->rcvhdrqentsize,
- uctxt->numa_id);
-
- ret = uctxt->sc ? 0 : -ENOMEM;
- if (ret)
- goto bail;
-
- dd_dev_dbg(dd, "allocated vnic send context %u(%u)\n",
- uctxt->sc->sw_index, uctxt->sc->hw_context);
- ret = sc_enable(uctxt->sc);
- if (ret)
- goto bail;
+ uctxt->is_vnic = true;
if (dd->num_msix_entries)
hfi1_set_vnic_msix_info(uctxt);
@@ -144,11 +127,7 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);
*vnic_ctxt = uctxt;
- return ret;
-bail:
- hfi1_free_ctxt(uctxt);
- dd_dev_dbg(dd, "vnic allocation failed. rc %d\n", ret);
- return ret;
+ return 0;
}
static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
@@ -170,18 +149,6 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
- /*
- * VNIC contexts are allocated from user context pool.
- * Release them back to user context pool.
- *
- * Reset context integrity checks to default.
- * (writes to CSRs probably belong in chip.c)
- */
- write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
- hfi1_pkt_default_send_ctxt_mask(dd, SC_USER));
- sc_disable(uctxt->sc);
-
- dd->send_contexts[uctxt->sc->sw_index].type = SC_USER;
uctxt->event_flags = 0;
@@ -840,6 +807,9 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
struct rdma_netdev *rn;
int i, size, rc;
+ if (!dd->num_vnic_contexts)
+ return ERR_PTR(-ENOMEM);
+
if (!port_num || (port_num > dd->num_pports))
return ERR_PTR(-EINVAL);
@@ -848,7 +818,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
- dd->chip_sdma_engines, HFI1_NUM_VNIC_CTXT);
+ dd->chip_sdma_engines, dd->num_vnic_contexts);
if (!netdev)
return ERR_PTR(-ENOMEM);
@@ -856,7 +826,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
vinfo = opa_vnic_dev_priv(netdev);
vinfo->dd = dd;
vinfo->num_tx_q = dd->chip_sdma_engines;
- vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT;
+ vinfo->num_rx_q = dd->num_vnic_contexts;
vinfo->netdev = netdev;
rn->free_rdma_netdev = hfi1_vnic_free_rn;
rn->set_id = hfi1_vnic_set_vesw_id;
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index 61c93bbd230d..fddb5fdf92de 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,10 +1,31 @@
config INFINIBAND_HNS
tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
- depends on (ARM64 || (COMPILE_TEST && 64BIT)) && HNS && HNS_DSAF && HNS_ENET
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
---help---
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
- is used in Hisilicon Hi1610 and more further ICT SoC.
+ is used in Hisilicon Hip06 and later ICT SoCs based on the
+ platform device.
To compile this driver as a module, choose M here: the module
will be called hns-roce.
+
+config INFINIBAND_HNS_HIP06
+ tristate "Hisilicon Hip06 Family RoCE support"
+ depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
+ ---help---
+ RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
+ Hip07 SoC. These RoCE engines are platform devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hns-roce-hw-v1.
+
+config INFINIBAND_HNS_HIP08
+ tristate "Hisilicon Hip08 Family RoCE support"
+ depends on INFINIBAND_HNS && PCI && HNS3
+ ---help---
+ RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
+ The RoCE engine is a PCI device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hns-roce-hw-v2.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 7e8ebd24dcae..ff426a625e13 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -2,7 +2,13 @@
# Makefile for the Hisilicon RoCE drivers.
#
+ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
- hns_roce_cq.o hns_roce_alloc.o hns_roce_hw_v1.o
+ hns_roce_cq.o hns_roce_alloc.o
+obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
+hns-roce-hw-v1-objs := hns_roce_hw_v1.o
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
+hns-roce-hw-v2-objs := hns_roce_hw_v2.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index d545302b8ef8..7dd6a66ea244 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -44,11 +44,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct ib_gid_attr gid_attr;
struct hns_roce_ah *ah;
u16 vlan_tag = 0xffff;
- struct in6_addr in6;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
union ib_gid sgid;
int ret;
@@ -58,18 +57,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
return ERR_PTR(-ENOMEM);
/* Get mac address */
- memcpy(&in6, grh->dgid.raw, sizeof(grh->dgid.raw));
- if (rdma_is_multicast_addr(&in6)) {
- rdma_get_mcast_mac(&in6, ah->av.mac);
- } else {
- u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);
-
- if (!dmac) {
- kfree(ah);
- return ERR_PTR(-EINVAL);
- }
- memcpy(ah->av.mac, dmac, ETH_ALEN);
- }
+ memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
/* Get source gid */
ret = ib_get_cached_gid(ibpd->device, rdma_ah_get_port_num(ah_attr),
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index e1b433cdd5e2..3e4c5253ab5c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -67,6 +67,7 @@ void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
{
hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
}
+EXPORT_SYMBOL_GPL(hns_roce_bitmap_free);
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
int align, unsigned long *obj)
@@ -160,39 +161,47 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf)
{
int i;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
u32 bits_per_long = BITS_PER_LONG;
if (buf->nbufs == 1) {
dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
} else {
- if (bits_per_long == 64)
+ if (bits_per_long == 64 && buf->page_shift == PAGE_SHIFT)
vunmap(buf->direct.buf);
for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf)
- dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE,
+ dma_free_coherent(dev, 1 << buf->page_shift,
buf->page_list[i].buf,
buf->page_list[i].map);
kfree(buf->page_list);
}
}
+EXPORT_SYMBOL_GPL(hns_roce_buf_free);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
- struct hns_roce_buf *buf)
+ struct hns_roce_buf *buf, u32 page_shift)
{
int i = 0;
dma_addr_t t;
struct page **pages;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
u32 bits_per_long = BITS_PER_LONG;
+ u32 page_size = 1 << page_shift;
+ u32 order;
/* SQ/RQ buf less than one page, SQ + RQ = 8K */
if (size <= max_direct) {
buf->nbufs = 1;
/* Npages calculated by page_size */
- buf->npages = 1 << get_order(size);
- buf->page_shift = PAGE_SHIFT;
+ order = get_order(size);
+ if (order <= page_shift - PAGE_SHIFT)
+ order = 0;
+ else
+ order -= page_shift - PAGE_SHIFT;
+ buf->npages = 1 << order;
+ buf->page_shift = page_shift;
/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
if (!buf->direct.buf)
@@ -207,9 +216,9 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
memset(buf->direct.buf, 0, size);
} else {
- buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ buf->nbufs = (size + page_size - 1) / page_size;
buf->npages = buf->nbufs;
- buf->page_shift = PAGE_SHIFT;
+ buf->page_shift = page_shift;
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
GFP_KERNEL);
@@ -218,16 +227,16 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf = dma_alloc_coherent(dev,
- PAGE_SIZE, &t,
+ page_size, &t,
GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
buf->page_list[i].map = t;
- memset(buf->page_list[i].buf, 0, PAGE_SIZE);
+ memset(buf->page_list[i].buf, 0, page_size);
}
- if (bits_per_long == 64) {
+ if (bits_per_long == 64 && page_shift == PAGE_SHIFT) {
pages = kmalloc_array(buf->nbufs, sizeof(*pages),
GFP_KERNEL);
if (!pages)
@@ -241,6 +250,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
kfree(pages);
if (!buf->direct.buf)
goto err_free;
+ } else {
+ buf->direct.buf = NULL;
}
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index b94dcd823ad1..1085cb249bc1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -38,69 +38,7 @@
#define CMD_POLL_TOKEN 0xffff
#define CMD_MAX_NUM 32
-#define STATUS_MASK 0xff
#define CMD_TOKEN_MASK 0x1f
-#define GO_BIT_TIMEOUT_MSECS 10000
-
-enum {
- HCR_TOKEN_OFFSET = 0x14,
- HCR_STATUS_OFFSET = 0x18,
- HCR_GO_BIT = 15,
-};
-
-static int cmd_pending(struct hns_roce_dev *hr_dev)
-{
- u32 status = readl(hr_dev->cmd.hcr + HCR_TOKEN_OFFSET);
-
- return (!!(status & (1 << HCR_GO_BIT)));
-}
-
-/* this function should be serialized with "hcr_mutex" */
-static int __hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
- u64 in_param, u64 out_param,
- u32 in_modifier, u8 op_modifier, u16 op,
- u16 token, int event)
-{
- struct hns_roce_cmdq *cmd = &hr_dev->cmd;
- struct device *dev = &hr_dev->pdev->dev;
- u32 __iomem *hcr = (u32 *)cmd->hcr;
- int ret = -EAGAIN;
- unsigned long end;
- u32 val = 0;
-
- end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
- while (cmd_pending(hr_dev)) {
- if (time_after(jiffies, end)) {
- dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
- (int)end);
- goto out;
- }
- cond_resched();
- }
-
- roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
- op);
- roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
- ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
- roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
- roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
- roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
- ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
-
- __raw_writeq(cpu_to_le64(in_param), hcr + 0);
- __raw_writeq(cpu_to_le64(out_param), hcr + 2);
- __raw_writel(cpu_to_le32(in_modifier), hcr + 4);
- /* Memory barrier */
- wmb();
-
- __raw_writel(cpu_to_le32(val), hcr + 5);
-
- mmiowb();
- ret = 0;
-
-out:
- return ret;
-}
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier,
@@ -108,12 +46,11 @@ static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
int event)
{
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
- int ret = -EAGAIN;
+ int ret;
mutex_lock(&cmd->hcr_mutex);
- ret = __hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
- in_modifier, op_modifier, op, token,
- event);
+ ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
+ op_modifier, op, token, event);
mutex_unlock(&cmd->hcr_mutex);
return ret;
@@ -125,10 +62,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
u8 op_modifier, u16 op,
unsigned long timeout)
{
- struct device *dev = &hr_dev->pdev->dev;
- u8 __iomem *hcr = hr_dev->cmd.hcr;
- unsigned long end = 0;
- u32 status = 0;
+ struct device *dev = hr_dev->dev;
int ret;
ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
@@ -136,29 +70,10 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
CMD_POLL_TOKEN, 0);
if (ret) {
dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
- goto out;
- }
-
- end = msecs_to_jiffies(timeout) + jiffies;
- while (cmd_pending(hr_dev) && time_before(jiffies, end))
- cond_resched();
-
- if (cmd_pending(hr_dev)) {
- dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
- ret = -ETIMEDOUT;
- goto out;
+ return ret;
}
- status = le32_to_cpu((__force __be32)
- __raw_readl(hcr + HCR_STATUS_OFFSET));
- if ((status & STATUS_MASK) != 0x1) {
- dev_err(dev, "mailbox status 0x%x!\n", status);
- ret = -EBUSY;
- goto out;
- }
-
-out:
- return ret;
+ return hr_dev->hw->chk_mbox(hr_dev, timeout);
}
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
@@ -196,9 +111,9 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
unsigned long timeout)
{
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
- struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_cmd_context *context;
- int ret = 0;
+ struct device *dev = hr_dev->dev;
+ int ret;
spin_lock(&cmd->context_lock);
WARN_ON(cmd->free_head < 0);
@@ -269,17 +184,17 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
in_modifier, op_modifier, op,
timeout);
}
+EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
mutex_init(&hr_dev->cmd.hcr_mutex);
sema_init(&hr_dev->cmd.poll_sem, 1);
hr_dev->cmd.use_events = 0;
hr_dev->cmd.toggle = 1;
hr_dev->cmd.max_cmds = CMD_MAX_NUM;
- hr_dev->cmd.hcr = hr_dev->reg_base + ROCEE_MB1_REG;
hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
HNS_ROCE_MAILBOX_SIZE,
HNS_ROCE_MAILBOX_SIZE, 0);
@@ -356,6 +271,7 @@ struct hns_roce_cmd_mailbox
return mailbox;
}
+EXPORT_SYMBOL_GPL(hns_roce_alloc_cmd_mailbox);
void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox)
@@ -366,3 +282,4 @@ void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
kfree(mailbox);
}
+EXPORT_SYMBOL_GPL(hns_roce_free_cmd_mailbox);
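
The hns_roce_cmd.c hunks above drop the hardware-v1 HCR register accesses from the common command path; posting a mailbox and checking its completion now go through the per-hardware post_mbox()/chk_mbox() callbacks, with posting still serialized by hcr_mutex. A condensed sketch of that flow under those assumptions (the wrapper name below is hypothetical, the two hooks are the ones added to struct hns_roce_hw in this series):

        static int hr_post_and_check(struct hns_roce_dev *hr_dev, u64 in, u64 out,
                                     u32 in_mod, u8 op_mod, u16 op, u16 token,
                                     unsigned long timeout)
        {
                int ret;

                mutex_lock(&hr_dev->cmd.hcr_mutex);     /* serialize mailbox posting */
                ret = hr_dev->hw->post_mbox(hr_dev, in, out, in_mod, op_mod, op,
                                            token, 0);
                mutex_unlock(&hr_dev->cmd.hcr_mutex);
                if (ret)
                        return ret;

                return hr_dev->hw->chk_mbox(hr_dev, timeout);   /* hw-specific completion poll */
        }
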
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index f5a9ee2fc53d..b1c94223c28b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -37,6 +37,60 @@
#define HNS_ROCE_CMD_TIMEOUT_MSECS 10000
enum {
+ /* QPC BT commands */
+ HNS_ROCE_CMD_WRITE_QPC_BT0 = 0x0,
+ HNS_ROCE_CMD_WRITE_QPC_BT1 = 0x1,
+ HNS_ROCE_CMD_WRITE_QPC_BT2 = 0x2,
+ HNS_ROCE_CMD_READ_QPC_BT0 = 0x4,
+ HNS_ROCE_CMD_READ_QPC_BT1 = 0x5,
+ HNS_ROCE_CMD_READ_QPC_BT2 = 0x6,
+ HNS_ROCE_CMD_DESTROY_QPC_BT0 = 0x8,
+ HNS_ROCE_CMD_DESTROY_QPC_BT1 = 0x9,
+ HNS_ROCE_CMD_DESTROY_QPC_BT2 = 0xa,
+
+ /* QPC operation */
+ HNS_ROCE_CMD_MODIFY_QPC = 0x41,
+ HNS_ROCE_CMD_QUERY_QPC = 0x42,
+
+ HNS_ROCE_CMD_MODIFY_CQC = 0x52,
+ /* CQC BT commands */
+ HNS_ROCE_CMD_WRITE_CQC_BT0 = 0x10,
+ HNS_ROCE_CMD_WRITE_CQC_BT1 = 0x11,
+ HNS_ROCE_CMD_WRITE_CQC_BT2 = 0x12,
+ HNS_ROCE_CMD_READ_CQC_BT0 = 0x14,
+ HNS_ROCE_CMD_READ_CQC_BT1 = 0x15,
+ HNS_ROCE_CMD_READ_CQC_BT2 = 0x1b,
+ HNS_ROCE_CMD_DESTROY_CQC_BT0 = 0x18,
+ HNS_ROCE_CMD_DESTROY_CQC_BT1 = 0x19,
+ HNS_ROCE_CMD_DESTROY_CQC_BT2 = 0x1a,
+
+ /* MPT BT commands */
+ HNS_ROCE_CMD_WRITE_MPT_BT0 = 0x20,
+ HNS_ROCE_CMD_WRITE_MPT_BT1 = 0x21,
+ HNS_ROCE_CMD_WRITE_MPT_BT2 = 0x22,
+ HNS_ROCE_CMD_READ_MPT_BT0 = 0x24,
+ HNS_ROCE_CMD_READ_MPT_BT1 = 0x25,
+ HNS_ROCE_CMD_READ_MPT_BT2 = 0x26,
+ HNS_ROCE_CMD_DESTROY_MPT_BT0 = 0x28,
+ HNS_ROCE_CMD_DESTROY_MPT_BT1 = 0x29,
+ HNS_ROCE_CMD_DESTROY_MPT_BT2 = 0x2a,
+
+ /* MPT commands */
+ HNS_ROCE_CMD_QUERY_MPT = 0x62,
+
+ /* SRQC BT commands */
+ HNS_ROCE_CMD_WRITE_SRQC_BT0 = 0x30,
+ HNS_ROCE_CMD_WRITE_SRQC_BT1 = 0x31,
+ HNS_ROCE_CMD_WRITE_SRQC_BT2 = 0x32,
+ HNS_ROCE_CMD_READ_SRQC_BT0 = 0x34,
+ HNS_ROCE_CMD_READ_SRQC_BT1 = 0x35,
+ HNS_ROCE_CMD_READ_SRQC_BT2 = 0x36,
+ HNS_ROCE_CMD_DESTROY_SRQC_BT0 = 0x38,
+ HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
+ HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
+};
+
+enum {
/* TPT commands */
HNS_ROCE_CMD_SW2HW_MPT = 0xd,
HNS_ROCE_CMD_HW2SW_MPT = 0xf,
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 4af403e1348c..7ecb7a4147a8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -341,6 +341,7 @@
#define ROCEE_BT_CMD_L_REG 0x200
#define ROCEE_MB1_REG 0x210
+#define ROCEE_MB6_REG 0x224
#define ROCEE_DB_SQ_L_0_REG 0x230
#define ROCEE_DB_OTHERS_L_0_REG 0x238
#define ROCEE_QP1C_CFG0_0_REG 0x270
@@ -362,4 +363,26 @@
#define ROCEE_ECC_UCERR_ALM0_REG 0xB34
#define ROCEE_ECC_CERR_ALM0_REG 0xB40
+/* V2 ROCEE REG */
+#define ROCEE_TX_CMQ_BASEADDR_L_REG 0x07000
+#define ROCEE_TX_CMQ_BASEADDR_H_REG 0x07004
+#define ROCEE_TX_CMQ_DEPTH_REG 0x07008
+#define ROCEE_TX_CMQ_TAIL_REG 0x07010
+#define ROCEE_TX_CMQ_HEAD_REG 0x07014
+
+#define ROCEE_RX_CMQ_BASEADDR_L_REG 0x07018
+#define ROCEE_RX_CMQ_BASEADDR_H_REG 0x0701c
+#define ROCEE_RX_CMQ_DEPTH_REG 0x07020
+#define ROCEE_RX_CMQ_TAIL_REG 0x07024
+#define ROCEE_RX_CMQ_HEAD_REG 0x07028
+
+#define ROCEE_VF_SMAC_CFG0_REG 0x12000
+#define ROCEE_VF_SMAC_CFG1_REG 0x12004
+
+#define ROCEE_VF_SGID_CFG0_REG 0x10000
+#define ROCEE_VF_SGID_CFG1_REG 0x10004
+#define ROCEE_VF_SGID_CFG2_REG 0x10008
+#define ROCEE_VF_SGID_CFG3_REG 0x1000c
+#define ROCEE_VF_SGID_CFG4_REG 0x10010
+
#endif /* _HNS_ROCE_COMMON_H */
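
The hns_roce_cq.c changes that follow size CQE buffers with caps.cqe_buf_pg_sz, an extra shift on top of PAGE_SHIFT (0 keeps plain kernel pages). A sketch of the rounding used when building the MTT from a umem; the helper name is illustrative only:

        static void hr_cqe_buf_pages(u32 cqe_buf_pg_sz, int umem_pages,
                                     u32 *npages, u32 *page_shift)
        {
                u32 factor = 1U << cqe_buf_pg_sz;       /* kernel pages per CQE buffer page */

                *npages = (umem_pages + factor - 1) / factor;   /* round up */
                *page_shift = PAGE_SHIFT + cqe_buf_pg_sz;
        }
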
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index b89fd711019e..2111b57a3489 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -58,7 +58,7 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
- dev_err(&hr_dev->pdev->dev,
+ dev_err(hr_dev->dev,
"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
event_type, hr_cq->cqn);
return;
@@ -85,17 +85,23 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
struct hns_roce_uar *hr_uar,
struct hns_roce_cq *hr_cq, int vector)
{
- struct hns_roce_cmd_mailbox *mailbox = NULL;
- struct hns_roce_cq_table *cq_table = NULL;
- struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct hns_roce_hem_table *mtt_table;
+ struct hns_roce_cq_table *cq_table;
+ struct device *dev = hr_dev->dev;
dma_addr_t dma_handle;
- u64 *mtts = NULL;
- int ret = 0;
+ u64 *mtts;
+ int ret;
cq_table = &hr_dev->cq_table;
/* Get the physical address of cq buf */
- mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+ mtt_table = &hr_dev->mr_table.mtt_cqe_table;
+ else
+ mtt_table = &hr_dev->mr_table.mtt_table;
+
+ mtts = hns_roce_table_find(hr_dev, mtt_table,
hr_mtt->first_seg, &dma_handle);
if (!mtts) {
dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
@@ -150,6 +156,7 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
}
hr_cq->cons_index = 0;
+ hr_cq->arm_sn = 1;
hr_cq->uar = hr_uar;
atomic_set(&hr_cq->refcount, 1);
@@ -182,21 +189,22 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
int ret;
ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
if (ret)
dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
-
- /* Waiting interrupt process procedure carried out */
- synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
-
- /* wait for all interrupt processed */
- if (atomic_dec_and_test(&hr_cq->refcount))
- complete(&hr_cq->free);
- wait_for_completion(&hr_cq->free);
+ if (hr_dev->eq_table.eq) {
+ /* Waiting interrupt process procedure carried out */
+ synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+
+ /* wait for all interrupt processed */
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
+ wait_for_completion(&hr_cq->free);
+ }
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, hr_cq->cqn);
@@ -205,6 +213,7 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
+EXPORT_SYMBOL_GPL(hns_roce_free_cq);
static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
struct ib_ucontext *context,
@@ -212,14 +221,31 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
struct ib_umem **umem, u64 buf_addr, int cqe)
{
int ret;
+ u32 page_shift;
+ u32 npages;
*umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
- ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
- (*umem)->page_shift, &buf->hr_mtt);
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+ buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+ else
+ buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+
+ if (hr_dev->caps.cqe_buf_pg_sz) {
+ npages = (ib_umem_page_count(*umem) +
+ (1 << hr_dev->caps.cqe_buf_pg_sz) - 1) /
+ (1 << hr_dev->caps.cqe_buf_pg_sz);
+ page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
+ ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
+ &buf->hr_mtt);
+ } else {
+ ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
+ (*umem)->page_shift,
+ &buf->hr_mtt);
+ }
if (ret)
goto err_buf;
@@ -241,12 +267,19 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
struct hns_roce_cq_buf *buf, u32 nent)
{
int ret;
+ u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
- PAGE_SIZE * 2, &buf->hr_buf);
+ (1 << page_shift) * 2, &buf->hr_buf,
+ page_shift);
if (ret)
goto out;
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+ buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+ else
+ buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+
ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
buf->hr_buf.page_shift, &buf->hr_mtt);
if (ret)
@@ -281,13 +314,13 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq ucmd;
struct hns_roce_cq *hr_cq = NULL;
struct hns_roce_uar *uar = NULL;
int vector = attr->comp_vector;
int cq_entries = attr->cqe;
- int ret = 0;
+ int ret;
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
@@ -295,13 +328,12 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
return ERR_PTR(-EINVAL);
}
- hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL);
+ hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
if (!hr_cq)
return ERR_PTR(-ENOMEM);
- /* In v1 engine, parameter verification */
- if (cq_entries < HNS_ROCE_MIN_CQE_NUM)
- cq_entries = HNS_ROCE_MIN_CQE_NUM;
+ if (hr_dev->caps.min_cqes)
+ cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
hr_cq->ib_cq.cqe = cq_entries - 1;
@@ -335,8 +367,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
}
uar = &hr_dev->priv_uar;
- hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG +
- 0x1000 * uar->index;
+ hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
+ DB_REG_OFFSET * uar->index;
}
/* Allocate cq index, fill cq_context */
@@ -353,7 +385,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 * problems if tptr is set to zero here, so we initialize it in user
* space.
*/
- if (!context)
+ if (!context && hr_cq->tptr_addr)
*hr_cq->tptr_addr = 0;
/* Get created cq handler and carry out event */
@@ -385,6 +417,7 @@ err_cq:
kfree(hr_cq);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq);
int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
{
@@ -410,10 +443,11 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
return ret;
}
+EXPORT_SYMBOL_GPL(hns_roce_ib_destroy_cq);
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_cq *cq;
cq = radix_tree_lookup(&hr_dev->cq_table.tree,
@@ -423,13 +457,14 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
return;
}
+ ++cq->arm_sn;
cq->comp(cq);
}
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_cq *cq;
cq = radix_tree_lookup(&cq_table->tree,
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index e493a61e14e1..01d3d695cbba 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -78,6 +78,8 @@
#define HNS_ROCE_MAX_GID_NUM 16
#define HNS_ROCE_GID_SIZE 16
+#define HNS_ROCE_HOP_NUM_0 0xff
+
#define BITMAP_NO_RR 0
#define BITMAP_RR 1
@@ -168,6 +170,16 @@ enum {
HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07,
};
+enum {
+ HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
+ HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
+};
+
+enum hns_roce_mtt_type {
+ MTT_TYPE_WQE,
+ MTT_TYPE_CQE,
+};
+
#define HNS_ROCE_CMD_SUCCESS 1
#define HNS_ROCE_PORT_DOWN 0
@@ -229,15 +241,21 @@ struct hns_roce_hem_table {
unsigned long num_obj;
/*Single obj size */
unsigned long obj_size;
+ unsigned long table_chunk_size;
int lowmem;
struct mutex mutex;
struct hns_roce_hem **hem;
+ u64 **bt_l1;
+ dma_addr_t *bt_l1_dma_addr;
+ u64 **bt_l0;
+ dma_addr_t *bt_l0_dma_addr;
};
struct hns_roce_mtt {
- unsigned long first_seg;
- int order;
- int page_shift;
+ unsigned long first_seg;
+ int order;
+ int page_shift;
+ enum hns_roce_mtt_type mtt_type;
};
/* Only support 4K page size for mr register */
@@ -255,6 +273,19 @@ struct hns_roce_mr {
int type; /* MR's register type */
u64 *pbl_buf;/* MR's PBL space */
dma_addr_t pbl_dma_addr; /* MR's PBL space PA */
+ u32 pbl_size;/* PA number in the PBL */
+ u64 pbl_ba;/* page table address */
+ u32 l0_chunk_last_num;/* L0 last number */
+ u32 l1_chunk_last_num;/* L1 last number */
+ u64 **pbl_bt_l2;/* PBL BT L2 */
+ u64 **pbl_bt_l1;/* PBL BT L1 */
+ u64 *pbl_bt_l0;/* PBL BT L0 */
+ dma_addr_t *pbl_l2_dma_addr;/* PBL BT L2 dma addr */
+ dma_addr_t *pbl_l1_dma_addr;/* PBL BT L1 dma addr */
+ dma_addr_t pbl_l0_dma_addr;/* PBL BT L0 dma addr */
+ u32 pbl_ba_pg_sz;/* BT chunk page size */
+ u32 pbl_buf_pg_sz;/* buf chunk page size */
+ u32 pbl_hop_num;/* multi-hop number */
};
struct hns_roce_mr_table {
@@ -262,6 +293,8 @@ struct hns_roce_mr_table {
struct hns_roce_buddy mtt_buddy;
struct hns_roce_hem_table mtt_table;
struct hns_roce_hem_table mtpt_table;
+ struct hns_roce_buddy mtt_cqe_buddy;
+ struct hns_roce_hem_table mtt_cqe_table;
};
struct hns_roce_wq {
@@ -277,6 +310,12 @@ struct hns_roce_wq {
void __iomem *db_reg_l;
};
+struct hns_roce_sge {
+ int sge_cnt; /* SGE num */
+ int offset;
+ int sge_shift;/* SGE size */
+};
+
struct hns_roce_buf_list {
void *buf;
dma_addr_t map;
@@ -308,6 +347,7 @@ struct hns_roce_cq {
u32 cons_index;
void __iomem *cq_db_l;
u16 *tptr_addr;
+ int arm_sn;
unsigned long cqn;
u32 vector;
atomic_t refcount;
@@ -328,6 +368,7 @@ struct hns_roce_qp_table {
spinlock_t lock;
struct hns_roce_hem_table qp_table;
struct hns_roce_hem_table irrl_table;
+ struct hns_roce_hem_table trrl_table;
};
struct hns_roce_cq_table {
@@ -367,7 +408,6 @@ struct hns_roce_cmd_context {
struct hns_roce_cmdq {
struct dma_pool *pool;
- u8 __iomem *hcr;
struct mutex hcr_mutex;
struct semaphore poll_sem;
/*
@@ -429,6 +469,9 @@ struct hns_roce_qp {
atomic_t refcount;
struct completion free;
+
+ struct hns_roce_sge sge;
+ u32 next_sge;
};
struct hns_roce_sqp {
@@ -439,7 +482,6 @@ struct hns_roce_ib_iboe {
spinlock_t lock;
struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
struct notifier_block nb;
- struct notifier_block nb_inet;
u8 phy_port[HNS_ROCE_MAX_PORTS];
};
@@ -477,16 +519,20 @@ struct hns_roce_caps {
u32 max_wqes; /* 16k */
u32 max_sq_desc_sz; /* 64 */
u32 max_rq_desc_sz; /* 64 */
+ u32 max_srq_desc_sz;
int max_qp_init_rdma;
int max_qp_dest_rdma;
int num_cqs;
int max_cqes;
+ int min_cqes;
+ u32 min_wqes;
int reserved_cqs;
int num_aeq_vectors; /* 1 */
int num_comp_vectors; /* 32 ceq */
int num_other_vectors;
int num_mtpts;
u32 num_mtt_segs;
+ u32 num_cqe_segs;
int reserved_mrws;
int reserved_uars;
int num_pds;
@@ -498,29 +544,70 @@ struct hns_roce_caps {
int mtpt_entry_sz;
int qpc_entry_sz;
int irrl_entry_sz;
+ int trrl_entry_sz;
int cqc_entry_sz;
+ u32 pbl_ba_pg_sz;
+ u32 pbl_buf_pg_sz;
+ u32 pbl_hop_num;
int aeqe_depth;
int ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
enum ib_mtu max_mtu;
+ u32 qpc_bt_num;
+ u32 srqc_bt_num;
+ u32 cqc_bt_num;
+ u32 mpt_bt_num;
+ u32 qpc_ba_pg_sz;
+ u32 qpc_buf_pg_sz;
+ u32 qpc_hop_num;
+ u32 srqc_ba_pg_sz;
+ u32 srqc_buf_pg_sz;
+ u32 srqc_hop_num;
+ u32 cqc_ba_pg_sz;
+ u32 cqc_buf_pg_sz;
+ u32 cqc_hop_num;
+ u32 mpt_ba_pg_sz;
+ u32 mpt_buf_pg_sz;
+ u32 mpt_hop_num;
+ u32 mtt_ba_pg_sz;
+ u32 mtt_buf_pg_sz;
+ u32 mtt_hop_num;
+ u32 cqe_ba_pg_sz;
+ u32 cqe_buf_pg_sz;
+ u32 cqe_hop_num;
+ u32 chunk_sz; /* chunk size in non-multihop mode */
+ u64 flags;
};
struct hns_roce_hw {
int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
- void (*hw_profile)(struct hns_roce_dev *hr_dev);
+ int (*cmq_init)(struct hns_roce_dev *hr_dev);
+ void (*cmq_exit)(struct hns_roce_dev *hr_dev);
+ int (*hw_profile)(struct hns_roce_dev *hr_dev);
int (*hw_init)(struct hns_roce_dev *hr_dev);
void (*hw_exit)(struct hns_roce_dev *hr_dev);
- void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
- union ib_gid *gid);
- void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
+ int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
+ u16 token, int event);
+ int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
+ int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+ union ib_gid *gid, const struct ib_gid_attr *attr);
+ int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
enum ib_mtu mtu);
int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
unsigned long mtpt_idx);
+ int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr, int flags, u32 pdn,
+ int mr_access_flags, u64 iova, u64 size,
+ void *mb_buf);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
dma_addr_t dma_handle, int nent, u32 vector);
+ int (*set_hem)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj, int step_idx);
int (*clear_hem)(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table, int obj);
+ struct hns_roce_hem_table *table, int obj,
+ int step_idx);
int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
@@ -535,12 +622,14 @@ struct hns_roce_hw {
int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
int (*destroy_cq)(struct ib_cq *ibcq);
- void *priv;
+ int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
};
struct hns_roce_dev {
struct ib_device ib_dev;
struct platform_device *pdev;
+ struct pci_dev *pci_dev;
+ struct device *dev;
struct hns_roce_uar priv_uar;
const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
spinlock_t sm_lock;
@@ -569,9 +658,12 @@ struct hns_roce_dev {
int cmd_mod;
int loop_idc;
+ u32 sdb_offset;
+ u32 odb_offset;
dma_addr_t tptr_dma_addr; /*only for hw v1*/
u32 tptr_size; /*only for hw v1*/
- struct hns_roce_hw *hw;
+ const struct hns_roce_hw *hw;
+ void *priv;
};
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -635,12 +727,14 @@ static inline struct hns_roce_qp
static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
{
u32 bits_per_long_val = BITS_PER_LONG;
+ u32 page_size = 1 << buf->page_shift;
- if (bits_per_long_val == 64 || buf->nbufs == 1)
+ if ((bits_per_long_val == 64 && buf->page_shift == PAGE_SHIFT) ||
+ buf->nbufs == 1)
return (char *)(buf->direct.buf) + offset;
else
- return (char *)(buf->page_list[offset >> PAGE_SHIFT].buf) +
- (offset & (PAGE_SIZE - 1));
+ return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
+ (offset & (page_size - 1));
}
int hns_roce_init_uar_table(struct hns_roce_dev *dev);
@@ -702,6 +796,9 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
+int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata);
int hns_roce_dereg_mr(struct ib_mr *ibmr);
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
@@ -711,7 +808,7 @@ unsigned long key_to_hw_index(u32 key);
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
- struct hns_roce_buf *buf);
+ struct hns_roce_buf *buf, u32 page_shift);
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, struct ib_umem *umem);
@@ -723,6 +820,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
+void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
@@ -749,7 +847,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
-
-extern struct hns_roce_hw hns_roce_hw_v1;
+int hns_roce_init(struct hns_roce_dev *hr_dev);
+void hns_roce_exit(struct hns_roce_dev *hr_dev);
#endif /* _HNS_ROCE_DEVICE_H */
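
As a usage note for the reworked hns_roce_buf_offset() above: when the buffer is split across page_list entries, a byte offset decomposes into a page index and an in-page remainder relative to buf->page_shift. Illustrative only, mirroring the else branch:

        static void *hr_paged_offset(struct hns_roce_buf *buf, u32 byte_off)
        {
                u32 page = byte_off >> buf->page_shift;                 /* page_list index */
                u32 rem  = byte_off & ((1U << buf->page_shift) - 1);    /* offset within that page */

                return (char *)buf->page_list[page].buf + rem;
        }
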
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
index b0f43735de1a..d184431e2bf5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -558,7 +558,7 @@ static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
writel(eqshift_val, eqc);
/* Configure eq extended address 12~44bit */
- writel((u32)(eq->buf_list[0].map >> 12), (u8 *)eqc + 4);
+ writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
/*
* Configure eq extended address 45~49 bit.
@@ -572,13 +572,13 @@ static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
roce_set_field(eqcuridx_val,
ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
- writel(eqcuridx_val, (u8 *)eqc + 8);
+ writel(eqcuridx_val, eqc + 8);
/* Configure eq consumer index */
roce_set_field(eqconsindx_val,
ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
- writel(eqconsindx_val, (u8 *)eqc + 0xc);
+ writel(eqconsindx_val, eqc + 0xc);
return 0;
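
The hns_roce_hem.c rework that follows introduces multi-hop base-address tables. hns_roce_calc_hem_mhop() splits a flat chunk index into up to three levels, each holding chunk_ba_num entries (BT chunk size / 8, a power of two). A sketch of that decomposition for the three-level case, with a hypothetical helper name:

        static void hr_split_mhop_idx(u32 table_idx, u32 chunk_ba_num,
                                      u32 *l0, u32 *l1, u32 *l2)
        {
                *l2 = table_idx & (chunk_ba_num - 1);                   /* lowest level */
                *l1 = (table_idx / chunk_ba_num) & (chunk_ba_num - 1);
                *l0 = table_idx / chunk_ba_num / chunk_ba_num;          /* index into the L0 BT */
        }
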
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index c5104e0b2916..8b733a66fae5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -36,14 +36,165 @@
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
-#define HNS_ROCE_HEM_ALLOC_SIZE (1 << 17)
-#define HNS_ROCE_TABLE_CHUNK_SIZE (1 << 17)
-
#define DMA_ADDR_T_SHIFT 12
#define BT_BA_SHIFT 32
-struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
- gfp_t gfp_mask)
+bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
+{
+ if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) ||
+ (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
+ (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
+ (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
+ (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
+ (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(hns_roce_check_whether_mhop);
+
+static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx,
+ u32 bt_chunk_num)
+{
+ int i;
+
+ for (i = 0; i < bt_chunk_num; i++)
+ if (hem[start_idx + i])
+ return false;
+
+ return true;
+}
+
+static bool hns_roce_check_bt_null(u64 **bt, u64 start_idx, u32 bt_chunk_num)
+{
+ int i;
+
+ for (i = 0; i < bt_chunk_num; i++)
+ if (bt[start_idx + i])
+ return false;
+
+ return true;
+}
+
+static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
+{
+ if (check_whether_bt_num_3(table_type, hop_num))
+ return 3;
+ else if (check_whether_bt_num_2(table_type, hop_num))
+ return 2;
+ else if (check_whether_bt_num_1(table_type, hop_num))
+ return 1;
+ else
+ return 0;
+}
+
+int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long *obj,
+ struct hns_roce_hem_mhop *mhop)
+{
+ struct device *dev = hr_dev->dev;
+ u32 chunk_ba_num;
+ u32 table_idx;
+ u32 bt_num;
+ u32 chunk_size;
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
+ mhop->hop_num = hr_dev->caps.qpc_hop_num;
+ break;
+ case HEM_TYPE_MTPT:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
+ mhop->hop_num = hr_dev->caps.mpt_hop_num;
+ break;
+ case HEM_TYPE_CQC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
+ mhop->hop_num = hr_dev->caps.cqc_hop_num;
+ break;
+ case HEM_TYPE_SRQC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
+ mhop->hop_num = hr_dev->caps.srqc_hop_num;
+ break;
+ case HEM_TYPE_MTT:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.mtt_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+ mhop->hop_num = hr_dev->caps.mtt_hop_num;
+ break;
+ case HEM_TYPE_CQE:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.cqe_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+ mhop->hop_num = hr_dev->caps.cqe_hop_num;
+ break;
+ default:
+ dev_err(dev, "Table %d not support multi-hop addressing!\n",
+ table->type);
+ return -EINVAL;
+ }
+
+ if (!obj)
+ return 0;
+
+ /*
+ * QPC/MTPT/CQC/SRQC alloc hem for buffer pages.
+ * MTT/CQE alloc hem for bt pages.
+ */
+ bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
+ chunk_ba_num = mhop->bt_chunk_size / 8;
+ chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
+ mhop->bt_chunk_size;
+ table_idx = (*obj & (table->num_obj - 1)) /
+ (chunk_size / table->obj_size);
+ switch (bt_num) {
+ case 3:
+ mhop->l2_idx = table_idx & (chunk_ba_num - 1);
+ mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
+ mhop->l0_idx = table_idx / chunk_ba_num / chunk_ba_num;
+ break;
+ case 2:
+ mhop->l1_idx = table_idx & (chunk_ba_num - 1);
+ mhop->l0_idx = table_idx / chunk_ba_num;
+ break;
+ case 1:
+ mhop->l0_idx = table_idx;
+ break;
+ default:
+ dev_err(dev, "Table %d not support hop_num = %d!\n",
+ table->type, mhop->hop_num);
+ return -EINVAL;
+ }
+ if (mhop->l0_idx >= mhop->ba_l0_num)
+ mhop->l0_idx %= mhop->ba_l0_num;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hns_roce_calc_hem_mhop);
+
+static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
+ int npages,
+ unsigned long hem_alloc_size,
+ gfp_t gfp_mask)
{
struct hns_roce_hem_chunk *chunk = NULL;
struct hns_roce_hem *hem;
@@ -61,7 +212,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
hem->refcount = 0;
INIT_LIST_HEAD(&hem->chunk_list);
- order = get_order(HNS_ROCE_HEM_ALLOC_SIZE);
+ order = get_order(hem_alloc_size);
while (npages > 0) {
if (!chunk) {
@@ -84,7 +235,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
* memory, directly return fail.
*/
mem = &chunk->mem[chunk->npages];
- buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
+ buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
&sg_dma_address(mem), gfp_mask);
if (!buf)
goto fail;
@@ -115,7 +266,7 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i)
- dma_free_coherent(&hr_dev->pdev->dev,
+ dma_free_coherent(hr_dev->dev,
chunk->mem[i].length,
lowmem_page_address(sg_page(&chunk->mem[i])),
sg_dma_address(&chunk->mem[i]));
@@ -128,8 +279,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
- struct device *dev = &hr_dev->pdev->dev;
spinlock_t *lock = &hr_dev->bt_cmd_lock;
+ struct device *dev = hr_dev->dev;
unsigned long end = 0;
unsigned long flags;
struct hns_roce_hem_iter iter;
@@ -142,7 +293,7 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
/* Find the HEM(Hardware Entry Memory) entry */
unsigned long i = (obj & (table->num_obj - 1)) /
- (HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
+ (table->table_chunk_size / table->obj_size);
switch (table->type) {
case HEM_TYPE_QPC:
@@ -209,14 +360,185 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
return ret;
}
+static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_hem_mhop mhop;
+ struct hns_roce_hem_iter iter;
+ u32 buf_chunk_size;
+ u32 bt_chunk_size;
+ u32 chunk_ba_num;
+ u32 hop_num;
+ u32 size;
+ u32 bt_num;
+ u64 hem_idx;
+ u64 bt_l1_idx = 0;
+ u64 bt_l0_idx = 0;
+ u64 bt_ba;
+ unsigned long mhop_obj = obj;
+ int bt_l1_allocated = 0;
+ int bt_l0_allocated = 0;
+ int step_idx;
+ int ret;
+
+ ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+ if (ret)
+ return ret;
+
+ buf_chunk_size = mhop.buf_chunk_size;
+ bt_chunk_size = mhop.bt_chunk_size;
+ hop_num = mhop.hop_num;
+ chunk_ba_num = bt_chunk_size / 8;
+
+ bt_num = hns_roce_get_bt_num(table->type, hop_num);
+ switch (bt_num) {
+ case 3:
+ hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
+ mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
+ bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
+ bt_l0_idx = mhop.l0_idx;
+ break;
+ case 2:
+ hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
+ bt_l0_idx = mhop.l0_idx;
+ break;
+ case 1:
+ hem_idx = mhop.l0_idx;
+ break;
+ default:
+ dev_err(dev, "Table %d not support hop_num = %d!\n",
+ table->type, hop_num);
+ return -EINVAL;
+ }
+
+ mutex_lock(&table->mutex);
+
+ if (table->hem[hem_idx]) {
+ ++table->hem[hem_idx]->refcount;
+ goto out;
+ }
+
+ /* alloc L1 BA's chunk */
+ if ((check_whether_bt_num_3(table->type, hop_num) ||
+ check_whether_bt_num_2(table->type, hop_num)) &&
+ !table->bt_l0[bt_l0_idx]) {
+ table->bt_l0[bt_l0_idx] = dma_alloc_coherent(dev, bt_chunk_size,
+ &(table->bt_l0_dma_addr[bt_l0_idx]),
+ GFP_KERNEL);
+ if (!table->bt_l0[bt_l0_idx]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ bt_l0_allocated = 1;
+
+ /* set base address to hardware */
+ if (table->type < HEM_TYPE_MTT) {
+ step_idx = 0;
+ if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
+ ret = -ENODEV;
+ dev_err(dev, "set HEM base address to HW failed!\n");
+ goto err_dma_alloc_l1;
+ }
+ }
+ }
+
+ /* alloc L2 BA's chunk */
+ if (check_whether_bt_num_3(table->type, hop_num) &&
+ !table->bt_l1[bt_l1_idx]) {
+ table->bt_l1[bt_l1_idx] = dma_alloc_coherent(dev, bt_chunk_size,
+ &(table->bt_l1_dma_addr[bt_l1_idx]),
+ GFP_KERNEL);
+ if (!table->bt_l1[bt_l1_idx]) {
+ ret = -ENOMEM;
+ goto err_dma_alloc_l1;
+ }
+ bt_l1_allocated = 1;
+ *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) =
+ table->bt_l1_dma_addr[bt_l1_idx];
+
+ /* set base address to hardware */
+ step_idx = 1;
+ if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
+ ret = -ENODEV;
+ dev_err(dev, "set HEM base address to HW failed!\n");
+ goto err_alloc_hem_buf;
+ }
+ }
+
+ /*
+ * alloc buffer space chunk for QPC/MTPT/CQC/SRQC.
+ * alloc bt space chunk for MTT/CQE.
+ */
+ size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
+ table->hem[hem_idx] = hns_roce_alloc_hem(hr_dev,
+ size >> PAGE_SHIFT,
+ size,
+ (table->lowmem ? GFP_KERNEL :
+ GFP_HIGHUSER) | __GFP_NOWARN);
+ if (!table->hem[hem_idx]) {
+ ret = -ENOMEM;
+ goto err_alloc_hem_buf;
+ }
+
+ hns_roce_hem_first(table->hem[hem_idx], &iter);
+ bt_ba = hns_roce_hem_addr(&iter);
+
+ if (table->type < HEM_TYPE_MTT) {
+ if (hop_num == 2) {
+ *(table->bt_l1[bt_l1_idx] + mhop.l2_idx) = bt_ba;
+ step_idx = 2;
+ } else if (hop_num == 1) {
+ *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
+ step_idx = 1;
+ } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
+ step_idx = 0;
+ }
+
+ /* set HEM base address to hardware */
+ if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
+ ret = -ENODEV;
+ dev_err(dev, "set HEM base address to HW failed!\n");
+ goto err_alloc_hem_buf;
+ }
+ } else if (hop_num == 2) {
+ *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
+ }
+
+ ++table->hem[hem_idx]->refcount;
+ goto out;
+
+err_alloc_hem_buf:
+ if (bt_l1_allocated) {
+ dma_free_coherent(dev, bt_chunk_size, table->bt_l1[bt_l1_idx],
+ table->bt_l1_dma_addr[bt_l1_idx]);
+ table->bt_l1[bt_l1_idx] = NULL;
+ }
+
+err_dma_alloc_l1:
+ if (bt_l0_allocated) {
+ dma_free_coherent(dev, bt_chunk_size, table->bt_l0[bt_l0_idx],
+ table->bt_l0_dma_addr[bt_l0_idx]);
+ table->bt_l0[bt_l0_idx] = NULL;
+ }
+
+out:
+ mutex_unlock(&table->mutex);
+ return ret;
+}
+
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
int ret = 0;
unsigned long i;
- i = (obj & (table->num_obj - 1)) / (HNS_ROCE_TABLE_CHUNK_SIZE /
+ if (hns_roce_check_whether_mhop(hr_dev, table->type))
+ return hns_roce_table_mhop_get(hr_dev, table, obj);
+
+ i = (obj & (table->num_obj - 1)) / (table->table_chunk_size /
table->obj_size);
mutex_lock(&table->mutex);
@@ -227,7 +549,8 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
}
table->hem[i] = hns_roce_alloc_hem(hr_dev,
- HNS_ROCE_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
+ table->table_chunk_size >> PAGE_SHIFT,
+ table->table_chunk_size,
(table->lowmem ? GFP_KERNEL :
GFP_HIGHUSER) | __GFP_NOWARN);
if (!table->hem[i]) {
@@ -237,6 +560,8 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
/* Set HEM base address(128K/page, pa) to Hardware */
if (hns_roce_set_hem(hr_dev, table, obj)) {
+ hns_roce_free_hem(hr_dev, table->hem[i]);
+ table->hem[i] = NULL;
ret = -ENODEV;
dev_err(dev, "set HEM base address to HW failed.\n");
goto out;
@@ -248,20 +573,139 @@ out:
return ret;
}
+static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj,
+ int check_refcount)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_hem_mhop mhop;
+ unsigned long mhop_obj = obj;
+ u32 bt_chunk_size;
+ u32 chunk_ba_num;
+ u32 hop_num;
+ u32 start_idx;
+ u32 bt_num;
+ u64 hem_idx;
+ u64 bt_l1_idx = 0;
+ int ret;
+
+ ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+ if (ret)
+ return;
+
+ bt_chunk_size = mhop.bt_chunk_size;
+ hop_num = mhop.hop_num;
+ chunk_ba_num = bt_chunk_size / 8;
+
+ bt_num = hns_roce_get_bt_num(table->type, hop_num);
+ switch (bt_num) {
+ case 3:
+ hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
+ mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
+ bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
+ break;
+ case 2:
+ hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
+ break;
+ case 1:
+ hem_idx = mhop.l0_idx;
+ break;
+ default:
+ dev_err(dev, "Table %d not support hop_num = %d!\n",
+ table->type, hop_num);
+ return;
+ }
+
+ mutex_lock(&table->mutex);
+
+ if (check_refcount && (--table->hem[hem_idx]->refcount > 0)) {
+ mutex_unlock(&table->mutex);
+ return;
+ }
+
+ if (table->type < HEM_TYPE_MTT && hop_num == 1) {
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+ } else if (table->type < HEM_TYPE_MTT && hop_num == 2) {
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 2))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+ } else if (table->type < HEM_TYPE_MTT &&
+ hop_num == HNS_ROCE_HOP_NUM_0) {
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+ }
+
+ /*
+ * free buffer space chunk for QPC/MTPT/CQC/SRQC.
+ * free bt space chunk for MTT/CQE.
+ */
+ hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
+ table->hem[hem_idx] = NULL;
+
+ if (check_whether_bt_num_2(table->type, hop_num)) {
+ start_idx = mhop.l0_idx * chunk_ba_num;
+ if (hns_roce_check_hem_null(table->hem, start_idx,
+ chunk_ba_num)) {
+ if (table->type < HEM_TYPE_MTT &&
+ hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+
+ dma_free_coherent(dev, bt_chunk_size,
+ table->bt_l0[mhop.l0_idx],
+ table->bt_l0_dma_addr[mhop.l0_idx]);
+ table->bt_l0[mhop.l0_idx] = NULL;
+ }
+ } else if (check_whether_bt_num_3(table->type, hop_num)) {
+ start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
+ mhop.l1_idx * chunk_ba_num;
+ if (hns_roce_check_hem_null(table->hem, start_idx,
+ chunk_ba_num)) {
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+
+ dma_free_coherent(dev, bt_chunk_size,
+ table->bt_l1[bt_l1_idx],
+ table->bt_l1_dma_addr[bt_l1_idx]);
+ table->bt_l1[bt_l1_idx] = NULL;
+
+ start_idx = mhop.l0_idx * chunk_ba_num;
+ if (hns_roce_check_bt_null(table->bt_l1, start_idx,
+ chunk_ba_num)) {
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj,
+ 0))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+
+ dma_free_coherent(dev, bt_chunk_size,
+ table->bt_l0[mhop.l0_idx],
+ table->bt_l0_dma_addr[mhop.l0_idx]);
+ table->bt_l0[mhop.l0_idx] = NULL;
+ }
+ }
+ }
+
+ mutex_unlock(&table->mutex);
+}
+
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
unsigned long i;
+ if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ hns_roce_table_mhop_put(hr_dev, table, obj, 1);
+ return;
+ }
+
i = (obj & (table->num_obj - 1)) /
- (HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
+ (table->table_chunk_size / table->obj_size);
mutex_lock(&table->mutex);
if (--table->hem[i]->refcount == 0) {
/* Clear HEM base address */
- if (hr_dev->hw->clear_hem(hr_dev, table, obj))
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
dev_warn(dev, "Clear HEM base address failed.\n");
hns_roce_free_hem(hr_dev, table->hem[i]);
@@ -271,23 +715,48 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
mutex_unlock(&table->mutex);
}
-void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
- dma_addr_t *dma_handle)
+void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj, dma_addr_t *dma_handle)
{
struct hns_roce_hem_chunk *chunk;
- unsigned long idx;
- int i;
- int offset, dma_offset;
+ struct hns_roce_hem_mhop mhop;
struct hns_roce_hem *hem;
struct page *page = NULL;
+ unsigned long mhop_obj = obj;
+ unsigned long obj_per_chunk;
+ unsigned long idx_offset;
+ int offset, dma_offset;
+ int i, j;
+ u32 hem_idx = 0;
if (!table->lowmem)
return NULL;
mutex_lock(&table->mutex);
- idx = (obj & (table->num_obj - 1)) * table->obj_size;
- hem = table->hem[idx / HNS_ROCE_TABLE_CHUNK_SIZE];
- dma_offset = offset = idx % HNS_ROCE_TABLE_CHUNK_SIZE;
+
+ if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ obj_per_chunk = table->table_chunk_size / table->obj_size;
+ hem = table->hem[(obj & (table->num_obj - 1)) / obj_per_chunk];
+ idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
+ dma_offset = offset = idx_offset * table->obj_size;
+ } else {
+ hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+ /* mtt mhop */
+ i = mhop.l0_idx;
+ j = mhop.l1_idx;
+ if (mhop.hop_num == 2)
+ hem_idx = i * (mhop.bt_chunk_size / 8) + j;
+ else if (mhop.hop_num == 1 ||
+ mhop.hop_num == HNS_ROCE_HOP_NUM_0)
+ hem_idx = i;
+
+ hem = table->hem[hem_idx];
+ dma_offset = offset = (obj & (table->num_obj - 1)) *
+ table->obj_size % mhop.bt_chunk_size;
+ if (mhop.hop_num == 2)
+ dma_offset = offset = 0;
+ }
if (!hem)
goto out;
@@ -314,14 +783,21 @@ out:
mutex_unlock(&table->mutex);
return page ? lowmem_page_address(page) + offset : NULL;
}
+EXPORT_SYMBOL_GPL(hns_roce_table_find);
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long start, unsigned long end)
{
- unsigned long inc = HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size;
- unsigned long i = 0;
- int ret = 0;
+ struct hns_roce_hem_mhop mhop;
+ unsigned long inc = table->table_chunk_size / table->obj_size;
+ unsigned long i;
+ int ret;
+
+ if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+ inc = mhop.bt_chunk_size / table->obj_size;
+ }
/* Allocate MTT entry memory according to chunk(128K) */
for (i = start; i <= end; i += inc) {
@@ -344,10 +820,16 @@ void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long start, unsigned long end)
{
+ struct hns_roce_hem_mhop mhop;
+ unsigned long inc = table->table_chunk_size / table->obj_size;
unsigned long i;
- for (i = start; i <= end;
- i += HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size)
+ if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+ inc = mhop.bt_chunk_size / table->obj_size;
+ }
+
+ for (i = start; i <= end; i += inc)
hns_roce_table_put(hr_dev, table, i);
}
@@ -356,15 +838,120 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
unsigned long obj_size, unsigned long nobj,
int use_lowmem)
{
+ struct device *dev = hr_dev->dev;
unsigned long obj_per_chunk;
unsigned long num_hem;
- obj_per_chunk = HNS_ROCE_TABLE_CHUNK_SIZE / obj_size;
- num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+ if (!hns_roce_check_whether_mhop(hr_dev, type)) {
+ table->table_chunk_size = hr_dev->caps.chunk_sz;
+ obj_per_chunk = table->table_chunk_size / obj_size;
+ num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+
+ table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
+ if (!table->hem)
+ return -ENOMEM;
+ } else {
+ unsigned long buf_chunk_size;
+ unsigned long bt_chunk_size;
+ unsigned long bt_chunk_num;
+ unsigned long num_bt_l0 = 0;
+ u32 hop_num;
+
+ switch (type) {
+ case HEM_TYPE_QPC:
+ buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
+ + PAGE_SHIFT);
+ num_bt_l0 = hr_dev->caps.qpc_bt_num;
+ hop_num = hr_dev->caps.qpc_hop_num;
+ break;
+ case HEM_TYPE_MTPT:
+ buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
+ + PAGE_SHIFT);
+ num_bt_l0 = hr_dev->caps.mpt_bt_num;
+ hop_num = hr_dev->caps.mpt_hop_num;
+ break;
+ case HEM_TYPE_CQC:
+ buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
+ + PAGE_SHIFT);
+ num_bt_l0 = hr_dev->caps.cqc_bt_num;
+ hop_num = hr_dev->caps.cqc_hop_num;
+ break;
+ case HEM_TYPE_SRQC:
+ buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
+ + PAGE_SHIFT);
+ num_bt_l0 = hr_dev->caps.srqc_bt_num;
+ hop_num = hr_dev->caps.srqc_hop_num;
+ break;
+ case HEM_TYPE_MTT:
+ buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = buf_chunk_size;
+ hop_num = hr_dev->caps.mtt_hop_num;
+ break;
+ case HEM_TYPE_CQE:
+ buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = buf_chunk_size;
+ hop_num = hr_dev->caps.cqe_hop_num;
+ break;
+ default:
+ dev_err(dev,
+ "Table %d not support to init hem table here!\n",
+ type);
+ return -EINVAL;
+ }
+ obj_per_chunk = buf_chunk_size / obj_size;
+ num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+ bt_chunk_num = bt_chunk_size / 8;
+ if (table->type >= HEM_TYPE_MTT)
+ num_bt_l0 = bt_chunk_num;
+
+ table->hem = kcalloc(num_hem, sizeof(*table->hem),
+ GFP_KERNEL);
+ if (!table->hem)
+ goto err_kcalloc_hem_buf;
+
+ if (check_whether_bt_num_3(table->type, hop_num)) {
+ unsigned long num_bt_l1;
+
+ num_bt_l1 = (num_hem + bt_chunk_num - 1) /
+ bt_chunk_num;
+ table->bt_l1 = kcalloc(num_bt_l1,
+ sizeof(*table->bt_l1),
+ GFP_KERNEL);
+ if (!table->bt_l1)
+ goto err_kcalloc_bt_l1;
+
+ table->bt_l1_dma_addr = kcalloc(num_bt_l1,
+ sizeof(*table->bt_l1_dma_addr),
+ GFP_KERNEL);
+
+ if (!table->bt_l1_dma_addr)
+ goto err_kcalloc_l1_dma;
+ }
- table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
- if (!table->hem)
- return -ENOMEM;
+ if (check_whether_bt_num_2(table->type, hop_num) ||
+ check_whether_bt_num_3(table->type, hop_num)) {
+ table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
+ GFP_KERNEL);
+ if (!table->bt_l0)
+ goto err_kcalloc_bt_l0;
+
+ table->bt_l0_dma_addr = kcalloc(num_bt_l0,
+ sizeof(*table->bt_l0_dma_addr),
+ GFP_KERNEL);
+ if (!table->bt_l0_dma_addr)
+ goto err_kcalloc_l0_dma;
+ }
+ }
table->type = type;
table->num_hem = num_hem;
@@ -374,18 +961,72 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
mutex_init(&table->mutex);
return 0;
+
+err_kcalloc_l0_dma:
+ kfree(table->bt_l0);
+ table->bt_l0 = NULL;
+
+err_kcalloc_bt_l0:
+ kfree(table->bt_l1_dma_addr);
+ table->bt_l1_dma_addr = NULL;
+
+err_kcalloc_l1_dma:
+ kfree(table->bt_l1);
+ table->bt_l1 = NULL;
+
+err_kcalloc_bt_l1:
+ kfree(table->hem);
+ table->hem = NULL;
+
+err_kcalloc_hem_buf:
+ return -ENOMEM;
+}
+
+static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table)
+{
+ struct hns_roce_hem_mhop mhop;
+ u32 buf_chunk_size;
+ int i;
+ u64 obj;
+
+ hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+ buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
+ mhop.bt_chunk_size;
+
+ for (i = 0; i < table->num_hem; ++i) {
+ obj = i * buf_chunk_size / table->obj_size;
+ if (table->hem[i])
+ hns_roce_table_mhop_put(hr_dev, table, obj, 0);
+ }
+
+ kfree(table->hem);
+ table->hem = NULL;
+ kfree(table->bt_l1);
+ table->bt_l1 = NULL;
+ kfree(table->bt_l1_dma_addr);
+ table->bt_l1_dma_addr = NULL;
+ kfree(table->bt_l0);
+ table->bt_l0 = NULL;
+ kfree(table->bt_l0_dma_addr);
+ table->bt_l0_dma_addr = NULL;
}
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
unsigned long i;
+ if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ hns_roce_cleanup_mhop_hem_table(hr_dev, table);
+ return;
+ }
+
for (i = 0; i < table->num_hem; ++i)
if (table->hem[i]) {
if (hr_dev->hw->clear_hem(hr_dev, table,
- i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
+ i * table->table_chunk_size / table->obj_size, 0))
dev_err(dev, "Clear HEM base address failed.\n");
hns_roce_free_hem(hr_dev, table->hem[i]);
@@ -398,7 +1039,13 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.trrl_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->mr_table.mtt_cqe_table);
}
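
For the non-multihop paths above, the chunk that holds an object is still found by dividing the masked object index by the number of objects per chunk; the only change in this series is that the chunk size comes from caps.chunk_sz (stored in table->table_chunk_size) instead of the old fixed 128K constant. A sketch under that assumption, helper name hypothetical:

        static unsigned long hr_hem_chunk_index(struct hns_roce_hem_table *table,
                                                unsigned long obj)
        {
                unsigned long obj_per_chunk = table->table_chunk_size / table->obj_size;

                return (obj & (table->num_obj - 1)) / obj_per_chunk;    /* index into table->hem[] */
        }
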
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 435748858252..db66db12075e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -47,13 +47,27 @@ enum {
/* UNMAP HEM */
HEM_TYPE_MTT,
+ HEM_TYPE_CQE,
HEM_TYPE_IRRL,
+ HEM_TYPE_TRRL,
};
#define HNS_ROCE_HEM_CHUNK_LEN \
((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
(sizeof(struct scatterlist)))
+#define check_whether_bt_num_3(type, hop_num) \
+ (type < HEM_TYPE_MTT && hop_num == 2)
+
+#define check_whether_bt_num_2(type, hop_num) \
+ ((type < HEM_TYPE_MTT && hop_num == 1) || \
+ (type >= HEM_TYPE_MTT && hop_num == 2))
+
+#define check_whether_bt_num_1(type, hop_num) \
+ ((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
+ (type >= HEM_TYPE_MTT && hop_num == 1) || \
+ (type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
+
enum {
HNS_ROCE_HEM_PAGE_SHIFT = 12,
HNS_ROCE_HEM_PAGE_SIZE = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
@@ -77,12 +91,23 @@ struct hns_roce_hem_iter {
int page_idx;
};
+struct hns_roce_hem_mhop {
+ u32 hop_num;
+ u32 buf_chunk_size;
+ u32 bt_chunk_size;
+ u32 ba_l0_num;
+ u32 l0_idx;/* level 0 base address table index */
+ u32 l1_idx;/* level 1 base address table index */
+ u32 l2_idx;/* level 2 base address table index */
+};
+
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj);
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj);
-void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
+void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
dma_addr_t *dma_handle);
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
@@ -97,6 +122,10 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table);
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
+int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long *obj,
+ struct hns_roce_hem_mhop *mhop);
+bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);
static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
struct hns_roce_hem_iter *iter)
@@ -105,7 +134,7 @@ static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
iter->chunk = list_empty(&hem->chunk_list) ? NULL :
list_entry(hem->chunk_list.next,
struct hns_roce_hem_chunk, list);
- iter->page_idx = 0;
+ iter->page_idx = 0;
}
static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 747efd1ae5a6..af27168faf0f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -34,6 +34,7 @@
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
@@ -56,8 +57,8 @@ static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
rseg->len = 0;
}
-int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
@@ -316,8 +317,8 @@ out:
return ret;
}
-int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
{
int ret = 0;
int nreq = 0;
@@ -472,7 +473,7 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
dma_addr_t sdb_dma_addr;
u32 val;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
/* Configure extend SDB threshold */
@@ -511,7 +512,7 @@ static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
dma_addr_t odb_dma_addr;
u32 val;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
/* Configure extend ODB threshold */
@@ -547,7 +548,7 @@ static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
dma_addr_t odb_dma_addr;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
@@ -668,7 +669,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
u8 port = 0;
u8 sl;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
/* Reserved cq for loop qp */
@@ -816,7 +817,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
int ret;
int i;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
@@ -850,7 +851,7 @@ static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
u32 odb_evt_mod;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
memset(db, 0, sizeof(*db));
@@ -876,7 +877,7 @@ static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
return 0;
}
-void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
+static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
struct hns_roce_recreate_lp_qp_work *lp_qp_work;
struct hns_roce_dev *hr_dev;
@@ -906,11 +907,13 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
unsigned long end =
msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
GFP_KERNEL);
+ if (!lp_qp_work)
+ return -ENOMEM;
INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);
@@ -982,7 +985,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
hr_dev = to_hr_dev(mr_work->ib_dev);
dev = &hr_dev->pdev->dev;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
mr_free_cq = free_mr->mr_free_cq;
@@ -1001,6 +1004,11 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
}
}
+ if (!ne) {
+ dev_err(dev, "Reserved loop qp is absent!\n");
+ goto free_work;
+ }
+
do {
ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
if (ret < 0) {
@@ -1025,7 +1033,8 @@ free_work:
kfree(mr_work);
}
-int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
+static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr)
{
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_mr_free_work *mr_work;
@@ -1038,7 +1047,7 @@ int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
int npages;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
if (mr->enabled) {
@@ -1103,7 +1112,7 @@ static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
struct hns_roce_db_table *db;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
if (db->sdb_ext_mod) {
@@ -1133,7 +1142,7 @@ static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
struct hns_roce_raq_table *raq;
struct device *dev = &hr_dev->pdev->dev;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
raq = &priv->raq_table;
raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
@@ -1210,7 +1219,7 @@ static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
struct hns_roce_raq_table *raq;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
raq = &priv->raq_table;
dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
@@ -1244,7 +1253,7 @@ static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
int ret;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
@@ -1286,7 +1295,7 @@ static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_v1_priv *priv;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
@@ -1304,7 +1313,7 @@ static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
struct hns_roce_buf_list *tptr_buf;
struct hns_roce_v1_priv *priv;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
tptr_buf = &priv->tptr_table.tptr_buf;
/*
@@ -1330,7 +1339,7 @@ static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
struct hns_roce_buf_list *tptr_buf;
struct hns_roce_v1_priv *priv;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
tptr_buf = &priv->tptr_table.tptr_buf;
dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
@@ -1344,7 +1353,7 @@ static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
@@ -1368,7 +1377,7 @@ static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
struct hns_roce_free_mr *free_mr;
struct hns_roce_v1_priv *priv;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
flush_workqueue(free_mr->free_mr_wq);
@@ -1383,7 +1392,7 @@ static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
* @enable: true -- drop reset, false -- reset
* return 0 -- success, negative -- fail
*/
-int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
+static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
{
struct device_node *dsaf_node;
struct device *dev = &hr_dev->pdev->dev;
@@ -1432,7 +1441,7 @@ static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
struct hns_roce_des_qp *des_qp;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
des_qp = &priv->des_qp;
des_qp->requeue_flag = 1;
@@ -1450,7 +1459,7 @@ static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
struct hns_roce_des_qp *des_qp;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
des_qp = &priv->des_qp;
des_qp->requeue_flag = 0;
@@ -1458,7 +1467,7 @@ static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
destroy_workqueue(des_qp->qp_wq);
}
-void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
+static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
int i = 0;
struct hns_roce_caps *caps = &hr_dev->caps;
@@ -1474,7 +1483,9 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
+ caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
+ caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
@@ -1503,6 +1514,7 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->reserved_mrws = 1;
caps->reserved_uars = 0;
caps->reserved_cqs = 0;
+ caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;
for (i = 0; i < caps->num_ports; i++)
caps->pkey_table_len[i] = 1;
@@ -1524,9 +1536,11 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
ROCEE_ACK_DELAY_REG));
caps->max_mtu = IB_MTU_2048;
+
+ return 0;
}
-int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
+static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
int ret;
u32 val;
@@ -1605,7 +1619,7 @@ error_failed_raq_init:
return ret;
}
-void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
+static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
hns_roce_free_mr_free(hr_dev);
@@ -1616,8 +1630,82 @@ void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
hns_roce_db_free(hr_dev);
}
-void hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
- union ib_gid *gid)
+static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
+{
+ u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);
+
+ return (!!(status & (1 << HCR_GO_BIT)));
+}
+
+static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, u32 in_modifier, u8 op_modifier,
+ u16 op, u16 token, int event)
+{
+ u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
+ unsigned long end;
+ u32 val = 0;
+
+ end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
+ while (hns_roce_v1_cmd_pending(hr_dev)) {
+ if (time_after(jiffies, end)) {
+ dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
+ (int)jiffies, (int)end);
+ return -EAGAIN;
+ }
+ cond_resched();
+ }
+
+ roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
+ op);
+ roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
+ ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
+ roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
+ roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
+ roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
+ ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
+
+ __raw_writeq(cpu_to_le64(in_param), hcr + 0);
+ __raw_writeq(cpu_to_le64(out_param), hcr + 2);
+ __raw_writel(cpu_to_le32(in_modifier), hcr + 4);
+ /* Memory barrier */
+ wmb();
+
+ __raw_writel(cpu_to_le32(val), hcr + 5);
+
+ mmiowb();
+
+ return 0;
+}
+
+static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
+ unsigned long timeout)
+{
+ u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
+ unsigned long end = 0;
+ u32 status = 0;
+
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
+ cond_resched();
+
+ if (hns_roce_v1_cmd_pending(hr_dev)) {
+ dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
+ return -ETIMEDOUT;
+ }
+
+ status = le32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_STATUS_OFFSET));
+ if ((status & STATUS_MASK) != 0x1) {
+ dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
+ int gid_index, union ib_gid *gid,
+ const struct ib_gid_attr *attr)
{
u32 *p = NULL;
u8 gid_idx = 0;
@@ -1639,9 +1727,12 @@ void hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
p = (u32 *)&gid->raw[0xc];
roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
(HNS_ROCE_V1_GID_NUM * gid_idx));
+
+ return 0;
}
-void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
+static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
+ u8 *addr)
{
u32 reg_smac_l;
u16 reg_smac_h;
@@ -1654,8 +1745,13 @@ void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
* because of smac not equal to dmac.
* We need to release and create the reserved qp again.
*/
- if (hr_dev->hw->dereg_mr && hns_roce_v1_recreate_lp_qp(hr_dev))
- dev_warn(&hr_dev->pdev->dev, "recreate lp qp timeout!\n");
+ if (hr_dev->hw->dereg_mr) {
+ int ret;
+
+ ret = hns_roce_v1_recreate_lp_qp(hr_dev);
+ if (ret && ret != -ETIMEDOUT)
+ return ret;
+ }
p = (u32 *)(&addr[0]);
reg_smac_l = *p;
@@ -1670,10 +1766,12 @@ void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
val);
+
+ return 0;
}
-void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
- enum ib_mtu mtu)
+static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
+ enum ib_mtu mtu)
{
u32 val;
@@ -1685,8 +1783,8 @@ void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
val);
}
-int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
- unsigned long mtpt_idx)
+static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+ unsigned long mtpt_idx)
{
struct hns_roce_v1_mpt_entry *mpt_entry;
struct scatterlist *sg;
@@ -1858,7 +1956,7 @@ static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
return get_sw_cqe(hr_cq, hr_cq->cons_index);
}
-void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
+static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
u32 doorbell[2];
@@ -1931,9 +2029,10 @@ static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
spin_unlock_irq(&hr_cq->lock);
}
-void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
- dma_addr_t dma_handle, int nent, u32 vector)
+static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq, void *mb_buf,
+ u64 *mtts, dma_addr_t dma_handle, int nent,
+ u32 vector)
{
struct hns_roce_cq_context *cq_context = NULL;
struct hns_roce_buf_list *tptr_buf;
@@ -1941,7 +2040,7 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
dma_addr_t tptr_dma_addr;
int offset;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
tptr_buf = &priv->tptr_table.tptr_buf;
cq_context = mb_buf;
@@ -2018,7 +2117,13 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
cq_context->cqc_byte_32 = cpu_to_le32(cq_context->cqc_byte_32);
}
-int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+ return -EOPNOTSUPP;
+}
+
+static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags flags)
{
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
u32 notification_flag;
@@ -2279,8 +2384,9 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return ret;
}
-int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table, int obj)
+static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj,
+ int step_idx)
{
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_v1_priv *priv;
@@ -2289,7 +2395,7 @@ int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
void __iomem *bt_cmd;
u64 bt_ba = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
switch (table->type) {
case HEM_TYPE_QPC:
@@ -2441,14 +2547,14 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int rq_pa_start;
u32 reg_val;
u64 *mtts;
- u32 *addr;
+ u32 __iomem *addr;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
/* Search QP buf's MTTs */
- mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
hr_qp->mtt.first_seg, &dma_handle);
if (!mtts) {
dev_err(dev, "qp buf pa find failed\n");
@@ -2523,8 +2629,9 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
/* Copy context to QP1C register */
- addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG +
- hr_qp->phy_port * sizeof(*context));
+ addr = (u32 __iomem *)(hr_dev->reg_base +
+ ROCEE_QP1C_CFG0_0_REG +
+ hr_qp->phy_port * sizeof(*context));
writel(context->qp1c_bytes_4, addr);
writel(context->sq_rq_bt_l, addr + 1);
@@ -2595,7 +2702,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
return -ENOMEM;
/* Search qp buf's mtts */
- mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
hr_qp->mtt.first_seg, &dma_handle);
if (mtts == NULL) {
dev_err(dev, "qp buf pa find failed\n");
@@ -2603,8 +2710,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
}
/* Search IRRL's mtts */
- mtts_2 = hns_roce_table_find(&hr_dev->qp_table.irrl_table, hr_qp->qpn,
- &dma_handle_2);
+ mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
+ hr_qp->qpn, &dma_handle_2);
if (mtts_2 == NULL) {
dev_err(dev, "qp irrl_table find failed\n");
goto out;
@@ -2800,10 +2907,11 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
ilog2((unsigned int)attr->max_dest_rd_atomic));
- roce_set_field(context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
- attr->dest_qp_num);
+ if (attr_mask & IB_QP_DEST_QPN)
+ roce_set_field(context->qpc_bytes_36,
+ QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
+ QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
+ attr->dest_qp_num);
/* Configure GID index */
port_num = rdma_ah_get_port_num(&attr->ah_attr);
@@ -3143,7 +3251,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
if (ibqp->uobject) {
hr_qp->rq.db_reg_l = hr_dev->reg_base +
- ROCEE_DB_OTHERS_L_0_REG +
+ hr_dev->odb_offset +
DB_REG_OFFSET * hr_dev->priv_uar.index;
}
@@ -3177,9 +3285,10 @@ out:
return ret;
}
-int hns_roce_v1_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
- int attr_mask, enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
+static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
{
if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
@@ -3270,6 +3379,7 @@ static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->path_mtu = IB_MTU_256;
qp_attr->path_mig_state = IB_MIG_ARMED;
qp_attr->qkey = QKEY_VAL;
+ qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
qp_attr->rq_psn = 0;
qp_attr->sq_psn = 0;
qp_attr->dest_qp_num = 1;
@@ -3351,6 +3461,7 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
QP_CONTEXT_QPC_BYTES_48_MTU_M,
QP_CONTEXT_QPC_BYTES_48_MTU_S);
qp_attr->path_mig_state = IB_MIG_ARMED;
+ qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
if (hr_qp->ibqp.qp_type == IB_QPT_UD)
qp_attr->qkey = QKEY_VAL;
@@ -3406,10 +3517,10 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
qp_attr->port_num = hr_qp->port + 1;
qp_attr->sq_draining = 0;
- qp_attr->max_rd_atomic = roce_get_field(context->qpc_bytes_156,
+ qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
- qp_attr->max_dest_rd_atomic = roce_get_field(context->qpc_bytes_32,
+ qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
@@ -3444,8 +3555,9 @@ out:
return ret;
}
-int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
{
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -3454,6 +3566,53 @@ int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
}
+static void hns_roce_check_sdb_status(struct hns_roce_dev *hr_dev,
+ u32 *old_send, u32 *old_retry,
+ u32 *tsp_st, u32 *success_flags)
+{
+ u32 sdb_retry_cnt;
+ u32 sdb_send_ptr;
+ u32 cur_cnt, old_cnt;
+ u32 send_ptr;
+
+ sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
+ sdb_retry_cnt = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
+ cur_cnt = roce_get_field(sdb_send_ptr,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+ roce_get_field(sdb_retry_cnt,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+ if (!roce_get_bit(*tsp_st, ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
+ old_cnt = roce_get_field(*old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+ roce_get_field(*old_retry,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+ if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+ *success_flags = 1;
+ } else {
+ old_cnt = roce_get_field(*old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
+ if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) {
+ *success_flags = 1;
+ } else {
+ send_ptr = roce_get_field(*old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+ roce_get_field(sdb_retry_cnt,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+ roce_set_field(*old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
+ send_ptr);
+ }
+ }
+}
+
static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
u32 sdb_issue_ptr,
@@ -3461,12 +3620,10 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
u32 *wait_stage)
{
struct device *dev = &hr_dev->pdev->dev;
- u32 sdb_retry_cnt, old_retry;
u32 sdb_send_ptr, old_send;
u32 success_flags = 0;
- u32 cur_cnt, old_cnt;
unsigned long end;
- u32 send_ptr;
+ u32 old_retry;
u32 inv_cnt;
u32 tsp_st;
@@ -3524,47 +3681,9 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
- sdb_send_ptr = roce_read(hr_dev,
- ROCEE_SDB_SEND_PTR_REG);
- sdb_retry_cnt = roce_read(hr_dev,
- ROCEE_SDB_RETRY_CNT_REG);
- cur_cnt = roce_get_field(sdb_send_ptr,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(sdb_retry_cnt,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
- if (!roce_get_bit(tsp_st,
- ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
- old_cnt = roce_get_field(old_send,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(old_retry,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
- if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
- success_flags = 1;
- } else {
- old_cnt = roce_get_field(old_send,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
- if (cur_cnt - old_cnt >
- SDB_ST_CMP_VAL) {
- success_flags = 1;
- } else {
- send_ptr =
- roce_get_field(old_send,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(sdb_retry_cnt,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
- roce_set_field(old_send,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
- send_ptr);
- }
- }
+ hns_roce_check_sdb_status(hr_dev, &old_send,
+ &old_retry, &tsp_st,
+ &success_flags);
} while (!success_flags);
}
@@ -3664,7 +3783,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
hr_dev = to_hr_dev(qp_work_entry->ib_dev);
dev = &hr_dev->pdev->dev;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
hr_qp = qp_work_entry->qp;
qpn = hr_qp->qpn;
@@ -3781,7 +3900,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
qp_work->sdb_inv_cnt = qp_work_entry.sdb_inv_cnt;
qp_work->sche_cnt = qp_work_entry.sche_cnt;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
queue_work(priv->des_qp.qp_wq, &qp_work->work);
dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
}
@@ -3789,7 +3908,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
return 0;
}
-int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
+static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
@@ -3841,18 +3960,19 @@ int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
return ret;
}
-struct hns_roce_v1_priv hr_v1_priv;
-
-struct hns_roce_hw hns_roce_hw_v1 = {
+static const struct hns_roce_hw hns_roce_hw_v1 = {
.reset = hns_roce_v1_reset,
.hw_profile = hns_roce_v1_profile,
.hw_init = hns_roce_v1_init,
.hw_exit = hns_roce_v1_exit,
+ .post_mbox = hns_roce_v1_post_mbox,
+ .chk_mbox = hns_roce_v1_chk_mbox,
.set_gid = hns_roce_v1_set_gid,
.set_mac = hns_roce_v1_set_mac,
.set_mtu = hns_roce_v1_set_mtu,
.write_mtpt = hns_roce_v1_write_mtpt,
.write_cqc = hns_roce_v1_write_cqc,
+ .modify_cq = hns_roce_v1_modify_cq,
.clear_hem = hns_roce_v1_clear_hem,
.modify_qp = hns_roce_v1_modify_qp,
.query_qp = hns_roce_v1_query_qp,
@@ -3863,5 +3983,258 @@ struct hns_roce_hw hns_roce_hw_v1 = {
.poll_cq = hns_roce_v1_poll_cq,
.dereg_mr = hns_roce_v1_dereg_mr,
.destroy_cq = hns_roce_v1_destroy_cq,
- .priv = &hr_v1_priv,
};
+
+static const struct of_device_id hns_roce_of_match[] = {
+ { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hns_roce_of_match);
+
+static const struct acpi_device_id hns_roce_acpi_match[] = {
+ { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
+
+static int hns_roce_node_match(struct device *dev, void *fwnode)
+{
+ return dev->fwnode == fwnode;
+}
+
+static struct
+platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ /* get the 'device' corresponding to the matching 'fwnode' */
+ dev = bus_find_device(&platform_bus_type, NULL,
+ fwnode, hns_roce_node_match);
+ /* get the platform device */
+ return dev ? to_platform_device(dev) : NULL;
+}
+
+static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct platform_device *pdev = NULL;
+ struct net_device *netdev = NULL;
+ struct device_node *net_node;
+ struct resource *res;
+ int port_cnt = 0;
+ u8 phy_port;
+ int ret;
+ int i;
+
+ /* check if we are compatible with the underlying SoC */
+ if (dev_of_node(dev)) {
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(hns_roce_of_match, dev->of_node);
+ if (!of_id) {
+ dev_err(dev, "device is not compatible!\n");
+ return -ENXIO;
+ }
+ hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
+ if (!hr_dev->hw) {
+ dev_err(dev, "couldn't get H/W specific DT data!\n");
+ return -ENXIO;
+ }
+ } else if (is_acpi_device_node(dev->fwnode)) {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
+ if (!acpi_id) {
+ dev_err(dev, "device is not compatible!\n");
+ return -ENXIO;
+ }
+ hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
+ if (!hr_dev->hw) {
+ dev_err(dev, "couldn't get H/W specific ACPI data!\n");
+ return -ENXIO;
+ }
+ } else {
+ dev_err(dev, "can't read compatibility data from DT or ACPI\n");
+ return -ENXIO;
+ }
+
+ /* get the mapped register base address */
+ res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "memory resource not found!\n");
+ return -EINVAL;
+ }
+ hr_dev->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hr_dev->reg_base))
+ return PTR_ERR(hr_dev->reg_base);
+
+ /* read the node_guid of IB device from the DT or ACPI */
+ ret = device_property_read_u8_array(dev, "node-guid",
+ (u8 *)&hr_dev->ib_dev.node_guid,
+ GUID_LEN);
+ if (ret) {
+ dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
+ return ret;
+ }
+
+ /* get the RoCE associated ethernet ports or netdevices */
+ for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
+ if (dev_of_node(dev)) {
+ net_node = of_parse_phandle(dev->of_node, "eth-handle",
+ i);
+ if (!net_node)
+ continue;
+ pdev = of_find_device_by_node(net_node);
+ } else if (is_acpi_device_node(dev->fwnode)) {
+ struct acpi_reference_args args;
+ struct fwnode_handle *fwnode;
+
+ ret = acpi_node_get_property_reference(dev->fwnode,
+ "eth-handle",
+ i, &args);
+ if (ret)
+ continue;
+ fwnode = acpi_fwnode_handle(args.adev);
+ pdev = hns_roce_find_pdev(fwnode);
+ } else {
+ dev_err(dev, "cannot read data from DT or ACPI\n");
+ return -ENXIO;
+ }
+
+ if (pdev) {
+ netdev = platform_get_drvdata(pdev);
+ phy_port = (u8)i;
+ if (netdev) {
+ hr_dev->iboe.netdevs[port_cnt] = netdev;
+ hr_dev->iboe.phy_port[port_cnt] = phy_port;
+ } else {
+ dev_err(dev, "no netdev found with pdev %s\n",
+ pdev->name);
+ return -ENODEV;
+ }
+ port_cnt++;
+ }
+ }
+
+ if (port_cnt == 0) {
+ dev_err(dev, "unable to get eth-handle for available ports!\n");
+ return -EINVAL;
+ }
+
+ hr_dev->caps.num_ports = port_cnt;
+
+ /* cmd issue mode: 0 is poll, 1 is event */
+ hr_dev->cmd_mod = 1;
+ hr_dev->loop_idc = 0;
+ hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
+ hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
+
+ /* read the interrupt names from the DT or ACPI */
+ ret = device_property_read_string_array(dev, "interrupt-names",
+ hr_dev->irq_names,
+ HNS_ROCE_MAX_IRQ_NUM);
+ if (ret < 0) {
+ dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
+ return ret;
+ }
+
+ /* fetch the interrupt numbers */
+ for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
+ hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
+ if (hr_dev->irq[i] <= 0) {
+ dev_err(dev, "platform get of irq[=%d] failed!\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * hns_roce_probe - RoCE driver entry point
+ * @pdev: pointer to platform device
+ * Return: 0 on success, or a negative errno on failure
+ *
+ */
+static int hns_roce_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct hns_roce_dev *hr_dev;
+ struct device *dev = &pdev->dev;
+
+ hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
+ if (!hr_dev)
+ return -ENOMEM;
+
+ hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
+ if (!hr_dev->priv) {
+ ret = -ENOMEM;
+ goto error_failed_kzalloc;
+ }
+
+ hr_dev->pdev = pdev;
+ hr_dev->dev = dev;
+ platform_set_drvdata(pdev, hr_dev);
+
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
+ dev_err(dev, "Not usable DMA addressing mode\n");
+ ret = -EIO;
+ goto error_failed_get_cfg;
+ }
+
+ ret = hns_roce_get_cfg(hr_dev);
+ if (ret) {
+ dev_err(dev, "Get Configuration failed!\n");
+ goto error_failed_get_cfg;
+ }
+
+ ret = hns_roce_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "RoCE engine init failed!\n");
+ goto error_failed_get_cfg;
+ }
+
+ return 0;
+
+error_failed_get_cfg:
+ kfree(hr_dev->priv);
+
+error_failed_kzalloc:
+ ib_dealloc_device(&hr_dev->ib_dev);
+
+ return ret;
+}
+
+/**
+ * hns_roce_remove - remove RoCE device
+ * @pdev: pointer to platform device
+ */
+static int hns_roce_remove(struct platform_device *pdev)
+{
+ struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
+
+ hns_roce_exit(hr_dev);
+ kfree(hr_dev->priv);
+ ib_dealloc_device(&hr_dev->ib_dev);
+
+ return 0;
+}
+
+static struct platform_driver hns_roce_driver = {
+ .probe = hns_roce_probe,
+ .remove = hns_roce_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = hns_roce_of_match,
+ .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
+ },
+};
+
+module_platform_driver(hns_roce_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
+MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
+MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
+MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index b213b5e6fef1..21a07ef0afc9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -72,6 +72,8 @@
#define HNS_ROCE_V1_CQE_ENTRY_SIZE 32
#define HNS_ROCE_V1_PAGE_SIZE_SUPPORT 0xFFFFF000
+#define HNS_ROCE_V1_TABLE_CHUNK_SIZE (1 << 17)
+
#define HNS_ROCE_V1_EXT_RAQ_WF 8
#define HNS_ROCE_V1_RAQ_ENTRY 64
#define HNS_ROCE_V1_RAQ_DEPTH 32768
@@ -948,6 +950,11 @@ struct hns_roce_qp_context {
#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \
(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S)
+#define STATUS_MASK 0xff
+#define GO_BIT_TIMEOUT_MSECS 10000
+#define HCR_STATUS_OFFSET 0x18
+#define HCR_GO_BIT 15
+
struct hns_roce_rq_db {
u32 u32_4;
u32 u32_8;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
new file mode 100644
index 000000000000..8f719c00467b
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -0,0 +1,3296 @@
+/*
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/acpi.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <rdma/ib_umem.h>
+
+#include "hnae3.h"
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_hw_v2.h"
+
+static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
+ struct ib_sge *sg)
+{
+ dseg->lkey = cpu_to_le32(sg->lkey);
+ dseg->addr = cpu_to_le64(sg->addr);
+ dseg->len = cpu_to_le32(sg->length);
+}
+
+static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
+ struct hns_roce_qp *qp = to_hr_qp(ibqp);
+ struct hns_roce_v2_wqe_data_seg *dseg;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_v2_db sq_db;
+ unsigned int sge_ind = 0;
+ unsigned int wqe_sz = 0;
+ unsigned int owner_bit;
+ unsigned long flags;
+ unsigned int ind;
+ void *wqe = NULL;
+ int ret = 0;
+ int nreq;
+ int i;
+
+ if (unlikely(ibqp->qp_type != IB_QPT_RC)) {
+ dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
+ *bad_wr = NULL;
+ return -EOPNOTSUPP;
+ }
+
+ if (unlikely(qp->state != IB_QPS_RTS && qp->state != IB_QPS_SQD)) {
+ dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+ ind = qp->sq_next_wqe;
+ sge_ind = qp->next_sge;
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->sq.max_gs)) {
+ dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
+ wr->num_sge, qp->sq.max_gs);
+ ret = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
+ qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
+ wr->wr_id;
+
+ owner_bit = ~(qp->sq.head >> ilog2(qp->sq.wqe_cnt)) & 0x1;
+ rc_sq_wqe = wqe;
+ memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
+ for (i = 0; i < wr->num_sge; i++)
+ rc_sq_wqe->msg_len += wr->sg_list[i].length;
+
+ rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
+ (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
+ owner_bit);
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_RDMA_READ);
+ rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_RDMA_WRITE:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
+ rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
+ rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_SEND:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND);
+ break;
+ case IB_WR_SEND_WITH_INV:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
+ break;
+ case IB_WR_LOCAL_INV:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_LOCAL_INV);
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
+ break;
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
+ break;
+ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
+ break;
+ default:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_MASK);
+ break;
+ }
+
+ wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
+ dseg = wqe;
+ if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
+ if (rc_sq_wqe->msg_len >
+ hr_dev->caps.max_sq_inline) {
+ ret = -EINVAL;
+ *bad_wr = wr;
+ dev_err(dev, "inline len(1-%d)=%d, illegal",
+ rc_sq_wqe->msg_len,
+ hr_dev->caps.max_sq_inline);
+ goto out;
+ }
+
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(wqe, ((void *)wr->sg_list[i].addr),
+ wr->sg_list[i].length);
+ wqe += wr->sg_list[i].length;
+ wqe_sz += wr->sg_list[i].length;
+ }
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
+ } else {
+ if (wr->num_sge <= 2) {
+ for (i = 0; i < wr->num_sge; i++)
+ set_data_seg_v2(dseg + i,
+ wr->sg_list + i);
+ } else {
+ roce_set_field(rc_sq_wqe->byte_20,
+ V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+ V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+ sge_ind & (qp->sge.sge_cnt - 1));
+
+ for (i = 0; i < 2; i++)
+ set_data_seg_v2(dseg + i,
+ wr->sg_list + i);
+
+ dseg = get_send_extend_sge(qp,
+ sge_ind & (qp->sge.sge_cnt - 1));
+
+ for (i = 0; i < wr->num_sge - 2; i++) {
+ set_data_seg_v2(dseg + i,
+ wr->sg_list + 2 + i);
+ sge_ind++;
+ }
+ }
+
+ roce_set_field(rc_sq_wqe->byte_16,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
+ wr->num_sge);
+ wqe_sz += wr->num_sge *
+ sizeof(struct hns_roce_v2_wqe_data_seg);
+ }
+ ind++;
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->sq.head += nreq;
+ /* Memory barrier */
+ wmb();
+
+ sq_db.byte_4 = 0;
+ sq_db.parameter = 0;
+
+ roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
+ V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
+ roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
+ V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
+ roce_set_field(sq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
+ V2_DB_PARAMETER_CONS_IDX_S,
+ qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
+ roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
+ V2_DB_PARAMETER_SL_S, qp->sl);
+
+ hns_roce_write64_k((__be32 *)&sq_db, qp->sq.db_reg_l);
+
+ qp->sq_next_wqe = ind;
+ qp->next_sge = sge_ind;
+ }
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return ret;
+}
+
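/*
 * Worked example of the owner-bit computation in hns_roce_v2_post_send()
 * above, owner_bit = ~(sq.head >> ilog2(wqe_cnt)) & 0x1, using an
 * illustrative wqe_cnt of 64 (so ilog2(wqe_cnt) == 6):
 *
 *   sq.head   0..63   ->  head >> 6 == 0  ->  owner_bit = ~0 & 1 = 1
 *   sq.head  64..127  ->  head >> 6 == 1  ->  owner_bit = ~1 & 1 = 0
 *
 * The bit flips on every pass over the SQ ring, so the hardware can tell a
 * freshly posted WQE from a stale one left over from the previous lap.
 */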
+static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_v2_wqe_data_seg *dseg;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_v2_db rq_db;
+ unsigned long flags;
+ void *wqe = NULL;
+ int ret = 0;
+ int nreq;
+ int ind;
+ int i;
+
+ spin_lock_irqsave(&hr_qp->rq.lock, flags);
+ ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
+
+ if (hr_qp->state == IB_QPS_RESET || hr_qp->state == IB_QPS_ERR) {
+ spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
+ hr_qp->ibqp.recv_cq)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
+ dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
+ wr->num_sge, hr_qp->rq.max_gs);
+ ret = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ wqe = get_recv_wqe(hr_qp, ind);
+ dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
+ for (i = 0; i < wr->num_sge; i++) {
+ if (!wr->sg_list[i].length)
+ continue;
+ set_data_seg_v2(dseg, wr->sg_list + i);
+ dseg++;
+ }
+
+ if (i < hr_qp->rq.max_gs) {
+ dseg[i].lkey = cpu_to_be32(HNS_ROCE_INVALID_LKEY);
+ dseg[i].addr = 0;
+ }
+
+ hr_qp->rq.wrid[ind] = wr->wr_id;
+
+ ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
+ }
+
+out:
+ if (likely(nreq)) {
+ hr_qp->rq.head += nreq;
+ /* Memory barrier */
+ wmb();
+
+ rq_db.byte_4 = 0;
+ rq_db.parameter = 0;
+
+ roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_TAG_M,
+ V2_DB_BYTE_4_TAG_S, hr_qp->qpn);
+ roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_CMD_M,
+ V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_RQ_DB);
+ roce_set_field(rq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
+ V2_DB_PARAMETER_CONS_IDX_S, hr_qp->rq.head);
+
+ hns_roce_write64_k((__be32 *)&rq_db, hr_qp->rq.db_reg_l);
+ }
+ spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
+
+ return ret;
+}
+
+static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
+{
+ int ntu = ring->next_to_use;
+ int ntc = ring->next_to_clean;
+ int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
+ return ring->desc_num - used - 1;
+}
+
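/*
 * Worked example of the free-slot computation in hns_roce_cmq_space()
 * above, using the CSQ/CRQ depth of 1024 descriptors configured later in
 * this file (ring indices are illustrative):
 *
 *   ntu = 10, ntc = 5, desc_num = 1024
 *   used  = (10 - 5 + 1024) % 1024 = 5
 *   space = 1024 - 5 - 1           = 1018
 *
 * One slot is deliberately left unused so that next_to_use == next_to_clean
 * always means "ring empty" and never "ring full".
 */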
+static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
+
+ ring->desc = kzalloc(size, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_cmq_ring *ring)
+{
+ dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
+ ring->desc_num * sizeof(struct hns_roce_cmq_desc),
+ DMA_BIDIRECTIONAL);
+ kfree(ring->desc);
+}
+
+static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
+ &priv->cmq.csq : &priv->cmq.crq;
+
+ ring->flag = ring_type;
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ return hns_roce_alloc_cmq_desc(hr_dev, ring);
+}
+
+static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
+ &priv->cmq.csq : &priv->cmq.crq;
+ dma_addr_t dma = ring->desc_dma_addr;
+
+ if (ring_type == TYPE_CSQ) {
+ roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
+ roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
+ upper_32_bits(dma));
+ roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
+ (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
+ HNS_ROCE_CMQ_ENABLE);
+ roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
+ roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
+ } else {
+ roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
+ roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
+ upper_32_bits(dma));
+ roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
+ (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
+ HNS_ROCE_CMQ_ENABLE);
+ roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
+ roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
+ }
+}
+
+static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ int ret;
+
+ /* Setup the queue entries for command queue */
+ priv->cmq.csq.desc_num = 1024;
+ priv->cmq.crq.desc_num = 1024;
+
+ /* Setup the lock for command queue */
+ spin_lock_init(&priv->cmq.csq.lock);
+ spin_lock_init(&priv->cmq.crq.lock);
+
+ /* Setup Tx write back timeout */
+ priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
+
+ /* Init CSQ */
+ ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
+ if (ret) {
+ dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* Init CRQ */
+ ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
+ if (ret) {
+ dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
+ goto err_crq;
+ }
+
+ /* Init CSQ REG */
+ hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);
+
+ /* Init CRQ REG */
+ hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);
+
+ return 0;
+
+err_crq:
+ hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
+
+ return ret;
+}
+
+static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+
+ hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
+ hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
+}
+
+static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
+ enum hns_roce_opcode_type opcode,
+ bool is_read)
+{
+ memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flag =
+ cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
+}
+
+static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
+
+ return head == priv->cmq.csq.next_to_use;
+}
+
+static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+ struct hns_roce_cmq_desc *desc;
+ u16 ntc = csq->next_to_clean;
+ u32 head;
+ int clean = 0;
+
+ desc = &csq->desc[ntc];
+ head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
+ while (head != ntc) {
+ memset(desc, 0, sizeof(*desc));
+ ntc++;
+ if (ntc == csq->desc_num)
+ ntc = 0;
+ desc = &csq->desc[ntc];
+ clean++;
+ }
+ csq->next_to_clean = ntc;
+
+ return clean;
+}
+
+static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc, int num)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+ struct hns_roce_cmq_desc *desc_to_use;
+ bool complete = false;
+ u32 timeout = 0;
+ int handle = 0;
+ u16 desc_ret;
+ int ret = 0;
+ int ntc;
+
+ spin_lock_bh(&csq->lock);
+
+ if (num > hns_roce_cmq_space(csq)) {
+ spin_unlock_bh(&csq->lock);
+ return -EBUSY;
+ }
+
+ /*
+ * Record the location of desc in the cmq for this call,
+ * which will be used by the hardware to write back
+ */
+ ntc = csq->next_to_use;
+
+ while (handle < num) {
+ desc_to_use = &csq->desc[csq->next_to_use];
+ *desc_to_use = desc[handle];
+ dev_dbg(hr_dev->dev, "set cmq desc:\n");
+ csq->next_to_use++;
+ if (csq->next_to_use == csq->desc_num)
+ csq->next_to_use = 0;
+ handle++;
+ }
+
+ /* Write to hardware */
+ roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
+
+ /*
+ * If the command is synchronous, wait for the firmware to write back;
+ * if multiple descriptors are sent, use the first one to check
+ */
+ if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
+ do {
+ if (hns_roce_cmq_csq_done(hr_dev))
+ break;
+ udelay(1);
+ timeout++;
+ } while (timeout < priv->cmq.tx_timeout);
+ }
+
+ if (hns_roce_cmq_csq_done(hr_dev)) {
+ complete = true;
+ handle = 0;
+ while (handle < num) {
+ /* get the result of hardware write back */
+ desc_to_use = &csq->desc[ntc];
+ desc[handle] = *desc_to_use;
+ dev_dbg(hr_dev->dev, "Get cmq desc:\n");
+ desc_ret = desc[handle].retval;
+ if (desc_ret == CMD_EXEC_SUCCESS)
+ ret = 0;
+ else
+ ret = -EIO;
+ priv->cmq.last_status = desc_ret;
+ ntc++;
+ handle++;
+ if (ntc == csq->desc_num)
+ ntc = 0;
+ }
+ }
+
+ if (!complete)
+ ret = -EAGAIN;
+
+ /* clean the command send queue */
+ handle = hns_roce_cmq_csq_clean(hr_dev);
+ if (handle != num)
+ dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
+ handle, num);
+
+ spin_unlock_bh(&csq->lock);
+
+ return ret;
+}
+
+static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_query_version *resp;
+ struct hns_roce_cmq_desc desc;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ resp = (struct hns_roce_query_version *)desc.data;
+ hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
+ hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id);
+
+ return 0;
+}
+
+static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cfg_global_param *req;
+ struct hns_roce_cmq_desc desc;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
+ false);
+
+ req = (struct hns_roce_cfg_global_param *)desc.data;
+ memset(req, 0, sizeof(*req));
+ roce_set_field(req->time_cfg_udp_port,
+ CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
+ CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
+ roce_set_field(req->time_cfg_udp_port,
+ CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
+ CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_pf_res *res;
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ hns_roce_cmq_setup_basic_desc(&desc[i],
+ HNS_ROCE_OPC_QUERY_PF_RES, true);
+
+ if (i == 0)
+ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ else
+ desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ }
+
+ ret = hns_roce_cmq_send(hr_dev, desc, 2);
+ if (ret)
+ return ret;
+
+ res = (struct hns_roce_pf_res *)desc[0].data;
+
+ hr_dev->caps.qpc_bt_num = roce_get_field(res->qpc_bt_idx_num,
+ PF_RES_DATA_1_PF_QPC_BT_NUM_M,
+ PF_RES_DATA_1_PF_QPC_BT_NUM_S);
+ hr_dev->caps.srqc_bt_num = roce_get_field(res->srqc_bt_idx_num,
+ PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
+ PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
+ hr_dev->caps.cqc_bt_num = roce_get_field(res->cqc_bt_idx_num,
+ PF_RES_DATA_3_PF_CQC_BT_NUM_M,
+ PF_RES_DATA_3_PF_CQC_BT_NUM_S);
+ hr_dev->caps.mpt_bt_num = roce_get_field(res->mpt_bt_idx_num,
+ PF_RES_DATA_4_PF_MPT_BT_NUM_M,
+ PF_RES_DATA_4_PF_MPT_BT_NUM_S);
+
+ return 0;
+}
+
+static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_vf_res_a *req_a;
+ struct hns_roce_vf_res_b *req_b;
+ int i;
+
+ req_a = (struct hns_roce_vf_res_a *)desc[0].data;
+ req_b = (struct hns_roce_vf_res_b *)desc[1].data;
+ memset(req_a, 0, sizeof(*req_a));
+ memset(req_b, 0, sizeof(*req_b));
+ for (i = 0; i < 2; i++) {
+ hns_roce_cmq_setup_basic_desc(&desc[i],
+ HNS_ROCE_OPC_ALLOC_VF_RES, false);
+
+ if (i == 0)
+ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ else
+ desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+
+ if (i == 0) {
+ roce_set_field(req_a->vf_qpc_bt_idx_num,
+ VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
+ VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_qpc_bt_idx_num,
+ VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
+ VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
+ HNS_ROCE_VF_QPC_BT_NUM);
+
+ roce_set_field(req_a->vf_srqc_bt_idx_num,
+ VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
+ VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_srqc_bt_idx_num,
+ VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
+ VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
+ HNS_ROCE_VF_SRQC_BT_NUM);
+
+ roce_set_field(req_a->vf_cqc_bt_idx_num,
+ VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
+ VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_cqc_bt_idx_num,
+ VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
+ VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
+ HNS_ROCE_VF_CQC_BT_NUM);
+
+ roce_set_field(req_a->vf_mpt_bt_idx_num,
+ VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
+ VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_mpt_bt_idx_num,
+ VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
+ VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
+ HNS_ROCE_VF_MPT_BT_NUM);
+
+ roce_set_field(req_a->vf_eqc_bt_idx_num,
+ VF_RES_A_DATA_5_VF_EQC_IDX_M,
+ VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
+ roce_set_field(req_a->vf_eqc_bt_idx_num,
+ VF_RES_A_DATA_5_VF_EQC_NUM_M,
+ VF_RES_A_DATA_5_VF_EQC_NUM_S,
+ HNS_ROCE_VF_EQC_NUM);
+ } else {
+ roce_set_field(req_b->vf_smac_idx_num,
+ VF_RES_B_DATA_1_VF_SMAC_IDX_M,
+ VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
+ roce_set_field(req_b->vf_smac_idx_num,
+ VF_RES_B_DATA_1_VF_SMAC_NUM_M,
+ VF_RES_B_DATA_1_VF_SMAC_NUM_S,
+ HNS_ROCE_VF_SMAC_NUM);
+
+ roce_set_field(req_b->vf_sgid_idx_num,
+ VF_RES_B_DATA_2_VF_SGID_IDX_M,
+ VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
+ roce_set_field(req_b->vf_sgid_idx_num,
+ VF_RES_B_DATA_2_VF_SGID_NUM_M,
+ VF_RES_B_DATA_2_VF_SGID_NUM_S,
+ HNS_ROCE_VF_SGID_NUM);
+
+ roce_set_field(req_b->vf_qid_idx_sl_num,
+ VF_RES_B_DATA_3_VF_QID_IDX_M,
+ VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
+ roce_set_field(req_b->vf_qid_idx_sl_num,
+ VF_RES_B_DATA_3_VF_SL_NUM_M,
+ VF_RES_B_DATA_3_VF_SL_NUM_S,
+ HNS_ROCE_VF_SL_NUM);
+ }
+ }
+
+ return hns_roce_cmq_send(hr_dev, desc, 2);
+}
+
+static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
+{
+ u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
+ u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
+ u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
+ u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
+ struct hns_roce_cfg_bt_attr *req;
+ struct hns_roce_cmq_desc desc;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
+ req = (struct hns_roce_cfg_bt_attr *)desc.data;
+ memset(req, 0, sizeof(*req));
+
+ roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
+ CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
+ hr_dev->caps.qpc_ba_pg_sz);
+ roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
+ CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
+ hr_dev->caps.qpc_buf_pg_sz);
+ roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
+ CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
+ qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
+
+ roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
+ CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
+ hr_dev->caps.srqc_ba_pg_sz);
+ roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
+ CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
+ hr_dev->caps.srqc_buf_pg_sz);
+ roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
+ CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
+ srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
+
+ roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
+ CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
+ hr_dev->caps.cqc_ba_pg_sz);
+ roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
+ CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
+ hr_dev->caps.cqc_buf_pg_sz);
+ roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
+ CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
+ cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
+
+ roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
+ CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
+ hr_dev->caps.mpt_ba_pg_sz);
+ roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
+ CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
+ hr_dev->caps.mpt_buf_pg_sz);
+ roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
+ CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
+ mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
+static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ int ret;
+
+ ret = hns_roce_cmq_query_hw_info(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = hns_roce_config_global_param(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
+ ret);
+ }
+
+ /* Get pf resource owned by every pf */
+ ret = hns_roce_query_pf_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = hns_roce_alloc_vf_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ hr_dev->vendor_part_id = 0;
+ hr_dev->sys_image_guid = 0;
+
+ caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
+ caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
+ caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
+ caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
+ caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
+ caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
+ caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
+ caps->num_uars = HNS_ROCE_V2_UAR_NUM;
+ caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
+ caps->num_aeq_vectors = 1;
+ caps->num_comp_vectors = 63;
+ caps->num_other_vectors = 0;
+ caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
+ caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
+ caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
+ caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
+ caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
+ caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
+ caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
+ caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
+ caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
+ caps->qpc_entry_sz = HNS_ROCE_V2_QPC_ENTRY_SZ;
+ caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
+ caps->trrl_entry_sz = HNS_ROCE_V2_TRRL_ENTRY_SZ;
+ caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
+ caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
+ caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
+ caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
+ caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
+ caps->reserved_lkey = 0;
+ caps->reserved_pds = 0;
+ caps->reserved_mrws = 1;
+ caps->reserved_uars = 0;
+ caps->reserved_cqs = 0;
+
+ caps->qpc_ba_pg_sz = 0;
+ caps->qpc_buf_pg_sz = 0;
+ caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+ caps->srqc_ba_pg_sz = 0;
+ caps->srqc_buf_pg_sz = 0;
+ caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0;
+ caps->cqc_ba_pg_sz = 0;
+ caps->cqc_buf_pg_sz = 0;
+ caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+ caps->mpt_ba_pg_sz = 0;
+ caps->mpt_buf_pg_sz = 0;
+ caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+ caps->pbl_ba_pg_sz = 0;
+ caps->pbl_buf_pg_sz = 0;
+ caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
+ caps->mtt_ba_pg_sz = 0;
+ caps->mtt_buf_pg_sz = 0;
+ caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
+ caps->cqe_ba_pg_sz = 0;
+ caps->cqe_buf_pg_sz = 0;
+ caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
+ caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
+
+ caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
+ HNS_ROCE_CAP_FLAG_ROCE_V1_V2;
+ caps->pkey_table_len[0] = 1;
+ caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
+ caps->local_ca_ack_delay = 0;
+ caps->max_mtu = IB_MTU_4096;
+
+ ret = hns_roce_v2_set_bt(hr_dev);
+ if (ret)
+ dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
+ ret);
+
+ return ret;
+}
+
+static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
+{
+ u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
+
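+ /* The hardware run bit stays set while a mailbox command is still in flight */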
+ return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
+}
+
+static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
+{
+ u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
+
+ return status & HNS_ROCE_HW_MB_STATUS_MASK;
+}
+
+static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, u32 in_modifier, u8 op_modifier,
+ u16 op, u16 token, int event)
+{
+ struct device *dev = hr_dev->dev;
+ u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base +
+ ROCEE_VF_MB_CFG0_REG);
+ unsigned long end;
+ u32 val0 = 0;
+ u32 val1 = 0;
+
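+ /* Wait for any previously posted mailbox command to be accepted by hardware */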
+ end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
+ while (hns_roce_v2_cmd_pending(hr_dev)) {
+ if (time_after(jiffies, end)) {
+ dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
+ (int)end);
+ return -EAGAIN;
+ }
+ cond_resched();
+ }
+
+ roce_set_field(val0, HNS_ROCE_VF_MB4_TAG_MASK,
+ HNS_ROCE_VF_MB4_TAG_SHIFT, in_modifier);
+ roce_set_field(val0, HNS_ROCE_VF_MB4_CMD_MASK,
+ HNS_ROCE_VF_MB4_CMD_SHIFT, op);
+ roce_set_field(val1, HNS_ROCE_VF_MB5_EVENT_MASK,
+ HNS_ROCE_VF_MB5_EVENT_SHIFT, event);
+ roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK,
+ HNS_ROCE_VF_MB5_TOKEN_SHIFT, token);
+
+ __raw_writeq(cpu_to_le64(in_param), hcr + 0);
+ __raw_writeq(cpu_to_le64(out_param), hcr + 2);
+
+ /* Ensure the mailbox parameters are written before ringing the doorbell */
+ wmb();
+
+ __raw_writel(cpu_to_le32(val0), hcr + 4);
+ __raw_writel(cpu_to_le32(val1), hcr + 5);
+
+ mmiowb();
+
+ return 0;
+}
+
+static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
+ unsigned long timeout)
+{
+ struct device *dev = hr_dev->dev;
+ unsigned long end = 0;
+ u32 status;
+
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
+ cond_resched();
+
+ if (hns_roce_v2_cmd_pending(hr_dev)) {
+ dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
+ return -ETIMEDOUT;
+ }
+
+ status = hns_roce_v2_cmd_complete(hr_dev);
+ if (status != 0x1) {
+ dev_err(dev, "mailbox status 0x%x!\n", status);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
+ int gid_index, union ib_gid *gid,
+ const struct ib_gid_attr *attr)
+{
+ enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
+ u32 *p;
+ u32 val;
+
+ if (!gid || !attr)
+ return -EINVAL;
+
+ if (attr->gid_type == IB_GID_TYPE_ROCE)
+ sgid_type = GID_TYPE_FLAG_ROCE_V1;
+
+ if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+ if (ipv6_addr_v4mapped((void *)gid))
+ sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
+ else
+ sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
+ }
+
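+ /* Write the 128-bit GID into the four SGID config registers for this index */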
+ p = (u32 *)&gid->raw[0];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG0_REG +
+ 0x20 * gid_index);
+
+ p = (u32 *)&gid->raw[4];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG1_REG +
+ 0x20 * gid_index);
+
+ p = (u32 *)&gid->raw[8];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG2_REG +
+ 0x20 * gid_index);
+
+ p = (u32 *)&gid->raw[0xc];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG3_REG +
+ 0x20 * gid_index);
+
+ val = roce_read(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index);
+ roce_set_field(val, ROCEE_VF_SGID_CFG4_SGID_TYPE_M,
+ ROCEE_VF_SGID_CFG4_SGID_TYPE_S, sgid_type);
+
+ roce_write(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index, val);
+
+ return 0;
+}
+
+static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
+ u8 *addr)
+{
+ u16 reg_smac_h;
+ u32 reg_smac_l;
+ u32 val;
+
+ reg_smac_l = *(u32 *)(&addr[0]);
+ roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_VF_SMAC_CFG0_REG +
+ 0x08 * phy_port);
+ val = roce_read(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port);
+
+ reg_smac_h = *(u16 *)(&addr[4]);
+ roce_set_field(val, ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M,
+ ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S, reg_smac_h);
+ roce_write(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port, val);
+
+ return 0;
+}
+
+static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+ unsigned long mtpt_idx)
+{
+ struct hns_roce_v2_mpt_entry *mpt_entry;
+ struct scatterlist *sg;
+ u64 *pages;
+ int entry;
+ int i;
+
+ mpt_entry = mb_buf;
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
+ V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
+ V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
+ HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, mr->pbl_ba_pg_sz);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, mr->pd);
+ mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
+
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
+ (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
+ (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
+ (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
+ (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
+ mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
+
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
+ mr->type == MR_TYPE_MR ? 0 : 1);
+ mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
+
+ mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
+ mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
+ mpt_entry->lkey = cpu_to_le32(mr->key);
+ mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
+ mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
+
+ if (mr->type == MR_TYPE_DMA)
+ return 0;
+
+ mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+
+ mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+ roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
+ V2_MPT_BYTE_48_PBL_BA_H_S,
+ upper_32_bits(mr->pbl_ba >> 3));
+ mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
+
+ pages = (u64 *)__get_free_page(GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
+ pages[i] = ((u64)sg_dma_address(sg)) >> 6;
+
+ /* Record the first two entries directly in the MTPT entry */
+ if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+ break;
+ i++;
+ }
+
+ mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
+ roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
+ V2_MPT_BYTE_56_PA0_H_S,
+ upper_32_bits(pages[0]));
+ mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
+
+ mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
+ roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
+ V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
+
+ free_page((unsigned long)pages);
+
+ roce_set_field(mpt_entry->byte_64_buf_pa1,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, mr->pbl_buf_pg_sz);
+ mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
+
+ return 0;
+}
+
+static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr, int flags,
+ u32 pdn, int mr_access_flags, u64 iova,
+ u64 size, void *mb_buf)
+{
+ struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
+
+ if (flags & IB_MR_REREG_PD) {
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, pdn);
+ mr->pd = pdn;
+ }
+
+ if (flags & IB_MR_REREG_ACCESS) {
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
+ V2_MPT_BYTE_8_BIND_EN_S,
+ (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
+ V2_MPT_BYTE_8_ATOMIC_EN_S,
+ (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0));
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
+ (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0));
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
+ (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
+ (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
+ }
+
+ if (flags & IB_MR_REREG_TRANS) {
+ mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
+ mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
+ mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
+ mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
+
+ mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+ mpt_entry->pbl_ba_l =
+ cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+ roce_set_field(mpt_entry->byte_48_mode_ba,
+ V2_MPT_BYTE_48_PBL_BA_H_M,
+ V2_MPT_BYTE_48_PBL_BA_H_S,
+ upper_32_bits(mr->pbl_ba >> 3));
+ mpt_entry->byte_48_mode_ba =
+ cpu_to_le32(mpt_entry->byte_48_mode_ba);
+
+ mr->iova = iova;
+ mr->size = size;
+ }
+
+ return 0;
+}
+
+static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
+{
+ return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
+ n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+}
+
+static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
+{
+ struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
+
+ /* A CQE is owned by software when its owner bit differs from the MSB of cons_index */
+ return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
+ !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
+}
+
+static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
+{
+ return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
+}
+
+static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
+{
+ struct hns_roce_v2_cq_db cq_db;
+
+ cq_db.byte_4 = 0;
+ cq_db.parameter = 0;
+
+ roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_TAG_M,
+ V2_CQ_DB_BYTE_4_TAG_S, hr_cq->cqn);
+ roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_CMD_M,
+ V2_CQ_DB_BYTE_4_CMD_S, HNS_ROCE_V2_CQ_DB_PTR);
+
+ roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CONS_IDX_M,
+ V2_CQ_DB_PARAMETER_CONS_IDX_S,
+ cons_index & ((hr_cq->cq_depth << 1) - 1));
+ roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CMD_SN_M,
+ V2_CQ_DB_PARAMETER_CMD_SN_S, 1);
+
+ hns_roce_write64_k((__be32 *)&cq_db, hr_cq->cq_db_l);
+}
+
+static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
+ struct hns_roce_srq *srq)
+{
+ struct hns_roce_v2_cqe *cqe, *dest;
+ u32 prod_index;
+ int nfreed = 0;
+ u8 owner_bit;
+
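+ /* Scan forward from the consumer index to locate the producer index */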
+ for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
+ ++prod_index) {
+ if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
+ break;
+ }
+
+ /*
+ * Now backwards through the CQ, removing CQ entries
+ * that match our QP by overwriting them with next entries.
+ */
+ while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
+ cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
+ if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
+ V2_CQE_BYTE_16_LCL_QPN_S) &
+ HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
+ /* SRQ is not supported, so there is no SRQ WQE to release here */
+ ++nfreed;
+ } else if (nfreed) {
+ dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
+ hr_cq->ib_cq.cqe);
+ owner_bit = roce_get_bit(dest->byte_4,
+ V2_CQE_BYTE_4_OWNER_S);
+ memcpy(dest, cqe, sizeof(*cqe));
+ roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
+ owner_bit);
+ }
+ }
+
+ if (nfreed) {
+ hr_cq->cons_index += nfreed;
+ /*
+ * Make sure update of buffer contents is done before
+ * updating consumer index.
+ */
+ wmb();
+ hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
+ }
+}
+
+static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
+ struct hns_roce_srq *srq)
+{
+ spin_lock_irq(&hr_cq->lock);
+ __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
+ spin_unlock_irq(&hr_cq->lock);
+}
+
+static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq, void *mb_buf,
+ u64 *mtts, dma_addr_t dma_handle, int nent,
+ u32 vector)
+{
+ struct hns_roce_v2_cq_context *cq_context;
+
+ cq_context = mb_buf;
+ memset(cq_context, 0, sizeof(*cq_context));
+
+ roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
+ V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
+ roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
+ V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
+ roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
+ V2_CQC_BYTE_4_CEQN_S, vector);
+ cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);
+
+ roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
+ V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
+
+ cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
+ cq_context->cqe_cur_blk_addr =
+ cpu_to_le32(cq_context->cqe_cur_blk_addr);
+
+ roce_set_field(cq_context->byte_16_hop_addr,
+ V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
+ V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
+ cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
+ roce_set_field(cq_context->byte_16_hop_addr,
+ V2_CQC_BYTE_16_CQE_HOP_NUM_M,
+ V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
+ HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
+
+ cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
+ roce_set_field(cq_context->byte_24_pgsz_addr,
+ V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
+ V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
+ cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
+ roce_set_field(cq_context->byte_24_pgsz_addr,
+ V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
+ V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
+ hr_dev->caps.cqe_ba_pg_sz);
+ roce_set_field(cq_context->byte_24_pgsz_addr,
+ V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
+ V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
+ hr_dev->caps.cqe_buf_pg_sz);
+
+ cq_context->cqe_ba = (u32)(dma_handle >> 3);
+
+ roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
+ V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
+}
+
+static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags flags)
+{
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ u32 notification_flag;
+ u32 doorbell[2];
+
+ doorbell[0] = 0;
+ doorbell[1] = 0;
+
+ notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
+ V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
+ /*
+ * flags = 0; Notification Flag = 1, next
+ * flags = 1; Notification Flag = 0, solicited
+ */
+ roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
+ hr_cq->cqn);
+ roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
+ HNS_ROCE_V2_CQ_DB_NTR);
+ roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
+ V2_CQ_DB_PARAMETER_CONS_IDX_S,
+ hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
+ roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
+ V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
+ roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
+ notification_flag);
+
+ hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
+
+ return 0;
+}
+
+static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
+ struct hns_roce_qp **cur_qp, struct ib_wc *wc)
+{
+ struct hns_roce_dev *hr_dev;
+ struct hns_roce_v2_cqe *cqe;
+ struct hns_roce_qp *hr_qp;
+ struct hns_roce_wq *wq;
+ int is_send;
+ u16 wqe_ctr;
+ u32 opcode;
+ u32 status;
+ int qpn;
+
+ /* Find cqe according to consumer index */
+ cqe = next_cqe_sw_v2(hr_cq);
+ if (!cqe)
+ return -EAGAIN;
+
+ ++hr_cq->cons_index;
+ /* Memory barrier */
+ rmb();
+
+ /* 0->SQ, 1->RQ */
+ is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
+
+ qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
+ V2_CQE_BYTE_16_LCL_QPN_S);
+
+ if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
+ hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+ hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
+ if (unlikely(!hr_qp)) {
+ dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
+ hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
+ return -EINVAL;
+ }
+ *cur_qp = hr_qp;
+ }
+
+ wc->qp = &(*cur_qp)->ibqp;
+ wc->vendor_err = 0;
+
+ status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
+ V2_CQE_BYTE_4_STATUS_S);
+ switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
+ case HNS_ROCE_CQE_V2_SUCCESS:
+ wc->status = IB_WC_SUCCESS;
+ break;
+ case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
+ wc->status = IB_WC_LOC_LEN_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
+ wc->status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
+ wc->status = IB_WC_LOC_PROT_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_MW_BIND_ERR:
+ wc->status = IB_WC_MW_BIND_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
+ wc->status = IB_WC_BAD_RESP_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
+ wc->status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
+ wc->status = IB_WC_REM_INV_REQ_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
+ wc->status = IB_WC_REM_ACCESS_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
+ wc->status = IB_WC_REM_OP_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
+ wc->status = IB_WC_RETRY_EXC_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
+ wc->status = IB_WC_RNR_RETRY_EXC_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
+ wc->status = IB_WC_REM_ABORT_ERR;
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+
+ /* If the CQE reports an error status, return without parsing the opcode */
+ if (wc->status != IB_WC_SUCCESS)
+ return 0;
+
+ if (is_send) {
+ wc->wc_flags = 0;
+ /* SQ corresponding to CQE */
+ switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
+ V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
+ case HNS_ROCE_SQ_OPCODE_SEND:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
+ wc->opcode = IB_WC_SEND;
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+ case HNS_ROCE_SQ_OPCODE_RDMA_READ:
+ wc->opcode = IB_WC_RDMA_READ;
+ wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+ break;
+ case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+ case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
+ wc->opcode = IB_WC_LOCAL_INV;
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ break;
+ case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
+ wc->opcode = IB_WC_COMP_SWAP;
+ wc->byte_len = 8;
+ break;
+ case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
+ wc->opcode = IB_WC_FETCH_ADD;
+ wc->byte_len = 8;
+ break;
+ case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
+ wc->opcode = IB_WC_MASKED_COMP_SWAP;
+ wc->byte_len = 8;
+ break;
+ case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
+ wc->opcode = IB_WC_MASKED_FETCH_ADD;
+ wc->byte_len = 8;
+ break;
+ case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
+ wc->opcode = IB_WC_REG_MR;
+ break;
+ case HNS_ROCE_SQ_OPCODE_BIND_MW:
+ wc->opcode = IB_WC_REG_MR;
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+
+ wq = &(*cur_qp)->sq;
+ if ((*cur_qp)->sq_signal_bits) {
+ /*
+ * If sq_signal_bits is set, first update the tail pointer to
+ * the WQE that this CQE corresponds to.
+ */
+ wqe_ctr = (u16)roce_get_field(cqe->byte_4,
+ V2_CQE_BYTE_4_WQE_INDX_M,
+ V2_CQE_BYTE_4_WQE_INDX_S);
+ wq->tail += (wqe_ctr - (u16)wq->tail) &
+ (wq->wqe_cnt - 1);
+ }
+
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ ++wq->tail;
+ } else {
+ /* RQ completion corresponding to the CQE */
+ wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+
+ opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
+ V2_CQE_BYTE_4_OPCODE_S);
+ switch (opcode & 0x1f) {
+ case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+ break;
+ case HNS_ROCE_V2_OPCODE_SEND:
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = 0;
+ break;
+ case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+ break;
+ case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = IB_WC_WITH_INVALIDATE;
+ wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey_immtdata);
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+
+ /* Update tail pointer, record wr_id */
+ wq = &(*cur_qp)->rq;
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ ++wq->tail;
+
+ wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
+ V2_CQE_BYTE_32_SL_S);
+ wc->src_qp = (u8)roce_get_field(cqe->byte_32,
+ V2_CQE_BYTE_32_RMT_QPN_M,
+ V2_CQE_BYTE_32_RMT_QPN_S);
+ wc->wc_flags |= (roce_get_bit(cqe->byte_32,
+ V2_CQE_BYTE_32_GRH_S) ?
+ IB_WC_GRH : 0);
+ }
+
+ return 0;
+}
+
+static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
+ struct ib_wc *wc)
+{
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ struct hns_roce_qp *cur_qp = NULL;
+ unsigned long flags;
+ int npolled;
+
+ spin_lock_irqsave(&hr_cq->lock, flags);
+
+ for (npolled = 0; npolled < num_entries; ++npolled) {
+ if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
+ break;
+ }
+
+ if (npolled) {
+ /* Memory barrier */
+ wmb();
+ hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
+ }
+
+ spin_unlock_irqrestore(&hr_cq->lock, flags);
+
+ return npolled;
+}
+
+static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj,
+ int step_idx)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct hns_roce_hem_iter iter;
+ struct hns_roce_hem_mhop mhop;
+ struct hns_roce_hem *hem;
+ unsigned long mhop_obj = obj;
+ int i, j, k;
+ int ret = 0;
+ u64 hem_idx = 0;
+ u64 l1_idx = 0;
+ u64 bt_ba = 0;
+ u32 chunk_ba_num;
+ u32 hop_num;
+ u16 op = 0xff;
+
+ if (!hns_roce_check_whether_mhop(hr_dev, table->type))
+ return 0;
+
+ hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+ i = mhop.l0_idx;
+ j = mhop.l1_idx;
+ k = mhop.l2_idx;
+ hop_num = mhop.hop_num;
+ chunk_ba_num = mhop.bt_chunk_size / 8;
+
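+ /* Derive the flat index of the target HEM chunk from the per-level BT indexes */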
+ if (hop_num == 2) {
+ hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
+ k;
+ l1_idx = i * chunk_ba_num + j;
+ } else if (hop_num == 1) {
+ hem_idx = i * chunk_ba_num + j;
+ } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
+ hem_idx = i;
+ }
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ op = HNS_ROCE_CMD_WRITE_QPC_BT0;
+ break;
+ case HEM_TYPE_MTPT:
+ op = HNS_ROCE_CMD_WRITE_MPT_BT0;
+ break;
+ case HEM_TYPE_CQC:
+ op = HNS_ROCE_CMD_WRITE_CQC_BT0;
+ break;
+ case HEM_TYPE_SRQC:
+ op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
+ break;
+ default:
+ dev_warn(dev, "Table %d not to be written by mailbox!\n",
+ table->type);
+ return 0;
+ }
+ op += step_idx;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ if (check_whether_last_step(hop_num, step_idx)) {
+ hem = table->hem[hem_idx];
+ for (hns_roce_hem_first(hem, &iter);
+ !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
+ bt_ba = hns_roce_hem_addr(&iter);
+
+ /* configure the ba, tag, and op */
+ ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
+ obj, 0, op,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ }
+ } else {
+ if (step_idx == 0)
+ bt_ba = table->bt_l0_dma_addr[i];
+ else if (step_idx == 1 && hop_num == 2)
+ bt_ba = table->bt_l1_dma_addr[l1_idx];
+
+ /* configure the ba, tag, and op */
+ ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
+ 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
+ }
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj,
+ int step_idx)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret = 0;
+ u16 op = 0xff;
+
+ if (!hns_roce_check_whether_mhop(hr_dev, table->type))
+ return 0;
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
+ break;
+ case HEM_TYPE_MTPT:
+ op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
+ break;
+ case HEM_TYPE_CQC:
+ op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
+ break;
+ case HEM_TYPE_SRQC:
+ op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
+ break;
+ default:
+ dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
+ table->type);
+ return 0;
+ }
+ op += step_idx;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ /* configure the tag and op */
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt,
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, context, sizeof(*context) * 2);
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
+ HNS_ROCE_CMD_MODIFY_QPC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+static void modify_qp_reset_to_init(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+ /*
+ * In the v2 engine, software passes both the context and a context
+ * mask to hardware when modifying a QP. For every field software
+ * needs to modify, all bits of that field in the context mask must
+ * be cleared to 0; otherwise they are set to 1.
+ */
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
+ V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
+ V2_QPC_BYTE_4_TST_S, 0);
+
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
+ ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
+
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+ V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+ V2_QPC_BYTE_4_SQPN_S, 0);
+
+ roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
+ V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
+ roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
+ V2_QPC_BYTE_16_PD_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
+ V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
+ V2_QPC_BYTE_20_RQWS_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
+ ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
+
+ /* When no VLAN is used, the VLAN index must be set to 0xFFF */
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
+ V2_QPC_BYTE_24_VLAN_IDX_S, 0xfff);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
+ V2_QPC_BYTE_24_VLAN_IDX_S, 0);
+
+ /*
+ * These fields should be zero in the context. Since all fields in
+ * the context default to zero, they need not be written again; only
+ * the corresponding fields in the context mask are cleared to 0.
+ */
+ roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
+ roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
+ roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
+ roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
+
+ roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M,
+ V2_QPC_BYTE_60_MAPID_S, 0);
+
+ roce_set_bit(qpc_mask->byte_60_qpst_mapid,
+ V2_QPC_BYTE_60_INNER_MAP_IND_S, 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
+
+ roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
+ V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+ roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
+ V2_QPC_BYTE_80_RX_CQN_S, 0);
+ if (ibqp->srq) {
+ roce_set_field(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
+ to_hr_srq(ibqp->srq)->srqn);
+ roce_set_field(qpc_mask->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
+ roce_set_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQ_EN_S, 1);
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQ_EN_S, 0);
+ }
+
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
+ V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
+
+ roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
+ V2_QPC_BYTE_92_SRQ_INFO_S, 0);
+
+ roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
+ V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
+
+ roce_set_field(qpc_mask->byte_104_rq_sge,
+ V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
+ V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
+
+ roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
+ roce_set_field(qpc_mask->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
+ V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
+ roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
+
+ qpc_mask->rq_rnr_timer = 0;
+ qpc_mask->rx_msg_len = 0;
+ qpc_mask->rx_rkey_pkt_info = 0;
+ qpc_mask->rx_va = 0;
+
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
+ V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
+ V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
+
+ roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0);
+ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
+ V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
+ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
+ V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
+
+ roce_set_field(qpc_mask->byte_144_raq,
+ V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
+ V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
+ roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S,
+ 0);
+ roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
+ V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
+ roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
+
+ roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
+ V2_QPC_BYTE_148_RQ_MSN_S, 0);
+ roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
+ V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
+
+ roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+ V2_QPC_BYTE_152_RAQ_PSN_S, 0);
+ roce_set_field(qpc_mask->byte_152_raq,
+ V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
+ V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
+
+ roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
+ V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
+
+ roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+ roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
+ V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
+
+ roce_set_field(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
+
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
+ roce_set_field(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
+ V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
+
+ roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
+ V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
+ roce_set_field(qpc_mask->byte_172_sq_psn,
+ V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
+ V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
+
+ roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
+ 0);
+
+ roce_set_field(qpc_mask->byte_176_msg_pktn,
+ V2_QPC_BYTE_176_MSG_USE_PKTN_M,
+ V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
+ roce_set_field(qpc_mask->byte_176_msg_pktn,
+ V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
+ V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
+
+ roce_set_field(qpc_mask->byte_184_irrl_idx,
+ V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
+ V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
+
+ qpc_mask->cur_sge_offset = 0;
+
+ roce_set_field(qpc_mask->byte_192_ext_sge,
+ V2_QPC_BYTE_192_CUR_SGE_IDX_M,
+ V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
+ roce_set_field(qpc_mask->byte_192_ext_sge,
+ V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
+ V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
+
+ roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
+ V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
+
+ roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
+ V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
+ roce_set_field(qpc_mask->byte_200_sq_max,
+ V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
+ V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
+
+ roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
+ roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
+
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
+ V2_QPC_BYTE_212_CHECK_FLG_S, 0);
+
+ qpc_mask->sq_timer = 0;
+
+ roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+ V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
+ V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
+ roce_set_field(qpc_mask->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
+ V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
+
+ qpc_mask->irrl_cur_sge_offset = 0;
+
+ roce_set_field(qpc_mask->byte_240_irrl_tail,
+ V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
+ V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
+ roce_set_field(qpc_mask->byte_240_irrl_tail,
+ V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
+ V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
+ roce_set_field(qpc_mask->byte_240_irrl_tail,
+ V2_QPC_BYTE_240_RX_ACK_MSN_M,
+ V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
+
+ roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
+ V2_QPC_BYTE_248_IRRL_PSN_S, 0);
+ roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
+ 0);
+ roce_set_field(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
+ V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
+ roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
+ roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
+ 0);
+
+ hr_qp->access_flags = attr->qp_access_flags;
+ hr_qp->pkey_index = attr->pkey_index;
+ roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+ V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+ roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+ V2_QPC_BYTE_252_TX_CQN_S, 0);
+
+ roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
+ V2_QPC_BYTE_252_ERR_TYPE_S, 0);
+
+ roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
+ V2_QPC_BYTE_256_RQ_CQE_IDX_M,
+ V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
+ roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
+ V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
+ V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
+}
+
+static void modify_qp_init_to_init(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+ /*
+ * In the v2 engine, software passes both the context and a context
+ * mask to hardware when modifying a QP. For every field software
+ * needs to modify, all bits of that field in the context mask must
+ * be cleared to 0; otherwise they are set to 1.
+ */
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
+ V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
+ V2_QPC_BYTE_4_TST_S, 0);
+
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
+ ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ !!(attr->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ !!(attr->qp_access_flags &
+ IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ 0);
+ } else {
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ 0);
+ }
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
+ ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
+
+ roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
+ V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
+ roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
+ V2_QPC_BYTE_16_PD_S, 0);
+
+ roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
+ V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+ roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
+ V2_QPC_BYTE_80_RX_CQN_S, 0);
+
+ roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+ V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+ roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+ V2_QPC_BYTE_252_TX_CQN_S, 0);
+
+ if (ibqp->srq) {
+ roce_set_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQ_EN_S, 1);
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQ_EN_S, 0);
+ roce_set_field(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
+ to_hr_srq(ibqp->srq)->srqn);
+ roce_set_field(qpc_mask->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ context->qkey_xrcd = attr->pkey_index;
+ else
+ context->qkey_xrcd = hr_qp->pkey_index;
+
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+ V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+ V2_QPC_BYTE_4_SQPN_S, 0);
+
+ roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
+ roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S, 0);
+ roce_set_field(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
+}
+
+static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct device *dev = hr_dev->dev;
+ dma_addr_t dma_handle_3;
+ dma_addr_t dma_handle_2;
+ dma_addr_t dma_handle;
+ u32 page_size;
+ u8 port_num;
+ u64 *mtts_3;
+ u64 *mtts_2;
+ u64 *mtts;
+ u8 *dmac;
+ u8 *smac;
+ int port;
+
+ /* Search qp buf's mtts */
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
+ hr_qp->mtt.first_seg, &dma_handle);
+ if (!mtts) {
+ dev_err(dev, "qp buf pa find failed\n");
+ return -EINVAL;
+ }
+
+ /* Search IRRL's mtts */
+ mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
+ hr_qp->qpn, &dma_handle_2);
+ if (!mtts_2) {
+ dev_err(dev, "qp irrl_table find failed\n");
+ return -EINVAL;
+ }
+
+ /* Search TRRL's mtts */
+ mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
+ hr_qp->qpn, &dma_handle_3);
+ if (!mtts_3) {
+ dev_err(dev, "qp trrl_table find failed\n");
+ return -EINVAL;
+ }
+
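+ /* These optional attributes are not allowed in the INIT to RTR transition */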
+ if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
+ (attr_mask & IB_QP_PKEY_INDEX) || (attr_mask & IB_QP_QKEY)) {
+ dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
+ return -EINVAL;
+ }
+
+ dmac = (u8 *)attr->ah_attr.roce.dmac;
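+ /* The WQE/SGE buffer base address is programmed with its low 3 bits dropped */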
+ context->wqe_sge_ba = (u32)(dma_handle >> 3);
+ qpc_mask->wqe_sge_ba = 0;
+
+ /*
+ * In the v2 engine, software passes both the context and a context
+ * mask to hardware when modifying a QP. For every field software
+ * needs to modify, all bits of that field in the context mask must
+ * be cleared to 0; otherwise they are set to 1.
+ */
+ roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
+ V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3));
+ roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
+ V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
+
+ roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
+ V2_QPC_BYTE_12_SQ_HOP_NUM_S,
+ hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
+ 0 : hr_dev->caps.mtt_hop_num);
+ roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
+ V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGE_HOP_NUM_M,
+ V2_QPC_BYTE_20_SGE_HOP_NUM_S,
+ hr_qp->sq.max_gs > 2 ? hr_dev->caps.mtt_hop_num : 0);
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGE_HOP_NUM_M,
+ V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_RQ_HOP_NUM_M,
+ V2_QPC_BYTE_20_RQ_HOP_NUM_S,
+ hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
+ 0 : hr_dev->caps.mtt_hop_num);
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_RQ_HOP_NUM_M,
+ V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
+
+ roce_set_field(context->byte_16_buf_ba_pg_sz,
+ V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
+ V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
+ hr_dev->caps.mtt_ba_pg_sz);
+ roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
+ V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
+ V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
+
+ roce_set_field(context->byte_16_buf_ba_pg_sz,
+ V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
+ V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
+ hr_dev->caps.mtt_buf_pg_sz);
+ roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
+ V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
+ V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
+
+ roce_set_field(context->byte_80_rnr_rx_cqn,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
+ roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
+
+ page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
+ >> PAGE_ADDR_SHIFT);
+ qpc_mask->rq_cur_blk_addr = 0;
+
+ roce_set_field(context->byte_92_srq_info,
+ V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
+ mtts[hr_qp->rq.offset / page_size]
+ >> (32 + PAGE_ADDR_SHIFT));
+ roce_set_field(qpc_mask->byte_92_srq_info,
+ V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
+
+ context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1]
+ >> PAGE_ADDR_SHIFT);
+ qpc_mask->rq_nxt_blk_addr = 0;
+
+ roce_set_field(context->byte_104_rq_sge,
+ V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
+ V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
+ mtts[hr_qp->rq.offset / page_size + 1]
+ >> (32 + PAGE_ADDR_SHIFT));
+ roce_set_field(qpc_mask->byte_104_rq_sge,
+ V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
+ V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
+
+ roce_set_field(context->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
+ roce_set_field(qpc_mask->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
+
+ roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
+ V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
+ V2_QPC_BYTE_132_TRRL_BA_S, 0);
+ context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4));
+ qpc_mask->trrl_ba = 0;
+ roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
+ V2_QPC_BYTE_140_TRRL_BA_S,
+ (u32)(dma_handle_3 >> (32 + 16 + 4)));
+ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
+ V2_QPC_BYTE_140_TRRL_BA_S, 0);
+
+ context->irrl_ba = (u32)(dma_handle_2 >> 6);
+ qpc_mask->irrl_ba = 0;
+ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
+ V2_QPC_BYTE_208_IRRL_BA_S,
+ dma_handle_2 >> (32 + 6));
+ roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
+ V2_QPC_BYTE_208_IRRL_BA_S, 0);
+
+ roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
+ roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
+
+ roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
+ hr_qp->sq_signal_bits);
+ roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
+ 0);
+
+ port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
+
+ smac = (u8 *)hr_dev->dev_addr[port];
+ /* Enable loopback when the DMAC equals the SMAC or loop_idc is 1 */
+ if (ether_addr_equal_unaligned(dmac, smac) ||
+ hr_dev->loop_idc == 0x1) {
+ roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
+ }
+
+ roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S,
+ ilog2((unsigned int)attr->max_dest_rd_atomic));
+ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S, 0);
+
+ roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
+ roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S, 0);
+
+ /* Configure GID index */
+ port_num = rdma_ah_get_port_num(&attr->ah_attr);
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGID_IDX_M,
+ V2_QPC_BYTE_20_SGID_IDX_S,
+ hns_get_gid_index(hr_dev, port_num - 1,
+ grh->sgid_index));
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGID_IDX_M,
+ V2_QPC_BYTE_20_SGID_IDX_S, 0);
+ memcpy(&(context->dmac), dmac, 4);
+ roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
+ V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
+ qpc_mask->dmac = 0;
+ roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
+ V2_QPC_BYTE_52_DMAC_S, 0);
+
+ roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
+ V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
+ roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
+ V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
+
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
+ V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
+ V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
+
+ roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
+ V2_QPC_BYTE_28_FL_S, grh->flow_label);
+ roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
+ V2_QPC_BYTE_28_FL_S, 0);
+
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+ V2_QPC_BYTE_24_TC_S, grh->traffic_class);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+ V2_QPC_BYTE_24_TC_S, 0);
+
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, 0);
+
+ memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
+
+ roce_set_field(context->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
+ V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
+ roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
+ roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
+ V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
+ roce_set_field(qpc_mask->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
+ V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
+
+ context->rq_rnr_timer = 0;
+ qpc_mask->rq_rnr_timer = 0;
+
+ roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+ V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
+ roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+ V2_QPC_BYTE_152_RAQ_PSN_S, 0);
+
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
+ V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
+ V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
+
+ roce_set_field(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_LP_SGEN_INI_M,
+ V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
+ roce_set_field(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_LP_SGEN_INI_M,
+ V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
+
+ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S,
+ ilog2((unsigned int)attr->max_rd_atomic));
+ roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S, 0);
+
+ roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
+ roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S, 0);
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+
+ return 0;
+}
+
+static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct device *dev = hr_dev->dev;
+ dma_addr_t dma_handle;
+ u32 page_size;
+ u64 *mtts;
+
+ /* Search qp buf's mtts */
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
+ hr_qp->mtt.first_seg, &dma_handle);
+ if (!mtts) {
+ dev_err(dev, "qp buf pa find failed\n");
+ return -EINVAL;
+ }
+
+ /* Return an error if any unsupported optional attribute is present */
+ if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
+ (attr_mask & IB_QP_QKEY) || (attr_mask & IB_QP_PATH_MIG_STATE) ||
+ (attr_mask & IB_QP_CUR_STATE) ||
+ (attr_mask & IB_QP_MIN_RNR_TIMER)) {
+ dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
+ return -EINVAL;
+ }
+
+ /*
+ * In the v2 engine, software passes both the context and a context
+ * mask to hardware when modifying a QP. For every field software
+ * needs to modify, all bits of that field in the context mask must
+ * be cleared to 0; otherwise they are set to 1.
+ */
+ roce_set_field(context->byte_60_qpst_mapid,
+ V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
+ V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt);
+ roce_set_field(qpc_mask->byte_60_qpst_mapid,
+ V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
+ V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0);
+
+ context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
+ roce_set_field(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
+ mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+ qpc_mask->sq_cur_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
+
+ page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ context->sq_cur_sge_blk_addr = hr_qp->sq.max_gs > 2 ?
+ ((u32)(mtts[hr_qp->sge.offset / page_size]
+ >> PAGE_ADDR_SHIFT)) : 0;
+ roce_set_field(context->byte_184_irrl_idx,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
+ hr_qp->sq.max_gs > 2 ?
+ (mtts[hr_qp->sge.offset / page_size] >>
+ (32 + PAGE_ADDR_SHIFT)) : 0);
+ qpc_mask->sq_cur_sge_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_184_irrl_idx,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
+
+ context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
+ roce_set_field(context->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
+ mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+ qpc_mask->rx_sq_cur_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
+
+ /*
+ * Clear some fields in the context. Because the context was
+ * zero-initialized, the fields themselves need not be written again;
+ * only the corresponding fields of the context mask are cleared.
+ */
+ roce_set_field(qpc_mask->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
+ V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
+
+ roce_set_field(qpc_mask->byte_240_irrl_tail,
+ V2_QPC_BYTE_240_RX_ACK_MSN_M,
+ V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
+
+ roce_set_field(context->byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RX_ACK_EPSN_M,
+ V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
+ roce_set_field(qpc_mask->byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RX_ACK_EPSN_M,
+ V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
+
+ roce_set_field(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
+ V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
+ roce_set_bit(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
+ roce_set_field(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_IRRL_PSN_M,
+ V2_QPC_BYTE_248_IRRL_PSN_S, 0);
+
+ roce_set_field(qpc_mask->byte_240_irrl_tail,
+ V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
+ V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
+
+ roce_set_field(context->byte_220_retry_psn_msn,
+ V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
+ V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
+ roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+ V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
+ V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
+
+ roce_set_field(context->byte_224_retry_msg,
+ V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
+ V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
+ roce_set_field(qpc_mask->byte_224_retry_msg,
+ V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
+ V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
+
+ roce_set_field(context->byte_224_retry_msg,
+ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
+ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
+ roce_set_field(qpc_mask->byte_224_retry_msg,
+ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
+ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
+
+ roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+ V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
+ V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
+
+ roce_set_bit(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
+
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
+ V2_QPC_BYTE_212_CHECK_FLG_S, 0);
+
+ roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
+ V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
+ V2_QPC_BYTE_212_RETRY_CNT_S, 0);
+
+ roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+ V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+ V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
+
+ roce_set_field(context->byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+ V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
+ roce_set_field(qpc_mask->byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+ V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
+
+ roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
+ V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
+ roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
+ V2_QPC_BYTE_244_RNR_CNT_S, 0);
+
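+ /*
+ * Initialize the limit sequence number (LSN); 0x100 is the value
+ * this driver programs by default.
+ */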
+ roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
+ V2_QPC_BYTE_212_LSN_S, 0x100);
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
+ V2_QPC_BYTE_212_LSN_S, 0);
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+ roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
+ V2_QPC_BYTE_28_AT_S, attr->timeout);
+ roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
+ V2_QPC_BYTE_28_AT_S, 0);
+ }
+
+ roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S,
+ rdma_ah_get_sl(&attr->ah_attr));
+ roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S, 0);
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+
+ roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
+ roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
+
+ roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
+ V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
+ roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+ V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
+ roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+ V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
+
+ return 0;
+}
+
+static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_v2_qp_context *context;
+ struct hns_roce_v2_qp_context *qpc_mask;
+ struct device *dev = hr_dev->dev;
+ int ret = -EINVAL;
+
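+ /*
+ * A single allocation holds both the QP context and its mask;
+ * the mask immediately follows the context in memory.
+ */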
+ context = kzalloc(2 * sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ qpc_mask = context + 1;
+ /*
+ * In the v2 engine, software passes both the context and a context
+ * mask to hardware when modifying a QP. Every field that software
+ * needs to modify must have all of its bits in the context mask
+ * cleared to 0; all other mask bits are left set to 1.
+ */
+ memset(qpc_mask, 0xff, sizeof(*qpc_mask));
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+ modify_qp_init_to_init(ibqp, attr, attr_mask, context,
+ qpc_mask);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
+ qpc_mask);
+ if (ret)
+ goto out;
+ } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
+ ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
+ qpc_mask);
+ if (ret)
+ goto out;
+ } else if ((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) ||
+ (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) ||
+ (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) ||
+ (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) ||
+ (cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) ||
+ (cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR)) {
+ /* Nothing */
+ ;
+ } else {
+ dev_err(dev, "Illegal state for QP!\n");
+ goto out;
+ }
+
+ /* Every state transition must update the QP state field */
+ roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
+ V2_QPC_BYTE_60_QP_ST_S, new_state);
+ roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
+ V2_QPC_BYTE_60_QP_ST_S, 0);
+
+ /* SW passes the context to HW */
+ ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state,
+ context, hr_qp);
+ if (ret) {
+ dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
+ goto out;
+ }
+
+ hr_qp->state = new_state;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ hr_qp->resp_depth = attr->max_dest_rd_atomic;
+ if (attr_mask & IB_QP_PORT) {
+ hr_qp->port = attr->port_num - 1;
+ hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+ }
+
+ if (new_state == IB_QPS_RESET && !ibqp->uobject) {
+ hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
+ ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
+ if (ibqp->send_cq != ibqp->recv_cq)
+ hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
+ hr_qp->qpn, NULL);
+
+ hr_qp->rq.head = 0;
+ hr_qp->rq.tail = 0;
+ hr_qp->sq.head = 0;
+ hr_qp->sq.tail = 0;
+ hr_qp->sq_next_wqe = 0;
+ hr_qp->next_sge = 0;
+ }
+
+out:
+ kfree(context);
+ return ret;
+}
+
+static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
+{
+ switch (state) {
+ case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
+ case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
+ case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
+ case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
+ case HNS_ROCE_QP_ST_SQ_DRAINING:
+ case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
+ case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
+ case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
+ default: return -1;
+ }
+}
+
+static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *hr_context)
+{
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
+ HNS_ROCE_CMD_QUERY_QPC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret) {
+ dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
+ goto out;
+ }
+
+ memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_v2_qp_context *context;
+ struct device *dev = hr_dev->dev;
+ int tmp_qp_state;
+ int state;
+ int ret;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ mutex_lock(&hr_qp->mutex);
+
+ if (hr_qp->state == IB_QPS_RESET) {
+ qp_attr->qp_state = IB_QPS_RESET;
+ ret = 0;
+ goto done;
+ }
+
+ ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
+ if (ret) {
+ dev_err(dev, "query qpc error\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ state = roce_get_field(context->byte_60_qpst_mapid,
+ V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
+ tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
+ if (tmp_qp_state == -1) {
+ dev_err(dev, "Illegal ib_qp_state\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ hr_qp->state = (u8)tmp_qp_state;
+ qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
+ qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S);
+ qp_attr->path_mig_state = IB_MIG_ARMED;
+ qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+ if (hr_qp->ibqp.qp_type == IB_QPT_UD)
+ qp_attr->qkey = V2_QKEY_VAL;
+
+ qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_S);
+ qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_S);
+ qp_attr->dest_qp_num = roce_get_field(context->byte_56_dqpn_err,
+ V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S);
+ qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RRE_S)) << 2) |
+ ((roce_get_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RWE_S)) << 1) |
+ ((roce_get_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_ATE_S)) << 3);
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
+ hr_qp->ibqp.qp_type == IB_QPT_UC) {
+ struct ib_global_route *grh =
+ rdma_ah_retrieve_grh(&qp_attr->ah_attr);
+
+ rdma_ah_set_sl(&qp_attr->ah_attr,
+ roce_get_field(context->byte_28_at_fl,
+ V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S));
+ grh->flow_label = roce_get_field(context->byte_28_at_fl,
+ V2_QPC_BYTE_28_FL_M,
+ V2_QPC_BYTE_28_FL_S);
+ grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGID_IDX_M,
+ V2_QPC_BYTE_20_SGID_IDX_S);
+ grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_HOP_LIMIT_M,
+ V2_QPC_BYTE_24_HOP_LIMIT_S);
+ grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_TC_M,
+ V2_QPC_BYTE_24_TC_S);
+
+ memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
+ }
+
+ qp_attr->port_num = hr_qp->port + 1;
+ qp_attr->sq_draining = 0;
+ qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
+ V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S);
+ qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
+ V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S);
+ qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_S);
+ qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
+ V2_QPC_BYTE_28_AT_M,
+ V2_QPC_BYTE_28_AT_S);
+ qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
+ V2_QPC_BYTE_212_RETRY_CNT_M,
+ V2_QPC_BYTE_212_RETRY_CNT_S);
+ qp_attr->rnr_retry = context->rq_rnr_timer;
+
+done:
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
+ qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
+
+ if (!ibqp->uobject) {
+ qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
+ } else {
+ qp_attr->cap.max_send_wr = 0;
+ qp_attr->cap.max_send_sge = 0;
+ }
+
+ qp_init_attr->cap = qp_attr->cap;
+
+out:
+ mutex_unlock(&hr_qp->mutex);
+ kfree(context);
+ return ret;
+}
+
+static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ int is_user)
+{
+ struct hns_roce_cq *send_cq, *recv_cq;
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
+ /* Modify the QP to the Reset state before destroying it */
+ ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
+ hr_qp->state, IB_QPS_RESET);
+ if (ret) {
+ dev_err(dev, "modify QP %06lx to ERR failed.\n",
+ hr_qp->qpn);
+ return ret;
+ }
+ }
+
+ send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
+ recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
+
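+ /*
+ * Hold both CQ locks so the QP's CQEs can be cleaned and the QP
+ * removed without racing against concurrent CQ polling.
+ */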
+ hns_roce_lock_cqs(send_cq, recv_cq);
+
+ if (!is_user) {
+ __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
+ to_hr_srq(hr_qp->ibqp.srq) : NULL);
+ if (send_cq != recv_cq)
+ __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
+ }
+
+ hns_roce_qp_remove(hr_dev, hr_qp);
+
+ hns_roce_unlock_cqs(send_cq, recv_cq);
+
+ hns_roce_qp_free(hr_dev, hr_qp);
+
+ /* For non-special QPs, release the QPN */
+ if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
+ (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
+ (hr_qp->ibqp.qp_type == IB_QPT_UD))
+ hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+
+ hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
+
+ if (is_user) {
+ ib_umem_release(hr_qp->umem);
+ } else {
+ kfree(hr_qp->sq.wrid);
+ kfree(hr_qp->rq.wrid);
+ hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+ }
+
+ return 0;
+}
+
+static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ int ret;
+
+ ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
+ if (ret) {
+ dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
+ return ret;
+ }
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
+ kfree(hr_to_hr_sqp(hr_qp));
+ else
+ kfree(hr_qp);
+
+ return 0;
+}
+
+static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
+ struct hns_roce_v2_cq_context *cq_context;
+ struct hns_roce_cq *hr_cq = to_hr_cq(cq);
+ struct hns_roce_v2_cq_context *cqc_mask;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ cq_context = mailbox->buf;
+ cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
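+
+ /*
+ * The mailbox buffer carries the CQ context followed by its mask;
+ * mask bits left as 1 tell hardware to keep the corresponding CQC
+ * fields unchanged.
+ */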
+
+ memset(cqc_mask, 0xff, sizeof(*cqc_mask));
+
+ roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
+ cq_count);
+ roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
+ 0);
+ roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
+ cq_period);
+ roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
+ 0);
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
+ HNS_ROCE_CMD_MODIFY_CQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ if (ret)
+ dev_err(hr_dev->dev, "MODIFY CQ Failed to cmd mailbox.\n");
+
+ return ret;
+}
+
+static const struct hns_roce_hw hns_roce_hw_v2 = {
+ .cmq_init = hns_roce_v2_cmq_init,
+ .cmq_exit = hns_roce_v2_cmq_exit,
+ .hw_profile = hns_roce_v2_profile,
+ .post_mbox = hns_roce_v2_post_mbox,
+ .chk_mbox = hns_roce_v2_chk_mbox,
+ .set_gid = hns_roce_v2_set_gid,
+ .set_mac = hns_roce_v2_set_mac,
+ .write_mtpt = hns_roce_v2_write_mtpt,
+ .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
+ .write_cqc = hns_roce_v2_write_cqc,
+ .set_hem = hns_roce_v2_set_hem,
+ .clear_hem = hns_roce_v2_clear_hem,
+ .modify_qp = hns_roce_v2_modify_qp,
+ .query_qp = hns_roce_v2_query_qp,
+ .destroy_qp = hns_roce_v2_destroy_qp,
+ .modify_cq = hns_roce_v2_modify_cq,
+ .post_send = hns_roce_v2_post_send,
+ .post_recv = hns_roce_v2_post_recv,
+ .req_notify_cq = hns_roce_v2_req_notify_cq,
+ .poll_cq = hns_roce_v2_poll_cq,
+};
+
+static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+ /* required last entry */
+ {0, }
+};
+
+static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
+{
+ const struct pci_device_id *id;
+
+ id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
+ if (!id) {
+ dev_err(hr_dev->dev, "device is not compatible!\n");
+ return -ENXIO;
+ }
+
+ hr_dev->hw = &hns_roce_hw_v2;
+ hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
+ hr_dev->odb_offset = hr_dev->sdb_offset;
+
+ /* Get info from NIC driver. */
+ hr_dev->reg_base = handle->rinfo.roce_io_base;
+ hr_dev->caps.num_ports = 1;
+ hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
+ hr_dev->iboe.phy_port[0] = 0;
+
+ /* cmd issue mode: 0 is poll, 1 is event */
+ hr_dev->cmd_mod = 0;
+ hr_dev->loop_idc = 0;
+
+ return 0;
+}
+
+static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+{
+ struct hns_roce_dev *hr_dev;
+ int ret;
+
+ hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
+ if (!hr_dev)
+ return -ENOMEM;
+
+ hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
+ if (!hr_dev->priv) {
+ ret = -ENOMEM;
+ goto error_failed_kzalloc;
+ }
+
+ hr_dev->pci_dev = handle->pdev;
+ hr_dev->dev = &handle->pdev->dev;
+ handle->priv = hr_dev;
+
+ ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
+ if (ret) {
+ dev_err(hr_dev->dev, "Get Configuration failed!\n");
+ goto error_failed_get_cfg;
+ }
+
+ ret = hns_roce_init(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
+ goto error_failed_get_cfg;
+ }
+
+ return 0;
+
+error_failed_get_cfg:
+ kfree(hr_dev->priv);
+
+error_failed_kzalloc:
+ ib_dealloc_device(&hr_dev->ib_dev);
+
+ return ret;
+}
+
+static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
+ bool reset)
+{
+ struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+
+ hns_roce_exit(hr_dev);
+ kfree(hr_dev->priv);
+ ib_dealloc_device(&hr_dev->ib_dev);
+}
+
+static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
+ .init_instance = hns_roce_hw_v2_init_instance,
+ .uninit_instance = hns_roce_hw_v2_uninit_instance,
+};
+
+static struct hnae3_client hns_roce_hw_v2_client = {
+ .name = "hns_roce_hw_v2",
+ .type = HNAE3_CLIENT_ROCE,
+ .ops = &hns_roce_hw_v2_ops,
+};
+
+static int __init hns_roce_hw_v2_init(void)
+{
+ return hnae3_register_client(&hns_roce_hw_v2_client);
+}
+
+static void __exit hns_roce_hw_v2_exit(void)
+{
+ hnae3_unregister_client(&hns_roce_hw_v2_client);
+}
+
+module_init(hns_roce_hw_v2_init);
+module_exit(hns_roce_hw_v2_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
+MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
+MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
+MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
new file mode 100644
index 000000000000..04b7a51b8efb
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -0,0 +1,1177 @@
+/*
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_HW_V2_H
+#define _HNS_ROCE_HW_V2_H
+
+#include <linux/bitops.h>
+
+#define HNS_ROCE_VF_QPC_BT_NUM 256
+#define HNS_ROCE_VF_SRQC_BT_NUM 64
+#define HNS_ROCE_VF_CQC_BT_NUM 64
+#define HNS_ROCE_VF_MPT_BT_NUM 64
+#define HNS_ROCE_VF_EQC_NUM 64
+#define HNS_ROCE_VF_SMAC_NUM 32
+#define HNS_ROCE_VF_SGID_NUM 32
+#define HNS_ROCE_VF_SL_NUM 8
+
+#define HNS_ROCE_V2_MAX_QP_NUM 0x2000
+#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
+#define HNS_ROCE_V2_MAX_CQ_NUM 0x8000
+#define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
+#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100
+#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff
+#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
+#define HNS_ROCE_V2_UAR_NUM 256
+#define HNS_ROCE_V2_PHY_UAR_NUM 1
+#define HNS_ROCE_V2_MAX_MTPT_NUM 0x8000
+#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
+#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
+#define HNS_ROCE_V2_MAX_PD_NUM 0x1000000
+#define HNS_ROCE_V2_MAX_QP_INIT_RDMA 128
+#define HNS_ROCE_V2_MAX_QP_DEST_RDMA 128
+#define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64
+#define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16
+#define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64
+#define HNS_ROCE_V2_QPC_ENTRY_SZ 256
+#define HNS_ROCE_V2_IRRL_ENTRY_SZ 64
+#define HNS_ROCE_V2_TRRL_ENTRY_SZ 48
+#define HNS_ROCE_V2_CQC_ENTRY_SZ 64
+#define HNS_ROCE_V2_MTPT_ENTRY_SZ 64
+#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
+#define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
+#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
+#define HNS_ROCE_INVALID_LKEY 0x100
+#define HNS_ROCE_CMQ_TX_TIMEOUT 200
+
+#define HNS_ROCE_CONTEXT_HOP_NUM 1
+#define HNS_ROCE_MTT_HOP_NUM 1
+#define HNS_ROCE_CQE_HOP_NUM 1
+#define HNS_ROCE_PBL_HOP_NUM 2
+#define HNS_ROCE_V2_GID_INDEX_NUM 256
+
+#define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18)
+
+#define HNS_ROCE_CMD_FLAG_IN_VALID_SHIFT 0
+#define HNS_ROCE_CMD_FLAG_OUT_VALID_SHIFT 1
+#define HNS_ROCE_CMD_FLAG_NEXT_SHIFT 2
+#define HNS_ROCE_CMD_FLAG_WR_OR_RD_SHIFT 3
+#define HNS_ROCE_CMD_FLAG_NO_INTR_SHIFT 4
+#define HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT 5
+
+#define HNS_ROCE_CMD_FLAG_IN BIT(HNS_ROCE_CMD_FLAG_IN_VALID_SHIFT)
+#define HNS_ROCE_CMD_FLAG_OUT BIT(HNS_ROCE_CMD_FLAG_OUT_VALID_SHIFT)
+#define HNS_ROCE_CMD_FLAG_NEXT BIT(HNS_ROCE_CMD_FLAG_NEXT_SHIFT)
+#define HNS_ROCE_CMD_FLAG_WR BIT(HNS_ROCE_CMD_FLAG_WR_OR_RD_SHIFT)
+#define HNS_ROCE_CMD_FLAG_NO_INTR BIT(HNS_ROCE_CMD_FLAG_NO_INTR_SHIFT)
+#define HNS_ROCE_CMD_FLAG_ERR_INTR BIT(HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT)
+
+#define HNS_ROCE_CMQ_DESC_NUM_S 3
+#define HNS_ROCE_CMQ_EN_B 16
+#define HNS_ROCE_CMQ_ENABLE BIT(HNS_ROCE_CMQ_EN_B)
+
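+/*
+ * Evaluates to true when step_idx refers to the last step of the
+ * multi-hop table walk for the given hop_num.
+ */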
+#define check_whether_last_step(hop_num, step_idx) \
+ ((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
+ (step_idx == 1 && hop_num == 1) || \
+ (step_idx == 2 && hop_num == 2))
+
+#define V2_CQ_DB_REQ_NOT_SOL 0
+#define V2_CQ_DB_REQ_NOT 1
+
+#define V2_CQ_STATE_VALID 1
+#define V2_QKEY_VAL 0x80010000
+
+#define GID_LEN_V2 16
+
+#define HNS_ROCE_V2_CQE_QPN_MASK 0x3ffff
+
+enum {
+ HNS_ROCE_V2_WQE_OP_SEND = 0x0,
+ HNS_ROCE_V2_WQE_OP_SEND_WITH_INV = 0x1,
+ HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM = 0x2,
+ HNS_ROCE_V2_WQE_OP_RDMA_WRITE = 0x3,
+ HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM = 0x4,
+ HNS_ROCE_V2_WQE_OP_RDMA_READ = 0x5,
+ HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP = 0x6,
+ HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD = 0x7,
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP = 0x8,
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD = 0x9,
+ HNS_ROCE_V2_WQE_OP_FAST_REG_PMR = 0xa,
+ HNS_ROCE_V2_WQE_OP_LOCAL_INV = 0xb,
+ HNS_ROCE_V2_WQE_OP_BIND_MW_TYPE = 0xc,
+ HNS_ROCE_V2_WQE_OP_MASK = 0x1f,
+};
+
+enum {
+ HNS_ROCE_SQ_OPCODE_SEND = 0x0,
+ HNS_ROCE_SQ_OPCODE_SEND_WITH_INV = 0x1,
+ HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM = 0x2,
+ HNS_ROCE_SQ_OPCODE_RDMA_WRITE = 0x3,
+ HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM = 0x4,
+ HNS_ROCE_SQ_OPCODE_RDMA_READ = 0x5,
+ HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP = 0x6,
+ HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD = 0x7,
+ HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP = 0x8,
+ HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD = 0x9,
+ HNS_ROCE_SQ_OPCODE_FAST_REG_WR = 0xa,
+ HNS_ROCE_SQ_OPCODE_LOCAL_INV = 0xb,
+ HNS_ROCE_SQ_OPCODE_BIND_MW = 0xc,
+};
+
+enum {
+ /* rq operations */
+ HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM = 0x0,
+ HNS_ROCE_V2_OPCODE_SEND = 0x1,
+ HNS_ROCE_V2_OPCODE_SEND_WITH_IMM = 0x2,
+ HNS_ROCE_V2_OPCODE_SEND_WITH_INV = 0x3,
+};
+
+enum {
+ HNS_ROCE_V2_SQ_DB = 0x0,
+ HNS_ROCE_V2_RQ_DB = 0x1,
+ HNS_ROCE_V2_SRQ_DB = 0x2,
+ HNS_ROCE_V2_CQ_DB_PTR = 0x3,
+ HNS_ROCE_V2_CQ_DB_NTR = 0x4,
+};
+
+enum {
+ HNS_ROCE_CQE_V2_SUCCESS = 0x00,
+ HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR = 0x01,
+ HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR = 0x02,
+ HNS_ROCE_CQE_V2_LOCAL_PROT_ERR = 0x04,
+ HNS_ROCE_CQE_V2_WR_FLUSH_ERR = 0x05,
+ HNS_ROCE_CQE_V2_MW_BIND_ERR = 0x06,
+ HNS_ROCE_CQE_V2_BAD_RESP_ERR = 0x10,
+ HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR = 0x11,
+ HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR = 0x12,
+ HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR = 0x13,
+ HNS_ROCE_CQE_V2_REMOTE_OP_ERR = 0x14,
+ HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR = 0x15,
+ HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR = 0x16,
+ HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR = 0x22,
+
+ HNS_ROCE_V2_CQE_STATUS_MASK = 0xff,
+};
+
+/* CMQ command */
+enum hns_roce_opcode_type {
+ HNS_ROCE_OPC_QUERY_HW_VER = 0x8000,
+ HNS_ROCE_OPC_CFG_GLOBAL_PARAM = 0x8001,
+ HNS_ROCE_OPC_ALLOC_PF_RES = 0x8004,
+ HNS_ROCE_OPC_QUERY_PF_RES = 0x8400,
+ HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401,
+ HNS_ROCE_OPC_CFG_BT_ATTR = 0x8506,
+};
+
+enum {
+ TYPE_CRQ,
+ TYPE_CSQ,
+};
+
+enum hns_roce_cmd_return_status {
+ CMD_EXEC_SUCCESS = 0,
+ CMD_NO_AUTH = 1,
+ CMD_NOT_EXEC = 2,
+ CMD_QUEUE_FULL = 3,
+};
+
+enum hns_roce_sgid_type {
+ GID_TYPE_FLAG_ROCE_V1 = 0,
+ GID_TYPE_FLAG_ROCE_V2_IPV4,
+ GID_TYPE_FLAG_ROCE_V2_IPV6,
+};
+
+struct hns_roce_v2_cq_context {
+ u32 byte_4_pg_ceqn;
+ u32 byte_8_cqn;
+ u32 cqe_cur_blk_addr;
+ u32 byte_16_hop_addr;
+ u32 cqe_nxt_blk_addr;
+ u32 byte_24_pgsz_addr;
+ u32 byte_28_cq_pi;
+ u32 byte_32_cq_ci;
+ u32 cqe_ba;
+ u32 byte_40_cqe_ba;
+ u32 byte_44_db_record;
+ u32 db_record_addr;
+ u32 byte_52_cqe_cnt;
+ u32 byte_56_cqe_period_maxcnt;
+ u32 cqe_report_timer;
+ u32 byte_64_se_cqe_idx;
+};
+#define V2_CQC_BYTE_4_CQ_ST_S 0
+#define V2_CQC_BYTE_4_CQ_ST_M GENMASK(1, 0)
+
+#define V2_CQC_BYTE_4_POLL_S 2
+
+#define V2_CQC_BYTE_4_SE_S 3
+
+#define V2_CQC_BYTE_4_OVER_IGNORE_S 4
+
+#define V2_CQC_BYTE_4_COALESCE_S 5
+
+#define V2_CQC_BYTE_4_ARM_ST_S 6
+#define V2_CQC_BYTE_4_ARM_ST_M GENMASK(7, 6)
+
+#define V2_CQC_BYTE_4_SHIFT_S 8
+#define V2_CQC_BYTE_4_SHIFT_M GENMASK(12, 8)
+
+#define V2_CQC_BYTE_4_CMD_SN_S 13
+#define V2_CQC_BYTE_4_CMD_SN_M GENMASK(14, 13)
+
+#define V2_CQC_BYTE_4_CEQN_S 15
+#define V2_CQC_BYTE_4_CEQN_M GENMASK(23, 15)
+
+#define V2_CQC_BYTE_4_PAGE_OFFSET_S 24
+#define V2_CQC_BYTE_4_PAGE_OFFSET_M GENMASK(31, 24)
+
+#define V2_CQC_BYTE_8_CQN_S 0
+#define V2_CQC_BYTE_8_CQN_M GENMASK(23, 0)
+
+#define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S 0
+#define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_CQC_BYTE_16_CQE_HOP_NUM_S 30
+#define V2_CQC_BYTE_16_CQE_HOP_NUM_M GENMASK(31, 30)
+
+#define V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S 0
+#define V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_CQC_BYTE_24_CQE_BA_PG_SZ_S 24
+#define V2_CQC_BYTE_24_CQE_BA_PG_SZ_M GENMASK(27, 24)
+
+#define V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S 28
+#define V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M GENMASK(31, 28)
+
+#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S 0
+#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M GENMASK(23, 0)
+
+#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S 0
+#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M GENMASK(23, 0)
+
+#define V2_CQC_BYTE_40_CQE_BA_S 0
+#define V2_CQC_BYTE_40_CQE_BA_M GENMASK(28, 0)
+
+#define V2_CQC_BYTE_44_DB_RECORD_EN_S 0
+
+#define V2_CQC_BYTE_52_CQE_CNT_S 0
+#define V2_CQC_BYTE_52_CQE_CNT_M GENMASK(23, 0)
+
+#define V2_CQC_BYTE_56_CQ_MAX_CNT_S 0
+#define V2_CQC_BYTE_56_CQ_MAX_CNT_M GENMASK(15, 0)
+
+#define V2_CQC_BYTE_56_CQ_PERIOD_S 16
+#define V2_CQC_BYTE_56_CQ_PERIOD_M GENMASK(31, 16)
+
+#define V2_CQC_BYTE_64_SE_CQE_IDX_S 0
+#define V2_CQC_BYTE_64_SE_CQE_IDX_M GENMASK(23, 0)
+
+enum {
+ V2_MPT_ST_VALID = 0x1,
+};
+
+enum hns_roce_v2_qp_state {
+ HNS_ROCE_QP_ST_RST,
+ HNS_ROCE_QP_ST_INIT,
+ HNS_ROCE_QP_ST_RTR,
+ HNS_ROCE_QP_ST_RTS,
+ HNS_ROCE_QP_ST_SQER,
+ HNS_ROCE_QP_ST_SQD,
+ HNS_ROCE_QP_ST_ERR,
+ HNS_ROCE_QP_ST_SQ_DRAINING,
+ HNS_ROCE_QP_NUM_ST
+};
+
+struct hns_roce_v2_qp_context {
+ u32 byte_4_sqpn_tst;
+ u32 wqe_sge_ba;
+ u32 byte_12_sq_hop;
+ u32 byte_16_buf_ba_pg_sz;
+ u32 byte_20_smac_sgid_idx;
+ u32 byte_24_mtu_tc;
+ u32 byte_28_at_fl;
+ u8 dgid[GID_LEN_V2];
+ u32 dmac;
+ u32 byte_52_udpspn_dmac;
+ u32 byte_56_dqpn_err;
+ u32 byte_60_qpst_mapid;
+ u32 qkey_xrcd;
+ u32 byte_68_rq_db;
+ u32 rq_db_record_addr;
+ u32 byte_76_srqn_op_en;
+ u32 byte_80_rnr_rx_cqn;
+ u32 byte_84_rq_ci_pi;
+ u32 rq_cur_blk_addr;
+ u32 byte_92_srq_info;
+ u32 byte_96_rx_reqmsn;
+ u32 rq_nxt_blk_addr;
+ u32 byte_104_rq_sge;
+ u32 byte_108_rx_reqepsn;
+ u32 rq_rnr_timer;
+ u32 rx_msg_len;
+ u32 rx_rkey_pkt_info;
+ u64 rx_va;
+ u32 byte_132_trrl;
+ u32 trrl_ba;
+ u32 byte_140_raq;
+ u32 byte_144_raq;
+ u32 byte_148_raq;
+ u32 byte_152_raq;
+ u32 byte_156_raq;
+ u32 byte_160_sq_ci_pi;
+ u32 sq_cur_blk_addr;
+ u32 byte_168_irrl_idx;
+ u32 byte_172_sq_psn;
+ u32 byte_176_msg_pktn;
+ u32 sq_cur_sge_blk_addr;
+ u32 byte_184_irrl_idx;
+ u32 cur_sge_offset;
+ u32 byte_192_ext_sge;
+ u32 byte_196_sq_psn;
+ u32 byte_200_sq_max;
+ u32 irrl_ba;
+ u32 byte_208_irrl;
+ u32 byte_212_lsn;
+ u32 sq_timer;
+ u32 byte_220_retry_psn_msn;
+ u32 byte_224_retry_msg;
+ u32 rx_sq_cur_blk_addr;
+ u32 byte_232_irrl_sge;
+ u32 irrl_cur_sge_offset;
+ u32 byte_240_irrl_tail;
+ u32 byte_244_rnr_rxack;
+ u32 byte_248_ack_psn;
+ u32 byte_252_err_txcqn;
+ u32 byte_256_sqflush_rqcqe;
+};
+
+#define V2_QPC_BYTE_4_TST_S 0
+#define V2_QPC_BYTE_4_TST_M GENMASK(2, 0)
+
+#define V2_QPC_BYTE_4_SGE_SHIFT_S 3
+#define V2_QPC_BYTE_4_SGE_SHIFT_M GENMASK(7, 3)
+
+#define V2_QPC_BYTE_4_SQPN_S 8
+#define V2_QPC_BYTE_4_SQPN_M GENMASK(31, 8)
+
+#define V2_QPC_BYTE_12_WQE_SGE_BA_S 0
+#define V2_QPC_BYTE_12_WQE_SGE_BA_M GENMASK(28, 0)
+
+#define V2_QPC_BYTE_12_SQ_HOP_NUM_S 29
+#define V2_QPC_BYTE_12_SQ_HOP_NUM_M GENMASK(30, 29)
+
+#define V2_QPC_BYTE_12_RSVD_LKEY_EN_S 31
+
+#define V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S 0
+#define V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M GENMASK(3, 0)
+
+#define V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S 4
+#define V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M GENMASK(7, 4)
+
+#define V2_QPC_BYTE_16_PD_S 8
+#define V2_QPC_BYTE_16_PD_M GENMASK(31, 8)
+
+#define V2_QPC_BYTE_20_RQ_HOP_NUM_S 0
+#define V2_QPC_BYTE_20_RQ_HOP_NUM_M GENMASK(1, 0)
+
+#define V2_QPC_BYTE_20_SGE_HOP_NUM_S 2
+#define V2_QPC_BYTE_20_SGE_HOP_NUM_M GENMASK(3, 2)
+
+#define V2_QPC_BYTE_20_RQWS_S 4
+#define V2_QPC_BYTE_20_RQWS_M GENMASK(7, 4)
+
+#define V2_QPC_BYTE_20_SQ_SHIFT_S 8
+#define V2_QPC_BYTE_20_SQ_SHIFT_M GENMASK(11, 8)
+
+#define V2_QPC_BYTE_20_RQ_SHIFT_S 12
+#define V2_QPC_BYTE_20_RQ_SHIFT_M GENMASK(15, 12)
+
+#define V2_QPC_BYTE_20_SGID_IDX_S 16
+#define V2_QPC_BYTE_20_SGID_IDX_M GENMASK(23, 16)
+
+#define V2_QPC_BYTE_20_SMAC_IDX_S 24
+#define V2_QPC_BYTE_20_SMAC_IDX_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_24_HOP_LIMIT_S 0
+#define V2_QPC_BYTE_24_HOP_LIMIT_M GENMASK(7, 0)
+
+#define V2_QPC_BYTE_24_TC_S 8
+#define V2_QPC_BYTE_24_TC_M GENMASK(15, 8)
+
+#define V2_QPC_BYTE_24_VLAN_IDX_S 16
+#define V2_QPC_BYTE_24_VLAN_IDX_M GENMASK(27, 16)
+
+#define V2_QPC_BYTE_24_MTU_S 28
+#define V2_QPC_BYTE_24_MTU_M GENMASK(31, 28)
+
+#define V2_QPC_BYTE_28_FL_S 0
+#define V2_QPC_BYTE_28_FL_M GENMASK(19, 0)
+
+#define V2_QPC_BYTE_28_SL_S 20
+#define V2_QPC_BYTE_28_SL_M GENMASK(23, 20)
+
+#define V2_QPC_BYTE_28_CNP_TX_FLAG_S 24
+
+#define V2_QPC_BYTE_28_CE_FLAG_S 25
+
+#define V2_QPC_BYTE_28_LBI_S 26
+
+#define V2_QPC_BYTE_28_AT_S 27
+#define V2_QPC_BYTE_28_AT_M GENMASK(31, 27)
+
+#define V2_QPC_BYTE_52_DMAC_S 0
+#define V2_QPC_BYTE_52_DMAC_M GENMASK(15, 0)
+
+#define V2_QPC_BYTE_52_UDPSPN_S 16
+#define V2_QPC_BYTE_52_UDPSPN_M GENMASK(31, 16)
+
+#define V2_QPC_BYTE_56_DQPN_S 0
+#define V2_QPC_BYTE_56_DQPN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_56_SQ_TX_ERR_S 24
+#define V2_QPC_BYTE_56_SQ_RX_ERR_S 25
+#define V2_QPC_BYTE_56_RQ_TX_ERR_S 26
+#define V2_QPC_BYTE_56_RQ_RX_ERR_S 27
+
+#define V2_QPC_BYTE_56_LP_PKTN_INI_S 28
+#define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28)
+
+#define V2_QPC_BYTE_60_MAPID_S 0
+#define V2_QPC_BYTE_60_MAPID_M GENMASK(12, 0)
+
+#define V2_QPC_BYTE_60_INNER_MAP_IND_S 13
+
+#define V2_QPC_BYTE_60_SQ_MAP_IND_S 14
+
+#define V2_QPC_BYTE_60_RQ_MAP_IND_S 15
+
+#define V2_QPC_BYTE_60_TEMPID_S 16
+#define V2_QPC_BYTE_60_TEMPID_M GENMASK(22, 16)
+
+#define V2_QPC_BYTE_60_EXT_MAP_IND_S 23
+
+#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S 24
+#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M GENMASK(26, 24)
+
+#define V2_QPC_BYTE_60_SQ_RLS_IND_S 27
+
+#define V2_QPC_BYTE_60_SQ_EXT_IND_S 28
+
+#define V2_QPC_BYTE_60_QP_ST_S 29
+#define V2_QPC_BYTE_60_QP_ST_M GENMASK(31, 29)
+
+#define V2_QPC_BYTE_68_RQ_RECORD_EN_S 0
+
+#define V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S 1
+#define V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M GENMASK(31, 1)
+
+#define V2_QPC_BYTE_76_SRQN_S 0
+#define V2_QPC_BYTE_76_SRQN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_76_SRQ_EN_S 24
+
+#define V2_QPC_BYTE_76_RRE_S 25
+
+#define V2_QPC_BYTE_76_RWE_S 26
+
+#define V2_QPC_BYTE_76_ATE_S 27
+
+#define V2_QPC_BYTE_76_RQIE_S 28
+
+#define V2_QPC_BYTE_80_RX_CQN_S 0
+#define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_80_MIN_RNR_TIME_S 27
+#define V2_QPC_BYTE_80_MIN_RNR_TIME_M GENMASK(31, 27)
+
+#define V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S 0
+#define V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M GENMASK(15, 0)
+
+#define V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S 16
+#define V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M GENMASK(31, 16)
+
+#define V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S 0
+#define V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_QPC_BYTE_92_SRQ_INFO_S 20
+#define V2_QPC_BYTE_92_SRQ_INFO_M GENMASK(31, 20)
+
+#define V2_QPC_BYTE_96_RX_REQ_MSN_S 0
+#define V2_QPC_BYTE_96_RX_REQ_MSN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S 0
+#define V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S 24
+#define V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_108_INV_CREDIT_S 0
+
+#define V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S 3
+
+#define V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S 4
+#define V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M GENMASK(6, 4)
+
+#define V2_QPC_BYTE_108_RX_REQ_RNR_S 7
+
+#define V2_QPC_BYTE_108_RX_REQ_EPSN_S 8
+#define V2_QPC_BYTE_108_RX_REQ_EPSN_M GENMASK(31, 8)
+
+#define V2_QPC_BYTE_132_TRRL_HEAD_MAX_S 0
+#define V2_QPC_BYTE_132_TRRL_HEAD_MAX_M GENMASK(7, 0)
+
+#define V2_QPC_BYTE_132_TRRL_TAIL_MAX_S 8
+#define V2_QPC_BYTE_132_TRRL_TAIL_MAX_M GENMASK(15, 8)
+
+#define V2_QPC_BYTE_132_TRRL_BA_S 16
+#define V2_QPC_BYTE_132_TRRL_BA_M GENMASK(31, 16)
+
+#define V2_QPC_BYTE_140_TRRL_BA_S 0
+#define V2_QPC_BYTE_140_TRRL_BA_M GENMASK(11, 0)
+
+#define V2_QPC_BYTE_140_RR_MAX_S 12
+#define V2_QPC_BYTE_140_RR_MAX_M GENMASK(14, 12)
+
+#define V2_QPC_BYTE_140_RSVD_RAQ_MAP_S 15
+
+#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S 16
+#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M GENMASK(23, 16)
+
+#define V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S 24
+#define V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S 0
+#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S 24
+
+#define V2_QPC_BYTE_144_RAQ_CREDIT_S 25
+#define V2_QPC_BYTE_144_RAQ_CREDIT_M GENMASK(29, 25)
+
+#define V2_QPC_BYTE_144_RESP_RTY_FLG_S 31
+
+#define V2_QPC_BYTE_148_RQ_MSN_S 0
+#define V2_QPC_BYTE_148_RQ_MSN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_148_RAQ_SYNDROME_S 24
+#define V2_QPC_BYTE_148_RAQ_SYNDROME_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_152_RAQ_PSN_S 8
+#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(31, 8)
+
+#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S 24
+#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_156_RAQ_USE_PKTN_S 0
+#define V2_QPC_BYTE_156_RAQ_USE_PKTN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S 0
+#define V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M GENMASK(15, 0)
+
+#define V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S 16
+#define V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M GENMASK(31, 16)
+
+#define V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S 0
+#define V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S 20
+
+#define V2_QPC_BYTE_168_SQ_INVLD_FLG_S 21
+
+#define V2_QPC_BYTE_168_LP_SGEN_INI_S 22
+#define V2_QPC_BYTE_168_LP_SGEN_INI_M GENMASK(23, 22)
+
+#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_S 24
+#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_M GENMASK(27, 24)
+
+#define V2_QPC_BYTE_168_IRRL_IDX_LSB_S 28
+#define V2_QPC_BYTE_168_IRRL_IDX_LSB_M GENMASK(31, 28)
+
+#define V2_QPC_BYTE_172_ACK_REQ_FREQ_S 0
+#define V2_QPC_BYTE_172_ACK_REQ_FREQ_M GENMASK(5, 0)
+
+#define V2_QPC_BYTE_172_MSG_RNR_FLG_S 6
+
+#define V2_QPC_BYTE_172_FRE_S 7
+
+#define V2_QPC_BYTE_172_SQ_CUR_PSN_S 8
+#define V2_QPC_BYTE_172_SQ_CUR_PSN_M GENMASK(31, 8)
+
+#define V2_QPC_BYTE_176_MSG_USE_PKTN_S 0
+#define V2_QPC_BYTE_176_MSG_USE_PKTN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_176_IRRL_HEAD_PRE_S 24
+#define V2_QPC_BYTE_176_IRRL_HEAD_PRE_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S 0
+#define V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_QPC_BYTE_184_IRRL_IDX_MSB_S 20
+#define V2_QPC_BYTE_184_IRRL_IDX_MSB_M GENMASK(31, 20)
+
+#define V2_QPC_BYTE_192_CUR_SGE_IDX_S 0
+#define V2_QPC_BYTE_192_CUR_SGE_IDX_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S 24
+#define V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_196_IRRL_HEAD_S 0
+#define V2_QPC_BYTE_196_IRRL_HEAD_M GENMASK(7, 0)
+
+#define V2_QPC_BYTE_196_SQ_MAX_PSN_S 8
+#define V2_QPC_BYTE_196_SQ_MAX_PSN_M GENMASK(31, 8)
+
+#define V2_QPC_BYTE_200_SQ_MAX_IDX_S 0
+#define V2_QPC_BYTE_200_SQ_MAX_IDX_M GENMASK(15, 0)
+
+#define V2_QPC_BYTE_200_LCL_OPERATED_CNT_S 16
+#define V2_QPC_BYTE_200_LCL_OPERATED_CNT_M GENMASK(31, 16)
+
+#define V2_QPC_BYTE_208_IRRL_BA_S 0
+#define V2_QPC_BYTE_208_IRRL_BA_M GENMASK(25, 0)
+
+#define V2_QPC_BYTE_208_PKT_RNR_FLG_S 26
+
+#define V2_QPC_BYTE_208_PKT_RTY_FLG_S 27
+
+#define V2_QPC_BYTE_208_RMT_E2E_S 28
+
+#define V2_QPC_BYTE_208_SR_MAX_S 29
+#define V2_QPC_BYTE_208_SR_MAX_M GENMASK(31, 29)
+
+#define V2_QPC_BYTE_212_LSN_S 0
+#define V2_QPC_BYTE_212_LSN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_212_RETRY_NUM_INIT_S 24
+#define V2_QPC_BYTE_212_RETRY_NUM_INIT_M GENMASK(26, 24)
+
+#define V2_QPC_BYTE_212_CHECK_FLG_S 27
+#define V2_QPC_BYTE_212_CHECK_FLG_M GENMASK(28, 27)
+
+#define V2_QPC_BYTE_212_RETRY_CNT_S 29
+#define V2_QPC_BYTE_212_RETRY_CNT_M GENMASK(31, 29)
+
+#define V2_QPC_BYTE_220_RETRY_MSG_MSN_S 0
+#define V2_QPC_BYTE_220_RETRY_MSG_MSN_M GENMASK(15, 0)
+
+#define V2_QPC_BYTE_220_RETRY_MSG_PSN_S 16
+#define V2_QPC_BYTE_220_RETRY_MSG_PSN_M GENMASK(31, 16)
+
+#define V2_QPC_BYTE_224_RETRY_MSG_PSN_S 0
+#define V2_QPC_BYTE_224_RETRY_MSG_PSN_M GENMASK(7, 0)
+
+#define V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S 8
+#define V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M GENMASK(31, 8)
+
+#define V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S 0
+#define V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_QPC_BYTE_232_IRRL_SGE_IDX_S 20
+#define V2_QPC_BYTE_232_IRRL_SGE_IDX_M GENMASK(28, 20)
+
+#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_S 0
+#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_M GENMASK(7, 0)
+
+#define V2_QPC_BYTE_240_IRRL_TAIL_RD_S 8
+#define V2_QPC_BYTE_240_IRRL_TAIL_RD_M GENMASK(15, 8)
+
+#define V2_QPC_BYTE_240_RX_ACK_MSN_S 16
+#define V2_QPC_BYTE_240_RX_ACK_MSN_M GENMASK(31, 16)
+
+#define V2_QPC_BYTE_244_RX_ACK_EPSN_S 0
+#define V2_QPC_BYTE_244_RX_ACK_EPSN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_244_RNR_NUM_INIT_S 24
+#define V2_QPC_BYTE_244_RNR_NUM_INIT_M GENMASK(26, 24)
+
+#define V2_QPC_BYTE_244_RNR_CNT_S 27
+#define V2_QPC_BYTE_244_RNR_CNT_M GENMASK(29, 27)
+
+#define V2_QPC_BYTE_248_IRRL_PSN_S 0
+#define V2_QPC_BYTE_248_IRRL_PSN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_248_ACK_PSN_ERR_S 24
+
+#define V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S 25
+#define V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M GENMASK(26, 25)
+
+#define V2_QPC_BYTE_248_IRRL_PSN_VLD_S 27
+
+#define V2_QPC_BYTE_248_RNR_RETRY_FLAG_S 28
+
+#define V2_QPC_BYTE_248_CQ_ERR_IND_S 31
+
+#define V2_QPC_BYTE_252_TX_CQN_S 0
+#define V2_QPC_BYTE_252_TX_CQN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_252_SIG_TYPE_S 24
+
+#define V2_QPC_BYTE_252_ERR_TYPE_S 25
+#define V2_QPC_BYTE_252_ERR_TYPE_M GENMASK(31, 25)
+
+#define V2_QPC_BYTE_256_RQ_CQE_IDX_S 0
+#define V2_QPC_BYTE_256_RQ_CQE_IDX_M GENMASK(15, 0)
+
+#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_S 16
+#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_M GENMASK(31, 16)
+
+struct hns_roce_v2_cqe {
+ u32 byte_4;
+ u32 rkey_immtdata;
+ u32 byte_12;
+ u32 byte_16;
+ u32 byte_cnt;
+ u32 smac;
+ u32 byte_28;
+ u32 byte_32;
+};
+
+#define V2_CQE_BYTE_4_OPCODE_S 0
+#define V2_CQE_BYTE_4_OPCODE_M GENMASK(4, 0)
+
+#define V2_CQE_BYTE_4_RQ_INLINE_S 5
+
+#define V2_CQE_BYTE_4_S_R_S 6
+
+#define V2_CQE_BYTE_4_OWNER_S 7
+
+#define V2_CQE_BYTE_4_STATUS_S 8
+#define V2_CQE_BYTE_4_STATUS_M GENMASK(15, 8)
+
+#define V2_CQE_BYTE_4_WQE_INDX_S 16
+#define V2_CQE_BYTE_4_WQE_INDX_M GENMASK(31, 16)
+
+#define V2_CQE_BYTE_12_XRC_SRQN_S 0
+#define V2_CQE_BYTE_12_XRC_SRQN_M GENMASK(23, 0)
+
+#define V2_CQE_BYTE_16_LCL_QPN_S 0
+#define V2_CQE_BYTE_16_LCL_QPN_M GENMASK(23, 0)
+
+#define V2_CQE_BYTE_16_SUB_STATUS_S 24
+#define V2_CQE_BYTE_16_SUB_STATUS_M GENMASK(31, 24)
+
+#define V2_CQE_BYTE_28_SMAC_4_S 0
+#define V2_CQE_BYTE_28_SMAC_4_M GENMASK(7, 0)
+
+#define V2_CQE_BYTE_28_SMAC_5_S 8
+#define V2_CQE_BYTE_28_SMAC_5_M GENMASK(15, 8)
+
+#define V2_CQE_BYTE_28_PORT_TYPE_S 16
+#define V2_CQE_BYTE_28_PORT_TYPE_M GENMASK(17, 16)
+
+#define V2_CQE_BYTE_32_RMT_QPN_S 0
+#define V2_CQE_BYTE_32_RMT_QPN_M GENMASK(23, 0)
+
+#define V2_CQE_BYTE_32_SL_S 24
+#define V2_CQE_BYTE_32_SL_M GENMASK(26, 24)
+
+#define V2_CQE_BYTE_32_PORTN_S 27
+#define V2_CQE_BYTE_32_PORTN_M GENMASK(29, 27)
+
+#define V2_CQE_BYTE_32_GRH_S 30
+
+#define V2_CQE_BYTE_32_LPK_S 31
+
+struct hns_roce_v2_mpt_entry {
+ __le32 byte_4_pd_hop_st;
+ __le32 byte_8_mw_cnt_en;
+ __le32 byte_12_mw_pa;
+ __le32 bound_lkey;
+ __le32 len_l;
+ __le32 len_h;
+ __le32 lkey;
+ __le32 va_l;
+ __le32 va_h;
+ __le32 pbl_size;
+ __le32 pbl_ba_l;
+ __le32 byte_48_mode_ba;
+ __le32 pa0_l;
+ __le32 byte_56_pa0_h;
+ __le32 pa1_l;
+ __le32 byte_64_buf_pa1;
+};
+
+#define V2_MPT_BYTE_4_MPT_ST_S 0
+#define V2_MPT_BYTE_4_MPT_ST_M GENMASK(1, 0)
+
+#define V2_MPT_BYTE_4_PBL_HOP_NUM_S 2
+#define V2_MPT_BYTE_4_PBL_HOP_NUM_M GENMASK(3, 2)
+
+#define V2_MPT_BYTE_4_PBL_BA_PG_SZ_S 4
+#define V2_MPT_BYTE_4_PBL_BA_PG_SZ_M GENMASK(7, 4)
+
+#define V2_MPT_BYTE_4_PD_S 8
+#define V2_MPT_BYTE_4_PD_M GENMASK(31, 8)
+
+#define V2_MPT_BYTE_8_RA_EN_S 0
+
+#define V2_MPT_BYTE_8_R_INV_EN_S 1
+
+#define V2_MPT_BYTE_8_L_INV_EN_S 2
+
+#define V2_MPT_BYTE_8_BIND_EN_S 3
+
+#define V2_MPT_BYTE_8_ATOMIC_EN_S 4
+
+#define V2_MPT_BYTE_8_RR_EN_S 5
+
+#define V2_MPT_BYTE_8_RW_EN_S 6
+
+#define V2_MPT_BYTE_8_LW_EN_S 7
+
+#define V2_MPT_BYTE_12_PA_S 1
+
+#define V2_MPT_BYTE_12_INNER_PA_VLD_S 7
+
+#define V2_MPT_BYTE_12_MW_BIND_QPN_S 8
+#define V2_MPT_BYTE_12_MW_BIND_QPN_M GENMASK(31, 8)
+
+#define V2_MPT_BYTE_48_PBL_BA_H_S 0
+#define V2_MPT_BYTE_48_PBL_BA_H_M GENMASK(28, 0)
+
+#define V2_MPT_BYTE_48_BLK_MODE_S 29
+
+#define V2_MPT_BYTE_56_PA0_H_S 0
+#define V2_MPT_BYTE_56_PA0_H_M GENMASK(25, 0)
+
+#define V2_MPT_BYTE_64_PA1_H_S 0
+#define V2_MPT_BYTE_64_PA1_H_M GENMASK(25, 0)
+
+#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S 28
+#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M GENMASK(31, 28)
+
+#define V2_DB_BYTE_4_TAG_S 0
+#define V2_DB_BYTE_4_TAG_M GENMASK(23, 0)
+
+#define V2_DB_BYTE_4_CMD_S 24
+#define V2_DB_BYTE_4_CMD_M GENMASK(27, 24)
+
+#define V2_DB_PARAMETER_CONS_IDX_S 0
+#define V2_DB_PARAMETER_CONS_IDX_M GENMASK(15, 0)
+
+#define V2_DB_PARAMETER_SL_S 16
+#define V2_DB_PARAMETER_SL_M GENMASK(18, 16)
+
+struct hns_roce_v2_cq_db {
+ u32 byte_4;
+ u32 parameter;
+};
+
+#define V2_CQ_DB_BYTE_4_TAG_S 0
+#define V2_CQ_DB_BYTE_4_TAG_M GENMASK(23, 0)
+
+#define V2_CQ_DB_BYTE_4_CMD_S 24
+#define V2_CQ_DB_BYTE_4_CMD_M GENMASK(27, 24)
+
+#define V2_CQ_DB_PARAMETER_CONS_IDX_S 0
+#define V2_CQ_DB_PARAMETER_CONS_IDX_M GENMASK(23, 0)
+
+#define V2_CQ_DB_PARAMETER_CMD_SN_S 25
+#define V2_CQ_DB_PARAMETER_CMD_SN_M GENMASK(26, 25)
+
+#define V2_CQ_DB_PARAMETER_NOTIFY_S 24
+
+struct hns_roce_v2_rc_send_wqe {
+ u32 byte_4;
+ u32 msg_len;
+ u32 inv_key_immtdata;
+ u32 byte_16;
+ u32 byte_20;
+ u32 rkey;
+ u64 va;
+};
+
+#define V2_RC_SEND_WQE_BYTE_4_OPCODE_S 0
+#define V2_RC_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
+
+#define V2_RC_SEND_WQE_BYTE_4_OWNER_S 7
+
+#define V2_RC_SEND_WQE_BYTE_4_CQE_S 8
+
+#define V2_RC_SEND_WQE_BYTE_4_FENCE_S 9
+
+#define V2_RC_SEND_WQE_BYTE_4_SO_S 10
+
+#define V2_RC_SEND_WQE_BYTE_4_SE_S 11
+
+#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12
+
+#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0
+#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0)
+
+#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S 24
+#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24)
+
+#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
+#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
+
+struct hns_roce_v2_wqe_data_seg {
+ __be32 len;
+ __be32 lkey;
+ __be64 addr;
+};
+
+struct hns_roce_v2_db {
+ u32 byte_4;
+ u32 parameter;
+};
+
+struct hns_roce_query_version {
+ __le16 rocee_vendor_id;
+ __le16 rocee_hw_version;
+ __le32 rsv[5];
+};
+
+struct hns_roce_cfg_global_param {
+ __le32 time_cfg_udp_port;
+ __le32 rsv[5];
+};
+
+#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S 0
+#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M GENMASK(9, 0)
+
+#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S 16
+#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M GENMASK(31, 16)
+
+struct hns_roce_pf_res {
+ __le32 rsv;
+ __le32 qpc_bt_idx_num;
+ __le32 srqc_bt_idx_num;
+ __le32 cqc_bt_idx_num;
+ __le32 mpt_bt_idx_num;
+ __le32 eqc_bt_idx_num;
+};
+
+#define PF_RES_DATA_1_PF_QPC_BT_IDX_S 0
+#define PF_RES_DATA_1_PF_QPC_BT_IDX_M GENMASK(10, 0)
+
+#define PF_RES_DATA_1_PF_QPC_BT_NUM_S 16
+#define PF_RES_DATA_1_PF_QPC_BT_NUM_M GENMASK(27, 16)
+
+#define PF_RES_DATA_2_PF_SRQC_BT_IDX_S 0
+#define PF_RES_DATA_2_PF_SRQC_BT_IDX_M GENMASK(8, 0)
+
+#define PF_RES_DATA_2_PF_SRQC_BT_NUM_S 16
+#define PF_RES_DATA_2_PF_SRQC_BT_NUM_M GENMASK(25, 16)
+
+#define PF_RES_DATA_3_PF_CQC_BT_IDX_S 0
+#define PF_RES_DATA_3_PF_CQC_BT_IDX_M GENMASK(8, 0)
+
+#define PF_RES_DATA_3_PF_CQC_BT_NUM_S 16
+#define PF_RES_DATA_3_PF_CQC_BT_NUM_M GENMASK(25, 16)
+
+#define PF_RES_DATA_4_PF_MPT_BT_IDX_S 0
+#define PF_RES_DATA_4_PF_MPT_BT_IDX_M GENMASK(8, 0)
+
+#define PF_RES_DATA_4_PF_MPT_BT_NUM_S 16
+#define PF_RES_DATA_4_PF_MPT_BT_NUM_M GENMASK(25, 16)
+
+#define PF_RES_DATA_5_PF_EQC_BT_IDX_S 0
+#define PF_RES_DATA_5_PF_EQC_BT_IDX_M GENMASK(8, 0)
+
+#define PF_RES_DATA_5_PF_EQC_BT_NUM_S 16
+#define PF_RES_DATA_5_PF_EQC_BT_NUM_M GENMASK(25, 16)
+
+struct hns_roce_vf_res_a {
+ u32 vf_id;
+ u32 vf_qpc_bt_idx_num;
+ u32 vf_srqc_bt_idx_num;
+ u32 vf_cqc_bt_idx_num;
+ u32 vf_mpt_bt_idx_num;
+ u32 vf_eqc_bt_idx_num;
+};
+
+#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_S 0
+#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_M GENMASK(10, 0)
+
+#define VF_RES_A_DATA_1_VF_QPC_BT_NUM_S 16
+#define VF_RES_A_DATA_1_VF_QPC_BT_NUM_M GENMASK(27, 16)
+
+#define VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S 0
+#define VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M GENMASK(8, 0)
+
+#define VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S 16
+#define VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M GENMASK(25, 16)
+
+#define VF_RES_A_DATA_3_VF_CQC_BT_IDX_S 0
+#define VF_RES_A_DATA_3_VF_CQC_BT_IDX_M GENMASK(8, 0)
+
+#define VF_RES_A_DATA_3_VF_CQC_BT_NUM_S 16
+#define VF_RES_A_DATA_3_VF_CQC_BT_NUM_M GENMASK(25, 16)
+
+#define VF_RES_A_DATA_4_VF_MPT_BT_IDX_S 0
+#define VF_RES_A_DATA_4_VF_MPT_BT_IDX_M GENMASK(8, 0)
+
+#define VF_RES_A_DATA_4_VF_MPT_BT_NUM_S 16
+#define VF_RES_A_DATA_4_VF_MPT_BT_NUM_M GENMASK(25, 16)
+
+#define VF_RES_A_DATA_5_VF_EQC_IDX_S 0
+#define VF_RES_A_DATA_5_VF_EQC_IDX_M GENMASK(8, 0)
+
+#define VF_RES_A_DATA_5_VF_EQC_NUM_S 16
+#define VF_RES_A_DATA_5_VF_EQC_NUM_M GENMASK(25, 16)
+
+struct hns_roce_vf_res_b {
+ u32 rsv0;
+ u32 vf_smac_idx_num;
+ u32 vf_sgid_idx_num;
+ u32 vf_qid_idx_sl_num;
+ u32 rsv[2];
+};
+
+#define VF_RES_B_DATA_0_VF_ID_S 0
+#define VF_RES_B_DATA_0_VF_ID_M GENMASK(7, 0)
+
+#define VF_RES_B_DATA_1_VF_SMAC_IDX_S 0
+#define VF_RES_B_DATA_1_VF_SMAC_IDX_M GENMASK(7, 0)
+
+#define VF_RES_B_DATA_1_VF_SMAC_NUM_S 8
+#define VF_RES_B_DATA_1_VF_SMAC_NUM_M GENMASK(16, 8)
+
+#define VF_RES_B_DATA_2_VF_SGID_IDX_S 0
+#define VF_RES_B_DATA_2_VF_SGID_IDX_M GENMASK(7, 0)
+
+#define VF_RES_B_DATA_2_VF_SGID_NUM_S 8
+#define VF_RES_B_DATA_2_VF_SGID_NUM_M GENMASK(16, 8)
+
+#define VF_RES_B_DATA_3_VF_QID_IDX_S 0
+#define VF_RES_B_DATA_3_VF_QID_IDX_M GENMASK(9, 0)
+
+#define VF_RES_B_DATA_3_VF_SL_NUM_S 16
+#define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
+
+/* Reg field definition */
+#define ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S 0
+#define ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M GENMASK(15, 0)
+
+#define ROCEE_VF_SGID_CFG4_SGID_TYPE_S 0
+#define ROCEE_VF_SGID_CFG4_SGID_TYPE_M GENMASK(1, 0)
+
+struct hns_roce_cfg_bt_attr {
+ u32 vf_qpc_cfg;
+ u32 vf_srqc_cfg;
+ u32 vf_cqc_cfg;
+ u32 vf_mpt_cfg;
+ u32 rsv[2];
+};
+
+#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0
+#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M GENMASK(3, 0)
+
+#define CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S 4
+#define CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M GENMASK(7, 4)
+
+#define CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S 8
+#define CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M GENMASK(9, 8)
+
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S 0
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M GENMASK(3, 0)
+
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S 4
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M GENMASK(7, 4)
+
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S 8
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M GENMASK(9, 8)
+
+#define CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S 0
+#define CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M GENMASK(3, 0)
+
+#define CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S 4
+#define CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M GENMASK(7, 4)
+
+#define CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S 8
+#define CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M GENMASK(9, 8)
+
+#define CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S 0
+#define CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M GENMASK(3, 0)
+
+#define CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S 4
+#define CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M GENMASK(7, 4)
+
+#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8
+#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
+
+struct hns_roce_cmq_desc {
+ u16 opcode;
+ u16 flag;
+ u16 retval;
+ u16 rsv;
+ u32 data[6];
+};
+
+#define ROCEE_VF_MB_CFG0_REG 0x40
+#define ROCEE_VF_MB_STATUS_REG 0x58
+
+#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
+
+#define HNS_ROCE_HW_RUN_BIT_SHIFT 31
+#define HNS_ROCE_HW_MB_STATUS_MASK 0xFF
+
+#define HNS_ROCE_VF_MB4_TAG_MASK 0xFFFFFF00
+#define HNS_ROCE_VF_MB4_TAG_SHIFT 8
+
+#define HNS_ROCE_VF_MB4_CMD_MASK 0xFF
+#define HNS_ROCE_VF_MB4_CMD_SHIFT 0
+
+#define HNS_ROCE_VF_MB5_EVENT_MASK 0x10000
+#define HNS_ROCE_VF_MB5_EVENT_SHIFT 16
+
+#define HNS_ROCE_VF_MB5_TOKEN_MASK 0xFFFF
+#define HNS_ROCE_VF_MB5_TOKEN_SHIFT 0
+
+struct hns_roce_v2_cmq_ring {
+ dma_addr_t desc_dma_addr;
+ struct hns_roce_cmq_desc *desc;
+ u32 head;
+ u32 tail;
+
+ u16 buf_size;
+ u16 desc_num;
+ int next_to_use;
+ int next_to_clean;
+ u8 flag;
+ spinlock_t lock; /* command queue lock */
+};
+
+struct hns_roce_v2_cmq {
+ struct hns_roce_v2_cmq_ring csq;
+ struct hns_roce_v2_cmq_ring crq;
+ u16 tx_timeout;
+ u16 last_status;
+};
+
+struct hns_roce_v2_priv {
+ struct hns_roce_v2_cmq cmq;
+};
+
+#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index d9777b662eba..cf02ac2d3596 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -57,20 +57,21 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
{
return gid_index * hr_dev->caps.num_ports + port;
}
+EXPORT_SYMBOL_GPL(hns_get_gid_index);
-static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
+static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
u8 phy_port;
u32 i = 0;
if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM))
- return;
+ return 0;
for (i = 0; i < MAC_ADDR_OCTET_NUM; i++)
hr_dev->dev_addr[port][i] = addr[i];
phy_port = hr_dev->iboe.phy_port[port];
- hr_dev->hw->set_mac(hr_dev, phy_port, addr);
+ return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}
static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
@@ -80,17 +81,19 @@ static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
struct hns_roce_dev *hr_dev = to_hr_dev(device);
u8 port = port_num - 1;
unsigned long flags;
+ int ret;
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
- hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid);
+ ret = hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid,
+ attr);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- return 0;
+ return ret;
}
static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
@@ -100,24 +103,26 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
union ib_gid zgid = { {0} };
u8 port = port_num - 1;
unsigned long flags;
+ int ret;
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
- hr_dev->hw->set_gid(hr_dev, port, index, &zgid);
+ ret = hr_dev->hw->set_gid(hr_dev, port, index, &zgid, NULL);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- return 0;
+ return ret;
}
static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
unsigned long event)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct net_device *netdev;
+ int ret = 0;
netdev = hr_dev->iboe.netdevs[port];
if (!netdev) {
@@ -130,7 +135,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
case NETDEV_CHANGE:
case NETDEV_REGISTER:
case NETDEV_CHANGEADDR:
- hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
+ ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
break;
case NETDEV_DOWN:
/*
@@ -142,7 +147,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
break;
}
- return 0;
+ return ret;
}
static int hns_roce_netdev_event(struct notifier_block *self,
@@ -171,12 +176,17 @@ static int hns_roce_netdev_event(struct notifier_block *self,
static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
+ int ret;
u8 i;
for (i = 0; i < hr_dev->caps.num_ports; i++) {
- hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
- hr_dev->caps.max_mtu);
- hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);
+ if (hr_dev->hw->set_mtu)
+ hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
+ hr_dev->caps.max_mtu);
+ ret = hns_roce_set_mac(hr_dev, i,
+ hr_dev->iboe.netdevs[i]->dev_addr);
+ if (ret)
+ return ret;
}
return 0;
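
Two conventions from this hunk recur throughout the series: optional hardware hooks (hip08 has no set_mtu, so the op is checked before the call) and fail-fast propagation of per-port errors now that set_mac can fail. A generic sketch of that shape, with purely illustrative names:

/* Illustrative only; port_ops/set_mtu/set_mac are not the driver's types. */
struct port_ops {
	void (*set_mtu)(int port, int mtu);                    /* optional hook */
	int  (*set_mac)(int port, const unsigned char *addr);  /* may fail */
};

static int setup_ports(const struct port_ops *ops, int nports, int mtu,
		       const unsigned char (*addrs)[6])
{
	int i, ret;

	for (i = 0; i < nports; i++) {
		if (ops->set_mtu)               /* skip silently when absent */
			ops->set_mtu(i, mtu);

		ret = ops->set_mac(i, addrs[i]);
		if (ret)
			return ret;             /* first failure aborts setup */
	}
	return 0;
}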
@@ -200,7 +210,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_qp_wr = hr_dev->caps.max_wqes;
props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_RC_RNR_NAK_GEN;
- props->max_sge = hr_dev->caps.max_sq_sg;
+ props->max_sge = max(hr_dev->caps.max_sq_sg, hr_dev->caps.max_rq_sg);
props->max_sge_rd = 1;
props->max_cq = hr_dev->caps.num_cqs;
props->max_cqe = hr_dev->caps.max_cqes;
@@ -238,7 +248,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
struct ib_port_attr *props)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct net_device *net_dev;
unsigned long flags;
enum ib_mtu mtu;
@@ -379,7 +389,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
to_hr_ucontext(context)->uar.pfn,
PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
- } else if (vma->vm_pgoff == 1 && hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+ } else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr &&
+ hr_dev->tptr_size) {
/* vm_pgoff: 1 -- TPTR */
if (io_remap_pfn_range(vma, vma->vm_start,
hr_dev->tptr_dma_addr >> PAGE_SHIFT,
@@ -398,8 +409,6 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
struct ib_port_attr attr;
int ret;
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
-
ret = ib_query_port(ib_dev, port_num, &attr);
if (ret)
return ret;
@@ -408,6 +417,9 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
+ immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
return 0;
}
@@ -416,7 +428,6 @@ static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
- unregister_inetaddr_notifier(&iboe->nb_inet);
unregister_netdevice_notifier(&iboe->nb);
ib_unregister_device(&hr_dev->ib_dev);
}
@@ -426,7 +437,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
int ret;
struct hns_roce_ib_iboe *iboe = NULL;
struct ib_device *ib_dev = NULL;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
iboe = &hr_dev->iboe;
spin_lock_init(&iboe->lock);
@@ -492,6 +503,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
/* CQ */
ib_dev->create_cq = hns_roce_ib_create_cq;
+ ib_dev->modify_cq = hr_dev->hw->modify_cq;
ib_dev->destroy_cq = hns_roce_ib_destroy_cq;
ib_dev->req_notify_cq = hr_dev->hw->req_notify_cq;
ib_dev->poll_cq = hr_dev->hw->poll_cq;
@@ -500,6 +512,10 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->get_dma_mr = hns_roce_get_dma_mr;
ib_dev->reg_user_mr = hns_roce_reg_user_mr;
ib_dev->dereg_mr = hns_roce_dereg_mr;
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
+ ib_dev->rereg_user_mr = hns_roce_rereg_user_mr;
+ ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
+ }
/* OTHERS */
ib_dev->get_port_immutable = hns_roce_port_immutable;
@@ -531,173 +547,10 @@ error_failed_setup_mtu_mac:
return ret;
}
-static const struct of_device_id hns_roce_of_match[] = {
- { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
- {},
-};
-MODULE_DEVICE_TABLE(of, hns_roce_of_match);
-
-static const struct acpi_device_id hns_roce_acpi_match[] = {
- { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
- {},
-};
-MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
-
-static int hns_roce_node_match(struct device *dev, void *fwnode)
-{
- return dev->fwnode == fwnode;
-}
-
-static struct
-platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
-{
- struct device *dev;
-
- /* get the 'device'corresponding to matching 'fwnode' */
- dev = bus_find_device(&platform_bus_type, NULL,
- fwnode, hns_roce_node_match);
- /* get the platform device */
- return dev ? to_platform_device(dev) : NULL;
-}
-
-static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
-{
- int i;
- int ret;
- u8 phy_port;
- int port_cnt = 0;
- struct device *dev = &hr_dev->pdev->dev;
- struct device_node *net_node;
- struct net_device *netdev = NULL;
- struct platform_device *pdev = NULL;
- struct resource *res;
-
- /* check if we are compatible with the underlying SoC */
- if (dev_of_node(dev)) {
- const struct of_device_id *of_id;
-
- of_id = of_match_node(hns_roce_of_match, dev->of_node);
- if (!of_id) {
- dev_err(dev, "device is not compatible!\n");
- return -ENXIO;
- }
- hr_dev->hw = (struct hns_roce_hw *)of_id->data;
- if (!hr_dev->hw) {
- dev_err(dev, "couldn't get H/W specific DT data!\n");
- return -ENXIO;
- }
- } else if (is_acpi_device_node(dev->fwnode)) {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
- if (!acpi_id) {
- dev_err(dev, "device is not compatible!\n");
- return -ENXIO;
- }
- hr_dev->hw = (struct hns_roce_hw *) acpi_id->driver_data;
- if (!hr_dev->hw) {
- dev_err(dev, "couldn't get H/W specific ACPI data!\n");
- return -ENXIO;
- }
- } else {
- dev_err(dev, "can't read compatibility data from DT or ACPI\n");
- return -ENXIO;
- }
-
- /* get the mapped register base address */
- res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "memory resource not found!\n");
- return -EINVAL;
- }
- hr_dev->reg_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(hr_dev->reg_base))
- return PTR_ERR(hr_dev->reg_base);
-
- /* read the node_guid of IB device from the DT or ACPI */
- ret = device_property_read_u8_array(dev, "node-guid",
- (u8 *)&hr_dev->ib_dev.node_guid,
- GUID_LEN);
- if (ret) {
- dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
- return ret;
- }
-
- /* get the RoCE associated ethernet ports or netdevices */
- for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
- if (dev_of_node(dev)) {
- net_node = of_parse_phandle(dev->of_node, "eth-handle",
- i);
- if (!net_node)
- continue;
- pdev = of_find_device_by_node(net_node);
- } else if (is_acpi_device_node(dev->fwnode)) {
- struct acpi_reference_args args;
- struct fwnode_handle *fwnode;
-
- ret = acpi_node_get_property_reference(dev->fwnode,
- "eth-handle",
- i, &args);
- if (ret)
- continue;
- fwnode = acpi_fwnode_handle(args.adev);
- pdev = hns_roce_find_pdev(fwnode);
- } else {
- dev_err(dev, "cannot read data from DT or ACPI\n");
- return -ENXIO;
- }
-
- if (pdev) {
- netdev = platform_get_drvdata(pdev);
- phy_port = (u8)i;
- if (netdev) {
- hr_dev->iboe.netdevs[port_cnt] = netdev;
- hr_dev->iboe.phy_port[port_cnt] = phy_port;
- } else {
- dev_err(dev, "no netdev found with pdev %s\n",
- pdev->name);
- return -ENODEV;
- }
- port_cnt++;
- }
- }
-
- if (port_cnt == 0) {
- dev_err(dev, "unable to get eth-handle for available ports!\n");
- return -EINVAL;
- }
-
- hr_dev->caps.num_ports = port_cnt;
-
- /* cmd issue mode: 0 is poll, 1 is event */
- hr_dev->cmd_mod = 1;
- hr_dev->loop_idc = 0;
-
- /* read the interrupt names from the DT or ACPI */
- ret = device_property_read_string_array(dev, "interrupt-names",
- hr_dev->irq_names,
- HNS_ROCE_MAX_IRQ_NUM);
- if (ret < 0) {
- dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
- return ret;
- }
-
- /* fetch the interrupt numbers */
- for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
- hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
- if (hr_dev->irq[i] <= 0) {
- dev_err(dev, "platform get of irq[=%d] failed!\n", i);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
int ret;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
@@ -707,6 +560,17 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
return ret;
}
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
+ ret = hns_roce_init_hem_table(hr_dev,
+ &hr_dev->mr_table.mtt_cqe_table,
+ HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz,
+ hr_dev->caps.num_cqe_segs, 1);
+ if (ret) {
+ dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
+ goto err_unmap_cqe;
+ }
+ }
+
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
hr_dev->caps.num_mtpts, 1);
@@ -733,16 +597,35 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
goto err_unmap_qp;
}
+ if (hr_dev->caps.trrl_entry_sz) {
+ ret = hns_roce_init_hem_table(hr_dev,
+ &hr_dev->qp_table.trrl_table,
+ HEM_TYPE_TRRL,
+ hr_dev->caps.trrl_entry_sz *
+ hr_dev->caps.max_qp_dest_rdma,
+ hr_dev->caps.num_qps, 1);
+ if (ret) {
+ dev_err(dev,
+ "Failed to init trrl_table memory, aborting.\n");
+ goto err_unmap_irrl;
+ }
+ }
+
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
hr_dev->caps.num_cqs, 1);
if (ret) {
dev_err(dev, "Failed to init CQ context memory, aborting.\n");
- goto err_unmap_irrl;
+ goto err_unmap_trrl;
}
return 0;
+err_unmap_trrl:
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.trrl_table);
+
err_unmap_irrl:
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
@@ -754,6 +637,12 @@ err_unmap_dmpt:
err_unmap_mtt:
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->mr_table.mtt_cqe_table);
+
+err_unmap_cqe:
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
return ret;
}
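
The TRRL and MTT-CQE tables slot into the existing goto-unwind chain: each newly acquired table gets an error label placed directly above the label that releases the previously acquired resource, so a failure at step N still tears down steps N-1..1 in reverse order. A stripped-down sketch of that discipline (the init_/cleanup_ functions are placeholders, not driver calls):

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return 0; }
static void cleanup_a(void) { }
static void cleanup_b(void) { }

static int init_tables(void)
{
	int ret;

	ret = init_a();
	if (ret)
		return ret;

	ret = init_b();                 /* newly inserted middle step */
	if (ret)
		goto err_a;

	ret = init_c();
	if (ret)
		goto err_b;             /* its label sits above err_a */

	return 0;

err_b:
	cleanup_b();
err_a:
	cleanup_a();
	return ret;
}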
@@ -766,7 +655,7 @@ err_unmap_mtt:
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
int ret;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
spin_lock_init(&hr_dev->sm_lock);
spin_lock_init(&hr_dev->bt_cmd_lock);
@@ -826,56 +715,45 @@ err_uar_table_free:
return ret;
}
-/**
- * hns_roce_probe - RoCE driver entrance
- * @pdev: pointer to platform device
- * Return : int
- *
- */
-static int hns_roce_probe(struct platform_device *pdev)
+int hns_roce_init(struct hns_roce_dev *hr_dev)
{
int ret;
- struct hns_roce_dev *hr_dev;
- struct device *dev = &pdev->dev;
-
- hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
- if (!hr_dev)
- return -ENOMEM;
-
- hr_dev->pdev = pdev;
- platform_set_drvdata(pdev, hr_dev);
+ struct device *dev = hr_dev->dev;
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
- dev_err(dev, "Not usable DMA addressing mode\n");
- ret = -EIO;
- goto error_failed_get_cfg;
+ if (hr_dev->hw->reset) {
+ ret = hr_dev->hw->reset(hr_dev, true);
+ if (ret) {
+ dev_err(dev, "Reset RoCE engine failed!\n");
+ return ret;
+ }
}
- ret = hns_roce_get_cfg(hr_dev);
- if (ret) {
- dev_err(dev, "Get Configuration failed!\n");
- goto error_failed_get_cfg;
+ if (hr_dev->hw->cmq_init) {
+ ret = hr_dev->hw->cmq_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "Init RoCE Command Queue failed!\n");
+ goto error_failed_cmq_init;
+ }
}
- ret = hr_dev->hw->reset(hr_dev, true);
+ ret = hr_dev->hw->hw_profile(hr_dev);
if (ret) {
- dev_err(dev, "Reset RoCE engine failed!\n");
- goto error_failed_get_cfg;
+ dev_err(dev, "Get RoCE engine profile failed!\n");
+ goto error_failed_cmd_init;
}
- hr_dev->hw->hw_profile(hr_dev);
-
ret = hns_roce_cmd_init(hr_dev);
if (ret) {
dev_err(dev, "cmd init failed!\n");
goto error_failed_cmd_init;
}
- ret = hns_roce_init_eq_table(hr_dev);
- if (ret) {
- dev_err(dev, "eq init failed!\n");
- goto error_failed_eq_table;
+ if (hr_dev->cmd_mod) {
+ ret = hns_roce_init_eq_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "eq init failed!\n");
+ goto error_failed_eq_table;
+ }
}
if (hr_dev->cmd_mod) {
@@ -898,10 +776,12 @@ static int hns_roce_probe(struct platform_device *pdev)
goto error_failed_setup_hca;
}
- ret = hr_dev->hw->hw_init(hr_dev);
- if (ret) {
- dev_err(dev, "hw_init failed!\n");
- goto error_failed_engine_init;
+ if (hr_dev->hw->hw_init) {
+ ret = hr_dev->hw->hw_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "hw_init failed!\n");
+ goto error_failed_engine_init;
+ }
}
ret = hns_roce_register_device(hr_dev);
@@ -911,7 +791,8 @@ static int hns_roce_probe(struct platform_device *pdev)
return 0;
error_failed_register_device:
- hr_dev->hw->hw_exit(hr_dev);
+ if (hr_dev->hw->hw_exit)
+ hr_dev->hw->hw_exit(hr_dev);
error_failed_engine_init:
hns_roce_cleanup_bitmap(hr_dev);
@@ -924,58 +805,47 @@ error_failed_init_hem:
hns_roce_cmd_use_polling(hr_dev);
error_failed_use_event:
- hns_roce_cleanup_eq_table(hr_dev);
+ if (hr_dev->cmd_mod)
+ hns_roce_cleanup_eq_table(hr_dev);
error_failed_eq_table:
hns_roce_cmd_cleanup(hr_dev);
error_failed_cmd_init:
- ret = hr_dev->hw->reset(hr_dev, false);
- if (ret)
- dev_err(&hr_dev->pdev->dev, "roce_engine reset fail\n");
+ if (hr_dev->hw->cmq_exit)
+ hr_dev->hw->cmq_exit(hr_dev);
-error_failed_get_cfg:
- ib_dealloc_device(&hr_dev->ib_dev);
+error_failed_cmq_init:
+ if (hr_dev->hw->reset) {
+ ret = hr_dev->hw->reset(hr_dev, false);
+ if (ret)
+ dev_err(dev, "Dereset RoCE engine failed!\n");
+ }
return ret;
}
+EXPORT_SYMBOL_GPL(hns_roce_init);
-/**
- * hns_roce_remove - remove RoCE device
- * @pdev: pointer to platform device
- */
-static int hns_roce_remove(struct platform_device *pdev)
+void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
-
hns_roce_unregister_device(hr_dev);
- hr_dev->hw->hw_exit(hr_dev);
+ if (hr_dev->hw->hw_exit)
+ hr_dev->hw->hw_exit(hr_dev);
hns_roce_cleanup_bitmap(hr_dev);
hns_roce_cleanup_hem(hr_dev);
if (hr_dev->cmd_mod)
hns_roce_cmd_use_polling(hr_dev);
- hns_roce_cleanup_eq_table(hr_dev);
+ if (hr_dev->cmd_mod)
+ hns_roce_cleanup_eq_table(hr_dev);
hns_roce_cmd_cleanup(hr_dev);
- hr_dev->hw->reset(hr_dev, false);
-
- ib_dealloc_device(&hr_dev->ib_dev);
-
- return 0;
+ if (hr_dev->hw->cmq_exit)
+ hr_dev->hw->cmq_exit(hr_dev);
+ if (hr_dev->hw->reset)
+ hr_dev->hw->reset(hr_dev, false);
}
-
-static struct platform_driver hns_roce_driver = {
- .probe = hns_roce_probe,
- .remove = hns_roce_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = hns_roce_of_match,
- .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
- },
-};
-
-module_platform_driver(hns_roce_driver);
+EXPORT_SYMBOL_GPL(hns_roce_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
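
With the platform_driver glue gone, hns_roce_main.c no longer owns probe/remove; each hardware backend fills in struct hns_roce_dev (hw ops, struct device, doorbell offsets) and calls the exported hns_roce_init()/hns_roce_exit(). A hedged sketch of what such a front-end might look like - my_backend_handle, my_hw_v2_probe() and the field wiring below are illustrative, not the actual hip08 code:

/* Hypothetical backend glue object, not a real driver structure. */
struct my_backend_handle {
	struct pci_dev *pdev;
	void *priv;
};

static int my_hw_v2_probe(struct my_backend_handle *handle)
{
	struct hns_roce_dev *hr_dev;
	int ret;

	hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
	if (!hr_dev)
		return -ENOMEM;

	/* Backend-specific wiring replaces the old hns_roce_get_cfg(). */
	hr_dev->hw = &hns_roce_hw_v2;       /* provides cmq_init, hw_profile, ... */
	hr_dev->dev = &handle->pdev->dev;   /* generic struct device, no pdev assumed */
	hr_dev->pci_dev = handle->pdev;

	ret = hns_roce_init(hr_dev);        /* common init exported above */
	if (ret) {
		ib_dealloc_device(&hr_dev->ib_dev);
		return ret;
	}

	handle->priv = hr_dev;
	return 0;
}

static void my_hw_v2_remove(struct my_backend_handle *handle)
{
	struct hns_roce_dev *hr_dev = handle->priv;

	hns_roce_exit(hr_dev);              /* common teardown exported above */
	ib_dealloc_device(&hr_dev->ib_dev);
}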
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index e387360e3780..da86a8117bd5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -47,6 +47,7 @@ unsigned long key_to_hw_index(u32 key)
{
return (key << 24) | (key >> 8);
}
+EXPORT_SYMBOL_GPL(key_to_hw_index);
static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
@@ -65,6 +66,7 @@ int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
+EXPORT_SYMBOL_GPL(hns_roce_hw2sw_mpt);
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
unsigned long *seg)
@@ -175,18 +177,28 @@ static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
}
static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
- unsigned long *seg)
+ unsigned long *seg, u32 mtt_type)
{
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
- int ret = 0;
+ struct hns_roce_hem_table *table;
+ struct hns_roce_buddy *buddy;
+ int ret;
+
+ if (mtt_type == MTT_TYPE_WQE) {
+ buddy = &mr_table->mtt_buddy;
+ table = &mr_table->mtt_table;
+ } else {
+ buddy = &mr_table->mtt_cqe_buddy;
+ table = &mr_table->mtt_cqe_table;
+ }
- ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
+ ret = hns_roce_buddy_alloc(buddy, order, seg);
if (ret == -1)
return -1;
- if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
+ if (hns_roce_table_get_range(hr_dev, table, *seg,
*seg + (1 << order) - 1)) {
- hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
+ hns_roce_buddy_free(buddy, *seg, order);
return -1;
}
@@ -196,7 +208,7 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
struct hns_roce_mtt *mtt)
{
- int ret = 0;
+ int ret;
int i;
/* Page num is zero, correspond to DMA memory register */
@@ -215,7 +227,8 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
++mtt->order;
/* Allocate MTT entry */
- ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
+ ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
+ mtt->mtt_type);
if (ret == -1)
return -ENOMEM;
@@ -229,18 +242,261 @@ void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
if (mtt->order < 0)
return;
- hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
- hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
+ if (mtt->mtt_type == MTT_TYPE_WQE) {
+ hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
+ mtt->order);
+ hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
+ mtt->first_seg,
+ mtt->first_seg + (1 << mtt->order) - 1);
+ } else {
+ hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
+ mtt->order);
+ hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
+ mtt->first_seg,
+ mtt->first_seg + (1 << mtt->order) - 1);
+ }
+}
+EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);
+
+static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr, int err_loop_index,
+ int loop_i, int loop_j)
+{
+ struct device *dev = hr_dev->dev;
+ u32 mhop_num;
+ u32 pbl_bt_sz;
+ u64 bt_idx;
+ int i, j;
+
+ pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
+ mhop_num = hr_dev->caps.pbl_hop_num;
+
+ i = loop_i;
+ if (mhop_num == 3 && err_loop_index == 2) {
+ for (; i >= 0; i--) {
+ dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+ mr->pbl_l1_dma_addr[i]);
+
+ for (j = 0; j < pbl_bt_sz / 8; j++) {
+ if (i == loop_i && j >= loop_j)
+ break;
+
+ bt_idx = i * pbl_bt_sz / 8 + j;
+ dma_free_coherent(dev, pbl_bt_sz,
+ mr->pbl_bt_l2[bt_idx],
+ mr->pbl_l2_dma_addr[bt_idx]);
+ }
+ }
+ } else if (mhop_num == 3 && err_loop_index == 1) {
+ for (i -= 1; i >= 0; i--) {
+ dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+ mr->pbl_l1_dma_addr[i]);
+
+ for (j = 0; j < pbl_bt_sz / 8; j++) {
+ bt_idx = i * pbl_bt_sz / 8 + j;
+ dma_free_coherent(dev, pbl_bt_sz,
+ mr->pbl_bt_l2[bt_idx],
+ mr->pbl_l2_dma_addr[bt_idx]);
+ }
+ }
+ } else if (mhop_num == 2 && err_loop_index == 1) {
+ for (i -= 1; i >= 0; i--)
+ dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+ mr->pbl_l1_dma_addr[i]);
+ } else {
+ dev_warn(dev, "unsupported: mhop_num=%d, err_loop_index=%d.",
+ mhop_num, err_loop_index);
+ return;
+ }
+
+ dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
+ mr->pbl_bt_l0 = NULL;
+ mr->pbl_l0_dma_addr = 0;
+}
+
+/* PBL multi hop addressing */
+static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
+ struct hns_roce_mr *mr)
+{
+ struct device *dev = hr_dev->dev;
+ int mr_alloc_done = 0;
+ int npages_allocated;
+ int i = 0, j = 0;
+ u32 pbl_bt_sz;
+ u32 mhop_num;
+ u64 pbl_last_bt_num;
+ u64 pbl_bt_cnt = 0;
+ u64 bt_idx;
+ u64 size;
+
+ mhop_num = hr_dev->caps.pbl_hop_num;
+ pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
+ pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
+
+ if (mhop_num == HNS_ROCE_HOP_NUM_0)
+ return 0;
+
+ /* hop_num = 1 */
+ if (mhop_num == 1) {
+ if (npages > pbl_bt_sz / 8) {
+ dev_err(dev, "npages %d is larger than buf_pg_sz!",
+ npages);
+ return -EINVAL;
+ }
+ mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+ &(mr->pbl_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_buf)
+ return -ENOMEM;
+
+ mr->pbl_size = npages;
+ mr->pbl_ba = mr->pbl_dma_addr;
+ mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
+ mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
+ mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
+ return 0;
+ }
+
+ mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
+ sizeof(*mr->pbl_l1_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_l1_dma_addr)
+ return -ENOMEM;
+
+ mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l1)
+ goto err_kcalloc_bt_l1;
+
+ if (mhop_num == 3) {
+ mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
+ sizeof(*mr->pbl_l2_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_l2_dma_addr)
+ goto err_kcalloc_l2_dma;
+
+ mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
+ sizeof(*mr->pbl_bt_l2),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l2)
+ goto err_kcalloc_bt_l2;
+ }
+
+ /* alloc L0 BT */
+ mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
+ &(mr->pbl_l0_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l0)
+ goto err_dma_alloc_l0;
+
+ if (mhop_num == 2) {
+ /* alloc L1 BT */
+ for (i = 0; i < pbl_bt_sz / 8; i++) {
+ if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
+ size = pbl_bt_sz;
+ } else {
+ npages_allocated = i * (pbl_bt_sz / 8);
+ size = (npages - npages_allocated) * 8;
+ }
+ mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
+ &(mr->pbl_l1_dma_addr[i]),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l1[i]) {
+ hns_roce_loop_free(hr_dev, mr, 1, i, 0);
+ goto err_dma_alloc_l0;
+ }
+
+ *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
+
+ pbl_bt_cnt++;
+ if (pbl_bt_cnt >= pbl_last_bt_num)
+ break;
+ }
+ } else if (mhop_num == 3) {
+ /* alloc L1, L2 BT */
+ for (i = 0; i < pbl_bt_sz / 8; i++) {
+ mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
+ &(mr->pbl_l1_dma_addr[i]),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l1[i]) {
+ hns_roce_loop_free(hr_dev, mr, 1, i, 0);
+ goto err_dma_alloc_l0;
+ }
+
+ *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
+
+ for (j = 0; j < pbl_bt_sz / 8; j++) {
+ bt_idx = i * pbl_bt_sz / 8 + j;
+
+ if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
+ size = pbl_bt_sz;
+ } else {
+ npages_allocated = bt_idx *
+ (pbl_bt_sz / 8);
+ size = (npages - npages_allocated) * 8;
+ }
+ mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
+ dev, size,
+ &(mr->pbl_l2_dma_addr[bt_idx]),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l2[bt_idx]) {
+ hns_roce_loop_free(hr_dev, mr, 2, i, j);
+ goto err_dma_alloc_l0;
+ }
+
+ *(mr->pbl_bt_l1[i] + j) =
+ mr->pbl_l2_dma_addr[bt_idx];
+
+ pbl_bt_cnt++;
+ if (pbl_bt_cnt >= pbl_last_bt_num) {
+ mr_alloc_done = 1;
+ break;
+ }
+ }
+
+ if (mr_alloc_done)
+ break;
+ }
+ }
+
+ mr->l0_chunk_last_num = i + 1;
+ if (mhop_num == 3)
+ mr->l1_chunk_last_num = j + 1;
+
+ mr->pbl_size = npages;
+ mr->pbl_ba = mr->pbl_l0_dma_addr;
+ mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
+ mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
+ mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
+
+ return 0;
+
+err_dma_alloc_l0:
+ kfree(mr->pbl_bt_l2);
+ mr->pbl_bt_l2 = NULL;
+
+err_kcalloc_bt_l2:
+ kfree(mr->pbl_l2_dma_addr);
+ mr->pbl_l2_dma_addr = NULL;
+
+err_kcalloc_l2_dma:
+ kfree(mr->pbl_bt_l1);
+ mr->pbl_bt_l1 = NULL;
+
+err_kcalloc_bt_l1:
+ kfree(mr->pbl_l1_dma_addr);
+ mr->pbl_l1_dma_addr = NULL;
+
+ return -ENOMEM;
}
static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
u64 size, u32 access, int npages,
struct hns_roce_mr *mr)
{
+ struct device *dev = hr_dev->dev;
unsigned long index = 0;
int ret = 0;
- struct device *dev = &hr_dev->pdev->dev;
/* Allocate a key for mr from mr_table */
ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
@@ -258,22 +514,117 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
mr->type = MR_TYPE_DMA;
mr->pbl_buf = NULL;
mr->pbl_dma_addr = 0;
+ /* PBL multi-hop addressing parameters */
+ mr->pbl_bt_l2 = NULL;
+ mr->pbl_bt_l1 = NULL;
+ mr->pbl_bt_l0 = NULL;
+ mr->pbl_l2_dma_addr = NULL;
+ mr->pbl_l1_dma_addr = NULL;
+ mr->pbl_l0_dma_addr = 0;
} else {
mr->type = MR_TYPE_MR;
- mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
- &(mr->pbl_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_buf)
- return -ENOMEM;
+ if (!hr_dev->caps.pbl_hop_num) {
+ mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+ &(mr->pbl_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_buf)
+ return -ENOMEM;
+ } else {
+ ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
+ }
}
- return 0;
+ return ret;
+}
+
+static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr)
+{
+ struct device *dev = hr_dev->dev;
+ int npages_allocated;
+ int npages;
+ int i, j;
+ u32 pbl_bt_sz;
+ u32 mhop_num;
+ u64 bt_idx;
+
+ npages = ib_umem_page_count(mr->umem);
+ pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
+ mhop_num = hr_dev->caps.pbl_hop_num;
+
+ if (mhop_num == HNS_ROCE_HOP_NUM_0)
+ return;
+
+ /* hop_num = 1 */
+ if (mhop_num == 1) {
+ dma_free_coherent(dev, (unsigned int)(npages * 8),
+ mr->pbl_buf, mr->pbl_dma_addr);
+ return;
+ }
+
+ dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
+ mr->pbl_l0_dma_addr);
+
+ if (mhop_num == 2) {
+ for (i = 0; i < mr->l0_chunk_last_num; i++) {
+ if (i == mr->l0_chunk_last_num - 1) {
+ npages_allocated = i * (pbl_bt_sz / 8);
+
+ dma_free_coherent(dev,
+ (npages - npages_allocated) * 8,
+ mr->pbl_bt_l1[i],
+ mr->pbl_l1_dma_addr[i]);
+
+ break;
+ }
+
+ dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+ mr->pbl_l1_dma_addr[i]);
+ }
+ } else if (mhop_num == 3) {
+ for (i = 0; i < mr->l0_chunk_last_num; i++) {
+ dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+ mr->pbl_l1_dma_addr[i]);
+
+ for (j = 0; j < pbl_bt_sz / 8; j++) {
+ bt_idx = i * (pbl_bt_sz / 8) + j;
+
+ if ((i == mr->l0_chunk_last_num - 1)
+ && j == mr->l1_chunk_last_num - 1) {
+ npages_allocated = bt_idx *
+ (pbl_bt_sz / 8);
+
+ dma_free_coherent(dev,
+ (npages - npages_allocated) * 8,
+ mr->pbl_bt_l2[bt_idx],
+ mr->pbl_l2_dma_addr[bt_idx]);
+
+ break;
+ }
+
+ dma_free_coherent(dev, pbl_bt_sz,
+ mr->pbl_bt_l2[bt_idx],
+ mr->pbl_l2_dma_addr[bt_idx]);
+ }
+ }
+ }
+
+ kfree(mr->pbl_bt_l1);
+ kfree(mr->pbl_l1_dma_addr);
+ mr->pbl_bt_l1 = NULL;
+ mr->pbl_l1_dma_addr = NULL;
+ if (mhop_num == 3) {
+ kfree(mr->pbl_bt_l2);
+ kfree(mr->pbl_l2_dma_addr);
+ mr->pbl_bt_l2 = NULL;
+ mr->pbl_l2_dma_addr = NULL;
+ }
}
static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
int npages = 0;
int ret;
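
The sizing in hns_roce_mhop_alloc() is easier to check as arithmetic: a base-address-table (BT) page of pbl_bt_sz bytes holds pbl_bt_sz / 8 eight-byte entries, a hop-N PBL can therefore describe (pbl_bt_sz / 8)^N buffer pages, and pbl_last_bt_num is simply the number of leaf tables needed for npages entries. A stand-alone worked sketch (not driver code):

#include <stdio.h>
#include <stdint.h>

/* Capacity of an N-hop page-block list: each hop multiplies the reach
 * by the number of 8-byte entries per BT page. */
static uint64_t pbl_max_pages(uint32_t pbl_bt_sz, unsigned int hop_num)
{
	uint64_t entries_per_bt = pbl_bt_sz / 8;
	uint64_t max = 1;
	unsigned int i;

	for (i = 0; i < hop_num; i++)
		max *= entries_per_bt;
	return max;
}

int main(void)
{
	uint32_t pbl_bt_sz = 4096;        /* pbl_ba_pg_sz == 0 -> one 4 KiB page */
	uint64_t npages = 100000;         /* MR size in buffer pages */
	uint64_t leaf_bts = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);

	printf("entries per BT: %u\n", pbl_bt_sz / 8);            /* 512 */
	printf("hop1 max pages: %llu\n",
	       (unsigned long long)pbl_max_pages(pbl_bt_sz, 1));  /* 512 */
	printf("hop2 max pages: %llu\n",
	       (unsigned long long)pbl_max_pages(pbl_bt_sz, 2));  /* 262144 */
	printf("leaf BTs for %llu pages: %llu\n",
	       (unsigned long long)npages, (unsigned long long)leaf_bts); /* 196 */
	return 0;
}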
@@ -286,10 +637,18 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
if (mr->size != ~0ULL) {
npages = ib_umem_page_count(mr->umem);
- dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf,
- mr->pbl_dma_addr);
+
+ if (!hr_dev->caps.pbl_hop_num)
+ dma_free_coherent(dev, (unsigned int)(npages * 8),
+ mr->pbl_buf, mr->pbl_dma_addr);
+ else
+ hns_roce_mhop_free(hr_dev, mr);
}
+ if (mr->enabled)
+ hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
+ key_to_hw_index(mr->key));
+
hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
key_to_hw_index(mr->key), BITMAP_NO_RR);
}
@@ -299,7 +658,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
{
int ret;
unsigned long mtpt_idx = key_to_hw_index(mr->key);
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
@@ -345,28 +704,44 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, u32 start_index,
u32 npages, u64 *page_list)
{
- u32 i = 0;
- __le64 *mtts = NULL;
+ struct hns_roce_hem_table *table;
dma_addr_t dma_handle;
+ __le64 *mtts;
u32 s = start_index * sizeof(u64);
+ u32 bt_page_size;
+ u32 i;
+
+ if (mtt->mtt_type == MTT_TYPE_WQE)
+ bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
+ else
+ bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
/* All MTTs must fit in the same page */
- if (start_index / (PAGE_SIZE / sizeof(u64)) !=
- (start_index + npages - 1) / (PAGE_SIZE / sizeof(u64)))
+ if (start_index / (bt_page_size / sizeof(u64)) !=
+ (start_index + npages - 1) / (bt_page_size / sizeof(u64)))
return -EINVAL;
if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
return -EINVAL;
- mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ if (mtt->mtt_type == MTT_TYPE_WQE)
+ table = &hr_dev->mr_table.mtt_table;
+ else
+ table = &hr_dev->mr_table.mtt_cqe_table;
+
+ mtts = hns_roce_table_find(hr_dev, table,
mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
&dma_handle);
if (!mtts)
return -ENOMEM;
/* Save page addr, low 12 bits : 0 */
- for (i = 0; i < npages; ++i)
- mtts[i] = (cpu_to_le64(page_list[i])) >> PAGE_ADDR_SHIFT;
+ for (i = 0; i < npages; ++i) {
+ if (!hr_dev->caps.mtt_hop_num)
+ mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
+ else
+ mtts[i] = cpu_to_le64(page_list[i]);
+ }
return 0;
}
@@ -377,12 +752,18 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
{
int chunk;
int ret;
+ u32 bt_page_size;
if (mtt->order < 0)
return -EINVAL;
+ if (mtt->mtt_type == MTT_TYPE_WQE)
+ bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
+ else
+ bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
+
while (npages > 0) {
- chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
+ chunk = min_t(int, bt_page_size / sizeof(u64), npages);
ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
page_list);
@@ -400,9 +781,9 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
- u32 i = 0;
- int ret = 0;
- u64 *page_list = NULL;
+ u64 *page_list;
+ int ret;
+ u32 i;
page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
if (!page_list)
@@ -425,7 +806,7 @@ int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
- int ret = 0;
+ int ret;
ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
hr_dev->caps.num_mtpts,
@@ -439,8 +820,17 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
if (ret)
goto err_buddy;
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
+ ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
+ ilog2(hr_dev->caps.num_cqe_segs));
+ if (ret)
+ goto err_buddy_cqe;
+ }
return 0;
+err_buddy_cqe:
+ hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+
err_buddy:
hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
return ret;
@@ -451,13 +841,15 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+ hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
- int ret = 0;
- struct hns_roce_mr *mr = NULL;
+ struct hns_roce_mr *mr;
+ int ret;
mr = kmalloc(sizeof(*mr), GFP_KERNEL);
if (mr == NULL)
@@ -489,25 +881,44 @@ err_free:
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
+ struct device *dev = hr_dev->dev;
struct scatterlist *sg;
+ unsigned int order;
int i, k, entry;
+ int npage = 0;
int ret = 0;
+ int len;
+ u64 page_addr;
u64 *pages;
+ u32 bt_page_size;
u32 n;
- int len;
- pages = (u64 *) __get_free_page(GFP_KERNEL);
+ order = mtt->mtt_type == MTT_TYPE_WQE ? hr_dev->caps.mtt_ba_pg_sz :
+ hr_dev->caps.cqe_ba_pg_sz;
+ bt_page_size = 1 << (order + PAGE_SHIFT);
+
+ pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
if (!pages)
return -ENOMEM;
i = n = 0;
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> mtt->page_shift;
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
for (k = 0; k < len; ++k) {
- pages[i++] = sg_dma_address(sg) +
- (k << umem->page_shift);
- if (i == PAGE_SIZE / sizeof(u64)) {
+ page_addr =
+ sg_dma_address(sg) + (k << umem->page_shift);
+ if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
+ if (page_addr & ((1 << mtt->page_shift) - 1)) {
+ dev_err(dev, "page_addr 0x%llx is not page_shift %d alignment!\n",
+ page_addr, mtt->page_shift);
+ ret = -EINVAL;
+ goto out;
+ }
+ pages[i++] = page_addr;
+ }
+ npage++;
+ if (i == bt_page_size / sizeof(u64)) {
ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
pages);
if (ret)
@@ -526,16 +937,44 @@ out:
return ret;
}
-static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
+static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr,
struct ib_umem *umem)
{
- int i = 0;
- int entry;
struct scatterlist *sg;
+ int i = 0, j = 0, k;
+ int entry;
+ int len;
+ u64 page_addr;
+ u32 pbl_bt_sz;
+
+ if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
+ return 0;
+ pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> 12;
- i++;
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (k = 0; k < len; ++k) {
+ page_addr = sg_dma_address(sg) +
+ (k << umem->page_shift);
+
+ if (!hr_dev->caps.pbl_hop_num) {
+ mr->pbl_buf[i++] = page_addr >> 12;
+ } else if (hr_dev->caps.pbl_hop_num == 1) {
+ mr->pbl_buf[i++] = page_addr;
+ } else {
+ if (hr_dev->caps.pbl_hop_num == 2)
+ mr->pbl_bt_l1[i][j] = page_addr;
+ else if (hr_dev->caps.pbl_hop_num == 3)
+ mr->pbl_bt_l2[i][j] = page_addr;
+
+ j++;
+ if (j >= (pbl_bt_sz / 8)) {
+ i++;
+ j = 0;
+ }
+ }
+ }
}
/* Memory barrier */
@@ -549,10 +988,12 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_mr *mr = NULL;
- int ret = 0;
- int n = 0;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_mr *mr;
+ int bt_size;
+ int ret;
+ int n;
+ int i;
mr = kmalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
@@ -573,11 +1014,27 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_umem;
}
- if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
- dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n",
- length);
- ret = -EINVAL;
- goto err_umem;
+ if (!hr_dev->caps.pbl_hop_num) {
+ if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
+ dev_err(dev,
+ " MR len %lld err. MR is limited to 4G at most!\n",
+ length);
+ ret = -EINVAL;
+ goto err_umem;
+ }
+ } else {
+ int pbl_size = 1;
+
+ bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / 8;
+ for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
+ pbl_size *= bt_size;
+ if (n > pbl_size) {
+ dev_err(dev,
+ " MR len %lld err. MR page num is limited to %d!\n",
+ length, pbl_size);
+ ret = -EINVAL;
+ goto err_umem;
+ }
}
ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
@@ -585,7 +1042,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (ret)
goto err_umem;
- ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
+ ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
if (ret)
goto err_mr;
@@ -608,6 +1065,129 @@ err_free:
return ERR_PTR(ret);
}
+int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct device *dev = hr_dev->dev;
+ unsigned long mtpt_idx;
+ u32 pdn = 0;
+ int npages;
+ int ret;
+
+ if (!mr->enabled)
+ return -EINVAL;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
+ HNS_ROCE_CMD_QUERY_MPT,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret)
+ goto free_cmd_mbox;
+
+ ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
+ if (ret)
+ dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
+
+ mr->enabled = 0;
+
+ if (flags & IB_MR_REREG_PD)
+ pdn = to_hr_pd(pd)->pdn;
+
+ if (flags & IB_MR_REREG_TRANS) {
+ if (mr->size != ~0ULL) {
+ npages = ib_umem_page_count(mr->umem);
+
+ if (hr_dev->caps.pbl_hop_num)
+ hns_roce_mhop_free(hr_dev, mr);
+ else
+ dma_free_coherent(dev, npages * 8, mr->pbl_buf,
+ mr->pbl_dma_addr);
+ }
+ ib_umem_release(mr->umem);
+
+ mr->umem = ib_umem_get(ibmr->uobject->context, start, length,
+ mr_access_flags, 0);
+ if (IS_ERR(mr->umem)) {
+ ret = PTR_ERR(mr->umem);
+ mr->umem = NULL;
+ goto free_cmd_mbox;
+ }
+ npages = ib_umem_page_count(mr->umem);
+
+ if (hr_dev->caps.pbl_hop_num) {
+ ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
+ if (ret)
+ goto release_umem;
+ } else {
+ mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+ &(mr->pbl_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_buf) {
+ ret = -ENOMEM;
+ goto release_umem;
+ }
+ }
+ }
+
+ ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
+ mr_access_flags, virt_addr,
+ length, mailbox->buf);
+ if (ret) {
+ if (flags & IB_MR_REREG_TRANS)
+ goto release_umem;
+ else
+ goto free_cmd_mbox;
+ }
+
+ if (flags & IB_MR_REREG_TRANS) {
+ ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
+ if (ret) {
+ if (mr->size != ~0ULL) {
+ npages = ib_umem_page_count(mr->umem);
+
+ if (hr_dev->caps.pbl_hop_num)
+ hns_roce_mhop_free(hr_dev, mr);
+ else
+ dma_free_coherent(dev, npages * 8,
+ mr->pbl_buf,
+ mr->pbl_dma_addr);
+ }
+
+ goto release_umem;
+ }
+ }
+
+ ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
+ if (ret) {
+ dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
+ goto release_umem;
+ }
+
+ mr->enabled = 1;
+ if (flags & IB_MR_REREG_ACCESS)
+ mr->access = mr_access_flags;
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return 0;
+
+release_umem:
+ ib_umem_release(mr->umem);
+
+free_cmd_mbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index a64500fa1145..bdab2188c04a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -31,6 +31,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/pci.h>
#include "hns_roce_device.h"
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
@@ -60,7 +61,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_pd *pd;
int ret;
@@ -86,6 +87,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
return &pd->ibpd;
}
+EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);
int hns_roce_dealloc_pd(struct ib_pd *pd)
{
@@ -94,6 +96,7 @@ int hns_roce_dealloc_pd(struct ib_pd *pd)
return 0;
}
+EXPORT_SYMBOL_GPL(hns_roce_dealloc_pd);
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
@@ -109,12 +112,17 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
uar->index = (uar->index - 1) %
(hr_dev->caps.phy_num_uars - 1) + 1;
- res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
- return -EINVAL;
+ if (!dev_is_pci(hr_dev->dev)) {
+ res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
+ return -EINVAL;
+ }
+ uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
+ } else {
+ uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
+ >> PAGE_SHIFT);
}
- uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
return 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index f5dd21c2d275..49586ec8126a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -44,7 +44,7 @@
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_qp *qp;
spin_lock(&qp_table->lock);
@@ -136,6 +136,7 @@ enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
return HNS_ROCE_QP_NUM_STATE;
}
}
+EXPORT_SYMBOL_GPL(to_hns_roce_state);
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
struct hns_roce_qp *hr_qp)
@@ -153,7 +154,7 @@ static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
spin_unlock_irq(&qp_table->lock);
if (ret) {
- dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
+ dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
goto err_put_irrl;
}
@@ -171,7 +172,7 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
struct hns_roce_qp *hr_qp)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
int ret;
if (!qpn)
@@ -193,13 +194,23 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
goto err_put_qp;
}
+ if (hr_dev->caps.trrl_entry_sz) {
+ /* Alloc memory for TRRL */
+ ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
+ hr_qp->qpn);
+ if (ret) {
+ dev_err(dev, "TRRL table get failed\n");
+ goto err_put_irrl;
+ }
+ }
+
spin_lock_irq(&qp_table->lock);
ret = radix_tree_insert(&hr_dev->qp_table_tree,
hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
spin_unlock_irq(&qp_table->lock);
if (ret) {
dev_err(dev, "QPC radix_tree_insert failed\n");
- goto err_put_irrl;
+ goto err_put_trrl;
}
atomic_set(&hr_qp->refcount, 1);
@@ -207,6 +218,10 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
return 0;
+err_put_trrl:
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
+
err_put_irrl:
hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
@@ -227,6 +242,7 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
hr_qp->qpn & (hr_dev->caps.num_qps - 1));
spin_unlock_irqrestore(&qp_table->lock, flags);
}
+EXPORT_SYMBOL_GPL(hns_roce_qp_remove);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
@@ -237,10 +253,14 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
wait_for_completion(&hr_qp->free);
if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table,
+ hr_qp->qpn);
hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
}
}
+EXPORT_SYMBOL_GPL(hns_roce_qp_free);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
int cnt)
@@ -252,13 +272,14 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
+EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, int is_user, int has_srq,
struct hns_roce_qp *hr_qp)
{
+ struct device *dev = hr_dev->dev;
u32 max_cnt;
- struct device *dev = &hr_dev->pdev->dev;
/* Check the validity of QP support capacity */
if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
@@ -282,20 +303,27 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
return -EINVAL;
}
- /* In v1 engine, parameter verification procession */
- max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
- cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
+ if (hr_dev->caps.min_wqes)
+ max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
+ else
+ max_cnt = cap->max_recv_wr;
+
hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
- dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
+ dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
return -EINVAL;
}
max_cnt = max(1U, cap->max_recv_sge);
hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
- /* WQE is fixed for 64B */
- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
+ if (hr_dev->caps.max_rq_sg <= 2)
+ hr_qp->rq.wqe_shift =
+ ilog2(hr_dev->caps.max_rq_desc_sz);
+ else
+ hr_qp->rq.wqe_shift =
+ ilog2(hr_dev->caps.max_rq_desc_sz
+ * hr_qp->rq.max_gs);
}
cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
@@ -305,32 +333,79 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
}
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap,
struct hns_roce_qp *hr_qp,
struct hns_roce_ib_create_qp *ucmd)
{
u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
u8 max_sq_stride = ilog2(roundup_sq_stride);
+ u32 page_size;
+ u32 max_cnt;
/* Sanity check SQ size before proceeding */
if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
ucmd->log_sq_stride > max_sq_stride ||
ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
- dev_err(&hr_dev->pdev->dev, "check SQ size error!\n");
+ dev_err(hr_dev->dev, "check SQ size error!\n");
+ return -EINVAL;
+ }
+
+ if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
+ dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
+ cap->max_send_sge);
return -EINVAL;
}
hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+ max_cnt = max(1U, cap->max_send_sge);
+ if (hr_dev->caps.max_sq_sg <= 2)
+ hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+ else
+ hr_qp->sq.max_gs = max_cnt;
+
+ if (hr_qp->sq.max_gs > 2)
+ hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+ (hr_qp->sq.max_gs - 2));
+ hr_qp->sge.sge_shift = 4;
+
/* Get buf size, SQ and RQ are aligned to page_szie */
- hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ if (hr_dev->caps.max_sq_sg <= 2) {
+ hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), PAGE_SIZE) +
- HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
- hr_qp->sq.offset = 0;
- hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.offset = 0;
+ hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
+ } else {
+ page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->rq.wqe_shift), page_size) +
+ HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+ hr_qp->sge.sge_shift), page_size) +
+ HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift), page_size);
+
+ hr_qp->sq.offset = 0;
+ if (hr_qp->sge.sge_cnt) {
+ hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
+ (hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift),
+ page_size);
+ hr_qp->rq.offset = hr_qp->sge.offset +
+ HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+ hr_qp->sge.sge_shift),
+ page_size);
+ } else {
+ hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
+ (hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift),
+ page_size);
+ }
+ }
return 0;
}
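
For hip08 (max_sq_sg > 2) the WQ buffer now carries three regions in one allocation - SQ WQEs, then the extended SGE area, then RQ WQEs - each rounded up to the MTT buffer page size. A quick worked sketch of the offset math (stand-alone, assuming a 4 KiB page and mtt_buf_pg_sz == 0; the WQE/SGE sizes are illustrative):

#include <stdio.h>
#include <stdint.h>

static uint32_t align_up(uint32_t v, uint32_t a)   /* HNS_ROCE_ALOGN_UP equivalent */
{
	return (v + a - 1) / a * a;
}

int main(void)
{
	uint32_t page_size = 4096;        /* 1 << (mtt_buf_pg_sz + PAGE_SHIFT), assumed */
	uint32_t sq_wqe_cnt = 128, sq_wqe_shift = 6;   /* 64 B send WQEs */
	uint32_t rq_wqe_cnt = 128, rq_wqe_shift = 4;   /* 16 B recv WQEs, illustrative */
	uint32_t max_gs = 4, sge_shift = 4;            /* 16 B per extended SGE */
	uint32_t sge_cnt = sq_wqe_cnt * (max_gs - 2);  /* already a power of two here */

	uint32_t sq_off  = 0;
	uint32_t sge_off = align_up(sq_wqe_cnt << sq_wqe_shift, page_size);
	uint32_t rq_off  = sge_off + align_up(sge_cnt << sge_shift, page_size);
	uint32_t total   = rq_off + align_up(rq_wqe_cnt << rq_wqe_shift, page_size);

	printf("sq@%u sge@%u rq@%u buff_size=%u\n", sq_off, sge_off, rq_off, total);
	/* sq@0 sge@8192 rq@12288 buff_size=16384 */
	return 0;
}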
@@ -339,13 +414,15 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap,
struct hns_roce_qp *hr_qp)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
+ u32 page_size;
u32 max_cnt;
+ int size;
if (cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg ||
cap->max_inline_data > hr_dev->caps.max_sq_inline) {
- dev_err(dev, "hns_roce_set_kernel_sq_size error1\n");
+ dev_err(dev, "SQ WR or sge or inline data error!\n");
return -EINVAL;
}
@@ -353,27 +430,46 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq_max_wqes_per_wr = 1;
hr_qp->sq_spare_wqes = 0;
- /* In v1 engine, parameter verification procession */
- max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
- cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
+ if (hr_dev->caps.min_wqes)
+ max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
+ else
+ max_cnt = cap->max_send_wr;
+
hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
- dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
+ dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
return -EINVAL;
}
/* Get data_seg numbers */
max_cnt = max(1U, cap->max_send_sge);
- hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+ if (hr_dev->caps.max_sq_sg <= 2)
+ hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+ else
+ hr_qp->sq.max_gs = max_cnt;
- /* Get buf size, SQ and RQ are aligned to page_szie */
- hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
- hr_qp->rq.wqe_shift), PAGE_SIZE) +
- HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift), PAGE_SIZE);
+ if (hr_qp->sq.max_gs > 2) {
+ hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+ (hr_qp->sq.max_gs - 2));
+ hr_qp->sge.sge_shift = 4;
+ }
+
+ /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
+ page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
- hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift), PAGE_SIZE);
+ size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
+ page_size);
+
+ if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
+ hr_qp->sge.offset = size;
+ size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
+ hr_qp->sge.sge_shift, page_size);
+ }
+
+ hr_qp->rq.offset = size;
+ size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
+ page_size);
+ hr_qp->buff_size = size;
/* Get wr and sge number which send */
cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
@@ -391,10 +487,12 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
struct ib_udata *udata, unsigned long sqpn,
struct hns_roce_qp *hr_qp)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_qp ucmd;
unsigned long qpn = 0;
int ret = 0;
+ u32 page_shift;
+ u32 npages;
mutex_init(&hr_qp->mutex);
spin_lock_init(&hr_qp->sq.lock);
@@ -421,7 +519,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_out;
}
- ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
+ ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
+ &ucmd);
if (ret) {
dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
goto err_out;
@@ -436,8 +535,21 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_out;
}
- ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
- hr_qp->umem->page_shift, &hr_qp->mtt);
+ hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
+ if (hr_dev->caps.mtt_buf_pg_sz) {
+ npages = (ib_umem_page_count(hr_qp->umem) +
+ (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
+ (1 << hr_dev->caps.mtt_buf_pg_sz);
+ page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
+ ret = hns_roce_mtt_init(hr_dev, npages,
+ page_shift,
+ &hr_qp->mtt);
+ } else {
+ ret = hns_roce_mtt_init(hr_dev,
+ ib_umem_page_count(hr_qp->umem),
+ hr_qp->umem->page_shift,
+ &hr_qp->mtt);
+ }
if (ret) {
dev_err(dev, "hns_roce_mtt_init error for create qp\n");
goto err_buf;
@@ -472,20 +584,22 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
/* QP doorbell register address */
- hr_qp->sq.db_reg_l = hr_dev->reg_base + ROCEE_DB_SQ_L_0_REG +
+ hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
DB_REG_OFFSET * hr_dev->priv_uar.index;
- hr_qp->rq.db_reg_l = hr_dev->reg_base +
- ROCEE_DB_OTHERS_L_0_REG +
+ hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
DB_REG_OFFSET * hr_dev->priv_uar.index;
/* Allocate QP buf */
- if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, PAGE_SIZE * 2,
- &hr_qp->hr_buf)) {
+ page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
+ if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
+ (1 << page_shift) * 2,
+ &hr_qp->hr_buf, page_shift)) {
dev_err(dev, "hns_roce_buf_alloc error!\n");
ret = -ENOMEM;
goto err_out;
}
+ hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
/* Write MTT */
ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
hr_qp->hr_buf.page_shift, &hr_qp->mtt);
@@ -522,7 +636,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
}
- if ((init_attr->qp_type) == IB_QPT_GSI) {
+ if (init_attr->qp_type == IB_QPT_GSI &&
+ hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+ /* In v1 engine, GSI QP contexts live in the RoCE engine's registers */
ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
if (ret) {
dev_err(dev, "hns_roce_qp_alloc failed!\n");
@@ -571,7 +687,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_sqp *hr_sqp;
struct hns_roce_qp *hr_qp;
int ret;
@@ -629,6 +745,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
return &hr_qp->ibqp;
}
+EXPORT_SYMBOL_GPL(hns_roce_create_qp);
int to_hr_qp_type(int qp_type)
{
@@ -647,6 +764,7 @@ int to_hr_qp_type(int qp_type)
return transport_type;
}
+EXPORT_SYMBOL_GPL(to_hr_qp_type);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
@@ -654,7 +772,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
enum ib_qp_state cur_state, new_state;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
int ret = -EINVAL;
int p;
enum ib_mtu active_mtu;
@@ -692,7 +810,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
- if (attr->path_mtu > IB_MTU_2048 ||
+ if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
+ attr->path_mtu > IB_MTU_4096) ||
+ (hr_dev->caps.max_mtu == IB_MTU_2048 &&
+ attr->path_mtu > IB_MTU_2048) ||
attr->path_mtu < IB_MTU_256 ||
attr->path_mtu > active_mtu) {
dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
@@ -716,9 +837,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
- ret = -EPERM;
- dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
- new_state);
+ ret = 0;
goto out;
}
@@ -745,6 +864,7 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
}
}
+EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
@@ -761,6 +881,7 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
spin_unlock_irq(&recv_cq->lock);
}
}
+EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);
__be32 send_ieth(struct ib_send_wr *wr)
{
@@ -774,6 +895,7 @@ __be32 send_ieth(struct ib_send_wr *wr)
return 0;
}
}
+EXPORT_SYMBOL_GPL(send_ieth);
static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
@@ -785,11 +907,20 @@ void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
+EXPORT_SYMBOL_GPL(get_recv_wqe);
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
+EXPORT_SYMBOL_GPL(get_send_wqe);
+
+void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
+{
+ return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
+ (n << hr_qp->sge.sge_shift));
+}
+EXPORT_SYMBOL_GPL(get_send_extend_sge);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
struct ib_cq *ib_cq)
@@ -808,6 +939,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
return cur + nreq >= hr_wq->max_post;
}
+EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
@@ -823,7 +955,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
hr_dev->caps.num_qps - 1, SQP_NUM,
reserved_from_top);
if (ret) {
- dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
+ dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
ret);
return ret;
}
diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig
index 6e7d27a14061..f6d20ba88c03 100644
--- a/drivers/infiniband/hw/i40iw/Kconfig
+++ b/drivers/infiniband/hw/i40iw/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_I40IW
tristate "Intel(R) Ethernet X722 iWARP Driver"
depends on INET && I40E
+ depends on PCI
select GENERIC_ALLOCATOR
---help---
Intel(R) Ethernet X722 iWARP Driver
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index a65e4cbdce2f..4ae9131b6350 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -119,9 +119,6 @@
#define I40IW_CQP_COMPL_SQ_WQE_FLUSHED 3
#define I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED 4
-#define I40IW_MTU_TO_MSS 40
-#define I40IW_DEFAULT_MSS 1460
-
struct i40iw_cqp_compl_info {
u32 op_ret_val;
u16 maj_err_code;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 5230dd3c938c..493d6ef3d2d5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1188,7 +1188,7 @@ static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node
* i40iw_cm_timer_tick - system's timer expired callback
* @pass: Pointing to cm_core
*/
-static void i40iw_cm_timer_tick(unsigned long pass)
+static void i40iw_cm_timer_tick(struct timer_list *t)
{
unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
struct i40iw_cm_node *cm_node;
@@ -1196,10 +1196,9 @@ static void i40iw_cm_timer_tick(unsigned long pass)
struct list_head *list_core_temp;
struct i40iw_sc_vsi *vsi;
struct list_head *list_node;
- struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
+ struct i40iw_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
u32 settimer = 0;
unsigned long timetosend;
- struct i40iw_sc_dev *dev;
unsigned long flags;
struct list_head timer_list;
@@ -1267,13 +1266,15 @@ static void i40iw_cm_timer_tick(unsigned long pass)
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
goto done;
}
- cm_node->cm_core->stats_pkt_retrans++;
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
vsi = &cm_node->iwdev->vsi;
- dev = cm_node->dev;
- atomic_inc(&send_entry->sqbuf->refcount);
- i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
+
+ if (!cm_node->ack_rcvd) {
+ atomic_inc(&send_entry->sqbuf->refcount);
+ i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
+ cm_node->cm_core->stats_pkt_retrans++;
+ }
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
if (send_entry->send_retrans) {
send_entry->retranscount--;
@@ -1524,8 +1525,8 @@ static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool acti
break;
}
}
- if (!ret)
- clear_bit(port, cm_core->active_side_ports);
+ if (!ret)
+ clear_bit(port, cm_core->active_side_ports);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
} else {
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
@@ -2181,6 +2182,7 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
cm_node->cm_id = cm_info->cm_id;
ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
spin_lock_init(&cm_node->retrans_list_lock);
+ cm_node->ack_rcvd = false;
atomic_set(&cm_node->ref_count, 1);
/* associate our parent CM core */
@@ -2191,7 +2193,8 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
ts = current_kernel_time();
cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
- cm_node->tcp_cntxt.mss = iwdev->vsi.mss;
+ cm_node->tcp_cntxt.mss = (cm_node->ipv4) ? (iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV4) :
+ (iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV6);
cm_node->iwdev = iwdev;
cm_node->dev = &iwdev->sc_dev;
@@ -2406,6 +2409,7 @@ static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
case I40IW_CM_STATE_FIN_WAIT1:
case I40IW_CM_STATE_LAST_ACK:
cm_node->cm_id->rem_ref(cm_node->cm_id);
+ /* fall through */
case I40IW_CM_STATE_TIME_WAIT:
cm_node->state = I40IW_CM_STATE_CLOSED;
i40iw_rem_ref_cm_node(cm_node);
@@ -2719,7 +2723,10 @@ static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
if (datasize) {
cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ cm_node->ack_rcvd = false;
i40iw_handle_rcv_mpa(cm_node, rbuf);
+ } else {
+ cm_node->ack_rcvd = true;
}
break;
case I40IW_CM_STATE_LISTENING:
@@ -3195,8 +3202,7 @@ void i40iw_setup_cm_core(struct i40iw_device *iwdev)
INIT_LIST_HEAD(&cm_core->connected_nodes);
INIT_LIST_HEAD(&cm_core->listen_nodes);
- setup_timer(&cm_core->tcp_timer, i40iw_cm_timer_tick,
- (unsigned long)cm_core);
+ timer_setup(&cm_core->tcp_timer, i40iw_cm_timer_tick, 0);
spin_lock_init(&cm_core->ht_lock);
spin_lock_init(&cm_core->listen_list_lock);
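
The setup_timer()/unsigned-long-context pairs in this file are converted above to the timer_setup()/from_timer() pattern, where the callback receives the struct timer_list itself and recovers its containing object by offset arithmetic. A minimal userspace sketch of that pattern follows (GNU C for typeof; the struct name and the simplified timer_list/from_timer stand-ins are illustrative, not the kernel's real definitions):

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel timer API, not the real definitions. */
struct timer_list {
	void (*function)(struct timer_list *);
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define from_timer(var, timer, member) \
	container_of(timer, typeof(*(var)), member)

struct cm_core {
	int stats_pkt_retrans;
	struct timer_list tcp_timer;
};

/* New-style callback: gets the timer and recovers the owning object from it. */
static void tcp_timer_tick(struct timer_list *t)
{
	struct cm_core *cm_core = from_timer(cm_core, t, tcp_timer);

	cm_core->stats_pkt_retrans++;
	printf("retransmissions so far: %d\n", cm_core->stats_pkt_retrans);
}

int main(void)
{
	struct cm_core core = { .tcp_timer.function = tcp_timer_tick };

	core.tcp_timer.function(&core.tcp_timer);	/* simulate one expiry */
	return 0;
}
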
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 45abef76295b..0d5840d2c4fc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -360,6 +360,7 @@ struct i40iw_cm_node {
u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
struct i40iw_kmem_info mpa_hdr;
+ bool ack_rcvd;
};
/* structure for client or CM to fill when making CM api calls. */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 42ca5346777d..d88c6cf47cf2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -348,7 +348,10 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
u16 qs_handle;
int i;
- vsi->mss = l2params->mss;
+ if (vsi->mtu != l2params->mtu) {
+ vsi->mtu = l2params->mtu;
+ i40iw_reinitialize_ieq(dev);
+ }
i40iw_fill_qos_list(l2params->qs_handle_list);
for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
@@ -374,7 +377,7 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
* i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
* @qp: qp to be removed from qos
*/
-static void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
+void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
{
struct i40iw_sc_vsi *vsi = qp->vsi;
unsigned long flags;
@@ -479,6 +482,10 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
+ INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); /* for the cqp commands backlog. */
+
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPTAIL, 0);
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, 0);
i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
"%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
@@ -1774,6 +1781,53 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
+
+ switch (info->ae_id) {
+ case I40IW_AE_PRIV_OPERATION_DENIED:
+ case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case I40IW_AE_BAD_CLOSE:
+ case I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE:
+ case I40IW_AE_RDMA_READ_WHILE_ORD_ZERO:
+ case I40IW_AE_STAG_ZERO_INVALID:
+ case I40IW_AE_IB_RREQ_AND_Q1_FULL:
+ case I40IW_AE_WQE_UNEXPECTED_OPCODE:
+ case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
+ case I40IW_AE_DDP_UBE_INVALID_MO:
+ case I40IW_AE_DDP_UBE_INVALID_QN:
+ case I40IW_AE_DDP_NO_L_BIT:
+ case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
+ case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ case I40IW_AE_INVALID_ARP_ENTRY:
+ case I40IW_AE_INVALID_TCP_OPTION_RCVD:
+ case I40IW_AE_STALE_ARP_ENTRY:
+ case I40IW_AE_LLP_CLOSE_COMPLETE:
+ case I40IW_AE_LLP_CONNECTION_RESET:
+ case I40IW_AE_LLP_FIN_RECEIVED:
+ case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
+ case I40IW_AE_LLP_SYN_RECEIVED:
+ case I40IW_AE_LLP_TERMINATE_RECEIVED:
+ case I40IW_AE_LLP_TOO_MANY_RETRIES:
+ case I40IW_AE_LLP_DOUBT_REACHABILITY:
+ case I40IW_AE_RESET_SENT:
+ case I40IW_AE_TERMINATE_SENT:
+ case I40IW_AE_RESET_NOT_SENT:
+ case I40IW_AE_LCE_QP_CATASTROPHIC:
+ case I40IW_AE_QP_SUSPEND_COMPLETE:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ ae_src = I40IW_AE_SOURCE_RSVD;
+ break;
+ case I40IW_AE_LCE_CQ_CATASTROPHIC:
+ info->cq = true;
+ info->compl_ctx = LS_64_1(compl_ctx, 1);
+ ae_src = I40IW_AE_SOURCE_RSVD;
+ break;
+ }
+
switch (ae_src) {
case I40IW_AE_SOURCE_RQ:
case I40IW_AE_SOURCE_RQ_0011:
@@ -1807,6 +1861,8 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
info->compl_ctx = compl_ctx;
info->out_rdrsp = true;
break;
+ case I40IW_AE_SOURCE_RSVD:
+ /* fallthrough */
default:
break;
}
@@ -2357,7 +2413,6 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
qp->rcv_tph_en = info->rcv_tph_en;
qp->xmit_tph_en = info->xmit_tph_en;
qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
- qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
return 0;
}
@@ -2399,7 +2454,6 @@ static enum i40iw_status_code i40iw_sc_qp_create(
LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
- LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
@@ -2462,7 +2516,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
- LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
@@ -2694,7 +2747,7 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
- LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
+ LS_64(vsi->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
if (info->iwarp_info_valid) {
qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
@@ -4376,10 +4429,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
break;
- case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
- (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER);
- break;
case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
@@ -4395,7 +4444,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
(LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
break;
case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
- case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
break;
@@ -4541,7 +4589,8 @@ void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *inf
vsi->dev = info->dev;
vsi->back_vsi = info->back_vsi;
- vsi->mss = info->params->mss;
+ vsi->mtu = info->params->mtu;
+ vsi->exception_lan_queue = info->exception_lan_queue;
i40iw_fill_qos_list(info->params->qs_handle_list);
for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
@@ -4873,6 +4922,7 @@ enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40
vsi->pestat = info->pestat;
vsi->pestat->hw = vsi->dev->hw;
+ vsi->pestat->vsi = vsi;
if (info->stats_initialize) {
i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
@@ -5018,14 +5068,12 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
u8 db_size;
spin_lock_init(&dev->cqp_lock);
- INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for the cqp commands backlog. */
i40iw_device_init_uk(&dev->dev_uk);
dev->debug_mask = info->debug_mask;
dev->hmc_fn_id = info->hmc_fn_id;
- dev->exception_lan_queue = info->exception_lan_queue;
dev->is_pf = info->is_pf;
dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index 2ebaadbed379..65ec39e3746b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -73,6 +73,10 @@
#define I40IW_FIRST_NON_PF_STAT 4
+#define I40IW_MTU_TO_MSS_IPV4 40
+#define I40IW_MTU_TO_MSS_IPV6 60
+#define I40IW_DEFAULT_MTU 1500
+
#define LS_64_1(val, bits) ((u64)(uintptr_t)val << bits)
#define RS_64_1(val, bits) ((u64)(uintptr_t)val >> bits)
#define LS_32_1(val, bits) (u32)(val << bits)
@@ -128,6 +132,7 @@
&_ceq->ceqe_base[I40IW_RING_GETCURRENT_TAIL(_ceq->ceq_ring)] \
)
+#define I40IW_AE_SOURCE_RSVD 0x0
#define I40IW_AE_SOURCE_RQ 0x1
#define I40IW_AE_SOURCE_RQ_0011 0x3
@@ -539,9 +544,6 @@
#define I40IW_CQPSQ_QP_MSSCHANGE_SHIFT 52
#define I40IW_CQPSQ_QP_MSSCHANGE_MASK (1ULL << I40IW_CQPSQ_QP_MSSCHANGE_SHIFT)
-#define I40IW_CQPSQ_QP_STATRSRC_SHIFT 53
-#define I40IW_CQPSQ_QP_STATRSRC_MASK (1ULL << I40IW_CQPSQ_QP_STATRSRC_SHIFT)
-
#define I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT 54
#define I40IW_CQPSQ_QP_IGNOREMWBOUND_MASK \
(1ULL << I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT)
@@ -1105,6 +1107,9 @@
#define I40IWQPC_SNDMSS_SHIFT 16
#define I40IWQPC_SNDMSS_MASK (0x3fffUL << I40IWQPC_SNDMSS_SHIFT)
+#define I40IW_UDA_QPC_MAXFRAMESIZE_SHIFT 16
+#define I40IW_UDA_QPC_MAXFRAMESIZE_MASK (0x3fffUL << I40IW_UDA_QPC_MAXFRAMESIZE_SHIFT)
+
#define I40IWQPC_VLANTAG_SHIFT 32
#define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)
@@ -1296,8 +1301,13 @@
(0xffffffffULL << I40IWQPC_LOCAL_IPADDR0_SHIFT)
/* wqe size considering 32 bytes per wqe*/
-#define I40IWQP_SW_MIN_WQSIZE 4 /* 128 bytes */
-#define I40IWQP_SW_MAX_WQSIZE 2048 /* 2048 bytes */
+#define I40IW_QP_SW_MIN_WQSIZE 4 /*in WRs*/
+#define I40IW_SQ_RSVD 2
+#define I40IW_RQ_RSVD 1
+#define I40IW_MAX_QUANTAS_PER_WR 2
+#define I40IW_QP_SW_MAX_SQ_QUANTAS 2048
+#define I40IW_QP_SW_MAX_RQ_QUANTAS 16384
+#define I40IW_MAX_QP_WRS ((I40IW_QP_SW_MAX_SQ_QUANTAS / I40IW_MAX_QUANTAS_PER_WR) - 1)
#define I40IWQP_OP_RDMA_WRITE 0
#define I40IWQP_OP_RDMA_READ 1
@@ -1636,7 +1646,8 @@ enum i40iw_alignment {
#define I40IW_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
#define I40IW_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
#define I40IW_AE_AMP_MWBIND_BIND_DISABLED 0x011b
-#define I40IW_AE_AMP_WQE_INVALID_PARAMETER 0x0130
+#define I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
+#define I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
#define I40IW_AE_BAD_CLOSE 0x0201
#define I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
#define I40IW_AE_CQ_OPERATION_ERROR 0x0203
@@ -1644,12 +1655,10 @@ enum i40iw_alignment {
#define I40IW_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
#define I40IW_AE_STAG_ZERO_INVALID 0x0206
#define I40IW_AE_IB_RREQ_AND_Q1_FULL 0x0207
-#define I40IW_AE_SRQ_LIMIT 0x0209
#define I40IW_AE_WQE_UNEXPECTED_OPCODE 0x020a
#define I40IW_AE_WQE_INVALID_PARAMETER 0x020b
#define I40IW_AE_WQE_LSMM_TOO_LONG 0x0220
#define I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
-#define I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID 0x0302
#define I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
#define I40IW_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
#define I40IW_AE_DDP_UBE_INVALID_MO 0x0305
@@ -1663,12 +1672,10 @@ enum i40iw_alignment {
#define I40IW_AE_INVALID_ARP_ENTRY 0x0401
#define I40IW_AE_INVALID_TCP_OPTION_RCVD 0x0402
#define I40IW_AE_STALE_ARP_ENTRY 0x0403
-#define I40IW_AE_INVALID_WQE_LENGTH 0x0404
#define I40IW_AE_INVALID_MAC_ENTRY 0x0405
#define I40IW_AE_LLP_CLOSE_COMPLETE 0x0501
#define I40IW_AE_LLP_CONNECTION_RESET 0x0502
#define I40IW_AE_LLP_FIN_RECEIVED 0x0503
-#define I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
#define I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
#define I40IW_AE_LLP_SEGMENT_TOO_LARGE 0x0506
#define I40IW_AE_LLP_SEGMENT_TOO_SMALL 0x0507
@@ -1685,9 +1692,6 @@ enum i40iw_alignment {
#define I40IW_AE_LCE_QP_CATASTROPHIC 0x0700
#define I40IW_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
#define I40IW_AE_LCE_CQ_CATASTROPHIC 0x0702
-#define I40IW_AE_UDA_XMIT_FRAG_SEQ 0x0800
-#define I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0801
-#define I40IW_AE_UDA_XMIT_IPADDR_MISMATCH 0x0802
#define I40IW_AE_QP_SUSPEND_COMPLETE 0x0900
#define OP_DELETE_LOCAL_MAC_IPADDR_ENTRY 1
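
The I40IW_MTU_TO_MSS_IPV4/IPV6 constants added above are the fixed header overhead (20-byte TCP plus a 20-byte IPv4 or 40-byte IPv6 header) subtracted from the interface MTU now that the driver derives the MSS from the MTU instead of carrying a separate mss field. A standalone sketch of that arithmetic, using a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

/* Same overhead as the patch: 20 B TCP + 20 B IPv4, or 20 B TCP + 40 B IPv6. */
#define MTU_TO_MSS_IPV4 40
#define MTU_TO_MSS_IPV6 60

/* Hypothetical helper: TCP MSS implied by a given interface MTU. */
static uint16_t mss_from_mtu(uint16_t mtu, int ipv4)
{
	return ipv4 ? (uint16_t)(mtu - MTU_TO_MSS_IPV4)
		    : (uint16_t)(mtu - MTU_TO_MSS_IPV6);
}

int main(void)
{
	/* A 1500-byte Ethernet MTU gives the familiar 1460 (IPv4) and 1440 (IPv6). */
	printf("IPv4 MSS: %u\n", mss_from_mtu(1500, 1));
	printf("IPv6 MSS: %u\n", mss_from_mtu(1500, 0));
	return 0;
}
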
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 476867a3f584..e96bdafbcbb3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -408,8 +408,9 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
case I40IW_AE_LCE_CQ_CATASTROPHIC:
case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
- case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH:
+ case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
ctx_info->err_rq_idx_valid = false;
+ /* fall through */
default:
if (!info->sq && ctx_info->err_rq_idx_valid) {
ctx_info->err_rq_idx = info->wqe_idx;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 27590ae21881..e824296713e2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -353,6 +353,8 @@ static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
i40iw_destroy_ceq(iwdev, iwceq);
}
+
+ iwdev->sc_dev.ceq_valid = false;
}
/**
@@ -810,17 +812,16 @@ static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
iwdev->ceqs_count++;
}
-
exit:
- if (status) {
- if (!iwdev->ceqs_count) {
- kfree(iwdev->ceqlist);
- iwdev->ceqlist = NULL;
- } else {
- status = 0;
- }
+ if (status && !iwdev->ceqs_count) {
+ kfree(iwdev->ceqlist);
+ iwdev->ceqlist = NULL;
+ return status;
+ } else {
+ iwdev->sc_dev.ceq_valid = true;
+ return 0;
}
- return status;
+
}
/**
@@ -958,13 +959,13 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
memset(&info, 0, sizeof(info));
info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
info.cq_id = 2;
- info.qp_id = iwdev->sc_dev.exception_lan_queue;
+ info.qp_id = iwdev->vsi.exception_lan_queue;
info.count = 1;
info.pd_id = 2;
info.sq_size = 8192;
info.rq_size = 8192;
- info.buf_size = 2048;
- info.tx_buf_cnt = 16384;
+ info.buf_size = iwdev->vsi.mtu + VLAN_ETH_HLEN;
+ info.tx_buf_cnt = 4096;
status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
if (status)
i40iw_pr_err("ieq create fail\n");
@@ -972,6 +973,21 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
}
/**
+ * i40iw_reinitialize_ieq - destroy and re-create ieq
+ * @dev: iwarp device
+ */
+void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, false);
+ if (i40iw_initialize_ieq(iwdev)) {
+ iwdev->reset = true;
+ i40iw_request_reset(iwdev);
+ }
+}
+
+/**
* i40iw_hmc_setup - create hmc objects for the device
* @iwdev: iwarp device
*
@@ -1327,8 +1343,8 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
info.bar0 = ldev->hw_addr;
info.hw = &iwdev->hw;
info.debug_mask = debug;
- l2params.mss =
- (ldev->params.mtu) ? ldev->params.mtu - I40IW_MTU_TO_MSS : I40IW_DEFAULT_MSS;
+ l2params.mtu =
+ (ldev->params.mtu) ? ldev->params.mtu : I40IW_DEFAULT_MTU;
for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
qset = ldev->params.qos.prio_qos[i].qs_handle;
l2params.qs_handle_list[i] = qset;
@@ -1338,7 +1354,6 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
iwdev->dcb = true;
}
i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
- info.exception_lan_queue = 1;
info.vchnl_send = i40iw_virtchnl_send;
status = i40iw_device_init(&iwdev->sc_dev, &info);
@@ -1348,6 +1363,7 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
vsi_info.dev = &iwdev->sc_dev;
vsi_info.back_vsi = (void *)iwdev;
vsi_info.params = &l2params;
+ vsi_info.exception_lan_queue = 1;
i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
if (dev->is_pf) {
@@ -1748,7 +1764,7 @@ static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *cli
for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;
- l2params->mss = (params->mtu) ? params->mtu - I40IW_MTU_TO_MSS : iwdev->vsi.mss;
+ l2params->mtu = (params->mtu) ? params->mtu : iwdev->vsi.mtu;
INIT_WORK(&work->work, i40iw_l2params_worker);
queue_work(iwdev->param_wq, &work->work);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index 5498ad01c280..11d3a2a72100 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -86,7 +86,7 @@ void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *inf
void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params);
void i40iw_qp_add_qos(struct i40iw_sc_qp *qp);
-
+void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp);
void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
@@ -123,5 +123,6 @@ enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
struct i40iw_virt_mem *mem);
u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq);
+void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index 59f70676f0e0..796a815b53fd 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -488,7 +488,7 @@ static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));
- set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS));
+ set_64bit_val(qp_ctx, 48, LS_64(rsrc->buf_size, I40IW_UDA_QPC_MAXFRAMESIZE));
set_64bit_val(qp_ctx, 56, 0);
set_64bit_val(qp_ctx, 64, 1);
@@ -611,12 +611,14 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
qp->user_pri = 0;
i40iw_qp_add_qos(qp);
i40iw_puda_qp_setctx(rsrc);
- if (rsrc->ceq_valid)
+ if (rsrc->dev->ceq_valid)
ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
else
ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
- if (ret)
+ if (ret) {
+ i40iw_qp_rem_qos(qp);
i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
+ }
return ret;
}
@@ -704,7 +706,7 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
if (ret)
goto error;
- if (rsrc->ceq_valid)
+ if (rsrc->dev->ceq_valid)
ret = i40iw_cqp_cq_create_cmd(dev, cq);
else
ret = i40iw_puda_cq_wqe(dev, cq);
@@ -724,7 +726,7 @@ static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
struct i40iw_ccq_cqe_info compl_info;
struct i40iw_sc_dev *dev = rsrc->dev;
- if (rsrc->ceq_valid) {
+ if (rsrc->dev->ceq_valid) {
i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
return;
}
@@ -757,7 +759,7 @@ static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
struct i40iw_ccq_cqe_info compl_info;
struct i40iw_sc_dev *dev = rsrc->dev;
- if (rsrc->ceq_valid) {
+ if (rsrc->dev->ceq_valid) {
i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
return;
}
@@ -813,6 +815,7 @@ void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
switch (rsrc->completion) {
case PUDA_HASH_CRC_COMPLETE:
i40iw_free_hash_desc(rsrc->hash_desc);
+ /* fall through */
case PUDA_QP_CREATED:
if (!reset)
i40iw_puda_free_qp(rsrc);
@@ -921,7 +924,6 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
rsrc->xmit_complete = i40iw_ieq_tx_compl;
}
- rsrc->ceq_valid = info->ceq_valid;
rsrc->type = info->type;
rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
@@ -1400,7 +1402,8 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
pfpdu->rcv_nxt = fps;
pfpdu->fps = fps;
pfpdu->mode = true;
- pfpdu->max_fpdu_data = ieq->vsi->mss;
+ pfpdu->max_fpdu_data = (buf->ipv4) ? (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV4) :
+ (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV6);
pfpdu->pmode_count++;
INIT_LIST_HEAD(rxlist);
i40iw_ieq_check_first_buf(buf, fps);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
index dba05ce7d392..660aa3edae56 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
@@ -100,7 +100,6 @@ struct i40iw_puda_rsrc_info {
enum puda_resource_type type; /* ILQ or IEQ */
u32 count;
u16 pd_id;
- bool ceq_valid;
u32 cq_id;
u32 qp_id;
u32 sq_size;
@@ -125,7 +124,6 @@ struct i40iw_puda_rsrc {
enum puda_resource_type type;
u16 buf_size; /*buffer must be max datalen + tcpip hdr + mac */
u16 mss;
- bool ceq_valid;
u32 cq_id;
u32 qp_id;
u32 sq_size;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index 63118f6d5ab4..a27d392c92a2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -250,6 +250,7 @@ struct i40iw_vsi_pestat {
struct i40iw_dev_hw_stats last_read_hw_stats;
struct i40iw_dev_hw_stats_offsets hw_stats_offsets;
struct timer_list stats_timer;
+ struct i40iw_sc_vsi *vsi;
spinlock_t lock; /* rdma stats lock */
};
@@ -380,7 +381,6 @@ struct i40iw_sc_qp {
u8 *q2_buf;
u64 qp_compl_ctx;
u16 qs_handle;
- u16 exception_lan_queue;
u16 push_idx;
u8 sq_tph_val;
u8 rq_tph_val;
@@ -459,7 +459,8 @@ struct i40iw_sc_vsi {
u32 ieq_count;
struct i40iw_virt_mem ieq_mem;
struct i40iw_puda_rsrc *ieq;
- u16 mss;
+ u16 exception_lan_queue;
+ u16 mtu;
u8 fcn_id;
bool stats_fcn_id_alloc;
struct i40iw_qos qos[I40IW_MAX_USER_PRIORITY];
@@ -501,10 +502,10 @@ struct i40iw_sc_dev {
struct i40iw_hmc_fpm_misc hmc_fpm_misc;
u32 debug_mask;
- u16 exception_lan_queue;
u8 hmc_fn_id;
bool is_pf;
bool vchnl_up;
+ bool ceq_valid;
u8 vf_id;
wait_queue_head_t vf_reqs;
u64 cqp_cmd_stats[OP_SIZE_CQP_STAT_ARRAY];
@@ -534,7 +535,6 @@ struct i40iw_create_qp_info {
bool ord_valid;
bool tcp_ctx_valid;
bool cq_num_valid;
- bool static_rsrc;
bool arp_cache_idx_valid;
};
@@ -546,7 +546,6 @@ struct i40iw_modify_qp_info {
bool ord_valid;
bool tcp_ctx_valid;
bool cq_num_valid;
- bool static_rsrc;
bool arp_cache_idx_valid;
bool reset_tcp_conn;
bool remove_hash_idx;
@@ -568,13 +567,14 @@ struct i40iw_ccq_cqe_info {
struct i40iw_l2params {
u16 qs_handle_list[I40IW_MAX_USER_PRIORITY];
- u16 mss;
+ u16 mtu;
};
struct i40iw_vsi_init_info {
struct i40iw_sc_dev *dev;
void *back_vsi;
struct i40iw_l2params *params;
+ u16 exception_lan_queue;
};
struct i40iw_vsi_stats_info {
@@ -592,7 +592,6 @@ struct i40iw_device_init_info {
struct i40iw_hw *hw;
void __iomem *bar0;
enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16);
- u16 exception_lan_queue;
u8 hmc_fn_id;
bool is_pf;
u32 debug_mask;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index 0aadb7a0d1aa..3ec5389a81a1 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -821,6 +821,18 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
pring = &qp->rq_ring;
} else {
+ if (qp->first_sq_wq) {
+ qp->first_sq_wq = false;
+ if (!wqe_idx && (qp->sq_ring.head == qp->sq_ring.tail)) {
+ I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ I40IW_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, 0,
+ I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
+ memset(info, 0, sizeof(struct i40iw_cq_poll_info));
+ return i40iw_cq_poll_completion(cq, info);
+ }
+ }
+
if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
@@ -882,8 +894,21 @@ exit:
}
/**
+ * i40iw_qp_round_up - return rounded up QP WQ depth

+ * @wqdepth: WQ depth in quantas to round up
+ */
+static int i40iw_qp_round_up(u32 wqdepth)
+{
+ int scount = 1;
+
+ for (wqdepth--; scount <= 16; scount *= 2)
+ wqdepth |= wqdepth >> scount;
+
+ return ++wqdepth;
+}
+
+/**
* i40iw_get_wqe_shift - get shift count for maximum wqe size
- * @wqdepth: depth of wq required.
* @sge: Maximum Scatter Gather Elements wqe
* @inline_data: Maximum inline data size
* @shift: Returns the shift needed based on sge
@@ -893,22 +918,48 @@ exit:
* For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes).
* Shift of 2 otherwise (wqe size of 128 bytes).
*/
-enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data, u8 *shift)
+void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift)
{
- u32 size;
-
*shift = 0;
if (sge > 1 || inline_data > 16)
*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
+}
- /* check if wqdepth is multiple of 2 or not */
+/*
+ * i40iw_get_sqdepth - get SQ depth (quantas)
+ * @sq_size: SQ size
+ * @shift: shift which determines size of WQE
+ * @sqdepth: depth of SQ
+ *
+ */
+enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
+{
+ *sqdepth = i40iw_qp_round_up((sq_size << shift) + I40IW_SQ_RSVD);
- if ((wqdepth < I40IWQP_SW_MIN_WQSIZE) || (wqdepth & (wqdepth - 1)))
+ if (*sqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
+ *sqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
+ else if (*sqdepth > I40IW_QP_SW_MAX_SQ_QUANTAS)
return I40IW_ERR_INVALID_SIZE;
- size = wqdepth << *shift; /* multiple of 32 bytes count */
- if (size > I40IWQP_SW_MAX_WQSIZE)
+ return 0;
+}
+
+/*
+ * i40iw_get_rqdepth - get RQ depth (quantas)
+ * @rq_size: RQ size
+ * @shift: shift which determines size of WQE
+ * @rqdepth: depth of RQ
+ *
+ */
+enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth)
+{
+ *rqdepth = i40iw_qp_round_up((rq_size << shift) + I40IW_RQ_RSVD);
+
+ if (*rqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
+ *rqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
+ else if (*rqdepth > I40IW_QP_SW_MAX_RQ_QUANTAS)
return I40IW_ERR_INVALID_SIZE;
+
return 0;
}
@@ -962,9 +1013,7 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
return I40IW_ERR_INVALID_FRAG_COUNT;
- ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, info->max_inline_data, &sqshift);
- if (ret_code)
- return ret_code;
+ i40iw_get_wqe_shift(info->max_sq_frag_cnt, info->max_inline_data, &sqshift);
qp->sq_base = info->sq;
qp->rq_base = info->rq;
@@ -988,6 +1037,7 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
I40IW_RING_MOVE_TAIL(qp->sq_ring);
I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
qp->swqe_polarity = 1;
+ qp->first_sq_wq = true;
qp->swqe_polarity_deferred = 1;
qp->rwqe_polarity = 0;
@@ -997,9 +1047,7 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
switch (info->abi_ver) {
case 4:
- ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
- if (ret_code)
- return ret_code;
+ i40iw_get_wqe_shift(info->max_rq_frag_cnt, 0, &rqshift);
break;
case 5: /* fallthrough until next ABI version */
default:
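
The new i40iw_qp_round_up(), i40iw_get_sqdepth() and i40iw_get_rqdepth() helpers above size the work queues: the requested quanta count plus the reserved slots is rounded up to the next power of two with a bit-smearing loop, then clamped between the minimum and maximum depth. A minimal userspace sketch of the rounding step (the helper name and the SQ_RSVD/MIN_WQSIZE macros here merely mirror the patch's constants for illustration):

#include <stdint.h>
#include <stdio.h>

#define SQ_RSVD		2	/* mirrors I40IW_SQ_RSVD */
#define MIN_WQSIZE	4	/* mirrors I40IW_QP_SW_MIN_WQSIZE */

/* Round up to the next power of two with the same bit-smearing loop as the patch. */
static uint32_t round_up_pow2(uint32_t depth)
{
	uint32_t scount;

	for (depth--, scount = 1; scount <= 16; scount *= 2)
		depth |= depth >> scount;

	return ++depth;
}

int main(void)
{
	uint32_t sq_size = 100;					/* requested quanta */
	uint32_t sqdepth = round_up_pow2(sq_size + SQ_RSVD);

	if (sqdepth < MIN_WQSIZE)
		sqdepth = MIN_WQSIZE;

	printf("requested %u -> depth %u\n", sq_size, sqdepth);	/* 100 -> 128 */
	return 0;
}
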
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index 84be6f13b9c5..e73efc59a0ab 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -204,18 +204,6 @@ struct i40iw_post_inline_send {
u32 len;
};
-struct i40iw_post_send_w_inv {
- i40iw_sgl sg_list;
- u32 num_sges;
- i40iw_stag remote_stag_to_inv;
-};
-
-struct i40iw_post_inline_send_w_inv {
- void *data;
- u32 len;
- i40iw_stag remote_stag_to_inv;
-};
-
struct i40iw_rdma_write {
i40iw_sgl lo_sg_list;
u32 num_lo_sges;
@@ -257,9 +245,6 @@ struct i40iw_post_sq_info {
bool defer_flag;
union {
struct i40iw_post_send send;
- struct i40iw_post_send send_w_sol;
- struct i40iw_post_send_w_inv send_w_inv;
- struct i40iw_post_send_w_inv send_w_sol_inv;
struct i40iw_rdma_write rdma_write;
struct i40iw_rdma_read rdma_read;
struct i40iw_rdma_read rdma_read_inv;
@@ -267,9 +252,6 @@ struct i40iw_post_sq_info {
struct i40iw_inv_local_stag inv_local_stag;
struct i40iw_inline_rdma_write inline_rdma_write;
struct i40iw_post_inline_send inline_send;
- struct i40iw_post_inline_send inline_send_w_sol;
- struct i40iw_post_inline_send_w_inv inline_send_w_inv;
- struct i40iw_post_inline_send_w_inv inline_send_w_sol_inv;
} op;
};
@@ -376,6 +358,7 @@ struct i40iw_qp_uk {
u8 rwqe_polarity;
u8 rq_wqe_size;
u8 rq_wqe_size_multiplier;
+ bool first_sq_wq;
bool deferred_flag;
};
@@ -442,5 +425,7 @@ enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size);
enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size);
enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
u8 *wqe_size);
-enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data, u8 *shift);
+void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift);
+enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth);
+enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index e52dbbb4165e..8845dba7c438 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -168,11 +168,16 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
if (netdev != event_netdev)
return NOTIFY_DONE;
- if (upper_dev)
- local_ipaddr = ntohl(
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
- else
+ if (upper_dev) {
+ struct in_device *in;
+
+ rcu_read_lock();
+ in = __in_dev_get_rcu(upper_dev);
+ local_ipaddr = ntohl(in->ifa_list->ifa_address);
+ rcu_read_unlock();
+ } else {
local_ipaddr = ntohl(ifa->ifa_address);
+ }
switch (event) {
case NETDEV_DOWN:
action = I40IW_ARP_DELETE;
@@ -870,9 +875,9 @@ void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
* i40iw_terminate_timeout - timeout happened
* @context: points to iwarp qp
*/
-static void i40iw_terminate_timeout(unsigned long context)
+static void i40iw_terminate_timeout(struct timer_list *t)
{
- struct i40iw_qp *iwqp = (struct i40iw_qp *)context;
+ struct i40iw_qp *iwqp = from_timer(iwqp, t, terminate_timer);
struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
i40iw_terminate_done(qp, 1);
@@ -889,8 +894,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
iwqp = (struct i40iw_qp *)qp->back_qp;
i40iw_add_ref(&iwqp->ibqp);
- setup_timer(&iwqp->terminate_timer, i40iw_terminate_timeout,
- (unsigned long)iwqp);
+ timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
iwqp->terminate_timer.expires = jiffies + HZ;
add_timer(&iwqp->terminate_timer);
}
@@ -1445,11 +1449,12 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in
* i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
* @vsi: pointer to the vsi structure
*/
-static void i40iw_hw_stats_timeout(unsigned long vsi)
+static void i40iw_hw_stats_timeout(struct timer_list *t)
{
- struct i40iw_sc_vsi *sc_vsi = (struct i40iw_sc_vsi *)vsi;
+ struct i40iw_vsi_pestat *pf_devstat = from_timer(pf_devstat, t,
+ stats_timer);
+ struct i40iw_sc_vsi *sc_vsi = pf_devstat->vsi;
struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
- struct i40iw_vsi_pestat *pf_devstat = sc_vsi->pestat;
struct i40iw_vsi_pestat *vf_devstat = NULL;
u16 iw_vf_idx;
unsigned long flags;
@@ -1480,8 +1485,7 @@ void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
{
struct i40iw_vsi_pestat *devstat = vsi->pestat;
- setup_timer(&devstat->stats_timer, i40iw_hw_stats_timeout,
- (unsigned long)vsi);
+ timer_setup(&devstat->stats_timer, i40iw_hw_stats_timeout, 0);
mod_timer(&devstat->stats_timer,
jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 62be0a41ad0b..3c6f3ce88f89 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -69,7 +69,7 @@ static int i40iw_query_device(struct ib_device *ibdev,
props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
props->max_qp = iwdev->max_qp - iwdev->used_qps;
- props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
+ props->max_qp_wr = I40IW_MAX_QP_WRS;
props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
props->max_cq = iwdev->max_cq - iwdev->used_cqs;
props->max_cqe = iwdev->max_cqe;
@@ -381,22 +381,6 @@ static int i40iw_dealloc_pd(struct ib_pd *ibpd)
}
/**
- * i40iw_qp_roundup - return round up qp ring size
- * @wr_ring_size: ring size to round up
- */
-static int i40iw_qp_roundup(u32 wr_ring_size)
-{
- int scount = 1;
-
- if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE)
- wr_ring_size = I40IWQP_SW_MIN_WQSIZE;
-
- for (wr_ring_size--; scount <= 16; scount *= 2)
- wr_ring_size |= wr_ring_size >> scount;
- return ++wr_ring_size;
-}
-
-/**
* i40iw_get_pbl - Retrieve pbl from a list given a virtual
* address
* @va: user virtual address
@@ -515,21 +499,19 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
{
struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
u32 sqdepth, rqdepth;
- u32 sq_size, rq_size;
u8 sqshift;
u32 size;
enum i40iw_status_code status;
struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
- sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
- rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);
-
- status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
+ i40iw_get_wqe_shift(ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
+ status = i40iw_get_sqdepth(ukinfo->sq_size, sqshift, &sqdepth);
if (status)
return -ENOMEM;
- sqdepth = sq_size << sqshift;
- rqdepth = rq_size << I40IW_MAX_RQ_WQE_SHIFT;
+ status = i40iw_get_rqdepth(ukinfo->rq_size, I40IW_MAX_RQ_WQE_SHIFT, &rqdepth);
+ if (status)
+ return -ENOMEM;
size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
@@ -559,8 +541,8 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);
- ukinfo->sq_size = sq_size;
- ukinfo->rq_size = rq_size;
+ ukinfo->sq_size = sqdepth >> sqshift;
+ ukinfo->rq_size = rqdepth >> I40IW_MAX_RQ_WQE_SHIFT;
ukinfo->qp_id = iwqp->ibqp.qp_num;
return 0;
}
@@ -2204,6 +2186,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
ukqp = &iwqp->sc_qp.qp_uk;
spin_lock_irqsave(&iwqp->lock, flags);
+
+ if (iwqp->flush_issued) {
+ err = -EINVAL;
+ goto out;
+ }
+
while (ib_wr) {
inv_stag = false;
memset(&info, 0, sizeof(info));
@@ -2346,6 +2334,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
ib_wr = ib_wr->next;
}
+out:
if (err)
*bad_wr = ib_wr;
else
@@ -2378,6 +2367,12 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
memset(&post_recv, 0, sizeof(post_recv));
spin_lock_irqsave(&iwqp->lock, flags);
+
+ if (iwqp->flush_issued) {
+ err = -EINVAL;
+ goto out;
+ }
+
while (ib_wr) {
post_recv.num_sges = ib_wr->num_sge;
post_recv.wr_id = ib_wr->wr_id;
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 538c46a73248..6dee4fdc5d67 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -92,12 +92,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
int ret;
memcpy(&in6, grh->dgid.raw, sizeof(in6));
- if (rdma_is_multicast_addr(&in6)) {
+ if (rdma_is_multicast_addr(&in6))
is_mcast = 1;
- rdma_get_mcast_mac(&in6, ah->av.eth.mac);
- } else {
- memcpy(ah->av.eth.mac, ah_attr->roce.dmac, ETH_ALEN);
- }
+
+ memcpy(ah->av.eth.mac, ah_attr->roce.dmac, ETH_ALEN);
ret = ib_get_cached_gid(pd->device, rdma_ah_get_port_num(ah_attr),
grh->sgid_index, &sgid, &gid_attr);
if (ret)
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index cab796341697..bf4f14a1b4fc 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -140,14 +140,18 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
{
int err;
int cqe_size = dev->dev->caps.cqe_size;
+ int shift;
+ int n;
*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
- err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
- (*umem)->page_shift, &buf->mtt);
+ n = ib_umem_page_count(*umem);
+ shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
+ err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
+
if (err)
goto err_buf;
@@ -768,11 +772,13 @@ repoll:
switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
case MLX4_OPCODE_RDMA_WRITE_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ /* fall through */
case MLX4_OPCODE_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case MLX4_OPCODE_SEND_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ /* fall through */
case MLX4_OPCODE_SEND:
case MLX4_OPCODE_SEND_INVAL:
wc->opcode = IB_WC_SEND;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c636842c5be0..8c8a16791a3f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -563,6 +563,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_wq_type_rq = props->max_qp;
}
+ props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
+ props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
+
if (!mlx4_is_slave(dev->dev))
err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
@@ -581,6 +584,23 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
sizeof(struct mlx4_wqe_data_seg);
}
+ if (uhw->outlen >= resp.response_length + sizeof(resp.rss_caps)) {
+ resp.response_length += sizeof(resp.rss_caps);
+ if (props->rss_caps.supported_qpts) {
+ resp.rss_caps.rx_hash_function =
+ MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
+ resp.rss_caps.rx_hash_fields_mask =
+ MLX4_IB_RX_HASH_SRC_IPV4 |
+ MLX4_IB_RX_HASH_DST_IPV4 |
+ MLX4_IB_RX_HASH_SRC_IPV6 |
+ MLX4_IB_RX_HASH_DST_IPV6 |
+ MLX4_IB_RX_HASH_SRC_PORT_TCP |
+ MLX4_IB_RX_HASH_DST_PORT_TCP |
+ MLX4_IB_RX_HASH_SRC_PORT_UDP |
+ MLX4_IB_RX_HASH_DST_PORT_UDP;
+ }
+ }
+
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
if (err)
@@ -2733,6 +2753,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str;
ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
+ ibdev->ib_dev.uverbs_ex_cmd_mask |=
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
+
if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
IB_LINK_LAYER_ETHERNET) ||
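
The mlx4_ib_query_device() change above reports rss_caps only when the caller's output buffer is large enough, growing resp.response_length field by field so older userspace keeps working. A small userspace sketch of that extensible-response pattern (the struct layout and copy helper are illustrative stand-ins, not the driver's own types):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Extensible response layout: old fields first, newer fields appended at the end. */
struct query_resp {
	uint32_t comp_mask;
	uint32_t response_length;
	uint64_t rss_fields;		/* newer, optional field */
};

/* Hypothetical stand-in for ib_copy_to_udata(): copy only what the user can hold. */
static int copy_to_user_buf(void *dst, size_t dst_len, const void *src, size_t len)
{
	if (len > dst_len)
		return -1;
	memcpy(dst, src, len);
	return 0;
}

int main(void)
{
	unsigned char userbuf[sizeof(struct query_resp)];
	size_t outlen = sizeof(userbuf);	/* size the user's buffer claims to have */
	struct query_resp resp = { 0 };

	resp.response_length = offsetof(struct query_resp, rss_fields);

	/* Report the newer capability only if the user's buffer can hold it. */
	if (outlen >= resp.response_length + sizeof(resp.rss_fields)) {
		resp.rss_fields = 0xff;
		resp.response_length += sizeof(resp.rss_fields);
	}

	return copy_to_user_buf(userbuf, outlen, &resp, resp.response_length);
}
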
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 70eb9f917303..81ffc007e0a1 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -944,6 +944,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
switch (sa_mad->mad_hdr.method) {
case IB_MGMT_METHOD_SET:
may_create = 1;
+ /* fall through */
case IB_SA_METHOD_DELETE:
req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 1fa19820355a..e14919c15b06 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -47,6 +47,7 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cq.h>
#define MLX4_IB_DRV_NAME "mlx4_ib"
@@ -644,12 +645,18 @@ enum query_device_resp_mask {
QUERY_DEVICE_RESP_MASK_TIMESTAMP = 1UL << 0,
};
+struct mlx4_ib_rss_caps {
+ __u64 rx_hash_fields_mask; /* enum mlx4_rx_hash_fields */
+ __u8 rx_hash_function; /* enum mlx4_rx_hash_function_flags */
+ __u8 reserved[7];
+};
+
struct mlx4_uverbs_ex_query_device_resp {
- __u32 comp_mask;
- __u32 response_length;
- __u64 hca_core_clock_offset;
- __u32 max_inl_recv_sz;
- __u32 reserved;
+ __u32 comp_mask;
+ __u32 response_length;
+ __u64 hca_core_clock_offset;
+ __u32 max_inl_recv_sz;
+ struct mlx4_ib_rss_caps rss_caps;
};
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
@@ -929,5 +936,7 @@ struct ib_rwq_ind_table
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
+ int *num_of_mtts);
#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index e6f77f63da75..313bfb9ccb71 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -87,50 +87,286 @@ err_free:
return ERR_PTR(err);
}
+enum {
+ MLX4_MAX_MTT_SHIFT = 31
+};
+
+static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
+ struct mlx4_mtt *mtt,
+ u64 mtt_size, u64 mtt_shift, u64 len,
+ u64 cur_start_addr, u64 *pages,
+ int *start_index, int *npages)
+{
+ u64 cur_end_addr = cur_start_addr + len;
+ u64 cur_end_addr_aligned = 0;
+ u64 mtt_entries;
+ int err = 0;
+ int k;
+
+ len += (cur_start_addr & (mtt_size - 1ULL));
+ cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
+ len += (cur_end_addr_aligned - cur_end_addr);
+ if (len & (mtt_size - 1ULL)) {
+ pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n",
+ len, mtt_size);
+ return -EINVAL;
+ }
+
+ mtt_entries = (len >> mtt_shift);
+
+ /*
+ * Align the MTT start address to the mtt_size.
+ * Required to handle cases when the MR starts in the middle of an MTT
+ * record. Was not required in old code since the physical addresses
+ * provided by the dma subsystem were page aligned, which was also the
+ * MTT size.
+ */
+ cur_start_addr = round_down(cur_start_addr, mtt_size);
+ /* A new block is started ... */
+ for (k = 0; k < mtt_entries; ++k) {
+ pages[*npages] = cur_start_addr + (mtt_size * k);
+ (*npages)++;
+ /*
+ * Be friendly to mlx4_write_mtt() and pass it chunks of
+ * appropriate size.
+ */
+ if (*npages == PAGE_SIZE / sizeof(u64)) {
+ err = mlx4_write_mtt(dev->dev, mtt, *start_index,
+ *npages, pages);
+ if (err)
+ return err;
+
+ (*start_index) += *npages;
+ *npages = 0;
+ }
+ }
+
+ return 0;
+}
+
+static inline u64 alignment_of(u64 ptr)
+{
+ return ilog2(ptr & (~(ptr - 1)));
+}
+
+static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
+ u64 current_block_end,
+ u64 block_shift)
+{
+ /* Check whether the alignment of the new block is aligned as well as
+ * the previous block.
+ * Block address must start with zeros till size of entity_size.
+ */
+ if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
+ /*
+ * It is not as well aligned as the previous block - reduce the
+ * mtt size accordingly. Here we take the lowest set bit (the
+ * rightmost bit that is 1).
+ */
+ block_shift = alignment_of(next_block_start);
+
+ /*
+ * Check whether the alignment of the end of previous block - is it
+ * aligned as well as the start of the block
+ */
+ if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
+ /*
+ * It is not as well aligned as the start of the block -
+ * reduce the mtt size accordingly.
+ */
+ block_shift = alignment_of(current_block_end);
+
+ return block_shift;
+}
+
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
struct ib_umem *umem)
{
u64 *pages;
- int i, k, entry;
- int n;
- int len;
+ u64 len = 0;
int err = 0;
+ u64 mtt_size;
+ u64 cur_start_addr = 0;
+ u64 mtt_shift;
+ int start_index = 0;
+ int npages = 0;
struct scatterlist *sg;
+ int i;
pages = (u64 *) __get_free_page(GFP_KERNEL);
if (!pages)
return -ENOMEM;
- i = n = 0;
+ mtt_shift = mtt->page_shift;
+ mtt_size = 1ULL << mtt_shift;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> mtt->page_shift;
- for (k = 0; k < len; ++k) {
- pages[i++] = sg_dma_address(sg) +
- (k << umem->page_shift);
- /*
- * Be friendly to mlx4_write_mtt() and
- * pass it chunks of appropriate size.
- */
- if (i == PAGE_SIZE / sizeof (u64)) {
- err = mlx4_write_mtt(dev->dev, mtt, n,
- i, pages);
- if (err)
- goto out;
- n += i;
- i = 0;
- }
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+ if (cur_start_addr + len == sg_dma_address(sg)) {
+ /* still the same block */
+ len += sg_dma_len(sg);
+ continue;
}
+ /*
+ * A new block is started ...
+ * If len is misaligned, write an extra mtt entry to cover the
+ * misaligned area (round up the division)
+ */
+ err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
+ mtt_shift, len,
+ cur_start_addr,
+ pages, &start_index,
+ &npages);
+ if (err)
+ goto out;
+
+ cur_start_addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
}
- if (i)
- err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);
+ /* Handle the last block */
+ if (len > 0) {
+ /*
+ * If len is misaligned, write an extra mtt entry to cover
+ * the misaligned area (round up the division)
+ */
+ err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
+ mtt_shift, len,
+ cur_start_addr, pages,
+ &start_index, &npages);
+ if (err)
+ goto out;
+ }
+
+ if (npages)
+ err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);
out:
free_page((unsigned long) pages);
return err;
}
+/*
+ * Calculate optimal mtt size based on contiguous pages.
+ * The function also accounts, in *num_of_mtts, for pages that are not aligned
+ * to the calculated mtt_size. For that the first and last chunk lengths are
+ * checked, and if they are not aligned to mtt_size the non-aligned pages are
+ * added to the total page count. All chunks in the middle are already handled
+ * as part of the mtt shift calculation for both their start and end addresses.
+ */
+int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
+ int *num_of_mtts)
+{
+ u64 block_shift = MLX4_MAX_MTT_SHIFT;
+ u64 min_shift = umem->page_shift;
+ u64 last_block_aligned_end = 0;
+ u64 current_block_start = 0;
+ u64 first_block_start = 0;
+ u64 current_block_len = 0;
+ u64 last_block_end = 0;
+ struct scatterlist *sg;
+ u64 current_block_end;
+ u64 misalignment_bits;
+ u64 next_block_start;
+ u64 total_len = 0;
+ int i;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+ /*
+ * Initialization - save the first chunk start as the
+ * current_block_start - block means contiguous pages.
+ */
+ if (current_block_len == 0 && current_block_start == 0) {
+ current_block_start = sg_dma_address(sg);
+ first_block_start = current_block_start;
+ /*
+ * Find the bits that are different between the physical
+ * address and the virtual address for the start of the
+ * MR.
+ * umem_get aligned the start_va to a page boundary.
+ * Therefore, we need to align the start va to the same
+ * boundary.
+ * misalignment_bits is needed to handle the case of a
+ * single memory region. In this case, the rest of the
+ * logic will not reduce the block size. If we use a
+ * block size which is bigger than the alignment of the
+ * misalignment bits, we might use the virtual page
+ * number instead of the physical page number, resulting
+ * in access to the wrong data.
+ */
+ misalignment_bits =
+ (start_va & (~(((u64)(BIT(umem->page_shift))) - 1ULL)))
+ ^ current_block_start;
+ block_shift = min(alignment_of(misalignment_bits),
+ block_shift);
+ }
+
+ /*
+ * Go over the scatter entries and check if they continue the
+ * previous scatter entry.
+ */
+ next_block_start = sg_dma_address(sg);
+ current_block_end = current_block_start + current_block_len;
+ /* If we have a split (non-contig.) between two blocks */
+ if (current_block_end != next_block_start) {
+ block_shift = mlx4_ib_umem_calc_block_mtt
+ (next_block_start,
+ current_block_end,
+ block_shift);
+
+ /*
+ * If we reached the minimum shift for 4k page we stop
+ * the loop.
+ */
+ if (block_shift <= min_shift)
+ goto end;
+
+ /*
+ * If not saved yet we are in first block - we save the
+ * length of first block to calculate the
+ * non_aligned_pages number at the end.
+ */
+ total_len += current_block_len;
+
+ /* Start a new block */
+ current_block_start = next_block_start;
+ current_block_len = sg_dma_len(sg);
+ continue;
+ }
+ /* The scatter entry is another part of the current block,
+ * increase the block size.
+ * An entry in the scatter can be larger than 4k (page) as of
+ * dma mapping which merge some blocks together.
+ */
+ current_block_len += sg_dma_len(sg);
+ }
+
+ /* Account for the last block in the total len */
+ total_len += current_block_len;
+ /* Add to the first block the misalignment that it suffers from. */
+ total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
+ last_block_end = current_block_start + current_block_len;
+ last_block_aligned_end = round_up(last_block_end, 1 << block_shift);
+ total_len += (last_block_aligned_end - last_block_end);
+
+ if (total_len & ((1ULL << block_shift) - 1ULL))
+ pr_warn("misaligned total length detected (%llu, %llu)!",
+ total_len, block_shift);
+
+ *num_of_mtts = total_len >> block_shift;
+end:
+ if (block_shift < min_shift) {
+ /*
+ * If shift is less than the min we set a warning and return the
+ * min shift.
+ */
+ pr_warn("umem_calc_optimal_mtt_size - unexpected shift %lld\n", block_shift);
+
+ block_shift = min_shift;
+ }
+ return block_shift;
+}
+
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
@@ -155,7 +391,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
n = ib_umem_page_count(mr->umem);
- shift = mr->umem->page_shift;
+ shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
convert_access(access_flags), n, shift, &mr->mmr);
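
mlx4_ib_umem_calc_optimal_mtt_size() above merges physically contiguous scatterlist entries into blocks and picks the largest MTT entry size (block shift) to which every block boundary is aligned, never going below the page shift; alignment_of() is the index of an address's lowest set bit. A simplified userspace sketch of that idea (the sample addresses are hypothetical, and the real function additionally folds in the start-VA misalignment and the first/last chunk lengths):

#include <stdint.h>
#include <stdio.h>

#define MAX_MTT_SHIFT	31
#define PAGE_SHIFT_4K	12

/* Largest power-of-two alignment of a non-zero address: index of its lowest set bit. */
static unsigned int alignment_of(uint64_t addr)
{
	return (unsigned int)__builtin_ctzll(addr);
}

int main(void)
{
	/* Hypothetical boundaries of three merged DMA blocks. */
	uint64_t boundaries[] = { 0x200000, 0x283000, 0x290000 };
	unsigned int shift = MAX_MTT_SHIFT;
	size_t i;

	for (i = 0; i < sizeof(boundaries) / sizeof(boundaries[0]); i++) {
		unsigned int a = alignment_of(boundaries[i]);

		if (a < shift)
			shift = a;
	}
	if (shift < PAGE_SHIFT_4K)
		shift = PAGE_SHIFT_4K;

	/* 0x283000 is only 4 KiB aligned, so each MTT entry covers 4096 bytes here. */
	printf("chosen MTT shift: %u (entry size %llu bytes)\n",
	       shift, (unsigned long long)1 << shift);
	return 0;
}
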
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b6b33d99b0b4..013049bcdb53 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1038,6 +1038,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
struct mlx4_ib_create_wq wq;
} ucmd;
size_t copy_len;
+ int shift;
+ int n;
copy_len = (src == MLX4_IB_QP_SRC) ?
sizeof(struct mlx4_ib_create_qp) :
@@ -1100,8 +1102,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;
}
- err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
- qp->umem->page_shift, &qp->mtt);
+ n = ib_umem_page_count(qp->umem);
+ shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
+ err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+
if (err)
goto err_buf;
@@ -2182,11 +2186,6 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
(to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));
- if (rwq_ind_tbl) {
- fill_qp_rss_context(context, qp);
- context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET);
- }
-
if (!(attr_mask & IB_QP_PATH_MIG_STATE))
context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
else {
@@ -2216,7 +2215,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
context->mtu_msgmax = (IB_MTU_4096 << 5) |
ilog2(dev->dev->caps.max_gso_sz);
else
- context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
+ context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
} else if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
pr_err("path MTU (%u) is invalid\n",
@@ -2387,6 +2386,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
context->pd = cpu_to_be32(pd->pdn);
if (!rwq_ind_tbl) {
+ context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
get_cqs(qp, src_type, &send_cq, &recv_cq);
} else { /* Set dummy CQs to be compatible with HV and PRM */
send_cq = to_mcq(rwq_ind_tbl->ind_tbl[0]->cq);
@@ -2394,7 +2394,6 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
}
context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
- context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
/* Set "fast registration enabled" for all kernel QPs */
if (!ibuobject)
@@ -2513,7 +2512,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
MLX4_IB_LINK_TYPE_ETH;
if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
/* set QP to receive both tunneled & non-tunneled packets */
- if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+ if (!rwq_ind_tbl)
context->srqn = cpu_to_be32(7 << 28);
}
}
@@ -2562,6 +2561,13 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
}
}
+ if (rwq_ind_tbl &&
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT) {
+ fill_qp_rss_context(context, qp);
+ context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET);
+ }
+
err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
to_mlx4_state(new_state), context, optpar,
sqd_event, &qp->mqp);
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 3363e29157f6..fe269f680103 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -89,10 +89,6 @@ struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
resp.response_length = min_resp_len;
- err = ib_resolve_eth_dmac(pd->device, ah_attr);
- if (err)
- return ERR_PTR(err);
-
memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN);
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 2aa53f427685..18705cbcdc8c 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -124,11 +124,13 @@ static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
case MLX5_OPCODE_RDMA_WRITE_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ /* fall through */
case MLX5_OPCODE_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case MLX5_OPCODE_SEND_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ /* fall through */
case MLX5_OPCODE_SEND:
case MLX5_OPCODE_SEND_INVAL:
wc->opcode = IB_WC_SEND;
@@ -752,13 +754,13 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
int err;
ucmdlen = udata->inlen < sizeof(ucmd) ?
- (sizeof(ucmd) - sizeof(ucmd.reserved)) : sizeof(ucmd);
+ (sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd);
if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
return -EFAULT;
if (ucmdlen == sizeof(ucmd) &&
- ucmd.reserved != 0)
+ (ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD)))
return -EINVAL;
if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
@@ -802,8 +804,10 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
*index = to_mucontext(context)->bfregi.sys_pages[0];
if (ucmd.cqe_comp_en == 1) {
- if (unlikely((*cqe_size != 64) ||
- !MLX5_CAP_GEN(dev->mdev, cqe_compression))) {
+ if (!((*cqe_size == 128 &&
+ MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
+ (*cqe_size == 64 &&
+ MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
err = -EOPNOTSUPP;
mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
*cqe_size);
@@ -826,6 +830,19 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
ilog2(ucmd.cqe_comp_res_format));
}
+ if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
+ if (*cqe_size != 128 ||
+ !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
+ err = -EOPNOTSUPP;
+ mlx5_ib_warn(dev,
+ "CQE padding is not supported for CQE size of %dB!\n",
+ *cqe_size);
+ goto err_cqb;
+ }
+
+ cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
+ }
+
return 0;
err_cqb:
@@ -985,7 +1002,10 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
cq->cqe_size = cqe_size;
cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
- MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+ MLX5_SET(cqc, cqc, cqe_sz,
+ cqe_sz_to_mlx_sz(cqe_size,
+ cq->private_flags &
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
MLX5_SET(cqc, cqc, uar_page, index);
MLX5_SET(cqc, cqc, c_eqn, eqn);
@@ -1129,6 +1149,9 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
return -ENOSYS;
+ if (cq_period > MLX5_MAX_CQ_PERIOD)
+ return -EINVAL;
+
err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
cq_period, cq_count);
if (err)
@@ -1335,7 +1358,10 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);
- MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+ MLX5_SET(cqc, cqc, cqe_sz,
+ cqe_sz_to_mlx_sz(cqe_size,
+ cq->private_flags &
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
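
The CQE-padding hunks in this file follow the usual uverbs pattern for a new user flag: reject any bit the kernel does not know, then honor a known bit only when the device capability and the other parameters (here, a 128B CQE) allow it. A minimal standalone sketch of that pattern, with hypothetical names rather than the mlx5 ones:

#include <linux/errno.h>

#define MY_CQ_FLAG_PAD_128B	(1U << 0)
#define MY_CQ_FLAGS_SUPPORTED	MY_CQ_FLAG_PAD_128B

static int check_cq_flags(unsigned int user_flags, int hw_has_pad, int cqe_size)
{
	/* Unknown bits from userspace are rejected outright. */
	if (user_flags & ~MY_CQ_FLAGS_SUPPORTED)
		return -EINVAL;

	/* A known bit is honored only if the device and CQE size allow it. */
	if ((user_flags & MY_CQ_FLAG_PAD_128B) && (!hw_has_pad || cqe_size != 128))
		return -EOPNOTSUPP;

	return 0;
}
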
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 552f7bd4ecc3..543d0a4c8bf3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -715,6 +715,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+ if (MLX5_CAP_GEN(mdev, end_pad))
+ props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
+
props->vendor_part_id = mdev->pdev->device;
props->hw_ver = mdev->pdev->revision;
@@ -787,6 +790,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
}
+ if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
+ props->cq_caps.max_cq_moderation_count =
+ MLX5_MAX_CQ_COUNT;
+ props->cq_caps.max_cq_moderation_period =
+ MLX5_MAX_CQ_PERIOD;
+ }
+
if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
resp.cqe_comp_caps.max_num =
MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
@@ -824,8 +834,16 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
}
- if (field_avail(typeof(resp), reserved, uhw->outlen))
- resp.response_length += sizeof(resp.reserved);
+ if (field_avail(typeof(resp), flags, uhw->outlen)) {
+ resp.response_length += sizeof(resp.flags);
+
+ if (MLX5_CAP_GEN(mdev, cqe_compression_128))
+ resp.flags |=
+ MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;
+
+ if (MLX5_CAP_GEN(mdev, cqe_128_always))
+ resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
+ }
if (field_avail(typeof(resp), sw_parsing_caps,
uhw->outlen)) {
@@ -848,6 +866,36 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
+ if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen)) {
+ resp.response_length += sizeof(resp.striding_rq_caps);
+ if (MLX5_CAP_GEN(mdev, striding_rq)) {
+ resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
+ resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
+ resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
+ resp.striding_rq_caps.supported_qpts =
+ BIT(IB_QPT_RAW_PACKET);
+ }
+ }
+
+ if (field_avail(typeof(resp), tunnel_offloads_caps,
+ uhw->outlen)) {
+ resp.response_length += sizeof(resp.tunnel_offloads_caps);
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GRE;
+ }
+
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
@@ -3097,6 +3145,8 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
qp->real_qp = qp;
qp->uobject = NULL;
qp->qp_type = MLX5_IB_QPT_REG_UMR;
+ qp->send_cq = init_attr->send_cq;
+ qp->recv_cq = init_attr->recv_cq;
attr->qp_state = IB_QPS_INIT;
attr->port_num = 1;
@@ -3979,7 +4029,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
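
The query_device hunks above extend the extended-uverbs response (the new flags word, striding RQ caps, tunnel offload caps) only after checking that the caller's output buffer can hold each field, and bump resp.response_length accordingly so older userspace is unaffected. The field_avail() guard amounts to an offset check along these lines (illustrative macro, not necessarily the driver's exact definition):

#include <stddef.h>

/* True if @field of @type ends within the @outlen bytes userspace supplied. */
#define FIELD_FITS(type, field, outlen) \
	(offsetof(type, field) + sizeof(((type *)0)->field) <= (outlen))
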
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 189e80cd6b2f..6dd8cac78de2 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -228,6 +228,7 @@ struct wr_list {
enum mlx5_ib_rq_flags {
MLX5_IB_RQ_CVLAN_STRIPPING = 1 << 0,
+ MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
};
struct mlx5_ib_wq {
@@ -254,8 +255,14 @@ struct mlx5_ib_wq {
enum mlx5_ib_wq_flags {
MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
+ MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};
+#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
+#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
+#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
+#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
+
struct mlx5_ib_rwq {
struct ib_wq ibwq;
struct mlx5_core_qp core_qp;
@@ -264,6 +271,9 @@ struct mlx5_ib_rwq {
u32 log_rq_size;
u32 rq_page_offset;
u32 log_page_size;
+ u32 log_num_strides;
+ u32 two_byte_shift_en;
+ u32 single_stride_log_num_of_bytes;
struct ib_umem *umem;
size_t buf_size;
unsigned int page_shift;
@@ -389,6 +399,7 @@ struct mlx5_ib_qp {
struct list_head cq_send_list;
u32 rate_limit;
u32 underlay_qpn;
+ bool tunnel_offload_en;
};
struct mlx5_ib_cq_buf {
@@ -411,6 +422,8 @@ enum mlx5_ib_qp_flags {
MLX5_IB_QP_RSS = 1 << 8,
MLX5_IB_QP_CVLAN_STRIPPING = 1 << 9,
MLX5_IB_QP_UNDERLAY = 1 << 10,
+ MLX5_IB_QP_PCI_WRITE_END_PADDING = 1 << 11,
+ MLX5_IB_QP_TUNNEL_OFFLOAD = 1 << 12,
};
struct mlx5_umr_wr {
@@ -435,6 +448,10 @@ struct mlx5_shared_mr_info {
struct ib_umem *umem;
};
+enum mlx5_ib_cq_pr_flags {
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
+};
+
struct mlx5_ib_cq {
struct ib_cq ibcq;
struct mlx5_core_cq mcq;
@@ -457,6 +474,7 @@ struct mlx5_ib_cq {
struct list_head wc_list;
enum ib_cq_notify_flags notify_flags;
struct work_struct notify_work;
+ u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
};
struct mlx5_ib_wc {
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 37bbc543847a..9beee9cef137 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1230,13 +1230,13 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
page_shift, order, access_flags);
if (PTR_ERR(mr) == -EAGAIN) {
- mlx5_ib_dbg(dev, "cache empty for order %d", order);
+ mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
mr = NULL;
}
} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
if (access_flags & IB_ACCESS_ON_DEMAND) {
err = -EINVAL;
- pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+ pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
goto error;
}
use_umr = false;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 3d701c7a4c91..e2197bdda89c 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -32,6 +32,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
+#include <linux/kernel.h>
#include "mlx5_ib.h"
#include "cmd.h"
@@ -929,9 +930,8 @@ static int mlx5_ib_mr_initiator_pfault_handler(
return -EFAULT;
}
- if (unlikely(opcode >= sizeof(mlx5_ib_odp_opcode_cap) /
- sizeof(mlx5_ib_odp_opcode_cap[0]) ||
- !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
+ if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) ||
+ !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
opcode);
return -EFAULT;
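
The odp.c change swaps an open-coded sizeof division for ARRAY_SIZE() from <linux/kernel.h>; the result is the same for a real array, and the macro additionally refuses to compile if its argument has decayed to a pointer. The bounds-check idiom in isolation, with an example table:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

static const unsigned int opcode_caps[] = { 0x1, 0x3, 0x7 };	/* example data */

static bool opcode_supported(unsigned int opcode, unsigned int transport_caps)
{
	/* Check the index before touching the table, then the capability bit. */
	return opcode < ARRAY_SIZE(opcode_caps) &&
	       (transport_caps & opcode_caps[opcode]);
}
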
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index acb79d3a4f1d..31ad28853efa 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1178,8 +1178,8 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, end_padding_mode,
- MLX5_GET(qpc, qpc, end_padding_mode));
+ if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
@@ -1204,8 +1204,16 @@ static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
}
+static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
+{
+ return (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
+ MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
+ MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
+}
+
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
- struct mlx5_ib_rq *rq, u32 tdn)
+ struct mlx5_ib_rq *rq, u32 tdn,
+ bool tunnel_offload_en)
{
u32 *in;
void *tirc;
@@ -1221,6 +1229,8 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
MLX5_SET(tirc, tirc, transport_domain, tdn);
+ if (tunnel_offload_en)
+ MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
@@ -1266,12 +1276,15 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING)
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
+ if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
+ rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
err = create_raw_packet_qp_rq(dev, rq, in);
if (err)
goto err_destroy_sq;
- err = create_raw_packet_qp_tir(dev, rq, tdn);
+ err = create_raw_packet_qp_tir(dev, rq, tdn,
+ qp->tunnel_offload_en);
if (err)
goto err_destroy_rq;
}
@@ -1358,7 +1371,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (udata->outlen < min_resp_len)
return -EINVAL;
- required_cmd_sz = offsetof(typeof(ucmd), reserved1) + sizeof(ucmd.reserved1);
+ required_cmd_sz = offsetof(typeof(ucmd), flags) + sizeof(ucmd.flags);
if (udata->inlen < required_cmd_sz) {
mlx5_ib_dbg(dev, "invalid inlen\n");
return -EINVAL;
@@ -1381,8 +1394,20 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return -EOPNOTSUPP;
}
- if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)) || ucmd.reserved1) {
- mlx5_ib_dbg(dev, "invalid reserved\n");
+ if (ucmd.flags & ~MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
+ mlx5_ib_dbg(dev, "invalid flags\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
+ !tunnel_offload_supported(dev->mdev)) {
+ mlx5_ib_dbg(dev, "tunnel offloads isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
+ !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
+ mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
return -EOPNOTSUPP;
}
@@ -1405,6 +1430,15 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
MLX5_SET(tirc, tirc, transport_domain, tdn);
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
+ MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
+
+ if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
+ else
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
switch (ucmd.rx_hash_function) {
case MLX5_RX_HASH_FUNC_TOEPLITZ:
{
@@ -1604,6 +1638,14 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
+ if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
+ !tunnel_offload_supported(mdev)) {
+ mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+ qp->tunnel_offload_en = true;
+ }
if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
if (init_attr->qp_type != IB_QPT_UD ||
@@ -1781,6 +1823,19 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->flags |= MLX5_IB_QP_LSO;
}
+ if (init_attr->create_flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
+ if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
+ mlx5_ib_dbg(dev, "scatter end padding is not supported\n");
+ err = -EOPNOTSUPP;
+ goto err;
+ } else if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ } else {
+ qp->flags |= MLX5_IB_QP_PCI_WRITE_END_PADDING;
+ }
+ }
+
if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) {
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
@@ -1825,6 +1880,7 @@ err_create:
else if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
+err:
kvfree(in);
return err;
}
@@ -2283,8 +2339,12 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (err)
return err;
memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
- path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
- grh->sgid_index);
+ if (qp->ibqp.qp_type == IB_QPT_RC ||
+ qp->ibqp.qp_type == IB_QPT_UC ||
+ qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ qp->ibqp.qp_type == IB_QPT_XRC_TGT)
+ path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
+ grh->sgid_index);
path->dci_cfi_prio_sl = (sl & 0x7) << 4;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
@@ -3858,7 +3918,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
unsigned long flags;
unsigned idx;
int err = 0;
- int inl = 0;
int num_sge;
void *seg;
int nreq;
@@ -4053,6 +4112,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
*bad_wr = wr;
goto out;
}
+ /* fall through */
case MLX5_IB_QPT_HW_GSI:
set_datagram_seg(seg, wr);
seg += sizeof(struct mlx5_wqe_datagram_seg);
@@ -4116,7 +4176,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
*bad_wr = wr;
goto out;
}
- inl = 1;
size += sz;
} else {
dpseg = seg;
@@ -4707,9 +4766,27 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
- MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, wq_type,
+ rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
+ MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
+ if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
+ if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
+ mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ } else {
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ }
+ }
MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
+ if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
+ MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ rwq->single_stride_log_num_of_bytes -
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
+ MLX5_SET(wq, wq, log_wqe_num_of_strides, rwq->log_num_strides -
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES);
+ }
MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
@@ -4791,7 +4868,8 @@ static int prepare_user_rq(struct ib_pd *pd,
int err;
size_t required_cmd_sz;
- required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
+ required_cmd_sz = offsetof(typeof(ucmd), single_stride_log_num_of_bytes)
+ + sizeof(ucmd.single_stride_log_num_of_bytes);
if (udata->inlen < required_cmd_sz) {
mlx5_ib_dbg(dev, "invalid inlen\n");
return -EINVAL;
@@ -4809,14 +4887,39 @@ static int prepare_user_rq(struct ib_pd *pd,
return -EFAULT;
}
- if (ucmd.comp_mask) {
+ if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
mlx5_ib_dbg(dev, "invalid comp mask\n");
return -EOPNOTSUPP;
- }
-
- if (ucmd.reserved) {
- mlx5_ib_dbg(dev, "invalid reserved\n");
- return -EOPNOTSUPP;
+ } else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
+ if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
+ mlx5_ib_dbg(dev, "Striding RQ is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ if ((ucmd.single_stride_log_num_of_bytes <
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) ||
+ (ucmd.single_stride_log_num_of_bytes >
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) {
+ mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n",
+ ucmd.single_stride_log_num_of_bytes,
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES,
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
+ return -EINVAL;
+ }
+ if ((ucmd.single_wqe_log_num_of_strides >
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
+ (ucmd.single_wqe_log_num_of_strides <
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) {
+ mlx5_ib_dbg(dev, "Invalid log num strides (%u. Range is %u - %u)\n",
+ ucmd.single_wqe_log_num_of_strides,
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
+ return -EINVAL;
+ }
+ rwq->single_stride_log_num_of_bytes =
+ ucmd.single_stride_log_num_of_bytes;
+ rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
+ rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
+ rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
}
err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
@@ -5054,6 +5157,12 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
MLX5_SET(rqc, rqc, vsd,
(wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1);
}
+
+ if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
+ mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
}
if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index e36a9bc52268..f3e80dec1334 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -473,11 +473,11 @@ static int mthca_init_icm(struct mthca_dev *mdev,
goto err_unmap_eqp;
}
- mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
- dev_lim->cqc_entry_sz,
- mdev->limits.num_cqs,
- mdev->limits.reserved_cqs,
- 0, 0);
+ mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
+ dev_lim->cqc_entry_sz,
+ mdev->limits.num_cqs,
+ mdev->limits.reserved_cqs,
+ 0, 0);
if (!mdev->cq_table.table) {
mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 942ca84713c9..42b68aa999fc 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -178,11 +178,16 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
/* fall through */
case NETDEV_CHANGEADDR:
/* Add the address to the IP table */
- if (upper_dev)
- nesvnic->local_ipaddr =
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
- else
+ if (upper_dev) {
+ struct in_device *in;
+
+ rcu_read_lock();
+ in = __in_dev_get_rcu(upper_dev);
+ nesvnic->local_ipaddr = in->ifa_list->ifa_address;
+ rcu_read_unlock();
+ } else {
nesvnic->local_ipaddr = ifa->ifa_address;
+ }
nes_write_indexed(nesdev,
NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)),
@@ -757,18 +762,18 @@ static void nes_remove(struct pci_dev *pcidev)
int netdev_index = 0;
unsigned long flags;
- if (nesdev->netdev_count) {
- netdev = nesdev->netdev[netdev_index];
- if (netdev) {
- netif_stop_queue(netdev);
- unregister_netdev(netdev);
- nes_netdev_destroy(netdev);
+ if (nesdev->netdev_count) {
+ netdev = nesdev->netdev[netdev_index];
+ if (netdev) {
+ netif_stop_queue(netdev);
+ unregister_netdev(netdev);
+ nes_netdev_destroy(netdev);
- nesdev->netdev[netdev_index] = NULL;
- nesdev->netdev_count--;
- nesdev->nesadapter->netdev_count--;
- }
+ nesdev->netdev[netdev_index] = NULL;
+ nesdev->netdev_count--;
+ nesdev->nesadapter->netdev_count--;
}
+ }
nes_notifiers_registered--;
if (nes_notifiers_registered == 0) {
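
The nes_inetaddr_event hunk above stops reaching through upper_dev->ip_ptr directly and instead takes the in_device under an RCU read-side lock. The shape of that access, written as a hypothetical helper and with a defensive NULL check the hunk itself does not add:

#include <linux/inetdevice.h>
#include <linux/rcupdate.h>

static __be32 first_ipv4_address(struct net_device *dev)
{
	struct in_device *in_dev;
	__be32 addr = 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev && in_dev->ifa_list)
		addr = in_dev->ifa_list->ifa_address;
	rcu_read_unlock();

	return addr;
}
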
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 3f9e56e8b379..00c27291dc26 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -536,7 +536,7 @@ void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
int nes_destroy_cqp(struct nes_device *);
int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
void nes_recheck_link_status(struct work_struct *work);
-void nes_terminate_timeout(unsigned long context);
+void nes_terminate_timeout(struct timer_list *t);
/* nes_nic.c */
struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
@@ -575,8 +575,8 @@ void nes_put_cqp_request(struct nes_device *nesdev,
struct nes_cqp_request *cqp_request);
void nes_post_cqp_request(struct nes_device *, struct nes_cqp_request *);
int nes_arp_table(struct nes_device *, u32, u8 *, u32);
-void nes_mh_fix(unsigned long);
-void nes_clc(unsigned long);
+void nes_mh_fix(struct timer_list *t);
+void nes_clc(struct timer_list *t);
void nes_dump_mem(unsigned int, void *, int);
u32 nes_crc32(u32, u32, u32, u32, u8 *, u32, u32, u32);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index de4025deaa4a..c56ca2a74df5 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -840,7 +840,7 @@ static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node)
/**
* nes_cm_timer_tick
*/
-static void nes_cm_timer_tick(unsigned long pass)
+static void nes_cm_timer_tick(struct timer_list *unused)
{
unsigned long flags;
unsigned long nexttimeout = jiffies + NES_LONG_TIME;
@@ -1389,7 +1389,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
struct rtable *rt;
struct neighbour *neigh;
int rc = arpindex;
- struct net_device *netdev;
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
__be32 dst_ipaddr = htonl(dst_ip);
@@ -1400,11 +1399,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
return rc;
}
- if (netif_is_bond_slave(nesvnic->netdev))
- netdev = netdev_master_upper_dev_get(nesvnic->netdev);
- else
- netdev = nesvnic->netdev;
-
neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
rcu_read_lock();
@@ -1768,6 +1762,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
case NES_CM_STATE_FIN_WAIT1:
case NES_CM_STATE_LAST_ACK:
cm_node->cm_id->rem_ref(cm_node->cm_id);
+ /* fall through */
case NES_CM_STATE_TIME_WAIT:
cm_node->state = NES_CM_STATE_CLOSED;
rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -2670,8 +2665,7 @@ static struct nes_cm_core *nes_cm_alloc_core(void)
return NULL;
INIT_LIST_HEAD(&cm_core->connected_nodes);
- init_timer(&cm_core->tcp_timer);
- cm_core->tcp_timer.function = nes_cm_timer_tick;
+ timer_setup(&cm_core->tcp_timer, nes_cm_timer_tick, 0);
cm_core->mtu = NES_CM_DEFAULT_MTU;
cm_core->state = NES_CM_STATE_INITED;
@@ -3074,7 +3068,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
u32 crc_value;
int ret;
int passive_state;
- struct nes_ib_device *nesibdev;
struct ib_mr *ibmr = NULL;
struct nes_pd *nespd;
u64 tagged_offset;
@@ -3157,7 +3150,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (raddr->sin_addr.s_addr != laddr->sin_addr.s_addr) {
u64temp = (unsigned long)nesqp;
- nesibdev = nesvnic->nesibdev;
nespd = nesqp->nespd;
tagged_offset = (u64)(unsigned long)*start_buff;
ibmr = nes_reg_phys_mr(&nespd->ibpd,
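
The same timer-API conversion is applied here and throughout the nes files below: init_timer()/setup_timer() with an unsigned long data cookie become timer_setup(), and each callback recovers its containing structure with from_timer(), which is a container_of() keyed on the timer_list member. A sketch of the pattern with a made-up structure, not the driver's own:

#include <linux/timer.h>

struct my_adapter {
	struct timer_list mh_timer;
	/* ... */
};

static void my_mh_fix(struct timer_list *t)
{
	struct my_adapter *adapter = from_timer(adapter, t, mh_timer);

	/* ... periodic work using adapter, then re-arm ... */
	mod_timer(&adapter->mh_timer, jiffies + HZ / 5);
}

static void my_adapter_start(struct my_adapter *adapter)
{
	timer_setup(&adapter->mh_timer, my_mh_fix, 0);
	mod_timer(&adapter->mh_timer, jiffies + HZ / 5);
}
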
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index b0adf65e4bdb..18a7de1c3923 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -381,6 +381,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
sizeof nesadapter->pft_mcast_map);
/* populate the new nesadapter */
+ nesadapter->nesdev = nesdev;
nesadapter->devfn = nesdev->pcidev->devfn;
nesadapter->bus_number = nesdev->pcidev->bus->number;
nesadapter->ref_count = 1;
@@ -598,19 +599,15 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
}
if (nesadapter->hw_rev == NE020_REV) {
- init_timer(&nesadapter->mh_timer);
- nesadapter->mh_timer.function = nes_mh_fix;
+ timer_setup(&nesadapter->mh_timer, nes_mh_fix, 0);
nesadapter->mh_timer.expires = jiffies + (HZ/5); /* 1 second */
- nesadapter->mh_timer.data = (unsigned long)nesdev;
add_timer(&nesadapter->mh_timer);
} else {
nes_write32(nesdev->regs+NES_INTF_INT_STAT, 0x0f000000);
}
- init_timer(&nesadapter->lc_timer);
- nesadapter->lc_timer.function = nes_clc;
+ timer_setup(&nesadapter->lc_timer, nes_clc, 0);
nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */
- nesadapter->lc_timer.data = (unsigned long)nesdev;
add_timer(&nesadapter->lc_timer);
list_add_tail(&nesadapter->list, &nes_adapter_list);
@@ -1623,9 +1620,9 @@ static void nes_replenish_nic_rq(struct nes_vnic *nesvnic)
/**
* nes_rq_wqes_timeout
*/
-static void nes_rq_wqes_timeout(unsigned long parm)
+static void nes_rq_wqes_timeout(struct timer_list *t)
{
- struct nes_vnic *nesvnic = (struct nes_vnic *)parm;
+ struct nes_vnic *nesvnic = from_timer(nesvnic, t, rq_wqes_timer);
printk("%s: Timer fired.\n", __func__);
atomic_set(&nesvnic->rx_skb_timer_running, 0);
if (atomic_read(&nesvnic->rx_skbs_needed))
@@ -1849,8 +1846,7 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
wqe_count -= counter;
nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter << 24) | nesvnic->nic.qp_id);
} while (wqe_count);
- setup_timer(&nesvnic->rq_wqes_timer, nes_rq_wqes_timeout,
- (unsigned long)nesvnic);
+ timer_setup(&nesvnic->rq_wqes_timer, nes_rq_wqes_timeout, 0);
nes_debug(NES_DBG_INIT, "NAPI support Enabled\n");
if (nesdev->nesadapter->et_use_adaptive_rx_coalesce)
{
@@ -1861,8 +1857,9 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
}
if ((nesdev->nesadapter->allow_unaligned_fpdus) &&
(nes_init_mgt_qp(nesdev, netdev, nesvnic))) {
- nes_debug(NES_DBG_INIT, "%s: Out of memory for pau nic\n", netdev->name);
- nes_destroy_nic_qp(nesvnic);
+ nes_debug(NES_DBG_INIT, "%s: Out of memory for pau nic\n",
+ netdev->name);
+ nes_destroy_nic_qp(nesvnic);
return -ENOMEM;
}
@@ -3474,9 +3471,9 @@ static void nes_terminate_received(struct nes_device *nesdev,
}
/* Timeout routine in case terminate fails to complete */
-void nes_terminate_timeout(unsigned long context)
+void nes_terminate_timeout(struct timer_list *t)
{
- struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
+ struct nes_qp *nesqp = from_timer(nesqp, t, terminate_timer);
nes_terminate_done(nesqp, 1);
}
@@ -3631,7 +3628,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
}
-
+ /* fall through */
case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 1b66ef1e9937..3c56470816a8 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1164,6 +1164,7 @@ struct nes_adapter {
u8 log_port;
/* PCI information */
+ struct nes_device *nesdev;
unsigned int devfn;
unsigned char bus_number;
unsigned char OneG_Mode;
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
index 77226cf4ea02..21e0ebd39a05 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -122,9 +122,10 @@ static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
/**
* nes_mgt_rq_wqes_timeout
*/
-static void nes_mgt_rq_wqes_timeout(unsigned long parm)
+static void nes_mgt_rq_wqes_timeout(struct timer_list *t)
{
- struct nes_vnic_mgt *mgtvnic = (struct nes_vnic_mgt *)parm;
+ struct nes_vnic_mgt *mgtvnic = from_timer(mgtvnic, t,
+ rq_wqes_timer);
atomic_set(&mgtvnic->rx_skb_timer_running, 0);
if (atomic_read(&mgtvnic->rx_skbs_needed))
@@ -1040,8 +1041,8 @@ int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct
mgtvnic->mgt.rx_skb[counter] = skb;
}
- setup_timer(&mgtvnic->rq_wqes_timer, nes_mgt_rq_wqes_timeout,
- (unsigned long)mgtvnic);
+ timer_setup(&mgtvnic->rq_wqes_timer, nes_mgt_rq_wqes_timeout,
+ 0);
wqe_count = NES_MGT_WQ_COUNT - 1;
mgtvnic->mgt.rq_head = wqe_count;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 5921ea3d50ae..0a75164cedea 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -926,11 +926,10 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
nesadapter->pft_mcast_map[mc_index] !=
nesvnic->nic_index &&
mc_index < max_pft_entries_avaiable) {
- nes_debug(NES_DBG_NIC_RX,
- "mc_index=%d skipping nic_index=%d, "
- "used for=%d \n", mc_index,
- nesvnic->nic_index,
- nesadapter->pft_mcast_map[mc_index]);
+ nes_debug(NES_DBG_NIC_RX,
+ "mc_index=%d skipping nic_index=%d, used for=%d\n",
+ mc_index, nesvnic->nic_index,
+ nesadapter->pft_mcast_map[mc_index]);
mc_index++;
}
if (mc_index >= max_pft_entries_avaiable)
@@ -1746,8 +1745,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
nesvnic->rdma_enabled = 0;
}
nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
- init_timer(&nesvnic->event_timer);
- nesvnic->event_timer.function = NULL;
+ timer_setup(&nesvnic->event_timer, NULL, 0);
spin_lock_init(&nesvnic->tx_lock);
spin_lock_init(&nesvnic->port_ibevent_lock);
nesdev->netdev[nesdev->netdev_count] = netdev;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 37331e2fdc5f..21b4a8373acf 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -740,11 +740,11 @@ int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 acti
/**
* nes_mh_fix
*/
-void nes_mh_fix(unsigned long parm)
+void nes_mh_fix(struct timer_list *t)
{
+ struct nes_adapter *nesadapter = from_timer(nesadapter, t, mh_timer);
+ struct nes_device *nesdev = nesadapter->nesdev;
unsigned long flags;
- struct nes_device *nesdev = (struct nes_device *)parm;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
struct nes_vnic *nesvnic;
u32 used_chunks_tx;
u32 temp_used_chunks_tx;
@@ -753,7 +753,6 @@ void nes_mh_fix(unsigned long parm)
u32 mac_tx_frames_low;
u32 mac_tx_frames_high;
u32 mac_tx_pauses;
- u32 serdes_status;
u32 reset_value;
u32 tx_control;
u32 tx_config;
@@ -846,7 +845,7 @@ void nes_mh_fix(unsigned long parm)
}
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
- serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0);
+ nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0);
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7);
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000);
@@ -859,7 +858,7 @@ void nes_mh_fix(unsigned long parm)
} else {
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222);
}
- serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_STATUS0);
+ nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_STATUS0);
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff);
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, tx_control);
@@ -881,17 +880,16 @@ no_mh_work:
/**
* nes_clc
*/
-void nes_clc(unsigned long parm)
+void nes_clc(struct timer_list *t)
{
+ struct nes_adapter *nesadapter = from_timer(nesadapter, t, lc_timer);
unsigned long flags;
- struct nes_device *nesdev = (struct nes_device *)parm;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
- nesadapter->link_interrupt_count[0] = 0;
- nesadapter->link_interrupt_count[1] = 0;
- nesadapter->link_interrupt_count[2] = 0;
- nesadapter->link_interrupt_count[3] = 0;
+ nesadapter->link_interrupt_count[0] = 0;
+ nesadapter->link_interrupt_count[1] = 0;
+ nesadapter->link_interrupt_count[2] = 0;
+ nesadapter->link_interrupt_count[3] = 0;
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 442b9bdc0f03..db46b7b53fb4 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1304,8 +1304,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
init_completion(&nesqp->rq_drained);
nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
- setup_timer(&nesqp->terminate_timer, nes_terminate_timeout,
- (unsigned long)nesqp);
+ timer_setup(&nesqp->terminate_timer, nes_terminate_timeout, 0);
/* update the QP table */
nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
@@ -2865,11 +2864,11 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
/* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
- if (nesqp->hte_added) {
- nes_debug(NES_DBG_MOD_QP, "set CQP_QP_DEL_HTE\n");
- next_iwarp_state |= NES_CQP_QP_DEL_HTE;
- nesqp->hte_added = 0;
- }
+ if (nesqp->hte_added) {
+ nes_debug(NES_DBG_MOD_QP, "set CQP_QP_DEL_HTE\n");
+ next_iwarp_state |= NES_CQP_QP_DEL_HTE;
+ nesqp->hte_added = 0;
+ }
if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) &&
(nesdev->iw_status) &&
(nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) {
@@ -3560,7 +3559,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
- entry->opcode = IB_WC_RECV;
+ entry->opcode = IB_WC_RECV;
nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)) {
@@ -3788,9 +3787,9 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
/**
* nes_handle_delayed_event
*/
-static void nes_handle_delayed_event(unsigned long data)
+static void nes_handle_delayed_event(struct timer_list *t)
{
- struct nes_vnic *nesvnic = (void *) data;
+ struct nes_vnic *nesvnic = from_timer(nesvnic, t, event_timer);
if (nesvnic->delayed_event != nesvnic->last_dispatched_event) {
struct ib_event event;
@@ -3820,8 +3819,7 @@ void nes_port_ibevent(struct nes_vnic *nesvnic)
if (!nesvnic->event_timer.function) {
ib_dispatch_event(&event);
nesvnic->last_dispatched_event = event.event;
- nesvnic->event_timer.function = nes_handle_delayed_event;
- nesvnic->event_timer.data = (unsigned long) nesvnic;
+ nesvnic->event_timer.function = (TIMER_FUNC_TYPE)nes_handle_delayed_event;
nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
add_timer(&nesvnic->event_timer);
} else {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index d0249e463338..dec650930ca6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -201,21 +201,6 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
/* Get network header type for this GID */
ah->hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
- if ((pd->uctx) &&
- (!rdma_is_multicast_addr((struct in6_addr *)grh->dgid.raw)) &&
- (!rdma_link_local_addr((struct in6_addr *)grh->dgid.raw))) {
- status = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
- attr->roce.dmac,
- &vlan_tag,
- &sgid_attr.ndev->ifindex,
- NULL);
- if (status) {
- pr_err("%s(): Failed to resolve dmac from gid."
- "status = %d\n", __func__, status);
- goto av_conf_err;
- }
- }
-
status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan, vlan_tag);
if (status)
goto av_conf_err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 65b166cc7437..0ba695a88b62 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1093,7 +1093,7 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
rsp = &mqe->u.rsp;
if (cqe_status || ext_status) {
- pr_err("%s() cqe_status=0x%x, ext_status=0x%x,",
+ pr_err("%s() cqe_status=0x%x, ext_status=0x%x,\n",
__func__, cqe_status, ext_status);
if (rsp) {
/* This is for embedded cmds. */
@@ -1947,7 +1947,7 @@ mbx_err:
int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
{
- int status = -ENOMEM;
+ int status;
struct ocrdma_dealloc_lkey *cmd;
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
@@ -1956,9 +1956,7 @@ int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
cmd->lkey = lkey;
cmd->rsvd_frmr = fr_mr ? 1 : 0;
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
- if (status)
- goto mbx_err;
-mbx_err:
+
kfree(cmd);
return status;
}
@@ -3186,8 +3184,8 @@ void ocrdma_eqd_set_task(struct work_struct *work)
{
struct ocrdma_dev *dev =
container_of(work, struct ocrdma_dev, eqd_work.work);
- struct ocrdma_eq *eq = 0;
- int i, num = 0, status = -EINVAL;
+ struct ocrdma_eq *eq = NULL;
+ int i, num = 0;
u64 eq_intr;
for (i = 0; i < dev->eq_cnt; i++) {
@@ -3209,7 +3207,7 @@ void ocrdma_eqd_set_task(struct work_struct *work)
}
if (num)
- status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
+ ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 66056f9a9700..e528d7acb7f6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -658,7 +658,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
if (reset) {
status = ocrdma_mbx_rdma_stats(dev, true);
if (status) {
- pr_err("Failed to reset stats = %d", status);
+ pr_err("Failed to reset stats = %d\n", status);
goto err;
}
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 27d5e8d9f08d..7866fd8051f6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -66,9 +66,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *sgid)
{
int ret;
- struct ocrdma_dev *dev;
- dev = get_ocrdma_dev(ibdev);
memset(sgid, 0, sizeof(*sgid));
if (index >= OCRDMA_MAX_SGID)
return -EINVAL;
@@ -2247,6 +2245,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_SEND_WITH_IMM:
hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
hdr->immdt = ntohl(wr->ex.imm_data);
+ /* fall through */
case IB_WR_SEND:
hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
ocrdma_build_send(qp, hdr, wr);
@@ -2260,6 +2259,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_WRITE_WITH_IMM:
hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
hdr->immdt = ntohl(wr->ex.imm_data);
+ /* fall through */
case IB_WR_RDMA_WRITE:
hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
status = ocrdma_build_write(qp, hdr, wr);
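
The /* fall through */ comments added here (and earlier in the mlx5 and nes hunks) document intentional case fallthrough so that -Wimplicit-fallthrough stays quiet; later kernels spell the same thing with the fallthrough pseudo-keyword. The idiom on its own, with made-up opcodes:

enum my_opcode { MY_SEND, MY_SEND_WITH_IMM };

static unsigned int build_cw(enum my_opcode op)
{
	unsigned int cw = 0;

	switch (op) {
	case MY_SEND_WITH_IMM:
		cw |= 0x1;	/* extra work only the _IMM variant needs */
		/* fall through */
	case MY_SEND:
		cw |= 0x2;	/* common path shared by both opcodes */
		break;
	}
	return cw;
}
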
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig
index 60e867d80b88..9b9e3b1d2705 100644
--- a/drivers/infiniband/hw/qedr/Kconfig
+++ b/drivers/infiniband/hw/qedr/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_QEDR
tristate "QLogic RoCE driver"
depends on 64BIT && QEDE
+ depends on PCI
select QED_LL2
select QED_OOO
select QED_RDMA
diff --git a/drivers/infiniband/hw/qedr/Makefile b/drivers/infiniband/hw/qedr/Makefile
index ba7067c77f2f..1c0bc4f78550 100644
--- a/drivers/infiniband/hw/qedr/Makefile
+++ b/drivers/infiniband/hw/qedr/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o
-qedr-y := main.o verbs.o qedr_cm.o
+qedr-y := main.o verbs.o qedr_roce_cm.o qedr_iw_cm.o
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 97d033f51dc9..50812b33291b 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -33,16 +33,20 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>
+#include <linux/idr.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
+#include "qedr_iw_cm.h"
MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
@@ -50,8 +54,8 @@ MODULE_LICENSE("Dual BSD/GPL");
#define QEDR_WQ_MULTIPLIER_DFT (3)
-void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
- enum ib_event_type type)
+static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
+ enum ib_event_type type)
{
struct ib_event ibev;
@@ -92,8 +96,84 @@ static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
return qdev->ndev;
}
+static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = qedr_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
+ RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = qedr_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = 1;
+ immutable->gid_tbl_len = 1;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ immutable->max_mad_size = 0;
+
+ return 0;
+}
+
+static int qedr_iw_register_device(struct qedr_dev *dev)
+{
+ dev->ibdev.node_type = RDMA_NODE_RNIC;
+ dev->ibdev.query_gid = qedr_iw_query_gid;
+
+ dev->ibdev.get_port_immutable = qedr_iw_port_immutable;
+
+ dev->ibdev.iwcm = kzalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
+ if (!dev->ibdev.iwcm)
+ return -ENOMEM;
+
+ dev->ibdev.iwcm->connect = qedr_iw_connect;
+ dev->ibdev.iwcm->accept = qedr_iw_accept;
+ dev->ibdev.iwcm->reject = qedr_iw_reject;
+ dev->ibdev.iwcm->create_listen = qedr_iw_create_listen;
+ dev->ibdev.iwcm->destroy_listen = qedr_iw_destroy_listen;
+ dev->ibdev.iwcm->add_ref = qedr_iw_qp_add_ref;
+ dev->ibdev.iwcm->rem_ref = qedr_iw_qp_rem_ref;
+ dev->ibdev.iwcm->get_qp = qedr_iw_get_qp;
+
+ memcpy(dev->ibdev.iwcm->ifname,
+ dev->ndev->name, sizeof(dev->ibdev.iwcm->ifname));
+
+ return 0;
+}
+
+static void qedr_roce_register_device(struct qedr_dev *dev)
+{
+ dev->ibdev.node_type = RDMA_NODE_IB_CA;
+ dev->ibdev.query_gid = qedr_query_gid;
+
+ dev->ibdev.add_gid = qedr_add_gid;
+ dev->ibdev.del_gid = qedr_del_gid;
+
+ dev->ibdev.get_port_immutable = qedr_roce_port_immutable;
+}
+
static int qedr_register_device(struct qedr_dev *dev)
{
+ int rc;
+
strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);
dev->ibdev.node_guid = dev->attr.node_guid;
@@ -121,18 +201,21 @@ static int qedr_register_device(struct qedr_dev *dev)
QEDR_UVERBS(POST_SEND) |
QEDR_UVERBS(POST_RECV);
+ if (IS_IWARP(dev)) {
+ rc = qedr_iw_register_device(dev);
+ if (rc)
+ return rc;
+ } else {
+ qedr_roce_register_device(dev);
+ }
+
dev->ibdev.phys_port_cnt = 1;
dev->ibdev.num_comp_vectors = dev->num_cnq;
- dev->ibdev.node_type = RDMA_NODE_IB_CA;
dev->ibdev.query_device = qedr_query_device;
dev->ibdev.query_port = qedr_query_port;
dev->ibdev.modify_port = qedr_modify_port;
- dev->ibdev.query_gid = qedr_query_gid;
- dev->ibdev.add_gid = qedr_add_gid;
- dev->ibdev.del_gid = qedr_del_gid;
-
dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
dev->ibdev.mmap = qedr_mmap;
@@ -166,7 +249,7 @@ static int qedr_register_device(struct qedr_dev *dev)
dev->ibdev.post_recv = qedr_post_recv;
dev->ibdev.process_mad = qedr_process_mad;
- dev->ibdev.get_port_immutable = qedr_port_immutable;
+
dev->ibdev.get_netdev = qedr_get_netdev;
dev->ibdev.dev.parent = &dev->pdev->dev;
@@ -217,6 +300,9 @@ static void qedr_free_resources(struct qedr_dev *dev)
{
int i;
+ if (IS_IWARP(dev))
+ destroy_workqueue(dev->iwarp_wq);
+
for (i = 0; i < dev->num_cnq; i++) {
qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
@@ -241,6 +327,12 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
spin_lock_init(&dev->sgid_lock);
+ if (IS_IWARP(dev)) {
+ spin_lock_init(&dev->idr_lock);
+ idr_init(&dev->qpidr);
+ dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
+ }
+
/* Allocate Status blocks for CNQ */
dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
GFP_KERNEL);
@@ -597,12 +689,12 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
return 0;
}
-void qedr_unaffiliated_event(void *context, u8 event_code)
+static void qedr_unaffiliated_event(void *context, u8 event_code)
{
pr_err("unaffiliated event not implemented yet\n");
}
-void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
+static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED 0
#define EVENT_TYPE_CQ 1
@@ -716,6 +808,7 @@ static int qedr_init_hw(struct qedr_dev *dev)
in_params->events = &events;
in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
in_params->max_mtu = dev->ndev->mtu;
+ dev->iwarp_max_mtu = dev->ndev->mtu;
ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);
rc = dev->ops->rdma_init(dev->cdev, in_params);
@@ -726,7 +819,7 @@ static int qedr_init_hw(struct qedr_dev *dev)
if (rc)
goto out;
- dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr;
+ dev->db_addr = (void __iomem *)(uintptr_t)out_params.dpi_addr;
dev->db_phys_addr = out_params.dpi_phys_addr;
dev->db_size = out_params.dpi_size;
dev->dpi = out_params.dpi;
@@ -740,7 +833,7 @@ out:
return rc;
}
-void qedr_stop_hw(struct qedr_dev *dev)
+static void qedr_stop_hw(struct qedr_dev *dev)
{
dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
dev->ops->rdma_stop(dev->rdma_ctx);
@@ -777,6 +870,7 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
goto init_err;
dev->user_dpm_enabled = dev_info.user_dpm_enabled;
+ dev->rdma_type = dev_info.rdma_type;
dev->num_hwfns = dev_info.common.num_hwfns;
dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
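
For iWARP the driver now initializes an idr (dev->qpidr) guarded by idr_lock, presumably so that QPs can later be found by number (for example from the iwcm->get_qp callback registered above). A generic sketch of that kind of lookup table, not this driver's exact usage:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(qp_lock);
static DEFINE_IDR(qp_table);

/* Insert @qp at exactly @qpn; returns @qpn on success or a negative errno. */
static int qp_table_add(void *qp, int qpn)
{
	int ret;

	spin_lock_irq(&qp_lock);
	ret = idr_alloc(&qp_table, qp, qpn, qpn + 1, GFP_NOWAIT);
	spin_unlock_irq(&qp_lock);
	return ret;
}

static void *qp_table_find(int qpn)
{
	void *qp;

	spin_lock_irq(&qp_lock);
	qp = idr_find(&qp_table, qpn);
	spin_unlock_irq(&qp_lock);
	return qp;
}
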
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 254083b524bd..86d4511e0d75 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -33,6 +33,7 @@
#define __QEDR_H__
#include <linux/pci.h>
+#include <linux/idr.h>
#include <rdma/ib_addr.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
@@ -43,6 +44,8 @@
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
#define DP_NAME(dev) ((dev)->ibdev.name)
+#define IS_IWARP(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_IWARP)
+#define IS_ROCE(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_ROCE)
#define DP_DEBUG(dev, module, fmt, ...) \
pr_debug("(%s) " module ": " fmt, \
@@ -56,6 +59,7 @@
#define QEDR_MSG_SQ " SQ"
#define QEDR_MSG_QP " QP"
#define QEDR_MSG_GSI " GSI"
+#define QEDR_MSG_IWARP " IW"
#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
@@ -160,6 +164,11 @@ struct qedr_dev {
struct qedr_cq *gsi_sqcq;
struct qedr_cq *gsi_rqcq;
struct qedr_qp *gsi_qp;
+ enum qed_rdma_type rdma_type;
+ spinlock_t idr_lock; /* Protect qpidr data-structure */
+ struct idr qpidr;
+ struct workqueue_struct *iwarp_wq;
+ u16 iwarp_max_mtu;
unsigned long enet_state;
@@ -317,6 +326,9 @@ struct qedr_qp_hwq_info {
/* DB */
void __iomem *db;
union db_prod32 db_data;
+
+ void __iomem *iwarp_db2;
+ union db_prod32 iwarp_db2_data;
};
#define QEDR_INC_SW_IDX(p_info, index) \
@@ -337,7 +349,7 @@ enum qedr_qp_err_bitmap {
struct qedr_qp {
struct ib_qp ibqp; /* must be first */
struct qedr_dev *dev;
-
+ struct qedr_iw_ep *ep;
struct qedr_qp_hwq_info sq;
struct qedr_qp_hwq_info rq;
@@ -394,6 +406,8 @@ struct qedr_qp {
/* Relevant to qps created from user space only (applications) */
struct qedr_userq usq;
struct qedr_userq urq;
+ atomic_t refcnt;
+ bool destroyed;
};
struct qedr_ah {
@@ -474,6 +488,21 @@ static inline int qedr_get_dmac(struct qedr_dev *dev,
return 0;
}
+struct qedr_iw_listener {
+ struct qedr_dev *dev;
+ struct iw_cm_id *cm_id;
+ int backlog;
+ void *qed_handle;
+};
+
+struct qedr_iw_ep {
+ struct qedr_dev *dev;
+ struct iw_cm_id *cm_id;
+ struct qedr_qp *qp;
+ void *qed_context;
+ u8 during_connect;
+};
+
static inline
struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
{
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
index 5c98d2055cad..b7587f10e7de 100644
--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -655,8 +655,10 @@ struct rdma_sq_rdma_wqe_1st {
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT 4
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
-#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x3
-#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 6
+#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_SHIFT 6
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 7
u8 wqe_size;
u8 prev_wqe_size;
};
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
new file mode 100644
index 000000000000..478b7317b80a
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -0,0 +1,749 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+#include <net/addrconf.h>
+#include <net/route.h>
+#include <net/ip6_route.h>
+#include <net/flow.h>
+#include "qedr.h"
+#include "qedr_iw_cm.h"
+
+static inline void
+qedr_fill_sockaddr4(const struct qed_iwarp_cm_info *cm_info,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
+
+ laddr->sin_family = AF_INET;
+ raddr->sin_family = AF_INET;
+
+ laddr->sin_port = htons(cm_info->local_port);
+ raddr->sin_port = htons(cm_info->remote_port);
+
+ laddr->sin_addr.s_addr = htonl(cm_info->local_ip[0]);
+ raddr->sin_addr.s_addr = htonl(cm_info->remote_ip[0]);
+}
+
+static inline void
+qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
+ struct sockaddr_in6 *raddr6 =
+ (struct sockaddr_in6 *)&event->remote_addr;
+ int i;
+
+ laddr6->sin6_family = AF_INET6;
+ raddr6->sin6_family = AF_INET6;
+
+ laddr6->sin6_port = htons(cm_info->local_port);
+ raddr6->sin6_port = htons(cm_info->remote_port);
+
+ for (i = 0; i < 4; i++) {
+ laddr6->sin6_addr.in6_u.u6_addr32[i] =
+ htonl(cm_info->local_ip[i]);
+ raddr6->sin6_addr.in6_u.u6_addr32[i] =
+ htonl(cm_info->remote_ip[i]);
+ }
+}
+
+static void
+qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_listener *listener = (struct qedr_iw_listener *)context;
+ struct qedr_dev *dev = listener->dev;
+ struct iw_cm_event event;
+ struct qedr_iw_ep *ep;
+
+ ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+ if (!ep)
+ return;
+
+ ep->dev = dev;
+ ep->qed_context = params->ep_context;
+
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CONNECT_REQUEST;
+ event.status = params->status;
+
+ if (!IS_ENABLED(CONFIG_IPV6) ||
+ params->cm_info->ip_version == QED_TCP_IPV4)
+ qedr_fill_sockaddr4(params->cm_info, &event);
+ else
+ qedr_fill_sockaddr6(params->cm_info, &event);
+
+ event.provider_data = (void *)ep;
+ event.private_data = (void *)params->cm_info->private_data;
+ event.private_data_len = (u8)params->cm_info->private_data_len;
+ event.ord = params->cm_info->ord;
+ event.ird = params->cm_info->ird;
+
+ listener->cm_id->event_handler(listener->cm_id, &event);
+}
+
+static void
+qedr_iw_issue_event(void *context,
+ struct qed_iwarp_cm_event_params *params,
+ enum iw_cm_event_type event_type)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct iw_cm_event event;
+
+ memset(&event, 0, sizeof(event));
+ event.status = params->status;
+ event.event = event_type;
+
+ if (params->cm_info) {
+ event.ird = params->cm_info->ird;
+ event.ord = params->cm_info->ord;
+ event.private_data_len = params->cm_info->private_data_len;
+ event.private_data = (void *)params->cm_info->private_data;
+ }
+
+ if (ep->cm_id)
+ ep->cm_id->event_handler(ep->cm_id, &event);
+}
+
+static void
+qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+
+ if (ep->cm_id) {
+ qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);
+
+ ep->cm_id->rem_ref(ep->cm_id);
+ ep->cm_id = NULL;
+ }
+}
+
+static void
+qedr_iw_qp_event(void *context,
+ struct qed_iwarp_cm_event_params *params,
+ enum ib_event_type ib_event, char *str)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+ struct ib_qp *ibqp = &ep->qp->ibqp;
+ struct ib_event event;
+
+ DP_NOTICE(dev, "QP error received: %s\n", str);
+
+ if (ibqp->event_handler) {
+ event.event = ib_event;
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ ibqp->event_handler(&event, ibqp->qp_context);
+ }
+}
+
+struct qedr_discon_work {
+ struct work_struct work;
+ struct qedr_iw_ep *ep;
+ enum qed_iwarp_event_type event;
+ int status;
+};
+
+static void qedr_iw_disconnect_worker(struct work_struct *work)
+{
+ struct qedr_discon_work *dwork =
+ container_of(work, struct qedr_discon_work, work);
+ struct qed_rdma_modify_qp_in_params qp_params = { 0 };
+ struct qedr_iw_ep *ep = dwork->ep;
+ struct qedr_dev *dev = ep->dev;
+ struct qedr_qp *qp = ep->qp;
+ struct iw_cm_event event;
+
+ if (qp->destroyed) {
+ kfree(dwork);
+ qedr_iw_qp_rem_ref(&qp->ibqp);
+ return;
+ }
+
+ memset(&event, 0, sizeof(event));
+ event.status = dwork->status;
+ event.event = IW_CM_EVENT_DISCONNECT;
+
+	/* A zero status means a graceful disconnect was requested; modifying
+	 * the QP to SQD translates to graceful disconnect, otherwise a reset
+	 * (ERR) is sent.
+	 */
+ if (dwork->status)
+ qp_params.new_state = QED_ROCE_QP_STATE_ERR;
+ else
+ qp_params.new_state = QED_ROCE_QP_STATE_SQD;
+
+ kfree(dwork);
+
+ if (ep->cm_id)
+ ep->cm_id->event_handler(ep->cm_id, &event);
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
+
+ dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);
+
+ qedr_iw_qp_rem_ref(&qp->ibqp);
+}
+
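qedr_iw_disconnect_worker() receives only a pointer to the embedded work_struct and recovers its qedr_discon_work wrapper with container_of(). A standalone userspace sketch of that idiom, with illustrative struct names rather than the driver's types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };			/* stand-in for work_struct */

struct discon_work {
	struct work work;			/* embedded member */
	int status;
};

static void worker(struct work *w)
{
	/* recover the wrapper from the embedded member, as the driver does */
	struct discon_work *dw = container_of(w, struct discon_work, work);

	printf("status=%d\n", dw->status);
}

int main(void)
{
	struct discon_work dw = { .status = -104 };	/* e.g. ECONNRESET */

	worker(&dw.work);
	return 0;
}
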
+static void
+qedr_iw_disconnect_event(void *context,
+ struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_discon_work *work;
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+ struct qedr_qp *qp = ep->qp;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ qedr_iw_qp_add_ref(&qp->ibqp);
+ work->ep = ep;
+ work->event = params->event;
+ work->status = params->status;
+
+ INIT_WORK(&work->work, qedr_iw_disconnect_worker);
+ queue_work(dev->iwarp_wq, &work->work);
+}
+
+static void
+qedr_iw_passive_complete(void *context,
+ struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+
+ /* We will only reach the following state if MPA_REJECT was called on
+ * passive. In this case there will be no associated QP.
+ */
+ if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
+ DP_DEBUG(dev, QEDR_MSG_IWARP,
+ "PASSIVE connection refused releasing ep...\n");
+ kfree(ep);
+ return;
+ }
+
+ qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);
+
+ if (params->status < 0)
+ qedr_iw_close_event(context, params);
+}
+
+static int
+qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+ struct qed_iwarp_send_rtr_in rtr_in;
+
+ rtr_in.ep_context = params->ep_context;
+
+ return dev->ops->iwarp_send_rtr(dev->rdma_ctx, &rtr_in);
+}
+
+static int
+qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+
+ switch (params->event) {
+ case QED_IWARP_EVENT_MPA_REQUEST:
+ qedr_iw_mpa_request(context, params);
+ break;
+ case QED_IWARP_EVENT_ACTIVE_MPA_REPLY:
+ qedr_iw_mpa_reply(context, params);
+ break;
+ case QED_IWARP_EVENT_PASSIVE_COMPLETE:
+ ep->during_connect = 0;
+ qedr_iw_passive_complete(context, params);
+ break;
+
+ case QED_IWARP_EVENT_ACTIVE_COMPLETE:
+ ep->during_connect = 0;
+ qedr_iw_issue_event(context,
+ params,
+ IW_CM_EVENT_CONNECT_REPLY);
+ if (params->status < 0) {
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+
+ ep->cm_id->rem_ref(ep->cm_id);
+ ep->cm_id = NULL;
+ }
+ break;
+ case QED_IWARP_EVENT_DISCONNECT:
+ qedr_iw_disconnect_event(context, params);
+ break;
+ case QED_IWARP_EVENT_CLOSE:
+ ep->during_connect = 0;
+ qedr_iw_close_event(context, params);
+ break;
+ case QED_IWARP_EVENT_RQ_EMPTY:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_RQ_EMPTY");
+ break;
+ case QED_IWARP_EVENT_IRQ_FULL:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_IRQ_FULL");
+ break;
+ case QED_IWARP_EVENT_LLP_TIMEOUT:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_LLP_TIMEOUT");
+ break;
+ case QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
+ "QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR");
+ break;
+ case QED_IWARP_EVENT_CQ_OVERFLOW:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_CQ_OVERFLOW");
+ break;
+ case QED_IWARP_EVENT_QP_CATASTROPHIC:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_QP_CATASTROPHIC");
+ break;
+ case QED_IWARP_EVENT_LOCAL_ACCESS_ERROR:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
+ "QED_IWARP_EVENT_LOCAL_ACCESS_ERROR");
+ break;
+ case QED_IWARP_EVENT_REMOTE_OPERATION_ERROR:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_REMOTE_OPERATION_ERROR");
+ break;
+ case QED_IWARP_EVENT_TERMINATE_RECEIVED:
+ DP_NOTICE(dev, "Got terminate message\n");
+ break;
+ default:
+ DP_NOTICE(dev, "Unknown event received %d\n", params->event);
+ break;
+ };
+ return 0;
+}
+
+static u16 qedr_iw_get_vlan_ipv4(struct qedr_dev *dev, u32 *addr)
+{
+ struct net_device *ndev;
+ u16 vlan_id = 0;
+
+ ndev = ip_dev_find(&init_net, htonl(addr[0]));
+
+ if (ndev) {
+ vlan_id = rdma_vlan_dev_vlan_id(ndev);
+ dev_put(ndev);
+ }
+ if (vlan_id == 0xffff)
+ vlan_id = 0;
+ return vlan_id;
+}
+
+static u16 qedr_iw_get_vlan_ipv6(u32 *addr)
+{
+ struct net_device *ndev = NULL;
+ struct in6_addr laddr6;
+ u16 vlan_id = 0;
+ int i;
+
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return vlan_id;
+
+ for (i = 0; i < 4; i++)
+ laddr6.in6_u.u6_addr32[i] = htonl(addr[i]);
+
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, ndev) {
+ if (ipv6_chk_addr(&init_net, &laddr6, ndev, 1)) {
+ vlan_id = rdma_vlan_dev_vlan_id(ndev);
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+ if (vlan_id == 0xffff)
+ vlan_id = 0;
+
+ return vlan_id;
+}
+
+static int
+qedr_addr4_resolve(struct qedr_dev *dev,
+ struct sockaddr_in *src_in,
+ struct sockaddr_in *dst_in, u8 *dst_mac)
+{
+ __be32 src_ip = src_in->sin_addr.s_addr;
+ __be32 dst_ip = dst_in->sin_addr.s_addr;
+ struct neighbour *neigh = NULL;
+ struct rtable *rt = NULL;
+ int rc = 0;
+
+ rt = ip_route_output(&init_net, dst_ip, src_ip, 0, 0);
+ if (IS_ERR(rt)) {
+ DP_ERR(dev, "ip_route_output returned error\n");
+ return -EINVAL;
+ }
+
+ neigh = dst_neigh_lookup(&rt->dst, &dst_ip);
+
+ if (neigh) {
+ rcu_read_lock();
+ if (neigh->nud_state & NUD_VALID) {
+ ether_addr_copy(dst_mac, neigh->ha);
+ DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
+ } else {
+ neigh_event_send(neigh, NULL);
+ }
+ rcu_read_unlock();
+ neigh_release(neigh);
+ }
+
+ ip_rt_put(rt);
+
+ return rc;
+}
+
+static int
+qedr_addr6_resolve(struct qedr_dev *dev,
+ struct sockaddr_in6 *src_in,
+ struct sockaddr_in6 *dst_in, u8 *dst_mac)
+{
+ struct neighbour *neigh = NULL;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+ int rc = 0;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.daddr = dst_in->sin6_addr;
+ fl6.saddr = src_in->sin6_addr;
+
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+
+ if ((!dst) || dst->error) {
+ if (dst) {
+ dst_release(dst);
+ DP_ERR(dev,
+ "ip6_route_output returned dst->error = %d\n",
+ dst->error);
+ }
+ return -EINVAL;
+ }
+ neigh = dst_neigh_lookup(dst, &dst_in);
+
+ if (neigh) {
+ rcu_read_lock();
+ if (neigh->nud_state & NUD_VALID) {
+ ether_addr_copy(dst_mac, neigh->ha);
+ DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
+ } else {
+ neigh_event_send(neigh, NULL);
+ }
+ rcu_read_unlock();
+ neigh_release(neigh);
+ }
+
+ dst_release(dst);
+
+ return rc;
+}
+
+int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct qedr_dev *dev = get_qedr_dev(cm_id->device);
+ struct qed_iwarp_connect_out out_params;
+ struct qed_iwarp_connect_in in_params;
+ struct qed_iwarp_cm_info *cm_info;
+ struct sockaddr_in6 *laddr6;
+ struct sockaddr_in6 *raddr6;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in *raddr;
+ struct qedr_iw_ep *ep;
+ struct qedr_qp *qp;
+ int rc = 0;
+ int i;
+
+ qp = idr_find(&dev->qpidr, conn_param->qpn);
+
+ laddr = (struct sockaddr_in *)&cm_id->local_addr;
+ raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+ raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP,
+ "Connect source address: %pISpc, remote address: %pISpc\n",
+ &cm_id->local_addr, &cm_id->remote_addr);
+
+ if (!laddr->sin_port || !raddr->sin_port)
+ return -EINVAL;
+
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->dev = dev;
+ ep->qp = qp;
+ qp->ep = ep;
+ cm_id->add_ref(cm_id);
+ ep->cm_id = cm_id;
+
+ in_params.event_cb = qedr_iw_event_handler;
+ in_params.cb_context = ep;
+
+ cm_info = &in_params.cm_info;
+ memset(cm_info->local_ip, 0, sizeof(cm_info->local_ip));
+ memset(cm_info->remote_ip, 0, sizeof(cm_info->remote_ip));
+
+ if (!IS_ENABLED(CONFIG_IPV6) ||
+ cm_id->remote_addr.ss_family == AF_INET) {
+ cm_info->ip_version = QED_TCP_IPV4;
+
+ cm_info->remote_ip[0] = ntohl(raddr->sin_addr.s_addr);
+ cm_info->local_ip[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info->remote_port = ntohs(raddr->sin_port);
+ cm_info->local_port = ntohs(laddr->sin_port);
+ cm_info->vlan = qedr_iw_get_vlan_ipv4(dev, cm_info->local_ip);
+
+ rc = qedr_addr4_resolve(dev, laddr, raddr,
+ (u8 *)in_params.remote_mac_addr);
+
+ in_params.mss = dev->iwarp_max_mtu -
+ (sizeof(struct iphdr) + sizeof(struct tcphdr));
+
+ } else {
+ in_params.cm_info.ip_version = QED_TCP_IPV6;
+
+ for (i = 0; i < 4; i++) {
+ cm_info->remote_ip[i] =
+ ntohl(raddr6->sin6_addr.in6_u.u6_addr32[i]);
+ cm_info->local_ip[i] =
+ ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
+ }
+
+ cm_info->local_port = ntohs(laddr6->sin6_port);
+ cm_info->remote_port = ntohs(raddr6->sin6_port);
+
+ in_params.mss = dev->iwarp_max_mtu -
+ (sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
+
+ cm_info->vlan = qedr_iw_get_vlan_ipv6(cm_info->local_ip);
+
+ rc = qedr_addr6_resolve(dev, laddr6, raddr6,
+ (u8 *)in_params.remote_mac_addr);
+ }
+ if (rc)
+ goto err;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP,
+ "ord = %d ird=%d private_data=%p private_data_len=%d rq_psn=%d\n",
+ conn_param->ord, conn_param->ird, conn_param->private_data,
+ conn_param->private_data_len, qp->rq_psn);
+
+ cm_info->ord = conn_param->ord;
+ cm_info->ird = conn_param->ird;
+ cm_info->private_data = conn_param->private_data;
+ cm_info->private_data_len = conn_param->private_data_len;
+ in_params.qp = qp->qed_qp;
+ memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
+
+ ep->during_connect = 1;
+ rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
+ if (rc)
+ goto err;
+
+ return rc;
+
+err:
+ cm_id->rem_ref(cm_id);
+ kfree(ep);
+ return rc;
+}
+
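In both branches above the MSS handed to qed is simply the device's maximum MTU minus the fixed IP and TCP header sizes. A quick standalone check of that arithmetic, with the MTU value and header lengths hard-coded for illustration:

#include <stdio.h>

#define IPV4_HDR_LEN	20	/* sizeof(struct iphdr), no options */
#define IPV6_HDR_LEN	40	/* sizeof(struct ipv6hdr), fixed header */
#define TCP_HDR_LEN	20	/* sizeof(struct tcphdr), no options */

int main(void)
{
	unsigned int mtu = 1500;	/* illustrative iwarp_max_mtu */

	printf("IPv4 MSS: %u\n", mtu - (IPV4_HDR_LEN + TCP_HDR_LEN));	/* 1460 */
	printf("IPv6 MSS: %u\n", mtu - (IPV6_HDR_LEN + TCP_HDR_LEN));	/* 1440 */
	return 0;
}
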
+int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ struct qedr_dev *dev = get_qedr_dev(cm_id->device);
+ struct qedr_iw_listener *listener;
+ struct qed_iwarp_listen_in iparams;
+ struct qed_iwarp_listen_out oparams;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in6 *laddr6;
+ int rc;
+ int i;
+
+ laddr = (struct sockaddr_in *)&cm_id->local_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP,
+ "Create Listener address: %pISpc\n", &cm_id->local_addr);
+
+ listener = kzalloc(sizeof(*listener), GFP_KERNEL);
+ if (!listener)
+ return -ENOMEM;
+
+ listener->dev = dev;
+ cm_id->add_ref(cm_id);
+ listener->cm_id = cm_id;
+ listener->backlog = backlog;
+
+ iparams.cb_context = listener;
+ iparams.event_cb = qedr_iw_event_handler;
+ iparams.max_backlog = backlog;
+
+ if (!IS_ENABLED(CONFIG_IPV6) ||
+ cm_id->local_addr.ss_family == AF_INET) {
+ iparams.ip_version = QED_TCP_IPV4;
+ memset(iparams.ip_addr, 0, sizeof(iparams.ip_addr));
+
+ iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ iparams.port = ntohs(laddr->sin_port);
+ iparams.vlan = qedr_iw_get_vlan_ipv4(dev, iparams.ip_addr);
+ } else {
+ iparams.ip_version = QED_TCP_IPV6;
+
+ for (i = 0; i < 4; i++) {
+ iparams.ip_addr[i] =
+ ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
+ }
+
+ iparams.port = ntohs(laddr6->sin6_port);
+
+ iparams.vlan = qedr_iw_get_vlan_ipv6(iparams.ip_addr);
+ }
+ rc = dev->ops->iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams);
+ if (rc)
+ goto err;
+
+ listener->qed_handle = oparams.handle;
+ cm_id->provider_data = listener;
+ return rc;
+
+err:
+ cm_id->rem_ref(cm_id);
+ kfree(listener);
+ return rc;
+}
+
+int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
+{
+ struct qedr_iw_listener *listener = cm_id->provider_data;
+ struct qedr_dev *dev = get_qedr_dev(cm_id->device);
+ int rc = 0;
+
+ if (listener->qed_handle)
+ rc = dev->ops->iwarp_destroy_listen(dev->rdma_ctx,
+ listener->qed_handle);
+
+ cm_id->rem_ref(cm_id);
+ return rc;
+}
+
+int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
+ struct qedr_dev *dev = ep->dev;
+ struct qedr_qp *qp;
+ struct qed_iwarp_accept_in params;
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
+
+ qp = idr_find(&dev->qpidr, conn_param->qpn);
+ if (!qp) {
+ DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
+ return -EINVAL;
+ }
+
+ ep->qp = qp;
+ qp->ep = ep;
+ cm_id->add_ref(cm_id);
+ ep->cm_id = cm_id;
+
+ params.ep_context = ep->qed_context;
+ params.cb_context = ep;
+ params.qp = ep->qp->qed_qp;
+ params.private_data = conn_param->private_data;
+ params.private_data_len = conn_param->private_data_len;
+ params.ird = conn_param->ird;
+ params.ord = conn_param->ord;
+
+ ep->during_connect = 1;
+ rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
+ if (rc)
+ goto err;
+
+ return rc;
+err:
+ ep->during_connect = 0;
+ cm_id->rem_ref(cm_id);
+ return rc;
+}
+
+int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
+ struct qedr_dev *dev = ep->dev;
+ struct qed_iwarp_reject_in params;
+
+ params.ep_context = ep->qed_context;
+ params.cb_context = ep;
+ params.private_data = pdata;
+ params.private_data_len = pdata_len;
+ ep->qp = NULL;
+
+ return dev->ops->iwarp_reject(dev->rdma_ctx, &params);
+}
+
+void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+
+ atomic_inc(&qp->refcnt);
+}
+
+void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+
+ if (atomic_dec_and_test(&qp->refcnt)) {
+ spin_lock_irq(&qp->dev->idr_lock);
+ idr_remove(&qp->dev->qpidr, qp->qp_id);
+ spin_unlock_irq(&qp->dev->idr_lock);
+ kfree(qp);
+ }
+}
+
+struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+
+ return idr_find(&dev->qpidr, qpn);
+}
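
The add_ref/rem_ref helpers above keep a QP alive while disconnect work or a qpidr lookup may still reference it: the creator holds the initial reference (set to 1 at QP creation, see the verbs.c change below) and the final put removes the idr entry and frees the object. A standalone C11 sketch of that get/put pattern, using stdatomic in place of the kernel's atomic_t and illustrative names:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct qp {
	atomic_int refcnt;
	int id;
};

static void qp_get(struct qp *qp)
{
	atomic_fetch_add(&qp->refcnt, 1);
}

static void qp_put(struct qp *qp)
{
	/* fetch_sub returns the old value: 1 means we dropped the last ref */
	if (atomic_fetch_sub(&qp->refcnt, 1) == 1) {
		printf("freeing qp %d\n", qp->id);
		free(qp);
	}
}

int main(void)
{
	struct qp *qp = malloc(sizeof(*qp));

	if (!qp)
		return 1;
	atomic_init(&qp->refcnt, 1);	/* creator's reference */
	qp->id = 42;

	qp_get(qp);	/* e.g. taken before queueing disconnect work */
	qp_put(qp);	/* dropped by the worker */
	qp_put(qp);	/* destroy drops the last reference and frees */
	return 0;
}
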
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.h b/drivers/infiniband/hw/qedr/qedr_iw_cm.h
new file mode 100644
index 000000000000..08f4b1067e6c
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.h
@@ -0,0 +1,49 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <rdma/iw_cm.h>
+
+int qedr_iw_connect(struct iw_cm_id *cm_id,
+ struct iw_cm_conn_param *conn_param);
+
+int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog);
+
+int qedr_iw_destroy_listen(struct iw_cm_id *cm_id);
+
+int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+
+int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
+
+void qedr_iw_qp_add_ref(struct ib_qp *qp);
+
+void qedr_iw_qp_rem_ref(struct ib_qp *qp);
+
+struct ib_qp *qedr_iw_get_qp(struct ib_device *dev, int qpn);
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
index ad8965397cf7..2bdbb12bfc69 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -48,7 +48,7 @@
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
-#include "qedr_cm.h"
+#include "qedr_roce_cm.h"
void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
{
@@ -64,11 +64,11 @@ void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
dev->gsi_qp = qp;
}
-void qedr_ll2_complete_tx_packet(void *cxt,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet)
+static void qedr_ll2_complete_tx_packet(void *cxt, u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet)
{
struct qedr_dev *dev = (struct qedr_dev *)cxt;
struct qed_roce_ll2_packet *pkt = cookie;
@@ -93,8 +93,8 @@ void qedr_ll2_complete_tx_packet(void *cxt,
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
}
-void qedr_ll2_complete_rx_packet(void *cxt,
- struct qed_ll2_comp_rx_data *data)
+static void qedr_ll2_complete_rx_packet(void *cxt,
+ struct qed_ll2_comp_rx_data *data)
{
struct qedr_dev *dev = (struct qedr_dev *)cxt;
struct qedr_cq *cq = dev->gsi_rqcq;
@@ -122,10 +122,9 @@ void qedr_ll2_complete_rx_packet(void *cxt,
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
}
-void qedr_ll2_release_rx_packet(void *cxt,
- u8 connection_handle,
- void *cookie,
- dma_addr_t rx_buf_addr, bool b_last_packet)
+static void qedr_ll2_release_rx_packet(void *cxt, u8 connection_handle,
+ void *cookie, dma_addr_t rx_buf_addr,
+ bool b_last_packet)
{
/* Do nothing... */
}
@@ -237,7 +236,7 @@ static int qedr_ll2_post_tx(struct qedr_dev *dev,
return 0;
}
-int qedr_ll2_stop(struct qedr_dev *dev)
+static int qedr_ll2_stop(struct qedr_dev *dev)
{
int rc;
@@ -260,8 +259,8 @@ int qedr_ll2_stop(struct qedr_dev *dev)
return rc;
}
-int qedr_ll2_start(struct qedr_dev *dev,
- struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
+static int qedr_ll2_start(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
{
struct qed_ll2_acquire_data data;
struct qed_ll2_cbs cbs;
@@ -660,7 +659,7 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
dev->gsi_ll2_handle,
wr->sg_list[0].addr,
wr->sg_list[0].length,
- 0 /* cookie */,
+ NULL /* cookie */,
1 /* notify_fw */);
if (rc) {
DP_ERR(dev,
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.h b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
index a55916323ea9..a55916323ea9 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.h
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 769ac07c3c8e..b26aa88dab48 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -49,7 +49,7 @@
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
-#include "qedr_cm.h"
+#include "qedr_roce_cm.h"
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
@@ -70,6 +70,20 @@ int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
return 0;
}
+int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *sgid)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+
+ memset(sgid->raw, 0, sizeof(sgid->raw));
+ ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
+ sgid->global.interface_id, sgid->global.subnet_prefix);
+
+ return 0;
+}
+
int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *sgid)
{
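
For iWARP ports the new qedr_iw_query_gid() above synthesizes the single GID directly from the netdev's MAC address: the 16-byte raw GID is zeroed and the 6-byte MAC copied into its start. A standalone sketch of that layout, with illustrative MAC bytes:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* illustrative port MAC; the iWARP GID is this MAC, zero-padded */
	unsigned char mac[6] = { 0x00, 0x0e, 0x1e, 0x11, 0x22, 0x33 };
	unsigned char gid_raw[16];
	int i;

	memset(gid_raw, 0, sizeof(gid_raw));
	memcpy(gid_raw, mac, sizeof(mac));	/* like ether_addr_copy() */

	for (i = 0; i < 16; i++)
		printf("%02x%c", gid_raw[i], i == 15 ? '\n' : ':');
	return 0;
}
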
@@ -263,8 +277,13 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
attr->sm_lid = 0;
attr->sm_sl = 0;
attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
- attr->gid_tbl_len = QEDR_MAX_SGID;
- attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ attr->gid_tbl_len = 1;
+ attr->pkey_tbl_len = 1;
+ } else {
+ attr->gid_tbl_len = QEDR_MAX_SGID;
+ attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+ }
attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
attr->qkey_viol_cntr = 0;
get_link_speed_and_width(rdma_port->link_speed,
@@ -770,7 +789,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
struct qedr_dev *dev,
struct qedr_userq *q,
u64 buf_addr, size_t buf_len,
- int access, int dmasync)
+ int access, int dmasync,
+ int alloc_and_init)
{
u32 fw_pages;
int rc;
@@ -791,19 +811,27 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
if (rc)
goto err0;
- q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
- if (IS_ERR(q->pbl_tbl)) {
- rc = PTR_ERR(q->pbl_tbl);
- goto err0;
- }
-
+ if (alloc_and_init) {
+ q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
+ if (IS_ERR(q->pbl_tbl)) {
+ rc = PTR_ERR(q->pbl_tbl);
+ goto err0;
+ }
qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
FW_PAGE_SHIFT);
+ } else {
+ q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
+ if (!q->pbl_tbl) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+ }
return 0;
err0:
ib_umem_release(q->umem);
+ q->umem = NULL;
return rc;
}
@@ -929,7 +957,8 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
cq->cq_type = QEDR_CQ_TYPE_USER;
rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
- ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
+ ureq.len, IB_ACCESS_LOCAL_WRITE,
+ 1, 1);
if (rc)
goto err0;
@@ -1222,18 +1251,34 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
return 0;
}
-static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
+static void qedr_copy_rq_uresp(struct qedr_dev *dev,
+ struct qedr_create_qp_uresp *uresp,
struct qedr_qp *qp)
{
- uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ /* iWARP requires two doorbells per RQ. */
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ uresp->rq_db_offset =
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
+ uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
+ } else {
+ uresp->rq_db_offset =
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ }
+
uresp->rq_icid = qp->icid;
}
-static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
+static void qedr_copy_sq_uresp(struct qedr_dev *dev,
+ struct qedr_create_qp_uresp *uresp,
struct qedr_qp *qp)
{
uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
- uresp->sq_icid = qp->icid + 1;
+
+ /* iWARP uses the same cid for rq and sq */
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ uresp->sq_icid = qp->icid;
+ else
+ uresp->sq_icid = qp->icid + 1;
}
static int qedr_copy_qp_uresp(struct qedr_dev *dev,
@@ -1243,8 +1288,8 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
int rc;
memset(&uresp, 0, sizeof(uresp));
- qedr_copy_sq_uresp(&uresp, qp);
- qedr_copy_rq_uresp(&uresp, qp);
+ qedr_copy_sq_uresp(dev, &uresp, qp);
+ qedr_copy_rq_uresp(dev, &uresp, qp);
uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
uresp.qp_id = qp->qp_id;
@@ -1264,6 +1309,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
struct ib_qp_init_attr *attrs)
{
spin_lock_init(&qp->q_lock);
+ atomic_set(&qp->refcnt, 1);
qp->pd = pd;
qp->qp_type = attrs->qp_type;
qp->max_inline_data = attrs->cap.max_inline_data;
@@ -1334,6 +1380,52 @@ static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
}
+static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
+{
+ int rc;
+
+ if (!rdma_protocol_iwarp(&dev->ibdev, 1))
+ return 0;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irq(&dev->idr_lock);
+
+ rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);
+
+ spin_unlock_irq(&dev->idr_lock);
+ idr_preload_end();
+
+ return rc < 0 ? rc : 0;
+}
+
+static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
+{
+ if (!rdma_protocol_iwarp(&dev->ibdev, 1))
+ return;
+
+ spin_lock_irq(&dev->idr_lock);
+ idr_remove(&dev->qpidr, id);
+ spin_unlock_irq(&dev->idr_lock);
+}
+
+static inline void
+qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qed_rdma_create_qp_out_params *out_params)
+{
+ qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
+ qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
+
+ qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
+ &qp->usq.pbl_info, FW_PAGE_SHIFT);
+
+ qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
+ qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
+
+ qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
+ &qp->urq.pbl_info, FW_PAGE_SHIFT);
+}
+
static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
{
if (qp->usq.umem)
@@ -1355,12 +1447,11 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
struct qed_rdma_create_qp_out_params out_params;
struct qedr_pd *pd = get_qedr_pd(ibpd);
struct ib_ucontext *ib_ctx = NULL;
- struct qedr_ucontext *ctx = NULL;
struct qedr_create_qp_ureq ureq;
+ int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
int rc = -EINVAL;
ib_ctx = ibpd->uobject->context;
- ctx = get_qedr_ucontext(ib_ctx);
memset(&ureq, 0, sizeof(ureq));
rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
@@ -1371,14 +1462,13 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
/* SQ - read access only (0), dma sync not required (0) */
rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
- ureq.sq_len, 0, 0);
+ ureq.sq_len, 0, 0, alloc_and_init);
if (rc)
return rc;
/* RQ - read access only (0), dma sync not required (0) */
rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
- ureq.rq_len, 0, 0);
-
+ ureq.rq_len, 0, 0, alloc_and_init);
if (rc)
return rc;
@@ -1399,6 +1489,9 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
goto err1;
}
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_iwarp_populate_user_qp(dev, qp, &out_params);
+
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
@@ -1419,6 +1512,21 @@ err1:
return rc;
}
+static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qp->sq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+ qp->sq.db_data.data.icid = qp->icid;
+
+ qp->rq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
+ qp->rq.db_data.data.icid = qp->icid;
+ qp->rq.iwarp_db2 = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
+ qp->rq.iwarp_db2_data.data.icid = qp->icid;
+ qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
+}
+
static int
qedr_roce_create_kernel_qp(struct qedr_dev *dev,
struct qedr_qp *qp,
@@ -1465,8 +1573,71 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
qp->icid = out_params.icid;
qedr_set_roce_db_info(dev, qp);
+ return rc;
+}
- return 0;
+static int
+qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qed_rdma_create_qp_in_params *in_params,
+ u32 n_sq_elems, u32 n_rq_elems)
+{
+ struct qed_rdma_create_qp_out_params out_params;
+ struct qed_chain_ext_pbl ext_pbl;
+ int rc;
+
+ in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
+ QEDR_SQE_ELEMENT_SIZE,
+ QED_CHAIN_MODE_PBL);
+ in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
+ QEDR_RQE_ELEMENT_SIZE,
+ QED_CHAIN_MODE_PBL);
+
+ qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
+ in_params, &out_params);
+
+ if (!qp->qed_qp)
+ return -EINVAL;
+
+ /* Now we allocate the chain */
+ ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
+ ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ n_sq_elems,
+ QEDR_SQE_ELEMENT_SIZE,
+ &qp->sq.pbl, &ext_pbl);
+
+ if (rc)
+ goto err;
+
+ ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
+ ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ n_rq_elems,
+ QEDR_RQE_ELEMENT_SIZE,
+ &qp->rq.pbl, &ext_pbl);
+
+ if (rc)
+ goto err;
+
+ qp->qp_id = out_params.qp_id;
+ qp->icid = out_params.icid;
+
+ qedr_set_iwarp_db_info(dev, qp);
+ return rc;
+
+err:
+ dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+
+ return rc;
}
static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
@@ -1541,8 +1712,12 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
- rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
- n_sq_elems, n_rq_elems);
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
+ n_sq_elems, n_rq_elems);
+ else
+ rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
+ n_sq_elems, n_rq_elems);
if (rc)
qedr_cleanup_kernel(dev, qp);
@@ -1602,6 +1777,10 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
qp->ibqp.qp_num = qp->qp_id;
+ rc = qedr_idr_add(dev, qp, qp->qp_id);
+ if (rc)
+ goto err;
+
return &qp->ibqp;
err:
@@ -1689,10 +1868,13 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* Update doorbell (in case post_recv was
* done before move to RTR)
*/
- wmb();
- writel(qp->rq.db_data.raw, qp->rq.db);
- /* Make sure write takes effect */
- mmiowb();
+
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ wmb();
+ writel(qp->rq.db_data.raw, qp->rq.db);
+ /* Make sure write takes effect */
+ mmiowb();
+ }
break;
case QED_ROCE_QP_STATE_ERR:
break;
@@ -1786,16 +1968,18 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
else
new_qp_state = old_qp_state;
- if (!ib_modify_qp_is_ok
- (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
- DP_ERR(dev,
- "modify qp: invalid attribute mask=0x%x specified for\n"
- "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
- attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
- new_qp_state);
- rc = -EINVAL;
- goto err;
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
+ ibqp->qp_type, attr_mask,
+ IB_LINK_LAYER_ETHERNET)) {
+ DP_ERR(dev,
+ "modify qp: invalid attribute mask=0x%x specified for\n"
+ "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
+ attr_mask, qp->qp_id, ibqp->qp_type,
+ old_qp_state, new_qp_state);
+ rc = -EINVAL;
+ goto err;
+ }
}
/* Translate the masks... */
@@ -2082,7 +2266,7 @@ err:
return rc;
}
-int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
+static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
{
int rc = 0;
@@ -2111,15 +2295,34 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
qp, qp->qp_type);
- if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
- (qp->state != QED_ROCE_QP_STATE_ERR) &&
- (qp->state != QED_ROCE_QP_STATE_INIT)) {
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
+ (qp->state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->state != QED_ROCE_QP_STATE_INIT)) {
- attr.qp_state = IB_QPS_ERR;
- attr_mask |= IB_QP_STATE;
+ attr.qp_state = IB_QPS_ERR;
+ attr_mask |= IB_QP_STATE;
- /* Change the QP state to ERROR */
- qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+ /* Change the QP state to ERROR */
+ qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+ }
+ } else {
+ /* Wait for the connect/accept to complete */
+ if (qp->ep) {
+ int wait_count = 1;
+
+ while (qp->ep->during_connect) {
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "Still in during connect/accept\n");
+
+ msleep(100);
+ if (wait_count++ > 200) {
+ DP_NOTICE(dev,
+ "during connect timeout\n");
+ break;
+ }
+ }
+ }
}
if (qp->qp_type == IB_QPT_GSI)
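
For iWARP, destroy no longer forces the QP to ERR as the RoCE branch does; instead it polls ep->during_connect, giving an in-flight connect/accept up to roughly 20 seconds (200 iterations of 100 ms) before tearing the QP down anyway. A standalone sketch of that bounded polling loop, with a local flag standing in for ep->during_connect:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static volatile bool during_connect;	/* stand-in for ep->during_connect */

int main(void)
{
	int wait_count = 1;

	while (during_connect) {
		usleep(100 * 1000);		/* 100 ms, like msleep(100) */
		if (wait_count++ > 200) {	/* ~20 s upper bound */
			fprintf(stderr, "during connect timeout\n");
			break;
		}
	}
	printf("safe to free QP resources\n");
	return 0;
}
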
@@ -2127,8 +2330,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
qedr_free_qp_resources(dev, qp);
- kfree(qp);
-
+ if (atomic_dec_and_test(&qp->refcnt)) {
+ qedr_idr_remove(dev, qp->qp_id);
+ kfree(qp);
+ }
return rc;
}
@@ -2395,7 +2600,6 @@ err0:
struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
enum ib_mr_type mr_type, u32 max_num_sg)
{
- struct qedr_dev *dev;
struct qedr_mr *mr;
if (mr_type != IB_MR_TYPE_MEM_REG)
@@ -2406,8 +2610,6 @@ struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
if (IS_ERR(mr))
return ERR_PTR(-EINVAL);
- dev = mr->dev;
-
return &mr->ibmr;
}
@@ -2740,6 +2942,7 @@ static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
case IB_WR_SEND_WITH_INV:
return IB_WC_SEND;
case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
return IB_WC_RDMA_READ;
case IB_WR_ATOMIC_CMP_AND_SWP:
return IB_WC_COMP_SWAP;
@@ -2900,11 +3103,8 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
break;
case IB_WR_RDMA_READ_WITH_INV:
- DP_ERR(dev,
- "RDMA READ WITH INVALIDATE not supported\n");
- *bad_wr = wr;
- rc = -EINVAL;
- break;
+ SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
+			/* fallthrough: the rest is identical to RDMA READ */
case IB_WR_RDMA_READ:
wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
@@ -3014,15 +3214,17 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->q_lock, flags);
- if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
- (qp->state != QED_ROCE_QP_STATE_ERR) &&
- (qp->state != QED_ROCE_QP_STATE_SQD)) {
- spin_unlock_irqrestore(&qp->q_lock, flags);
- *bad_wr = wr;
- DP_DEBUG(dev, QEDR_MSG_CQ,
- "QP in wrong state! QP icid=0x%x state %d\n",
- qp->icid, qp->state);
- return -EINVAL;
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
+ (qp->state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->state != QED_ROCE_QP_STATE_SQD)) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "QP in wrong state! QP icid=0x%x state %d\n",
+ qp->icid, qp->state);
+ return -EINVAL;
+ }
}
while (wr) {
@@ -3142,6 +3344,11 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
/* Make sure write sticks */
mmiowb();
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
+ mmiowb(); /* for second doorbell */
+ }
+
wr = wr->next;
}
@@ -3603,23 +3810,3 @@ int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
return IB_MAD_RESULT_SUCCESS;
}
-
-int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
- RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
-
- err = ib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
- return 0;
-}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 0f8ab49d5a1a..1a94425dea33 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -39,6 +39,8 @@ int qedr_modify_port(struct ib_device *, u8 port, int mask,
struct ib_port_modify *props);
int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid);
+int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *gid);
int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
index e0fdb9201423..cb06314a2ae2 100644
--- a/drivers/infiniband/hw/qib/Kconfig
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_QIB
tristate "Intel PCIe HCA support"
depends on 64BIT && INFINIBAND_RDMAVT
+ depends on PCI
---help---
This is a low-level driver for Intel PCIe QLE InfiniBand host
channel adapters. This driver does not support the Intel
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index f9e1c69603a5..092ed8103842 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -102,18 +102,6 @@ extern const struct pci_error_handlers qib_pci_err_handler;
#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
/*
- * Struct used to indicate which errors are logged in each of the
- * error-counters that are logged to EEPROM. A counter is incremented
- * _once_ (saturating at 255) for each event with any bits set in
- * the error or hwerror register masks below.
- */
-#define QIB_EEP_LOG_CNT (4)
-struct qib_eep_log_mask {
- u64 errs_to_log;
- u64 hwerrs_to_log;
-};
-
-/*
* Below contains all data related to a single context (formerly called port).
*/
@@ -443,14 +431,12 @@ struct qib_irq_notify;
#endif
struct qib_msix_entry {
- int irq;
void *arg;
#ifdef CONFIG_INFINIBAND_QIB_DCA
int dca;
int rcv;
struct qib_irq_notify *notifier;
#endif
- char name[MAX_NAME_SIZE];
cpumask_var_t mask;
};
@@ -1081,11 +1067,6 @@ struct qib_devdata {
/* control high-level access to EEPROM */
struct mutex eep_lock;
uint64_t traffic_wds;
- /*
- * masks for which bits of errs, hwerrs that cause
- * each of the counters to increment.
- */
- struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT];
struct qib_diag_client *diag_client;
spinlock_t qib_diag_trans_lock; /* protect diag observer ops */
struct diag_observer_list_elt *diag_observer_list;
@@ -1188,7 +1169,7 @@ int qib_set_lid(struct qib_pportdata *, u32, u8);
void qib_hol_down(struct qib_pportdata *);
void qib_hol_init(struct qib_pportdata *);
void qib_hol_up(struct qib_pportdata *);
-void qib_hol_event(unsigned long);
+void qib_hol_event(struct timer_list *);
void qib_disable_after_error(struct qib_devdata *);
int qib_set_uevent_bits(struct qib_pportdata *, const int);
@@ -1299,10 +1280,9 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
const void *buffer, int len);
void qib_get_eeprom_info(struct qib_devdata *);
-#define qib_inc_eeprom_err(dd, eidx, incr)
void qib_dump_lookup_output_queue(struct qib_devdata *);
void qib_force_pio_avail_update(struct qib_devdata *);
-void qib_clear_symerror_on_linkup(unsigned long opaque);
+void qib_clear_symerror_on_linkup(struct timer_list *t);
/*
* Set LED override, only the two LSBs have "public" meaning, but
@@ -1434,10 +1414,8 @@ int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
const struct pci_device_id *);
void qib_pcie_ddcleanup(struct qib_devdata *);
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent);
-int qib_reinit_intr(struct qib_devdata *);
-void qib_enable_intx(struct qib_devdata *dd);
-void qib_nomsi(struct qib_devdata *);
-void qib_nomsix(struct qib_devdata *);
+void qib_free_irq(struct qib_devdata *dd);
+int qib_reinit_intr(struct qib_devdata *dd);
void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
/* interrupts for device */
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
index a5356cb4252e..9ecaab6232e3 100644
--- a/drivers/infiniband/hw/qib/qib_7220.h
+++ b/drivers/infiniband/hw/qib/qib_7220.h
@@ -67,7 +67,6 @@ struct qib_chip_specific {
u32 lastbuf_for_pio;
u32 updthresh; /* current AvailUpdThld */
u32 updthresh_dflt; /* default AvailUpdThld */
- int irq;
u8 presets_needed;
u8 relock_timer_active;
char emsgbuf[128];
@@ -75,6 +74,7 @@ struct qib_chip_specific {
char bitsmsgbuf[64];
struct timer_list relock_timer;
unsigned int relock_interval; /* in jiffies */
+ struct qib_devdata *dd;
};
struct qib_chippport_specific {
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 775018b32b0d..a9377eee8734 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -761,7 +761,6 @@ static ssize_t qib_diag_read(struct file *fp, char __user *data,
{
struct qib_diag_client *dc = fp->private_data;
struct qib_devdata *dd = dc->dd;
- void __iomem *kreg_base;
ssize_t ret;
if (dc->pid != current->pid) {
@@ -769,8 +768,6 @@ static ssize_t qib_diag_read(struct file *fp, char __user *data,
goto bail;
}
- kreg_base = dd->kregbase;
-
if (count == 0)
ret = 0;
else if ((count % 4) || (*off % 4))
@@ -838,7 +835,6 @@ static ssize_t qib_diag_write(struct file *fp, const char __user *data,
{
struct qib_diag_client *dc = fp->private_data;
struct qib_devdata *dd = dc->dd;
- void __iomem *kreg_base;
ssize_t ret;
if (dc->pid != current->pid) {
@@ -846,8 +842,6 @@ static ssize_t qib_diag_write(struct file *fp, const char __user *data,
goto bail;
}
- kreg_base = dd->kregbase;
-
if (count == 0)
ret = 0;
else if ((count % 4) || (*off % 4))
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 719906a9fd51..33d3335385e8 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -682,9 +682,10 @@ int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)
-static void qib_run_led_override(unsigned long opaque)
+static void qib_run_led_override(struct timer_list *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_pportdata *ppd = from_timer(ppd, t,
+ led_override_timer);
struct qib_devdata *dd = ppd->dd;
int timeoff;
int ph_idx;
@@ -735,9 +736,7 @@ void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
*/
if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
/* Need to start timer */
- init_timer(&ppd->led_override_timer);
- ppd->led_override_timer.function = qib_run_led_override;
- ppd->led_override_timer.data = (unsigned long) ppd;
+ timer_setup(&ppd->led_override_timer, qib_run_led_override, 0);
ppd->led_override_timer.expires = jiffies + 1;
add_timer(&ppd->led_override_timer);
} else {
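
The qib changes in this area convert the old init_timer()/setup_timer() calls, which passed context through an opaque unsigned long data field, to timer_setup(); the callbacks then recover their containing structure with from_timer() (a container_of() wrapper), which is also why some chip-specific structs gain dd/ppd back-pointers. A minimal kernel-module-style sketch of the new API, using illustrative demo_* names:

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_dev {
	struct timer_list stats_timer;
	unsigned long ticks;
};

static struct demo_dev demo;

static void demo_timer_fn(struct timer_list *t)
{
	/* recover the containing structure from the embedded timer */
	struct demo_dev *dd = from_timer(dd, t, stats_timer);

	dd->ticks++;
	mod_timer(&dd->stats_timer, jiffies + HZ);	/* re-arm for 1s */
}

static int __init demo_init(void)
{
	timer_setup(&demo.stats_timer, demo_timer_fn, 0);
	mod_timer(&demo.stats_timer, jiffies + HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo.stats_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
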
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 9396c1807cc3..2d6a191afec0 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -696,15 +696,8 @@ static void qib_clean_part_key(struct qib_ctxtdata *rcd,
struct qib_devdata *dd)
{
int i, j, pchanged = 0;
- u64 oldpkey;
struct qib_pportdata *ppd = rcd->ppd;
- /* for debugging only */
- oldpkey = (u64) ppd->pkeys[0] |
- ((u64) ppd->pkeys[1] << 16) |
- ((u64) ppd->pkeys[2] << 32) |
- ((u64) ppd->pkeys[3] << 48);
-
for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
if (!rcd->pkeys[i])
continue;
@@ -1817,7 +1810,6 @@ static int qib_close(struct inode *in, struct file *fp)
struct qib_devdata *dd;
unsigned long flags;
unsigned ctxt;
- pid_t pid;
mutex_lock(&qib_mutex);
@@ -1859,7 +1851,6 @@ static int qib_close(struct inode *in, struct file *fp)
spin_lock_irqsave(&dd->uctxt_lock, flags);
ctxt = rcd->ctxt;
dd->rcd[ctxt] = NULL;
- pid = rcd->pid;
rcd->pid = 0;
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 3259a60e4f4f..8a15e5c7dd91 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -245,7 +245,6 @@ struct qib_chip_specific {
u64 iblnkerrsnap;
u64 ibcctrl; /* shadow for kr_ibcctrl */
u32 lastlinkrecov; /* link recovery issue */
- int irq;
u32 cntrnamelen;
u32 portcntrnamelen;
u32 ncntrs;
@@ -266,6 +265,7 @@ struct qib_chip_specific {
u64 rpkts; /* total packets received (sample result) */
u64 xmit_wait; /* # of ticks no data sent (sample result) */
struct timer_list pma_timer;
+ struct qib_pportdata *ppd;
char emsgbuf[128];
char bitsmsgbuf[64];
u8 pma_sample_status;
@@ -749,7 +749,6 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
u32 bits, ctrl;
int isfatal = 0;
char *bitsmsg;
- int log_idx;
hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
if (!hwerrs)
@@ -771,11 +770,6 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
hwerrs &= dd->cspec->hwerrmask;
- /* We log some errors to EEPROM, check if we have any of those. */
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
-
/*
* Make sure we get this much out, unless told to be quiet,
* or it's occurred within the last 5 seconds.
@@ -1005,7 +999,6 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
char *msg;
u64 ignore_this_time = 0;
u64 iserr = 0;
- int log_idx;
struct qib_pportdata *ppd = dd->pport;
u64 mask;
@@ -1016,10 +1009,6 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
/* do these first, they are most important */
if (errs & ERR_MASK(HardwareErr))
qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
- else
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (errs & dd->eep_st_masks[log_idx].errs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
if (errs & ~IB_E_BITSEXTANT)
qib_dev_err(dd,
@@ -1485,15 +1474,6 @@ static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
}
-static void qib_6120_free_irq(struct qib_devdata *dd)
-{
- if (dd->cspec->irq) {
- free_irq(dd->cspec->irq, dd);
- dd->cspec->irq = 0;
- }
- qib_nomsi(dd);
-}
-
/**
* qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff
* @dd: the qlogic_ib device
@@ -1502,7 +1482,7 @@ static void qib_6120_free_irq(struct qib_devdata *dd)
*/
static void qib_6120_setup_cleanup(struct qib_devdata *dd)
{
- qib_6120_free_irq(dd);
+ qib_free_irq(dd);
kfree(dd->cspec->cntrs);
kfree(dd->cspec->portcntrs);
if (dd->cspec->dummy_hdrq) {
@@ -1706,6 +1686,8 @@ bail:
*/
static void qib_setup_6120_interrupt(struct qib_devdata *dd)
{
+ int ret;
+
/*
* If the chip supports added error indication via GPIO pins,
* enable interrupts on those bits so the interrupt routine
@@ -1719,19 +1701,12 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd)
qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
}
- if (!dd->cspec->irq)
+ ret = pci_request_irq(dd->pcidev, 0, qib_6120intr, NULL, dd,
+ QIB_DRV_NAME);
+ if (ret)
qib_dev_err(dd,
- "irq is 0, BIOS error? Interrupts won't work\n");
- else {
- int ret;
-
- ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
- QIB_DRV_NAME, dd);
- if (ret)
- qib_dev_err(dd,
- "Couldn't setup interrupt (irq=%d): %d\n",
- dd->cspec->irq, ret);
- }
+ "Couldn't setup interrupt (irq=%d): %d\n",
+ pci_irq_vector(dd->pcidev, 0), ret);
}
/**
@@ -1929,7 +1904,6 @@ static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
u32 type, unsigned long pa)
{
u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
- u32 tidx;
if (!dd->kregbase)
return;
@@ -1953,7 +1927,6 @@ static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
else /* for now, always full 4KB page */
pa |= 2 << 29;
}
- tidx = tidptr - dd->egrtidbase;
writel(pa, tidp32);
mmiowb();
}
@@ -2647,9 +2620,9 @@ static void qib_chk_6120_errormask(struct qib_devdata *dd)
* need traffic_wds done the way it is
* called from add_timer
*/
-static void qib_get_6120_faststats(unsigned long opaque)
+static void qib_get_6120_faststats(struct timer_list *t)
{
- struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_devdata *dd = from_timer(dd, t, stats_timer);
struct qib_pportdata *ppd = dd->pport;
unsigned long flags;
u64 traffic_wds;
@@ -2937,10 +2910,10 @@ static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
return ret;
}
-static void pma_6120_timer(unsigned long data)
+static void pma_6120_timer(struct timer_list *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *)data;
- struct qib_chip_specific *cs = ppd->dd->cspec;
+ struct qib_chip_specific *cs = from_timer(cs, t, pma_timer);
+ struct qib_pportdata *ppd = cs->ppd;
struct qib_ibport *ibp = &ppd->ibport_data;
unsigned long flags;
@@ -3205,6 +3178,7 @@ static int init_6120_variables(struct qib_devdata *dd)
dd->num_pports = 1;
dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
+ dd->cspec->ppd = ppd;
ppd->cpspec = NULL; /* not used in this chip */
spin_lock_init(&dd->cspec->kernel_tid_lock);
@@ -3242,20 +3216,6 @@ static int init_6120_variables(struct qib_devdata *dd)
if (qib_unordered_wc())
dd->flags |= QIB_PIO_FLUSH_WC;
- /*
- * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
- * 2 is Some Misc, 3 is reserved for future.
- */
- dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
-
- /* Ignore errors in PIO/PBC on systems with unordered write-combining */
- if (qib_unordered_wc())
- dd->eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
-
- dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
-
- dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
-
ret = qib_init_pportdata(ppd, dd, 0, 1);
if (ret)
goto bail;
@@ -3289,11 +3249,8 @@ static int init_6120_variables(struct qib_devdata *dd)
dd->rhdrhead_intr_off = 1ULL << 32;
/* setup the stats timer; the add_timer is done at end of init */
- setup_timer(&dd->stats_timer, qib_get_6120_faststats,
- (unsigned long)dd);
-
- setup_timer(&dd->cspec->pma_timer, pma_6120_timer,
- (unsigned long)ppd);
+ timer_setup(&dd->stats_timer, qib_get_6120_faststats, 0);
+ timer_setup(&dd->cspec->pma_timer, pma_6120_timer, 0);
dd->ureg_align = qib_read_kreg32(dd, kr_palign);
@@ -3490,7 +3447,7 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
dd->f_bringup_serdes = qib_6120_bringup_serdes;
dd->f_cleanup = qib_6120_setup_cleanup;
dd->f_clear_tids = qib_6120_clear_tids;
- dd->f_free_irq = qib_6120_free_irq;
+ dd->f_free_irq = qib_free_irq;
dd->f_get_base_info = qib_6120_get_base_info;
dd->f_get_msgheader = qib_6120_get_msgheader;
dd->f_getsendbuf = qib_6120_getsendbuf;
@@ -3559,8 +3516,6 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
if (qib_pcie_params(dd, 8, NULL))
qib_dev_err(dd,
"Failed to setup PCIe or interrupts; continuing anyway\n");
- dd->cspec->irq = pdev->irq; /* save IRQ */
-
/* clear diagctrl register, in case diags were running and crashed */
qib_write_kreg(dd, kr_hwdiagctrl, 0);
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 04bdd3d487b1..bdff2326731e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1042,9 +1042,11 @@ done:
return iserr;
}
-static void reenable_7220_chase(unsigned long opaque)
+static void reenable_7220_chase(struct timer_list *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_chippport_specific *cpspec = from_timer(cpspec, t,
+ chase_timer);
+ struct qib_pportdata *ppd = &cpspec->pportdata;
ppd->cpspec->chase_timer.expires = 0;
qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
@@ -1094,7 +1096,6 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
char *msg;
u64 ignore_this_time = 0;
u64 iserr = 0;
- int log_idx;
struct qib_pportdata *ppd = dd->pport;
u64 mask;
@@ -1105,10 +1106,6 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
/* do these first, they are most important */
if (errs & ERR_MASK(HardwareErr))
qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
- else
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (errs & dd->eep_st_masks[log_idx].errs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
if (errs & QLOGIC_IB_E_SDMAERRS)
sdma_7220_errors(ppd, errs);
@@ -1302,7 +1299,6 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
u32 bits, ctrl;
int isfatal = 0;
char *bitsmsg;
- int log_idx;
hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
if (!hwerrs)
@@ -1326,10 +1322,6 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
hwerrs &= dd->cspec->hwerrmask;
- /* We log some errors to EEPROM, check if we have any of those. */
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
RXE_PARITY))
qib_devinfo(dd->pcidev,
@@ -1663,7 +1655,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
dd->control | QLOGIC_IB_C_FREEZEMODE);
ppd->cpspec->chase_end = 0;
- if (ppd->cpspec->chase_timer.data) /* if initted */
+ if (ppd->cpspec->chase_timer.function) /* if initted */
del_timer_sync(&ppd->cpspec->chase_timer);
if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
@@ -1780,15 +1772,6 @@ static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
}
-static void qib_7220_free_irq(struct qib_devdata *dd)
-{
- if (dd->cspec->irq) {
- free_irq(dd->cspec->irq, dd);
- dd->cspec->irq = 0;
- }
- qib_nomsi(dd);
-}
-
/*
* qib_setup_7220_cleanup - clean up any per-chip chip-specific stuff
* @dd: the qlogic_ib device
@@ -1798,7 +1781,7 @@ static void qib_7220_free_irq(struct qib_devdata *dd)
*/
static void qib_setup_7220_cleanup(struct qib_devdata *dd)
{
- qib_7220_free_irq(dd);
+ qib_free_irq(dd);
kfree(dd->cspec->cntrs);
kfree(dd->cspec->portcntrs);
}
@@ -2026,20 +2009,14 @@ bail:
*/
static void qib_setup_7220_interrupt(struct qib_devdata *dd)
{
- if (!dd->cspec->irq)
- qib_dev_err(dd,
- "irq is 0, BIOS error? Interrupts won't work\n");
- else {
- int ret = request_irq(dd->cspec->irq, qib_7220intr,
- dd->msi_lo ? 0 : IRQF_SHARED,
- QIB_DRV_NAME, dd);
+ int ret;
- if (ret)
- qib_dev_err(dd,
- "Couldn't setup %s interrupt (irq=%d): %d\n",
- dd->msi_lo ? "MSI" : "INTx",
- dd->cspec->irq, ret);
- }
+ ret = pci_request_irq(dd->pcidev, 0, qib_7220intr, NULL, dd,
+ QIB_DRV_NAME);
+ if (ret)
+ qib_dev_err(dd, "Couldn't setup %s interrupt (irq=%d): %d\n",
+ dd->pcidev->msi_enabled ? "MSI" : "INTx",
+ pci_irq_vector(dd->pcidev, 0), ret);
}
/**
@@ -3263,9 +3240,9 @@ done:
* need traffic_wds done the way it is
* called from add_timer
*/
-static void qib_get_7220_faststats(unsigned long opaque)
+static void qib_get_7220_faststats(struct timer_list *t)
{
- struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_devdata *dd = from_timer(dd, t, stats_timer);
struct qib_pportdata *ppd = dd->pport;
unsigned long flags;
u64 traffic_wds;
@@ -3302,16 +3279,12 @@ static int qib_7220_intr_fallback(struct qib_devdata *dd)
return 0;
qib_devinfo(dd->pcidev,
- "MSI interrupt not detected, trying INTx interrupts\n");
- qib_7220_free_irq(dd);
- qib_enable_intx(dd);
- /*
- * Some newer kernels require free_irq before disable_msi,
- * and irq can be changed during disable and INTx enable
- * and we need to therefore use the pcidev->irq value,
- * not our saved MSI value.
- */
- dd->cspec->irq = dd->pcidev->irq;
+ "MSI interrupt not detected, trying INTx interrupts\n");
+
+ qib_free_irq(dd);
+ dd->msi_lo = 0;
+ if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
+ qib_dev_err(dd, "Failed to enable INTx\n");
qib_setup_7220_interrupt(dd);
return 1;
}
@@ -3543,7 +3516,6 @@ static void autoneg_7220_work(struct work_struct *work)
{
struct qib_pportdata *ppd;
struct qib_devdata *dd;
- u64 startms;
u32 i;
unsigned long flags;
@@ -3551,8 +3523,6 @@ static void autoneg_7220_work(struct work_struct *work)
autoneg_work.work)->pportdata;
dd = ppd->dd;
- startms = jiffies_to_msecs(jiffies);
-
/*
* Busy wait for this first part, it should be at most a
* few hundred usec, since we scheduled ourselves for 2msec.
@@ -3997,6 +3967,7 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
dd->num_pports = 1;
dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
+ dd->cspec->dd = dd;
ppd->cpspec = cpspec;
spin_lock_init(&dd->cspec->sdepb_lock);
@@ -4035,16 +4006,6 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
dd->flags |= qib_special_trigger ?
QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
- /*
- * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
- * 2 is Some Misc, 3 is reserved for future.
- */
- dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
-
- dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
-
- dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
-
init_waitqueue_head(&cpspec->autoneg_wait);
INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);
@@ -4069,8 +4030,7 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
if (!qib_mini_init)
qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);
- setup_timer(&ppd->cpspec->chase_timer, reenable_7220_chase,
- (unsigned long)ppd);
+ timer_setup(&ppd->cpspec->chase_timer, reenable_7220_chase, 0);
qib_num_cfg_vls = 1; /* if any 7220's, only one VL */
@@ -4095,9 +4055,7 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
dd->rhdrhead_intr_off = 1ULL << 32;
/* setup the stats timer; the add_timer is done at end of init */
- init_timer(&dd->stats_timer);
- dd->stats_timer.function = qib_get_7220_faststats;
- dd->stats_timer.data = (unsigned long) dd;
+ timer_setup(&dd->stats_timer, qib_get_7220_faststats, 0);
dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;
/*
@@ -4535,7 +4493,7 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
dd->f_bringup_serdes = qib_7220_bringup_serdes;
dd->f_cleanup = qib_setup_7220_cleanup;
dd->f_clear_tids = qib_7220_clear_tids;
- dd->f_free_irq = qib_7220_free_irq;
+ dd->f_free_irq = qib_free_irq;
dd->f_get_base_info = qib_7220_get_base_info;
dd->f_get_msgheader = qib_7220_get_msgheader;
dd->f_getsendbuf = qib_7220_getsendbuf;
@@ -4618,9 +4576,6 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
qib_dev_err(dd,
"Failed to setup PCIe or interrupts; continuing anyway\n");
- /* save IRQ for possible later use */
- dd->cspec->irq = pdev->irq;
-
if (qib_read_kreg64(dd, kr_hwerrstatus) &
QLOGIC_IB_HWE_SERDESPLLFAILED)
qib_write_kreg(dd, kr_hwerrclear,
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 14cadf6d6214..6265dac415fc 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -150,7 +150,7 @@ static struct kparam_string kp_txselect = {
.string = txselect_list,
.maxlen = MAX_ATTEN_LEN
};
-static int setup_txselect(const char *, struct kernel_param *);
+static int setup_txselect(const char *, const struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
&kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect,
@@ -553,7 +553,6 @@ struct qib_chip_specific {
u32 updthresh; /* current AvailUpdThld */
u32 updthresh_dflt; /* default AvailUpdThld */
u32 r1;
- int irq;
u32 num_msix_entries;
u32 sdmabufcnt;
u32 lastbuf_for_pio;
@@ -756,10 +755,8 @@ static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
#ifdef CONFIG_INFINIBAND_QIB_DCA
static void qib_setup_dca(struct qib_devdata *dd);
-static void setup_dca_notifier(struct qib_devdata *dd,
- struct qib_msix_entry *m);
-static void reset_dca_notifier(struct qib_devdata *dd,
- struct qib_msix_entry *m);
+static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
+static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
#endif
/**
@@ -1647,7 +1644,6 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
u64 iserr = 0;
u64 errs;
u64 mask;
- int log_idx;
qib_stats.sps_errints++;
errs = qib_read_kreg64(dd, kr_errstatus);
@@ -1665,10 +1661,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
if (errs & QIB_E_HARDWARE) {
*msg = '\0';
qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
- } else
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (errs & dd->eep_st_masks[log_idx].errs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
+ }
if (errs & QIB_E_SPKTERRS) {
qib_disarm_7322_senderrbufs(dd->pport);
@@ -1739,9 +1732,10 @@ static void qib_error_tasklet(unsigned long data)
qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}
-static void reenable_chase(unsigned long opaque)
+static void reenable_chase(struct timer_list *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_chippport_specific *cp = from_timer(cp, t, chase_timer);
+ struct qib_pportdata *ppd = cp->ppd;
ppd->cpspec->chase_timer.expires = 0;
qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
@@ -2531,7 +2525,7 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
ppd->cpspec->chase_end = 0;
- if (ppd->cpspec->chase_timer.data) /* if initted */
+ if (ppd->cpspec->chase_timer.function) /* if initted */
del_timer_sync(&ppd->cpspec->chase_timer);
/*
@@ -2778,7 +2772,7 @@ static void qib_setup_dca(struct qib_devdata *dd)
qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
cspec->dca_rcvhdr_ctrl[i]);
for (i = 0; i < cspec->num_msix_entries; i++)
- setup_dca_notifier(dd, &cspec->msix_entries[i]);
+ setup_dca_notifier(dd, i);
}
static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
@@ -2820,49 +2814,41 @@ static void qib_irq_notifier_release(struct kref *ref)
}
#endif
-/*
- * Disable MSIx interrupt if enabled, call generic MSIx code
- * to cleanup, and clear pending MSIx interrupts.
- * Used for fallback to INTx, after reset, and when MSIx setup fails.
- */
-static void qib_7322_nomsix(struct qib_devdata *dd)
+static void qib_7322_free_irq(struct qib_devdata *dd)
{
u64 intgranted;
- int n;
+ int i;
dd->cspec->main_int_mask = ~0ULL;
- n = dd->cspec->num_msix_entries;
- if (n) {
- int i;
- dd->cspec->num_msix_entries = 0;
- for (i = 0; i < n; i++) {
+ for (i = 0; i < dd->cspec->num_msix_entries; i++) {
+ /* only free IRQs that were allocated */
+ if (dd->cspec->msix_entries[i].arg) {
#ifdef CONFIG_INFINIBAND_QIB_DCA
- reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
+ reset_dca_notifier(dd, i);
#endif
- irq_set_affinity_hint(
- dd->cspec->msix_entries[i].irq, NULL);
+ irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
+ NULL);
free_cpumask_var(dd->cspec->msix_entries[i].mask);
- free_irq(dd->cspec->msix_entries[i].irq,
- dd->cspec->msix_entries[i].arg);
+ pci_free_irq(dd->pcidev, i,
+ dd->cspec->msix_entries[i].arg);
}
- qib_nomsix(dd);
}
+
+ /* If num_msix_entries was 0, disable the INTx IRQ */
+ if (!dd->cspec->num_msix_entries)
+ pci_free_irq(dd->pcidev, 0, dd);
+ else
+ dd->cspec->num_msix_entries = 0;
+
+ pci_free_irq_vectors(dd->pcidev);
+
/* make sure no MSIx interrupts are left pending */
intgranted = qib_read_kreg64(dd, kr_intgranted);
if (intgranted)
qib_write_kreg(dd, kr_intgranted, intgranted);
}
-static void qib_7322_free_irq(struct qib_devdata *dd)
-{
- if (dd->cspec->irq) {
- free_irq(dd->cspec->irq, dd);
- dd->cspec->irq = 0;
- }
- qib_7322_nomsix(dd);
-}
-
static void qib_setup_7322_cleanup(struct qib_devdata *dd)
{
int i;
@@ -3329,22 +3315,20 @@ static irqreturn_t sdma_cleanup_intr(int irq, void *data)
#ifdef CONFIG_INFINIBAND_QIB_DCA
-static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
+static void reset_dca_notifier(struct qib_devdata *dd, int msixnum)
{
- if (!m->dca)
+ if (!dd->cspec->msix_entries[msixnum].dca)
return;
- qib_devinfo(dd->pcidev,
- "Disabling notifier on HCA %d irq %d\n",
- dd->unit,
- m->irq);
- irq_set_affinity_notifier(
- m->irq,
- NULL);
- m->notifier = NULL;
+
+ qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
+ dd->unit, pci_irq_vector(dd->pcidev, msixnum));
+ irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
+ dd->cspec->msix_entries[msixnum].notifier = NULL;
}
-static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
+static void setup_dca_notifier(struct qib_devdata *dd, int msixnum)
{
+ struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
struct qib_irq_notify *n;
if (!m->dca)
@@ -3354,7 +3338,7 @@ static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
int ret;
m->notifier = n;
- n->notify.irq = m->irq;
+ n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
n->notify.notify = qib_irq_notifier_notify;
n->notify.release = qib_irq_notifier_release;
n->arg = m->arg;
@@ -3415,22 +3399,17 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
if (!dd->cspec->num_msix_entries) {
/* Try to get INTx interrupt */
try_intx:
- if (!dd->pcidev->irq) {
- qib_dev_err(dd,
- "irq is 0, BIOS error? Interrupts won't work\n");
- goto bail;
- }
- ret = request_irq(dd->pcidev->irq, qib_7322intr,
- IRQF_SHARED, QIB_DRV_NAME, dd);
+ ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
+ QIB_DRV_NAME);
if (ret) {
- qib_dev_err(dd,
+ qib_dev_err(
+ dd,
"Couldn't setup INTx interrupt (irq=%d): %d\n",
- dd->pcidev->irq, ret);
- goto bail;
+ pci_irq_vector(dd->pcidev, 0), ret);
+ return;
}
- dd->cspec->irq = dd->pcidev->irq;
dd->cspec->main_int_mask = ~0ULL;
- goto bail;
+ return;
}
/* Try to get MSIx interrupts */
@@ -3453,15 +3432,10 @@ try_intx:
for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
irq_handler_t handler;
void *arg;
- u64 val;
int lsb, reg, sh;
#ifdef CONFIG_INFINIBAND_QIB_DCA
int dca = 0;
#endif
-
- dd->cspec->msix_entries[msixnum].
- name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
- = '\0';
if (i < ARRAY_SIZE(irq_table)) {
if (irq_table[i].port) {
/* skip if for a non-configured port */
@@ -3475,11 +3449,10 @@ try_intx:
#endif
lsb = irq_table[i].lsb;
handler = irq_table[i].handler;
- snprintf(dd->cspec->msix_entries[msixnum].name,
- sizeof(dd->cspec->msix_entries[msixnum].name)
- - 1,
- QIB_DRV_NAME "%d%s", dd->unit,
- irq_table[i].name);
+ ret = pci_request_irq(dd->pcidev, msixnum, handler,
+ NULL, arg, QIB_DRV_NAME "%d%s",
+ dd->unit,
+ irq_table[i].name);
} else {
unsigned ctxt;
@@ -3495,37 +3468,25 @@ try_intx:
#endif
lsb = QIB_I_RCVAVAIL_LSB + ctxt;
handler = qib_7322pintr;
- snprintf(dd->cspec->msix_entries[msixnum].name,
- sizeof(dd->cspec->msix_entries[msixnum].name)
- - 1,
- QIB_DRV_NAME "%d (kctx)", dd->unit);
+ ret = pci_request_irq(dd->pcidev, msixnum, handler,
+ NULL, arg,
+ QIB_DRV_NAME "%d (kctx)",
+ dd->unit);
}
- dd->cspec->msix_entries[msixnum].irq = pci_irq_vector(
- dd->pcidev, msixnum);
- if (dd->cspec->msix_entries[msixnum].irq < 0) {
- qib_dev_err(dd,
- "Couldn't get MSIx irq (vec=%d): %d\n",
- msixnum,
- dd->cspec->msix_entries[msixnum].irq);
- qib_7322_nomsix(dd);
- goto try_intx;
- }
- ret = request_irq(dd->cspec->msix_entries[msixnum].irq,
- handler, 0,
- dd->cspec->msix_entries[msixnum].name,
- arg);
if (ret) {
/*
* Shouldn't happen since the enable said we could
* have as many as we are trying to setup here.
*/
qib_dev_err(dd,
- "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
- msixnum,
- dd->cspec->msix_entries[msixnum].irq,
- ret);
- qib_7322_nomsix(dd);
+ "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
+ msixnum,
+ pci_irq_vector(dd->pcidev, msixnum),
+ ret);
+ qib_7322_free_irq(dd);
+ pci_alloc_irq_vectors(dd->pcidev, 1, 1,
+ PCI_IRQ_LEGACY);
goto try_intx;
}
dd->cspec->msix_entries[msixnum].arg = arg;
@@ -3541,8 +3502,8 @@ try_intx:
mask &= ~(1ULL << lsb);
redirect[reg] |= ((u64) msixnum) << sh;
}
- val = qib_read_kreg64(dd, 2 * msixnum + 1 +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+ qib_read_kreg64(dd, 2 * msixnum + 1 +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)));
if (firstcpu < nr_cpu_ids &&
zalloc_cpumask_var(
&dd->cspec->msix_entries[msixnum].mask,
@@ -3559,7 +3520,7 @@ try_intx:
dd->cspec->msix_entries[msixnum].mask);
}
irq_set_affinity_hint(
- dd->cspec->msix_entries[msixnum].irq,
+ pci_irq_vector(dd->pcidev, msixnum),
dd->cspec->msix_entries[msixnum].mask);
}
msixnum++;
@@ -3570,7 +3531,6 @@ try_intx:
dd->cspec->main_int_mask = mask;
tasklet_init(&dd->error_tasklet, qib_error_tasklet,
(unsigned long)dd);
-bail:;
}
/**
@@ -3674,8 +3634,9 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
/* no interrupts till re-initted */
qib_7322_set_intr_state(dd, 0);
+ qib_7322_free_irq(dd);
+
if (msix_entries) {
- qib_7322_nomsix(dd);
/* can be up to 512 bytes, too big for stack */
msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
sizeof(u64), GFP_KERNEL);
@@ -3765,11 +3726,11 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
write_7322_init_portregs(&dd->pport[i]);
write_7322_initregs(dd);
- if (qib_pcie_params(dd, dd->lbus_width,
- &dd->cspec->num_msix_entries))
+ if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
qib_dev_err(dd,
"Reset failed to setup PCIe or interrupts; continuing anyway\n");
+ dd->cspec->num_msix_entries = msix_entries;
qib_setup_7322_interrupt(dd, 1);
for (i = 0; i < dd->num_pports; ++i) {
@@ -5138,9 +5099,9 @@ done:
*
* called from add_timer
*/
-static void qib_get_7322_faststats(unsigned long opaque)
+static void qib_get_7322_faststats(struct timer_list *t)
{
- struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_devdata *dd = from_timer(dd, t, stats_timer);
struct qib_pportdata *ppd;
unsigned long flags;
u64 traffic_wds;
@@ -5197,8 +5158,9 @@ static int qib_7322_intr_fallback(struct qib_devdata *dd)
qib_devinfo(dd->pcidev,
"MSIx interrupt not detected, trying INTx interrupts\n");
- qib_7322_nomsix(dd);
- qib_enable_intx(dd);
+ qib_7322_free_irq(dd);
+ if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
+ qib_dev_err(dd, "Failed to enable INTx\n");
qib_setup_7322_interrupt(dd, 0);
return 1;
}
@@ -5396,16 +5358,11 @@ static void try_7322_autoneg(struct qib_pportdata *ppd)
static void autoneg_7322_work(struct work_struct *work)
{
struct qib_pportdata *ppd;
- struct qib_devdata *dd;
- u64 startms;
u32 i;
unsigned long flags;
ppd = container_of(work, struct qib_chippport_specific,
autoneg_work.work)->ppd;
- dd = ppd->dd;
-
- startms = jiffies_to_msecs(jiffies);
/*
* Busy wait for this first part, it should be at most a
@@ -6169,7 +6126,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
}
/* handle the txselect parameter changing */
-static int setup_txselect(const char *str, struct kernel_param *kp)
+static int setup_txselect(const char *str, const struct kernel_param *kp)
{
struct qib_devdata *dd;
unsigned long val;
@@ -6614,8 +6571,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
if (!qib_mini_init)
write_7322_init_portregs(ppd);
- setup_timer(&cp->chase_timer, reenable_chase,
- (unsigned long)ppd);
+ timer_setup(&cp->chase_timer, reenable_chase, 0);
ppd++;
}
@@ -6641,8 +6597,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
/* setup the stats timer; the add_timer is done at end of init */
- setup_timer(&dd->stats_timer, qib_get_7322_faststats,
- (unsigned long)dd);
+ timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
dd->ureg_align = 0x10000; /* 64KB alignment */
@@ -7845,13 +7800,12 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
{
struct qib_devdata *dd = ppd->dd;
int chan;
- u32 rbc;
for (chan = 0; chan < SERDES_CHANS; ++chan) {
ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
data, mask);
- rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- addr, 0, 0);
+ ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
+ 0, 0);
}
}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index c5a4c65636d6..85dfbba427f6 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -93,7 +93,7 @@ unsigned qib_cc_table_size;
module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
-static void verify_interrupt(unsigned long);
+static void verify_interrupt(struct timer_list *);
static struct idr qib_unit_table;
u32 qib_cpulist_count;
@@ -233,8 +233,7 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
spin_lock_init(&ppd->cc_shadow_lock);
init_waitqueue_head(&ppd->state_wait);
- setup_timer(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup,
- (unsigned long)ppd);
+ timer_setup(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup, 0);
ppd->qib_wq = NULL;
ppd->ibport_data.pmastats =
@@ -428,8 +427,7 @@ static int loadtime_init(struct qib_devdata *dd)
qib_get_eeprom_info(dd);
/* setup time (don't start yet) to verify we got interrupt */
- setup_timer(&dd->intrchk_timer, verify_interrupt,
- (unsigned long)dd);
+ timer_setup(&dd->intrchk_timer, verify_interrupt, 0);
done:
return ret;
}
@@ -493,9 +491,9 @@ static void enable_chip(struct qib_devdata *dd)
}
}
-static void verify_interrupt(unsigned long opaque)
+static void verify_interrupt(struct timer_list *t)
{
- struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_devdata *dd = from_timer(dd, t, intrchk_timer);
u64 int_counter;
if (!dd)
@@ -753,8 +751,7 @@ done:
continue;
if (dd->flags & QIB_HAS_SEND_DMA)
ret = qib_setup_sdma(ppd);
- setup_timer(&ppd->hol_timer, qib_hol_event,
- (unsigned long)ppd);
+ timer_setup(&ppd->hol_timer, qib_hol_event, 0);
ppd->hol_state = QIB_HOL_UP;
}
@@ -815,23 +812,19 @@ static void qib_stop_timers(struct qib_devdata *dd)
struct qib_pportdata *ppd;
int pidx;
- if (dd->stats_timer.data) {
+ if (dd->stats_timer.function)
del_timer_sync(&dd->stats_timer);
- dd->stats_timer.data = 0;
- }
- if (dd->intrchk_timer.data) {
+ if (dd->intrchk_timer.function)
del_timer_sync(&dd->intrchk_timer);
- dd->intrchk_timer.data = 0;
- }
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
- if (ppd->hol_timer.data)
+ if (ppd->hol_timer.function)
del_timer_sync(&ppd->hol_timer);
- if (ppd->led_override_timer.data) {
+ if (ppd->led_override_timer.function) {
del_timer_sync(&ppd->led_override_timer);
atomic_set(&ppd->led_override_timer_active, 0);
}
- if (ppd->symerr_clear_timer.data)
+ if (ppd->symerr_clear_timer.function)
del_timer_sync(&ppd->symerr_clear_timer);
}
}
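/*
 * Sketch of the "if initted" test used in qib_stop_timers() above, with a
 * hypothetical foo_dev: after timer_setup() the callback pointer is the
 * natural initialisation marker, replacing the old ->data != 0 check.
 */
#include <linux/timer.h>

struct foo_dev {
	struct timer_list stats_timer;
};

static void foo_stop_timers(struct foo_dev *fd)
{
	if (fd->stats_timer.function)	/* only if timer_setup() already ran */
		del_timer_sync(&fd->stats_timer);
}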
@@ -1674,8 +1667,9 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
}
if (!rcd->rcvegrbuf_phys) {
rcd->rcvegrbuf_phys =
- kmalloc_node(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
- GFP_KERNEL, rcd->node_id);
+ kmalloc_array_node(chunk,
+ sizeof(rcd->rcvegrbuf_phys[0]),
+ GFP_KERNEL, rcd->node_id);
if (!rcd->rcvegrbuf_phys)
goto bail_rcvegrbuf;
}
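/*
 * Sketch of the allocation change above, with illustrative names: for an
 * array of n elements on a specific NUMA node, kmalloc_array_node() checks
 * the size multiplication for overflow, whereas open-coding
 * kmalloc_node(n * elem_size, ...) silently wraps on overflow.
 */
#include <linux/slab.h>
#include <linux/types.h>

static u64 *foo_alloc_phys_table(size_t chunks, int node)
{
	/* Same result as kmalloc_node(chunks * sizeof(u64), ...), minus the overflow risk. */
	return kmalloc_array_node(chunks, sizeof(u64), GFP_KERNEL, node);
}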
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index a014fd4cd076..65c3b964ad1b 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -141,7 +141,7 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
qib_hol_up(ppd); /* useful only for 6120 now */
*ppd->statusp |=
QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
- qib_clear_symerror_on_linkup((unsigned long)ppd);
+ qib_clear_symerror_on_linkup(&ppd->symerr_clear_timer);
spin_lock_irqsave(&ppd->lflags_lock, flags);
ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
ppd->lflags &= ~(QIBL_LINKINIT |
@@ -170,9 +170,9 @@ skip_ibchange:
signal_ib_event(ppd, ev);
}
-void qib_clear_symerror_on_linkup(unsigned long opaque)
+void qib_clear_symerror_on_linkup(struct timer_list *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_pportdata *ppd = from_timer(ppd, t, symerr_clear_timer);
if (ppd->lflags & QIBL_LINKACTIVE)
return;
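/*
 * Minimal sketch, with made-up "foo" names, of the timer API conversion
 * applied throughout these files: timer_setup() replaces init_timer()/
 * setup_timer(), the callback takes a struct timer_list *, and the owning
 * structure is recovered with from_timer() instead of an opaque unsigned
 * long.  Synchronous call sites simply pass &obj->timer, as
 * qib_handle_e_ibstatuschanged() now does with &ppd->symerr_clear_timer.
 */
#include <linux/jiffies.h>
#include <linux/timer.h>

struct foo_port {
	struct timer_list clear_timer;
	int errors;
};

static void foo_clear_errors(struct timer_list *t)
{
	struct foo_port *fp = from_timer(fp, t, clear_timer);

	fp->errors = 0;
}

static void foo_port_init(struct foo_port *fp)
{
	timer_setup(&fp->clear_timer, foo_clear_errors, 0);
	mod_timer(&fp->clear_timer, jiffies + HZ);
}

static void foo_link_up(struct foo_port *fp)
{
	/* Direct callers now hand in the timer itself. */
	foo_clear_errors(&fp->clear_timer);
}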
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 82d9da9b6997..4845d000c22f 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -280,7 +280,7 @@ static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
{
struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
struct qib_devdata *dd = dd_from_ibdev(ibdev);
- u32 vendor, majrev, minrev;
+ u32 majrev, minrev;
unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
/* GUID 0 is illegal */
@@ -303,7 +303,6 @@ static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
minrev = dd->minrev;
nip->revision = cpu_to_be32((majrev << 16) | minrev);
nip->local_port_num = port;
- vendor = dd->vendorid;
nip->vendor_id[0] = QIB_SRC_OUI_1;
nip->vendor_id[1] = QIB_SRC_OUI_2;
nip->vendor_id[2] = QIB_SRC_OUI_3;
@@ -434,6 +433,7 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
/* Bad mkey not a violation below level 2 */
if (ibp->rvp.mkeyprot < 2)
break;
+ /* fall through */
case IB_MGMT_METHOD_SET:
case IB_MGMT_METHOD_TRAP_REPRESS:
if (ibp->rvp.mkey_violations != 0xFFFF)
@@ -2446,9 +2446,9 @@ bail:
return ret;
}
-static void xmit_wait_timer_func(unsigned long opaque)
+static void xmit_wait_timer_func(struct timer_list *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_pportdata *ppd = from_timer(ppd, t, cong_stats.timer);
struct qib_devdata *dd = dd_from_ppd(ppd);
unsigned long flags;
u8 status;
@@ -2478,10 +2478,8 @@ void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx)
/* Initialize xmit_wait structure */
dd->pport[port_idx].cong_stats.counter = 0;
- init_timer(&dd->pport[port_idx].cong_stats.timer);
- dd->pport[port_idx].cong_stats.timer.function = xmit_wait_timer_func;
- dd->pport[port_idx].cong_stats.timer.data =
- (unsigned long)(&dd->pport[port_idx]);
+ timer_setup(&dd->pport[port_idx].cong_stats.timer,
+ xmit_wait_timer_func, 0);
dd->pport[port_idx].cong_stats.timer.expires = 0;
add_timer(&dd->pport[port_idx].cong_stats.timer);
}
@@ -2492,7 +2490,7 @@ void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx)
struct qib_devdata *dd = container_of(ibdev,
struct qib_devdata, verbs_dev);
- if (dd->pport[port_idx].cong_stats.timer.data)
+ if (dd->pport[port_idx].cong_stats.timer.function)
del_timer_sync(&dd->pport[port_idx].cong_stats.timer);
if (dd->pport[port_idx].ibport_data.smi_ah)
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index d90403e31a9d..5ac7b31c346b 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -193,7 +193,7 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd)
* chip reset (the kernel PCI infrastructure doesn't yet handle that
 * correctly).
*/
-static void qib_msi_setup(struct qib_devdata *dd, int pos)
+static void qib_cache_msi_info(struct qib_devdata *dd, int pos)
{
struct pci_dev *pdev = dd->pcidev;
u16 control;
@@ -208,64 +208,39 @@ static void qib_msi_setup(struct qib_devdata *dd, int pos)
&dd->msi_data);
}
-static int qib_allocate_irqs(struct qib_devdata *dd, u32 maxvec)
-{
- unsigned int flags = PCI_IRQ_LEGACY;
-
- /* Check our capabilities */
- if (dd->pcidev->msix_cap) {
- flags |= PCI_IRQ_MSIX;
- } else {
- if (dd->pcidev->msi_cap) {
- flags |= PCI_IRQ_MSI;
- /* Get msi_lo and msi_hi */
- qib_msi_setup(dd, dd->pcidev->msi_cap);
- }
- }
-
- if (!(flags & (PCI_IRQ_MSIX | PCI_IRQ_MSI)))
- qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
-
- return pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
-}
-
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)
{
u16 linkstat, speed;
int nvec;
int maxvec;
- int ret = 0;
+ unsigned int flags = PCI_IRQ_MSIX | PCI_IRQ_MSI;
if (!pci_is_pcie(dd->pcidev)) {
qib_dev_err(dd, "Can't find PCI Express capability!\n");
/* set up something... */
dd->lbus_width = 1;
dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
- ret = -1;
+ nvec = -1;
goto bail;
}
+ if (dd->flags & QIB_HAS_INTX)
+ flags |= PCI_IRQ_LEGACY;
maxvec = (nent && *nent) ? *nent : 1;
- nvec = qib_allocate_irqs(dd, maxvec);
- if (nvec < 0) {
- ret = nvec;
+ nvec = pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
+ if (nvec < 0)
goto bail;
- }
/*
- * If nent exists, make sure to record how many vectors were allocated
+ * If nent exists, make sure to record how many vectors were allocated.
+ * If msix_enabled is false, return 0 so the fallback code works
+ * correctly.
*/
- if (nent) {
- *nent = nvec;
+ if (nent)
+ *nent = !dd->pcidev->msix_enabled ? 0 : nvec;
- /*
- * If we requested (nent) MSIX, but msix_enabled is not set,
- * pci_alloc_irq_vectors() enabled INTx.
- */
- if (!dd->pcidev->msix_enabled)
- qib_dev_err(dd,
- "no msix vectors allocated, using INTx\n");
- }
+ if (dd->pcidev->msi_enabled)
+ qib_cache_msi_info(dd, dd->pcidev->msi_cap);
pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
/*
@@ -306,7 +281,21 @@ bail:
/* fill in string, even on errors */
snprintf(dd->lbus_info, sizeof(dd->lbus_info),
"PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
- return ret;
+ return nvec < 0 ? nvec : 0;
+}
+
+/**
+ * qib_free_irq - Cleanup INTx and MSI interrupts
+ * @dd: valid pointer to qib dev data
+ *
+ * Since cleanup for INTx and MSI interrupts is trivial, have a common
+ * routine.
+ *
+ */
+void qib_free_irq(struct qib_devdata *dd)
+{
+ pci_free_irq(dd->pcidev, 0, dd);
+ pci_free_irq_vectors(dd->pcidev);
}
/*
@@ -351,10 +340,10 @@ int qib_reinit_intr(struct qib_devdata *dd)
dd->msi_data);
ret = 1;
bail:
- if (!ret && (dd->flags & QIB_HAS_INTX)) {
- qib_enable_intx(dd);
+ qib_free_irq(dd);
+
+ if (!ret && (dd->flags & QIB_HAS_INTX))
ret = 1;
- }
/* and now set the pci master bit again */
pci_set_master(dd->pcidev);
@@ -363,56 +352,6 @@ bail:
}
/*
- * Disable msi interrupt if enabled, and clear msi_lo.
- * This is used primarily for the fallback to INTx, but
- * is also used in reinit after reset, and during cleanup.
- */
-void qib_nomsi(struct qib_devdata *dd)
-{
- dd->msi_lo = 0;
- pci_free_irq_vectors(dd->pcidev);
-}
-
-/*
- * Same as qib_nosmi, but for MSIx.
- */
-void qib_nomsix(struct qib_devdata *dd)
-{
- pci_free_irq_vectors(dd->pcidev);
-}
-
-/*
- * Similar to pci_intx(pdev, 1), except that we make sure
- * msi(x) is off.
- */
-void qib_enable_intx(struct qib_devdata *dd)
-{
- u16 cw, new;
- int pos;
- struct pci_dev *pdev = dd->pcidev;
-
- if (pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY) < 0)
- qib_dev_err(dd, "Failed to enable INTx\n");
-
- pos = pdev->msi_cap;
- if (pos) {
- /* then turn off MSI */
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
- new = cw & ~PCI_MSI_FLAGS_ENABLE;
- if (new != cw)
- pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
- }
- pos = pdev->msix_cap;
- if (pos) {
- /* then turn off MSIx */
- pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
- new = cw & ~PCI_MSIX_FLAGS_ENABLE;
- if (new != cw)
- pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, new);
- }
-}
-
-/*
* These two routines are helper routines for the device reset code
* to move all the pcie code out of the chip-specific driver code.
*/
@@ -458,7 +397,6 @@ MODULE_PARM_DESC(pcie_coalesce, "tune PCIe colescing on some Intel chipsets");
*/
static void qib_tune_pcie_coalesce(struct qib_devdata *dd)
{
- int r;
struct pci_dev *parent;
u16 devid;
u32 mask, bits, val;
@@ -513,7 +451,7 @@ static void qib_tune_pcie_coalesce(struct qib_devdata *dd)
pci_read_config_dword(parent, 0x48, &val);
val &= ~mask;
val |= bits;
- r = pci_write_config_dword(parent, 0x48, val);
+ pci_write_config_dword(parent, 0x48, val);
}
/*
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index e9a91736b12d..8f5754fb8579 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1869,7 +1869,7 @@ send_middle:
qp->r_rcv_len = 0;
if (opcode == OP(SEND_ONLY))
goto no_immediate_data;
- /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
+ /* fall through -- for SEND_ONLY_WITH_IMMEDIATE */
case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 53efbb0b40c4..9a37e844d4c8 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -368,7 +368,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
again:
smp_read_barrier_depends(); /* see post_one_send() */
- if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
+ if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
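/*
 * Sketch, using a toy ring index, of the READ_ONCE() substitution made in
 * the send-queue polling above: READ_ONCE() is the current API for a
 * tear-free, compiler-ordered load and subsumes the old ACCESS_ONCE()
 * macro, which could not cope with non-scalar types.
 */
#include <linux/compiler.h>
#include <linux/types.h>

struct foo_queue {
	u32 head;	/* advanced by the producer */
	u32 tail;	/* private to the consumer */
};

static bool foo_queue_empty(struct foo_queue *q)
{
	/* Re-read head on every poll; never let the compiler cache it. */
	return q->tail == READ_ONCE(q->head);
}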
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index c72775f27212..12caf3db8c34 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -755,7 +755,6 @@ static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
int addr;
int ret;
unsigned long flags;
- const char *op;
/* Pick appropriate transaction reg and "Chip select" for this serdes */
switch (sdnum) {
@@ -775,7 +774,6 @@ static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
return -1;
}
- op = rd_notwr ? "Rd" : "Wr";
spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
owned = epb_access(dd, sdnum, 1);
@@ -1390,11 +1388,11 @@ module_param_named(relock_by_timer, qib_relock_by_timer, uint,
S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
-static void qib_run_relock(unsigned long opaque)
+static void qib_run_relock(struct timer_list *t)
{
- struct qib_devdata *dd = (struct qib_devdata *)opaque;
+ struct qib_chip_specific *cs = from_timer(cs, t, relock_timer);
+ struct qib_devdata *dd = cs->dd;
struct qib_pportdata *ppd = dd->pport;
- struct qib_chip_specific *cs = dd->cspec;
int timeoff;
/*
@@ -1440,9 +1438,7 @@ void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
/* If timer has not yet been started, do so. */
if (!cs->relock_timer_active) {
cs->relock_timer_active = 1;
- init_timer(&cs->relock_timer);
- cs->relock_timer.function = qib_run_relock;
- cs->relock_timer.data = (unsigned long) dd;
+ timer_setup(&cs->relock_timer, qib_run_relock, 0);
cs->relock_interval = timeout;
cs->relock_timer.expires = jiffies + timeout;
add_timer(&cs->relock_timer);
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index 891873b38a1e..c3690bd51582 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -808,7 +808,7 @@ void __qib_sdma_process_event(struct qib_pportdata *ppd,
* bringing the link up with traffic active on
* 7220, e.g. */
ss->go_s99_running = 1;
- /* fall through and start dma engine */
+ /* fall through -- and start dma engine */
case qib_sdma_event_e10_go_hw_start:
/* This reference means the state machine is started */
sdma_get(&ppd->sdma_state);
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index eface3b3dacf..29785eb84646 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -179,8 +179,6 @@ void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
pppd[i] = NULL;
for (i = 0; i < cnt; i++) {
- int which;
-
if (!test_bit(i, mask))
continue;
/*
@@ -201,9 +199,7 @@ void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
(!test_bit(i << 1, dd->pioavailkernel) &&
find_ctxt(dd, i))) {
__set_bit(i, dd->pio_need_disarm);
- which = 0;
} else {
- which = 1;
dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
}
spin_unlock_irqrestore(&dd->pioavail_lock, flags);
@@ -552,9 +548,9 @@ void qib_hol_up(struct qib_pportdata *ppd)
/*
* This is only called via the timer.
*/
-void qib_hol_event(unsigned long opaque)
+void qib_hol_event(struct timer_list *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_pportdata *ppd = from_timer(ppd, t, hol_timer);
/* If hardware error, etc, skip. */
if (!(ppd->dd->flags & QIB_INITTED))
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 498e2202e72c..bddcc37ace44 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -61,7 +61,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_dma_busy)) {
@@ -91,7 +91,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
/* Check if send work queue is empty. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
/*
* Start a new request.
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index be4907453ac4..15962ed193ce 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -253,7 +253,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_dma_busy)) {
@@ -267,7 +267,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
/* see post_one_send() */
smp_read_barrier_depends();
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 9d92aeb8d9a1..c55000501582 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -389,9 +389,9 @@ drop:
* This is called from a timer to check for QPs
* which need kernel memory in order to send a packet.
*/
-static void mem_timer(unsigned long data)
+static void mem_timer(struct timer_list *t)
{
- struct qib_ibdev *dev = (struct qib_ibdev *) data;
+ struct qib_ibdev *dev = from_timer(dev, t, mem_timer);
struct list_head *list = &dev->memwait;
struct rvt_qp *qp = NULL;
struct qib_qp_priv *priv = NULL;
@@ -701,7 +701,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
*/
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
- struct rvt_qp *qp, *nqp;
+ struct rvt_qp *qp;
struct qib_qp_priv *qpp, *nqpp;
struct rvt_qp *qps[20];
struct qib_ibdev *dev;
@@ -714,7 +714,6 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
/* Search wait list for first QP wanting DMA descriptors. */
list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
qp = qpp->owner;
- nqp = nqpp->owner;
if (qp->port_num != ppd->port)
continue;
if (n == ARRAY_SIZE(qps))
@@ -1532,7 +1531,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
init_ibport(ppd + i);
/* Only need to initialize non-zero fields. */
- setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
+ timer_setup(&dev->mem_timer, mem_timer, 0);
INIT_LIST_HEAD(&dev->piowait);
INIT_LIST_HEAD(&dev->dmawait);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 092d4e11a633..912d8ef04352 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -392,14 +392,12 @@ int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
void *data)
{
int status = 0;
- int vnic_idx;
struct ib_event ib_event;
enum ib_qp_state old_state;
struct usnic_transport_spec *trans_spec;
struct usnic_ib_qp_grp_flow *qp_flow;
old_state = qp_grp->state;
- vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
trans_spec = (struct usnic_transport_spec *) data;
spin_lock(&qp_grp->lock);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
index b1458be1d402..a8a2314c9531 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
@@ -84,30 +84,7 @@ struct usnic_ib_qp_grp_flow {
char dentry_name[32];
};
-static const struct
-usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = {
- { /*USNIC_TRANSPORT_UNKNOWN*/
- .resources = {
- {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
- },
- },
- { /*USNIC_TRANSPORT_ROCE_CUSTOM*/
- .resources = {
- {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
- },
- },
- { /*USNIC_TRANSPORT_IPV4_UDP*/
- .resources = {
- {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
- },
- },
-};
+extern const struct usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX];
const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state);
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 32956f9f5715..685ef2293cb8 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -43,6 +43,7 @@
#include "usnic_ib_qp_grp.h"
#include "usnic_vnic.h"
#include "usnic_ib_verbs.h"
+#include "usnic_ib_sysfs.h"
#include "usnic_log.h"
#include "usnic_ib_sysfs.h"
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index e4113ef09315..aa2456a4f9bd 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -42,6 +42,7 @@
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
+#include "usnic_ib_verbs.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
@@ -50,6 +51,30 @@
#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
+const struct usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = {
+ { /*USNIC_TRANSPORT_UNKNOWN*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+ { /*USNIC_TRANSPORT_ROCE_CUSTOM*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+ { /*USNIC_TRANSPORT_IPV4_UDP*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+};
+
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
*fw_ver = *((u64 *)fw_ver_str);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/Makefile b/drivers/infiniband/hw/vmw_pvrdma/Makefile
index 0194ed19f542..2f52e0a044a0 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/Makefile
+++ b/drivers/infiniband/hw/vmw_pvrdma/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma.o
-vmw_pvrdma-y := pvrdma_cmd.o pvrdma_cq.o pvrdma_doorbell.o pvrdma_main.o pvrdma_misc.o pvrdma_mr.o pvrdma_qp.o pvrdma_verbs.o
+vmw_pvrdma-y := pvrdma_cmd.o pvrdma_cq.o pvrdma_doorbell.o pvrdma_main.o pvrdma_misc.o pvrdma_mr.o pvrdma_qp.o pvrdma_srq.o pvrdma_verbs.o
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 984aa3484928..63bc2efc34eb 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -162,6 +162,22 @@ struct pvrdma_ah {
struct pvrdma_av av;
};
+struct pvrdma_srq {
+ struct ib_srq ibsrq;
+ int offset;
+ spinlock_t lock; /* SRQ lock. */
+ int wqe_cnt;
+ int wqe_size;
+ int max_gs;
+ struct ib_umem *umem;
+ struct pvrdma_ring_state *ring;
+ struct pvrdma_page_dir pdir;
+ u32 srq_handle;
+ int npages;
+ refcount_t refcnt;
+ wait_queue_head_t wait;
+};
+
struct pvrdma_qp {
struct ib_qp ibqp;
u32 qp_handle;
@@ -171,6 +187,7 @@ struct pvrdma_qp {
struct ib_umem *rumem;
struct ib_umem *sumem;
struct pvrdma_page_dir pdir;
+ struct pvrdma_srq *srq;
int npages;
int npages_send;
int npages_recv;
@@ -210,6 +227,8 @@ struct pvrdma_dev {
struct pvrdma_page_dir cq_pdir;
struct pvrdma_cq **cq_tbl;
spinlock_t cq_tbl_lock;
+ struct pvrdma_srq **srq_tbl;
+ spinlock_t srq_tbl_lock;
struct pvrdma_qp **qp_tbl;
spinlock_t qp_tbl_lock;
struct pvrdma_uar_table uar_table;
@@ -221,6 +240,7 @@ struct pvrdma_dev {
bool ib_active;
atomic_t num_qps;
atomic_t num_cqs;
+ atomic_t num_srqs;
atomic_t num_pds;
atomic_t num_ahs;
@@ -256,6 +276,11 @@ static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq)
return container_of(ibcq, struct pvrdma_cq, ibcq);
}
+static inline struct pvrdma_srq *to_vsrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct pvrdma_srq, ibsrq);
+}
+
static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct pvrdma_user_mr, ibmr);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
index df0a6b525021..6fd5a8f4e2f6 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -339,6 +339,10 @@ enum {
PVRDMA_CMD_DESTROY_UC,
PVRDMA_CMD_CREATE_BIND,
PVRDMA_CMD_DESTROY_BIND,
+ PVRDMA_CMD_CREATE_SRQ,
+ PVRDMA_CMD_MODIFY_SRQ,
+ PVRDMA_CMD_QUERY_SRQ,
+ PVRDMA_CMD_DESTROY_SRQ,
PVRDMA_CMD_MAX,
};
@@ -361,6 +365,10 @@ enum {
PVRDMA_CMD_DESTROY_UC_RESP_NOOP,
PVRDMA_CMD_CREATE_BIND_RESP_NOOP,
PVRDMA_CMD_DESTROY_BIND_RESP_NOOP,
+ PVRDMA_CMD_CREATE_SRQ_RESP,
+ PVRDMA_CMD_MODIFY_SRQ_RESP,
+ PVRDMA_CMD_QUERY_SRQ_RESP,
+ PVRDMA_CMD_DESTROY_SRQ_RESP,
PVRDMA_CMD_MAX_RESP,
};
@@ -495,6 +503,46 @@ struct pvrdma_cmd_destroy_cq {
u8 reserved[4];
};
+struct pvrdma_cmd_create_srq {
+ struct pvrdma_cmd_hdr hdr;
+ u64 pdir_dma;
+ u32 pd_handle;
+ u32 nchunks;
+ struct pvrdma_srq_attr attrs;
+ u8 srq_type;
+ u8 reserved[7];
+};
+
+struct pvrdma_cmd_create_srq_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 srqn;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_modify_srq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 srq_handle;
+ u32 attr_mask;
+ struct pvrdma_srq_attr attrs;
+};
+
+struct pvrdma_cmd_query_srq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 srq_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_query_srq_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ struct pvrdma_srq_attr attrs;
+};
+
+struct pvrdma_cmd_destroy_srq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 srq_handle;
+ u8 reserved[4];
+};
+
struct pvrdma_cmd_create_qp {
struct pvrdma_cmd_hdr hdr;
u64 pdir_dma;
@@ -594,6 +642,10 @@ union pvrdma_cmd_req {
struct pvrdma_cmd_destroy_qp destroy_qp;
struct pvrdma_cmd_create_bind create_bind;
struct pvrdma_cmd_destroy_bind destroy_bind;
+ struct pvrdma_cmd_create_srq create_srq;
+ struct pvrdma_cmd_modify_srq modify_srq;
+ struct pvrdma_cmd_query_srq query_srq;
+ struct pvrdma_cmd_destroy_srq destroy_srq;
};
union pvrdma_cmd_resp {
@@ -608,6 +660,8 @@ union pvrdma_cmd_resp {
struct pvrdma_cmd_create_qp_resp create_qp_resp;
struct pvrdma_cmd_query_qp_resp query_qp_resp;
struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
+ struct pvrdma_cmd_create_srq_resp create_srq_resp;
+ struct pvrdma_cmd_query_srq_resp query_srq_resp;
};
#endif /* __PVRDMA_DEV_API_H__ */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 6ce709a67959..1f4e18717a00 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -118,6 +118,7 @@ static int pvrdma_init_device(struct pvrdma_dev *dev)
spin_lock_init(&dev->cmd_lock);
sema_init(&dev->cmd_sema, 1);
atomic_set(&dev->num_qps, 0);
+ atomic_set(&dev->num_srqs, 0);
atomic_set(&dev->num_cqs, 0);
atomic_set(&dev->num_pds, 0);
atomic_set(&dev->num_ahs, 0);
@@ -254,9 +255,32 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
goto err_cq_free;
spin_lock_init(&dev->qp_tbl_lock);
+ /* Check if SRQ is supported by backend */
+ if (dev->dsr->caps.max_srq) {
+ dev->ib_dev.uverbs_cmd_mask |=
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
+
+ dev->ib_dev.create_srq = pvrdma_create_srq;
+ dev->ib_dev.modify_srq = pvrdma_modify_srq;
+ dev->ib_dev.query_srq = pvrdma_query_srq;
+ dev->ib_dev.destroy_srq = pvrdma_destroy_srq;
+ dev->ib_dev.post_srq_recv = pvrdma_post_srq_recv;
+
+ dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
+ sizeof(struct pvrdma_srq *),
+ GFP_KERNEL);
+ if (!dev->srq_tbl)
+ goto err_qp_free;
+ }
+ spin_lock_init(&dev->srq_tbl_lock);
+
ret = ib_register_device(&dev->ib_dev, NULL);
if (ret)
- goto err_qp_free;
+ goto err_srq_free;
for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) {
ret = device_create_file(&dev->ib_dev.dev,
@@ -271,6 +295,8 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
err_class:
ib_unregister_device(&dev->ib_dev);
+err_srq_free:
+ kfree(dev->srq_tbl);
err_qp_free:
kfree(dev->qp_tbl);
err_cq_free:
@@ -353,6 +379,35 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
}
}
+static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
+{
+ struct pvrdma_srq *srq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->srq_tbl_lock, flags);
+ if (dev->srq_tbl)
+ srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq];
+ else
+ srq = NULL;
+ if (srq)
+ refcount_inc(&srq->refcnt);
+ spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
+
+ if (srq && srq->ibsrq.event_handler) {
+ struct ib_srq *ibsrq = &srq->ibsrq;
+ struct ib_event e;
+
+ e.device = ibsrq->device;
+ e.element.srq = ibsrq;
+ e.event = type; /* 1:1 mapping for now. */
+ ibsrq->event_handler(&e, ibsrq->srq_context);
+ }
+ if (srq) {
+ if (refcount_dec_and_test(&srq->refcnt))
+ wake_up(&srq->wait);
+ }
+}
+
static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
enum ib_event_type event)
{
@@ -423,6 +478,7 @@ static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
case PVRDMA_EVENT_SRQ_ERR:
case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
+ pvrdma_srq_event(dev, eqe->info, eqe->type);
break;
case PVRDMA_EVENT_PORT_ACTIVE:
@@ -1059,6 +1115,7 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
iounmap(dev->regs);
kfree(dev->sgid_tbl);
kfree(dev->cq_tbl);
+ kfree(dev->srq_tbl);
kfree(dev->qp_tbl);
pvrdma_uar_table_cleanup(dev);
iounmap(dev->driver_uar.map);
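/*
 * Sketch of the event/teardown protocol the SRQ table above relies on,
 * using invented foo_* names: lookup paths take a temporary reference under
 * the table lock, and destroy unpublishes the object, drops the creation
 * reference (set with refcount_set(&res->refcnt, 1)) and sleeps until every
 * in-flight event handler has finished.
 */
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct foo_res {
	refcount_t refcnt;
	wait_queue_head_t wait;
};

static struct foo_res *foo_tbl[16];
static DEFINE_SPINLOCK(foo_tbl_lock);

static void foo_dispatch_event(u32 handle)
{
	struct foo_res *res;
	unsigned long flags;

	spin_lock_irqsave(&foo_tbl_lock, flags);
	res = foo_tbl[handle % ARRAY_SIZE(foo_tbl)];
	if (res)
		refcount_inc(&res->refcnt);
	spin_unlock_irqrestore(&foo_tbl_lock, flags);

	if (!res)
		return;
	/* ... deliver the event to the consumer here ... */
	if (refcount_dec_and_test(&res->refcnt))
		wake_up(&res->wait);
}

static void foo_destroy(struct foo_res *res, u32 handle)
{
	unsigned long flags;

	spin_lock_irqsave(&foo_tbl_lock, flags);
	foo_tbl[handle % ARRAY_SIZE(foo_tbl)] = NULL;
	spin_unlock_irqrestore(&foo_tbl_lock, flags);

	/* Drop the creation reference; wait out any event handlers. */
	if (!refcount_dec_and_test(&res->refcnt))
		wait_event(res->wait, !refcount_read(&res->refcnt));
	kfree(res);
}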
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index ed34d5a581fa..10420a18d02f 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -198,6 +198,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
struct pvrdma_create_qp ucmd;
unsigned long flags;
int ret;
+ bool is_srq = !!init_attr->srq;
if (init_attr->create_flags) {
dev_warn(&dev->pdev->dev,
@@ -214,6 +215,12 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
+ if (is_srq && !dev->dsr->caps.max_srq) {
+ dev_warn(&dev->pdev->dev,
+ "SRQs not supported by device\n");
+ return ERR_PTR(-EINVAL);
+ }
+
if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
return ERR_PTR(-ENOMEM);
@@ -252,26 +259,36 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
goto err_qp;
}
- /* set qp->sq.wqe_cnt, shift, buf_size.. */
- qp->rumem = ib_umem_get(pd->uobject->context,
- ucmd.rbuf_addr,
- ucmd.rbuf_size, 0, 0);
- if (IS_ERR(qp->rumem)) {
- ret = PTR_ERR(qp->rumem);
- goto err_qp;
+ if (!is_srq) {
+ /* set qp->sq.wqe_cnt, shift, buf_size.. */
+ qp->rumem = ib_umem_get(pd->uobject->context,
+ ucmd.rbuf_addr,
+ ucmd.rbuf_size, 0, 0);
+ if (IS_ERR(qp->rumem)) {
+ ret = PTR_ERR(qp->rumem);
+ goto err_qp;
+ }
+ qp->srq = NULL;
+ } else {
+ qp->rumem = NULL;
+ qp->srq = to_vsrq(init_attr->srq);
}
qp->sumem = ib_umem_get(pd->uobject->context,
ucmd.sbuf_addr,
ucmd.sbuf_size, 0, 0);
if (IS_ERR(qp->sumem)) {
- ib_umem_release(qp->rumem);
+ if (!is_srq)
+ ib_umem_release(qp->rumem);
ret = PTR_ERR(qp->sumem);
goto err_qp;
}
qp->npages_send = ib_umem_page_count(qp->sumem);
- qp->npages_recv = ib_umem_page_count(qp->rumem);
+ if (!is_srq)
+ qp->npages_recv = ib_umem_page_count(qp->rumem);
+ else
+ qp->npages_recv = 0;
qp->npages = qp->npages_send + qp->npages_recv;
} else {
qp->is_kernel = true;
@@ -312,12 +329,14 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
if (!qp->is_kernel) {
pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0);
- pvrdma_page_dir_insert_umem(&qp->pdir, qp->rumem,
- qp->npages_send);
+ if (!is_srq)
+ pvrdma_page_dir_insert_umem(&qp->pdir,
+ qp->rumem,
+ qp->npages_send);
} else {
/* Ring state is always the first page. */
qp->sq.ring = qp->pdir.pages[0];
- qp->rq.ring = &qp->sq.ring[1];
+ qp->rq.ring = is_srq ? NULL : &qp->sq.ring[1];
}
break;
default:
@@ -333,6 +352,10 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
cmd->pd_handle = to_vpd(pd)->pd_handle;
cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
+ if (is_srq)
+ cmd->srq_handle = to_vsrq(init_attr->srq)->srq_handle;
+ else
+ cmd->srq_handle = 0;
cmd->max_send_wr = init_attr->cap.max_send_wr;
cmd->max_recv_wr = init_attr->cap.max_recv_wr;
cmd->max_send_sge = init_attr->cap.max_send_sge;
@@ -340,6 +363,8 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
cmd->max_inline_data = init_attr->cap.max_inline_data;
cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
+ cmd->is_srq = is_srq;
+ cmd->lkey = 0;
cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
cmd->total_chunks = qp->npages;
cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
@@ -815,6 +840,12 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
return -EINVAL;
}
+ if (qp->srq) {
+ dev_warn(&dev->pdev->dev, "QP associated with SRQ\n");
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&qp->rq.lock, flags);
while (wr) {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
new file mode 100644
index 000000000000..826ccb864596
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2016-2017 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "pvrdma.h"
+
+int pvrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ /* No support for kernel clients. */
+ return -EOPNOTSUPP;
+}
+
+/**
+ * pvrdma_query_srq - query shared receive queue
+ * @ibsrq: the shared receive queue to query
+ * @srq_attr: attributes to query and return to client
+ *
+ * @return: 0 for success, otherwise returns an errno.
+ */
+int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+ struct pvrdma_dev *dev = to_vdev(ibsrq->device);
+ struct pvrdma_srq *srq = to_vsrq(ibsrq);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
+ struct pvrdma_cmd_query_srq_resp *resp = &rsp.query_srq_resp;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
+ cmd->srq_handle = srq->srq_handle;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not query shared receive queue, error: %d\n",
+ ret);
+ return -EINVAL;
+ }
+
+ srq_attr->srq_limit = resp->attrs.srq_limit;
+ srq_attr->max_wr = resp->attrs.max_wr;
+ srq_attr->max_sge = resp->attrs.max_sge;
+
+ return 0;
+}
+
+/**
+ * pvrdma_create_srq - create shared receive queue
+ * @pd: protection domain
+ * @init_attr: shared receive queue attributes
+ * @udata: user data
+ *
+ * @return: the ib_srq pointer on success, otherwise returns an errno.
+ */
+struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct pvrdma_srq *srq = NULL;
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
+ struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
+ struct pvrdma_create_srq ucmd;
+ unsigned long flags;
+ int ret;
+
+ if (!(pd->uobject && udata)) {
+ /* No support for kernel clients. */
+ dev_warn(&dev->pdev->dev,
+ "no shared receive queue support for kernel client\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (init_attr->srq_type != IB_SRQT_BASIC) {
+ dev_warn(&dev->pdev->dev,
+ "shared receive queue type %d not supported\n",
+ init_attr->srq_type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
+ init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
+ dev_warn(&dev->pdev->dev,
+ "shared receive queue size invalid\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
+ return ERR_PTR(-ENOMEM);
+
+ srq = kmalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq) {
+ ret = -ENOMEM;
+ goto err_srq;
+ }
+
+ spin_lock_init(&srq->lock);
+ refcount_set(&srq->refcnt, 1);
+ init_waitqueue_head(&srq->wait);
+
+ dev_dbg(&dev->pdev->dev,
+ "create shared receive queue from user space\n");
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ ret = -EFAULT;
+ goto err_srq;
+ }
+
+ srq->umem = ib_umem_get(pd->uobject->context,
+ ucmd.buf_addr,
+ ucmd.buf_size, 0, 0);
+ if (IS_ERR(srq->umem)) {
+ ret = PTR_ERR(srq->umem);
+ goto err_srq;
+ }
+
+ srq->npages = ib_umem_page_count(srq->umem);
+
+ if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ dev_warn(&dev->pdev->dev,
+ "overflow pages in shared receive queue\n");
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "could not allocate page directory\n");
+ goto err_umem;
+ }
+
+ pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
+ cmd->srq_type = init_attr->srq_type;
+ cmd->nchunks = srq->npages;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->attrs.max_wr = init_attr->attr.max_wr;
+ cmd->attrs.max_sge = init_attr->attr.max_sge;
+ cmd->attrs.srq_limit = init_attr->attr.srq_limit;
+ cmd->pdir_dma = srq->pdir.dir_dma;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create shared receive queue, error: %d\n",
+ ret);
+ goto err_page_dir;
+ }
+
+ srq->srq_handle = resp->srqn;
+ spin_lock_irqsave(&dev->srq_tbl_lock, flags);
+ dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
+ spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
+
+ /* Copy udata back. */
+ if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) {
+ dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
+ pvrdma_destroy_srq(&srq->ibsrq);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &srq->ibsrq;
+
+err_page_dir:
+ pvrdma_page_dir_cleanup(dev, &srq->pdir);
+err_umem:
+ ib_umem_release(srq->umem);
+err_srq:
+ kfree(srq);
+ atomic_dec(&dev->num_srqs);
+
+ return ERR_PTR(ret);
+}
+
+static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->srq_tbl_lock, flags);
+ dev->srq_tbl[srq->srq_handle] = NULL;
+ spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
+
+ refcount_dec(&srq->refcnt);
+ wait_event(srq->wait, !refcount_read(&srq->refcnt));
+
+ /* There is no support for kernel clients, so this is safe. */
+ ib_umem_release(srq->umem);
+
+ pvrdma_page_dir_cleanup(dev, &srq->pdir);
+
+ kfree(srq);
+
+ atomic_dec(&dev->num_srqs);
+}
+
+/**
+ * pvrdma_destroy_srq - destroy shared receive queue
+ * @srq: the shared receive queue to destroy
+ *
+ * @return: 0 for success.
+ */
+int pvrdma_destroy_srq(struct ib_srq *srq)
+{
+ struct pvrdma_srq *vsrq = to_vsrq(srq);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq;
+ struct pvrdma_dev *dev = to_vdev(srq->device);
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_SRQ;
+ cmd->srq_handle = vsrq->srq_handle;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&dev->pdev->dev,
+ "destroy shared receive queue failed, error: %d\n",
+ ret);
+
+ pvrdma_free_srq(dev, vsrq);
+
+ return 0;
+}
+
+/**
+ * pvrdma_modify_srq - modify shared receive queue attributes
+ * @ibsrq: the shared receive queue to modify
+ * @attr: the shared receive queue's new attributes
+ * @attr_mask: attributes mask
+ * @udata: user data
+ *
+ * @return: 0 on success, otherwise a negative errno.
+ */
+int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
+{
+ struct pvrdma_srq *vsrq = to_vsrq(ibsrq);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq;
+ struct pvrdma_dev *dev = to_vdev(ibsrq->device);
+ int ret;
+
+ /* Only support SRQ limit. */
+ if (!(attr_mask & IB_SRQ_LIMIT))
+ return -EINVAL;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_MODIFY_SRQ;
+ cmd->srq_handle = vsrq->srq_handle;
+ cmd->attrs.srq_limit = attr->srq_limit;
+ cmd->attr_mask = attr_mask;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not modify shared receive queue, error: %d\n",
+ ret);
+
+ return -EINVAL;
+ }
+
+ return ret;
+}
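The SRQ verbs added above are only reachable from user space (kernel clients are rejected with -EOPNOTSUPP). As rough orientation, a minimal libibverbs sketch of how a user-space consumer would exercise this path might look as follows; the sizes are illustrative and must stay within the max_srq_wr/max_srq_sge caps checked in pvrdma_create_srq():

#include <infiniband/verbs.h>

/* Illustrative sketch only: create, arm, query and destroy a basic SRQ. */
static int srq_demo(struct ibv_pd *pd)
{
	struct ibv_srq_init_attr init_attr = {
		.attr = {
			.max_wr    = 256,	/* <= caps.max_srq_wr */
			.max_sge   = 1,		/* <= caps.max_srq_sge */
			.srq_limit = 0,
		},
	};
	struct ibv_srq_attr attr = { .srq_limit = 16 };
	struct ibv_srq *srq;

	srq = ibv_create_srq(pd, &init_attr);		/* pvrdma_create_srq() */
	if (!srq)
		return -1;

	if (ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT))	/* pvrdma_modify_srq() */
		goto out;
	if (ibv_query_srq(srq, &attr))			/* pvrdma_query_srq() */
		goto out;
out:
	ibv_destroy_srq(srq);				/* pvrdma_destroy_srq() */
	return 0;
}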
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 48776f5ffb0e..16b96616ef7e 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -85,6 +85,9 @@ int pvrdma_query_device(struct ib_device *ibdev,
props->max_sge = dev->dsr->caps.max_sge;
props->max_sge_rd = PVRDMA_GET_CAP(dev, dev->dsr->caps.max_sge,
dev->dsr->caps.max_sge_rd);
+ props->max_srq = dev->dsr->caps.max_srq;
+ props->max_srq_wr = dev->dsr->caps.max_srq_wr;
+ props->max_srq_sge = dev->dsr->caps.max_srq_sge;
props->max_cq = dev->dsr->caps.max_cq;
props->max_cqe = dev->dsr->caps.max_cqe;
props->max_mr = dev->dsr->caps.max_mr;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index 002a9b066e70..b7b25728a7e5 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -324,6 +324,13 @@ enum pvrdma_mw_type {
PVRDMA_MW_TYPE_2 = 2,
};
+struct pvrdma_srq_attr {
+ u32 max_wr;
+ u32 max_sge;
+ u32 srq_limit;
+ u32 reserved;
+};
+
struct pvrdma_qp_attr {
enum pvrdma_qp_state qp_state;
enum pvrdma_qp_state cur_qp_state;
@@ -420,6 +427,17 @@ int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
struct ib_udata *udata);
int pvrdma_destroy_ah(struct ib_ah *ah);
+
+struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata);
+int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
+int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
+int pvrdma_destroy_srq(struct ib_srq *srq);
+int pvrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
index fdd001ce13d8..2b5513da7e83 100644
--- a/drivers/infiniband/sw/rdmavt/Kconfig
+++ b/drivers/infiniband/sw/rdmavt/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_RDMAVT
tristate "RDMA verbs transport library"
depends on 64BIT
+ depends on PCI
select DMA_VIRT_OPS
---help---
This is a common software verbs provider for RDMA networks.
diff --git a/drivers/infiniband/sw/rdmavt/mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index 1f12b69a0d07..b3a38c5e4cad 100644
--- a/drivers/infiniband/sw/rdmavt/mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -351,7 +351,7 @@ int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int last = 0;
int ret = 0;
- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
+ if (ibqp->qp_num <= 1)
return -EINVAL;
spin_lock_irq(&ibp->lock);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 22df09ae809e..9177df60742a 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -57,7 +57,7 @@
#include "vt.h"
#include "trace.h"
-static void rvt_rc_timeout(unsigned long arg);
+static void rvt_rc_timeout(struct timer_list *t);
/*
* Convert the AETH RNR timeout code into the number of microseconds.
@@ -238,7 +238,7 @@ int rvt_driver_qp_init(struct rvt_dev_info *rdi)
rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
rdi->qp_dev->qp_table =
- kmalloc_node(rdi->qp_dev->qp_table_size *
+ kmalloc_array_node(rdi->qp_dev->qp_table_size,
sizeof(*rdi->qp_dev->qp_table),
GFP_KERNEL, rdi->dparms.node);
if (!rdi->qp_dev->qp_table)
@@ -717,7 +717,6 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
/* take qp out the hash and wait for it to be unused */
rvt_remove_qp(rdi, qp);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
/* grab the lock b/c it was locked at call time */
spin_lock_irq(&qp->r_lock);
@@ -807,6 +806,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (init_attr->port_num == 0 ||
init_attr->port_num > ibpd->device->phys_port_cnt)
return ERR_PTR(-EINVAL);
+ /* fall through */
case IB_QPT_UC:
case IB_QPT_RC:
case IB_QPT_UD:
@@ -845,7 +845,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
goto bail_qp;
}
/* initialize timers needed for rc qp */
- setup_timer(&qp->s_timer, rvt_rc_timeout, (unsigned long)qp);
+ timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
qp->s_rnr_timer.function = rvt_rc_rnr_retry;
@@ -894,8 +894,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
atomic_set(&qp->refcount, 0);
atomic_set(&qp->local_ops_pending, 0);
init_waitqueue_head(&qp->wait);
- init_timer(&qp->s_timer);
- qp->s_timer.data = (unsigned long)qp;
INIT_LIST_HEAD(&qp->rspwait);
qp->state = IB_QPS_RESET;
qp->s_wq = swq;
@@ -1073,7 +1071,7 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
rdi->driver_f.notify_error_qp(qp);
/* Schedule the sending tasklet to drain the send work queue. */
- if (ACCESS_ONCE(qp->s_last) != qp->s_head)
+ if (READ_ONCE(qp->s_last) != qp->s_head)
rdi->driver_f.schedule_send(qp);
rvt_clear_mr_refs(qp, 0);
@@ -1443,6 +1441,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp)
spin_unlock(&qp->s_hlock);
spin_unlock_irq(&qp->r_lock);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
/* qpn is now available for use again */
rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
@@ -1686,7 +1685,7 @@ static inline int rvt_qp_is_avail(
if (likely(qp->s_avail))
return 0;
smp_read_barrier_depends(); /* see rc.c */
- slast = ACCESS_ONCE(qp->s_last);
+ slast = READ_ONCE(qp->s_last);
if (qp->s_head >= slast)
avail = qp->s_size - (qp->s_head - slast);
else
@@ -1917,7 +1916,7 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
* ahead and kick the send engine into gear. Otherwise we will always
* just schedule the send to happen later.
*/
- call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
+ call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
for (; wr; wr = wr->next) {
err = rvt_post_one_wr(qp, wr, &call_send);
@@ -2132,9 +2131,9 @@ EXPORT_SYMBOL(rvt_del_timers_sync);
/**
* This is called from s_timer for missing responses.
*/
-static void rvt_rc_timeout(unsigned long arg)
+static void rvt_rc_timeout(struct timer_list *t)
{
- struct rvt_qp *qp = (struct rvt_qp *)arg;
+ struct rvt_qp *qp = from_timer(qp, t, s_timer);
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
unsigned long flags;
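The rvt_rc_timeout() conversion above follows the tree-wide timer API change: the callback now receives the struct timer_list pointer and recovers its container with from_timer(), so the old unsigned long cookie and setup_timer()/init_timer() calls go away. A minimal sketch of the pattern, with a hypothetical structure for illustration:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_qp {
	struct timer_list s_timer;
	/* ... */
};

static void demo_timeout(struct timer_list *t)
{
	/* Recover the enclosing object from its timer_list member. */
	struct demo_qp *qp = from_timer(qp, t, s_timer);

	/* handle the retransmit timeout for qp here */
}

static void demo_init(struct demo_qp *qp)
{
	timer_setup(&qp->s_timer, demo_timeout, 0);
	mod_timer(&qp->s_timer, jiffies + HZ);
}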
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 9eb12c2e3c74..6cdc40ed8a9f 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -136,9 +136,9 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
}
}
-void retransmit_timer(unsigned long data)
+void retransmit_timer(struct timer_list *t)
{
- struct rxe_qp *qp = (struct rxe_qp *)data;
+ struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
if (qp->valid) {
qp->comp.timeout = 1;
@@ -270,8 +270,8 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
if ((syn & AETH_TYPE_MASK) != AETH_ACK)
return COMPST_ERROR;
- /* Fall through (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE
- * doesn't have an AETH)
+ /* fall through */
+ /* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an AETH)
*/
case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
if (wqe->wr.opcode != IB_WR_RDMA_READ &&
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 77b3ed0df936..d7472a442a2c 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -218,8 +218,8 @@ static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
qp->resp.res_head = 0;
}
-void retransmit_timer(unsigned long data);
-void rnr_nak_timer(unsigned long data);
+void retransmit_timer(struct timer_list *t);
+void rnr_nak_timer(struct timer_list *t);
/* rxe_srq.c */
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index c1b5f38f31a5..b4a8acc7bb7d 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -394,21 +394,25 @@ void *rxe_alloc(struct rxe_pool *pool)
kref_get(&pool->rxe->ref_cnt);
- if (atomic_inc_return(&pool->num_elem) > pool->max_elem) {
- atomic_dec(&pool->num_elem);
- rxe_dev_put(pool->rxe);
- rxe_pool_put(pool);
- return NULL;
- }
+ if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
+ goto out_put_pool;
elem = kmem_cache_zalloc(pool_cache(pool),
(pool->flags & RXE_POOL_ATOMIC) ?
GFP_ATOMIC : GFP_KERNEL);
+ if (!elem)
+ goto out_put_pool;
elem->pool = pool;
kref_init(&elem->ref_cnt);
return elem;
+
+out_put_pool:
+ atomic_dec(&pool->num_elem);
+ rxe_dev_put(pool->rxe);
+ rxe_pool_put(pool);
+ return NULL;
}
void rxe_elem_release(struct kref *kref)
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 00bda9380a2e..4469592b839d 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -275,8 +275,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
if (init->qp_type == IB_QPT_RC) {
- setup_timer(&qp->rnr_nak_timer, rnr_nak_timer, (unsigned long)qp);
- setup_timer(&qp->retrans_timer, retransmit_timer, (unsigned long)qp);
+ timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
+ timer_setup(&qp->retrans_timer, retransmit_timer, 0);
}
return 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index d84222f9d5d2..26a7f923045b 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -118,9 +118,9 @@ static void req_retry(struct rxe_qp *qp)
}
}
-void rnr_nak_timer(unsigned long data)
+void rnr_nak_timer(struct timer_list *t)
{
- struct rxe_qp *qp = (struct rxe_qp *)data;
+ struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
rxe_run_task(&qp->req.task, 1);
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index ea3810b29273..08f05ac5f5d5 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -71,7 +71,7 @@ void rxe_do_task(unsigned long data)
case TASK_STATE_BUSY:
task->state = TASK_STATE_ARMED;
- /* fall through to */
+ /* fall through */
case TASK_STATE_ARMED:
spin_unlock_irqrestore(&task->state_lock, flags);
return;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 0b362f49a10a..d03002b9d84d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -644,6 +644,7 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_WRITE_WITH_IMM:
wr->ex.imm_data = ibwr->ex.imm_data;
+ /* fall through */
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
@@ -1191,6 +1192,7 @@ int rxe_register_device(struct rxe_dev *rxe)
int err;
int i;
struct ib_device *dev = &rxe->ib_dev;
+ struct crypto_shash *tfm;
strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
@@ -1288,12 +1290,13 @@ int rxe_register_device(struct rxe_dev *rxe)
dev->get_hw_stats = rxe_ib_get_hw_stats;
dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;
- rxe->tfm = crypto_alloc_shash("crc32", 0, 0);
- if (IS_ERR(rxe->tfm)) {
+ tfm = crypto_alloc_shash("crc32", 0, 0);
+ if (IS_ERR(tfm)) {
pr_err("failed to allocate crc algorithm err:%ld\n",
- PTR_ERR(rxe->tfm));
- return PTR_ERR(rxe->tfm);
+ PTR_ERR(tfm));
+ return PTR_ERR(tfm);
}
+ rxe->tfm = tfm;
err = ib_register_device(dev, NULL);
if (err) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 4a5c7a07a631..8033a006277f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -111,7 +111,7 @@ enum {
IPOIB_MCAST_FLAG_BUSY = 2,
IPOIB_MCAST_FLAG_ATTACHED = 3,
- MAX_SEND_CQE = 16,
+ MAX_SEND_CQE = 64,
IPOIB_CM_COPYBREAK = 256,
IPOIB_NON_CHILD = 0,
@@ -331,7 +331,8 @@ struct ipoib_dev_priv {
struct net_device *dev;
- struct napi_struct napi;
+ struct napi_struct send_napi;
+ struct napi_struct recv_napi;
unsigned long flags;
@@ -381,7 +382,6 @@ struct ipoib_dev_priv {
unsigned tx_tail;
struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
struct ib_ud_wr tx_wr;
- unsigned tx_outstanding;
struct ib_wc send_wc[MAX_SEND_CQE];
struct ib_recv_wr rx_wr;
@@ -409,7 +409,6 @@ struct ipoib_dev_priv {
#endif
u64 hca_caps;
struct ipoib_ethtool_st ethtool;
- struct timer_list poll_timer;
unsigned max_send_sge;
bool sm_fullmember_sendonly_support;
const struct net_device_ops *rn_ops;
@@ -476,9 +475,10 @@ extern struct workqueue_struct *ipoib_workqueue;
/* functions */
-int ipoib_poll(struct napi_struct *napi, int budget);
-void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
-void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
+int ipoib_rx_poll(struct napi_struct *napi, int budget);
+int ipoib_tx_poll(struct napi_struct *napi, int budget);
+void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr);
+void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr);
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct rdma_ah_attr *attr);
@@ -500,7 +500,7 @@ void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
const char *format);
-void ipoib_ib_tx_timer_func(unsigned long ctx);
+void ipoib_ib_tx_timer_func(struct timer_list *t);
void ipoib_ib_dev_flush_light(struct work_struct *work);
void ipoib_ib_dev_flush_normal(struct work_struct *work);
void ipoib_ib_dev_flush_heavy(struct work_struct *work);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7774654c2ccb..87f4bd99cdf7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -594,9 +594,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb = rx_ring[wr_id].skb;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
- ipoib_dbg(priv, "cm recv error "
- "(status=%d, wrid=%d vend_err %x)\n",
- wc->status, wr_id, wc->vendor_err);
+ ipoib_dbg(priv,
+ "cm recv error (status=%d, wrid=%d vend_err %#x)\n",
+ wc->status, wr_id, wc->vendor_err);
++dev->stats.rx_dropped;
if (has_srq)
goto repost;
@@ -757,30 +757,35 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
return;
}
+ if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) {
+ ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
+ tx->qp->qp_num);
+ netif_stop_queue(dev);
+ }
+
skb_orphan(skb);
skb_dst_drop(skb);
+ if (netif_queue_stopped(dev))
+ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS)) {
+ ipoib_warn(priv, "IPoIB/CM:request notify on send CQ failed\n");
+ napi_schedule(&priv->send_napi);
+ }
+
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
if (unlikely(rc)) {
- ipoib_warn(priv, "post_send failed, error %d\n", rc);
+ ipoib_warn(priv, "IPoIB/CM:post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
+
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
} else {
netif_trans_update(dev);
++tx->tx_head;
-
- if (++priv->tx_outstanding == ipoib_sendq_size) {
- ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
- tx->qp->qp_num);
- netif_stop_queue(dev);
- rc = ib_req_notify_cq(priv->send_cq,
- IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
- if (rc < 0)
- ipoib_warn(priv, "request notify on send CQ failed\n");
- else if (rc)
- ipoib_send_comp_handler(priv->send_cq, dev);
- }
+ ++priv->tx_head;
}
}
@@ -814,9 +819,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
netif_tx_lock(dev);
++tx->tx_tail;
- if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
- netif_queue_stopped(dev) &&
- test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ ++priv->tx_tail;
+
+ if (unlikely(netif_queue_stopped(dev) &&
+ (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
if (wc->status != IB_WC_SUCCESS &&
@@ -829,11 +836,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
wc->status == IB_WC_RETRY_EXC_ERR)
ipoib_dbg(priv,
- "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
+ "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
__func__, wc->status, wr_id, wc->vendor_err);
else
ipoib_warn(priv,
- "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
+ "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
__func__, wc->status, wr_id, wc->vendor_err);
spin_lock_irqsave(&priv->lock, flags);
@@ -1045,7 +1052,7 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_init_attr attr = {
- .send_cq = priv->recv_cq,
+ .send_cq = priv->send_cq,
.recv_cq = priv->recv_cq,
.srq = priv->cm.srq,
.cap.max_send_wr = ipoib_sendq_size,
@@ -1219,9 +1226,10 @@ timeout:
tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
- ++p->tx_tail;
netif_tx_lock_bh(p->dev);
- if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+ ++p->tx_tail;
+ ++priv->tx_tail;
+ if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
netif_queue_stopped(p->dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(p->dev);
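The ipoib_cm changes above drop the separate tx_outstanding counter and derive ring occupancy from the free-running tx_head/tx_tail indices instead. Since both are unsigned and only ever incremented, head - tail stays correct across wraparound; a small sketch of the accounting the stop/wake thresholds rely on (thresholds taken from the hunks above, names illustrative):

#include <linux/types.h>

/* Illustrative: ring occupancy from free-running unsigned indices. */
static inline unsigned int ring_used(unsigned int head, unsigned int tail)
{
	return head - tail;	/* well-defined even after head wraps */
}

static inline bool ring_should_stop(unsigned int head, unsigned int tail,
				    unsigned int sendq_size)
{
	return ring_used(head, tail) == sendq_size - 1;	/* one free slot left */
}

static inline bool ring_can_wake(unsigned int head, unsigned int tail,
				 unsigned int sendq_size)
{
	return ring_used(head, tail) <= sendq_size >> 1; /* half drained */
}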
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 8dc1e6225cc8..2706bf26cbac 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -99,8 +99,9 @@ static int ipoib_set_coalesce(struct net_device *dev,
coal->rx_max_coalesced_frames > 0xffff)
return -EINVAL;
- ret = ib_modify_cq(priv->recv_cq, coal->rx_max_coalesced_frames,
- coal->rx_coalesce_usecs);
+ ret = rdma_set_cq_moderation(priv->recv_cq,
+ coal->rx_max_coalesced_frames,
+ coal->rx_coalesce_usecs);
if (ret && ret != -ENOSYS) {
ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
return ret;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index fe690f82af29..3b96cdaf9a83 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -192,8 +192,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
- ipoib_warn(priv, "failed recv event "
- "(status=%d, wrid=%d vend_err %x)\n",
+ ipoib_warn(priv,
+ "failed recv event (status=%d, wrid=%d vend_err %#x)\n",
wc->status, wr_id, wc->vendor_err);
ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
dev_kfree_skb_any(skb);
@@ -264,7 +264,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- napi_gro_receive(&priv->napi, skb);
+ napi_gro_receive(&priv->recv_napi, skb);
repost:
if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
@@ -406,16 +406,17 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
- if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
- netif_queue_stopped(dev) &&
- test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+
+ if (unlikely(netif_queue_stopped(dev) &&
+ ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
if (wc->status != IB_WC_SUCCESS &&
wc->status != IB_WC_WR_FLUSH_ERR) {
struct ipoib_qp_state_validate *qp_work;
- ipoib_warn(priv, "failed send event "
- "(status=%d, wrid=%d vend_err %x)\n",
+ ipoib_warn(priv,
+ "failed send event (status=%d, wrid=%d vend_err %#x)\n",
wc->status, wr_id, wc->vendor_err);
qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
if (!qp_work)
@@ -430,17 +431,23 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
static int poll_tx(struct ipoib_dev_priv *priv)
{
int n, i;
+ struct ib_wc *wc;
n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
- for (i = 0; i < n; ++i)
- ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
-
+ for (i = 0; i < n; ++i) {
+ wc = priv->send_wc + i;
+ if (wc->wr_id & IPOIB_OP_CM)
+ ipoib_cm_handle_tx_wc(priv->dev, priv->send_wc + i);
+ else
+ ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
+ }
return n == MAX_SEND_CQE;
}
-int ipoib_poll(struct napi_struct *napi, int budget)
+int ipoib_rx_poll(struct napi_struct *napi, int budget)
{
- struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
+ struct ipoib_dev_priv *priv =
+ container_of(napi, struct ipoib_dev_priv, recv_napi);
struct net_device *dev = priv->dev;
int done;
int t;
@@ -464,8 +471,9 @@ poll_more:
ipoib_cm_handle_rx_wc(dev, wc);
else
ipoib_ib_handle_rx_wc(dev, wc);
- } else
- ipoib_cm_handle_tx_wc(priv->dev, wc);
+ } else {
+ pr_warn("%s: Got unexpected wqe id\n", __func__);
+ }
}
if (n != t)
@@ -484,33 +492,47 @@ poll_more:
return done;
}
-void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
+int ipoib_tx_poll(struct napi_struct *napi, int budget)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv,
+ send_napi);
+ struct net_device *dev = priv->dev;
+ int n, i;
+ struct ib_wc *wc;
- napi_schedule(&priv->napi);
-}
+poll_more:
+ n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
-static void drain_tx_cq(struct net_device *dev)
-{
- struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ for (i = 0; i < n; i++) {
+ wc = priv->send_wc + i;
+ if (wc->wr_id & IPOIB_OP_CM)
+ ipoib_cm_handle_tx_wc(dev, wc);
+ else
+ ipoib_ib_handle_tx_wc(dev, wc);
+ }
- netif_tx_lock(dev);
- while (poll_tx(priv))
- ; /* nothing */
+ if (n < budget) {
+ napi_complete(napi);
+ if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS)) &&
+ napi_reschedule(napi))
+ goto poll_more;
+ }
+ return n < 0 ? 0 : n;
+}
- if (netif_queue_stopped(dev))
- mod_timer(&priv->poll_timer, jiffies + 1);
+void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr)
+{
+ struct ipoib_dev_priv *priv = ctx_ptr;
- netif_tx_unlock(dev);
+ napi_schedule(&priv->recv_napi);
}
-void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
+void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr)
{
- struct ipoib_dev_priv *priv = ipoib_priv(dev_ptr);
+ struct ipoib_dev_priv *priv = ctx_ptr;
- mod_timer(&priv->poll_timer, jiffies);
+ napi_schedule(&priv->send_napi);
}
static inline int post_send(struct ipoib_dev_priv *priv,
@@ -611,23 +633,25 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
else
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
-
- if (++priv->tx_outstanding == ipoib_sendq_size) {
+ /* tx_head is advanced only after a successful post, but head - tail drives the queue state */
+ if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
- if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
- ipoib_warn(priv, "request notify on send CQ failed\n");
netif_stop_queue(dev);
}
skb_orphan(skb);
skb_dst_drop(skb);
+ if (netif_queue_stopped(dev))
+ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS))
+ ipoib_warn(priv, "request notify on send CQ failed\n");
+
rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
address, dqpn, tx_req, phead, hlen);
if (unlikely(rc)) {
ipoib_warn(priv, "post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
- --priv->tx_outstanding;
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
if (netif_queue_stopped(dev))
@@ -639,11 +663,6 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
rc = priv->tx_head;
++priv->tx_head;
}
-
- if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
- while (poll_tx(priv))
- ; /* nothing */
-
return rc;
}
@@ -732,6 +751,22 @@ static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
new_state, qp_attr.qp_state);
}
+static void ipoib_napi_enable(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ napi_enable(&priv->recv_napi);
+ napi_enable(&priv->send_napi);
+}
+
+static void ipoib_napi_disable(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ napi_disable(&priv->recv_napi);
+ napi_disable(&priv->send_napi);
+}
+
int ipoib_ib_dev_stop_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -741,7 +776,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
int i;
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- napi_disable(&priv->napi);
+ ipoib_napi_disable(dev);
ipoib_cm_dev_stop(dev);
@@ -773,7 +808,6 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
- --priv->tx_outstanding;
}
for (i = 0; i < ipoib_recvq_size; ++i) {
@@ -799,7 +833,6 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
ipoib_dbg(priv, "All sends and receives done.\n");
timeout:
- del_timer_sync(&priv->poll_timer);
qp_attr.qp_state = IB_QPS_RESET;
if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
ipoib_warn(priv, "Failed to modify QP to RESET state\n");
@@ -821,11 +854,6 @@ int ipoib_ib_dev_stop(struct net_device *dev)
return 0;
}
-void ipoib_ib_tx_timer_func(unsigned long ctx)
-{
- drain_tx_cq((struct net_device *)ctx);
-}
-
int ipoib_ib_dev_open_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -850,7 +878,7 @@ int ipoib_ib_dev_open_default(struct net_device *dev)
}
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- napi_enable(&priv->napi);
+ ipoib_napi_enable(dev);
return 0;
out:
@@ -965,8 +993,9 @@ void ipoib_drain_cq(struct net_device *dev)
ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
else
ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
- } else
- ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
+ } else {
+ pr_warn("%s: Got unexpected wqe id\n", __func__);
+ }
}
} while (n == IPOIB_NUM_WC);
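The new ipoib_tx_poll() above relies on the return value of ib_req_notify_cq() when IB_CQ_REPORT_MISSED_EVENTS is set: a positive return means completions may have arrived between the last poll and re-arming the CQ, so the handler reschedules itself instead of waiting for an interrupt that will never fire. A condensed sketch of that re-arm idiom (demo_process_completions() is a hypothetical helper, not a real API):

#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>

static int demo_process_completions(struct ib_cq *cq, int budget);

static int demo_tx_poll(struct napi_struct *napi, struct ib_cq *cq, int budget)
{
	int done;

poll_again:
	done = demo_process_completions(cq, budget);

	if (done < budget) {
		napi_complete(napi);
		/*
		 * Positive return: events may have been missed while the CQ
		 * was unarmed, so go around again rather than sleeping.
		 */
		if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				     IB_CQ_REPORT_MISSED_EVENTS) > 0 &&
		    napi_reschedule(napi))
			goto poll_again;
	}
	return done;
}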
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index dcc77014018d..12b7f911f0e5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -51,7 +51,6 @@
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
-#include <linux/pci.h>
#define DRV_VERSION "1.0.0"
@@ -1617,13 +1616,29 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
wait_for_completion(&priv->ntbl.deleted);
}
+static void ipoib_napi_add(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC);
+ netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE);
+}
+
+static void ipoib_napi_del(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ netif_napi_del(&priv->recv_napi);
+ netif_napi_del(&priv->send_napi);
+}
+
static void ipoib_dev_uninit_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_transport_dev_cleanup(dev);
- netif_napi_del(&priv->napi);
+ ipoib_napi_del(dev);
ipoib_cm_dev_cleanup(dev);
@@ -1638,7 +1653,7 @@ static int ipoib_dev_init_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT);
+ ipoib_napi_add(dev);
/* Allocate RX/TX "rings" to hold queued skbs */
priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
@@ -1666,9 +1681,6 @@ static int ipoib_dev_init_default(struct net_device *dev)
priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff;
priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff;
- setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
- (unsigned long)dev);
-
return 0;
out_tx_ring_cleanup:
@@ -1678,7 +1690,7 @@ out_rx_ring_cleanup:
kfree(priv->rx_ring);
out:
- netif_napi_del(&priv->napi);
+ ipoib_napi_del(dev);
return -ENOMEM;
}
@@ -2314,7 +2326,8 @@ static void ipoib_add_one(struct ib_device *device)
}
if (!count) {
- kfree(dev_list);
+ pr_err("Failed to init port, removing it\n");
+ ipoib_remove_one(device, dev_list);
return;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index bb64baf25309..a1ed25422b72 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -156,7 +156,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
};
struct ib_cq_init_attr cq_attr = {};
- int ret, size;
+ int ret, size, req_vec;
int i;
size = ipoib_recvq_size + 1;
@@ -171,17 +171,21 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
if (ret != -ENOSYS)
return -ENODEV;
+ req_vec = (priv->port - 1) * 2;
+
cq_attr.cqe = size;
- priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
- dev, &cq_attr);
+ cq_attr.comp_vector = req_vec % priv->ca->num_comp_vectors;
+ priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,
+ priv, &cq_attr);
if (IS_ERR(priv->recv_cq)) {
printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name);
goto out_cm_dev_cleanup;
}
cq_attr.cqe = ipoib_sendq_size;
- priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL,
- dev, &cq_attr);
+ cq_attr.comp_vector = (req_vec + 1) % priv->ca->num_comp_vectors;
+ priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,
+ priv, &cq_attr);
if (IS_ERR(priv->send_cq)) {
printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name);
goto out_free_recv_cq;
@@ -208,6 +212,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
goto out_free_send_cq;
}
+ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
+ goto out_free_send_cq;
+
for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
priv->tx_sge[i].lkey = priv->pd->local_dma_lkey;
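The req_vec computation above spreads each port's two CQs across the HCA's completion vectors: port 1 asks for vectors 0 (receive) and 1 (send), port 2 for vectors 2 and 3, and so on, with the modulo wrapping when fewer vectors are exposed. A small worked sketch (illustrative):

/* Illustrative: which completion vectors a port's CQs request. */
static void pick_comp_vectors(int port, int num_comp_vectors,
			      int *rx_vec, int *tx_vec)
{
	int req_vec = (port - 1) * 2;

	*rx_vec = req_vec % num_comp_vectors;		/* receive CQ */
	*tx_vec = (req_vec + 1) % num_comp_vectors;	/* send CQ */
}
/* e.g. port 2 on a 4-vector HCA -> receive on vector 2, send on vector 3 */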
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 55a73b0ed4c6..56b7240a3fc3 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1146,7 +1146,7 @@ void iser_err_comp(struct ib_wc *wc, const char *type)
if (wc->status != IB_WC_WR_FLUSH_ERR) {
struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context);
- iser_err("%s failure: %s (%d) vend_err %x\n", type,
+ iser_err("%s failure: %s (%d) vend_err %#x\n", type,
ib_wc_status_msg(wc->status), wc->status,
wc->vendor_err);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index ceabdb85df8b..720dfb3a1ac2 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -788,10 +788,11 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
* the rdma cm id
*/
return 1;
- case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_REJECTED:
isert_info("Connection rejected: %s\n",
rdma_reject_msg(cma_id, event->status));
- case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
+ /* fall through */
+ case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
ret = isert_connect_error(cma_id);
break;
@@ -1569,9 +1570,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
}
- /*
- * Fall-through
- */
+ /* fall through */
default:
iscsit_release_cmd(cmd);
break;
@@ -1749,8 +1748,9 @@ isert_do_control_comp(struct work_struct *work)
switch (cmd->i_state) {
case ISTATE_SEND_TASKMGTRSP:
iscsit_tmr_post_handler(cmd, cmd->conn);
- case ISTATE_SEND_REJECT: /* FALLTHRU */
- case ISTATE_SEND_TEXTRSP: /* FALLTHRU */
+ /* fall through */
+ case ISTATE_SEND_REJECT:
+ case ISTATE_SEND_TEXTRSP:
cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
ib_dev, false);
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
index afa938bd26d6..4be3aef40bd2 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
@@ -139,6 +139,7 @@ void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter)
rcu_assign_pointer(adapter->mactbl, NULL);
synchronize_rcu();
opa_vnic_free_mac_tbl(mactbl);
+ adapter->info.vport.mac_tbl_digest = 0;
mutex_unlock(&adapter->mactbl_lock);
}
@@ -405,6 +406,42 @@ u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
return vl;
}
+/* opa_vnic_get_rc - return the routing control */
+static u8 opa_vnic_get_rc(struct __opa_veswport_info *info,
+ struct sk_buff *skb)
+{
+ u8 proto, rout_ctrl;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IPV6):
+ proto = ipv6_hdr(skb)->nexthdr;
+ if (proto == IPPROTO_TCP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV6_TCP);
+ else if (proto == IPPROTO_UDP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV6_UDP);
+ else
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, IPV6);
+ break;
+ case htons(ETH_P_IP):
+ proto = ip_hdr(skb)->protocol;
+ if (proto == IPPROTO_TCP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV4_TCP);
+ else if (proto == IPPROTO_UDP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV4_UDP);
+ else
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, IPV4);
+ break;
+ default:
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, DEFAULT);
+ }
+
+ return rout_ctrl;
+}
+
/* opa_vnic_calc_entropy - calculate the packet entropy */
u8 opa_vnic_calc_entropy(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
{
@@ -447,7 +484,7 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
{
struct __opa_veswport_info *info = &adapter->info;
struct opa_vnic_skb_mdata *mdata;
- u8 def_port, sc, entropy, *hdr;
+ u8 def_port, sc, rc, entropy, *hdr;
u16 len, l4_hdr;
u32 dlid;
@@ -458,6 +495,7 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
len = opa_vnic_wire_length(skb);
dlid = opa_vnic_get_dlid(adapter, skb, def_port);
sc = opa_vnic_get_sc(info, skb);
+ rc = opa_vnic_get_rc(info, skb);
l4_hdr = info->vesw.vesw_id;
mdata = skb_push(skb, sizeof(*mdata));
@@ -470,6 +508,6 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
}
opa_vnic_make_header(hdr, info->vport.encap_slid, dlid, len,
- info->vesw.pkey, entropy, sc, 0,
+ info->vesw.pkey, entropy, sc, rc,
OPA_VNIC_L4_ETHR, l4_hdr);
}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
index 4c434b9dd84c..e4c9bf2ef7e2 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
@@ -103,6 +103,17 @@
#define OPA_VNIC_ETH_LINK_UP 1
#define OPA_VNIC_ETH_LINK_DOWN 2
+/* routing control */
+#define OPA_VNIC_ENCAP_RC_DEFAULT 0
+#define OPA_VNIC_ENCAP_RC_IPV4 4
+#define OPA_VNIC_ENCAP_RC_IPV4_UDP 8
+#define OPA_VNIC_ENCAP_RC_IPV4_TCP 12
+#define OPA_VNIC_ENCAP_RC_IPV6 16
+#define OPA_VNIC_ENCAP_RC_IPV6_TCP 20
+#define OPA_VNIC_ENCAP_RC_IPV6_UDP 24
+
+#define OPA_VNIC_ENCAP_RC_EXT(w, b) (((w) >> OPA_VNIC_ENCAP_RC_ ## b) & 0x7)
+
/**
* struct opa_vesw_info - OPA vnic switch information
* @fabric_id: 10-bit fabric id
@@ -111,8 +122,8 @@
* @pkey: partition key
* @u_mcast_dlid: unknown multicast dlid
* @u_ucast_dlid: array of unknown unicast dlids
- * @eth_mtu: MTUs for each vlan PCP
- * @eth_mtu_non_vlan: MTU for non vlan packets
+ * @rc: routing control
+ * @eth_mtu: Ethernet MTU
*/
struct opa_vesw_info {
__be16 fabric_id;
@@ -128,9 +139,10 @@ struct opa_vesw_info {
__be32 u_mcast_dlid;
__be32 u_ucast_dlid[OPA_VESW_MAX_NUM_DEF_PORT];
- u8 rsvd3[44];
- __be16 eth_mtu[OPA_VNIC_MAX_NUM_PCP];
- __be16 eth_mtu_non_vlan;
+ __be32 rc;
+
+ u8 rsvd3[56];
+ __be16 eth_mtu;
u8 rsvd4[2];
} __packed;
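The new rc word packs one 3-bit routing-control selector per traffic class at the bit offsets defined above, and OPA_VNIC_ENCAP_RC_EXT() simply shifts the per-vesw word right by that offset and masks the low three bits. For example, extracting the IPv6/TCP selector (offset 20):

#include <linux/types.h>

/* Illustrative: equivalent to OPA_VNIC_ENCAP_RC_EXT(rc, IPV6_TCP). */
static u8 demo_rc_ipv6_tcp(u32 rc)
{
	return (rc >> 20) & 0x7;	/* OPA_VNIC_ENCAP_RC_IPV6_TCP = 20, bits 22:20 */
}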
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
index ca29e6d5aedc..afd95f432262 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -89,9 +89,10 @@ struct __opa_vesw_info {
u32 u_mcast_dlid;
u32 u_ucast_dlid[OPA_VESW_MAX_NUM_DEF_PORT];
- u8 rsvd3[44];
- u16 eth_mtu[OPA_VNIC_MAX_NUM_PCP];
- u16 eth_mtu_non_vlan;
+ u32 rc;
+
+ u8 rsvd3[56];
+ u16 eth_mtu;
u8 rsvd4[2];
} __packed;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index 1a3c25364b64..ce57e0f10289 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -112,6 +112,27 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
return rc;
}
+static void opa_vnic_update_state(struct opa_vnic_adapter *adapter, bool up)
+{
+ struct __opa_veswport_info *info = &adapter->info;
+
+ mutex_lock(&adapter->lock);
+ /* Operational state can only be DROP_ALL or FORWARDING */
+ if ((info->vport.config_state == OPA_VNIC_STATE_FORWARDING) && up) {
+ info->vport.oper_state = OPA_VNIC_STATE_FORWARDING;
+ info->vport.eth_link_status = OPA_VNIC_ETH_LINK_UP;
+ } else {
+ info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
+ info->vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
+ }
+
+ if (info->vport.config_state == OPA_VNIC_STATE_FORWARDING)
+ netif_dormant_off(adapter->netdev);
+ else
+ netif_dormant_on(adapter->netdev);
+ mutex_unlock(&adapter->lock);
+}
+
/* opa_vnic_process_vema_config - process vema configuration updates */
void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
{
@@ -130,7 +151,7 @@ void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
memcpy(saddr.sa_data, info->vport.base_mac_addr,
ARRAY_SIZE(info->vport.base_mac_addr));
mutex_lock(&adapter->lock);
- eth_mac_addr(netdev, &saddr);
+ eth_commit_mac_addr_change(netdev, &saddr);
memcpy(adapter->vema_mac_addr,
info->vport.base_mac_addr, ETH_ALEN);
mutex_unlock(&adapter->lock);
@@ -140,7 +161,7 @@ void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
/* Handle MTU limit change */
rtnl_lock();
- netdev->max_mtu = max_t(unsigned int, info->vesw.eth_mtu_non_vlan,
+ netdev->max_mtu = max_t(unsigned int, info->vesw.eth_mtu,
netdev->min_mtu);
if (netdev->mtu > netdev->max_mtu)
dev_set_mtu(netdev, netdev->max_mtu);
@@ -164,14 +185,8 @@ void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
adapter->flow_tbl[i] = port_count ? port_num[i % port_count] :
OPA_VNIC_INVALID_PORT;
- /* Operational state can only be DROP_ALL or FORWARDING */
- if (info->vport.config_state == OPA_VNIC_STATE_FORWARDING) {
- info->vport.oper_state = OPA_VNIC_STATE_FORWARDING;
- netif_dormant_off(netdev);
- } else {
- info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
- netif_dormant_on(netdev);
- }
+ /* update state */
+ opa_vnic_update_state(adapter, !!(netdev->flags & IFF_UP));
}
/*
@@ -183,6 +198,7 @@ static inline void opa_vnic_set_pod_values(struct opa_vnic_adapter *adapter)
adapter->info.vport.max_smac_ent = OPA_VNIC_MAX_SMAC_LIMIT;
adapter->info.vport.config_state = OPA_VNIC_STATE_DROP_ALL;
adapter->info.vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
+ adapter->info.vesw.eth_mtu = ETH_DATA_LEN;
}
/* opa_vnic_set_mac_addr - change mac address */
@@ -268,8 +284,8 @@ static int opa_netdev_open(struct net_device *netdev)
return rc;
}
- /* Update eth link status and send trap */
- adapter->info.vport.eth_link_status = OPA_VNIC_ETH_LINK_UP;
+ /* Update status and send trap */
+ opa_vnic_update_state(adapter, true);
opa_vnic_vema_report_event(adapter,
OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE);
return 0;
@@ -287,8 +303,8 @@ static int opa_netdev_close(struct net_device *netdev)
return rc;
}
- /* Update eth link status and send trap */
- adapter->info.vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
+ /* Update status and send trap */
+ opa_vnic_update_state(adapter, false);
opa_vnic_vema_report_event(adapter,
OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE);
return 0;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 21f0b481edcc..4b615c1451e7 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -186,6 +186,7 @@ static inline void vema_get_pod_values(struct opa_veswport_info *port_info)
cpu_to_be16(OPA_VNIC_MAX_SMAC_LIMIT);
port_info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
port_info->vport.config_state = OPA_VNIC_STATE_DROP_ALL;
+ port_info->vesw.eth_mtu = cpu_to_be16(ETH_DATA_LEN);
}
/**
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
index c2733964379c..868b5aec1537 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
@@ -176,11 +176,10 @@ void opa_vnic_get_vesw_info(struct opa_vnic_adapter *adapter,
for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++)
info->u_ucast_dlid[i] = cpu_to_be32(src->u_ucast_dlid[i]);
- memcpy(info->rsvd3, src->rsvd3, ARRAY_SIZE(src->rsvd3));
- for (i = 0; i < OPA_VNIC_MAX_NUM_PCP; i++)
- info->eth_mtu[i] = cpu_to_be16(src->eth_mtu[i]);
+ info->rc = cpu_to_be32(src->rc);
- info->eth_mtu_non_vlan = cpu_to_be16(src->eth_mtu_non_vlan);
+ memcpy(info->rsvd3, src->rsvd3, ARRAY_SIZE(src->rsvd3));
+ info->eth_mtu = cpu_to_be16(src->eth_mtu);
memcpy(info->rsvd4, src->rsvd4, ARRAY_SIZE(src->rsvd4));
}
@@ -211,11 +210,10 @@ void opa_vnic_set_vesw_info(struct opa_vnic_adapter *adapter,
for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++)
dst->u_ucast_dlid[i] = be32_to_cpu(info->u_ucast_dlid[i]);
- memcpy(dst->rsvd3, info->rsvd3, ARRAY_SIZE(info->rsvd3));
- for (i = 0; i < OPA_VNIC_MAX_NUM_PCP; i++)
- dst->eth_mtu[i] = be16_to_cpu(info->eth_mtu[i]);
+ dst->rc = be32_to_cpu(info->rc);
- dst->eth_mtu_non_vlan = be16_to_cpu(info->eth_mtu_non_vlan);
+ memcpy(dst->rsvd3, info->rsvd3, ARRAY_SIZE(info->rsvd3));
+ dst->eth_mtu = be16_to_cpu(info->eth_mtu);
memcpy(dst->rsvd4, info->rsvd4, ARRAY_SIZE(info->rsvd4));
}
@@ -348,7 +346,7 @@ void opa_vnic_query_mcast_macs(struct opa_vnic_adapter *adapter,
void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
struct opa_veswport_iface_macs *macs)
{
- u16 start_idx, tot_macs, num_macs, idx = 0, count = 0;
+ u16 start_idx, tot_macs, num_macs, idx = 0, count = 0, em_macs = 0;
struct netdev_hw_addr *ha;
start_idx = be16_to_cpu(macs->start_idx);
@@ -359,8 +357,10 @@ void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
/* Do not include EM specified MAC address */
if (!memcmp(adapter->info.vport.base_mac_addr, ha->addr,
- ARRAY_SIZE(adapter->info.vport.base_mac_addr)))
+ ARRAY_SIZE(adapter->info.vport.base_mac_addr))) {
+ em_macs++;
continue;
+ }
if (start_idx > idx++)
continue;
@@ -383,7 +383,7 @@ void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
}
tot_macs = netdev_hw_addr_list_count(&adapter->netdev->dev_addrs) +
- netdev_uc_count(adapter->netdev);
+ netdev_uc_count(adapter->netdev) - em_macs;
macs->tot_macs_in_lst = cpu_to_be16(tot_macs);
macs->num_macs_in_msg = cpu_to_be16(count);
macs->gen_count = cpu_to_be16(adapter->info.vport.uc_macs_gen_count);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index fa5ccdb3bb2a..972d4b3c5223 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -464,20 +464,20 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
/**
* srp_destroy_qp() - destroy an RDMA queue pair
- * @qp: RDMA queue pair.
+ * @ch: SRP RDMA channel.
*
* Drain the qp before destroying it. This avoids that the receive
* completion handler can access the queue pair while it is
* being destroyed.
*/
-static void srp_destroy_qp(struct srp_rdma_ch *ch, struct ib_qp *qp)
+static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
spin_lock_irq(&ch->lock);
ib_process_cq_direct(ch->send_cq, -1);
spin_unlock_irq(&ch->lock);
- ib_drain_qp(qp);
- ib_destroy_qp(qp);
+ ib_drain_qp(ch->qp);
+ ib_destroy_qp(ch->qp);
}
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
@@ -550,7 +550,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
}
if (ch->qp)
- srp_destroy_qp(ch, ch->qp);
+ srp_destroy_qp(ch);
if (ch->recv_cq)
ib_free_cq(ch->recv_cq);
if (ch->send_cq)
@@ -617,7 +617,7 @@ static void srp_free_ch_ib(struct srp_target_port *target,
ib_destroy_fmr_pool(ch->fmr_pool);
}
- srp_destroy_qp(ch, ch->qp);
+ srp_destroy_qp(ch);
ib_free_cq(ch->send_cq);
ib_free_cq(ch->recv_cq);
@@ -665,12 +665,19 @@ static void srp_path_rec_completion(int status,
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
- int ret;
+ int ret = -ENODEV;
ch->path.numb_path = 1;
init_completion(&ch->done);
+ /*
+ * Avoid that the SCSI host can be removed by srp_remove_target()
+ * before srp_path_rec_completion() is called.
+ */
+ if (!scsi_host_get(target->scsi_host))
+ goto out;
+
ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
target->srp_host->srp_dev->dev,
target->srp_host->port,
@@ -684,18 +691,41 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
GFP_KERNEL,
srp_path_rec_completion,
ch, &ch->path_query);
- if (ch->path_query_id < 0)
- return ch->path_query_id;
+ ret = ch->path_query_id;
+ if (ret < 0)
+ goto put;
ret = wait_for_completion_interruptible(&ch->done);
if (ret < 0)
- return ret;
+ goto put;
- if (ch->status < 0)
+ ret = ch->status;
+ if (ret < 0)
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Path record query failed\n");
- return ch->status;
+put:
+ scsi_host_put(target->scsi_host);
+
+out:
+ return ret;
+}
+
+static u8 srp_get_subnet_timeout(struct srp_host *host)
+{
+ struct ib_port_attr attr;
+ int ret;
+ u8 subnet_timeout = 18;
+
+ ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
+ if (ret == 0)
+ subnet_timeout = attr.subnet_timeout;
+
+ if (unlikely(subnet_timeout < 15))
+ pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
+ dev_name(&host->srp_dev->dev->dev), subnet_timeout);
+
+ return subnet_timeout;
}
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
@@ -706,6 +736,9 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
struct srp_login_req priv;
} *req = NULL;
int status;
+ u8 subnet_timeout;
+
+ subnet_timeout = srp_get_subnet_timeout(target->srp_host);
req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
@@ -728,8 +761,8 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
* module parameters if anyone cared about setting them.
*/
req->param.responder_resources = 4;
- req->param.remote_cm_response_timeout = 20;
- req->param.local_cm_response_timeout = 20;
+ req->param.remote_cm_response_timeout = subnet_timeout + 2;
+ req->param.local_cm_response_timeout = subnet_timeout + 2;
req->param.retry_count = target->tl_retry_count;
req->param.rnr_retry_count = 7;
req->param.max_cm_retries = 15;
@@ -1279,7 +1312,6 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_pd *pd = target->pd;
struct ib_pool_fmr *fmr;
u64 io_addr = 0;
@@ -1295,9 +1327,9 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
if (state->npages == 0)
return 0;
- if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
+ if (state->npages == 1 && target->global_rkey) {
srp_map_desc(state, state->base_dma_addr, state->dma_len,
- pd->unsafe_global_rkey);
+ target->global_rkey);
goto reset_state;
}
@@ -1337,7 +1369,6 @@ static int srp_map_finish_fr(struct srp_map_state *state,
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_pd *pd = target->pd;
struct ib_send_wr *bad_wr;
struct ib_reg_wr wr;
struct srp_fr_desc *desc;
@@ -1353,12 +1384,12 @@ static int srp_map_finish_fr(struct srp_map_state *state,
WARN_ON_ONCE(!dev->use_fast_reg);
- if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
+ if (sg_nents == 1 && target->global_rkey) {
unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
sg_dma_len(state->sg) - sg_offset,
- pd->unsafe_global_rkey);
+ target->global_rkey);
if (sg_offset_p)
*sg_offset_p = 0;
return 1;
@@ -1520,7 +1551,7 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
for_each_sg(scat, sg, count, i) {
srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
ib_sg_dma_len(dev->dev, sg),
- target->pd->unsafe_global_rkey);
+ target->global_rkey);
}
return 0;
@@ -1618,7 +1649,6 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_request *req)
{
struct srp_target_port *target = ch->target;
- struct ib_pd *pd = target->pd;
struct scatterlist *scat;
struct srp_cmd *cmd = req->cmd->buf;
int len, nents, count, ret;
@@ -1654,7 +1684,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
fmt = SRP_DATA_DESC_DIRECT;
len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
- if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
+ if (count == 1 && target->global_rkey) {
/*
* The midlayer only generated a single gather/scatter
* entry, or DMA mapping coalesced everything to a
@@ -1664,7 +1694,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_direct_buf *buf = (void *) cmd->add_data;
buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
- buf->key = cpu_to_be32(pd->unsafe_global_rkey);
+ buf->key = cpu_to_be32(target->global_rkey);
buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
req->nmdesc = 0;
@@ -1735,14 +1765,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
memcpy(indirect_hdr->desc_list, req->indirect_desc,
count * sizeof (struct srp_direct_buf));
- if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
+ if (!target->global_rkey) {
ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
idb_len, &idb_rkey);
if (ret < 0)
goto unmap;
req->nmdesc++;
} else {
- idb_rkey = cpu_to_be32(pd->unsafe_global_rkey);
+ idb_rkey = cpu_to_be32(target->global_rkey);
}
indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
@@ -2403,7 +2433,7 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
switch (event->param.rej_rcvd.reason) {
case IB_CM_REJ_PORT_CM_REDIRECT:
cpi = event->param.rej_rcvd.ari;
- sa_path_set_dlid(&ch->path, htonl(ntohs(cpi->redirect_lid)));
+ sa_path_set_dlid(&ch->path, ntohs(cpi->redirect_lid));
ch->path.pkey = cpi->redirect_pkey;
cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
@@ -3318,8 +3348,8 @@ static ssize_t srp_create_target(struct device *dev,
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
- target->pd = host->srp_dev->pd;
target->lkey = host->srp_dev->pd->local_dma_lkey;
+ target->global_rkey = host->srp_dev->global_rkey;
target->cmd_sg_cnt = cmd_sg_entries;
target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
target->allow_ext_sg = allow_ext_sg;
@@ -3638,6 +3668,10 @@ static void srp_add_one(struct ib_device *device)
if (IS_ERR(srp_dev->pd))
goto free_dev;
+ if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+ srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
+ WARN_ON_ONCE(srp_dev->global_rkey == 0);
+ }
for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
host = srp_add_port(srp_dev, p);
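srp_send_req() above now derives the CM response timeouts from the port's subnet timeout plus two. Assuming the usual IBA encoding, where both values are 5-bit exponents of 4.096 us, the +2 gives the CM roughly four times the subnet timeout: with the fallback value of 18 that is about 1.07 s for the subnet timeout and about 4.3 s for the CM response timeout. A small conversion sketch:

#include <linux/types.h>

/* Illustrative: 5-bit IB timeout exponent -> nanoseconds (4.096 us * 2^exp). */
static inline u64 ib_timeout_exp_to_ns(u8 exp)
{
	return 4096ULL << exp;
}
/* exp 18 -> ~1.07 s; exp 20 (subnet_timeout + 2) -> ~4.29 s */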
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index ab9077b81d5a..a814f5ef16f9 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -90,6 +90,7 @@ struct srp_device {
struct list_head dev_list;
struct ib_device *dev;
struct ib_pd *pd;
+ u32 global_rkey;
u64 mr_page_mask;
int mr_page_size;
int mr_max_size;
@@ -179,7 +180,7 @@ struct srp_target_port {
spinlock_t lock;
/* read only in the hot path */
- struct ib_pd *pd;
+ u32 global_rkey;
struct srp_rdma_ch *ch;
u32 ch_count;
u32 lkey;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 9e8e9220f816..8a1bd354b1cc 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -80,7 +80,7 @@ module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
"Shared receive queue (SRQ) size.");
-static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
+static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
}
@@ -295,6 +295,7 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
{
struct srpt_device *sdev = sport->sdev;
struct ib_dm_ioc_profile *iocp;
+ int send_queue_depth;
iocp = (struct ib_dm_ioc_profile *)mad->data;
@@ -310,6 +311,12 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
return;
}
+ if (sdev->use_srq)
+ send_queue_depth = sdev->srq_size;
+ else
+ send_queue_depth = min(SRPT_RQ_SIZE,
+ sdev->device->attrs.max_qp_wr);
+
memset(iocp, 0, sizeof(*iocp));
strcpy(iocp->id_string, SRPT_ID_STRING);
iocp->guid = cpu_to_be64(srpt_service_guid);
@@ -322,7 +329,7 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
- iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
+ iocp->send_queue_depth = cpu_to_be16(send_queue_depth);
iocp->rdma_read_depth = 4;
iocp->send_size = cpu_to_be32(srp_max_req_size);
iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
@@ -686,6 +693,9 @@ static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
{
int i;
+ if (!ioctx_ring)
+ return;
+
for (i = 0; i < ring_size; ++i)
srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
kfree(ioctx_ring);
@@ -757,7 +767,7 @@ static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
/**
* srpt_post_recv() - Post an IB receive request.
*/
-static int srpt_post_recv(struct srpt_device *sdev,
+static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *ioctx)
{
struct ib_sge list;
@@ -766,7 +776,7 @@ static int srpt_post_recv(struct srpt_device *sdev,
BUG_ON(!sdev);
list.addr = ioctx->ioctx.dma;
list.length = srp_max_req_size;
- list.lkey = sdev->pd->local_dma_lkey;
+ list.lkey = sdev->lkey;
ioctx->ioctx.cqe.done = srpt_recv_done;
wr.wr_cqe = &ioctx->ioctx.cqe;
@@ -774,7 +784,10 @@ static int srpt_post_recv(struct srpt_device *sdev,
wr.sg_list = &list;
wr.num_sge = 1;
- return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
+ if (sdev->use_srq)
+ return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
+ else
+ return ib_post_recv(ch->qp, &wr, &bad_wr);
}
/**
@@ -1517,7 +1530,7 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
break;
}
- srpt_post_recv(ch->sport->sdev, recv_ioctx);
+ srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
return;
out_wait:
@@ -1616,7 +1629,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
struct srpt_device *sdev = sport->sdev;
const struct ib_device_attr *attrs = &sdev->device->attrs;
u32 srp_sq_size = sport->port_attrib.srp_sq_size;
- int ret;
+ int i, ret;
WARN_ON(ch->rq_size < 1);
@@ -1640,7 +1653,6 @@ retry:
= (void(*)(struct ib_event *, void*))srpt_qp_event;
qp_init->send_cq = ch->cq;
qp_init->recv_cq = ch->cq;
- qp_init->srq = sdev->srq;
qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
qp_init->qp_type = IB_QPT_RC;
/*
@@ -1650,10 +1662,16 @@ retry:
* both, as RDMA contexts will also post completions for the
* RDMA READ case.
*/
- qp_init->cap.max_send_wr = srp_sq_size / 2;
+ qp_init->cap.max_send_wr = min(srp_sq_size / 2, attrs->max_qp_wr + 0U);
qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
qp_init->port_num = ch->sport->port;
+ if (sdev->use_srq) {
+ qp_init->srq = sdev->srq;
+ } else {
+ qp_init->cap.max_recv_wr = ch->rq_size;
+ qp_init->cap.max_recv_sge = qp_init->cap.max_send_sge;
+ }
ch->qp = ib_create_qp(sdev->pd, qp_init);
if (IS_ERR(ch->qp)) {
@@ -1679,6 +1697,10 @@ retry:
if (ret)
goto err_destroy_qp;
+ if (!sdev->use_srq)
+ for (i = 0; i < ch->rq_size; i++)
+ srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);
+
out:
kfree(qp_init);
return ret;
@@ -1765,19 +1787,65 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
return ret;
}
-static void __srpt_close_all_ch(struct srpt_device *sdev)
+/*
+ * Send DREQ and wait for DREP. Return true if and only if this function
+ * changed the state of @ch.
+ */
+static bool srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
+ __must_hold(&sdev->mutex)
{
+ DECLARE_COMPLETION_ONSTACK(release_done);
+ struct srpt_device *sdev = ch->sport->sdev;
+ bool wait;
+
+ lockdep_assert_held(&sdev->mutex);
+
+ pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
+ ch->state);
+
+ WARN_ON(ch->release_done);
+ ch->release_done = &release_done;
+ wait = !list_empty(&ch->list);
+ srpt_disconnect_ch(ch);
+ mutex_unlock(&sdev->mutex);
+
+ if (!wait)
+ goto out;
+
+ while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
+ pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
+ ch->sess_name, ch->qp->qp_num, ch->state);
+
+out:
+ mutex_lock(&sdev->mutex);
+ return wait;
+}
+
+static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
+ __must_hold(&sdev->mutex)
+{
+ struct srpt_device *sdev = sport->sdev;
struct srpt_rdma_ch *ch;
lockdep_assert_held(&sdev->mutex);
+ if (sport->enabled == enabled)
+ return;
+ sport->enabled = enabled;
+ if (sport->enabled)
+ return;
+
+again:
list_for_each_entry(ch, &sdev->rch_list, list) {
- if (srpt_disconnect_ch(ch) >= 0)
- pr_info("Closing channel %s-%d because target %s has been disabled\n",
- ch->sess_name, ch->qp->qp_num,
- sdev->device->name);
- srpt_close_ch(ch);
+ if (ch->sport == sport) {
+ pr_info("%s: closing channel %s-%d\n",
+ sdev->device->name, ch->sess_name,
+ ch->qp->qp_num);
+ if (srpt_disconnect_ch_sync(ch))
+ goto again;
+ }
}
+
}
static void srpt_free_ch(struct kref *kref)
@@ -1818,6 +1886,10 @@ static void srpt_release_channel_work(struct work_struct *w)
ch->sport->sdev, ch->rq_size,
ch->rsp_size, DMA_TO_DEVICE);
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
+ sdev, ch->rq_size,
+ srp_max_req_size, DMA_FROM_DEVICE);
+
mutex_lock(&sdev->mutex);
list_del_init(&ch->list);
if (ch->release_done)
@@ -1953,10 +2025,11 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ch->cm_id = cm_id;
cm_id->context = ch;
/*
- * Avoid QUEUE_FULL conditions by limiting the number of buffers used
- * for the SRP protocol to the command queue size.
+ * ch->rq_size should be at least as large as the initiator queue
+ * depth to avoid that the initiator driver has to report QUEUE_FULL
+ * to the SCSI mid-layer.
*/
- ch->rq_size = SRPT_RQ_SIZE;
+ ch->rq_size = min(SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
spin_lock_init(&ch->spinlock);
ch->state = CH_CONNECTING;
INIT_LIST_HEAD(&ch->cmd_wait_list);
@@ -1974,6 +2047,19 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ch->ioctx_ring[i]->ch = ch;
list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
}
+ if (!sdev->use_srq) {
+ ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
+ srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
+ sizeof(*ch->ioctx_recv_ring[0]),
+ srp_max_req_size,
+ DMA_FROM_DEVICE);
+ if (!ch->ioctx_recv_ring) {
+ pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
+ rej->reason =
+ cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ goto free_ring;
+ }
+ }
ret = srpt_create_ch_ib(ch);
if (ret) {
@@ -1981,7 +2067,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because creating"
" a new RDMA channel failed.\n");
- goto free_ring;
+ goto free_recv_ring;
}
ret = srpt_ch_qp_rtr(ch, ch->qp);
@@ -2072,6 +2158,11 @@ release_channel:
destroy_ib:
srpt_destroy_ch_ib(ch);
+free_recv_ring:
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
+ ch->sport->sdev, ch->rq_size,
+ srp_max_req_size, DMA_FROM_DEVICE);
+
free_ring:
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
@@ -2342,7 +2433,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
sge.addr = ioctx->ioctx.dma;
sge.length = resp_len;
- sge.lkey = sdev->pd->local_dma_lkey;
+ sge.lkey = sdev->lkey;
ioctx->ioctx.cqe.done = srpt_send_done;
send_wr.next = NULL;
@@ -2417,8 +2508,7 @@ static int srpt_release_sdev(struct srpt_device *sdev)
mutex_lock(&sdev->mutex);
for (i = 0; i < ARRAY_SIZE(sdev->port); i++)
- sdev->port[i].enabled = false;
- __srpt_close_all_ch(sdev);
+ srpt_set_enabled(&sdev->port[i], false);
mutex_unlock(&sdev->mutex);
res = wait_event_interruptible(sdev->ch_releaseQ,
@@ -2465,6 +2555,74 @@ static struct se_wwn *srpt_lookup_wwn(const char *name)
return wwn;
}
+static void srpt_free_srq(struct srpt_device *sdev)
+{
+ if (!sdev->srq)
+ return;
+
+ ib_destroy_srq(sdev->srq);
+ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+ sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
+ sdev->srq = NULL;
+}
+
+static int srpt_alloc_srq(struct srpt_device *sdev)
+{
+ struct ib_srq_init_attr srq_attr = {
+ .event_handler = srpt_srq_event,
+ .srq_context = (void *)sdev,
+ .attr.max_wr = sdev->srq_size,
+ .attr.max_sge = 1,
+ .srq_type = IB_SRQT_BASIC,
+ };
+ struct ib_device *device = sdev->device;
+ struct ib_srq *srq;
+ int i;
+
+ WARN_ON_ONCE(sdev->srq);
+ srq = ib_create_srq(sdev->pd, &srq_attr);
+ if (IS_ERR(srq)) {
+ pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq));
+ return PTR_ERR(srq);
+ }
+
+ pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
+ sdev->device->attrs.max_srq_wr, device->name);
+
+ sdev->ioctx_ring = (struct srpt_recv_ioctx **)
+ srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
+ sizeof(*sdev->ioctx_ring[0]),
+ srp_max_req_size, DMA_FROM_DEVICE);
+ if (!sdev->ioctx_ring) {
+ ib_destroy_srq(srq);
+ return -ENOMEM;
+ }
+
+ sdev->use_srq = true;
+ sdev->srq = srq;
+
+ for (i = 0; i < sdev->srq_size; ++i)
+ srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
+
+ return 0;
+}
+
+static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
+{
+ struct ib_device *device = sdev->device;
+ int ret = 0;
+
+ if (!use_srq) {
+ srpt_free_srq(sdev);
+ sdev->use_srq = false;
+ } else if (use_srq && !sdev->srq) {
+ ret = srpt_alloc_srq(sdev);
+ }
+ pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__, device->name,
+ sdev->use_srq, ret);
+ return ret;
+}
+
/**
* srpt_add_one() - Infiniband device addition callback function.
*/
@@ -2472,7 +2630,6 @@ static void srpt_add_one(struct ib_device *device)
{
struct srpt_device *sdev;
struct srpt_port *sport;
- struct ib_srq_init_attr srq_attr;
int i;
pr_debug("device = %p\n", device);
@@ -2490,29 +2647,18 @@ static void srpt_add_one(struct ib_device *device)
if (IS_ERR(sdev->pd))
goto free_dev;
- sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
-
- srq_attr.event_handler = srpt_srq_event;
- srq_attr.srq_context = (void *)sdev;
- srq_attr.attr.max_wr = sdev->srq_size;
- srq_attr.attr.max_sge = 1;
- srq_attr.attr.srq_limit = 0;
- srq_attr.srq_type = IB_SRQT_BASIC;
+ sdev->lkey = sdev->pd->local_dma_lkey;
- sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
- if (IS_ERR(sdev->srq))
- goto err_pd;
+ sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
- pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
- __func__, sdev->srq_size, sdev->device->attrs.max_srq_wr,
- device->name);
+ srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq);
if (!srpt_service_guid)
srpt_service_guid = be64_to_cpu(device->node_guid);
sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
if (IS_ERR(sdev->cm_id))
- goto err_srq;
+ goto err_ring;
/* print out target login information */
pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
@@ -2532,16 +2678,6 @@ static void srpt_add_one(struct ib_device *device)
srpt_event_handler);
ib_register_event_handler(&sdev->event_handler);
- sdev->ioctx_ring = (struct srpt_recv_ioctx **)
- srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
- sizeof(*sdev->ioctx_ring[0]),
- srp_max_req_size, DMA_FROM_DEVICE);
- if (!sdev->ioctx_ring)
- goto err_event;
-
- for (i = 0; i < sdev->srq_size; ++i)
- srpt_post_recv(sdev, sdev->ioctx_ring[i]);
-
WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
@@ -2551,12 +2687,13 @@ static void srpt_add_one(struct ib_device *device)
sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
+ sport->port_attrib.use_srq = false;
INIT_WORK(&sport->work, srpt_refresh_port_work);
if (srpt_refresh_port(sport)) {
pr_err("MAD registration failed for %s-%d.\n",
sdev->device->name, i);
- goto err_ring;
+ goto err_event;
}
}
@@ -2569,17 +2706,12 @@ out:
pr_debug("added %s.\n", device->name);
return;
-err_ring:
- srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
- sdev->srq_size, srp_max_req_size,
- DMA_FROM_DEVICE);
err_event:
ib_unregister_event_handler(&sdev->event_handler);
err_cm:
ib_destroy_cm_id(sdev->cm_id);
-err_srq:
- ib_destroy_srq(sdev->srq);
-err_pd:
+err_ring:
+ srpt_free_srq(sdev);
ib_dealloc_pd(sdev->pd);
free_dev:
kfree(sdev);
@@ -2622,12 +2754,10 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
spin_unlock(&srpt_dev_lock);
srpt_release_sdev(sdev);
- ib_destroy_srq(sdev->srq);
+ srpt_free_srq(sdev);
+
ib_dealloc_pd(sdev->pd);
- srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
- sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
- sdev->ioctx_ring = NULL;
kfree(sdev);
}
@@ -2706,27 +2836,12 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
*/
static void srpt_close_session(struct se_session *se_sess)
{
- DECLARE_COMPLETION_ONSTACK(release_done);
struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
struct srpt_device *sdev = ch->sport->sdev;
- bool wait;
-
- pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
- ch->state);
mutex_lock(&sdev->mutex);
- BUG_ON(ch->release_done);
- ch->release_done = &release_done;
- wait = !list_empty(&ch->list);
- srpt_disconnect_ch(ch);
+ srpt_disconnect_ch_sync(ch);
mutex_unlock(&sdev->mutex);
-
- if (!wait)
- return;
-
- while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
- pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
- ch->sess_name, ch->qp->qp_num, ch->state);
}
/**
@@ -2777,7 +2892,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
{
const char *p;
unsigned len, count, leading_zero_bytes;
- int ret, rc;
+ int ret;
p = name;
if (strncasecmp(p, "0x", 2) == 0)
@@ -2789,10 +2904,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
count = min(len / 2, 16U);
leading_zero_bytes = 16 - count;
memset(i_port_id, 0, leading_zero_bytes);
- rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
- if (rc < 0)
- pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
- ret = 0;
+ ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
+ if (ret < 0)
+ pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
out:
return ret;
}
@@ -2926,14 +3040,55 @@ static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
return count;
}
+static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item,
+ char *page)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+
+ return sprintf(page, "%d\n", sport->port_attrib.use_srq);
+}
+
+static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+ struct srpt_device *sdev = sport->sdev;
+ unsigned long val;
+ bool enabled;
+ int ret;
+
+ ret = kstrtoul(page, 0, &val);
+ if (ret < 0)
+ return ret;
+ if (val != !!val)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&sdev->mutex);
+ if (ret < 0)
+ return ret;
+ enabled = sport->enabled;
+ /* Log out all initiator systems before changing 'use_srq'. */
+ srpt_set_enabled(sport, false);
+ sport->port_attrib.use_srq = val;
+ srpt_use_srq(sdev, sport->port_attrib.use_srq);
+ srpt_set_enabled(sport, enabled);
+ mutex_unlock(&sdev->mutex);
+
+ return count;
+}
+
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size);
+CONFIGFS_ATTR(srpt_tpg_attrib_, use_srq);
static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
&srpt_tpg_attrib_attr_srp_max_rdma_size,
&srpt_tpg_attrib_attr_srp_max_rsp_size,
&srpt_tpg_attrib_attr_srp_sq_size,
+ &srpt_tpg_attrib_attr_use_srq,
NULL,
};
@@ -2951,7 +3106,6 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
struct se_portal_group *se_tpg = to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
struct srpt_device *sdev = sport->sdev;
- struct srpt_rdma_ch *ch;
unsigned long tmp;
int ret;
@@ -2965,24 +3119,11 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
return -EINVAL;
}
- if (sport->enabled == tmp)
- goto out;
- sport->enabled = tmp;
- if (sport->enabled)
- goto out;
mutex_lock(&sdev->mutex);
- list_for_each_entry(ch, &sdev->rch_list, list) {
- if (ch->sport == sport) {
- pr_debug("%s: ch %p %s-%d\n", __func__, ch,
- ch->sess_name, ch->qp->qp_num);
- srpt_disconnect_ch(ch);
- srpt_close_ch(ch);
- }
- }
+ srpt_set_enabled(sport, tmp);
mutex_unlock(&sdev->mutex);
-out:
return count;
}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 1b817e51b84b..673387d365a3 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -252,6 +252,7 @@ enum rdma_ch_state {
* @free_list: Head of list with free send I/O contexts.
* @state: channel state. See also enum rdma_ch_state.
* @ioctx_ring: Send ring.
+ * @ioctx_recv_ring: Receive I/O context ring.
* @list: Node for insertion in the srpt_device.rch_list list.
* @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
* list contains struct srpt_ioctx elements and is protected
@@ -281,6 +282,7 @@ struct srpt_rdma_ch {
struct list_head free_list;
enum rdma_ch_state state;
struct srpt_send_ioctx **ioctx_ring;
+ struct srpt_recv_ioctx **ioctx_recv_ring;
struct list_head list;
struct list_head cmd_wait_list;
struct se_session *sess;
@@ -295,11 +297,13 @@ struct srpt_rdma_ch {
* @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
* @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
* @srp_sq_size: Shared receive queue (SRQ) size.
+ * @use_srq: Whether or not to use SRQ.
*/
struct srpt_port_attrib {
u32 srp_max_rdma_size;
u32 srp_max_rsp_size;
u32 srp_sq_size;
+ bool use_srq;
};
/**
@@ -343,10 +347,11 @@ struct srpt_port {
* struct srpt_device - Information associated by SRPT with a single HCA.
* @device: Backpointer to the struct ib_device managed by the IB core.
* @pd: IB protection domain.
- * @mr: L_Key (local key) with write access to all local memory.
+ * @lkey: L_Key (local key) with write access to all local memory.
* @srq: Per-HCA SRQ (shared receive queue).
* @cm_id: Connection identifier.
* @srq_size: SRQ size.
+ * @use_srq: Whether or not to use SRQ.
* @ioctx_ring: Per-HCA SRQ.
* @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
* @ch_releaseQ: Enables waiting for removal from rch_list.
@@ -358,9 +363,11 @@ struct srpt_port {
struct srpt_device {
struct ib_device *device;
struct ib_pd *pd;
+ u32 lkey;
struct ib_srq *srq;
struct ib_cm_id *cm_id;
int srq_size;
+ bool use_srq;
struct srpt_recv_ioctx **ioctx_ring;
struct list_head rch_list;
wait_queue_head_t ch_releaseQ;
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index fcc6c3368182..2743ed4656e4 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -412,10 +412,10 @@ static void ml_play_effects(struct ml_device *ml)
ml_schedule_timer(ml);
}
-static void ml_effect_timer(unsigned long timer_data)
+static void ml_effect_timer(struct timer_list *t)
{
- struct input_dev *dev = (struct input_dev *)timer_data;
- struct ml_device *ml = dev->ff->private;
+ struct ml_device *ml = from_timer(ml, t, timer);
+ struct input_dev *dev = ml->dev;
unsigned long flags;
pr_debug("timer: updating effects\n");
@@ -526,7 +526,7 @@ int input_ff_create_memless(struct input_dev *dev, void *data,
ml->private = data;
ml->play_effect = play_effect;
ml->gain = 0xffff;
- setup_timer(&ml->timer, ml_effect_timer, (unsigned long)dev);
+ timer_setup(&ml->timer, ml_effect_timer, 0);
set_bit(FF_GAIN, dev->ffbit);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 762bfb9487dc..44916ef4a424 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -76,7 +76,7 @@ static void input_start_autorepeat(struct input_dev *dev, int code)
{
if (test_bit(EV_REP, dev->evbit) &&
dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
- dev->timer.data) {
+ dev->timer.function) {
dev->repeat_key = code;
mod_timer(&dev->timer,
jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
@@ -179,9 +179,9 @@ static void input_pass_event(struct input_dev *dev,
* dev->event_lock here to avoid racing with input_event
* which may cause keys get "stuck".
*/
-static void input_repeat_key(unsigned long data)
+static void input_repeat_key(struct timer_list *t)
{
- struct input_dev *dev = (void *) data;
+ struct input_dev *dev = from_timer(dev, t, timer);
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -1784,7 +1784,7 @@ struct input_dev *input_allocate_device(void)
device_initialize(&dev->dev);
mutex_init(&dev->mutex);
spin_lock_init(&dev->event_lock);
- init_timer(&dev->timer);
+ timer_setup(&dev->timer, NULL, 0);
INIT_LIST_HEAD(&dev->h_list);
INIT_LIST_HEAD(&dev->node);
@@ -2047,8 +2047,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
*/
void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
{
- dev->timer.data = (unsigned long) dev;
- dev->timer.function = input_repeat_key;
+ dev->timer.function = (TIMER_FUNC_TYPE)input_repeat_key;
dev->rep[REP_DELAY] = delay;
dev->rep[REP_PERIOD] = period;
}
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index c43f087a496d..ca734ea97e53 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -654,6 +654,7 @@ static void gc_psx_report_one(struct gc_pad *pad, unsigned char psx_type,
input_report_key(dev, BTN_THUMBL, ~data[0] & 0x04);
input_report_key(dev, BTN_THUMBR, ~data[0] & 0x02);
+ /* fall through */
case GC_PSX_NEGCON:
case GC_PSX_ANALOG:
@@ -887,6 +888,7 @@ static int gc_setup_pad(struct gc *gc, int idx, int pad_type)
case GC_SNES:
for (i = 4; i < 8; i++)
__set_bit(gc_snes_btn[i], input_dev->keybit);
+ /* fall through */
case GC_NES:
for (i = 0; i < 4; i++)
__set_bit(gc_snes_btn[i], input_dev->keybit);
@@ -894,6 +896,7 @@ static int gc_setup_pad(struct gc *gc, int idx, int pad_type)
case GC_MULTI2:
__set_bit(BTN_THUMB, input_dev->keybit);
+ /* fall through */
case GC_MULTI:
__set_bit(BTN_TRIGGER, input_dev->keybit);
break;
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 4a95b224169f..5e602a6852b7 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -672,16 +672,16 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
switch (i * m) {
case 60:
- sw->number++;
+ sw->number++; /* fall through */
case 45: /* Ambiguous packet length */
if (j <= 40) { /* ID length less or eq 40 -> FSP */
case 43:
sw->type = SW_ID_FSP;
break;
}
- sw->number++;
+ sw->number++; /* fall through */
case 30:
- sw->number++;
+ sw->number++; /* fall through */
case 15:
sw->type = SW_ID_GP;
break;
@@ -697,9 +697,9 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
sw->type = SW_ID_PP;
break;
case 66:
- sw->bits = 3;
+ sw->bits = 3; /* fall through */
case 198:
- sw->length = 22;
+ sw->length = 22; /* fall through */
case 64:
sw->type = SW_ID_3DP;
if (j == 160)
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index e9712a1b7cad..bb3faeff8cac 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -162,6 +162,7 @@ static irqreturn_t spaceball_interrupt(struct serio *serio,
break;
}
spaceball->escape = 0;
+ /* fall through */
case 'M':
case 'Q':
case 'S':
@@ -169,6 +170,7 @@ static irqreturn_t spaceball_interrupt(struct serio *serio,
spaceball->escape = 0;
data &= 0x1f;
}
+ /* fall through */
default:
if (spaceball->escape)
spaceball->escape = 0;
@@ -234,11 +236,13 @@ static int spaceball_connect(struct serio *serio, struct serio_driver *drv)
input_dev->keybit[BIT_WORD(BTN_A)] |= BIT_MASK(BTN_A) |
BIT_MASK(BTN_B) | BIT_MASK(BTN_C) |
BIT_MASK(BTN_MODE);
+ /* fall through */
default:
input_dev->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_2) |
BIT_MASK(BTN_3) | BIT_MASK(BTN_4) |
BIT_MASK(BTN_5) | BIT_MASK(BTN_6) |
BIT_MASK(BTN_7) | BIT_MASK(BTN_8);
+ /* fall through */
case SPACEBALL_3003C:
input_dev->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_1) |
BIT_MASK(BTN_8);
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index 39bcbc38997f..8a07a426f88e 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -127,10 +127,9 @@ static inline void bfin_kpad_clear_irq(void)
bfin_write_KPAD_ROWCOL(0xFFFF);
}
-static void bfin_kpad_timer(unsigned long data)
+static void bfin_kpad_timer(struct timer_list *t)
{
- struct platform_device *pdev = (struct platform_device *) data;
- struct bf54x_kpad *bf54x_kpad = platform_get_drvdata(pdev);
+ struct bf54x_kpad *bf54x_kpad = from_timer(bf54x_kpad, t, timer);
if (bfin_kpad_get_keypressed(bf54x_kpad)) {
/* Try again later */
@@ -298,7 +297,7 @@ static int bfin_kpad_probe(struct platform_device *pdev)
/* Init Keypad Key Up/Release test timer */
- setup_timer(&bf54x_kpad->timer, bfin_kpad_timer, (unsigned long) pdev);
+ timer_setup(&bf54x_kpad->timer, bfin_kpad_timer, 0);
bfin_write_KPAD_PRESCALE(bfin_kpad_get_prescale(TIME_SCALE));
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index e9f0ebf3267a..87e613dc33b8 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -419,9 +419,9 @@ static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void gpio_keys_irq_timer(unsigned long _data)
+static void gpio_keys_irq_timer(struct timer_list *t)
{
- struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
+ struct gpio_button_data *bdata = from_timer(bdata, t, release_timer);
struct input_dev *input = bdata->input;
unsigned long flags;
@@ -582,8 +582,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
}
bdata->release_delay = button->debounce_interval;
- setup_timer(&bdata->release_timer,
- gpio_keys_irq_timer, (unsigned long)bdata);
+ timer_setup(&bdata->release_timer, gpio_keys_irq_timer, 0);
isr = gpio_keys_irq_isr;
irqflags = 0;
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 2165f3dd328b..25d61d8d4fc4 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -184,9 +184,9 @@ static void imx_keypad_fire_events(struct imx_keypad *keypad,
/*
* imx_keypad_check_for_events is the timer handler.
*/
-static void imx_keypad_check_for_events(unsigned long data)
+static void imx_keypad_check_for_events(struct timer_list *t)
{
- struct imx_keypad *keypad = (struct imx_keypad *) data;
+ struct imx_keypad *keypad = from_timer(keypad, t, check_matrix_timer);
unsigned short matrix_volatile_state[MAX_MATRIX_KEY_COLS];
unsigned short reg_val;
bool state_changed, is_zero_matrix;
@@ -456,8 +456,8 @@ static int imx_keypad_probe(struct platform_device *pdev)
keypad->irq = irq;
keypad->stable_count = 0;
- setup_timer(&keypad->check_matrix_timer,
- imx_keypad_check_for_events, (unsigned long) keypad);
+ timer_setup(&keypad->check_matrix_timer,
+ imx_keypad_check_for_events, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/input/keyboard/locomokbd.c b/drivers/input/keyboard/locomokbd.c
index 0d74312d5b02..30d610758595 100644
--- a/drivers/input/keyboard/locomokbd.c
+++ b/drivers/input/keyboard/locomokbd.c
@@ -210,9 +210,9 @@ static irqreturn_t locomokbd_interrupt(int irq, void *dev_id)
/*
* LoCoMo timer checking for released keys
*/
-static void locomokbd_timer_callback(unsigned long data)
+static void locomokbd_timer_callback(struct timer_list *t)
{
- struct locomokbd *locomokbd = (struct locomokbd *) data;
+ struct locomokbd *locomokbd = from_timer(locomokbd, t, timer);
locomokbd_scankeyboard(locomokbd);
}
@@ -264,8 +264,7 @@ static int locomokbd_probe(struct locomo_dev *dev)
spin_lock_init(&locomokbd->lock);
- setup_timer(&locomokbd->timer, locomokbd_timer_callback,
- (unsigned long)locomokbd);
+ timer_setup(&locomokbd->timer, locomokbd_timer_callback, 0);
locomokbd->suspend_jiffies = jiffies;
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 146b26f665f6..7bd107910a6e 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -41,7 +41,7 @@
#undef NEW_BOARD_LEARNING_MODE
static void omap_kp_tasklet(unsigned long);
-static void omap_kp_timer(unsigned long);
+static void omap_kp_timer(struct timer_list *);
static unsigned char keypad_state[8];
static DEFINE_MUTEX(kp_enable_mutex);
@@ -74,7 +74,7 @@ static irqreturn_t omap_kp_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void omap_kp_timer(unsigned long data)
+static void omap_kp_timer(struct timer_list *unused)
{
tasklet_schedule(&kp_tasklet);
}
@@ -233,7 +233,7 @@ static int omap_kp_probe(struct platform_device *pdev)
col_idx = 0;
row_idx = 0;
- setup_timer(&omap_kp->timer, omap_kp_timer, (unsigned long)omap_kp);
+ timer_setup(&omap_kp->timer, omap_kp_timer, 0);
/* get the irq and init timer*/
kp_tasklet.data = (unsigned long) omap_kp;
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 7544888c4749..53c768b95939 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -45,9 +45,9 @@ struct pwrkey_drv_data {
struct input_dev *input;
};
-static void imx_imx_snvs_check_for_events(unsigned long data)
+static void imx_imx_snvs_check_for_events(struct timer_list *t)
{
- struct pwrkey_drv_data *pdata = (struct pwrkey_drv_data *) data;
+ struct pwrkey_drv_data *pdata = from_timer(pdata, t, check_timer);
struct input_dev *input = pdata->input;
u32 state;
@@ -134,8 +134,7 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
/* clear the unexpected interrupt before driver ready */
regmap_write(pdata->snvs, SNVS_LPSR_REG, SNVS_LPSR_SPO);
- setup_timer(&pdata->check_timer,
- imx_imx_snvs_check_for_events, (unsigned long) pdata);
+ timer_setup(&pdata->check_timer, imx_imx_snvs_check_for_events, 0);
input = devm_input_allocate_device(&pdev->dev);
if (!input) {
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index edc1385ca00b..875205f445b5 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -251,9 +251,9 @@ static void tegra_kbc_set_fifo_interrupt(struct tegra_kbc *kbc, bool enable)
writel(val, kbc->mmio + KBC_CONTROL_0);
}
-static void tegra_kbc_keypress_timer(unsigned long data)
+static void tegra_kbc_keypress_timer(struct timer_list *t)
{
- struct tegra_kbc *kbc = (struct tegra_kbc *)data;
+ struct tegra_kbc *kbc = from_timer(kbc, t, timer);
unsigned long flags;
u32 val;
unsigned int i;
@@ -655,7 +655,7 @@ static int tegra_kbc_probe(struct platform_device *pdev)
return -ENOMEM;
}
- setup_timer(&kbc->timer, tegra_kbc_keypress_timer, (unsigned long)kbc);
+ timer_setup(&kbc->timer, tegra_kbc_keypress_timer, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
kbc->mmio = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 2b2d02f408bb..a3e79bf5a04b 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -796,7 +796,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
if (pdata->watermark) {
ac->int_mask |= WATERMARK;
- if (!FIFO_MODE(pdata->fifo_mode))
+ if (FIFO_MODE(pdata->fifo_mode) == FIFO_BYPASS)
ac->pdata.fifo_mode |= FIFO_STREAM;
} else {
ac->int_mask |= DATA_READY;
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
index 2e8f801932be..a1db1e5040dc 100644
--- a/drivers/input/misc/regulator-haptic.c
+++ b/drivers/input/misc/regulator-haptic.c
@@ -233,7 +233,7 @@ static int __maybe_unused regulator_haptic_resume(struct device *dev)
haptic->suspended = false;
- magnitude = ACCESS_ONCE(haptic->magnitude);
+ magnitude = READ_ONCE(haptic->magnitude);
if (magnitude)
regulator_haptic_set_voltage(haptic, magnitude);
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 443151de90c6..39ddd9a73feb 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -31,6 +31,7 @@
* 0.1 20/06/2002
* - first public version
*/
+#include <uapi/linux/uinput.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -38,10 +39,47 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
-#include <linux/uinput.h>
#include <linux/input/mt.h>
#include "../input-compat.h"
+#define UINPUT_NAME "uinput"
+#define UINPUT_BUFFER_SIZE 16
+#define UINPUT_NUM_REQUESTS 16
+
+enum uinput_state { UIST_NEW_DEVICE, UIST_SETUP_COMPLETE, UIST_CREATED };
+
+struct uinput_request {
+ unsigned int id;
+ unsigned int code; /* UI_FF_UPLOAD, UI_FF_ERASE */
+
+ int retval;
+ struct completion done;
+
+ union {
+ unsigned int effect_id;
+ struct {
+ struct ff_effect *effect;
+ struct ff_effect *old;
+ } upload;
+ } u;
+};
+
+struct uinput_device {
+ struct input_dev *dev;
+ struct mutex mutex;
+ enum uinput_state state;
+ wait_queue_head_t waitq;
+ unsigned char ready;
+ unsigned char head;
+ unsigned char tail;
+ struct input_event buff[UINPUT_BUFFER_SIZE];
+ unsigned int ff_effects_max;
+
+ struct uinput_request *requests[UINPUT_NUM_REQUESTS];
+ wait_queue_head_t requests_waitq;
+ spinlock_t requests_lock;
+};
+
static int uinput_dev_event(struct input_dev *dev,
unsigned int type, unsigned int code, int value)
{
@@ -149,7 +187,11 @@ static int uinput_request_submit(struct uinput_device *udev,
if (retval)
goto out;
- wait_for_completion(&request->done);
+ if (!wait_for_completion_timeout(&request->done, 30 * HZ)) {
+ retval = -ETIMEDOUT;
+ goto out;
+ }
+
retval = request->retval;
out:
@@ -320,6 +362,10 @@ static int uinput_create_device(struct uinput_device *udev)
dev->flush = uinput_dev_flush;
}
+ dev->event = uinput_dev_event;
+
+ input_set_drvdata(udev->dev, udev);
+
error = input_register_device(udev->dev);
if (error)
goto fail2;
@@ -402,18 +448,6 @@ static int uinput_validate_absbits(struct input_dev *dev)
return 0;
}
-static int uinput_allocate_device(struct uinput_device *udev)
-{
- udev->dev = input_allocate_device();
- if (!udev->dev)
- return -ENOMEM;
-
- udev->dev->event = uinput_dev_event;
- input_set_drvdata(udev->dev, udev);
-
- return 0;
-}
-
static int uinput_dev_setup(struct uinput_device *udev,
struct uinput_setup __user *arg)
{
@@ -489,9 +523,9 @@ static int uinput_setup_device_legacy(struct uinput_device *udev,
return -EINVAL;
if (!udev->dev) {
- retval = uinput_allocate_device(udev);
- if (retval)
- return retval;
+ udev->dev = input_allocate_device();
+ if (!udev->dev)
+ return -ENOMEM;
}
dev = udev->dev;
@@ -822,162 +856,163 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
return retval;
if (!udev->dev) {
- retval = uinput_allocate_device(udev);
- if (retval)
+ udev->dev = input_allocate_device();
+ if (!udev->dev) {
+ retval = -ENOMEM;
goto out;
+ }
}
switch (cmd) {
- case UI_GET_VERSION:
- if (put_user(UINPUT_VERSION,
- (unsigned int __user *)p))
- retval = -EFAULT;
- goto out;
+ case UI_GET_VERSION:
+ if (put_user(UINPUT_VERSION, (unsigned int __user *)p))
+ retval = -EFAULT;
+ goto out;
- case UI_DEV_CREATE:
- retval = uinput_create_device(udev);
- goto out;
+ case UI_DEV_CREATE:
+ retval = uinput_create_device(udev);
+ goto out;
- case UI_DEV_DESTROY:
- uinput_destroy_device(udev);
- goto out;
+ case UI_DEV_DESTROY:
+ uinput_destroy_device(udev);
+ goto out;
- case UI_DEV_SETUP:
- retval = uinput_dev_setup(udev, p);
- goto out;
+ case UI_DEV_SETUP:
+ retval = uinput_dev_setup(udev, p);
+ goto out;
- /* UI_ABS_SETUP is handled in the variable size ioctls */
+ /* UI_ABS_SETUP is handled in the variable size ioctls */
- case UI_SET_EVBIT:
- retval = uinput_set_bit(arg, evbit, EV_MAX);
- goto out;
+ case UI_SET_EVBIT:
+ retval = uinput_set_bit(arg, evbit, EV_MAX);
+ goto out;
- case UI_SET_KEYBIT:
- retval = uinput_set_bit(arg, keybit, KEY_MAX);
- goto out;
+ case UI_SET_KEYBIT:
+ retval = uinput_set_bit(arg, keybit, KEY_MAX);
+ goto out;
- case UI_SET_RELBIT:
- retval = uinput_set_bit(arg, relbit, REL_MAX);
- goto out;
+ case UI_SET_RELBIT:
+ retval = uinput_set_bit(arg, relbit, REL_MAX);
+ goto out;
- case UI_SET_ABSBIT:
- retval = uinput_set_bit(arg, absbit, ABS_MAX);
- goto out;
+ case UI_SET_ABSBIT:
+ retval = uinput_set_bit(arg, absbit, ABS_MAX);
+ goto out;
- case UI_SET_MSCBIT:
- retval = uinput_set_bit(arg, mscbit, MSC_MAX);
- goto out;
+ case UI_SET_MSCBIT:
+ retval = uinput_set_bit(arg, mscbit, MSC_MAX);
+ goto out;
- case UI_SET_LEDBIT:
- retval = uinput_set_bit(arg, ledbit, LED_MAX);
- goto out;
+ case UI_SET_LEDBIT:
+ retval = uinput_set_bit(arg, ledbit, LED_MAX);
+ goto out;
+
+ case UI_SET_SNDBIT:
+ retval = uinput_set_bit(arg, sndbit, SND_MAX);
+ goto out;
- case UI_SET_SNDBIT:
- retval = uinput_set_bit(arg, sndbit, SND_MAX);
+ case UI_SET_FFBIT:
+ retval = uinput_set_bit(arg, ffbit, FF_MAX);
+ goto out;
+
+ case UI_SET_SWBIT:
+ retval = uinput_set_bit(arg, swbit, SW_MAX);
+ goto out;
+
+ case UI_SET_PROPBIT:
+ retval = uinput_set_bit(arg, propbit, INPUT_PROP_MAX);
+ goto out;
+
+ case UI_SET_PHYS:
+ if (udev->state == UIST_CREATED) {
+ retval = -EINVAL;
goto out;
+ }
- case UI_SET_FFBIT:
- retval = uinput_set_bit(arg, ffbit, FF_MAX);
+ phys = strndup_user(p, 1024);
+ if (IS_ERR(phys)) {
+ retval = PTR_ERR(phys);
goto out;
+ }
+
+ kfree(udev->dev->phys);
+ udev->dev->phys = phys;
+ goto out;
- case UI_SET_SWBIT:
- retval = uinput_set_bit(arg, swbit, SW_MAX);
+ case UI_BEGIN_FF_UPLOAD:
+ retval = uinput_ff_upload_from_user(p, &ff_up);
+ if (retval)
goto out;
- case UI_SET_PROPBIT:
- retval = uinput_set_bit(arg, propbit, INPUT_PROP_MAX);
+ req = uinput_request_find(udev, ff_up.request_id);
+ if (!req || req->code != UI_FF_UPLOAD ||
+ !req->u.upload.effect) {
+ retval = -EINVAL;
goto out;
+ }
- case UI_SET_PHYS:
- if (udev->state == UIST_CREATED) {
- retval = -EINVAL;
- goto out;
- }
+ ff_up.retval = 0;
+ ff_up.effect = *req->u.upload.effect;
+ if (req->u.upload.old)
+ ff_up.old = *req->u.upload.old;
+ else
+ memset(&ff_up.old, 0, sizeof(struct ff_effect));
- phys = strndup_user(p, 1024);
- if (IS_ERR(phys)) {
- retval = PTR_ERR(phys);
- goto out;
- }
+ retval = uinput_ff_upload_to_user(p, &ff_up);
+ goto out;
- kfree(udev->dev->phys);
- udev->dev->phys = phys;
+ case UI_BEGIN_FF_ERASE:
+ if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) {
+ retval = -EFAULT;
goto out;
+ }
- case UI_BEGIN_FF_UPLOAD:
- retval = uinput_ff_upload_from_user(p, &ff_up);
- if (retval)
- goto out;
-
- req = uinput_request_find(udev, ff_up.request_id);
- if (!req || req->code != UI_FF_UPLOAD ||
- !req->u.upload.effect) {
- retval = -EINVAL;
- goto out;
- }
-
- ff_up.retval = 0;
- ff_up.effect = *req->u.upload.effect;
- if (req->u.upload.old)
- ff_up.old = *req->u.upload.old;
- else
- memset(&ff_up.old, 0, sizeof(struct ff_effect));
-
- retval = uinput_ff_upload_to_user(p, &ff_up);
+ req = uinput_request_find(udev, ff_erase.request_id);
+ if (!req || req->code != UI_FF_ERASE) {
+ retval = -EINVAL;
goto out;
+ }
- case UI_BEGIN_FF_ERASE:
- if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) {
- retval = -EFAULT;
- goto out;
- }
-
- req = uinput_request_find(udev, ff_erase.request_id);
- if (!req || req->code != UI_FF_ERASE) {
- retval = -EINVAL;
- goto out;
- }
-
- ff_erase.retval = 0;
- ff_erase.effect_id = req->u.effect_id;
- if (copy_to_user(p, &ff_erase, sizeof(ff_erase))) {
- retval = -EFAULT;
- goto out;
- }
-
+ ff_erase.retval = 0;
+ ff_erase.effect_id = req->u.effect_id;
+ if (copy_to_user(p, &ff_erase, sizeof(ff_erase))) {
+ retval = -EFAULT;
goto out;
+ }
- case UI_END_FF_UPLOAD:
- retval = uinput_ff_upload_from_user(p, &ff_up);
- if (retval)
- goto out;
+ goto out;
- req = uinput_request_find(udev, ff_up.request_id);
- if (!req || req->code != UI_FF_UPLOAD ||
- !req->u.upload.effect) {
- retval = -EINVAL;
- goto out;
- }
+ case UI_END_FF_UPLOAD:
+ retval = uinput_ff_upload_from_user(p, &ff_up);
+ if (retval)
+ goto out;
- req->retval = ff_up.retval;
- complete(&req->done);
+ req = uinput_request_find(udev, ff_up.request_id);
+ if (!req || req->code != UI_FF_UPLOAD ||
+ !req->u.upload.effect) {
+ retval = -EINVAL;
goto out;
+ }
- case UI_END_FF_ERASE:
- if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) {
- retval = -EFAULT;
- goto out;
- }
+ req->retval = ff_up.retval;
+ complete(&req->done);
+ goto out;
- req = uinput_request_find(udev, ff_erase.request_id);
- if (!req || req->code != UI_FF_ERASE) {
- retval = -EINVAL;
- goto out;
- }
+ case UI_END_FF_ERASE:
+ if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) {
+ retval = -EFAULT;
+ goto out;
+ }
- req->retval = ff_erase.retval;
- complete(&req->done);
+ req = uinput_request_find(udev, ff_erase.request_id);
+ if (!req || req->code != UI_FF_ERASE) {
+ retval = -EINVAL;
goto out;
+ }
+
+ req->retval = ff_erase.retval;
+ complete(&req->done);
+ goto out;
}
size = _IOC_SIZE(cmd);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 850b00e3ad8e..579b899add26 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1587,10 +1587,10 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
return PSMOUSE_GOOD_DATA;
}
-static void alps_flush_packet(unsigned long data)
+static void alps_flush_packet(struct timer_list *t)
{
- struct psmouse *psmouse = (struct psmouse *)data;
- struct alps_data *priv = psmouse->private;
+ struct alps_data *priv = from_timer(priv, t, timer);
+ struct psmouse *psmouse = priv->psmouse;
serio_pause_rx(psmouse->ps2dev.serio);
@@ -2702,7 +2702,7 @@ static int alps_set_protocol(struct psmouse *psmouse,
{
psmouse->private = priv;
- setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
+ timer_setup(&priv->timer, alps_flush_packet, 0);
priv->proto_version = protocol->version;
priv->byte0 = protocol->byte0;
diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c
index b64b81599f7e..f2aabf7f906f 100644
--- a/drivers/input/mouse/byd.c
+++ b/drivers/input/mouse/byd.c
@@ -227,6 +227,7 @@
struct byd_data {
struct timer_list timer;
+ struct psmouse *psmouse;
s32 abs_x;
s32 abs_y;
typeof(jiffies) last_touch_time;
@@ -251,10 +252,10 @@ static void byd_report_input(struct psmouse *psmouse)
input_sync(dev);
}
-static void byd_clear_touch(unsigned long data)
+static void byd_clear_touch(struct timer_list *t)
{
- struct psmouse *psmouse = (struct psmouse *)data;
- struct byd_data *priv = psmouse->private;
+ struct byd_data *priv = from_timer(priv, t, timer);
+ struct psmouse *psmouse = priv->psmouse;
serio_pause_rx(psmouse->ps2dev.serio);
priv->touch = false;
@@ -478,7 +479,8 @@ int byd_init(struct psmouse *psmouse)
if (!priv)
return -ENOMEM;
- setup_timer(&priv->timer, byd_clear_touch, (unsigned long) psmouse);
+ priv->psmouse = psmouse;
+ timer_setup(&priv->timer, byd_clear_touch, 0);
psmouse->private = priv;
psmouse->disconnect = byd_disconnect;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index d6135900da64..2111a85d0b17 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/input/mt.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -1141,10 +1142,13 @@ static int elan_probe(struct i2c_client *client,
return error;
/*
- * Systems using device tree should set up interrupt via DTS,
- * the rest will use the default falling edge interrupts.
+ * Platform code (ACPI, DTS) should normally set up interrupt
+ * for us, but in case it did not let's fall back to using falling
+ * edge to be compatible with older Chromebooks.
*/
- irqflags = dev->of_node ? 0 : IRQF_TRIGGER_FALLING;
+ irqflags = irq_get_trigger_type(client->irq);
+ if (!irqflags)
+ irqflags = IRQF_TRIGGER_FALLING;
error = devm_request_threaded_irq(dev, client->irq, NULL, elan_isr,
irqflags | IRQF_ONESHOT,
@@ -1255,7 +1259,6 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0602", 0 },
{ "ELAN0605", 0 },
{ "ELAN0608", 0 },
- { "ELAN0605", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
{ "ELAN060C", 0 },
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index ced07391304b..a26d8be6f795 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -2,6 +2,7 @@
* Driver for simulating a mouse on GPIO lines.
*
* Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -11,9 +12,35 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/input-polldev.h>
-#include <linux/gpio.h>
-#include <linux/gpio_mouse.h>
-
+#include <linux/gpio/consumer.h>
+#include <linux/property.h>
+#include <linux/of.h>
+
+/**
+ * struct gpio_mouse
+ * @scan_ms: the scan interval in milliseconds.
+ * @up: GPIO line for up value.
+ * @down: GPIO line for down value.
+ * @left: GPIO line for left value.
+ * @right: GPIO line for right value.
+ * @bleft: GPIO line for left button.
+ * @bmiddle: GPIO line for middle button.
+ * @bright: GPIO line for right button.
+ *
+ * This struct must be added to the platform_device in the board code.
+ * It is used by the gpio_mouse driver to setup GPIO lines and to
+ * calculate mouse movement.
+ */
+struct gpio_mouse {
+ u32 scan_ms;
+ struct gpio_desc *up;
+ struct gpio_desc *down;
+ struct gpio_desc *left;
+ struct gpio_desc *right;
+ struct gpio_desc *bleft;
+ struct gpio_desc *bmiddle;
+ struct gpio_desc *bright;
+};
/*
* Timer function which is run every scan_ms ms when the device is opened.
@@ -21,24 +48,22 @@
*/
static void gpio_mouse_scan(struct input_polled_dev *dev)
{
- struct gpio_mouse_platform_data *gpio = dev->private;
+ struct gpio_mouse *gpio = dev->private;
struct input_dev *input = dev->input;
int x, y;
- if (gpio->bleft >= 0)
+ if (gpio->bleft)
input_report_key(input, BTN_LEFT,
- gpio_get_value(gpio->bleft) ^ gpio->polarity);
- if (gpio->bmiddle >= 0)
+ gpiod_get_value(gpio->bleft));
+ if (gpio->bmiddle)
input_report_key(input, BTN_MIDDLE,
- gpio_get_value(gpio->bmiddle) ^ gpio->polarity);
- if (gpio->bright >= 0)
+ gpiod_get_value(gpio->bmiddle));
+ if (gpio->bright)
input_report_key(input, BTN_RIGHT,
- gpio_get_value(gpio->bright) ^ gpio->polarity);
+ gpiod_get_value(gpio->bright));
- x = (gpio_get_value(gpio->right) ^ gpio->polarity)
- - (gpio_get_value(gpio->left) ^ gpio->polarity);
- y = (gpio_get_value(gpio->down) ^ gpio->polarity)
- - (gpio_get_value(gpio->up) ^ gpio->polarity);
+ x = gpiod_get_value(gpio->right) - gpiod_get_value(gpio->left);
+ y = gpiod_get_value(gpio->down) - gpiod_get_value(gpio->up);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
@@ -47,65 +72,61 @@ static void gpio_mouse_scan(struct input_polled_dev *dev)
static int gpio_mouse_probe(struct platform_device *pdev)
{
- struct gpio_mouse_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct gpio_mouse *gmouse;
struct input_polled_dev *input_poll;
struct input_dev *input;
- int pin, i;
- int error;
-
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data\n");
- error = -ENXIO;
- goto out;
- }
-
- if (pdata->scan_ms < 0) {
- dev_err(&pdev->dev, "invalid scan time\n");
- error = -EINVAL;
- goto out;
- }
-
- for (i = 0; i < GPIO_MOUSE_PIN_MAX; i++) {
- pin = pdata->pins[i];
-
- if (pin < 0) {
-
- if (i <= GPIO_MOUSE_PIN_RIGHT) {
- /* Mouse direction is required. */
- dev_err(&pdev->dev,
- "missing GPIO for directions\n");
- error = -EINVAL;
- goto out_free_gpios;
- }
-
- if (i == GPIO_MOUSE_PIN_BLEFT)
- dev_dbg(&pdev->dev, "no left button defined\n");
-
- } else {
- error = gpio_request(pin, "gpio_mouse");
- if (error) {
- dev_err(&pdev->dev, "fail %d pin (%d idx)\n",
- pin, i);
- goto out_free_gpios;
- }
-
- gpio_direction_input(pin);
- }
+ int ret;
+
+ gmouse = devm_kzalloc(dev, sizeof(*gmouse), GFP_KERNEL);
+ if (!gmouse)
+ return -ENOMEM;
+
+ /* Assign some default scanning time */
+ ret = device_property_read_u32(dev, "scan-interval-ms",
+ &gmouse->scan_ms);
+ if (ret || gmouse->scan_ms == 0) {
+ dev_warn(dev, "invalid scan time, set to 50 ms\n");
+ gmouse->scan_ms = 50;
}
- input_poll = input_allocate_polled_device();
+ gmouse->up = devm_gpiod_get(dev, "up", GPIOD_IN);
+ if (IS_ERR(gmouse->up))
+ return PTR_ERR(gmouse->up);
+ gmouse->down = devm_gpiod_get(dev, "down", GPIOD_IN);
+ if (IS_ERR(gmouse->down))
+ return PTR_ERR(gmouse->down);
+ gmouse->left = devm_gpiod_get(dev, "left", GPIOD_IN);
+ if (IS_ERR(gmouse->left))
+ return PTR_ERR(gmouse->left);
+ gmouse->right = devm_gpiod_get(dev, "right", GPIOD_IN);
+ if (IS_ERR(gmouse->right))
+ return PTR_ERR(gmouse->right);
+
+ gmouse->bleft = devm_gpiod_get_optional(dev, "button-left", GPIOD_IN);
+ if (IS_ERR(gmouse->bleft))
+ return PTR_ERR(gmouse->bleft);
+ gmouse->bmiddle = devm_gpiod_get_optional(dev, "button-middle",
+ GPIOD_IN);
+ if (IS_ERR(gmouse->bmiddle))
+ return PTR_ERR(gmouse->bmiddle);
+ gmouse->bright = devm_gpiod_get_optional(dev, "button-right",
+ GPIOD_IN);
+ if (IS_ERR(gmouse->bright))
+ return PTR_ERR(gmouse->bright);
+
+ input_poll = devm_input_allocate_polled_device(dev);
if (!input_poll) {
- dev_err(&pdev->dev, "not enough memory for input device\n");
- error = -ENOMEM;
- goto out_free_gpios;
+ dev_err(dev, "not enough memory for input device\n");
+ return -ENOMEM;
}
platform_set_drvdata(pdev, input_poll);
/* set input-polldev handlers */
- input_poll->private = pdata;
+ input_poll->private = gmouse;
input_poll->poll = gpio_mouse_scan;
- input_poll->poll_interval = pdata->scan_ms;
+ input_poll->poll_interval = gmouse->scan_ms;
input = input_poll->input;
input->name = pdev->name;
@@ -114,63 +135,39 @@ static int gpio_mouse_probe(struct platform_device *pdev)
input_set_capability(input, EV_REL, REL_X);
input_set_capability(input, EV_REL, REL_Y);
- if (pdata->bleft >= 0)
+ if (gmouse->bleft)
input_set_capability(input, EV_KEY, BTN_LEFT);
- if (pdata->bmiddle >= 0)
+ if (gmouse->bmiddle)
input_set_capability(input, EV_KEY, BTN_MIDDLE);
- if (pdata->bright >= 0)
+ if (gmouse->bright)
input_set_capability(input, EV_KEY, BTN_RIGHT);
- error = input_register_polled_device(input_poll);
- if (error) {
- dev_err(&pdev->dev, "could not register input device\n");
- goto out_free_polldev;
+ ret = input_register_polled_device(input_poll);
+ if (ret) {
+ dev_err(dev, "could not register input device\n");
+ return ret;
}
- dev_dbg(&pdev->dev, "%d ms scan time, buttons: %s%s%s\n",
- pdata->scan_ms,
- pdata->bleft < 0 ? "" : "left ",
- pdata->bmiddle < 0 ? "" : "middle ",
- pdata->bright < 0 ? "" : "right");
+ dev_dbg(dev, "%d ms scan time, buttons: %s%s%s\n",
+ gmouse->scan_ms,
+ gmouse->bleft ? "" : "left ",
+ gmouse->bmiddle ? "" : "middle ",
+ gmouse->bright ? "" : "right");
return 0;
-
- out_free_polldev:
- input_free_polled_device(input_poll);
-
- out_free_gpios:
- while (--i >= 0) {
- pin = pdata->pins[i];
- if (pin)
- gpio_free(pin);
- }
- out:
- return error;
}
-static int gpio_mouse_remove(struct platform_device *pdev)
-{
- struct input_polled_dev *input = platform_get_drvdata(pdev);
- struct gpio_mouse_platform_data *pdata = input->private;
- int pin, i;
-
- input_unregister_polled_device(input);
- input_free_polled_device(input);
-
- for (i = 0; i < GPIO_MOUSE_PIN_MAX; i++) {
- pin = pdata->pins[i];
- if (pin >= 0)
- gpio_free(pin);
- }
-
- return 0;
-}
+static const struct of_device_id gpio_mouse_of_match[] = {
+ { .compatible = "gpio-mouse", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, gpio_mouse_of_match);
static struct platform_driver gpio_mouse_device_driver = {
.probe = gpio_mouse_probe,
- .remove = gpio_mouse_remove,
.driver = {
.name = "gpio_mouse",
+ .of_match_table = gpio_mouse_of_match,
}
};
module_platform_driver(gpio_mouse_device_driver);
@@ -179,4 +176,3 @@ MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("GPIO mouse driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:gpio_mouse"); /* work with hotplug and coldplug */
-
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index 0f586780ceb4..1ae5c1ef3f5b 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -316,11 +316,9 @@ static int vmmouse_enable(struct psmouse *psmouse)
/*
* Array of supported hypervisors.
*/
-static const struct hypervisor_x86 *vmmouse_supported_hypervisors[] = {
- &x86_hyper_vmware,
-#ifdef CONFIG_KVM_GUEST
- &x86_hyper_kvm,
-#endif
+static enum x86_hypervisor_type vmmouse_supported_hypervisors[] = {
+ X86_HYPER_VMWARE,
+ X86_HYPER_KVM,
};
/**
@@ -331,7 +329,7 @@ static bool vmmouse_check_hypervisor(void)
int i;
for (i = 0; i < ARRAY_SIZE(vmmouse_supported_hypervisors); i++)
- if (vmmouse_supported_hypervisors[i] == x86_hyper)
+ if (vmmouse_supported_hypervisors[i] == x86_hyper_type)
return true;
return false;
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index f5206e2c767e..5343f2c08f15 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -73,7 +73,7 @@ enum rmi_f54_report_type {
F54_MAX_REPORT_TYPE,
};
-const char *rmi_f54_report_type_names[] = {
+static const char * const rmi_f54_report_type_names[] = {
[F54_REPORT_NONE] = "Unknown",
[F54_8BIT_IMAGE] = "Normalized 8-Bit Image",
[F54_16BIT_IMAGE] = "Normalized 16-Bit Image",
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index 65605e4ef3cf..d66d01c5373b 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -784,7 +784,7 @@ static void hil_mlcs_process(unsigned long unused)
/************************* Keepalive timer task *********************/
-static void hil_mlcs_timer(unsigned long data)
+static void hil_mlcs_timer(struct timer_list *unused)
{
hil_mlcs_probe = 1;
tasklet_schedule(&hil_mlcs_tasklet);
@@ -998,7 +998,7 @@ int hil_mlc_unregister(hil_mlc *mlc)
static int __init hil_mlc_init(void)
{
- setup_timer(&hil_mlcs_kicker, &hil_mlcs_timer, 0);
+ timer_setup(&hil_mlcs_kicker, &hil_mlcs_timer, 0);
mod_timer(&hil_mlcs_kicker, jiffies + HZ);
tasklet_enable(&hil_mlcs_tasklet);
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 8eef6849d066..1d7c7d81a5ef 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -794,7 +794,7 @@ int hp_sdc_release_cooked_irq(hp_sdc_irqhook *callback)
/************************* Keepalive timer task *********************/
-static void hp_sdc_kicker(unsigned long data)
+static void hp_sdc_kicker(struct timer_list *unused)
{
tasklet_schedule(&hp_sdc.task);
/* Re-insert the periodic task. */
@@ -909,9 +909,8 @@ static int __init hp_sdc_init(void)
down(&s_sync); /* Wait for t_sync to complete */
/* Create the keepalive task */
- init_timer(&hp_sdc.kicker);
+ timer_setup(&hp_sdc.kicker, hp_sdc_kicker, 0);
hp_sdc.kicker.expires = jiffies + HZ;
- hp_sdc.kicker.function = &hp_sdc_kicker;
add_timer(&hp_sdc.kicker);
hp_sdc.dev_err = 0;
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index b50e3817f3c4..c62cceb97bb1 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -366,6 +366,7 @@ static int ps2_gpio_probe(struct platform_device *pdev)
gpiod_cansleep(drvdata->gpio_clk)) {
dev_err(dev, "GPIO data or clk are connected via slow bus\n");
error = -EINVAL;
+ goto err_free_serio;
}
drvdata->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index b3e688911fd9..f9e5c793f4f0 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -47,6 +47,8 @@ struct ps2if {
struct serio *io;
struct sa1111_dev *dev;
void __iomem *base;
+ int rx_irq;
+ int tx_irq;
unsigned int open;
spinlock_t lock;
unsigned int head;
@@ -64,22 +66,22 @@ static irqreturn_t ps2_rxint(int irq, void *dev_id)
struct ps2if *ps2if = dev_id;
unsigned int scancode, flag, status;
- status = sa1111_readl(ps2if->base + PS2STAT);
+ status = readl_relaxed(ps2if->base + PS2STAT);
while (status & PS2STAT_RXF) {
if (status & PS2STAT_STP)
- sa1111_writel(PS2STAT_STP, ps2if->base + PS2STAT);
+ writel_relaxed(PS2STAT_STP, ps2if->base + PS2STAT);
flag = (status & PS2STAT_STP ? SERIO_FRAME : 0) |
(status & PS2STAT_RXP ? 0 : SERIO_PARITY);
- scancode = sa1111_readl(ps2if->base + PS2DATA) & 0xff;
+ scancode = readl_relaxed(ps2if->base + PS2DATA) & 0xff;
if (hweight8(scancode) & 1)
flag ^= SERIO_PARITY;
serio_interrupt(ps2if->io, scancode, flag);
- status = sa1111_readl(ps2if->base + PS2STAT);
+ status = readl_relaxed(ps2if->base + PS2STAT);
}
return IRQ_HANDLED;
@@ -94,12 +96,12 @@ static irqreturn_t ps2_txint(int irq, void *dev_id)
unsigned int status;
spin_lock(&ps2if->lock);
- status = sa1111_readl(ps2if->base + PS2STAT);
+ status = readl_relaxed(ps2if->base + PS2STAT);
if (ps2if->head == ps2if->tail) {
disable_irq_nosync(irq);
/* done */
} else if (status & PS2STAT_TXE) {
- sa1111_writel(ps2if->buf[ps2if->tail], ps2if->base + PS2DATA);
+ writel_relaxed(ps2if->buf[ps2if->tail], ps2if->base + PS2DATA);
ps2if->tail = (ps2if->tail + 1) & (sizeof(ps2if->buf) - 1);
}
spin_unlock(&ps2if->lock);
@@ -122,11 +124,11 @@ static int ps2_write(struct serio *io, unsigned char val)
/*
* If the TX register is empty, we can go straight out.
*/
- if (sa1111_readl(ps2if->base + PS2STAT) & PS2STAT_TXE) {
- sa1111_writel(val, ps2if->base + PS2DATA);
+ if (readl_relaxed(ps2if->base + PS2STAT) & PS2STAT_TXE) {
+ writel_relaxed(val, ps2if->base + PS2DATA);
} else {
if (ps2if->head == ps2if->tail)
- enable_irq(ps2if->dev->irq[1]);
+ enable_irq(ps2if->tx_irq);
head = (ps2if->head + 1) & (sizeof(ps2if->buf) - 1);
if (head != ps2if->tail) {
ps2if->buf[ps2if->head] = val;
@@ -147,30 +149,30 @@ static int ps2_open(struct serio *io)
if (ret)
return ret;
- ret = request_irq(ps2if->dev->irq[0], ps2_rxint, 0,
+ ret = request_irq(ps2if->rx_irq, ps2_rxint, 0,
SA1111_DRIVER_NAME(ps2if->dev), ps2if);
if (ret) {
printk(KERN_ERR "sa1111ps2: could not allocate IRQ%d: %d\n",
- ps2if->dev->irq[0], ret);
+ ps2if->rx_irq, ret);
sa1111_disable_device(ps2if->dev);
return ret;
}
- ret = request_irq(ps2if->dev->irq[1], ps2_txint, 0,
+ ret = request_irq(ps2if->tx_irq, ps2_txint, 0,
SA1111_DRIVER_NAME(ps2if->dev), ps2if);
if (ret) {
printk(KERN_ERR "sa1111ps2: could not allocate IRQ%d: %d\n",
- ps2if->dev->irq[1], ret);
- free_irq(ps2if->dev->irq[0], ps2if);
+ ps2if->tx_irq, ret);
+ free_irq(ps2if->rx_irq, ps2if);
sa1111_disable_device(ps2if->dev);
return ret;
}
ps2if->open = 1;
- enable_irq_wake(ps2if->dev->irq[0]);
+ enable_irq_wake(ps2if->rx_irq);
- sa1111_writel(PS2CR_ENA, ps2if->base + PS2CR);
+ writel_relaxed(PS2CR_ENA, ps2if->base + PS2CR);
return 0;
}
@@ -178,14 +180,14 @@ static void ps2_close(struct serio *io)
{
struct ps2if *ps2if = io->port_data;
- sa1111_writel(0, ps2if->base + PS2CR);
+ writel_relaxed(0, ps2if->base + PS2CR);
- disable_irq_wake(ps2if->dev->irq[0]);
+ disable_irq_wake(ps2if->rx_irq);
ps2if->open = 0;
- free_irq(ps2if->dev->irq[1], ps2if);
- free_irq(ps2if->dev->irq[0], ps2if);
+ free_irq(ps2if->tx_irq, ps2if);
+ free_irq(ps2if->rx_irq, ps2if);
sa1111_disable_device(ps2if->dev);
}
@@ -198,7 +200,7 @@ static void ps2_clear_input(struct ps2if *ps2if)
int maxread = 100;
while (maxread--) {
- if ((sa1111_readl(ps2if->base + PS2DATA) & 0xff) == 0xff)
+ if ((readl_relaxed(ps2if->base + PS2DATA) & 0xff) == 0xff)
break;
}
}
@@ -208,11 +210,11 @@ static unsigned int ps2_test_one(struct ps2if *ps2if,
{
unsigned int val;
- sa1111_writel(PS2CR_ENA | mask, ps2if->base + PS2CR);
+ writel_relaxed(PS2CR_ENA | mask, ps2if->base + PS2CR);
- udelay(2);
+ udelay(10);
- val = sa1111_readl(ps2if->base + PS2STAT);
+ val = readl_relaxed(ps2if->base + PS2STAT);
return val & (PS2STAT_KBC | PS2STAT_KBD);
}
@@ -243,7 +245,7 @@ static int ps2_test(struct ps2if *ps2if)
ret = -ENODEV;
}
- sa1111_writel(0, ps2if->base + PS2CR);
+ writel_relaxed(0, ps2if->base + PS2CR);
return ret;
}
@@ -264,7 +266,6 @@ static int ps2_probe(struct sa1111_dev *dev)
goto free;
}
-
serio->id.type = SERIO_8042;
serio->write = ps2_write;
serio->open = ps2_open;
@@ -279,6 +280,18 @@ static int ps2_probe(struct sa1111_dev *dev)
spin_lock_init(&ps2if->lock);
+ ps2if->rx_irq = sa1111_get_irq(dev, 0);
+ if (ps2if->rx_irq <= 0) {
+ ret = ps2if->rx_irq ? : -ENXIO;
+ goto free;
+ }
+
+ ps2if->tx_irq = sa1111_get_irq(dev, 1);
+ if (ps2if->tx_irq <= 0) {
+ ret = ps2if->tx_irq ? : -ENXIO;
+ goto free;
+ }
+
/*
* Request the physical region for this PS2 port.
*/
@@ -297,8 +310,8 @@ static int ps2_probe(struct sa1111_dev *dev)
sa1111_enable_device(ps2if->dev);
/* Incoming clock is 8MHz */
- sa1111_writel(0, ps2if->base + PS2CLKDIV);
- sa1111_writel(127, ps2if->base + PS2PRECNT);
+ writel_relaxed(0, ps2if->base + PS2CLKDIV);
+ writel_relaxed(127, ps2if->base + PS2PRECNT);
/*
* Flush any pending input.
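
The new IRQ lookups in ps2_probe() above rely on the GNU "?:" operator with an omitted middle operand: a negative errno from sa1111_get_irq() is propagated unchanged, while a zero result (no IRQ mapped) is turned into -ENXIO. A hypothetical helper, not part of the patch, showing the same idiom:

	#include <linux/errno.h>

	static int normalize_irq(int irq)
	{
		if (irq <= 0)
			return irq ?: -ENXIO;	/* same as: irq ? irq : -ENXIO */

		return irq;
	}
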
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 64b30fe273fd..38a226f9fcbd 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -316,6 +316,16 @@ config TOUCHSCREEN_EGALAX_SERIAL
To compile this driver as a module, choose M here: the
module will be called egalax_ts_serial.
+config TOUCHSCREEN_EXC3000
+ tristate "EETI EXC3000 multi-touch panel support"
+ depends on I2C
+ help
+ Say Y here to enable support for I2C connected EETI
+ EXC3000 multi-touch panels.
+
+ To compile this driver as a module, choose M here: the
+ module will be called exc3000.
+
config TOUCHSCREEN_FUJITSU
tristate "Fujitsu serial touchscreen"
select SERIO
@@ -344,6 +354,17 @@ config TOUCHSCREEN_GOODIX
To compile this driver as a module, choose M here: the
module will be called goodix.
+config TOUCHSCREEN_HIDEEP
+ tristate "HiDeep Touch IC"
+ depends on I2C
+ help
+ Say Y here if you have a touchscreen using a HiDeep touch IC.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hideep_ts.
+
config TOUCHSCREEN_ILI210X
tristate "Ilitek ILI210X based touchscreen"
depends on I2C
@@ -383,6 +404,17 @@ config TOUCHSCREEN_S3C2410
To compile this driver as a module, choose M here: the
module will be called s3c2410_ts.
+config TOUCHSCREEN_S6SY761
+ tristate "Samsung S6SY761 Touchscreen driver"
+ depends on I2C
+ help
+ Say Y here if you have a Samsung S6SY761 touchscreen.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called s6sy761.
+
config TOUCHSCREEN_GUNZE
tristate "Gunze AHL-51S touchscreen"
select SERIO
@@ -727,7 +759,7 @@ config TOUCHSCREEN_WM831X
config TOUCHSCREEN_WM97XX
tristate "Support for WM97xx AC97 touchscreen controllers"
- depends on AC97_BUS
+ depends on AC97_BUS || AC97_BUS_NEW
help
Say Y here if you have a Wolfson Microelectronics WM97xx
touchscreen connected to your system. Note that this option
@@ -949,7 +981,7 @@ config TOUCHSCREEN_USB_NEXIO
config TOUCHSCREEN_USB_EASYTOUCH
default y
- bool "EasyTouch USB Touch controller device support" if EMBEDDED
+ bool "EasyTouch USB Touch controller device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
help
Say Y here if you have an EasyTouch USB Touch controller.
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 850c1562555a..d2a2b3b7af27 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -38,8 +38,10 @@ obj-$(CONFIG_TOUCHSCREEN_ELAN) += elants_i2c.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
obj-$(CONFIG_TOUCHSCREEN_EGALAX) += egalax_ts.o
obj-$(CONFIG_TOUCHSCREEN_EGALAX_SERIAL) += egalax_ts_serial.o
+obj-$(CONFIG_TOUCHSCREEN_EXC3000) += exc3000.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix.o
+obj-$(CONFIG_TOUCHSCREEN_HIDEEP) += hideep.o
obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
obj-$(CONFIG_TOUCHSCREEN_IMX6UL_TSC) += imx6ul_tsc.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
@@ -65,6 +67,7 @@ obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_RM_TS) += raydium_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
+obj-$(CONFIG_TOUCHSCREEN_S6SY761) += s6sy761.o
obj-$(CONFIG_TOUCHSCREEN_SILEAD) += silead.o
obj-$(CONFIG_TOUCHSCREEN_SIS_I2C) += sis_i2c.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 9c250ae780d9..0381c7809d1b 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -385,9 +385,9 @@ static inline void ad7877_ts_event_release(struct ad7877 *ts)
input_sync(input_dev);
}
-static void ad7877_timer(unsigned long handle)
+static void ad7877_timer(struct timer_list *t)
{
- struct ad7877 *ts = (void *)handle;
+ struct ad7877 *ts = from_timer(ts, t, timer);
unsigned long flags;
spin_lock_irqsave(&ts->lock, flags);
@@ -718,7 +718,7 @@ static int ad7877_probe(struct spi_device *spi)
ts->spi = spi;
ts->input = input_dev;
- setup_timer(&ts->timer, ad7877_timer, (unsigned long) ts);
+ timer_setup(&ts->timer, ad7877_timer, 0);
mutex_init(&ts->mutex);
spin_lock_init(&ts->lock);
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 196028c45210..6bad23ee47a1 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -237,9 +237,9 @@ static void ad7879_ts_event_release(struct ad7879 *ts)
input_sync(input_dev);
}
-static void ad7879_timer(unsigned long handle)
+static void ad7879_timer(struct timer_list *t)
{
- struct ad7879 *ts = (void *)handle;
+ struct ad7879 *ts = from_timer(ts, t, timer);
ad7879_ts_event_release(ts);
}
@@ -524,13 +524,6 @@ static int ad7879_parse_dt(struct device *dev, struct ad7879 *ts)
return 0;
}
-static void ad7879_cleanup_sysfs(void *_ts)
-{
- struct ad7879 *ts = _ts;
-
- sysfs_remove_group(&ts->dev->kobj, &ad7879_attr_group);
-}
-
int ad7879_probe(struct device *dev, struct regmap *regmap,
int irq, u16 bustype, u8 devid)
{
@@ -577,7 +570,7 @@ int ad7879_probe(struct device *dev, struct regmap *regmap,
ts->irq = irq;
ts->regmap = regmap;
- setup_timer(&ts->timer, ad7879_timer, (unsigned long) ts);
+ timer_setup(&ts->timer, ad7879_timer, 0);
snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(dev));
input_dev->name = "AD7879 Touchscreen";
@@ -658,11 +651,7 @@ int ad7879_probe(struct device *dev, struct regmap *regmap,
__ad7879_disable(ts);
- err = sysfs_create_group(&dev->kobj, &ad7879_attr_group);
- if (err)
- return err;
-
- err = devm_add_action_or_reset(dev, ad7879_cleanup_sysfs, ts);
+ err = devm_device_add_group(dev, &ad7879_attr_group);
if (err)
return err;
diff --git a/drivers/input/touchscreen/atmel-wm97xx.c b/drivers/input/touchscreen/atmel-wm97xx.c
index 8cf0b2be2df4..9140a43cfe20 100644
--- a/drivers/input/touchscreen/atmel-wm97xx.c
+++ b/drivers/input/touchscreen/atmel-wm97xx.c
@@ -208,9 +208,12 @@ static void atmel_wm97xx_acc_pen_up(struct wm97xx *wm)
}
}
-static void atmel_wm97xx_pen_timer(unsigned long data)
+static void atmel_wm97xx_pen_timer(struct timer_list *t)
{
- atmel_wm97xx_acc_pen_up((struct wm97xx *)data);
+ struct atmel_wm97xx *atmel_wm97xx = from_timer(atmel_wm97xx, t,
+ pen_timer);
+
+ atmel_wm97xx_acc_pen_up(atmel_wm97xx->wm);
}
static int atmel_wm97xx_acc_startup(struct wm97xx *wm)
@@ -348,8 +351,7 @@ static int __init atmel_wm97xx_probe(struct platform_device *pdev)
atmel_wm97xx->gpio_pen = atmel_gpio_line;
atmel_wm97xx->gpio_irq = gpio_to_irq(atmel_wm97xx->gpio_pen);
- setup_timer(&atmel_wm97xx->pen_timer, atmel_wm97xx_pen_timer,
- (unsigned long)wm);
+ timer_setup(&atmel_wm97xx->pen_timer, atmel_wm97xx_pen_timer, 0);
ret = request_irq(atmel_wm97xx->ac97c_irq,
atmel_wm97xx_channel_b_interrupt,
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index beaf61ce775b..727c3232517c 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -201,13 +201,21 @@ static int cyttsp4_si_get_cydata(struct cyttsp4 *cd)
void *p;
int rc;
+ if (si->si_ofs.test_ofs <= si->si_ofs.cydata_ofs) {
+ dev_err(cd->dev,
+ "%s: invalid offset test_ofs: %zu, cydata_ofs: %zu\n",
+ __func__, si->si_ofs.test_ofs, si->si_ofs.cydata_ofs);
+ return -EINVAL;
+ }
+
si->si_ofs.cydata_size = si->si_ofs.test_ofs - si->si_ofs.cydata_ofs;
dev_dbg(cd->dev, "%s: cydata size: %zd\n", __func__,
si->si_ofs.cydata_size);
p = krealloc(si->si_ptrs.cydata, si->si_ofs.cydata_size, GFP_KERNEL);
if (p == NULL) {
- dev_err(cd->dev, "%s: fail alloc cydata memory\n", __func__);
+ dev_err(cd->dev, "%s: failed to allocate cydata memory\n",
+ __func__);
return -ENOMEM;
}
si->si_ptrs.cydata = p;
@@ -270,11 +278,19 @@ static int cyttsp4_si_get_test_data(struct cyttsp4 *cd)
void *p;
int rc;
+ if (si->si_ofs.pcfg_ofs <= si->si_ofs.test_ofs) {
+ dev_err(cd->dev,
+ "%s: invalid offset pcfg_ofs: %zu, test_ofs: %zu\n",
+ __func__, si->si_ofs.pcfg_ofs, si->si_ofs.test_ofs);
+ return -EINVAL;
+ }
+
si->si_ofs.test_size = si->si_ofs.pcfg_ofs - si->si_ofs.test_ofs;
p = krealloc(si->si_ptrs.test, si->si_ofs.test_size, GFP_KERNEL);
if (p == NULL) {
- dev_err(cd->dev, "%s: fail alloc test memory\n", __func__);
+ dev_err(cd->dev, "%s: failed to allocate test memory\n",
+ __func__);
return -ENOMEM;
}
si->si_ptrs.test = p;
@@ -321,14 +337,20 @@ static int cyttsp4_si_get_pcfg_data(struct cyttsp4 *cd)
void *p;
int rc;
+ if (si->si_ofs.opcfg_ofs <= si->si_ofs.pcfg_ofs) {
+ dev_err(cd->dev,
+ "%s: invalid offset opcfg_ofs: %zu, pcfg_ofs: %zu\n",
+ __func__, si->si_ofs.opcfg_ofs, si->si_ofs.pcfg_ofs);
+ return -EINVAL;
+ }
+
si->si_ofs.pcfg_size = si->si_ofs.opcfg_ofs - si->si_ofs.pcfg_ofs;
p = krealloc(si->si_ptrs.pcfg, si->si_ofs.pcfg_size, GFP_KERNEL);
if (p == NULL) {
- rc = -ENOMEM;
- dev_err(cd->dev, "%s: fail alloc pcfg memory r=%d\n",
- __func__, rc);
- return rc;
+ dev_err(cd->dev, "%s: failed to allocate pcfg memory\n",
+ __func__);
+ return -ENOMEM;
}
si->si_ptrs.pcfg = p;
@@ -367,13 +389,20 @@ static int cyttsp4_si_get_opcfg_data(struct cyttsp4 *cd)
void *p;
int rc;
+ if (si->si_ofs.ddata_ofs <= si->si_ofs.opcfg_ofs) {
+ dev_err(cd->dev,
+ "%s: invalid offset ddata_ofs: %zu, opcfg_ofs: %zu\n",
+ __func__, si->si_ofs.ddata_ofs, si->si_ofs.opcfg_ofs);
+ return -EINVAL;
+ }
+
si->si_ofs.opcfg_size = si->si_ofs.ddata_ofs - si->si_ofs.opcfg_ofs;
p = krealloc(si->si_ptrs.opcfg, si->si_ofs.opcfg_size, GFP_KERNEL);
if (p == NULL) {
- dev_err(cd->dev, "%s: fail alloc opcfg memory\n", __func__);
- rc = -ENOMEM;
- goto cyttsp4_si_get_opcfg_data_exit;
+ dev_err(cd->dev, "%s: failed to allocate opcfg memory\n",
+ __func__);
+ return -ENOMEM;
}
si->si_ptrs.opcfg = p;
@@ -382,7 +411,7 @@ static int cyttsp4_si_get_opcfg_data(struct cyttsp4 *cd)
if (rc < 0) {
dev_err(cd->dev, "%s: fail read opcfg data r=%d\n",
__func__, rc);
- goto cyttsp4_si_get_opcfg_data_exit;
+ return rc;
}
si->si_ofs.cmd_ofs = si->si_ptrs.opcfg->cmd_ofs;
si->si_ofs.rep_ofs = si->si_ptrs.opcfg->rep_ofs;
@@ -447,8 +476,7 @@ static int cyttsp4_si_get_opcfg_data(struct cyttsp4 *cd)
cyttsp4_pr_buf(cd->dev, cd->pr_buf, (u8 *)si->si_ptrs.opcfg,
si->si_ofs.opcfg_size, "sysinfo_opcfg_data");
-cyttsp4_si_get_opcfg_data_exit:
- return rc;
+ return 0;
}
static int cyttsp4_si_get_ddata(struct cyttsp4 *cd)
@@ -1237,9 +1265,9 @@ static void cyttsp4_stop_wd_timer(struct cyttsp4 *cd)
del_timer_sync(&cd->watchdog_timer);
}
-static void cyttsp4_watchdog_timer(unsigned long handle)
+static void cyttsp4_watchdog_timer(struct timer_list *t)
{
- struct cyttsp4 *cd = (struct cyttsp4 *)handle;
+ struct cyttsp4 *cd = from_timer(cd, t, watchdog_timer);
dev_vdbg(cd->dev, "%s: Watchdog timer triggered\n", __func__);
@@ -2074,8 +2102,7 @@ struct cyttsp4 *cyttsp4_probe(const struct cyttsp4_bus_ops *ops,
}
/* Setup watchdog timer */
- setup_timer(&cd->watchdog_timer, cyttsp4_watchdog_timer,
- (unsigned long)cd);
+ timer_setup(&cd->watchdog_timer, cyttsp4_watchdog_timer, 0);
/*
* call startup directly to ensure that the device
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 5bf63f76ddda..c53a3d7239e7 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -70,8 +70,10 @@
#define EDT_RAW_DATA_DELAY 1000 /* usec */
enum edt_ver {
- M06,
- M09,
+ EDT_M06,
+ EDT_M09,
+ EDT_M12,
+ GENERIC_FT,
};
struct edt_reg_addr {
@@ -179,14 +181,16 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
int error;
switch (tsdata->version) {
- case M06:
+ case EDT_M06:
cmd = 0xf9; /* tell the controller to send touch data */
offset = 5; /* where the actual touch data starts */
tplen = 4; /* data comes in so called frames */
crclen = 1; /* length of the crc data */
break;
- case M09:
+ case EDT_M09:
+ case EDT_M12:
+ case GENERIC_FT:
cmd = 0x0;
offset = 3;
tplen = 6;
@@ -209,8 +213,8 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
goto out;
}
- /* M09 does not send header or CRC */
- if (tsdata->version == M06) {
+ /* M09/M12 does not send header or CRC */
+ if (tsdata->version == EDT_M06) {
if (rdbuf[0] != 0xaa || rdbuf[1] != 0xaa ||
rdbuf[2] != datalen) {
dev_err_ratelimited(dev,
@@ -233,7 +237,7 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
continue;
/* M06 sometimes sends bogus coordinates in TOUCH_DOWN */
- if (tsdata->version == M06 && type == TOUCH_EVENT_DOWN)
+ if (tsdata->version == EDT_M06 && type == TOUCH_EVENT_DOWN)
continue;
x = ((buf[0] << 8) | buf[1]) & 0x0fff;
@@ -264,14 +268,16 @@ static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata,
u8 wrbuf[4];
switch (tsdata->version) {
- case M06:
+ case EDT_M06:
wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc;
wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f;
wrbuf[2] = value;
wrbuf[3] = wrbuf[0] ^ wrbuf[1] ^ wrbuf[2];
return edt_ft5x06_ts_readwrite(tsdata->client, 4,
wrbuf, 0, NULL);
- case M09:
+ case EDT_M09:
+ case EDT_M12:
+ case GENERIC_FT:
wrbuf[0] = addr;
wrbuf[1] = value;
@@ -290,7 +296,7 @@ static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata,
int error;
switch (tsdata->version) {
- case M06:
+ case EDT_M06:
wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc;
wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f;
wrbuf[1] |= tsdata->factory_mode ? 0x80 : 0x40;
@@ -309,7 +315,9 @@ static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata,
}
break;
- case M09:
+ case EDT_M09:
+ case EDT_M12:
+ case GENERIC_FT:
wrbuf[0] = addr;
error = edt_ft5x06_ts_readwrite(tsdata->client, 1,
wrbuf, 1, rdbuf);
@@ -368,11 +376,13 @@ static ssize_t edt_ft5x06_setting_show(struct device *dev,
}
switch (tsdata->version) {
- case M06:
+ case EDT_M06:
addr = attr->addr_m06;
break;
- case M09:
+ case EDT_M09:
+ case EDT_M12:
+ case GENERIC_FT:
addr = attr->addr_m09;
break;
@@ -437,11 +447,13 @@ static ssize_t edt_ft5x06_setting_store(struct device *dev,
}
switch (tsdata->version) {
- case M06:
+ case EDT_M06:
addr = attr->addr_m06;
break;
- case M09:
+ case EDT_M09:
+ case EDT_M12:
+ case GENERIC_FT:
addr = attr->addr_m09;
break;
@@ -466,14 +478,18 @@ out:
return error ?: count;
}
+/* m06, m09: range 0-31, m12: range 0-5 */
static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN,
M09_REGISTER_GAIN, 0, 31);
+/* m06, m09: range 0-31, m12: range 0-16 */
static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET,
M09_REGISTER_OFFSET, 0, 31);
+/* m06: range 20 to 80, m09: range 0 to 30, m12: range 1 to 255... */
static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD,
- M09_REGISTER_THRESHOLD, 0, 80);
+ M09_REGISTER_THRESHOLD, 0, 255);
+/* m06: range 3 to 14, m12: (0x64: 100Hz) */
static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
- NO_REGISTER, 3, 14);
+ NO_REGISTER, 0, 255);
static struct attribute *edt_ft5x06_attrs[] = {
&edt_ft5x06_attr_gain.dattr.attr,
@@ -508,7 +524,7 @@ static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
}
/* mode register is 0x3c when in the work mode */
- if (tsdata->version == M09)
+ if (tsdata->version != EDT_M06)
goto m09_out;
error = edt_ft5x06_register_write(tsdata, WORK_REGISTER_OPMODE, 0x03);
@@ -545,7 +561,7 @@ err_out:
return error;
m09_out:
- dev_err(&client->dev, "No factory mode support for M09\n");
+ dev_err(&client->dev, "No factory mode support for M09/M12/GENERIC_FT\n");
return -EINVAL;
}
@@ -770,16 +786,17 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
* to have garbage in there
*/
memset(rdbuf, 0, sizeof(rdbuf));
- error = edt_ft5x06_ts_readwrite(client, 1, "\xbb",
+ error = edt_ft5x06_ts_readwrite(client, 1, "\xBB",
EDT_NAME_LEN - 1, rdbuf);
if (error)
return error;
- /* if we find something consistent, stay with that assumption
- * at least M09 won't send 3 bytes here
+ /* Probe content for something consistent.
+ * M06 starts with a response byte, M12 gives the data directly.
+ * M09/Generic does not provide model number information.
*/
- if (!(strncasecmp(rdbuf + 1, "EP0", 3))) {
- tsdata->version = M06;
+ if (!strncasecmp(rdbuf + 1, "EP0", 3)) {
+ tsdata->version = EDT_M06;
/* remove last '$' end marker */
rdbuf[EDT_NAME_LEN - 1] = '\0';
@@ -792,9 +809,31 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
*p++ = '\0';
strlcpy(model_name, rdbuf + 1, EDT_NAME_LEN);
strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
+ } else if (!strncasecmp(rdbuf, "EP0", 3)) {
+ tsdata->version = EDT_M12;
+
+ /* remove last '$' end marker */
+ rdbuf[EDT_NAME_LEN - 2] = '\0';
+ if (rdbuf[EDT_NAME_LEN - 3] == '$')
+ rdbuf[EDT_NAME_LEN - 3] = '\0';
+
+ /* look for Model/Version separator */
+ p = strchr(rdbuf, '*');
+ if (p)
+ *p++ = '\0';
+ strlcpy(model_name, rdbuf, EDT_NAME_LEN);
+ strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
} else {
- /* since there are only two versions around (M06, M09) */
- tsdata->version = M09;
+ /* If it is not an EDT M06/M12 touchscreen, then the model
+ * detection is a bit hairy. The different ft5x06
+ * firmwares around don't reliably implement the
+ * identification registers. Well, we'll take a shot.
+ *
+ * The main difference between generic Focaltech-based
+ * touchscreens and EDT M09 is that we know how to retrieve
+ * the max coordinates for the latter.
+ */
+ tsdata->version = GENERIC_FT;
error = edt_ft5x06_ts_readwrite(client, 1, "\xA6",
2, rdbuf);
@@ -808,8 +847,34 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
if (error)
return error;
- snprintf(model_name, EDT_NAME_LEN, "EP0%i%i0M09",
- rdbuf[0] >> 4, rdbuf[0] & 0x0F);
+ /* This "model identification" is not exact. Unfortunately
+ * not all firmwares for the ft5x06 put useful values in
+ * the identification registers.
+ */
+ switch (rdbuf[0]) {
+ case 0x35: /* EDT EP0350M09 */
+ case 0x43: /* EDT EP0430M09 */
+ case 0x50: /* EDT EP0500M09 */
+ case 0x57: /* EDT EP0570M09 */
+ case 0x70: /* EDT EP0700M09 */
+ tsdata->version = EDT_M09;
+ snprintf(model_name, EDT_NAME_LEN, "EP0%i%i0M09",
+ rdbuf[0] >> 4, rdbuf[0] & 0x0F);
+ break;
+ case 0xa1: /* EDT EP1010ML00 */
+ tsdata->version = EDT_M09;
+ snprintf(model_name, EDT_NAME_LEN, "EP%i%i0ML00",
+ rdbuf[0] >> 4, rdbuf[0] & 0x0F);
+ break;
+ case 0x5a: /* Solomon Goldentek Display */
+ snprintf(model_name, EDT_NAME_LEN, "GKTW50SCED1R0");
+ break;
+ default:
+ snprintf(model_name, EDT_NAME_LEN,
+ "generic ft5x06 (%02x)",
+ rdbuf[0]);
+ break;
+ }
}
return 0;
@@ -853,8 +918,17 @@ edt_ft5x06_ts_get_parameters(struct edt_ft5x06_ts_data *tsdata)
if (reg_addr->reg_report_rate != NO_REGISTER)
tsdata->report_rate = edt_ft5x06_register_read(tsdata,
reg_addr->reg_report_rate);
- tsdata->num_x = edt_ft5x06_register_read(tsdata, reg_addr->reg_num_x);
- tsdata->num_y = edt_ft5x06_register_read(tsdata, reg_addr->reg_num_y);
+ if (tsdata->version == EDT_M06 ||
+ tsdata->version == EDT_M09 ||
+ tsdata->version == EDT_M12) {
+ tsdata->num_x = edt_ft5x06_register_read(tsdata,
+ reg_addr->reg_num_x);
+ tsdata->num_y = edt_ft5x06_register_read(tsdata,
+ reg_addr->reg_num_y);
+ } else {
+ tsdata->num_x = -1;
+ tsdata->num_y = -1;
+ }
}
static void
@@ -863,7 +937,7 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
struct edt_reg_addr *reg_addr = &tsdata->reg_addr;
switch (tsdata->version) {
- case M06:
+ case EDT_M06:
reg_addr->reg_threshold = WORK_REGISTER_THRESHOLD;
reg_addr->reg_report_rate = WORK_REGISTER_REPORT_RATE;
reg_addr->reg_gain = WORK_REGISTER_GAIN;
@@ -872,7 +946,8 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
reg_addr->reg_num_y = WORK_REGISTER_NUM_Y;
break;
- case M09:
+ case EDT_M09:
+ case EDT_M12:
reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
reg_addr->reg_report_rate = NO_REGISTER;
reg_addr->reg_gain = M09_REGISTER_GAIN;
@@ -880,6 +955,13 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
reg_addr->reg_num_x = M09_REGISTER_NUM_X;
reg_addr->reg_num_y = M09_REGISTER_NUM_Y;
break;
+
+ case GENERIC_FT:
+ /* this is guesswork */
+ reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
+ reg_addr->reg_gain = M09_REGISTER_GAIN;
+ reg_addr->reg_offset = M09_REGISTER_OFFSET;
+ break;
}
}
@@ -969,10 +1051,20 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
input->id.bustype = BUS_I2C;
input->dev.parent = &client->dev;
- input_set_abs_params(input, ABS_MT_POSITION_X,
- 0, tsdata->num_x * 64 - 1, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y,
- 0, tsdata->num_y * 64 - 1, 0, 0);
+ if (tsdata->version == EDT_M06 ||
+ tsdata->version == EDT_M09 ||
+ tsdata->version == EDT_M12) {
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ 0, tsdata->num_x * 64 - 1, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ 0, tsdata->num_y * 64 - 1, 0, 0);
+ } else {
+ /* Unknown maximum values. Specify via devicetree */
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ 0, 65535, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ 0, 65535, 0, 0);
+ }
touchscreen_parse_properties(input, true, &tsdata->prop);
@@ -998,13 +1090,13 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return error;
}
- error = sysfs_create_group(&client->dev.kobj, &edt_ft5x06_attr_group);
+ error = devm_device_add_group(&client->dev, &edt_ft5x06_attr_group);
if (error)
return error;
error = input_register_device(input);
if (error)
- goto err_remove_attrs;
+ return error;
edt_ft5x06_ts_prepare_debugfs(tsdata, dev_driver_string(&client->dev));
device_init_wakeup(&client->dev, 1);
@@ -1016,10 +1108,6 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
tsdata->reset_gpio ? desc_to_gpio(tsdata->reset_gpio) : -1);
return 0;
-
-err_remove_attrs:
- sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group);
- return error;
}
static int edt_ft5x06_ts_remove(struct i2c_client *client)
@@ -1027,7 +1115,6 @@ static int edt_ft5x06_ts_remove(struct i2c_client *client)
struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
edt_ft5x06_ts_teardown_debugfs(tsdata);
- sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group);
return 0;
}
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 0f4cda7282a2..e102d7764bc2 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1070,13 +1070,6 @@ static const struct attribute_group elants_attribute_group = {
.attrs = elants_attributes,
};
-static void elants_i2c_remove_sysfs_group(void *_data)
-{
- struct elants_data *ts = _data;
-
- sysfs_remove_group(&ts->client->dev.kobj, &elants_attribute_group);
-}
-
static int elants_i2c_power_on(struct elants_data *ts)
{
int error;
@@ -1289,23 +1282,13 @@ static int elants_i2c_probe(struct i2c_client *client,
if (!client->dev.of_node)
device_init_wakeup(&client->dev, true);
- error = sysfs_create_group(&client->dev.kobj, &elants_attribute_group);
+ error = devm_device_add_group(&client->dev, &elants_attribute_group);
if (error) {
dev_err(&client->dev, "failed to create sysfs attributes: %d\n",
error);
return error;
}
- error = devm_add_action(&client->dev,
- elants_i2c_remove_sysfs_group, ts);
- if (error) {
- elants_i2c_remove_sysfs_group(ts);
- dev_err(&client->dev,
- "Failed to add sysfs cleanup action: %d\n",
- error);
- return error;
- }
-
return 0;
}
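
The ad7879, edt-ft5x06 and elants_i2c changes above all replace a manual sysfs_create_group()/sysfs_remove_group() pairing (or a devm cleanup action) with devm_device_add_group(), which ties the attribute group's lifetime to the device so no explicit cleanup is needed in remove() or on error paths. A minimal sketch of the pattern, with a hypothetical my_attr_group:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static struct attribute *my_attrs[] = {
		NULL,		/* attributes would be listed here */
	};

	static const struct attribute_group my_attr_group = {
		.attrs = my_attrs,
	};

	static int my_probe_sysfs(struct device *dev)
	{
		/* group is removed automatically when the device is unbound */
		return devm_device_add_group(dev, &my_attr_group);
	}
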
diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
new file mode 100644
index 000000000000..37437a53cd1a
--- /dev/null
+++ b/drivers/input/touchscreen/exc3000.c
@@ -0,0 +1,223 @@
+/*
+ * Driver for I2C connected EETI EXC3000 multiple touch controller
+ *
+ * Copyright (C) 2017 Ahmet Inan <inan@distec.de>
+ *
+ * minimal implementation based on egalax_ts.c and egalax_i2c.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/timer.h>
+#include <asm/unaligned.h>
+
+#define EXC3000_NUM_SLOTS 10
+#define EXC3000_SLOTS_PER_FRAME 5
+#define EXC3000_LEN_FRAME 66
+#define EXC3000_LEN_POINT 10
+#define EXC3000_MT_EVENT 6
+#define EXC3000_TIMEOUT_MS 100
+
+struct exc3000_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct touchscreen_properties prop;
+ struct timer_list timer;
+ u8 buf[2 * EXC3000_LEN_FRAME];
+};
+
+static void exc3000_report_slots(struct input_dev *input,
+ struct touchscreen_properties *prop,
+ const u8 *buf, int num)
+{
+ for (; num--; buf += EXC3000_LEN_POINT) {
+ if (buf[0] & BIT(0)) {
+ input_mt_slot(input, buf[1]);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ touchscreen_report_pos(input, prop,
+ get_unaligned_le16(buf + 2),
+ get_unaligned_le16(buf + 4),
+ true);
+ }
+ }
+}
+
+static void exc3000_timer(struct timer_list *t)
+{
+ struct exc3000_data *data = from_timer(data, t, timer);
+
+ input_mt_sync_frame(data->input);
+ input_sync(data->input);
+}
+
+static int exc3000_read_frame(struct i2c_client *client, u8 *buf)
+{
+ int ret;
+
+ ret = i2c_master_send(client, "'", 2);
+ if (ret < 0)
+ return ret;
+
+ if (ret != 2)
+ return -EIO;
+
+ ret = i2c_master_recv(client, buf, EXC3000_LEN_FRAME);
+ if (ret < 0)
+ return ret;
+
+ if (ret != EXC3000_LEN_FRAME)
+ return -EIO;
+
+ if (get_unaligned_le16(buf) != EXC3000_LEN_FRAME ||
+ buf[2] != EXC3000_MT_EVENT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int exc3000_read_data(struct i2c_client *client,
+ u8 *buf, int *n_slots)
+{
+ int error;
+
+ error = exc3000_read_frame(client, buf);
+ if (error)
+ return error;
+
+ *n_slots = buf[3];
+ if (!*n_slots || *n_slots > EXC3000_NUM_SLOTS)
+ return -EINVAL;
+
+ if (*n_slots > EXC3000_SLOTS_PER_FRAME) {
+ /* Read 2nd frame to get the rest of the contacts. */
+ error = exc3000_read_frame(client, buf + EXC3000_LEN_FRAME);
+ if (error)
+ return error;
+
+ /* 2nd chunk must have number of contacts set to 0. */
+ if (buf[EXC3000_LEN_FRAME + 3] != 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static irqreturn_t exc3000_interrupt(int irq, void *dev_id)
+{
+ struct exc3000_data *data = dev_id;
+ struct input_dev *input = data->input;
+ u8 *buf = data->buf;
+ int slots, total_slots;
+ int error;
+
+ error = exc3000_read_data(data->client, buf, &total_slots);
+ if (error) {
+ /* Schedule a timer to release "stuck" contacts */
+ mod_timer(&data->timer,
+ jiffies + msecs_to_jiffies(EXC3000_TIMEOUT_MS));
+ goto out;
+ }
+
+ /*
+ * We read full state successfully, no contacts will be "stuck".
+ */
+ del_timer_sync(&data->timer);
+
+ while (total_slots > 0) {
+ slots = min(total_slots, EXC3000_SLOTS_PER_FRAME);
+ exc3000_report_slots(input, &data->prop, buf + 4, slots);
+ total_slots -= slots;
+ buf += EXC3000_LEN_FRAME;
+ }
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int exc3000_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct exc3000_data *data;
+ struct input_dev *input;
+ int error;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ timer_setup(&data->timer, exc3000_timer, 0);
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
+
+ data->input = input;
+
+ input->name = "EETI EXC3000 Touch Screen";
+ input->id.bustype = BUS_I2C;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, 4095, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 4095, 0, 0);
+ touchscreen_parse_properties(input, true, &data->prop);
+
+ error = input_mt_init_slots(input, EXC3000_NUM_SLOTS,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error)
+ return error;
+
+ error = input_register_device(input);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, exc3000_interrupt, IRQF_ONESHOT,
+ client->name, data);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct i2c_device_id exc3000_id[] = {
+ { "exc3000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, exc3000_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id exc3000_of_match[] = {
+ { .compatible = "eeti,exc3000" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, exc3000_of_match);
+#endif
+
+static struct i2c_driver exc3000_driver = {
+ .driver = {
+ .name = "exc3000",
+ .of_match_table = of_match_ptr(exc3000_of_match),
+ },
+ .id_table = exc3000_id,
+ .probe = exc3000_probe,
+};
+
+module_i2c_driver(exc3000_driver);
+
+MODULE_AUTHOR("Ahmet Inan <inan@distec.de>");
+MODULE_DESCRIPTION("I2C connected EETI EXC3000 multiple touch controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b3bbad7d2282..69d0b8cbc71f 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -31,9 +31,18 @@
#include <linux/of.h>
#include <asm/unaligned.h>
+struct goodix_ts_data;
+
+struct goodix_chip_data {
+ u16 config_addr;
+ int config_len;
+ int (*check_config)(struct goodix_ts_data *, const struct firmware *);
+};
+
struct goodix_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
+ const struct goodix_chip_data *chip;
int abs_x_max;
int abs_y_max;
bool swapped_x_y;
@@ -41,7 +50,6 @@ struct goodix_ts_data {
bool inverted_y;
unsigned int max_touch_num;
unsigned int int_trigger_type;
- int cfg_len;
struct gpio_desc *gpiod_int;
struct gpio_desc *gpiod_rst;
u16 id;
@@ -69,7 +77,8 @@ struct goodix_ts_data {
#define GOODIX_CMD_SCREEN_OFF 0x05
#define GOODIX_READ_COOR_ADDR 0x814E
-#define GOODIX_REG_CONFIG_DATA 0x8047
+#define GOODIX_GT1X_REG_CONFIG_DATA 0x8050
+#define GOODIX_GT9X_REG_CONFIG_DATA 0x8047
#define GOODIX_REG_ID 0x8140
#define GOODIX_BUFFER_STATUS_READY BIT(7)
@@ -79,6 +88,35 @@ struct goodix_ts_data {
#define MAX_CONTACTS_LOC 5
#define TRIGGER_LOC 6
+static int goodix_check_cfg_8(struct goodix_ts_data *ts,
+ const struct firmware *cfg);
+static int goodix_check_cfg_16(struct goodix_ts_data *ts,
+ const struct firmware *cfg);
+
+static const struct goodix_chip_data gt1x_chip_data = {
+ .config_addr = GOODIX_GT1X_REG_CONFIG_DATA,
+ .config_len = GOODIX_CONFIG_MAX_LENGTH,
+ .check_config = goodix_check_cfg_16,
+};
+
+static const struct goodix_chip_data gt911_chip_data = {
+ .config_addr = GOODIX_GT9X_REG_CONFIG_DATA,
+ .config_len = GOODIX_CONFIG_911_LENGTH,
+ .check_config = goodix_check_cfg_8,
+};
+
+static const struct goodix_chip_data gt967_chip_data = {
+ .config_addr = GOODIX_GT9X_REG_CONFIG_DATA,
+ .config_len = GOODIX_CONFIG_967_LENGTH,
+ .check_config = goodix_check_cfg_8,
+};
+
+static const struct goodix_chip_data gt9x_chip_data = {
+ .config_addr = GOODIX_GT9X_REG_CONFIG_DATA,
+ .config_len = GOODIX_CONFIG_MAX_LENGTH,
+ .check_config = goodix_check_cfg_8,
+};
+
static const unsigned long goodix_irq_flags[] = {
IRQ_TYPE_EDGE_RISING,
IRQ_TYPE_EDGE_FALLING,
@@ -177,22 +215,25 @@ static int goodix_i2c_write_u8(struct i2c_client *client, u16 reg, u8 value)
return goodix_i2c_write(client, reg, &value, sizeof(value));
}
-static int goodix_get_cfg_len(u16 id)
+static const struct goodix_chip_data *goodix_get_chip_data(u16 id)
{
switch (id) {
+ case 1151:
+ return &gt1x_chip_data;
+
case 911:
case 9271:
case 9110:
case 927:
case 928:
- return GOODIX_CONFIG_911_LENGTH;
+ return &gt911_chip_data;
case 912:
case 967:
- return GOODIX_CONFIG_967_LENGTH;
+ return &gt967_chip_data;
default:
- return GOODIX_CONFIG_MAX_LENGTH;
+ return &gt9x_chip_data;
}
}
@@ -332,25 +373,12 @@ static int goodix_request_irq(struct goodix_ts_data *ts)
ts->irq_flags, ts->client->name, ts);
}
-/**
- * goodix_check_cfg - Checks if config fw is valid
- *
- * @ts: goodix_ts_data pointer
- * @cfg: firmware config data
- */
-static int goodix_check_cfg(struct goodix_ts_data *ts,
- const struct firmware *cfg)
+static int goodix_check_cfg_8(struct goodix_ts_data *ts,
+ const struct firmware *cfg)
{
- int i, raw_cfg_len;
+ int i, raw_cfg_len = cfg->size - 2;
u8 check_sum = 0;
- if (cfg->size > GOODIX_CONFIG_MAX_LENGTH) {
- dev_err(&ts->client->dev,
- "The length of the config fw is not correct");
- return -EINVAL;
- }
-
- raw_cfg_len = cfg->size - 2;
for (i = 0; i < raw_cfg_len; i++)
check_sum += cfg->data[i];
check_sum = (~check_sum) + 1;
@@ -369,6 +397,48 @@ static int goodix_check_cfg(struct goodix_ts_data *ts,
return 0;
}
+static int goodix_check_cfg_16(struct goodix_ts_data *ts,
+ const struct firmware *cfg)
+{
+ int i, raw_cfg_len = cfg->size - 3;
+ u16 check_sum = 0;
+
+ for (i = 0; i < raw_cfg_len; i += 2)
+ check_sum += get_unaligned_be16(&cfg->data[i]);
+ check_sum = (~check_sum) + 1;
+ if (check_sum != get_unaligned_be16(&cfg->data[raw_cfg_len])) {
+ dev_err(&ts->client->dev,
+ "The checksum of the config fw is not correct");
+ return -EINVAL;
+ }
+
+ if (cfg->data[raw_cfg_len + 2] != 1) {
+ dev_err(&ts->client->dev,
+ "Config fw must have Config_Fresh register set");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * goodix_check_cfg - Checks if config fw is valid
+ *
+ * @ts: goodix_ts_data pointer
+ * @cfg: firmware config data
+ */
+static int goodix_check_cfg(struct goodix_ts_data *ts,
+ const struct firmware *cfg)
+{
+ if (cfg->size > GOODIX_CONFIG_MAX_LENGTH) {
+ dev_err(&ts->client->dev,
+ "The length of the config fw is not correct");
+ return -EINVAL;
+ }
+
+ return ts->chip->check_config(ts, cfg);
+}
+
/**
* goodix_send_cfg - Write fw config to device
*
@@ -384,7 +454,7 @@ static int goodix_send_cfg(struct goodix_ts_data *ts,
if (error)
return error;
- error = goodix_i2c_write(ts->client, GOODIX_REG_CONFIG_DATA, cfg->data,
+ error = goodix_i2c_write(ts->client, ts->chip->config_addr, cfg->data,
cfg->size);
if (error) {
dev_err(&ts->client->dev, "Failed to write config data: %d",
@@ -511,8 +581,8 @@ static void goodix_read_config(struct goodix_ts_data *ts)
u8 config[GOODIX_CONFIG_MAX_LENGTH];
int error;
- error = goodix_i2c_read(ts->client, GOODIX_REG_CONFIG_DATA,
- config, ts->cfg_len);
+ error = goodix_i2c_read(ts->client, ts->chip->config_addr,
+ config, ts->chip->config_len);
if (error) {
dev_warn(&ts->client->dev,
"Error reading config (%d), using defaults\n",
@@ -592,7 +662,7 @@ static int goodix_i2c_test(struct i2c_client *client)
u8 test;
while (retry++ < 2) {
- error = goodix_i2c_read(client, GOODIX_REG_CONFIG_DATA,
+ error = goodix_i2c_read(client, GOODIX_REG_ID,
&test, 1);
if (!error)
return 0;
@@ -762,7 +832,7 @@ static int goodix_ts_probe(struct i2c_client *client,
return error;
}
- ts->cfg_len = goodix_get_cfg_len(ts->id);
+ ts->chip = goodix_get_chip_data(ts->id);
if (ts->gpiod_int && ts->gpiod_rst) {
/* update device config */
@@ -891,6 +961,7 @@ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id goodix_of_match[] = {
+ { .compatible = "goodix,gt1151" },
{ .compatible = "goodix,gt911" },
{ .compatible = "goodix,gt9110" },
{ .compatible = "goodix,gt912" },
diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c
new file mode 100644
index 000000000000..fc080a7c2e1f
--- /dev/null
+++ b/drivers/input/touchscreen/hideep.c
@@ -0,0 +1,1120 @@
+/*
+ * Copyright (C) 2012-2017 Hideep, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/sysfs.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/regulator/consumer.h>
+#include <asm/unaligned.h>
+
+#define HIDEEP_TS_NAME "HiDeep Touchscreen"
+#define HIDEEP_I2C_NAME "hideep_ts"
+
+#define HIDEEP_MT_MAX 10
+#define HIDEEP_KEY_MAX 3
+
+/* count(2) + touch data(100) + key data(6) */
+#define HIDEEP_MAX_EVENT 108UL
+
+#define HIDEEP_TOUCH_EVENT_INDEX 2
+#define HIDEEP_KEY_EVENT_INDEX 102
+
+/* Touch & key event */
+#define HIDEEP_EVENT_ADDR 0x240
+
+/* command list */
+#define HIDEEP_RESET_CMD 0x9800
+
+/* event bit */
+#define HIDEEP_MT_RELEASED BIT(4)
+#define HIDEEP_KEY_PRESSED BIT(7)
+#define HIDEEP_KEY_FIRST_PRESSED BIT(8)
+#define HIDEEP_KEY_PRESSED_MASK (HIDEEP_KEY_PRESSED | \
+ HIDEEP_KEY_FIRST_PRESSED)
+
+#define HIDEEP_KEY_IDX_MASK 0x0f
+
+/* For NVM */
+#define HIDEEP_YRAM_BASE 0x40000000
+#define HIDEEP_PERIPHERAL_BASE 0x50000000
+#define HIDEEP_ESI_BASE (HIDEEP_PERIPHERAL_BASE + 0x00000000)
+#define HIDEEP_FLASH_BASE (HIDEEP_PERIPHERAL_BASE + 0x01000000)
+#define HIDEEP_SYSCON_BASE (HIDEEP_PERIPHERAL_BASE + 0x02000000)
+
+#define HIDEEP_SYSCON_MOD_CON (HIDEEP_SYSCON_BASE + 0x0000)
+#define HIDEEP_SYSCON_SPC_CON (HIDEEP_SYSCON_BASE + 0x0004)
+#define HIDEEP_SYSCON_CLK_CON (HIDEEP_SYSCON_BASE + 0x0008)
+#define HIDEEP_SYSCON_CLK_ENA (HIDEEP_SYSCON_BASE + 0x000C)
+#define HIDEEP_SYSCON_RST_CON (HIDEEP_SYSCON_BASE + 0x0010)
+#define HIDEEP_SYSCON_WDT_CON (HIDEEP_SYSCON_BASE + 0x0014)
+#define HIDEEP_SYSCON_WDT_CNT (HIDEEP_SYSCON_BASE + 0x0018)
+#define HIDEEP_SYSCON_PWR_CON (HIDEEP_SYSCON_BASE + 0x0020)
+#define HIDEEP_SYSCON_PGM_ID (HIDEEP_SYSCON_BASE + 0x00F4)
+
+#define HIDEEP_FLASH_CON (HIDEEP_FLASH_BASE + 0x0000)
+#define HIDEEP_FLASH_STA (HIDEEP_FLASH_BASE + 0x0004)
+#define HIDEEP_FLASH_CFG (HIDEEP_FLASH_BASE + 0x0008)
+#define HIDEEP_FLASH_TIM (HIDEEP_FLASH_BASE + 0x000C)
+#define HIDEEP_FLASH_CACHE_CFG (HIDEEP_FLASH_BASE + 0x0010)
+#define HIDEEP_FLASH_PIO_SIG (HIDEEP_FLASH_BASE + 0x400000)
+
+#define HIDEEP_ESI_TX_INVALID (HIDEEP_ESI_BASE + 0x0008)
+
+#define HIDEEP_PERASE 0x00040000
+#define HIDEEP_WRONLY 0x00100000
+
+#define HIDEEP_NVM_MASK_OFS 0x0000000C
+#define HIDEEP_NVM_DEFAULT_PAGE 0
+#define HIDEEP_NVM_SFR_WPAGE 1
+#define HIDEEP_NVM_SFR_RPAGE 2
+
+#define HIDEEP_PIO_SIG 0x00400000
+#define HIDEEP_PROT_MODE 0x03400000
+
+#define HIDEEP_NVM_PAGE_SIZE 128
+
+#define HIDEEP_DWZ_INFO 0x000002C0
+
+struct hideep_event {
+ __le16 x;
+ __le16 y;
+ __le16 z;
+ u8 w;
+ u8 flag;
+ u8 type;
+ u8 index;
+};
+
+struct dwz_info {
+ __be32 code_start;
+ u8 code_crc[12];
+
+ __be32 c_code_start;
+ __be16 gen_ver;
+ __be16 c_code_len;
+
+ __be32 vr_start;
+ __be16 rsv0;
+ __be16 vr_len;
+
+ __be32 ft_start;
+ __be16 vr_version;
+ __be16 ft_len;
+
+ __be16 core_ver;
+ __be16 boot_ver;
+
+ __be16 release_ver;
+ __be16 custom_ver;
+
+ u8 factory_id;
+ u8 panel_type;
+ u8 model_name[6];
+
+ __be16 extra_option;
+ __be16 product_code;
+
+ __be16 vendor_id;
+ __be16 product_id;
+};
+
+struct pgm_packet {
+ struct {
+ u8 unused[3];
+ u8 len;
+ __be32 addr;
+ } header;
+ __be32 payload[HIDEEP_NVM_PAGE_SIZE / sizeof(__be32)];
+};
+
+#define HIDEEP_XFER_BUF_SIZE sizeof(struct pgm_packet)
+
+struct hideep_ts {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ struct regmap *reg;
+
+ struct touchscreen_properties prop;
+
+ struct gpio_desc *reset_gpio;
+
+ struct regulator *vcc_vdd;
+ struct regulator *vcc_vid;
+
+ struct mutex dev_mutex;
+
+ u32 tch_count;
+ u32 lpm_count;
+
+ /*
+ * Data buffer to read packet from the device (contacts and key
+ * states). We align it on double-word boundary to keep word-sized
+ * fields in contact data and double-word-sized fields in program
+ * packet aligned.
+ */
+ u8 xfer_buf[HIDEEP_XFER_BUF_SIZE] __aligned(4);
+
+ int key_num;
+ u32 key_codes[HIDEEP_KEY_MAX];
+
+ struct dwz_info dwz_info;
+
+ unsigned int fw_size;
+ u32 nvm_mask;
+};
+
+static int hideep_pgm_w_mem(struct hideep_ts *ts, u32 addr,
+ const __be32 *data, size_t count)
+{
+ struct pgm_packet *packet = (void *)ts->xfer_buf;
+ size_t len = count * sizeof(*data);
+ struct i2c_msg msg = {
+ .addr = ts->client->addr,
+ .len = len + sizeof(packet->header.len) +
+ sizeof(packet->header.addr),
+ .buf = &packet->header.len,
+ };
+ int ret;
+
+ if (len > HIDEEP_NVM_PAGE_SIZE)
+ return -EINVAL;
+
+ packet->header.len = 0x80 | (count - 1);
+ packet->header.addr = cpu_to_be32(addr);
+ memcpy(packet->payload, data, len);
+
+ ret = i2c_transfer(ts->client->adapter, &msg, 1);
+ if (ret != 1)
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static int hideep_pgm_r_mem(struct hideep_ts *ts, u32 addr,
+ __be32 *data, size_t count)
+{
+ struct pgm_packet *packet = (void *)ts->xfer_buf;
+ size_t len = count * sizeof(*data);
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .len = sizeof(packet->header.len) +
+ sizeof(packet->header.addr),
+ .buf = &packet->header.len,
+ },
+ {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = (u8 *)data,
+ },
+ };
+ int ret;
+
+ if (len > HIDEEP_NVM_PAGE_SIZE)
+ return -EINVAL;
+
+ packet->header.len = count - 1;
+ packet->header.addr = cpu_to_be32(addr);
+
+ ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg))
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static int hideep_pgm_r_reg(struct hideep_ts *ts, u32 addr, u32 *val)
+{
+ __be32 data;
+ int error;
+
+ error = hideep_pgm_r_mem(ts, addr, &data, 1);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "read of register %#08x failed: %d\n",
+ addr, error);
+ return error;
+ }
+
+ *val = be32_to_cpu(data);
+ return 0;
+}
+
+static int hideep_pgm_w_reg(struct hideep_ts *ts, u32 addr, u32 val)
+{
+ __be32 data = cpu_to_be32(val);
+ int error;
+
+ error = hideep_pgm_w_mem(ts, addr, &data, 1);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "write to register %#08x (%#08x) failed: %d\n",
+ addr, val, error);
+ return error;
+ }
+
+ return 0;
+}
+
+#define SW_RESET_IN_PGM(clk) \
+{ \
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_WDT_CNT, (clk)); \
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_WDT_CON, 0x03); \
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_WDT_CON, 0x01); \
+}
+
+#define SET_FLASH_PIO(ce) \
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_CON, \
+ 0x01 | ((ce) << 1))
+
+#define SET_PIO_SIG(x, y) \
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_PIO_SIG + (x), (y))
+
+#define SET_FLASH_HWCONTROL() \
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_CON, 0x00)
+
+#define NVM_W_SFR(x, y) \
+{ \
+ SET_FLASH_PIO(1); \
+ SET_PIO_SIG(x, y); \
+ SET_FLASH_PIO(0); \
+}
+
+static void hideep_pgm_set(struct hideep_ts *ts)
+{
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_WDT_CON, 0x00);
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_SPC_CON, 0x00);
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_CLK_ENA, 0xFF);
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_CLK_CON, 0x01);
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_PWR_CON, 0x01);
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_TIM, 0x03);
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_CACHE_CFG, 0x00);
+}
+
+static int hideep_pgm_get_pattern(struct hideep_ts *ts, u32 *pattern)
+{
+ u16 p1 = 0xAF39;
+ u16 p2 = 0xDF9D;
+ int error;
+
+ error = regmap_bulk_write(ts->reg, p1, &p2, 1);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "%s: regmap_bulk_write() failed with %d\n",
+ __func__, error);
+ return error;
+ }
+
+ usleep_range(1000, 1100);
+
+ /* flush invalid Tx load register */
+ error = hideep_pgm_w_reg(ts, HIDEEP_ESI_TX_INVALID, 0x01);
+ if (error)
+ return error;
+
+ error = hideep_pgm_r_reg(ts, HIDEEP_SYSCON_PGM_ID, pattern);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int hideep_enter_pgm(struct hideep_ts *ts)
+{
+ int retry_count = 10;
+ u32 pattern;
+ int error;
+
+ while (retry_count--) {
+ error = hideep_pgm_get_pattern(ts, &pattern);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "hideep_pgm_get_pattern failed: %d\n", error);
+ } else if (pattern != 0x39AF9DDF) {
+ dev_err(&ts->client->dev, "%s: bad pattern: %#08x\n",
+ __func__, pattern);
+ } else {
+ dev_dbg(&ts->client->dev, "found magic code");
+
+ hideep_pgm_set(ts);
+ usleep_range(1000, 1100);
+
+ return 0;
+ }
+ }
+
+ dev_err(&ts->client->dev, "failed to enter pgm mode\n");
+ SW_RESET_IN_PGM(1000);
+ return -EIO;
+}
+
+static void hideep_nvm_unlock(struct hideep_ts *ts)
+{
+ u32 unmask_code;
+
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_CFG, HIDEEP_NVM_SFR_RPAGE);
+ hideep_pgm_r_reg(ts, 0x0000000C, &unmask_code);
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_CFG, HIDEEP_NVM_DEFAULT_PAGE);
+
+ /* make it unprotected code */
+ unmask_code &= ~HIDEEP_PROT_MODE;
+
+ /* compare unmask code */
+ if (unmask_code != ts->nvm_mask)
+ dev_warn(&ts->client->dev,
+ "read mask code different %#08x vs %#08x",
+ unmask_code, ts->nvm_mask);
+
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_CFG, HIDEEP_NVM_SFR_WPAGE);
+ SET_FLASH_PIO(0);
+
+ NVM_W_SFR(HIDEEP_NVM_MASK_OFS, ts->nvm_mask);
+ SET_FLASH_HWCONTROL();
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_CFG, HIDEEP_NVM_DEFAULT_PAGE);
+}
+
+static int hideep_check_status(struct hideep_ts *ts)
+{
+ int time_out = 100;
+ int status;
+ int error;
+
+ while (time_out--) {
+ error = hideep_pgm_r_reg(ts, HIDEEP_FLASH_STA, &status);
+ if (!error && status)
+ return 0;
+
+ usleep_range(1000, 1100);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int hideep_program_page(struct hideep_ts *ts, u32 addr,
+ const __be32 *ucode, size_t xfer_count)
+{
+ u32 val;
+ int error;
+
+ error = hideep_check_status(ts);
+ if (error)
+ return -EBUSY;
+
+ addr &= ~(HIDEEP_NVM_PAGE_SIZE - 1);
+
+ SET_FLASH_PIO(0);
+ SET_FLASH_PIO(1);
+
+ /* erase page */
+ SET_PIO_SIG(HIDEEP_PERASE | addr, 0xFFFFFFFF);
+
+ SET_FLASH_PIO(0);
+
+ error = hideep_check_status(ts);
+ if (error)
+ return -EBUSY;
+
+ /* write page */
+ SET_FLASH_PIO(1);
+
+ val = be32_to_cpu(ucode[0]);
+ SET_PIO_SIG(HIDEEP_WRONLY | addr, val);
+
+ hideep_pgm_w_mem(ts, HIDEEP_FLASH_PIO_SIG | HIDEEP_WRONLY,
+ ucode, xfer_count);
+
+ val = be32_to_cpu(ucode[xfer_count - 1]);
+ SET_PIO_SIG(124, val);
+
+ SET_FLASH_PIO(0);
+
+ usleep_range(1000, 1100);
+
+ error = hideep_check_status(ts);
+ if (error)
+ return -EBUSY;
+
+ SET_FLASH_HWCONTROL();
+
+ return 0;
+}
+
+static int hideep_program_nvm(struct hideep_ts *ts,
+ const __be32 *ucode, size_t ucode_len)
+{
+ struct pgm_packet *packet_r = (void *)ts->xfer_buf;
+ __be32 *current_ucode = packet_r->payload;
+ size_t xfer_len;
+ size_t xfer_count;
+ u32 addr = 0;
+ int error;
+
+ hideep_nvm_unlock(ts);
+
+ while (ucode_len > 0) {
+ xfer_len = min_t(size_t, ucode_len, HIDEEP_NVM_PAGE_SIZE);
+ xfer_count = xfer_len / sizeof(*ucode);
+
+ error = hideep_pgm_r_mem(ts, 0x00000000 + addr,
+ current_ucode, xfer_count);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "%s: failed to read page at offset %#08x: %d\n",
+ __func__, addr, error);
+ return error;
+ }
+
+ /* See if the page needs updating */
+ if (memcmp(ucode, current_ucode, xfer_len)) {
+ error = hideep_program_page(ts, addr,
+ ucode, xfer_count);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "%s: iwrite failure @%#08x: %d\n",
+ __func__, addr, error);
+ return error;
+ }
+
+ usleep_range(1000, 1100);
+ }
+
+ ucode += xfer_count;
+ addr += xfer_len;
+ ucode_len -= xfer_len;
+ }
+
+ return 0;
+}
+
+static int hideep_verify_nvm(struct hideep_ts *ts,
+ const __be32 *ucode, size_t ucode_len)
+{
+ struct pgm_packet *packet_r = (void *)ts->xfer_buf;
+ __be32 *current_ucode = packet_r->payload;
+ size_t xfer_len;
+ size_t xfer_count;
+ u32 addr = 0;
+ int i;
+ int error;
+
+ while (ucode_len > 0) {
+ xfer_len = min_t(size_t, ucode_len, HIDEEP_NVM_PAGE_SIZE);
+ xfer_count = xfer_len / sizeof(*ucode);
+
+ error = hideep_pgm_r_mem(ts, 0x00000000 + addr,
+ current_ucode, xfer_count);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "%s: failed to read page at offset %#08x: %d\n",
+ __func__, addr, error);
+ return error;
+ }
+
+ if (memcmp(ucode, current_ucode, xfer_len)) {
+ const u8 *ucode_bytes = (const u8 *)ucode;
+ const u8 *current_bytes = (const u8 *)current_ucode;
+
+ for (i = 0; i < xfer_len; i++)
+ if (ucode_bytes[i] != current_bytes[i])
+ dev_err(&ts->client->dev,
+ "%s: mismatch @%#08x: (%#02x vs %#02x)\n",
+ __func__, addr + i,
+ ucode_bytes[i],
+ current_bytes[i]);
+
+ return -EIO;
+ }
+
+ ucode += xfer_count;
+ addr += xfer_len;
+ ucode_len -= xfer_len;
+ }
+
+ return 0;
+}
+
+static int hideep_load_dwz(struct hideep_ts *ts)
+{
+ u16 product_code;
+ int error;
+
+ error = hideep_enter_pgm(ts);
+ if (error)
+ return error;
+
+ msleep(50);
+
+ error = hideep_pgm_r_mem(ts, HIDEEP_DWZ_INFO,
+ (void *)&ts->dwz_info,
+ sizeof(ts->dwz_info) / sizeof(__be32));
+
+ SW_RESET_IN_PGM(10);
+ msleep(50);
+
+ if (error) {
+ dev_err(&ts->client->dev,
+ "failed to fetch DWZ data: %d\n", error);
+ return error;
+ }
+
+ product_code = be16_to_cpu(ts->dwz_info.product_code);
+
+ switch (product_code & 0xF0) {
+ case 0x40:
+ dev_dbg(&ts->client->dev, "used crimson IC");
+ ts->fw_size = 1024 * 48;
+ ts->nvm_mask = 0x00310000;
+ break;
+ case 0x60:
+ dev_dbg(&ts->client->dev, "used lime IC");
+ ts->fw_size = 1024 * 64;
+ ts->nvm_mask = 0x0030027B;
+ break;
+ default:
+ dev_err(&ts->client->dev, "product code is wrong: %#04x",
+ product_code);
+ return -EINVAL;
+ }
+
+ dev_dbg(&ts->client->dev, "firmware release version: %#04x",
+ be16_to_cpu(ts->dwz_info.release_ver));
+
+ return 0;
+}
+
+static int hideep_flash_firmware(struct hideep_ts *ts,
+ const __be32 *ucode, size_t ucode_len)
+{
+ int retry_cnt = 3;
+ int error;
+
+ while (retry_cnt--) {
+ error = hideep_program_nvm(ts, ucode, ucode_len);
+ if (!error) {
+ error = hideep_verify_nvm(ts, ucode, ucode_len);
+ if (!error)
+ return 0;
+ }
+ }
+
+ return error;
+}
+
+static int hideep_update_firmware(struct hideep_ts *ts,
+ const __be32 *ucode, size_t ucode_len)
+{
+ int error, error2;
+
+ dev_dbg(&ts->client->dev, "starting firmware update");
+
+ /* enter program mode */
+ error = hideep_enter_pgm(ts);
+ if (error)
+ return error;
+
+ error = hideep_flash_firmware(ts, ucode, ucode_len);
+ if (error)
+ dev_err(&ts->client->dev,
+ "firmware update failed: %d\n", error);
+ else
+ dev_dbg(&ts->client->dev, "firmware updated successfully\n");
+
+ SW_RESET_IN_PGM(1000);
+
+ error2 = hideep_load_dwz(ts);
+ if (error2)
+ dev_err(&ts->client->dev,
+ "failed to load dwz after firmware update: %d\n",
+ error2);
+
+ return error ?: error2;
+}
+
+static int hideep_power_on(struct hideep_ts *ts)
+{
+ int error = 0;
+
+ error = regulator_enable(ts->vcc_vdd);
+ if (error)
+ dev_err(&ts->client->dev,
+ "failed to enable 'vdd' regulator: %d", error);
+
+ usleep_range(999, 1000);
+
+ error = regulator_enable(ts->vcc_vid);
+ if (error)
+ dev_err(&ts->client->dev,
+ "failed to enable 'vcc_vid' regulator: %d",
+ error);
+
+ msleep(30);
+
+ if (ts->reset_gpio) {
+ gpiod_set_value_cansleep(ts->reset_gpio, 0);
+ } else {
+ error = regmap_write(ts->reg, HIDEEP_RESET_CMD, 0x01);
+ if (error)
+ dev_err(&ts->client->dev,
+ "failed to send 'reset' command: %d\n", error);
+ }
+
+ msleep(50);
+
+ return error;
+}
+
+static void hideep_power_off(void *data)
+{
+ struct hideep_ts *ts = data;
+
+ if (ts->reset_gpio)
+ gpiod_set_value(ts->reset_gpio, 1);
+
+ regulator_disable(ts->vcc_vid);
+ regulator_disable(ts->vcc_vdd);
+}
+
+#define __GET_MT_TOOL_TYPE(type) ((type) == 0x01 ? MT_TOOL_FINGER : MT_TOOL_PEN)
+
+static void hideep_report_slot(struct input_dev *input,
+ const struct hideep_event *event)
+{
+ input_mt_slot(input, event->index & 0x0f);
+ input_mt_report_slot_state(input,
+ __GET_MT_TOOL_TYPE(event->type),
+ !(event->flag & HIDEEP_MT_RELEASED));
+ if (!(event->flag & HIDEEP_MT_RELEASED)) {
+ input_report_abs(input, ABS_MT_POSITION_X,
+ le16_to_cpup(&event->x));
+ input_report_abs(input, ABS_MT_POSITION_Y,
+ le16_to_cpup(&event->y));
+ input_report_abs(input, ABS_MT_PRESSURE,
+ le16_to_cpup(&event->z));
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, event->w);
+ }
+}
+
+static void hideep_parse_and_report(struct hideep_ts *ts)
+{
+ const struct hideep_event *events =
+ (void *)&ts->xfer_buf[HIDEEP_TOUCH_EVENT_INDEX];
+ const u8 *keys = &ts->xfer_buf[HIDEEP_KEY_EVENT_INDEX];
+ int touch_count = ts->xfer_buf[0];
+ int key_count = ts->xfer_buf[1] & 0x0f;
+ int lpm_count = ts->xfer_buf[1] & 0xf0;
+ int i;
+
+ /* get touch event count */
+ dev_dbg(&ts->client->dev, "mt = %d, key = %d, lpm = %02x",
+ touch_count, key_count, lpm_count);
+
+ touch_count = min(touch_count, HIDEEP_MT_MAX);
+ for (i = 0; i < touch_count; i++)
+ hideep_report_slot(ts->input_dev, events + i);
+
+ key_count = min(key_count, HIDEEP_KEY_MAX);
+ for (i = 0; i < key_count; i++) {
+ u8 key_data = keys[i * 2];
+
+ input_report_key(ts->input_dev,
+ ts->key_codes[key_data & HIDEEP_KEY_IDX_MASK],
+ key_data & HIDEEP_KEY_PRESSED_MASK);
+ }
+
+ input_mt_sync_frame(ts->input_dev);
+ input_sync(ts->input_dev);
+}
+
+static irqreturn_t hideep_irq(int irq, void *handle)
+{
+ struct hideep_ts *ts = handle;
+ int error;
+
+ BUILD_BUG_ON(HIDEEP_MAX_EVENT > HIDEEP_XFER_BUF_SIZE);
+
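+ /* The regmap uses 16-bit values, so HIDEEP_MAX_EVENT bytes correspond to half as many registers. */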
+ error = regmap_bulk_read(ts->reg, HIDEEP_EVENT_ADDR,
+ ts->xfer_buf, HIDEEP_MAX_EVENT / 2);
+ if (error) {
+ dev_err(&ts->client->dev, "failed to read events: %d\n", error);
+ goto out;
+ }
+
+ hideep_parse_and_report(ts);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int hideep_get_axis_info(struct hideep_ts *ts)
+{
+ __le16 val[2];
+ int error;
+
+ error = regmap_bulk_read(ts->reg, 0x28, val, ARRAY_SIZE(val));
+ if (error)
+ return error;
+
+ ts->prop.max_x = le16_to_cpup(val);
+ ts->prop.max_y = le16_to_cpup(val + 1);
+
+ dev_dbg(&ts->client->dev, "X: %d, Y: %d",
+ ts->prop.max_x, ts->prop.max_y);
+
+ return 0;
+}
+
+static int hideep_init_input(struct hideep_ts *ts)
+{
+ struct device *dev = &ts->client->dev;
+ int i;
+ int error;
+
+ ts->input_dev = devm_input_allocate_device(dev);
+ if (!ts->input_dev) {
+ dev_err(dev, "failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ ts->input_dev->name = HIDEEP_TS_NAME;
+ ts->input_dev->id.bustype = BUS_I2C;
+ input_set_drvdata(ts->input_dev, ts);
+
+ input_set_capability(ts->input_dev, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(ts->input_dev, EV_ABS, ABS_MT_POSITION_Y);
+ input_set_abs_params(ts->input_dev, ABS_MT_PRESSURE, 0, 65535, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TOOL_TYPE,
+ 0, MT_TOOL_MAX, 0, 0);
+ touchscreen_parse_properties(ts->input_dev, true, &ts->prop);
+
+ if (ts->prop.max_x == 0 || ts->prop.max_y == 0) {
+ error = hideep_get_axis_info(ts);
+ if (error)
+ return error;
+ }
+
+ error = input_mt_init_slots(ts->input_dev, HIDEEP_MT_MAX,
+ INPUT_MT_DIRECT);
+ if (error)
+ return error;
+
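+ /* Passing a NULL buffer with zero count queries the number of keycodes in the property. */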
+ ts->key_num = device_property_read_u32_array(dev, "linux,keycodes",
+ NULL, 0);
+ if (ts->key_num > HIDEEP_KEY_MAX) {
+ dev_err(dev, "too many keys defined: %d\n",
+ ts->key_num);
+ return -EINVAL;
+ }
+
+ if (ts->key_num <= 0) {
+ dev_dbg(dev,
+ "missing or malformed 'linux,keycodes' property\n");
+ } else {
+ error = device_property_read_u32_array(dev, "linux,keycodes",
+ ts->key_codes,
+ ts->key_num);
+ if (error) {
+ dev_dbg(dev, "failed to read keymap: %d", error);
+ return error;
+ }
+
+ if (ts->key_num) {
+ ts->input_dev->keycode = ts->key_codes;
+ ts->input_dev->keycodesize = sizeof(ts->key_codes[0]);
+ ts->input_dev->keycodemax = ts->key_num;
+
+ for (i = 0; i < ts->key_num; i++)
+ input_set_capability(ts->input_dev, EV_KEY,
+ ts->key_codes[i]);
+ }
+ }
+
+ error = input_register_device(ts->input_dev);
+ if (error) {
+ dev_err(dev, "failed to register input device: %d", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static ssize_t hideep_update_fw(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hideep_ts *ts = i2c_get_clientdata(client);
+ const struct firmware *fw_entry;
+ char *fw_name;
+ int mode;
+ int error;
+
+ error = kstrtoint(buf, 0, &mode);
+ if (error)
+ return error;
+
+ fw_name = kasprintf(GFP_KERNEL, "hideep_ts_%04x.bin",
+ be16_to_cpu(ts->dwz_info.product_id));
+ if (!fw_name)
+ return -ENOMEM;
+
+ error = request_firmware(&fw_entry, fw_name, dev);
+ if (error) {
+ dev_err(dev, "failed to request firmware %s: %d",
+ fw_name, error);
+ goto out_free_fw_name;
+ }
+
+ if (fw_entry->size % sizeof(__be32)) {
+ dev_err(dev, "invalid firmware size %zu\n", fw_entry->size);
+ error = -EINVAL;
+ goto out_release_fw;
+ }
+
+ if (fw_entry->size > ts->fw_size) {
+ dev_err(dev, "fw size (%zu) is too big (memory size %d)\n",
+ fw_entry->size, ts->fw_size);
+ error = -EFBIG;
+ goto out_release_fw;
+ }
+
+ mutex_lock(&ts->dev_mutex);
+ disable_irq(client->irq);
+
+ error = hideep_update_firmware(ts, (const __be32 *)fw_entry->data,
+ fw_entry->size);
+
+ enable_irq(client->irq);
+ mutex_unlock(&ts->dev_mutex);
+
+out_release_fw:
+ release_firmware(fw_entry);
+out_free_fw_name:
+ kfree(fw_name);
+
+ return error ?: count;
+}
+
+static ssize_t hideep_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hideep_ts *ts = i2c_get_clientdata(client);
+ ssize_t len;
+
+ mutex_lock(&ts->dev_mutex);
+ len = scnprintf(buf, PAGE_SIZE, "%04x\n",
+ be16_to_cpu(ts->dwz_info.release_ver));
+ mutex_unlock(&ts->dev_mutex);
+
+ return len;
+}
+
+static ssize_t hideep_product_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hideep_ts *ts = i2c_get_clientdata(client);
+ ssize_t len;
+
+ mutex_lock(&ts->dev_mutex);
+ len = scnprintf(buf, PAGE_SIZE, "%04x\n",
+ be16_to_cpu(ts->dwz_info.product_id));
+ mutex_unlock(&ts->dev_mutex);
+
+ return len;
+}
+
+static DEVICE_ATTR(version, 0444, hideep_fw_version_show, NULL);
+static DEVICE_ATTR(product_id, 0444, hideep_product_id_show, NULL);
+static DEVICE_ATTR(update_fw, 0200, NULL, hideep_update_fw);
+
+static struct attribute *hideep_ts_sysfs_entries[] = {
+ &dev_attr_version.attr,
+ &dev_attr_product_id.attr,
+ &dev_attr_update_fw.attr,
+ NULL,
+};
+
+static const struct attribute_group hideep_ts_attr_group = {
+ .attrs = hideep_ts_sysfs_entries,
+};
+
+static int __maybe_unused hideep_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hideep_ts *ts = i2c_get_clientdata(client);
+
+ disable_irq(client->irq);
+ hideep_power_off(ts);
+
+ return 0;
+}
+
+static int __maybe_unused hideep_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hideep_ts *ts = i2c_get_clientdata(client);
+ int error;
+
+ error = hideep_power_on(ts);
+ if (error) {
+ dev_err(&client->dev, "power on failed");
+ return error;
+ }
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(hideep_pm_ops, hideep_suspend, hideep_resume);
+
+static const struct regmap_config hideep_regmap_config = {
+ .reg_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_bits = 16,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .max_register = 0xffff,
+};
+
+static int hideep_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct hideep_ts *ts;
+ int error;
+
+ /* check i2c bus */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "I2C functionality check failed");
+ return -ENODEV;
+ }
+
+ if (client->irq <= 0) {
+ dev_err(&client->dev, "missing irq: %d\n", client->irq);
+ return -EINVAL;
+ }
+
+ ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+ mutex_init(&ts->dev_mutex);
+
+ ts->reg = devm_regmap_init_i2c(client, &hideep_regmap_config);
+ if (IS_ERR(ts->reg)) {
+ error = PTR_ERR(ts->reg);
+ dev_err(&client->dev,
+ "failed to initialize regmap: %d\n", error);
+ return error;
+ }
+
+ ts->vcc_vdd = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(ts->vcc_vdd))
+ return PTR_ERR(ts->vcc_vdd);
+
+ ts->vcc_vid = devm_regulator_get(&client->dev, "vid");
+ if (IS_ERR(ts->vcc_vid))
+ return PTR_ERR(ts->vcc_vid);
+
+ ts->reset_gpio = devm_gpiod_get_optional(&client->dev,
+ "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts->reset_gpio))
+ return PTR_ERR(ts->reset_gpio);
+
+ error = hideep_power_on(ts);
+ if (error) {
+ dev_err(&client->dev, "power on failed: %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action_or_reset(&client->dev, hideep_power_off, ts);
+ if (error)
+ return error;
+
+ error = hideep_load_dwz(ts);
+ if (error) {
+ dev_err(&client->dev, "failed to load dwz: %d", error);
+ return error;
+ }
+
+ error = hideep_init_input(ts);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, hideep_irq, IRQF_ONESHOT,
+ client->name, ts);
+ if (error) {
+ dev_err(&client->dev, "failed to request irq %d: %d\n",
+ client->irq, error);
+ return error;
+ }
+
+ error = devm_device_add_group(&client->dev, &hideep_ts_attr_group);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to add sysfs attributes: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id hideep_i2c_id[] = {
+ { HIDEEP_I2C_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, hideep_i2c_id);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id hideep_acpi_id[] = {
+ { "HIDP0001", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hideep_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id hideep_match_table[] = {
+ { .compatible = "hideep,hideep-ts" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hideep_match_table);
+#endif
+
+static struct i2c_driver hideep_driver = {
+ .driver = {
+ .name = HIDEEP_I2C_NAME,
+ .of_match_table = of_match_ptr(hideep_match_table),
+ .acpi_match_table = ACPI_PTR(hideep_acpi_id),
+ .pm = &hideep_pm_ops,
+ },
+ .id_table = hideep_i2c_id,
+ .probe = hideep_probe,
+};
+
+module_i2c_driver(hideep_driver);
+
+MODULE_DESCRIPTION("Driver for HiDeep Touchscreen Controller");
+MODULE_AUTHOR("anthony.kim@hideep.com");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 05108c2fea93..6892f0e28918 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -1433,13 +1433,6 @@ static const struct attribute_group mip4_attr_group = {
.attrs = mip4_attrs,
};
-static void mip4_sysfs_remove(void *_data)
-{
- struct mip4_ts *ts = _data;
-
- sysfs_remove_group(&ts->client->dev.kobj, &mip4_attr_group);
-}
-
static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct mip4_ts *ts;
@@ -1535,21 +1528,13 @@ static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
return error;
}
- error = sysfs_create_group(&client->dev.kobj, &mip4_attr_group);
+ error = devm_device_add_group(&client->dev, &mip4_attr_group);
if (error) {
dev_err(&client->dev,
"Failed to create sysfs attribute group: %d\n", error);
return error;
}
- error = devm_add_action(&client->dev, mip4_sysfs_remove, ts);
- if (error) {
- mip4_sysfs_remove(ts);
- dev_err(&client->dev,
- "Failed to install sysfs remoce action: %d\n", error);
- return error;
- }
-
return 0;
}
diff --git a/drivers/input/touchscreen/mxs-lradc-ts.c b/drivers/input/touchscreen/mxs-lradc-ts.c
index 3707e927f770..c850b517854e 100644
--- a/drivers/input/touchscreen/mxs-lradc-ts.c
+++ b/drivers/input/touchscreen/mxs-lradc-ts.c
@@ -584,7 +584,7 @@ static void mxs_lradc_ts_hw_init(struct mxs_lradc_ts *ts)
static int mxs_lradc_ts_register(struct mxs_lradc_ts *ts)
{
- struct input_dev *input = ts->ts_input;
+ struct input_dev *input;
struct device *dev = ts->dev;
input = devm_input_allocate_device(dev);
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index 4f1d3fd5d412..100538d64fff 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -943,13 +943,6 @@ static const struct attribute_group raydium_i2c_attribute_group = {
.attrs = raydium_i2c_attributes,
};
-static void raydium_i2c_remove_sysfs_group(void *_data)
-{
- struct raydium_data *ts = _data;
-
- sysfs_remove_group(&ts->client->dev.kobj, &raydium_i2c_attribute_group);
-}
-
static int raydium_i2c_power_on(struct raydium_data *ts)
{
int error;
@@ -1120,7 +1113,7 @@ static int raydium_i2c_probe(struct i2c_client *client,
return error;
}
- error = sysfs_create_group(&client->dev.kobj,
+ error = devm_device_add_group(&client->dev,
&raydium_i2c_attribute_group);
if (error) {
dev_err(&client->dev, "failed to create sysfs attributes: %d\n",
@@ -1128,15 +1121,6 @@ static int raydium_i2c_probe(struct i2c_client *client,
return error;
}
- error = devm_add_action(&client->dev,
- raydium_i2c_remove_sysfs_group, ts);
- if (error) {
- raydium_i2c_remove_sysfs_group(ts);
- dev_err(&client->dev,
- "Failed to add sysfs cleanup action: %d\n", error);
- return error;
- }
-
return 0;
}
diff --git a/drivers/input/touchscreen/rohm_bu21023.c b/drivers/input/touchscreen/rohm_bu21023.c
index eeaf6ff03597..bda0500c9b57 100644
--- a/drivers/input/touchscreen/rohm_bu21023.c
+++ b/drivers/input/touchscreen/rohm_bu21023.c
@@ -1103,13 +1103,6 @@ static void rohm_ts_close(struct input_dev *input_dev)
ts->initialized = false;
}
-static void rohm_ts_remove_sysfs_group(void *_dev)
-{
- struct device *dev = _dev;
-
- sysfs_remove_group(&dev->kobj, &rohm_ts_attr_group);
-}
-
static int rohm_bu21023_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1180,20 +1173,12 @@ static int rohm_bu21023_i2c_probe(struct i2c_client *client,
return error;
}
- error = sysfs_create_group(&dev->kobj, &rohm_ts_attr_group);
+ error = devm_device_add_group(dev, &rohm_ts_attr_group);
if (error) {
dev_err(dev, "failed to create sysfs group: %d\n", error);
return error;
}
- error = devm_add_action(dev, rohm_ts_remove_sysfs_group, dev);
- if (error) {
- rohm_ts_remove_sysfs_group(dev);
- dev_err(dev, "Failed to add sysfs cleanup action: %d\n",
- error);
- return error;
- }
-
return error;
}
diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
new file mode 100644
index 000000000000..26b1cb8a88ec
--- /dev/null
+++ b/drivers/input/touchscreen/s6sy761.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ * Author: Andi Shyti <andi.shyti@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Samsung S6SY761 Touchscreen device driver
+ */
+
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+/* commands */
+#define S6SY761_SENSE_ON 0x10
+#define S6SY761_SENSE_OFF 0x11
+#define S6SY761_TOUCH_FUNCTION 0x30 /* R/W for get/set */
+#define S6SY761_FIRMWARE_INTEGRITY 0x21
+#define S6SY761_PANEL_INFO 0x23
+#define S6SY761_DEVICE_ID 0x52
+#define S6SY761_BOOT_STATUS 0x55
+#define S6SY761_READ_ONE_EVENT 0x60
+#define S6SY761_READ_ALL_EVENT 0x61
+#define S6SY761_CLEAR_EVENT_STACK 0x62
+#define S6SY761_APPLICATION_MODE 0xe4
+
+/* events */
+#define S6SY761_EVENT_INFO 0x02
+#define S6SY761_EVENT_VENDOR_INFO 0x07
+
+/* info */
+#define S6SY761_INFO_BOOT_COMPLETE 0x00
+
+/* firmware status */
+#define S6SY761_FW_OK 0x80
+
+/*
+ * The functionality masks below are listed for reference only; none of
+ * them work on the device used for development, so this driver does not
+ * use them yet.
+ */
+/* touchscreen functionalities */
+#define S6SY761_MASK_TOUCH BIT(0)
+#define S6SY761_MASK_HOVER BIT(1)
+#define S6SY761_MASK_COVER BIT(2)
+#define S6SY761_MASK_GLOVE BIT(3)
+#define S6SY761_MASK_STYLUS BIT(4)
+#define S6SY761_MASK_PALM BIT(5)
+#define S6SY761_MASK_WET BIT(6)
+#define S6SY761_MASK_PROXIMITY BIT(7)
+
+/* boot status (BS) */
+#define S6SY761_BS_BOOT_LOADER 0x10
+#define S6SY761_BS_APPLICATION 0x20
+
+/* event id */
+#define S6SY761_EVENT_ID_COORDINATE 0x00
+#define S6SY761_EVENT_ID_STATUS 0x01
+
+/* event register masks */
+#define S6SY761_MASK_TOUCH_STATE 0xc0 /* byte 0 */
+#define S6SY761_MASK_TID 0x3c
+#define S6SY761_MASK_EID 0x03
+#define S6SY761_MASK_X 0xf0 /* byte 3 */
+#define S6SY761_MASK_Y 0x0f
+#define S6SY761_MASK_Z 0x3f /* byte 6 */
+#define S6SY761_MASK_LEFT_EVENTS 0x3f /* byte 7 */
+#define S6SY761_MASK_TOUCH_TYPE 0xc0 /* MSB in byte 6, LSB in byte 7 */
+
+/* event touch state values */
+#define S6SY761_TS_NONE 0x00
+#define S6SY761_TS_PRESS 0x01
+#define S6SY761_TS_MOVE 0x02
+#define S6SY761_TS_RELEASE 0x03
+
+/* application modes */
+#define S6SY761_APP_NORMAL 0x0
+#define S6SY761_APP_LOW_POWER 0x1
+#define S6SY761_APP_TEST 0x2
+#define S6SY761_APP_FLASH 0x3
+#define S6SY761_APP_SLEEP 0x4
+
+#define S6SY761_EVENT_SIZE 8
+#define S6SY761_EVENT_COUNT 32
+#define S6SY761_DEVID_SIZE 3
+#define S6SY761_PANEL_ID_SIZE 11
+#define S6SY761_TS_STATUS_SIZE 5
+#define S6SY761_MAX_FINGERS 10
+
+#define S6SY761_DEV_NAME "s6sy761"
+
+enum s6sy761_regulators {
+ S6SY761_REGULATOR_VDD,
+ S6SY761_REGULATOR_AVDD,
+};
+
+struct s6sy761_data {
+ struct i2c_client *client;
+ struct regulator_bulk_data regulators[2];
+ struct input_dev *input;
+ struct touchscreen_properties prop;
+
+ u8 data[S6SY761_EVENT_SIZE * S6SY761_EVENT_COUNT];
+
+ u16 devid;
+ u8 tx_channel;
+};
+
+/*
+ * We can't simply use i2c_smbus_read_i2c_block_data because the transfer
+ * can exceed the SMBus block size limit (I2C_SMBUS_BLOCK_MAX, 32 bytes).
+ */
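+/*
+ * The first event has already been read into the start of sdata->data by
+ * the interrupt handler; the remaining events are appended after it.
+ */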
+static int s6sy761_read_events(struct s6sy761_data *sdata, u16 n_events)
+{
+ u8 cmd = S6SY761_READ_ALL_EVENT;
+ struct i2c_msg msgs[2] = {
+ {
+ .addr = sdata->client->addr,
+ .len = 1,
+ .buf = &cmd,
+ },
+ {
+ .addr = sdata->client->addr,
+ .flags = I2C_M_RD,
+ .len = (n_events * S6SY761_EVENT_SIZE),
+ .buf = sdata->data + S6SY761_EVENT_SIZE,
+ },
+ };
+ int ret;
+
+ ret = i2c_transfer(sdata->client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+
+ return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
+}
+
+static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
+ u8 *event, u8 tid)
+{
+ u8 major = event[4];
+ u8 minor = event[5];
+ u8 z = event[6] & S6SY761_MASK_Z;
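+ /* X/Y: high 8 bits in bytes 1 and 2, low nibble packed in byte 3 */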
+ u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4);
+ u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y);
+
+ input_mt_slot(sdata->input, tid);
+
+ input_mt_report_slot_state(sdata->input, MT_TOOL_FINGER, true);
+ input_report_abs(sdata->input, ABS_MT_POSITION_X, x);
+ input_report_abs(sdata->input, ABS_MT_POSITION_Y, y);
+ input_report_abs(sdata->input, ABS_MT_TOUCH_MAJOR, major);
+ input_report_abs(sdata->input, ABS_MT_TOUCH_MINOR, minor);
+ input_report_abs(sdata->input, ABS_MT_PRESSURE, z);
+
+ input_sync(sdata->input);
+}
+
+static void s6sy761_report_release(struct s6sy761_data *sdata,
+ u8 *event, u8 tid)
+{
+ input_mt_slot(sdata->input, tid);
+ input_mt_report_slot_state(sdata->input, MT_TOOL_FINGER, false);
+
+ input_sync(sdata->input);
+}
+
+static void s6sy761_handle_coordinates(struct s6sy761_data *sdata, u8 *event)
+{
+ u8 tid;
+ u8 touch_state;
+
+ if (unlikely(!(event[0] & S6SY761_MASK_TID)))
+ return;
+
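+ /* Track IDs are reported starting from 1 (0 means no contact); convert to a 0-based slot. */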
+ tid = ((event[0] & S6SY761_MASK_TID) >> 2) - 1;
+ touch_state = (event[0] & S6SY761_MASK_TOUCH_STATE) >> 6;
+
+ switch (touch_state) {
+
+ case S6SY761_TS_NONE:
+ break;
+ case S6SY761_TS_RELEASE:
+ s6sy761_report_release(sdata, event, tid);
+ break;
+ case S6SY761_TS_PRESS:
+ case S6SY761_TS_MOVE:
+ s6sy761_report_coordinates(sdata, event, tid);
+ break;
+ }
+}
+
+static void s6sy761_handle_events(struct s6sy761_data *sdata, u8 n_events)
+{
+ int i;
+
+ for (i = 0; i < n_events; i++) {
+ u8 *event = &sdata->data[i * S6SY761_EVENT_SIZE];
+ u8 event_id = event[0] & S6SY761_MASK_EID;
+
+ if (!event[0])
+ return;
+
+ switch (event_id) {
+
+ case S6SY761_EVENT_ID_COORDINATE:
+ s6sy761_handle_coordinates(sdata, event);
+ break;
+
+ case S6SY761_EVENT_ID_STATUS:
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+static irqreturn_t s6sy761_irq_handler(int irq, void *dev)
+{
+ struct s6sy761_data *sdata = dev;
+ int ret;
+ u8 n_events;
+
+ ret = i2c_smbus_read_i2c_block_data(sdata->client,
+ S6SY761_READ_ONE_EVENT,
+ S6SY761_EVENT_SIZE,
+ sdata->data);
+ if (ret < 0) {
+ dev_err(&sdata->client->dev, "failed to read events\n");
+ return IRQ_HANDLED;
+ }
+
+ if (!sdata->data[0])
+ return IRQ_HANDLED;
+
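+ /* Byte 7 of the first event carries the number of additional queued events. */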
+ n_events = sdata->data[7] & S6SY761_MASK_LEFT_EVENTS;
+ if (unlikely(n_events > S6SY761_EVENT_COUNT - 1))
+ return IRQ_HANDLED;
+
+ if (n_events) {
+ ret = s6sy761_read_events(sdata, n_events);
+ if (ret < 0) {
+ dev_err(&sdata->client->dev, "failed to read events\n");
+ return IRQ_HANDLED;
+ }
+ }
+
+ s6sy761_handle_events(sdata, n_events + 1);
+
+ return IRQ_HANDLED;
+}
+
+static int s6sy761_input_open(struct input_dev *dev)
+{
+ struct s6sy761_data *sdata = input_get_drvdata(dev);
+
+ return i2c_smbus_write_byte(sdata->client, S6SY761_SENSE_ON);
+}
+
+static void s6sy761_input_close(struct input_dev *dev)
+{
+ struct s6sy761_data *sdata = input_get_drvdata(dev);
+ int ret;
+
+ ret = i2c_smbus_write_byte(sdata->client, S6SY761_SENSE_OFF);
+ if (ret)
+ dev_err(&sdata->client->dev, "failed to turn off sensing\n");
+}
+
+static ssize_t s6sy761_sysfs_devid(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct s6sy761_data *sdata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%#x\n", sdata->devid);
+}
+
+static DEVICE_ATTR(devid, 0444, s6sy761_sysfs_devid, NULL);
+
+static struct attribute *s6sy761_sysfs_attrs[] = {
+ &dev_attr_devid.attr,
+ NULL
+};
+
+static struct attribute_group s6sy761_attribute_group = {
+ .attrs = s6sy761_sysfs_attrs
+};
+
+static int s6sy761_power_on(struct s6sy761_data *sdata)
+{
+ u8 buffer[S6SY761_EVENT_SIZE];
+ u8 event;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(sdata->regulators),
+ sdata->regulators);
+ if (ret)
+ return ret;
+
+ msleep(140);
+
+ /* double check whether the touch is functional */
+ ret = i2c_smbus_read_i2c_block_data(sdata->client,
+ S6SY761_READ_ONE_EVENT,
+ S6SY761_EVENT_SIZE,
+ buffer);
+ if (ret < 0)
+ return ret;
+
+ event = (buffer[0] >> 2) & 0xf;
+
+ if ((event != S6SY761_EVENT_INFO &&
+ event != S6SY761_EVENT_VENDOR_INFO) ||
+ buffer[1] != S6SY761_INFO_BOOT_COMPLETE) {
+ return -ENODEV;
+ }
+
+ ret = i2c_smbus_read_byte_data(sdata->client, S6SY761_BOOT_STATUS);
+ if (ret < 0)
+ return ret;
+
+ /* for some reason the device might be stuck in the bootloader */
+ if (ret != S6SY761_BS_APPLICATION)
+ return -ENODEV;
+
+ /* enable touch functionality */
+ ret = i2c_smbus_write_word_data(sdata->client,
+ S6SY761_TOUCH_FUNCTION,
+ S6SY761_MASK_TOUCH);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int s6sy761_hw_init(struct s6sy761_data *sdata,
+ unsigned int *max_x, unsigned int *max_y)
+{
+ u8 buffer[S6SY761_PANEL_ID_SIZE]; /* larger read size */
+ int ret;
+
+ ret = s6sy761_power_on(sdata);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_read_i2c_block_data(sdata->client,
+ S6SY761_DEVICE_ID,
+ S6SY761_DEVID_SIZE,
+ buffer);
+ if (ret < 0)
+ return ret;
+
+ sdata->devid = get_unaligned_be16(buffer + 1);
+
+ ret = i2c_smbus_read_i2c_block_data(sdata->client,
+ S6SY761_PANEL_INFO,
+ S6SY761_PANEL_ID_SIZE,
+ buffer);
+ if (ret < 0)
+ return ret;
+
+ *max_x = get_unaligned_be16(buffer);
+ *max_y = get_unaligned_be16(buffer + 2);
+
+ /* if no tx channels defined, at least keep one */
+ sdata->tx_channel = max_t(u8, buffer[8], 1);
+
+ ret = i2c_smbus_read_byte_data(sdata->client,
+ S6SY761_FIRMWARE_INTEGRITY);
+ if (ret < 0)
+ return ret;
+ else if (ret != S6SY761_FW_OK)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void s6sy761_power_off(void *data)
+{
+ struct s6sy761_data *sdata = data;
+
+ disable_irq(sdata->client->irq);
+ regulator_bulk_disable(ARRAY_SIZE(sdata->regulators),
+ sdata->regulators);
+}
+
+static int s6sy761_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct s6sy761_data *sdata;
+ unsigned int max_x, max_y;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -ENODEV;
+
+ sdata = devm_kzalloc(&client->dev, sizeof(*sdata), GFP_KERNEL);
+ if (!sdata)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, sdata);
+ sdata->client = client;
+
+ sdata->regulators[S6SY761_REGULATOR_VDD].supply = "vdd";
+ sdata->regulators[S6SY761_REGULATOR_AVDD].supply = "avdd";
+ err = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(sdata->regulators),
+ sdata->regulators);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(&client->dev, s6sy761_power_off, sdata);
+ if (err)
+ return err;
+
+ err = s6sy761_hw_init(sdata, &max_x, &max_y);
+ if (err)
+ return err;
+
+ sdata->input = devm_input_allocate_device(&client->dev);
+ if (!sdata->input)
+ return -ENOMEM;
+
+ sdata->input->name = S6SY761_DEV_NAME;
+ sdata->input->id.bustype = BUS_I2C;
+ sdata->input->open = s6sy761_input_open;
+ sdata->input->close = s6sy761_input_close;
+
+ input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+ input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
+ input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0);
+ input_set_abs_params(sdata->input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+
+ touchscreen_parse_properties(sdata->input, true, &sdata->prop);
+
+ if (!input_abs_get_max(sdata->input, ABS_X) ||
+ !input_abs_get_max(sdata->input, ABS_Y)) {
+ dev_warn(&client->dev, "the axes have not been set\n");
+ }
+
+ err = input_mt_init_slots(sdata->input, sdata->tx_channel,
+ INPUT_MT_DIRECT);
+ if (err)
+ return err;
+
+ input_set_drvdata(sdata->input, sdata);
+
+ err = input_register_device(sdata->input);
+ if (err)
+ return err;
+
+ err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ s6sy761_irq_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "s6sy761_irq", sdata);
+ if (err)
+ return err;
+
+ err = devm_device_add_group(&client->dev, &s6sy761_attribute_group);
+ if (err)
+ return err;
+
+ pm_runtime_enable(&client->dev);
+
+ return 0;
+}
+
+static int s6sy761_remove(struct i2c_client *client)
+{
+ pm_runtime_disable(&client->dev);
+
+ return 0;
+}
+
+static int __maybe_unused s6sy761_runtime_suspend(struct device *dev)
+{
+ struct s6sy761_data *sdata = dev_get_drvdata(dev);
+
+ return i2c_smbus_write_byte_data(sdata->client,
+ S6SY761_APPLICATION_MODE, S6SY761_APP_SLEEP);
+}
+
+static int __maybe_unused s6sy761_runtime_resume(struct device *dev)
+{
+ struct s6sy761_data *sdata = dev_get_drvdata(dev);
+
+ return i2c_smbus_write_byte_data(sdata->client,
+ S6SY761_APPLICATION_MODE, S6SY761_APP_NORMAL);
+}
+
+static int __maybe_unused s6sy761_suspend(struct device *dev)
+{
+ struct s6sy761_data *sdata = dev_get_drvdata(dev);
+
+ s6sy761_power_off(sdata);
+
+ return 0;
+}
+
+static int __maybe_unused s6sy761_resume(struct device *dev)
+{
+ struct s6sy761_data *sdata = dev_get_drvdata(dev);
+
+ enable_irq(sdata->client->irq);
+
+ return s6sy761_power_on(sdata);
+}
+
+static const struct dev_pm_ops s6sy761_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(s6sy761_suspend, s6sy761_resume)
+ SET_RUNTIME_PM_OPS(s6sy761_runtime_suspend,
+ s6sy761_runtime_resume, NULL)
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id s6sy761_of_match[] = {
+ { .compatible = "samsung,s6sy761", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, s6sy761_of_match);
+#endif
+
+static const struct i2c_device_id s6sy761_id[] = {
+ { "s6sy761", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, s6sy761_id);
+
+static struct i2c_driver s6sy761_driver = {
+ .driver = {
+ .name = S6SY761_DEV_NAME,
+ .of_match_table = of_match_ptr(s6sy761_of_match),
+ .pm = &s6sy761_pm_ops,
+ },
+ .probe = s6sy761_probe,
+ .remove = s6sy761_remove,
+ .id_table = s6sy761_id,
+};
+
+module_i2c_driver(s6sy761_driver);
+
+MODULE_AUTHOR("Andi Shyti <andi.shyti@samsung.com>");
+MODULE_DESCRIPTION("Samsung S6SY761 Touch Screen");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index be5615c6bf8f..d5dfa4053bbf 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -29,7 +29,6 @@
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/platform_data/st1232_pdata.h>
#define ST1232_TS_NAME "st1232-ts"
@@ -152,10 +151,9 @@ static void st1232_ts_power(struct st1232_ts_data *ts, bool poweron)
}
static int st1232_ts_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct st1232_ts_data *ts;
- struct st1232_pdata *pdata = dev_get_platdata(&client->dev);
struct input_dev *input_dev;
int error;
@@ -180,13 +178,7 @@ static int st1232_ts_probe(struct i2c_client *client,
ts->client = client;
ts->input_dev = input_dev;
- if (pdata)
- ts->reset_gpio = pdata->reset_gpio;
- else if (client->dev.of_node)
- ts->reset_gpio = of_get_gpio(client->dev.of_node, 0);
- else
- ts->reset_gpio = -ENODEV;
-
+ ts->reset_gpio = of_get_gpio(client->dev.of_node, 0);
if (gpio_is_valid(ts->reset_gpio)) {
error = devm_gpio_request(&client->dev, ts->reset_gpio, NULL);
if (error) {
@@ -281,13 +273,11 @@ static const struct i2c_device_id st1232_ts_id[] = {
};
MODULE_DEVICE_TABLE(i2c, st1232_ts_id);
-#ifdef CONFIG_OF
static const struct of_device_id st1232_ts_dt_ids[] = {
{ .compatible = "sitronix,st1232", },
{ }
};
MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids);
-#endif
static struct i2c_driver st1232_ts_driver = {
.probe = st1232_ts_probe,
@@ -295,7 +285,7 @@ static struct i2c_driver st1232_ts_driver = {
.id_table = st1232_ts_id,
.driver = {
.name = ST1232_TS_NAME,
- .of_match_table = of_match_ptr(st1232_ts_dt_ids),
+ .of_match_table = st1232_ts_dt_ids,
.pm = &st1232_ts_pm_ops,
},
};
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index 8c6c6178ec12..c12d01899939 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -725,8 +725,7 @@ static int stmfts_probe(struct i2c_client *client,
}
}
- err = sysfs_create_group(&sdata->client->dev.kobj,
- &stmfts_attribute_group);
+ err = devm_device_add_group(&client->dev, &stmfts_attribute_group);
if (err)
return err;
@@ -738,7 +737,6 @@ static int stmfts_probe(struct i2c_client *client,
static int stmfts_remove(struct i2c_client *client)
{
pm_runtime_disable(&client->dev);
- sysfs_remove_group(&client->dev.kobj, &stmfts_attribute_group);
return 0;
}
diff --git a/drivers/input/touchscreen/tsc2007_iio.c b/drivers/input/touchscreen/tsc2007_iio.c
index 27b25a9fce83..e27a956f5f2b 100644
--- a/drivers/input/touchscreen/tsc2007_iio.c
+++ b/drivers/input/touchscreen/tsc2007_iio.c
@@ -104,7 +104,6 @@ static int tsc2007_read_raw(struct iio_dev *indio_dev,
static const struct iio_info tsc2007_iio_info = {
.read_raw = tsc2007_read_raw,
- .driver_module = THIS_MODULE,
};
int tsc2007_iio_configure(struct tsc2007 *ts)
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index abf27578beb1..e0fde590df8e 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -202,9 +202,9 @@ out:
return IRQ_HANDLED;
}
-static void tsc200x_penup_timer(unsigned long data)
+static void tsc200x_penup_timer(struct timer_list *t)
{
- struct tsc200x *ts = (struct tsc200x *)data;
+ struct tsc200x *ts = from_timer(ts, t, penup_timer);
unsigned long flags;
spin_lock_irqsave(&ts->lock, flags);
@@ -506,7 +506,7 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
mutex_init(&ts->mutex);
spin_lock_init(&ts->lock);
- setup_timer(&ts->penup_timer, tsc200x_penup_timer, (unsigned long)ts);
+ timer_setup(&ts->penup_timer, tsc200x_penup_timer, 0);
INIT_DELAYED_WORK(&ts->esd_work, tsc200x_esd_work);
diff --git a/drivers/input/touchscreen/w90p910_ts.c b/drivers/input/touchscreen/w90p910_ts.c
index da6004e97753..638c1d78ca3a 100644
--- a/drivers/input/touchscreen/w90p910_ts.c
+++ b/drivers/input/touchscreen/w90p910_ts.c
@@ -146,9 +146,9 @@ static irqreturn_t w90p910_ts_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void w90p910_check_pen_up(unsigned long data)
+static void w90p910_check_pen_up(struct timer_list *t)
{
- struct w90p910_ts *w90p910_ts = (struct w90p910_ts *) data;
+ struct w90p910_ts *w90p910_ts = from_timer(w90p910_ts, t, timer);
unsigned long flags;
spin_lock_irqsave(&w90p910_ts->lock, flags);
@@ -232,8 +232,7 @@ static int w90x900ts_probe(struct platform_device *pdev)
w90p910_ts->input = input_dev;
w90p910_ts->state = TS_IDLE;
spin_lock_init(&w90p910_ts->lock);
- setup_timer(&w90p910_ts->timer, w90p910_check_pen_up,
- (unsigned long)w90p910_ts);
+ timer_setup(&w90p910_ts->timer, w90p910_check_pen_up, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index a9132603ab34..d351efd18f89 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -1106,7 +1106,7 @@ static int wdt87xx_ts_probe(struct i2c_client *client,
return error;
}
- error = sysfs_create_group(&client->dev.kobj, &wdt87xx_attr_group);
+ error = devm_device_add_group(&client->dev, &wdt87xx_attr_group);
if (error) {
dev_err(&client->dev, "create sysfs failed: %d\n", error);
return error;
@@ -1115,13 +1115,6 @@ static int wdt87xx_ts_probe(struct i2c_client *client,
return 0;
}
-static int wdt87xx_ts_remove(struct i2c_client *client)
-{
- sysfs_remove_group(&client->dev.kobj, &wdt87xx_attr_group);
-
- return 0;
-}
-
static int __maybe_unused wdt87xx_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -1179,7 +1172,6 @@ MODULE_DEVICE_TABLE(acpi, wdt87xx_acpi_id);
static struct i2c_driver wdt87xx_driver = {
.probe = wdt87xx_ts_probe,
- .remove = wdt87xx_ts_remove,
.id_table = wdt87xx_dev_id,
.driver = {
.name = WDT87XX_NAME,
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index c9d1c91e1887..fd714ee881f7 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -44,6 +44,7 @@
#include <linux/pm.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
+#include <linux/mfd/wm97xx.h>
#include <linux/workqueue.h>
#include <linux/wm97xx.h>
#include <linux/uaccess.h>
@@ -581,27 +582,85 @@ static void wm97xx_ts_input_close(struct input_dev *idev)
wm->codec->acc_enable(wm, 0);
}
-static int wm97xx_probe(struct device *dev)
+static int wm97xx_register_touch(struct wm97xx *wm)
{
- struct wm97xx *wm;
- struct wm97xx_pdata *pdata = dev_get_platdata(dev);
- int ret = 0, id = 0;
+ struct wm97xx_pdata *pdata = dev_get_platdata(wm->dev);
+ int ret;
- wm = kzalloc(sizeof(struct wm97xx), GFP_KERNEL);
- if (!wm)
+ wm->input_dev = devm_input_allocate_device(wm->dev);
+ if (wm->input_dev == NULL)
return -ENOMEM;
- mutex_init(&wm->codec_mutex);
- wm->dev = dev;
- dev_set_drvdata(dev, wm);
- wm->ac97 = to_ac97_t(dev);
+ /* set up touch configuration */
+ wm->input_dev->name = "wm97xx touchscreen";
+ wm->input_dev->phys = "wm97xx";
+ wm->input_dev->open = wm97xx_ts_input_open;
+ wm->input_dev->close = wm97xx_ts_input_close;
+
+ __set_bit(EV_ABS, wm->input_dev->evbit);
+ __set_bit(EV_KEY, wm->input_dev->evbit);
+ __set_bit(BTN_TOUCH, wm->input_dev->keybit);
+
+ input_set_abs_params(wm->input_dev, ABS_X, abs_x[0], abs_x[1],
+ abs_x[2], 0);
+ input_set_abs_params(wm->input_dev, ABS_Y, abs_y[0], abs_y[1],
+ abs_y[2], 0);
+ input_set_abs_params(wm->input_dev, ABS_PRESSURE, abs_p[0], abs_p[1],
+ abs_p[2], 0);
+
+ input_set_drvdata(wm->input_dev, wm);
+ wm->input_dev->dev.parent = wm->dev;
+
+ ret = input_register_device(wm->input_dev);
+ if (ret)
+ return ret;
+
+ /*
+ * register our extended touch device (for machine specific
+ * extensions)
+ */
+ wm->touch_dev = platform_device_alloc("wm97xx-touch", -1);
+ if (!wm->touch_dev) {
+ ret = -ENOMEM;
+ goto touch_err;
+ }
+ platform_set_drvdata(wm->touch_dev, wm);
+ wm->touch_dev->dev.parent = wm->dev;
+ wm->touch_dev->dev.platform_data = pdata;
+ ret = platform_device_add(wm->touch_dev);
+ if (ret < 0)
+ goto touch_reg_err;
+
+ return 0;
+touch_reg_err:
+ platform_device_put(wm->touch_dev);
+touch_err:
+ input_unregister_device(wm->input_dev);
+ wm->input_dev = NULL;
+
+ return ret;
+}
+
+static void wm97xx_unregister_touch(struct wm97xx *wm)
+{
+ platform_device_unregister(wm->touch_dev);
+ input_unregister_device(wm->input_dev);
+ wm->input_dev = NULL;
+}
+
+static int _wm97xx_probe(struct wm97xx *wm)
+{
+ int id = 0;
+
+ mutex_init(&wm->codec_mutex);
+ dev_set_drvdata(wm->dev, wm);
/* check that we have a supported codec */
id = wm97xx_reg_read(wm, AC97_VENDOR_ID1);
if (id != WM97XX_ID1) {
- dev_err(dev, "Device with vendor %04x is not a wm97xx\n", id);
- ret = -ENODEV;
- goto alloc_err;
+ dev_err(wm->dev,
+ "Device with vendor %04x is not a wm97xx\n", id);
+ return -ENODEV;
}
wm->id = wm97xx_reg_read(wm, AC97_VENDOR_ID2);
@@ -629,8 +688,7 @@ static int wm97xx_probe(struct device *dev)
default:
dev_err(wm->dev, "Support for wm97%02x not compiled in.\n",
wm->id & 0xff);
- ret = -ENODEV;
- goto alloc_err;
+ return -ENODEV;
}
/* set up physical characteristics */
@@ -644,79 +702,58 @@ static int wm97xx_probe(struct device *dev)
wm->gpio[4] = wm97xx_reg_read(wm, AC97_GPIO_STATUS);
wm->gpio[5] = wm97xx_reg_read(wm, AC97_MISC_AFE);
- wm->input_dev = input_allocate_device();
- if (wm->input_dev == NULL) {
- ret = -ENOMEM;
- goto alloc_err;
- }
-
- /* set up touch configuration */
- wm->input_dev->name = "wm97xx touchscreen";
- wm->input_dev->phys = "wm97xx";
- wm->input_dev->open = wm97xx_ts_input_open;
- wm->input_dev->close = wm97xx_ts_input_close;
-
- __set_bit(EV_ABS, wm->input_dev->evbit);
- __set_bit(EV_KEY, wm->input_dev->evbit);
- __set_bit(BTN_TOUCH, wm->input_dev->keybit);
-
- input_set_abs_params(wm->input_dev, ABS_X, abs_x[0], abs_x[1],
- abs_x[2], 0);
- input_set_abs_params(wm->input_dev, ABS_Y, abs_y[0], abs_y[1],
- abs_y[2], 0);
- input_set_abs_params(wm->input_dev, ABS_PRESSURE, abs_p[0], abs_p[1],
- abs_p[2], 0);
+ return wm97xx_register_touch(wm);
+}
- input_set_drvdata(wm->input_dev, wm);
- wm->input_dev->dev.parent = dev;
+static void wm97xx_remove_battery(struct wm97xx *wm)
+{
+ platform_device_unregister(wm->battery_dev);
+}
- ret = input_register_device(wm->input_dev);
- if (ret < 0)
- goto dev_alloc_err;
+static int wm97xx_add_battery(struct wm97xx *wm,
+ struct wm97xx_batt_pdata *pdata)
+{
+ int ret;
- /* register our battery device */
wm->battery_dev = platform_device_alloc("wm97xx-battery", -1);
- if (!wm->battery_dev) {
- ret = -ENOMEM;
- goto batt_err;
- }
+ if (!wm->battery_dev)
+ return -ENOMEM;
+
platform_set_drvdata(wm->battery_dev, wm);
- wm->battery_dev->dev.parent = dev;
- wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
+ wm->battery_dev->dev.parent = wm->dev;
+ wm->battery_dev->dev.platform_data = pdata;
ret = platform_device_add(wm->battery_dev);
- if (ret < 0)
- goto batt_reg_err;
+ if (ret)
+ platform_device_put(wm->battery_dev);
- /* register our extended touch device (for machine specific
- * extensions) */
- wm->touch_dev = platform_device_alloc("wm97xx-touch", -1);
- if (!wm->touch_dev) {
- ret = -ENOMEM;
- goto touch_err;
- }
- platform_set_drvdata(wm->touch_dev, wm);
- wm->touch_dev->dev.parent = dev;
- wm->touch_dev->dev.platform_data = pdata;
- ret = platform_device_add(wm->touch_dev);
+ return ret;
+}
+
+static int wm97xx_probe(struct device *dev)
+{
+ struct wm97xx *wm;
+ int ret;
+ struct wm97xx_pdata *pdata = dev_get_platdata(dev);
+
+ wm = devm_kzalloc(dev, sizeof(struct wm97xx), GFP_KERNEL);
+ if (!wm)
+ return -ENOMEM;
+
+ wm->dev = dev;
+ wm->ac97 = to_ac97_t(dev);
+
+ ret = _wm97xx_probe(wm);
+ if (ret)
+ return ret;
+
+ ret = wm97xx_add_battery(wm, pdata ? pdata->batt_pdata : NULL);
if (ret < 0)
- goto touch_reg_err;
+ goto batt_err;
return ret;
- touch_reg_err:
- platform_device_put(wm->touch_dev);
- touch_err:
- platform_device_del(wm->battery_dev);
- batt_reg_err:
- platform_device_put(wm->battery_dev);
- batt_err:
- input_unregister_device(wm->input_dev);
- wm->input_dev = NULL;
- dev_alloc_err:
- input_free_device(wm->input_dev);
- alloc_err:
- kfree(wm);
-
+batt_err:
+ wm97xx_unregister_touch(wm);
return ret;
}
@@ -724,14 +761,45 @@ static int wm97xx_remove(struct device *dev)
{
struct wm97xx *wm = dev_get_drvdata(dev);
- platform_device_unregister(wm->battery_dev);
- platform_device_unregister(wm->touch_dev);
- input_unregister_device(wm->input_dev);
- kfree(wm);
+ wm97xx_remove_battery(wm);
+ wm97xx_unregister_touch(wm);
return 0;
}
+static int wm97xx_mfd_probe(struct platform_device *pdev)
+{
+ struct wm97xx *wm;
+ struct wm97xx_platform_data *mfd_pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ wm = devm_kzalloc(&pdev->dev, sizeof(struct wm97xx), GFP_KERNEL);
+ if (!wm)
+ return -ENOMEM;
+
+ wm->dev = &pdev->dev;
+ wm->ac97 = mfd_pdata->ac97;
+
+ ret = _wm97xx_probe(wm);
+ if (ret)
+ return ret;
+
+ ret = wm97xx_add_battery(wm, mfd_pdata->batt_pdata);
+ if (ret < 0)
+ goto batt_err;
+
+ return ret;
+
+batt_err:
+ wm97xx_unregister_touch(wm);
+ return ret;
+}
+
+static int wm97xx_mfd_remove(struct platform_device *pdev)
+{
+ return wm97xx_remove(&pdev->dev);
+}
+
static int __maybe_unused wm97xx_suspend(struct device *dev)
{
struct wm97xx *wm = dev_get_drvdata(dev);
@@ -828,21 +896,41 @@ EXPORT_SYMBOL_GPL(wm97xx_unregister_mach_ops);
static struct device_driver wm97xx_driver = {
.name = "wm97xx-ts",
+#ifdef CONFIG_AC97_BUS
.bus = &ac97_bus_type,
+#endif
.owner = THIS_MODULE,
.probe = wm97xx_probe,
.remove = wm97xx_remove,
.pm = &wm97xx_pm_ops,
};
+static struct platform_driver wm97xx_mfd_driver = {
+ .driver = {
+ .name = "wm97xx-ts",
+ .pm = &wm97xx_pm_ops,
+ },
+ .probe = wm97xx_mfd_probe,
+ .remove = wm97xx_mfd_remove,
+};
+
static int __init wm97xx_init(void)
{
- return driver_register(&wm97xx_driver);
+ int ret;
+
+ ret = platform_driver_register(&wm97xx_mfd_driver);
+ if (ret)
+ return ret;
+
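+ /* The legacy AC97 bus binding is only registered when the AC97 bus code is built in. */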
+ if (IS_BUILTIN(CONFIG_AC97_BUS))
+ ret = driver_register(&wm97xx_driver);
+ return ret;
}
static void __exit wm97xx_exit(void)
{
driver_unregister(&wm97xx_driver);
+ platform_driver_unregister(&wm97xx_mfd_driver);
}
module_init(wm97xx_init);
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 8e8874d23717..7d5eb004091d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -63,7 +63,6 @@
/* IO virtual address start page frame number */
#define IOVA_START_PFN (1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
/* Reserved IOVA ranges */
#define MSI_RANGE_START (0xfee00000)
@@ -1547,10 +1546,11 @@ static unsigned long dma_ops_alloc_iova(struct device *dev,
if (dma_mask > DMA_BIT_MASK(32))
pfn = alloc_iova_fast(&dma_dom->iovad, pages,
- IOVA_PFN(DMA_BIT_MASK(32)));
+ IOVA_PFN(DMA_BIT_MASK(32)), false);
if (!pfn)
- pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
+ pfn = alloc_iova_fast(&dma_dom->iovad, pages,
+ IOVA_PFN(dma_mask), true);
return (pfn << PAGE_SHIFT);
}
@@ -1788,8 +1788,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
if (!dma_dom->domain.pt_root)
goto free_dma_dom;
- init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
- IOVA_START_PFN, DMA_32BIT_PFN);
+ init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);
if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
goto free_dma_dom;
@@ -2383,11 +2382,9 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
size_t size,
int dir)
{
- dma_addr_t flush_addr;
dma_addr_t i, start;
unsigned int pages;
- flush_addr = dma_addr;
pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr &= PAGE_MASK;
start = dma_addr;
@@ -2696,8 +2693,7 @@ static int init_reserved_iova_ranges(void)
struct pci_dev *pdev = NULL;
struct iova *val;
- init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
- IOVA_START_PFN, DMA_32BIT_PFN);
+ init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN);
lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
&reserved_rbtree_key);
@@ -3155,7 +3151,7 @@ static void amd_iommu_apply_resv_region(struct device *dev,
unsigned long start, end;
start = IOVA_PFN(region->start);
- end = IOVA_PFN(region->start + region->length);
+ end = IOVA_PFN(region->start + region->length - 1);
WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
}
@@ -3663,11 +3659,11 @@ out_unlock:
return table;
}
-static int alloc_irq_index(u16 devid, int count)
+static int alloc_irq_index(u16 devid, int count, bool align)
{
struct irq_remap_table *table;
+ int index, c, alignment = 1;
unsigned long flags;
- int index, c;
struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
if (!iommu)
@@ -3677,16 +3673,21 @@ static int alloc_irq_index(u16 devid, int count)
if (!table)
return -ENODEV;
+ if (align)
+ alignment = roundup_pow_of_two(count);
+
spin_lock_irqsave(&table->lock, flags);
/* Scan table for free entries */
- for (c = 0, index = table->min_index;
- index < MAX_IRQS_PER_TABLE;
- ++index) {
- if (!iommu->irte_ops->is_allocated(table, index))
+ for (index = ALIGN(table->min_index, alignment), c = 0;
+ index < MAX_IRQS_PER_TABLE;) {
+ if (!iommu->irte_ops->is_allocated(table, index)) {
c += 1;
- else
- c = 0;
+ } else {
+ c = 0;
+ index = ALIGN(index + 1, alignment);
+ continue;
+ }
if (c == count) {
for (; c != 0; --c)
@@ -3695,6 +3696,8 @@ static int alloc_irq_index(u16 devid, int count)
index -= count - 1;
goto out;
}
+
+ index++;
}
index = -ENOSPC;
@@ -4099,7 +4102,9 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
else
ret = -ENOMEM;
} else {
- index = alloc_irq_index(devid, nr_irqs);
+ bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
+
+ index = alloc_irq_index(devid, nr_irqs, align);
}
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
@@ -4173,16 +4178,26 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
-static void irq_remapping_activate(struct irq_domain *domain,
- struct irq_data *irq_data)
+static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
+ struct amd_ir_data *ir_data,
+ struct irq_2_irte *irte_info,
+ struct irq_cfg *cfg);
+
+static int irq_remapping_activate(struct irq_domain *domain,
+ struct irq_data *irq_data, bool early)
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+ struct irq_cfg *cfg = irqd_cfg(irq_data);
- if (iommu)
- iommu->irte_ops->activate(data->entry, irte_info->devid,
- irte_info->index);
+ if (!iommu)
+ return 0;
+
+ iommu->irte_ops->activate(data->entry, irte_info->devid,
+ irte_info->index);
+ amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
+ return 0;
}
static void irq_remapping_deactivate(struct irq_domain *domain,
@@ -4269,6 +4284,22 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
}
+
+static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
+ struct amd_ir_data *ir_data,
+ struct irq_2_irte *irte_info,
+ struct irq_cfg *cfg)
+{
+
+ /*
+ * Atomically updates the IRTE with the new destination, vector
+ * and flushes the interrupt entry cache.
+ */
+ iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
+ irte_info->index, cfg->vector,
+ cfg->dest_apicid);
+}
+
static int amd_ir_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
@@ -4286,13 +4317,7 @@ static int amd_ir_set_affinity(struct irq_data *data,
if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
return ret;
- /*
- * Atomically updates the IRTE with the new destination, vector
- * and flushes the interrupt entry cache.
- */
- iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
- irte_info->index, cfg->vector, cfg->dest_apicid);
-
+ amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
/*
* After this point, all the interrupts will start arriving
* at the new destination. So, time to cleanup the previous
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index e67ba6c40faf..f122071688fd 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -316,6 +316,7 @@
#define ARM64_TCR_TBI0_MASK 0x1UL
#define CTXDESC_CD_0_AA64 (1UL << 41)
+#define CTXDESC_CD_0_S (1UL << 44)
#define CTXDESC_CD_0_R (1UL << 45)
#define CTXDESC_CD_0_A (1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT 47
@@ -377,7 +378,16 @@
#define CMDQ_SYNC_0_CS_SHIFT 12
#define CMDQ_SYNC_0_CS_NONE (0UL << CMDQ_SYNC_0_CS_SHIFT)
+#define CMDQ_SYNC_0_CS_IRQ (1UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV (2UL << CMDQ_SYNC_0_CS_SHIFT)
+#define CMDQ_SYNC_0_MSH_SHIFT 22
+#define CMDQ_SYNC_0_MSH_ISH (3UL << CMDQ_SYNC_0_MSH_SHIFT)
+#define CMDQ_SYNC_0_MSIATTR_SHIFT 24
+#define CMDQ_SYNC_0_MSIATTR_OIWB (0xfUL << CMDQ_SYNC_0_MSIATTR_SHIFT)
+#define CMDQ_SYNC_0_MSIDATA_SHIFT 32
+#define CMDQ_SYNC_0_MSIDATA_MASK 0xffffffffUL
+#define CMDQ_SYNC_1_MSIADDR_SHIFT 0
+#define CMDQ_SYNC_1_MSIADDR_MASK 0xffffffffffffcUL
/* Event queue */
#define EVTQ_ENT_DWORDS 4
@@ -408,20 +418,12 @@
/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US 100
-#define ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US 1000000 /* 1s! */
+#define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US 1000000 /* 1s! */
+#define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT 10
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
-/* Until ACPICA headers cover IORT rev. C */
-#ifndef ACPI_IORT_SMMU_HISILICON_HI161X
-#define ACPI_IORT_SMMU_HISILICON_HI161X 0x1
-#endif
-
-#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
-#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x2
-#endif
-
static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
@@ -504,6 +506,10 @@ struct arm_smmu_cmdq_ent {
} pri;
#define CMDQ_OP_CMD_SYNC 0x46
+ struct {
+ u32 msidata;
+ u64 msiaddr;
+ } sync;
};
};
@@ -604,6 +610,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_TRANS_S2 (1 << 10)
#define ARM_SMMU_FEAT_STALLS (1 << 11)
#define ARM_SMMU_FEAT_HYP (1 << 12)
+#define ARM_SMMU_FEAT_STALL_FORCE (1 << 13)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
@@ -616,6 +623,7 @@ struct arm_smmu_device {
int gerr_irq;
int combined_irq;
+ atomic_t sync_nr;
unsigned long ias; /* IPA */
unsigned long oas; /* PA */
@@ -634,6 +642,8 @@ struct arm_smmu_device {
struct arm_smmu_strtab_cfg strtab_cfg;
+ u32 sync_count;
+
/* IOMMU core code handle */
struct iommu_device iommu;
};
@@ -757,26 +767,29 @@ static void queue_inc_prod(struct arm_smmu_queue *q)
* Wait for the SMMU to consume items. If drain is true, wait until the queue
* is empty. Otherwise, wait until there is at least one free slot.
*/
-static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
+static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe)
{
ktime_t timeout;
- unsigned int delay = 1;
+ unsigned int delay = 1, spin_cnt = 0;
- /* Wait longer if it's queue drain */
- timeout = ktime_add_us(ktime_get(), drain ?
- ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US :
+ /* Wait longer if it's a CMD_SYNC */
+ timeout = ktime_add_us(ktime_get(), sync ?
+ ARM_SMMU_CMDQ_SYNC_TIMEOUT_US :
ARM_SMMU_POLL_TIMEOUT_US);
- while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
+ while (queue_sync_cons(q), (sync ? !queue_empty(q) : queue_full(q))) {
if (ktime_compare(ktime_get(), timeout) > 0)
return -ETIMEDOUT;
if (wfe) {
wfe();
- } else {
+ } else if (++spin_cnt < ARM_SMMU_CMDQ_SYNC_SPIN_COUNT) {
cpu_relax();
+ continue;
+ } else {
udelay(delay);
delay *= 2;
+ spin_cnt = 0;
}
}
@@ -878,7 +891,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
}
break;
case CMDQ_OP_CMD_SYNC:
- cmd[0] |= CMDQ_SYNC_0_CS_SEV;
+ if (ent->sync.msiaddr)
+ cmd[0] |= CMDQ_SYNC_0_CS_IRQ;
+ else
+ cmd[0] |= CMDQ_SYNC_0_CS_SEV;
+ cmd[0] |= CMDQ_SYNC_0_MSH_ISH | CMDQ_SYNC_0_MSIATTR_OIWB;
+ cmd[0] |= (u64)ent->sync.msidata << CMDQ_SYNC_0_MSIDATA_SHIFT;
+ cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
break;
default:
return -ENOENT;
@@ -936,13 +955,22 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
+static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
+{
+ struct arm_smmu_queue *q = &smmu->cmdq.q;
+ bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+
+ while (queue_insert_raw(q, cmd) == -ENOSPC) {
+ if (queue_poll_cons(q, false, wfe))
+ dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
+ }
+}
+
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
struct arm_smmu_cmdq_ent *ent)
{
u64 cmd[CMDQ_ENT_DWORDS];
unsigned long flags;
- bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
- struct arm_smmu_queue *q = &smmu->cmdq.q;
if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
@@ -951,14 +979,76 @@ static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
}
spin_lock_irqsave(&smmu->cmdq.lock, flags);
- while (queue_insert_raw(q, cmd) == -ENOSPC) {
- if (queue_poll_cons(q, false, wfe))
- dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
- }
+ arm_smmu_cmdq_insert_cmd(smmu, cmd);
+ spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+}
- if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
- dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
+/*
+ * The difference between val and sync_idx is bounded by the maximum size of
+ * a queue at 2^20 entries, so 32 bits is plenty for wrap-safe arithmetic.
+ */
+static int __arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx)
+{
+ ktime_t timeout;
+ u32 val;
+
+ timeout = ktime_add_us(ktime_get(), ARM_SMMU_CMDQ_SYNC_TIMEOUT_US);
+ val = smp_cond_load_acquire(&smmu->sync_count,
+ (int)(VAL - sync_idx) >= 0 ||
+ !ktime_before(ktime_get(), timeout));
+
+ return (int)(val - sync_idx) < 0 ? -ETIMEDOUT : 0;
+}
+
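+/*
+ * Issue a CMD_SYNC that reports completion by writing its sequence number to
+ * smmu->sync_count via MSI, then poll for that value to appear.
+ */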
+static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
+{
+ u64 cmd[CMDQ_ENT_DWORDS];
+ unsigned long flags;
+ struct arm_smmu_cmdq_ent ent = {
+ .opcode = CMDQ_OP_CMD_SYNC,
+ .sync = {
+ .msidata = atomic_inc_return_relaxed(&smmu->sync_nr),
+ .msiaddr = virt_to_phys(&smmu->sync_count),
+ },
+ };
+
+ arm_smmu_cmdq_build_cmd(cmd, &ent);
+
+ spin_lock_irqsave(&smmu->cmdq.lock, flags);
+ arm_smmu_cmdq_insert_cmd(smmu, cmd);
+ spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+
+ return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
+}
+
+static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+{
+ u64 cmd[CMDQ_ENT_DWORDS];
+ unsigned long flags;
+ bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+ struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC };
+ int ret;
+
+ arm_smmu_cmdq_build_cmd(cmd, &ent);
+
+ spin_lock_irqsave(&smmu->cmdq.lock, flags);
+ arm_smmu_cmdq_insert_cmd(smmu, cmd);
+ ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+
+ return ret;
+}
+
+static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+{
+ int ret;
+ bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) &&
+ (smmu->features & ARM_SMMU_FEAT_COHERENCY);
+
+ ret = msi ? __arm_smmu_cmdq_issue_sync_msi(smmu)
+ : __arm_smmu_cmdq_issue_sync(smmu);
+ if (ret)
+ dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
}
/* Context descriptor manipulation functions */
@@ -996,6 +1086,11 @@ static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
CTXDESC_CD_0_V;
+
+ /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
+ if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
+ val |= CTXDESC_CD_0_S;
+
cfg->cdptr[0] = cpu_to_le64(val);
val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
@@ -1029,8 +1124,7 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
};
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- cmd.opcode = CMDQ_OP_CMD_SYNC;
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_issue_sync(smmu);
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
@@ -1094,7 +1188,11 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
<< STRTAB_STE_1_SHCFG_SHIFT);
dst[2] = 0; /* Nuke the VMID */
- if (ste_live)
+ /*
+ * The SMMU can perform negative caching, so we must sync
+ * the STE regardless of whether the old value was live.
+ */
+ if (smmu)
arm_smmu_sync_ste_for_sid(smmu, sid);
return;
}
@@ -1112,7 +1210,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
#endif
STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
- if (smmu->features & ARM_SMMU_FEAT_STALLS)
+ if (smmu->features & ARM_SMMU_FEAT_STALLS &&
+ !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
@@ -1275,12 +1374,6 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
return IRQ_HANDLED;
}
-static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
-{
- /* We don't actually use CMD_SYNC interrupts for anything */
- return IRQ_HANDLED;
-}
-
static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
@@ -1313,10 +1406,8 @@ static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
if (active & GERROR_MSI_EVTQ_ABT_ERR)
dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
- if (active & GERROR_MSI_CMDQ_ABT_ERR) {
+ if (active & GERROR_MSI_CMDQ_ABT_ERR)
dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
- arm_smmu_cmdq_sync_handler(irq, smmu->dev);
- }
if (active & GERROR_PRIQ_ABT_ERR)
dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");
@@ -1345,17 +1436,13 @@ static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev)
static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
{
arm_smmu_gerror_handler(irq, dev);
- arm_smmu_cmdq_sync_handler(irq, dev);
return IRQ_WAKE_THREAD;
}
/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
- struct arm_smmu_cmdq_ent cmd;
-
- cmd.opcode = CMDQ_OP_CMD_SYNC;
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_issue_sync(smmu);
}
static void arm_smmu_tlb_sync(void *cookie)
@@ -1743,6 +1830,14 @@ arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
return ops->unmap(ops, iova, size);
}
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+{
+ struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
+
+ if (smmu)
+ __arm_smmu_tlb_sync(smmu);
+}
+
static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
@@ -1963,6 +2058,8 @@ static struct iommu_ops arm_smmu_ops = {
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.map_sg = default_iommu_map_sg,
+ .flush_iotlb_all = arm_smmu_iotlb_sync,
+ .iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
@@ -2147,6 +2244,7 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
int ret;
+ atomic_set(&smmu->sync_nr, 0);
ret = arm_smmu_init_queues(smmu);
if (ret)
return ret;
@@ -2265,15 +2363,6 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
dev_warn(smmu->dev, "failed to enable evtq irq\n");
}
- irq = smmu->cmdq.q.irq;
- if (irq) {
- ret = devm_request_irq(smmu->dev, irq,
- arm_smmu_cmdq_sync_handler, 0,
- "arm-smmu-v3-cmdq-sync", smmu);
- if (ret < 0)
- dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
- }
-
irq = smmu->gerr_irq;
if (irq) {
ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
@@ -2399,8 +2488,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
/* Invalidate any cached configuration */
cmd.opcode = CMDQ_OP_CFGI_ALL;
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- cmd.opcode = CMDQ_OP_CMD_SYNC;
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_issue_sync(smmu);
/* Invalidate any stale TLB entries */
if (smmu->features & ARM_SMMU_FEAT_HYP) {
@@ -2410,8 +2498,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- cmd.opcode = CMDQ_OP_CMD_SYNC;
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_issue_sync(smmu);
/* Event queue */
writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
@@ -2532,13 +2619,14 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
* register, but warn on mismatch.
*/
if (!!(reg & IDR0_COHACC) != coherent)
- dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
+ dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
coherent ? "true" : "false");
switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
- case IDR0_STALL_MODEL_STALL:
- /* Fallthrough */
case IDR0_STALL_MODEL_FORCE:
+ smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
+ /* Fallthrough */
+ case IDR0_STALL_MODEL_STALL:
smmu->features |= ARM_SMMU_FEAT_STALLS;
}
@@ -2665,7 +2753,7 @@ static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
break;
- case ACPI_IORT_SMMU_HISILICON_HI161X:
+ case ACPI_IORT_SMMU_V3_HISILICON_HI161X:
smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
break;
}
@@ -2783,10 +2871,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (irq > 0)
smmu->priq.q.irq = irq;
- irq = platform_get_irq_byname(pdev, "cmdq-sync");
- if (irq > 0)
- smmu->cmdq.q.irq = irq;
-
irq = platform_get_irq_byname(pdev, "gerror");
if (irq > 0)
smmu->gerr_irq = irq;
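
The MSI-based CMD_SYNC completion added above compares the sequence number written back by the SMMU against the locally allocated one using a signed subtraction, so the test keeps working when the 32-bit counter wraps. A minimal, self-contained C sketch of that idiom (illustrative only, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* True once 'seen' has reached (or passed) 'wanted', even across a u32 wrap. */
static bool sync_idx_complete(uint32_t seen, uint32_t wanted)
{
        /* Signed view of the unsigned difference: negative means "not yet". */
        return (int32_t)(seen - wanted) >= 0;
}

For example, sync_idx_complete(0x00000002, 0xfffffffe) is true: the counter has wrapped past the awaited index, and the signed difference (4) is still non-negative.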
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 3bdb799d3b4b..78d4c6b8f1ba 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -59,6 +59,7 @@
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
+#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
@@ -119,14 +120,6 @@ enum arm_smmu_implementation {
CAVIUM_SMMUV2,
};
-/* Until ACPICA headers cover IORT rev. C */
-#ifndef ACPI_IORT_SMMU_CORELINK_MMU401
-#define ACPI_IORT_SMMU_CORELINK_MMU401 0x4
-#endif
-#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX
-#define ACPI_IORT_SMMU_CAVIUM_THUNDERX 0x5
-#endif
-
struct arm_smmu_s2cr {
struct iommu_group *group;
int count;
@@ -250,6 +243,7 @@ enum arm_smmu_domain_stage {
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct io_pgtable_ops *pgtbl_ops;
+ const struct iommu_gather_ops *tlb_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
struct mutex init_mutex; /* Protects smmu pointer */
@@ -735,7 +729,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
enum io_pgtable_fmt fmt;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- const struct iommu_gather_ops *tlb_ops;
mutex_lock(&smmu_domain->init_mutex);
if (smmu_domain->smmu)
@@ -813,7 +806,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ias = min(ias, 32UL);
oas = min(oas, 32UL);
}
- tlb_ops = &arm_smmu_s1_tlb_ops;
+ smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
@@ -833,9 +826,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
oas = min(oas, 40UL);
}
if (smmu->version == ARM_SMMU_V2)
- tlb_ops = &arm_smmu_s2_tlb_ops_v2;
+ smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
else
- tlb_ops = &arm_smmu_s2_tlb_ops_v1;
+ smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
break;
default:
ret = -EINVAL;
@@ -863,7 +856,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
- .tlb = tlb_ops,
+ .tlb = smmu_domain->tlb_ops,
.iommu_dev = smmu->dev,
};
@@ -1259,6 +1252,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return ops->unmap(ops, iova, size);
}
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ if (smmu_domain->tlb_ops)
+ smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+}
+
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -1562,6 +1563,8 @@ static struct iommu_ops arm_smmu_ops = {
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.map_sg = default_iommu_map_sg,
+ .flush_iotlb_all = arm_smmu_iotlb_sync,
+ .iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
@@ -1606,7 +1609,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
* Allow unmatched Stream IDs to allocate bypass
* TLB entries for reduced latency.
*/
- reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
+ reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 9d1cebe7f6cb..25914d36c5ac 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -292,18 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
/* ...then finally give it a kicking to make sure it fits */
base_pfn = max_t(unsigned long, base_pfn,
domain->geometry.aperture_start >> order);
- end_pfn = min_t(unsigned long, end_pfn,
- domain->geometry.aperture_end >> order);
}
- /*
- * PCI devices may have larger DMA masks, but still prefer allocating
- * within a 32-bit mask to avoid DAC addressing. Such limitations don't
- * apply to the typical platform device, so for those we may as well
- * leave the cache limit at the top of their range to save an rb_last()
- * traversal on every allocation.
- */
- if (dev && dev_is_pci(dev))
- end_pfn &= DMA_BIT_MASK(32) >> order;
/* start_pfn is always nonzero for an already-initialised domain */
if (iovad->start_pfn) {
@@ -312,16 +301,11 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
pr_warn("Incompatible range for DMA domain\n");
return -EFAULT;
}
- /*
- * If we have devices with different DMA masks, move the free
- * area cache limit down for the benefit of the smaller one.
- */
- iovad->dma_32bit_pfn = min(end_pfn + 1, iovad->dma_32bit_pfn);
return 0;
}
- init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+ init_iova_domain(iovad, 1UL << order, base_pfn);
if (!dev)
return 0;
@@ -386,10 +370,12 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
/* Try to get PCI devices a SAC address */
if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
- iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift);
+ iova = alloc_iova_fast(iovad, iova_len,
+ DMA_BIT_MASK(32) >> shift, false);
if (!iova)
- iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift);
+ iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
+ true);
return (dma_addr_t)iova << shift;
}
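
The new flush_rcache argument to alloc_iova_fast() lets iommu_dma_alloc_iova() make a cheap first attempt below 32 bits for PCI devices and reserve the expensive per-CPU rcache flush for the final, full-mask attempt. A hedged sketch of that calling convention, assuming <linux/iova.h> and <linux/dma-mapping.h> (names are illustrative, not the patch's code):

#include <linux/dma-mapping.h>
#include <linux/iova.h>

static unsigned long alloc_sketch(struct iova_domain *iovad,
                                  unsigned long iova_len,
                                  u64 dma_limit, bool is_pci)
{
        unsigned long shift = iova_shift(iovad);
        unsigned long pfn = 0;

        /*
         * Try to give PCI devices a SAC (32-bit) address first; a miss here
         * is cheap, so do not flush anybody's rcache.
         */
        if (is_pci && dma_limit > DMA_BIT_MASK(32))
                pfn = alloc_iova_fast(iovad, iova_len,
                                      DMA_BIT_MASK(32) >> shift, false);

        /* Final attempt at the full mask: flushing the rcaches is now worth it. */
        if (!pfn)
                pfn = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
                                      true);

        return pfn;
}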
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 57c920c1372d..9a7ffd13c7f0 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -801,13 +801,16 @@ int __init dmar_dev_scope_init(void)
dmar_free_pci_notify_info(info);
}
}
-
- bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
}
return dmar_dev_scope_status;
}
+void dmar_register_bus_notifier(void)
+{
+ bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
+}
+
int __init dmar_table_init(void)
{
@@ -1676,7 +1679,8 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
raw_spin_lock_irqsave(&iommu->register_lock, flag);
}
- writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
+ writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
+ iommu->reg + DMAR_FSTS_REG);
unlock_exit:
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 25c2c75f5332..79c45650f8de 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -263,6 +263,7 @@ struct exynos_iommu_domain {
struct sysmmu_drvdata {
struct device *sysmmu; /* SYSMMU controller device */
struct device *master; /* master device (owner) */
+ struct device_link *link; /* runtime PM link to master */
void __iomem *sfrbase; /* our registers */
struct clk *clk; /* SYSMMU's clock */
struct clk *aclk; /* SYSMMU's aclk clock */
@@ -1250,6 +1251,8 @@ static struct iommu_group *get_device_iommu_group(struct device *dev)
static int exynos_iommu_add_device(struct device *dev)
{
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct sysmmu_drvdata *data;
struct iommu_group *group;
if (!has_sysmmu(dev))
@@ -1260,6 +1263,15 @@ static int exynos_iommu_add_device(struct device *dev)
if (IS_ERR(group))
return PTR_ERR(group);
+ list_for_each_entry(data, &owner->controllers, owner_node) {
+ /*
+ * SYSMMU will be runtime activated via device link
+ * (dependency) to its master device, so there are no
+ * direct calls to pm_runtime_get/put in this driver.
+ */
+ data->link = device_link_add(dev, data->sysmmu,
+ DL_FLAG_PM_RUNTIME);
+ }
iommu_group_put(group);
return 0;
@@ -1268,6 +1280,7 @@ static int exynos_iommu_add_device(struct device *dev)
static void exynos_iommu_remove_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct sysmmu_drvdata *data;
if (!has_sysmmu(dev))
return;
@@ -1283,6 +1296,9 @@ static void exynos_iommu_remove_device(struct device *dev)
}
}
iommu_group_remove_device(dev);
+
+ list_for_each_entry(data, &owner->controllers, owner_node)
+ device_link_del(data->link);
}
static int exynos_iommu_of_xlate(struct device *dev,
@@ -1316,13 +1332,6 @@ static int exynos_iommu_of_xlate(struct device *dev,
list_add_tail(&data->owner_node, &owner->controllers);
data->master = dev;
- /*
- * SYSMMU will be runtime activated via device link (dependency) to its
- * master device, so there are no direct calls to pm_runtime_get/put
- * in this driver.
- */
- device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME);
-
return 0;
}
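
The exynos change above moves device_link_add() from of_xlate() into add_device() and records the returned link, so remove_device() can undo it with device_link_del(). A minimal sketch of that add/remove pairing (hypothetical my_* names, error handling trimmed):

#include <linux/device.h>
#include <linux/errno.h>

struct my_iommu_data {
        struct device *sysmmu;        /* the IOMMU instance */
        struct device_link *link;     /* runtime-PM link to the master */
};

static int my_add_device(struct device *master, struct my_iommu_data *data)
{
        /* Runtime-resuming the master now also resumes its SYSMMU. */
        data->link = device_link_add(master, data->sysmmu,
                                     DL_FLAG_PM_RUNTIME);
        return data->link ? 0 : -ENODEV;
}

static void my_remove_device(struct my_iommu_data *data)
{
        device_link_del(data->link);
}

Creating the link once per device in add_device() (instead of on every of_xlate() call) also gives the driver a handle to delete, which the original code lacked.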
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 6784a05dd6b2..a0babdbf7146 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -82,8 +82,6 @@
#define IOVA_START_PFN (1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
-#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
/* page table handling */
#define LEVEL_STRIDE (9)
@@ -1878,8 +1876,7 @@ static int dmar_init_reserved_ranges(void)
struct iova *iova;
int i;
- init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
- DMA_32BIT_PFN);
+ init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
&reserved_rbtree_key);
@@ -1938,8 +1935,7 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
unsigned long sagaw;
int err;
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
- DMA_32BIT_PFN);
+ init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
err = init_iova_flush_queue(&domain->iovad,
iommu_flush_iova, iova_entry_free);
@@ -2058,7 +2054,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
if (context_copied(context)) {
u16 did_old = context_domain_id(context);
- if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
+ if (did_old < cap_ndoms(iommu->cap)) {
iommu->flush.flush_context(iommu, did_old,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
@@ -3473,11 +3469,12 @@ static unsigned long intel_alloc_iova(struct device *dev,
* from higher range
*/
iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
- IOVA_PFN(DMA_BIT_MASK(32)));
+ IOVA_PFN(DMA_BIT_MASK(32)), false);
if (iova_pfn)
return iova_pfn;
}
- iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
+ iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
+ IOVA_PFN(dma_mask), true);
if (unlikely(!iova_pfn)) {
pr_err("Allocating %ld-page iova for %s failed",
nrpages, dev_name(dev));
@@ -4752,6 +4749,16 @@ int __init intel_iommu_init(void)
goto out_free_dmar;
}
+ up_write(&dmar_global_lock);
+
+ /*
+ * The bus notifier takes the dmar_global_lock, so lockdep will
+ * complain later when we register it under the lock.
+ */
+ dmar_register_bus_notifier();
+
+ down_write(&dmar_global_lock);
+
if (no_iommu || dmar_disabled) {
/*
* We exit the function here to ensure IOMMU's remapping and
@@ -4897,8 +4904,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
- DMA_32BIT_PFN);
+ init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
domain_reserve_special_ranges(domain);
/* calculate AGAW */
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index f6697e55c2d4..ed1cf7c5a43b 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -292,7 +292,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
int pasid_max;
int ret;
- if (WARN_ON(!iommu))
+ if (WARN_ON(!iommu || !iommu->pasid_table))
return -EINVAL;
if (dev_is_pci(dev)) {
@@ -458,6 +458,8 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
+ svm->iommu->pasid_table[svm->pasid].val = 0;
+ wmb();
idr_remove(&svm->iommu->pasid_idr, svm->pasid);
if (svm->mm)
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 25842b566c39..76a193c7fcfc 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1122,6 +1122,24 @@ struct irq_remap_ops intel_irq_remap_ops = {
.get_irq_domain = intel_get_irq_domain,
};
+static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
+{
+ struct intel_ir_data *ir_data = irqd->chip_data;
+ struct irte *irte = &ir_data->irte_entry;
+ struct irq_cfg *cfg = irqd_cfg(irqd);
+
+ /*
+ * Atomically updates the IRTE with the new destination, vector
+ * and flushes the interrupt entry cache.
+ */
+ irte->vector = cfg->vector;
+ irte->dest_id = IRTE_DEST(cfg->dest_apicid);
+
+ /* Update the hardware only if the interrupt is in remapped mode. */
+ if (!force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
+ modify_irte(&ir_data->irq_2_iommu, irte);
+}
+
/*
* Migrate the IO-APIC irq in the presence of intr-remapping.
*
@@ -1140,27 +1158,15 @@ static int
intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
- struct intel_ir_data *ir_data = data->chip_data;
- struct irte *irte = &ir_data->irte_entry;
- struct irq_cfg *cfg = irqd_cfg(data);
struct irq_data *parent = data->parent_data;
+ struct irq_cfg *cfg = irqd_cfg(data);
int ret;
ret = parent->chip->irq_set_affinity(parent, mask, force);
if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
return ret;
- /*
- * Atomically updates the IRTE with the new destination, vector
- * and flushes the interrupt entry cache.
- */
- irte->vector = cfg->vector;
- irte->dest_id = IRTE_DEST(cfg->dest_apicid);
-
- /* Update the hardware only if the interrupt is in remapped mode. */
- if (ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
- modify_irte(&ir_data->irq_2_iommu, irte);
-
+ intel_ir_reconfigure_irte(data, false);
/*
* After this point, all the interrupts will start arriving
* at the new destination. So, time to cleanup the previous
@@ -1390,12 +1396,11 @@ static void intel_irq_remapping_free(struct irq_domain *domain,
irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
-static void intel_irq_remapping_activate(struct irq_domain *domain,
- struct irq_data *irq_data)
+static int intel_irq_remapping_activate(struct irq_domain *domain,
+ struct irq_data *irq_data, bool early)
{
- struct intel_ir_data *data = irq_data->chip_data;
-
- modify_irte(&data->irq_2_iommu, &data->irte_entry);
+ intel_ir_reconfigure_irte(irq_data, true);
+ return 0;
}
static void intel_irq_remapping_deactivate(struct irq_domain *domain,
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 6961fc393f0b..2ca08dc9331c 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -660,16 +660,11 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t size)
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
- size_t unmapped;
if (WARN_ON(upper_32_bits(iova)))
return 0;
- unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
- if (unmapped)
- io_pgtable_tlb_sync(&data->iop);
-
- return unmapped;
+ return __arm_v7s_unmap(data, iova, size, 1, data->pgd);
}
static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index e8018a308868..51e5c43caed1 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -609,7 +609,6 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t size)
{
- size_t unmapped;
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
arm_lpae_iopte *ptep = data->pgd;
int lvl = ARM_LPAE_START_LVL(data);
@@ -617,11 +616,7 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
return 0;
- unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
- if (unmapped)
- io_pgtable_tlb_sync(&data->iop);
-
- return unmapped;
+ return __arm_lpae_unmap(data, iova, size, lvl, ptep);
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
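
With the per-unmap io_pgtable_tlb_sync() calls removed above, invalidation becomes the caller's responsibility: several unmaps can be issued back to back and completed with a single sync through the drivers' new iotlb_sync callback. A hedged sketch of the batched pattern at the IOMMU API level, assuming the iommu_unmap_fast()/iommu_tlb_sync() helpers from <linux/iommu.h>:

#include <linux/iommu.h>

/* Unmap nr_chunks equal-sized ranges, paying for one IOTLB sync in total. */
static size_t unmap_batched(struct iommu_domain *domain, unsigned long iova,
                            size_t chunk, unsigned int nr_chunks)
{
        size_t unmapped = 0;
        unsigned int i;

        for (i = 0; i < nr_chunks; i++)
                unmapped += iommu_unmap_fast(domain, iova + i * chunk, chunk);

        /* One sync covers every unmap issued above. */
        iommu_tlb_sync(domain);

        return unmapped;
}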
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 33edfa794ae9..466aaa8ba841 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -24,6 +24,9 @@
#include <linux/bitops.h>
#include <linux/cpu.h>
+/* The anchor node sits above the top of the usable address space */
+#define IOVA_ANCHOR ~0UL
+
static bool iova_rcache_insert(struct iova_domain *iovad,
unsigned long pfn,
unsigned long size);
@@ -37,7 +40,7 @@ static void fq_flush_timeout(unsigned long data);
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
- unsigned long start_pfn, unsigned long pfn_32bit)
+ unsigned long start_pfn)
{
/*
* IOVA granularity will normally be equal to the smallest
@@ -48,12 +51,16 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
- iovad->cached32_node = NULL;
+ iovad->cached_node = &iovad->anchor.node;
+ iovad->cached32_node = &iovad->anchor.node;
iovad->granule = granule;
iovad->start_pfn = start_pfn;
- iovad->dma_32bit_pfn = pfn_32bit + 1;
+ iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
iovad->flush_cb = NULL;
iovad->fq = NULL;
+ iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
+ rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
+ rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -108,50 +115,36 @@ int init_iova_flush_queue(struct iova_domain *iovad,
EXPORT_SYMBOL_GPL(init_iova_flush_queue);
static struct rb_node *
-__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
+__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
- if ((*limit_pfn > iovad->dma_32bit_pfn) ||
- (iovad->cached32_node == NULL))
- return rb_last(&iovad->rbroot);
- else {
- struct rb_node *prev_node = rb_prev(iovad->cached32_node);
- struct iova *curr_iova =
- rb_entry(iovad->cached32_node, struct iova, node);
- *limit_pfn = curr_iova->pfn_lo;
- return prev_node;
- }
+ if (limit_pfn <= iovad->dma_32bit_pfn)
+ return iovad->cached32_node;
+
+ return iovad->cached_node;
}
static void
-__cached_rbnode_insert_update(struct iova_domain *iovad,
- unsigned long limit_pfn, struct iova *new)
+__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
- if (limit_pfn != iovad->dma_32bit_pfn)
- return;
- iovad->cached32_node = &new->node;
+ if (new->pfn_hi < iovad->dma_32bit_pfn)
+ iovad->cached32_node = &new->node;
+ else
+ iovad->cached_node = &new->node;
}
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
struct iova *cached_iova;
- struct rb_node *curr;
- if (!iovad->cached32_node)
- return;
- curr = iovad->cached32_node;
- cached_iova = rb_entry(curr, struct iova, node);
-
- if (free->pfn_lo >= cached_iova->pfn_lo) {
- struct rb_node *node = rb_next(&free->node);
- struct iova *iova = rb_entry(node, struct iova, node);
+ cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
+ if (free->pfn_hi < iovad->dma_32bit_pfn &&
+ free->pfn_lo >= cached_iova->pfn_lo)
+ iovad->cached32_node = rb_next(&free->node);
- /* only cache if it's below 32bit pfn */
- if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
- iovad->cached32_node = node;
- else
- iovad->cached32_node = NULL;
- }
+ cached_iova = rb_entry(iovad->cached_node, struct iova, node);
+ if (free->pfn_lo >= cached_iova->pfn_lo)
+ iovad->cached_node = rb_next(&free->node);
}
/* Insert the iova into domain rbtree by holding writer lock */
@@ -182,63 +175,43 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova,
rb_insert_color(&iova->node, root);
}
-/*
- * Computes the padding size required, to make the start address
- * naturally aligned on the power-of-two order of its size
- */
-static unsigned int
-iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
-{
- return (limit_pfn - size) & (__roundup_pow_of_two(size) - 1);
-}
-
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
unsigned long size, unsigned long limit_pfn,
struct iova *new, bool size_aligned)
{
- struct rb_node *prev, *curr = NULL;
+ struct rb_node *curr, *prev;
+ struct iova *curr_iova;
unsigned long flags;
- unsigned long saved_pfn;
- unsigned int pad_size = 0;
+ unsigned long new_pfn;
+ unsigned long align_mask = ~0UL;
+
+ if (size_aligned)
+ align_mask <<= fls_long(size - 1);
/* Walk the tree backwards */
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- saved_pfn = limit_pfn;
- curr = __get_cached_rbnode(iovad, &limit_pfn);
- prev = curr;
- while (curr) {
- struct iova *curr_iova = rb_entry(curr, struct iova, node);
-
- if (limit_pfn <= curr_iova->pfn_lo) {
- goto move_left;
- } else if (limit_pfn > curr_iova->pfn_hi) {
- if (size_aligned)
- pad_size = iova_get_pad_size(size, limit_pfn);
- if ((curr_iova->pfn_hi + size + pad_size) < limit_pfn)
- break; /* found a free slot */
- }
- limit_pfn = curr_iova->pfn_lo;
-move_left:
+ curr = __get_cached_rbnode(iovad, limit_pfn);
+ curr_iova = rb_entry(curr, struct iova, node);
+ do {
+ limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
+ new_pfn = (limit_pfn - size) & align_mask;
prev = curr;
curr = rb_prev(curr);
- }
+ curr_iova = rb_entry(curr, struct iova, node);
+ } while (curr && new_pfn <= curr_iova->pfn_hi);
- if (!curr) {
- if (size_aligned)
- pad_size = iova_get_pad_size(size, limit_pfn);
- if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- return -ENOMEM;
- }
+ if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return -ENOMEM;
}
/* pfn_lo will point to size aligned address if size_aligned is set */
- new->pfn_lo = limit_pfn - (size + pad_size);
+ new->pfn_lo = new_pfn;
new->pfn_hi = new->pfn_lo + size - 1;
/* If we have 'prev', it's a valid place to start the insertion. */
iova_insert_rbtree(&iovad->rbroot, new, prev);
- __cached_rbnode_insert_update(iovad, saved_pfn, new);
+ __cached_rbnode_insert_update(iovad, new);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
@@ -258,7 +231,8 @@ EXPORT_SYMBOL(alloc_iova_mem);
void free_iova_mem(struct iova *iova)
{
- kmem_cache_free(iova_cache, iova);
+ if (iova->pfn_lo != IOVA_ANCHOR)
+ kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);
@@ -342,15 +316,12 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
while (node) {
struct iova *iova = rb_entry(node, struct iova, node);
- /* If pfn falls within iova's range, return iova */
- if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
- return iova;
- }
-
if (pfn < iova->pfn_lo)
node = node->rb_left;
- else if (pfn > iova->pfn_lo)
+ else if (pfn > iova->pfn_hi)
node = node->rb_right;
+ else
+ return iova; /* pfn falls within iova's range */
}
return NULL;
@@ -424,18 +395,19 @@ EXPORT_SYMBOL_GPL(free_iova);
* @iovad: - iova domain in question
* @size: - size of page frames to allocate
* @limit_pfn: - max limit address
+ * @flush_rcache: - set to flush rcache on regular allocation failure
* This function tries to satisfy an iova allocation from the rcache,
- * and falls back to regular allocation on failure.
+ * and falls back to regular allocation on failure. If regular allocation
+ * fails too and the flush_rcache flag is set then the rcache will be flushed.
*/
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
- unsigned long limit_pfn)
+ unsigned long limit_pfn, bool flush_rcache)
{
- bool flushed_rcache = false;
unsigned long iova_pfn;
struct iova *new_iova;
- iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
+ iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
if (iova_pfn)
return iova_pfn;
@@ -444,11 +416,11 @@ retry:
if (!new_iova) {
unsigned int cpu;
- if (flushed_rcache)
+ if (!flush_rcache)
return 0;
/* Try replenishing IOVAs by flushing rcache. */
- flushed_rcache = true;
+ flush_rcache = false;
for_each_online_cpu(cpu)
free_cpu_cached_iovas(cpu, iovad);
goto retry;
@@ -570,7 +542,7 @@ void queue_iova(struct iova_domain *iovad,
unsigned long pfn, unsigned long pages,
unsigned long data)
{
- struct iova_fq *fq = get_cpu_ptr(iovad->fq);
+ struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
unsigned long flags;
unsigned idx;
@@ -600,8 +572,6 @@ void queue_iova(struct iova_domain *iovad,
if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
mod_timer(&iovad->fq_timer,
jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
-
- put_cpu_ptr(iovad->fq);
}
EXPORT_SYMBOL_GPL(queue_iova);
@@ -612,21 +582,12 @@ EXPORT_SYMBOL_GPL(queue_iova);
*/
void put_iova_domain(struct iova_domain *iovad)
{
- struct rb_node *node;
- unsigned long flags;
+ struct iova *iova, *tmp;
free_iova_flush_queue(iovad);
free_iova_rcaches(iovad);
- spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- node = rb_first(&iovad->rbroot);
- while (node) {
- struct iova *iova = rb_entry(node, struct iova, node);
-
- rb_erase(node, &iovad->rbroot);
+ rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
free_iova_mem(iova);
- node = rb_first(&iovad->rbroot);
- }
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(put_iova_domain);
@@ -695,6 +656,10 @@ reserve_iova(struct iova_domain *iovad,
struct iova *iova;
unsigned int overlap = 0;
+ /* Don't allow nonsensical pfns */
+ if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
+ return NULL;
+
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
@@ -738,6 +703,9 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
struct iova *iova = rb_entry(node, struct iova, node);
struct iova *new_iova;
+ if (iova->pfn_lo == IOVA_ANCHOR)
+ continue;
+
new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
if (!new_iova)
printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
@@ -855,12 +823,21 @@ static bool iova_magazine_empty(struct iova_magazine *mag)
static unsigned long iova_magazine_pop(struct iova_magazine *mag,
unsigned long limit_pfn)
{
+ int i;
+ unsigned long pfn;
+
BUG_ON(iova_magazine_empty(mag));
- if (mag->pfns[mag->size - 1] >= limit_pfn)
- return 0;
+ /* Only fall back to the rbtree if we have no suitable pfns at all */
+ for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
+ if (i == 0)
+ return 0;
- return mag->pfns[--mag->size];
+ /* Swap it to pop it */
+ pfn = mag->pfns[i];
+ mag->pfns[i] = mag->pfns[--mag->size];
+
+ return pfn;
}
static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
@@ -1011,27 +988,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
return 0;
- return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
-}
-
-/*
- * Free a cpu's rcache.
- */
-static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
- struct iova_rcache *rcache)
-{
- struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
- unsigned long flags;
-
- spin_lock_irqsave(&cpu_rcache->lock, flags);
-
- iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
- iova_magazine_free(cpu_rcache->loaded);
-
- iova_magazine_free_pfns(cpu_rcache->prev, iovad);
- iova_magazine_free(cpu_rcache->prev);
-
- spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+ return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}
/*
@@ -1040,21 +997,20 @@ static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
static void free_iova_rcaches(struct iova_domain *iovad)
{
struct iova_rcache *rcache;
- unsigned long flags;
+ struct iova_cpu_rcache *cpu_rcache;
unsigned int cpu;
int i, j;
for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
rcache = &iovad->rcaches[i];
- for_each_possible_cpu(cpu)
- free_cpu_iova_rcache(cpu, iovad, rcache);
- spin_lock_irqsave(&rcache->lock, flags);
+ for_each_possible_cpu(cpu) {
+ cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+ iova_magazine_free(cpu_rcache->loaded);
+ iova_magazine_free(cpu_rcache->prev);
+ }
free_percpu(rcache->cpu_rcaches);
- for (j = 0; j < rcache->depot_size; ++j) {
- iova_magazine_free_pfns(rcache->depot[j], iovad);
+ for (j = 0; j < rcache->depot_size; ++j)
iova_magazine_free(rcache->depot[j]);
- }
- spin_unlock_irqrestore(&rcache->lock, flags);
}
}
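
The rewritten __alloc_and_insert_iova_range() above replaces the explicit pad-size computation with a single mask: rounding (limit_pfn - size) down to the power-of-two order of the size yields a naturally aligned candidate pfn. A small self-contained sketch of that arithmetic (illustrative values; top_bit() stands in for the kernel's fls_long()):

#include <stdio.h>

static unsigned int top_bit(unsigned long x)
{
        unsigned int n = 0;

        while (x) {
                x >>= 1;
                n++;
        }
        return n;
}

int main(void)
{
        unsigned long size = 5;              /* pages requested */
        unsigned long limit_pfn = 0x100003;  /* upper bound for the allocation */
        unsigned long align_mask = ~0UL << top_bit(size - 1);
        unsigned long new_pfn = (limit_pfn - size) & align_mask;

        /* A 5-page request is aligned to 8 pages, so new_pfn is a multiple of 8. */
        printf("new_pfn = %#lx\n", new_pfn);   /* prints 0xffff8 */
        return 0;
}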
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 195d6e93ac71..8dce3a9de9d8 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -19,30 +19,49 @@
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
+#else
+#define arm_iommu_create_mapping(...) NULL
+#define arm_iommu_attach_device(...) -ENODEV
+#define arm_iommu_release_mapping(...) do {} while (0)
+#define arm_iommu_detach_device(...) do {} while (0)
#endif
#include "io-pgtable.h"
-#define IPMMU_CTX_MAX 1
+#define IPMMU_CTX_MAX 8
+
+struct ipmmu_features {
+ bool use_ns_alias_offset;
+ bool has_cache_leaf_nodes;
+ unsigned int number_of_contexts;
+ bool setup_imbuscr;
+ bool twobit_imttbcr_sl0;
+};
struct ipmmu_vmsa_device {
struct device *dev;
void __iomem *base;
struct iommu_device iommu;
-
+ struct ipmmu_vmsa_device *root;
+ const struct ipmmu_features *features;
unsigned int num_utlbs;
+ unsigned int num_ctx;
spinlock_t lock; /* Protects ctx and domains[] */
DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
+ struct iommu_group *group;
struct dma_iommu_mapping *mapping;
};
@@ -57,18 +76,12 @@ struct ipmmu_vmsa_domain {
spinlock_t lock; /* Protects mappings */
};
-struct ipmmu_vmsa_iommu_priv {
- struct ipmmu_vmsa_device *mmu;
- struct device *dev;
- struct list_head list;
-};
-
static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}
-static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
+static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL;
}
@@ -133,6 +146,10 @@ static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
#define IMTTBCR_TSZ0_MASK (7 << 0)
#define IMTTBCR_TSZ0_SHIFT 0
+#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6)
+#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6)
+#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6)
+
#define IMBUSCR 0x000c
#define IMBUSCR_DVM (1 << 2)
#define IMBUSCR_BUSSEL_SYS (0 << 0)
@@ -194,6 +211,36 @@ static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
#define IMUASID_ASID0_SHIFT 0
/* -----------------------------------------------------------------------------
+ * Root device handling
+ */
+
+static struct platform_driver ipmmu_driver;
+
+static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
+{
+ return mmu->root == mmu;
+}
+
+static int __ipmmu_check_device(struct device *dev, void *data)
+{
+ struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
+ struct ipmmu_vmsa_device **rootp = data;
+
+ if (ipmmu_is_root(mmu))
+ *rootp = mmu;
+
+ return 0;
+}
+
+static struct ipmmu_vmsa_device *ipmmu_find_root(void)
+{
+ struct ipmmu_vmsa_device *root = NULL;
+
+ return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
+ __ipmmu_check_device) == 0 ? root : NULL;
+}
+
+/* -----------------------------------------------------------------------------
* Read/Write Access
*/
@@ -208,15 +255,29 @@ static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
iowrite32(data, mmu->base + offset);
}
-static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
+static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
+ unsigned int reg)
{
- return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
+ return ipmmu_read(domain->mmu->root,
+ domain->context_id * IM_CTX_SIZE + reg);
}
-static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
- u32 data)
+static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
+ unsigned int reg, u32 data)
{
- ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
+ ipmmu_write(domain->mmu->root,
+ domain->context_id * IM_CTX_SIZE + reg, data);
+}
+
+static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
+ unsigned int reg, u32 data)
+{
+ if (domain->mmu != domain->mmu->root)
+ ipmmu_write(domain->mmu,
+ domain->context_id * IM_CTX_SIZE + reg, data);
+
+ ipmmu_write(domain->mmu->root,
+ domain->context_id * IM_CTX_SIZE + reg, data);
}
/* -----------------------------------------------------------------------------
@@ -228,7 +289,7 @@ static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
unsigned int count = 0;
- while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
+ while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
cpu_relax();
if (++count == TLB_LOOP_TIMEOUT) {
dev_err_ratelimited(domain->mmu->dev,
@@ -243,9 +304,9 @@ static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
u32 reg;
- reg = ipmmu_ctx_read(domain, IMCTR);
+ reg = ipmmu_ctx_read_root(domain, IMCTR);
reg |= IMCTR_FLUSH;
- ipmmu_ctx_write(domain, IMCTR, reg);
+ ipmmu_ctx_write_all(domain, IMCTR, reg);
ipmmu_tlb_sync(domain);
}
@@ -313,11 +374,12 @@ static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
spin_lock_irqsave(&mmu->lock, flags);
- ret = find_first_zero_bit(mmu->ctx, IPMMU_CTX_MAX);
- if (ret != IPMMU_CTX_MAX) {
+ ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
+ if (ret != mmu->num_ctx) {
mmu->domains[ret] = domain;
set_bit(ret, mmu->ctx);
- }
+ } else
+ ret = -EBUSY;
spin_unlock_irqrestore(&mmu->lock, flags);
@@ -340,6 +402,7 @@ static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
u64 ttbr;
+ u32 tmp;
int ret;
/*
@@ -364,51 +427,59 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
* TODO: Add support for coherent walk through CCI with DVM and remove
* cache handling. For now, delegate it to the io-pgtable code.
*/
- domain->cfg.iommu_dev = domain->mmu->dev;
+ domain->cfg.iommu_dev = domain->mmu->root->dev;
/*
* Find an unused context.
*/
- ret = ipmmu_domain_allocate_context(domain->mmu, domain);
- if (ret == IPMMU_CTX_MAX)
- return -EBUSY;
+ ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
+ if (ret < 0)
+ return ret;
domain->context_id = ret;
domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
domain);
if (!domain->iop) {
- ipmmu_domain_free_context(domain->mmu, domain->context_id);
+ ipmmu_domain_free_context(domain->mmu->root,
+ domain->context_id);
return -EINVAL;
}
/* TTBR0 */
ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
- ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
- ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
+ ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
+ ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
/*
* TTBCR
* We use long descriptors with inner-shareable WBWA tables and allocate
* the whole 32-bit VA space to TTBR0.
*/
- ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
- IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
- IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
+ if (domain->mmu->features->twobit_imttbcr_sl0)
+ tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
+ else
+ tmp = IMTTBCR_SL0_LVL_1;
+
+ ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
+ IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
+ IMTTBCR_IRGN0_WB_WA | tmp);
/* MAIR0 */
- ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);
+ ipmmu_ctx_write_root(domain, IMMAIR0,
+ domain->cfg.arm_lpae_s1_cfg.mair[0]);
/* IMBUSCR */
- ipmmu_ctx_write(domain, IMBUSCR,
- ipmmu_ctx_read(domain, IMBUSCR) &
- ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
+ if (domain->mmu->features->setup_imbuscr)
+ ipmmu_ctx_write_root(domain, IMBUSCR,
+ ipmmu_ctx_read_root(domain, IMBUSCR) &
+ ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
/*
* IMSTR
* Clear all interrupt flags.
*/
- ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));
+ ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));
/*
* IMCTR
@@ -417,7 +488,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
* software management as we have no use for it. Flush the TLB as
* required when modifying the context registers.
*/
- ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
+ ipmmu_ctx_write_all(domain, IMCTR,
+ IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
return 0;
}
@@ -430,9 +502,9 @@ static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
*
* TODO: Is TLB flush really needed ?
*/
- ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
+ ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
ipmmu_tlb_sync(domain);
- ipmmu_domain_free_context(domain->mmu, domain->context_id);
+ ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}
/* -----------------------------------------------------------------------------
@@ -446,11 +518,11 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
u32 status;
u32 iova;
- status = ipmmu_ctx_read(domain, IMSTR);
+ status = ipmmu_ctx_read_root(domain, IMSTR);
if (!(status & err_mask))
return IRQ_NONE;
- iova = ipmmu_ctx_read(domain, IMEAR);
+ iova = ipmmu_ctx_read_root(domain, IMEAR);
/*
* Clear the error status flags. Unlike traditional interrupt flag
@@ -458,7 +530,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
* seems to require 0. The error address register must be read before,
* otherwise its value will be 0.
*/
- ipmmu_ctx_write(domain, IMSTR, 0);
+ ipmmu_ctx_write_root(domain, IMSTR, 0);
/* Log fatal errors. */
if (status & IMSTR_MHIT)
@@ -499,7 +571,7 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
/*
* Check interrupts for all active contexts.
*/
- for (i = 0; i < IPMMU_CTX_MAX; i++) {
+ for (i = 0; i < mmu->num_ctx; i++) {
if (!mmu->domains[i])
continue;
if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
@@ -528,6 +600,27 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
return &domain->io_domain;
}
+static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
+{
+ struct iommu_domain *io_domain = NULL;
+
+ switch (type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ io_domain = __ipmmu_domain_alloc(type);
+ break;
+
+ case IOMMU_DOMAIN_DMA:
+ io_domain = __ipmmu_domain_alloc(type);
+ if (io_domain && iommu_get_dma_cookie(io_domain)) {
+ kfree(io_domain);
+ io_domain = NULL;
+ }
+ break;
+ }
+
+ return io_domain;
+}
+
static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
@@ -536,6 +629,7 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain)
* Free the domain resources. We assume that all devices have already
* been detached.
*/
+ iommu_put_dma_cookie(io_domain);
ipmmu_domain_destroy_context(domain);
free_io_pgtable_ops(domain->iop);
kfree(domain);
@@ -544,15 +638,14 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain)
static int ipmmu_attach_device(struct iommu_domain *io_domain,
struct device *dev)
{
- struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
- struct ipmmu_vmsa_device *mmu = priv->mmu;
+ struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
unsigned long flags;
unsigned int i;
int ret = 0;
- if (!priv || !priv->mmu) {
+ if (!mmu) {
dev_err(dev, "Cannot attach to IPMMU\n");
return -ENXIO;
}
@@ -563,6 +656,13 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
/* The domain hasn't been used yet, initialize it. */
domain->mmu = mmu;
ret = ipmmu_domain_init_context(domain);
+ if (ret < 0) {
+ dev_err(dev, "Unable to initialize IPMMU context\n");
+ domain->mmu = NULL;
+ } else {
+ dev_info(dev, "Using IPMMU context %u\n",
+ domain->context_id);
+ }
} else if (domain->mmu != mmu) {
/*
* Something is wrong, we can't attach two devices using
@@ -619,6 +719,14 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
return domain->iop->unmap(domain->iop, iova, size);
}
+static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
+{
+ struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
+
+ if (domain->mmu)
+ ipmmu_tlb_flush_all(domain);
+}
+
static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
dma_addr_t iova)
{
@@ -633,62 +741,53 @@ static int ipmmu_init_platform_device(struct device *dev,
struct of_phandle_args *args)
{
struct platform_device *ipmmu_pdev;
- struct ipmmu_vmsa_iommu_priv *priv;
ipmmu_pdev = of_find_device_by_node(args->np);
if (!ipmmu_pdev)
return -ENODEV;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->mmu = platform_get_drvdata(ipmmu_pdev);
- priv->dev = dev;
- dev->iommu_fwspec->iommu_priv = priv;
+ dev->iommu_fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);
return 0;
}
+static bool ipmmu_slave_whitelist(struct device *dev)
+{
+ /* By default, do not allow use of IPMMU */
+ return false;
+}
+
+static const struct soc_device_attribute soc_r8a7795[] = {
+ { .soc_id = "r8a7795", },
+ { /* sentinel */ }
+};
+
static int ipmmu_of_xlate(struct device *dev,
struct of_phandle_args *spec)
{
+ /* For R-Car Gen3 use a white list to opt-in slave devices */
+ if (soc_device_match(soc_r8a7795) && !ipmmu_slave_whitelist(dev))
+ return -ENODEV;
+
iommu_fwspec_add_ids(dev, spec->args, 1);
/* Initialize once - xlate() will call multiple times */
- if (to_priv(dev))
+ if (to_ipmmu(dev))
return 0;
return ipmmu_init_platform_device(dev, spec);
}
-#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
-
-static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
+static int ipmmu_init_arm_mapping(struct device *dev)
{
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
-
- return __ipmmu_domain_alloc(type);
-}
-
-static int ipmmu_add_device(struct device *dev)
-{
- struct ipmmu_vmsa_device *mmu = NULL;
+ struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
struct iommu_group *group;
int ret;
- /*
- * Only let through devices that have been verified in xlate()
- */
- if (!to_priv(dev))
- return -ENODEV;
-
/* Create a device group and add the device to it. */
group = iommu_group_alloc();
if (IS_ERR(group)) {
dev_err(dev, "Failed to allocate IOMMU group\n");
- ret = PTR_ERR(group);
- goto error;
+ return PTR_ERR(group);
}
ret = iommu_group_add_device(group, dev);
@@ -696,8 +795,7 @@ static int ipmmu_add_device(struct device *dev)
if (ret < 0) {
dev_err(dev, "Failed to add device to IPMMU group\n");
- group = NULL;
- goto error;
+ return ret;
}
/*
@@ -709,7 +807,6 @@ static int ipmmu_add_device(struct device *dev)
* - Make the mapping size configurable ? We currently use a 2GB mapping
* at a 1GB offset to ensure that NULL VAs will fault.
*/
- mmu = to_priv(dev)->mmu;
if (!mmu->mapping) {
struct dma_iommu_mapping *mapping;
@@ -734,159 +831,73 @@ static int ipmmu_add_device(struct device *dev)
return 0;
error:
- if (mmu)
+ iommu_group_remove_device(dev);
+ if (mmu->mapping)
arm_iommu_release_mapping(mmu->mapping);
- if (!IS_ERR_OR_NULL(group))
- iommu_group_remove_device(dev);
-
return ret;
}
-static void ipmmu_remove_device(struct device *dev)
-{
- arm_iommu_detach_device(dev);
- iommu_group_remove_device(dev);
-}
-
-static const struct iommu_ops ipmmu_ops = {
- .domain_alloc = ipmmu_domain_alloc,
- .domain_free = ipmmu_domain_free,
- .attach_dev = ipmmu_attach_device,
- .detach_dev = ipmmu_detach_device,
- .map = ipmmu_map,
- .unmap = ipmmu_unmap,
- .map_sg = default_iommu_map_sg,
- .iova_to_phys = ipmmu_iova_to_phys,
- .add_device = ipmmu_add_device,
- .remove_device = ipmmu_remove_device,
- .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
- .of_xlate = ipmmu_of_xlate,
-};
-
-#endif /* !CONFIG_ARM && CONFIG_IOMMU_DMA */
-
-#ifdef CONFIG_IOMMU_DMA
-
-static DEFINE_SPINLOCK(ipmmu_slave_devices_lock);
-static LIST_HEAD(ipmmu_slave_devices);
-
-static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type)
-{
- struct iommu_domain *io_domain = NULL;
-
- switch (type) {
- case IOMMU_DOMAIN_UNMANAGED:
- io_domain = __ipmmu_domain_alloc(type);
- break;
-
- case IOMMU_DOMAIN_DMA:
- io_domain = __ipmmu_domain_alloc(type);
- if (io_domain)
- iommu_get_dma_cookie(io_domain);
- break;
- }
-
- return io_domain;
-}
-
-static void ipmmu_domain_free_dma(struct iommu_domain *io_domain)
-{
- switch (io_domain->type) {
- case IOMMU_DOMAIN_DMA:
- iommu_put_dma_cookie(io_domain);
- /* fall-through */
- default:
- ipmmu_domain_free(io_domain);
- break;
- }
-}
-
-static int ipmmu_add_device_dma(struct device *dev)
+static int ipmmu_add_device(struct device *dev)
{
struct iommu_group *group;
/*
* Only let through devices that have been verified in xlate()
*/
- if (!to_priv(dev))
+ if (!to_ipmmu(dev))
return -ENODEV;
+ if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
+ return ipmmu_init_arm_mapping(dev);
+
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
- spin_lock(&ipmmu_slave_devices_lock);
- list_add(&to_priv(dev)->list, &ipmmu_slave_devices);
- spin_unlock(&ipmmu_slave_devices_lock);
+ iommu_group_put(group);
return 0;
}
-static void ipmmu_remove_device_dma(struct device *dev)
+static void ipmmu_remove_device(struct device *dev)
{
- struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
-
- spin_lock(&ipmmu_slave_devices_lock);
- list_del(&priv->list);
- spin_unlock(&ipmmu_slave_devices_lock);
-
+ arm_iommu_detach_device(dev);
iommu_group_remove_device(dev);
}
-static struct device *ipmmu_find_sibling_device(struct device *dev)
-{
- struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
- struct ipmmu_vmsa_iommu_priv *sibling_priv = NULL;
- bool found = false;
-
- spin_lock(&ipmmu_slave_devices_lock);
-
- list_for_each_entry(sibling_priv, &ipmmu_slave_devices, list) {
- if (priv == sibling_priv)
- continue;
- if (sibling_priv->mmu == priv->mmu) {
- found = true;
- break;
- }
- }
-
- spin_unlock(&ipmmu_slave_devices_lock);
-
- return found ? sibling_priv->dev : NULL;
-}
-
-static struct iommu_group *ipmmu_find_group_dma(struct device *dev)
+static struct iommu_group *ipmmu_find_group(struct device *dev)
{
+ struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
struct iommu_group *group;
- struct device *sibling;
- sibling = ipmmu_find_sibling_device(dev);
- if (sibling)
- group = iommu_group_get(sibling);
- if (!sibling || IS_ERR(group))
- group = generic_device_group(dev);
+ if (mmu->group)
+ return iommu_group_ref_get(mmu->group);
+
+ group = iommu_group_alloc();
+ if (!IS_ERR(group))
+ mmu->group = group;
return group;
}
static const struct iommu_ops ipmmu_ops = {
- .domain_alloc = ipmmu_domain_alloc_dma,
- .domain_free = ipmmu_domain_free_dma,
+ .domain_alloc = ipmmu_domain_alloc,
+ .domain_free = ipmmu_domain_free,
.attach_dev = ipmmu_attach_device,
.detach_dev = ipmmu_detach_device,
.map = ipmmu_map,
.unmap = ipmmu_unmap,
+ .flush_iotlb_all = ipmmu_iotlb_sync,
+ .iotlb_sync = ipmmu_iotlb_sync,
.map_sg = default_iommu_map_sg,
.iova_to_phys = ipmmu_iova_to_phys,
- .add_device = ipmmu_add_device_dma,
- .remove_device = ipmmu_remove_device_dma,
- .device_group = ipmmu_find_group_dma,
+ .add_device = ipmmu_add_device,
+ .remove_device = ipmmu_remove_device,
+ .device_group = ipmmu_find_group,
.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
.of_xlate = ipmmu_of_xlate,
};
-#endif /* CONFIG_IOMMU_DMA */
-
/* -----------------------------------------------------------------------------
* Probe/remove and init
*/
@@ -896,10 +907,40 @@ static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
unsigned int i;
/* Disable all contexts. */
- for (i = 0; i < 4; ++i)
+ for (i = 0; i < mmu->num_ctx; ++i)
ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}
+static const struct ipmmu_features ipmmu_features_default = {
+ .use_ns_alias_offset = true,
+ .has_cache_leaf_nodes = false,
+ .number_of_contexts = 1, /* software only tested with one context */
+ .setup_imbuscr = true,
+ .twobit_imttbcr_sl0 = false,
+};
+
+static const struct ipmmu_features ipmmu_features_r8a7795 = {
+ .use_ns_alias_offset = false,
+ .has_cache_leaf_nodes = true,
+ .number_of_contexts = 8,
+ .setup_imbuscr = false,
+ .twobit_imttbcr_sl0 = true,
+};
+
+static const struct of_device_id ipmmu_of_ids[] = {
+ {
+ .compatible = "renesas,ipmmu-vmsa",
+ .data = &ipmmu_features_default,
+ }, {
+ .compatible = "renesas,ipmmu-r8a7795",
+ .data = &ipmmu_features_r8a7795,
+ }, {
+ /* Terminator */
+ },
+};
+
+MODULE_DEVICE_TABLE(of, ipmmu_of_ids);
+
static int ipmmu_probe(struct platform_device *pdev)
{
struct ipmmu_vmsa_device *mmu;
@@ -917,6 +958,8 @@ static int ipmmu_probe(struct platform_device *pdev)
mmu->num_utlbs = 32;
spin_lock_init(&mmu->lock);
bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
+ mmu->features = of_device_get_match_data(&pdev->dev);
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
/* Map I/O memory and request IRQ. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -936,34 +979,71 @@ static int ipmmu_probe(struct platform_device *pdev)
* Offset the registers base unconditionally to point to the non-secure
* alias space for now.
*/
- mmu->base += IM_NS_ALIAS_OFFSET;
+ if (mmu->features->use_ns_alias_offset)
+ mmu->base += IM_NS_ALIAS_OFFSET;
+
+ mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX,
+ mmu->features->number_of_contexts);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no IRQ found\n");
- return irq;
- }
- ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
- dev_name(&pdev->dev), mmu);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
- return ret;
- }
+ /*
+ * Determine if this IPMMU instance is a root device by checking for
+ * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
+ */
+ if (!mmu->features->has_cache_leaf_nodes ||
+ !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
+ mmu->root = mmu;
+ else
+ mmu->root = ipmmu_find_root();
- ipmmu_device_reset(mmu);
+ /*
+ * Wait until the root device has been registered for sure.
+ */
+ if (!mmu->root)
+ return -EPROBE_DEFER;
+
+ /* Root devices have mandatory IRQs */
+ if (ipmmu_is_root(mmu)) {
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no IRQ found\n");
+ return irq;
+ }
- ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
- dev_name(&pdev->dev));
- if (ret)
- return ret;
+ ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
+ dev_name(&pdev->dev), mmu);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
+ return ret;
+ }
- iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
- iommu_device_set_fwnode(&mmu->iommu, &pdev->dev.of_node->fwnode);
+ ipmmu_device_reset(mmu);
+ }
- ret = iommu_device_register(&mmu->iommu);
- if (ret)
- return ret;
+ /*
+ * Register the IPMMU to the IOMMU subsystem in the following cases:
+ * - R-Car Gen2 IPMMU (all devices registered)
+ * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
+ */
+ if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
+ ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
+ dev_name(&pdev->dev));
+ if (ret)
+ return ret;
+
+ iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
+ iommu_device_set_fwnode(&mmu->iommu,
+ &pdev->dev.of_node->fwnode);
+
+ ret = iommu_device_register(&mmu->iommu);
+ if (ret)
+ return ret;
+
+#if defined(CONFIG_IOMMU_DMA)
+ if (!iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, &ipmmu_ops);
+#endif
+ }
/*
* We can't create the ARM mapping here as it requires the bus to have
@@ -983,20 +1063,13 @@ static int ipmmu_remove(struct platform_device *pdev)
iommu_device_sysfs_remove(&mmu->iommu);
iommu_device_unregister(&mmu->iommu);
-#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
arm_iommu_release_mapping(mmu->mapping);
-#endif
ipmmu_device_reset(mmu);
return 0;
}
-static const struct of_device_id ipmmu_of_ids[] = {
- { .compatible = "renesas,ipmmu-vmsa", },
- { }
-};
-
static struct platform_driver ipmmu_driver = {
.driver = {
.name = "ipmmu-vmsa",
@@ -1008,15 +1081,22 @@ static struct platform_driver ipmmu_driver = {
static int __init ipmmu_init(void)
{
+ static bool setup_done;
int ret;
+ if (setup_done)
+ return 0;
+
ret = platform_driver_register(&ipmmu_driver);
if (ret < 0)
return ret;
+#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
if (!iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, &ipmmu_ops);
+#endif
+ setup_done = true;
return 0;
}
@@ -1028,6 +1108,19 @@ static void __exit ipmmu_exit(void)
subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);
+#ifdef CONFIG_IOMMU_DMA
+static int __init ipmmu_vmsa_iommu_of_setup(struct device_node *np)
+{
+ ipmmu_init();
+ return 0;
+}
+
+IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa",
+ ipmmu_vmsa_iommu_of_setup);
+IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795",
+ ipmmu_vmsa_iommu_of_setup);
+#endif
+
MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");
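A quick illustration of the registration rule introduced above: on R-Car Gen2 every IPMMU instance registers with the IOMMU core, while on Gen3 only the cache (leaf) instances do and the root IPMMU-MM device is skipped. The following is a minimal user-space sketch of that condition only, not kernel code; the booleans stand in for the feature flag and the root check.

#include <stdbool.h>
#include <stdio.h>

/* Model of the registration condition in ipmmu_probe(): register with
 * the IOMMU core unless the SoC has cache leaf nodes (Gen3) and this
 * instance is the root IPMMU-MM device.
 */
static bool registers_with_core(bool has_cache_leaf_nodes, bool is_root)
{
	return !has_cache_leaf_nodes || !is_root;
}

int main(void)
{
	printf("Gen2 root: %d\n", registers_with_core(false, true));   /* 1 */
	printf("Gen2 leaf: %d\n", registers_with_core(false, false));  /* 1 */
	printf("Gen3 root: %d\n", registers_with_core(true, true));    /* 0 */
	printf("Gen3 leaf: %d\n", registers_with_core(true, false));   /* 1 */
	return 0;
}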
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 16d33ac19db0..f227d73e7bf6 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -392,6 +392,11 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
return unmapsz;
}
+static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
+{
+ mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+}
+
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -491,6 +496,8 @@ static struct iommu_ops mtk_iommu_ops = {
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
.map_sg = default_iommu_map_sg,
+ .flush_iotlb_all = mtk_iommu_iotlb_sync,
+ .iotlb_sync = mtk_iommu_iotlb_sync,
.iova_to_phys = mtk_iommu_iova_to_phys,
.add_device = mtk_iommu_add_device,
.remove_device = mtk_iommu_remove_device,
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index bc1efbfb9ddf..542930cd183d 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -708,7 +708,7 @@ static struct platform_driver mtk_iommu_driver = {
.probe = mtk_iommu_probe,
.remove = mtk_iommu_remove,
.driver = {
- .name = "mtk-iommu",
+ .name = "mtk-iommu-v1",
.of_match_table = mtk_iommu_of_ids,
.pm = &mtk_iommu_pm_ops,
}
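The MediaTek hunks above wire both flush_iotlb_all and iotlb_sync to the same TLB synchronisation routine. The sketch below is a generic, user-space model of that ops-table shape under assumed names (demo_iommu_ops, demo_tlb_sync); it is not the MediaTek driver.

#include <stdio.h>

/* Hypothetical ops table mirroring the shape of the change above: the
 * flush-all hook and the deferred-sync hook both point at the same TLB
 * synchronisation routine.
 */
struct demo_iommu_ops {
	void (*flush_iotlb_all)(const void *cookie);
	void (*iotlb_sync)(const void *cookie);
};

static void demo_tlb_sync(const void *cookie)
{
	printf("syncing TLB for %s\n", (const char *)cookie);
}

static const struct demo_iommu_ops demo_ops = {
	.flush_iotlb_all = demo_tlb_sync,
	.iotlb_sync	 = demo_tlb_sync,
};

int main(void)
{
	demo_ops.flush_iotlb_all("m4u");
	demo_ops.iotlb_sync("m4u");
	return 0;
}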
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index bd67e1b2c64e..e135ab830ebf 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -2,6 +2,7 @@
* omap iommu: tlb and pagetable primitives
*
* Copyright (C) 2008-2010 Nokia Corporation
+ * Copyright (C) 2013-2017 Texas Instruments Incorporated - http://www.ti.com/
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
* Paul Mundt and Toshihiro Kobayashi
@@ -71,13 +72,23 @@ static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
**/
void omap_iommu_save_ctx(struct device *dev)
{
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
- u32 *p = obj->ctx;
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu *obj;
+ u32 *p;
int i;
- for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
- p[i] = iommu_read_reg(obj, i * sizeof(u32));
- dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+ if (!arch_data)
+ return;
+
+ while (arch_data->iommu_dev) {
+ obj = arch_data->iommu_dev;
+ p = obj->ctx;
+ for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+ p[i] = iommu_read_reg(obj, i * sizeof(u32));
+ dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
+ p[i]);
+ }
+ arch_data++;
}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
@@ -88,13 +99,23 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
**/
void omap_iommu_restore_ctx(struct device *dev)
{
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
- u32 *p = obj->ctx;
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu *obj;
+ u32 *p;
int i;
- for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
- iommu_write_reg(obj, p[i], i * sizeof(u32));
- dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+ if (!arch_data)
+ return;
+
+ while (arch_data->iommu_dev) {
+ obj = arch_data->iommu_dev;
+ p = obj->ctx;
+ for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+ iommu_write_reg(obj, p[i], i * sizeof(u32));
+ dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
+ p[i]);
+ }
+ arch_data++;
}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
@@ -805,7 +826,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
struct iommu_domain *domain = obj->domain;
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
- if (!omap_domain->iommu_dev)
+ if (!omap_domain->dev)
return IRQ_NONE;
errs = iommu_report_fault(obj, &da);
@@ -893,6 +914,24 @@ static void omap_iommu_detach(struct omap_iommu *obj)
dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
+static bool omap_iommu_can_register(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
+ return true;
+
+ /*
+	 * Restrict IOMMU core registration to the processor-port MDMA MMUs
+	 * on DRA7 DSPs.
+ */
+ if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
+ (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
+ return true;
+
+ return false;
+}
+
static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
struct omap_iommu *obj)
{
@@ -984,19 +1023,22 @@ static int omap_iommu_probe(struct platform_device *pdev)
return err;
platform_set_drvdata(pdev, obj);
- obj->group = iommu_group_alloc();
- if (IS_ERR(obj->group))
- return PTR_ERR(obj->group);
+ if (omap_iommu_can_register(pdev)) {
+ obj->group = iommu_group_alloc();
+ if (IS_ERR(obj->group))
+ return PTR_ERR(obj->group);
- err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL, obj->name);
- if (err)
- goto out_group;
+ err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
+ obj->name);
+ if (err)
+ goto out_group;
- iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
+ iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
- err = iommu_device_register(&obj->iommu);
- if (err)
- goto out_sysfs;
+ err = iommu_device_register(&obj->iommu);
+ if (err)
+ goto out_sysfs;
+ }
pm_runtime_irq_safe(obj->dev);
pm_runtime_enable(obj->dev);
@@ -1018,11 +1060,13 @@ static int omap_iommu_remove(struct platform_device *pdev)
{
struct omap_iommu *obj = platform_get_drvdata(pdev);
- iommu_group_put(obj->group);
- obj->group = NULL;
+ if (obj->group) {
+ iommu_group_put(obj->group);
+ obj->group = NULL;
- iommu_device_sysfs_remove(&obj->iommu);
- iommu_device_unregister(&obj->iommu);
+ iommu_device_sysfs_remove(&obj->iommu);
+ iommu_device_unregister(&obj->iommu);
+ }
omap_iommu_debugfs_remove(obj);
@@ -1068,11 +1112,13 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
phys_addr_t pa, size_t bytes, int prot)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
- struct omap_iommu *oiommu = omap_domain->iommu_dev;
- struct device *dev = oiommu->dev;
+ struct device *dev = omap_domain->dev;
+ struct omap_iommu_device *iommu;
+ struct omap_iommu *oiommu;
struct iotlb_entry e;
int omap_pgsz;
- u32 ret;
+ u32 ret = -EINVAL;
+ int i;
omap_pgsz = bytes_to_iopgsz(bytes);
if (omap_pgsz < 0) {
@@ -1084,9 +1130,24 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
iotlb_init_entry(&e, da, pa, omap_pgsz);
- ret = omap_iopgtable_store_entry(oiommu, &e);
- if (ret)
- dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);
+ iommu = omap_domain->iommus;
+ for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
+ oiommu = iommu->iommu_dev;
+ ret = omap_iopgtable_store_entry(oiommu, &e);
+ if (ret) {
+ dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
+ ret);
+ break;
+ }
+ }
+
+ if (ret) {
+ while (i--) {
+ iommu--;
+ oiommu = iommu->iommu_dev;
+ iopgtable_clear_entry(oiommu, da);
+ }
+ }
return ret;
}
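The mapping loop above mirror-programs every IOMMU in the domain and rolls back the already-programmed instances when one of them fails. The following stand-alone sketch models just that control flow; store_entry, clear_entry and the fail_at knob are hypothetical stand-ins for the page-table helpers.

#include <stdio.h>

#define NUM_IOMMUS 3

/* Toy model of the mirror-programming loop: every IOMMU in the domain
 * gets the same entry, and a failure part-way through rolls back the
 * instances that were already programmed.
 */
static int store_entry(int idx, int fail_at)
{
	if (idx == fail_at)
		return -1;
	printf("iommu %d: entry stored\n", idx);
	return 0;
}

static void clear_entry(int idx)
{
	printf("iommu %d: entry cleared (rollback)\n", idx);
}

static int map_mirrored(int fail_at)
{
	int ret = 0;
	int i;

	for (i = 0; i < NUM_IOMMUS; i++) {
		ret = store_entry(i, fail_at);
		if (ret)
			break;
	}

	if (ret) {
		while (i--)
			clear_entry(i);
	}

	return ret;
}

int main(void)
{
	map_mirrored(-1);	/* all succeed, nothing rolled back */
	map_mirrored(2);	/* third instance fails, first two rolled back */
	return 0;
}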
@@ -1095,12 +1156,90 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
size_t size)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
- struct omap_iommu *oiommu = omap_domain->iommu_dev;
- struct device *dev = oiommu->dev;
+ struct device *dev = omap_domain->dev;
+ struct omap_iommu_device *iommu;
+ struct omap_iommu *oiommu;
+ bool error = false;
+ size_t bytes = 0;
+ int i;
dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
- return iopgtable_clear_entry(oiommu, da);
+ iommu = omap_domain->iommus;
+ for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
+ oiommu = iommu->iommu_dev;
+ bytes = iopgtable_clear_entry(oiommu, da);
+ if (!bytes)
+ error = true;
+ }
+
+ /*
+	 * Simplified return value: we only check whether any of the iommus
+	 * reported an error, not whether they all unmapped the same number
+	 * of entries. A mismatch should not occur because every iommu in
+	 * the domain is mirror-programmed.
+ */
+ return error ? 0 : bytes;
+}
+
+static int omap_iommu_count(struct device *dev)
+{
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ int count = 0;
+
+ while (arch_data->iommu_dev) {
+ count++;
+ arch_data++;
+ }
+
+ return count;
+}
+
+/* caller should call cleanup if this function fails */
+static int omap_iommu_attach_init(struct device *dev,
+ struct omap_iommu_domain *odomain)
+{
+ struct omap_iommu_device *iommu;
+ int i;
+
+ odomain->num_iommus = omap_iommu_count(dev);
+ if (!odomain->num_iommus)
+ return -EINVAL;
+
+ odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
+ GFP_ATOMIC);
+ if (!odomain->iommus)
+ return -ENOMEM;
+
+ iommu = odomain->iommus;
+ for (i = 0; i < odomain->num_iommus; i++, iommu++) {
+ iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
+ if (!iommu->pgtable)
+ return -ENOMEM;
+
+ /*
+ * should never fail, but please keep this around to ensure
+ * we keep the hardware happy
+ */
+ if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
+ IOPGD_TABLE_SIZE)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
+{
+ int i;
+ struct omap_iommu_device *iommu = odomain->iommus;
+
+ for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
+ kfree(iommu->pgtable);
+
+ kfree(odomain->iommus);
+ odomain->num_iommus = 0;
+ odomain->iommus = NULL;
}
static int
@@ -1108,8 +1247,10 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_device *iommu;
struct omap_iommu *oiommu;
int ret = 0;
+ int i;
if (!arch_data || !arch_data->iommu_dev) {
dev_err(dev, "device doesn't have an associated iommu\n");
@@ -1118,26 +1259,49 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
spin_lock(&omap_domain->lock);
- /* only a single device is supported per domain for now */
- if (omap_domain->iommu_dev) {
+ /* only a single client device can be attached to a domain */
+ if (omap_domain->dev) {
dev_err(dev, "iommu domain is already attached\n");
ret = -EBUSY;
goto out;
}
- oiommu = arch_data->iommu_dev;
-
- /* get a handle to and enable the omap iommu */
- ret = omap_iommu_attach(oiommu, omap_domain->pgtable);
+ ret = omap_iommu_attach_init(dev, omap_domain);
if (ret) {
- dev_err(dev, "can't get omap iommu: %d\n", ret);
- goto out;
+ dev_err(dev, "failed to allocate required iommu data %d\n",
+ ret);
+ goto init_fail;
+ }
+
+ iommu = omap_domain->iommus;
+ for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
+ /* configure and enable the omap iommu */
+ oiommu = arch_data->iommu_dev;
+ ret = omap_iommu_attach(oiommu, iommu->pgtable);
+ if (ret) {
+ dev_err(dev, "can't get omap iommu: %d\n", ret);
+ goto attach_fail;
+ }
+
+ oiommu->domain = domain;
+ iommu->iommu_dev = oiommu;
}
- omap_domain->iommu_dev = oiommu;
omap_domain->dev = dev;
- oiommu->domain = domain;
+ goto out;
+
+attach_fail:
+ while (i--) {
+ iommu--;
+ arch_data--;
+ oiommu = iommu->iommu_dev;
+ omap_iommu_detach(oiommu);
+ iommu->iommu_dev = NULL;
+ oiommu->domain = NULL;
+ }
+init_fail:
+ omap_iommu_detach_fini(omap_domain);
out:
spin_unlock(&omap_domain->lock);
return ret;
@@ -1146,21 +1310,40 @@ out:
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
struct device *dev)
{
- struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_device *iommu = omap_domain->iommus;
+ struct omap_iommu *oiommu;
+ int i;
+
+ if (!omap_domain->dev) {
+ dev_err(dev, "domain has no attached device\n");
+ return;
+ }
/* only a single device is supported per domain for now */
- if (omap_domain->iommu_dev != oiommu) {
- dev_err(dev, "invalid iommu device\n");
+ if (omap_domain->dev != dev) {
+ dev_err(dev, "invalid attached device\n");
return;
}
- iopgtable_clear_entry_all(oiommu);
+ /*
+	 * clean up in the reverse order of attachment; this handles any
+	 * h/w dependencies between the multiple instances
+ */
+ iommu += (omap_domain->num_iommus - 1);
+ arch_data += (omap_domain->num_iommus - 1);
+ for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
+ oiommu = iommu->iommu_dev;
+ iopgtable_clear_entry_all(oiommu);
+
+ omap_iommu_detach(oiommu);
+ iommu->iommu_dev = NULL;
+ oiommu->domain = NULL;
+ }
- omap_iommu_detach(oiommu);
+ omap_iommu_detach_fini(omap_domain);
- omap_domain->iommu_dev = NULL;
omap_domain->dev = NULL;
- oiommu->domain = NULL;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
@@ -1182,18 +1365,7 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
if (!omap_domain)
- goto out;
-
- omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
- if (!omap_domain->pgtable)
- goto fail_nomem;
-
- /*
- * should never fail, but please keep this around to ensure
- * we keep the hardware happy
- */
- if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)))
- goto fail_align;
+ return NULL;
spin_lock_init(&omap_domain->lock);
@@ -1202,13 +1374,6 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
omap_domain->domain.geometry.force_aperture = true;
return &omap_domain->domain;
-
-fail_align:
- kfree(omap_domain->pgtable);
-fail_nomem:
- kfree(omap_domain);
-out:
- return NULL;
}
static void omap_iommu_domain_free(struct iommu_domain *domain)
@@ -1219,10 +1384,9 @@ static void omap_iommu_domain_free(struct iommu_domain *domain)
* An iommu device is still attached
* (currently, only one device can be attached) ?
*/
- if (omap_domain->iommu_dev)
+ if (omap_domain->dev)
_omap_iommu_detach_dev(omap_domain, omap_domain->dev);
- kfree(omap_domain->pgtable);
kfree(omap_domain);
}
@@ -1230,11 +1394,16 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t da)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
- struct omap_iommu *oiommu = omap_domain->iommu_dev;
+ struct omap_iommu_device *iommu = omap_domain->iommus;
+ struct omap_iommu *oiommu = iommu->iommu_dev;
struct device *dev = oiommu->dev;
u32 *pgd, *pte;
phys_addr_t ret = 0;
+ /*
+ * all the iommus within the domain will have identical programming,
+ * so perform the lookup using just the first iommu
+ */
iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
if (pte) {
@@ -1260,11 +1429,12 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
static int omap_iommu_add_device(struct device *dev)
{
- struct omap_iommu_arch_data *arch_data;
+ struct omap_iommu_arch_data *arch_data, *tmp;
struct omap_iommu *oiommu;
struct iommu_group *group;
struct device_node *np;
struct platform_device *pdev;
+ int num_iommus, i;
int ret;
/*
@@ -1276,36 +1446,57 @@ static int omap_iommu_add_device(struct device *dev)
if (!dev->of_node)
return 0;
- np = of_parse_phandle(dev->of_node, "iommus", 0);
- if (!np)
+ /*
+ * retrieve the count of IOMMU nodes using phandle size as element size
+ * since #iommu-cells = 0 for OMAP
+ */
+ num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
+ sizeof(phandle));
+ if (num_iommus < 0)
return 0;
- pdev = of_find_device_by_node(np);
- if (WARN_ON(!pdev)) {
- of_node_put(np);
- return -EINVAL;
- }
+ arch_data = kzalloc((num_iommus + 1) * sizeof(*arch_data), GFP_KERNEL);
+ if (!arch_data)
+ return -ENOMEM;
- oiommu = platform_get_drvdata(pdev);
- if (!oiommu) {
- of_node_put(np);
- return -EINVAL;
- }
+ for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
+ np = of_parse_phandle(dev->of_node, "iommus", i);
+ if (!np) {
+ kfree(arch_data);
+ return -EINVAL;
+ }
+
+ pdev = of_find_device_by_node(np);
+ if (WARN_ON(!pdev)) {
+ of_node_put(np);
+ kfree(arch_data);
+ return -EINVAL;
+ }
+
+ oiommu = platform_get_drvdata(pdev);
+ if (!oiommu) {
+ of_node_put(np);
+ kfree(arch_data);
+ return -EINVAL;
+ }
+
+ tmp->iommu_dev = oiommu;
- arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
- if (!arch_data) {
of_node_put(np);
- return -ENOMEM;
}
+ /*
+	 * use the first IOMMU alone for the sysfs device linking.
+	 * TODO: evaluate whether a single iommu_group should be
+	 * maintained across all the IOMMUs
+ */
+ oiommu = arch_data->iommu_dev;
ret = iommu_device_link(&oiommu->iommu, dev);
if (ret) {
kfree(arch_data);
- of_node_put(np);
return ret;
}
- arch_data->iommu_dev = oiommu;
dev->archdata.iommu = arch_data;
/*
@@ -1321,8 +1512,6 @@ static int omap_iommu_add_device(struct device *dev)
}
iommu_group_put(group);
- of_node_put(np);
-
return 0;
}
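omap_iommu_add_device() above allocates num_iommus + 1 zeroed arch_data slots so that the trailing NULL iommu_dev acts as a sentinel for walkers such as omap_iommu_save_ctx() and omap_iommu_count(). Below is a small user-space sketch of that layout, with a stand-in struct rather than the kernel one.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the arch_data layout: num_iommus + 1 zero-initialised
 * slots, so the terminating NULL pointer acts as a sentinel.
 */
struct demo_arch_data {
	const char *iommu_dev;	/* stand-in for struct omap_iommu * */
};

static int count_iommus(const struct demo_arch_data *arch_data)
{
	int count = 0;

	while (arch_data->iommu_dev) {
		count++;
		arch_data++;
	}
	return count;
}

int main(void)
{
	const int num_iommus = 2;
	struct demo_arch_data *arch_data;

	arch_data = calloc(num_iommus + 1, sizeof(*arch_data));
	if (!arch_data)
		return 1;

	arch_data[0].iommu_dev = "40d01000.mmu";
	arch_data[1].iommu_dev = "41501000.mmu";
	/* arch_data[2] stays zeroed: the sentinel */

	printf("attached iommus: %d\n", count_iommus(arch_data));
	free(arch_data);
	return 0;
}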
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index a675af29a6ec..1703159ef5af 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -29,17 +29,26 @@ struct iotlb_entry {
};
/**
+ * struct omap_iommu_device - omap iommu device data
+ * @pgtable: page table used by an omap iommu attached to a domain
+ * @iommu_dev: pointer to store an omap iommu instance attached to a domain
+ */
+struct omap_iommu_device {
+ u32 *pgtable;
+ struct omap_iommu *iommu_dev;
+};
+
+/**
* struct omap_iommu_domain - omap iommu domain
- * @pgtable: the page table
- * @iommu_dev: an omap iommu device attached to this domain. only a single
- * iommu device can be attached for now.
+ * @num_iommus: number of iommus in this domain
+ * @iommus: omap iommu device data for all iommus in this domain
* @dev: Device using this domain.
* @lock: domain lock, should be taken when attaching/detaching
* @domain: generic domain handle used by iommu core code
*/
struct omap_iommu_domain {
- u32 *pgtable;
- struct omap_iommu *iommu_dev;
+ u32 num_iommus;
+ struct omap_iommu_device *iommus;
struct device *dev;
spinlock_t lock;
struct iommu_domain domain;
@@ -97,17 +106,6 @@ struct iotlb_lock {
short vict;
};
-/**
- * dev_to_omap_iommu() - retrieves an omap iommu object from a user device
- * @dev: iommu client device
- */
-static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
-{
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
-
- return arch_data->iommu_dev;
-}
-
/*
* MMU Register offsets
*/
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index c8a587d034b0..e07f02d00c68 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -66,6 +66,7 @@ struct qcom_iommu_ctx {
void __iomem *base;
bool secure_init;
u8 asid; /* asid and ctx bank # are 1:1 */
+ struct iommu_domain *domain;
};
struct qcom_iommu_domain {
@@ -194,12 +195,15 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);
- dev_err_ratelimited(ctx->dev,
- "Unhandled context fault: fsr=0x%x, "
- "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
- fsr, iova, fsynr, ctx->asid);
+ if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
+ dev_err_ratelimited(ctx->dev,
+ "Unhandled context fault: fsr=0x%x, "
+ "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
+ fsr, iova, fsynr, ctx->asid);
+ }
iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
+ iommu_writel(ctx, ARM_SMMU_CB_RESUME, RESUME_TERMINATE);
return IRQ_HANDLED;
}
@@ -274,12 +278,14 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
/* SCTLR */
reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
- SCTLR_M | SCTLR_S1_ASIDPNE;
+ SCTLR_M | SCTLR_S1_ASIDPNE | SCTLR_CFCFG;
if (IS_ENABLED(CONFIG_BIG_ENDIAN))
reg |= SCTLR_E;
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
+
+ ctx->domain = domain;
}
mutex_unlock(&qcom_domain->init_mutex);
@@ -395,6 +401,8 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
/* Disable the context bank: */
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
+
+ ctx->domain = NULL;
}
pm_runtime_put_sync(qcom_iommu->dev);
@@ -443,6 +451,19 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
return ret;
}
+static void qcom_iommu_iotlb_sync(struct iommu_domain *domain)
+{
+ struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
+ struct io_pgtable, ops);
+ if (!qcom_domain->pgtbl_ops)
+ return;
+
+ pm_runtime_get_sync(qcom_domain->iommu->dev);
+ qcom_iommu_tlb_sync(pgtable->cookie);
+ pm_runtime_put_sync(qcom_domain->iommu->dev);
+}
+
static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -570,6 +591,8 @@ static const struct iommu_ops qcom_iommu_ops = {
.map = qcom_iommu_map,
.unmap = qcom_iommu_unmap,
.map_sg = default_iommu_map_sg,
+ .flush_iotlb_all = qcom_iommu_iotlb_sync,
+ .iotlb_sync = qcom_iommu_iotlb_sync,
.iova_to_phys = qcom_iommu_iova_to_phys,
.add_device = qcom_iommu_add_device,
.remove_device = qcom_iommu_remove_device,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 9d8a1dd2e2c2..53380bd72ea4 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -1,3 +1,5 @@
+menu "IRQ chip support"
+
config IRQCHIP
def_bool y
depends on OF_IRQ
@@ -151,6 +153,9 @@ config CLPS711X_IRQCHIP
select SPARSE_IRQ
default y
+config OMPIC
+ bool
+
config OR1K_PIC
bool
select IRQ_DOMAIN
@@ -304,6 +309,7 @@ config EZNPS_GIC
config STM32_EXTI
bool
select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
config QCOM_IRQ_COMBINER
bool "QCOM IRQ combiner support"
@@ -321,3 +327,13 @@ config IRQ_UNIPHIER_AIDET
select IRQ_DOMAIN_HIERARCHY
help
Support for the UniPhier AIDET (ARM Interrupt Detector).
+
+config MESON_IRQ_GPIO
+ bool "Meson GPIO Interrupt Multiplexer"
+ depends on ARCH_MESON
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support Meson SoC Family GPIO Interrupt Multiplexer
+
+endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b842dfdc903f..dae7282bfdef 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o
obj-$(CONFIG_METAG) += irq-metag-ext.o
obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o
obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o
+obj-$(CONFIG_OMPIC) += irq-ompic.o
obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o
obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
obj-$(CONFIG_OMAP_IRQCHIP) += irq-omap-intc.o
@@ -80,3 +81,5 @@ obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
+obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o
+obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c
index 815b88dd18f2..f20200af0992 100644
--- a/drivers/irqchip/irq-aspeed-i2c-ic.c
+++ b/drivers/irqchip/irq-aspeed-i2c-ic.c
@@ -76,8 +76,8 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node,
return -ENOMEM;
i2c_ic->base = of_iomap(node, 0);
- if (IS_ERR(i2c_ic->base)) {
- ret = PTR_ERR(i2c_ic->base);
+ if (!i2c_ic->base) {
+ ret = -ENOMEM;
goto err_free_ic;
}
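The fix above works because of_iomap() reports failure with a plain NULL rather than an ERR_PTR() value, so the previous IS_ERR() check could never fire. The sketch below uses simplified stand-ins for ERR_PTR()/IS_ERR() purely to demonstrate the difference; it is not the kernel's definition.

#include <stdio.h>

/* Minimal stand-ins for the kernel's ERR_PTR/IS_ERR helpers, just to
 * show why checking the wrong failure convention misses errors.
 */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *returns_null_on_failure(void)
{
	return NULL;		/* like of_iomap() */
}

static void *returns_err_ptr_on_failure(void)
{
	return ERR_PTR(-12);	/* like many other kernel interfaces */
}

int main(void)
{
	void *a = returns_null_on_failure();
	void *b = returns_err_ptr_on_failure();

	printf("IS_ERR on NULL-style failure:    %d (missed!)\n", IS_ERR(a));
	printf("!a catches NULL-style failure:   %d\n", a == NULL);
	printf("IS_ERR on ERR_PTR-style failure: %d\n", IS_ERR(b));
	return 0;
}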
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index b009b916a292..691d20eb0bec 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -1,7 +1,7 @@
/*
* Generic Broadcom Set Top Box Level 2 Interrupt controller driver
*
- * Copyright (C) 2014 Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -31,35 +31,82 @@
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
-/* Register offsets in the L2 interrupt controller */
-#define CPU_STATUS 0x00
-#define CPU_SET 0x04
-#define CPU_CLEAR 0x08
-#define CPU_MASK_STATUS 0x0c
-#define CPU_MASK_SET 0x10
-#define CPU_MASK_CLEAR 0x14
+struct brcmstb_intc_init_params {
+ irq_flow_handler_t handler;
+ int cpu_status;
+ int cpu_clear;
+ int cpu_mask_status;
+ int cpu_mask_set;
+ int cpu_mask_clear;
+};
+
+/* Register offsets in the L2 latched interrupt controller */
+static const struct brcmstb_intc_init_params l2_edge_intc_init = {
+ .handler = handle_edge_irq,
+ .cpu_status = 0x00,
+ .cpu_clear = 0x08,
+ .cpu_mask_status = 0x0c,
+ .cpu_mask_set = 0x10,
+ .cpu_mask_clear = 0x14
+};
+
+/* Register offsets in the L2 level interrupt controller */
+static const struct brcmstb_intc_init_params l2_lvl_intc_init = {
+ .handler = handle_level_irq,
+ .cpu_status = 0x00,
+ .cpu_clear = -1, /* Register not present */
+ .cpu_mask_status = 0x04,
+ .cpu_mask_set = 0x08,
+ .cpu_mask_clear = 0x0C
+};
/* L2 intc private data structure */
struct brcmstb_l2_intc_data {
- int parent_irq;
- void __iomem *base;
struct irq_domain *domain;
+ struct irq_chip_generic *gc;
+ int status_offset;
+ int mask_offset;
bool can_wake;
u32 saved_mask; /* for suspend/resume */
};
+/**
+ * brcmstb_l2_mask_and_ack - Mask and ack pending interrupt
+ * @d: irq_data
+ *
+ * The chip has separate enable/disable registers instead of a single
+ * mask register, and a pending interrupt is acknowledged by setting a
+ * bit.
+ *
+ * Note: This function is generic and could easily be added to the
+ * generic irqchip implementation should there ever be a desire to do so.
+ * Perhaps with a name like irq_gc_mask_disable_and_ack_set().
+ *
+ * e.g.: https://patchwork.kernel.org/patch/9831047/
+ */
+static void brcmstb_l2_mask_and_ack(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ irq_reg_writel(gc, mask, ct->regs.disable);
+ *ct->mask_cache &= ~mask;
+ irq_reg_writel(gc, mask, ct->regs.ack);
+ irq_gc_unlock(gc);
+}
+
static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
{
struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
- struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int irq;
u32 status;
chained_irq_enter(chip, desc);
- status = irq_reg_readl(gc, CPU_STATUS) &
- ~(irq_reg_readl(gc, CPU_MASK_STATUS));
+ status = irq_reg_readl(b->gc, b->status_offset) &
+ ~(irq_reg_readl(b->gc, b->mask_offset));
if (status == 0) {
raw_spin_lock(&desc->lock);
@@ -70,10 +117,8 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
do {
irq = ffs(status) - 1;
- /* ack at our level */
- irq_reg_writel(gc, 1 << irq, CPU_CLEAR);
status &= ~(1 << irq);
- generic_handle_irq(irq_find_mapping(b->domain, irq));
+ generic_handle_irq(irq_linear_revmap(b->domain, irq));
} while (status);
out:
chained_irq_exit(chip, desc);
@@ -82,16 +127,17 @@ out:
static void brcmstb_l2_intc_suspend(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
irq_gc_lock(gc);
/* Save the current mask */
- b->saved_mask = irq_reg_readl(gc, CPU_MASK_STATUS);
+ b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
if (b->can_wake) {
/* Program the wakeup mask */
- irq_reg_writel(gc, ~gc->wake_active, CPU_MASK_SET);
- irq_reg_writel(gc, gc->wake_active, CPU_MASK_CLEAR);
+ irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
+ irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
}
irq_gc_unlock(gc);
}
@@ -99,49 +145,56 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
static void brcmstb_l2_intc_resume(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
irq_gc_lock(gc);
- /* Clear unmasked non-wakeup interrupts */
- irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, CPU_CLEAR);
+ if (ct->chip.irq_ack) {
+ /* Clear unmasked non-wakeup interrupts */
+ irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
+ ct->regs.ack);
+ }
/* Restore the saved mask */
- irq_reg_writel(gc, b->saved_mask, CPU_MASK_SET);
- irq_reg_writel(gc, ~b->saved_mask, CPU_MASK_CLEAR);
+ irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
+ irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
irq_gc_unlock(gc);
}
static int __init brcmstb_l2_intc_of_init(struct device_node *np,
- struct device_node *parent)
+ struct device_node *parent,
+ const struct brcmstb_intc_init_params
+ *init_params)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct brcmstb_l2_intc_data *data;
- struct irq_chip_generic *gc;
struct irq_chip_type *ct;
int ret;
unsigned int flags;
+ int parent_irq;
+ void __iomem *base;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->base = of_iomap(np, 0);
- if (!data->base) {
+ base = of_iomap(np, 0);
+ if (!base) {
pr_err("failed to remap intc L2 registers\n");
ret = -ENOMEM;
goto out_free;
}
/* Disable all interrupts by default */
- writel(0xffffffff, data->base + CPU_MASK_SET);
+ writel(0xffffffff, base + init_params->cpu_mask_set);
/* Wakeup interrupts may be retained from S5 (cold boot) */
data->can_wake = of_property_read_bool(np, "brcm,irq-can-wake");
- if (!data->can_wake)
- writel(0xffffffff, data->base + CPU_CLEAR);
+ if (!data->can_wake && (init_params->cpu_clear >= 0))
+ writel(0xffffffff, base + init_params->cpu_clear);
- data->parent_irq = irq_of_parse_and_map(np, 0);
- if (!data->parent_irq) {
+ parent_irq = irq_of_parse_and_map(np, 0);
+ if (!parent_irq) {
pr_err("failed to find parent interrupt\n");
ret = -EINVAL;
goto out_unmap;
@@ -163,29 +216,39 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
/* Allocate a single Generic IRQ chip for this node */
ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
- np->full_name, handle_edge_irq, clr, 0, flags);
+ np->full_name, init_params->handler, clr, 0, flags);
if (ret) {
pr_err("failed to allocate generic irq chip\n");
goto out_free_domain;
}
/* Set the IRQ chaining logic */
- irq_set_chained_handler_and_data(data->parent_irq,
+ irq_set_chained_handler_and_data(parent_irq,
brcmstb_l2_intc_irq_handle, data);
- gc = irq_get_domain_generic_chip(data->domain, 0);
- gc->reg_base = data->base;
- gc->private = data;
- ct = gc->chip_types;
-
- ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->regs.ack = CPU_CLEAR;
+ data->gc = irq_get_domain_generic_chip(data->domain, 0);
+ data->gc->reg_base = base;
+ data->gc->private = data;
+ data->status_offset = init_params->cpu_status;
+ data->mask_offset = init_params->cpu_mask_status;
+
+ ct = data->gc->chip_types;
+
+ if (init_params->cpu_clear >= 0) {
+ ct->regs.ack = init_params->cpu_clear;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack;
+ } else {
+ /* No Ack - but still slightly more efficient to define this */
+ ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
+ }
ct->chip.irq_mask = irq_gc_mask_disable_reg;
- ct->regs.disable = CPU_MASK_SET;
+ ct->regs.disable = init_params->cpu_mask_set;
+ ct->regs.mask = init_params->cpu_mask_status;
ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
- ct->regs.enable = CPU_MASK_CLEAR;
+ ct->regs.enable = init_params->cpu_mask_clear;
ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
ct->chip.irq_resume = brcmstb_l2_intc_resume;
@@ -195,21 +258,35 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
/* This IRQ chip can wake the system, set all child interrupts
* in wake_enabled mask
*/
- gc->wake_enabled = 0xffffffff;
+ data->gc->wake_enabled = 0xffffffff;
ct->chip.irq_set_wake = irq_gc_set_wake;
}
pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n",
- data->base, data->parent_irq);
+ base, parent_irq);
return 0;
out_free_domain:
irq_domain_remove(data->domain);
out_unmap:
- iounmap(data->base);
+ iounmap(base);
out_free:
kfree(data);
return ret;
}
-IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_intc_of_init);
+
+int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
+}
+IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
+
+int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
+}
+IRQCHIP_DECLARE(bcm7271_l2_intc, "brcm,bcm7271-l2-intc",
+ brcmstb_l2_lvl_intc_of_init);
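The brcmstb rework above moves the register layout into per-variant init parameters so one init routine can drive both the edge (latched) and level controllers, with a negative cpu_clear offset meaning the CLEAR register does not exist. Below is a condensed user-space model of that idea; the offsets mirror the tables in the patch, everything else is a stand-in.

#include <stdio.h>

/* Condensed model of brcmstb_intc_init_params: one init routine, two
 * parameter tables; cpu_clear < 0 means there is no CLEAR register.
 */
struct demo_init_params {
	const char *handler;
	int cpu_status;
	int cpu_clear;		/* -1: register not present */
	int cpu_mask_status;
	int cpu_mask_set;
	int cpu_mask_clear;
};

static const struct demo_init_params l2_edge = {
	"handle_edge_irq", 0x00, 0x08, 0x0c, 0x10, 0x14
};

static const struct demo_init_params l2_lvl = {
	"handle_level_irq", 0x00, -1, 0x04, 0x08, 0x0c
};

static void demo_init(const struct demo_init_params *p)
{
	printf("flow handler: %s\n", p->handler);
	if (p->cpu_clear >= 0)
		printf("ack via CLEAR register at 0x%02x\n", p->cpu_clear);
	else
		printf("no CLEAR register, mask-only ack path\n");
}

int main(void)
{
	demo_init(&l2_edge);
	demo_init(&l2_lvl);
	return 0;
}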
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 9ae71804b5dd..30017df5b54c 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -40,8 +40,9 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
for (; quirks->desc; quirks++) {
if (quirks->iidr != (quirks->mask & iidr))
continue;
- quirks->init(data);
- pr_info("GIC: enabling workaround for %s\n", quirks->desc);
+ if (quirks->init(data))
+ pr_info("GIC: enabling workaround for %s\n",
+ quirks->desc);
}
}
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index 205e5fddf6da..3919cd7c5285 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -23,7 +23,7 @@
struct gic_quirk {
const char *desc;
- void (*init)(void *data);
+ bool (*init)(void *data);
u32 iidr;
u32 mask;
};
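With the gic_quirk init callback now returning bool, the caller logs the workaround only when the quirk actually applied, which matters for quirks such as the Synquacer pre-ITS that bail out when their firmware property is absent. Below is a self-contained sketch of that pattern with illustrative names and IIDR values.

#include <stdbool.h>
#include <stdio.h>

/* Sketch of a quirk table whose init callbacks report whether they
 * really applied; names and values here are illustrative only.
 */
struct demo_quirk {
	const char *desc;
	bool (*init)(void *data);
	unsigned int iidr;
	unsigned int mask;
};

static bool quirk_always(void *data)
{
	(void)data;
	return true;
}

static bool quirk_conditional(void *data)
{
	return *(bool *)data;	/* e.g. a firmware property was found */
}

static void enable_quirks(unsigned int iidr, const struct demo_quirk *q,
			  void *data)
{
	for (; q->desc; q++) {
		if (q->iidr != (q->mask & iidr))
			continue;
		if (q->init(data))
			printf("enabling workaround for %s\n", q->desc);
	}
}

int main(void)
{
	bool prop_present = false;
	const struct demo_quirk quirks[] = {
		{ "demo erratum", quirk_always, 0x1234, 0xffff },
		{ "demo pre-ITS", quirk_conditional, 0x1234, 0xffff },
		{ 0 }
	};

	enable_quirks(0x1234, quirks, &prop_present);
	return 0;
}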
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e88395605e32..4039e64cd342 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -83,6 +83,8 @@ struct its_baser {
u32 psz;
};
+struct its_device;
+
/*
* The ITS structure - contains most of the infrastructure, with the
* top-level MSI domain, the command queue, the collections, and the
@@ -97,12 +99,18 @@ struct its_node {
struct its_cmd_block *cmd_write;
struct its_baser tables[GITS_BASER_NR_REGS];
struct its_collection *collections;
+ struct fwnode_handle *fwnode_handle;
+ u64 (*get_msi_base)(struct its_device *its_dev);
struct list_head its_device_list;
u64 flags;
+ unsigned long list_nr;
u32 ite_size;
u32 device_ids;
int numa_node;
+ unsigned int msi_domain_flags;
+ u32 pre_its_base; /* for Socionext Synquacer */
bool is_v4;
+ int vlpi_redist_offset;
};
#define ITS_ITT_ALIGN SZ_256
@@ -152,12 +160,6 @@ static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;
-/*
- * We have a maximum number of 16 ITSs in the whole system if we're
- * using the ITSList mechanism
- */
-#define ITS_LIST_MAX 16
-
static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);
@@ -272,10 +274,12 @@ struct its_cmd_block {
#define ITS_CMD_QUEUE_SZ SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
-typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
+typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
+ struct its_cmd_block *,
struct its_cmd_desc *);
-typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
+typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
+ struct its_cmd_block *,
struct its_cmd_desc *);
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
@@ -379,7 +383,8 @@ static inline void its_fixup_cmd(struct its_cmd_block *cmd)
cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}
-static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_mapd_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
unsigned long itt_addr;
@@ -399,7 +404,8 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
return NULL;
}
-static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_mapc_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
its_encode_cmd(cmd, GITS_CMD_MAPC);
@@ -412,7 +418,8 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
return desc->its_mapc_cmd.col;
}
-static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_mapti_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -431,7 +438,8 @@ static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_movi_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -449,7 +457,8 @@ static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_discard_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -466,7 +475,8 @@ static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_inv_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -483,7 +493,8 @@ static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_int_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -500,7 +511,8 @@ static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_clear_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -517,7 +529,8 @@ static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_invall_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
its_encode_cmd(cmd, GITS_CMD_INVALL);
@@ -528,7 +541,8 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
return NULL;
}
-static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
its_encode_cmd(cmd, GITS_CMD_VINVALL);
@@ -539,17 +553,20 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
return desc->its_vinvall_cmd.vpe;
}
-static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
unsigned long vpt_addr;
+ u64 target;
vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+ target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
its_encode_cmd(cmd, GITS_CMD_VMAPP);
its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
- its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address);
+ its_encode_target(cmd, target);
its_encode_vpt_addr(cmd, vpt_addr);
its_encode_vpt_size(cmd, LPI_NRBITS - 1);
@@ -558,7 +575,8 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
return desc->its_vmapp_cmd.vpe;
}
-static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
u32 db;
@@ -580,7 +598,8 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
return desc->its_vmapti_cmd.vpe;
}
-static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
u32 db;
@@ -602,14 +621,18 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
return desc->its_vmovi_cmd.vpe;
}
-static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ u64 target;
+
+ target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
its_encode_cmd(cmd, GITS_CMD_VMOVP);
its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
- its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address);
+ its_encode_target(cmd, target);
its_fixup_cmd(cmd);
@@ -688,9 +711,9 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
dsb(ishst);
}
-static void its_wait_for_range_completion(struct its_node *its,
- struct its_cmd_block *from,
- struct its_cmd_block *to)
+static int its_wait_for_range_completion(struct its_node *its,
+ struct its_cmd_block *from,
+ struct its_cmd_block *to)
{
u64 rd_idx, from_idx, to_idx;
u32 count = 1000000; /* 1s! */
@@ -711,12 +734,15 @@ static void its_wait_for_range_completion(struct its_node *its,
count--;
if (!count) {
- pr_err_ratelimited("ITS queue timeout\n");
- return;
+ pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
+ from_idx, to_idx, rd_idx);
+ return -1;
}
cpu_relax();
udelay(1);
}
+
+ return 0;
}
/* Warning, macro hell follows */
@@ -736,7 +762,7 @@ void name(struct its_node *its, \
raw_spin_unlock_irqrestore(&its->lock, flags); \
return; \
} \
- sync_obj = builder(cmd, desc); \
+ sync_obj = builder(its, cmd, desc); \
its_flush_cmd(its, cmd); \
\
if (sync_obj) { \
@@ -744,7 +770,7 @@ void name(struct its_node *its, \
if (!sync_cmd) \
goto post; \
\
- buildfn(sync_cmd, sync_obj); \
+ buildfn(its, sync_cmd, sync_obj); \
its_flush_cmd(its, sync_cmd); \
} \
\
@@ -752,10 +778,12 @@ post: \
next_cmd = its_post_commands(its); \
raw_spin_unlock_irqrestore(&its->lock, flags); \
\
- its_wait_for_range_completion(its, cmd, next_cmd); \
+ if (its_wait_for_range_completion(its, cmd, next_cmd)) \
+ pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
}
-static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
+static void its_build_sync_cmd(struct its_node *its,
+ struct its_cmd_block *sync_cmd,
struct its_collection *sync_col)
{
its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
@@ -767,7 +795,8 @@ static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
struct its_collection, its_build_sync_cmd)
-static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
+static void its_build_vsync_cmd(struct its_node *its,
+ struct its_cmd_block *sync_cmd,
struct its_vpe *sync_vpe)
{
its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
@@ -899,21 +928,16 @@ static void its_send_vmovi(struct its_device *dev, u32 id)
its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}
-static void its_send_vmapp(struct its_vpe *vpe, bool valid)
+static void its_send_vmapp(struct its_node *its,
+ struct its_vpe *vpe, bool valid)
{
struct its_cmd_desc desc;
- struct its_node *its;
desc.its_vmapp_cmd.vpe = vpe;
desc.its_vmapp_cmd.valid = valid;
+ desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
- list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
- continue;
-
- desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
- its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
- }
+ its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}
static void its_send_vmovp(struct its_vpe *vpe)
@@ -951,6 +975,9 @@ static void its_send_vmovp(struct its_vpe *vpe)
if (!its->is_v4)
continue;
+ if (!vpe->its_vm->vlpi_count[its->list_nr])
+ continue;
+
desc.its_vmovp_cmd.col = &its->collections[col_id];
its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
}
@@ -958,18 +985,12 @@ static void its_send_vmovp(struct its_vpe *vpe)
raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
-static void its_send_vinvall(struct its_vpe *vpe)
+static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
struct its_cmd_desc desc;
- struct its_node *its;
desc.its_vinvall_cmd.vpe = vpe;
-
- list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
- continue;
- its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
- }
+ its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}
/*
@@ -991,9 +1012,15 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
if (irqd_is_forwarded_to_vcpu(d)) {
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ struct its_vlpi_map *map;
prop_page = its_dev->event_map.vm->vprop_page;
- hwirq = its_dev->event_map.vlpi_maps[event].vintid;
+ map = &its_dev->event_map.vlpi_maps[event];
+ hwirq = map->vintid;
+
+ /* Remember the updated property */
+ map->properties &= ~clr;
+ map->properties |= set | LPI_PROP_GROUP1;
} else {
prop_page = gic_rdists->prop_page;
hwirq = d->hwirq;
@@ -1099,6 +1126,13 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
return IRQ_SET_MASK_OK_DONE;
}
+static u64 its_irq_get_msi_base(struct its_device *its_dev)
+{
+ struct its_node *its = its_dev->its;
+
+ return its->phys_base + GITS_TRANSLATER;
+}
+
static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -1106,7 +1140,7 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
u64 addr;
its = its_dev->its;
- addr = its->phys_base + GITS_TRANSLATER;
+ addr = its->get_msi_base(its_dev);
msg->address_lo = lower_32_bits(addr);
msg->address_hi = upper_32_bits(addr);
@@ -1133,6 +1167,60 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
return 0;
}
+static void its_map_vm(struct its_node *its, struct its_vm *vm)
+{
+ unsigned long flags;
+
+ /* Not using the ITS list? Everything is always mapped. */
+ if (!its_list_map)
+ return;
+
+ raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+ /*
+ * If the VM wasn't mapped yet, iterate over the vpes and get
+ * them mapped now.
+ */
+ vm->vlpi_count[its->list_nr]++;
+
+ if (vm->vlpi_count[its->list_nr] == 1) {
+ int i;
+
+ for (i = 0; i < vm->nr_vpes; i++) {
+ struct its_vpe *vpe = vm->vpes[i];
+ struct irq_data *d = irq_get_irq_data(vpe->irq);
+
+ /* Map the VPE to the first possible CPU */
+ vpe->col_idx = cpumask_first(cpu_online_mask);
+ its_send_vmapp(its, vpe, true);
+ its_send_vinvall(its, vpe);
+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
+{
+ unsigned long flags;
+
+ /* Not using the ITS list? Everything is always mapped. */
+ if (!its_list_map)
+ return;
+
+ raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+ if (!--vm->vlpi_count[its->list_nr]) {
+ int i;
+
+ for (i = 0; i < vm->nr_vpes; i++)
+ its_send_vmapp(its, vm->vpes[i], false);
+ }
+
+ raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -1168,12 +1256,23 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
/* Already mapped, move it around */
its_send_vmovi(its_dev, event);
} else {
+ /* Ensure all the VPEs are mapped on this ITS */
+ its_map_vm(its_dev->its, info->map->vm);
+
+ /*
+ * Flag the interrupt as forwarded so that we can
+ * start poking the virtual property table.
+ */
+ irqd_set_forwarded_to_vcpu(d);
+
+ /* Write out the property to the prop table */
+ lpi_write_config(d, 0xff, info->map->properties);
+
/* Drop the physical mapping */
its_send_discard(its_dev, event);
/* and install the virtual one */
its_send_vmapti(its_dev, event);
- irqd_set_forwarded_to_vcpu(d);
/* Increment the number of VLPIs */
its_dev->event_map.nr_vlpis++;
@@ -1229,6 +1328,9 @@ static int its_vlpi_unmap(struct irq_data *d)
LPI_PROP_ENABLED |
LPI_PROP_GROUP1));
+ /* Potentially unmap the VM from this ITS */
+ its_unmap_vm(its_dev->its, its_dev->event_map.vm);
+
/*
* Drop the refcount and make the device available again if
* this was the last VLPI.
@@ -1669,23 +1771,14 @@ static void its_free_tables(struct its_node *its)
static int its_alloc_tables(struct its_node *its)
{
- u64 typer = gic_read_typer(its->base + GITS_TYPER);
- u32 ids = GITS_TYPER_DEVBITS(typer);
u64 shr = GITS_BASER_InnerShareable;
u64 cache = GITS_BASER_RaWaWb;
u32 psz = SZ_64K;
int err, i;
- if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
- /*
- * erratum 22375: only alloc 8MB table size
- * erratum 24313: ignore memory access type
- */
- cache = GITS_BASER_nCnB;
- ids = 0x14; /* 20 bits, 8MB */
- }
-
- its->device_ids = ids;
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
+ /* erratum 24313: ignore memory access type */
+ cache = GITS_BASER_nCnB;
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
struct its_baser *baser = its->tables + i;
@@ -2209,8 +2302,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return 0;
}
-static void its_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *d)
+static int its_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d, bool early)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
@@ -2228,6 +2321,7 @@ static void its_irq_domain_activate(struct irq_domain *domain,
/* Map the GIC IRQ and event to the device */
its_send_mapti(its_dev, d->hwirq, event);
+ return 0;
}
static void its_irq_domain_deactivate(struct irq_domain *domain,
@@ -2394,6 +2488,8 @@ static int its_vpe_set_affinity(struct irq_data *d,
its_vpe_db_proxy_move(vpe, from, cpu);
}
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
return IRQ_SET_MASK_OK_DONE;
}
@@ -2461,6 +2557,26 @@ static void its_vpe_deschedule(struct its_vpe *vpe)
}
}
+static void its_vpe_invall(struct its_vpe *vpe)
+{
+ struct its_node *its;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+
+ if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
+ continue;
+
+ /*
+ * Sending a VINVALL to a single ITS is enough, as all
+ * we need is to reach the redistributors.
+ */
+ its_send_vinvall(its, vpe);
+ return;
+ }
+}
+
static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
@@ -2476,7 +2592,7 @@ static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
return 0;
case INVALL_VPE:
- its_send_vinvall(vpe);
+ its_vpe_invall(vpe);
return 0;
default:
@@ -2701,23 +2817,51 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
return err;
}
-static void its_vpe_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *d)
+static int its_vpe_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d, bool early)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
+ /* If we use the list map, we issue VMAPP on demand... */
+ if (its_list_map)
+ return 0;
/* Map the VPE to the first possible CPU */
vpe->col_idx = cpumask_first(cpu_online_mask);
- its_send_vmapp(vpe, true);
- its_send_vinvall(vpe);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+
+ its_send_vmapp(its, vpe, true);
+ its_send_vinvall(its, vpe);
+ }
+
+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+
+ return 0;
}
static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
struct irq_data *d)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
+ /*
+ * If we use the list map, we unmap the VPE once no VLPIs are
+ * associated with the VM.
+ */
+ if (its_list_map)
+ return;
- its_send_vmapp(vpe, false);
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+
+ its_send_vmapp(its, vpe, false);
+ }
}
static const struct irq_domain_ops its_vpe_domain_ops = {
@@ -2760,26 +2904,85 @@ static int its_force_quiescent(void __iomem *base)
}
}
-static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
+static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
struct its_node *its = data;
+ /* erratum 22375: only alloc 8MB table size */
+ its->device_ids = 0x14; /* 20 bits, 8MB */
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
+
+ return true;
}
-static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
struct its_node *its = data;
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+
+ return true;
}
-static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
+static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
{
struct its_node *its = data;
/* On QDF2400, the size of the ITE is 16Bytes */
its->ite_size = 16;
+
+ return true;
+}
+
+static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
+{
+ struct its_node *its = its_dev->its;
+
+ /*
+ * The Socionext Synquacer SoC has a so-called 'pre-ITS',
+ * which maps 32-bit writes targeted at a separate window of
+ * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
+ * with device ID taken from bits [device_id_bits + 1:2] of
+ * the window offset.
+ */
+ return its->pre_its_base + (its_dev->device_id << 2);
+}
+
+static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
+{
+ struct its_node *its = data;
+ u32 pre_its_window[2];
+ u32 ids;
+
+ if (!fwnode_property_read_u32_array(its->fwnode_handle,
+ "socionext,synquacer-pre-its",
+ pre_its_window,
+ ARRAY_SIZE(pre_its_window))) {
+
+ its->pre_its_base = pre_its_window[0];
+ its->get_msi_base = its_irq_get_msi_base_pre_its;
+
+ ids = ilog2(pre_its_window[1]) - 2;
+ if (its->device_ids > ids)
+ its->device_ids = ids;
+
+ /* the pre-ITS breaks isolation, so disable MSI remapping */
+ its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
+ return true;
+ }
+ return false;
+}
+
+static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
+{
+ struct its_node *its = data;
+
+ /*
+ * Hip07 insists on using the wrong address for the VLPI
+ * page. Trick it into doing the right thing...
+ */
+ its->vlpi_redist_offset = SZ_128K;
+ return true;
}
static const struct gic_quirk its_quirks[] = {
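To make the pre-ITS comment above concrete: the doorbell address is pre_its_base + (device_id << 2), and a window of size '4 << device_id_bits' therefore supports ilog2(window_size) - 2 bits of device ID. The sketch below just works that arithmetic with made-up base and window values, not real Synquacer firmware data.

#include <stdint.h>
#include <stdio.h>

/* Worked example of the pre-ITS address arithmetic. */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint64_t pre_its_base = 0x58000000;	/* hypothetical window base */
	uint32_t window_size  = 0x200000;	/* hypothetical: 2 MiB window */
	unsigned int id_bits  = ilog2_u32(window_size) - 2;
	uint32_t device_id    = 0x42;

	printf("device_id_bits = %u\n", id_bits);
	printf("doorbell for device 0x%x = 0x%llx\n", device_id,
	       (unsigned long long)(pre_its_base + (device_id << 2)));
	return 0;
}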
@@ -2807,6 +3010,27 @@ static const struct gic_quirk its_quirks[] = {
.init = its_enable_quirk_qdf2400_e0065,
},
#endif
+#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
+ {
+ /*
+ * The Socionext Synquacer SoC incorporates ARM's own GIC-500
+ * implementation, but with a 'pre-ITS' added that requires
+ * special handling in software.
+ */
+ .desc = "ITS: Socionext Synquacer pre-ITS",
+ .iidr = 0x0001143b,
+ .mask = 0xffffffff,
+ .init = its_enable_quirk_socionext_synquacer,
+ },
+#endif
+#ifdef CONFIG_HISILICON_ERRATUM_161600802
+ {
+ .desc = "ITS: Hip07 erratum 161600802",
+ .iidr = 0x00000004,
+ .mask = 0xffffffff,
+ .init = its_enable_quirk_hip07_161600802,
+ },
+#endif
{
}
};
@@ -2835,7 +3059,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
inner_domain->parent = its_parent;
irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
- inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
+ inner_domain->flags |= its->msi_domain_flags;
info->ops = &its_msi_domain_ops;
info->data = its;
inner_domain->host_data = info;
@@ -2896,8 +3120,8 @@ static int __init its_compute_its_list_map(struct resource *res,
* locking. Should this change, we should address
* this.
*/
- its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX);
- if (its_number >= ITS_LIST_MAX) {
+ its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
+ if (its_number >= GICv4_ITS_LIST_MAX) {
pr_err("ITS@%pa: No ITSList entry available!\n",
&res->start);
return -EINVAL;
@@ -2965,6 +3189,7 @@ static int __init its_probe_one(struct resource *res,
its->base = its_base;
its->phys_base = res->start;
its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
+ its->device_ids = GITS_TYPER_DEVBITS(typer);
its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
if (its->is_v4) {
if (!(typer & GITS_TYPER_VMOVP)) {
@@ -2972,6 +3197,8 @@ static int __init its_probe_one(struct resource *res,
if (err < 0)
goto out_free_its;
+ its->list_nr = err;
+
pr_info("ITS@%pa: Using ITS number %d\n",
&res->start, err);
} else {
@@ -2988,6 +3215,9 @@ static int __init its_probe_one(struct resource *res,
goto out_free_its;
}
its->cmd_write = its->cmd_base;
+ its->fwnode_handle = handle;
+ its->get_msi_base = its_irq_get_msi_base;
+ its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
its_enable_quirks(its);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index b5df99c6f680..b54b55597ffb 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -55,6 +55,7 @@ struct gic_chip_data {
struct irq_domain *domain;
u64 redist_stride;
u32 nr_redist_regions;
+ bool has_rss;
unsigned int irq_nr;
struct partition_desc *ppi_descs[16];
};
@@ -63,7 +64,9 @@ static struct gic_chip_data gic_data __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
static struct gic_kvm_info gic_v3_kvm_info;
+static DEFINE_PER_CPU(bool, has_rss);
+#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
@@ -526,6 +529,10 @@ static void gic_update_vlpi_properties(void)
static void gic_cpu_sys_reg_init(void)
{
+ int i, cpu = smp_processor_id();
+ u64 mpidr = cpu_logical_map(cpu);
+ u64 need_rss = MPIDR_RS(mpidr);
+
/*
* Need to check that the SRE bit has actually been set. If
* not, it means that SRE is disabled at EL2. We're going to
@@ -557,6 +564,30 @@ static void gic_cpu_sys_reg_init(void)
/* ... and let's hit the road... */
gic_write_grpen1(1);
+
+ /* Keep the RSS capability status in per_cpu variable */
+ per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
+
+ /* Check that all the CPUs are capable of sending SGIs to other CPUs */
+ for_each_online_cpu(i) {
+ bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
+
+ need_rss |= MPIDR_RS(cpu_logical_map(i));
+ if (need_rss && (!have_rss))
+ pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
+ cpu, (unsigned long)mpidr,
+ i, (unsigned long)cpu_logical_map(i));
+ }
+
+ /*
+ * The GIC spec says that when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
+ * writing the ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
+ * UNPREDICTABLE choice of:
+ * - The write is ignored.
+ * - The RS field is treated as 0.
+ */
+ if (need_rss && (!gic_data.has_rss))
+ pr_crit_once("RSS is required but GICD doesn't support it\n");
}
static int gic_dist_supports_lpis(void)
@@ -591,6 +622,9 @@ static void gic_cpu_init(void)
#ifdef CONFIG_SMP
+#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
+#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)
+
static int gic_starting_cpu(unsigned int cpu)
{
gic_cpu_init();
@@ -605,13 +639,6 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
u16 tlist = 0;
while (cpu < nr_cpu_ids) {
- /*
- * If we ever get a cluster of more than 16 CPUs, just
- * scream and skip that CPU.
- */
- if (WARN_ON((mpidr & 0xff) >= 16))
- goto out;
-
tlist |= 1 << (mpidr & 0xf);
next_cpu = cpumask_next(cpu, mask);
@@ -621,7 +648,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
mpidr = cpu_logical_map(cpu);
- if (cluster_id != (mpidr & ~0xffUL)) {
+ if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
cpu--;
goto out;
}
@@ -643,6 +670,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
irq << ICC_SGI1R_SGI_ID_SHIFT |
MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
+ MPIDR_TO_SGI_RS(cluster_id) |
tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
@@ -663,7 +691,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
smp_wmb();
for_each_cpu(cpu, mask) {
- unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
+ u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
u16 tlist;
tlist = gic_compute_target_list(&cpu, mask, cluster_id);
@@ -1007,6 +1035,10 @@ static int __init gic_init_bases(void __iomem *dist_base,
goto out_free;
}
+ gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
+ pr_info("Distributor has %sRange Selector support\n",
+ gic_data.has_rss ? "" : "no ");
+
set_handle_irq(gic_handle_irq);
gic_update_vlpi_properties();
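Without RSS, ICC_SGI1R_EL1 can only address Aff0 values 0-15 through its 16-bit target list, which is why the old code warned and skipped clusters with more than 16 CPUs. With RSS, bits [7:4] of Aff0 act as a Range Selector choosing which aligned group of 16 CPUs the target list refers to; that is what MPIDR_RS() extracts and MPIDR_TO_SGI_RS() folds into the SGI value. A standalone sketch of how the macros split an MPIDR (the example value is made up):

#include <stdio.h>

#define MPIDR_RS(mpidr)			(((mpidr) & 0xF0UL) >> 4)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)

int main(void)
{
	/* Hypothetical MPIDR: Aff1 = 1, Aff0 = 0x23 (range 2, CPU 3 of that range) */
	unsigned long mpidr = 0x123;

	printf("cluster id %#lx, range selector %lu, target-list bit %#x\n",
	       MPIDR_TO_SGI_CLUSTER_ID(mpidr),
	       MPIDR_RS(mpidr),
	       1U << (mpidr & 0xf));
	return 0;
}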
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 651d726e8b12..f641e8e2c78d 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1256,6 +1256,19 @@ static void gic_teardown(struct gic_chip_data *gic)
#ifdef CONFIG_OF
static int gic_cnt __initdata;
+static bool gicv2_force_probe;
+
+static int __init gicv2_force_probe_cfg(char *buf)
+{
+ return strtobool(buf, &gicv2_force_probe);
+}
+early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg);
+
+static bool gic_check_gicv2(void __iomem *base)
+{
+ u32 val = readl_relaxed(base + GIC_CPU_IDENT);
+ return (val & 0xff0fff) == 0x02043B;
+}
static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
{
@@ -1265,20 +1278,60 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
if (!is_hyp_mode_available())
return false;
- if (resource_size(&cpuif_res) < SZ_8K)
- return false;
- if (resource_size(&cpuif_res) == SZ_128K) {
- u32 val_low, val_high;
+ if (resource_size(&cpuif_res) < SZ_8K) {
+ void __iomem *alt;
+ /*
+ * Check for a stupid firmware that only exposes the
+ * first page of a GICv2.
+ */
+ if (!gic_check_gicv2(*base))
+ return false;
+ if (!gicv2_force_probe) {
+ pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n");
+ return false;
+ }
+
+ alt = ioremap(cpuif_res.start, SZ_8K);
+ if (!alt)
+ return false;
+ if (!gic_check_gicv2(alt + SZ_4K)) {
+ /*
+ * The first page was that of a GICv2, and
+ * the second was *something*. Let's trust it
+ * to be a GICv2, and update the mapping.
+ */
+ pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n",
+ &cpuif_res.start);
+ iounmap(*base);
+ *base = alt;
+ return true;
+ }
+
+ /*
+ * We detected *two* initial GICv2 pages in a
+ * row. Could be a GICv2 aliased over two 64kB
+ * pages. Update the resource, map the iospace, and
+ * pray.
+ */
+ iounmap(alt);
+ alt = ioremap(cpuif_res.start, SZ_128K);
+ if (!alt)
+ return false;
+ pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n",
+ &cpuif_res.start);
+ cpuif_res.end = cpuif_res.start + SZ_128K - 1;
+ iounmap(*base);
+ *base = alt;
+ }
+ if (resource_size(&cpuif_res) == SZ_128K) {
/*
- * Verify that we have the first 4kB of a GIC400
+ * Verify that we have the first 4kB of a GICv2
* aliased over the first 64kB by checking the
* GICC_IIDR register on both ends.
*/
- val_low = readl_relaxed(*base + GIC_CPU_IDENT);
- val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000);
- if ((val_low & 0xffff0fff) != 0x0202043B ||
- val_low != val_high)
+ if (!gic_check_gicv2(*base) ||
+ !gic_check_gicv2(*base + 0xf000))
return false;
/*
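For reference, the IIDR check used by gic_check_gicv2() can be read as follows; the field layout quoted here comes from the GICv2 architecture specification rather than from this patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * GICC_IIDR layout per the GICv2 spec: ProductID [31:20], Architecture
 * version [19:16], Revision [15:12], Implementer JEP106 code [11:0].
 * The 0xff0fff mask ignores the Revision field and the top byte of the
 * ProductID, so the check accepts any revision of an ARM-implemented
 * (0x43B) GICv2 CPU interface.
 */
static bool looks_like_gicv2(uint32_t iidr)
{
	return (iidr & 0xff0fff) == 0x02043B;
}

int main(void)
{
	/* A GIC-400 reports GICC_IIDR == 0x0202043B (cf. the old check above) */
	printf("GIC-400: %s\n", looks_like_gicv2(0x0202043B) ? "match" : "no match");
	return 0;
}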
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 119f4ef0d421..57e3d900f19e 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -316,6 +316,7 @@ static const struct of_device_id ls_scfg_msi_id[] = {
{ .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
{ .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},
+ { .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
{ .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
{ .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
{ .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
new file mode 100644
index 000000000000..a59bdbc0b9bb
--- /dev/null
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#define NUM_CHANNEL 8
+#define MAX_INPUT_MUX 256
+
+#define REG_EDGE_POL 0x00
+#define REG_PIN_03_SEL 0x04
+#define REG_PIN_47_SEL 0x08
+#define REG_FILTER_SEL 0x0c
+
+#define REG_EDGE_POL_MASK(x) (BIT(x) | BIT(16 + (x)))
+#define REG_EDGE_POL_EDGE(x) BIT(x)
+#define REG_EDGE_POL_LOW(x) BIT(16 + (x))
+#define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8)
+#define REG_FILTER_SEL_SHIFT(x) ((x) * 4)
+
+struct meson_gpio_irq_params {
+ unsigned int nr_hwirq;
+};
+
+static const struct meson_gpio_irq_params meson8_params = {
+ .nr_hwirq = 134,
+};
+
+static const struct meson_gpio_irq_params meson8b_params = {
+ .nr_hwirq = 119,
+};
+
+static const struct meson_gpio_irq_params gxbb_params = {
+ .nr_hwirq = 133,
+};
+
+static const struct meson_gpio_irq_params gxl_params = {
+ .nr_hwirq = 110,
+};
+
+static const struct of_device_id meson_irq_gpio_matches[] = {
+ { .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
+ { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
+ { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
+ { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params },
+ { }
+};
+
+struct meson_gpio_irq_controller {
+ unsigned int nr_hwirq;
+ void __iomem *base;
+ u32 channel_irqs[NUM_CHANNEL];
+ DECLARE_BITMAP(channel_map, NUM_CHANNEL);
+ spinlock_t lock;
+};
+
+static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
+ unsigned int reg, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(ctl->base + reg);
+ tmp &= ~mask;
+ tmp |= val;
+ writel_relaxed(tmp, ctl->base + reg);
+}
+
+static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel)
+{
+ return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+}
+
+static int
+meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
+ unsigned long hwirq,
+ u32 **channel_hwirq)
+{
+ unsigned int reg, idx;
+
+ spin_lock(&ctl->lock);
+
+ /* Find a free channel */
+ idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL);
+ if (idx >= NUM_CHANNEL) {
+ spin_unlock(&ctl->lock);
+ pr_err("No channel available\n");
+ return -ENOSPC;
+ }
+
+ /* Mark the channel as used */
+ set_bit(idx, ctl->channel_map);
+
+ /*
+ * Set up the mux of the channel to route the signal of the pad
+ * to the appropriate input of the GIC
+ */
+ reg = meson_gpio_irq_channel_to_reg(idx);
+ meson_gpio_irq_update_bits(ctl, reg,
+ 0xff << REG_PIN_SEL_SHIFT(idx),
+ hwirq << REG_PIN_SEL_SHIFT(idx));
+
+ /*
+ * Get the hwirq number assigned to this channel through
+ * a pointer to the channel_irqs table. The added benefit of this
+ * method is that we can also retrieve the channel index with
+ * it, using the table base.
+ */
+ *channel_hwirq = &(ctl->channel_irqs[idx]);
+
+ spin_unlock(&ctl->lock);
+
+ pr_debug("hwirq %lu assigned to channel %d - irq %u\n",
+ hwirq, idx, **channel_hwirq);
+
+ return 0;
+}
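Each of the eight channels has an 8-bit mux field, four fields per 32-bit register. A standalone sketch of which register and shift meson_gpio_irq_request_channel() programs for a given allocation (the pad and channel numbers are made up):

#include <stdio.h>

#define REG_PIN_03_SEL		0x04
#define REG_PIN_47_SEL		0x08
#define REG_PIN_SEL_SHIFT(x)	(((x) % 4) * 8)

int main(void)
{
	/* Hypothetical routing: pad hwirq 83 onto free channel 5 */
	unsigned int idx = 5, hwirq = 83;
	unsigned int reg = (idx < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;

	printf("write 0x%02x << %u (mask 0xff << %u) to register 0x%02x\n",
	       hwirq, REG_PIN_SEL_SHIFT(idx), REG_PIN_SEL_SHIFT(idx), reg);
	return 0;
}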
+
+static unsigned int
+meson_gpio_irq_get_channel_idx(struct meson_gpio_irq_controller *ctl,
+ u32 *channel_hwirq)
+{
+ return channel_hwirq - ctl->channel_irqs;
+}
+
+static void
+meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
+ u32 *channel_hwirq)
+{
+ unsigned int idx;
+
+ idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
+ clear_bit(idx, ctl->channel_map);
+}
+
+static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
+ unsigned int type,
+ u32 *channel_hwirq)
+{
+ u32 val = 0;
+ unsigned int idx;
+
+ idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
+
+ /*
+ * The controller has a filter block to operate in either LEVEL or
+ * EDGE mode, then the signal is sent to the GIC. To enable LEVEL_LOW and
+ * EDGE_FALLING support (which the GIC does not support), the filter
+ * block is also able to invert the input signal it gets before
+ * providing it to the GIC.
+ */
+ type &= IRQ_TYPE_SENSE_MASK;
+
+ if (type == IRQ_TYPE_EDGE_BOTH)
+ return -EINVAL;
+
+ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+ val |= REG_EDGE_POL_EDGE(idx);
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
+ val |= REG_EDGE_POL_LOW(idx);
+
+ spin_lock(&ctl->lock);
+
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
+ REG_EDGE_POL_MASK(idx), val);
+
+ spin_unlock(&ctl->lock);
+
+ return 0;
+}
+
+static unsigned int meson_gpio_irq_type_output(unsigned int type)
+{
+ unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
+
+ type &= ~IRQ_TYPE_SENSE_MASK;
+
+ /*
+ * The polarity of the signal provided to the GIC should always
+ * be high.
+ */
+ if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ type |= IRQ_TYPE_LEVEL_HIGH;
+ else if (sense & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+ type |= IRQ_TYPE_EDGE_RISING;
+
+ return type;
+}
+
+static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct meson_gpio_irq_controller *ctl = data->domain->host_data;
+ u32 *channel_hwirq = irq_data_get_irq_chip_data(data);
+ int ret;
+
+ ret = meson_gpio_irq_type_setup(ctl, type, channel_hwirq);
+ if (ret)
+ return ret;
+
+ return irq_chip_set_type_parent(data,
+ meson_gpio_irq_type_output(type));
+}
+
+static struct irq_chip meson_gpio_irq_chip = {
+ .name = "meson-gpio-irqchip",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = meson_gpio_irq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static int meson_gpio_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int meson_gpio_irq_allocate_gic_irq(struct irq_domain *domain,
+ unsigned int virq,
+ u32 hwirq,
+ unsigned int type)
+{
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0; /* SPI */
+ fwspec.param[1] = hwirq;
+ fwspec.param[2] = meson_gpio_irq_type_output(type);
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+}
+
+static int meson_gpio_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct meson_gpio_irq_controller *ctl = domain->host_data;
+ unsigned long hwirq;
+ u32 *channel_hwirq;
+ unsigned int type;
+ int ret;
+
+ if (WARN_ON(nr_irqs != 1))
+ return -EINVAL;
+
+ ret = meson_gpio_irq_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ ret = meson_gpio_irq_request_channel(ctl, hwirq, &channel_hwirq);
+ if (ret)
+ return ret;
+
+ ret = meson_gpio_irq_allocate_gic_irq(domain, virq,
+ *channel_hwirq, type);
+ if (ret < 0) {
+ pr_err("failed to allocate gic irq %u\n", *channel_hwirq);
+ meson_gpio_irq_release_channel(ctl, channel_hwirq);
+ return ret;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &meson_gpio_irq_chip, channel_hwirq);
+
+ return 0;
+}
+
+static void meson_gpio_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct meson_gpio_irq_controller *ctl = domain->host_data;
+ struct irq_data *irq_data;
+ u32 *channel_hwirq;
+
+ if (WARN_ON(nr_irqs != 1))
+ return;
+
+ irq_domain_free_irqs_parent(domain, virq, 1);
+
+ irq_data = irq_domain_get_irq_data(domain, virq);
+ channel_hwirq = irq_data_get_irq_chip_data(irq_data);
+
+ meson_gpio_irq_release_channel(ctl, channel_hwirq);
+}
+
+static const struct irq_domain_ops meson_gpio_irq_domain_ops = {
+ .alloc = meson_gpio_irq_domain_alloc,
+ .free = meson_gpio_irq_domain_free,
+ .translate = meson_gpio_irq_domain_translate,
+};
+
+static int __init meson_gpio_irq_parse_dt(struct device_node *node,
+ struct meson_gpio_irq_controller *ctl)
+{
+ const struct of_device_id *match;
+ const struct meson_gpio_irq_params *params;
+ int ret;
+
+ match = of_match_node(meson_irq_gpio_matches, node);
+ if (!match)
+ return -ENODEV;
+
+ params = match->data;
+ ctl->nr_hwirq = params->nr_hwirq;
+
+ ret = of_property_read_variable_u32_array(node,
+ "amlogic,channel-interrupts",
+ ctl->channel_irqs,
+ NUM_CHANNEL,
+ NUM_CHANNEL);
+ if (ret < 0) {
+ pr_err("can't get %d channel interrupts\n", NUM_CHANNEL);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init meson_gpio_irq_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain, *parent_domain;
+ struct meson_gpio_irq_controller *ctl;
+ int ret;
+
+ if (!parent) {
+ pr_err("missing parent interrupt node\n");
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("unable to obtain parent domain\n");
+ return -ENXIO;
+ }
+
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return -ENOMEM;
+
+ spin_lock_init(&ctl->lock);
+
+ ctl->base = of_iomap(node, 0);
+ if (!ctl->base) {
+ ret = -ENOMEM;
+ goto free_ctl;
+ }
+
+ ret = meson_gpio_irq_parse_dt(node, ctl);
+ if (ret)
+ goto free_channel_irqs;
+
+ domain = irq_domain_create_hierarchy(parent_domain, 0, ctl->nr_hwirq,
+ of_node_to_fwnode(node),
+ &meson_gpio_irq_domain_ops,
+ ctl);
+ if (!domain) {
+ pr_err("failed to add domain\n");
+ ret = -ENODEV;
+ goto free_channel_irqs;
+ }
+
+ pr_info("%d to %d gpio interrupt mux initialized\n",
+ ctl->nr_hwirq, NUM_CHANNEL);
+
+ return 0;
+
+free_channel_irqs:
+ iounmap(ctl->base);
+free_ctl:
+ kfree(ctl);
+
+ return ret;
+}
+
+IRQCHIP_DECLARE(meson_gpio_intc, "amlogic,meson-gpio-intc",
+ meson_gpio_irq_of_init);
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index c90976d7e53c..ef92a4d2038e 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -6,8 +6,12 @@
* Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
+
+#define pr_fmt(fmt) "irq-mips-gic: " fmt
+
#include <linux/bitmap.h>
#include <linux/clocksource.h>
+#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -48,12 +52,16 @@ static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
-static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
-DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
-DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
+static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
+static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
+
+static struct gic_all_vpes_chip_data {
+ u32 map;
+ bool mask;
+} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];
static void gic_clear_pcpu_masks(unsigned int intr)
{
@@ -194,46 +202,46 @@ static void gic_ack_irq(struct irq_data *d)
static int gic_set_type(struct irq_data *d, unsigned int type)
{
- unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+ unsigned int irq, pol, trig, dual;
unsigned long flags;
- bool is_edge;
+
+ irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
spin_lock_irqsave(&gic_lock, flags);
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_FALLING:
- change_gic_pol(irq, GIC_POL_FALLING_EDGE);
- change_gic_trig(irq, GIC_TRIG_EDGE);
- change_gic_dual(irq, GIC_DUAL_SINGLE);
- is_edge = true;
+ pol = GIC_POL_FALLING_EDGE;
+ trig = GIC_TRIG_EDGE;
+ dual = GIC_DUAL_SINGLE;
break;
case IRQ_TYPE_EDGE_RISING:
- change_gic_pol(irq, GIC_POL_RISING_EDGE);
- change_gic_trig(irq, GIC_TRIG_EDGE);
- change_gic_dual(irq, GIC_DUAL_SINGLE);
- is_edge = true;
+ pol = GIC_POL_RISING_EDGE;
+ trig = GIC_TRIG_EDGE;
+ dual = GIC_DUAL_SINGLE;
break;
case IRQ_TYPE_EDGE_BOTH:
- /* polarity is irrelevant in this case */
- change_gic_trig(irq, GIC_TRIG_EDGE);
- change_gic_dual(irq, GIC_DUAL_DUAL);
- is_edge = true;
+ pol = 0; /* Doesn't matter */
+ trig = GIC_TRIG_EDGE;
+ dual = GIC_DUAL_DUAL;
break;
case IRQ_TYPE_LEVEL_LOW:
- change_gic_pol(irq, GIC_POL_ACTIVE_LOW);
- change_gic_trig(irq, GIC_TRIG_LEVEL);
- change_gic_dual(irq, GIC_DUAL_SINGLE);
- is_edge = false;
+ pol = GIC_POL_ACTIVE_LOW;
+ trig = GIC_TRIG_LEVEL;
+ dual = GIC_DUAL_SINGLE;
break;
case IRQ_TYPE_LEVEL_HIGH:
default:
- change_gic_pol(irq, GIC_POL_ACTIVE_HIGH);
- change_gic_trig(irq, GIC_TRIG_LEVEL);
- change_gic_dual(irq, GIC_DUAL_SINGLE);
- is_edge = false;
+ pol = GIC_POL_ACTIVE_HIGH;
+ trig = GIC_TRIG_LEVEL;
+ dual = GIC_DUAL_SINGLE;
break;
}
- if (is_edge)
+ change_gic_pol(irq, pol);
+ change_gic_trig(irq, trig);
+ change_gic_dual(irq, dual);
+
+ if (trig == GIC_TRIG_EDGE)
irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
handle_edge_irq, NULL);
else
@@ -338,13 +346,17 @@ static struct irq_chip gic_local_irq_controller = {
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
- int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
- int i;
+ struct gic_all_vpes_chip_data *cd;
unsigned long flags;
+ int intr, cpu;
+
+ intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+ cd = irq_data_get_irq_chip_data(d);
+ cd->mask = false;
spin_lock_irqsave(&gic_lock, flags);
- for (i = 0; i < gic_vpes; i++) {
- write_gic_vl_other(mips_cm_vp_id(i));
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_rmask(BIT(intr));
}
spin_unlock_irqrestore(&gic_lock, flags);
@@ -352,22 +364,40 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
- int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
- int i;
+ struct gic_all_vpes_chip_data *cd;
unsigned long flags;
+ int intr, cpu;
+
+ intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+ cd = irq_data_get_irq_chip_data(d);
+ cd->mask = true;
spin_lock_irqsave(&gic_lock, flags);
- for (i = 0; i < gic_vpes; i++) {
- write_gic_vl_other(mips_cm_vp_id(i));
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_smask(BIT(intr));
}
spin_unlock_irqrestore(&gic_lock, flags);
}
+static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
+{
+ struct gic_all_vpes_chip_data *cd;
+ unsigned int intr;
+
+ intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+ cd = irq_data_get_irq_chip_data(d);
+
+ write_gic_vl_map(intr, cd->map);
+ if (cd->mask)
+ write_gic_vl_smask(BIT(intr));
+}
+
static struct irq_chip gic_all_vpes_local_irq_controller = {
- .name = "MIPS GIC Local",
- .irq_mask = gic_mask_local_irq_all_vpes,
- .irq_unmask = gic_unmask_local_irq_all_vpes,
+ .name = "MIPS GIC Local",
+ .irq_mask = gic_mask_local_irq_all_vpes,
+ .irq_unmask = gic_unmask_local_irq_all_vpes,
+ .irq_cpu_online = gic_all_vpes_irq_cpu_online,
};
static void __gic_irq_dispatch(void)
@@ -382,39 +412,6 @@ static void gic_irq_dispatch(struct irq_desc *desc)
gic_handle_shared_int(true);
}
-static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hw)
-{
- int intr = GIC_HWIRQ_TO_LOCAL(hw);
- int i;
- unsigned long flags;
- u32 val;
-
- if (!gic_local_irq_is_routable(intr))
- return -EPERM;
-
- if (intr > GIC_LOCAL_INT_FDC) {
- pr_err("Invalid local IRQ %d\n", intr);
- return -EINVAL;
- }
-
- if (intr == GIC_LOCAL_INT_TIMER) {
- /* CONFIG_MIPS_CMP workaround (see __gic_init) */
- val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
- } else {
- val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
- }
-
- spin_lock_irqsave(&gic_lock, flags);
- for (i = 0; i < gic_vpes; i++) {
- write_gic_vl_other(mips_cm_vp_id(i));
- write_gic_vo_map(intr, val);
- }
- spin_unlock_irqrestore(&gic_lock, flags);
-
- return 0;
-}
-
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw, unsigned int cpu)
{
@@ -457,7 +454,11 @@ static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hwirq)
{
- int err;
+ struct gic_all_vpes_chip_data *cd;
+ unsigned long flags;
+ unsigned int intr;
+ int err, cpu;
+ u32 map;
if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
/* verify that shared irqs don't conflict with an IPI irq */
@@ -474,8 +475,14 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
return gic_shared_irq_domain_map(d, virq, hwirq, 0);
}
- switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
+ intr = GIC_HWIRQ_TO_LOCAL(hwirq);
+ map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
+
+ switch (intr) {
case GIC_LOCAL_INT_TIMER:
+ /* CONFIG_MIPS_CMP workaround (see __gic_init) */
+ map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
+ /* fall-through */
case GIC_LOCAL_INT_PERFCTR:
case GIC_LOCAL_INT_FDC:
/*
@@ -483,9 +490,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
* the rest of the MIPS kernel code does not use the
* percpu IRQ API for them.
*/
+ cd = &gic_all_vpes_chip_data[intr];
+ cd->map = map;
err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
&gic_all_vpes_local_irq_controller,
- NULL);
+ cd);
if (err)
return err;
@@ -504,7 +513,17 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
break;
}
- return gic_local_irq_domain_map(d, virq, hwirq);
+ if (!gic_local_irq_is_routable(intr))
+ return -EPERM;
+
+ spin_lock_irqsave(&gic_lock, flags);
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
+ write_gic_vo_map(intr, map);
+ }
+ spin_unlock_irqrestore(&gic_lock, flags);
+
+ return 0;
}
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
@@ -636,11 +655,25 @@ static const struct irq_domain_ops gic_ipi_domain_ops = {
.match = gic_ipi_domain_match,
};
+static int gic_cpu_startup(unsigned int cpu)
+{
+ /* Enable or disable EIC */
+ change_gic_vl_ctl(GIC_VX_CTL_EIC,
+ cpu_has_veic ? GIC_VX_CTL_EIC : 0);
+
+ /* Clear all local IRQ masks (i.e. disable all local interrupts) */
+ write_gic_vl_rmask(~0);
+
+ /* Invoke irq_cpu_online callbacks to enable desired interrupts */
+ irq_cpu_online();
+
+ return 0;
+}
static int __init gic_of_init(struct device_node *node,
struct device_node *parent)
{
- unsigned int cpu_vec, i, j, gicconfig, cpu, v[2];
+ unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
unsigned long reserved;
phys_addr_t gic_base;
struct resource res;
@@ -655,7 +688,7 @@ static int __init gic_of_init(struct device_node *node,
cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
if (cpu_vec == hweight_long(ST0_IM)) {
- pr_err("No CPU vectors available for GIC\n");
+ pr_err("No CPU vectors available\n");
return -ENODEV;
}
@@ -668,8 +701,10 @@ static int __init gic_of_init(struct device_node *node,
gic_base = read_gcr_gic_base() &
~CM_GCR_GIC_BASE_GICEN;
gic_len = 0x20000;
+ pr_warn("Using inherited base address %pa\n",
+ &gic_base);
} else {
- pr_err("Failed to get GIC memory range\n");
+ pr_err("Failed to get memory range\n");
return -ENODEV;
}
} else {
@@ -690,17 +725,7 @@ static int __init gic_of_init(struct device_node *node,
gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
gic_shared_intrs = (gic_shared_intrs + 1) * 8;
- gic_vpes = gicconfig & GIC_CONFIG_PVPS;
- gic_vpes >>= __ffs(GIC_CONFIG_PVPS);
- gic_vpes = gic_vpes + 1;
-
if (cpu_has_veic) {
- /* Set EIC mode for all VPEs */
- for_each_present_cpu(cpu) {
- write_gic_vl_other(mips_cm_vp_id(cpu));
- write_gic_vo_ctl(GIC_VX_CTL_EIC);
- }
-
/* Always use vector 1 in EIC mode */
gic_cpu_pin = 0;
timer_cpu_pin = gic_cpu_pin;
@@ -737,7 +762,7 @@ static int __init gic_of_init(struct device_node *node,
gic_shared_intrs, 0,
&gic_irq_domain_ops, NULL);
if (!gic_irq_domain) {
- pr_err("Failed to add GIC IRQ domain");
+ pr_err("Failed to add IRQ domain");
return -ENXIO;
}
@@ -746,7 +771,7 @@ static int __init gic_of_init(struct device_node *node,
GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
node, &gic_ipi_domain_ops, NULL);
if (!gic_ipi_domain) {
- pr_err("Failed to add GIC IPI domain");
+ pr_err("Failed to add IPI domain");
return -ENXIO;
}
@@ -756,10 +781,12 @@ static int __init gic_of_init(struct device_node *node,
!of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
bitmap_set(ipi_resrv, v[0], v[1]);
} else {
- /* Make the last 2 * gic_vpes available for IPIs */
- bitmap_set(ipi_resrv,
- gic_shared_intrs - 2 * gic_vpes,
- 2 * gic_vpes);
+ /*
+ * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
+ * meeting the requirements of arch/mips SMP.
+ */
+ num_ipis = 2 * num_possible_cpus();
+ bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
}
bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
@@ -773,15 +800,8 @@ static int __init gic_of_init(struct device_node *node,
write_gic_rmask(i);
}
- for (i = 0; i < gic_vpes; i++) {
- write_gic_vl_other(mips_cm_vp_id(i));
- for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
- if (!gic_local_irq_is_routable(j))
- continue;
- write_gic_vo_rmask(BIT(j));
- }
- }
-
- return 0;
+ return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+ "irqchip/mips/gic:starting",
+ gic_cpu_startup, NULL);
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
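The DT-less fallback now reserves two shared interrupts per possible CPU/VP instead of per probed VPE. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical system: 256 shared GIC interrupts, 4 possible VPs */
	unsigned int gic_shared_intrs = 256, possible_cpus = 4;
	unsigned int num_ipis = 2 * possible_cpus;

	printf("reserving shared interrupts %u-%u for IPIs\n",
	       gic_shared_intrs - num_ipis, gic_shared_intrs - 1);
	return 0;
}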
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index b04a8ac6e744..d360a6eddd6d 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -25,10 +25,6 @@
#include <linux/irqchip/irq-omap-intc.h>
-/* Define these here for now until we drop all board-files */
-#define OMAP24XX_IC_BASE 0x480fe000
-#define OMAP34XX_IC_BASE 0x48200000
-
/* selected INTC register offsets */
#define INTC_REVISION 0x0000
@@ -70,8 +66,8 @@ static struct omap_intc_regs intc_context;
static struct irq_domain *domain;
static void __iomem *omap_irq_base;
-static int omap_nr_pending = 3;
-static int omap_nr_irqs = 96;
+static int omap_nr_pending;
+static int omap_nr_irqs;
static void intc_writel(u32 reg, u32 val)
{
@@ -364,14 +360,6 @@ omap_intc_handle_irq(struct pt_regs *regs)
handle_domain_irq(domain, irqnr, regs);
}
-void __init omap3_init_irq(void)
-{
- omap_nr_irqs = 96;
- omap_nr_pending = 3;
- omap_init_irq(OMAP34XX_IC_BASE, NULL);
- set_handle_irq(omap_intc_handle_irq);
-}
-
static int __init intc_of_init(struct device_node *node,
struct device_node *parent)
{
diff --git a/drivers/irqchip/irq-ompic.c b/drivers/irqchip/irq-ompic.c
new file mode 100644
index 000000000000..cf6d0c455518
--- /dev/null
+++ b/drivers/irqchip/irq-ompic.c
@@ -0,0 +1,202 @@
+/*
+ * Open Multi-Processor Interrupt Controller driver
+ *
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * The ompic device handles IPI communication between cores in multi-core
+ * OpenRISC systems.
+ *
+ * Registers
+ *
+ * For each CPU the ompic has 2 registers. The control register for sending
+ * and acking IPIs and the status register for receiving IPIs. The register
+ * layouts are as follows:
+ *
+ * Control register
+ * +---------+---------+----------+---------+
+ * | 31 | 30 | 29 .. 16 | 15 .. 0 |
+ * ----------+---------+----------+----------
+ * | IRQ ACK | IRQ GEN | DST CORE | DATA |
+ * +---------+---------+----------+---------+
+ *
+ * Status register
+ * +----------+-------------+----------+---------+
+ * | 31 | 30 | 29 .. 16 | 15 .. 0 |
+ * -----------+-------------+----------+---------+
+ * | Reserved | IRQ Pending | SRC CORE | DATA |
+ * +----------+-------------+----------+---------+
+ *
+ * Architecture
+ *
+ * - The ompic generates a level interrupt to the CPU PIC when a message is
+ * ready. Messages are delivered via the memory bus.
+ * - The ompic does not have any interrupt input lines.
+ * - The ompic is wired to the same irq line on each core.
+ * - Devices are wired to the same irq line on each core.
+ *
+ * +---------+ +---------+
+ * | CPU | | CPU |
+ * | Core 0 |<==\ (memory access) /==>| Core 1 |
+ * | [ PIC ]| | | | [ PIC ]|
+ * +----^-^--+ | | +----^-^--+
+ * | | v v | |
+ * <====|=|=================================|=|==> (memory bus)
+ * | | ^ ^ | |
+ * (ipi | +------|---------+--------|-------|-+ (device irq)
+ * irq | | | | |
+ * core0)| +------|---------|--------|-------+ (ipi irq core1)
+ * | | | | |
+ * +----o-o-+ | +--------+ |
+ * | ompic |<===/ | Device |<===/
+ * | IPI | +--------+
+ * +--------+*
+ *
+ */
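A standalone restatement of how the control-register fields described above combine into the word ompic_raise_softirq() writes (the constants mirror the macros defined further down in the file; the core numbers in the example are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define OMPIC_CTRL_IRQ_GEN	(1U << 30)
#define OMPIC_CTRL_DST(cpu)	(((cpu) & 0x3fff) << 16)
#define OMPIC_DATA(x)		((x) & 0xffff)

int main(void)
{
	/* Control word core 0 would write to raise an IPI with data 1 at core 1 */
	uint32_t ctrl = OMPIC_CTRL_IRQ_GEN | OMPIC_CTRL_DST(1) | OMPIC_DATA(1);

	printf("ctrl = %#010x\n", ctrl);	/* 0x40010001 */
	return 0;
}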
+
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <linux/irqchip.h>
+
+#define OMPIC_CPUBYTES 8
+#define OMPIC_CTRL(cpu) (0x0 + (cpu * OMPIC_CPUBYTES))
+#define OMPIC_STAT(cpu) (0x4 + (cpu * OMPIC_CPUBYTES))
+
+#define OMPIC_CTRL_IRQ_ACK (1 << 31)
+#define OMPIC_CTRL_IRQ_GEN (1 << 30)
+#define OMPIC_CTRL_DST(cpu) (((cpu) & 0x3fff) << 16)
+
+#define OMPIC_STAT_IRQ_PENDING (1 << 30)
+
+#define OMPIC_DATA(x) ((x) & 0xffff)
+
+DEFINE_PER_CPU(unsigned long, ops);
+
+static void __iomem *ompic_base;
+
+static inline u32 ompic_readreg(void __iomem *base, loff_t offset)
+{
+ return ioread32be(base + offset);
+}
+
+static void ompic_writereg(void __iomem *base, loff_t offset, u32 data)
+{
+ iowrite32be(data, base + offset);
+}
+
+static void ompic_raise_softirq(const struct cpumask *mask,
+ unsigned int ipi_msg)
+{
+ unsigned int dst_cpu;
+ unsigned int src_cpu = smp_processor_id();
+
+ for_each_cpu(dst_cpu, mask) {
+ set_bit(ipi_msg, &per_cpu(ops, dst_cpu));
+
+ /*
+ * On OpenRISC the atomic set_bit() call implies a memory
+ * barrier. Otherwise we would need: smp_wmb(); paired
+ * with the read in ompic_ipi_handler.
+ */
+
+ ompic_writereg(ompic_base, OMPIC_CTRL(src_cpu),
+ OMPIC_CTRL_IRQ_GEN |
+ OMPIC_CTRL_DST(dst_cpu) |
+ OMPIC_DATA(1));
+ }
+}
+
+static irqreturn_t ompic_ipi_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long *pending_ops = &per_cpu(ops, cpu);
+ unsigned long ops;
+
+ ompic_writereg(ompic_base, OMPIC_CTRL(cpu), OMPIC_CTRL_IRQ_ACK);
+ while ((ops = xchg(pending_ops, 0)) != 0) {
+
+ /*
+ * On OpenRISC the atomic xchg() call implies a memory
+ * barrier. Otherwise we may need an smp_rmb(); paired
+ * with the write in ompic_raise_softirq.
+ */
+
+ do {
+ unsigned long ipi_msg;
+
+ ipi_msg = __ffs(ops);
+ ops &= ~(1UL << ipi_msg);
+
+ handle_IPI(ipi_msg);
+ } while (ops);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init ompic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct resource res;
+ int irq;
+ int ret;
+
+ /* Validate the DT */
+ if (ompic_base) {
+ pr_err("ompic: duplicate ompic's are not supported");
+ return -EEXIST;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ pr_err("ompic: reg property requires an address and size");
+ return -EINVAL;
+ }
+
+ if (resource_size(&res) < (num_possible_cpus() * OMPIC_CPUBYTES)) {
+ pr_err("ompic: reg size, currently %d must be at least %d",
+ resource_size(&res),
+ (num_possible_cpus() * OMPIC_CPUBYTES));
+ return -EINVAL;
+ }
+
+ /* Setup the device */
+ ompic_base = ioremap(res.start, resource_size(&res));
+ if (!ompic_base) {
+ pr_err("ompic: unable to map registers");
+ return -ENOMEM;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ pr_err("ompic: unable to parse device irq");
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ ret = request_irq(irq, ompic_ipi_handler, IRQF_PERCPU,
+ "ompic_ipi", NULL);
+ if (ret)
+ goto out_irq_disp;
+
+ set_smp_cross_call(ompic_raise_softirq);
+
+ return 0;
+
+out_irq_disp:
+ irq_dispose_mapping(irq);
+out_unmap:
+ iounmap(ompic_base);
+ ompic_base = NULL;
+ return ret;
+}
+IRQCHIP_DECLARE(ompic, "openrisc,ompic", ompic_of_init);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 713177d97c7a..06f29cf5018a 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -389,9 +389,8 @@ MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);
static int intc_irqpin_probe(struct platform_device *pdev)
{
- const struct intc_irqpin_config *config = NULL;
+ const struct intc_irqpin_config *config;
struct device *dev = &pdev->dev;
- const struct of_device_id *of_id;
struct intc_irqpin_priv *p;
struct intc_irqpin_iomem *i;
struct resource *io[INTC_IRQPIN_REG_NR];
@@ -422,11 +421,9 @@ static int intc_irqpin_probe(struct platform_device *pdev)
p->pdev = pdev;
platform_set_drvdata(pdev, p);
- of_id = of_match_device(intc_irqpin_dt_ids, dev);
- if (of_id && of_id->data) {
- config = of_id->data;
+ config = of_device_get_match_data(dev);
+ if (config)
p->needs_clk = config->needs_clk;
- }
p->clk = devm_clk_get(dev, NULL);
if (IS_ERR(p->clk)) {
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
new file mode 100644
index 000000000000..1b6e2f7c59af
--- /dev/null
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -0,0 +1,227 @@
+/*
+ * Driver for Socionext External Interrupt Unit (EXIU)
+ *
+ * Copyright (c) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * Based on irq-tegra.c:
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2010,2013, NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define NUM_IRQS 32
+
+#define EIMASK 0x00
+#define EISRCSEL 0x04
+#define EIREQSTA 0x08
+#define EIRAWREQSTA 0x0C
+#define EIREQCLR 0x10
+#define EILVL 0x14
+#define EIEDG 0x18
+#define EISIR 0x1C
+
+struct exiu_irq_data {
+ void __iomem *base;
+ u32 spi_base;
+};
+
+static void exiu_irq_eoi(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(d->hwirq), data->base + EIREQCLR);
+ irq_chip_eoi_parent(d);
+}
+
+static void exiu_irq_mask(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ val = readl_relaxed(data->base + EIMASK) | BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIMASK);
+ irq_chip_mask_parent(d);
+}
+
+static void exiu_irq_unmask(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIMASK);
+ irq_chip_unmask_parent(d);
+}
+
+static void exiu_irq_enable(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ /* clear interrupts that were latched while disabled */
+ writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
+
+ val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIMASK);
+ irq_chip_enable_parent(d);
+}
+
+static int exiu_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ val = readl_relaxed(data->base + EILVL);
+ if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
+ val |= BIT(d->hwirq);
+ else
+ val &= ~BIT(d->hwirq);
+ writel_relaxed(val, data->base + EILVL);
+
+ val = readl_relaxed(data->base + EIEDG);
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
+ val &= ~BIT(d->hwirq);
+ else
+ val |= BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIEDG);
+
+ writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
+
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
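The net effect of exiu_irq_set_type() on the two per-line configuration bits can be summarised as follows (a restatement of the code above, not additional driver logic); the parent GIC is always told IRQ_TYPE_LEVEL_HIGH because the EXIU latches the event and presents it as an active-high level until it is cleared via EIREQCLR:

	requested type          EILVL bit   EIEDG bit
	IRQ_TYPE_LEVEL_HIGH     1           0
	IRQ_TYPE_LEVEL_LOW      0           0
	IRQ_TYPE_EDGE_RISING    1           1
	IRQ_TYPE_EDGE_FALLING   0           1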
+
+static struct irq_chip exiu_irq_chip = {
+ .name = "EXIU",
+ .irq_eoi = exiu_irq_eoi,
+ .irq_enable = exiu_irq_enable,
+ .irq_mask = exiu_irq_mask,
+ .irq_unmask = exiu_irq_unmask,
+ .irq_set_type = exiu_irq_set_type,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_EOI_THREADED |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int exiu_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct exiu_irq_data *info = domain->host_data;
+
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ if (fwspec->param[0] != GIC_SPI)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ *hwirq = fwspec->param[1] - info->spi_base;
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int exiu_domain_alloc(struct irq_domain *dom, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ struct exiu_irq_data *info = dom->host_data;
+ irq_hw_number_t hwirq;
+
+ if (fwspec->param_count != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (fwspec->param[0] != GIC_SPI)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ WARN_ON(nr_irqs != 1);
+ hwirq = fwspec->param[1] - info->spi_base;
+ irq_domain_set_hwirq_and_chip(dom, virq, hwirq, &exiu_irq_chip, info);
+
+ parent_fwspec = *fwspec;
+ parent_fwspec.fwnode = dom->parent->fwnode;
+ return irq_domain_alloc_irqs_parent(dom, virq, nr_irqs, &parent_fwspec);
+}
+
+static const struct irq_domain_ops exiu_domain_ops = {
+ .translate = exiu_domain_translate,
+ .alloc = exiu_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init exiu_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+ struct exiu_irq_data *data;
+ int err;
+
+ if (!parent) {
+ pr_err("%pOF: no parent, giving up\n", node);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: unable to obtain parent domain\n", node);
+ return -ENXIO;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (of_property_read_u32(node, "socionext,spi-base", &data->spi_base)) {
+ pr_err("%pOF: failed to parse 'spi-base' property\n", node);
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ data->base = of_iomap(node, 0);
+ if (!data->base) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ /* clear and mask all interrupts */
+ writel_relaxed(0xFFFFFFFF, data->base + EIREQCLR);
+ writel_relaxed(0xFFFFFFFF, data->base + EIMASK);
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_IRQS, node,
+ &exiu_domain_ops, data);
+ if (!domain) {
+ pr_err("%pOF: failed to allocate domain\n", node);
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ pr_info("%pOF: %d interrupts forwarded to %pOF\n", node, NUM_IRQS,
+ parent);
+
+ return 0;
+
+out_unmap:
+ iounmap(data->base);
+out_free:
+ kfree(data);
+ return err;
+}
+IRQCHIP_DECLARE(exiu, "socionext,synquacer-exiu", exiu_init);
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 45363ff8d06f..31ab0dee2ce7 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -14,27 +14,99 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#define EXTI_IMR 0x0
-#define EXTI_EMR 0x4
-#define EXTI_RTSR 0x8
-#define EXTI_FTSR 0xc
-#define EXTI_SWIER 0x10
-#define EXTI_PR 0x14
+#define IRQS_PER_BANK 32
+
+struct stm32_exti_bank {
+ u32 imr_ofst;
+ u32 emr_ofst;
+ u32 rtsr_ofst;
+ u32 ftsr_ofst;
+ u32 swier_ofst;
+ u32 pr_ofst;
+};
+
+static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
+ .imr_ofst = 0x00,
+ .emr_ofst = 0x04,
+ .rtsr_ofst = 0x08,
+ .ftsr_ofst = 0x0C,
+ .swier_ofst = 0x10,
+ .pr_ofst = 0x14,
+};
+
+static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
+ &stm32f4xx_exti_b1,
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
+ .imr_ofst = 0x80,
+ .emr_ofst = 0x84,
+ .rtsr_ofst = 0x00,
+ .ftsr_ofst = 0x04,
+ .swier_ofst = 0x08,
+ .pr_ofst = 0x88,
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
+ .imr_ofst = 0x90,
+ .emr_ofst = 0x94,
+ .rtsr_ofst = 0x20,
+ .ftsr_ofst = 0x24,
+ .swier_ofst = 0x28,
+ .pr_ofst = 0x98,
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
+ .imr_ofst = 0xA0,
+ .emr_ofst = 0xA4,
+ .rtsr_ofst = 0x40,
+ .ftsr_ofst = 0x44,
+ .swier_ofst = 0x48,
+ .pr_ofst = 0xA8,
+};
+
+static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
+ &stm32h7xx_exti_b1,
+ &stm32h7xx_exti_b2,
+ &stm32h7xx_exti_b3,
+};
+
+static unsigned long stm32_exti_pending(struct irq_chip_generic *gc)
+{
+ const struct stm32_exti_bank *stm32_bank = gc->private;
+
+ return irq_reg_readl(gc, stm32_bank->pr_ofst);
+}
+
+static void stm32_exti_irq_ack(struct irq_chip_generic *gc, u32 mask)
+{
+ const struct stm32_exti_bank *stm32_bank = gc->private;
+
+ irq_reg_writel(gc, mask, stm32_bank->pr_ofst);
+}
static void stm32_irq_handler(struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
- struct irq_chip_generic *gc = domain->gc->gc[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int virq, nbanks = domain->gc->num_chips;
+ struct irq_chip_generic *gc;
+ const struct stm32_exti_bank *stm32_bank;
unsigned long pending;
- int n;
+ int n, i, irq_base = 0;
chained_irq_enter(chip, desc);
- while ((pending = irq_reg_readl(gc, EXTI_PR))) {
- for_each_set_bit(n, &pending, BITS_PER_LONG) {
- generic_handle_irq(irq_find_mapping(domain, n));
- irq_reg_writel(gc, BIT(n), EXTI_PR);
+ for (i = 0; i < nbanks; i++, irq_base += IRQS_PER_BANK) {
+ gc = irq_get_domain_generic_chip(domain, irq_base);
+ stm32_bank = gc->private;
+
+ while ((pending = stm32_exti_pending(gc))) {
+ for_each_set_bit(n, &pending, IRQS_PER_BANK) {
+ virq = irq_find_mapping(domain, irq_base + n);
+ generic_handle_irq(virq);
+ stm32_exti_irq_ack(gc, BIT(n));
+ }
}
}
@@ -44,13 +116,14 @@ static void stm32_irq_handler(struct irq_desc *desc)
static int stm32_irq_set_type(struct irq_data *data, unsigned int type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- int pin = data->hwirq;
+ const struct stm32_exti_bank *stm32_bank = gc->private;
+ int pin = data->hwirq % IRQS_PER_BANK;
u32 rtsr, ftsr;
irq_gc_lock(gc);
- rtsr = irq_reg_readl(gc, EXTI_RTSR);
- ftsr = irq_reg_readl(gc, EXTI_FTSR);
+ rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
+ ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);
switch (type) {
case IRQ_TYPE_EDGE_RISING:
@@ -70,8 +143,8 @@ static int stm32_irq_set_type(struct irq_data *data, unsigned int type)
return -EINVAL;
}
- irq_reg_writel(gc, rtsr, EXTI_RTSR);
- irq_reg_writel(gc, ftsr, EXTI_FTSR);
+ irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
+ irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);
irq_gc_unlock(gc);
@@ -81,17 +154,18 @@ static int stm32_irq_set_type(struct irq_data *data, unsigned int type)
static int stm32_irq_set_wake(struct irq_data *data, unsigned int on)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- int pin = data->hwirq;
- u32 emr;
+ const struct stm32_exti_bank *stm32_bank = gc->private;
+ int pin = data->hwirq % IRQS_PER_BANK;
+ u32 imr;
irq_gc_lock(gc);
- emr = irq_reg_readl(gc, EXTI_EMR);
+ imr = irq_reg_readl(gc, stm32_bank->imr_ofst);
if (on)
- emr |= BIT(pin);
+ imr |= BIT(pin);
else
- emr &= ~BIT(pin);
- irq_reg_writel(gc, emr, EXTI_EMR);
+ imr &= ~BIT(pin);
+ irq_reg_writel(gc, imr, stm32_bank->imr_ofst);
irq_gc_unlock(gc);
@@ -101,11 +175,12 @@ static int stm32_irq_set_wake(struct irq_data *data, unsigned int on)
static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq,
unsigned int nr_irqs, void *data)
{
- struct irq_chip_generic *gc = d->gc->gc[0];
+ struct irq_chip_generic *gc;
struct irq_fwspec *fwspec = data;
irq_hw_number_t hwirq;
hwirq = fwspec->param[0];
+ gc = irq_get_domain_generic_chip(d, hwirq);
irq_map_generic_chip(d, virq, hwirq);
irq_domain_set_info(d, virq, hwirq, &gc->chip_types->chip, gc,
@@ -129,8 +204,9 @@ struct irq_domain_ops irq_exti_domain_ops = {
.free = stm32_exti_free,
};
-static int __init stm32_exti_init(struct device_node *node,
- struct device_node *parent)
+static int
+__init stm32_exti_init(const struct stm32_exti_bank **stm32_exti_banks,
+ int bank_nr, struct device_node *node)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
int nr_irqs, nr_exti, ret, i;
@@ -144,23 +220,16 @@ static int __init stm32_exti_init(struct device_node *node,
return -ENOMEM;
}
- /* Determine number of irqs supported */
- writel_relaxed(~0UL, base + EXTI_RTSR);
- nr_exti = fls(readl_relaxed(base + EXTI_RTSR));
- writel_relaxed(0, base + EXTI_RTSR);
-
- pr_info("%pOF: %d External IRQs detected\n", node, nr_exti);
-
- domain = irq_domain_add_linear(node, nr_exti,
+ domain = irq_domain_add_linear(node, bank_nr * IRQS_PER_BANK,
&irq_exti_domain_ops, NULL);
if (!domain) {
pr_err("%s: Could not register interrupt domain.\n",
- node->name);
+ node->name);
ret = -ENOMEM;
goto out_unmap;
}
- ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti",
+ ret = irq_alloc_domain_generic_chips(domain, IRQS_PER_BANK, 1, "exti",
handle_edge_irq, clr, 0, 0);
if (ret) {
pr_err("%pOF: Could not allocate generic interrupt chip.\n",
@@ -168,18 +237,41 @@ static int __init stm32_exti_init(struct device_node *node,
goto out_free_domain;
}
- gc = domain->gc->gc[0];
- gc->reg_base = base;
- gc->chip_types->type = IRQ_TYPE_EDGE_BOTH;
- gc->chip_types->chip.name = gc->chip_types[0].chip.name;
- gc->chip_types->chip.irq_ack = irq_gc_ack_set_bit;
- gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
- gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
- gc->chip_types->chip.irq_set_type = stm32_irq_set_type;
- gc->chip_types->chip.irq_set_wake = stm32_irq_set_wake;
- gc->chip_types->regs.ack = EXTI_PR;
- gc->chip_types->regs.mask = EXTI_IMR;
- gc->chip_types->handler = handle_edge_irq;
+ for (i = 0; i < bank_nr; i++) {
+ const struct stm32_exti_bank *stm32_bank = stm32_exti_banks[i];
+ u32 irqs_mask;
+
+ gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK);
+
+ gc->reg_base = base;
+ gc->chip_types->type = IRQ_TYPE_EDGE_BOTH;
+ gc->chip_types->chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types->chip.irq_set_type = stm32_irq_set_type;
+ gc->chip_types->chip.irq_set_wake = stm32_irq_set_wake;
+ gc->chip_types->regs.ack = stm32_bank->pr_ofst;
+ gc->chip_types->regs.mask = stm32_bank->imr_ofst;
+ gc->private = (void *)stm32_bank;
+
+ /* Determine number of irqs supported */
+ writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst);
+ irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst);
+ nr_exti = fls(readl_relaxed(base + stm32_bank->rtsr_ofst));
+
+ /*
+ * This IP has no reset, so after a hot reboot we should
+ * clear the registers to avoid residue
+ */
+ writel_relaxed(0, base + stm32_bank->imr_ofst);
+ writel_relaxed(0, base + stm32_bank->emr_ofst);
+ writel_relaxed(0, base + stm32_bank->rtsr_ofst);
+ writel_relaxed(0, base + stm32_bank->ftsr_ofst);
+ writel_relaxed(~0UL, base + stm32_bank->pr_ofst);
+
+ pr_info("%s: bank%d, External IRQs available:%#x\n",
+ node->full_name, i, irqs_mask);
+ }
nr_irqs = of_irq_count(node);
for (i = 0; i < nr_irqs; i++) {
@@ -198,4 +290,20 @@ out_unmap:
return ret;
}
-IRQCHIP_DECLARE(stm32_exti, "st,stm32-exti", stm32_exti_init);
+static int __init stm32f4_exti_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return stm32_exti_init(stm32f4xx_exti_banks,
+ ARRAY_SIZE(stm32f4xx_exti_banks), np);
+}
+
+IRQCHIP_DECLARE(stm32f4_exti, "st,stm32-exti", stm32f4_exti_of_init);
+
+static int __init stm32h7_exti_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return stm32_exti_init(stm32h7xx_exti_banks,
+ ARRAY_SIZE(stm32h7xx_exti_banks), np);
+}
+
+IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init);
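With the banked layout, an EXTI line number now decomposes into a bank index and a bit position, and each bank has its own register offsets. A standalone sketch using the stm32h7xx offsets listed above (the line number is hypothetical):

#include <stdio.h>

#define IRQS_PER_BANK	32

int main(void)
{
	/* pr_ofst of the three stm32h7xx banks, copied from the tables above */
	static const unsigned int pr_ofst[] = { 0x88, 0x98, 0xA8 };
	/* Hypothetical EXTI line 85 */
	unsigned int hwirq = 85;
	unsigned int bank = hwirq / IRQS_PER_BANK;	/* -> bank 2 */
	unsigned int pin  = hwirq % IRQS_PER_BANK;	/* -> bit 21 */

	printf("line %u: bank %u, bit %u, pending register at offset %#x\n",
	       hwirq, bank, pin, pr_ofst[bank]);
	return 0;
}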
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index dce6632daae1..ae2b2669af1b 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -156,7 +156,7 @@ _set_debug(struct fritzcard *card)
}
static int
-set_debug(const char *val, struct kernel_param *kp)
+set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct fritzcard *card;
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index d5bdbaf93a1a..1fc290659e94 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -244,7 +244,7 @@ _set_debug(struct inf_hw *card)
}
static int
-set_debug(const char *val, struct kernel_param *kp)
+set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct inf_hw *card;
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 6a6d848bd18e..89d9ba8ed535 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -111,7 +111,7 @@ _set_debug(struct tiger_hw *card)
}
static int
-set_debug(const char *val, struct kernel_param *kp)
+set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct tiger_hw *card;
diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
index 9815bb4eec9c..1f1446ed8d5f 100644
--- a/drivers/isdn/hardware/mISDN/speedfax.c
+++ b/drivers/isdn/hardware/mISDN/speedfax.c
@@ -94,7 +94,7 @@ _set_debug(struct sfax_hw *card)
}
static int
-set_debug(const char *val, struct kernel_param *kp)
+set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct sfax_hw *card;
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 536d5137f49d..5acf6ab67cd3 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -101,7 +101,7 @@ _set_debug(struct w6692_hw *card)
}
static int
-set_debug(const char *val, struct kernel_param *kp)
+set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct w6692_hw *card;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 52ea34e337cd..318a28fd58fe 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -57,6 +57,16 @@ config LEDS_AAT1290
depends on PINCTRL
help
This option enables support for the LEDs on the AAT1290.
+config LEDS_APU
+ tristate "Front panel LED support for PC Engines APU/APU2 boards"
+ depends on LEDS_CLASS
+ depends on X86 && DMI
+ help
+ This driver makes the PC Engines APU/APU2 front panel LEDs
+ accessible from userspace programs through the LED subsystem.
+
+ To compile this driver as a module, choose M here: the
+ module will be called leds-apu.
config LEDS_AS3645A
tristate "AS3645A LED flash controller support"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 35980450db9b..a2a6b5a4f86d 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
# LED Platform Drivers
obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o
+obj-$(CONFIG_LEDS_APU) += leds-apu.o
obj-$(CONFIG_LEDS_AS3645A) += leds-as3645a.o
obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o
obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index ef1360445413..fd83c7f77a95 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -45,9 +45,9 @@ static int __led_set_brightness_blocking(struct led_classdev *led_cdev,
return led_cdev->brightness_set_blocking(led_cdev, value);
}
-static void led_timer_function(unsigned long data)
+static void led_timer_function(struct timer_list *t)
{
- struct led_classdev *led_cdev = (void *)data;
+ struct led_classdev *led_cdev = from_timer(led_cdev, t, blink_timer);
unsigned long brightness;
unsigned long delay;
@@ -178,8 +178,7 @@ void led_init_core(struct led_classdev *led_cdev)
{
INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed);
- setup_timer(&led_cdev->blink_timer, led_timer_function,
- (unsigned long)led_cdev);
+ timer_setup(&led_cdev->blink_timer, led_timer_function, 0);
}
EXPORT_SYMBOL_GPL(led_init_core);
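The led-core.c hunk above shows the timer API conversion that recurs throughout this series: the callback now receives the struct timer_list pointer and recovers its container with from_timer() instead of casting an "unsigned long data" cookie. A minimal stand-alone sketch of the same pattern (illustration only; the structure and function names are made up, and only timer_setup(), from_timer(), mod_timer() and msecs_to_jiffies() are the real kernel APIs assumed here):

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>

struct blink_state {
	struct timer_list timer;	/* embedded timer, no data cookie needed */
	bool on;
};

static void blink_fn(struct timer_list *t)
{
	/* recover the containing structure from the timer pointer */
	struct blink_state *bs = from_timer(bs, t, timer);

	bs->on = !bs->on;
	mod_timer(&bs->timer, jiffies + msecs_to_jiffies(500));
}

static void blink_start(struct blink_state *bs)
{
	timer_setup(&bs->timer, blink_fn, 0);
	mod_timer(&bs->timer, jiffies + msecs_to_jiffies(500));
}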
diff --git a/drivers/leds/leds-apu.c b/drivers/leds/leds-apu.c
new file mode 100644
index 000000000000..74820aab9497
--- /dev/null
+++ b/drivers/leds/leds-apu.c
@@ -0,0 +1,278 @@
+/*
+ * drivers/leds/leds-apu.c
+ * Copyright (C) 2017 Alan Mizrahi, alan at mizrahi dot com dot ve
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define APU1_FCH_ACPI_MMIO_BASE 0xFED80000
+#define APU1_FCH_GPIO_BASE (APU1_FCH_ACPI_MMIO_BASE + 0x01BD)
+#define APU1_LEDON 0x08
+#define APU1_LEDOFF 0xC8
+#define APU1_NUM_GPIO 3
+#define APU1_IOSIZE sizeof(u8)
+
+#define APU2_FCH_ACPI_MMIO_BASE 0xFED80000
+#define APU2_FCH_GPIO_BASE (APU2_FCH_ACPI_MMIO_BASE + 0x1500)
+#define APU2_GPIO_BIT_WRITE 22
+#define APU2_APU2_NUM_GPIO 4
+#define APU2_IOSIZE sizeof(u32)
+
+/* LED access parameters */
+struct apu_param {
+ void __iomem *addr; /* for ioread/iowrite */
+};
+
+/* LED private data */
+struct apu_led_priv {
+ struct led_classdev cdev;
+ struct apu_param param;
+};
+#define cdev_to_priv(c) container_of(c, struct apu_led_priv, cdev)
+
+/* LED profile */
+struct apu_led_profile {
+ const char *name;
+ enum led_brightness brightness;
+ unsigned long offset; /* for devm_ioremap */
+};
+
+/* Supported platform types */
+enum apu_led_platform_types {
+ APU1_LED_PLATFORM,
+ APU2_LED_PLATFORM,
+};
+
+struct apu_led_pdata {
+ struct platform_device *pdev;
+ struct apu_led_priv *pled;
+ const struct apu_led_profile *profile;
+ enum apu_led_platform_types platform;
+ int num_led_instances;
+ int iosize; /* for devm_ioremap() */
+ spinlock_t lock;
+};
+
+static struct apu_led_pdata *apu_led;
+
+static const struct apu_led_profile apu1_led_profile[] = {
+ { "apu:green:1", LED_ON, APU1_FCH_GPIO_BASE + 0 * APU1_IOSIZE },
+ { "apu:green:2", LED_OFF, APU1_FCH_GPIO_BASE + 1 * APU1_IOSIZE },
+ { "apu:green:3", LED_OFF, APU1_FCH_GPIO_BASE + 2 * APU1_IOSIZE },
+};
+
+static const struct apu_led_profile apu2_led_profile[] = {
+ { "apu2:green:1", LED_ON, APU2_FCH_GPIO_BASE + 68 * APU2_IOSIZE },
+ { "apu2:green:2", LED_OFF, APU2_FCH_GPIO_BASE + 69 * APU2_IOSIZE },
+ { "apu2:green:3", LED_OFF, APU2_FCH_GPIO_BASE + 70 * APU2_IOSIZE },
+};
+
+static const struct dmi_system_id apu_led_dmi_table[] __initconst = {
+ {
+ .ident = "apu",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "APU")
+ }
+ },
+ {
+ .ident = "apu2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "APU2")
+ }
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, apu_led_dmi_table);
+
+static void apu1_led_brightness_set(struct led_classdev *led, enum led_brightness value)
+{
+ struct apu_led_priv *pled = cdev_to_priv(led);
+
+ spin_lock(&apu_led->lock);
+ iowrite8(value ? APU1_LEDON : APU1_LEDOFF, pled->param.addr);
+ spin_unlock(&apu_led->lock);
+}
+
+static void apu2_led_brightness_set(struct led_classdev *led, enum led_brightness value)
+{
+ struct apu_led_priv *pled = cdev_to_priv(led);
+ u32 value_new;
+
+ spin_lock(&apu_led->lock);
+
+ value_new = ioread32(pled->param.addr);
+
+ if (value)
+ value_new &= ~BIT(APU2_GPIO_BIT_WRITE);
+ else
+ value_new |= BIT(APU2_GPIO_BIT_WRITE);
+
+ iowrite32(value_new, pled->param.addr);
+
+ spin_unlock(&apu_led->lock);
+}
+
+static int apu_led_config(struct device *dev, struct apu_led_pdata *apuld)
+{
+ int i;
+ int err;
+
+ apu_led->pled = devm_kzalloc(dev,
+ sizeof(struct apu_led_priv) * apu_led->num_led_instances,
+ GFP_KERNEL);
+
+ if (!apu_led->pled)
+ return -ENOMEM;
+
+ for (i = 0; i < apu_led->num_led_instances; i++) {
+ struct apu_led_priv *pled = &apu_led->pled[i];
+ struct led_classdev *led_cdev = &pled->cdev;
+
+ led_cdev->name = apu_led->profile[i].name;
+ led_cdev->brightness = apu_led->profile[i].brightness;
+ led_cdev->max_brightness = 1;
+ led_cdev->flags = LED_CORE_SUSPENDRESUME;
+ if (apu_led->platform == APU1_LED_PLATFORM)
+ led_cdev->brightness_set = apu1_led_brightness_set;
+ else if (apu_led->platform == APU2_LED_PLATFORM)
+ led_cdev->brightness_set = apu2_led_brightness_set;
+
+ pled->param.addr = devm_ioremap(dev,
+ apu_led->profile[i].offset, apu_led->iosize);
+ if (!pled->param.addr) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ err = led_classdev_register(dev, led_cdev);
+ if (err)
+ goto error;
+
+ led_cdev->brightness_set(led_cdev, apu_led->profile[i].brightness);
+ }
+
+ return 0;
+
+error:
+ while (i-- > 0)
+ led_classdev_unregister(&apu_led->pled[i].cdev);
+
+ return err;
+}
+
+static int __init apu_led_probe(struct platform_device *pdev)
+{
+ apu_led = devm_kzalloc(&pdev->dev, sizeof(*apu_led), GFP_KERNEL);
+
+ if (!apu_led)
+ return -ENOMEM;
+
+ apu_led->pdev = pdev;
+
+ if (dmi_match(DMI_BOARD_NAME, "APU")) {
+ apu_led->profile = apu1_led_profile;
+ apu_led->platform = APU1_LED_PLATFORM;
+ apu_led->num_led_instances = ARRAY_SIZE(apu1_led_profile);
+ apu_led->iosize = APU1_IOSIZE;
+ } else if (dmi_match(DMI_BOARD_NAME, "APU2")) {
+ apu_led->profile = apu2_led_profile;
+ apu_led->platform = APU2_LED_PLATFORM;
+ apu_led->num_led_instances = ARRAY_SIZE(apu2_led_profile);
+ apu_led->iosize = APU2_IOSIZE;
+ }
+
+ spin_lock_init(&apu_led->lock);
+ return apu_led_config(&pdev->dev, apu_led);
+}
+
+static struct platform_driver apu_led_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+static int __init apu_led_init(void)
+{
+ struct platform_device *pdev;
+ int err;
+
+ if (!dmi_match(DMI_SYS_VENDOR, "PC Engines")) {
+ pr_err("No PC Engines board detected\n");
+ return -ENODEV;
+ }
+ if (!(dmi_match(DMI_PRODUCT_NAME, "APU") || dmi_match(DMI_PRODUCT_NAME, "APU2"))) {
+ pr_err("Unknown PC Engines board: %s\n",
+ dmi_get_system_info(DMI_PRODUCT_NAME));
+ return -ENODEV;
+ }
+
+ pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ pr_err("Device allocation failed\n");
+ return PTR_ERR(pdev);
+ }
+
+ err = platform_driver_probe(&apu_led_driver, apu_led_probe);
+ if (err) {
+ pr_err("Probe platform driver failed\n");
+ platform_device_unregister(pdev);
+ }
+
+ return err;
+}
+
+static void __exit apu_led_exit(void)
+{
+ int i;
+
+ for (i = 0; i < apu_led->num_led_instances; i++)
+ led_classdev_unregister(&apu_led->pled[i].cdev);
+
+ platform_device_unregister(apu_led->pdev);
+ platform_driver_unregister(&apu_led_driver);
+}
+
+module_init(apu_led_init);
+module_exit(apu_led_exit);
+
+MODULE_AUTHOR("Alan Mizrahi");
+MODULE_DESCRIPTION("PC Engines APU family LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:leds_apu");
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 924e50aefb00..52b6f529e278 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -323,7 +323,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
if (status != LP5523_ENG_STATUS_MASK) {
dev_err(&chip->cl->dev,
- "cound not configure LED engine, status = 0x%.2x\n",
+ "could not configure LED engine, status = 0x%.2x\n",
status);
ret = -1;
}
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 905729191d3e..78183f90820e 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -61,6 +61,10 @@
#define PCA955X_LS_BLINK0 0x2 /* Blink at PWM0 rate */
#define PCA955X_LS_BLINK1 0x3 /* Blink at PWM1 rate */
+#define PCA955X_GPIO_INPUT LED_OFF
+#define PCA955X_GPIO_HIGH LED_OFF
+#define PCA955X_GPIO_LOW LED_FULL
+
enum pca955x_type {
pca9550,
pca9551,
@@ -329,9 +333,9 @@ static int pca955x_set_value(struct gpio_chip *gc, unsigned int offset,
struct pca955x_led *led = &pca955x->leds[offset];
if (val)
- return pca955x_led_set(&led->led_cdev, LED_FULL);
- else
- return pca955x_led_set(&led->led_cdev, LED_OFF);
+ return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_HIGH);
+
+ return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_LOW);
}
static void pca955x_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
@@ -355,8 +359,11 @@ static int pca955x_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
static int pca955x_gpio_direction_input(struct gpio_chip *gc,
unsigned int offset)
{
- /* To use as input ensure pin is not driven */
- return pca955x_set_value(gc, offset, 0);
+ struct pca955x *pca955x = gpiochip_get_data(gc);
+ struct pca955x_led *led = &pca955x->leds[offset];
+
+ /* To use as input ensure pin is not driven. */
+ return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_INPUT);
}
static int pca955x_gpio_direction_output(struct gpio_chip *gc,
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 45222a7f4f75..c12c16fb1b9c 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -715,7 +715,7 @@ tca6507_led_dt_init(struct i2c_client *client)
if (of_property_match_string(child, "compatible", "gpio") >= 0)
led.flags |= TCA6507_MAKE_GPIO;
ret = of_property_read_u32(child, "reg", &reg);
- if (ret != 0 || reg < 0 || reg >= NUM_LEDS)
+ if (ret != 0 || reg >= NUM_LEDS)
continue;
tca_leds[reg] = led;
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 3f9ddb9fafa7..bb090216b4dc 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -77,6 +77,15 @@ config LEDS_TRIGGER_CPU
If unsure, say N.
+config LEDS_TRIGGER_ACTIVITY
+ tristate "LED activity Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by the immediate CPU usage.
+ The flash frequency and duty cycle vary from faint flashes to
+ intense brightness depending on the instant CPU load.
+ If unsure, say N.
+
config LEDS_TRIGGER_GPIO
tristate "LED GPIO Trigger"
depends on LEDS_TRIGGERS
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index 9f2e868811e2..4a8b6cff7761 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
obj-$(CONFIG_LEDS_TRIGGER_CPU) += ledtrig-cpu.o
+obj-$(CONFIG_LEDS_TRIGGER_ACTIVITY) += ledtrig-activity.o
obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
obj-$(CONFIG_LEDS_TRIGGER_CAMERA) += ledtrig-camera.o
diff --git a/drivers/leds/trigger/ledtrig-activity.c b/drivers/leds/trigger/ledtrig-activity.c
new file mode 100644
index 000000000000..5081894082bd
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-activity.c
@@ -0,0 +1,275 @@
+/*
+ * Activity LED trigger
+ *
+ * Copyright (C) 2017 Willy Tarreau <w@1wt.eu>
+ * Partially based on Atsushi Nemoto's ledtrig-heartbeat.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include "../leds.h"
+
+static int panic_detected;
+
+struct activity_data {
+ struct timer_list timer;
+ struct led_classdev *led_cdev;
+ u64 last_used;
+ u64 last_boot;
+ int time_left;
+ int state;
+ int invert;
+};
+
+static void led_activity_function(struct timer_list *t)
+{
+ struct activity_data *activity_data = from_timer(activity_data, t,
+ timer);
+ struct led_classdev *led_cdev = activity_data->led_cdev;
+ struct timespec boot_time;
+ unsigned int target;
+ unsigned int usage;
+ int delay;
+ u64 curr_used;
+ u64 curr_boot;
+ s32 diff_used;
+ s32 diff_boot;
+ int cpus;
+ int i;
+
+ if (test_and_clear_bit(LED_BLINK_BRIGHTNESS_CHANGE, &led_cdev->work_flags))
+ led_cdev->blink_brightness = led_cdev->new_blink_brightness;
+
+ if (unlikely(panic_detected)) {
+ /* full brightness in case of panic */
+ led_set_brightness_nosleep(led_cdev, led_cdev->blink_brightness);
+ return;
+ }
+
+ get_monotonic_boottime(&boot_time);
+
+ cpus = 0;
+ curr_used = 0;
+
+ for_each_possible_cpu(i) {
+ curr_used += kcpustat_cpu(i).cpustat[CPUTIME_USER]
+ + kcpustat_cpu(i).cpustat[CPUTIME_NICE]
+ + kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]
+ + kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]
+ + kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+ cpus++;
+ }
+
+ /* We come here every 100ms in the worst case, so that's 100M ns of
+ * cumulated time. By dividing by 2^16, we get the time resolution
+ * down to 16us, ensuring we won't overflow 32-bit computations below
+ * even up to 3k CPUs, while keeping divides cheap on smaller systems.
+ */
+ curr_boot = timespec_to_ns(&boot_time) * cpus;
+ diff_boot = (curr_boot - activity_data->last_boot) >> 16;
+ diff_used = (curr_used - activity_data->last_used) >> 16;
+ activity_data->last_boot = curr_boot;
+ activity_data->last_used = curr_used;
+
+ if (diff_boot <= 0 || diff_used < 0)
+ usage = 0;
+ else if (diff_used >= diff_boot)
+ usage = 100;
+ else
+ usage = 100 * diff_used / diff_boot;
+
+ /*
+ * Now we know the total boot time multiplied by the number of CPUs, and
+ * the total time spent busy (user+nice+system+irq+softirq) on all CPUs,
+ * as accumulated above. We'll compare how they evolved since the last
+ * call. The % of overall CPU usage is:
+ *
+ * delta_used / delta_boot
+ *
+ * What we want is that when the CPU usage is zero, the LED must blink
+ * slowly with very faint flashes that are detectable but not disturbing
+ * (typically 10ms every second, or 10ms ON, 990ms OFF). Then we want
+ * blinking frequency to increase up to the point where the load is
+ * enough to saturate one core in multi-core systems or 50% in single
+ * core systems. At this point it should reach 10 Hz with a 10/90 duty
+ * cycle (10ms ON, 90ms OFF). After this point, the blinking frequency
+ * remains stable (10 Hz) and only the duty cycle increases to report
+ * the activity, up to the point where we have 90ms ON, 10ms OFF when
+ * all cores are saturated. It's important that the LED never stays in
+ * a steady state so that it's easy to distinguish an idle or saturated
+ * machine from a hung one.
+ *
+ * This gives us:
+ * - a target CPU usage of min(50%, 100%/#CPU) for a 10% duty cycle
+ * (10ms ON, 90ms OFF)
+ * - below target:
+ * ON_ms = 10
+ * OFF_ms = 90 + (1 - usage/target) * 900
+ * - above target:
+ * ON_ms = 10 + (usage-target)/(100%-target) * 80
+ * OFF_ms = 90 - (usage-target)/(100%-target) * 80
+ *
+ * In order to keep good responsiveness, we cap the sleep time to
+ * 100 ms and keep track of the sleep time left. This allows us to
+ * change it quickly if needed.
+ */
+
+ activity_data->time_left -= 100;
+ if (activity_data->time_left <= 0) {
+ activity_data->time_left = 0;
+ activity_data->state = !activity_data->state;
+ led_set_brightness_nosleep(led_cdev,
+ (activity_data->state ^ activity_data->invert) ?
+ led_cdev->blink_brightness : LED_OFF);
+ }
+
+ target = (cpus > 1) ? (100 / cpus) : 50;
+
+ if (usage < target)
+ delay = activity_data->state ?
+ 10 : /* ON */
+ 990 - 900 * usage / target; /* OFF */
+ else
+ delay = activity_data->state ?
+ 10 + 80 * (usage - target) / (100 - target) : /* ON */
+ 90 - 80 * (usage - target) / (100 - target); /* OFF */
+
+
+ if (!activity_data->time_left || delay <= activity_data->time_left)
+ activity_data->time_left = delay;
+
+ delay = min_t(int, activity_data->time_left, 100);
+ mod_timer(&activity_data->timer, jiffies + msecs_to_jiffies(delay));
+}
+
+static ssize_t led_invert_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct activity_data *activity_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%u\n", activity_data->invert);
+}
+
+static ssize_t led_invert_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct activity_data *activity_data = led_cdev->trigger_data;
+ unsigned long state;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ activity_data->invert = !!state;
+
+ return size;
+}
+
+static DEVICE_ATTR(invert, 0644, led_invert_show, led_invert_store);
+
+static void activity_activate(struct led_classdev *led_cdev)
+{
+ struct activity_data *activity_data;
+ int rc;
+
+ activity_data = kzalloc(sizeof(*activity_data), GFP_KERNEL);
+ if (!activity_data)
+ return;
+
+ led_cdev->trigger_data = activity_data;
+ rc = device_create_file(led_cdev->dev, &dev_attr_invert);
+ if (rc) {
+ kfree(led_cdev->trigger_data);
+ return;
+ }
+
+ activity_data->led_cdev = led_cdev;
+ timer_setup(&activity_data->timer, led_activity_function, 0);
+ if (!led_cdev->blink_brightness)
+ led_cdev->blink_brightness = led_cdev->max_brightness;
+ led_activity_function(&activity_data->timer);
+ set_bit(LED_BLINK_SW, &led_cdev->work_flags);
+ led_cdev->activated = true;
+}
+
+static void activity_deactivate(struct led_classdev *led_cdev)
+{
+ struct activity_data *activity_data = led_cdev->trigger_data;
+
+ if (led_cdev->activated) {
+ del_timer_sync(&activity_data->timer);
+ device_remove_file(led_cdev->dev, &dev_attr_invert);
+ kfree(activity_data);
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
+ led_cdev->activated = false;
+ }
+}
+
+static struct led_trigger activity_led_trigger = {
+ .name = "activity",
+ .activate = activity_activate,
+ .deactivate = activity_deactivate,
+};
+
+static int activity_reboot_notifier(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ led_trigger_unregister(&activity_led_trigger);
+ return NOTIFY_DONE;
+}
+
+static int activity_panic_notifier(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ panic_detected = 1;
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block activity_reboot_nb = {
+ .notifier_call = activity_reboot_notifier,
+};
+
+static struct notifier_block activity_panic_nb = {
+ .notifier_call = activity_panic_notifier,
+};
+
+static int __init activity_init(void)
+{
+ int rc = led_trigger_register(&activity_led_trigger);
+
+ if (!rc) {
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &activity_panic_nb);
+ register_reboot_notifier(&activity_reboot_nb);
+ }
+ return rc;
+}
+
+static void __exit activity_exit(void)
+{
+ unregister_reboot_notifier(&activity_reboot_nb);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &activity_panic_nb);
+ led_trigger_unregister(&activity_led_trigger);
+}
+
+module_init(activity_init);
+module_exit(activity_exit);
+
+MODULE_AUTHOR("Willy Tarreau <w@1wt.eu>");
+MODULE_DESCRIPTION("Activity LED trigger");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index e95ea65380c8..f0896de410b8 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -25,19 +25,23 @@
static int panic_heartbeats;
struct heartbeat_trig_data {
+ struct led_classdev *led_cdev;
unsigned int phase;
unsigned int period;
struct timer_list timer;
unsigned int invert;
};
-static void led_heartbeat_function(unsigned long data)
+static void led_heartbeat_function(struct timer_list *t)
{
- struct led_classdev *led_cdev = (struct led_classdev *) data;
- struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
+ struct heartbeat_trig_data *heartbeat_data =
+ from_timer(heartbeat_data, t, timer);
+ struct led_classdev *led_cdev;
unsigned long brightness = LED_OFF;
unsigned long delay = 0;
+ led_cdev = heartbeat_data->led_cdev;
+
if (unlikely(panic_heartbeats)) {
led_set_brightness_nosleep(led_cdev, LED_OFF);
return;
@@ -127,18 +131,18 @@ static void heartbeat_trig_activate(struct led_classdev *led_cdev)
return;
led_cdev->trigger_data = heartbeat_data;
+ heartbeat_data->led_cdev = led_cdev;
rc = device_create_file(led_cdev->dev, &dev_attr_invert);
if (rc) {
kfree(led_cdev->trigger_data);
return;
}
- setup_timer(&heartbeat_data->timer,
- led_heartbeat_function, (unsigned long) led_cdev);
+ timer_setup(&heartbeat_data->timer, led_heartbeat_function, 0);
heartbeat_data->phase = 0;
if (!led_cdev->blink_brightness)
led_cdev->blink_brightness = led_cdev->max_brightness;
- led_heartbeat_function(heartbeat_data->timer.data);
+ led_heartbeat_function(&heartbeat_data->timer);
set_bit(LED_BLINK_SW, &led_cdev->work_flags);
led_cdev->activated = true;
}
diff --git a/drivers/leds/trigger/ledtrig-transient.c b/drivers/leds/trigger/ledtrig-transient.c
index 7e6011bd3646..7acce64b692a 100644
--- a/drivers/leds/trigger/ledtrig-transient.c
+++ b/drivers/leds/trigger/ledtrig-transient.c
@@ -33,12 +33,14 @@ struct transient_trig_data {
int restore_state;
unsigned long duration;
struct timer_list timer;
+ struct led_classdev *led_cdev;
};
-static void transient_timer_function(unsigned long data)
+static void transient_timer_function(struct timer_list *t)
{
- struct led_classdev *led_cdev = (struct led_classdev *) data;
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct transient_trig_data *transient_data =
+ from_timer(transient_data, t, timer);
+ struct led_classdev *led_cdev = transient_data->led_cdev;
transient_data->activate = 0;
led_set_brightness_nosleep(led_cdev, transient_data->restore_state);
@@ -169,6 +171,7 @@ static void transient_trig_activate(struct led_classdev *led_cdev)
return;
}
led_cdev->trigger_data = tdata;
+ tdata->led_cdev = led_cdev;
rc = device_create_file(led_cdev->dev, &dev_attr_activate);
if (rc)
@@ -182,8 +185,7 @@ static void transient_trig_activate(struct led_classdev *led_cdev)
if (rc)
goto err_out_state;
- setup_timer(&tdata->timer, transient_timer_function,
- (unsigned long) led_cdev);
+ timer_setup(&tdata->timer, transient_timer_function, 0);
led_cdev->activated = true;
return;
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index ead61a93cb4e..2a953efec4e1 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -4,7 +4,8 @@
menuconfig NVM
bool "Open-Channel SSD target support"
- depends on BLOCK && HAS_DMA
+ depends on BLOCK && HAS_DMA && PCI
+ select BLK_DEV_NVME
help
Say Y here to get to enable Open-channel SSDs.
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index ddae430b6eae..83249b43dd06 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
+#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
@@ -138,7 +139,6 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
int prev_nr_luns;
int i, j;
- nr_chnls = nr_luns / dev->geo.luns_per_chnl;
nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
@@ -226,6 +226,24 @@ static const struct block_device_operations nvm_fops = {
.owner = THIS_MODULE,
};
+static struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
+{
+ struct nvm_tgt_type *tmp, *tt = NULL;
+
+ if (lock)
+ down_write(&nvm_tgtt_lock);
+
+ list_for_each_entry(tmp, &nvm_tgt_types, list)
+ if (!strcmp(name, tmp->name)) {
+ tt = tmp;
+ break;
+ }
+
+ if (lock)
+ up_write(&nvm_tgtt_lock);
+ return tt;
+}
+
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
struct nvm_ioctl_create_simple *s = &create->conf.s;
@@ -316,6 +334,8 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
list_add_tail(&t->list, &dev->targets);
mutex_unlock(&dev->mlock);
+ __module_get(tt->owner);
+
return 0;
err_sysfs:
if (tt->exit)
@@ -351,6 +371,7 @@ static void __nvm_remove_target(struct nvm_target *t)
nvm_remove_tgt_dev(t->dev, 1);
put_disk(tdisk);
+ module_put(t->type->owner);
list_del(&t->list);
kfree(t);
@@ -532,25 +553,6 @@ void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
}
EXPORT_SYMBOL(nvm_part_to_tgt);
-struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
-{
- struct nvm_tgt_type *tmp, *tt = NULL;
-
- if (lock)
- down_write(&nvm_tgtt_lock);
-
- list_for_each_entry(tmp, &nvm_tgt_types, list)
- if (!strcmp(name, tmp->name)) {
- tt = tmp;
- break;
- }
-
- if (lock)
- up_write(&nvm_tgtt_lock);
- return tt;
-}
-EXPORT_SYMBOL(nvm_find_target_type);
-
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
int ret = 0;
@@ -571,9 +573,9 @@ void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
if (!tt)
return;
- down_write(&nvm_lock);
+ down_write(&nvm_tgtt_lock);
list_del(&tt->list);
- up_write(&nvm_lock);
+ up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);
@@ -602,6 +604,52 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
return NULL;
}
+static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
+ const struct ppa_addr *ppas, int nr_ppas)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct nvm_geo *geo = &tgt_dev->geo;
+ int i, plane_cnt, pl_idx;
+ struct ppa_addr ppa;
+
+ if (geo->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
+ rqd->nr_ppas = nr_ppas;
+ rqd->ppa_addr = ppas[0];
+
+ return 0;
+ }
+
+ rqd->nr_ppas = nr_ppas;
+ rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
+ if (!rqd->ppa_list) {
+ pr_err("nvm: failed to allocate dma memory\n");
+ return -ENOMEM;
+ }
+
+ plane_cnt = geo->plane_mode;
+ rqd->nr_ppas *= plane_cnt;
+
+ for (i = 0; i < nr_ppas; i++) {
+ for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+ ppa = ppas[i];
+ ppa.g.pl = pl_idx;
+ rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
+ }
+ }
+
+ return 0;
+}
+
+static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
+ struct nvm_rq *rqd)
+{
+ if (!rqd->ppa_list)
+ return;
+
+ nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
+}
+
+
int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
int nr_ppas, int type)
{
@@ -616,7 +664,7 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
memset(&rqd, 0, sizeof(struct nvm_rq));
- nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
+ nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
nvm_rq_tgt_to_dev(tgt_dev, &rqd);
ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
@@ -658,12 +706,25 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
}
EXPORT_SYMBOL(nvm_submit_io);
-static void nvm_end_io_sync(struct nvm_rq *rqd)
+int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
- struct completion *waiting = rqd->private;
+ struct nvm_dev *dev = tgt_dev->parent;
+ int ret;
- complete(waiting);
+ if (!dev->ops->submit_io_sync)
+ return -ENODEV;
+
+ nvm_rq_tgt_to_dev(tgt_dev, rqd);
+
+ rqd->dev = tgt_dev;
+
+ /* In case of error, fail with right address format */
+ ret = dev->ops->submit_io_sync(dev, rqd);
+ nvm_rq_dev_to_tgt(tgt_dev, rqd);
+
+ return ret;
}
+EXPORT_SYMBOL(nvm_submit_io_sync);
int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
int nr_ppas)
@@ -671,25 +732,21 @@ int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
struct nvm_geo *geo = &tgt_dev->geo;
struct nvm_rq rqd;
int ret;
- DECLARE_COMPLETION_ONSTACK(wait);
memset(&rqd, 0, sizeof(struct nvm_rq));
rqd.opcode = NVM_OP_ERASE;
- rqd.end_io = nvm_end_io_sync;
- rqd.private = &wait;
rqd.flags = geo->plane_mode >> 1;
- ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
+ ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
if (ret)
return ret;
- ret = nvm_submit_io(tgt_dev, &rqd);
+ ret = nvm_submit_io_sync(tgt_dev, &rqd);
if (ret) {
pr_err("rrpr: erase I/O submission failed: %d\n", ret);
goto free_ppa_list;
}
- wait_for_completion_io(&wait);
free_ppa_list:
nvm_free_rqd_ppalist(tgt_dev, &rqd);
@@ -775,57 +832,6 @@ void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
}
EXPORT_SYMBOL(nvm_put_area);
-int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
- const struct ppa_addr *ppas, int nr_ppas, int vblk)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_geo *geo = &tgt_dev->geo;
- int i, plane_cnt, pl_idx;
- struct ppa_addr ppa;
-
- if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
- rqd->nr_ppas = nr_ppas;
- rqd->ppa_addr = ppas[0];
-
- return 0;
- }
-
- rqd->nr_ppas = nr_ppas;
- rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
- if (!rqd->ppa_list) {
- pr_err("nvm: failed to allocate dma memory\n");
- return -ENOMEM;
- }
-
- if (!vblk) {
- for (i = 0; i < nr_ppas; i++)
- rqd->ppa_list[i] = ppas[i];
- } else {
- plane_cnt = geo->plane_mode;
- rqd->nr_ppas *= plane_cnt;
-
- for (i = 0; i < nr_ppas; i++) {
- for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
- ppa = ppas[i];
- ppa.g.pl = pl_idx;
- rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
- }
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(nvm_set_rqd_ppalist);
-
-void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
-{
- if (!rqd->ppa_list)
- return;
-
- nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
-}
-EXPORT_SYMBOL(nvm_free_rqd_ppalist);
-
void nvm_end_io(struct nvm_rq *rqd)
{
struct nvm_tgt_dev *tgt_dev = rqd->dev;
@@ -1177,7 +1183,7 @@ static long nvm_ioctl_info(struct file *file, void __user *arg)
info->version[1] = NVM_VERSION_MINOR;
info->version[2] = NVM_VERSION_PATCH;
- down_write(&nvm_lock);
+ down_write(&nvm_tgtt_lock);
list_for_each_entry(tt, &nvm_tgt_types, list) {
struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
@@ -1190,7 +1196,7 @@ static long nvm_ioctl_info(struct file *file, void __user *arg)
}
info->tgtsize = tgt_iter;
- up_write(&nvm_lock);
+ up_write(&nvm_tgtt_lock);
if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
kfree(info);
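The core.c changes above replace the open-coded end_io/completion pattern with the new exported nvm_submit_io_sync() helper, as the converted nvm_erase_sync() shows. A caller-side sketch (illustration only; error handling and the flags/ppa-list setup that a real target such as pblk needs are omitted) of a synchronous single-PPA erase:

#include <linux/lightnvm.h>
#include <linux/string.h>

static int example_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd;

	memset(&rqd, 0, sizeof(struct nvm_rq));
	rqd.opcode = NVM_OP_ERASE;
	rqd.ppa_addr = ppa;
	rqd.nr_ppas = 1;

	/* blocks in the driver until the device completes the command */
	return nvm_submit_io_sync(tgt_dev, &rqd);
}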
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index 024a8fc93069..0d227ef7d1b9 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -43,8 +43,10 @@ retry:
if (unlikely(!bio_has_data(bio)))
goto out;
- w_ctx.flags = flags;
pblk_ppa_set_empty(&w_ctx.ppa);
+ w_ctx.flags = flags;
+ if (bio->bi_opf & REQ_PREFLUSH)
+ w_ctx.flags |= PBLK_FLUSH_ENTRY;
for (i = 0; i < nr_entries; i++) {
void *data = bio_data(bio);
@@ -73,12 +75,11 @@ out:
* On GC the incoming lbas are not necessarily sequential. Also, some of the
* lbas might not be valid entries, which are marked as empty by the GC thread
*/
-int pblk_write_gc_to_cache(struct pblk *pblk, void *data, u64 *lba_list,
- unsigned int nr_entries, unsigned int nr_rec_entries,
- struct pblk_line *gc_line, unsigned long flags)
+int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
struct pblk_w_ctx w_ctx;
unsigned int bpos, pos;
+ void *data = gc_rq->data;
int i, valid_entries;
/* Update the write buffer head (mem) with the entries that we can
@@ -86,28 +87,29 @@ int pblk_write_gc_to_cache(struct pblk *pblk, void *data, u64 *lba_list,
* rollback from here on.
*/
retry:
- if (!pblk_rb_may_write_gc(&pblk->rwb, nr_rec_entries, &bpos)) {
+ if (!pblk_rb_may_write_gc(&pblk->rwb, gc_rq->secs_to_gc, &bpos)) {
io_schedule();
goto retry;
}
- w_ctx.flags = flags;
+ w_ctx.flags = PBLK_IOTYPE_GC;
pblk_ppa_set_empty(&w_ctx.ppa);
- for (i = 0, valid_entries = 0; i < nr_entries; i++) {
- if (lba_list[i] == ADDR_EMPTY)
+ for (i = 0, valid_entries = 0; i < gc_rq->nr_secs; i++) {
+ if (gc_rq->lba_list[i] == ADDR_EMPTY)
continue;
- w_ctx.lba = lba_list[i];
+ w_ctx.lba = gc_rq->lba_list[i];
pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + valid_entries);
- pblk_rb_write_entry_gc(&pblk->rwb, data, w_ctx, gc_line, pos);
+ pblk_rb_write_entry_gc(&pblk->rwb, data, w_ctx, gc_rq->line,
+ gc_rq->paddr_list[i], pos);
data += PBLK_EXPOSED_PAGE_SIZE;
valid_entries++;
}
- WARN_ONCE(nr_rec_entries != valid_entries,
+ WARN_ONCE(gc_rq->secs_to_gc != valid_entries,
"pblk: inconsistent GC write\n");
#ifdef CONFIG_NVM_DEBUG
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 81501644fb15..ce90213a42fa 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -18,6 +18,31 @@
#include "pblk.h"
+static void pblk_line_mark_bb(struct work_struct *work)
+{
+ struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
+ ws);
+ struct pblk *pblk = line_ws->pblk;
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct ppa_addr *ppa = line_ws->priv;
+ int ret;
+
+ ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
+ if (ret) {
+ struct pblk_line *line;
+ int pos;
+
+ line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
+ pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
+
+ pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
+ line->id, pos);
+ }
+
+ kfree(ppa);
+ mempool_free(line_ws, pblk->gen_ws_pool);
+}
+
static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
struct ppa_addr *ppa)
{
@@ -33,7 +58,8 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
line->id, pos);
- pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb, pblk->bb_wq);
+ pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
+ GFP_ATOMIC, pblk->bb_wq);
}
static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
@@ -63,7 +89,7 @@ static void pblk_end_io_erase(struct nvm_rq *rqd)
struct pblk *pblk = rqd->private;
__pblk_end_io_erase(pblk, rqd);
- mempool_free(rqd, pblk->g_rq_pool);
+ mempool_free(rqd, pblk->e_rq_pool);
}
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
@@ -77,11 +103,7 @@ void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
* that newer updates are not overwritten.
*/
spin_lock(&line->lock);
- if (line->state == PBLK_LINESTATE_GC ||
- line->state == PBLK_LINESTATE_FREE) {
- spin_unlock(&line->lock);
- return;
- }
+ WARN_ON(line->state == PBLK_LINESTATE_FREE);
if (test_and_set_bit(paddr, line->invalid_bitmap)) {
WARN_ONCE(1, "pblk: double invalidate\n");
@@ -98,8 +120,7 @@ void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
spin_lock(&l_mg->gc_lock);
spin_lock(&line->lock);
/* Prevent moving a line that has just been chosen for GC */
- if (line->state == PBLK_LINESTATE_GC ||
- line->state == PBLK_LINESTATE_FREE) {
+ if (line->state == PBLK_LINESTATE_GC) {
spin_unlock(&line->lock);
spin_unlock(&l_mg->gc_lock);
return;
@@ -150,17 +171,25 @@ static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
spin_unlock(&pblk->trans_lock);
}
-struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
+/* Caller must guarantee that the request is a valid type */
+struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
mempool_t *pool;
struct nvm_rq *rqd;
int rq_size;
- if (rw == WRITE) {
+ switch (type) {
+ case PBLK_WRITE:
+ case PBLK_WRITE_INT:
pool = pblk->w_rq_pool;
rq_size = pblk_w_rq_size;
- } else {
- pool = pblk->g_rq_pool;
+ break;
+ case PBLK_READ:
+ pool = pblk->r_rq_pool;
+ rq_size = pblk_g_rq_size;
+ break;
+ default:
+ pool = pblk->e_rq_pool;
rq_size = pblk_g_rq_size;
}
@@ -170,15 +199,30 @@ struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
return rqd;
}
-void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
+/* Typically used on completion path. Cannot guarantee request consistency */
+void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
+ struct nvm_tgt_dev *dev = pblk->dev;
mempool_t *pool;
- if (rw == WRITE)
+ switch (type) {
+ case PBLK_WRITE:
+ kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
+ case PBLK_WRITE_INT:
pool = pblk->w_rq_pool;
- else
- pool = pblk->g_rq_pool;
+ break;
+ case PBLK_READ:
+ pool = pblk->r_rq_pool;
+ break;
+ case PBLK_ERASE:
+ pool = pblk->e_rq_pool;
+ break;
+ default:
+ pr_err("pblk: trying to free unknown rqd type\n");
+ return;
+ }
+ nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
mempool_free(rqd, pool);
}
@@ -190,10 +234,9 @@ void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
WARN_ON(off + nr_pages != bio->bi_vcnt);
- bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
for (i = off; i < nr_pages + off; i++) {
bv = bio->bi_io_vec[i];
- mempool_free(bv.bv_page, pblk->page_pool);
+ mempool_free(bv.bv_page, pblk->page_bio_pool);
}
}
@@ -205,14 +248,12 @@ int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
int i, ret;
for (i = 0; i < nr_pages; i++) {
- page = mempool_alloc(pblk->page_pool, flags);
- if (!page)
- goto err;
+ page = mempool_alloc(pblk->page_bio_pool, flags);
ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
if (ret != PBLK_EXPOSED_PAGE_SIZE) {
pr_err("pblk: could not add page to bio\n");
- mempool_free(page, pblk->page_pool);
+ mempool_free(page, pblk->page_bio_pool);
goto err;
}
}
@@ -245,13 +286,6 @@ void pblk_write_should_kick(struct pblk *pblk)
pblk_write_kick(pblk);
}
-void pblk_end_bio_sync(struct bio *bio)
-{
- struct completion *waiting = bio->bi_private;
-
- complete(waiting);
-}
-
void pblk_end_io_sync(struct nvm_rq *rqd)
{
struct completion *waiting = rqd->private;
@@ -259,7 +293,7 @@ void pblk_end_io_sync(struct nvm_rq *rqd)
complete(waiting);
}
-void pblk_wait_for_meta(struct pblk *pblk)
+static void pblk_wait_for_meta(struct pblk *pblk)
{
do {
if (!atomic_read(&pblk->inflight_io))
@@ -336,17 +370,6 @@ void pblk_discard(struct pblk *pblk, struct bio *bio)
pblk_invalidate_range(pblk, slba, nr_secs);
}
-struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
-{
- struct ppa_addr ppa;
-
- spin_lock(&pblk->trans_lock);
- ppa = pblk_trans_map_get(pblk, lba);
- spin_unlock(&pblk->trans_lock);
-
- return ppa;
-}
-
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
atomic_long_inc(&pblk->write_failed);
@@ -389,39 +412,38 @@ int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
struct nvm_tgt_dev *dev = pblk->dev;
#ifdef CONFIG_NVM_DEBUG
- struct ppa_addr *ppa_list;
+ int ret;
- ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
- if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
- WARN_ON(1);
- return -EINVAL;
- }
+ ret = pblk_check_io(pblk, rqd);
+ if (ret)
+ return ret;
+#endif
- if (rqd->opcode == NVM_OP_PWRITE) {
- struct pblk_line *line;
- struct ppa_addr ppa;
- int i;
+ atomic_inc(&pblk->inflight_io);
- for (i = 0; i < rqd->nr_ppas; i++) {
- ppa = ppa_list[i];
- line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+ return nvm_submit_io(dev, rqd);
+}
- spin_lock(&line->lock);
- if (line->state != PBLK_LINESTATE_OPEN) {
- pr_err("pblk: bad ppa: line:%d,state:%d\n",
- line->id, line->state);
- WARN_ON(1);
- spin_unlock(&line->lock);
- return -EINVAL;
- }
- spin_unlock(&line->lock);
- }
- }
+int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+
+#ifdef CONFIG_NVM_DEBUG
+ int ret;
+
+ ret = pblk_check_io(pblk, rqd);
+ if (ret)
+ return ret;
#endif
atomic_inc(&pblk->inflight_io);
- return nvm_submit_io(dev, rqd);
+ return nvm_submit_io_sync(dev, rqd);
+}
+
+static void pblk_bio_map_addr_endio(struct bio *bio)
+{
+ bio_put(bio);
}
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
@@ -460,6 +482,8 @@ struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
kaddr += PAGE_SIZE;
}
+
+ bio->bi_end_io = pblk_bio_map_addr_endio;
out:
return bio;
}
@@ -486,12 +510,14 @@ void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
u64 addr;
int i;
+ spin_lock(&line->lock);
addr = find_next_zero_bit(line->map_bitmap,
pblk->lm.sec_per_line, line->cur_sec);
line->cur_sec = addr - nr_secs;
for (i = 0; i < nr_secs; i++, line->cur_sec--)
WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
+ spin_unlock(&line->lock);
}
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
@@ -565,12 +591,11 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
int cmd_op, bio_op;
int i, j;
int ret;
- DECLARE_COMPLETION_ONSTACK(wait);
- if (dir == WRITE) {
+ if (dir == PBLK_WRITE) {
bio_op = REQ_OP_WRITE;
cmd_op = NVM_OP_PWRITE;
- } else if (dir == READ) {
+ } else if (dir == PBLK_READ) {
bio_op = REQ_OP_READ;
cmd_op = NVM_OP_PREAD;
} else
@@ -607,13 +632,11 @@ next_rq:
rqd.dma_ppa_list = dma_ppa_list;
rqd.opcode = cmd_op;
rqd.nr_ppas = rq_ppas;
- rqd.end_io = pblk_end_io_sync;
- rqd.private = &wait;
- if (dir == WRITE) {
+ if (dir == PBLK_WRITE) {
struct pblk_sec_meta *meta_list = rqd.meta_list;
- rqd.flags = pblk_set_progr_mode(pblk, WRITE);
+ rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
for (i = 0; i < rqd.nr_ppas; ) {
spin_lock(&line->lock);
paddr = __pblk_alloc_page(pblk, line, min);
@@ -662,25 +685,17 @@ next_rq:
}
}
- ret = pblk_submit_io(pblk, &rqd);
+ ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
pr_err("pblk: emeta I/O submission failed: %d\n", ret);
bio_put(bio);
goto free_rqd_dma;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: emeta I/O timed out\n");
- }
atomic_dec(&pblk->inflight_io);
- reinit_completion(&wait);
-
- if (likely(pblk->l_mg.emeta_alloc_type == PBLK_VMALLOC_META))
- bio_put(bio);
if (rqd.error) {
- if (dir == WRITE)
+ if (dir == PBLK_WRITE)
pblk_log_write_err(pblk, &rqd);
else
pblk_log_read_err(pblk, &rqd);
@@ -721,14 +736,13 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
int i, ret;
int cmd_op, bio_op;
int flags;
- DECLARE_COMPLETION_ONSTACK(wait);
- if (dir == WRITE) {
+ if (dir == PBLK_WRITE) {
bio_op = REQ_OP_WRITE;
cmd_op = NVM_OP_PWRITE;
- flags = pblk_set_progr_mode(pblk, WRITE);
+ flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
lba_list = emeta_to_lbas(pblk, line->emeta->buf);
- } else if (dir == READ) {
+ } else if (dir == PBLK_READ) {
bio_op = REQ_OP_READ;
cmd_op = NVM_OP_PREAD;
flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
@@ -758,15 +772,13 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
rqd.opcode = cmd_op;
rqd.flags = flags;
rqd.nr_ppas = lm->smeta_sec;
- rqd.end_io = pblk_end_io_sync;
- rqd.private = &wait;
for (i = 0; i < lm->smeta_sec; i++, paddr++) {
struct pblk_sec_meta *meta_list = rqd.meta_list;
rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
- if (dir == WRITE) {
+ if (dir == PBLK_WRITE) {
__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
meta_list[i].lba = lba_list[paddr] = addr_empty;
@@ -778,21 +790,17 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
* the write thread is the only one sending write and erase commands,
* there is no need to take the LUN semaphore.
*/
- ret = pblk_submit_io(pblk, &rqd);
+ ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
pr_err("pblk: smeta I/O submission failed: %d\n", ret);
bio_put(bio);
goto free_ppa_list;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: smeta I/O timed out\n");
- }
atomic_dec(&pblk->inflight_io);
if (rqd.error) {
- if (dir == WRITE)
+ if (dir == PBLK_WRITE)
pblk_log_write_err(pblk, &rqd);
else
pblk_log_read_err(pblk, &rqd);
@@ -808,14 +816,14 @@ int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
u64 bpaddr = pblk_line_smeta_start(pblk, line);
- return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
+ return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ);
}
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
void *emeta_buf)
{
return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
- line->emeta_ssec, READ);
+ line->emeta_ssec, PBLK_READ);
}
static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -824,7 +832,7 @@ static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
rqd->opcode = NVM_OP_ERASE;
rqd->ppa_addr = ppa;
rqd->nr_ppas = 1;
- rqd->flags = pblk_set_progr_mode(pblk, ERASE);
+ rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
rqd->bio = NULL;
}
@@ -832,19 +840,15 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
struct nvm_rq rqd;
int ret = 0;
- DECLARE_COMPLETION_ONSTACK(wait);
memset(&rqd, 0, sizeof(struct nvm_rq));
pblk_setup_e_rq(pblk, &rqd, ppa);
- rqd.end_io = pblk_end_io_sync;
- rqd.private = &wait;
-
/* The write thread schedules erases so that it minimizes disturbances
* with writes. Thus, there is no need to take the LUN semaphore.
*/
- ret = pblk_submit_io(pblk, &rqd);
+ ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
@@ -857,11 +861,6 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
goto out;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: sync erase timed out\n");
- }
-
out:
rqd.private = pblk;
__pblk_end_io_erase(pblk, &rqd);
@@ -976,7 +975,7 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
smeta_buf->header.id = cpu_to_le32(line->id);
smeta_buf->header.type = cpu_to_le16(line->type);
- smeta_buf->header.version = cpu_to_le16(1);
+ smeta_buf->header.version = SMETA_VERSION;
/* Start metadata */
smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
@@ -1046,7 +1045,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
line->smeta_ssec = off;
line->cur_sec = off + lm->smeta_sec;
- if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
+ if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
pr_debug("pblk: line smeta I/O failed. Retry\n");
return 1;
}
@@ -1056,7 +1055,6 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
/* Mark emeta metadata sectors as bad sectors. We need to consider bad
* blocks to make sure that there are enough sectors to store emeta
*/
- bit = lm->sec_per_line;
off = lm->sec_per_line - lm->emeta_sec[0];
bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
while (nr_bb) {
@@ -1093,25 +1091,21 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
struct pblk_line_meta *lm = &pblk->lm;
int blk_in_line = atomic_read(&line->blk_in_line);
- line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
+ line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_ATOMIC);
if (!line->map_bitmap)
return -ENOMEM;
- memset(line->map_bitmap, 0, lm->sec_bitmap_len);
- /* invalid_bitmap is special since it is used when line is closed. No
- * need to zeroized; it will be initialized using bb info form
- * map_bitmap
- */
- line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
+ /* will be initialized using bb info from map_bitmap */
+ line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_ATOMIC);
if (!line->invalid_bitmap) {
- mempool_free(line->map_bitmap, pblk->line_meta_pool);
+ kfree(line->map_bitmap);
return -ENOMEM;
}
spin_lock(&line->lock);
if (line->state != PBLK_LINESTATE_FREE) {
- mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
- mempool_free(line->map_bitmap, pblk->line_meta_pool);
+ kfree(line->map_bitmap);
+ kfree(line->invalid_bitmap);
spin_unlock(&line->lock);
WARN(1, "pblk: corrupted line %d, state %d\n",
line->id, line->state);
@@ -1163,7 +1157,7 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
- mempool_free(line->map_bitmap, pblk->line_meta_pool);
+ kfree(line->map_bitmap);
line->map_bitmap = NULL;
line->smeta = NULL;
line->emeta = NULL;
@@ -1328,6 +1322,41 @@ static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
pblk->state = PBLK_STATE_STOPPING;
}
+static void pblk_line_close_meta_sync(struct pblk *pblk)
+{
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_line_meta *lm = &pblk->lm;
+ struct pblk_line *line, *tline;
+ LIST_HEAD(list);
+
+ spin_lock(&l_mg->close_lock);
+ if (list_empty(&l_mg->emeta_list)) {
+ spin_unlock(&l_mg->close_lock);
+ return;
+ }
+
+ list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
+ spin_unlock(&l_mg->close_lock);
+
+ list_for_each_entry_safe(line, tline, &list, list) {
+ struct pblk_emeta *emeta = line->emeta;
+
+ while (emeta->mem < lm->emeta_len[0]) {
+ int ret;
+
+ ret = pblk_submit_meta_io(pblk, line);
+ if (ret) {
+ pr_err("pblk: sync meta line %d failed (%d)\n",
+ line->id, ret);
+ return;
+ }
+ }
+ }
+
+ pblk_wait_for_meta(pblk);
+ flush_workqueue(pblk->close_wq);
+}
+
void pblk_pipeline_stop(struct pblk *pblk)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
@@ -1361,17 +1390,17 @@ void pblk_pipeline_stop(struct pblk *pblk)
spin_unlock(&l_mg->free_lock);
}
-void pblk_line_replace_data(struct pblk *pblk)
+struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
- struct pblk_line *cur, *new;
+ struct pblk_line *cur, *new = NULL;
unsigned int left_seblks;
int is_next = 0;
cur = l_mg->data_line;
new = l_mg->data_next;
if (!new)
- return;
+ goto out;
l_mg->data_line = new;
spin_lock(&l_mg->free_lock);
@@ -1379,7 +1408,7 @@ void pblk_line_replace_data(struct pblk *pblk)
l_mg->data_line = NULL;
l_mg->data_next = NULL;
spin_unlock(&l_mg->free_lock);
- return;
+ goto out;
}
pblk_line_setup_metadata(new, l_mg, &pblk->lm);
@@ -1391,7 +1420,7 @@ retry_erase:
/* If line is not fully erased, erase it */
if (atomic_read(&new->left_eblks)) {
if (pblk_line_erase(pblk, new))
- return;
+ goto out;
} else {
io_schedule();
}
@@ -1402,7 +1431,7 @@ retry_setup:
if (!pblk_line_init_metadata(pblk, new, cur)) {
new = pblk_line_retry(pblk, new);
if (!new)
- return;
+ goto out;
goto retry_setup;
}
@@ -1410,7 +1439,7 @@ retry_setup:
if (!pblk_line_init_bb(pblk, new, 1)) {
new = pblk_line_retry(pblk, new);
if (!new)
- return;
+ goto out;
goto retry_setup;
}
@@ -1434,14 +1463,15 @@ retry_setup:
if (is_next)
pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
+
+out:
+ return new;
}
void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
- if (line->map_bitmap)
- mempool_free(line->map_bitmap, pblk->line_meta_pool);
- if (line->invalid_bitmap)
- mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
+ kfree(line->map_bitmap);
+ kfree(line->invalid_bitmap);
*line->vsc = cpu_to_le32(EMPTY_ENTRY);
@@ -1451,11 +1481,10 @@ void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
line->emeta = NULL;
}
-void pblk_line_put(struct kref *ref)
+static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
{
- struct pblk_line *line = container_of(ref, struct pblk_line, ref);
- struct pblk *pblk = line->pblk;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_gc *gc = &pblk->gc;
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_GC);
@@ -1464,6 +1493,8 @@ void pblk_line_put(struct kref *ref)
pblk_line_free(pblk, line);
spin_unlock(&line->lock);
+ atomic_dec(&gc->pipeline_gc);
+
spin_lock(&l_mg->free_lock);
list_add_tail(&line->list, &l_mg->free_list);
l_mg->nr_free_lines++;
@@ -1472,13 +1503,49 @@ void pblk_line_put(struct kref *ref)
pblk_rl_free_lines_inc(&pblk->rl, line);
}
+static void pblk_line_put_ws(struct work_struct *work)
+{
+ struct pblk_line_ws *line_put_ws = container_of(work,
+ struct pblk_line_ws, ws);
+ struct pblk *pblk = line_put_ws->pblk;
+ struct pblk_line *line = line_put_ws->line;
+
+ __pblk_line_put(pblk, line);
+ mempool_free(line_put_ws, pblk->gen_ws_pool);
+}
+
+void pblk_line_put(struct kref *ref)
+{
+ struct pblk_line *line = container_of(ref, struct pblk_line, ref);
+ struct pblk *pblk = line->pblk;
+
+ __pblk_line_put(pblk, line);
+}
+
+void pblk_line_put_wq(struct kref *ref)
+{
+ struct pblk_line *line = container_of(ref, struct pblk_line, ref);
+ struct pblk *pblk = line->pblk;
+ struct pblk_line_ws *line_put_ws;
+
+ line_put_ws = mempool_alloc(pblk->gen_ws_pool, GFP_ATOMIC);
+ if (!line_put_ws)
+ return;
+
+ line_put_ws->pblk = pblk;
+ line_put_ws->line = line;
+ line_put_ws->priv = NULL;
+
+ INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
+ queue_work(pblk->r_end_wq, &line_put_ws->ws);
+}
+
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
struct nvm_rq *rqd;
int err;
- rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
- memset(rqd, 0, pblk_g_rq_size);
+ rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
pblk_setup_e_rq(pblk, rqd, ppa);
@@ -1517,41 +1584,6 @@ int pblk_line_is_full(struct pblk_line *line)
return (line->left_msecs == 0);
}
-void pblk_line_close_meta_sync(struct pblk *pblk)
-{
- struct pblk_line_mgmt *l_mg = &pblk->l_mg;
- struct pblk_line_meta *lm = &pblk->lm;
- struct pblk_line *line, *tline;
- LIST_HEAD(list);
-
- spin_lock(&l_mg->close_lock);
- if (list_empty(&l_mg->emeta_list)) {
- spin_unlock(&l_mg->close_lock);
- return;
- }
-
- list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
- spin_unlock(&l_mg->close_lock);
-
- list_for_each_entry_safe(line, tline, &list, list) {
- struct pblk_emeta *emeta = line->emeta;
-
- while (emeta->mem < lm->emeta_len[0]) {
- int ret;
-
- ret = pblk_submit_meta_io(pblk, line);
- if (ret) {
- pr_err("pblk: sync meta line %d failed (%d)\n",
- line->id, ret);
- return;
- }
- }
- }
-
- pblk_wait_for_meta(pblk);
- flush_workqueue(pblk->close_wq);
-}
-
static void pblk_line_should_sync_meta(struct pblk *pblk)
{
if (pblk_rl_is_limit(&pblk->rl))
@@ -1582,15 +1614,13 @@ void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
list_add_tail(&line->list, move_list);
- mempool_free(line->map_bitmap, pblk->line_meta_pool);
+ kfree(line->map_bitmap);
line->map_bitmap = NULL;
line->smeta = NULL;
line->emeta = NULL;
spin_unlock(&line->lock);
spin_unlock(&l_mg->gc_lock);
-
- pblk_gc_should_kick(pblk);
}
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
@@ -1624,43 +1654,16 @@ void pblk_line_close_ws(struct work_struct *work)
struct pblk_line *line = line_ws->line;
pblk_line_close(pblk, line);
- mempool_free(line_ws, pblk->line_ws_pool);
-}
-
-void pblk_line_mark_bb(struct work_struct *work)
-{
- struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
- ws);
- struct pblk *pblk = line_ws->pblk;
- struct nvm_tgt_dev *dev = pblk->dev;
- struct ppa_addr *ppa = line_ws->priv;
- int ret;
-
- ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
- if (ret) {
- struct pblk_line *line;
- int pos;
-
- line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
- pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
-
- pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
- line->id, pos);
- }
-
- kfree(ppa);
- mempool_free(line_ws, pblk->line_ws_pool);
+ mempool_free(line_ws, pblk->gen_ws_pool);
}
-void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
- void (*work)(struct work_struct *),
+void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
+ void (*work)(struct work_struct *), gfp_t gfp_mask,
struct workqueue_struct *wq)
{
struct pblk_line_ws *line_ws;
- line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
- if (!line_ws)
- return;
+ line_ws = mempool_alloc(pblk->gen_ws_pool, gfp_mask);
line_ws->pblk = pblk;
line_ws->line = line;
@@ -1689,16 +1692,8 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
#endif
ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
- if (ret) {
- switch (ret) {
- case -ETIME:
- pr_err("pblk: lun semaphore timed out\n");
- break;
- case -EINTR:
- pr_err("pblk: lun semaphore timed out\n");
- break;
- }
- }
+ if (ret == -ETIME || ret == -EINTR)
+ pr_err("pblk: taking lun semaphore timed out: err %d\n", -ret);
}
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
@@ -1758,13 +1753,11 @@ void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
rlun = &pblk->luns[bit];
up(&rlun->wr_sem);
}
-
- kfree(lun_bitmap);
}
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
- struct ppa_addr l2p_ppa;
+ struct ppa_addr ppa_l2p;
/* logic error: lba out-of-bounds. Ignore update */
if (!(lba < pblk->rl.nr_secs)) {
@@ -1773,10 +1766,10 @@ void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
}
spin_lock(&pblk->trans_lock);
- l2p_ppa = pblk_trans_map_get(pblk, lba);
+ ppa_l2p = pblk_trans_map_get(pblk, lba);
- if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
- pblk_map_invalidate(pblk, l2p_ppa);
+ if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
+ pblk_map_invalidate(pblk, ppa_l2p);
pblk_trans_map_set(pblk, lba, ppa);
spin_unlock(&pblk->trans_lock);
@@ -1784,6 +1777,7 @@ void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
+
#ifdef CONFIG_NVM_DEBUG
/* Callers must ensure that the ppa points to a cache address */
BUG_ON(!pblk_addr_in_cache(ppa));
@@ -1793,16 +1787,16 @@ void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
pblk_update_map(pblk, lba, ppa);
}
-int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
- struct pblk_line *gc_line)
+int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
+ struct pblk_line *gc_line, u64 paddr_gc)
{
- struct ppa_addr l2p_ppa;
+ struct ppa_addr ppa_l2p, ppa_gc;
int ret = 1;
#ifdef CONFIG_NVM_DEBUG
/* Callers must ensure that the ppa points to a cache address */
- BUG_ON(!pblk_addr_in_cache(ppa));
- BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
+ BUG_ON(!pblk_addr_in_cache(ppa_new));
+ BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif
/* logic error: lba out-of-bounds. Ignore update */
@@ -1812,36 +1806,41 @@ int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
}
spin_lock(&pblk->trans_lock);
- l2p_ppa = pblk_trans_map_get(pblk, lba);
+ ppa_l2p = pblk_trans_map_get(pblk, lba);
+ ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
+
+ if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
+ spin_lock(&gc_line->lock);
+ WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
+ "pblk: corrupted GC update");
+ spin_unlock(&gc_line->lock);
- /* Prevent updated entries to be overwritten by GC */
- if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
- pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
ret = 0;
goto out;
}
- pblk_trans_map_set(pblk, lba, ppa);
+ pblk_trans_map_set(pblk, lba, ppa_new);
out:
spin_unlock(&pblk->trans_lock);
return ret;
}
-void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
- struct ppa_addr entry_line)
+void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
+ struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
- struct ppa_addr l2p_line;
+ struct ppa_addr ppa_l2p;
#ifdef CONFIG_NVM_DEBUG
/* Callers must ensure that the ppa points to a device address */
- BUG_ON(pblk_addr_in_cache(ppa));
+ BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
/* Invalidate and discard padded entries */
if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->padded_wb);
#endif
- pblk_map_invalidate(pblk, ppa);
+ if (!pblk_ppa_empty(ppa_mapped))
+ pblk_map_invalidate(pblk, ppa_mapped);
return;
}
@@ -1852,22 +1851,22 @@ void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
}
spin_lock(&pblk->trans_lock);
- l2p_line = pblk_trans_map_get(pblk, lba);
+ ppa_l2p = pblk_trans_map_get(pblk, lba);
/* Do not update L2P if the cacheline has been updated. In this case,
* the mapped ppa must be invalidated
*/
- if (l2p_line.ppa != entry_line.ppa) {
- if (!pblk_ppa_empty(ppa))
- pblk_map_invalidate(pblk, ppa);
+ if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
+ if (!pblk_ppa_empty(ppa_mapped))
+ pblk_map_invalidate(pblk, ppa_mapped);
goto out;
}
#ifdef CONFIG_NVM_DEBUG
- WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
+ WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif
- pblk_trans_map_set(pblk, lba, ppa);
+ pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
spin_unlock(&pblk->trans_lock);
}
@@ -1878,23 +1877,32 @@ void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
int i;
spin_lock(&pblk->trans_lock);
- for (i = 0; i < nr_secs; i++)
- ppas[i] = pblk_trans_map_get(pblk, blba + i);
+ for (i = 0; i < nr_secs; i++) {
+ struct ppa_addr ppa;
+
+ ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
+
+ /* If the L2P entry maps to a line, the reference is valid */
+ if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
+ int line_id = pblk_dev_ppa_to_line(ppa);
+ struct pblk_line *line = &pblk->lines[line_id];
+
+ kref_get(&line->ref);
+ }
+ }
spin_unlock(&pblk->trans_lock);
}
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
u64 *lba_list, int nr_secs)
{
- sector_t lba;
+ u64 lba;
int i;
spin_lock(&pblk->trans_lock);
for (i = 0; i < nr_secs; i++) {
lba = lba_list[i];
- if (lba == ADDR_EMPTY) {
- ppas[i].ppa = ADDR_EMPTY;
- } else {
+ if (lba != ADDR_EMPTY) {
/* logic error: lba out-of-bounds. Ignore update */
if (!(lba < pblk->rl.nr_secs)) {
WARN(1, "pblk: corrupted L2P map request\n");
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 6090d28f7995..00d5698d64a9 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -20,7 +20,8 @@
static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
- vfree(gc_rq->data);
+ if (gc_rq->data)
+ vfree(gc_rq->data);
kfree(gc_rq);
}
@@ -41,10 +42,7 @@ static int pblk_gc_write(struct pblk *pblk)
spin_unlock(&gc->w_lock);
list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
- pblk_write_gc_to_cache(pblk, gc_rq->data, gc_rq->lba_list,
- gc_rq->nr_secs, gc_rq->secs_to_gc,
- gc_rq->line, PBLK_IOTYPE_GC);
-
+ pblk_write_gc_to_cache(pblk, gc_rq);
list_del(&gc_rq->list);
kref_put(&gc_rq->line->ref, pblk_line_put);
pblk_gc_free_gc_rq(gc_rq);
@@ -58,42 +56,59 @@ static void pblk_gc_writer_kick(struct pblk_gc *gc)
wake_up_process(gc->gc_writer_ts);
}
-/*
- * Responsible for managing all memory related to a gc request. Also in case of
- * failure
- */
-static int pblk_gc_move_valid_secs(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
+static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
+{
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct list_head *move_list;
+
+ spin_lock(&line->lock);
+ WARN_ON(line->state != PBLK_LINESTATE_GC);
+ line->state = PBLK_LINESTATE_CLOSED;
+ move_list = pblk_line_gc_list(pblk, line);
+ spin_unlock(&line->lock);
+
+ if (move_list) {
+ spin_lock(&l_mg->gc_lock);
+ list_add_tail(&line->list, move_list);
+ spin_unlock(&l_mg->gc_lock);
+ }
+}
+
+static void pblk_gc_line_ws(struct work_struct *work)
{
+ struct pblk_line_ws *gc_rq_ws = container_of(work,
+ struct pblk_line_ws, ws);
+ struct pblk *pblk = gc_rq_ws->pblk;
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct pblk_gc *gc = &pblk->gc;
- struct pblk_line *line = gc_rq->line;
- void *data;
- unsigned int secs_to_gc;
- int ret = 0;
+ struct pblk_line *line = gc_rq_ws->line;
+ struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
+ int ret;
- data = vmalloc(gc_rq->nr_secs * geo->sec_size);
- if (!data) {
- ret = -ENOMEM;
+ up(&gc->gc_sem);
+
+ gc_rq->data = vmalloc(gc_rq->nr_secs * geo->sec_size);
+ if (!gc_rq->data) {
+ pr_err("pblk: could not GC line:%d (%d/%d)\n",
+ line->id, *line->vsc, gc_rq->nr_secs);
goto out;
}
/* Read from GC victim block */
- if (pblk_submit_read_gc(pblk, gc_rq->lba_list, data, gc_rq->nr_secs,
- &secs_to_gc, line)) {
- ret = -EFAULT;
- goto free_data;
+ ret = pblk_submit_read_gc(pblk, gc_rq);
+ if (ret) {
+ pr_err("pblk: failed GC read in line:%d (err:%d)\n",
+ line->id, ret);
+ goto out;
}
- if (!secs_to_gc)
- goto free_rq;
-
- gc_rq->data = data;
- gc_rq->secs_to_gc = secs_to_gc;
+ if (!gc_rq->secs_to_gc)
+ goto out;
retry:
spin_lock(&gc->w_lock);
- if (gc->w_entries >= PBLK_GC_W_QD) {
+ if (gc->w_entries >= PBLK_GC_RQ_QD) {
spin_unlock(&gc->w_lock);
pblk_gc_writer_kick(&pblk->gc);
usleep_range(128, 256);
@@ -105,53 +120,13 @@ retry:
pblk_gc_writer_kick(&pblk->gc);
- return 0;
+ kfree(gc_rq_ws);
+ return;
-free_rq:
- kfree(gc_rq);
-free_data:
- vfree(data);
out:
+ pblk_gc_free_gc_rq(gc_rq);
kref_put(&line->ref, pblk_line_put);
- return ret;
-}
-
-static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
-{
- struct pblk_line_mgmt *l_mg = &pblk->l_mg;
- struct list_head *move_list;
-
- spin_lock(&line->lock);
- WARN_ON(line->state != PBLK_LINESTATE_GC);
- line->state = PBLK_LINESTATE_CLOSED;
- move_list = pblk_line_gc_list(pblk, line);
- spin_unlock(&line->lock);
-
- if (move_list) {
- spin_lock(&l_mg->gc_lock);
- list_add_tail(&line->list, move_list);
- spin_unlock(&l_mg->gc_lock);
- }
-}
-
-static void pblk_gc_line_ws(struct work_struct *work)
-{
- struct pblk_line_ws *line_rq_ws = container_of(work,
- struct pblk_line_ws, ws);
- struct pblk *pblk = line_rq_ws->pblk;
- struct pblk_gc *gc = &pblk->gc;
- struct pblk_line *line = line_rq_ws->line;
- struct pblk_gc_rq *gc_rq = line_rq_ws->priv;
-
- up(&gc->gc_sem);
-
- if (pblk_gc_move_valid_secs(pblk, gc_rq)) {
- pr_err("pblk: could not GC all sectors: line:%d (%d/%d)\n",
- line->id, *line->vsc,
- gc_rq->nr_secs);
- }
-
- mempool_free(line_rq_ws, pblk->line_ws_pool);
+ kfree(gc_rq_ws);
}
static void pblk_gc_line_prepare_ws(struct work_struct *work)
@@ -164,17 +139,24 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
struct pblk_line_meta *lm = &pblk->lm;
struct pblk_gc *gc = &pblk->gc;
struct line_emeta *emeta_buf;
- struct pblk_line_ws *line_rq_ws;
+ struct pblk_line_ws *gc_rq_ws;
struct pblk_gc_rq *gc_rq;
__le64 *lba_list;
+ unsigned long *invalid_bitmap;
int sec_left, nr_secs, bit;
int ret;
+ invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
+ if (!invalid_bitmap) {
+ pr_err("pblk: could not allocate GC invalid bitmap\n");
+ goto fail_free_ws;
+ }
+
emeta_buf = pblk_malloc(lm->emeta_len[0], l_mg->emeta_alloc_type,
GFP_KERNEL);
if (!emeta_buf) {
pr_err("pblk: cannot use GC emeta\n");
- return;
+ goto fail_free_bitmap;
}
ret = pblk_line_read_emeta(pblk, line, emeta_buf);
@@ -193,7 +175,11 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
goto fail_free_emeta;
}
+ spin_lock(&line->lock);
+ bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
sec_left = pblk_line_vsc(line);
+ spin_unlock(&line->lock);
+
if (sec_left < 0) {
pr_err("pblk: corrupted GC line (%d)\n", line->id);
goto fail_free_emeta;
@@ -207,11 +193,12 @@ next_rq:
nr_secs = 0;
do {
- bit = find_next_zero_bit(line->invalid_bitmap, lm->sec_per_line,
+ bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
bit + 1);
if (bit > line->emeta_ssec)
break;
+ gc_rq->paddr_list[nr_secs] = bit;
gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
} while (nr_secs < pblk->max_write_pgs);
@@ -223,19 +210,25 @@ next_rq:
gc_rq->nr_secs = nr_secs;
gc_rq->line = line;
- line_rq_ws = mempool_alloc(pblk->line_ws_pool, GFP_KERNEL);
- if (!line_rq_ws)
+ gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
+ if (!gc_rq_ws)
goto fail_free_gc_rq;
- line_rq_ws->pblk = pblk;
- line_rq_ws->line = line;
- line_rq_ws->priv = gc_rq;
+ gc_rq_ws->pblk = pblk;
+ gc_rq_ws->line = line;
+ gc_rq_ws->priv = gc_rq;
+
+ /* The write GC path can be much slower than the read GC one due to
+ * the budget imposed by the rate-limiter. Balance in case that we get
+ * back pressure from the write GC path.
+ */
+ while (down_timeout(&gc->gc_sem, msecs_to_jiffies(30000)))
+ io_schedule();
- down(&gc->gc_sem);
kref_get(&line->ref);
- INIT_WORK(&line_rq_ws->ws, pblk_gc_line_ws);
- queue_work(gc->gc_line_reader_wq, &line_rq_ws->ws);
+ INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
+ queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);
sec_left -= nr_secs;
if (sec_left > 0)
@@ -243,10 +236,11 @@ next_rq:
out:
pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
- mempool_free(line_ws, pblk->line_ws_pool);
+ kfree(line_ws);
+ kfree(invalid_bitmap);
kref_put(&line->ref, pblk_line_put);
- atomic_dec(&gc->inflight_gc);
+ atomic_dec(&gc->read_inflight_gc);
return;
@@ -254,10 +248,14 @@ fail_free_gc_rq:
kfree(gc_rq);
fail_free_emeta:
pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
+fail_free_bitmap:
+ kfree(invalid_bitmap);
+fail_free_ws:
+ kfree(line_ws);
+
pblk_put_line_back(pblk, line);
kref_put(&line->ref, pblk_line_put);
- mempool_free(line_ws, pblk->line_ws_pool);
- atomic_dec(&gc->inflight_gc);
+ atomic_dec(&gc->read_inflight_gc);
pr_err("pblk: Failed to GC line %d\n", line->id);
}
@@ -269,19 +267,40 @@ static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
pr_debug("pblk: line '%d' being reclaimed for GC\n", line->id);
- line_ws = mempool_alloc(pblk->line_ws_pool, GFP_KERNEL);
+ line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
if (!line_ws)
return -ENOMEM;
line_ws->pblk = pblk;
line_ws->line = line;
+ atomic_inc(&gc->pipeline_gc);
INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
queue_work(gc->gc_reader_wq, &line_ws->ws);
return 0;
}
+static void pblk_gc_reader_kick(struct pblk_gc *gc)
+{
+ wake_up_process(gc->gc_reader_ts);
+}
+
+static void pblk_gc_kick(struct pblk *pblk)
+{
+ struct pblk_gc *gc = &pblk->gc;
+
+ pblk_gc_writer_kick(gc);
+ pblk_gc_reader_kick(gc);
+
+ /* If we're shutting down GC, let's not start it up again */
+ if (gc->gc_enabled) {
+ wake_up_process(gc->gc_ts);
+ mod_timer(&gc->gc_timer,
+ jiffies + msecs_to_jiffies(GC_TIME_MSECS));
+ }
+}
+
static int pblk_gc_read(struct pblk *pblk)
{
struct pblk_gc *gc = &pblk->gc;
@@ -305,11 +324,6 @@ static int pblk_gc_read(struct pblk *pblk)
return 0;
}
-static void pblk_gc_reader_kick(struct pblk_gc *gc)
-{
- wake_up_process(gc->gc_reader_ts);
-}
-
static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
struct list_head *group_list)
{
@@ -338,26 +352,17 @@ static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
return ((gc->gc_active) && (nr_blocks_need > nr_blocks_free));
}
-/*
- * Lines with no valid sectors will be returned to the free list immediately. If
- * GC is activated - either because the free block count is under the determined
- * threshold, or because it is being forced from user space - only lines with a
- * high count of invalid sectors will be recycled.
- */
-static void pblk_gc_run(struct pblk *pblk)
+void pblk_gc_free_full_lines(struct pblk *pblk)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_gc *gc = &pblk->gc;
struct pblk_line *line;
- struct list_head *group_list;
- bool run_gc;
- int inflight_gc, gc_group = 0, prev_group = 0;
do {
spin_lock(&l_mg->gc_lock);
if (list_empty(&l_mg->gc_full_list)) {
spin_unlock(&l_mg->gc_lock);
- break;
+ return;
}
line = list_first_entry(&l_mg->gc_full_list,
@@ -371,11 +376,30 @@ static void pblk_gc_run(struct pblk *pblk)
list_del(&line->list);
spin_unlock(&l_mg->gc_lock);
+ atomic_inc(&gc->pipeline_gc);
kref_put(&line->ref, pblk_line_put);
} while (1);
+}
+
+/*
+ * Lines with no valid sectors will be returned to the free list immediately. If
+ * GC is activated - either because the free block count is under the determined
+ * threshold, or because it is being forced from user space - only lines with a
+ * high count of invalid sectors will be recycled.
+ */
+static void pblk_gc_run(struct pblk *pblk)
+{
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_gc *gc = &pblk->gc;
+ struct pblk_line *line;
+ struct list_head *group_list;
+ bool run_gc;
+ int read_inflight_gc, gc_group = 0, prev_group = 0;
+
+ pblk_gc_free_full_lines(pblk);
run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
- if (!run_gc || (atomic_read(&gc->inflight_gc) >= PBLK_GC_L_QD))
+ if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
return;
next_gc_group:
@@ -402,14 +426,14 @@ next_gc_group:
list_add_tail(&line->list, &gc->r_list);
spin_unlock(&gc->r_lock);
- inflight_gc = atomic_inc_return(&gc->inflight_gc);
+ read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
pblk_gc_reader_kick(gc);
prev_group = 1;
/* No need to queue up more GC lines than we can handle */
run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
- if (!run_gc || inflight_gc >= PBLK_GC_L_QD)
+ if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
break;
} while (1);
@@ -418,16 +442,6 @@ next_gc_group:
goto next_gc_group;
}
-void pblk_gc_kick(struct pblk *pblk)
-{
- struct pblk_gc *gc = &pblk->gc;
-
- wake_up_process(gc->gc_ts);
- pblk_gc_writer_kick(gc);
- pblk_gc_reader_kick(gc);
- mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
-}
-
static void pblk_gc_timer(unsigned long data)
{
struct pblk *pblk = (struct pblk *)data;
@@ -465,6 +479,7 @@ static int pblk_gc_writer_ts(void *data)
static int pblk_gc_reader_ts(void *data)
{
struct pblk *pblk = data;
+ struct pblk_gc *gc = &pblk->gc;
while (!kthread_should_stop()) {
if (!pblk_gc_read(pblk))
@@ -473,6 +488,18 @@ static int pblk_gc_reader_ts(void *data)
io_schedule();
}
+#ifdef CONFIG_NVM_DEBUG
+ pr_info("pblk: flushing gc pipeline, %d lines left\n",
+ atomic_read(&gc->pipeline_gc));
+#endif
+
+ do {
+ if (!atomic_read(&gc->pipeline_gc))
+ break;
+
+ schedule();
+ } while (1);
+
return 0;
}
@@ -486,10 +513,10 @@ void pblk_gc_should_start(struct pblk *pblk)
{
struct pblk_gc *gc = &pblk->gc;
- if (gc->gc_enabled && !gc->gc_active)
+ if (gc->gc_enabled && !gc->gc_active) {
pblk_gc_start(pblk);
-
- pblk_gc_kick(pblk);
+ pblk_gc_kick(pblk);
+ }
}
/*
@@ -510,6 +537,11 @@ void pblk_gc_should_stop(struct pblk *pblk)
pblk_gc_stop(pblk, 0);
}
+void pblk_gc_should_kick(struct pblk *pblk)
+{
+ pblk_rl_update_rates(&pblk->rl);
+}
+
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
int *gc_active)
{
@@ -576,7 +608,8 @@ int pblk_gc_init(struct pblk *pblk)
gc->gc_forced = 0;
gc->gc_enabled = 1;
gc->w_entries = 0;
- atomic_set(&gc->inflight_gc, 0);
+ atomic_set(&gc->read_inflight_gc, 0);
+ atomic_set(&gc->pipeline_gc, 0);
/* Workqueue that reads valid sectors from a line and submit them to the
* GC writer to be recycled.
@@ -602,7 +635,7 @@ int pblk_gc_init(struct pblk *pblk)
spin_lock_init(&gc->w_lock);
spin_lock_init(&gc->r_lock);
- sema_init(&gc->gc_sem, 128);
+ sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);
INIT_LIST_HEAD(&gc->w_list);
INIT_LIST_HEAD(&gc->r_list);
@@ -625,24 +658,24 @@ void pblk_gc_exit(struct pblk *pblk)
{
struct pblk_gc *gc = &pblk->gc;
- flush_workqueue(gc->gc_reader_wq);
- flush_workqueue(gc->gc_line_reader_wq);
-
- del_timer(&gc->gc_timer);
+ gc->gc_enabled = 0;
+ del_timer_sync(&gc->gc_timer);
pblk_gc_stop(pblk, 1);
if (gc->gc_ts)
kthread_stop(gc->gc_ts);
+ if (gc->gc_reader_ts)
+ kthread_stop(gc->gc_reader_ts);
+
+ flush_workqueue(gc->gc_reader_wq);
if (gc->gc_reader_wq)
destroy_workqueue(gc->gc_reader_wq);
+ flush_workqueue(gc->gc_line_reader_wq);
if (gc->gc_line_reader_wq)
destroy_workqueue(gc->gc_line_reader_wq);
if (gc->gc_writer_ts)
kthread_stop(gc->gc_writer_ts);
-
- if (gc->gc_reader_ts)
- kthread_stop(gc->gc_reader_ts);
}
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 1b0f61233c21..f62112ba5482 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -20,8 +20,8 @@
#include "pblk.h"
-static struct kmem_cache *pblk_blk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
- *pblk_w_rq_cache, *pblk_line_meta_cache;
+static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
+ *pblk_w_rq_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;
@@ -46,7 +46,7 @@ static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
* user I/Os. Unless stalled, the rate limiter leaves at least 256KB
* available for user I/O.
*/
- if (unlikely(pblk_get_secs(bio) >= pblk_rl_sysfs_rate_show(&pblk->rl)))
+ if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
blk_queue_split(q, &bio);
return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
@@ -76,6 +76,28 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
+static size_t pblk_trans_map_size(struct pblk *pblk)
+{
+ int entry_size = 8;
+
+ if (pblk->ppaf_bitsize < 32)
+ entry_size = 4;
+
+ return entry_size * pblk->rl.nr_secs;
+}
+
+#ifdef CONFIG_NVM_DEBUG
+static u32 pblk_l2p_crc(struct pblk *pblk)
+{
+ size_t map_size;
+ u32 crc = ~(u32)0;
+
+ map_size = pblk_trans_map_size(pblk);
+ crc = crc32_le(crc, pblk->trans_map, map_size);
+ return crc;
+}
+#endif
+
static void pblk_l2p_free(struct pblk *pblk)
{
vfree(pblk->trans_map);
@@ -85,12 +107,10 @@ static int pblk_l2p_init(struct pblk *pblk)
{
sector_t i;
struct ppa_addr ppa;
- int entry_size = 8;
+ size_t map_size;
- if (pblk->ppaf_bitsize < 32)
- entry_size = 4;
-
- pblk->trans_map = vmalloc(entry_size * pblk->rl.nr_secs);
+ map_size = pblk_trans_map_size(pblk);
+ pblk->trans_map = vmalloc(map_size);
if (!pblk->trans_map)
return -ENOMEM;
@@ -132,7 +152,6 @@ static int pblk_rwb_init(struct pblk *pblk)
}
/* Minimum pages needed within a lun */
-#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64
static int pblk_set_ppaf(struct pblk *pblk)
@@ -182,12 +201,10 @@ static int pblk_set_ppaf(struct pblk *pblk)
static int pblk_init_global_caches(struct pblk *pblk)
{
- char cache_name[PBLK_CACHE_NAME_LEN];
-
down_write(&pblk_lock);
- pblk_blk_ws_cache = kmem_cache_create("pblk_blk_ws",
+ pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
sizeof(struct pblk_line_ws), 0, 0, NULL);
- if (!pblk_blk_ws_cache) {
+ if (!pblk_ws_cache) {
up_write(&pblk_lock);
return -ENOMEM;
}
@@ -195,7 +212,7 @@ static int pblk_init_global_caches(struct pblk *pblk)
pblk_rec_cache = kmem_cache_create("pblk_rec",
sizeof(struct pblk_rec_ctx), 0, 0, NULL);
if (!pblk_rec_cache) {
- kmem_cache_destroy(pblk_blk_ws_cache);
+ kmem_cache_destroy(pblk_ws_cache);
up_write(&pblk_lock);
return -ENOMEM;
}
@@ -203,7 +220,7 @@ static int pblk_init_global_caches(struct pblk *pblk)
pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
0, 0, NULL);
if (!pblk_g_rq_cache) {
- kmem_cache_destroy(pblk_blk_ws_cache);
+ kmem_cache_destroy(pblk_ws_cache);
kmem_cache_destroy(pblk_rec_cache);
up_write(&pblk_lock);
return -ENOMEM;
@@ -212,30 +229,25 @@ static int pblk_init_global_caches(struct pblk *pblk)
pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
0, 0, NULL);
if (!pblk_w_rq_cache) {
- kmem_cache_destroy(pblk_blk_ws_cache);
+ kmem_cache_destroy(pblk_ws_cache);
kmem_cache_destroy(pblk_rec_cache);
kmem_cache_destroy(pblk_g_rq_cache);
up_write(&pblk_lock);
return -ENOMEM;
}
-
- snprintf(cache_name, sizeof(cache_name), "pblk_line_m_%s",
- pblk->disk->disk_name);
- pblk_line_meta_cache = kmem_cache_create(cache_name,
- pblk->lm.sec_bitmap_len, 0, 0, NULL);
- if (!pblk_line_meta_cache) {
- kmem_cache_destroy(pblk_blk_ws_cache);
- kmem_cache_destroy(pblk_rec_cache);
- kmem_cache_destroy(pblk_g_rq_cache);
- kmem_cache_destroy(pblk_w_rq_cache);
- up_write(&pblk_lock);
- return -ENOMEM;
- }
up_write(&pblk_lock);
return 0;
}
+static void pblk_free_global_caches(struct pblk *pblk)
+{
+ kmem_cache_destroy(pblk_ws_cache);
+ kmem_cache_destroy(pblk_rec_cache);
+ kmem_cache_destroy(pblk_g_rq_cache);
+ kmem_cache_destroy(pblk_w_rq_cache);
+}
+
static int pblk_core_init(struct pblk *pblk)
{
struct nvm_tgt_dev *dev = pblk->dev;
@@ -247,70 +259,80 @@ static int pblk_core_init(struct pblk *pblk)
if (pblk_init_global_caches(pblk))
return -ENOMEM;
- pblk->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
- if (!pblk->page_pool)
- return -ENOMEM;
+ /* Internal bios can be at most the sectors signaled by the device. */
+ pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev),
+ 0);
+ if (!pblk->page_bio_pool)
+ goto free_global_caches;
- pblk->line_ws_pool = mempool_create_slab_pool(PBLK_WS_POOL_SIZE,
- pblk_blk_ws_cache);
- if (!pblk->line_ws_pool)
- goto free_page_pool;
+ pblk->gen_ws_pool = mempool_create_slab_pool(PBLK_GEN_WS_POOL_SIZE,
+ pblk_ws_cache);
+ if (!pblk->gen_ws_pool)
+ goto free_page_bio_pool;
pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
if (!pblk->rec_pool)
- goto free_blk_ws_pool;
+ goto free_gen_ws_pool;
- pblk->g_rq_pool = mempool_create_slab_pool(PBLK_READ_REQ_POOL_SIZE,
+ pblk->r_rq_pool = mempool_create_slab_pool(geo->nr_luns,
pblk_g_rq_cache);
- if (!pblk->g_rq_pool)
+ if (!pblk->r_rq_pool)
goto free_rec_pool;
- pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns * 2,
+ pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk_g_rq_cache);
+ if (!pblk->e_rq_pool)
+ goto free_r_rq_pool;
+
+ pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns,
pblk_w_rq_cache);
if (!pblk->w_rq_pool)
- goto free_g_rq_pool;
-
- pblk->line_meta_pool =
- mempool_create_slab_pool(PBLK_META_POOL_SIZE,
- pblk_line_meta_cache);
- if (!pblk->line_meta_pool)
- goto free_w_rq_pool;
+ goto free_e_rq_pool;
pblk->close_wq = alloc_workqueue("pblk-close-wq",
WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
if (!pblk->close_wq)
- goto free_line_meta_pool;
+ goto free_w_rq_pool;
pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (!pblk->bb_wq)
goto free_close_wq;
- if (pblk_set_ppaf(pblk))
+ pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (!pblk->r_end_wq)
goto free_bb_wq;
+ if (pblk_set_ppaf(pblk))
+ goto free_r_end_wq;
+
if (pblk_rwb_init(pblk))
- goto free_bb_wq;
+ goto free_r_end_wq;
INIT_LIST_HEAD(&pblk->compl_list);
return 0;
+free_r_end_wq:
+ destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
destroy_workqueue(pblk->bb_wq);
free_close_wq:
destroy_workqueue(pblk->close_wq);
-free_line_meta_pool:
- mempool_destroy(pblk->line_meta_pool);
free_w_rq_pool:
mempool_destroy(pblk->w_rq_pool);
-free_g_rq_pool:
- mempool_destroy(pblk->g_rq_pool);
+free_e_rq_pool:
+ mempool_destroy(pblk->e_rq_pool);
+free_r_rq_pool:
+ mempool_destroy(pblk->r_rq_pool);
free_rec_pool:
mempool_destroy(pblk->rec_pool);
-free_blk_ws_pool:
- mempool_destroy(pblk->line_ws_pool);
-free_page_pool:
- mempool_destroy(pblk->page_pool);
+free_gen_ws_pool:
+ mempool_destroy(pblk->gen_ws_pool);
+free_page_bio_pool:
+ mempool_destroy(pblk->page_bio_pool);
+free_global_caches:
+ pblk_free_global_caches(pblk);
return -ENOMEM;
}
@@ -319,21 +341,20 @@ static void pblk_core_free(struct pblk *pblk)
if (pblk->close_wq)
destroy_workqueue(pblk->close_wq);
+ if (pblk->r_end_wq)
+ destroy_workqueue(pblk->r_end_wq);
+
if (pblk->bb_wq)
destroy_workqueue(pblk->bb_wq);
- mempool_destroy(pblk->page_pool);
- mempool_destroy(pblk->line_ws_pool);
+ mempool_destroy(pblk->page_bio_pool);
+ mempool_destroy(pblk->gen_ws_pool);
mempool_destroy(pblk->rec_pool);
- mempool_destroy(pblk->g_rq_pool);
+ mempool_destroy(pblk->r_rq_pool);
+ mempool_destroy(pblk->e_rq_pool);
mempool_destroy(pblk->w_rq_pool);
- mempool_destroy(pblk->line_meta_pool);
- kmem_cache_destroy(pblk_blk_ws_cache);
- kmem_cache_destroy(pblk_rec_cache);
- kmem_cache_destroy(pblk_g_rq_cache);
- kmem_cache_destroy(pblk_w_rq_cache);
- kmem_cache_destroy(pblk_line_meta_cache);
+ pblk_free_global_caches(pblk);
}
static void pblk_luns_free(struct pblk *pblk)
@@ -372,13 +393,11 @@ static void pblk_line_meta_free(struct pblk *pblk)
kfree(l_mg->bb_aux);
kfree(l_mg->vsc_list);
- spin_lock(&l_mg->free_lock);
for (i = 0; i < PBLK_DATA_LINES; i++) {
kfree(l_mg->sline_meta[i]);
pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
kfree(l_mg->eline_meta[i]);
}
- spin_unlock(&l_mg->free_lock);
kfree(pblk->lines);
}
@@ -507,6 +526,13 @@ static int pblk_lines_configure(struct pblk *pblk, int flags)
}
}
+#ifdef CONFIG_NVM_DEBUG
+ pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
+#endif
+
+ /* Free full lines directly as GC has not been started yet */
+ pblk_gc_free_full_lines(pblk);
+
if (!line) {
/* Configure next line for user data */
line = pblk_line_get_first_data(pblk);
@@ -630,7 +656,10 @@ static int pblk_lines_alloc_metadata(struct pblk *pblk)
fail_free_emeta:
while (--i >= 0) {
- vfree(l_mg->eline_meta[i]->buf);
+ if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
+ vfree(l_mg->eline_meta[i]->buf);
+ else
+ kfree(l_mg->eline_meta[i]->buf);
kfree(l_mg->eline_meta[i]);
}
@@ -681,8 +710,8 @@ static int pblk_lines_init(struct pblk *pblk)
lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
- lm->high_thrs = lm->sec_per_line / 2;
- lm->mid_thrs = lm->sec_per_line / 4;
+ lm->mid_thrs = lm->sec_per_line / 2;
+ lm->high_thrs = lm->sec_per_line / 4;
lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;
/* Calculate necessary pages for smeta. See comment over struct
@@ -713,9 +742,13 @@ add_emeta_page:
goto add_emeta_page;
}
- lm->emeta_bb = geo->nr_luns - i;
- lm->min_blk_line = 1 + DIV_ROUND_UP(lm->smeta_sec + lm->emeta_sec[0],
- geo->sec_per_blk);
+ lm->emeta_bb = geo->nr_luns > i ? geo->nr_luns - i : 0;
+
+ lm->min_blk_line = 1;
+ if (geo->nr_luns > 1)
+ lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
+ lm->emeta_sec[0], geo->sec_per_blk);
+
if (lm->min_blk_line > lm->blk_per_line) {
pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
lm->blk_per_line);
@@ -890,6 +923,11 @@ static void pblk_exit(void *private)
down_write(&pblk_lock);
pblk_gc_exit(pblk);
pblk_tear_down(pblk);
+
+#ifdef CONFIG_NVM_DEBUG
+ pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
+#endif
+
pblk_free(pblk);
up_write(&pblk_lock);
}
@@ -911,7 +949,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
int ret;
if (dev->identity.dom & NVM_RSP_L2P) {
- pr_err("pblk: device-side L2P table not supported. (%x)\n",
+ pr_err("pblk: host-side L2P table not supported. (%x)\n",
dev->identity.dom);
return ERR_PTR(-EINVAL);
}
@@ -923,6 +961,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
pblk->dev = dev;
pblk->disk = tdisk;
pblk->state = PBLK_STATE_RUNNING;
+ pblk->gc.gc_enabled = 0;
spin_lock_init(&pblk->trans_lock);
spin_lock_init(&pblk->lock);
@@ -944,6 +983,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
atomic_long_set(&pblk->recov_writes, 0);
atomic_long_set(&pblk->recov_writes, 0);
atomic_long_set(&pblk->recov_gc_writes, 0);
+ atomic_long_set(&pblk->recov_gc_reads, 0);
#endif
atomic_long_set(&pblk->read_failed, 0);
@@ -1012,6 +1052,10 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
pblk->rwb.nr_entries);
wake_up_process(pblk->writer_ts);
+
+ /* Check if we need to start GC */
+ pblk_gc_should_kick(pblk);
+
return pblk;
fail_stop_writer:
@@ -1044,6 +1088,7 @@ static struct nvm_tgt_type tt_pblk = {
.sysfs_init = pblk_sysfs_init,
.sysfs_exit = pblk_sysfs_exit,
+ .owner = THIS_MODULE,
};
static int __init pblk_module_init(void)
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index fddb924f6dde..6f3ecde2140f 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -25,16 +25,28 @@ static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
unsigned int valid_secs)
{
struct pblk_line *line = pblk_line_get_data(pblk);
- struct pblk_emeta *emeta = line->emeta;
+ struct pblk_emeta *emeta;
struct pblk_w_ctx *w_ctx;
- __le64 *lba_list = emeta_to_lbas(pblk, emeta->buf);
+ __le64 *lba_list;
u64 paddr;
int nr_secs = pblk->min_write_pgs;
int i;
+ if (pblk_line_is_full(line)) {
+ struct pblk_line *prev_line = line;
+
+ line = pblk_line_replace_data(pblk);
+ pblk_line_close_meta(pblk, prev_line);
+ }
+
+ emeta = line->emeta;
+ lba_list = emeta_to_lbas(pblk, emeta->buf);
+
paddr = pblk_alloc_page(pblk, line, nr_secs);
for (i = 0; i < nr_secs; i++, paddr++) {
+ __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
+
/* ppa to be sent to the device */
ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
@@ -51,22 +63,14 @@ static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
w_ctx->ppa = ppa_list[i];
meta_list[i].lba = cpu_to_le64(w_ctx->lba);
lba_list[paddr] = cpu_to_le64(w_ctx->lba);
- line->nr_valid_lbas++;
+ if (lba_list[paddr] != addr_empty)
+ line->nr_valid_lbas++;
} else {
- __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-
lba_list[paddr] = meta_list[i].lba = addr_empty;
__pblk_map_invalidate(pblk, line, paddr);
}
}
- if (pblk_line_is_full(line)) {
- struct pblk_line *prev_line = line;
-
- pblk_line_replace_data(pblk);
- pblk_line_close_meta(pblk, prev_line);
- }
-
pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap);
}
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 9bc32578a766..b8f78e401482 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -201,8 +201,7 @@ unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
return subm;
}
-static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd,
- unsigned int to_update)
+static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct pblk_line *line;
@@ -213,7 +212,7 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd,
int flags;
for (i = 0; i < to_update; i++) {
- entry = &rb->entries[*l2p_upd];
+ entry = &rb->entries[rb->l2p_update];
w_ctx = &entry->w_ctx;
flags = READ_ONCE(entry->w_ctx.flags);
@@ -230,7 +229,7 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd,
line = &pblk->lines[pblk_tgt_ppa_to_line(w_ctx->ppa)];
kref_put(&line->ref, pblk_line_put);
clean_wctx(w_ctx);
- *l2p_upd = (*l2p_upd + 1) & (rb->nr_entries - 1);
+ rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1);
}
pblk_rl_out(&pblk->rl, user_io, gc_io);
@@ -258,7 +257,7 @@ static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
count = nr_entries - space;
/* l2p_update used exclusively under rb->w_lock */
- ret = __pblk_rb_update_l2p(rb, &rb->l2p_update, count);
+ ret = __pblk_rb_update_l2p(rb, count);
out:
return ret;
@@ -280,7 +279,7 @@ void pblk_rb_sync_l2p(struct pblk_rb *rb)
sync = smp_load_acquire(&rb->sync);
to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
- __pblk_rb_update_l2p(rb, &rb->l2p_update, to_update);
+ __pblk_rb_update_l2p(rb, to_update);
spin_unlock(&rb->w_lock);
}
@@ -325,8 +324,8 @@ void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
}
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
- struct pblk_w_ctx w_ctx, struct pblk_line *gc_line,
- unsigned int ring_pos)
+ struct pblk_w_ctx w_ctx, struct pblk_line *line,
+ u64 paddr, unsigned int ring_pos)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct pblk_rb_entry *entry;
@@ -341,7 +340,7 @@ void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
__pblk_rb_write_entry(rb, data, w_ctx, entry);
- if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, gc_line))
+ if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))
entry->w_ctx.lba = ADDR_EMPTY;
flags = w_ctx.flags | PBLK_WRITTEN_DATA;
@@ -355,7 +354,6 @@ static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
{
struct pblk_rb_entry *entry;
unsigned int subm, sync_point;
- int flags;
subm = READ_ONCE(rb->subm);
@@ -369,12 +367,6 @@ static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
sync_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
entry = &rb->entries[sync_point];
- flags = READ_ONCE(entry->w_ctx.flags);
- flags |= PBLK_FLUSH_ENTRY;
-
- /* Release flags on context. Protect from writes */
- smp_store_release(&entry->w_ctx.flags, flags);
-
/* Protect syncs */
smp_store_release(&rb->sync_point, sync_point);
@@ -454,6 +446,7 @@ static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
/* Protect from read count */
smp_store_release(&rb->mem, mem);
+
return 1;
}
@@ -558,12 +551,13 @@ out:
* persist data on the write buffer to the media.
*/
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
- struct bio *bio, unsigned int pos,
- unsigned int nr_entries, unsigned int count)
+ unsigned int pos, unsigned int nr_entries,
+ unsigned int count)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct request_queue *q = pblk->dev->q;
struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
+ struct bio *bio = rqd->bio;
struct pblk_rb_entry *entry;
struct page *page;
unsigned int pad = 0, to_read = nr_entries;
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index d682e89e6493..ca79d8fb3e60 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -39,21 +39,15 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
}
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned long *read_bitmap)
+ sector_t blba, unsigned long *read_bitmap)
{
+ struct pblk_sec_meta *meta_list = rqd->meta_list;
struct bio *bio = rqd->bio;
struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
- sector_t blba = pblk_get_lba(bio);
int nr_secs = rqd->nr_ppas;
bool advanced_bio = false;
int i, j = 0;
- /* logic error: lba out-of-bounds. Ignore read request */
- if (blba + nr_secs >= pblk->rl.nr_secs) {
- WARN(1, "pblk: read lbas out of bounds\n");
- return;
- }
-
pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);
for (i = 0; i < nr_secs; i++) {
@@ -63,6 +57,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
retry:
if (pblk_ppa_empty(p)) {
WARN_ON(test_and_set_bit(i, read_bitmap));
+ meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
if (unlikely(!advanced_bio)) {
bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
@@ -82,6 +77,7 @@ retry:
goto retry;
}
WARN_ON(test_and_set_bit(i, read_bitmap));
+ meta_list[i].lba = cpu_to_le64(lba);
advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->cache_reads);
@@ -117,10 +113,51 @@ static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
return NVM_IO_OK;
}
-static void pblk_end_io_read(struct nvm_rq *rqd)
+static void pblk_read_check(struct pblk *pblk, struct nvm_rq *rqd,
+ sector_t blba)
+{
+ struct pblk_sec_meta *meta_list = rqd->meta_list;
+ int nr_lbas = rqd->nr_ppas;
+ int i;
+
+ for (i = 0; i < nr_lbas; i++) {
+ u64 lba = le64_to_cpu(meta_list[i].lba);
+
+ if (lba == ADDR_EMPTY)
+ continue;
+
+ WARN(lba != blba + i, "pblk: corrupted read LBA\n");
+ }
+}
+
+static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct ppa_addr *ppa_list;
+ int i;
+
+ ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+
+ for (i = 0; i < rqd->nr_ppas; i++) {
+ struct ppa_addr ppa = ppa_list[i];
+ struct pblk_line *line;
+
+ line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+ kref_put(&line->ref, pblk_line_put_wq);
+ }
+}
+
+static void pblk_end_user_read(struct bio *bio)
+{
+#ifdef CONFIG_NVM_DEBUG
+ WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
+#endif
+ bio_endio(bio);
+ bio_put(bio);
+}
+
+static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
+ bool put_line)
{
- struct pblk *pblk = rqd->private;
- struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct bio *bio = rqd->bio;
@@ -131,47 +168,51 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif
- nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
+ pblk_read_check(pblk, rqd, r_ctx->lba);
bio_put(bio);
- if (r_ctx->private) {
- struct bio *orig_bio = r_ctx->private;
+ if (r_ctx->private)
+ pblk_end_user_read((struct bio *)r_ctx->private);
-#ifdef CONFIG_NVM_DEBUG
- WARN_ONCE(orig_bio->bi_status, "pblk: corrupted read bio\n");
-#endif
- bio_endio(orig_bio);
- bio_put(orig_bio);
- }
+ if (put_line)
+ pblk_read_put_rqd_kref(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif
- pblk_free_rqd(pblk, rqd, READ);
+ pblk_free_rqd(pblk, rqd, PBLK_READ);
atomic_dec(&pblk->inflight_io);
}
+static void pblk_end_io_read(struct nvm_rq *rqd)
+{
+ struct pblk *pblk = rqd->private;
+
+ __pblk_end_io_read(pblk, rqd, true);
+}
+
static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
unsigned int bio_init_idx,
unsigned long *read_bitmap)
{
struct bio *new_bio, *bio = rqd->bio;
+ struct pblk_sec_meta *meta_list = rqd->meta_list;
struct bio_vec src_bv, dst_bv;
void *ppa_ptr = NULL;
void *src_p, *dst_p;
dma_addr_t dma_ppa_list = 0;
+ __le64 *lba_list_mem, *lba_list_media;
int nr_secs = rqd->nr_ppas;
int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
int i, ret, hole;
- DECLARE_COMPLETION_ONSTACK(wait);
+
+ /* Re-use allocated memory for intermediate lbas */
+ lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
+ lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);
new_bio = bio_alloc(GFP_KERNEL, nr_holes);
- if (!new_bio) {
- pr_err("pblk: could not alloc read bio\n");
- return NVM_IO_ERR;
- }
if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
goto err;
@@ -181,34 +222,29 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
goto err;
}
+ for (i = 0; i < nr_secs; i++)
+ lba_list_mem[i] = meta_list[i].lba;
+
new_bio->bi_iter.bi_sector = 0; /* internal bio */
bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
- new_bio->bi_private = &wait;
- new_bio->bi_end_io = pblk_end_bio_sync;
rqd->bio = new_bio;
rqd->nr_ppas = nr_holes;
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
- rqd->end_io = NULL;
- if (unlikely(nr_secs > 1 && nr_holes == 1)) {
+ if (unlikely(nr_holes == 1)) {
ppa_ptr = rqd->ppa_list;
dma_ppa_list = rqd->dma_ppa_list;
rqd->ppa_addr = rqd->ppa_list[0];
}
- ret = pblk_submit_read_io(pblk, rqd);
+ ret = pblk_submit_io_sync(pblk, rqd);
if (ret) {
bio_put(rqd->bio);
- pr_err("pblk: read IO submission failed\n");
+ pr_err("pblk: sync read IO submission failed\n");
goto err;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: partial read I/O timed out\n");
- }
-
if (rqd->error) {
atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
@@ -216,15 +252,31 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
#endif
}
- if (unlikely(nr_secs > 1 && nr_holes == 1)) {
+ if (unlikely(nr_holes == 1)) {
+ struct ppa_addr ppa;
+
+ ppa = rqd->ppa_addr;
rqd->ppa_list = ppa_ptr;
rqd->dma_ppa_list = dma_ppa_list;
+ rqd->ppa_list[0] = ppa;
+ }
+
+ for (i = 0; i < nr_secs; i++) {
+ lba_list_media[i] = meta_list[i].lba;
+ meta_list[i].lba = lba_list_mem[i];
}
/* Fill the holes in the original bio */
i = 0;
hole = find_first_zero_bit(read_bitmap, nr_secs);
do {
+ int line_id = pblk_dev_ppa_to_line(rqd->ppa_list[i]);
+ struct pblk_line *line = &pblk->lines[line_id];
+
+ kref_put(&line->ref, pblk_line_put);
+
+ meta_list[hole].lba = lba_list_media[i];
+
src_bv = new_bio->bi_io_vec[i++];
dst_bv = bio->bi_io_vec[bio_init_idx + hole];
@@ -238,7 +290,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
kunmap_atomic(src_p);
kunmap_atomic(dst_p);
- mempool_free(src_bv.bv_page, pblk->page_pool);
+ mempool_free(src_bv.bv_page, pblk->page_bio_pool);
hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
} while (hole < nr_secs);
@@ -246,34 +298,26 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
bio_put(new_bio);
/* Complete the original bio and associated request */
+ bio_endio(bio);
rqd->bio = bio;
rqd->nr_ppas = nr_secs;
- rqd->private = pblk;
- bio_endio(bio);
- pblk_end_io_read(rqd);
+ __pblk_end_io_read(pblk, rqd, false);
return NVM_IO_OK;
err:
/* Free allocated pages in new bio */
pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
- rqd->private = pblk;
- pblk_end_io_read(rqd);
+ __pblk_end_io_read(pblk, rqd, false);
return NVM_IO_ERR;
}
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned long *read_bitmap)
+ sector_t lba, unsigned long *read_bitmap)
{
+ struct pblk_sec_meta *meta_list = rqd->meta_list;
struct bio *bio = rqd->bio;
struct ppa_addr ppa;
- sector_t lba = pblk_get_lba(bio);
-
- /* logic error: lba out-of-bounds. Ignore read request */
- if (lba >= pblk->rl.nr_secs) {
- WARN(1, "pblk: read lba out of bounds\n");
- return;
- }
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
@@ -284,6 +328,7 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
retry:
if (pblk_ppa_empty(ppa)) {
WARN_ON(test_and_set_bit(0, read_bitmap));
+ meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
return;
}
@@ -295,9 +340,12 @@ retry:
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
goto retry;
}
+
WARN_ON(test_and_set_bit(0, read_bitmap));
+ meta_list[0].lba = cpu_to_le64(lba);
+
#ifdef CONFIG_NVM_DEBUG
- atomic_long_inc(&pblk->cache_reads);
+ atomic_long_inc(&pblk->cache_reads);
#endif
} else {
rqd->ppa_addr = ppa;
@@ -309,22 +357,24 @@ retry:
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ sector_t blba = pblk_get_lba(bio);
unsigned int nr_secs = pblk_get_secs(bio);
+ struct pblk_g_ctx *r_ctx;
struct nvm_rq *rqd;
- unsigned long read_bitmap; /* Max 64 ppas per request */
unsigned int bio_init_idx;
+ unsigned long read_bitmap; /* Max 64 ppas per request */
int ret = NVM_IO_ERR;
- if (nr_secs > PBLK_MAX_REQ_ADDRS)
+ /* logic error: lba out-of-bounds. Ignore read request */
+ if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
+ WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
+ (unsigned long long)blba, nr_secs);
return NVM_IO_ERR;
+ }
bitmap_zero(&read_bitmap, nr_secs);
- rqd = pblk_alloc_rqd(pblk, READ);
- if (IS_ERR(rqd)) {
- pr_err_ratelimited("pblk: not able to alloc rqd");
- return NVM_IO_ERR;
- }
+ rqd = pblk_alloc_rqd(pblk, PBLK_READ);
rqd->opcode = NVM_OP_PREAD;
rqd->bio = bio;
@@ -332,6 +382,9 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->private = pblk;
rqd->end_io = pblk_end_io_read;
+ r_ctx = nvm_rq_to_pdu(rqd);
+ r_ctx->lba = blba;
+
/* Save the index for this bio's start. This is needed in case
* we need to fill a partial read.
*/
@@ -348,23 +401,22 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
- pblk_read_ppalist_rq(pblk, rqd, &read_bitmap);
+ pblk_read_ppalist_rq(pblk, rqd, blba, &read_bitmap);
} else {
- pblk_read_rq(pblk, rqd, &read_bitmap);
+ pblk_read_rq(pblk, rqd, blba, &read_bitmap);
}
bio_get(bio);
if (bitmap_full(&read_bitmap, nr_secs)) {
bio_endio(bio);
atomic_inc(&pblk->inflight_io);
- pblk_end_io_read(rqd);
+ __pblk_end_io_read(pblk, rqd, false);
return NVM_IO_OK;
}
/* All sectors are to be read from the device */
if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
struct bio *int_bio = NULL;
- struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
/* Clone read bio to deal with read errors internally */
int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
@@ -399,40 +451,46 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
return NVM_IO_OK;
fail_rqd_free:
- pblk_free_rqd(pblk, rqd, READ);
+ pblk_free_rqd(pblk, rqd, PBLK_READ);
return ret;
}
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_line *line, u64 *lba_list,
- unsigned int nr_secs)
+ u64 *paddr_list_gc, unsigned int nr_secs)
{
- struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
+ struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
+ struct ppa_addr ppa_gc;
int valid_secs = 0;
int i;
- pblk_lookup_l2p_rand(pblk, ppas, lba_list, nr_secs);
+ pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);
for (i = 0; i < nr_secs; i++) {
- if (pblk_addr_in_cache(ppas[i]) || ppas[i].g.blk != line->id ||
- pblk_ppa_empty(ppas[i])) {
- lba_list[i] = ADDR_EMPTY;
+ if (lba_list[i] == ADDR_EMPTY)
+ continue;
+
+ ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
+ if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
+ paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
continue;
}
- rqd->ppa_list[valid_secs++] = ppas[i];
+ rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
}
#ifdef CONFIG_NVM_DEBUG
atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif
+
return valid_secs;
}
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
- struct pblk_line *line, sector_t lba)
+ struct pblk_line *line, sector_t lba,
+ u64 paddr_gc)
{
- struct ppa_addr ppa;
+ struct ppa_addr ppa_l2p, ppa_gc;
int valid_secs = 0;
if (lba == ADDR_EMPTY)
@@ -445,15 +503,14 @@ static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
}
spin_lock(&pblk->trans_lock);
- ppa = pblk_trans_map_get(pblk, lba);
+ ppa_l2p = pblk_trans_map_get(pblk, lba);
spin_unlock(&pblk->trans_lock);
- /* Ignore updated values until the moment */
- if (pblk_addr_in_cache(ppa) || ppa.g.blk != line->id ||
- pblk_ppa_empty(ppa))
+ ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
+ if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
goto out;
- rqd->ppa_addr = ppa;
+ rqd->ppa_addr = ppa_l2p;
valid_secs = 1;
#ifdef CONFIG_NVM_DEBUG
@@ -464,42 +521,44 @@ out:
return valid_secs;
}
-int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
- unsigned int nr_secs, unsigned int *secs_to_gc,
- struct pblk_line *line)
+int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct bio *bio;
struct nvm_rq rqd;
- int ret, data_len;
- DECLARE_COMPLETION_ONSTACK(wait);
+ int data_len;
+ int ret = NVM_IO_OK;
memset(&rqd, 0, sizeof(struct nvm_rq));
rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
&rqd.dma_meta_list);
if (!rqd.meta_list)
- return NVM_IO_ERR;
+ return -ENOMEM;
- if (nr_secs > 1) {
+ if (gc_rq->nr_secs > 1) {
rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
- *secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, line, lba_list,
- nr_secs);
- if (*secs_to_gc == 1)
+ gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
+ gc_rq->lba_list,
+ gc_rq->paddr_list,
+ gc_rq->nr_secs);
+ if (gc_rq->secs_to_gc == 1)
rqd.ppa_addr = rqd.ppa_list[0];
} else {
- *secs_to_gc = read_rq_gc(pblk, &rqd, line, lba_list[0]);
+ gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
+ gc_rq->lba_list[0],
+ gc_rq->paddr_list[0]);
}
- if (!(*secs_to_gc))
+ if (!(gc_rq->secs_to_gc))
goto out;
- data_len = (*secs_to_gc) * geo->sec_size;
- bio = pblk_bio_map_addr(pblk, data, *secs_to_gc, data_len,
- PBLK_KMALLOC_META, GFP_KERNEL);
+ data_len = (gc_rq->secs_to_gc) * geo->sec_size;
+ bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
+ PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
goto err_free_dma;
@@ -509,23 +568,16 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
bio_set_op_attrs(bio, REQ_OP_READ, 0);
rqd.opcode = NVM_OP_PREAD;
- rqd.end_io = pblk_end_io_sync;
- rqd.private = &wait;
- rqd.nr_ppas = *secs_to_gc;
+ rqd.nr_ppas = gc_rq->secs_to_gc;
rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
rqd.bio = bio;
- ret = pblk_submit_read_io(pblk, &rqd);
- if (ret) {
- bio_endio(bio);
+ if (pblk_submit_io_sync(pblk, &rqd)) {
+ ret = -EIO;
pr_err("pblk: GC read request failed\n");
- goto err_free_dma;
+ goto err_free_bio;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: GC read I/O timed out\n");
- }
atomic_dec(&pblk->inflight_io);
if (rqd.error) {
@@ -536,16 +588,18 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
}
#ifdef CONFIG_NVM_DEBUG
- atomic_long_add(*secs_to_gc, &pblk->sync_reads);
- atomic_long_add(*secs_to_gc, &pblk->recov_gc_reads);
- atomic_long_sub(*secs_to_gc, &pblk->inflight_reads);
+ atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
+ atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
+ atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif
out:
nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
- return NVM_IO_OK;
+ return ret;
+err_free_bio:
+ bio_put(bio);
err_free_dma:
nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
- return NVM_IO_ERR;
+ return ret;
}
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index cb556e06673e..eadb3eb5d4dc 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -34,10 +34,6 @@ void pblk_submit_rec(struct work_struct *work)
max_secs);
bio = bio_alloc(GFP_KERNEL, nr_rec_secs);
- if (!bio) {
- pr_err("pblk: not able to create recovery bio\n");
- return;
- }
bio->bi_iter.bi_sector = 0;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -71,7 +67,7 @@ void pblk_submit_rec(struct work_struct *work)
err:
bio_put(bio);
- pblk_free_rqd(pblk, rqd, WRITE);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE);
}
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
@@ -84,12 +80,7 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
struct pblk_c_ctx *rec_ctx;
int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded;
- rec_rqd = pblk_alloc_rqd(pblk, WRITE);
- if (IS_ERR(rec_rqd)) {
- pr_err("pblk: could not create recovery req.\n");
- return -ENOMEM;
- }
-
+ rec_rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
rec_ctx = nvm_rq_to_pdu(rec_rqd);
/* Copy completion bitmap, but exclude the first X completed entries */
@@ -142,19 +133,19 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
struct pblk_emeta *emeta = line->emeta;
struct line_emeta *emeta_buf = emeta->buf;
__le64 *lba_list;
- int data_start;
- int nr_data_lbas, nr_valid_lbas, nr_lbas = 0;
- int i;
+ u64 data_start, data_end;
+ u64 nr_valid_lbas, nr_lbas = 0;
+ u64 i;
lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
if (!lba_list)
return 1;
data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
- nr_data_lbas = lm->sec_per_line - lm->emeta_sec[0];
+ data_end = line->emeta_ssec;
nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);
- for (i = data_start; i < nr_data_lbas && nr_lbas < nr_valid_lbas; i++) {
+ for (i = data_start; i < data_end; i++) {
struct ppa_addr ppa;
int pos;
@@ -181,8 +172,8 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
}
if (nr_valid_lbas != nr_lbas)
- pr_err("pblk: line %d - inconsistent lba list(%llu/%d)\n",
- line->id, emeta_buf->nr_valid_lbas, nr_lbas);
+ pr_err("pblk: line %d - inconsistent lba list(%llu/%llu)\n",
+ line->id, nr_valid_lbas, nr_lbas);
line->left_msecs = 0;
@@ -225,7 +216,6 @@ static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
int rq_ppas, rq_len;
int i, j;
int ret = 0;
- DECLARE_COMPLETION_ONSTACK(wait);
ppa_list = p.ppa_list;
meta_list = p.meta_list;
@@ -262,8 +252,6 @@ next_read_rq:
rqd->ppa_list = ppa_list;
rqd->dma_ppa_list = dma_ppa_list;
rqd->dma_meta_list = dma_meta_list;
- rqd->end_io = pblk_end_io_sync;
- rqd->private = &wait;
if (pblk_io_aligned(pblk, rq_ppas))
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
@@ -289,19 +277,13 @@ next_read_rq:
}
/* If read fails, more padding is needed */
- ret = pblk_submit_io(pblk, rqd);
+ ret = pblk_submit_io_sync(pblk, rqd);
if (ret) {
pr_err("pblk: I/O submission failed: %d\n", ret);
return ret;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: L2P recovery read timed out\n");
- return -EINTR;
- }
atomic_dec(&pblk->inflight_io);
- reinit_completion(&wait);
/* At this point, the read should not fail. If it does, it is a problem
* we cannot recover from here. Need FTL log.
@@ -338,13 +320,10 @@ static void pblk_end_io_recov(struct nvm_rq *rqd)
{
struct pblk_pad_rq *pad_rq = rqd->private;
struct pblk *pblk = pad_rq->pblk;
- struct nvm_tgt_dev *dev = pblk->dev;
pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
- bio_put(rqd->bio);
- nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
- pblk_free_rqd(pblk, rqd, WRITE);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
atomic_dec(&pblk->inflight_io);
kref_put(&pad_rq->ref, pblk_recov_complete);
@@ -404,25 +383,21 @@ next_pad_rq:
ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
- rqd = pblk_alloc_rqd(pblk, WRITE);
- if (IS_ERR(rqd)) {
- ret = PTR_ERR(rqd);
- goto fail_free_meta;
- }
-
bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- goto fail_free_rqd;
+ goto fail_free_meta;
}
bio->bi_iter.bi_sector = 0; /* internal bio */
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
+
rqd->bio = bio;
rqd->opcode = NVM_OP_PWRITE;
- rqd->flags = pblk_set_progr_mode(pblk, WRITE);
+ rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
rqd->meta_list = meta_list;
rqd->nr_ppas = rq_ppas;
rqd->ppa_list = ppa_list;
@@ -490,8 +465,6 @@ free_rq:
fail_free_bio:
bio_put(bio);
-fail_free_rqd:
- pblk_free_rqd(pblk, rqd, WRITE);
fail_free_meta:
nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
fail_free_pad:
@@ -522,7 +495,6 @@ static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
int ret = 0;
int rec_round;
int left_ppas = pblk_calc_sec_in_line(pblk, line) - line->cur_sec;
- DECLARE_COMPLETION_ONSTACK(wait);
ppa_list = p.ppa_list;
meta_list = p.meta_list;
@@ -557,8 +529,6 @@ next_rq:
rqd->ppa_list = ppa_list;
rqd->dma_ppa_list = dma_ppa_list;
rqd->dma_meta_list = dma_meta_list;
- rqd->end_io = pblk_end_io_sync;
- rqd->private = &wait;
if (pblk_io_aligned(pblk, rq_ppas))
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
@@ -584,18 +554,13 @@ next_rq:
addr_to_gen_ppa(pblk, w_ptr, line->id);
}
- ret = pblk_submit_io(pblk, rqd);
+ ret = pblk_submit_io_sync(pblk, rqd);
if (ret) {
pr_err("pblk: I/O submission failed: %d\n", ret);
return ret;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: L2P recovery read timed out\n");
- }
atomic_dec(&pblk->inflight_io);
- reinit_completion(&wait);
/* This should not happen since the read failed during normal recovery,
* but the media works funny sometimes...
@@ -663,7 +628,6 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
int i, j;
int ret = 0;
int left_ppas = pblk_calc_sec_in_line(pblk, line);
- DECLARE_COMPLETION_ONSTACK(wait);
ppa_list = p.ppa_list;
meta_list = p.meta_list;
@@ -696,8 +660,6 @@ next_rq:
rqd->ppa_list = ppa_list;
rqd->dma_ppa_list = dma_ppa_list;
rqd->dma_meta_list = dma_meta_list;
- rqd->end_io = pblk_end_io_sync;
- rqd->private = &wait;
if (pblk_io_aligned(pblk, rq_ppas))
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
@@ -723,19 +685,14 @@ next_rq:
addr_to_gen_ppa(pblk, paddr, line->id);
}
- ret = pblk_submit_io(pblk, rqd);
+ ret = pblk_submit_io_sync(pblk, rqd);
if (ret) {
pr_err("pblk: I/O submission failed: %d\n", ret);
bio_put(bio);
return ret;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: L2P recovery read timed out\n");
- }
atomic_dec(&pblk->inflight_io);
- reinit_completion(&wait);
/* Reached the end of the written line */
if (rqd->error) {
@@ -785,15 +742,9 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
dma_addr_t dma_ppa_list, dma_meta_list;
int done, ret = 0;
- rqd = pblk_alloc_rqd(pblk, READ);
- if (IS_ERR(rqd))
- return PTR_ERR(rqd);
-
meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
- if (!meta_list) {
- ret = -ENOMEM;
- goto free_rqd;
- }
+ if (!meta_list)
+ return -ENOMEM;
ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
@@ -804,6 +755,8 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
goto free_meta_list;
}
+ rqd = pblk_alloc_rqd(pblk, PBLK_READ);
+
p.ppa_list = ppa_list;
p.meta_list = meta_list;
p.rqd = rqd;
@@ -832,8 +785,6 @@ out:
kfree(data);
free_meta_list:
nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
-free_rqd:
- pblk_free_rqd(pblk, rqd, READ);
return ret;
}
@@ -851,11 +802,33 @@ static void pblk_recov_line_add_ordered(struct list_head *head,
__list_add(&line->list, t->list.prev, &t->list);
}
-struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
+static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct pblk_line_meta *lm = &pblk->lm;
+ unsigned int emeta_secs;
+ u64 emeta_start;
+ struct ppa_addr ppa;
+ int pos;
+
+ emeta_secs = lm->emeta_sec[0];
+ emeta_start = lm->sec_per_line;
+
+ while (emeta_secs) {
+ emeta_start--;
+ ppa = addr_to_pblk_ppa(pblk, emeta_start, line->id);
+ pos = pblk_ppa_to_pos(geo, ppa);
+ if (!test_bit(pos, line->blk_bitmap))
+ emeta_secs--;
+ }
+
+ return emeta_start;
+}
+
+struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
+{
+ struct pblk_line_meta *lm = &pblk->lm;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line *line, *tline, *data_line = NULL;
struct pblk_smeta *smeta;
@@ -900,9 +873,9 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
continue;
- if (le16_to_cpu(smeta_buf->header.version) != 1) {
+ if (smeta_buf->header.version != SMETA_VERSION) {
pr_err("pblk: found incompatible line version %u\n",
- smeta_buf->header.version);
+ le16_to_cpu(smeta_buf->header.version));
return ERR_PTR(-EINVAL);
}
@@ -954,15 +927,9 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
/* Verify closed blocks and recover this portion of L2P table*/
list_for_each_entry_safe(line, tline, &recov_list, list) {
- int off, nr_bb;
-
recovered_lines++;
- /* Calculate where emeta starts based on the line bb */
- off = lm->sec_per_line - lm->emeta_sec[0];
- nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
- off -= nr_bb * geo->sec_per_pl;
- line->emeta_ssec = off;
+ line->emeta_ssec = pblk_line_emeta_start(pblk, line);
line->emeta = emeta;
memset(line->emeta->buf, 0, lm->emeta_len[0]);
@@ -987,7 +954,7 @@ next:
list_move_tail(&line->list, move_list);
spin_unlock(&l_mg->gc_lock);
- mempool_free(line->map_bitmap, pblk->line_meta_pool);
+ kfree(line->map_bitmap);
line->map_bitmap = NULL;
line->smeta = NULL;
line->emeta = NULL;
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index 2e6a5361baf0..abae31fd434e 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -96,9 +96,11 @@ unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
*
* Only the total number of free blocks is used to configure the rate limiter.
*/
-static int pblk_rl_update_rates(struct pblk_rl *rl, unsigned long max)
+void pblk_rl_update_rates(struct pblk_rl *rl)
{
+ struct pblk *pblk = container_of(rl, struct pblk, rl);
unsigned long free_blocks = pblk_rl_nr_free_blks(rl);
+ int max = rl->rb_budget;
if (free_blocks >= rl->high) {
rl->rb_user_max = max;
@@ -124,23 +126,18 @@ static int pblk_rl_update_rates(struct pblk_rl *rl, unsigned long max)
rl->rb_state = PBLK_RL_LOW;
}
- return rl->rb_state;
+ if (rl->rb_state == (PBLK_RL_MID | PBLK_RL_LOW))
+ pblk_gc_should_start(pblk);
+ else
+ pblk_gc_should_stop(pblk);
}
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
- struct pblk *pblk = container_of(rl, struct pblk, rl);
int blk_in_line = atomic_read(&line->blk_in_line);
- int ret;
atomic_add(blk_in_line, &rl->free_blocks);
- /* Rates will not change that often - no need to lock update */
- ret = pblk_rl_update_rates(rl, rl->rb_budget);
-
- if (ret == (PBLK_RL_MID | PBLK_RL_LOW))
- pblk_gc_should_start(pblk);
- else
- pblk_gc_should_stop(pblk);
+ pblk_rl_update_rates(rl);
}
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line)
@@ -148,19 +145,7 @@ void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line)
int blk_in_line = atomic_read(&line->blk_in_line);
atomic_sub(blk_in_line, &rl->free_blocks);
-}
-
-void pblk_gc_should_kick(struct pblk *pblk)
-{
- struct pblk_rl *rl = &pblk->rl;
- int ret;
-
- /* Rates will not change that often - no need to lock update */
- ret = pblk_rl_update_rates(rl, rl->rb_budget);
- if (ret == (PBLK_RL_MID | PBLK_RL_LOW))
- pblk_gc_should_start(pblk);
- else
- pblk_gc_should_stop(pblk);
+ pblk_rl_update_rates(rl);
}
int pblk_rl_high_thrs(struct pblk_rl *rl)
@@ -168,14 +153,9 @@ int pblk_rl_high_thrs(struct pblk_rl *rl)
return rl->high;
}
-int pblk_rl_low_thrs(struct pblk_rl *rl)
-{
- return rl->low;
-}
-
-int pblk_rl_sysfs_rate_show(struct pblk_rl *rl)
+int pblk_rl_max_io(struct pblk_rl *rl)
{
- return rl->rb_user_max;
+ return rl->rb_max_io;
}
static void pblk_rl_u_timer(unsigned long data)
@@ -214,6 +194,7 @@ void pblk_rl_init(struct pblk_rl *rl, int budget)
/* To start with, all buffer is available to user I/O writers */
rl->rb_budget = budget;
rl->rb_user_max = budget;
+ rl->rb_max_io = budget >> 1;
rl->rb_gc_max = 0;
rl->rb_state = PBLK_RL_HIGH;
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index 95fb434e2f01..cd49e8875d4e 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -253,7 +253,7 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
sz += snprintf(page + sz, PAGE_SIZE - sz,
"GC: full:%d, high:%d, mid:%d, low:%d, empty:%d, queue:%d\n",
gc_full, gc_high, gc_mid, gc_low, gc_empty,
- atomic_read(&pblk->gc.inflight_gc));
+ atomic_read(&pblk->gc.read_inflight_gc));
sz += snprintf(page + sz, PAGE_SIZE - sz,
"data (%d) cur:%d, left:%d, vsc:%d, s:%d, map:%d/%d (%d)\n",
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 3ad9e56d2473..6c1cafafef53 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -20,7 +20,6 @@
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_c_ctx *c_ctx)
{
- struct nvm_tgt_dev *dev = pblk->dev;
struct bio *original_bio;
unsigned long ret;
int i;
@@ -33,16 +32,18 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
bio_endio(original_bio);
}
+ if (c_ctx->nr_padded)
+ pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
+ c_ctx->nr_padded);
+
#ifdef CONFIG_NVM_DEBUG
- atomic_long_add(c_ctx->nr_valid, &pblk->sync_writes);
+ atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif
ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);
- nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
-
bio_put(rqd->bio);
- pblk_free_rqd(pblk, rqd, WRITE);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE);
return ret;
}
@@ -107,10 +108,7 @@ static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
ppa_list = &rqd->ppa_addr;
recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
- if (!recovery) {
- pr_err("pblk: could not allocate recovery context\n");
- return;
- }
+
INIT_LIST_HEAD(&recovery->failed);
bit = -1;
@@ -175,7 +173,6 @@ static void pblk_end_io_write(struct nvm_rq *rqd)
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
struct pblk *pblk = rqd->private;
- struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
struct pblk_line *line = m_ctx->private;
struct pblk_emeta *emeta = line->emeta;
@@ -187,19 +184,13 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
pblk_log_write_err(pblk, rqd);
pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
}
-#ifdef CONFIG_NVM_DEBUG
- else
- WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
-#endif
sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
if (sync == emeta->nr_entries)
- pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws,
- pblk->close_wq);
+ pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
+ GFP_ATOMIC, pblk->close_wq);
- bio_put(rqd->bio);
- nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
- pblk_free_rqd(pblk, rqd, READ);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
atomic_dec(&pblk->inflight_io);
}
@@ -213,7 +204,7 @@ static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
/* Setup write request */
rqd->opcode = NVM_OP_PWRITE;
rqd->nr_ppas = nr_secs;
- rqd->flags = pblk_set_progr_mode(pblk, WRITE);
+ rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
rqd->private = pblk;
rqd->end_io = end_io;
@@ -229,15 +220,16 @@ static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
}
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
- struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
+ struct ppa_addr *erase_ppa)
{
struct pblk_line_meta *lm = &pblk->lm;
struct pblk_line *e_line = pblk_line_get_erase(pblk);
+ struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
unsigned int valid = c_ctx->nr_valid;
unsigned int padded = c_ctx->nr_padded;
unsigned int nr_secs = valid + padded;
unsigned long *lun_bitmap;
- int ret = 0;
+ int ret;
lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
if (!lun_bitmap)
@@ -279,7 +271,7 @@ int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);
rqd->ppa_status = (u64)0;
- rqd->flags = pblk_set_progr_mode(pblk, WRITE);
+ rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
return ret;
}
@@ -303,55 +295,6 @@ static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
return secs_to_sync;
}
-static inline int pblk_valid_meta_ppa(struct pblk *pblk,
- struct pblk_line *meta_line,
- struct ppa_addr *ppa_list, int nr_ppas)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- struct pblk_line *data_line;
- struct ppa_addr ppa, ppa_opt;
- u64 paddr;
- int i;
-
- data_line = &pblk->lines[pblk_dev_ppa_to_line(ppa_list[0])];
- paddr = pblk_lookup_page(pblk, meta_line);
- ppa = addr_to_gen_ppa(pblk, paddr, 0);
-
- if (test_bit(pblk_ppa_to_pos(geo, ppa), data_line->blk_bitmap))
- return 1;
-
- /* Schedule a metadata I/O that is half the distance from the data I/O
- * with regards to the number of LUNs forming the pblk instance. This
- * balances LUN conflicts across every I/O.
- *
- * When the LUN configuration changes (e.g., due to GC), this distance
- * can align, which would result on a LUN deadlock. In this case, modify
- * the distance to not be optimal, but allow metadata I/Os to succeed.
- */
- ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
- if (unlikely(ppa_opt.ppa == ppa.ppa)) {
- data_line->meta_distance--;
- return 0;
- }
-
- for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
- if (ppa_list[i].g.ch == ppa_opt.g.ch &&
- ppa_list[i].g.lun == ppa_opt.g.lun)
- return 1;
-
- if (test_bit(pblk_ppa_to_pos(geo, ppa_opt), data_line->blk_bitmap)) {
- for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
- if (ppa_list[i].g.ch == ppa.g.ch &&
- ppa_list[i].g.lun == ppa.g.lun)
- return 0;
-
- return 1;
- }
-
- return 0;
-}
-
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
struct nvm_tgt_dev *dev = pblk->dev;
@@ -370,11 +313,8 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
int i, j;
int ret;
- rqd = pblk_alloc_rqd(pblk, READ);
- if (IS_ERR(rqd)) {
- pr_err("pblk: cannot allocate write req.\n");
- return PTR_ERR(rqd);
- }
+ rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
+
m_ctx = nvm_rq_to_pdu(rqd);
m_ctx->private = meta_line;
@@ -407,8 +347,6 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
if (emeta->mem >= lm->emeta_len[0]) {
spin_lock(&l_mg->close_lock);
list_del(&meta_line->list);
- WARN(!bitmap_full(meta_line->map_bitmap, lm->sec_per_line),
- "pblk: corrupt meta line %d\n", meta_line->id);
spin_unlock(&l_mg->close_lock);
}
@@ -428,18 +366,51 @@ fail_rollback:
pblk_dealloc_page(pblk, meta_line, rq_ppas);
list_add(&meta_line->list, &meta_line->list);
spin_unlock(&l_mg->close_lock);
-
- nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
fail_free_bio:
- if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META))
- bio_put(bio);
+ bio_put(bio);
fail_free_rqd:
- pblk_free_rqd(pblk, rqd, READ);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
return ret;
}
-static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
- int prev_n)
+static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
+ struct pblk_line *meta_line,
+ struct nvm_rq *data_rqd)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
+ struct pblk_line *data_line = pblk_line_get_data(pblk);
+ struct ppa_addr ppa, ppa_opt;
+ u64 paddr;
+ int pos_opt;
+
+ /* Schedule a metadata I/O that is half the distance from the data I/O
+ * with regards to the number of LUNs forming the pblk instance. This
+ * balances LUN conflicts across every I/O.
+ *
+ * When the LUN configuration changes (e.g., due to GC), this distance
+ * can align, which would result in metadata and data I/Os colliding. In
+ * this case, modify the distance to not be optimal, but move the
+ * optimal in the right direction.
+ */
+ paddr = pblk_lookup_page(pblk, meta_line);
+ ppa = addr_to_gen_ppa(pblk, paddr, 0);
+ ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
+ pos_opt = pblk_ppa_to_pos(geo, ppa_opt);
+
+ if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
+ test_bit(pos_opt, data_line->blk_bitmap))
+ return true;
+
+ if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
+ data_line->meta_distance--;
+
+ return false;
+}
+
+static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
+ struct nvm_rq *data_rqd)
{
struct pblk_line_meta *lm = &pblk->lm;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
@@ -449,57 +420,45 @@ static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
retry:
if (list_empty(&l_mg->emeta_list)) {
spin_unlock(&l_mg->close_lock);
- return 0;
+ return NULL;
}
meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
- if (bitmap_full(meta_line->map_bitmap, lm->sec_per_line))
+ if (meta_line->emeta->mem >= lm->emeta_len[0])
goto retry;
spin_unlock(&l_mg->close_lock);
- if (!pblk_valid_meta_ppa(pblk, meta_line, prev_list, prev_n))
- return 0;
+ if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
+ return NULL;
- return pblk_submit_meta_io(pblk, meta_line);
+ return meta_line;
}
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
- struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
struct ppa_addr erase_ppa;
+ struct pblk_line *meta_line;
int err;
ppa_set_empty(&erase_ppa);
/* Assign lbas to ppas and populate request structure */
- err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
+ err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
if (err) {
pr_err("pblk: could not setup write request: %d\n", err);
return NVM_IO_ERR;
}
- if (likely(ppa_empty(erase_ppa))) {
- /* Submit metadata write for previous data line */
- err = pblk_sched_meta_io(pblk, rqd->ppa_list, rqd->nr_ppas);
- if (err) {
- pr_err("pblk: metadata I/O submission failed: %d", err);
- return NVM_IO_ERR;
- }
+ meta_line = pblk_should_submit_meta_io(pblk, rqd);
- /* Submit data write for current data line */
- err = pblk_submit_io(pblk, rqd);
- if (err) {
- pr_err("pblk: data I/O submission failed: %d\n", err);
- return NVM_IO_ERR;
- }
- } else {
- /* Submit data write for current data line */
- err = pblk_submit_io(pblk, rqd);
- if (err) {
- pr_err("pblk: data I/O submission failed: %d\n", err);
- return NVM_IO_ERR;
- }
+ /* Submit data write for current data line */
+ err = pblk_submit_io(pblk, rqd);
+ if (err) {
+ pr_err("pblk: data I/O submission failed: %d\n", err);
+ return NVM_IO_ERR;
+ }
- /* Submit available erase for next data line */
+ if (!ppa_empty(erase_ppa)) {
+ /* Submit erase for next data line */
if (pblk_blk_erase_async(pblk, erase_ppa)) {
struct pblk_line *e_line = pblk_line_get_erase(pblk);
struct nvm_tgt_dev *dev = pblk->dev;
@@ -512,6 +471,15 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
}
}
+ if (meta_line) {
+ /* Submit metadata write for previous data line */
+ err = pblk_submit_meta_io(pblk, meta_line);
+ if (err) {
+ pr_err("pblk: metadata I/O submission failed: %d", err);
+ return NVM_IO_ERR;
+ }
+ }
+
return NVM_IO_OK;
}
@@ -521,7 +489,8 @@ static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
struct bio *bio = rqd->bio;
if (c_ctx->nr_padded)
- pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
+ pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
+ c_ctx->nr_padded);
}
static int pblk_submit_write(struct pblk *pblk)
@@ -543,31 +512,24 @@ static int pblk_submit_write(struct pblk *pblk)
if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
return 1;
- rqd = pblk_alloc_rqd(pblk, WRITE);
- if (IS_ERR(rqd)) {
- pr_err("pblk: cannot allocate write req.\n");
- return 1;
- }
-
- bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
- if (!bio) {
- pr_err("pblk: cannot allocate write bio\n");
- goto fail_free_rqd;
- }
- bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- rqd->bio = bio;
-
secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
if (secs_to_sync > pblk->max_write_pgs) {
pr_err("pblk: bad buffer sync calculation\n");
- goto fail_put_bio;
+ return 1;
}
secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
- if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
+ bio = bio_alloc(GFP_KERNEL, secs_to_sync);
+
+ bio->bi_iter.bi_sector = 0; /* internal bio */
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+ rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
+ rqd->bio = bio;
+
+ if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
secs_avail)) {
pr_err("pblk: corrupted write bio\n");
goto fail_put_bio;
@@ -586,8 +548,7 @@ fail_free_bio:
pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
bio_put(bio);
-fail_free_rqd:
- pblk_free_rqd(pblk, rqd, WRITE);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE);
return 1;
}
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 67e623bd5c2d..90961033a79f 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -40,10 +40,6 @@
#define PBLK_MAX_REQ_ADDRS (64)
#define PBLK_MAX_REQ_ADDRS_PW (6)
-#define PBLK_WS_POOL_SIZE (128)
-#define PBLK_META_POOL_SIZE (128)
-#define PBLK_READ_REQ_POOL_SIZE (1024)
-
#define PBLK_NR_CLOSE_JOBS (4)
#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)
@@ -59,7 +55,15 @@
for ((i) = 0, rlun = &(pblk)->luns[0]; \
(i) < (pblk)->nr_luns; (i)++, rlun = &(pblk)->luns[(i)])
-#define ERASE 2 /* READ = 0, WRITE = 1 */
+/* Static pool sizes */
+#define PBLK_GEN_WS_POOL_SIZE (2)
+
+enum {
+ PBLK_READ = READ,
+ PBLK_WRITE = WRITE, /* Write from write buffer */
+ PBLK_WRITE_INT, /* Internal write - no write buffer */
+ PBLK_ERASE,
+};
enum {
/* IO Types */
@@ -95,6 +99,7 @@ enum {
};
#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
+#define pblk_dma_ppa_size (sizeof(u64) * PBLK_MAX_REQ_ADDRS)
/* write buffer completion context */
struct pblk_c_ctx {
@@ -106,9 +111,10 @@ struct pblk_c_ctx {
unsigned int nr_padded;
};
-/* generic context */
+/* read context */
struct pblk_g_ctx {
void *private;
+ u64 lba;
};
/* Pad context */
@@ -207,6 +213,7 @@ struct pblk_lun {
struct pblk_gc_rq {
struct pblk_line *line;
void *data;
+ u64 paddr_list[PBLK_MAX_REQ_ADDRS];
u64 lba_list[PBLK_MAX_REQ_ADDRS];
int nr_secs;
int secs_to_gc;
@@ -231,7 +238,10 @@ struct pblk_gc {
struct timer_list gc_timer;
struct semaphore gc_sem;
- atomic_t inflight_gc;
+ atomic_t read_inflight_gc; /* Number of lines with inflight GC reads */
+ atomic_t pipeline_gc; /* Number of lines in the GC pipeline -
+ * started reads to finished writes
+ */
int w_entries;
struct list_head w_list;
@@ -267,6 +277,7 @@ struct pblk_rl {
int rb_gc_max; /* Max buffer entries available for GC I/O */
int rb_gc_rsv; /* Reserved buffer entries for GC I/O */
int rb_state; /* Rate-limiter current state */
+ int rb_max_io; /* Maximum size for an I/O given the config */
atomic_t rb_user_cnt; /* User I/O buffer counter */
atomic_t rb_gc_cnt; /* GC I/O buffer counter */
@@ -310,6 +321,7 @@ enum {
};
#define PBLK_MAGIC 0x70626c6b /*pblk*/
+#define SMETA_VERSION cpu_to_le16(1)
struct line_header {
__le32 crc;
@@ -618,15 +630,16 @@ struct pblk {
struct list_head compl_list;
- mempool_t *page_pool;
- mempool_t *line_ws_pool;
+ mempool_t *page_bio_pool;
+ mempool_t *gen_ws_pool;
mempool_t *rec_pool;
- mempool_t *g_rq_pool;
+ mempool_t *r_rq_pool;
mempool_t *w_rq_pool;
- mempool_t *line_meta_pool;
+ mempool_t *e_rq_pool;
struct workqueue_struct *close_wq;
struct workqueue_struct *bb_wq;
+ struct workqueue_struct *r_end_wq;
struct timer_list wtimer;
@@ -657,15 +670,15 @@ int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
struct pblk_w_ctx w_ctx, unsigned int pos);
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
- struct pblk_w_ctx w_ctx, struct pblk_line *gc_line,
- unsigned int pos);
+ struct pblk_w_ctx w_ctx, struct pblk_line *line,
+ u64 paddr, unsigned int pos);
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
void pblk_rb_flush(struct pblk_rb *rb);
void pblk_rb_sync_l2p(struct pblk_rb *rb);
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
- struct bio *bio, unsigned int pos,
- unsigned int nr_entries, unsigned int count);
+ unsigned int pos, unsigned int nr_entries,
+ unsigned int count);
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
struct list_head *list,
unsigned int max);
@@ -692,24 +705,23 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
/*
* pblk core
*/
-struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw);
+struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
+void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_c_ctx *c_ctx);
-void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw);
-void pblk_wait_for_meta(struct pblk *pblk);
-struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba);
void pblk_discard(struct pblk *pblk, struct bio *bio);
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
+int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
unsigned int nr_secs, unsigned int len,
int alloc_type, gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
-void pblk_line_replace_data(struct pblk *pblk);
+struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
struct pblk_line *pblk_line_get_data(struct pblk *pblk);
@@ -719,19 +731,18 @@ int pblk_line_is_full(struct pblk_line *line);
void pblk_line_free(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
-void pblk_line_close_meta_sync(struct pblk *pblk);
void pblk_line_close_ws(struct work_struct *work);
void pblk_pipeline_stop(struct pblk *pblk);
-void pblk_line_mark_bb(struct work_struct *work);
-void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
- void (*work)(struct work_struct *),
- struct workqueue_struct *wq);
+void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
+ void (*work)(struct work_struct *), gfp_t gfp_mask,
+ struct workqueue_struct *wq);
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
void *emeta_buf);
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
void pblk_line_put(struct kref *ref);
+void pblk_line_put_wq(struct kref *ref);
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
@@ -745,7 +756,6 @@ void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
unsigned long *lun_bitmap);
-void pblk_end_bio_sync(struct bio *bio);
void pblk_end_io_sync(struct nvm_rq *rqd);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
int nr_pages);
@@ -760,7 +770,7 @@ void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
struct ppa_addr ppa, struct ppa_addr entry_line);
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
- struct pblk_line *gc_line);
+ struct pblk_line *gc_line, u64 paddr);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
u64 *lba_list, int nr_secs);
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
@@ -771,9 +781,7 @@ void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
*/
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
unsigned long flags);
-int pblk_write_gc_to_cache(struct pblk *pblk, void *data, u64 *lba_list,
- unsigned int nr_entries, unsigned int nr_rec_entries,
- struct pblk_line *gc_line, unsigned long flags);
+int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
/*
* pblk map
@@ -797,9 +805,7 @@ void pblk_write_should_kick(struct pblk *pblk);
*/
extern struct bio_set *pblk_bio_set;
int pblk_submit_read(struct pblk *pblk, struct bio *bio);
-int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
- unsigned int nr_secs, unsigned int *secs_to_gc,
- struct pblk_line *line);
+int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
/*
* pblk recovery
*/
@@ -815,7 +821,7 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
* pblk gc
*/
#define PBLK_GC_MAX_READERS 8 /* Max number of outstanding GC reader jobs */
-#define PBLK_GC_W_QD 128 /* Queue depth for inflight GC write I/Os */
+#define PBLK_GC_RQ_QD 128 /* Queue depth for inflight GC requests */
#define PBLK_GC_L_QD 4 /* Queue depth for inflight GC lines */
#define PBLK_GC_RSV_LINE 1 /* Reserved lines for GC */
@@ -824,7 +830,7 @@ void pblk_gc_exit(struct pblk *pblk);
void pblk_gc_should_start(struct pblk *pblk);
void pblk_gc_should_stop(struct pblk *pblk);
void pblk_gc_should_kick(struct pblk *pblk);
-void pblk_gc_kick(struct pblk *pblk);
+void pblk_gc_free_full_lines(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
int *gc_active);
int pblk_gc_sysfs_force(struct pblk *pblk, int force);
@@ -834,8 +840,8 @@ int pblk_gc_sysfs_force(struct pblk *pblk, int force);
*/
void pblk_rl_init(struct pblk_rl *rl, int budget);
void pblk_rl_free(struct pblk_rl *rl);
+void pblk_rl_update_rates(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
-int pblk_rl_low_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
@@ -843,10 +849,9 @@ void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
-int pblk_rl_sysfs_rate_show(struct pblk_rl *rl);
+int pblk_rl_max_io(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);
-void pblk_rl_set_space_limit(struct pblk_rl *rl, int entries_left);
int pblk_rl_is_limit(struct pblk_rl *rl);
/*
@@ -892,13 +897,7 @@ static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
static inline int pblk_line_vsc(struct pblk_line *line)
{
- int vsc;
-
- spin_lock(&line->lock);
- vsc = le32_to_cpu(*line->vsc);
- spin_unlock(&line->lock);
-
- return vsc;
+ return le32_to_cpu(*line->vsc);
}
#define NVM_MEM_PAGE_WRITE (8)
@@ -1140,7 +1139,7 @@ static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
flags = geo->plane_mode >> 1;
- if (type == WRITE)
+ if (type == PBLK_WRITE)
flags |= NVM_IO_SCRAMBLE_ENABLE;
return flags;
@@ -1200,7 +1199,6 @@ static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
}
-#endif
static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
struct ppa_addr *ppas, int nr_ppas)
@@ -1221,14 +1219,50 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
ppa->g.sec < geo->sec_per_pg)
continue;
-#ifdef CONFIG_NVM_DEBUG
print_ppa(ppa, "boundary", i);
-#endif
+
return 1;
}
return 0;
}
+static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct ppa_addr *ppa_list;
+
+ ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+
+ if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (rqd->opcode == NVM_OP_PWRITE) {
+ struct pblk_line *line;
+ struct ppa_addr ppa;
+ int i;
+
+ for (i = 0; i < rqd->nr_ppas; i++) {
+ ppa = ppa_list[i];
+ line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+
+ spin_lock(&line->lock);
+ if (line->state != PBLK_LINESTATE_OPEN) {
+ pr_err("pblk: bad ppa: line:%d,state:%d\n",
+ line->id, line->state);
+ WARN_ON(1);
+ spin_unlock(&line->lock);
+ return -EINVAL;
+ }
+ spin_unlock(&line->lock);
+ }
+ }
+
+ return 0;
+}
+#endif
+
static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
{
struct pblk_line_meta *lm = &pblk->lm;
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index dbc4a3e63396..15db69d8ba69 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -30,8 +30,6 @@
/*#define DEBUG_ADB_IOP*/
-extern void iop_ism_irq(int, void *);
-
static struct adb_request *current_req;
static struct adb_request *last_req;
#if 0
@@ -266,7 +264,7 @@ int adb_iop_autopoll(int devs)
void adb_iop_poll(void)
{
if (adb_iop_state == idle) adb_iop_start();
- iop_ism_irq(0, (void *) ADB_IOP);
+ iop_ism_irq_poll(ADB_IOP);
}
int adb_iop_reset_bus(void)
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index c5731e5e3c6c..ba2f1525f4ee 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -163,9 +163,10 @@ config BCM_PDC_MBOX
config BCM_FLEXRM_MBOX
tristate "Broadcom FlexRM Mailbox"
depends on ARM64
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
depends on HAS_DMA
select GENERIC_MSI_IRQ_DOMAIN
- default ARCH_BCM_IPROC
+ default m if ARCH_BCM_IPROC
help
Mailbox implementation of the Broadcom FlexRM ring manager,
which provides access to various offload engines on Broadcom
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index ae6146311934..a8cf4333a68f 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1,10 +1,18 @@
-/* Broadcom FlexRM Mailbox Driver
- *
+/*
* Copyright (C) 2017 Broadcom
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Broadcom FlexRM Mailbox Driver
*
* Each Broadcom FlexSparx4 offload engine is implemented as an
* extension to Broadcom FlexRM ring manager. The FlexRM ring
@@ -1116,8 +1124,8 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
err = flexrm_cmpl_desc_to_error(desc);
if (err < 0) {
dev_warn(ring->mbox->dev,
- "got completion desc=0x%lx with error %d",
- (unsigned long)desc, err);
+ "ring%d got completion desc=0x%lx with error %d\n",
+ ring->num, (unsigned long)desc, err);
}
/* Determine request id from completion descriptor */
@@ -1127,8 +1135,8 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
msg = ring->requests[reqid];
if (!msg) {
dev_warn(ring->mbox->dev,
- "null msg pointer for completion desc=0x%lx",
- (unsigned long)desc);
+ "ring%d null msg pointer for completion desc=0x%lx\n",
+ ring->num, (unsigned long)desc);
continue;
}
@@ -1238,7 +1246,9 @@ static int flexrm_startup(struct mbox_chan *chan)
ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
GFP_KERNEL, &ring->bd_dma_base);
if (!ring->bd_base) {
- dev_err(ring->mbox->dev, "can't allocate BD memory\n");
+ dev_err(ring->mbox->dev,
+ "can't allocate BD memory for ring%d\n",
+ ring->num);
ret = -ENOMEM;
goto fail;
}
@@ -1261,7 +1271,9 @@ static int flexrm_startup(struct mbox_chan *chan)
ring->cmpl_base = dma_pool_alloc(ring->mbox->cmpl_pool,
GFP_KERNEL, &ring->cmpl_dma_base);
if (!ring->cmpl_base) {
- dev_err(ring->mbox->dev, "can't allocate completion memory\n");
+ dev_err(ring->mbox->dev,
+ "can't allocate completion memory for ring%d\n",
+ ring->num);
ret = -ENOMEM;
goto fail_free_bd_memory;
}
@@ -1269,7 +1281,8 @@ static int flexrm_startup(struct mbox_chan *chan)
/* Request IRQ */
if (ring->irq == UINT_MAX) {
- dev_err(ring->mbox->dev, "ring IRQ not available\n");
+ dev_err(ring->mbox->dev,
+ "ring%d IRQ not available\n", ring->num);
ret = -ENODEV;
goto fail_free_cmpl_memory;
}
@@ -1278,7 +1291,8 @@ static int flexrm_startup(struct mbox_chan *chan)
flexrm_irq_thread,
0, dev_name(ring->mbox->dev), ring);
if (ret) {
- dev_err(ring->mbox->dev, "failed to request ring IRQ\n");
+ dev_err(ring->mbox->dev,
+ "failed to request ring%d IRQ\n", ring->num);
goto fail_free_cmpl_memory;
}
ring->irq_requested = true;
@@ -1291,7 +1305,9 @@ static int flexrm_startup(struct mbox_chan *chan)
&ring->irq_aff_hint);
ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
if (ret) {
- dev_err(ring->mbox->dev, "failed to set IRQ affinity hint\n");
+ dev_err(ring->mbox->dev,
+ "failed to set IRQ affinity hint for ring%d\n",
+ ring->num);
goto fail_free_irq;
}
@@ -1365,8 +1381,8 @@ static void flexrm_shutdown(struct mbox_chan *chan)
/* Disable/inactivate ring */
writel_relaxed(0x0, ring->regs + RING_CONTROL);
- /* Flush ring with timeout of 1s */
- timeout = 1000;
+ /* Set ring flush state */
+ timeout = 1000; /* timeout of 1s */
writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
ring->regs + RING_CONTROL);
do {
@@ -1374,7 +1390,23 @@ static void flexrm_shutdown(struct mbox_chan *chan)
FLUSH_DONE_MASK)
break;
mdelay(1);
- } while (timeout--);
+ } while (--timeout);
+ if (!timeout)
+ dev_err(ring->mbox->dev,
+ "setting ring%d flush state timedout\n", ring->num);
+
+ /* Clear ring flush state */
+ timeout = 1000; /* timeout of 1s */
+ writel_relaxed(0x0, ring->regs + RING_CONTROL);
+ do {
+ if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
+ FLUSH_DONE_MASK))
+ break;
+ mdelay(1);
+ } while (--timeout);
+ if (!timeout)
+ dev_err(ring->mbox->dev,
+ "clearing ring%d flush state timedout\n", ring->num);
/* Abort all in-flight requests */
for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 97fb956bb6e0..93f3d4d61fa7 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -30,6 +30,7 @@
#define MBOX_HEXDUMP_MAX_LEN (MBOX_HEXDUMP_LINE_LEN * \
(MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE))
+static bool mbox_data_ready;
static struct dentry *root_debugfs_dir;
struct mbox_test_device {
@@ -152,16 +153,14 @@ out:
static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
{
- unsigned char data;
+ bool data_ready;
unsigned long flags;
spin_lock_irqsave(&tdev->lock, flags);
- data = tdev->rx_buffer[0];
+ data_ready = mbox_data_ready;
spin_unlock_irqrestore(&tdev->lock, flags);
- if (data != '\0')
- return true;
- return false;
+ return data_ready;
}
static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
@@ -223,6 +222,7 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
*(touser + l) = '\0';
memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN);
+ mbox_data_ready = false;
spin_unlock_irqrestore(&tdev->lock, flags);
@@ -292,6 +292,7 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message)
message, MBOX_MAX_MSG_LEN);
memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
}
+ mbox_data_ready = true;
spin_unlock_irqrestore(&tdev->lock, flags);
wake_up_interruptible(&tdev->waitq);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 537f4f6d009b..674b35f402f5 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -351,7 +351,7 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
init_completion(&chan->tx_complete);
if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method |= TXDONE_BY_ACK;
+ chan->txdone_method = TXDONE_BY_ACK;
spin_unlock_irqrestore(&chan->lock, flags);
@@ -418,7 +418,7 @@ void mbox_free_channel(struct mbox_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
chan->cl = NULL;
chan->active_req = NULL;
- if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
+ if (chan->txdone_method == TXDONE_BY_ACK)
chan->txdone_method = TXDONE_BY_POLL;
module_put(chan->mbox->dev->driver->owner);
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index c5e8b9cb170d..2517038a8452 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -906,7 +906,11 @@ static int __init omap_mbox_init(void)
mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
sizeof(mbox_msg_t));
- return platform_driver_register(&omap_mbox_driver);
+ err = platform_driver_register(&omap_mbox_driver);
+ if (err)
+ class_unregister(&omap_mbox_class);
+
+ return err;
}
subsys_initcall(omap_mbox_init);
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 9b7005e1345e..3ef7f036ceea 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -69,7 +69,6 @@
#include "mailbox.h"
-#define MAX_PCC_SUBSPACES 256
#define MBOX_IRQ_NAME "pcc-mbox"
static struct mbox_chan *pcc_mbox_channels;
@@ -266,7 +265,7 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
init_completion(&chan->tx_complete);
if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method |= TXDONE_BY_ACK;
+ chan->txdone_method = TXDONE_BY_ACK;
spin_unlock_irqrestore(&chan->lock, flags);
@@ -312,7 +311,7 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
chan->cl = NULL;
chan->active_req = NULL;
- if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
+ if (chan->txdone_method == TXDONE_BY_ACK)
chan->txdone_method = TXDONE_BY_POLL;
spin_unlock_irqrestore(&chan->lock, flags);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 4a249ee86364..83b9362be09c 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -178,7 +178,7 @@ config MD_FAULTY
config MD_CLUSTER
- tristate "Cluster Support for MD (EXPERIMENTAL)"
+ tristate "Cluster Support for MD"
depends on BLK_DEV_MD
depends on DLM
default n
@@ -188,7 +188,8 @@ config MD_CLUSTER
nodes in the cluster can access the MD devices simultaneously.
This brings the redundancy (and uptime) of RAID levels across the
- nodes of the cluster.
+ nodes of the cluster. Currently, it can work with raid1 and raid10
+ (limited support).
If unsure, say N.
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index e94b6f9be941..f701bb211783 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -19,9 +19,12 @@ dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o \
dm-cache-smq-y += dm-cache-policy-smq.o
dm-era-y += dm-era-target.o
dm-verity-y += dm-verity-target.o
-md-mod-y += md.o bitmap.o
+md-mod-y += md.o md-bitmap.o
raid456-y += raid5.o raid5-cache.o raid5-ppl.o
dm-zoned-y += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
+linear-y += md-linear.o
+multipath-y += md-multipath.o
+faulty-y += md-faulty.o
# Note: link order is important. All raid personalities
# and must come before md.o, as they each initialise
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 08035634795c..a27d85232ce1 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -407,7 +407,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
finish_wait(&ca->set->bucket_wait, &w);
out:
- wake_up_process(ca->alloc_thread);
+ if (ca->alloc_thread)
+ wake_up_process(ca->alloc_thread);
trace_bcache_alloc(ca, reserve);
@@ -442,6 +443,11 @@ out:
b->prio = INITIAL_PRIO;
}
+ if (ca->set->avail_nbuckets > 0) {
+ ca->set->avail_nbuckets--;
+ bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
+ }
+
return r;
}
@@ -449,6 +455,11 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
SET_GC_MARK(b, 0);
SET_GC_SECTORS_USED(b, 0);
+
+ if (ca->set->avail_nbuckets < ca->set->nbuckets) {
+ ca->set->avail_nbuckets++;
+ bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
+ }
}
void bch_bucket_free(struct cache_set *c, struct bkey *k)
@@ -601,7 +612,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
/*
* If we had to allocate, we might race and not need to allocate the
- * second time we call find_data_bucket(). If we allocated a bucket but
+ * second time we call pick_data_bucket(). If we allocated a bucket but
* didn't use it, drop the refcount bch_bucket_alloc_set() took:
*/
if (KEY_PTRS(&alloc.key))
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index abd31e847f96..843877e017e1 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -185,6 +185,7 @@
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
+#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -266,9 +267,6 @@ struct bcache_device {
atomic_t *stripe_sectors_dirty;
unsigned long *full_dirty_stripes;
- unsigned long sectors_dirty_last;
- long sectors_dirty_derivative;
-
struct bio_set *bio_split;
unsigned data_csum:1;
@@ -300,7 +298,7 @@ struct cached_dev {
struct semaphore sb_write_mutex;
/* Refcount on the cache set. Always nonzero when we're caching. */
- atomic_t count;
+ refcount_t count;
struct work_struct detach;
/*
@@ -363,12 +361,14 @@ struct cached_dev {
uint64_t writeback_rate_target;
int64_t writeback_rate_proportional;
- int64_t writeback_rate_derivative;
- int64_t writeback_rate_change;
+ int64_t writeback_rate_integral;
+ int64_t writeback_rate_integral_scaled;
+ int32_t writeback_rate_change;
unsigned writeback_rate_update_seconds;
- unsigned writeback_rate_d_term;
+ unsigned writeback_rate_i_term_inverse;
unsigned writeback_rate_p_term_inverse;
+ unsigned writeback_rate_minimum;
};
enum alloc_reserve {
@@ -582,6 +582,7 @@ struct cache_set {
uint8_t need_gc;
struct gc_stat gc_stats;
size_t nbuckets;
+ size_t avail_nbuckets;
struct task_struct *gc_thread;
/* Where in the btree gc currently is */
@@ -807,13 +808,13 @@ do { \
static inline void cached_dev_put(struct cached_dev *dc)
{
- if (atomic_dec_and_test(&dc->count))
+ if (refcount_dec_and_test(&dc->count))
schedule_work(&dc->detach);
}
static inline bool cached_dev_get(struct cached_dev *dc)
{
- if (!atomic_inc_not_zero(&dc->count))
+ if (!refcount_inc_not_zero(&dc->count))
return false;
/* Paired with the mb in cached_dev_attach */
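The cached_dev_get()/cached_dev_put() hunks above replace a plain atomic_t with refcount_t, whose key property is that a "get" fails once the count has reached zero. The following is a minimal user-space sketch of that semantics using C11 atomics; it is illustrative only and not part of the patch or of the kernel refcount API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Succeed only while the count is non-zero, mirroring refcount_inc_not_zero(). */
static bool ref_inc_not_zero(atomic_uint *r)
{
	unsigned int old = atomic_load(r);

	while (old != 0) {
		if (atomic_compare_exchange_weak(r, &old, old + 1))
			return true;
	}
	return false;
}

/* Return true when this put drops the last reference, like refcount_dec_and_test(). */
static bool ref_dec_and_test(atomic_uint *r)
{
	return atomic_fetch_sub(r, 1) == 1;
}

int main(void)
{
	atomic_uint count = 1;

	if (ref_inc_not_zero(&count))
		printf("got reference, count=%u\n", atomic_load(&count));
	ref_dec_and_test(&count);		/* put */
	if (ref_dec_and_test(&count))		/* last put */
		printf("last reference dropped\n");
	return 0;
}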
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 658c54b3b07a..11c5503d31dc 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1241,6 +1241,11 @@ void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
__bch_btree_mark_key(c, level, k);
}
+void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
+{
+ stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
+}
+
static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
uint8_t stale = 0;
@@ -1652,9 +1657,8 @@ static void btree_gc_start(struct cache_set *c)
mutex_unlock(&c->bucket_lock);
}
-static size_t bch_btree_gc_finish(struct cache_set *c)
+static void bch_btree_gc_finish(struct cache_set *c)
{
- size_t available = 0;
struct bucket *b;
struct cache *ca;
unsigned i;
@@ -1691,6 +1695,7 @@ static size_t bch_btree_gc_finish(struct cache_set *c)
}
rcu_read_unlock();
+ c->avail_nbuckets = 0;
for_each_cache(ca, c, i) {
uint64_t *i;
@@ -1712,18 +1717,16 @@ static size_t bch_btree_gc_finish(struct cache_set *c)
BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
- available++;
+ c->avail_nbuckets++;
}
}
mutex_unlock(&c->bucket_lock);
- return available;
}
static void bch_btree_gc(struct cache_set *c)
{
int ret;
- unsigned long available;
struct gc_stat stats;
struct closure writes;
struct btree_op op;
@@ -1746,14 +1749,14 @@ static void bch_btree_gc(struct cache_set *c)
pr_warn("gc failed!");
} while (ret);
- available = bch_btree_gc_finish(c);
+ bch_btree_gc_finish(c);
wake_up_allocators(c);
bch_time_stats_update(&c->btree_gc_time, start_time);
stats.key_bytes *= sizeof(uint64_t);
stats.data <<= 9;
- stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
+ bch_update_bucket_in_use(c, &stats);
memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
trace_bcache_gc_end(c);
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 42204d61bc95..d211e2c25b6b 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -306,5 +306,5 @@ void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
struct keybuf_key *bch_keybuf_next(struct keybuf *);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
struct bkey *, keybuf_pred_fn *);
-
+void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);
#endif
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 965907ce1e20..ccfbea6f9f6b 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -252,6 +252,12 @@ static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
static inline void closure_queue(struct closure *cl)
{
struct workqueue_struct *wq = cl->wq;
+ /*
+ * Changes made to closure, work_struct, or a couple of other structs
+ * may cause work.func to point to the wrong location.
+ */
+ BUILD_BUG_ON(offsetof(struct closure, fn)
+ != offsetof(struct work_struct, func));
if (wq) {
INIT_WORK(&cl->work, cl->work.func);
BUG_ON(!queue_work(wq, &cl->work));
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 3475d6628e21..3a7aed7282b2 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -27,12 +27,12 @@ struct kmem_cache *bch_search_cache;
static void bch_data_insert_start(struct closure *);
-static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
+static unsigned cache_mode(struct cached_dev *dc)
{
return BDEV_CACHE_MODE(&dc->sb);
}
-static bool verify(struct cached_dev *dc, struct bio *bio)
+static bool verify(struct cached_dev *dc)
{
return dc->verify;
}
@@ -370,7 +370,7 @@ static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
struct cache_set *c = dc->disk.c;
- unsigned mode = cache_mode(dc, bio);
+ unsigned mode = cache_mode(dc);
unsigned sectors, congested = bch_get_congested(c);
struct task_struct *task = current;
struct io *i;
@@ -385,6 +385,14 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
op_is_write(bio_op(bio))))
goto skip;
+ /*
+ * Flag the IO for bypass if it is a read-ahead or background request,
+ * unless the read-ahead request is for metadata (e.g., for gfs2).
+ */
+ if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
+ !(bio->bi_opf & REQ_META))
+ goto skip;
+
if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
bio_sectors(bio) & (c->sb.block_size - 1)) {
pr_debug("skipping unaligned io");
@@ -463,6 +471,7 @@ struct search {
unsigned recoverable:1;
unsigned write:1;
unsigned read_dirty_data:1;
+ unsigned cache_missed:1;
unsigned long start_time;
@@ -649,6 +658,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->orig_bio = bio;
s->cache_miss = NULL;
+ s->cache_missed = 0;
s->d = d;
s->recoverable = 1;
s->write = op_is_write(bio_op(bio));
@@ -698,8 +708,16 @@ static void cached_dev_read_error(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct bio *bio = &s->bio.bio;
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
- if (s->recoverable) {
+ /*
+ * If the cache device is dirty (dc->has_dirty is non-zero), then
+ * recovering a failed read request from the cached device may return
+ * stale data. So read failure recovery is only permitted when the
+ * cache device is clean.
+ */
+ if (s->recoverable &&
+ (dc && !atomic_read(&dc->has_dirty))) {
/* Retry from the backing device: */
trace_bcache_read_retry(s->orig_bio);
@@ -740,7 +758,7 @@ static void cached_dev_read_done(struct closure *cl)
s->cache_miss = NULL;
}
- if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
+ if (verify(dc) && s->recoverable && !s->read_dirty_data)
bch_data_verify(dc, s->orig_bio);
bio_complete(s);
@@ -760,12 +778,12 @@ static void cached_dev_read_done_bh(struct closure *cl)
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
bch_mark_cache_accounting(s->iop.c, s->d,
- !s->cache_miss, s->iop.bypass);
+ !s->cache_missed, s->iop.bypass);
trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
if (s->iop.status)
continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
- else if (s->iop.bio || verify(dc, &s->bio.bio))
+ else if (s->iop.bio || verify(dc))
continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
else
continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
@@ -779,6 +797,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct bio *miss, *cache_bio;
+ s->cache_missed = 1;
+
if (s->cache_miss || s->iop.bypass) {
miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
@@ -892,7 +912,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
s->iop.bypass = true;
if (should_writeback(dc, s->orig_bio,
- cache_mode(dc, bio),
+ cache_mode(dc),
s->iop.bypass)) {
s->iop.bypass = false;
s->iop.writeback = true;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index fc0a31b13ac4..b4d28928dec5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -53,12 +53,15 @@ LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);
static int bcache_major;
-static DEFINE_IDA(bcache_minor);
+static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
-#define BCACHE_MINORS 16 /* partition support */
+/* limit on the number of partitions per bcache device */
+#define BCACHE_MINORS 128
+/* limit on the number of bcache devices on a single system */
+#define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS)
/* Superblock */
@@ -721,6 +724,16 @@ static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
closure_get(&c->caching);
}
+static inline int first_minor_to_idx(int first_minor)
+{
+ return (first_minor/BCACHE_MINORS);
+}
+
+static inline int idx_to_first_minor(int idx)
+{
+ return (idx * BCACHE_MINORS);
+}
+
static void bcache_device_free(struct bcache_device *d)
{
lockdep_assert_held(&bch_register_lock);
@@ -734,7 +747,8 @@ static void bcache_device_free(struct bcache_device *d)
if (d->disk && d->disk->queue)
blk_cleanup_queue(d->disk->queue);
if (d->disk) {
- ida_simple_remove(&bcache_minor, d->disk->first_minor);
+ ida_simple_remove(&bcache_device_idx,
+ first_minor_to_idx(d->disk->first_minor));
put_disk(d->disk);
}
@@ -751,7 +765,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
{
struct request_queue *q;
size_t n;
- int minor;
+ int idx;
if (!d->stripe_size)
d->stripe_size = 1 << 31;
@@ -776,25 +790,24 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (!d->full_dirty_stripes)
return -ENOMEM;
- minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
- if (minor < 0)
- return minor;
-
- minor *= BCACHE_MINORS;
+ idx = ida_simple_get(&bcache_device_idx, 0,
+ BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
+ if (idx < 0)
+ return idx;
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio),
BIOSET_NEED_BVECS |
BIOSET_NEED_RESCUER)) ||
!(d->disk = alloc_disk(BCACHE_MINORS))) {
- ida_simple_remove(&bcache_minor, minor);
+ ida_simple_remove(&bcache_device_idx, idx);
return -ENOMEM;
}
set_capacity(d->disk, sectors);
- snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);
+ snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
d->disk->major = bcache_major;
- d->disk->first_minor = minor;
+ d->disk->first_minor = idx_to_first_minor(idx);
d->disk->fops = &bcache_ops;
d->disk->private_data = d;
@@ -889,7 +902,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
closure_init_stack(&cl);
BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
- BUG_ON(atomic_read(&dc->count));
+ BUG_ON(refcount_read(&dc->count));
mutex_lock(&bch_register_lock);
@@ -1016,7 +1029,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
* dc->c must be set before dc->count != 0 - paired with the mb in
* cached_dev_get()
*/
- atomic_set(&dc->count, 1);
+ refcount_set(&dc->count, 1);
/* Block writeback thread, but spawn it */
down_write(&dc->writeback_lock);
@@ -1028,7 +1041,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
bch_sectors_dirty_init(&dc->disk);
atomic_set(&dc->has_dirty, 1);
- atomic_inc(&dc->count);
+ refcount_inc(&dc->count);
bch_writeback_queue(dc);
}
@@ -1129,9 +1142,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
if (ret)
return ret;
- set_capacity(dc->disk.disk,
- dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
-
dc->disk.disk->queue->backing_dev_info->ra_pages =
max(dc->disk.disk->queue->backing_dev_info->ra_pages,
q->backing_dev_info->ra_pages);
@@ -2085,6 +2095,7 @@ static void bcache_exit(void)
if (bcache_major)
unregister_blkdev(bcache_major, "bcache");
unregister_reboot_notifier(&reboot);
+ mutex_destroy(&bch_register_lock);
}
static int __init bcache_init(void)
@@ -2103,14 +2114,15 @@ static int __init bcache_init(void)
bcache_major = register_blkdev(0, "bcache");
if (bcache_major < 0) {
unregister_reboot_notifier(&reboot);
+ mutex_destroy(&bch_register_lock);
return bcache_major;
}
if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
!(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
- sysfs_create_files(bcache_kobj, files) ||
bch_request_init() ||
- bch_debug_init(bcache_kobj))
+ bch_debug_init(bcache_kobj) ||
+ sysfs_create_files(bcache_kobj, files))
goto err;
return 0;
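
Editor's aside (not part of the patch): the index allocation above decouples the per-device index from the minor number, so each bcacheN now owns a block of BCACHE_MINORS minors. Below is a minimal standalone sketch of that mapping, assuming MINORBITS is 20 as defined in include/linux/kdev_t.h; the main() harness is only for illustration.

#include <assert.h>

#define MINORBITS		20
#define BCACHE_MINORS		128
#define BCACHE_DEVICE_IDX_MAX	((1U << MINORBITS) / BCACHE_MINORS)

static int idx_to_first_minor(int idx)   { return idx * BCACHE_MINORS; }
static int first_minor_to_idx(int minor) { return minor / BCACHE_MINORS; }

int main(void)
{
	/* bcache2 owns minors 256..383 and the round trip is exact. */
	assert(idx_to_first_minor(2) == 256);
	assert(first_minor_to_idx(idx_to_first_minor(2)) == 2);
	/* 2^20 minors / 128 minors per device = 8192 possible bcache devices. */
	assert(BCACHE_DEVICE_IDX_MAX == 8192);
	return 0;
}
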
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 234b2f5b286d..b4184092c727 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -82,8 +82,9 @@ rw_attribute(writeback_delay);
rw_attribute(writeback_rate);
rw_attribute(writeback_rate_update_seconds);
-rw_attribute(writeback_rate_d_term);
+rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
+rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);
read_attribute(stripe_size);
@@ -131,15 +132,16 @@ SHOW(__bch_cached_dev)
sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);
var_print(writeback_rate_update_seconds);
- var_print(writeback_rate_d_term);
+ var_print(writeback_rate_i_term_inverse);
var_print(writeback_rate_p_term_inverse);
+ var_print(writeback_rate_minimum);
if (attr == &sysfs_writeback_rate_debug) {
char rate[20];
char dirty[20];
char target[20];
char proportional[20];
- char derivative[20];
+ char integral[20];
char change[20];
s64 next_io;
@@ -147,7 +149,7 @@ SHOW(__bch_cached_dev)
bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
bch_hprint(target, dc->writeback_rate_target << 9);
bch_hprint(proportional,dc->writeback_rate_proportional << 9);
- bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+ bch_hprint(integral, dc->writeback_rate_integral_scaled << 9);
bch_hprint(change, dc->writeback_rate_change << 9);
next_io = div64_s64(dc->writeback_rate.next - local_clock(),
@@ -158,11 +160,11 @@ SHOW(__bch_cached_dev)
"dirty:\t\t%s\n"
"target:\t\t%s\n"
"proportional:\t%s\n"
- "derivative:\t%s\n"
+ "integral:\t%s\n"
"change:\t\t%s/sec\n"
"next io:\t%llims\n",
rate, dirty, target, proportional,
- derivative, change, next_io);
+ integral, change, next_io);
}
sysfs_hprint(dirty_data,
@@ -214,7 +216,7 @@ STORE(__cached_dev)
dc->writeback_rate.rate, 1, INT_MAX);
d_strtoul_nonzero(writeback_rate_update_seconds);
- d_strtoul(writeback_rate_d_term);
+ d_strtoul(writeback_rate_i_term_inverse);
d_strtoul_nonzero(writeback_rate_p_term_inverse);
d_strtoi_h(sequential_cutoff);
@@ -320,7 +322,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_percent,
&sysfs_writeback_rate,
&sysfs_writeback_rate_update_seconds,
- &sysfs_writeback_rate_d_term,
+ &sysfs_writeback_rate_i_term_inverse,
&sysfs_writeback_rate_p_term_inverse,
&sysfs_writeback_rate_debug,
&sysfs_dirty_data,
@@ -746,6 +748,11 @@ static struct attribute *bch_cache_set_internal_files[] = {
};
KTYPE(bch_cache_set_internal);
+static int __bch_cache_cmp(const void *l, const void *r)
+{
+ return *((uint16_t *)r) - *((uint16_t *)l);
+}
+
SHOW(__bch_cache)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
@@ -770,9 +777,6 @@ SHOW(__bch_cache)
CACHE_REPLACEMENT(&ca->sb));
if (attr == &sysfs_priority_stats) {
- int cmp(const void *l, const void *r)
- { return *((uint16_t *) r) - *((uint16_t *) l); }
-
struct bucket *b;
size_t n = ca->sb.nbuckets, i;
size_t unused = 0, available = 0, dirty = 0, meta = 0;
@@ -801,7 +805,7 @@ SHOW(__bch_cache)
p[i] = ca->buckets[i].prio;
mutex_unlock(&ca->set->bucket_lock);
- sort(p, n, sizeof(uint16_t), cmp, NULL);
+ sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);
while (n &&
!cached[n - 1])
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 176d3c2ef5f5..e548b8b51322 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -232,8 +232,14 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
d->next += div_u64(done * NSEC_PER_SEC, d->rate);
- if (time_before64(now + NSEC_PER_SEC, d->next))
- d->next = now + NSEC_PER_SEC;
+ /* Bound the time. Don't let us fall further than 2 seconds behind
+ * (this prevents unnecessary backlog that would make it impossible
+ * to catch up). If we're ahead of the desired writeback rate,
+ * don't let us sleep more than 2.5 seconds (so we can notice/respond
+ * if the control system tells us to speed up!).
+ */
+ if (time_before64(now + NSEC_PER_SEC * 5LLU / 2LLU, d->next))
+ d->next = now + NSEC_PER_SEC * 5LLU / 2LLU;
if (time_after64(now - NSEC_PER_SEC * 2, d->next))
d->next = now - NSEC_PER_SEC * 2;
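
Editor's aside (not part of the patch): the two bounds above keep d->next inside a window around the current time, so the ratelimiter never sleeps more than 2.5 seconds and never accumulates more than 2 seconds of backlog. A simplified userspace sketch of that clamping follows; clamp_next() is a hypothetical name used only for illustration.

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t clamp_next(uint64_t next, uint64_t now)
{
	/* More than 2.5s in the future: cap the sleep so rate changes
	 * from the control system are noticed promptly. */
	if (next > now + NSEC_PER_SEC * 5 / 2)
		next = now + NSEC_PER_SEC * 5 / 2;

	/* More than 2s in the past: drop the backlog so we never try
	 * to "catch up" with an impossible burst. */
	if (now >= NSEC_PER_SEC * 2 && next < now - NSEC_PER_SEC * 2)
		next = now - NSEC_PER_SEC * 2;

	return next;
}
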
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index f54b58282f77..ed5e8a412eb8 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -442,10 +442,10 @@ struct bch_ratelimit {
uint64_t next;
/*
- * Rate at which we want to do work, in units per nanosecond
+ * Rate at which we want to do work, in units per second
* The units here correspond to the units passed to bch_next_delay()
*/
- unsigned rate;
+ uint32_t rate;
};
static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 70454f2ad2fa..56a37884ca8b 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -26,48 +26,63 @@ static void __update_writeback_rate(struct cached_dev *dc)
bcache_flash_devs_sectors_dirty(c);
uint64_t cache_dirty_target =
div_u64(cache_sectors * dc->writeback_percent, 100);
-
int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
c->cached_dev_sectors);
- /* PD controller */
-
+ /*
+ * PI controller:
+ * Figures out the amount that should be written per second.
+ *
+ * First, the error (number of sectors that are dirty beyond our
+ * target) is calculated. The error is accumulated (numerically
+ * integrated).
+ *
+ * Then, the proportional value and integral value are scaled
+ * based on configured values. These are stored as inverses to
+ * avoid fixed point math and to make configuration easy, e.g.
+ * the default value of 40 for writeback_rate_p_term_inverse
+ * attempts to write at a rate that would retire all the dirty
+ * blocks in 40 seconds.
+ *
+ * The writeback_rate_i_term_inverse value of 10000 means that 1/10000th
+ * of the error is accumulated in the integral term per second.
+ * This acts as a slow, long-term average that is not subject to
+ * variations in usage like the p term.
+ */
int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
- int64_t derivative = dirty - dc->disk.sectors_dirty_last;
- int64_t proportional = dirty - target;
- int64_t change;
-
- dc->disk.sectors_dirty_last = dirty;
-
- /* Scale to sectors per second */
-
- proportional *= dc->writeback_rate_update_seconds;
- proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
-
- derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
-
- derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
- (dc->writeback_rate_d_term /
- dc->writeback_rate_update_seconds) ?: 1, 0);
-
- derivative *= dc->writeback_rate_d_term;
- derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);
-
- change = proportional + derivative;
+ int64_t error = dirty - target;
+ int64_t proportional_scaled =
+ div_s64(error, dc->writeback_rate_p_term_inverse);
+ int64_t integral_scaled;
+ uint32_t new_rate;
+
+ if ((error < 0 && dc->writeback_rate_integral > 0) ||
+ (error > 0 && time_before64(local_clock(),
+ dc->writeback_rate.next + NSEC_PER_MSEC))) {
+ /*
+ * Only decrease the integral term if it's more than
+ * zero. Only increase the integral term if the device
+ * is keeping up. (Don't wind up the integral
+ * ineffectively in either case).
+ *
+ * It's necessary to scale this by
+ * writeback_rate_update_seconds to keep the integral
+ * term dimensioned properly.
+ */
+ dc->writeback_rate_integral += error *
+ dc->writeback_rate_update_seconds;
+ }
- /* Don't increase writeback rate if the device isn't keeping up */
- if (change > 0 &&
- time_after64(local_clock(),
- dc->writeback_rate.next + NSEC_PER_MSEC))
- change = 0;
+ integral_scaled = div_s64(dc->writeback_rate_integral,
+ dc->writeback_rate_i_term_inverse);
- dc->writeback_rate.rate =
- clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
- 1, NSEC_PER_MSEC);
+ new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
+ dc->writeback_rate_minimum, NSEC_PER_SEC);
- dc->writeback_rate_proportional = proportional;
- dc->writeback_rate_derivative = derivative;
- dc->writeback_rate_change = change;
+ dc->writeback_rate_proportional = proportional_scaled;
+ dc->writeback_rate_integral_scaled = integral_scaled;
+ dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
+ dc->writeback_rate.rate = new_rate;
dc->writeback_rate_target = target;
}
@@ -180,13 +195,21 @@ static void write_dirty(struct closure *cl)
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
- dirty_init(w);
- bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
- io->bio.bi_iter.bi_sector = KEY_START(&w->key);
- bio_set_dev(&io->bio, io->dc->bdev);
- io->bio.bi_end_io = dirty_endio;
+ /*
+ * IO errors are signalled using the dirty bit on the key.
+ * If we failed to read, we should not attempt to write to the
+ * backing device. Instead, immediately go to write_dirty_finish
+ * to clean up.
+ */
+ if (KEY_DIRTY(&w->key)) {
+ dirty_init(w);
+ bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
+ io->bio.bi_iter.bi_sector = KEY_START(&w->key);
+ bio_set_dev(&io->bio, io->dc->bdev);
+ io->bio.bi_end_io = dirty_endio;
- closure_bio_submit(&io->bio, cl);
+ closure_bio_submit(&io->bio, cl);
+ }
continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
@@ -418,6 +441,8 @@ static int bch_writeback_thread(void *arg)
struct cached_dev *dc = arg;
bool searched_full_index;
+ bch_ratelimit_reset(&dc->writeback_rate);
+
while (!kthread_should_stop()) {
down_write(&dc->writeback_lock);
if (!atomic_read(&dc->has_dirty) ||
@@ -445,7 +470,6 @@ static int bch_writeback_thread(void *arg)
up_write(&dc->writeback_lock);
- bch_ratelimit_reset(&dc->writeback_rate);
read_dirty(dc);
if (searched_full_index) {
@@ -455,6 +479,8 @@ static int bch_writeback_thread(void *arg)
!kthread_should_stop() &&
!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
delay = schedule_timeout_interruptible(delay);
+
+ bch_ratelimit_reset(&dc->writeback_rate);
}
}
@@ -492,8 +518,6 @@ void bch_sectors_dirty_init(struct bcache_device *d)
bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
sectors_dirty_init_fn, 0);
-
- d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -507,10 +531,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_percent = 10;
dc->writeback_delay = 30;
dc->writeback_rate.rate = 1024;
+ dc->writeback_rate_minimum = 8;
dc->writeback_rate_update_seconds = 5;
- dc->writeback_rate_d_term = 30;
- dc->writeback_rate_p_term_inverse = 6000;
+ dc->writeback_rate_p_term_inverse = 40;
+ dc->writeback_rate_i_term_inverse = 10000;
INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}
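
Editor's aside (not part of the patch): the PI controller described in the comment above can be modelled in isolation. The sketch below uses the default tunables set in bch_cached_dev_writeback_init() (p_term_inverse = 40, i_term_inverse = 10000, a 5-second update interval, minimum rate 8); pi_state and pi_update() are hypothetical names, and the real code keeps this state in struct cached_dev and clamps against NSEC_PER_SEC.

#include <stdint.h>

struct pi_state {
	int64_t integral;	/* accumulated error, in sector-seconds */
	uint32_t rate;		/* resulting writeback rate, sectors/sec */
};

static uint32_t pi_update(struct pi_state *s, int64_t dirty, int64_t target,
			  int keeping_up)
{
	const int64_t p_inverse = 40;		/* writeback_rate_p_term_inverse */
	const int64_t i_inverse = 10000;	/* writeback_rate_i_term_inverse */
	const int64_t update_seconds = 5;	/* writeback_rate_update_seconds */
	const int64_t rate_min = 8;		/* writeback_rate_minimum */
	const int64_t rate_max = 1000000000;	/* NSEC_PER_SEC */

	int64_t error = dirty - target;
	int64_t proportional = error / p_inverse;
	int64_t integral_scaled, new_rate;

	/* Anti-windup: only wind the integral down while it is positive,
	 * and only wind it up while the device is keeping up. */
	if ((error < 0 && s->integral > 0) || (error > 0 && keeping_up))
		s->integral += error * update_seconds;

	integral_scaled = s->integral / i_inverse;

	new_rate = proportional + integral_scaled;
	if (new_rate < rate_min)
		new_rate = rate_min;
	if (new_rate > rate_max)
		new_rate = rate_max;

	s->rate = (uint32_t)new_rate;
	return s->rate;
}

For example, with 4,000,000 dirty sectors against a 2,000,000-sector target, the proportional term alone asks for (4,000,000 - 2,000,000) / 40 = 50,000 sectors/sec, i.e. it tries to retire the excess in roughly 40 seconds, while the integral term only grows slowly if the backlog persists.
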
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 151544740148..a9e3ffb4b03c 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -77,7 +77,9 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
if (would_skip)
return false;
- return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
+ return (op_is_sync(bio->bi_opf) ||
+ bio->bi_opf & (REQ_META|REQ_PRIO) ||
+ in_use <= CUTOFF_WRITEBACK);
}
static inline void bch_writeback_queue(struct cached_dev *dc)
@@ -90,7 +92,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
{
if (!atomic_read(&dc->has_dirty) &&
!atomic_xchg(&dc->has_dirty, 1)) {
- atomic_inc(&dc->count);
+ refcount_inc(&dc->count);
if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index d216a8f7bc22..33bb074d6941 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -347,7 +347,7 @@ static void __cache_size_refresh(void)
BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
BUG_ON(dm_bufio_client_count < 0);
- dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
+ dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
/*
* Use default if set to 0 and report the actual cache size used.
@@ -960,7 +960,7 @@ static void __get_memory_limit(struct dm_bufio_client *c,
{
unsigned long buffers;
- if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
+ if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
if (mutex_trylock(&dm_bufio_clients_lock)) {
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
@@ -1600,7 +1600,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
- unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+ unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
}
@@ -1647,7 +1647,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
- return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
+ return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
}
/*
@@ -1818,7 +1818,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
static unsigned get_max_age_hz(void)
{
- unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
+ unsigned max_age = READ_ONCE(dm_bufio_max_age);
if (max_age > UINT_MAX / HZ)
max_age = UINT_MAX / HZ;
diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
index 707233891291..1d0af0a21fc7 100644
--- a/drivers/md/dm-cache-background-tracker.c
+++ b/drivers/md/dm-cache-background-tracker.c
@@ -161,8 +161,17 @@ EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);
static bool max_work_reached(struct background_tracker *b)
{
- // FIXME: finish
- return false;
+ return atomic_read(&b->pending_promotes) +
+ atomic_read(&b->pending_writebacks) +
+ atomic_read(&b->pending_demotes) >= b->max_work;
+}
+
+struct bt_work *alloc_work(struct background_tracker *b)
+{
+ if (max_work_reached(b))
+ return NULL;
+
+ return kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
}
int btracker_queue(struct background_tracker *b,
@@ -174,10 +183,7 @@ int btracker_queue(struct background_tracker *b,
if (pwork)
*pwork = NULL;
- if (max_work_reached(b))
- return -ENOMEM;
-
- w = kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
+ w = alloc_work(b);
if (!w)
return -ENOMEM;
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 4a4e9c75fc4c..0d7212410e21 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -13,6 +13,7 @@
#include "persistent-data/dm-transaction-manager.h"
#include <linux/device-mapper.h>
+#include <linux/refcount.h>
/*----------------------------------------------------------------*/
@@ -100,7 +101,7 @@ struct cache_disk_superblock {
} __packed;
struct dm_cache_metadata {
- atomic_t ref_count;
+ refcount_t ref_count;
struct list_head list;
unsigned version;
@@ -753,7 +754,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
}
cmd->version = metadata_version;
- atomic_set(&cmd->ref_count, 1);
+ refcount_set(&cmd->ref_count, 1);
init_rwsem(&cmd->root_lock);
cmd->bdev = bdev;
cmd->data_block_size = data_block_size;
@@ -791,7 +792,7 @@ static struct dm_cache_metadata *lookup(struct block_device *bdev)
list_for_each_entry(cmd, &table, list)
if (cmd->bdev == bdev) {
- atomic_inc(&cmd->ref_count);
+ refcount_inc(&cmd->ref_count);
return cmd;
}
@@ -862,7 +863,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
{
- if (atomic_dec_and_test(&cmd->ref_count)) {
+ if (refcount_dec_and_test(&cmd->ref_count)) {
mutex_lock(&table_lock);
list_del(&cmd->list);
mutex_unlock(&table_lock);
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index e5eb9c9b4bc8..4ab23d0075f6 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -213,6 +213,19 @@ static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
l->nr_elts--;
}
+static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
+{
+ struct entry *e;
+
+ for (e = l_head(es, l); e; e = l_next(es, e))
+ if (!e->sentinel) {
+ l_del(es, l, e);
+ return e;
+ }
+
+ return NULL;
+}
+
static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
{
struct entry *e;
@@ -719,7 +732,7 @@ static struct entry *alloc_entry(struct entry_alloc *ea)
if (l_empty(&ea->free))
return NULL;
- e = l_pop_tail(ea->es, &ea->free);
+ e = l_pop_head(ea->es, &ea->free);
init_entry(e);
ea->nr_allocated++;
@@ -1158,13 +1171,13 @@ static void clear_pending(struct smq_policy *mq, struct entry *e)
e->pending_work = false;
}
-static void queue_writeback(struct smq_policy *mq)
+static void queue_writeback(struct smq_policy *mq, bool idle)
{
int r;
struct policy_work work;
struct entry *e;
- e = q_peek(&mq->dirty, mq->dirty.nr_levels, !mq->migrations_allowed);
+ e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);
if (e) {
mark_pending(mq, e);
q_del(&mq->dirty, e);
@@ -1174,12 +1187,16 @@ static void queue_writeback(struct smq_policy *mq)
work.cblock = infer_cblock(mq, e);
r = btracker_queue(mq->bg_work, &work, NULL);
- WARN_ON_ONCE(r); // FIXME: finish, I think we have to get rid of this race.
+ if (r) {
+ clear_pending(mq, e);
+ q_push_front(&mq->dirty, e);
+ }
}
}
static void queue_demotion(struct smq_policy *mq)
{
+ int r;
struct policy_work work;
struct entry *e;
@@ -1189,7 +1206,7 @@ static void queue_demotion(struct smq_policy *mq)
e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
if (!e) {
if (!clean_target_met(mq, true))
- queue_writeback(mq);
+ queue_writeback(mq, false);
return;
}
@@ -1199,12 +1216,17 @@ static void queue_demotion(struct smq_policy *mq)
work.op = POLICY_DEMOTE;
work.oblock = e->oblock;
work.cblock = infer_cblock(mq, e);
- btracker_queue(mq->bg_work, &work, NULL);
+ r = btracker_queue(mq->bg_work, &work, NULL);
+ if (r) {
+ clear_pending(mq, e);
+ q_push_front(&mq->clean, e);
+ }
}
static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
struct policy_work **workp)
{
+ int r;
struct entry *e;
struct policy_work work;
@@ -1234,7 +1256,9 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
work.op = POLICY_PROMOTE;
work.oblock = oblock;
work.cblock = infer_cblock(mq, e);
- btracker_queue(mq->bg_work, &work, workp);
+ r = btracker_queue(mq->bg_work, &work, workp);
+ if (r)
+ free_entry(&mq->cache_alloc, e);
}
/*----------------------------------------------------------------*/
@@ -1418,7 +1442,7 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
r = btracker_issue(mq->bg_work, result);
if (r == -ENODATA) {
if (!clean_target_met(mq, idle)) {
- queue_writeback(mq);
+ queue_writeback(mq, idle);
r = btracker_issue(mq->bg_work, result);
}
}
@@ -1778,7 +1802,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
mq->next_hotspot_period = jiffies;
mq->next_cache_period = jiffies;
- mq->bg_work = btracker_create(10240); /* FIXME: hard coded value */
+ mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */
if (!mq->bg_work)
goto bad_btracker;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 8785134c9f1f..cf23a14f9c6a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -408,9 +408,7 @@ struct cache {
int sectors_per_block_shift;
spinlock_t lock;
- struct list_head deferred_cells;
struct bio_list deferred_bios;
- struct bio_list deferred_writethrough_bios;
sector_t migration_threshold;
wait_queue_head_t migration_wait;
atomic_t nr_allocated_migrations;
@@ -446,10 +444,10 @@ struct cache {
struct dm_kcopyd_client *copier;
struct workqueue_struct *wq;
struct work_struct deferred_bio_worker;
- struct work_struct deferred_writethrough_worker;
struct work_struct migration_worker;
struct delayed_work waker;
struct dm_bio_prison_v2 *prison;
+ struct bio_set *bs;
mempool_t *migration_pool;
@@ -490,15 +488,6 @@ struct per_bio_data {
struct dm_bio_prison_cell_v2 *cell;
struct dm_hook_info hook_info;
sector_t len;
-
- /*
- * writethrough fields. These MUST remain at the end of this
- * structure and the 'cache' member must be the first as it
- * is used to determine the offset of the writethrough fields.
- */
- struct cache *cache;
- dm_cblock_t cblock;
- struct dm_bio_details bio_details;
};
struct dm_cache_migration {
@@ -515,19 +504,19 @@ struct dm_cache_migration {
/*----------------------------------------------------------------*/
-static bool writethrough_mode(struct cache_features *f)
+static bool writethrough_mode(struct cache *cache)
{
- return f->io_mode == CM_IO_WRITETHROUGH;
+ return cache->features.io_mode == CM_IO_WRITETHROUGH;
}
-static bool writeback_mode(struct cache_features *f)
+static bool writeback_mode(struct cache *cache)
{
- return f->io_mode == CM_IO_WRITEBACK;
+ return cache->features.io_mode == CM_IO_WRITEBACK;
}
-static inline bool passthrough_mode(struct cache_features *f)
+static inline bool passthrough_mode(struct cache *cache)
{
- return unlikely(f->io_mode == CM_IO_PASSTHROUGH);
+ return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
}
/*----------------------------------------------------------------*/
@@ -537,14 +526,9 @@ static void wake_deferred_bio_worker(struct cache *cache)
queue_work(cache->wq, &cache->deferred_bio_worker);
}
-static void wake_deferred_writethrough_worker(struct cache *cache)
-{
- queue_work(cache->wq, &cache->deferred_writethrough_worker);
-}
-
static void wake_migration_worker(struct cache *cache)
{
- if (passthrough_mode(&cache->features))
+ if (passthrough_mode(cache))
return;
queue_work(cache->wq, &cache->migration_worker);
@@ -567,10 +551,13 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
struct dm_cache_migration *mg;
mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
- if (mg) {
- mg->cache = cache;
- atomic_inc(&mg->cache->nr_allocated_migrations);
- }
+ if (!mg)
+ return NULL;
+
+ memset(mg, 0, sizeof(*mg));
+
+ mg->cache = cache;
+ atomic_inc(&cache->nr_allocated_migrations);
return mg;
}
@@ -618,27 +605,16 @@ static unsigned lock_level(struct bio *bio)
* Per bio data
*--------------------------------------------------------------*/
-/*
- * If using writeback, leave out struct per_bio_data's writethrough fields.
- */
-#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
-#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
-
-static size_t get_per_bio_data_size(struct cache *cache)
+static struct per_bio_data *get_per_bio_data(struct bio *bio)
{
- return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
-}
-
-static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
-{
- struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
BUG_ON(!pb);
return pb;
}
-static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
+static struct per_bio_data *init_per_bio_data(struct bio *bio)
{
- struct per_bio_data *pb = get_per_bio_data(bio, data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
pb->tick = false;
pb->req_nr = dm_bio_get_target_bio_nr(bio);
@@ -678,7 +654,6 @@ static void defer_bios(struct cache *cache, struct bio_list *bios)
static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
{
bool r;
- size_t pb_size;
struct per_bio_data *pb;
struct dm_cell_key_v2 key;
dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
@@ -703,8 +678,7 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
if (cell != cell_prealloc)
free_prison_cell(cache, cell_prealloc);
- pb_size = get_per_bio_data_size(cache);
- pb = get_per_bio_data(bio, pb_size);
+ pb = get_per_bio_data(bio);
pb->cell = cell;
return r;
@@ -856,28 +830,35 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
unsigned long flags;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb;
spin_lock_irqsave(&cache->lock, flags);
if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
bio_op(bio) != REQ_OP_DISCARD) {
+ pb = get_per_bio_data(bio);
pb->tick = true;
cache->need_tick_bio = false;
}
spin_unlock_irqrestore(&cache->lock, flags);
}
-static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
- dm_oblock_t oblock)
+static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock, bool bio_has_pbd)
{
- // FIXME: this is called way too much.
- check_if_tick_bio_needed(cache, bio);
+ if (bio_has_pbd)
+ check_if_tick_bio_needed(cache, bio);
remap_to_origin(cache, bio);
if (bio_data_dir(bio) == WRITE)
clear_discard(cache, oblock_to_dblock(cache, oblock));
}
+static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock)
+{
+ // FIXME: check_if_tick_bio_needed() is called way too much through this interface
+ __remap_to_origin_clear_discard(cache, bio, oblock, true);
+}
+
static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{
@@ -908,10 +889,10 @@ static bool accountable_bio(struct cache *cache, struct bio *bio)
static void accounted_begin(struct cache *cache, struct bio *bio)
{
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb;
if (accountable_bio(cache, bio)) {
+ pb = get_per_bio_data(bio);
pb->len = bio_sectors(bio);
iot_io_begin(&cache->tracker, pb->len);
}
@@ -919,8 +900,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
static void accounted_complete(struct cache *cache, struct bio *bio)
{
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
iot_io_end(&cache->tracker, pb->len);
}
@@ -937,57 +917,26 @@ static void issue_op(struct bio *bio, void *context)
accounted_request(cache, bio);
}
-static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
- bio_list_add(&cache->deferred_writethrough_bios, bio);
- spin_unlock_irqrestore(&cache->lock, flags);
-
- wake_deferred_writethrough_worker(cache);
-}
-
-static void writethrough_endio(struct bio *bio)
-{
- struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
-
- dm_unhook_bio(&pb->hook_info, bio);
-
- if (bio->bi_status) {
- bio_endio(bio);
- return;
- }
-
- dm_bio_restore(&pb->bio_details, bio);
- remap_to_cache(pb->cache, bio, pb->cblock);
-
- /*
- * We can't issue this bio directly, since we're in interrupt
- * context. So it gets put on a bio list for processing by the
- * worker thread.
- */
- defer_writethrough_bio(pb->cache, bio);
-}
-
/*
- * FIXME: send in parallel, huge latency as is.
* When running in writethrough mode we need to send writes to clean blocks
- * to both the cache and origin devices. In future we'd like to clone the
- * bio and send them in parallel, but for now we're doing them in
- * series as this is easier.
+ * to both the cache and origin devices. Clone the bio and send them in parallel.
*/
-static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
- dm_oblock_t oblock, dm_cblock_t cblock)
+static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock, dm_cblock_t cblock)
{
- struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
+ struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, cache->bs);
+
+ BUG_ON(!origin_bio);
- pb->cache = cache;
- pb->cblock = cblock;
- dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
- dm_bio_record(&pb->bio_details, bio);
+ bio_chain(origin_bio, bio);
+ /*
+ * Passing false to __remap_to_origin_clear_discard() skips
+ * all code that might use per_bio_data (since clone doesn't have it)
+ */
+ __remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
+ submit_bio(origin_bio);
- remap_to_origin_clear_discard(pb->cache, bio, oblock);
+ remap_to_cache(cache, bio, cblock);
}
/*----------------------------------------------------------------
@@ -1201,6 +1150,18 @@ static void background_work_end(struct cache *cache)
/*----------------------------------------------------------------*/
+static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
+{
+ return (bio_data_dir(bio) == WRITE) &&
+ (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
+}
+
+static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
+{
+ return writeback_mode(cache) &&
+ (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
+}
+
static void quiesce(struct dm_cache_migration *mg,
void (*continuation)(struct work_struct *))
{
@@ -1248,8 +1209,7 @@ static int copy(struct dm_cache_migration *mg, bool promote)
static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
{
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell))
free_prison_cell(cache, pb->cell);
@@ -1260,23 +1220,21 @@ static void overwrite_endio(struct bio *bio)
{
struct dm_cache_migration *mg = bio->bi_private;
struct cache *cache = mg->cache;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
dm_unhook_bio(&pb->hook_info, bio);
if (bio->bi_status)
mg->k.input = bio->bi_status;
- queue_continuation(mg->cache->wq, &mg->k);
+ queue_continuation(cache->wq, &mg->k);
}
static void overwrite(struct dm_cache_migration *mg,
void (*continuation)(struct work_struct *))
{
struct bio *bio = mg->overwrite_bio;
- size_t pb_data_size = get_per_bio_data_size(mg->cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
@@ -1474,13 +1432,51 @@ static void mg_upgrade_lock(struct work_struct *ws)
}
}
+static void mg_full_copy(struct work_struct *ws)
+{
+ struct dm_cache_migration *mg = ws_to_mg(ws);
+ struct cache *cache = mg->cache;
+ struct policy_work *op = mg->op;
+ bool is_policy_promote = (op->op == POLICY_PROMOTE);
+
+ if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
+ is_discarded_oblock(cache, op->oblock)) {
+ mg_upgrade_lock(ws);
+ return;
+ }
+
+ init_continuation(&mg->k, mg_upgrade_lock);
+
+ if (copy(mg, is_policy_promote)) {
+ DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
+ mg->k.input = BLK_STS_IOERR;
+ mg_complete(mg, false);
+ }
+}
+
static void mg_copy(struct work_struct *ws)
{
- int r;
struct dm_cache_migration *mg = ws_to_mg(ws);
if (mg->overwrite_bio) {
/*
+ * No exclusive lock was held when we last checked if the bio
+ * was optimisable. So we have to check again in case things
+ * have changed (eg, the block may no longer be discarded).
+ */
+ if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
+ /*
+ * Fallback to a real full copy after doing some tidying up.
+ */
+ bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
+ BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
+ mg->overwrite_bio = NULL;
+ inc_io_migrations(mg->cache);
+ mg_full_copy(ws);
+ return;
+ }
+
+ /*
* It's safe to do this here, even though it's new data
* because all IO has been locked out of the block.
*
@@ -1489,26 +1485,8 @@ static void mg_copy(struct work_struct *ws)
*/
overwrite(mg, mg_update_metadata_after_copy);
- } else {
- struct cache *cache = mg->cache;
- struct policy_work *op = mg->op;
- bool is_policy_promote = (op->op == POLICY_PROMOTE);
-
- if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
- is_discarded_oblock(cache, op->oblock)) {
- mg_upgrade_lock(ws);
- return;
- }
-
- init_continuation(&mg->k, mg_upgrade_lock);
-
- r = copy(mg, is_policy_promote);
- if (r) {
- DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
- mg->k.input = BLK_STS_IOERR;
- mg_complete(mg, false);
- }
- }
+ } else
+ mg_full_copy(ws);
}
static int mg_lock_writes(struct dm_cache_migration *mg)
@@ -1567,9 +1545,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
return -ENOMEM;
}
- memset(mg, 0, sizeof(*mg));
-
- mg->cache = cache;
mg->op = op;
mg->overwrite_bio = bio;
@@ -1703,9 +1678,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
return -ENOMEM;
}
- memset(mg, 0, sizeof(*mg));
-
- mg->cache = cache;
mg->overwrite_bio = bio;
mg->invalidate_cblock = cblock;
mg->invalidate_oblock = oblock;
@@ -1748,26 +1720,12 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
/*----------------------------------------------------------------*/
-static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
-{
- return (bio_data_dir(bio) == WRITE) &&
- (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
-}
-
-static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
-{
- return writeback_mode(&cache->features) &&
- (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
-}
-
static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
bool *commit_needed)
{
int r, data_dir;
bool rb, background_queued;
dm_cblock_t cblock;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
*commit_needed = false;
@@ -1816,6 +1774,8 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
}
if (r == -ENOENT) {
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
/*
* Miss.
*/
@@ -1823,7 +1783,6 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
if (pb->req_nr == 0) {
accounted_begin(cache, bio);
remap_to_origin_clear_discard(cache, bio, block);
-
} else {
/*
* This is a duplicate writethrough io that is no
@@ -1842,18 +1801,17 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
* Passthrough always maps to the origin, invalidating any
* cache blocks that are written to.
*/
- if (passthrough_mode(&cache->features)) {
+ if (passthrough_mode(cache)) {
if (bio_data_dir(bio) == WRITE) {
bio_drop_shared_lock(cache, bio);
atomic_inc(&cache->stats.demotion);
invalidate_start(cache, cblock, block, bio);
} else
remap_to_origin_clear_discard(cache, bio, block);
-
} else {
- if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
+ if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
!is_dirty(cache, cblock)) {
- remap_to_origin_then_cache(cache, bio, block, cblock);
+ remap_to_origin_and_cache(cache, bio, block, cblock);
accounted_begin(cache, bio);
} else
remap_to_cache_dirty(cache, bio, block, cblock);
@@ -1922,8 +1880,7 @@ static blk_status_t commit_op(void *context)
static bool process_flush_bio(struct cache *cache, struct bio *bio)
{
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
if (!pb->req_nr)
remap_to_origin(cache, bio);
@@ -1983,28 +1940,6 @@ static void process_deferred_bios(struct work_struct *ws)
schedule_commit(&cache->committer);
}
-static void process_deferred_writethrough_bios(struct work_struct *ws)
-{
- struct cache *cache = container_of(ws, struct cache, deferred_writethrough_worker);
-
- unsigned long flags;
- struct bio_list bios;
- struct bio *bio;
-
- bio_list_init(&bios);
-
- spin_lock_irqsave(&cache->lock, flags);
- bio_list_merge(&bios, &cache->deferred_writethrough_bios);
- bio_list_init(&cache->deferred_writethrough_bios);
- spin_unlock_irqrestore(&cache->lock, flags);
-
- /*
- * These bios have already been through accounted_begin()
- */
- while ((bio = bio_list_pop(&bios)))
- generic_make_request(bio);
-}
-
/*----------------------------------------------------------------
* Main worker loop
*--------------------------------------------------------------*/
@@ -2112,6 +2047,9 @@ static void destroy(struct cache *cache)
kfree(cache->ctr_args[i]);
kfree(cache->ctr_args);
+ if (cache->bs)
+ bioset_free(cache->bs);
+
kfree(cache);
}
@@ -2555,8 +2493,15 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->discards_supported = true;
ti->split_discard_bios = false;
+ ti->per_io_data_size = sizeof(struct per_bio_data);
+
cache->features = ca->features;
- ti->per_io_data_size = get_per_bio_data_size(cache);
+ if (writethrough_mode(cache)) {
+ /* Create bioset for writethrough bios issued to origin */
+ cache->bs = bioset_create(BIO_POOL_SIZE, 0, 0);
+ if (!cache->bs)
+ goto bad;
+ }
cache->callbacks.congested_fn = cache_is_congested;
dm_table_add_target_callbacks(ti->table, &cache->callbacks);
@@ -2618,7 +2563,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
- if (passthrough_mode(&cache->features)) {
+ if (passthrough_mode(cache)) {
bool all_clean;
r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
@@ -2637,9 +2582,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
}
spin_lock_init(&cache->lock);
- INIT_LIST_HEAD(&cache->deferred_cells);
bio_list_init(&cache->deferred_bios);
- bio_list_init(&cache->deferred_writethrough_bios);
atomic_set(&cache->nr_allocated_migrations, 0);
atomic_set(&cache->nr_io_migrations, 0);
init_waitqueue_head(&cache->migration_wait);
@@ -2678,8 +2621,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios);
- INIT_WORK(&cache->deferred_writethrough_worker,
- process_deferred_writethrough_bios);
INIT_WORK(&cache->migration_worker, check_migrations);
INIT_DELAYED_WORK(&cache->waker, do_waker);
@@ -2795,9 +2736,8 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
int r;
bool commit_needed;
dm_oblock_t block = get_bio_block(cache, bio);
- size_t pb_data_size = get_per_bio_data_size(cache);
- init_per_bio_data(bio, pb_data_size);
+ init_per_bio_data(bio);
if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
/*
* This can only occur if the io goes to a partial block at
@@ -2821,13 +2761,11 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
return r;
}
-static int cache_end_io(struct dm_target *ti, struct bio *bio,
- blk_status_t *error)
+static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
struct cache *cache = ti->private;
unsigned long flags;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
if (pb->tick) {
policy_tick(cache->policy, false);
@@ -3243,13 +3181,13 @@ static void cache_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("1 ");
- if (writethrough_mode(&cache->features))
+ if (writethrough_mode(cache))
DMEMIT("writethrough ");
- else if (passthrough_mode(&cache->features))
+ else if (passthrough_mode(cache))
DMEMIT("passthrough ");
- else if (writeback_mode(&cache->features))
+ else if (writeback_mode(cache))
DMEMIT("writeback ");
else {
@@ -3415,7 +3353,7 @@ static int process_invalidate_cblocks_message(struct cache *cache, unsigned coun
unsigned i;
struct cblock_range range;
- if (!passthrough_mode(&cache->features)) {
+ if (!passthrough_mode(cache)) {
DMERR("%s: cache has to be in passthrough mode for invalidation",
cache_device_name(cache));
return -EPERM;
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 203144762f36..6a14f945783c 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -29,7 +29,6 @@ struct dm_kobject_holder {
* DM targets must _not_ deference a mapped_device to directly access its members!
*/
struct mapped_device {
- struct srcu_struct io_barrier;
struct mutex suspend_lock;
/*
@@ -127,6 +126,8 @@ struct mapped_device {
struct blk_mq_tag_set *tag_set;
bool use_blk_mq:1;
bool init_tio_pdu:1;
+
+ struct srcu_struct io_barrier;
};
void dm_init_md_queue(struct mapped_device *md);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 96ab46512e1f..9fc12f556534 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1075,7 +1075,7 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
/* Reject unexpected unaligned bio. */
- if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
+ if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
return -EIO;
dmreq = dmreq_of_req(cc, req);
@@ -1168,7 +1168,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
int r = 0;
/* Reject unexpected unaligned bio. */
- if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
+ if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
return -EIO;
dmreq = dmreq_of_req(cc, req);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 096fe9b66c50..61180783ef42 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -6,6 +6,7 @@
* This file is released under the GPL.
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
@@ -80,13 +81,13 @@ struct journal_entry {
#define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
#if BITS_PER_LONG == 64
-#define journal_entry_set_sector(je, x) do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0)
+#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#elif defined(CONFIG_LBDAF)
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0)
+#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#else
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0)
+#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0)
#define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo)
#endif
#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
@@ -320,7 +321,7 @@ static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, in
static int dm_integrity_failed(struct dm_integrity_c *ic)
{
- return ACCESS_ONCE(ic->failed);
+ return READ_ONCE(ic->failed);
}
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
@@ -1376,7 +1377,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
struct bvec_iter iter;
struct bio_vec bv;
bio_for_each_segment(bv, bio, iter) {
- if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
+ if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
bv.bv_offset, bv.bv_len, ic->sectors_per_block);
return DM_MAPIO_KILL;
@@ -1545,7 +1546,7 @@ retry_kmap:
smp_mb();
if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
wake_up(&ic->copy_to_journal_wait);
- if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
+ if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
queue_work(ic->commit_wq, &ic->commit_work);
} else {
schedule_autocommit(ic);
@@ -1798,7 +1799,7 @@ static void integrity_commit(struct work_struct *w)
ic->n_committed_sections += commit_sections;
spin_unlock_irq(&ic->endio_wait.lock);
- if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
+ if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
queue_work(ic->writer_wq, &ic->writer_work);
release_flush_bios:
@@ -1980,7 +1981,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
- if (ACCESS_ONCE(ic->suspending))
+ if (READ_ONCE(ic->suspending))
return;
spin_lock_irq(&ic->endio_wait.lock);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index cf2c67e35eaf..eb45cc3df31d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -107,7 +107,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t)
try_again:
spin_lock_irq(&throttle_spinlock);
- throttle = ACCESS_ONCE(t->throttle);
+ throttle = READ_ONCE(t->throttle);
if (likely(throttle >= 100))
goto skip_limit;
@@ -157,7 +157,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)
t->num_io_jobs--;
- if (likely(ACCESS_ONCE(t->throttle) >= 100))
+ if (likely(READ_ONCE(t->throttle) >= 100))
goto skip_limit;
if (!t->num_io_jobs) {
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 8b80a9ce9ea9..189badbeddaf 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -10,9 +10,11 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
+#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <linux/uio.h>
#define DM_MSG_PREFIX "log-writes"
@@ -246,27 +248,108 @@ error:
return -1;
}
+static int write_inline_data(struct log_writes_c *lc, void *entry,
+ size_t entrylen, void *data, size_t datalen,
+ sector_t sector)
+{
+ int num_pages, bio_pages, pg_datalen, pg_sectorlen, i;
+ struct page *page;
+ struct bio *bio;
+ size_t ret;
+ void *ptr;
+
+ while (datalen) {
+ num_pages = ALIGN(datalen, PAGE_SIZE) >> PAGE_SHIFT;
+ bio_pages = min(num_pages, BIO_MAX_PAGES);
+
+ atomic_inc(&lc->io_blocks);
+
+ bio = bio_alloc(GFP_KERNEL, bio_pages);
+ if (!bio) {
+ DMERR("Couldn't alloc inline data bio");
+ goto error;
+ }
+
+ bio->bi_iter.bi_size = 0;
+ bio->bi_iter.bi_sector = sector;
+ bio_set_dev(bio, lc->logdev->bdev);
+ bio->bi_end_io = log_end_io;
+ bio->bi_private = lc;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+ for (i = 0; i < bio_pages; i++) {
+ pg_datalen = min_t(int, datalen, PAGE_SIZE);
+ pg_sectorlen = ALIGN(pg_datalen, lc->sectorsize);
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ DMERR("Couldn't alloc inline data page");
+ goto error_bio;
+ }
+
+ ptr = kmap_atomic(page);
+ memcpy(ptr, data, pg_datalen);
+ if (pg_sectorlen > pg_datalen)
+ memset(ptr + pg_datalen, 0, pg_sectorlen - pg_datalen);
+ kunmap_atomic(ptr);
+
+ ret = bio_add_page(bio, page, pg_sectorlen, 0);
+ if (ret != pg_sectorlen) {
+ DMERR("Couldn't add page of inline data");
+ __free_page(page);
+ goto error_bio;
+ }
+
+ datalen -= pg_datalen;
+ data += pg_datalen;
+ }
+ submit_bio(bio);
+
+ sector += bio_pages * PAGE_SECTORS;
+ }
+ return 0;
+error_bio:
+ bio_free_pages(bio);
+ bio_put(bio);
+error:
+ put_io_block(lc);
+ return -1;
+}
+
static int log_one_block(struct log_writes_c *lc,
struct pending_block *block, sector_t sector)
{
struct bio *bio;
struct log_write_entry entry;
- size_t ret;
+ size_t metadatalen, ret;
int i;
entry.sector = cpu_to_le64(block->sector);
entry.nr_sectors = cpu_to_le64(block->nr_sectors);
entry.flags = cpu_to_le64(block->flags);
entry.data_len = cpu_to_le64(block->datalen);
+
+ metadatalen = (block->flags & LOG_MARK_FLAG) ? block->datalen : 0;
if (write_metadata(lc, &entry, sizeof(entry), block->data,
- block->datalen, sector)) {
+ metadatalen, sector)) {
free_pending_block(lc, block);
return -1;
}
+ sector += dev_to_bio_sectors(lc, 1);
+
+ if (block->datalen && metadatalen == 0) {
+ if (write_inline_data(lc, &entry, sizeof(entry), block->data,
+ block->datalen, sector)) {
+ free_pending_block(lc, block);
+ return -1;
+ }
+ /* we don't support both inline data & bio data */
+ goto out;
+ }
+
if (!block->vec_cnt)
goto out;
- sector += dev_to_bio_sectors(lc, 1);
atomic_inc(&lc->io_blocks);
bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
@@ -527,6 +610,51 @@ static int log_mark(struct log_writes_c *lc, char *data)
return 0;
}
+static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes,
+ struct iov_iter *i)
+{
+ struct pending_block *block;
+
+ if (!bytes)
+ return 0;
+
+ block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
+ if (!block) {
+ DMERR("Error allocating dax pending block");
+ return -ENOMEM;
+ }
+
+ block->data = kzalloc(bytes, GFP_KERNEL);
+ if (!block->data) {
+ DMERR("Error allocating dax data space");
+ kfree(block);
+ return -ENOMEM;
+ }
+
+ /* write data provided via the iterator */
+ if (!copy_from_iter(block->data, bytes, i)) {
+ DMERR("Error copying dax data");
+ kfree(block->data);
+ kfree(block);
+ return -EIO;
+ }
+
+ /* rewind the iterator so that the block driver can use it */
+ iov_iter_revert(i, bytes);
+
+ block->datalen = bytes;
+ block->sector = bio_to_dev_sectors(lc, sector);
+ block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;
+
+ atomic_inc(&lc->pending_blocks);
+ spin_lock_irq(&lc->blocks_lock);
+ list_add_tail(&block->list, &lc->unflushed_blocks);
+ spin_unlock_irq(&lc->blocks_lock);
+ wake_up_process(lc->log_kthread);
+
+ return 0;
+}
+
static void log_writes_dtr(struct dm_target *ti)
{
struct log_writes_c *lc = ti->private;
@@ -792,9 +920,46 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit
limits->io_min = limits->physical_block_size;
}
+static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
+{
+ struct log_writes_c *lc = ti->private;
+ sector_t sector = pgoff * PAGE_SECTORS;
+ int ret;
+
+ ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages * PAGE_SIZE, &pgoff);
+ if (ret)
+ return ret;
+ return dax_direct_access(lc->dev->dax_dev, pgoff, nr_pages, kaddr, pfn);
+}
+
+static size_t log_writes_dax_copy_from_iter(struct dm_target *ti,
+ pgoff_t pgoff, void *addr, size_t bytes,
+ struct iov_iter *i)
+{
+ struct log_writes_c *lc = ti->private;
+ sector_t sector = pgoff * PAGE_SECTORS;
+ int err;
+
+ if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
+ return 0;
+
+ /* Don't bother doing anything if logging has been disabled */
+ if (!lc->logging_enabled)
+ goto dax_copy;
+
+ err = log_dax(lc, sector, bytes, i);
+ if (err) {
+ DMWARN("Error %d logging DAX write", err);
+ return 0;
+ }
+dax_copy:
+ return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
+}
+
static struct target_type log_writes_target = {
.name = "log-writes",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = log_writes_ctr,
.dtr = log_writes_dtr,
@@ -805,6 +970,8 @@ static struct target_type log_writes_target = {
.message = log_writes_message,
.iterate_devices = log_writes_iterate_devices,
.io_hints = log_writes_io_hints,
+ .direct_access = log_writes_dax_direct_access,
+ .dax_copy_from_iter = log_writes_dax_copy_from_iter,
};
static int __init dm_log_writes_init(void)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 11f273d2f018..204606bef9e2 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m,
pgpath = path_to_pgpath(path);
- if (unlikely(lockless_dereference(m->current_pg) != pg)) {
+ if (unlikely(READ_ONCE(m->current_pg) != pg)) {
/* Only update current_pgpath if pg changed */
spin_lock_irqsave(&m->lock, flags);
m->current_pgpath = pgpath;
@@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
}
/* Were we instructed to switch PG? */
- if (lockless_dereference(m->next_pg)) {
+ if (READ_ONCE(m->next_pg)) {
spin_lock_irqsave(&m->lock, flags);
pg = m->next_pg;
if (!pg) {
@@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
/* Don't change PG until it has no remaining paths */
check_current_pg:
- pg = lockless_dereference(m->current_pg);
+ pg = READ_ONCE(m->current_pg);
if (pg) {
pgpath = choose_path_in_pg(m, pg, nr_bytes);
if (!IS_ERR_OR_NULL(pgpath))
@@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
struct request *clone;
/* Do we need to select a new pgpath? */
- pgpath = lockless_dereference(m->current_pgpath);
+ pgpath = READ_ONCE(m->current_pgpath);
if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
pgpath = choose_pgpath(m, nr_bytes);
@@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
bool queue_io;
/* Do we need to select a new pgpath? */
- pgpath = lockless_dereference(m->current_pgpath);
+ pgpath = READ_ONCE(m->current_pgpath);
queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
if (!pgpath || !queue_io)
pgpath = choose_pgpath(m, nr_bytes);
@@ -641,14 +641,6 @@ static void process_queued_bios(struct work_struct *work)
blk_finish_plug(&plug);
}
-static void assign_bit(bool value, long nr, unsigned long *addr)
-{
- if (value)
- set_bit(nr, addr);
- else
- clear_bit(nr, addr);
-}
-
/*
* If we run out of usable paths, should we queue I/O or error it?
*/
@@ -658,11 +650,11 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- assign_bit((save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
- (!save_old_value && queue_if_no_path),
- MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
- assign_bit(queue_if_no_path || dm_noflush_suspending(m->ti),
- MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
+ (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
+ (!save_old_value && queue_if_no_path));
+ assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
+ queue_if_no_path || dm_noflush_suspending(m->ti));
spin_unlock_irqrestore(&m->lock, flags);
if (!queue_if_no_path) {
@@ -1588,8 +1580,8 @@ static void multipath_resume(struct dm_target *ti)
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- assign_bit(test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
- MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
spin_unlock_irqrestore(&m->lock, flags);
}
@@ -1804,7 +1796,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
struct pgpath *current_pgpath;
int r;
- current_pgpath = lockless_dereference(m->current_pgpath);
+ current_pgpath = READ_ONCE(m->current_pgpath);
if (!current_pgpath)
current_pgpath = choose_pgpath(m, 0);
@@ -1826,7 +1818,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
}
if (r == -ENOTCONN) {
- if (!lockless_dereference(m->current_pg)) {
+ if (!READ_ONCE(m->current_pg)) {
/* Path status changed, redo selection */
(void) choose_pgpath(m, 0);
}
@@ -1895,9 +1887,9 @@ static int multipath_busy(struct dm_target *ti)
return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
/* Guess which priority_group will be used at next mapping time */
- pg = lockless_dereference(m->current_pg);
- next_pg = lockless_dereference(m->next_pg);
- if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
+ pg = READ_ONCE(m->current_pg);
+ next_pg = READ_ONCE(m->next_pg);
+ if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
pg = next_pg;
if (!pg) {
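
The dm-mpath hunks above replace lockless_dereference() with READ_ONCE(), and later hunks in this patch convert ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE() in the same way. The macros force a single, untorn load or store that the compiler may not fuse or hoist out of a loop; READ_ONCE() also carries the dependency-ordering guarantee that lockless_dereference() used to provide. Below is a minimal userspace model of the idiom, a sketch only: the macro definitions are simplified stand-ins for the kernel's, and the flag/data variables are invented for illustration.

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel macros: force one untorn access. */
#define READ_ONCE(x)     (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static int data;
static int ready;	/* polled flag: written by one thread, read by another */

static void *producer(void *arg)
{
	(void)arg;
	data = 42;		/* ordinary store */
	WRITE_ONCE(ready, 1);	/* single store the reader can safely poll */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	while (!READ_ONCE(ready))	/* fresh load each iteration, never hoisted */
		;
	/*
	 * READ_ONCE()/WRITE_ONCE() constrain the compiler only; ordering of
	 * 'data' against 'ready' on weakly ordered CPUs still needs barriers.
	 * Here the join below provides that ordering before 'data' is read.
	 */
	pthread_join(t, NULL);
	printf("data = %d\n", data);
	return 0;
}

The assign_bit() hunks in the same file are a separate cleanup: the driver-local helper is dropped in favour of the generic assign_bit(nr, addr, value) from <linux/bitops.h>, which takes the bit number first.
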
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2245d06d2045..366c625b9591 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -12,7 +12,7 @@
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
#include <linux/device-mapper.h>
@@ -2143,13 +2143,6 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
struct dm_raid_superblock *refsb;
uint64_t events_sb, events_refsb;
- rdev->sb_start = 0;
- rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
- if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
- DMERR("superblock size of a logical block is no longer valid");
- return -EINVAL;
- }
-
r = read_disk_sb(rdev, rdev->sb_size, false);
if (r)
return r;
@@ -2494,6 +2487,17 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (test_bit(Journal, &rdev->flags))
continue;
+ if (!rdev->meta_bdev)
+ continue;
+
+ /* Set superblock offset/size for metadata device. */
+ rdev->sb_start = 0;
+ rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
+ if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
+ DMERR("superblock size of a logical block is no longer valid");
+ return -EINVAL;
+ }
+
/*
* Skipping super_load due to CTR_FLAG_SYNC will cause
* the array to undergo initialization again as
@@ -2506,9 +2510,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
continue;
- if (!rdev->meta_bdev)
- continue;
-
r = super_load(rdev, freshest);
switch (r) {
@@ -3629,8 +3630,11 @@ static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
- if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
+ if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+ mddev_lock_nointr(&rs->md);
mddev_suspend(&rs->md);
+ mddev_unlock(&rs->md);
+ }
rs->md.ro = 1;
}
@@ -3887,8 +3891,11 @@ static void raid_resume(struct dm_target *ti)
if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
+ if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+ mddev_lock_nointr(mddev);
mddev_resume(mddev);
+ mddev_unlock(mddev);
+ }
}
static struct target_type raid_target = {
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index eadfcfd106ff..9d32f25489c2 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -56,7 +56,7 @@ static unsigned dm_get_blk_mq_queue_depth(void)
int dm_request_based(struct mapped_device *md)
{
- return blk_queue_stackable(md->queue);
+ return queue_is_rq_based(md->queue);
}
static void dm_old_start_queue(struct request_queue *q)
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index a7868503d135..29bc51084c82 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -432,7 +432,7 @@ do_sync_free:
synchronize_rcu_expedited();
dm_stat_free(&s->rcu_head);
} else {
- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+ WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
call_rcu(&s->rcu_head, dm_stat_free);
}
return 0;
@@ -640,12 +640,12 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
*/
last = raw_cpu_ptr(stats->last);
stats_aux->merged =
- (bi_sector == (ACCESS_ONCE(last->last_sector) &&
+ (bi_sector == (READ_ONCE(last->last_sector) &&
((bi_rw == WRITE) ==
- (ACCESS_ONCE(last->last_rw) == WRITE))
+ (READ_ONCE(last->last_rw) == WRITE))
));
- ACCESS_ONCE(last->last_sector) = end_sector;
- ACCESS_ONCE(last->last_rw) = bi_rw;
+ WRITE_ONCE(last->last_sector, end_sector);
+ WRITE_ONCE(last->last_rw, bi_rw);
}
rcu_read_lock();
@@ -694,22 +694,22 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
for_each_possible_cpu(cpu) {
p = &s->stat_percpu[cpu][x];
- shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
- shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
- shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
- shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
- shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
- shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
- shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
- shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
- shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
- shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
- shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
- shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+ shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
+ shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
+ shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
+ shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
+ shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
+ shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
+ shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
+ shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
+ shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
+ shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
+ shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
+ shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
if (s->n_histogram_entries) {
unsigned i;
for (i = 0; i < s->n_histogram_entries + 1; i++)
- shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
+ shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
}
}
}
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 4c8de1ff78ca..8d0ba879777e 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -144,7 +144,7 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long
switch_get_position(sctx, region_nr, &region_index, &bit);
- return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) &
+ return (READ_ONCE(sctx->region_table[region_index]) >> bit) &
((1 << sctx->region_table_entry_bits) - 1);
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ef7b8f201f73..433bd3f3014c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -451,15 +451,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
return r;
}
- atomic_set(&dd->count, 0);
+ refcount_set(&dd->count, 1);
list_add(&dd->list, &t->devices);
} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
return r;
+ refcount_inc(&dd->count);
}
- atomic_inc(&dd->count);
*result = dd->dm_dev;
return 0;
@@ -515,7 +515,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
dm_device_name(ti->table->md), d->name);
return;
}
- if (atomic_dec_and_test(&dd->count)) {
+ if (refcount_dec_and_test(&dd->count)) {
dm_put_table_device(ti->table->md, d);
list_del(&dd->list);
kfree(dd);
@@ -1000,7 +1000,7 @@ verify_rq_based:
list_for_each_entry(dd, devices, list) {
struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
- if (!blk_queue_stackable(q)) {
+ if (!queue_is_rq_based(q)) {
DMERR("table load rejected: including"
" non-request-stackable devices");
return -EINVAL;
@@ -1847,19 +1847,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
*/
if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
-
- /*
- * QUEUE_FLAG_STACKABLE must be set after all queue settings are
- * visible to other CPUs because, once the flag is set, incoming bios
- * are processed by request-based dm, which refers to the queue
- * settings.
- * Until the flag set, bios are passed to bio-based dm and queued to
- * md->deferred where queue settings are not needed yet.
- * Those bios are passed to request-based dm at the resume time.
- */
- smp_mb();
- if (dm_table_request_based(t))
- queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
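
The dd->count changes above are the standard atomic_t -> refcount_t conversion: the counter now starts at 1 for the reference handed back to the caller, additional users take refcount_inc(), and the object is freed only when refcount_dec_and_test() sees the last put. A hedged, kernel-style sketch of that pattern follows; the struct and function names are invented for illustration, and this is a fragment, not something buildable outside a kernel tree.

#include <linux/refcount.h>
#include <linux/slab.h>

struct tracked_dev {
	refcount_t count;
	/* ... payload ... */
};

static struct tracked_dev *tracked_dev_create(void)
{
	struct tracked_dev *td = kzalloc(sizeof(*td), GFP_KERNEL);

	if (td)
		refcount_set(&td->count, 1);	/* creator holds the first reference */
	return td;
}

static void tracked_dev_get(struct tracked_dev *td)
{
	refcount_inc(&td->count);		/* warns and saturates instead of overflowing */
}

static void tracked_dev_put(struct tracked_dev *td)
{
	if (refcount_dec_and_test(&td->count))	/* true only for the final put */
		kfree(td);
}

Unlike atomic_t, refcount_t refuses to increment from zero, which is why the diff sets the count to 1 at creation time instead of doing an atomic_inc() on a 0-initialized counter afterwards.
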
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1e25705209c2..89e5dff9b4cf 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2431,7 +2431,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
struct pool_c *pt = pool->ti->private;
bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
enum pool_mode old_mode = get_pool_mode(pool);
- unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
+ unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ;
/*
* Never allow the pool to transition to PM_WRITE mode if user
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index bda3caca23ca..aedb8222836b 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -92,74 +92,33 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
return block >> (level * v->hash_per_block_bits);
}
-/*
- * Callback function for asynchronous crypto API completion notification
- */
-static void verity_op_done(struct crypto_async_request *base, int err)
-{
- struct verity_result *res = (struct verity_result *)base->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
-/*
- * Wait for async crypto API callback
- */
-static inline int verity_complete_op(struct verity_result *res, int ret)
-{
- switch (ret) {
- case 0:
- break;
-
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(&res->completion);
- if (!ret)
- ret = res->err;
- reinit_completion(&res->completion);
- break;
-
- default:
- DMERR("verity_wait_hash: crypto op submission failed: %d", ret);
- }
-
- if (unlikely(ret < 0))
- DMERR("verity_wait_hash: crypto op failed: %d", ret);
-
- return ret;
-}
-
static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
const u8 *data, size_t len,
- struct verity_result *res)
+ struct crypto_wait *wait)
{
struct scatterlist sg;
sg_init_one(&sg, data, len);
ahash_request_set_crypt(req, &sg, NULL, len);
- return verity_complete_op(res, crypto_ahash_update(req));
+ return crypto_wait_req(crypto_ahash_update(req), wait);
}
/*
* Wrapper for crypto_ahash_init, which handles verity salting.
*/
static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
- struct verity_result *res)
+ struct crypto_wait *wait)
{
int r;
ahash_request_set_tfm(req, v->tfm);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
- verity_op_done, (void *)res);
- init_completion(&res->completion);
+ crypto_req_done, (void *)wait);
+ crypto_init_wait(wait);
- r = verity_complete_op(res, crypto_ahash_init(req));
+ r = crypto_wait_req(crypto_ahash_init(req), wait);
if (unlikely(r < 0)) {
DMERR("crypto_ahash_init failed: %d", r);
@@ -167,18 +126,18 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
}
if (likely(v->salt_size && (v->version >= 1)))
- r = verity_hash_update(v, req, v->salt, v->salt_size, res);
+ r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
return r;
}
static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
- u8 *digest, struct verity_result *res)
+ u8 *digest, struct crypto_wait *wait)
{
int r;
if (unlikely(v->salt_size && (!v->version))) {
- r = verity_hash_update(v, req, v->salt, v->salt_size, res);
+ r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
if (r < 0) {
DMERR("verity_hash_final failed updating salt: %d", r);
@@ -187,7 +146,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
}
ahash_request_set_crypt(req, NULL, digest, 0);
- r = verity_complete_op(res, crypto_ahash_final(req));
+ r = crypto_wait_req(crypto_ahash_final(req), wait);
out:
return r;
}
@@ -196,17 +155,17 @@ int verity_hash(struct dm_verity *v, struct ahash_request *req,
const u8 *data, size_t len, u8 *digest)
{
int r;
- struct verity_result res;
+ struct crypto_wait wait;
- r = verity_hash_init(v, req, &res);
+ r = verity_hash_init(v, req, &wait);
if (unlikely(r < 0))
goto out;
- r = verity_hash_update(v, req, data, len, &res);
+ r = verity_hash_update(v, req, data, len, &wait);
if (unlikely(r < 0))
goto out;
- r = verity_hash_final(v, req, digest, &res);
+ r = verity_hash_final(v, req, digest, &wait);
out:
return r;
@@ -389,7 +348,7 @@ out:
* Calculates the digest for the given bio
*/
int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io,
- struct bvec_iter *iter, struct verity_result *res)
+ struct bvec_iter *iter, struct crypto_wait *wait)
{
unsigned int todo = 1 << v->data_dev_block_bits;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
@@ -414,7 +373,7 @@ int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io,
*/
sg_set_page(&sg, bv.bv_page, len, bv.bv_offset);
ahash_request_set_crypt(req, &sg, NULL, len);
- r = verity_complete_op(res, crypto_ahash_update(req));
+ r = crypto_wait_req(crypto_ahash_update(req), wait);
if (unlikely(r < 0)) {
DMERR("verity_for_io_block crypto op failed: %d", r);
@@ -482,7 +441,7 @@ static int verity_verify_io(struct dm_verity_io *io)
struct dm_verity *v = io->v;
struct bvec_iter start;
unsigned b;
- struct verity_result res;
+ struct crypto_wait wait;
for (b = 0; b < io->n_blocks; b++) {
int r;
@@ -507,17 +466,17 @@ static int verity_verify_io(struct dm_verity_io *io)
continue;
}
- r = verity_hash_init(v, req, &res);
+ r = verity_hash_init(v, req, &wait);
if (unlikely(r < 0))
return r;
start = io->iter;
- r = verity_for_io_block(v, io, &io->iter, &res);
+ r = verity_for_io_block(v, io, &io->iter, &wait);
if (unlikely(r < 0))
return r;
r = verity_hash_final(v, req, verity_io_real_digest(v, io),
- &res);
+ &wait);
if (unlikely(r < 0))
return r;
@@ -589,7 +548,7 @@ static void verity_prefetch_io(struct work_struct *work)
verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
if (!i) {
- unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
+ unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster);
cluster >>= v->data_dev_block_bits;
if (unlikely(!cluster))
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index a59e0ada6fd3..b675bc015512 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -90,11 +90,6 @@ struct dm_verity_io {
*/
};
-struct verity_result {
- struct completion completion;
- int err;
-};
-
static inline struct ahash_request *verity_io_hash_req(struct dm_verity *v,
struct dm_verity_io *io)
{
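
The dm-verity hunks above drop the driver's private completion wrapper (verity_result/verity_op_done/verity_complete_op) in favour of the generic crypto_wait/crypto_req_done/crypto_wait_req helpers, which fold the 0/-EINPROGRESS/-EBUSY handling into a single call. As a hedged illustration of that generic pattern (not dm-verity's actual code; the function name and algorithm choice are made up), a synchronous digest over the async ahash API looks roughly like this:

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Hash one (non-stack) buffer, waiting synchronously on the async ahash API. */
static int example_ahash_digest(const u8 *data, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	crypto_init_wait(&wait);
	/* crypto_req_done() completes 'wait' when the driver finishes. */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
				   CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, digest, len);

	/* Handles 0, -EINPROGRESS and -EBUSY uniformly. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
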
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index b87c1741da4b..6d7bda6f8190 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -660,6 +660,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
struct dmz_target *dmz = ti->private;
struct request_queue *q;
struct dmz_dev *dev;
+ sector_t aligned_capacity;
int ret;
/* Get the target device */
@@ -685,15 +686,17 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
goto err;
}
+ q = bdev_get_queue(dev->bdev);
dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
- if (ti->begin || (ti->len != dev->capacity)) {
+ aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
+ if (ti->begin ||
+ ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
ti->error = "Partial mapping not supported";
ret = -EINVAL;
goto err;
}
- q = bdev_get_queue(dev->bdev);
- dev->zone_nr_sectors = q->limits.chunk_sectors;
+ dev->zone_nr_sectors = blk_queue_zone_sectors(q);
dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
@@ -929,8 +932,10 @@ static int dmz_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct dmz_target *dmz = ti->private;
+ struct dmz_dev *dev = dmz->dev;
+ sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
- return fn(ti, dmz->ddev, 0, dmz->dev->capacity, data);
+ return fn(ti, dmz->ddev, 0, capacity, data);
}
static struct target_type dmz_type = {
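
The dm-zoned changes above accept a target whose length equals the capacity rounded down to a whole number of zones, and report that aligned capacity through iterate_devices. The rounding relies on the zone size being a power of two, so masking with ~(zone_sectors - 1) drops the partial trailing zone. A small standalone illustration of the arithmetic, with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t zone_sectors = 524288;		/* 256 MiB zones in 512-byte sectors, power of two */
	uint64_t capacity     = 7814037168ULL;	/* example disk capacity in sectors */
	uint64_t aligned      = capacity & ~(zone_sectors - 1);

	printf("capacity        = %llu sectors\n", (unsigned long long)capacity);
	printf("aligned to zone = %llu sectors (%llu full zones)\n",
	       (unsigned long long)aligned,
	       (unsigned long long)(aligned / zone_sectors));
	return 0;
}
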
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4be85324f44d..de17b7193299 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
+#include <linux/refcount.h>
#define DM_MSG_PREFIX "core"
@@ -98,7 +99,7 @@ struct dm_md_mempools {
struct table_device {
struct list_head list;
- atomic_t count;
+ refcount_t count;
struct dm_dev dm_dev;
};
@@ -114,7 +115,7 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
- int param = ACCESS_ONCE(*module_param);
+ int param = READ_ONCE(*module_param);
int modified_param = 0;
bool modified = true;
@@ -136,7 +137,7 @@ static int __dm_get_module_param_int(int *module_param, int min, int max)
unsigned __dm_get_module_param(unsigned *module_param,
unsigned def, unsigned max)
{
- unsigned param = ACCESS_ONCE(*module_param);
+ unsigned param = READ_ONCE(*module_param);
unsigned modified_param = 0;
if (!param)
@@ -685,10 +686,11 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
format_dev_t(td->dm_dev.name, dev);
- atomic_set(&td->count, 0);
+ refcount_set(&td->count, 1);
list_add(&td->list, &md->table_devices);
+ } else {
+ refcount_inc(&td->count);
}
- atomic_inc(&td->count);
mutex_unlock(&md->table_devices_lock);
*result = &td->dm_dev;
@@ -701,7 +703,7 @@ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
struct table_device *td = container_of(d, struct table_device, dm_dev);
mutex_lock(&md->table_devices_lock);
- if (atomic_dec_and_test(&td->count)) {
+ if (refcount_dec_and_test(&td->count)) {
close_table_device(td, md);
list_del(&td->list);
kfree(td);
@@ -718,7 +720,7 @@ static void free_table_devices(struct list_head *devices)
struct table_device *td = list_entry(tmp, struct table_device, list);
DMWARN("dm_destroy: %s still exists with %d references",
- td->dm_dev.name, atomic_read(&td->count));
+ td->dm_dev.name, refcount_read(&td->count));
kfree(td);
}
}
@@ -1619,17 +1621,6 @@ static void dm_wq_work(struct work_struct *work);
void dm_init_md_queue(struct mapped_device *md)
{
/*
- * Request-based dm devices cannot be stacked on top of bio-based dm
- * devices. The type of this dm device may not have been decided yet.
- * The type is decided at the first table loading time.
- * To prevent problematic device stacking, clear the queue flag
- * for request stacking support until then.
- *
- * This queue is new, so no concurrency on the queue_flags.
- */
- queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
-
- /*
* Initialize data that will only be used by a non-blk-mq DM queue
* - must do so here (in alloc_dev callchain) before queue is used
*/
@@ -1695,7 +1686,7 @@ static struct mapped_device *alloc_dev(int minor)
struct mapped_device *md;
void *old_md;
- md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
+ md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
if (!md) {
DMWARN("unable to allocate device, out of memory.");
return NULL;
@@ -1795,7 +1786,7 @@ bad_io_barrier:
bad_minor:
module_put(THIS_MODULE);
bad_module_get:
- kfree(md);
+ kvfree(md);
return NULL;
}
@@ -1814,7 +1805,7 @@ static void free_dev(struct mapped_device *md)
free_minor(minor);
module_put(THIS_MODULE);
- kfree(md);
+ kvfree(md);
}
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
@@ -2072,17 +2063,12 @@ struct mapped_device *dm_get_md(dev_t dev)
spin_lock(&_minor_lock);
md = idr_find(&_minor_idr, minor);
- if (md) {
- if ((md == MINOR_ALLOCED ||
- (MINOR(disk_devt(dm_disk(md))) != minor) ||
- dm_deleting_md(md) ||
- test_bit(DMF_FREEING, &md->flags))) {
- md = NULL;
- goto out;
- }
- dm_get(md);
+ if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
+ test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
+ md = NULL;
+ goto out;
}
-
+ dm_get(md);
out:
spin_unlock(&_minor_lock);
@@ -2709,11 +2695,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
- if (test_bit(DMF_FREEING, &md->flags) ||
- dm_deleting_md(md))
- return NULL;
-
+ spin_lock(&_minor_lock);
+ if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
+ md = NULL;
+ goto out;
+ }
dm_get(md);
+out:
+ spin_unlock(&_minor_lock);
+
return md;
}
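
One change in dm.c above swaps kzalloc_node() for kvzalloc_node() when allocating struct mapped_device, with the matching kfree() -> kvfree() on the free paths. kvzalloc_node() tries the slab allocator first and falls back to vmalloc when the allocation is too large or memory is fragmented; the only obligation it imposes is that the memory be released with kvfree(), which copes with either case. A hedged, kernel-style sketch of the pairing (the struct name is invented, and this is not a buildable module):

#include <linux/mm.h>
#include <linux/slab.h>

struct big_state {
	char scratch[64 * 1024];	/* large enough that kmalloc may fail under fragmentation */
};

static struct big_state *big_state_alloc(int node)
{
	/* Zeroed allocation: slab first, vmalloc fallback. */
	return kvzalloc_node(sizeof(struct big_state), GFP_KERNEL, node);
}

static void big_state_free(struct big_state *s)
{
	kvfree(s);	/* correct whether kmalloc or vmalloc backed it */
}
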
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 38c84c0a35d4..36399bb875dd 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -19,6 +19,7 @@
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>
+#include <linux/refcount.h>
#include "dm-stats.h"
@@ -38,7 +39,7 @@
*/
struct dm_dev_internal {
struct list_head list;
- atomic_t count;
+ refcount_t count;
struct dm_dev *dm_dev;
};
diff --git a/drivers/md/bitmap.c b/drivers/md/md-bitmap.c
index d2121637b4ab..239c7bb3929b 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -29,7 +29,7 @@
#include <linux/seq_file.h>
#include <trace/events/block.h>
#include "md.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
static inline char *bmname(struct bitmap *bitmap)
{
@@ -368,7 +368,7 @@ static int read_page(struct file *file, unsigned long index,
pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT);
- bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
+ bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
if (!bh) {
ret = -ENOMEM;
goto out;
@@ -459,7 +459,11 @@ void bitmap_update_sb(struct bitmap *bitmap)
/* rocking back to read-only */
bitmap->events_cleared = bitmap->mddev->events;
sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
- sb->state = cpu_to_le32(bitmap->flags);
+ /*
+ * clear BITMAP_WRITE_ERROR bit to protect against the case where
+ * a bitmap write error occurred but later writes succeeded.
+ */
+ sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
/* Just in case these have been changed via sysfs: */
sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
@@ -625,7 +629,7 @@ re_read:
err = read_sb_page(bitmap->mddev,
offset,
sb_page,
- 0, PAGE_SIZE);
+ 0, sizeof(bitmap_super_t));
}
if (err)
return err;
@@ -1816,6 +1820,12 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
BUG_ON(file && mddev->bitmap_info.offset);
+ if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+ pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
+ mdname(mddev));
+ return ERR_PTR(-EBUSY);
+ }
+
bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
if (!bitmap)
return ERR_PTR(-ENOMEM);
@@ -2123,7 +2133,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
if (store.sb_page && bitmap->storage.sb_page)
memcpy(page_address(store.sb_page),
page_address(bitmap->storage.sb_page),
- PAGE_SIZE);
+ sizeof(bitmap_super_t));
bitmap_file_unmap(&bitmap->storage);
bitmap->storage = store;
@@ -2152,6 +2162,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
for (k = 0; k < page; k++) {
kfree(new_bp[k].map);
}
+ kfree(new_bp);
/* restore some fields from old_counts */
bitmap->counts.bp = old_counts.bp;
@@ -2202,6 +2213,14 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
block += old_blocks;
}
+ if (bitmap->counts.bp != old_counts.bp) {
+ unsigned long k;
+ for (k = 0; k < old_counts.pages; k++)
+ if (!old_counts.bp[k].hijacked)
+ kfree(old_counts.bp[k].map);
+ kfree(old_counts.bp);
+ }
+
if (!init) {
int i;
while (block < (chunks << chunkshift)) {
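
The bitmap_resize() hunks above plug two leaks around the per-page counter array: on the allocation-failure path the new_bp[] slots were freed but never the new_bp array itself, and on success the old counts.bp array (and its non-hijacked pages) was simply dropped. Both fixes follow the same two-level ownership rule, shown here as a standalone sketch in which page_slot is a simplified stand-in for struct bitmap_page:

#include <stdlib.h>

struct page_slot {
	void *map;	/* per-slot allocation, may be NULL */
	int hijacked;	/* if set, 'map' is not a real allocation */
};

static void free_slots(struct page_slot *bp, unsigned long pages)
{
	unsigned long k;

	if (!bp)
		return;
	for (k = 0; k < pages; k++)
		if (!bp[k].hijacked)
			free(bp[k].map);	/* free what each slot owns... */
	free(bp);				/* ...and the slot array itself */
}

int main(void)
{
	unsigned long pages = 8, k;
	struct page_slot *bp = calloc(pages, sizeof(*bp));

	if (!bp)
		return 1;
	for (k = 0; k < pages; k++)
		bp[k].map = calloc(1, 4096);
	free_slots(bp, pages);
	return 0;
}
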
diff --git a/drivers/md/bitmap.h b/drivers/md/md-bitmap.h
index 5df35ca90f58..5df35ca90f58 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/md-bitmap.h
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 03082e17c65c..79bfbc840385 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -15,7 +15,7 @@
#include <linux/sched.h>
#include <linux/raid/md_p.h>
#include "md.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
#include "md-cluster.h"
#define LVB_SIZE 64
@@ -442,10 +442,11 @@ static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
static void remove_suspend_info(struct mddev *mddev, int slot)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
+ mddev->pers->quiesce(mddev, 1);
spin_lock_irq(&cinfo->suspend_lock);
__remove_suspend_info(cinfo, slot);
spin_unlock_irq(&cinfo->suspend_lock);
- mddev->pers->quiesce(mddev, 2);
+ mddev->pers->quiesce(mddev, 0);
}
@@ -492,13 +493,12 @@ static void process_suspend_info(struct mddev *mddev,
s->lo = lo;
s->hi = hi;
mddev->pers->quiesce(mddev, 1);
- mddev->pers->quiesce(mddev, 0);
spin_lock_irq(&cinfo->suspend_lock);
/* Remove existing entry (if exists) before adding */
__remove_suspend_info(cinfo, slot);
list_add(&s->list, &cinfo->suspend_list);
spin_unlock_irq(&cinfo->suspend_lock);
- mddev->pers->quiesce(mddev, 2);
+ mddev->pers->quiesce(mddev, 0);
}
static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
@@ -1094,7 +1094,7 @@ static void metadata_update_cancel(struct mddev *mddev)
/*
* return 0 if all the bitmaps have the same sync_size
*/
-int cluster_check_sync_size(struct mddev *mddev)
+static int cluster_check_sync_size(struct mddev *mddev)
{
int i, rv;
bitmap_super_t *sb;
@@ -1478,7 +1478,7 @@ static struct md_cluster_operations cluster_ops = {
static int __init cluster_init(void)
{
- pr_warn("md-cluster: EXPERIMENTAL. Use with caution\n");
+ pr_warn("md-cluster: support raid1 and raid10 (limited support)\n");
pr_info("Registering Cluster MD functions\n");
register_md_cluster_operations(&cluster_ops, THIS_MODULE);
return 0;
diff --git a/drivers/md/faulty.c b/drivers/md/md-faulty.c
index 38264b38420f..38264b38420f 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/md-faulty.c
diff --git a/drivers/md/linear.c b/drivers/md/md-linear.c
index c464fb48039a..773fc70dced7 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/md-linear.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
-#include "linear.h"
+#include "md-linear.h"
/*
* find which device holds a particular offset
diff --git a/drivers/md/linear.h b/drivers/md/md-linear.h
index 8381d651d4ed..8381d651d4ed 100644
--- a/drivers/md/linear.h
+++ b/drivers/md/md-linear.h
diff --git a/drivers/md/multipath.c b/drivers/md/md-multipath.c
index b68e0666b9b0..e40065bdbfc8 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/md-multipath.c
@@ -25,7 +25,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "md.h"
-#include "multipath.h"
+#include "md-multipath.h"
#define MAX_WORK_PER_DISK 128
@@ -243,7 +243,6 @@ static void print_multipath_conf (struct mpconf *conf)
static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct mpconf *conf = mddev->private;
- struct request_queue *q;
int err = -EEXIST;
int path;
struct multipath_info *p;
@@ -257,7 +256,6 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
for (path = first; path <= last; path++)
if ((p=conf->multipaths+path)->rdev == NULL) {
- q = rdev->bdev->bd_disk->queue;
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
diff --git a/drivers/md/multipath.h b/drivers/md/md-multipath.h
index 0adb941f485a..0adb941f485a 100644
--- a/drivers/md/multipath.h
+++ b/drivers/md/md-multipath.h
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0ff1bbf6c90e..c3dc134b9fb5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -69,7 +69,7 @@
#include <trace/events/block.h>
#include "md.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
#include "md-cluster.h"
#ifndef MODULE
@@ -266,16 +266,31 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
* call has finished, the bio has been linked into some internal structure
* and so is visible to ->quiesce(), so we don't need the refcount any more.
*/
+static bool is_suspended(struct mddev *mddev, struct bio *bio)
+{
+ if (mddev->suspended)
+ return true;
+ if (bio_data_dir(bio) != WRITE)
+ return false;
+ if (mddev->suspend_lo >= mddev->suspend_hi)
+ return false;
+ if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
+ return false;
+ if (bio_end_sector(bio) < mddev->suspend_lo)
+ return false;
+ return true;
+}
+
void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
rcu_read_lock();
- if (mddev->suspended) {
+ if (is_suspended(mddev, bio)) {
DEFINE_WAIT(__wait);
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
TASK_UNINTERRUPTIBLE);
- if (!mddev->suspended)
+ if (!is_suspended(mddev, bio))
break;
rcu_read_unlock();
schedule();
@@ -344,12 +359,17 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
void mddev_suspend(struct mddev *mddev)
{
WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
+ lockdep_assert_held(&mddev->reconfig_mutex);
if (mddev->suspended++)
return;
synchronize_rcu();
wake_up(&mddev->sb_wait);
+ set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
+ smp_mb__after_atomic();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
+ clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
+ wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
del_timer_sync(&mddev->safemode_timer);
}
@@ -357,6 +377,7 @@ EXPORT_SYMBOL_GPL(mddev_suspend);
void mddev_resume(struct mddev *mddev)
{
+ lockdep_assert_held(&mddev->reconfig_mutex);
if (--mddev->suspended)
return;
wake_up(&mddev->sb_wait);
@@ -663,6 +684,7 @@ void mddev_unlock(struct mddev *mddev)
*/
spin_lock(&pers_lock);
md_wakeup_thread(mddev->thread);
+ wake_up(&mddev->sb_wait);
spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);
@@ -2313,7 +2335,7 @@ static void export_array(struct mddev *mddev)
static bool set_in_sync(struct mddev *mddev)
{
- WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
+ lockdep_assert_held(&mddev->lock);
if (!mddev->in_sync) {
mddev->sync_checkers++;
spin_unlock(&mddev->lock);
@@ -2432,10 +2454,18 @@ repeat:
}
}
- /* First make sure individual recovery_offsets are correct */
+ /*
+ * First make sure individual recovery_offsets are correct.
+ * curr_resync_completed can only be used during recovery;
+ * during reshape/resync it might use array addresses rather
+ * than device addresses.
+ */
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
+ test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
!test_bit(Journal, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
mddev->curr_resync_completed > rdev->recovery_offset)
@@ -2651,7 +2681,7 @@ state_show(struct md_rdev *rdev, char *page)
{
char *sep = ",";
size_t len = 0;
- unsigned long flags = ACCESS_ONCE(rdev->flags);
+ unsigned long flags = READ_ONCE(rdev->flags);
if (test_bit(Faulty, &flags) ||
(!test_bit(ExternalBbl, &flags) &&
@@ -4824,7 +4854,7 @@ suspend_lo_show(struct mddev *mddev, char *page)
static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
- unsigned long long old, new;
+ unsigned long long new;
int err;
err = kstrtoull(buf, 10, &new);
@@ -4840,16 +4870,10 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
goto unlock;
- old = mddev->suspend_lo;
+ mddev_suspend(mddev);
mddev->suspend_lo = new;
- if (new >= old)
- /* Shrinking suspended region */
- mddev->pers->quiesce(mddev, 2);
- else {
- /* Expanding suspended region - need to wait */
- mddev->pers->quiesce(mddev, 1);
- mddev->pers->quiesce(mddev, 0);
- }
+ mddev_resume(mddev);
+
err = 0;
unlock:
mddev_unlock(mddev);
@@ -4867,7 +4891,7 @@ suspend_hi_show(struct mddev *mddev, char *page)
static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
- unsigned long long old, new;
+ unsigned long long new;
int err;
err = kstrtoull(buf, 10, &new);
@@ -4880,19 +4904,13 @@ suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
return err;
err = -EINVAL;
- if (mddev->pers == NULL ||
- mddev->pers->quiesce == NULL)
+ if (mddev->pers == NULL)
goto unlock;
- old = mddev->suspend_hi;
+
+ mddev_suspend(mddev);
mddev->suspend_hi = new;
- if (new <= old)
- /* Shrinking suspended region */
- mddev->pers->quiesce(mddev, 2);
- else {
- /* Expanding suspended region - need to wait */
- mddev->pers->quiesce(mddev, 1);
- mddev->pers->quiesce(mddev, 0);
- }
+ mddev_resume(mddev);
+
err = 0;
unlock:
mddev_unlock(mddev);
@@ -5357,7 +5375,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
return NULL;
}
-static int add_named_array(const char *val, struct kernel_param *kp)
+static int add_named_array(const char *val, const struct kernel_param *kp)
{
/*
* val must be "md_*" or "mdNNN".
@@ -5834,8 +5852,14 @@ void md_stop(struct mddev *mddev)
* This is called from dm-raid
*/
__md_stop(mddev);
- if (mddev->bio_set)
+ if (mddev->bio_set) {
bioset_free(mddev->bio_set);
+ mddev->bio_set = NULL;
+ }
+ if (mddev->sync_set) {
+ bioset_free(mddev->sync_set);
+ mddev->sync_set = NULL;
+ }
}
EXPORT_SYMBOL_GPL(md_stop);
@@ -6362,7 +6386,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
break;
}
}
- if (has_journal) {
+ if (has_journal || mddev->bitmap) {
export_rdev(rdev);
return -EBUSY;
}
@@ -6618,22 +6642,26 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
return -ENOENT; /* cannot remove what isn't there */
err = 0;
if (mddev->pers) {
- mddev->pers->quiesce(mddev, 1);
if (fd >= 0) {
struct bitmap *bitmap;
bitmap = bitmap_create(mddev, -1);
+ mddev_suspend(mddev);
if (!IS_ERR(bitmap)) {
mddev->bitmap = bitmap;
err = bitmap_load(mddev);
} else
err = PTR_ERR(bitmap);
- }
- if (fd < 0 || err) {
+ if (err) {
+ bitmap_destroy(mddev);
+ fd = -1;
+ }
+ mddev_resume(mddev);
+ } else if (fd < 0) {
+ mddev_suspend(mddev);
bitmap_destroy(mddev);
- fd = -1; /* make sure to put the file */
+ mddev_resume(mddev);
}
- mddev->pers->quiesce(mddev, 0);
}
if (fd < 0) {
struct file *f = mddev->bitmap_info.file;
@@ -6735,7 +6763,7 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
{
- WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
+ lockdep_assert_held(&mddev->reconfig_mutex);
if (mddev->external_size)
return;
@@ -6917,8 +6945,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
- mddev->pers->quiesce(mddev, 1);
bitmap = bitmap_create(mddev, -1);
+ mddev_suspend(mddev);
if (!IS_ERR(bitmap)) {
mddev->bitmap = bitmap;
rv = bitmap_load(mddev);
@@ -6926,7 +6954,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
rv = PTR_ERR(bitmap);
if (rv)
bitmap_destroy(mddev);
- mddev->pers->quiesce(mddev, 0);
+ mddev_resume(mddev);
} else {
/* remove the bitmap */
if (!mddev->bitmap) {
@@ -6949,9 +6977,9 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->bitmap_info.nodes = 0;
md_cluster_ops->leave(mddev);
}
- mddev->pers->quiesce(mddev, 1);
+ mddev_suspend(mddev);
bitmap_destroy(mddev);
- mddev->pers->quiesce(mddev, 0);
+ mddev_resume(mddev);
mddev->bitmap_info.offset = 0;
}
}
@@ -7468,8 +7496,8 @@ void md_wakeup_thread(struct md_thread *thread)
{
if (thread) {
pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
- if (!test_and_set_bit(THREAD_WAKEUP, &thread->flags))
- wake_up(&thread->wqueue);
+ set_bit(THREAD_WAKEUP, &thread->flags);
+ wake_up(&thread->wqueue);
}
}
EXPORT_SYMBOL(md_wakeup_thread);
@@ -8039,7 +8067,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
- !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended);
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
+ mddev->suspended);
if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
percpu_ref_put(&mddev->writes_pending);
return false;
@@ -8110,7 +8139,6 @@ void md_allow_write(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_state);
/* wait for the dirty state to be recorded in the metadata */
wait_event(mddev->sb_wait,
- !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) &&
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
} else
spin_unlock(&mddev->lock);
@@ -8477,16 +8505,19 @@ void md_do_sync(struct md_thread *thread)
} else {
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev->curr_resync = MaxSector;
- rcu_read_lock();
- rdev_for_each_rcu(rdev, mddev)
- if (rdev->raid_disk >= 0 &&
- mddev->delta_disks >= 0 &&
- !test_bit(Journal, &rdev->flags) &&
- !test_bit(Faulty, &rdev->flags) &&
- !test_bit(In_sync, &rdev->flags) &&
- rdev->recovery_offset < mddev->curr_resync)
- rdev->recovery_offset = mddev->curr_resync;
- rcu_read_unlock();
+ if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev)
+ if (rdev->raid_disk >= 0 &&
+ mddev->delta_disks >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < mddev->curr_resync)
+ rdev->recovery_offset = mddev->curr_resync;
+ rcu_read_unlock();
+ }
}
}
skip:
@@ -8813,6 +8844,16 @@ void md_check_recovery(struct mddev *mddev)
unlock:
wake_up(&mddev->sb_wait);
mddev_unlock(mddev);
+ } else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
+ /* Write superblock - thread that called mddev_suspend()
+ * holds reconfig_mutex for us.
+ */
+ set_bit(MD_UPDATING_SB, &mddev->flags);
+ smp_mb__after_atomic();
+ if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
+ md_update_sb(mddev, 0);
+ clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
+ wake_up(&mddev->sb_wait);
}
}
EXPORT_SYMBOL(md_check_recovery);
@@ -9274,11 +9315,11 @@ static __exit void md_exit(void)
subsys_initcall(md_init);
module_exit(md_exit)
-static int get_ro(char *buffer, struct kernel_param *kp)
+static int get_ro(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "%d", start_readonly);
}
-static int set_ro(const char *val, struct kernel_param *kp)
+static int set_ro(const char *val, const struct kernel_param *kp)
{
return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}
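
The new is_suspended() helper near the top of the md.c diff only blocks a WRITE whose sector range actually touches the suspend_lo..suspend_hi window, rather than treating any non-empty window as a full-array suspend. The standalone check below mirrors those comparisons with made-up sector numbers:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Mirrors the checks in is_suspended(): a write is held back only if it
 * touches the suspended window (suspend_hi is exclusive). */
static bool write_hits_window(sector_t start, sector_t end,
			      sector_t suspend_lo, sector_t suspend_hi)
{
	if (suspend_lo >= suspend_hi)
		return false;	/* empty window: nothing is suspended */
	if (start >= suspend_hi)
		return false;	/* bio begins at or after the window */
	if (end < suspend_lo)
		return false;	/* bio ends before the window */
	return true;
}

int main(void)
{
	/* Made-up window and bios, in 512-byte sectors. */
	sector_t lo = 1024, hi = 2048;

	printf("%d\n", write_hits_window(0,    512,  lo, hi));	/* 0: entirely below */
	printf("%d\n", write_hits_window(1000, 1100, lo, hi));	/* 1: straddles suspend_lo */
	printf("%d\n", write_hits_window(3000, 3100, lo, hi));	/* 0: entirely above */
	return 0;
}
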
diff --git a/drivers/md/md.h b/drivers/md/md.h
index d8287d3cd1bf..7d6bcf0eba0c 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -237,6 +237,12 @@ enum mddev_flags {
*/
MD_HAS_PPL, /* The raid array has PPL feature set */
MD_HAS_MULTIPLE_PPLS, /* The raid array has multiple PPLs feature set */
+ MD_ALLOW_SB_UPDATE, /* md_check_recovery is allowed to update
+ * the metadata without taking reconfig_mutex.
+ */
+ MD_UPDATING_SB, /* md_check_recovery is updating the metadata
+ * without explicitly holding reconfig_mutex.
+ */
};
enum mddev_sb_flags {
@@ -494,11 +500,6 @@ static inline void mddev_lock_nointr(struct mddev *mddev)
mutex_lock(&mddev->reconfig_mutex);
}
-static inline int mddev_is_locked(struct mddev *mddev)
-{
- return mutex_is_locked(&mddev->reconfig_mutex);
-}
-
static inline int mddev_trylock(struct mddev *mddev)
{
return mutex_trylock(&mddev->reconfig_mutex);
@@ -538,12 +539,11 @@ struct md_personality
int (*check_reshape) (struct mddev *mddev);
int (*start_reshape) (struct mddev *mddev);
void (*finish_reshape) (struct mddev *mddev);
- /* quiesce moves between quiescence states
- * 0 - fully active
- * 1 - no new requests allowed
- * others - reserved
+ /* quiesce suspends or resumes internal processing.
+ * 1 - stop new actions and wait for action io to complete
+ * 0 - return to normal behaviour
*/
- void (*quiesce) (struct mddev *mddev, int state);
+ void (*quiesce) (struct mddev *mddev, int quiesce);
/* takeover is used to transition an array from one
* personality to another. The new personality must be able
* to handle the data in the current layout.
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 4aed69d9dd17..aec449243966 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>
+#include <linux/kernel.h>
#define DM_MSG_PREFIX "space map metadata"
@@ -111,7 +112,7 @@ static bool brb_empty(struct bop_ring_buffer *brb)
static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
{
unsigned r = old + 1;
- return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
+ return r >= ARRAY_SIZE(brb->bops) ? 0 : r;
}
static int brb_push(struct bop_ring_buffer *brb,
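
The dm-space-map-metadata hunk above only replaces an open-coded sizeof division with ARRAY_SIZE() for the ring-buffer index wraparound. A standalone version of the same helper with the macro spelled out (the kernel's ARRAY_SIZE() additionally fails to build when handed a pointer instead of an array):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct bop { int type; unsigned b; };

static struct bop bops[16];

/* Advance a ring-buffer index, wrapping at the array length. */
static unsigned brb_next(unsigned old)
{
	unsigned r = old + 1;

	return r >= ARRAY_SIZE(bops) ? 0 : r;
}

int main(void)
{
	printf("%u %u\n", brb_next(3), brb_next(15));	/* prints: 4 0 */
	return 0;
}
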
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 5a00fc118470..5ecba9eef441 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -768,7 +768,7 @@ static void *raid0_takeover(struct mddev *mddev)
return ERR_PTR(-EINVAL);
}
-static void raid0_quiesce(struct mddev *mddev, int state)
+static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f3f3e40dc9d8..cc9d337a1ed3 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -37,13 +37,12 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
-#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid1.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
#define UNSUPPORTED_MDDEV_FLAGS \
((1L << MD_HAS_JOURNAL) | \
@@ -990,14 +989,6 @@ static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
_wait_barrier(conf, idx);
}
-static void wait_all_barriers(struct r1conf *conf)
-{
- int idx;
-
- for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
- _wait_barrier(conf, idx);
-}
-
static void _allow_barrier(struct r1conf *conf, int idx)
{
atomic_dec(&conf->nr_pending[idx]);
@@ -1011,14 +1002,6 @@ static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
_allow_barrier(conf, idx);
}
-static void allow_all_barriers(struct r1conf *conf)
-{
- int idx;
-
- for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
- _allow_barrier(conf, idx);
-}
-
/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
@@ -1303,42 +1286,28 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
int first_clone;
int max_sectors;
- /*
- * Register the new request and wait if the reconstruction
- * thread has put up a bar for new requests.
- * Continue immediately if no resync is active currently.
- */
-
-
- if ((bio_end_sector(bio) > mddev->suspend_lo &&
- bio->bi_iter.bi_sector < mddev->suspend_hi) ||
- (mddev_is_clustered(mddev) &&
+ if (mddev_is_clustered(mddev) &&
md_cluster_ops->area_resyncing(mddev, WRITE,
- bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
+ bio->bi_iter.bi_sector, bio_end_sector(bio))) {
- /*
- * As the suspend_* range is controlled by userspace, we want
- * an interruptible wait.
- */
DEFINE_WAIT(w);
for (;;) {
- sigset_t full, old;
prepare_to_wait(&conf->wait_barrier,
- &w, TASK_INTERRUPTIBLE);
- if (bio_end_sector(bio) <= mddev->suspend_lo ||
- bio->bi_iter.bi_sector >= mddev->suspend_hi ||
- (mddev_is_clustered(mddev) &&
- !md_cluster_ops->area_resyncing(mddev, WRITE,
- bio->bi_iter.bi_sector,
- bio_end_sector(bio))))
+ &w, TASK_IDLE);
+ if (!md_cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector,
+ bio_end_sector(bio)))
break;
- sigfillset(&full);
- sigprocmask(SIG_BLOCK, &full, &old);
schedule();
- sigprocmask(SIG_SETMASK, &old, NULL);
}
finish_wait(&conf->wait_barrier, &w);
}
+
+ /*
+ * Register the new request and wait if the reconstruction
+ * thread has put up a bar for new requests.
+ * Continue immediately if no resync is active currently.
+ */
wait_barrier(conf, bio->bi_iter.bi_sector);
r1_bio = alloc_r1bio(mddev, bio);
@@ -1654,8 +1623,12 @@ static void print_conf(struct r1conf *conf)
static void close_sync(struct r1conf *conf)
{
- wait_all_barriers(conf);
- allow_all_barriers(conf);
+ int idx;
+
+ for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
+ _wait_barrier(conf, idx);
+ _allow_barrier(conf, idx);
+ }
mempool_destroy(conf->r1buf_pool);
conf->r1buf_pool = NULL;
@@ -3277,21 +3250,14 @@ static int raid1_reshape(struct mddev *mddev)
return 0;
}
-static void raid1_quiesce(struct mddev *mddev, int state)
+static void raid1_quiesce(struct mddev *mddev, int quiesce)
{
struct r1conf *conf = mddev->private;
- switch(state) {
- case 2: /* wake for suspend */
- wake_up(&conf->wait_barrier);
- break;
- case 1:
+ if (quiesce)
freeze_array(conf, 0);
- break;
- case 0:
+ else
unfreeze_array(conf);
- break;
- }
}
static void *raid1_takeover(struct mddev *mddev)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 374df5796649..b9edbc747a95 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -29,7 +29,7 @@
#include "md.h"
#include "raid10.h"
#include "raid0.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
/*
* RAID10 provides a combination of RAID0 and RAID1 functionality.
@@ -136,10 +136,13 @@ static void r10bio_pool_free(void *r10_bio, void *data)
kfree(r10_bio);
}
+#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
+#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
+#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
/*
* When performing a resync, we need to read and compare, so
@@ -383,12 +386,11 @@ static void raid10_end_read_request(struct bio *bio)
{
int uptodate = !bio->bi_status;
struct r10bio *r10_bio = bio->bi_private;
- int slot, dev;
+ int slot;
struct md_rdev *rdev;
struct r10conf *conf = r10_bio->mddev->private;
slot = r10_bio->read_slot;
- dev = r10_bio->devs[slot].devnum;
rdev = r10_bio->devs[slot].rdev;
/*
* this branch is our 'one mirror IO has finished' event handler:
@@ -748,7 +750,6 @@ static struct md_rdev *read_balance(struct r10conf *conf,
raid10_find_phys(conf, r10_bio);
rcu_read_lock();
- sectors = r10_bio->sectors;
best_slot = -1;
best_rdev = NULL;
best_dist = MaxSector;
@@ -761,8 +762,11 @@ static struct md_rdev *read_balance(struct r10conf *conf,
* the resync window. We take the first readable disk when
* above the resync window.
*/
- if (conf->mddev->recovery_cp < MaxSector
- && (this_sector + sectors >= conf->next_resync))
+ if ((conf->mddev->recovery_cp < MaxSector
+ && (this_sector + sectors >= conf->next_resync)) ||
+ (mddev_is_clustered(conf->mddev) &&
+ md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
+ this_sector + sectors)))
do_balance = 0;
for (slot = 0; slot < conf->copies ; slot++) {
@@ -1293,6 +1297,22 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
sector_t sectors;
int max_sectors;
+ if ((mddev_is_clustered(mddev) &&
+ md_cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector,
+ bio_end_sector(bio)))) {
+ DEFINE_WAIT(w);
+ for (;;) {
+ prepare_to_wait(&conf->wait_barrier,
+ &w, TASK_IDLE);
+ if (!md_cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector, bio_end_sector(bio)))
+ break;
+ schedule();
+ }
+ finish_wait(&conf->wait_barrier, &w);
+ }
+
/*
* Register the new request and wait if the reconstruction
* thread has put up a bar for new requests.
@@ -2575,7 +2595,6 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
struct bio *bio;
struct r10conf *conf = mddev->private;
struct md_rdev *rdev = r10_bio->devs[slot].rdev;
- sector_t bio_last_sector;
/* we got a read error. Maybe the drive is bad. Maybe just
* the block and we can fix it.
@@ -2586,7 +2605,6 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
* frozen.
*/
bio = r10_bio->devs[slot].bio;
- bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
bio_put(bio);
r10_bio->devs[slot].bio = NULL;
@@ -2826,6 +2844,43 @@ static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
}
/*
+ * Set cluster_sync_high since we need other nodes to add the
+ * range [cluster_sync_low, cluster_sync_high] to suspend list.
+ */
+static void raid10_set_cluster_sync_high(struct r10conf *conf)
+{
+ sector_t window_size;
+ int extra_chunk, chunks;
+
+ /*
+ * First, define a "stripe" as the unit that spans all member
+ * devices once, so the number of chunks per stripe is
+ * raid_disks / near_copies. Without this, when near_copies is
+ * close to raid_disks the resync window would grow linearly
+ * with raid_disks, suspending a much larger IO window than
+ * necessary. If raid_disks is not divisible by near_copies,
+ * an extra chunk is needed to ensure the whole "stripe" is
+ * covered.
+ */
+
+ chunks = conf->geo.raid_disks / conf->geo.near_copies;
+ if (conf->geo.raid_disks % conf->geo.near_copies == 0)
+ extra_chunk = 0;
+ else
+ extra_chunk = 1;
+ window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
+
+ /*
+ * At least use a 32M window to align with raid1's resync window
+ */
+ window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
+ CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
+
+ conf->cluster_sync_high = conf->cluster_sync_low + window_size;
+}
+
+/*
* perform a "sync" on one "block"
*
* We need to make sure that no normal I/O request - particularly write
@@ -2897,6 +2952,9 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sector = mddev->resync_max_sectors;
if (sector_nr >= max_sector) {
+ conf->cluster_sync_low = 0;
+ conf->cluster_sync_high = 0;
+
/* If we aborted, we need to abort the
* sync on the 'current' bitmap chunks (there can
* be several when recovering multiple devices).
@@ -3251,7 +3309,17 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* resync. Schedule a read for every block at this virt offset */
int count = 0;
- bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0);
+ /*
+ * curr_resync_completed may not have been updated in time,
+ * yet cluster_sync_low will be set from it. Check against
+ * "sector_nr + 2 * RESYNC_SECTORS" as a safety margin, which
+ * ensures curr_resync_completed is updated by
+ * bitmap_cond_end_sync.
+ */
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr,
+ mddev_is_clustered(mddev) &&
+ (sector_nr + 2 * RESYNC_SECTORS >
+ conf->cluster_sync_high));
if (!bitmap_start_sync(mddev->bitmap, sector_nr,
&sync_blocks, mddev->degraded) &&
@@ -3385,6 +3453,52 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
} while (++page_idx < RESYNC_PAGES);
r10_bio->sectors = nr_sectors;
+ if (mddev_is_clustered(mddev) &&
+ test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ /* It is resync not recovery */
+ if (conf->cluster_sync_high < sector_nr + nr_sectors) {
+ conf->cluster_sync_low = mddev->curr_resync_completed;
+ raid10_set_cluster_sync_high(conf);
+ /* Send resync message */
+ md_cluster_ops->resync_info_update(mddev,
+ conf->cluster_sync_low,
+ conf->cluster_sync_high);
+ }
+ } else if (mddev_is_clustered(mddev)) {
+ /* This is recovery not resync */
+ sector_t sect_va1, sect_va2;
+ bool broadcast_msg = false;
+
+ for (i = 0; i < conf->geo.raid_disks; i++) {
+ /*
+ * sector_nr is a device address for recovery, so we
+ * need to translate it to an array address before
+ * comparing with cluster_sync_high.
+ */
+ sect_va1 = raid10_find_virt(conf, sector_nr, i);
+
+ if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
+ broadcast_msg = true;
+ /*
+ * curr_resync_completed is similar to
+ * sector_nr, so translate it as well.
+ */
+ sect_va2 = raid10_find_virt(conf,
+ mddev->curr_resync_completed, i);
+
+ if (conf->cluster_sync_low == 0 ||
+ conf->cluster_sync_low > sect_va2)
+ conf->cluster_sync_low = sect_va2;
+ }
+ }
+ if (broadcast_msg) {
+ raid10_set_cluster_sync_high(conf);
+ md_cluster_ops->resync_info_update(mddev,
+ conf->cluster_sync_low,
+ conf->cluster_sync_high);
+ }
+ }
+
while (biolist) {
bio = biolist;
biolist = biolist->bi_next;
@@ -3644,6 +3758,18 @@ static int raid10_run(struct mddev *mddev)
if (!conf)
goto out;
+ if (mddev_is_clustered(conf->mddev)) {
+ int fc, fo;
+
+ fc = (mddev->layout >> 8) & 255;
+ fo = mddev->layout & (1<<16);
+ if (fc > 1 || fo > 0) {
+ pr_err("only near layout is supported by clustered"
+ " raid10\n");
+ goto out;
+ }
+ }
+
mddev->thread = conf->thread;
conf->thread = NULL;
@@ -3832,18 +3958,14 @@ static void raid10_free(struct mddev *mddev, void *priv)
kfree(conf);
}
-static void raid10_quiesce(struct mddev *mddev, int state)
+static void raid10_quiesce(struct mddev *mddev, int quiesce)
{
struct r10conf *conf = mddev->private;
- switch(state) {
- case 1:
+ if (quiesce)
raise_barrier(conf, 0);
- break;
- case 0:
+ else
lower_barrier(conf);
- break;
- }
}
static int raid10_resize(struct mddev *mddev, sector_t sectors)
@@ -4578,15 +4700,18 @@ static int handle_reshape_read_error(struct mddev *mddev,
/* Use sync reads to get the blocks from somewhere else */
int sectors = r10_bio->sectors;
struct r10conf *conf = mddev->private;
- struct {
- struct r10bio r10_bio;
- struct r10dev devs[conf->copies];
- } on_stack;
- struct r10bio *r10b = &on_stack.r10_bio;
+ struct r10bio *r10b;
int slot = 0;
int idx = 0;
struct page **pages;
+ r10b = kmalloc(sizeof(*r10b) +
+ sizeof(struct r10dev) * conf->copies, GFP_NOIO);
+ if (!r10b) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ return -ENOMEM;
+ }
+
/* reshape IOs share pages from .devs[0].bio */
pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
@@ -4635,11 +4760,13 @@ static int handle_reshape_read_error(struct mddev *mddev,
/* couldn't read this block, must give up */
set_bit(MD_RECOVERY_INTR,
&mddev->recovery);
+ kfree(r10b);
return -EIO;
}
sectors -= s;
idx++;
}
+ kfree(r10b);
return 0;
}
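
raid10_set_cluster_sync_high() above sizes the suspended resync window as one whole "stripe" worth of chunks, rounding up by one chunk when raid_disks is not divisible by near_copies, and never lets it drop below the CLUSTER_RESYNC_WINDOW_SECTORS floor defined earlier in the file. A standalone calculation with made-up geometry shows the arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Values as defined in the raid10.c hunks above. */
#define RESYNC_WINDOW			(1024 * 1024)
#define CLUSTER_RESYNC_WINDOW		(16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS	(CLUSTER_RESYNC_WINDOW >> 9)

int main(void)
{
	/* Made-up geometry: 5 disks, 2 near copies, 512 KiB chunks (1024 sectors). */
	uint64_t raid_disks = 5, near_copies = 2, chunk_sectors = 1024;
	uint64_t chunks = raid_disks / near_copies;			/* 2 */
	uint64_t extra_chunk = (raid_disks % near_copies) ? 1 : 0;	/* 1: 5 not divisible by 2 */
	uint64_t window_size = (chunks + extra_chunk) * chunk_sectors;	/* 3072 sectors */

	if (window_size < CLUSTER_RESYNC_WINDOW_SECTORS)
		window_size = CLUSTER_RESYNC_WINDOW_SECTORS;		/* floor kicks in: 32768 sectors */

	printf("window_size = %llu sectors\n", (unsigned long long)window_size);
	return 0;
}
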
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index abceccab6671..db2ac22ac1b4 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -89,6 +89,12 @@ struct r10conf {
* the new thread here until we fully activate the array.
*/
struct md_thread *thread;
+
+ /*
+ * Keep track of cluster resync window to send to other nodes.
+ */
+ sector_t cluster_sync_low;
+ sector_t cluster_sync_high;
};
/*
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 0b7406ac8ce1..f1c86d938502 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -23,7 +23,7 @@
#include <linux/types.h>
#include "md.h"
#include "raid5.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
#include "raid5-log.h"
/*
@@ -539,7 +539,7 @@ static void r5l_log_run_stripes(struct r5l_log *log)
{
struct r5l_io_unit *io, *next;
- assert_spin_locked(&log->io_list_lock);
+ lockdep_assert_held(&log->io_list_lock);
list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
/* don't change list order */
@@ -555,7 +555,7 @@ static void r5l_move_to_end_ios(struct r5l_log *log)
{
struct r5l_io_unit *io, *next;
- assert_spin_locked(&log->io_list_lock);
+ lockdep_assert_held(&log->io_list_lock);
list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
/* don't change list order */
@@ -693,6 +693,8 @@ static void r5c_disable_writeback_async(struct work_struct *work)
struct r5l_log *log = container_of(work, struct r5l_log,
disable_writeback_work);
struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+ int locked = 0;
if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
return;
@@ -701,11 +703,15 @@ static void r5c_disable_writeback_async(struct work_struct *work)
/* wait superblock change before suspend */
wait_event(mddev->sb_wait,
- !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
-
- mddev_suspend(mddev);
- log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
- mddev_resume(mddev);
+ conf->log == NULL ||
+ (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) &&
+ (locked = mddev_trylock(mddev))));
+ if (locked) {
+ mddev_suspend(mddev);
+ log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+ mddev_resume(mddev);
+ mddev_unlock(mddev);
+ }
}
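/*
 * The rework above makes the work item wait until either the log is gone or
 * it can both observe a clean superblock and take the mddev lock via
 * mddev_trylock(), so it never sleeps while holding the reconfig mutex.  A
 * hedged sketch of the same wait-until-trylock-succeeds shape with a generic
 * mutex (all names here are illustrative, not from the patch):
 */
#include <linux/wait.h>
#include <linux/mutex.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static DEFINE_MUTEX(demo_lock);
static bool demo_ready;

static void demo_wait_then_lock(void)
{
	int locked = 0;

	/* The condition is re-evaluated on every wakeup; we only return once
	 * it is true *and* the trylock succeeded, so the lock is held here. */
	wait_event(demo_wq, demo_ready && (locked = mutex_trylock(&demo_lock)));
	if (locked) {
		/* ... do the work that required demo_lock ... */
		mutex_unlock(&demo_lock);
	}
}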
static void r5l_submit_current_io(struct r5l_log *log)
@@ -1194,7 +1200,7 @@ static void r5l_run_no_mem_stripe(struct r5l_log *log)
{
struct stripe_head *sh;
- assert_spin_locked(&log->io_list_lock);
+ lockdep_assert_held(&log->io_list_lock);
if (!list_empty(&log->no_mem_stripes)) {
sh = list_first_entry(&log->no_mem_stripes,
@@ -1210,7 +1216,7 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
struct r5l_io_unit *io, *next;
bool found = false;
- assert_spin_locked(&log->io_list_lock);
+ lockdep_assert_held(&log->io_list_lock);
list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
/* don't change list order */
@@ -1382,7 +1388,7 @@ static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
* raid5_release_stripe() while holding conf->device_lock
*/
BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
- assert_spin_locked(&conf->device_lock);
+ lockdep_assert_held(&conf->device_lock);
list_del_init(&sh->lru);
atomic_inc(&sh->count);
@@ -1409,7 +1415,7 @@ void r5c_flush_cache(struct r5conf *conf, int num)
int count;
struct stripe_head *sh, *next;
- assert_spin_locked(&conf->device_lock);
+ lockdep_assert_held(&conf->device_lock);
if (!conf->log)
return;
@@ -1583,21 +1589,21 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
md_wakeup_thread(log->reclaim_thread);
}
-void r5l_quiesce(struct r5l_log *log, int state)
+void r5l_quiesce(struct r5l_log *log, int quiesce)
{
struct mddev *mddev;
- if (!log || state == 2)
+ if (!log)
return;
- if (state == 0)
- kthread_unpark(log->reclaim_thread->tsk);
- else if (state == 1) {
+
+ if (quiesce) {
/* make sure r5l_write_super_and_discard_space exits */
mddev = log->rdev->mddev;
wake_up(&mddev->sb_wait);
kthread_park(log->reclaim_thread->tsk);
r5l_wake_reclaim(log, MaxSector);
r5l_do_reclaim(log);
- }
+ } else
+ kthread_unpark(log->reclaim_thread->tsk);
}
bool r5l_log_disk_error(struct r5conf *conf)
@@ -3165,6 +3171,8 @@ void r5l_exit_log(struct r5conf *conf)
conf->log = NULL;
synchronize_rcu();
+ /* Ensure disable_writeback_work wakes up and exits */
+ wake_up(&conf->mddev->sb_wait);
flush_work(&log->disable_writeback_work);
md_unregister_thread(&log->reclaim_thread);
mempool_destroy(log->meta_pool);
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index 7f9ad5f7cda0..284578b0a349 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -9,7 +9,7 @@ extern void r5l_write_stripe_run(struct r5l_log *log);
extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
extern void r5l_stripe_write_finished(struct stripe_head *sh);
extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
-extern void r5l_quiesce(struct r5l_log *log, int state);
+extern void r5l_quiesce(struct r5l_log *log, int quiesce);
extern bool r5l_log_disk_error(struct r5conf *conf);
extern bool r5c_is_writeback(struct r5l_log *log);
extern int
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index cd026c88f7ef..628c0bf7b9fd 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -758,7 +758,8 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
(unsigned long long)sector);
rdev = conf->disks[dd_idx].rdev;
- if (!rdev) {
+ if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
+ sector >= rdev->recovery_offset)) {
pr_debug("%s:%*s data member disk %d missing\n",
__func__, indent, "", dd_idx);
update_parity = false;
@@ -1296,8 +1297,7 @@ int ppl_init_log(struct r5conf *conf)
if (ret) {
goto err;
- } else if (!mddev->pers &&
- mddev->recovery_cp == 0 && !mddev->degraded &&
+ } else if (!mddev->pers && mddev->recovery_cp == 0 &&
ppl_conf->recovered_entries > 0 &&
ppl_conf->mismatch_count == 0) {
/*
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 928e24a07133..31dc25e2871a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -55,7 +55,6 @@
#include <linux/ratelimit.h>
#include <linux/nodemask.h>
#include <linux/flex_array.h>
-#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include <linux/list_sort.h>
@@ -63,7 +62,7 @@
#include "md.h"
#include "raid5.h"
#include "raid0.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
#include "raid5-log.h"
#define UNSUPPORTED_MDDEV_FLAGS (1L << MD_FAILFAST_SUPPORTED)
@@ -1818,8 +1817,11 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
struct r5dev *dev = &sh->dev[i];
if (dev->written || i == pd_idx || i == qd_idx) {
- if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
+ if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) {
set_bit(R5_UPTODATE, &dev->flags);
+ if (test_bit(STRIPE_EXPAND_READY, &sh->state))
+ set_bit(R5_Expanded, &dev->flags);
+ }
if (fua)
set_bit(R5_WantFUA, &dev->flags);
if (sync)
@@ -5682,28 +5684,6 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
goto retry;
}
- if (rw == WRITE &&
- logical_sector >= mddev->suspend_lo &&
- logical_sector < mddev->suspend_hi) {
- raid5_release_stripe(sh);
- /* As the suspend_* range is controlled by
- * userspace, we want an interruptible
- * wait.
- */
- prepare_to_wait(&conf->wait_for_overlap,
- &w, TASK_INTERRUPTIBLE);
- if (logical_sector >= mddev->suspend_lo &&
- logical_sector < mddev->suspend_hi) {
- sigset_t full, old;
- sigfillset(&full);
- sigprocmask(SIG_BLOCK, &full, &old);
- schedule();
- sigprocmask(SIG_SETMASK, &old, NULL);
- do_prepare = true;
- }
- goto retry;
- }
-
if (test_bit(STRIPE_EXPANDING, &sh->state) ||
!add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
/* Stripe is busy expanding or
@@ -5758,6 +5738,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
*/
struct r5conf *conf = mddev->private;
struct stripe_head *sh;
+ struct md_rdev *rdev;
sector_t first_sector, last_sector;
int raid_disks = conf->previous_raid_disks;
int data_disks = raid_disks - conf->max_degraded;
@@ -5880,6 +5861,15 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
return 0;
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
+ if (!mddev->reshape_backwards)
+ /* Can update recovery_offset */
+ rdev_for_each(rdev, mddev)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < sector_nr)
+ rdev->recovery_offset = sector_nr;
+
conf->reshape_checkpoint = jiffies;
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
@@ -5978,6 +5968,14 @@ finish:
goto ret;
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
+ if (!mddev->reshape_backwards)
+ /* Can update recovery_offset */
+ rdev_for_each(rdev, mddev)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < sector_nr)
+ rdev->recovery_offset = sector_nr;
conf->reshape_checkpoint = jiffies;
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
@@ -6072,7 +6070,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
*/
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
+ struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
if (rdev == NULL || test_bit(Faulty, &rdev->flags))
still_degraded = 1;
@@ -7156,6 +7154,13 @@ static int raid5_run(struct mddev *mddev)
min_offset_diff = diff;
}
+ if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) &&
+ (mddev->bitmap_info.offset || mddev->bitmap_info.file)) {
+ pr_notice("md/raid:%s: array cannot have both journal and bitmap\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+
if (mddev->reshape_position != MaxSector) {
/* Check that we can continue the reshape.
* Difficulties arise if the stripe we would write to
@@ -7958,6 +7963,7 @@ static void end_reshape(struct r5conf *conf)
{
if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
+ struct md_rdev *rdev;
spin_lock_irq(&conf->device_lock);
conf->previous_raid_disks = conf->raid_disks;
@@ -7965,6 +7971,11 @@ static void end_reshape(struct r5conf *conf)
smp_wmb();
conf->reshape_progress = MaxSector;
conf->mddev->reshape_position = MaxSector;
+ rdev_for_each(rdev, conf->mddev)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags))
+ rdev->recovery_offset = MaxSector;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
@@ -8020,16 +8031,12 @@ static void raid5_finish_reshape(struct mddev *mddev)
}
}
-static void raid5_quiesce(struct mddev *mddev, int state)
+static void raid5_quiesce(struct mddev *mddev, int quiesce)
{
struct r5conf *conf = mddev->private;
- switch(state) {
- case 2: /* resume for a suspend */
- wake_up(&conf->wait_for_overlap);
- break;
-
- case 1: /* stop all writes */
+ if (quiesce) {
+ /* stop all writes */
lock_all_device_hash_locks_irq(conf);
/* '2' tells resync/reshape to pause so that all
* active stripes can drain
@@ -8045,17 +8052,15 @@ static void raid5_quiesce(struct mddev *mddev, int state)
unlock_all_device_hash_locks_irq(conf);
/* allow reshape to continue */
wake_up(&conf->wait_for_overlap);
- break;
-
- case 0: /* re-enable writes */
+ } else {
+ /* re-enable writes */
lock_all_device_hash_locks_irq(conf);
conf->quiesce = 0;
wake_up(&conf->wait_for_quiescent);
wake_up(&conf->wait_for_overlap);
unlock_all_device_hash_locks_irq(conf);
- break;
}
- r5l_quiesce(conf->log, state);
+ r5l_quiesce(conf->log, quiesce);
}
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index f8a808d45034..98f88c43f62c 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -86,7 +86,7 @@ void cec_queue_event_fh(struct cec_fh *fh,
const struct cec_event *new_ev, u64 ts)
{
static const u8 max_events[CEC_NUM_EVENTS] = {
- 1, 1, 64, 64,
+ 1, 1, 64, 64, 8, 8,
};
struct cec_event_entry *entry;
unsigned int ev_idx = new_ev->event - 1;
@@ -170,6 +170,22 @@ void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
}
EXPORT_SYMBOL_GPL(cec_queue_pin_cec_event);
+/* Notify userspace that the HPD pin changed state at the given time. */
+void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
+{
+ struct cec_event ev = {
+ .event = is_high ? CEC_EVENT_PIN_HPD_HIGH :
+ CEC_EVENT_PIN_HPD_LOW,
+ };
+ struct cec_fh *fh;
+
+ mutex_lock(&adap->devnode.lock);
+ list_for_each_entry(fh, &adap->devnode.fhs, list)
+ cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
+ mutex_unlock(&adap->devnode.lock);
+}
+EXPORT_SYMBOL_GPL(cec_queue_pin_hpd_event);
+
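/*
 * cec_queue_pin_hpd_event() above is intended to be called by a CEC pin
 * driver whenever it sees the HPD line change.  A hedged sketch of how a
 * hypothetical GPIO-based pin driver might report that from its HPD
 * interrupt handler (the private struct, gpio descriptor and header layout
 * are assumptions, not part of this patch):
 */
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <media/cec.h>

struct demo_cec_pin_priv {
	struct cec_adapter *adap;
	struct gpio_desc *hpd_gpio;
};

static irqreturn_t demo_hpd_irq(int irq, void *data)
{
	struct demo_cec_pin_priv *priv = data;

	/* Report the new HPD level together with the time it was observed. */
	cec_queue_pin_hpd_event(priv->adap,
				gpiod_get_value(priv->hpd_gpio),
				ktime_get());
	return IRQ_HANDLED;
}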
/*
* Queue a new message for this filehandle.
*
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index a079f7fe018c..3dba3aa34a43 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -32,6 +32,7 @@
#include <media/cec-pin.h>
#include "cec-priv.h"
+#include "cec-pin-priv.h"
static inline struct cec_devnode *cec_devnode_data(struct file *filp)
{
@@ -529,7 +530,7 @@ static int cec_open(struct inode *inode, struct file *filp)
* Initial events that are automatically sent when the cec device is
* opened.
*/
- struct cec_event ev_state = {
+ struct cec_event ev = {
.event = CEC_EVENT_STATE_CHANGE,
.flags = CEC_EVENT_FL_INITIAL_STATE,
};
@@ -569,9 +570,19 @@ static int cec_open(struct inode *inode, struct file *filp)
filp->private_data = fh;
/* Queue up initial state events */
- ev_state.state_change.phys_addr = adap->phys_addr;
- ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
- cec_queue_event_fh(fh, &ev_state, 0);
+ ev.state_change.phys_addr = adap->phys_addr;
+ ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ cec_queue_event_fh(fh, &ev, 0);
+#ifdef CONFIG_CEC_PIN
+ if (adap->pin && adap->pin->ops->read_hpd) {
+ err = adap->pin->ops->read_hpd(adap);
+ if (err >= 0) {
+ ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
+ CEC_EVENT_PIN_HPD_LOW;
+ cec_queue_event_fh(fh, &ev, 0);
+ }
+ }
+#endif
list_add(&fh->list, &devnode->fhs);
mutex_unlock(&devnode->lock);
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index 648136e552d5..5870da6a567f 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -112,10 +112,6 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
int minor;
int ret;
- /* Initialization */
- INIT_LIST_HEAD(&devnode->fhs);
- mutex_init(&devnode->lock);
-
/* Part 1: Find a free minor number */
mutex_lock(&cec_devnode_lock);
minor = find_next_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES, 0);
@@ -242,6 +238,10 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
INIT_LIST_HEAD(&adap->wait_queue);
init_waitqueue_head(&adap->kthread_waitq);
+ /* adap->devnode initialization */
+ INIT_LIST_HEAD(&adap->devnode.fhs);
+ mutex_init(&adap->devnode.lock);
+
adap->kthread = kthread_run(cec_thread_func, adap, "cec-%s", name);
if (IS_ERR(adap->kthread)) {
pr_err("cec-%s: kernel_thread() failed\n", name);
@@ -277,7 +277,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
adap->rc->input_id.version = 1;
adap->rc->driver_name = CEC_NAME;
adap->rc->allowed_protocols = RC_PROTO_BIT_CEC;
- adap->rc->enabled_protocols = RC_PROTO_BIT_CEC;
adap->rc->priv = adap;
adap->rc->map_name = RC_MAP_CEC;
adap->rc->timeout = MS_TO_NS(100);
diff --git a/drivers/media/cec/cec-pin-priv.h b/drivers/media/cec/cec-pin-priv.h
new file mode 100644
index 000000000000..7d0def199762
--- /dev/null
+++ b/drivers/media/cec/cec-pin-priv.h
@@ -0,0 +1,133 @@
+/*
+ * cec-pin-priv.h - internal cec-pin header
+ *
+ * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef LINUX_CEC_PIN_PRIV_H
+#define LINUX_CEC_PIN_PRIV_H
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <media/cec-pin.h>
+
+enum cec_pin_state {
+ /* CEC is off */
+ CEC_ST_OFF,
+ /* CEC is idle, waiting for Rx or Tx */
+ CEC_ST_IDLE,
+
+ /* Tx states */
+
+ /* Pending Tx, waiting for Signal Free Time to expire */
+ CEC_ST_TX_WAIT,
+ /* Low-drive was detected, wait for bus to go high */
+ CEC_ST_TX_WAIT_FOR_HIGH,
+ /* Drive CEC low for the start bit */
+ CEC_ST_TX_START_BIT_LOW,
+ /* Drive CEC high for the start bit */
+ CEC_ST_TX_START_BIT_HIGH,
+ /* Drive CEC low for the 0 bit */
+ CEC_ST_TX_DATA_BIT_0_LOW,
+ /* Drive CEC high for the 0 bit */
+ CEC_ST_TX_DATA_BIT_0_HIGH,
+ /* Drive CEC low for the 1 bit */
+ CEC_ST_TX_DATA_BIT_1_LOW,
+ /* Drive CEC high for the 1 bit */
+ CEC_ST_TX_DATA_BIT_1_HIGH,
+ /*
+ * Wait for start of sample time to check for Ack bit or first
+ * four initiator bits to check for Arbitration Lost.
+ */
+ CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE,
+ /* Wait for end of bit period after sampling */
+ CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE,
+
+ /* Rx states */
+
+ /* Start bit low detected */
+ CEC_ST_RX_START_BIT_LOW,
+ /* Start bit high detected */
+ CEC_ST_RX_START_BIT_HIGH,
+ /* Wait for bit sample time */
+ CEC_ST_RX_DATA_SAMPLE,
+ /* Wait for earliest end of bit period after sampling */
+ CEC_ST_RX_DATA_POST_SAMPLE,
+ /* Wait for CEC to go high (i.e. end of bit period) */
+ CEC_ST_RX_DATA_HIGH,
+ /* Drive CEC low to send 0 Ack bit */
+ CEC_ST_RX_ACK_LOW,
+ /* End of 0 Ack time, wait for earliest end of bit period */
+ CEC_ST_RX_ACK_LOW_POST,
+ /* Wait for CEC to go high (i.e. end of bit period) */
+ CEC_ST_RX_ACK_HIGH_POST,
+ /* Wait for earliest end of bit period and end of message */
+ CEC_ST_RX_ACK_FINISH,
+
+ /* Start low drive */
+ CEC_ST_LOW_DRIVE,
+ /* Monitor pin using interrupts */
+ CEC_ST_RX_IRQ,
+
+ /* Total number of pin states */
+ CEC_PIN_STATES
+};
+
+#define CEC_NUM_PIN_EVENTS 128
+
+#define CEC_PIN_IRQ_UNCHANGED 0
+#define CEC_PIN_IRQ_DISABLE 1
+#define CEC_PIN_IRQ_ENABLE 2
+
+struct cec_pin {
+ struct cec_adapter *adap;
+ const struct cec_pin_ops *ops;
+ struct task_struct *kthread;
+ wait_queue_head_t kthread_waitq;
+ struct hrtimer timer;
+ ktime_t ts;
+ unsigned int wait_usecs;
+ u16 la_mask;
+ bool enabled;
+ bool monitor_all;
+ bool rx_eom;
+ bool enable_irq_failed;
+ enum cec_pin_state state;
+ struct cec_msg tx_msg;
+ u32 tx_bit;
+ bool tx_nacked;
+ u32 tx_signal_free_time;
+ struct cec_msg rx_msg;
+ u32 rx_bit;
+
+ struct cec_msg work_rx_msg;
+ u8 work_tx_status;
+ ktime_t work_tx_ts;
+ atomic_t work_irq_change;
+ atomic_t work_pin_events;
+ unsigned int work_pin_events_wr;
+ unsigned int work_pin_events_rd;
+ ktime_t work_pin_ts[CEC_NUM_PIN_EVENTS];
+ bool work_pin_is_high[CEC_NUM_PIN_EVENTS];
+ ktime_t timer_ts;
+ u32 timer_cnt;
+ u32 timer_100ms_overruns;
+ u32 timer_300ms_overruns;
+ u32 timer_max_overrun;
+ u32 timer_sum_overrun;
+};
+
+#endif
diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
index c003b8eac617..b48dfe844118 100644
--- a/drivers/media/cec/cec-pin.c
+++ b/drivers/media/cec/cec-pin.c
@@ -20,6 +20,7 @@
#include <linux/sched/types.h>
#include <media/cec-pin.h>
+#include "cec-pin-priv.h"
/* All timings are in microseconds */
@@ -132,7 +133,7 @@ static void cec_pin_to_idle(struct cec_pin *pin)
pin->rx_msg.len = 0;
memset(pin->rx_msg.msg, 0, sizeof(pin->rx_msg.msg));
pin->state = CEC_ST_IDLE;
- pin->ts = 0;
+ pin->ts = ns_to_ktime(0);
}
/*
@@ -426,7 +427,7 @@ static void cec_pin_rx_states(struct cec_pin *pin, ktime_t ts)
v = cec_pin_read(pin);
if (v && pin->rx_eom) {
pin->work_rx_msg = pin->rx_msg;
- pin->work_rx_msg.rx_ts = ts;
+ pin->work_rx_msg.rx_ts = ktime_to_ns(ts);
wake_up_interruptible(&pin->kthread_waitq);
pin->ts = ts;
pin->state = CEC_ST_RX_ACK_FINISH;
@@ -457,7 +458,7 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
s32 delta;
ts = ktime_get();
- if (pin->timer_ts) {
+ if (ktime_to_ns(pin->timer_ts)) {
delta = ktime_us_delta(ts, pin->timer_ts);
pin->timer_cnt++;
if (delta > 100 && pin->state != CEC_ST_IDLE) {
@@ -481,17 +482,19 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
if (pin->wait_usecs > 150) {
pin->wait_usecs -= 100;
pin->timer_ts = ktime_add_us(ts, 100);
- hrtimer_forward_now(timer, 100000);
+ hrtimer_forward_now(timer, ns_to_ktime(100000));
return HRTIMER_RESTART;
}
if (pin->wait_usecs > 100) {
pin->wait_usecs /= 2;
pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
- hrtimer_forward_now(timer, pin->wait_usecs * 1000);
+ hrtimer_forward_now(timer,
+ ns_to_ktime(pin->wait_usecs * 1000));
return HRTIMER_RESTART;
}
pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
- hrtimer_forward_now(timer, pin->wait_usecs * 1000);
+ hrtimer_forward_now(timer,
+ ns_to_ktime(pin->wait_usecs * 1000));
pin->wait_usecs = 0;
return HRTIMER_RESTART;
}
@@ -531,7 +534,7 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
pin->state = CEC_ST_RX_START_BIT_LOW;
break;
}
- if (pin->ts == 0)
+ if (ktime_to_ns(pin->ts) == 0)
pin->ts = ts;
if (pin->tx_msg.len) {
/*
@@ -572,12 +575,13 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
if (!adap->monitor_pin_cnt || states[pin->state].usecs <= 150) {
pin->wait_usecs = 0;
pin->timer_ts = ktime_add_us(ts, states[pin->state].usecs);
- hrtimer_forward_now(timer, states[pin->state].usecs * 1000);
+ hrtimer_forward_now(timer,
+ ns_to_ktime(states[pin->state].usecs * 1000));
return HRTIMER_RESTART;
}
pin->wait_usecs = states[pin->state].usecs - 100;
pin->timer_ts = ktime_add_us(ts, 100);
- hrtimer_forward_now(timer, 100000);
+ hrtimer_forward_now(timer, ns_to_ktime(100000));
return HRTIMER_RESTART;
}
@@ -596,7 +600,7 @@ static int cec_pin_thread_func(void *_adap)
if (pin->work_rx_msg.len) {
cec_received_msg_ts(adap, &pin->work_rx_msg,
- pin->work_rx_msg.rx_ts);
+ ns_to_ktime(pin->work_rx_msg.rx_ts));
pin->work_rx_msg.len = 0;
}
if (pin->work_tx_status) {
@@ -623,13 +627,15 @@ static int cec_pin_thread_func(void *_adap)
pin->ops->disable_irq(adap);
cec_pin_high(pin);
cec_pin_to_idle(pin);
- hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&pin->timer, ns_to_ktime(0),
+ HRTIMER_MODE_REL);
break;
case CEC_PIN_IRQ_ENABLE:
pin->enable_irq_failed = !pin->ops->enable_irq(adap);
if (pin->enable_irq_failed) {
cec_pin_to_idle(pin);
- hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&pin->timer, ns_to_ktime(0),
+ HRTIMER_MODE_REL);
}
break;
default:
@@ -653,7 +659,7 @@ static int cec_pin_adap_enable(struct cec_adapter *adap, bool enable)
cec_pin_read(pin);
cec_pin_to_idle(pin);
pin->tx_msg.len = 0;
- pin->timer_ts = 0;
+ pin->timer_ts = ns_to_ktime(0);
atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_UNCHANGED);
pin->kthread = kthread_run(cec_pin_thread_func, adap,
"cec-pin");
@@ -661,7 +667,8 @@ static int cec_pin_adap_enable(struct cec_adapter *adap, bool enable)
pr_err("cec-pin: kernel_thread() failed\n");
return PTR_ERR(pin->kthread);
}
- hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&pin->timer, ns_to_ktime(0),
+ HRTIMER_MODE_REL);
} else {
if (pin->ops->disable_irq)
pin->ops->disable_irq(adap);
@@ -699,7 +706,8 @@ static int cec_pin_adap_transmit(struct cec_adapter *adap, u8 attempts,
pin->ops->disable_irq(adap);
cec_pin_high(pin);
cec_pin_to_idle(pin);
- hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&pin->timer, ns_to_ktime(0),
+ HRTIMER_MODE_REL);
}
return 0;
}
@@ -789,7 +797,7 @@ struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops,
caps | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN,
CEC_MAX_LOG_ADDRS);
- if (PTR_ERR_OR_ZERO(adap)) {
+ if (IS_ERR(adap)) {
kfree(pin);
return adap;
}
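/*
 * The ktime changes in this file stop treating ktime_t as a plain integer:
 * "unset" timestamps are written and tested via ns_to_ktime(0) and
 * ktime_to_ns(), and hrtimer_forward_now()/hrtimer_start() are handed proper
 * ktime_t values.  A hedged sketch of that style (the helper and the 100 us
 * interval are illustrative):
 */
#include <linux/ktime.h>
#include <linux/hrtimer.h>

static void demo_arm_timer(struct hrtimer *timer, ktime_t *last_ts)
{
	if (ktime_to_ns(*last_ts) == 0)		/* explicit "unset" check */
		*last_ts = ktime_get();
	hrtimer_start(timer, ns_to_ktime(100000), HRTIMER_MODE_REL); /* 100 us */
}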
diff --git a/drivers/media/common/cypress_firmware.c b/drivers/media/common/cypress_firmware.c
index 50e3f76d4847..8895158c1962 100644
--- a/drivers/media/common/cypress_firmware.c
+++ b/drivers/media/common/cypress_firmware.c
@@ -74,11 +74,9 @@ int cypress_load_firmware(struct usb_device *udev,
struct hexline *hx;
int ret, pos = 0;
- hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
- if (!hx) {
- dev_err(&udev->dev, "%s: kmalloc() failed\n", KBUILD_MODNAME);
+ hx = kmalloc(sizeof(*hx), GFP_KERNEL);
+ if (!hx)
return -ENOMEM;
- }
/* stop the CPU */
hx->data[0] = 1;
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index 930d2c94d5d3..8c87d6837c49 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -163,9 +163,9 @@ void saa7146_buffer_next(struct saa7146_dev *dev,
}
}
-void saa7146_buffer_timeout(unsigned long data)
+void saa7146_buffer_timeout(struct timer_list *t)
{
- struct saa7146_dmaqueue *q = (struct saa7146_dmaqueue*)data;
+ struct saa7146_dmaqueue *q = from_timer(q, t, timeout);
struct saa7146_dev *dev = q->dev;
unsigned long flags;
@@ -559,7 +559,7 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
vbi->start[1] = 312;
vbi->count[1] = 16;
- init_timer(&vv->vbi_read_timeout);
+ timer_setup(&vv->vbi_read_timeout, NULL, 0);
vv->ov_fb.capability = V4L2_FBUF_CAP_LIST_CLIPPING;
vv->ov_fb.flags = V4L2_FBUF_FLAG_PRIMARY;
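/*
 * The timer changes in this driver (and in dmxdev below) follow the timer
 * API conversion where the callback receives a struct timer_list pointer and
 * recovers its container with from_timer(), while timer_setup() replaces
 * init_timer()/setup_timer().  A hedged, self-contained sketch of the
 * pattern (the struct and names are illustrative):
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_queue {
	struct timer_list timeout;
	/* ... other per-queue state ... */
};

static void demo_timeout_fn(struct timer_list *t)
{
	/* from_timer() maps the timer back to its enclosing structure. */
	struct demo_queue *q = from_timer(q, t, timeout);

	/* ... handle the timeout for q ... */
	(void)q;
}

static void demo_queue_init(struct demo_queue *q)
{
	timer_setup(&q->timeout, demo_timeout_fn, 0);
	mod_timer(&q->timeout, jiffies + HZ);
}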
diff --git a/drivers/media/common/saa7146/saa7146_vbi.c b/drivers/media/common/saa7146/saa7146_vbi.c
index 371c6f8606de..ce8d78c137f0 100644
--- a/drivers/media/common/saa7146/saa7146_vbi.c
+++ b/drivers/media/common/saa7146/saa7146_vbi.c
@@ -349,9 +349,10 @@ static void vbi_stop(struct saa7146_fh *fh, struct file *file)
spin_unlock_irqrestore(&dev->slock, flags);
}
-static void vbi_read_timeout(unsigned long data)
+static void vbi_read_timeout(struct timer_list *t)
{
- struct file *file = (struct file*)data;
+ struct saa7146_vv *vv = from_timer(vv, t, vbi_read_timeout);
+ struct file *file = vv->vbi_read_timeout_file;
struct saa7146_fh *fh = file->private_data;
struct saa7146_dev *dev = fh->dev;
@@ -366,8 +367,7 @@ static void vbi_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
INIT_LIST_HEAD(&vv->vbi_dmaq.queue);
- setup_timer(&vv->vbi_dmaq.timeout, saa7146_buffer_timeout,
- (unsigned long)(&vv->vbi_dmaq));
+ timer_setup(&vv->vbi_dmaq.timeout, saa7146_buffer_timeout, 0);
vv->vbi_dmaq.dev = dev;
init_waitqueue_head(&vv->vbi_wq);
@@ -402,8 +402,8 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
sizeof(struct saa7146_buf),
file, &dev->v4l2_lock);
- vv->vbi_read_timeout.function = vbi_read_timeout;
- vv->vbi_read_timeout.data = (unsigned long)file;
+ vv->vbi_read_timeout.function = (TIMER_FUNC_TYPE)vbi_read_timeout;
+ vv->vbi_read_timeout_file = file;
/* initialize the brs */
if ( 0 != (SAA7146_USE_PORT_B_FOR_VBI & dev->ext_vv_data->flags)) {
@@ -489,7 +489,7 @@ static ssize_t vbi_read(struct file *file, char __user *data, size_t count, loff
return ret;
}
-struct saa7146_use_ops saa7146_vbi_uops = {
+const struct saa7146_use_ops saa7146_vbi_uops = {
.init = vbi_init,
.open = vbi_open,
.release = vbi_close,
diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
index 37b4654dc21c..2b631eaa65b3 100644
--- a/drivers/media/common/saa7146/saa7146_video.c
+++ b/drivers/media/common/saa7146/saa7146_video.c
@@ -1201,8 +1201,7 @@ static void video_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
{
INIT_LIST_HEAD(&vv->video_dmaq.queue);
- setup_timer(&vv->video_dmaq.timeout, saa7146_buffer_timeout,
- (unsigned long)(&vv->video_dmaq));
+ timer_setup(&vv->video_dmaq.timeout, saa7146_buffer_timeout, 0);
vv->video_dmaq.dev = dev;
/* set some default values */
@@ -1303,7 +1302,7 @@ out:
return ret;
}
-struct saa7146_use_ops saa7146_video_uops = {
+const struct saa7146_use_ops saa7146_video_uops = {
.init = video_init,
.open = video_open,
.release = video_close,
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index e7a0d7798d5b..e4ea2a0c7a24 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -447,7 +447,7 @@ static struct smscore_registry_entry_t *smscore_find_registry(char *devpath)
return entry;
}
}
- entry = kmalloc(sizeof(struct smscore_registry_entry_t), GFP_KERNEL);
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (entry) {
entry->mode = default_mode;
strcpy(entry->devpath, devpath);
@@ -536,9 +536,7 @@ int smscore_register_hotplug(hotplug_t hotplug)
int rc = 0;
kmutex_lock(&g_smscore_deviceslock);
-
- notifyee = kmalloc(sizeof(struct smscore_device_notifyee_t),
- GFP_KERNEL);
+ notifyee = kmalloc(sizeof(*notifyee), GFP_KERNEL);
if (notifyee) {
/* now notify callback about existing devices */
first = &g_smscore_devices;
@@ -627,7 +625,7 @@ smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
{
struct smscore_buffer_t *cb;
- cb = kzalloc(sizeof(struct smscore_buffer_t), GFP_KERNEL);
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return NULL;
@@ -655,7 +653,7 @@ int smscore_register_device(struct smsdevice_params_t *params,
struct smscore_device_t *dev;
u8 *buffer;
- dev = kzalloc(sizeof(struct smscore_device_t), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -751,7 +749,7 @@ static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev,
void *buffer, size_t size, struct completion *completion) {
int rc;
- if (completion == NULL)
+ if (!completion)
return -EINVAL;
init_completion(completion);
@@ -1153,8 +1151,8 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
}
pr_debug("Firmware name: %s\n", fw_filename);
- if (loadfirmware_handler == NULL && !(coredev->device_flags
- & SMS_DEVICE_FAMILY2))
+ if (!loadfirmware_handler &&
+ !(coredev->device_flags & SMS_DEVICE_FAMILY2))
return -EINVAL;
rc = request_firmware(&fw, fw_filename, coredev->device);
@@ -1301,10 +1299,8 @@ static int smscore_init_device(struct smscore_device_t *coredev, int mode)
buffer = kmalloc(sizeof(struct sms_msg_data) +
SMS_DMA_ALIGNMENT, GFP_KERNEL | GFP_DMA);
- if (!buffer) {
- pr_err("Could not allocate buffer for init device message.\n");
+ if (!buffer)
return -ENOMEM;
- }
msg = (struct sms_msg_data *)SMS_ALIGN_ADDRESS(buffer);
SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_INIT_DEVICE_REQ,
@@ -1686,11 +1682,10 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
pr_err("The msg ID already registered to another client.\n");
return -EEXIST;
}
- listentry = kzalloc(sizeof(struct smscore_idlist_t), GFP_KERNEL);
- if (!listentry) {
- pr_err("Can't allocate memory for client id.\n");
+ listentry = kzalloc(sizeof(*listentry), GFP_KERNEL);
+ if (!listentry)
return -ENOMEM;
- }
+
listentry->id = id;
listentry->data_type = data_type;
list_add_locked(&listentry->entry, &client->idlist,
@@ -1724,11 +1719,9 @@ int smscore_register_client(struct smscore_device_t *coredev,
return -EEXIST;
}
- newclient = kzalloc(sizeof(struct smscore_client_t), GFP_KERNEL);
- if (!newclient) {
- pr_err("Failed to allocate memory for client.\n");
+ newclient = kzalloc(sizeof(*newclient), GFP_KERNEL);
+ if (!newclient)
return -ENOMEM;
- }
INIT_LIST_HEAD(&newclient->idlist);
newclient->coredev = coredev;
@@ -1796,7 +1789,7 @@ int smsclient_sendrequest(struct smscore_client_t *client,
struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
int rc;
- if (client == NULL) {
+ if (!client) {
pr_err("Got NULL client\n");
return -EINVAL;
}
@@ -1804,7 +1797,7 @@ int smsclient_sendrequest(struct smscore_client_t *client,
coredev = client->coredev;
/* check that no other channel with same id exists */
- if (coredev == NULL) {
+ if (!coredev) {
pr_err("Got NULL coredev\n");
return -EINVAL;
}
@@ -1961,7 +1954,7 @@ int smscore_gpio_configure(struct smscore_device_t *coredev, u8 pin_num,
if (pin_num > MAX_GPIO_PIN_NUMBER)
return -EINVAL;
- if (p_gpio_config == NULL)
+ if (!p_gpio_config)
return -EINVAL;
total_len = sizeof(struct sms_msg_hdr) + (sizeof(u32) * 6);
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index a772976cfe26..f96968c11312 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -238,6 +238,8 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->color_enc = TGP_COLOR_ENC_RGB;
break;
case V4L2_PIX_FMT_GREY:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y12:
case V4L2_PIX_FMT_Y16:
case V4L2_PIX_FMT_Y16_BE:
tpg->color_enc = TGP_COLOR_ENC_LUMA;
@@ -352,6 +354,8 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_YUV444:
case V4L2_PIX_FMT_YUV555:
case V4L2_PIX_FMT_YUV565:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y12:
case V4L2_PIX_FMT_Y16:
case V4L2_PIX_FMT_Y16_BE:
tpg->twopixelsize[0] = 2 * 2;
@@ -1056,6 +1060,14 @@ static void gen_twopix(struct tpg_data *tpg,
case V4L2_PIX_FMT_GREY:
buf[0][offset] = r_y_h;
break;
+ case V4L2_PIX_FMT_Y10:
+ buf[0][offset] = (r_y_h << 2) & 0xff;
+ buf[0][offset+1] = r_y_h >> 6;
+ break;
+ case V4L2_PIX_FMT_Y12:
+ buf[0][offset] = (r_y_h << 4) & 0xff;
+ buf[0][offset+1] = r_y_h >> 4;
+ break;
case V4L2_PIX_FMT_Y16:
/*
* Ideally both bytes should be set to r_y_h, but then you won't
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 18e4230865be..3ddd44e1ee77 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -329,9 +329,9 @@ static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter,
return 0;
}
-static void dvb_dmxdev_filter_timeout(unsigned long data)
+static void dvb_dmxdev_filter_timeout(struct timer_list *t)
{
- struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data;
+ struct dmxdev_filter *dmxdevfilter = from_timer(dmxdevfilter, t, timer);
dmxdevfilter->buffer.error = -ETIMEDOUT;
spin_lock_irq(&dmxdevfilter->dev->lock);
@@ -346,8 +346,6 @@ static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
del_timer(&dmxdevfilter->timer);
if (para->timeout) {
- dmxdevfilter->timer.function = dvb_dmxdev_filter_timeout;
- dmxdevfilter->timer.data = (unsigned long)dmxdevfilter;
dmxdevfilter->timer.expires =
jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000;
add_timer(&dmxdevfilter->timer);
@@ -754,7 +752,7 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
dmxdevfilter->type = DMXDEV_TYPE_NONE;
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
- init_timer(&dmxdevfilter->timer);
+ timer_setup(&dmxdevfilter->timer, dvb_dmxdev_filter_timeout, 0);
dvbdev->users++;
diff --git a/drivers/media/dvb-core/dmxdev.h b/drivers/media/dvb-core/dmxdev.h
index 054fd4eb6192..5e795f5f0f41 100644
--- a/drivers/media/dvb-core/dmxdev.h
+++ b/drivers/media/dvb-core/dmxdev.h
@@ -36,12 +36,33 @@
#include "demux.h"
#include "dvb_ringbuffer.h"
+/**
+ * enum dmxdev_type - type of demux filter.
+ *
+ * @DMXDEV_TYPE_NONE: no filter set.
+ * @DMXDEV_TYPE_SEC: section filter.
+ * @DMXDEV_TYPE_PES: Program Elementary Stream (PES) filter.
+ */
enum dmxdev_type {
DMXDEV_TYPE_NONE,
DMXDEV_TYPE_SEC,
DMXDEV_TYPE_PES,
};
+/**
+ * enum dmxdev_state - state machine for the dmxdev.
+ *
+ * @DMXDEV_STATE_FREE: indicates that the filter is freed.
+ * @DMXDEV_STATE_ALLOCATED: indicates that the filter was allocated
+ * to be used.
+ * @DMXDEV_STATE_SET: indicates that the filter parameters are set.
+ * @DMXDEV_STATE_GO: indicates that the filter is running.
+ * @DMXDEV_STATE_DONE: indicates that a packet was already filtered
+ * and the filter is now disabled.
+ * Set only if %DMX_ONESHOT. See
+ * &dmx_sct_filter_params.
+ * @DMXDEV_STATE_TIMEDOUT: Indicates a timeout condition.
+ */
enum dmxdev_state {
DMXDEV_STATE_FREE,
DMXDEV_STATE_ALLOCATED,
@@ -51,12 +72,49 @@ enum dmxdev_state {
DMXDEV_STATE_TIMEDOUT
};
+/**
+ * struct dmxdev_feed - digital TV dmxdev feed
+ *
+ * @pid: Program ID to be filtered
+ * @ts: pointer to &struct dmx_ts_feed
+ * @next: &struct list_head pointing to the next feed.
+ */
+
struct dmxdev_feed {
u16 pid;
struct dmx_ts_feed *ts;
struct list_head next;
};
+/**
+ * struct dmxdev_filter - digital TV dmxdev filter
+ *
+ * @filter: a dmxdev filter. Currently used only for section filter:
+ * if the filter is Section, it contains a
+ * &struct dmx_section_filter @sec pointer.
+ * @feed: a dmxdev feed. Depending on the feed type, it can be:
+ * for TS feed: a &struct list_head @ts list of TS and PES
+ * feeds;
+ * for section feed: a &struct dmx_section_feed @sec pointer.
+ * @params: dmxdev filter parameters. Depending on the feed type, it
+ * can be:
+ * for section filter: a &struct dmx_sct_filter_params @sec
+ * embedded struct;
+ * for a TS filter: a &struct dmx_pes_filter_params @pes
+ * embedded struct.
+ * @type: type of the dmxdev filter, as defined by &enum dmxdev_type.
+ * @state: state of the dmxdev filter, as defined by &enum dmxdev_state.
+ * @dev: pointer to &struct dmxdev.
+ * @buffer: an embedded &struct dvb_ringbuffer buffer.
+ * @mutex: protects the access to &struct dmxdev_filter.
+ * @timer: &struct timer_list embedded timer, used to check for
+ * feed timeouts.
+ * Only for section filter.
+ * @todo: index for the @secheader.
+ * Only for section filter.
+ * @secheader: buffer cache to parse the section header.
+ * Only for section filter.
+ */
struct dmxdev_filter {
union {
struct dmx_section_filter *sec;
@@ -86,7 +144,23 @@ struct dmxdev_filter {
u8 secheader[3];
};
-
+/**
+ * struct dmxdev - Describes a digital TV demux device.
+ *
+ * @dvbdev: pointer to &struct dvb_device associated with
+ * the demux device node.
+ * @dvr_dvbdev: pointer to &struct dvb_device associated with
+ * the dvr device node.
+ * @filter: pointer to &struct dmxdev_filter.
+ * @demux: pointer to &struct dmx_demux.
+ * @filternum: number of filters.
+ * @capabilities: demux capabilities as defined by &enum dmx_demux_caps.
+ * @exit: flag to indicate that the demux is being released.
+ * @dvr_orig_fe: pointer to &struct dmx_frontend.
+ * @dvr_buffer: embedded &struct dvb_ringbuffer for DVB output.
+ * @mutex: protects the usage of this structure.
+ * @lock: protects access to &dmxdev->filter->data.
+ */
struct dmxdev {
struct dvb_device *dvbdev;
struct dvb_device *dvr_dvbdev;
@@ -108,8 +182,20 @@ struct dmxdev {
spinlock_t lock;
};
+/**
+ * dvb_dmxdev_init - initializes a digital TV demux and registers both demux
+ * and DVR devices.
+ *
+ * @dmxdev: pointer to &struct dmxdev.
+ * @adap: pointer to &struct dvb_adapter.
+ */
+int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *adap);
-int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *);
+/**
+ * dvb_dmxdev_release - releases a digital TV demux and unregisters it.
+ *
+ * @dmxdev: pointer to &struct dmxdev.
+ */
void dvb_dmxdev_release(struct dmxdev *dmxdev);
#endif /* _DMXDEV_H_ */
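/*
 * The kerneldoc added above covers dvb_dmxdev_init() and
 * dvb_dmxdev_release().  A hedged sketch of the usual registration sequence
 * in a bridge driver, assuming the underlying dvb_demux was already prepared
 * with dvb_dmx_init() (the helper and field values are illustrative):
 */
#include "dvb_demux.h"
#include "dmxdev.h"

static int demo_register_dmxdev(struct dmxdev *dmxdev,
				struct dvb_demux *demux,
				struct dvb_adapter *adap)
{
	dmxdev->filternum = 256;
	dmxdev->demux = &demux->dmx;	/* hand over the demux implementation */
	dmxdev->capabilities = 0;
	return dvb_dmxdev_init(dmxdev, adap);	/* creates demux and dvr nodes */
}

/*
 * On teardown the driver would call dvb_dmxdev_release(dmxdev) before
 * dvb_dmx_release(demux).
 */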
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 6628f80d184f..acade7543b82 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -223,10 +223,10 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
* when the second packet arrives.
*
* Fix:
- * when demux is started, let feed->pusi_seen = 0 to
+ * when demux is started, let feed->pusi_seen = false to
* prevent initial feeding of garbage from the end of
* previous section. When you for the first time see PUSI=1
- * then set feed->pusi_seen = 1
+ * then set feed->pusi_seen = true
*/
static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
const u8 *buf, u8 len)
@@ -318,10 +318,10 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
*/
#endif
/*
- * Discontinuity detected. Reset pusi_seen = 0 to
+ * Discontinuity detected. Reset pusi_seen to
* stop feeding of suspicious data until next PUSI=1 arrives
*/
- feed->pusi_seen = 0;
+ feed->pusi_seen = false;
dvb_dmx_swfilter_section_new(feed);
}
@@ -335,8 +335,8 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
dvb_dmx_swfilter_section_copy_dump(feed, before,
before_len);
- /* before start of new section, set pusi_seen = 1 */
- feed->pusi_seen = 1;
+ /* before start of new section, set pusi_seen */
+ feed->pusi_seen = true;
dvb_dmx_swfilter_section_new(feed);
dvb_dmx_swfilter_section_copy_dump(feed, after,
after_len);
@@ -367,6 +367,7 @@ static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
else
feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
}
+ /* Used only on full-featured devices */
if (feed->ts_type & TS_DECODER)
if (feed->demux->write_to_decoder)
feed->demux->write_to_decoder(feed, buf, 188);
@@ -898,14 +899,14 @@ static void prepare_secfilters(struct dvb_demux_feed *dvbdmxfeed)
return;
do {
sf = &f->filter;
- doneq = 0;
+ doneq = false;
for (i = 0; i < DVB_DEMUX_MASK_MAX; i++) {
mode = sf->filter_mode[i];
mask = sf->filter_mask[i];
f->maskandmode[i] = mask & mode;
doneq |= f->maskandnotmode[i] = mask & ~mode;
}
- f->doneq = doneq ? 1 : 0;
+ f->doneq = doneq ? true : false;
} while ((f = f->next));
}
diff --git a/drivers/media/dvb-core/dvb_demux.h b/drivers/media/dvb-core/dvb_demux.h
index 6f572ca8d339..cc048f09aa85 100644
--- a/drivers/media/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb-core/dvb_demux.h
@@ -26,15 +26,33 @@
#include "demux.h"
-#define DMX_TYPE_TS 0
-#define DMX_TYPE_SEC 1
-#define DMX_TYPE_PES 2
+/**
+ * enum dvb_dmx_filter_type - type of demux feed.
+ *
+ * @DMX_TYPE_TS: feed is in TS mode.
+ * @DMX_TYPE_SEC: feed is in Section mode.
+ */
+enum dvb_dmx_filter_type {
+ DMX_TYPE_TS,
+ DMX_TYPE_SEC,
+};
-#define DMX_STATE_FREE 0
-#define DMX_STATE_ALLOCATED 1
-#define DMX_STATE_SET 2
-#define DMX_STATE_READY 3
-#define DMX_STATE_GO 4
+/**
+ * enum dvb_dmx_state - state machine for a demux filter.
+ *
+ * @DMX_STATE_FREE: indicates that the filter is freed.
+ * @DMX_STATE_ALLOCATED: indicates that the filter was allocated
+ * to be used.
+ * @DMX_STATE_READY: indicates that the filter is ready
+ * to be used.
+ * @DMX_STATE_GO: indicates that the filter is running.
+ */
+enum dvb_dmx_state {
+ DMX_STATE_FREE,
+ DMX_STATE_ALLOCATED,
+ DMX_STATE_READY,
+ DMX_STATE_GO,
+};
#define DVB_DEMUX_MASK_MAX 18
@@ -42,24 +60,66 @@
#define SPEED_PKTS_INTERVAL 50000
+/**
+ * struct dvb_demux_filter - Describes a DVB demux section filter.
+ *
+ * @filter: Section filter as defined by &struct dmx_section_filter.
+ * @maskandmode: logical ``and`` bit mask.
+ * @maskandnotmode: logical ``and not`` bit mask.
+ * @doneq: flag that indicates when a filter is ready.
+ * @next: pointer to the next section filter.
+ * @feed: &struct dvb_demux_feed pointer.
+ * @index: index of the used demux filter.
+ * @state: state of the filter as described by &enum dvb_dmx_state.
+ * @type: type of the filter as described
+ * by &enum dvb_dmx_filter_type.
+ */
+
struct dvb_demux_filter {
struct dmx_section_filter filter;
u8 maskandmode[DMX_MAX_FILTER_SIZE];
u8 maskandnotmode[DMX_MAX_FILTER_SIZE];
- int doneq;
+ bool doneq;
struct dvb_demux_filter *next;
struct dvb_demux_feed *feed;
int index;
- int state;
- int type;
+ enum dvb_dmx_state state;
+ enum dvb_dmx_filter_type type;
+ /* private: used only by av7110 */
u16 hw_handle;
- struct timer_list timer;
};
-#define DMX_FEED_ENTRY(pos) list_entry(pos, struct dvb_demux_feed, list_head)
-
+/**
+ * struct dvb_demux_feed - describes a DVB demux feed
+ *
+ * @feed: a digital TV feed. It can either be a TS or a section feed:
+ * if the feed is TS, it contains &struct dvb_ts_feed @ts;
+ * if the feed is section, it contains
+ * &struct dmx_section_feed @sec.
+ * @cb: digital TV callbacks. Depending on the feed type, it can be:
+ * if the feed is TS, it contains a dmx_ts_cb() @ts callback;
+ * if the feed is section, it contains a dmx_section_cb() @sec
+ * callback.
+ *
+ * @demux: pointer to &struct dvb_demux.
+ * @priv: private data that can optionally be used by a DVB driver.
+ * @type: type of the filter, as defined by &enum dvb_dmx_filter_type.
+ * @state: state of the filter as defined by &enum dvb_dmx_state.
+ * @pid: PID to be filtered.
+ * @timeout: feed timeout.
+ * @filter: pointer to &struct dvb_demux_filter.
+ * @ts_type: type of TS, as defined by &enum ts_filter_type.
+ * @pes_type: type of PES, as defined by &enum dmx_ts_pes.
+ * @cc: MPEG-TS packet continuity counter
+ * @pusi_seen: if true, indicates that a PUSI (payload unit start
+ * indicator) was seen; used to prevent feeding of garbage from
+ * the previous section.
+ * @peslen: length of the PES (Packet Elementary Stream).
+ * @list_head: head for the list of digital TV demux feeds.
+ * @index: a unique index for each feed. Can be used as hardware
+ * pid filter index.
+ */
struct dvb_demux_feed {
union {
struct dmx_ts_feed ts;
@@ -73,25 +133,63 @@ struct dvb_demux_feed {
struct dvb_demux *demux;
void *priv;
- int type;
- int state;
+ enum dvb_dmx_filter_type type;
+ enum dvb_dmx_state state;
u16 pid;
ktime_t timeout;
struct dvb_demux_filter *filter;
- int ts_type;
+ enum ts_filter_type ts_type;
enum dmx_ts_pes pes_type;
int cc;
- int pusi_seen; /* prevents feeding of garbage from previous section */
+ bool pusi_seen;
u16 peslen;
struct list_head list_head;
- unsigned int index; /* a unique index for each feed (can be used as hardware pid filter index) */
+ unsigned int index;
};
+/**
+ * struct dvb_demux - represents a digital TV demux
+ * @dmx: embedded &struct dmx_demux with demux capabilities
+ * and callbacks.
+ * @priv: private data that can optionally be used by
+ * a DVB driver.
+ * @filternum: maximum amount of DVB filters.
+ * @feednum: maximum amount of DVB feeds.
+ * @start_feed: callback routine to be called in order to start
+ * a DVB feed.
+ * @stop_feed: callback routine to be called in order to stop
+ * a DVB feed.
+ * @write_to_decoder: callback routine to be called if the feed is TS and
+ * it is routed to an A/V decoder, when a new TS packet
+ * is received.
+ * Used only on av7110-av.c.
+ * @check_crc32: callback routine to check CRC. If not initialized,
+ * dvb_demux will use an internal one.
+ * @memcopy: callback routine to memcopy received data.
+ * If not initialized, dvb_demux will default to memcpy().
+ * @users: counter for the number of open demux file descriptors.
+ * Currently, it is limited to 10 users.
+ * @filter: pointer to &struct dvb_demux_filter.
+ * @feed: pointer to &struct dvb_demux_feed.
+ * @frontend_list: &struct list_head with frontends used by the demux.
+ * @pesfilter: array of &struct dvb_demux_feed with the PES types
+ * that will be filtered.
+ * @pids: list of filtered program IDs.
+ * @feed_list: &struct list_head with feeds.
+ * @tsbuf: temporary buffer used internally to store TS packets.
+ * @tsbufp: temporary buffer index used internally.
+ * @mutex: pointer to &struct mutex used to protect feed set
+ * logic.
+ * @lock: pointer to &spinlock_t, used to protect buffer handling.
+ * @cnt_storage: buffer used for TS/TEI continuity check.
+ * @speed_last_time: &ktime_t used for TS speed check.
+ * @speed_pkts_cnt: packets count used for TS speed check.
+ */
struct dvb_demux {
struct dmx_demux dmx;
void *priv;
@@ -115,8 +213,6 @@ struct dvb_demux {
struct dvb_demux_feed *pesfilter[DMX_PES_OTHER];
u16 pids[DMX_PES_OTHER];
- int playing;
- int recording;
#define DMX_MAX_PID 0x2000
struct list_head feed_list;
@@ -130,15 +226,119 @@ struct dvb_demux {
ktime_t speed_last_time; /* for TS speed check */
uint32_t speed_pkts_cnt; /* for TS speed check */
+
+ /* private: used only on av7110 */
+ int playing;
+ int recording;
};
-int dvb_dmx_init(struct dvb_demux *dvbdemux);
-void dvb_dmx_release(struct dvb_demux *dvbdemux);
-void dvb_dmx_swfilter_packets(struct dvb_demux *dvbdmx, const u8 *buf,
+/**
+ * dvb_dmx_init - initialize a digital TV demux struct.
+ *
+ * @demux: &struct dvb_demux to be initialized.
+ *
+ * Before being able to register a digital TV demux struct, drivers
+ * should call this routine. In typical usage, some fields should
+ * be initialized by the driver before calling it.
+ *
+ * A typical usecase is::
+ *
+ * dvb->demux.dmx.capabilities =
+ * DMX_TS_FILTERING | DMX_SECTION_FILTERING |
+ * DMX_MEMORY_BASED_FILTERING;
+ * dvb->demux.priv = dvb;
+ * dvb->demux.filternum = 256;
+ * dvb->demux.feednum = 256;
+ * dvb->demux.start_feed = driver_start_feed;
+ * dvb->demux.stop_feed = driver_stop_feed;
+ * ret = dvb_dmx_init(&dvb->demux);
+ * if (ret < 0)
+ * return ret;
+ */
+int dvb_dmx_init(struct dvb_demux *demux);
+
+/**
+ * dvb_dmx_release - releases a digital TV demux internal buffers.
+ *
+ * @demux: &struct dvb_demux to be released.
+ *
+ * The DVB core internally allocates data for @demux. This routine
+ * releases that data. Note that the struct itself is not
+ * released, as it can be embedded in other structs.
+ */
+void dvb_dmx_release(struct dvb_demux *demux);
+
+/**
+ * dvb_dmx_swfilter_packets - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 188 bytes each.
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 188.
+ *
+ * The routine will discard a DVB packet that doesn't start with 0x47.
+ *
+ * Use this routine if the DVB demux fills MPEG-TS buffers that are
+ * already aligned.
+ *
+ * NOTE: @buf should have a size equal to ``count * 188``.
+ */
+void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
size_t count);
+
+/**
+ * dvb_dmx_swfilter - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 188 bytes each.
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 188.
+ *
+ * If a DVB packet doesn't start with 0x47, the routine seeks forward to
+ * the next 0x47 sync byte.
+ *
+ * Use this routine if the DVB demux fills buffers that may not start with
+ * a packet start mark (0x47).
+ *
+ * NOTE: @buf should have a size equal to ``count * 188``.
+ */
void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count);
+
+/**
+ * dvb_dmx_swfilter_204 - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 204 bytes each.
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 204.
+ *
+ * If a DVB packet doesn't start with 0x47, the routine seeks forward to
+ * the next 0x47 sync byte.
+ *
+ * Use this routine if the DVB demux fills buffers that may not start with
+ * a packet start mark (0x47).
+ *
+ * NOTE: @buf should have a size equal to ``count * 204``.
+ */
void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf,
size_t count);
+
+/**
+ * dvb_dmx_swfilter_raw - make the raw data available to userspace without
+ * filtering
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data
+ * @count: number of packets to be passed. The actual size of each packet
+ * depends on the &dvb_demux->feed->cb.ts logic.
+ *
+ * Use it if the driver needs to deliver the raw payload to userspace without
+ * passing through the kernel demux. That is meant to support some
+ * delivery systems that aren't based on MPEG-TS.
+ *
+ * This function relies on &dvb_demux->feed->cb.ts to actually handle the
+ * buffer.
+ */
void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf,
size_t count);
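/*
 * The kerneldoc above distinguishes the aligned and unaligned software-filter
 * entry points.  A hedged sketch of how a bridge driver's completion handler
 * might feed captured TS data to the demux, picking the helper based on
 * alignment (the function and its arguments are illustrative):
 */
#include "dvb_demux.h"

static void demo_feed_ts(struct dvb_demux *demux, const u8 *buf, size_t len)
{
	if (len % 188 == 0 && buf[0] == 0x47)
		/* Whole, aligned 188-byte packets: use the fast path. */
		dvb_dmx_swfilter_packets(demux, buf, len / 188);
	else
		/* Possibly unaligned data: let the demux resynchronize. */
		dvb_dmx_swfilter(demux, buf, len);
}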
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 9139d01ba7ed..3ad83359098b 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -145,15 +145,13 @@ static void __dvb_frontend_free(struct dvb_frontend *fe)
{
struct dvb_frontend_private *fepriv = fe->frontend_priv;
- if (!fepriv)
- return;
-
- dvb_free_device(fepriv->dvbdev);
+ if (fepriv)
+ dvb_free_device(fepriv->dvbdev);
dvb_frontend_invoke_release(fe, fe->ops.release);
- kfree(fepriv);
- fe->frontend_priv = NULL;
+ if (fepriv)
+ kfree(fepriv);
}
static void dvb_frontend_free(struct kref *ref)
@@ -951,8 +949,6 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
memset(c, 0, offsetof(struct dtv_frontend_properties, strength));
c->delivery_system = delsys;
- c->state = DTV_CLEAR;
-
dev_dbg(fe->dvb->device, "%s: Clearing cache for delivery system %d\n",
__func__, c->delivery_system);
@@ -1109,39 +1105,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_STAT_TOTAL_BLOCK_COUNT, 0, 0),
};
-static void dtv_property_dump(struct dvb_frontend *fe,
- bool is_set,
- struct dtv_property *tvp)
-{
- int i;
-
- if (tvp->cmd <= 0 || tvp->cmd > DTV_MAX_COMMAND) {
- dev_warn(fe->dvb->device, "%s: %s tvp.cmd = 0x%08x undefined\n",
- __func__,
- is_set ? "SET" : "GET",
- tvp->cmd);
- return;
- }
-
- dev_dbg(fe->dvb->device, "%s: %s tvp.cmd = 0x%08x (%s)\n", __func__,
- is_set ? "SET" : "GET",
- tvp->cmd,
- dtv_cmds[tvp->cmd].name);
-
- if (dtv_cmds[tvp->cmd].buffer) {
- dev_dbg(fe->dvb->device, "%s: tvp.u.buffer.len = 0x%02x\n",
- __func__, tvp->u.buffer.len);
-
- for(i = 0; i < tvp->u.buffer.len; i++)
- dev_dbg(fe->dvb->device,
- "%s: tvp.u.buffer.data[0x%02x] = 0x%02x\n",
- __func__, i, tvp->u.buffer.data[i]);
- } else {
- dev_dbg(fe->dvb->device, "%s: tvp.u.data = 0x%08x\n", __func__,
- tvp->u.data);
- }
-}
-
/* Synchronise the legacy tuning parameters into the cache, so that demodulator
* drivers can use a single set_frontend tuning function, regardless of whether
* it's being used for the legacy or new API, reducing code and complexity.
@@ -1315,17 +1278,15 @@ static int dtv_get_frontend(struct dvb_frontend *fe,
return 0;
}
-static int dvb_frontend_ioctl_legacy(struct file *file,
- unsigned int cmd, void *parg);
-static int dvb_frontend_ioctl_properties(struct file *file,
- unsigned int cmd, void *parg);
+static int dvb_frontend_handle_ioctl(struct file *file,
+ unsigned int cmd, void *parg);
static int dtv_property_process_get(struct dvb_frontend *fe,
const struct dtv_frontend_properties *c,
struct dtv_property *tvp,
struct file *file)
{
- int r, ncaps;
+ int ncaps;
switch(tvp->cmd) {
case DTV_ENUM_DELSYS:
@@ -1536,14 +1497,18 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
return -EINVAL;
}
- /* Allow the frontend to override outgoing properties */
- if (fe->ops.get_property) {
- r = fe->ops.get_property(fe, tvp);
- if (r < 0)
- return r;
- }
-
- dtv_property_dump(fe, false, tvp);
+ if (!dtv_cmds[tvp->cmd].buffer)
+ dev_dbg(fe->dvb->device,
+ "%s: GET cmd 0x%08x (%s) = 0x%08x\n",
+ __func__, tvp->cmd, dtv_cmds[tvp->cmd].name,
+ tvp->u.data);
+ else
+ dev_dbg(fe->dvb->device,
+ "%s: GET cmd 0x%08x (%s) len %d: %*ph\n",
+ __func__,
+ tvp->cmd, dtv_cmds[tvp->cmd].name,
+ tvp->u.buffer.len,
+ tvp->u.buffer.len, tvp->u.buffer.data);
return 0;
}
@@ -1766,23 +1731,36 @@ static int dvbv3_set_delivery_system(struct dvb_frontend *fe)
return emulate_delivery_system(fe, delsys);
}
+/**
+ * dtv_property_process_set - Sets a single DTV property
+ * @fe: Pointer to &struct dvb_frontend
+ * @file: Pointer to &struct file
+ * @cmd: Digital TV command
+ * @data: An unsigned 32-bit number
+ *
+ * This routine assigns the property
+ * value to the corresponding member of
+ * &struct dtv_frontend_properties
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
static int dtv_property_process_set(struct dvb_frontend *fe,
- struct dtv_property *tvp,
- struct file *file)
+ struct file *file,
+ u32 cmd, u32 data)
{
int r = 0;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- /* Allow the frontend to validate incoming properties */
- if (fe->ops.set_property) {
- r = fe->ops.set_property(fe, tvp);
- if (r < 0)
- return r;
- }
-
- dtv_property_dump(fe, true, tvp);
-
- switch(tvp->cmd) {
+ /* Dump DTV command name and value */
+ if (!cmd || cmd > DTV_MAX_COMMAND)
+ dev_warn(fe->dvb->device, "%s: SET cmd 0x%08x undefined\n",
+ __func__, cmd);
+ else
+ dev_dbg(fe->dvb->device,
+ "%s: SET cmd 0x%08x (%s) to 0x%08x\n",
+ __func__, cmd, dtv_cmds[cmd].name, data);
+ switch (cmd) {
case DTV_CLEAR:
/*
* Reset a cache of data specific to the frontend here. This does
@@ -1791,144 +1769,144 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
dvb_frontend_clear_cache(fe);
break;
case DTV_TUNE:
- /* interpret the cache of data, build either a traditional frontend
- * tunerequest so we can pass validation in the FE_SET_FRONTEND
- * ioctl.
+ /*
+ * Use the cached Digital TV properties to tune the
+ * frontend
*/
- c->state = tvp->cmd;
- dev_dbg(fe->dvb->device, "%s: Finalised property cache\n",
- __func__);
+ dev_dbg(fe->dvb->device,
+ "%s: Setting the frontend from property cache\n",
+ __func__);
r = dtv_set_frontend(fe);
break;
case DTV_FREQUENCY:
- c->frequency = tvp->u.data;
+ c->frequency = data;
break;
case DTV_MODULATION:
- c->modulation = tvp->u.data;
+ c->modulation = data;
break;
case DTV_BANDWIDTH_HZ:
- c->bandwidth_hz = tvp->u.data;
+ c->bandwidth_hz = data;
break;
case DTV_INVERSION:
- c->inversion = tvp->u.data;
+ c->inversion = data;
break;
case DTV_SYMBOL_RATE:
- c->symbol_rate = tvp->u.data;
+ c->symbol_rate = data;
break;
case DTV_INNER_FEC:
- c->fec_inner = tvp->u.data;
+ c->fec_inner = data;
break;
case DTV_PILOT:
- c->pilot = tvp->u.data;
+ c->pilot = data;
break;
case DTV_ROLLOFF:
- c->rolloff = tvp->u.data;
+ c->rolloff = data;
break;
case DTV_DELIVERY_SYSTEM:
- r = dvbv5_set_delivery_system(fe, tvp->u.data);
+ r = dvbv5_set_delivery_system(fe, data);
break;
case DTV_VOLTAGE:
- c->voltage = tvp->u.data;
- r = dvb_frontend_ioctl_legacy(file, FE_SET_VOLTAGE,
+ c->voltage = data;
+ r = dvb_frontend_handle_ioctl(file, FE_SET_VOLTAGE,
(void *)c->voltage);
break;
case DTV_TONE:
- c->sectone = tvp->u.data;
- r = dvb_frontend_ioctl_legacy(file, FE_SET_TONE,
+ c->sectone = data;
+ r = dvb_frontend_handle_ioctl(file, FE_SET_TONE,
(void *)c->sectone);
break;
case DTV_CODE_RATE_HP:
- c->code_rate_HP = tvp->u.data;
+ c->code_rate_HP = data;
break;
case DTV_CODE_RATE_LP:
- c->code_rate_LP = tvp->u.data;
+ c->code_rate_LP = data;
break;
case DTV_GUARD_INTERVAL:
- c->guard_interval = tvp->u.data;
+ c->guard_interval = data;
break;
case DTV_TRANSMISSION_MODE:
- c->transmission_mode = tvp->u.data;
+ c->transmission_mode = data;
break;
case DTV_HIERARCHY:
- c->hierarchy = tvp->u.data;
+ c->hierarchy = data;
break;
case DTV_INTERLEAVING:
- c->interleaving = tvp->u.data;
+ c->interleaving = data;
break;
/* ISDB-T Support here */
case DTV_ISDBT_PARTIAL_RECEPTION:
- c->isdbt_partial_reception = tvp->u.data;
+ c->isdbt_partial_reception = data;
break;
case DTV_ISDBT_SOUND_BROADCASTING:
- c->isdbt_sb_mode = tvp->u.data;
+ c->isdbt_sb_mode = data;
break;
case DTV_ISDBT_SB_SUBCHANNEL_ID:
- c->isdbt_sb_subchannel = tvp->u.data;
+ c->isdbt_sb_subchannel = data;
break;
case DTV_ISDBT_SB_SEGMENT_IDX:
- c->isdbt_sb_segment_idx = tvp->u.data;
+ c->isdbt_sb_segment_idx = data;
break;
case DTV_ISDBT_SB_SEGMENT_COUNT:
- c->isdbt_sb_segment_count = tvp->u.data;
+ c->isdbt_sb_segment_count = data;
break;
case DTV_ISDBT_LAYER_ENABLED:
- c->isdbt_layer_enabled = tvp->u.data;
+ c->isdbt_layer_enabled = data;
break;
case DTV_ISDBT_LAYERA_FEC:
- c->layer[0].fec = tvp->u.data;
+ c->layer[0].fec = data;
break;
case DTV_ISDBT_LAYERA_MODULATION:
- c->layer[0].modulation = tvp->u.data;
+ c->layer[0].modulation = data;
break;
case DTV_ISDBT_LAYERA_SEGMENT_COUNT:
- c->layer[0].segment_count = tvp->u.data;
+ c->layer[0].segment_count = data;
break;
case DTV_ISDBT_LAYERA_TIME_INTERLEAVING:
- c->layer[0].interleaving = tvp->u.data;
+ c->layer[0].interleaving = data;
break;
case DTV_ISDBT_LAYERB_FEC:
- c->layer[1].fec = tvp->u.data;
+ c->layer[1].fec = data;
break;
case DTV_ISDBT_LAYERB_MODULATION:
- c->layer[1].modulation = tvp->u.data;
+ c->layer[1].modulation = data;
break;
case DTV_ISDBT_LAYERB_SEGMENT_COUNT:
- c->layer[1].segment_count = tvp->u.data;
+ c->layer[1].segment_count = data;
break;
case DTV_ISDBT_LAYERB_TIME_INTERLEAVING:
- c->layer[1].interleaving = tvp->u.data;
+ c->layer[1].interleaving = data;
break;
case DTV_ISDBT_LAYERC_FEC:
- c->layer[2].fec = tvp->u.data;
+ c->layer[2].fec = data;
break;
case DTV_ISDBT_LAYERC_MODULATION:
- c->layer[2].modulation = tvp->u.data;
+ c->layer[2].modulation = data;
break;
case DTV_ISDBT_LAYERC_SEGMENT_COUNT:
- c->layer[2].segment_count = tvp->u.data;
+ c->layer[2].segment_count = data;
break;
case DTV_ISDBT_LAYERC_TIME_INTERLEAVING:
- c->layer[2].interleaving = tvp->u.data;
+ c->layer[2].interleaving = data;
break;
/* Multistream support */
case DTV_STREAM_ID:
case DTV_DVBT2_PLP_ID_LEGACY:
- c->stream_id = tvp->u.data;
+ c->stream_id = data;
break;
/* ATSC-MH */
case DTV_ATSCMH_PARADE_ID:
- fe->dtv_property_cache.atscmh_parade_id = tvp->u.data;
+ fe->dtv_property_cache.atscmh_parade_id = data;
break;
case DTV_ATSCMH_RS_FRAME_ENSEMBLE:
- fe->dtv_property_cache.atscmh_rs_frame_ensemble = tvp->u.data;
+ fe->dtv_property_cache.atscmh_rs_frame_ensemble = data;
break;
case DTV_LNA:
- c->lna = tvp->u.data;
+ c->lna = data;
if (fe->ops.set_lna)
r = fe->ops.set_lna(fe);
if (r < 0)
@@ -1942,14 +1920,12 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
return r;
}
-static int dvb_frontend_ioctl(struct file *file,
- unsigned int cmd, void *parg)
+static int dvb_frontend_ioctl(struct file *file, unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
- int err = -EOPNOTSUPP;
+ int err;
dev_dbg(fe->dvb->device, "%s: (%d)\n", __func__, _IOC_NR(cmd));
if (down_interruptible(&fepriv->sem))
@@ -1960,109 +1936,33 @@ static int dvb_frontend_ioctl(struct file *file,
return -ENODEV;
}
- if ((file->f_flags & O_ACCMODE) == O_RDONLY &&
- (_IOC_DIR(cmd) != _IOC_READ || cmd == FE_GET_EVENT ||
- cmd == FE_DISEQC_RECV_SLAVE_REPLY)) {
+ /*
+ * If the frontend is opened in read-only mode, only the ioctls
+ * that don't interfere with the tune logic should be accepted.
+ * That allows an external application to monitor the DVB QoS and
+ * statistics parameters.
+ *
+ * That matches all _IOR() ioctls, except for two special cases:
+ * - FE_GET_EVENT is part of the tuning logic on a DVB application;
+ * - FE_DISEQC_RECV_SLAVE_REPLY is part of DiSEqC 2.0
+ * setup
+ * So, those two ioctls should also return -EPERM, as otherwise
+ * reading from them would interfere with a DVB tune application.
+ */
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY
+ && (_IOC_DIR(cmd) != _IOC_READ
+ || cmd == FE_GET_EVENT
+ || cmd == FE_DISEQC_RECV_SLAVE_REPLY)) {
up(&fepriv->sem);
return -EPERM;
}
- if ((cmd == FE_SET_PROPERTY) || (cmd == FE_GET_PROPERTY))
- err = dvb_frontend_ioctl_properties(file, cmd, parg);
- else {
- c->state = DTV_UNDEFINED;
- err = dvb_frontend_ioctl_legacy(file, cmd, parg);
- }
+ err = dvb_frontend_handle_ioctl(file, cmd, parg);
up(&fepriv->sem);
return err;
}
-static int dvb_frontend_ioctl_properties(struct file *file,
- unsigned int cmd, void *parg)
-{
- struct dvb_device *dvbdev = file->private_data;
- struct dvb_frontend *fe = dvbdev->priv;
- struct dvb_frontend_private *fepriv = fe->frontend_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int err = 0;
-
- struct dtv_properties *tvps = parg;
- struct dtv_property *tvp = NULL;
- int i;
-
- dev_dbg(fe->dvb->device, "%s:\n", __func__);
-
- if (cmd == FE_SET_PROPERTY) {
- dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num);
- dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props);
-
- /* Put an arbitrary limit on the number of messages that can
- * be sent at once */
- if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS))
- return -EINVAL;
-
- tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
- if (IS_ERR(tvp))
- return PTR_ERR(tvp);
-
- for (i = 0; i < tvps->num; i++) {
- err = dtv_property_process_set(fe, tvp + i, file);
- if (err < 0)
- goto out;
- (tvp + i)->result = err;
- }
-
- if (c->state == DTV_TUNE)
- dev_dbg(fe->dvb->device, "%s: Property cache is full, tuning\n", __func__);
-
- } else if (cmd == FE_GET_PROPERTY) {
- struct dtv_frontend_properties getp = fe->dtv_property_cache;
-
- dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num);
- dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props);
-
- /* Put an arbitrary limit on the number of messages that can
- * be sent at once */
- if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS))
- return -EINVAL;
-
- tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
- if (IS_ERR(tvp))
- return PTR_ERR(tvp);
-
- /*
- * Let's use our own copy of property cache, in order to
- * avoid mangling with DTV zigzag logic, as drivers might
- * return crap, if they don't check if the data is available
- * before updating the properties cache.
- */
- if (fepriv->state != FESTATE_IDLE) {
- err = dtv_get_frontend(fe, &getp, NULL);
- if (err < 0)
- goto out;
- }
- for (i = 0; i < tvps->num; i++) {
- err = dtv_property_process_get(fe, &getp, tvp + i, file);
- if (err < 0)
- goto out;
- (tvp + i)->result = err;
- }
-
- if (copy_to_user((void __user *)tvps->props, tvp,
- tvps->num * sizeof(struct dtv_property))) {
- err = -EFAULT;
- goto out;
- }
-
- } else
- err = -EOPNOTSUPP;
-
-out:
- kfree(tvp);
- return err;
-}
-
static int dtv_set_frontend(struct dvb_frontend *fe)
{
struct dvb_frontend_private *fepriv = fe->frontend_priv;
@@ -2200,16 +2100,102 @@ static int dtv_set_frontend(struct dvb_frontend *fe)
}
-static int dvb_frontend_ioctl_legacy(struct file *file,
- unsigned int cmd, void *parg)
+static int dvb_frontend_handle_ioctl(struct file *file,
+ unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int err = -EOPNOTSUPP;
+ int i, err;
+
+ dev_dbg(fe->dvb->device, "%s:\n", __func__);
switch (cmd) {
+ case FE_SET_PROPERTY: {
+ struct dtv_properties *tvps = parg;
+ struct dtv_property *tvp = NULL;
+
+ dev_dbg(fe->dvb->device, "%s: properties.num = %d\n",
+ __func__, tvps->num);
+ dev_dbg(fe->dvb->device, "%s: properties.props = %p\n",
+ __func__, tvps->props);
+
+ /*
+ * Put an arbitrary limit on the number of messages that can
+ * be sent at once
+ */
+ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
+ return -EINVAL;
+
+ tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
+ if (IS_ERR(tvp))
+ return PTR_ERR(tvp);
+
+ for (i = 0; i < tvps->num; i++) {
+ err = dtv_property_process_set(fe, file,
+ (tvp + i)->cmd,
+ (tvp + i)->u.data);
+ if (err < 0) {
+ kfree(tvp);
+ return err;
+ }
+ }
+ kfree(tvp);
+ break;
+ }
+ case FE_GET_PROPERTY: {
+ struct dtv_properties *tvps = parg;
+ struct dtv_property *tvp = NULL;
+ struct dtv_frontend_properties getp = fe->dtv_property_cache;
+
+ dev_dbg(fe->dvb->device, "%s: properties.num = %d\n",
+ __func__, tvps->num);
+ dev_dbg(fe->dvb->device, "%s: properties.props = %p\n",
+ __func__, tvps->props);
+
+ /*
+ * Put an arbitrary limit on the number of messages that can
+ * be sent at once
+ */
+ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
+ return -EINVAL;
+
+ tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
+ if (IS_ERR(tvp))
+ return PTR_ERR(tvp);
+
+ /*
+ * Let's use our own copy of property cache, in order to
+ * avoid mangling with DTV zigzag logic, as drivers might
+ * return crap, if they don't check if the data is available
+ * before updating the properties cache.
+ */
+ if (fepriv->state != FESTATE_IDLE) {
+ err = dtv_get_frontend(fe, &getp, NULL);
+ if (err < 0) {
+ kfree(tvp);
+ return err;
+ }
+ }
+ for (i = 0; i < tvps->num; i++) {
+ err = dtv_property_process_get(fe, &getp,
+ tvp + i, file);
+ if (err < 0) {
+ kfree(tvp);
+ return err;
+ }
+ }
+
+ if (copy_to_user((void __user *)tvps->props, tvp,
+ tvps->num * sizeof(struct dtv_property))) {
+ kfree(tvp);
+ return -EFAULT;
+ }
+ kfree(tvp);
+ break;
+ }
+
case FE_GET_INFO: {
struct dvb_frontend_info* info = parg;
@@ -2273,42 +2259,6 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
break;
}
- case FE_READ_BER:
- if (fe->ops.read_ber) {
- if (fepriv->thread)
- err = fe->ops.read_ber(fe, (__u32 *) parg);
- else
- err = -EAGAIN;
- }
- break;
-
- case FE_READ_SIGNAL_STRENGTH:
- if (fe->ops.read_signal_strength) {
- if (fepriv->thread)
- err = fe->ops.read_signal_strength(fe, (__u16 *) parg);
- else
- err = -EAGAIN;
- }
- break;
-
- case FE_READ_SNR:
- if (fe->ops.read_snr) {
- if (fepriv->thread)
- err = fe->ops.read_snr(fe, (__u16 *) parg);
- else
- err = -EAGAIN;
- }
- break;
-
- case FE_READ_UNCORRECTED_BLOCKS:
- if (fe->ops.read_ucblocks) {
- if (fepriv->thread)
- err = fe->ops.read_ucblocks(fe, (__u32 *) parg);
- else
- err = -EAGAIN;
- }
- break;
-
case FE_DISEQC_RESET_OVERLOAD:
if (fe->ops.diseqc_reset_overload) {
err = fe->ops.diseqc_reset_overload(fe);
@@ -2360,6 +2310,23 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
}
break;
+ case FE_DISEQC_RECV_SLAVE_REPLY:
+ if (fe->ops.diseqc_recv_slave_reply)
+ err = fe->ops.diseqc_recv_slave_reply(fe, parg);
+ break;
+
+ case FE_ENABLE_HIGH_LNB_VOLTAGE:
+ if (fe->ops.enable_high_lnb_voltage)
+ err = fe->ops.enable_high_lnb_voltage(fe, (long) parg);
+ break;
+
+ case FE_SET_FRONTEND_TUNE_MODE:
+ fepriv->tune_mode_flags = (unsigned long) parg;
+ err = 0;
+ break;
+
+ /* DEPRECATED dish control ioctls */
+
case FE_DISHNETWORK_SEND_LEGACY_CMD:
if (fe->ops.dishnetwork_send_legacy_command) {
err = fe->ops.dishnetwork_send_legacy_command(fe,
@@ -2425,16 +2392,46 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
}
break;
- case FE_DISEQC_RECV_SLAVE_REPLY:
- if (fe->ops.diseqc_recv_slave_reply)
- err = fe->ops.diseqc_recv_slave_reply(fe, (struct dvb_diseqc_slave_reply*) parg);
+ /* DEPRECATED statistics ioctls */
+
+ case FE_READ_BER:
+ if (fe->ops.read_ber) {
+ if (fepriv->thread)
+ err = fe->ops.read_ber(fe, parg);
+ else
+ err = -EAGAIN;
+ }
break;
- case FE_ENABLE_HIGH_LNB_VOLTAGE:
- if (fe->ops.enable_high_lnb_voltage)
- err = fe->ops.enable_high_lnb_voltage(fe, (long) parg);
+ case FE_READ_SIGNAL_STRENGTH:
+ if (fe->ops.read_signal_strength) {
+ if (fepriv->thread)
+ err = fe->ops.read_signal_strength(fe, parg);
+ else
+ err = -EAGAIN;
+ }
break;
+ case FE_READ_SNR:
+ if (fe->ops.read_snr) {
+ if (fepriv->thread)
+ err = fe->ops.read_snr(fe, parg);
+ else
+ err = -EAGAIN;
+ }
+ break;
+
+ case FE_READ_UNCORRECTED_BLOCKS:
+ if (fe->ops.read_ucblocks) {
+ if (fepriv->thread)
+ err = fe->ops.read_ucblocks(fe, parg);
+ else
+ err = -EAGAIN;
+ }
+ break;
+
+ /* DEPRECATED DVBv3 ioctls */
+
case FE_SET_FRONTEND:
err = dvbv3_set_delivery_system(fe);
if (err)
@@ -2461,11 +2458,10 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
err = dtv_get_frontend(fe, &getp, parg);
break;
}
- case FE_SET_FRONTEND_TUNE_MODE:
- fepriv->tune_mode_flags = (unsigned long) parg;
- err = 0;
- break;
- }
+
+ default:
+ return -ENOTSUPP;
+ } /* switch */
return err;
}
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index 907a05bde162..ace0c2fb26c2 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -180,8 +180,8 @@ enum dvbfe_search {
/**
* struct dvb_tuner_ops - Tuner information and callbacks
*
- * @info: embedded struct dvb_tuner_info with tuner properties
- * @release: callback function called when frontend is dettached.
+ * @info: embedded &struct dvb_tuner_info with tuner properties
+ * @release: callback function called when frontend is detached.
* drivers should free any allocated memory.
* @init: callback function used to initialize the tuner device.
* @sleep: callback function used to put the tuner to sleep.
@@ -191,14 +191,14 @@ enum dvbfe_search {
* resuming from suspend.
* @set_params: callback function used to inform the tuner to tune
* into a digital TV channel. The properties to be used
- * are stored at @dvb_frontend.dtv_property_cache;. The
- * tuner demod can change the parameters to reflect the
- * changes needed for the channel to be tuned, and
+ * are stored at &struct dvb_frontend.dtv_property_cache.
+ * The tuner demod can change the parameters to reflect
+ * the changes needed for the channel to be tuned, and
* update statistics. This is the recommended way to set
* the tuner parameters and should be used on newer
* drivers.
* @set_analog_params: callback function used to tune into an analog TV
- * channel on hybrid tuners. It passes @analog_parameters;
+ * channel on hybrid tuners. It passes @analog_parameters
* to the driver.
* @set_config: callback function used to send some tuner-specific
* parameters.
@@ -207,9 +207,9 @@ enum dvbfe_search {
* @get_if_frequency: get the Intermediate Frequency, in Hz. For baseband,
* should return 0.
* @get_status: returns the frontend lock status
- * @get_rf_strength: returns the RF signal strengh. Used mostly to support
+ * @get_rf_strength: returns the RF signal strength. Used mostly to support
* analog TV and radio. Digital TV should report, instead,
- * via DVBv5 API (@dvb_frontend.dtv_property_cache;).
+ * via DVBv5 API (&struct dvb_frontend.dtv_property_cache).
* @get_afc: Used only by analog TV core. Reports the frequency
* drift due to AFC.
* @calc_regs: callback function used to pass register data settings
@@ -217,7 +217,7 @@ enum dvbfe_search {
* @set_frequency: Set a new frequency. Shouldn't be used on newer drivers.
* @set_bandwidth: Set a new frequency. Shouldn't be used on newer drivers.
*
- * NOTE: frequencies used on get_frequency and set_frequency are in Hz for
+ * NOTE: frequencies used on @get_frequency and @set_frequency are in Hz for
* terrestrial/cable or kHz for satellite.
*
*/
@@ -283,14 +283,14 @@ struct analog_demod_info {
* @set_params: callback function used to inform the demod to set the
* demodulator parameters needed to decode an analog or
* radio channel. The properties are passed via
- * struct @analog_params;.
+ * &struct analog_params.
* @has_signal: returns 0xffff if has signal, or 0 if it doesn't.
* @get_afc: Used only by analog TV core. Reports the frequency
* drift due to AFC.
* @tuner_status: callback function that returns tuner status bits, e. g.
- * TUNER_STATUS_LOCKED and TUNER_STATUS_STEREO.
+ * %TUNER_STATUS_LOCKED and %TUNER_STATUS_STEREO.
* @standby: set the tuner to standby mode.
- * @release: callback function called when frontend is dettached.
+ * @release: callback function called when frontend is detached.
* drivers should free any allocated memory.
* @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C
* mux support instead.
@@ -321,10 +321,10 @@ struct dtv_frontend_properties;
* struct dvb_frontend_ops - Demodulation information and callbacks for
* ditialt TV
*
- * @info: embedded struct dvb_tuner_info with tuner properties
+ * @info: embedded &struct dvb_tuner_info with tuner properties
* @delsys: Delivery systems supported by the frontend
* @detach: callback function called when frontend is detached.
- * drivers should clean up, but not yet free the struct
+ * drivers should clean up, but not yet free the &struct
* dvb_frontend allocation.
* @release: callback function called when frontend is ready to be
* freed.
@@ -338,57 +338,57 @@ struct dtv_frontend_properties;
* allow other drivers to write data into their registers.
* Should not be used on new drivers.
* @tune: callback function used by demod drivers that use
- * @DVBFE_ALGO_HW; to tune into a frequency.
+ * @DVBFE_ALGO_HW to tune into a frequency.
* @get_frontend_algo: returns the desired hardware algorithm.
* @set_frontend: callback function used to inform the demod to set the
* parameters for demodulating a digital TV channel.
- * The properties to be used are stored at
- * @dvb_frontend.dtv_property_cache;. The demod can change
+ * The properties to be used are stored at &struct
+ * dvb_frontend.dtv_property_cache. The demod can change
* the parameters to reflect the changes needed for the
* channel to be decoded, and update statistics.
* @get_tune_settings: callback function
* @get_frontend: callback function used to inform the parameters
* actuall in use. The properties to be used are stored at
- * @dvb_frontend.dtv_property_cache; and update
+ * &struct dvb_frontend.dtv_property_cache and update
* statistics. Please notice that it should not return
* an error code if the statistics are not available
* because the demog is not locked.
* @read_status: returns the locking status of the frontend.
* @read_ber: legacy callback function to return the bit error rate.
* Newer drivers should provide such info via DVBv5 API,
- * e. g. @set_frontend;/@get_frontend;, implementing this
+ * e. g. @set_frontend/@get_frontend, implementing this
* callback only if DVBv3 API compatibility is wanted.
* @read_signal_strength: legacy callback function to return the signal
* strength. Newer drivers should provide such info via
- * DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ * DVBv5 API, e. g. @set_frontend/@get_frontend,
* implementing this callback only if DVBv3 API
* compatibility is wanted.
* @read_snr: legacy callback function to return the Signal/Noise
* rate. Newer drivers should provide such info via
- * DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ * DVBv5 API, e. g. @set_frontend/@get_frontend,
* implementing this callback only if DVBv3 API
* compatibility is wanted.
* @read_ucblocks: legacy callback function to return the Uncorrected Error
* Blocks. Newer drivers should provide such info via
- * DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ * DVBv5 API, e. g. @set_frontend/@get_frontend,
* implementing this callback only if DVBv3 API
* compatibility is wanted.
* @diseqc_reset_overload: callback function to implement the
- * FE_DISEQC_RESET_OVERLOAD ioctl (only Satellite)
+ * FE_DISEQC_RESET_OVERLOAD() ioctl (only Satellite)
* @diseqc_send_master_cmd: callback function to implement the
- * FE_DISEQC_SEND_MASTER_CMD ioctl (only Satellite).
+ * FE_DISEQC_SEND_MASTER_CMD() ioctl (only Satellite).
* @diseqc_recv_slave_reply: callback function to implement the
- * FE_DISEQC_RECV_SLAVE_REPLY ioctl (only Satellite)
+ * FE_DISEQC_RECV_SLAVE_REPLY() ioctl (only Satellite)
* @diseqc_send_burst: callback function to implement the
- * FE_DISEQC_SEND_BURST ioctl (only Satellite).
+ * FE_DISEQC_SEND_BURST() ioctl (only Satellite).
* @set_tone: callback function to implement the
- * FE_SET_TONE ioctl (only Satellite).
+ * FE_SET_TONE() ioctl (only Satellite).
* @set_voltage: callback function to implement the
- * FE_SET_VOLTAGE ioctl (only Satellite).
+ * FE_SET_VOLTAGE() ioctl (only Satellite).
* @enable_high_lnb_voltage: callback function to implement the
- * FE_ENABLE_HIGH_LNB_VOLTAGE ioctl (only Satellite).
+ * FE_ENABLE_HIGH_LNB_VOLTAGE() ioctl (only Satellite).
* @dishnetwork_send_legacy_command: callback function to implement the
- * FE_DISHNETWORK_SEND_LEGACY_CMD ioctl (only Satellite).
+ * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl (only Satellite).
* Drivers should not use this, except when the DVB
* core emulation fails to provide proper support (e.g.
* if @set_voltage takes more than 8ms to work), and
@@ -399,15 +399,10 @@ struct dtv_frontend_properties;
* @ts_bus_ctrl: callback function used to take control of the TS bus.
* @set_lna: callback function to power on/off/auto the LNA.
* @search: callback function used on some custom algo search algos.
- * @tuner_ops: pointer to struct dvb_tuner_ops
- * @analog_ops: pointer to struct analog_demod_ops
- * @set_property: callback function to allow the frontend to validade
- * incoming properties. Should not be used on new drivers.
- * @get_property: callback function to allow the frontend to override
- * outcoming properties. Should not be used on new drivers.
+ * @tuner_ops: pointer to &struct dvb_tuner_ops
+ * @analog_ops: pointer to &struct analog_demod_ops
*/
struct dvb_frontend_ops {
-
struct dvb_frontend_info info;
u8 delsys[MAX_DELSYS];
@@ -466,9 +461,6 @@ struct dvb_frontend_ops {
struct dvb_tuner_ops tuner_ops;
struct analog_demod_ops analog_ops;
-
- int (*set_property)(struct dvb_frontend* fe, struct dtv_property* tvp);
- int (*get_property)(struct dvb_frontend* fe, struct dtv_property* tvp);
};
#ifdef __DVB_CORE__
@@ -565,15 +557,15 @@ struct dtv_frontend_properties {
enum fe_sec_voltage voltage;
enum fe_sec_tone_mode sectone;
- enum fe_spectral_inversion inversion;
- enum fe_code_rate fec_inner;
+ enum fe_spectral_inversion inversion;
+ enum fe_code_rate fec_inner;
enum fe_transmit_mode transmission_mode;
u32 bandwidth_hz; /* 0 = AUTO */
enum fe_guard_interval guard_interval;
- enum fe_hierarchy hierarchy;
+ enum fe_hierarchy hierarchy;
u32 symbol_rate;
- enum fe_code_rate code_rate_HP;
- enum fe_code_rate code_rate_LP;
+ enum fe_code_rate code_rate_HP;
+ enum fe_code_rate code_rate_LP;
enum fe_pilot pilot;
enum fe_rolloff rolloff;
@@ -628,11 +620,6 @@ struct dtv_frontend_properties {
struct dtv_fe_stats post_bit_count;
struct dtv_fe_stats block_error;
struct dtv_fe_stats block_count;
-
- /* private: */
- /* Cache State */
- u32 state;
-
};
#define DVB_FE_NO_EXIT 0
@@ -643,16 +630,16 @@ struct dtv_frontend_properties {
/**
* struct dvb_frontend - Frontend structure to be used on drivers.
*
- * @refcount: refcount to keep track of struct dvb_frontend
+ * @refcount: refcount to keep track of &struct dvb_frontend
* references
- * @ops: embedded struct dvb_frontend_ops
- * @dvb: pointer to struct dvb_adapter
+ * @ops: embedded &struct dvb_frontend_ops
+ * @dvb: pointer to &struct dvb_adapter
* @demodulator_priv: demod private data
* @tuner_priv: tuner private data
* @frontend_priv: frontend private data
* @sec_priv: SEC private data
* @analog_demod_priv: Analog demod private data
- * @dtv_property_cache: embedded struct dtv_frontend_properties
+ * @dtv_property_cache: embedded &struct dtv_frontend_properties
* @callback: callback function used on some drivers to call
* either the tuner or the demodulator.
* @id: Frontend ID
@@ -681,8 +668,8 @@ struct dvb_frontend {
/**
* dvb_register_frontend() - Registers a DVB frontend at the adapter
*
- * @dvb: pointer to the dvb adapter
- * @fe: pointer to the frontend struct
+ * @dvb: pointer to &struct dvb_adapter
+ * @fe: pointer to &struct dvb_frontend
*
* Allocate and initialize the private data needed by the frontend core to
* manage the frontend and calls dvb_register_device() to register a new
@@ -695,7 +682,7 @@ int dvb_register_frontend(struct dvb_adapter *dvb,
/**
* dvb_unregister_frontend() - Unregisters a DVB frontend
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* Stops the frontend kthread, calls dvb_unregister_device() and frees the
* private frontend data allocated by dvb_register_frontend().
@@ -709,14 +696,14 @@ int dvb_unregister_frontend(struct dvb_frontend *fe);
/**
* dvb_frontend_detach() - Detaches and frees frontend specific data
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* This function should be called after dvb_unregister_frontend(). It
* calls the SEC, tuner and demod release functions:
* &dvb_frontend_ops.release_sec, &dvb_frontend_ops.tuner_ops.release,
* &dvb_frontend_ops.analog_ops.release and &dvb_frontend_ops.release.
*
- * If the driver is compiled with CONFIG_MEDIA_ATTACH, it also decreases
+ * If the driver is compiled with %CONFIG_MEDIA_ATTACH, it also decreases
* the module reference count, needed to allow userspace to remove the
* previously used DVB frontend modules.
*/
@@ -725,7 +712,7 @@ void dvb_frontend_detach(struct dvb_frontend *fe);
/**
* dvb_frontend_suspend() - Suspends a Digital TV frontend
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* This function prepares a Digital TV frontend to suspend.
*
@@ -743,7 +730,7 @@ int dvb_frontend_suspend(struct dvb_frontend *fe);
/**
* dvb_frontend_resume() - Resumes a Digital TV frontend
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* This function resumes the usual operation of the tuner after resume.
*
@@ -764,7 +751,7 @@ int dvb_frontend_resume(struct dvb_frontend *fe);
/**
* dvb_frontend_reinitialise() - forces a reinitialisation at the frontend
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* Calls &dvb_frontend_ops.init\(\) and &dvb_frontend_ops.tuner_ops.init\(\),
* and resets SEC tone and voltage (for Satellite systems).
@@ -779,16 +766,16 @@ void dvb_frontend_reinitialise(struct dvb_frontend *fe);
* dvb_frontend_sleep_until() - Sleep for the amount of time given by
* add_usec parameter
*
- * @waketime: pointer to a struct ktime_t
+ * @waketime: pointer to &struct ktime_t
* @add_usec: time to sleep, in microseconds
*
* This function is used to measure the time required for the
- * %FE_DISHNETWORK_SEND_LEGACY_CMD ioctl to work. It needs to be as precise
+ * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl to work. It needs to be as precise
* as possible, as it affects the detection of the dish tone command at the
* satellite subsystem.
*
* Its used internally by the DVB frontend core, in order to emulate
- * %FE_DISHNETWORK_SEND_LEGACY_CMD using the &dvb_frontend_ops.set_voltage\(\)
+ * FE_DISHNETWORK_SEND_LEGACY_CMD() using the &dvb_frontend_ops.set_voltage\(\)
* callback.
*
* NOTE: it should not be used at the drivers, as the emulation for the
diff --git a/drivers/media/dvb-core/dvb_net.h b/drivers/media/dvb-core/dvb_net.h
index e9b18aa03e02..1eae8bad7cc1 100644
--- a/drivers/media/dvb-core/dvb_net.h
+++ b/drivers/media/dvb-core/dvb_net.h
@@ -30,6 +30,22 @@
#ifdef CONFIG_DVB_NET
+/**
+ * struct dvb_net - describes a DVB network interface
+ *
+ * @dvbdev: pointer to &struct dvb_device.
+ * @device: array of pointers to &struct net_device.
+ * @state: array of integers, one per net device. A value
+ * other than zero means that the interface is in use.
+ * @exit: flag to indicate when the device is being removed.
+ * @demux: pointer to &struct dmx_demux.
+ * @ioctl_mutex: protect access to this struct.
+ *
+ * Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network
+ * devices.
+ */
+
struct dvb_net {
struct dvb_device *dvbdev;
struct net_device *device[DVB_NET_DEVICES_MAX];
@@ -39,8 +55,22 @@ struct dvb_net {
struct mutex ioctl_mutex;
};
-void dvb_net_release(struct dvb_net *);
-int dvb_net_init(struct dvb_adapter *, struct dvb_net *, struct dmx_demux *);
+/**
+ * dvb_net_init - initializes a digital TV network device and registers it.
+ *
+ * @adap: pointer to &struct dvb_adapter.
+ * @dvbnet: pointer to &struct dvb_net.
+ * @dmxdemux: pointer to &struct dmx_demux.
+ */
+int dvb_net_init(struct dvb_adapter *adap, struct dvb_net *dvbnet,
+ struct dmx_demux *dmxdemux);
+
+/**
+ * dvb_net_release - releases a digital TV network device and unregisters it.
+ *
+ * @dvbnet: pointer to &struct dvb_net.
+ */
+void dvb_net_release(struct dvb_net *dvbnet);
#else
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
index 2322af1b8742..53011629c9ad 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb-core/dvb_ringbuffer.c
@@ -66,12 +66,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf)
{
ssize_t free;
- /* ACCESS_ONCE() to load read pointer on writer side
+ /* READ_ONCE() to load read pointer on writer side
* this pairs with smp_store_release() in dvb_ringbuffer_read(),
* dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(),
* or dvb_ringbuffer_reset()
*/
- free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite;
+ free = READ_ONCE(rbuf->pread) - rbuf->pwrite;
if (free <= 0)
free += rbuf->size;
return free-1;
@@ -143,7 +143,7 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si
todo -= split;
/* smp_store_release() for read pointer update to ensure
* that buf is not overwritten until read is complete,
- * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
+ * this pairs with READ_ONCE() in dvb_ringbuffer_free()
*/
smp_store_release(&rbuf->pread, 0);
}
@@ -168,7 +168,7 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
todo -= split;
/* smp_store_release() for read pointer update to ensure
* that buf is not overwritten until read is complete,
- * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
+ * this pairs with READ_ONCE() in dvb_ringbuffer_free()
*/
smp_store_release(&rbuf->pread, 0);
}
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 41aad0f99d73..060c60ddfcc3 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -51,8 +51,15 @@ static LIST_HEAD(dvb_adapter_list);
static DEFINE_MUTEX(dvbdev_register_lock);
static const char * const dnames[] = {
- "video", "audio", "sec", "frontend", "demux", "dvr", "ca",
- "net", "osd"
+ [DVB_DEVICE_VIDEO] = "video",
+ [DVB_DEVICE_AUDIO] = "audio",
+ [DVB_DEVICE_SEC] = "sec",
+ [DVB_DEVICE_FRONTEND] = "frontend",
+ [DVB_DEVICE_DEMUX] = "demux",
+ [DVB_DEVICE_DVR] = "dvr",
+ [DVB_DEVICE_CA] = "ca",
+ [DVB_DEVICE_NET] = "net",
+ [DVB_DEVICE_OSD] = "osd"
};
#ifdef CONFIG_DVB_DYNAMIC_MINORS
@@ -60,7 +67,22 @@ static const char * const dnames[] = {
#define DVB_MAX_IDS MAX_DVB_MINORS
#else
#define DVB_MAX_IDS 4
-#define nums2minor(num, type, id) ((num << 6) | (id << 4) | type)
+
+static const u8 minor_type[] = {
+ [DVB_DEVICE_VIDEO] = 0,
+ [DVB_DEVICE_AUDIO] = 1,
+ [DVB_DEVICE_SEC] = 2,
+ [DVB_DEVICE_FRONTEND] = 3,
+ [DVB_DEVICE_DEMUX] = 4,
+ [DVB_DEVICE_DVR] = 5,
+ [DVB_DEVICE_CA] = 6,
+ [DVB_DEVICE_NET] = 7,
+ [DVB_DEVICE_OSD] = 8,
+};
+
+#define nums2minor(num, type, id) \
+ (((num) << 6) | ((id) << 4) | minor_type[type])
+
#define MAX_DVB_MINORS (DVB_MAX_ADAPTERS*64)
#endif
@@ -426,8 +448,8 @@ static int dvb_register_media_device(struct dvb_device *dvbdev,
}
int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
- const struct dvb_device *template, void *priv, int type,
- int demux_sink_pads)
+ const struct dvb_device *template, void *priv,
+ enum dvb_device_type type, int demux_sink_pads)
{
struct dvb_device *dvbdev;
struct file_operations *dvbdevfops;
diff --git a/drivers/media/dvb-core/dvbdev.h b/drivers/media/dvb-core/dvbdev.h
index 49189392cf3b..bbc1c20c0529 100644
--- a/drivers/media/dvb-core/dvbdev.h
+++ b/drivers/media/dvb-core/dvbdev.h
@@ -35,15 +35,37 @@
#define DVB_UNSET (-1)
-#define DVB_DEVICE_VIDEO 0
-#define DVB_DEVICE_AUDIO 1
-#define DVB_DEVICE_SEC 2
-#define DVB_DEVICE_FRONTEND 3
-#define DVB_DEVICE_DEMUX 4
-#define DVB_DEVICE_DVR 5
-#define DVB_DEVICE_CA 6
-#define DVB_DEVICE_NET 7
-#define DVB_DEVICE_OSD 8
+/* List of DVB device types */
+
+/**
+ * enum dvb_device_type - type of the Digital TV device
+ *
+ * @DVB_DEVICE_SEC: Digital TV standalone Common Interface (CI)
+ * @DVB_DEVICE_FRONTEND: Digital TV frontend.
+ * @DVB_DEVICE_DEMUX: Digital TV demux.
+ * @DVB_DEVICE_DVR: Digital TV digital video record (DVR).
+ * @DVB_DEVICE_CA: Digital TV Conditional Access (CA).
+ * @DVB_DEVICE_NET: Digital TV network.
+ *
+ * @DVB_DEVICE_VIDEO: Digital TV video decoder.
+ * Deprecated. Used only on av7110-av.
+ * @DVB_DEVICE_AUDIO: Digital TV audio decoder.
+ * Deprecated. Used only on av7110-av.
+ * @DVB_DEVICE_OSD: Digital TV On Screen Display (OSD).
+ * Deprecated. Used only on av7110.
+ */
+enum dvb_device_type {
+ DVB_DEVICE_SEC,
+ DVB_DEVICE_FRONTEND,
+ DVB_DEVICE_DEMUX,
+ DVB_DEVICE_DVR,
+ DVB_DEVICE_CA,
+ DVB_DEVICE_NET,
+
+ DVB_DEVICE_VIDEO,
+ DVB_DEVICE_AUDIO,
+ DVB_DEVICE_OSD,
+};
#define DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr) \
static short adapter_nr[] = \
@@ -104,8 +126,7 @@ struct dvb_adapter {
* @list_head: List head with all DVB devices
* @fops: pointer to struct file_operations
* @adapter: pointer to the adapter that holds this device node
- * @type: type of the device: DVB_DEVICE_SEC, DVB_DEVICE_FRONTEND,
- * DVB_DEVICE_DEMUX, DVB_DEVICE_DVR, DVB_DEVICE_CA, DVB_DEVICE_NET
+ * @type: type of the device, as defined by &enum dvb_device_type.
* @minor: devnode minor number. Major number is always DVB_MAJOR.
* @id: device ID number, inside the adapter
* @readers: Initialized by the caller. Each call to open() in Read Only mode
@@ -135,7 +156,7 @@ struct dvb_device {
struct list_head list_head;
const struct file_operations *fops;
struct dvb_adapter *adapter;
- int type;
+ enum dvb_device_type type;
int minor;
u32 id;
@@ -194,9 +215,7 @@ int dvb_unregister_adapter(struct dvb_adapter *adap);
* stored
* @template: Template used to create &pdvbdev;
* @priv: private data
- * @type: type of the device: %DVB_DEVICE_SEC, %DVB_DEVICE_FRONTEND,
- * %DVB_DEVICE_DEMUX, %DVB_DEVICE_DVR, %DVB_DEVICE_CA,
- * %DVB_DEVICE_NET
+ * @type: type of the device, as defined by &enum dvb_device_type.
* @demux_sink_pads: Number of demux outputs, to be used to create the TS
* outputs via the Media Controller.
*/
@@ -204,7 +223,7 @@ int dvb_register_device(struct dvb_adapter *adap,
struct dvb_device **pdvbdev,
const struct dvb_device *template,
void *priv,
- int type,
+ enum dvb_device_type type,
int demux_sink_pads);
/**
@@ -242,7 +261,7 @@ void dvb_unregister_device(struct dvb_device *dvbdev);
* dvb_create_media_graph - Creates media graph for the Digital TV part of the
* device.
*
- * @adap: pointer to struct dvb_adapter
+ * @adap: pointer to &struct dvb_adapter
* @create_rf_connector: if true, it creates the RF connector too
*
* This function checks all DVB-related functions at the media controller
@@ -255,12 +274,23 @@ void dvb_unregister_device(struct dvb_device *dvbdev);
__must_check int dvb_create_media_graph(struct dvb_adapter *adap,
bool create_rf_connector);
+/**
+ * dvb_register_media_controller - registers a media controller at DVB adapter
+ *
+ * @adap: pointer to &struct dvb_adapter
+ * @mdev: pointer to &struct media_device
+ */
static inline void dvb_register_media_controller(struct dvb_adapter *adap,
struct media_device *mdev)
{
adap->mdev = mdev;
}
+/**
+ * dvb_get_media_controller - gets the associated media controller
+ *
+ * @adap: pointer to &struct dvb_adapter
+ */
static inline struct media_device
*dvb_get_media_controller(struct dvb_adapter *adap)
{
@@ -277,20 +307,71 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
#define dvb_get_media_controller(a) NULL
#endif
-int dvb_generic_open (struct inode *inode, struct file *file);
-int dvb_generic_release (struct inode *inode, struct file *file);
-long dvb_generic_ioctl (struct file *file,
- unsigned int cmd, unsigned long arg);
+/**
+ * dvb_generic_open - Digital TV open function, used by DVB devices
+ *
+ * @inode: pointer to &struct inode.
+ * @file: pointer to &struct file.
+ *
+ * Checks if a DVB devnode is still valid, and if the permissions are
+ * OK, and increments the negative use count.
+ */
+int dvb_generic_open(struct inode *inode, struct file *file);
-/* we don't mess with video_usercopy() any more,
-we simply define out own dvb_usercopy(), which will hopefully become
-generic_usercopy() someday... */
+/**
+ * dvb_generic_close - Digital TV close function, used by DVB devices
+ *
+ * @inode: pointer to &struct inode.
+ * @file: pointer to &struct file.
+ *
+ * Checks if a DVB devnode is still valid, and if the permissions are
+ * OK, and decrements the negative use count.
+ */
+int dvb_generic_release(struct inode *inode, struct file *file);
+/**
+ * dvb_generic_ioctl - Digital TV ioctl function, used by DVB devices
+ *
+ * @file: pointer to &struct file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ *
+ * Checks if a DVB devnode and struct dvbdev.kernel_ioctl are still valid.
+ * If so, calls dvb_usercopy().
+ */
+long dvb_generic_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+
+/**
+ * dvb_usercopy - copies data from/to userspace memory when an ioctl is
+ * issued.
+ *
+ * @file: Pointer to &struct file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ * @func: function that will actually handle the ioctl
+ *
+ * Ancillary function that uses ioctl direction and size to copy from
+ * userspace. Then, it calls @func, and, if needed, data is copied back
+ * to userspace.
+ */
int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
int (*func)(struct file *file, unsigned int cmd, void *arg));
/** generic DVB attach function. */
#ifdef CONFIG_MEDIA_ATTACH
+
+/**
+ * dvb_attach - attaches a DVB frontend into the DVB core.
+ *
+ * @FUNCTION: function on a frontend module to be called.
+ * @ARGS...: @FUNCTION arguments.
+ *
+ * This ancillary function loads a frontend module at runtime and runs
+ * the @FUNCTION function there, with @ARGS.
+ * As it increments the symbol usage count, dvb_detach() should be
+ * called at unregister time.
+ */
#define dvb_attach(FUNCTION, ARGS...) ({ \
void *__r = NULL; \
typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
@@ -304,6 +385,14 @@ int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
__r; \
})
+/**
+ * dvb_detach - detaches a DVB frontend loaded via dvb_attach()
+ *
+ * @FUNC: attach function
+ *
+ * Decrements usage count for a function previously called via dvb_attach().
+ */
+
#define dvb_detach(FUNC) symbol_put_addr(FUNC)
#else
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 2631d0e0a024..d17722eb4456 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -173,7 +173,7 @@ config DVB_STB6000
tristate "ST STB6000 silicon tuner"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
- help
+ help
A DVB-S silicon tuner module. Say Y when you want to support this tuner.
config DVB_STV0299
@@ -187,7 +187,7 @@ config DVB_STV6110
tristate "ST STV6110 silicon tuner"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
- help
+ help
A DVB-S silicon tuner module. Say Y when you want to support this tuner.
config DVB_STV0900
@@ -902,7 +902,7 @@ config DVB_HELENE
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y when you want to support this frontend.
+ Say Y when you want to support this frontend.
comment "Tools to develop new frontends"
diff --git a/drivers/media/dvb-frontends/as102_fe.c b/drivers/media/dvb-frontends/as102_fe.c
index 98d575f2744c..b1c84ee914f0 100644
--- a/drivers/media/dvb-frontends/as102_fe.c
+++ b/drivers/media/dvb-frontends/as102_fe.c
@@ -455,11 +455,10 @@ struct dvb_frontend *as102_attach(const char *name,
struct as102_state *state;
struct dvb_frontend *fe;
- state = kzalloc(sizeof(struct as102_state), GFP_KERNEL);
- if (state == NULL) {
- pr_err("%s: unable to allocate memory for state\n", __func__);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
return NULL;
- }
+
fe = &state->frontend;
fe->demodulator_priv = state;
state->ops = ops;
diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
index 0118c2658cf7..ee1f704f81f2 100644
--- a/drivers/media/dvb-frontends/cx24113.c
+++ b/drivers/media/dvb-frontends/cx24113.c
@@ -552,13 +552,11 @@ struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe,
const struct cx24113_config *config, struct i2c_adapter *i2c)
{
/* allocate memory for the internal state */
- struct cx24113_state *state =
- kzalloc(sizeof(struct cx24113_state), GFP_KERNEL);
+ struct cx24113_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
int rc;
- if (state == NULL) {
- cx_err("Unable to kzalloc\n");
- goto error;
- }
+
+ if (!state)
+ return NULL;
/* setup the state */
state->config = config;
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index e105532bfba8..8fb3f095e21c 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -221,16 +221,13 @@ static int cx24116_writereg(struct cx24116_state *state, int reg, int data)
static int cx24116_writeregN(struct cx24116_state *state, int reg,
const u8 *data, u16 len)
{
- int ret = -EREMOTEIO;
+ int ret;
struct i2c_msg msg;
u8 *buf;
buf = kmalloc(len + 1, GFP_KERNEL);
- if (buf == NULL) {
- printk("Unable to kmalloc\n");
- ret = -ENOMEM;
- goto error;
- }
+ if (!buf)
+ return -ENOMEM;
*(buf) = reg;
memcpy(buf + 1, data, len);
@@ -251,7 +248,6 @@ static int cx24116_writeregN(struct cx24116_state *state, int reg,
ret = -EREMOTEIO;
}
-error:
kfree(buf);
return ret;
@@ -1121,15 +1117,15 @@ static const struct dvb_frontend_ops cx24116_ops;
struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
struct i2c_adapter *i2c)
{
- struct cx24116_state *state = NULL;
+ struct cx24116_state *state;
int ret;
dprintk("%s\n", __func__);
/* allocate memory for the internal state */
- state = kzalloc(sizeof(struct cx24116_state), GFP_KERNEL);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
- goto error1;
+ return NULL;
state->config = config;
state->i2c = i2c;
@@ -1138,8 +1134,9 @@ struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
ret = (cx24116_readreg(state, 0xFF) << 8) |
cx24116_readreg(state, 0xFE);
if (ret != 0x0501) {
+ kfree(state);
printk(KERN_INFO "Invalid probe, probably not a CX24116 device\n");
- goto error2;
+ return NULL;
}
/* create dvb_frontend */
@@ -1147,9 +1144,6 @@ struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
return &state->frontend;
-
-error2: kfree(state);
-error1: return NULL;
}
EXPORT_SYMBOL(cx24116_attach);
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 7d04400b18dd..0696bc62dcc9 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -328,7 +328,7 @@ static int WriteTable(struct drxd_state *state, u8 * pTable)
{
int status = 0;
- if (pTable == NULL)
+ if (!pTable)
return 0;
while (!status) {
@@ -640,7 +640,7 @@ static int SetCfgIfAgc(struct drxd_state *state, struct SCfgAgc *cfg)
const u16 maxRur = 8;
static const u16 slowIncrDecLUT[] = {
3, 4, 4, 5, 6 };
- const u16 fastIncrDecLUT[] = {
+ static const u16 fastIncrDecLUT[] = {
14, 15, 15, 16,
17, 18, 18, 19,
20, 21, 22, 23,
@@ -909,9 +909,8 @@ static int load_firmware(struct drxd_state *state, const char *fw_name)
}
state->microcode = kmemdup(fw->data, fw->size, GFP_KERNEL);
- if (state->microcode == NULL) {
+ if (!state->microcode) {
release_firmware(fw);
- printk(KERN_ERR "drxd: firmware load failure: no memory\n");
return -ENOMEM;
}
@@ -2630,7 +2629,7 @@ static int DRXD_init(struct drxd_state *state, const u8 *fw, u32 fw_size)
break;
/* Apply I2c address patch to B1 */
- if (!state->type_A && state->m_HiI2cPatch != NULL) {
+ if (!state->type_A && state->m_HiI2cPatch) {
status = WriteTable(state, state->m_HiI2cPatch);
if (status < 0)
break;
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 0b17a45c5640..bd4f8278c906 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -277,10 +277,8 @@ static int ds3000_writeFW(struct ds3000_state *state, int reg,
u8 *buf;
buf = kmalloc(33, GFP_KERNEL);
- if (buf == NULL) {
- printk(KERN_ERR "Unable to kmalloc\n");
+ if (!buf)
return -ENOMEM;
- }
*(buf) = reg;
@@ -835,17 +833,15 @@ static const struct dvb_frontend_ops ds3000_ops;
struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
struct i2c_adapter *i2c)
{
- struct ds3000_state *state = NULL;
+ struct ds3000_state *state;
int ret;
dprintk("%s\n", __func__);
/* allocate memory for the internal state */
- state = kzalloc(sizeof(struct ds3000_state), GFP_KERNEL);
- if (state == NULL) {
- printk(KERN_ERR "Unable to kmalloc\n");
- goto error2;
- }
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
state->config = config;
state->i2c = i2c;
@@ -854,8 +850,9 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
/* check if the demod is present */
ret = ds3000_readreg(state, 0x00) & 0xfe;
if (ret != 0xe0) {
+ kfree(state);
printk(KERN_ERR "Invalid probe, probably not a DS3000\n");
- goto error3;
+ return NULL;
}
printk(KERN_INFO "DS3000 chip version: %d.%d attached.\n",
@@ -873,11 +870,6 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
*/
ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
return &state->frontend;
-
-error3:
- kfree(state);
-error2:
- return NULL;
}
EXPORT_SYMBOL(ds3000_attach);
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index 5798079add10..9854096839ae 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -1048,16 +1048,6 @@ fail:
return ret;
}
-static int lg216x_get_property(struct dvb_frontend *fe,
- struct dtv_property *tvp)
-{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
-
- return (DTV_ATSCMH_FIC_VER == tvp->cmd) ?
- lg216x_get_frontend(fe, c) : 0;
-}
-
-
static int lg2160_set_frontend(struct dvb_frontend *fe)
{
struct lg216x_state *state = fe->demodulator_priv;
@@ -1368,8 +1358,6 @@ static const struct dvb_frontend_ops lg2160_ops = {
.init = lg216x_init,
.sleep = lg216x_sleep,
#endif
- .get_property = lg216x_get_property,
-
.set_frontend = lg2160_set_frontend,
.get_frontend = lg216x_get_frontend,
.get_tune_settings = lg216x_get_tune_settings,
@@ -1396,8 +1384,6 @@ static const struct dvb_frontend_ops lg2161_ops = {
.init = lg216x_init,
.sleep = lg216x_sleep,
#endif
- .get_property = lg216x_get_property,
-
.set_frontend = lg2160_set_frontend,
.get_frontend = lg216x_get_frontend,
.get_tune_settings = lg216x_get_tune_settings,
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index c9b1eb38444e..724e9aac0f11 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/div64.h>
+#include <linux/kernel.h>
#include <linux/dvb/frontend.h>
#include "dvb_math.h"
#include "lgdt3306a.h"
@@ -2072,7 +2073,7 @@ static const short regtab[] = {
0x30aa, /* MPEGLOCK */
};
-#define numDumpRegs (sizeof(regtab)/sizeof(regtab[0]))
+#define numDumpRegs (ARRAY_SIZE(regtab))
static u8 regval1[numDumpRegs] = {0, };
static u8 regval2[numDumpRegs] = {0, };
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index e8ac8c3e2ec0..bdaf9d235fed 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -2071,12 +2071,9 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
dev_dbg(&i2c->dev, "%s called.\n", __func__);
/* allocate memory for the internal state */
- state = kzalloc(sizeof(struct mb86a20s_state), GFP_KERNEL);
- if (state == NULL) {
- dev_err(&i2c->dev,
- "%s: unable to allocate memory for state\n", __func__);
- goto error;
- }
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
/* setup the state */
state->config = config;
@@ -2089,22 +2086,16 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
/* Check if it is a mb86a20s frontend */
rev = mb86a20s_readreg(state, 0);
-
- if (rev == 0x13) {
- dev_info(&i2c->dev,
- "Detected a Fujitsu mb86a20s frontend\n");
- } else {
+ if (rev != 0x13) {
+ kfree(state);
dev_dbg(&i2c->dev,
"Frontend revision %d is unknown - aborting.\n",
rev);
- goto error;
+ return NULL;
}
+ dev_info(&i2c->dev, "Detected a Fujitsu mb86a20s frontend\n");
return &state->frontend;
-
-error:
- kfree(state);
- return NULL;
}
EXPORT_SYMBOL(mb86a20s_attach);
diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
index 676c96c216c3..53064e11f5f1 100644
--- a/drivers/media/dvb-frontends/mxl5xx.c
+++ b/drivers/media/dvb-frontends/mxl5xx.c
@@ -43,7 +43,7 @@
#define BYTE2(v) ((v >> 16) & 0xff)
#define BYTE3(v) ((v >> 24) & 0xff)
-LIST_HEAD(mxllist);
+static LIST_HEAD(mxllist);
struct mxl_base {
struct list_head mxllist;
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 172fc367ccaa..41d9c513b7e8 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -696,7 +696,6 @@ static int si2168_probe(struct i2c_client *client,
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
- dev_err(&client->dev, "kzalloc() failed\n");
goto err;
}
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
index 43d47dfcc7b8..53e66c232d3c 100644
--- a/drivers/media/dvb-frontends/sp2.c
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -357,14 +357,14 @@ static int sp2_exit(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
- if (client == NULL)
+ if (!client)
return 0;
s = i2c_get_clientdata(client);
- if (s == NULL)
+ if (!s)
return 0;
- if (s->ca.data == NULL)
+ if (!s->ca.data)
return 0;
dvb_ca_en50221_release(&s->ca);
@@ -381,10 +381,9 @@ static int sp2_probe(struct i2c_client *client,
dev_dbg(&client->dev, "\n");
- s = kzalloc(sizeof(struct sp2), GFP_KERNEL);
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s) {
ret = -ENOMEM;
- dev_err(&client->dev, "kzalloc() failed\n");
goto err;
}
diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
index 45cbc898ad25..67f91814b9f7 100644
--- a/drivers/media/dvb-frontends/stv0288.c
+++ b/drivers/media/dvb-frontends/stv0288.c
@@ -447,12 +447,6 @@ static int stv0288_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
return 0;
}
-static int stv0288_set_property(struct dvb_frontend *fe, struct dtv_property *p)
-{
- dprintk("%s(..)\n", __func__);
- return 0;
-}
-
static int stv0288_set_frontend(struct dvb_frontend *fe)
{
struct stv0288_state *state = fe->demodulator_priv;
@@ -567,7 +561,6 @@ static const struct dvb_frontend_ops stv0288_ops = {
.set_tone = stv0288_set_tone,
.set_voltage = stv0288_set_voltage,
- .set_property = stv0288_set_property,
.set_frontend = stv0288_set_frontend,
};
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index e4fd9c1b0560..6aad0efa3174 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -258,11 +258,9 @@ static int stv6110_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static int stv6110_set_frequency(struct dvb_frontend *fe, u32 frequency)
{
struct stv6110_priv *priv = fe->tuner_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 ret = 0x04;
u32 divider, ref, p, presc, i, result_freq, vco_freq;
s32 p_calc, p_calc_opt = 1000, r_div, r_div_opt = 0, p_val;
- s32 srate;
dprintk("%s, freq=%d kHz, mclk=%d Hz\n", __func__,
frequency, priv->mclk);
@@ -273,13 +271,6 @@ static int stv6110_set_frequency(struct dvb_frontend *fe, u32 frequency)
((((priv->mclk / 1000000) - 16) & 0x1f) << 3);
/* BB_GAIN = db/2 */
- if (fe->ops.set_property && fe->ops.get_property) {
- srate = c->symbol_rate;
- dprintk("%s: Get Frontend parameters: srate=%d\n",
- __func__, srate);
- } else
- srate = 15000000;
-
priv->regs[RSTV6110_CTRL2] &= ~0x0f;
priv->regs[RSTV6110_CTRL2] |= (priv->gain & 0x0f);
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 94153895fcd4..3c6d6428f525 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -354,6 +354,14 @@ config VIDEO_TC358743
To compile this driver as a module, choose M here: the
module will be called tc358743.
+config VIDEO_TC358743_CEC
+ bool "Enable Toshiba TC358743 CEC support"
+ depends on VIDEO_TC358743
+ select CEC_CORE
+ ---help---
+ When selected the tc358743 will support the optional
+ HDMI CEC feature.
+
config VIDEO_TVP514X
tristate "Texas Instruments TVP514x video decoder"
depends on VIDEO_V4L2 && I2C
@@ -547,6 +555,14 @@ config VIDEO_APTINA_PLL
config VIDEO_SMIAPP_PLL
tristate
+config VIDEO_IMX274
+ tristate "Sony IMX274 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ ---help---
+ This is a V4L2 sensor-level driver for the Sony IMX274
+ CMOS image sensor.
+
config VIDEO_OV2640
tristate "OmniVision OV2640 sensor support"
depends on VIDEO_V4L2 && I2C
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index f104650d6000..548a9efce966 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -93,5 +93,6 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
+obj-$(CONFIG_VIDEO_IMX274) += imx274.o
obj-$(CONFIG_SDR_MAX2175) += max2175.o
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 3df28f2f9b38..6fb818a775db 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1328,7 +1328,7 @@ static int adv7180_probe(struct i2c_client *client,
state->input = 0;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
- sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
ret = adv7180_init_controls(state);
if (ret)
diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c
index b33ccfc08708..4aa8e45b5cd3 100644
--- a/drivers/media/i2c/adv748x/adv748x-afe.c
+++ b/drivers/media/i2c/adv748x/adv748x-afe.c
@@ -217,6 +217,7 @@ static int adv748x_afe_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
{
struct adv748x_afe *afe = adv748x_sd_to_afe(sd);
struct adv748x_state *state = adv748x_afe_to_state(afe);
+ int afe_std;
int ret;
mutex_lock(&state->mutex);
@@ -235,8 +236,12 @@ static int adv748x_afe_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
/* Read detected standard */
ret = adv748x_afe_status(afe, NULL, std);
+ afe_std = adv748x_afe_std(afe->curr_norm);
+ if (afe_std < 0)
+ goto unlock;
+
/* Restore original state */
- adv748x_afe_set_video_standard(state, afe->curr_norm);
+ adv748x_afe_set_video_standard(state, afe_std);
unlock:
mutex_unlock(&state->mutex);
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index f289b8aca1da..c786cd125417 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1948,7 +1948,7 @@ static int adv76xx_set_format(struct v4l2_subdev *sd,
return -EINVAL;
info = adv76xx_format_info(state, format->format.code);
- if (info == NULL)
+ if (!info)
info = adv76xx_format_info(state, MEDIA_BUS_FMT_YUYV8_2X8);
adv76xx_fill_format(state, &format->format);
@@ -2256,7 +2256,7 @@ static int adv76xx_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
return 0;
}
- if (data == NULL)
+ if (!data)
return -ENODATA;
if (edid->start_block >= state->edid.blocks)
@@ -3316,10 +3316,8 @@ static int adv76xx_probe(struct i2c_client *client,
client->addr << 1);
state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
- if (!state) {
- v4l_err(client, "Could not allocate adv76xx_state memory!\n");
+ if (!state)
return -ENOMEM;
- }
state->i2c_clients[ADV76XX_PAGE_IO] = client;
@@ -3482,7 +3480,7 @@ static int adv76xx_probe(struct i2c_client *client,
state->i2c_clients[i] =
adv76xx_dummy_client(sd, state->pdata.i2c_addresses[i],
0xf2 + i);
- if (state->i2c_clients[i] == NULL) {
+ if (!state->i2c_clients[i]) {
err = -ENOMEM;
v4l2_err(sd, "failed to create i2c client %u\n", i);
goto err_i2c;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 65f34e7e146f..136aa80a834b 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3467,11 +3467,9 @@ static int adv7842_probe(struct i2c_client *client,
return -ENODEV;
}
- state = devm_kzalloc(&client->dev, sizeof(struct adv7842_state), GFP_KERNEL);
- if (!state) {
- v4l_err(client, "Could not allocate adv7842_state memory!\n");
+ state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
return -ENOMEM;
- }
/* platform data */
state->pdata = *pdata;
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 39f51daa7558..f38bf819d805 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -1745,7 +1745,7 @@ static int cx25840_g_std(struct v4l2_subdev *sd, v4l2_std_id *std)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- v4l2_std_id stds[] = {
+ static const v4l2_std_id stds[] = {
/* 0000 */ V4L2_STD_UNKNOWN,
/* 0001 */ V4L2_STD_NTSC_M,
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 95af4fc99cd0..ed01e8bd4331 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -21,6 +21,11 @@
#define DW9714_NAME "dw9714"
#define DW9714_MAX_FOCUS_POS 1023
/*
+ * This sets the minimum granularity for the focus positions.
+ * A value of 1 gives maximum accuracy for a desired focus position.
+ */
+#define DW9714_FOCUS_STEPS 1
+/*
* This acts as the minimum granularity of lens movement.
* Keep this value power of 2, so the control steps can be
* uniformly adjusted for gradual lens movement, with desired
@@ -137,7 +142,7 @@ static int dw9714_init_controls(struct dw9714_device *dev_vcm)
v4l2_ctrl_handler_init(hdl, 1);
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE,
- 0, DW9714_MAX_FOCUS_POS, DW9714_CTRL_STEPS, 0);
+ 0, DW9714_MAX_FOCUS_POS, DW9714_FOCUS_STEPS, 0);
if (hdl->error)
dev_err(&client->dev, "%s fail error: 0x%x\n",
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index c14f0fd6ded3..e9eff9039ef5 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1453,7 +1453,7 @@ static int et8ek8_probe(struct i2c_client *client,
goto err_mutex;
}
- ret = v4l2_async_register_subdev(&sensor->subdev);
+ ret = v4l2_async_register_subdev_sensor_common(&sensor->subdev);
if (ret < 0)
goto err_entity;
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
new file mode 100644
index 000000000000..800b9bf9cdd3
--- /dev/null
+++ b/drivers/media/i2c/imx274.c
@@ -0,0 +1,1811 @@
+/*
+ * imx274.c - IMX274 CMOS Image Sensor driver
+ *
+ * Copyright (C) 2017, Leopard Imaging, Inc.
+ *
+ * Leon Luo <leonl@leopardimaging.com>
+ * Edwin Zou <edwinz@leopardimaging.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+/*
+ * See "SHR, SVR Setting" in datasheet
+ */
+#define IMX274_DEFAULT_FRAME_LENGTH (4550)
+#define IMX274_MAX_FRAME_LENGTH (0x000fffff)
+
+/*
+ * See "Frame Rate Adjustment" in datasheet
+ */
+#define IMX274_PIXCLK_CONST1 (72000000)
+#define IMX274_PIXCLK_CONST2 (1000000)
+
+/*
+ * The input gain is a fixed-point value with IMX274_GAIN_SHIFT
+ * fractional bits, i.e. the real gain is
+ * (float)input_gain_value / (1 << IMX274_GAIN_SHIFT)
+ */
+#define IMX274_GAIN_SHIFT (8)
+#define IMX274_GAIN_SHIFT_MASK ((1 << IMX274_GAIN_SHIFT) - 1)
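+/*
+ * For example, with IMX274_GAIN_SHIFT = 8 an input value of 0x180 (384)
+ * corresponds to a real gain of 384 / 256 = 1.5x.
+ */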
+
+/*
+ * See "Analog Gain" and "Digital Gain" in datasheet
+ * min gain is 1X
+ * max gain is calculated based on IMX274_GAIN_REG_MAX
+ */
+#define IMX274_GAIN_REG_MAX (1957)
+#define IMX274_MIN_GAIN (0x01 << IMX274_GAIN_SHIFT)
+#define IMX274_MAX_ANALOG_GAIN ((2048 << IMX274_GAIN_SHIFT)\
+ / (2048 - IMX274_GAIN_REG_MAX))
+#define IMX274_MAX_DIGITAL_GAIN (8)
+#define IMX274_DEF_GAIN (20 << IMX274_GAIN_SHIFT)
+#define IMX274_GAIN_CONST (2048) /* for gain formula */
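+/*
+ * For example, with IMX274_GAIN_REG_MAX = 1957 the maximum analog gain
+ * evaluates to (2048 << 8) / (2048 - 1957) = 5761, i.e. about 22.5x.
+ */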
+
+/*
+ * Line time in us = (HMAX / 72); the minimum exposure is 4 lines
+ */
+#define IMX274_MIN_EXPOSURE_TIME (4 * 260 / 72)
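+/*
+ * For example, with HMAX = 260 (as used by the 1080p and 720p modes)
+ * IMX274_MIN_EXPOSURE_TIME evaluates to 4 * 260 / 72, about 14 us.
+ */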
+
+#define IMX274_DEFAULT_MODE IMX274_MODE_3840X2160
+#define IMX274_MAX_WIDTH (3840)
+#define IMX274_MAX_HEIGHT (2160)
+#define IMX274_MAX_FRAME_RATE (120)
+#define IMX274_MIN_FRAME_RATE (5)
+#define IMX274_DEF_FRAME_RATE (60)
+
+/*
+ * register SHR is limited to (SVR value + 1) x VMAX value - 4
+ */
+#define IMX274_SHR_LIMIT_CONST (4)
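+/*
+ * For example, with SVR = 0 and the default VMAX of 4550 the SHR
+ * register may be at most (0 + 1) * 4550 - 4 = 4546.
+ */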
+
+/*
+ * Constants for sensor reset delay
+ */
+#define IMX274_RESET_DELAY1 (2000)
+#define IMX274_RESET_DELAY2 (2200)
+
+/*
+ * shift and mask constants
+ */
+#define IMX274_SHIFT_8_BITS (8)
+#define IMX274_SHIFT_16_BITS (16)
+#define IMX274_MASK_LSB_2_BITS (0x03)
+#define IMX274_MASK_LSB_3_BITS (0x07)
+#define IMX274_MASK_LSB_4_BITS (0x0f)
+#define IMX274_MASK_LSB_8_BITS (0x00ff)
+
+#define DRIVER_NAME "IMX274"
+
+/*
+ * IMX274 register definitions
+ */
+#define IMX274_FRAME_LENGTH_ADDR_1 0x30FA /* VMAX, MSB */
+#define IMX274_FRAME_LENGTH_ADDR_2 0x30F9 /* VMAX */
+#define IMX274_FRAME_LENGTH_ADDR_3 0x30F8 /* VMAX, LSB */
+#define IMX274_SVR_REG_MSB 0x300F /* SVR */
+#define IMX274_SVR_REG_LSB 0x300E /* SVR */
+#define IMX274_HMAX_REG_MSB 0x30F7 /* HMAX */
+#define IMX274_HMAX_REG_LSB 0x30F6 /* HMAX */
+#define IMX274_COARSE_TIME_ADDR_MSB 0x300D /* SHR */
+#define IMX274_COARSE_TIME_ADDR_LSB 0x300C /* SHR */
+#define IMX274_ANALOG_GAIN_ADDR_LSB 0x300A /* ANALOG GAIN LSB */
+#define IMX274_ANALOG_GAIN_ADDR_MSB 0x300B /* ANALOG GAIN MSB */
+#define IMX274_DIGITAL_GAIN_REG 0x3012 /* Digital Gain */
+#define IMX274_VFLIP_REG 0x301A /* VERTICAL FLIP */
+#define IMX274_TEST_PATTERN_REG 0x303D /* TEST PATTERN */
+#define IMX274_STANDBY_REG 0x3000 /* STANDBY */
+
+#define IMX274_TABLE_WAIT_MS 0
+#define IMX274_TABLE_END 1
+
+/*
+ * imx274 I2C operation related structure
+ */
+struct reg_8 {
+ u16 addr;
+ u8 val;
+};
+
+static const struct regmap_config imx274_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+enum imx274_mode {
+ IMX274_MODE_3840X2160,
+ IMX274_MODE_1920X1080,
+ IMX274_MODE_1280X720,
+
+ IMX274_MODE_START_STREAM_1,
+ IMX274_MODE_START_STREAM_2,
+ IMX274_MODE_START_STREAM_3,
+ IMX274_MODE_START_STREAM_4,
+ IMX274_MODE_STOP_STREAM
+};
+
+/*
+ * imx274 format related structure
+ */
+struct imx274_frmfmt {
+ u32 mbus_code;
+ enum v4l2_colorspace colorspace;
+ struct v4l2_frmsize_discrete size;
+ enum imx274_mode mode;
+};
+
+/*
+ * imx274 test pattern related structure
+ */
+enum {
+ TEST_PATTERN_DISABLED = 0,
+ TEST_PATTERN_ALL_000H,
+ TEST_PATTERN_ALL_FFFH,
+ TEST_PATTERN_ALL_555H,
+ TEST_PATTERN_ALL_AAAH,
+ TEST_PATTERN_VSP_5AH, /* VERTICAL STRIPE PATTERN 555H/AAAH */
+ TEST_PATTERN_VSP_A5H, /* VERTICAL STRIPE PATTERN AAAH/555H */
+ TEST_PATTERN_VSP_05H, /* VERTICAL STRIPE PATTERN 000H/555H */
+ TEST_PATTERN_VSP_50H, /* VERTICAL STRIPE PATTERN 555H/000H */
+ TEST_PATTERN_VSP_0FH, /* VERTICAL STRIPE PATTERN 000H/FFFH */
+ TEST_PATTERN_VSP_F0H, /* VERTICAL STRIPE PATTERN FFFH/000H */
+ TEST_PATTERN_H_COLOR_BARS,
+ TEST_PATTERN_V_COLOR_BARS,
+};
+
+static const char * const tp_qmenu[] = {
+ "Disabled",
+ "All 000h Pattern",
+ "All FFFh Pattern",
+ "All 555h Pattern",
+ "All AAAh Pattern",
+ "Vertical Stripe (555h / AAAh)",
+ "Vertical Stripe (AAAh / 555h)",
+ "Vertical Stripe (000h / 555h)",
+ "Vertical Stripe (555h / 000h)",
+ "Vertical Stripe (000h / FFFh)",
+ "Vertical Stripe (FFFh / 000h)",
+ "Horizontal Color Bars",
+ "Vertical Color Bars",
+};
+
+/*
+ * All-pixel scan mode (10-bit)
+ * imx274 mode1 (refer to datasheet) register configuration with
+ * 3840x2160 resolution, raw10 data and mipi four lane output
+ */
+static const struct reg_8 imx274_mode1_3840x2160_raw10[] = {
+ {0x3004, 0x01},
+ {0x3005, 0x01},
+ {0x3006, 0x00},
+ {0x3007, 0x02},
+
+ {0x3018, 0xA2}, /* output XVS, HVS */
+
+ {0x306B, 0x05},
+ {0x30E2, 0x01},
+ {0x30F6, 0x07}, /* HMAX, 263 */
+ {0x30F7, 0x01}, /* HMAX */
+
+ {0x30dd, 0x01}, /* crop to 2160 */
+ {0x30de, 0x06},
+ {0x30df, 0x00},
+ {0x30e0, 0x12},
+ {0x30e1, 0x00},
+ {0x3037, 0x01}, /* to crop to 3840 */
+ {0x3038, 0x0c},
+ {0x3039, 0x00},
+ {0x303a, 0x0c},
+ {0x303b, 0x0f},
+
+ {0x30EE, 0x01},
+ {0x3130, 0x86},
+ {0x3131, 0x08},
+ {0x3132, 0x7E},
+ {0x3133, 0x08},
+ {0x3342, 0x0A},
+ {0x3343, 0x00},
+ {0x3344, 0x16},
+ {0x3345, 0x00},
+ {0x33A6, 0x01},
+ {0x3528, 0x0E},
+ {0x3554, 0x1F},
+ {0x3555, 0x01},
+ {0x3556, 0x01},
+ {0x3557, 0x01},
+ {0x3558, 0x01},
+ {0x3559, 0x00},
+ {0x355A, 0x00},
+ {0x35BA, 0x0E},
+ {0x366A, 0x1B},
+ {0x366B, 0x1A},
+ {0x366C, 0x19},
+ {0x366D, 0x17},
+ {0x3A41, 0x08},
+
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * Horizontal/vertical 2/2-line binning
+ * (Horizontal and vertical weighted binning, 10-bit)
+ * imx274 mode3 (refer to datasheet) register configuration with
+ * 1920x1080 resolution, raw10 data and mipi four lane output
+ */
+static const struct reg_8 imx274_mode3_1920x1080_raw10[] = {
+ {0x3004, 0x02},
+ {0x3005, 0x21},
+ {0x3006, 0x00},
+ {0x3007, 0x11},
+
+ {0x3018, 0xA2}, /* output XVS, HVS */
+
+ {0x306B, 0x05},
+ {0x30E2, 0x02},
+
+ {0x30F6, 0x04}, /* HMAX, 260 */
+ {0x30F7, 0x01}, /* HMAX */
+
+ {0x30dd, 0x01}, /* to crop to 1920x1080 */
+ {0x30de, 0x05},
+ {0x30df, 0x00},
+ {0x30e0, 0x04},
+ {0x30e1, 0x00},
+ {0x3037, 0x01},
+ {0x3038, 0x0c},
+ {0x3039, 0x00},
+ {0x303a, 0x0c},
+ {0x303b, 0x0f},
+
+ {0x30EE, 0x01},
+ {0x3130, 0x4E},
+ {0x3131, 0x04},
+ {0x3132, 0x46},
+ {0x3133, 0x04},
+ {0x3342, 0x0A},
+ {0x3343, 0x00},
+ {0x3344, 0x1A},
+ {0x3345, 0x00},
+ {0x33A6, 0x01},
+ {0x3528, 0x0E},
+ {0x3554, 0x00},
+ {0x3555, 0x01},
+ {0x3556, 0x01},
+ {0x3557, 0x01},
+ {0x3558, 0x01},
+ {0x3559, 0x00},
+ {0x355A, 0x00},
+ {0x35BA, 0x0E},
+ {0x366A, 0x1B},
+ {0x366B, 0x1A},
+ {0x366C, 0x19},
+ {0x366D, 0x17},
+ {0x3A41, 0x08},
+
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * Vertical 2/3 subsampling, horizontal 3 binning
+ * imx274 mode5 (refer to datasheet) register configuration with
+ * 1280x720 resolution, raw10 data and mipi four lane output
+ */
+static const struct reg_8 imx274_mode5_1280x720_raw10[] = {
+ {0x3004, 0x03},
+ {0x3005, 0x31},
+ {0x3006, 0x00},
+ {0x3007, 0x09},
+
+ {0x3018, 0xA2}, /* output XVS, HVS */
+
+ {0x306B, 0x05},
+ {0x30E2, 0x03},
+
+ {0x30F6, 0x04}, /* HMAX, 260 */
+ {0x30F7, 0x01}, /* HMAX */
+
+ {0x30DD, 0x01},
+ {0x30DE, 0x07},
+ {0x30DF, 0x00},
+ {0x40E0, 0x04},
+ {0x30E1, 0x00},
+ {0x3030, 0xD4},
+ {0x3031, 0x02},
+ {0x3032, 0xD0},
+ {0x3033, 0x02},
+
+ {0x30EE, 0x01},
+ {0x3130, 0xE2},
+ {0x3131, 0x02},
+ {0x3132, 0xDE},
+ {0x3133, 0x02},
+ {0x3342, 0x0A},
+ {0x3343, 0x00},
+ {0x3344, 0x1B},
+ {0x3345, 0x00},
+ {0x33A6, 0x01},
+ {0x3528, 0x0E},
+ {0x3554, 0x00},
+ {0x3555, 0x01},
+ {0x3556, 0x01},
+ {0x3557, 0x01},
+ {0x3558, 0x01},
+ {0x3559, 0x00},
+ {0x355A, 0x00},
+ {0x35BA, 0x0E},
+ {0x366A, 0x1B},
+ {0x366B, 0x19},
+ {0x366C, 0x17},
+ {0x366D, 0x17},
+ {0x3A41, 0x04},
+
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 first step register configuration for
+ * starting stream
+ */
+static const struct reg_8 imx274_start_1[] = {
+ {IMX274_STANDBY_REG, 0x12},
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 second step register configuration for
+ * starting stream
+ */
+static const struct reg_8 imx274_start_2[] = {
+ {0x3120, 0xF0}, /* clock settings */
+ {0x3121, 0x00}, /* clock settings */
+ {0x3122, 0x02}, /* clock settings */
+ {0x3129, 0x9C}, /* clock settings */
+ {0x312A, 0x02}, /* clock settings */
+ {0x312D, 0x02}, /* clock settings */
+
+ {0x310B, 0x00},
+
+ /* PLSTMG */
+ {0x304C, 0x00}, /* PLSTMG01 */
+ {0x304D, 0x03},
+ {0x331C, 0x1A},
+ {0x331D, 0x00},
+ {0x3502, 0x02},
+ {0x3529, 0x0E},
+ {0x352A, 0x0E},
+ {0x352B, 0x0E},
+ {0x3538, 0x0E},
+ {0x3539, 0x0E},
+ {0x3553, 0x00},
+ {0x357D, 0x05},
+ {0x357F, 0x05},
+ {0x3581, 0x04},
+ {0x3583, 0x76},
+ {0x3587, 0x01},
+ {0x35BB, 0x0E},
+ {0x35BC, 0x0E},
+ {0x35BD, 0x0E},
+ {0x35BE, 0x0E},
+ {0x35BF, 0x0E},
+ {0x366E, 0x00},
+ {0x366F, 0x00},
+ {0x3670, 0x00},
+ {0x3671, 0x00},
+
+ /* PSMIPI */
+ {0x3304, 0x32}, /* PSMIPI1 */
+ {0x3305, 0x00},
+ {0x3306, 0x32},
+ {0x3307, 0x00},
+ {0x3590, 0x32},
+ {0x3591, 0x00},
+ {0x3686, 0x32},
+ {0x3687, 0x00},
+
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 third step register configuration for
+ * starting stream
+ */
+static const struct reg_8 imx274_start_3[] = {
+ {IMX274_STANDBY_REG, 0x00},
+ {0x303E, 0x02}, /* SYS_MODE = 2 */
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 fourth step register configuration for
+ * starting stream
+ */
+static const struct reg_8 imx274_start_4[] = {
+ {0x30F4, 0x00},
+ {0x3018, 0xA2}, /* XHS VHS OUTPUT */
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 register configuration for stopping stream
+ */
+static const struct reg_8 imx274_stop[] = {
+ {IMX274_STANDBY_REG, 0x01},
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 disable test pattern register configuration
+ */
+static const struct reg_8 imx274_tp_disabled[] = {
+ {0x303C, 0x00},
+ {0x377F, 0x00},
+ {0x3781, 0x00},
+ {0x370B, 0x00},
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 test pattern register configuration
+ * reg 0x303D defines the test pattern modes
+ */
+static const struct reg_8 imx274_tp_regs[] = {
+ {0x303C, 0x11},
+ {0x370E, 0x01},
+ {0x377F, 0x01},
+ {0x3781, 0x01},
+ {0x370B, 0x11},
+ {IMX274_TABLE_END, 0x00}
+};
+
+static const struct reg_8 *mode_table[] = {
+ [IMX274_MODE_3840X2160] = imx274_mode1_3840x2160_raw10,
+ [IMX274_MODE_1920X1080] = imx274_mode3_1920x1080_raw10,
+ [IMX274_MODE_1280X720] = imx274_mode5_1280x720_raw10,
+
+ [IMX274_MODE_START_STREAM_1] = imx274_start_1,
+ [IMX274_MODE_START_STREAM_2] = imx274_start_2,
+ [IMX274_MODE_START_STREAM_3] = imx274_start_3,
+ [IMX274_MODE_START_STREAM_4] = imx274_start_4,
+ [IMX274_MODE_STOP_STREAM] = imx274_stop,
+};
+
+/*
+ * imx274 format related structure
+ */
+static const struct imx274_frmfmt imx274_formats[] = {
+ {MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_COLORSPACE_SRGB, {3840, 2160},
+ IMX274_MODE_3840X2160},
+ {MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_COLORSPACE_SRGB, {1920, 1080},
+ IMX274_MODE_1920X1080},
+ {MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_COLORSPACE_SRGB, {1280, 720},
+ IMX274_MODE_1280X720},
+};
+
+/*
+ * minimal frame length for each mode
+ * refer to datasheet section "Frame Rate Adjustment (CSI-2)"
+ */
+static const int min_frame_len[] = {
+ 4550, /* mode 1, 4K */
+ 2310, /* mode 3, 1080p */
+ 2310 /* mode 5, 720p */
+};
+
+/*
+ * minimum SHR register values
+ * refer to datasheet table "Shutter Setting (CSI-2)"
+ */
+static const int min_SHR[] = {
+ 12, /* mode 1, 4K */
+ 8, /* mode 3, 1080p */
+ 8 /* mode 5, 720p */
+};
+
+static const int max_frame_rate[] = {
+ 60, /* mode 1 , 4K */
+ 120, /* mode 3, 1080p */
+ 120 /* mode 5, 720p */
+};
+
+/*
+ * Number of clocks per internal offset period
+ * a constant based on mode
+ * refer to section "Integration Time in Each Readout Drive Mode (CSI-2)"
+ * in the datasheet
+ * for the implemented 3 modes, it happens to be the same number
+ */
+static const int nocpiop[] = {
+ 112, /* mode 1 , 4K */
+ 112, /* mode 3, 1080p */
+ 112 /* mode 5, 720p */
+};
+
+/*
+ * struct imx274_ctrls - imx274 ctrl structure
+ * @handler: V4L2 ctrl handler structure
+ * @exposure: Pointer to exposure ctrl structure
+ * @gain: Pointer to gain ctrl structure
+ * @vflip: Pointer to vflip ctrl structure
+ * @test_pattern: Pointer to test pattern ctrl structure
+ */
+struct imx274_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *test_pattern;
+};
+
+/*
+ * struct stimx274 - imx274 device structure
+ * @sd: V4L2 subdevice structure
+ * @pad: Media pad structure
+ * @client: Pointer to I2C client
+ * @ctrls: imx274 control structure
+ * @format: V4L2 media bus frame format structure
+ * @frame_interval: V4L2 frame interval structure
+ * @regmap: Pointer to regmap structure
+ * @reset_gpio: Pointer to reset gpio
+ * @lock: Mutex structure
+ * @mode_index: Resolution mode index
+ */
+struct stimx274 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct i2c_client *client;
+ struct imx274_ctrls ctrls;
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_fract frame_interval;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ struct mutex lock; /* mutex lock for operations */
+ u32 mode_index;
+};
+
+/*
+ * Function declaration
+ */
+static int imx274_set_gain(struct stimx274 *priv, struct v4l2_ctrl *ctrl);
+static int imx274_set_exposure(struct stimx274 *priv, int val);
+static int imx274_set_vflip(struct stimx274 *priv, int val);
+static int imx274_set_test_pattern(struct stimx274 *priv, int val);
+static int imx274_set_frame_interval(struct stimx274 *priv,
+ struct v4l2_fract frame_interval);
+
+static inline void msleep_range(unsigned int delay_base)
+{
+ usleep_range(delay_base * 1000, delay_base * 1000 + 500);
+}
+
+/*
+ * v4l2_ctrl and v4l2_subdev related operations
+ */
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler,
+ struct stimx274, ctrls.handler)->sd;
+}
+
+static inline struct stimx274 *to_imx274(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct stimx274, sd);
+}
+
+/*
+ * imx274_regmap_util_write_table_8 - Function for writing register table
+ * @regmap: Pointer to device reg map structure
+ * @table: Table containing register values
+ * @wait_ms_addr: Flag for performing delay
+ * @end_addr: Flag for indicating the end of table
+ *
+ * This is used to write a register table into the sensor's reg map.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_regmap_util_write_table_8(struct regmap *regmap,
+ const struct reg_8 table[],
+ u16 wait_ms_addr, u16 end_addr)
+{
+ int err;
+ const struct reg_8 *next;
+ u8 val;
+
+ int range_start = -1;
+ int range_count = 0;
+ u8 range_vals[16];
+ int max_range_vals = ARRAY_SIZE(range_vals);
+
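+ /*
+ * Coalesce writes to consecutive register addresses and flush the
+ * pending range with a single bulk write whenever a gap, a special
+ * address (wait/end marker) or the buffer limit is reached.
+ */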
+ for (next = table;; next++) {
+ if ((next->addr != range_start + range_count) ||
+ (next->addr == end_addr) ||
+ (next->addr == wait_ms_addr) ||
+ (range_count == max_range_vals)) {
+ if (range_count == 1)
+ err = regmap_write(regmap,
+ range_start, range_vals[0]);
+ else if (range_count > 1)
+ err = regmap_bulk_write(regmap, range_start,
+ &range_vals[0],
+ range_count);
+
+ if (err)
+ return err;
+
+ range_start = -1;
+ range_count = 0;
+
+ /* Handle special address values */
+ if (next->addr == end_addr)
+ break;
+
+ if (next->addr == wait_ms_addr) {
+ msleep_range(next->val);
+ continue;
+ }
+ }
+
+ val = next->val;
+
+ if (range_start == -1)
+ range_start = next->addr;
+
+ range_vals[range_count++] = val;
+ }
+ return 0;
+}
+
+static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val)
+{
+ int err;
+
+ err = regmap_read(priv->regmap, addr, (unsigned int *)val);
+ if (err)
+ dev_err(&priv->client->dev,
+ "%s : i2c read failed, addr = %x\n", __func__, addr);
+ else
+ dev_dbg(&priv->client->dev,
+ "%s : addr 0x%x, val=0x%x\n", __func__,
+ addr, *val);
+ return err;
+}
+
+static inline int imx274_write_reg(struct stimx274 *priv, u16 addr, u8 val)
+{
+ int err;
+
+ err = regmap_write(priv->regmap, addr, val);
+ if (err)
+ dev_err(&priv->client->dev,
+ "%s : i2c write failed, %x = %x\n", __func__,
+ addr, val);
+ else
+ dev_dbg(&priv->client->dev,
+ "%s : addr 0x%x, val=0x%x\n", __func__,
+ addr, val);
+ return err;
+}
+
+static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
+{
+ return imx274_regmap_util_write_table_8(priv->regmap,
+ table, IMX274_TABLE_WAIT_MS, IMX274_TABLE_END);
+}
+
+/*
+ * imx274_mode_regs - Function for setting mode registers per mode index
+ * @priv: Pointer to device structure
+ * @mode: Mode index value
+ *
+ * This is used to start stream per mode index.
+ * mode = 0, start stream for sensor Mode 1: 4K/raw10
+ * mode = 1, start stream for sensor Mode 3: 1080p/raw10
+ * mode = 2, start stream for sensor Mode 5: 720p/raw10
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_mode_regs(struct stimx274 *priv, int mode)
+{
+ int err = 0;
+
+ err = imx274_write_table(priv, mode_table[IMX274_MODE_START_STREAM_1]);
+ if (err)
+ return err;
+
+ err = imx274_write_table(priv, mode_table[IMX274_MODE_START_STREAM_2]);
+ if (err)
+ return err;
+
+ err = imx274_write_table(priv, mode_table[mode]);
+
+ return err;
+}
+
+/*
+ * imx274_start_stream - Function for starting stream per mode index
+ * @priv: Pointer to device structure
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_start_stream(struct stimx274 *priv)
+{
+ int err = 0;
+
+ /*
+ * Refer to "Standby Cancel Sequence when using CSI-2" in
+ * the imx274 datasheet; it should wait 10 ms or more here.
+ * Give it 1 extra ms for margin.
+ */
+ msleep_range(11);
+ err = imx274_write_table(priv, mode_table[IMX274_MODE_START_STREAM_3]);
+ if (err)
+ return err;
+
+ /*
+ * Refer to "Standby Cancel Sequence when using CSI-2" in
+ * the imx274 datasheet; it should wait 7 ms or more here.
+ * Give it 1 extra ms for margin.
+ */
+ msleep_range(8);
+ err = imx274_write_table(priv, mode_table[IMX274_MODE_START_STREAM_4]);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * imx274_reset - Function called to reset the sensor
+ * @priv: Pointer to device structure
+ * @rst: Input value for determining the sensor's end state after reset
+ *
+ * Set the sensor in reset and then
+ * if rst = 0, keep it in reset;
+ * if rst = 1, bring it out of reset.
+ *
+ */
+static void imx274_reset(struct stimx274 *priv, int rst)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(IMX274_RESET_DELAY1, IMX274_RESET_DELAY2);
+ gpiod_set_value_cansleep(priv->reset_gpio, !!rst);
+ usleep_range(IMX274_RESET_DELAY1, IMX274_RESET_DELAY2);
+}
+
+/**
+ * imx274_s_ctrl - This is used to set the imx274 V4L2 controls
+ * @ctrl: V4L2 control to be set
+ *
+ * This function is used to set the V4L2 controls for the imx274 sensor.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct stimx274 *imx274 = to_imx274(sd);
+ int ret = -EINVAL;
+
+ dev_dbg(&imx274->client->dev,
+ "%s : s_ctrl: %s, value: %d\n", __func__,
+ ctrl->name, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ dev_dbg(&imx274->client->dev,
+ "%s : set V4L2_CID_EXPOSURE\n", __func__);
+ ret = imx274_set_exposure(imx274, ctrl->val);
+ break;
+
+ case V4L2_CID_GAIN:
+ dev_dbg(&imx274->client->dev,
+ "%s : set V4L2_CID_GAIN\n", __func__);
+ ret = imx274_set_gain(imx274, ctrl);
+ break;
+
+ case V4L2_CID_VFLIP:
+ dev_dbg(&imx274->client->dev,
+ "%s : set V4L2_CID_VFLIP\n", __func__);
+ ret = imx274_set_vflip(imx274, ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ dev_dbg(&imx274->client->dev,
+ "%s : set V4L2_CID_TEST_PATTERN\n", __func__);
+ ret = imx274_set_test_pattern(imx274, ctrl->val);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * imx274_get_fmt - Get the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to get the pad format information.
+ *
+ * Return: 0 on success
+ */
+static int imx274_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+
+ mutex_lock(&imx274->lock);
+ fmt->format = imx274->format;
+ mutex_unlock(&imx274->lock);
+ return 0;
+}
+
+/**
+ * imx274_set_fmt - This is used to set the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @format: Pointer to pad level media bus format
+ *
+ * This function is used to set the pad format.
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct stimx274 *imx274 = to_imx274(sd);
+ struct i2c_client *client = imx274->client;
+ int index;
+
+ dev_dbg(&client->dev,
+ "%s: width = %d height = %d code = %d mbus_code = %d\n",
+ __func__, fmt->width, fmt->height, fmt->code,
+ imx274_formats[imx274->mode_index].mbus_code);
+
+ mutex_lock(&imx274->lock);
+
+ for (index = 0; index < ARRAY_SIZE(imx274_formats); index++) {
+ if (imx274_formats[index].size.width == fmt->width &&
+ imx274_formats[index].size.height == fmt->height)
+ break;
+ }
+
+ if (index >= ARRAY_SIZE(imx274_formats)) {
+ /* default to first format */
+ index = 0;
+ }
+
+ imx274->mode_index = index;
+
+ if (fmt->width > IMX274_MAX_WIDTH)
+ fmt->width = IMX274_MAX_WIDTH;
+ if (fmt->height > IMX274_MAX_HEIGHT)
+ fmt->height = IMX274_MAX_HEIGHT;
+ fmt->width = fmt->width & (~IMX274_MASK_LSB_2_BITS);
+ fmt->height = fmt->height & (~IMX274_MASK_LSB_2_BITS);
+ fmt->field = V4L2_FIELD_NONE;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ cfg->try_fmt = *fmt;
+ else
+ imx274->format = *fmt;
+
+ mutex_unlock(&imx274->lock);
+ return 0;
+}
+
+/**
+ * imx274_g_frame_interval - Get the frame interval
+ * @sd: Pointer to V4L2 Sub device structure
+ * @fi: Pointer to V4l2 Sub device frame interval structure
+ *
+ * This function is used to get the frame interval.
+ *
+ * Return: 0 on success
+ */
+static int imx274_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+
+ fi->interval = imx274->frame_interval;
+ dev_dbg(&imx274->client->dev, "%s frame rate = %d / %d\n",
+ __func__, imx274->frame_interval.numerator,
+ imx274->frame_interval.denominator);
+
+ return 0;
+}
+
+/**
+ * imx274_s_frame_interval - Set the frame interval
+ * @sd: Pointer to V4L2 Sub device structure
+ * @fi: Pointer to V4l2 Sub device frame interval structure
+ *
+ * This function is used to set the frame interval.
+ *
+ * Return: 0 on success
+ */
+static int imx274_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+ struct v4l2_ctrl *ctrl = imx274->ctrls.exposure;
+ int min, max, def;
+ int ret;
+
+ mutex_lock(&imx274->lock);
+ ret = imx274_set_frame_interval(imx274, fi->interval);
+
+ if (!ret) {
+ /*
+ * The exposure time range is decided by the frame interval;
+ * it needs to be updated after the frame interval changes.
+ */
+ min = IMX274_MIN_EXPOSURE_TIME;
+ max = fi->interval.numerator * 1000000
+ / fi->interval.denominator;
+ def = max;
+ if (__v4l2_ctrl_modify_range(ctrl, min, max, 1, def)) {
+ dev_err(&imx274->client->dev,
+ "Exposure ctrl range update failed\n");
+ goto unlock;
+ }
+
+ /* update exposure time accordingly */
+ imx274_set_exposure(imx274, imx274->ctrls.exposure->val);
+
+ dev_dbg(&imx274->client->dev, "set frame interval to %uus\n",
+ fi->interval.numerator * 1000000
+ / fi->interval.denominator);
+ }
+
+unlock:
+ mutex_unlock(&imx274->lock);
+
+ return ret;
+}
+
+/**
+ * imx274_load_default - load default control values
+ * @priv: Pointer to device structure
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_load_default(struct stimx274 *priv)
+{
+ int ret;
+
+ /* load default control values */
+ priv->frame_interval.numerator = 1;
+ priv->frame_interval.denominator = IMX274_DEF_FRAME_RATE;
+ priv->ctrls.exposure->val = 1000000 / IMX274_DEF_FRAME_RATE;
+ priv->ctrls.gain->val = IMX274_DEF_GAIN;
+ priv->ctrls.vflip->val = 0;
+ priv->ctrls.test_pattern->val = TEST_PATTERN_DISABLED;
+
+ /* update frame rate */
+ ret = imx274_set_frame_interval(priv,
+ priv->frame_interval);
+ if (ret)
+ return ret;
+
+ /* update exposure time */
+ ret = v4l2_ctrl_s_ctrl(priv->ctrls.exposure, priv->ctrls.exposure->val);
+ if (ret)
+ return ret;
+
+ /* update gain */
+ ret = v4l2_ctrl_s_ctrl(priv->ctrls.gain, priv->ctrls.gain->val);
+ if (ret)
+ return ret;
+
+ /* update vflip */
+ ret = v4l2_ctrl_s_ctrl(priv->ctrls.vflip, priv->ctrls.vflip->val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * imx274_s_stream - It is used to start/stop the streaming.
+ * @sd: V4L2 Sub device
+ * @on: Flag (True / False)
+ *
+ * This function controls the start or stop of streaming for the
+ * imx274 sensor.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+ int ret = 0;
+
+ dev_dbg(&imx274->client->dev, "%s : %s, mode index = %d\n", __func__,
+ on ? "Stream Start" : "Stream Stop", imx274->mode_index);
+
+ mutex_lock(&imx274->lock);
+
+ if (on) {
+ /* load mode registers */
+ ret = imx274_mode_regs(imx274, imx274->mode_index);
+ if (ret)
+ goto fail;
+
+ /*
+ * Update frame rate & exposure. If the last mode is different,
+ * HMAX could be changed. As a result, frame rate & exposure
+ * are changed.
+ * Gain is not affected.
+ */
+ ret = imx274_set_frame_interval(imx274,
+ imx274->frame_interval);
+ if (ret)
+ goto fail;
+
+ /* update exposure time */
+ ret = __v4l2_ctrl_s_ctrl(imx274->ctrls.exposure,
+ imx274->ctrls.exposure->val);
+ if (ret)
+ goto fail;
+
+ /* start stream */
+ ret = imx274_start_stream(imx274);
+ if (ret)
+ goto fail;
+ } else {
+ /* stop stream */
+ ret = imx274_write_table(imx274,
+ mode_table[IMX274_MODE_STOP_STREAM]);
+ if (ret)
+ goto fail;
+ }
+
+ mutex_unlock(&imx274->lock);
+ dev_dbg(&imx274->client->dev,
+ "%s : Done: mode = %d\n", __func__, imx274->mode_index);
+ return 0;
+
+fail:
+ mutex_unlock(&imx274->lock);
+ dev_err(&imx274->client->dev, "s_stream failed\n");
+ return ret;
+}
+
+/*
+ * imx274_get_frame_length - Function for obtaining current frame length
+ * @priv: Pointer to device structure
+ * @val: Pointer to the obtained value
+ *
+ * frame_length = vmax x (svr + 1), in unit of hmax.
+ *
+ * Return: 0 on success
+ */
+static int imx274_get_frame_length(struct stimx274 *priv, u32 *val)
+{
+ int err;
+ u16 svr;
+ u32 vmax;
+ u8 reg_val[3];
+
+ /* svr */
+ err = imx274_read_reg(priv, IMX274_SVR_REG_LSB, &reg_val[0]);
+ if (err)
+ goto fail;
+
+ err = imx274_read_reg(priv, IMX274_SVR_REG_MSB, &reg_val[1]);
+ if (err)
+ goto fail;
+
+ svr = (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+
+ /* vmax */
+ err = imx274_read_reg(priv, IMX274_FRAME_LENGTH_ADDR_3, &reg_val[0]);
+ if (err)
+ goto fail;
+
+ err = imx274_read_reg(priv, IMX274_FRAME_LENGTH_ADDR_2, &reg_val[1]);
+ if (err)
+ goto fail;
+
+ err = imx274_read_reg(priv, IMX274_FRAME_LENGTH_ADDR_1, &reg_val[2]);
+ if (err)
+ goto fail;
+
+ vmax = ((reg_val[2] & IMX274_MASK_LSB_3_BITS) << IMX274_SHIFT_16_BITS)
+ + (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+
+ *val = vmax * (svr + 1);
+
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
+
+static int imx274_clamp_coarse_time(struct stimx274 *priv, u32 *val,
+ u32 *frame_length)
+{
+ int err;
+
+ err = imx274_get_frame_length(priv, frame_length);
+ if (err)
+ return err;
+
+ if (*frame_length < min_frame_len[priv->mode_index])
+ *frame_length = min_frame_len[priv->mode_index];
+
+ *val = *frame_length - *val; /* convert to raw shr */
+ if (*val > *frame_length - IMX274_SHR_LIMIT_CONST)
+ *val = *frame_length - IMX274_SHR_LIMIT_CONST;
+ else if (*val < min_SHR[priv->mode_index])
+ *val = min_SHR[priv->mode_index];
+
+ return 0;
+}
+
+/*
+ * imx274_set_digital_gain - Function called when setting digital gain
+ * @priv: Pointer to device structure
+ * @dgain: Value of digital gain.
+ *
+ * Digital gain has only 4 steps: 1x, 2x, 4x, and 8x
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_digital_gain(struct stimx274 *priv, u32 dgain)
+{
+ u8 reg_val;
+
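+ /*
+ * Map the power-of-two gain (1x, 2x, 4x, 8x) to its register code:
+ * ffs() yields 1..4, which the decrement and clamp below turn into 0..3.
+ */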
+ reg_val = ffs(dgain);
+
+ if (reg_val)
+ reg_val--;
+
+ reg_val = clamp(reg_val, (u8)0, (u8)3);
+
+ return imx274_write_reg(priv, IMX274_DIGITAL_GAIN_REG,
+ reg_val & IMX274_MASK_LSB_4_BITS);
+}
+
+static inline void imx274_calculate_gain_regs(struct reg_8 regs[2], u16 gain)
+{
+ regs->addr = IMX274_ANALOG_GAIN_ADDR_MSB;
+ regs->val = (gain >> IMX274_SHIFT_8_BITS) & IMX274_MASK_LSB_3_BITS;
+
+ (regs + 1)->addr = IMX274_ANALOG_GAIN_ADDR_LSB;
+ (regs + 1)->val = (gain) & IMX274_MASK_LSB_8_BITS;
+}
+
+/*
+ * imx274_set_gain - Function called when setting gain
+ * @priv: Pointer to device structure
+ * @ctrl: V4L2 control pointer; ctrl->val holds the gain in fixed point,
+ * i.e. real gain = ctrl->val / (1 << IMX274_GAIN_SHIFT)
+ *
+ * Set the gain based on input value.
+ * The caller should hold the mutex lock imx274->lock if necessary
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_gain(struct stimx274 *priv, struct v4l2_ctrl *ctrl)
+{
+ struct reg_8 reg_list[2];
+ int err;
+ u32 gain, analog_gain, digital_gain, gain_reg;
+ int i;
+
+ gain = (u32)(ctrl->val);
+
+ dev_dbg(&priv->client->dev,
+ "%s : input gain = %d.%d\n", __func__,
+ gain >> IMX274_GAIN_SHIFT,
+ ((gain & IMX274_GAIN_SHIFT_MASK) * 100) >> IMX274_GAIN_SHIFT);
+
+ if (gain > IMX274_MAX_DIGITAL_GAIN * IMX274_MAX_ANALOG_GAIN)
+ gain = IMX274_MAX_DIGITAL_GAIN * IMX274_MAX_ANALOG_GAIN;
+ else if (gain < IMX274_MIN_GAIN)
+ gain = IMX274_MIN_GAIN;
+
+ if (gain <= IMX274_MAX_ANALOG_GAIN)
+ digital_gain = 1;
+ else if (gain <= IMX274_MAX_ANALOG_GAIN * 2)
+ digital_gain = 2;
+ else if (gain <= IMX274_MAX_ANALOG_GAIN * 4)
+ digital_gain = 4;
+ else
+ digital_gain = IMX274_MAX_DIGITAL_GAIN;
+
+ analog_gain = gain / digital_gain;
+
+ dev_dbg(&priv->client->dev,
+ "%s : digital gain = %d, analog gain = %d.%d\n",
+ __func__, digital_gain, analog_gain >> IMX274_GAIN_SHIFT,
+ ((analog_gain & IMX274_GAIN_SHIFT_MASK) * 100)
+ >> IMX274_GAIN_SHIFT);
+
+ err = imx274_set_digital_gain(priv, digital_gain);
+ if (err)
+ goto fail;
+
+ /* convert to register value, refer to imx274 datasheet */
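+ /*
+ * The gain formula used here is analog gain = 2048 / (2048 - gain_reg),
+ * hence gain_reg = 2048 - (2048 << IMX274_GAIN_SHIFT) / analog_gain,
+ * with analog_gain carrying IMX274_GAIN_SHIFT fractional bits.
+ */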
+ gain_reg = (u32)IMX274_GAIN_CONST -
+ (IMX274_GAIN_CONST << IMX274_GAIN_SHIFT) / analog_gain;
+ if (gain_reg > IMX274_GAIN_REG_MAX)
+ gain_reg = IMX274_GAIN_REG_MAX;
+
+ imx274_calculate_gain_regs(reg_list, (u16)gain_reg);
+
+ for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
+ err = imx274_write_reg(priv, reg_list[i].addr,
+ reg_list[i].val);
+ if (err)
+ goto fail;
+ }
+
+ if (IMX274_GAIN_CONST - gain_reg == 0) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ /* convert register value back to gain value */
+ ctrl->val = (IMX274_GAIN_CONST << IMX274_GAIN_SHIFT)
+ / (IMX274_GAIN_CONST - gain_reg) * digital_gain;
+
+ dev_dbg(&priv->client->dev,
+ "%s : GAIN control success, gain_reg = %d, new gain = %d\n",
+ __func__, gain_reg, ctrl->val);
+
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
+
+static inline void imx274_calculate_coarse_time_regs(struct reg_8 regs[2],
+ u32 coarse_time)
+{
+ regs->addr = IMX274_COARSE_TIME_ADDR_MSB;
+ regs->val = (coarse_time >> IMX274_SHIFT_8_BITS)
+ & IMX274_MASK_LSB_8_BITS;
+ (regs + 1)->addr = IMX274_COARSE_TIME_ADDR_LSB;
+ (regs + 1)->val = (coarse_time) & IMX274_MASK_LSB_8_BITS;
+}
+
+/*
+ * imx274_set_coarse_time - Function called when setting SHR value
+ * @priv: Pointer to device structure
+ * @val: Value for exposure time in number of line_length, or [HMAX]
+ *
+ * Set SHR value based on input value.
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_coarse_time(struct stimx274 *priv, u32 *val)
+{
+ struct reg_8 reg_list[2];
+ int err;
+ u32 coarse_time, frame_length;
+ int i;
+
+ coarse_time = *val;
+
+ /* convert exposure_time to appropriate SHR value */
+ err = imx274_clamp_coarse_time(priv, &coarse_time, &frame_length);
+ if (err)
+ goto fail;
+
+ /* prepare SHR registers */
+ imx274_calculate_coarse_time_regs(reg_list, coarse_time);
+
+ /* write to SHR registers */
+ for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
+ err = imx274_write_reg(priv, reg_list[i].addr,
+ reg_list[i].val);
+ if (err)
+ goto fail;
+ }
+
+ *val = frame_length - coarse_time;
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
+
+/*
+ * imx274_set_exposure - Function called when setting exposure time
+ * @priv: Pointer to device structure
+ * @val: Variable for exposure time, in the unit of micro-second
+ *
+ * Set exposure time based on input value.
+ * The caller should hold the mutex lock imx274->lock if necessary
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_exposure(struct stimx274 *priv, int val)
+{
+ int err;
+ u16 hmax;
+ u8 reg_val[2];
+ u32 coarse_time; /* exposure time in unit of line (HMAX)*/
+
+ dev_dbg(&priv->client->dev,
+ "%s : EXPOSURE control input = %d\n", __func__, val);
+
+ /* step 1: convert input exposure_time (val) into number of 1[HMAX] */
+
+ /* obtain HMAX value */
+ err = imx274_read_reg(priv, IMX274_HMAX_REG_LSB, &reg_val[0]);
+ if (err)
+ goto fail;
+ err = imx274_read_reg(priv, IMX274_HMAX_REG_MSB, &reg_val[1]);
+ if (err)
+ goto fail;
+ hmax = (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+ if (hmax == 0) {
+ err = -EINVAL;
+ goto fail;
+ }
+
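+ /*
+ * IMX274_PIXCLK_CONST1 / IMX274_PIXCLK_CONST2 = 72 pixel clocks per
+ * microsecond, so the exposure expressed in HMAX units is
+ * (72 * val - nocpiop) / hmax.
+ */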
+ coarse_time = (IMX274_PIXCLK_CONST1 / IMX274_PIXCLK_CONST2 * val
+ - nocpiop[priv->mode_index]) / hmax;
+
+ /* step 2: convert exposure_time into SHR value */
+
+ /* set SHR */
+ err = imx274_set_coarse_time(priv, &coarse_time);
+ if (err)
+ goto fail;
+
+ priv->ctrls.exposure->val =
+ (coarse_time * hmax + nocpiop[priv->mode_index])
+ / (IMX274_PIXCLK_CONST1 / IMX274_PIXCLK_CONST2);
+
+ dev_dbg(&priv->client->dev,
+ "%s : EXPOSURE control success\n", __func__);
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+
+ return err;
+}
+
+/*
+ * imx274_set_vflip - Function called when setting vertical flip
+ * @priv: Pointer to device structure
+ * @val: Value for vflip setting
+ *
+ * Set vertical flip based on input value.
+ * val = 0: normal, no vertical flip
+ * val = 1: vertical flip enabled
+ * The caller should hold the mutex lock imx274->lock if necessary
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_vflip(struct stimx274 *priv, int val)
+{
+ int err;
+
+ err = imx274_write_reg(priv, IMX274_VFLIP_REG, val);
+ if (err) {
+ dev_err(&priv->client->dev, "VFILP control error\n");
+ return err;
+ }
+
+ dev_dbg(&priv->client->dev,
+ "%s : VFLIP control success\n", __func__);
+
+ return 0;
+}
+
+/*
+ * imx274_set_test_pattern - Function called when setting test pattern
+ * @priv: Pointer to device structure
+ * @val: Variable for test pattern
+ *
+ * Set to different test patterns based on input value.
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_test_pattern(struct stimx274 *priv, int val)
+{
+ int err = 0;
+
+ if (val == TEST_PATTERN_DISABLED) {
+ err = imx274_write_table(priv, imx274_tp_disabled);
+ } else if (val <= TEST_PATTERN_V_COLOR_BARS) {
+ err = imx274_write_reg(priv, IMX274_TEST_PATTERN_REG, val - 1);
+ if (!err)
+ err = imx274_write_table(priv, imx274_tp_regs);
+ } else {
+ err = -EINVAL;
+ }
+
+ if (!err)
+ dev_dbg(&priv->client->dev,
+ "%s : TEST PATTERN control success\n", __func__);
+ else
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+
+ return err;
+}
+
+static inline void imx274_calculate_frame_length_regs(struct reg_8 regs[3],
+ u32 frame_length)
+{
+ regs->addr = IMX274_FRAME_LENGTH_ADDR_1;
+ regs->val = (frame_length >> IMX274_SHIFT_16_BITS)
+ & IMX274_MASK_LSB_4_BITS;
+ (regs + 1)->addr = IMX274_FRAME_LENGTH_ADDR_2;
+ (regs + 1)->val = (frame_length >> IMX274_SHIFT_8_BITS)
+ & IMX274_MASK_LSB_8_BITS;
+ (regs + 2)->addr = IMX274_FRAME_LENGTH_ADDR_3;
+ (regs + 2)->val = (frame_length) & IMX274_MASK_LSB_8_BITS;
+}
+
+/*
+ * imx274_set_frame_length - Function called when setting frame length
+ * @priv: Pointer to device structure
+ * @val: Variable for frame length (= VMAX, i.e. vertical drive period length)
+ *
+ * Set frame length based on input value.
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_frame_length(struct stimx274 *priv, u32 val)
+{
+ struct reg_8 reg_list[3];
+ int err;
+ u32 frame_length;
+ int i;
+
+ dev_dbg(&priv->client->dev, "%s : input length = %d\n",
+ __func__, val);
+
+ frame_length = (u32)val;
+
+ imx274_calculate_frame_length_regs(reg_list, frame_length);
+ for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
+ err = imx274_write_reg(priv, reg_list[i].addr,
+ reg_list[i].val);
+ if (err)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
+
+/*
+ * imx274_set_frame_interval - Function called when setting frame interval
+ * @priv: Pointer to device structure
+ * @frame_interval: Variable for frame interval
+ *
+ * Change frame interval by updating VMAX value
+ * The caller should hold the mutex lock imx274->lock if necessary
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_frame_interval(struct stimx274 *priv,
+ struct v4l2_fract frame_interval)
+{
+ int err;
+ u32 frame_length, req_frame_rate;
+ u16 svr;
+ u16 hmax;
+ u8 reg_val[2];
+
+ dev_dbg(&priv->client->dev, "%s: input frame interval = %d / %d",
+ __func__, frame_interval.numerator,
+ frame_interval.denominator);
+
+ if (frame_interval.numerator == 0) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ req_frame_rate = (u32)(frame_interval.denominator
+ / frame_interval.numerator);
+
+ /* boundary check */
+ if (req_frame_rate > max_frame_rate[priv->mode_index]) {
+ frame_interval.numerator = 1;
+ frame_interval.denominator =
+ max_frame_rate[priv->mode_index];
+ } else if (req_frame_rate < IMX274_MIN_FRAME_RATE) {
+ frame_interval.numerator = 1;
+ frame_interval.denominator = IMX274_MIN_FRAME_RATE;
+ }
+
+ /*
+ * VMAX = 1/frame_rate x 72M / (SVR+1) / HMAX
+ * frame_length (i.e. VMAX) = (frame_interval) x 72M /(SVR+1) / HMAX
+ */
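+ /*
+ * For example, a 1/60 s frame interval with SVR = 0 and HMAX = 260
+ * gives VMAX = 72000000 / 260 / 60, about 4615.
+ */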
+
+ /* SVR */
+ err = imx274_read_reg(priv, IMX274_SVR_REG_LSB, &reg_val[0]);
+ if (err)
+ goto fail;
+ err = imx274_read_reg(priv, IMX274_SVR_REG_MSB, &reg_val[1]);
+ if (err)
+ goto fail;
+ svr = (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+ dev_dbg(&priv->client->dev,
+ "%s : register SVR = %d\n", __func__, svr);
+
+ /* HMAX */
+ err = imx274_read_reg(priv, IMX274_HMAX_REG_LSB, &reg_val[0]);
+ if (err)
+ goto fail;
+ err = imx274_read_reg(priv, IMX274_HMAX_REG_MSB, &reg_val[1]);
+ if (err)
+ goto fail;
+ hmax = (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+ dev_dbg(&priv->client->dev,
+ "%s : register HMAX = %d\n", __func__, hmax);
+
+ if (hmax == 0 || frame_interval.denominator == 0) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ frame_length = IMX274_PIXCLK_CONST1 / (svr + 1) / hmax
+ * frame_interval.numerator
+ / frame_interval.denominator;
+
+ err = imx274_set_frame_length(priv, frame_length);
+ if (err)
+ goto fail;
+
+ priv->frame_interval = frame_interval;
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
+
+static const struct v4l2_subdev_pad_ops imx274_pad_ops = {
+ .get_fmt = imx274_get_fmt,
+ .set_fmt = imx274_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops imx274_video_ops = {
+ .g_frame_interval = imx274_g_frame_interval,
+ .s_frame_interval = imx274_s_frame_interval,
+ .s_stream = imx274_s_stream,
+};
+
+static const struct v4l2_subdev_ops imx274_subdev_ops = {
+ .pad = &imx274_pad_ops,
+ .video = &imx274_video_ops,
+};
+
+static const struct v4l2_ctrl_ops imx274_ctrl_ops = {
+ .s_ctrl = imx274_s_ctrl,
+};
+
+static const struct of_device_id imx274_of_id_table[] = {
+ { .compatible = "sony,imx274" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx274_of_id_table);
+
+static const struct i2c_device_id imx274_id[] = {
+ { "IMX274", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, imx274_id);
+
+static int imx274_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct v4l2_subdev *sd;
+ struct stimx274 *imx274;
+ int ret;
+
+ /* initialize imx274 */
+ imx274 = devm_kzalloc(&client->dev, sizeof(*imx274), GFP_KERNEL);
+ if (!imx274)
+ return -ENOMEM;
+
+ mutex_init(&imx274->lock);
+
+ /* initialize regmap */
+ imx274->regmap = devm_regmap_init_i2c(client, &imx274_regmap_config);
+ if (IS_ERR(imx274->regmap)) {
+ dev_err(&client->dev,
+ "regmap init failed: %ld\n", PTR_ERR(imx274->regmap));
+ ret = -ENODEV;
+ goto err_regmap;
+ }
+
+ /* initialize subdevice */
+ imx274->client = client;
+ sd = &imx274->sd;
+ v4l2_i2c_subdev_init(sd, client, &imx274_subdev_ops);
+ strlcpy(sd->name, DRIVER_NAME, sizeof(sd->name));
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ /* initialize subdev media pad */
+ imx274->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&sd->entity, 1, &imx274->pad);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s : media entity init Failed %d\n", __func__, ret);
+ goto err_regmap;
+ }
+
+ /* initialize sensor reset gpio */
+ imx274->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(imx274->reset_gpio)) {
+ if (PTR_ERR(imx274->reset_gpio) != -EPROBE_DEFER)
+ dev_err(&client->dev, "Reset GPIO not setup in DT");
+ ret = PTR_ERR(imx274->reset_gpio);
+ goto err_me;
+ }
+
+ /* pull sensor out of reset */
+ imx274_reset(imx274, 1);
+
+ /* initialize controls */
+ ret = v4l2_ctrl_handler_init(&imx274->ctrls.handler, 2);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s : ctrl handler init Failed\n", __func__);
+ goto err_me;
+ }
+
+ imx274->ctrls.handler.lock = &imx274->lock;
+
+ /* add new controls */
+ imx274->ctrls.test_pattern = v4l2_ctrl_new_std_menu_items(
+ &imx274->ctrls.handler, &imx274_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(tp_qmenu) - 1, 0, 0, tp_qmenu);
+
+ imx274->ctrls.gain = v4l2_ctrl_new_std(
+ &imx274->ctrls.handler,
+ &imx274_ctrl_ops,
+ V4L2_CID_GAIN, IMX274_MIN_GAIN,
+ IMX274_MAX_DIGITAL_GAIN * IMX274_MAX_ANALOG_GAIN, 1,
+ IMX274_DEF_GAIN);
+
+ imx274->ctrls.exposure = v4l2_ctrl_new_std(
+ &imx274->ctrls.handler,
+ &imx274_ctrl_ops,
+ V4L2_CID_EXPOSURE, IMX274_MIN_EXPOSURE_TIME,
+ 1000000 / IMX274_DEF_FRAME_RATE, 1,
+ IMX274_MIN_EXPOSURE_TIME);
+
+ imx274->ctrls.vflip = v4l2_ctrl_new_std(
+ &imx274->ctrls.handler,
+ &imx274_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ imx274->sd.ctrl_handler = &imx274->ctrls.handler;
+ if (imx274->ctrls.handler.error) {
+ ret = imx274->ctrls.handler.error;
+ goto err_ctrls;
+ }
+
+ /* setup default controls */
+ ret = v4l2_ctrl_handler_setup(&imx274->ctrls.handler);
+ if (ret) {
+ dev_err(&client->dev,
+ "Error %d setup default controls\n", ret);
+ goto err_ctrls;
+ }
+
+ /* initialize format */
+ imx274->mode_index = IMX274_MODE_3840X2160;
+ imx274->format.width = imx274_formats[0].size.width;
+ imx274->format.height = imx274_formats[0].size.height;
+ imx274->format.field = V4L2_FIELD_NONE;
+ imx274->format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
+ imx274->format.colorspace = V4L2_COLORSPACE_SRGB;
+ imx274->frame_interval.numerator = 1;
+ imx274->frame_interval.denominator = IMX274_DEF_FRAME_RATE;
+
+ /* load default control values */
+ ret = imx274_load_default(imx274);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s : imx274_load_default failed %d\n",
+ __func__, ret);
+ goto err_ctrls;
+ }
+
+ /* register subdevice */
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s : v4l2_async_register_subdev failed %d\n",
+ __func__, ret);
+ goto err_ctrls;
+ }
+
+ dev_info(&client->dev, "imx274 : imx274 probe success !\n");
+ return 0;
+
+err_ctrls:
+ v4l2_async_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+err_me:
+ media_entity_cleanup(&sd->entity);
+err_regmap:
+ mutex_destroy(&imx274->lock);
+ return ret;
+}
+
+static int imx274_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct stimx274 *imx274 = to_imx274(sd);
+
+ /* stop stream */
+ imx274_write_table(imx274, mode_table[IMX274_MODE_STOP_STREAM]);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ media_entity_cleanup(&sd->entity);
+ mutex_destroy(&imx274->lock);
+ return 0;
+}
+
+static struct i2c_driver imx274_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = imx274_of_id_table,
+ },
+ .probe = imx274_probe,
+ .remove = imx274_remove,
+ .id_table = imx274_id,
+};
+
+module_i2c_driver(imx274_i2c_driver);
+
+MODULE_AUTHOR("Leon Luo <leonl@leopardimaging.com>");
+MODULE_DESCRIPTION("IMX274 CMOS Image Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index a374e2a0ac3d..8b5f7d0435e4 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -460,7 +460,6 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
*/
rc->map_name = ir->ir_codes;
rc->allowed_protocols = rc_proto;
- rc->enabled_protocols = rc_proto;
if (!rc->driver_name)
rc->driver_name = MODULE_NAME;
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index bf0e821a2b93..2f1966bdc473 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -1345,7 +1345,7 @@ static int max2175_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(sd, client, &max2175_ops);
ctx->client = client;
- sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
/* Controls */
hdl = &ctx->ctrl_hdl;
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 99b992e46702..b1665d97e0fd 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -945,7 +945,7 @@ static int mt9m111_probe(struct i2c_client *client,
mt9m111->clk = v4l2_clk_get(&client->dev, "mclk");
if (IS_ERR(mt9m111->clk))
- return -EPROBE_DEFER;
+ return PTR_ERR(mt9m111->clk);
/* Default HIGHPOWER context */
mt9m111->ctx = &context_b;
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index af7af0d14c69..bf7d06f3f21a 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -104,7 +104,6 @@ struct ov13858_reg_list {
/* Link frequency config */
struct ov13858_link_freq_config {
- u32 pixel_rate;
u32 pixels_per_line;
/* PLL registers for this link frequency */
@@ -238,11 +237,11 @@ static const struct ov13858_reg mode_4224x3136_regs[] = {
{0x3800, 0x00},
{0x3801, 0x00},
{0x3802, 0x00},
- {0x3803, 0x00},
+ {0x3803, 0x08},
{0x3804, 0x10},
{0x3805, 0x9f},
{0x3806, 0x0c},
- {0x3807, 0x5f},
+ {0x3807, 0x57},
{0x3808, 0x10},
{0x3809, 0x80},
{0x380a, 0x0c},
@@ -948,6 +947,18 @@ static const char * const ov13858_test_pattern_menu[] = {
#define OV13858_LINK_FREQ_INDEX_0 0
#define OV13858_LINK_FREQ_INDEX_1 1
+/*
+ * pixel_rate = link_freq * data-rate * nr_of_lanes / bits_per_sample
+ * data rate => double data rate; number of lanes => 4; bits per pixel => 10
+ */
+static u64 link_freq_to_pixel_rate(u64 f)
+{
+ f *= 2 * 4;
+ do_div(f, 10);
+
+ return f;
+}
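+
+/*
+ * For example, a 540 MHz link frequency corresponds to a pixel rate of
+ * 540000000 * 2 * 4 / 10 = 432000000 pixels per second.
+ */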
+
/* Menu items for LINK_FREQ V4L2 control */
static const s64 link_freq_menu_items[OV13858_NUM_OF_LINK_FREQS] = {
OV13858_LINK_FREQ_540MHZ,
@@ -958,8 +969,6 @@ static const s64 link_freq_menu_items[OV13858_NUM_OF_LINK_FREQS] = {
static const struct ov13858_link_freq_config
link_freq_configs[OV13858_NUM_OF_LINK_FREQS] = {
{
- /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */
- .pixel_rate = (OV13858_LINK_FREQ_540MHZ * 2 * 4) / 10,
.pixels_per_line = OV13858_PPL_540MHZ,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mipi_data_rate_1080mbps),
@@ -967,8 +976,6 @@ static const struct ov13858_link_freq_config
}
},
{
- /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */
- .pixel_rate = (OV13858_LINK_FREQ_270MHZ * 2 * 4) / 10,
.pixels_per_line = OV13858_PPL_270MHZ,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mipi_data_rate_540mbps),
@@ -1385,6 +1392,8 @@ ov13858_set_pad_format(struct v4l2_subdev *sd,
s32 vblank_def;
s32 vblank_min;
s64 h_blank;
+ s64 pixel_rate;
+ s64 link_freq;
mutex_lock(&ov13858->mutex);
@@ -1400,9 +1409,10 @@ ov13858_set_pad_format(struct v4l2_subdev *sd,
} else {
ov13858->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov13858->link_freq, mode->link_freq_index);
- __v4l2_ctrl_s_ctrl_int64(
- ov13858->pixel_rate,
- link_freq_configs[mode->link_freq_index].pixel_rate);
+ link_freq = link_freq_menu_items[mode->link_freq_index];
+ pixel_rate = link_freq_to_pixel_rate(link_freq);
+ __v4l2_ctrl_s_ctrl_int64(ov13858->pixel_rate, pixel_rate);
+
/* Update limits and set FPS to default */
vblank_def = ov13858->cur_mode->vts_def -
ov13858->cur_mode->height;
@@ -1617,6 +1627,10 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
s64 exposure_max;
s64 vblank_def;
s64 vblank_min;
+ s64 hblank;
+ s64 pixel_rate_min;
+ s64 pixel_rate_max;
+ const struct ov13858_mode *mode;
int ret;
ctrl_hdlr = &ov13858->ctrl_handler;
@@ -1634,29 +1648,30 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
link_freq_menu_items);
ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
+ pixel_rate_min = link_freq_to_pixel_rate(link_freq_menu_items[1]);
/* By default, PIXEL_RATE is read only */
ov13858->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov13858_ctrl_ops,
- V4L2_CID_PIXEL_RATE, 0,
- link_freq_configs[0].pixel_rate, 1,
- link_freq_configs[0].pixel_rate);
+ V4L2_CID_PIXEL_RATE,
+ pixel_rate_min, pixel_rate_max,
+ 1, pixel_rate_max);
- vblank_def = ov13858->cur_mode->vts_def - ov13858->cur_mode->height;
- vblank_min = ov13858->cur_mode->vts_min - ov13858->cur_mode->height;
+ mode = ov13858->cur_mode;
+ vblank_def = mode->vts_def - mode->height;
+ vblank_min = mode->vts_min - mode->height;
ov13858->vblank = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_VBLANK,
- vblank_min,
- OV13858_VTS_MAX - ov13858->cur_mode->height, 1,
+ vblank_min, OV13858_VTS_MAX - mode->height, 1,
vblank_def);
+ hblank = link_freq_configs[mode->link_freq_index].pixels_per_line -
+ mode->width;
ov13858->hblank = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_HBLANK,
- OV13858_PPL_540MHZ - ov13858->cur_mode->width,
- OV13858_PPL_540MHZ - ov13858->cur_mode->width,
- 1,
- OV13858_PPL_540MHZ - ov13858->cur_mode->width);
+ hblank, hblank, 1, hblank);
ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- exposure_max = ov13858->cur_mode->vts_def - 8;
+ exposure_max = mode->vts_def - 8;
ov13858->exposure = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops,
V4L2_CID_EXPOSURE, OV13858_EXPOSURE_MIN,
@@ -1746,7 +1761,7 @@ static int ov13858_probe(struct i2c_client *client,
goto error_handler_free;
}
- ret = v4l2_async_register_subdev(&ov13858->sd);
+ ret = v4l2_async_register_subdev_sensor_common(&ov13858->sd);
if (ret < 0)
goto error_media_entity;
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index e6d0c1f64f0b..518868388d65 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -685,7 +685,7 @@ static int ov2640_mask_set(struct i2c_client *client,
static int ov2640_reset(struct i2c_client *client)
{
int ret;
- const struct regval_list reset_seq[] = {
+ static const struct regval_list reset_seq[] = {
{BANK_SEL, BANK_SEL_SENS},
{COM7, COM7_SRST},
ENDMARKER,
@@ -1097,18 +1097,17 @@ static int ov2640_probe(struct i2c_client *client,
return -EIO;
}
- priv = devm_kzalloc(&client->dev, sizeof(struct ov2640_priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&adapter->dev,
- "Failed to allocate memory for private data!\n");
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- }
if (client->dev.of_node) {
priv->clk = devm_clk_get(&client->dev, "xvclk");
if (IS_ERR(priv->clk))
- return -EPROBE_DEFER;
- clk_prepare_enable(priv->clk);
+ return PTR_ERR(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
}
ret = ov2640_probe_dt(client, priv);
@@ -1116,7 +1115,7 @@ static int ov2640_probe(struct i2c_client *client,
goto err_clk;
v4l2_i2c_subdev_init(&priv->subdev, client, &ov2640_subdev_ops);
- priv->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ priv->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
v4l2_ctrl_handler_init(&priv->hdl, 2);
v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 39a2269c0bee..c89ed6609738 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -2271,7 +2271,7 @@ static int ov5640_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops);
- sensor->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index 95ce90fdb876..34179d232a35 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -35,9 +35,18 @@
#define SENSOR_NAME "ov5647"
-#define OV5647_SW_RESET 0x0103
-#define OV5647_REG_CHIPID_H 0x300A
-#define OV5647_REG_CHIPID_L 0x300B
+#define MIPI_CTRL00_CLOCK_LANE_GATE BIT(5)
+#define MIPI_CTRL00_BUS_IDLE BIT(2)
+#define MIPI_CTRL00_CLOCK_LANE_DISABLE BIT(0)
+
+#define OV5647_SW_STANDBY 0x0100
+#define OV5647_SW_RESET 0x0103
+#define OV5647_REG_CHIPID_H 0x300A
+#define OV5647_REG_CHIPID_L 0x300B
+#define OV5640_REG_PAD_OUT 0x300D
+#define OV5647_REG_FRAME_OFF_NUMBER 0x4202
+#define OV5647_REG_MIPI_CTRL00 0x4800
+#define OV5647_REG_MIPI_CTRL14 0x4814
#define REG_TERM 0xfffe
#define VAL_TERM 0xfe
@@ -241,34 +250,43 @@ static int ov5647_set_virtual_channel(struct v4l2_subdev *sd, int channel)
u8 channel_id;
int ret;
- ret = ov5647_read(sd, 0x4814, &channel_id);
+ ret = ov5647_read(sd, OV5647_REG_MIPI_CTRL14, &channel_id);
if (ret < 0)
return ret;
channel_id &= ~(3 << 6);
- return ov5647_write(sd, 0x4814, channel_id | (channel << 6));
+ return ov5647_write(sd, OV5647_REG_MIPI_CTRL14, channel_id | (channel << 6));
}
static int ov5647_stream_on(struct v4l2_subdev *sd)
{
int ret;
- ret = ov5647_write(sd, 0x4202, 0x00);
+ ret = ov5647_write(sd, OV5647_REG_MIPI_CTRL00, MIPI_CTRL00_BUS_IDLE);
if (ret < 0)
return ret;
- return ov5647_write(sd, 0x300D, 0x00);
+ ret = ov5647_write(sd, OV5647_REG_FRAME_OFF_NUMBER, 0x00);
+ if (ret < 0)
+ return ret;
+
+ return ov5647_write(sd, OV5640_REG_PAD_OUT, 0x00);
}
static int ov5647_stream_off(struct v4l2_subdev *sd)
{
int ret;
- ret = ov5647_write(sd, 0x4202, 0x0f);
+ ret = ov5647_write(sd, OV5647_REG_MIPI_CTRL00, MIPI_CTRL00_CLOCK_LANE_GATE
+ | MIPI_CTRL00_BUS_IDLE | MIPI_CTRL00_CLOCK_LANE_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5647_write(sd, OV5647_REG_FRAME_OFF_NUMBER, 0x0f);
if (ret < 0)
return ret;
- return ov5647_write(sd, 0x300D, 0x01);
+ return ov5647_write(sd, OV5640_REG_PAD_OUT, 0x01);
}
static int set_sw_standby(struct v4l2_subdev *sd, bool standby)
@@ -276,7 +294,7 @@ static int set_sw_standby(struct v4l2_subdev *sd, bool standby)
int ret;
u8 rdval;
- ret = ov5647_read(sd, 0x0100, &rdval);
+ ret = ov5647_read(sd, OV5647_SW_STANDBY, &rdval);
if (ret < 0)
return ret;
@@ -285,7 +303,7 @@ static int set_sw_standby(struct v4l2_subdev *sd, bool standby)
else
rdval |= 0x01;
- return ov5647_write(sd, 0x0100, rdval);
+ return ov5647_write(sd, OV5647_SW_STANDBY, rdval);
}
static int __sensor_init(struct v4l2_subdev *sd)
@@ -294,7 +312,7 @@ static int __sensor_init(struct v4l2_subdev *sd)
u8 resetval, rdval;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- ret = ov5647_read(sd, 0x0100, &rdval);
+ ret = ov5647_read(sd, OV5647_SW_STANDBY, &rdval);
if (ret < 0)
return ret;
@@ -309,18 +327,21 @@ static int __sensor_init(struct v4l2_subdev *sd)
if (ret < 0)
return ret;
- ret = ov5647_read(sd, 0x0100, &resetval);
+ ret = ov5647_read(sd, OV5647_SW_STANDBY, &resetval);
if (ret < 0)
return ret;
if (!(resetval & 0x01)) {
dev_err(&client->dev, "Device was in SW standby");
- ret = ov5647_write(sd, 0x0100, 0x01);
+ ret = ov5647_write(sd, OV5647_SW_STANDBY, 0x01);
if (ret < 0)
return ret;
}
- return ov5647_write(sd, 0x4800, 0x04);
+ /*
+ * Stream off to put the clock lane into LP-11 state.
+ */
+ return ov5647_stream_off(sd);
}
static int ov5647_sensor_power(struct v4l2_subdev *sd, int on)
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 6f7a1d6d2200..9f9196568eb8 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -390,7 +390,10 @@ static const struct ov5670_reg mode_2592x1944_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_1296x972_regs[] = {
@@ -653,7 +656,10 @@ static const struct ov5670_reg mode_1296x972_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_648x486_regs[] = {
@@ -916,7 +922,10 @@ static const struct ov5670_reg mode_648x486_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_2560x1440_regs[] = {
@@ -1178,7 +1187,10 @@ static const struct ov5670_reg mode_2560x1440_regs[] = {
{0x5791, 0x06},
{0x5792, 0x00},
{0x5793, 0x52},
- {0x5794, 0xa3}
+ {0x5794, 0xa3},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_1280x720_regs[] = {
@@ -1441,7 +1453,10 @@ static const struct ov5670_reg mode_1280x720_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_640x360_regs[] = {
@@ -1704,7 +1719,10 @@ static const struct ov5670_reg mode_640x360_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const char * const ov5670_test_pattern_menu[] = {
@@ -2323,8 +2341,6 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
return ret;
}
- ov5670->streaming = true;
-
return 0;
}
@@ -2338,8 +2354,6 @@ static int ov5670_stop_streaming(struct ov5670 *ov5670)
if (ret)
dev_err(&client->dev, "%s failed to set stream\n", __func__);
- ov5670->streaming = false;
-
/* Return success even if it was an error, as there is nothing the
* caller can do about it.
*/
@@ -2370,6 +2384,7 @@ static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
ret = ov5670_stop_streaming(ov5670);
pm_runtime_put(&client->dev);
}
+ ov5670->streaming = enable;
goto unlock_and_return;
error:
@@ -2514,7 +2529,7 @@ static int ov5670_probe(struct i2c_client *client)
}
/* Async register for subdev */
- ret = v4l2_async_register_subdev(&ov5670->sd);
+ ret = v4l2_async_register_subdev_sensor_common(&ov5670->sd);
if (ret < 0) {
err_msg = "v4l2_async_register_subdev() error";
goto error_entity_cleanup;
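ov5670 now updates its streaming flag from s_stream itself, after the start/stop helpers have run, so a failed start no longer leaves the flag set. A minimal sketch of the kind of consumer that relies on this flag, assuming the usual system suspend/resume pair such sensor drivers carry (all names below are hypothetical):

static int __maybe_unused foo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct foo *foo = to_foo(sd);

	/* Stop the sensor, but remember that streaming was requested. */
	if (foo->streaming)
		foo_stop_streaming(foo);

	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct foo *foo = to_foo(sd);
	int ret = 0;

	/* Restart only if streaming was enabled before suspend. */
	if (foo->streaming)
		ret = foo_start_streaming(foo);
	if (ret)
		foo->streaming = false;

	return ret;
}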
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 768f2950ea36..8975d16b2b24 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -951,11 +951,8 @@ static int ov6650_probe(struct i2c_client *client,
int ret;
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&client->dev,
- "Failed to allocate memory for private data!\n");
+ if (!priv)
return -ENOMEM;
- }
v4l2_i2c_subdev_init(&priv->subdev, client, &ov6650_subdev_ops);
v4l2_ctrl_handler_init(&priv->hdl, 13);
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index e88549f0e704..950a0acf85fb 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -213,6 +213,9 @@ struct ov7670_devtype {
struct ov7670_format_struct; /* coming later */
struct ov7670_info {
struct v4l2_subdev sd;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct media_pad pad;
+#endif
struct v4l2_ctrl_handler hdl;
struct {
/* gain cluster */
@@ -229,6 +232,7 @@ struct ov7670_info {
struct v4l2_ctrl *saturation;
struct v4l2_ctrl *hue;
};
+ struct v4l2_mbus_framefmt format;
struct ov7670_format_struct *fmt; /* Current format */
struct clk *clk;
struct gpio_desc *resetb_gpio;
@@ -972,6 +976,9 @@ static int ov7670_try_fmt_internal(struct v4l2_subdev *sd,
fmt->width = wsize->width;
fmt->height = wsize->height;
fmt->colorspace = ov7670_formats[index].colorspace;
+
+ info->format = *fmt;
+
return 0;
}
@@ -985,6 +992,9 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
struct ov7670_format_struct *ovfmt;
struct ov7670_win_size *wsize;
struct ov7670_info *info = to_state(sd);
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ struct v4l2_mbus_framefmt *mbus_fmt;
+#endif
unsigned char com7;
int ret;
@@ -995,8 +1005,13 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
ret = ov7670_try_fmt_internal(sd, &format->format, NULL, NULL);
if (ret)
return ret;
- cfg->try_fmt = format->format;
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ *mbus_fmt = format->format;
return 0;
+#else
+ return -ENOTTY;
+#endif
}
ret = ov7670_try_fmt_internal(sd, &format->format, &ovfmt, &wsize);
@@ -1038,6 +1053,30 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
return 0;
}
+static int ov7670_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct ov7670_info *info = to_state(sd);
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ struct v4l2_mbus_framefmt *mbus_fmt;
+#endif
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ format->format = *mbus_fmt;
+ return 0;
+#else
+ return -ENOTTY;
+#endif
+ } else {
+ format->format = info->format;
+ }
+
+ return 0;
+}
+
/*
* Implement G/S_PARM. There is a "high quality" mode we could try
* to do someday; for now, we just do the frame rate tweak.
@@ -1505,6 +1544,46 @@ static int ov7670_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_regis
}
#endif
+static int ov7670_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ov7670_info *info = to_state(sd);
+
+ if (info->pwdn_gpio)
+ gpiod_set_value(info->pwdn_gpio, !on);
+ if (on && info->resetb_gpio) {
+ gpiod_set_value(info->resetb_gpio, 1);
+ usleep_range(500, 1000);
+ gpiod_set_value(info->resetb_gpio, 0);
+ usleep_range(3000, 5000);
+ }
+
+ return 0;
+}
+
+static void ov7670_get_default_format(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *format)
+{
+ struct ov7670_info *info = to_state(sd);
+
+ format->width = info->devtype->win_sizes[0].width;
+ format->height = info->devtype->win_sizes[0].height;
+ format->colorspace = info->fmt->colorspace;
+ format->code = info->fmt->mbus_code;
+ format->field = V4L2_FIELD_NONE;
+}
+
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+static int ov7670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_get_try_format(sd, fh->pad, 0);
+
+ ov7670_get_default_format(sd, format);
+
+ return 0;
+}
+#endif
+
/* ----------------------------------------------------------------------- */
static const struct v4l2_subdev_core_ops ov7670_core_ops = {
@@ -1525,6 +1604,7 @@ static const struct v4l2_subdev_pad_ops ov7670_pad_ops = {
.enum_frame_interval = ov7670_enum_frame_interval,
.enum_frame_size = ov7670_enum_frame_size,
.enum_mbus_code = ov7670_enum_mbus_code,
+ .get_fmt = ov7670_get_fmt,
.set_fmt = ov7670_set_fmt,
};
@@ -1534,6 +1614,12 @@ static const struct v4l2_subdev_ops ov7670_ops = {
.pad = &ov7670_pad_ops,
};
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+static const struct v4l2_subdev_internal_ops ov7670_subdev_internal_ops = {
+ .open = ov7670_open,
+};
+#endif
+
/* ----------------------------------------------------------------------- */
static const struct ov7670_devtype ov7670_devdata[] = {
@@ -1586,6 +1672,11 @@ static int ov7670_probe(struct i2c_client *client,
sd = &info->sd;
v4l2_i2c_subdev_init(sd, client, &ov7670_ops);
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ sd->internal_ops = &ov7670_subdev_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+#endif
+
info->clock_speed = 30; /* default: a guess */
if (client->dev.platform_data) {
struct ov7670_config *config = client->dev.platform_data;
@@ -1619,29 +1710,34 @@ static int ov7670_probe(struct i2c_client *client,
if (ret)
return ret;
- ret = ov7670_init_gpio(client, info);
- if (ret)
- goto clk_disable;
-
info->clock_speed = clk_get_rate(info->clk) / 1000000;
if (info->clock_speed < 10 || info->clock_speed > 48) {
ret = -EINVAL;
goto clk_disable;
}
+ ret = ov7670_init_gpio(client, info);
+ if (ret)
+ goto clk_disable;
+
+ ov7670_s_power(sd, 1);
+
/* Make sure it's an ov7670 */
ret = ov7670_detect(sd);
if (ret) {
v4l_dbg(1, debug, client,
"chip found @ 0x%x (%s) is not an ov7670 chip.\n",
client->addr << 1, client->adapter->name);
- goto clk_disable;
+ goto power_off;
}
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
info->devtype = &ov7670_devdata[id->driver_data];
info->fmt = &ov7670_formats[0];
+
+ ov7670_get_default_format(sd, &info->format);
+
info->clkrc = 0;
/* Set default frame rate to 30 fps */
@@ -1688,16 +1784,31 @@ static int ov7670_probe(struct i2c_client *client,
v4l2_ctrl_auto_cluster(2, &info->auto_exposure,
V4L2_EXPOSURE_MANUAL, false);
v4l2_ctrl_cluster(2, &info->saturation);
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ info->pad.flags = MEDIA_PAD_FL_SOURCE;
+ info->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&info->sd.entity, 1, &info->pad);
+ if (ret < 0)
+ goto hdl_free;
+#endif
+
v4l2_ctrl_handler_setup(&info->hdl);
ret = v4l2_async_register_subdev(&info->sd);
if (ret < 0)
- goto hdl_free;
+ goto entity_cleanup;
return 0;
+entity_cleanup:
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&info->sd.entity);
+#endif
hdl_free:
v4l2_ctrl_handler_free(&info->hdl);
+power_off:
+ ov7670_s_power(sd, 0);
clk_disable:
clk_disable_unprepare(info->clk);
return ret;
@@ -1712,6 +1823,10 @@ static int ov7670_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&info->hdl);
clk_disable_unprepare(info->clk);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&info->sd.entity);
+#endif
+ ov7670_s_power(sd, 0);
return 0;
}
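The ov7670 changes add a media pad, an initial active format and the standard TRY/ACTIVE format split. Condensed into one sketch with an illustrative driver name (the calls are the ones used in the diff), get_fmt serves TRY formats from the per-open pad config and ACTIVE formats from the driver state:

static int foo_get_fmt(struct v4l2_subdev *sd,
		       struct v4l2_subdev_pad_config *cfg,
		       struct v4l2_subdev_format *format)
{
	struct foo *foo = to_foo(sd);

	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
		/* TRY formats live in the per-file-handle pad config. */
		format->format = *v4l2_subdev_get_try_format(sd, cfg,
							      format->pad);
		return 0;
#else
		return -ENOTTY;
#endif
	}

	/* The ACTIVE format lives in the driver state. */
	format->format = foo->format;
	return 0;
}

The matching .open handler seeds the per-file-handle try format with the default, so the first TRY get/set sees a valid format.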
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 6ffb460e8589..69433e1e2533 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -985,7 +985,6 @@ static const struct v4l2_ctrl_ops ov965x_ctrl_ops = {
static const char * const test_pattern_menu[] = {
"Disabled",
"Color bars",
- NULL
};
static int ov965x_initialize_controls(struct ov965x *ov965x)
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 700f433261d0..e6b717b83b18 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -1239,6 +1239,10 @@ static int smiapp_power_on(struct device *dev)
sleep = SMIAPP_RESET_DELAY(sensor->hwcfg->ext_clk);
usleep_range(sleep, sleep);
+ mutex_lock(&sensor->mutex);
+
+ sensor->active = true;
+
/*
* Failures to respond to the address change command have been noticed.
* Those failures seem to be caused by the sensor requiring a longer
@@ -1313,7 +1317,7 @@ static int smiapp_power_on(struct device *dev)
rval = smiapp_write(sensor, SMIAPP_REG_U8_DPHY_CTRL,
SMIAPP_DPHY_CTRL_UI);
if (rval < 0)
- return rval;
+ goto out_cci_addr_fail;
rval = smiapp_call_quirk(sensor, post_poweron);
if (rval) {
@@ -1321,28 +1325,28 @@ static int smiapp_power_on(struct device *dev)
goto out_cci_addr_fail;
}
- /* Are we still initialising...? If yes, return here. */
- if (!sensor->pixel_array)
- return 0;
+ /* Are we still initialising...? If not, proceed with control setup. */
+ if (sensor->pixel_array) {
+ rval = __v4l2_ctrl_handler_setup(
+ &sensor->pixel_array->ctrl_handler);
+ if (rval)
+ goto out_cci_addr_fail;
- rval = v4l2_ctrl_handler_setup(&sensor->pixel_array->ctrl_handler);
- if (rval)
- goto out_cci_addr_fail;
+ rval = __v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
+ if (rval)
+ goto out_cci_addr_fail;
- rval = v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
- if (rval)
- goto out_cci_addr_fail;
+ rval = smiapp_update_mode(sensor);
+ if (rval < 0)
+ goto out_cci_addr_fail;
+ }
- mutex_lock(&sensor->mutex);
- rval = smiapp_update_mode(sensor);
mutex_unlock(&sensor->mutex);
- if (rval < 0)
- goto out_cci_addr_fail;
return 0;
out_cci_addr_fail:
-
+ mutex_unlock(&sensor->mutex);
gpiod_set_value(sensor->xshutdown, 0);
clk_disable_unprepare(sensor->ext_clk);
@@ -1360,6 +1364,8 @@ static int smiapp_power_off(struct device *dev)
struct smiapp_sensor *sensor =
container_of(ssd, struct smiapp_sensor, ssds[0]);
+ mutex_lock(&sensor->mutex);
+
/*
* Currently power/clock to lens are enabled/disabled separately
* but they are essentially the same signals. So if the sensor is
@@ -1372,6 +1378,10 @@ static int smiapp_power_off(struct device *dev)
SMIAPP_REG_U8_SOFTWARE_RESET,
SMIAPP_SOFTWARE_RESET);
+ sensor->active = false;
+
+ mutex_unlock(&sensor->mutex);
+
gpiod_set_value(sensor->xshutdown, 0);
clk_disable_unprepare(sensor->ext_clk);
usleep_range(5000, 5000);
@@ -1381,29 +1391,6 @@ static int smiapp_power_off(struct device *dev)
return 0;
}
-static int smiapp_set_power(struct v4l2_subdev *subdev, int on)
-{
- int rval;
-
- if (!on) {
- pm_runtime_mark_last_busy(subdev->dev);
- pm_runtime_put_autosuspend(subdev->dev);
-
- return 0;
- }
-
- rval = pm_runtime_get_sync(subdev->dev);
- if (rval >= 0)
- return 0;
-
- if (rval != -EBUSY && rval != -EAGAIN)
- pm_runtime_set_active(subdev->dev);
-
- pm_runtime_put(subdev->dev);
-
- return rval;
-}
-
/* -----------------------------------------------------------------------------
* Video stream management
*/
@@ -1560,19 +1547,31 @@ out:
static int smiapp_set_stream(struct v4l2_subdev *subdev, int enable)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
int rval;
if (sensor->streaming == enable)
return 0;
if (enable) {
+ rval = pm_runtime_get_sync(&client->dev);
+ if (rval < 0) {
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_put(&client->dev);
+ return rval;
+ }
+
sensor->streaming = true;
+
rval = smiapp_start_streaming(sensor);
if (rval < 0)
sensor->streaming = false;
} else {
rval = smiapp_stop_streaming(sensor);
sensor->streaming = false;
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
}
return rval;
@@ -2650,7 +2649,6 @@ static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
struct smiapp_subdev *ssd = to_smiapp_subdev(sd);
struct smiapp_sensor *sensor = ssd->sensor;
unsigned int i;
- int rval;
mutex_lock(&sensor->mutex);
@@ -2677,22 +2675,6 @@ static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_unlock(&sensor->mutex);
- rval = pm_runtime_get_sync(sd->dev);
- if (rval >= 0)
- return 0;
-
- if (rval != -EBUSY && rval != -EAGAIN)
- pm_runtime_set_active(sd->dev);
- pm_runtime_put(sd->dev);
-
- return rval;
-}
-
-static int smiapp_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
-{
- pm_runtime_mark_last_busy(sd->dev);
- pm_runtime_put_autosuspend(sd->dev);
-
return 0;
}
@@ -2700,10 +2682,6 @@ static const struct v4l2_subdev_video_ops smiapp_video_ops = {
.s_stream = smiapp_set_stream,
};
-static const struct v4l2_subdev_core_ops smiapp_core_ops = {
- .s_power = smiapp_set_power,
-};
-
static const struct v4l2_subdev_pad_ops smiapp_pad_ops = {
.enum_mbus_code = smiapp_enum_mbus_code,
.get_fmt = smiapp_get_format,
@@ -2718,7 +2696,6 @@ static const struct v4l2_subdev_sensor_ops smiapp_sensor_ops = {
};
static const struct v4l2_subdev_ops smiapp_ops = {
- .core = &smiapp_core_ops,
.video = &smiapp_video_ops,
.pad = &smiapp_pad_ops,
.sensor = &smiapp_sensor_ops,
@@ -2732,12 +2709,10 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_src_ops = {
.registered = smiapp_registered,
.unregistered = smiapp_unregistered,
.open = smiapp_open,
- .close = smiapp_close,
};
static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
.open = smiapp_open,
- .close = smiapp_close,
};
/* -----------------------------------------------------------------------------
@@ -2829,12 +2804,10 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
/* NVM size is not mandatory */
fwnode_property_read_u32(fwnode, "nokia,nvm-size", &hwcfg->nvm_size);
- rval = fwnode_property_read_u32(fwnode, "clock-frequency",
+ rval = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
&hwcfg->ext_clk);
- if (rval) {
- dev_warn(dev, "can't get clock-frequency\n");
- goto out_err;
- }
+ if (rval)
+ dev_info(dev, "can't get clock-frequency\n");
dev_dbg(dev, "nvm %d, clk %d, mode %d\n",
hwcfg->nvm_size, hwcfg->ext_clk, hwcfg->csi_signalling_mode);
@@ -2894,18 +2867,46 @@ static int smiapp_probe(struct i2c_client *client,
}
sensor->ext_clk = devm_clk_get(&client->dev, NULL);
- if (IS_ERR(sensor->ext_clk)) {
+ if (PTR_ERR(sensor->ext_clk) == -ENOENT) {
+ dev_info(&client->dev, "no clock defined, continuing...\n");
+ sensor->ext_clk = NULL;
+ } else if (IS_ERR(sensor->ext_clk)) {
dev_err(&client->dev, "could not get clock (%ld)\n",
PTR_ERR(sensor->ext_clk));
return -EPROBE_DEFER;
}
- rval = clk_set_rate(sensor->ext_clk, sensor->hwcfg->ext_clk);
- if (rval < 0) {
- dev_err(&client->dev,
- "unable to set clock freq to %u\n",
+ if (sensor->ext_clk) {
+ if (sensor->hwcfg->ext_clk) {
+ unsigned long rate;
+
+ rval = clk_set_rate(sensor->ext_clk,
+ sensor->hwcfg->ext_clk);
+ if (rval < 0) {
+ dev_err(&client->dev,
+ "unable to set clock freq to %u\n",
+ sensor->hwcfg->ext_clk);
+ return rval;
+ }
+
+ rate = clk_get_rate(sensor->ext_clk);
+ if (rate != sensor->hwcfg->ext_clk) {
+ dev_err(&client->dev,
+ "can't set clock freq, asked for %u but got %lu\n",
+ sensor->hwcfg->ext_clk, rate);
+ return -EINVAL;
+ }
+ } else {
+ sensor->hwcfg->ext_clk = clk_get_rate(sensor->ext_clk);
+ dev_dbg(&client->dev, "obtained clock freq %u\n",
+ sensor->hwcfg->ext_clk);
+ }
+ } else if (sensor->hwcfg->ext_clk) {
+ dev_dbg(&client->dev, "assuming clock freq %u\n",
sensor->hwcfg->ext_clk);
- return rval;
+ } else {
+ dev_err(&client->dev, "unable to obtain clock freq\n");
+ return -EINVAL;
}
sensor->xshutdown = devm_gpiod_get_optional(&client->dev, "xshutdown",
@@ -3092,7 +3093,7 @@ static int smiapp_probe(struct i2c_client *client,
if (rval < 0)
goto out_media_entity_cleanup;
- rval = v4l2_async_register_subdev(&sensor->src->sd);
+ rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
if (rval < 0)
goto out_media_entity_cleanup;
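With .s_power and the open/close PM hooks gone, smiapp ties runtime PM to streaming. The general shape of that pattern as a simplified sketch (hypothetical helper names; the diff's error handling around pm_runtime_get_sync() is more elaborate):

static int foo_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret;

	if (enable) {
		/* Runtime-resume (power up) the sensor before streaming. */
		ret = pm_runtime_get_sync(&client->dev);
		if (ret < 0) {
			pm_runtime_put_noidle(&client->dev);
			return ret;
		}

		ret = foo_start_streaming(sd);
		if (ret)
			pm_runtime_put(&client->dev);
		return ret;
	}

	ret = foo_stop_streaming(sd);
	/* Let autosuspend power the sensor down after a grace period. */
	pm_runtime_mark_last_busy(&client->dev);
	pm_runtime_put_autosuspend(&client->dev);

	return ret;
}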
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index d6779e35d36f..145653dc81da 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -231,6 +231,9 @@ int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val)
len != SMIAPP_REG_32BIT) || flags)
return -EINVAL;
+ if (!sensor->active)
+ return 0;
+
msg.addr = client->addr;
msg.flags = 0; /* Write */
msg.len = 2 + len;
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index f74d695018b9..e6a5ab402d7f 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -206,6 +206,7 @@ struct smiapp_sensor {
u8 hvflip_inv_mask; /* H/VFLIP inversion due to sensor orientation */
u8 frame_skip;
+ bool active; /* is the sensor powered on? */
u16 embedded_start; /* embedded data start line */
u16 embedded_end;
u16 image_start; /* image data start line */
diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c
index 0146d1f7aacb..c63948989688 100644
--- a/drivers/media/i2c/soc_camera/ov9640.c
+++ b/drivers/media/i2c/soc_camera/ov9640.c
@@ -335,8 +335,8 @@ static void ov9640_res_roundup(u32 *width, u32 *height)
{
int i;
enum { QQCIF, QQVGA, QCIF, QVGA, CIF, VGA, SXGA };
- int res_x[] = { 88, 160, 176, 320, 352, 640, 1280 };
- int res_y[] = { 72, 120, 144, 240, 288, 480, 960 };
+ static const int res_x[] = { 88, 160, 176, 320, 352, 640, 1280 };
+ static const int res_y[] = { 72, 120, 144, 240, 288, 480, 960 };
for (i = 0; i < ARRAY_SIZE(res_x); i++) {
if (res_x[i] >= *width && res_y[i] >= *height) {
@@ -675,12 +675,9 @@ static int ov9640_probe(struct i2c_client *client,
return -EINVAL;
}
- priv = devm_kzalloc(&client->dev, sizeof(struct ov9640_priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&client->dev,
- "Failed to allocate memory for private data!\n");
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- }
v4l2_i2c_subdev_init(&priv->subdev, client, &ov9640_subdev_ops);
diff --git a/drivers/media/i2c/soc_camera/ov9740.c b/drivers/media/i2c/soc_camera/ov9740.c
index cc07b7ae5407..755de2289c39 100644
--- a/drivers/media/i2c/soc_camera/ov9740.c
+++ b/drivers/media/i2c/soc_camera/ov9740.c
@@ -935,11 +935,9 @@ static int ov9740_probe(struct i2c_client *client,
return -EINVAL;
}
- priv = devm_kzalloc(&client->dev, sizeof(struct ov9740_priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&client->dev, "Failed to allocate private data!\n");
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- }
v4l2_i2c_subdev_init(&priv->subdev, client, &ov9740_subdev_ops);
v4l2_ctrl_handler_init(&priv->hdl, 13);
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index e6f5c363ccab..2b8181469b93 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -39,6 +39,7 @@
#include <linux/workqueue.h>
#include <linux/v4l2-dv-timings.h>
#include <linux/hdmi.h>
+#include <media/cec.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
@@ -63,6 +64,7 @@ MODULE_LICENSE("GPL");
#define I2C_MAX_XFER_SIZE (EDID_BLOCK_SIZE + 2)
+#define POLL_INTERVAL_CEC_MS 10
#define POLL_INTERVAL_MS 1000
static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
@@ -106,6 +108,8 @@ struct tc358743_state {
u8 csi_lanes_in_use;
struct gpio_desc *reset_gpio;
+
+ struct cec_adapter *cec_adap;
};
static void tc358743_enable_interrupts(struct v4l2_subdev *sd,
@@ -595,6 +599,7 @@ static void tc358743_set_ref_clk(struct v4l2_subdev *sd)
struct tc358743_platform_data *pdata = &state->pdata;
u32 sys_freq;
u32 lockdet_ref;
+ u32 cec_freq;
u16 fh_min;
u16 fh_max;
@@ -626,6 +631,15 @@ static void tc358743_set_ref_clk(struct v4l2_subdev *sd)
i2c_wr8_and_or(sd, NCO_F0_MOD, ~MASK_NCO_F0_MOD,
(pdata->refclk_hz == 27000000) ?
MASK_NCO_F0_MOD_27MHZ : 0x0);
+
+ /*
+ * Trial and error suggests that the default register value
+ * of 656 is for a 42 MHz reference clock. Use that to derive
+ * a new value based on the actual reference clock.
+ */
+ cec_freq = (656 * sys_freq) / 4200;
+ i2c_wr16(sd, CECHCLK, cec_freq);
+ i2c_wr16(sd, CECLCLK, cec_freq);
}
static void tc358743_set_csi_color_space(struct v4l2_subdev *sd)
@@ -814,11 +828,17 @@ static void tc358743_initial_setup(struct v4l2_subdev *sd)
struct tc358743_state *state = to_state(sd);
struct tc358743_platform_data *pdata = &state->pdata;
- /* CEC and IR are not supported by this driver */
- i2c_wr16_and_or(sd, SYSCTL, ~(MASK_CECRST | MASK_IRRST),
- (MASK_CECRST | MASK_IRRST));
+ /*
+ * IR is not supported by this driver.
+ * CEC is only enabled if needed.
+ */
+ i2c_wr16_and_or(sd, SYSCTL, ~(MASK_IRRST | MASK_CECRST),
+ (MASK_IRRST | MASK_CECRST));
tc358743_reset(sd, MASK_CTXRST | MASK_HDMIRST);
+#ifdef CONFIG_VIDEO_TC358743_CEC
+ tc358743_reset(sd, MASK_CECRST);
+#endif
tc358743_sleep_mode(sd, false);
i2c_wr16(sd, FIFOCTL, pdata->fifo_level);
@@ -842,6 +862,133 @@ static void tc358743_initial_setup(struct v4l2_subdev *sd)
i2c_wr8(sd, VOUT_SET3, MASK_VOUT_EXTCNT);
}
+/* --------------- CEC --------------- */
+
+#ifdef CONFIG_VIDEO_TC358743_CEC
+static int tc358743_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct tc358743_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+
+ i2c_wr32(sd, CECIMSK, enable ? MASK_CECTIM | MASK_CECRIM : 0);
+ i2c_wr32(sd, CECICLR, MASK_CECTICLR | MASK_CECRICLR);
+ i2c_wr32(sd, CECEN, enable);
+ if (enable)
+ i2c_wr32(sd, CECREN, MASK_CECREN);
+ return 0;
+}
+
+static int tc358743_cec_adap_monitor_all_enable(struct cec_adapter *adap,
+ bool enable)
+{
+ struct tc358743_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ u32 reg;
+
+ reg = i2c_rd32(sd, CECRCTL1);
+ if (enable)
+ reg |= MASK_CECOTH;
+ else
+ reg &= ~MASK_CECOTH;
+ i2c_wr32(sd, CECRCTL1, reg);
+ return 0;
+}
+
+static int tc358743_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+ struct tc358743_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ unsigned int la = 0;
+
+ if (log_addr != CEC_LOG_ADDR_INVALID) {
+ la = i2c_rd32(sd, CECADD);
+ la |= 1 << log_addr;
+ }
+ i2c_wr32(sd, CECADD, la);
+ return 0;
+}
+
+static int tc358743_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct tc358743_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ unsigned int i;
+
+ i2c_wr32(sd, CECTCTL,
+ (cec_msg_is_broadcast(msg) ? MASK_CECBRD : 0) |
+ (signal_free_time - 1));
+ for (i = 0; i < msg->len; i++)
+ i2c_wr32(sd, CECTBUF1 + i * 4,
+ msg->msg[i] | ((i == msg->len - 1) ? MASK_CECTEOM : 0));
+ i2c_wr32(sd, CECTEN, MASK_CECTEN);
+ return 0;
+}
+
+static const struct cec_adap_ops tc358743_cec_adap_ops = {
+ .adap_enable = tc358743_cec_adap_enable,
+ .adap_log_addr = tc358743_cec_adap_log_addr,
+ .adap_transmit = tc358743_cec_adap_transmit,
+ .adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable,
+};
+
+static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
+ bool *handled)
+{
+ struct tc358743_state *state = to_state(sd);
+ unsigned int cec_rxint, cec_txint;
+ unsigned int clr = 0;
+
+ cec_rxint = i2c_rd32(sd, CECRSTAT);
+ cec_txint = i2c_rd32(sd, CECTSTAT);
+
+ if (intstatus & MASK_CEC_RINT)
+ clr |= MASK_CECRICLR;
+ if (intstatus & MASK_CEC_TINT)
+ clr |= MASK_CECTICLR;
+ i2c_wr32(sd, CECICLR, clr);
+
+ if ((intstatus & MASK_CEC_TINT) && cec_txint) {
+ if (cec_txint & MASK_CECTIEND)
+ cec_transmit_attempt_done(state->cec_adap,
+ CEC_TX_STATUS_OK);
+ else if (cec_txint & MASK_CECTIAL)
+ cec_transmit_attempt_done(state->cec_adap,
+ CEC_TX_STATUS_ARB_LOST);
+ else if (cec_txint & MASK_CECTIACK)
+ cec_transmit_attempt_done(state->cec_adap,
+ CEC_TX_STATUS_NACK);
+ else if (cec_txint & MASK_CECTIUR) {
+ /*
+ * Not sure when this bit is set. Treat
+ * it as an error for now.
+ */
+ cec_transmit_attempt_done(state->cec_adap,
+ CEC_TX_STATUS_ERROR);
+ }
+ *handled = true;
+ }
+ if ((intstatus & MASK_CEC_RINT) &&
+ (cec_rxint & MASK_CECRIEND)) {
+ struct cec_msg msg = {};
+ unsigned int i;
+ unsigned int v;
+
+ v = i2c_rd32(sd, CECRCTR);
+ msg.len = v & 0x1f;
+ for (i = 0; i < msg.len; i++) {
+ v = i2c_rd32(sd, CECRBUF1 + i * 4);
+ msg.msg[i] = v & 0xff;
+ }
+ cec_received_msg(state->cec_adap, &msg);
+ *handled = true;
+ }
+ i2c_wr16(sd, INTSTATUS,
+ intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
+}
+
+#endif
+
/* --------------- IRQ --------------- */
static void tc358743_format_change(struct v4l2_subdev *sd)
@@ -1296,6 +1443,15 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
intstatus &= ~MASK_HDMI_INT;
}
+#ifdef CONFIG_VIDEO_TC358743_CEC
+ if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) {
+ tc358743_cec_isr(sd, intstatus, handled);
+ i2c_wr16(sd, INTSTATUS,
+ intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
+ intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT);
+ }
+#endif
+
if (intstatus & MASK_CSI_INT) {
u32 csi_int = i2c_rd32(sd, CSI_INT);
@@ -1325,13 +1481,18 @@ static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
return handled ? IRQ_HANDLED : IRQ_NONE;
}
-static void tc358743_irq_poll_timer(unsigned long arg)
+static void tc358743_irq_poll_timer(struct timer_list *t)
{
- struct tc358743_state *state = (struct tc358743_state *)arg;
+ struct tc358743_state *state = from_timer(state, t, timer);
+ unsigned int msecs;
schedule_work(&state->work_i2c_poll);
-
- mod_timer(&state->timer, jiffies + msecs_to_jiffies(POLL_INTERVAL_MS));
+ /*
+ * If CEC is present, we need to poll more frequently;
+ * otherwise we will miss CEC messages.
+ */
+ msecs = state->cec_adap ? POLL_INTERVAL_CEC_MS : POLL_INTERVAL_MS;
+ mod_timer(&state->timer, jiffies + msecs_to_jiffies(msecs));
}
static void tc358743_work_i2c_poll(struct work_struct *work)
@@ -1488,7 +1649,7 @@ static int tc358743_s_stream(struct v4l2_subdev *sd, int enable)
{
enable_stream(sd, enable);
if (!enable) {
- /* Put all lanes in PL-11 state (STOPSTATE) */
+ /* Put all lanes in LP-11 state (STOPSTATE) */
tc358743_set_csi(sd);
}
@@ -1621,6 +1782,8 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
{
struct tc358743_state *state = to_state(sd);
u16 edid_len = edid->blocks * EDID_BLOCK_SIZE;
+ u16 pa;
+ int err;
int i;
v4l2_dbg(2, debug, sd, "%s, pad %d, start block %d, blocks %d\n",
@@ -1638,6 +1801,12 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
edid->blocks = EDID_NUM_BLOCKS_MAX;
return -E2BIG;
}
+ pa = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
+ err = cec_phys_addr_validate(pa, &pa, NULL);
+ if (err)
+ return err;
+
+ cec_phys_addr_invalidate(state->cec_adap);
tc358743_disable_edid(sd);
@@ -1654,6 +1823,8 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
state->edid_blocks_written = edid->blocks;
+ cec_s_phys_addr(state->cec_adap, pa, false);
+
if (tx_5v_power_present(sd))
tc358743_enable_edid(sd);
@@ -1770,6 +1941,11 @@ static int tc358743_probe_of(struct tc358743_state *state)
goto free_endpoint;
}
+ if (endpoint->bus.mipi_csi2.num_data_lanes > 4) {
+ dev_err(dev, "invalid number of lanes\n");
+ goto free_endpoint;
+ }
+
state->bus = endpoint->bus.mipi_csi2;
ret = clk_prepare_enable(refclk);
@@ -1867,6 +2043,7 @@ static int tc358743_probe(struct i2c_client *client,
struct tc358743_state *state;
struct tc358743_platform_data *pdata = client->dev.platform_data;
struct v4l2_subdev *sd;
+ u16 irq_mask = MASK_HDMI_MSK | MASK_CSI_MSK;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -1929,6 +2106,7 @@ static int tc358743_probe(struct i2c_client *client,
}
state->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
err = media_entity_pads_init(&sd->entity, 1, &state->pad);
if (err < 0)
goto err_hdl;
@@ -1945,6 +2123,17 @@ static int tc358743_probe(struct i2c_client *client,
INIT_DELAYED_WORK(&state->delayed_work_enable_hotplug,
tc358743_delayed_work_enable_hotplug);
+#ifdef CONFIG_VIDEO_TC358743_CEC
+ state->cec_adap = cec_allocate_adapter(&tc358743_cec_adap_ops,
+ state, dev_name(&client->dev),
+ CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL, CEC_MAX_LOG_ADDRS);
+ if (IS_ERR(state->cec_adap)) {
+ err = PTR_ERR(state->cec_adap);
+ goto err_hdl;
+ }
+ irq_mask |= MASK_CEC_RMSK | MASK_CEC_TMSK;
+#endif
+
tc358743_initial_setup(sd);
tc358743_s_dv_timings(sd, &default_timing);
@@ -1964,15 +2153,22 @@ static int tc358743_probe(struct i2c_client *client,
} else {
INIT_WORK(&state->work_i2c_poll,
tc358743_work_i2c_poll);
- state->timer.data = (unsigned long)state;
- state->timer.function = tc358743_irq_poll_timer;
+ timer_setup(&state->timer, tc358743_irq_poll_timer, 0);
state->timer.expires = jiffies +
msecs_to_jiffies(POLL_INTERVAL_MS);
add_timer(&state->timer);
}
+ err = cec_register_adapter(state->cec_adap, &client->dev);
+ if (err < 0) {
+ pr_err("%s: failed to register the cec device\n", __func__);
+ cec_delete_adapter(state->cec_adap);
+ state->cec_adap = NULL;
+ goto err_work_queues;
+ }
+
tc358743_enable_interrupts(sd, tx_5v_power_present(sd));
- i2c_wr16(sd, INTMASK, ~(MASK_HDMI_MSK | MASK_CSI_MSK) & 0xffff);
+ i2c_wr16(sd, INTMASK, ~irq_mask);
err = v4l2_ctrl_handler_setup(sd->ctrl_handler);
if (err)
@@ -1984,6 +2180,7 @@ static int tc358743_probe(struct i2c_client *client,
return 0;
err_work_queues:
+ cec_unregister_adapter(state->cec_adap);
if (!state->i2c_client->irq)
flush_work(&state->work_i2c_poll);
cancel_delayed_work(&state->delayed_work_enable_hotplug);
@@ -2004,6 +2201,7 @@ static int tc358743_remove(struct i2c_client *client)
flush_work(&state->work_i2c_poll);
}
cancel_delayed_work(&state->delayed_work_enable_hotplug);
+ cec_unregister_adapter(state->cec_adap);
v4l2_async_unregister_subdev(sd);
v4l2_device_unregister_subdev(sd);
mutex_destroy(&state->confctl_mutex);
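tc358743's CEC support is spread over probe, the interrupt/poll paths, s_edid and remove. Pulled together into one sketch (only calls already used in the diff appear; the surrounding context is abridged), the adapter lifecycle is:

/* probe: allocate first, register once the hardware is set up */
state->cec_adap = cec_allocate_adapter(&tc358743_cec_adap_ops, state,
				       dev_name(&client->dev),
				       CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL,
				       CEC_MAX_LOG_ADDRS);
if (IS_ERR(state->cec_adap))
	return PTR_ERR(state->cec_adap);

err = cec_register_adapter(state->cec_adap, &client->dev);
if (err < 0) {
	/* cec_delete_adapter() is only for adapters not yet registered */
	cec_delete_adapter(state->cec_adap);
	return err;
}

/* s_edid: the physical address comes from the EDID written by userspace */
cec_s_phys_addr(state->cec_adap, pa, false);

/* remove: unregistering also frees a registered adapter */
cec_unregister_adapter(state->cec_adap);

cec_unregister_adapter() is a no-op for a NULL adapter, which is why remove can call it unconditionally even when CEC support is compiled out.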
diff --git a/drivers/media/i2c/tc358743_regs.h b/drivers/media/i2c/tc358743_regs.h
index 657ef50f215f..227b46471793 100644
--- a/drivers/media/i2c/tc358743_regs.h
+++ b/drivers/media/i2c/tc358743_regs.h
@@ -193,8 +193,98 @@
#define CSI_START 0x0518
#define MASK_STRT 0x00000001
-#define CECEN 0x0600
-#define MASK_CECEN 0x0001
+/* *** CEC (32 bit) *** */
+#define CECHCLK 0x0028 /* 16 bits */
+#define MASK_CECHCLK (0x7ff << 0)
+
+#define CECLCLK 0x002a /* 16 bits */
+#define MASK_CECLCLK (0x7ff << 0)
+
+#define CECEN 0x0600
+#define MASK_CECEN 0x0001
+
+#define CECADD 0x0604
+#define CECRST 0x0608
+#define MASK_CECRESET 0x0001
+
+#define CECREN 0x060c
+#define MASK_CECREN 0x0001
+
+#define CECRCTL1 0x0614
+#define MASK_CECACKDIS (1 << 24)
+#define MASK_CECHNC (3 << 20)
+#define MASK_CECLNC (7 << 16)
+#define MASK_CECMIN (7 << 12)
+#define MASK_CECMAX (7 << 8)
+#define MASK_CECDAT (7 << 4)
+#define MASK_CECTOUT (3 << 2)
+#define MASK_CECRIHLD (1 << 1)
+#define MASK_CECOTH (1 << 0)
+
+#define CECRCTL2 0x0618
+#define MASK_CECSWAV3 (7 << 12)
+#define MASK_CECSWAV2 (7 << 8)
+#define MASK_CECSWAV1 (7 << 4)
+#define MASK_CECSWAV0 (7 << 0)
+
+#define CECRCTL3 0x061c
+#define MASK_CECWAV3 (7 << 20)
+#define MASK_CECWAV2 (7 << 16)
+#define MASK_CECWAV1 (7 << 12)
+#define MASK_CECWAV0 (7 << 8)
+#define MASK_CECACKEI (1 << 4)
+#define MASK_CECMINEI (1 << 3)
+#define MASK_CECMAXEI (1 << 2)
+#define MASK_CECRSTEI (1 << 1)
+#define MASK_CECWAVEI (1 << 0)
+
+#define CECTEN 0x0620
+#define MASK_CECTBUSY (1 << 1)
+#define MASK_CECTEN (1 << 0)
+
+#define CECTCTL 0x0628
+#define MASK_CECSTRS (7 << 20)
+#define MASK_CECSPRD (7 << 16)
+#define MASK_CECDTRS (7 << 12)
+#define MASK_CECDPRD (15 << 8)
+#define MASK_CECBRD (1 << 4)
+#define MASK_CECFREE (15 << 0)
+
+#define CECRSTAT 0x062c
+#define MASK_CECRIWA (1 << 6)
+#define MASK_CECRIOR (1 << 5)
+#define MASK_CECRIACK (1 << 4)
+#define MASK_CECRIMIN (1 << 3)
+#define MASK_CECRIMAX (1 << 2)
+#define MASK_CECRISTA (1 << 1)
+#define MASK_CECRIEND (1 << 0)
+
+#define CECTSTAT 0x0630
+#define MASK_CECTIUR (1 << 4)
+#define MASK_CECTIACK (1 << 3)
+#define MASK_CECTIAL (1 << 2)
+#define MASK_CECTIEND (1 << 1)
+
+#define CECRBUF1 0x0634
+#define MASK_CECRACK (1 << 9)
+#define MASK_CECEOM (1 << 8)
+#define MASK_CECRBYTE (0xff << 0)
+
+#define CECTBUF1 0x0674
+#define MASK_CECTEOM (1 << 8)
+#define MASK_CECTBYTE (0xff << 0)
+
+#define CECRCTR 0x06b4
+#define MASK_CECRCTR (0x1f << 0)
+
+#define CECIMSK 0x06c0
+#define MASK_CECTIM (1 << 1)
+#define MASK_CECRIM (1 << 0)
+
+#define CECICLR 0x06cc
+#define MASK_CECTICLR (1 << 1)
+#define MASK_CECRICLR (1 << 0)
+
#define HDMI_INT0 0x8500
#define MASK_I_KEY 0x80
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index ce86534450ac..16a1e08ce06c 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -300,9 +300,9 @@ static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd)
* if available, ...
*/
-static void chip_thread_wake(unsigned long data)
+static void chip_thread_wake(struct timer_list *t)
{
- struct CHIPSTATE *chip = (struct CHIPSTATE*)data;
+ struct CHIPSTATE *chip = from_timer(chip, t, wt);
wake_up_process(chip->thread);
}
@@ -1995,7 +1995,7 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
v4l2_ctrl_handler_setup(&chip->hdl);
chip->thread = NULL;
- init_timer(&chip->wt);
+ timer_setup(&chip->wt, chip_thread_wake, 0);
if (desc->flags & CHIP_NEED_CHECKMODE) {
if (!desc->getrxsubchans || !desc->setaudmode) {
/* This shouldn't happen. Warn user, but keep working
@@ -2005,8 +2005,6 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
return 0;
}
/* start async thread */
- chip->wt.function = chip_thread_wake;
- chip->wt.data = (unsigned long)chip;
chip->thread = kthread_run(chip_thread, chip, "%s",
client->name);
if (IS_ERR(chip->thread)) {
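tvaudio here, tc358743 above and the bttv/cx18/ivtv hunks below are all converted from the old init_timer()/setup_timer() interface to timer_setup(). The common shape of the conversion, with illustrative names:

struct foo {
	struct timer_list timer;
	/* ... */
};

/* The callback now receives the timer; its container is recovered from it. */
static void foo_timeout(struct timer_list *t)
{
	struct foo *foo = from_timer(foo, t, timer);

	/* handle the timeout for foo, then possibly re-arm */
	mod_timer(&foo->timer, jiffies + msecs_to_jiffies(1000));
}

static void foo_init(struct foo *foo)
{
	timer_setup(&foo->timer, foo_timeout, 0);
	mod_timer(&foo->timer, jiffies + msecs_to_jiffies(1000));
}

Since the unsigned long data cookie is gone, anything the callback needs that is not reachable from the timer's container must be stored there explicitly, which is why struct bttv_ir gains a back pointer to its bttv below.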
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 2ace0410d277..f7c6d64e6031 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -214,12 +214,20 @@ void media_gobj_destroy(struct media_gobj *gobj)
gobj->mdev = NULL;
}
+/*
+ * TODO: Get rid of this.
+ */
+#define MEDIA_ENTITY_MAX_PADS 512
+
int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
struct media_pad *pads)
{
struct media_device *mdev = entity->graph_obj.mdev;
unsigned int i;
+ if (num_pads >= MEDIA_ENTITY_MAX_PADS)
+ return -E2BIG;
+
entity->num_pads = num_pads;
entity->pads = pads;
@@ -280,11 +288,6 @@ static struct media_entity *stack_pop(struct media_graph *graph)
#define link_top(en) ((en)->stack[(en)->top].link)
#define stack_top(en) ((en)->stack[(en)->top].entity)
-/*
- * TODO: Get rid of this.
- */
-#define MEDIA_ENTITY_MAX_PADS 512
-
/**
* media_graph_walk_init - Allocate resources for graph walk
* @graph: Media graph structure that will be used to walk the graph
diff --git a/drivers/media/pci/b2c2/Kconfig b/drivers/media/pci/b2c2/Kconfig
index 58761a21caa0..7b818d445f39 100644
--- a/drivers/media/pci/b2c2/Kconfig
+++ b/drivers/media/pci/b2c2/Kconfig
@@ -11,5 +11,5 @@ config DVB_B2C2_FLEXCOP_PCI_DEBUG
depends on DVB_B2C2_FLEXCOP_PCI
select DVB_B2C2_FLEXCOP_DEBUG
help
- Say Y if you want to enable the module option to control debug messages
- of all B2C2 FlexCop drivers.
+ Say Y if you want to enable the module option to control debug messages
+ of all B2C2 FlexCop drivers.
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 227086a2e99c..b366a7e1d976 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -3652,9 +3652,9 @@ bttv_irq_wakeup_vbi(struct bttv *btv, struct bttv_buffer *wakeup,
wake_up(&wakeup->vb.done);
}
-static void bttv_irq_timeout(unsigned long data)
+static void bttv_irq_timeout(struct timer_list *t)
{
- struct bttv *btv = (struct bttv *)data;
+ struct bttv *btv = from_timer(btv, t, timeout);
struct bttv_buffer_set old,new;
struct bttv_buffer *ovbi;
struct bttv_buffer *item;
@@ -4043,7 +4043,7 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
INIT_LIST_HEAD(&btv->capture);
INIT_LIST_HEAD(&btv->vcapture);
- setup_timer(&btv->timeout, bttv_irq_timeout, (unsigned long)btv);
+ timer_setup(&btv->timeout, bttv_irq_timeout, 0);
btv->i2c_rc = -1;
btv->tuner_type = UNSET;
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index 73d655d073d6..ac7674700685 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -133,10 +133,10 @@ void bttv_input_irq(struct bttv *btv)
ir_handle_key(btv);
}
-static void bttv_input_timer(unsigned long data)
+static void bttv_input_timer(struct timer_list *t)
{
- struct bttv *btv = (struct bttv*)data;
- struct bttv_ir *ir = btv->remote;
+ struct bttv_ir *ir = from_timer(ir, t, timer);
+ struct bttv *btv = ir->btv;
if (btv->c.type == BTTV_BOARD_ENLTV_FM_2)
ir_enltv_handle_key(btv);
@@ -189,9 +189,9 @@ static u32 bttv_rc5_decode(unsigned int code)
return rc5;
}
-static void bttv_rc5_timer_end(unsigned long data)
+static void bttv_rc5_timer_end(struct timer_list *t)
{
- struct bttv_ir *ir = (struct bttv_ir *)data;
+ struct bttv_ir *ir = from_timer(ir, t, timer);
ktime_t tv;
u32 gap, rc5, scancode;
u8 toggle, command, system;
@@ -296,15 +296,15 @@ static int bttv_rc5_irq(struct bttv *btv)
/* ---------------------------------------------------------------------- */
-static void bttv_ir_start(struct bttv *btv, struct bttv_ir *ir)
+static void bttv_ir_start(struct bttv_ir *ir)
{
if (ir->polling) {
- setup_timer(&ir->timer, bttv_input_timer, (unsigned long)btv);
+ timer_setup(&ir->timer, bttv_input_timer, 0);
ir->timer.expires = jiffies + msecs_to_jiffies(1000);
add_timer(&ir->timer);
} else if (ir->rc5_gpio) {
/* set timer_end for code completion */
- setup_timer(&ir->timer, bttv_rc5_timer_end, (unsigned long)ir);
+ timer_setup(&ir->timer, bttv_rc5_timer_end, 0);
ir->shift_by = 1;
ir->rc5_remote_gap = ir_rc5_remote_gap;
}
@@ -531,6 +531,7 @@ int bttv_input_init(struct bttv *btv)
/* init input device */
ir->dev = rc;
+ ir->btv = btv;
snprintf(ir->name, sizeof(ir->name), "bttv IR (card=%d)",
btv->c.type);
@@ -553,7 +554,7 @@ int bttv_input_init(struct bttv *btv)
rc->driver_name = MODULE_NAME;
btv->remote = ir;
- bttv_ir_start(btv, ir);
+ bttv_ir_start(ir);
/* all done */
err = rc_register_device(rc);
diff --git a/drivers/media/pci/bt8xx/bttv-vbi.c b/drivers/media/pci/bt8xx/bttv-vbi.c
index e77129c92fa0..67c6583f1d79 100644
--- a/drivers/media/pci/bt8xx/bttv-vbi.c
+++ b/drivers/media/pci/bt8xx/bttv-vbi.c
@@ -233,7 +233,7 @@ static void vbi_buffer_release(struct videobuf_queue *q, struct videobuf_buffer
bttv_dma_free(q,fh->btv,buf);
}
-struct videobuf_queue_ops bttv_vbi_qops = {
+const struct videobuf_queue_ops bttv_vbi_qops = {
.buf_setup = vbi_buffer_setup,
.buf_prepare = vbi_buffer_prepare,
.buf_queue = vbi_buffer_queue,
diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h
index 9efc4559fa8e..cb1b5e611130 100644
--- a/drivers/media/pci/bt8xx/bttvp.h
+++ b/drivers/media/pci/bt8xx/bttvp.h
@@ -122,6 +122,7 @@ struct bttv_format {
struct bttv_ir {
struct rc_dev *dev;
+ struct bttv *btv;
struct timer_list timer;
char name[32];
@@ -281,7 +282,7 @@ int bttv_try_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f);
int bttv_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f);
int bttv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f);
-extern struct videobuf_queue_ops bttv_vbi_qops;
+extern const struct videobuf_queue_ops bttv_vbi_qops;
/* ---------------------------------------------------------- */
/* bttv-gpio.c */
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 98b6cb9505d1..3f16cf3f6d74 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -738,9 +738,6 @@ static int cobalt_probe(struct pci_dev *pci_dev,
goto err_i2c;
}
- retval = v4l2_device_register_subdev_nodes(&cobalt->v4l2_dev);
- if (retval)
- goto err_i2c;
retval = cobalt_nodes_register(cobalt);
if (retval) {
cobalt_err("Error %d registering device nodes\n", retval);
@@ -767,8 +764,6 @@ err_pci:
err_wq:
destroy_workqueue(cobalt->irq_work_queues);
err:
- if (retval == 0)
- retval = -ENODEV;
cobalt_err("error %d on initialization\n", retval);
v4l2_device_unregister(&cobalt->v4l2_dev);
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 8654710464cc..8f314ca320c7 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -255,7 +255,7 @@ static void request_module_async(struct work_struct *work)
request_module("cx18-alsa");
/* Initialize cx18-alsa for this instance of the cx18 device */
- if (cx18_ext_init != NULL)
+ if (cx18_ext_init)
cx18_ext_init(dev);
}
@@ -291,11 +291,11 @@ int cx18_msleep_timeout(unsigned int msecs, int intr)
/* Release ioremapped memory */
static void cx18_iounmap(struct cx18 *cx)
{
- if (cx == NULL)
+ if (!cx)
return;
/* Release io memory */
- if (cx->enc_mem != NULL) {
+ if (cx->enc_mem) {
CX18_DEBUG_INFO("releasing enc_mem\n");
iounmap(cx->enc_mem);
cx->enc_mem = NULL;
@@ -649,15 +649,15 @@ static void cx18_process_options(struct cx18 *cx)
CX18_INFO("User specified %s card\n", cx->card->name);
else if (cx->options.cardtype != 0)
CX18_ERR("Unknown user specified type, trying to autodetect card\n");
- if (cx->card == NULL) {
+ if (!cx->card) {
if (cx->pci_dev->subsystem_vendor == CX18_PCI_ID_HAUPPAUGE) {
cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
CX18_INFO("Autodetected Hauppauge card\n");
}
}
- if (cx->card == NULL) {
+ if (!cx->card) {
for (i = 0; (cx->card = cx18_get_card(i)); i++) {
- if (cx->card->pci_list == NULL)
+ if (!cx->card->pci_list)
continue;
for (j = 0; cx->card->pci_list[j].device; j++) {
if (cx->pci_dev->device !=
@@ -676,7 +676,7 @@ static void cx18_process_options(struct cx18 *cx)
}
done:
- if (cx->card == NULL) {
+ if (!cx->card) {
cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
CX18_ERR("Unknown card: vendor/device: [%04x:%04x]\n",
cx->pci_dev->vendor, cx->pci_dev->device);
@@ -698,7 +698,7 @@ static int cx18_create_in_workq(struct cx18 *cx)
snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
cx->v4l2_dev.name);
cx->in_work_queue = alloc_ordered_workqueue("%s", 0, cx->in_workq_name);
- if (cx->in_work_queue == NULL) {
+ if (!cx->in_work_queue) {
CX18_ERR("Unable to create incoming mailbox handler thread\n");
return -ENOMEM;
}
@@ -909,12 +909,10 @@ static int cx18_probe(struct pci_dev *pci_dev,
return -ENOMEM;
}
- cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC);
- if (cx == NULL) {
- printk(KERN_ERR "cx18: cannot manage card %d, out of memory\n",
- i);
+ cx = kzalloc(sizeof(*cx), GFP_ATOMIC);
+ if (!cx)
return -ENOMEM;
- }
+
cx->pci_dev = pci_dev;
cx->instance = i;
@@ -1256,7 +1254,7 @@ static void cx18_cancel_out_work_orders(struct cx18 *cx)
{
int i;
for (i = 0; i < CX18_MAX_STREAMS; i++)
- if (&cx->streams[i].video_dev != NULL)
+ if (&cx->streams[i].video_dev)
cancel_work_sync(&cx->streams[i].out_work_order);
}
@@ -1301,7 +1299,7 @@ static void cx18_remove(struct pci_dev *pci_dev)
pci_disable_device(cx->pci_dev);
- if (cx->vbi.sliced_mpeg_data[0] != NULL)
+ if (cx->vbi.sliced_mpeg_data[0])
for (i = 0; i < CX18_VBI_FRAMES; i++)
kfree(cx->vbi.sliced_mpeg_data[i]);
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index 98467b2089fa..4f9c2395941b 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -684,9 +684,9 @@ int cx18_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
}
-void cx18_vb_timeout(unsigned long data)
+void cx18_vb_timeout(struct timer_list *t)
{
- struct cx18_stream *s = (struct cx18_stream *)data;
+ struct cx18_stream *s = from_timer(s, t, vb_timeout);
struct cx18_videobuf_buffer *buf;
unsigned long flags;
diff --git a/drivers/media/pci/cx18/cx18-fileops.h b/drivers/media/pci/cx18/cx18-fileops.h
index 58b00b433708..37ef34e866cb 100644
--- a/drivers/media/pci/cx18/cx18-fileops.h
+++ b/drivers/media/pci/cx18/cx18-fileops.h
@@ -29,7 +29,7 @@ void cx18_stop_capture(struct cx18_open_id *id, int gop_end);
void cx18_mute(struct cx18 *cx);
void cx18_unmute(struct cx18 *cx);
int cx18_v4l2_mmap(struct file *file, struct vm_area_struct *vma);
-void cx18_vb_timeout(unsigned long data);
+void cx18_vb_timeout(struct timer_list *t);
/* Shared with cx18-alsa module */
int cx18_claim_stream(struct cx18_open_id *id, int type);
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 8385411af641..f35f78d66985 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -282,7 +282,7 @@ static void cx18_stream_init(struct cx18 *cx, int type)
INIT_WORK(&s->out_work_order, cx18_out_work_handler);
INIT_LIST_HEAD(&s->vb_capture);
- setup_timer(&s->vb_timeout, cx18_vb_timeout, (unsigned long)s);
+ timer_setup(&s->vb_timeout, cx18_vb_timeout, 0);
spin_lock_init(&s->vb_lock);
if (type == CX18_ENC_STREAM_TYPE_YUV) {
spin_lock_init(&s->vbuf_q_lock);
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 78a8836d03e4..28eab9c518c5 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -1323,7 +1323,7 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
static void tbs_card_init(struct cx23885_dev *dev)
{
int i;
- const u8 buf[] = {
+ static const u8 buf[] = {
0xe0, 0x06, 0x66, 0x33, 0x65,
0x01, 0x17, 0x06, 0xde};
diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c
index 0f21467ae88e..ef863492c0ac 100644
--- a/drivers/media/pci/cx23885/cx23885-i2c.c
+++ b/drivers/media/pci/cx23885/cx23885-i2c.c
@@ -270,7 +270,7 @@ static const struct i2c_adapter cx23885_i2c_adap_template = {
.algo = &cx23885_i2c_algo_template,
};
-static struct i2c_client cx23885_i2c_client_template = {
+static const struct i2c_client cx23885_i2c_client_template = {
.name = "cx23885 internal",
};
diff --git a/drivers/media/pci/cx23885/cx23885-vbi.c b/drivers/media/pci/cx23885/cx23885-vbi.c
index 369e545cac04..70f9f13bded3 100644
--- a/drivers/media/pci/cx23885/cx23885-vbi.c
+++ b/drivers/media/pci/cx23885/cx23885-vbi.c
@@ -254,7 +254,7 @@ static void cx23885_stop_streaming(struct vb2_queue *q)
}
-struct vb2_ops cx23885_vbi_qops = {
+const struct vb2_ops cx23885_vbi_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_finish = buffer_finish,
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index cb714ab60d69..6aab713e0476 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -591,7 +591,7 @@ int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm);
extern int cx23885_vbi_fmt(struct file *file, void *priv,
struct v4l2_format *f);
extern void cx23885_vbi_timeout(unsigned long data);
-extern struct vb2_ops cx23885_vbi_qops;
+extern const struct vb2_ops cx23885_vbi_qops;
extern int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status);
/* cx23885-i2c.c */
diff --git a/drivers/media/pci/cx25821/cx25821-i2c.c b/drivers/media/pci/cx25821/cx25821-i2c.c
index 000049d3c71b..31479a41f359 100644
--- a/drivers/media/pci/cx25821/cx25821-i2c.c
+++ b/drivers/media/pci/cx25821/cx25821-i2c.c
@@ -291,7 +291,7 @@ static const struct i2c_adapter cx25821_i2c_adap_template = {
.algo = &cx25821_i2c_algo_template,
};
-static struct i2c_client cx25821_i2c_client_template = {
+static const struct i2c_client cx25821_i2c_client_template = {
.name = "cx25821 internal",
};
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index e02449bf2041..4e9953e61a12 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -593,11 +593,11 @@ static int get_key_pvr2000(struct IR_i2c *ir, enum rc_proto *protocol,
void cx88_i2c_init_ir(struct cx88_core *core)
{
struct i2c_board_info info;
- const unsigned short default_addr_list[] = {
+ static const unsigned short default_addr_list[] = {
0x18, 0x6b, 0x71,
I2C_CLIENT_END
};
- const unsigned short pvr2000_addr_list[] = {
+ static const unsigned short pvr2000_addr_list[] = {
0x18, 0x1a,
I2C_CLIENT_END
};
diff --git a/drivers/media/pci/ddbridge/ddbridge-io.h b/drivers/media/pci/ddbridge/ddbridge-io.h
index a4c6bbe09168..b3646c04f1a7 100644
--- a/drivers/media/pci/ddbridge/ddbridge-io.h
+++ b/drivers/media/pci/ddbridge/ddbridge-io.h
@@ -47,12 +47,12 @@ static inline void ddbwritel(struct ddb *dev, u32 val, u32 adr)
static inline void ddbcpyto(struct ddb *dev, u32 adr, void *src, long count)
{
- return memcpy_toio(dev->regs + adr, src, count);
+ memcpy_toio(dev->regs + adr, src, count);
}
static inline void ddbcpyfrom(struct ddb *dev, void *dst, u32 adr, long count)
{
- return memcpy_fromio(dst, dev->regs + adr, count);
+ memcpy_fromio(dst, dev->regs + adr, count);
}
static inline u32 safe_ddbreadl(struct ddb *dev, u32 adr)
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 54dcac4b2229..6b2ffdc96961 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -770,8 +770,7 @@ static int ivtv_init_struct1(struct ivtv *itv)
init_waitqueue_head(&itv->event_waitq);
init_waitqueue_head(&itv->vsync_waitq);
init_waitqueue_head(&itv->dma_waitq);
- setup_timer(&itv->dma_timer, ivtv_unfinished_dma,
- (unsigned long)itv);
+ timer_setup(&itv->dma_timer, ivtv_unfinished_dma, 0);
itv->cur_dma_stream = -1;
itv->cur_pio_stream = -1;
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index 5a35e366f4c0..893962ac85de 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -700,7 +700,7 @@ static const struct i2c_algo_bit_data ivtv_i2c_algo_template = {
.timeout = IVTV_ALGO_BIT_TIMEOUT * HZ, /* jiffies */
};
-static struct i2c_client ivtv_i2c_client_template = {
+static const struct i2c_client ivtv_i2c_client_template = {
.name = "ivtv internal",
};
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
index 6efe1f71262c..63b09bf73bf0 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.c
+++ b/drivers/media/pci/ivtv/ivtv-irq.c
@@ -1074,9 +1074,9 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
-void ivtv_unfinished_dma(unsigned long arg)
+void ivtv_unfinished_dma(struct timer_list *t)
{
- struct ivtv *itv = (struct ivtv *)arg;
+ struct ivtv *itv = from_timer(itv, t, dma_timer);
if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
return;
diff --git a/drivers/media/pci/ivtv/ivtv-irq.h b/drivers/media/pci/ivtv/ivtv-irq.h
index 1e84433737cc..bcab5f07d37f 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.h
+++ b/drivers/media/pci/ivtv/ivtv-irq.h
@@ -48,6 +48,6 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id);
void ivtv_irq_work_handler(struct kthread_work *work);
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock);
-void ivtv_unfinished_dma(unsigned long arg);
+void ivtv_unfinished_dma(struct timer_list *t);
#endif
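The ivtv hunks above (and several conversions below for netup_unidvb, saa7134, av7110 and tw686x) follow the timer API rework: callbacks now take a struct timer_list * and recover their containing object with from_timer(), a container_of() wrapper, instead of carrying an unsigned long cookie. A minimal sketch of the pattern, using hypothetical foo names:

	struct foo {
		struct timer_list dma_timer;
		/* ... driver state ... */
	};

	static void foo_timeout(struct timer_list *t)
	{
		/* from_timer() == container_of(t, struct foo, dma_timer) */
		struct foo *foo = from_timer(foo, t, dma_timer);

		/* handle the expired timer through "foo" */
	}

	static void foo_init(struct foo *foo)
	{
		/* no (unsigned long) cast of a private pointer any more */
		timer_setup(&foo->dma_timer, foo_timeout, 0);
		mod_timer(&foo->dma_timer, jiffies + HZ);
	}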
diff --git a/drivers/media/pci/mantis/hopper_cards.c b/drivers/media/pci/mantis/hopper_cards.c
index 11e987860b23..ed855e3df558 100644
--- a/drivers/media/pci/mantis/hopper_cards.c
+++ b/drivers/media/pci/mantis/hopper_cards.c
@@ -72,7 +72,7 @@ static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
struct mantis_ca *ca;
mantis = (struct mantis_pci *) dev_id;
- if (unlikely(mantis == NULL)) {
+ if (unlikely(!mantis)) {
dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
return IRQ_NONE;
}
@@ -161,11 +161,10 @@ static int hopper_pci_probe(struct pci_dev *pdev,
struct mantis_pci_drvdata *drvdata;
struct mantis_pci *mantis;
struct mantis_hwconfig *config;
- int err = 0;
+ int err;
- mantis = kzalloc(sizeof(struct mantis_pci), GFP_KERNEL);
- if (mantis == NULL) {
- printk(KERN_ERR "%s ERROR: Out of memory\n", __func__);
+ mantis = kzalloc(sizeof(*mantis), GFP_KERNEL);
+ if (!mantis) {
err = -ENOMEM;
goto fail0;
}
diff --git a/drivers/media/pci/mantis/mantis_cards.c b/drivers/media/pci/mantis/mantis_cards.c
index adc980d33711..4ce8a90d69dc 100644
--- a/drivers/media/pci/mantis/mantis_cards.c
+++ b/drivers/media/pci/mantis/mantis_cards.c
@@ -171,13 +171,11 @@ static int mantis_pci_probe(struct pci_dev *pdev,
struct mantis_pci_drvdata *drvdata;
struct mantis_pci *mantis;
struct mantis_hwconfig *config;
- int err = 0;
+ int err;
- mantis = kzalloc(sizeof(struct mantis_pci), GFP_KERNEL);
- if (mantis == NULL) {
- printk(KERN_ERR "%s ERROR: Out of memory\n", __func__);
+ mantis = kzalloc(sizeof(*mantis), GFP_KERNEL);
+ if (!mantis)
return -ENOMEM;
- }
drvdata = (void *)pci_id->driver_data;
mantis->num = devs;
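The mantis hunks switch to the sizeof(*ptr) allocation idiom and drop the out-of-memory printk: kzalloc(sizeof(*mantis), ...) stays correct if the pointer's type ever changes, !ptr is the preferred NULL test, and the allocator already warns on failure, so a driver-local message only duplicates it. In sketch form, with an illustrative type:

	struct foo_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);	/* size follows the pointee type */
	if (!dev)
		return -ENOMEM;		/* no printk: the MM core already reports the failure */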
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 49e047e4a81e..23999a8cef37 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1626,35 +1626,31 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
meye.mchip_dev = pcidev;
meye.grab_temp = vmalloc(MCHIP_NB_PAGES_MJPEG * PAGE_SIZE);
- if (!meye.grab_temp) {
- v4l2_err(v4l2_dev, "grab buffer allocation failed\n");
+ if (!meye.grab_temp)
goto outvmalloc;
- }
spin_lock_init(&meye.grabq_lock);
if (kfifo_alloc(&meye.grabq, sizeof(int) * MEYE_MAX_BUFNBRS,
- GFP_KERNEL)) {
- v4l2_err(v4l2_dev, "fifo allocation failed\n");
+ GFP_KERNEL))
goto outkfifoalloc1;
- }
+
spin_lock_init(&meye.doneq_lock);
if (kfifo_alloc(&meye.doneq, sizeof(int) * MEYE_MAX_BUFNBRS,
- GFP_KERNEL)) {
- v4l2_err(v4l2_dev, "fifo allocation failed\n");
+ GFP_KERNEL))
goto outkfifoalloc2;
- }
meye.vdev = meye_template;
meye.vdev.v4l2_dev = &meye.v4l2_dev;
- ret = -EIO;
- if ((ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1))) {
+ ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1);
+ if (ret) {
v4l2_err(v4l2_dev, "meye: unable to power on the camera\n");
v4l2_err(v4l2_dev, "meye: did you enable the camera in sonypi using the module options ?\n");
goto outsonypienable;
}
- if ((ret = pci_enable_device(meye.mchip_dev))) {
+ ret = pci_enable_device(meye.mchip_dev);
+ if (ret) {
v4l2_err(v4l2_dev, "meye: pci_enable_device failed\n");
goto outenabledev;
}
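The meye changes split the old "if ((ret = fn()))" construction into an assignment followed by a plain test, which is the form checkpatch asks for; the error value is still propagated, it is just no longer assigned inside the condition. Roughly (the error label below is hypothetical):

	ret = pci_enable_device(pdev);		/* assignment first ... */
	if (ret) {				/* ... then a plain test */
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_disable;		/* hypothetical unwind label */
	}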
diff --git a/drivers/media/pci/netup_unidvb/Kconfig b/drivers/media/pci/netup_unidvb/Kconfig
index 0ad37714c7fd..b663154d0cc4 100644
--- a/drivers/media/pci/netup_unidvb/Kconfig
+++ b/drivers/media/pci/netup_unidvb/Kconfig
@@ -1,8 +1,8 @@
config DVB_NETUP_UNIDVB
tristate "NetUP Universal DVB card support"
depends on DVB_CORE && VIDEO_DEV && PCI && I2C && SPI_MASTER
- select VIDEOBUF2_DVB
- select VIDEOBUF2_VMALLOC
+ select VIDEOBUF2_DVB
+ select VIDEOBUF2_VMALLOC
select DVB_HORUS3A if MEDIA_SUBDRV_AUTOSELECT
select DVB_ASCOT2E if MEDIA_SUBDRV_AUTOSELECT
select DVB_HELENE if MEDIA_SUBDRV_AUTOSELECT
@@ -10,8 +10,8 @@ config DVB_NETUP_UNIDVB
select DVB_CXD2841ER if MEDIA_SUBDRV_AUTOSELECT
---help---
Support for NetUP PCI express Universal DVB card.
- help
- Say Y when you want to support NetUP Dual Universal DVB card
- Card can receive two independent streams in following standards:
+
+	  Say Y when you want to support the NetUP Dual Universal DVB card.
+	  The card can receive two independent streams in the following standards:
DVB-S/S2, T/T2, C/C2
- Two CI slots available for CAM modules.
+ Two CI slots available for CAM modules.
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 60e6cd5b3a03..11829c0fa138 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -638,9 +638,9 @@ static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
spin_unlock_irqrestore(&dma->lock, flags);
}
-static void netup_unidvb_dma_timeout(unsigned long data)
+static void netup_unidvb_dma_timeout(struct timer_list *t)
{
- struct netup_dma *dma = (struct netup_dma *)data;
+ struct netup_dma *dma = from_timer(dma, t, timeout);
struct netup_unidvb_dev *ndev = dma->ndev;
dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
@@ -664,8 +664,7 @@ static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
spin_lock_init(&dma->lock);
INIT_WORK(&dma->work, netup_unidvb_dma_worker);
INIT_LIST_HEAD(&dma->free_buffers);
- setup_timer(&dma->timeout, netup_unidvb_dma_timeout,
- (unsigned long)dma);
+ timer_setup(&dma->timeout, netup_unidvb_dma_timeout, 0);
dma->ring_buffer_size = ndev->dma_size / 2;
dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 7976c5a12ca8..9e76de2411ae 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -338,9 +338,9 @@ void saa7134_buffer_next(struct saa7134_dev *dev,
}
}
-void saa7134_buffer_timeout(unsigned long data)
+void saa7134_buffer_timeout(struct timer_list *t)
{
- struct saa7134_dmaqueue *q = (struct saa7134_dmaqueue *)data;
+ struct saa7134_dmaqueue *q = from_timer(q, t, timeout);
struct saa7134_dev *dev = q->dev;
unsigned long flags;
@@ -378,7 +378,7 @@ void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q)
}
}
spin_unlock_irqrestore(&dev->slock, flags);
- saa7134_buffer_timeout((unsigned long)q); /* also calls del_timer(&q->timeout) */
+ saa7134_buffer_timeout(&q->timeout); /* also calls del_timer(&q->timeout) */
}
EXPORT_SYMBOL_GPL(saa7134_stop_streaming);
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index 8f2ed632840f..cf1e526de56a 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -345,7 +345,7 @@ static const struct i2c_adapter saa7134_adap_template = {
.algo = &saa7134_algo,
};
-static struct i2c_client saa7134_client_template = {
+static const struct i2c_client saa7134_client_template = {
.name = "saa7134 internal",
};
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index 9337e4615519..2d5abeddc079 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -447,10 +447,10 @@ void saa7134_input_irq(struct saa7134_dev *dev)
}
}
-static void saa7134_input_timer(unsigned long data)
+static void saa7134_input_timer(struct timer_list *t)
{
- struct saa7134_dev *dev = (struct saa7134_dev *)data;
- struct saa7134_card_ir *ir = dev->remote;
+ struct saa7134_card_ir *ir = from_timer(ir, t, timer);
+ struct saa7134_dev *dev = ir->dev->priv;
build_key(dev);
mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
@@ -507,8 +507,7 @@ static int __saa7134_ir_start(void *priv)
ir->running = true;
if (ir->polling) {
- setup_timer(&ir->timer, saa7134_input_timer,
- (unsigned long)dev);
+ timer_setup(&ir->timer, saa7134_input_timer, 0);
ir->timer.expires = jiffies + HZ;
add_timer(&ir->timer);
}
diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
index 7414878af9e0..2be703617e29 100644
--- a/drivers/media/pci/saa7134/saa7134-ts.c
+++ b/drivers/media/pci/saa7134/saa7134-ts.c
@@ -223,8 +223,7 @@ int saa7134_ts_init1(struct saa7134_dev *dev)
dev->ts.nr_packets = ts_nr_packets;
INIT_LIST_HEAD(&dev->ts_q.queue);
- setup_timer(&dev->ts_q.timeout, saa7134_buffer_timeout,
- (unsigned long)(&dev->ts_q));
+ timer_setup(&dev->ts_q.timeout, saa7134_buffer_timeout, 0);
dev->ts_q.dev = dev;
dev->ts_q.need_two = 1;
dev->ts_started = 0;
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index bcad9b2d9bb3..57bea543c39b 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -165,7 +165,7 @@ static int buffer_init(struct vb2_buffer *vb2)
return 0;
}
-struct vb2_ops saa7134_vbi_qops = {
+const struct vb2_ops saa7134_vbi_qops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
.buf_prepare = buffer_prepare,
@@ -181,8 +181,7 @@ struct vb2_ops saa7134_vbi_qops = {
int saa7134_vbi_init1(struct saa7134_dev *dev)
{
INIT_LIST_HEAD(&dev->vbi_q.queue);
- setup_timer(&dev->vbi_q.timeout, saa7134_buffer_timeout,
- (unsigned long)(&dev->vbi_q));
+ timer_setup(&dev->vbi_q.timeout, saa7134_buffer_timeout, 0);
dev->vbi_q.dev = dev;
if (vbibufs < 2)
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 51d42bbf969e..82d2a24644e4 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2145,8 +2145,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
dev->automute = 0;
INIT_LIST_HEAD(&dev->video_q.queue);
- setup_timer(&dev->video_q.timeout, saa7134_buffer_timeout,
- (unsigned long)(&dev->video_q));
+ timer_setup(&dev->video_q.timeout, saa7134_buffer_timeout, 0);
dev->video_q.dev = dev;
dev->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
dev->width = 720;
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 816b5282d671..39c36e6aefbe 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -773,7 +773,7 @@ int saa7134_buffer_queue(struct saa7134_dev *dev, struct saa7134_dmaqueue *q,
void saa7134_buffer_finish(struct saa7134_dev *dev, struct saa7134_dmaqueue *q,
unsigned int state);
void saa7134_buffer_next(struct saa7134_dev *dev, struct saa7134_dmaqueue *q);
-void saa7134_buffer_timeout(unsigned long data);
+void saa7134_buffer_timeout(struct timer_list *t);
void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q);
int saa7134_set_dmabits(struct saa7134_dev *dev);
@@ -870,7 +870,7 @@ int saa7134_ts_stop(struct saa7134_dev *dev);
/* ----------------------------------------------------------- */
/* saa7134-vbi.c */
-extern struct vb2_ops saa7134_vbi_qops;
+extern const struct vb2_ops saa7134_vbi_qops;
extern struct video_device saa7134_vbi_template;
int saa7134_vbi_init1(struct saa7134_dev *dev);
diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
index f708cab01fef..d31a2d4494d1 100644
--- a/drivers/media/pci/saa7146/hexium_gemini.c
+++ b/drivers/media/pci/saa7146/hexium_gemini.c
@@ -260,11 +260,10 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
DEB_EE("\n");
- hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
- if (NULL == hexium) {
- pr_err("not enough kernel memory in hexium_attach()\n");
+ hexium = kzalloc(sizeof(*hexium), GFP_KERNEL);
+ if (!hexium)
return -ENOMEM;
- }
+
dev->ext_priv = hexium;
/* enable i2c-port pins */
diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c
index 01f01580c7ca..043318aa19e2 100644
--- a/drivers/media/pci/saa7146/hexium_orion.c
+++ b/drivers/media/pci/saa7146/hexium_orion.c
@@ -219,11 +219,9 @@ static int hexium_probe(struct saa7146_dev *dev)
return -EFAULT;
}
- hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
- if (NULL == hexium) {
- pr_err("hexium_probe: not enough kernel memory\n");
+ hexium = kzalloc(sizeof(*hexium), GFP_KERNEL);
+ if (!hexium)
return -ENOMEM;
- }
/* enable i2c-port pins */
saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));
@@ -268,7 +266,9 @@ static int hexium_probe(struct saa7146_dev *dev)
/* check if this is an old hexium Orion card by looking at
a saa7110 at address 0x4e */
- if (0 == (err = i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_READ, 0x00, I2C_SMBUS_BYTE_DATA, &data))) {
+ err = i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_READ,
+ 0x00, I2C_SMBUS_BYTE_DATA, &data);
+ if (err == 0) {
pr_info("device is a Hexium HV-PCI6/Orion (old)\n");
/* we store the pointer in our private data field */
dev->ext_priv = hexium;
diff --git a/drivers/media/pci/saa7164/saa7164-buffer.c b/drivers/media/pci/saa7164/saa7164-buffer.c
index a0d2129c6ca9..c83b2e914dcb 100644
--- a/drivers/media/pci/saa7164/saa7164-buffer.c
+++ b/drivers/media/pci/saa7164/saa7164-buffer.c
@@ -98,11 +98,9 @@ struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_port *port,
goto ret;
}
- buf = kzalloc(sizeof(struct saa7164_buffer), GFP_KERNEL);
- if (!buf) {
- log_warn("%s() SAA_ERR_NO_RESOURCES\n", __func__);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
goto ret;
- }
buf->idx = -1;
buf->port = port;
@@ -283,7 +281,7 @@ struct saa7164_user_buffer *saa7164_buffer_alloc_user(struct saa7164_dev *dev,
{
struct saa7164_user_buffer *buf;
- buf = kzalloc(sizeof(struct saa7164_user_buffer), GFP_KERNEL);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return NULL;
diff --git a/drivers/media/pci/saa7164/saa7164-i2c.c b/drivers/media/pci/saa7164/saa7164-i2c.c
index 4bcde7c79dc3..6d13cbb9d010 100644
--- a/drivers/media/pci/saa7164/saa7164-i2c.c
+++ b/drivers/media/pci/saa7164/saa7164-i2c.c
@@ -84,7 +84,7 @@ static const struct i2c_adapter saa7164_i2c_adap_template = {
.algo = &saa7164_i2c_algo_template,
};
-static struct i2c_client saa7164_i2c_client_template = {
+static const struct i2c_client saa7164_i2c_client_template = {
.name = "saa7164 internal",
};
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index f46947d8adf8..6d415bdeef18 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -347,9 +347,9 @@ static int DvbDmxFilterCallback(u8 *buffer1, size_t buffer1_len,
static inline void print_time(char *s)
{
#ifdef DEBUG_TIMING
- struct timeval tv;
- do_gettimeofday(&tv);
- printk("%s: %d.%d\n", s, (int)tv.tv_sec, (int)tv.tv_usec);
+ struct timespec64 ts;
+ ktime_get_real_ts64(&ts);
+ printk("%s: %lld.%09ld\n", s, (s64)ts.tv_sec, ts.tv_nsec);
#endif
}
@@ -1224,7 +1224,7 @@ static int budget_start_feed(struct dvb_demux_feed *feed)
dprintk(2, "av7110: %p\n", budget);
spin_lock(&budget->feedlock1);
- feed->pusi_seen = 0; /* have a clean section start */
+ feed->pusi_seen = false; /* have a clean section start */
status = start_ts_capture(budget);
spin_unlock(&budget->feedlock1);
return status;
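av7110's debug print_time() moves from do_gettimeofday()/struct timeval (32-bit seconds on 32-bit targets, so it overflows in 2038, and only microsecond resolution) to ktime_get_real_ts64()/struct timespec64 (64-bit seconds, nanosecond resolution). A short sketch of the replacement pattern outside this driver's context; for measuring intervals a monotonic clock such as ktime_get() is generally preferable to wall-clock time:

	#include <linux/timekeeping.h>

	struct timespec64 ts;

	ktime_get_real_ts64(&ts);	/* wall-clock time, y2038-safe */
	pr_debug("now: %lld.%09ld\n", (s64)ts.tv_sec, ts.tv_nsec);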
diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h
index cd09fd6e6548..cbb150d6cbb1 100644
--- a/drivers/media/pci/ttpci/av7110.h
+++ b/drivers/media/pci/ttpci/av7110.h
@@ -94,7 +94,7 @@ struct infrared {
u8 inversion;
u16 last_key;
u16 last_toggle;
- u8 delay_timer_finished;
+ bool keypressed;
};
diff --git a/drivers/media/pci/ttpci/av7110_ir.c b/drivers/media/pci/ttpci/av7110_ir.c
index ca05198de2c2..ee414803e6b5 100644
--- a/drivers/media/pci/ttpci/av7110_ir.c
+++ b/drivers/media/pci/ttpci/av7110_ir.c
@@ -84,15 +84,16 @@ static u16 default_key_map [256] = {
/* key-up timer */
-static void av7110_emit_keyup(unsigned long parm)
+static void av7110_emit_keyup(struct timer_list *t)
{
- struct infrared *ir = (struct infrared *) parm;
+ struct infrared *ir = from_timer(ir, t, keyup_timer);
- if (!ir || !test_bit(ir->last_key, ir->input_dev->key))
+ if (!ir || !ir->keypressed)
return;
input_report_key(ir->input_dev, ir->last_key, 0);
input_sync(ir->input_dev);
+ ir->keypressed = false;
}
@@ -152,29 +153,18 @@ static void av7110_emit_key(unsigned long parm)
return;
}
- if (timer_pending(&ir->keyup_timer)) {
- del_timer(&ir->keyup_timer);
- if (ir->last_key != keycode || toggle != ir->last_toggle) {
- ir->delay_timer_finished = 0;
- input_event(ir->input_dev, EV_KEY, ir->last_key, 0);
- input_event(ir->input_dev, EV_KEY, keycode, 1);
- input_sync(ir->input_dev);
- } else if (ir->delay_timer_finished) {
- input_event(ir->input_dev, EV_KEY, keycode, 2);
- input_sync(ir->input_dev);
- }
- } else {
- ir->delay_timer_finished = 0;
- input_event(ir->input_dev, EV_KEY, keycode, 1);
- input_sync(ir->input_dev);
- }
+ if (ir->keypressed &&
+ (ir->last_key != keycode || toggle != ir->last_toggle))
+ input_event(ir->input_dev, EV_KEY, ir->last_key, 0);
+ input_event(ir->input_dev, EV_KEY, keycode, 1);
+ input_sync(ir->input_dev);
+
+ ir->keypressed = true;
ir->last_key = keycode;
ir->last_toggle = toggle;
- ir->keyup_timer.expires = jiffies + UP_TIMEOUT;
- add_timer(&ir->keyup_timer);
-
+ mod_timer(&ir->keyup_timer, jiffies + UP_TIMEOUT);
}
@@ -204,16 +194,6 @@ static void input_register_keys(struct infrared *ir)
ir->input_dev->keycodemax = ARRAY_SIZE(ir->key_map);
}
-
-/* called by the input driver after rep[REP_DELAY] ms */
-static void input_repeat_key(unsigned long parm)
-{
- struct infrared *ir = (struct infrared *) parm;
-
- ir->delay_timer_finished = 1;
-}
-
-
/* check for configuration changes */
int av7110_check_ir_config(struct av7110 *av7110, int force)
{
@@ -333,8 +313,7 @@ int av7110_ir_init(struct av7110 *av7110)
av_list[av_cnt++] = av7110;
av7110_check_ir_config(av7110, true);
- setup_timer(&av7110->ir.keyup_timer, av7110_emit_keyup,
- (unsigned long)&av7110->ir);
+ timer_setup(&av7110->ir.keyup_timer, av7110_emit_keyup, 0);
input_dev = input_allocate_device();
if (!input_dev)
@@ -365,8 +344,13 @@ int av7110_ir_init(struct av7110 *av7110)
input_free_device(input_dev);
return err;
}
- input_dev->timer.function = input_repeat_key;
- input_dev->timer.data = (unsigned long) &av7110->ir;
+
+ /*
+ * Input core's default autorepeat is 33 cps with 250 msec
+ * delay, let's adjust to numbers more suitable for remote
+ * control.
+ */
+ input_enable_softrepeat(input_dev, 250, 125);
if (av_cnt == 1) {
e = proc_create("av7110_ir", S_IWUSR, NULL, &av7110_ir_proc_fops);
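The av7110 IR rework replaces the hand-rolled repeat bookkeeping (the delay_timer_finished flag plus poking input_dev->timer directly) with a keypressed flag, mod_timer() on the key-up timer, and the input core's software autorepeat via input_enable_softrepeat(). A sketch of the reporting side under those assumptions; input_report_key() is the usual wrapper around input_event(dev, EV_KEY, ...):

	/* new key, or same key with a different toggle bit: release the old one first */
	if (ir->keypressed &&
	    (ir->last_key != keycode || toggle != ir->last_toggle))
		input_report_key(ir->input_dev, ir->last_key, 0);

	input_report_key(ir->input_dev, keycode, 1);
	input_sync(ir->input_dev);

	ir->keypressed = true;
	ir->last_key = keycode;
	ir->last_toggle = toggle;

	/* (re)arm the key-up timer; mod_timer() works whether or not it is pending */
	mod_timer(&ir->keyup_timer, jiffies + UP_TIMEOUT);

	/* and, at init time, let the input core generate repeats: 250 ms delay, 8 per second */
	input_enable_softrepeat(input_dev, 250, 125);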
diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
index 97499b2af714..b3dc45b91101 100644
--- a/drivers/media/pci/ttpci/budget-core.c
+++ b/drivers/media/pci/ttpci/budget-core.c
@@ -330,7 +330,7 @@ static int budget_start_feed(struct dvb_demux_feed *feed)
return -EINVAL;
spin_lock(&budget->feedlock);
- feed->pusi_seen = 0; /* have a clean section start */
+ feed->pusi_seen = false; /* have a clean section start */
if (budget->feeding++ == 0)
status = start_ts_capture(budget);
spin_unlock(&budget->feedlock);
diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c
index 336e2f9bc1b6..7fb3f07bf022 100644
--- a/drivers/media/pci/tw686x/tw686x-core.c
+++ b/drivers/media/pci/tw686x/tw686x-core.c
@@ -72,12 +72,12 @@ static const char *dma_mode_name(unsigned int mode)
}
}
-static int tw686x_dma_mode_get(char *buffer, struct kernel_param *kp)
+static int tw686x_dma_mode_get(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "%s", dma_mode_name(dma_mode));
}
-static int tw686x_dma_mode_set(const char *val, struct kernel_param *kp)
+static int tw686x_dma_mode_set(const char *val, const struct kernel_param *kp)
{
if (!strcasecmp(val, dma_mode_name(TW686X_DMA_MODE_MEMCPY)))
dma_mode = TW686X_DMA_MODE_MEMCPY;
@@ -126,9 +126,9 @@ void tw686x_enable_channel(struct tw686x_dev *dev, unsigned int channel)
* channels "too fast" which makes some TW686x devices very
* angry and freeze the CPU (see note 1).
*/
-static void tw686x_dma_delay(unsigned long data)
+static void tw686x_dma_delay(struct timer_list *t)
{
- struct tw686x_dev *dev = (struct tw686x_dev *)data;
+ struct tw686x_dev *dev = from_timer(dev, t, dma_delay_timer);
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
@@ -325,8 +325,7 @@ static int tw686x_probe(struct pci_dev *pci_dev,
goto iounmap;
}
- setup_timer(&dev->dma_delay_timer,
- tw686x_dma_delay, (unsigned long) dev);
+ timer_setup(&dev->dma_delay_timer, tw686x_dma_delay, 0);
/*
* This must be set right before initializing v4l2_dev.
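The tw686x hunk adjusts the module-parameter accessors to the current prototypes, which take a const struct kernel_param *; custom get/set hooks must match these signatures exactly or the build breaks. A sketch with a hypothetical parameter, wired up through module_param_cb():

	static int foo_mode;

	static int foo_mode_set(const char *val, const struct kernel_param *kp)
	{
		/* parse "val" and update foo_mode; 0 on success */
		return kstrtoint(val, 0, &foo_mode);
	}

	static int foo_mode_get(char *buffer, const struct kernel_param *kp)
	{
		return sprintf(buffer, "%d", foo_mode);
	}

	static const struct kernel_param_ops foo_mode_ops = {
		.set = foo_mode_set,
		.get = foo_mode_get,
	};
	module_param_cb(mode, &foo_mode_ops, NULL, 0644);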
diff --git a/drivers/media/pci/zoran/zoran_card.h b/drivers/media/pci/zoran/zoran_card.h
index 81cba177cd90..0cdb7d34926d 100644
--- a/drivers/media/pci/zoran/zoran_card.h
+++ b/drivers/media/pci/zoran/zoran_card.h
@@ -37,7 +37,7 @@ extern int zr36067_debug;
/* Anybody who uses more than four? */
#define BUZ_MAX 4
-extern struct video_device zoran_template;
+extern const struct video_device zoran_template;
extern int zoran_check_jpg_settings(struct zoran *zr,
struct zoran_jpg_settings *settings,
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index a11cb501c550..d07840072337 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -2839,7 +2839,7 @@ static const struct v4l2_file_operations zoran_fops = {
.poll = zoran_poll,
};
-struct video_device zoran_template = {
+const struct video_device zoran_template = {
.name = ZORAN_NAME,
.fops = &zoran_fops,
.ioctl_ops = &zoran_ioctl_ops,
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 3c4f7fa7b9d8..fd0c99859d6f 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -458,6 +458,21 @@ config VIDEO_RENESAS_VSP1
To compile this driver as a module, choose M here: the module
will be called vsp1.
+config VIDEO_ROCKCHIP_RGA
+ tristate "Rockchip Raster 2d Graphic Acceleration Unit"
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select VIDEOBUF2_DMA_SG
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+	  This is a V4L2 driver for the Rockchip SoC RGA 2D graphics accelerator.
+ Rockchip RGA is a separate 2D raster graphic acceleration unit.
+ It accelerates 2D graphics operations, such as point/line drawing,
+ image scaling, rotation, BitBLT, alpha blending and image blur/sharpness.
+
+	  To compile this driver as a module, choose M here.
+
config VIDEO_TI_VPE
tristate "TI VPE (Video Processing Engine) driver"
depends on VIDEO_DEV && VIDEO_V4L2
@@ -553,6 +568,16 @@ config VIDEO_MESON_AO_CEC
This is a driver for Amlogic Meson SoCs AO CEC interface. It uses the
generic CEC framework interface.
CEC bus is present in the HDMI connector and enables communication
+
+config CEC_GPIO
+ tristate "Generic GPIO-based CEC driver"
+ depends on PREEMPT
+ select CEC_CORE
+ select CEC_PIN
+ select GPIOLIB
+ ---help---
+ This is a generic GPIO-based CEC driver.
+ The CEC bus is present in the HDMI connector and enables communication
between compatible devices.
config VIDEO_SAMSUNG_S5P_CEC
@@ -589,6 +614,17 @@ config VIDEO_STM32_HDMI_CEC
CEC bus is present in the HDMI connector and enables communication
between compatible devices.
+config VIDEO_TEGRA_HDMI_CEC
+ tristate "Tegra HDMI CEC driver"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ ---help---
+ This is a driver for the Tegra HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ The CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
endif #CEC_PLATFORM_DRIVERS
menuconfig SDR_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 327f80a6f82c..003b0bb2cddf 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -27,6 +27,8 @@ obj-$(CONFIG_VIDEO_CODA) += coda/
obj-$(CONFIG_VIDEO_SH_VEU) += sh_veu.o
+obj-$(CONFIG_CEC_GPIO) += cec-gpio/
+
obj-$(CONFIG_VIDEO_MEM2MEM_DEINTERLACE) += m2m-deinterlace.o
obj-$(CONFIG_VIDEO_MUX) += video-mux.o
@@ -47,6 +49,8 @@ obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += sti/cec/
obj-$(CONFIG_VIDEO_STI_DELTA) += sti/delta/
+obj-$(CONFIG_VIDEO_TEGRA_HDMI_CEC) += tegra-cec/
+
obj-y += stm32/
obj-y += blackfin/
@@ -63,6 +67,8 @@ obj-$(CONFIG_VIDEO_RENESAS_FDP1) += rcar_fdp1.o
obj-$(CONFIG_VIDEO_RENESAS_JPU) += rcar_jpu.o
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1/
+obj-$(CONFIG_VIDEO_ROCKCHIP_RGA) += rockchip/rga/
+
obj-y += omap/
obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x/
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index dfcc484cab89..0997c640191d 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -2417,6 +2417,11 @@ static int vpfe_async_complete(struct v4l2_async_notifier *notifier)
return vpfe_probe_complete(vpfe);
}
+static const struct v4l2_async_notifier_operations vpfe_async_ops = {
+ .bound = vpfe_async_bound,
+ .complete = vpfe_async_complete,
+};
+
static struct vpfe_config *
vpfe_get_pdata(struct platform_device *pdev)
{
@@ -2590,8 +2595,7 @@ static int vpfe_probe(struct platform_device *pdev)
vpfe->notifier.subdevs = vpfe->cfg->asd;
vpfe->notifier.num_subdevs = ARRAY_SIZE(vpfe->cfg->asd);
- vpfe->notifier.bound = vpfe_async_bound;
- vpfe->notifier.complete = vpfe_async_complete;
+ vpfe->notifier.ops = &vpfe_async_ops;
ret = v4l2_async_notifier_register(&vpfe->v4l2_dev,
&vpfe->notifier);
if (ret) {
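The am437x change (repeated below for atmel-isc and atmel-isi) follows the v4l2-async rework: the bound/unbind/complete callbacks now live in a shared const struct v4l2_async_notifier_operations that the notifier points at, instead of being copied into each notifier instance. Condensed, using the vpfe names from this hunk:

	static const struct v4l2_async_notifier_operations vpfe_async_ops = {
		.bound		= vpfe_async_bound,
		.complete	= vpfe_async_complete,
	};

	/* at probe time, instead of assigning individual callbacks: */
	vpfe->notifier.ops = &vpfe_async_ops;
	ret = v4l2_async_notifier_register(&vpfe->v4l2_dev, &vpfe->notifier);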
diff --git a/drivers/media/platform/atmel/atmel-isc-regs.h b/drivers/media/platform/atmel/atmel-isc-regs.h
index e6cef966dcbf..2aadc19235ea 100644
--- a/drivers/media/platform/atmel/atmel-isc-regs.h
+++ b/drivers/media/platform/atmel/atmel-isc-regs.h
@@ -43,6 +43,7 @@
/* ISC Clock Status Register */
#define ISC_CLKSR 0x00000020
+#define ISC_CLKSR_SIP BIT(31)
#define ISC_CLK(n) BIT(n)
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
index d7103c5f92c3..13f1c1c797b0 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -65,6 +65,7 @@ struct isc_clk {
struct clk_hw hw;
struct clk *clk;
struct regmap *regmap;
+ spinlock_t lock;
u8 id;
u8 parent_id;
u32 div;
@@ -82,41 +83,69 @@ struct isc_subdev_entity {
struct v4l2_subdev *sd;
struct v4l2_async_subdev *asd;
struct v4l2_async_notifier notifier;
- struct v4l2_subdev_pad_config *config;
u32 pfe_cfg0;
struct list_head list;
};
+/* Indicate the format is generated by the sensor */
+#define FMT_FLAG_FROM_SENSOR BIT(0)
+/* Indicate the format is produced by ISC itself */
+#define FMT_FLAG_FROM_CONTROLLER BIT(1)
+/* Indicate a Raw Bayer format */
+#define FMT_FLAG_RAW_FORMAT BIT(2)
+
+#define FMT_FLAG_RAW_FROM_SENSOR (FMT_FLAG_FROM_SENSOR | \
+ FMT_FLAG_RAW_FORMAT)
+
/*
* struct isc_format - ISC media bus format information
* @fourcc: Fourcc code for this format
* @mbus_code: V4L2 media bus format code.
+ * @flags:		Indicates whether the format comes from the sensor or is converted by the controller
* @bpp: Bits per pixel (when stored in memory)
- * @reg_bps: reg value for bits per sample
* (when transferred over a bus)
- * @pipeline: pipeline switch
* @sd_support: Subdev supports this format
* @isc_support: ISC can convert raw format to this format
*/
+
struct isc_format {
u32 fourcc;
u32 mbus_code;
+ u32 flags;
u8 bpp;
- u32 reg_bps;
- u32 reg_bay_cfg;
- u32 reg_rlp_mode;
- u32 reg_dcfg_imode;
- u32 reg_dctrl_dview;
-
- u32 pipeline;
-
bool sd_support;
bool isc_support;
};
+/* Pipeline bitmap */
+#define WB_ENABLE BIT(0)
+#define CFA_ENABLE BIT(1)
+#define CC_ENABLE BIT(2)
+#define GAM_ENABLE BIT(3)
+#define GAM_BENABLE BIT(4)
+#define GAM_GENABLE BIT(5)
+#define GAM_RENABLE BIT(6)
+#define CSC_ENABLE BIT(7)
+#define CBC_ENABLE BIT(8)
+#define SUB422_ENABLE BIT(9)
+#define SUB420_ENABLE BIT(10)
+
+#define GAM_ENABLES (GAM_RENABLE | GAM_GENABLE | GAM_BENABLE | GAM_ENABLE)
+
+struct fmt_config {
+ u32 fourcc;
+
+ u32 pfe_cfg0_bps;
+ u32 cfa_baycfg;
+ u32 rlp_cfg_mode;
+ u32 dcfg_imode;
+ u32 dctrl_dview;
+
+ u32 bits_pipeline;
+};
#define HIST_ENTRIES 512
#define HIST_BAYER (ISC_HIS_CFG_MODE_B + 1)
@@ -181,80 +210,320 @@ struct isc_device {
struct list_head subdev_entities;
};
-#define RAW_FMT_IND_START 0
-#define RAW_FMT_IND_END 11
-#define ISC_FMT_IND_START 12
-#define ISC_FMT_IND_END 14
-
-static struct isc_format isc_formats[] = {
- { V4L2_PIX_FMT_SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8, 8,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8, 8,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_GBGB, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8, 8,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_GRGR, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8, 8,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_RGRG, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
-
- { V4L2_PIX_FMT_SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10, 16,
- ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10, 16,
- ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_GBGB, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10, 16,
- ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_GRGR, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10, 16,
- ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_RGRG, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
-
- { V4L2_PIX_FMT_SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12, 16,
- ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12, 16,
- ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_GBGB, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12, 16,
- ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_GRGR, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12, 16,
- ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_RGRG, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
-
- { V4L2_PIX_FMT_YUV420, 0x0, 12,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_YYCC,
- ISC_DCFG_IMODE_YC420P, ISC_DCTRL_DVIEW_PLANAR, 0x7fb,
- false, false },
- { V4L2_PIX_FMT_YUV422P, 0x0, 16,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_YYCC,
- ISC_DCFG_IMODE_YC422P, ISC_DCTRL_DVIEW_PLANAR, 0x3fb,
- false, false },
- { V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_RGB565_2X8_LE, 16,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_RGB565,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x7b,
- false, false },
-
- { V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_YUYV8_2X8, 16,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
+static struct isc_format formats_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .mbus_code = 0x0,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 12,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .mbus_code = 0x0,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .mbus_code = MEDIA_BUS_FMT_Y8_1X8,
+ .flags = FMT_FLAG_FROM_CONTROLLER |
+ FMT_FLAG_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .mbus_code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .mbus_code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .mbus_code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 32,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .flags = FMT_FLAG_FROM_CONTROLLER |
+ FMT_FLAG_FROM_SENSOR,
+ .bpp = 16,
+ },
+};
+
+struct fmt_config fmt_configs_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC,
+ .dcfg_imode = ISC_DCFG_IMODE_YC420P,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PLANAR,
+ .bits_pipeline = SUB420_ENABLE | SUB422_ENABLE |
+ CBC_ENABLE | CSC_ENABLE |
+ GAM_ENABLES |
+ CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC,
+ .dcfg_imode = ISC_DCFG_IMODE_YC422P,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PLANAR,
+ .bits_pipeline = SUB422_ENABLE |
+ CBC_ENABLE | CSC_ENABLE |
+ GAM_ENABLES |
+ CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = CBC_ENABLE | CSC_ENABLE |
+ GAM_ENABLES |
+ CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB444,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB555,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_RGB565,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB32,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED32,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0
+ },
};
#define GAMMA_MAX 2
@@ -307,31 +576,80 @@ module_param(sensor_preferred, uint, 0644);
MODULE_PARM_DESC(sensor_preferred,
"Sensor is preferred to output the specified format (1-on 0-off), default 1");
+static int isc_wait_clk_stable(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ struct regmap *regmap = isc_clk->regmap;
+ unsigned long timeout = jiffies + usecs_to_jiffies(1000);
+ unsigned int status;
+
+ while (time_before(jiffies, timeout)) {
+ regmap_read(regmap, ISC_CLKSR, &status);
+ if (!(status & ISC_CLKSR_SIP))
+ return 0;
+
+ usleep_range(10, 250);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int isc_clk_prepare(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_get_sync(isc_clk->dev);
+
+ return isc_wait_clk_stable(hw);
+}
+
+static void isc_clk_unprepare(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ isc_wait_clk_stable(hw);
+
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_put_sync(isc_clk->dev);
+}
+
static int isc_clk_enable(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
u32 id = isc_clk->id;
struct regmap *regmap = isc_clk->regmap;
+ unsigned long flags;
+ unsigned int status;
dev_dbg(isc_clk->dev, "ISC CLK: %s, div = %d, parent id = %d\n",
__func__, isc_clk->div, isc_clk->parent_id);
+ spin_lock_irqsave(&isc_clk->lock, flags);
regmap_update_bits(regmap, ISC_CLKCFG,
ISC_CLKCFG_DIV_MASK(id) | ISC_CLKCFG_SEL_MASK(id),
(isc_clk->div << ISC_CLKCFG_DIV_SHIFT(id)) |
(isc_clk->parent_id << ISC_CLKCFG_SEL_SHIFT(id)));
regmap_write(regmap, ISC_CLKEN, ISC_CLK(id));
+ spin_unlock_irqrestore(&isc_clk->lock, flags);
- return 0;
+ regmap_read(regmap, ISC_CLKSR, &status);
+ if (status & ISC_CLK(id))
+ return 0;
+ else
+ return -EINVAL;
}
static void isc_clk_disable(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
u32 id = isc_clk->id;
+ unsigned long flags;
+ spin_lock_irqsave(&isc_clk->lock, flags);
regmap_write(isc_clk->regmap, ISC_CLKDIS, ISC_CLK(id));
+ spin_unlock_irqrestore(&isc_clk->lock, flags);
}
static int isc_clk_is_enabled(struct clk_hw *hw)
@@ -339,8 +657,14 @@ static int isc_clk_is_enabled(struct clk_hw *hw)
struct isc_clk *isc_clk = to_isc_clk(hw);
u32 status;
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_get_sync(isc_clk->dev);
+
regmap_read(isc_clk->regmap, ISC_CLKSR, &status);
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_put_sync(isc_clk->dev);
+
return status & ISC_CLK(isc_clk->id) ? 1 : 0;
}
@@ -447,6 +771,8 @@ static int isc_clk_set_rate(struct clk_hw *hw,
}
static const struct clk_ops isc_clk_ops = {
+ .prepare = isc_clk_prepare,
+ .unprepare = isc_clk_unprepare,
.enable = isc_clk_enable,
.disable = isc_clk_disable,
.is_enabled = isc_clk_is_enabled,
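The new prepare/unprepare callbacks exploit the common-clock framework's split between prepare (allowed to sleep: here it takes a runtime-PM reference and polls the ISC_CLKSR_SIP synchronization bit) and enable (must work in atomic context: here only register writes under a spinlock). A generic sketch of that division of labour, with hypothetical helpers:

	static int foo_clk_prepare(struct clk_hw *hw)
	{
		/* sleeping work: runtime PM, waiting for the hardware to settle */
		return foo_wait_until_ready(hw);	/* hypothetical poll-with-timeout helper */
	}

	static int foo_clk_enable(struct clk_hw *hw)
	{
		/* atomic work only: a couple of register writes under a spinlock */
		foo_write_enable_bit(hw);		/* hypothetical */
		return 0;
	}

	static const struct clk_ops foo_clk_ops = {
		.prepare	= foo_clk_prepare,
		.enable		= foo_clk_enable,
	};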
@@ -492,6 +818,7 @@ static int isc_clk_register(struct isc_device *isc, unsigned int id)
isc_clk->regmap = regmap;
isc_clk->id = id;
isc_clk->dev = isc->dev;
+ spin_lock_init(&isc_clk->lock);
isc_clk->clk = clk_register(isc->dev, &isc_clk->hw);
if (IS_ERR(isc_clk->clk)) {
@@ -575,11 +902,27 @@ static inline bool sensor_is_preferred(const struct isc_format *isc_fmt)
!isc_fmt->isc_support;
}
+static struct fmt_config *get_fmt_config(u32 fourcc)
+{
+ struct fmt_config *config;
+ int i;
+
+ config = &fmt_configs_list[0];
+ for (i = 0; i < ARRAY_SIZE(fmt_configs_list); i++) {
+ if (config->fourcc == fourcc)
+ return config;
+
+ config++;
+ }
+ return NULL;
+}
+
static void isc_start_dma(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
struct v4l2_pix_format *pixfmt = &isc->fmt.fmt.pix;
u32 sizeimage = pixfmt->sizeimage;
+ struct fmt_config *config = get_fmt_config(isc->current_fmt->fourcc);
u32 dctrl_dview;
dma_addr_t addr0;
@@ -602,7 +945,7 @@ static void isc_start_dma(struct isc_device *isc)
if (sensor_is_preferred(isc->current_fmt))
dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
else
- dctrl_dview = isc->current_fmt->reg_dctrl_dview;
+ dctrl_dview = config->dctrl_dview;
regmap_write(regmap, ISC_DCTRL, dctrl_dview | ISC_DCTRL_IE_IS);
regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_CAPTURE);
@@ -612,6 +955,7 @@ static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
{
struct regmap *regmap = isc->regmap;
struct isc_ctrls *ctrls = &isc->ctrls;
+ struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
u32 val, bay_cfg;
const u32 *gamma;
unsigned int i;
@@ -625,7 +969,7 @@ static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
if (!pipeline)
return;
- bay_cfg = isc->raw_fmt->reg_bay_cfg;
+ bay_cfg = config->cfa_baycfg;
regmap_write(regmap, ISC_WB_CFG, bay_cfg);
regmap_write(regmap, ISC_WB_O_RGR, 0x0);
@@ -678,11 +1022,13 @@ static void isc_set_histogram(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
struct isc_ctrls *ctrls = &isc->ctrls;
+ struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
if (ctrls->awb && (ctrls->hist_stat != HIST_ENABLED)) {
- regmap_write(regmap, ISC_HIS_CFG, ISC_HIS_CFG_MODE_R |
- (isc->raw_fmt->reg_bay_cfg << ISC_HIS_CFG_BAYSEL_SHIFT) |
- ISC_HIS_CFG_RAR);
+ regmap_write(regmap, ISC_HIS_CFG,
+ ISC_HIS_CFG_MODE_R |
+ (config->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT) |
+ ISC_HIS_CFG_RAR);
regmap_write(regmap, ISC_HIS_CTRL, ISC_HIS_CTRL_EN);
regmap_write(regmap, ISC_INTEN, ISC_INT_HISDONE);
ctrls->hist_id = ISC_HIS_CFG_MODE_R;
@@ -699,8 +1045,10 @@ static void isc_set_histogram(struct isc_device *isc)
}
static inline void isc_get_param(const struct isc_format *fmt,
- u32 *rlp_mode, u32 *dcfg)
+ u32 *rlp_mode, u32 *dcfg)
{
+ struct fmt_config *config = get_fmt_config(fmt->fourcc);
+
*dcfg = ISC_DCFG_YMBSIZE_BEATS8;
switch (fmt->fourcc) {
@@ -712,8 +1060,8 @@ static inline void isc_get_param(const struct isc_format *fmt,
case V4L2_PIX_FMT_SGBRG12:
case V4L2_PIX_FMT_SGRBG12:
case V4L2_PIX_FMT_SRGGB12:
- *rlp_mode = fmt->reg_rlp_mode;
- *dcfg |= fmt->reg_dcfg_imode;
+ *rlp_mode = config->rlp_cfg_mode;
+ *dcfg |= config->dcfg_imode;
break;
default:
*rlp_mode = ISC_RLP_CFG_MODE_DAT8;
@@ -726,20 +1074,22 @@ static int isc_configure(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
const struct isc_format *current_fmt = isc->current_fmt;
+ struct fmt_config *curfmt_config = get_fmt_config(current_fmt->fourcc);
+ struct fmt_config *rawfmt_config = get_fmt_config(isc->raw_fmt->fourcc);
struct isc_subdev_entity *subdev = isc->current_subdev;
u32 pfe_cfg0, rlp_mode, dcfg, mask, pipeline;
if (sensor_is_preferred(current_fmt)) {
- pfe_cfg0 = current_fmt->reg_bps;
+ pfe_cfg0 = curfmt_config->pfe_cfg0_bps;
pipeline = 0x0;
isc_get_param(current_fmt, &rlp_mode, &dcfg);
isc->ctrls.hist_stat = HIST_INIT;
} else {
- pfe_cfg0 = isc->raw_fmt->reg_bps;
- pipeline = current_fmt->pipeline;
- rlp_mode = current_fmt->reg_rlp_mode;
- dcfg = current_fmt->reg_dcfg_imode | ISC_DCFG_YMBSIZE_BEATS8 |
- ISC_DCFG_CMBSIZE_BEATS8;
+ pfe_cfg0 = rawfmt_config->pfe_cfg0_bps;
+ pipeline = curfmt_config->bits_pipeline;
+ rlp_mode = curfmt_config->rlp_cfg_mode;
+ dcfg = curfmt_config->dcfg_imode |
+ ISC_DCFG_YMBSIZE_BEATS8 | ISC_DCFG_CMBSIZE_BEATS8;
}
pfe_cfg0 |= subdev->pfe_cfg0 | ISC_PFE_CFG0_MODE_PROGRESSIVE;
@@ -941,6 +1291,7 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
{
struct isc_format *isc_fmt;
struct v4l2_pix_format *pixfmt = &f->fmt.pix;
+ struct v4l2_subdev_pad_config pad_cfg;
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -971,7 +1322,7 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
v4l2_fill_mbus_format(&format.format, pixfmt, mbus_code);
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
- isc->current_subdev->config, &format);
+ &pad_cfg, &format);
if (ret < 0)
return ret;
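isc_try_fmt() now hands the subdev a throw-away, on-stack struct v4l2_subdev_pad_config together with V4L2_SUBDEV_FORMAT_TRY, so the format is negotiated without touching the subdev's active configuration; that is what makes the per-subdev config field and its alloc/free calls (removed further down) unnecessary. Condensed from the hunk above:

	struct v4l2_subdev_pad_config pad_cfg;
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_TRY,	/* do not change the live setup */
	};

	v4l2_fill_mbus_format(&format.format, pixfmt, mbus_code);
	ret = v4l2_subdev_call(sd, pad, set_fmt, &pad_cfg, &format);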
@@ -1323,6 +1674,7 @@ static void isc_awb_work(struct work_struct *w)
struct isc_device *isc =
container_of(w, struct isc_device, awb_work);
struct regmap *regmap = isc->regmap;
+ struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
struct isc_ctrls *ctrls = &isc->ctrls;
u32 hist_id = ctrls->hist_id;
u32 baysel;
@@ -1340,7 +1692,7 @@ static void isc_awb_work(struct work_struct *w)
}
ctrls->hist_id = hist_id;
- baysel = isc->raw_fmt->reg_bay_cfg << ISC_HIS_CFG_BAYSEL_SHIFT;
+ baysel = config->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT;
pm_runtime_get_sync(isc->dev);
@@ -1436,17 +1788,15 @@ static void isc_async_unbind(struct v4l2_async_notifier *notifier,
struct isc_device, v4l2_dev);
cancel_work_sync(&isc->awb_work);
video_unregister_device(&isc->video_dev);
- if (isc->current_subdev->config)
- v4l2_subdev_free_pad_config(isc->current_subdev->config);
v4l2_ctrl_handler_free(&isc->ctrls.handler);
}
static struct isc_format *find_format_by_code(unsigned int code, int *index)
{
- struct isc_format *fmt = &isc_formats[0];
+ struct isc_format *fmt = &formats_list[0];
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(isc_formats); i++) {
+ for (i = 0; i < ARRAY_SIZE(formats_list); i++) {
if (fmt->mbus_code == code) {
*index = i;
return fmt;
@@ -1463,37 +1813,36 @@ static int isc_formats_init(struct isc_device *isc)
struct isc_format *fmt;
struct v4l2_subdev *subdev = isc->current_subdev->sd;
unsigned int num_fmts, i, j;
+ u32 list_size = ARRAY_SIZE(formats_list);
struct v4l2_subdev_mbus_code_enum mbus_code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- fmt = &isc_formats[0];
- for (i = 0; i < ARRAY_SIZE(isc_formats); i++) {
- fmt->isc_support = false;
- fmt->sd_support = false;
-
- fmt++;
- }
-
while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
NULL, &mbus_code)) {
mbus_code.index++;
+
fmt = find_format_by_code(mbus_code.code, &i);
- if (!fmt)
+ if ((!fmt) || (!(fmt->flags & FMT_FLAG_FROM_SENSOR)))
continue;
fmt->sd_support = true;
- if (i <= RAW_FMT_IND_END) {
- for (j = ISC_FMT_IND_START; j <= ISC_FMT_IND_END; j++)
- isc_formats[j].isc_support = true;
-
+ if (fmt->flags & FMT_FLAG_RAW_FORMAT)
isc->raw_fmt = fmt;
- }
}
- fmt = &isc_formats[0];
- for (i = 0, num_fmts = 0; i < ARRAY_SIZE(isc_formats); i++) {
+ fmt = &formats_list[0];
+ for (i = 0; i < list_size; i++) {
+ if (fmt->flags & FMT_FLAG_FROM_CONTROLLER)
+ fmt->isc_support = true;
+
+ fmt++;
+ }
+
+ fmt = &formats_list[0];
+ num_fmts = 0;
+ for (i = 0; i < list_size; i++) {
if (fmt->isc_support || fmt->sd_support)
num_fmts++;
@@ -1505,15 +1854,13 @@ static int isc_formats_init(struct isc_device *isc)
isc->num_user_formats = num_fmts;
isc->user_formats = devm_kcalloc(isc->dev,
- num_fmts, sizeof(struct isc_format *),
+ num_fmts, sizeof(*isc->user_formats),
GFP_KERNEL);
- if (!isc->user_formats) {
- v4l2_err(&isc->v4l2_dev, "could not allocate memory\n");
+ if (!isc->user_formats)
return -ENOMEM;
- }
- fmt = &isc_formats[0];
- for (i = 0, j = 0; i < ARRAY_SIZE(isc_formats); i++) {
+ fmt = &formats_list[0];
+ for (i = 0, j = 0; i < list_size; i++) {
if (fmt->isc_support || fmt->sd_support)
isc->user_formats[j++] = fmt;
@@ -1550,7 +1897,6 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
{
struct isc_device *isc = container_of(notifier->v4l2_dev,
struct isc_device, v4l2_dev);
- struct isc_subdev_entity *sd_entity;
struct video_device *vdev = &isc->video_dev;
struct vb2_queue *q = &isc->vb2_vidq;
int ret;
@@ -1563,8 +1909,6 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
isc->current_subdev = container_of(notifier,
struct isc_subdev_entity, notifier);
- sd_entity = isc->current_subdev;
-
mutex_init(&isc->lock);
init_completion(&isc->comp);
@@ -1591,10 +1935,6 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
INIT_LIST_HEAD(&isc->dma_queue);
spin_lock_init(&isc->dma_queue_lock);
- sd_entity->config = v4l2_subdev_alloc_pad_config(sd_entity->sd);
- if (sd_entity->config == NULL)
- return -ENOMEM;
-
ret = isc_formats_init(isc);
if (ret < 0) {
v4l2_err(&isc->v4l2_dev,
@@ -1639,6 +1979,12 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
return 0;
}
+static const struct v4l2_async_notifier_operations isc_async_ops = {
+ .bound = isc_async_bound,
+ .unbind = isc_async_unbind,
+ .complete = isc_async_complete,
+};
+
static void isc_subdev_cleanup(struct isc_device *isc)
{
struct isc_subdev_entity *subdev_entity;
@@ -1716,7 +2062,7 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
subdev_entity = devm_kzalloc(dev,
sizeof(*subdev_entity), GFP_KERNEL);
- if (subdev_entity == NULL) {
+ if (!subdev_entity) {
of_node_put(rem);
ret = -ENOMEM;
break;
@@ -1724,7 +2070,7 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
subdev_entity->asd = devm_kzalloc(dev,
sizeof(*subdev_entity->asd), GFP_KERNEL);
- if (subdev_entity->asd == NULL) {
+ if (!subdev_entity->asd) {
of_node_put(rem);
ret = -ENOMEM;
break;
@@ -1815,25 +2161,37 @@ static int atmel_isc_probe(struct platform_device *pdev)
return ret;
}
+ ret = clk_prepare_enable(isc->hclock);
+ if (ret) {
+ dev_err(dev, "failed to enable hclock: %d\n", ret);
+ return ret;
+ }
+
ret = isc_clk_init(isc);
if (ret) {
dev_err(dev, "failed to init isc clock: %d\n", ret);
- goto clean_isc_clk;
+ goto unprepare_hclk;
}
isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
+ ret = clk_prepare_enable(isc->ispck);
+ if (ret) {
+ dev_err(dev, "failed to enable ispck: %d\n", ret);
+ goto unprepare_hclk;
+ }
+
/* ispck should be greater or equal to hclock */
ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
if (ret) {
dev_err(dev, "failed to set ispck rate: %d\n", ret);
- goto clean_isc_clk;
+ goto unprepare_clk;
}
ret = v4l2_device_register(dev, &isc->v4l2_dev);
if (ret) {
dev_err(dev, "unable to register v4l2 device.\n");
- goto clean_isc_clk;
+ goto unprepare_clk;
}
ret = isc_parse_dt(dev, isc);
@@ -1851,9 +2209,7 @@ static int atmel_isc_probe(struct platform_device *pdev)
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
subdev_entity->notifier.subdevs = &subdev_entity->asd;
subdev_entity->notifier.num_subdevs = 1;
- subdev_entity->notifier.bound = isc_async_bound;
- subdev_entity->notifier.unbind = isc_async_unbind;
- subdev_entity->notifier.complete = isc_async_complete;
+ subdev_entity->notifier.ops = &isc_async_ops;
ret = v4l2_async_notifier_register(&isc->v4l2_dev,
&subdev_entity->notifier);
@@ -1866,7 +2222,9 @@ static int atmel_isc_probe(struct platform_device *pdev)
break;
}
+ pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+ pm_request_idle(dev);
return 0;
@@ -1876,7 +2234,11 @@ cleanup_subdev:
unregister_v4l2_device:
v4l2_device_unregister(&isc->v4l2_dev);
-clean_isc_clk:
+unprepare_clk:
+ clk_disable_unprepare(isc->ispck);
+unprepare_hclk:
+ clk_disable_unprepare(isc->hclock);
+
isc_clk_cleanup(isc);
return ret;
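Because probe now prepares two clocks before registering anything, the error path gains the usual kernel unwind ladder: labels are placed in reverse order of acquisition, so a failure after clk_prepare_enable(isc->ispck) falls through unprepare_clk and then unprepare_hclk, undoing exactly what has been done so far. The shape in generic terms (resources and labels below are illustrative):

	ret = clk_prepare_enable(hclock);
	if (ret)
		return ret;

	ret = clk_prepare_enable(ispck);
	if (ret)
		goto unprepare_hclk;

	ret = register_things();		/* hypothetical next acquisition step */
	if (ret)
		goto unprepare_clk;

	return 0;

unprepare_clk:
	clk_disable_unprepare(ispck);
unprepare_hclk:
	clk_disable_unprepare(hclock);
	return ret;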
@@ -1887,6 +2249,8 @@ static int atmel_isc_remove(struct platform_device *pdev)
struct isc_device *isc = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(isc->ispck);
+ clk_disable_unprepare(isc->hclock);
isc_subdev_cleanup(isc);
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index 891fa2505efa..e900995143a3 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -411,7 +411,7 @@ static void buffer_queue(struct vb2_buffer *vb)
spin_lock_irqsave(&isi->irqlock, flags);
list_add_tail(&buf->list, &isi->video_buffer_list);
- if (isi->active == NULL) {
+ if (!isi->active) {
isi->active = buf;
if (vb2_is_streaming(vb->vb2_queue))
start_dma(isi, buf);
@@ -1038,10 +1038,8 @@ static int isi_formats_init(struct atmel_isi *isi)
isi->user_formats = devm_kcalloc(isi->dev,
num_fmts, sizeof(struct isi_format *),
GFP_KERNEL);
- if (!isi->user_formats) {
- dev_err(isi->dev, "could not allocate memory\n");
+ if (!isi->user_formats)
return -ENOMEM;
- }
memcpy(isi->user_formats, isi_fmts,
num_fmts * sizeof(struct isi_format *));
@@ -1105,6 +1103,12 @@ static int isi_graph_notify_bound(struct v4l2_async_notifier *notifier,
return 0;
}
+static const struct v4l2_async_notifier_operations isi_graph_notify_ops = {
+ .bound = isi_graph_notify_bound,
+ .unbind = isi_graph_notify_unbind,
+ .complete = isi_graph_notify_complete,
+};
+
static int isi_graph_parse(struct atmel_isi *isi, struct device_node *node)
{
struct device_node *ep = NULL;
@@ -1143,7 +1147,7 @@ static int isi_graph_init(struct atmel_isi *isi)
/* Register the subdevices notifier. */
subdevs = devm_kzalloc(isi->dev, sizeof(*subdevs), GFP_KERNEL);
- if (subdevs == NULL) {
+ if (!subdevs) {
of_node_put(isi->entity.node);
return -ENOMEM;
}
@@ -1152,9 +1156,7 @@ static int isi_graph_init(struct atmel_isi *isi)
isi->notifier.subdevs = subdevs;
isi->notifier.num_subdevs = 1;
- isi->notifier.bound = isi_graph_notify_bound;
- isi->notifier.unbind = isi_graph_notify_unbind;
- isi->notifier.complete = isi_graph_notify_complete;
+ isi->notifier.ops = &isi_graph_notify_ops;
ret = v4l2_async_notifier_register(&isi->v4l2_dev, &isi->notifier);
if (ret < 0) {
@@ -1176,10 +1178,8 @@ static int atmel_isi_probe(struct platform_device *pdev)
int ret, i;
isi = devm_kzalloc(&pdev->dev, sizeof(struct atmel_isi), GFP_KERNEL);
- if (!isi) {
- dev_err(&pdev->dev, "Can't allocate interface!\n");
+ if (!isi)
return -ENOMEM;
- }
isi->pclk = devm_clk_get(&pdev->dev, "isi_clk");
if (IS_ERR(isi->pclk))
@@ -1204,7 +1204,7 @@ static int atmel_isi_probe(struct platform_device *pdev)
return ret;
isi->vdev = video_device_alloc();
- if (isi->vdev == NULL) {
+ if (!isi->vdev) {
ret = -ENOMEM;
goto err_vdev_alloc;
}
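The same mechanical conversion recurs throughout this series (atmel, davinci vpif, exynos4-is, omap3isp, pxa_camera, qcom camss, rcar-vin, rcar_drif): the bound/unbind/complete callbacks move out of struct v4l2_async_notifier into a const struct v4l2_async_notifier_operations that the notifier points at. A minimal sketch with hypothetical callbacks:

#include <media/v4l2-async.h>

static int foo_async_bound(struct v4l2_async_notifier *notifier,
			   struct v4l2_subdev *sd,
			   struct v4l2_async_subdev *asd)
{
	return 0;		/* e.g. create media links to the new subdev */
}

static void foo_async_unbind(struct v4l2_async_notifier *notifier,
			     struct v4l2_subdev *sd,
			     struct v4l2_async_subdev *asd)
{
	/* tear down whatever bound() set up */
}

static int foo_async_complete(struct v4l2_async_notifier *notifier)
{
	return 0;		/* e.g. register video device nodes */
}

static const struct v4l2_async_notifier_operations foo_async_ops = {
	.bound    = foo_async_bound,
	.unbind   = foo_async_unbind,
	.complete = foo_async_complete,
};

/* in probe(): notifier.ops = &foo_async_ops; then v4l2_async_notifier_register(). */

Making the table const lets identical notifiers share one read-only ops structure instead of filling function pointers into each notifier instance at probe time.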
diff --git a/drivers/media/platform/blackfin/ppi.c b/drivers/media/platform/blackfin/ppi.c
index 37169054b828..478eb2f7d723 100644
--- a/drivers/media/platform/blackfin/ppi.c
+++ b/drivers/media/platform/blackfin/ppi.c
@@ -338,7 +338,6 @@ struct ppi_if *ppi_create_instance(struct platform_device *pdev,
ppi = kzalloc(sizeof(*ppi), GFP_KERNEL);
if (!ppi) {
peripheral_free_list(info->pin_req);
- dev_err(&pdev->dev, "unable to allocate memory for ppi handle\n");
return NULL;
}
ppi->ops = &ppi_ops;
diff --git a/drivers/media/platform/cec-gpio/Makefile b/drivers/media/platform/cec-gpio/Makefile
new file mode 100644
index 000000000000..e82b258afa55
--- /dev/null
+++ b/drivers/media/platform/cec-gpio/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CEC_GPIO) += cec-gpio.o
diff --git a/drivers/media/platform/cec-gpio/cec-gpio.c b/drivers/media/platform/cec-gpio/cec-gpio.c
new file mode 100644
index 000000000000..5debdf08fbe7
--- /dev/null
+++ b/drivers/media/platform/cec-gpio/cec-gpio.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <media/cec-pin.h>
+
+struct cec_gpio {
+ struct cec_adapter *adap;
+ struct device *dev;
+
+ struct gpio_desc *cec_gpio;
+ int cec_irq;
+ bool cec_is_low;
+ bool cec_have_irq;
+
+ struct gpio_desc *hpd_gpio;
+ int hpd_irq;
+ bool hpd_is_high;
+ ktime_t hpd_ts;
+};
+
+static bool cec_gpio_read(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_is_low)
+ return false;
+ return gpiod_get_value(cec->cec_gpio);
+}
+
+static void cec_gpio_high(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (!cec->cec_is_low)
+ return;
+ cec->cec_is_low = false;
+ gpiod_set_value(cec->cec_gpio, 1);
+}
+
+static void cec_gpio_low(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_is_low)
+ return;
+ if (WARN_ON_ONCE(cec->cec_have_irq))
+ free_irq(cec->cec_irq, cec);
+ cec->cec_have_irq = false;
+ cec->cec_is_low = true;
+ gpiod_set_value(cec->cec_gpio, 0);
+}
+
+static irqreturn_t cec_hpd_gpio_irq_handler_thread(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+
+ cec_queue_pin_hpd_event(cec->adap, cec->hpd_is_high, cec->hpd_ts);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cec_hpd_gpio_irq_handler(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+ bool is_high = gpiod_get_value(cec->hpd_gpio);
+
+ if (is_high == cec->hpd_is_high)
+ return IRQ_HANDLED;
+ cec->hpd_ts = ktime_get();
+ cec->hpd_is_high = is_high;
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t cec_gpio_irq_handler(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+
+ cec_pin_changed(cec->adap, gpiod_get_value(cec->cec_gpio));
+ return IRQ_HANDLED;
+}
+
+static bool cec_gpio_enable_irq(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_have_irq)
+ return true;
+
+ if (request_irq(cec->cec_irq, cec_gpio_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ adap->name, cec))
+ return false;
+ cec->cec_have_irq = true;
+ return true;
+}
+
+static void cec_gpio_disable_irq(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_have_irq)
+ free_irq(cec->cec_irq, cec);
+ cec->cec_have_irq = false;
+}
+
+static void cec_gpio_status(struct cec_adapter *adap, struct seq_file *file)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ seq_printf(file, "mode: %s\n", cec->cec_is_low ? "low-drive" : "read");
+ if (cec->cec_have_irq)
+ seq_printf(file, "using irq: %d\n", cec->cec_irq);
+ if (cec->hpd_gpio)
+ seq_printf(file, "hpd: %s\n",
+ cec->hpd_is_high ? "high" : "low");
+}
+
+static int cec_gpio_read_hpd(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (!cec->hpd_gpio)
+ return -ENOTTY;
+ return gpiod_get_value(cec->hpd_gpio);
+}
+
+static void cec_gpio_free(struct cec_adapter *adap)
+{
+ cec_gpio_disable_irq(adap);
+}
+
+static const struct cec_pin_ops cec_gpio_pin_ops = {
+ .read = cec_gpio_read,
+ .low = cec_gpio_low,
+ .high = cec_gpio_high,
+ .enable_irq = cec_gpio_enable_irq,
+ .disable_irq = cec_gpio_disable_irq,
+ .status = cec_gpio_status,
+ .free = cec_gpio_free,
+ .read_hpd = cec_gpio_read_hpd,
+};
+
+static int cec_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cec_gpio *cec;
+ int ret;
+
+ cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ cec->dev = dev;
+
+ cec->cec_gpio = devm_gpiod_get(dev, "cec", GPIOD_IN);
+ if (IS_ERR(cec->cec_gpio))
+ return PTR_ERR(cec->cec_gpio);
+ cec->cec_irq = gpiod_to_irq(cec->cec_gpio);
+
+ cec->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+ if (IS_ERR(cec->hpd_gpio))
+ return PTR_ERR(cec->hpd_gpio);
+
+ cec->adap = cec_pin_allocate_adapter(&cec_gpio_pin_ops,
+ cec, pdev->name, CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR |
+ CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN);
+ if (IS_ERR(cec->adap))
+ return PTR_ERR(cec->adap);
+
+ if (cec->hpd_gpio) {
+ cec->hpd_irq = gpiod_to_irq(cec->hpd_gpio);
+ ret = devm_request_threaded_irq(dev, cec->hpd_irq,
+ cec_hpd_gpio_irq_handler,
+ cec_hpd_gpio_irq_handler_thread,
+ IRQF_ONESHOT |
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "hpd-gpio", cec);
+ if (ret)
+ return ret;
+ }
+
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret) {
+ cec_delete_adapter(cec->adap);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, cec);
+ return 0;
+}
+
+static int cec_gpio_remove(struct platform_device *pdev)
+{
+ struct cec_gpio *cec = platform_get_drvdata(pdev);
+
+ cec_unregister_adapter(cec->adap);
+ return 0;
+}
+
+static const struct of_device_id cec_gpio_match[] = {
+ {
+ .compatible = "cec-gpio",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cec_gpio_match);
+
+static struct platform_driver cec_gpio_pdrv = {
+ .probe = cec_gpio_probe,
+ .remove = cec_gpio_remove,
+ .driver = {
+ .name = "cec-gpio",
+ .of_match_table = cec_gpio_match,
+ },
+};
+
+module_platform_driver(cec_gpio_pdrv);
+
+MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CEC GPIO driver");
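The HPD handling in the new cec-gpio driver above uses the standard hard/threaded IRQ split: the hard handler only samples the pin and timestamps the transition (it must not sleep), then returns IRQ_WAKE_THREAD so the sleepable work runs in the threaded handler. A reduced sketch of the pattern for a hypothetical device (not part of the patch):

#include <linux/interrupt.h>
#include <linux/gpio/consumer.h>
#include <linux/ktime.h>

struct foo {
	struct gpio_desc *gpio;
	ktime_t ts;
	bool level;
};

static irqreturn_t foo_hard_handler(int irq, void *priv)
{
	struct foo *foo = priv;

	foo->ts = ktime_get();			/* timestamp taken in hard-IRQ context */
	foo->level = gpiod_get_value(foo->gpio);
	return IRQ_WAKE_THREAD;			/* defer the rest to the thread */
}

static irqreturn_t foo_thread_handler(int irq, void *priv)
{
	/* sleepable work, e.g. queueing an event to a framework */
	return IRQ_HANDLED;
}

/*
 * Registration, typically from probe(); IRQF_ONESHOT keeps the line masked
 * until the threaded handler has finished:
 *
 *	devm_request_threaded_irq(dev, irq, foo_hard_handler, foo_thread_handler,
 *				  IRQF_ONESHOT | IRQF_TRIGGER_RISING |
 *				  IRQF_TRIGGER_FALLING, "foo", foo);
 */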
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 291c40933935..bfc4ecf6f068 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -417,6 +417,10 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
dev->devtype->product != CODA_DX6)
size += ysize / 4;
name = kasprintf(GFP_KERNEL, "fb%d", i);
+ if (!name) {
+ coda_free_framebuffers(ctx);
+ return -ENOMEM;
+ }
ret = coda_alloc_context_buf(ctx, &ctx->internal_frames[i],
size, name);
kfree(name);
diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/davinci/ccdc_hw_device.h
index f1b521045d64..3482178cbf01 100644
--- a/drivers/media/platform/davinci/ccdc_hw_device.h
+++ b/drivers/media/platform/davinci/ccdc_hw_device.h
@@ -82,8 +82,8 @@ struct ccdc_hw_device {
};
/* Used by CCDC module to register & unregister with vpfe capture driver */
-int vpfe_register_ccdc_device(struct ccdc_hw_device *dev);
-void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev);
+int vpfe_register_ccdc_device(const struct ccdc_hw_device *dev);
+void vpfe_unregister_ccdc_device(const struct ccdc_hw_device *dev);
#endif
#endif
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index 6d492dc4c3a9..89cb3094d7e6 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -841,7 +841,7 @@ static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
return 0;
}
-static struct ccdc_hw_device ccdc_hw_dev = {
+static const struct ccdc_hw_device ccdc_hw_dev = {
.name = "DM355 CCDC",
.owner = THIS_MODULE,
.hw_ops = {
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index 3b2d8a9317b8..5fa0a1f32536 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -776,7 +776,7 @@ static void ccdc_restore_context(void)
regw(ccdc_ctx[CCDC_VP_OUT >> 2], CCDC_VP_OUT);
regw(ccdc_ctx[CCDC_PCR >> 2], CCDC_PCR);
}
-static struct ccdc_hw_device ccdc_hw_dev = {
+static const struct ccdc_hw_device ccdc_hw_dev = {
.name = "DM6446 CCDC",
.owner = THIS_MODULE,
.hw_ops = {
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index 5813b49391ed..d5ff58494c1e 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -1000,7 +1000,7 @@ static int isif_close(struct device *device)
return 0;
}
-static struct ccdc_hw_device isif_hw_dev = {
+static const struct ccdc_hw_device isif_hw_dev = {
.name = "ISIF",
.owner = THIS_MODULE,
.hw_ops = {
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 13d027031ff0..6aabd21fe69f 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -122,7 +122,7 @@ static irqreturn_t venc_isr(int irq, void *arg)
int fid;
int i;
- if ((NULL == arg) || (NULL == disp_dev->dev[0]))
+ if (!arg || !disp_dev->dev[0])
return IRQ_HANDLED;
if (venc_is_second_field(disp_dev))
@@ -337,10 +337,10 @@ static void vpbe_stop_streaming(struct vb2_queue *vq)
vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
} else {
- if (layer->cur_frm != NULL)
+ if (layer->cur_frm)
vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
- if (layer->next_frm != NULL)
+ if (layer->next_frm)
vb2_buffer_done(&layer->next_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
}
@@ -947,7 +947,7 @@ static int vpbe_display_s_std(struct file *file, void *priv,
if (vb2_is_busy(&layer->buffer_queue))
return -EBUSY;
- if (NULL != vpbe_dev->ops.s_std) {
+ if (vpbe_dev->ops.s_std) {
ret = vpbe_dev->ops.s_std(vpbe_dev, std_id);
if (ret) {
v4l2_err(&vpbe_dev->v4l2_dev,
@@ -1000,8 +1000,7 @@ static int vpbe_display_enum_output(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_OUTPUT\n");
/* Enumerate outputs */
-
- if (NULL == vpbe_dev->ops.enum_outputs)
+ if (!vpbe_dev->ops.enum_outputs)
return -EINVAL;
ret = vpbe_dev->ops.enum_outputs(vpbe_dev, output);
@@ -1030,7 +1029,7 @@ static int vpbe_display_s_output(struct file *file, void *priv,
if (vb2_is_busy(&layer->buffer_queue))
return -EBUSY;
- if (NULL == vpbe_dev->ops.set_output)
+ if (!vpbe_dev->ops.set_output)
return -EINVAL;
ret = vpbe_dev->ops.set_output(vpbe_dev, i);
@@ -1077,7 +1076,7 @@ vpbe_display_enum_dv_timings(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_TIMINGS\n");
/* Enumerate outputs */
- if (NULL == vpbe_dev->ops.enum_dv_timings)
+ if (!vpbe_dev->ops.enum_dv_timings)
return -EINVAL;
ret = vpbe_dev->ops.enum_dv_timings(vpbe_dev, timings);
@@ -1292,7 +1291,7 @@ static int vpbe_device_get(struct device *dev, void *data)
if (strcmp("vpbe_controller", pdev->name) == 0)
vpbe_disp->vpbe_dev = platform_get_drvdata(pdev);
- if (strstr(pdev->name, "vpbe-osd") != NULL)
+ if (strstr(pdev->name, "vpbe-osd"))
vpbe_disp->osd_device = platform_get_drvdata(pdev);
return 0;
@@ -1305,15 +1304,10 @@ static int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
struct video_device *vbd = NULL;
/* Allocate memory for four plane display objects */
-
- disp_dev->dev[i] =
- kzalloc(sizeof(struct vpbe_layer), GFP_KERNEL);
-
- /* If memory allocation fails, return error */
- if (!disp_dev->dev[i]) {
- printk(KERN_ERR "ran out of memory\n");
+ disp_dev->dev[i] = kzalloc(sizeof(*disp_dev->dev[i]), GFP_KERNEL);
+ if (!disp_dev->dev[i])
return -ENOMEM;
- }
+
spin_lock_init(&disp_dev->dev[i]->irqlock);
mutex_init(&disp_dev->dev[i]->opslock);
@@ -1397,8 +1391,7 @@ static int vpbe_display_probe(struct platform_device *pdev)
printk(KERN_DEBUG "vpbe_display_probe\n");
/* Allocate memory for vpbe_display */
- disp_dev = devm_kzalloc(&pdev->dev, sizeof(struct vpbe_display),
- GFP_KERNEL);
+ disp_dev = devm_kzalloc(&pdev->dev, sizeof(*disp_dev), GFP_KERNEL);
if (!disp_dev)
return -ENOMEM;
@@ -1414,7 +1407,7 @@ static int vpbe_display_probe(struct platform_device *pdev)
v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
/* Initialize the vpbe display controller */
- if (NULL != disp_dev->vpbe_dev->ops.initialize) {
+ if (disp_dev->vpbe_dev->ops.initialize) {
err = disp_dev->vpbe_dev->ops.initialize(&pdev->dev,
disp_dev->vpbe_dev);
if (err) {
@@ -1482,7 +1475,7 @@ static int vpbe_display_probe(struct platform_device *pdev)
probe_out:
for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
/* Unregister video device */
- if (disp_dev->dev[k] != NULL) {
+ if (disp_dev->dev[k]) {
video_unregister_device(&disp_dev->dev[k]->video_dev);
kfree(disp_dev->dev[k]);
}
@@ -1504,7 +1497,7 @@ static int vpbe_display_remove(struct platform_device *pdev)
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_remove\n");
/* deinitialize the vpbe display controller */
- if (NULL != vpbe_dev->ops.deinitialize)
+ if (vpbe_dev->ops.deinitialize)
vpbe_dev->ops.deinitialize(&pdev->dev, vpbe_dev);
/* un-register device */
for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 6792da16d9c7..7b3f6f8e3dc8 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -115,7 +115,7 @@ static struct vpfe_config_params config_params = {
};
/* ccdc device registered */
-static struct ccdc_hw_device *ccdc_dev;
+static const struct ccdc_hw_device *ccdc_dev;
/* lock for accessing ccdc information */
static DEFINE_MUTEX(ccdc_lock);
/* ccdc configuration */
@@ -203,7 +203,7 @@ static const struct vpfe_pixel_format *vpfe_lookup_pix_format(u32 pix_format)
* vpfe_register_ccdc_device. CCDC module calls this to
* register with vpfe capture
*/
-int vpfe_register_ccdc_device(struct ccdc_hw_device *dev)
+int vpfe_register_ccdc_device(const struct ccdc_hw_device *dev)
{
int ret = 0;
printk(KERN_NOTICE "vpfe_register_ccdc_device: %s\n", dev->name);
@@ -259,7 +259,7 @@ EXPORT_SYMBOL(vpfe_register_ccdc_device);
* vpfe_unregister_ccdc_device. CCDC module calls this to
* unregister with vpfe capture
*/
-void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
+void vpfe_unregister_ccdc_device(const struct ccdc_hw_device *dev)
{
if (!dev) {
printk(KERN_ERR "invalid ccdc device ptr\n");
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 0ef36cec21d1..a89367ab1e06 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -1500,6 +1500,11 @@ static int vpif_async_complete(struct v4l2_async_notifier *notifier)
return vpif_probe_complete();
}
+static const struct v4l2_async_notifier_operations vpif_async_ops = {
+ .bound = vpif_async_bound,
+ .complete = vpif_async_complete,
+};
+
static struct vpif_capture_config *
vpif_capture_get_pdata(struct platform_device *pdev)
{
@@ -1691,8 +1696,7 @@ static __init int vpif_probe(struct platform_device *pdev)
} else {
vpif_obj.notifier.subdevs = vpif_obj.config->asd;
vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
- vpif_obj.notifier.bound = vpif_async_bound;
- vpif_obj.notifier.complete = vpif_async_complete;
+ vpif_obj.notifier.ops = &vpif_async_ops;
err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
&vpif_obj.notifier);
if (err) {
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 56fe4e5b396e..ff2f75a328c9 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1232,6 +1232,11 @@ static int vpif_async_complete(struct v4l2_async_notifier *notifier)
return vpif_probe_complete();
}
+static const struct v4l2_async_notifier_operations vpif_async_ops = {
+ .bound = vpif_async_bound,
+ .complete = vpif_async_complete,
+};
+
/*
* vpif_probe: This function creates device entries by register itself to the
* V4L2 driver and initializes fields of each channel objects
@@ -1313,8 +1318,7 @@ static __init int vpif_probe(struct platform_device *pdev)
} else {
vpif_obj.notifier.subdevs = vpif_obj.config->asd;
vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
- vpif_obj.notifier.bound = vpif_async_bound;
- vpif_obj.notifier.complete = vpif_async_complete;
+ vpif_obj.notifier.ops = &vpif_async_ops;
err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
&vpif_obj.notifier);
if (err) {
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 43801509dabb..17854a379243 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -958,6 +958,51 @@ static struct gsc_pix_max gsc_v_100_max = {
.target_rot_en_h = 2016,
};
+static struct gsc_pix_max gsc_v_5250_max = {
+ .org_scaler_bypass_w = 8192,
+ .org_scaler_bypass_h = 8192,
+ .org_scaler_input_w = 4800,
+ .org_scaler_input_h = 3344,
+ .real_rot_dis_w = 4800,
+ .real_rot_dis_h = 3344,
+ .real_rot_en_w = 2016,
+ .real_rot_en_h = 2016,
+ .target_rot_dis_w = 4800,
+ .target_rot_dis_h = 3344,
+ .target_rot_en_w = 2016,
+ .target_rot_en_h = 2016,
+};
+
+static struct gsc_pix_max gsc_v_5420_max = {
+ .org_scaler_bypass_w = 8192,
+ .org_scaler_bypass_h = 8192,
+ .org_scaler_input_w = 4800,
+ .org_scaler_input_h = 3344,
+ .real_rot_dis_w = 4800,
+ .real_rot_dis_h = 3344,
+ .real_rot_en_w = 2048,
+ .real_rot_en_h = 2048,
+ .target_rot_dis_w = 4800,
+ .target_rot_dis_h = 3344,
+ .target_rot_en_w = 2016,
+ .target_rot_en_h = 2016,
+};
+
+static struct gsc_pix_max gsc_v_5433_max = {
+ .org_scaler_bypass_w = 8192,
+ .org_scaler_bypass_h = 8192,
+ .org_scaler_input_w = 4800,
+ .org_scaler_input_h = 3344,
+ .real_rot_dis_w = 4800,
+ .real_rot_dis_h = 3344,
+ .real_rot_en_w = 2047,
+ .real_rot_en_h = 2047,
+ .target_rot_dis_w = 4800,
+ .target_rot_dis_h = 3344,
+ .target_rot_en_w = 2016,
+ .target_rot_en_h = 2016,
+};
+
static struct gsc_pix_min gsc_v_100_min = {
.org_w = 64,
.org_h = 32,
@@ -992,6 +1037,45 @@ static struct gsc_variant gsc_v_100_variant = {
.local_sc_down = 2,
};
+static struct gsc_variant gsc_v_5250_variant = {
+ .pix_max = &gsc_v_5250_max,
+ .pix_min = &gsc_v_100_min,
+ .pix_align = &gsc_v_100_align,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
+ .sc_up_max = 8,
+ .sc_down_max = 16,
+ .poly_sc_down_max = 4,
+ .pre_sc_down_max = 4,
+ .local_sc_down = 2,
+};
+
+static struct gsc_variant gsc_v_5420_variant = {
+ .pix_max = &gsc_v_5420_max,
+ .pix_min = &gsc_v_100_min,
+ .pix_align = &gsc_v_100_align,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
+ .sc_up_max = 8,
+ .sc_down_max = 16,
+ .poly_sc_down_max = 4,
+ .pre_sc_down_max = 4,
+ .local_sc_down = 2,
+};
+
+static struct gsc_variant gsc_v_5433_variant = {
+ .pix_max = &gsc_v_5433_max,
+ .pix_min = &gsc_v_100_min,
+ .pix_align = &gsc_v_100_align,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
+ .sc_up_max = 8,
+ .sc_down_max = 16,
+ .poly_sc_down_max = 4,
+ .pre_sc_down_max = 4,
+ .local_sc_down = 2,
+};
+
static struct gsc_driverdata gsc_v_100_drvdata = {
.variant = {
[0] = &gsc_v_100_variant,
@@ -1004,11 +1088,33 @@ static struct gsc_driverdata gsc_v_100_drvdata = {
.num_clocks = 1,
};
+static struct gsc_driverdata gsc_v_5250_drvdata = {
+ .variant = {
+ [0] = &gsc_v_5250_variant,
+ [1] = &gsc_v_5250_variant,
+ [2] = &gsc_v_5250_variant,
+ [3] = &gsc_v_5250_variant,
+ },
+ .num_entities = 4,
+ .clk_names = { "gscl" },
+ .num_clocks = 1,
+};
+
+static struct gsc_driverdata gsc_v_5420_drvdata = {
+ .variant = {
+ [0] = &gsc_v_5420_variant,
+ [1] = &gsc_v_5420_variant,
+ },
+ .num_entities = 2,
+ .clk_names = { "gscl" },
+ .num_clocks = 1,
+};
+
static struct gsc_driverdata gsc_5433_drvdata = {
.variant = {
- [0] = &gsc_v_100_variant,
- [1] = &gsc_v_100_variant,
- [2] = &gsc_v_100_variant,
+ [0] = &gsc_v_5433_variant,
+ [1] = &gsc_v_5433_variant,
+ [2] = &gsc_v_5433_variant,
},
.num_entities = 3,
.clk_names = { "pclk", "aclk", "aclk_xiu", "aclk_gsclbend" },
@@ -1017,13 +1123,21 @@ static struct gsc_driverdata gsc_5433_drvdata = {
static const struct of_device_id exynos_gsc_match[] = {
{
- .compatible = "samsung,exynos5-gsc",
- .data = &gsc_v_100_drvdata,
+ .compatible = "samsung,exynos5250-gsc",
+ .data = &gsc_v_5250_drvdata,
+ },
+ {
+ .compatible = "samsung,exynos5420-gsc",
+ .data = &gsc_v_5420_drvdata,
},
{
.compatible = "samsung,exynos5433-gsc",
.data = &gsc_5433_drvdata,
},
+ {
+ .compatible = "samsung,exynos5-gsc",
+ .data = &gsc_v_100_drvdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, exynos_gsc_match);
@@ -1045,6 +1159,9 @@ static int gsc_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ if (drv_data == &gsc_v_100_drvdata)
+ dev_info(dev, "compatible 'exynos5-gsc' is deprecated\n");
+
gsc->id = ret;
if (gsc->id >= drv_data->num_entities) {
dev_err(dev, "Invalid platform device id: %d\n", gsc->id);
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index c480efb755f5..46a7d242a1a5 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -76,7 +76,7 @@ config VIDEO_EXYNOS4_ISP_DMA_CAPTURE
depends on VIDEO_EXYNOS4_FIMC_IS
select VIDEO_EXYNOS4_IS_COMMON
default y
- help
+ help
This option enables an additional video device node exposing a V4L2
video capture interface for the FIMC-IS ISP raw (Bayer) capture DMA.
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index d4656d5175d7..c15596b56dc9 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1405,6 +1405,11 @@ unlock:
return media_device_register(&fmd->media_dev);
}
+static const struct v4l2_async_notifier_operations subdev_notifier_ops = {
+ .bound = subdev_notifier_bound,
+ .complete = subdev_notifier_complete,
+};
+
static int fimc_md_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1479,8 +1484,7 @@ static int fimc_md_probe(struct platform_device *pdev)
if (fmd->num_sensors > 0) {
fmd->subdev_notifier.subdevs = fmd->async_subdevs;
fmd->subdev_notifier.num_subdevs = fmd->num_sensors;
- fmd->subdev_notifier.bound = subdev_notifier_bound;
- fmd->subdev_notifier.complete = subdev_notifier_complete;
+ fmd->subdev_notifier.ops = &subdev_notifier_ops;
fmd->num_sensors = 0;
ret = v4l2_async_notifier_register(&fmd->v4l2_dev,
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 4d29860d27b4..6f1b0c799e58 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -1004,11 +1004,12 @@ static int omap_vout_open(struct file *file)
struct omap_vout_device *vout = NULL;
vout = video_drvdata(file);
- v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
if (vout == NULL)
return -ENODEV;
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+
/* for now, we only support single open */
if (vout->opened)
return -EBUSY;
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 1a428fe9f070..b7ff3842afc0 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1669,8 +1669,8 @@ static int isp_link_entity(
break;
}
if (i == entity->num_pads) {
- dev_err(isp->dev, "%s: no source pad in external entity\n",
- __func__);
+ dev_err(isp->dev, "%s: no source pad in external entity %s\n",
+ __func__, entity->name);
return -EINVAL;
}
@@ -2001,6 +2001,7 @@ static int isp_remove(struct platform_device *pdev)
__omap3isp_put(isp, false);
media_entity_enum_cleanup(&isp->crashed);
+ v4l2_async_notifier_cleanup(&isp->notifier);
return 0;
}
@@ -2011,44 +2012,41 @@ enum isp_of_phy {
ISP_OF_PHY_CSIPHY2,
};
-static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
- struct isp_async_subdev *isd)
+static int isp_fwnode_parse(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
{
+ struct isp_async_subdev *isd =
+ container_of(asd, struct isp_async_subdev, asd);
struct isp_bus_cfg *buscfg = &isd->bus;
- struct v4l2_fwnode_endpoint vep;
- unsigned int i;
- int ret;
bool csi1 = false;
-
- ret = v4l2_fwnode_endpoint_parse(fwnode, &vep);
- if (ret)
- return ret;
+ unsigned int i;
dev_dbg(dev, "parsing endpoint %pOF, interface %u\n",
- to_of_node(fwnode), vep.base.port);
+ to_of_node(vep->base.local_fwnode), vep->base.port);
- switch (vep.base.port) {
+ switch (vep->base.port) {
case ISP_OF_PHY_PARALLEL:
buscfg->interface = ISP_INTERFACE_PARALLEL;
buscfg->bus.parallel.data_lane_shift =
- vep.bus.parallel.data_shift;
+ vep->bus.parallel.data_shift;
buscfg->bus.parallel.clk_pol =
- !!(vep.bus.parallel.flags
+ !!(vep->bus.parallel.flags
& V4L2_MBUS_PCLK_SAMPLE_FALLING);
buscfg->bus.parallel.hs_pol =
- !!(vep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ !!(vep->bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
buscfg->bus.parallel.vs_pol =
- !!(vep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ !!(vep->bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
buscfg->bus.parallel.fld_pol =
- !!(vep.bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW);
+ !!(vep->bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW);
buscfg->bus.parallel.data_pol =
- !!(vep.bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW);
- buscfg->bus.parallel.bt656 = vep.bus_type == V4L2_MBUS_BT656;
+ !!(vep->bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW);
+ buscfg->bus.parallel.bt656 = vep->bus_type == V4L2_MBUS_BT656;
break;
case ISP_OF_PHY_CSIPHY1:
case ISP_OF_PHY_CSIPHY2:
- switch (vep.bus_type) {
+ switch (vep->bus_type) {
case V4L2_MBUS_CCP2:
case V4L2_MBUS_CSI1:
dev_dbg(dev, "CSI-1/CCP-2 configuration\n");
@@ -2060,11 +2058,11 @@ static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
break;
default:
dev_err(dev, "unsupported bus type %u\n",
- vep.bus_type);
+ vep->bus_type);
return -EINVAL;
}
- switch (vep.base.port) {
+ switch (vep->base.port) {
case ISP_OF_PHY_CSIPHY1:
if (csi1)
buscfg->interface = ISP_INTERFACE_CCP2B_PHY1;
@@ -2080,47 +2078,47 @@ static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
}
if (csi1) {
buscfg->bus.ccp2.lanecfg.clk.pos =
- vep.bus.mipi_csi1.clock_lane;
+ vep->bus.mipi_csi1.clock_lane;
buscfg->bus.ccp2.lanecfg.clk.pol =
- vep.bus.mipi_csi1.lane_polarity[0];
+ vep->bus.mipi_csi1.lane_polarity[0];
dev_dbg(dev, "clock lane polarity %u, pos %u\n",
buscfg->bus.ccp2.lanecfg.clk.pol,
buscfg->bus.ccp2.lanecfg.clk.pos);
buscfg->bus.ccp2.lanecfg.data[0].pos =
- vep.bus.mipi_csi1.data_lane;
+ vep->bus.mipi_csi1.data_lane;
buscfg->bus.ccp2.lanecfg.data[0].pol =
- vep.bus.mipi_csi1.lane_polarity[1];
+ vep->bus.mipi_csi1.lane_polarity[1];
dev_dbg(dev, "data lane polarity %u, pos %u\n",
buscfg->bus.ccp2.lanecfg.data[0].pol,
buscfg->bus.ccp2.lanecfg.data[0].pos);
buscfg->bus.ccp2.strobe_clk_pol =
- vep.bus.mipi_csi1.clock_inv;
- buscfg->bus.ccp2.phy_layer = vep.bus.mipi_csi1.strobe;
+ vep->bus.mipi_csi1.clock_inv;
+ buscfg->bus.ccp2.phy_layer = vep->bus.mipi_csi1.strobe;
buscfg->bus.ccp2.ccp2_mode =
- vep.bus_type == V4L2_MBUS_CCP2;
+ vep->bus_type == V4L2_MBUS_CCP2;
buscfg->bus.ccp2.vp_clk_pol = 1;
buscfg->bus.ccp2.crc = 1;
} else {
buscfg->bus.csi2.lanecfg.clk.pos =
- vep.bus.mipi_csi2.clock_lane;
+ vep->bus.mipi_csi2.clock_lane;
buscfg->bus.csi2.lanecfg.clk.pol =
- vep.bus.mipi_csi2.lane_polarities[0];
+ vep->bus.mipi_csi2.lane_polarities[0];
dev_dbg(dev, "clock lane polarity %u, pos %u\n",
buscfg->bus.csi2.lanecfg.clk.pol,
buscfg->bus.csi2.lanecfg.clk.pos);
buscfg->bus.csi2.num_data_lanes =
- vep.bus.mipi_csi2.num_data_lanes;
+ vep->bus.mipi_csi2.num_data_lanes;
for (i = 0; i < buscfg->bus.csi2.num_data_lanes; i++) {
buscfg->bus.csi2.lanecfg.data[i].pos =
- vep.bus.mipi_csi2.data_lanes[i];
+ vep->bus.mipi_csi2.data_lanes[i];
buscfg->bus.csi2.lanecfg.data[i].pol =
- vep.bus.mipi_csi2.lane_polarities[i + 1];
+ vep->bus.mipi_csi2.lane_polarities[i + 1];
dev_dbg(dev,
"data lane %u polarity %u, pos %u\n", i,
buscfg->bus.csi2.lanecfg.data[i].pol,
@@ -2137,57 +2135,13 @@ static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
default:
dev_warn(dev, "%pOF: invalid interface %u\n",
- to_of_node(fwnode), vep.base.port);
+ to_of_node(vep->base.local_fwnode), vep->base.port);
return -EINVAL;
}
return 0;
}
-static int isp_fwnodes_parse(struct device *dev,
- struct v4l2_async_notifier *notifier)
-{
- struct fwnode_handle *fwnode = NULL;
-
- notifier->subdevs = devm_kcalloc(
- dev, ISP_MAX_SUBDEVS, sizeof(*notifier->subdevs), GFP_KERNEL);
- if (!notifier->subdevs)
- return -ENOMEM;
-
- while (notifier->num_subdevs < ISP_MAX_SUBDEVS &&
- (fwnode = fwnode_graph_get_next_endpoint(
- of_fwnode_handle(dev->of_node), fwnode))) {
- struct isp_async_subdev *isd;
-
- isd = devm_kzalloc(dev, sizeof(*isd), GFP_KERNEL);
- if (!isd)
- goto error;
-
- if (isp_fwnode_parse(dev, fwnode, isd)) {
- devm_kfree(dev, isd);
- continue;
- }
-
- notifier->subdevs[notifier->num_subdevs] = &isd->asd;
-
- isd->asd.match.fwnode.fwnode =
- fwnode_graph_get_remote_port_parent(fwnode);
- if (!isd->asd.match.fwnode.fwnode) {
- dev_warn(dev, "bad remote port parent\n");
- goto error;
- }
-
- isd->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- notifier->num_subdevs++;
- }
-
- return notifier->num_subdevs;
-
-error:
- fwnode_handle_put(fwnode);
- return -EINVAL;
-}
-
static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
{
struct isp_device *isp = container_of(async, struct isp_device,
@@ -2201,7 +2155,7 @@ static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
return ret;
list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
- if (!sd->asd)
+ if (sd->notifier != &isp->notifier)
continue;
ret = isp_link_entity(isp, &sd->entity,
@@ -2217,6 +2171,10 @@ static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
return media_device_register(&isp->media_dev);
}
+static const struct v4l2_async_notifier_operations isp_subdev_notifier_ops = {
+ .complete = isp_subdev_notifier_complete,
+};
+
/*
* isp_probe - Probe ISP platform device
* @pdev: Pointer to ISP platform device
@@ -2256,15 +2214,17 @@ static int isp_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = isp_fwnodes_parse(&pdev->dev, &isp->notifier);
- if (ret < 0)
- return ret;
-
isp->autoidle = autoidle;
mutex_init(&isp->isp_mutex);
spin_lock_init(&isp->stat_lock);
+ ret = v4l2_async_notifier_parse_fwnode_endpoints(
+ &pdev->dev, &isp->notifier, sizeof(struct isp_async_subdev),
+ isp_fwnode_parse);
+ if (ret < 0)
+ goto error;
+
isp->dev = &pdev->dev;
isp->ref_count = 0;
@@ -2385,7 +2345,7 @@ static int isp_probe(struct platform_device *pdev)
if (ret < 0)
goto error_register_entities;
- isp->notifier.complete = isp_subdev_notifier_complete;
+ isp->notifier.ops = &isp_subdev_notifier_ops;
ret = v4l2_async_notifier_register(&isp->v4l2_dev, &isp->notifier);
if (ret)
@@ -2406,6 +2366,7 @@ error_isp:
isp_xclk_cleanup(isp);
__omap3isp_put(isp, false);
error:
+ v4l2_async_notifier_cleanup(&isp->notifier);
mutex_destroy(&isp->isp_mutex);
return ret;
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index e528df6efc09..8b9043db94b3 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -220,14 +220,11 @@ struct isp_device {
unsigned int sbl_resources;
unsigned int subclk_resources;
-
-#define ISP_MAX_SUBDEVS 8
- struct v4l2_subdev *subdevs[ISP_MAX_SUBDEVS];
};
struct isp_async_subdev {
- struct isp_bus_cfg bus;
struct v4l2_async_subdev asd;
+ struct isp_bus_cfg bus;
};
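The member reordering above puts the generic v4l2_async_subdev first because v4l2_async_notifier_parse_fwnode_endpoints() allocates the whole per-endpoint structure itself (of the size the caller passes in) and treats its start as the asd; the parse callback then recovers the driver wrapper with container_of(). A minimal sketch with hypothetical names:

#include <media/v4l2-async.h>
#include <media/v4l2-fwnode.h>

struct foo_async_subdev {
	struct v4l2_async_subdev asd;	/* must stay first: allocated by the core */
	struct foo_bus_cfg bus;
};

static int foo_fwnode_parse(struct device *dev,
			    struct v4l2_fwnode_endpoint *vep,
			    struct v4l2_async_subdev *asd)
{
	struct foo_async_subdev *isd =
		container_of(asd, struct foo_async_subdev, asd);

	/* fill isd->bus from vep->base.port and vep->bus here */
	return 0;
}

/*
 * in probe():
 *	v4l2_async_notifier_parse_fwnode_endpoints(dev, &notifier,
 *			sizeof(struct foo_async_subdev), foo_fwnode_parse);
 * and v4l2_async_notifier_cleanup(&notifier) on the error/remove paths,
 * as the omap3isp hunks above now do.
 */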
#define v4l2_subdev_to_bus_cfg(sd) \
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index edca993c2b1f..9d3f0cb1d95a 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2221,6 +2221,11 @@ static void pxa_camera_sensor_unbind(struct v4l2_async_notifier *notifier,
mutex_unlock(&pcdev->mlock);
}
+static const struct v4l2_async_notifier_operations pxa_camera_sensor_ops = {
+ .bound = pxa_camera_sensor_bound,
+ .unbind = pxa_camera_sensor_unbind,
+};
+
/*
* Driver probe, remove, suspend and resume operations
*/
@@ -2489,8 +2494,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->asds[0] = &pcdev->asd;
pcdev->notifier.subdevs = pcdev->asds;
pcdev->notifier.num_subdevs = 1;
- pcdev->notifier.bound = pxa_camera_sensor_bound;
- pcdev->notifier.unbind = pxa_camera_sensor_unbind;
+ pcdev->notifier.ops = &pxa_camera_sensor_ops;
if (!of_have_populated_dt())
pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C;
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
index b22d2dfcd3c2..55232a912950 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
@@ -622,6 +622,9 @@ static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
+ } else {
+ /* On current devices output->wm_num is always <= 2 */
+ break;
}
if (output->wm_idx[i] % 2 == 1)
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-video.c b/drivers/media/platform/qcom/camss-8x16/camss-video.c
index cf4219e871bd..ffaa2849e0c1 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-video.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss-video.c
@@ -21,7 +21,6 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
-#include <media/videobuf-core.h>
#include <media/videobuf2-dma-sg.h>
#include "camss-video.h"
diff --git a/drivers/media/platform/qcom/camss-8x16/camss.c b/drivers/media/platform/qcom/camss-8x16/camss.c
index a3760b5dd1d1..390a42c17b66 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss.c
@@ -601,6 +601,11 @@ static int camss_subdev_notifier_complete(struct v4l2_async_notifier *async)
return media_device_register(&camss->media_dev);
}
+static const struct v4l2_async_notifier_operations camss_subdev_notifier_ops = {
+ .bound = camss_subdev_notifier_bound,
+ .complete = camss_subdev_notifier_complete,
+};
+
static const struct media_device_ops camss_media_ops = {
.link_notify = v4l2_pipeline_link_notify,
};
@@ -655,8 +660,7 @@ static int camss_probe(struct platform_device *pdev)
goto err_register_entities;
if (camss->notifier.num_subdevs) {
- camss->notifier.bound = camss_subdev_notifier_bound;
- camss->notifier.complete = camss_subdev_notifier_complete;
+ camss->notifier.ops = &camss_subdev_notifier_ops;
ret = v4l2_async_notifier_register(&camss->v4l2_dev,
&camss->notifier);
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index cba092bcb76d..a0fe80df0cbd 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -194,7 +194,6 @@ struct venus_buffer {
* @fh: a holder of v4l file handle structure
* @streamon_cap: stream on flag for capture queue
* @streamon_out: stream on flag for output queue
- * @cmd_stop: a flag to signal encoder/decoder commands
* @width: current capture width
* @height: current capture height
* @out_width: current output width
@@ -258,7 +257,6 @@ struct venus_inst {
} controls;
struct v4l2_fh fh;
unsigned int streamon_cap, streamon_out;
- bool cmd_stop;
u32 width;
u32 height;
u32 out_width;
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 9b2a401a4891..0ce9559a2924 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -623,13 +623,6 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
mutex_lock(&inst->lock);
- if (inst->cmd_stop) {
- vbuf->flags |= V4L2_BUF_FLAG_LAST;
- v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
- inst->cmd_stop = false;
- goto unlock;
- }
-
v4l2_m2m_buf_queue(m2m_ctx, vbuf);
if (!(inst->streamon_out & inst->streamon_cap))
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index e374c7d1a618..1baf78d3c02d 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -478,6 +478,7 @@ int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd)
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(hfi_session_process_buf);
irqreturn_t hfi_isr_thread(int irq, void *dev_id)
{
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 1caae8feaa36..734ce11b0ed0 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -344,7 +344,7 @@ static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
desc->attrs = DMA_ATTR_WRITE_COMBINE;
desc->size = ALIGN(size, SZ_4K);
- desc->kva = dma_alloc_attrs(dev, size, &desc->da, GFP_KERNEL,
+ desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
desc->attrs);
if (!desc->kva)
return -ENOMEM;
@@ -710,10 +710,8 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev)
if (ret)
return ret;
- hdev->ifaceq_table.kva = desc.kva;
- hdev->ifaceq_table.da = desc.da;
- hdev->ifaceq_table.size = IFACEQ_TABLE_SIZE;
- offset = hdev->ifaceq_table.size;
+ hdev->ifaceq_table = desc;
+ offset = IFACEQ_TABLE_SIZE;
for (i = 0; i < IFACEQ_NUM; i++) {
queue = &hdev->queues[i];
@@ -755,9 +753,7 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev)
if (ret) {
hdev->sfr.da = 0;
} else {
- hdev->sfr.da = desc.da;
- hdev->sfr.kva = desc.kva;
- hdev->sfr.size = ALIGNED_SFR_SIZE;
+ hdev->sfr = desc;
sfr = hdev->sfr.kva;
sfr->buf_size = ALIGNED_SFR_SIZE;
}
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index da611a5eb670..c9e9576bb08a 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -469,8 +469,14 @@ static int vdec_subscribe_event(struct v4l2_fh *fh,
static int
vdec_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
{
- if (cmd->cmd != V4L2_DEC_CMD_STOP)
+ switch (cmd->cmd) {
+ case V4L2_DEC_CMD_STOP:
+ if (cmd->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
+ return -EINVAL;
+ break;
+ default:
return -EINVAL;
+ }
return 0;
}
@@ -479,6 +485,7 @@ static int
vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
{
struct venus_inst *inst = to_inst(file);
+ struct hfi_frame_data fdata = {0};
int ret;
ret = vdec_try_decoder_cmd(file, fh, cmd);
@@ -486,12 +493,23 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
return ret;
mutex_lock(&inst->lock);
- inst->cmd_stop = true;
- mutex_unlock(&inst->lock);
- hfi_session_flush(inst);
+ /*
+ * Implement V4L2_DEC_CMD_STOP by enqueueing an empty buffer on the decoder
+ * input to signal EOS.
+ */
+ if (!(inst->streamon_out & inst->streamon_cap))
+ goto unlock;
+
+ fdata.buffer_type = HFI_BUFFER_INPUT;
+ fdata.flags |= HFI_BUFFERFLAG_EOS;
+ fdata.device_addr = 0xdeadbeef;
- return 0;
+ ret = hfi_session_process_buf(inst, &fdata);
+
+unlock:
+ mutex_unlock(&inst->lock);
+ return ret;
}
static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
@@ -718,7 +736,6 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
inst->reconfig = false;
inst->sequence_cap = 0;
inst->sequence_out = 0;
- inst->cmd_stop = false;
ret = vdec_init_session(inst);
if (ret)
@@ -807,11 +824,6 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++;
- if (inst->cmd_stop) {
- vbuf->flags |= V4L2_BUF_FLAG_LAST;
- inst->cmd_stop = false;
- }
-
if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
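For context, an illustrative userspace view of the stop flow the vdec hunks above implement (hedged sketch, standard V4L2 decoder semantics, not part of the patch): issue V4L2_DEC_CMD_STOP, then drain the capture queue until a buffer carrying V4L2_BUF_FLAG_LAST comes back.

#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void drain_decoder(int fd)
{
	struct v4l2_decoder_cmd cmd = { .cmd = V4L2_DEC_CMD_STOP };

	ioctl(fd, VIDIOC_DECODER_CMD, &cmd);	/* driver queues an EOS buffer to firmware */

	for (;;) {
		struct v4l2_plane planes[1] = {};
		struct v4l2_buffer buf = {
			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
			.memory = V4L2_MEMORY_MMAP,
			.m.planes = planes,
			.length = 1,
		};

		if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
			break;
		if (buf.flags & V4L2_BUF_FLAG_LAST)	/* last decoded frame drained */
			break;
	}
}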
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 6f123a387cf9..3fcf0e9b7b29 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -963,13 +963,12 @@ static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type,
if (!vbuf)
return;
- vb = &vbuf->vb2_buf;
- vb->planes[0].bytesused = bytesused;
- vb->planes[0].data_offset = data_offset;
-
vbuf->flags = flags;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ vb = &vbuf->vb2_buf;
+ vb2_set_plane_payload(vb, 0, bytesused + data_offset);
+ vb->planes[0].data_offset = data_offset;
vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++;
} else {
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 142de447aaaa..108d776f3265 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-fwnode.h>
#include "rcar-vin.h"
@@ -77,14 +78,14 @@ static int rvin_digital_notify_complete(struct v4l2_async_notifier *notifier)
int ret;
/* Verify subdevices mbus format */
- if (!rvin_mbus_supported(&vin->digital)) {
+ if (!rvin_mbus_supported(vin->digital)) {
vin_err(vin, "Unsupported media bus format for %s\n",
- vin->digital.subdev->name);
+ vin->digital->subdev->name);
return -EINVAL;
}
vin_dbg(vin, "Found media bus format for %s: %d\n",
- vin->digital.subdev->name, vin->digital.code);
+ vin->digital->subdev->name, vin->digital->code);
ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
if (ret < 0) {
@@ -103,7 +104,7 @@ static void rvin_digital_notify_unbind(struct v4l2_async_notifier *notifier,
vin_dbg(vin, "unbind digital subdev %s\n", subdev->name);
rvin_v4l2_remove(vin);
- vin->digital.subdev = NULL;
+ vin->digital->subdev = NULL;
}
static int rvin_digital_notify_bound(struct v4l2_async_notifier *notifier,
@@ -120,117 +121,75 @@ static int rvin_digital_notify_bound(struct v4l2_async_notifier *notifier,
ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SOURCE);
if (ret < 0)
return ret;
- vin->digital.source_pad = ret;
+ vin->digital->source_pad = ret;
ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SINK);
- vin->digital.sink_pad = ret < 0 ? 0 : ret;
+ vin->digital->sink_pad = ret < 0 ? 0 : ret;
- vin->digital.subdev = subdev;
+ vin->digital->subdev = subdev;
vin_dbg(vin, "bound subdev %s source pad: %u sink pad: %u\n",
- subdev->name, vin->digital.source_pad,
- vin->digital.sink_pad);
+ subdev->name, vin->digital->source_pad,
+ vin->digital->sink_pad);
return 0;
}
+static const struct v4l2_async_notifier_operations rvin_digital_notify_ops = {
+ .bound = rvin_digital_notify_bound,
+ .unbind = rvin_digital_notify_unbind,
+ .complete = rvin_digital_notify_complete,
+};
+
-static int rvin_digitial_parse_v4l2(struct rvin_dev *vin,
- struct device_node *ep,
- struct v4l2_mbus_config *mbus_cfg)
+static int rvin_digital_parse_v4l2(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
{
- struct v4l2_fwnode_endpoint v4l2_ep;
- int ret;
+ struct rvin_dev *vin = dev_get_drvdata(dev);
+ struct rvin_graph_entity *rvge =
+ container_of(asd, struct rvin_graph_entity, asd);
- ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &v4l2_ep);
- if (ret) {
- vin_err(vin, "Could not parse v4l2 endpoint\n");
- return -EINVAL;
- }
+ if (vep->base.port || vep->base.id)
+ return -ENOTCONN;
- mbus_cfg->type = v4l2_ep.bus_type;
+ rvge->mbus_cfg.type = vep->bus_type;
- switch (mbus_cfg->type) {
+ switch (rvge->mbus_cfg.type) {
case V4L2_MBUS_PARALLEL:
vin_dbg(vin, "Found PARALLEL media bus\n");
- mbus_cfg->flags = v4l2_ep.bus.parallel.flags;
+ rvge->mbus_cfg.flags = vep->bus.parallel.flags;
break;
case V4L2_MBUS_BT656:
vin_dbg(vin, "Found BT656 media bus\n");
- mbus_cfg->flags = 0;
+ rvge->mbus_cfg.flags = 0;
break;
default:
vin_err(vin, "Unknown media bus type\n");
return -EINVAL;
}
- return 0;
-}
-
-static int rvin_digital_graph_parse(struct rvin_dev *vin)
-{
- struct device_node *ep, *np;
- int ret;
-
- vin->digital.asd.match.fwnode.fwnode = NULL;
- vin->digital.subdev = NULL;
-
- /*
- * Port 0 id 0 is local digital input, try to get it.
- * Not all instances can or will have this, that is OK
- */
- ep = of_graph_get_endpoint_by_regs(vin->dev->of_node, 0, 0);
- if (!ep)
- return 0;
-
- np = of_graph_get_remote_port_parent(ep);
- if (!np) {
- vin_err(vin, "No remote parent for digital input\n");
- of_node_put(ep);
- return -EINVAL;
- }
- of_node_put(np);
-
- ret = rvin_digitial_parse_v4l2(vin, ep, &vin->digital.mbus_cfg);
- of_node_put(ep);
- if (ret)
- return ret;
-
- vin->digital.asd.match.fwnode.fwnode = of_fwnode_handle(np);
- vin->digital.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ vin->digital = rvge;
return 0;
}
static int rvin_digital_graph_init(struct rvin_dev *vin)
{
- struct v4l2_async_subdev **subdevs = NULL;
int ret;
- ret = rvin_digital_graph_parse(vin);
+ ret = v4l2_async_notifier_parse_fwnode_endpoints(
+ vin->dev, &vin->notifier,
+ sizeof(struct rvin_graph_entity), rvin_digital_parse_v4l2);
if (ret)
return ret;
- if (!vin->digital.asd.match.fwnode.fwnode) {
- vin_dbg(vin, "No digital subdevice found\n");
+ if (!vin->digital)
return -ENODEV;
- }
-
- /* Register the subdevices notifier. */
- subdevs = devm_kzalloc(vin->dev, sizeof(*subdevs), GFP_KERNEL);
- if (subdevs == NULL)
- return -ENOMEM;
-
- subdevs[0] = &vin->digital.asd;
vin_dbg(vin, "Found digital subdevice %pOF\n",
- to_of_node(subdevs[0]->match.fwnode.fwnode));
-
- vin->notifier.num_subdevs = 1;
- vin->notifier.subdevs = subdevs;
- vin->notifier.bound = rvin_digital_notify_bound;
- vin->notifier.unbind = rvin_digital_notify_unbind;
- vin->notifier.complete = rvin_digital_notify_complete;
+ to_of_node(vin->digital->asd.match.fwnode.fwnode));
+ vin->notifier.ops = &rvin_digital_notify_ops;
ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
@@ -290,6 +249,8 @@ static int rcar_vin_probe(struct platform_device *pdev)
if (ret)
return ret;
+ platform_set_drvdata(pdev, vin);
+
ret = rvin_digital_graph_init(vin);
if (ret < 0)
goto error;
@@ -297,11 +258,10 @@ static int rcar_vin_probe(struct platform_device *pdev)
pm_suspend_ignore_children(&pdev->dev, true);
pm_runtime_enable(&pdev->dev);
- platform_set_drvdata(pdev, vin);
-
return 0;
error:
rvin_dma_remove(vin);
+ v4l2_async_notifier_cleanup(&vin->notifier);
return ret;
}
@@ -313,6 +273,7 @@ static int rcar_vin_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
v4l2_async_notifier_unregister(&vin->notifier);
+ v4l2_async_notifier_cleanup(&vin->notifier);
rvin_dma_remove(vin);
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index b136844499f6..23fdff7a7370 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -183,7 +183,7 @@ static int rvin_setup(struct rvin_dev *vin)
/*
* Input interface
*/
- switch (vin->digital.code) {
+ switch (vin->digital->code) {
case MEDIA_BUS_FMT_YUYV8_1X16:
/* BT.601/BT.1358 16bit YCbCr422 */
vnmc |= VNMC_INF_YUV16;
@@ -191,7 +191,7 @@ static int rvin_setup(struct rvin_dev *vin)
break;
case MEDIA_BUS_FMT_UYVY8_2X8:
/* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
- vnmc |= vin->digital.mbus_cfg.type == V4L2_MBUS_BT656 ?
+ vnmc |= vin->digital->mbus_cfg.type == V4L2_MBUS_BT656 ?
VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
input_is_yuv = true;
break;
@@ -200,7 +200,7 @@ static int rvin_setup(struct rvin_dev *vin)
break;
case MEDIA_BUS_FMT_UYVY10_2X10:
/* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
- vnmc |= vin->digital.mbus_cfg.type == V4L2_MBUS_BT656 ?
+ vnmc |= vin->digital->mbus_cfg.type == V4L2_MBUS_BT656 ?
VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
input_is_yuv = true;
break;
@@ -212,11 +212,11 @@ static int rvin_setup(struct rvin_dev *vin)
dmr2 = VNDMR2_FTEV | VNDMR2_VLV(1);
/* Hsync Signal Polarity Select */
- if (!(vin->digital.mbus_cfg.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
+ if (!(vin->digital->mbus_cfg.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
dmr2 |= VNDMR2_HPS;
/* Vsync Signal Polarity Select */
- if (!(vin->digital.mbus_cfg.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
+ if (!(vin->digital->mbus_cfg.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
dmr2 |= VNDMR2_VPS;
/*
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index dd37ea811680..b479b882da12 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -111,7 +111,7 @@ static int rvin_reset_format(struct rvin_dev *vin)
struct v4l2_mbus_framefmt *mf = &fmt.format;
int ret;
- fmt.pad = vin->digital.source_pad;
+ fmt.pad = vin->digital->source_pad;
ret = v4l2_subdev_call(vin_to_source(vin), pad, get_fmt, NULL, &fmt);
if (ret)
@@ -172,13 +172,13 @@ static int __rvin_try_format_source(struct rvin_dev *vin,
sd = vin_to_source(vin);
- v4l2_fill_mbus_format(&format.format, pix, vin->digital.code);
+ v4l2_fill_mbus_format(&format.format, pix, vin->digital->code);
pad_cfg = v4l2_subdev_alloc_pad_config(sd);
if (pad_cfg == NULL)
return -ENOMEM;
- format.pad = vin->digital.source_pad;
+ format.pad = vin->digital->source_pad;
field = pix->field;
@@ -555,7 +555,7 @@ static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
if (timings->pad)
return -EINVAL;
- timings->pad = vin->digital.sink_pad;
+ timings->pad = vin->digital->sink_pad;
ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings);
@@ -607,7 +607,7 @@ static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
if (cap->pad)
return -EINVAL;
- cap->pad = vin->digital.sink_pad;
+ cap->pad = vin->digital->sink_pad;
ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
@@ -625,7 +625,7 @@ static int rvin_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
if (edid->pad)
return -EINVAL;
- edid->pad = vin->digital.sink_pad;
+ edid->pad = vin->digital->sink_pad;
ret = v4l2_subdev_call(sd, pad, get_edid, edid);
@@ -643,7 +643,7 @@ static int rvin_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
if (edid->pad)
return -EINVAL;
- edid->pad = vin->digital.sink_pad;
+ edid->pad = vin->digital->sink_pad;
ret = v4l2_subdev_call(sd, pad, set_edid, edid);
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index 9bfb5a7c4dc4..5382078143fb 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -126,7 +126,7 @@ struct rvin_dev {
struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_async_notifier notifier;
- struct rvin_graph_entity digital;
+ struct rvin_graph_entity *digital;
struct mutex lock;
struct vb2_queue queue;
@@ -145,7 +145,7 @@ struct rvin_dev {
struct v4l2_rect compose;
};
-#define vin_to_source(vin) vin->digital.subdev
+#define vin_to_source(vin) ((vin)->digital->subdev)
/* Debug */
#define vin_dbg(d, fmt, arg...) dev_dbg(d->dev, fmt, ##arg)
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 522364ff0d5d..63c94f4028a7 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -630,7 +630,7 @@ static int rcar_drif_enable_rx(struct rcar_drif_sdr *sdr)
{
unsigned int i;
u32 ctr;
- int ret;
+ int ret = -EINVAL;
/*
* When both internal channels are enabled, they can be synchronized
@@ -1185,6 +1185,12 @@ error:
return ret;
}
+static const struct v4l2_async_notifier_operations rcar_drif_notify_ops = {
+ .bound = rcar_drif_notify_bound,
+ .unbind = rcar_drif_notify_unbind,
+ .complete = rcar_drif_notify_complete,
+};
+
/* Read endpoint properties */
static void rcar_drif_get_ep_properties(struct rcar_drif_sdr *sdr,
struct fwnode_handle *fwnode)
@@ -1347,9 +1353,7 @@ static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
if (ret)
goto error;
- sdr->notifier.bound = rcar_drif_notify_bound;
- sdr->notifier.unbind = rcar_drif_notify_unbind;
- sdr->notifier.complete = rcar_drif_notify_complete;
+ sdr->notifier.ops = &rcar_drif_notify_ops;
/* Register notifier */
ret = v4l2_async_notifier_register(&sdr->v4l2_dev, &sdr->notifier);
diff --git a/drivers/media/platform/rockchip/rga/Makefile b/drivers/media/platform/rockchip/rga/Makefile
new file mode 100644
index 000000000000..92fe25490ccd
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/Makefile
@@ -0,0 +1,3 @@
+rockchip-rga-objs := rga.o rga-hw.o rga-buf.o
+
+obj-$(CONFIG_VIDEO_ROCKCHIP_RGA) += rockchip-rga.o
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
new file mode 100644
index 000000000000..49cacc7a48d1
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2017 Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "rga-hw.h"
+#include "rga.h"
+
+static int
+rga_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(vq);
+ struct rga_frame *f = rga_get_frame(ctx, vq->type);
+
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ if (*nplanes)
+ return sizes[0] < f->size ? -EINVAL : 0;
+
+ sizes[0] = f->size;
+ *nplanes = 1;
+
+ return 0;
+}
+
+static int rga_buf_prepare(struct vb2_buffer *vb)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct rga_frame *f = rga_get_frame(ctx, vb->vb2_queue->type);
+
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ vb2_set_plane_payload(vb, 0, f->size);
+
+ return 0;
+}
+
+static void rga_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(q);
+ struct rockchip_rga *rga = ctx->rga;
+ int ret, i;
+
+ ret = pm_runtime_get_sync(rga->dev);
+
+ if (ret >= 0)
+ return 0;
+
+ for (i = 0; i < q->num_buffers; ++i) {
+ if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(q->bufs[i]),
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+
+ return ret;
+}
+
+static void rga_buf_stop_streaming(struct vb2_queue *q)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(q);
+ struct rockchip_rga *rga = ctx->rga;
+ struct vb2_v4l2_buffer *vbuf;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ break;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+
+ pm_runtime_put(rga->dev);
+}
+
+const struct vb2_ops rga_qops = {
+ .queue_setup = rga_queue_setup,
+ .buf_prepare = rga_buf_prepare,
+ .buf_queue = rga_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = rga_buf_start_streaming,
+ .stop_streaming = rga_buf_stop_streaming,
+};
+
+/*
+ * The RGA MMU is a single-level MMU, so it can't be used through the
+ * IOMMU API. We use it more like a scatter-gather list.
+ */
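+/*
+ * For example, a plane spanning two 4 KiB pages starting at physical
+ * address 0x10000000 (an illustrative address) produces a table with one
+ * entry per page:
+ *
+ *   pages[0] = 0x10000000;
+ *   pages[1] = 0x10001000;
+ */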
+void rga_buf_map(struct vb2_buffer *vb)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct rockchip_rga *rga = ctx->rga;
+ struct sg_table *sgt;
+ struct scatterlist *sgl;
+ unsigned int *pages;
+ unsigned int address, len, i, p;
+ unsigned int mapped_size = 0;
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ pages = rga->src_mmu_pages;
+ else
+ pages = rga->dst_mmu_pages;
+
+ /* Create local MMU table for RGA */
+ sgt = vb2_plane_cookie(vb, 0);
+
+ for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
+ len = sg_dma_len(sgl) >> PAGE_SHIFT;
+ address = sg_phys(sgl);
+
+ for (p = 0; p < len; p++) {
+ dma_addr_t phys = address + (p << PAGE_SHIFT);
+
+ pages[mapped_size + p] = phys;
+ }
+
+ mapped_size += len;
+ }
+
+ /* sync local MMU table for RGA */
+ dma_sync_single_for_device(rga->dev, virt_to_phys(pages),
+ 8 * PAGE_SIZE, DMA_BIDIRECTIONAL);
+}
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
new file mode 100644
index 000000000000..96d1b1b3fe8e
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga-hw.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "rga-hw.h"
+#include "rga.h"
+
+enum e_rga_start_pos {
+ LT = 0,
+ LB = 1,
+ RT = 2,
+ RB = 3,
+};
+
+struct rga_addr_offset {
+ unsigned int y_off;
+ unsigned int u_off;
+ unsigned int v_off;
+};
+
+struct rga_corners_addr_offset {
+ struct rga_addr_offset left_top;
+ struct rga_addr_offset right_top;
+ struct rga_addr_offset left_bottom;
+ struct rga_addr_offset right_bottom;
+};
+
+static unsigned int rga_get_scaling(unsigned int src, unsigned int dst)
+{
+ /*
+ * The rga hw scaling factor is a normalized inverse of the
+ * scaling factor.
+ * For example: When source width is 100 and destination width is 200
+ * (scaling of 2x), then the hw factor is NC * 100 / 200.
+ * The normalization factor (NC) is 2^16 = 0x10000.
+ */
+
+ return (src > dst) ? ((dst << 16) / src) : ((src << 16) / dst);
+}
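+
+/*
+ * Worked example: downscaling 200 -> 100 takes the first branch and yields
+ * (100 << 16) / 200 = 0x8000; upscaling 100 -> 200 takes the second branch
+ * and also yields (100 << 16) / 200 = 0x8000, i.e. 0.5 in 16.16 fixed
+ * point, the normalized inverse of the 2x scale.
+ */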
+
+static struct rga_corners_addr_offset
+rga_get_addr_offset(struct rga_frame *frm, unsigned int x, unsigned int y,
+ unsigned int w, unsigned int h)
+{
+ struct rga_corners_addr_offset offsets;
+ struct rga_addr_offset *lt, *lb, *rt, *rb;
+ unsigned int x_div = 0,
+ y_div = 0, uv_stride = 0, pixel_width = 0, uv_factor = 0;
+
+ lt = &offsets.left_top;
+ lb = &offsets.left_bottom;
+ rt = &offsets.right_top;
+ rb = &offsets.right_bottom;
+
+ x_div = frm->fmt->x_div;
+ y_div = frm->fmt->y_div;
+ uv_factor = frm->fmt->uv_factor;
+ uv_stride = frm->stride / x_div;
+ pixel_width = frm->stride / frm->width;
+
+ lt->y_off = y * frm->stride + x * pixel_width;
+ lt->u_off =
+ frm->width * frm->height + (y / y_div) * uv_stride + x / x_div;
+ lt->v_off = lt->u_off + frm->width * frm->height / uv_factor;
+
+ lb->y_off = lt->y_off + (h - 1) * frm->stride;
+ lb->u_off = lt->u_off + (h / y_div - 1) * uv_stride;
+ lb->v_off = lt->v_off + (h / y_div - 1) * uv_stride;
+
+ rt->y_off = lt->y_off + (w - 1) * pixel_width;
+ rt->u_off = lt->u_off + w / x_div - 1;
+ rt->v_off = lt->v_off + w / x_div - 1;
+
+ rb->y_off = lb->y_off + (w - 1) * pixel_width;
+ rb->u_off = lb->u_off + w / x_div - 1;
+ rb->v_off = lb->v_off + w / x_div - 1;
+
+ return offsets;
+}
+
+static struct rga_addr_offset *
+rga_lookup_draw_pos(struct rga_corners_addr_offset *offsets,
+ u32 rotate_mode, u32 mirr_mode)
+{
+ static enum e_rga_start_pos rot_mir_point_matrix[4][4] = {
+ {
+ LT, RT, LB, RB,
+ },
+ {
+ RT, LT, RB, LB,
+ },
+ {
+ RB, LB, RT, LT,
+ },
+ {
+ LB, RB, LT, RT,
+ },
+ };
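+ /* E.g. a 90 degree rotation with no mirroring (indices [1][0]) selects the right-top corner. */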
+
+ if (!offsets)
+ return NULL;
+
+ switch (rot_mir_point_matrix[rotate_mode][mirr_mode]) {
+ case LT:
+ return &offsets->left_top;
+ case LB:
+ return &offsets->left_bottom;
+ case RT:
+ return &offsets->right_top;
+ case RB:
+ return &offsets->right_bottom;
+ }
+
+ return NULL;
+}
+
+static void rga_cmd_set_src_addr(struct rga_ctx *ctx, void *mmu_pages)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int reg;
+
+ reg = RGA_MMU_SRC_BASE - RGA_MODE_BASE_REG;
+ dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+
+ reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
+ dest[reg >> 2] |= 0x7;
+}
+
+static void rga_cmd_set_src1_addr(struct rga_ctx *ctx, void *mmu_pages)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int reg;
+
+ reg = RGA_MMU_SRC1_BASE - RGA_MODE_BASE_REG;
+ dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+
+ reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
+ dest[reg >> 2] |= 0x7 << 4;
+}
+
+static void rga_cmd_set_dst_addr(struct rga_ctx *ctx, void *mmu_pages)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int reg;
+
+ reg = RGA_MMU_DST_BASE - RGA_MODE_BASE_REG;
+ dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+
+ reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
+ dest[reg >> 2] |= 0x7 << 8;
+}
+
+static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int scale_dst_w, scale_dst_h;
+ unsigned int src_h, src_w, src_x, src_y, dst_h, dst_w, dst_x, dst_y;
+ union rga_src_info src_info;
+ union rga_dst_info dst_info;
+ union rga_src_x_factor x_factor;
+ union rga_src_y_factor y_factor;
+ union rga_src_vir_info src_vir_info;
+ union rga_src_act_info src_act_info;
+ union rga_dst_vir_info dst_vir_info;
+ union rga_dst_act_info dst_act_info;
+
+ struct rga_addr_offset *dst_offset;
+ struct rga_corners_addr_offset offsets;
+ struct rga_corners_addr_offset src_offsets;
+
+ src_h = ctx->in.crop.height;
+ src_w = ctx->in.crop.width;
+ src_x = ctx->in.crop.left;
+ src_y = ctx->in.crop.top;
+ dst_h = ctx->out.crop.height;
+ dst_w = ctx->out.crop.width;
+ dst_x = ctx->out.crop.left;
+ dst_y = ctx->out.crop.top;
+
+ src_info.val = dest[(RGA_SRC_INFO - RGA_MODE_BASE_REG) >> 2];
+ dst_info.val = dest[(RGA_DST_INFO - RGA_MODE_BASE_REG) >> 2];
+ x_factor.val = dest[(RGA_SRC_X_FACTOR - RGA_MODE_BASE_REG) >> 2];
+ y_factor.val = dest[(RGA_SRC_Y_FACTOR - RGA_MODE_BASE_REG) >> 2];
+ src_vir_info.val = dest[(RGA_SRC_VIR_INFO - RGA_MODE_BASE_REG) >> 2];
+ src_act_info.val = dest[(RGA_SRC_ACT_INFO - RGA_MODE_BASE_REG) >> 2];
+ dst_vir_info.val = dest[(RGA_DST_VIR_INFO - RGA_MODE_BASE_REG) >> 2];
+ dst_act_info.val = dest[(RGA_DST_ACT_INFO - RGA_MODE_BASE_REG) >> 2];
+
+ src_info.data.format = ctx->in.fmt->hw_format;
+ src_info.data.swap = ctx->in.fmt->color_swap;
+ dst_info.data.format = ctx->out.fmt->hw_format;
+ dst_info.data.swap = ctx->out.fmt->color_swap;
+
+ if (ctx->in.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
+ if (ctx->out.fmt->hw_format < RGA_COLOR_FMT_YUV422SP) {
+ switch (ctx->in.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ src_info.data.csc_mode =
+ RGA_SRC_CSC_MODE_BT709_R0;
+ break;
+ default:
+ src_info.data.csc_mode =
+ RGA_SRC_CSC_MODE_BT601_R0;
+ break;
+ }
+ }
+ }
+
+ if (ctx->out.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
+ switch (ctx->out.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ dst_info.data.csc_mode = RGA_DST_CSC_MODE_BT709_R0;
+ break;
+ default:
+ dst_info.data.csc_mode = RGA_DST_CSC_MODE_BT601_R0;
+ break;
+ }
+ }
+
+ if (ctx->vflip)
+ src_info.data.mir_mode |= RGA_SRC_MIRR_MODE_X;
+
+ if (ctx->hflip)
+ src_info.data.mir_mode |= RGA_SRC_MIRR_MODE_Y;
+
+ switch (ctx->rotate) {
+ case 90:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_90_DEGREE;
+ break;
+ case 180:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_180_DEGREE;
+ break;
+ case 270:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_270_DEGREE;
+ break;
+ default:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_0_DEGREE;
+ break;
+ }
+
+ /*
+ * Calculate the up/down scaling mode/factor.
+ *
+ * The RGA scales the picture first and rotates it second, so we
+ * need to swap the width/height when the rotation is 90/270 degrees.
+ */
+ if (src_info.data.rot_mode == RGA_SRC_ROT_MODE_90_DEGREE ||
+ src_info.data.rot_mode == RGA_SRC_ROT_MODE_270_DEGREE) {
+ if (rga->version.major == 0 || rga->version.minor == 0) {
+ if (dst_w == src_h)
+ src_h -= 8;
+ if (abs(src_w - dst_h) < 16)
+ src_w -= 16;
+ }
+
+ scale_dst_h = dst_w;
+ scale_dst_w = dst_h;
+ } else {
+ scale_dst_w = dst_w;
+ scale_dst_h = dst_h;
+ }
+
+ if (src_w == scale_dst_w) {
+ src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_NO;
+ x_factor.val = 0;
+ } else if (src_w > scale_dst_w) {
+ src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_DOWN;
+ x_factor.data.down_scale_factor =
+ rga_get_scaling(src_w, scale_dst_w) + 1;
+ } else {
+ src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_UP;
+ x_factor.data.up_scale_factor =
+ rga_get_scaling(src_w - 1, scale_dst_w - 1);
+ }
+
+ if (src_h == scale_dst_h) {
+ src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_NO;
+ y_factor.val = 0;
+ } else if (src_h > scale_dst_h) {
+ src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_DOWN;
+ y_factor.data.down_scale_factor =
+ rga_get_scaling(src_h, scale_dst_h) + 1;
+ } else {
+ src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_UP;
+ y_factor.data.up_scale_factor =
+ rga_get_scaling(src_h - 1, scale_dst_h - 1);
+ }
+
+ /*
+ * Calculate the framebuffer virtual strides and active size.
+ * Note that vir_stride / vir_width are expressed in 4-byte words.
+ */
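+ /* E.g. a 1920-byte stride gives a vir_stride of 1920 / 4 = 480 words. */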
+ src_vir_info.data.vir_stride = ctx->in.stride >> 2;
+ src_vir_info.data.vir_width = ctx->in.stride >> 2;
+
+ src_act_info.data.act_height = src_h - 1;
+ src_act_info.data.act_width = src_w - 1;
+
+ dst_vir_info.data.vir_stride = ctx->out.stride >> 2;
+ dst_act_info.data.act_height = dst_h - 1;
+ dst_act_info.data.act_width = dst_w - 1;
+
+ /*
+ * Calculate the source framebuffer base address with the pixel offset.
+ */
+ src_offsets = rga_get_addr_offset(&ctx->in, src_x, src_y,
+ src_w, src_h);
+
+ /*
+ * Configure the dest framebuffer base address with pixel offset.
+ */
+ offsets = rga_get_addr_offset(&ctx->out, dst_x, dst_y, dst_w, dst_h);
+ dst_offset = rga_lookup_draw_pos(&offsets, src_info.data.rot_mode,
+ src_info.data.mir_mode);
+
+ dest[(RGA_SRC_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ src_offsets.left_top.y_off;
+ dest[(RGA_SRC_CB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ src_offsets.left_top.u_off;
+ dest[(RGA_SRC_CR_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ src_offsets.left_top.v_off;
+
+ dest[(RGA_SRC_X_FACTOR - RGA_MODE_BASE_REG) >> 2] = x_factor.val;
+ dest[(RGA_SRC_Y_FACTOR - RGA_MODE_BASE_REG) >> 2] = y_factor.val;
+ dest[(RGA_SRC_VIR_INFO - RGA_MODE_BASE_REG) >> 2] = src_vir_info.val;
+ dest[(RGA_SRC_ACT_INFO - RGA_MODE_BASE_REG) >> 2] = src_act_info.val;
+
+ dest[(RGA_SRC_INFO - RGA_MODE_BASE_REG) >> 2] = src_info.val;
+
+ dest[(RGA_DST_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ dst_offset->y_off;
+ dest[(RGA_DST_CB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ dst_offset->u_off;
+ dest[(RGA_DST_CR_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ dst_offset->v_off;
+
+ dest[(RGA_DST_VIR_INFO - RGA_MODE_BASE_REG) >> 2] = dst_vir_info.val;
+ dest[(RGA_DST_ACT_INFO - RGA_MODE_BASE_REG) >> 2] = dst_act_info.val;
+
+ dest[(RGA_DST_INFO - RGA_MODE_BASE_REG) >> 2] = dst_info.val;
+}
+
+static void rga_cmd_set_mode(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ union rga_mode_ctrl mode;
+ union rga_alpha_ctrl0 alpha_ctrl0;
+ union rga_alpha_ctrl1 alpha_ctrl1;
+
+ mode.val = 0;
+ alpha_ctrl0.val = 0;
+ alpha_ctrl1.val = 0;
+
+ mode.data.gradient_sat = 1;
+ mode.data.render = RGA_MODE_RENDER_BITBLT;
+ mode.data.bitblt = RGA_MODE_BITBLT_MODE_SRC_TO_DST;
+
+ /* disable alpha blending */
+ dest[(RGA_ALPHA_CTRL0 - RGA_MODE_BASE_REG) >> 2] = alpha_ctrl0.val;
+ dest[(RGA_ALPHA_CTRL1 - RGA_MODE_BASE_REG) >> 2] = alpha_ctrl1.val;
+
+ dest[(RGA_MODE_CTRL - RGA_MODE_BASE_REG) >> 2] = mode.val;
+}
+
+static void rga_cmd_set(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+
+ memset(rga->cmdbuf_virt, 0, RGA_CMDBUF_SIZE * 4);
+
+ rga_cmd_set_src_addr(ctx, rga->src_mmu_pages);
+ /*
+ * Due to a hardware bug, the src1 MMU must also be configured
+ * when alpha blending is used.
+ */
+ rga_cmd_set_src1_addr(ctx, rga->dst_mmu_pages);
+
+ rga_cmd_set_dst_addr(ctx, rga->dst_mmu_pages);
+ rga_cmd_set_mode(ctx);
+
+ rga_cmd_set_trans_info(ctx);
+
+ rga_write(rga, RGA_CMD_BASE, rga->cmdbuf_phy);
+
+ /* sync CMD buf for RGA */
+ dma_sync_single_for_device(rga->dev, rga->cmdbuf_phy,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+}
+
+void rga_hw_start(struct rockchip_rga *rga)
+{
+ struct rga_ctx *ctx = rga->curr;
+
+ rga_cmd_set(ctx);
+
+ rga_write(rga, RGA_SYS_CTRL, 0x00);
+
+ rga_write(rga, RGA_SYS_CTRL, 0x22);
+
+ rga_write(rga, RGA_INT, 0x600);
+
+ rga_write(rga, RGA_CMD_CTRL, 0x1);
+}
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.h b/drivers/media/platform/rockchip/rga/rga-hw.h
new file mode 100644
index 000000000000..ca3c204abe42
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga-hw.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __RGA_HW_H__
+#define __RGA_HW_H__
+
+#define RGA_CMDBUF_SIZE 0x20
+
+/* Hardware limits */
+#define MAX_WIDTH 8192
+#define MAX_HEIGHT 8192
+
+#define MIN_WIDTH 34
+#define MIN_HEIGHT 34
+
+#define DEFAULT_WIDTH 100
+#define DEFAULT_HEIGHT 100
+
+#define RGA_TIMEOUT 500
+
+/* Registers address */
+#define RGA_SYS_CTRL 0x0000
+#define RGA_CMD_CTRL 0x0004
+#define RGA_CMD_BASE 0x0008
+#define RGA_INT 0x0010
+#define RGA_MMU_CTRL0 0x0014
+#define RGA_VERSION_INFO 0x0028
+
+#define RGA_MODE_BASE_REG 0x0100
+#define RGA_MODE_MAX_REG 0x017C
+
+#define RGA_MODE_CTRL 0x0100
+#define RGA_SRC_INFO 0x0104
+#define RGA_SRC_Y_RGB_BASE_ADDR 0x0108
+#define RGA_SRC_CB_BASE_ADDR 0x010c
+#define RGA_SRC_CR_BASE_ADDR 0x0110
+#define RGA_SRC1_RGB_BASE_ADDR 0x0114
+#define RGA_SRC_VIR_INFO 0x0118
+#define RGA_SRC_ACT_INFO 0x011c
+#define RGA_SRC_X_FACTOR 0x0120
+#define RGA_SRC_Y_FACTOR 0x0124
+#define RGA_SRC_BG_COLOR 0x0128
+#define RGA_SRC_FG_COLOR 0x012c
+#define RGA_SRC_TR_COLOR0 0x0130
+#define RGA_SRC_TR_COLOR1 0x0134
+
+#define RGA_DST_INFO 0x0138
+#define RGA_DST_Y_RGB_BASE_ADDR 0x013c
+#define RGA_DST_CB_BASE_ADDR 0x0140
+#define RGA_DST_CR_BASE_ADDR 0x0144
+#define RGA_DST_VIR_INFO 0x0148
+#define RGA_DST_ACT_INFO 0x014c
+
+#define RGA_ALPHA_CTRL0 0x0150
+#define RGA_ALPHA_CTRL1 0x0154
+#define RGA_FADING_CTRL 0x0158
+#define RGA_PAT_CON 0x015c
+#define RGA_ROP_CON0 0x0160
+#define RGA_ROP_CON1 0x0164
+#define RGA_MASK_BASE 0x0168
+
+#define RGA_MMU_CTRL1 0x016C
+#define RGA_MMU_SRC_BASE 0x0170
+#define RGA_MMU_SRC1_BASE 0x0174
+#define RGA_MMU_DST_BASE 0x0178
+
+/* Registers value */
+#define RGA_MODE_RENDER_BITBLT 0
+#define RGA_MODE_RENDER_COLOR_PALETTE 1
+#define RGA_MODE_RENDER_RECTANGLE_FILL 2
+#define RGA_MODE_RENDER_UPDATE_PALETTE_LUT_RAM 3
+
+#define RGA_MODE_BITBLT_MODE_SRC_TO_DST 0
+#define RGA_MODE_BITBLT_MODE_SRC_SRC1_TO_DST 1
+
+#define RGA_MODE_CF_ROP4_SOLID 0
+#define RGA_MODE_CF_ROP4_PATTERN 1
+
+#define RGA_COLOR_FMT_ABGR8888 0
+#define RGA_COLOR_FMT_XBGR8888 1
+#define RGA_COLOR_FMT_RGB888 2
+#define RGA_COLOR_FMT_BGR565 4
+#define RGA_COLOR_FMT_ABGR1555 5
+#define RGA_COLOR_FMT_ABGR4444 6
+#define RGA_COLOR_FMT_YUV422SP 8
+#define RGA_COLOR_FMT_YUV422P 9
+#define RGA_COLOR_FMT_YUV420SP 10
+#define RGA_COLOR_FMT_YUV420P 11
+/* SRC_COLOR Palette */
+#define RGA_COLOR_FMT_CP_1BPP 12
+#define RGA_COLOR_FMT_CP_2BPP 13
+#define RGA_COLOR_FMT_CP_4BPP 14
+#define RGA_COLOR_FMT_CP_8BPP 15
+#define RGA_COLOR_FMT_MASK 15
+
+#define RGA_COLOR_NONE_SWAP 0
+#define RGA_COLOR_RB_SWAP 1
+#define RGA_COLOR_ALPHA_SWAP 2
+#define RGA_COLOR_UV_SWAP 4
+
+#define RGA_SRC_CSC_MODE_BYPASS 0
+#define RGA_SRC_CSC_MODE_BT601_R0 1
+#define RGA_SRC_CSC_MODE_BT601_R1 2
+#define RGA_SRC_CSC_MODE_BT709_R0 3
+#define RGA_SRC_CSC_MODE_BT709_R1 4
+
+#define RGA_SRC_ROT_MODE_0_DEGREE 0
+#define RGA_SRC_ROT_MODE_90_DEGREE 1
+#define RGA_SRC_ROT_MODE_180_DEGREE 2
+#define RGA_SRC_ROT_MODE_270_DEGREE 3
+
+#define RGA_SRC_MIRR_MODE_NO 0
+#define RGA_SRC_MIRR_MODE_X 1
+#define RGA_SRC_MIRR_MODE_Y 2
+#define RGA_SRC_MIRR_MODE_X_Y 3
+
+#define RGA_SRC_HSCL_MODE_NO 0
+#define RGA_SRC_HSCL_MODE_DOWN 1
+#define RGA_SRC_HSCL_MODE_UP 2
+
+#define RGA_SRC_VSCL_MODE_NO 0
+#define RGA_SRC_VSCL_MODE_DOWN 1
+#define RGA_SRC_VSCL_MODE_UP 2
+
+#define RGA_SRC_TRANS_ENABLE_R 1
+#define RGA_SRC_TRANS_ENABLE_G 2
+#define RGA_SRC_TRANS_ENABLE_B 4
+#define RGA_SRC_TRANS_ENABLE_A 8
+
+#define RGA_SRC_BIC_COE_SELEC_CATROM 0
+#define RGA_SRC_BIC_COE_SELEC_MITCHELL 1
+#define RGA_SRC_BIC_COE_SELEC_HERMITE 2
+#define RGA_SRC_BIC_COE_SELEC_BSPLINE 3
+
+#define RGA_DST_DITHER_MODE_888_TO_666 0
+#define RGA_DST_DITHER_MODE_888_TO_565 1
+#define RGA_DST_DITHER_MODE_888_TO_555 2
+#define RGA_DST_DITHER_MODE_888_TO_444 3
+
+#define RGA_DST_CSC_MODE_BYPASS 0
+#define RGA_DST_CSC_MODE_BT601_R0 1
+#define RGA_DST_CSC_MODE_BT601_R1 2
+#define RGA_DST_CSC_MODE_BT709_R0 3
+
+#define RGA_ALPHA_ROP_MODE_2 0
+#define RGA_ALPHA_ROP_MODE_3 1
+#define RGA_ALPHA_ROP_MODE_4 2
+
+#define RGA_ALPHA_SELECT_ALPHA 0
+#define RGA_ALPHA_SELECT_ROP 1
+
+#define RGA_ALPHA_MASK_BIG_ENDIAN 0
+#define RGA_ALPHA_MASK_LITTLE_ENDIAN 1
+
+#define RGA_ALPHA_NORMAL 0
+#define RGA_ALPHA_REVERSE 1
+
+#define RGA_ALPHA_BLEND_GLOBAL 0
+#define RGA_ALPHA_BLEND_NORMAL 1
+#define RGA_ALPHA_BLEND_MULTIPLY 2
+
+#define RGA_ALPHA_CAL_CUT 0
+#define RGA_ALPHA_CAL_NORMAL 1
+
+#define RGA_ALPHA_FACTOR_ZERO 0
+#define RGA_ALPHA_FACTOR_ONE 1
+#define RGA_ALPHA_FACTOR_OTHER 2
+#define RGA_ALPHA_FACTOR_OTHER_REVERSE 3
+#define RGA_ALPHA_FACTOR_SELF 4
+
+#define RGA_ALPHA_COLOR_NORMAL 0
+#define RGA_ALPHA_COLOR_MULTIPLY_CAL 1
+
+/* Registers union */
+union rga_mode_ctrl {
+ unsigned int val;
+ struct {
+ /* [0:2] */
+ unsigned int render:3;
+ /* [3:6] */
+ unsigned int bitblt:1;
+ unsigned int cf_rop4_pat:1;
+ unsigned int alpha_zero_key:1;
+ unsigned int gradient_sat:1;
+ /* [7:31] */
+ unsigned int reserved:25;
+ } data;
+};
+
+union rga_src_info {
+ unsigned int val;
+ struct {
+ /* [0:3] */
+ unsigned int format:4;
+ /* [4:7] */
+ unsigned int swap:3;
+ unsigned int cp_endian:1;
+ /* [8:17] */
+ unsigned int csc_mode:2;
+ unsigned int rot_mode:2;
+ unsigned int mir_mode:2;
+ unsigned int hscl_mode:2;
+ unsigned int vscl_mode:2;
+ /* [18:22] */
+ unsigned int trans_mode:1;
+ unsigned int trans_enable:4;
+ /* [23:25] */
+ unsigned int dither_up_en:1;
+ unsigned int bic_coe_sel:2;
+ /* [26:31] */
+ unsigned int reserved:6;
+ } data;
+};
+
+union rga_src_vir_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int vir_width:15;
+ unsigned int reserved:1;
+ /* [16:25] */
+ unsigned int vir_stride:10;
+ /* [26:31] */
+ unsigned int reserved1:6;
+ } data;
+};
+
+union rga_src_act_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int act_width:13;
+ unsigned int reserved:3;
+ /* [16:31] */
+ unsigned int act_height:13;
+ unsigned int reserved1:3;
+ } data;
+};
+
+union rga_src_x_factor {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int down_scale_factor:16;
+ /* [16:31] */
+ unsigned int up_scale_factor:16;
+ } data;
+};
+
+union rga_src_y_factor {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int down_scale_factor:16;
+ /* [16:31] */
+ unsigned int up_scale_factor:16;
+ } data;
+};
+
+/* Alpha / Red / Green / Blue */
+union rga_src_cp_gr_color {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int gradient_x:16;
+ /* [16:31] */
+ unsigned int gradient_y:16;
+ } data;
+};
+
+union rga_src_transparency_color0 {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int trans_rmin:8;
+ /* [8:15] */
+ unsigned int trans_gmin:8;
+ /* [16:23] */
+ unsigned int trans_bmin:8;
+ /* [24:31] */
+ unsigned int trans_amin:8;
+ } data;
+};
+
+union rga_src_transparency_color1 {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int trans_rmax:8;
+ /* [8:15] */
+ unsigned int trans_gmax:8;
+ /* [16:23] */
+ unsigned int trans_bmax:8;
+ /* [24:31] */
+ unsigned int trans_amax:8;
+ } data;
+};
+
+union rga_dst_info {
+ unsigned int val;
+ struct {
+ /* [0:3] */
+ unsigned int format:4;
+ /* [4:6] */
+ unsigned int swap:3;
+ /* [7:9] */
+ unsigned int src1_format:3;
+ /* [10:11] */
+ unsigned int src1_swap:2;
+ /* [12:15] */
+ unsigned int dither_up_en:1;
+ unsigned int dither_down_en:1;
+ unsigned int dither_down_mode:2;
+ /* [16:18] */
+ unsigned int csc_mode:2;
+ unsigned int csc_clip:1;
+ /* [19:31] */
+ unsigned int reserved:13;
+ } data;
+};
+
+union rga_dst_vir_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int vir_stride:15;
+ unsigned int reserved:1;
+ /* [16:31] */
+ unsigned int src1_vir_stride:15;
+ unsigned int reserved1:1;
+ } data;
+};
+
+union rga_dst_act_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int act_width:12;
+ unsigned int reserved:4;
+ /* [16:31] */
+ unsigned int act_height:12;
+ unsigned int reserved1:4;
+ } data;
+};
+
+union rga_alpha_ctrl0 {
+ unsigned int val;
+ struct {
+ /* [0:3] */
+ unsigned int rop_en:1;
+ unsigned int rop_select:1;
+ unsigned int rop_mode:2;
+ /* [4:11] */
+ unsigned int src_fading_val:8;
+ /* [12:20] */
+ unsigned int dst_fading_val:8;
+ unsigned int mask_endian:1;
+ /* [21:31] */
+ unsigned int reserved:11;
+ } data;
+};
+
+union rga_alpha_ctrl1 {
+ unsigned int val;
+ struct {
+ /* [0:1] */
+ unsigned int dst_color_m0:1;
+ unsigned int src_color_m0:1;
+ /* [2:7] */
+ unsigned int dst_factor_m0:3;
+ unsigned int src_factor_m0:3;
+ /* [8:9] */
+ unsigned int dst_alpha_cal_m0:1;
+ unsigned int src_alpha_cal_m0:1;
+ /* [10:13] */
+ unsigned int dst_blend_m0:2;
+ unsigned int src_blend_m0:2;
+ /* [14:15] */
+ unsigned int dst_alpha_m0:1;
+ unsigned int src_alpha_m0:1;
+ /* [16:21] */
+ unsigned int dst_factor_m1:3;
+ unsigned int src_factor_m1:3;
+ /* [22:23] */
+ unsigned int dst_alpha_cal_m1:1;
+ unsigned int src_alpha_cal_m1:1;
+ /* [24:27] */
+ unsigned int dst_blend_m1:2;
+ unsigned int src_blend_m1:2;
+ /* [28:29] */
+ unsigned int dst_alpha_m1:1;
+ unsigned int src_alpha_m1:1;
+ /* [30:31] */
+ unsigned int reserved:2;
+ } data;
+};
+
+union rga_fading_ctrl {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int fading_offset_r:8;
+ /* [8:15] */
+ unsigned int fading_offset_g:8;
+ /* [16:23] */
+ unsigned int fading_offset_b:8;
+ /* [24:31] */
+ unsigned int fading_en:1;
+ unsigned int reserved:7;
+ } data;
+};
+
+union rga_pat_con {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int width:8;
+ /* [8:15] */
+ unsigned int height:8;
+ /* [16:23] */
+ unsigned int offset_x:8;
+ /* [24:31] */
+ unsigned int offset_y:8;
+ } data;
+};
+
+#endif
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
new file mode 100644
index 000000000000..89296de9cf4a
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -0,0 +1,1010 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "rga-hw.h"
+#include "rga.h"
+
+static int debug;
+module_param(debug, int, 0644);
+
+static void job_abort(void *prv)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+
+ if (!rga->curr) /* No job currently running */
+ return;
+
+ wait_event_timeout(rga->irq_queue,
+ !rga->curr, msecs_to_jiffies(RGA_TIMEOUT));
+}
+
+static void device_run(void *prv)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+ struct vb2_buffer *src, *dst;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rga->ctrl_lock, flags);
+
+ rga->curr = ctx;
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ rga_buf_map(src);
+ rga_buf_map(dst);
+
+ rga_hw_start(rga);
+
+ spin_unlock_irqrestore(&rga->ctrl_lock, flags);
+}
+
+static irqreturn_t rga_isr(int irq, void *prv)
+{
+ struct rockchip_rga *rga = prv;
+ int intr;
+
+ intr = rga_read(rga, RGA_INT) & 0xf;
+
+ rga_mod(rga, RGA_INT, intr << 4, 0xf << 4);
+
+ if (intr & 0x04) {
+ struct vb2_v4l2_buffer *src, *dst;
+ struct rga_ctx *ctx = rga->curr;
+
+ WARN_ON(!ctx);
+
+ rga->curr = NULL;
+
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ WARN_ON(!src);
+ WARN_ON(!dst);
+
+ dst->timecode = src->timecode;
+ dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
+ dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst->flags |= src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(rga->m2m_dev, ctx->fh.m2m_ctx);
+
+ wake_up(&rga->irq_queue);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct v4l2_m2m_ops rga_m2m_ops = {
+ .device_run = device_run,
+ .job_abort = job_abort,
+};
+
+static int
+queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct rga_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &rga_qops;
+ src_vq->mem_ops = &vb2_dma_sg_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->rga->mutex;
+ src_vq->dev = ctx->rga->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &rga_qops;
+ dst_vq->mem_ops = &vb2_dma_sg_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->rga->mutex;
+ dst_vq->dev = ctx->rga->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int rga_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct rga_ctx *ctx = container_of(ctrl->handler, struct rga_ctx,
+ ctrl_handler);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->rga->ctrl_lock, flags);
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctx->hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ ctx->vflip = ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
+ ctx->rotate = ctrl->val;
+ break;
+ case V4L2_CID_BG_COLOR:
+ ctx->fill_color = ctrl->val;
+ break;
+ }
+ spin_unlock_irqrestore(&ctx->rga->ctrl_lock, flags);
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops rga_ctrl_ops = {
+ .s_ctrl = rga_s_ctrl,
+};
+
+static int rga_setup_ctrls(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_BG_COLOR, 0, 0xffffffff, 1, 0);
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ v4l2_err(&rga->v4l2_dev, "%s failed\n", __func__);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return err;
+ }
+
+ return 0;
+}
+
+struct rga_fmt formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XRGB32,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_XBGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ABGR32,
+ .color_swap = RGA_COLOR_ALPHA_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .color_swap = RGA_COLOR_ALPHA_SWAP,
+ .hw_format = RGA_COLOR_FMT_XBGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_RGB888,
+ .depth = 24,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_RGB888,
+ .depth = 24,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR4444,
+ .depth = 16,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR1555,
+ .depth = 16,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_BGR565,
+ .depth = 16,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .color_swap = RGA_COLOR_UV_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420SP,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .color_swap = RGA_COLOR_UV_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV422SP,
+ .depth = 16,
+ .uv_factor = 2,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420SP,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV422SP,
+ .depth = 16,
+ .uv_factor = 2,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420P,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV422P,
+ .depth = 16,
+ .uv_factor = 2,
+ .y_div = 1,
+ .x_div = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .color_swap = RGA_COLOR_UV_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420P,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 2,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static struct rga_fmt *rga_fmt_find(struct v4l2_format *f)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].fourcc == f->fmt.pix.pixelformat)
+ return &formats[i];
+ }
+ return NULL;
+}
+
+static struct rga_frame def_frame = {
+ .width = DEFAULT_WIDTH,
+ .height = DEFAULT_HEIGHT,
+ .colorspace = V4L2_COLORSPACE_DEFAULT,
+ .crop.left = 0,
+ .crop.top = 0,
+ .crop.width = DEFAULT_WIDTH,
+ .crop.height = DEFAULT_HEIGHT,
+ .fmt = &formats[0],
+};
+
+struct rga_frame *rga_get_frame(struct rga_ctx *ctx, enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->in;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->out;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static int rga_open(struct file *file)
+{
+ struct rockchip_rga *rga = video_drvdata(file);
+ struct rga_ctx *ctx = NULL;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->rga = rga;
+ /* Set default formats */
+ ctx->in = def_frame;
+ ctx->out = def_frame;
+
+ if (mutex_lock_interruptible(&rga->mutex)) {
+ kfree(ctx);
+ return -ERESTARTSYS;
+ }
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rga->m2m_dev, ctx, &queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ mutex_unlock(&rga->mutex);
+ kfree(ctx);
+ return ret;
+ }
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ rga_setup_ctrls(ctx);
+
+ /* Write the default values to the ctx struct */
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ mutex_unlock(&rga->mutex);
+
+ return 0;
+}
+
+static int rga_release(struct file *file)
+{
+ struct rga_ctx *ctx =
+ container_of(file->private_data, struct rga_ctx, fh);
+ struct rockchip_rga *rga = ctx->rga;
+
+ mutex_lock(&rga->mutex);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ mutex_unlock(&rga->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations rga_fops = {
+ .owner = THIS_MODULE,
+ .open = rga_open,
+ .release = rga_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int
+vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, RGA_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, "rockchip-rga", sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:rga", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+{
+ struct rga_fmt *fmt;
+
+ if (f->index >= NUM_FORMATS)
+ return -EINVAL;
+
+ fmt = &formats[f->index];
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct rga_ctx *ctx = prv;
+ struct vb2_queue *vq;
+ struct rga_frame *frm;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+ frm = rga_get_frame(ctx, f->type);
+ if (IS_ERR(frm))
+ return PTR_ERR(frm);
+
+ f->fmt.pix.width = frm->width;
+ f->fmt.pix.height = frm->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = frm->fmt->fourcc;
+ f->fmt.pix.bytesperline = frm->stride;
+ f->fmt.pix.sizeimage = frm->size;
+ f->fmt.pix.colorspace = frm->colorspace;
+
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct rga_fmt *fmt;
+
+ fmt = rga_fmt_find(f);
+ if (!fmt) {
+ fmt = &formats[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
+ }
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ if (f->fmt.pix.width > MAX_WIDTH)
+ f->fmt.pix.width = MAX_WIDTH;
+ if (f->fmt.pix.height > MAX_HEIGHT)
+ f->fmt.pix.height = MAX_HEIGHT;
+
+ if (f->fmt.pix.width < MIN_WIDTH)
+ f->fmt.pix.width = MIN_WIDTH;
+ if (f->fmt.pix.height < MIN_HEIGHT)
+ f->fmt.pix.height = MIN_HEIGHT;
+
+ if (fmt->hw_format >= RGA_COLOR_FMT_YUV422SP)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ else
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+
+ f->fmt.pix.sizeimage =
+ f->fmt.pix.height * (f->fmt.pix.width * fmt->depth) >> 3;
+
+ return 0;
+}
+
+static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+ struct vb2_queue *vq;
+ struct rga_frame *frm;
+ struct rga_fmt *fmt;
+ int ret = 0;
+
+ /*
+ * Adjust all values according to the hardware capabilities
+ * and the chosen format.
+ */
+ ret = vidioc_try_fmt(file, prv, f);
+ if (ret)
+ return ret;
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq)) {
+ v4l2_err(&rga->v4l2_dev, "queue (%d) bust\n", f->type);
+ return -EBUSY;
+ }
+ frm = rga_get_frame(ctx, f->type);
+ if (IS_ERR(frm))
+ return PTR_ERR(frm);
+ fmt = rga_fmt_find(f);
+ if (!fmt)
+ return -EINVAL;
+ frm->width = f->fmt.pix.width;
+ frm->height = f->fmt.pix.height;
+ frm->size = f->fmt.pix.sizeimage;
+ frm->fmt = fmt;
+ frm->stride = f->fmt.pix.bytesperline;
+ frm->colorspace = f->fmt.pix.colorspace;
+
+ /* Reset crop settings */
+ frm->crop.left = 0;
+ frm->crop.top = 0;
+ frm->crop.width = frm->width;
+ frm->crop.height = frm->height;
+
+ return 0;
+}
+
+static int vidioc_g_selection(struct file *file, void *prv,
+ struct v4l2_selection *s)
+{
+ struct rga_ctx *ctx = prv;
+ struct rga_frame *f;
+ bool use_frame = false;
+
+ f = rga_get_frame(ctx, s->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ use_frame = true;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ use_frame = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (use_frame) {
+ s->r = f->crop;
+ } else {
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = f->width;
+ s->r.height = f->height;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_selection(struct file *file, void *prv,
+ struct v4l2_selection *s)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+ struct rga_frame *f;
+ int ret = 0;
+
+ f = rga_get_frame(ctx, s->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ /*
+ * COMPOSE target is only valid for capture buffer type, return
+ * error for output buffer type
+ */
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ /*
+ * CROP target is only valid for output buffer type, return
+ * error for capture buffer type
+ */
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ /*
+ * The bounds and default crop/compose targets are invalid
+ * targets to try/set.
+ */
+ default:
+ return -EINVAL;
+ }
+
+ if (s->r.top < 0 || s->r.left < 0) {
+ v4l2_dbg(debug, 1, &rga->v4l2_dev,
+ "doesn't support negative values for top & left.\n");
+ return -EINVAL;
+ }
+
+ if (s->r.left + s->r.width > f->width ||
+ s->r.top + s->r.height > f->height ||
+ s->r.width < MIN_WIDTH || s->r.height < MIN_HEIGHT) {
+ v4l2_dbg(debug, 1, &rga->v4l2_dev, "unsupported crop value.\n");
+ return -EINVAL;
+ }
+
+ f->crop = s->r;
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops rga_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
+};
+
+static struct video_device rga_videodev = {
+ .name = "rockchip-rga",
+ .fops = &rga_fops,
+ .ioctl_ops = &rga_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+static int rga_enable_clocks(struct rockchip_rga *rga)
+{
+ int ret;
+
+ ret = clk_prepare_enable(rga->sclk);
+ if (ret) {
+ dev_err(rga->dev, "Cannot enable rga sclk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(rga->aclk);
+ if (ret) {
+ dev_err(rga->dev, "Cannot enable rga aclk: %d\n", ret);
+ goto err_disable_sclk;
+ }
+
+ ret = clk_prepare_enable(rga->hclk);
+ if (ret) {
+ dev_err(rga->dev, "Cannot enable rga hclk: %d\n", ret);
+ goto err_disable_aclk;
+ }
+
+ return 0;
+
+err_disable_sclk:
+ clk_disable_unprepare(rga->sclk);
+err_disable_aclk:
+ clk_disable_unprepare(rga->aclk);
+
+ return ret;
+}
+
+static void rga_disable_clocks(struct rockchip_rga *rga)
+{
+ clk_disable_unprepare(rga->sclk);
+ clk_disable_unprepare(rga->hclk);
+ clk_disable_unprepare(rga->aclk);
+}
+
+static int rga_parse_dt(struct rockchip_rga *rga)
+{
+ struct reset_control *core_rst, *axi_rst, *ahb_rst;
+
+ core_rst = devm_reset_control_get(rga->dev, "core");
+ if (IS_ERR(core_rst)) {
+ dev_err(rga->dev, "failed to get core reset controller\n");
+ return PTR_ERR(core_rst);
+ }
+
+ axi_rst = devm_reset_control_get(rga->dev, "axi");
+ if (IS_ERR(axi_rst)) {
+ dev_err(rga->dev, "failed to get axi reset controller\n");
+ return PTR_ERR(axi_rst);
+ }
+
+ ahb_rst = devm_reset_control_get(rga->dev, "ahb");
+ if (IS_ERR(ahb_rst)) {
+ dev_err(rga->dev, "failed to get ahb reset controller\n");
+ return PTR_ERR(ahb_rst);
+ }
+
+ reset_control_assert(core_rst);
+ udelay(1);
+ reset_control_deassert(core_rst);
+
+ reset_control_assert(axi_rst);
+ udelay(1);
+ reset_control_deassert(axi_rst);
+
+ reset_control_assert(ahb_rst);
+ udelay(1);
+ reset_control_deassert(ahb_rst);
+
+ rga->sclk = devm_clk_get(rga->dev, "sclk");
+ if (IS_ERR(rga->sclk)) {
+ dev_err(rga->dev, "failed to get sclk clock\n");
+ return PTR_ERR(rga->sclk);
+ }
+
+ rga->aclk = devm_clk_get(rga->dev, "aclk");
+ if (IS_ERR(rga->aclk)) {
+ dev_err(rga->dev, "failed to get aclk clock\n");
+ return PTR_ERR(rga->aclk);
+ }
+
+ rga->hclk = devm_clk_get(rga->dev, "hclk");
+ if (IS_ERR(rga->hclk)) {
+ dev_err(rga->dev, "failed to get hclk clock\n");
+ return PTR_ERR(rga->hclk);
+ }
+
+ return 0;
+}
+
+static int rga_probe(struct platform_device *pdev)
+{
+ struct rockchip_rga *rga;
+ struct video_device *vfd;
+ struct resource *res;
+ int ret = 0;
+ int irq;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ rga = devm_kzalloc(&pdev->dev, sizeof(*rga), GFP_KERNEL);
+ if (!rga)
+ return -ENOMEM;
+
+ rga->dev = &pdev->dev;
+ spin_lock_init(&rga->ctrl_lock);
+ mutex_init(&rga->mutex);
+
+ init_waitqueue_head(&rga->irq_queue);
+
+ ret = rga_parse_dt(rga);
+ if (ret)
+ dev_err(&pdev->dev, "Unable to parse OF data\n");
+
+ pm_runtime_enable(rga->dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ rga->regs = devm_ioremap_resource(rga->dev, res);
+ if (IS_ERR(rga->regs)) {
+ ret = PTR_ERR(rga->regs);
+ goto err_put_clk;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(rga->dev, "failed to get irq\n");
+ ret = irq;
+ goto err_put_clk;
+ }
+
+ ret = devm_request_irq(rga->dev, irq, rga_isr, 0,
+ dev_name(rga->dev), rga);
+ if (ret < 0) {
+ dev_err(rga->dev, "failed to request irq\n");
+ goto err_put_clk;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &rga->v4l2_dev);
+ if (ret)
+ goto err_put_clk;
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&rga->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_v4l2_dev;
+ }
+ *vfd = rga_videodev;
+ vfd->lock = &rga->mutex;
+ vfd->v4l2_dev = &rga->v4l2_dev;
+
+ video_set_drvdata(vfd, rga);
+ snprintf(vfd->name, sizeof(vfd->name), "%s", rga_videodev.name);
+ rga->vfd = vfd;
+
+ platform_set_drvdata(pdev, rga);
+ rga->m2m_dev = v4l2_m2m_init(&rga_m2m_ops);
+ if (IS_ERR(rga->m2m_dev)) {
+ v4l2_err(&rga->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(rga->m2m_dev);
+ goto unreg_video_dev;
+ }
+
+ pm_runtime_get_sync(rga->dev);
+
+ rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
+ rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;
+
+ v4l2_info(&rga->v4l2_dev, "HW Version: 0x%02x.%02x\n",
+ rga->version.major, rga->version.minor);
+
+ pm_runtime_put(rga->dev);
+
+ /* Create CMD buffer */
+ rga->cmdbuf_virt = dma_alloc_attrs(rga->dev, RGA_CMDBUF_SIZE,
+ &rga->cmdbuf_phy, GFP_KERNEL,
+ DMA_ATTR_WRITE_COMBINE);
+
+ rga->src_mmu_pages =
+ (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
+ rga->dst_mmu_pages =
+ (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
+
+ def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
+ def_frame.size = def_frame.stride * def_frame.height;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&rga->v4l2_dev, "Failed to register video device\n");
+ goto rel_vdev;
+ }
+
+ v4l2_info(&rga->v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+
+ return 0;
+
+rel_vdev:
+ video_device_release(vfd);
+unreg_video_dev:
+ video_unregister_device(rga->vfd);
+unreg_v4l2_dev:
+ v4l2_device_unregister(&rga->v4l2_dev);
+err_put_clk:
+ pm_runtime_disable(rga->dev);
+
+ return ret;
+}
+
+static int rga_remove(struct platform_device *pdev)
+{
+ struct rockchip_rga *rga = platform_get_drvdata(pdev);
+
+ dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, rga->cmdbuf_virt,
+ rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
+
+ free_pages((unsigned long)rga->src_mmu_pages, 3);
+ free_pages((unsigned long)rga->dst_mmu_pages, 3);
+
+ v4l2_info(&rga->v4l2_dev, "Removing\n");
+
+ v4l2_m2m_release(rga->m2m_dev);
+ video_unregister_device(rga->vfd);
+ v4l2_device_unregister(&rga->v4l2_dev);
+
+ pm_runtime_disable(rga->dev);
+
+ return 0;
+}
+
+static int __maybe_unused rga_runtime_suspend(struct device *dev)
+{
+ struct rockchip_rga *rga = dev_get_drvdata(dev);
+
+ rga_disable_clocks(rga);
+
+ return 0;
+}
+
+static int __maybe_unused rga_runtime_resume(struct device *dev)
+{
+ struct rockchip_rga *rga = dev_get_drvdata(dev);
+
+ return rga_enable_clocks(rga);
+}
+
+static const struct dev_pm_ops rga_pm = {
+ SET_RUNTIME_PM_OPS(rga_runtime_suspend,
+ rga_runtime_resume, NULL)
+};
+
+static const struct of_device_id rockchip_rga_match[] = {
+ {
+ .compatible = "rockchip,rk3288-rga",
+ },
+ {
+ .compatible = "rockchip,rk3399-rga",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_rga_match);
+
+static struct platform_driver rga_pdrv = {
+ .probe = rga_probe,
+ .remove = rga_remove,
+ .driver = {
+ .name = RGA_NAME,
+ .pm = &rga_pm,
+ .of_match_table = rockchip_rga_match,
+ },
+};
+
+module_platform_driver(rga_pdrv);
+
+MODULE_AUTHOR("Jacob Chen <jacob-chen@iotwrt.com>");
+MODULE_DESCRIPTION("Rockchip Raster 2d Graphic Acceleration Unit");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/rockchip/rga/rga.h b/drivers/media/platform/rockchip/rga/rga.h
new file mode 100644
index 000000000000..5d43e7ea88af
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __RGA_H__
+#define __RGA_H__
+
+#include <linux/platform_device.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#define RGA_NAME "rockchip-rga"
+
+struct rga_fmt {
+ u32 fourcc;
+ int depth;
+ u8 uv_factor;
+ u8 y_div;
+ u8 x_div;
+ u8 color_swap;
+ u8 hw_format;
+};
+
+struct rga_frame {
+ /* Original dimensions */
+ u32 width;
+ u32 height;
+ u32 colorspace;
+
+ /* Crop */
+ struct v4l2_rect crop;
+
+ /* Image format */
+ struct rga_fmt *fmt;
+
+ /* Variables that can be calculated once and reused */
+ u32 stride;
+ u32 size;
+};
+
+struct rockchip_rga_version {
+ u32 major;
+ u32 minor;
+};
+
+struct rga_ctx {
+ struct v4l2_fh fh;
+ struct rockchip_rga *rga;
+ struct rga_frame in;
+ struct rga_frame out;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* Control values */
+ u32 op;
+ u32 hflip;
+ u32 vflip;
+ u32 rotate;
+ u32 fill_color;
+};
+
+struct rockchip_rga {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct video_device *vfd;
+
+ struct device *dev;
+ struct regmap *grf;
+ void __iomem *regs;
+ struct clk *sclk;
+ struct clk *aclk;
+ struct clk *hclk;
+ struct rockchip_rga_version version;
+
+ /* vfd lock */
+ struct mutex mutex;
+ /* ctrl parm lock */
+ spinlock_t ctrl_lock;
+
+ wait_queue_head_t irq_queue;
+
+ struct rga_ctx *curr;
+ dma_addr_t cmdbuf_phy;
+ void *cmdbuf_virt;
+ unsigned int *src_mmu_pages;
+ unsigned int *dst_mmu_pages;
+};
+
+struct rga_frame *rga_get_frame(struct rga_ctx *ctx, enum v4l2_buf_type type);
+
+/* RGA buffer management */
+extern const struct vb2_ops rga_qops;
+void rga_buf_map(struct vb2_buffer *vb);
+
+/* RGA Hardware */
+static inline void rga_write(struct rockchip_rga *rga, u32 reg, u32 value)
+{
+ writel(value, rga->regs + reg);
+}
+
+static inline u32 rga_read(struct rockchip_rga *rga, u32 reg)
+{
+ return readl(rga->regs + reg);
+}
+
+static inline void rga_mod(struct rockchip_rga *rga, u32 reg, u32 val, u32 mask)
+{
+ u32 temp = rga_read(rga, reg) & ~(mask);
+
+ temp |= val & mask;
+ rga_write(rga, reg, temp);
+}
+
+void rga_hw_start(struct rockchip_rga *rga);
+
+#endif
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 1afde5021ca6..1839a86cc2a5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -470,7 +470,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
{
mfc_err("Interrupt Error: %08x\n", err);
- if (ctx != NULL) {
+ if (ctx) {
/* Error recovery is dependent on the state of context */
switch (ctx->state) {
case MFCINST_RES_CHANGE_INIT:
@@ -508,7 +508,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
{
struct s5p_mfc_dev *dev;
- if (ctx == NULL)
+ if (!ctx)
return;
dev = ctx->dev;
if (ctx->c_ops->post_seq_start) {
@@ -562,7 +562,7 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
struct s5p_mfc_buf *src_buf;
struct s5p_mfc_dev *dev;
- if (ctx == NULL)
+ if (!ctx)
return;
dev = ctx->dev;
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
@@ -1043,12 +1043,9 @@ end:
static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
- struct s5p_mfc_dev *dev = ctx->dev;
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
int ret;
- if (mutex_lock_interruptible(&dev->mfc_mutex))
- return -ERESTARTSYS;
if (offset < DST_QUEUE_OFF_BASE) {
mfc_debug(2, "mmaping source\n");
ret = vb2_mmap(&ctx->vq_src, vma);
@@ -1057,7 +1054,6 @@ static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
ret = vb2_mmap(&ctx->vq_dst, vma);
}
- mutex_unlock(&dev->mfc_mutex);
return ret;
}
@@ -1083,7 +1079,7 @@ static struct device *s5p_mfc_alloc_memdev(struct device *dev,
struct device *child;
int ret;
- child = devm_kzalloc(dev, sizeof(struct device), GFP_KERNEL);
+ child = devm_kzalloc(dev, sizeof(*child), GFP_KERNEL);
if (!child)
return NULL;
@@ -1270,10 +1266,8 @@ static int s5p_mfc_probe(struct platform_device *pdev)
pr_debug("%s++\n", __func__);
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&pdev->dev, "Not enough memory for MFC device\n");
+ if (!dev)
return -ENOMEM;
- }
spin_lock_init(&dev->irqlock);
spin_lock_init(&dev->condlock);
@@ -1291,7 +1285,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
return PTR_ERR(dev->regs_base);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
+ if (!res) {
dev_err(&pdev->dev, "failed to get irq resource\n");
return -ENOENT;
}
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 1f3c450c7a69..916ff68b73d4 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -1391,6 +1391,12 @@ static int soc_camera_async_complete(struct v4l2_async_notifier *notifier)
return 0;
}
+static const struct v4l2_async_notifier_operations soc_camera_async_ops = {
+ .bound = soc_camera_async_bound,
+ .unbind = soc_camera_async_unbind,
+ .complete = soc_camera_async_complete,
+};
+
static int scan_async_group(struct soc_camera_host *ici,
struct v4l2_async_subdev **asd, unsigned int size)
{
@@ -1437,9 +1443,7 @@ static int scan_async_group(struct soc_camera_host *ici,
sasc->notifier.subdevs = asd;
sasc->notifier.num_subdevs = size;
- sasc->notifier.bound = soc_camera_async_bound;
- sasc->notifier.unbind = soc_camera_async_unbind;
- sasc->notifier.complete = soc_camera_async_complete;
+ sasc->notifier.ops = &soc_camera_async_ops;
icd->sasc = sasc;
icd->parent = ici->v4l2_dev.dev;
@@ -1537,9 +1541,7 @@ static int soc_of_bind(struct soc_camera_host *ici,
sasc->notifier.subdevs = &info->subdev;
sasc->notifier.num_subdevs = 1;
- sasc->notifier.bound = soc_camera_async_bound;
- sasc->notifier.unbind = soc_camera_async_unbind;
- sasc->notifier.complete = soc_camera_async_complete;
+ sasc->notifier.ops = &soc_camera_async_ops;
icd->sasc = sasc;
icd->parent = ici->v4l2_dev.dev;
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 939da6da7644..7e9ed9c7b3e1 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -723,7 +723,7 @@ static int bdisp_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
static int bdisp_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct bdisp_ctx *ctx = fh_to_ctx(fh);
- struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_pix_format *pix;
struct bdisp_frame *frame = ctx_get_frame(ctx, f->type);
if (IS_ERR(frame)) {
diff --git a/drivers/media/platform/sti/hva/hva-h264.c b/drivers/media/platform/sti/hva/hva-h264.c
index e6f247a983c7..a7e5eed17ada 100644
--- a/drivers/media/platform/sti/hva/hva-h264.c
+++ b/drivers/media/platform/sti/hva/hva-h264.c
@@ -999,7 +999,6 @@ static int hva_h264_encode(struct hva_ctx *pctx, struct hva_frame *frame,
{
struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
struct hva_h264_task *task = (struct hva_h264_task *)ctx->task->vaddr;
- struct hva_buffer *tmp_frame;
u32 stuffing_bytes = 0;
int ret = 0;
@@ -1023,9 +1022,7 @@ static int hva_h264_encode(struct hva_ctx *pctx, struct hva_frame *frame,
&stream->bytesused);
/* switch reference & reconstructed frame */
- tmp_frame = ctx->ref_frame;
- ctx->ref_frame = ctx->rec_frame;
- ctx->rec_frame = tmp_frame;
+ swap(ctx->ref_frame, ctx->rec_frame);
return 0;
err:
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index 35ba6f211b79..ac4c450a6c7d 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -1495,6 +1495,12 @@ static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
return 0;
}
+static const struct v4l2_async_notifier_operations dcmi_graph_notify_ops = {
+ .bound = dcmi_graph_notify_bound,
+ .unbind = dcmi_graph_notify_unbind,
+ .complete = dcmi_graph_notify_complete,
+};
+
static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
{
struct device_node *ep = NULL;
@@ -1542,9 +1548,7 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
dcmi->notifier.subdevs = subdevs;
dcmi->notifier.num_subdevs = 1;
- dcmi->notifier.bound = dcmi_graph_notify_bound;
- dcmi->notifier.unbind = dcmi_graph_notify_unbind;
- dcmi->notifier.complete = dcmi_graph_notify_complete;
+ dcmi->notifier.ops = &dcmi_graph_notify_ops;
ret = v4l2_async_notifier_register(&dcmi->v4l2_dev, &dcmi->notifier);
if (ret < 0) {
diff --git a/drivers/media/platform/tegra-cec/Makefile b/drivers/media/platform/tegra-cec/Makefile
new file mode 100644
index 000000000000..f3d81127589f
--- /dev/null
+++ b/drivers/media/platform/tegra-cec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_TEGRA_HDMI_CEC) += tegra_cec.o
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
new file mode 100644
index 000000000000..807c94c70049
--- /dev/null
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -0,0 +1,495 @@
+/*
+ * Tegra CEC implementation
+ *
+ * The original 3.10 CEC driver using a custom API:
+ *
+ * Copyright (c) 2012-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Conversion to the CEC framework and to the mainline kernel:
+ *
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/clk/tegra.h>
+
+#include <media/cec-notifier.h>
+
+#include "tegra_cec.h"
+
+#define TEGRA_CEC_NAME "tegra-cec"
+
+struct tegra_cec {
+ struct cec_adapter *adap;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *cec_base;
+ struct cec_notifier *notifier;
+ int tegra_cec_irq;
+ bool rx_done;
+ bool tx_done;
+ int tx_status;
+ u8 rx_buf[CEC_MAX_MSG_SIZE];
+ u8 rx_buf_cnt;
+ u32 tx_buf[CEC_MAX_MSG_SIZE];
+ u8 tx_buf_cur;
+ u8 tx_buf_cnt;
+};
+
+static inline u32 cec_read(struct tegra_cec *cec, u32 reg)
+{
+ return readl(cec->cec_base + reg);
+}
+
+static inline void cec_write(struct tegra_cec *cec, u32 reg, u32 val)
+{
+ writel(val, cec->cec_base + reg);
+}
+
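+/*
+ * Recover from a bus error: briefly disable the controller, clear all
+ * pending interrupt status bits, then restore the previous hardware
+ * control settings.
+ */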
+static void tegra_cec_error_recovery(struct tegra_cec *cec)
+{
+ u32 hw_ctrl;
+
+ hw_ctrl = cec_read(cec, TEGRA_CEC_HW_CONTROL);
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, 0);
+ cec_write(cec, TEGRA_CEC_INT_STAT, 0xffffffff);
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, hw_ctrl);
+}
+
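+/*
+ * Threaded half of the interrupt handler: runs with interrupts enabled
+ * and forwards completed transmits and received messages to the CEC
+ * framework.
+ */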
+static irqreturn_t tegra_cec_irq_thread_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tegra_cec *cec = dev_get_drvdata(dev);
+
+ if (cec->tx_done) {
+ cec_transmit_attempt_done(cec->adap, cec->tx_status);
+ cec->tx_done = false;
+ }
+ if (cec->rx_done) {
+ struct cec_msg msg = {};
+
+ msg.len = cec->rx_buf_cnt;
+ memcpy(msg.msg, cec->rx_buf, msg.len);
+ cec_received_msg(cec->adap, &msg);
+ cec->rx_done = false;
+ cec->rx_buf_cnt = 0;
+ }
+ return IRQ_HANDLED;
+}
+
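+/*
+ * Hard interrupt handler: acknowledges the hardware status bits, feeds
+ * the TX register while a transmit is in progress and collects received
+ * bytes. Work that must reach the CEC framework is deferred to the
+ * threaded handler by returning IRQ_WAKE_THREAD.
+ */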
+static irqreturn_t tegra_cec_irq_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tegra_cec *cec = dev_get_drvdata(dev);
+ u32 status, mask;
+
+ status = cec_read(cec, TEGRA_CEC_INT_STAT);
+ mask = cec_read(cec, TEGRA_CEC_INT_MASK);
+
+ status &= mask;
+
+ if (!status)
+ return IRQ_HANDLED;
+
+ if (status & TEGRA_CEC_INT_STAT_TX_REGISTER_UNDERRUN) {
+ dev_err(dev, "TX underrun, interrupt timing issue!\n");
+
+ tegra_cec_error_recovery(cec);
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+
+ cec->tx_done = true;
+ cec->tx_status = CEC_TX_STATUS_ERROR;
+ return IRQ_WAKE_THREAD;
+ }
+
+ if ((status & TEGRA_CEC_INT_STAT_TX_ARBITRATION_FAILED) ||
+ (status & TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED)) {
+ tegra_cec_error_recovery(cec);
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+
+ cec->tx_done = true;
+ if (status & TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED)
+ cec->tx_status = CEC_TX_STATUS_LOW_DRIVE;
+ else
+ cec->tx_status = CEC_TX_STATUS_ARB_LOST;
+ return IRQ_WAKE_THREAD;
+ }
+
+ if (status & TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED) {
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED);
+
+ if (status & TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD) {
+ tegra_cec_error_recovery(cec);
+
+ cec->tx_done = true;
+ cec->tx_status = CEC_TX_STATUS_NACK;
+ } else {
+ cec->tx_done = true;
+ cec->tx_status = CEC_TX_STATUS_OK;
+ }
+ return IRQ_WAKE_THREAD;
+ }
+
+ if (status & TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD)
+ dev_warn(dev, "TX NAKed on the fly!\n");
+
+ if (status & TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY) {
+ if (cec->tx_buf_cur == cec->tx_buf_cnt) {
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+ } else {
+ cec_write(cec, TEGRA_CEC_TX_REGISTER,
+ cec->tx_buf[cec->tx_buf_cur++]);
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY);
+ }
+ }
+
+ if (status & (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN |
+ TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED |
+ TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED |
+ TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED)) {
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN |
+ TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED |
+ TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED |
+ TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED));
+ } else if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) {
+ u32 v;
+
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ TEGRA_CEC_INT_STAT_RX_REGISTER_FULL);
+ v = cec_read(cec, TEGRA_CEC_RX_REGISTER);
+ if (cec->rx_buf_cnt < CEC_MAX_MSG_SIZE)
+ cec->rx_buf[cec->rx_buf_cnt++] = v & 0xff;
+ if (v & TEGRA_CEC_RX_REGISTER_EOM) {
+ cec->rx_done = true;
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
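+/*
+ * Enabling the adapter programs the RX/TX bit-timing registers and
+ * unmasks the interrupts needed for transmit and receive; disabling it
+ * resets the message buffers and masks all interrupts.
+ */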
+static int tegra_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct tegra_cec *cec = adap->priv;
+
+ cec->rx_buf_cnt = 0;
+ cec->tx_buf_cnt = 0;
+ cec->tx_buf_cur = 0;
+
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, 0);
+ cec_write(cec, TEGRA_CEC_INT_MASK, 0);
+ cec_write(cec, TEGRA_CEC_INT_STAT, 0xffffffff);
+ cec_write(cec, TEGRA_CEC_SW_CONTROL, 0);
+
+ if (!enable)
+ return 0;
+
+ cec_write(cec, TEGRA_CEC_INPUT_FILTER, (1U << 31) | 0x20);
+
+ cec_write(cec, TEGRA_CEC_RX_TIMING_0,
+ (0x7a << TEGRA_CEC_RX_TIM0_START_BIT_MAX_LO_TIME_SHIFT) |
+ (0x6d << TEGRA_CEC_RX_TIM0_START_BIT_MIN_LO_TIME_SHIFT) |
+ (0x93 << TEGRA_CEC_RX_TIM0_START_BIT_MAX_DURATION_SHIFT) |
+ (0x86 << TEGRA_CEC_RX_TIM0_START_BIT_MIN_DURATION_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_RX_TIMING_1,
+ (0x35 << TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_LO_TIME_SHIFT) |
+ (0x21 << TEGRA_CEC_RX_TIM1_DATA_BIT_SAMPLE_TIME_SHIFT) |
+ (0x56 << TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_DURATION_SHIFT) |
+ (0x40 << TEGRA_CEC_RX_TIM1_DATA_BIT_MIN_DURATION_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_RX_TIMING_2,
+ (0x50 << TEGRA_CEC_RX_TIM2_END_OF_BLOCK_TIME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_TX_TIMING_0,
+ (0x74 << TEGRA_CEC_TX_TIM0_START_BIT_LO_TIME_SHIFT) |
+ (0x8d << TEGRA_CEC_TX_TIM0_START_BIT_DURATION_SHIFT) |
+ (0x08 << TEGRA_CEC_TX_TIM0_BUS_XITION_TIME_SHIFT) |
+ (0x71 << TEGRA_CEC_TX_TIM0_BUS_ERROR_LO_TIME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_TX_TIMING_1,
+ (0x2f << TEGRA_CEC_TX_TIM1_LO_DATA_BIT_LO_TIME_SHIFT) |
+ (0x13 << TEGRA_CEC_TX_TIM1_HI_DATA_BIT_LO_TIME_SHIFT) |
+ (0x4b << TEGRA_CEC_TX_TIM1_DATA_BIT_DURATION_SHIFT) |
+ (0x21 << TEGRA_CEC_TX_TIM1_ACK_NAK_BIT_SAMPLE_TIME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_TX_TIMING_2,
+ (0x07 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_ADDITIONAL_FRAME_SHIFT) |
+ (0x05 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_NEW_FRAME_SHIFT) |
+ (0x03 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_RETRY_FRAME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ TEGRA_CEC_INT_MASK_TX_REGISTER_UNDERRUN |
+ TEGRA_CEC_INT_MASK_TX_FRAME_OR_BLOCK_NAKD |
+ TEGRA_CEC_INT_MASK_TX_ARBITRATION_FAILED |
+ TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED |
+ TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED |
+ TEGRA_CEC_INT_MASK_RX_REGISTER_FULL |
+ TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN);
+
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE);
+ return 0;
+}
+
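+/*
+ * The hardware ACKs incoming frames based on a bitmask of logical
+ * addresses kept in the HW_CONTROL register; CEC_LOG_ADDR_INVALID
+ * clears the whole mask.
+ */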
+static int tegra_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct tegra_cec *cec = adap->priv;
+ u32 state = cec_read(cec, TEGRA_CEC_HW_CONTROL);
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ state &= ~TEGRA_CEC_HWCTRL_RX_LADDR_MASK;
+ else
+ state |= TEGRA_CEC_HWCTRL_RX_LADDR((1 << logical_addr));
+
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, state);
+ return 0;
+}
+
+static int tegra_cec_adap_monitor_all_enable(struct cec_adapter *adap,
+ bool enable)
+{
+ struct tegra_cec *cec = adap->priv;
+ u32 reg = cec_read(cec, TEGRA_CEC_HW_CONTROL);
+
+ if (enable)
+ reg |= TEGRA_CEC_HWCTRL_RX_SNOOP;
+ else
+ reg &= ~TEGRA_CEC_HWCTRL_RX_SNOOP;
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, reg);
+ return 0;
+}
+
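+/*
+ * Transmission is driven from the interrupt handler: each message byte
+ * is stored as a 32-bit word carrying start-bit, EOM, broadcast and
+ * retry flags, and unmasking the TX-register-empty interrupt starts
+ * feeding those words to the hardware.
+ */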
+static int tegra_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time_ms, struct cec_msg *msg)
+{
+ bool retry_xfer = signal_free_time_ms == CEC_SIGNAL_FREE_TIME_RETRY;
+ struct tegra_cec *cec = adap->priv;
+ unsigned int i;
+ u32 mode = 0;
+ u32 mask;
+
+ if (cec_msg_is_broadcast(msg))
+ mode = TEGRA_CEC_TX_REG_BCAST;
+
+ cec->tx_buf_cur = 0;
+ cec->tx_buf_cnt = msg->len;
+
+ for (i = 0; i < msg->len; i++) {
+ cec->tx_buf[i] = mode | msg->msg[i];
+ if (i == 0)
+ cec->tx_buf[i] |= TEGRA_CEC_TX_REG_START_BIT;
+ if (i == msg->len - 1)
+ cec->tx_buf[i] |= TEGRA_CEC_TX_REG_EOM;
+ if (i == 0 && retry_xfer)
+ cec->tx_buf[i] |= TEGRA_CEC_TX_REG_RETRY;
+ }
+
+ mask = cec_read(cec, TEGRA_CEC_INT_MASK);
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask | TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+
+ return 0;
+}
+
+static const struct cec_adap_ops tegra_cec_ops = {
+ .adap_enable = tegra_cec_adap_enable,
+ .adap_log_addr = tegra_cec_adap_log_addr,
+ .adap_transmit = tegra_cec_adap_transmit,
+ .adap_monitor_all_enable = tegra_cec_adap_monitor_all_enable,
+};
+
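+/*
+ * The CEC controller pairs with the HDMI controller referenced by the
+ * hdmi-phandle property: a CEC notifier obtained for that device keeps
+ * the adapter's physical address in sync with the HDMI driver.
+ */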
+static int tegra_cec_probe(struct platform_device *pdev)
+{
+ struct platform_device *hdmi_dev;
+ struct device_node *np;
+ struct tegra_cec *cec;
+ struct resource *res;
+ int ret = 0;
+
+ np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
+
+ if (!np) {
+ dev_err(&pdev->dev, "Failed to find hdmi node in device tree\n");
+ return -ENODEV;
+ }
+ hdmi_dev = of_find_device_by_node(np);
+ if (hdmi_dev == NULL)
+ return -EPROBE_DEFER;
+
+ cec = devm_kzalloc(&pdev->dev, sizeof(struct tegra_cec), GFP_KERNEL);
+
+ if (!cec)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Unable to allocate resources for device\n");
+ return -EBUSY;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ pdev->name)) {
+ dev_err(&pdev->dev,
+ "Unable to request mem region for device\n");
+ return -EBUSY;
+ }
+
+ cec->tegra_cec_irq = platform_get_irq(pdev, 0);
+
+ if (cec->tegra_cec_irq <= 0)
+ return -EBUSY;
+
+ cec->cec_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+
+ if (!cec->cec_base) {
+ dev_err(&pdev->dev, "Unable to grab IOs for device\n");
+ return -EBUSY;
+ }
+
+ cec->clk = devm_clk_get(&pdev->dev, "cec");
+
+ if (IS_ERR_OR_NULL(cec->clk)) {
+ dev_err(&pdev->dev, "Can't get clock for CEC\n");
+ return -ENOENT;
+ }
+
+ clk_prepare_enable(cec->clk);
+
+ /* set context info. */
+ cec->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, cec);
+
+ ret = devm_request_threaded_irq(&pdev->dev, cec->tegra_cec_irq,
+ tegra_cec_irq_handler, tegra_cec_irq_thread_handler,
+ 0, "cec_irq", &pdev->dev);
+
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Unable to request interrupt for device\n");
+ goto clk_error;
+ }
+
+ cec->notifier = cec_notifier_get(&hdmi_dev->dev);
+ if (!cec->notifier) {
+ ret = -ENOMEM;
+ goto clk_error;
+ }
+
+ cec->adap = cec_allocate_adapter(&tegra_cec_ops, cec, TEGRA_CEC_NAME,
+ CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL,
+ CEC_MAX_LOG_ADDRS);
+ if (IS_ERR(cec->adap)) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Couldn't create cec adapter\n");
+ goto cec_error;
+ }
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't register device\n");
+ goto cec_error;
+ }
+
+ cec_register_cec_notifier(cec->adap, cec->notifier);
+
+ return 0;
+
+cec_error:
+ if (cec->notifier)
+ cec_notifier_put(cec->notifier);
+ cec_delete_adapter(cec->adap);
+clk_error:
+ clk_disable_unprepare(cec->clk);
+ return ret;
+}
+
+static int tegra_cec_remove(struct platform_device *pdev)
+{
+ struct tegra_cec *cec = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(cec->clk);
+
+ cec_unregister_adapter(cec->adap);
+ cec_notifier_put(cec->notifier);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_cec_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_cec *cec = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(cec->clk);
+
+ dev_notice(&pdev->dev, "suspended\n");
+ return 0;
+}
+
+static int tegra_cec_resume(struct platform_device *pdev)
+{
+ struct tegra_cec *cec = platform_get_drvdata(pdev);
+
+ dev_notice(&pdev->dev, "Resuming\n");
+
+ clk_prepare_enable(cec->clk);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id tegra_cec_of_match[] = {
+ { .compatible = "nvidia,tegra114-cec", },
+ { .compatible = "nvidia,tegra124-cec", },
+ { .compatible = "nvidia,tegra210-cec", },
+ {},
+};
+
+static struct platform_driver tegra_cec_driver = {
+ .driver = {
+ .name = TEGRA_CEC_NAME,
+ .of_match_table = of_match_ptr(tegra_cec_of_match),
+ },
+ .probe = tegra_cec_probe,
+ .remove = tegra_cec_remove,
+
+#ifdef CONFIG_PM
+ .suspend = tegra_cec_suspend,
+ .resume = tegra_cec_resume,
+#endif
+};
+
+module_platform_driver(tegra_cec_driver);
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.h b/drivers/media/platform/tegra-cec/tegra_cec.h
new file mode 100644
index 000000000000..e301513daa87
--- /dev/null
+++ b/drivers/media/platform/tegra-cec/tegra_cec.h
@@ -0,0 +1,127 @@
+/*
+ * Tegra CEC register definitions
+ *
+ * The original 3.10 CEC driver using a custom API:
+ *
+ * Copyright (c) 2012-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Conversion to the CEC framework and to the mainline kernel:
+ *
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TEGRA_CEC_H
+#define TEGRA_CEC_H
+
+/* CEC registers */
+#define TEGRA_CEC_SW_CONTROL 0x000
+#define TEGRA_CEC_HW_CONTROL 0x004
+#define TEGRA_CEC_INPUT_FILTER 0x008
+#define TEGRA_CEC_TX_REGISTER 0x010
+#define TEGRA_CEC_RX_REGISTER 0x014
+#define TEGRA_CEC_RX_TIMING_0 0x018
+#define TEGRA_CEC_RX_TIMING_1 0x01c
+#define TEGRA_CEC_RX_TIMING_2 0x020
+#define TEGRA_CEC_TX_TIMING_0 0x024
+#define TEGRA_CEC_TX_TIMING_1 0x028
+#define TEGRA_CEC_TX_TIMING_2 0x02c
+#define TEGRA_CEC_INT_STAT 0x030
+#define TEGRA_CEC_INT_MASK 0x034
+#define TEGRA_CEC_HW_DEBUG_RX 0x038
+#define TEGRA_CEC_HW_DEBUG_TX 0x03c
+
+#define TEGRA_CEC_HWCTRL_RX_LADDR_MASK 0x7fff
+#define TEGRA_CEC_HWCTRL_RX_LADDR(x) \
+ ((x) & TEGRA_CEC_HWCTRL_RX_LADDR_MASK)
+#define TEGRA_CEC_HWCTRL_RX_SNOOP (1 << 15)
+#define TEGRA_CEC_HWCTRL_RX_NAK_MODE (1 << 16)
+#define TEGRA_CEC_HWCTRL_TX_NAK_MODE (1 << 24)
+#define TEGRA_CEC_HWCTRL_FAST_SIM_MODE (1 << 30)
+#define TEGRA_CEC_HWCTRL_TX_RX_MODE (1 << 31)
+
+#define TEGRA_CEC_INPUT_FILTER_MODE (1 << 31)
+#define TEGRA_CEC_INPUT_FILTER_FIFO_LENGTH_SHIFT 0
+
+#define TEGRA_CEC_TX_REG_DATA_SHIFT 0
+#define TEGRA_CEC_TX_REG_EOM (1 << 8)
+#define TEGRA_CEC_TX_REG_BCAST (1 << 12)
+#define TEGRA_CEC_TX_REG_START_BIT (1 << 16)
+#define TEGRA_CEC_TX_REG_RETRY (1 << 17)
+
+#define TEGRA_CEC_RX_REGISTER_SHIFT 0
+#define TEGRA_CEC_RX_REGISTER_EOM (1 << 8)
+#define TEGRA_CEC_RX_REGISTER_ACK (1 << 9)
+
+#define TEGRA_CEC_RX_TIM0_START_BIT_MAX_LO_TIME_SHIFT 0
+#define TEGRA_CEC_RX_TIM0_START_BIT_MIN_LO_TIME_SHIFT 8
+#define TEGRA_CEC_RX_TIM0_START_BIT_MAX_DURATION_SHIFT 16
+#define TEGRA_CEC_RX_TIM0_START_BIT_MIN_DURATION_SHIFT 24
+
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_LO_TIME_SHIFT 0
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_SAMPLE_TIME_SHIFT 8
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_DURATION_SHIFT 16
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_MIN_DURATION_SHIFT 24
+
+#define TEGRA_CEC_RX_TIM2_END_OF_BLOCK_TIME_SHIFT 0
+
+#define TEGRA_CEC_TX_TIM0_START_BIT_LO_TIME_SHIFT 0
+#define TEGRA_CEC_TX_TIM0_START_BIT_DURATION_SHIFT 8
+#define TEGRA_CEC_TX_TIM0_BUS_XITION_TIME_SHIFT 16
+#define TEGRA_CEC_TX_TIM0_BUS_ERROR_LO_TIME_SHIFT 24
+
+#define TEGRA_CEC_TX_TIM1_LO_DATA_BIT_LO_TIME_SHIFT 0
+#define TEGRA_CEC_TX_TIM1_HI_DATA_BIT_LO_TIME_SHIFT 8
+#define TEGRA_CEC_TX_TIM1_DATA_BIT_DURATION_SHIFT 16
+#define TEGRA_CEC_TX_TIM1_ACK_NAK_BIT_SAMPLE_TIME_SHIFT 24
+
+#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_ADDITIONAL_FRAME_SHIFT 0
+#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_NEW_FRAME_SHIFT 4
+#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_RETRY_FRAME_SHIFT 8
+
+#define TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY (1 << 0)
+#define TEGRA_CEC_INT_STAT_TX_REGISTER_UNDERRUN (1 << 1)
+#define TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD (1 << 2)
+#define TEGRA_CEC_INT_STAT_TX_ARBITRATION_FAILED (1 << 3)
+#define TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED (1 << 4)
+#define TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED (1 << 5)
+#define TEGRA_CEC_INT_STAT_RX_REGISTER_FULL (1 << 8)
+#define TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN (1 << 9)
+#define TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED (1 << 10)
+#define TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED (1 << 11)
+#define TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED (1 << 12)
+#define TEGRA_CEC_INT_STAT_FILTERED_RX_DATA_PIN_TRANSITION_H2L (1 << 13)
+#define TEGRA_CEC_INT_STAT_FILTERED_RX_DATA_PIN_TRANSITION_L2H (1 << 14)
+
+#define TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY (1 << 0)
+#define TEGRA_CEC_INT_MASK_TX_REGISTER_UNDERRUN (1 << 1)
+#define TEGRA_CEC_INT_MASK_TX_FRAME_OR_BLOCK_NAKD (1 << 2)
+#define TEGRA_CEC_INT_MASK_TX_ARBITRATION_FAILED (1 << 3)
+#define TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED (1 << 4)
+#define TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED (1 << 5)
+#define TEGRA_CEC_INT_MASK_RX_REGISTER_FULL (1 << 8)
+#define TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN (1 << 9)
+#define TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED (1 << 10)
+#define TEGRA_CEC_INT_MASK_RX_BUS_ANOMALY_DETECTED (1 << 11)
+#define TEGRA_CEC_INT_MASK_RX_BUS_ERROR_DETECTED (1 << 12)
+#define TEGRA_CEC_INT_MASK_FILTERED_RX_DATA_PIN_TRANSITION_H2L (1 << 13)
+#define TEGRA_CEC_INT_MASK_FILTERED_RX_DATA_PIN_TRANSITION_L2H (1 << 14)
+
+#define TEGRA_CEC_HW_DEBUG_TX_DURATION_COUNT_SHIFT 0
+#define TEGRA_CEC_HW_DEBUG_TX_TXBIT_COUNT_SHIFT 17
+#define TEGRA_CEC_HW_DEBUG_TX_STATE_SHIFT 21
+#define TEGRA_CEC_HW_DEBUG_TX_FORCELOOUT (1 << 25)
+#define TEGRA_CEC_HW_DEBUG_TX_TXDATABIT_SAMPLE_TIMER (1 << 26)
+
+#endif /* TEGRA_CEC_H */
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 42e383a48ffe..8b586c864524 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -1522,6 +1522,11 @@ static int cal_async_complete(struct v4l2_async_notifier *notifier)
return 0;
}
+static const struct v4l2_async_notifier_operations cal_async_ops = {
+ .bound = cal_async_bound,
+ .complete = cal_async_complete,
+};
+
static int cal_complete_ctx(struct cal_ctx *ctx)
{
struct video_device *vfd;
@@ -1736,8 +1741,7 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
ctx->asd_list[0] = asd;
ctx->notifier.subdevs = ctx->asd_list;
ctx->notifier.num_subdevs = 1;
- ctx->notifier.bound = cal_async_bound;
- ctx->notifier.complete = cal_async_complete;
+ ctx->notifier.ops = &cal_async_ops;
ret = v4l2_async_notifier_register(&ctx->v4l2_dev,
&ctx->notifier);
if (ret) {
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index 51c0eee61ca6..fe088a953860 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -267,11 +267,12 @@ static struct component_match *vimc_add_subdevs(struct vimc_device *vimc)
PLATFORM_DEVID_AUTO,
&pdata,
sizeof(pdata));
- if (!vimc->subdevs[i]) {
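+ /* platform_device_register_data() returns an ERR_PTR() on failure, never NULL */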
+ if (IS_ERR(vimc->subdevs[i])) {
+ match = ERR_CAST(vimc->subdevs[i]);
while (--i >= 0)
platform_device_unregister(vimc->subdevs[i]);
- return ERR_PTR(-ENOMEM);
+ return match;
}
component_match_add(&vimc->pdev.dev, &match, vimc_comp_compare,
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index f0f423c7ca41..a651527d80db 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -189,6 +189,22 @@ struct vivid_fmt vivid_formats[] = {
.buffers = 1,
},
{
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .color_enc = TGP_COLOR_ENC_LUMA,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y12,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .color_enc = TGP_COLOR_ENC_LUMA,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
.fourcc = V4L2_PIX_FMT_Y16,
.vdownsampling = { 1 },
.bit_depth = { 16 },
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index ebfdf334d99c..d881cf09876d 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -351,6 +351,11 @@ static int xvip_graph_notify_bound(struct v4l2_async_notifier *notifier,
return -EINVAL;
}
+static const struct v4l2_async_notifier_operations xvip_graph_notify_ops = {
+ .bound = xvip_graph_notify_bound,
+ .complete = xvip_graph_notify_complete,
+};
+
static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
struct device_node *node)
{
@@ -548,8 +553,7 @@ static int xvip_graph_init(struct xvip_composite_device *xdev)
xdev->notifier.subdevs = subdevs;
xdev->notifier.num_subdevs = num_subdevs;
- xdev->notifier.bound = xvip_graph_notify_bound;
- xdev->notifier.complete = xvip_graph_notify_complete;
+ xdev->notifier.ops = &xvip_graph_notify_ops;
ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier);
if (ret < 0) {
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 6888b7db449d..7575e5370a49 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -281,9 +281,9 @@ static bool cadet_has_rds_data(struct cadet *dev)
}
-static void cadet_handler(unsigned long data)
+static void cadet_handler(struct timer_list *t)
{
- struct cadet *dev = (void *)data;
+ struct cadet *dev = from_timer(dev, t, readtimer);
/* Service the RDS fifo */
if (mutex_trylock(&dev->lock)) {
@@ -309,7 +309,6 @@ static void cadet_handler(unsigned long data)
/*
* Clean up and exit
*/
- setup_timer(&dev->readtimer, cadet_handler, data);
dev->readtimer.expires = jiffies + msecs_to_jiffies(50);
add_timer(&dev->readtimer);
}
@@ -318,7 +317,7 @@ static void cadet_start_rds(struct cadet *dev)
{
dev->rdsstat = 1;
outb(0x80, dev->io); /* Select RDS fifo */
- setup_timer(&dev->readtimer, cadet_handler, (unsigned long)dev);
+ timer_setup(&dev->readtimer, cadet_handler, 0);
dev->readtimer.expires = jiffies + msecs_to_jiffies(50);
add_timer(&dev->readtimer);
}
diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c
index 3c0a22a54113..70a2c86774ce 100644
--- a/drivers/media/radio/radio-raremono.c
+++ b/drivers/media/radio/radio-raremono.c
@@ -254,7 +254,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
struct raremono_device *radio = video_drvdata(file);
- u32 freq = f->frequency;
+ u32 freq;
unsigned band;
if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index cd76facc22f5..c89a7d5b8c55 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -749,7 +749,7 @@ static const struct v4l2_ioctl_ops si470x_ioctl_ops = {
/*
* si470x_viddev_template - video device interface
*/
-struct video_device si470x_viddev_template = {
+const struct video_device si470x_viddev_template = {
.fops = &si470x_fops,
.name = DRIVER_NAME,
.release = video_device_release_empty,
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 7d2defd9d399..eb7b834a0ae5 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -209,7 +209,7 @@ struct si470x_device {
/**************************************************************************
* Common Functions
**************************************************************************/
-extern struct video_device si470x_viddev_template;
+extern const struct video_device si470x_viddev_template;
extern const struct v4l2_ctrl_ops si470x_ctrl_ops;
int si470x_get_register(struct si470x_device *radio, int regnr);
int si470x_set_register(struct si470x_device *radio, int regnr);
diff --git a/drivers/media/radio/wl128x/Kconfig b/drivers/media/radio/wl128x/Kconfig
index c9e349b169c4..2add222ea346 100644
--- a/drivers/media/radio/wl128x/Kconfig
+++ b/drivers/media/radio/wl128x/Kconfig
@@ -7,11 +7,11 @@ config RADIO_WL128X
depends on VIDEO_V4L2 && RFKILL && TTY && TI_ST
depends on GPIOLIB || COMPILE_TEST
help
- Choose Y here if you have this FM radio chip.
+ Choose Y here if you have this FM radio chip.
- In order to control your radio card, you will need to use programs
- that are compatible with the Video For Linux 2 API. Information on
- this API and pointers to "v4l2" programs may be found at
- <file:Documentation/video4linux/API.html>.
+ In order to control your radio card, you will need to use programs
+ that are compatible with the Video For Linux 2 API. Information on
+ this API and pointers to "v4l2" programs may be found at
+ <file:Documentation/video4linux/API.html>.
endmenu
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index ab3428bf63fe..800d69c3f80b 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -543,13 +543,13 @@ static inline void fm_irq_common_cmd_resp_helper(struct fmdev *fmdev, u8 stage)
* interrupt process. Therefore reset stage index to re-enable default
* interrupts. So that next interrupt will be processed as usual.
*/
-static void int_timeout_handler(unsigned long data)
+static void int_timeout_handler(struct timer_list *t)
{
struct fmdev *fmdev;
struct fm_irq *fmirq;
fmdbg("irq: timeout,trying to re-enable fm interrupts\n");
- fmdev = (struct fmdev *)data;
+ fmdev = from_timer(fmdev, t, irq_info.timer);
fmirq = &fmdev->irq_info;
fmirq->retry++;
@@ -1550,8 +1550,7 @@ int fmc_prepare(struct fmdev *fmdev)
atomic_set(&fmdev->tx_cnt, 1);
fmdev->resp_comp = NULL;
- setup_timer(&fmdev->irq_info.timer, &int_timeout_handler,
- (unsigned long)fmdev);
+ timer_setup(&fmdev->irq_info.timer, int_timeout_handler, 0);
/*TODO: add FM_STIC_EVENT later */
fmdev->irq_info.mask = FM_MAL_EVENT;
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index d9ce8ff55d0c..afb3456d4e20 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -2,7 +2,6 @@
menuconfig RC_CORE
tristate "Remote Controller support"
depends on INPUT
- default y
---help---
Enable support for Remote Controllers on Linux. This is
needed in order to support several video capture adapters,
@@ -179,6 +178,7 @@ config IR_ENE
config IR_HIX5HD2
tristate "Hisilicon hix5hd2 IR remote control"
depends on RC_CORE
+ depends on OF || COMPILE_TEST
help
Say Y here if you want to use hisilicon hix5hd2 remote control.
To compile this driver as a module, choose M here: the module will be
@@ -286,6 +286,7 @@ config IR_REDRAT3
config IR_SPI
tristate "SPI connected IR LED"
depends on SPI && LIRC
+ depends on OF || COMPILE_TEST
---help---
Say Y if you want to use an IR LED connected through SPI bus.
@@ -393,6 +394,7 @@ config RC_LOOPBACK
config IR_GPIO_CIR
tristate "GPIO IR remote control"
depends on RC_CORE
+ depends on (OF && GPIOLIB) || COMPILE_TEST
---help---
Say Y if you want to use GPIO based IR Receiver.
@@ -403,6 +405,7 @@ config IR_GPIO_TX
tristate "GPIO IR Bit Banging Transmitter"
depends on RC_CORE
depends on LIRC
+ depends on (OF && GPIOLIB) || COMPILE_TEST
---help---
Say Y if you want to use a GPIO based IR transmitter. This is a
bit banging driver.
@@ -415,6 +418,7 @@ config IR_PWM_TX
depends on RC_CORE
depends on LIRC
depends on PWM
+ depends on OF || COMPILE_TEST
---help---
Say Y if you want to use a PWM based IR transmitter. This is
more power efficient than the bit banging gpio driver.
@@ -469,6 +473,16 @@ config IR_SIR
To compile this driver as a module, choose M here: the module will
be called sir-ir.
+config IR_TANGO
+ tristate "Sigma Designs SMP86xx IR decoder"
+ depends on RC_CORE
+ depends on ARCH_TANGO || COMPILE_TEST
+ ---help---
+ Adds support for the HW IR decoder embedded on Sigma Designs
+ Tango-based systems (SMP86xx, SMP87xx).
+ The HW decoder supports NEC, RC-5, RC-6 IR protocols.
+ When compiled as a module, look for tango-ir.
+
config IR_ZX
tristate "ZTE ZX IR remote control"
depends on RC_CORE
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index ab3d5a135453..10026477a677 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -45,3 +45,4 @@ obj-$(CONFIG_IR_SERIAL) += serial_ir.o
obj-$(CONFIG_IR_SIR) += sir_ir.o
obj-$(CONFIG_IR_MTK) += mtk-cir.o
obj-$(CONFIG_IR_ZX) += zx-irdec.o
+obj-$(CONFIG_IR_TANGO) += tango-ir.o
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index d0871d60a723..8e82610ffaad 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -198,7 +198,7 @@ static const struct ati_receiver_type type_firefly = {
.default_keymap = RC_MAP_SNAPSTREAM_FIREFLY
};
-static struct usb_device_id ati_remote_table[] = {
+static const struct usb_device_id ati_remote_table[] = {
{
USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA_REMOTE_PRODUCT_ID),
.driver_info = (unsigned long)&type_ati
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index af7ba23e16e1..71b8c9bbf6c4 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -670,9 +670,9 @@ exit:
}
/* timer to simulate tx done interrupt */
-static void ene_tx_irqsim(unsigned long data)
+static void ene_tx_irqsim(struct timer_list *t)
{
- struct ene_device *dev = (struct ene_device *)data;
+ struct ene_device *dev = from_timer(dev, t, tx_sim_timer);
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
@@ -1045,8 +1045,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
if (!dev->hw_learning_and_tx_capable && txsim) {
dev->hw_learning_and_tx_capable = true;
- setup_timer(&dev->tx_sim_timer, ene_tx_irqsim,
- (long unsigned int)dev);
+ timer_setup(&dev->tx_sim_timer, ene_tx_irqsim, 0);
pr_warn("Simulation of TX activated\n");
}
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 7248b3662285..3d99b51384ac 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -14,119 +14,64 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <media/rc-core.h>
-#include <linux/platform_data/media/gpio-ir-recv.h>
-#define GPIO_IR_DRIVER_NAME "gpio-rc-recv"
#define GPIO_IR_DEVICE_NAME "gpio_ir_recv"
struct gpio_rc_dev {
struct rc_dev *rcdev;
- int gpio_nr;
- bool active_low;
+ struct gpio_desc *gpiod;
+ int irq;
};
-#ifdef CONFIG_OF
-/*
- * Translate OpenFirmware node properties into platform_data
- */
-static int gpio_ir_recv_get_devtree_pdata(struct device *dev,
- struct gpio_ir_recv_platform_data *pdata)
-{
- struct device_node *np = dev->of_node;
- enum of_gpio_flags flags;
- int gpio;
-
- gpio = of_get_gpio_flags(np, 0, &flags);
- if (gpio < 0) {
- if (gpio != -EPROBE_DEFER)
- dev_err(dev, "Failed to get gpio flags (%d)\n", gpio);
- return gpio;
- }
-
- pdata->gpio_nr = gpio;
- pdata->active_low = (flags & OF_GPIO_ACTIVE_LOW);
- /* probe() takes care of map_name == NULL or allowed_protos == 0 */
- pdata->map_name = of_get_property(np, "linux,rc-map-name", NULL);
- pdata->allowed_protos = 0;
-
- return 0;
-}
-
-static const struct of_device_id gpio_ir_recv_of_match[] = {
- { .compatible = "gpio-ir-receiver", },
- { },
-};
-MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match);
-
-#else /* !CONFIG_OF */
-
-#define gpio_ir_recv_get_devtree_pdata(dev, pdata) (-ENOSYS)
-
-#endif
-
static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id)
{
+ int val;
struct gpio_rc_dev *gpio_dev = dev_id;
- int gval;
- int rc = 0;
-
- gval = gpio_get_value(gpio_dev->gpio_nr);
-
- if (gval < 0)
- goto err_get_value;
-
- if (gpio_dev->active_low)
- gval = !gval;
- rc = ir_raw_event_store_edge(gpio_dev->rcdev, gval == 1);
- if (rc < 0)
- goto err_get_value;
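+ /* gpiod_get_value() already accounts for the active-low flag from the device tree */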
+ val = gpiod_get_value(gpio_dev->gpiod);
+ if (val >= 0)
+ ir_raw_event_store_edge(gpio_dev->rcdev, val == 1);
-err_get_value:
return IRQ_HANDLED;
}
static int gpio_ir_recv_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct gpio_rc_dev *gpio_dev;
struct rc_dev *rcdev;
- const struct gpio_ir_recv_platform_data *pdata =
- pdev->dev.platform_data;
int rc;
- if (pdev->dev.of_node) {
- struct gpio_ir_recv_platform_data *dtpdata =
- devm_kzalloc(&pdev->dev, sizeof(*dtpdata), GFP_KERNEL);
- if (!dtpdata)
- return -ENOMEM;
- rc = gpio_ir_recv_get_devtree_pdata(&pdev->dev, dtpdata);
- if (rc)
- return rc;
- pdata = dtpdata;
- }
-
- if (!pdata)
- return -EINVAL;
+ if (!np)
+ return -ENODEV;
- if (pdata->gpio_nr < 0)
- return -EINVAL;
-
- gpio_dev = kzalloc(sizeof(struct gpio_rc_dev), GFP_KERNEL);
+ gpio_dev = devm_kzalloc(dev, sizeof(*gpio_dev), GFP_KERNEL);
if (!gpio_dev)
return -ENOMEM;
- rcdev = rc_allocate_device(RC_DRIVER_IR_RAW);
- if (!rcdev) {
- rc = -ENOMEM;
- goto err_allocate_device;
+ gpio_dev->gpiod = devm_gpiod_get(dev, NULL, GPIOD_IN);
+ if (IS_ERR(gpio_dev->gpiod)) {
+ rc = PTR_ERR(gpio_dev->gpiod);
+ /* Just try again if this happens */
+ if (rc != -EPROBE_DEFER)
+ dev_err(dev, "error getting gpio (%d)\n", rc);
+ return rc;
}
+ gpio_dev->irq = gpiod_to_irq(gpio_dev->gpiod);
+ if (gpio_dev->irq < 0)
+ return gpio_dev->irq;
+
+ rcdev = devm_rc_allocate_device(dev, RC_DRIVER_IR_RAW);
+ if (!rcdev)
+ return -ENOMEM;
rcdev->priv = gpio_dev;
rcdev->device_name = GPIO_IR_DEVICE_NAME;
@@ -135,92 +80,52 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
rcdev->input_id.vendor = 0x0001;
rcdev->input_id.product = 0x0001;
rcdev->input_id.version = 0x0100;
- rcdev->dev.parent = &pdev->dev;
- rcdev->driver_name = GPIO_IR_DRIVER_NAME;
+ rcdev->dev.parent = dev;
+ rcdev->driver_name = KBUILD_MODNAME;
rcdev->min_timeout = 1;
rcdev->timeout = IR_DEFAULT_TIMEOUT;
rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
- if (pdata->allowed_protos)
- rcdev->allowed_protocols = pdata->allowed_protos;
- else
- rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
- rcdev->map_name = pdata->map_name ?: RC_MAP_EMPTY;
+ rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
+ rcdev->map_name = of_get_property(np, "linux,rc-map-name", NULL);
+ if (!rcdev->map_name)
+ rcdev->map_name = RC_MAP_EMPTY;
gpio_dev->rcdev = rcdev;
- gpio_dev->gpio_nr = pdata->gpio_nr;
- gpio_dev->active_low = pdata->active_low;
-
- rc = gpio_request(pdata->gpio_nr, "gpio-ir-recv");
- if (rc < 0)
- goto err_gpio_request;
- rc = gpio_direction_input(pdata->gpio_nr);
- if (rc < 0)
- goto err_gpio_direction_input;
- rc = rc_register_device(rcdev);
+ rc = devm_rc_register_device(dev, rcdev);
if (rc < 0) {
- dev_err(&pdev->dev, "failed to register rc device\n");
- goto err_register_rc_device;
+ dev_err(dev, "failed to register rc device (%d)\n", rc);
+ return rc;
}
platform_set_drvdata(pdev, gpio_dev);
- rc = request_any_context_irq(gpio_to_irq(pdata->gpio_nr),
- gpio_ir_recv_irq,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "gpio-ir-recv-irq", gpio_dev);
- if (rc < 0)
- goto err_request_irq;
-
- return 0;
-
-err_request_irq:
- rc_unregister_device(rcdev);
- rcdev = NULL;
-err_register_rc_device:
-err_gpio_direction_input:
- gpio_free(pdata->gpio_nr);
-err_gpio_request:
- rc_free_device(rcdev);
-err_allocate_device:
- kfree(gpio_dev);
- return rc;
-}
-
-static int gpio_ir_recv_remove(struct platform_device *pdev)
-{
- struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
-
- free_irq(gpio_to_irq(gpio_dev->gpio_nr), gpio_dev);
- rc_unregister_device(gpio_dev->rcdev);
- gpio_free(gpio_dev->gpio_nr);
- kfree(gpio_dev);
- return 0;
+ return devm_request_irq(dev, gpio_dev->irq, gpio_ir_recv_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "gpio-ir-recv-irq", gpio_dev);
}
#ifdef CONFIG_PM
static int gpio_ir_recv_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
+ struct gpio_rc_dev *gpio_dev = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
- enable_irq_wake(gpio_to_irq(gpio_dev->gpio_nr));
+ enable_irq_wake(gpio_dev->irq);
else
- disable_irq(gpio_to_irq(gpio_dev->gpio_nr));
+ disable_irq(gpio_dev->irq);
return 0;
}
static int gpio_ir_recv_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
+ struct gpio_rc_dev *gpio_dev = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
- disable_irq_wake(gpio_to_irq(gpio_dev->gpio_nr));
+ disable_irq_wake(gpio_dev->irq);
else
- enable_irq(gpio_to_irq(gpio_dev->gpio_nr));
+ enable_irq(gpio_dev->irq);
return 0;
}
@@ -231,11 +136,16 @@ static const struct dev_pm_ops gpio_ir_recv_pm_ops = {
};
#endif
+static const struct of_device_id gpio_ir_recv_of_match[] = {
+ { .compatible = "gpio-ir-receiver", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match);
+
static struct platform_driver gpio_ir_recv_driver = {
.probe = gpio_ir_recv_probe,
- .remove = gpio_ir_recv_remove,
.driver = {
- .name = GPIO_IR_DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(gpio_ir_recv_of_match),
#ifdef CONFIG_PM
.pm = &gpio_ir_recv_pm_ops,
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index a5ea86be8f44..f563ddd7f739 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -137,9 +137,9 @@ static void igorplugusb_cmd(struct igorplugusb *ir, int cmd)
dev_err(ir->dev, "submit urb failed: %d", ret);
}
-static void igorplugusb_timer(unsigned long data)
+static void igorplugusb_timer(struct timer_list *t)
{
- struct igorplugusb *ir = (struct igorplugusb *)data;
+ struct igorplugusb *ir = from_timer(ir, t, timer);
igorplugusb_cmd(ir, GET_INFRACODE);
}
@@ -174,7 +174,7 @@ static int igorplugusb_probe(struct usb_interface *intf,
ir->dev = &intf->dev;
- setup_timer(&ir->timer, igorplugusb_timer, (unsigned long)ir);
+ timer_setup(&ir->timer, igorplugusb_timer, 0);
ir->request.bRequest = GET_INFRACODE;
ir->request.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
@@ -245,7 +245,7 @@ static void igorplugusb_disconnect(struct usb_interface *intf)
usb_free_urb(ir->urb);
}
-static struct usb_device_id igorplugusb_table[] = {
+static const struct usb_device_id igorplugusb_table[] = {
/* Igor Plug USB (Atmel's Manufact. ID) */
{ USB_DEVICE(0x03eb, 0x0002) },
/* Fit PC2 Infrared Adapter */
diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
index 03fe080278df..bcbabeeab12a 100644
--- a/drivers/media/rc/img-ir/img-ir-core.c
+++ b/drivers/media/rc/img-ir/img-ir-core.c
@@ -92,10 +92,9 @@ static int img_ir_probe(struct platform_device *pdev)
/* Private driver data */
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "cannot allocate device data\n");
+ if (!priv)
return -ENOMEM;
- }
+
platform_set_drvdata(pdev, priv);
priv->dev = &pdev->dev;
spin_lock_init(&priv->lock);
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
index 82fdf4cc0824..f54bc5d23893 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.c
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -867,9 +867,9 @@ static void img_ir_handle_data(struct img_ir_priv *priv, u32 len, u64 raw)
}
/* timer function to end waiting for repeat. */
-static void img_ir_end_timer(unsigned long arg)
+static void img_ir_end_timer(struct timer_list *t)
{
- struct img_ir_priv *priv = (struct img_ir_priv *)arg;
+ struct img_ir_priv *priv = from_timer(priv, t, hw.end_timer);
spin_lock_irq(&priv->lock);
img_ir_end_repeat(priv);
@@ -881,9 +881,9 @@ static void img_ir_end_timer(unsigned long arg)
* cleared when invalid interrupts were generated due to a quirk in the
* img-ir decoder.
*/
-static void img_ir_suspend_timer(unsigned long arg)
+static void img_ir_suspend_timer(struct timer_list *t)
{
- struct img_ir_priv *priv = (struct img_ir_priv *)arg;
+ struct img_ir_priv *priv = from_timer(priv, t, hw.suspend_timer);
spin_lock_irq(&priv->lock);
/*
@@ -1055,9 +1055,8 @@ int img_ir_probe_hw(struct img_ir_priv *priv)
img_ir_probe_hw_caps(priv);
/* Set up the end timer */
- setup_timer(&hw->end_timer, img_ir_end_timer, (unsigned long)priv);
- setup_timer(&hw->suspend_timer, img_ir_suspend_timer,
- (unsigned long)priv);
+ timer_setup(&hw->end_timer, img_ir_end_timer, 0);
+ timer_setup(&hw->suspend_timer, img_ir_suspend_timer, 0);
/* Register a clock notifier */
if (!IS_ERR(priv->clk)) {
diff --git a/drivers/media/rc/img-ir/img-ir-raw.c b/drivers/media/rc/img-ir/img-ir-raw.c
index 64714efc1145..6e545680d3b6 100644
--- a/drivers/media/rc/img-ir/img-ir-raw.c
+++ b/drivers/media/rc/img-ir/img-ir-raw.c
@@ -67,9 +67,9 @@ void img_ir_isr_raw(struct img_ir_priv *priv, u32 irq_status)
* order to be assured of the final space. If there are no edges for a certain
* time we use this timer to emit a final sample to satisfy them.
*/
-static void img_ir_echo_timer(unsigned long arg)
+static void img_ir_echo_timer(struct timer_list *t)
{
- struct img_ir_priv *priv = (struct img_ir_priv *)arg;
+ struct img_ir_priv *priv = from_timer(priv, t, raw.timer);
spin_lock_irq(&priv->lock);
@@ -107,7 +107,7 @@ int img_ir_probe_raw(struct img_ir_priv *priv)
int error;
/* Set up the echo timer */
- setup_timer(&raw->timer, img_ir_echo_timer, (unsigned long)priv);
+ timer_setup(&raw->timer, img_ir_echo_timer, 0);
/* Allocate raw decoder */
raw->rdev = rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 7b3f31cc63d2..b25b35b3f6da 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -346,7 +346,7 @@ static const struct imon_usb_dev_descr imon_ir_raw = {
* devices use the SoundGraph vendor ID (0x15c2). This driver only supports
* the ffdc and later devices, which do onboard decoding.
*/
-static struct usb_device_id imon_usb_id_table[] = {
+static const struct usb_device_id imon_usb_id_table[] = {
/*
* Several devices with this same device ID, all use iMON_PAD.inf
* SoundGraph iMON PAD (IR & VFD)
@@ -602,8 +602,7 @@ static int send_packet(struct imon_context *ictx)
ictx->tx_urb->actual_length = 0;
} else {
/* fill request into kmalloc'ed space: */
- control_req = kmalloc(sizeof(struct usb_ctrlrequest),
- GFP_KERNEL);
+ control_req = kmalloc(sizeof(*control_req), GFP_KERNEL);
if (control_req == NULL)
return -ENOMEM;
@@ -943,7 +942,7 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
int seq;
int retval = 0;
struct imon_context *ictx;
- const unsigned char vfd_packet6[] = {
+ static const unsigned char vfd_packet6[] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
ictx = file->private_data;
@@ -1091,9 +1090,9 @@ static void usb_tx_callback(struct urb *urb)
/**
* report touchscreen input
*/
-static void imon_touch_display_timeout(unsigned long data)
+static void imon_touch_display_timeout(struct timer_list *t)
{
- struct imon_context *ictx = (struct imon_context *)data;
+ struct imon_context *ictx = from_timer(ictx, t, ttimer);
if (ictx->display_type != IMON_DISPLAY_TYPE_VGA)
return;
@@ -2047,8 +2046,8 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
{
struct rc_dev *rdev;
int ret;
- const unsigned char fp_packet[] = { 0x40, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x88 };
+ static const unsigned char fp_packet[] = {
+ 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88 };
rdev = rc_allocate_device(ictx->dev_descr->flags & IMON_IR_RAW ?
RC_DRIVER_IR_RAW : RC_DRIVER_SCANCODE);
@@ -2310,11 +2309,10 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf,
struct usb_host_interface *iface_desc;
int ret = -ENOMEM;
- ictx = kzalloc(sizeof(struct imon_context), GFP_KERNEL);
- if (!ictx) {
- dev_err(dev, "%s: kzalloc failed for context", __func__);
+ ictx = kzalloc(sizeof(*ictx), GFP_KERNEL);
+ if (!ictx)
goto exit;
- }
+
rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!rx_urb)
goto rx_urb_alloc_failed;
@@ -2413,8 +2411,7 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
mutex_lock(&ictx->lock);
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
- setup_timer(&ictx->ttimer, imon_touch_display_timeout,
- (unsigned long)ictx);
+ timer_setup(&ictx->ttimer, imon_touch_display_timeout, 0);
}
ictx->usbdev_intf1 = usb_get_dev(interface_to_usbdev(intf));
@@ -2517,6 +2514,11 @@ static int imon_probe(struct usb_interface *interface,
mutex_lock(&driver_lock);
first_if = usb_ifnum_to_if(usbdev, 0);
+ if (!first_if) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
first_if_ctx = usb_get_intfdata(first_if);
if (ifnum == 0) {
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index d2223c04e9ad..8f2f37412fc5 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -35,7 +35,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
struct lirc_codec *lirc = &dev->raw->lirc;
int sample;
- if (!dev->raw->lirc.drv || !dev->raw->lirc.drv->rbuf)
+ if (!dev->raw->lirc.ldev || !dev->raw->lirc.ldev->buf)
return -EINVAL;
/* Packet start */
@@ -84,8 +84,8 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
(u64)LIRC_VALUE_MASK);
gap_sample = LIRC_SPACE(lirc->gap_duration);
- lirc_buffer_write(dev->raw->lirc.drv->rbuf,
- (unsigned char *) &gap_sample);
+ lirc_buffer_write(dev->raw->lirc.ldev->buf,
+ (unsigned char *)&gap_sample);
lirc->gap = false;
}
@@ -95,9 +95,9 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
TO_US(ev.duration), TO_STR(ev.pulse));
}
- lirc_buffer_write(dev->raw->lirc.drv->rbuf,
+ lirc_buffer_write(dev->raw->lirc.ldev->buf,
(unsigned char *) &sample);
- wake_up(&dev->raw->lirc.drv->rbuf->wait_poll);
+ wake_up(&dev->raw->lirc.ldev->buf->wait_poll);
return 0;
}
@@ -298,11 +298,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
if (!dev->max_timeout)
return -ENOTTY;
+ /* Check for multiply overflow */
+ if (val > U32_MAX / 1000)
+ return -EINVAL;
+
tmp = val * 1000;
- if (tmp < dev->min_timeout ||
- tmp > dev->max_timeout)
- return -EINVAL;
+ if (tmp < dev->min_timeout || tmp > dev->max_timeout)
+ return -EINVAL;
if (dev->s_timeout)
ret = dev->s_timeout(dev, tmp);
@@ -343,12 +346,12 @@ static const struct file_operations lirc_fops = {
static int ir_lirc_register(struct rc_dev *dev)
{
- struct lirc_driver *drv;
+ struct lirc_dev *ldev;
int rc = -ENOMEM;
unsigned long features = 0;
- drv = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
- if (!drv)
+ ldev = lirc_allocate_device();
+ if (!ldev)
return rc;
if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
@@ -380,32 +383,29 @@ static int ir_lirc_register(struct rc_dev *dev)
if (dev->max_timeout)
features |= LIRC_CAN_SET_REC_TIMEOUT;
- snprintf(drv->name, sizeof(drv->name), "ir-lirc-codec (%s)",
+ snprintf(ldev->name, sizeof(ldev->name), "ir-lirc-codec (%s)",
dev->driver_name);
- drv->minor = -1;
- drv->features = features;
- drv->data = &dev->raw->lirc;
- drv->rbuf = NULL;
- drv->code_length = sizeof(struct ir_raw_event) * 8;
- drv->chunk_size = sizeof(int);
- drv->buffer_size = LIRCBUF_SIZE;
- drv->fops = &lirc_fops;
- drv->dev = &dev->dev;
- drv->rdev = dev;
- drv->owner = THIS_MODULE;
-
- drv->minor = lirc_register_driver(drv);
- if (drv->minor < 0) {
- rc = -ENODEV;
+ ldev->features = features;
+ ldev->data = &dev->raw->lirc;
+ ldev->buf = NULL;
+ ldev->code_length = sizeof(struct ir_raw_event) * 8;
+ ldev->chunk_size = sizeof(int);
+ ldev->buffer_size = LIRCBUF_SIZE;
+ ldev->fops = &lirc_fops;
+ ldev->dev.parent = &dev->dev;
+ ldev->rdev = dev;
+ ldev->owner = THIS_MODULE;
+
+ rc = lirc_register_device(ldev);
+ if (rc < 0)
goto out;
- }
- dev->raw->lirc.drv = drv;
+ dev->raw->lirc.ldev = ldev;
dev->raw->lirc.dev = dev;
return 0;
out:
- kfree(drv);
+ lirc_free_device(ldev);
return rc;
}
@@ -413,9 +413,8 @@ static int ir_lirc_unregister(struct rc_dev *dev)
{
struct lirc_codec *lirc = &dev->raw->lirc;
- lirc_unregister_driver(lirc->drv->minor);
- kfree(lirc->drv);
- lirc->drv = NULL;
+ lirc_unregister_device(lirc->ldev);
+ lirc->ldev = NULL;
return 0;
}
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index 7c572a643656..69d6264d54e6 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -115,9 +115,9 @@ static unsigned char kbd_keycodes[256] = {
KEY_RESERVED
};
-static void mce_kbd_rx_timeout(unsigned long data)
+static void mce_kbd_rx_timeout(struct timer_list *t)
{
- struct mce_kbd_dec *mce_kbd = (struct mce_kbd_dec *)data;
+ struct mce_kbd_dec *mce_kbd = from_timer(mce_kbd, t, rx_timeout);
int i;
unsigned char maskcode;
@@ -389,8 +389,7 @@ static int ir_mce_kbd_register(struct rc_dev *dev)
set_bit(EV_MSC, idev->evbit);
set_bit(MSC_SCAN, idev->mscbit);
- setup_timer(&mce_kbd->rx_timeout, mce_kbd_rx_timeout,
- (unsigned long)mce_kbd);
+ timer_setup(&mce_kbd->rx_timeout, mce_kbd_rx_timeout, 0);
input_set_drvdata(idev, mce_kbd);
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index 817c18f2ddd1..a95d09acc22a 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -87,8 +87,6 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
data->state = STATE_BIT_PULSE;
return 0;
} else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) {
- rc_repeat(dev);
- IR_dprintk(1, "Repeat last key\n");
data->state = STATE_TRAILER_PULSE;
return 0;
}
@@ -151,19 +149,26 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (!geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2))
break;
- address = bitrev8((data->bits >> 24) & 0xff);
- not_address = bitrev8((data->bits >> 16) & 0xff);
- command = bitrev8((data->bits >> 8) & 0xff);
- not_command = bitrev8((data->bits >> 0) & 0xff);
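+ /* A full NEC frame carries a scancode; a bare trailer means key repeat. */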
+ if (data->count == NEC_NBITS) {
+ address = bitrev8((data->bits >> 24) & 0xff);
+ not_address = bitrev8((data->bits >> 16) & 0xff);
+ command = bitrev8((data->bits >> 8) & 0xff);
+ not_command = bitrev8((data->bits >> 0) & 0xff);
+
+ scancode = ir_nec_bytes_to_scancode(address,
+ not_address,
+ command,
+ not_command,
+ &rc_proto);
- scancode = ir_nec_bytes_to_scancode(address, not_address,
- command, not_command,
- &rc_proto);
+ if (data->is_nec_x)
+ data->necx_repeat = true;
- if (data->is_nec_x)
- data->necx_repeat = true;
+ rc_keydown(dev, rc_proto, scancode, 0);
+ } else {
+ rc_repeat(dev);
+ }
- rc_keydown(dev, rc_proto, scancode, 0);
data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index 2d0b26bf2051..50b319355edf 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-alink-dtu-m.o \
rc-anysee.o \
rc-apac-viewcomp.o \
+ rc-astrometa-t2hybrid.o \
rc-asus-pc39.o \
rc-asus-ps3-100.o \
rc-ati-tv-wonder-hd-600.o \
@@ -48,6 +49,8 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-geekbox.o \
rc-genius-tvgo-a11mce.o \
rc-gotview7135.o \
+ rc-hisi-poplar.o \
+ rc-hisi-tv-demo.o \
rc-imon-mce.o \
rc-imon-pad.o \
rc-iodata-bctv7e.o \
@@ -89,6 +92,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-reddo.o \
rc-snapstream-firefly.o \
rc-streamzap.o \
+ rc-tango.o \
rc-tbs-nec.o \
rc-technisat-ts35.o \
rc-technisat-usb2.o \
diff --git a/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c b/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c
new file mode 100644
index 000000000000..51690960fec4
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c
@@ -0,0 +1,70 @@
+/*
+ * Keytable for the Astrometa T2hybrid remote controller
+ *
+ * Copyright (C) 2017 Oleh Kravchenko <oleg@kaa.org.ua>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table t2hybrid[] = {
+ { 0x4d, KEY_POWER2 },
+ { 0x54, KEY_VIDEO }, /* Source */
+ { 0x16, KEY_MUTE },
+
+ { 0x4c, KEY_RECORD },
+ { 0x05, KEY_CHANNELUP },
+ { 0x0c, KEY_TIME}, /* Timeshift */
+
+ { 0x0a, KEY_VOLUMEDOWN },
+ { 0x40, KEY_ZOOM }, /* Fullscreen */
+ { 0x1e, KEY_VOLUMEUP },
+
+ { 0x12, KEY_0 },
+ { 0x02, KEY_CHANNELDOWN },
+ { 0x1c, KEY_AGAIN }, /* Recall */
+
+ { 0x09, KEY_1 },
+ { 0x1d, KEY_2 },
+ { 0x1f, KEY_3 },
+
+ { 0x0d, KEY_4 },
+ { 0x19, KEY_5 },
+ { 0x1b, KEY_6 },
+
+ { 0x11, KEY_7 },
+ { 0x15, KEY_8 },
+ { 0x17, KEY_9 },
+};
+
+static struct rc_map_list t2hybrid_map = {
+ .map = {
+ .scan = t2hybrid,
+ .size = ARRAY_SIZE(t2hybrid),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_ASTROMETA_T2HYBRID,
+ }
+};
+
+static int __init init_rc_map_t2hybrid(void)
+{
+ return rc_map_register(&t2hybrid_map);
+}
+
+static void __exit exit_rc_map_t2hybrid(void)
+{
+ rc_map_unregister(&t2hybrid_map);
+}
+
+module_init(init_rc_map_t2hybrid)
+module_exit(exit_rc_map_t2hybrid)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oleh Kravchenko <oleg@kaa.org.ua>");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-m135a.c b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
index 9882e2cde975..6d5a73b7ccec 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-m135a.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
@@ -43,7 +43,8 @@ static struct rc_map_table avermedia_m135a[] = {
{ 0x0213, KEY_RIGHT }, /* -> or L */
{ 0x0212, KEY_LEFT }, /* <- or R */
- { 0x0217, KEY_SLEEP }, /* Capturar Imagem or Snapshot */
+ { 0x0215, KEY_MENU },
+ { 0x0217, KEY_CAMERA }, /* Capturar Imagem or Snapshot */
{ 0x0210, KEY_SHUFFLE }, /* Amostra or 16 chan prev */
{ 0x0303, KEY_CHANNELUP },
diff --git a/drivers/media/rc/keymaps/rc-hisi-poplar.c b/drivers/media/rc/keymaps/rc-hisi-poplar.c
new file mode 100644
index 000000000000..78728bc7f63a
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-hisi-poplar.c
@@ -0,0 +1,69 @@
+/*
+ * Keytable for remote controller of HiSilicon poplar board.
+ *
+ * Copyright (c) 2017 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table hisi_poplar_keymap[] = {
+ { 0x0000b292, KEY_1},
+ { 0x0000b293, KEY_2},
+ { 0x0000b2cc, KEY_3},
+ { 0x0000b28e, KEY_4},
+ { 0x0000b28f, KEY_5},
+ { 0x0000b2c8, KEY_6},
+ { 0x0000b28a, KEY_7},
+ { 0x0000b28b, KEY_8},
+ { 0x0000b2c4, KEY_9},
+ { 0x0000b287, KEY_0},
+ { 0x0000b282, KEY_HOMEPAGE},
+ { 0x0000b2ca, KEY_UP},
+ { 0x0000b299, KEY_LEFT},
+ { 0x0000b2c1, KEY_RIGHT},
+ { 0x0000b2d2, KEY_DOWN},
+ { 0x0000b2c5, KEY_DELETE},
+ { 0x0000b29c, KEY_MUTE},
+ { 0x0000b281, KEY_VOLUMEDOWN},
+ { 0x0000b280, KEY_VOLUMEUP},
+ { 0x0000b2dc, KEY_POWER},
+ { 0x0000b29a, KEY_MENU},
+ { 0x0000b28d, KEY_SETUP},
+ { 0x0000b2c5, KEY_BACK},
+ { 0x0000b295, KEY_PLAYPAUSE},
+ { 0x0000b2ce, KEY_ENTER},
+ { 0x0000b285, KEY_CHANNELUP},
+ { 0x0000b286, KEY_CHANNELDOWN},
+ { 0x0000b2da, KEY_NUMERIC_STAR},
+ { 0x0000b2d0, KEY_NUMERIC_POUND},
+};
+
+static struct rc_map_list hisi_poplar_map = {
+ .map = {
+ .scan = hisi_poplar_keymap,
+ .size = ARRAY_SIZE(hisi_poplar_keymap),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_HISI_POPLAR,
+ }
+};
+
+static int __init init_rc_map_hisi_poplar(void)
+{
+ return rc_map_register(&hisi_poplar_map);
+}
+
+static void __exit exit_rc_map_hisi_poplar(void)
+{
+ rc_map_unregister(&hisi_poplar_map);
+}
+
+module_init(init_rc_map_hisi_poplar)
+module_exit(exit_rc_map_hisi_poplar)
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/rc/keymaps/rc-hisi-tv-demo.c b/drivers/media/rc/keymaps/rc-hisi-tv-demo.c
new file mode 100644
index 000000000000..4816e3a4a18d
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-hisi-tv-demo.c
@@ -0,0 +1,81 @@
+/*
+ * Keytable for remote controller of HiSilicon tv demo board.
+ *
+ * Copyright (c) 2017 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table hisi_tv_demo_keymap[] = {
+ { 0x00000092, KEY_1},
+ { 0x00000093, KEY_2},
+ { 0x000000cc, KEY_3},
+ { 0x0000009f, KEY_4},
+ { 0x0000008e, KEY_5},
+ { 0x0000008f, KEY_6},
+ { 0x000000c8, KEY_7},
+ { 0x00000094, KEY_8},
+ { 0x0000008a, KEY_9},
+ { 0x0000008b, KEY_0},
+ { 0x000000ce, KEY_ENTER},
+ { 0x000000ca, KEY_UP},
+ { 0x00000099, KEY_LEFT},
+ { 0x00000084, KEY_PAGEUP},
+ { 0x000000c1, KEY_RIGHT},
+ { 0x000000d2, KEY_DOWN},
+ { 0x00000089, KEY_PAGEDOWN},
+ { 0x000000d1, KEY_MUTE},
+ { 0x00000098, KEY_VOLUMEDOWN},
+ { 0x00000090, KEY_VOLUMEUP},
+ { 0x0000009c, KEY_POWER},
+ { 0x000000d6, KEY_STOP},
+ { 0x00000097, KEY_MENU},
+ { 0x000000cb, KEY_BACK},
+ { 0x000000da, KEY_PLAYPAUSE},
+ { 0x00000080, KEY_INFO},
+ { 0x000000c3, KEY_REWIND},
+ { 0x00000087, KEY_HOMEPAGE},
+ { 0x000000d0, KEY_FASTFORWARD},
+ { 0x000000c4, KEY_SOUND},
+ { 0x00000082, BTN_1},
+ { 0x000000c7, BTN_2},
+ { 0x00000086, KEY_PROGRAM},
+ { 0x000000d9, KEY_SUBTITLE},
+ { 0x00000085, KEY_ZOOM},
+ { 0x0000009b, KEY_RED},
+ { 0x0000009a, KEY_GREEN},
+ { 0x000000c0, KEY_YELLOW},
+ { 0x000000c2, KEY_BLUE},
+ { 0x0000009d, KEY_CHANNELDOWN},
+ { 0x000000cf, KEY_CHANNELUP},
+};
+
+static struct rc_map_list hisi_tv_demo_map = {
+ .map = {
+ .scan = hisi_tv_demo_keymap,
+ .size = ARRAY_SIZE(hisi_tv_demo_keymap),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_HISI_TV_DEMO,
+ }
+};
+
+static int __init init_rc_map_hisi_tv_demo(void)
+{
+ return rc_map_register(&hisi_tv_demo_map);
+}
+
+static void __exit exit_rc_map_hisi_tv_demo(void)
+{
+ rc_map_unregister(&hisi_tv_demo_map);
+}
+
+module_init(init_rc_map_hisi_tv_demo)
+module_exit(exit_rc_map_hisi_tv_demo)
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/rc/keymaps/rc-tango.c b/drivers/media/rc/keymaps/rc-tango.c
new file mode 100644
index 000000000000..1c6e8875d46f
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-tango.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2017 Sigma Designs
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table tango_table[] = {
+ { 0x4cb4a, KEY_POWER },
+ { 0x4cb48, KEY_FILE },
+ { 0x4cb0f, KEY_SETUP },
+ { 0x4cb4d, KEY_SUSPEND },
+ { 0x4cb4e, KEY_VOLUMEUP },
+ { 0x4cb44, KEY_EJECTCD },
+ { 0x4cb13, KEY_TV },
+ { 0x4cb51, KEY_MUTE },
+ { 0x4cb52, KEY_VOLUMEDOWN },
+
+ { 0x4cb41, KEY_1 },
+ { 0x4cb03, KEY_2 },
+ { 0x4cb42, KEY_3 },
+ { 0x4cb45, KEY_4 },
+ { 0x4cb07, KEY_5 },
+ { 0x4cb46, KEY_6 },
+ { 0x4cb55, KEY_7 },
+ { 0x4cb17, KEY_8 },
+ { 0x4cb56, KEY_9 },
+ { 0x4cb1b, KEY_0 },
+ { 0x4cb59, KEY_DELETE },
+ { 0x4cb5a, KEY_CAPSLOCK },
+
+ { 0x4cb47, KEY_BACK },
+ { 0x4cb05, KEY_SWITCHVIDEOMODE },
+ { 0x4cb06, KEY_UP },
+ { 0x4cb43, KEY_LEFT },
+ { 0x4cb01, KEY_RIGHT },
+ { 0x4cb0a, KEY_DOWN },
+ { 0x4cb02, KEY_ENTER },
+ { 0x4cb4b, KEY_INFO },
+ { 0x4cb09, KEY_HOME },
+
+ { 0x4cb53, KEY_MENU },
+ { 0x4cb12, KEY_PREVIOUS },
+ { 0x4cb50, KEY_PLAY },
+ { 0x4cb11, KEY_NEXT },
+ { 0x4cb4f, KEY_TITLE },
+ { 0x4cb0e, KEY_REWIND },
+ { 0x4cb4c, KEY_STOP },
+ { 0x4cb0d, KEY_FORWARD },
+ { 0x4cb57, KEY_MEDIA_REPEAT },
+ { 0x4cb16, KEY_ANGLE },
+ { 0x4cb54, KEY_PAUSE },
+ { 0x4cb15, KEY_SLOW },
+ { 0x4cb5b, KEY_TIME },
+ { 0x4cb1a, KEY_AUDIO },
+ { 0x4cb58, KEY_SUBTITLE },
+ { 0x4cb19, KEY_ZOOM },
+
+ { 0x4cb5f, KEY_RED },
+ { 0x4cb1e, KEY_GREEN },
+ { 0x4cb5c, KEY_YELLOW },
+ { 0x4cb1d, KEY_BLUE },
+};
+
+static struct rc_map_list tango_map = {
+ .map = {
+ .scan = tango_table,
+ .size = ARRAY_SIZE(tango_table),
+ .rc_proto = RC_PROTO_NECX,
+ .name = RC_MAP_TANGO,
+ }
+};
+
+static int __init init_rc_map_tango(void)
+{
+ return rc_map_register(&tango_map);
+}
+
+static void __exit exit_rc_map_tango(void)
+{
+ rc_map_unregister(&tango_map);
+}
+
+module_init(init_rc_map_tango)
+module_exit(exit_rc_map_tango)
+
+MODULE_AUTHOR("Sigma Designs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/keymaps/rc-twinhan1027.c b/drivers/media/rc/keymaps/rc-twinhan1027.c
index 2275b37c61d2..78bb3143a1a8 100644
--- a/drivers/media/rc/keymaps/rc-twinhan1027.c
+++ b/drivers/media/rc/keymaps/rc-twinhan1027.c
@@ -66,7 +66,7 @@ static struct rc_map_list twinhan_vp1027_map = {
.map = {
.scan = twinhan_vp1027,
.size = ARRAY_SIZE(twinhan_vp1027),
- .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
+ .rc_proto = RC_PROTO_NEC,
.name = RC_MAP_TWINHAN_VP1027_DVBS,
}
};
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 9080e39ea391..e16d1138ca48 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -24,96 +24,91 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/cdev.h>
+#include <linux/idr.h>
#include <media/rc-core.h>
#include <media/lirc.h>
#include <media/lirc_dev.h>
-#define NOPLUG -1
#define LOGHEAD "lirc_dev (%s[%d]): "
static dev_t lirc_base_dev;
-struct irctl {
- struct lirc_driver d;
- int attached;
- int open;
+/* Used to keep track of allocated lirc devices */
+#define LIRC_MAX_DEVICES 256
+static DEFINE_IDA(lirc_ida);
- struct mutex irctl_lock;
- struct lirc_buffer *buf;
- bool buf_internal;
- unsigned int chunk_size;
-
- struct device dev;
- struct cdev cdev;
-};
+/* Only used for sysfs but defined to void otherwise */
+static struct class *lirc_class;
-static DEFINE_MUTEX(lirc_dev_lock);
+static void lirc_release_device(struct device *ld)
+{
+ struct lirc_dev *d = container_of(ld, struct lirc_dev, dev);
-static struct irctl *irctls[MAX_IRCTL_DEVICES];
+ put_device(d->dev.parent);
-/* Only used for sysfs but defined to void otherwise */
-static struct class *lirc_class;
+ if (d->buf_internal) {
+ lirc_buffer_free(d->buf);
+ kfree(d->buf);
+ d->buf = NULL;
+ }
+ kfree(d);
+ module_put(THIS_MODULE);
+}
-static void lirc_release(struct device *ld)
+static int lirc_allocate_buffer(struct lirc_dev *d)
{
- struct irctl *ir = container_of(ld, struct irctl, dev);
+ int err;
- put_device(ir->dev.parent);
+ if (d->buf) {
+ d->buf_internal = false;
+ return 0;
+ }
- if (ir->buf_internal) {
- lirc_buffer_free(ir->buf);
- kfree(ir->buf);
+ d->buf = kmalloc(sizeof(*d->buf), GFP_KERNEL);
+ if (!d->buf)
+ return -ENOMEM;
+
+ err = lirc_buffer_init(d->buf, d->chunk_size, d->buffer_size);
+ if (err) {
+ kfree(d->buf);
+ d->buf = NULL;
+ return err;
}
- mutex_lock(&lirc_dev_lock);
- irctls[ir->d.minor] = NULL;
- mutex_unlock(&lirc_dev_lock);
- kfree(ir);
+ d->buf_internal = true;
+ return 0;
}
-static int lirc_allocate_buffer(struct irctl *ir)
+struct lirc_dev *
+lirc_allocate_device(void)
{
- int err = 0;
- int bytes_in_key;
- unsigned int chunk_size;
- unsigned int buffer_size;
- struct lirc_driver *d = &ir->d;
-
- bytes_in_key = BITS_TO_LONGS(d->code_length) +
- (d->code_length % 8 ? 1 : 0);
- buffer_size = d->buffer_size ? d->buffer_size : BUFLEN / bytes_in_key;
- chunk_size = d->chunk_size ? d->chunk_size : bytes_in_key;
-
- if (d->rbuf) {
- ir->buf = d->rbuf;
- ir->buf_internal = false;
- } else {
- ir->buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
- if (!ir->buf) {
- err = -ENOMEM;
- goto out;
- }
+ struct lirc_dev *d;
- err = lirc_buffer_init(ir->buf, chunk_size, buffer_size);
- if (err) {
- kfree(ir->buf);
- ir->buf = NULL;
- goto out;
- }
-
- ir->buf_internal = true;
- d->rbuf = ir->buf;
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (d) {
+ mutex_init(&d->mutex);
+ device_initialize(&d->dev);
+ d->dev.class = lirc_class;
+ d->dev.release = lirc_release_device;
+ __module_get(THIS_MODULE);
}
- ir->chunk_size = ir->buf->chunk_size;
-out:
- return err;
+ return d;
}
+EXPORT_SYMBOL(lirc_allocate_device);
-int lirc_register_driver(struct lirc_driver *d)
+void lirc_free_device(struct lirc_dev *d)
+{
+ if (!d)
+ return;
+
+ put_device(&d->dev);
+}
+EXPORT_SYMBOL(lirc_free_device);
+
+int lirc_register_device(struct lirc_dev *d)
{
- struct irctl *ir;
int minor;
int err;
@@ -122,8 +117,8 @@ int lirc_register_driver(struct lirc_driver *d)
return -EBADRQC;
}
- if (!d->dev) {
- pr_err("dev pointer not filled in!\n");
+ if (!d->dev.parent) {
+ pr_err("dev parent pointer not filled in!\n");
return -EINVAL;
}
@@ -132,226 +127,146 @@ int lirc_register_driver(struct lirc_driver *d)
return -EINVAL;
}
- if (d->minor >= MAX_IRCTL_DEVICES) {
- dev_err(d->dev, "minor must be between 0 and %d!\n",
- MAX_IRCTL_DEVICES - 1);
- return -EBADRQC;
+ if (!d->buf && d->chunk_size < 1) {
+ pr_err("chunk_size must be set!\n");
+ return -EINVAL;
}
- if (d->code_length < 1 || d->code_length > (BUFLEN * 8)) {
- dev_err(d->dev, "code length must be less than %d bits\n",
- BUFLEN * 8);
- return -EBADRQC;
+ if (!d->buf && d->buffer_size < 1) {
+ pr_err("buffer_size must be set!\n");
+ return -EINVAL;
}
- if (!d->rbuf && !(d->fops && d->fops->read &&
- d->fops->poll && d->fops->unlocked_ioctl)) {
- dev_err(d->dev, "undefined read, poll, ioctl\n");
+ if (d->code_length < 1 || d->code_length > (BUFLEN * 8)) {
+ dev_err(&d->dev, "code length must be less than %d bits\n",
+ BUFLEN * 8);
return -EBADRQC;
}
- mutex_lock(&lirc_dev_lock);
-
- minor = d->minor;
-
- if (minor < 0) {
- /* find first free slot for driver */
- for (minor = 0; minor < MAX_IRCTL_DEVICES; minor++)
- if (!irctls[minor])
- break;
- if (minor == MAX_IRCTL_DEVICES) {
- dev_err(d->dev, "no free slots for drivers!\n");
- err = -ENOMEM;
- goto out_lock;
- }
- } else if (irctls[minor]) {
- dev_err(d->dev, "minor (%d) just registered!\n", minor);
- err = -EBUSY;
- goto out_lock;
- }
-
- ir = kzalloc(sizeof(struct irctl), GFP_KERNEL);
- if (!ir) {
- err = -ENOMEM;
- goto out_lock;
+ if (!d->buf && !(d->fops && d->fops->read &&
+ d->fops->poll && d->fops->unlocked_ioctl)) {
+ dev_err(&d->dev, "undefined read, poll, ioctl\n");
+ return -EBADRQC;
}
- mutex_init(&ir->irctl_lock);
- irctls[minor] = ir;
- d->minor = minor;
-
/* some safety check 8-) */
- d->name[sizeof(d->name)-1] = '\0';
+ d->name[sizeof(d->name) - 1] = '\0';
if (d->features == 0)
d->features = LIRC_CAN_REC_LIRCCODE;
- ir->d = *d;
-
if (LIRC_CAN_REC(d->features)) {
- err = lirc_allocate_buffer(irctls[minor]);
- if (err) {
- kfree(ir);
- goto out_lock;
- }
- d->rbuf = ir->buf;
+ err = lirc_allocate_buffer(d);
+ if (err)
+ return err;
}
- device_initialize(&ir->dev);
- ir->dev.devt = MKDEV(MAJOR(lirc_base_dev), ir->d.minor);
- ir->dev.class = lirc_class;
- ir->dev.parent = d->dev;
- ir->dev.release = lirc_release;
- dev_set_name(&ir->dev, "lirc%d", ir->d.minor);
+ minor = ida_simple_get(&lirc_ida, 0, LIRC_MAX_DEVICES, GFP_KERNEL);
+ if (minor < 0)
+ return minor;
- cdev_init(&ir->cdev, d->fops);
- ir->cdev.owner = ir->d.owner;
- ir->cdev.kobj.parent = &ir->dev.kobj;
-
- err = cdev_add(&ir->cdev, ir->dev.devt, 1);
- if (err)
- goto out_free_dev;
-
- ir->attached = 1;
-
- err = device_add(&ir->dev);
- if (err)
- goto out_cdev;
-
- mutex_unlock(&lirc_dev_lock);
+ d->minor = minor;
+ d->dev.devt = MKDEV(MAJOR(lirc_base_dev), d->minor);
+ dev_set_name(&d->dev, "lirc%d", d->minor);
- get_device(ir->dev.parent);
+ cdev_init(&d->cdev, d->fops);
+ d->cdev.owner = d->owner;
+ d->attached = true;
- dev_info(ir->d.dev, "lirc_dev: driver %s registered at minor = %d\n",
- ir->d.name, ir->d.minor);
+ err = cdev_device_add(&d->cdev, &d->dev);
+ if (err) {
+ ida_simple_remove(&lirc_ida, minor);
+ return err;
+ }
- return minor;
+ get_device(d->dev.parent);
-out_cdev:
- cdev_del(&ir->cdev);
-out_free_dev:
- put_device(&ir->dev);
-out_lock:
- mutex_unlock(&lirc_dev_lock);
+ dev_info(&d->dev, "lirc_dev: driver %s registered at minor = %d\n",
+ d->name, d->minor);
- return err;
+ return 0;
}
-EXPORT_SYMBOL(lirc_register_driver);
+EXPORT_SYMBOL(lirc_register_device);
-int lirc_unregister_driver(int minor)
+void lirc_unregister_device(struct lirc_dev *d)
{
- struct irctl *ir;
+ if (!d)
+ return;
- if (minor < 0 || minor >= MAX_IRCTL_DEVICES) {
- pr_err("minor (%d) must be between 0 and %d!\n",
- minor, MAX_IRCTL_DEVICES - 1);
- return -EBADRQC;
- }
-
- ir = irctls[minor];
- if (!ir) {
- pr_err("failed to get irctl\n");
- return -ENOENT;
- }
+ dev_dbg(&d->dev, "lirc_dev: driver %s unregistered from minor = %d\n",
+ d->name, d->minor);
- mutex_lock(&lirc_dev_lock);
+ mutex_lock(&d->mutex);
- if (ir->d.minor != minor) {
- dev_err(ir->d.dev, "lirc_dev: minor %d device not registered\n",
- minor);
- mutex_unlock(&lirc_dev_lock);
- return -ENOENT;
+ d->attached = false;
+ if (d->open) {
+ dev_dbg(&d->dev, LOGHEAD "releasing opened driver\n",
+ d->name, d->minor);
+ wake_up_interruptible(&d->buf->wait_poll);
}
- dev_dbg(ir->d.dev, "lirc_dev: driver %s unregistered from minor = %d\n",
- ir->d.name, ir->d.minor);
-
- ir->attached = 0;
- if (ir->open) {
- dev_dbg(ir->d.dev, LOGHEAD "releasing opened driver\n",
- ir->d.name, ir->d.minor);
- wake_up_interruptible(&ir->buf->wait_poll);
- }
+ mutex_unlock(&d->mutex);
- mutex_unlock(&lirc_dev_lock);
-
- device_del(&ir->dev);
- cdev_del(&ir->cdev);
- put_device(&ir->dev);
-
- return 0;
+ cdev_device_del(&d->cdev, &d->dev);
+ ida_simple_remove(&lirc_ida, d->minor);
+ put_device(&d->dev);
}
-EXPORT_SYMBOL(lirc_unregister_driver);
+EXPORT_SYMBOL(lirc_unregister_device);
int lirc_dev_fop_open(struct inode *inode, struct file *file)
{
- struct irctl *ir;
- int retval = 0;
-
- if (iminor(inode) >= MAX_IRCTL_DEVICES) {
- pr_err("open result for %d is -ENODEV\n", iminor(inode));
- return -ENODEV;
- }
-
- if (mutex_lock_interruptible(&lirc_dev_lock))
- return -ERESTARTSYS;
-
- ir = irctls[iminor(inode)];
- mutex_unlock(&lirc_dev_lock);
+ struct lirc_dev *d = container_of(inode->i_cdev, struct lirc_dev, cdev);
+ int retval;
- if (!ir) {
- retval = -ENODEV;
- goto error;
- }
+ dev_dbg(&d->dev, LOGHEAD "open called\n", d->name, d->minor);
- dev_dbg(ir->d.dev, LOGHEAD "open called\n", ir->d.name, ir->d.minor);
+ retval = mutex_lock_interruptible(&d->mutex);
+ if (retval)
+ return retval;
- if (ir->d.minor == NOPLUG) {
+ if (!d->attached) {
retval = -ENODEV;
- goto error;
+ goto out;
}
- if (ir->open) {
+ if (d->open) {
retval = -EBUSY;
- goto error;
+ goto out;
}
- if (ir->d.rdev) {
- retval = rc_open(ir->d.rdev);
+ if (d->rdev) {
+ retval = rc_open(d->rdev);
if (retval)
- goto error;
+ goto out;
}
- if (ir->buf)
- lirc_buffer_clear(ir->buf);
+ if (d->buf)
+ lirc_buffer_clear(d->buf);
- ir->open++;
+ d->open++;
-error:
+ lirc_init_pdata(inode, file);
nonseekable_open(inode, file);
+ mutex_unlock(&d->mutex);
+
+ return 0;
+out:
+ mutex_unlock(&d->mutex);
return retval;
}
EXPORT_SYMBOL(lirc_dev_fop_open);
int lirc_dev_fop_close(struct inode *inode, struct file *file)
{
- struct irctl *ir = irctls[iminor(inode)];
- int ret;
-
- if (!ir) {
- pr_err("called with invalid irctl\n");
- return -EINVAL;
- }
+ struct lirc_dev *d = file->private_data;
- ret = mutex_lock_killable(&lirc_dev_lock);
- WARN_ON(ret);
+ mutex_lock(&d->mutex);
- rc_close(ir->d.rdev);
+ rc_close(d->rdev);
+ d->open--;
- ir->open--;
- if (!ret)
- mutex_unlock(&lirc_dev_lock);
+ mutex_unlock(&d->mutex);
return 0;
}
@@ -359,29 +274,24 @@ EXPORT_SYMBOL(lirc_dev_fop_close);
unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
{
- struct irctl *ir = irctls[iminor(file_inode(file))];
+ struct lirc_dev *d = file->private_data;
unsigned int ret;
- if (!ir) {
- pr_err("called with invalid irctl\n");
- return POLLERR;
- }
-
- if (!ir->attached)
+ if (!d->attached)
return POLLHUP | POLLERR;
- if (ir->buf) {
- poll_wait(file, &ir->buf->wait_poll, wait);
+ if (d->buf) {
+ poll_wait(file, &d->buf->wait_poll, wait);
- if (lirc_buffer_empty(ir->buf))
+ if (lirc_buffer_empty(d->buf))
ret = 0;
else
ret = POLLIN | POLLRDNORM;
- } else
+ } else {
ret = POLLERR;
+ }
- dev_dbg(ir->d.dev, LOGHEAD "poll result = %d\n",
- ir->d.name, ir->d.minor, ret);
+ dev_dbg(&d->dev, LOGHEAD "poll result = %d\n", d->name, d->minor, ret);
return ret;
}
@@ -389,48 +299,44 @@ EXPORT_SYMBOL(lirc_dev_fop_poll);
long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
+ struct lirc_dev *d = file->private_data;
__u32 mode;
- int result = 0;
- struct irctl *ir = irctls[iminor(file_inode(file))];
+ int result;
- if (!ir) {
- pr_err("no irctl found!\n");
- return -ENODEV;
- }
+ dev_dbg(&d->dev, LOGHEAD "ioctl called (0x%x)\n",
+ d->name, d->minor, cmd);
- dev_dbg(ir->d.dev, LOGHEAD "ioctl called (0x%x)\n",
- ir->d.name, ir->d.minor, cmd);
+ result = mutex_lock_interruptible(&d->mutex);
+ if (result)
+ return result;
- if (ir->d.minor == NOPLUG || !ir->attached) {
- dev_err(ir->d.dev, LOGHEAD "ioctl result = -ENODEV\n",
- ir->d.name, ir->d.minor);
- return -ENODEV;
+ if (!d->attached) {
+ result = -ENODEV;
+ goto out;
}
- mutex_lock(&ir->irctl_lock);
-
switch (cmd) {
case LIRC_GET_FEATURES:
- result = put_user(ir->d.features, (__u32 __user *)arg);
+ result = put_user(d->features, (__u32 __user *)arg);
break;
case LIRC_GET_REC_MODE:
- if (!LIRC_CAN_REC(ir->d.features)) {
+ if (!LIRC_CAN_REC(d->features)) {
result = -ENOTTY;
break;
}
result = put_user(LIRC_REC2MODE
- (ir->d.features & LIRC_CAN_REC_MASK),
+ (d->features & LIRC_CAN_REC_MASK),
(__u32 __user *)arg);
break;
case LIRC_SET_REC_MODE:
- if (!LIRC_CAN_REC(ir->d.features)) {
+ if (!LIRC_CAN_REC(d->features)) {
result = -ENOTTY;
break;
}
result = get_user(mode, (__u32 __user *)arg);
- if (!result && !(LIRC_MODE2REC(mode) & ir->d.features))
+ if (!result && !(LIRC_MODE2REC(mode) & d->features))
result = -EINVAL;
/*
* FIXME: We should actually set the mode somehow but
@@ -438,32 +344,14 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
*/
break;
case LIRC_GET_LENGTH:
- result = put_user(ir->d.code_length, (__u32 __user *)arg);
- break;
- case LIRC_GET_MIN_TIMEOUT:
- if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) ||
- ir->d.min_timeout == 0) {
- result = -ENOTTY;
- break;
- }
-
- result = put_user(ir->d.min_timeout, (__u32 __user *)arg);
- break;
- case LIRC_GET_MAX_TIMEOUT:
- if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) ||
- ir->d.max_timeout == 0) {
- result = -ENOTTY;
- break;
- }
-
- result = put_user(ir->d.max_timeout, (__u32 __user *)arg);
+ result = put_user(d->code_length, (__u32 __user *)arg);
break;
default:
result = -ENOTTY;
}
- mutex_unlock(&ir->irctl_lock);
-
+out:
+ mutex_unlock(&d->mutex);
return result;
}
EXPORT_SYMBOL(lirc_dev_fop_ioctl);
@@ -473,35 +361,34 @@ ssize_t lirc_dev_fop_read(struct file *file,
size_t length,
loff_t *ppos)
{
- struct irctl *ir = irctls[iminor(file_inode(file))];
+ struct lirc_dev *d = file->private_data;
unsigned char *buf;
- int ret = 0, written = 0;
+ int ret, written = 0;
DECLARE_WAITQUEUE(wait, current);
- if (!ir) {
- pr_err("called with invalid irctl\n");
- return -ENODEV;
- }
-
- if (!LIRC_CAN_REC(ir->d.features))
- return -EINVAL;
-
- dev_dbg(ir->d.dev, LOGHEAD "read called\n", ir->d.name, ir->d.minor);
-
- buf = kzalloc(ir->chunk_size, GFP_KERNEL);
+ buf = kzalloc(d->buf->chunk_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- if (mutex_lock_interruptible(&ir->irctl_lock)) {
- ret = -ERESTARTSYS;
- goto out_unlocked;
+ dev_dbg(&d->dev, LOGHEAD "read called\n", d->name, d->minor);
+
+ ret = mutex_lock_interruptible(&d->mutex);
+ if (ret) {
+ kfree(buf);
+ return ret;
}
- if (!ir->attached) {
+
+ if (!d->attached) {
ret = -ENODEV;
goto out_locked;
}
- if (length % ir->chunk_size) {
+ if (!LIRC_CAN_REC(d->features)) {
+ ret = -EINVAL;
+ goto out_locked;
+ }
+
+ if (length % d->buf->chunk_size) {
ret = -EINVAL;
goto out_locked;
}
@@ -511,14 +398,14 @@ ssize_t lirc_dev_fop_read(struct file *file,
* to avoid losing scan code (in case when queue is awaken somewhere
* between while condition checking and scheduling)
*/
- add_wait_queue(&ir->buf->wait_poll, &wait);
+ add_wait_queue(&d->buf->wait_poll, &wait);
/*
* while we didn't provide 'length' bytes, device is opened in blocking
* mode and 'copy_to_user' is happy, wait for data.
*/
while (written < length && ret == 0) {
- if (lirc_buffer_empty(ir->buf)) {
+ if (lirc_buffer_empty(d->buf)) {
/* According to the read(2) man page, 'written' can be
* returned as less than 'length', instead of blocking
* again, returning -EWOULDBLOCK, or returning
@@ -535,36 +422,36 @@ ssize_t lirc_dev_fop_read(struct file *file,
break;
}
- mutex_unlock(&ir->irctl_lock);
+ mutex_unlock(&d->mutex);
set_current_state(TASK_INTERRUPTIBLE);
schedule();
set_current_state(TASK_RUNNING);
- if (mutex_lock_interruptible(&ir->irctl_lock)) {
- ret = -ERESTARTSYS;
- remove_wait_queue(&ir->buf->wait_poll, &wait);
+ ret = mutex_lock_interruptible(&d->mutex);
+ if (ret) {
+ remove_wait_queue(&d->buf->wait_poll, &wait);
goto out_unlocked;
}
- if (!ir->attached) {
+ if (!d->attached) {
ret = -ENODEV;
goto out_locked;
}
} else {
- lirc_buffer_read(ir->buf, buf);
+ lirc_buffer_read(d->buf, buf);
ret = copy_to_user((void __user *)buffer+written, buf,
- ir->buf->chunk_size);
+ d->buf->chunk_size);
if (!ret)
- written += ir->buf->chunk_size;
+ written += d->buf->chunk_size;
else
ret = -EFAULT;
}
}
- remove_wait_queue(&ir->buf->wait_poll, &wait);
+ remove_wait_queue(&d->buf->wait_poll, &wait);
out_locked:
- mutex_unlock(&ir->irctl_lock);
+ mutex_unlock(&d->mutex);
out_unlocked:
kfree(buf);
@@ -573,9 +460,19 @@ out_unlocked:
}
EXPORT_SYMBOL(lirc_dev_fop_read);
+void lirc_init_pdata(struct inode *inode, struct file *file)
+{
+ struct lirc_dev *d = container_of(inode->i_cdev, struct lirc_dev, cdev);
+
+ file->private_data = d;
+}
+EXPORT_SYMBOL(lirc_init_pdata);
+
void *lirc_get_pdata(struct file *file)
{
- return irctls[iminor(file_inode(file))]->d.data;
+ struct lirc_dev *d = file->private_data;
+
+ return d->data;
}
EXPORT_SYMBOL(lirc_get_pdata);
@@ -590,7 +487,7 @@ static int __init lirc_dev_init(void)
return PTR_ERR(lirc_class);
}
- retval = alloc_chrdev_region(&lirc_base_dev, 0, MAX_IRCTL_DEVICES,
+ retval = alloc_chrdev_region(&lirc_base_dev, 0, LIRC_MAX_DEVICES,
"BaseRemoteCtl");
if (retval) {
class_destroy(lirc_class);
@@ -607,7 +504,7 @@ static int __init lirc_dev_init(void)
static void __exit lirc_dev_exit(void)
{
class_destroy(lirc_class);
- unregister_chrdev_region(lirc_base_dev, MAX_IRCTL_DEVICES);
+ unregister_chrdev_region(lirc_base_dev, LIRC_MAX_DEVICES);
pr_info("module unloaded\n");
}
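
Note: the lirc_dev.c rework above drops the fixed irctls[] minor table in favour of an IDA allocator. As a minimal standalone sketch of that allocate/free pattern (illustrative only, not part of the patch; struct foo_dev and the bound are made-up names):

#include <linux/idr.h>
#include <linux/gfp.h>

#define FOO_MAX_DEVICES 256			/* illustrative bound */

static DEFINE_IDA(foo_ida);

struct foo_dev {
	int minor;
};

static int foo_register(struct foo_dev *d)
{
	/* lowest free minor in [0, FOO_MAX_DEVICES), or a negative errno */
	int minor = ida_simple_get(&foo_ida, 0, FOO_MAX_DEVICES, GFP_KERNEL);

	if (minor < 0)
		return minor;
	d->minor = minor;
	return 0;
}

static void foo_unregister(struct foo_dev *d)
{
	ida_simple_remove(&foo_ida, d->minor);	/* minor can now be reused */
}
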
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index bf7aaff3aa37..a9187b0b46a1 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -188,6 +188,8 @@ enum mceusb_model_type {
TIVO_KIT,
MCE_GEN2_NO_TX,
HAUPPAUGE_CX_HYBRID_TV,
+ EVROMEDIA_FULL_HYBRID_FULLHD,
+ ASTROMETA_T2HYBRID,
};
struct mceusb_model {
@@ -247,9 +249,19 @@ static const struct mceusb_model mceusb_model[] = {
.mce_gen2 = 1,
.rc_map = RC_MAP_TIVO,
},
+ [EVROMEDIA_FULL_HYBRID_FULLHD] = {
+ .name = "Evromedia USB Full Hybrid Full HD",
+ .no_tx = 1,
+ .rc_map = RC_MAP_MSI_DIGIVOX_III,
+ },
+ [ASTROMETA_T2HYBRID] = {
+ .name = "Astrometa T2Hybrid",
+ .no_tx = 1,
+ .rc_map = RC_MAP_ASTROMETA_T2HYBRID,
+ }
};
-static struct usb_device_id mceusb_dev_table[] = {
+static const struct usb_device_id mceusb_dev_table[] = {
/* Original Microsoft MCE IR Transceiver (often HP-branded) */
{ USB_DEVICE(VENDOR_MICROSOFT, 0x006d),
.driver_info = MCE_GEN1 },
@@ -398,6 +410,12 @@ static struct usb_device_id mceusb_dev_table[] = {
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
/* Adaptec / HP eHome Receiver */
{ USB_DEVICE(VENDOR_ADAPTEC, 0x0094) },
+ /* Evromedia USB Full Hybrid Full HD */
+ { USB_DEVICE(0x1b80, 0xd3b2),
+ .driver_info = EVROMEDIA_FULL_HYBRID_FULLHD },
+ /* Astrometa T2hybrid */
+ { USB_DEVICE(0x15f4, 0x0135),
+ .driver_info = ASTROMETA_T2HYBRID },
/* Terminating entry */
{ }
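
For reference, the const usb_device_id table pattern used above, reduced to a hedged sketch (the vendor/product IDs, enum and function names below are placeholders, not real devices):

#include <linux/module.h>
#include <linux/usb.h>

enum foo_model { FOO_PLAIN, FOO_HYBRID };

static const struct usb_device_id foo_dev_table[] = {
	{ USB_DEVICE(0x1234, 0x0001), .driver_info = FOO_PLAIN },
	{ USB_DEVICE(0x1234, 0x0002), .driver_info = FOO_HYBRID },
	{ }					/* terminating entry */
};
/* exposes the IDs to userspace so the module autoloads on hotplug */
MODULE_DEVICE_TABLE(usb, foo_dev_table);

static int foo_probe(struct usb_interface *intf,
		     const struct usb_device_id *id)
{
	/* id->driver_info carries the per-device model tag */
	enum foo_model model = id->driver_info;

	dev_info(&intf->dev, "probing model %d\n", model);
	return 0;
}
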
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 7da9c96cb058..ae4dd0c27731 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -106,7 +106,7 @@ struct ir_raw_event_ctrl {
} mce_kbd;
struct lirc_codec {
struct rc_dev *dev;
- struct lirc_driver *drv;
+ struct lirc_dev *ldev;
int carrier_low;
ktime_t gap_start;
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 503bc425a187..f6e5ba4fbb49 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -471,9 +471,10 @@ int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
-static void edge_handle(unsigned long arg)
+static void edge_handle(struct timer_list *t)
{
- struct rc_dev *dev = (struct rc_dev *)arg;
+ struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
+ struct rc_dev *dev = raw->dev;
ktime_t interval = ktime_sub(ktime_get(), dev->raw->last_event);
if (ktime_to_ns(interval) >= dev->timeout) {
@@ -513,8 +514,7 @@ int ir_raw_event_prepare(struct rc_dev *dev)
dev->raw->dev = dev;
dev->change_protocol = change_protocol;
- setup_timer(&dev->raw->edge_handle, edge_handle,
- (unsigned long)dev);
+ timer_setup(&dev->raw->edge_handle, edge_handle, 0);
INIT_KFIFO(dev->raw->kfifo);
return 0;
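
The setup_timer() to timer_setup() conversions in this series (rc-ir-raw.c, rc-main.c, serial_ir.c, sir_ir.c) all follow the same shape; a minimal sketch of the new API for readers unfamiliar with it (struct foo and the field names are illustrative):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct foo {
	struct timer_list my_timer;
	int ticks;
};

/* new-style callback: receives the timer itself, not a cast cookie */
static void foo_timeout(struct timer_list *t)
{
	struct foo *f = from_timer(f, t, my_timer);

	f->ticks++;
}

static void foo_start(struct foo *f)
{
	/* replaces setup_timer(&f->my_timer, foo_timeout, (unsigned long)f) */
	timer_setup(&f->my_timer, foo_timeout, 0);
	mod_timer(&f->my_timer, jiffies + HZ);
}
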
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 981cccd6b988..17950e29d4e3 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <media/rc-core.h>
+#include <linux/bsearch.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/input.h>
@@ -439,9 +440,6 @@ static int ir_setkeytable(struct rc_dev *dev,
if (rc)
return rc;
- IR_dprintk(1, "Allocated space for %u keycode entries (%u bytes)\n",
- rc_map->size, rc_map->alloc);
-
for (i = 0; i < from->size; i++) {
index = ir_establish_scancode(dev, rc_map,
from->scan[i].scancode, false);
@@ -460,6 +458,18 @@ static int ir_setkeytable(struct rc_dev *dev,
return rc;
}
+static int rc_map_cmp(const void *key, const void *elt)
+{
+ const unsigned int *scancode = key;
+ const struct rc_map_table *e = elt;
+
+ if (*scancode < e->scancode)
+ return -1;
+ else if (*scancode > e->scancode)
+ return 1;
+ return 0;
+}
+
/**
* ir_lookup_by_scancode() - locate mapping by scancode
* @rc_map: the struct rc_map to search
@@ -472,21 +482,14 @@ static int ir_setkeytable(struct rc_dev *dev,
static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map,
unsigned int scancode)
{
- int start = 0;
- int end = rc_map->len - 1;
- int mid;
-
- while (start <= end) {
- mid = (start + end) / 2;
- if (rc_map->scan[mid].scancode < scancode)
- start = mid + 1;
- else if (rc_map->scan[mid].scancode > scancode)
- end = mid - 1;
- else
- return mid;
- }
+ struct rc_map_table *res;
- return -1U;
+ res = bsearch(&scancode, rc_map->scan, rc_map->len,
+ sizeof(struct rc_map_table), rc_map_cmp);
+ if (!res)
+ return -1U;
+ else
+ return res - rc_map->scan;
}
/**
@@ -627,9 +630,9 @@ EXPORT_SYMBOL_GPL(rc_keyup);
* This routine will generate a keyup event some time after a keydown event
* is generated when no further activity has been detected.
*/
-static void ir_timer_keyup(unsigned long cookie)
+static void ir_timer_keyup(struct timer_list *t)
{
- struct rc_dev *dev = (struct rc_dev *)cookie;
+ struct rc_dev *dev = from_timer(dev, t, timer_keyup);
unsigned long flags;
/*
@@ -1480,6 +1483,8 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
if (dev->driver_name)
ADD_HOTPLUG_VAR("DRV_NAME=%s", dev->driver_name);
+ if (dev->device_name)
+ ADD_HOTPLUG_VAR("DEV_NAME=%s", dev->device_name);
return 0;
}
@@ -1487,7 +1492,10 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
/*
* Static device attribute struct with the sysfs attributes for IR's
*/
-static DEVICE_ATTR(protocols, 0644, show_protocols, store_protocols);
+static struct device_attribute dev_attr_ro_protocols =
+__ATTR(protocols, 0444, show_protocols, NULL);
+static struct device_attribute dev_attr_rw_protocols =
+__ATTR(protocols, 0644, show_protocols, store_protocols);
static DEVICE_ATTR(wakeup_protocols, 0644, show_wakeup_protocols,
store_wakeup_protocols);
static RC_FILTER_ATTR(filter, S_IRUGO|S_IWUSR,
@@ -1499,13 +1507,22 @@ static RC_FILTER_ATTR(wakeup_filter, S_IRUGO|S_IWUSR,
static RC_FILTER_ATTR(wakeup_filter_mask, S_IRUGO|S_IWUSR,
show_filter, store_filter, RC_FILTER_WAKEUP, true);
-static struct attribute *rc_dev_protocol_attrs[] = {
- &dev_attr_protocols.attr,
+static struct attribute *rc_dev_rw_protocol_attrs[] = {
+ &dev_attr_rw_protocols.attr,
+ NULL,
+};
+
+static const struct attribute_group rc_dev_rw_protocol_attr_grp = {
+ .attrs = rc_dev_rw_protocol_attrs,
+};
+
+static struct attribute *rc_dev_ro_protocol_attrs[] = {
+ &dev_attr_ro_protocols.attr,
NULL,
};
-static const struct attribute_group rc_dev_protocol_attr_grp = {
- .attrs = rc_dev_protocol_attrs,
+static const struct attribute_group rc_dev_ro_protocol_attr_grp = {
+ .attrs = rc_dev_ro_protocol_attrs,
};
static struct attribute *rc_dev_filter_attrs[] = {
@@ -1529,7 +1546,7 @@ static const struct attribute_group rc_dev_wakeup_filter_attr_grp = {
.attrs = rc_dev_wakeup_filter_attrs,
};
-static struct device_type rc_dev_type = {
+static const struct device_type rc_dev_type = {
.release = rc_dev_release,
.uevent = rc_dev_uevent,
};
@@ -1553,8 +1570,7 @@ struct rc_dev *rc_allocate_device(enum rc_driver_type type)
dev->input_dev->setkeycode = ir_setkeycode;
input_set_drvdata(dev->input_dev, dev);
- setup_timer(&dev->timer_keyup, ir_timer_keyup,
- (unsigned long)dev);
+ timer_setup(&dev->timer_keyup, ir_timer_keyup, 0);
spin_lock_init(&dev->rc_map.lock);
spin_lock_init(&dev->keylock);
@@ -1638,6 +1654,9 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
rc_proto = BIT_ULL(rc_map->rc_proto);
+ if (dev->driver_type == RC_DRIVER_SCANCODE && !dev->change_protocol)
+ dev->enabled_protocols = dev->allowed_protocols;
+
if (dev->change_protocol) {
rc = dev->change_protocol(dev, &rc_proto);
if (rc < 0)
@@ -1729,8 +1748,10 @@ int rc_register_device(struct rc_dev *dev)
dev_set_drvdata(&dev->dev, dev);
dev->dev.groups = dev->sysfs_groups;
- if (dev->driver_type != RC_DRIVER_IR_RAW_TX)
- dev->sysfs_groups[attr++] = &rc_dev_protocol_attr_grp;
+ if (dev->driver_type == RC_DRIVER_SCANCODE && !dev->change_protocol)
+ dev->sysfs_groups[attr++] = &rc_dev_ro_protocol_attr_grp;
+ else if (dev->driver_type != RC_DRIVER_IR_RAW_TX)
+ dev->sysfs_groups[attr++] = &rc_dev_rw_protocol_attr_grp;
if (dev->s_filter)
dev->sysfs_groups[attr++] = &rc_dev_filter_attr_grp;
if (dev->s_wakeup_filter)
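
The rc-main.c change above swaps an open-coded binary search for the library bsearch() plus a comparator; a self-contained sketch of the same pattern, including the pointer arithmetic that recovers the index (the table contents here are illustrative):

#include <linux/bsearch.h>
#include <linux/kernel.h>

struct entry {
	unsigned int scancode;
	unsigned int keycode;
};

/* bsearch() requires the table to be sorted by the compared field */
static const struct entry table[] = {
	{ 0x02, 200 }, { 0x05, 500 }, { 0x0a, 1000 },
};

static int entry_cmp(const void *key, const void *elt)
{
	const unsigned int *scancode = key;
	const struct entry *e = elt;

	if (*scancode < e->scancode)
		return -1;
	if (*scancode > e->scancode)
		return 1;
	return 0;
}

/* returns the index of scancode in table[], or -1U when not found */
static unsigned int lookup(unsigned int scancode)
{
	const struct entry *res;

	res = bsearch(&scancode, table, ARRAY_SIZE(table), sizeof(*table),
		      entry_cmp);

	return res ? res - table : -1U;
}
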
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 6784cb9fc4e7..6bfc24885b5c 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -186,7 +186,7 @@ struct redrat3_error {
} __packed;
/* table of devices that work with this driver */
-static struct usb_device_id redrat3_dev_table[] = {
+static const struct usb_device_id redrat3_dev_table[] = {
/* Original version of the RedRat3 */
{USB_DEVICE(USB_RR3USB_VENDOR_ID, USB_RR3USB_PRODUCT_ID)},
/* Second Version/release of the RedRat3 - RetRat3-II */
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 8b66926bc16a..8bf5637b3a69 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -470,7 +470,7 @@ static int hardware_init_port(void)
return 0;
}
-static void serial_ir_timeout(unsigned long arg)
+static void serial_ir_timeout(struct timer_list *unused)
{
DEFINE_IR_RAW_EVENT(ev);
@@ -540,8 +540,7 @@ static int serial_ir_probe(struct platform_device *dev)
serial_ir.rcdev = rcdev;
- setup_timer(&serial_ir.timeout_timer, serial_ir_timeout,
- (unsigned long)&serial_ir);
+ timer_setup(&serial_ir.timeout_timer, serial_ir_timeout, 0);
result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler,
share_irq ? IRQF_SHARED : 0,
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
index bc906fb128d5..76120664b700 100644
--- a/drivers/media/rc/sir_ir.c
+++ b/drivers/media/rc/sir_ir.c
@@ -120,7 +120,7 @@ static void add_read_queue(int flag, unsigned long val)
}
/* SECTION: Hardware */
-static void sir_timeout(unsigned long data)
+static void sir_timeout(struct timer_list *unused)
{
/*
* if last received signal was a pulse, but receiving stopped
@@ -321,7 +321,7 @@ static int sir_ir_probe(struct platform_device *dev)
rcdev->timeout = IR_DEFAULT_TIMEOUT;
rcdev->dev.parent = &sir_ir_dev->dev;
- setup_timer(&timerlist, sir_timeout, 0);
+ timer_setup(&timerlist, sir_timeout, 0);
/* get I/O port access and IRQ line */
if (!devm_request_region(&sir_ir_dev->dev, io, 8, KBUILD_MODNAME)) {
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index f03a174ddf9d..4eebfcfc10f3 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -43,7 +43,7 @@
#define USB_STREAMZAP_PRODUCT_ID 0x0000
/* table of devices that work with this driver */
-static struct usb_device_id streamzap_table[] = {
+static const struct usb_device_id streamzap_table[] = {
/* Streamzap Remote Control */
{ USB_DEVICE(USB_STREAMZAP_VENDOR_ID, USB_STREAMZAP_PRODUCT_ID) },
/* Terminating entry */
diff --git a/drivers/media/rc/tango-ir.c b/drivers/media/rc/tango-ir.c
new file mode 100644
index 000000000000..9d4c17230c3a
--- /dev/null
+++ b/drivers/media/rc/tango-ir.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <media/rc-core.h>
+
+#define DRIVER_NAME "tango-ir"
+
+#define IR_NEC_CTRL 0x00
+#define IR_NEC_DATA 0x04
+#define IR_CTRL 0x08
+#define IR_RC5_CLK_DIV 0x0c
+#define IR_RC5_DATA 0x10
+#define IR_INT 0x14
+
+#define NEC_TIME_BASE 560
+#define RC5_TIME_BASE 1778
+
+#define RC6_CTRL 0x00
+#define RC6_CLKDIV 0x04
+#define RC6_DATA0 0x08
+#define RC6_DATA1 0x0c
+#define RC6_DATA2 0x10
+#define RC6_DATA3 0x14
+#define RC6_DATA4 0x18
+
+#define RC6_CARRIER 36000
+#define RC6_TIME_BASE 16
+
+#define NEC_CAP(n) ((n) << 24)
+#define GPIO_SEL(n) ((n) << 16)
+#define DISABLE_NEC (BIT(4) | BIT(8))
+#define ENABLE_RC5 (BIT(0) | BIT(9))
+#define ENABLE_RC6 (BIT(0) | BIT(7))
+#define ACK_IR_INT (BIT(0) | BIT(1))
+#define ACK_RC6_INT (BIT(31))
+
+#define NEC_ANY (RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32)
+
+struct tango_ir {
+ void __iomem *rc5_base;
+ void __iomem *rc6_base;
+ struct rc_dev *rc;
+ struct clk *clk;
+};
+
+static void tango_ir_handle_nec(struct tango_ir *ir)
+{
+ u32 v, code;
+ enum rc_proto proto;
+
+ v = readl_relaxed(ir->rc5_base + IR_NEC_DATA);
+ if (!v) {
+ rc_repeat(ir->rc);
+ return;
+ }
+
+ code = ir_nec_bytes_to_scancode(v, v >> 8, v >> 16, v >> 24, &proto);
+ rc_keydown(ir->rc, proto, code, 0);
+}
+
+static void tango_ir_handle_rc5(struct tango_ir *ir)
+{
+ u32 data, field, toggle, addr, cmd, code;
+
+ data = readl_relaxed(ir->rc5_base + IR_RC5_DATA);
+ if (data & BIT(31))
+ return;
+
+ field = data >> 12 & 1;
+ toggle = data >> 11 & 1;
+ addr = data >> 6 & 0x1f;
+ cmd = (data & 0x3f) | (field ^ 1) << 6;
+
+ code = RC_SCANCODE_RC5(addr, cmd);
+ rc_keydown(ir->rc, RC_PROTO_RC5, code, toggle);
+}
+
+static void tango_ir_handle_rc6(struct tango_ir *ir)
+{
+ u32 data0, data1, toggle, mode, addr, cmd, code;
+
+ data0 = readl_relaxed(ir->rc6_base + RC6_DATA0);
+ data1 = readl_relaxed(ir->rc6_base + RC6_DATA1);
+
+ mode = data0 >> 1 & 7;
+ if (mode != 0)
+ return;
+
+ toggle = data0 & 1;
+ addr = data0 >> 16;
+ cmd = data1;
+
+ code = RC_SCANCODE_RC6_0(addr, cmd);
+ rc_keydown(ir->rc, RC_PROTO_RC6_0, code, toggle);
+}
+
+static irqreturn_t tango_ir_irq(int irq, void *dev_id)
+{
+ struct tango_ir *ir = dev_id;
+ unsigned int rc5_stat;
+ unsigned int rc6_stat;
+
+ rc5_stat = readl_relaxed(ir->rc5_base + IR_INT);
+ writel_relaxed(rc5_stat, ir->rc5_base + IR_INT);
+
+ rc6_stat = readl_relaxed(ir->rc6_base + RC6_CTRL);
+ writel_relaxed(rc6_stat, ir->rc6_base + RC6_CTRL);
+
+ if (!(rc5_stat & 3) && !(rc6_stat & BIT(31)))
+ return IRQ_NONE;
+
+ if (rc5_stat & BIT(0))
+ tango_ir_handle_rc5(ir);
+
+ if (rc5_stat & BIT(1))
+ tango_ir_handle_nec(ir);
+
+ if (rc6_stat & BIT(31))
+ tango_ir_handle_rc6(ir);
+
+ return IRQ_HANDLED;
+}
+
+static int tango_change_protocol(struct rc_dev *dev, u64 *rc_type)
+{
+ struct tango_ir *ir = dev->priv;
+ u32 rc5_ctrl = DISABLE_NEC;
+ u32 rc6_ctrl = 0;
+
+ if (*rc_type & NEC_ANY)
+ rc5_ctrl = 0;
+
+ if (*rc_type & RC_PROTO_BIT_RC5)
+ rc5_ctrl |= ENABLE_RC5;
+
+ if (*rc_type & RC_PROTO_BIT_RC6_0)
+ rc6_ctrl = ENABLE_RC6;
+
+ writel_relaxed(rc5_ctrl, ir->rc5_base + IR_CTRL);
+ writel_relaxed(rc6_ctrl, ir->rc6_base + RC6_CTRL);
+
+ return 0;
+}
+
+static int tango_ir_probe(struct platform_device *pdev)
+{
+ const char *map_name = RC_MAP_TANGO;
+ struct device *dev = &pdev->dev;
+ struct rc_dev *rc;
+ struct tango_ir *ir;
+ struct resource *rc5_res;
+ struct resource *rc6_res;
+ u64 clkrate, clkdiv;
+ int irq, err;
+ u32 val;
+
+ rc5_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!rc5_res)
+ return -EINVAL;
+
+ rc6_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!rc6_res)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -EINVAL;
+
+ ir = devm_kzalloc(dev, sizeof(*ir), GFP_KERNEL);
+ if (!ir)
+ return -ENOMEM;
+
+ ir->rc5_base = devm_ioremap_resource(dev, rc5_res);
+ if (IS_ERR(ir->rc5_base))
+ return PTR_ERR(ir->rc5_base);
+
+ ir->rc6_base = devm_ioremap_resource(dev, rc6_res);
+ if (IS_ERR(ir->rc6_base))
+ return PTR_ERR(ir->rc6_base);
+
+ ir->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ir->clk))
+ return PTR_ERR(ir->clk);
+
+ rc = devm_rc_allocate_device(dev, RC_DRIVER_SCANCODE);
+ if (!rc)
+ return -ENOMEM;
+
+ of_property_read_string(dev->of_node, "linux,rc-map-name", &map_name);
+
+ rc->device_name = DRIVER_NAME;
+ rc->driver_name = DRIVER_NAME;
+ rc->input_phys = DRIVER_NAME "/input0";
+ rc->map_name = map_name;
+ rc->allowed_protocols = NEC_ANY | RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_0;
+ rc->change_protocol = tango_change_protocol;
+ rc->priv = ir;
+ ir->rc = rc;
+
+ err = clk_prepare_enable(ir->clk);
+ if (err)
+ return err;
+
+ clkrate = clk_get_rate(ir->clk);
+
+ clkdiv = clkrate * NEC_TIME_BASE;
+ do_div(clkdiv, 1000000);
+
+ val = NEC_CAP(31) | GPIO_SEL(12) | clkdiv;
+ writel_relaxed(val, ir->rc5_base + IR_NEC_CTRL);
+
+ clkdiv = clkrate * RC5_TIME_BASE;
+ do_div(clkdiv, 1000000);
+
+ writel_relaxed(DISABLE_NEC, ir->rc5_base + IR_CTRL);
+ writel_relaxed(clkdiv, ir->rc5_base + IR_RC5_CLK_DIV);
+ writel_relaxed(ACK_IR_INT, ir->rc5_base + IR_INT);
+
+ clkdiv = clkrate * RC6_TIME_BASE;
+ do_div(clkdiv, RC6_CARRIER);
+
+ writel_relaxed(ACK_RC6_INT, ir->rc6_base + RC6_CTRL);
+ writel_relaxed((clkdiv >> 2) << 18 | clkdiv, ir->rc6_base + RC6_CLKDIV);
+
+ err = devm_request_irq(dev, irq, tango_ir_irq, IRQF_SHARED,
+ dev_name(dev), ir);
+ if (err)
+ goto err_clk;
+
+ err = devm_rc_register_device(dev, rc);
+ if (err)
+ goto err_clk;
+
+ platform_set_drvdata(pdev, ir);
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(ir->clk);
+ return err;
+}
+
+static int tango_ir_remove(struct platform_device *pdev)
+{
+ struct tango_ir *ir = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(ir->clk);
+ return 0;
+}
+
+static const struct of_device_id tango_ir_dt_ids[] = {
+ { .compatible = "sigma,smp8642-ir" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tango_ir_dt_ids);
+
+static struct platform_driver tango_ir_driver = {
+ .probe = tango_ir_probe,
+ .remove = tango_ir_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = tango_ir_dt_ids,
+ },
+};
+module_platform_driver(tango_ir_driver);
+
+MODULE_DESCRIPTION("SMP86xx IR decoder driver");
+MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c
index 5a28ce3a1d49..38dbc128340d 100644
--- a/drivers/media/usb/as102/as102_fw.c
+++ b/drivers/media/usb/as102/as102_fw.c
@@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
unsigned char *cmd,
const struct firmware *firmware) {
- struct as10x_fw_pkt_t fw_pkt;
+ struct as10x_fw_pkt_t *fw_pkt;
int total_read_bytes = 0, errno = 0;
unsigned char addr_has_changed = 0;
+ fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL);
+ if (!fw_pkt)
+ return -ENOMEM;
+
+
for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
int read_bytes = 0, data_len = 0;
/* parse intel hex line */
read_bytes = parse_hex_line(
(u8 *) (firmware->data + total_read_bytes),
- fw_pkt.raw.address,
- fw_pkt.raw.data,
+ fw_pkt->raw.address,
+ fw_pkt->raw.data,
&data_len,
&addr_has_changed);
@@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
/* detect the end of file */
total_read_bytes += read_bytes;
if (total_read_bytes == firmware->size) {
- fw_pkt.u.request[0] = 0x00;
- fw_pkt.u.request[1] = 0x03;
+ fw_pkt->u.request[0] = 0x00;
+ fw_pkt->u.request[1] = 0x03;
/* send EOF command */
errno = bus_adap->ops->upload_fw_pkt(bus_adap,
(uint8_t *)
- &fw_pkt, 2, 0);
+ fw_pkt, 2, 0);
if (errno < 0)
goto error;
} else {
if (!addr_has_changed) {
/* prepare command to send */
- fw_pkt.u.request[0] = 0x00;
- fw_pkt.u.request[1] = 0x01;
+ fw_pkt->u.request[0] = 0x00;
+ fw_pkt->u.request[1] = 0x01;
- data_len += sizeof(fw_pkt.u.request);
- data_len += sizeof(fw_pkt.raw.address);
+ data_len += sizeof(fw_pkt->u.request);
+ data_len += sizeof(fw_pkt->raw.address);
/* send cmd to device */
errno = bus_adap->ops->upload_fw_pkt(bus_adap,
(uint8_t *)
- &fw_pkt,
+ fw_pkt,
data_len,
0);
if (errno < 0)
@@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
}
}
error:
+ kfree(fw_pkt);
return (errno == 0) ? total_read_bytes : errno;
}
diff --git a/drivers/media/usb/au0828/au0828-i2c.c b/drivers/media/usb/au0828/au0828-i2c.c
index ef7d1b830ca3..1b8ec5d9e7ab 100644
--- a/drivers/media/usb/au0828/au0828-i2c.c
+++ b/drivers/media/usb/au0828/au0828-i2c.c
@@ -342,7 +342,7 @@ static const struct i2c_adapter au0828_i2c_adap_template = {
.algo = &au0828_i2c_algo_template,
};
-static struct i2c_client au0828_i2c_client_template = {
+static const struct i2c_client au0828_i2c_client_template = {
.name = "au0828 internal",
};
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index 7996eb83a54e..af68afe085b5 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -269,7 +269,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
static int au0828_probe_i2c_ir(struct au0828_dev *dev)
{
int i = 0;
- const unsigned short addr_list[] = {
+ static const unsigned short addr_list[] = {
0x47, I2C_CLIENT_END
};
diff --git a/drivers/media/usb/au0828/au0828-vbi.c b/drivers/media/usb/au0828/au0828-vbi.c
index e0930ce59b8d..9dd6bdb7304f 100644
--- a/drivers/media/usb/au0828/au0828-vbi.c
+++ b/drivers/media/usb/au0828/au0828-vbi.c
@@ -79,7 +79,7 @@ vbi_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&dev->slock, flags);
}
-struct vb2_ops au0828_vbi_qops = {
+const struct vb2_ops au0828_vbi_qops = {
.queue_setup = vbi_queue_setup,
.buf_prepare = vbi_buffer_prepare,
.buf_queue = vbi_buffer_queue,
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 9342402b92f7..654f67c25863 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -67,10 +67,10 @@ static inline void print_err_status(struct au0828_dev *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 05e445fe0b77..f6f37e8ef51d 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -358,7 +358,7 @@ void au0828_dvb_suspend(struct au0828_dev *dev);
void au0828_dvb_resume(struct au0828_dev *dev);
/* au0828-vbi.c */
-extern struct vb2_ops au0828_vbi_qops;
+extern const struct vb2_ops au0828_vbi_qops;
#define dprintk(level, fmt, arg...)\
do { if (au0828_debug & level)\
diff --git a/drivers/media/usb/b2c2/Kconfig b/drivers/media/usb/b2c2/Kconfig
index 17d35833980c..a620ae42dfc8 100644
--- a/drivers/media/usb/b2c2/Kconfig
+++ b/drivers/media/usb/b2c2/Kconfig
@@ -10,6 +10,6 @@ config DVB_B2C2_FLEXCOP_USB_DEBUG
bool "Enable debug for the B2C2 FlexCop drivers"
depends on DVB_B2C2_FLEXCOP_USB
select DVB_B2C2_FLEXCOP_DEBUG
- help
- Say Y if you want to enable the module option to control debug messages
- of all B2C2 FlexCop drivers.
+ help
+ Say Y if you want to enable the module option to control debug messages
+ of all B2C2 FlexCop drivers.
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index e0daa9b6c2a0..54d9d0cb326f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -847,7 +847,7 @@ struct cx231xx_board cx231xx_boards[] = {
.demod_addr = 0x64, /* 0xc8 >> 1 */
.demod_i2c_master = I2C_1_MUX_3,
.has_dvb = 1,
- .ir_i2c_master = I2C_0,
+ .decoder = CX231XX_AVDECODER,
.norm = V4L2_STD_PAL,
.output_mode = OUT_MODE_VIP11,
.tuner_addr = 0x60, /* 0xc0 >> 1 */
@@ -872,6 +872,7 @@ struct cx231xx_board cx231xx_boards[] = {
.name = "Astrometa T2hybrid",
.tuner_type = TUNER_ABSENT,
.has_dvb = 1,
+ .decoder = CX231XX_AVDECODER,
.output_mode = OUT_MODE_VIP11,
.agc_analog_digital_select_gpio = 0x01,
.ctl_pin_status_mask = 0xffffffc4,
@@ -1684,7 +1685,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
nr = dev->devno;
assoc_desc = udev->actconfig->intf_assoc[0];
- if (assoc_desc->bFirstInterface != ifnum) {
+ if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) {
dev_err(d, "Not found matching IAD interface\n");
retval = -ENODEV;
goto err_if;
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index c18bb33e060e..54abc1a7c8e1 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -179,10 +179,10 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index 76e901920f6f..d3bfe8e23b1f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -43,10 +43,10 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
@@ -285,7 +285,7 @@ static void vbi_buffer_release(struct videobuf_queue *vq,
free_buffer(vq, buf);
}
-struct videobuf_queue_ops cx231xx_vbi_qops = {
+const struct videobuf_queue_ops cx231xx_vbi_qops = {
.buf_setup = vbi_buffer_setup,
.buf_prepare = vbi_buffer_prepare,
.buf_queue = vbi_buffer_queue,
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.h b/drivers/media/usb/cx231xx/cx231xx-vbi.h
index 16c7d20a22a4..b33d2bdb621c 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.h
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.h
@@ -22,7 +22,7 @@
#ifndef _CX231XX_VBI_H
#define _CX231XX_VBI_H
-extern struct videobuf_queue_ops cx231xx_vbi_qops;
+extern const struct videobuf_queue_ops cx231xx_vbi_qops;
#define NTSC_VBI_START_LINE 10 /* line 10 - 21 */
#define NTSC_VBI_END_LINE 21
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 179b8481a870..226059fc672b 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -199,10 +199,10 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index 096bb75a24e5..2bf3bd81280a 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -628,8 +628,7 @@ static int dvb_usb_fe_sleep(struct dvb_frontend *fe)
}
ret = dvb_usbv2_device_power_ctrl(d, 0);
- if (ret < 0)
- goto err;
+
err:
if (!adap->suspend_resume_active) {
adap->active_fe = -1;
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 0eb33e043079..a221bb8a12b4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -516,7 +516,6 @@ static int mxl111sf_i2c_hw_xfer_msg(struct mxl111sf_state *state,
data required to program */
block_len = (msg->len / 8);
left_over_len = (msg->len % 8);
- index = 0;
mxl_i2c("block_len %d, left_over_len %d",
block_len, left_over_len);
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 95a7b9123f8e..c76e78f9638a 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1598,7 +1598,7 @@ static int rtl2831u_rc_query(struct dvb_usb_device *d)
struct rtl28xxu_dev *dev = d->priv;
u8 buf[5];
u32 rc_code;
- struct rtl28xxu_reg_val rc_nec_tab[] = {
+ static const struct rtl28xxu_reg_val rc_nec_tab[] = {
{ 0x3033, 0x80 },
{ 0x3020, 0x43 },
{ 0x3021, 0x16 },
diff --git a/drivers/media/usb/dvb-usb/a800.c b/drivers/media/usb/dvb-usb/a800.c
index 7ba975bea96a..540886b3bb29 100644
--- a/drivers/media/usb/dvb-usb/a800.c
+++ b/drivers/media/usb/dvb-usb/a800.c
@@ -37,48 +37,9 @@ static int a800_identify_state(struct usb_device *udev, struct dvb_usb_device_pr
return 0;
}
-static struct rc_map_table rc_map_a800_table[] = {
- { 0x0201, KEY_MODE }, /* SOURCE */
- { 0x0200, KEY_POWER2 }, /* POWER */
- { 0x0205, KEY_1 }, /* 1 */
- { 0x0206, KEY_2 }, /* 2 */
- { 0x0207, KEY_3 }, /* 3 */
- { 0x0209, KEY_4 }, /* 4 */
- { 0x020a, KEY_5 }, /* 5 */
- { 0x020b, KEY_6 }, /* 6 */
- { 0x020d, KEY_7 }, /* 7 */
- { 0x020e, KEY_8 }, /* 8 */
- { 0x020f, KEY_9 }, /* 9 */
- { 0x0212, KEY_LEFT }, /* L / DISPLAY */
- { 0x0211, KEY_0 }, /* 0 */
- { 0x0213, KEY_RIGHT }, /* R / CH RTN */
- { 0x0217, KEY_CAMERA }, /* SNAP SHOT */
- { 0x0210, KEY_LAST }, /* 16-CH PREV */
- { 0x021e, KEY_VOLUMEDOWN }, /* VOL DOWN */
- { 0x020c, KEY_ZOOM }, /* FULL SCREEN */
- { 0x021f, KEY_VOLUMEUP }, /* VOL UP */
- { 0x0214, KEY_MUTE }, /* MUTE */
- { 0x0208, KEY_AUDIO }, /* AUDIO */
- { 0x0219, KEY_RECORD }, /* RECORD */
- { 0x0218, KEY_PLAY }, /* PLAY */
- { 0x021b, KEY_STOP }, /* STOP */
- { 0x021a, KEY_PLAYPAUSE }, /* TIMESHIFT / PAUSE */
- { 0x021d, KEY_BACK }, /* << / RED */
- { 0x021c, KEY_FORWARD }, /* >> / YELLOW */
- { 0x0203, KEY_TEXT }, /* TELETEXT */
- { 0x0204, KEY_EPG }, /* EPG */
- { 0x0215, KEY_MENU }, /* MENU */
-
- { 0x0303, KEY_CHANNELUP }, /* CH UP */
- { 0x0302, KEY_CHANNELDOWN }, /* CH DOWN */
- { 0x0301, KEY_FIRST }, /* |<< / GREEN */
- { 0x0300, KEY_LAST }, /* >>| / BLUE */
-
-};
-
-static int a800_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int a800_rc_query(struct dvb_usb_device *d)
{
- int ret;
+ int ret = 0;
u8 *key = kmalloc(5, GFP_KERNEL);
if (!key)
return -ENOMEM;
@@ -90,11 +51,12 @@ static int a800_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
goto out;
}
- /* call the universal NEC remote processor, to find out the key's state and event */
- dvb_usb_nec_rc_key_to_event(d,key,event,state);
- if (key[0] != 0)
- deb_rc("key: %*ph\n", 5, key);
- ret = 0;
+ /* Note that extended nec and nec32 are dropped */
+ if (key[0] == 1)
+ rc_keydown(d->rc_dev, RC_PROTO_NEC,
+ RC_SCANCODE_NEC(key[1], key[3]), 0);
+ else if (key[0] == 2)
+ rc_repeat(d->rc_dev);
out:
kfree(key);
return ret;
@@ -157,11 +119,12 @@ static struct dvb_usb_device_properties a800_properties = {
.power_ctrl = a800_power_ctrl,
.identify_state = a800_identify_state,
- .rc.legacy = {
- .rc_interval = DEFAULT_RC_INTERVAL,
- .rc_map_table = rc_map_a800_table,
- .rc_map_size = ARRAY_SIZE(rc_map_a800_table),
- .rc_query = a800_rc_query,
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_AVERMEDIA_M135A,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = a800_rc_query,
+ .allowed_protos = RC_PROTO_BIT_NEC,
},
.i2c_algo = &dibusb_i2c_algo,
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 6020170fe99a..92098c1b78e5 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -291,7 +291,7 @@ static int stk7700P2_frontend_attach(struct dvb_usb_adapter *adap)
stk7700d_dib7000p_mt2266_config)
!= 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
}
@@ -325,7 +325,7 @@ static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap)
stk7700d_dib7000p_mt2266_config)
!= 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
}
@@ -478,7 +478,7 @@ static int stk7700ph_frontend_attach(struct dvb_usb_adapter *adap)
&stk7700ph_dib7700_xc3028_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -1010,7 +1010,7 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap)
&dib7070p_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -1068,7 +1068,7 @@ static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap)
&dib7770p_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3056,7 +3056,7 @@ static int nim7090_frontend_attach(struct dvb_usb_adapter *adap)
if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config);
@@ -3109,7 +3109,7 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap)
/* initialize IC 0 */
if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3139,7 +3139,7 @@ static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap)
i2c = state->dib7000p_ops.get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1);
if (state->dib7000p_ops.i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3214,7 +3214,7 @@ static int tfe7790p_frontend_attach(struct dvb_usb_adapter *adap)
1, 0x10, &tfe7790p_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap,
@@ -3309,7 +3309,7 @@ static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
stk7070pd_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3384,7 +3384,7 @@ static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
stk7070pd_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
}
@@ -3620,7 +3620,7 @@ static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap)
if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap) == 0) {
/* Demodulator not found for some reason? */
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-remote.c b/drivers/media/usb/dvb-usb/dvb-usb-remote.c
index 701c10835482..65e2c9e2cdc9 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-remote.c
@@ -280,10 +280,11 @@ static int rc_core_dvb_usb_remote_init(struct dvb_usb_device *d)
dev->change_protocol = d->props.rc.core.change_protocol;
dev->allowed_protocols = d->props.rc.core.allowed_protos;
usb_to_input_id(d->udev, &dev->input_id);
- dev->device_name = "IR-receiver inside an USB DVB receiver";
+ dev->device_name = d->desc->name;
dev->input_phys = d->rc_phys;
dev->dev.parent = &d->udev->dev;
dev->priv = d;
+ dev->scancode_mask = d->props.rc.core.scancode_mask;
err = rc_register_device(dev);
if (err < 0) {
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 6c7c4637530f..e71fc86b4fb2 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -208,6 +208,7 @@ struct dvb_rc {
int (*rc_query) (struct dvb_usb_device *d);
int rc_interval;
bool bulk_mode; /* uses bulk mode */
+ u32 scancode_mask;
};
/**
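The new scancode_mask field added to struct dvb_rc above lets a driver that can only recover part of a protocol's scancode (vp7045 reads just the 8-bit NEC command byte) tell rc-core how much of the scancode is meaningful, so keymap entries can still match. Roughly, matching works as in the sketch below; this illustrates the idea only and is not the rc-core lookup code itself:

/* Illustration only: both sides are reduced to the bits the hardware
 * actually delivers before being compared. */
static bool example_scancode_match(u32 keymap_scancode, u32 drv_scancode,
				   u32 scancode_mask)
{
	return (keymap_scancode & scancode_mask) ==
	       (drv_scancode & scancode_mask);
}

With .scancode_mask = 0xff, the RC_SCANCODE_NEC(0, key) value generated by vp7045_rc_query() can still match full scancodes from RC_MAP_TWINHAN_VP1027_DVBS.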
diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c
index 0251a4e91d47..41261317bd5c 100644
--- a/drivers/media/usb/dvb-usb/friio-fe.c
+++ b/drivers/media/usb/dvb-usb/friio-fe.c
@@ -261,28 +261,6 @@ static int jdvbt90502_read_signal_strength(struct dvb_frontend *fe,
return 0;
}
-
-/* filter out un-supported properties to notify users */
-static int jdvbt90502_set_property(struct dvb_frontend *fe,
- struct dtv_property *tvp)
-{
- int r = 0;
-
- switch (tvp->cmd) {
- case DTV_DELIVERY_SYSTEM:
- if (tvp->u.data != SYS_ISDBT)
- r = -EINVAL;
- break;
- case DTV_CLEAR:
- case DTV_TUNE:
- case DTV_FREQUENCY:
- break;
- default:
- r = -EINVAL;
- }
- return r;
-}
-
static int jdvbt90502_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
@@ -457,8 +435,6 @@ static const struct dvb_frontend_ops jdvbt90502_ops = {
.init = jdvbt90502_init,
.write = _jdvbt90502_write,
- .set_property = jdvbt90502_set_property,
-
.set_frontend = jdvbt90502_set_frontend,
.read_status = jdvbt90502_read_status,
diff --git a/drivers/media/usb/dvb-usb/vp7045.c b/drivers/media/usb/dvb-usb/vp7045.c
index 13340af0d39c..2527b88beb87 100644
--- a/drivers/media/usb/dvb-usb/vp7045.c
+++ b/drivers/media/usb/dvb-usb/vp7045.c
@@ -97,82 +97,22 @@ static int vp7045_power_ctrl(struct dvb_usb_device *d, int onoff)
return vp7045_usb_op(d,SET_TUNER_POWER,&v,1,NULL,0,150);
}
-/* remote control stuff */
-
-/* The keymapping struct. Somehow this should be loaded to the driver, but
- * currently it is hardcoded. */
-static struct rc_map_table rc_map_vp7045_table[] = {
- { 0x0016, KEY_POWER },
- { 0x0010, KEY_MUTE },
- { 0x0003, KEY_1 },
- { 0x0001, KEY_2 },
- { 0x0006, KEY_3 },
- { 0x0009, KEY_4 },
- { 0x001d, KEY_5 },
- { 0x001f, KEY_6 },
- { 0x000d, KEY_7 },
- { 0x0019, KEY_8 },
- { 0x001b, KEY_9 },
- { 0x0015, KEY_0 },
- { 0x0005, KEY_CHANNELUP },
- { 0x0002, KEY_CHANNELDOWN },
- { 0x001e, KEY_VOLUMEUP },
- { 0x000a, KEY_VOLUMEDOWN },
- { 0x0011, KEY_RECORD },
- { 0x0017, KEY_FAVORITES }, /* Heart symbol - Channel list. */
- { 0x0014, KEY_PLAY },
- { 0x001a, KEY_STOP },
- { 0x0040, KEY_REWIND },
- { 0x0012, KEY_FASTFORWARD },
- { 0x000e, KEY_PREVIOUS }, /* Recall - Previous channel. */
- { 0x004c, KEY_PAUSE },
- { 0x004d, KEY_SCREEN }, /* Full screen mode. */
- { 0x0054, KEY_AUDIO }, /* MTS - Switch to secondary audio. */
- { 0x000c, KEY_CANCEL }, /* Cancel */
- { 0x001c, KEY_EPG }, /* EPG */
- { 0x0000, KEY_TAB }, /* Tab */
- { 0x0048, KEY_INFO }, /* Preview */
- { 0x0004, KEY_LIST }, /* RecordList */
- { 0x000f, KEY_TEXT }, /* Teletext */
- { 0x0041, KEY_PREVIOUSSONG },
- { 0x0042, KEY_NEXTSONG },
- { 0x004b, KEY_UP },
- { 0x0051, KEY_DOWN },
- { 0x004e, KEY_LEFT },
- { 0x0052, KEY_RIGHT },
- { 0x004f, KEY_ENTER },
- { 0x0013, KEY_CANCEL },
- { 0x004a, KEY_CLEAR },
- { 0x0054, KEY_PRINT }, /* Capture */
- { 0x0043, KEY_SUBTITLE }, /* Subtitle/CC */
- { 0x0008, KEY_VIDEO }, /* A/V */
- { 0x0007, KEY_SLEEP }, /* Hibernate */
- { 0x0045, KEY_ZOOM }, /* Zoom+ */
- { 0x0018, KEY_RED},
- { 0x0053, KEY_GREEN},
- { 0x005e, KEY_YELLOW},
- { 0x005f, KEY_BLUE}
-};
-
-static int vp7045_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int vp7045_rc_query(struct dvb_usb_device *d)
{
u8 key;
- int i;
vp7045_usb_op(d,RC_VAL_READ,NULL,0,&key,1,20);
deb_rc("remote query key: %x %d\n",key,key);
- if (key == 0x44) {
- *state = REMOTE_NO_KEY_PRESSED;
- return 0;
+ if (key != 0x44) {
+ /*
+	 * The 8-bit address isn't available, but since the remote uses
+	 * address 0 we use that. NEC repeats are ignored too, even
+	 * though the remote sends them.
+ */
+ rc_keydown(d->rc_dev, RC_PROTO_NEC, RC_SCANCODE_NEC(0, key), 0);
}
- for (i = 0; i < ARRAY_SIZE(rc_map_vp7045_table); i++)
- if (rc5_data(&rc_map_vp7045_table[i]) == key) {
- *state = REMOTE_KEY_PRESSED;
- *event = rc_map_vp7045_table[i].keycode;
- break;
- }
return 0;
}
@@ -265,11 +205,13 @@ static struct dvb_usb_device_properties vp7045_properties = {
.power_ctrl = vp7045_power_ctrl,
.read_mac_address = vp7045_read_mac_addr,
- .rc.legacy = {
- .rc_interval = 400,
- .rc_map_table = rc_map_vp7045_table,
- .rc_map_size = ARRAY_SIZE(rc_map_vp7045_table),
- .rc_query = vp7045_rc_query,
+ .rc.core = {
+ .rc_interval = 400,
+ .rc_codes = RC_MAP_TWINHAN_VP1027_DVBS,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = vp7045_rc_query,
+ .allowed_protos = RC_PROTO_BIT_NEC,
+ .scancode_mask = 0xff,
},
.num_device_descs = 2,
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 4a7db623fe29..9950a740e04e 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -112,10 +112,10 @@ static inline void print_err_status(struct em28xx *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 66c5012a628a..9bf49d666e5a 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -882,7 +882,7 @@ static const struct i2c_adapter em28xx_adap_template = {
.algo = &em28xx_algo,
};
-static struct i2c_client em28xx_client_template = {
+static const struct i2c_client em28xx_client_template = {
.name = "em28xx internal",
};
diff --git a/drivers/media/usb/em28xx/em28xx-v4l.h b/drivers/media/usb/em28xx/em28xx-v4l.h
index 8dfcb56bf4b3..9c411aac3878 100644
--- a/drivers/media/usb/em28xx/em28xx-v4l.h
+++ b/drivers/media/usb/em28xx/em28xx-v4l.h
@@ -16,4 +16,4 @@
int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count);
void em28xx_stop_vbi_streaming(struct vb2_queue *vq);
-extern struct vb2_ops em28xx_vbi_qops;
+extern const struct vb2_ops em28xx_vbi_qops;
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 0bac552bbe87..f5123651ef30 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -93,7 +93,7 @@ vbi_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&dev->slock, flags);
}
-struct vb2_ops em28xx_vbi_qops = {
+const struct vb2_ops em28xx_vbi_qops = {
.queue_setup = vbi_queue_setup,
.buf_prepare = vbi_buffer_prepare,
.buf_queue = vbi_buffer_queue,
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 8d253a5df0a9..a2ba2d905952 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -557,10 +557,10 @@ static inline void print_err_status(struct em28xx *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/gspca/Kconfig b/drivers/media/usb/gspca/Kconfig
index 3fd94fe7e1eb..d214a21acff7 100644
--- a/drivers/media/usb/gspca/Kconfig
+++ b/drivers/media/usb/gspca/Kconfig
@@ -204,11 +204,11 @@ config USB_GSPCA_SE401
tristate "SE401 USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
help
- Say Y here if you want support for cameras based on the
- Endpoints (formerly known as AOX) se401 chip.
+ Say Y here if you want support for cameras based on the
+ Endpoints (formerly known as AOX) se401 chip.
- To compile this driver as a module, choose M here: the
- module will be called gspca_se401.
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_se401.
config USB_GSPCA_SN9C2028
tristate "SONIX Dual-Mode USB Camera Driver"
@@ -224,11 +224,11 @@ config USB_GSPCA_SN9C20X
tristate "SN9C20X USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
help
- Say Y here if you want support for cameras based on the
- sn9c20x chips (SN9C201 and SN9C202).
+ Say Y here if you want support for cameras based on the
+ sn9c20x chips (SN9C201 and SN9C202).
- To compile this driver as a module, choose M here: the
- module will be called gspca_sn9c20x.
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_sn9c20x.
config USB_GSPCA_SONIXB
tristate "SONIX Bayer USB Camera Driver"
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 0f141762abf1..961343873fd0 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1075,7 +1075,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
/* give an index to each format */
index = 0;
- j = 0;
for (i = gspca_dev->cam.nmodes; --i >= 0; ) {
fmt_tb[index] = gspca_dev->cam.cam_mode[i].pixelformat;
j = 0;
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index cdb79c5f0c38..f1537daf4e2e 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -2865,7 +2865,7 @@ static void sd_reset_snapshot(struct gspca_dev *gspca_dev)
static void ov51x_upload_quan_tables(struct sd *sd)
{
- const unsigned char yQuanTable511[] = {
+ static const unsigned char yQuanTable511[] = {
0, 1, 1, 2, 2, 3, 3, 4,
1, 1, 1, 2, 2, 3, 4, 4,
1, 1, 2, 2, 3, 4, 4, 4,
@@ -2876,7 +2876,7 @@ static void ov51x_upload_quan_tables(struct sd *sd)
4, 4, 4, 4, 5, 5, 5, 5
};
- const unsigned char uvQuanTable511[] = {
+ static const unsigned char uvQuanTable511[] = {
0, 2, 2, 3, 4, 4, 4, 4,
2, 2, 2, 4, 4, 4, 4, 4,
2, 2, 3, 4, 4, 4, 4, 4,
@@ -2888,13 +2888,13 @@ static void ov51x_upload_quan_tables(struct sd *sd)
};
/* OV518 quantization tables are 8x4 (instead of 8x8) */
- const unsigned char yQuanTable518[] = {
+ static const unsigned char yQuanTable518[] = {
5, 4, 5, 6, 6, 7, 7, 7,
5, 5, 5, 5, 6, 7, 7, 7,
6, 6, 6, 6, 7, 7, 7, 8,
7, 7, 6, 7, 7, 7, 8, 8
};
- const unsigned char uvQuanTable518[] = {
+ static const unsigned char uvQuanTable518[] = {
6, 6, 6, 7, 7, 7, 7, 7,
6, 6, 6, 7, 7, 7, 7, 7,
6, 6, 6, 7, 7, 7, 7, 8,
@@ -2943,7 +2943,7 @@ static void ov511_configure(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
/* For 511 and 511+ */
- const struct ov_regvals init_511[] = {
+ static const struct ov_regvals init_511[] = {
{ R51x_SYS_RESET, 0x7f },
{ R51x_SYS_INIT, 0x01 },
{ R51x_SYS_RESET, 0x7f },
@@ -2953,7 +2953,7 @@ static void ov511_configure(struct gspca_dev *gspca_dev)
{ R51x_SYS_RESET, 0x3d },
};
- const struct ov_regvals norm_511[] = {
+ static const struct ov_regvals norm_511[] = {
{ R511_DRAM_FLOW_CTL, 0x01 },
{ R51x_SYS_SNAP, 0x00 },
{ R51x_SYS_SNAP, 0x02 },
@@ -2963,7 +2963,7 @@ static void ov511_configure(struct gspca_dev *gspca_dev)
{ R511_COMP_LUT_EN, 0x03 },
};
- const struct ov_regvals norm_511_p[] = {
+ static const struct ov_regvals norm_511_p[] = {
{ R511_DRAM_FLOW_CTL, 0xff },
{ R51x_SYS_SNAP, 0x00 },
{ R51x_SYS_SNAP, 0x02 },
@@ -2973,7 +2973,7 @@ static void ov511_configure(struct gspca_dev *gspca_dev)
{ R511_COMP_LUT_EN, 0x03 },
};
- const struct ov_regvals compress_511[] = {
+ static const struct ov_regvals compress_511[] = {
{ 0x70, 0x1f },
{ 0x71, 0x05 },
{ 0x72, 0x06 },
@@ -3009,7 +3009,7 @@ static void ov518_configure(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
/* For 518 and 518+ */
- const struct ov_regvals init_518[] = {
+ static const struct ov_regvals init_518[] = {
{ R51x_SYS_RESET, 0x40 },
{ R51x_SYS_INIT, 0xe1 },
{ R51x_SYS_RESET, 0x3e },
@@ -3020,7 +3020,7 @@ static void ov518_configure(struct gspca_dev *gspca_dev)
{ 0x5d, 0x03 },
};
- const struct ov_regvals norm_518[] = {
+ static const struct ov_regvals norm_518[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
{ 0x31, 0x0f },
@@ -3033,7 +3033,7 @@ static void ov518_configure(struct gspca_dev *gspca_dev)
{ 0x2f, 0x80 },
};
- const struct ov_regvals norm_518_p[] = {
+ static const struct ov_regvals norm_518_p[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
{ 0x31, 0x0f },
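The ov519 hunks above add static to function-scope const register tables: without it the compiler may rebuild each initialized array on the stack every time the function runs, whereas static const emits the data once in .rodata. A generic illustration with hypothetical names:

static void example_write_init_regs(struct sd *sd)
{
	/* emitted once in .rodata, not copied onto the stack on each call */
	static const struct ov_regvals init_regs[] = {
		{ 0x70, 0x1f },
		{ 0x71, 0x05 },
	};

	write_regvals(sd, init_regs, ARRAY_SIZE(init_regs));	/* assumed helper */
}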
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index a097d3dbc141..65ef755adfdc 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -386,7 +386,7 @@ static void msi2500_isoc_handler(struct urb *urb)
if (unlikely(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN)) {
- dev_dbg(dev->dev, "URB (%p) unlinked %ssynchronuously\n",
+ dev_dbg(dev->dev, "URB (%p) unlinked %ssynchronously\n",
urb, urb->status == -ENOENT ? "" : "a");
return;
}
diff --git a/drivers/media/usb/pvrusb2/Kconfig b/drivers/media/usb/pvrusb2/Kconfig
index 60a2604e4cb3..1ad913fc30bf 100644
--- a/drivers/media/usb/pvrusb2/Kconfig
+++ b/drivers/media/usb/pvrusb2/Kconfig
@@ -44,7 +44,6 @@ config VIDEO_PVRUSB2_DVB
select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA8290 if MEDIA_SUBDRV_AUTOSELECT
---help---
-
This option enables a DVB interface for the pvrusb2 driver.
If your device does not support digital television, this
feature will have no effect on the driver's operation.
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index eb6921d2743e..54b036d39c5b 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -262,7 +262,8 @@ static void pwc_isoc_handler(struct urb *urb)
if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN) {
- PWC_DEBUG_OPEN("URB (%p) unlinked %ssynchronuously.\n", urb, urb->status == -ENOENT ? "" : "a");
+ PWC_DEBUG_OPEN("URB (%p) unlinked %ssynchronously.\n",
+ urb, urb->status == -ENOENT ? "" : "a");
return;
}
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index b2f239c4ba42..7fee5766587a 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -485,9 +485,10 @@ static void s2255_reset_dsppower(struct s2255_dev *dev)
/* kickstarts the firmware loading. from probe
*/
-static void s2255_timer(unsigned long user_data)
+static void s2255_timer(struct timer_list *t)
{
- struct s2255_fw *data = (struct s2255_fw *)user_data;
+ struct s2255_dev *dev = from_timer(dev, t, timer);
+ struct s2255_fw *data = dev->fw_data;
if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) {
pr_err("s2255: can't submit urb\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
@@ -2283,7 +2284,7 @@ static int s2255_probe(struct usb_interface *interface,
dev_err(&interface->dev, "Could not find bulk-in endpoint\n");
goto errorEP;
}
- setup_timer(&dev->timer, s2255_timer, (unsigned long)dev->fw_data);
+ timer_setup(&dev->timer, s2255_timer, 0);
init_waitqueue_head(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
struct s2255_vc *vc = &dev->vc[i];
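The s2255 change above is part of the tree-wide timer API conversion: the callback now receives the struct timer_list pointer and recovers its containing structure with from_timer() (a container_of() wrapper), instead of an unsigned long cookie passed through setup_timer(). A minimal self-contained sketch of the pattern, assuming the timer is embedded in the driver's device structure:

#include <linux/timer.h>

struct example_dev {
	struct timer_list timer;
	int pending;
};

static void example_timeout(struct timer_list *t)
{
	/* recover the parent structure from the embedded timer */
	struct example_dev *dev = from_timer(dev, t, timer);

	dev->pending = 0;
}

static void example_start(struct example_dev *dev)
{
	timer_setup(&dev->timer, example_timeout, 0);
	mod_timer(&dev->timer, jiffies + msecs_to_jiffies(100));
}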
diff --git a/drivers/media/usb/stk1160/stk1160-i2c.c b/drivers/media/usb/stk1160/stk1160-i2c.c
index 2c70173e3c82..62a12d5356ad 100644
--- a/drivers/media/usb/stk1160/stk1160-i2c.c
+++ b/drivers/media/usb/stk1160/stk1160-i2c.c
@@ -246,7 +246,7 @@ static const struct i2c_adapter adap_template = {
.algo = &algo,
};
-static struct i2c_client client_template = {
+static const struct i2c_client client_template = {
.name = "stk1160 internal",
};
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index ce8ebbe395a6..423c03a0638d 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -38,10 +38,10 @@ static inline void print_err_status(struct stk1160 *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/tm6000/tm6000-cards.c b/drivers/media/usb/tm6000/tm6000-cards.c
index 2537643a1808..77347541904d 100644
--- a/drivers/media/usb/tm6000/tm6000-cards.c
+++ b/drivers/media/usb/tm6000/tm6000-cards.c
@@ -1184,7 +1184,7 @@ static int tm6000_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *usbdev;
- struct tm6000_core *dev = NULL;
+ struct tm6000_core *dev;
int i, rc = 0;
int nr = 0;
char *speed;
@@ -1194,22 +1194,21 @@ static int tm6000_usb_probe(struct usb_interface *interface,
/* Selects the proper interface */
rc = usb_set_interface(usbdev, 0, 1);
if (rc < 0)
- goto err;
+ goto report_failure;
/* Check to see next free device and mark as used */
nr = find_first_zero_bit(&tm6000_devused, TM6000_MAXBOARDS);
if (nr >= TM6000_MAXBOARDS) {
printk(KERN_ERR "tm6000: Supports only %i tm60xx boards.\n", TM6000_MAXBOARDS);
- usb_put_dev(usbdev);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto put_device;
}
/* Create and initialize dev struct */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- printk(KERN_ERR "tm6000" ": out of memory!\n");
- usb_put_dev(usbdev);
- return -ENOMEM;
+ if (!dev) {
+ rc = -ENOMEM;
+ goto put_device;
}
spin_lock_init(&dev->slock);
mutex_init(&dev->usb_lock);
@@ -1313,8 +1312,7 @@ static int tm6000_usb_probe(struct usb_interface *interface,
if (!dev->isoc_in.endp) {
printk(KERN_ERR "tm6000: probing error: no IN ISOC endpoint!\n");
rc = -ENODEV;
-
- goto err;
+ goto free_device;
}
/* save our data pointer in this interface device */
@@ -1324,17 +1322,18 @@ static int tm6000_usb_probe(struct usb_interface *interface,
rc = tm6000_init_dev(dev);
if (rc < 0)
- goto err;
+ goto free_device;
return 0;
-err:
+free_device:
+ kfree(dev);
+report_failure:
printk(KERN_ERR "tm6000: Error %d while registering\n", rc);
clear_bit(nr, &tm6000_devused);
+put_device:
usb_put_dev(usbdev);
-
- kfree(dev);
return rc;
}
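The tm6000 probe rework above adopts the usual kernel error-unwinding style: one label per acquired resource, jumped to in reverse order of acquisition, so each failure path releases exactly what was set up before it. A generic sketch of the idiom (all helpers hypothetical):

static int example_probe(void)
{
	int rc;

	rc = acquire_a();
	if (rc)
		goto report_failure;

	rc = acquire_b();
	if (rc)
		goto err_release_a;

	rc = acquire_c();
	if (rc)
		goto err_release_b;

	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();
report_failure:
	pr_err("probe failed: %d\n", rc);
	return rc;
}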
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 097ac321b7e1..c811fc6cf48a 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -45,10 +45,10 @@ static inline void print_err_status(struct tm6000_core *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
@@ -123,7 +123,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
}
dvb->bulk_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (dvb->bulk_urb == NULL)
+ if (!dvb->bulk_urb)
return -ENOMEM;
pipe = usb_rcvbulkpipe(dev->udev, dev->bulk_in.endp->desc.bEndpointAddress
@@ -133,9 +133,8 @@ static int tm6000_start_stream(struct tm6000_core *dev)
size = size * 15; /* 512 x 8 or 12 or 15 */
dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
- if (dvb->bulk_urb->transfer_buffer == NULL) {
+ if (!dvb->bulk_urb->transfer_buffer) {
usb_free_urb(dvb->bulk_urb);
- printk(KERN_ERR "tm6000: couldn't allocate transfer buffer!\n");
return -ENOMEM;
}
@@ -361,7 +360,7 @@ static void unregister_dvb(struct tm6000_core *dev)
{
struct tm6000_dvb *dvb = dev->dvb;
- if (dvb->bulk_urb != NULL) {
+ if (dvb->bulk_urb) {
struct urb *bulk_urb = dvb->bulk_urb;
kfree(bulk_urb->transfer_buffer);
@@ -400,10 +399,8 @@ static int dvb_init(struct tm6000_core *dev)
}
dvb = kzalloc(sizeof(struct tm6000_dvb), GFP_KERNEL);
- if (!dvb) {
- printk(KERN_INFO "Cannot allocate memory\n");
+ if (!dvb)
return -ENOMEM;
- }
dev->dvb = dvb;
diff --git a/drivers/media/usb/tm6000/tm6000-input.c b/drivers/media/usb/tm6000/tm6000-input.c
index 91889ad9cdd7..397990afe00b 100644
--- a/drivers/media/usb/tm6000/tm6000-input.c
+++ b/drivers/media/usb/tm6000/tm6000-input.c
@@ -352,7 +352,7 @@ static int __tm6000_ir_int_start(struct rc_dev *rc)
dprintk(1, "IR max size: %d\n", size);
ir->int_urb->transfer_buffer = kzalloc(size, GFP_ATOMIC);
- if (ir->int_urb->transfer_buffer == NULL) {
+ if (!ir->int_urb->transfer_buffer) {
usb_free_urb(ir->int_urb);
return err;
}
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index ec8c4d2534dc..9fa25de6b5a9 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -342,10 +342,10 @@ static inline void print_err_status(struct tm6000_core *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
@@ -470,20 +470,16 @@ static int tm6000_alloc_urb_buffers(struct tm6000_core *dev)
int num_bufs = TM6000_NUM_URB_BUF;
int i;
- if (dev->urb_buffer != NULL)
+ if (dev->urb_buffer)
return 0;
dev->urb_buffer = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
- if (!dev->urb_buffer) {
- tm6000_err("cannot allocate memory for urb buffers\n");
+ if (!dev->urb_buffer)
return -ENOMEM;
- }
dev->urb_dma = kmalloc(sizeof(dma_addr_t *)*num_bufs, GFP_KERNEL);
- if (!dev->urb_dma) {
- tm6000_err("cannot allocate memory for urb dma pointers\n");
+ if (!dev->urb_dma)
return -ENOMEM;
- }
for (i = 0; i < num_bufs; i++) {
dev->urb_buffer[i] = usb_alloc_coherent(
@@ -507,7 +503,7 @@ static int tm6000_free_urb_buffers(struct tm6000_core *dev)
{
int i;
- if (dev->urb_buffer == NULL)
+ if (!dev->urb_buffer)
return 0;
for (i = 0; i < TM6000_NUM_URB_BUF; i++) {
@@ -598,15 +594,12 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
dev->isoc_ctl.num_bufs = num_bufs;
dev->isoc_ctl.urb = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
- if (!dev->isoc_ctl.urb) {
- tm6000_err("cannot alloc memory for usb buffers\n");
+ if (!dev->isoc_ctl.urb)
return -ENOMEM;
- }
dev->isoc_ctl.transfer_buffer = kmalloc(sizeof(void *)*num_bufs,
GFP_KERNEL);
if (!dev->isoc_ctl.transfer_buffer) {
- tm6000_err("cannot allocate memory for usbtransfer\n");
kfree(dev->isoc_ctl.urb);
return -ENOMEM;
}
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index f06f09a0876e..b55b79b8e921 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -84,7 +84,7 @@ static int usbtv_probe(struct usb_interface *intf,
	/* Packet size is split into 11 bits of base size and a count of
	 * extra multiples of it. */
size = usb_endpoint_maxp(&ep->desc);
- size = (size & 0x07ff) * usb_endpoint_maxp_mult(&ep->desc);
+ size = size * usb_endpoint_maxp_mult(&ep->desc);
/* Device structure */
usbtv = kzalloc(sizeof(struct usbtv), GFP_KERNEL);
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 95b5f4319ec2..3668a04359e8 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -718,8 +718,8 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
*/
if (ctrl->id == V4L2_CID_BRIGHTNESS || ctrl->id == V4L2_CID_CONTRAST) {
ret = usb_control_msg(usbtv->udev,
- usb_sndctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ usb_rcvctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, USBTV_BASE + 0x0244, (void *)data, 3, 0);
if (ret < 0)
goto error;
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 6d22b22cb35b..28b91b7d756f 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2230,7 +2230,7 @@ static int uvc_reset_resume(struct usb_interface *intf)
* Module parameters
*/
-static int uvc_clock_param_get(char *buffer, struct kernel_param *kp)
+static int uvc_clock_param_get(char *buffer, const struct kernel_param *kp)
{
if (uvc_clock_param == CLOCK_MONOTONIC)
return sprintf(buffer, "CLOCK_MONOTONIC");
@@ -2238,7 +2238,7 @@ static int uvc_clock_param_get(char *buffer, struct kernel_param *kp)
return sprintf(buffer, "CLOCK_REALTIME");
}
-static int uvc_clock_param_set(const char *val, struct kernel_param *kp)
+static int uvc_clock_param_set(const char *val, const struct kernel_param *kp)
{
if (strncasecmp(val, "clock_", strlen("clock_")) == 0)
val += strlen("clock_");
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 4ff8d0aed015..1d888661fd03 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -209,10 +209,8 @@ static int send_control_msg(struct usb_device *udev, u8 request, u16 value,
int status;
unsigned char *transfer_buffer = kmalloc(size, GFP_KERNEL);
- if (!transfer_buffer) {
- dev_err(&udev->dev, "kmalloc(%d) failed\n", size);
+ if (!transfer_buffer)
return -ENOMEM;
- }
memcpy(transfer_buffer, cp, size);
@@ -387,9 +385,9 @@ static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
vb);
int rc;
- DBG("%s, field=%d, fmt name = %s\n", __func__, field, cam->fmt != NULL ?
- cam->fmt->name : "");
- if (cam->fmt == NULL)
+ DBG("%s, field=%d, fmt name = %s\n", __func__, field,
+ cam->fmt ? cam->fmt->name : "");
+ if (!cam->fmt)
return -EINVAL;
buf->vb.size = cam->width * cam->height * (cam->fmt->depth >> 3);
@@ -789,7 +787,7 @@ static int zr364xx_vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct zr364xx_camera *cam = video_drvdata(file);
char pixelformat_name[5];
- if (cam == NULL)
+ if (!cam)
return -ENODEV;
if (f->fmt.pix.pixelformat != V4L2_PIX_FMT_JPEG) {
@@ -819,7 +817,7 @@ static int zr364xx_vidioc_g_fmt_vid_cap(struct file *file, void *priv,
{
struct zr364xx_camera *cam;
- if (file == NULL)
+ if (!file)
return -ENODEV;
cam = video_drvdata(file);
@@ -981,13 +979,13 @@ static void read_pipe_completion(struct urb *purb)
pipe_info = purb->context;
_DBG("%s %p, status %d\n", __func__, purb, purb->status);
- if (pipe_info == NULL) {
+ if (!pipe_info) {
printk(KERN_ERR KBUILD_MODNAME ": no context!\n");
return;
}
cam = pipe_info->cam;
- if (cam == NULL) {
+ if (!cam) {
printk(KERN_ERR KBUILD_MODNAME ": no context!\n");
return;
}
@@ -1071,7 +1069,7 @@ static void zr364xx_stop_readpipe(struct zr364xx_camera *cam)
{
struct zr364xx_pipeinfo *pipe_info;
- if (cam == NULL) {
+ if (!cam) {
printk(KERN_ERR KBUILD_MODNAME ": invalid device\n");
return;
}
@@ -1275,7 +1273,7 @@ static int zr364xx_mmap(struct file *file, struct vm_area_struct *vma)
struct zr364xx_camera *cam = video_drvdata(file);
int ret;
- if (cam == NULL) {
+ if (!cam) {
DBG("%s: cam == NULL\n", __func__);
return -ENODEV;
}
@@ -1359,7 +1357,7 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
pipe->transfer_buffer = kzalloc(pipe->transfer_size,
GFP_KERNEL);
- if (pipe->transfer_buffer == NULL) {
+ if (!pipe->transfer_buffer) {
DBG("out of memory!\n");
return -ENOMEM;
}
@@ -1375,7 +1373,7 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
DBG("valloc %p, idx %lu, pdata %p\n",
&cam->buffer.frame[i], i,
cam->buffer.frame[i].lpvbits);
- if (cam->buffer.frame[i].lpvbits == NULL) {
+ if (!cam->buffer.frame[i].lpvbits) {
printk(KERN_INFO KBUILD_MODNAME ": out of memory. Using less frames\n");
break;
}
@@ -1423,11 +1421,9 @@ static int zr364xx_probe(struct usb_interface *intf,
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
- cam = kzalloc(sizeof(struct zr364xx_camera), GFP_KERNEL);
- if (cam == NULL) {
- dev_err(&udev->dev, "cam: out of memory !\n");
+ cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+ if (!cam)
return -ENOMEM;
- }
cam->v4l2_dev.release = zr364xx_release;
err = v4l2_device_register(&intf->dev, &cam->v4l2_dev);
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index d741a8e0fdac..a7c3464976f2 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -22,8 +22,37 @@
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
+static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ if (!n->ops || !n->ops->bound)
+ return 0;
+
+ return n->ops->bound(n, subdev, asd);
+}
+
+static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ if (!n->ops || !n->ops->unbind)
+ return;
+
+ n->ops->unbind(n, subdev, asd);
+}
+
+static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
+{
+ if (!n->ops || !n->ops->complete)
+ return 0;
+
+ return n->ops->complete(n);
+}
+
static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
@@ -60,8 +89,8 @@ static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);
-static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd)
+static struct v4l2_async_subdev *v4l2_async_find_match(
+ struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd)
{
bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
struct v4l2_async_subdev *asd;
@@ -95,22 +124,96 @@ static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *
return NULL;
}
-static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+/* Find the sub-device notifier registered by a sub-device driver. */
+static struct v4l2_async_notifier *v4l2_async_find_subdev_notifier(
+ struct v4l2_subdev *sd)
{
- int ret;
+ struct v4l2_async_notifier *n;
- if (notifier->bound) {
- ret = notifier->bound(notifier, sd, asd);
- if (ret < 0)
- return ret;
+ list_for_each_entry(n, &notifier_list, list)
+ if (n->sd == sd)
+ return n;
+
+ return NULL;
+}
+
+/* Get v4l2_device related to the notifier if one can be found. */
+static struct v4l2_device *v4l2_async_notifier_find_v4l2_dev(
+ struct v4l2_async_notifier *notifier)
+{
+ while (notifier->parent)
+ notifier = notifier->parent;
+
+ return notifier->v4l2_dev;
+}
+
+/*
+ * Return true if all child sub-device notifiers are complete, false otherwise.
+ */
+static bool v4l2_async_notifier_can_complete(
+ struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_subdev *sd;
+
+ if (!list_empty(&notifier->waiting))
+ return false;
+
+ list_for_each_entry(sd, &notifier->done, async_list) {
+ struct v4l2_async_notifier *subdev_notifier =
+ v4l2_async_find_subdev_notifier(sd);
+
+ if (subdev_notifier &&
+ !v4l2_async_notifier_can_complete(subdev_notifier))
+ return false;
}
- ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
+ return true;
+}
+
+/*
+ * Complete the master notifier if possible. This is done when all async
+ * sub-devices have been bound; v4l2_device is also available then.
+ */
+static int v4l2_async_notifier_try_complete(
+ struct v4l2_async_notifier *notifier)
+{
+ /* Quick check whether there are still more sub-devices here. */
+ if (!list_empty(&notifier->waiting))
+ return 0;
+
+ /* Check the entire notifier tree; find the root notifier first. */
+ while (notifier->parent)
+ notifier = notifier->parent;
+
+ /* This is root if it has v4l2_dev. */
+ if (!notifier->v4l2_dev)
+ return 0;
+
+ /* Is everything ready? */
+ if (!v4l2_async_notifier_can_complete(notifier))
+ return 0;
+
+ return v4l2_async_notifier_call_complete(notifier);
+}
+
+static int v4l2_async_notifier_try_all_subdevs(
+ struct v4l2_async_notifier *notifier);
+
+static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
+ struct v4l2_device *v4l2_dev,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct v4l2_async_notifier *subdev_notifier;
+ int ret;
+
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
if (ret < 0) {
- if (notifier->unbind)
- notifier->unbind(notifier, sd, asd);
+ v4l2_device_unregister_subdev(sd);
return ret;
}
@@ -122,8 +225,55 @@ static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
/* Move from the global subdevice list to notifier's done */
list_move(&sd->async_list, &notifier->done);
- if (list_empty(&notifier->waiting) && notifier->complete)
- return notifier->complete(notifier);
+ /*
+ * See if the sub-device has a notifier. If not, return here.
+ */
+ subdev_notifier = v4l2_async_find_subdev_notifier(sd);
+ if (!subdev_notifier || subdev_notifier->parent)
+ return 0;
+
+ /*
+ * Proceed with checking for the sub-device notifier's async
+ * sub-devices, and return the result. The error will be handled by the
+ * caller.
+ */
+ subdev_notifier->parent = notifier;
+
+ return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
+}
+
+/* Test all async sub-devices in a notifier for a match. */
+static int v4l2_async_notifier_try_all_subdevs(
+ struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_device *v4l2_dev =
+ v4l2_async_notifier_find_v4l2_dev(notifier);
+ struct v4l2_subdev *sd;
+
+ if (!v4l2_dev)
+ return 0;
+
+again:
+ list_for_each_entry(sd, &subdev_list, async_list) {
+ struct v4l2_async_subdev *asd;
+ int ret;
+
+ asd = v4l2_async_find_match(notifier, sd);
+ if (!asd)
+ continue;
+
+ ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * v4l2_async_match_notify() may lead to registering a
+ * new notifier and thus changing the async subdevs
+ * list. In order to proceed safely from here, restart
+ * parsing the list from the beginning.
+ */
+ goto again;
+ }
return 0;
}
@@ -134,24 +284,107 @@ static void v4l2_async_cleanup(struct v4l2_subdev *sd)
/* Subdevice driver will reprobe and put the subdev back onto the list */
list_del_init(&sd->async_list);
sd->asd = NULL;
- sd->dev = NULL;
}
-int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
- struct v4l2_async_notifier *notifier)
+/* Unbind all sub-devices in the notifier tree. */
+static void v4l2_async_notifier_unbind_all_subdevs(
+ struct v4l2_async_notifier *notifier)
{
struct v4l2_subdev *sd, *tmp;
+
+ list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
+ struct v4l2_async_notifier *subdev_notifier =
+ v4l2_async_find_subdev_notifier(sd);
+
+ if (subdev_notifier)
+ v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
+
+ v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_cleanup(sd);
+
+ list_move(&sd->async_list, &subdev_list);
+ }
+
+ notifier->parent = NULL;
+}
+
+/* See if an fwnode can be found in a notifier's lists. */
+static bool __v4l2_async_notifier_fwnode_has_async_subdev(
+ struct v4l2_async_notifier *notifier, struct fwnode_handle *fwnode)
+{
struct v4l2_async_subdev *asd;
+ struct v4l2_subdev *sd;
+
+ list_for_each_entry(asd, &notifier->waiting, list) {
+ if (asd->match_type != V4L2_ASYNC_MATCH_FWNODE)
+ continue;
+
+ if (asd->match.fwnode.fwnode == fwnode)
+ return true;
+ }
+
+ list_for_each_entry(sd, &notifier->done, async_list) {
+ if (WARN_ON(!sd->asd))
+ continue;
+
+ if (sd->asd->match_type != V4L2_ASYNC_MATCH_FWNODE)
+ continue;
+
+ if (sd->asd->match.fwnode.fwnode == fwnode)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Find out whether an async sub-device was set up for an fwnode already or
+ * whether it exists in a given notifier before @this_index.
+ */
+static bool v4l2_async_notifier_fwnode_has_async_subdev(
+ struct v4l2_async_notifier *notifier, struct fwnode_handle *fwnode,
+ unsigned int this_index)
+{
+ unsigned int j;
+
+ lockdep_assert_held(&list_lock);
+
+ /* Check that an fwnode is not being added more than once. */
+ for (j = 0; j < this_index; j++) {
+ struct v4l2_async_subdev *asd = notifier->subdevs[this_index];
+ struct v4l2_async_subdev *other_asd = notifier->subdevs[j];
+
+ if (other_asd->match_type == V4L2_ASYNC_MATCH_FWNODE &&
+ asd->match.fwnode.fwnode ==
+ other_asd->match.fwnode.fwnode)
+ return true;
+ }
+
+	/* Check that the fwnode does not already exist in other notifiers. */
+ list_for_each_entry(notifier, &notifier_list, list)
+ if (__v4l2_async_notifier_fwnode_has_async_subdev(
+ notifier, fwnode))
+ return true;
+
+ return false;
+}
+
+static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
+{
+ struct device *dev =
+ notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;
+ struct v4l2_async_subdev *asd;
+ int ret;
int i;
- if (!v4l2_dev || !notifier->num_subdevs ||
- notifier->num_subdevs > V4L2_MAX_SUBDEVS)
+ if (notifier->num_subdevs > V4L2_MAX_SUBDEVS)
return -EINVAL;
- notifier->v4l2_dev = v4l2_dev;
INIT_LIST_HEAD(&notifier->waiting);
INIT_LIST_HEAD(&notifier->done);
+ mutex_lock(&list_lock);
+
for (i = 0; i < notifier->num_subdevs; i++) {
asd = notifier->subdevs[i];
@@ -159,32 +392,32 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
case V4L2_ASYNC_MATCH_CUSTOM:
case V4L2_ASYNC_MATCH_DEVNAME:
case V4L2_ASYNC_MATCH_I2C:
+ break;
case V4L2_ASYNC_MATCH_FWNODE:
+ if (v4l2_async_notifier_fwnode_has_async_subdev(
+ notifier, asd->match.fwnode.fwnode, i)) {
+ dev_err(dev,
+ "fwnode has already been registered or in notifier's subdev list\n");
+ ret = -EEXIST;
+ goto err_unlock;
+ }
break;
default:
- dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
- "Invalid match type %u on %p\n",
+ dev_err(dev, "Invalid match type %u on %p\n",
asd->match_type, asd);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unlock;
}
list_add_tail(&asd->list, &notifier->waiting);
}
- mutex_lock(&list_lock);
+ ret = v4l2_async_notifier_try_all_subdevs(notifier);
+ if (ret < 0)
+ goto err_unbind;
- list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
- int ret;
-
- asd = v4l2_async_belongs(notifier, sd);
- if (!asd)
- continue;
-
- ret = v4l2_async_test_notify(notifier, sd, asd);
- if (ret < 0) {
- mutex_unlock(&list_lock);
- return ret;
- }
- }
+ ret = v4l2_async_notifier_try_complete(notifier);
+ if (ret < 0)
+ goto err_unbind;
/* Keep also completed notifiers on the list */
list_add(&notifier->list, &notifier_list);
@@ -192,90 +425,114 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
mutex_unlock(&list_lock);
return 0;
+
+err_unbind:
+ /*
+ * On failure, unbind all sub-devices registered through this notifier.
+ */
+ v4l2_async_notifier_unbind_all_subdevs(notifier);
+
+err_unlock:
+ mutex_unlock(&list_lock);
+
+ return ret;
+}
+
+int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
+ struct v4l2_async_notifier *notifier)
+{
+ int ret;
+
+ if (WARN_ON(!v4l2_dev || notifier->sd))
+ return -EINVAL;
+
+ notifier->v4l2_dev = v4l2_dev;
+
+ ret = __v4l2_async_notifier_register(notifier);
+ if (ret)
+ notifier->v4l2_dev = NULL;
+
+ return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);
-void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
+ struct v4l2_async_notifier *notifier)
{
- struct v4l2_subdev *sd, *tmp;
- unsigned int notif_n_subdev = notifier->num_subdevs;
- unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
- struct device **dev;
- int i = 0;
+ int ret;
- if (!notifier->v4l2_dev)
- return;
+ if (WARN_ON(!sd || notifier->v4l2_dev))
+ return -EINVAL;
- dev = kvmalloc_array(n_subdev, sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(notifier->v4l2_dev->dev,
- "Failed to allocate device cache!\n");
- }
+ notifier->sd = sd;
- mutex_lock(&list_lock);
+ ret = __v4l2_async_notifier_register(notifier);
+ if (ret)
+ notifier->sd = NULL;
- list_del(&notifier->list);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);
- list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
- struct device *d;
+static void __v4l2_async_notifier_unregister(
+ struct v4l2_async_notifier *notifier)
+{
+ if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
+ return;
- d = get_device(sd->dev);
+ v4l2_async_notifier_unbind_all_subdevs(notifier);
- v4l2_async_cleanup(sd);
+ notifier->sd = NULL;
+ notifier->v4l2_dev = NULL;
- /* If we handled USB devices, we'd have to lock the parent too */
- device_release_driver(d);
+ list_del(&notifier->list);
+}
- if (notifier->unbind)
- notifier->unbind(notifier, sd, sd->asd);
+void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+{
+ mutex_lock(&list_lock);
- /*
- * Store device at the device cache, in order to call
- * put_device() on the final step
- */
- if (dev)
- dev[i++] = d;
- else
- put_device(d);
- }
+ __v4l2_async_notifier_unregister(notifier);
mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL(v4l2_async_notifier_unregister);
- /*
- * Call device_attach() to reprobe devices
- *
- * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
- * executed.
- */
- while (i--) {
- struct device *d = dev[i];
-
- if (d && device_attach(d) < 0) {
- const char *name = "(none)";
- int lock = device_trylock(d);
-
- if (lock && d->driver)
- name = d->driver->name;
- dev_err(d, "Failed to re-probe to %s\n", name);
- if (lock)
- device_unlock(d);
+void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
+{
+ unsigned int i;
+
+ if (!notifier || !notifier->max_subdevs)
+ return;
+
+ for (i = 0; i < notifier->num_subdevs; i++) {
+ struct v4l2_async_subdev *asd = notifier->subdevs[i];
+
+ switch (asd->match_type) {
+ case V4L2_ASYNC_MATCH_FWNODE:
+ fwnode_handle_put(asd->match.fwnode.fwnode);
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ break;
}
- put_device(d);
+
+ kfree(asd);
}
- kvfree(dev);
- notifier->v4l2_dev = NULL;
+ notifier->max_subdevs = 0;
+ notifier->num_subdevs = 0;
- /*
- * Don't care about the waiting list, it is initialised and populated
- * upon notifier registration.
- */
+ kvfree(notifier->subdevs);
+ notifier->subdevs = NULL;
}
-EXPORT_SYMBOL(v4l2_async_notifier_unregister);
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
+ struct v4l2_async_notifier *subdev_notifier;
struct v4l2_async_notifier *notifier;
+ int ret;
/*
* No reference taken. The reference is held by the device
@@ -290,41 +547,74 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
INIT_LIST_HEAD(&sd->async_list);
list_for_each_entry(notifier, &notifier_list, list) {
- struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);
- if (asd) {
- int ret = v4l2_async_test_notify(notifier, sd, asd);
- mutex_unlock(&list_lock);
- return ret;
- }
+ struct v4l2_device *v4l2_dev =
+ v4l2_async_notifier_find_v4l2_dev(notifier);
+ struct v4l2_async_subdev *asd;
+
+ if (!v4l2_dev)
+ continue;
+
+ asd = v4l2_async_find_match(notifier, sd);
+ if (!asd)
+ continue;
+
+ ret = v4l2_async_match_notify(notifier, notifier->v4l2_dev, sd,
+ asd);
+ if (ret)
+ goto err_unbind;
+
+ ret = v4l2_async_notifier_try_complete(notifier);
+ if (ret)
+ goto err_unbind;
+
+ goto out_unlock;
}
/* None matched, wait for hot-plugging */
list_add(&sd->async_list, &subdev_list);
+out_unlock:
mutex_unlock(&list_lock);
return 0;
+
+err_unbind:
+ /*
+ * Complete failed. Unbind the sub-devices bound through registering
+ * this async sub-device.
+ */
+ subdev_notifier = v4l2_async_find_subdev_notifier(sd);
+ if (subdev_notifier)
+ v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
+
+ if (sd->asd)
+ v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_cleanup(sd);
+
+ mutex_unlock(&list_lock);
+
+ return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
- struct v4l2_async_notifier *notifier = sd->notifier;
+ mutex_lock(&list_lock);
- if (!sd->asd) {
- if (!list_empty(&sd->async_list))
- v4l2_async_cleanup(sd);
- return;
- }
+ __v4l2_async_notifier_unregister(sd->subdev_notifier);
+ v4l2_async_notifier_cleanup(sd->subdev_notifier);
+ kfree(sd->subdev_notifier);
+ sd->subdev_notifier = NULL;
- mutex_lock(&list_lock);
+ if (sd->asd) {
+ struct v4l2_async_notifier *notifier = sd->notifier;
- list_add(&sd->asd->list, &notifier->waiting);
+ list_add(&sd->asd->list, &notifier->waiting);
- v4l2_async_cleanup(sd);
+ v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ }
- if (notifier->unbind)
- notifier->unbind(notifier, sd, sd->asd);
+ v4l2_async_cleanup(sd);
mutex_unlock(&list_lock);
}
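With the v4l2-async rework above, notifier callbacks live in an ops structure (the n->ops->bound/unbind/complete calls at the top of this file), and a sub-device driver can register its own notifier with v4l2_async_subdev_notifier_register(), so complete() is only propagated once the whole notifier tree is bound. An outline of how a driver would hook into this, assuming the ops type is struct v4l2_async_notifier_ops as used by the wrappers above:

static int example_bound(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *subdev,
			 struct v4l2_async_subdev *asd)
{
	/* create media links, remember the remote sub-device, ... */
	return 0;
}

static const struct v4l2_async_notifier_ops example_notifier_ops = {
	.bound = example_bound,
};

static int example_register(struct v4l2_subdev *sd,
			    struct v4l2_async_notifier *notifier)
{
	notifier->ops = &example_notifier_ops;

	/* sd's own notifier; the v4l2_dev is found via the parent notifier */
	return v4l2_async_subdev_notifier_register(sd, notifier);
}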
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index dd1db678718c..cbb2ef43945f 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1227,6 +1227,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
}
EXPORT_SYMBOL(v4l2_ctrl_fill);
+static u32 user_flags(const struct v4l2_ctrl *ctrl)
+{
+ u32 flags = ctrl->flags;
+
+ if (ctrl->is_ptr)
+ flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
+
+ return flags;
+}
+
static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
{
memset(ev->reserved, 0, sizeof(ev->reserved));
@@ -1234,7 +1244,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change
ev->id = ctrl->id;
ev->u.ctrl.changes = changes;
ev->u.ctrl.type = ctrl->type;
- ev->u.ctrl.flags = ctrl->flags;
+ ev->u.ctrl.flags = user_flags(ctrl);
if (ctrl->is_ptr)
ev->u.ctrl.value64 = 0;
else
@@ -2003,10 +2013,6 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
handler_set_err(hdl, err);
return NULL;
}
- if (type == V4L2_CTRL_TYPE_BITMASK && ((def & ~max) || min || step)) {
- handler_set_err(hdl, -ERANGE);
- return NULL;
- }
if (is_array &&
(type == V4L2_CTRL_TYPE_BUTTON ||
type == V4L2_CTRL_TYPE_CTRL_CLASS)) {
@@ -2577,10 +2583,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
else
qc->id = ctrl->id;
strlcpy(qc->name, ctrl->name, sizeof(qc->name));
- qc->flags = ctrl->flags;
+ qc->flags = user_flags(ctrl);
qc->type = ctrl->type;
- if (ctrl->is_ptr)
- qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
qc->elem_size = ctrl->elem_size;
qc->elems = ctrl->elems;
qc->nr_of_dims = ctrl->nr_of_dims;
@@ -2818,7 +2822,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
{
if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL)
- return list_empty(&hdl->ctrl_refs) ? -EINVAL : 0;
+ return 0;
return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
}
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 40b2fbfe8865..681b192420d9 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -19,6 +19,7 @@
*/
#include <linux/acpi.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/property.h>
@@ -26,7 +27,9 @@
#include <linux/string.h>
#include <linux/types.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
enum v4l2_fwnode_bus_type {
V4L2_FWNODE_BUS_TYPE_GUESS = 0,
@@ -181,25 +184,6 @@ v4l2_fwnode_endpoint_parse_csi1_bus(struct fwnode_handle *fwnode,
vep->bus_type = V4L2_MBUS_CSI1;
}
-/**
- * v4l2_fwnode_endpoint_parse() - parse all fwnode node properties
- * @fwnode: pointer to the endpoint's fwnode handle
- * @vep: pointer to the V4L2 fwnode data structure
- *
- * All properties are optional. If none are found, we don't set any flags. This
- * means the port has a static configuration and no properties have to be
- * specified explicitly. If any properties that identify the bus as parallel
- * are found and slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if
- * we recognise the bus as serial CSI-2 and clock-noncontinuous isn't set, we
- * set the V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag. The caller should hold a
- * reference to @fwnode.
- *
- * NOTE: This function does not parse properties the size of which is variable
- * without a low fixed limit. Please use v4l2_fwnode_endpoint_alloc_parse() in
- * new drivers instead.
- *
- * Return: 0 on success or a negative error code on failure.
- */
int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep)
{
@@ -239,14 +223,6 @@ int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_parse);
-/*
- * v4l2_fwnode_endpoint_free() - free the V4L2 fwnode acquired by
- * v4l2_fwnode_endpoint_alloc_parse()
- * @vep - the V4L2 fwnode the resources of which are to be released
- *
- * It is safe to call this function with NULL argument or on a V4L2 fwnode the
- * parsing of which failed.
- */
void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
{
if (IS_ERR_OR_NULL(vep))
@@ -257,29 +233,6 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free);
-/**
- * v4l2_fwnode_endpoint_alloc_parse() - parse all fwnode node properties
- * @fwnode: pointer to the endpoint's fwnode handle
- *
- * All properties are optional. If none are found, we don't set any flags. This
- * means the port has a static configuration and no properties have to be
- * specified explicitly. If any properties that identify the bus as parallel
- * are found and slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if
- * we recognise the bus as serial CSI-2 and clock-noncontinuous isn't set, we
- * set the V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag. The caller should hold a
- * reference to @fwnode.
- *
- * v4l2_fwnode_endpoint_alloc_parse() has two important differences to
- * v4l2_fwnode_endpoint_parse():
- *
- * 1. It also parses variable size data.
- *
- * 2. The memory it has allocated to store the variable size data must be freed
- * using v4l2_fwnode_endpoint_free() when no longer needed.
- *
- * Return: Pointer to v4l2_fwnode_endpoint if successful, on an error pointer
- * on error.
- */
struct v4l2_fwnode_endpoint *v4l2_fwnode_endpoint_alloc_parse(
struct fwnode_handle *fwnode)
{
@@ -322,24 +275,6 @@ out_err:
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse);
-/**
- * v4l2_fwnode_endpoint_parse_link() - parse a link between two endpoints
- * @__fwnode: pointer to the endpoint's fwnode at the local end of the link
- * @link: pointer to the V4L2 fwnode link data structure
- *
- * Fill the link structure with the local and remote nodes and port numbers.
- * The local_node and remote_node fields are set to point to the local and
- * remote port's parent nodes respectively (the port parent node being the
- * parent node of the port node if that node isn't a 'ports' node, or the
- * grand-parent node of the port node otherwise).
- *
- * A reference is taken to both the local and remote nodes, the caller must use
- * v4l2_fwnode_endpoint_put_link() to drop the references when done with the
- * link.
- *
- * Return: 0 on success, or -ENOLINK if the remote endpoint fwnode can't be
- * found.
- */
int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode,
struct v4l2_fwnode_link *link)
{
@@ -374,13 +309,6 @@ int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode,
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link);
-/**
- * v4l2_fwnode_put_link() - drop references to nodes in a link
- * @link: pointer to the V4L2 fwnode link data structure
- *
- * Drop references to the local and remote nodes in the link. This function
- * must be called on every link parsed with v4l2_fwnode_parse_link().
- */
void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link)
{
fwnode_handle_put(link->local_node);
@@ -388,6 +316,630 @@ void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link)
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_put_link);
+static int v4l2_async_notifier_realloc(struct v4l2_async_notifier *notifier,
+ unsigned int max_subdevs)
+{
+ struct v4l2_async_subdev **subdevs;
+
+ if (max_subdevs <= notifier->max_subdevs)
+ return 0;
+
+ subdevs = kvmalloc_array(
+ max_subdevs, sizeof(*notifier->subdevs),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!subdevs)
+ return -ENOMEM;
+
+ if (notifier->subdevs) {
+ memcpy(subdevs, notifier->subdevs,
+ sizeof(*subdevs) * notifier->num_subdevs);
+
+ kvfree(notifier->subdevs);
+ }
+
+ notifier->subdevs = subdevs;
+ notifier->max_subdevs = max_subdevs;
+
+ return 0;
+}
+
+static int v4l2_async_notifier_fwnode_parse_endpoint(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *endpoint, unsigned int asd_struct_size,
+ int (*parse_endpoint)(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd))
+{
+ struct v4l2_async_subdev *asd;
+ struct v4l2_fwnode_endpoint *vep;
+ int ret = 0;
+
+ asd = kzalloc(asd_struct_size, GFP_KERNEL);
+ if (!asd)
+ return -ENOMEM;
+
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ asd->match.fwnode.fwnode =
+ fwnode_graph_get_remote_port_parent(endpoint);
+ if (!asd->match.fwnode.fwnode) {
+ dev_warn(dev, "bad remote port parent\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ vep = v4l2_fwnode_endpoint_alloc_parse(endpoint);
+ if (IS_ERR(vep)) {
+ ret = PTR_ERR(vep);
+ dev_warn(dev, "unable to parse V4L2 fwnode endpoint (%d)\n",
+ ret);
+ goto out_err;
+ }
+
+ ret = parse_endpoint ? parse_endpoint(dev, vep, asd) : 0;
+ if (ret == -ENOTCONN)
+ dev_dbg(dev, "ignoring port@%u/endpoint@%u\n", vep->base.port,
+ vep->base.id);
+ else if (ret < 0)
+ dev_warn(dev,
+ "driver could not parse port@%u/endpoint@%u (%d)\n",
+ vep->base.port, vep->base.id, ret);
+ v4l2_fwnode_endpoint_free(vep);
+ if (ret < 0)
+ goto out_err;
+
+ notifier->subdevs[notifier->num_subdevs] = asd;
+ notifier->num_subdevs++;
+
+ return 0;
+
+out_err:
+ fwnode_handle_put(asd->match.fwnode.fwnode);
+ kfree(asd);
+
+ return ret == -ENOTCONN ? 0 : ret;
+}
+
+static int __v4l2_async_notifier_parse_fwnode_endpoints(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size, unsigned int port, bool has_port,
+ int (*parse_endpoint)(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd))
+{
+ struct fwnode_handle *fwnode;
+ unsigned int max_subdevs = notifier->max_subdevs;
+ int ret;
+
+ if (WARN_ON(asd_struct_size < sizeof(struct v4l2_async_subdev)))
+ return -EINVAL;
+
+ for (fwnode = NULL; (fwnode = fwnode_graph_get_next_endpoint(
+ dev_fwnode(dev), fwnode)); ) {
+ struct fwnode_handle *dev_fwnode;
+ bool is_available;
+
+ dev_fwnode = fwnode_graph_get_port_parent(fwnode);
+ is_available = fwnode_device_is_available(dev_fwnode);
+ fwnode_handle_put(dev_fwnode);
+ if (!is_available)
+ continue;
+
+ if (has_port) {
+ struct fwnode_endpoint ep;
+
+ ret = fwnode_graph_parse_endpoint(fwnode, &ep);
+ if (ret) {
+ fwnode_handle_put(fwnode);
+ return ret;
+ }
+
+ if (ep.port != port)
+ continue;
+ }
+ max_subdevs++;
+ }
+
+ /* No subdevs to add? Return here. */
+ if (max_subdevs == notifier->max_subdevs)
+ return 0;
+
+ ret = v4l2_async_notifier_realloc(notifier, max_subdevs);
+ if (ret)
+ return ret;
+
+ for (fwnode = NULL; (fwnode = fwnode_graph_get_next_endpoint(
+ dev_fwnode(dev), fwnode)); ) {
+ struct fwnode_handle *dev_fwnode;
+ bool is_available;
+
+ dev_fwnode = fwnode_graph_get_port_parent(fwnode);
+ is_available = fwnode_device_is_available(dev_fwnode);
+ fwnode_handle_put(dev_fwnode);
+ if (!is_available)
+ continue;
+
+ if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (has_port) {
+ struct fwnode_endpoint ep;
+
+ ret = fwnode_graph_parse_endpoint(fwnode, &ep);
+ if (ret)
+ break;
+
+ if (ep.port != port)
+ continue;
+ }
+
+ ret = v4l2_async_notifier_fwnode_parse_endpoint(
+ dev, notifier, fwnode, asd_struct_size, parse_endpoint);
+ if (ret < 0)
+ break;
+ }
+
+ fwnode_handle_put(fwnode);
+
+ return ret;
+}
+
+int v4l2_async_notifier_parse_fwnode_endpoints(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size,
+ int (*parse_endpoint)(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd))
+{
+ return __v4l2_async_notifier_parse_fwnode_endpoints(
+ dev, notifier, asd_struct_size, 0, false, parse_endpoint);
+}
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_endpoints);
+
+int v4l2_async_notifier_parse_fwnode_endpoints_by_port(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size, unsigned int port,
+ int (*parse_endpoint)(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd))
+{
+ return __v4l2_async_notifier_parse_fwnode_endpoints(
+ dev, notifier, asd_struct_size, port, true, parse_endpoint);
+}
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_endpoints_by_port);
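As background for the two exported helpers above, a minimal sketch (not part of this patch; the my_* names are hypothetical) of how a bridge driver might use them: the driver supplies the size of its own async subdev structure, which must embed struct v4l2_async_subdev as its first member, plus an optional per-endpoint callback.

	#include <media/v4l2-async.h>
	#include <media/v4l2-fwnode.h>

	struct my_async_subdev {
		struct v4l2_async_subdev asd;	/* must be the first member */
		unsigned int port;		/* driver-specific example field */
	};

	static int my_parse_endpoint(struct device *dev,
				     struct v4l2_fwnode_endpoint *vep,
				     struct v4l2_async_subdev *asd)
	{
		struct my_async_subdev *mas =
			container_of(asd, struct my_async_subdev, asd);

		/* Returning -ENOTCONN skips the endpoint without failing. */
		if (vep->base.port > 1)
			return -ENOTCONN;

		mas->port = vep->base.port;

		return 0;
	}

	/* In the bridge driver's probe(), with a zero-initialised notifier: */
	ret = v4l2_async_notifier_parse_fwnode_endpoints(dev, &notifier,
			sizeof(struct my_async_subdev), my_parse_endpoint);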
+
+/*
+ * v4l2_fwnode_reference_parse - parse references for async sub-devices
+ * @dev: the device node the properties of which are parsed for references
+ * @notifier: the async notifier where the async subdevs will be added
+ * @prop: the name of the property
+ *
+ * Return: 0 on success
+ * -ENOENT if no entries were found
+ * -ENOMEM if memory allocation failed
+ * -EINVAL if property parsing failed
+ */
+static int v4l2_fwnode_reference_parse(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ const char *prop)
+{
+ struct fwnode_reference_args args;
+ unsigned int index;
+ int ret;
+
+ for (index = 0;
+ !(ret = fwnode_property_get_reference_args(
+ dev_fwnode(dev), prop, NULL, 0, index, &args));
+ index++)
+ fwnode_handle_put(args.fwnode);
+
+ if (!index)
+ return -ENOENT;
+
+ /*
+ * Note that right now both -ENODATA and -ENOENT may signal
+ * out-of-bounds access. Return the error in cases other than that.
+ */
+ if (ret != -ENOENT && ret != -ENODATA)
+ return ret;
+
+ ret = v4l2_async_notifier_realloc(notifier,
+ notifier->num_subdevs + index);
+ if (ret)
+ return ret;
+
+ for (index = 0; !fwnode_property_get_reference_args(
+ dev_fwnode(dev), prop, NULL, 0, index, &args);
+ index++) {
+ struct v4l2_async_subdev *asd;
+
+ if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ asd = kzalloc(sizeof(*asd), GFP_KERNEL);
+ if (!asd) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ notifier->subdevs[notifier->num_subdevs] = asd;
+ asd->match.fwnode.fwnode = args.fwnode;
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ notifier->num_subdevs++;
+ }
+
+ return 0;
+
+error:
+ fwnode_handle_put(args.fwnode);
+ return ret;
+}
+
+/*
+ * v4l2_fwnode_reference_get_int_prop - parse a reference with integer
+ * arguments
+ * @fwnode: fwnode to read @prop from
+ * @prop: the name of the property
+ * @index: the index of the reference to get
+ * @props: the array of integer property names
+ * @nprops: the number of integer property names in @props
+ *
+ * First find an fwnode referred to by the reference at @index in @prop.
+ *
+ * Then, starting from that fwnode, descend one level per property in @props
+ * (@nprops levels in total): at each step, select the child node whose value
+ * for the current property matches the reference's integer argument at the
+ * same index.
+ *
+ * The child fwnode reached at the end of the iteration is then returned to
+ * the caller.
+ *
+ * The core reason for this is that you cannot refer to just any node in ACPI.
+ * So to refer to an endpoint (easy in DT) you need to refer to a device, then
+ * provide a list of (property name, property value) tuples where each tuple
+ * uniquely identifies a child node. The first tuple identifies a child directly
+ * underneath the device fwnode, the next tuple identifies a child node
+ * underneath the fwnode identified by the previous tuple, etc. until you
+ * reach the fwnode you need.
+ *
+ * An example with a graph, as defined in Documentation/acpi/dsd/graph.txt:
+ *
+ * Scope (\_SB.PCI0.I2C2)
+ * {
+ * Device (CAM0)
+ * {
+ * Name (_DSD, Package () {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () {
+ * "compatible",
+ * Package () { "nokia,smia" }
+ * },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "port0", "PRT0" },
+ * }
+ * })
+ * Name (PRT0, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "port", 0 },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "endpoint0", "EP00" },
+ * }
+ * })
+ * Name (EP00, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "endpoint", 0 },
+ * Package () {
+ * "remote-endpoint",
+ * Package() {
+ * \_SB.PCI0.ISP, 4, 0
+ * }
+ * },
+ * }
+ * })
+ * }
+ * }
+ *
+ * Scope (\_SB.PCI0)
+ * {
+ * Device (ISP)
+ * {
+ * Name (_DSD, Package () {
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "port4", "PRT4" },
+ * }
+ * })
+ *
+ * Name (PRT4, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "port", 4 },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "endpoint0", "EP40" },
+ * }
+ * })
+ *
+ * Name (EP40, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "endpoint", 0 },
+ * Package () {
+ * "remote-endpoint",
+ * Package () {
+ * \_SB.PCI0.I2C2.CAM0,
+ * 0, 0
+ * }
+ * },
+ * }
+ * })
+ * }
+ * }
+ *
+ * From the EP40 node under ISP device, you could parse the graph remote
+ * endpoint using v4l2_fwnode_reference_get_int_prop with these arguments:
+ *
+ * @fwnode: fwnode referring to EP40 under ISP.
+ * @prop: "remote-endpoint"
+ * @index: 0
+ * @props: "port", "endpoint"
+ * @nprops: 2
+ *
+ * And you'd get back fwnode referring to EP00 under CAM0.
+ *
+ * The same works the other way around: if you use EP00 under CAM0 as the
+ * fwnode, you'll get fwnode referring to EP40 under ISP.
+ *
+ * The same example in DT syntax would look like this:
+ *
+ * cam: cam0 {
+ * compatible = "nokia,smia";
+ *
+ * port {
+ * port = <0>;
+ * endpoint {
+ * endpoint = <0>;
+ * remote-endpoint = <&isp 4 0>;
+ * };
+ * };
+ * };
+ *
+ * isp: isp {
+ * ports {
+ * port@4 {
+ * port = <4>;
+ * endpoint {
+ * endpoint = <0>;
+ * remote-endpoint = <&cam 0 0>;
+ * };
+ * };
+ * };
+ * };
+ *
+ * Return: the fwnode handle on success, or an error pointer:
+ * -ENOENT if no entries (or the property itself) were found
+ * -EINVAL if property parsing otherwise failed
+ * -ENOMEM if memory allocation failed
+ */
+static struct fwnode_handle *v4l2_fwnode_reference_get_int_prop(
+ struct fwnode_handle *fwnode, const char *prop, unsigned int index,
+ const char * const *props, unsigned int nprops)
+{
+ struct fwnode_reference_args fwnode_args;
+ unsigned int *args = fwnode_args.args;
+ struct fwnode_handle *child;
+ int ret;
+
+ /*
+ * Obtain remote fwnode as well as the integer arguments.
+ *
+ * Note that right now both -ENODATA and -ENOENT may signal
+ * out-of-bounds access. Return -ENOENT in that case.
+ */
+ ret = fwnode_property_get_reference_args(fwnode, prop, NULL, nprops,
+ index, &fwnode_args);
+ if (ret)
+ return ERR_PTR(ret == -ENODATA ? -ENOENT : ret);
+
+ /*
+ * Find a node in the tree under the referred fwnode corresponding to
+ * the integer arguments.
+ */
+ fwnode = fwnode_args.fwnode;
+ while (nprops--) {
+ u32 val;
+
+ /* Loop over all child nodes under fwnode. */
+ fwnode_for_each_child_node(fwnode, child) {
+ if (fwnode_property_read_u32(child, *props, &val))
+ continue;
+
+ /* Found property, see if its value matches. */
+ if (val == *args)
+ break;
+ }
+
+ fwnode_handle_put(fwnode);
+
+ /* No property found; return an error here. */
+ if (!child) {
+ fwnode = ERR_PTR(-ENOENT);
+ break;
+ }
+
+ props++;
+ args++;
+ fwnode = child;
+ }
+
+ return fwnode;
+}
+
+/*
+ * v4l2_fwnode_reference_parse_int_props - parse references for async
+ * sub-devices
+ * @dev: struct device pointer
+ * @notifier: notifier for @dev
+ * @prop: the name of the property
+ * @props: the array of integer property names
+ * @nprops: the number of integer properties
+ *
+ * Use v4l2_fwnode_reference_get_int_prop() to find the fwnodes referenced by
+ * property @prop, matching child nodes by the integer properties listed in
+ * @props. Then set up V4L2 async sub-devices for those fwnodes in the
+ * notifier.
+ *
+ * While it is technically possible to use this function on DT, it is only
+ * meaningful on ACPI. In DT you can refer to any node in the tree, whereas
+ * on ACPI the references are limited to devices.
+ *
+ * Return: 0 on success
+ * -ENOENT if no entries (or the property itself) were found
+ * -EINVAL if property parsing otherwise failed
+ * -ENOMEM if memory allocation failed
+ */
+static int v4l2_fwnode_reference_parse_int_props(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ const char *prop, const char * const *props, unsigned int nprops)
+{
+ struct fwnode_handle *fwnode;
+ unsigned int index;
+ int ret;
+
+ for (index = 0; !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop(
+ dev_fwnode(dev), prop, index, props,
+ nprops))); index++)
+ fwnode_handle_put(fwnode);
+
+ /*
+ * Note that right now both -ENODATA and -ENOENT may signal
+ * out-of-bounds access. Return the error in cases other than that.
+ */
+ if (PTR_ERR(fwnode) != -ENOENT && PTR_ERR(fwnode) != -ENODATA)
+ return PTR_ERR(fwnode);
+
+ ret = v4l2_async_notifier_realloc(notifier,
+ notifier->num_subdevs + index);
+ if (ret)
+ return -ENOMEM;
+
+ for (index = 0; !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop(
+ dev_fwnode(dev), prop, index, props,
+ nprops))); index++) {
+ struct v4l2_async_subdev *asd;
+
+ if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ asd = kzalloc(sizeof(struct v4l2_async_subdev), GFP_KERNEL);
+ if (!asd) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ notifier->subdevs[notifier->num_subdevs] = asd;
+ asd->match.fwnode.fwnode = fwnode;
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ notifier->num_subdevs++;
+ }
+
+ return PTR_ERR(fwnode) == -ENOENT ? 0 : PTR_ERR(fwnode);
+
+error:
+ fwnode_handle_put(fwnode);
+ return ret;
+}
+
+int v4l2_async_notifier_parse_fwnode_sensor_common(
+ struct device *dev, struct v4l2_async_notifier *notifier)
+{
+ static const char * const led_props[] = { "led" };
+ static const struct {
+ const char *name;
+ const char * const *props;
+ unsigned int nprops;
+ } props[] = {
+ { "flash-leds", led_props, ARRAY_SIZE(led_props) },
+ { "lens-focus", NULL, 0 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ int ret;
+
+ if (props[i].props && is_acpi_node(dev_fwnode(dev)))
+ ret = v4l2_fwnode_reference_parse_int_props(
+ dev, notifier, props[i].name,
+ props[i].props, props[i].nprops);
+ else
+ ret = v4l2_fwnode_reference_parse(
+ dev, notifier, props[i].name);
+ if (ret && ret != -ENOENT) {
+ dev_warn(dev, "parsing property \"%s\" failed (%d)\n",
+ props[i].name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_sensor_common);
+
+int v4l2_async_register_subdev_sensor_common(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_notifier *notifier;
+ int ret;
+
+ if (WARN_ON(!sd->dev))
+ return -ENODEV;
+
+ notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+ if (!notifier)
+ return -ENOMEM;
+
+ ret = v4l2_async_notifier_parse_fwnode_sensor_common(sd->dev,
+ notifier);
+ if (ret < 0)
+ goto out_cleanup;
+
+ ret = v4l2_async_subdev_notifier_register(sd, notifier);
+ if (ret < 0)
+ goto out_cleanup;
+
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0)
+ goto out_unregister;
+
+ sd->subdev_notifier = notifier;
+
+ return 0;
+
+out_unregister:
+ v4l2_async_notifier_unregister(notifier);
+
+out_cleanup:
+ v4l2_async_notifier_cleanup(notifier);
+ kfree(notifier);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_register_subdev_sensor_common);
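A short usage sketch for the registration helper above (illustrative only; the my_sensor names are hypothetical): a sensor driver calls it in place of v4l2_async_register_subdev() so that devices referenced by its "flash-leds" and "lens-focus" firmware properties are also waited for.

	/* Tail of a hypothetical sensor probe(); sd->dev must be set first. */
	sensor->sd.dev = &client->dev;

	ret = v4l2_async_register_subdev_sensor_common(&sensor->sd);
	if (ret < 0)
		return ret;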
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index b60a6b0841d1..79614992ee21 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -730,9 +730,12 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only)
break;
case V4L2_FRMSIZE_TYPE_STEPWISE:
pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n",
- p->stepwise.min_width, p->stepwise.min_height,
- p->stepwise.step_width, p->stepwise.step_height,
- p->stepwise.max_width, p->stepwise.max_height);
+ p->stepwise.min_width,
+ p->stepwise.min_height,
+ p->stepwise.max_width,
+ p->stepwise.max_height,
+ p->stepwise.step_width,
+ p->stepwise.step_height);
break;
case V4L2_FRMSIZE_TYPE_CONTINUOUS:
/* fall through */
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 9f389f36566d..a9806ba6116d 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -479,7 +479,7 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
{
struct vb2_dc_buf *buf;
struct frame_vector *vec;
- unsigned long offset;
+ unsigned int offset;
int n_pages, i;
int ret = 0;
struct sg_table *sgt;
@@ -507,7 +507,7 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
buf->dev = dev;
buf->dma_dir = dma_dir;
- offset = vaddr & ~PAGE_MASK;
+ offset = lower_32_bits(offset_in_page(vaddr));
vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
dma_dir == DMA_BIDIRECTIONAL);
if (IS_ERR(vec)) {
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 84eab28665f3..7a93400eea2a 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -99,7 +99,7 @@ module_param(mpt_channel_mapping, int, 0);
MODULE_PARM_DESC(mpt_channel_mapping, " Mapping id's to channels (default=0)");
static int mpt_debug_level;
-static int mpt_set_debug_level(const char *val, struct kernel_param *kp);
+static int mpt_set_debug_level(const char *val, const struct kernel_param *kp);
module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int,
&mpt_debug_level, 0600);
MODULE_PARM_DESC(mpt_debug_level,
@@ -242,7 +242,7 @@ pci_enable_io_access(struct pci_dev *pdev)
pci_write_config_word(pdev, PCI_COMMAND, command_reg);
}
-static int mpt_set_debug_level(const char *val, struct kernel_param *kp)
+static int mpt_set_debug_level(const char *val, const struct kernel_param *kp)
{
int ret = param_set_int(val, kp);
MPT_ADAPTER *ioc;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index fc5e4fef89d2..1d20a800e967 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -510,6 +510,19 @@ config INTEL_SOC_PMIC_CHTWC
available before any devices using it are probed. This option also
causes the designware-i2c driver to be builtin for the same reason.
+config INTEL_SOC_PMIC_CHTDC_TI
+ tristate "Support for Intel Cherry Trail Dollar Cove TI PMIC"
+ depends on GPIOLIB
+ depends on I2C
+ depends on ACPI
+ depends on X86
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ Select this option to support the Dollar Cove (TI version) PMIC
+ found on some Intel Cherry Trail systems.
+
config MFD_INTEL_LPSS
tristate
select COMMON_CLK
@@ -1057,6 +1070,22 @@ config MFD_SMSC
To compile this driver as a module, choose M here: the
module will be called smsc.
+config MFD_SC27XX_PMIC
+ tristate "Spreadtrum SC27xx PMICs"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on SPI_MASTER
+ select MFD_CORE
+ select REGMAP_SPI
+ select REGMAP_IRQ
+ help
+ This enables support for the Spreadtrum SC27xx PMICs with SPI
+ interface. The SC27xx series PMICs integrate power management,
+ audio codec, battery management and user interface support
+ functions (such as RTC, Type-C, indicators and so on) in a single chip.
+
+ This driver provides common support for accessing the SC27xx PMICs,
+ and it also adds the irq_chip parts for handling the PMIC chip events.
+
config ABX500_CORE
bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
default y if ARCH_U300 || ARCH_U8500 || COMPILE_TEST
@@ -1338,7 +1367,7 @@ config MFD_TPS65090
config MFD_TPS65217
tristate "TI TPS65217 Power Management / White LED chips"
- depends on I2C
+ depends on I2C && OF
select MFD_CORE
select REGMAP_I2C
select IRQ_DOMAIN
@@ -1400,7 +1429,7 @@ config MFD_TI_LP87565
config MFD_TPS65218
tristate "TI TPS65218 Power Management chips"
- depends on I2C
+ depends on I2C && OF
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -1408,8 +1437,7 @@ config MFD_TPS65218
If you say yes here you get support for the TPS65218 series of
Power Management chips.
These include voltage regulators, gpio and other features
- that are often used in portable devices. Only regulator
- component is currently supported.
+ that are often used in portable devices.
This driver can also be built as a module. If so, the module
will be called tps65218.
@@ -1746,6 +1774,20 @@ config MFD_WM8994
core support for the WM8994, in order to use the actual
functionaltiy of the device other drivers must be enabled.
+config MFD_WM97xx
+ tristate "Wolfson Microelectronics WM97xx"
+ select MFD_CORE
+ select REGMAP_AC97
+ select AC97_BUS_COMPAT
+ depends on AC97_BUS_NEW
+ help
+ The WM9705, WM9712 and WM9713 are highly integrated hi-fi CODECs
+ designed for smartphone applications. As well as audio functionality
+ they have on-board GPIO and touchscreen functionality, which is
+ supported via the relevant subsystems. This driver provides core
+ support for the WM97xx; in order to use the actual functionality of
+ the device, other drivers must be enabled.
+
config MFD_STW481X
tristate "Support for ST Microelectronics STw481x"
depends on I2C && (ARCH_NOMADIK || COMPILE_TEST)
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 8703ff17998e..d9474ade32e6 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_MFD_WM8350) += wm8350.o
obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
wm8994-objs := wm8994-core.o wm8994-irq.o wm8994-regmap.o
obj-$(CONFIG_MFD_WM8994) += wm8994.o
+obj-$(CONFIG_MFD_WM97xx) += wm97xx-core.o
obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
@@ -219,6 +220,7 @@ intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
obj-$(CONFIG_INTEL_SOC_PMIC_BXTWC) += intel_soc_pmic_bxtwc.o
obj-$(CONFIG_INTEL_SOC_PMIC_CHTWC) += intel_soc_pmic_chtwc.o
+obj-$(CONFIG_INTEL_SOC_PMIC_CHTDC_TI) += intel_soc_pmic_chtdc_ti.o
obj-$(CONFIG_MFD_MT6397) += mt6397-core.o
obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o
@@ -227,3 +229,4 @@ obj-$(CONFIG_MFD_SUN4I_GPADC) += sun4i-gpadc.o
obj-$(CONFIG_MFD_STM32_LPTIMER) += stm32-lptimer.o
obj-$(CONFIG_MFD_STM32_TIMERS) += stm32-timers.o
obj-$(CONFIG_MFD_MXS_LRADC) += mxs-lradc.o
+obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 8d46e3ad9529..77875250abe5 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -797,12 +797,7 @@ EXPORT_SYMBOL_GPL(arizona_of_get_type);
static int arizona_of_get_core_pdata(struct arizona *arizona)
{
struct arizona_pdata *pdata = &arizona->pdata;
- struct property *prop;
- const __be32 *cur;
- u32 val;
- u32 pdm_val[ARIZONA_MAX_PDM_SPK];
int ret, i;
- int count = 0;
pdata->reset = of_get_named_gpio(arizona->dev->of_node, "wlf,reset", 0);
if (pdata->reset == -EPROBE_DEFER) {
@@ -836,64 +831,6 @@ static int arizona_of_get_core_pdata(struct arizona *arizona)
ret);
}
- of_property_for_each_u32(arizona->dev->of_node, "wlf,inmode", prop,
- cur, val) {
- if (count == ARRAY_SIZE(pdata->inmode))
- break;
-
- pdata->inmode[count] = val;
- count++;
- }
-
- count = 0;
- of_property_for_each_u32(arizona->dev->of_node, "wlf,dmic-ref", prop,
- cur, val) {
- if (count == ARRAY_SIZE(pdata->dmic_ref))
- break;
-
- pdata->dmic_ref[count] = val;
- count++;
- }
-
- count = 0;
- of_property_for_each_u32(arizona->dev->of_node, "wlf,out-mono", prop,
- cur, val) {
- if (count == ARRAY_SIZE(pdata->out_mono))
- break;
-
- pdata->out_mono[count] = !!val;
- count++;
- }
-
- count = 0;
- of_property_for_each_u32(arizona->dev->of_node,
- "wlf,max-channels-clocked",
- prop, cur, val) {
- if (count == ARRAY_SIZE(pdata->max_channels_clocked))
- break;
-
- pdata->max_channels_clocked[count] = val;
- count++;
- }
-
- ret = of_property_read_u32_array(arizona->dev->of_node,
- "wlf,spk-fmt",
- pdm_val,
- ARRAY_SIZE(pdm_val));
-
- if (ret >= 0)
- for (count = 0; count < ARRAY_SIZE(pdata->spk_fmt); ++count)
- pdata->spk_fmt[count] = pdm_val[count];
-
- ret = of_property_read_u32_array(arizona->dev->of_node,
- "wlf,spk-mute",
- pdm_val,
- ARRAY_SIZE(pdm_val));
-
- if (ret >= 0)
- for (count = 0; count < ARRAY_SIZE(pdata->spk_mute); ++count)
- pdata->spk_mute[count] = pdm_val[count];
-
return 0;
}
@@ -1026,7 +963,7 @@ int arizona_dev_init(struct arizona *arizona)
const char * const mclk_name[] = { "mclk1", "mclk2" };
struct device *dev = arizona->dev;
const char *type_name = NULL;
- unsigned int reg, val, mask;
+ unsigned int reg, val;
int (*apply_patch)(struct arizona *) = NULL;
const struct mfd_cell *subdevs = NULL;
int n_subdevs, ret, i;
@@ -1429,73 +1366,6 @@ int arizona_dev_init(struct arizona *arizona)
ARIZONA_MICB1_RATE, val);
}
- for (i = 0; i < ARIZONA_MAX_INPUT; i++) {
- /* Default for both is 0 so noop with defaults */
- val = arizona->pdata.dmic_ref[i]
- << ARIZONA_IN1_DMIC_SUP_SHIFT;
- if (arizona->pdata.inmode[i] & ARIZONA_INMODE_DMIC)
- val |= 1 << ARIZONA_IN1_MODE_SHIFT;
-
- switch (arizona->type) {
- case WM8998:
- case WM1814:
- regmap_update_bits(arizona->regmap,
- ARIZONA_ADC_DIGITAL_VOLUME_1L + (i * 8),
- ARIZONA_IN1L_SRC_SE_MASK,
- (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
- << ARIZONA_IN1L_SRC_SE_SHIFT);
-
- regmap_update_bits(arizona->regmap,
- ARIZONA_ADC_DIGITAL_VOLUME_1R + (i * 8),
- ARIZONA_IN1R_SRC_SE_MASK,
- (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
- << ARIZONA_IN1R_SRC_SE_SHIFT);
-
- mask = ARIZONA_IN1_DMIC_SUP_MASK |
- ARIZONA_IN1_MODE_MASK;
- break;
- default:
- if (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
- val |= 1 << ARIZONA_IN1_SINGLE_ENDED_SHIFT;
-
- mask = ARIZONA_IN1_DMIC_SUP_MASK |
- ARIZONA_IN1_MODE_MASK |
- ARIZONA_IN1_SINGLE_ENDED_MASK;
- break;
- }
-
- regmap_update_bits(arizona->regmap,
- ARIZONA_IN1L_CONTROL + (i * 8),
- mask, val);
- }
-
- for (i = 0; i < ARIZONA_MAX_OUTPUT; i++) {
- /* Default is 0 so noop with defaults */
- if (arizona->pdata.out_mono[i])
- val = ARIZONA_OUT1_MONO;
- else
- val = 0;
-
- regmap_update_bits(arizona->regmap,
- ARIZONA_OUTPUT_PATH_CONFIG_1L + (i * 8),
- ARIZONA_OUT1_MONO, val);
- }
-
- for (i = 0; i < ARIZONA_MAX_PDM_SPK; i++) {
- if (arizona->pdata.spk_mute[i])
- regmap_update_bits(arizona->regmap,
- ARIZONA_PDM_SPK1_CTRL_1 + (i * 2),
- ARIZONA_SPK1_MUTE_ENDIAN_MASK |
- ARIZONA_SPK1_MUTE_SEQ1_MASK,
- arizona->pdata.spk_mute[i]);
-
- if (arizona->pdata.spk_fmt[i])
- regmap_update_bits(arizona->regmap,
- ARIZONA_PDM_SPK1_CTRL_2 + (i * 2),
- ARIZONA_SPK1_FMT_MASK,
- arizona->pdata.spk_fmt[i]);
- }
-
pm_runtime_set_active(arizona->dev);
pm_runtime_enable(arizona->dev);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 336de66ca408..2468b431bb22 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -876,6 +876,8 @@ static struct mfd_cell axp813_cells[] = {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp803_pek_resources),
.resources = axp803_pek_resources,
+ }, {
+ .name = "axp20x-regulator",
}
};
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index b3767c3141e5..dbb85caaafed 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -84,8 +84,7 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev,
return -ENOMEM;
}
- irq_set_chained_handler(irq, mx25_tsadc_irq_handler);
- irq_set_handler_data(irq, tsadc);
+ irq_set_chained_handler_and_data(irq, mx25_tsadc_irq_handler, tsadc);
return 0;
}
@@ -180,6 +179,19 @@ static int mx25_tsadc_probe(struct platform_device *pdev)
return devm_of_platform_populate(dev);
}
+static int mx25_tsadc_remove(struct platform_device *pdev)
+{
+ struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq) {
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
+ irq_domain_remove(tsadc->domain);
+ }
+
+ return 0;
+}
+
static const struct of_device_id mx25_tsadc_ids[] = {
{ .compatible = "fsl,imx25-tsadc" },
{ /* Sentinel */ }
@@ -192,6 +204,7 @@ static struct platform_driver mx25_tsadc_driver = {
.of_match_table = of_match_ptr(mx25_tsadc_ids),
},
.probe = mx25_tsadc_probe,
+ .remove = mx25_tsadc_remove,
};
module_platform_driver(mx25_tsadc_driver);
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
index 694116630ffa..865bbeaaf00c 100644
--- a/drivers/mfd/intel-lpss.h
+++ b/drivers/mfd/intel-lpss.h
@@ -38,12 +38,7 @@ int intel_lpss_resume(struct device *dev);
#ifdef CONFIG_PM_SLEEP
#define INTEL_LPSS_SLEEP_PM_OPS \
.prepare = intel_lpss_prepare, \
- .suspend = intel_lpss_suspend, \
- .resume = intel_lpss_resume, \
- .freeze = intel_lpss_suspend, \
- .thaw = intel_lpss_resume, \
- .poweroff = intel_lpss_suspend, \
- .restore = intel_lpss_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_lpss_suspend, intel_lpss_resume)
#else
#define INTEL_LPSS_SLEEP_PM_OPS
#endif
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
new file mode 100644
index 000000000000..861277c6580a
--- /dev/null
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -0,0 +1,184 @@
+/*
+ * Device access for Dollar Cove TI PMIC
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
+ *
+ * Cleanup and forward-ported
+ * Copyright (c) 2017 Takashi Iwai <tiwai@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#define CHTDC_TI_IRQLVL1 0x01
+#define CHTDC_TI_MASK_IRQLVL1 0x02
+
+/* Level 1 IRQs */
+enum {
+ CHTDC_TI_PWRBTN = 0, /* power button */
+ CHTDC_TI_DIETMPWARN, /* thermal */
+ CHTDC_TI_ADCCMPL, /* ADC */
+ /* No IRQ 3 */
+ CHTDC_TI_VBATLOW = 4, /* battery */
+ CHTDC_TI_VBUSDET, /* power source */
+ /* No IRQ 6 */
+ CHTDC_TI_CCEOCAL = 7, /* battery */
+};
+
+static struct resource power_button_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_PWRBTN),
+};
+
+static struct resource thermal_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_DIETMPWARN),
+};
+
+static struct resource adc_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_ADCCMPL),
+};
+
+static struct resource pwrsrc_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_VBUSDET),
+};
+
+static struct resource battery_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_VBATLOW),
+ DEFINE_RES_IRQ(CHTDC_TI_CCEOCAL),
+};
+
+static struct mfd_cell chtdc_ti_dev[] = {
+ {
+ .name = "chtdc_ti_pwrbtn",
+ .num_resources = ARRAY_SIZE(power_button_resources),
+ .resources = power_button_resources,
+ }, {
+ .name = "chtdc_ti_adc",
+ .num_resources = ARRAY_SIZE(adc_resources),
+ .resources = adc_resources,
+ }, {
+ .name = "chtdc_ti_thermal",
+ .num_resources = ARRAY_SIZE(thermal_resources),
+ .resources = thermal_resources,
+ }, {
+ .name = "chtdc_ti_pwrsrc",
+ .num_resources = ARRAY_SIZE(pwrsrc_resources),
+ .resources = pwrsrc_resources,
+ }, {
+ .name = "chtdc_ti_battery",
+ .num_resources = ARRAY_SIZE(battery_resources),
+ .resources = battery_resources,
+ },
+ { .name = "chtdc_ti_region", },
+};
+
+static const struct regmap_config chtdc_ti_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 128,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const struct regmap_irq chtdc_ti_irqs[] = {
+ REGMAP_IRQ_REG(CHTDC_TI_PWRBTN, 0, BIT(CHTDC_TI_PWRBTN)),
+ REGMAP_IRQ_REG(CHTDC_TI_DIETMPWARN, 0, BIT(CHTDC_TI_DIETMPWARN)),
+ REGMAP_IRQ_REG(CHTDC_TI_ADCCMPL, 0, BIT(CHTDC_TI_ADCCMPL)),
+ REGMAP_IRQ_REG(CHTDC_TI_VBATLOW, 0, BIT(CHTDC_TI_VBATLOW)),
+ REGMAP_IRQ_REG(CHTDC_TI_VBUSDET, 0, BIT(CHTDC_TI_VBUSDET)),
+ REGMAP_IRQ_REG(CHTDC_TI_CCEOCAL, 0, BIT(CHTDC_TI_CCEOCAL)),
+};
+
+static const struct regmap_irq_chip chtdc_ti_irq_chip = {
+ .name = KBUILD_MODNAME,
+ .irqs = chtdc_ti_irqs,
+ .num_irqs = ARRAY_SIZE(chtdc_ti_irqs),
+ .num_regs = 1,
+ .status_base = CHTDC_TI_IRQLVL1,
+ .mask_base = CHTDC_TI_MASK_IRQLVL1,
+ .ack_base = CHTDC_TI_IRQLVL1,
+};
+
+static int chtdc_ti_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct intel_soc_pmic *pmic;
+ int ret;
+
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, pmic);
+
+ pmic->regmap = devm_regmap_init_i2c(i2c, &chtdc_ti_regmap_config);
+ if (IS_ERR(pmic->regmap))
+ return PTR_ERR(pmic->regmap);
+ pmic->irq = i2c->irq;
+
+ ret = devm_regmap_add_irq_chip(dev, pmic->regmap, pmic->irq,
+ IRQF_ONESHOT, 0,
+ &chtdc_ti_irq_chip,
+ &pmic->irq_chip_data);
+ if (ret)
+ return ret;
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, chtdc_ti_dev,
+ ARRAY_SIZE(chtdc_ti_dev), NULL, 0,
+ regmap_irq_get_domain(pmic->irq_chip_data));
+}
+
+static void chtdc_ti_shutdown(struct i2c_client *i2c)
+{
+ struct intel_soc_pmic *pmic = i2c_get_clientdata(i2c);
+
+ disable_irq(pmic->irq);
+}
+
+static int __maybe_unused chtdc_ti_suspend(struct device *dev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ disable_irq(pmic->irq);
+
+ return 0;
+}
+
+static int __maybe_unused chtdc_ti_resume(struct device *dev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ enable_irq(pmic->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(chtdc_ti_pm_ops, chtdc_ti_suspend, chtdc_ti_resume);
+
+static const struct acpi_device_id chtdc_ti_acpi_ids[] = {
+ { "INT33F5" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, chtdc_ti_acpi_ids);
+
+static struct i2c_driver chtdc_ti_i2c_driver = {
+ .driver = {
+ .name = "intel_soc_pmic_chtdc_ti",
+ .pm = &chtdc_ti_pm_ops,
+ .acpi_match_table = chtdc_ti_acpi_ids,
+ },
+ .probe_new = chtdc_ti_probe,
+ .shutdown = chtdc_ti_shutdown,
+};
+module_i2c_driver(chtdc_ti_i2c_driver);
+
+MODULE_DESCRIPTION("I2C driver for Intel SoC Dollar Cove TI PMIC");
+MODULE_LICENSE("GPL v2");
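The cells above list their interrupts as indices into the PMIC's level-1 IRQ space; since devm_mfd_add_devices() is passed regmap_irq_get_domain(), the MFD core remaps those resources to virtual IRQs. A hypothetical cell driver (names illustrative) can therefore request its interrupt directly:

	static int chtdc_ti_pwrbtn_probe(struct platform_device *pdev)
	{
		/* CHTDC_TI_PWRBTN, already remapped through the IRQ domain */
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;

		return devm_request_threaded_irq(&pdev->dev, irq, NULL,
						 my_pwrbtn_isr, IRQF_ONESHOT,
						 "chtdc_ti_pwrbtn", pdev);
	}

where my_pwrbtn_isr is a placeholder threaded handler.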
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 450ae36645aa..cf1120abbf52 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -522,6 +522,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
.name = "Avoton SoC",
.iTCO_version = 3,
.gpio_version = AVOTON_GPIO,
+ .spi_type = INTEL_SPI_BYT,
},
[LPC_BAYTRAIL] = {
.name = "Bay Trail SoC",
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 662ae0d9e334..1c05ea0cba61 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -48,7 +48,10 @@ static const struct mfd_cell max77693_devs[] = {
.name = "max77693-charger",
.of_compatible = "maxim,max77693-charger",
},
- { .name = "max77693-muic", },
+ {
+ .name = "max77693-muic",
+ .of_compatible = "maxim,max77693-muic",
+ },
{
.name = "max77693-haptic",
.of_compatible = "maxim,max77693-haptic",
diff --git a/drivers/mfd/mxs-lradc.c b/drivers/mfd/mxs-lradc.c
index 630bd19b2c0a..98e732a7ae96 100644
--- a/drivers/mfd/mxs-lradc.c
+++ b/drivers/mfd/mxs-lradc.c
@@ -196,8 +196,10 @@ static int mxs_lradc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lradc);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOMEM;
+ if (!res) {
+ ret = -ENOMEM;
+ goto err_clk;
+ }
switch (lradc->soc) {
case IMX23_LRADC:
diff --git a/drivers/mfd/rts5249.c b/drivers/mfd/rts5249.c
index 40f8bb14fc59..7fcf37ba922c 100644
--- a/drivers/mfd/rts5249.c
+++ b/drivers/mfd/rts5249.c
@@ -103,8 +103,64 @@ static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
}
+static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &(pcr->option);
+ u32 lval;
+
+ if (CHK_PCI_PID(pcr, PID_524A))
+ rtsx_pci_read_config_dword(pcr,
+ PCR_ASPM_SETTING_REG1, &lval);
+ else
+ rtsx_pci_read_config_dword(pcr,
+ PCR_ASPM_SETTING_REG2, &lval);
+
+ if (lval & ASPM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & ASPM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+}
+
+static int rts5249_init_from_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+
+ return 0;
+}
+
static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ rts5249_init_from_cfg(pcr);
+ rts5249_init_from_hw(pcr);
+
rtsx_pci_init_cmd(pcr);
/* Rest L1SUB Config */
@@ -125,7 +181,18 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
- return rtsx_pci_send_cmd(pcr, 100);
+ /*
+ * If force_clkreq_0 is enabled, the CLKREQ# pin will be forced
+ * to drive low, and the clock is forcibly requested.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
}
static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
@@ -285,6 +352,31 @@ static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
return rtsx_pci_send_cmd(pcr, 100);
}
+static void rts5249_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ if (enable)
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr,
+ pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ if (!enable)
+ val = FORCE_ASPM_CTL0;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+
+ pcr->aspm_enabled = enable;
+}
+
static const struct pcr_ops rts5249_pcr_ops = {
.fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
.extra_init_hw = rts5249_extra_init_hw,
@@ -297,6 +389,7 @@ static const struct pcr_ops rts5249_pcr_ops = {
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rtsx_base_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
+ .set_aspm = rts5249_set_aspm,
};
/* SD Pull Control Enable:
@@ -353,6 +446,8 @@ static const u32 rts5249_ms_pull_ctl_disable_tbl[] = {
void rts5249_init_params(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &(pcr->option);
+
pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
pcr->num_slots = 2;
pcr->ops = &rts5249_pcr_ops;
@@ -372,6 +467,20 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
pcr->ms_pull_ctl_disable_tbl = rts5249_ms_pull_ctl_disable_tbl;
pcr->reg_pm_ctrl3 = PM_CTRL3;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* Init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5249_DEF;
+ option->ltr_l1off_snooze_sspwrgate =
+ LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF;
}
static int rts524a_write_phy(struct rtsx_pcr *pcr, u8 addr, u16 val)
@@ -459,6 +568,40 @@ static int rts524a_extra_init_hw(struct rtsx_pcr *pcr)
return 0;
}
+static void rts5250_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
+ int card_exist = (interrupt & SD_EXIST) | (interrupt & MS_EXIST);
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* Run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* L1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ if (aspm_L1_1 || aspm_L1_2) {
+ if (rtsx_check_dev_flag(pcr,
+ LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
+ if (card_exist)
+ val &= ~L1OFF_MBIAS2_EN_5250;
+ else
+ val |= L1OFF_MBIAS2_EN_5250;
+ }
+ }
+ rtsx_set_l1off_sub(pcr, val);
+}
+
static const struct pcr_ops rts524a_pcr_ops = {
.write_phy = rts524a_write_phy,
.read_phy = rts524a_read_phy,
@@ -473,11 +616,16 @@ static const struct pcr_ops rts524a_pcr_ops = {
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rtsx_base_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
+ .set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
+ .set_aspm = rts5249_set_aspm,
};
void rts524a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
+ pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+ pcr->option.ltr_l1off_snooze_sspwrgate =
+ LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts524a_pcr_ops;
@@ -576,11 +724,16 @@ static const struct pcr_ops rts525a_pcr_ops = {
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rts525a_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
+ .set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
+ .set_aspm = rts5249_set_aspm,
};
void rts525a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
+ pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+ pcr->option.ltr_l1off_snooze_sspwrgate =
+ LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts525a_pcr_ops;
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 3cf69e5c5703..590fb9aad77d 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -79,6 +79,96 @@ static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
0xFC, 0);
}
+int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
+{
+ rtsx_pci_write_register(pcr, MSGTXDATA0,
+ MASK_8_BIT_DEF, (u8) (latency & 0xFF));
+ rtsx_pci_write_register(pcr, MSGTXDATA1,
+ MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
+ rtsx_pci_write_register(pcr, MSGTXDATA2,
+ MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
+ rtsx_pci_write_register(pcr, MSGTXDATA3,
+ MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
+ rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
+ LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
+
+ return 0;
+}
+
+int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
+{
+ if (pcr->ops->set_ltr_latency)
+ return pcr->ops->set_ltr_latency(pcr, latency);
+ else
+ return rtsx_comm_set_ltr_latency(pcr, latency);
+}
+
+static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ if (enable)
+ rtsx_pci_enable_aspm(pcr);
+ else
+ rtsx_pci_disable_aspm(pcr);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK;
+ u8 val = 0;
+
+ if (enable)
+ val = pcr->aspm_en;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+
+ pcr->aspm_enabled = enable;
+}
+
+static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->set_aspm)
+ pcr->ops->set_aspm(pcr, false);
+ else
+ rtsx_comm_set_aspm(pcr, false);
+}
+
+int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
+{
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
+
+ return 0;
+}
+
+void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
+{
+ if (pcr->ops->set_l1off_cfg_sub_d0)
+ pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
+}
+
+static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_disable_aspm(pcr);
+
+ if (option->ltr_enabled)
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+
+ if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
+ rtsx_set_l1off_sub_cfg_d0(pcr, 1);
+}
+
+void rtsx_pm_full_on(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->full_on)
+ pcr->ops->full_on(pcr);
+ else
+ rtsx_comm_pm_full_on(pcr);
+}
+
void rtsx_pci_start_run(struct rtsx_pcr *pcr)
{
/* If pci device removed, don't queue idle work any more */
@@ -89,9 +179,7 @@ void rtsx_pci_start_run(struct rtsx_pcr *pcr)
pcr->state = PDEV_STAT_RUN;
if (pcr->ops->enable_auto_blink)
pcr->ops->enable_auto_blink(pcr);
-
- if (pcr->aspm_en)
- rtsx_pci_disable_aspm(pcr);
+ rtsx_pm_full_on(pcr);
}
mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
@@ -958,6 +1046,41 @@ static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
return 0;
}
+static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->set_aspm)
+ pcr->ops->set_aspm(pcr, true);
+ else
+ rtsx_comm_set_aspm(pcr, true);
+}
+
+static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ltr_enabled) {
+ u32 latency = option->ltr_l1off_latency;
+
+ if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
+ mdelay(option->l1_snooze_delay);
+
+ rtsx_set_ltr_latency(pcr, latency);
+ }
+
+ if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
+ rtsx_set_l1off_sub_cfg_d0(pcr, 0);
+
+ rtsx_enable_aspm(pcr);
+}
+
+void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->power_saving)
+ pcr->ops->power_saving(pcr);
+ else
+ rtsx_comm_pm_power_saving(pcr);
+}
+
static void rtsx_pci_idle_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
@@ -974,8 +1097,7 @@ static void rtsx_pci_idle_work(struct work_struct *work)
if (pcr->ops->turn_off_led)
pcr->ops->turn_off_led(pcr);
- if (pcr->aspm_en)
- rtsx_pci_enable_aspm(pcr);
+ rtsx_pm_power_saving(pcr);
mutex_unlock(&pcr->pcr_mutex);
}
@@ -1063,6 +1185,16 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
if (err < 0)
return err;
+ switch (PCI_PID(pcr)) {
+ case PID_5250:
+ case PID_524A:
+ case PID_525A:
+ rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
+ break;
+ default:
+ break;
+ }
+
/* Enable clk_request_n to enable clock power management */
rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
/* Enter L1 when host tx idle */
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/mfd/rtsx_pcr.h
index 931d1ae3ce32..ec784e04fe20 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/mfd/rtsx_pcr.h
@@ -32,6 +32,18 @@
#define RTS524A_PME_FORCE_CTL 0xFF78
#define RTS524A_PM_CTRL3 0xFF7E
+#define LTR_ACTIVE_LATENCY_DEF 0x883C
+#define LTR_IDLE_LATENCY_DEF 0x892C
+#define LTR_L1OFF_LATENCY_DEF 0x9003
+#define L1_SNOOZE_DELAY_DEF 1
+#define LTR_L1OFF_SSPWRGATE_5249_DEF 0xAF
+#define LTR_L1OFF_SSPWRGATE_5250_DEF 0xFF
+#define LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF 0xAC
+#define LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF 0xF8
+#define CMD_TIMEOUT_DEF 100
+#define ASPM_MASK_NEG 0xFC
+#define MASK_8_BIT_DEF 0xFF
+
int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
@@ -85,5 +97,7 @@ do { \
/* generic operations */
int rtsx_gops_pm_reset(struct rtsx_pcr *pcr);
+int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency);
+int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val);
#endif
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 40534352e574..ad774161a22d 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -20,6 +20,7 @@
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/i2c-gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/slab.h>
#include <linux/sm501.h>
@@ -1107,14 +1108,6 @@ static void sm501_gpio_remove(struct sm501_devdata *sm)
kfree(gpio->regs_res);
}
-static inline int sm501_gpio_pin2nr(struct sm501_devdata *sm, unsigned int pin)
-{
- struct sm501_gpio *gpio = &sm->gpio;
- int base = (pin < 32) ? gpio->low.gpio.base : gpio->high.gpio.base;
-
- return (pin % 32) + base;
-}
-
static inline int sm501_gpio_isregistered(struct sm501_devdata *sm)
{
return sm->gpio.registered;
@@ -1129,11 +1122,6 @@ static inline void sm501_gpio_remove(struct sm501_devdata *sm)
{
}
-static inline int sm501_gpio_pin2nr(struct sm501_devdata *sm, unsigned int pin)
-{
- return -1;
-}
-
static inline int sm501_gpio_isregistered(struct sm501_devdata *sm)
{
return 0;
@@ -1145,20 +1133,37 @@ static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm,
{
struct i2c_gpio_platform_data *icd;
struct platform_device *pdev;
+ struct gpiod_lookup_table *lookup;
pdev = sm501_create_subdev(sm, "i2c-gpio", 0,
sizeof(struct i2c_gpio_platform_data));
if (!pdev)
return -ENOMEM;
- icd = dev_get_platdata(&pdev->dev);
-
- /* We keep the pin_sda and pin_scl fields relative in case the
- * same platform data is passed to >1 SM501.
- */
+ /* Create a gpiod lookup using gpiochip-local offsets */
+ lookup = devm_kzalloc(&pdev->dev,
+ sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
+ GFP_KERNEL);
+ lookup->dev_id = "i2c-gpio";
+ if (iic->pin_sda < 32)
+ lookup->table[0].chip_label = "SM501-LOW";
+ else
+ lookup->table[0].chip_label = "SM501-HIGH";
+ lookup->table[0].chip_hwnum = iic->pin_sda % 32;
+ lookup->table[0].con_id = NULL;
+ lookup->table[0].idx = 0;
+ lookup->table[0].flags = GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN;
+ if (iic->pin_scl < 32)
+ lookup->table[1].chip_label = "SM501-LOW";
+ else
+ lookup->table[1].chip_label = "SM501-HIGH";
+ lookup->table[1].chip_hwnum = iic->pin_scl % 32;
+ lookup->table[1].con_id = NULL;
+ lookup->table[1].idx = 1;
+ lookup->table[1].flags = GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN;
+ gpiod_add_lookup_table(lookup);
- icd->sda_pin = sm501_gpio_pin2nr(sm, iic->pin_sda);
- icd->scl_pin = sm501_gpio_pin2nr(sm, iic->pin_scl);
+ icd = dev_get_platdata(&pdev->dev);
icd->timeout = iic->timeout;
icd->udelay = iic->udelay;
@@ -1170,9 +1175,9 @@ static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm,
pdev->id = iic->bus_num;
- dev_info(sm->dev, "registering i2c-%d: sda=%d (%d), scl=%d (%d)\n",
+ dev_info(sm->dev, "registering i2c-%d: sda=%d, scl=%d\n",
iic->bus_num,
- icd->sda_pin, iic->pin_sda, icd->scl_pin, iic->pin_scl);
+ iic->pin_sda, iic->pin_scl);
return sm501_register_device(sm, pdev);
}
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
new file mode 100644
index 000000000000..56a4782f0569
--- /dev/null
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#define SPRD_PMIC_INT_MASK_STATUS 0x0
+#define SPRD_PMIC_INT_RAW_STATUS 0x4
+#define SPRD_PMIC_INT_EN 0x8
+
+#define SPRD_SC2731_IRQ_BASE 0x140
+#define SPRD_SC2731_IRQ_NUMS 16
+
+struct sprd_pmic {
+ struct regmap *regmap;
+ struct device *dev;
+ struct regmap_irq *irqs;
+ struct regmap_irq_chip irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+ int irq;
+};
+
+struct sprd_pmic_data {
+ u32 irq_base;
+ u32 num_irqs;
+};
+
+/*
+ * Since different PMICs of the SC27xx series can have different interrupt
+ * base addresses and interrupt counts, save the interrupt base and the
+ * number of interrupts in the device data structure.
+ */
+static const struct sprd_pmic_data sc2731_data = {
+ .irq_base = SPRD_SC2731_IRQ_BASE,
+ .num_irqs = SPRD_SC2731_IRQ_NUMS,
+};
+
+static const struct mfd_cell sprd_pmic_devs[] = {
+ {
+ .name = "sc27xx-wdt",
+ .of_compatible = "sprd,sc27xx-wdt",
+ }, {
+ .name = "sc27xx-rtc",
+ .of_compatible = "sprd,sc27xx-rtc",
+ }, {
+ .name = "sc27xx-charger",
+ .of_compatible = "sprd,sc27xx-charger",
+ }, {
+ .name = "sc27xx-chg-timer",
+ .of_compatible = "sprd,sc27xx-chg-timer",
+ }, {
+ .name = "sc27xx-fast-chg",
+ .of_compatible = "sprd,sc27xx-fast-chg",
+ }, {
+ .name = "sc27xx-chg-wdt",
+ .of_compatible = "sprd,sc27xx-chg-wdt",
+ }, {
+ .name = "sc27xx-typec",
+ .of_compatible = "sprd,sc27xx-typec",
+ }, {
+ .name = "sc27xx-flash",
+ .of_compatible = "sprd,sc27xx-flash",
+ }, {
+ .name = "sc27xx-eic",
+ .of_compatible = "sprd,sc27xx-eic",
+ }, {
+ .name = "sc27xx-efuse",
+ .of_compatible = "sprd,sc27xx-efuse",
+ }, {
+ .name = "sc27xx-thermal",
+ .of_compatible = "sprd,sc27xx-thermal",
+ }, {
+ .name = "sc27xx-adc",
+ .of_compatible = "sprd,sc27xx-adc",
+ }, {
+ .name = "sc27xx-audio-codec",
+ .of_compatible = "sprd,sc27xx-audio-codec",
+ }, {
+ .name = "sc27xx-regulator",
+ .of_compatible = "sprd,sc27xx-regulator",
+ }, {
+ .name = "sc27xx-vibrator",
+ .of_compatible = "sprd,sc27xx-vibrator",
+ }, {
+ .name = "sc27xx-keypad-led",
+ .of_compatible = "sprd,sc27xx-keypad-led",
+ }, {
+ .name = "sc27xx-bltc",
+ .of_compatible = "sprd,sc27xx-bltc",
+ }, {
+ .name = "sc27xx-fgu",
+ .of_compatible = "sprd,sc27xx-fgu",
+ }, {
+ .name = "sc27xx-7sreset",
+ .of_compatible = "sprd,sc27xx-7sreset",
+ }, {
+ .name = "sc27xx-poweroff",
+ .of_compatible = "sprd,sc27xx-poweroff",
+ },
+};
+
+static int sprd_pmic_spi_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ return spi_write(spi, data, count);
+}
+
+static int sprd_pmic_spi_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ u32 rx_buf[2] = { 0 };
+ int ret;
+
+ /* Currently only one PMIC register can be read at a time. */
+ if (reg_size != sizeof(u32) || val_size != sizeof(u32))
+ return -EINVAL;
+
+ /* Copy address to read from into first element of SPI buffer. */
+ memcpy(rx_buf, reg, sizeof(u32));
+ ret = spi_read(spi, rx_buf, 1);
+ if (ret < 0)
+ return ret;
+
+ memcpy(val, rx_buf, val_size);
+ return 0;
+}
+
+static struct regmap_bus sprd_pmic_regmap = {
+ .write = sprd_pmic_spi_write,
+ .read = sprd_pmic_spi_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static const struct regmap_config sprd_pmic_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0xffff,
+};
+
+static int sprd_pmic_probe(struct spi_device *spi)
+{
+ struct sprd_pmic *ddata;
+ const struct sprd_pmic_data *pdata;
+ int ret, i;
+
+ pdata = of_device_get_match_data(&spi->dev);
+ if (!pdata) {
+ dev_err(&spi->dev, "No matching driver data found\n");
+ return -EINVAL;
+ }
+
+ ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->regmap = devm_regmap_init(&spi->dev, &sprd_pmic_regmap,
+ &spi->dev, &sprd_pmic_config);
+ if (IS_ERR(ddata->regmap)) {
+ ret = PTR_ERR(ddata->regmap);
+ dev_err(&spi->dev, "Failed to allocate register map %d\n", ret);
+ return ret;
+ }
+
+ spi_set_drvdata(spi, ddata);
+ ddata->dev = &spi->dev;
+ ddata->irq = spi->irq;
+
+ ddata->irq_chip.name = dev_name(&spi->dev);
+ ddata->irq_chip.status_base =
+ pdata->irq_base + SPRD_PMIC_INT_MASK_STATUS;
+ ddata->irq_chip.mask_base = pdata->irq_base + SPRD_PMIC_INT_EN;
+ ddata->irq_chip.ack_base = 0;
+ ddata->irq_chip.num_regs = 1;
+ ddata->irq_chip.num_irqs = pdata->num_irqs;
+ ddata->irq_chip.mask_invert = true;
+
+ ddata->irqs = devm_kzalloc(&spi->dev, sizeof(struct regmap_irq) *
+ pdata->num_irqs, GFP_KERNEL);
+ if (!ddata->irqs)
+ return -ENOMEM;
+
+ ddata->irq_chip.irqs = ddata->irqs;
+ for (i = 0; i < pdata->num_irqs; i++) {
+ ddata->irqs[i].reg_offset = i / pdata->num_irqs;
+ ddata->irqs[i].mask = BIT(i % pdata->num_irqs);
+ }
+
+ ret = devm_regmap_add_irq_chip(&spi->dev, ddata->regmap, ddata->irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND, 0,
+ &ddata->irq_chip, &ddata->irq_data);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to add PMIC irq chip %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_mfd_add_devices(&spi->dev, PLATFORM_DEVID_AUTO,
+ sprd_pmic_devs, ARRAY_SIZE(sprd_pmic_devs),
+ NULL, 0,
+ regmap_irq_get_domain(ddata->irq_data));
+ if (ret) {
+ dev_err(&spi->dev, "Failed to register device %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sprd_pmic_match[] = {
+ { .compatible = "sprd,sc2731", .data = &sc2731_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sprd_pmic_match);
+
+static struct spi_driver sprd_pmic_driver = {
+ .driver = {
+ .name = "sc27xx-pmic",
+ .bus = &spi_bus_type,
+ .of_match_table = sprd_pmic_match,
+ },
+ .probe = sprd_pmic_probe,
+};
+
+static int __init sprd_pmic_init(void)
+{
+ return spi_register_driver(&sprd_pmic_driver);
+}
+subsys_initcall(sprd_pmic_init);
+
+static void __exit sprd_pmic_exit(void)
+{
+ spi_unregister_driver(&sprd_pmic_driver);
+}
+module_exit(sprd_pmic_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Spreadtrum SC27xx PMICs driver");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
diff --git a/drivers/mfd/ssbi.c b/drivers/mfd/ssbi.c
index 27986f641f7d..36b96fee4ce6 100644
--- a/drivers/mfd/ssbi.c
+++ b/drivers/mfd/ssbi.c
@@ -314,7 +314,7 @@ static int ssbi_probe(struct platform_device *pdev)
spin_lock_init(&ssbi->lock);
- return of_platform_populate(np, NULL, NULL, &pdev->dev);
+ return devm_of_platform_populate(&pdev->dev);
}
static const struct of_device_id ssbi_match_table[] = {
diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c
index ab949eaca6ad..3cc80956260e 100644
--- a/drivers/mfd/stw481x.c
+++ b/drivers/mfd/stw481x.c
@@ -72,10 +72,12 @@ static int stw481x_get_pctl_reg(struct stw481x *stw481x, u8 reg)
static int stw481x_startup(struct stw481x *stw481x)
{
/* Voltages multiplied by 100 */
- u8 vcore_val[] = { 100, 105, 110, 115, 120, 122, 124, 126, 128,
- 130, 132, 134, 136, 138, 140, 145 };
- u8 vpll_val[] = { 105, 120, 130, 180 };
- u8 vaux_val[] = { 15, 18, 25, 28 };
+ static const u8 vcore_val[] = {
+ 100, 105, 110, 115, 120, 122, 124, 126, 128,
+ 130, 132, 134, 136, 138, 140, 145
+ };
+ static const u8 vpll_val[] = { 105, 120, 130, 180 };
+ static const u8 vaux_val[] = { 15, 18, 25, 28 };
u8 vcore;
u8 vcore_slp;
u8 vpll;
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index f769c7d4e335..7566ce4457a0 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -311,37 +311,20 @@ static const struct regmap_config tps65217_regmap_config = {
};
static const struct of_device_id tps65217_of_match[] = {
- { .compatible = "ti,tps65217", .data = (void *)TPS65217 },
+ { .compatible = "ti,tps65217"},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, tps65217_of_match);
-static int tps65217_probe(struct i2c_client *client,
- const struct i2c_device_id *ids)
+static int tps65217_probe(struct i2c_client *client)
{
struct tps65217 *tps;
unsigned int version;
- unsigned long chip_id = ids->driver_data;
- const struct of_device_id *match;
bool status_off = false;
int ret;
- if (client->dev.of_node) {
- match = of_match_device(tps65217_of_match, &client->dev);
- if (!match) {
- dev_err(&client->dev,
- "Failed to find matching dt id\n");
- return -EINVAL;
- }
- chip_id = (unsigned long)match->data;
- status_off = of_property_read_bool(client->dev.of_node,
- "ti,pmic-shutdown-controller");
- }
-
- if (!chip_id) {
- dev_err(&client->dev, "id is null.\n");
- return -ENODEV;
- }
+ status_off = of_property_read_bool(client->dev.of_node,
+ "ti,pmic-shutdown-controller");
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
@@ -349,7 +332,6 @@ static int tps65217_probe(struct i2c_client *client,
i2c_set_clientdata(client, tps);
tps->dev = &client->dev;
- tps->id = chip_id;
tps->regmap = devm_regmap_init_i2c(client, &tps65217_regmap_config);
if (IS_ERR(tps->regmap)) {
@@ -430,7 +412,7 @@ static struct i2c_driver tps65217_driver = {
.of_match_table = tps65217_of_match,
},
.id_table = tps65217_id_table,
- .probe = tps65217_probe,
+ .probe_new = tps65217_probe,
.remove = tps65217_remove,
};
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 13834a0d2817..910f569ff77c 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -215,17 +215,9 @@ static int tps65218_probe(struct i2c_client *client,
const struct i2c_device_id *ids)
{
struct tps65218 *tps;
- const struct of_device_id *match;
int ret;
unsigned int chipid;
- match = of_match_device(of_tps65218_match_table, &client->dev);
- if (!match) {
- dev_err(&client->dev,
- "Failed to find matching dt id\n");
- return -EINVAL;
- }
-
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
return -ENOMEM;
diff --git a/drivers/mfd/wm97xx-core.c b/drivers/mfd/wm97xx-core.c
new file mode 100644
index 000000000000..4141ee52a70b
--- /dev/null
+++ b/drivers/mfd/wm97xx-core.c
@@ -0,0 +1,366 @@
+/*
+ * Wolfson WM97xx -- Core device
+ *
+ * Copyright (C) 2017 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Features:
+ * - an AC97 audio codec
+ * - a touchscreen driver
+ * - a GPIO block
+ */
+
+#include <linux/device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/wm97xx.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/wm97xx.h>
+#include <sound/ac97/codec.h>
+#include <sound/ac97/compat.h>
+
+#define WM9705_VENDOR_ID 0x574d4c05
+#define WM9712_VENDOR_ID 0x574d4c12
+#define WM9713_VENDOR_ID 0x574d4c13
+#define WM97xx_VENDOR_ID_MASK 0xffffffff
+
+struct wm97xx_priv {
+ struct regmap *regmap;
+ struct snd_ac97 *ac97;
+ struct device *dev;
+ struct wm97xx_platform_data codec_pdata;
+};
+
+static bool wm97xx_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AC97_RESET ... AC97_PCM_SURR_DAC_RATE:
+ case AC97_PCM_LR_ADC_RATE:
+ case AC97_CENTER_LFE_MASTER:
+ case AC97_SPDIF ... AC97_LINE1_LEVEL:
+ case AC97_GPIO_CFG ... 0x5c:
+ case AC97_CODEC_CLASS_REV ... AC97_PCI_SID:
+ case 0x74 ... AC97_VENDOR_ID2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool wm97xx_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AC97_VENDOR_ID1:
+ case AC97_VENDOR_ID2:
+ return false;
+ default:
+ return wm97xx_readable_reg(dev, reg);
+ }
+}
+
+static const struct reg_default wm9705_reg_defaults[] = {
+ { 0x02, 0x8000 },
+ { 0x04, 0x8000 },
+ { 0x06, 0x8000 },
+ { 0x0a, 0x8000 },
+ { 0x0c, 0x8008 },
+ { 0x0e, 0x8008 },
+ { 0x10, 0x8808 },
+ { 0x12, 0x8808 },
+ { 0x14, 0x8808 },
+ { 0x16, 0x8808 },
+ { 0x18, 0x8808 },
+ { 0x1a, 0x0000 },
+ { 0x1c, 0x8000 },
+ { 0x20, 0x0000 },
+ { 0x22, 0x0000 },
+ { 0x26, 0x000f },
+ { 0x28, 0x0605 },
+ { 0x2a, 0x0000 },
+ { 0x2c, 0xbb80 },
+ { 0x32, 0xbb80 },
+ { 0x34, 0x2000 },
+ { 0x5a, 0x0000 },
+ { 0x5c, 0x0000 },
+ { 0x72, 0x0808 },
+ { 0x74, 0x0000 },
+ { 0x76, 0x0006 },
+ { 0x78, 0x0000 },
+ { 0x7a, 0x0000 },
+};
+
+static const struct regmap_config wm9705_regmap_config = {
+ .reg_bits = 16,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .max_register = 0x7e,
+ .cache_type = REGCACHE_RBTREE,
+
+ .reg_defaults = wm9705_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm9705_reg_defaults),
+ .volatile_reg = regmap_ac97_default_volatile,
+ .readable_reg = wm97xx_readable_reg,
+ .writeable_reg = wm97xx_writeable_reg,
+};
+
+static struct mfd_cell wm9705_cells[] = {
+ { .name = "wm9705-codec", },
+ { .name = "wm97xx-ts", },
+};
+
+static bool wm9712_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AC97_REC_GAIN:
+ return true;
+ default:
+ return regmap_ac97_default_volatile(dev, reg);
+ }
+}
+
+static const struct reg_default wm9712_reg_defaults[] = {
+ { 0x02, 0x8000 },
+ { 0x04, 0x8000 },
+ { 0x06, 0x8000 },
+ { 0x08, 0x0f0f },
+ { 0x0a, 0xaaa0 },
+ { 0x0c, 0xc008 },
+ { 0x0e, 0x6808 },
+ { 0x10, 0xe808 },
+ { 0x12, 0xaaa0 },
+ { 0x14, 0xad00 },
+ { 0x16, 0x8000 },
+ { 0x18, 0xe808 },
+ { 0x1a, 0x3000 },
+ { 0x1c, 0x8000 },
+ { 0x20, 0x0000 },
+ { 0x22, 0x0000 },
+ { 0x26, 0x000f },
+ { 0x28, 0x0605 },
+ { 0x2a, 0x0410 },
+ { 0x2c, 0xbb80 },
+ { 0x2e, 0xbb80 },
+ { 0x32, 0xbb80 },
+ { 0x34, 0x2000 },
+ { 0x4c, 0xf83e },
+ { 0x4e, 0xffff },
+ { 0x50, 0x0000 },
+ { 0x52, 0x0000 },
+ { 0x56, 0xf83e },
+ { 0x58, 0x0008 },
+ { 0x5c, 0x0000 },
+ { 0x60, 0xb032 },
+ { 0x62, 0x3e00 },
+ { 0x64, 0x0000 },
+ { 0x76, 0x0006 },
+ { 0x78, 0x0001 },
+ { 0x7a, 0x0000 },
+};
+
+static const struct regmap_config wm9712_regmap_config = {
+ .reg_bits = 16,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .max_register = 0x7e,
+ .cache_type = REGCACHE_RBTREE,
+
+ .reg_defaults = wm9712_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm9712_reg_defaults),
+ .volatile_reg = wm9712_volatile_reg,
+ .readable_reg = wm97xx_readable_reg,
+ .writeable_reg = wm97xx_writeable_reg,
+};
+
+static struct mfd_cell wm9712_cells[] = {
+ { .name = "wm9712-codec", },
+ { .name = "wm97xx-ts", },
+};
+
+static const struct reg_default wm9713_reg_defaults[] = {
+ { 0x02, 0x8080 }, /* Speaker Output Volume */
+ { 0x04, 0x8080 }, /* Headphone Output Volume */
+ { 0x06, 0x8080 }, /* Out3/OUT4 Volume */
+ { 0x08, 0xc880 }, /* Mono Volume */
+ { 0x0a, 0xe808 }, /* LINEIN Volume */
+ { 0x0c, 0xe808 }, /* DAC PGA Volume */
+ { 0x0e, 0x0808 }, /* MIC PGA Volume */
+ { 0x10, 0x00da }, /* MIC Routing Control */
+ { 0x12, 0x8000 }, /* Record PGA Volume */
+ { 0x14, 0xd600 }, /* Record Routing */
+ { 0x16, 0xaaa0 }, /* PCBEEP Volume */
+ { 0x18, 0xaaa0 }, /* VxDAC Volume */
+ { 0x1a, 0xaaa0 }, /* AUXDAC Volume */
+ { 0x1c, 0x0000 }, /* Output PGA Mux */
+ { 0x1e, 0x0000 }, /* DAC 3D control */
+ { 0x20, 0x0f0f }, /* DAC Tone Control*/
+ { 0x22, 0x0040 }, /* MIC Input Select & Bias */
+ { 0x24, 0x0000 }, /* Output Volume Mapping & Jack */
+ { 0x26, 0x7f00 }, /* Powerdown Ctrl/Stat*/
+ { 0x28, 0x0405 }, /* Extended Audio ID */
+ { 0x2a, 0x0410 }, /* Extended Audio Start/Ctrl */
+ { 0x2c, 0xbb80 }, /* Audio DACs Sample Rate */
+ { 0x2e, 0xbb80 }, /* AUXDAC Sample Rate */
+ { 0x32, 0xbb80 }, /* Audio ADCs Sample Rate */
+ { 0x36, 0x4523 }, /* PCM codec control */
+ { 0x3a, 0x2000 }, /* SPDIF control */
+ { 0x3c, 0xfdff }, /* Powerdown 1 */
+ { 0x3e, 0xffff }, /* Powerdown 2 */
+ { 0x40, 0x0000 }, /* General Purpose */
+ { 0x42, 0x0000 }, /* Fast Power-Up Control */
+ { 0x44, 0x0080 }, /* MCLK/PLL Control */
+ { 0x46, 0x0000 }, /* MCLK/PLL Control */
+
+ { 0x4c, 0xfffe }, /* GPIO Pin Configuration */
+ { 0x4e, 0xffff }, /* GPIO Pin Polarity / Type */
+ { 0x50, 0x0000 }, /* GPIO Pin Sticky */
+ { 0x52, 0x0000 }, /* GPIO Pin Wake-Up */
+ /* GPIO Pin Status */
+ { 0x56, 0xfffe }, /* GPIO Pin Sharing */
+ { 0x58, 0x4000 }, /* GPIO PullUp/PullDown */
+ { 0x5a, 0x0000 }, /* Additional Functions 1 */
+ { 0x5c, 0x0000 }, /* Additional Functions 2 */
+ { 0x60, 0xb032 }, /* ALC Control */
+ { 0x62, 0x3e00 }, /* ALC / Noise Gate Control */
+ { 0x64, 0x0000 }, /* AUXDAC input control */
+ { 0x74, 0x0000 }, /* Digitiser Reg 1 */
+ { 0x76, 0x0006 }, /* Digitiser Reg 2 */
+ { 0x78, 0x0001 }, /* Digitiser Reg 3 */
+ { 0x7a, 0x0000 }, /* Digitiser Read Back */
+};
+
+static const struct regmap_config wm9713_regmap_config = {
+ .reg_bits = 16,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .max_register = 0x7e,
+ .cache_type = REGCACHE_RBTREE,
+
+ .reg_defaults = wm9713_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm9713_reg_defaults),
+ .volatile_reg = regmap_ac97_default_volatile,
+ .readable_reg = wm97xx_readable_reg,
+ .writeable_reg = wm97xx_writeable_reg,
+};
+
+static struct mfd_cell wm9713_cells[] = {
+ { .name = "wm9713-codec", },
+ { .name = "wm97xx-ts", },
+};
+
+static int wm97xx_ac97_probe(struct ac97_codec_device *adev)
+{
+ struct wm97xx_priv *wm97xx;
+ const struct regmap_config *config;
+ struct wm97xx_platform_data *codec_pdata;
+ struct mfd_cell *cells;
+ int ret = -ENODEV, nb_cells, i;
+ struct wm97xx_pdata *pdata = snd_ac97_codec_get_platdata(adev);
+
+ wm97xx = devm_kzalloc(ac97_codec_dev2dev(adev),
+ sizeof(*wm97xx), GFP_KERNEL);
+ if (!wm97xx)
+ return -ENOMEM;
+
+ wm97xx->dev = ac97_codec_dev2dev(adev);
+ wm97xx->ac97 = snd_ac97_compat_alloc(adev);
+ if (IS_ERR(wm97xx->ac97))
+ return PTR_ERR(wm97xx->ac97);
+
+
+ ac97_set_drvdata(adev, wm97xx);
+ dev_info(wm97xx->dev, "wm97xx core found, id=0x%x\n",
+ adev->vendor_id);
+
+ codec_pdata = &wm97xx->codec_pdata;
+ codec_pdata->ac97 = wm97xx->ac97;
+ codec_pdata->batt_pdata = pdata->batt_pdata;
+
+ switch (adev->vendor_id) {
+ case WM9705_VENDOR_ID:
+ config = &wm9705_regmap_config;
+ cells = wm9705_cells;
+ nb_cells = ARRAY_SIZE(wm9705_cells);
+ break;
+ case WM9712_VENDOR_ID:
+ config = &wm9712_regmap_config;
+ cells = wm9712_cells;
+ nb_cells = ARRAY_SIZE(wm9712_cells);
+ break;
+ case WM9713_VENDOR_ID:
+ config = &wm9713_regmap_config;
+ cells = wm9713_cells;
+ nb_cells = ARRAY_SIZE(wm9713_cells);
+ break;
+ default:
+ goto err_free_compat;
+ }
+
+ for (i = 0; i < nb_cells; i++) {
+ cells[i].platform_data = codec_pdata;
+ cells[i].pdata_size = sizeof(*codec_pdata);
+ }
+
+ codec_pdata->regmap = devm_regmap_init_ac97(wm97xx->ac97, config);
+ if (IS_ERR(codec_pdata->regmap)) {
+ ret = PTR_ERR(codec_pdata->regmap);
+ goto err_free_compat;
+ }
+
+ ret = devm_mfd_add_devices(wm97xx->dev, PLATFORM_DEVID_NONE,
+ cells, nb_cells, NULL, 0, NULL);
+ if (ret)
+ goto err_free_compat;
+
+ return ret;
+
+err_free_compat:
+ snd_ac97_compat_release(wm97xx->ac97);
+ return ret;
+}
+
+static int wm97xx_ac97_remove(struct ac97_codec_device *adev)
+{
+ struct wm97xx_priv *wm97xx = ac97_get_drvdata(adev);
+
+ snd_ac97_compat_release(wm97xx->ac97);
+
+ return 0;
+}
+
+static const struct ac97_id wm97xx_ac97_ids[] = {
+ { .id = WM9705_VENDOR_ID, .mask = WM97xx_VENDOR_ID_MASK },
+ { .id = WM9712_VENDOR_ID, .mask = WM97xx_VENDOR_ID_MASK },
+ { .id = WM9713_VENDOR_ID, .mask = WM97xx_VENDOR_ID_MASK },
+ { }
+};
+
+static struct ac97_codec_driver wm97xx_ac97_driver = {
+ .driver = {
+ .name = "wm97xx-core",
+ },
+ .probe = wm97xx_ac97_probe,
+ .remove = wm97xx_ac97_remove,
+ .id_table = wm97xx_ac97_ids,
+};
+
+static int __init wm97xx_module_init(void)
+{
+ return snd_ac97_codec_driver_register(&wm97xx_ac97_driver);
+}
+module_init(wm97xx_module_init);
+
+static void __exit wm97xx_module_exit(void)
+{
+ snd_ac97_codec_driver_unregister(&wm97xx_ac97_driver);
+}
+module_exit(wm97xx_module_exit);
+
+MODULE_DESCRIPTION("WM9712, WM9713 core driver");
+MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
+MODULE_LICENSE("GPL");
+
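A minimal, hypothetical sketch of how a cell such as "wm97xx-ts" could consume the platform data attached above (setting cells[i].platform_data and pdata_size makes the MFD core copy it into each child device); the probe name and the vendor-ID register offset 0x7c are illustrative assumptions, not part of this patch:

	#include <linux/mfd/wm97xx.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/regmap.h>

	static int wm97xx_ts_example_probe(struct platform_device *pdev)
	{
		/* Copied from cells[i].platform_data by the MFD core. */
		struct wm97xx_platform_data *pdata = dev_get_platdata(&pdev->dev);
		unsigned int id;
		int ret;

		if (!pdata || !pdata->regmap)
			return -ENODEV;

		/* Read the codec vendor ID (0x7c) through the shared AC'97 regmap. */
		ret = regmap_read(pdata->regmap, 0x7c, &id);
		if (ret)
			return ret;

		dev_info(&pdev->dev, "found codec, vendor id1 0x%04x\n", id);
		return 0;
	}

The same platform data also carries the snd_ac97 compatibility handle (pdata->ac97) for code that still needs the legacy AC'97 accessors.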
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 8136dc7e863d..f1a5c2357b14 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -393,16 +393,6 @@ config SPEAR13XX_PCIE_GADGET
entry will be created for that controller. User can use these
sysfs node to configure PCIe EP as per his requirements.
-config TI_DAC7512
- tristate "Texas Instruments DAC7512"
- depends on SPI && SYSFS
- help
- If you say yes here you get support for the Texas Instruments
- DAC7512 16-bit digital-to-analog converter.
-
- This driver can also be built as a module. If so, the module
- will be called ti_dac7512.
-
config VMWARE_BALLOON
tristate "VMware Balloon Driver"
depends on VMWARE_VMCI && X86 && HYPERVISOR_GUEST
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index ad0e64fdba34..5ca5f64df478 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_ISL29003) += isl29003.o
obj-$(CONFIG_ISL29020) += isl29020.o
obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
obj-$(CONFIG_DS1682) += ds1682.o
-obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
diff --git a/drivers/misc/altera-stapl/Kconfig b/drivers/misc/altera-stapl/Kconfig
index 7f01d8e93992..8a828fe41fad 100644
--- a/drivers/misc/altera-stapl/Kconfig
+++ b/drivers/misc/altera-stapl/Kconfig
@@ -1,4 +1,5 @@
-comment "Altera FPGA firmware download module"
+comment "Altera FPGA firmware download module (requires I2C)"
+ depends on !I2C
config ALTERA_STAPL
tristate "Altera FPGA firmware download module"
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 1922cb8f6b88..1c5b7aec13d4 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -15,7 +15,6 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/idr.h>
@@ -904,7 +903,6 @@ struct c2port_device *c2port_device_register(char *name,
return ERR_PTR(-EINVAL);
c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
- kmemcheck_annotate_bitfield(c2dev, flags);
if (unlikely(!c2dev))
return ERR_PTR(-ENOMEM);
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 764ff5df0dbc..e0b4b36ef010 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
@@ -24,6 +25,7 @@
#include <linux/i2c.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_data/at24.h>
+#include <linux/pm_runtime.h>
/*
* I2C EEPROMs from most vendors are inexpensive and mostly interchangeable.
@@ -175,6 +177,64 @@ static const struct i2c_device_id at24_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, at24_ids);
+static const struct of_device_id at24_of_match[] = {
+ {
+ .compatible = "atmel,24c00",
+ .data = (void *)AT24_DEVICE_MAGIC(128 / 8, AT24_FLAG_TAKE8ADDR)
+ },
+ {
+ .compatible = "atmel,24c01",
+ .data = (void *)AT24_DEVICE_MAGIC(1024 / 8, 0)
+ },
+ {
+ .compatible = "atmel,24c02",
+ .data = (void *)AT24_DEVICE_MAGIC(2048 / 8, 0)
+ },
+ {
+ .compatible = "atmel,spd",
+ .data = (void *)AT24_DEVICE_MAGIC(2048 / 8,
+ AT24_FLAG_READONLY | AT24_FLAG_IRUGO)
+ },
+ {
+ .compatible = "atmel,24c04",
+ .data = (void *)AT24_DEVICE_MAGIC(4096 / 8, 0)
+ },
+ {
+ .compatible = "atmel,24c08",
+ .data = (void *)AT24_DEVICE_MAGIC(8192 / 8, 0)
+ },
+ {
+ .compatible = "atmel,24c16",
+ .data = (void *)AT24_DEVICE_MAGIC(16384 / 8, 0)
+ },
+ {
+ .compatible = "atmel,24c32",
+ .data = (void *)AT24_DEVICE_MAGIC(32768 / 8, AT24_FLAG_ADDR16)
+ },
+ {
+ .compatible = "atmel,24c64",
+ .data = (void *)AT24_DEVICE_MAGIC(65536 / 8, AT24_FLAG_ADDR16)
+ },
+ {
+ .compatible = "atmel,24c128",
+ .data = (void *)AT24_DEVICE_MAGIC(131072 / 8, AT24_FLAG_ADDR16)
+ },
+ {
+ .compatible = "atmel,24c256",
+ .data = (void *)AT24_DEVICE_MAGIC(262144 / 8, AT24_FLAG_ADDR16)
+ },
+ {
+ .compatible = "atmel,24c512",
+ .data = (void *)AT24_DEVICE_MAGIC(524288 / 8, AT24_FLAG_ADDR16)
+ },
+ {
+ .compatible = "atmel,24c1024",
+ .data = (void *)AT24_DEVICE_MAGIC(1048576 / 8, AT24_FLAG_ADDR16)
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, at24_of_match);
+
static const struct acpi_device_id at24_acpi_ids[] = {
{ "INT3499", AT24_DEVICE_MAGIC(8192 / 8, 0) },
{ }
@@ -501,11 +561,21 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
static int at24_read(void *priv, unsigned int off, void *val, size_t count)
{
struct at24_data *at24 = priv;
+ struct i2c_client *client;
char *buf = val;
+ int ret;
if (unlikely(!count))
return count;
+ client = at24_translate_offset(at24, &off);
+
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ return ret;
+ }
+
/*
* Read data from chip, protecting against concurrent updates
* from this host, but not from other I2C masters.
@@ -518,6 +588,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
status = at24->read_func(at24, buf, off, count);
if (status < 0) {
mutex_unlock(&at24->lock);
+ pm_runtime_put(&client->dev);
return status;
}
buf += status;
@@ -527,17 +598,29 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
mutex_unlock(&at24->lock);
+ pm_runtime_put(&client->dev);
+
return 0;
}
static int at24_write(void *priv, unsigned int off, void *val, size_t count)
{
struct at24_data *at24 = priv;
+ struct i2c_client *client;
char *buf = val;
+ int ret;
if (unlikely(!count))
return -EINVAL;
+ client = at24_translate_offset(at24, &off);
+
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ return ret;
+ }
+
/*
* Write data to chip, protecting against concurrent updates
* from this host, but not from other I2C masters.
@@ -550,6 +633,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
status = at24->write_func(at24, buf, off, count);
if (status < 0) {
mutex_unlock(&at24->lock);
+ pm_runtime_put(&client->dev);
return status;
}
buf += status;
@@ -559,6 +643,8 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
mutex_unlock(&at24->lock);
+ pm_runtime_put(&client->dev);
+
return 0;
}
@@ -570,6 +656,10 @@ static void at24_get_pdata(struct device *dev, struct at24_platform_data *chip)
if (device_property_present(dev, "read-only"))
chip->flags |= AT24_FLAG_READONLY;
+ err = device_property_read_u32(dev, "size", &val);
+ if (!err)
+ chip->byte_len = val;
+
err = device_property_read_u32(dev, "pagesize", &val);
if (!err) {
chip->page_size = val;
@@ -598,7 +688,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (client->dev.platform_data) {
chip = *(struct at24_platform_data *)client->dev.platform_data;
} else {
- if (id) {
+ /*
+ * The I2C core allows OF nodes compatibles to match against the
+ * I2C device ID table as a fallback, so check not only if an OF
+ * node is present but also if it matches an OF device ID entry.
+ */
+ if (client->dev.of_node &&
+ of_match_device(at24_of_match, &client->dev)) {
+ magic = (kernel_ulong_t)
+ of_device_get_match_data(&client->dev);
+ } else if (id) {
magic = id->driver_data;
} else {
const struct acpi_device_id *aid;
@@ -739,11 +838,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
i2c_set_clientdata(client, at24);
+ /* enable runtime pm */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
/*
* Perform a one-byte test read to verify that the
* chip is functional.
*/
err = at24_read(at24, 0, &test_byte, 1);
+ pm_runtime_idle(&client->dev);
if (err) {
err = -ENODEV;
goto err_clients;
@@ -791,6 +895,8 @@ err_clients:
if (at24->client[i])
i2c_unregister_device(at24->client[i]);
+ pm_runtime_disable(&client->dev);
+
return err;
}
@@ -806,6 +912,9 @@ static int at24_remove(struct i2c_client *client)
for (i = 1; i < at24->num_addresses; i++)
i2c_unregister_device(at24->client[i]);
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
return 0;
}
@@ -814,6 +923,7 @@ static int at24_remove(struct i2c_client *client)
static struct i2c_driver at24_driver = {
.driver = {
.name = "at24",
+ .of_match_table = at24_of_match,
.acpi_match_table = ACPI_PTR(at24_acpi_ids),
},
.probe = at24_probe,
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index 5813b5f25006..3743c87f8ab9 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -182,6 +182,7 @@ struct dma_mapping {
struct list_head card_list; /* list of usr_maps for card */
struct list_head pin_list; /* list of pinned memory for dev */
+ int write; /* writable map? useful in unmapping */
};
static inline void genwqe_mapping_init(struct dma_mapping *m,
@@ -189,6 +190,7 @@ static inline void genwqe_mapping_init(struct dma_mapping *m,
{
memset(m, 0, sizeof(*m));
m->type = type;
+ m->write = 1; /* Assume the maps we create are R/W */
}
/**
@@ -347,6 +349,7 @@ enum genwqe_requ_state {
* @user_size: size of user-space memory area
* @page: buffer for partial pages if needed
* @page_dma_addr: dma address partial pages
+ * @write: should we write it back to userspace?
*/
struct genwqe_sgl {
dma_addr_t sgl_dma_addr;
@@ -356,6 +359,8 @@ struct genwqe_sgl {
void __user *user_addr; /* user-space base-address */
size_t user_size; /* size of memory area */
+ int write;
+
unsigned long nr_pages;
unsigned long fpage_offs;
size_t fpage_size;
@@ -369,7 +374,7 @@ struct genwqe_sgl {
};
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
- void __user *user_addr, size_t user_size);
+ void __user *user_addr, size_t user_size, int write);
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
dma_addr_t *dma_list);
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index dd4617764f14..3ecfa35457e0 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -942,6 +942,10 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
genwqe_mapping_init(m,
GENWQE_MAPPING_SGL_TEMP);
+
+ if (ats_flags == ATS_TYPE_SGL_RD)
+ m->write = 0;
+
rc = genwqe_user_vmap(cd, m, (void *)u_addr,
u_size, req);
if (rc != 0)
@@ -954,7 +958,7 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
/* create genwqe style scatter gather list */
rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
(void __user *)u_addr,
- u_size);
+ u_size, m->write);
if (rc != 0)
goto err_out;
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 147b83011b58..5c0d917636f7 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -296,7 +296,7 @@ static int genwqe_sgl_size(int num_pages)
* from user-space into the cached pages.
*/
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
- void __user *user_addr, size_t user_size)
+ void __user *user_addr, size_t user_size, int write)
{
int rc;
struct pci_dev *pci_dev = cd->pci_dev;
@@ -312,6 +312,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
sgl->user_addr = user_addr;
sgl->user_size = user_size;
+ sgl->write = write;
sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);
if (get_order(sgl->sgl_size) > MAX_ORDER) {
@@ -476,14 +477,20 @@ int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
int rc = 0;
+ size_t offset;
+ unsigned long res;
struct pci_dev *pci_dev = cd->pci_dev;
if (sgl->fpage) {
- if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs,
- sgl->fpage_size)) {
- dev_err(&pci_dev->dev, "[%s] err: copying fpage!\n",
- __func__);
- rc = -EFAULT;
+ if (sgl->write) {
+ res = copy_to_user(sgl->user_addr,
+ sgl->fpage + sgl->fpage_offs, sgl->fpage_size);
+ if (res) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: copying fpage! (res=%lu)\n",
+ __func__, res);
+ rc = -EFAULT;
+ }
}
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
sgl->fpage_dma_addr);
@@ -491,12 +498,16 @@ int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
sgl->fpage_dma_addr = 0;
}
if (sgl->lpage) {
- if (copy_to_user(sgl->user_addr + sgl->user_size -
- sgl->lpage_size, sgl->lpage,
- sgl->lpage_size)) {
- dev_err(&pci_dev->dev, "[%s] err: copying lpage!\n",
- __func__);
- rc = -EFAULT;
+ if (sgl->write) {
+ offset = sgl->user_size - sgl->lpage_size;
+ res = copy_to_user(sgl->user_addr + offset, sgl->lpage,
+ sgl->lpage_size);
+ if (res) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: copying lpage! (res=%lu)\n",
+ __func__, res);
+ rc = -EFAULT;
+ }
}
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
sgl->lpage_dma_addr);
@@ -599,14 +610,14 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
/* pin user pages in memory */
rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
m->nr_pages,
- 1, /* write by caller */
+ m->write, /* readable/writable */
m->page_list); /* ptrs to pages */
if (rc < 0)
goto fail_get_user_pages;
/* assumption: get_user_pages can be killed by signals. */
if (rc < m->nr_pages) {
- free_user_pages(m->page_list, rc, 0);
+ free_user_pages(m->page_list, rc, m->write);
rc = -EFAULT;
goto fail_get_user_pages;
}
@@ -618,7 +629,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
return 0;
fail_free_user_pages:
- free_user_pages(m->page_list, m->nr_pages, 0);
+ free_user_pages(m->page_list, m->nr_pages, m->write);
fail_get_user_pages:
kfree(m->page_list);
@@ -651,7 +662,7 @@ int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
if (m->page_list) {
- free_user_pages(m->page_list, m->nr_pages, 1);
+ free_user_pages(m->page_list, m->nr_pages, m->write);
kfree(m->page_list);
m->page_list = NULL;
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index 8e540f4e9d52..7e33025b4854 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -155,7 +155,7 @@ int ibmasm_event_buffer_init(struct service_processor *sp)
buffer = kmalloc(sizeof(struct event_buffer), GFP_KERNEL);
if (!buffer)
- return 1;
+ return -ENOMEM;
buffer->next_index = 0;
buffer->next_serial_number = 1;
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index c5a456b0a564..e914b8c80943 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -94,12 +94,14 @@ static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
snprintf(sp->dirname, IBMASM_NAME_SIZE, "%d", sp->number);
snprintf(sp->devname, IBMASM_NAME_SIZE, "%s%d", DRIVER_NAME, sp->number);
- if (ibmasm_event_buffer_init(sp)) {
+ result = ibmasm_event_buffer_init(sp);
+ if (result) {
dev_err(sp->dev, "Failed to allocate event buffer\n");
goto error_eventbuffer;
}
- if (ibmasm_heartbeat_init(sp)) {
+ result = ibmasm_heartbeat_init(sp);
+ if (result) {
dev_err(sp->dev, "Failed to allocate heartbeat command\n");
goto error_heartbeat;
}
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index fc7efedbc4be..24108bfad889 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -1132,7 +1132,8 @@ static void kgdbts_put_char(u8 chr)
ts.run_test(0, chr);
}
-static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
+static int param_set_kgdbts_var(const char *kmessage,
+ const struct kernel_param *kp)
{
int len = strlen(kmessage);
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index 981b3ef71e47..ba92291508dc 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -56,122 +56,54 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
size_t count, loff_t *off);
#ifdef CONFIG_KPROBES
-static void lkdtm_handler(void);
+static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
static ssize_t lkdtm_debugfs_entry(struct file *f,
const char __user *user_buf,
size_t count, loff_t *off);
-
-
-/* jprobe entry point handlers. */
-static unsigned int jp_do_irq(unsigned int irq)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static irqreturn_t jp_handle_irq_event(unsigned int irq,
- struct irqaction *action)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static void jp_tasklet_action(struct softirq_action *a)
-{
- lkdtm_handler();
- jprobe_return();
-}
-
-static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
-{
- lkdtm_handler();
- jprobe_return();
-}
-
-struct scan_control;
-
-static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
- struct zone *zone,
- struct scan_control *sc)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
- const enum hrtimer_mode mode)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-# ifdef CONFIG_IDE
-static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
- struct block_device *bdev, unsigned int cmd,
- unsigned long arg)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-# endif
+# define CRASHPOINT_KPROBE(_symbol) \
+ .kprobe = { \
+ .symbol_name = (_symbol), \
+ .pre_handler = lkdtm_kprobe_handler, \
+ },
+# define CRASHPOINT_WRITE(_symbol) \
+ (_symbol) ? lkdtm_debugfs_entry : direct_entry
+#else
+# define CRASHPOINT_KPROBE(_symbol)
+# define CRASHPOINT_WRITE(_symbol) direct_entry
#endif
/* Crash points */
struct crashpoint {
const char *name;
const struct file_operations fops;
- struct jprobe jprobe;
+ struct kprobe kprobe;
};
-#define CRASHPOINT(_name, _write, _symbol, _entry) \
+#define CRASHPOINT(_name, _symbol) \
{ \
.name = _name, \
.fops = { \
.read = lkdtm_debugfs_read, \
.llseek = generic_file_llseek, \
.open = lkdtm_debugfs_open, \
- .write = _write, \
- }, \
- .jprobe = { \
- .kp.symbol_name = _symbol, \
- .entry = (kprobe_opcode_t *)_entry, \
+ .write = CRASHPOINT_WRITE(_symbol) \
}, \
+ CRASHPOINT_KPROBE(_symbol) \
}
/* Define the possible places where we can trigger a crash point. */
-struct crashpoint crashpoints[] = {
- CRASHPOINT("DIRECT", direct_entry,
- NULL, NULL),
+static struct crashpoint crashpoints[] = {
+ CRASHPOINT("DIRECT", NULL),
#ifdef CONFIG_KPROBES
- CRASHPOINT("INT_HARDWARE_ENTRY", lkdtm_debugfs_entry,
- "do_IRQ", jp_do_irq),
- CRASHPOINT("INT_HW_IRQ_EN", lkdtm_debugfs_entry,
- "handle_IRQ_event", jp_handle_irq_event),
- CRASHPOINT("INT_TASKLET_ENTRY", lkdtm_debugfs_entry,
- "tasklet_action", jp_tasklet_action),
- CRASHPOINT("FS_DEVRW", lkdtm_debugfs_entry,
- "ll_rw_block", jp_ll_rw_block),
- CRASHPOINT("MEM_SWAPOUT", lkdtm_debugfs_entry,
- "shrink_inactive_list", jp_shrink_inactive_list),
- CRASHPOINT("TIMERADD", lkdtm_debugfs_entry,
- "hrtimer_start", jp_hrtimer_start),
- CRASHPOINT("SCSI_DISPATCH_CMD", lkdtm_debugfs_entry,
- "scsi_dispatch_cmd", jp_scsi_dispatch_cmd),
+ CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ"),
+ CRASHPOINT("INT_HW_IRQ_EN", "handle_IRQ_event"),
+ CRASHPOINT("INT_TASKLET_ENTRY", "tasklet_action"),
+ CRASHPOINT("FS_DEVRW", "ll_rw_block"),
+ CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
+ CRASHPOINT("TIMERADD", "hrtimer_start"),
+ CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"),
# ifdef CONFIG_IDE
- CRASHPOINT("IDE_CORE_CP", lkdtm_debugfs_entry,
- "generic_ide_ioctl", jp_generic_ide_ioctl),
+ CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"),
# endif
#endif
};
@@ -190,7 +122,7 @@ struct crashtype {
}
/* Define the possible types of crashes that can be triggered. */
-struct crashtype crashtypes[] = {
+static const struct crashtype crashtypes[] = {
CRASHTYPE(PANIC),
CRASHTYPE(BUG),
CRASHTYPE(WARNING),
@@ -254,10 +186,10 @@ struct crashtype crashtypes[] = {
};
-/* Global jprobe entry and crashtype. */
-static struct jprobe *lkdtm_jprobe;
-struct crashpoint *lkdtm_crashpoint;
-struct crashtype *lkdtm_crashtype;
+/* Global kprobe entry and crashtype. */
+static struct kprobe *lkdtm_kprobe;
+static struct crashpoint *lkdtm_crashpoint;
+static const struct crashtype *lkdtm_crashtype;
/* Module parameters */
static int recur_count = -1;
@@ -280,7 +212,7 @@ MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
/* Return the crashtype number or NULL if the name is invalid */
-static struct crashtype *find_crashtype(const char *name)
+static const struct crashtype *find_crashtype(const char *name)
{
int i;
@@ -296,34 +228,35 @@ static struct crashtype *find_crashtype(const char *name)
* This is forced noinline just so it distinctly shows up in the stackdump
* which makes validation of expected lkdtm crashes easier.
*/
-static noinline void lkdtm_do_action(struct crashtype *crashtype)
+static noinline void lkdtm_do_action(const struct crashtype *crashtype)
{
- BUG_ON(!crashtype || !crashtype->func);
+ if (WARN_ON(!crashtype || !crashtype->func))
+ return;
crashtype->func();
}
static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
- struct crashtype *crashtype)
+ const struct crashtype *crashtype)
{
int ret;
/* If this doesn't have a symbol, just call immediately. */
- if (!crashpoint->jprobe.kp.symbol_name) {
+ if (!crashpoint->kprobe.symbol_name) {
lkdtm_do_action(crashtype);
return 0;
}
- if (lkdtm_jprobe != NULL)
- unregister_jprobe(lkdtm_jprobe);
+ if (lkdtm_kprobe != NULL)
+ unregister_kprobe(lkdtm_kprobe);
lkdtm_crashpoint = crashpoint;
lkdtm_crashtype = crashtype;
- lkdtm_jprobe = &crashpoint->jprobe;
- ret = register_jprobe(lkdtm_jprobe);
+ lkdtm_kprobe = &crashpoint->kprobe;
+ ret = register_kprobe(lkdtm_kprobe);
if (ret < 0) {
- pr_info("Couldn't register jprobe %s\n",
- crashpoint->jprobe.kp.symbol_name);
- lkdtm_jprobe = NULL;
+ pr_info("Couldn't register kprobe %s\n",
+ crashpoint->kprobe.symbol_name);
+ lkdtm_kprobe = NULL;
lkdtm_crashpoint = NULL;
lkdtm_crashtype = NULL;
}
@@ -336,13 +269,14 @@ static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
static int crash_count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(crash_count_lock);
-/* Called by jprobe entry points. */
-static void lkdtm_handler(void)
+/* Called by kprobe entry points. */
+static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
{
unsigned long flags;
bool do_it = false;
- BUG_ON(!lkdtm_crashpoint || !lkdtm_crashtype);
+ if (WARN_ON(!lkdtm_crashpoint || !lkdtm_crashtype))
+ return 0;
spin_lock_irqsave(&crash_count_lock, flags);
crash_count--;
@@ -357,6 +291,8 @@ static void lkdtm_handler(void)
if (do_it)
lkdtm_do_action(lkdtm_crashtype);
+
+ return 0;
}
static ssize_t lkdtm_debugfs_entry(struct file *f,
@@ -364,7 +300,7 @@ static ssize_t lkdtm_debugfs_entry(struct file *f,
size_t count, loff_t *off)
{
struct crashpoint *crashpoint = file_inode(f)->i_private;
- struct crashtype *crashtype = NULL;
+ const struct crashtype *crashtype = NULL;
char *buf;
int err;
@@ -432,7 +368,7 @@ static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
size_t count, loff_t *off)
{
- struct crashtype *crashtype;
+ const struct crashtype *crashtype;
char *buf;
if (count >= PAGE_SIZE)
@@ -468,7 +404,7 @@ static struct dentry *lkdtm_debugfs_root;
static int __init lkdtm_module_init(void)
{
struct crashpoint *crashpoint = NULL;
- struct crashtype *crashtype = NULL;
+ const struct crashtype *crashtype = NULL;
int ret = -EINVAL;
int i;
@@ -556,8 +492,8 @@ static void __exit lkdtm_module_exit(void)
/* Handle test-specific clean-up. */
lkdtm_usercopy_exit();
- if (lkdtm_jprobe != NULL)
- unregister_jprobe(lkdtm_jprobe);
+ if (lkdtm_kprobe != NULL)
+ unregister_kprobe(lkdtm_kprobe);
pr_info("Crash point unregistered\n");
}
diff --git a/drivers/misc/mei/mei-trace.c b/drivers/misc/mei/mei-trace.c
index e19e6acb191b..374edde72a14 100644
--- a/drivers/misc/mei/mei-trace.c
+++ b/drivers/misc/mei/mei-trace.c
@@ -23,5 +23,4 @@
EXPORT_TRACEPOINT_SYMBOL(mei_reg_read);
EXPORT_TRACEPOINT_SYMBOL(mei_reg_write);
EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_read);
-EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_write);
#endif /* __CHECKER__ */
diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h
index 7d2d5d4a1624..b52e9b97a7c0 100644
--- a/drivers/misc/mei/mei-trace.h
+++ b/drivers/misc/mei/mei-trace.h
@@ -83,25 +83,6 @@ TRACE_EVENT(mei_pci_cfg_read,
__get_str(dev), __entry->reg, __entry->offs, __entry->val)
);
-TRACE_EVENT(mei_pci_cfg_write,
- TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
- TP_ARGS(dev, reg, offs, val),
- TP_STRUCT__entry(
- __string(dev, dev_name(dev))
- __field(const char *, reg)
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- __assign_str(dev, dev_name(dev))
- __entry->reg = reg;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%s] pci cfg write %s[%#x] = %#x",
- __get_str(dev), __entry->reg, __entry->offs, __entry->val)
-);
-
#endif /* _MEI_TRACE_H_ */
/* This part must be outside protection */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 78b3172c8e6e..f4f17552c9b8 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -225,7 +225,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* MEI requires to resume from runtime suspend mode
* in order to perform link reset flow upon system suspend.
*/
- pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
/*
* ME maps runtime suspend/resume to D0i states,
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 0566f9bfa7de..e1b909123fb0 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -141,7 +141,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* MEI requires to resume from runtime suspend mode
* in order to perform link reset flow upon system suspend.
*/
- pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
/*
* TXE maps runtime suspend/resume to own power gating states,
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 6fd9d367dea7..227cc7443671 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -1,3 +1,5 @@
+menu "Intel MIC & related support"
+
comment "Intel MIC Bus Driver"
config INTEL_MIC_BUS
@@ -150,3 +152,5 @@ config VOP
if VOP
source "drivers/vhost/Kconfig.vringh"
endif
+
+endmenu
diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c
index 637cc4686742..b665757ca89a 100644
--- a/drivers/misc/mic/scif/scif_rb.c
+++ b/drivers/misc/mic/scif/scif_rb.c
@@ -138,7 +138,7 @@ void scif_rb_commit(struct scif_rb *rb)
* the read barrier in scif_rb_count(..)
*/
wmb();
- ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+ WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#ifdef CONFIG_INTEL_MIC_CARD
/*
* X100 Si bug: For the case where a Core is performing an EXT_WR
@@ -147,7 +147,7 @@ void scif_rb_commit(struct scif_rb *rb)
* This way, if ordering is violated for the Interrupt Message, it will
* fall just behind the first Posted associated with the first EXT_WR.
*/
- ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+ WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#endif
}
@@ -210,7 +210,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
* scif_rb_space(..)
*/
mb();
- ACCESS_ONCE(*rb->read_ptr) = new_offset;
+ WRITE_ONCE(*rb->read_ptr, new_offset);
#ifdef CONFIG_INTEL_MIC_CARD
/*
* X100 Si Bug: For the case where a Core is performing an EXT_WR
@@ -219,7 +219,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
* This way, if ordering is violated for the Interrupt Message, it will
* fall just behind the first Posted associated with the first EXT_WR.
*/
- ACCESS_ONCE(*rb->read_ptr) = new_offset;
+ WRITE_ONCE(*rb->read_ptr, new_offset);
#endif
}
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 329727e00e97..c824329f7012 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -39,8 +39,7 @@ void scif_rma_ep_init(struct scif_endpt *ep)
struct scif_endpt_rma_info *rma = &ep->rma_info;
mutex_init(&rma->rma_lock);
- init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN,
- SCIF_DMA_64BIT_PFN);
+ init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN);
spin_lock_init(&rma->tc_lock);
mutex_init(&rma->mmn_lock);
INIT_LIST_HEAD(&rma->reg_list);
diff --git a/drivers/misc/mic/scif/scif_rma_list.c b/drivers/misc/mic/scif/scif_rma_list.c
index e1ef8daedd5a..a036dbb4101e 100644
--- a/drivers/misc/mic/scif/scif_rma_list.c
+++ b/drivers/misc/mic/scif/scif_rma_list.c
@@ -277,7 +277,7 @@ retry:
* Need to restart list traversal if there has been
* an asynchronous list entry deletion.
*/
- if (ACCESS_ONCE(ep->rma_info.async_list_del))
+ if (READ_ONCE(ep->rma_info.async_list_del))
goto retry;
}
mutex_unlock(&ep->rma_info.rma_lock);
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index deb203026496..320276f42653 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -92,6 +92,7 @@ struct pci_endpoint_test {
void __iomem *bar[6];
struct completion irq_raised;
int last_irq;
+ int num_irqs;
/* mutex to protect the ioctls */
struct mutex mutex;
struct miscdevice miscdev;
@@ -226,6 +227,9 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
u32 src_crc32;
u32 dst_crc32;
+ if (size > SIZE_MAX - alignment)
+ goto err;
+
orig_src_addr = dma_alloc_coherent(dev, size + alignment,
&orig_src_phys_addr, GFP_KERNEL);
if (!orig_src_addr) {
@@ -311,6 +315,9 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
size_t alignment = test->alignment;
u32 crc32;
+ if (size > SIZE_MAX - alignment)
+ goto err;
+
orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
GFP_KERNEL);
if (!orig_addr) {
@@ -369,6 +376,9 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
size_t alignment = test->alignment;
u32 crc32;
+ if (size > SIZE_MAX - alignment)
+ goto err;
+
orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
GFP_KERNEL);
if (!orig_addr) {
@@ -504,6 +514,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
if (irq < 0)
dev_err(dev, "failed to get MSI interrupts\n");
+ test->num_irqs = irq;
}
err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
@@ -533,6 +544,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
test->base = test->bar[test_reg_bar];
if (!test->base) {
+ err = -ENOMEM;
dev_err(dev, "Cannot perform PCI test without BAR%d\n",
test_reg_bar);
goto err_iounmap;
@@ -542,6 +554,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
+ err = id;
dev_err(dev, "unable to get id\n");
goto err_iounmap;
}
@@ -549,17 +562,24 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
misc_device = &test->miscdev;
misc_device->minor = MISC_DYNAMIC_MINOR;
- misc_device->name = name;
+ misc_device->name = kstrdup(name, GFP_KERNEL);
+ if (!misc_device->name) {
+ err = -ENOMEM;
+ goto err_ida_remove;
+ }
misc_device->fops = &pci_endpoint_test_fops,
err = misc_register(misc_device);
if (err) {
dev_err(dev, "failed to register device\n");
- goto err_ida_remove;
+ goto err_kfree_name;
}
return 0;
+err_kfree_name:
+ kfree(misc_device->name);
+
err_ida_remove:
ida_simple_remove(&pci_endpoint_test_ida, id);
@@ -569,6 +589,9 @@ err_iounmap:
pci_iounmap(pdev, test->bar[bar]);
}
+ for (i = 0; i < irq; i++)
+ devm_free_irq(dev, pdev->irq + i, test);
+
err_disable_msi:
pci_disable_msi(pdev);
pci_release_regions(pdev);
@@ -582,19 +605,25 @@ err_disable_pdev:
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
int id;
+ int i;
enum pci_barno bar;
struct pci_endpoint_test *test = pci_get_drvdata(pdev);
struct miscdevice *misc_device = &test->miscdev;
if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
return;
+ if (id < 0)
+ return;
misc_deregister(&test->miscdev);
+ kfree(misc_device->name);
ida_simple_remove(&pci_endpoint_test_ida, id);
for (bar = BAR_0; bar <= BAR_5; bar++) {
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
+ for (i = 0; i < test->num_irqs; i++)
+ devm_free_irq(&pdev->dev, pdev->irq + i, test);
pci_disable_msi(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c
deleted file mode 100644
index f5456fb7d773..000000000000
--- a/drivers/misc/ti_dac7512.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * dac7512.c - Linux kernel module for
- * Texas Instruments DAC7512
- *
- * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/spi/spi.h>
-#include <linux/of.h>
-
-static ssize_t dac7512_store_val(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct spi_device *spi = to_spi_device(dev);
- unsigned char tmp[2];
- unsigned long val;
- int ret;
-
- ret = kstrtoul(buf, 10, &val);
- if (ret)
- return ret;
-
- tmp[0] = val >> 8;
- tmp[1] = val & 0xff;
- spi_write(spi, tmp, sizeof(tmp));
- return count;
-}
-
-static DEVICE_ATTR(value, S_IWUSR, NULL, dac7512_store_val);
-
-static struct attribute *dac7512_attributes[] = {
- &dev_attr_value.attr,
- NULL
-};
-
-static const struct attribute_group dac7512_attr_group = {
- .attrs = dac7512_attributes,
-};
-
-static int dac7512_probe(struct spi_device *spi)
-{
- int ret;
-
- spi->bits_per_word = 8;
- spi->mode = SPI_MODE_0;
- ret = spi_setup(spi);
- if (ret < 0)
- return ret;
-
- return sysfs_create_group(&spi->dev.kobj, &dac7512_attr_group);
-}
-
-static int dac7512_remove(struct spi_device *spi)
-{
- sysfs_remove_group(&spi->dev.kobj, &dac7512_attr_group);
- return 0;
-}
-
-static const struct spi_device_id dac7512_id_table[] = {
- { "dac7512", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(spi, dac7512_id_table);
-
-#ifdef CONFIG_OF
-static const struct of_device_id dac7512_of_match[] = {
- { .compatible = "ti,dac7512", },
- { }
-};
-MODULE_DEVICE_TABLE(of, dac7512_of_match);
-#endif
-
-static struct spi_driver dac7512_driver = {
- .driver = {
- .name = "dac7512",
- .of_match_table = of_match_ptr(dac7512_of_match),
- },
- .probe = dac7512_probe,
- .remove = dac7512_remove,
- .id_table = dac7512_id_table,
-};
-
-module_spi_driver(dac7512_driver);
-
-MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
-MODULE_DESCRIPTION("DAC7512 16-bit DAC");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 1e688bfec567..9047c0a529b2 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1271,7 +1271,7 @@ static int __init vmballoon_init(void)
* Check if we are running on VMware's hypervisor and bail out
* if we are not.
*/
- if (x86_hyper != &x86_hyper_vmware)
+ if (x86_hyper_type != X86_HYPER_VMWARE)
return -ENODEV;
for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 2ad7b5c69156..ea80ff4cd7f9 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -28,6 +28,7 @@
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
+#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
@@ -86,6 +87,7 @@ static int max_devices;
#define MAX_DEVICES 256
static DEFINE_IDA(mmc_blk_ida);
+static DEFINE_IDA(mmc_rpmb_ida);
/*
* There is one mmc_blk_data per slot.
@@ -96,6 +98,7 @@ struct mmc_blk_data {
struct gendisk *disk;
struct mmc_queue queue;
struct list_head part;
+ struct list_head rpmbs;
unsigned int flags;
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
@@ -121,6 +124,32 @@ struct mmc_blk_data {
int area_type;
};
+/* Device type for RPMB character devices */
+static dev_t mmc_rpmb_devt;
+
+/* Bus type for RPMB character devices */
+static struct bus_type mmc_rpmb_bus_type = {
+ .name = "mmc_rpmb",
+};
+
+/**
+ * struct mmc_rpmb_data - special RPMB device type for these areas
+ * @dev: the device for the RPMB area
+ * @chrdev: character device for the RPMB area
+ * @id: unique device ID number
+ * @part_index: partition index (0 on first)
+ * @md: parent MMC block device
+ * @node: list item, so we can put this device on a list
+ */
+struct mmc_rpmb_data {
+ struct device dev;
+ struct cdev chrdev;
+ int id;
+ unsigned int part_index;
+ struct mmc_blk_data *md;
+ struct list_head node;
+};
+
static DEFINE_MUTEX(open_lock);
module_param(perdev_minors, int, 0444);
@@ -299,6 +328,7 @@ struct mmc_blk_ioc_data {
struct mmc_ioc_cmd ic;
unsigned char *buf;
u64 buf_bytes;
+ struct mmc_rpmb_data *rpmb;
};
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
@@ -437,14 +467,25 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_request mrq = {};
struct scatterlist sg;
int err;
- bool is_rpmb = false;
+ unsigned int target_part;
u32 status = 0;
if (!card || !md || !idata)
return -EINVAL;
- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
- is_rpmb = true;
+ /*
+ * RPMB accesses come in from the character device, so we need
+ * to target them explicitly. Otherwise we just target the
+ * partition type of the block device the ioctl() was issued
+ * on.
+ */
+ if (idata->rpmb) {
+ /* Support multiple RPMB partitions */
+ target_part = idata->rpmb->part_index;
+ target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
+ } else {
+ target_part = md->part_type;
+ }
cmd.opcode = idata->ic.opcode;
cmd.arg = idata->ic.arg;
@@ -488,7 +529,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
mrq.cmd = &cmd;
- err = mmc_blk_part_switch(card, md->part_type);
+ err = mmc_blk_part_switch(card, target_part);
if (err)
return err;
@@ -498,7 +539,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
return err;
}
- if (is_rpmb) {
+ if (idata->rpmb) {
err = mmc_set_blockcount(card, data.blocks,
idata->ic.write_flag & (1 << 31));
if (err)
@@ -538,7 +579,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
- if (is_rpmb) {
+ if (idata->rpmb) {
/*
* Ensure RPMB command has completed by polling CMD13
* "Send Status".
@@ -554,7 +595,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
}
static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
- struct mmc_ioc_cmd __user *ic_ptr)
+ struct mmc_ioc_cmd __user *ic_ptr,
+ struct mmc_rpmb_data *rpmb)
{
struct mmc_blk_ioc_data *idata;
struct mmc_blk_ioc_data *idatas[1];
@@ -566,6 +608,8 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
if (IS_ERR(idata))
return PTR_ERR(idata);
+ /* This will be NULL on non-RPMB ioctl()s */
+ idata->rpmb = rpmb;
card = md->queue.card;
if (IS_ERR(card)) {
@@ -581,7 +625,8 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
__GFP_RECLAIM);
idatas[0] = idata;
- req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op =
+ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
blk_execute_rq(mq->queue, NULL, req, 0);
@@ -596,7 +641,8 @@ cmd_done:
}
static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
- struct mmc_ioc_multi_cmd __user *user)
+ struct mmc_ioc_multi_cmd __user *user,
+ struct mmc_rpmb_data *rpmb)
{
struct mmc_blk_ioc_data **idata = NULL;
struct mmc_ioc_cmd __user *cmds = user->cmds;
@@ -627,6 +673,8 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
num_of_cmds = i;
goto cmd_err;
}
+ /* This will be NULL on non-RPMB ioctl()s */
+ idata[i]->rpmb = rpmb;
}
card = md->queue.card;
@@ -643,7 +691,8 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
req = blk_get_request(mq->queue,
idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
__GFP_RECLAIM);
- req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op =
+ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
blk_execute_rq(mq->queue, NULL, req, 0);
@@ -691,7 +740,8 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
if (!md)
return -EINVAL;
ret = mmc_blk_ioctl_cmd(md,
- (struct mmc_ioc_cmd __user *)arg);
+ (struct mmc_ioc_cmd __user *)arg,
+ NULL);
mmc_blk_put(md);
return ret;
case MMC_IOC_MULTI_CMD:
@@ -702,7 +752,8 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
if (!md)
return -EINVAL;
ret = mmc_blk_ioctl_multi_cmd(md,
- (struct mmc_ioc_multi_cmd __user *)arg);
+ (struct mmc_ioc_multi_cmd __user *)arg,
+ NULL);
mmc_blk_put(md);
return ret;
default:
@@ -1152,18 +1203,6 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
md->reset_done &= ~type;
}
-int mmc_access_rpmb(struct mmc_queue *mq)
-{
- struct mmc_blk_data *md = mq->blkdata;
- /*
- * If this is a RPMB partition access, return ture
- */
- if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
- return true;
-
- return false;
-}
-
/*
* The non-block commands come back from the block layer after it queued it and
* processed it with all other requests and then they get issued in this
@@ -1174,17 +1213,19 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
struct mmc_queue_req *mq_rq;
struct mmc_card *card = mq->card;
struct mmc_blk_data *md = mq->blkdata;
- struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
struct mmc_blk_ioc_data **idata;
+ bool rpmb_ioctl;
u8 **ext_csd;
u32 status;
int ret;
int i;
mq_rq = req_to_mmc_queue_req(req);
+ rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);
switch (mq_rq->drv_op) {
case MMC_DRV_OP_IOCTL:
+ case MMC_DRV_OP_IOCTL_RPMB:
idata = mq_rq->drv_op_data;
for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
@@ -1192,8 +1233,8 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
break;
}
/* Always switch back to main area after RPMB access */
- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
- mmc_blk_part_switch(card, main_md->part_type);
+ if (rpmb_ioctl)
+ mmc_blk_part_switch(card, 0);
break;
case MMC_DRV_OP_BOOT_WP:
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
@@ -1534,25 +1575,27 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
- int disable_multi, bool *do_rel_wr,
- bool *do_data_tag)
+ int disable_multi, bool *do_rel_wr_p,
+ bool *do_data_tag_p)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
struct mmc_blk_request *brq = &mqrq->brq;
struct request *req = mmc_queue_req_to_req(mqrq);
+ bool do_rel_wr, do_data_tag;
/*
* Reliable writes are used to implement Forced Unit Access and
* are supported only on MMCs.
*/
- *do_rel_wr = (req->cmd_flags & REQ_FUA) &&
- rq_data_dir(req) == WRITE &&
- (md->flags & MMC_BLK_REL_WR);
+ do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+ rq_data_dir(req) == WRITE &&
+ (md->flags & MMC_BLK_REL_WR);
memset(brq, 0, sizeof(struct mmc_blk_request));
brq->mrq.data = &brq->data;
+ brq->mrq.tag = req->tag;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
@@ -1567,6 +1610,14 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blksz = 512;
brq->data.blocks = blk_rq_sectors(req);
+ brq->data.blk_addr = blk_rq_pos(req);
+
+ /*
+ * The command queue supports 2 priorities: "high" (1) and "simple" (0).
+ * The eMMC will give "high" priority tasks priority over "simple"
+ * priority tasks. Here we always set "simple" priority by not setting
+ * MMC_DATA_PRIO.
+ */
/*
* The block layer doesn't support all sector count
@@ -1596,18 +1647,23 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blocks);
}
- if (*do_rel_wr)
+ if (do_rel_wr) {
mmc_apply_rel_rw(brq, card, req);
+ brq->data.flags |= MMC_DATA_REL_WR;
+ }
/*
* Data tag is used only during writing meta data to speed
* up write and any subsequent read of this meta data
*/
- *do_data_tag = card->ext_csd.data_tag_unit_size &&
- (req->cmd_flags & REQ_META) &&
- (rq_data_dir(req) == WRITE) &&
- ((brq->data.blocks * brq->data.blksz) >=
- card->ext_csd.data_tag_unit_size);
+ do_data_tag = card->ext_csd.data_tag_unit_size &&
+ (req->cmd_flags & REQ_META) &&
+ (rq_data_dir(req) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+
+ if (do_data_tag)
+ brq->data.flags |= MMC_DATA_DAT_TAG;
mmc_set_data_timeout(&brq->data, card);
@@ -1634,6 +1690,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
}
mqrq->areq.mrq = &brq->mrq;
+
+ if (do_rel_wr_p)
+ *do_rel_wr_p = do_rel_wr;
+
+ if (do_data_tag_p)
+ *do_data_tag_p = do_data_tag;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1948,7 +2010,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
if (req && !mq->qcnt)
/* claim host only for the first request */
- mmc_get_card(card);
+ mmc_get_card(card, NULL);
ret = mmc_blk_part_switch(card, md->part_type);
if (ret) {
@@ -2011,7 +2073,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
out:
if (!mq->qcnt)
- mmc_put_card(card);
+ mmc_put_card(card, NULL);
}
static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -2068,6 +2130,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
spin_lock_init(&md->lock);
INIT_LIST_HEAD(&md->part);
+ INIT_LIST_HEAD(&md->rpmbs);
md->usage = 1;
ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
@@ -2186,6 +2249,158 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
return 0;
}
+/**
+ * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
+ * @filp: the character device file
+ * @cmd: the ioctl() command
+ * @arg: the argument from userspace
+ *
+ * This essentially just redirects the incoming ioctl()s to the main
+ * block device that spawned the RPMB character device.
+ */
+static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mmc_rpmb_data *rpmb = filp->private_data;
+ int ret;
+
+ switch (cmd) {
+ case MMC_IOC_CMD:
+ ret = mmc_blk_ioctl_cmd(rpmb->md,
+ (struct mmc_ioc_cmd __user *)arg,
+ rpmb);
+ break;
+ case MMC_IOC_MULTI_CMD:
+ ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
+ (struct mmc_ioc_multi_cmd __user *)arg,
+ rpmb);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
+{
+ struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
+ struct mmc_rpmb_data, chrdev);
+
+ get_device(&rpmb->dev);
+ filp->private_data = rpmb;
+ mmc_blk_get(rpmb->md->disk);
+
+ return nonseekable_open(inode, filp);
+}
+
+static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
+{
+ struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
+ struct mmc_rpmb_data, chrdev);
+
+ put_device(&rpmb->dev);
+ mmc_blk_put(rpmb->md);
+
+ return 0;
+}
+
+static const struct file_operations mmc_rpmb_fileops = {
+ .release = mmc_rpmb_chrdev_release,
+ .open = mmc_rpmb_chrdev_open,
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = mmc_rpmb_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mmc_rpmb_ioctl_compat,
+#endif
+};
+
+static void mmc_blk_rpmb_device_release(struct device *dev)
+{
+ struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
+
+ ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
+ kfree(rpmb);
+}
+
+static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
+ struct mmc_blk_data *md,
+ unsigned int part_index,
+ sector_t size,
+ const char *subname)
+{
+ int devidx, ret;
+ char rpmb_name[DISK_NAME_LEN];
+ char cap_str[10];
+ struct mmc_rpmb_data *rpmb;
+
+ /* This creates the minor number for the RPMB char device */
+ devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
+ if (devidx < 0)
+ return devidx;
+
+ rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
+ if (!rpmb) {
+ ida_simple_remove(&mmc_rpmb_ida, devidx);
+ return -ENOMEM;
+ }
+
+ snprintf(rpmb_name, sizeof(rpmb_name),
+ "mmcblk%u%s", card->host->index, subname ? subname : "");
+
+ rpmb->id = devidx;
+ rpmb->part_index = part_index;
+ rpmb->dev.init_name = rpmb_name;
+ rpmb->dev.bus = &mmc_rpmb_bus_type;
+ rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
+ rpmb->dev.parent = &card->dev;
+ rpmb->dev.release = mmc_blk_rpmb_device_release;
+ device_initialize(&rpmb->dev);
+ dev_set_drvdata(&rpmb->dev, rpmb);
+ rpmb->md = md;
+
+ cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
+ rpmb->chrdev.owner = THIS_MODULE;
+ ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
+ if (ret) {
+ pr_err("%s: could not add character device\n", rpmb_name);
+ goto out_put_device;
+ }
+
+ list_add(&rpmb->node, &md->rpmbs);
+
+ string_get_size((u64)size, 512, STRING_UNITS_2,
+ cap_str, sizeof(cap_str));
+
+ pr_info("%s: %s %s partition %u %s, chardev (%d:%d)\n",
+ rpmb_name, mmc_card_id(card),
+ mmc_card_name(card), EXT_CSD_PART_CONFIG_ACC_RPMB, cap_str,
+ MAJOR(mmc_rpmb_devt), rpmb->id);
+
+ return 0;
+
+out_put_device:
+ put_device(&rpmb->dev);
+ return ret;
+}
+
+static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
+
+{
+ cdev_device_del(&rpmb->chrdev, &rpmb->dev);
+ put_device(&rpmb->dev);
+}
+
/* MMC Physical partitions consist of two boot partitions and
* up to four general purpose partitions.
* For each partition enabled in EXT_CSD a block device will be allocatedi
@@ -2194,13 +2409,26 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
- int idx, ret = 0;
+ int idx, ret;
if (!mmc_card_mmc(card))
return 0;
for (idx = 0; idx < card->nr_parts; idx++) {
- if (card->part[idx].size) {
+ if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
+ /*
+ * RPMB partitions do not provide block access; they are
+ * only accessed using ioctl()s. Thus create special RPMB
+ * block devices that do not have a backing block queue.
+ */
+ ret = mmc_blk_alloc_rpmb_part(card, md,
+ card->part[idx].part_cfg,
+ card->part[idx].size >> 9,
+ card->part[idx].name);
+ if (ret)
+ return ret;
+ } else if (card->part[idx].size) {
ret = mmc_blk_alloc_part(card, md,
card->part[idx].part_cfg,
card->part[idx].size >> 9,
@@ -2212,7 +2440,7 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
}
}
- return ret;
+ return 0;
}
static void mmc_blk_remove_req(struct mmc_blk_data *md)
@@ -2249,7 +2477,15 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
{
struct list_head *pos, *q;
struct mmc_blk_data *part_md;
+ struct mmc_rpmb_data *rpmb;
+ /* Remove RPMB partitions */
+ list_for_each_safe(pos, q, &md->rpmbs) {
+ rpmb = list_entry(pos, struct mmc_rpmb_data, node);
+ list_del(pos);
+ mmc_blk_remove_rpmb_part(rpmb);
+ }
+ /* Remove block partitions */
list_for_each_safe(pos, q, &md->part) {
part_md = list_entry(pos, struct mmc_blk_data, part);
list_del(pos);
@@ -2568,6 +2804,17 @@ static int __init mmc_blk_init(void)
{
int res;
+ res = bus_register(&mmc_rpmb_bus_type);
+ if (res < 0) {
+ pr_err("mmcblk: could not register RPMB bus type\n");
+ return res;
+ }
+ res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
+ if (res < 0) {
+ pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
+ goto out_bus_unreg;
+ }
+
if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
pr_info("mmcblk: using %d minors per device\n", perdev_minors);
@@ -2575,16 +2822,20 @@ static int __init mmc_blk_init(void)
res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
if (res)
- goto out;
+ goto out_chrdev_unreg;
res = mmc_register_driver(&mmc_driver);
if (res)
- goto out2;
+ goto out_blkdev_unreg;
return 0;
- out2:
+
+out_blkdev_unreg:
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
- out:
+out_chrdev_unreg:
+ unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
+out_bus_unreg:
+ bus_unregister(&mmc_rpmb_bus_type);
return res;
}
@@ -2592,6 +2843,7 @@ static void __exit mmc_blk_exit(void)
{
mmc_unregister_driver(&mmc_driver);
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
+ unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
}
module_init(mmc_blk_init);
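
The RPMB character device introduced above simply forwards MMC_IOC_CMD and MMC_IOC_MULTI_CMD to the parent block device, so existing ioctl-based tooling can be pointed at the chardev node. A minimal userspace sketch under some assumptions: the node is assumed to appear as /dev/mmcblk0rpmb, the command values are illustrative only (real RPMB traffic needs properly framed 512-byte RPMB data frames and reliable writes), and the MMC_RSP_*/MMC_CMD_* flag values are not in the uapi header, so they are copied locally here the way mmc-utils does.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mmc/ioctl.h>

/* Response/command-type flags copied from the kernel, as mmc-utils does */
#define MMC_RSP_PRESENT	(1 << 0)
#define MMC_RSP_CRC	(1 << 2)
#define MMC_RSP_OPCODE	(1 << 4)
#define MMC_CMD_AC	(0 << 5)
#define MMC_RSP_R1	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)

static int rpmb_send_status(const char *node)
{
	struct mmc_ioc_cmd ic;
	int fd, ret;

	fd = open(node, O_RDWR);	/* e.g. /dev/mmcblk0rpmb, assumed name */
	if (fd < 0)
		return -1;

	memset(&ic, 0, sizeof(ic));
	ic.opcode = 13;			/* SEND_STATUS, a non-data command */
	ic.arg = 1 << 16;		/* RCA 1, illustrative */
	ic.flags = MMC_RSP_R1 | MMC_CMD_AC;

	/* Routed through mmc_rpmb_ioctl() to the owning block device */
	ret = ioctl(fd, MMC_IOC_CMD, &ic);
	close(fd);
	return ret;
}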
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 301246513a37..a4b49e25fe96 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -369,10 +369,17 @@ int mmc_add_card(struct mmc_card *card)
*/
void mmc_remove_card(struct mmc_card *card)
{
+ struct mmc_host *host = card->host;
+
#ifdef CONFIG_DEBUG_FS
mmc_remove_card_debugfs(card);
#endif
+ if (host->cqe_enabled) {
+ host->cqe_ops->cqe_disable(host);
+ host->cqe_enabled = false;
+ }
+
if (mmc_card_present(card)) {
if (mmc_host_is_spi(card->host)) {
pr_info("%s: SPI card removed\n",
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 66c9cf49ad2f..1f0f44f4dd5f 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -266,7 +266,8 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
host->ops->request(host, mrq);
}
-static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
+static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
+ bool cqe)
{
if (mrq->sbc) {
pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
@@ -275,9 +276,12 @@ static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
}
if (mrq->cmd) {
- pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
- mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg,
- mrq->cmd->flags);
+ pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
+ mmc_hostname(host), cqe ? "CQE direct " : "",
+ mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
+ } else if (cqe) {
+ pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
+ mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
}
if (mrq->data) {
@@ -333,7 +337,7 @@ static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
-static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
@@ -342,7 +346,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
if (mmc_card_removed(host->card))
return -ENOMEDIUM;
- mmc_mrq_pr_debug(host, mrq);
+ mmc_mrq_pr_debug(host, mrq, false);
WARN_ON(!host->claimed);
@@ -355,6 +359,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
+EXPORT_SYMBOL(mmc_start_request);
/*
* mmc_wait_data_done() - done callback for data request
@@ -482,6 +487,155 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
}
EXPORT_SYMBOL(mmc_wait_for_req_done);
+/*
+ * mmc_cqe_start_req - Start a CQE request.
+ * @host: MMC host to start the request
+ * @mrq: request to start
+ *
+ * Start the request, re-tuning first if that is needed and possible. Returns an error
+ * code if the request fails to start or -EBUSY if CQE is busy.
+ */
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int err;
+
+ /*
+ * CQE cannot process re-tuning commands. Caller must hold retuning
+ * while CQE is in use. Re-tuning can happen here only when CQE has no
+ * active requests i.e. this is the first. Note, re-tuning will call
+ * ->cqe_off().
+ */
+ err = mmc_retune(host);
+ if (err)
+ goto out_err;
+
+ mrq->host = host;
+
+ mmc_mrq_pr_debug(host, mrq, true);
+
+ err = mmc_mrq_prep(host, mrq);
+ if (err)
+ goto out_err;
+
+ err = host->cqe_ops->cqe_request(host, mrq);
+ if (err)
+ goto out_err;
+
+ trace_mmc_request_start(host, mrq);
+
+ return 0;
+
+out_err:
+ if (mrq->cmd) {
+ pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
+ mmc_hostname(host), mrq->cmd->opcode, err);
+ } else {
+ pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
+ mmc_hostname(host), mrq->tag, err);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mmc_cqe_start_req);
+
+/**
+ * mmc_cqe_request_done - CQE has finished processing an MMC request
+ * @host: MMC host which completed request
+ * @mrq: MMC request which completed
+ *
+ * CQE drivers should call this function when they have completed
+ * their processing of a request.
+ */
+void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mmc_should_fail_request(host, mrq);
+
+ /* Flag re-tuning needed on CRC errors */
+ if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
+ (mrq->data && mrq->data->error == -EILSEQ))
+ mmc_retune_needed(host);
+
+ trace_mmc_request_done(host, mrq);
+
+ if (mrq->cmd) {
+ pr_debug("%s: CQE req done (direct CMD%u): %d\n",
+ mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
+ } else {
+ pr_debug("%s: CQE transfer done tag %d\n",
+ mmc_hostname(host), mrq->tag);
+ }
+
+ if (mrq->data) {
+ pr_debug("%s: %d bytes transferred: %d\n",
+ mmc_hostname(host),
+ mrq->data->bytes_xfered, mrq->data->error);
+ }
+
+ mrq->done(mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_request_done);
+
+/**
+ * mmc_cqe_post_req - CQE post process of a completed MMC request
+ * @host: MMC host
+ * @mrq: MMC request to be processed
+ */
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (host->cqe_ops->cqe_post_req)
+ host->cqe_ops->cqe_post_req(host, mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_post_req);
+
+/* Arbitrary 1 second timeout */
+#define MMC_CQE_RECOVERY_TIMEOUT 1000
+
+/*
+ * mmc_cqe_recovery - Recover from CQE errors.
+ * @host: MMC host to recover
+ *
+ * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
+ * in eMMC, and discarding the queue in CQE. CQE must call
+ * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
+ * fails to discard its queue.
+ */
+int mmc_cqe_recovery(struct mmc_host *host)
+{
+ struct mmc_command cmd;
+ int err;
+
+ mmc_retune_hold_now(host);
+
+ /*
+ * Recovery is expected seldom, if at all, but it reduces performance,
+ * so make sure it is not completely silent.
+ */
+ pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
+
+ host->cqe_ops->cqe_recovery_start(host);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+ mmc_wait_for_cmd(host, &cmd, 0);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_CMDQ_TASK_MGMT;
+ cmd.arg = 1; /* Discard entire queue */
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+
+ host->cqe_ops->cqe_recovery_finish(host);
+
+ mmc_retune_release(host);
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_cqe_recovery);
+
/**
* mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
* @host: MMC host
@@ -832,9 +986,36 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
}
EXPORT_SYMBOL(mmc_align_data_size);
+/*
+ * Allow claiming an already claimed host if the context is the same or there is
+ * no context but the task is the same.
+ */
+static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
+ struct task_struct *task)
+{
+ return host->claimer == ctx ||
+ (!ctx && task && host->claimer->task == task);
+}
+
+static inline void mmc_ctx_set_claimer(struct mmc_host *host,
+ struct mmc_ctx *ctx,
+ struct task_struct *task)
+{
+ if (!host->claimer) {
+ if (ctx)
+ host->claimer = ctx;
+ else
+ host->claimer = &host->default_ctx;
+ }
+ if (task)
+ host->claimer->task = task;
+}
+
/**
* __mmc_claim_host - exclusively claim a host
* @host: mmc host to claim
+ * @ctx: context that claims the host or NULL in which case the default
+ * context will be used
* @abort: whether or not the operation should be aborted
*
* Claim a host for a set of operations. If @abort is non null and
@@ -842,8 +1023,10 @@ EXPORT_SYMBOL(mmc_align_data_size);
* that non-zero value without acquiring the lock. Returns zero
* with the lock held otherwise.
*/
-int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
+int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
+ atomic_t *abort)
{
+ struct task_struct *task = ctx ? NULL : current;
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
int stop;
@@ -856,7 +1039,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
stop = abort ? atomic_read(abort) : 0;
- if (stop || !host->claimed || host->claimer == current)
+ if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
break;
spin_unlock_irqrestore(&host->lock, flags);
schedule();
@@ -865,7 +1048,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
set_current_state(TASK_RUNNING);
if (!stop) {
host->claimed = 1;
- host->claimer = current;
+ mmc_ctx_set_claimer(host, ctx, task);
host->claim_cnt += 1;
if (host->claim_cnt == 1)
pm = true;
@@ -900,6 +1083,7 @@ void mmc_release_host(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
} else {
host->claimed = 0;
+ host->claimer->task = NULL;
host->claimer = NULL;
spin_unlock_irqrestore(&host->lock, flags);
wake_up(&host->wq);
@@ -913,10 +1097,10 @@ EXPORT_SYMBOL(mmc_release_host);
* This is a helper function, which fetches a runtime pm reference for the
* card device and also claims the host.
*/
-void mmc_get_card(struct mmc_card *card)
+void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
pm_runtime_get_sync(&card->dev);
- mmc_claim_host(card->host);
+ __mmc_claim_host(card->host, ctx, NULL);
}
EXPORT_SYMBOL(mmc_get_card);
@@ -924,9 +1108,13 @@ EXPORT_SYMBOL(mmc_get_card);
* This is a helper function, which releases the host and drops the runtime
* pm reference for the card device.
*/
-void mmc_put_card(struct mmc_card *card)
+void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
- mmc_release_host(card->host);
+ struct mmc_host *host = card->host;
+
+ WARN_ON(ctx && host->claimer != ctx);
+
+ mmc_release_host(host);
pm_runtime_mark_last_busy(&card->dev);
pm_runtime_put_autosuspend(&card->dev);
}
@@ -1400,6 +1588,16 @@ EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
#endif /* CONFIG_REGULATOR */
+/**
+ * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or an errno. Any errno must be handled; it is either a critical
+ * error or -EPROBE_DEFER. A return of 0 does not mean that all regulators
+ * were found, because they are all optional. If your driver requires certain
+ * regulators, check separately after calling this function whether they were
+ * populated.
+ */
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
struct device *dev = mmc_dev(mmc);
@@ -1484,11 +1682,33 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
}
+int mmc_host_set_uhs_voltage(struct mmc_host *host)
+{
+ u32 clock;
+
+ /*
+ * During a signal voltage level switch, the clock must be gated
+ * for 5 ms according to the SD spec
+ */
+ clock = host->ios.clock;
+ host->ios.clock = 0;
+ mmc_set_ios(host);
+
+ if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
+ return -EAGAIN;
+
+ /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
+ mmc_delay(10);
+ host->ios.clock = clock;
+ mmc_set_ios(host);
+
+ return 0;
+}
+
int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
struct mmc_command cmd = {};
int err = 0;
- u32 clock;
/*
* If we cannot switch voltages, return failure so the caller
@@ -1520,15 +1740,8 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
err = -EAGAIN;
goto power_cycle;
}
- /*
- * During a signal voltage level switch, the clock must be gated
- * for 5 ms according to the SD spec
- */
- clock = host->ios.clock;
- host->ios.clock = 0;
- mmc_set_ios(host);
- if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
+ if (mmc_host_set_uhs_voltage(host)) {
/*
* Voltages may not have been switched, but we've already
* sent CMD11, so a power cycle is required anyway
@@ -1537,11 +1750,6 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
goto power_cycle;
}
- /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
- mmc_delay(10);
- host->ios.clock = clock;
- mmc_set_ios(host);
-
/* Wait for at least 1 ms according to spec */
mmc_delay(1);
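
The mmc_cqe_start_req(), mmc_cqe_request_done() and mmc_cqe_post_req() additions above define the contract between the core and a CQE host driver. A rough sketch of the completion side, assuming a hypothetical driver whose interrupt handler looks up the finished request by tag; the my_cqe_* names are invented, only mmc_cqe_request_done() is the real entry point.

#include <linux/interrupt.h>
#include <linux/mmc/host.h>

/* Hypothetical per-host lookup of the request that just completed */
static struct mmc_request *my_cqe_lookup_mrq(struct mmc_host *mmc);

static irqreturn_t my_cqe_irq(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;
	struct mmc_request *mrq = my_cqe_lookup_mrq(mmc);

	if (!mrq)
		return IRQ_NONE;

	/* Any error must already be recorded in mrq->cmd / mrq->data */
	mmc_cqe_request_done(mmc, mrq);

	return IRQ_HANDLED;
}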
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index ca861091a776..71e6c6d7ceb7 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -49,6 +49,7 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr);
+int mmc_host_set_uhs_voltage(struct mmc_host *host);
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
@@ -107,6 +108,8 @@ static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq);
+
struct mmc_async_req;
struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
@@ -128,10 +131,11 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
bool is_rel_write);
-int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
+int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
+ atomic_t *abort);
void mmc_release_host(struct mmc_host *host);
-void mmc_get_card(struct mmc_card *card);
-void mmc_put_card(struct mmc_card *card);
+void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx);
+void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx);
/**
* mmc_claim_host - exclusively claim a host
@@ -141,7 +145,11 @@ void mmc_put_card(struct mmc_card *card);
*/
static inline void mmc_claim_host(struct mmc_host *host)
{
- __mmc_claim_host(host, NULL);
+ __mmc_claim_host(host, NULL, NULL);
}
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq);
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_cqe_recovery(struct mmc_host *host);
+
#endif
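
The context-aware claiming added to core.h above lets several threads share one host claim by passing the same struct mmc_ctx. A small sketch of the intended calling pattern using these core-internal helpers; struct my_mmc_user and its ctx member are hypothetical, embedded wherever the caller keeps its state.

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

struct my_mmc_user {
	struct mmc_card *card;
	struct mmc_ctx ctx;	/* shared claimer identity, hypothetical owner */
};

static void my_mmc_do_work(struct my_mmc_user *u)
{
	/* Runtime-resume the card and claim the host as this context */
	mmc_get_card(u->card, &u->ctx);

	/* ... issue requests while the host is claimed ... */

	/* Release the claim and allow runtime autosuspend again */
	mmc_put_card(u->card, &u->ctx);
}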
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index ad88deb2e8f3..35a9e4fd1a9f 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -111,12 +111,6 @@ void mmc_retune_hold(struct mmc_host *host)
host->hold_retune += 1;
}
-void mmc_retune_hold_now(struct mmc_host *host)
-{
- host->retune_now = 0;
- host->hold_retune += 1;
-}
-
void mmc_retune_release(struct mmc_host *host)
{
if (host->hold_retune)
@@ -124,6 +118,7 @@ void mmc_retune_release(struct mmc_host *host)
else
WARN_ON(1);
}
+EXPORT_SYMBOL(mmc_retune_release);
int mmc_retune(struct mmc_host *host)
{
@@ -184,7 +179,7 @@ static void mmc_retune_timer(unsigned long data)
int mmc_of_parse(struct mmc_host *host)
{
struct device *dev = host->parent;
- u32 bus_width;
+ u32 bus_width, drv_type;
int ret;
bool cd_cap_invert, cd_gpio_invert = false;
bool ro_cap_invert, ro_gpio_invert = false;
@@ -326,6 +321,15 @@ int mmc_of_parse(struct mmc_host *host)
if (device_property_read_bool(dev, "no-mmc"))
host->caps2 |= MMC_CAP2_NO_MMC;
+ /* Must be after "non-removable" check */
+ if (device_property_read_u32(dev, "fixed-emmc-driver-type", &drv_type) == 0) {
+ if (host->caps & MMC_CAP_NONREMOVABLE)
+ host->fixed_drv_type = drv_type;
+ else
+ dev_err(host->parent,
+ "can't use fixed driver type, media is removable\n");
+ }
+
host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
if (host->dsr_req && (host->dsr & ~0xffff)) {
dev_err(host->parent,
@@ -398,6 +402,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->max_blk_size = 512;
host->max_blk_count = PAGE_SIZE / 512;
+ host->fixed_drv_type = -EINVAL;
+
return host;
}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 77d6f60d1bf9..fb689a1065ed 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -19,12 +19,17 @@ void mmc_unregister_host_class(void);
void mmc_retune_enable(struct mmc_host *host);
void mmc_retune_disable(struct mmc_host *host);
void mmc_retune_hold(struct mmc_host *host);
-void mmc_retune_hold_now(struct mmc_host *host);
void mmc_retune_release(struct mmc_host *host);
int mmc_retune(struct mmc_host *host);
void mmc_retune_pause(struct mmc_host *host);
void mmc_retune_unpause(struct mmc_host *host);
+static inline void mmc_retune_hold_now(struct mmc_host *host)
+{
+ host->retune_now = 0;
+ host->hold_retune += 1;
+}
+
static inline void mmc_retune_recheck(struct mmc_host *host)
{
if (host->hold_retune <= 1)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 36217ad5e9b1..a552f61060d2 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -780,6 +780,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
+MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
card->ext_csd.device_life_time_est_typ_a,
@@ -838,6 +839,7 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_prv.attr,
+ &dev_attr_rev.attr,
&dev_attr_pre_eol_info.attr,
&dev_attr_life_time.attr,
&dev_attr_serial.attr,
@@ -1289,13 +1291,18 @@ out_err:
static void mmc_select_driver_type(struct mmc_card *card)
{
int card_drv_type, drive_strength, drv_type;
+ int fixed_drv_type = card->host->fixed_drv_type;
card_drv_type = card->ext_csd.raw_driver_strength |
mmc_driver_type_mask(0);
- drive_strength = mmc_select_drive_strength(card,
- card->ext_csd.hs200_max_dtr,
- card_drv_type, &drv_type);
+ if (fixed_drv_type >= 0)
+ drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type)
+ ? fixed_drv_type : 0;
+ else
+ drive_strength = mmc_select_drive_strength(card,
+ card->ext_csd.hs200_max_dtr,
+ card_drv_type, &drv_type);
card->drive_strength = drive_strength;
@@ -1786,12 +1793,41 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
+ * Enable Command Queue if supported. Note that Packed Commands cannot
+ * be used with Command Queue.
+ */
+ card->ext_csd.cmdq_en = false;
+ if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
+ err = mmc_cmdq_enable(card);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warn("%s: Enabling CMDQ failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.cmdq_support = false;
+ card->ext_csd.cmdq_depth = 0;
+ err = 0;
+ }
+ }
+ /*
* In some cases (e.g. RPMB or mmc_test), the Command Queue must be
* disabled for a time, so a flag is needed to indicate to re-enable the
* Command Queue.
*/
card->reenable_cmdq = card->ext_csd.cmdq_en;
+ if (card->ext_csd.cmdq_en && !host->cqe_enabled) {
+ err = host->cqe_ops->cqe_enable(host, card);
+ if (err) {
+ pr_err("%s: Failed to enable CQE, error %d\n",
+ mmc_hostname(host), err);
+ } else {
+ host->cqe_enabled = true;
+ pr_info("%s: Command Queue Engine enabled\n",
+ mmc_hostname(host));
+ }
+ }
+
if (!oldcard)
host->card = card;
@@ -1911,14 +1947,14 @@ static void mmc_detect(struct mmc_host *host)
{
int err;
- mmc_get_card(host->card);
+ mmc_get_card(host->card, NULL);
/*
* Just check if our card has been removed.
*/
err = _mmc_detect_card_removed(host);
- mmc_put_card(host->card);
+ mmc_put_card(host->card, NULL);
if (err) {
mmc_remove(host);
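
The mmc_init_card() changes above only enable the Command Queue Engine when the host advertises it. A host driver opts in roughly as sketched below; the my_* names are hypothetical and the callback bodies are omitted, but the cqe_ops field names are the ones the core calls through host->cqe_ops in this series.

#include <linux/mmc/host.h>

static int my_cqe_enable(struct mmc_host *host, struct mmc_card *card);
static void my_cqe_disable(struct mmc_host *host);
static int my_cqe_request(struct mmc_host *host, struct mmc_request *mrq);
static void my_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq);
static void my_cqe_recovery_start(struct mmc_host *host);
static void my_cqe_recovery_finish(struct mmc_host *host);

static const struct mmc_cqe_ops my_cqe_ops = {
	.cqe_enable		= my_cqe_enable,
	.cqe_disable		= my_cqe_disable,
	.cqe_request		= my_cqe_request,
	.cqe_post_req		= my_cqe_post_req,
	.cqe_recovery_start	= my_cqe_recovery_start,
	.cqe_recovery_finish	= my_cqe_recovery_finish,
};

static void my_host_setup(struct mmc_host *mmc)
{
	mmc->caps2 |= MMC_CAP2_CQE;	/* ask the core to enable CMDQ + CQE */
	mmc->cqe_ops = &my_cqe_ops;
}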
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 54686ca4bfb7..908e4db03535 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -977,7 +977,6 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
from_exception)
return;
- mmc_claim_host(card->host);
if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
timeout = MMC_OPS_TIMEOUT_MS;
use_busy_signal = true;
@@ -995,7 +994,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
pr_warn("%s: Error %d starting bkops\n",
mmc_hostname(card->host), err);
mmc_retune_release(card->host);
- goto out;
+ return;
}
/*
@@ -1007,9 +1006,8 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
mmc_card_set_doing_bkops(card);
else
mmc_retune_release(card->host);
-out:
- mmc_release_host(card->host);
}
+EXPORT_SYMBOL(mmc_start_bkops);
/*
* Flush the cache to the non-volatile storage.
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 0a4e77a5ba33..4f33d277b125 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -30,7 +30,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
{
struct mmc_queue *mq = q->queuedata;
- if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
+ if (mq && mmc_card_removed(mq->card))
return BLKPREP_KILL;
req->rq_flags |= RQF_DONTPREP;
@@ -177,6 +177,29 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
mq_rq->sg = NULL;
}
+static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u64 limit = BLK_BOUNCE_HIGH;
+
+ if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+ limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
+
+ blk_queue_bounce_limit(mq->queue, limit);
+ blk_queue_max_hw_sectors(mq->queue,
+ min(host->max_blk_count, host->max_req_size / 512));
+ blk_queue_max_segments(mq->queue, host->max_segs);
+ blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+ /* Initialize thread_sem even if it is not used */
+ sema_init(&mq->thread_sem, 1);
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
@@ -190,12 +213,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
spinlock_t *lock, const char *subname)
{
struct mmc_host *host = card->host;
- u64 limit = BLK_BOUNCE_HIGH;
int ret = -ENOMEM;
- if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
-
mq->card = card;
mq->queue = blk_alloc_queue(GFP_KERNEL);
if (!mq->queue)
@@ -214,18 +233,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
}
blk_queue_prep_rq(mq->queue, mmc_prep_request);
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
- queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
- if (mmc_can_erase(card))
- mmc_queue_setup_discard(mq->queue, card);
- blk_queue_bounce_limit(mq->queue, limit);
- blk_queue_max_hw_sectors(mq->queue,
- min(host->max_blk_count, host->max_req_size / 512));
- blk_queue_max_segments(mq->queue, host->max_segs);
- blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-
- sema_init(&mq->thread_sem, 1);
+ mmc_setup_queue(mq, card);
mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
host->index, subname ? subname : "");
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 6bfba32ffa66..547b457c4251 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -36,12 +36,14 @@ struct mmc_blk_request {
/**
* enum mmc_drv_op - enumerates the operations in the mmc_queue_req
* @MMC_DRV_OP_IOCTL: ioctl operation
+ * @MMC_DRV_OP_IOCTL_RPMB: RPMB-oriented ioctl operation
* @MMC_DRV_OP_BOOT_WP: write protect boot partitions
* @MMC_DRV_OP_GET_CARD_STATUS: get card status
* @MMC_DRV_OP_GET_EXT_CSD: get the EXT CSD from an eMMC card
*/
enum mmc_drv_op {
MMC_DRV_OP_IOCTL,
+ MMC_DRV_OP_IOCTL_RPMB,
MMC_DRV_OP_BOOT_WP,
MMC_DRV_OP_GET_CARD_STATUS,
MMC_DRV_OP_GET_EXT_CSD,
@@ -82,6 +84,4 @@ extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *);
-extern int mmc_access_rpmb(struct mmc_queue *);
-
#endif
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 4fd1620b732d..45bf78f32716 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -908,6 +908,18 @@ unsigned mmc_sd_get_max_clock(struct mmc_card *card)
return max_dtr;
}
+static bool mmc_sd_card_using_v18(struct mmc_card *card)
+{
+ /*
+ * According to the SD spec., the Bus Speed Mode (function group 1) bits
+ * 2 to 4 are zero if the card is initialized at 3.3V signal level. Thus
+ * they can be used to determine if the card has already switched to
+ * 1.8V signaling.
+ */
+ return card->sw_caps.sd3_bus_mode &
+ (SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR104 | SD_MODE_UHS_DDR50);
+}
+
/*
* Handle the detection and initialisation of a card.
*
@@ -921,9 +933,10 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
int err;
u32 cid[4];
u32 rocr = 0;
+ bool v18_fixup_failed = false;
WARN_ON(!host->claimed);
-
+retry:
err = mmc_sd_get_cid(host, ocr, cid, &rocr);
if (err)
return err;
@@ -989,6 +1002,36 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto free_card;
+ /*
+ * If the card has not been power cycled, it may still be using 1.8V
+ * signaling. Detect that situation and try to initialize a UHS-I (1.8V)
+ * transfer mode.
+ */
+ if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
+ mmc_sd_card_using_v18(card) &&
+ host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
+ /*
+ * Re-read switch information in case it has changed since
+ * oldcard was initialized.
+ */
+ if (oldcard) {
+ err = mmc_read_switch(card);
+ if (err)
+ goto free_card;
+ }
+ if (mmc_sd_card_using_v18(card)) {
+ if (mmc_host_set_uhs_voltage(host) ||
+ mmc_sd_init_uhs_card(card)) {
+ v18_fixup_failed = true;
+ mmc_power_cycle(host, ocr);
+ if (!oldcard)
+ mmc_remove_card(card);
+ goto retry;
+ }
+ goto done;
+ }
+ }
+
/* Initialization sequence for UHS-I cards */
if (rocr & SD_ROCR_S18A) {
err = mmc_sd_init_uhs_card(card);
@@ -1021,7 +1064,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
}
}
-
+done:
host->card = card;
return 0;
@@ -1056,14 +1099,14 @@ static void mmc_sd_detect(struct mmc_host *host)
{
int err;
- mmc_get_card(host->card);
+ mmc_get_card(host->card, NULL);
/*
* Just check if our card has been removed.
*/
err = _mmc_detect_card_removed(host);
- mmc_put_card(host->card);
+ mmc_put_card(host->card, NULL);
if (err) {
mmc_sd_remove(host);
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index c771843e4c15..7a2eaf8410a3 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -155,7 +155,8 @@ static int sdio_irq_thread(void *_host)
* holding of the host lock does not cover too much work
* that doesn't require that lock to be held.
*/
- ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
+ ret = __mmc_claim_host(host, NULL,
+ &host->sdio_irq_thread_abort);
if (ret)
break;
ret = process_sdio_pending_irqs(host);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 8c15637178ff..567028c9219a 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -352,6 +352,19 @@ config MMC_MESON_GX
If you have a controller with this interface, say Y here.
+config MMC_MESON_MX_SDIO
+ tristate "Amlogic Meson6/Meson8/Meson8b SD/MMC Host Controller support"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on COMMON_CLK
+ depends on HAS_DMA
+ depends on OF
+ help
+ This selects support for the SD/MMC Host Controller on
+ Amlogic Meson6, Meson8 and Meson8b SoCs.
+
+ If you have a controller with this interface, say Y or M here.
+ If unsure, say N.
+
config MMC_MOXART
tristate "MOXART SD/MMC Host Controller support"
depends on ARCH_MOXART && MMC
@@ -429,6 +442,7 @@ config MMC_SDHCI_MSM
tristate "Qualcomm SDHCI Controller Support"
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in Qualcomm SOCs. The controller supports
@@ -663,7 +677,7 @@ config MMC_CAVIUM_OCTEON
config MMC_CAVIUM_THUNDERX
tristate "Cavium ThunderX SD/MMC Card Interface support"
depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
- depends on GPIOLIB
+ depends on GPIO_THUNDERX
depends on OF_ADDRESS
help
This selects Cavium ThunderX SD/MMC Card Interface.
@@ -899,3 +913,15 @@ config MMC_SDHCI_XENON
This selects Marvell Xenon eMMC/SD/SDIO SDHCI.
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+
+config MMC_SDHCI_OMAP
+ tristate "TI SDHCI Controller Support"
+ depends on MMC_SDHCI_PLTFM && OF
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ support present in TI's DRA7 SOCs. The controller supports
+ SD/MMC/SDIO devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 7c7b29ff591a..a43cf0d5a5d3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_MMC_VUB300) += vub300.o
obj-$(CONFIG_MMC_USHC) += ushc.o
obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
obj-$(CONFIG_MMC_MESON_GX) += meson-gx-mmc.o
+obj-$(CONFIG_MMC_MESON_MX_SDIO) += meson-mx-sdio.o
obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o
obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
@@ -90,6 +91,7 @@ obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
+obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 0a0ebf3a096d..e55f3932d580 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -732,11 +732,11 @@ static inline unsigned int atmci_convert_chksize(struct atmel_mci *host,
return 0;
}
-static void atmci_timeout_timer(unsigned long data)
+static void atmci_timeout_timer(struct timer_list *t)
{
struct atmel_mci *host;
- host = (struct atmel_mci *)data;
+ host = from_timer(host, t, timer);
dev_dbg(&host->pdev->dev, "software timeout\n");
@@ -1661,9 +1661,9 @@ static void atmci_command_complete(struct atmel_mci *host,
cmd->error = 0;
}
-static void atmci_detect_change(unsigned long data)
+static void atmci_detect_change(struct timer_list *t)
{
- struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
+ struct atmel_mci_slot *slot = from_timer(slot, t, detect_timer);
bool present;
bool present_old;
@@ -2349,8 +2349,7 @@ static int atmci_init_slot(struct atmel_mci *host,
if (gpio_is_valid(slot->detect_pin)) {
int ret;
- setup_timer(&slot->detect_timer, atmci_detect_change,
- (unsigned long)slot);
+ timer_setup(&slot->detect_timer, atmci_detect_change, 0);
ret = request_irq(gpio_to_irq(slot->detect_pin),
atmci_detect_interrupt,
@@ -2563,7 +2562,7 @@ static int atmci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+ timer_setup(&host->timer, atmci_timeout_timer, 0);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
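
The timer conversions in this and the following drivers all follow the same pattern: the callback now receives the timer_list pointer and recovers its containing structure with from_timer(), so no data cookie is needed. A generic sketch of that pattern; my_host and my_timeout are placeholders.

#include <linux/timer.h>

struct my_host {
	struct timer_list timer;
	/* ... driver state ... */
};

static void my_timeout(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the timer member name */
	struct my_host *host = from_timer(host, t, timer);

	/* handle the timeout using host */
	(void)host;
}

static void my_host_init(struct my_host *host)
{
	/* replaces setup_timer(&host->timer, fn, (unsigned long)host) */
	timer_setup(&host->timer, my_timeout, 0);
}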
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index fbd29f00fca0..ed5cefb83768 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -967,7 +967,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
}
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
/*
* Legacy Octeon firmware has no regulator entry, fall-back to
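
This hunk and the later dw_mmc and meson-gx hunks converge on the same caller pattern for mmc_regulator_get_supply(): propagate any non-zero return (it is either a real error or -EPROBE_DEFER) and then check the optional regulators individually. A sketch of that pattern under the assumption of a hypothetical my_check_regulators() helper.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mmc/host.h>

static int my_check_regulators(struct mmc_host *mmc)
{
	int ret;

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;	/* includes -EPROBE_DEFER */

	/* All regulators are optional; check the ones this driver needs */
	if (IS_ERR(mmc->supply.vqmmc))
		dev_dbg(mmc_dev(mmc), "no vqmmc regulator found\n");

	return 0;
}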
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 64cda84b2302..73fd75c3c824 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -75,7 +75,7 @@ struct hs_timing {
u32 smpl_phase_min;
};
-struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = {
+static struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = {
{ /* reserved */ },
{ /* SD */
{7, 0, 15, 15,}, /* 0: LEGACY 400k */
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 4f2806720c5c..0aa39975f33b 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -817,7 +817,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
struct dma_slave_config cfg;
struct dma_async_tx_descriptor *desc = NULL;
struct scatterlist *sgl = host->data->sg;
- const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
u32 sg_elems = host->data->sg_len;
u32 fifoth_val;
u32 fifo_offset = host->fifo_reg - host->regs;
@@ -1024,7 +1024,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
unsigned int blksz = data->blksz;
- const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
u32 fifo_width = 1 << host->data_shift;
u32 blksz_depth = blksz / fifo_width, fifoth_val;
u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
@@ -1938,6 +1938,7 @@ static void dw_mci_set_drto(struct dw_mci *host)
unsigned int drto_clks;
unsigned int drto_div;
unsigned int drto_ms;
+ unsigned long irqflags;
drto_clks = mci_readl(host, TMOUT) >> 8;
drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
@@ -1949,7 +1950,11 @@ static void dw_mci_set_drto(struct dw_mci *host)
/* add a bit spare time */
drto_ms += 10;
- mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
+ mod_timer(&host->dto_timer,
+ jiffies + msecs_to_jiffies(drto_ms));
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
@@ -1970,6 +1975,18 @@ static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
return true;
}
+static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
+{
+ if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
+ return false;
+
+ /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
+ WARN_ON(del_timer_sync(&host->dto_timer));
+ clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+
+ return true;
+}
+
static void dw_mci_tasklet_func(unsigned long priv)
{
struct dw_mci *host = (struct dw_mci *)priv;
@@ -2111,8 +2128,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
/* fall through */
case STATE_DATA_BUSY:
- if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
- &host->pending_events)) {
+ if (!dw_mci_clear_pending_data_complete(host)) {
/*
* If data error interrupt comes but data over
* interrupt doesn't come within the given time.
@@ -2682,6 +2698,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & SDMMC_INT_DATA_OVER) {
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
del_timer(&host->dto_timer);
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
@@ -2694,6 +2712,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
if (pending & SDMMC_INT_RXDR) {
@@ -2791,7 +2811,7 @@ static int dw_mci_init_slot(struct dw_mci *host)
/*if there are external regulators, get them*/
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto err_host_allocated;
if (!mmc->ocr_avail)
@@ -2971,9 +2991,9 @@ no_dma:
host->use_dma = TRANS_MODE_PIO;
}
-static void dw_mci_cmd11_timer(unsigned long arg)
+static void dw_mci_cmd11_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, cmd11_timer);
if (host->state != STATE_SENDING_CMD11) {
dev_warn(host->dev, "Unexpected CMD11 timeout\n");
@@ -2985,9 +3005,9 @@ static void dw_mci_cmd11_timer(unsigned long arg)
tasklet_schedule(&host->tasklet);
}
-static void dw_mci_cto_timer(unsigned long arg)
+static void dw_mci_cto_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, cto_timer);
unsigned long irqflags;
u32 pending;
@@ -3040,10 +3060,34 @@ exit:
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
-static void dw_mci_dto_timer(unsigned long arg)
+static void dw_mci_dto_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, dto_timer);
+ unsigned long irqflags;
+ u32 pending;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ /*
+ * The DTO timer is much longer than the CTO timer, so it's even less
+ * likely that we'll hit these cases, but it pays to be paranoid.
+ */
+ pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+ if (pending & SDMMC_INT_DATA_OVER) {
+ /* The interrupt should fire; no need to act but we can warn */
+ dev_warn(host->dev, "Unexpected data interrupt latency\n");
+ goto exit;
+ }
+ if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
+ /* Presumably interrupt handler couldn't delete the timer */
+ dev_warn(host->dev, "DTO timeout when already completed\n");
+ goto exit;
+ }
+
+ /*
+ * Continued paranoia to make sure we're in the state we expect.
+ * This paranoia isn't really justified but it seems good to be safe.
+ */
switch (host->state) {
case STATE_SENDING_DATA:
case STATE_DATA_BUSY:
@@ -3058,8 +3102,13 @@ static void dw_mci_dto_timer(unsigned long arg)
tasklet_schedule(&host->tasklet);
break;
default:
+ dev_warn(host->dev, "Unexpected data timeout, state %d\n",
+ host->state);
break;
}
+
+exit:
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
#ifdef CONFIG_OF
@@ -3208,14 +3257,9 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- setup_timer(&host->cmd11_timer,
- dw_mci_cmd11_timer, (unsigned long)host);
-
- setup_timer(&host->cto_timer,
- dw_mci_cto_timer, (unsigned long)host);
-
- setup_timer(&host->dto_timer,
- dw_mci_dto_timer, (unsigned long)host);
+ timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
+ timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
+ timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
spin_lock_init(&host->lock);
spin_lock_init(&host->irq_lock);
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 34474ad731aa..e3124f06a47e 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -74,7 +74,8 @@ struct dw_mci_dma_slave {
* @stop_abort: The command currently prepared for stoping transfer.
* @prev_blksz: The former transfer blksz record.
* @timing: Record of current ios timing.
- * @use_dma: Whether DMA channel is initialized or not.
+ * @use_dma: Which DMA channel is in use for the current transfer, zero
+ * denotes PIO mode.
* @using_dma: Whether DMA is in use for the current transfer.
* @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
* @sg_dma: Bus address of DMA buffer.
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 7db8c7a8d38d..712e08d9a45e 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -586,9 +586,9 @@ poll_timeout:
return true;
}
-static void jz4740_mmc_timeout(unsigned long data)
+static void jz4740_mmc_timeout(struct timer_list *t)
{
- struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)data;
+ struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);
if (!test_and_clear_bit(0, &host->waiting))
return;
@@ -1036,8 +1036,7 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
jz4740_mmc_reset(host);
jz4740_mmc_clock_disable(host);
- setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
- (unsigned long)host);
+ timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);
host->use_dma = true;
if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 85745ef179e2..e0862d3f65b3 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1190,7 +1190,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
/* Get regulators and the supported OCR mask */
host->vqmmc_enabled = false;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto free_host;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
new file mode 100644
index 000000000000..09cb89645d06
--- /dev/null
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -0,0 +1,768 @@
+/*
+ * meson-mx-sdio.c - Meson6, Meson8 and Meson8b SDIO/MMC Host Controller
+ *
+ * Copyright (C) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ * Copyright (C) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+
+#define MESON_MX_SDIO_ARGU 0x00
+
+#define MESON_MX_SDIO_SEND 0x04
+ #define MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK GENMASK(7, 0)
+ #define MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK GENMASK(15, 8)
+ #define MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7 BIT(16)
+ #define MESON_MX_SDIO_SEND_RESP_HAS_DATA BIT(17)
+ #define MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8 BIT(18)
+ #define MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY BIT(19)
+ #define MESON_MX_SDIO_SEND_DATA BIT(20)
+ #define MESON_MX_SDIO_SEND_USE_INT_WINDOW BIT(21)
+ #define MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK GENMASK(31, 24)
+
+#define MESON_MX_SDIO_CONF 0x08
+ #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT 0
+ #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH 10
+ #define MESON_MX_SDIO_CONF_CMD_DISABLE_CRC BIT(10)
+ #define MESON_MX_SDIO_CONF_CMD_OUT_AT_POSITIVE_EDGE BIT(11)
+ #define MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK GENMASK(17, 12)
+ #define MESON_MX_SDIO_CONF_RESP_LATCH_AT_NEGATIVE_EDGE BIT(18)
+ #define MESON_MX_SDIO_CONF_DATA_LATCH_AT_NEGATIVE_EDGE BIT(19)
+ #define MESON_MX_SDIO_CONF_BUS_WIDTH BIT(20)
+ #define MESON_MX_SDIO_CONF_M_ENDIAN_MASK GENMASK(22, 21)
+ #define MESON_MX_SDIO_CONF_WRITE_NWR_MASK GENMASK(28, 23)
+ #define MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK GENMASK(31, 29)
+
+#define MESON_MX_SDIO_IRQS 0x0c
+ #define MESON_MX_SDIO_IRQS_STATUS_STATE_MACHINE_MASK GENMASK(3, 0)
+ #define MESON_MX_SDIO_IRQS_CMD_BUSY BIT(4)
+ #define MESON_MX_SDIO_IRQS_RESP_CRC7_OK BIT(5)
+ #define MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK BIT(6)
+ #define MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK BIT(7)
+ #define MESON_MX_SDIO_IRQS_IF_INT BIT(8)
+ #define MESON_MX_SDIO_IRQS_CMD_INT BIT(9)
+ #define MESON_MX_SDIO_IRQS_STATUS_INFO_MASK GENMASK(15, 12)
+ #define MESON_MX_SDIO_IRQS_TIMING_OUT_INT BIT(16)
+ #define MESON_MX_SDIO_IRQS_AMRISC_TIMING_OUT_INT_EN BIT(17)
+ #define MESON_MX_SDIO_IRQS_ARC_TIMING_OUT_INT_EN BIT(18)
+ #define MESON_MX_SDIO_IRQS_TIMING_OUT_COUNT_MASK GENMASK(31, 19)
+
+#define MESON_MX_SDIO_IRQC 0x10
+ #define MESON_MX_SDIO_IRQC_ARC_IF_INT_EN BIT(3)
+ #define MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN BIT(4)
+ #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(10, 13)
+ #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15)
+ #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30)
+ #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31)
+
+#define MESON_MX_SDIO_MULT 0x14
+ #define MESON_MX_SDIO_MULT_PORT_SEL_MASK GENMASK(1, 0)
+ #define MESON_MX_SDIO_MULT_MEMORY_STICK_ENABLE BIT(2)
+ #define MESON_MX_SDIO_MULT_MEMORY_STICK_SCLK_ALWAYS BIT(3)
+ #define MESON_MX_SDIO_MULT_STREAM_ENABLE BIT(4)
+ #define MESON_MX_SDIO_MULT_STREAM_8BITS_MODE BIT(5)
+ #define MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX BIT(8)
+ #define MESON_MX_SDIO_MULT_DAT0_DAT1_SWAPPED BIT(10)
+ #define MESON_MX_SDIO_MULT_DAT1_DAT0_SWAPPED BIT(11)
+ #define MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK GENMASK(15, 12)
+
+#define MESON_MX_SDIO_ADDR 0x18
+
+#define MESON_MX_SDIO_EXT 0x1c
+ #define MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK GENMASK(29, 16)
+
+#define MESON_MX_SDIO_BOUNCE_REQ_SIZE (128 * 1024)
+#define MESON_MX_SDIO_RESPONSE_CRC16_BITS (16 - 1)
+#define MESON_MX_SDIO_MAX_SLOTS 3
+
+struct meson_mx_mmc_host {
+ struct device *controller_dev;
+
+ struct clk *parent_clk;
+ struct clk *core_clk;
+ struct clk_divider cfg_div;
+ struct clk *cfg_div_clk;
+ struct clk_fixed_factor fixed_factor;
+ struct clk *fixed_factor_clk;
+
+ void __iomem *base;
+ int irq;
+ spinlock_t irq_lock;
+
+ struct timer_list cmd_timeout;
+
+ unsigned int slot_id;
+ struct mmc_host *mmc;
+
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ int error;
+};
+
+static void meson_mx_mmc_mask_bits(struct mmc_host *mmc, char reg, u32 mask,
+ u32 val)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 regval;
+
+ regval = readl(host->base + reg);
+ regval &= ~mask;
+ regval |= (val & mask);
+
+ writel(regval, host->base + reg);
+}
+
+static void meson_mx_mmc_soft_reset(struct meson_mx_mmc_host *host)
+{
+ writel(MESON_MX_SDIO_IRQC_SOFT_RESET, host->base + MESON_MX_SDIO_IRQC);
+ udelay(2);
+}
+
+static struct mmc_command *meson_mx_mmc_get_next_cmd(struct mmc_command *cmd)
+{
+ if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
+ return cmd->mrq->cmd;
+ else if (mmc_op_multi(cmd->opcode) &&
+ (!cmd->mrq->sbc || cmd->error || cmd->data->error))
+ return cmd->mrq->stop;
+ else
+ return NULL;
+}
+
+static void meson_mx_mmc_start_cmd(struct mmc_host *mmc,
+ struct mmc_command *cmd)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ unsigned int pack_size;
+ unsigned long irqflags, timeout;
+ u32 mult, send = 0, ext = 0;
+
+ host->cmd = cmd;
+
+ if (cmd->busy_timeout)
+ timeout = msecs_to_jiffies(cmd->busy_timeout);
+ else
+ timeout = msecs_to_jiffies(1000);
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1:
+ case MMC_RSP_R1B:
+ case MMC_RSP_R3:
+ /* 7 (CMD) + 32 (response) + 7 (CRC) -1 */
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 45);
+ break;
+ case MMC_RSP_R2:
+ /* 7 (CMD) + 120 (response) + 7 (CRC) -1 */
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 133);
+ send |= MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8;
+ break;
+ default:
+ break;
+ }
+
+ if (!(cmd->flags & MMC_RSP_CRC))
+ send |= MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ send |= MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY;
+
+ if (cmd->data) {
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK,
+ (cmd->data->blocks - 1));
+
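+ /*
+ * each active data line appends its own CRC16, so account for four
+ * CRCs in 4-bit mode and a single CRC in 1-bit mode
+ */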
+ pack_size = cmd->data->blksz * BITS_PER_BYTE;
+ if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
+ pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 4;
+ else
+ pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 1;
+
+ ext |= FIELD_PREP(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK,
+ pack_size);
+
+ if (cmd->data->flags & MMC_DATA_WRITE)
+ send |= MESON_MX_SDIO_SEND_DATA;
+ else
+ send |= MESON_MX_SDIO_SEND_RESP_HAS_DATA;
+
+ cmd->data->bytes_xfered = 0;
+ }
+
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK,
+ (0x40 | cmd->opcode));
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
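+ /* select the slot this host instance is wired to before issuing the command */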
+ mult = readl(host->base + MESON_MX_SDIO_MULT);
+ mult &= ~MESON_MX_SDIO_MULT_PORT_SEL_MASK;
+ mult |= FIELD_PREP(MESON_MX_SDIO_MULT_PORT_SEL_MASK, host->slot_id);
+ mult |= BIT(31);
+ writel(mult, host->base + MESON_MX_SDIO_MULT);
+
+ /* enable the CMD done interrupt */
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQC,
+ MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN,
+ MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN);
+
+ /* clear pending interrupts */
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQS,
+ MESON_MX_SDIO_IRQS_CMD_INT,
+ MESON_MX_SDIO_IRQS_CMD_INT);
+
+ writel(cmd->arg, host->base + MESON_MX_SDIO_ARGU);
+ writel(ext, host->base + MESON_MX_SDIO_EXT);
+ writel(send, host->base + MESON_MX_SDIO_SEND);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ mod_timer(&host->cmd_timeout, jiffies + timeout);
+}
+
+static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host)
+{
+ struct mmc_request *mrq;
+
+ mrq = host->mrq;
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ unsigned short vdd = ios->vdd;
+ unsigned long clk_rate = ios->clock;
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF,
+ MESON_MX_SDIO_CONF_BUS_WIDTH, 0);
+ break;
+
+ case MMC_BUS_WIDTH_4:
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF,
+ MESON_MX_SDIO_CONF_BUS_WIDTH,
+ MESON_MX_SDIO_CONF_BUS_WIDTH);
+ break;
+
+ case MMC_BUS_WIDTH_8:
+ default:
+ dev_err(mmc_dev(mmc), "unsupported bus width: %d\n",
+ ios->bus_width);
+ host->error = -EINVAL;
+ return;
+ }
+
+ host->error = clk_set_rate(host->cfg_div_clk, ios->clock);
+ if (host->error) {
+ dev_warn(mmc_dev(mmc),
+ "failed to set MMC clock to %lu: %d\n",
+ clk_rate, host->error);
+ return;
+ }
+
+ mmc->actual_clock = clk_get_rate(host->cfg_div_clk);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ vdd = 0;
+ /* fall-through: */
+ case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ host->error = mmc_regulator_set_ocr(mmc,
+ mmc->supply.vmmc,
+ vdd);
+ if (host->error)
+ return;
+ }
+ break;
+ }
+}
+
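+/*
+ * the controller is handed a single DMA address (see the ADDR register
+ * write in meson_mx_mmc_request), so only the first scatterlist entry
+ * is validated for word alignment and mapped
+ */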
+static int meson_mx_mmc_map_dma(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ int dma_len;
+ struct scatterlist *sg;
+
+ if (!data)
+ return 0;
+
+ sg = data->sg;
+ if (sg->offset & 3 || sg->length & 3) {
+ dev_err(mmc_dev(mmc),
+ "unaligned scatterlist: offset %x length %d\n",
+ sg->offset, sg->length);
+ return -EINVAL;
+ }
+
+ dma_len = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ if (dma_len <= 0) {
+ dev_err(mmc_dev(mmc), "dma_map_sg failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd = mrq->cmd;
+
+ if (!host->error)
+ host->error = meson_mx_mmc_map_dma(mmc, mrq);
+
+ if (host->error) {
+ cmd->error = host->error;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ host->mrq = mrq;
+
+ if (mrq->data)
+ writel(sg_dma_address(mrq->data->sg),
+ host->base + MESON_MX_SDIO_ADDR);
+
+ if (mrq->sbc)
+ meson_mx_mmc_start_cmd(mmc, mrq->sbc);
+ else
+ meson_mx_mmc_start_cmd(mmc, mrq->cmd);
+}
+
+static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
+
+ return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
+}
+
+static void meson_mx_mmc_read_response(struct mmc_host *mmc,
+ struct mmc_command *cmd)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 mult;
+ int i, resp[4];
+
+ mult = readl(host->base + MESON_MX_SDIO_MULT);
+ mult |= MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX;
+ mult &= ~MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK;
+ mult |= FIELD_PREP(MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK, 0);
+ writel(mult, host->base + MESON_MX_SDIO_MULT);
+
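+ /*
+ * for long (136 bit) responses the words are offset by one byte from
+ * the layout the MMC core expects, so shift each word left by 8 bits
+ * and borrow the top byte of the following word
+ */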
+ if (cmd->flags & MMC_RSP_136) {
+ for (i = 0; i <= 3; i++)
+ resp[3 - i] = readl(host->base + MESON_MX_SDIO_ARGU);
+ cmd->resp[0] = (resp[0] << 8) | ((resp[1] >> 24) & 0xff);
+ cmd->resp[1] = (resp[1] << 8) | ((resp[2] >> 24) & 0xff);
+ cmd->resp[2] = (resp[2] << 8) | ((resp[3] >> 24) & 0xff);
+ cmd->resp[3] = (resp[3] << 8);
+ } else if (cmd->flags & MMC_RSP_PRESENT) {
+ cmd->resp[0] = readl(host->base + MESON_MX_SDIO_ARGU);
+ }
+}
+
+static irqreturn_t meson_mx_mmc_process_cmd_irq(struct meson_mx_mmc_host *host,
+ u32 irqs, u32 send)
+{
+ struct mmc_command *cmd = host->cmd;
+
+ /*
+ * NOTE: even though it shouldn't happen, we sometimes get command
+ * interrupts twice (at least that is what it looks like). Ideally
+ * we would find out why this happens and warn here as soon as it
+ * occurs.
+ */
+ if (!cmd)
+ return IRQ_HANDLED;
+
+ cmd->error = 0;
+ meson_mx_mmc_read_response(host->mmc, cmd);
+
+ if (cmd->data) {
+ if (!((irqs & MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK) ||
+ (irqs & MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK)))
+ cmd->error = -EILSEQ;
+ } else {
+ if (!((irqs & MESON_MX_SDIO_IRQS_RESP_CRC7_OK) ||
+ (send & MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7)))
+ cmd->error = -EILSEQ;
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
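+/*
+ * hard IRQ handler: read and acknowledge the pending interrupts under
+ * the lock and check the command status; the actual completion work is
+ * deferred to the threaded handler
+ */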
+static irqreturn_t meson_mx_mmc_irq(int irq, void *data)
+{
+ struct meson_mx_mmc_host *host = (void *) data;
+ u32 irqs, send;
+ unsigned long irqflags;
+ irqreturn_t ret;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ irqs = readl(host->base + MESON_MX_SDIO_IRQS);
+ send = readl(host->base + MESON_MX_SDIO_SEND);
+
+ if (irqs & MESON_MX_SDIO_IRQS_CMD_INT)
+ ret = meson_mx_mmc_process_cmd_irq(host, irqs, send);
+ else
+ ret = IRQ_HANDLED;
+
+ /* finally ACK all pending interrupts */
+ writel(irqs, host->base + MESON_MX_SDIO_IRQS);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ return ret;
+}
+
+static irqreturn_t meson_mx_mmc_irq_thread(int irq, void *irq_data)
+{
+ struct meson_mx_mmc_host *host = (void *) irq_data;
+ struct mmc_command *cmd = host->cmd, *next_cmd;
+
+ if (WARN_ON(!cmd))
+ return IRQ_HANDLED;
+
+ del_timer_sync(&host->cmd_timeout);
+
+ if (cmd->data) {
+ dma_unmap_sg(mmc_dev(host->mmc), cmd->data->sg,
+ cmd->data->sg_len,
+ mmc_get_dma_dir(cmd->data));
+
+ cmd->data->bytes_xfered = cmd->data->blksz * cmd->data->blocks;
+ }
+
+ next_cmd = meson_mx_mmc_get_next_cmd(cmd);
+ if (next_cmd)
+ meson_mx_mmc_start_cmd(host->mmc, next_cmd);
+ else
+ meson_mx_mmc_request_done(host);
+
+ return IRQ_HANDLED;
+}
+
+static void meson_mx_mmc_timeout(struct timer_list *t)
+{
+ struct meson_mx_mmc_host *host = from_timer(host, t, cmd_timeout);
+ unsigned long irqflags;
+ u32 irqc;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ /* disable the CMD interrupt */
+ irqc = readl(host->base + MESON_MX_SDIO_IRQC);
+ irqc &= ~MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN;
+ writel(irqc, host->base + MESON_MX_SDIO_IRQC);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ /*
+ * skip the timeout handling if the interrupt handler already processed
+ * the command.
+ */
+ if (!host->cmd)
+ return;
+
+ dev_dbg(mmc_dev(host->mmc),
+ "Timeout on CMD%u (IRQS = 0x%08x, ARGU = 0x%08x)\n",
+ host->cmd->opcode, readl(host->base + MESON_MX_SDIO_IRQS),
+ readl(host->base + MESON_MX_SDIO_ARGU));
+
+ host->cmd->error = -ETIMEDOUT;
+
+ meson_mx_mmc_request_done(host);
+}
+
+static struct mmc_host_ops meson_mx_mmc_ops = {
+ .request = meson_mx_mmc_request,
+ .set_ios = meson_mx_mmc_set_ios,
+ .card_busy = meson_mx_mmc_card_busy,
+ .get_cd = mmc_gpio_get_cd,
+ .get_ro = mmc_gpio_get_ro,
+};
+
+static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
+{
+ struct device_node *slot_node;
+
+ /*
+ * TODO: the MMC core framework currently does not properly support
+ * controllers with multiple slots, so we only register the first
+ * slot for now.
+ */
+ slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot");
+ if (!slot_node) {
+ dev_warn(parent, "no 'mmc-slot' sub-node found\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ return of_platform_device_create(slot_node, NULL, parent);
+}
+
+static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ struct device *slot_dev = mmc_dev(mmc);
+ int ret;
+
+ if (of_property_read_u32(slot_dev->of_node, "reg", &host->slot_id)) {
+ dev_err(slot_dev, "missing 'reg' property\n");
+ return -EINVAL;
+ }
+
+ if (host->slot_id >= MESON_MX_SDIO_MAX_SLOTS) {
+ dev_err(slot_dev, "invalid 'reg' property value %d\n",
+ host->slot_id);
+ return -EINVAL;
+ }
+
+ /* Get regulators and the supported OCR mask */
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret)
+ return ret;
+
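+ /*
+ * the maximum block count and block size follow from the widths of
+ * the REPEAT_PACKAGE_TIMES and DATA_RW_NUMBER register fields
+ */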
+ mmc->max_req_size = MESON_MX_SDIO_BOUNCE_REQ_SIZE;
+ mmc->max_seg_size = mmc->max_req_size;
+ mmc->max_blk_count =
+ FIELD_GET(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK,
+ 0xffffffff);
+ mmc->max_blk_size = FIELD_GET(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK,
+ 0xffffffff);
+ mmc->max_blk_size -= (4 * MESON_MX_SDIO_RESPONSE_CRC16_BITS);
+ mmc->max_blk_size /= BITS_PER_BYTE;
+
+ /* Get the min and max supported clock rates */
+ mmc->f_min = clk_round_rate(host->cfg_div_clk, 1);
+ mmc->f_max = clk_round_rate(host->cfg_div_clk,
+ clk_get_rate(host->parent_clk));
+
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->ops = &meson_mx_mmc_ops;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ return ret;
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
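+/*
+ * the MMC clock is derived from the "clkin" parent through a fixed
+ * divide-by-two stage followed by the divider embedded in the CONF
+ * register; both are registered as clocks so clk_set_rate() can
+ * program them
+ */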
+static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host)
+{
+ struct clk_init_data init;
+ const char *clk_div_parent, *clk_fixed_factor_parent;
+
+ clk_fixed_factor_parent = __clk_get_name(host->parent_clk);
+ init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
+ "%s#fixed_factor",
+ dev_name(host->controller_dev));
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = 0;
+ init.parent_names = &clk_fixed_factor_parent;
+ init.num_parents = 1;
+ host->fixed_factor.div = 2;
+ host->fixed_factor.mult = 1;
+ host->fixed_factor.hw.init = &init;
+
+ host->fixed_factor_clk = devm_clk_register(host->controller_dev,
+ &host->fixed_factor.hw);
+ if (WARN_ON(IS_ERR(host->fixed_factor_clk)))
+ return PTR_ERR(host->fixed_factor_clk);
+
+ clk_div_parent = __clk_get_name(host->fixed_factor_clk);
+ init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
+ "%s#div", dev_name(host->controller_dev));
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &clk_div_parent;
+ init.num_parents = 1;
+ host->cfg_div.reg = host->base + MESON_MX_SDIO_CONF;
+ host->cfg_div.shift = MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT;
+ host->cfg_div.width = MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH;
+ host->cfg_div.hw.init = &init;
+ host->cfg_div.flags = CLK_DIVIDER_ALLOW_ZERO;
+
+ host->cfg_div_clk = devm_clk_register(host->controller_dev,
+ &host->cfg_div.hw);
+ if (WARN_ON(IS_ERR(host->cfg_div_clk)))
+ return PTR_ERR(host->cfg_div_clk);
+
+ return 0;
+}
+
+static int meson_mx_mmc_probe(struct platform_device *pdev)
+{
+ struct platform_device *slot_pdev;
+ struct mmc_host *mmc;
+ struct meson_mx_mmc_host *host;
+ struct resource *res;
+ int ret, irq;
+ u32 conf;
+
+ slot_pdev = meson_mx_mmc_slot_pdev(&pdev->dev);
+ if (!slot_pdev)
+ return -ENODEV;
+ else if (IS_ERR(slot_pdev))
+ return PTR_ERR(slot_pdev);
+
+ mmc = mmc_alloc_host(sizeof(*host), &slot_pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto error_unregister_slot_pdev;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->controller_dev = &pdev->dev;
+
+ spin_lock_init(&host->irq_lock);
+ timer_setup(&host->cmd_timeout, meson_mx_mmc_timeout, 0);
+
+ platform_set_drvdata(pdev, host);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->base = devm_ioremap_resource(host->controller_dev, res);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
+ goto error_free_mmc;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_threaded_irq(host->controller_dev, irq,
+ meson_mx_mmc_irq,
+ meson_mx_mmc_irq_thread, IRQF_ONESHOT,
+ NULL, host);
+ if (ret)
+ goto error_free_mmc;
+
+ host->core_clk = devm_clk_get(host->controller_dev, "core");
+ if (IS_ERR(host->core_clk)) {
+ ret = PTR_ERR(host->core_clk);
+ goto error_free_mmc;
+ }
+
+ host->parent_clk = devm_clk_get(host->controller_dev, "clkin");
+ if (IS_ERR(host->parent_clk)) {
+ ret = PTR_ERR(host->parent_clk);
+ goto error_free_mmc;
+ }
+
+ ret = meson_mx_mmc_register_clks(host);
+ if (ret)
+ goto error_free_mmc;
+
+ ret = clk_prepare_enable(host->core_clk);
+ if (ret) {
+ dev_err(host->controller_dev, "Failed to enable core clock\n");
+ goto error_free_mmc;
+ }
+
+ ret = clk_prepare_enable(host->cfg_div_clk);
+ if (ret) {
+ dev_err(host->controller_dev, "Failed to enable MMC clock\n");
+ goto error_disable_core_clk;
+ }
+
+ conf = 0;
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK, 39);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_M_ENDIAN_MASK, 0x3);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_NWR_MASK, 0x2);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK, 0x2);
+ writel(conf, host->base + MESON_MX_SDIO_CONF);
+
+ meson_mx_mmc_soft_reset(host);
+
+ ret = meson_mx_mmc_add_host(host);
+ if (ret)
+ goto error_disable_clks;
+
+ return 0;
+
+error_disable_clks:
+ clk_disable_unprepare(host->cfg_div_clk);
+error_disable_core_clk:
+ clk_disable_unprepare(host->core_clk);
+error_free_mmc:
+ mmc_free_host(mmc);
+error_unregister_slot_pdev:
+ of_platform_device_destroy(&slot_pdev->dev, NULL);
+ return ret;
+}
+
+static int meson_mx_mmc_remove(struct platform_device *pdev)
+{
+ struct meson_mx_mmc_host *host = platform_get_drvdata(pdev);
+ struct device *slot_dev = mmc_dev(host->mmc);
+
+ del_timer_sync(&host->cmd_timeout);
+
+ mmc_remove_host(host->mmc);
+
+ of_platform_device_destroy(slot_dev, NULL);
+
+ clk_disable_unprepare(host->cfg_div_clk);
+ clk_disable_unprepare(host->core_clk);
+
+ mmc_free_host(host->mmc);
+
+ return 0;
+}
+
+static const struct of_device_id meson_mx_mmc_of_match[] = {
+ { .compatible = "amlogic,meson8-sdio", },
+ { .compatible = "amlogic,meson8b-sdio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_mx_mmc_of_match);
+
+static struct platform_driver meson_mx_mmc_driver = {
+ .probe = meson_mx_mmc_probe,
+ .remove = meson_mx_mmc_remove,
+ .driver = {
+ .name = "meson-mx-sdio",
+ .of_match_table = of_match_ptr(meson_mx_mmc_of_match),
+ },
+};
+
+module_platform_driver(meson_mx_mmc_driver);
+
+MODULE_DESCRIPTION("Meson6, Meson8 and Meson8b SDIO/MMC Host Driver");
+MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f1f54a818489..e8a1bb1ae694 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1658,7 +1658,7 @@ static int mmci_probe(struct amba_device *dev,
/* Get regulators and the supported OCR mask */
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto clk_disable;
if (!mmc->ocr_avail)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 267f7ab08420..6457a7d8880f 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -67,6 +67,7 @@
#define SDC_RESP2 0x48
#define SDC_RESP3 0x4c
#define SDC_BLK_NUM 0x50
+#define SDC_ADV_CFG0 0x64
#define EMMC_IOCON 0x7c
#define SDC_ACMD_RESP 0x80
#define MSDC_DMA_SA 0x90
@@ -74,10 +75,14 @@
#define MSDC_DMA_CFG 0x9c
#define MSDC_PATCH_BIT 0xb0
#define MSDC_PATCH_BIT1 0xb4
+#define MSDC_PATCH_BIT2 0xb8
#define MSDC_PAD_TUNE 0xec
+#define MSDC_PAD_TUNE0 0xf0
#define PAD_DS_TUNE 0x188
#define PAD_CMD_TUNE 0x18c
#define EMMC50_CFG0 0x208
+#define EMMC50_CFG3 0x220
+#define SDC_FIFO_CFG 0x228
/*--------------------------------------------------------------------------*/
/* Register Mask */
@@ -95,6 +100,9 @@
#define MSDC_CFG_CKDIV (0xff << 8) /* RW */
#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */
#define MSDC_CFG_HS400_CK_MODE (0x1 << 18) /* RW */
+#define MSDC_CFG_HS400_CK_MODE_EXTRA (0x1 << 22) /* RW */
+#define MSDC_CFG_CKDIV_EXTRA (0xfff << 8) /* RW */
+#define MSDC_CFG_CKMOD_EXTRA (0x3 << 20) /* RW */
/* MSDC_IOCON mask */
#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */
@@ -183,6 +191,9 @@
#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */
#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */
+/* SDC_ADV_CFG0 mask */
+#define SDC_RX_ENHANCE_EN (0x1 << 20) /* RW */
+
/* MSDC_DMA_CTRL mask */
#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */
#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */
@@ -212,11 +223,22 @@
#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
+#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */
+
+#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */
+#define MSDC_PATCH_BIT2_CFGCRCSTS (0x1 << 28) /* RW */
+#define MSDC_PB2_RESPWAIT (0x3 << 2) /* RW */
+#define MSDC_PB2_RESPSTSENSEL (0x7 << 16) /* RW */
+#define MSDC_PB2_CRCSTSENSEL (0x7 << 29) /* RW */
+
#define MSDC_PAD_TUNE_DATWRDLY (0x1f << 0) /* RW */
#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */
#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */
#define MSDC_PAD_TUNE_CMDRRDLY (0x1f << 22) /* RW */
#define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */
+#define MSDC_PAD_TUNE_RXDLYSEL (0x1 << 15) /* RW */
+#define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */
+#define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */
#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
@@ -228,6 +250,11 @@
#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */
#define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */
+#define EMMC50_CFG3_OUTS_WR (0x1f << 0) /* RW */
+
+#define SDC_FIFO_CFG_WRVALIDSEL (0x1 << 24) /* RW */
+#define SDC_FIFO_CFG_RDVALIDSEL (0x1 << 25) /* RW */
+
#define REQ_CMD_EIO (0x1 << 0)
#define REQ_CMD_TMO (0x1 << 1)
#define REQ_DAT_ERR (0x1 << 2)
@@ -290,9 +317,23 @@ struct msdc_save_para {
u32 pad_tune;
u32 patch_bit0;
u32 patch_bit1;
+ u32 patch_bit2;
u32 pad_ds_tune;
u32 pad_cmd_tune;
u32 emmc50_cfg0;
+ u32 emmc50_cfg3;
+ u32 sdc_fifo_cfg;
+};
+
+struct mtk_mmc_compatible {
+ u8 clk_div_bits;
+ bool hs400_tune; /* only used for MT8173 */
+ u32 pad_tune_reg;
+ bool async_fifo;
+ bool data_tune;
+ bool busy_check;
+ bool stop_clk_fix;
+ bool enhance_rx;
};
struct msdc_tune_para {
@@ -309,6 +350,7 @@ struct msdc_delay_phase {
struct msdc_host {
struct device *dev;
+ const struct mtk_mmc_compatible *dev_comp;
struct mmc_host *mmc; /* mmc structure */
int cmd_rsp;
@@ -334,11 +376,13 @@ struct msdc_host {
struct clk *src_clk; /* msdc source clock */
struct clk *h_clk; /* msdc h_clk */
+ struct clk *src_clk_cg; /* msdc source clock control gate */
u32 mclk; /* mmc subsystem clock frequency */
u32 src_clk_freq; /* source clock frequency */
u32 sclk; /* SD/MS bus clock frequency */
unsigned char timing;
bool vqmmc_enabled;
+ u32 latch_ck;
u32 hs400_ds_delay;
u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
@@ -350,6 +394,59 @@ struct msdc_host {
struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
};
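+/*
+ * per-SoC capability data: newer MSDC revisions use a wider clock
+ * divider, a relocated pad tuning register and additional async-FIFO,
+ * busy-check and stop-clock fixes
+ */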
+static const struct mtk_mmc_compatible mt8135_compat = {
+ .clk_div_bits = 8,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt8173_compat = {
+ .clk_div_bits = 8,
+ .hs400_tune = true,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt2701_compat = {
+ .clk_div_bits = 12,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt2712_compat = {
+ .clk_div_bits = 12,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+ .enhance_rx = true,
+};
+
+static const struct of_device_id msdc_of_ids[] = {
+ { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
+ { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
+ { .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
+ { .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msdc_of_ids);
+
static void sdr_set_bits(void __iomem *reg, u32 bs)
{
u32 val = readl(reg);
@@ -509,7 +606,12 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
timeout = (ns + clk_ns - 1) / clk_ns + clks;
/* in 1048576 sclk cycle unit */
timeout = (timeout + (0x1 << 20) - 1) >> 20;
- sdr_get_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD, &mode);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_get_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD, &mode);
+ else
+ sdr_get_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD_EXTRA, &mode);
/*DDR mode will double the clk cycles for data timeout */
timeout = mode >= 2 ? timeout * 2 : timeout;
timeout = timeout > 1 ? timeout - 1 : 0;
@@ -520,6 +622,7 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
static void msdc_gate_clock(struct msdc_host *host)
{
+ clk_disable_unprepare(host->src_clk_cg);
clk_disable_unprepare(host->src_clk);
clk_disable_unprepare(host->h_clk);
}
@@ -528,6 +631,7 @@ static void msdc_ungate_clock(struct msdc_host *host)
{
clk_prepare_enable(host->h_clk);
clk_prepare_enable(host->src_clk);
+ clk_prepare_enable(host->src_clk_cg);
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
}
@@ -538,6 +642,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
u32 flags;
u32 div;
u32 sclk;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
if (!hz) {
dev_dbg(host->dev, "set mclk to 0\n");
@@ -548,7 +653,11 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
flags = readl(host->base + MSDC_INTEN);
sdr_clr_bits(host->base + MSDC_INTEN, flags);
- sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
+ else
+ sdr_clr_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE_EXTRA);
if (timing == MMC_TIMING_UHS_DDR50 ||
timing == MMC_TIMING_MMC_DDR52 ||
timing == MMC_TIMING_MMC_HS400) {
@@ -568,8 +677,12 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
if (timing == MMC_TIMING_MMC_HS400 &&
hz >= (host->src_clk_freq >> 1)) {
- sdr_set_bits(host->base + MSDC_CFG,
- MSDC_CFG_HS400_CK_MODE);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_set_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE);
+ else
+ sdr_set_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE_EXTRA);
sclk = host->src_clk_freq >> 1;
div = 0; /* div is ignore when bit18 is set */
}
@@ -587,11 +700,31 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
sclk = (host->src_clk_freq >> 2) / div;
}
}
- sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
- (mode << 8) | div);
- sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+ sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+ /*
+ * src_clk and HCLK share the same gate/ungate bit, so to gate only
+ * src_clk we have to gate its parent (the mux) instead.
+ */
+ if (host->src_clk_cg)
+ clk_disable_unprepare(host->src_clk_cg);
+ else
+ clk_disable_unprepare(clk_get_parent(host->src_clk));
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_set_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
+ (mode << 8) | div);
+ else
+ sdr_set_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA,
+ (mode << 12) | div);
+ if (host->src_clk_cg)
+ clk_prepare_enable(host->src_clk_cg);
+ else
+ clk_prepare_enable(clk_get_parent(host->src_clk));
+
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
+ sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
host->sclk = sclk;
host->mclk = hz;
host->timing = timing;
@@ -605,15 +738,16 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
*/
if (host->sclk <= 52000000) {
writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
- writel(host->def_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->def_tune_para.pad_tune, host->base + tune_reg);
} else {
writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
- writel(host->saved_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->saved_tune_para.pad_tune, host->base + tune_reg);
writel(host->saved_tune_para.pad_cmd_tune,
host->base + PAD_CMD_TUNE);
}
- if (timing == MMC_TIMING_MMC_HS400)
+ if (timing == MMC_TIMING_MMC_HS400 &&
+ host->dev_comp->hs400_tune)
sdr_set_field(host->base + PAD_CMD_TUNE,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs400_cmd_int_delay);
@@ -1165,6 +1299,7 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
static void msdc_init_hw(struct msdc_host *host)
{
u32 val;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
/* Configure to MMC/SD mode, clock free running */
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
@@ -1180,14 +1315,53 @@ static void msdc_init_hw(struct msdc_host *host)
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
- writel(0, host->base + MSDC_PAD_TUNE);
+ writel(0, host->base + tune_reg);
writel(0, host->base + MSDC_IOCON);
sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
writel(0x403c0046, host->base + MSDC_PATCH_BIT);
sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
- writel(0xffff0089, host->base + MSDC_PATCH_BIT1);
+ writel(0xffff4089, host->base + MSDC_PATCH_BIT1);
sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
+ if (host->dev_comp->stop_clk_fix) {
+ sdr_set_field(host->base + MSDC_PATCH_BIT1,
+ MSDC_PATCH_BIT1_STOP_DLY, 3);
+ sdr_clr_bits(host->base + SDC_FIFO_CFG,
+ SDC_FIFO_CFG_WRVALIDSEL);
+ sdr_clr_bits(host->base + SDC_FIFO_CFG,
+ SDC_FIFO_CFG_RDVALIDSEL);
+ }
+
+ if (host->dev_comp->busy_check)
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT1, (1 << 7));
+
+ if (host->dev_comp->async_fifo) {
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_RESPWAIT, 3);
+ if (host->dev_comp->enhance_rx) {
+ sdr_set_bits(host->base + SDC_ADV_CFG0,
+ SDC_RX_ENHANCE_EN);
+ } else {
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_RESPSTSENSEL, 2);
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_CRCSTSENSEL, 2);
+ }
+ /* with the async FIFO there is no need to tune the internal delay */
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT2,
+ MSDC_PATCH_BIT2_CFGRESP);
+ sdr_set_bits(host->base + MSDC_PATCH_BIT2,
+ MSDC_PATCH_BIT2_CFGCRCSTS);
+ }
+
+ if (host->dev_comp->data_tune) {
+ sdr_set_bits(host->base + tune_reg,
+ MSDC_PAD_TUNE_RD_SEL | MSDC_PAD_TUNE_CMD_SEL);
+ } else {
+ /* choose clock tune */
+ sdr_set_bits(host->base + tune_reg, MSDC_PAD_TUNE_RXDLYSEL);
+ }
+
/* Configure to enable SDIO mode.
* it's must otherwise sdio cmd5 failed
*/
@@ -1200,7 +1374,9 @@ static void msdc_init_hw(struct msdc_host *host)
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
- host->def_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->def_tune_para.pad_tune = readl(host->base + tune_reg);
+ host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
+ host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
dev_dbg(host->dev, "init hardware done!");
}
@@ -1343,18 +1519,19 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
struct msdc_delay_phase internal_delay_phase;
u8 final_delay, final_maxlen;
u32 internal_delay = 0;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
int cmd_err;
int i, j;
if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
mmc->ios.timing == MMC_TIMING_UHS_SDR104)
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs200_cmd_int_delay);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRDLY, i);
/*
* Using the same parameters, it may sometimes pass the test,
@@ -1373,12 +1550,13 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
- if (final_rise_delay.maxlen >= 12 && final_rise_delay.start < 4)
+ if (final_rise_delay.maxlen >= 12 ||
+ (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRDLY, i);
/*
* Using the same parameters, it may sometimes pass the test,
@@ -1403,20 +1581,20 @@ skip_fall:
final_maxlen = final_fall_delay.maxlen;
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
final_rise_delay.final_phase);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
final_fall_delay.final_phase);
final_delay = final_fall_delay.final_phase;
}
- if (host->hs200_cmd_int_delay)
+ if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay)
goto skip_internal;
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY, i);
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err)
@@ -1424,7 +1602,7 @@ skip_fall:
}
dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay);
internal_delay_phase = get_best_delay(host, internal_delay);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY,
internal_delay_phase.final_phase);
skip_internal:
dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
@@ -1486,12 +1664,15 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
u32 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
int i, ret;
+ sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
+ host->latch_ck);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
@@ -1506,7 +1687,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
@@ -1519,14 +1700,14 @@ skip_fall:
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY,
final_rise_delay.final_phase);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY,
final_fall_delay.final_phase);
final_delay = final_fall_delay.final_phase;
@@ -1540,8 +1721,10 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
int ret;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
- if (host->hs400_mode)
+ if (host->hs400_mode &&
+ host->dev_comp->hs400_tune)
ret = hs400_tune_response(mmc, opcode);
else
ret = msdc_tune_response(mmc, opcode);
@@ -1556,7 +1739,7 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
}
host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
- host->saved_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
return ret;
}
@@ -1567,6 +1750,11 @@ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
host->hs400_mode = true;
writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ /* this bit (CFGCRCSTS) must be cleared in HS400 mode */
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
+ /* to improve read performance, set outstanding to 2 */
+ sdr_set_field(host->base + EMMC50_CFG3, EMMC50_CFG3_OUTS_WR, 2);
+
return 0;
}
@@ -1596,6 +1784,9 @@ static const struct mmc_host_ops mt_msdc_ops = {
static void msdc_of_property_parse(struct platform_device *pdev,
struct msdc_host *host)
{
+ of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck",
+ &host->latch_ck);
+
of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
&host->hs400_ds_delay);
@@ -1617,12 +1808,17 @@ static int msdc_drv_probe(struct platform_device *pdev)
struct mmc_host *mmc;
struct msdc_host *host;
struct resource *res;
+ const struct of_device_id *of_id;
int ret;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No DT found\n");
return -EINVAL;
}
+
+ of_id = of_match_node(msdc_of_ids, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
/* Allocate MMC host for this device */
mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
if (!mmc)
@@ -1641,7 +1837,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
}
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto host_free;
host->src_clk = devm_clk_get(&pdev->dev, "source");
@@ -1656,6 +1852,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
goto host_free;
}
+ /* the source clock control gate is an optional clock */
+ host->src_clk_cg = devm_clk_get(&pdev->dev, "source_cg");
+ if (IS_ERR(host->src_clk_cg))
+ host->src_clk_cg = NULL;
+
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = -EINVAL;
@@ -1686,11 +1887,15 @@ static int msdc_drv_probe(struct platform_device *pdev)
msdc_of_property_parse(pdev, host);
host->dev = &pdev->dev;
+ host->dev_comp = of_id->data;
host->mmc = mmc;
host->src_clk_freq = clk_get_rate(host->src_clk);
/* Set host parameters to mmc */
mmc->ops = &mt_msdc_ops;
- mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
+ if (host->dev_comp->clk_div_bits == 8)
+ mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
+ else
+ mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095);
mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
/* MMC core transfer sizes tunable parameters */
@@ -1788,28 +1993,38 @@ static int msdc_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static void msdc_save_reg(struct msdc_host *host)
{
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
+
host->save_para.msdc_cfg = readl(host->base + MSDC_CFG);
host->save_para.iocon = readl(host->base + MSDC_IOCON);
host->save_para.sdc_cfg = readl(host->base + SDC_CFG);
- host->save_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->save_para.pad_tune = readl(host->base + tune_reg);
host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
+ host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2);
host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
+ host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3);
+ host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG);
}
static void msdc_restore_reg(struct msdc_host *host)
{
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
+
writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
writel(host->save_para.iocon, host->base + MSDC_IOCON);
writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
- writel(host->save_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->save_para.pad_tune, host->base + tune_reg);
writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
+ writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2);
writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE);
writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
+ writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3);
+ writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG);
}
static int msdc_runtime_suspend(struct device *dev)
@@ -1839,12 +2054,6 @@ static const struct dev_pm_ops msdc_dev_pm_ops = {
SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
};
-static const struct of_device_id msdc_of_ids[] = {
- { .compatible = "mediatek,mt8135-mmc", },
- {}
-};
-MODULE_DEVICE_TABLE(of, msdc_of_ids);
-
static struct platform_driver mt_msdc_driver = {
.probe = msdc_drv_probe,
.remove = msdc_drv_remove,
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 58d74b8d6c79..210247b3d11a 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -508,9 +508,9 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
return IRQ_NONE;
}
-static void mvsd_timeout_timer(unsigned long data)
+static void mvsd_timeout_timer(struct timer_list *t)
{
- struct mvsd_host *host = (struct mvsd_host *)data;
+ struct mvsd_host *host = from_timer(host, t, timer);
void __iomem *iobase = host->base;
struct mmc_request *mrq;
unsigned long flags;
@@ -776,7 +776,7 @@ static int mvsd_probe(struct platform_device *pdev)
goto out;
}
- setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host);
+ timer_setup(&host->timer, mvsd_timeout_timer, 0);
platform_set_drvdata(pdev, mmc);
ret = mmc_add_host(mmc);
if (ret)
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 1d5418e4efae..5ff8ef7223cc 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -963,10 +963,9 @@ static bool filter(struct dma_chan *chan, void *param)
return true;
}
-static void mxcmci_watchdog(unsigned long data)
+static void mxcmci_watchdog(struct timer_list *t)
{
- struct mmc_host *mmc = (struct mmc_host *)data;
- struct mxcmci_host *host = mmc_priv(mmc);
+ struct mxcmci_host *host = from_timer(host, t, watchdog);
struct mmc_request *req = host->req;
unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
@@ -1075,7 +1074,7 @@ static int mxcmci_probe(struct platform_device *pdev)
dat3_card_detect = true;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto out_free;
if (!mmc->ocr_avail) {
@@ -1165,9 +1164,7 @@ static int mxcmci_probe(struct platform_device *pdev)
goto out_free_dma;
}
- init_timer(&host->watchdog);
- host->watchdog.function = &mxcmci_watchdog;
- host->watchdog.data = (unsigned long)mmc;
+ timer_setup(&host->watchdog, mxcmci_watchdog, 0);
mmc_add_host(mmc);
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index bd49f34d7654..adf32682f27a 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -625,9 +625,9 @@ static void mmc_omap_abort_command(struct work_struct *work)
}
static void
-mmc_omap_cmd_timer(unsigned long data)
+mmc_omap_cmd_timer(struct timer_list *t)
{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+ struct mmc_omap_host *host = from_timer(host, t, cmd_abort_timer);
unsigned long flags;
spin_lock_irqsave(&host->slot_lock, flags);
@@ -654,9 +654,9 @@ mmc_omap_sg_to_buf(struct mmc_omap_host *host)
}
static void
-mmc_omap_clk_timer(unsigned long data)
+mmc_omap_clk_timer(struct timer_list *t)
{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+ struct mmc_omap_host *host = from_timer(host, t, clk_timer);
mmc_omap_fclk_enable(host, 0);
}
@@ -874,9 +874,9 @@ void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
tasklet_hi_schedule(&slot->cover_tasklet);
}
-static void mmc_omap_cover_timer(unsigned long arg)
+static void mmc_omap_cover_timer(struct timer_list *t)
{
- struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
+ struct mmc_omap_slot *slot = from_timer(slot, t, cover_timer);
tasklet_schedule(&slot->cover_tasklet);
}
@@ -1264,8 +1264,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
mmc->max_seg_size = mmc->max_req_size;
if (slot->pdata->get_cover_state != NULL) {
- setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
- (unsigned long)slot);
+ timer_setup(&slot->cover_timer, mmc_omap_cover_timer, 0);
tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
(unsigned long)slot);
}
@@ -1352,11 +1351,10 @@ static int mmc_omap_probe(struct platform_device *pdev)
INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
- setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
- (unsigned long) host);
+ timer_setup(&host->cmd_abort_timer, mmc_omap_cmd_timer, 0);
spin_lock_init(&host->clk_lock);
- setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
+ timer_setup(&host->clk_timer, mmc_omap_clk_timer, 0);
spin_lock_init(&host->dma_lock);
spin_lock_init(&host->slot_lock);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 3b5e6d11069b..071693ebfe18 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -147,10 +147,6 @@
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
-#define VDD_1V8 1800000 /* 180000 uV */
-#define VDD_3V0 3000000 /* 300000 uV */
-#define VDD_165_195 (ffs(MMC_VDD_165_195) - 1)
-
/*
* One controller can have multiple slots, like on some omap boards using
* omap.c controller driver. Luckily this is not currently done on any known
@@ -308,8 +304,7 @@ err_set_ocr:
return ret;
}
-static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
- int vdd)
+static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on)
{
int ret;
@@ -317,17 +312,6 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
return 0;
if (power_on) {
- if (vdd <= VDD_165_195)
- ret = regulator_set_voltage(host->pbias, VDD_1V8,
- VDD_1V8);
- else
- ret = regulator_set_voltage(host->pbias, VDD_3V0,
- VDD_3V0);
- if (ret < 0) {
- dev_err(host->dev, "pbias set voltage fail\n");
- return ret;
- }
-
if (host->pbias_enabled == 0) {
ret = regulator_enable(host->pbias);
if (ret) {
@@ -350,8 +334,7 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
return 0;
}
-static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
- int vdd)
+static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on)
{
struct mmc_host *mmc = host->mmc;
int ret = 0;
@@ -363,7 +346,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
if (IS_ERR(mmc->supply.vmmc))
return 0;
- ret = omap_hsmmc_set_pbias(host, false, 0);
+ ret = omap_hsmmc_set_pbias(host, false);
if (ret)
return ret;
@@ -385,7 +368,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
if (ret)
return ret;
- ret = omap_hsmmc_set_pbias(host, true, vdd);
+ ret = omap_hsmmc_set_pbias(host, true);
if (ret)
goto err_set_voltage;
} else {
@@ -462,7 +445,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
/* Allow an aux regulator */
@@ -1220,11 +1203,11 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
clk_disable_unprepare(host->dbclk);
/* Turn the power off */
- ret = omap_hsmmc_set_power(host, 0, 0);
+ ret = omap_hsmmc_set_power(host, 0);
/* Turn the power ON with given VDD 1.8 or 3.0v */
if (!ret)
- ret = omap_hsmmc_set_power(host, 1, vdd);
+ ret = omap_hsmmc_set_power(host, 1);
if (host->dbclk)
clk_prepare_enable(host->dbclk);
@@ -1621,10 +1604,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->power_mode != host->power_mode) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
- omap_hsmmc_set_power(host, 0, 0);
+ omap_hsmmc_set_power(host, 0);
break;
case MMC_POWER_UP:
- omap_hsmmc_set_power(host, 1, ios->vdd);
+ omap_hsmmc_set_power(host, 1);
break;
case MMC_POWER_ON:
do_send_init_stream = 1;
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 8bae88a150fd..41cbe84c1d18 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -88,6 +88,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match);
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index df4465439e13..9ab10436e4b8 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -91,7 +91,6 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
};
static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
- { .compatible = "renesas,sdhi-shmobile" },
{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
@@ -107,6 +106,10 @@ static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, },
+ { .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,sdhi-shmobile" },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 41b57713b620..0848dc0f882e 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -618,29 +618,22 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host,
u8 sample_point, bool rx)
{
struct rtsx_pcr *pcr = host->pcr;
- int err;
dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n",
__func__, rx ? "RX" : "TX", sample_point);
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
+ rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
if (rx)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD_VPRX_CTL, 0x1F, sample_point);
+ rtsx_pci_write_register(pcr, SD_VPRX_CTL,
+ PHASE_SELECT_MASK, sample_point);
else
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD_VPTX_CTL, 0x1F, sample_point);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
- PHASE_NOT_RESET, PHASE_NOT_RESET);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, 0);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
+ rtsx_pci_write_register(pcr, SD_VPTX_CTL,
+ PHASE_SELECT_MASK, sample_point);
+ rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET,
+ PHASE_NOT_RESET);
+ rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0);
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
return 0;
}
@@ -708,10 +701,12 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
{
int err;
struct mmc_command cmd = {};
+ struct rtsx_pcr *pcr = host->pcr;
- err = sd_change_phase(host, sample_point, true);
- if (err < 0)
- return err;
+ sd_change_phase(host, sample_point, true);
+
+ rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
+ SD_RSP_80CLK_TIMEOUT_EN);
cmd.opcode = opcode;
err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100);
@@ -719,9 +714,12 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
/* Wait till SD DATA IDLE */
sd_wait_data_idle(host);
sd_clear_error(host);
+ rtsx_pci_write_register(pcr, SD_CFG3,
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
return err;
}
+ rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
return 0;
}
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 08ae0ff13513..b988997a1e80 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -73,6 +73,7 @@ struct sdhci_acpi_slot {
unsigned int caps2;
mmc_pm_flag_t pm_caps;
unsigned int flags;
+ size_t priv_size;
int (*probe_slot)(struct platform_device *, const char *, const char *);
int (*remove_slot)(struct platform_device *);
};
@@ -82,13 +83,118 @@ struct sdhci_acpi_host {
const struct sdhci_acpi_slot *slot;
struct platform_device *pdev;
bool use_runtime_pm;
+ unsigned long private[0] ____cacheline_aligned;
};
+static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
+{
+ return (void *)c->private;
+}
+
static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
{
return c->slot && (c->slot->flags & flag);
}
+enum {
+ INTEL_DSM_FNS = 0,
+ INTEL_DSM_V18_SWITCH = 3,
+ INTEL_DSM_V33_SWITCH = 4,
+};
+
+struct intel_host {
+ u32 dsm_fns;
+};
+
+static const guid_t intel_dsm_guid =
+ GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
+ 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
+
+static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
+ unsigned int fn, u32 *result)
+{
+ union acpi_object *obj;
+ int err = 0;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
+ if (!obj)
+ return -EOPNOTSUPP;
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ *result = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length > 0) {
+ size_t len = min_t(size_t, obj->buffer.length, 4);
+
+ *result = 0;
+ memcpy(result, obj->buffer.pointer, len);
+ } else {
+ dev_err(dev, "%s DSM fn %u obj->type %d obj->buffer.length %d\n",
+ __func__, fn, obj->type, obj->buffer.length);
+ err = -EINVAL;
+ }
+
+ ACPI_FREE(obj);
+
+ return err;
+}
+
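+/* only invoke a DSM function that the FNS bitmap reports as supported */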
+static int intel_dsm(struct intel_host *intel_host, struct device *dev,
+ unsigned int fn, u32 *result)
+{
+ if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
+ return -EOPNOTSUPP;
+
+ return __intel_dsm(intel_host, dev, fn, result);
+}
+
+static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
+ struct mmc_host *mmc)
+{
+ int err;
+
+ err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
+ if (err) {
+ pr_debug("%s: DSM not supported, error %d\n",
+ mmc_hostname(mmc), err);
+ return;
+ }
+
+ pr_debug("%s: DSM function mask %#x\n",
+ mmc_hostname(mmc), intel_host->dsm_fns);
+}
+
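+/*
+ * after the standard SDHCI signal voltage switch, tell the platform
+ * firmware about the new voltage via the matching DSM function
+ */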
+static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct device *dev = mmc_dev(mmc);
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+ unsigned int fn;
+ u32 result = 0;
+ int err;
+
+ err = sdhci_start_signal_voltage_switch(mmc, ios);
+ if (err)
+ return err;
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ fn = INTEL_DSM_V33_SWITCH;
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ fn = INTEL_DSM_V18_SWITCH;
+ break;
+ default:
+ return 0;
+ }
+
+ err = intel_dsm(intel_host, dev, fn, &result);
+ pr_debug("%s: %s DSM fn %u error %d result %u\n",
+ mmc_hostname(mmc), __func__, fn, err, result);
+
+ return 0;
+}
+
static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
{
u8 reg;
@@ -269,56 +375,26 @@ out:
return ret;
}
-static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
+static int intel_probe_slot(struct platform_device *pdev, const char *hid,
+ const char *uid)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
- struct sdhci_host *host;
-
- if (!c || !c->host)
- return 0;
-
- host = c->host;
-
- /* Platform specific code during emmc probe slot goes here */
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+ struct sdhci_host *host = c->host;
if (hid && uid && !strcmp(hid, "80860F14") && !strcmp(uid, "1") &&
sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 &&
sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807)
host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
- return 0;
-}
-
-static int sdhci_acpi_sdio_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
-{
- struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
-
- if (!c || !c->host)
- return 0;
-
- /* Platform specific code during sdio probe slot goes here */
-
- return 0;
-}
-
-static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
-{
- struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
- struct sdhci_host *host;
-
- if (!c || !c->host || !c->slot)
- return 0;
-
- host = c->host;
-
- /* Platform specific code during sd probe slot goes here */
-
if (hid && !strcmp(hid, "80865ACA"))
host->mmc_host_ops.get_cd = bxt_get_cd;
+ intel_dsm_init(intel_host, &pdev->dev, host->mmc);
+
+ host->mmc_host_ops.start_signal_voltage_switch =
+ intel_start_signal_voltage_switch;
+
return 0;
}
@@ -332,7 +408,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_STOP_WITH_TC |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
- .probe_slot = sdhci_acpi_emmc_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
@@ -343,7 +420,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
MMC_CAP_WAIT_WHILE_BUSY,
.flags = SDHCI_ACPI_RUNTIME_PM,
.pm_caps = MMC_PM_KEEP_POWER,
- .probe_slot = sdhci_acpi_sdio_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
@@ -353,7 +431,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
SDHCI_QUIRK2_STOP_WITH_TC,
.caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM,
- .probe_slot = sdhci_acpi_sd_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = {
@@ -429,11 +508,13 @@ static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid,
static int sdhci_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct sdhci_acpi_slot *slot;
struct acpi_device *device, *child;
struct sdhci_acpi_host *c;
struct sdhci_host *host;
struct resource *iomem;
resource_size_t len;
+ size_t priv_size;
const char *hid;
const char *uid;
int err;
@@ -443,7 +524,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
return -ENODEV;
hid = acpi_device_hid(device);
- uid = device->pnp.unique_id;
+ uid = acpi_device_uid(device);
+
+ slot = sdhci_acpi_get_slot(hid, uid);
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
@@ -467,13 +550,14 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev)))
return -ENOMEM;
- host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host));
+ priv_size = slot ? slot->priv_size : 0;
+ host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size);
if (IS_ERR(host))
return PTR_ERR(host);
c = sdhci_priv(host);
c->host = host;
- c->slot = sdhci_acpi_get_slot(hid, uid);
+ c->slot = slot;
c->pdev = pdev;
c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);
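
A note on the new per-slot private data: priv_size extra bytes are tacked onto the sdhci_alloc_host() allocation and intel_probe_slot() reaches them through sdhci_acpi_priv(c). The accessor itself is not visible in these hunks; the following is only a sketch of the presumed layout, with the trailing-array declaration shown as an assumption rather than the driver's actual definition.

/* Sketch under the assumption that the private area trails the host struct. */
struct sdhci_acpi_host {
	struct sdhci_host		*host;
	const struct sdhci_acpi_slot	*slot;
	struct platform_device		*pdev;
	bool				use_runtime_pm;
	unsigned long			private[0] ____cacheline_aligned;
};

static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
{
	return (void *)c->private;
}
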
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 56529c3d389a..0f589e26ee63 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -13,6 +13,7 @@
* GNU General Public License for more details.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -27,15 +28,14 @@
#define SDHCI_CDNS_HRS04_ACK BIT(26)
#define SDHCI_CDNS_HRS04_RD BIT(25)
#define SDHCI_CDNS_HRS04_WR BIT(24)
-#define SDHCI_CDNS_HRS04_RDATA_SHIFT 16
-#define SDHCI_CDNS_HRS04_WDATA_SHIFT 8
-#define SDHCI_CDNS_HRS04_ADDR_SHIFT 0
+#define SDHCI_CDNS_HRS04_RDATA GENMASK(23, 16)
+#define SDHCI_CDNS_HRS04_WDATA GENMASK(15, 8)
+#define SDHCI_CDNS_HRS04_ADDR GENMASK(5, 0)
#define SDHCI_CDNS_HRS06 0x18 /* eMMC control */
#define SDHCI_CDNS_HRS06_TUNE_UP BIT(15)
-#define SDHCI_CDNS_HRS06_TUNE_SHIFT 8
-#define SDHCI_CDNS_HRS06_TUNE_MASK 0x3f
-#define SDHCI_CDNS_HRS06_MODE_MASK 0x7
+#define SDHCI_CDNS_HRS06_TUNE GENMASK(13, 8)
+#define SDHCI_CDNS_HRS06_MODE GENMASK(2, 0)
#define SDHCI_CDNS_HRS06_MODE_SD 0x0
#define SDHCI_CDNS_HRS06_MODE_MMC_SDR 0x2
#define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3
@@ -105,8 +105,8 @@ static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
u32 tmp;
int ret;
- tmp = (data << SDHCI_CDNS_HRS04_WDATA_SHIFT) |
- (addr << SDHCI_CDNS_HRS04_ADDR_SHIFT);
+ tmp = FIELD_PREP(SDHCI_CDNS_HRS04_WDATA, data) |
+ FIELD_PREP(SDHCI_CDNS_HRS04_ADDR, addr);
writel(tmp, reg);
tmp |= SDHCI_CDNS_HRS04_WR;
@@ -189,8 +189,8 @@ static void sdhci_cdns_set_emmc_mode(struct sdhci_cdns_priv *priv, u32 mode)
/* The speed mode for eMMC is selected by HRS06 register */
tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
- tmp &= ~SDHCI_CDNS_HRS06_MODE_MASK;
- tmp |= mode;
+ tmp &= ~SDHCI_CDNS_HRS06_MODE;
+ tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_MODE, mode);
writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS06);
}
@@ -199,7 +199,7 @@ static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv)
u32 tmp;
tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
- return tmp & SDHCI_CDNS_HRS06_MODE_MASK;
+ return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp);
}
static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
@@ -254,12 +254,12 @@ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06;
u32 tmp;
- if (WARN_ON(val > SDHCI_CDNS_HRS06_TUNE_MASK))
+ if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val)))
return -EINVAL;
tmp = readl(reg);
- tmp &= ~(SDHCI_CDNS_HRS06_TUNE_MASK << SDHCI_CDNS_HRS06_TUNE_SHIFT);
- tmp |= val << SDHCI_CDNS_HRS06_TUNE_SHIFT;
+ tmp &= ~SDHCI_CDNS_HRS06_TUNE;
+ tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val);
tmp |= SDHCI_CDNS_HRS06_TUNE_UP;
writel(tmp, reg);
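
These sdhci-cadence hunks swap open-coded shift/mask arithmetic for the <linux/bitfield.h> helpers: GENMASK() names the field once, and FIELD_PREP()/FIELD_GET()/FIELD_FIT() derive the shift from that mask. A self-contained sketch of the equivalence, using a made-up field name:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_MODE	GENMASK(2, 0)		/* bits 2..0 of a register */

static u32 example_pack_mode(u32 reg, u32 mode)
{
	reg &= ~EXAMPLE_MODE;			/* was: reg &= ~0x7 */
	reg |= FIELD_PREP(EXAMPLE_MODE, mode);	/* was: reg |= mode << 0 */
	return reg;
}

static u32 example_unpack_mode(u32 reg)
{
	return FIELD_GET(EXAMPLE_MODE, reg);	/* was: (reg >> 0) & 0x7 */
}
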
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index fc73e56eb1e2..3fb7d2eec93f 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -123,14 +123,17 @@
#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
#define MSM_MMC_AUTOSUSPEND_DELAY_MS 50
+
+/* Timeout value to avoid infinite waiting for pwr_irq */
+#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
int pwr_irq; /* power irq */
- struct clk *clk; /* main SD/MMC bus clock */
- struct clk *pclk; /* SDHC peripheral bus clock */
struct clk *bus_clk; /* SDHC bus voter clock */
struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/
+ struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
unsigned long clk_rate;
struct mmc_host *mmc;
bool use_14lpp_dll_reset;
@@ -138,6 +141,10 @@ struct sdhci_msm_host {
bool calibration_done;
u8 saved_tuning_phase;
bool use_cdclp533;
+ u32 curr_pwr_state;
+ u32 curr_io_level;
+ wait_queue_head_t pwr_irq_wait;
+ bool pwr_irq_flag;
};
static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
@@ -164,10 +171,11 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
struct mmc_ios curr_ios = host->mmc->ios;
+ struct clk *core_clk = msm_host->bulk_clks[0].clk;
int rc;
clock = msm_get_clock_rate_for_bus_mode(host, clock);
- rc = clk_set_rate(msm_host->clk, clock);
+ rc = clk_set_rate(core_clk, clock);
if (rc) {
pr_err("%s: Failed to set clock at rate %u at timing %d\n",
mmc_hostname(host->mmc), clock,
@@ -176,7 +184,7 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
}
msm_host->clk_rate = clock;
pr_debug("%s: Setting clock at rate %lu at timing %d\n",
- mmc_hostname(host->mmc), clk_get_rate(msm_host->clk),
+ mmc_hostname(host->mmc), clk_get_rate(core_clk),
curr_ios.timing);
}
@@ -995,21 +1003,142 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
sdhci_msm_hs400(host, &mmc->ios);
}
-static void sdhci_msm_voltage_switch(struct sdhci_host *host)
+static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
+{
+ init_waitqueue_head(&msm_host->pwr_irq_wait);
+}
+
+static inline void sdhci_msm_complete_pwr_irq_wait(
+ struct sdhci_msm_host *msm_host)
+{
+ wake_up(&msm_host->pwr_irq_wait);
+}
+
+/*
+ * sdhci_msm_check_power_status API should be called when register writes
+ * that can toggle the sdhci IO bus ON/OFF or change IO lines HIGH/LOW
+ * happen. The state to which the register writes will change the IO lines
+ * should be passed as the argument req_type. This API checks whether the
+ * IO lines are already in the expected state and waits for the power irq
+ * only if the power irq is expected to be triggered based on the current
+ * and expected IO line states.
+ */
+static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ bool done = false;
+
+ pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
+ mmc_hostname(host->mmc), __func__, req_type,
+ msm_host->curr_pwr_state, msm_host->curr_io_level);
+
+ /*
+ * The IRQ for request type IO High/LOW will be generated when -
+ * there is a state change in 1.8V enable bit (bit 3) of
+ * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
+ * which indicates 3.3V IO voltage. So, when MMC core layer tries
+ * to set it to 3.3V before card detection happens, the
+ * IRQ doesn't get triggered as there is no state change in this bit.
+ * The driver already handles this case by changing the IO voltage
+ * level to high as part of controller power up sequence. Hence, check
+ * for host->pwr to handle a case where IO voltage high request is
+ * issued even before controller power up.
+ */
+ if ((req_type & REQ_IO_HIGH) && !host->pwr) {
+ pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
+ mmc_hostname(host->mmc), req_type);
+ return;
+ }
+ if ((req_type & msm_host->curr_pwr_state) ||
+ (req_type & msm_host->curr_io_level))
+ done = true;
+ /*
+ * This is needed here to handle cases where register writes will
+ * not change the current bus state or io level of the controller.
+ * In this case, no power irq will be triggered and we should
+ * not wait.
+ */
+ if (!done) {
+ if (!wait_event_timeout(msm_host->pwr_irq_wait,
+ msm_host->pwr_irq_flag,
+ msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
+ dev_warn(&msm_host->pdev->dev,
+ "%s: pwr_irq for req: (%d) timed out\n",
+ mmc_hostname(host->mmc), req_type);
+ }
+ pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
+ __func__, req_type);
+}
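+
+/*
+ * The wait above is the usual flag-plus-waitqueue idiom: the register writer
+ * clears pwr_irq_flag before the write, the power irq handler sets it and
+ * wakes the queue, and wait_event_timeout() returns zero only if the flag is
+ * still unset when the timeout expires. A stripped-down sketch of the same
+ * pattern, with placeholder names, purely for illustration:
+ *
+ *	#include <linux/interrupt.h>
+ *	#include <linux/jiffies.h>
+ *	#include <linux/printk.h>
+ *	#include <linux/wait.h>
+ *
+ *	static DECLARE_WAIT_QUEUE_HEAD(example_wait);
+ *	static bool example_flag;
+ *
+ *	static void example_write_and_wait(void)
+ *	{
+ *		example_flag = false;	// arm before triggering the event
+ *		// ... register write that may raise the irq goes here ...
+ *		if (!wait_event_timeout(example_wait, example_flag,
+ *					msecs_to_jiffies(5000)))
+ *			pr_warn("example: irq timed out\n");
+ *	}
+ *
+ *	static irqreturn_t example_irq(int irq, void *data)
+ *	{
+ *		example_flag = true;
+ *		wake_up(&example_wait);
+ *		return IRQ_HANDLED;
+ *	}
+ */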
+
+static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
+ mmc_hostname(host->mmc),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
+}
+
+static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
u32 irq_status, irq_ack = 0;
+ int retry = 10;
+ int pwr_state = 0, io_level = 0;
+
irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
irq_status &= INT_MASK;
writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);
- if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
+ /*
+ * There is a rare HW scenario where the first clear pulse could be
+ * lost when the actual reset and the clear/read of the status register
+ * happen at the same time. Hence, retry at least 10 times to make
+ * sure the status register is cleared. Otherwise, this will result in
+ * a spurious power IRQ and system instability.
+ */
+ while (irq_status & readl_relaxed(msm_host->core_mem +
+ CORE_PWRCTL_STATUS)) {
+ if (retry == 0) {
+ pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
+ mmc_hostname(host->mmc), irq_status);
+ sdhci_msm_dump_pwr_ctrl_regs(host);
+ WARN_ON(1);
+ break;
+ }
+ writel_relaxed(irq_status,
+ msm_host->core_mem + CORE_PWRCTL_CLEAR);
+ retry--;
+ udelay(10);
+ }
+
+ /* Handle BUS ON/OFF */
+ if (irq_status & CORE_PWRCTL_BUS_ON) {
+ pwr_state = REQ_BUS_ON;
+ io_level = REQ_IO_HIGH;
+ irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+ }
+ if (irq_status & CORE_PWRCTL_BUS_OFF) {
+ pwr_state = REQ_BUS_OFF;
+ io_level = REQ_IO_LOW;
irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
- if (irq_status & (CORE_PWRCTL_IO_LOW | CORE_PWRCTL_IO_HIGH))
+ }
+ /* Handle IO LOW/HIGH */
+ if (irq_status & CORE_PWRCTL_IO_LOW) {
+ io_level = REQ_IO_LOW;
+ irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+ }
+ if (irq_status & CORE_PWRCTL_IO_HIGH) {
+ io_level = REQ_IO_HIGH;
irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+ }
/*
* The driver has to acknowledge the interrupt, switch voltages and
@@ -1017,13 +1146,27 @@ static void sdhci_msm_voltage_switch(struct sdhci_host *host)
* switches are handled by the sdhci core, so just report success.
*/
writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);
+
+ if (pwr_state)
+ msm_host->curr_pwr_state = pwr_state;
+ if (io_level)
+ msm_host->curr_io_level = io_level;
+
+ pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
+ mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
+ irq_ack);
}
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
struct sdhci_host *host = (struct sdhci_host *)data;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_msm_handle_pwr_irq(host, irq);
+ msm_host->pwr_irq_flag = 1;
+ sdhci_msm_complete_pwr_irq_wait(msm_host);
- sdhci_msm_voltage_switch(host);
return IRQ_HANDLED;
}
@@ -1032,8 +1175,9 @@ static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct clk *core_clk = msm_host->bulk_clks[0].clk;
- return clk_round_rate(msm_host->clk, ULONG_MAX);
+ return clk_round_rate(core_clk, ULONG_MAX);
}
static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
@@ -1092,6 +1236,69 @@ out:
__sdhci_msm_set_clock(host, clock);
}
+/*
+ * Platform specific register write functions. This is so that, if any
+ * register write needs to be followed up by platform specific actions,
+ * they can be added here. These functions can go to sleep when writes
+ * to certain registers are done.
+ * These functions rely on sdhci_set_ios not using a spinlock.
+ */
+static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 req_type = 0;
+
+ switch (reg) {
+ case SDHCI_HOST_CONTROL2:
+ req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
+ REQ_IO_HIGH;
+ break;
+ case SDHCI_SOFTWARE_RESET:
+ if (host->pwr && (val & SDHCI_RESET_ALL))
+ req_type = REQ_BUS_OFF;
+ break;
+ case SDHCI_POWER_CONTROL:
+ req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
+ break;
+ }
+
+ if (req_type) {
+ msm_host->pwr_irq_flag = 0;
+ /*
+ * Since this register write may trigger a power irq, ensure
+ * all previous register writes are complete by this point.
+ */
+ mb();
+ }
+ return req_type;
+}
+
+/* This function may sleep */
+static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ u32 req_type = 0;
+
+ req_type = __sdhci_msm_check_write(host, val, reg);
+ writew_relaxed(val, host->ioaddr + reg);
+
+ if (req_type)
+ sdhci_msm_check_power_status(host, req_type);
+}
+
+/* This function may sleep */
+static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ u32 req_type = 0;
+
+ req_type = __sdhci_msm_check_write(host, val, reg);
+
+ writeb_relaxed(val, host->ioaddr + reg);
+
+ if (req_type)
+ sdhci_msm_check_power_status(host, req_type);
+}
+
static const struct of_device_id sdhci_msm_dt_match[] = {
{ .compatible = "qcom,sdhci-msm-v4" },
{},
@@ -1106,7 +1313,8 @@ static const struct sdhci_ops sdhci_msm_ops = {
.get_max_clock = sdhci_msm_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
- .voltage_switch = sdhci_msm_voltage_switch,
+ .write_w = sdhci_msm_writew,
+ .write_b = sdhci_msm_writeb,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
@@ -1124,6 +1332,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_msm_host *msm_host;
struct resource *core_memres;
+ struct clk *clk;
int ret;
u16 host_version, core_minor;
u32 core_version, config;
@@ -1160,24 +1369,42 @@ static int sdhci_msm_probe(struct platform_device *pdev)
}
/* Setup main peripheral bus clock */
- msm_host->pclk = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(msm_host->pclk)) {
- ret = PTR_ERR(msm_host->pclk);
+ clk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
goto bus_clk_disable;
}
-
- ret = clk_prepare_enable(msm_host->pclk);
- if (ret)
- goto bus_clk_disable;
+ msm_host->bulk_clks[1].clk = clk;
/* Setup SDC MMC clock */
- msm_host->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(msm_host->clk)) {
- ret = PTR_ERR(msm_host->clk);
+ clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
- goto pclk_disable;
+ goto bus_clk_disable;
}
+ msm_host->bulk_clks[0].clk = clk;
+
+ /* Vote for maximum clock rate for maximum performance */
+ ret = clk_set_rate(clk, INT_MAX);
+ if (ret)
+ dev_warn(&pdev->dev, "core clock boost failed\n");
+
+ clk = devm_clk_get(&pdev->dev, "cal");
+ if (IS_ERR(clk))
+ clk = NULL;
+ msm_host->bulk_clks[2].clk = clk;
+
+ clk = devm_clk_get(&pdev->dev, "sleep");
+ if (IS_ERR(clk))
+ clk = NULL;
+ msm_host->bulk_clks[3].clk = clk;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
+ if (ret)
+ goto bus_clk_disable;
/*
* xo clock is needed for FLL feature of cm_dll.
@@ -1189,15 +1416,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
}
- /* Vote for maximum clock rate for maximum performance */
- ret = clk_set_rate(msm_host->clk, INT_MAX);
- if (ret)
- dev_warn(&pdev->dev, "core clock boost failed\n");
-
- ret = clk_prepare_enable(msm_host->clk);
- if (ret)
- goto pclk_disable;
-
core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);
@@ -1251,6 +1469,21 @@ static int sdhci_msm_probe(struct platform_device *pdev)
CORE_VENDOR_SPEC_CAPABILITIES0);
}
+ /*
+ * The power-on reset state may trigger a power irq if the previous
+ * status of PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling
+ * the pwr irq interrupt in the GIC, any pending power irq interrupt
+ * should be acknowledged. Otherwise, the power irq interrupt handler
+ * would fire prematurely.
+ */
+ sdhci_msm_handle_pwr_irq(host, 0);
+
+ /*
+ * Ensure that the above writes are propagated before interrupt enablement
+ * in GIC.
+ */
+ mb();
+
/* Setup IRQ for handling power/voltage tasks with PMIC */
msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
if (msm_host->pwr_irq < 0) {
@@ -1260,6 +1493,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable;
}
+ sdhci_msm_init_pwr_irq_wait(msm_host);
+ /* Enable pwr irq interrupts */
+ writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK);
+
ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
sdhci_msm_pwr_irq, IRQF_ONESHOT,
dev_name(&pdev->dev), host);
@@ -1290,9 +1527,8 @@ pm_runtime_disable:
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
clk_disable:
- clk_disable_unprepare(msm_host->clk);
-pclk_disable:
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
bus_clk_disable:
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
@@ -1315,8 +1551,8 @@ static int sdhci_msm_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- clk_disable_unprepare(msm_host->clk);
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
sdhci_pltfm_free(pdev);
@@ -1330,8 +1566,8 @@ static int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- clk_disable_unprepare(msm_host->clk);
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
return 0;
}
@@ -1341,21 +1577,9 @@ static int sdhci_msm_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- int ret;
- ret = clk_prepare_enable(msm_host->clk);
- if (ret) {
- dev_err(dev, "clk_enable failed for core_clk: %d\n", ret);
- return ret;
- }
- ret = clk_prepare_enable(msm_host->pclk);
- if (ret) {
- dev_err(dev, "clk_enable failed for iface_clk: %d\n", ret);
- clk_disable_unprepare(msm_host->clk);
- return ret;
- }
-
- return 0;
+ return clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
}
#endif
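
The clock rework in this file replaces the individually managed core/iface clocks with a clk_bulk_data array, so prepare/enable and all the unwind paths collapse to single bulk calls; the optional "cal" and "sleep" clocks are simply left as NULL entries, which the clk API treats as no-ops. A minimal sketch of the same pattern around a hypothetical host structure:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>

struct example_host {
	struct clk_bulk_data bulk_clks[4];	/* core, iface, cal, sleep */
};

static int example_get_and_enable_clocks(struct device *dev,
					 struct example_host *h)
{
	struct clk *clk;

	clk = devm_clk_get(dev, "core");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	h->bulk_clks[0].clk = clk;

	clk = devm_clk_get(dev, "iface");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	h->bulk_clks[1].clk = clk;

	/* optional clocks: a NULL struct clk is a no-op for the clk API */
	clk = devm_clk_get(dev, "cal");
	h->bulk_clks[2].clk = IS_ERR(clk) ? NULL : clk;

	clk = devm_clk_get(dev, "sleep");
	h->bulk_clks[3].clk = IS_ERR(clk) ? NULL : clk;

	return clk_bulk_prepare_enable(ARRAY_SIZE(h->bulk_clks), h->bulk_clks);
}

Teardown is then the single clk_bulk_disable_unprepare(ARRAY_SIZE(h->bulk_clks), h->bulk_clks) call seen in the remove and runtime-PM hunks above.
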
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 4e47ed6bc716..682c573e20a7 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -114,7 +114,8 @@ static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power_noreg(host, mode, vdd);
}
-void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
{
if (timing == MMC_TIMING_MMC_DDR52)
sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d96a057a7db8..1f424374bbbb 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -458,6 +458,33 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
return clock / 256 / 16;
}
+static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
+{
+ u32 val;
+ ktime_t timeout;
+
+ val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+
+ if (enable)
+ val |= ESDHC_CLOCK_SDCLKEN;
+ else
+ val &= ~ESDHC_CLOCK_SDCLKEN;
+
+ sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
+
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ val = ESDHC_CLOCK_STABLE;
+ while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
+ if (ktime_after(ktime_get(), timeout)) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+ udelay(10);
+ }
+}
+
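
esdhc_clock_enable() is moved ahead of its new caller and uses the ktime-based poll-with-deadline idiom that recurs in these drivers: record a deadline with ktime_add_ms(), poll the status bit, and give up once ktime_after() reports the deadline has passed. The same idiom in isolation, with a placeholder condition:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>

/* Poll a caller-supplied condition for up to 20 ms. */
static int example_poll_ready(bool (*ready)(void))
{
	ktime_t timeout = ktime_add_ms(ktime_get(), 20);

	while (!ready()) {
		if (ktime_after(ktime_get(), timeout))
			return -ETIMEDOUT;
		udelay(10);
	}

	return 0;
}
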
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -469,8 +496,10 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
host->mmc->actual_clock = 0;
- if (clock == 0)
+ if (clock == 0) {
+ esdhc_clock_enable(host, false);
return;
+ }
/* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
if (esdhc->vendor_ver < VENDOR_V_23)
@@ -558,33 +587,6 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
sdhci_writel(host, ctrl, ESDHC_PROCTL);
}
-static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
-{
- u32 val;
- ktime_t timeout;
-
- val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
-
- if (enable)
- val |= ESDHC_CLOCK_SDCLKEN;
- else
- val &= ~ESDHC_CLOCK_SDCLKEN;
-
- sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
-
- /* Wait max 20 ms */
- timeout = ktime_add_ms(ktime_get(), 20);
- val = ESDHC_CLOCK_STABLE;
- while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
- if (ktime_after(ktime_get(), timeout)) {
- pr_err("%s: Internal clock never stabilised.\n",
- mmc_hostname(host->mmc));
- break;
- }
- udelay(10);
- }
-}
-
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
sdhci_reset(host, mask);
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
new file mode 100644
index 000000000000..628bfe9a3d17
--- /dev/null
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -0,0 +1,607 @@
+/**
+ * SDHCI Controller driver for TI's OMAP SoCs
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#include "sdhci-pltfm.h"
+
+#define SDHCI_OMAP_CON 0x12c
+#define CON_DW8 BIT(5)
+#define CON_DMA_MASTER BIT(20)
+#define CON_INIT BIT(1)
+#define CON_OD BIT(0)
+
+#define SDHCI_OMAP_CMD 0x20c
+
+#define SDHCI_OMAP_HCTL 0x228
+#define HCTL_SDBP BIT(8)
+#define HCTL_SDVS_SHIFT 9
+#define HCTL_SDVS_MASK (0x7 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_33 (0x7 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_30 (0x6 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_18 (0x5 << HCTL_SDVS_SHIFT)
+
+#define SDHCI_OMAP_SYSCTL 0x22c
+#define SYSCTL_CEN BIT(2)
+#define SYSCTL_CLKD_SHIFT 6
+#define SYSCTL_CLKD_MASK 0x3ff
+
+#define SDHCI_OMAP_STAT 0x230
+
+#define SDHCI_OMAP_IE 0x234
+#define INT_CC_EN BIT(0)
+
+#define SDHCI_OMAP_AC12 0x23c
+#define AC12_V1V8_SIGEN BIT(19)
+
+#define SDHCI_OMAP_CAPA 0x240
+#define CAPA_VS33 BIT(24)
+#define CAPA_VS30 BIT(25)
+#define CAPA_VS18 BIT(26)
+
+#define SDHCI_OMAP_TIMEOUT 1 /* 1 msec */
+
+#define SYSCTL_CLKD_MAX 0x3FF
+
+#define IOV_1V8 1800000 /* 1800000 uV */
+#define IOV_3V0 3000000 /* 3000000 uV */
+#define IOV_3V3 3300000 /* 3300000 uV */
+
+struct sdhci_omap_data {
+ u32 offset;
+};
+
+struct sdhci_omap_host {
+ void __iomem *base;
+ struct device *dev;
+ struct regulator *pbias;
+ bool pbias_enabled;
+ struct sdhci_host *host;
+ u8 bus_mode;
+ u8 power_mode;
+};
+
+static inline u32 sdhci_omap_readl(struct sdhci_omap_host *host,
+ unsigned int offset)
+{
+ return readl(host->base + offset);
+}
+
+static inline void sdhci_omap_writel(struct sdhci_omap_host *host,
+ unsigned int offset, u32 data)
+{
+ writel(data, host->base + offset);
+}
+
+static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host,
+ bool power_on, unsigned int iov)
+{
+ int ret;
+ struct device *dev = omap_host->dev;
+
+ if (IS_ERR(omap_host->pbias))
+ return 0;
+
+ if (power_on) {
+ ret = regulator_set_voltage(omap_host->pbias, iov, iov);
+ if (ret) {
+ dev_err(dev, "pbias set voltage failed\n");
+ return ret;
+ }
+
+ if (omap_host->pbias_enabled)
+ return 0;
+
+ ret = regulator_enable(omap_host->pbias);
+ if (ret) {
+ dev_err(dev, "pbias reg enable fail\n");
+ return ret;
+ }
+
+ omap_host->pbias_enabled = true;
+ } else {
+ if (!omap_host->pbias_enabled)
+ return 0;
+
+ ret = regulator_disable(omap_host->pbias);
+ if (ret) {
+ dev_err(dev, "pbias reg disable fail\n");
+ return ret;
+ }
+ omap_host->pbias_enabled = false;
+ }
+
+ return 0;
+}
+
+static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
+ unsigned int iov)
+{
+ int ret;
+ struct sdhci_host *host = omap_host->host;
+ struct mmc_host *mmc = host->mmc;
+
+ ret = sdhci_omap_set_pbias(omap_host, false, 0);
+ if (ret)
+ return ret;
+
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = regulator_set_voltage(mmc->supply.vqmmc, iov, iov);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "vqmmc set voltage failed\n");
+ return ret;
+ }
+ }
+
+ ret = sdhci_omap_set_pbias(omap_host, true, iov);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host,
+ unsigned char signal_voltage)
+{
+ u32 reg;
+ ktime_t timeout;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
+ reg &= ~HCTL_SDVS_MASK;
+
+ if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ reg |= HCTL_SDVS_33;
+ else
+ reg |= HCTL_SDVS_18;
+
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
+
+ reg |= HCTL_SDBP;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
+
+ /* wait 1ms */
+ timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
+ while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)) {
+ if (WARN_ON(ktime_after(ktime_get(), timeout)))
+ return;
+ usleep_range(5, 10);
+ }
+}
+
+static int sdhci_omap_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ u32 reg;
+ int ret;
+ unsigned int iov;
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+ struct device *dev;
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+ dev = omap_host->dev;
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ if (!(reg & CAPA_VS33))
+ return -EOPNOTSUPP;
+
+ sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
+ reg &= ~AC12_V1V8_SIGEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
+
+ iov = IOV_3V3;
+ } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ if (!(reg & CAPA_VS18))
+ return -EOPNOTSUPP;
+
+ sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
+ reg |= AC12_V1V8_SIGEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
+
+ iov = IOV_1V8;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ ret = sdhci_omap_enable_iov(omap_host, iov);
+ if (ret) {
+ dev_err(dev, "failed to switch IO voltage to %dmV\n", iov);
+ return ret;
+ }
+
+ dev_dbg(dev, "IO voltage switched to %dmV\n", iov);
+ return 0;
+}
+
+static void sdhci_omap_set_bus_mode(struct sdhci_omap_host *omap_host,
+ unsigned int mode)
+{
+ u32 reg;
+
+ if (omap_host->bus_mode == mode)
+ return;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ if (mode == MMC_BUSMODE_OPENDRAIN)
+ reg |= CON_OD;
+ else
+ reg &= ~CON_OD;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ omap_host->bus_mode = mode;
+}
+
+static void sdhci_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_omap_set_bus_mode(omap_host, ios->bus_mode);
+ sdhci_set_ios(mmc, ios);
+}
+
+static u16 sdhci_omap_calc_divisor(struct sdhci_pltfm_host *host,
+ unsigned int clock)
+{
+ u16 dsor;
+
+ dsor = DIV_ROUND_UP(clk_get_rate(host->clk), clock);
+ if (dsor > SYSCTL_CLKD_MAX)
+ dsor = SYSCTL_CLKD_MAX;
+
+ return dsor;
+}
+
+static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
+ reg |= SYSCTL_CEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg);
+}
+
+static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
+ reg &= ~SYSCTL_CEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg);
+}
+
+static void sdhci_omap_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long clkdiv;
+
+ sdhci_omap_stop_clock(omap_host);
+
+ if (!clock)
+ return;
+
+ clkdiv = sdhci_omap_calc_divisor(pltfm_host, clock);
+ clkdiv = (clkdiv & SYSCTL_CLKD_MASK) << SYSCTL_CLKD_SHIFT;
+ sdhci_enable_clk(host, clkdiv);
+
+ sdhci_omap_start_clock(omap_host);
+}
+
+static void sdhci_omap_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+}
+
+static int sdhci_omap_enable_dma(struct sdhci_host *host)
+{
+ u32 reg;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg |= CON_DMA_MASTER;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ return 0;
+}
+
+static unsigned int sdhci_omap_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk) / SYSCTL_CLKD_MAX;
+}
+
+static void sdhci_omap_set_bus_width(struct sdhci_host *host, int width)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ if (width == MMC_BUS_WIDTH_8)
+ reg |= CON_DW8;
+ else
+ reg &= ~CON_DW8;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ sdhci_set_bus_width(host, width);
+}
+
+static void sdhci_omap_init_74_clocks(struct sdhci_host *host, u8 power_mode)
+{
+ u32 reg;
+ ktime_t timeout;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ if (omap_host->power_mode == power_mode)
+ return;
+
+ if (power_mode != MMC_POWER_ON)
+ return;
+
+ disable_irq(host->irq);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg |= CON_INIT;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CMD, 0x0);
+
+ /* wait 1ms */
+ timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
+ while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)) {
+ if (WARN_ON(ktime_after(ktime_get(), timeout)))
+ return;
+ usleep_range(5, 10);
+ }
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg &= ~CON_INIT;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_STAT, INT_CC_EN);
+
+ enable_irq(host->irq);
+
+ omap_host->power_mode = power_mode;
+}
+
+static struct sdhci_ops sdhci_omap_ops = {
+ .set_clock = sdhci_omap_set_clock,
+ .set_power = sdhci_omap_set_power,
+ .enable_dma = sdhci_omap_enable_dma,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_min_clock = sdhci_omap_get_min_clock,
+ .set_bus_width = sdhci_omap_set_bus_width,
+ .platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+ int ret = 0;
+ struct device *dev = omap_host->dev;
+ struct regulator *vqmmc;
+
+ vqmmc = regulator_get(dev, "vqmmc");
+ if (IS_ERR(vqmmc)) {
+ ret = PTR_ERR(vqmmc);
+ goto reg_put;
+ }
+
+ /* voltage capabilities might be set by the boot loader, clear them */
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ reg &= ~(CAPA_VS18 | CAPA_VS30 | CAPA_VS33);
+
+ if (regulator_is_supported_voltage(vqmmc, IOV_3V3, IOV_3V3))
+ reg |= CAPA_VS33;
+ if (regulator_is_supported_voltage(vqmmc, IOV_1V8, IOV_1V8))
+ reg |= CAPA_VS18;
+
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, reg);
+
+reg_put:
+ regulator_put(vqmmc);
+
+ return ret;
+}
+
+static const struct sdhci_pltfm_data sdhci_omap_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+ .quirks2 = SDHCI_QUIRK2_NO_1_8_V |
+ SDHCI_QUIRK2_ACMD23_BROKEN |
+ SDHCI_QUIRK2_RSP_136_HAS_CRC,
+ .ops = &sdhci_omap_ops,
+};
+
+static const struct sdhci_omap_data dra7_data = {
+ .offset = 0x200,
+};
+
+static const struct of_device_id omap_sdhci_match[] = {
+ { .compatible = "ti,dra7-sdhci", .data = &dra7_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_sdhci_match);
+
+static int sdhci_omap_probe(struct platform_device *pdev)
+{
+ int ret;
+ u32 offset;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+ struct mmc_host *mmc;
+ const struct of_device_id *match;
+ struct sdhci_omap_data *data;
+
+ match = of_match_device(omap_sdhci_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct sdhci_omap_data *)match->data;
+ if (!data) {
+ dev_err(dev, "no sdhci omap data\n");
+ return -EINVAL;
+ }
+ offset = data->offset;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata,
+ sizeof(*omap_host));
+ if (IS_ERR(host)) {
+ dev_err(dev, "Failed sdhci_pltfm_init\n");
+ return PTR_ERR(host);
+ }
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+ omap_host->host = host;
+ omap_host->base = host->ioaddr;
+ omap_host->dev = dev;
+ host->ioaddr += offset;
+
+ mmc = host->mmc;
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err_pltfm_free;
+
+ pltfm_host->clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(pltfm_host->clk)) {
+ ret = PTR_ERR(pltfm_host->clk);
+ goto err_pltfm_free;
+ }
+
+ ret = clk_set_rate(pltfm_host->clk, mmc->f_max);
+ if (ret) {
+ dev_err(dev, "failed to set clock to %d\n", mmc->f_max);
+ goto err_pltfm_free;
+ }
+
+ omap_host->pbias = devm_regulator_get_optional(dev, "pbias");
+ if (IS_ERR(omap_host->pbias)) {
+ ret = PTR_ERR(omap_host->pbias);
+ if (ret != -ENODEV)
+ goto err_pltfm_free;
+ dev_dbg(dev, "unable to get pbias regulator %d\n", ret);
+ }
+ omap_host->pbias_enabled = false;
+
+ /*
+ * omap_device_pm_domain has callbacks to enable the main
+ * functional clock and interface clock, and also to configure the
+ * SYSCONFIG register of omap devices. The callbacks are invoked
+ * as part of pm_runtime_get_sync.
+ */
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ pm_runtime_put_noidle(dev);
+ goto err_rpm_disable;
+ }
+
+ ret = sdhci_omap_set_capabilities(omap_host);
+ if (ret) {
+ dev_err(dev, "failed to set system capabilities\n");
+ goto err_put_sync;
+ }
+
+ host->mmc_host_ops.get_ro = mmc_gpio_get_ro;
+ host->mmc_host_ops.start_signal_voltage_switch =
+ sdhci_omap_start_signal_voltage_switch;
+ host->mmc_host_ops.set_ios = sdhci_omap_set_ios;
+
+ sdhci_read_caps(host);
+ host->caps |= SDHCI_CAN_DO_ADMA2;
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_put_sync;
+
+ return 0;
+
+err_put_sync:
+ pm_runtime_put_sync(dev);
+
+err_rpm_disable:
+ pm_runtime_disable(dev);
+
+err_pltfm_free:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int sdhci_omap_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+
+ sdhci_remove_host(host, true);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_omap_driver = {
+ .probe = sdhci_omap_probe,
+ .remove = sdhci_omap_remove,
+ .driver = {
+ .name = "sdhci-omap",
+ .of_match_table = omap_sdhci_match,
+ },
+};
+
+module_platform_driver(sdhci_omap_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for OMAP SoCs");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sdhci_omap");
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 67d787fa3306..3e4f04fd5175 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -32,7 +32,6 @@
#include "sdhci.h"
#include "sdhci-pci.h"
-#include "sdhci-pci-o2micro.h"
static int sdhci_pci_enable_dma(struct sdhci_host *host);
static void sdhci_pci_hw_reset(struct sdhci_host *host);
@@ -798,15 +797,6 @@ static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
.probe_slot = intel_mrfld_mmc_probe_slot,
};
-/* O2Micro extra registers */
-#define O2_SD_LOCK_WP 0xD3
-#define O2_SD_MULTI_VCC3V 0xEE
-#define O2_SD_CLKREQ 0xEC
-#define O2_SD_CAPS 0xE0
-#define O2_SD_ADMA1 0xE2
-#define O2_SD_ADMA2 0xE7
-#define O2_SD_INF_MOD 0xF1
-
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
u8 scratch;
@@ -1290,6 +1280,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc),
+ SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc),
SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd),
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 14273ca00641..555970a29c94 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -19,7 +19,40 @@
#include "sdhci.h"
#include "sdhci-pci.h"
-#include "sdhci-pci-o2micro.h"
+
+/*
+ * O2Micro device registers
+ */
+
+#define O2_SD_MISC_REG5 0x64
+#define O2_SD_LD0_CTRL 0x68
+#define O2_SD_DEV_CTRL 0x88
+#define O2_SD_LOCK_WP 0xD3
+#define O2_SD_TEST_REG 0xD4
+#define O2_SD_FUNC_REG0 0xDC
+#define O2_SD_MULTI_VCC3V 0xEE
+#define O2_SD_CLKREQ 0xEC
+#define O2_SD_CAPS 0xE0
+#define O2_SD_ADMA1 0xE2
+#define O2_SD_ADMA2 0xE7
+#define O2_SD_INF_MOD 0xF1
+#define O2_SD_MISC_CTRL4 0xFC
+#define O2_SD_TUNING_CTRL 0x300
+#define O2_SD_PLL_SETTING 0x304
+#define O2_SD_CLK_SETTING 0x328
+#define O2_SD_CAP_REG2 0x330
+#define O2_SD_CAP_REG0 0x334
+#define O2_SD_UHS1_CAP_SETTING 0x33C
+#define O2_SD_DELAY_CTRL 0x350
+#define O2_SD_UHS2_L1_CTRL 0x35C
+#define O2_SD_FUNC_REG3 0x3E0
+#define O2_SD_FUNC_REG4 0x3E4
+#define O2_SD_LED_ENABLE BIT(6)
+#define O2_SD_FREG0_LEDOFF BIT(13)
+#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
+
+#define O2_SD_VENDOR_SETTING 0x110
+#define O2_SD_VENDOR_SETTING2 0x1C8
static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value)
{
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.h b/drivers/mmc/host/sdhci-pci-o2micro.h
deleted file mode 100644
index 770f53857211..000000000000
--- a/drivers/mmc/host/sdhci-pci-o2micro.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2013 BayHub Technology Ltd.
- *
- * Authors: Peter Guo <peter.guo@bayhubtech.com>
- * Adam Lee <adam.lee@canonical.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __SDHCI_PCI_O2MICRO_H
-#define __SDHCI_PCI_O2MICRO_H
-
-#include "sdhci-pci.h"
-
-/*
- * O2Micro device IDs
- */
-
-#define PCI_DEVICE_ID_O2_SDS0 0x8420
-#define PCI_DEVICE_ID_O2_SDS1 0x8421
-#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
-#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
-#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
-
-/*
- * O2Micro device registers
- */
-
-#define O2_SD_MISC_REG5 0x64
-#define O2_SD_LD0_CTRL 0x68
-#define O2_SD_DEV_CTRL 0x88
-#define O2_SD_LOCK_WP 0xD3
-#define O2_SD_TEST_REG 0xD4
-#define O2_SD_FUNC_REG0 0xDC
-#define O2_SD_MULTI_VCC3V 0xEE
-#define O2_SD_CLKREQ 0xEC
-#define O2_SD_CAPS 0xE0
-#define O2_SD_ADMA1 0xE2
-#define O2_SD_ADMA2 0xE7
-#define O2_SD_INF_MOD 0xF1
-#define O2_SD_MISC_CTRL4 0xFC
-#define O2_SD_TUNING_CTRL 0x300
-#define O2_SD_PLL_SETTING 0x304
-#define O2_SD_CLK_SETTING 0x328
-#define O2_SD_CAP_REG2 0x330
-#define O2_SD_CAP_REG0 0x334
-#define O2_SD_UHS1_CAP_SETTING 0x33C
-#define O2_SD_DELAY_CTRL 0x350
-#define O2_SD_UHS2_L1_CTRL 0x35C
-#define O2_SD_FUNC_REG3 0x3E0
-#define O2_SD_FUNC_REG4 0x3E4
-#define O2_SD_LED_ENABLE BIT(6)
-#define O2_SD_FREG0_LEDOFF BIT(13)
-#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
-
-#define O2_SD_VENDOR_SETTING 0x110
-#define O2_SD_VENDOR_SETTING2 0x1C8
-
-extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
-
-extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
-
-extern int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
-
-#endif /* __SDHCI_PCI_O2MICRO_H */
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 3e8ea3e566f6..0056f08a29cc 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -6,6 +6,12 @@
* PCI device IDs, sub IDs
*/
+#define PCI_DEVICE_ID_O2_SDS0 0x8420
+#define PCI_DEVICE_ID_O2_SDS1 0x8421
+#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
+#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
+#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
+
#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14
@@ -26,6 +32,7 @@
#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c
#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d
#define PCI_DEVICE_ID_INTEL_DNV_EMMC 0x19db
+#define PCI_DEVICE_ID_INTEL_CDF_EMMC 0x18db
#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
@@ -164,4 +171,10 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
#endif
+int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
+int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
+#ifdef CONFIG_PM_SLEEP
+int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
+#endif
+
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index d328fcf284d1..cda83ccb2702 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -761,32 +761,24 @@ static const struct dev_pm_ops sdhci_s3c_pmops = {
NULL)
};
-#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
-static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
- .no_divider = true,
-};
-#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data)
-#else
-#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL)
-#endif
-
static const struct platform_device_id sdhci_s3c_driver_ids[] = {
{
.name = "s3c-sdhci",
.driver_data = (kernel_ulong_t)NULL,
- }, {
- .name = "exynos4-sdhci",
- .driver_data = EXYNOS4_SDHCI_DRV_DATA,
},
{ }
};
MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids);
#ifdef CONFIG_OF
+static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
+ .no_divider = true,
+};
+
static const struct of_device_id sdhci_s3c_dt_match[] = {
{ .compatible = "samsung,s3c6410-sdhci", },
{ .compatible = "samsung,exynos4210-sdhci",
- .data = (void *)EXYNOS4_SDHCI_DRV_DATA },
+ .data = &exynos4_sdhci_drv_data },
{},
};
MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match);
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 0cd6fa80db66..b877c13184c2 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -422,7 +422,15 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ /* SDHCI controllers on Tegra186 support 40-bit addressing.
+ * IOVA addresses are 48-bit wide on Tegra186.
+ * With a 64-bit dma mask used for SDHCI, accesses can
+ * be broken. Disable 64-bit dma, which falls back
+ * to a 32-bit dma mask. Ideally a 40-bit dma mask would work,
+ * but it is not supported as of now.
+ */
+ SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.ops = &tegra114_sdhci_ops,
};
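
For context on the comment in the hunk above: expressing a 40-bit limit directly would ordinarily be a one-line DMA mask request, but per that comment the SDHCI core only offers the broken-64-bit quirk at this point, so the driver settles for 32-bit DMA. The snippet below is illustrative only and is not something this patch adds:

#include <linux/dma-mapping.h>

/* How a 40-bit limit would ordinarily be requested (illustrative only). */
static int example_set_40bit_dma(struct device *dev)
{
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
}
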
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 0d5fcca18c9e..2f14334e42df 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2407,12 +2407,12 @@ static void sdhci_tasklet_finish(unsigned long param)
;
}
-static void sdhci_timeout_timer(unsigned long data)
+static void sdhci_timeout_timer(struct timer_list *t)
{
struct sdhci_host *host;
unsigned long flags;
- host = (struct sdhci_host*)data;
+ host = from_timer(host, t, timer);
spin_lock_irqsave(&host->lock, flags);
@@ -2429,12 +2429,12 @@ static void sdhci_timeout_timer(unsigned long data)
spin_unlock_irqrestore(&host->lock, flags);
}
-static void sdhci_timeout_data_timer(unsigned long data)
+static void sdhci_timeout_data_timer(struct timer_list *t)
{
struct sdhci_host *host;
unsigned long flags;
- host = (struct sdhci_host *)data;
+ host = from_timer(host, t, data_timer);
spin_lock_irqsave(&host->lock, flags);
@@ -3238,7 +3238,7 @@ int sdhci_setup_host(struct sdhci_host *host)
* available.
*/
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
DBG("Version: 0x%08x | Present: 0x%08x\n",
@@ -3749,9 +3749,8 @@ int __sdhci_add_host(struct sdhci_host *host)
tasklet_init(&host->finish_tasklet,
sdhci_tasklet_finish, (unsigned long)host);
- setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
- setup_timer(&host->data_timer, sdhci_timeout_data_timer,
- (unsigned long)host);
+ timer_setup(&host->timer, sdhci_timeout_timer, 0);
+ timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
init_waitqueue_head(&host->buf_ready_int);
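
The timer changes in this file, and in tifm_sd, via-sdmmc, vub300 and wbsd further down, are all the same timer_list API migration: the callback now receives the struct timer_list pointer, and from_timer() (a container_of() keyed on the named member) recovers the enclosing structure, so the (unsigned long)host cast disappears. A generic sketch with placeholder names:

#include <linux/timer.h>

struct example_host {
	struct timer_list timer;
	int timed_out;
};

static void example_timeout(struct timer_list *t)
{
	struct example_host *host = from_timer(host, t, timer);

	host->timed_out = 1;
}

static void example_init(struct example_host *host)
{
	/* replaces setup_timer(&host->timer, fn, (unsigned long)host) */
	timer_setup(&host->timer, example_timeout, 0);
}
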
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 111b66f5439b..04ca0d33a521 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/clk.h>
#include "sdhci-pltfm.h"
@@ -47,6 +48,7 @@ struct f_sdhost_priv {
struct clk *clk;
u32 vendor_hs200;
struct device *dev;
+ bool enable_cmd_dat_delay;
};
static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
@@ -84,10 +86,19 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u32 ctl;
+
if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
sdhci_writew(host, 0xBC01, SDHCI_CLOCK_CONTROL);
sdhci_reset(host, mask);
+
+ if (priv->enable_cmd_dat_delay) {
+ ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctl |= F_SDH30_CMD_DAT_DELAY;
+ sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
+ }
}
static const struct sdhci_ops sdhci_f_sdh30_ops = {
@@ -126,6 +137,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
SDHCI_QUIRK2_TUNING_WORK_AROUND;
+ priv->enable_cmd_dat_delay = device_property_read_bool(dev,
+ "fujitsu,cmd-dat-delay-select");
+
ret = mmc_of_parse(host->mmc);
if (ret)
goto err;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 53c970fe0873..cc98355dbdb9 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1175,11 +1175,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
return -EINVAL;
ret = mmc_regulator_get_supply(host->mmc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Could not get vmmc supply\n");
+ if (ret)
return ret;
- }
host->reg_base = devm_ioremap_resource(&pdev->dev,
platform_get_resource(pdev, IORESOURCE_MEM, 0));
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 93c4b40df90a..a3d8380ab480 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -783,9 +783,9 @@ static void tifm_sd_end_cmd(unsigned long data)
mmc_request_done(mmc, mrq);
}
-static void tifm_sd_abort(unsigned long data)
+static void tifm_sd_abort(struct timer_list *t)
{
- struct tifm_sd *host = (struct tifm_sd*)data;
+ struct tifm_sd *host = from_timer(host, t, timer);
pr_err("%s : card failed to respond for a long period of time "
"(%x, %x)\n",
@@ -968,7 +968,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd,
(unsigned long)host);
- setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host);
+ timer_setup(&host->timer, tifm_sd_abort, 0);
mmc->ops = &tifm_sd_ops;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 9c4e6199b854..583bf3262df5 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -167,11 +167,11 @@ static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
/* HW engineers overrode docs: no sleep needed on R-Car2+ */
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
- msleep(10);
+ usleep_range(10000, 11000);
}
}
@@ -179,7 +179,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
- msleep(10);
+ usleep_range(10000, 11000);
}
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
@@ -187,7 +187,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
/* HW engineers overrode docs: no sleep needed on R-Car2+ */
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
@@ -219,7 +219,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
tmio_mmc_clk_start(host);
}
@@ -230,11 +230,11 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
- msleep(10);
+ usleep_range(10000, 11000);
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
- msleep(10);
+ usleep_range(10000, 11000);
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
@@ -1113,8 +1113,11 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
struct tmio_mmc_data *pdata = host->pdata;
struct mmc_host *mmc = host->mmc;
+ int err;
- mmc_regulator_get_supply(mmc);
+ err = mmc_regulator_get_supply(mmc);
+ if (err)
+ return err;
/* use ocr_mask if no regulator */
if (!mmc->ocr_avail)
@@ -1299,23 +1302,24 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
pm_runtime_enable(&pdev->dev);
ret = mmc_add_host(mmc);
- if (ret < 0) {
- tmio_mmc_host_remove(_host);
- return ret;
- }
+ if (ret)
+ goto remove_host;
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
- if (ret < 0) {
- tmio_mmc_host_remove(_host);
- return ret;
- }
+ if (ret)
+ goto remove_host;
+
mmc_gpiod_request_cd_irq(mmc);
}
return 0;
+
+remove_host:
+ tmio_mmc_host_remove(_host);
+ return ret;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 64da6a88cfb9..cdfeb15b6f05 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1757,7 +1757,7 @@ static int usdhi6_probe(struct platform_device *pdev)
return -ENOMEM;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto e_free_mmc;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index a838bf5480d8..32c4211506fc 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -932,12 +932,12 @@ out:
return result;
}
-static void via_sdc_timeout(unsigned long ulongdata)
+static void via_sdc_timeout(struct timer_list *t)
{
struct via_crdr_mmc_host *sdhost;
unsigned long flags;
- sdhost = (struct via_crdr_mmc_host *)ulongdata;
+ sdhost = from_timer(sdhost, t, timer);
spin_lock_irqsave(&sdhost->lock, flags);
@@ -1036,9 +1036,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host)
u32 lenreg;
u32 status;
- init_timer(&host->timer);
- host->timer.data = (unsigned long)host;
- host->timer.function = via_sdc_timeout;
+ timer_setup(&host->timer, via_sdc_timeout, 0);
spin_lock_init(&host->lock);
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 8f569d257405..1fe68137a30f 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -741,9 +741,10 @@ static void vub300_deadwork_thread(struct work_struct *work)
kref_put(&vub300->kref, vub300_delete);
}
-static void vub300_inactivity_timer_expired(unsigned long data)
+static void vub300_inactivity_timer_expired(struct timer_list *t)
{ /* softirq */
- struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
+ struct vub300_mmc_host *vub300 = from_timer(vub300, t,
+ inactivity_timer);
if (!vub300->interface) {
kref_put(&vub300->kref, vub300_delete);
} else if (vub300->cmd) {
@@ -1180,9 +1181,10 @@ static void send_command(struct vub300_mmc_host *vub300)
* timer callback runs in atomic mode
* so it cannot call usb_kill_urb()
*/
-static void vub300_sg_timed_out(unsigned long data)
+static void vub300_sg_timed_out(struct timer_list *t)
{
- struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
+ struct vub300_mmc_host *vub300 = from_timer(vub300, t,
+ sg_transfer_timer);
vub300->usb_timed_out = 1;
usb_sg_cancel(&vub300->sg_request);
usb_unlink_urb(vub300->command_out_urb);
@@ -1244,12 +1246,8 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
USB_RECIP_DEVICE, 0x0000, 0x0000,
xfer_buffer, xfer_length, HZ);
kfree(xfer_buffer);
- if (retval < 0) {
- strncpy(vub300->vub_name,
- "SDIO pseudocode download failed",
- sizeof(vub300->vub_name));
- return;
- }
+ if (retval < 0)
+ goto copy_error_message;
} else {
dev_err(&vub300->udev->dev,
"not enough memory for xfer buffer to send"
@@ -1291,12 +1289,8 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
USB_RECIP_DEVICE, 0x0000, 0x0000,
xfer_buffer, xfer_length, HZ);
kfree(xfer_buffer);
- if (retval < 0) {
- strncpy(vub300->vub_name,
- "SDIO pseudocode download failed",
- sizeof(vub300->vub_name));
- return;
- }
+ if (retval < 0)
+ goto copy_error_message;
} else {
dev_err(&vub300->udev->dev,
"not enough memory for xfer buffer to send"
@@ -1349,6 +1343,12 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
sizeof(vub300->vub_name));
return;
}
+
+ return;
+
+copy_error_message:
+ strncpy(vub300->vub_name, "SDIO pseudocode download failed",
+ sizeof(vub300->vub_name));
}
/*
@@ -2323,13 +2323,10 @@ static int vub300_probe(struct usb_interface *interface,
INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread);
INIT_WORK(&vub300->deadwork, vub300_deadwork_thread);
kref_init(&vub300->kref);
- init_timer(&vub300->sg_transfer_timer);
- vub300->sg_transfer_timer.data = (unsigned long)vub300;
- vub300->sg_transfer_timer.function = vub300_sg_timed_out;
+ timer_setup(&vub300->sg_transfer_timer, vub300_sg_timed_out, 0);
kref_get(&vub300->kref);
- init_timer(&vub300->inactivity_timer);
- vub300->inactivity_timer.data = (unsigned long)vub300;
- vub300->inactivity_timer.function = vub300_inactivity_timer_expired;
+ timer_setup(&vub300->inactivity_timer,
+ vub300_inactivity_timer_expired, 0);
vub300->inactivity_timer.expires = jiffies + HZ;
add_timer(&vub300->inactivity_timer);
if (vub300->card_present)
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 546aaf8d1507..f4233576153b 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -956,9 +956,9 @@ static const struct mmc_host_ops wbsd_ops = {
* Helper function to reset detection ignore
*/
-static void wbsd_reset_ignore(unsigned long data)
+static void wbsd_reset_ignore(struct timer_list *t)
{
- struct wbsd_host *host = (struct wbsd_host *)data;
+ struct wbsd_host *host = from_timer(host, t, ignore_timer);
BUG_ON(host == NULL);
@@ -1224,9 +1224,7 @@ static int wbsd_alloc_mmc(struct device *dev)
/*
* Set up timers
*/
- init_timer(&host->ignore_timer);
- host->ignore_timer.data = (unsigned long)host;
- host->ignore_timer.function = wbsd_reset_ignore;
+ timer_setup(&host->ignore_timer, wbsd_reset_ignore, 0);
/*
* Maximum number of segments. Worst case is one sector per segment
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 7c887f111a7d..62fd6905c648 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -431,7 +431,7 @@ static int block2mtd_setup2(const char *val)
}
-static int block2mtd_setup(const char *val, struct kernel_param *kp)
+static int block2mtd_setup(const char *val, const struct kernel_param *kp)
{
#ifdef MODULE
return block2mtd_setup2(val);
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 8b66e52ca3cc..7287696a21f9 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -266,7 +266,7 @@ static int phram_setup(const char *val)
return ret;
}
-static int phram_param_call(const char *val, struct kernel_param *kp)
+static int phram_param_call(const char *val, const struct kernel_param *kp)
{
#ifdef MODULE
return phram_setup(val);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 842550b5712a..136ce05d2328 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1334,7 +1334,7 @@ static int bytes_str_to_int(const char *str)
* This function returns zero in case of success and a negative error code in
* case of error.
*/
-static int ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
+static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
{
int i, len;
struct mtd_dev_param *p;
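The block2mtd, phram and ubi hunks above constify the kernel_param argument of their module-parameter callbacks. A hedged sketch of the shape such a callback takes when registered with module_param_call(); foo_setup() and the parameter name are hypothetical:

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static int foo_setup(const char *val)
{
	/* Parse 'val' and configure the module; stub for illustration */
	return 0;
}

/* Note the 'const struct kernel_param *kp' in the updated prototype */
static int foo_param_call(const char *val, const struct kernel_param *kp)
{
	return foo_setup(val);
}

module_param_call(foo, foo_param_call, NULL, NULL, 0200);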
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c02cc817a490..1ed9529e7bd1 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1378,7 +1378,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
unsigned int count;
slaves = rcu_dereference(bond->slave_arr);
- count = slaves ? ACCESS_ONCE(slaves->count) : 0;
+ count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[hash_index %
count];
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 99a3b0cd5bd6..c669554d70bb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1167,7 +1167,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
slave = bond_slave_get_rcu(skb->dev);
bond = slave->bond;
- recv_probe = ACCESS_ONCE(bond->recv_probe);
+ recv_probe = READ_ONCE(bond->recv_probe);
if (recv_probe) {
ret = recv_probe(skb, bond, slave);
if (ret == RX_HANDLER_CONSUMED) {
@@ -3825,7 +3825,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
else
bond_xmit_slave_id(bond, skb, 0);
} else {
- int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+ int slave_cnt = READ_ONCE(bond->slave_cnt);
if (likely(slave_cnt)) {
slave_id = bond_rr_gen_slave_id(bond);
@@ -3987,7 +3987,7 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int count;
slaves = rcu_dereference(bond->slave_arr);
- count = slaves ? ACCESS_ONCE(slaves->count) : 0;
+ count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count)) {
slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
bond_dev_queue_xmit(bond, skb, slave->dev);
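The ACCESS_ONCE() conversions in the bonding hunks above (and in several drivers below) are mechanical: reads become READ_ONCE() and writes become WRITE_ONCE(). A minimal sketch of the idiom on a hypothetical shared counter:

#include <linux/compiler.h>

static unsigned long shared_cnt;	/* written by one context, read by another */

static void producer_update(unsigned long val)
{
	/* Single, non-torn store visible to concurrent readers */
	WRITE_ONCE(shared_cnt, val);
}

static unsigned long consumer_snapshot(void)
{
	/* Single, non-torn load; keeps the compiler from re-reading the value */
	return READ_ONCE(shared_cnt);
}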
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index fdfdb0edfe62..b24566bb74d2 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1301,7 +1301,7 @@ static void lan9303_probe_reset_gpio(struct lan9303 *chip,
chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
GPIOD_OUT_LOW);
- if (!chip->reset_gpio) {
+ if (IS_ERR(chip->reset_gpio)) {
dev_dbg(chip->dev, "No reset GPIO defined\n");
return;
}
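devm_gpiod_get_optional() returns NULL when the GPIO is simply not described and an ERR_PTR() on a real lookup failure, which is what the lan9303 fix above keys off. A hedged sketch of the usual handling, with hypothetical names:

#include <linux/gpio/consumer.h>

static int foo_get_reset_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);	/* genuine error, e.g. -EPROBE_DEFER */

	/* gpio may be NULL here: no reset line is wired up, which is fine,
	 * since gpiod_set_value() and friends accept a NULL descriptor.
	 */
	*out = gpio;
	return 0;
}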
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 5417e4da64ca..7451922c209d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -517,7 +517,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
rc = ena_alloc_rx_page(rx_ring, rx_info,
- __GFP_COLD | GFP_ATOMIC | __GFP_COMP);
+ GFP_ATOMIC | __GFP_COMP);
if (unlikely(rc < 0)) {
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
"failed to alloc buffer for rx queue %d\n",
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 45d92304068e..cc1e4f820e64 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -295,7 +295,7 @@ again:
order = alloc_order;
/* Try to obtain pages, decreasing order if necessary */
- gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
while (order >= 0) {
pages = alloc_pages_node(node, gfp, order);
if (pages)
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index cdf78e069a39..7d623e90dc19 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -9,7 +9,7 @@ config NET_VENDOR_AQUANTIA
Set this to y if you have an Ethernet network card that uses the aQuantia
AQC107/AQC108 chipset.
- This option does not build any drivers; it casues the aQuantia
+ This option does not build any drivers; it causes the aQuantia
drivers that can be built to appear in the list of Ethernet drivers.
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 0654e0c76bc2..519ca6534b85 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -304,8 +304,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
buff->flags = 0U;
buff->len = AQ_CFG_RX_FRAME_MAX;
- buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD |
- __GFP_COMP, pages_order);
+ buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order);
if (!buff->page) {
err = -ENOMEM;
goto err_exit;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 54d1571384a0..be9fd7d184d0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9332,7 +9332,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
- else
+ else if (bp->slowpath)
bnx2x_set_storm_rx_mode(bp);
/* Cleanup multicast configuration */
@@ -10271,8 +10271,15 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
smp_mb();
bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
- bnx2x_nic_load(bp, LOAD_NORMAL);
-
+ /* If the load fails due to an allocation failure, unload and
+ * reload the NIC once more. If open still fails, log an error
+ * message to notify the user.
+ */
+ if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) {
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
+ if (bnx2x_nic_load(bp, LOAD_NORMAL))
+ BNX2X_ERR("Open the NIC fails again!\n");
+ }
rtnl_unlock();
return;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index f05045a69dcc..6aa0eee88ea5 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -4038,7 +4038,8 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
*/
if (!oct->octeon_id &&
oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
- if (lio_vf_rep_modinit()) {
+ retval = lio_vf_rep_modinit();
+ if (retval) {
liquidio_stop_nic_module(oct);
goto octnet_init_failure;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index 433f3619de8f..f2d1a076a038 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -198,7 +198,7 @@ static inline void
struct sk_buff *skb;
struct octeon_skb_page_info *skb_pg_info;
- page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ page = alloc_page(GFP_ATOMIC);
if (unlikely(!page))
return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h
index 705713b56636..3c3e6cf6aca6 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h
@@ -60,7 +60,7 @@ struct t3cdev {
int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
void *priv; /* driver private data */
- void *l2opt; /* optional layer 2 data */
+ void __rcu *l2opt; /* optional layer 2 data */
void *l3opt; /* optional layer 3 data */
void *l4opt; /* optional layer 4 data */
void *ulp; /* ulp stuff */
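Annotating t3cdev->l2opt as __rcu above lets sparse verify that the pointer is only touched through the RCU accessors. A minimal sketch of the access pattern on a hypothetical structure (foo_dev and foo_l2data are not part of the driver):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo_l2data {
	int entries;
};

struct foo_dev {
	struct foo_l2data __rcu *l2opt;	/* updated rarely, read on the fast path */
};

static int foo_read_entries(struct foo_dev *dev)
{
	struct foo_l2data *d;
	int n = 0;

	rcu_read_lock();
	d = rcu_dereference(dev->l2opt);	/* reader side */
	if (d)
		n = d->entries;
	rcu_read_unlock();
	return n;
}

static void foo_replace(struct foo_dev *dev, struct foo_l2data *new)
{
	struct foo_l2data *old;

	old = rcu_dereference_protected(dev->l2opt, 1); /* writer holds the update lock */
	rcu_assign_pointer(dev->l2opt, new);		/* publish the new pointer */
	if (old) {
		synchronize_rcu();			/* wait out readers of 'old' */
		kfree(old);
	}
}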
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 1de1d811fde3..605689957496 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -145,6 +145,14 @@ struct cudbg_tid_info_region_rev1 {
u32 reserved[16];
};
+#define CUDBG_MAX_FL_QIDS 1024
+
+struct cudbg_ch_cntxt {
+ u32 cntxt_type;
+ u32 cntxt_id;
+ u32 data[SGE_CTXT_SIZE / 4];
+};
+
#define CUDBG_MAX_RPLC_SIZE 128
struct cudbg_mps_tcam {
@@ -185,6 +193,36 @@ struct cudbg_vpd_data {
u32 vpd_vers;
};
+#define CUDBG_MAX_TCAM_TID 0x800
+
+enum cudbg_le_entry_types {
+ LE_ET_UNKNOWN = 0,
+ LE_ET_TCAM_CON = 1,
+ LE_ET_TCAM_SERVER = 2,
+ LE_ET_TCAM_FILTER = 3,
+ LE_ET_TCAM_CLIP = 4,
+ LE_ET_TCAM_ROUTING = 5,
+ LE_ET_HASH_CON = 6,
+ LE_ET_INVALID_TID = 8,
+};
+
+struct cudbg_tcam {
+ u32 filter_start;
+ u32 server_start;
+ u32 clip_start;
+ u32 routing_start;
+ u32 tid_hash_base;
+ u32 max_tid;
+};
+
+struct cudbg_tid_data {
+ u32 tid;
+ u32 dbig_cmd;
+ u32 dbig_conf;
+ u32 dbig_rsp_stat;
+ u32 data[NUM_LE_DB_DBGI_RSP_DATA_INSTANCES];
+};
+
#define CUDBG_NUM_ULPTX 11
#define CUDBG_NUM_ULPTX_READ 512
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
index e484c514e9ae..e10ff1ee62c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
@@ -63,8 +63,10 @@ enum cudbg_dbg_entity_type {
CUDBG_PCIE_INDIRECT = 50,
CUDBG_PM_INDIRECT = 51,
CUDBG_TID_INFO = 54,
+ CUDBG_DUMP_CONTEXT = 56,
CUDBG_MPS_TCAM = 57,
CUDBG_VPD_DATA = 58,
+ CUDBG_LE_TCAM = 59,
CUDBG_CCTRL = 60,
CUDBG_MA_INDIRECT = 61,
CUDBG_ULPTX_LA = 62,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 32c9858da110..d699bf88d18f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -1115,6 +1115,84 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
return rc;
}
+int cudbg_dump_context_size(struct adapter *padap)
+{
+ u32 value, size;
+ u8 flq;
+
+ value = t4_read_reg(padap, SGE_FLM_CFG_A);
+
+ /* Get number of data freelist queues */
+ flq = HDRSTARTFLQ_G(value);
+ size = CUDBG_MAX_FL_QIDS >> flq;
+
+ /* Add extra space for congestion manager contexts.
+ * The number of CONM contexts is the same as the number of
+ * freelist queues.
+ */
+ size += size;
+ return size * sizeof(struct cudbg_ch_cntxt);
+}
+
+static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
+ enum ctxt_type ctype, u32 *data)
+{
+ struct adapter *padap = pdbg_init->adap;
+ int rc = -1;
+
+ /* Under heavy traffic, the SGE Queue contexts registers will be
+ * frequently accessed by firmware.
+ *
+ * To avoid conflicts with firmware, always ask firmware to fetch
+ * the SGE Queue contexts via mailbox. On failure, fallback to
+ * accessing hardware registers directly.
+ */
+ if (is_fw_attached(pdbg_init))
+ rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data);
+ if (rc)
+ t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
+}
+
+int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_ch_cntxt *buff;
+ u32 size, i = 0;
+ int rc;
+
+ rc = cudbg_dump_context_size(padap);
+ if (rc <= 0)
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+
+ size = rc;
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ buff = (struct cudbg_ch_cntxt *)temp_buff.data;
+ while (size > 0) {
+ buff->cntxt_type = CTXT_FLM;
+ buff->cntxt_id = i;
+ cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data);
+ buff++;
+ size -= sizeof(struct cudbg_ch_cntxt);
+
+ buff->cntxt_type = CTXT_CNM;
+ buff->cntxt_id = i;
+ cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data);
+ buff++;
+ size -= sizeof(struct cudbg_ch_cntxt);
+
+ i++;
+ }
+
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
{
*mask = x | y;
@@ -1367,6 +1445,181 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
return rc;
}
+static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
+ struct cudbg_tid_data *tid_data)
+{
+ struct adapter *padap = pdbg_init->adap;
+ int i, cmd_retry = 8;
+ u32 val;
+
+ /* Fill REQ_DATA regs with 0's */
+ for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++)
+ t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0);
+
+ /* Write DBIG command */
+ val = DBGICMD_V(4) | DBGITID_V(tid);
+ t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val);
+ tid_data->dbig_cmd = val;
+
+ val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */
+ t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val);
+ tid_data->dbig_conf = val;
+
+ /* Poll the DBGICMDBUSY bit */
+ val = 1;
+ while (val) {
+ val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A);
+ val = val & DBGICMDBUSY_F;
+ cmd_retry--;
+ if (!cmd_retry)
+ return CUDBG_SYSTEM_ERROR;
+ }
+
+ /* Check RESP status */
+ val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A);
+ tid_data->dbig_rsp_stat = val;
+ if (!(val & 1))
+ return CUDBG_SYSTEM_ERROR;
+
+ /* Read RESP data */
+ for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++)
+ tid_data->data[i] = t4_read_reg(padap,
+ LE_DB_DBGI_RSP_DATA_A +
+ (i << 2));
+ tid_data->tid = tid;
+ return 0;
+}
+
+static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region)
+{
+ int type = LE_ET_UNKNOWN;
+
+ if (tid < tcam_region.server_start)
+ type = LE_ET_TCAM_CON;
+ else if (tid < tcam_region.filter_start)
+ type = LE_ET_TCAM_SERVER;
+ else if (tid < tcam_region.clip_start)
+ type = LE_ET_TCAM_FILTER;
+ else if (tid < tcam_region.routing_start)
+ type = LE_ET_TCAM_CLIP;
+ else if (tid < tcam_region.tid_hash_base)
+ type = LE_ET_TCAM_ROUTING;
+ else if (tid < tcam_region.max_tid)
+ type = LE_ET_HASH_CON;
+ else
+ type = LE_ET_INVALID_TID;
+
+ return type;
+}
+
+static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data,
+ struct cudbg_tcam tcam_region)
+{
+ int ipv6 = 0;
+ int le_type;
+
+ le_type = cudbg_get_le_type(tid_data->tid, tcam_region);
+ if (tid_data->tid & 1)
+ return 0;
+
+ if (le_type == LE_ET_HASH_CON) {
+ ipv6 = tid_data->data[16] & 0x8000;
+ } else if (le_type == LE_ET_TCAM_CON) {
+ ipv6 = tid_data->data[16] & 0x8000;
+ if (ipv6)
+ ipv6 = tid_data->data[9] == 0x00C00000;
+ } else {
+ ipv6 = 0;
+ }
+ return ipv6;
+}
+
+void cudbg_fill_le_tcam_info(struct adapter *padap,
+ struct cudbg_tcam *tcam_region)
+{
+ u32 value;
+
+ /* Get the LE regions */
+ value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */
+ tcam_region->tid_hash_base = value;
+
+ /* Get routing table index */
+ value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
+ tcam_region->routing_start = value;
+
+ /* Get clip table index */
+ value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
+ tcam_region->clip_start = value;
+
+ /* Get filter table index */
+ value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A);
+ tcam_region->filter_start = value;
+
+ /* Get server table index */
+ value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A);
+ tcam_region->server_start = value;
+
+ /* Check whether hash is enabled and calculate the max tids */
+ value = t4_read_reg(padap, LE_DB_CONFIG_A);
+ if ((value >> HASHEN_S) & 1) {
+ value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A);
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
+ tcam_region->max_tid = (value & 0xFFFFF) +
+ tcam_region->tid_hash_base;
+ } else {
+ value = HASHTIDSIZE_G(value);
+ value = 1 << value;
+ tcam_region->max_tid = value +
+ tcam_region->tid_hash_base;
+ }
+ } else { /* hash not enabled */
+ tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
+ }
+}
+
+int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_tcam tcam_region = { 0 };
+ struct cudbg_tid_data *tid_data;
+ u32 bytes = 0;
+ int rc, size;
+ u32 i;
+
+ cudbg_fill_le_tcam_info(padap, &tcam_region);
+
+ size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
+ size += sizeof(struct cudbg_tcam);
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
+ bytes = sizeof(struct cudbg_tcam);
+ tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes);
+ /* read all tid */
+ for (i = 0; i < tcam_region.max_tid; ) {
+ rc = cudbg_read_tid(pdbg_init, i, tid_data);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+
+ /* ipv6 takes two tids */
+ cudbg_is_ipv6_entry(tid_data, tcam_region) ? i += 2 : i++;
+
+ tid_data++;
+ bytes += sizeof(struct cudbg_tid_data);
+ }
+
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
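The dump-context collector above reserves two records per data freelist queue, one CTXT_FLM and one CTXT_CNM, which is why cudbg_dump_context_size() doubles its count and the fill loop advances the output pointer twice per queue id. A compressed restatement of that sizing; foo_dump_context_records() is hypothetical and only reuses the macros introduced in this series:

static u32 foo_dump_context_records(u32 sge_flm_cfg)
{
	/* Number of data freelist queues advertised by the adapter */
	u32 nqueues = CUDBG_MAX_FL_QIDS >> HDRSTARTFLQ_G(sge_flm_cfg);

	/* One FLM context plus one congestion-manager (CONM) context each */
	return 2 * nqueues;
}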
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
index 230ba88a6a81..caeee8e33e86 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
@@ -123,12 +123,18 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
@@ -155,4 +161,9 @@ struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i);
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
struct cudbg_entity_hdr *entity_hdr);
u32 cudbg_cim_obq_size(struct adapter *padap, int qid);
+int cudbg_dump_context_size(struct adapter *padap);
+
+struct cudbg_tcam;
+void cudbg_fill_le_tcam_info(struct adapter *padap,
+ struct cudbg_tcam *tcam_region);
#endif /* __CUDBG_LIB_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 0de1a4b2223e..6f9fa6e3c42a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1670,6 +1670,10 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]);
void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
unsigned int *kbps, unsigned int *ipg, bool sleep_ok);
+int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
+ enum ctxt_type ctype, u32 *data);
+int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
+ enum ctxt_type ctype, u32 *data);
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
int rateunit, int ratemode, int channel, int class,
int minrate, int maxrate, int weight, int pktsize);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 7373617da635..29cc625e9833 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -60,8 +60,10 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
{ CUDBG_TID_INFO, cudbg_collect_tid },
+ { CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
{ CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
{ CUDBG_VPD_DATA, cudbg_collect_vpd_data },
+ { CUDBG_LE_TCAM, cudbg_collect_le_tcam },
{ CUDBG_CCTRL, cudbg_collect_cctrl },
{ CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect },
{ CUDBG_ULPTX_LA, cudbg_collect_ulptx_la },
@@ -72,6 +74,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
{
+ struct cudbg_tcam tcam_region = { 0 };
u32 value, n = 0, len = 0;
switch (entity) {
@@ -216,6 +219,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
case CUDBG_TID_INFO:
len = sizeof(struct cudbg_tid_info_region_rev1);
break;
+ case CUDBG_DUMP_CONTEXT:
+ len = cudbg_dump_context_size(adap);
+ break;
case CUDBG_MPS_TCAM:
len = sizeof(struct cudbg_mps_tcam) *
adap->params.arch.mps_tcam_size;
@@ -223,6 +229,11 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
case CUDBG_VPD_DATA:
len = sizeof(struct cudbg_vpd_data);
break;
+ case CUDBG_LE_TCAM:
+ cudbg_fill_le_tcam_info(adap, &tcam_region);
+ len = sizeof(struct cudbg_tcam) +
+ sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
+ break;
case CUDBG_CCTRL:
len = sizeof(u16) * NMTUS * NCCTRL_WIN;
break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 486b01fe23bd..922f2f937789 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -405,7 +405,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q,
*/
static inline int reclaimable(const struct sge_txq *q)
{
- int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
+ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
hw_cidx -= q->cidx;
return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
@@ -1375,7 +1375,7 @@ out_free: dev_kfree_skb_any(skb);
*/
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
- int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
+ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
int reclaim = hw_cidx - q->cidx;
if (reclaim < 0)
@@ -1537,7 +1537,13 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
*/
static inline int is_ofld_imm(const struct sk_buff *skb)
{
- return skb->len <= MAX_IMM_TX_PKT_LEN;
+ struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
+ unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
+
+ if (opcode == FW_CRYPTO_LOOKASIDE_WR)
+ return skb->len <= SGE_MAX_WR_LEN;
+ else
+ return skb->len <= MAX_IMM_TX_PKT_LEN;
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index b4fad081ac78..f63210f15579 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -9647,6 +9647,68 @@ void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
}
}
+/**
+ * t4_sge_ctxt_rd - read an SGE context through FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @cid: the context id
+ * @ctype: the context type
+ * @data: where to store the context data
+ *
+ * Issues a FW command through the given mailbox to read an SGE context.
+ */
+int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
+ enum ctxt_type ctype, u32 *data)
+{
+ struct fw_ldst_cmd c;
+ int ret;
+
+ if (ctype == CTXT_FLM)
+ ret = FW_LDST_ADDRSPC_SGE_FLMC;
+ else
+ ret = FW_LDST_ADDRSPC_SGE_CONMC;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_LDST_CMD_ADDRSPACE_V(ret));
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.idctxt.physid = cpu_to_be32(cid);
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0) {
+ data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
+ data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
+ data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
+ data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
+ data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
+ data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
+ }
+ return ret;
+}
+
+/**
+ * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
+ * @adap: the adapter
+ * @cid: the context id
+ * @ctype: the context type
+ * @data: where to store the context data
+ *
+ * Reads an SGE context directly, bypassing FW. This is only for
+ * debugging when FW is unavailable.
+ */
+int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
+ enum ctxt_type ctype, u32 *data)
+{
+ int i, ret;
+
+ t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
+ ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
+ if (!ret)
+ for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
+ *data++ = t4_read_reg(adap, i);
+ return ret;
+}
+
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
int rateunit, int ratemode, int channel, int class,
int minrate, int maxrate, int weight, int pktsize)
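cudbg_read_sge_ctxt() earlier in this series is the intended consumer of these two helpers: ask the firmware via the mailbox first and fall back to the direct register read only when that fails. A minimal usage sketch along the same lines; foo_read_flm_ctxt() is hypothetical:

static int foo_read_flm_ctxt(struct adapter *adap, unsigned int cid,
			     u32 *data)	/* data[] holds SGE_CTXT_SIZE / 4 words */
{
	int ret;

	/* Preferred path: read the context through the firmware mailbox */
	ret = t4_sge_ctxt_rd(adap, adap->mbox, cid, CTXT_FLM, data);
	if (ret)
		/* Debug-only fallback: read the context registers directly */
		ret = t4_sge_ctxt_rd_bd(adap, cid, CTXT_FLM, data);
	return ret;
}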
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 7c6af14905c2..a964ed184356 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -68,6 +68,12 @@ enum {
ULPRX_LA_SIZE = 512, /* # of 256-bit words in ULP_RX LA */
};
+/* SGE context types */
+enum ctxt_type {
+ CTXT_FLM = 2,
+ CTXT_CNM,
+};
+
enum {
SF_PAGE_SIZE = 256, /* serial flash page size */
SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
@@ -79,6 +85,7 @@ enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */
enum {
SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
+ SGE_CTXT_SIZE = 24, /* size of SGE context */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
SGE_MAX_IQ_SIZE = 65520,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 623f453bd327..a7cfece72828 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -65,6 +65,9 @@
#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_LE_DB_DBGI_REQ_DATA_INSTANCES 17
+#define NUM_LE_DB_DBGI_RSP_DATA_INSTANCES 17
+
#define SGE_PF_KDOORBELL_A 0x0
#define QID_S 15
@@ -150,6 +153,23 @@
#define T6_DBVFIFO_SIZE_M 0x1fffU
#define T6_DBVFIFO_SIZE_G(x) (((x) >> T6_DBVFIFO_SIZE_S) & T6_DBVFIFO_SIZE_M)
+#define SGE_CTXT_CMD_A 0x11fc
+
+#define BUSY_S 31
+#define BUSY_V(x) ((x) << BUSY_S)
+#define BUSY_F BUSY_V(1U)
+
+#define CTXTTYPE_S 24
+#define CTXTTYPE_M 0x3U
+#define CTXTTYPE_V(x) ((x) << CTXTTYPE_S)
+
+#define CTXTQID_S 0
+#define CTXTQID_M 0x1ffffU
+#define CTXTQID_V(x) ((x) << CTXTQID_S)
+
+#define SGE_CTXT_DATA0_A 0x1200
+#define SGE_CTXT_DATA5_A 0x1214
+
#define GLOBALENABLE_S 0
#define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
#define GLOBALENABLE_F GLOBALENABLE_V(1U)
@@ -319,6 +339,16 @@
#define SGE_IMSG_CTXT_BADDR_A 0x1088
#define SGE_FLM_CACHE_BADDR_A 0x108c
+#define SGE_FLM_CFG_A 0x1090
+
+#define NOHDR_S 18
+#define NOHDR_V(x) ((x) << NOHDR_S)
+#define NOHDR_F NOHDR_V(1U)
+
+#define HDRSTARTFLQ_S 11
+#define HDRSTARTFLQ_M 0x7U
+#define HDRSTARTFLQ_G(x) (((x) >> HDRSTARTFLQ_S) & HDRSTARTFLQ_M)
+
#define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
#define THRESHOLD_0_S 24
@@ -2273,6 +2303,35 @@
#define CHNENABLE_V(x) ((x) << CHNENABLE_S)
#define CHNENABLE_F CHNENABLE_V(1U)
+#define LE_DB_DBGI_CONFIG_A 0x19cf0
+
+#define DBGICMDBUSY_S 3
+#define DBGICMDBUSY_V(x) ((x) << DBGICMDBUSY_S)
+#define DBGICMDBUSY_F DBGICMDBUSY_V(1U)
+
+#define DBGICMDSTRT_S 2
+#define DBGICMDSTRT_V(x) ((x) << DBGICMDSTRT_S)
+#define DBGICMDSTRT_F DBGICMDSTRT_V(1U)
+
+#define DBGICMDMODE_S 0
+#define DBGICMDMODE_M 0x3U
+#define DBGICMDMODE_V(x) ((x) << DBGICMDMODE_S)
+
+#define LE_DB_DBGI_REQ_TCAM_CMD_A 0x19cf4
+
+#define DBGICMD_S 20
+#define DBGICMD_M 0xfU
+#define DBGICMD_V(x) ((x) << DBGICMD_S)
+
+#define DBGITID_S 0
+#define DBGITID_M 0xfffffU
+#define DBGITID_V(x) ((x) << DBGITID_S)
+
+#define LE_DB_DBGI_REQ_DATA_A 0x19d00
+#define LE_DB_DBGI_RSP_STATUS_A 0x19d94
+
+#define LE_DB_DBGI_RSP_DATA_A 0x19da0
+
#define PRTENABLE_S 29
#define PRTENABLE_V(x) ((x) << PRTENABLE_S)
#define PRTENABLE_F PRTENABLE_V(1U)
@@ -2882,11 +2941,20 @@
#define T6_LIPMISS_F T6_LIPMISS_V(1U)
#define LE_DB_CONFIG_A 0x19c04
+#define LE_DB_ROUTING_TABLE_INDEX_A 0x19c10
#define LE_DB_ACTIVE_TABLE_START_INDEX_A 0x19c10
+#define LE_DB_FILTER_TABLE_INDEX_A 0x19c14
#define LE_DB_SERVER_INDEX_A 0x19c18
#define LE_DB_SRVR_START_INDEX_A 0x19c18
+#define LE_DB_CLIP_TABLE_INDEX_A 0x19c1c
#define LE_DB_ACT_CNT_IPV4_A 0x19c20
#define LE_DB_ACT_CNT_IPV6_A 0x19c24
+#define LE_DB_HASH_CONFIG_A 0x19c28
+
+#define HASHTIDSIZE_S 16
+#define HASHTIDSIZE_M 0x3fU
+#define HASHTIDSIZE_G(x) (((x) >> HASHTIDSIZE_S) & HASHTIDSIZE_M)
+
#define LE_DB_HASH_TID_BASE_A 0x19c30
#define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
#define LE_DB_INT_CAUSE_A 0x19c3c
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0e3d9f39a807..c6e859a27ee6 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -605,7 +605,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
if (wrapped)
newacc += 65536;
- ACCESS_ONCE(*acc) = newacc;
+ WRITE_ONCE(*acc, newacc);
}
static void populate_erx_stats(struct be_adapter *adapter,
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 0cec06bec63e..340e28211135 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -373,7 +373,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
unsigned int count;
smp_rmb();
- count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
+ count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
if (count == 0)
goto out;
@@ -431,7 +431,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
dma_addr_t phys;
smp_rmb();
- count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
+ count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
if (count == (TX_DESC_NUM - 1)) {
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 781d5a8cbb6a..59ed806a52c3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -891,14 +891,14 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
if (hnae3_dev_roce_supported(hdev)) {
- hdev->num_roce_msix =
+ hdev->num_roce_msi =
hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
/* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors.
*/
- hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET;
+ hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
} else {
hdev->num_msi =
hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
@@ -1950,7 +1950,7 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
struct hnae3_handle *roce = &vport->roce;
struct hnae3_handle *nic = &vport->nic;
- roce->rinfo.num_vectors = vport->back->num_roce_msix;
+ roce->rinfo.num_vectors = vport->back->num_roce_msi;
if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
vport->back->num_msi_left == 0)
@@ -1968,67 +1968,47 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
return 0;
}
-static int hclge_init_msix(struct hclge_dev *hdev)
+static int hclge_init_msi(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
- int ret, i;
-
- hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi,
- sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!hdev->msix_entries)
- return -ENOMEM;
-
- hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
- sizeof(u16), GFP_KERNEL);
- if (!hdev->vector_status)
- return -ENOMEM;
+ int vectors;
+ int i;
- for (i = 0; i < hdev->num_msi; i++) {
- hdev->msix_entries[i].entry = i;
- hdev->vector_status[i] = HCLGE_INVALID_VPORT;
+ vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (vectors < 0) {
+ dev_err(&pdev->dev,
+ "failed(%d) to allocate MSI/MSI-X vectors\n",
+ vectors);
+ return vectors;
}
+ if (vectors < hdev->num_msi)
+ dev_warn(&hdev->pdev->dev,
+ "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ hdev->num_msi, vectors);
- hdev->num_msi_left = hdev->num_msi;
- hdev->base_msi_vector = hdev->pdev->irq;
+ hdev->num_msi = vectors;
+ hdev->num_msi_left = vectors;
+ hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = hdev->base_msi_vector +
HCLGE_ROCE_VECTOR_OFFSET;
- ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries,
- hdev->num_msi, hdev->num_msi);
- if (ret < 0) {
- dev_info(&hdev->pdev->dev,
- "MSI-X vector alloc failed: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static int hclge_init_msi(struct hclge_dev *hdev)
-{
- struct pci_dev *pdev = hdev->pdev;
- int vectors;
- int i;
-
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
- if (!hdev->vector_status)
+ if (!hdev->vector_status) {
+ pci_free_irq_vectors(pdev);
return -ENOMEM;
+ }
for (i = 0; i < hdev->num_msi; i++)
hdev->vector_status[i] = HCLGE_INVALID_VPORT;
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI);
- if (vectors < 0) {
- dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors);
- return -EINVAL;
+ hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(int), GFP_KERNEL);
+ if (!hdev->vector_irq) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
}
- hdev->num_msi = vectors;
- hdev->num_msi_left = vectors;
- hdev->base_msi_vector = pdev->irq;
- hdev->roce_base_vector = hdev->base_msi_vector +
- HCLGE_ROCE_VECTOR_OFFSET;
return 0;
}
@@ -2704,6 +2684,7 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
vport->vport_id *
HCLGE_VECTOR_VF_OFFSET;
hdev->vector_status[i] = vport->vport_id;
+ hdev->vector_irq[i] = vector->vector;
vector++;
alloc++;
@@ -2722,15 +2703,10 @@ static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
int i;
- for (i = 0; i < hdev->num_msi; i++) {
- if (hdev->msix_entries) {
- if (vector == hdev->msix_entries[i].vector)
- return i;
- } else {
- if (vector == (hdev->base_msi_vector + i))
- return i;
- }
- }
+ for (i = 0; i < hdev->num_msi; i++)
+ if (vector == hdev->vector_irq[i])
+ return i;
+
return -EINVAL;
}
@@ -4664,14 +4640,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
- if (hdev->flag & HCLGE_FLAG_USE_MSIX) {
- pci_disable_msix(pdev);
- devm_kfree(&pdev->dev, hdev->msix_entries);
- hdev->msix_entries = NULL;
- } else {
- pci_disable_msi(pdev);
- }
-
+ pci_free_irq_vectors(pdev);
pci_clear_master(pdev);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
@@ -4689,7 +4658,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_hclge_dev;
}
- hdev->flag |= HCLGE_FLAG_USE_MSIX;
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
hdev->reset_type = HNAE3_NONE_RESET;
@@ -4726,12 +4694,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- if (hdev->flag & HCLGE_FLAG_USE_MSIX)
- ret = hclge_init_msix(hdev);
- else
- ret = hclge_init_msi(hdev);
+ ret = hclge_init_msi(hdev);
if (ret) {
- dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret);
+ dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
return ret;
}
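The hclge rework above replaces hand-rolled msix_entry bookkeeping with the generic PCI IRQ-vector API. A minimal sketch of that API's shape, independent of hclge; foo_dev and its fields are hypothetical:

#include <linux/pci.h>

struct foo_dev {
	struct pci_dev *pdev;
	int num_vecs;
};

static int foo_setup_irqs(struct foo_dev *fdev, int wanted)
{
	int got;

	/* Allocate between 1 and 'wanted' vectors, MSI-X preferred over MSI */
	got = pci_alloc_irq_vectors(fdev->pdev, 1, wanted,
				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (got < 0)
		return got;

	fdev->num_vecs = got;
	/* The Linux IRQ number for vector i is pci_irq_vector(pdev, i) */
	return 0;
}

static void foo_teardown_irqs(struct foo_dev *fdev)
{
	pci_free_irq_vectors(fdev->pdev);
}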
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 742e6ee9efaf..7027814ea5d7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -425,9 +425,6 @@ struct hclge_dev {
u16 num_tqps; /* Num task queue pairs of this PF */
u16 num_req_vfs; /* Num VFs requested for this PF */
- u16 num_roce_msix; /* Num of roce vectors for this PF */
- int roce_base_vector;
-
/* Base task tqp physical id of this PF */
u16 base_tqp_pid;
u16 alloc_rss_size; /* Allocated RSS task queue */
@@ -457,8 +454,10 @@ struct hclge_dev {
u16 num_msi_left;
u16 num_msi_used;
u32 base_msi_vector;
- struct msix_entry *msix_entries;
u16 *vector_status;
+ int *vector_irq;
+ u16 num_roce_msi; /* Num of roce vectors for this PF */
+ int roce_base_vector;
u16 pending_udp_bitmap;
@@ -482,12 +481,10 @@ struct hclge_dev {
struct hnae3_client *nic_client;
struct hnae3_client *roce_client;
-#define HCLGE_FLAG_USE_MSI 0x00000001
-#define HCLGE_FLAG_USE_MSIX 0x00000002
-#define HCLGE_FLAG_MAIN 0x00000004
-#define HCLGE_FLAG_DCB_CAPABLE 0x00000008
-#define HCLGE_FLAG_DCB_ENABLE 0x00000010
-#define HCLGE_FLAG_MQPRIO_ENABLE 0x00000020
+#define HCLGE_FLAG_MAIN BIT(0)
+#define HCLGE_FLAG_DCB_CAPABLE BIT(1)
+#define HCLGE_FLAG_DCB_ENABLE BIT(2)
+#define HCLGE_FLAG_MQPRIO_ENABLE BIT(3)
u32 flag;
u32 pkt_buf_size; /* Total pf buf size for tx/rx */
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index b918bc2f2e4f..04aaacbc3d45 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -574,6 +574,15 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
return 0;
}
+static void release_vpd_data(struct ibmvnic_adapter *adapter)
+{
+ if (!adapter->vpd)
+ return;
+
+ kfree(adapter->vpd->buff);
+ kfree(adapter->vpd);
+}
+
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_tx_pool *tx_pool;
@@ -754,6 +763,8 @@ static void release_resources(struct ibmvnic_adapter *adapter)
{
int i;
+ release_vpd_data(adapter);
+
release_tx_pools(adapter);
release_rx_pools(adapter);
@@ -834,6 +845,57 @@ static int set_real_num_queues(struct net_device *netdev)
return rc;
}
+static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ union ibmvnic_crq crq;
+ int len = 0;
+
+ if (adapter->vpd->buff)
+ len = adapter->vpd->len;
+
+ reinit_completion(&adapter->fw_done);
+ crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
+ crq.get_vpd_size.cmd = GET_VPD_SIZE;
+ ibmvnic_send_crq(adapter, &crq);
+ wait_for_completion(&adapter->fw_done);
+
+ if (!adapter->vpd->len)
+ return -ENODATA;
+
+ if (!adapter->vpd->buff)
+ adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
+ else if (adapter->vpd->len != len)
+ adapter->vpd->buff =
+ krealloc(adapter->vpd->buff,
+ adapter->vpd->len, GFP_KERNEL);
+
+ if (!adapter->vpd->buff) {
+ dev_err(dev, "Could allocate VPD buffer\n");
+ return -ENOMEM;
+ }
+
+ adapter->vpd->dma_addr =
+ dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
+ dev_err(dev, "Could not map VPD buffer\n");
+ kfree(adapter->vpd->buff);
+ return -ENOMEM;
+ }
+
+ reinit_completion(&adapter->fw_done);
+ crq.get_vpd.first = IBMVNIC_CRQ_CMD;
+ crq.get_vpd.cmd = GET_VPD;
+ crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
+ crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
+ ibmvnic_send_crq(adapter, &crq);
+ wait_for_completion(&adapter->fw_done);
+
+ return 0;
+}
+
static int init_resources(struct ibmvnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -851,6 +913,10 @@ static int init_resources(struct ibmvnic_adapter *adapter)
if (rc)
return rc;
+ adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
+ if (!adapter->vpd)
+ return -ENOMEM;
+
adapter->map_id = 1;
adapter->napi = kcalloc(adapter->req_rx_queues,
sizeof(struct napi_struct), GFP_KERNEL);
@@ -924,7 +990,7 @@ static int __ibmvnic_open(struct net_device *netdev)
static int ibmvnic_open(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int rc;
+ int rc, vpd;
mutex_lock(&adapter->reset_lock);
@@ -951,6 +1017,12 @@ static int ibmvnic_open(struct net_device *netdev)
rc = __ibmvnic_open(netdev);
netif_carrier_on(netdev);
+
+ /* Vital Product Data (VPD) */
+ vpd = ibmvnic_get_vpd(adapter);
+ if (vpd)
+ netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
+
mutex_unlock(&adapter->reset_lock);
return rc;
@@ -1879,11 +1951,15 @@ static int ibmvnic_get_link_ksettings(struct net_device *netdev,
return 0;
}
-static void ibmvnic_get_drvinfo(struct net_device *dev,
+static void ibmvnic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, adapter->fw_version,
+ sizeof(info->fw_version));
}
static u32 ibmvnic_get_msglevel(struct net_device *netdev)
@@ -3140,6 +3216,73 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter)
ibmvnic_send_crq(adapter, &crq);
}
+static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+
+ if (crq->get_vpd_size_rsp.rc.code) {
+ dev_err(dev, "Error retrieving VPD size, rc=%x\n",
+ crq->get_vpd_size_rsp.rc.code);
+ complete(&adapter->fw_done);
+ return;
+ }
+
+ adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
+ complete(&adapter->fw_done);
+}
+
+static void handle_vpd_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ unsigned char *substr = NULL, *ptr = NULL;
+ u8 fw_level_len = 0;
+
+ memset(adapter->fw_version, 0, 32);
+
+ dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
+ DMA_FROM_DEVICE);
+
+ if (crq->get_vpd_rsp.rc.code) {
+ dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
+ crq->get_vpd_rsp.rc.code);
+ goto complete;
+ }
+
+ /* get the position of the firmware version info
+ * located after the ASCII 'RM' substring in the buffer
+ */
+ substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
+ if (!substr) {
+ dev_info(dev, "No FW level provided by VPD\n");
+ goto complete;
+ }
+
+ /* get length of firmware level ASCII substring */
+ if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
+ fw_level_len = *(substr + 2);
+ } else {
+ dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
+ goto complete;
+ }
+
+ /* copy firmware version string from vpd into adapter */
+ if ((substr + 3 + fw_level_len) <
+ (adapter->vpd->buff + adapter->vpd->len)) {
+ ptr = strncpy((char *)adapter->fw_version,
+ substr + 3, fw_level_len);
+
+ if (!ptr)
+ dev_err(dev, "Failed to isolate FW level string\n");
+ } else {
+ dev_info(dev, "FW substr extrapolated VPD buff\n");
+ }
+
+complete:
+ complete(&adapter->fw_done);
+}
+
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
@@ -3871,6 +4014,12 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
netdev_dbg(netdev, "Got Collect firmware trace Response\n");
complete(&adapter->fw_done);
break;
+ case GET_VPD_SIZE_RSP:
+ handle_vpd_size_rsp(crq, adapter);
+ break;
+ case GET_VPD_RSP:
+ handle_vpd_rsp(crq, adapter);
+ break;
default:
netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
gen_crq->cmd);
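The VPD additions above reuse the driver's existing fw_done completion to turn the asynchronous CRQ exchange into a synchronous call: reinitialize the completion, send the request, wait, and let the response handler call complete(). A minimal sketch of that handshake with hypothetical names; real code would also add a timeout and locking:

#include <linux/completion.h>
#include <linux/errno.h>

struct foo_adapter {
	struct completion fw_done;
	u64 reply_len;
};

/* Caller context: issue a request and block until the reply arrives */
static int foo_query_size(struct foo_adapter *a)
{
	reinit_completion(&a->fw_done);
	/* ... build and send the request to the device here ... */
	wait_for_completion(&a->fw_done);
	return a->reply_len ? 0 : -ENODATA;
}

/* Response context: record the result and wake the waiter */
static void foo_handle_size_rsp(struct foo_adapter *a, u64 len)
{
	a->reply_len = len;
	complete(&a->fw_done);
}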
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 8ed829c5b026..4487f1e2c266 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -560,6 +560,12 @@ struct ibmvnic_multicast_ctrl {
struct ibmvnic_rc rc;
} __packed __aligned(8);
+struct ibmvnic_get_vpd_size {
+ u8 first;
+ u8 cmd;
+ u8 reserved[14];
+} __packed __aligned(8);
+
struct ibmvnic_get_vpd_size_rsp {
u8 first;
u8 cmd;
@@ -577,6 +583,13 @@ struct ibmvnic_get_vpd {
u8 reserved[4];
} __packed __aligned(8);
+struct ibmvnic_get_vpd_rsp {
+ u8 first;
+ u8 cmd;
+ u8 reserved[10];
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
struct ibmvnic_acl_change_indication {
u8 first;
u8 cmd;
@@ -702,10 +715,10 @@ union ibmvnic_crq {
struct ibmvnic_change_mac_addr change_mac_addr_rsp;
struct ibmvnic_multicast_ctrl multicast_ctrl;
struct ibmvnic_multicast_ctrl multicast_ctrl_rsp;
- struct ibmvnic_generic_crq get_vpd_size;
+ struct ibmvnic_get_vpd_size get_vpd_size;
struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp;
struct ibmvnic_get_vpd get_vpd;
- struct ibmvnic_generic_crq get_vpd_rsp;
+ struct ibmvnic_get_vpd_rsp get_vpd_rsp;
struct ibmvnic_acl_change_indication acl_change_indication;
struct ibmvnic_acl_query acl_query;
struct ibmvnic_generic_crq acl_query_rsp;
@@ -939,6 +952,12 @@ struct ibmvnic_error_buff {
__be32 error_id;
};
+struct ibmvnic_vpd {
+ unsigned char *buff;
+ dma_addr_t dma_addr;
+ u64 len;
+};
+
enum vnic_state {VNIC_PROBING = 1,
VNIC_PROBED,
VNIC_OPENING,
@@ -980,6 +999,10 @@ struct ibmvnic_adapter {
dma_addr_t ip_offload_ctrl_tok;
u32 msg_enable;
+ /* Vital Product Data (VPD) */
+ struct ibmvnic_vpd *vpd;
+ char fw_version[32];
+
/* Statistics */
struct ibmvnic_statistics stats;
dma_addr_t stats_token;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 2b8bbc84e34f..4c3b4243cf65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -264,7 +264,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
vsi->rx_buf_failed, vsi->rx_page_failed);
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+ struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
if (!rx_ring)
continue;
@@ -320,7 +320,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed");
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index dc9b8dcf4a1e..5f6cf7212d4f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1722,7 +1722,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
}
rcu_read_lock();
for (j = 0; j < vsi->num_queue_pairs; j++) {
- tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+ tx_ring = READ_ONCE(vsi->tx_rings[j]);
if (!tx_ring)
continue;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 17e6f64299cf..4a964d6e4a9e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -464,7 +464,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
u64 bytes, packets;
unsigned int start;
- tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ tx_ring = READ_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
i40e_get_netdev_stats_struct_tx(tx_ring, stats);
@@ -814,7 +814,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
- p = ACCESS_ONCE(vsi->tx_rings[q]);
+ p = READ_ONCE(vsi->tx_rings[q]);
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index d8456c381c99..97381238eb7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -130,7 +130,7 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
}
smp_mb(); /* Force any pending update before accessing. */
- adj = ACCESS_ONCE(pf->ptp_base_adj);
+ adj = READ_ONCE(pf->ptp_base_adj);
freq = adj;
freq *= ppb;
@@ -499,7 +499,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
/* Update the base adjustment value. */
- ACCESS_ONCE(pf->ptp_base_adj) = incval;
+ WRITE_ONCE(pf->ptp_base_adj, incval);
smp_mb(); /* Force the above update. */
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 8eee081d395f..568c96842f28 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -375,7 +375,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg);
/* write operations, indexed using DWORDS */
#define wr32(reg, val) \
do { \
- u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+ u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
if (!E1000_REMOVED(hw_addr)) \
writel((val), &hw_addr[(reg)]); \
} while (0)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 43cf39527660..e94d3c256667 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -762,7 +762,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
- u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
if (E1000_REMOVED(hw_addr))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index e083732adf64..a01409e2e06c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -161,7 +161,7 @@ static inline bool ixgbe_removed(void __iomem *addr)
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (ixgbe_removed(reg_addr))
return;
@@ -180,7 +180,7 @@ static inline void writeq(u64 val, void __iomem *addr)
static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (ixgbe_removed(reg_addr))
return;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6eaca8366ac8..ca06c3cc2ca8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -380,7 +380,7 @@ static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
*/
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
u32 value;
if (ixgbe_removed(reg_addr))
@@ -8760,7 +8760,7 @@ static void ixgbe_get_stats64(struct net_device *netdev,
rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
u64 bytes, packets;
unsigned int start;
@@ -8776,12 +8776,12 @@ static void ixgbe_get_stats64(struct net_device *netdev,
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
ixgbe_get_ring_stats64(stats, ring);
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
ixgbe_get_ring_stats64(stats, ring);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 86d6924a2b71..ae312c45696a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -378,7 +378,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
}
smp_mb();
- incval = ACCESS_ONCE(adapter->base_incval);
+ incval = READ_ONCE(adapter->base_incval);
freq = incval;
freq *= ppb;
@@ -1159,7 +1159,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
}
/* update the base incval used to calculate frequency adjustment */
- ACCESS_ONCE(adapter->base_incval) = incval;
+ WRITE_ONCE(adapter->base_incval, incval);
smp_mb();
/* need lock to prevent incorrect read while modifying cyclecounter */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 12d3601b1d57..feed11bc9ddf 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -164,7 +164,7 @@ static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
u32 value;
if (IXGBE_REMOVED(reg_addr))
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 04d8d4ee4f04..c651fefcc3d2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -182,7 +182,7 @@ struct ixgbevf_info {
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (IXGBE_REMOVED(reg_addr))
return;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 64a04975bcf8..bc93b69cfd1e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -816,11 +816,14 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
{
u32 val;
- /* Only 255 descriptors can be added at once ; Assume caller
- * process TX desriptors in quanta less than 256
- */
- val = pend_desc + txq->pending;
- mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ pend_desc += txq->pending;
+
+ /* Only 255 Tx descriptors can be added at once */
+ do {
+ val = min(pend_desc, 255);
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ pend_desc -= val;
+ } while (pend_desc > 0);
txq->pending = 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index de0f9e5e42ec..e2b6b0cac1ac 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -231,10 +231,10 @@ static void dump_err_buf(struct mlx4_dev *dev)
i, swab32(readl(priv->catas_err.map + i)));
}
-static void poll_catas(unsigned long dev_ptr)
+static void poll_catas(struct timer_list *t)
{
- struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
- struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer);
+ struct mlx4_dev *dev = &priv->dev;
u32 slave_read;
if (mlx4_is_slave(dev)) {
@@ -277,7 +277,7 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
phys_addr_t addr;
INIT_LIST_HEAD(&priv->catas_err.list);
- setup_timer(&priv->catas_err.timer, poll_catas, (unsigned long)dev);
+ timer_setup(&priv->catas_err.timer, poll_catas, 0);
priv->catas_err.map = NULL;
if (!mlx4_is_slave(dev)) {
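
The mlx4 catas.c hunk follows the kernel-wide timer API conversion: the callback now takes a struct timer_list * and recovers its container with from_timer(), and setup_timer() becomes timer_setup(). A minimal sketch of that conversion pattern, using hypothetical names, looks like this:

/* Illustrative only -- 'my_priv', 'my_poll' and 'my_start' are not part of
 * the mlx4 driver; the timer_setup()/from_timer() usage is the point.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_priv {
	struct timer_list poll_timer;
	/* ... driver state ... */
};

static void my_poll(struct timer_list *t)
{
	struct my_priv *priv = from_timer(priv, t, poll_timer);

	/* ... periodic work using priv ... */
	mod_timer(&priv->poll_timer, jiffies + HZ);	/* re-arm in 1s */
}

static void my_start(struct my_priv *priv)
{
	timer_setup(&priv->poll_timer, my_poll, 0);
	mod_timer(&priv->poll_timer, jiffies + HZ);
}
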
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 92aec17f4b4d..85e28efcda33 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -193,7 +193,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->actual_size,
- GFP_KERNEL | __GFP_COLD)) {
+ GFP_KERNEL)) {
if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
en_err(priv, "Failed to allocate enough rx buffers\n");
return -ENOMEM;
@@ -551,8 +551,7 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
do {
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->prod & ring->size_mask,
- GFP_ATOMIC | __GFP_COLD |
- __GFP_MEMALLOC))
+ GFP_ATOMIC | __GFP_MEMALLOC))
break;
ring->prod++;
} while (likely(--missing));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 596445a4a241..6b6853773848 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -414,8 +414,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
index = cons_index & size_mask;
cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
- last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb);
- ring_cons = ACCESS_ONCE(ring->cons);
+ last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
+ ring_cons = READ_ONCE(ring->cons);
ring_index = ring_cons & size_mask;
stamp_index = ring_index;
@@ -479,8 +479,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
wmb();
/* we want to dirty this cache line once */
- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
+ WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
+ WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);
if (cq->type == TX_XDP)
return done < budget;
@@ -858,7 +858,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
/* fetch ring->cons far ahead before needing it to avoid stall */
- ring_cons = ACCESS_ONCE(ring->cons);
+ ring_cons = READ_ONCE(ring->cons);
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
&inline_ok, &fragptr);
@@ -1066,7 +1066,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
*/
smp_rmb();
- ring_cons = ACCESS_ONCE(ring->cons);
+ ring_cons = READ_ONCE(ring->cons);
if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index b2cd1ebf4e36..2d46ec84ebdf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -74,8 +74,8 @@
#include "../mlxfw/mlxfw.h"
#define MLXSW_FWREV_MAJOR 13
-#define MLXSW_FWREV_MINOR 1420
-#define MLXSW_FWREV_SUBMINOR 122
+#define MLXSW_FWREV_MINOR 1530
+#define MLXSW_FWREV_SUBMINOR 152
static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
.major = MLXSW_FWREV_MAJOR,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index e9187841d82a..632c7b229054 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2416,16 +2416,25 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}
+static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_rif *rif)
+{
+ char rauht_pl[MLXSW_REG_RAUHT_LEN];
+
+ mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
+ rif->rif_index, rif->addr);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+}
+
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
+ mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
- rif_list_node) {
- mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
+ rif_list_node)
mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
- }
}
enum mlxsw_sp_nexthop_type {
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 426c9a946eb4..fe7e0e1dd01d 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2623,7 +2623,7 @@ static void vxge_poll_vp_lockup(struct timer_list *t)
ring = &vdev->vpaths[i].ring;
/* Truncated to machine word size number of frames */
- rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
+ rx_frms = READ_ONCE(ring->stats.rx_frms);
/* Did this vpath received any packets */
if (ring->stats.prev_rx_frms == rx_frms) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 232044b1b7aa..1a603fdd9e80 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1185,7 +1185,7 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
} else {
struct page *page;
- page = alloc_page(GFP_KERNEL | __GFP_COLD);
+ page = alloc_page(GFP_KERNEL);
frag = page ? page_address(page) : NULL;
}
if (!frag) {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 29fea74bff2e..7b97a9969046 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1092,8 +1092,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
{
if (!rx_ring->pg_chunk.page) {
u64 map;
- rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
- GFP_ATOMIC,
+ rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
qdev->lbq_buf_order);
if (unlikely(!rx_ring->pg_chunk.page)) {
netif_err(qdev, drv, qdev->ndev,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 46d60013564c..e566dbb3343d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2077,7 +2077,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
- if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
+ if (likely(READ_ONCE(efx->irq_soft_enabled))) {
/* Note test interrupts */
if (context->index == efx->irq_level)
efx->last_irq_cpu = raw_smp_processor_id();
@@ -2092,7 +2092,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
struct efx_channel *channel;
efx_dword_t reg;
u32 queues;
@@ -3299,7 +3299,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
bool rx_cont;
u16 flags = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
/* Basic packet information */
@@ -3436,7 +3436,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
unsigned int tx_ev_q_label;
int tx_descs = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
@@ -5324,7 +5324,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx)
int i;
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
- if (ACCESS_ONCE(table->entry[i].spec) &
+ if (READ_ONCE(table->entry[i].spec) &
EFX_EF10_FILTER_FLAG_AUTO_OLD) {
rc = efx_ef10_filter_remove_internal(efx,
1U << EFX_FILTER_PRI_AUTO, i, true);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 6668e371405c..e3c492fcaff0 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2810,7 +2810,7 @@ static void efx_reset_work(struct work_struct *data)
unsigned long pending;
enum reset_type method;
- pending = ACCESS_ONCE(efx->reset_pending);
+ pending = READ_ONCE(efx->reset_pending);
method = fls(pending) - 1;
if (method == RESET_TYPE_MC_BIST)
@@ -2875,7 +2875,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
- if (ACCESS_ONCE(efx->state) != STATE_READY)
+ if (READ_ONCE(efx->state) != STATE_READY)
return;
/* efx_process_channel() will no longer read events once a
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 6685a66ee1a3..3d6c91e96589 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2543,7 +2543,7 @@ static void ef4_reset_work(struct work_struct *data)
unsigned long pending;
enum reset_type method;
- pending = ACCESS_ONCE(efx->reset_pending);
+ pending = READ_ONCE(efx->reset_pending);
method = fls(pending) - 1;
if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
@@ -2603,7 +2603,7 @@ void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
- if (ACCESS_ONCE(efx->state) != STATE_READY)
+ if (READ_ONCE(efx->state) != STATE_READY)
return;
queue_work(reset_workqueue, &efx->reset_work);
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index ccda017b6794..6520d7bc8d21 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -452,7 +452,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
"IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Check to see if we have a serious error condition */
@@ -1372,7 +1372,7 @@ static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
ef4_oword_t reg;
int link_speed, isolate;
- isolate = !!ACCESS_ONCE(efx->reset_pending);
+ isolate = !!READ_ONCE(efx->reset_pending);
switch (link_state->speed) {
case 10000: link_speed = 3; break;
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index 05916c710d8c..494884f6af4a 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -834,7 +834,7 @@ ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
struct ef4_nic *efx = channel->efx;
int tx_packets = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -990,7 +990,7 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
struct ef4_rx_queue *rx_queue;
struct ef4_nic *efx = channel->efx;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return;
rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1504,7 +1504,7 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
{
struct ef4_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
ef4_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct ef4_channel *channel;
@@ -1596,7 +1596,7 @@ irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
"IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Handle non-event-queue sources */
diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h
index e2e3c008d073..07c62dc552cb 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.h
+++ b/drivers/net/ethernet/sfc/falcon/nic.h
@@ -83,7 +83,7 @@ static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_
static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue,
unsigned int write_count)
{
- unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+ unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
@@ -466,11 +466,11 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx);
static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel)
{
- return ACCESS_ONCE(channel->event_test_cpu);
+ return READ_ONCE(channel->event_test_cpu);
}
static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx)
{
- return ACCESS_ONCE(efx->last_irq_cpu);
+ return READ_ONCE(efx->last_irq_cpu);
}
/* Global Resources */
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 382019b302db..02456ed13a7d 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -163,7 +163,7 @@ static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
do {
page = ef4_reuse_page(rx_queue);
if (page == NULL) {
- page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ page = alloc_pages(__GFP_COMP |
(atomic ? GFP_ATOMIC : GFP_KERNEL),
efx->rx_buffer_order);
if (unlikely(page == NULL))
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index 1b978d69e702..3409bbf5b19f 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -134,8 +134,8 @@ static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
- txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
- txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+ txq1->old_read_count = READ_ONCE(txq1->read_count);
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
@@ -524,7 +524,7 @@ void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
/* Check whether the hardware queue is now empty */
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
smp_mb();
tx_queue->empty_read_count =
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 6608dfe455b1..5334dc83d926 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -827,7 +827,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
struct efx_nic *efx = channel->efx;
int tx_packets = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -983,7 +983,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
struct efx_rx_queue *rx_queue;
struct efx_nic *efx = channel->efx;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return;
rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1524,7 +1524,7 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
efx_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct efx_channel *channel;
@@ -1616,7 +1616,7 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Handle non-event-queue sources */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 4d7fb8af880d..7b51b6371724 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -81,7 +81,7 @@ static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
unsigned int write_count)
{
- unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+ unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
@@ -617,11 +617,11 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
- return ACCESS_ONCE(channel->event_test_cpu);
+ return READ_ONCE(channel->event_test_cpu);
}
static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
{
- return ACCESS_ONCE(efx->last_irq_cpu);
+ return READ_ONCE(efx->last_irq_cpu);
}
/* Global Resources */
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 4f54245df0ec..caa89bf7603e 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -656,7 +656,7 @@ static void efx_ptp_send_times(struct efx_nic *efx,
/* Write host time for specified period or until MC is done */
while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
- ACCESS_ONCE(*mc_running)) {
+ READ_ONCE(*mc_running)) {
struct timespec64 update_time;
unsigned int host_time;
@@ -666,7 +666,7 @@ static void efx_ptp_send_times(struct efx_nic *efx,
do {
pps_get_ts(&now);
} while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
- ACCESS_ONCE(*mc_running));
+ READ_ONCE(*mc_running));
/* Synchronise NIC with single word of time only */
host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
@@ -830,14 +830,14 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
ptp->start.dma_addr);
/* Clear flag that signals MC ready */
- ACCESS_ONCE(*start) = 0;
+ WRITE_ONCE(*start, 0);
rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
EFX_WARN_ON_ONCE_PARANOID(rc);
/* Wait for start from MCDI (or timeout) */
timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
- while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
+ while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) {
udelay(20); /* Usually start MCDI execution quickly */
loops++;
}
@@ -847,7 +847,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
if (!time_before(jiffies, timeout))
++ptp->sync_timeouts;
- if (ACCESS_ONCE(*start))
+ if (READ_ONCE(*start))
efx_ptp_send_times(efx, &last_time);
/* Collect results */
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8cb60513dca2..cfe76aad79ee 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -163,7 +163,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
do {
page = efx_reuse_page(rx_queue);
if (page == NULL) {
- page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ page = alloc_pages(__GFP_COMP |
(atomic ? GFP_ATOMIC : GFP_KERNEL),
efx->rx_buffer_order);
if (unlikely(page == NULL))
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index ea27b8a7f465..0ea7e16f2e6e 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -136,8 +136,8 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
- txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
- txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+ txq1->old_read_count = READ_ONCE(txq1->read_count);
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
@@ -752,7 +752,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
/* Check whether the hardware queue is now empty */
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
smp_mb();
tx_queue->empty_read_count =
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index aeda3ab2d761..789dad8a07b5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -98,7 +98,7 @@
#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
GMAC_INT_PCS_ANE)
-#define GMAC_INT_DEFAULT_MASK GMAC_INT_PMT_EN
+#define GMAC_INT_DEFAULT_MASK (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN)
enum dwmac4_irq_status {
time_stamp_irq = 0x00001000,
@@ -106,6 +106,7 @@ enum dwmac4_irq_status {
mmc_tx_irq = 0x00000400,
mmc_rx_irq = 0x00000200,
mmc_irq = 0x00000100,
+ lpi_irq = 0x00000020,
pmt_irq = 0x00000010,
};
@@ -132,6 +133,10 @@ enum power_event {
#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
+#define GMAC4_LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
+#define GMAC4_LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
+#define GMAC4_LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
+#define GMAC4_LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
/* MAC Debug bitmap */
#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 2f7d7ec59962..f3ed8f7853eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -580,6 +580,25 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
x->irq_receive_pmt_irq_n++;
}
+ /* MAC tx/rx EEE LPI entry/exit interrupts */
+ if (intr_status & lpi_irq) {
+ /* Clear LPI interrupt by reading MAC_LPI_Control_Status */
+ u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+
+ if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
+ ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
+ x->irq_tx_path_in_lpi_mode_n++;
+ }
+ if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
+ ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
+ x->irq_tx_path_exit_lpi_mode_n++;
+ }
+ if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
+ x->irq_rx_path_in_lpi_mode_n++;
+ if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
+ x->irq_rx_path_exit_lpi_mode_n++;
+ }
+
dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
if (intr_status & PCS_RGSMIIIS_IRQ)
dwmac4_phystatus(ioaddr, x);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index ab502ee35fb2..06001bacbe0f 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6243,7 +6243,7 @@ static void niu_get_rx_stats(struct niu *np,
pkts = dropped = errors = bytes = 0;
- rx_rings = ACCESS_ONCE(np->rx_rings);
+ rx_rings = READ_ONCE(np->rx_rings);
if (!rx_rings)
goto no_rings;
@@ -6274,7 +6274,7 @@ static void niu_get_tx_stats(struct niu *np,
pkts = errors = bytes = 0;
- tx_rings = ACCESS_ONCE(np->tx_rings);
+ tx_rings = READ_ONCE(np->tx_rings);
if (!tx_rings)
goto no_rings;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
index e9672b1f9968..031cf9c3435a 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
@@ -335,7 +335,7 @@ static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata,
dma_addr_t pages_dma;
/* Try to obtain pages, decreasing order if necessary */
- gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ gfp |= __GFP_COMP | __GFP_NOWARN;
while (order >= 0) {
pages = alloc_pages(gfp, order);
if (pages)
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 15e2e3031d36..ed58c746e4af 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -906,7 +906,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
sw_data[0] = (u32)bufptr;
} else {
/* Allocate a secondary receive queue entry */
- page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
+ page = alloc_page(GFP_ATOMIC | GFP_DMA);
if (unlikely(!page)) {
dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
goto fail;
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index c00102b8145a..b3e5816a4678 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -40,7 +40,7 @@
#include <linux/tcp.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#include <linux/tick.h>
+#include <linux/sched/isolation.h>
#include <asm/checksum.h>
#include <asm/homecache.h>
@@ -2270,8 +2270,8 @@ static int __init tile_net_init_module(void)
tile_net_dev_init(name, mac);
if (!network_cpus_init())
- cpumask_and(&network_cpus_map, housekeeping_cpumask(),
- cpu_online_mask);
+ cpumask_and(&network_cpus_map,
+ housekeeping_cpumask(HK_FLAG_MISC), cpu_online_mask);
return 0;
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 5ec39f113127..4e16d839c311 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1490,6 +1490,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct geneve_dev *geneve = netdev_priv(dev);
struct ip_tunnel_info *info = &geneve->info;
+ bool metadata = geneve->collect_md;
__u8 tmp_vni[3];
__u32 vni;
@@ -1498,32 +1499,24 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
goto nla_put_failure;
- if (rtnl_dereference(geneve->sock4)) {
+ if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
info->key.u.ipv4.dst))
goto nla_put_failure;
-
if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
!!(info->key.tun_flags & TUNNEL_CSUM)))
goto nla_put_failure;
- }
-
#if IS_ENABLED(CONFIG_IPV6)
- if (rtnl_dereference(geneve->sock6)) {
+ } else if (!metadata) {
if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
&info->key.u.ipv6.dst))
goto nla_put_failure;
-
if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
!(info->key.tun_flags & TUNNEL_CSUM)))
goto nla_put_failure;
-
- if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
- !geneve->use_udp6_rx_checksums))
- goto nla_put_failure;
- }
#endif
+ }
if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) ||
nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) ||
@@ -1533,10 +1526,13 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst))
goto nla_put_failure;
- if (geneve->collect_md) {
- if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
+ if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
goto nla_put_failure;
- }
+
+ if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
+ !geneve->use_udp6_rx_checksums))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -1654,6 +1650,7 @@ static void __net_exit geneve_exit_net(struct net *net)
/* unregister the devices gathered above */
unregister_netdevice_many(&list);
rtnl_unlock();
+ WARN_ON_ONCE(!list_empty(&gn->sock_list));
}
static struct pernet_operations geneve_net_ops = {
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 0e27920c2b6b..be9aa368639f 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -616,7 +616,7 @@ static struct configfs_item_operations netconsole_target_item_ops = {
.release = netconsole_target_release,
};
-static struct config_item_type netconsole_target_type = {
+static const struct config_item_type netconsole_target_type = {
.ct_attrs = netconsole_target_attrs,
.ct_item_ops = &netconsole_target_item_ops,
.ct_owner = THIS_MODULE,
@@ -682,7 +682,7 @@ static struct configfs_group_operations netconsole_subsys_group_ops = {
.drop_item = drop_netconsole_target,
};
-static struct config_item_type netconsole_subsys_type = {
+static const struct config_item_type netconsole_subsys_type = {
.ct_group_ops = &netconsole_subsys_group_ops,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index d4670ecdb1ba..eda0a6e86918 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -115,11 +115,13 @@ static int rtl8211f_config_intr(struct phy_device *phydev)
{
int err;
+ phy_write(phydev, RTL821x_PAGE_SELECT, 0xa42);
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, RTL821x_INER,
RTL8211F_INER_LINK_STATUS);
else
err = phy_write(phydev, RTL821x_INER, 0);
+ phy_write(phydev, RTL821x_PAGE_SELECT, 0);
return err;
}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 44891335f9af..d8e5747ff4e3 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -962,6 +962,8 @@ static __net_exit void ppp_exit_net(struct net *net)
mutex_destroy(&pn->all_ppp_mutex);
idr_destroy(&pn->units_idr);
+ WARN_ON_ONCE(!list_empty(&pn->all_channels));
+ WARN_ON_ONCE(!list_empty(&pn->new_channels));
}
static struct pernet_operations ppp_net_ops = {
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 6c0c84c33e1f..b13890953ebb 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -257,7 +257,7 @@ static struct tap_queue *tap_get_queue(struct tap_dev *tap,
* and validate that the result isn't NULL - in case we are
* racing against queue removal.
*/
- int numvtaps = ACCESS_ONCE(tap->numvtaps);
+ int numvtaps = READ_ONCE(tap->numvtaps);
__u32 rxq;
if (!numvtaps)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 1a326b697221..6bb1e604aadd 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -546,7 +546,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
u32 numqueues = 0;
rcu_read_lock();
- numqueues = ACCESS_ONCE(tun->numqueues);
+ numqueues = READ_ONCE(tun->numqueues);
txq = __skb_get_hash_symmetric(skb);
if (txq) {
@@ -946,7 +946,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
tfile = rcu_dereference(tun->tfiles[txq]);
- numqueues = ACCESS_ONCE(tun->numqueues);
+ numqueues = READ_ONCE(tun->numqueues);
/* Drop packet if interface is not attached */
if (txq >= numqueues)
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 47cab1bde065..9e1b74590682 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -771,7 +771,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
int err;
u8 iface_no;
struct usb_cdc_parsed_header hdr;
- u16 curr_ntb_format;
+ __le16 curr_ntb_format;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -889,7 +889,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
goto error2;
}
- if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) {
+ if (curr_ntb_format == cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT)) {
dev_info(&intf->dev, "resetting NTB format to 16-bit");
err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
USB_TYPE_CLASS | USB_DIR_OUT
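
The cdc_ncm change above fixes an endianness bug: the NTB format value read from the device is little-endian on the wire, so the local variable becomes __le16 and the comparison is made against cpu_to_le16() of the host constant instead of comparing raw integers. A hedged sketch of that comparison style follows; the helper name is hypothetical and only the __le16/cpu_to_le16() handling reflects the patch.

/* 'ntb_is_32bit' is an illustrative helper, not part of cdc_ncm.c. */
#include <linux/types.h>
#include <asm/byteorder.h>	/* cpu_to_le16() */
#include <linux/usb/cdc.h>	/* USB_CDC_NCM_NTB32_FORMAT */

static bool ntb_is_32bit(__le16 fmt)	/* value exactly as read from the device */
{
	return fmt == cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT);
}
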
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index d49c7103085e..ca71f6c03859 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -149,6 +149,7 @@ struct ipheth_device {
u8 bulk_in;
u8 bulk_out;
struct delayed_work carrier_work;
+ bool confirmed_pairing;
};
static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags);
@@ -259,7 +260,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
dev->net->stats.rx_packets++;
dev->net->stats.rx_bytes += len;
-
+ dev->confirmed_pairing = true;
netif_rx(skb);
ipheth_rx_submit(dev, GFP_ATOMIC);
}
@@ -281,14 +282,21 @@ static void ipheth_sndbulk_callback(struct urb *urb)
__func__, status);
dev_kfree_skb_irq(dev->tx_skb);
- netif_wake_queue(dev->net);
+ if (status == 0)
+ netif_wake_queue(dev->net);
+ else
+ // on URB error, trigger immediate poll
+ schedule_delayed_work(&dev->carrier_work, 0);
}
static int ipheth_carrier_set(struct ipheth_device *dev)
{
struct usb_device *udev = dev->udev;
int retval;
-
+ if (!dev)
+ return 0;
+ if (!dev->confirmed_pairing)
+ return 0;
retval = usb_control_msg(udev,
usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
IPHETH_CMD_CARRIER_CHECK, /* request */
@@ -303,11 +311,14 @@ static int ipheth_carrier_set(struct ipheth_device *dev)
return retval;
}
- if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON)
+ if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) {
netif_carrier_on(dev->net);
- else
+ if (dev->tx_urb->status != -EINPROGRESS)
+ netif_wake_queue(dev->net);
+ } else {
netif_carrier_off(dev->net);
-
+ netif_stop_queue(dev->net);
+ }
return 0;
}
@@ -387,7 +398,6 @@ static int ipheth_open(struct net_device *net)
return retval;
schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
- netif_start_queue(net);
return retval;
}
@@ -491,7 +501,7 @@ static int ipheth_probe(struct usb_interface *intf,
dev->udev = udev;
dev->net = netdev;
dev->intf = intf;
-
+ dev->confirmed_pairing = false;
/* Set up endpoints */
hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM);
if (hintf == NULL) {
@@ -542,7 +552,9 @@ static int ipheth_probe(struct usb_interface *intf,
retval = -EIO;
goto err_register_netdev;
}
-
+ // carrier down and transmit queues stopped until packet from device
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n");
return 0;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index edf984406ba0..19a985ef9104 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1030,7 +1030,6 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
int err;
bool oom;
- gfp |= __GFP_COLD;
do {
if (vi->mergeable_rx_bufs)
err = add_recvbuf_mergeable(vi, rq, gfp);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 3247d2feda07..7ac487031b4b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1623,26 +1623,19 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct nd_msg *msg;
- const struct ipv6hdr *iphdr;
const struct in6_addr *daddr;
- struct neighbour *n;
+ const struct ipv6hdr *iphdr;
struct inet6_dev *in6_dev;
+ struct neighbour *n;
+ struct nd_msg *msg;
in6_dev = __in6_dev_get(dev);
if (!in6_dev)
goto out;
- if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
- goto out;
-
iphdr = ipv6_hdr(skb);
daddr = &iphdr->daddr;
-
msg = (struct nd_msg *)(iphdr + 1);
- if (msg->icmph.icmp6_code != 0 ||
- msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
- goto out;
if (ipv6_addr_loopback(daddr) ||
ipv6_addr_is_multicast(&msg->target))
@@ -2240,11 +2233,11 @@ tx_error:
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_rdst *rdst, *fdst = NULL;
const struct ip_tunnel_info *info;
- struct ethhdr *eth;
bool did_rsc = false;
- struct vxlan_rdst *rdst, *fdst = NULL;
struct vxlan_fdb *f;
+ struct ethhdr *eth;
__be32 vni = 0;
info = skb_tunnel_info(skb);
@@ -2269,12 +2262,14 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
if (ntohs(eth->h_proto) == ETH_P_ARP)
return arp_reduce(dev, skb, vni);
#if IS_ENABLED(CONFIG_IPV6)
- else if (ntohs(eth->h_proto) == ETH_P_IPV6) {
- struct ipv6hdr *hdr, _hdr;
- if ((hdr = skb_header_pointer(skb,
- skb_network_offset(skb),
- sizeof(_hdr), &_hdr)) &&
- hdr->nexthdr == IPPROTO_ICMPV6)
+ else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+ sizeof(struct nd_msg)) &&
+ ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
+ struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1);
+
+ if (m->icmph.icmp6_code == 0 &&
+ m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
return neigh_reduce(dev, skb, vni);
}
#endif
@@ -3702,6 +3697,7 @@ static void __net_exit vxlan_exit_net(struct net *net)
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan, *next;
struct net_device *dev, *aux;
+ unsigned int h;
LIST_HEAD(list);
rtnl_lock();
@@ -3721,6 +3717,9 @@ static void __net_exit vxlan_exit_net(struct net *net)
unregister_netdevice_many(&list);
rtnl_unlock();
+
+ for (h = 0; h < PORT_HASH_SIZE; ++h)
+ WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
}
static struct pernet_operations vxlan_net_ops = {
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index bd8d4392d68b..80f75139495f 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -500,13 +500,13 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
tx_status = &desc->ud.ds_tx5212.tx_stat;
- txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
+ txstat1 = READ_ONCE(tx_status->tx_status_1);
/* No frame has been send or error */
if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
return -EINPROGRESS;
- txstat0 = ACCESS_ONCE(tx_status->tx_status_0);
+ txstat0 = READ_ONCE(tx_status->tx_status_0);
/*
* Get descriptor status
@@ -700,14 +700,14 @@ ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
u32 rxstat0, rxstat1;
rx_status = &desc->ud.ds_rx.rx_stat;
- rxstat1 = ACCESS_ONCE(rx_status->rx_status_1);
+ rxstat1 = READ_ONCE(rx_status->rx_status_1);
/* No frame received / not ready */
if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
memset(rs, 0, sizeof(struct ath5k_rx_status));
- rxstat0 = ACCESS_ONCE(rx_status->rx_status_0);
+ rxstat0 = READ_ONCE(rx_status->rx_status_0);
/*
* Frame receive status
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index b2256aa76eb6..e3495ea95553 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3634,7 +3634,7 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
bus->dpc_running = true;
wmb();
- while (ACCESS_ONCE(bus->dpc_triggered)) {
+ while (READ_ONCE(bus->dpc_triggered)) {
bus->dpc_triggered = false;
brcmf_sdio_dpc(bus);
bus->idlecount = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index ce718e9c63ec..7078b7e458be 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1119,7 +1119,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- bool calibrating = ACCESS_ONCE(mvm->calibrating);
+ bool calibrating = READ_ONCE(mvm->calibrating);
if (state)
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index d88c3685a6dd..593b7f97b29c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -652,7 +652,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
return -1;
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
is_multicast_ether_addr(hdr->addr1)) {
- u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+ u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
if (ap_sta_id != IWL_MVM_INVALID_STA)
sta_id = ap_sta_id;
@@ -700,7 +700,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
tcp_hdrlen(skb);
- dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
+ dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);
if (!sta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index a06b6612b658..f25ce3a1ea50 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1247,7 +1247,7 @@ restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
i = rxq->read;
/* W/A 9000 device step A0 wrap-around bug */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 8d992d5ba064..b7a51603465b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2165,12 +2165,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
txq = trans_pcie->txq[txq_idx];
- wr_ptr = ACCESS_ONCE(txq->write_ptr);
+ wr_ptr = READ_ONCE(txq->write_ptr);
- while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
+ while (txq->read_ptr != READ_ONCE(txq->write_ptr) &&
!time_after(jiffies,
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
- u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
+ u8 write_ptr = READ_ONCE(txq->write_ptr);
if (WARN_ONCE(wr_ptr != write_ptr,
"WR pointer moved while flushing %d -> %d\n",
@@ -2642,7 +2642,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
spin_lock(&rxq->lock);
- r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
for (i = rxq->read, j = 0;
i != r && j < allocated_rb_nums;
@@ -2903,7 +2903,7 @@ static struct iwl_trans_dump_data
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
/* RBs */
- num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num))
+ num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
& 0x0FFF;
num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
len += num_rbs * (sizeof(*data) +
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index ec2f4c31425a..07a49f58070a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1380,7 +1380,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
mac80211_hwsim_monitor_rx(hw, skb, channel);
/* wmediumd mode check */
- _portid = ACCESS_ONCE(data->wmediumd);
+ _portid = READ_ONCE(data->wmediumd);
if (_portid)
return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
@@ -1477,7 +1477,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct ieee80211_channel *chan)
{
struct mac80211_hwsim_data *data = hw->priv;
- u32 _pid = ACCESS_ONCE(data->wmediumd);
+ u32 _pid = READ_ONCE(data->wmediumd);
if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
diff --git a/drivers/nfc/st-nci/Kconfig b/drivers/nfc/st-nci/Kconfig
index dc9b777d78f6..5c6e21ccb19c 100644
--- a/drivers/nfc/st-nci/Kconfig
+++ b/drivers/nfc/st-nci/Kconfig
@@ -11,7 +11,7 @@ config NFC_ST_NCI_I2C
select NFC_ST_NCI
---help---
This module adds support for an I2C interface to the
- STMicroelectronics NFC NCI chips familly.
+ STMicroelectronics NFC NCI chips family.
Select this if your platform is using the i2c bus.
If you choose to build a module, it'll be called st-nci_i2c.
@@ -23,7 +23,7 @@ config NFC_ST_NCI_SPI
select NFC_ST_NCI
---help---
This module adds support for an SPI interface to the
- STMicroelectronics NFC NCI chips familly.
+ STMicroelectronics NFC NCI chips family.
Select this if your platform is using the spi bus.
If you choose to build a module, it'll be called st-nci_spi.
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index 8ce69c833362..b793727cd4f7 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -19,11 +19,6 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/hwtest.h>
-#include <asm/mac_via.h>
-#include <asm/mac_oss.h>
-
-extern void via_nubus_init(void);
-extern void oss_nubus_init(void);
/* Constants */
@@ -841,14 +836,6 @@ static int __init nubus_init(void)
if (!MACH_IS_MAC)
return 0;
- /* Initialize the NuBus interrupts */
- if (oss_present) {
- oss_nubus_init();
- } else {
- via_nubus_init();
- }
-
- /* And probe */
pr_info("NuBus: Scanning NuBus slots.\n");
nubus_devices = NULL;
nubus_boards = NULL;
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 5bdd499b5f4f..a65f2e1d9f53 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -96,7 +96,7 @@ config NVDIMM_DAX
help
Support raw device dax access to a persistent memory
namespace. For environments that want to hard partition
- peristent memory, this capability provides a mechanism to
+ persistent memory, this capability provides a mechanism to
sub-divide a namespace into character devices that can only be
accessed via DAX (mmap(2)).
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index d5612bd1cc81..e949e3302af4 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -23,6 +23,7 @@
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
+#include <linux/backing-dev.h>
#include "btt.h"
#include "nd.h"
@@ -1402,6 +1403,8 @@ static int btt_blk_init(struct btt *btt)
btt->btt_disk->private_data = btt;
btt->btt_disk->queue = btt->btt_queue;
btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
+ btt->btt_disk->queue->backing_dev_info->capabilities |=
+ BDI_CAP_SYNCHRONOUS_IO;
blk_queue_make_request(btt->btt_queue, btt_make_request);
blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 39dfd7affa31..7fbc5c5dc8e1 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -31,6 +31,7 @@
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
+#include <linux/backing-dev.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
@@ -394,6 +395,7 @@ static int pmem_attach_disk(struct device *dev,
disk->fops = &pmem_fops;
disk->queue = q;
disk->flags = GENHD_FL_EXT_DEVT;
+ disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
/ 512);
diff --git a/drivers/nvme/Kconfig b/drivers/nvme/Kconfig
index b7c78a5b1f7a..04008e0bbe81 100644
--- a/drivers/nvme/Kconfig
+++ b/drivers/nvme/Kconfig
@@ -1,2 +1,6 @@
+menu "NVME Support"
+
source "drivers/nvme/host/Kconfig"
source "drivers/nvme/target/Kconfig"
+
+endmenu
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 46d6cb1e03bd..b979cf3bce65 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -13,6 +13,15 @@ config BLK_DEV_NVME
To compile this driver as a module, choose M here: the
module will be called nvme.
+config NVME_MULTIPATH
+ bool "NVMe multipath support"
+ depends on NVME_CORE
+ ---help---
+ This option enables support for multipath access to NVMe
+ subsystems. If this option is enabled only a single
+ /dev/nvmeXnY device will show up for each NVMe namespaces,
+ even if it is accessible through multiple controllers.
+
config NVME_FABRICS
tristate
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 7b96e4588a12..a25fd43650ad 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
nvme-core-y := core.o
+nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
nvme-y += pci.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 37f9039bb9ca..25da74d310d1 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -34,13 +34,13 @@
#define NVME_MINORS (1U << MINORBITS)
-unsigned char admin_timeout = 60;
-module_param(admin_timeout, byte, 0644);
+unsigned int admin_timeout = 60;
+module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);
-unsigned char nvme_io_timeout = 30;
-module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
+unsigned int nvme_io_timeout = 30;
+module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);
@@ -52,9 +52,6 @@ static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
-static int nvme_char_major;
-module_param(nvme_char_major, int, 0);
-
static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
@@ -71,10 +68,17 @@ MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);
-static LIST_HEAD(nvme_ctrl_list);
-static DEFINE_SPINLOCK(dev_list_lock);
+static DEFINE_IDA(nvme_subsystems_ida);
+static LIST_HEAD(nvme_subsystems);
+static DEFINE_MUTEX(nvme_subsystems_lock);
+static DEFINE_IDA(nvme_instance_ida);
+static dev_t nvme_chr_devt;
static struct class *nvme_class;
+static struct class *nvme_subsys_class;
+
+static void nvme_ns_remove(struct nvme_ns *ns);
+static int nvme_revalidate_disk(struct gendisk *disk);
static __le32 nvme_get_log_dw10(u8 lid, size_t size)
{
@@ -101,6 +105,51 @@ static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
return ret;
}
+static void nvme_delete_ctrl_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, delete_work);
+
+ flush_work(&ctrl->reset_work);
+ nvme_stop_ctrl(ctrl);
+ nvme_remove_namespaces(ctrl);
+ ctrl->ops->delete_ctrl(ctrl);
+ nvme_uninit_ctrl(ctrl);
+ nvme_put_ctrl(ctrl);
+}
+
+int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
+ return -EBUSY;
+ if (!queue_work(nvme_wq, &ctrl->delete_work))
+ return -EBUSY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
+
+int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
+{
+ int ret = 0;
+
+ /*
+ * Keep a reference until the work is flushed since ->delete_ctrl
+ * can free the controller.
+ */
+ nvme_get_ctrl(ctrl);
+ ret = nvme_delete_ctrl(ctrl);
+ if (!ret)
+ flush_work(&ctrl->delete_work);
+ nvme_put_ctrl(ctrl);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_delete_ctrl_sync);
+
+static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
+{
+ return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
+}
+
static blk_status_t nvme_error_status(struct request *req)
{
switch (nvme_req(req)->status & 0x7ff) {
@@ -142,9 +191,16 @@ static inline bool nvme_req_needs_retry(struct request *req)
void nvme_complete_rq(struct request *req)
{
if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
- nvme_req(req)->retries++;
- blk_mq_requeue_request(req, true);
- return;
+ if (nvme_req_needs_failover(req)) {
+ nvme_failover_req(req);
+ return;
+ }
+
+ if (!blk_queue_dying(req->q)) {
+ nvme_req(req)->retries++;
+ blk_mq_requeue_request(req, true);
+ return;
+ }
}
blk_mq_end_request(req, nvme_error_status(req));
@@ -153,18 +209,13 @@ EXPORT_SYMBOL_GPL(nvme_complete_rq);
void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
- int status;
-
if (!blk_mq_request_started(req))
return;
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
"Cancelling I/O %d", req->tag);
- status = NVME_SC_ABORT_REQ;
- if (blk_queue_dying(req->q))
- status |= NVME_SC_DNR;
- nvme_req(req)->status = status;
+ nvme_req(req)->status = NVME_SC_ABORT_REQ;
blk_mq_complete_request(req);
}
@@ -205,6 +256,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_RECONNECTING:
switch (old_state) {
case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
changed = true;
/* FALLTHRU */
default:
@@ -239,11 +291,29 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
ctrl->state = new_state;
spin_unlock_irqrestore(&ctrl->lock, flags);
-
+ if (changed && ctrl->state == NVME_CTRL_LIVE)
+ nvme_kick_requeue_lists(ctrl);
return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
+static void nvme_free_ns_head(struct kref *ref)
+{
+ struct nvme_ns_head *head =
+ container_of(ref, struct nvme_ns_head, ref);
+
+ nvme_mpath_remove_disk(head);
+ ida_simple_remove(&head->subsys->ns_ida, head->instance);
+ list_del_init(&head->entry);
+ cleanup_srcu_struct(&head->srcu);
+ kfree(head);
+}
+
+static void nvme_put_ns_head(struct nvme_ns_head *head)
+{
+ kref_put(&head->ref, nvme_free_ns_head);
+}
+
static void nvme_free_ns(struct kref *kref)
{
struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
@@ -251,14 +321,8 @@ static void nvme_free_ns(struct kref *kref)
if (ns->ndev)
nvme_nvm_unregister(ns);
- if (ns->disk) {
- spin_lock(&dev_list_lock);
- ns->disk->private_data = NULL;
- spin_unlock(&dev_list_lock);
- }
-
put_disk(ns->disk);
- ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
+ nvme_put_ns_head(ns->head);
nvme_put_ctrl(ns->ctrl);
kfree(ns);
}
@@ -268,31 +332,8 @@ static void nvme_put_ns(struct nvme_ns *ns)
kref_put(&ns->kref, nvme_free_ns);
}
-static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
-{
- struct nvme_ns *ns;
-
- spin_lock(&dev_list_lock);
- ns = disk->private_data;
- if (ns) {
- if (!kref_get_unless_zero(&ns->kref))
- goto fail;
- if (!try_module_get(ns->ctrl->ops->module))
- goto fail_put_ns;
- }
- spin_unlock(&dev_list_lock);
-
- return ns;
-
-fail_put_ns:
- kref_put(&ns->kref, nvme_free_ns);
-fail:
- spin_unlock(&dev_list_lock);
- return NULL;
-}
-
struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, unsigned int flags, int qid)
+ struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
struct request *req;
@@ -417,7 +458,7 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
{
memset(cmnd, 0, sizeof(*cmnd));
cmnd->common.opcode = nvme_cmd_flush;
- cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+ cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
@@ -448,7 +489,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
- cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
+ cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->dsm.nr = cpu_to_le32(segments - 1);
cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
@@ -467,16 +508,6 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
u16 control = 0;
u32 dsmgmt = 0;
- /*
- * If formated with metadata, require the block layer provide a buffer
- * unless this namespace is formated such that the metadata can be
- * stripped/generated by the controller with PRACT=1.
- */
- if (ns && ns->ms &&
- (!ns->pi_type || ns->ms != sizeof(struct t10_pi_tuple)) &&
- !blk_integrity_rq(req) && !blk_rq_is_passthrough(req))
- return BLK_STS_NOTSUPP;
-
if (req->cmd_flags & REQ_FUA)
control |= NVME_RW_FUA;
if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
@@ -487,7 +518,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
memset(cmnd, 0, sizeof(*cmnd));
cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
- cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+ cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
@@ -495,6 +526,18 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
if (ns->ms) {
+ /*
+ * If formatted with metadata, the block layer always provides a
+ * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
+ * we enable the PRACT bit for protection information or set the
+ * namespace capacity to zero to prevent any I/O.
+ */
+ if (!blk_integrity_rq(req)) {
+ if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
+ return BLK_STS_NOTSUPP;
+ control |= NVME_RW_PRINFO_PRACT;
+ }
+
switch (ns->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
control |= NVME_RW_PRINFO_PRCHK_GUARD;
@@ -507,8 +550,6 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
nvme_block_nr(ns, blk_rq_pos(req)));
break;
}
- if (!blk_integrity_rq(req))
- control |= NVME_RW_PRINFO_PRACT;
}
cmnd->rw.control = cpu_to_le16(control);
@@ -560,7 +601,8 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head, int flags)
+ unsigned timeout, int qid, int at_head,
+ blk_mq_req_flags_t flags)
{
struct request *req;
int ret;
@@ -778,7 +820,7 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
}
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
- u8 *eui64, u8 *nguid, uuid_t *uuid)
+ struct nvme_ns_ids *ids)
{
struct nvme_command c = { };
int status;
@@ -814,7 +856,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
goto free_data;
}
len = NVME_NIDT_EUI64_LEN;
- memcpy(eui64, data + pos + sizeof(*cur), len);
+ memcpy(ids->eui64, data + pos + sizeof(*cur), len);
break;
case NVME_NIDT_NGUID:
if (cur->nidl != NVME_NIDT_NGUID_LEN) {
@@ -824,7 +866,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
goto free_data;
}
len = NVME_NIDT_NGUID_LEN;
- memcpy(nguid, data + pos + sizeof(*cur), len);
+ memcpy(ids->nguid, data + pos + sizeof(*cur), len);
break;
case NVME_NIDT_UUID:
if (cur->nidl != NVME_NIDT_UUID_LEN) {
@@ -834,7 +876,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
goto free_data;
}
len = NVME_NIDT_UUID_LEN;
- uuid_copy(uuid, data + pos + sizeof(*cur));
+ uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
break;
default:
/* Skip unknown types */
@@ -968,7 +1010,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
memset(&c, 0, sizeof(c));
c.rw.opcode = io.opcode;
c.rw.flags = io.flags;
- c.rw.nsid = cpu_to_le32(ns->ns_id);
+ c.rw.nsid = cpu_to_le32(ns->head->ns_id);
c.rw.slba = cpu_to_le64(io.slba);
c.rw.length = cpu_to_le16(io.nblocks);
c.rw.control = cpu_to_le16(io.control);
@@ -982,12 +1024,87 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
metadata, meta_len, io.slba, NULL, 0);
}
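+/*
+ * Fallback command effects for a few admin opcodes, used when the controller
+ * does not provide a Commands Supported and Effects log page.
+ */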
+static u32 nvme_known_admin_effects(u8 opcode)
+{
+ switch (opcode) {
+ case nvme_admin_format_nvm:
+ return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
+ NVME_CMD_EFFECTS_CSE_MASK;
+ case nvme_admin_sanitize_nvm:
+ return NVME_CMD_EFFECTS_CSE_MASK;
+ default:
+ break;
+ }
+ return 0;
+}
+
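+/*
+ * Look up the effects of a passthrough command and freeze all I/O queues if
+ * it may change logical block contents; I/O commands only warn about
+ * unhandled effects.
+ */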
+static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ u8 opcode)
+{
+ u32 effects = 0;
+
+ if (ns) {
+ if (ctrl->effects)
+ effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
+ if (effects & ~NVME_CMD_EFFECTS_CSUPP)
+ dev_warn(ctrl->device,
+ "IO command:%02x has unhandled effects:%08x\n",
+ opcode, effects);
+ return 0;
+ }
+
+ if (ctrl->effects)
+ effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
+ else
+ effects = nvme_known_admin_effects(opcode);
+
+ /*
+ * For simplicity, IO to all namespaces is quiesced even if the command
+ * effects say only one namespace is affected.
+ */
+ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+ nvme_start_freeze(ctrl);
+ nvme_wait_freeze(ctrl);
+ }
+ return effects;
+}
+
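+/*
+ * Revalidate all namespaces after a command that may have changed their
+ * format, removing any namespace that fails revalidation.
+ */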
+static void nvme_update_formats(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ if (ns->disk && nvme_revalidate_disk(ns->disk))
+ nvme_ns_remove(ns);
+ }
+ mutex_unlock(&ctrl->namespaces_mutex);
+}
+
+static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
+{
+ /*
+ * Revalidate LBA changes prior to unfreezing. This is necessary to
+ * prevent memory corruption if a logical block size was changed by
+ * this command.
+ */
+ if (effects & NVME_CMD_EFFECTS_LBCC)
+ nvme_update_formats(ctrl);
+ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
+ nvme_unfreeze(ctrl);
+ if (effects & NVME_CMD_EFFECTS_CCC)
+ nvme_init_identify(ctrl);
+ if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
+ nvme_queue_scan(ctrl);
+}
+
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd __user *ucmd)
{
struct nvme_passthru_cmd cmd;
struct nvme_command c;
unsigned timeout = 0;
+ u32 effects;
int status;
if (!capable(CAP_SYS_ADMIN))
@@ -1013,10 +1130,13 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms);
+ effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
(void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
0, &cmd.result, timeout);
+ nvme_passthru_end(ctrl, effects);
+
if (status >= 0) {
if (put_user(cmd.result, &ucmd->result))
return -EFAULT;
@@ -1025,15 +1145,37 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return status;
}
-static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
+/*
+ * Issue ioctl requests on the first available path. Note that unlike normal
+ * block layer requests we will not retry a failed request on another controller.
+ */
+static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
+ struct nvme_ns_head **head, int *srcu_idx)
{
- struct nvme_ns *ns = bdev->bd_disk->private_data;
+#ifdef CONFIG_NVME_MULTIPATH
+ if (disk->fops == &nvme_ns_head_ops) {
+ *head = disk->private_data;
+ *srcu_idx = srcu_read_lock(&(*head)->srcu);
+ return nvme_find_path(*head);
+ }
+#endif
+ *head = NULL;
+ *srcu_idx = -1;
+ return disk->private_data;
+}
+static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
+{
+ if (head)
+ srcu_read_unlock(&head->srcu, idx);
+}
+
+static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned cmd, unsigned long arg)
+{
switch (cmd) {
case NVME_IOCTL_ID:
force_successful_syscall_return();
- return ns->ns_id;
+ return ns->head->ns_id;
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
case NVME_IOCTL_IO_CMD:
@@ -1052,27 +1194,39 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
}
}
-#ifdef CONFIG_COMPAT
-static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
+static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
- return nvme_ioctl(bdev, mode, cmd, arg);
+ struct nvme_ns_head *head = NULL;
+ struct nvme_ns *ns;
+ int srcu_idx, ret;
+
+ ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
+ if (unlikely(!ns))
+ ret = -EWOULDBLOCK;
+ else
+ ret = nvme_ns_ioctl(ns, cmd, arg);
+ nvme_put_ns_from_disk(head, srcu_idx);
+ return ret;
}
-#else
-#define nvme_compat_ioctl NULL
-#endif
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
- return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+#ifdef CONFIG_NVME_MULTIPATH
+ /* should never be called due to GENHD_FL_HIDDEN */
+ if (WARN_ON_ONCE(ns->head->disk))
+ return -ENXIO;
+#endif
+ if (!kref_get_unless_zero(&ns->kref))
+ return -ENXIO;
+ return 0;
}
static void nvme_release(struct gendisk *disk, fmode_t mode)
{
- struct nvme_ns *ns = disk->private_data;
-
- module_put(ns->ctrl->ops->module);
- nvme_put_ns(ns);
+ nvme_put_ns(disk->private_data);
}
static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -1085,35 +1239,12 @@ static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
- u16 bs)
-{
- struct nvme_ns *ns = disk->private_data;
- u16 old_ms = ns->ms;
- u8 pi_type = 0;
-
- ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
- ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
-
- /* PI implementation requires metadata equal t10 pi tuple size */
- if (ns->ms == sizeof(struct t10_pi_tuple))
- pi_type = id->dps & NVME_NS_DPS_PI_MASK;
-
- if (blk_get_integrity(disk) &&
- (ns->pi_type != pi_type || ns->ms != old_ms ||
- bs != queue_logical_block_size(disk->queue) ||
- (ns->ms && ns->ext)))
- blk_integrity_unregister(disk);
-
- ns->pi_type = pi_type;
-}
-
-static void nvme_init_integrity(struct nvme_ns *ns)
+static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
{
struct blk_integrity integrity;
memset(&integrity, 0, sizeof(integrity));
- switch (ns->pi_type) {
+ switch (pi_type) {
case NVME_NS_DPS_PI_TYPE3:
integrity.profile = &t10_pi_type3_crc;
integrity.tag_size = sizeof(u16) + sizeof(u32);
@@ -1129,16 +1260,12 @@ static void nvme_init_integrity(struct nvme_ns *ns)
integrity.profile = NULL;
break;
}
- integrity.tuple_size = ns->ms;
- blk_integrity_register(ns->disk, &integrity);
- blk_queue_max_integrity_segments(ns->queue, 1);
+ integrity.tuple_size = ms;
+ blk_integrity_register(disk, &integrity);
+ blk_queue_max_integrity_segments(disk->queue, 1);
}
#else
-static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
- u16 bs)
-{
-}
-static void nvme_init_integrity(struct nvme_ns *ns)
+static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -1149,53 +1276,89 @@ static void nvme_set_chunk_size(struct nvme_ns *ns)
blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}
-static void nvme_config_discard(struct nvme_ns *ns)
+static void nvme_config_discard(struct nvme_ctrl *ctrl,
+ unsigned stream_alignment, struct request_queue *queue)
{
- struct nvme_ctrl *ctrl = ns->ctrl;
- u32 logical_block_size = queue_logical_block_size(ns->queue);
+ u32 size = queue_logical_block_size(queue);
+
+ if (stream_alignment)
+ size *= stream_alignment;
BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
NVME_DSM_MAX_RANGES);
- if (ctrl->nr_streams && ns->sws && ns->sgs) {
- unsigned int sz = logical_block_size * ns->sws * ns->sgs;
+ queue->limits.discard_alignment = size;
+ queue->limits.discard_granularity = size;
- ns->queue->limits.discard_alignment = sz;
- ns->queue->limits.discard_granularity = sz;
- } else {
- ns->queue->limits.discard_alignment = logical_block_size;
- ns->queue->limits.discard_granularity = logical_block_size;
- }
- blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
- blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+ blk_queue_max_discard_sectors(queue, UINT_MAX);
+ blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue);
if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
- blk_queue_max_write_zeroes_sectors(ns->queue, UINT_MAX);
+ blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}
static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
- struct nvme_id_ns *id, u8 *eui64, u8 *nguid, uuid_t *uuid)
+ struct nvme_id_ns *id, struct nvme_ns_ids *ids)
{
+ memset(ids, 0, sizeof(*ids));
+
if (ctrl->vs >= NVME_VS(1, 1, 0))
- memcpy(eui64, id->eui64, sizeof(id->eui64));
+ memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
if (ctrl->vs >= NVME_VS(1, 2, 0))
- memcpy(nguid, id->nguid, sizeof(id->nguid));
+ memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
if (ctrl->vs >= NVME_VS(1, 3, 0)) {
/* Don't treat error as fatal since we potentially
* already have a NGUID or EUI-64
*/
- if (nvme_identify_ns_descs(ctrl, nsid, eui64, nguid, uuid))
+ if (nvme_identify_ns_descs(ctrl, nsid, ids))
dev_warn(ctrl->device,
"%s: Identify Descriptors failed\n", __func__);
}
}
+static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
+{
+ return !uuid_is_null(&ids->uuid) ||
+ memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
+ memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
+}
+
+static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
+{
+ return uuid_equal(&a->uuid, &b->uuid) &&
+ memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
+ memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
+}
+
+static void nvme_update_disk_info(struct gendisk *disk,
+ struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+ sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
+ unsigned stream_alignment = 0;
+
+ if (ns->ctrl->nr_streams && ns->sws && ns->sgs)
+ stream_alignment = ns->sws * ns->sgs;
+
+ blk_mq_freeze_queue(disk->queue);
+ blk_integrity_unregister(disk);
+
+ blk_queue_logical_block_size(disk->queue, 1 << ns->lba_shift);
+ if (ns->ms && !ns->ext &&
+ (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
+ nvme_init_integrity(disk, ns->ms, ns->pi_type);
+ if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk))
+ capacity = 0;
+ set_capacity(disk, capacity);
+
+ if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
+ nvme_config_discard(ns->ctrl, stream_alignment, disk->queue);
+ blk_mq_unfreeze_queue(disk->queue);
+}
+
static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
struct nvme_ns *ns = disk->private_data;
- struct nvme_ctrl *ctrl = ns->ctrl;
- u16 bs;
/*
* If identify namespace failed, use default 512 byte block size so
@@ -1204,26 +1367,22 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
if (ns->lba_shift == 0)
ns->lba_shift = 9;
- bs = 1 << ns->lba_shift;
ns->noiob = le16_to_cpu(id->noiob);
+ ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
+ ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
+ /* the PI implementation requires metadata equal to the t10 pi tuple size */
+ if (ns->ms == sizeof(struct t10_pi_tuple))
+ ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+ else
+ ns->pi_type = 0;
- blk_mq_freeze_queue(disk->queue);
-
- if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
- nvme_prep_integrity(disk, id, bs);
- blk_queue_logical_block_size(ns->queue, bs);
if (ns->noiob)
nvme_set_chunk_size(ns);
- if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
- nvme_init_integrity(ns);
- if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
- set_capacity(disk, 0);
- else
- set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
-
- if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
- nvme_config_discard(ns);
- blk_mq_unfreeze_queue(disk->queue);
+ nvme_update_disk_info(disk, ns, id);
+#ifdef CONFIG_NVME_MULTIPATH
+ if (ns->head->disk)
+ nvme_update_disk_info(ns->head->disk, ns, id);
+#endif
}
static int nvme_revalidate_disk(struct gendisk *disk)
@@ -1231,8 +1390,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
struct nvme_ns *ns = disk->private_data;
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_id_ns *id;
- u8 eui64[8] = { 0 }, nguid[16] = { 0 };
- uuid_t uuid = uuid_null;
+ struct nvme_ns_ids ids;
int ret = 0;
if (test_bit(NVME_NS_DEAD, &ns->flags)) {
@@ -1240,7 +1398,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
return -ENODEV;
}
- id = nvme_identify_ns(ctrl, ns->ns_id);
+ id = nvme_identify_ns(ctrl, ns->head->ns_id);
if (!id)
return -ENODEV;
@@ -1250,12 +1408,10 @@ static int nvme_revalidate_disk(struct gendisk *disk)
}
__nvme_revalidate_disk(disk, id);
- nvme_report_ns_ids(ctrl, ns->ns_id, id, eui64, nguid, &uuid);
- if (!uuid_equal(&ns->uuid, &uuid) ||
- memcmp(&ns->nguid, &nguid, sizeof(ns->nguid)) ||
- memcmp(&ns->eui, &eui64, sizeof(ns->eui))) {
+ nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
+ if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
dev_err(ctrl->device,
- "identifiers changed for nsid %d\n", ns->ns_id);
+ "identifiers changed for nsid %d\n", ns->head->ns_id);
ret = -ENODEV;
}
@@ -1287,8 +1443,10 @@ static char nvme_pr_type(enum pr_type type)
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
u64 key, u64 sa_key, u8 op)
{
- struct nvme_ns *ns = bdev->bd_disk->private_data;
+ struct nvme_ns_head *head = NULL;
+ struct nvme_ns *ns;
struct nvme_command c;
+ int srcu_idx, ret;
u8 data[16] = { 0, };
put_unaligned_le64(key, &data[0]);
@@ -1296,10 +1454,16 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
memset(&c, 0, sizeof(c));
c.common.opcode = op;
- c.common.nsid = cpu_to_le32(ns->ns_id);
+ c.common.nsid = cpu_to_le32(head->ns_id);
c.common.cdw10[0] = cpu_to_le32(cdw10);
- return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+ ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
+ if (unlikely(!ns))
+ ret = -EWOULDBLOCK;
+ else
+ ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+ nvme_put_ns_from_disk(head, srcu_idx);
+ return ret;
}
static int nvme_pr_register(struct block_device *bdev, u64 old,
@@ -1381,7 +1545,7 @@ EXPORT_SYMBOL_GPL(nvme_sec_submit);
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
- .compat_ioctl = nvme_compat_ioctl,
+ .compat_ioctl = nvme_ioctl,
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
@@ -1389,6 +1553,32 @@ static const struct block_device_operations nvme_fops = {
.pr_ops = &nvme_pr_ops,
};
+#ifdef CONFIG_NVME_MULTIPATH
+static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
+{
+ struct nvme_ns_head *head = bdev->bd_disk->private_data;
+
+ if (!kref_get_unless_zero(&head->ref))
+ return -ENXIO;
+ return 0;
+}
+
+static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
+{
+ nvme_put_ns_head(disk->private_data);
+}
+
+const struct block_device_operations nvme_ns_head_ops = {
+ .owner = THIS_MODULE,
+ .open = nvme_ns_head_open,
+ .release = nvme_ns_head_release,
+ .ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_ioctl,
+ .getgeo = nvme_getgeo,
+ .pr_ops = &nvme_pr_ops,
+};
+#endif /* CONFIG_NVME_MULTIPATH */
+
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
unsigned long timeout =
@@ -1737,14 +1927,15 @@ static bool quirk_matches(const struct nvme_id_ctrl *id,
string_matches(id->fr, q->fr, sizeof(id->fr));
}
-static void nvme_init_subnqn(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
+ struct nvme_id_ctrl *id)
{
size_t nqnlen;
int off;
nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
- strcpy(ctrl->subnqn, id->subnqn);
+ strncpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
return;
}
@@ -1752,14 +1943,222 @@ static void nvme_init_subnqn(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
- off = snprintf(ctrl->subnqn, NVMF_NQN_SIZE,
+ off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
"nqn.2014.08.org.nvmexpress:%4x%4x",
le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
- memcpy(ctrl->subnqn + off, id->sn, sizeof(id->sn));
+ memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
off += sizeof(id->sn);
- memcpy(ctrl->subnqn + off, id->mn, sizeof(id->mn));
+ memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
off += sizeof(id->mn);
- memset(ctrl->subnqn + off, 0, sizeof(ctrl->subnqn) - off);
+ memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
+}
+
+static void __nvme_release_subsystem(struct nvme_subsystem *subsys)
+{
+ ida_simple_remove(&nvme_subsystems_ida, subsys->instance);
+ kfree(subsys);
+}
+
+static void nvme_release_subsystem(struct device *dev)
+{
+ __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev));
+}
+
+static void nvme_destroy_subsystem(struct kref *ref)
+{
+ struct nvme_subsystem *subsys =
+ container_of(ref, struct nvme_subsystem, ref);
+
+ mutex_lock(&nvme_subsystems_lock);
+ list_del(&subsys->entry);
+ mutex_unlock(&nvme_subsystems_lock);
+
+ ida_destroy(&subsys->ns_ida);
+ device_del(&subsys->dev);
+ put_device(&subsys->dev);
+}
+
+static void nvme_put_subsystem(struct nvme_subsystem *subsys)
+{
+ kref_put(&subsys->ref, nvme_destroy_subsystem);
+}
+
+static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
+{
+ struct nvme_subsystem *subsys;
+
+ lockdep_assert_held(&nvme_subsystems_lock);
+
+ list_for_each_entry(subsys, &nvme_subsystems, entry) {
+ if (strcmp(subsys->subnqn, subsysnqn))
+ continue;
+ if (!kref_get_unless_zero(&subsys->ref))
+ continue;
+ return subsys;
+ }
+
+ return NULL;
+}
+
+#define SUBSYS_ATTR_RO(_name, _mode, _show) \
+ struct device_attribute subsys_attr_##_name = \
+ __ATTR(_name, _mode, _show, NULL)
+
+static ssize_t nvme_subsys_show_nqn(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
+}
+static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
+
+#define nvme_subsys_show_str_function(field) \
+static ssize_t subsys_##field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct nvme_subsystem *subsys = \
+ container_of(dev, struct nvme_subsystem, dev); \
+ return sprintf(buf, "%.*s\n", \
+ (int)sizeof(subsys->field), subsys->field); \
+} \
+static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
+
+nvme_subsys_show_str_function(model);
+nvme_subsys_show_str_function(serial);
+nvme_subsys_show_str_function(firmware_rev);
+
+static struct attribute *nvme_subsys_attrs[] = {
+ &subsys_attr_model.attr,
+ &subsys_attr_serial.attr,
+ &subsys_attr_firmware_rev.attr,
+ &subsys_attr_subsysnqn.attr,
+ NULL,
+};
+
+static struct attribute_group nvme_subsys_attrs_group = {
+ .attrs = nvme_subsys_attrs,
+};
+
+static const struct attribute_group *nvme_subsys_attrs_groups[] = {
+ &nvme_subsys_attrs_group,
+ NULL,
+};
+
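+/*
+ * Find an existing subsystem by SUBNQN or register a new one, then link this
+ * controller to it. A duplicate SUBNQN is only accepted when the controller
+ * reports multi-controller support in the CMIC field.
+ */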
+static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ struct nvme_subsystem *subsys, *found;
+ int ret;
+
+ subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
+ if (!subsys)
+ return -ENOMEM;
+ ret = ida_simple_get(&nvme_subsystems_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(subsys);
+ return ret;
+ }
+ subsys->instance = ret;
+ mutex_init(&subsys->lock);
+ kref_init(&subsys->ref);
+ INIT_LIST_HEAD(&subsys->ctrls);
+ INIT_LIST_HEAD(&subsys->nsheads);
+ nvme_init_subnqn(subsys, ctrl, id);
+ memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
+ memcpy(subsys->model, id->mn, sizeof(subsys->model));
+ memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
+ subsys->vendor_id = le16_to_cpu(id->vid);
+ subsys->cmic = id->cmic;
+
+ subsys->dev.class = nvme_subsys_class;
+ subsys->dev.release = nvme_release_subsystem;
+ subsys->dev.groups = nvme_subsys_attrs_groups;
+ dev_set_name(&subsys->dev, "nvme-subsys%d", subsys->instance);
+ device_initialize(&subsys->dev);
+
+ mutex_lock(&nvme_subsystems_lock);
+ found = __nvme_find_get_subsystem(subsys->subnqn);
+ if (found) {
+ /*
+ * Verify that the subsystem actually supports multiple
+ * controllers, else bail out.
+ */
+ if (!(id->cmic & (1 << 1))) {
+ dev_err(ctrl->device,
+ "ignoring ctrl due to duplicate subnqn (%s).\n",
+ found->subnqn);
+ nvme_put_subsystem(found);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ __nvme_release_subsystem(subsys);
+ subsys = found;
+ } else {
+ ret = device_add(&subsys->dev);
+ if (ret) {
+ dev_err(ctrl->device,
+ "failed to register subsystem device.\n");
+ goto out_unlock;
+ }
+ ida_init(&subsys->ns_ida);
+ list_add_tail(&subsys->entry, &nvme_subsystems);
+ }
+
+ ctrl->subsys = subsys;
+ mutex_unlock(&nvme_subsystems_lock);
+
+ if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
+ dev_name(ctrl->device))) {
+ dev_err(ctrl->device,
+ "failed to create sysfs link from subsystem.\n");
+ /* the transport driver will eventually put the subsystem */
+ return -EINVAL;
+ }
+
+ mutex_lock(&subsys->lock);
+ list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+ mutex_unlock(&subsys->lock);
+
+ return 0;
+
+out_unlock:
+ mutex_unlock(&nvme_subsystems_lock);
+ put_device(&subsys->dev);
+ return ret;
+}
+
+static int nvme_get_log(struct nvme_ctrl *ctrl, u8 log_page, void *log,
+ size_t size)
+{
+ struct nvme_command c = { };
+
+ c.common.opcode = nvme_admin_get_log_page;
+ c.common.nsid = cpu_to_le32(NVME_NSID_ALL);
+ c.common.cdw10[0] = nvme_get_log_dw10(log_page, size);
+
+ return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
+}
+
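+/*
+ * Read the Commands Supported and Effects log page into ctrl->effects,
+ * allocating the buffer on first use.
+ */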
+static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
+{
+ int ret;
+
+ if (!ctrl->effects)
+ ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
+
+ if (!ctrl->effects)
+ return 0;
+
+ ret = nvme_get_log(ctrl, NVME_LOG_CMD_EFFECTS, ctrl->effects,
+ sizeof(*ctrl->effects));
+ if (ret) {
+ kfree(ctrl->effects);
+ ctrl->effects = NULL;
+ }
+ return ret;
}
/*
@@ -1797,9 +2196,19 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
return -EIO;
}
- nvme_init_subnqn(ctrl, id);
+ if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
+ ret = nvme_get_effects_log(ctrl);
+ if (ret < 0)
+ return ret;
+ }
if (!ctrl->identified) {
+ int i;
+
+ ret = nvme_init_subsystem(ctrl, id);
+ if (ret)
+ goto out_free;
+
/*
* Check for quirks. Quirk can depend on firmware version,
* so, in principle, the set of quirks present can change
@@ -1808,9 +2217,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
* the device, but we'd have to make sure that the driver
* behaves intelligently if the quirks change.
*/
-
- int i;
-
for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
if (quirk_matches(id, &core_quirks[i]))
ctrl->quirks |= core_quirks[i].quirks;
@@ -1823,14 +2229,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
ctrl->oacs = le16_to_cpu(id->oacs);
- ctrl->vid = le16_to_cpu(id->vid);
ctrl->oncs = le16_to_cpup(&id->oncs);
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
ctrl->cntlid = le16_to_cpup(&id->cntlid);
- memcpy(ctrl->serial, id->sn, sizeof(id->sn));
- memcpy(ctrl->model, id->mn, sizeof(id->mn));
- memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
if (id->mdts)
max_hw_sectors = 1 << (id->mdts + page_shift - 9);
else
@@ -1931,33 +2333,12 @@ EXPORT_SYMBOL_GPL(nvme_init_identify);
static int nvme_dev_open(struct inode *inode, struct file *file)
{
- struct nvme_ctrl *ctrl;
- int instance = iminor(inode);
- int ret = -ENODEV;
-
- spin_lock(&dev_list_lock);
- list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
- if (ctrl->instance != instance)
- continue;
-
- if (!ctrl->admin_q) {
- ret = -EWOULDBLOCK;
- break;
- }
- if (!kref_get_unless_zero(&ctrl->kref))
- break;
- file->private_data = ctrl;
- ret = 0;
- break;
- }
- spin_unlock(&dev_list_lock);
-
- return ret;
-}
+ struct nvme_ctrl *ctrl =
+ container_of(inode->i_cdev, struct nvme_ctrl, cdev);
-static int nvme_dev_release(struct inode *inode, struct file *file)
-{
- nvme_put_ctrl(file->private_data);
+ if (ctrl->state != NVME_CTRL_LIVE)
+ return -EWOULDBLOCK;
+ file->private_data = ctrl;
return 0;
}
@@ -2021,7 +2402,6 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations nvme_dev_fops = {
.owner = THIS_MODULE,
.open = nvme_dev_open,
- .release = nvme_dev_release,
.unlocked_ioctl = nvme_dev_ioctl,
.compat_ioctl = nvme_dev_ioctl,
};
@@ -2051,77 +2431,86 @@ static ssize_t nvme_sysfs_rescan(struct device *dev,
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ if (disk->fops == &nvme_fops)
+ return nvme_get_ns_from_dev(dev)->head;
+ else
+ return disk->private_data;
+}
+
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
- struct nvme_ctrl *ctrl = ns->ctrl;
- int serial_len = sizeof(ctrl->serial);
- int model_len = sizeof(ctrl->model);
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
+ struct nvme_ns_ids *ids = &head->ids;
+ struct nvme_subsystem *subsys = head->subsys;
+ int serial_len = sizeof(subsys->serial);
+ int model_len = sizeof(subsys->model);
- if (!uuid_is_null(&ns->uuid))
- return sprintf(buf, "uuid.%pU\n", &ns->uuid);
+ if (!uuid_is_null(&ids->uuid))
+ return sprintf(buf, "uuid.%pU\n", &ids->uuid);
- if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
- return sprintf(buf, "eui.%16phN\n", ns->nguid);
+ if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ return sprintf(buf, "eui.%16phN\n", ids->nguid);
- if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
- return sprintf(buf, "eui.%8phN\n", ns->eui);
+ if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+ return sprintf(buf, "eui.%8phN\n", ids->eui64);
- while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||
- ctrl->serial[serial_len - 1] == '\0'))
+ while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
+ subsys->serial[serial_len - 1] == '\0'))
serial_len--;
- while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||
- ctrl->model[model_len - 1] == '\0'))
+ while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
+ subsys->model[model_len - 1] == '\0'))
model_len--;
- return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
- serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
+ return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
+ serial_len, subsys->serial, model_len, subsys->model,
+ head->ns_id);
}
static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
- return sprintf(buf, "%pU\n", ns->nguid);
+ return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR(nguid, S_IRUGO, nguid_show, NULL);
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+ struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
/* For backward compatibility expose the NGUID to userspace if
* we have no UUID set
*/
- if (uuid_is_null(&ns->uuid)) {
+ if (uuid_is_null(&ids->uuid)) {
printk_ratelimited(KERN_WARNING
"No UUID available providing old NGUID\n");
- return sprintf(buf, "%pU\n", ns->nguid);
+ return sprintf(buf, "%pU\n", ids->nguid);
}
- return sprintf(buf, "%pU\n", &ns->uuid);
+ return sprintf(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
- return sprintf(buf, "%8phd\n", ns->eui);
+ return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
- return sprintf(buf, "%d\n", ns->ns_id);
+ return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
-static struct attribute *nvme_ns_attrs[] = {
+static struct attribute *nvme_ns_id_attrs[] = {
&dev_attr_wwid.attr,
&dev_attr_uuid.attr,
&dev_attr_nguid.attr,
@@ -2130,31 +2519,31 @@ static struct attribute *nvme_ns_attrs[] = {
NULL,
};
-static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
+static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+ struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
if (a == &dev_attr_uuid.attr) {
- if (uuid_is_null(&ns->uuid) &&
- !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
+ if (uuid_is_null(&ids->uuid) &&
+ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
return 0;
}
if (a == &dev_attr_nguid.attr) {
- if (!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
+ if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
return 0;
}
if (a == &dev_attr_eui.attr) {
- if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
+ if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
return 0;
}
return a->mode;
}
-static const struct attribute_group nvme_ns_attr_group = {
- .attrs = nvme_ns_attrs,
- .is_visible = nvme_ns_attrs_are_visible,
+const struct attribute_group nvme_ns_id_attr_group = {
+ .attrs = nvme_ns_id_attrs,
+ .is_visible = nvme_ns_id_attrs_are_visible,
};
#define nvme_show_str_function(field) \
@@ -2162,10 +2551,15 @@ static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
- return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field); \
+ return sprintf(buf, "%.*s\n", \
+ (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
+nvme_show_str_function(model);
+nvme_show_str_function(serial);
+nvme_show_str_function(firmware_rev);
+
#define nvme_show_int_function(field) \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -2175,9 +2569,6 @@ static ssize_t field##_show(struct device *dev, \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
-nvme_show_str_function(model);
-nvme_show_str_function(serial);
-nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);
static ssize_t nvme_sysfs_delete(struct device *dev,
@@ -2187,7 +2578,7 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
if (device_remove_file_self(dev, attr))
- ctrl->ops->delete_ctrl(ctrl);
+ nvme_delete_ctrl_sync(ctrl);
return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
@@ -2231,7 +2622,7 @@ static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subnqn);
+ return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
@@ -2284,12 +2675,128 @@ static const struct attribute_group *nvme_dev_attr_groups[] = {
NULL,
};
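+/*
+ * Find a namespace head by NSID within a subsystem; must be called with
+ * subsys->lock held. Returns the head with its reference elevated, or NULL.
+ */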
+static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys,
+ unsigned nsid)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+
+ list_for_each_entry(h, &subsys->nsheads, entry) {
+ if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
+ return h;
+ }
+
+ return NULL;
+}
+
+static int __nvme_check_ids(struct nvme_subsystem *subsys,
+ struct nvme_ns_head *new)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+
+ list_for_each_entry(h, &subsys->nsheads, entry) {
+ if (nvme_ns_ids_valid(&new->ids) &&
+ nvme_ns_ids_equal(&new->ids, &h->ids))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
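+/*
+ * Allocate a new namespace head: assign a per-subsystem instance, record the
+ * reported namespace identifiers, reject duplicate IDs and set up the
+ * multipath disk.
+ */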
+static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
+ unsigned nsid, struct nvme_id_ns *id)
+{
+ struct nvme_ns_head *head;
+ int ret = -ENOMEM;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
+ goto out;
+ ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
+ if (ret < 0)
+ goto out_free_head;
+ head->instance = ret;
+ INIT_LIST_HEAD(&head->list);
+ init_srcu_struct(&head->srcu);
+ head->subsys = ctrl->subsys;
+ head->ns_id = nsid;
+ kref_init(&head->ref);
+
+ nvme_report_ns_ids(ctrl, nsid, id, &head->ids);
+
+ ret = __nvme_check_ids(ctrl->subsys, head);
+ if (ret) {
+ dev_err(ctrl->device,
+ "duplicate IDs for nsid %d\n", nsid);
+ goto out_cleanup_srcu;
+ }
+
+ ret = nvme_mpath_alloc_disk(ctrl, head);
+ if (ret)
+ goto out_cleanup_srcu;
+
+ list_add_tail(&head->entry, &ctrl->subsys->nsheads);
+ return head;
+out_cleanup_srcu:
+ cleanup_srcu_struct(&head->srcu);
+ ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
+out_free_head:
+ kfree(head);
+out:
+ return ERR_PTR(ret);
+}
+
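+/*
+ * Attach a namespace to its head. A shared namespace reuses an existing head
+ * with the same NSID as long as the reported identifiers match; otherwise a
+ * new head is allocated.
+ */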
+static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
+ struct nvme_id_ns *id, bool *new)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ bool is_shared = id->nmic & (1 << 0);
+ struct nvme_ns_head *head = NULL;
+ int ret = 0;
+
+ mutex_lock(&ctrl->subsys->lock);
+ if (is_shared)
+ head = __nvme_find_ns_head(ctrl->subsys, nsid);
+ if (!head) {
+ head = nvme_alloc_ns_head(ctrl, nsid, id);
+ if (IS_ERR(head)) {
+ ret = PTR_ERR(head);
+ goto out_unlock;
+ }
+
+ *new = true;
+ } else {
+ struct nvme_ns_ids ids;
+
+ nvme_report_ns_ids(ctrl, nsid, id, &ids);
+ if (!nvme_ns_ids_equal(&head->ids, &ids)) {
+ dev_err(ctrl->device,
+ "IDs don't match for shared namespace %d\n",
+ nsid);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ *new = false;
+ }
+
+ list_add_tail(&ns->siblings, &head->list);
+ ns->head = head;
+
+out_unlock:
+ mutex_unlock(&ctrl->subsys->lock);
+ return ret;
+}
+
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
- return nsa->ns_id - nsb->ns_id;
+ return nsa->head->ns_id - nsb->head->ns_id;
}
static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
@@ -2298,12 +2805,13 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->ns_id == nsid) {
- kref_get(&ns->kref);
+ if (ns->head->ns_id == nsid) {
+ if (!kref_get_unless_zero(&ns->kref))
+ continue;
ret = ns;
break;
}
- if (ns->ns_id > nsid)
+ if (ns->head->ns_id > nsid)
break;
}
mutex_unlock(&ctrl->namespaces_mutex);
@@ -2318,7 +2826,7 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
if (!ctrl->nr_streams)
return 0;
- ret = nvme_get_stream_params(ctrl, &s, ns->ns_id);
+ ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
if (ret)
return ret;
@@ -2342,33 +2850,27 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
struct gendisk *disk;
struct nvme_id_ns *id;
char disk_name[DISK_NAME_LEN];
- int node = dev_to_node(ctrl->dev);
+ int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT;
+ bool new = true;
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
return;
- ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
- if (ns->instance < 0)
- goto out_free_ns;
-
ns->queue = blk_mq_init_queue(ctrl->tagset);
if (IS_ERR(ns->queue))
- goto out_release_instance;
+ goto out_free_ns;
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
ns->queue->queuedata = ns;
ns->ctrl = ctrl;
kref_init(&ns->kref);
- ns->ns_id = nsid;
ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
nvme_set_queue_limits(ctrl, ns->queue);
nvme_setup_streams_ns(ctrl, ns);
- sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
-
id = nvme_identify_ns(ctrl, nsid);
if (!id)
goto out_free_queue;
@@ -2376,23 +2878,49 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (id->ncap == 0)
goto out_free_id;
- nvme_report_ns_ids(ctrl, ns->ns_id, id, ns->eui, ns->nguid, &ns->uuid);
+ if (nvme_init_ns_head(ns, nsid, id, &new))
+ goto out_free_id;
+
+#ifdef CONFIG_NVME_MULTIPATH
+ /*
+ * If multipathing is enabled we need to always use the subsystem
+ * instance number for numbering our devices to avoid conflicts
+ * between subsystems that have multiple controllers and thus use
+ * the multipath-aware subsystem node and those that have a single
+ * controller and use the controller node directly.
+ */
+ if (ns->head->disk) {
+ sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+ ctrl->cntlid, ns->head->instance);
+ flags = GENHD_FL_HIDDEN;
+ } else {
+ sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
+ ns->head->instance);
+ }
+#else
+ /*
+ * But without the multipath code enabled, multiple controllers per
+ * subsystem are visible as devices and thus we cannot use the
+ * subsystem instance.
+ */
+ sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+#endif
if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
if (nvme_nvm_register(ns, disk_name, node)) {
dev_warn(ctrl->device, "LightNVM init failure\n");
- goto out_free_id;
+ goto out_unlink_ns;
}
}
disk = alloc_disk_node(0, node);
if (!disk)
- goto out_free_id;
+ goto out_unlink_ns;
disk->fops = &nvme_fops;
disk->private_data = ns;
disk->queue = ns->queue;
- disk->flags = GENHD_FL_EXT_DEVT;
+ disk->flags = flags;
memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
ns->disk = disk;
@@ -2402,49 +2930,65 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
list_add_tail(&ns->list, &ctrl->namespaces);
mutex_unlock(&ctrl->namespaces_mutex);
- kref_get(&ctrl->kref);
+ nvme_get_ctrl(ctrl);
kfree(id);
device_add_disk(ctrl->device, ns->disk);
if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
- &nvme_ns_attr_group))
+ &nvme_ns_id_attr_group))
pr_warn("%s: failed to create sysfs group for identification\n",
ns->disk->disk_name);
if (ns->ndev && nvme_nvm_register_sysfs(ns))
pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
ns->disk->disk_name);
+
+ if (new)
+ nvme_mpath_add_disk(ns->head);
+ nvme_mpath_add_disk_links(ns);
return;
+ out_unlink_ns:
+ mutex_lock(&ctrl->subsys->lock);
+ list_del_rcu(&ns->siblings);
+ mutex_unlock(&ctrl->subsys->lock);
out_free_id:
kfree(id);
out_free_queue:
blk_cleanup_queue(ns->queue);
- out_release_instance:
- ida_simple_remove(&ctrl->ns_ida, ns->instance);
out_free_ns:
kfree(ns);
}
static void nvme_ns_remove(struct nvme_ns *ns)
{
+ struct nvme_ns_head *head = ns->head;
+
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
if (blk_get_integrity(ns->disk))
blk_integrity_unregister(ns->disk);
+ nvme_mpath_remove_disk_links(ns);
sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
- &nvme_ns_attr_group);
+ &nvme_ns_id_attr_group);
if (ns->ndev)
nvme_nvm_unregister_sysfs(ns);
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
}
+ mutex_lock(&ns->ctrl->subsys->lock);
+ nvme_mpath_clear_current_path(ns);
+ if (head)
+ list_del_rcu(&ns->siblings);
+ mutex_unlock(&ns->ctrl->subsys->lock);
+
mutex_lock(&ns->ctrl->namespaces_mutex);
list_del_init(&ns->list);
mutex_unlock(&ns->ctrl->namespaces_mutex);
+ synchronize_srcu(&head->srcu);
nvme_put_ns(ns);
}
@@ -2467,7 +3011,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
struct nvme_ns *ns, *next;
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
- if (ns->ns_id > nsid)
+ if (ns->head->ns_id > nsid)
nvme_ns_remove(ns);
}
}
@@ -2583,20 +3127,29 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
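+/*
+ * Report the latest asynchronous event result to userspace as a udev change
+ * event on the controller device.
+ */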
+static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
+{
+ char *envp[2] = { NULL, NULL };
+ u32 aen_result = ctrl->aen_result;
+
+ ctrl->aen_result = 0;
+ if (!aen_result)
+ return;
+
+ envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
+ if (!envp[0])
+ return;
+ kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
+ kfree(envp[0]);
+}
+
static void nvme_async_event_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl =
container_of(work, struct nvme_ctrl, async_event_work);
- spin_lock_irq(&ctrl->lock);
- while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
- int aer_idx = --ctrl->event_limit;
-
- spin_unlock_irq(&ctrl->lock);
- ctrl->ops->submit_async_event(ctrl, aer_idx);
- spin_lock_irq(&ctrl->lock);
- }
- spin_unlock_irq(&ctrl->lock);
+ nvme_aen_uevent(ctrl);
+ ctrl->ops->submit_async_event(ctrl);
}
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
@@ -2615,18 +3168,13 @@ static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
- struct nvme_command c = { };
struct nvme_fw_slot_info_log *log;
log = kmalloc(sizeof(*log), GFP_KERNEL);
if (!log)
return;
- c.common.opcode = nvme_admin_get_log_page;
- c.common.nsid = cpu_to_le32(NVME_NSID_ALL);
- c.common.cdw10[0] = nvme_get_log_dw10(NVME_LOG_FW_SLOT, sizeof(*log));
-
- if (!nvme_submit_sync_cmd(ctrl->admin_q, &c, log, sizeof(*log)))
+ if (nvme_get_log(ctrl, NVME_LOG_FW_SLOT, log, sizeof(*log)))
dev_warn(ctrl->device,
"Get FW SLOT INFO log error\n");
kfree(log);
@@ -2660,7 +3208,7 @@ static void nvme_fw_act_work(struct work_struct *work)
return;
nvme_start_queues(ctrl);
- /* read FW slot informationi to clear the AER*/
+ /* read FW slot information to clear the AER */
nvme_get_fw_slot_info(ctrl);
}
@@ -2668,24 +3216,21 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
union nvme_result *res)
{
u32 result = le32_to_cpu(res->u32);
- bool done = true;
- switch (le16_to_cpu(status) >> 1) {
- case NVME_SC_SUCCESS:
- done = false;
- /*FALLTHRU*/
- case NVME_SC_ABORT_REQ:
- ++ctrl->event_limit;
- if (ctrl->state == NVME_CTRL_LIVE)
- queue_work(nvme_wq, &ctrl->async_event_work);
+ if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
+ return;
+
+ switch (result & 0x7) {
+ case NVME_AER_ERROR:
+ case NVME_AER_SMART:
+ case NVME_AER_CSS:
+ case NVME_AER_VS:
+ ctrl->aen_result = result;
break;
default:
break;
}
- if (done)
- return;
-
switch (result & 0xff07) {
case NVME_AER_NOTICE_NS_CHANGED:
dev_info(ctrl->device, "rescanning\n");
@@ -2697,44 +3242,9 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
default:
dev_warn(ctrl->device, "async event result %08x\n", result);
}
-}
-EXPORT_SYMBOL_GPL(nvme_complete_async_event);
-
-void nvme_queue_async_events(struct nvme_ctrl *ctrl)
-{
- ctrl->event_limit = NVME_NR_AERS;
queue_work(nvme_wq, &ctrl->async_event_work);
}
-EXPORT_SYMBOL_GPL(nvme_queue_async_events);
-
-static DEFINE_IDA(nvme_instance_ida);
-
-static int nvme_set_instance(struct nvme_ctrl *ctrl)
-{
- int instance, error;
-
- do {
- if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
- return -ENODEV;
-
- spin_lock(&dev_list_lock);
- error = ida_get_new(&nvme_instance_ida, &instance);
- spin_unlock(&dev_list_lock);
- } while (error == -EAGAIN);
-
- if (error)
- return -ENODEV;
-
- ctrl->instance = instance;
- return 0;
-}
-
-static void nvme_release_instance(struct nvme_ctrl *ctrl)
-{
- spin_lock(&dev_list_lock);
- ida_remove(&nvme_instance_ida, ctrl->instance);
- spin_unlock(&dev_list_lock);
-}
+EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
@@ -2752,7 +3262,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
- nvme_queue_async_events(ctrl);
+ queue_work(nvme_wq, &ctrl->async_event_work);
nvme_start_queues(ctrl);
}
}
@@ -2760,30 +3270,31 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
- device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
-
- spin_lock(&dev_list_lock);
- list_del(&ctrl->node);
- spin_unlock(&dev_list_lock);
+ cdev_device_del(&ctrl->cdev, ctrl->device);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
-static void nvme_free_ctrl(struct kref *kref)
+static void nvme_free_ctrl(struct device *dev)
{
- struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);
+ struct nvme_ctrl *ctrl =
+ container_of(dev, struct nvme_ctrl, ctrl_device);
+ struct nvme_subsystem *subsys = ctrl->subsys;
- put_device(ctrl->device);
- nvme_release_instance(ctrl);
- ida_destroy(&ctrl->ns_ida);
+ ida_simple_remove(&nvme_instance_ida, ctrl->instance);
+ kfree(ctrl->effects);
+
+ if (subsys) {
+ mutex_lock(&subsys->lock);
+ list_del(&ctrl->subsys_entry);
+ mutex_unlock(&subsys->lock);
+ sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
+ }
ctrl->ops->free_ctrl(ctrl);
-}
-void nvme_put_ctrl(struct nvme_ctrl *ctrl)
-{
- kref_put(&ctrl->kref, nvme_free_ctrl);
+ if (subsys)
+ nvme_put_subsystem(subsys);
}
-EXPORT_SYMBOL_GPL(nvme_put_ctrl);
/*
* Initialize a NVMe controller structures. This needs to be called during
@@ -2799,32 +3310,36 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
spin_lock_init(&ctrl->lock);
INIT_LIST_HEAD(&ctrl->namespaces);
mutex_init(&ctrl->namespaces_mutex);
- kref_init(&ctrl->kref);
ctrl->dev = dev;
ctrl->ops = ops;
ctrl->quirks = quirks;
INIT_WORK(&ctrl->scan_work, nvme_scan_work);
INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
+ INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
- ret = nvme_set_instance(ctrl);
- if (ret)
+ ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0)
goto out;
-
- ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
- MKDEV(nvme_char_major, ctrl->instance),
- ctrl, nvme_dev_attr_groups,
- "nvme%d", ctrl->instance);
- if (IS_ERR(ctrl->device)) {
- ret = PTR_ERR(ctrl->device);
+ ctrl->instance = ret;
+
+ device_initialize(&ctrl->ctrl_device);
+ ctrl->device = &ctrl->ctrl_device;
+ ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
+ ctrl->device->class = nvme_class;
+ ctrl->device->parent = ctrl->dev;
+ ctrl->device->groups = nvme_dev_attr_groups;
+ ctrl->device->release = nvme_free_ctrl;
+ dev_set_drvdata(ctrl->device, ctrl);
+ ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
+ if (ret)
goto out_release_instance;
- }
- get_device(ctrl->device);
- ida_init(&ctrl->ns_ida);
- spin_lock(&dev_list_lock);
- list_add_tail(&ctrl->node, &nvme_ctrl_list);
- spin_unlock(&dev_list_lock);
+ cdev_init(&ctrl->cdev, &nvme_dev_fops);
+ ctrl->cdev.owner = ops->module;
+ ret = cdev_device_add(&ctrl->cdev, ctrl->device);
+ if (ret)
+ goto out_free_name;
/*
* Initialize latency tolerance controls. The sysfs files won't
@@ -2835,8 +3350,10 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
min(default_ps_max_latency_us, (unsigned long)S32_MAX));
return 0;
+out_free_name:
+ kfree_const(dev->kobj.name);
out_release_instance:
- nvme_release_instance(ctrl);
+ ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
return ret;
}
@@ -2945,6 +3462,16 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
+int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set)
+{
+ if (!ctrl->ops->reinit_request)
+ return 0;
+
+ return blk_mq_tagset_iter(set, set->driver_data,
+ ctrl->ops->reinit_request);
+}
+EXPORT_SYMBOL_GPL(nvme_reinit_tagset);
+
int __init nvme_core_init(void)
{
int result;
@@ -2954,12 +3481,9 @@ int __init nvme_core_init(void)
if (!nvme_wq)
return -ENOMEM;
- result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
- &nvme_dev_fops);
+ result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
if (result < 0)
goto destroy_wq;
- else if (result > 0)
- nvme_char_major = result;
nvme_class = class_create(THIS_MODULE, "nvme");
if (IS_ERR(nvme_class)) {
@@ -2967,10 +3491,17 @@ int __init nvme_core_init(void)
goto unregister_chrdev;
}
+ nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
+ if (IS_ERR(nvme_subsys_class)) {
+ result = PTR_ERR(nvme_subsys_class);
+ goto destroy_class;
+ }
return 0;
+destroy_class:
+ class_destroy(nvme_class);
unregister_chrdev:
- __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+ unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
destroy_wq:
destroy_workqueue(nvme_wq);
return result;
@@ -2978,8 +3509,10 @@ destroy_wq:
void nvme_core_exit(void)
{
+ ida_destroy(&nvme_subsystems_ida);
+ class_destroy(nvme_subsys_class);
class_destroy(nvme_class);
- __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+ unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
destroy_workqueue(nvme_wq);
}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 555c976cc2ee..76b4fe6816a0 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -548,6 +548,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_HOSTNQN, "hostnqn=%s" },
{ NVMF_OPT_HOST_TRADDR, "host_traddr=%s" },
{ NVMF_OPT_HOST_ID, "hostid=%s" },
+ { NVMF_OPT_DUP_CONNECT, "duplicate_connect" },
{ NVMF_OPT_ERR, NULL }
};
@@ -566,6 +567,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->nr_io_queues = num_online_cpus();
opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
opts->kato = NVME_DEFAULT_KATO;
+ opts->duplicate_connect = false;
options = o = kstrdup(buf, GFP_KERNEL);
if (!options)
@@ -742,6 +744,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
goto out;
}
break;
+ case NVMF_OPT_DUP_CONNECT:
+ opts->duplicate_connect = true;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -823,7 +828,7 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
#define NVMF_REQUIRED_OPTS (NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
- NVMF_OPT_HOST_ID)
+ NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT)
static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
@@ -841,6 +846,9 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
if (ret)
goto out_free_opts;
+
+ request_module("nvme-%s", opts->transport);
+
/*
* Check the generic options first as we need a valid transport for
* the lookup below. Then clear the generic flags so that transport
@@ -874,12 +882,12 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
goto out_unlock;
}
- if (strcmp(ctrl->subnqn, opts->subsysnqn)) {
+ if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
dev_warn(ctrl->device,
"controller returned incorrect NQN: \"%s\".\n",
- ctrl->subnqn);
+ ctrl->subsys->subnqn);
up_read(&nvmf_transports_rwsem);
- ctrl->ops->delete_ctrl(ctrl);
+ nvme_delete_ctrl_sync(ctrl);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index bf33663218cd..42232e731f19 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -57,6 +57,7 @@ enum {
NVMF_OPT_HOST_TRADDR = 1 << 10,
NVMF_OPT_CTRL_LOSS_TMO = 1 << 11,
NVMF_OPT_HOST_ID = 1 << 12,
+ NVMF_OPT_DUP_CONNECT = 1 << 13,
};
/**
@@ -96,6 +97,7 @@ struct nvmf_ctrl_options {
unsigned int nr_io_queues;
unsigned int reconnect_delay;
bool discovery_nqn;
+ bool duplicate_connect;
unsigned int kato;
struct nvmf_host *host;
int max_reconnects;
@@ -131,6 +133,18 @@ struct nvmf_transport_ops {
struct nvmf_ctrl_options *opts);
};
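+/*
+ * A connect request addresses the same controller association as an existing
+ * controller when the subsystem NQN, host NQN and host ID all match.
+ */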
+static inline bool
+nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
+ struct nvmf_ctrl_options *opts)
+{
+ if (strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
+ strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
+ memcmp(&opts->host->id, &ctrl->opts->host->id, sizeof(uuid_t)))
+ return false;
+
+ return true;
+}
+
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index be49d0f79381..7ab0be55c7d0 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -30,27 +30,19 @@
/* *************************** Data Structures/Defines ****************** */
-/*
- * We handle AEN commands ourselves and don't even let the
- * block layer know about them.
- */
-#define NVME_FC_NR_AEN_COMMANDS 1
-#define NVME_FC_AQ_BLKMQ_DEPTH \
- (NVME_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
-#define AEN_CMDID_BASE (NVME_FC_AQ_BLKMQ_DEPTH + 1)
-
enum nvme_fc_queue_flags {
NVME_FC_Q_CONNECTED = (1 << 0),
};
#define NVMEFC_QUEUE_DELAY 3 /* ms units */
+#define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */
+
struct nvme_fc_queue {
struct nvme_fc_ctrl *ctrl;
struct device *dev;
struct blk_mq_hw_ctx *hctx;
void *lldd_handle;
- int queue_size;
size_t cmnd_capsule_len;
u32 qnum;
u32 rqcnt;
@@ -124,6 +116,7 @@ struct nvme_fc_lport {
struct device *dev; /* physical device for dma */
struct nvme_fc_port_template *ops;
struct kref ref;
+ atomic_t act_rport_cnt;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
struct nvme_fc_rport {
@@ -136,6 +129,8 @@ struct nvme_fc_rport {
struct nvme_fc_lport *lport;
spinlock_t lock;
struct kref ref;
+ atomic_t act_ctrl_cnt;
+ unsigned long dev_loss_end;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
enum nvme_fcctrl_flags {
@@ -150,6 +145,7 @@ struct nvme_fc_ctrl {
struct nvme_fc_rport *rport;
u32 cnum;
+ bool assoc_active;
u64 association_id;
struct list_head ctrl_list; /* rport->ctrl_list */
@@ -157,7 +153,6 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set admin_tag_set;
struct blk_mq_tag_set tag_set;
- struct work_struct delete_work;
struct delayed_work connect_work;
struct kref ref;
@@ -165,7 +160,7 @@ struct nvme_fc_ctrl {
u32 iocnt;
wait_queue_head_t ioabort_wait;
- struct nvme_fc_fcp_op aen_ops[NVME_FC_NR_AEN_COMMANDS];
+ struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS];
struct nvme_ctrl ctrl;
};
@@ -213,10 +208,16 @@ static DEFINE_IDA(nvme_fc_ctrl_cnt);
+/*
+ * These items are short-term. They will eventually be moved into
+ * a generic FC class. See comments in module init.
+ */
+static struct class *fc_class;
+static struct device *fc_udev_device;
+
/* *********************** FC-NVME Port Management ************************ */
-static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
struct nvme_fc_queue *, unsigned int);
@@ -235,9 +236,6 @@ nvme_fc_free_lport(struct kref *ref)
list_del(&lport->port_list);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
- /* let the LLDD know we've finished tearing it down */
- lport->ops->localport_delete(&lport->localport);
-
ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
ida_destroy(&lport->endp_cnt);
@@ -260,7 +258,9 @@ nvme_fc_lport_get(struct nvme_fc_lport *lport)
static struct nvme_fc_lport *
-nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo)
+nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
+ struct nvme_fc_port_template *ops,
+ struct device *dev)
{
struct nvme_fc_lport *lport;
unsigned long flags;
@@ -272,6 +272,11 @@ nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo)
lport->localport.port_name != pinfo->port_name)
continue;
+ if (lport->dev != dev) {
+ lport = ERR_PTR(-EXDEV);
+ goto out_done;
+ }
+
if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
lport = ERR_PTR(-EEXIST);
goto out_done;
@@ -288,6 +293,7 @@ nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo)
/* resume the lport */
+ lport->ops = ops;
lport->localport.port_role = pinfo->port_role;
lport->localport.port_id = pinfo->port_id;
lport->localport.port_state = FC_OBJSTATE_ONLINE;
@@ -348,7 +354,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
* expired, we can simply re-enable the localport. Remoteports
* and controller reconnections should resume naturally.
*/
- newrec = nvme_fc_attach_to_unreg_lport(pinfo);
+ newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);
/* found an lport, but something about its state is bad */
if (IS_ERR(newrec)) {
@@ -384,6 +390,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
INIT_LIST_HEAD(&newrec->port_list);
INIT_LIST_HEAD(&newrec->endp_list);
kref_init(&newrec->ref);
+ atomic_set(&newrec->act_rport_cnt, 0);
newrec->ops = template;
newrec->dev = dev;
ida_init(&newrec->endp_cnt);
@@ -446,12 +453,177 @@ nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
spin_unlock_irqrestore(&nvme_fc_lock, flags);
+ if (atomic_read(&lport->act_rport_cnt) == 0)
+ lport->ops->localport_delete(&lport->localport);
+
nvme_fc_lport_put(lport);
return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
+/*
+ * TRADDR strings, per FC-NVME, are fixed format:
+ * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
+ * The udev event will only differ by the prefix indicating which field
+ * is being specified:
+ * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
+ * 19 + 43 + null_fudge = 64 characters
+ */
+#define FCNVME_TRADDR_LENGTH 64
+
+static void
+nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
+ struct nvme_fc_rport *rport)
+{
+ char hostaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_HOST_TRADDR=...*/
+ char tgtaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_TRADDR=...*/
+ char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };
+
+ if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
+ return;
+
+ snprintf(hostaddr, sizeof(hostaddr),
+ "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
+ lport->localport.node_name, lport->localport.port_name);
+ snprintf(tgtaddr, sizeof(tgtaddr),
+ "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
+ rport->remoteport.node_name, rport->remoteport.port_name);
+ kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static void
+nvme_fc_free_rport(struct kref *ref)
+{
+ struct nvme_fc_rport *rport =
+ container_of(ref, struct nvme_fc_rport, ref);
+ struct nvme_fc_lport *lport =
+ localport_to_lport(rport->remoteport.localport);
+ unsigned long flags;
+
+ WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
+ WARN_ON(!list_empty(&rport->ctrl_list));
+
+ /* remove from lport list */
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_del(&rport->endp_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
+
+ kfree(rport);
+
+ nvme_fc_lport_put(lport);
+}
+
+static void
+nvme_fc_rport_put(struct nvme_fc_rport *rport)
+{
+ kref_put(&rport->ref, nvme_fc_free_rport);
+}
+
+static int
+nvme_fc_rport_get(struct nvme_fc_rport *rport)
+{
+ return kref_get_unless_zero(&rport->ref);
+}
+
+static void
+nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
+{
+ switch (ctrl->ctrl.state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_RECONNECTING:
+ /*
+ * As all reconnects were suppressed, schedule a
+ * connect.
+ */
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: connectivity re-established. "
+ "Attempting reconnect\n", ctrl->cnum);
+
+ queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
+ break;
+
+ case NVME_CTRL_RESETTING:
+ /*
+ * Controller is already in the process of terminating the
+ * association. No need to do anything further. The reconnect
+ * step will naturally occur after the reset completes.
+ */
+ break;
+
+ default:
+ /* no action to take - let it delete */
+ break;
+ }
+}
+
+static struct nvme_fc_rport *
+nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
+ struct nvme_fc_port_info *pinfo)
+{
+ struct nvme_fc_rport *rport;
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+
+ list_for_each_entry(rport, &lport->endp_list, endp_list) {
+ if (rport->remoteport.node_name != pinfo->node_name ||
+ rport->remoteport.port_name != pinfo->port_name)
+ continue;
+
+ if (!nvme_fc_rport_get(rport)) {
+ rport = ERR_PTR(-ENOLCK);
+ goto out_done;
+ }
+
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ spin_lock_irqsave(&rport->lock, flags);
+
+ /* has it been unregistered */
+ if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
+ /* means lldd called us twice */
+ spin_unlock_irqrestore(&rport->lock, flags);
+ nvme_fc_rport_put(rport);
+ return ERR_PTR(-ESTALE);
+ }
+
+ rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
+ rport->dev_loss_end = 0;
+
+ /*
+ * kick off a reconnect attempt on all associations to the
+ * remote port. A successful reconnect will resume i/o.
+ */
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
+ nvme_fc_resume_controller(ctrl);
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ return rport;
+ }
+
+ rport = NULL;
+
+out_done:
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ return rport;
+}
+
+static inline void
+__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
+ struct nvme_fc_port_info *pinfo)
+{
+ if (pinfo->dev_loss_tmo)
+ rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
+ else
+ rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
+}
+
/**
* nvme_fc_register_remoteport - transport entry point called by an
* LLDD to register the existence of a NVME
@@ -478,28 +650,52 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
unsigned long flags;
int ret, idx;
+ if (!nvme_fc_lport_get(lport)) {
+ ret = -ESHUTDOWN;
+ goto out_reghost_failed;
+ }
+
+ /*
+ * look to see if there is already a remoteport that is waiting
+ * for a reconnect (within dev_loss_tmo) with the same WWNs.
+ * If so, transition to it and reconnect.
+ */
+ newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);
+
+ /* found an rport, but something about its state is bad */
+ if (IS_ERR(newrec)) {
+ ret = PTR_ERR(newrec);
+ goto out_lport_put;
+
+ /* found existing rport, which was resumed */
+ } else if (newrec) {
+ nvme_fc_lport_put(lport);
+ __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
+ nvme_fc_signal_discovery_scan(lport, newrec);
+ *portptr = &newrec->remoteport;
+ return 0;
+ }
+
+ /* nothing found - allocate a new remoteport struct */
+
newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
GFP_KERNEL);
if (!newrec) {
ret = -ENOMEM;
- goto out_reghost_failed;
- }
-
- if (!nvme_fc_lport_get(lport)) {
- ret = -ESHUTDOWN;
- goto out_kfree_rport;
+ goto out_lport_put;
}
idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
- goto out_lport_put;
+ goto out_kfree_rport;
}
INIT_LIST_HEAD(&newrec->endp_list);
INIT_LIST_HEAD(&newrec->ctrl_list);
INIT_LIST_HEAD(&newrec->ls_req_list);
kref_init(&newrec->ref);
+ atomic_set(&newrec->act_ctrl_cnt, 0);
spin_lock_init(&newrec->lock);
newrec->remoteport.localport = &lport->localport;
newrec->dev = lport->dev;
@@ -511,63 +707,27 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
newrec->remoteport.port_id = pinfo->port_id;
newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
newrec->remoteport.port_num = idx;
+ __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
spin_lock_irqsave(&nvme_fc_lock, flags);
list_add_tail(&newrec->endp_list, &lport->endp_list);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
+ nvme_fc_signal_discovery_scan(lport, newrec);
+
*portptr = &newrec->remoteport;
return 0;
-out_lport_put:
- nvme_fc_lport_put(lport);
out_kfree_rport:
kfree(newrec);
+out_lport_put:
+ nvme_fc_lport_put(lport);
out_reghost_failed:
*portptr = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
-static void
-nvme_fc_free_rport(struct kref *ref)
-{
- struct nvme_fc_rport *rport =
- container_of(ref, struct nvme_fc_rport, ref);
- struct nvme_fc_lport *lport =
- localport_to_lport(rport->remoteport.localport);
- unsigned long flags;
-
- WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
- WARN_ON(!list_empty(&rport->ctrl_list));
-
- /* remove from lport list */
- spin_lock_irqsave(&nvme_fc_lock, flags);
- list_del(&rport->endp_list);
- spin_unlock_irqrestore(&nvme_fc_lock, flags);
-
- /* let the LLDD know we've finished tearing it down */
- lport->ops->remoteport_delete(&rport->remoteport);
-
- ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
-
- kfree(rport);
-
- nvme_fc_lport_put(lport);
-}
-
-static void
-nvme_fc_rport_put(struct nvme_fc_rport *rport)
-{
- kref_put(&rport->ref, nvme_fc_free_rport);
-}
-
-static int
-nvme_fc_rport_get(struct nvme_fc_rport *rport)
-{
- return kref_get_unless_zero(&rport->ref);
-}
-
static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
@@ -592,6 +752,58 @@ restart:
return 0;
}
+static void
+nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
+{
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: controller connectivity lost. Awaiting "
+ "Reconnect", ctrl->cnum);
+
+ switch (ctrl->ctrl.state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_LIVE:
+ /*
+ * Schedule a controller reset. The reset will terminate the
+ * association and schedule the reconnect timer. Reconnects
+ * will be attempted until either the ctlr_loss_tmo
+ * (max_retries * connect_delay) expires or the remoteport's
+ * dev_loss_tmo expires.
+ */
+ if (nvme_reset_ctrl(&ctrl->ctrl)) {
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: Couldn't schedule reset. "
+ "Deleting controller.\n",
+ ctrl->cnum);
+ nvme_delete_ctrl(&ctrl->ctrl);
+ }
+ break;
+
+ case NVME_CTRL_RECONNECTING:
+ /*
+ * The association has already been terminated and the
+ * controller is attempting reconnects. No need to do anything
+ * further. Reconnects will be attempted until either the
+ * ctlr_loss_tmo (max_retries * connect_delay) expires or the
+ * remoteport's dev_loss_tmo expires.
+ */
+ break;
+
+ case NVME_CTRL_RESETTING:
+ /*
+ * Controller is already in the process of terminating the
+ * association. No need to do anything further. The reconnect
+ * step will kick in naturally after the association is
+ * terminated.
+ */
+ break;
+
+ case NVME_CTRL_DELETING:
+ default:
+ /* no action to take - let it delete */
+ break;
+ }
+}
+
/**
* nvme_fc_unregister_remoteport - transport entry point called by an
* LLDD to deregister/remove a previously
@@ -621,19 +833,78 @@ nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
}
portptr->port_state = FC_OBJSTATE_DELETED;
- /* tear down all associations to the remote port */
- list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
- __nvme_fc_del_ctrl(ctrl);
+ rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);
+
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
+ /* if dev_loss_tmo==0, dev loss is immediate */
+ if (!portptr->dev_loss_tmo) {
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: controller connectivity lost. "
+ "Deleting controller.\n",
+ ctrl->cnum);
+ nvme_delete_ctrl(&ctrl->ctrl);
+ } else
+ nvme_fc_ctrl_connectivity_loss(ctrl);
+ }
spin_unlock_irqrestore(&rport->lock, flags);
nvme_fc_abort_lsops(rport);
+ if (atomic_read(&rport->act_ctrl_cnt) == 0)
+ rport->lport->ops->remoteport_delete(portptr);
+
+ /*
+ * release the reference. Once all controllers go away, which
+ * should only occur after dev_loss_tmo expires, this allows the
+ * rport to be torn down.
+ */
nvme_fc_rport_put(rport);
+
return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
+/**
+ * nvme_fc_rescan_remoteport - transport entry point called by an
+ * LLDD to request a nvme device rescan.
+ * @remoteport: pointer to the (registered) remote port that is to be
+ * rescanned.
+ *
+ * Returns: N/A
+ */
+void
+nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
+{
+ struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);
+
+ nvme_fc_signal_discovery_scan(rport->lport, rport);
+}
+EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
+
+int
+nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
+ u32 dev_loss_tmo)
+{
+ struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rport->lock, flags);
+
+ if (portptr->port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ return -EINVAL;
+ }
+
+ /* a dev_loss_tmo of 0 (immediate) is allowed to be set */
+ rport->remoteport.dev_loss_tmo = dev_loss_tmo;
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
+
/* *********************** FC-NVME DMA Handling **************************** */
@@ -723,7 +994,6 @@ fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
dma_unmap_sg(dev, sg, nents, dir);
}
-
/* *********************** FC-NVME LS Handling **************************** */
static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
@@ -1266,7 +1536,7 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
unsigned long flags;
int i, ret;
- for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+ for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
continue;
@@ -1331,7 +1601,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
struct nvme_command *sqe = &op->cmd_iu.sqe;
__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
union nvme_result result;
- bool complete_rq, terminate_assoc = true;
+ bool terminate_assoc = true;
/*
* WARNING:
@@ -1373,8 +1643,9 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
sizeof(op->rsp_iu), DMA_FROM_DEVICE);
- if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
- status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
+ if (atomic_read(&op->state) == FCPOP_STATE_ABORTED ||
+ op->flags & FCOP_FLAGS_TERMIO)
+ status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
else if (freq->status)
status = cpu_to_le16(NVME_SC_INTERNAL << 1);
@@ -1438,23 +1709,27 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
done:
if (op->flags & FCOP_FLAGS_AEN) {
nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
- complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
+ __nvme_fc_fcpop_chk_teardowns(ctrl, op);
atomic_set(&op->state, FCPOP_STATE_IDLE);
op->flags = FCOP_FLAGS_AEN; /* clear other flags */
nvme_fc_ctrl_put(ctrl);
goto check_error;
}
- complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
- if (!complete_rq) {
- if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
- status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
- if (blk_queue_dying(rq->q))
- status |= cpu_to_le16(NVME_SC_DNR << 1);
- }
- nvme_end_request(rq, status, result);
- } else
+ /*
+ * Force failures of commands if we're killing the controller
+ * or have an error on a command used to create a new association
+ */
+ if (status &&
+ (blk_queue_dying(rq->q) ||
+ ctrl->ctrl.state == NVME_CTRL_NEW ||
+ ctrl->ctrl.state == NVME_CTRL_RECONNECTING))
+ status |= cpu_to_le16(NVME_SC_DNR << 1);
+
+ if (__nvme_fc_fcpop_chk_teardowns(ctrl, op))
__nvme_fc_final_op_cleanup(rq);
+ else
+ nvme_end_request(rq, status, result);
check_error:
if (terminate_assoc)
@@ -1531,7 +1806,7 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
int i, ret;
aen_op = ctrl->aen_ops;
- for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+ for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
GFP_KERNEL);
if (!private)
@@ -1541,7 +1816,7 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
sqe = &cmdiu->sqe;
ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
aen_op, (struct request *)NULL,
- (AEN_CMDID_BASE + i));
+ (NVME_AQ_BLK_MQ_DEPTH + i));
if (ret) {
kfree(private);
return ret;
@@ -1554,7 +1829,7 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
memset(sqe, 0, sizeof(*sqe));
sqe->common.opcode = nvme_admin_async_event;
/* Note: core layer may overwrite the sqe.command_id value */
- sqe->common.command_id = AEN_CMDID_BASE + i;
+ sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
}
return 0;
}
@@ -1566,7 +1841,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
int i;
aen_op = ctrl->aen_ops;
- for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+ for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
if (!aen_op->fcp_req.private)
continue;
@@ -1610,7 +1885,7 @@ nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
}
static void
-nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
+nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
struct nvme_fc_queue *queue;
@@ -1626,8 +1901,6 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
else
queue->cmnd_capsule_len = sizeof(struct nvme_command);
- queue->queue_size = queue_size;
-
/*
* Considered whether we should allocate buffers for all SQEs
* and CQEs and dma map them - mapping their respective entries
@@ -1751,7 +2024,7 @@ nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
int i;
for (i = 1; i < ctrl->ctrl.queue_count; i++)
- nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+ nvme_fc_init_queue(ctrl, i);
}
static void
@@ -1825,13 +2098,6 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: resetting controller\n", ctrl->cnum);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
- dev_err(ctrl->ctrl.device,
- "NVME-FC{%d}: error_recovery: Couldn't change state "
- "to RECONNECTING\n", ctrl->cnum);
- return;
- }
-
nvme_reset_ctrl(&ctrl->ctrl);
}
@@ -1842,13 +2108,14 @@ nvme_fc_timeout(struct request *rq, bool reserved)
struct nvme_fc_ctrl *ctrl = op->ctrl;
int ret;
- if (reserved)
+ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
+ atomic_read(&op->state) == FCPOP_STATE_ABORTED)
return BLK_EH_RESET_TIMER;
ret = __nvme_fc_abort_op(ctrl, op);
if (ret)
- /* io wasn't active to abort consider it done */
- return BLK_EH_HANDLED;
+ /* io wasn't active to abort */
+ return BLK_EH_NOT_HANDLED;
/*
* we can't individually ABTS an io without affecting the queue,
@@ -1859,7 +2126,12 @@ nvme_fc_timeout(struct request *rq, bool reserved)
*/
nvme_fc_error_recovery(ctrl, "io timeout error");
- return BLK_EH_HANDLED;
+ /*
+ * the io abort has been initiated. Restart the reset timer; the
+ * abort completion will complete the io shortly. This avoids a
+ * synchronous wait while the abort finishes.
+ */
+ return BLK_EH_RESET_TIMER;
}
static int
@@ -2110,7 +2382,7 @@ nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
}
static void
-nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
struct nvme_fc_fcp_op *aen_op;
@@ -2118,9 +2390,6 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
bool terminating = false;
blk_status_t ret;
- if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
- return;
-
spin_lock_irqsave(&ctrl->lock, flags);
if (ctrl->flags & FCCTRL_TERMIO)
terminating = true;
@@ -2129,13 +2398,13 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
if (terminating)
return;
- aen_op = &ctrl->aen_ops[aer_idx];
+ aen_op = &ctrl->aen_ops[0];
ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
NVMEFC_FCP_NODATA);
if (ret)
dev_err(ctrl->ctrl.device,
- "failed async event work [%d]\n", aer_idx);
+ "failed async event work\n");
}
static void
@@ -2337,7 +2606,7 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
nvme_fc_init_io_queues(ctrl);
- ret = blk_mq_reinit_tagset(&ctrl->tag_set, nvme_fc_reinit_request);
+ ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
if (ret)
goto out_free_io_queues;
@@ -2360,6 +2629,61 @@ out_free_io_queues:
return ret;
}
+static void
+nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
+{
+ struct nvme_fc_lport *lport = rport->lport;
+
+ atomic_inc(&lport->act_rport_cnt);
+}
+
+static void
+nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
+{
+ struct nvme_fc_lport *lport = rport->lport;
+ u32 cnt;
+
+ cnt = atomic_dec_return(&lport->act_rport_cnt);
+ if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
+ lport->ops->localport_delete(&lport->localport);
+}
+
+static int
+nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_rport *rport = ctrl->rport;
+ u32 cnt;
+
+ if (ctrl->assoc_active)
+ return 1;
+
+ ctrl->assoc_active = true;
+ cnt = atomic_inc_return(&rport->act_ctrl_cnt);
+ if (cnt == 1)
+ nvme_fc_rport_active_on_lport(rport);
+
+ return 0;
+}
+
+static int
+nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_rport *rport = ctrl->rport;
+ struct nvme_fc_lport *lport = rport->lport;
+ u32 cnt;
+
+ /* ctrl->assoc_active=false will be set independently */
+
+ cnt = atomic_dec_return(&rport->act_ctrl_cnt);
+ if (cnt == 0) {
+ if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
+ lport->ops->remoteport_delete(&rport->remoteport);
+ nvme_fc_rport_inactive_on_lport(rport);
+ }
+
+ return 0;
+}
+
/*
* This routine restarts the controller on the host side, and
* on the link side, recreates the controller association.
@@ -2368,26 +2692,31 @@ static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
- u32 segs;
int ret;
bool changed;
++ctrl->ctrl.nr_reconnects;
+ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+ return -ENODEV;
+
+ if (nvme_fc_ctlr_active_on_rport(ctrl))
+ return -ENOTUNIQ;
+
/*
* Create the admin queue
*/
- nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
+ nvme_fc_init_queue(ctrl, 0);
ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
- NVME_FC_AQ_BLKMQ_DEPTH);
+ NVME_AQ_BLK_MQ_DEPTH);
if (ret)
goto out_free_queue;
ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
- NVME_FC_AQ_BLKMQ_DEPTH,
- (NVME_FC_AQ_BLKMQ_DEPTH / 4));
+ NVME_AQ_BLK_MQ_DEPTH,
+ (NVME_AQ_BLK_MQ_DEPTH / 4));
if (ret)
goto out_delete_hw_queue;
@@ -2419,9 +2748,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_disconnect_admin_queue;
- segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
- ctrl->lport->ops->max_sgl_segments);
- ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);
+ ctrl->ctrl.max_hw_sectors =
+ (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);
ret = nvme_init_identify(&ctrl->ctrl);
if (ret)
@@ -2465,11 +2793,11 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
}
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- WARN_ON_ONCE(!changed);
ctrl->ctrl.nr_reconnects = 0;
- nvme_start_ctrl(&ctrl->ctrl);
+ if (changed)
+ nvme_start_ctrl(&ctrl->ctrl);
return 0; /* Success */
@@ -2482,6 +2810,8 @@ out_delete_hw_queue:
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
nvme_fc_free_queue(&ctrl->queues[0]);
+ ctrl->assoc_active = false;
+ nvme_fc_ctlr_inactive_on_rport(ctrl);
return ret;
}
@@ -2497,6 +2827,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
unsigned long flags;
+ if (!ctrl->assoc_active)
+ return;
+ ctrl->assoc_active = false;
+
spin_lock_irqsave(&ctrl->lock, flags);
ctrl->flags |= FCCTRL_TERMIO;
ctrl->iocnt = 0;
@@ -2537,7 +2871,8 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
* use blk_mq_tagset_busy_itr() and the transport routine to
* terminate the exchanges.
*/
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ if (ctrl->ctrl.state != NVME_CTRL_NEW)
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
@@ -2568,102 +2903,64 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
nvme_fc_free_queue(&ctrl->queues[0]);
+
+ nvme_fc_ctlr_inactive_on_rport(ctrl);
}
static void
-nvme_fc_delete_ctrl_work(struct work_struct *work)
+nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
- struct nvme_fc_ctrl *ctrl =
- container_of(work, struct nvme_fc_ctrl, delete_work);
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- cancel_work_sync(&ctrl->ctrl.reset_work);
cancel_delayed_work_sync(&ctrl->connect_work);
- nvme_stop_ctrl(&ctrl->ctrl);
- nvme_remove_namespaces(&ctrl->ctrl);
/*
* kill the association on the link side. this will block
* waiting for io to terminate
*/
nvme_fc_delete_association(ctrl);
-
- /*
- * tear down the controller
- * After the last reference on the nvme ctrl is removed,
- * the transport nvme_fc_nvme_ctrl_freed() callback will be
- * invoked. From there, the transport will tear down it's
- * logical queues and association.
- */
- nvme_uninit_ctrl(&ctrl->ctrl);
-
- nvme_put_ctrl(&ctrl->ctrl);
-}
-
-static bool
-__nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl)
-{
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
- return true;
-
- if (!queue_work(nvme_wq, &ctrl->delete_work))
- return true;
-
- return false;
-}
-
-static int
-__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
-{
- return __nvme_fc_schedule_delete_work(ctrl) ? -EBUSY : 0;
-}
-
-/*
- * Request from nvme core layer to delete the controller
- */
-static int
-nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
-{
- struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- int ret;
-
- if (!kref_get_unless_zero(&ctrl->ctrl.kref))
- return -EBUSY;
-
- ret = __nvme_fc_del_ctrl(ctrl);
-
- if (!ret)
- flush_workqueue(nvme_wq);
-
- nvme_put_ctrl(&ctrl->ctrl);
-
- return ret;
}
static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
- /* If we are resetting/deleting then do nothing */
- if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
- WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
- ctrl->ctrl.state == NVME_CTRL_LIVE);
- return;
- }
+ struct nvme_fc_rport *rport = ctrl->rport;
+ struct nvme_fc_remote_port *portptr = &rport->remoteport;
+ unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
+ bool recon = true;
- dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
- ctrl->cnum, status);
+ if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING)
+ return;
- if (nvmf_should_reconnect(&ctrl->ctrl)) {
+ if (portptr->port_state == FC_OBJSTATE_ONLINE)
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
- ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
- queue_delayed_work(nvme_wq, &ctrl->connect_work,
- ctrl->ctrl.opts->reconnect_delay * HZ);
+ "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
+ ctrl->cnum, status);
+ else if (time_after_eq(jiffies, rport->dev_loss_end))
+ recon = false;
+
+ if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
+ if (portptr->port_state == FC_OBJSTATE_ONLINE)
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: Reconnect attempt in %ld "
+ "seconds\n",
+ ctrl->cnum, recon_delay / HZ);
+ else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
+ recon_delay = rport->dev_loss_end - jiffies;
+
+ queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
} else {
- dev_warn(ctrl->ctrl.device,
+ if (portptr->port_state == FC_OBJSTATE_ONLINE)
+ dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: Max reconnect attempts (%d) "
"reached. Removing controller\n",
ctrl->cnum, ctrl->ctrl.nr_reconnects);
- WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
+ else
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: dev_loss_tmo (%d) expired "
+ "while waiting for remoteport connectivity. "
+ "Removing controller\n", ctrl->cnum,
+ portptr->dev_loss_tmo);
+ WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
}
}
@@ -2675,15 +2972,28 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
int ret;
nvme_stop_ctrl(&ctrl->ctrl);
+
/* will block while waiting for io to terminate */
nvme_fc_delete_association(ctrl);
- ret = nvme_fc_create_association(ctrl);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: error_recovery: Couldn't change state "
+ "to RECONNECTING\n", ctrl->cnum);
+ return;
+ }
+
+ if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
+ ret = nvme_fc_create_association(ctrl);
+ else
+ ret = -ENOTCONN;
+
if (ret)
nvme_fc_reconnect_or_delete(ctrl, ret);
else
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
+ "NVME-FC{%d}: controller reset complete\n",
+ ctrl->cnum);
}
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
@@ -2695,8 +3005,9 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.reg_write32 = nvmf_reg_write32,
.free_ctrl = nvme_fc_nvme_ctrl_freed,
.submit_async_event = nvme_fc_submit_async_event,
- .delete_ctrl = nvme_fc_del_nvme_ctrl,
+ .delete_ctrl = nvme_fc_delete_ctrl,
.get_address = nvmf_get_address,
+ .reinit_request = nvme_fc_reinit_request,
};
static void
@@ -2728,6 +3039,33 @@ static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
};
+/*
+ * Fails a controller request if it matches an existing controller
+ * (association) with the same tuple:
+ * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
+ *
+ * The ports don't need to be compared as they are intrinsically
+ * already matched by the port pointers supplied.
+ */
+static bool
+nvme_fc_existing_controller(struct nvme_fc_rport *rport,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&rport->lock, flags);
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
+ found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
+ if (found)
+ break;
+ }
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ return found;
+}
+
static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
@@ -2742,6 +3080,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_fail;
}
+ if (!opts->duplicate_connect &&
+ nvme_fc_existing_controller(rport, opts)) {
+ ret = -EALREADY;
+ goto out_fail;
+ }
+
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl) {
ret = -ENOMEM;
@@ -2760,12 +3104,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->rport = rport;
ctrl->dev = lport->dev;
ctrl->cnum = idx;
+ ctrl->assoc_active = false;
init_waitqueue_head(&ctrl->ioabort_wait);
get_device(ctrl->dev);
kref_init(&ctrl->ref);
- INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
spin_lock_init(&ctrl->lock);
@@ -2787,7 +3131,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
- ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
+ ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
@@ -2797,6 +3141,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->admin_tag_set.driver_data = ctrl;
ctrl->admin_tag_set.nr_hw_queues = 1;
ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+ ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
if (ret)
@@ -2878,7 +3223,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
return ERR_PTR(ret);
}
- kref_get(&ctrl->ctrl.kref);
+ nvme_get_ctrl(&ctrl->ctrl);
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
@@ -3026,7 +3371,50 @@ static struct nvmf_transport_ops nvme_fc_transport = {
static int __init nvme_fc_init_module(void)
{
- return nvmf_register_transport(&nvme_fc_transport);
+ int ret;
+
+ /*
+ * NOTE:
+ * It is expected that in the future the kernel will combine
+ * the FC-isms that are currently under scsi and now being
+ * added to by NVME into a new standalone FC class. The SCSI
+ * and NVME protocols and their devices would be under this
+ * new FC class.
+ *
+ * As we need something to post FC-specific udev events to,
+ * specifically for nvme probe events, start by creating the
+ * new device class. When the new standalone FC class is
+ * put in place, this code will move to a more generic
+ * location for the class.
+ */
+ fc_class = class_create(THIS_MODULE, "fc");
+ if (IS_ERR(fc_class)) {
+ pr_err("couldn't register class fc\n");
+ return PTR_ERR(fc_class);
+ }
+
+ /*
+ * Create a device for the FC-centric udev events
+ */
+ fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL,
+ "fc_udev_device");
+ if (IS_ERR(fc_udev_device)) {
+ pr_err("couldn't create fc_udev device!\n");
+ ret = PTR_ERR(fc_udev_device);
+ goto out_destroy_class;
+ }
+
+ ret = nvmf_register_transport(&nvme_fc_transport);
+ if (ret)
+ goto out_destroy_device;
+
+ return 0;
+
+out_destroy_device:
+ device_destroy(fc_class, MKDEV(0, 0));
+out_destroy_class:
+ class_destroy(fc_class);
+ return ret;
}
static void __exit nvme_fc_exit_module(void)
@@ -3039,6 +3427,9 @@ static void __exit nvme_fc_exit_module(void)
ida_destroy(&nvme_fc_local_port_cnt);
ida_destroy(&nvme_fc_ctrl_cnt);
+
+ device_destroy(fc_class, MKDEV(0, 0));
+ class_destroy(fc_class);
}
module_init(nvme_fc_init_module);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 1f79e3f141e6..ba3d7f3349e5 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -305,7 +305,7 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
int ret;
c.identity.opcode = nvme_nvm_admin_identity;
- c.identity.nsid = cpu_to_le32(ns->ns_id);
+ c.identity.nsid = cpu_to_le32(ns->head->ns_id);
c.identity.chnl_off = 0;
nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
@@ -344,7 +344,7 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
int ret = 0;
c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
- c.l2p.nsid = cpu_to_le32(ns->ns_id);
+ c.l2p.nsid = cpu_to_le32(ns->head->ns_id);
entries = kmalloc(len, GFP_KERNEL);
if (!entries)
return -ENOMEM;
@@ -402,7 +402,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
int ret = 0;
c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
- c.get_bb.nsid = cpu_to_le32(ns->ns_id);
+ c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
c.get_bb.spba = cpu_to_le64(ppa.ppa);
bb_tbl = kzalloc(tblsz, GFP_KERNEL);
@@ -452,7 +452,7 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
int ret = 0;
c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
- c.set_bb.nsid = cpu_to_le32(ns->ns_id);
+ c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
c.set_bb.spba = cpu_to_le64(ppas->ppa);
c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
c.set_bb.value = type;
@@ -469,7 +469,7 @@ static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
struct nvme_nvm_command *c)
{
c->ph_rw.opcode = rqd->opcode;
- c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
+ c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
c->ph_rw.control = cpu_to_le16(rqd->flags);
@@ -492,34 +492,47 @@ static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
blk_mq_free_request(rq);
}
-static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+static struct request *nvme_nvm_alloc_request(struct request_queue *q,
+ struct nvm_rq *rqd,
+ struct nvme_nvm_command *cmd)
{
- struct request_queue *q = dev->q;
struct nvme_ns *ns = q->queuedata;
struct request *rq;
- struct bio *bio = rqd->bio;
- struct nvme_nvm_command *cmd;
-
- cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
nvme_nvm_rqtocmd(rqd, ns, cmd);
rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
- if (IS_ERR(rq)) {
- kfree(cmd);
- return PTR_ERR(rq);
- }
+ if (IS_ERR(rq))
+ return rq;
+
rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
- if (bio) {
- blk_init_request_from_bio(rq, bio);
+ if (rqd->bio) {
+ blk_init_request_from_bio(rq, rqd->bio);
} else {
rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
rq->__data_len = 0;
}
+ return rq;
+}
+
+static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+ struct request_queue *q = dev->q;
+ struct nvme_nvm_command *cmd;
+ struct request *rq;
+
+ cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ rq = nvme_nvm_alloc_request(q, rqd, cmd);
+ if (IS_ERR(rq)) {
+ kfree(cmd);
+ return PTR_ERR(rq);
+ }
+
rq->end_io_data = rqd;
blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
@@ -527,6 +540,34 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
return 0;
}
+static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+ struct request_queue *q = dev->q;
+ struct request *rq;
+ struct nvme_nvm_command cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(struct nvme_nvm_command));
+
+ rq = nvme_nvm_alloc_request(q, rqd, &cmd);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ /* I/Os can fail and the error is signaled through rqd. Callers must
+ * handle the error accordingly.
+ */
+ blk_execute_rq(q, NULL, rq, 0);
+ if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
+ ret = -EINTR;
+
+ rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
+ rqd->error = nvme_req(rq)->status;
+
+ blk_mq_free_request(rq);
+
+ return ret;
+}
+
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
@@ -562,6 +603,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
.set_bb_tbl = nvme_nvm_set_bb_tbl,
.submit_io = nvme_nvm_submit_io,
+ .submit_io_sync = nvme_nvm_submit_io_sync,
.create_dma_pool = nvme_nvm_create_dma_pool,
.destroy_dma_pool = nvme_nvm_destroy_dma_pool,
@@ -600,8 +642,6 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
- rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
-
if (ppa_buf && ppa_len) {
ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
if (!ppa_list) {
@@ -691,7 +731,7 @@ static int nvme_nvm_submit_vio(struct nvme_ns *ns,
memset(&c, 0, sizeof(c));
c.ph_rw.opcode = vio.opcode;
- c.ph_rw.nsid = cpu_to_le32(ns->ns_id);
+ c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
c.ph_rw.control = cpu_to_le16(vio.control);
c.ph_rw.length = cpu_to_le16(vio.nppas);
@@ -728,7 +768,7 @@ static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
memset(&c, 0, sizeof(c));
c.common.opcode = vcmd.opcode;
- c.common.nsid = cpu_to_le32(ns->ns_id);
+ c.common.nsid = cpu_to_le32(ns->head->ns_id);
c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
/* cdw11-12 */
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
new file mode 100644
index 000000000000..78d92151a904
--- /dev/null
+++ b/drivers/nvme/host/multipath.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2017 Christoph Hellwig.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/moduleparam.h>
+#include "nvme.h"
+
+static bool multipath = true;
+module_param(multipath, bool, 0644);
+MODULE_PARM_DESC(multipath,
+ "turn on native support for multiple controllers per subsystem");
+
+void nvme_failover_req(struct request *req)
+{
+ struct nvme_ns *ns = req->q->queuedata;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ns->head->requeue_lock, flags);
+ blk_steal_bios(&ns->head->requeue_list, req);
+ spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
+ blk_mq_end_request(req, 0);
+
+ nvme_reset_ctrl(ns->ctrl);
+ kblockd_schedule_work(&ns->head->requeue_work);
+}
+
+bool nvme_req_needs_failover(struct request *req)
+{
+ if (!(req->cmd_flags & REQ_NVME_MPATH))
+ return false;
+
+ switch (nvme_req(req)->status & 0x7ff) {
+ /*
+ * Generic command status:
+ */
+ case NVME_SC_INVALID_OPCODE:
+ case NVME_SC_INVALID_FIELD:
+ case NVME_SC_INVALID_NS:
+ case NVME_SC_LBA_RANGE:
+ case NVME_SC_CAP_EXCEEDED:
+ case NVME_SC_RESERVATION_CONFLICT:
+ return false;
+
+ /*
+ * I/O command set specific error. Unfortunately these values are
+ * reused for fabrics commands, but those should never get here.
+ */
+ case NVME_SC_BAD_ATTRIBUTES:
+ case NVME_SC_INVALID_PI:
+ case NVME_SC_READ_ONLY:
+ case NVME_SC_ONCS_NOT_SUPPORTED:
+ WARN_ON_ONCE(nvme_req(req)->cmd->common.opcode ==
+ nvme_fabrics_command);
+ return false;
+
+ /*
+ * Media and Data Integrity Errors:
+ */
+ case NVME_SC_WRITE_FAULT:
+ case NVME_SC_READ_ERROR:
+ case NVME_SC_GUARD_CHECK:
+ case NVME_SC_APPTAG_CHECK:
+ case NVME_SC_REFTAG_CHECK:
+ case NVME_SC_COMPARE_FAILED:
+ case NVME_SC_ACCESS_DENIED:
+ case NVME_SC_UNWRITTEN_BLOCK:
+ return false;
+ }
+
+ /* Everything else could be a path failure, so should be retried */
+ return true;
+}
+
+void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ if (ns->head->disk)
+ kblockd_schedule_work(&ns->head->requeue_work);
+ }
+ mutex_unlock(&ctrl->namespaces_mutex);
+}
+
+static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head)
+{
+ struct nvme_ns *ns;
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ if (ns->ctrl->state == NVME_CTRL_LIVE) {
+ rcu_assign_pointer(head->current_path, ns);
+ return ns;
+ }
+ }
+
+ return NULL;
+}
+
+inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
+{
+ struct nvme_ns *ns = srcu_dereference(head->current_path, &head->srcu);
+
+ if (unlikely(!ns || ns->ctrl->state != NVME_CTRL_LIVE))
+ ns = __nvme_find_path(head);
+ return ns;
+}
+
+static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
+ struct bio *bio)
+{
+ struct nvme_ns_head *head = q->queuedata;
+ struct device *dev = disk_to_dev(head->disk);
+ struct nvme_ns *ns;
+ blk_qc_t ret = BLK_QC_T_NONE;
+ int srcu_idx;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (likely(ns)) {
+ bio->bi_disk = ns->disk;
+ bio->bi_opf |= REQ_NVME_MPATH;
+ ret = direct_make_request(bio);
+ } else if (!list_empty_careful(&head->list)) {
+ dev_warn_ratelimited(dev, "no path available - requeing I/O\n");
+
+ spin_lock_irq(&head->requeue_lock);
+ bio_list_add(&head->requeue_list, bio);
+ spin_unlock_irq(&head->requeue_lock);
+ } else {
+ dev_warn_ratelimited(dev, "no path - failing I/O\n");
+
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+ }
+
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
+static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
+{
+ struct nvme_ns_head *head = q->queuedata;
+ struct nvme_ns *ns;
+ bool found = false;
+ int srcu_idx;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = srcu_dereference(head->current_path, &head->srcu);
+ if (likely(ns && ns->ctrl->state == NVME_CTRL_LIVE))
+ found = ns->queue->poll_fn(q, qc);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return found;
+}
+
+static void nvme_requeue_work(struct work_struct *work)
+{
+ struct nvme_ns_head *head =
+ container_of(work, struct nvme_ns_head, requeue_work);
+ struct bio *bio, *next;
+
+ spin_lock_irq(&head->requeue_lock);
+ next = bio_list_get(&head->requeue_list);
+ spin_unlock_irq(&head->requeue_lock);
+
+ while ((bio = next) != NULL) {
+ next = bio->bi_next;
+ bio->bi_next = NULL;
+
+ /*
+ * Reset disk to the mpath node and resubmit to select a new
+ * path.
+ */
+ bio->bi_disk = head->disk;
+ generic_make_request(bio);
+ }
+}
+
+int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
+{
+ struct request_queue *q;
+ bool vwc = false;
+
+ bio_list_init(&head->requeue_list);
+ spin_lock_init(&head->requeue_lock);
+ INIT_WORK(&head->requeue_work, nvme_requeue_work);
+
+ /*
+ * Add a multipath node if the subsystem supports multiple controllers.
+ * We also do this for private namespaces as the namespace sharing data could
+ * change after a rescan.
+ */
+ if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
+ return 0;
+
+ q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
+ if (!q)
+ goto out;
+ q->queuedata = head;
+ blk_queue_make_request(q, nvme_ns_head_make_request);
+ q->poll_fn = nvme_ns_head_poll;
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ /* set to a default value of 512 until the disk is validated */
+ blk_queue_logical_block_size(q, 512);
+
+ /* we need to propagate up the VWC settings */
+ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+ vwc = true;
+ blk_queue_write_cache(q, vwc, vwc);
+
+ head->disk = alloc_disk(0);
+ if (!head->disk)
+ goto out_cleanup_queue;
+ head->disk->fops = &nvme_ns_head_ops;
+ head->disk->private_data = head;
+ head->disk->queue = q;
+ head->disk->flags = GENHD_FL_EXT_DEVT;
+ sprintf(head->disk->disk_name, "nvme%dn%d",
+ ctrl->subsys->instance, head->instance);
+ return 0;
+
+out_cleanup_queue:
+ blk_cleanup_queue(q);
+out:
+ return -ENOMEM;
+}
+
+void nvme_mpath_add_disk(struct nvme_ns_head *head)
+{
+ if (!head->disk)
+ return;
+ device_add_disk(&head->subsys->dev, head->disk);
+ if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
+ &nvme_ns_id_attr_group))
+ pr_warn("%s: failed to create sysfs group for identification\n",
+ head->disk->disk_name);
+}
+
+void nvme_mpath_add_disk_links(struct nvme_ns *ns)
+{
+ struct kobject *slave_disk_kobj, *holder_disk_kobj;
+
+ if (!ns->head->disk)
+ return;
+
+ slave_disk_kobj = &disk_to_dev(ns->disk)->kobj;
+ if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj,
+ kobject_name(slave_disk_kobj)))
+ return;
+
+ holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj;
+ if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj,
+ kobject_name(holder_disk_kobj)))
+ sysfs_remove_link(ns->head->disk->slave_dir,
+ kobject_name(slave_disk_kobj));
+}
+
+void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+{
+ if (!head->disk)
+ return;
+ sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
+ &nvme_ns_id_attr_group);
+ del_gendisk(head->disk);
+ blk_set_queue_dying(head->disk->queue);
+ /* make sure all pending bios are cleaned up */
+ kblockd_schedule_work(&head->requeue_work);
+ flush_work(&head->requeue_work);
+ blk_cleanup_queue(head->disk->queue);
+ put_disk(head->disk);
+}
+
+void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
+{
+ if (!ns->head->disk)
+ return;
+
+ sysfs_remove_link(ns->disk->part0.holder_dir,
+ kobject_name(&disk_to_dev(ns->head->disk)->kobj));
+ sysfs_remove_link(ns->head->disk->slave_dir,
+ kobject_name(&disk_to_dev(ns->disk)->kobj));
+}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index d3f3c4447515..c0873a68872f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -15,16 +15,17 @@
#define _NVME_H
#include <linux/nvme.h>
+#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
-extern unsigned char nvme_io_timeout;
+extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
-extern unsigned char admin_timeout;
+extern unsigned int admin_timeout;
#define ADMIN_TIMEOUT (admin_timeout * HZ)
#define NVME_DEFAULT_KATO 5
@@ -94,6 +95,11 @@ struct nvme_request {
u16 status;
};
+/*
+ * Mark a bio as coming in through the mpath node.
+ */
+#define REQ_NVME_MPATH REQ_DRV
+
enum {
NVME_REQ_CANCELLED = (1 << 0),
};
@@ -127,24 +133,23 @@ struct nvme_ctrl {
struct request_queue *admin_q;
struct request_queue *connect_q;
struct device *dev;
- struct kref kref;
int instance;
struct blk_mq_tag_set *tagset;
struct blk_mq_tag_set *admin_tagset;
struct list_head namespaces;
struct mutex namespaces_mutex;
+ struct device ctrl_device;
struct device *device; /* char device */
- struct list_head node;
- struct ida ns_ida;
+ struct cdev cdev;
struct work_struct reset_work;
+ struct work_struct delete_work;
+
+ struct nvme_subsystem *subsys;
+ struct list_head subsys_entry;
struct opal_dev *opal_dev;
char name[12];
- char serial[20];
- char model[40];
- char firmware_rev[8];
- char subnqn[NVMF_NQN_SIZE];
u16 cntlid;
u32 ctrl_config;
@@ -155,23 +160,23 @@ struct nvme_ctrl {
u32 page_size;
u32 max_hw_sectors;
u16 oncs;
- u16 vid;
u16 oacs;
u16 nssa;
u16 nr_streams;
atomic_t abort_limit;
- u8 event_limit;
u8 vwc;
u32 vs;
u32 sgls;
u16 kas;
u8 npss;
u8 apsta;
+ u32 aen_result;
unsigned int shutdown_timeout;
unsigned int kato;
bool subsystem;
unsigned long quirks;
struct nvme_id_power_state psd[32];
+ struct nvme_effects_log *effects;
struct work_struct scan_work;
struct work_struct async_event_work;
struct delayed_work ka_work;
@@ -197,21 +202,72 @@ struct nvme_ctrl {
struct nvmf_ctrl_options *opts;
};
+struct nvme_subsystem {
+ int instance;
+ struct device dev;
+ /*
+ * Because we unregister the device on the last put we need
+ * a separate refcount.
+ */
+ struct kref ref;
+ struct list_head entry;
+ struct mutex lock;
+ struct list_head ctrls;
+ struct list_head nsheads;
+ char subnqn[NVMF_NQN_SIZE];
+ char serial[20];
+ char model[40];
+ char firmware_rev[8];
+ u8 cmic;
+ u16 vendor_id;
+ struct ida ns_ida;
+};
+
+/*
+ * Container structure for unique namespace identifiers.
+ */
+struct nvme_ns_ids {
+ u8 eui64[8];
+ u8 nguid[16];
+ uuid_t uuid;
+};
+
+/*
+ * Anchor structure for namespaces. There is one for each namespace in an
+ * NVMe subsystem that any of our controllers can see, and the namespace
+ * structure for each controller is chained off it. For private namespaces
+ * there is a 1:1 relation to our namespace structures, that is, ->list
+ * only ever has a single entry.
+ */
+struct nvme_ns_head {
+#ifdef CONFIG_NVME_MULTIPATH
+ struct gendisk *disk;
+ struct nvme_ns __rcu *current_path;
+ struct bio_list requeue_list;
+ spinlock_t requeue_lock;
+ struct work_struct requeue_work;
+#endif
+ struct list_head list;
+ struct srcu_struct srcu;
+ struct nvme_subsystem *subsys;
+ unsigned ns_id;
+ struct nvme_ns_ids ids;
+ struct list_head entry;
+ struct kref ref;
+ int instance;
+};
+
struct nvme_ns {
struct list_head list;
struct nvme_ctrl *ctrl;
struct request_queue *queue;
struct gendisk *disk;
+ struct list_head siblings;
struct nvm_dev *ndev;
struct kref kref;
- int instance;
+ struct nvme_ns_head *head;
- u8 eui[8];
- u8 nguid[16];
- uuid_t uuid;
-
- unsigned ns_id;
int lba_shift;
u16 ms;
u16 sgs;
@@ -234,9 +290,10 @@ struct nvme_ctrl_ops {
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
void (*free_ctrl)(struct nvme_ctrl *ctrl);
- void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
- int (*delete_ctrl)(struct nvme_ctrl *ctrl);
+ void (*submit_async_event)(struct nvme_ctrl *ctrl);
+ void (*delete_ctrl)(struct nvme_ctrl *ctrl);
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
+ int (*reinit_request)(void *data, struct request *rq);
};
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
@@ -278,6 +335,16 @@ static inline void nvme_end_request(struct request *req, __le16 status,
blk_mq_complete_request(req);
}
+static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
+{
+ get_device(ctrl->device);
+}
+
+static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
+{
+ put_device(ctrl->device);
+}
+
void nvme_complete_rq(struct request *req);
void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
@@ -299,10 +366,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
bool send);
-#define NVME_NR_AERS 1
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
union nvme_result *res);
-void nvme_queue_async_events(struct nvme_ctrl *ctrl);
void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
@@ -311,21 +376,79 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
+int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set);
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, unsigned int flags, int qid);
+ struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head, int flags);
+ unsigned timeout, int qid, int at_head,
+ blk_mq_req_flags_t flags);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
+int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
+int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
+
+extern const struct attribute_group nvme_ns_id_attr_group;
+extern const struct block_device_operations nvme_ns_head_ops;
+
+#ifdef CONFIG_NVME_MULTIPATH
+void nvme_failover_req(struct request *req);
+bool nvme_req_needs_failover(struct request *req);
+void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
+int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
+void nvme_mpath_add_disk(struct nvme_ns_head *head);
+void nvme_mpath_add_disk_links(struct nvme_ns *ns);
+void nvme_mpath_remove_disk(struct nvme_ns_head *head);
+void nvme_mpath_remove_disk_links(struct nvme_ns *ns);
+
+static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+{
+ struct nvme_ns_head *head = ns->head;
+
+ if (head && ns == srcu_dereference(head->current_path, &head->srcu))
+ rcu_assign_pointer(head->current_path, NULL);
+}
+struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+#else
+static inline void nvme_failover_req(struct request *req)
+{
+}
+static inline bool nvme_req_needs_failover(struct request *req)
+{
+ return false;
+}
+static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
+{
+}
+static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
+ struct nvme_ns_head *head)
+{
+ return 0;
+}
+static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
+{
+}
+static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+{
+}
+static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns)
+{
+}
+static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
+{
+}
+static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+{
+}
+#endif /* CONFIG_NVME_MULTIPATH */
#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3f5a04c586ce..a11cfd470089 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -13,7 +13,6 @@
*/
#include <linux/aer.h>
-#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
@@ -26,12 +25,9 @@
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
-#include <linux/poison.h>
#include <linux/t10-pi.h>
-#include <linux/timer.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include <asm/unaligned.h>
#include <linux/sed-opal.h>
#include "nvme.h"
@@ -39,11 +35,7 @@
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
-/*
- * We handle AEN commands ourselves and don't even let the
- * block layer know about them.
- */
-#define NVME_AQ_BLKMQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AERS)
+#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);
@@ -57,6 +49,12 @@ module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
+static unsigned int sgl_threshold = SZ_32K;
+module_param(sgl_threshold, uint, 0644);
+MODULE_PARM_DESC(sgl_threshold,
+	"Use SGLs when average request segment size is larger than or equal to "
+ "this size. Use 0 to disable SGLs.");
+
static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
.set = io_queue_depth_set,
@@ -178,6 +176,7 @@ struct nvme_queue {
struct nvme_iod {
struct nvme_request req;
struct nvme_queue *nvmeq;
+ bool use_sgl;
int aborted;
int npages; /* In the PRP list. 0 means small pool in use */
int nents; /* Used in scatterlist */
@@ -331,17 +330,35 @@ static int nvme_npages(unsigned size, struct nvme_dev *dev)
return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}
-static unsigned int nvme_iod_alloc_size(struct nvme_dev *dev,
- unsigned int size, unsigned int nseg)
+/*
+ * Calculates the number of pages needed for the SGL segments. For example a 4k
+ * page can accommodate 256 SGL descriptors.
+ */
+static int nvme_pci_npages_sgl(unsigned int num_seg)
{
- return sizeof(__le64 *) * nvme_npages(size, dev) +
- sizeof(struct scatterlist) * nseg;
+ return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
}
-static unsigned int nvme_cmd_size(struct nvme_dev *dev)
+static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
+ unsigned int size, unsigned int nseg, bool use_sgl)
{
- return sizeof(struct nvme_iod) +
- nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
+ size_t alloc_size;
+
+ if (use_sgl)
+ alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
+ else
+ alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);
+
+ return alloc_size + sizeof(struct scatterlist) * nseg;
+}
+
+static unsigned int nvme_pci_cmd_size(struct nvme_dev *dev, bool use_sgl)
+{
+ unsigned int alloc_size = nvme_pci_iod_alloc_size(dev,
+ NVME_INT_BYTES(dev), NVME_INT_PAGES,
+ use_sgl);
+
+ return sizeof(struct nvme_iod) + alloc_size;
}
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
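For reference, the sizing arithmetic introduced above works out as in the worked example below (illustrative numbers, assuming PAGE_SIZE = 4096 and sizeof(struct nvme_sgl_desc) = 16):

	/*
	 * SGES_PER_PAGE              = 4096 / 16                     = 256
	 * nvme_pci_npages_sgl(1000)  = DIV_ROUND_UP(1000 * 16, 4096) = 4
	 *
	 * so for a 1000-segment request the SGL flavour of
	 * nvme_pci_iod_alloc_size() reserves 4 pointer slots
	 * (4 * sizeof(__le64 *)) for the descriptor pages plus
	 * 1000 * sizeof(struct scatterlist) for the SG table.
	 */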
@@ -425,10 +442,10 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
nvmeq->sq_tail = tail;
}
-static __le64 **iod_list(struct request *req)
+static void **nvme_pci_iod_list(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
+ return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}
static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
@@ -438,7 +455,10 @@ static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
unsigned int size = blk_rq_payload_bytes(rq);
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
- iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
+ size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
+ iod->use_sgl);
+
+ iod->sg = kmalloc(alloc_size, GFP_ATOMIC);
if (!iod->sg)
return BLK_STS_RESOURCE;
} else {
@@ -456,18 +476,31 @@ static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- const int last_prp = dev->ctrl.page_size / 8 - 1;
+ const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
+ dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+
int i;
- __le64 **list = iod_list(req);
- dma_addr_t prp_dma = iod->first_dma;
if (iod->npages == 0)
- dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
+ dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+ dma_addr);
+
for (i = 0; i < iod->npages; i++) {
- __le64 *prp_list = list[i];
- dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
- dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
- prp_dma = next_prp_dma;
+ void *addr = nvme_pci_iod_list(req)[i];
+
+ if (iod->use_sgl) {
+ struct nvme_sgl_desc *sg_list = addr;
+
+ next_dma_addr =
+ le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
+ } else {
+ __le64 *prp_list = addr;
+
+ next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+ }
+
+ dma_pool_free(dev->prp_page_pool, addr, dma_addr);
+ dma_addr = next_dma_addr;
}
if (iod->sg != iod->inline_sg)
@@ -555,7 +588,8 @@ static void nvme_print_sgl(struct scatterlist *sgl, int nents)
}
}
-static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
+ struct request *req, struct nvme_rw_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
@@ -566,14 +600,16 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
u32 page_size = dev->ctrl.page_size;
int offset = dma_addr & (page_size - 1);
__le64 *prp_list;
- __le64 **list = iod_list(req);
+ void **list = nvme_pci_iod_list(req);
dma_addr_t prp_dma;
int nprps, i;
+ iod->use_sgl = false;
+
length -= (page_size - offset);
if (length <= 0) {
iod->first_dma = 0;
- return BLK_STS_OK;
+ goto done;
}
dma_len -= (page_size - offset);
@@ -587,7 +623,7 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
if (length <= page_size) {
iod->first_dma = dma_addr;
- return BLK_STS_OK;
+ goto done;
}
nprps = DIV_ROUND_UP(length, page_size);
@@ -634,6 +670,10 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
dma_len = sg_dma_len(sg);
}
+done:
+ cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
+
return BLK_STS_OK;
bad_sgl:
@@ -643,6 +683,110 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
return BLK_STS_IOERR;
}
+static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
+ struct scatterlist *sg)
+{
+ sge->addr = cpu_to_le64(sg_dma_address(sg));
+ sge->length = cpu_to_le32(sg_dma_len(sg));
+ sge->type = NVME_SGL_FMT_DATA_DESC << 4;
+}
+
+static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
+ dma_addr_t dma_addr, int entries)
+{
+ sge->addr = cpu_to_le64(dma_addr);
+ if (entries < SGES_PER_PAGE) {
+ sge->length = cpu_to_le32(entries * sizeof(*sge));
+ sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
+ } else {
+ sge->length = cpu_to_le32(PAGE_SIZE);
+ sge->type = NVME_SGL_FMT_SEG_DESC << 4;
+ }
+}
+
+static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
+ struct request *req, struct nvme_rw_command *cmd)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ int length = blk_rq_payload_bytes(req);
+ struct dma_pool *pool;
+ struct nvme_sgl_desc *sg_list;
+ struct scatterlist *sg = iod->sg;
+ int entries = iod->nents, i = 0;
+ dma_addr_t sgl_dma;
+
+ iod->use_sgl = true;
+
+ /* setting the transfer type as SGL */
+ cmd->flags = NVME_CMD_SGL_METABUF;
+
+ if (length == sg_dma_len(sg)) {
+ nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
+ return BLK_STS_OK;
+ }
+
+ if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
+ pool = dev->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = dev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ iod->npages = -1;
+ return BLK_STS_RESOURCE;
+ }
+
+ nvme_pci_iod_list(req)[0] = sg_list;
+ iod->first_dma = sgl_dma;
+
+ nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
+
+ do {
+ if (i == SGES_PER_PAGE) {
+ struct nvme_sgl_desc *old_sg_desc = sg_list;
+ struct nvme_sgl_desc *link = &old_sg_desc[i - 1];
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list)
+ return BLK_STS_RESOURCE;
+
+ i = 0;
+ nvme_pci_iod_list(req)[iod->npages++] = sg_list;
+ sg_list[i++] = *link;
+ nvme_pci_sgl_set_seg(link, sgl_dma, entries);
+ }
+
+ nvme_pci_sgl_set_data(&sg_list[i++], sg);
+
+ length -= sg_dma_len(sg);
+ sg = sg_next(sg);
+ entries--;
+ } while (length > 0);
+
+ WARN_ON(entries > 0);
+ return BLK_STS_OK;
+}
+
+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ unsigned int avg_seg_size;
+
+ avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
+ blk_rq_nr_phys_segments(req));
+
+ if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
+ return false;
+ if (!iod->nvmeq->qid)
+ return false;
+ if (!sgl_threshold || avg_seg_size < sgl_threshold)
+ return false;
+ return true;
+}
+
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
@@ -662,7 +806,11 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
DMA_ATTR_NO_WARN))
goto out;
- ret = nvme_setup_prps(dev, req);
+ if (nvme_pci_use_sgls(dev, req))
+ ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
+ else
+ ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
+
if (ret != BLK_STS_OK)
goto out_unmap;
@@ -682,8 +830,6 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
goto out_unmap;
}
- cmnd->rw.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
- cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
if (blk_integrity_rq(req))
cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
return BLK_STS_OK;
@@ -804,7 +950,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
* for them but rather special case them here.
*/
if (unlikely(nvmeq->qid == 0 &&
- cqe->command_id >= NVME_AQ_BLKMQ_DEPTH)) {
+ cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
nvme_complete_async_event(&nvmeq->dev->ctrl,
cqe->status, &cqe->result);
return;
@@ -897,7 +1043,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
return __nvme_poll(nvmeq, tag);
}
-static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
+static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
struct nvme_queue *nvmeq = dev->queues[0];
@@ -905,7 +1051,7 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
memset(&c, 0, sizeof(c));
c.common.opcode = nvme_admin_async_event;
- c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx;
+ c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
spin_lock_irq(&nvmeq->q_lock);
__nvme_submit_cmd(nvmeq, &c);
@@ -930,7 +1076,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
/*
- * Note: we (ab)use the fact the the prp fields survive if no data
+ * Note: we (ab)use the fact that the prp fields survive if no data
* is attached to the request.
*/
memset(&c, 0, sizeof(c));
@@ -951,7 +1097,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
int flags = NVME_QUEUE_PHYS_CONTIG;
/*
- * Note: we (ab)use the fact the the prp fields survive if no data
+ * Note: we (ab)use the fact that the prp fields survive if no data
* is attached to the request.
*/
memset(&c, 0, sizeof(c));
@@ -1372,14 +1518,10 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->admin_tagset.ops = &nvme_mq_admin_ops;
dev->admin_tagset.nr_hw_queues = 1;
- /*
- * Subtract one to leave an empty queue entry for 'Full Queue'
- * condition. See NVM-Express 1.2 specification, section 4.1.2.
- */
- dev->admin_tagset.queue_depth = NVME_AQ_BLKMQ_DEPTH - 1;
+ dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
dev->admin_tagset.numa_node = dev_to_node(dev->dev);
- dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
+ dev->admin_tagset.cmd_size = nvme_pci_cmd_size(dev, false);
dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
dev->admin_tagset.driver_data = dev;
@@ -1906,7 +2048,11 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->tagset.numa_node = dev_to_node(dev->dev);
dev->tagset.queue_depth =
min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
- dev->tagset.cmd_size = nvme_cmd_size(dev);
+ dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false);
+ if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) {
+ dev->tagset.cmd_size = max(dev->tagset.cmd_size,
+ nvme_pci_cmd_size(dev, true));
+ }
dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tagset.driver_data = dev;
@@ -2132,9 +2278,9 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
{
dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);
- kref_get(&dev->ctrl.kref);
+ nvme_get_ctrl(&dev->ctrl);
nvme_dev_disable(dev, false);
- if (!schedule_work(&dev->remove_work))
+ if (!queue_work(nvme_wq, &dev->remove_work))
nvme_put_ctrl(&dev->ctrl);
}
@@ -2557,6 +2703,7 @@ static int __init nvme_init(void)
static void __exit nvme_exit(void)
{
pci_unregister_driver(&nvme_driver);
+ flush_workqueue(nvme_wq);
_nvme_check_size();
}
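Taken together, the pci.c changes above only build SGLs when the controller advertises SGL support in the Identify SGLS field, the request targets an I/O queue (the admin queue keeps PRPs), and the request's average segment size reaches the sgl_threshold module parameter. A self-contained, hedged sketch of that decision with illustrative names:

	#include <stdbool.h>

	#define SGL_SUPPORT_MASK 0x3	/* bits 0-1 of the Identify SGLS field */

	/* nr_segments must be non-zero, as it is for any mapped request */
	static bool demo_use_sgls(unsigned int sgls_field, int qid,
				  unsigned int payload_bytes,
				  unsigned int nr_segments,
				  unsigned int sgl_threshold)
	{
		unsigned int avg_seg_size =
			(payload_bytes + nr_segments - 1) / nr_segments;

		if (!(sgls_field & SGL_SUPPORT_MASK))
			return false;	/* controller cannot do SGLs */
		if (qid == 0)
			return false;	/* admin commands stay on PRPs */
		if (!sgl_threshold || avg_seg_size < sgl_threshold)
			return false;
		return true;
	}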
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0ebb539f3bd3..4f9bf2f815c3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -41,17 +41,9 @@
#define NVME_RDMA_MAX_INLINE_SEGMENTS 1
-/*
- * We handle AEN commands ourselves and don't even let the
- * block layer know about them.
- */
-#define NVME_RDMA_NR_AEN_COMMANDS 1
-#define NVME_RDMA_AQ_BLKMQ_DEPTH \
- (NVME_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
-
struct nvme_rdma_device {
- struct ib_device *dev;
- struct ib_pd *pd;
+ struct ib_device *dev;
+ struct ib_pd *pd;
struct kref ref;
struct list_head entry;
};
@@ -79,8 +71,8 @@ struct nvme_rdma_request {
};
enum nvme_rdma_queue_flags {
- NVME_RDMA_Q_LIVE = 0,
- NVME_RDMA_Q_DELETING = 1,
+ NVME_RDMA_Q_ALLOCATED = 0,
+ NVME_RDMA_Q_LIVE = 1,
};
struct nvme_rdma_queue {
@@ -105,7 +97,6 @@ struct nvme_rdma_ctrl {
/* other member variables */
struct blk_mq_tag_set tag_set;
- struct work_struct delete_work;
struct work_struct err_work;
struct nvme_rdma_qe async_event_sqe;
@@ -274,6 +265,9 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
int ret = 0;
+ if (WARN_ON_ONCE(!req->mr))
+ return 0;
+
ib_dereg_mr(req->mr);
req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
@@ -434,11 +428,9 @@ out_err:
static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
- struct nvme_rdma_device *dev;
- struct ib_device *ibdev;
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
- dev = queue->device;
- ibdev = dev->dev;
rdma_destroy_qp(queue->cm_id);
ib_free_cq(queue->ib_cq);
@@ -493,7 +485,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
return 0;
out_destroy_qp:
- ib_destroy_qp(queue->qp);
+ rdma_destroy_qp(queue->cm_id);
out_destroy_ib_cq:
ib_free_cq(queue->ib_cq);
out_put_dev:
@@ -544,11 +536,11 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
ret = nvme_rdma_wait_for_cm(queue);
if (ret) {
dev_info(ctrl->ctrl.device,
- "rdma_resolve_addr wait failed (%d).\n", ret);
+ "rdma connection establishment failed (%d)\n", ret);
goto out_destroy_cm_id;
}
- clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
+ set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags);
return 0;
@@ -568,7 +560,7 @@ static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
{
- if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
+ if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
return;
if (nvme_rdma_queue_idx(queue) == 0) {
@@ -676,11 +668,10 @@ out_free_queues:
return ret;
}
-static void nvme_rdma_free_tagset(struct nvme_ctrl *nctrl, bool admin)
+static void nvme_rdma_free_tagset(struct nvme_ctrl *nctrl,
+ struct blk_mq_tag_set *set)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- struct blk_mq_tag_set *set = admin ?
- &ctrl->admin_tag_set : &ctrl->tag_set;
blk_mq_free_tag_set(set);
nvme_rdma_dev_put(ctrl->device);
@@ -697,7 +688,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
set = &ctrl->admin_tag_set;
memset(set, 0, sizeof(*set));
set->ops = &nvme_rdma_admin_mq_ops;
- set->queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
set->reserved_tags = 2; /* connect + keep-alive */
set->numa_node = NUMA_NO_NODE;
set->cmd_size = sizeof(struct nvme_rdma_request) +
@@ -705,6 +696,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
set->driver_data = ctrl;
set->nr_hw_queues = 1;
set->timeout = ADMIN_TIMEOUT;
+ set->flags = BLK_MQ_F_NO_SCHED;
} else {
set = &ctrl->tag_set;
memset(set, 0, sizeof(*set));
@@ -748,7 +740,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
nvme_rdma_stop_queue(&ctrl->queues[0]);
if (remove) {
blk_cleanup_queue(ctrl->ctrl.admin_q);
- nvme_rdma_free_tagset(&ctrl->ctrl, true);
+ nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
}
nvme_rdma_free_queue(&ctrl->queues[0]);
}
@@ -780,8 +772,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
goto out_free_tagset;
}
} else {
- error = blk_mq_reinit_tagset(&ctrl->admin_tag_set,
- nvme_rdma_reinit_request);
+ error = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
if (error)
goto out_free_queue;
}
@@ -825,7 +816,7 @@ out_cleanup_queue:
blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
if (new)
- nvme_rdma_free_tagset(&ctrl->ctrl, true);
+ nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
out_free_queue:
nvme_rdma_free_queue(&ctrl->queues[0]);
return error;
@@ -837,7 +828,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
nvme_rdma_stop_io_queues(ctrl);
if (remove) {
blk_cleanup_queue(ctrl->ctrl.connect_q);
- nvme_rdma_free_tagset(&ctrl->ctrl, false);
+ nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
}
nvme_rdma_free_io_queues(ctrl);
}
@@ -863,8 +854,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
goto out_free_tag_set;
}
} else {
- ret = blk_mq_reinit_tagset(&ctrl->tag_set,
- nvme_rdma_reinit_request);
+ ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
if (ret)
goto out_free_io_queues;
@@ -883,7 +873,7 @@ out_cleanup_connect_q:
blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
if (new)
- nvme_rdma_free_tagset(&ctrl->ctrl, false);
+ nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
out_free_io_queues:
nvme_rdma_free_io_queues(ctrl);
return ret;
@@ -922,7 +912,7 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
ctrl->ctrl.opts->reconnect_delay * HZ);
} else {
dev_info(ctrl->ctrl.device, "Removing controller...\n");
- queue_work(nvme_wq, &ctrl->delete_work);
+ nvme_delete_ctrl(&ctrl->ctrl);
}
}
@@ -935,10 +925,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
++ctrl->ctrl.nr_reconnects;
- if (ctrl->ctrl.queue_count > 1)
- nvme_rdma_destroy_io_queues(ctrl, false);
-
- nvme_rdma_destroy_admin_queue(ctrl, false);
ret = nvme_rdma_configure_admin_queue(ctrl, false);
if (ret)
goto requeue;
@@ -946,7 +932,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
if (ctrl->ctrl.queue_count > 1) {
ret = nvme_rdma_configure_io_queues(ctrl, false);
if (ret)
- goto requeue;
+ goto destroy_admin;
}
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -956,14 +942,17 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
return;
}
- ctrl->ctrl.nr_reconnects = 0;
-
nvme_start_ctrl(&ctrl->ctrl);
- dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
+ dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
+ ctrl->ctrl.nr_reconnects);
+
+ ctrl->ctrl.nr_reconnects = 0;
return;
+destroy_admin:
+ nvme_rdma_destroy_admin_queue(ctrl, false);
requeue:
dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
ctrl->ctrl.nr_reconnects);
@@ -979,17 +968,15 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
- nvme_rdma_stop_io_queues(ctrl);
- }
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
- nvme_rdma_stop_queue(&ctrl->queues[0]);
-
- /* We must take care of fastfail/requeue all our inflight requests */
- if (ctrl->ctrl.queue_count > 1)
blk_mq_tagset_busy_iter(&ctrl->tag_set,
nvme_cancel_request, &ctrl->ctrl);
+ nvme_rdma_destroy_io_queues(ctrl, false);
+ }
+
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_cancel_request, &ctrl->ctrl);
+ nvme_rdma_destroy_admin_queue(ctrl, false);
/*
 * queues are not alive anymore, so restart the queues to fail fast
@@ -1065,7 +1052,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
if (!blk_rq_bytes(rq))
return;
- if (req->mr->need_inval) {
+ if (req->mr->need_inval && test_bit(NVME_RDMA_Q_LIVE, &req->queue->flags)) {
res = nvme_rdma_inv_rkey(queue, req);
if (unlikely(res < 0)) {
dev_err(ctrl->ctrl.device,
@@ -1314,7 +1301,7 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
return queue->ctrl->tag_set.tags[queue_idx - 1];
}
-static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
struct nvme_rdma_queue *queue = &ctrl->queues[0];
@@ -1324,14 +1311,11 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
struct ib_sge sge;
int ret;
- if (WARN_ON_ONCE(aer_idx != 0))
- return;
-
ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
memset(cmd, 0, sizeof(*cmd));
cmd->common.opcode = nvme_admin_async_event;
- cmd->common.command_id = NVME_RDMA_AQ_BLKMQ_DEPTH;
+ cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
cmd->common.flags |= NVME_CMD_SGL_METABUF;
nvme_rdma_set_sg_null(cmd);
@@ -1393,7 +1377,7 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
* for them but rather special case them here.
*/
if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
- cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
+ cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
else
@@ -1590,6 +1574,10 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ dev_warn(req->queue->ctrl->ctrl.device,
+ "I/O %d QID %d timeout, reset controller\n",
+ rq->tag, nvme_rdma_queue_idx(req->queue));
+
/* queue error recovery */
nvme_rdma_error_recovery(req->queue->ctrl);
@@ -1767,50 +1755,9 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
nvme_rdma_destroy_admin_queue(ctrl, shutdown);
}
-static void nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl)
+static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
{
- nvme_remove_namespaces(&ctrl->ctrl);
- nvme_rdma_shutdown_ctrl(ctrl, true);
- nvme_uninit_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
-}
-
-static void nvme_rdma_del_ctrl_work(struct work_struct *work)
-{
- struct nvme_rdma_ctrl *ctrl = container_of(work,
- struct nvme_rdma_ctrl, delete_work);
-
- nvme_stop_ctrl(&ctrl->ctrl);
- nvme_rdma_remove_ctrl(ctrl);
-}
-
-static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
-{
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
- return -EBUSY;
-
- if (!queue_work(nvme_wq, &ctrl->delete_work))
- return -EBUSY;
-
- return 0;
-}
-
-static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
-{
- struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- int ret = 0;
-
- /*
- * Keep a reference until all work is flushed since
- * __nvme_rdma_del_ctrl can free the ctrl mem
- */
- if (!kref_get_unless_zero(&ctrl->ctrl.kref))
- return -EBUSY;
- ret = __nvme_rdma_del_ctrl(ctrl);
- if (!ret)
- flush_work(&ctrl->delete_work);
- nvme_put_ctrl(&ctrl->ctrl);
- return ret;
+ nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true);
}
static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
@@ -1834,7 +1781,11 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
}
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- WARN_ON_ONCE(!changed);
+ if (!changed) {
+ /* state change failure is ok if we're in DELETING state */
+ WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
+ return;
+ }
nvme_start_ctrl(&ctrl->ctrl);
@@ -1842,7 +1793,10 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
out_fail:
dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
- nvme_rdma_remove_ctrl(ctrl);
+ nvme_remove_namespaces(&ctrl->ctrl);
+ nvme_rdma_shutdown_ctrl(ctrl, true);
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
}
static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
@@ -1854,10 +1808,88 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.reg_write32 = nvmf_reg_write32,
.free_ctrl = nvme_rdma_free_ctrl,
.submit_async_event = nvme_rdma_submit_async_event,
- .delete_ctrl = nvme_rdma_del_ctrl,
+ .delete_ctrl = nvme_rdma_delete_ctrl,
.get_address = nvmf_get_address,
+ .reinit_request = nvme_rdma_reinit_request,
};
+static inline bool
+__nvme_rdma_options_match(struct nvme_rdma_ctrl *ctrl,
+ struct nvmf_ctrl_options *opts)
+{
+ char *stdport = __stringify(NVME_RDMA_IP_PORT);
+
+
+ if (!nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts) ||
+ strcmp(opts->traddr, ctrl->ctrl.opts->traddr))
+ return false;
+
+ if (opts->mask & NVMF_OPT_TRSVCID &&
+ ctrl->ctrl.opts->mask & NVMF_OPT_TRSVCID) {
+ if (strcmp(opts->trsvcid, ctrl->ctrl.opts->trsvcid))
+ return false;
+ } else if (opts->mask & NVMF_OPT_TRSVCID) {
+ if (strcmp(opts->trsvcid, stdport))
+ return false;
+ } else if (ctrl->ctrl.opts->mask & NVMF_OPT_TRSVCID) {
+ if (strcmp(stdport, ctrl->ctrl.opts->trsvcid))
+ return false;
+ }
+ /* else, it's a match as both have stdport. Fall to next checks */
+
+ /*
+ * checking the local address is rough. In most cases, one
+ * is not specified and the host port is selected by the stack.
+ *
+ * Assume no match if:
+ * local address is specified and address is not the same
+ * local address is not specified but remote is, or vice versa
+ * (admin using specific host_traddr when it matters).
+ */
+ if (opts->mask & NVMF_OPT_HOST_TRADDR &&
+ ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
+ if (strcmp(opts->host_traddr, ctrl->ctrl.opts->host_traddr))
+ return false;
+ } else if (opts->mask & NVMF_OPT_HOST_TRADDR ||
+ ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
+ return false;
+ /*
+	 * if neither controller had a host port specified, assume it's
+ * a match as everything else matched.
+ */
+
+ return true;
+}
+
+/*
+ * Fails a connection request if it matches an existing controller
+ * (association) with the same tuple:
+ * <Host NQN, Host ID, local address, remote address, remote port, SUBSYS NQN>
+ *
+ * if local address is not specified in the request, it will match an
+ * existing controller with all the other parameters the same and no
+ * local port address specified as well.
+ *
+ * The ports don't need to be compared as they are intrinsically
+ * already matched by the port pointers supplied.
+ */
+static bool
+nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
+{
+ struct nvme_rdma_ctrl *ctrl;
+ bool found = false;
+
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
+ found = __nvme_rdma_options_match(ctrl, opts);
+ if (found)
+ break;
+ }
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ return found;
+}
+
static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
struct nvmf_ctrl_options *opts)
{
@@ -1894,6 +1926,11 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
}
}
+ if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) {
+ ret = -EALREADY;
+ goto out_free_ctrl;
+ }
+
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
0 /* no quirks, we're perfect! */);
if (ret)
@@ -1902,7 +1939,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
INIT_DELAYED_WORK(&ctrl->reconnect_work,
nvme_rdma_reconnect_ctrl_work);
INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
- INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
ctrl->ctrl.queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
@@ -1961,7 +1997,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
- kref_get(&ctrl->ctrl.kref);
+ nvme_get_ctrl(&ctrl->ctrl);
mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
@@ -2006,7 +2042,7 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
dev_info(ctrl->ctrl.device,
"Removing ctrl: NQN \"%s\", addr %pISp\n",
ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
- __nvme_rdma_del_ctrl(ctrl);
+ nvme_delete_ctrl(&ctrl->ctrl);
}
mutex_unlock(&nvme_rdma_ctrl_mutex);
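The duplicate-connection check added above treats a missing trsvcid as the default NVMe/RDMA port when comparing an incoming connect request against existing associations. A condensed, hedged sketch of just that comparison (kernel-style, illustrative helper name):

	/* compare two optional service IDs, falling back to the default port */
	static bool demo_trsvcid_match(const struct nvmf_ctrl_options *a,
				       const struct nvmf_ctrl_options *b)
	{
		const char *pa = (a->mask & NVMF_OPT_TRSVCID) ?
				a->trsvcid : __stringify(NVME_RDMA_IP_PORT);
		const char *pb = (b->mask & NVMF_OPT_TRSVCID) ?
				b->trsvcid : __stringify(NVME_RDMA_IP_PORT);

		return strcmp(pa, pb) == 0;
	}

When every element of the <host NQN, host ID, traddr, trsvcid, host_traddr, subsysnqn> tuple matches an existing controller, the connect attempt fails with -EALREADY unless the duplicate_connect option was given.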
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index c4a0bf36e752..90dcdc40ac71 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -35,17 +35,14 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
struct nvme_smart_log *slog)
{
- u16 status;
struct nvmet_ns *ns;
u64 host_reads, host_writes, data_units_read, data_units_written;
- status = NVME_SC_SUCCESS;
ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
if (!ns) {
- status = NVME_SC_INVALID_NS;
pr_err("nvmet : Could not find namespace id : %d\n",
le32_to_cpu(req->cmd->get_log_page.nsid));
- goto out;
+ return NVME_SC_INVALID_NS;
}
host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
@@ -58,20 +55,18 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
put_unaligned_le64(host_writes, &slog->host_writes[0]);
put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
nvmet_put_namespace(ns);
-out:
- return status;
+
+ return NVME_SC_SUCCESS;
}
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
struct nvme_smart_log *slog)
{
- u16 status;
u64 host_reads = 0, host_writes = 0;
u64 data_units_read = 0, data_units_written = 0;
struct nvmet_ns *ns;
struct nvmet_ctrl *ctrl;
- status = NVME_SC_SUCCESS;
ctrl = req->sq->ctrl;
rcu_read_lock();
@@ -91,7 +86,7 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
put_unaligned_le64(host_writes, &slog->host_writes[0]);
put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
- return status;
+ return NVME_SC_SUCCESS;
}
static u16 nvmet_get_smart_log(struct nvmet_req *req,
@@ -144,10 +139,8 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
}
smart_log = buf;
status = nvmet_get_smart_log(req, smart_log);
- if (status) {
- memset(buf, '\0', data_len);
+ if (status)
goto err;
- }
break;
case NVME_LOG_FW_SLOT:
/*
@@ -300,7 +293,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
}
/*
- * nuse = ncap = nsze isn't aways true, but we have no way to find
+ * nuse = ncap = nsze isn't always true, but we have no way to find
* that out from the underlying device.
*/
id->ncap = id->nuse = id->nsze =
@@ -424,7 +417,7 @@ out:
}
/*
- * A "mimimum viable" abort implementation: the command is mandatory in the
+ * A "minimum viable" abort implementation: the command is mandatory in the
* spec, but we are not required to do any useful work. We couldn't really
* do a useful abort, so don't bother even with waiting for the command
 * to be executed and return immediately telling the command to abort
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index b6aeb1d70951..e6b2d2af81b6 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -20,8 +20,8 @@
#include "nvmet.h"
-static struct config_item_type nvmet_host_type;
-static struct config_item_type nvmet_subsys_type;
+static const struct config_item_type nvmet_host_type;
+static const struct config_item_type nvmet_subsys_type;
/*
* nvmet_port Generic ConfigFS definitions.
@@ -425,7 +425,7 @@ static struct configfs_item_operations nvmet_ns_item_ops = {
.release = nvmet_ns_release,
};
-static struct config_item_type nvmet_ns_type = {
+static const struct config_item_type nvmet_ns_type = {
.ct_item_ops = &nvmet_ns_item_ops,
.ct_attrs = nvmet_ns_attrs,
.ct_owner = THIS_MODULE,
@@ -464,7 +464,7 @@ static struct configfs_group_operations nvmet_namespaces_group_ops = {
.make_group = nvmet_ns_make,
};
-static struct config_item_type nvmet_namespaces_type = {
+static const struct config_item_type nvmet_namespaces_type = {
.ct_group_ops = &nvmet_namespaces_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -540,7 +540,7 @@ static struct configfs_item_operations nvmet_port_subsys_item_ops = {
.drop_link = nvmet_port_subsys_drop_link,
};
-static struct config_item_type nvmet_port_subsys_type = {
+static const struct config_item_type nvmet_port_subsys_type = {
.ct_item_ops = &nvmet_port_subsys_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -613,7 +613,7 @@ static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
.drop_link = nvmet_allowed_hosts_drop_link,
};
-static struct config_item_type nvmet_allowed_hosts_type = {
+static const struct config_item_type nvmet_allowed_hosts_type = {
.ct_item_ops = &nvmet_allowed_hosts_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -729,7 +729,7 @@ static struct configfs_item_operations nvmet_subsys_item_ops = {
.release = nvmet_subsys_release,
};
-static struct config_item_type nvmet_subsys_type = {
+static const struct config_item_type nvmet_subsys_type = {
.ct_item_ops = &nvmet_subsys_item_ops,
.ct_attrs = nvmet_subsys_attrs,
.ct_owner = THIS_MODULE,
@@ -767,7 +767,7 @@ static struct configfs_group_operations nvmet_subsystems_group_ops = {
.make_group = nvmet_subsys_make,
};
-static struct config_item_type nvmet_subsystems_type = {
+static const struct config_item_type nvmet_subsystems_type = {
.ct_group_ops = &nvmet_subsystems_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -827,7 +827,7 @@ static struct configfs_item_operations nvmet_referral_item_ops = {
.release = nvmet_referral_release,
};
-static struct config_item_type nvmet_referral_type = {
+static const struct config_item_type nvmet_referral_type = {
.ct_owner = THIS_MODULE,
.ct_attrs = nvmet_referral_attrs,
.ct_item_ops = &nvmet_referral_item_ops,
@@ -852,7 +852,7 @@ static struct configfs_group_operations nvmet_referral_group_ops = {
.make_group = nvmet_referral_make,
};
-static struct config_item_type nvmet_referrals_type = {
+static const struct config_item_type nvmet_referrals_type = {
.ct_owner = THIS_MODULE,
.ct_group_ops = &nvmet_referral_group_ops,
};
@@ -880,7 +880,7 @@ static struct configfs_item_operations nvmet_port_item_ops = {
.release = nvmet_port_release,
};
-static struct config_item_type nvmet_port_type = {
+static const struct config_item_type nvmet_port_type = {
.ct_attrs = nvmet_port_attrs,
.ct_item_ops = &nvmet_port_item_ops,
.ct_owner = THIS_MODULE,
@@ -921,7 +921,7 @@ static struct configfs_group_operations nvmet_ports_group_ops = {
.make_group = nvmet_ports_make,
};
-static struct config_item_type nvmet_ports_type = {
+static const struct config_item_type nvmet_ports_type = {
.ct_group_ops = &nvmet_ports_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -940,7 +940,7 @@ static struct configfs_item_operations nvmet_host_item_ops = {
.release = nvmet_host_release,
};
-static struct config_item_type nvmet_host_type = {
+static const struct config_item_type nvmet_host_type = {
.ct_item_ops = &nvmet_host_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -963,14 +963,14 @@ static struct configfs_group_operations nvmet_hosts_group_ops = {
.make_group = nvmet_hosts_make_group,
};
-static struct config_item_type nvmet_hosts_type = {
+static const struct config_item_type nvmet_hosts_type = {
.ct_group_ops = &nvmet_hosts_group_ops,
.ct_owner = THIS_MODULE,
};
static struct config_group nvmet_hosts_group;
-static struct config_item_type nvmet_root_type = {
+static const struct config_item_type nvmet_root_type = {
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 645ba7eee35d..b54748ad5f48 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -57,6 +57,17 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
return 0;
}
+static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
+{
+ struct nvmet_ns *ns;
+
+ if (list_empty(&subsys->namespaces))
+ return 0;
+
+ ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
+ return ns->nsid;
+}
+
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
@@ -334,6 +345,8 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
ns->enabled = false;
list_del_rcu(&ns->dev_link);
+ if (ns->nsid == subsys->max_nsid)
+ subsys->max_nsid = nvmet_max_nsid(subsys);
mutex_unlock(&subsys->lock);
/*
@@ -497,6 +510,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->ops = ops;
req->sg = NULL;
req->sg_cnt = 0;
+ req->transfer_len = 0;
req->rsp->status = 0;
/* no support for fused commands yet */
@@ -546,6 +560,15 @@ void nvmet_req_uninit(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
+void nvmet_req_execute(struct nvmet_req *req)
+{
+ if (unlikely(req->data_len != req->transfer_len))
+ nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+ else
+ req->execute(req);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_execute);
+
static inline bool nvmet_cc_en(u32 cc)
{
return (cc >> NVME_CC_EN_SHIFT) & 0x1;
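With nvmet_req_execute() added above, the core now cross-checks the length parsed from the command (data_len) against the length the transport actually described (transfer_len) before invoking the handler. A transport is expected to accumulate req->transfer_len while mapping the data and then call the helper; roughly, as a hedged sketch with a hypothetical transport function:

	/* simplified transport receive path, names are illustrative */
	static void demo_handle_command(struct nvmet_req *req, size_t mapped_len)
	{
		req->transfer_len = mapped_len;	/* what the SGLs described */

		/*
		 * nvmet_req_execute() completes the request with
		 * NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR when transfer_len
		 * and data_len disagree, and calls req->execute() otherwise.
		 */
		nvmet_req_execute(req);
	}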
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 58e010bdda3e..739b8feadc7d 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -76,7 +76,6 @@ struct nvmet_fc_fcp_iod {
dma_addr_t rspdma;
struct scatterlist *data_sg;
int data_sg_cnt;
- u32 total_length;
u32 offset;
enum nvmet_fcp_datadir io_dir;
bool active;
@@ -150,6 +149,7 @@ struct nvmet_fc_tgt_assoc {
struct list_head a_list;
struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
+ struct work_struct del_work;
};
@@ -232,6 +232,7 @@ static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod);
+static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
/* *********************** FC-NVME DMA Handling **************************** */
@@ -802,6 +803,16 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
return NULL;
}
+static void
+nvmet_fc_delete_assoc(struct work_struct *work)
+{
+ struct nvmet_fc_tgt_assoc *assoc =
+ container_of(work, struct nvmet_fc_tgt_assoc, del_work);
+
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+}
+
static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
{
@@ -826,6 +837,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
assoc->a_id = idx;
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
+ INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
while (needrandom) {
get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
@@ -1118,8 +1130,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
- nvmet_fc_delete_target_assoc(assoc);
- nvmet_fc_tgt_a_put(assoc);
+ schedule_work(&assoc->del_work);
return;
}
@@ -1688,7 +1699,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
u32 page_len, length;
int i = 0;
- length = fod->total_length;
+ length = fod->req.transfer_len;
nent = DIV_ROUND_UP(length, PAGE_SIZE);
sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
if (!sg)
@@ -1777,7 +1788,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
u32 rsn, rspcnt, xfr_length;
if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
- xfr_length = fod->total_length;
+ xfr_length = fod->req.transfer_len;
else
xfr_length = fod->offset;
@@ -1803,7 +1814,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
if (!(rspcnt % fod->queue->ersp_ratio) ||
sqe->opcode == nvme_fabrics_command ||
- xfr_length != fod->total_length ||
+ xfr_length != fod->req.transfer_len ||
(le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
(sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
@@ -1880,7 +1891,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
- (fod->total_length - fod->offset));
+ (fod->req.transfer_len - fod->offset));
fcpreq->transfer_length = tlen;
fcpreq->transferred_length = 0;
fcpreq->fcp_error = 0;
@@ -1894,7 +1905,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
* combined xfr with response.
*/
if ((op == NVMET_FCOP_READDATA) &&
- ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
+ ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
(tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
fcpreq->op = NVMET_FCOP_READDATA_RSP;
nvmet_fc_prep_fcp_rsp(tgtport, fod);
@@ -1974,7 +1985,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
}
fod->offset += fcpreq->transferred_length;
- if (fod->offset != fod->total_length) {
+ if (fod->offset != fod->req.transfer_len) {
spin_lock_irqsave(&fod->flock, flags);
fod->writedataactive = true;
spin_unlock_irqrestore(&fod->flock, flags);
@@ -1986,9 +1997,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
}
/* data transfer complete, resume with nvmet layer */
-
- fod->req.execute(&fod->req);
-
+ nvmet_req_execute(&fod->req);
break;
case NVMET_FCOP_READDATA:
@@ -2011,7 +2020,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
}
fod->offset += fcpreq->transferred_length;
- if (fod->offset != fod->total_length) {
+ if (fod->offset != fod->req.transfer_len) {
/* transfer the next chunk */
nvmet_fc_transfer_fcp_data(tgtport, fod,
NVMET_FCOP_READDATA);
@@ -2148,7 +2157,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
- fod->total_length = be32_to_cpu(cmdiu->data_len);
+ fod->req.transfer_len = be32_to_cpu(cmdiu->data_len);
if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
fod->io_dir = NVMET_FCP_WRITE;
if (!nvme_is_write(&cmdiu->sqe))
@@ -2159,7 +2168,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
goto transport_error;
} else {
fod->io_dir = NVMET_FCP_NODATA;
- if (fod->total_length)
+ if (fod->req.transfer_len)
goto transport_error;
}
@@ -2167,9 +2176,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.rsp = &fod->rspiubuf.cqe;
fod->req.port = fod->queue->port;
- /* ensure nvmet handlers will set cmd handler callback */
- fod->req.execute = NULL;
-
/* clear any response payload */
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
@@ -2189,7 +2195,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
/* keep a running counter of tail position */
atomic_inc(&fod->queue->sqtail);
- if (fod->total_length) {
+ if (fod->req.transfer_len) {
ret = nvmet_fc_alloc_tgt_pgs(fod);
if (ret) {
nvmet_req_complete(&fod->req, ret);
@@ -2212,9 +2218,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
* can invoke the nvmet_layer now. If read data, cmd completion will
* push the data
*/
-
- fod->req.execute(&fod->req);
-
+ nvmet_req_execute(&fod->req);
return;
transport_error:
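Scheduling the association teardown above onto a work item keeps the heavy nvmet_fc_delete_target_assoc() call out of the caller's context; the deferral itself is the ordinary workqueue pattern, roughly (generic sketch with hypothetical helpers, not the FC code):

	struct demo_assoc {
		struct kref		ref;
		struct work_struct	del_work;	/* INIT_WORK()ed at alloc time */
	};

	static void demo_del_work(struct work_struct *work)
	{
		struct demo_assoc *a =
			container_of(work, struct demo_assoc, del_work);

		demo_teardown(a);		/* hypothetical heavy teardown */
		kref_put(&a->ref, demo_free);	/* drop the scheduler's reference */
	}

	/* caller already holds a reference and hands it to the work item */
	static void demo_schedule_delete(struct demo_assoc *a)
	{
		schedule_work(&a->del_work);
	}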
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 0d4c23dc4532..0a4372a016f2 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -33,18 +33,11 @@ static inline u32 nvmet_rw_len(struct nvmet_req *req)
req->ns->blksize_shift;
}
-static void nvmet_inline_bio_init(struct nvmet_req *req)
-{
- struct bio *bio = &req->inline_bio;
-
- bio_init(bio, req->inline_bvec, NVMET_MAX_INLINE_BIOVEC);
-}
-
static void nvmet_execute_rw(struct nvmet_req *req)
{
int sg_cnt = req->sg_cnt;
+ struct bio *bio = &req->inline_bio;
struct scatterlist *sg;
- struct bio *bio;
sector_t sector;
blk_qc_t cookie;
int op, op_flags = 0, i;
@@ -66,8 +59,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
sector = le64_to_cpu(req->cmd->rw.slba);
sector <<= (req->ns->blksize_shift - 9);
- nvmet_inline_bio_init(req);
- bio = &req->inline_bio;
+ bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
@@ -94,16 +86,14 @@ static void nvmet_execute_rw(struct nvmet_req *req)
cookie = submit_bio(bio);
- blk_mq_poll(bdev_get_queue(req->ns->bdev), cookie);
+ blk_poll(bdev_get_queue(req->ns->bdev), cookie);
}
static void nvmet_execute_flush(struct nvmet_req *req)
{
- struct bio *bio;
-
- nvmet_inline_bio_init(req);
- bio = &req->inline_bio;
+ struct bio *bio = &req->inline_bio;
+ bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
bio_set_dev(bio, req->ns->bdev);
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 92628c432926..96d390416789 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -23,14 +23,6 @@
#define NVME_LOOP_MAX_SEGMENTS 256
-/*
- * We handle AEN commands ourselves and don't even let the
- * block layer know about them.
- */
-#define NVME_LOOP_NR_AEN_COMMANDS 1
-#define NVME_LOOP_AQ_BLKMQ_DEPTH \
- (NVME_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
-
struct nvme_loop_iod {
struct nvme_request nvme_req;
struct nvme_command cmd;
@@ -53,7 +45,6 @@ struct nvme_loop_ctrl {
struct nvme_ctrl ctrl;
struct nvmet_ctrl *target_ctrl;
- struct work_struct delete_work;
};
static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
@@ -113,7 +104,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
* for them but rather special case them here.
*/
if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
- cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
+ cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
} else {
@@ -136,7 +127,7 @@ static void nvme_loop_execute_work(struct work_struct *work)
struct nvme_loop_iod *iod =
container_of(work, struct nvme_loop_iod, work);
- iod->req.execute(&iod->req);
+ nvmet_req_execute(&iod->req);
}
static enum blk_eh_timer_return
@@ -185,6 +176,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
iod->req.sg = iod->sg_table.sgl;
iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
+ iod->req.transfer_len = blk_rq_bytes(req);
}
blk_mq_start_request(req);
@@ -193,7 +185,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
struct nvme_loop_queue *queue = &ctrl->queues[0];
@@ -201,7 +193,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
memset(&iod->cmd, 0, sizeof(iod->cmd));
iod->cmd.common.opcode = nvme_admin_async_event;
- iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
+ iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
@@ -357,7 +349,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
- ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
+ ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
@@ -365,6 +357,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
ctrl->admin_tag_set.driver_data = ctrl;
ctrl->admin_tag_set.nr_hw_queues = 1;
ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+ ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
ctrl->queues[0].ctrl = ctrl;
error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
@@ -438,41 +431,9 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
nvme_loop_destroy_admin_queue(ctrl);
}
-static void nvme_loop_del_ctrl_work(struct work_struct *work)
+static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
- struct nvme_loop_ctrl *ctrl = container_of(work,
- struct nvme_loop_ctrl, delete_work);
-
- nvme_stop_ctrl(&ctrl->ctrl);
- nvme_remove_namespaces(&ctrl->ctrl);
- nvme_loop_shutdown_ctrl(ctrl);
- nvme_uninit_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
-}
-
-static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
-{
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
- return -EBUSY;
-
- if (!queue_work(nvme_wq, &ctrl->delete_work))
- return -EBUSY;
-
- return 0;
-}
-
-static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
-{
- struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
- int ret;
-
- ret = __nvme_loop_del_ctrl(ctrl);
- if (ret)
- return ret;
-
- flush_work(&ctrl->delete_work);
-
- return 0;
+ nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
@@ -482,7 +443,7 @@ static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
mutex_lock(&nvme_loop_ctrl_mutex);
list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
if (ctrl->ctrl.cntlid == nctrl->cntlid)
- __nvme_loop_del_ctrl(ctrl);
+ nvme_delete_ctrl(&ctrl->ctrl);
}
mutex_unlock(&nvme_loop_ctrl_mutex);
}
@@ -538,7 +499,7 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
.reg_write32 = nvmf_reg_write32,
.free_ctrl = nvme_loop_free_ctrl,
.submit_async_event = nvme_loop_submit_async_event,
- .delete_ctrl = nvme_loop_del_ctrl,
+ .delete_ctrl = nvme_loop_delete_ctrl_host,
};
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -600,7 +561,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
ctrl->ctrl.opts = opts;
INIT_LIST_HEAD(&ctrl->list);
- INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
@@ -641,7 +601,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
dev_info(ctrl->ctrl.device,
"new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);
- kref_get(&ctrl->ctrl.kref);
+ nvme_get_ctrl(&ctrl->ctrl);
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
WARN_ON_ONCE(!changed);
@@ -730,7 +690,7 @@ static void __exit nvme_loop_cleanup_module(void)
mutex_lock(&nvme_loop_ctrl_mutex);
list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
- __nvme_loop_del_ctrl(ctrl);
+ nvme_delete_ctrl(&ctrl->ctrl);
mutex_unlock(&nvme_loop_ctrl_mutex);
flush_workqueue(nvme_wq);
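The loop transport, like PCIe and RDMA above, now reserves the single AEN slot by submitting it with command_id equal to NVME_AQ_BLK_MQ_DEPTH, so any admin completion whose command_id is at or beyond that value cannot be a blk-mq tag and is routed to the AEN handler. A compact sketch of the dispatch rule (transport-agnostic, hypothetical completion helper):

	static void demo_handle_admin_cqe(struct nvme_ctrl *ctrl,
					  struct nvme_completion *cqe)
	{
		if (cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH) {
			/* driver-owned AEN slot, never a block-layer tag */
			nvme_complete_async_event(ctrl, cqe->status,
						  &cqe->result);
			return;
		}

		/* otherwise look the request up by tag and complete it */
		demo_complete_tagged_request(ctrl, cqe);
	}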
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 87e429bfcd8a..417f6c0331cc 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -223,7 +223,10 @@ struct nvmet_req {
struct bio inline_bio;
struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
int sg_cnt;
+ /* data length as parsed from the command: */
size_t data_len;
+ /* data length as parsed from the SGL descriptor: */
+ size_t transfer_len;
struct nvmet_port *port;
@@ -266,6 +269,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
+void nvmet_req_execute(struct nvmet_req *req);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
@@ -314,7 +318,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
u32 nvmet_get_log_page_len(struct nvme_command *cmd);
#define NVMET_QUEUE_SIZE 1024
-#define NVMET_NR_QUEUES 64
+#define NVMET_NR_QUEUES 128
#define NVMET_MAX_CMD NVMET_QUEUE_SIZE
#define NVMET_KAS 10
#define NVMET_DISC_KATO 120
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 76d2bb793afe..49912909c298 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -148,14 +148,14 @@ static inline u32 get_unaligned_le24(const u8 *p)
static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
return nvme_is_write(rsp->req.cmd) &&
- rsp->req.data_len &&
+ rsp->req.transfer_len &&
!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}
static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
return !nvme_is_write(rsp->req.cmd) &&
- rsp->req.data_len &&
+ rsp->req.transfer_len &&
!rsp->req.rsp->status &&
!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}
@@ -577,7 +577,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
- rsp->req.execute(&rsp->req);
+ nvmet_req_execute(&rsp->req);
}
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -609,6 +609,7 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
nvmet_rdma_use_inline_sg(rsp, len, off);
rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
+ rsp->req.transfer_len += len;
return 0;
}
@@ -636,6 +637,7 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
nvmet_data_dir(&rsp->req));
if (ret < 0)
return NVME_SC_INTERNAL;
+ rsp->req.transfer_len += len;
rsp->n_rdma += ret;
if (invalidate) {
@@ -693,7 +695,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
queue->cm_id->port_num, &rsp->read_cqe, NULL))
nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
} else {
- rsp->req.execute(&rsp->req);
+ nvmet_req_execute(&rsp->req);
}
return true;
@@ -1512,15 +1514,17 @@ static struct nvmet_fabrics_ops nvmet_rdma_ops = {
static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
- struct nvmet_rdma_queue *queue;
+ struct nvmet_rdma_queue *queue, *tmp;
/* Device is being removed, delete all queues using this device */
mutex_lock(&nvmet_rdma_queue_mutex);
- list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
+ list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
+ queue_list) {
if (queue->dev->device != ib_device)
continue;
pr_info("Removing queue %d\n", queue->idx);
+ list_del_init(&queue->queue_list);
__nvmet_rdma_queue_disconnect(queue);
}
mutex_unlock(&nvmet_rdma_queue_mutex);
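The iteration fix above is the classic safe-walk pattern: because the loop body removes the entry it is visiting, the cursor must be advanced from a saved copy, which is what list_for_each_entry_safe() provides. A small self-contained sketch of the same idiom:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct demo_item {
		struct list_head	node;
		int			key;
	};

	/* delete matching entries while walking the list */
	static void demo_purge(struct list_head *head, int key)
	{
		struct demo_item *it, *tmp;

		list_for_each_entry_safe(it, tmp, head, node) {
			if (it->key != key)
				continue;
			list_del_init(&it->node);	/* safe: tmp already points past it */
			kfree(it);
		}
	}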
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 101ced4c84be..ff505af064ba 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -123,6 +123,17 @@ config NVMEM_SUNXI_SID
This driver can also be built as a module. If so, the module
will be called nvmem_sunxi_sid.
+config UNIPHIER_EFUSE
+ tristate "UniPhier SoCs eFuse support"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+	  This is a simple driver to dump specified values of UniPhier SoCs
+	  from their eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-uniphier-efuse.
+
config NVMEM_VF610_OCOTP
tristate "VF610 SoC OCOTP support"
depends on SOC_VF610 || COMPILE_TEST
@@ -135,13 +146,33 @@ config NVMEM_VF610_OCOTP
be called nvmem-vf610-ocotp.
config MESON_EFUSE
- tristate "Amlogic eFuse Support"
+ tristate "Amlogic Meson GX eFuse Support"
depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
help
This is a driver to retrieve specific values from the eFuse found on
- the Amlogic Meson SoCs.
+ the Amlogic Meson GX SoCs.
This driver can also be built as a module. If so, the module
will be called nvmem_meson_efuse.
+config MESON_MX_EFUSE
+ tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support"
+ depends on ARCH_MESON || COMPILE_TEST
+ help
+ This is a driver to retrieve specific values from the eFuse found on
+ the Amlogic Meson6, Meson8 and Meson8b SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_meson_mx_efuse.
+
+config NVMEM_SNVS_LPGPR
+ tristate "Support for Low Power General Purpose Register"
+ depends on SOC_IMX6 || COMPILE_TEST
+ help
+ This is a driver for the Low Power General Purpose Register (LPGPR) available
+ in the Secure Non-Volatile Storage (SNVS) block of i.MX6 SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-snvs-lpgpr.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 6f7a77fb3ee7..e54dcfa6565a 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -27,7 +27,13 @@ obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
nvmem_rockchip_efuse-y := rockchip-efuse.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_sunxi_sid-y := sunxi_sid.o
+obj-$(CONFIG_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o
+nvmem-uniphier-efuse-y := uniphier-efuse.o
obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
nvmem-vf610-ocotp-y := vf610-ocotp.o
obj-$(CONFIG_MESON_EFUSE) += nvmem_meson_efuse.o
nvmem_meson_efuse-y := meson-efuse.o
+obj-$(CONFIG_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o
+nvmem_meson_mx_efuse-y := meson-mx-efuse.o
+obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o
+nvmem_snvs_lpgpr-y := snvs_lpgpr.o
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
index 3c56e3b2bd65..5e9e324427f9 100644
--- a/drivers/nvmem/bcm-ocotp.c
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -232,7 +232,6 @@ static struct nvmem_config bcm_otpc_nvmem_config = {
.read_only = false,
.word_size = 4,
.stride = 4,
- .owner = THIS_MODULE,
.reg_read = bcm_otpc_read,
.reg_write = bcm_otpc_write,
};
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index d12e5de78e70..5a5cefd12153 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -462,6 +462,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->id = rval;
nvmem->owner = config->owner;
+ if (!nvmem->owner && config->dev->driver)
+ nvmem->owner = config->dev->driver->owner;
nvmem->stride = config->stride;
nvmem->word_size = config->word_size;
nvmem->size = config->size;
@@ -615,7 +617,7 @@ static struct nvmem_device *nvmem_find(const char *name)
return to_nvmem_device(d);
}
-#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+#if IS_ENABLED(CONFIG_OF)
/**
* of_nvmem_device_get() - Get nvmem device from a given id
*
@@ -753,7 +755,7 @@ static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
return cell;
}
-#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+#if IS_ENABLED(CONFIG_OF)
/**
* of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
*
@@ -946,8 +948,7 @@ void nvmem_cell_put(struct nvmem_cell *cell)
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
-static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
- void *buf)
+static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
u8 *p, *b;
int i, bit_offset = cell->bit_offset;
@@ -1028,8 +1029,8 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
-static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
- u8 *_buf, int len)
+static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
+ u8 *_buf, int len)
{
struct nvmem_device *nvmem = cell->nvmem;
int i, rc, nbits, bit_offset = cell->bit_offset;
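With the owner defaulting added to nvmem_register() above (falling back to config->dev->driver->owner), providers no longer need to set .owner = THIS_MODULE, which is why the per-driver diffs that follow simply drop that line. A minimal provider sketch under that assumption, with hypothetical names:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>

static int foo_reg_read(void *context, unsigned int reg, void *val, size_t bytes)
{
	/* copy 'bytes' starting at offset 'reg' into 'val' */
	return 0;
}

static int foo_nvmem_probe(struct platform_device *pdev)
{
	struct nvmem_config cfg = {};
	struct nvmem_device *nvmem;

	cfg.name = "foo-efuse";
	cfg.dev = &pdev->dev;		/* owner is inherited from dev->driver->owner */
	cfg.stride = 4;
	cfg.word_size = 4;
	cfg.size = 128;
	cfg.read_only = true;
	cfg.reg_read = foo_reg_read;

	nvmem = nvmem_register(&cfg);
	return PTR_ERR_OR_ZERO(nvmem);
}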
diff --git a/drivers/nvmem/imx-iim.c b/drivers/nvmem/imx-iim.c
index 52ff65e0673f..52cfe91d9762 100644
--- a/drivers/nvmem/imx-iim.c
+++ b/drivers/nvmem/imx-iim.c
@@ -34,7 +34,6 @@ struct imx_iim_drvdata {
struct iim_priv {
void __iomem *base;
struct clk *clk;
- struct nvmem_config nvmem;
};
static int imx_iim_read(void *context, unsigned int offset,
@@ -108,7 +107,7 @@ static int imx_iim_probe(struct platform_device *pdev)
struct resource *res;
struct iim_priv *iim;
struct nvmem_device *nvmem;
- struct nvmem_config *cfg;
+ struct nvmem_config cfg = {};
const struct imx_iim_drvdata *drvdata = NULL;
iim = devm_kzalloc(dev, sizeof(*iim), GFP_KERNEL);
@@ -130,19 +129,16 @@ static int imx_iim_probe(struct platform_device *pdev)
if (IS_ERR(iim->clk))
return PTR_ERR(iim->clk);
- cfg = &iim->nvmem;
+ cfg.name = "imx-iim",
+ cfg.read_only = true,
+ cfg.word_size = 1,
+ cfg.stride = 1,
+ cfg.reg_read = imx_iim_read,
+ cfg.dev = dev;
+ cfg.size = drvdata->nregs;
+ cfg.priv = iim;
- cfg->name = "imx-iim",
- cfg->read_only = true,
- cfg->word_size = 1,
- cfg->stride = 1,
- cfg->owner = THIS_MODULE,
- cfg->reg_read = imx_iim_read,
- cfg->dev = dev;
- cfg->size = drvdata->nregs;
- cfg->priv = iim;
-
- nvmem = nvmem_register(cfg);
+ nvmem = nvmem_register(&cfg);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 193ca8fd350a..d7ba351a70c9 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -40,14 +40,19 @@
#define IMX_OCOTP_ADDR_CTRL_SET 0x0004
#define IMX_OCOTP_ADDR_CTRL_CLR 0x0008
#define IMX_OCOTP_ADDR_TIMING 0x0010
-#define IMX_OCOTP_ADDR_DATA 0x0020
+#define IMX_OCOTP_ADDR_DATA0 0x0020
+#define IMX_OCOTP_ADDR_DATA1 0x0030
+#define IMX_OCOTP_ADDR_DATA2 0x0040
+#define IMX_OCOTP_ADDR_DATA3 0x0050
#define IMX_OCOTP_BM_CTRL_ADDR 0x0000007F
#define IMX_OCOTP_BM_CTRL_BUSY 0x00000100
#define IMX_OCOTP_BM_CTRL_ERROR 0x00000200
#define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400
-#define DEF_RELAX 20 /* > 16.5ns */
+#define DEF_RELAX 20 /* > 16.5ns */
+#define DEF_FSOURCE 1001 /* > 1000 ns */
+#define DEF_STROBE_PROG 10000 /* IPG clocks */
#define IMX_OCOTP_WR_UNLOCK 0x3E770000
#define IMX_OCOTP_READ_LOCKED_VAL 0xBADABADA
@@ -57,10 +62,16 @@ struct ocotp_priv {
struct device *dev;
struct clk *clk;
void __iomem *base;
- unsigned int nregs;
+ const struct ocotp_params *params;
struct nvmem_config *config;
};
+struct ocotp_params {
+ unsigned int nregs;
+ unsigned int bank_address_words;
+ void (*set_timing)(struct ocotp_priv *priv);
+};
+
static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags)
{
int count;
@@ -121,8 +132,8 @@ static int imx_ocotp_read(void *context, unsigned int offset,
index = offset >> 2;
count = bytes >> 2;
- if (count > (priv->nregs - index))
- count = priv->nregs - index;
+ if (count > (priv->params->nregs - index))
+ count = priv->params->nregs - index;
mutex_lock(&ocotp_mutex);
@@ -160,6 +171,52 @@ read_end:
return ret;
}
+static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv)
+{
+ unsigned long clk_rate = 0;
+ unsigned long strobe_read, relax, strobe_prog;
+ u32 timing = 0;
+
+ /* 47.3.1.3.1
+ * Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX]
+ * fields with timing values to match the current frequency of the
+ * ipg_clk. OTP writes will work at maximum bus frequencies as long
+ * as the HW_OCOTP_TIMING parameters are set correctly.
+ */
+ clk_rate = clk_get_rate(priv->clk);
+
+ relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
+ strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
+ strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
+
+ timing = strobe_prog & 0x00000FFF;
+ timing |= (relax << 12) & 0x0000F000;
+ timing |= (strobe_read << 16) & 0x003F0000;
+
+ writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
+}
+
+static void imx_ocotp_set_imx7_timing(struct ocotp_priv *priv)
+{
+ unsigned long clk_rate = 0;
+ u64 fsource, strobe_prog;
+ u32 timing = 0;
+
+ /* i.MX 7Solo Applications Processor Reference Manual, Rev. 0.1
+ * 6.4.3.3
+ */
+ clk_rate = clk_get_rate(priv->clk);
+ fsource = DIV_ROUND_UP_ULL((u64)clk_rate * DEF_FSOURCE,
+ NSEC_PER_SEC) + 1;
+ strobe_prog = DIV_ROUND_CLOSEST_ULL((u64)clk_rate * DEF_STROBE_PROG,
+ NSEC_PER_SEC) + 1;
+
+ timing = strobe_prog & 0x00000FFF;
+ timing |= (fsource << 12) & 0x000FF000;
+
+ writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
+}
+
static int imx_ocotp_write(void *context, unsigned int offset, void *val,
size_t bytes)
{
@@ -167,11 +224,9 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
u32 *buf = val;
int ret;
- unsigned long clk_rate = 0;
- unsigned long strobe_read, relax, strobe_prog;
- u32 timing = 0;
u32 ctrl;
u8 waddr;
+ u8 word = 0;
/* allow only writing one complete OTP word at a time */
if ((bytes != priv->config->word_size) ||
@@ -187,23 +242,8 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
return ret;
}
- /* 47.3.1.3.1
- * Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX]
- * fields with timing values to match the current frequency of the
- * ipg_clk. OTP writes will work at maximum bus frequencies as long
- * as the HW_OCOTP_TIMING parameters are set correctly.
- */
- clk_rate = clk_get_rate(priv->clk);
-
- relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
- strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
- strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
-
- timing = strobe_prog & 0x00000FFF;
- timing |= (relax << 12) & 0x0000F000;
- timing |= (strobe_read << 16) & 0x003F0000;
-
- writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
+ /* Setup the write timing values */
+ priv->params->set_timing(priv);
/* 47.3.1.3.2
* Check that HW_OCOTP_CTRL[BUSY] and HW_OCOTP_CTRL[ERROR] are clear.
@@ -224,8 +264,23 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
* description. Both the unlock code and address can be written in the
* same operation.
*/
- /* OTP write/read address specifies one of 128 word address locations */
- waddr = offset / 4;
+ if (priv->params->bank_address_words != 0) {
+ /*
+ * In banked/i.MX7 mode the OTP register bank goes into waddr
+ * see i.MX 7Solo Applications Processor Reference Manual, Rev.
+ * 0.1 section 6.4.3.1
+ */
+ offset = offset / priv->config->word_size;
+ waddr = offset / priv->params->bank_address_words;
+ word = offset & (priv->params->bank_address_words - 1);
+ } else {
+ /*
+ * Non-banked i.MX6 mode.
+ * OTP write/read address specifies one of 128 word address
+ * locations
+ */
+ waddr = offset / 4;
+ }
ctrl = readl(priv->base + IMX_OCOTP_ADDR_CTRL);
ctrl &= ~IMX_OCOTP_BM_CTRL_ADDR;
@@ -251,8 +306,43 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
* shift right (with zero fill). This shifting is required to program
* the OTP serially. During the write operation, HW_OCOTP_DATA cannot be
* modified.
+ * Note: on i.MX7 there are four data fields to write for banked write
+ * with the fuse blowing operation only taking place after data0
+ * has been written. This is why data0 must always be the last
+ * register written.
*/
- writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA);
+ if (priv->params->bank_address_words != 0) {
+ /* Banked/i.MX7 mode */
+ switch (word) {
+ case 0:
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ case 1:
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ case 2:
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ case 3:
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ }
+ } else {
+ /* Non-banked i.MX6 mode */
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0);
+ }
/* 47.4.1.4.5
* Once complete, the controller will clear BUSY. A write request to a
@@ -303,17 +393,46 @@ static struct nvmem_config imx_ocotp_nvmem_config = {
.read_only = false,
.word_size = 4,
.stride = 4,
- .owner = THIS_MODULE,
.reg_read = imx_ocotp_read,
.reg_write = imx_ocotp_write,
};
+static const struct ocotp_params imx6q_params = {
+ .nregs = 128,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
+static const struct ocotp_params imx6sl_params = {
+ .nregs = 64,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
+static const struct ocotp_params imx6sx_params = {
+ .nregs = 128,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
+static const struct ocotp_params imx6ul_params = {
+ .nregs = 128,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
+static const struct ocotp_params imx7d_params = {
+ .nregs = 64,
+ .bank_address_words = 4,
+ .set_timing = imx_ocotp_set_imx7_timing,
+};
+
static const struct of_device_id imx_ocotp_dt_ids[] = {
- { .compatible = "fsl,imx6q-ocotp", (void *)128 },
- { .compatible = "fsl,imx6sl-ocotp", (void *)64 },
- { .compatible = "fsl,imx6sx-ocotp", (void *)128 },
- { .compatible = "fsl,imx6ul-ocotp", (void *)128 },
- { .compatible = "fsl,imx7d-ocotp", (void *)64 },
+ { .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
+ { .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
+ { .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params },
+ { .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params },
+ { .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
@@ -342,8 +461,8 @@ static int imx_ocotp_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
of_id = of_match_device(imx_ocotp_dt_ids, dev);
- priv->nregs = (unsigned long)of_id->data;
- imx_ocotp_nvmem_config.size = 4 * priv->nregs;
+ priv->params = of_device_get_match_data(&pdev->dev);
+ imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
imx_ocotp_nvmem_config.dev = dev;
imx_ocotp_nvmem_config.priv = priv;
priv->config = &imx_ocotp_nvmem_config;
@@ -375,5 +494,5 @@ static struct platform_driver imx_ocotp_driver = {
module_platform_driver(imx_ocotp_driver);
MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
-MODULE_DESCRIPTION("i.MX6 OCOTP fuse box driver");
+MODULE_DESCRIPTION("i.MX6/i.MX7 OCOTP fuse box driver");
MODULE_LICENSE("GPL v2");
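To make the banked write path above concrete: with the imx7d parameters (word_size 4, bank_address_words 4), a byte offset is first reduced to an OTP word index and then split into the bank address written to CTRL and the word selecting which DATA register receives the value. A standalone sketch of that arithmetic, not driver code:

#include <linux/types.h>

/* word_size = 4 and bank_address_words = 4 match the imx7d_params above */
static void imx7_split_offset(unsigned int byte_offset, u8 *waddr, u8 *word)
{
	unsigned int idx = byte_offset / 4;	/* OTP word index */

	*waddr = idx / 4;			/* bank address programmed into CTRL */
	*word = idx & 3;			/* selects DATA0..DATA3 for the value */
}

/* example: byte_offset 0x2c -> idx 11 -> waddr 2, word 3, value written to DATA3 */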
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
index 6c7e2c424a4e..b1af966206a6 100644
--- a/drivers/nvmem/lpc18xx_eeprom.c
+++ b/drivers/nvmem/lpc18xx_eeprom.c
@@ -159,7 +159,6 @@ static struct nvmem_config lpc18xx_nvmem_config = {
.word_size = 4,
.reg_read = lpc18xx_eeprom_read,
.reg_write = lpc18xx_eeprom_gather_write,
- .owner = THIS_MODULE,
};
static int lpc18xx_eeprom_probe(struct platform_device *pdev)
diff --git a/drivers/nvmem/lpc18xx_otp.c b/drivers/nvmem/lpc18xx_otp.c
index be8d07403ffc..95268db155e9 100644
--- a/drivers/nvmem/lpc18xx_otp.c
+++ b/drivers/nvmem/lpc18xx_otp.c
@@ -64,7 +64,6 @@ static struct nvmem_config lpc18xx_otp_nvmem_config = {
.read_only = true,
.word_size = LPC18XX_OTP_WORD_SIZE,
.stride = LPC18XX_OTP_WORD_SIZE,
- .owner = THIS_MODULE,
.reg_read = lpc18xx_otp_read,
};
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
index 70bfc9839bb2..a43c68f90937 100644
--- a/drivers/nvmem/meson-efuse.c
+++ b/drivers/nvmem/meson-efuse.c
@@ -1,5 +1,5 @@
/*
- * Amlogic eFuse Driver
+ * Amlogic Meson GX eFuse Driver
*
* Copyright (c) 2016 Endless Computers, Inc.
* Author: Carlo Caione <carlo@endlessm.com>
@@ -37,7 +37,6 @@ static int meson_efuse_read(void *context, unsigned int offset,
static struct nvmem_config econfig = {
.name = "meson-efuse",
- .owner = THIS_MODULE,
.stride = 1,
.word_size = 1,
.read_only = true,
@@ -89,5 +88,5 @@ static struct platform_driver meson_efuse_driver = {
module_platform_driver(meson_efuse_driver);
MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
-MODULE_DESCRIPTION("Amlogic Meson NVMEM driver");
+MODULE_DESCRIPTION("Amlogic Meson GX NVMEM driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
new file mode 100644
index 000000000000..a346b4923550
--- /dev/null
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -0,0 +1,265 @@
+/*
+ * Amlogic Meson6, Meson8 and Meson8b eFuse Driver
+ *
+ * Copyright (c) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#define MESON_MX_EFUSE_CNTL1 0x04
+#define MESON_MX_EFUSE_CNTL1_PD_ENABLE BIT(27)
+#define MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY BIT(26)
+#define MESON_MX_EFUSE_CNTL1_AUTO_RD_START BIT(25)
+#define MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE BIT(24)
+#define MESON_MX_EFUSE_CNTL1_BYTE_WR_DATA GENMASK(23, 16)
+#define MESON_MX_EFUSE_CNTL1_AUTO_WR_BUSY BIT(14)
+#define MESON_MX_EFUSE_CNTL1_AUTO_WR_START BIT(13)
+#define MESON_MX_EFUSE_CNTL1_AUTO_WR_ENABLE BIT(12)
+#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET BIT(11)
+#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK GENMASK(10, 0)
+
+#define MESON_MX_EFUSE_CNTL2 0x08
+
+#define MESON_MX_EFUSE_CNTL4 0x10
+#define MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE BIT(10)
+
+struct meson_mx_efuse_platform_data {
+ const char *name;
+ unsigned int word_size;
+};
+
+struct meson_mx_efuse {
+ void __iomem *base;
+ struct clk *core_clk;
+ struct nvmem_device *nvmem;
+ struct nvmem_config config;
+};
+
+static void meson_mx_efuse_mask_bits(struct meson_mx_efuse *efuse, u32 reg,
+ u32 mask, u32 set)
+{
+ u32 data;
+
+ data = readl(efuse->base + reg);
+ data &= ~mask;
+ data |= (set & mask);
+
+ writel(data, efuse->base + reg);
+}
+
+static int meson_mx_efuse_hw_enable(struct meson_mx_efuse *efuse)
+{
+ int err;
+
+ err = clk_prepare_enable(efuse->core_clk);
+ if (err)
+ return err;
+
+ /* power up the efuse */
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_PD_ENABLE, 0);
+
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL4,
+ MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE, 0);
+
+ return 0;
+}
+
+static void meson_mx_efuse_hw_disable(struct meson_mx_efuse *efuse)
+{
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_PD_ENABLE,
+ MESON_MX_EFUSE_CNTL1_PD_ENABLE);
+
+ clk_disable_unprepare(efuse->core_clk);
+}
+
+static int meson_mx_efuse_read_addr(struct meson_mx_efuse *efuse,
+ unsigned int addr, u32 *value)
+{
+ int err;
+ u32 regval;
+
+ /* write the address to read */
+ regval = FIELD_PREP(MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, addr);
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, regval);
+
+ /* inform the hardware that we changed the address */
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET);
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET, 0);
+
+ /* start the read process */
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_START,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_START);
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_START, 0);
+
+ /*
+ * perform a dummy read to ensure that the HW has the RD_BUSY bit set
+ * when polling for the status below.
+ */
+ readl(efuse->base + MESON_MX_EFUSE_CNTL1);
+
+ err = readl_poll_timeout_atomic(efuse->base + MESON_MX_EFUSE_CNTL1,
+ regval,
+ (!(regval & MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY)),
+ 1, 1000);
+ if (err) {
+ dev_err(efuse->config.dev,
+ "Timeout while reading efuse address %u\n", addr);
+ return err;
+ }
+
+ *value = readl(efuse->base + MESON_MX_EFUSE_CNTL2);
+
+ return 0;
+}
+
+static int meson_mx_efuse_read(void *context, unsigned int offset,
+ void *buf, size_t bytes)
+{
+ struct meson_mx_efuse *efuse = context;
+ u32 tmp;
+ int err, i, addr;
+
+ err = meson_mx_efuse_hw_enable(efuse);
+ if (err)
+ return err;
+
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE);
+
+ for (i = offset; i < offset + bytes; i += efuse->config.word_size) {
+ addr = i / efuse->config.word_size;
+
+ err = meson_mx_efuse_read_addr(efuse, addr, &tmp);
+ if (err)
+ break;
+
+ memcpy(buf + i, &tmp, efuse->config.word_size);
+ }
+
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE, 0);
+
+ meson_mx_efuse_hw_disable(efuse);
+
+ return err;
+}
+
+static const struct meson_mx_efuse_platform_data meson6_efuse_data = {
+ .name = "meson6-efuse",
+ .word_size = 1,
+};
+
+static const struct meson_mx_efuse_platform_data meson8_efuse_data = {
+ .name = "meson8-efuse",
+ .word_size = 4,
+};
+
+static const struct meson_mx_efuse_platform_data meson8b_efuse_data = {
+ .name = "meson8b-efuse",
+ .word_size = 4,
+};
+
+static const struct of_device_id meson_mx_efuse_match[] = {
+ { .compatible = "amlogic,meson6-efuse", .data = &meson6_efuse_data },
+ { .compatible = "amlogic,meson8-efuse", .data = &meson8_efuse_data },
+ { .compatible = "amlogic,meson8b-efuse", .data = &meson8b_efuse_data },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, meson_mx_efuse_match);
+
+static int meson_mx_efuse_probe(struct platform_device *pdev)
+{
+ const struct meson_mx_efuse_platform_data *drvdata;
+ struct meson_mx_efuse *efuse;
+ struct resource *res;
+
+ drvdata = of_device_get_match_data(&pdev->dev);
+ if (!drvdata)
+ return -EINVAL;
+
+ efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ efuse->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(efuse->base))
+ return PTR_ERR(efuse->base);
+
+ efuse->config.name = devm_kstrdup(&pdev->dev, drvdata->name,
+ GFP_KERNEL);
+ efuse->config.owner = THIS_MODULE;
+ efuse->config.dev = &pdev->dev;
+ efuse->config.priv = efuse;
+ efuse->config.stride = drvdata->word_size;
+ efuse->config.word_size = drvdata->word_size;
+ efuse->config.size = SZ_512;
+ efuse->config.read_only = true;
+ efuse->config.reg_read = meson_mx_efuse_read;
+
+ efuse->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(efuse->core_clk)) {
+ dev_err(&pdev->dev, "Failed to get core clock\n");
+ return PTR_ERR(efuse->core_clk);
+ }
+
+ efuse->nvmem = nvmem_register(&efuse->config);
+ if (IS_ERR(efuse->nvmem))
+ return PTR_ERR(efuse->nvmem);
+
+ platform_set_drvdata(pdev, efuse);
+
+ return 0;
+}
+
+static int meson_mx_efuse_remove(struct platform_device *pdev)
+{
+ struct meson_mx_efuse *efuse = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(efuse->nvmem);
+}
+
+static struct platform_driver meson_mx_efuse_driver = {
+ .probe = meson_mx_efuse_probe,
+ .remove = meson_mx_efuse_remove,
+ .driver = {
+ .name = "meson-mx-efuse",
+ .of_match_table = meson_mx_efuse_match,
+ },
+};
+
+module_platform_driver(meson_mx_efuse_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Amlogic Meson MX eFuse NVMEM driver");
+MODULE_LICENSE("GPL v2");
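Like the other providers in this directory, the new driver only exposes data through the generic nvmem consumer interface. A hedged sketch of how a consumer might read one of its cells (the cell name and caller are hypothetical):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int foo_read_calibration(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	u8 *value;

	cell = nvmem_cell_get(dev, "temperature_calib");	/* hypothetical cell */
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	value = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(value))
		return PTR_ERR(value);

	dev_info(dev, "read %zu byte(s) from the efuse cell\n", len);
	kfree(value);
	return 0;
}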
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index 32fd572e18c5..9ee3479cfc7b 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -18,15 +18,19 @@
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
+struct mtk_efuse_priv {
+ void __iomem *base;
+};
+
static int mtk_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
- void __iomem *base = context;
+ struct mtk_efuse_priv *priv = context;
u32 *val = _val;
int i = 0, words = bytes / 4;
while (words--)
- *val++ = readl(base + reg + (i++ * 4));
+ *val++ = readl(priv->base + reg + (i++ * 4));
return 0;
}
@@ -34,12 +38,12 @@ static int mtk_reg_read(void *context,
static int mtk_reg_write(void *context,
unsigned int reg, void *_val, size_t bytes)
{
- void __iomem *base = context;
+ struct mtk_efuse_priv *priv = context;
u32 *val = _val;
int i = 0, words = bytes / 4;
while (words--)
- writel(*val++, base + reg + (i++ * 4));
+ writel(*val++, priv->base + reg + (i++ * 4));
return 0;
}
@@ -49,27 +53,26 @@ static int mtk_efuse_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
- struct nvmem_config *econfig;
- void __iomem *base;
+ struct nvmem_config econfig = {};
+ struct mtk_efuse_priv *priv;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL);
- if (!econfig)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- econfig->stride = 4;
- econfig->word_size = 4;
- econfig->reg_read = mtk_reg_read;
- econfig->reg_write = mtk_reg_write;
- econfig->size = resource_size(res);
- econfig->priv = base;
- econfig->dev = dev;
- econfig->owner = THIS_MODULE;
- nvmem = nvmem_register(econfig);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ econfig.stride = 4;
+ econfig.word_size = 4;
+ econfig.reg_read = mtk_reg_read;
+ econfig.reg_write = mtk_reg_write;
+ econfig.size = resource_size(res);
+ econfig.priv = priv;
+ econfig.dev = dev;
+ nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index d26dd03cec80..7018e2ef5714 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -118,7 +118,6 @@ static struct nvmem_config ocotp_config = {
.name = "mxs-ocotp",
.stride = 16,
.word_size = 4,
- .owner = THIS_MODULE,
.reg_read = mxs_ocotp_read,
};
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 2bdb6c389328..cb3b48b47d64 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -17,15 +17,19 @@
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
+struct qfprom_priv {
+ void __iomem *base;
+};
+
static int qfprom_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
- void __iomem *base = context;
+ struct qfprom_priv *priv = context;
u8 *val = _val;
int i = 0, words = bytes;
while (words--)
- *val++ = readb(base + reg + i++);
+ *val++ = readb(priv->base + reg + i++);
return 0;
}
@@ -33,12 +37,12 @@ static int qfprom_reg_read(void *context,
static int qfprom_reg_write(void *context,
unsigned int reg, void *_val, size_t bytes)
{
- void __iomem *base = context;
+ struct qfprom_priv *priv = context;
u8 *val = _val;
int i = 0, words = bytes;
while (words--)
- writeb(*val++, base + reg + i++);
+ writeb(*val++, priv->base + reg + i++);
return 0;
}
@@ -52,7 +56,6 @@ static int qfprom_remove(struct platform_device *pdev)
static struct nvmem_config econfig = {
.name = "qfprom",
- .owner = THIS_MODULE,
.stride = 1,
.word_size = 1,
.reg_read = qfprom_reg_read,
@@ -64,16 +67,20 @@ static int qfprom_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
- void __iomem *base;
+ struct qfprom_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
econfig.size = resource_size(res);
econfig.dev = dev;
- econfig.priv = base;
+ econfig.priv = priv;
nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index 63e3eb55f3ac..123de77ca5d6 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -149,7 +149,6 @@ static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
static struct nvmem_config econfig = {
.name = "rockchip-efuse",
- .owner = THIS_MODULE,
.stride = 1,
.word_size = 1,
.read_only = true,
@@ -178,6 +177,10 @@ static const struct of_device_id rockchip_efuse_match[] = {
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
+ .compatible = "rockchip,rk3368-efuse",
+ .data = (void *)&rockchip_rk3288_efuse_read,
+ },
+ {
.compatible = "rockchip,rk3399-efuse",
.data = (void *)&rockchip_rk3399_efuse_read,
},
diff --git a/drivers/nvmem/snvs_lpgpr.c b/drivers/nvmem/snvs_lpgpr.c
new file mode 100644
index 000000000000..e5c2a4a17f03
--- /dev/null
+++ b/drivers/nvmem/snvs_lpgpr.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
+ * Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define IMX6Q_SNVS_HPLR 0x00
+#define IMX6Q_GPR_SL BIT(5)
+#define IMX6Q_SNVS_LPLR 0x34
+#define IMX6Q_GPR_HL BIT(5)
+#define IMX6Q_SNVS_LPGPR 0x68
+
+struct snvs_lpgpr_cfg {
+ int offset;
+ int offset_hplr;
+ int offset_lplr;
+};
+
+struct snvs_lpgpr_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct nvmem_config cfg;
+ const struct snvs_lpgpr_cfg *dcfg;
+};
+
+static const struct snvs_lpgpr_cfg snvs_lpgpr_cfg_imx6q = {
+ .offset = IMX6Q_SNVS_LPGPR,
+ .offset_hplr = IMX6Q_SNVS_HPLR,
+ .offset_lplr = IMX6Q_SNVS_LPLR,
+};
+
+static int snvs_lpgpr_write(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct snvs_lpgpr_priv *priv = context;
+ const struct snvs_lpgpr_cfg *dcfg = priv->dcfg;
+ unsigned int lock_reg;
+ int ret;
+
+ ret = regmap_read(priv->regmap, dcfg->offset_hplr, &lock_reg);
+ if (ret < 0)
+ return ret;
+
+ if (lock_reg & IMX6Q_GPR_SL)
+ return -EPERM;
+
+ ret = regmap_read(priv->regmap, dcfg->offset_lplr, &lock_reg);
+ if (ret < 0)
+ return ret;
+
+ if (lock_reg & IMX6Q_GPR_HL)
+ return -EPERM;
+
+ return regmap_bulk_write(priv->regmap, dcfg->offset + offset, val,
+ bytes / 4);
+}
+
+static int snvs_lpgpr_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct snvs_lpgpr_priv *priv = context;
+ const struct snvs_lpgpr_cfg *dcfg = priv->dcfg;
+
+ return regmap_bulk_read(priv->regmap, dcfg->offset + offset,
+ val, bytes / 4);
+}
+
+static int snvs_lpgpr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *syscon_node;
+ struct snvs_lpgpr_priv *priv;
+ struct nvmem_config *cfg;
+ struct nvmem_device *nvmem;
+ const struct snvs_lpgpr_cfg *dcfg;
+
+ if (!node)
+ return -ENOENT;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dcfg = of_device_get_match_data(dev);
+ if (!dcfg)
+ return -EINVAL;
+
+ syscon_node = of_get_parent(node);
+ if (!syscon_node)
+ return -ENODEV;
+
+ priv->regmap = syscon_node_to_regmap(syscon_node);
+ of_node_put(syscon_node);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->dcfg = dcfg;
+
+ cfg = &priv->cfg;
+ cfg->priv = priv;
+ cfg->name = dev_name(dev);
+ cfg->dev = dev;
+ cfg->stride = 4,
+ cfg->word_size = 4,
+ cfg->size = 4,
+ cfg->owner = THIS_MODULE,
+ cfg->reg_read = snvs_lpgpr_read,
+ cfg->reg_write = snvs_lpgpr_write,
+
+ nvmem = nvmem_register(cfg);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int snvs_lpgpr_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id snvs_lpgpr_dt_ids[] = {
+ { .compatible = "fsl,imx6q-snvs-lpgpr", .data = &snvs_lpgpr_cfg_imx6q },
+ { .compatible = "fsl,imx6ul-snvs-lpgpr",
+ .data = &snvs_lpgpr_cfg_imx6q },
+ { },
+};
+MODULE_DEVICE_TABLE(of, snvs_lpgpr_dt_ids);
+
+static struct platform_driver snvs_lpgpr_driver = {
+ .probe = snvs_lpgpr_probe,
+ .remove = snvs_lpgpr_remove,
+ .driver = {
+ .name = "snvs_lpgpr",
+ .of_match_table = snvs_lpgpr_dt_ids,
+ },
+};
+module_platform_driver(snvs_lpgpr_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
+MODULE_DESCRIPTION("Low Power General Purpose Register in i.MX6 Secure Non-Volatile Storage");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index 0d6648be93b8..99bd54d85fcb 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -40,7 +40,6 @@ static struct nvmem_config econfig = {
.read_only = true,
.stride = 4,
.word_size = 1,
- .owner = THIS_MODULE,
};
struct sunxi_sid_cfg {
@@ -199,10 +198,16 @@ static const struct sunxi_sid_cfg sun8i_h3_cfg = {
.need_register_readout = true,
};
+static const struct sunxi_sid_cfg sun50i_a64_cfg = {
+ .value_offset = 0x200,
+ .size = 0x100,
+};
+
static const struct of_device_id sunxi_sid_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg },
{ .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg },
{ .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg },
+ { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
diff --git a/drivers/nvmem/uniphier-efuse.c b/drivers/nvmem/uniphier-efuse.c
new file mode 100644
index 000000000000..9d278b4e1dc7
--- /dev/null
+++ b/drivers/nvmem/uniphier-efuse.c
@@ -0,0 +1,97 @@
+/*
+ * UniPhier eFuse driver
+ *
+ * Copyright (C) 2017 Socionext Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+struct uniphier_efuse_priv {
+ void __iomem *base;
+};
+
+static int uniphier_reg_read(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ struct uniphier_efuse_priv *priv = context;
+ u32 *val = _val;
+ int offs;
+
+ for (offs = 0; offs < bytes; offs += sizeof(u32))
+ *val++ = readl(priv->base + reg + offs);
+
+ return 0;
+}
+
+static int uniphier_efuse_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct nvmem_device *nvmem;
+ struct nvmem_config econfig = {};
+ struct uniphier_efuse_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ econfig.stride = 4;
+ econfig.word_size = 4;
+ econfig.read_only = true;
+ econfig.reg_read = uniphier_reg_read;
+ econfig.size = resource_size(res);
+ econfig.priv = priv;
+ econfig.dev = dev;
+ nvmem = nvmem_register(&econfig);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int uniphier_efuse_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id uniphier_efuse_of_match[] = {
+ { .compatible = "socionext,uniphier-efuse",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, uniphier_efuse_of_match);
+
+static struct platform_driver uniphier_efuse_driver = {
+ .probe = uniphier_efuse_probe,
+ .remove = uniphier_efuse_remove,
+ .driver = {
+ .name = "uniphier-efuse",
+ .of_match_table = uniphier_efuse_of_match,
+ },
+};
+module_platform_driver(uniphier_efuse_driver);
+
+MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
+MODULE_DESCRIPTION("UniPhier eFuse driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/vf610-ocotp.c b/drivers/nvmem/vf610-ocotp.c
index 72e4faabce29..5ae9e002f195 100644
--- a/drivers/nvmem/vf610-ocotp.c
+++ b/drivers/nvmem/vf610-ocotp.c
@@ -206,7 +206,6 @@ static int vf610_ocotp_read(void *context, unsigned int offset,
static struct nvmem_config ocotp_config = {
.name = "ocotp",
- .owner = THIS_MODULE,
.stride = 4,
.word_size = 4,
.reg_read = vf610_ocotp_read,
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index ba7b034b2b91..ad9a9578f9c4 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -46,10 +46,14 @@ config OF_EARLY_FLATTREE
config OF_PROMTREE
bool
+config OF_KOBJ
+ def_bool SYSFS
+
# Hardly any platforms need this. It is safe to select, but only do so if you
# need it.
config OF_DYNAMIC
bool "Support for dynamic device trees" if OF_UNITTEST
+ select OF_KOBJ
help
On some platforms, the device tree can be manipulated at runtime.
While this option is selected automatically on such platforms, you
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 8f19d342eed8..63a4be62ce19 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-y = base.o device.o platform.o property.o
+obj-$(CONFIG_OF_KOBJ) += kobj.o
obj-$(CONFIG_OF_DYNAMIC) += dynamic.o
obj-$(CONFIG_OF_FLATTREE) += fdt.o
obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 792722e7d458..fa6cabfc3cb9 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -232,8 +232,8 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
-int of_pci_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
+static int parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node, const char *name)
{
const int na = 3, ns = 2;
int rlen;
@@ -242,7 +242,7 @@ int of_pci_range_parser_init(struct of_pci_range_parser *parser,
parser->pna = of_n_addr_cells(node);
parser->np = parser->pna + na + ns;
- parser->range = of_get_property(node, "ranges", &rlen);
+ parser->range = of_get_property(node, name, &rlen);
if (parser->range == NULL)
return -ENOENT;
@@ -250,8 +250,21 @@ int of_pci_range_parser_init(struct of_pci_range_parser *parser,
return 0;
}
+
+int of_pci_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ return parser_init(parser, node, "ranges");
+}
EXPORT_SYMBOL_GPL(of_pci_range_parser_init);
+int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ return parser_init(parser, node, "dma-ranges");
+}
+EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);
+
struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
struct of_pci_range *range)
{
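The new of_pci_dma_range_parser_init() is the "dma-ranges" counterpart of of_pci_range_parser_init() and plugs into the existing for_each_of_pci_range() iterator. A sketch of the assumed usage (the walker function is hypothetical):

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/printk.h>

static void foo_dump_dma_ranges(struct device_node *np)
{
	struct of_pci_range_parser parser;
	struct of_pci_range range;

	if (of_pci_dma_range_parser_init(&parser, np))
		return;		/* node has no "dma-ranges" property */

	for_each_of_pci_range(&parser, &range)
		pr_info("dma-range: pci %#llx -> cpu %#llx, size %#llx\n",
			(unsigned long long)range.pci_addr,
			(unsigned long long)range.cpu_addr,
			(unsigned long long)range.size);
}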
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 63897531cd75..f2e649ff746f 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -95,108 +95,6 @@ int __weak of_node_to_nid(struct device_node *np)
}
#endif
-#ifndef CONFIG_OF_DYNAMIC
-static void of_node_release(struct kobject *kobj)
-{
- /* Without CONFIG_OF_DYNAMIC, no nodes gets freed */
-}
-#endif /* CONFIG_OF_DYNAMIC */
-
-struct kobj_type of_node_ktype = {
- .release = of_node_release,
-};
-
-static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t offset, size_t count)
-{
- struct property *pp = container_of(bin_attr, struct property, attr);
- return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
-}
-
-/* always return newly allocated name, caller must free after use */
-static const char *safe_name(struct kobject *kobj, const char *orig_name)
-{
- const char *name = orig_name;
- struct kernfs_node *kn;
- int i = 0;
-
- /* don't be a hero. After 16 tries give up */
- while (i < 16 && (kn = sysfs_get_dirent(kobj->sd, name))) {
- sysfs_put(kn);
- if (name != orig_name)
- kfree(name);
- name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
- }
-
- if (name == orig_name) {
- name = kstrdup(orig_name, GFP_KERNEL);
- } else {
- pr_warn("Duplicate name in %s, renamed to \"%s\"\n",
- kobject_name(kobj), name);
- }
- return name;
-}
-
-int __of_add_property_sysfs(struct device_node *np, struct property *pp)
-{
- int rc;
-
- /* Important: Don't leak passwords */
- bool secure = strncmp(pp->name, "security-", 9) == 0;
-
- if (!IS_ENABLED(CONFIG_SYSFS))
- return 0;
-
- if (!of_kset || !of_node_is_attached(np))
- return 0;
-
- sysfs_bin_attr_init(&pp->attr);
- pp->attr.attr.name = safe_name(&np->kobj, pp->name);
- pp->attr.attr.mode = secure ? 0400 : 0444;
- pp->attr.size = secure ? 0 : pp->length;
- pp->attr.read = of_node_property_read;
-
- rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
- WARN(rc, "error adding attribute %s to node %pOF\n", pp->name, np);
- return rc;
-}
-
-int __of_attach_node_sysfs(struct device_node *np)
-{
- const char *name;
- struct kobject *parent;
- struct property *pp;
- int rc;
-
- if (!IS_ENABLED(CONFIG_SYSFS))
- return 0;
-
- if (!of_kset)
- return 0;
-
- np->kobj.kset = of_kset;
- if (!np->parent) {
- /* Nodes without parents are new top level trees */
- name = safe_name(&of_kset->kobj, "base");
- parent = NULL;
- } else {
- name = safe_name(&np->parent->kobj, kbasename(np->full_name));
- parent = &np->parent->kobj;
- }
- if (!name)
- return -ENOMEM;
- rc = kobject_add(&np->kobj, parent, "%s", name);
- kfree(name);
- if (rc)
- return rc;
-
- for_each_property_of_node(np, pp)
- __of_add_property_sysfs(np, pp);
-
- return 0;
-}
-
void __init of_core_init(void)
{
struct device_node *np;
@@ -760,7 +658,7 @@ struct device_node *of_get_child_by_name(const struct device_node *node,
}
EXPORT_SYMBOL(of_get_child_by_name);
-static struct device_node *__of_find_node_by_path(struct device_node *parent,
+struct device_node *__of_find_node_by_path(struct device_node *parent,
const char *path)
{
struct device_node *child;
@@ -1504,22 +1402,6 @@ int __of_remove_property(struct device_node *np, struct property *prop)
return 0;
}
-void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
-{
- sysfs_remove_bin_file(&np->kobj, &prop->attr);
- kfree(prop->attr.attr.name);
-}
-
-void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
-{
- if (!IS_ENABLED(CONFIG_SYSFS))
- return;
-
- /* at early boot, bail here and defer setup to of_init() */
- if (of_kset && of_node_is_attached(np))
- __of_sysfs_remove_bin_file(np, prop);
-}
-
/**
* of_remove_property - Remove a property from a node.
*
@@ -1579,21 +1461,6 @@ int __of_update_property(struct device_node *np, struct property *newprop,
return 0;
}
-void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
- struct property *oldprop)
-{
- if (!IS_ENABLED(CONFIG_SYSFS))
- return;
-
- /* At early boot, bail out and defer setup to of_init() */
- if (!of_kset)
- return;
-
- if (oldprop)
- __of_sysfs_remove_bin_file(np, oldprop);
- __of_add_property_sysfs(np, newprop);
-}
-
/*
* of_update_property - Update a property in a node, if the property does
* not exist, add it.
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 64b710265d39..25bddf9c9fe1 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -9,9 +9,7 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
-#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/amba/bus.h>
#include <asm/errno.h>
#include "of_private.h"
@@ -101,11 +99,7 @@ int of_dma_configure(struct device *dev, struct device_node *np)
* DMA configuration regardless of whether "dma-ranges" is
* correctly specified or not.
*/
- if (!dev_is_pci(dev) &&
-#ifdef CONFIG_ARM_AMBA
- dev->bus != &amba_bustype &&
-#endif
- dev->bus != &platform_bus_type)
+ if (!dev->bus->force_dma)
return ret == -ENODEV ? 0 : ret;
dma_addr = offset = 0;
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 301b6db2b48d..c454941b34ec 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -16,6 +16,11 @@
#include "of_private.h"
+static struct device_node *kobj_to_device_node(struct kobject *kobj)
+{
+ return container_of(kobj, struct device_node, kobj);
+}
+
/**
* of_node_get() - Increment refcount of a node
* @node: Node to inc refcount, NULL is supported to simplify writing of
@@ -43,28 +48,6 @@ void of_node_put(struct device_node *node)
}
EXPORT_SYMBOL(of_node_put);
-void __of_detach_node_sysfs(struct device_node *np)
-{
- struct property *pp;
-
- if (!IS_ENABLED(CONFIG_SYSFS))
- return;
-
- BUG_ON(!of_node_is_initialized(np));
- if (!of_kset)
- return;
-
- /* only remove properties if on sysfs */
- if (of_node_is_attached(np)) {
- for_each_property_of_node(np, pp)
- __of_sysfs_remove_bin_file(np, pp);
- kobject_del(&np->kobj);
- }
-
- /* finally remove the kobj_init ref */
- of_node_put(np);
-}
-
static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain);
int of_reconfig_notifier_register(struct notifier_block *nb)
@@ -315,6 +298,18 @@ int of_detach_node(struct device_node *np)
}
EXPORT_SYMBOL_GPL(of_detach_node);
+static void property_list_free(struct property *prop_list)
+{
+ struct property *prop, *next;
+
+ for (prop = prop_list; prop != NULL; prop = next) {
+ next = prop->next;
+ kfree(prop->name);
+ kfree(prop->value);
+ kfree(prop);
+ }
+}
+
/**
* of_node_release() - release a dynamically allocated node
* @kref: kref element of the node to be released
@@ -324,7 +319,6 @@ EXPORT_SYMBOL_GPL(of_detach_node);
void of_node_release(struct kobject *kobj)
{
struct device_node *node = kobj_to_device_node(kobj);
- struct property *prop = node->properties;
/* We should never be releasing nodes that haven't been detached. */
if (!of_node_check_flag(node, OF_DETACHED)) {
@@ -335,18 +329,9 @@ void of_node_release(struct kobject *kobj)
if (!of_node_check_flag(node, OF_DYNAMIC))
return;
- while (prop) {
- struct property *next = prop->next;
- kfree(prop->name);
- kfree(prop->value);
- kfree(prop);
- prop = next;
+ property_list_free(node->properties);
+ property_list_free(node->deadprops);
- if (!prop) {
- prop = node->deadprops;
- node->deadprops = NULL;
- }
- }
kfree(node->full_name);
kfree(node->data);
kfree(node);
@@ -508,11 +493,12 @@ static void __of_changeset_entry_invert(struct of_changeset_entry *ce,
}
}
-static void __of_changeset_entry_notify(struct of_changeset_entry *ce, bool revert)
+static int __of_changeset_entry_notify(struct of_changeset_entry *ce,
+ bool revert)
{
struct of_reconfig_data rd;
struct of_changeset_entry ce_inverted;
- int ret;
+ int ret = 0;
if (revert) {
__of_changeset_entry_invert(ce, &ce_inverted);
@@ -534,11 +520,12 @@ static void __of_changeset_entry_notify(struct of_changeset_entry *ce, bool reve
default:
pr_err("invalid devicetree changeset action: %i\n",
(int)ce->action);
- return;
+ ret = -EINVAL;
}
if (ret)
pr_err("changeset notifier error @%pOF\n", ce->np);
+ return ret;
}
static int __of_changeset_entry_apply(struct of_changeset_entry *ce)
@@ -672,32 +659,82 @@ void of_changeset_destroy(struct of_changeset *ocs)
}
EXPORT_SYMBOL_GPL(of_changeset_destroy);
-int __of_changeset_apply(struct of_changeset *ocs)
+/*
+ * Apply the changeset entries in @ocs.
+ * If apply fails, an attempt is made to revert the entries that were
+ * successfully applied.
+ *
+ * If multiple revert errors occur then only the final revert error is reported.
+ *
+ * Returns 0 on success, a negative error value in case of an error.
+ * If a revert error occurs, it is returned in *ret_revert.
+ */
+int __of_changeset_apply_entries(struct of_changeset *ocs, int *ret_revert)
{
struct of_changeset_entry *ce;
- int ret;
+ int ret, ret_tmp;
- /* perform the rest of the work */
pr_debug("changeset: applying...\n");
list_for_each_entry(ce, &ocs->entries, node) {
ret = __of_changeset_entry_apply(ce);
if (ret) {
pr_err("Error applying changeset (%d)\n", ret);
- list_for_each_entry_continue_reverse(ce, &ocs->entries, node)
- __of_changeset_entry_revert(ce);
+ list_for_each_entry_continue_reverse(ce, &ocs->entries,
+ node) {
+ ret_tmp = __of_changeset_entry_revert(ce);
+ if (ret_tmp)
+ *ret_revert = ret_tmp;
+ }
return ret;
}
}
- pr_debug("changeset: applied, emitting notifiers.\n");
+
+ return 0;
+}
+
+/*
+ * Returns 0 on success, a negative error value in case of an error.
+ *
+ * If multiple changeset entry notification errors occur then only the
+ * final notification error is reported.
+ */
+int __of_changeset_apply_notify(struct of_changeset *ocs)
+{
+ struct of_changeset_entry *ce;
+ int ret = 0, ret_tmp;
+
+ pr_debug("changeset: emitting notifiers.\n");
/* drop the global lock while emitting notifiers */
mutex_unlock(&of_mutex);
- list_for_each_entry(ce, &ocs->entries, node)
- __of_changeset_entry_notify(ce, 0);
+ list_for_each_entry(ce, &ocs->entries, node) {
+ ret_tmp = __of_changeset_entry_notify(ce, 0);
+ if (ret_tmp)
+ ret = ret_tmp;
+ }
mutex_lock(&of_mutex);
pr_debug("changeset: notifiers sent.\n");
- return 0;
+ return ret;
+}
+
+/*
+ * Returns 0 on success, a negative error value in case of an error.
+ *
+ * If a changeset entry apply fails, an attempt is made to revert any
+ * previous entries in the changeset. If any of the reverts fails,
+ * that failure is not reported. Thus the state of the device tree
+ * is unknown if an apply error occurs.
+ */
+static int __of_changeset_apply(struct of_changeset *ocs)
+{
+ int ret, ret_revert = 0;
+
+ ret = __of_changeset_apply_entries(ocs, &ret_revert);
+ if (!ret)
+ ret = __of_changeset_apply_notify(ocs);
+
+ return ret;
}
/**
@@ -724,31 +761,74 @@ int of_changeset_apply(struct of_changeset *ocs)
}
EXPORT_SYMBOL_GPL(of_changeset_apply);
-int __of_changeset_revert(struct of_changeset *ocs)
+/*
+ * Revert the changeset entries in @ocs.
+ * If revert fails, an attempt is made to re-apply the entries that were
+ * successfully removed.
+ *
+ * If multiple re-apply errors occur then only the final apply error is
+ * reported.
+ *
+ * Returns 0 on success, a negative error value in case of an error.
+ * If an apply error occurs, it is returned in *ret_apply.
+ */
+int __of_changeset_revert_entries(struct of_changeset *ocs, int *ret_apply)
{
struct of_changeset_entry *ce;
- int ret;
+ int ret, ret_tmp;
pr_debug("changeset: reverting...\n");
list_for_each_entry_reverse(ce, &ocs->entries, node) {
ret = __of_changeset_entry_revert(ce);
if (ret) {
pr_err("Error reverting changeset (%d)\n", ret);
- list_for_each_entry_continue(ce, &ocs->entries, node)
- __of_changeset_entry_apply(ce);
+ list_for_each_entry_continue(ce, &ocs->entries, node) {
+ ret_tmp = __of_changeset_entry_apply(ce);
+ if (ret_tmp)
+ *ret_apply = ret_tmp;
+ }
return ret;
}
}
- pr_debug("changeset: reverted, emitting notifiers.\n");
+
+ return 0;
+}
+
+/*
+ * If multiple changeset entry notification errors occur then only the
+ * final notification error is reported.
+ */
+int __of_changeset_revert_notify(struct of_changeset *ocs)
+{
+ struct of_changeset_entry *ce;
+ int ret = 0, ret_tmp;
+
+ pr_debug("changeset: emitting notifiers.\n");
/* drop the global lock while emitting notifiers */
mutex_unlock(&of_mutex);
- list_for_each_entry_reverse(ce, &ocs->entries, node)
- __of_changeset_entry_notify(ce, 1);
+ list_for_each_entry_reverse(ce, &ocs->entries, node) {
+ ret_tmp = __of_changeset_entry_notify(ce, 1);
+ if (ret_tmp)
+ ret = ret_tmp;
+ }
mutex_lock(&of_mutex);
pr_debug("changeset: notifiers sent.\n");
- return 0;
+ return ret;
+}
+
+static int __of_changeset_revert(struct of_changeset *ocs)
+{
+ int ret, ret_reply;
+
+ ret_reply = 0;
+ ret = __of_changeset_revert_entries(ocs, &ret_reply);
+
+ if (!ret)
+ ret = __of_changeset_revert_notify(ocs);
+
+ return ret;
}
/**
@@ -775,7 +855,7 @@ int of_changeset_revert(struct of_changeset *ocs)
EXPORT_SYMBOL_GPL(of_changeset_revert);
/**
- * of_changeset_action - Perform a changeset action
+ * of_changeset_action - Add an action to the tail of the changeset list
*
* @ocs: changeset pointer
* @action: action to perform
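Since of_changeset_apply() and of_changeset_revert() now propagate entry and notifier failures through the reworked helpers above, callers can meaningfully check their return values. A hedged sketch of tearing down a previously applied changeset under that assumption (the caller is hypothetical):

#include <linux/of.h>

static int foo_teardown_changeset(struct of_changeset *ocs)
{
	int ret;

	/* reverts the entries and runs the notifiers; errors now propagate */
	ret = of_changeset_revert(ocs);
	if (ret)
		return ret;

	of_changeset_destroy(ocs);
	return 0;
}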
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index ce30c9a588a4..4675e5ac4d11 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -132,6 +132,19 @@ bool of_fdt_is_big_endian(const void *blob, unsigned long node)
return false;
}
+static bool of_fdt_device_is_available(const void *blob, unsigned long node)
+{
+ const char *status = fdt_getprop(blob, node, "status", NULL);
+
+ if (!status)
+ return true;
+
+ if (!strcmp(status, "ok") || !strcmp(status, "okay"))
+ return true;
+
+ return false;
+}
+
/**
* of_fdt_match - Return true if node matches a list of compatible values
*/
@@ -266,74 +279,32 @@ static void populate_properties(const void *blob,
*pprev = NULL;
}
-static unsigned int populate_node(const void *blob,
- int offset,
- void **mem,
- struct device_node *dad,
- unsigned int fpsize,
- struct device_node **pnp,
- bool dryrun)
+static bool populate_node(const void *blob,
+ int offset,
+ void **mem,
+ struct device_node *dad,
+ struct device_node **pnp,
+ bool dryrun)
{
struct device_node *np;
const char *pathp;
unsigned int l, allocl;
- int new_format = 0;
pathp = fdt_get_name(blob, offset, &l);
if (!pathp) {
*pnp = NULL;
- return 0;
+ return false;
}
allocl = ++l;
- /* version 0x10 has a more compact unit name here instead of the full
- * path. we accumulate the full path size using "fpsize", we'll rebuild
- * it later. We detect this because the first character of the name is
- * not '/'.
- */
- if ((*pathp) != '/') {
- new_format = 1;
- if (fpsize == 0) {
- /* root node: special case. fpsize accounts for path
- * plus terminating zero. root node only has '/', so
- * fpsize should be 2, but we want to avoid the first
- * level nodes to have two '/' so we use fpsize 1 here
- */
- fpsize = 1;
- allocl = 2;
- l = 1;
- pathp = "";
- } else {
- /* account for '/' and path size minus terminal 0
- * already in 'l'
- */
- fpsize += l;
- allocl = fpsize;
- }
- }
-
np = unflatten_dt_alloc(mem, sizeof(struct device_node) + allocl,
__alignof__(struct device_node));
if (!dryrun) {
char *fn;
of_node_init(np);
np->full_name = fn = ((char *)np) + sizeof(*np);
- if (new_format) {
- /* rebuild full path for new format */
- if (dad && dad->parent) {
- strcpy(fn, dad->full_name);
-#ifdef DEBUG
- if ((strlen(fn) + l + 1) != allocl) {
- pr_debug("%s: p: %d, l: %d, a: %d\n",
- pathp, (int)strlen(fn),
- l, allocl);
- }
-#endif
- fn += strlen(fn);
- }
- *(fn++) = '/';
- }
+
memcpy(fn, pathp, l);
if (dad != NULL) {
@@ -355,7 +326,7 @@ static unsigned int populate_node(const void *blob,
}
*pnp = np;
- return fpsize;
+ return true;
}
static void reverse_nodes(struct device_node *parent)
@@ -399,7 +370,6 @@ static int unflatten_dt_nodes(const void *blob,
struct device_node *root;
int offset = 0, depth = 0, initial_depth = 0;
#define FDT_MAX_DEPTH 64
- unsigned int fpsizes[FDT_MAX_DEPTH];
struct device_node *nps[FDT_MAX_DEPTH];
void *base = mem;
bool dryrun = !base;
@@ -418,7 +388,6 @@ static int unflatten_dt_nodes(const void *blob,
depth = initial_depth = 1;
root = dad;
- fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0;
nps[depth] = dad;
for (offset = 0;
@@ -427,11 +396,12 @@ static int unflatten_dt_nodes(const void *blob,
if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
continue;
- fpsizes[depth+1] = populate_node(blob, offset, &mem,
- nps[depth],
- fpsizes[depth],
- &nps[depth+1], dryrun);
- if (!fpsizes[depth+1])
+ if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
+ !of_fdt_device_is_available(blob, offset))
+ continue;
+
+ if (!populate_node(blob, offset, &mem, nps[depth],
+ &nps[depth+1], dryrun))
return mem - base;
if (!dryrun && nodepp && !*nodepp)
@@ -467,6 +437,7 @@ static int unflatten_dt_nodes(const void *blob,
* @mynodes: The device_node tree created by the call
* @dt_alloc: An allocator that provides a virtual address to memory
* for the resulting tree
+ * @detached: if true set OF_DETACHED on @mynodes
*
* Returns NULL on failure or the memory chunk containing the unflattened
* device tree on success.
@@ -652,7 +623,6 @@ static int __init __fdt_scan_reserved_mem(unsigned long node, const char *uname,
int depth, void *data)
{
static int found;
- const char *status;
int err;
if (!found && depth == 1 && strcmp(uname, "reserved-memory") == 0) {
@@ -672,8 +642,7 @@ static int __init __fdt_scan_reserved_mem(unsigned long node, const char *uname,
return 1;
}
- status = of_get_flat_dt_prop(node, "status", NULL);
- if (status && strcmp(status, "okay") != 0 && strcmp(status, "ok") != 0)
+ if (!of_fdt_device_is_available(initial_boot_params, node))
return 0;
err = __reserved_mem_reserve_reg(node, uname);
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
new file mode 100644
index 000000000000..250fc7bb550f
--- /dev/null
+++ b/drivers/of/kobj.c
@@ -0,0 +1,164 @@
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "of_private.h"
+
+/* true when node is initialized */
+static int of_node_is_initialized(struct device_node *node)
+{
+ return node && node->kobj.state_initialized;
+}
+
+/* true when node is attached (i.e. present on sysfs) */
+int of_node_is_attached(struct device_node *node)
+{
+ return node && node->kobj.state_in_sysfs;
+}
+
+
+#ifndef CONFIG_OF_DYNAMIC
+static void of_node_release(struct kobject *kobj)
+{
+ /* Without CONFIG_OF_DYNAMIC, no nodes get freed */
+}
+#endif /* !CONFIG_OF_DYNAMIC */
+
+struct kobj_type of_node_ktype = {
+ .release = of_node_release,
+};
+
+static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct property *pp = container_of(bin_attr, struct property, attr);
+ return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
+}
+
+/* always return newly allocated name, caller must free after use */
+static const char *safe_name(struct kobject *kobj, const char *orig_name)
+{
+ const char *name = orig_name;
+ struct kernfs_node *kn;
+ int i = 0;
+
+ /* don't be a hero. After 16 tries give up */
+ while (i < 16 && (kn = sysfs_get_dirent(kobj->sd, name))) {
+ sysfs_put(kn);
+ if (name != orig_name)
+ kfree(name);
+ name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
+ }
+
+ if (name == orig_name) {
+ name = kstrdup(orig_name, GFP_KERNEL);
+ } else {
+ pr_warn("Duplicate name in %s, renamed to \"%s\"\n",
+ kobject_name(kobj), name);
+ }
+ return name;
+}
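For illustration only (the node names here are hypothetical, not from this patch): if two sibling nodes would both be exposed in sysfs as "phy", safe_name() keeps the first as "phy" and hands back "phy#1" for the second, emitting the duplicate-name warning above; after 16 collisions it stops renaming and returns the last candidate.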
+
+int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+{
+ int rc;
+
+ /* Important: Don't leak passwords */
+ bool secure = strncmp(pp->name, "security-", 9) == 0;
+
+ if (!IS_ENABLED(CONFIG_SYSFS))
+ return 0;
+
+ if (!of_kset || !of_node_is_attached(np))
+ return 0;
+
+ sysfs_bin_attr_init(&pp->attr);
+ pp->attr.attr.name = safe_name(&np->kobj, pp->name);
+ pp->attr.attr.mode = secure ? 0400 : 0444;
+ pp->attr.size = secure ? 0 : pp->length;
+ pp->attr.read = of_node_property_read;
+
+ rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
+ WARN(rc, "error adding attribute %s to node %pOF\n", pp->name, np);
+ return rc;
+}
+
+void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
+{
+ if (!IS_ENABLED(CONFIG_SYSFS))
+ return;
+
+ sysfs_remove_bin_file(&np->kobj, &prop->attr);
+ kfree(prop->attr.attr.name);
+}
+
+void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
+{
+ /* at early boot, bail here and defer setup to of_init() */
+ if (of_kset && of_node_is_attached(np))
+ __of_sysfs_remove_bin_file(np, prop);
+}
+
+void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
+ struct property *oldprop)
+{
+ /* At early boot, bail out and defer setup to of_init() */
+ if (!of_kset)
+ return;
+
+ if (oldprop)
+ __of_sysfs_remove_bin_file(np, oldprop);
+ __of_add_property_sysfs(np, newprop);
+}
+
+int __of_attach_node_sysfs(struct device_node *np)
+{
+ const char *name;
+ struct kobject *parent;
+ struct property *pp;
+ int rc;
+
+ if (!of_kset)
+ return 0;
+
+ np->kobj.kset = of_kset;
+ if (!np->parent) {
+ /* Nodes without parents are new top level trees */
+ name = safe_name(&of_kset->kobj, "base");
+ parent = NULL;
+ } else {
+ name = safe_name(&np->parent->kobj, kbasename(np->full_name));
+ parent = &np->parent->kobj;
+ }
+ if (!name)
+ return -ENOMEM;
+ rc = kobject_add(&np->kobj, parent, "%s", name);
+ kfree(name);
+ if (rc)
+ return rc;
+
+ for_each_property_of_node(np, pp)
+ __of_add_property_sysfs(np, pp);
+
+ return 0;
+}
+
+void __of_detach_node_sysfs(struct device_node *np)
+{
+ struct property *pp;
+
+ BUG_ON(!of_node_is_initialized(np));
+ if (!of_kset)
+ return;
+
+ /* only remove properties if on sysfs */
+ if (of_node_is_attached(np)) {
+ for_each_property_of_node(np, pp)
+ __of_sysfs_remove_bin_file(np, pp);
+ kobject_del(&np->kobj);
+ }
+
+ /* finally remove the kobj_init ref */
+ of_node_put(np);
+}
+
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 3ae12ffbf547..92a9a3687446 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -35,18 +35,16 @@ extern struct mutex of_mutex;
extern struct list_head aliases_lookup;
extern struct kset *of_kset;
-
-static inline struct device_node *kobj_to_device_node(struct kobject *kobj)
-{
- return container_of(kobj, struct device_node, kobj);
-}
-
#if defined(CONFIG_OF_DYNAMIC)
extern int of_property_notify(int action, struct device_node *np,
struct property *prop, struct property *old_prop);
extern void of_node_release(struct kobject *kobj);
-extern int __of_changeset_apply(struct of_changeset *ocs);
-extern int __of_changeset_revert(struct of_changeset *ocs);
+extern int __of_changeset_apply_entries(struct of_changeset *ocs,
+ int *ret_revert);
+extern int __of_changeset_apply_notify(struct of_changeset *ocs);
+extern int __of_changeset_revert_entries(struct of_changeset *ocs,
+ int *ret_apply);
+extern int __of_changeset_revert_notify(struct of_changeset *ocs);
#else /* CONFIG_OF_DYNAMIC */
static inline int of_property_notify(int action, struct device_node *np,
struct property *prop, struct property *old_prop)
@@ -55,6 +53,41 @@ static inline int of_property_notify(int action, struct device_node *np,
}
#endif /* CONFIG_OF_DYNAMIC */
+#if defined(CONFIG_OF_KOBJ)
+int of_node_is_attached(struct device_node *node);
+int __of_add_property_sysfs(struct device_node *np, struct property *pp);
+void __of_remove_property_sysfs(struct device_node *np, struct property *prop);
+void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
+ struct property *oldprop);
+int __of_attach_node_sysfs(struct device_node *np);
+void __of_detach_node_sysfs(struct device_node *np);
+#else
+static inline int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+{
+ return 0;
+}
+static inline void __of_remove_property_sysfs(struct device_node *np, struct property *prop) {}
+static inline void __of_update_property_sysfs(struct device_node *np,
+ struct property *newprop, struct property *oldprop) {}
+static inline int __of_attach_node_sysfs(struct device_node *np)
+{
+ return 0;
+}
+static inline void __of_detach_node_sysfs(struct device_node *np) {}
+#endif
+
+#if defined(CONFIG_OF_RESOLVE)
+int of_resolve_phandles(struct device_node *tree);
+#endif
+
+#if defined(CONFIG_OF_OVERLAY)
+void of_overlay_mutex_lock(void);
+void of_overlay_mutex_unlock(void);
+#else
+static inline void of_overlay_mutex_lock(void) {};
+static inline void of_overlay_mutex_unlock(void) {};
+#endif
+
#if defined(CONFIG_OF_UNITTEST) && defined(CONFIG_OF_OVERLAY)
extern void __init unittest_unflatten_overlay_base(void);
#else
@@ -77,6 +110,8 @@ extern void *__unflatten_device_tree(const void *blob,
struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags);
__printf(2, 3) struct device_node *__of_node_dup(const struct device_node *np, const char *fmt, ...);
+struct device_node *__of_find_node_by_path(struct device_node *parent,
+ const char *path);
struct device_node *__of_find_node_by_full_path(struct device_node *node,
const char *path);
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 8ecfee31ab6d..c150abb9049d 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -25,252 +25,378 @@
#include "of_private.h"
/**
- * struct of_overlay_info - Holds a single overlay info
+ * struct fragment - info about fragment nodes in overlay expanded device tree
* @target: target of the overlay operation
- * @overlay: pointer to the overlay contents node
- *
- * Holds a single overlay state, including all the overlay logs &
- * records.
+ * @overlay: pointer to the __overlay__ node
*/
-struct of_overlay_info {
+struct fragment {
struct device_node *target;
struct device_node *overlay;
- bool is_symbols_node;
};
/**
- * struct of_overlay - Holds a complete overlay transaction
- * @node: List on which we are located
- * @count: Count of ovinfo structures
- * @ovinfo_tab: Overlay info table (count sized)
- * @cset: Changeset to be used
- *
- * Holds a complete overlay transaction
+ * struct overlay_changeset - changeset and fragment bookkeeping for one overlay
+ * @id: changeset id, also returned to the caller of of_overlay_apply()
+ * @ovcs_list: list on which we are located
+ * @overlay_tree: expanded device tree that contains the fragment nodes
+ * @count: count of fragment structures
+ * @fragments: fragment nodes in the overlay expanded device tree
+ * @symbols_fragment: true if the last element of @fragments[] is the __symbols__ node
+ * @cset: changeset to apply fragments to live device tree
*/
-struct of_overlay {
+struct overlay_changeset {
int id;
- struct list_head node;
+ struct list_head ovcs_list;
+ struct device_node *overlay_tree;
int count;
- struct of_overlay_info *ovinfo_tab;
+ struct fragment *fragments;
+ bool symbols_fragment;
struct of_changeset cset;
};
-static int of_overlay_apply_one(struct of_overlay *ov,
- struct device_node *target, const struct device_node *overlay,
- bool is_symbols_node);
+/* flags are sticky - once set, do not reset */
+static int devicetree_state_flags;
+#define DTSF_APPLY_FAIL 0x01
+#define DTSF_REVERT_FAIL 0x02
+
+/*
+ * If a changeset apply or revert encounters an error, an attempt will
+ * be made to undo partial changes, but that attempt may fail. If the
+ * undo fails, we do not know the state of the devicetree.
+ */
+static int devicetree_corrupt(void)
+{
+ return devicetree_state_flags &
+ (DTSF_APPLY_FAIL | DTSF_REVERT_FAIL);
+}
+
+static int build_changeset_next_level(struct overlay_changeset *ovcs,
+ struct device_node *target_node,
+ const struct device_node *overlay_node);
+
+/*
+ * of_resolve_phandles() finds the largest phandle in the live tree.
+ * of_overlay_apply() may add a larger phandle to the live tree.
+ * Do not allow race between two overlays being applied simultaneously:
+ * mutex_lock(&of_overlay_phandle_mutex)
+ * of_resolve_phandles()
+ * of_overlay_apply()
+ * mutex_unlock(&of_overlay_phandle_mutex)
+ */
+static DEFINE_MUTEX(of_overlay_phandle_mutex);
+
+void of_overlay_mutex_lock(void)
+{
+ mutex_lock(&of_overlay_phandle_mutex);
+}
+
+void of_overlay_mutex_unlock(void)
+{
+ mutex_unlock(&of_overlay_phandle_mutex);
+}
-static BLOCKING_NOTIFIER_HEAD(of_overlay_chain);
+
+static LIST_HEAD(ovcs_list);
+static DEFINE_IDR(ovcs_idr);
+
+static BLOCKING_NOTIFIER_HEAD(overlay_notify_chain);
int of_overlay_notifier_register(struct notifier_block *nb)
{
- return blocking_notifier_chain_register(&of_overlay_chain, nb);
+ return blocking_notifier_chain_register(&overlay_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(of_overlay_notifier_register);
int of_overlay_notifier_unregister(struct notifier_block *nb)
{
- return blocking_notifier_chain_unregister(&of_overlay_chain, nb);
+ return blocking_notifier_chain_unregister(&overlay_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(of_overlay_notifier_unregister);
-static int of_overlay_notify(struct of_overlay *ov,
- enum of_overlay_notify_action action)
+static char *of_overlay_action_name[] = {
+ "pre-apply",
+ "post-apply",
+ "pre-remove",
+ "post-remove",
+};
+
+static int overlay_notify(struct overlay_changeset *ovcs,
+ enum of_overlay_notify_action action)
{
struct of_overlay_notify_data nd;
int i, ret;
- for (i = 0; i < ov->count; i++) {
- struct of_overlay_info *ovinfo = &ov->ovinfo_tab[i];
+ for (i = 0; i < ovcs->count; i++) {
+ struct fragment *fragment = &ovcs->fragments[i];
- nd.target = ovinfo->target;
- nd.overlay = ovinfo->overlay;
+ nd.target = fragment->target;
+ nd.overlay = fragment->overlay;
- ret = blocking_notifier_call_chain(&of_overlay_chain,
+ ret = blocking_notifier_call_chain(&overlay_notify_chain,
action, &nd);
- if (ret)
- return notifier_to_errno(ret);
+ if (ret == NOTIFY_OK || ret == NOTIFY_STOP)
+ return 0;
+ if (ret) {
+ ret = notifier_to_errno(ret);
+ pr_err("overlay changeset %s notifier error %d, target: %pOF\n",
+ of_overlay_action_name[action], ret, nd.target);
+ return ret;
+ }
}
return 0;
}
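A minimal sketch of how a consumer might hook into this notifier chain, assuming the usual declarations from <linux/of.h> are in scope; the function and variable names below are invented for illustration and are not part of this patch:

    static int my_overlay_notify(struct notifier_block *nb,
                                 unsigned long action, void *arg)
    {
            struct of_overlay_notify_data *nd = arg;

            /* nd->target and nd->overlay are filled in by overlay_notify() above */
            if (action == OF_OVERLAY_PRE_APPLY)
                    pr_info("overlay fragment targets %pOF\n", nd->target);

            return NOTIFY_OK;
    }

    static struct notifier_block my_overlay_nb = {
            .notifier_call = my_overlay_notify,
    };

    /* registered once, e.g. from a driver's init path: */
    /* of_overlay_notifier_register(&my_overlay_nb); */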
-static struct property *dup_and_fixup_symbol_prop(struct of_overlay *ov,
- const struct property *prop)
+/*
+ * The values of properties in the "/__symbols__" node are paths in
+ * the ovcs->overlay_tree. When duplicating the properties, the paths
+ * need to be adjusted to be the correct path for the live device tree.
+ *
+ * The paths refer to a node in the subtree of a fragment node's "__overlay__"
+ * node, for example "/fragment@0/__overlay__/symbol_path_tail",
+ * where symbol_path_tail can be a single node name or a multi-node path.
+ *
+ * The duplicated property value will be modified by replacing the
+ * "/fragment_name/__overlay/" portion of the value with the target
+ * path from the fragment node.
+ */
+static struct property *dup_and_fixup_symbol_prop(
+ struct overlay_changeset *ovcs, const struct property *prop)
{
- struct of_overlay_info *ovinfo;
- struct property *new;
- const char *overlay_name;
- char *label_path;
- char *symbol_path;
+ struct fragment *fragment;
+ struct property *new_prop;
+ struct device_node *fragment_node;
+ struct device_node *overlay_node;
+ const char *path;
+ const char *path_tail;
const char *target_path;
int k;
- int label_path_len;
int overlay_name_len;
+ int path_len;
+ int path_tail_len;
int target_path_len;
if (!prop->value)
return NULL;
- symbol_path = prop->value;
-
- new = kzalloc(sizeof(*new), GFP_KERNEL);
- if (!new)
+ if (strnlen(prop->value, prop->length) >= prop->length)
return NULL;
+ path = prop->value;
+ path_len = strlen(path);
- for (k = 0; k < ov->count; k++) {
- ovinfo = &ov->ovinfo_tab[k];
- overlay_name = ovinfo->overlay->full_name;
- overlay_name_len = strlen(overlay_name);
- if (!strncasecmp(symbol_path, overlay_name, overlay_name_len))
+ if (path_len < 1)
+ return NULL;
+ fragment_node = __of_find_node_by_path(ovcs->overlay_tree, path + 1);
+ overlay_node = __of_find_node_by_path(fragment_node, "__overlay__/");
+ of_node_put(fragment_node);
+ of_node_put(overlay_node);
+
+ for (k = 0; k < ovcs->count; k++) {
+ fragment = &ovcs->fragments[k];
+ if (fragment->overlay == overlay_node)
break;
}
+ if (k >= ovcs->count)
+ return NULL;
+
+ overlay_name_len = snprintf(NULL, 0, "%pOF", fragment->overlay);
- if (k >= ov->count)
- goto err_free;
+ if (overlay_name_len > path_len)
+ return NULL;
+ path_tail = path + overlay_name_len;
+ path_tail_len = strlen(path_tail);
- target_path = ovinfo->target->full_name;
+ target_path = kasprintf(GFP_KERNEL, "%pOF", fragment->target);
+ if (!target_path)
+ return NULL;
target_path_len = strlen(target_path);
- label_path = symbol_path + overlay_name_len;
- label_path_len = strlen(label_path);
+ new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
+ if (!new_prop)
+ goto err_free_target_path;
- new->name = kstrdup(prop->name, GFP_KERNEL);
- new->length = target_path_len + label_path_len + 1;
- new->value = kzalloc(new->length, GFP_KERNEL);
+ new_prop->name = kstrdup(prop->name, GFP_KERNEL);
+ new_prop->length = target_path_len + path_tail_len + 1;
+ new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
+ if (!new_prop->name || !new_prop->value)
+ goto err_free_new_prop;
- if (!new->name || !new->value)
- goto err_free;
+ strcpy(new_prop->value, target_path);
+ strcpy(new_prop->value + target_path_len, path_tail);
- strcpy(new->value, target_path);
- strcpy(new->value + target_path_len, label_path);
+ of_property_set_flag(new_prop, OF_DYNAMIC);
- /* mark the property as dynamic */
- of_property_set_flag(new, OF_DYNAMIC);
+ return new_prop;
- return new;
+err_free_new_prop:
+ kfree(new_prop->name);
+ kfree(new_prop->value);
+ kfree(new_prop);
+err_free_target_path:
+ kfree(target_path);
- err_free:
- kfree(new->name);
- kfree(new->value);
- kfree(new);
return NULL;
-
-
}
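A worked example of the fixup performed above, with paths invented for illustration (they do not appear in this patch): if a fragment's target resolves to /soc/i2c@48060000 and a /__symbols__ property carries the value "/fragment@0/__overlay__/eeprom@50", the duplicated property's value becomes "/soc/i2c@48060000/eeprom@50".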
-static int of_overlay_apply_single_property(struct of_overlay *ov,
- struct device_node *target, struct property *prop,
- bool is_symbols_node)
+/**
+ * add_changeset_property() - add @overlay_prop to overlay changeset
+ * @ovcs: overlay changeset
+ * @target_node: where to place @overlay_prop in live tree
+ * @overlay_prop: property to add or update, from overlay tree
+ * @is_symbols_prop: 1 if @overlay_prop is from node "/__symbols__"
+ *
+ * If @overlay_prop does not already exist in @target_node, add changeset entry
+ * to add @overlay_prop in @target_node, else add changeset entry to update
+ * value of @overlay_prop.
+ *
+ * Some special properties are not updated (no error returned).
+ *
+ * Update of property in symbols node is not allowed.
+ *
+ * Returns 0 on success, -ENOMEM if a memory allocation fails, or -EINVAL if
+ * @overlay_prop is invalid.
+ */
+static int add_changeset_property(struct overlay_changeset *ovcs,
+ struct device_node *target_node,
+ struct property *overlay_prop,
+ bool is_symbols_prop)
{
- struct property *propn = NULL, *tprop;
+ struct property *new_prop = NULL, *prop;
+ int ret = 0;
- /* NOTE: Multiple changes of single properties not supported */
- tprop = of_find_property(target, prop->name, NULL);
+ prop = of_find_property(target_node, overlay_prop->name, NULL);
- /* special properties are not meant to be updated (silent NOP) */
- if (of_prop_cmp(prop->name, "name") == 0 ||
- of_prop_cmp(prop->name, "phandle") == 0 ||
- of_prop_cmp(prop->name, "linux,phandle") == 0)
+ if (!of_prop_cmp(overlay_prop->name, "name") ||
+ !of_prop_cmp(overlay_prop->name, "phandle") ||
+ !of_prop_cmp(overlay_prop->name, "linux,phandle"))
return 0;
- if (is_symbols_node) {
- /* changing a property in __symbols__ node not allowed */
- if (tprop)
+ if (is_symbols_prop) {
+ if (prop)
return -EINVAL;
- propn = dup_and_fixup_symbol_prop(ov, prop);
+ new_prop = dup_and_fixup_symbol_prop(ovcs, overlay_prop);
} else {
- propn = __of_prop_dup(prop, GFP_KERNEL);
+ new_prop = __of_prop_dup(overlay_prop, GFP_KERNEL);
}
- if (propn == NULL)
+ if (!new_prop)
return -ENOMEM;
- /* not found? add */
- if (tprop == NULL)
- return of_changeset_add_property(&ov->cset, target, propn);
-
- /* found? update */
- return of_changeset_update_property(&ov->cset, target, propn);
+ if (!prop)
+ ret = of_changeset_add_property(&ovcs->cset, target_node,
+ new_prop);
+ else
+ ret = of_changeset_update_property(&ovcs->cset, target_node,
+ new_prop);
+
+ if (ret) {
+ kfree(new_prop->name);
+ kfree(new_prop->value);
+ kfree(new_prop);
+ }
+ return ret;
}
-static int of_overlay_apply_single_device_node(struct of_overlay *ov,
- struct device_node *target, struct device_node *child)
+/**
+ * add_changeset_node() - add @node (and children) to overlay changeset
+ * @ovcs: overlay changeset
+ * @target_node: where to place @node in live tree
+ * @node: node from within overlay device tree fragment
+ *
+ * If @node does not already exist in @target_node, add changeset entry
+ * to add @node in @target_node.
+ *
+ * If @node already exists in @target_node, and the existing node has
+ * a phandle, the overlay node is not allowed to have a phandle.
+ *
+ * If @node has child nodes, add the children recursively via
+ * build_changeset_next_level().
+ *
+ * NOTE: Multiple mods of created nodes not supported.
+ * If more than one fragment contains a node that does not already exist
+ * in the live tree, then for each fragment of_changeset_attach_node()
+ * will add a changeset entry to add the node. When the changeset is
+ * applied, __of_attach_node() will attach the node twice (once for
+ * each fragment). At this point the device tree will be corrupted.
+ *
+ * TODO: add integrity check to ensure that multiple fragments do not
+ * create the same node.
+ *
+ * Returns 0 on success, -ENOMEM if a memory allocation fails, or -EINVAL if
+ * @node is invalid.
+ */
+static int add_changeset_node(struct overlay_changeset *ovcs,
+ struct device_node *target_node, struct device_node *node)
{
- const char *cname;
+ const char *node_kbasename;
struct device_node *tchild;
int ret = 0;
- cname = kbasename(child->full_name);
- if (cname == NULL)
- return -ENOMEM;
+ node_kbasename = kbasename(node->full_name);
- /* NOTE: Multiple mods of created nodes not supported */
- for_each_child_of_node(target, tchild)
- if (!of_node_cmp(cname, kbasename(tchild->full_name)))
+ for_each_child_of_node(target_node, tchild)
+ if (!of_node_cmp(node_kbasename, kbasename(tchild->full_name)))
break;
- if (tchild != NULL) {
- /* new overlay phandle value conflicts with existing value */
- if (child->phandle)
- return -EINVAL;
-
- /* apply overlay recursively */
- ret = of_overlay_apply_one(ov, tchild, child, 0);
- of_node_put(tchild);
- } else {
- /* create empty tree as a target */
- tchild = __of_node_dup(child, "%pOF/%s", target, cname);
+ if (!tchild) {
+ tchild = __of_node_dup(node, "%pOF/%s",
+ target_node, node_kbasename);
if (!tchild)
return -ENOMEM;
- /* point to parent */
- tchild->parent = target;
+ tchild->parent = target_node;
- ret = of_changeset_attach_node(&ov->cset, tchild);
+ ret = of_changeset_attach_node(&ovcs->cset, tchild);
if (ret)
return ret;
- ret = of_overlay_apply_one(ov, tchild, child, 0);
- if (ret)
- return ret;
+ return build_changeset_next_level(ovcs, tchild, node);
}
+ if (node->phandle && tchild->phandle)
+ ret = -EINVAL;
+ else
+ ret = build_changeset_next_level(ovcs, tchild, node);
+ of_node_put(tchild);
+
return ret;
}
-/*
- * Apply a single overlay node recursively.
+/**
+ * build_changeset_next_level() - add level of overlay changeset
+ * @ovcs: overlay changeset
+ * @target_node: where to place @overlay_node in live tree
+ * @overlay_node: node from within an overlay device tree fragment
+ *
+ * Add the properties (if any) and nodes (if any) from @overlay_node to the
+ * @ovcs->cset changeset. If an added node has child nodes, they will
+ * be added recursively.
*
- * Note that the in case of an error the target node is left
- * in a inconsistent state. Error recovery should be performed
- * by using the changeset.
+ * Do not allow symbols node to have any children.
+ *
+ * Returns 0 on success, -ENOMEM if a memory allocation fails, or -EINVAL if
+ * @overlay_node is invalid.
*/
-static int of_overlay_apply_one(struct of_overlay *ov,
- struct device_node *target, const struct device_node *overlay,
- bool is_symbols_node)
+static int build_changeset_next_level(struct overlay_changeset *ovcs,
+ struct device_node *target_node,
+ const struct device_node *overlay_node)
{
struct device_node *child;
struct property *prop;
int ret;
- for_each_property_of_node(overlay, prop) {
- ret = of_overlay_apply_single_property(ov, target, prop,
- is_symbols_node);
+ for_each_property_of_node(overlay_node, prop) {
+ ret = add_changeset_property(ovcs, target_node, prop, 0);
if (ret) {
- pr_err("Failed to apply prop @%pOF/%s\n",
- target, prop->name);
+ pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
+ target_node, prop->name, ret);
return ret;
}
}
- /* do not allow symbols node to have any children */
- if (is_symbols_node)
- return 0;
-
- for_each_child_of_node(overlay, child) {
- ret = of_overlay_apply_single_device_node(ov, target, child);
- if (ret != 0) {
- pr_err("Failed to apply single node @%pOF/%s\n",
- target, child->name);
+ for_each_child_of_node(overlay_node, child) {
+ ret = add_changeset_node(ovcs, target_node, child);
+ if (ret) {
+ pr_debug("Failed to apply node @%pOF/%s, err=%d\n",
+ target_node, child->name, ret);
of_node_put(child);
return ret;
}
@@ -279,28 +405,72 @@ static int of_overlay_apply_one(struct of_overlay *ov,
return 0;
}
+/*
+ * Add the properties from the __symbols__ node to the @ovcs->cset changeset.
+ */
+static int build_changeset_symbols_node(struct overlay_changeset *ovcs,
+ struct device_node *target_node,
+ const struct device_node *overlay_symbols_node)
+{
+ struct property *prop;
+ int ret;
+
+ for_each_property_of_node(overlay_symbols_node, prop) {
+ ret = add_changeset_property(ovcs, target_node, prop, 1);
+ if (ret) {
+ pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
+ target_node, prop->name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/**
- * of_overlay_apply() - Apply @count overlays pointed at by @ovinfo_tab
- * @ov: Overlay to apply
+ * build_changeset() - populate overlay changeset in @ovcs from @ovcs->fragments
+ * @ovcs: Overlay changeset
*
- * Applies the overlays given, while handling all error conditions
- * appropriately. Either the operation succeeds, or if it fails the
- * live tree is reverted to the state before the attempt.
- * Returns 0, or an error if the overlay attempt failed.
+ * Create changeset @ovcs->cset to contain the nodes and properties of the
+ * overlay device tree fragments in @ovcs->fragments[]. If an error occurs,
+ * any portions of the changeset that were successfully created will remain
+ * in @ovcs->cset.
+ *
+ * Returns 0 on success, -ENOMEM if a memory allocation fails, or -EINVAL if
+ * an invalid overlay is found in @ovcs->fragments[].
*/
-static int of_overlay_apply(struct of_overlay *ov)
+static int build_changeset(struct overlay_changeset *ovcs)
{
- int i, err;
-
- /* first we apply the overlays atomically */
- for (i = 0; i < ov->count; i++) {
- struct of_overlay_info *ovinfo = &ov->ovinfo_tab[i];
+ struct fragment *fragment;
+ int fragments_count, i, ret;
+
+ /*
+ * if there is a symbols fragment in ovcs->fragments[i] it is
+ * the final element in the array
+ */
+ if (ovcs->symbols_fragment)
+ fragments_count = ovcs->count - 1;
+ else
+ fragments_count = ovcs->count;
+
+ for (i = 0; i < fragments_count; i++) {
+ fragment = &ovcs->fragments[i];
+
+ ret = build_changeset_next_level(ovcs, fragment->target,
+ fragment->overlay);
+ if (ret) {
+ pr_debug("apply failed '%pOF'\n", fragment->target);
+ return ret;
+ }
+ }
- err = of_overlay_apply_one(ov, ovinfo->target, ovinfo->overlay,
- ovinfo->is_symbols_node);
- if (err != 0) {
- pr_err("apply failed '%pOF'\n", ovinfo->target);
- return err;
+ if (ovcs->symbols_fragment) {
+ fragment = &ovcs->fragments[ovcs->count - 1];
+ ret = build_changeset_symbols_node(ovcs, fragment->target,
+ fragment->overlay);
+ if (ret) {
+ pr_debug("apply failed '%pOF'\n", fragment->target);
+ return ret;
}
}
@@ -309,10 +479,10 @@ static int of_overlay_apply(struct of_overlay *ov)
/*
* Find the target node using a number of different strategies
- * in order of preference
+ * in order of preference:
*
- * "target" property containing the phandle of the target
- * "target-path" property containing the path of the target
+ * 1) "target" property containing the phandle of the target
+ * 2) "target-path" property containing the path of the target
*/
static struct device_node *find_target_node(struct device_node *info_node)
{
@@ -320,14 +490,12 @@ static struct device_node *find_target_node(struct device_node *info_node)
u32 val;
int ret;
- /* first try to go by using the target as a phandle */
ret = of_property_read_u32(info_node, "target", &val);
- if (ret == 0)
+ if (!ret)
return of_find_node_by_phandle(val);
- /* now try to locate by path */
ret = of_property_read_string(info_node, "target-path", &path);
- if (ret == 0)
+ if (!ret)
return of_find_node_by_path(path);
pr_err("Failed to find target for node %p (%s)\n",
@@ -337,228 +505,290 @@ static struct device_node *find_target_node(struct device_node *info_node)
}
/**
- * of_fill_overlay_info() - Fill an overlay info structure
- * @ov Overlay to fill
- * @info_node: Device node containing the overlay
- * @ovinfo: Pointer to the overlay info structure to fill
- *
- * Fills an overlay info structure with the overlay information
- * from a device node. This device node must have a target property
- * which contains a phandle of the overlay target node, and an
- * __overlay__ child node which has the overlay contents.
- * Both ovinfo->target & ovinfo->overlay have their references taken.
- *
- * Returns 0 on success, or a negative error value.
+ * init_overlay_changeset() - initialize overlay changeset from overlay tree
+ * @ovcs: Overlay changeset to build
+ * @tree: Contains all the overlay fragments and overlay fixup nodes
+ *
+ * Initialize @ovcs. Populate @ovcs->fragments with node information from
+ * the top level of @tree. The relevant top level nodes are the fragment
+ * nodes and the __symbols__ node. Any other top level node will be ignored.
+ *
+ * Returns 0 on success, -ENOMEM if a memory allocation fails, -EINVAL if an
+ * error is detected in @tree, or -ENOSPC if idr_alloc() fails.
*/
-static int of_fill_overlay_info(struct of_overlay *ov,
- struct device_node *info_node, struct of_overlay_info *ovinfo)
+static int init_overlay_changeset(struct overlay_changeset *ovcs,
+ struct device_node *tree)
{
- ovinfo->overlay = of_get_child_by_name(info_node, "__overlay__");
- if (ovinfo->overlay == NULL)
- goto err_fail;
+ struct device_node *node, *overlay_node;
+ struct fragment *fragment;
+ struct fragment *fragments;
+ int cnt, ret;
- ovinfo->target = find_target_node(info_node);
- if (ovinfo->target == NULL)
- goto err_fail;
+ /*
+ * Warn for some issues. Cannot return -EINVAL for these until
+ * of_unittest_apply_overlay() is fixed to pass these checks.
+ */
+ if (!of_node_check_flag(tree, OF_DYNAMIC))
+ pr_debug("%s() tree is not dynamic\n", __func__);
- return 0;
+ if (!of_node_check_flag(tree, OF_DETACHED))
+ pr_debug("%s() tree is not detached\n", __func__);
-err_fail:
- of_node_put(ovinfo->target);
- of_node_put(ovinfo->overlay);
+ if (!of_node_is_root(tree))
+ pr_debug("%s() tree is not root\n", __func__);
- memset(ovinfo, 0, sizeof(*ovinfo));
- return -EINVAL;
-}
+ ovcs->overlay_tree = tree;
-/**
- * of_build_overlay_info() - Build an overlay info array
- * @ov Overlay to build
- * @tree: Device node containing all the overlays
- *
- * Helper function that given a tree containing overlay information,
- * allocates and builds an overlay info array containing it, ready
- * for use using of_overlay_apply.
- *
- * Returns 0 on success with the @cntp @ovinfop pointers valid,
- * while on error a negative error value is returned.
- */
-static int of_build_overlay_info(struct of_overlay *ov,
- struct device_node *tree)
-{
- struct device_node *node;
- struct of_overlay_info *ovinfo;
- int cnt, err;
+ INIT_LIST_HEAD(&ovcs->ovcs_list);
+
+ of_changeset_init(&ovcs->cset);
+
+ ovcs->id = idr_alloc(&ovcs_idr, ovcs, 1, 0, GFP_KERNEL);
+ if (ovcs->id <= 0)
+ return ovcs->id;
- /* worst case; every child is a node */
cnt = 0;
- for_each_child_of_node(tree, node)
- cnt++;
- if (of_get_child_by_name(tree, "__symbols__"))
+ /* fragment nodes */
+ for_each_child_of_node(tree, node) {
+ overlay_node = of_get_child_by_name(node, "__overlay__");
+ if (overlay_node) {
+ cnt++;
+ of_node_put(overlay_node);
+ }
+ }
+
+ node = of_get_child_by_name(tree, "__symbols__");
+ if (node) {
cnt++;
+ of_node_put(node);
+ }
- ovinfo = kcalloc(cnt, sizeof(*ovinfo), GFP_KERNEL);
- if (ovinfo == NULL)
- return -ENOMEM;
+ fragments = kcalloc(cnt, sizeof(*fragments), GFP_KERNEL);
+ if (!fragments) {
+ ret = -ENOMEM;
+ goto err_free_idr;
+ }
cnt = 0;
for_each_child_of_node(tree, node) {
- err = of_fill_overlay_info(ov, node, &ovinfo[cnt]);
- if (err == 0)
- cnt++;
+ fragment = &fragments[cnt];
+ fragment->overlay = of_get_child_by_name(node, "__overlay__");
+ if (fragment->overlay) {
+ fragment->target = find_target_node(node);
+ if (!fragment->target) {
+ of_node_put(fragment->overlay);
+ ret = -EINVAL;
+ goto err_free_fragments;
+ } else {
+ cnt++;
+ }
+ }
}
+ /*
+ * if there is a symbols fragment in ovcs->fragments[i] it is
+ * the final element in the array
+ */
node = of_get_child_by_name(tree, "__symbols__");
if (node) {
- ovinfo[cnt].overlay = node;
- ovinfo[cnt].target = of_find_node_by_path("/__symbols__");
- ovinfo[cnt].is_symbols_node = 1;
-
- if (!ovinfo[cnt].target) {
- pr_err("no symbols in root of device tree.\n");
- return -EINVAL;
+ ovcs->symbols_fragment = 1;
+ fragment = &fragments[cnt];
+ fragment->overlay = node;
+ fragment->target = of_find_node_by_path("/__symbols__");
+
+ if (!fragment->target) {
+ pr_err("symbols in overlay, but not in live tree\n");
+ ret = -EINVAL;
+ goto err_free_fragments;
}
cnt++;
}
- /* if nothing filled, return error */
- if (cnt == 0) {
- kfree(ovinfo);
- return -ENODEV;
+ if (!cnt) {
+ ret = -EINVAL;
+ goto err_free_fragments;
}
- ov->count = cnt;
- ov->ovinfo_tab = ovinfo;
+ ovcs->count = cnt;
+ ovcs->fragments = fragments;
return 0;
+
+err_free_fragments:
+ kfree(fragments);
+err_free_idr:
+ idr_remove(&ovcs_idr, ovcs->id);
+
+ pr_err("%s() failed, ret = %d\n", __func__, ret);
+
+ return ret;
}
-/**
- * of_free_overlay_info() - Free an overlay info array
- * @ov Overlay to free the overlay info from
- * @ovinfo_tab: Array of overlay_info's to free
- *
- * Releases the memory of a previously allocated ovinfo array
- * by of_build_overlay_info.
- * Returns 0, or an error if the arguments are bogus.
- */
-static int of_free_overlay_info(struct of_overlay *ov)
+static void free_overlay_changeset(struct overlay_changeset *ovcs)
{
- struct of_overlay_info *ovinfo;
int i;
- /* do it in reverse */
- for (i = ov->count - 1; i >= 0; i--) {
- ovinfo = &ov->ovinfo_tab[i];
+ if (!ovcs->cset.entries.next)
+ return;
+ of_changeset_destroy(&ovcs->cset);
+
+ if (ovcs->id)
+ idr_remove(&ovcs_idr, ovcs->id);
- of_node_put(ovinfo->target);
- of_node_put(ovinfo->overlay);
+ for (i = 0; i < ovcs->count; i++) {
+ of_node_put(ovcs->fragments[i].target);
+ of_node_put(ovcs->fragments[i].overlay);
}
- kfree(ov->ovinfo_tab);
+ kfree(ovcs->fragments);
- return 0;
+ kfree(ovcs);
}
-static LIST_HEAD(ov_list);
-static DEFINE_IDR(ov_idr);
-
/**
- * of_overlay_create() - Create and apply an overlay
- * @tree: Device node containing all the overlays
+ * of_overlay_apply() - Create and apply an overlay changeset
+ * @tree: Expanded overlay device tree
+ * @ovcs_id: Pointer to overlay changeset id
*
- * Creates and applies an overlay while also keeping track
- * of the overlay in a list. This list can be used to prevent
- * illegal overlay removals.
+ * Creates and applies an overlay changeset.
+ *
+ * If an error occurs in a pre-apply notifier, then no changes are made
+ * to the device tree.
+ *
+ * A non-zero return value means the changeset was not created if the
+ * error is from:
+ * - parameter checks
+ * - building the changeset
+ * - overlay changeset pre-apply notifier
*
- * Returns the id of the created overlay, or a negative error number
+ * If an error is returned by an overlay changeset pre-apply notifier
+ * then no further overlay changeset pre-apply notifier will be called.
+ *
+ * A non-zero return value means the changeset was created if the
+ * error is from:
+ * - overlay changeset entry notifier
+ * - overlay changeset post-apply notifier
+ *
+ * If an error is returned by an overlay changeset post-apply notifier
+ * then no further overlay changeset post-apply notifier will be called.
+ *
+ * If more than one notifier returns an error, then the last notifier
+ * error to occur is returned.
+ *
+ * If an error occurred while applying the overlay changeset, then an
+ * attempt is made to revert any changes that were made to the
+ * device tree. If there were any errors during the revert attempt,
+ * then the state of the device tree cannot be determined, and any
+ * following attempt to apply or remove an overlay changeset will be
+ * refused.
+ *
+ * Returns 0 on success, or a negative error number. The overlay changeset
+ * id is returned in *ovcs_id.
*/
-int of_overlay_create(struct device_node *tree)
+int of_overlay_apply(struct device_node *tree, int *ovcs_id)
{
- struct of_overlay *ov;
- int err, id;
+ struct overlay_changeset *ovcs;
+ int ret = 0, ret_revert, ret_tmp;
- /* allocate the overlay structure */
- ov = kzalloc(sizeof(*ov), GFP_KERNEL);
- if (ov == NULL)
- return -ENOMEM;
- ov->id = -1;
+ *ovcs_id = 0;
+
+ if (devicetree_corrupt()) {
+ pr_err("devicetree state suspect, refuse to apply overlay\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ovcs = kzalloc(sizeof(*ovcs), GFP_KERNEL);
+ if (!ovcs) {
+ ret = -ENOMEM;
+ goto out;
+ }
- INIT_LIST_HEAD(&ov->node);
+ of_overlay_mutex_lock();
- of_changeset_init(&ov->cset);
+ ret = of_resolve_phandles(tree);
+ if (ret)
+ goto err_overlay_unlock;
mutex_lock(&of_mutex);
- id = idr_alloc(&ov_idr, ov, 0, 0, GFP_KERNEL);
- if (id < 0) {
- err = id;
- goto err_destroy_trans;
- }
- ov->id = id;
+ ret = init_overlay_changeset(ovcs, tree);
+ if (ret)
+ goto err_free_overlay_changeset;
- /* build the overlay info structures */
- err = of_build_overlay_info(ov, tree);
- if (err) {
- pr_err("of_build_overlay_info() failed for tree@%pOF\n",
- tree);
- goto err_free_idr;
+ ret = overlay_notify(ovcs, OF_OVERLAY_PRE_APPLY);
+ if (ret) {
+ pr_err("overlay changeset pre-apply notify error %d\n", ret);
+ goto err_free_overlay_changeset;
}
- err = of_overlay_notify(ov, OF_OVERLAY_PRE_APPLY);
- if (err < 0) {
- pr_err("%s: Pre-apply notifier failed (err=%d)\n",
- __func__, err);
- goto err_free_idr;
+ ret = build_changeset(ovcs);
+ if (ret)
+ goto err_free_overlay_changeset;
+
+ ret_revert = 0;
+ ret = __of_changeset_apply_entries(&ovcs->cset, &ret_revert);
+ if (ret) {
+ if (ret_revert) {
+ pr_debug("overlay changeset revert error %d\n",
+ ret_revert);
+ devicetree_state_flags |= DTSF_APPLY_FAIL;
+ }
+ goto err_free_overlay_changeset;
+ } else {
+ ret = __of_changeset_apply_notify(&ovcs->cset);
+ if (ret)
+ pr_err("overlay changeset entry notify error %d\n",
+ ret);
+ /* fall through */
}
- /* apply the overlay */
- err = of_overlay_apply(ov);
- if (err)
- goto err_abort_trans;
-
- /* apply the changeset */
- err = __of_changeset_apply(&ov->cset);
- if (err)
- goto err_revert_overlay;
+ list_add_tail(&ovcs->ovcs_list, &ovcs_list);
+ *ovcs_id = ovcs->id;
+ ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_APPLY);
+ if (ret_tmp) {
+ pr_err("overlay changeset post-apply notify error %d\n",
+ ret_tmp);
+ if (!ret)
+ ret = ret_tmp;
+ }
- /* add to the tail of the overlay list */
- list_add_tail(&ov->node, &ov_list);
+ mutex_unlock(&of_mutex);
+ of_overlay_mutex_unlock();
- of_overlay_notify(ov, OF_OVERLAY_POST_APPLY);
+ goto out;
- mutex_unlock(&of_mutex);
+err_overlay_unlock:
+ of_overlay_mutex_unlock();
- return id;
+err_free_overlay_changeset:
+ free_overlay_changeset(ovcs);
-err_revert_overlay:
-err_abort_trans:
- of_free_overlay_info(ov);
-err_free_idr:
- idr_remove(&ov_idr, ov->id);
-err_destroy_trans:
- of_changeset_destroy(&ov->cset);
- kfree(ov);
mutex_unlock(&of_mutex);
- return err;
+out:
+ pr_debug("%s() err=%d\n", __func__, ret);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(of_overlay_create);
+EXPORT_SYMBOL_GPL(of_overlay_apply);
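A minimal sketch of a caller migrating to the new interface; "overlay_np" is a hypothetical detached, expanded overlay tree and the surrounding error handling is elided:

    int ovcs_id = 0;
    int ret;

    ret = of_overlay_apply(overlay_np, &ovcs_id);  /* was: id = of_overlay_create(overlay_np) */
    if (ret)
            return ret;

    /* ... later, when the overlay is no longer needed ... */
    ret = of_overlay_remove(&ovcs_id);             /* was: of_overlay_destroy(id) */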
-/* check whether the given node, lies under the given tree */
-static int overlay_subtree_check(struct device_node *tree,
- struct device_node *dn)
+/*
+ * Find @np in @tree.
+ *
+ * Returns 1 if @np is @tree or is contained in @tree, else 0
+ */
+static int find_node(struct device_node *tree, struct device_node *np)
{
struct device_node *child;
- /* match? */
- if (tree == dn)
+ if (tree == np)
return 1;
for_each_child_of_node(tree, child) {
- if (overlay_subtree_check(child, dn)) {
+ if (find_node(child, np)) {
of_node_put(child);
return 1;
}
@@ -567,29 +797,39 @@ static int overlay_subtree_check(struct device_node *tree,
return 0;
}
-/* check whether this overlay is the topmost */
-static int overlay_is_topmost(struct of_overlay *ov, struct device_node *dn)
+/*
+ * Is @remove_ce_node a child of, a parent of, or the same as any
+ * node in an overlay changeset more topmost than @remove_ovcs?
+ *
+ * Returns 1 if found, else 0
+ */
+static int node_overlaps_later_cs(struct overlay_changeset *remove_ovcs,
+ struct device_node *remove_ce_node)
{
- struct of_overlay *ovt;
+ struct overlay_changeset *ovcs;
struct of_changeset_entry *ce;
- list_for_each_entry_reverse(ovt, &ov_list, node) {
- /* if we hit ourselves, we're done */
- if (ovt == ov)
+ list_for_each_entry_reverse(ovcs, &ovcs_list, ovcs_list) {
+ if (ovcs == remove_ovcs)
break;
- /* check against each subtree affected by this overlay */
- list_for_each_entry(ce, &ovt->cset.entries, node) {
- if (overlay_subtree_check(ce->np, dn)) {
- pr_err("%s: #%d clashes #%d @%pOF\n",
- __func__, ov->id, ovt->id, dn);
- return 0;
+ list_for_each_entry(ce, &ovcs->cset.entries, node) {
+ if (find_node(ce->np, remove_ce_node)) {
+ pr_err("%s: #%d overlaps with #%d @%pOF\n",
+ __func__, remove_ovcs->id, ovcs->id,
+ remove_ce_node);
+ return 1;
+ }
+ if (find_node(remove_ce_node, ce->np)) {
+ pr_err("%s: #%d overlaps with #%d @%pOF\n",
+ __func__, remove_ovcs->id, ovcs->id,
+ remove_ce_node);
+ return 1;
}
}
}
- /* overlay is topmost */
- return 1;
+ return 0;
}
/*
@@ -602,13 +842,13 @@ static int overlay_is_topmost(struct of_overlay *ov, struct device_node *dn)
* the one closest to the tail. If another overlay has affected this
 * device node and is closest to the tail, then removal is not permitted.
*/
-static int overlay_removal_is_ok(struct of_overlay *ov)
+static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs)
{
- struct of_changeset_entry *ce;
+ struct of_changeset_entry *remove_ce;
- list_for_each_entry(ce, &ov->cset.entries, node) {
- if (!overlay_is_topmost(ov, ce->np)) {
- pr_err("overlay #%d is not topmost\n", ov->id);
+ list_for_each_entry(remove_ce, &remove_ovcs->cset.entries, node) {
+ if (node_overlaps_later_cs(remove_ovcs, remove_ce->np)) {
+ pr_err("overlay #%d is not topmost\n", remove_ovcs->id);
return 0;
}
}
@@ -617,75 +857,130 @@ static int overlay_removal_is_ok(struct of_overlay *ov)
}
/**
- * of_overlay_destroy() - Removes an overlay
- * @id: Overlay id number returned by a previous call to of_overlay_create
+ * of_overlay_remove() - Revert and free an overlay changeset
+ * @ovcs_id: Pointer to overlay changeset id
*
- * Removes an overlay if it is permissible.
+ * Removes an overlay if it is permissible. @ovcs_id was previously returned
+ * by of_overlay_apply().
*
- * Returns 0 on success, or a negative error number
+ * If an error occurred while attempting to revert the overlay changeset,
+ * then an attempt is made to re-apply any changeset entry that was
+ * reverted. If an error occurs on re-apply, then the state of the device
+ * tree cannot be determined, and any following attempt to apply or remove
+ * an overlay changeset will be refused.
+ *
+ * A non-zero return value means the changeset was not reverted if the
+ * error is from:
+ * - parameter checks
+ * - overlay changeset pre-remove notifier
+ * - overlay changeset entry revert
+ *
+ * If an error is returned by an overlay changeset pre-remove notifier
+ * then no further overlay changeset pre-remove notifier will be called.
+ *
+ * If more than one notifier returns an error, then the last notifier
+ * error to occur is returned.
+ *
+ * A non-zero return value means the changeset was reverted if the
+ * error is from:
+ * - overlay changeset entry notifier
+ * - overlay changeset post-remove notifier
+ *
+ * If an error is returned by an overlay changeset post-remove notifier
+ * then no further overlay changeset post-remove notifier will be called.
+ *
+ * Returns 0 on success, or a negative error number. *ovcs_id is set to
+ * zero after reverting the changeset, even if a subsequent error occurs.
*/
-int of_overlay_destroy(int id)
+int of_overlay_remove(int *ovcs_id)
{
- struct of_overlay *ov;
- int err;
+ struct overlay_changeset *ovcs;
+ int ret, ret_apply, ret_tmp;
- mutex_lock(&of_mutex);
+ ret = 0;
- ov = idr_find(&ov_idr, id);
- if (ov == NULL) {
- err = -ENODEV;
- pr_err("destroy: Could not find overlay #%d\n", id);
+ if (devicetree_corrupt()) {
+ pr_err("suspect devicetree state, refuse to remove overlay\n");
+ ret = -EBUSY;
goto out;
}
- /* check whether the overlay is safe to remove */
- if (!overlay_removal_is_ok(ov)) {
- err = -EBUSY;
- goto out;
+ mutex_lock(&of_mutex);
+
+ ovcs = idr_find(&ovcs_idr, *ovcs_id);
+ if (!ovcs) {
+ ret = -ENODEV;
+ pr_err("remove: Could not find overlay #%d\n", *ovcs_id);
+ goto out_unlock;
}
- of_overlay_notify(ov, OF_OVERLAY_PRE_REMOVE);
- list_del(&ov->node);
- __of_changeset_revert(&ov->cset);
- of_overlay_notify(ov, OF_OVERLAY_POST_REMOVE);
- of_free_overlay_info(ov);
- idr_remove(&ov_idr, id);
- of_changeset_destroy(&ov->cset);
- kfree(ov);
+ if (!overlay_removal_is_ok(ovcs)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
- err = 0;
+ ret = overlay_notify(ovcs, OF_OVERLAY_PRE_REMOVE);
+ if (ret) {
+ pr_err("overlay changeset pre-remove notify error %d\n", ret);
+ goto out_unlock;
+ }
-out:
+ list_del(&ovcs->ovcs_list);
+
+ ret_apply = 0;
+ ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);
+ if (ret) {
+ if (ret_apply)
+ devicetree_state_flags |= DTSF_REVERT_FAIL;
+ goto out_unlock;
+ } else {
+ ret = __of_changeset_revert_notify(&ovcs->cset);
+ if (ret) {
+ pr_err("overlay changeset entry notify error %d\n",
+ ret);
+ /* fall through - changeset was reverted */
+ }
+ }
+
+ *ovcs_id = 0;
+
+ ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_REMOVE);
+ if (ret_tmp) {
+ pr_err("overlay changeset post-remove notify error %d\n",
+ ret_tmp);
+ if (!ret)
+ ret = ret_tmp;
+ }
+
+ free_overlay_changeset(ovcs);
+
+out_unlock:
mutex_unlock(&of_mutex);
- return err;
+out:
+ pr_debug("%s() err=%d\n", __func__, ret);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(of_overlay_destroy);
+EXPORT_SYMBOL_GPL(of_overlay_remove);
/**
- * of_overlay_destroy_all() - Removes all overlays from the system
+ * of_overlay_remove_all() - Reverts and frees all overlay changesets
*
* Removes all overlays from the system in the correct order.
*
* Returns 0 on success, or a negative error number
*/
-int of_overlay_destroy_all(void)
+int of_overlay_remove_all(void)
{
- struct of_overlay *ov, *ovn;
-
- mutex_lock(&of_mutex);
+ struct overlay_changeset *ovcs, *ovcs_n;
+ int ret;
/* the tail of list is guaranteed to be safe to remove */
- list_for_each_entry_safe_reverse(ov, ovn, &ov_list, node) {
- list_del(&ov->node);
- __of_changeset_revert(&ov->cset);
- of_free_overlay_info(ov);
- idr_remove(&ov_idr, ov->id);
- kfree(ov);
+ list_for_each_entry_safe_reverse(ovcs, ovcs_n, &ovcs_list, ovcs_list) {
+ ret = of_overlay_remove(&ovcs->id);
+ if (ret)
+ return ret;
}
- mutex_unlock(&of_mutex);
-
return 0;
}
-EXPORT_SYMBOL_GPL(of_overlay_destroy_all);
+EXPORT_SYMBOL_GPL(of_overlay_remove_all);
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 264c355ba1ff..8ad33a44a7b8 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -817,9 +817,9 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
}
EXPORT_SYMBOL(of_graph_get_remote_node);
-static void of_fwnode_get(struct fwnode_handle *fwnode)
+static struct fwnode_handle *of_fwnode_get(struct fwnode_handle *fwnode)
{
- of_node_get(to_of_node(fwnode));
+ return of_fwnode_handle(of_node_get(to_of_node(fwnode)));
}
static void of_fwnode_put(struct fwnode_handle *fwnode)
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 99309cb7d372..cfaeef5f6cb1 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -84,10 +84,9 @@ static int update_usages_of_a_phandle_reference(struct device_node *overlay,
int offset, len;
int err = 0;
- value = kmalloc(prop_fixup->length, GFP_KERNEL);
+ value = kmemdup(prop_fixup->value, prop_fixup->length, GFP_KERNEL);
if (!value)
return -ENOMEM;
- memcpy(value, prop_fixup->value, prop_fixup->length);
/* prop_fixup contains a list of tuples of path:property_name:offset */
end = value + prop_fixup->length;
@@ -165,7 +164,6 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
struct property *prop_fix, *prop;
int err, i, count;
unsigned int off;
- phandle phandle;
if (!local_fixups)
return 0;
@@ -195,9 +193,7 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
if ((off + 4) > prop->length)
return -EINVAL;
- phandle = be32_to_cpu(*(__be32 *)(prop->value + off));
- phandle += phandle_delta;
- *(__be32 *)(prop->value + off) = cpu_to_be32(phandle);
+ be32_add_cpu(prop->value + off, phandle_delta);
}
}
@@ -275,11 +271,18 @@ int of_resolve_phandles(struct device_node *overlay)
err = -EINVAL;
goto out;
}
+
+#if 0
+ Temporarily disable check so that old style overlay unittests
+ do not fail when of_resolve_phandles() is moved into
+ of_overlay_apply().
+
if (!of_node_check_flag(overlay, OF_DETACHED)) {
pr_err("overlay not detached\n");
err = -EINVAL;
goto out;
}
+#endif
phandle_delta = live_tree_max_phandle() + 1;
adjust_overlay_phandles(overlay, phandle_delta);
diff --git a/drivers/of/unittest-data/.gitignore b/drivers/of/unittest-data/.gitignore
deleted file mode 100644
index 4b3cf8b16de2..000000000000
--- a/drivers/of/unittest-data/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-testcases.dtb
-testcases.dtb.S
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 168ef0bbabde..e568b1e82501 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -994,10 +994,17 @@ static int __init unittest_data_add(void)
pr_warn("%s: No tree to attach; not running tests\n", __func__);
return -ENODATA;
}
- of_node_set_flag(unittest_data_node, OF_DETACHED);
+
+ /*
+ * This lock normally encloses of_overlay_apply() as well as
+ * of_resolve_phandles().
+ */
+ of_overlay_mutex_lock();
+
rc = of_resolve_phandles(unittest_data_node);
if (rc) {
pr_err("%s: Failed to resolve phandles (rc=%i)\n", __func__, rc);
+ of_overlay_mutex_unlock();
return -EINVAL;
}
@@ -1007,6 +1014,7 @@ static int __init unittest_data_add(void)
__of_attach_node_sysfs(np);
of_aliases = of_find_node_by_path("/aliases");
of_chosen = of_find_node_by_path("/chosen");
+ of_overlay_mutex_unlock();
return 0;
}
@@ -1019,6 +1027,9 @@ static int __init unittest_data_add(void)
attach_node_and_children(np);
np = next;
}
+
+ of_overlay_mutex_unlock();
+
return 0;
}
@@ -1219,7 +1230,7 @@ static void of_unittest_untrack_overlay(int id)
static void of_unittest_destroy_tracked_overlays(void)
{
- int id, ret, defers;
+ int id, ret, defers, ovcs_id;
if (overlay_first_id < 0)
return;
@@ -1232,7 +1243,8 @@ static void of_unittest_destroy_tracked_overlays(void)
if (!(overlay_id_bits[BIT_WORD(id)] & BIT_MASK(id)))
continue;
- ret = of_overlay_destroy(id + overlay_first_id);
+ ovcs_id = id + overlay_first_id;
+ ret = of_overlay_remove(&ovcs_id);
if (ret == -ENODEV) {
pr_warn("%s: no overlay to destroy for #%d\n",
__func__, id + overlay_first_id);
@@ -1254,7 +1266,7 @@ static int of_unittest_apply_overlay(int overlay_nr, int unittest_nr,
int *overlay_id)
{
struct device_node *np = NULL;
- int ret, id = -1;
+ int ret;
np = of_find_node_by_path(overlay_path(overlay_nr));
if (np == NULL) {
@@ -1264,23 +1276,20 @@ static int of_unittest_apply_overlay(int overlay_nr, int unittest_nr,
goto out;
}
- ret = of_overlay_create(np);
+ *overlay_id = 0;
+ ret = of_overlay_apply(np, overlay_id);
if (ret < 0) {
unittest(0, "could not create overlay from \"%s\"\n",
overlay_path(overlay_nr));
goto out;
}
- id = ret;
- of_unittest_track_overlay(id);
+ of_unittest_track_overlay(*overlay_id);
ret = 0;
out:
of_node_put(np);
- if (overlay_id)
- *overlay_id = id;
-
return ret;
}
@@ -1288,7 +1297,7 @@ out:
static int of_unittest_apply_overlay_check(int overlay_nr, int unittest_nr,
int before, int after, enum overlay_type ovtype)
{
- int ret;
+ int ret, ovcs_id;
/* unittest device must not be in before state */
if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
@@ -1299,7 +1308,8 @@ static int of_unittest_apply_overlay_check(int overlay_nr, int unittest_nr,
return -EINVAL;
}
- ret = of_unittest_apply_overlay(overlay_nr, unittest_nr, NULL);
+ ovcs_id = 0;
+ ret = of_unittest_apply_overlay(overlay_nr, unittest_nr, &ovcs_id);
if (ret != 0) {
/* of_unittest_apply_overlay already called unittest() */
return ret;
@@ -1322,7 +1332,7 @@ static int of_unittest_apply_revert_overlay_check(int overlay_nr,
int unittest_nr, int before, int after,
enum overlay_type ovtype)
{
- int ret, ov_id;
+ int ret, ovcs_id;
/* unittest device must be in before state */
if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
@@ -1334,7 +1344,8 @@ static int of_unittest_apply_revert_overlay_check(int overlay_nr,
}
/* apply the overlay */
- ret = of_unittest_apply_overlay(overlay_nr, unittest_nr, &ov_id);
+ ovcs_id = 0;
+ ret = of_unittest_apply_overlay(overlay_nr, unittest_nr, &ovcs_id);
if (ret != 0) {
/* of_unittest_apply_overlay already called unittest() */
return ret;
@@ -1349,7 +1360,7 @@ static int of_unittest_apply_revert_overlay_check(int overlay_nr,
return -EINVAL;
}
- ret = of_overlay_destroy(ov_id);
+ ret = of_overlay_remove(&ovcs_id);
if (ret != 0) {
unittest(0, "overlay @\"%s\" failed to be destroyed @\"%s\"\n",
overlay_path(overlay_nr),
@@ -1451,7 +1462,7 @@ static void of_unittest_overlay_5(void)
static void of_unittest_overlay_6(void)
{
struct device_node *np;
- int ret, i, ov_id[2];
+ int ret, i, ov_id[2], ovcs_id;
int overlay_nr = 6, unittest_nr = 6;
int before = 0, after = 1;
@@ -1478,13 +1489,14 @@ static void of_unittest_overlay_6(void)
return;
}
- ret = of_overlay_create(np);
+ ovcs_id = 0;
+ ret = of_overlay_apply(np, &ovcs_id);
if (ret < 0) {
unittest(0, "could not create overlay from \"%s\"\n",
overlay_path(overlay_nr + i));
return;
}
- ov_id[i] = ret;
+ ov_id[i] = ovcs_id;
of_unittest_track_overlay(ov_id[i]);
}
@@ -1502,7 +1514,8 @@ static void of_unittest_overlay_6(void)
}
for (i = 1; i >= 0; i--) {
- ret = of_overlay_destroy(ov_id[i]);
+ ovcs_id = ov_id[i];
+ ret = of_overlay_remove(&ovcs_id);
if (ret != 0) {
unittest(0, "overlay @\"%s\" failed destroy @\"%s\"\n",
overlay_path(overlay_nr + i),
@@ -1533,7 +1546,7 @@ static void of_unittest_overlay_6(void)
static void of_unittest_overlay_8(void)
{
struct device_node *np;
- int ret, i, ov_id[2];
+ int ret, i, ov_id[2], ovcs_id;
int overlay_nr = 8, unittest_nr = 8;
/* we don't care about device state in this test */
@@ -1548,18 +1561,20 @@ static void of_unittest_overlay_8(void)
return;
}
- ret = of_overlay_create(np);
+ ovcs_id = 0;
+ ret = of_overlay_apply(np, &ovcs_id);
if (ret < 0) {
unittest(0, "could not create overlay from \"%s\"\n",
overlay_path(overlay_nr + i));
return;
}
- ov_id[i] = ret;
+ ov_id[i] = ovcs_id;
of_unittest_track_overlay(ov_id[i]);
}
/* now try to remove first overlay (it should fail) */
- ret = of_overlay_destroy(ov_id[0]);
+ ovcs_id = ov_id[0];
+ ret = of_overlay_remove(&ovcs_id);
if (ret == 0) {
unittest(0, "overlay @\"%s\" was destroyed @\"%s\"\n",
overlay_path(overlay_nr + 0),
@@ -1570,7 +1585,8 @@ static void of_unittest_overlay_8(void)
/* removing them in order should work */
for (i = 1; i >= 0; i--) {
- ret = of_overlay_destroy(ov_id[i]);
+ ovcs_id = ov_id[i];
+ ret = of_overlay_remove(&ovcs_id);
if (ret != 0) {
unittest(0, "overlay @\"%s\" not destroyed @\"%s\"\n",
overlay_path(overlay_nr + i),
@@ -2144,21 +2160,13 @@ static int __init overlay_data_add(int onum)
ret = 0;
goto out_free_data;
}
- of_node_set_flag(info->np_overlay, OF_DETACHED);
- ret = of_resolve_phandles(info->np_overlay);
- if (ret) {
- pr_err("resolve ot phandles (ret=%d), %d\n", ret, onum);
- goto out_free_np_overlay;
- }
-
- ret = of_overlay_create(info->np_overlay);
+ info->overlay_id = 0;
+ ret = of_overlay_apply(info->np_overlay, &info->overlay_id);
if (ret < 0) {
- pr_err("of_overlay_create() (ret=%d), %d\n", ret, onum);
+ pr_err("of_overlay_apply() (ret=%d), %d\n", ret, onum);
+ of_overlay_mutex_unlock();
goto out_free_np_overlay;
- } else {
- info->overlay_id = ret;
- ret = 0;
}
pr_debug("__dtb_overlay_begin applied, overlay id %d\n", ret);
@@ -2207,7 +2215,10 @@ static __init void of_unittest_overlay_high_level(void)
* Could not fixup phandles in unittest_unflatten_overlay_base()
* because kmalloc() was not yet available.
*/
+ of_overlay_mutex_lock();
of_resolve_phandles(overlay_base_root);
+ of_overlay_mutex_unlock();
+
/*
* do not allow overlay_base to duplicate any node already in
diff --git a/drivers/opp/Kconfig b/drivers/opp/Kconfig
new file mode 100644
index 000000000000..a7fbb93f302c
--- /dev/null
+++ b/drivers/opp/Kconfig
@@ -0,0 +1,13 @@
+config PM_OPP
+ bool
+ select SRCU
+ ---help---
+ SOCs have a standard set of tuples consisting of frequency and
+ voltage pairs that the device will support per voltage domain. This
+ is called Operating Performance Point or OPP. The actual definitions
+ of OPP vary over silicon within the same family of devices.
+
+ The OPP layer organizes the data internally using device pointers
+ representing individual voltage domains and provides SOC
+ implementations with a ready-to-use framework to manage OPPs.
+ For more information, read <file:Documentation/power/opp.txt>
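For illustration only (the numbers are hypothetical): a CPU voltage domain might expose OPPs such as {1.0 GHz, 1100 mV}, {800 MHz, 1025 mV} and {600 MHz, 950 mV}; the framework then lets a caller such as dev_pm_opp_set_rate() select a suitable tuple for a requested frequency.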
diff --git a/drivers/base/power/opp/Makefile b/drivers/opp/Makefile
index e70ceb406fe9..e70ceb406fe9 100644
--- a/drivers/base/power/opp/Makefile
+++ b/drivers/opp/Makefile
diff --git a/drivers/base/power/opp/core.c b/drivers/opp/core.c
index a6de32530693..92fa94a6dcc1 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/opp/core.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
+#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include "opp.h"
@@ -296,7 +297,7 @@ int dev_pm_opp_get_opp_count(struct device *dev)
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
count = PTR_ERR(opp_table);
- dev_err(dev, "%s: OPP table not found (%d)\n",
+ dev_dbg(dev, "%s: OPP table not found (%d)\n",
__func__, count);
return count;
}
@@ -535,6 +536,44 @@ _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
return ret;
}
+static inline int
+_generic_set_opp_domain(struct device *dev, struct clk *clk,
+ unsigned long old_freq, unsigned long freq,
+ unsigned int old_pstate, unsigned int new_pstate)
+{
+ int ret;
+
+ /* Scaling up? Scale domain performance state before frequency */
+ if (freq > old_freq) {
+ ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
+ if (ret)
+ return ret;
+ }
+
+ ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+ if (ret)
+ goto restore_domain_state;
+
+ /* Scaling down? Scale domain performance state after frequency */
+ if (freq < old_freq) {
+ ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
+ if (ret)
+ goto restore_freq;
+ }
+
+ return 0;
+
+restore_freq:
+ if (_generic_set_opp_clk_only(dev, clk, freq, old_freq))
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+ __func__, old_freq);
+restore_domain_state:
+ if (freq > old_freq)
+ dev_pm_genpd_set_performance_state(dev, old_pstate);
+
+ return ret;
+}
+
static int _generic_set_opp_regulator(const struct opp_table *opp_table,
struct device *dev,
unsigned long old_freq,
@@ -653,7 +692,16 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
/* Only frequency scaling */
if (!opp_table->regulators) {
- ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+ /*
+ * We don't support devices with both regulator and
+ * domain performance-state for now.
+ */
+ if (opp_table->genpd_performance_state)
+ ret = _generic_set_opp_domain(dev, clk, old_freq, freq,
+ IS_ERR(old_opp) ? 0 : old_opp->pstate,
+ opp->pstate);
+ else
+ ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
} else if (!opp_table->set_opp) {
ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
IS_ERR(old_opp) ? NULL : old_opp->supplies,
@@ -988,6 +1036,9 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
return ret;
}
+ if (opp_table->get_pstate)
+ new_opp->pstate = opp_table->get_pstate(dev, new_opp->rate);
+
list_add(&new_opp->node, head);
mutex_unlock(&opp_table->lock);
@@ -1476,13 +1527,13 @@ err:
EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
/**
- * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for
+ * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
* set_opp helper
* @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
*
* Release resources blocked for platform specific set_opp helper.
*/
-void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table)
+void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
{
if (!opp_table->set_opp) {
pr_err("%s: Doesn't have custom set_opp helper set\n",
@@ -1497,7 +1548,82 @@ void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table)
dev_pm_opp_put_opp_table(opp_table);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);
+EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
+
+/**
+ * dev_pm_opp_register_get_pstate_helper() - Register get_pstate() helper.
+ * @dev: Device for which the helper is getting registered.
+ * @get_pstate: Helper.
+ *
+ * TODO: Remove this callback after the same information is available via Device
+ * Tree.
+ *
+ * This allows a platform to initialize the performance states of individual
+ * OPPs for its devices, until we get similar information directly from DT.
+ *
+ * This must be called before the OPPs are initialized for the device.
+ */
+struct opp_table *dev_pm_opp_register_get_pstate_helper(struct device *dev,
+ int (*get_pstate)(struct device *dev, unsigned long rate))
+{
+ struct opp_table *opp_table;
+ int ret;
+
+ if (!get_pstate)
+ return ERR_PTR(-EINVAL);
+
+ opp_table = dev_pm_opp_get_opp_table(dev);
+ if (!opp_table)
+ return ERR_PTR(-ENOMEM);
+
+ /* This should be called before OPPs are initialized */
+ if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* Already have genpd_performance_state set */
+ if (WARN_ON(opp_table->genpd_performance_state)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ opp_table->genpd_performance_state = true;
+ opp_table->get_pstate = get_pstate;
+
+ return opp_table;
+
+err:
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_register_get_pstate_helper);
+
+/**
+ * dev_pm_opp_unregister_get_pstate_helper() - Releases resources blocked for
+ * get_pstate() helper
+ * @opp_table: OPP table returned from dev_pm_opp_register_get_pstate_helper().
+ *
+ * Release resources blocked for platform specific get_pstate() helper.
+ */
+void dev_pm_opp_unregister_get_pstate_helper(struct opp_table *opp_table)
+{
+ if (!opp_table->genpd_performance_state) {
+ pr_err("%s: Doesn't have performance states set\n",
+ __func__);
+ return;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ opp_table->genpd_performance_state = false;
+ opp_table->get_pstate = NULL;
+
+ dev_pm_opp_put_opp_table(opp_table);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_get_pstate_helper);
/**
* dev_pm_opp_add() - Add an OPP table from a table definitions
@@ -1706,6 +1832,13 @@ void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev,
if (remove_all || !opp->dynamic)
dev_pm_opp_put(opp);
}
+
+ /*
+ * The OPP table is getting removed, drop the performance state
+ * constraints.
+ */
+ if (opp_table->genpd_performance_state)
+ dev_pm_genpd_set_performance_state(dev, 0);
} else {
_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
}
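The core.c hunks above introduce dev_pm_opp_register_get_pstate_helper(), which lets a platform attach a genpd performance state to each OPP before the table is populated, and _generic_set_opp_domain(), which orders dev_pm_genpd_set_performance_state() around the clock change. A sketch of how a platform might register the helper; my_get_pstate() is a hypothetical callback and the rate threshold is an arbitrary example:

static int my_get_pstate(struct device *dev, unsigned long rate)
{
	/* hypothetical mapping from clock rate to domain performance state */
	return rate >= 800000000 ? 2 : 1;
}

	/* must run before any OPPs are added for the device */
	opp_table = dev_pm_opp_register_get_pstate_helper(dev, my_get_pstate);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);
	/* dev_pm_opp_unregister_get_pstate_helper(opp_table) is the matching
	   teardown call, made once the OPP table is empty again */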
diff --git a/drivers/base/power/opp/cpu.c b/drivers/opp/cpu.c
index 2d87bc1adf38..2d87bc1adf38 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/opp/cpu.c
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/opp/debugfs.c
index 81cf120fcf43..b03c03576a62 100644
--- a/drivers/base/power/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -41,16 +41,15 @@ static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
{
struct dentry *d;
int i;
- char *name;
for (i = 0; i < opp_table->regulator_count; i++) {
- name = kasprintf(GFP_KERNEL, "supply-%d", i);
+ char name[15];
+
+ snprintf(name, sizeof(name), "supply-%d", i);
/* Create per-opp directory */
d = debugfs_create_dir(name, pdentry);
- kfree(name);
-
if (!d)
return false;
@@ -100,6 +99,9 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend))
return -ENOMEM;
+ if (!debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate))
+ return -ENOMEM;
+
if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
return -ENOMEM;
diff --git a/drivers/base/power/opp/of.c b/drivers/opp/of.c
index 0b718886479b..cb716aa2f44b 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/opp/of.c
@@ -16,7 +16,7 @@
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/device.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -397,6 +397,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
ret);
_dev_pm_opp_remove_table(opp_table, dev, false);
+ of_node_put(np);
goto put_opp_table;
}
}
@@ -603,7 +604,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
if (cpu == cpu_dev->id)
continue;
- cpu_np = of_get_cpu_node(cpu, NULL);
+ cpu_np = of_cpu_device_node_get(cpu);
if (!cpu_np) {
dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
__func__, cpu);
@@ -613,6 +614,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
/* Get OPP descriptor node */
tmp_np = _opp_of_get_opp_desc_node(cpu_np);
+ of_node_put(cpu_np);
if (!tmp_np) {
pr_err("%pOF: Couldn't find opp node\n", cpu_np);
ret = -ENOENT;
diff --git a/drivers/base/power/opp/opp.h b/drivers/opp/opp.h
index 166eef990599..4d00061648a3 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -58,6 +58,7 @@ extern struct list_head opp_tables;
* @dynamic: not-created from static DT entries.
* @turbo: true if turbo (boost) OPP
* @suspend: true if suspend OPP
+ * @pstate: Device's power domain's performance state.
* @rate: Frequency in hertz
* @supplies: Power supplies voltage/current values
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
@@ -76,6 +77,7 @@ struct dev_pm_opp {
bool dynamic;
bool turbo;
bool suspend;
+ unsigned int pstate;
unsigned long rate;
struct dev_pm_opp_supply *supplies;
@@ -135,8 +137,10 @@ enum opp_table_access {
* @clk: Device's clock handle
* @regulators: Supply regulators
* @regulator_count: Number of power supply regulators
+ * @genpd_performance_state: Device's power domain supports performance states.
* @set_opp: Platform specific set_opp callback
* @set_opp_data: Data to be passed to set_opp callback
+ * @get_pstate: Platform specific get_pstate callback
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
*
@@ -170,9 +174,11 @@ struct opp_table {
struct clk *clk;
struct regulator **regulators;
unsigned int regulator_count;
+ bool genpd_performance_state;
int (*set_opp)(struct dev_pm_set_opp_data *data);
struct dev_pm_set_opp_data *set_opp_data;
+ int (*get_pstate)(struct device *dev, unsigned long rate);
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index 0186db7680d4..62873070f988 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -1769,7 +1769,7 @@ stop:
/*--- Default parport operations ---------------------------------------*/
-static __initdata struct parport_operations parport_ip32_ops = {
+static const struct parport_operations parport_ip32_ops __initconst = {
.write_data = parport_ip32_write_data,
.read_data = parport_ip32_read_data,
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index c32a77fc8b03..90944667ccea 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -29,6 +29,15 @@ config PCI_MSI_IRQ_DOMAIN
depends on PCI_MSI
select GENERIC_MSI_IRQ_DOMAIN
+config PCI_QUIRKS
+ default y
+ bool "Enable PCI quirk workarounds" if EXPERT
+ depends on PCI
+ help
+ This enables workarounds for various PCI chipset bugs/quirks.
+ Disable this only if your target machine is unaffected by PCI
+ quirks.
+
config PCI_DEBUG
bool "PCI Debugging"
depends on PCI && DEBUG_KERNEL
@@ -42,13 +51,13 @@ config PCI_DEBUG
config PCI_REALLOC_ENABLE_AUTO
bool "Enable PCI resource re-allocation detection"
depends on PCI
+ depends on PCI_IOV
help
Say Y here if you want the PCI core to detect if PCI resource
re-allocation needs to be enabled. You can always use pci=realloc=on
- or pci=realloc=off to override it. Note this feature is a no-op
- unless PCI_IOV support is also enabled; in that case it will
- automatically re-allocate PCI resources if SR-IOV BARs have not
- been allocated by the BIOS.
+ or pci=realloc=off to override it. It will automatically
+ re-allocate PCI resources if SR-IOV BARs have not been allocated by
+ the BIOS.
When in doubt, say N.
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 80adbdbcecce..3d5e047f0a32 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -17,9 +17,6 @@ obj-$(CONFIG_PCIEPORTBUS) += pcie/
# Build the PCI Hotplug drivers if we were asked to
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
-ifdef CONFIG_HOTPLUG_PCI
-obj-y += hotplug-pci.o
-endif
# Build the PCI MSI interrupt support
obj-$(CONFIG_PCI_MSI) += msi.o
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index 22ec82fcdea2..113e09440f85 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -169,4 +169,14 @@ config PCIE_KIRIN
Say Y here if you want PCIe controller support
on HiSilicon Kirin series SoCs.
+config PCIE_HISI_STB
+ bool "HiSilicon STB SoCs PCIe controllers"
+ depends on ARCH_HISI
+ depends on PCI
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIEPORTBUS
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support on HiSilicon STB SoCs
+
endmenu
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile
index e73661182da0..41ba499c96ee 100644
--- a/drivers/pci/dwc/Makefile
+++ b/drivers/pci/dwc/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
+obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index 34427a6a15af..e77a4ceed74c 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -11,6 +11,7 @@
*/
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -594,6 +595,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
int i;
int phy_count;
struct phy **phy;
+ struct device_link **link;
void __iomem *base;
struct resource *res;
struct dw_pcie *pci;
@@ -649,11 +651,21 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
if (!phy)
return -ENOMEM;
+ link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
for (i = 0; i < phy_count; i++) {
snprintf(name, sizeof(name), "pcie-phy%d", i);
phy[i] = devm_phy_get(dev, name);
if (IS_ERR(phy[i]))
return PTR_ERR(phy[i]);
+
+ link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+ if (!link[i]) {
+ ret = -EINVAL;
+ goto err_link;
+ }
}
dra7xx->base = base;
@@ -732,6 +744,10 @@ err_get_sync:
pm_runtime_disable(dev);
dra7xx_pcie_disable_phy(dra7xx);
+err_link:
+ while (--i >= 0)
+ device_link_del(link[i]);
+
return ret;
}
@@ -794,6 +810,22 @@ static int dra7xx_pcie_resume_noirq(struct device *dev)
}
#endif
+void dra7xx_pcie_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ int ret;
+
+ dra7xx_pcie_stop_link(dra7xx->pci);
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+ pm_runtime_disable(dev);
+ dra7xx_pcie_disable_phy(dra7xx);
+}
+
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
@@ -807,5 +839,6 @@ static struct platform_driver dra7xx_pcie_driver = {
.suppress_bind_attrs = true,
.pm = &dra7xx_pcie_pm_ops,
},
+ .shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
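The pci-dra7xx.c hunks above create a device link from the PCIe controller to each PHY so that suspend/resume and shutdown ordering follows the dependency, and delete the links again on the probe error path. The minimal pattern, with consumer and supplier as placeholder struct device pointers:

	struct device_link *link;

	link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
	if (!link)
		return -EINVAL;

	/* ... */

	device_link_del(link);	/* stateless links must be removed explicitly */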
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index 87fa486bee2c..8f34c2fdc600 100644
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -33,6 +33,8 @@
/* PEX Internal Configuration Registers */
#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
+#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
+#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */
#define PCIE_IATU_NUM 6
@@ -124,6 +126,14 @@ static int ls_pcie_link_up(struct dw_pcie *pci)
return 1;
}
+/* Forward error response of outbound non-posted requests */
+static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
+}
+
static int ls_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -135,6 +145,7 @@ static int ls_pcie_host_init(struct pcie_port *pp)
* dw_pcie_setup_rc() will reconfigure the outbound windows.
*/
ls_pcie_disable_outbound_atus(pcie);
+ ls_pcie_fix_error_response(pcie);
dw_pcie_dbi_ro_wr_en(pci);
ls_pcie_clear_multifunction(pcie);
@@ -253,6 +264,7 @@ static struct ls_pcie_drvdata ls2088_drvdata = {
};
static const struct of_device_id ls_pcie_of_match[] = {
+ { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
diff --git a/drivers/pci/dwc/pcie-histb.c b/drivers/pci/dwc/pcie-histb.c
new file mode 100644
index 000000000000..33b01b734d7d
--- /dev/null
+++ b/drivers/pci/dwc/pcie-histb.c
@@ -0,0 +1,470 @@
+/*
+ * PCIe host controller driver for HiSilicon STB SoCs
+ *
+ * Copyright (C) 2016-2017 HiSilicon Co., Ltd. http://www.hisilicon.com
+ *
+ * Authors: Ruqiang Ju <juruqiang@hisilicon.com>
+ * Jianguo Sun <sunjianguo1@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define to_histb_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_SYS_CTRL0 0x0000
+#define PCIE_SYS_CTRL1 0x0004
+#define PCIE_SYS_CTRL7 0x001C
+#define PCIE_SYS_CTRL13 0x0034
+#define PCIE_SYS_CTRL15 0x003C
+#define PCIE_SYS_CTRL16 0x0040
+#define PCIE_SYS_CTRL17 0x0044
+
+#define PCIE_SYS_STAT0 0x0100
+#define PCIE_SYS_STAT4 0x0110
+
+#define PCIE_RDLH_LINK_UP BIT(5)
+#define PCIE_XMLH_LINK_UP BIT(15)
+#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
+#define PCIE_APP_LTSSM_ENABLE BIT(11)
+
+#define PCIE_DEVICE_TYPE_MASK GENMASK(31, 28)
+#define PCIE_WM_EP 0
+#define PCIE_WM_LEGACY BIT(1)
+#define PCIE_WM_RC BIT(30)
+
+#define PCIE_LTSSM_STATE_MASK GENMASK(5, 0)
+#define PCIE_LTSSM_STATE_ACTIVE 0x11
+
+struct histb_pcie {
+ struct dw_pcie *pci;
+ struct clk *aux_clk;
+ struct clk *pipe_clk;
+ struct clk *sys_clk;
+ struct clk *bus_clk;
+ struct phy *phy;
+ struct reset_control *soft_reset;
+ struct reset_control *sys_reset;
+ struct reset_control *bus_reset;
+ void __iomem *ctrl;
+ int reset_gpio;
+};
+
+static u32 histb_pcie_readl(struct histb_pcie *histb_pcie, u32 reg)
+{
+ return readl(histb_pcie->ctrl + reg);
+}
+
+static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val)
+{
+ writel(val, histb_pcie->ctrl + reg);
+}
+
+static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 val;
+
+ val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ if (enable)
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val);
+}
+
+static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 val;
+
+ val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL1);
+ if (enable)
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL1, val);
+}
+
+static u32 histb_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
+{
+ u32 val;
+
+ histb_pcie_dbi_r_mode(&pci->pp, true);
+ dw_pcie_read(base + reg, size, &val);
+ histb_pcie_dbi_r_mode(&pci->pp, false);
+
+ return val;
+}
+
+static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ histb_pcie_dbi_w_mode(&pci->pp, true);
+ dw_pcie_write(base + reg, size, val);
+ histb_pcie_dbi_w_mode(&pci->pp, false);
+}
+
+static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where,
+ int size, u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
+
+ histb_pcie_dbi_r_mode(pp, true);
+ ret = dw_pcie_read(pci->dbi_base + where, size, val);
+ histb_pcie_dbi_r_mode(pp, false);
+
+ return ret;
+}
+
+static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where,
+ int size, u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
+
+ histb_pcie_dbi_w_mode(pp, true);
+ ret = dw_pcie_write(pci->dbi_base + where, size, val);
+ histb_pcie_dbi_w_mode(pp, false);
+
+ return ret;
+}
+
+static int histb_pcie_link_up(struct dw_pcie *pci)
+{
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+ u32 status;
+
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0);
+ status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4);
+ status &= PCIE_LTSSM_STATE_MASK;
+ if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
+ (status == PCIE_LTSSM_STATE_ACTIVE))
+ return 1;
+
+ return 0;
+}
+
+static int histb_pcie_establish_link(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+
+ if (dw_pcie_link_up(pci)) {
+ dev_info(pci->dev, "Link already up\n");
+ return 0;
+ }
+
+ /* PCIe RC work mode */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ regval &= ~PCIE_DEVICE_TYPE_MASK;
+ regval |= PCIE_WM_RC;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
+
+ /* setup root complex */
+ dw_pcie_setup_rc(pp);
+
+ /* assert LTSSM enable */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7);
+ regval |= PCIE_APP_LTSSM_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval);
+
+ return dw_pcie_wait_for_link(pci);
+}
+
+static int histb_pcie_host_init(struct pcie_port *pp)
+{
+ histb_pcie_establish_link(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+
+ return 0;
+}
+
+static struct dw_pcie_host_ops histb_pcie_host_ops = {
+ .rd_own_conf = histb_pcie_rd_own_conf,
+ .wr_own_conf = histb_pcie_wr_own_conf,
+ .host_init = histb_pcie_host_init,
+};
+
+static irqreturn_t histb_pcie_msi_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ return dw_handle_msi_irq(pp);
+}
+
+static void histb_pcie_host_disable(struct histb_pcie *hipcie)
+{
+ reset_control_assert(hipcie->soft_reset);
+ reset_control_assert(hipcie->sys_reset);
+ reset_control_assert(hipcie->bus_reset);
+
+ clk_disable_unprepare(hipcie->aux_clk);
+ clk_disable_unprepare(hipcie->pipe_clk);
+ clk_disable_unprepare(hipcie->sys_clk);
+ clk_disable_unprepare(hipcie->bus_clk);
+
+ if (gpio_is_valid(hipcie->reset_gpio))
+ gpio_set_value_cansleep(hipcie->reset_gpio, 0);
+}
+
+static int histb_pcie_host_enable(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ struct device *dev = pci->dev;
+ int ret;
+
+ /* power on the PCIe device, if present */
+ if (gpio_is_valid(hipcie->reset_gpio))
+ gpio_set_value_cansleep(hipcie->reset_gpio, 1);
+
+ ret = clk_prepare_enable(hipcie->bus_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable bus clk\n");
+ goto err_bus_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->sys_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable sys clk\n");
+ goto err_sys_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clk\n");
+ goto err_pipe_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clk\n");
+ goto err_aux_clk;
+ }
+
+ reset_control_assert(hipcie->soft_reset);
+ reset_control_deassert(hipcie->soft_reset);
+
+ reset_control_assert(hipcie->sys_reset);
+ reset_control_deassert(hipcie->sys_reset);
+
+ reset_control_assert(hipcie->bus_reset);
+ reset_control_deassert(hipcie->bus_reset);
+
+ return 0;
+
+err_aux_clk:
+ clk_disable_unprepare(hipcie->aux_clk);
+err_pipe_clk:
+ clk_disable_unprepare(hipcie->pipe_clk);
+err_sys_clk:
+ clk_disable_unprepare(hipcie->sys_clk);
+err_bus_clk:
+ clk_disable_unprepare(hipcie->bus_clk);
+
+ return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .read_dbi = histb_pcie_read_dbi,
+ .write_dbi = histb_pcie_write_dbi,
+ .link_up = histb_pcie_link_up,
+};
+
+static int histb_pcie_probe(struct platform_device *pdev)
+{
+ struct histb_pcie *hipcie;
+ struct dw_pcie *pci;
+ struct pcie_port *pp;
+ struct resource *res;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ enum of_gpio_flags of_flags;
+ unsigned long flag = GPIOF_DIR_OUT;
+ int ret;
+
+ hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL);
+ if (!hipcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ hipcie->pci = pci;
+ pp = &pci->pp;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
+ hipcie->ctrl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hipcie->ctrl)) {
+ dev_err(dev, "cannot get control reg base\n");
+ return PTR_ERR(hipcie->ctrl);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi");
+ pci->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base)) {
+ dev_err(dev, "cannot get rc-dbi base\n");
+ return PTR_ERR(pci->dbi_base);
+ }
+
+ hipcie->reset_gpio = of_get_named_gpio_flags(np,
+ "reset-gpios", 0, &of_flags);
+ if (of_flags & OF_GPIO_ACTIVE_LOW)
+ flag |= GPIOF_ACTIVE_LOW;
+ if (gpio_is_valid(hipcie->reset_gpio)) {
+ ret = devm_gpio_request_one(dev, hipcie->reset_gpio,
+ flag, "PCIe device power control");
+ if (ret) {
+ dev_err(dev, "unable to request gpio\n");
+ return ret;
+ }
+ }
+
+ hipcie->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(hipcie->aux_clk)) {
+ dev_err(dev, "Failed to get PCIe aux clk\n");
+ return PTR_ERR(hipcie->aux_clk);
+ }
+
+ hipcie->pipe_clk = devm_clk_get(dev, "pipe");
+ if (IS_ERR(hipcie->pipe_clk)) {
+ dev_err(dev, "Failed to get PCIe pipe clk\n");
+ return PTR_ERR(hipcie->pipe_clk);
+ }
+
+ hipcie->sys_clk = devm_clk_get(dev, "sys");
+ if (IS_ERR(hipcie->sys_clk)) {
+ dev_err(dev, "Failed to get PCIe sys clk\n");
+ return PTR_ERR(hipcie->sys_clk);
+ }
+
+ hipcie->bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(hipcie->bus_clk)) {
+ dev_err(dev, "Failed to get PCIe bus clk\n");
+ return PTR_ERR(hipcie->bus_clk);
+ }
+
+ hipcie->soft_reset = devm_reset_control_get(dev, "soft");
+ if (IS_ERR(hipcie->soft_reset)) {
+ dev_err(dev, "couldn't get soft reset\n");
+ return PTR_ERR(hipcie->soft_reset);
+ }
+
+ hipcie->sys_reset = devm_reset_control_get(dev, "sys");
+ if (IS_ERR(hipcie->sys_reset)) {
+ dev_err(dev, "couldn't get sys reset\n");
+ return PTR_ERR(hipcie->sys_reset);
+ }
+
+ hipcie->bus_reset = devm_reset_control_get(dev, "bus");
+ if (IS_ERR(hipcie->bus_reset)) {
+ dev_err(dev, "couldn't get bus reset\n");
+ return PTR_ERR(hipcie->bus_reset);
+ }
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+ if (pp->msi_irq < 0) {
+ dev_err(dev, "Failed to get MSI IRQ\n");
+ return pp->msi_irq;
+ }
+
+ ret = devm_request_irq(dev, pp->msi_irq,
+ histb_pcie_msi_irq_handler,
+ IRQF_SHARED, "histb-pcie-msi", pp);
+ if (ret) {
+ dev_err(dev, "cannot request MSI IRQ\n");
+ return ret;
+ }
+ }
+
+ hipcie->phy = devm_phy_get(dev, "phy");
+ if (IS_ERR(hipcie->phy)) {
+ dev_info(dev, "no pcie-phy found\n");
+ hipcie->phy = NULL;
+ /*
+ * Fall through: if no pcie-phy is found, PHY
+ * initialization is expected to have been done
+ * by the boot firmware.
+ */
+ } else {
+ phy_init(hipcie->phy);
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &histb_pcie_host_ops;
+
+ platform_set_drvdata(pdev, hipcie);
+
+ ret = histb_pcie_host_enable(pp);
+ if (ret) {
+ dev_err(dev, "failed to enable host\n");
+ return ret;
+ }
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int histb_pcie_remove(struct platform_device *pdev)
+{
+ struct histb_pcie *hipcie = platform_get_drvdata(pdev);
+
+ histb_pcie_host_disable(hipcie);
+
+ if (hipcie->phy)
+ phy_exit(hipcie->phy);
+
+ return 0;
+}
+
+static const struct of_device_id histb_pcie_of_match[] = {
+ { .compatible = "hisilicon,hi3798cv200-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, histb_pcie_of_match);
+
+static struct platform_driver histb_pcie_platform_driver = {
+ .probe = histb_pcie_probe,
+ .remove = histb_pcie_remove,
+ .driver = {
+ .name = "histb-pcie",
+ .of_match_table = histb_pcie_of_match,
+ },
+};
+module_platform_driver(histb_pcie_platform_driver);
+
+MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 424fdd6ed1ca..4f74386c1ced 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -150,7 +150,7 @@ static struct configfs_item_operations pci_epc_item_ops = {
.drop_link = pci_epc_epf_unlink,
};
-static struct config_item_type pci_epc_type = {
+static const struct config_item_type pci_epc_type = {
.ct_item_ops = &pci_epc_item_ops,
.ct_attrs = pci_epc_attrs,
.ct_owner = THIS_MODULE,
@@ -361,7 +361,7 @@ static struct configfs_item_operations pci_epf_ops = {
.release = pci_epf_release,
};
-static struct config_item_type pci_epf_type = {
+static const struct config_item_type pci_epf_type = {
.ct_item_ops = &pci_epf_ops,
.ct_attrs = pci_epf_attrs,
.ct_owner = THIS_MODULE,
@@ -400,7 +400,7 @@ static struct configfs_group_operations pci_epf_group_ops = {
.drop_item = &pci_epf_drop,
};
-static struct config_item_type pci_epf_group_type = {
+static const struct config_item_type pci_epf_group_type = {
.ct_group_ops = &pci_epf_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -428,15 +428,15 @@ void pci_ep_cfs_remove_epf_group(struct config_group *group)
}
EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
-static struct config_item_type pci_functions_type = {
+static const struct config_item_type pci_functions_type = {
.ct_owner = THIS_MODULE,
};
-static struct config_item_type pci_controllers_type = {
+static const struct config_item_type pci_controllers_type = {
.ct_owner = THIS_MODULE,
};
-static struct config_item_type pci_ep_type = {
+static const struct config_item_type pci_ep_type = {
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index b868803792d8..38d12980db0f 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -95,6 +95,12 @@ config PCI_XGENE_MSI
Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC.
+config PCI_V3_SEMI
+ bool "V3 Semiconductor PCI controller"
+ depends on OF
+ depends on ARM
+ default ARCH_INTEGRATOR_AP
+
config PCI_VERSATILE
bool "ARM Versatile PB PCI controller"
depends on ARCH_VERSATILE
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 95f5b80ca52a..34ec1d88f961 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
+obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
index 4ea7d2ebcc5c..b9617d1c1d48 100644
--- a/drivers/pci/host/pci-ftpci100.c
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -371,24 +371,6 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
return 0;
}
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
- parser->end = parser->range + rlen / sizeof(__be32);
-
- return 0;
-}
-
static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
struct device_node *np)
{
@@ -403,7 +385,7 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
int i = 0;
u32 val;
- if (pci_dma_range_parser_init(&parser, np)) {
+ if (of_pci_dma_range_parser_init(&parser, np)) {
dev_err(dev, "missing dma-ranges property\n");
return -EINVAL;
}
@@ -482,7 +464,7 @@ static int faraday_pci_probe(struct platform_device *pdev)
}
p->bus_clk = devm_clk_get(dev, "PCICLK");
if (IS_ERR(p->bus_clk))
- return PTR_ERR(clk);
+ return PTR_ERR(p->bus_clk);
ret = clk_prepare_enable(p->bus_clk);
if (ret) {
dev_err(dev, "could not prepare PCICLK\n");
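The pci-ftpci100.c hunk above drops the driver-local dma-ranges parser in favour of the shared of_pci_dma_range_parser_init(). A hedged sketch of the usual consumption pattern, using the for_each_of_pci_range() iterator from <linux/of_address.h>:

	struct of_pci_range_parser parser;
	struct of_pci_range range;

	if (of_pci_dma_range_parser_init(&parser, np))
		return -EINVAL;	/* no dma-ranges property */

	for_each_of_pci_range(&parser, &range) {
		/* range.pci_addr, range.cpu_addr and range.size describe one entry */
	}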
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 7d709a7e0aa8..2f05511ce718 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -35,6 +35,40 @@ static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
}
};
+static bool pci_dw_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+
+ /*
+ * The Synopsys DesignWare PCIe controller in ECAM mode will not filter
+ * type 0 config TLPs sent to devices 1 and up on its downstream port,
+ * resulting in devices appearing multiple times on bus 0 unless we
+ * filter out those accesses here.
+ */
+ if (bus->number == cfg->busr.start && PCI_SLOT(devfn) > 0)
+ return false;
+
+ return true;
+}
+
+static void __iomem *pci_dw_ecam_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ if (!pci_dw_valid_device(bus, devfn))
+ return NULL;
+
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static struct pci_ecam_ops pci_dw_ecam_bus_ops = {
+ .bus_shift = 20,
+ .pci_ops = {
+ .map_bus = pci_dw_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
static const struct of_device_id gen_pci_of_match[] = {
{ .compatible = "pci-host-cam-generic",
.data = &gen_pci_cfg_cam_bus_ops },
@@ -42,6 +76,15 @@ static const struct of_device_id gen_pci_of_match[] = {
{ .compatible = "pci-host-ecam-generic",
.data = &pci_generic_ecam_ops },
+ { .compatible = "marvell,armada8k-pcie-ecam",
+ .data = &pci_dw_ecam_bus_ops },
+
+ { .compatible = "socionext,synquacer-pcie-ecam",
+ .data = &pci_dw_ecam_bus_ops },
+
+ { .compatible = "snps,dw-pcie-ecam",
+ .data = &pci_dw_ecam_bus_ops },
+
{ },
};
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 0fe3ea164ee5..04dac6a42c9f 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -879,7 +879,7 @@ static void hv_irq_unmask(struct irq_data *data)
int cpu;
u64 res;
- dest = irq_data_get_affinity_mask(data);
+ dest = irq_data_get_effective_affinity_mask(data);
pdev = msi_desc_to_pci_dev(msi_desc);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
@@ -1042,6 +1042,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct hv_pci_dev *hpdev;
struct pci_bus *pbus;
struct pci_dev *pdev;
+ struct cpumask *dest;
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
struct {
@@ -1056,6 +1057,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
int ret;
pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
+ dest = irq_data_get_effective_affinity_mask(data);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
@@ -1081,14 +1083,14 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
switch (pci_protocol_version) {
case PCI_PROTOCOL_VERSION_1_1:
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
- irq_data_get_affinity_mask(data),
+ dest,
hpdev->desc.win_slot.slot,
cfg->vector);
break;
case PCI_PROTOCOL_VERSION_1_2:
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
- irq_data_get_affinity_mask(data),
+ dest,
hpdev->desc.win_slot.slot,
cfg->vector);
break;
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 6f879685fedd..e46de69f0380 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -293,24 +293,6 @@ static struct pci_ops rcar_pci_ops = {
.write = pci_generic_config_write,
};
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
-
- parser->end = parser->range + rlen / sizeof(__be32);
- return 0;
-}
-
static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
struct device_node *np)
{
@@ -320,7 +302,7 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
int index = 0;
/* Failure to parse is ok as we fall back to defaults */
- if (pci_dma_range_parser_init(&parser, np))
+ if (of_pci_dma_range_parser_init(&parser, np))
return 0;
/* Get the dma-ranges from DT */
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 1987fec1f126..f9d3960dc39f 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -159,10 +159,13 @@
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401 (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20)
#define AFI_FUSE 0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
@@ -253,6 +256,7 @@ struct tegra_pcie_soc {
bool has_cml_clk;
bool has_gen2;
bool force_pca_enable;
+ bool program_uphy;
};
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
@@ -492,12 +496,32 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
return addr;
}
+static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ if (bus->number == 0)
+ return pci_generic_config_read32(bus, devfn, where, size,
+ value);
+
+ return pci_generic_config_read(bus, devfn, where, size, value);
+}
+
+static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (bus->number == 0)
+ return pci_generic_config_write32(bus, devfn, where, size,
+ value);
+
+ return pci_generic_config_write(bus, devfn, where, size, value);
+}
+
static struct pci_ops tegra_pcie_ops = {
.add_bus = tegra_pcie_add_bus,
.remove_bus = tegra_pcie_remove_bus,
.map_bus = tegra_pcie_map_bus,
- .read = pci_generic_config_read32,
- .write = pci_generic_config_write32,
+ .read = tegra_pcie_config_read,
+ .write = tegra_pcie_config_write,
};
static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
@@ -1013,10 +1037,12 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
afi_writel(pcie, value, AFI_FUSE);
}
- err = tegra_pcie_phy_power_on(pcie);
- if (err < 0) {
- dev_err(dev, "failed to power on PHY(s): %d\n", err);
- return err;
+ if (soc->program_uphy) {
+ err = tegra_pcie_phy_power_on(pcie);
+ if (err < 0) {
+ dev_err(dev, "failed to power on PHY(s): %d\n", err);
+ return err;
+ }
}
/* take the PCIe interface module out of reset */
@@ -1049,19 +1075,23 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
int err;
/* TODO: disable and unprepare clocks? */
- err = tegra_pcie_phy_power_off(pcie);
- if (err < 0)
- dev_err(dev, "failed to power off PHY(s): %d\n", err);
+ if (soc->program_uphy) {
+ err = tegra_pcie_phy_power_off(pcie);
+ if (err < 0)
+ dev_err(dev, "failed to power off PHY(s): %d\n", err);
+ }
reset_control_assert(pcie->pcie_xrst);
reset_control_assert(pcie->afi_rst);
reset_control_assert(pcie->pex_rst);
- tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+ if (!dev->pm_domain)
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
if (err < 0)
@@ -1078,19 +1108,29 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
reset_control_assert(pcie->afi_rst);
reset_control_assert(pcie->pex_rst);
- tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+ if (!dev->pm_domain)
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
/* enable regulators */
err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
if (err < 0)
dev_err(dev, "failed to enable regulators: %d\n", err);
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
- pcie->pex_clk,
- pcie->pex_rst);
- if (err) {
- dev_err(dev, "powerup sequence failed: %d\n", err);
- return err;
+ if (dev->pm_domain) {
+ err = clk_prepare_enable(pcie->pex_clk);
+ if (err) {
+ dev_err(dev, "failed to enable PEX clock: %d\n", err);
+ return err;
+ }
+ reset_control_deassert(pcie->pex_rst);
+ } else {
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
+ pcie->pex_clk,
+ pcie->pex_rst);
+ if (err) {
+ dev_err(dev, "powerup sequence failed: %d\n", err);
+ return err;
+ }
}
reset_control_deassert(pcie->afi_rst);
@@ -1263,6 +1303,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *pads, *afi, *res;
+ const struct tegra_pcie_soc *soc = pcie->soc;
int err;
err = tegra_pcie_clocks_get(pcie);
@@ -1277,10 +1318,12 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
return err;
}
- err = tegra_pcie_phys_get(pcie);
- if (err < 0) {
- dev_err(dev, "failed to get PHYs: %d\n", err);
- return err;
+ if (soc->program_uphy) {
+ err = tegra_pcie_phys_get(pcie);
+ if (err < 0) {
+ dev_err(dev, "failed to get PHYs: %d\n", err);
+ return err;
+ }
}
err = tegra_pcie_power_on(pcie);
@@ -1342,6 +1385,7 @@ poweroff:
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
int err;
if (pcie->irq > 0)
@@ -1349,9 +1393,11 @@ static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
tegra_pcie_power_off(pcie);
- err = phy_exit(pcie->phy);
- if (err < 0)
- dev_err(dev, "failed to teardown PHY: %d\n", err);
+ if (soc->program_uphy) {
+ err = phy_exit(pcie->phy);
+ if (err < 0)
+ dev_err(dev, "failed to teardown PHY: %d\n", err);
+ }
return 0;
}
@@ -1606,8 +1652,32 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
- if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
- of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
+ if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
+ switch (lanes) {
+ case 0x010004:
+ dev_info(dev, "4x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
+ return 0;
+
+ case 0x010102:
+ dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
+ return 0;
+
+ case 0x010101:
+ dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
+ return 0;
+
+ default:
+ dev_info(dev, "invalid lane configuration in DT, "
+ "switching to default 2x1, 1x1, 1x1 "
+ "configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
+ return 0;
+ }
+ } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
+ of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
switch (lanes) {
case 0x0000104:
dev_info(dev, "4x1, 1x1 configuration\n");
@@ -1727,7 +1797,20 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
struct device_node *np = dev->of_node;
unsigned int i = 0;
- if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
+ if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
+ pcie->num_supplies = 4;
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "dvdd-pex";
+ pcie->supplies[i++].supply = "hvdd-pex-pll";
+ pcie->supplies[i++].supply = "hvdd-pex";
+ pcie->supplies[i++].supply = "vddio-pexctl-aud";
+ } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
pcie->num_supplies = 6;
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
@@ -2066,6 +2149,7 @@ static const struct tegra_pcie_soc tegra20_pcie = {
.has_cml_clk = false,
.has_gen2 = false,
.force_pca_enable = false,
+ .program_uphy = true,
};
static const struct tegra_pcie_soc tegra30_pcie = {
@@ -2081,6 +2165,7 @@ static const struct tegra_pcie_soc tegra30_pcie = {
.has_cml_clk = true,
.has_gen2 = false,
.force_pca_enable = false,
+ .program_uphy = true,
};
static const struct tegra_pcie_soc tegra124_pcie = {
@@ -2095,6 +2180,7 @@ static const struct tegra_pcie_soc tegra124_pcie = {
.has_cml_clk = true,
.has_gen2 = true,
.force_pca_enable = false,
+ .program_uphy = true,
};
static const struct tegra_pcie_soc tegra210_pcie = {
@@ -2109,9 +2195,27 @@ static const struct tegra_pcie_soc tegra210_pcie = {
.has_cml_clk = true,
.has_gen2 = true,
.force_pca_enable = true,
+ .program_uphy = true,
+};
+
+static const struct tegra_pcie_soc tegra186_pcie = {
+ .num_ports = 3,
+ .msi_base_shift = 8,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0x80b880b8,
+ .pads_refclk_cfg1 = 0x000480b8,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+ .has_cml_clk = false,
+ .has_gen2 = true,
+ .force_pca_enable = false,
+ .program_uphy = false,
};
static const struct of_device_id tegra_pcie_of_match[] = {
+ { .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
diff --git a/drivers/pci/host/pci-v3-semi.c b/drivers/pci/host/pci-v3-semi.c
new file mode 100644
index 000000000000..02f6e1e3a421
--- /dev/null
+++ b/drivers/pci/host/pci-v3-semi.c
@@ -0,0 +1,959 @@
+/*
+ * Support for V3 Semiconductor PCI Local Bus to PCI Bridge
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the code from arch/arm/mach-integrator/pci_v3.c
+ * Copyright (C) 1999 ARM Limited
+ * Copyright (C) 2000-2001 Deep Blue Solutions Ltd
+ *
+ * Contributors to the old driver include:
+ * Russell King <linux@armlinux.org.uk>
+ * David A. Rusling <david.rusling@linaro.org> (uHAL, ARM Firmware suite)
+ * Rob Herring <robh@kernel.org>
+ * Liviu Dudau <Liviu.Dudau@arm.com>
+ * Grant Likely <grant.likely@secretlab.ca>
+ * Arnd Bergmann <arnd@arndb.de>
+ * Bjorn Helgaas <bhelgaas@google.com>
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+
+#define V3_PCI_VENDOR 0x00000000
+#define V3_PCI_DEVICE 0x00000002
+#define V3_PCI_CMD 0x00000004
+#define V3_PCI_STAT 0x00000006
+#define V3_PCI_CC_REV 0x00000008
+#define V3_PCI_HDR_CFG 0x0000000C
+#define V3_PCI_IO_BASE 0x00000010
+#define V3_PCI_BASE0 0x00000014
+#define V3_PCI_BASE1 0x00000018
+#define V3_PCI_SUB_VENDOR 0x0000002C
+#define V3_PCI_SUB_ID 0x0000002E
+#define V3_PCI_ROM 0x00000030
+#define V3_PCI_BPARAM 0x0000003C
+#define V3_PCI_MAP0 0x00000040
+#define V3_PCI_MAP1 0x00000044
+#define V3_PCI_INT_STAT 0x00000048
+#define V3_PCI_INT_CFG 0x0000004C
+#define V3_LB_BASE0 0x00000054
+#define V3_LB_BASE1 0x00000058
+#define V3_LB_MAP0 0x0000005E
+#define V3_LB_MAP1 0x00000062
+#define V3_LB_BASE2 0x00000064
+#define V3_LB_MAP2 0x00000066
+#define V3_LB_SIZE 0x00000068
+#define V3_LB_IO_BASE 0x0000006E
+#define V3_FIFO_CFG 0x00000070
+#define V3_FIFO_PRIORITY 0x00000072
+#define V3_FIFO_STAT 0x00000074
+#define V3_LB_ISTAT 0x00000076
+#define V3_LB_IMASK 0x00000077
+#define V3_SYSTEM 0x00000078
+#define V3_LB_CFG 0x0000007A
+#define V3_PCI_CFG 0x0000007C
+#define V3_DMA_PCI_ADR0 0x00000080
+#define V3_DMA_PCI_ADR1 0x00000090
+#define V3_DMA_LOCAL_ADR0 0x00000084
+#define V3_DMA_LOCAL_ADR1 0x00000094
+#define V3_DMA_LENGTH0 0x00000088
+#define V3_DMA_LENGTH1 0x00000098
+#define V3_DMA_CSR0 0x0000008B
+#define V3_DMA_CSR1 0x0000009B
+#define V3_DMA_CTLB_ADR0 0x0000008C
+#define V3_DMA_CTLB_ADR1 0x0000009C
+#define V3_DMA_DELAY 0x000000E0
+#define V3_MAIL_DATA 0x000000C0
+#define V3_PCI_MAIL_IEWR 0x000000D0
+#define V3_PCI_MAIL_IERD 0x000000D2
+#define V3_LB_MAIL_IEWR 0x000000D4
+#define V3_LB_MAIL_IERD 0x000000D6
+#define V3_MAIL_WR_STAT 0x000000D8
+#define V3_MAIL_RD_STAT 0x000000DA
+#define V3_QBA_MAP 0x000000DC
+
+/* PCI STATUS bits */
+#define V3_PCI_STAT_PAR_ERR BIT(15)
+#define V3_PCI_STAT_SYS_ERR BIT(14)
+#define V3_PCI_STAT_M_ABORT_ERR BIT(13)
+#define V3_PCI_STAT_T_ABORT_ERR BIT(12)
+
+/* LB ISTAT bits */
+#define V3_LB_ISTAT_MAILBOX BIT(7)
+#define V3_LB_ISTAT_PCI_RD BIT(6)
+#define V3_LB_ISTAT_PCI_WR BIT(5)
+#define V3_LB_ISTAT_PCI_INT BIT(4)
+#define V3_LB_ISTAT_PCI_PERR BIT(3)
+#define V3_LB_ISTAT_I2O_QWR BIT(2)
+#define V3_LB_ISTAT_DMA1 BIT(1)
+#define V3_LB_ISTAT_DMA0 BIT(0)
+
+/* PCI COMMAND bits */
+#define V3_COMMAND_M_FBB_EN BIT(9)
+#define V3_COMMAND_M_SERR_EN BIT(8)
+#define V3_COMMAND_M_PAR_EN BIT(6)
+#define V3_COMMAND_M_MASTER_EN BIT(2)
+#define V3_COMMAND_M_MEM_EN BIT(1)
+#define V3_COMMAND_M_IO_EN BIT(0)
+
+/* SYSTEM bits */
+#define V3_SYSTEM_M_RST_OUT BIT(15)
+#define V3_SYSTEM_M_LOCK BIT(14)
+#define V3_SYSTEM_UNLOCK 0xa05f
+
+/* PCI CFG bits */
+#define V3_PCI_CFG_M_I2O_EN BIT(15)
+#define V3_PCI_CFG_M_IO_REG_DIS BIT(14)
+#define V3_PCI_CFG_M_IO_DIS BIT(13)
+#define V3_PCI_CFG_M_EN3V BIT(12)
+#define V3_PCI_CFG_M_RETRY_EN BIT(10)
+#define V3_PCI_CFG_M_AD_LOW1 BIT(9)
+#define V3_PCI_CFG_M_AD_LOW0 BIT(8)
+/*
+ * This is the value applied to C/BE[3:1], with bit 0 always held 0
+ * during DMA access.
+ */
+#define V3_PCI_CFG_M_RTYPE_SHIFT 5
+#define V3_PCI_CFG_M_WTYPE_SHIFT 1
+#define V3_PCI_CFG_TYPE_DEFAULT 0x3
+
+/* PCI BASE bits (PCI -> Local Bus) */
+#define V3_PCI_BASE_M_ADR_BASE 0xFFF00000U
+#define V3_PCI_BASE_M_ADR_BASEL 0x000FFF00U
+#define V3_PCI_BASE_M_PREFETCH BIT(3)
+#define V3_PCI_BASE_M_TYPE (3 << 1)
+#define V3_PCI_BASE_M_IO BIT(0)
+
+/* PCI MAP bits (PCI -> Local bus) */
+#define V3_PCI_MAP_M_MAP_ADR 0xFFF00000U
+#define V3_PCI_MAP_M_RD_POST_INH BIT(15)
+#define V3_PCI_MAP_M_ROM_SIZE (3 << 10)
+#define V3_PCI_MAP_M_SWAP (3 << 8)
+#define V3_PCI_MAP_M_ADR_SIZE 0x000000F0U
+#define V3_PCI_MAP_M_REG_EN BIT(1)
+#define V3_PCI_MAP_M_ENABLE BIT(0)
+
+/* LB_BASE0,1 bits (Local bus -> PCI) */
+#define V3_LB_BASE_ADR_BASE 0xfff00000U
+#define V3_LB_BASE_SWAP (3 << 8)
+#define V3_LB_BASE_ADR_SIZE (15 << 4)
+#define V3_LB_BASE_PREFETCH BIT(3)
+#define V3_LB_BASE_ENABLE BIT(0)
+
+#define V3_LB_BASE_ADR_SIZE_1MB (0 << 4)
+#define V3_LB_BASE_ADR_SIZE_2MB (1 << 4)
+#define V3_LB_BASE_ADR_SIZE_4MB (2 << 4)
+#define V3_LB_BASE_ADR_SIZE_8MB (3 << 4)
+#define V3_LB_BASE_ADR_SIZE_16MB (4 << 4)
+#define V3_LB_BASE_ADR_SIZE_32MB (5 << 4)
+#define V3_LB_BASE_ADR_SIZE_64MB (6 << 4)
+#define V3_LB_BASE_ADR_SIZE_128MB (7 << 4)
+#define V3_LB_BASE_ADR_SIZE_256MB (8 << 4)
+#define V3_LB_BASE_ADR_SIZE_512MB (9 << 4)
+#define V3_LB_BASE_ADR_SIZE_1GB (10 << 4)
+#define V3_LB_BASE_ADR_SIZE_2GB (11 << 4)
+
+#define v3_addr_to_lb_base(a) ((a) & V3_LB_BASE_ADR_BASE)
+
+/* LB_MAP0,1 bits (Local bus -> PCI) */
+#define V3_LB_MAP_MAP_ADR 0xfff0U
+#define V3_LB_MAP_TYPE (7 << 1)
+#define V3_LB_MAP_AD_LOW_EN BIT(0)
+
+#define V3_LB_MAP_TYPE_IACK (0 << 1)
+#define V3_LB_MAP_TYPE_IO (1 << 1)
+#define V3_LB_MAP_TYPE_MEM (3 << 1)
+#define V3_LB_MAP_TYPE_CONFIG (5 << 1)
+#define V3_LB_MAP_TYPE_MEM_MULTIPLE (6 << 1)
+
+#define v3_addr_to_lb_map(a) (((a) >> 16) & V3_LB_MAP_MAP_ADR)
+
+/* LB_BASE2 bits (Local bus -> PCI IO) */
+#define V3_LB_BASE2_ADR_BASE 0xff00U
+#define V3_LB_BASE2_SWAP_AUTO (3 << 6)
+#define V3_LB_BASE2_ENABLE BIT(0)
+
+#define v3_addr_to_lb_base2(a) (((a) >> 16) & V3_LB_BASE2_ADR_BASE)
+
+/* LB_MAP2 bits (Local bus -> PCI IO) */
+#define V3_LB_MAP2_MAP_ADR 0xff00U
+
+#define v3_addr_to_lb_map2(a) (((a) >> 16) & V3_LB_MAP2_MAP_ADR)
+
+/* FIFO priority bits */
+#define V3_FIFO_PRIO_LOCAL BIT(12)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_EOB BIT(10)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 BIT(11)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_ANY (BIT(10)|BIT(11))
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_EOB BIT(8)
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 BIT(9)
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_ANY (BIT(8)|BIT(9))
+#define V3_FIFO_PRIO_PCI BIT(4)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_EOB BIT(2)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 BIT(3)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_ANY (BIT(2)|BIT(3))
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_EOB BIT(0)
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1 BIT(1)
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_ANY (BIT(0)|BIT(1))
+
+/* Local bus configuration bits */
+#define V3_LB_CFG_LB_TO_64_CYCLES 0x0000
+#define V3_LB_CFG_LB_TO_256_CYCLES BIT(13)
+#define V3_LB_CFG_LB_TO_512_CYCLES BIT(14)
+#define V3_LB_CFG_LB_TO_1024_CYCLES (BIT(13)|BIT(14))
+#define V3_LB_CFG_LB_RST BIT(12)
+#define V3_LB_CFG_LB_PPC_RDY BIT(11)
+#define V3_LB_CFG_LB_LB_INT BIT(10)
+#define V3_LB_CFG_LB_ERR_EN BIT(9)
+#define V3_LB_CFG_LB_RDY_EN BIT(8)
+#define V3_LB_CFG_LB_BE_IMODE BIT(7)
+#define V3_LB_CFG_LB_BE_OMODE BIT(6)
+#define V3_LB_CFG_LB_ENDIAN BIT(5)
+#define V3_LB_CFG_LB_PARK_EN BIT(4)
+#define V3_LB_CFG_LB_FBB_DIS BIT(2)
+
+/* ARM Integrator-specific extended control registers */
+#define INTEGRATOR_SC_PCI_OFFSET 0x18
+#define INTEGRATOR_SC_PCI_ENABLE BIT(0)
+#define INTEGRATOR_SC_PCI_INTCLR BIT(1)
+#define INTEGRATOR_SC_LBFADDR_OFFSET 0x20
+#define INTEGRATOR_SC_LBFCODE_OFFSET 0x24
+
+struct v3_pci {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *config_base;
+ struct pci_bus *bus;
+ u32 config_mem;
+ u32 io_mem;
+ u32 non_pre_mem;
+ u32 pre_mem;
+ phys_addr_t io_bus_addr;
+ phys_addr_t non_pre_bus_addr;
+ phys_addr_t pre_bus_addr;
+ struct regmap *map;
+};
+
+/*
+ * The V3 PCI interface chip in Integrator provides several windows from
+ * local bus memory into the PCI memory areas. Unfortunately, there
+ * are not really enough windows for our usage, therefore we reuse
+ * one of the windows for access to PCI configuration space. On the
+ * Integrator/AP, the memory map is as follows:
+ *
+ * Local Bus Memory Usage
+ *
+ * 40000000 - 4FFFFFFF PCI memory. 256M non-prefetchable
+ * 50000000 - 5FFFFFFF PCI memory. 256M prefetchable
+ * 60000000 - 60FFFFFF PCI IO. 16M
+ * 61000000 - 61FFFFFF PCI Configuration. 16M
+ *
+ * There are three V3 windows, each described by a pair of V3 registers.
+ * These are LB_BASE0/LB_MAP0, LB_BASE1/LB_MAP1 and LB_BASE2/LB_MAP2.
+ * Base0 and Base1 can be used for any type of PCI memory access. Base2
+ * can be used either for PCI I/O or for I2O accesses. By default, uHAL
+ * uses this only for PCI IO space.
+ *
+ * Normally these spaces are mapped using the following base registers:
+ *
+ * Usage Local Bus Memory Base/Map registers used
+ *
+ * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0
+ * Mem 50000000 - 5FFFFFFF LB_BASE1/LB_MAP1
+ * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2
+ * Cfg 61000000 - 61FFFFFF
+ *
+ * This means that I2O and PCI configuration space accesses will fail.
+ * When PCI configuration accesses are needed (via the uHAL PCI
+ * configuration space primitives) we must remap the spaces as follows:
+ *
+ * Usage Local Bus Memory Base/Map registers used
+ *
+ * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0
+ * Mem 50000000 - 5FFFFFFF LB_BASE0/LB_MAP0
+ * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2
+ * Cfg 61000000 - 61FFFFFF LB_BASE1/LB_MAP1
+ *
+ * To make this work, the code depends on overlapping windows working.
+ * The V3 chip translates an address by checking its range within
+ * each of the BASE/MAP pairs in turn (in ascending register number
+ * order). It will use the first matching pair. So, for example,
+ * if the same address is mapped by both LB_BASE0/LB_MAP0 and
+ * LB_BASE1/LB_MAP1, the V3 will use the translation from
+ * LB_BASE0/LB_MAP0.
+ *
+ * To allow PCI Configuration space access, the code enlarges the
+ * window mapped by LB_BASE0/LB_MAP0 from 256M to 512M. This occludes
+ * the window currently mapped by LB_BASE1/LB_MAP1 so that it can
+ * be remapped for use by configuration cycles.
+ *
+ * At the end of the PCI Configuration space accesses,
+ * LB_BASE1/LB_MAP1 is reset to map PCI Memory. Finally, the window
+ * mapped by LB_BASE0/LB_MAP0 is reduced in size from 512M to 256M to
+ * reveal the now restored LB_BASE1/LB_MAP1 window.
+ *
+ * NOTE: We do not set up I2O mapping. I suspect that this is only
+ * for an intelligent (target) device. Using I2O disables most of
+ * the mappings into PCI memory.
+ */
+static void __iomem *v3_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int offset)
+{
+ struct v3_pci *v3 = bus->sysdata;
+ unsigned int address, mapaddress, busnr;
+
+ busnr = bus->number;
+ if (busnr == 0) {
+ int slot = PCI_SLOT(devfn);
+
+ /*
+ * local bus segment so need a type 0 config cycle
+ *
+ * build the PCI configuration "address" with one-hot in
+ * A31-A11
+ *
+ * mapaddress:
+ * 3:1 = config cycle (101)
+ * 0 = PCI A1 & A0 are 0 (0)
+ */
+ address = PCI_FUNC(devfn) << 8;
+ mapaddress = V3_LB_MAP_TYPE_CONFIG;
+
+ if (slot > 12)
+ /*
+ * high order bits are handled by the MAP register
+ */
+ mapaddress |= BIT(slot - 5);
+ else
+ /*
+ * low order bits handled directly in the address
+ */
+ address |= BIT(slot + 11);
+ } else {
+ /*
+ * not the local bus segment so need a type 1 config cycle
+ *
+ * address:
+ * 23:16 = bus number
+ * 15:11 = slot number (7:3 of devfn)
+ * 10:8 = func number (2:0 of devfn)
+ *
+ * mapaddress:
+ * 3:1 = config cycle (101)
+ * 0 = PCI A1 & A0 from host bus (1)
+ */
+ mapaddress = V3_LB_MAP_TYPE_CONFIG | V3_LB_MAP_AD_LOW_EN;
+ address = (busnr << 16) | (devfn << 8);
+ }
+
+ /*
+ * Set up base0 to see all 512Mbytes of memory space (not
+ * prefetchable), this frees up base1 for re-use by
+ * configuration memory
+ */
+ writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+ V3_LB_BASE_ADR_SIZE_512MB | V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE0);
+
+ /*
+ * Set up base1/map1 to point into configuration space.
+ * The config mem is always 16MB.
+ */
+ writel(v3_addr_to_lb_base(v3->config_mem) |
+ V3_LB_BASE_ADR_SIZE_16MB | V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE1);
+ writew(mapaddress, v3->base + V3_LB_MAP1);
+
+ return v3->config_base + address + offset;
+}
+
+static void v3_unmap_bus(struct v3_pci *v3)
+{
+ /*
+ * Reassign base1 for use by prefetchable PCI memory
+ */
+ writel(v3_addr_to_lb_base(v3->pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_PREFETCH |
+ V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE1);
+ writew(v3_addr_to_lb_map(v3->pre_bus_addr) |
+ V3_LB_MAP_TYPE_MEM, /* was V3_LB_MAP_TYPE_MEM_MULTIPLE */
+ v3->base + V3_LB_MAP1);
+
+ /*
+ * And shrink base0 back to a 256M window (NOTE: MAP0 already correct)
+ */
+ writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE0);
+}
+
+static int v3_pci_read_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 *value)
+{
+ struct v3_pci *v3 = bus->sysdata;
+ int ret;
+
+ ret = pci_generic_config_read(bus, fn, config, size, value);
+ v3_unmap_bus(v3);
+ dev_dbg(&bus->dev,
+ "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+ PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value);
+ return ret;
+}
+
+static int v3_pci_write_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 value)
+{
+ struct v3_pci *v3 = bus->sysdata;
+ int ret;
+
+ dev_dbg(&bus->dev,
+ "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+ PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
+ ret = pci_generic_config_write(bus, fn, config, size, value);
+ v3_unmap_bus(v3);
+ return ret;
+}
+
+static struct pci_ops v3_pci_ops = {
+ .map_bus = v3_map_bus,
+ .read = v3_pci_read_config,
+ .write = v3_pci_write_config,
+};
+
+static irqreturn_t v3_irq(int irq, void *data)
+{
+ struct v3_pci *v3 = data;
+ struct device *dev = v3->dev;
+ u32 status;
+
+ status = readw(v3->base + V3_PCI_STAT);
+ if (status & V3_PCI_STAT_PAR_ERR)
+ dev_err(dev, "parity error interrupt\n");
+ if (status & V3_PCI_STAT_SYS_ERR)
+ dev_err(dev, "system error interrupt\n");
+ if (status & V3_PCI_STAT_M_ABORT_ERR)
+ dev_err(dev, "master abort error interrupt\n");
+ if (status & V3_PCI_STAT_T_ABORT_ERR)
+ dev_err(dev, "target abort error interrupt\n");
+ writew(status, v3->base + V3_PCI_STAT);
+
+ status = readb(v3->base + V3_LB_ISTAT);
+ if (status & V3_LB_ISTAT_MAILBOX)
+ dev_info(dev, "PCI mailbox interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_RD)
+ dev_err(dev, "PCI target LB->PCI READ abort interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_WR)
+ dev_err(dev, "PCI target LB->PCI WRITE abort interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_INT)
+ dev_info(dev, "PCI pin interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_PERR)
+ dev_err(dev, "PCI parity error interrupt\n");
+ if (status & V3_LB_ISTAT_I2O_QWR)
+ dev_info(dev, "I2O inbound post queue interrupt\n");
+ if (status & V3_LB_ISTAT_DMA1)
+ dev_info(dev, "DMA channel 1 interrupt\n");
+ if (status & V3_LB_ISTAT_DMA0)
+ dev_info(dev, "DMA channel 0 interrupt\n");
+ /* Clear all possible interrupts on the local bus */
+ writeb(0, v3->base + V3_LB_ISTAT);
+ if (v3->map)
+ regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET,
+ INTEGRATOR_SC_PCI_ENABLE |
+ INTEGRATOR_SC_PCI_INTCLR);
+
+ return IRQ_HANDLED;
+}
+
+static int v3_integrator_init(struct v3_pci *v3)
+{
+ unsigned int val;
+
+ v3->map =
+ syscon_regmap_lookup_by_compatible("arm,integrator-ap-syscon");
+ if (IS_ERR(v3->map)) {
+ dev_err(v3->dev, "no syscon\n");
+ return -ENODEV;
+ }
+
+ regmap_read(v3->map, INTEGRATOR_SC_PCI_OFFSET, &val);
+ /* Take the PCI bridge out of reset, clear IRQs */
+ regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET,
+ INTEGRATOR_SC_PCI_ENABLE |
+ INTEGRATOR_SC_PCI_INTCLR);
+
+ if (!(val & INTEGRATOR_SC_PCI_ENABLE)) {
+ /* If we were in reset we need to sleep a bit */
+ msleep(230);
+
+ /* Set the physical base for the controller itself */
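+ /* V3_LB_IO_BASE holds bits 31..16, so 0x6200 places the registers at 0x62000000 */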
+ writel(0x6200, v3->base + V3_LB_IO_BASE);
+
+ /* Wait for the mailbox to settle after reset */
+ do {
+ writeb(0xaa, v3->base + V3_MAIL_DATA);
+ writeb(0x55, v3->base + V3_MAIL_DATA + 4);
+ } while (readb(v3->base + V3_MAIL_DATA) != 0xaa &&
+ readb(v3->base + V3_MAIL_DATA) != 0x55);
+ }
+
+ dev_info(v3->dev, "initialized PCI V3 Integrator/AP integration\n");
+
+ return 0;
+}
+
+static int v3_pci_setup_resource(struct v3_pci *v3,
+ resource_size_t io_base,
+ struct pci_host_bridge *host,
+ struct resource_entry *win)
+{
+ struct device *dev = v3->dev;
+ struct resource *mem;
+ struct resource *io;
+ int ret;
+
+ switch (resource_type(win->res)) {
+ case IORESOURCE_IO:
+ io = win->res;
+ io->name = "V3 PCI I/O";
+ v3->io_mem = io_base;
+ v3->io_bus_addr = io->start - win->offset;
+ dev_dbg(dev, "I/O window %pR, bus addr %pap\n",
+ io, &v3->io_bus_addr);
+ ret = pci_remap_iospace(io, io_base);
+ if (ret) {
+ dev_warn(dev,
+ "error %d: failed to map resource %pR\n",
+ ret, io);
+ return ret;
+ }
+ /* Setup window 2 - PCI I/O */
+ writel(v3_addr_to_lb_base2(v3->io_mem) |
+ V3_LB_BASE2_ENABLE,
+ v3->base + V3_LB_BASE2);
+ writew(v3_addr_to_lb_map2(v3->io_bus_addr),
+ v3->base + V3_LB_MAP2);
+ break;
+ case IORESOURCE_MEM:
+ mem = win->res;
+ if (mem->flags & IORESOURCE_PREFETCH) {
+ mem->name = "V3 PCI PRE-MEM";
+ v3->pre_mem = mem->start;
+ v3->pre_bus_addr = mem->start - win->offset;
+ dev_dbg(dev, "PREFETCHABLE MEM window %pR, bus addr %pap\n",
+ mem, &v3->pre_bus_addr);
+ if (resource_size(mem) != SZ_256M) {
+ dev_err(dev, "prefetchable memory range is not 256MB\n");
+ return -EINVAL;
+ }
+ if (v3->non_pre_mem &&
+ (mem->start != v3->non_pre_mem + SZ_256M)) {
+ dev_err(dev,
+ "prefetchable memory is not adjacent to non-prefetchable memory\n");
+ return -EINVAL;
+ }
+ /* Setup window 1 - PCI prefetchable memory */
+ writel(v3_addr_to_lb_base(v3->pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB |
+ V3_LB_BASE_PREFETCH |
+ V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE1);
+ writew(v3_addr_to_lb_map(v3->pre_bus_addr) |
+ V3_LB_MAP_TYPE_MEM, /* Was V3_LB_MAP_TYPE_MEM_MULTIPLE */
+ v3->base + V3_LB_MAP1);
+ } else {
+ mem->name = "V3 PCI NON-PRE-MEM";
+ v3->non_pre_mem = mem->start;
+ v3->non_pre_bus_addr = mem->start - win->offset;
+ dev_dbg(dev, "NON-PREFETCHABLE MEM window %pR, bus addr %pap\n",
+ mem, &v3->non_pre_bus_addr);
+ if (resource_size(mem) != SZ_256M) {
+ dev_err(dev,
+ "non-prefetchable memory range is not 256MB\n");
+ return -EINVAL;
+ }
+ /* Setup window 0 - PCI non-prefetchable memory */
+ writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB |
+ V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE0);
+ writew(v3_addr_to_lb_map(v3->non_pre_bus_addr) |
+ V3_LB_MAP_TYPE_MEM,
+ v3->base + V3_LB_MAP0);
+ }
+ break;
+ case IORESOURCE_BUS:
+ dev_dbg(dev, "BUS %pR\n", win->res);
+ host->busnr = win->res->start;
+ break;
+ default:
+ dev_info(dev, "Unknown resource type %lu\n",
+ resource_type(win->res));
+ break;
+ }
+
+ return 0;
+}
+
+static int v3_get_dma_range_config(struct v3_pci *v3,
+ struct of_pci_range *range,
+ u32 *pci_base, u32 *pci_map)
+{
+ struct device *dev = v3->dev;
+ u64 cpu_end = range->cpu_addr + range->size - 1;
+ u64 pci_end = range->pci_addr + range->size - 1;
+ u32 val;
+
+ if (range->pci_addr & ~V3_PCI_BASE_M_ADR_BASE) {
+ dev_err(dev, "illegal range, only PCI bits 31..20 allowed\n");
+ return -EINVAL;
+ }
+ val = ((u32)range->pci_addr) & V3_PCI_BASE_M_ADR_BASE;
+ *pci_base = val;
+
+ if (range->cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) {
+ dev_err(dev, "illegal range, only CPU bits 31..20 allowed\n");
+ return -EINVAL;
+ }
+ val = ((u32)range->cpu_addr) & V3_PCI_MAP_M_MAP_ADR;
+
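+ /* The inbound window size reuses the LB_BASE ADR_SIZE field encoding */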
+ switch (range->size) {
+ case SZ_1M:
+ val |= V3_LB_BASE_ADR_SIZE_1MB;
+ break;
+ case SZ_2M:
+ val |= V3_LB_BASE_ADR_SIZE_2MB;
+ break;
+ case SZ_4M:
+ val |= V3_LB_BASE_ADR_SIZE_4MB;
+ break;
+ case SZ_8M:
+ val |= V3_LB_BASE_ADR_SIZE_8MB;
+ break;
+ case SZ_16M:
+ val |= V3_LB_BASE_ADR_SIZE_16MB;
+ break;
+ case SZ_32M:
+ val |= V3_LB_BASE_ADR_SIZE_32MB;
+ break;
+ case SZ_64M:
+ val |= V3_LB_BASE_ADR_SIZE_64MB;
+ break;
+ case SZ_128M:
+ val |= V3_LB_BASE_ADR_SIZE_128MB;
+ break;
+ case SZ_256M:
+ val |= V3_LB_BASE_ADR_SIZE_256MB;
+ break;
+ case SZ_512M:
+ val |= V3_LB_BASE_ADR_SIZE_512MB;
+ break;
+ case SZ_1G:
+ val |= V3_LB_BASE_ADR_SIZE_1GB;
+ break;
+ case SZ_2G:
+ val |= V3_LB_BASE_ADR_SIZE_2GB;
+ break;
+ default:
+ dev_err(v3->dev, "illegal dma memory chunk size\n");
+ return -EINVAL;
+ }
+ val |= V3_PCI_MAP_M_REG_EN | V3_PCI_MAP_M_ENABLE;
+ *pci_map = val;
+
+ dev_dbg(dev,
+ "DMA MEM CPU: 0x%016llx -> 0x%016llx => "
+ "PCI: 0x%016llx -> 0x%016llx base %08x map %08x\n",
+ range->cpu_addr, cpu_end,
+ range->pci_addr, pci_end,
+ *pci_base, *pci_map);
+
+ return 0;
+}
+
+static int v3_pci_parse_map_dma_ranges(struct v3_pci *v3,
+ struct device_node *np)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ struct device *dev = v3->dev;
+ int i = 0;
+
+ if (of_pci_dma_range_parser_init(&parser, np)) {
+ dev_err(dev, "missing dma-ranges property\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Get the dma-ranges from the device tree
+ */
+ for_each_of_pci_range(&parser, &range) {
+ int ret;
+ u32 pci_base, pci_map;
+
+ ret = v3_get_dma_range_config(v3, &range, &pci_base, &pci_map);
+ if (ret)
+ return ret;
+
+ if (i == 0) {
+ writel(pci_base, v3->base + V3_PCI_BASE0);
+ writel(pci_map, v3->base + V3_PCI_MAP0);
+ } else if (i == 1) {
+ writel(pci_base, v3->base + V3_PCI_BASE1);
+ writel(pci_map, v3->base + V3_PCI_MAP1);
+ } else {
+ dev_err(dev, "too many ranges, only two supported\n");
+ dev_err(dev, "range %d ignored\n", i);
+ }
+ i++;
+ }
+ return 0;
+}
+
+static int v3_pci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ resource_size_t io_base;
+ struct resource *regs;
+ struct resource_entry *win;
+ struct v3_pci *v3;
+ struct pci_host_bridge *host;
+ struct clk *clk;
+ u16 val;
+ int irq;
+ int ret;
+ LIST_HEAD(res);
+
+ host = pci_alloc_host_bridge(sizeof(*v3));
+ if (!host)
+ return -ENOMEM;
+
+ host->dev.parent = dev;
+ host->ops = &v3_pci_ops;
+ host->busnr = 0;
+ host->msi = NULL;
+ host->map_irq = of_irq_parse_and_map_pci;
+ host->swizzle_irq = pci_common_swizzle;
+ v3 = pci_host_bridge_priv(host);
+ host->sysdata = v3;
+ v3->dev = dev;
+
+ /* Get and enable host clock */
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "clock not found\n");
+ return PTR_ERR(clk);
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "unable to enable clock\n");
+ return ret;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ v3->base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(v3->base))
+ return PTR_ERR(v3->base);
+ /*
+ * The hardware has a register with the physical base address
+ * of the V3 controller itself, verify that this is the same
+ * as the physical memory we've remapped it from.
+ */
+ if (readl(v3->base + V3_LB_IO_BASE) != (regs->start >> 16))
+ dev_err(dev, "V3_LB_IO_BASE = %08x but device is @%pR\n",
+ readl(v3->base + V3_LB_IO_BASE), regs);
+
+ /* Configuration space is 16MB directly mapped */
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (resource_size(regs) != SZ_16M) {
+ dev_err(dev, "config mem is not 16MB!\n");
+ return -EINVAL;
+ }
+ v3->config_mem = regs->start;
+ v3->config_base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(v3->config_base))
+ return PTR_ERR(v3->config_base);
+
+ ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &io_base);
+ if (ret)
+ return ret;
+
+ ret = devm_request_pci_bus_resources(dev, &res);
+ if (ret)
+ return ret;
+
+ /* Get and request error IRQ resource */
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "unable to obtain PCIv3 error IRQ\n");
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, irq, v3_irq, 0,
+ "PCIv3 error", v3);
+ if (ret < 0) {
+ dev_err(dev,
+ "unable to request PCIv3 error IRQ %d (%d)\n",
+ irq, ret);
+ return ret;
+ }
+
+ /*
+ * Unlock V3 registers, but only if they were previously locked.
+ */
+ if (readw(v3->base + V3_SYSTEM) & V3_SYSTEM_M_LOCK)
+ writew(V3_SYSTEM_UNLOCK, v3->base + V3_SYSTEM);
+
+ /* Disable all slave access while we set up the windows */
+ val = readw(v3->base + V3_PCI_CMD);
+ val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ writew(val, v3->base + V3_PCI_CMD);
+
+ /* Put the PCI bus into reset */
+ val = readw(v3->base + V3_SYSTEM);
+ val &= ~V3_SYSTEM_M_RST_OUT;
+ writew(val, v3->base + V3_SYSTEM);
+
+ /* Retry until we're ready */
+ val = readw(v3->base + V3_PCI_CFG);
+ val |= V3_PCI_CFG_M_RETRY_EN;
+ writew(val, v3->base + V3_PCI_CFG);
+
+ /* Set up the local bus protocol */
+ val = readw(v3->base + V3_LB_CFG);
+ val |= V3_LB_CFG_LB_BE_IMODE; /* Byte enable input */
+ val |= V3_LB_CFG_LB_BE_OMODE; /* Byte enable output */
+ val &= ~V3_LB_CFG_LB_ENDIAN; /* Little endian */
+ val &= ~V3_LB_CFG_LB_PPC_RDY; /* TODO: when using on PPC403Gx, set to 1 */
+ writew(val, v3->base + V3_LB_CFG);
+
+ /* Enable the PCI bus master */
+ val = readw(v3->base + V3_PCI_CMD);
+ val |= PCI_COMMAND_MASTER;
+ writew(val, v3->base + V3_PCI_CMD);
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry(win, &res) {
+ ret = v3_pci_setup_resource(v3, io_base, host, win);
+ if (ret) {
+ dev_err(dev, "error setting up resources\n");
+ return ret;
+ }
+ }
+ ret = v3_pci_parse_map_dma_ranges(v3, np);
+ if (ret)
+ return ret;
+
+ /*
+ * Disable PCI to host IO cycles, enable I/O buffers @3.3V,
+ * set AD_LOW0 to 1 in case one of the LB_MAP registers chooses
+ * to use it (should be unused).
+ */
+ writel(0x00000000, v3->base + V3_PCI_IO_BASE);
+ val = V3_PCI_CFG_M_IO_REG_DIS | V3_PCI_CFG_M_IO_DIS |
+ V3_PCI_CFG_M_EN3V | V3_PCI_CFG_M_AD_LOW0;
+ /*
+ * Command types for DMA reads and writes from the PCI bus
+ */
+ val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_RTYPE_SHIFT;
+ val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_WTYPE_SHIFT;
+ writew(val, v3->base + V3_PCI_CFG);
+
+ /*
+ * Set the V3 FIFO such that writes have higher priority than
+ * reads, and a local bus write causes a local bus read FIFO flush
+ * on aperture 1. The same applies for PCI.
+ */
+ writew(V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 |
+ V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 |
+ V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 |
+ V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1,
+ v3->base + V3_FIFO_PRIORITY);
+
+ /*
+ * Clear any error interrupts, and enable parity and write error
+ * interrupts
+ */
+ writeb(0, v3->base + V3_LB_ISTAT);
+ val = readw(v3->base + V3_LB_CFG);
+ val |= V3_LB_CFG_LB_LB_INT;
+ writew(val, v3->base + V3_LB_CFG);
+ writeb(V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR,
+ v3->base + V3_LB_IMASK);
+
+ /* Special Integrator initialization */
+ if (of_device_is_compatible(np, "arm,integrator-ap-pci")) {
+ ret = v3_integrator_init(v3);
+ if (ret)
+ return ret;
+ }
+
+ /* Post-init: enable PCI memory and invalidate (master already on) */
+ val = readw(v3->base + V3_PCI_CMD);
+ val |= PCI_COMMAND_MEMORY | PCI_COMMAND_INVALIDATE;
+ writew(val, v3->base + V3_PCI_CMD);
+
+ /* Clear pending interrupts */
+ writeb(0, v3->base + V3_LB_ISTAT);
+ /* Read or write errors and parity errors cause interrupts */
+ writeb(V3_LB_ISTAT_PCI_RD | V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR,
+ v3->base + V3_LB_IMASK);
+
+ /* Take the PCI bus out of reset so devices can initialize */
+ val = readw(v3->base + V3_SYSTEM);
+ val |= V3_SYSTEM_M_RST_OUT;
+ writew(val, v3->base + V3_SYSTEM);
+
+ /*
+ * Re-lock the system register.
+ */
+ val = readw(v3->base + V3_SYSTEM);
+ val |= V3_SYSTEM_M_LOCK;
+ writew(val, v3->base + V3_SYSTEM);
+
+ list_splice_init(&res, &host->windows);
+ ret = pci_scan_root_bus_bridge(host);
+ if (ret) {
+ dev_err(dev, "failed to register host: %d\n", ret);
+ return ret;
+ }
+ v3->bus = host->bus;
+
+ pci_bus_assign_resources(v3->bus);
+ pci_bus_add_devices(v3->bus);
+
+ return 0;
+}
+
+static const struct of_device_id v3_pci_of_match[] = {
+ {
+ .compatible = "v3,v360epc-pci",
+ },
+ {},
+};
+
+static struct platform_driver v3_pci_driver = {
+ .driver = {
+ .name = "pci-v3-semi",
+ .of_match_table = of_match_ptr(v3_pci_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = v3_pci_probe,
+};
+builtin_platform_driver(v3_pci_driver);
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 087645116ecb..465aa2a1b38d 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -542,24 +542,6 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
}
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
- parser->end = parser->range + rlen / sizeof(__be32);
-
- return 0;
-}
-
static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
{
struct device_node *np = port->node;
@@ -568,7 +550,7 @@ static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
struct device *dev = port->dev;
u8 ib_reg_mask = 0;
- if (pci_dma_range_parser_init(&parser, np)) {
+ if (of_pci_dma_range_parser_init(&parser, np)) {
dev_err(dev, "missing dma-ranges property\n");
return -EINVAL;
}
@@ -628,7 +610,7 @@ static struct pci_ops xgene_pcie_ops = {
.write = pci_generic_config_write32,
};
-static int xgene_pcie_probe_bridge(struct platform_device *pdev)
+static int xgene_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
@@ -709,7 +691,7 @@ static struct platform_driver xgene_pcie_driver = {
.of_match_table = of_match_ptr(xgene_pcie_match_table),
.suppress_bind_attrs = true,
},
- .probe = xgene_pcie_probe_bridge,
+ .probe = xgene_pcie_probe,
};
builtin_platform_driver(xgene_pcie_driver);
#endif
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index b468b8cccf8d..5cc4f594d79a 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -105,7 +105,7 @@ static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
return readl_relaxed(pcie->cra_base + reg);
}
-static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
+static bool altera_pcie_link_up(struct altera_pcie *pcie)
{
return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
}
@@ -142,7 +142,7 @@ static bool altera_pcie_valid_device(struct altera_pcie *pcie,
{
/* If there is no link, then there is no device */
if (bus->number != pcie->root_bus_nr) {
- if (!altera_pcie_link_is_up(pcie))
+ if (!altera_pcie_link_up(pcie))
return false;
}
@@ -412,7 +412,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie)
/* Wait for link is up */
start_jiffies = jiffies;
for (;;) {
- if (altera_pcie_link_is_up(pcie))
+ if (altera_pcie_link_up(pcie))
break;
if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
@@ -427,7 +427,7 @@ static void altera_pcie_retrain(struct altera_pcie *pcie)
{
u16 linkcap, linkstat, linkctl;
- if (!altera_pcie_link_is_up(pcie))
+ if (!altera_pcie_link_up(pcie))
return;
/*
diff --git a/drivers/pci/host/pcie-iproc-msi.c b/drivers/pci/host/pcie-iproc-msi.c
index 2d0f535a2f69..990fc906d73d 100644
--- a/drivers/pci/host/pcie-iproc-msi.c
+++ b/drivers/pci/host/pcie-iproc-msi.c
@@ -179,7 +179,7 @@ static struct irq_chip iproc_msi_irq_chip = {
static struct msi_domain_info iproc_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX,
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
.chip = &iproc_msi_irq_chip,
};
@@ -237,7 +237,7 @@ static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
msg->address_lo = lower_32_bits(addr);
msg->address_hi = upper_32_bits(addr);
- msg->data = data->hwirq;
+ msg->data = data->hwirq << 5;
}
static struct irq_chip iproc_msi_bottom_irq_chip = {
@@ -251,7 +251,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
void *args)
{
struct iproc_msi *msi = domain->host_data;
- int hwirq;
+ int hwirq, i;
mutex_lock(&msi->bitmap_lock);
@@ -267,10 +267,14 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
mutex_unlock(&msi->bitmap_lock);
- irq_domain_set_info(domain, virq, hwirq, &iproc_msi_bottom_irq_chip,
- domain->host_data, handle_simple_irq, NULL, NULL);
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &iproc_msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
- return 0;
+ return hwirq;
}
static void iproc_msi_irq_domain_free(struct irq_domain *domain,
@@ -302,7 +306,8 @@ static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
msg = (u32 *)(msi->eq_cpu + offs);
- hwirq = *msg & IPROC_MSI_EQ_MASK;
+ hwirq = readl(msg);
+ hwirq = (hwirq >> 5) + (hwirq & 0x1f);
/*
* Since we have multiple hwirq mapped to a single MSI vector,
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 3a8b9d20ee57..935909bbe5c4 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -1097,24 +1097,6 @@ err_ib:
return ret;
}
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
-
- parser->end = parser->range + rlen / sizeof(__be32);
- return 0;
-}
-
static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
{
struct of_pci_range range;
@@ -1122,7 +1104,7 @@ static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
int ret;
/* Get the dma-ranges from DT */
- ret = pci_dma_range_parser_init(&parser, pcie->dev->of_node);
+ ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node);
if (ret)
return ret;
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 4e0b25d09b0c..12796eccb2be 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -1027,24 +1027,6 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
return 0;
}
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
-
- parser->end = parser->range + rlen / sizeof(__be32);
- return 0;
-}
-
static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
struct device_node *np)
{
@@ -1053,7 +1035,7 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
int index = 0;
int err;
- if (pci_dma_range_parser_init(&parser, np))
+ if (of_pci_dma_range_parser_init(&parser, np))
return -EINVAL;
/* Get the dma-ranges from DT */
diff --git a/drivers/pci/host/pcie-tango.c b/drivers/pci/host/pcie-tango.c
index 95049de4131c..21a208da3f59 100644
--- a/drivers/pci/host/pcie-tango.c
+++ b/drivers/pci/host/pcie-tango.c
@@ -1,13 +1,173 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
#include <linux/pci-ecam.h>
#include <linux/delay.h>
-#include <linux/of.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+
+#define MSI_MAX 256
#define SMP8759_MUX 0x48
#define SMP8759_TEST_OUT 0x74
+#define SMP8759_DOORBELL 0x7c
+#define SMP8759_STATUS 0x80
+#define SMP8759_ENABLE 0xa0
struct tango_pcie {
- void __iomem *base;
+ DECLARE_BITMAP(used_msi, MSI_MAX);
+ u64 msi_doorbell;
+ spinlock_t used_msi_lock;
+ void __iomem *base;
+ struct irq_domain *dom;
+};
+
+static void tango_msi_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct tango_pcie *pcie = irq_desc_get_handler_data(desc);
+ unsigned long status, base, virq, idx, pos = 0;
+
+ chained_irq_enter(chip, desc);
+ spin_lock(&pcie->used_msi_lock);
+
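+ /* Walk the per-32-MSI status words, handling every pending vector */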
+ while ((pos = find_next_bit(pcie->used_msi, MSI_MAX, pos)) < MSI_MAX) {
+ base = round_down(pos, 32);
+ status = readl_relaxed(pcie->base + SMP8759_STATUS + base / 8);
+ for_each_set_bit(idx, &status, 32) {
+ virq = irq_find_mapping(pcie->dom, base + idx);
+ generic_handle_irq(virq);
+ }
+ pos = base + 32;
+ }
+
+ spin_unlock(&pcie->used_msi_lock);
+ chained_irq_exit(chip, desc);
+}
+
+static void tango_ack(struct irq_data *d)
+{
+ struct tango_pcie *pcie = d->chip_data;
+ u32 offset = (d->hwirq / 32) * 4;
+ u32 bit = BIT(d->hwirq % 32);
+
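+ /* Ack by writing the vector's bit back to its 32-bit status word */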
+ writel_relaxed(bit, pcie->base + SMP8759_STATUS + offset);
+}
+
+static void update_msi_enable(struct irq_data *d, bool unmask)
+{
+ unsigned long flags;
+ struct tango_pcie *pcie = d->chip_data;
+ u32 offset = (d->hwirq / 32) * 4;
+ u32 bit = BIT(d->hwirq % 32);
+ u32 val;
+
+ spin_lock_irqsave(&pcie->used_msi_lock, flags);
+ val = readl_relaxed(pcie->base + SMP8759_ENABLE + offset);
+ val = unmask ? val | bit : val & ~bit;
+ writel_relaxed(val, pcie->base + SMP8759_ENABLE + offset);
+ spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+}
+
+static void tango_mask(struct irq_data *d)
+{
+ update_msi_enable(d, false);
+}
+
+static void tango_unmask(struct irq_data *d)
+{
+ update_msi_enable(d, true);
+}
+
+static int tango_set_affinity(struct irq_data *d, const struct cpumask *mask,
+ bool force)
+{
+ return -EINVAL;
+}
+
+static void tango_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct tango_pcie *pcie = d->chip_data;
+ msg->address_lo = lower_32_bits(pcie->msi_doorbell);
+ msg->address_hi = upper_32_bits(pcie->msi_doorbell);
+ msg->data = d->hwirq;
+}
+
+static struct irq_chip tango_chip = {
+ .irq_ack = tango_ack,
+ .irq_mask = tango_mask,
+ .irq_unmask = tango_unmask,
+ .irq_set_affinity = tango_set_affinity,
+ .irq_compose_msi_msg = tango_compose_msi_msg,
+};
+
+static void msi_ack(struct irq_data *d)
+{
+ irq_chip_ack_parent(d);
+}
+
+static void msi_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void msi_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip msi_chip = {
+ .name = "MSI",
+ .irq_ack = msi_ack,
+ .irq_mask = msi_mask,
+ .irq_unmask = msi_unmask,
+};
+
+static struct msi_domain_info msi_dom_info = {
+ .flags = MSI_FLAG_PCI_MSIX
+ | MSI_FLAG_USE_DEF_DOM_OPS
+ | MSI_FLAG_USE_DEF_CHIP_OPS,
+ .chip = &msi_chip,
+};
+
+static int tango_irq_domain_alloc(struct irq_domain *dom, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct tango_pcie *pcie = dom->host_data;
+ unsigned long flags;
+ int pos;
+
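+ /* Multi-MSI is not advertised, so each allocation requests a single vector */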
+ spin_lock_irqsave(&pcie->used_msi_lock, flags);
+ pos = find_first_zero_bit(pcie->used_msi, MSI_MAX);
+ if (pos >= MSI_MAX) {
+ spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+ return -ENOSPC;
+ }
+ __set_bit(pos, pcie->used_msi);
+ spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+ irq_domain_set_info(dom, virq, pos, &tango_chip,
+ pcie, handle_edge_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void tango_irq_domain_free(struct irq_domain *dom, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ unsigned long flags;
+ struct irq_data *d = irq_domain_get_irq_data(dom, virq);
+ struct tango_pcie *pcie = d->chip_data;
+
+ spin_lock_irqsave(&pcie->used_msi_lock, flags);
+ __clear_bit(d->hwirq, pcie->used_msi);
+ spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+}
+
+static const struct irq_domain_ops dom_ops = {
+ .alloc = tango_irq_domain_alloc,
+ .free = tango_irq_domain_free,
};
static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn,
@@ -77,7 +237,11 @@ static int tango_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct tango_pcie *pcie;
struct resource *res;
- int ret;
+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct irq_domain *msi_dom, *irq_dom;
+ struct of_pci_range_parser parser;
+ struct of_pci_range range;
+ int virq, offset;
dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n");
add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
@@ -96,6 +260,41 @@ static int tango_pcie_probe(struct platform_device *pdev)
if (!tango_pcie_link_up(pcie))
return -ENODEV;
+ if (of_pci_dma_range_parser_init(&parser, dev->of_node) < 0)
+ return -ENOENT;
+
+ if (of_pci_range_parser_one(&parser, &range) == NULL)
+ return -ENOENT;
+
+ range.pci_addr += range.size;
+ pcie->msi_doorbell = range.pci_addr + res->start + SMP8759_DOORBELL;
+
+ for (offset = 0; offset < MSI_MAX / 8; offset += 4)
+ writel_relaxed(0, pcie->base + SMP8759_ENABLE + offset);
+
+ virq = platform_get_irq(pdev, 1);
+ if (virq <= 0) {
+ dev_err(dev, "Failed to map IRQ\n");
+ return -ENXIO;
+ }
+
+ irq_dom = irq_domain_create_linear(fwnode, MSI_MAX, &dom_ops, pcie);
+ if (!irq_dom) {
+ dev_err(dev, "Failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi_dom = pci_msi_create_irq_domain(fwnode, &msi_dom_info, irq_dom);
+ if (!msi_dom) {
+ dev_err(dev, "Failed to create MSI domain\n");
+ irq_domain_remove(irq_dom);
+ return -ENOMEM;
+ }
+
+ pcie->dom = irq_dom;
+ spin_lock_init(&pcie->used_msi_lock);
+ irq_set_chained_handler_and_data(virq, tango_msi_isr, pcie);
+
return pci_host_common_probe(pdev, &smp8759_ecam_ops);
}
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 94e13cb8608f..7b5325990f5e 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -129,7 +129,7 @@ static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
writel(val, port->reg_base + reg);
}
-static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
+static inline bool xilinx_pcie_link_up(struct xilinx_pcie_port *port)
{
return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
@@ -165,7 +165,7 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
/* Check if link is up when trying to access downstream ports */
if (bus->number != port->root_busno)
- if (!xilinx_pcie_link_is_up(port))
+ if (!xilinx_pcie_link_up(port))
return false;
/* Only one device down on each root port */
@@ -541,7 +541,7 @@ static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
{
struct device *dev = port->dev;
- if (xilinx_pcie_link_is_up(port))
+ if (xilinx_pcie_link_up(port))
dev_info(dev, "PCIe Link is UP\n");
else
dev_info(dev, "PCIe Link is DOWN\n");
diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c
deleted file mode 100644
index c68366cee6b7..000000000000
--- a/drivers/pci/hotplug-pci.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Core PCI functionality used only by PCI hotplug */
-
-#include <linux/pci.h>
-#include <linux/export.h>
-#include "pci.h"
-
-int pci_hp_add_bridge(struct pci_dev *dev)
-{
- struct pci_bus *parent = dev->bus;
- int pass, busnr, start = parent->busn_res.start;
- int end = parent->busn_res.end;
-
- for (busnr = start; busnr <= end; busnr++) {
- if (!pci_find_bus(pci_domain_nr(parent), busnr))
- break;
- }
- if (busnr-- > end) {
- printk(KERN_ERR "No bus number available for hot-added bridge %s\n",
- pci_name(dev));
- return -1;
- }
- for (pass = 0; pass < 2; pass++)
- busnr = pci_scan_bridge(parent, dev, busnr, pass);
- if (!dev->subordinate)
- return -1;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pci_hp_add_bridge);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 5ed2dcaa8e27..5db6f1839dad 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -462,18 +462,15 @@ static void enable_slot(struct acpiphp_slot *slot)
acpiphp_rescan_slot(slot);
max = acpiphp_max_busnr(bus);
for (pass = 0; pass < 2; pass++) {
- list_for_each_entry(dev, &bus->devices, bus_list) {
+ for_each_pci_bridge(dev, bus) {
if (PCI_SLOT(dev->devfn) != slot->device)
continue;
- if (pci_is_bridge(dev)) {
- max = pci_scan_bridge(bus, dev, max, pass);
- if (pass && dev->subordinate) {
- check_hotplug_bridge(slot, dev);
- pcibios_resource_survey_bus(dev->subordinate);
- __pci_bus_size_bridges(dev->subordinate,
- &add_list);
- }
+ max = pci_scan_bridge(bus, dev, max, pass);
+ if (pass && dev->subordinate) {
+ check_hotplug_bridge(slot, dev);
+ pcibios_resource_survey_bus(dev->subordinate);
+ __pci_bus_size_bridges(dev->subordinate, &add_list);
}
}
}
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 80c80017197d..f616358fa938 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -286,14 +286,11 @@ int cpci_configure_slot(struct slot *slot)
}
parent = slot->dev->bus;
- list_for_each_entry(dev, &parent->devices, bus_list) {
- if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
- continue;
- if (pci_is_bridge(dev))
+ for_each_pci_bridge(dev, parent) {
+ if (PCI_SLOT(dev->devfn) == PCI_SLOT(slot->devfn))
pci_hp_add_bridge(dev);
}
-
pci_assign_unassigned_bridge_resources(parent->self);
pci_bus_add_devices(parent);
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 48c8a066a6b7..c2bbe6b65d06 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -410,7 +410,7 @@ void cpqhp_create_debugfs_files(struct controller *ctrl);
void cpqhp_remove_debugfs_files(struct controller *ctrl);
/* controller functions */
-void cpqhp_pushbutton_thread(unsigned long event_pointer);
+void cpqhp_pushbutton_thread(struct timer_list *t);
irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
int cpqhp_find_available_resources(struct controller *ctrl,
void __iomem *rom_start);
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 4d06b8461255..70967ac75265 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -661,9 +661,8 @@ static int ctrl_slot_setup(struct controller *ctrl,
slot->p_sm_slot = slot_entry;
- init_timer(&slot->task_event);
+ timer_setup(&slot->task_event, cpqhp_pushbutton_thread, 0);
slot->task_event.expires = jiffies + 5 * HZ;
- slot->task_event.function = cpqhp_pushbutton_thread;
/*FIXME: these capabilities aren't used but if they are
* they need to be correctly implemented
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index a55653b54eed..a93069e739cb 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -47,7 +47,7 @@ static void interrupt_event_handler(struct controller *ctrl);
static struct task_struct *cpqhp_event_thread;
-static unsigned long pushbutton_pending; /* = 0 */
+static struct timer_list *pushbutton_pending; /* = NULL */
/* delay is in jiffies to wait for */
static void long_delay(int delay)
@@ -1732,9 +1732,10 @@ static u32 remove_board(struct pci_func *func, u32 replace_flag, struct controll
return 0;
}
-static void pushbutton_helper_thread(unsigned long data)
+static void pushbutton_helper_thread(struct timer_list *t)
{
- pushbutton_pending = data;
+ pushbutton_pending = t;
+
wake_up_process(cpqhp_event_thread);
}
@@ -1883,13 +1884,13 @@ static void interrupt_event_handler(struct controller *ctrl)
wait_for_ctrl_irq(ctrl);
mutex_unlock(&ctrl->crit_sect);
- init_timer(&p_slot->task_event);
+ timer_setup(&p_slot->task_event,
+ pushbutton_helper_thread,
+ 0);
p_slot->hp_slot = hp_slot;
p_slot->ctrl = ctrl;
/* p_slot->physical_slot = physical_slot; */
p_slot->task_event.expires = jiffies + 5 * HZ; /* 5 second delay */
- p_slot->task_event.function = pushbutton_helper_thread;
- p_slot->task_event.data = (u32) p_slot;
dbg("add_timer p_slot = %p\n", p_slot);
add_timer(&p_slot->task_event);
@@ -1920,15 +1921,15 @@ static void interrupt_event_handler(struct controller *ctrl)
* Scheduled procedure to handle blocking stuff for the pushbuttons.
* Handles all pending events and exits.
*/
-void cpqhp_pushbutton_thread(unsigned long slot)
+void cpqhp_pushbutton_thread(struct timer_list *t)
{
u8 hp_slot;
u8 device;
struct pci_func *func;
- struct slot *p_slot = (struct slot *) slot;
+ struct slot *p_slot = from_timer(p_slot, t, task_event);
struct controller *ctrl = (struct controller *) p_slot->ctrl;
- pushbutton_pending = 0;
+ pushbutton_pending = NULL;
hp_slot = p_slot->hp_slot;
device = p_slot->device;
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index dc1876feb06f..25edd0b18b75 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -1267,20 +1267,19 @@ static int unconfigure_boot_device(u8 busno, u8 device, u8 function)
size = size & 0xFFFFFFFC;
size = ~size + 1;
end_address = start_address + size - 1;
- if (ibmphp_find_resource(bus, start_address, &io, IO) < 0) {
- err("cannot find corresponding IO resource to remove\n");
- return -EIO;
- }
+ if (ibmphp_find_resource(bus, start_address, &io, IO))
+ goto report_search_failure;
+
debug("io->start = %x\n", io->start);
temp_end = io->end;
start_address = io->end + 1;
ibmphp_remove_resource(io);
/* This is needed b/c of the old I/O restrictions in the BIOS */
while (temp_end < end_address) {
- if (ibmphp_find_resource(bus, start_address, &io, IO) < 0) {
- err("cannot find corresponding IO resource to remove\n");
- return -EIO;
- }
+ if (ibmphp_find_resource(bus, start_address,
+ &io, IO))
+ goto report_search_failure;
+
debug("io->start = %x\n", io->start);
temp_end = io->end;
start_address = io->end + 1;
@@ -1327,6 +1326,10 @@ static int unconfigure_boot_device(u8 busno, u8 device, u8 function)
} /* end of for */
return 0;
+
+report_search_failure:
+ err("cannot find corresponding IO resource to remove\n");
+ return -EIO;
}
static int unconfigure_boot_bridge(u8 busno, u8 device, u8 function)
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index ec0b4c11ccd9..83f3d4af3677 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -113,10 +113,11 @@ static int board_added(struct slot *p_slot)
retval = pciehp_configure_device(p_slot);
if (retval) {
- ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
- pci_domain_nr(parent), parent->number);
- if (retval != -EEXIST)
+ if (retval != -EEXIST) {
+ ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
+ pci_domain_nr(parent), parent->number);
goto err_exit;
+ }
}
pciehp_green_led_on(p_slot);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index e5d5ce9e3010..7bab0606f1a9 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -50,14 +50,13 @@ static irqreturn_t pcie_isr(int irq, void *dev_id);
static void start_int_poll_timer(struct controller *ctrl, int sec);
/* This is the interrupt polling timeout function. */
-static void int_poll_timeout(unsigned long data)
+static void int_poll_timeout(struct timer_list *t)
{
- struct controller *ctrl = (struct controller *)data;
+ struct controller *ctrl = from_timer(ctrl, t, poll_timer);
/* Poll for interrupt events. regs == NULL => polling */
pcie_isr(0, ctrl);
- init_timer(&ctrl->poll_timer);
if (!pciehp_poll_time)
pciehp_poll_time = 2; /* default polling interval is 2 sec */
@@ -71,8 +70,6 @@ static void start_int_poll_timer(struct controller *ctrl, int sec)
if ((sec <= 0) || (sec > 60))
sec = 2;
- ctrl->poll_timer.function = &int_poll_timeout;
- ctrl->poll_timer.data = (unsigned long)ctrl;
ctrl->poll_timer.expires = jiffies + sec * HZ;
add_timer(&ctrl->poll_timer);
}
@@ -83,7 +80,7 @@ static inline int pciehp_request_irq(struct controller *ctrl)
/* Install interrupt polling timer. Start with 10 sec delay */
if (pciehp_poll_mode) {
- init_timer(&ctrl->poll_timer);
+ timer_setup(&ctrl->poll_timer, int_poll_timeout, 0);
start_int_poll_timer(ctrl, 10);
return 0;
}
@@ -764,8 +761,7 @@ int pciehp_reset_slot(struct slot *slot, int probe)
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
if (pciehp_poll_mode)
- int_poll_timeout(ctrl->poll_timer.data);
-
+ int_poll_timeout(&ctrl->poll_timer);
return 0;
}
@@ -795,7 +791,7 @@ static int pcie_init_slot(struct controller *ctrl)
if (!slot)
return -ENOMEM;
- slot->wq = alloc_workqueue("pciehp-%u", 0, 0, PSN(ctrl));
+ slot->wq = alloc_ordered_workqueue("pciehp-%u", 0, PSN(ctrl));
if (!slot->wq)
goto abort;
@@ -862,11 +858,16 @@ struct controller *pcie_init(struct pcie_device *dev)
if (link_cap & PCI_EXP_LNKCAP_DLLLARC)
ctrl->link_active_reporting = 1;
- /* Clear all remaining event bits in Slot Status register */
+ /*
+ * Clear all remaining event bits in Slot Status register except
+ * Presence Detect Changed. We want to make sure a possible
+ * hotplug event is triggered when the interrupt is unmasked so
+ * that we don't lose that event.
+ */
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
- PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
- PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
+ PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
+ PCI_EXP_SLTSTA_DLLSC);
ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c\n",
(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 19f30a9f461d..2a1ca020cf5a 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -46,7 +46,11 @@ int pciehp_configure_device(struct slot *p_slot)
dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
if (dev) {
- ctrl_err(ctrl, "Device %s already exists at %04x:%02x:00, cannot hot-add\n",
+ /*
+ * The device is already there, either configured by the
+ * boot firmware or added by a previous hotplug event.
+ */
+ ctrl_dbg(ctrl, "Device %s already exists at %04x:%02x:00, skipping hot-add\n",
pci_name(dev), pci_domain_nr(parent), parent->number);
pci_dev_put(dev);
ret = -EEXIST;
@@ -60,9 +64,8 @@ int pciehp_configure_device(struct slot *p_slot)
goto out;
}
- list_for_each_entry(dev, &parent->devices, bus_list)
- if (pci_is_bridge(dev))
- pci_hp_add_bridge(dev);
+ for_each_pci_bridge(dev, parent)
+ pci_hp_add_bridge(dev);
pci_assign_unassigned_bridge_resources(bridge);
pcie_bus_configure_settings(parent);
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index e5824c7b7b6b..4810e9626d9f 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -229,14 +229,13 @@ static inline int shpc_indirect_read(struct controller *ctrl, int index,
/*
* This is the interrupt polling timeout function.
*/
-static void int_poll_timeout(unsigned long data)
+static void int_poll_timeout(struct timer_list *t)
{
- struct controller *ctrl = (struct controller *)data;
+ struct controller *ctrl = from_timer(ctrl, t, poll_timer);
/* Poll for interrupt events. regs == NULL => polling */
shpc_isr(0, ctrl);
- init_timer(&ctrl->poll_timer);
if (!shpchp_poll_time)
shpchp_poll_time = 2; /* default polling interval is 2 sec */
@@ -252,8 +251,6 @@ static void start_int_poll_timer(struct controller *ctrl, int sec)
if ((sec <= 0) || (sec > 60))
sec = 2;
- ctrl->poll_timer.function = &int_poll_timeout;
- ctrl->poll_timer.data = (unsigned long)ctrl;
ctrl->poll_timer.expires = jiffies + sec * HZ;
add_timer(&ctrl->poll_timer);
}
@@ -1054,7 +1051,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
if (shpchp_poll_mode) {
/* Install interrupt polling timer. Start with 10 sec delay */
- init_timer(&ctrl->poll_timer);
+ timer_setup(&ctrl->poll_timer, int_poll_timeout, 0);
start_int_poll_timer(ctrl, 10);
} else {
/* Installs the interrupt handler */
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index f8cd3a27e351..ea63db58b4b1 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -61,10 +61,8 @@ int shpchp_configure_device(struct slot *p_slot)
goto out;
}
- list_for_each_entry(dev, &parent->devices, bus_list) {
- if (PCI_SLOT(dev->devfn) != p_slot->device)
- continue;
- if (pci_is_bridge(dev))
+ for_each_pci_bridge(dev, parent) {
+ if (PCI_SLOT(dev->devfn) == p_slot->device)
pci_hp_add_bridge(dev);
}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ac41c8be9200..6bacb8995e96 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -113,7 +113,7 @@ resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
}
-int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
+int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
int i;
int rc = -ENOMEM;
@@ -134,7 +134,7 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
virtfn->vendor = dev->vendor;
- pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
+ virtfn->device = iov->vf_device;
rc = pci_setup_device(virtfn);
if (rc)
goto failed0;
@@ -157,12 +157,8 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
BUG_ON(rc);
}
- if (reset)
- __pci_reset_function(virtfn);
-
pci_device_add(virtfn, virtfn->bus);
- pci_bus_add_device(virtfn);
sprintf(buf, "virtfn%u", id);
rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
if (rc)
@@ -173,6 +169,8 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
+ pci_bus_add_device(virtfn);
+
return 0;
failed2:
@@ -187,7 +185,7 @@ failed:
return rc;
}
-void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset)
+void pci_iov_remove_virtfn(struct pci_dev *dev, int id)
{
char buf[VIRTFN_ID_LEN];
struct pci_dev *virtfn;
@@ -198,11 +196,6 @@ void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset)
if (!virtfn)
return;
- if (reset) {
- device_release_driver(&virtfn->dev);
- __pci_reset_function(virtfn);
- }
-
sprintf(buf, "virtfn%u", id);
sysfs_remove_link(&dev->dev.kobj, buf);
/*
@@ -317,7 +310,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
pci_cfg_access_unlock(dev);
for (i = 0; i < initial; i++) {
- rc = pci_iov_add_virtfn(dev, i, 0);
+ rc = pci_iov_add_virtfn(dev, i);
if (rc)
goto failed;
}
@@ -329,7 +322,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
failed:
while (i--)
- pci_iov_remove_virtfn(dev, i, 0);
+ pci_iov_remove_virtfn(dev, i);
err_pcibios:
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
@@ -356,7 +349,7 @@ static void sriov_disable(struct pci_dev *dev)
return;
for (i = 0; i < iov->num_VFs; i++)
- pci_iov_remove_virtfn(dev, i, 0);
+ pci_iov_remove_virtfn(dev, i);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
@@ -449,6 +442,7 @@ found:
iov->nres = nres;
iov->ctrl = ctrl;
iov->total_VFs = total;
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
iov->pgsz = pgsz;
iov->self = dev;
iov->drivers_autoprobe = true;
@@ -504,6 +498,14 @@ static void sriov_restore_state(struct pci_dev *dev)
if (ctrl & PCI_SRIOV_CTRL_VFE)
return;
+ /*
+ * Restore PCI_SRIOV_CTRL_ARI before pci_iov_set_numvfs() because
+ * it reads offset & stride, which depend on PCI_SRIOV_CTRL_ARI.
+ */
+ ctrl &= ~PCI_SRIOV_CTRL_ARI;
+ ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI;
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl);
+
for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
pci_update_resource(dev, i);
@@ -724,7 +726,7 @@ int pci_vfs_assigned(struct pci_dev *dev)
* determine the device ID for the VFs, the vendor ID will be the
* same as the PF so there is no need to check for that one
*/
- pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id);
+ dev_id = dev->sriov->vf_device;
/* loop through all the VFs to see if we own any that are assigned */
vfdev = pci_get_device(dev->vendor, dev_id, NULL);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 496ed9130600..e06607167858 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1441,6 +1441,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
pci_msi_domain_update_chip_ops(info);
info->flags |= MSI_FLAG_ACTIVATE_EARLY;
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
+ info->flags |= MSI_FLAG_MUST_REACTIVATE;
domain = msi_create_irq_domain(fwnode, info, parent);
if (!domain)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index a8da543b3814..4708eb9df71b 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -624,7 +624,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
union acpi_object *obj;
struct pci_host_bridge *bridge;
- if (acpi_pci_disabled || !bus->bridge)
+ if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
return;
acpi_pci_slot_enumerate(bus);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 11bd267fc137..7f47bb72bf30 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -680,17 +680,13 @@ static int pci_pm_prepare(struct device *dev)
{
struct device_driver *drv = dev->driver;
- /*
- * Devices having power.ignore_children set may still be necessary for
- * suspending their children in the next phase of device suspend.
- */
- if (dev->power.ignore_children)
- pm_runtime_resume(dev);
-
if (drv && drv->pm && drv->pm->prepare) {
int error = drv->pm->prepare(dev);
- if (error)
+ if (error < 0)
return error;
+
+ if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
+ return 0;
}
return pci_dev_keep_suspended(to_pci_dev(dev));
}
@@ -731,18 +727,25 @@ static int pci_pm_suspend(struct device *dev)
if (!pm) {
pci_pm_default_suspend(pci_dev);
- goto Fixup;
+ return 0;
}
/*
- * PCI devices suspended at run time need to be resumed at this point,
- * because in general it is necessary to reconfigure them for system
- * suspend. Namely, if the device is supposed to wake up the system
- * from the sleep state, we may need to reconfigure it for this purpose.
- * In turn, if the device is not supposed to wake up the system from the
- * sleep state, we'll have to prevent it from signaling wake-up.
+ * PCI devices suspended at run time may need to be resumed at this
+ * point, because in general it may be necessary to reconfigure them for
+ * system suspend. Namely, if the device is expected to wake up the
+ * system from the sleep state, it may have to be reconfigured for this
+ * purpose, or if the device is not expected to wake up the system from
+ * the sleep state, it should be prevented from signaling wakeup events
+ * going forward.
+ *
+ * Also if the driver of the device does not indicate that its system
+ * suspend callbacks can cope with runtime-suspended devices, it is
+ * better to resume the device from runtime suspend here.
*/
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ !pci_dev_keep_suspended(pci_dev))
+ pm_runtime_resume(dev);
pci_dev->state_saved = false;
if (pm->suspend) {
@@ -762,17 +765,27 @@ static int pci_pm_suspend(struct device *dev)
}
}
- Fixup:
- pci_fixup_device(pci_fixup_suspend, pci_dev);
-
return 0;
}
+static int pci_pm_suspend_late(struct device *dev)
+{
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
+
+ return pm_generic_suspend_late(dev);
+}
+
static int pci_pm_suspend_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
@@ -805,6 +818,9 @@ static int pci_pm_suspend_noirq(struct device *dev)
pci_prepare_to_sleep(pci_dev);
}
+ dev_dbg(dev, "PCI PM: Suspend power state: %s\n",
+ pci_power_name(pci_dev->current_state));
+
pci_pm_set_unknown_state(pci_dev);
/*
@@ -831,6 +847,14 @@ static int pci_pm_resume_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ /*
+ * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
+ * during system suspend, so update their runtime PM status to "active"
+ * as they are going to be put into D0 shortly.
+ */
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ pm_runtime_set_active(dev);
+
pci_pm_default_resume_early(pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
@@ -873,6 +897,7 @@ static int pci_pm_resume(struct device *dev)
#else /* !CONFIG_SUSPEND */
#define pci_pm_suspend NULL
+#define pci_pm_suspend_late NULL
#define pci_pm_suspend_noirq NULL
#define pci_pm_resume NULL
#define pci_pm_resume_noirq NULL
@@ -907,7 +932,8 @@ static int pci_pm_freeze(struct device *dev)
* devices should not be touched during freeze/thaw transitions,
* however.
*/
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ pm_runtime_resume(dev);
pci_dev->state_saved = false;
if (pm->freeze) {
@@ -919,17 +945,25 @@ static int pci_pm_freeze(struct device *dev)
return error;
}
- if (pcibios_pm_ops.freeze)
- return pcibios_pm_ops.freeze(dev);
-
return 0;
}
+static int pci_pm_freeze_late(struct device *dev)
+{
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ return pm_generic_freeze_late(dev);
+}
+
static int pci_pm_freeze_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_FREEZE);
@@ -959,6 +993,16 @@ static int pci_pm_thaw_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ /*
+ * If the device is in runtime suspend, the code below may not work
+ * correctly with it, so skip that code and make the PM core skip all of
+ * the subsequent "thaw" callbacks for the device.
+ */
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.direct_complete = true;
+ return 0;
+ }
+
if (pcibios_pm_ops.thaw_noirq) {
error = pcibios_pm_ops.thaw_noirq(dev);
if (error)
@@ -983,12 +1027,6 @@ static int pci_pm_thaw(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int error = 0;
- if (pcibios_pm_ops.thaw) {
- error = pcibios_pm_ops.thaw(dev);
- if (error)
- return error;
- }
-
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume(dev);
@@ -1014,11 +1052,13 @@ static int pci_pm_poweroff(struct device *dev)
if (!pm) {
pci_pm_default_suspend(pci_dev);
- goto Fixup;
+ return 0;
}
/* The reason to do that is the same as in pci_pm_suspend(). */
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ !pci_dev_keep_suspended(pci_dev))
+ pm_runtime_resume(dev);
pci_dev->state_saved = false;
if (pm->poweroff) {
@@ -1030,13 +1070,17 @@ static int pci_pm_poweroff(struct device *dev)
return error;
}
- Fixup:
- pci_fixup_device(pci_fixup_suspend, pci_dev);
+ return 0;
+}
- if (pcibios_pm_ops.poweroff)
- return pcibios_pm_ops.poweroff(dev);
+static int pci_pm_poweroff_late(struct device *dev)
+{
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
- return 0;
+ pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
+
+ return pm_generic_poweroff_late(dev);
}
static int pci_pm_poweroff_noirq(struct device *dev)
@@ -1044,6 +1088,9 @@ static int pci_pm_poweroff_noirq(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
if (pci_has_legacy_pm_support(to_pci_dev(dev)))
return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
@@ -1085,6 +1132,10 @@ static int pci_pm_restore_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ /* This is analogous to the pci_pm_resume_noirq() case. */
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ pm_runtime_set_active(dev);
+
if (pcibios_pm_ops.restore_noirq) {
error = pcibios_pm_ops.restore_noirq(dev);
if (error)
@@ -1108,12 +1159,6 @@ static int pci_pm_restore(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int error = 0;
- if (pcibios_pm_ops.restore) {
- error = pcibios_pm_ops.restore(dev);
- if (error)
- return error;
- }
-
/*
* This is necessary for the hibernation error path in which restore is
* called without restoring the standard config registers of the device.
@@ -1139,10 +1184,12 @@ static int pci_pm_restore(struct device *dev)
#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define pci_pm_freeze NULL
+#define pci_pm_freeze_late NULL
#define pci_pm_freeze_noirq NULL
#define pci_pm_thaw NULL
#define pci_pm_thaw_noirq NULL
#define pci_pm_poweroff NULL
+#define pci_pm_poweroff_late NULL
#define pci_pm_poweroff_noirq NULL
#define pci_pm_restore NULL
#define pci_pm_restore_noirq NULL
@@ -1258,10 +1305,13 @@ static const struct dev_pm_ops pci_dev_pm_ops = {
.prepare = pci_pm_prepare,
.complete = pci_pm_complete,
.suspend = pci_pm_suspend,
+ .suspend_late = pci_pm_suspend_late,
.resume = pci_pm_resume,
.freeze = pci_pm_freeze,
+ .freeze_late = pci_pm_freeze_late,
.thaw = pci_pm_thaw,
.poweroff = pci_pm_poweroff,
+ .poweroff_late = pci_pm_poweroff_late,
.restore = pci_pm_restore,
.suspend_noirq = pci_pm_suspend_noirq,
.resume_noirq = pci_pm_resume_noirq,
@@ -1466,6 +1516,7 @@ struct bus_type pci_bus_type = {
.drv_groups = pci_drv_groups,
.pm = PCI_PM_OPS_PTR,
.num_vf = pci_bus_num_vf,
+ .force_dma = true,
};
EXPORT_SYMBOL(pci_bus_type);
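The bus-level changes above only take effect for drivers that opt in. A minimal sketch of such an opt-in is shown below, assuming the dev_pm_set_driver_flags() helper from the same series and a made-up "foo" driver; this is an illustration, not code from this patch:

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* Let pci_pm_suspend() and friends leave us runtime-suspended */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND);

	/* ... normal device setup ... */

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);
	return 0;
}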
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 00fa4278c1f4..06c7f0b85cd2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -649,6 +649,33 @@ exit:
return count;
}
+static ssize_t sriov_offset_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n", pdev->sriov->offset);
+}
+
+static ssize_t sriov_stride_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n", pdev->sriov->stride);
+}
+
+static ssize_t sriov_vf_device_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%x\n", pdev->sriov->vf_device);
+}
+
static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -677,6 +704,9 @@ static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
static struct device_attribute sriov_numvfs_attr =
__ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
sriov_numvfs_show, sriov_numvfs_store);
+static struct device_attribute sriov_offset_attr = __ATTR_RO(sriov_offset);
+static struct device_attribute sriov_stride_attr = __ATTR_RO(sriov_stride);
+static struct device_attribute sriov_vf_device_attr = __ATTR_RO(sriov_vf_device);
static struct device_attribute sriov_drivers_autoprobe_attr =
__ATTR(sriov_drivers_autoprobe, (S_IRUGO|S_IWUSR|S_IWGRP),
sriov_drivers_autoprobe_show, sriov_drivers_autoprobe_store);
@@ -1749,6 +1779,9 @@ static const struct attribute_group pci_dev_hp_attr_group = {
static struct attribute *sriov_dev_attrs[] = {
&sriov_totalvfs_attr.attr,
&sriov_numvfs_attr.attr,
+ &sriov_offset_attr.attr,
+ &sriov_stride_attr.attr,
+ &sriov_vf_device_attr.attr,
&sriov_drivers_autoprobe_attr.attr,
NULL,
};
@@ -1796,6 +1829,6 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
NULL,
};
-struct device_type pci_dev_type = {
+const struct device_type pci_dev_type = {
.groups = pci_dev_attr_groups,
};
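For context on what the new sriov_offset and sriov_stride attributes mean, the Routing ID of a VF is derived from them roughly as sketched below. This is an illustration only (pf->sriov is visible only inside drivers/pci, and "foo" and n are hypothetical):

/* Illustration only: compute VF n's Routing ID from the PF's SR-IOV values */
static u16 foo_vf_routing_id(struct pci_dev *pf, int n)
{
	u16 pf_rid = PCI_DEVID(pf->bus->number, pf->devfn);

	return pf_rid + pf->sriov->offset + pf->sriov->stride * n;
}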
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6078dfc11b11..4a7c6864fdf4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2166,8 +2166,7 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
if (!pm_runtime_suspended(dev)
|| pci_target_state(pci_dev, wakeup) != pci_dev->current_state
- || platform_pci_need_resume(pci_dev)
- || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
+ || platform_pci_need_resume(pci_dev))
return false;
/*
@@ -2966,6 +2965,107 @@ bool pci_acs_path_enabled(struct pci_dev *start,
}
/**
+ * pci_rebar_find_pos - find position of resize ctrl reg for BAR
+ * @pdev: PCI device
+ * @bar: BAR to find
+ *
+ * Helper to find the position of the ctrl register for a BAR.
+ * Returns -ENOTSUPP if resizable BARs are not supported at all.
+ * Returns -ENOENT if no ctrl register for the BAR could be found.
+ */
+static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
+{
+ unsigned int pos, nbars, i;
+ u32 ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+ if (!pos)
+ return -ENOTSUPP;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ for (i = 0; i < nbars; i++, pos += 8) {
+ int bar_idx;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
+ if (bar_idx == bar)
+ return pos;
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * pci_rebar_get_possible_sizes - get possible sizes for BAR
+ * @pdev: PCI device
+ * @bar: BAR to query
+ *
+ * Get the possible sizes of a resizable BAR as a bitmask defined in the spec
+ * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
+ */
+u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
+{
+ int pos;
+ u32 cap;
+
+ pos = pci_rebar_find_pos(pdev, bar);
+ if (pos < 0)
+ return 0;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
+ return (cap & PCI_REBAR_CAP_SIZES) >> 4;
+}
+
+/**
+ * pci_rebar_get_current_size - get the current size of a BAR
+ * @pdev: PCI device
+ * @bar: BAR to query
+ *
+ * Read the size of a BAR from the resizable BAR config.
+ * Returns size if found or negative error code.
+ */
+int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
+{
+ int pos;
+ u32 ctrl;
+
+ pos = pci_rebar_find_pos(pdev, bar);
+ if (pos < 0)
+ return pos;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> 8;
+}
+
+/**
+ * pci_rebar_set_size - set a new size for a BAR
+ * @pdev: PCI device
+ * @bar: BAR to set size to
+ * @size: new size as defined in the spec (0=1MB, 19=512GB)
+ *
+ * Set the new size of a BAR as defined in the spec.
+ * Returns zero if resizing was successful, error code otherwise.
+ */
+int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
+{
+ int pos;
+ u32 ctrl;
+
+ pos = pci_rebar_find_pos(pdev, bar);
+ if (pos < 0)
+ return pos;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
+ ctrl |= size << 8;
+ pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
+ return 0;
+}
+
+/**
* pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
* @dev: the PCI device
* @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
@@ -3471,7 +3571,7 @@ EXPORT_SYMBOL(devm_pci_remap_cfgspace);
* All operations are managed and will be undone on driver detach.
*
* Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
- * on failure. Usage example:
+ * on failure. Usage example::
*
* res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
* base = devm_pci_remap_cfg_resource(&pdev->dev, res);
@@ -4146,35 +4246,6 @@ static void pci_dev_restore(struct pci_dev *dev)
}
/**
- * __pci_reset_function - reset a PCI device function
- * @dev: PCI device to reset
- *
- * Some devices allow an individual function to be reset without affecting
- * other functions in the same device. The PCI device must be responsive
- * to PCI config space in order to use this function.
- *
- * The device function is presumed to be unused when this function is called.
- * Resetting the device will make the contents of PCI configuration space
- * random, so any caller of this must be prepared to reinitialise the
- * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
- * etc.
- *
- * Returns 0 if the device function was successfully reset or negative if the
- * device doesn't support resetting a single function.
- */
-int __pci_reset_function(struct pci_dev *dev)
-{
- int ret;
-
- pci_dev_lock(dev);
- ret = __pci_reset_function_locked(dev);
- pci_dev_unlock(dev);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(__pci_reset_function);
-
-/**
* __pci_reset_function_locked - reset a PCI device function while holding
* the @dev mutex lock.
* @dev: PCI device to reset
@@ -4199,6 +4270,14 @@ int __pci_reset_function_locked(struct pci_dev *dev)
might_sleep();
+ /*
+ * A reset method returns -ENOTTY if it doesn't support this device
+ * and we should try the next method.
+ *
+ * If it returns 0 (success), we're finished. If it returns any
+ * other error, we're also finished: this indicates that further
+ * reset mechanisms might be broken on the device.
+ */
rc = pci_dev_specific_reset(dev, 0);
if (rc != -ENOTTY)
return rc;
@@ -4264,8 +4343,8 @@ int pci_probe_reset_function(struct pci_dev *dev)
*
* This function does not just reset the PCI portion of a device, but
* clears all the state associated with the device. This function differs
- * from __pci_reset_function in that it saves and restores device state
- * over the reset.
+ * from __pci_reset_function_locked() in that it saves and restores device state
+ * over the reset and takes the PCI device lock.
*
* Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
@@ -4300,7 +4379,7 @@ EXPORT_SYMBOL_GPL(pci_reset_function);
*
* This function does not just reset the PCI portion of a device, but
* clears all the state associated with the device. This function differs
- * from __pci_reset_function() in that it saves and restores device state
+ * from __pci_reset_function_locked() in that it saves and restores device state
* over the reset. It also differs from pci_reset_function() in that it
* requires the PCI device lock to be held.
*
@@ -4356,6 +4435,10 @@ static bool pci_bus_resetable(struct pci_bus *bus)
{
struct pci_dev *dev;
+
+ if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
+ return false;
+
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
(dev->subordinate && !pci_bus_resetable(dev->subordinate)))
@@ -4420,6 +4503,10 @@ static bool pci_slot_resetable(struct pci_slot *slot)
{
struct pci_dev *dev;
+ if (slot->bus->self &&
+ (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
+ return false;
+
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index fdb02c1f94bb..f6b58b32a67c 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -193,7 +193,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
}
extern const struct attribute_group *pci_dev_groups[];
extern const struct attribute_group *pcibus_groups[];
-extern struct device_type pci_dev_type;
+extern const struct device_type pci_dev_type;
extern const struct attribute_group *pci_bus_groups[];
@@ -264,6 +264,7 @@ struct pci_sriov {
u16 num_VFs; /* number of VFs available */
u16 offset; /* first VF Routing ID offset */
u16 stride; /* following VF stride */
+ u16 vf_device; /* VF device ID */
u32 pgsz; /* page size for BAR alignment */
u8 link; /* Function Dependency Link */
u8 max_VF_buses; /* max buses consumed by VFs */
@@ -367,4 +368,12 @@ int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
struct resource *res);
#endif
+u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
+int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
+int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size);
+static inline u64 pci_rebar_size_to_bytes(int size)
+{
+ return 1ULL << (size + 20);
+}
+
#endif /* DRIVERS_PCI_H */
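A rough sketch of how a caller inside drivers/pci might interpret the bitmask returned by pci_rebar_get_possible_sizes() declared above; the loop bound and the message are illustrative only, not taken from this change:

	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);
	int size;

	/* bit n set => 1 MB << n is a supported BAR size */
	for (size = 0; size < 20; size++)
		if (sizes & BIT(size))
			dev_info(&pdev->dev, "BAR %d supports %llu bytes\n", bar,
				 (unsigned long long)pci_rebar_size_to_bytes(size));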
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 890efcc574cb..744805232155 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -390,7 +390,14 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
* If the error is reported by an end point, we think this
* error is related to the upstream link of the end point.
*/
- pci_walk_bus(dev->bus, cb, &result_data);
+ if (state == pci_channel_io_normal)
+ /*
+ * The error is non-fatal, so the bus is OK; just invoke
+ * the callback for the function that logged the error.
+ */
+ cb(dev, &result_data);
+ else
+ pci_walk_bus(dev->bus, cb, &result_data);
}
return result_data.result;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 83e4a892b14b..9783e10da3a9 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -451,24 +451,25 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
return;
- /* Choose the greater of the two T_cmn_mode_rstr_time */
- val1 = (upreg->l1ss_cap >> 8) & 0xFF;
- val2 = (upreg->l1ss_cap >> 8) & 0xFF;
+ /* Choose the greater of the two Port Common_Mode_Restore_Times */
+ val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
+ val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
if (val1 > val2)
link->l1ss.ctl1 |= val1 << 8;
else
link->l1ss.ctl1 |= val2 << 8;
+
/*
* We currently use a fixed LTR L1.2 threshold constant picked from
* Intel's coreboot.
*/
link->l1ss.ctl1 |= LTR_L1_2_THRESHOLD_BITS;
- /* Choose the greater of the two T_pwr_on */
- val1 = (upreg->l1ss_cap >> 19) & 0x1F;
- scale1 = (upreg->l1ss_cap >> 16) & 0x03;
- val2 = (dwreg->l1ss_cap >> 19) & 0x1F;
- scale2 = (dwreg->l1ss_cap >> 16) & 0x03;
+ /* Choose the greater of the two Port T_POWER_ON times */
+ val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
+ scale1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
+ val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
+ scale2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
if (calc_l1ss_pwron(link->pdev, scale1, val1) >
calc_l1ss_pwron(link->downstream, scale2, val2))
@@ -647,21 +648,26 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
if (enable_req & ASPM_STATE_L1_2_MASK) {
- /* Program T_pwr_on in both ports */
+ /* Program T_POWER_ON times in both ports */
pci_write_config_dword(parent, up_cap_ptr + PCI_L1SS_CTL2,
link->l1ss.ctl2);
pci_write_config_dword(child, dw_cap_ptr + PCI_L1SS_CTL2,
link->l1ss.ctl2);
- /* Program T_cmn_mode in parent */
+ /* Program Common_Mode_Restore_Time in upstream device */
pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
- 0xFF00, link->l1ss.ctl1);
-
- /* Program LTR L1.2 threshold in both ports */
- pci_clear_and_set_dword(parent, dw_cap_ptr + PCI_L1SS_CTL1,
- 0xE3FF0000, link->l1ss.ctl1);
+ PCI_L1SS_CTL1_CM_RESTORE_TIME,
+ link->l1ss.ctl1);
+
+ /* Program LTR_L1.2_THRESHOLD time in both ports */
+ pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
+ link->l1ss.ctl1);
pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
- 0xE3FF0000, link->l1ss.ctl1);
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
+ link->l1ss.ctl1);
}
val = 0;
@@ -803,10 +809,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
/*
* Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
- * hierarchies.
+ * hierarchies. Note that some PCIe host implementations omit
+ * the root ports entirely, in which case a downstream port on
+ * a switch may become the root of the link state chain for all
+ * its subordinate endpoints.
*/
if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
+ !pdev->bus->parent->self) {
link->root = link;
} else {
struct pcie_link_state *parent;
@@ -1061,7 +1071,8 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
}
EXPORT_SYMBOL(pci_disable_link_state);
-static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
+static int pcie_aspm_set_policy(const char *val,
+ const struct kernel_param *kp)
{
int i;
struct pcie_link_state *link;
@@ -1088,7 +1099,7 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
return 0;
}
-static int pcie_aspm_get_policy(char *buffer, struct kernel_param *kp)
+static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
{
int i, cnt = 0;
for (i = 0; i < ARRAY_SIZE(policy_str); i++)
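For reference, the T_POWER_ON scale/value pairs read above encode a time in microseconds roughly as sketched below, mirroring what calc_l1ss_pwron() is expected to compute; this is a sketch, not code from this change:

static u32 t_power_on_us(u32 scale, u32 val)
{
	/* scale field: 0 => 2 us units, 1 => 10 us units, 2 => 100 us units */
	switch (scale) {
	case 0:
		return val * 2;
	case 1:
		return val * 10;
	case 2:
		return val * 100;
	default:
		return 0;	/* reserved encoding */
	}
}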
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index fafdb165dd2e..df290aa58dce 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -226,6 +226,9 @@ static void pcie_pme_work_fn(struct work_struct *work)
break;
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
+ if (rtsta == (u32) ~0)
+ break;
+
if (rtsta & PCI_EXP_RTSTA_PME) {
/*
* Clear PME status of the port. If there are other
@@ -273,7 +276,7 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
spin_lock_irqsave(&data->lock, flags);
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
- if (!(rtsta & PCI_EXP_RTSTA_PME)) {
+ if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) {
spin_unlock_irqrestore(&data->lock, flags);
return IRQ_NONE;
}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index ebc9d45bd731..a59210350c44 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -44,134 +44,113 @@ static void release_pcie_device(struct device *dev)
kfree(to_pcie_device(dev));
}
-/**
- * pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode
- * for given port
- * @dev: PCI Express port to handle
- * @irqs: Array of interrupt vectors to populate
- * @mask: Bitmask of port capabilities returned by get_port_device_capability()
- *
- * Return value: 0 on success, error code on failure
+/*
+ * Fill in *pme, *aer, *dpc with the relevant Interrupt Message Numbers if
+ * services are enabled in "mask". Return the number of MSI/MSI-X vectors
+ * required to accommodate the largest Message Number.
*/
-static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
+static int pcie_message_numbers(struct pci_dev *dev, int mask,
+ u32 *pme, u32 *aer, u32 *dpc)
{
- int nr_entries, entry, nvec = 0;
+ u32 nvec = 0, pos, reg32;
+ u16 reg16;
/*
- * Allocate as many entries as the port wants, so that we can check
- * which of them will be useful. Moreover, if nr_entries is correctly
- * equal to the number of entries this port actually uses, we'll happily
- * go through without any tricks.
+ * The Interrupt Message Number indicates which vector is used, i.e.,
+ * the MSI-X table entry or the MSI offset between the base Message
+ * Data and the generated interrupt message. See PCIe r3.1, sec
+ * 7.8.2, 7.10.10, 7.31.2.
*/
- nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
- PCI_IRQ_MSIX | PCI_IRQ_MSI);
- if (nr_entries < 0)
- return nr_entries;
if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
- u16 reg16;
-
- /*
- * Per PCIe r3.1, sec 6.1.6, "PME and Hot-Plug Event
- * interrupts (when both are implemented) always share the
- * same MSI or MSI-X vector, as indicated by the Interrupt
- * Message Number field in the PCI Express Capabilities
- * register".
- *
- * Per sec 7.8.2, "For MSI, the [Interrupt Message Number]
- * indicates the offset between the base Message Data and
- * the interrupt message that is generated."
- *
- * "For MSI-X, the [Interrupt Message Number] indicates
- * which MSI-X Table entry is used to generate the
- * interrupt message."
- */
pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
- entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
- if (entry >= nr_entries)
- goto out_free_irqs;
-
- irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, entry);
- irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, entry);
-
- nvec = max(nvec, entry + 1);
+ *pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
+ nvec = *pme + 1;
}
if (mask & PCIE_PORT_SERVICE_AER) {
- u32 reg32, pos;
-
- /*
- * Per PCIe r3.1, sec 7.10.10, the Advanced Error Interrupt
- * Message Number in the Root Error Status register
- * indicates which MSI/MSI-X vector is used for AER.
- *
- * "For MSI, the [Advanced Error Interrupt Message Number]
- * indicates the offset between the base Message Data and
- * the interrupt message that is generated."
- *
- * "For MSI-X, the [Advanced Error Interrupt Message
- * Number] indicates which MSI-X Table entry is used to
- * generate the interrupt message."
- */
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
- entry = reg32 >> 27;
- if (entry >= nr_entries)
- goto out_free_irqs;
-
- irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, entry);
-
- nvec = max(nvec, entry + 1);
+ if (pos) {
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS,
+ &reg32);
+ *aer = (reg32 & PCI_ERR_ROOT_AER_IRQ) >> 27;
+ nvec = max(nvec, *aer + 1);
+ }
}
if (mask & PCIE_PORT_SERVICE_DPC) {
- u16 reg16, pos;
-
- /*
- * Per PCIe r4.0 (v0.9), sec 7.9.15.2, the DPC Interrupt
- * Message Number in the DPC Capability register indicates
- * which MSI/MSI-X vector is used for DPC.
- *
- * "For MSI, the [DPC Interrupt Message Number] indicates
- * the offset between the base Message Data and the
- * interrupt message that is generated."
- *
- * "For MSI-X, the [DPC Interrupt Message Number] indicates
- * which MSI-X Table entry is used to generate the
- * interrupt message."
- */
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
- pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP, &reg16);
- entry = reg16 & 0x1f;
- if (entry >= nr_entries)
- goto out_free_irqs;
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP,
+ &reg16);
+ *dpc = reg16 & PCI_EXP_DPC_IRQ;
+ nvec = max(nvec, *dpc + 1);
+ }
+ }
+
+ return nvec;
+}
- irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, entry);
+/**
+ * pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode
+ * for given port
+ * @dev: PCI Express port to handle
+ * @irqs: Array of interrupt vectors to populate
+ * @mask: Bitmask of port capabilities returned by get_port_device_capability()
+ *
+ * Return value: 0 on success, error code on failure
+ */
+static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
+{
+ int nr_entries, nvec;
+ u32 pme = 0, aer = 0, dpc = 0;
- nvec = max(nvec, entry + 1);
+ /* Allocate the maximum possible number of MSI/MSI-X vectors */
+ nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI);
+ if (nr_entries < 0)
+ return nr_entries;
+
+ /* See how many and which Interrupt Message Numbers we actually use */
+ nvec = pcie_message_numbers(dev, mask, &pme, &aer, &dpc);
+ if (nvec > nr_entries) {
+ pci_free_irq_vectors(dev);
+ return -EIO;
}
/*
- * If nvec is equal to the allocated number of entries, we can just use
- * what we have. Otherwise, the port has some extra entries not for the
- * services we know and we need to work around that.
+ * If we allocated more than we need, free them and reallocate fewer.
+ *
+ * Reallocating may change the specific vectors we get, so
+ * pci_irq_vector() must be done *after* the reallocation.
+ *
+ * If we're using MSI, hardware is *allowed* to change the Interrupt
+ * Message Numbers when we free and reallocate the vectors, but we
+ * assume it won't because we allocate enough vectors for the
+ * biggest Message Number we found.
*/
if (nvec != nr_entries) {
- /* Drop the temporary MSI-X setup */
pci_free_irq_vectors(dev);
- /* Now allocate the MSI-X vectors for real */
nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_entries < 0)
return nr_entries;
}
- return 0;
+ /* PME and hotplug share an MSI/MSI-X vector */
+ if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
+ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, pme);
+ irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, pme);
+ }
-out_free_irqs:
- pci_free_irq_vectors(dev);
- return -EIO;
+ if (mask & PCIE_PORT_SERVICE_AER)
+ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, aer);
+
+ if (mask & PCIE_PORT_SERVICE_DPC)
+ irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, dpc);
+
+ return 0;
}
/**
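To make the vector accounting above concrete, here is a worked example with made-up Interrupt Message Numbers (not values from any real port):

	u32 pme = 0, aer = 3, dpc = 1;
	u32 nvec = max3(pme, aer, dpc) + 1;	/* vectors 0..3 => nvec = 4 */

Even though only three of the four vectors are used by services, four must be allocated so that vector 3 exists for AER.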
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 68c389c7b975..ffbf4e723527 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -247,6 +247,7 @@ static struct pci_driver pcie_portdriver = {
.probe = pcie_portdrv_probe,
.remove = pcie_portdrv_remove,
+ .shutdown = pcie_portdrv_remove,
.err_handler = &pcie_portdrv_err_handler,
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ff94b69738a8..14e0ea1ff38b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -959,7 +959,21 @@ static void pci_enable_crs(struct pci_dev *pdev)
PCI_EXP_RTCTL_CRSSVE);
}
+static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
+ unsigned int available_buses);
+
/*
+ * pci_scan_bridge_extend() - Scan buses behind a bridge
+ * @bus: Parent bus the bridge is on
+ * @dev: Bridge itself
+ * @max: Starting subordinate number of buses behind this bridge
+ * @available_buses: Total number of buses available for this bridge and
+ * the devices below. After the minimal bus space has
+ * been allocated the remaining buses will be
+ * distributed equally between hotplug-capable bridges.
+ * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
+ * that need to be reconfigured).
+ *
* If it's a bridge, configure it and scan the bus behind it.
* For CardBus bridges, we don't scan behind as the devices will
* be handled by the bridge driver itself.
@@ -969,7 +983,9 @@ static void pci_enable_crs(struct pci_dev *pdev)
* them, we proceed to assigning numbers to the remaining buses in
* order to avoid overlaps between old and new bus numbers.
*/
-int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
+static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
+ int max, unsigned int available_buses,
+ int pass)
{
struct pci_bus *child;
int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
@@ -1076,9 +1092,13 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
child = pci_add_new_bus(bus, dev, max+1);
if (!child)
goto out;
- pci_bus_insert_busn_res(child, max+1, 0xff);
+ pci_bus_insert_busn_res(child, max+1,
+ bus->busn_res.end);
}
max++;
+ if (available_buses)
+ available_buses--;
+
buses = (buses & 0xff000000)
| ((unsigned int)(child->primary) << 0)
| ((unsigned int)(child->busn_res.start) << 8)
@@ -1100,7 +1120,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
if (!is_cardbus) {
child->bridge_ctl = bctl;
- max = pci_scan_child_bus(child);
+ max = pci_scan_child_bus_extend(child, available_buses);
} else {
/*
* For CardBus bridges, we leave 4 bus numbers
@@ -1168,6 +1188,28 @@ out:
return max;
}
+
+/*
+ * pci_scan_bridge() - Scan buses behind a bridge
+ * @bus: Parent bus the bridge is on
+ * @dev: Bridge itself
+ * @max: Starting subordinate number of buses behind this bridge
+ * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
+ * that need to be reconfigured).
+ *
+ * If it's a bridge, configure it and scan the bus behind it.
+ * For CardBus bridges, we don't scan behind as the devices will
+ * be handled by the bridge driver itself.
+ *
+ * We need to process bridges in two passes -- first we scan those
+ * already configured by the BIOS and after we are done with all of
+ * them, we proceed to assigning numbers to the remaining buses in
+ * order to avoid overlaps between old and new bus numbers.
+ */
+int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
+{
+ return pci_scan_bridge_extend(bus, dev, max, 0, pass);
+}
EXPORT_SYMBOL(pci_scan_bridge);
/*
@@ -2396,9 +2438,24 @@ void __weak pcibios_fixup_bus(struct pci_bus *bus)
/* nothing to do, expected to be removed in the future */
}
-unsigned int pci_scan_child_bus(struct pci_bus *bus)
+/**
+ * pci_scan_child_bus_extend() - Scan devices below a bus
+ * @bus: Bus to scan for devices
+ * @available_buses: Total number of buses available (%0 does not try to
+ * extend beyond the minimal)
+ *
+ * Scans devices below @bus including subordinate buses. Returns new
+ * subordinate number including all the found devices. Passing
+ * @available_buses causes the remaining bus space to be distributed
+ * equally between hotplug-capable bridges to allow future extension of the
+ * hierarchy.
+ */
+static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
+ unsigned int available_buses)
{
- unsigned int devfn, pass, max = bus->busn_res.start;
+ unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
+ unsigned int start = bus->busn_res.start;
+ unsigned int devfn, cmax, max = start;
struct pci_dev *dev;
dev_dbg(&bus->dev, "scanning bus\n");
@@ -2408,7 +2465,8 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
pci_scan_slot(bus, devfn);
/* Reserve buses for SR-IOV capability. */
- max += pci_iov_bus_range(bus);
+ used_buses = pci_iov_bus_range(bus);
+ max += used_buses;
/*
* After performing arch-dependent fixup of the bus, look behind
@@ -2420,19 +2478,73 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
bus->is_added = 1;
}
- for (pass = 0; pass < 2; pass++)
- list_for_each_entry(dev, &bus->devices, bus_list) {
- if (pci_is_bridge(dev))
- max = pci_scan_bridge(bus, dev, max, pass);
+ /*
+ * Calculate how many hotplug bridges and normal bridges there
+ * are on this bus. We will distribute the additional available
+ * buses between hotplug bridges.
+ */
+ for_each_pci_bridge(dev, bus) {
+ if (dev->is_hotplug_bridge)
+ hotplug_bridges++;
+ else
+ normal_bridges++;
+ }
+
+ /*
+ * Scan bridges that are already configured. We don't touch them
+ * unless they are misconfigured (which will be done in the second
+ * scan below).
+ */
+ for_each_pci_bridge(dev, bus) {
+ cmax = max;
+ max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
+ used_buses += cmax - max;
+ }
+
+ /* Scan bridges that need to be reconfigured */
+ for_each_pci_bridge(dev, bus) {
+ unsigned int buses = 0;
+
+ if (!hotplug_bridges && normal_bridges == 1) {
+ /*
+ * There is only one bridge on the bus (upstream
+ * port) so it gets all available buses which it
+ * can then distribute to the possible hotplug
+ * bridges below.
+ */
+ buses = available_buses;
+ } else if (dev->is_hotplug_bridge) {
+ /*
+ * Distribute the extra buses between hotplug
+ * bridges if any.
+ */
+ buses = available_buses / hotplug_bridges;
+ buses = min(buses, available_buses - used_buses);
}
+ cmax = max;
+ max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
+ used_buses += max - cmax;
+ }
+
/*
* Make sure a hotplug bridge has at least the minimum requested
- * number of buses.
+ * number of buses but allow it to grow up to the maximum available
+ * bus number if there is room.
*/
- if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
- if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
- max = bus->busn_res.start + pci_hotplug_bus_size - 1;
+ if (bus->self && bus->self->is_hotplug_bridge) {
+ used_buses = max_t(unsigned int, available_buses,
+ pci_hotplug_bus_size - 1);
+ if (max - start < used_buses) {
+ max = start + used_buses;
+
+ /* Do not allocate more buses than we have room left */
+ if (max > bus->busn_res.end)
+ max = bus->busn_res.end;
+
+ dev_dbg(&bus->dev, "%pR extended by %#02x\n",
+ &bus->busn_res, max - start);
+ }
}
/*
@@ -2445,6 +2557,18 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
return max;
}
+
+/**
+ * pci_scan_child_bus() - Scan devices below a bus
+ * @bus: Bus to scan for devices
+ *
+ * Scans devices below @bus including subordinate buses. Returns new
+ * subordinate number including all the found devices.
+ */
+unsigned int pci_scan_child_bus(struct pci_bus *bus)
+{
+ return pci_scan_child_bus_extend(bus, 0);
+}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
/**
@@ -2737,3 +2861,38 @@ void __init pci_sort_breadthfirst(void)
{
bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}
+
+int pci_hp_add_bridge(struct pci_dev *dev)
+{
+ struct pci_bus *parent = dev->bus;
+ int busnr, start = parent->busn_res.start;
+ unsigned int available_buses = 0;
+ int end = parent->busn_res.end;
+
+ for (busnr = start; busnr <= end; busnr++) {
+ if (!pci_find_bus(pci_domain_nr(parent), busnr))
+ break;
+ }
+ if (busnr-- > end) {
+ dev_err(&dev->dev, "No bus number available for hot-added bridge\n");
+ return -1;
+ }
+
+ /* Scan bridges that are already configured */
+ busnr = pci_scan_bridge(parent, dev, busnr, 0);
+
+ /*
+ * Distribute the available bus numbers between hotplug-capable
+ * bridges to make extending the chain later possible.
+ */
+ available_buses = end - busnr;
+
+ /* Scan bridges that need to be reconfigured */
+ pci_scan_bridge_extend(parent, dev, busnr, available_buses, 1);
+
+ if (!dev->subordinate)
+ return -1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_hp_add_bridge);
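A worked example of the bus-number distribution, using hypothetical numbers rather than anything from this change: a hot-added bridge whose parent has busn_res 0x20-0x3f, where the pass-0 scan left busnr at 0x21 and two hotplug-capable ports sit below.

	unsigned int available = 0x3f - 0x21;		/* 30 spare bus numbers */
	unsigned int per_port  = available / 2;		/* ~15 buses per hotplug port */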
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 911b3b65c8b2..10684b17d0bd 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3366,6 +3366,13 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
+/*
+ * Root ports on some Cavium CN8xxx chips do not successfully complete a bus
+ * reset when used with certain child devices. After the reset, config
+ * accesses to the child may fail.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
+
static void quirk_no_pm_reset(struct pci_dev *dev)
{
/*
@@ -4212,17 +4219,32 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
#endif
}
+static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
+{
+ /*
+ * Effectively selects all downstream ports of the whole ThunderX 1
+ * family via the 0xf800 mask (which represents 8 SoCs), while the
+ * lower bits of the device ID indicate which subdevice is used
+ * within the SoC.
+ */
+ return (pci_is_pcie(dev) &&
+ (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) &&
+ ((dev->device & 0xf800) == 0xa000));
+}
+
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
/*
- * Cavium devices matching this quirk do not perform peer-to-peer
- * with other functions, allowing masking out these bits as if they
- * were unimplemented in the ACS capability.
+ * Cavium root ports don't advertise an ACS capability. However,
+ * the RTL internally implements similar protection as if ACS had
+ * Request Redirection, Completion Redirection, Source Validation,
+ * and Upstream Forwarding features enabled. Assert that the
+ * hardware implements and enables equivalent ACS functionality for
+ * these flags.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
- PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+ acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
- if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff)))
+ if (!pci_quirk_cavium_acs_match(dev))
return -ENOTTY;
return acs_flags ? 0 : 1;
@@ -4800,3 +4822,11 @@ static void quirk_no_ats(struct pci_dev *pdev)
/* AMD Stoney platform GPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
#endif /* CONFIG_PCI_ATS */
+
+/* Freescale PCIe doesn't support MSI in RC mode */
+static void quirk_fsl_no_msi(struct pci_dev *pdev)
+{
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
+ pdev->no_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 73a03d382590..2fa0dbde36b7 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -19,9 +19,9 @@ static void pci_stop_dev(struct pci_dev *dev)
pci_pme_active(dev, false);
if (dev->is_added) {
+ device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
- device_release_driver(&dev->dev);
dev->is_added = 0;
}
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index b6edb187d160..1f5e6af96c83 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -147,12 +147,8 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
return NULL;
rom = ioremap(start, *size);
- if (!rom) {
- /* restore enable if ioremap fails */
- if (!(res->flags & IORESOURCE_ROM_ENABLE))
- pci_disable_rom(pdev);
- return NULL;
- }
+ if (!rom)
+ goto err_ioremap;
/*
* Try to find the true size of the ROM since sometimes the PCI window
@@ -160,7 +156,18 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
* True size is important if the ROM is going to be copied.
*/
*size = pci_get_rom_size(pdev, rom, *size);
+ if (!*size)
+ goto invalid_rom;
+
return rom;
+
+invalid_rom:
+ iounmap(rom);
+err_ioremap:
+ /* restore enable if ioremap fails */
+ if (!(res->flags & IORESOURCE_ROM_ENABLE))
+ pci_disable_rom(pdev);
+ return NULL;
}
EXPORT_SYMBOL(pci_map_rom);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 958da7db9033..b1ad466199ad 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1518,13 +1518,16 @@ static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
break;
}
}
+
+#define PCI_RES_TYPE_MASK \
+ (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\
+ IORESOURCE_MEM_64)
+
static void pci_bridge_release_resources(struct pci_bus *bus,
unsigned long type)
{
struct pci_dev *dev = bus->self;
struct resource *r;
- unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
unsigned old_flags = 0;
struct resource *b_res;
int idx = 1;
@@ -1567,7 +1570,7 @@ static void pci_bridge_release_resources(struct pci_bus *bus,
*/
release_child_resources(r);
if (!release_resource(r)) {
- type = old_flags = r->flags & type_mask;
+ type = old_flags = r->flags & PCI_RES_TYPE_MASK;
dev_printk(KERN_DEBUG, &dev->dev, "resource %d %pR released\n",
PCI_BRIDGE_RESOURCES + idx, r);
/* keep the old size */
@@ -1758,8 +1761,6 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
enum release_type rel_type = leaf_only;
LIST_HEAD(fail_head);
struct pci_dev_resource *fail_res;
- unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
int pci_try_num = 1;
enum enable_type enable_local;
@@ -1818,7 +1819,7 @@ again:
*/
list_for_each_entry(fail_res, &fail_head, list)
pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & type_mask,
+ fail_res->flags & PCI_RES_TYPE_MASK,
rel_type);
/* restore size and flags */
@@ -1853,6 +1854,175 @@ void __init pci_assign_unassigned_resources(void)
}
}
+static void extend_bridge_window(struct pci_dev *bridge, struct resource *res,
+ struct list_head *add_list, resource_size_t available)
+{
+ struct pci_dev_resource *dev_res;
+
+ if (res->parent)
+ return;
+
+ if (resource_size(res) >= available)
+ return;
+
+ dev_res = res_to_dev_res(add_list, res);
+ if (!dev_res)
+ return;
+
+ /* Is there room to extend the window? */
+ if (available - resource_size(res) <= dev_res->add_size)
+ return;
+
+ dev_res->add_size = available - resource_size(res);
+ dev_dbg(&bridge->dev, "bridge window %pR extended by %pa\n", res,
+ &dev_res->add_size);
+}
+
+static void pci_bus_distribute_available_resources(struct pci_bus *bus,
+ struct list_head *add_list, resource_size_t available_io,
+ resource_size_t available_mmio, resource_size_t available_mmio_pref)
+{
+ resource_size_t remaining_io, remaining_mmio, remaining_mmio_pref;
+ unsigned int normal_bridges = 0, hotplug_bridges = 0;
+ struct resource *io_res, *mmio_res, *mmio_pref_res;
+ struct pci_dev *dev, *bridge = bus->self;
+
+ io_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
+ mmio_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
+ mmio_pref_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
+
+ /*
+ * Update additional resource list (add_list) to fill all the
+ * extra resource space available for this port except the space
+ * calculated in __pci_bus_size_bridges() which covers all the
+ * devices currently connected to the port and below.
+ */
+ extend_bridge_window(bridge, io_res, add_list, available_io);
+ extend_bridge_window(bridge, mmio_res, add_list, available_mmio);
+ extend_bridge_window(bridge, mmio_pref_res, add_list,
+ available_mmio_pref);
+
+ /*
+ * Calculate the total amount of extra resource space we can
+ * pass to bridges below this one. This is basically the
+ * extra space reduced by the minimal required space for the
+ * non-hotplug bridges.
+ */
+ remaining_io = available_io;
+ remaining_mmio = available_mmio;
+ remaining_mmio_pref = available_mmio_pref;
+
+ /*
+ * Calculate how many hotplug bridges and normal bridges there
+ * are on this bus. We will distribute the additional available
+ * resources between hotplug bridges.
+ */
+ for_each_pci_bridge(dev, bus) {
+ if (dev->is_hotplug_bridge)
+ hotplug_bridges++;
+ else
+ normal_bridges++;
+ }
+
+ for_each_pci_bridge(dev, bus) {
+ const struct resource *res;
+
+ if (dev->is_hotplug_bridge)
+ continue;
+
+ /*
+ * Reduce the available resource space by what the
+ * bridge and devices below it occupy.
+ */
+ res = &dev->resource[PCI_BRIDGE_RESOURCES + 0];
+ if (!res->parent && available_io > resource_size(res))
+ remaining_io -= resource_size(res);
+
+ res = &dev->resource[PCI_BRIDGE_RESOURCES + 1];
+ if (!res->parent && available_mmio > resource_size(res))
+ remaining_mmio -= resource_size(res);
+
+ res = &dev->resource[PCI_BRIDGE_RESOURCES + 2];
+ if (!res->parent && available_mmio_pref > resource_size(res))
+ remaining_mmio_pref -= resource_size(res);
+ }
+
+ /*
+ * Go over devices on this bus and distribute the remaining
+ * resource space between hotplug bridges.
+ */
+ for_each_pci_bridge(dev, bus) {
+ struct pci_bus *b;
+
+ b = dev->subordinate;
+ if (!b)
+ continue;
+
+ if (!hotplug_bridges && normal_bridges == 1) {
+ /*
+ * There is only one bridge on the bus (upstream
+ * port) so it gets all available resources
+ * which it can then distribute to the possible
+ * hotplug bridges below.
+ */
+ pci_bus_distribute_available_resources(b, add_list,
+ available_io, available_mmio,
+ available_mmio_pref);
+ } else if (dev->is_hotplug_bridge) {
+ resource_size_t align, io, mmio, mmio_pref;
+
+ /*
+ * Distribute available extra resources equally
+ * between hotplug-capable downstream ports
+ * taking alignment into account.
+ *
+ * Here hotplug_bridges is always != 0.
+ */
+ align = pci_resource_alignment(bridge, io_res);
+ io = div64_ul(available_io, hotplug_bridges);
+ io = min(ALIGN(io, align), remaining_io);
+ remaining_io -= io;
+
+ align = pci_resource_alignment(bridge, mmio_res);
+ mmio = div64_ul(available_mmio, hotplug_bridges);
+ mmio = min(ALIGN(mmio, align), remaining_mmio);
+ remaining_mmio -= mmio;
+
+ align = pci_resource_alignment(bridge, mmio_pref_res);
+ mmio_pref = div64_ul(available_mmio_pref,
+ hotplug_bridges);
+ mmio_pref = min(ALIGN(mmio_pref, align),
+ remaining_mmio_pref);
+ remaining_mmio_pref -= mmio_pref;
+
+ pci_bus_distribute_available_resources(b, add_list, io,
+ mmio, mmio_pref);
+ }
+ }
+}
+
+static void
+pci_bridge_distribute_available_resources(struct pci_dev *bridge,
+ struct list_head *add_list)
+{
+ resource_size_t available_io, available_mmio, available_mmio_pref;
+ const struct resource *res;
+
+ if (!bridge->is_hotplug_bridge)
+ return;
+
+ /* Take the initial extra resources from the hotplug port */
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
+ available_io = resource_size(res);
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
+ available_mmio = resource_size(res);
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
+ available_mmio_pref = resource_size(res);
+
+ pci_bus_distribute_available_resources(bridge->subordinate,
+ add_list, available_io, available_mmio, available_mmio_pref);
+}
+
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
struct pci_bus *parent = bridge->subordinate;
@@ -1862,11 +2032,17 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
LIST_HEAD(fail_head);
struct pci_dev_resource *fail_res;
int retval;
- unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
again:
__pci_bus_size_bridges(parent, &add_list);
+
+ /*
+ * Distribute remaining resources (if any) equally between
+ * hotplug bridges below. This makes it possible to extend the
+ * hierarchy later without running out of resources.
+ */
+ pci_bridge_distribute_available_resources(bridge, &add_list);
+
__pci_bridge_assign_resources(bridge, &add_list, &fail_head);
BUG_ON(!list_empty(&add_list));
tried_times++;
@@ -1889,7 +2065,7 @@ again:
*/
list_for_each_entry(fail_res, &fail_head, list)
pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & type_mask,
+ fail_res->flags & PCI_RES_TYPE_MASK,
whole_subtree);
/* restore size and flags */
@@ -1914,6 +2090,104 @@ enable_all:
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
+int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
+{
+ struct pci_dev_resource *dev_res;
+ struct pci_dev *next;
+ LIST_HEAD(saved);
+ LIST_HEAD(added);
+ LIST_HEAD(failed);
+ unsigned int i;
+ int ret;
+
+ /* Walk to the root hub, releasing bridge BARs when possible */
+ next = bridge;
+ do {
+ bridge = next;
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END;
+ i++) {
+ struct resource *res = &bridge->resource[i];
+
+ if ((res->flags ^ type) & PCI_RES_TYPE_MASK)
+ continue;
+
+ /* Ignore BARs which are still in use */
+ if (res->child)
+ continue;
+
+ ret = add_to_list(&saved, bridge, res, 0, 0);
+ if (ret)
+ goto cleanup;
+
+ dev_info(&bridge->dev, "BAR %d: releasing %pR\n",
+ i, res);
+
+ if (res->parent)
+ release_resource(res);
+ res->start = 0;
+ res->end = 0;
+ break;
+ }
+ if (i == PCI_BRIDGE_RESOURCE_END)
+ break;
+
+ next = bridge->bus ? bridge->bus->self : NULL;
+ } while (next);
+
+ if (list_empty(&saved))
+ return -ENOENT;
+
+ __pci_bus_size_bridges(bridge->subordinate, &added);
+ __pci_bridge_assign_resources(bridge, &added, &failed);
+ BUG_ON(!list_empty(&added));
+
+ if (!list_empty(&failed)) {
+ ret = -ENOSPC;
+ goto cleanup;
+ }
+
+ list_for_each_entry(dev_res, &saved, list) {
+ /* Skip the bridge we just assigned resources for. */
+ if (bridge == dev_res->dev)
+ continue;
+
+ bridge = dev_res->dev;
+ pci_setup_bridge(bridge->subordinate);
+ }
+
+ free_list(&saved);
+ return 0;
+
+cleanup:
+ /* restore size and flags */
+ list_for_each_entry(dev_res, &failed, list) {
+ struct resource *res = dev_res->res;
+
+ res->start = dev_res->start;
+ res->end = dev_res->end;
+ res->flags = dev_res->flags;
+ }
+ free_list(&failed);
+
+ /* Revert to the old configuration */
+ list_for_each_entry(dev_res, &saved, list) {
+ struct resource *res = dev_res->res;
+
+ bridge = dev_res->dev;
+ i = res - bridge->resource;
+
+ res->start = dev_res->start;
+ res->end = dev_res->end;
+ res->flags = dev_res->flags;
+
+ pci_claim_resource(bridge, i);
+ pci_setup_bridge(bridge->subordinate);
+ }
+ free_list(&saved);
+
+ return ret;
+}
+
void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -1921,10 +2195,9 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
want additional resources */
down_read(&pci_bus_sem);
- list_for_each_entry(dev, &bus->devices, bus_list)
- if (pci_is_bridge(dev) && pci_has_subordinate(dev))
- __pci_bus_size_bridges(dev->subordinate,
- &add_list);
+ for_each_pci_bridge(dev, bus)
+ if (pci_has_subordinate(dev))
+ __pci_bus_size_bridges(dev->subordinate, &add_list);
up_read(&pci_bus_sem);
__pci_bus_assign_resources(bus, &add_list, NULL);
BUG_ON(!list_empty(&add_list));
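The per-port arithmetic in pci_bus_distribute_available_resources() can be illustrated with hypothetical numbers (nothing below is taken from the change itself): 12 MB of spare non-prefetchable MMIO behind a hotplug port with three hotplug-capable downstream ports and 1 MB window alignment.

	resource_size_t spare = 12 * SZ_1M;
	resource_size_t mmio  = div64_ul(spare, 3);	/* 4 MB per port */

	mmio = ALIGN(mmio, SZ_1M);			/* already aligned, stays 4 MB */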
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index c039149cacb0..e815111f3f81 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -397,6 +397,64 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
return 0;
}
+void pci_release_resource(struct pci_dev *dev, int resno)
+{
+ struct resource *res = dev->resource + resno;
+
+ dev_info(&dev->dev, "BAR %d: releasing %pR\n", resno, res);
+ release_resource(res);
+ res->end = resource_size(res) - 1;
+ res->start = 0;
+ res->flags |= IORESOURCE_UNSET;
+}
+EXPORT_SYMBOL(pci_release_resource);
+
+int pci_resize_resource(struct pci_dev *dev, int resno, int size)
+{
+ struct resource *res = dev->resource + resno;
+ int old, ret;
+ u32 sizes;
+ u16 cmd;
+
+ /* Make sure the resource isn't assigned before resizing it. */
+ if (!(res->flags & IORESOURCE_UNSET))
+ return -EBUSY;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_MEMORY)
+ return -EBUSY;
+
+ sizes = pci_rebar_get_possible_sizes(dev, resno);
+ if (!sizes)
+ return -ENOTSUPP;
+
+ if (!(sizes & BIT(size)))
+ return -EINVAL;
+
+ old = pci_rebar_get_current_size(dev, resno);
+ if (old < 0)
+ return old;
+
+ ret = pci_rebar_set_size(dev, resno, size);
+ if (ret)
+ return ret;
+
+ res->end = res->start + pci_rebar_size_to_bytes(size) - 1;
+
+ /* Check if the new config works by trying to assign everything. */
+ ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
+ if (ret)
+ goto error_resize;
+
+ return 0;
+
+error_resize:
+ pci_rebar_set_size(dev, resno, old);
+ res->end = res->start + pci_rebar_size_to_bytes(old) - 1;
+ return ret;
+}
+EXPORT_SYMBOL(pci_resize_resource);
+
int pci_enable_resources(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
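A sketch of how an endpoint driver might use the new resize interface, with a hypothetical "foo" driver, BAR 0 and size 8 (1 MB << 8 = 256 MB) chosen purely for illustration; memory decoding has to be off and the BAR released before resizing, as enforced by pci_resize_resource() above:

static int foo_resize_bar0(struct pci_dev *pdev)
{
	u16 cmd;
	int ret;

	/* Memory decoding must be disabled while the BAR is resized */
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(pdev, PCI_COMMAND, cmd & ~PCI_COMMAND_MEMORY);

	pci_release_resource(pdev, 0);
	ret = pci_resize_resource(pdev, 0, 8);

	/* Reassign whatever the resize moved, then restore decoding */
	pci_assign_unassigned_bus_resources(pdev->bus);
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	return ret;
}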
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index af81b2dec42e..da45dbea20ce 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -943,7 +943,7 @@ static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
-const struct event_reg {
+static const struct event_reg {
size_t offset;
u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
size_t offset, int index);
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 4fe4cc4ae19a..5c0170597037 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -77,9 +77,8 @@ int __ref cb_alloc(struct pcmcia_socket *s)
max = bus->busn_res.start;
for (pass = 0; pass < 2; pass++)
- list_for_each_entry(dev, &bus->devices, bus_list)
- if (pci_is_bridge(dev))
- max = pci_scan_bridge(bus, dev, max, pass);
+ for_each_pci_bridge(dev, bus)
+ max = pci_scan_bridge(bus, dev, max, pass);
/*
* Size all resources below the CardBus controller.
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 55ef7d1fd8da..102646fedb56 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -1599,7 +1599,7 @@ static ssize_t pccard_store_cis(struct file *filp, struct kobject *kobj,
}
-struct bin_attribute pccard_cis_attr = {
+const struct bin_attribute pccard_cis_attr = {
.attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR },
.size = 0x200,
.read = pccard_show_cis,
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index e86cd6b31773..6765beadea95 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -152,7 +152,7 @@ void pcmcia_cleanup_irq(struct pcmcia_socket *s);
int pcmcia_setup_irq(struct pcmcia_device *p_dev);
/* cistpl.c */
-extern struct bin_attribute pccard_cis_attr;
+extern const struct bin_attribute pccard_cis_attr;
int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr,
u_int addr, u_int len, void *ptr);
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 70b089430fcc..9a4940e56e2f 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -380,11 +380,10 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
return IRQ_RETVAL(handled);
} /* pcc_interrupt */
-static void pcc_interrupt_wrapper(u_long data)
+static void pcc_interrupt_wrapper(struct timer_list *unused)
{
pr_debug("m32r_cfc: pcc_interrupt_wrapper:\n");
pcc_interrupt(0, NULL);
- init_timer(&poll_timer);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
@@ -758,9 +757,7 @@ static int __init init_m32r_pcc(void)
/* Finally, schedule a polling interrupt */
if (poll_interval != 0) {
- poll_timer.function = pcc_interrupt_wrapper;
- poll_timer.data = 0;
- init_timer(&poll_timer);
+ timer_setup(&poll_timer, pcc_interrupt_wrapper, 0);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index e50bbf826188..c2239a7e383a 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -386,10 +386,9 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
return IRQ_RETVAL(handled);
} /* pcc_interrupt */
-static void pcc_interrupt_wrapper(u_long data)
+static void pcc_interrupt_wrapper(struct timer_list *unused)
{
pcc_interrupt(0, NULL);
- init_timer(&poll_timer);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
@@ -729,9 +728,7 @@ static int __init init_m32r_pcc(void)
/* Finally, schedule a polling interrupt */
if (poll_interval != 0) {
- poll_timer.function = pcc_interrupt_wrapper;
- poll_timer.data = 0;
- init_timer(&poll_timer);
+ timer_setup(&poll_timer, pcc_interrupt_wrapper, 0);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index e5197ffb7422..b8f44b068fc6 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -17,6 +17,13 @@ config ARM_PMU_ACPI
depends on ARM_PMU && ACPI
def_bool y
+config HISI_PMU
+ bool "HiSilicon SoC PMU"
+ depends on ARM64 && ACPI
+ help
+ Support for HiSilicon SoC uncore performance monitoring
+ units (PMU), such as L3C, HHA and DDRC.
+
config QCOM_L2_PMU
bool "Qualcomm Technologies L2-cache PMU"
depends on ARCH_QCOM && ARM64 && ACPI
@@ -43,4 +50,12 @@ config XGENE_PMU
help
Say y if you want to use APM X-Gene SoC performance monitors.
+config ARM_SPE_PMU
+ tristate "Enable support for the ARMv8.2 Statistical Profiling Extension"
+ depends on PERF_EVENTS && ARM64
+ help
+ Enable perf support for the ARMv8.2 Statistical Profiling
+ Extension, which provides periodic sampling of operations in
+ the CPU pipeline and reports this via the perf AUX interface.
+
endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 9402dc8ff22c..710a0135bd61 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
+obj-$(CONFIG_HISI_PMU) += hisilicon/
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
+obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index d14fc2e67f93..7bc5eee96b31 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -539,7 +539,7 @@ void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
return;
- if (irq_is_percpu(irq)) {
+ if (irq_is_percpu_devid(irq)) {
free_percpu_irq(irq, &hw_events->percpu_pmu);
cpumask_clear(&armpmu->active_irqs);
return;
@@ -565,10 +565,10 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
if (!irq)
return 0;
- if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
+ if (irq_is_percpu_devid(irq) && cpumask_empty(&armpmu->active_irqs)) {
err = request_percpu_irq(irq, handler, "arm-pmu",
&hw_events->percpu_pmu);
- } else if (irq_is_percpu(irq)) {
+ } else if (irq_is_percpu_devid(irq)) {
int other_cpu = cpumask_first(&armpmu->active_irqs);
int other_irq = per_cpu(hw_events->irq, other_cpu);
@@ -649,7 +649,7 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
irq = armpmu_get_cpu_irq(pmu, cpu);
if (irq) {
- if (irq_is_percpu(irq)) {
+ if (irq_is_percpu_devid(irq)) {
enable_percpu_irq(irq, IRQ_TYPE_NONE);
return 0;
}
@@ -667,7 +667,7 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
irq = armpmu_get_cpu_irq(pmu, cpu);
- if (irq && irq_is_percpu(irq))
+ if (irq && irq_is_percpu_devid(irq))
disable_percpu_irq(irq);
return 0;
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 3303dd8d8eb5..705f1a390e31 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -193,9 +193,6 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
int pmu_idx = 0;
int cpu, ret;
- if (acpi_disabled)
- return 0;
-
/*
* Initialise and register the set of PMUs which we know about right
* now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 4428852e1da1..91b224eced18 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -127,7 +127,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
if (num_irqs == 1) {
int irq = platform_get_irq(pdev, 0);
- if (irq && irq_is_percpu(irq))
+ if (irq && irq_is_percpu_devid(irq))
return pmu_parse_percpu_irq(pmu, irq);
}
@@ -150,7 +150,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
if (WARN_ON(irq <= 0))
continue;
- if (irq_is_percpu(irq)) {
+ if (irq_is_percpu_devid(irq)) {
pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
return -EINVAL;
}
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
new file mode 100644
index 000000000000..8ce262fc2561
--- /dev/null
+++ b/drivers/perf/arm_spe_pmu.c
@@ -0,0 +1,1249 @@
+/*
+ * Perf support for the Statistical Profiling Extension, introduced as
+ * part of ARMv8.2.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2016 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define PMUNAME "arm_spe"
+#define DRVNAME PMUNAME "_pmu"
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/sysreg.h>
+
+#define ARM_SPE_BUF_PAD_BYTE 0
+
+struct arm_spe_pmu_buf {
+ int nr_pages;
+ bool snapshot;
+ void *base;
+};
+
+struct arm_spe_pmu {
+ struct pmu pmu;
+ struct platform_device *pdev;
+ cpumask_t supported_cpus;
+ struct hlist_node hotplug_node;
+
+ int irq; /* PPI */
+
+ u16 min_period;
+ u16 counter_sz;
+
+#define SPE_PMU_FEAT_FILT_EVT (1UL << 0)
+#define SPE_PMU_FEAT_FILT_TYP (1UL << 1)
+#define SPE_PMU_FEAT_FILT_LAT (1UL << 2)
+#define SPE_PMU_FEAT_ARCH_INST (1UL << 3)
+#define SPE_PMU_FEAT_LDS (1UL << 4)
+#define SPE_PMU_FEAT_ERND (1UL << 5)
+#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63)
+ u64 features;
+
+ u16 max_record_sz;
+ u16 align;
+ struct perf_output_handle __percpu *handle;
+};
+
+#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))
+
+/* Convert a free-running index from perf into an SPE buffer offset */
+#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
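+
+/*
+ * Illustrative example (assuming 4KiB pages): with an 8-page AUX buffer
+ * (32KiB), the free-running index 0x9000 wraps to buffer offset 0x1000.
+ */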
+
+/* Keep track of our dynamic hotplug state */
+static enum cpuhp_state arm_spe_pmu_online;
+
+enum arm_spe_pmu_buf_fault_action {
+ SPE_PMU_BUF_FAULT_ACT_SPURIOUS,
+ SPE_PMU_BUF_FAULT_ACT_FATAL,
+ SPE_PMU_BUF_FAULT_ACT_OK,
+};
+
+/* This sysfs gunk was really good fun to write. */
+enum arm_spe_pmu_capabilities {
+ SPE_PMU_CAP_ARCH_INST = 0,
+ SPE_PMU_CAP_ERND,
+ SPE_PMU_CAP_FEAT_MAX,
+ SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX,
+ SPE_PMU_CAP_MIN_IVAL,
+};
+
+static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
+ [SPE_PMU_CAP_ARCH_INST] = SPE_PMU_FEAT_ARCH_INST,
+ [SPE_PMU_CAP_ERND] = SPE_PMU_FEAT_ERND,
+};
+
+static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
+{
+ if (cap < SPE_PMU_CAP_FEAT_MAX)
+ return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]);
+
+ switch (cap) {
+ case SPE_PMU_CAP_CNT_SZ:
+ return spe_pmu->counter_sz;
+ case SPE_PMU_CAP_MIN_IVAL:
+ return spe_pmu->min_period;
+ default:
+ WARN(1, "unknown cap %d\n", cap);
+ }
+
+ return 0;
+}
+
+static ssize_t arm_spe_pmu_cap_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
+ struct dev_ext_attribute *ea =
+ container_of(attr, struct dev_ext_attribute, attr);
+ int cap = (long)ea->var;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ arm_spe_pmu_cap_get(spe_pmu, cap));
+}
+
+#define SPE_EXT_ATTR_ENTRY(_name, _func, _var) \
+ &((struct dev_ext_attribute[]) { \
+ { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_var } \
+ })[0].attr.attr
+
+#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var) \
+ SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var)
+
+static struct attribute *arm_spe_pmu_cap_attr[] = {
+ SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST),
+ SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND),
+ SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ),
+ SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL),
+ NULL,
+};
+
+static struct attribute_group arm_spe_pmu_cap_group = {
+ .name = "caps",
+ .attrs = arm_spe_pmu_cap_attr,
+};
+
+/* User ABI */
+#define ATTR_CFG_FLD_ts_enable_CFG config /* PMSCR_EL1.TS */
+#define ATTR_CFG_FLD_ts_enable_LO 0
+#define ATTR_CFG_FLD_ts_enable_HI 0
+#define ATTR_CFG_FLD_pa_enable_CFG config /* PMSCR_EL1.PA */
+#define ATTR_CFG_FLD_pa_enable_LO 1
+#define ATTR_CFG_FLD_pa_enable_HI 1
+#define ATTR_CFG_FLD_pct_enable_CFG config /* PMSCR_EL1.PCT */
+#define ATTR_CFG_FLD_pct_enable_LO 2
+#define ATTR_CFG_FLD_pct_enable_HI 2
+#define ATTR_CFG_FLD_jitter_CFG config /* PMSIRR_EL1.RND */
+#define ATTR_CFG_FLD_jitter_LO 16
+#define ATTR_CFG_FLD_jitter_HI 16
+#define ATTR_CFG_FLD_branch_filter_CFG config /* PMSFCR_EL1.B */
+#define ATTR_CFG_FLD_branch_filter_LO 32
+#define ATTR_CFG_FLD_branch_filter_HI 32
+#define ATTR_CFG_FLD_load_filter_CFG config /* PMSFCR_EL1.LD */
+#define ATTR_CFG_FLD_load_filter_LO 33
+#define ATTR_CFG_FLD_load_filter_HI 33
+#define ATTR_CFG_FLD_store_filter_CFG config /* PMSFCR_EL1.ST */
+#define ATTR_CFG_FLD_store_filter_LO 34
+#define ATTR_CFG_FLD_store_filter_HI 34
+
+#define ATTR_CFG_FLD_event_filter_CFG config1 /* PMSEVFR_EL1 */
+#define ATTR_CFG_FLD_event_filter_LO 0
+#define ATTR_CFG_FLD_event_filter_HI 63
+
+#define ATTR_CFG_FLD_min_latency_CFG config2 /* PMSLATFR_EL1.MINLAT */
+#define ATTR_CFG_FLD_min_latency_LO 0
+#define ATTR_CFG_FLD_min_latency_HI 11
+
+/* Why does everything I do descend into this? */
+#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
+
+#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
+
+#define GEN_PMU_FORMAT_ATTR(name) \
+ PMU_FORMAT_ATTR(name, \
+ _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI))
+
+#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
+ ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
+
+#define ATTR_CFG_GET_FLD(attr, name) \
+ _ATTR_CFG_GET_FLD(attr, \
+ ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI)
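+
+/*
+ * For example, the macros above expand the single-bit "jitter" field to
+ * the sysfs format string "config:16" and "event_filter" to
+ * "config1:0-63"; ATTR_CFG_GET_FLD(attr, jitter) then extracts bit 16
+ * of attr->config.
+ */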
+
+GEN_PMU_FORMAT_ATTR(ts_enable);
+GEN_PMU_FORMAT_ATTR(pa_enable);
+GEN_PMU_FORMAT_ATTR(pct_enable);
+GEN_PMU_FORMAT_ATTR(jitter);
+GEN_PMU_FORMAT_ATTR(branch_filter);
+GEN_PMU_FORMAT_ATTR(load_filter);
+GEN_PMU_FORMAT_ATTR(store_filter);
+GEN_PMU_FORMAT_ATTR(event_filter);
+GEN_PMU_FORMAT_ATTR(min_latency);
+
+static struct attribute *arm_spe_pmu_formats_attr[] = {
+ &format_attr_ts_enable.attr,
+ &format_attr_pa_enable.attr,
+ &format_attr_pct_enable.attr,
+ &format_attr_jitter.attr,
+ &format_attr_branch_filter.attr,
+ &format_attr_load_filter.attr,
+ &format_attr_store_filter.attr,
+ &format_attr_event_filter.attr,
+ &format_attr_min_latency.attr,
+ NULL,
+};
+
+static struct attribute_group arm_spe_pmu_format_group = {
+ .name = "format",
+ .attrs = arm_spe_pmu_formats_attr,
+};
+
+static ssize_t arm_spe_pmu_get_attr_cpumask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
+
+ return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus);
+}
+static DEVICE_ATTR(cpumask, S_IRUGO, arm_spe_pmu_get_attr_cpumask, NULL);
+
+static struct attribute *arm_spe_pmu_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group arm_spe_pmu_group = {
+ .attrs = arm_spe_pmu_attrs,
+};
+
+static const struct attribute_group *arm_spe_pmu_attr_groups[] = {
+ &arm_spe_pmu_group,
+ &arm_spe_pmu_cap_group,
+ &arm_spe_pmu_format_group,
+ NULL,
+};
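+
+/*
+ * With the format attributes above, a user-space session might look like
+ *   perf record -c 4096 -e arm_spe_0/ts_enable=1,load_filter=1,min_latency=64/ -- <workload>
+ * which sets PMSCR_EL1.TS, PMSFCR_EL1.LD and PMSLATFR_EL1.MINLAT
+ * respectively (the "arm_spe_0" name comes from PMUNAME plus an index,
+ * and a fixed sample period is required because attr->freq is rejected).
+ */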
+
+/* Convert between user ABI and register values */
+static u64 arm_spe_event_to_pmscr(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ u64 reg = 0;
+
+ reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << SYS_PMSCR_EL1_TS_SHIFT;
+ reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << SYS_PMSCR_EL1_PA_SHIFT;
+ reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << SYS_PMSCR_EL1_PCT_SHIFT;
+
+ if (!attr->exclude_user)
+ reg |= BIT(SYS_PMSCR_EL1_E0SPE_SHIFT);
+
+ if (!attr->exclude_kernel)
+ reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);
+
+ if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && capable(CAP_SYS_ADMIN))
+ reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);
+
+ return reg;
+}
+
+static void arm_spe_event_sanitise_period(struct perf_event *event)
+{
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+ u64 period = event->hw.sample_period;
+ u64 max_period = SYS_PMSIRR_EL1_INTERVAL_MASK
+ << SYS_PMSIRR_EL1_INTERVAL_SHIFT;
+
+ if (period < spe_pmu->min_period)
+ period = spe_pmu->min_period;
+ else if (period > max_period)
+ period = max_period;
+ else
+ period &= max_period;
+
+ event->hw.sample_period = period;
+}
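+
+/*
+ * Example of the clamping above: with min_period == 256, a requested
+ * period of 100 is raised to 256, while a period of 1000 keeps only the
+ * bits accepted by PMSIRR_EL1.INTERVAL (bits [31:8]) and becomes 768.
+ */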
+
+static u64 arm_spe_event_to_pmsirr(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ u64 reg = 0;
+
+ arm_spe_event_sanitise_period(event);
+
+ reg |= ATTR_CFG_GET_FLD(attr, jitter) << SYS_PMSIRR_EL1_RND_SHIFT;
+ reg |= event->hw.sample_period;
+
+ return reg;
+}
+
+static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ u64 reg = 0;
+
+ reg |= ATTR_CFG_GET_FLD(attr, load_filter) << SYS_PMSFCR_EL1_LD_SHIFT;
+ reg |= ATTR_CFG_GET_FLD(attr, store_filter) << SYS_PMSFCR_EL1_ST_SHIFT;
+ reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << SYS_PMSFCR_EL1_B_SHIFT;
+
+ if (reg)
+ reg |= BIT(SYS_PMSFCR_EL1_FT_SHIFT);
+
+ if (ATTR_CFG_GET_FLD(attr, event_filter))
+ reg |= BIT(SYS_PMSFCR_EL1_FE_SHIFT);
+
+ if (ATTR_CFG_GET_FLD(attr, min_latency))
+ reg |= BIT(SYS_PMSFCR_EL1_FL_SHIFT);
+
+ return reg;
+}
+
+static u64 arm_spe_event_to_pmsevfr(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ return ATTR_CFG_GET_FLD(attr, event_filter);
+}
+
+static u64 arm_spe_event_to_pmslatfr(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ return ATTR_CFG_GET_FLD(attr, min_latency)
+ << SYS_PMSLATFR_EL1_MINLAT_SHIFT;
+}
+
+static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len)
+{
+ struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+ u64 head = PERF_IDX2OFF(handle->head, buf);
+
+ memset(buf->base + head, ARM_SPE_BUF_PAD_BYTE, len);
+ if (!buf->snapshot)
+ perf_aux_output_skip(handle, len);
+}
+
+static u64 arm_spe_pmu_next_snapshot_off(struct perf_output_handle *handle)
+{
+ struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
+ u64 head = PERF_IDX2OFF(handle->head, buf);
+ u64 limit = buf->nr_pages * PAGE_SIZE;
+
+ /*
+ * The trace format isn't parseable in reverse, so clamp
+ * the limit to half of the buffer size in snapshot mode
+ * so that the worst case is half a buffer of records, as
+ * opposed to a single record.
+ */
+ if (head < limit >> 1)
+ limit >>= 1;
+
+ /*
+ * If we're within max_record_sz of the limit, we must
+ * pad, move the head index and recompute the limit.
+ */
+ if (limit - head < spe_pmu->max_record_sz) {
+ arm_spe_pmu_pad_buf(handle, limit - head);
+ handle->head = PERF_IDX2OFF(limit, buf);
+ limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head;
+ }
+
+ return limit;
+}
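+
+/*
+ * For instance, with a 32KiB snapshot buffer and a max_record_sz of 2KiB:
+ * a head of 0x3F00 is first clamped to the lower half (limit 0x4000);
+ * since only 0x100 bytes remain, that gap is padded, head moves to
+ * 0x4000 and the new limit becomes 0x8000 (the end of the buffer).
+ */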
+
+static u64 __arm_spe_pmu_next_off(struct perf_output_handle *handle)
+{
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
+ struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+ const u64 bufsize = buf->nr_pages * PAGE_SIZE;
+ u64 limit = bufsize;
+ u64 head, tail, wakeup;
+
+ /*
+ * The head can be misaligned for two reasons:
+ *
+ * 1. The hardware left PMBPTR pointing to the first byte after
+ * a record when generating a buffer management event.
+ *
+ * 2. We used perf_aux_output_skip to consume handle->size bytes
+ * and CIRC_SPACE was used to compute the size, which always
+ * leaves one entry free.
+ *
+ * Deal with this by padding to the next alignment boundary and
+ * moving the head index. If we run out of buffer space, we'll
+ * reduce handle->size to zero and end up reporting truncation.
+ */
+ head = PERF_IDX2OFF(handle->head, buf);
+ if (!IS_ALIGNED(head, spe_pmu->align)) {
+ unsigned long delta = roundup(head, spe_pmu->align) - head;
+
+ delta = min(delta, handle->size);
+ arm_spe_pmu_pad_buf(handle, delta);
+ head = PERF_IDX2OFF(handle->head, buf);
+ }
+
+ /* If we've run out of free space, then nothing more to do */
+ if (!handle->size)
+ goto no_space;
+
+ /* Compute the tail and wakeup indices now that we've aligned head */
+ tail = PERF_IDX2OFF(handle->head + handle->size, buf);
+ wakeup = PERF_IDX2OFF(handle->wakeup, buf);
+
+ /*
+ * Avoid clobbering unconsumed data. We know we have space, so
+ * if we see head == tail we know that the buffer is empty. If
+ * head > tail, then there's nothing to clobber prior to
+ * wrapping.
+ */
+ if (head < tail)
+ limit = round_down(tail, PAGE_SIZE);
+
+ /*
+ * Wakeup may be arbitrarily far into the future. If it's not in
+ * the current generation, either we'll wrap before hitting it,
+ * or it's in the past and has been handled already.
+ *
+ * If there's a wakeup before we wrap, arrange to be woken up by
+ * the page boundary following it. Keep the tail boundary if
+ * that's lower.
+ */
+ if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
+ limit = min(limit, round_up(wakeup, PAGE_SIZE));
+
+ if (limit > head)
+ return limit;
+
+ arm_spe_pmu_pad_buf(handle, handle->size);
+no_space:
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ perf_aux_output_end(handle, 0);
+ return 0;
+}
+
+static u64 arm_spe_pmu_next_off(struct perf_output_handle *handle)
+{
+ struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
+ u64 limit = __arm_spe_pmu_next_off(handle);
+ u64 head = PERF_IDX2OFF(handle->head, buf);
+
+ /*
+ * If the head has come too close to the end of the buffer,
+ * then pad to the end and recompute the limit.
+ */
+ if (limit && (limit - head < spe_pmu->max_record_sz)) {
+ arm_spe_pmu_pad_buf(handle, limit - head);
+ limit = __arm_spe_pmu_next_off(handle);
+ }
+
+ return limit;
+}
+
+static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
+ struct perf_event *event)
+{
+ u64 base, limit;
+ struct arm_spe_pmu_buf *buf;
+
+ /* Start a new aux session */
+ buf = perf_aux_output_begin(handle, event);
+ if (!buf) {
+ event->hw.state |= PERF_HES_STOPPED;
+ /*
+ * We still need to clear the limit pointer, since the
+ * profiler might only be disabled by virtue of a fault.
+ */
+ limit = 0;
+ goto out_write_limit;
+ }
+
+ limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle)
+ : arm_spe_pmu_next_off(handle);
+ if (limit)
+ limit |= BIT(SYS_PMBLIMITR_EL1_E_SHIFT);
+
+ limit += (u64)buf->base;
+ base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf);
+ write_sysreg_s(base, SYS_PMBPTR_EL1);
+
+out_write_limit:
+ write_sysreg_s(limit, SYS_PMBLIMITR_EL1);
+}
+
+static void arm_spe_perf_aux_output_end(struct perf_output_handle *handle)
+{
+ struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+ u64 offset, size;
+
+ offset = read_sysreg_s(SYS_PMBPTR_EL1) - (u64)buf->base;
+ size = offset - PERF_IDX2OFF(handle->head, buf);
+
+ if (buf->snapshot)
+ handle->head = offset;
+
+ perf_aux_output_end(handle, size);
+}
+
+static void arm_spe_pmu_disable_and_drain_local(void)
+{
+ /* Disable profiling at EL0 and EL1 */
+ write_sysreg_s(0, SYS_PMSCR_EL1);
+ isb();
+
+ /* Drain any buffered data */
+ psb_csync();
+ dsb(nsh);
+
+ /* Disable the profiling buffer */
+ write_sysreg_s(0, SYS_PMBLIMITR_EL1);
+ isb();
+}
+
+/* IRQ handling */
+static enum arm_spe_pmu_buf_fault_action
+arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle)
+{
+ const char *err_str;
+ u64 pmbsr;
+ enum arm_spe_pmu_buf_fault_action ret;
+
+ /*
+ * Ensure new profiling data is visible to the CPU and any external
+ * aborts have been resolved.
+ */
+ psb_csync();
+ dsb(nsh);
+
+ /* Ensure hardware updates to PMBPTR_EL1 are visible */
+ isb();
+
+ /* Service required? */
+ pmbsr = read_sysreg_s(SYS_PMBSR_EL1);
+ if (!(pmbsr & BIT(SYS_PMBSR_EL1_S_SHIFT)))
+ return SPE_PMU_BUF_FAULT_ACT_SPURIOUS;
+
+ /*
+ * If we've lost data, disable profiling and also set the PARTIAL
+ * flag to indicate that the last record is corrupted.
+ */
+ if (pmbsr & BIT(SYS_PMBSR_EL1_DL_SHIFT))
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED |
+ PERF_AUX_FLAG_PARTIAL);
+
+ /* Report collisions to userspace so that it can up the period */
+ if (pmbsr & BIT(SYS_PMBSR_EL1_COLL_SHIFT))
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
+
+ /* We only expect buffer management events */
+ switch (pmbsr & (SYS_PMBSR_EL1_EC_MASK << SYS_PMBSR_EL1_EC_SHIFT)) {
+ case SYS_PMBSR_EL1_EC_BUF:
+ /* Handled below */
+ break;
+ case SYS_PMBSR_EL1_EC_FAULT_S1:
+ case SYS_PMBSR_EL1_EC_FAULT_S2:
+ err_str = "Unexpected buffer fault";
+ goto out_err;
+ default:
+ err_str = "Unknown error code";
+ goto out_err;
+ }
+
+ /* Buffer management event */
+ switch (pmbsr &
+ (SYS_PMBSR_EL1_BUF_BSC_MASK << SYS_PMBSR_EL1_BUF_BSC_SHIFT)) {
+ case SYS_PMBSR_EL1_BUF_BSC_FULL:
+ ret = SPE_PMU_BUF_FAULT_ACT_OK;
+ goto out_stop;
+ default:
+ err_str = "Unknown buffer status code";
+ }
+
+out_err:
+ pr_err_ratelimited("%s on CPU %d [PMBSR=0x%016llx, PMBPTR=0x%016llx, PMBLIMITR=0x%016llx]\n",
+ err_str, smp_processor_id(), pmbsr,
+ read_sysreg_s(SYS_PMBPTR_EL1),
+ read_sysreg_s(SYS_PMBLIMITR_EL1));
+ ret = SPE_PMU_BUF_FAULT_ACT_FATAL;
+
+out_stop:
+ arm_spe_perf_aux_output_end(handle);
+ return ret;
+}
+
+static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
+{
+ struct perf_output_handle *handle = dev;
+ struct perf_event *event = handle->event;
+ enum arm_spe_pmu_buf_fault_action act;
+
+ if (!perf_get_aux(handle))
+ return IRQ_NONE;
+
+ act = arm_spe_pmu_buf_get_fault_act(handle);
+ if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
+ return IRQ_NONE;
+
+ /*
+ * Ensure perf callbacks have completed, which may disable the
+ * profiling buffer in response to a TRUNCATION flag.
+ */
+ irq_work_run();
+
+ switch (act) {
+ case SPE_PMU_BUF_FAULT_ACT_FATAL:
+ /*
+ * If a fatal exception occurred then leaving the profiling
+		 * buffer enabled is a recipe for disaster. Since
+ * fatal faults don't always imply truncation, make sure
+ * that the profiling buffer is disabled explicitly before
+ * clearing the syndrome register.
+ */
+ arm_spe_pmu_disable_and_drain_local();
+ break;
+ case SPE_PMU_BUF_FAULT_ACT_OK:
+ /*
+ * We handled the fault (the buffer was full), so resume
+ * profiling as long as we didn't detect truncation.
+ * PMBPTR might be misaligned, but we'll burn that bridge
+ * when we get to it.
+ */
+ if (!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)) {
+ arm_spe_perf_aux_output_begin(handle, event);
+ isb();
+ }
+ break;
+ case SPE_PMU_BUF_FAULT_ACT_SPURIOUS:
+ /* We've seen you before, but GCC has the memory of a sieve. */
+ break;
+ }
+
+ /* The buffer pointers are now sane, so resume profiling. */
+ write_sysreg_s(0, SYS_PMBSR_EL1);
+ return IRQ_HANDLED;
+}
+
+/* Perf callbacks */
+static int arm_spe_pmu_event_init(struct perf_event *event)
+{
+ u64 reg;
+ struct perf_event_attr *attr = &event->attr;
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+
+ /* This is, of course, deeply driver-specific */
+ if (attr->type != event->pmu->type)
+ return -ENOENT;
+
+ if (event->cpu >= 0 &&
+ !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
+ return -ENOENT;
+
+ if (arm_spe_event_to_pmsevfr(event) & SYS_PMSEVFR_EL1_RES0)
+ return -EOPNOTSUPP;
+
+ if (attr->exclude_idle)
+ return -EOPNOTSUPP;
+
+ /*
+ * Feedback-directed frequency throttling doesn't work when we
+ * have a buffer of samples. We'd need to manually count the
+ * samples in the buffer when it fills up and adjust the event
+ * count to reflect that. Instead, just force the user to specify
+ * a sample period.
+ */
+ if (attr->freq)
+ return -EINVAL;
+
+ reg = arm_spe_event_to_pmsfcr(event);
+ if ((reg & BIT(SYS_PMSFCR_EL1_FE_SHIFT)) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT))
+ return -EOPNOTSUPP;
+
+ if ((reg & BIT(SYS_PMSFCR_EL1_FT_SHIFT)) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP))
+ return -EOPNOTSUPP;
+
+ if ((reg & BIT(SYS_PMSFCR_EL1_FL_SHIFT)) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
+ return -EOPNOTSUPP;
+
+ reg = arm_spe_event_to_pmscr(event);
+ if (!capable(CAP_SYS_ADMIN) &&
+ (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
+ BIT(SYS_PMSCR_EL1_CX_SHIFT) |
+ BIT(SYS_PMSCR_EL1_PCT_SHIFT))))
+ return -EACCES;
+
+ return 0;
+}
+
+static void arm_spe_pmu_start(struct perf_event *event, int flags)
+{
+ u64 reg;
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);
+
+ hwc->state = 0;
+ arm_spe_perf_aux_output_begin(handle, event);
+ if (hwc->state)
+ return;
+
+ reg = arm_spe_event_to_pmsfcr(event);
+ write_sysreg_s(reg, SYS_PMSFCR_EL1);
+
+ reg = arm_spe_event_to_pmsevfr(event);
+ write_sysreg_s(reg, SYS_PMSEVFR_EL1);
+
+ reg = arm_spe_event_to_pmslatfr(event);
+ write_sysreg_s(reg, SYS_PMSLATFR_EL1);
+
+ if (flags & PERF_EF_RELOAD) {
+ reg = arm_spe_event_to_pmsirr(event);
+ write_sysreg_s(reg, SYS_PMSIRR_EL1);
+ isb();
+ reg = local64_read(&hwc->period_left);
+ write_sysreg_s(reg, SYS_PMSICR_EL1);
+ }
+
+ reg = arm_spe_event_to_pmscr(event);
+ isb();
+ write_sysreg_s(reg, SYS_PMSCR_EL1);
+}
+
+static void arm_spe_pmu_stop(struct perf_event *event, int flags)
+{
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);
+
+ /* If we're already stopped, then nothing to do */
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ /* Stop all trace generation */
+ arm_spe_pmu_disable_and_drain_local();
+
+ if (flags & PERF_EF_UPDATE) {
+ /*
+ * If there's a fault pending then ensure we contain it
+ * to this buffer, since we might be on the context-switch
+ * path.
+ */
+ if (perf_get_aux(handle)) {
+ enum arm_spe_pmu_buf_fault_action act;
+
+ act = arm_spe_pmu_buf_get_fault_act(handle);
+ if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
+ arm_spe_perf_aux_output_end(handle);
+ else
+ write_sysreg_s(0, SYS_PMBSR_EL1);
+ }
+
+ /*
+ * This may also contain ECOUNT, but nobody else should
+ * be looking at period_left, since we forbid frequency
+ * based sampling.
+ */
+ local64_set(&hwc->period_left, read_sysreg_s(SYS_PMSICR_EL1));
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+
+ hwc->state |= PERF_HES_STOPPED;
+}
+
+static int arm_spe_pmu_add(struct perf_event *event, int flags)
+{
+ int ret = 0;
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu;
+
+ if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
+ return -ENOENT;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START) {
+ arm_spe_pmu_start(event, PERF_EF_RELOAD);
+ if (hwc->state & PERF_HES_STOPPED)
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void arm_spe_pmu_del(struct perf_event *event, int flags)
+{
+ arm_spe_pmu_stop(event, PERF_EF_UPDATE);
+}
+
+static void arm_spe_pmu_read(struct perf_event *event)
+{
+}
+
+static void *arm_spe_pmu_setup_aux(int cpu, void **pages, int nr_pages,
+ bool snapshot)
+{
+ int i;
+ struct page **pglist;
+ struct arm_spe_pmu_buf *buf;
+
+ /* We need at least two pages for this to work. */
+ if (nr_pages < 2)
+ return NULL;
+
+ /*
+ * We require an even number of pages for snapshot mode, so that
+ * we can effectively treat the buffer as consisting of two equal
+ * parts and give userspace a fighting chance of getting some
+ * useful data out of it.
+ */
+ if (!nr_pages || (snapshot && (nr_pages & 1)))
+ return NULL;
+
+ if (cpu == -1)
+ cpu = raw_smp_processor_id();
+
+ buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
+ if (!buf)
+ return NULL;
+
+ pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
+ if (!pglist)
+ goto out_free_buf;
+
+ for (i = 0; i < nr_pages; ++i) {
+ struct page *page = virt_to_page(pages[i]);
+
+ if (PagePrivate(page)) {
+ pr_warn("unexpected high-order page for auxbuf!");
+ goto out_free_pglist;
+ }
+
+ pglist[i] = virt_to_page(pages[i]);
+ }
+
+ buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!buf->base)
+ goto out_free_pglist;
+
+ buf->nr_pages = nr_pages;
+ buf->snapshot = snapshot;
+
+ kfree(pglist);
+ return buf;
+
+out_free_pglist:
+ kfree(pglist);
+out_free_buf:
+ kfree(buf);
+ return NULL;
+}
+
+static void arm_spe_pmu_free_aux(void *aux)
+{
+ struct arm_spe_pmu_buf *buf = aux;
+
+ vunmap(buf->base);
+ kfree(buf);
+}
+
+/* Initialisation and teardown functions */
+static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
+{
+ static atomic_t pmu_idx = ATOMIC_INIT(-1);
+
+ int idx;
+ char *name;
+ struct device *dev = &spe_pmu->pdev->dev;
+
+ spe_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
+ .attr_groups = arm_spe_pmu_attr_groups,
+ /*
+ * We hitch a ride on the software context here, so that
+ * we can support per-task profiling (which is not possible
+ * with the invalid context as it doesn't get sched callbacks).
+ * This requires that userspace either uses a dummy event for
+ * perf_event_open, since the aux buffer is not setup until
+ * a subsequent mmap, or creates the profiling event in a
+ * disabled state and explicitly PERF_EVENT_IOC_ENABLEs it
+ * once the buffer has been created.
+ */
+ .task_ctx_nr = perf_sw_context,
+ .event_init = arm_spe_pmu_event_init,
+ .add = arm_spe_pmu_add,
+ .del = arm_spe_pmu_del,
+ .start = arm_spe_pmu_start,
+ .stop = arm_spe_pmu_stop,
+ .read = arm_spe_pmu_read,
+ .setup_aux = arm_spe_pmu_setup_aux,
+ .free_aux = arm_spe_pmu_free_aux,
+ };
+
+ idx = atomic_inc_return(&pmu_idx);
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
+ return perf_pmu_register(&spe_pmu->pmu, name, -1);
+}
+
+static void arm_spe_pmu_perf_destroy(struct arm_spe_pmu *spe_pmu)
+{
+ perf_pmu_unregister(&spe_pmu->pmu);
+}
+
+static void __arm_spe_pmu_dev_probe(void *info)
+{
+ int fld;
+ u64 reg;
+ struct arm_spe_pmu *spe_pmu = info;
+ struct device *dev = &spe_pmu->pdev->dev;
+
+ fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
+ ID_AA64DFR0_PMSVER_SHIFT);
+ if (!fld) {
+ dev_err(dev,
+ "unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",
+ fld, smp_processor_id());
+ return;
+ }
+
+ /* Read PMBIDR first to determine whether or not we have access */
+ reg = read_sysreg_s(SYS_PMBIDR_EL1);
+ if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) {
+ dev_err(dev,
+ "profiling buffer owned by higher exception level\n");
+ return;
+ }
+
+ /* Minimum alignment. If it's out-of-range, then fail the probe */
+ fld = reg >> SYS_PMBIDR_EL1_ALIGN_SHIFT & SYS_PMBIDR_EL1_ALIGN_MASK;
+ spe_pmu->align = 1 << fld;
+ if (spe_pmu->align > SZ_2K) {
+ dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n",
+ fld, smp_processor_id());
+ return;
+ }
+
+ /* It's now safe to read PMSIDR and figure out what we've got */
+ reg = read_sysreg_s(SYS_PMSIDR_EL1);
+ if (reg & BIT(SYS_PMSIDR_EL1_FE_SHIFT))
+ spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT;
+
+ if (reg & BIT(SYS_PMSIDR_EL1_FT_SHIFT))
+ spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP;
+
+ if (reg & BIT(SYS_PMSIDR_EL1_FL_SHIFT))
+ spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT;
+
+ if (reg & BIT(SYS_PMSIDR_EL1_ARCHINST_SHIFT))
+ spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST;
+
+ if (reg & BIT(SYS_PMSIDR_EL1_LDS_SHIFT))
+ spe_pmu->features |= SPE_PMU_FEAT_LDS;
+
+ if (reg & BIT(SYS_PMSIDR_EL1_ERND_SHIFT))
+ spe_pmu->features |= SPE_PMU_FEAT_ERND;
+
+ /* This field has a spaced out encoding, so just use a look-up */
+ fld = reg >> SYS_PMSIDR_EL1_INTERVAL_SHIFT & SYS_PMSIDR_EL1_INTERVAL_MASK;
+ switch (fld) {
+ case 0:
+ spe_pmu->min_period = 256;
+ break;
+ case 2:
+ spe_pmu->min_period = 512;
+ break;
+ case 3:
+ spe_pmu->min_period = 768;
+ break;
+ case 4:
+ spe_pmu->min_period = 1024;
+ break;
+ case 5:
+ spe_pmu->min_period = 1536;
+ break;
+ case 6:
+ spe_pmu->min_period = 2048;
+ break;
+ case 7:
+ spe_pmu->min_period = 3072;
+ break;
+ default:
+ dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
+ fld);
+ /* Fallthrough */
+ case 8:
+ spe_pmu->min_period = 4096;
+ }
+
+ /* Maximum record size. If it's out-of-range, then fail the probe */
+ fld = reg >> SYS_PMSIDR_EL1_MAXSIZE_SHIFT & SYS_PMSIDR_EL1_MAXSIZE_MASK;
+ spe_pmu->max_record_sz = 1 << fld;
+ if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) {
+ dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n",
+ fld, smp_processor_id());
+ return;
+ }
+
+ fld = reg >> SYS_PMSIDR_EL1_COUNTSIZE_SHIFT & SYS_PMSIDR_EL1_COUNTSIZE_MASK;
+ switch (fld) {
+ default:
+ dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
+ fld);
+ /* Fallthrough */
+ case 2:
+ spe_pmu->counter_sz = 12;
+ }
+
+ dev_info(dev,
+ "probed for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
+ cpumask_pr_args(&spe_pmu->supported_cpus),
+ spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);
+
+ spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
+ return;
+}
+
+static void __arm_spe_pmu_reset_local(void)
+{
+ /*
+ * This is probably overkill, as we have no idea where we're
+ * draining any buffered data to...
+ */
+ arm_spe_pmu_disable_and_drain_local();
+
+ /* Reset the buffer base pointer */
+ write_sysreg_s(0, SYS_PMBPTR_EL1);
+ isb();
+
+ /* Clear any pending management interrupts */
+ write_sysreg_s(0, SYS_PMBSR_EL1);
+ isb();
+}
+
+static void __arm_spe_pmu_setup_one(void *info)
+{
+ struct arm_spe_pmu *spe_pmu = info;
+
+ __arm_spe_pmu_reset_local();
+ enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE);
+}
+
+static void __arm_spe_pmu_stop_one(void *info)
+{
+ struct arm_spe_pmu *spe_pmu = info;
+
+ disable_percpu_irq(spe_pmu->irq);
+ __arm_spe_pmu_reset_local();
+}
+
+static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node)
+{
+ struct arm_spe_pmu *spe_pmu;
+
+ spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
+ if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
+ return 0;
+
+ __arm_spe_pmu_setup_one(spe_pmu);
+ return 0;
+}
+
+static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+{
+ struct arm_spe_pmu *spe_pmu;
+
+ spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
+ if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
+ return 0;
+
+ __arm_spe_pmu_stop_one(spe_pmu);
+ return 0;
+}
+
+static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
+{
+ int ret;
+ cpumask_t *mask = &spe_pmu->supported_cpus;
+
+ /* Make sure we probe the hardware on a relevant CPU */
+ ret = smp_call_function_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, 1);
+ if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED))
+ return -ENXIO;
+
+ /* Request our PPIs (note that the IRQ is still disabled) */
+ ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME,
+ spe_pmu->handle);
+ if (ret)
+ return ret;
+
+ /*
+ * Register our hotplug notifier now so we don't miss any events.
+ * This will enable the IRQ for any supported CPUs that are already
+ * up.
+ */
+ ret = cpuhp_state_add_instance(arm_spe_pmu_online,
+ &spe_pmu->hotplug_node);
+ if (ret)
+ free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
+
+ return ret;
+}
+
+static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu)
+{
+ cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node);
+ free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
+}
+
+/* Driver and device probing */
+static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
+{
+ struct platform_device *pdev = spe_pmu->pdev;
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ (%d)\n", irq);
+ return -ENXIO;
+ }
+
+ if (!irq_is_percpu(irq)) {
+ dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq);
+ return -EINVAL;
+ }
+
+ if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) {
+ dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq);
+ return -EINVAL;
+ }
+
+ spe_pmu->irq = irq;
+ return 0;
+}
+
+static const struct of_device_id arm_spe_pmu_of_match[] = {
+ { .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 },
+ { /* Sentinel */ },
+};
+
+static int arm_spe_pmu_device_dt_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct arm_spe_pmu *spe_pmu;
+ struct device *dev = &pdev->dev;
+
+ spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
+ if (!spe_pmu) {
+ dev_err(dev, "failed to allocate spe_pmu\n");
+ return -ENOMEM;
+ }
+
+ spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle));
+ if (!spe_pmu->handle)
+ return -ENOMEM;
+
+ spe_pmu->pdev = pdev;
+ platform_set_drvdata(pdev, spe_pmu);
+
+ ret = arm_spe_pmu_irq_probe(spe_pmu);
+ if (ret)
+ goto out_free_handle;
+
+ ret = arm_spe_pmu_dev_init(spe_pmu);
+ if (ret)
+ goto out_free_handle;
+
+ ret = arm_spe_pmu_perf_init(spe_pmu);
+ if (ret)
+ goto out_teardown_dev;
+
+ return 0;
+
+out_teardown_dev:
+ arm_spe_pmu_dev_teardown(spe_pmu);
+out_free_handle:
+ free_percpu(spe_pmu->handle);
+ return ret;
+}
+
+static int arm_spe_pmu_device_remove(struct platform_device *pdev)
+{
+ struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
+
+ arm_spe_pmu_perf_destroy(spe_pmu);
+ arm_spe_pmu_dev_teardown(spe_pmu);
+ free_percpu(spe_pmu->handle);
+ return 0;
+}
+
+static struct platform_driver arm_spe_pmu_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .of_match_table = of_match_ptr(arm_spe_pmu_of_match),
+ },
+ .probe = arm_spe_pmu_device_dt_probe,
+ .remove = arm_spe_pmu_device_remove,
+};
+
+static int __init arm_spe_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
+ arm_spe_pmu_cpu_startup,
+ arm_spe_pmu_cpu_teardown);
+ if (ret < 0)
+ return ret;
+ arm_spe_pmu_online = ret;
+
+ ret = platform_driver_register(&arm_spe_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(arm_spe_pmu_online);
+
+ return ret;
+}
+
+static void __exit arm_spe_pmu_exit(void)
+{
+ platform_driver_unregister(&arm_spe_pmu_driver);
+ cpuhp_remove_multi_state(arm_spe_pmu_online);
+}
+
+module_init(arm_spe_pmu_init);
+module_exit(arm_spe_pmu_exit);
+
+MODULE_DESCRIPTION("Perf driver for the ARMv8.2 Statistical Profiling Extension");
+MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile
new file mode 100644
index 000000000000..2621d51ae87a
--- /dev/null
+++ b/drivers/perf/hisilicon/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
new file mode 100644
index 000000000000..1b10ea05a914
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -0,0 +1,463 @@
+/*
+ * HiSilicon SoC DDRC uncore Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
+ * Anurup M <anurup.m@huawei.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* DDRC register definition */
+#define DDRC_PERF_CTRL 0x010
+#define DDRC_FLUX_WR 0x380
+#define DDRC_FLUX_RD 0x384
+#define DDRC_FLUX_WCMD 0x388
+#define DDRC_FLUX_RCMD 0x38c
+#define DDRC_PRE_CMD 0x3c0
+#define DDRC_ACT_CMD 0x3c4
+#define DDRC_BNK_CHG 0x3c8
+#define DDRC_RNK_CHG 0x3cc
+#define DDRC_EVENT_CTRL 0x6C0
+#define DDRC_INT_MASK 0x6c8
+#define DDRC_INT_STATUS 0x6cc
+#define DDRC_INT_CLEAR 0x6d0
+
+/* DDRC has 8 counters */
+#define DDRC_NR_COUNTERS 0x8
+#define DDRC_PERF_CTRL_EN 0x2
+
+/*
+ * For the DDRC PMU there are eight events, each mapped to a fixed-purpose
+ * counter whose register offset is not consistent with the others.
+ * Therefore there is no need to write an event type, and the driver
+ * assumes that the event code (0 to 7) equals the counter index.
+ */
+#define GET_DDRC_EVENTID(hwc) (hwc->config_base & 0x7)
+
+static const u32 ddrc_reg_off[] = {
+ DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
+ DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG
+};
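+
+/*
+ * For example, event code 0x02 (flux_wcmd) is counted in the fixed
+ * DDRC_FLUX_WCMD register at offset 0x388; the event code doubles as
+ * the counter index throughout this driver.
+ */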
+
+/*
+ * Select the counter register offset using the counter index.
+ * In the DDRC there are no programmable counters; the count
+ * is read from the statistics counter register itself.
+ */
+static u32 hisi_ddrc_pmu_get_counter_offset(int cntr_idx)
+{
+ return ddrc_reg_off[cntr_idx];
+}
+
+static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ /* Use event code as counter index */
+ u32 idx = GET_DDRC_EVENTID(hwc);
+
+ if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
+ dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
+ return 0;
+ }
+
+ return readl(ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
+}
+
+static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ u32 idx = GET_DDRC_EVENTID(hwc);
+
+ if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
+ dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
+ return;
+ }
+
+ writel((u32)val,
+ ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
+}
+
+/*
+ * For the DDRC PMU, events are mapped to fixed-purpose counters by hardware,
+ * so there is no need to write an event type.
+ */
+static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
+ u32 type)
+{
+}
+
+static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu)
+{
+ u32 val;
+
+ /* Set perf_enable in DDRC_PERF_CTRL to start event counting */
+ val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
+ val |= DDRC_PERF_CTRL_EN;
+ writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
+}
+
+static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu)
+{
+ u32 val;
+
+ /* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
+ val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
+ val &= ~DDRC_PERF_CTRL_EN;
+ writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
+}
+
+static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Set counter index(event code) in DDRC_EVENT_CTRL register */
+ val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
+ val |= (1 << GET_DDRC_EVENTID(hwc));
+ writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
+}
+
+static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Clear counter index(event code) in DDRC_EVENT_CTRL register */
+ val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
+ val &= ~(1 << GET_DDRC_EVENTID(hwc));
+ writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
+}
+
+static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
+{
+ struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
+ unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
+ struct hw_perf_event *hwc = &event->hw;
+ /* For DDRC PMU, we use event code as counter index */
+ int idx = GET_DDRC_EVENTID(hwc);
+
+ if (test_bit(idx, used_mask))
+ return -EAGAIN;
+
+ set_bit(idx, used_mask);
+
+ return idx;
+}
+
+static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 0 to enable interrupt */
+ val = readl(ddrc_pmu->base + DDRC_INT_MASK);
+ val &= ~(1 << GET_DDRC_EVENTID(hwc));
+ writel(val, ddrc_pmu->base + DDRC_INT_MASK);
+}
+
+static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 1 to mask interrupt */
+ val = readl(ddrc_pmu->base + DDRC_INT_MASK);
+ val |= (1 << GET_DDRC_EVENTID(hwc));
+ writel(val, ddrc_pmu->base + DDRC_INT_MASK);
+}
+
+static irqreturn_t hisi_ddrc_pmu_isr(int irq, void *dev_id)
+{
+ struct hisi_pmu *ddrc_pmu = dev_id;
+ struct perf_event *event;
+ unsigned long overflown;
+ int idx;
+
+ /* Read the DDRC_INT_STATUS register */
+ overflown = readl(ddrc_pmu->base + DDRC_INT_STATUS);
+ if (!overflown)
+ return IRQ_NONE;
+
+ /*
+	 * Find each counter index whose overflow bit is set
+	 * and handle the overflow
+ */
+ for_each_set_bit(idx, &overflown, DDRC_NR_COUNTERS) {
+ /* Write 1 to clear the IRQ status flag */
+ writel((1 << idx), ddrc_pmu->base + DDRC_INT_CLEAR);
+
+ /* Get the corresponding event struct */
+ event = ddrc_pmu->pmu_events.hw_events[idx];
+ if (!event)
+ continue;
+
+ hisi_uncore_pmu_event_update(event);
+ hisi_uncore_pmu_set_event_period(event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
+ struct platform_device *pdev)
+{
+ int irq, ret;
+
+ /* Read and init IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "DDRC PMU get irq fail; irq:%d\n", irq);
+ return irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(&pdev->dev), ddrc_pmu);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Fail to request IRQ:%d ret:%d\n", irq, ret);
+ return ret;
+ }
+
+ ddrc_pmu->irq = irq;
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
+ { "HISI0233", },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
+
+static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
+ struct hisi_pmu *ddrc_pmu)
+{
+ struct resource *res;
+
+ /*
+ * Use the SCCL_ID and DDRC channel ID to identify the
+	 * DDRC PMU, where the SCCL_ID corresponds to MPIDR[aff2].
+ */
+ if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
+ &ddrc_pmu->index_id)) {
+ dev_err(&pdev->dev, "Can not read ddrc channel-id!\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+ &ddrc_pmu->sccl_id)) {
+ dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n");
+ return -EINVAL;
+ }
+ /* DDRC PMUs only share the same SCCL */
+ ddrc_pmu->ccl_id = -1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ddrc_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ddrc_pmu->base)) {
+ dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n");
+ return PTR_ERR(ddrc_pmu->base);
+ }
+
+ return 0;
+}
+
+static struct attribute *hisi_ddrc_pmu_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
+ NULL,
+};
+
+static const struct attribute_group hisi_ddrc_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_ddrc_pmu_format_attr,
+};
+
+static struct attribute *hisi_ddrc_pmu_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(flux_wr, 0x00),
+ HISI_PMU_EVENT_ATTR(flux_rd, 0x01),
+ HISI_PMU_EVENT_ATTR(flux_wcmd, 0x02),
+ HISI_PMU_EVENT_ATTR(flux_rcmd, 0x03),
+ HISI_PMU_EVENT_ATTR(pre_cmd, 0x04),
+ HISI_PMU_EVENT_ATTR(act_cmd, 0x05),
+ HISI_PMU_EVENT_ATTR(rnk_chg, 0x06),
+ HISI_PMU_EVENT_ATTR(rw_chg, 0x07),
+ NULL,
+};
+
+static const struct attribute_group hisi_ddrc_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_ddrc_pmu_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
+ .attrs = hisi_ddrc_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = {
+ &hisi_ddrc_pmu_format_group,
+ &hisi_ddrc_pmu_events_group,
+ &hisi_ddrc_pmu_cpumask_attr_group,
+ NULL,
+};
+
+static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = {
+ .write_evtype = hisi_ddrc_pmu_write_evtype,
+ .get_event_idx = hisi_ddrc_pmu_get_event_idx,
+ .start_counters = hisi_ddrc_pmu_start_counters,
+ .stop_counters = hisi_ddrc_pmu_stop_counters,
+ .enable_counter = hisi_ddrc_pmu_enable_counter,
+ .disable_counter = hisi_ddrc_pmu_disable_counter,
+ .enable_counter_int = hisi_ddrc_pmu_enable_counter_int,
+ .disable_counter_int = hisi_ddrc_pmu_disable_counter_int,
+ .write_counter = hisi_ddrc_pmu_write_counter,
+ .read_counter = hisi_ddrc_pmu_read_counter,
+};
+
+static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
+ struct hisi_pmu *ddrc_pmu)
+{
+ int ret;
+
+ ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu);
+ if (ret)
+ return ret;
+
+ ret = hisi_ddrc_pmu_init_irq(ddrc_pmu, pdev);
+ if (ret)
+ return ret;
+
+ ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
+ ddrc_pmu->counter_bits = 32;
+ ddrc_pmu->ops = &hisi_uncore_ddrc_ops;
+ ddrc_pmu->dev = &pdev->dev;
+ ddrc_pmu->on_cpu = -1;
+ ddrc_pmu->check_event = 7;
+
+ return 0;
+}
+
+static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *ddrc_pmu;
+ char *name;
+ int ret;
+
+ ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL);
+ if (!ddrc_pmu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddrc_pmu);
+
+ ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+ &ddrc_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret);
+ return ret;
+ }
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_ddrc%u",
+ ddrc_pmu->sccl_id, ddrc_pmu->index_id);
+ ddrc_pmu->pmu = (struct pmu) {
+ .name = name,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = hisi_uncore_pmu_event_init,
+ .pmu_enable = hisi_uncore_pmu_enable,
+ .pmu_disable = hisi_uncore_pmu_disable,
+ .add = hisi_uncore_pmu_add,
+ .del = hisi_uncore_pmu_del,
+ .start = hisi_uncore_pmu_start,
+ .stop = hisi_uncore_pmu_stop,
+ .read = hisi_uncore_pmu_read,
+ .attr_groups = hisi_ddrc_pmu_attr_groups,
+ };
+
+ ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+ &ddrc_pmu->node);
+ }
+
+ return ret;
+}
+
+static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
+{
+ struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&ddrc_pmu->pmu);
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+ &ddrc_pmu->node);
+
+ return 0;
+}
+
+static struct platform_driver hisi_ddrc_pmu_driver = {
+ .driver = {
+ .name = "hisi_ddrc_pmu",
+ .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
+ },
+ .probe = hisi_ddrc_pmu_probe,
+ .remove = hisi_ddrc_pmu_remove,
+};
+
+static int __init hisi_ddrc_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+ "AP_PERF_ARM_HISI_DDRC_ONLINE",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret) {
+ pr_err("DDRC PMU: setup hotplug, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&hisi_ddrc_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
+
+ return ret;
+}
+module_init(hisi_ddrc_pmu_module_init);
+
+static void __exit hisi_ddrc_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_ddrc_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
+}
+module_exit(hisi_ddrc_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
+MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
new file mode 100644
index 000000000000..443906e0aff3
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -0,0 +1,473 @@
+/*
+ * HiSilicon SoC HHA uncore Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
+ * Anurup M <anurup.m@huawei.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* HHA register definition */
+#define HHA_INT_MASK 0x0804
+#define HHA_INT_STATUS 0x0808
+#define HHA_INT_CLEAR 0x080C
+#define HHA_PERF_CTRL 0x1E00
+#define HHA_EVENT_CTRL 0x1E04
+#define HHA_EVENT_TYPE0 0x1E80
+/*
+ * Each counter is 48 bits wide; bits [48:63] are reserved
+ * and are Read-As-Zero, Writes-Ignored.
+ */
+#define HHA_CNT0_LOWER 0x1F00
+
+/* HHA has 16 counters */
+#define HHA_NR_COUNTERS 0x10
+
+#define HHA_PERF_CTRL_EN 0x1
+#define HHA_EVTYPE_NONE 0xff
+
+/*
+ * Select the counter register offset using the counter index;
+ * each counter is 48 bits wide.
+ */
+static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx)
+{
+ return (HHA_CNT0_LOWER + (cntr_idx * 8));
+}
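+
+/*
+ * For example, counter 3 lives at HHA_CNT0_LOWER + 24 = 0x1F18, since
+ * each 48-bit counter occupies an 8-byte slot.
+ */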
+
+static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 idx = hwc->idx;
+
+ if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
+ dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
+ return 0;
+ }
+
+	/* Read 64 bits; as with the L3C, the top 16 bits are RAZ */
+ return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
+}
+
+static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ u32 idx = hwc->idx;
+
+ if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
+ dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
+ return;
+ }
+
+	/* Write 64 bits; as with the L3C, the top 16 bits are WI */
+ writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
+}
+
+static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
+ u32 type)
+{
+ u32 reg, reg_idx, shift, val;
+
+ /*
+ * Select the appropriate event select register(HHA_EVENT_TYPEx).
+ * There are 4 event select registers for the 16 hardware counters.
+ * Event code is 8-bits and for the first 4 hardware counters,
+ * HHA_EVENT_TYPE0 is chosen. For the next 4 hardware counters,
+ * HHA_EVENT_TYPE1 is chosen and so on.
+ */
+ reg = HHA_EVENT_TYPE0 + 4 * (idx / 4);
+ reg_idx = idx % 4;
+ shift = 8 * reg_idx;
+
+ /* Write event code to HHA_EVENT_TYPEx register */
+ val = readl(hha_pmu->base + reg);
+ val &= ~(HHA_EVTYPE_NONE << shift);
+ val |= (type << shift);
+ writel(val, hha_pmu->base + reg);
+}
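+
+/*
+ * As an example of the register selection above, counter index 5 uses
+ * HHA_EVENT_TYPE1 (0x1E84) with a shift of 8, i.e. the second
+ * event-code byte of that register.
+ */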
+
+static void hisi_hha_pmu_start_counters(struct hisi_pmu *hha_pmu)
+{
+ u32 val;
+
+ /*
+ * Set perf_enable bit in HHA_PERF_CTRL to start event
+ * counting for all enabled counters.
+ */
+ val = readl(hha_pmu->base + HHA_PERF_CTRL);
+ val |= HHA_PERF_CTRL_EN;
+ writel(val, hha_pmu->base + HHA_PERF_CTRL);
+}
+
+static void hisi_hha_pmu_stop_counters(struct hisi_pmu *hha_pmu)
+{
+ u32 val;
+
+ /*
+ * Clear perf_enable bit in HHA_PERF_CTRL to stop event
+ * counting for all enabled counters.
+ */
+ val = readl(hha_pmu->base + HHA_PERF_CTRL);
+ val &= ~(HHA_PERF_CTRL_EN);
+ writel(val, hha_pmu->base + HHA_PERF_CTRL);
+}
+
+static void hisi_hha_pmu_enable_counter(struct hisi_pmu *hha_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Enable counter index in HHA_EVENT_CTRL register */
+ val = readl(hha_pmu->base + HHA_EVENT_CTRL);
+ val |= (1 << hwc->idx);
+ writel(val, hha_pmu->base + HHA_EVENT_CTRL);
+}
+
+static void hisi_hha_pmu_disable_counter(struct hisi_pmu *hha_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Clear counter index in HHA_EVENT_CTRL register */
+ val = readl(hha_pmu->base + HHA_EVENT_CTRL);
+ val &= ~(1 << hwc->idx);
+ writel(val, hha_pmu->base + HHA_EVENT_CTRL);
+}
+
+static void hisi_hha_pmu_enable_counter_int(struct hisi_pmu *hha_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 0 to enable interrupt */
+ val = readl(hha_pmu->base + HHA_INT_MASK);
+ val &= ~(1 << hwc->idx);
+ writel(val, hha_pmu->base + HHA_INT_MASK);
+}
+
+static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 1 to mask interrupt */
+ val = readl(hha_pmu->base + HHA_INT_MASK);
+ val |= (1 << hwc->idx);
+ writel(val, hha_pmu->base + HHA_INT_MASK);
+}
+
+static irqreturn_t hisi_hha_pmu_isr(int irq, void *dev_id)
+{
+ struct hisi_pmu *hha_pmu = dev_id;
+ struct perf_event *event;
+ unsigned long overflown;
+ int idx;
+
+ /* Read HHA_INT_STATUS register */
+ overflown = readl(hha_pmu->base + HHA_INT_STATUS);
+ if (!overflown)
+ return IRQ_NONE;
+
+ /*
+	 * Find each counter index whose overflow bit is set
+	 * and handle the overflow
+ */
+ for_each_set_bit(idx, &overflown, HHA_NR_COUNTERS) {
+ /* Write 1 to clear the IRQ status flag */
+ writel((1 << idx), hha_pmu->base + HHA_INT_CLEAR);
+
+ /* Get the corresponding event struct */
+ event = hha_pmu->pmu_events.hw_events[idx];
+ if (!event)
+ continue;
+
+ hisi_uncore_pmu_event_update(event);
+ hisi_uncore_pmu_set_event_period(event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
+ struct platform_device *pdev)
+{
+ int irq, ret;
+
+ /* Read and init IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "HHA PMU get irq fail; irq:%d\n", irq);
+ return irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(&pdev->dev), hha_pmu);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Fail to request IRQ:%d ret:%d\n", irq, ret);
+ return ret;
+ }
+
+ hha_pmu->irq = irq;
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = {
+ { "HISI0243", },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match);
+
+static int hisi_hha_pmu_init_data(struct platform_device *pdev,
+ struct hisi_pmu *hha_pmu)
+{
+ unsigned long long id;
+ struct resource *res;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+ "_UID", NULL, &id);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ hha_pmu->index_id = id;
+
+ /*
+ * Use SCCL_ID and UID to identify the HHA PMU, while
+ * SCCL_ID is in MPIDR[aff2].
+ */
+ if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+ &hha_pmu->sccl_id)) {
+ dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
+ return -EINVAL;
+ }
+ /* HHA PMUs only share the same SCCL */
+ hha_pmu->ccl_id = -1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hha_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hha_pmu->base)) {
+ dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n");
+ return PTR_ERR(hha_pmu->base);
+ }
+
+ return 0;
+}
+
+static struct attribute *hisi_hha_pmu_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ NULL,
+};
+
+static const struct attribute_group hisi_hha_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_hha_pmu_format_attr,
+};
+
+static struct attribute *hisi_hha_pmu_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00),
+ HISI_PMU_EVENT_ATTR(rx_outer, 0x01),
+ HISI_PMU_EVENT_ATTR(rx_sccl, 0x02),
+ HISI_PMU_EVENT_ATTR(rx_ccix, 0x03),
+ HISI_PMU_EVENT_ATTR(rx_wbi, 0x04),
+ HISI_PMU_EVENT_ATTR(rx_wbip, 0x05),
+ HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11),
+ HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c),
+ HISI_PMU_EVENT_ATTR(wr_ddr_64b, 0x1d),
+ HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e),
+ HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f),
+ HISI_PMU_EVENT_ATTR(spill_num, 0x20),
+ HISI_PMU_EVENT_ATTR(spill_success, 0x21),
+ HISI_PMU_EVENT_ATTR(bi_num, 0x23),
+ HISI_PMU_EVENT_ATTR(mediated_num, 0x32),
+ HISI_PMU_EVENT_ATTR(tx_snp_num, 0x33),
+ HISI_PMU_EVENT_ATTR(tx_snp_outer, 0x34),
+ HISI_PMU_EVENT_ATTR(tx_snp_ccix, 0x35),
+ HISI_PMU_EVENT_ATTR(rx_snprspdata, 0x38),
+ HISI_PMU_EVENT_ATTR(rx_snprsp_outer, 0x3c),
+ HISI_PMU_EVENT_ATTR(sdir-lookup, 0x40),
+ HISI_PMU_EVENT_ATTR(edir-lookup, 0x41),
+ HISI_PMU_EVENT_ATTR(sdir-hit, 0x42),
+ HISI_PMU_EVENT_ATTR(edir-hit, 0x43),
+ HISI_PMU_EVENT_ATTR(sdir-home-migrate, 0x4c),
+ HISI_PMU_EVENT_ATTR(edir-home-migrate, 0x4d),
+ NULL,
+};
+
+static const struct attribute_group hisi_hha_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_hha_pmu_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_hha_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
+ .attrs = hisi_hha_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *hisi_hha_pmu_attr_groups[] = {
+ &hisi_hha_pmu_format_group,
+ &hisi_hha_pmu_events_group,
+ &hisi_hha_pmu_cpumask_attr_group,
+ NULL,
+};
+
+static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
+ .write_evtype = hisi_hha_pmu_write_evtype,
+ .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .start_counters = hisi_hha_pmu_start_counters,
+ .stop_counters = hisi_hha_pmu_stop_counters,
+ .enable_counter = hisi_hha_pmu_enable_counter,
+ .disable_counter = hisi_hha_pmu_disable_counter,
+ .enable_counter_int = hisi_hha_pmu_enable_counter_int,
+ .disable_counter_int = hisi_hha_pmu_disable_counter_int,
+ .write_counter = hisi_hha_pmu_write_counter,
+ .read_counter = hisi_hha_pmu_read_counter,
+};
+
+static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
+ struct hisi_pmu *hha_pmu)
+{
+ int ret;
+
+ ret = hisi_hha_pmu_init_data(pdev, hha_pmu);
+ if (ret)
+ return ret;
+
+ ret = hisi_hha_pmu_init_irq(hha_pmu, pdev);
+ if (ret)
+ return ret;
+
+ hha_pmu->num_counters = HHA_NR_COUNTERS;
+ hha_pmu->counter_bits = 48;
+ hha_pmu->ops = &hisi_uncore_hha_ops;
+ hha_pmu->dev = &pdev->dev;
+ hha_pmu->on_cpu = -1;
+ hha_pmu->check_event = 0x65;
+
+ return 0;
+}
+
+static int hisi_hha_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *hha_pmu;
+ char *name;
+ int ret;
+
+ hha_pmu = devm_kzalloc(&pdev->dev, sizeof(*hha_pmu), GFP_KERNEL);
+ if (!hha_pmu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hha_pmu);
+
+ ret = hisi_hha_pmu_dev_probe(pdev, hha_pmu);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+ &hha_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ return ret;
+ }
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
+ hha_pmu->sccl_id, hha_pmu->index_id);
+ hha_pmu->pmu = (struct pmu) {
+ .name = name,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = hisi_uncore_pmu_event_init,
+ .pmu_enable = hisi_uncore_pmu_enable,
+ .pmu_disable = hisi_uncore_pmu_disable,
+ .add = hisi_uncore_pmu_add,
+ .del = hisi_uncore_pmu_del,
+ .start = hisi_uncore_pmu_start,
+ .stop = hisi_uncore_pmu_stop,
+ .read = hisi_uncore_pmu_read,
+ .attr_groups = hisi_hha_pmu_attr_groups,
+ };
+
+ ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(hha_pmu->dev, "HHA PMU register failed!\n");
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+ &hha_pmu->node);
+ }
+
+ return ret;
+}
+
+static int hisi_hha_pmu_remove(struct platform_device *pdev)
+{
+ struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&hha_pmu->pmu);
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+ &hha_pmu->node);
+
+ return 0;
+}
+
+static struct platform_driver hisi_hha_pmu_driver = {
+ .driver = {
+ .name = "hisi_hha_pmu",
+ .acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match),
+ },
+ .probe = hisi_hha_pmu_probe,
+ .remove = hisi_hha_pmu_remove,
+};
+
+static int __init hisi_hha_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+ "AP_PERF_ARM_HISI_HHA_ONLINE",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret) {
+ pr_err("HHA PMU: Error setup hotplug, ret = %d;\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&hisi_hha_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
+
+ return ret;
+}
+module_init(hisi_hha_pmu_module_init);
+
+static void __exit hisi_hha_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_hha_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
+}
+module_exit(hisi_hha_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
+MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
new file mode 100644
index 000000000000..0bde5d919b2e
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -0,0 +1,463 @@
+/*
+ * HiSilicon SoC L3C uncore Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Anurup M <anurup.m@huawei.com>
+ * Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* L3C register definition */
+#define L3C_PERF_CTRL 0x0408
+#define L3C_INT_MASK 0x0800
+#define L3C_INT_STATUS 0x0808
+#define L3C_INT_CLEAR 0x080c
+#define L3C_EVENT_CTRL 0x1c00
+#define L3C_EVENT_TYPE0 0x1d00
+/*
+ * Each counter is 48-bits and [48:63] are reserved
+ * which are Read-As-Zero and Writes-Ignored.
+ */
+#define L3C_CNTR0_LOWER 0x1e00
+
+/* L3C has 8-counters */
+#define L3C_NR_COUNTERS 0x8
+
+#define L3C_PERF_CTRL_EN 0x20000
+#define L3C_EVTYPE_NONE 0xff
+
+/*
+ * Select the counter register offset using the counter index
+ */
+static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx)
+{
+ return (L3C_CNTR0_LOWER + (cntr_idx * 8));
+}
+
+static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 idx = hwc->idx;
+
+ if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) {
+ dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx);
+ return 0;
+ }
+
+ /* Read 64-bits and the upper 16 bits are RAZ */
+ return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx));
+}
+
+static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ u32 idx = hwc->idx;
+
+ if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) {
+ dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx);
+ return;
+ }
+
+ /* Write 64-bits and the upper 16 bits are WI */
+ writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx));
+}
+
+static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
+ u32 type)
+{
+ u32 reg, reg_idx, shift, val;
+
+ /*
+ * Select the appropriate event select register(L3C_EVENT_TYPE0/1).
+ * There are 2 event select registers for the 8 hardware counters.
+ * Event code is 8-bits and for the former 4 hardware counters,
+ * L3C_EVENT_TYPE0 is chosen. For the latter 4 hardware counters,
+ * L3C_EVENT_TYPE1 is chosen.
+ */
+ reg = L3C_EVENT_TYPE0 + (idx / 4) * 4;
+ reg_idx = idx % 4;
+ shift = 8 * reg_idx;
+
+ /* Write event code to L3C_EVENT_TYPEx Register */
+ val = readl(l3c_pmu->base + reg);
+ val &= ~(L3C_EVTYPE_NONE << shift);
+ val |= (type << shift);
+ writel(val, l3c_pmu->base + reg);
+}
+
+static void hisi_l3c_pmu_start_counters(struct hisi_pmu *l3c_pmu)
+{
+ u32 val;
+
+ /*
+ * Set perf_enable bit in L3C_PERF_CTRL register to start counting
+ * for all enabled counters.
+ */
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val |= L3C_PERF_CTRL_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+}
+
+static void hisi_l3c_pmu_stop_counters(struct hisi_pmu *l3c_pmu)
+{
+ u32 val;
+
+ /*
+ * Clear perf_enable bit in L3C_PERF_CTRL register to stop counting
+ * for all enabled counters.
+ */
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val &= ~(L3C_PERF_CTRL_EN);
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+}
+
+static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Enable counter index in L3C_EVENT_CTRL register */
+ val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
+ val |= (1 << hwc->idx);
+ writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
+}
+
+static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Clear counter index in L3C_EVENT_CTRL register */
+ val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
+ val &= ~(1 << hwc->idx);
+ writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
+}
+
+static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(l3c_pmu->base + L3C_INT_MASK);
+ /* Write 0 to enable interrupt */
+ val &= ~(1 << hwc->idx);
+ writel(val, l3c_pmu->base + L3C_INT_MASK);
+}
+
+static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(l3c_pmu->base + L3C_INT_MASK);
+ /* Write 1 to mask interrupt */
+ val |= (1 << hwc->idx);
+ writel(val, l3c_pmu->base + L3C_INT_MASK);
+}
+
+static irqreturn_t hisi_l3c_pmu_isr(int irq, void *dev_id)
+{
+ struct hisi_pmu *l3c_pmu = dev_id;
+ struct perf_event *event;
+ unsigned long overflown;
+ int idx;
+
+ /* Read L3C_INT_STATUS register */
+ overflown = readl(l3c_pmu->base + L3C_INT_STATUS);
+ if (!overflown)
+ return IRQ_NONE;
+
+ /*
+ * Find the counter index which overflowed if the bit was set
+ * and handle it.
+ */
+ for_each_set_bit(idx, &overflown, L3C_NR_COUNTERS) {
+ /* Write 1 to clear the IRQ status flag */
+ writel((1 << idx), l3c_pmu->base + L3C_INT_CLEAR);
+
+ /* Get the corresponding event struct */
+ event = l3c_pmu->pmu_events.hw_events[idx];
+ if (!event)
+ continue;
+
+ hisi_uncore_pmu_event_update(event);
+ hisi_uncore_pmu_set_event_period(event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu,
+ struct platform_device *pdev)
+{
+ int irq, ret;
+
+ /* Read and init IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "L3C PMU get irq fail; irq:%d\n", irq);
+ return irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(&pdev->dev), l3c_pmu);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Fail to request IRQ:%d ret:%d\n", irq, ret);
+ return ret;
+ }
+
+ l3c_pmu->irq = irq;
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
+ { "HISI0213", },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
+
+static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
+ struct hisi_pmu *l3c_pmu)
+{
+ unsigned long long id;
+ struct resource *res;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+ "_UID", NULL, &id);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ l3c_pmu->index_id = id;
+
+ /*
+ * Use the SCCL_ID and CCL_ID to identify the L3C PMU, while
+ * SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1].
+ */
+ if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+ &l3c_pmu->sccl_id)) {
+ dev_err(&pdev->dev, "Can not read l3c sccl-id!\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
+ &l3c_pmu->ccl_id)) {
+ dev_err(&pdev->dev, "Can not read l3c ccl-id!\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ l3c_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(l3c_pmu->base)) {
+ dev_err(&pdev->dev, "ioremap failed for l3c_pmu resource\n");
+ return PTR_ERR(l3c_pmu->base);
+ }
+
+ return 0;
+}
+
+static struct attribute *hisi_l3c_pmu_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ NULL,
+};
+
+static const struct attribute_group hisi_l3c_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_l3c_pmu_format_attr,
+};
+
+static struct attribute *hisi_l3c_pmu_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(rd_cpipe, 0x00),
+ HISI_PMU_EVENT_ATTR(wr_cpipe, 0x01),
+ HISI_PMU_EVENT_ATTR(rd_hit_cpipe, 0x02),
+ HISI_PMU_EVENT_ATTR(wr_hit_cpipe, 0x03),
+ HISI_PMU_EVENT_ATTR(victim_num, 0x04),
+ HISI_PMU_EVENT_ATTR(rd_spipe, 0x20),
+ HISI_PMU_EVENT_ATTR(wr_spipe, 0x21),
+ HISI_PMU_EVENT_ATTR(rd_hit_spipe, 0x22),
+ HISI_PMU_EVENT_ATTR(wr_hit_spipe, 0x23),
+ HISI_PMU_EVENT_ATTR(back_invalid, 0x29),
+ HISI_PMU_EVENT_ATTR(retry_cpu, 0x40),
+ HISI_PMU_EVENT_ATTR(retry_ring, 0x41),
+ HISI_PMU_EVENT_ATTR(prefetch_drop, 0x42),
+ NULL,
+};
+
+static const struct attribute_group hisi_l3c_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_l3c_pmu_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_l3c_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = {
+ .attrs = hisi_l3c_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *hisi_l3c_pmu_attr_groups[] = {
+ &hisi_l3c_pmu_format_group,
+ &hisi_l3c_pmu_events_group,
+ &hisi_l3c_pmu_cpumask_attr_group,
+ NULL,
+};
+
+static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
+ .write_evtype = hisi_l3c_pmu_write_evtype,
+ .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .start_counters = hisi_l3c_pmu_start_counters,
+ .stop_counters = hisi_l3c_pmu_stop_counters,
+ .enable_counter = hisi_l3c_pmu_enable_counter,
+ .disable_counter = hisi_l3c_pmu_disable_counter,
+ .enable_counter_int = hisi_l3c_pmu_enable_counter_int,
+ .disable_counter_int = hisi_l3c_pmu_disable_counter_int,
+ .write_counter = hisi_l3c_pmu_write_counter,
+ .read_counter = hisi_l3c_pmu_read_counter,
+};
+
+static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
+ struct hisi_pmu *l3c_pmu)
+{
+ int ret;
+
+ ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu);
+ if (ret)
+ return ret;
+
+ ret = hisi_l3c_pmu_init_irq(l3c_pmu, pdev);
+ if (ret)
+ return ret;
+
+ l3c_pmu->num_counters = L3C_NR_COUNTERS;
+ l3c_pmu->counter_bits = 48;
+ l3c_pmu->ops = &hisi_uncore_l3c_ops;
+ l3c_pmu->dev = &pdev->dev;
+ l3c_pmu->on_cpu = -1;
+ l3c_pmu->check_event = 0x59;
+
+ return 0;
+}
+
+static int hisi_l3c_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *l3c_pmu;
+ char *name;
+ int ret;
+
+ l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*l3c_pmu), GFP_KERNEL);
+ if (!l3c_pmu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, l3c_pmu);
+
+ ret = hisi_l3c_pmu_dev_probe(pdev, l3c_pmu);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+ &l3c_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ return ret;
+ }
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
+ l3c_pmu->sccl_id, l3c_pmu->index_id);
+ l3c_pmu->pmu = (struct pmu) {
+ .name = name,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = hisi_uncore_pmu_event_init,
+ .pmu_enable = hisi_uncore_pmu_enable,
+ .pmu_disable = hisi_uncore_pmu_disable,
+ .add = hisi_uncore_pmu_add,
+ .del = hisi_uncore_pmu_del,
+ .start = hisi_uncore_pmu_start,
+ .stop = hisi_uncore_pmu_stop,
+ .read = hisi_uncore_pmu_read,
+ .attr_groups = hisi_l3c_pmu_attr_groups,
+ };
+
+ ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(l3c_pmu->dev, "L3C PMU register failed!\n");
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+ &l3c_pmu->node);
+ }
+
+ return ret;
+}
+
+static int hisi_l3c_pmu_remove(struct platform_device *pdev)
+{
+ struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&l3c_pmu->pmu);
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+ &l3c_pmu->node);
+
+ return 0;
+}
+
+static struct platform_driver hisi_l3c_pmu_driver = {
+ .driver = {
+ .name = "hisi_l3c_pmu",
+ .acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match),
+ },
+ .probe = hisi_l3c_pmu_probe,
+ .remove = hisi_l3c_pmu_remove,
+};
+
+static int __init hisi_l3c_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+ "AP_PERF_ARM_HISI_L3_ONLINE",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret) {
+ pr_err("L3C PMU: Error setup hotplug, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&hisi_l3c_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE);
+
+ return ret;
+}
+module_init(hisi_l3c_pmu_module_init);
+
+static void __exit hisi_l3c_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_l3c_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE);
+}
+module_exit(hisi_l3c_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SoC L3C uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
new file mode 100644
index 000000000000..7ed24b954422
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -0,0 +1,447 @@
+/*
+ * HiSilicon SoC Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Anurup M <anurup.m@huawei.com>
+ * Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+
+#include <asm/local64.h>
+
+#include "hisi_uncore_pmu.h"
+
+#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff)
+#define HISI_MAX_PERIOD(nr) (BIT_ULL(nr) - 1)
+
+/*
+ * PMU format attributes
+ */
+ssize_t hisi_format_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+ return sprintf(buf, "%s\n", (char *)eattr->var);
+}
+
+/*
+ * PMU event attributes
+ */
+ssize_t hisi_event_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+ return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var);
+}
+
+/*
+ * sysfs cpumask attributes. For uncore PMU, we only have a single CPU to show
+ */
+ssize_t hisi_cpumask_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
+
+ return sprintf(buf, "%d\n", hisi_pmu->on_cpu);
+}
+
+static bool hisi_validate_event_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ /* Include count for the event */
+ int counters = 1;
+
+ if (!is_software_event(leader)) {
+ /*
+ * We must NOT create groups containing mixed PMUs, although
+ * software events are acceptable
+ */
+ if (leader->pmu != event->pmu)
+ return false;
+
+ /* Increment counter for the leader */
+ if (leader != event)
+ counters++;
+ }
+
+ list_for_each_entry(sibling, &event->group_leader->sibling_list,
+ group_entry) {
+ if (is_software_event(sibling))
+ continue;
+ if (sibling->pmu != event->pmu)
+ return false;
+ /* Increment counter for each sibling */
+ counters++;
+ }
+
+ /* The group cannot use more events than there are counters in the HW */
+ return counters <= hisi_pmu->num_counters;
+}
+
+int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx)
+{
+ return idx >= 0 && idx < hisi_pmu->num_counters;
+}
+
+int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ unsigned long *used_mask = hisi_pmu->pmu_events.used_mask;
+ u32 num_counters = hisi_pmu->num_counters;
+ int idx;
+
+ idx = find_first_zero_bit(used_mask, num_counters);
+ if (idx == num_counters)
+ return -EAGAIN;
+
+ set_bit(idx, used_mask);
+
+ return idx;
+}
+
+static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
+{
+ if (!hisi_uncore_pmu_counter_valid(hisi_pmu, idx)) {
+ dev_err(hisi_pmu->dev, "Unsupported event index:%d!\n", idx);
+ return;
+ }
+
+ clear_bit(idx, hisi_pmu->pmu_events.used_mask);
+}
+
+int hisi_uncore_pmu_event_init(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hisi_pmu *hisi_pmu;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * We do not support sampling, as the counters are all
+ * shared by all CPU cores in a CPU die (SCCL). Also we
+ * do not support attaching to a task (per-process mode).
+ */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+
+ /* counters do not have these bits */
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle)
+ return -EINVAL;
+
+ /*
+ * The uncore counters are not specific to any CPU, so we cannot
+ * support per-task counting.
+ */
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ /*
+ * Validate that the events in the group do not exceed the
+ * counters available in hardware.
+ */
+ if (!hisi_validate_event_group(event))
+ return -EINVAL;
+
+ hisi_pmu = to_hisi_pmu(event->pmu);
+ if (event->attr.config > hisi_pmu->check_event)
+ return -EINVAL;
+
+ if (hisi_pmu->on_cpu == -1)
+ return -EINVAL;
+ /*
+ * We don't assign an index until we actually place the event onto
+ * hardware. Use -1 to signify that we haven't decided where to put it
+ * yet.
+ */
+ hwc->idx = -1;
+ hwc->config_base = event->attr.config;
+
+ /* Enforce to use the same CPU for all events in this PMU */
+ event->cpu = hisi_pmu->on_cpu;
+
+ return 0;
+}
+
+/*
+ * Set the counter to count the event that we're interested in,
+ * and enable interrupt and counter.
+ */
+static void hisi_uncore_pmu_enable_event(struct perf_event *event)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ hisi_pmu->ops->write_evtype(hisi_pmu, hwc->idx,
+ HISI_GET_EVENTID(event));
+
+ hisi_pmu->ops->enable_counter_int(hisi_pmu, hwc);
+ hisi_pmu->ops->enable_counter(hisi_pmu, hwc);
+}
+
+/*
+ * Disable counter and interrupt.
+ */
+static void hisi_uncore_pmu_disable_event(struct perf_event *event)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ hisi_pmu->ops->disable_counter(hisi_pmu, hwc);
+ hisi_pmu->ops->disable_counter_int(hisi_pmu, hwc);
+}
+
+void hisi_uncore_pmu_set_event_period(struct perf_event *event)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * The HiSilicon PMU counters are 32 or 48 bits wide, depending on
+ * the PMU. We start them at 2^(counter_bits - 1) to leave room for
+ * extreme interrupt latency, so the overflow interrupt can hopefully
+ * be handled before another 2^(counter_bits - 1) events occur and the
+ * counter overtakes its previous value.
+ */
+ u64 val = BIT_ULL(hisi_pmu->counter_bits - 1);
+
+ local64_set(&hwc->prev_count, val);
+ /* Write start value to the hardware event counter */
+ hisi_pmu->ops->write_counter(hisi_pmu, hwc, val);
+}
+
+void hisi_uncore_pmu_event_update(struct perf_event *event)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 delta, prev_raw_count, new_raw_count;
+
+ do {
+ /* Read the count from the counter register */
+ new_raw_count = hisi_pmu->ops->read_counter(hisi_pmu, hwc);
+ prev_raw_count = local64_read(&hwc->prev_count);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count);
+ /*
+ * compute the delta
+ */
+ delta = (new_raw_count - prev_raw_count) &
+ HISI_MAX_PERIOD(hisi_pmu->counter_bits);
+ local64_add(delta, &event->count);
+}
+
+void hisi_uncore_pmu_start(struct perf_event *event, int flags)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+ return;
+
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+ hwc->state = 0;
+ hisi_uncore_pmu_set_event_period(event);
+
+ if (flags & PERF_EF_RELOAD) {
+ u64 prev_raw_count = local64_read(&hwc->prev_count);
+
+ hisi_pmu->ops->write_counter(hisi_pmu, hwc, prev_raw_count);
+ }
+
+ hisi_uncore_pmu_enable_event(event);
+ perf_event_update_userpage(event);
+}
+
+void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ hisi_uncore_pmu_disable_event(event);
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (hwc->state & PERF_HES_UPTODATE)
+ return;
+
+ /* Read hardware counter and update the perf counter statistics */
+ hisi_uncore_pmu_event_update(event);
+ hwc->state |= PERF_HES_UPTODATE;
+}
+
+int hisi_uncore_pmu_add(struct perf_event *event, int flags)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ /* Get an available counter index for counting */
+ idx = hisi_pmu->ops->get_event_idx(event);
+ if (idx < 0)
+ return idx;
+
+ event->hw.idx = idx;
+ hisi_pmu->pmu_events.hw_events[idx] = event;
+
+ if (flags & PERF_EF_START)
+ hisi_uncore_pmu_start(event, PERF_EF_RELOAD);
+
+ return 0;
+}
+
+void hisi_uncore_pmu_del(struct perf_event *event, int flags)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ hisi_uncore_pmu_stop(event, PERF_EF_UPDATE);
+ hisi_uncore_pmu_clear_event_idx(hisi_pmu, hwc->idx);
+ perf_event_update_userpage(event);
+ hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL;
+}
+
+void hisi_uncore_pmu_read(struct perf_event *event)
+{
+ /* Read hardware counter and update the perf counter statistics */
+ hisi_uncore_pmu_event_update(event);
+}
+
+void hisi_uncore_pmu_enable(struct pmu *pmu)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
+ int enabled = bitmap_weight(hisi_pmu->pmu_events.used_mask,
+ hisi_pmu->num_counters);
+
+ if (!enabled)
+ return;
+
+ hisi_pmu->ops->start_counters(hisi_pmu);
+}
+
+void hisi_uncore_pmu_disable(struct pmu *pmu)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
+
+ hisi_pmu->ops->stop_counters(hisi_pmu);
+}
+
+/*
+ * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
+ * If multi-threading is supported, SCCL_ID is in MPIDR[aff3] and CCL_ID
+ * is in MPIDR[aff2]; if not, SCCL_ID is in MPIDR[aff2] and CCL_ID is
+ * in MPIDR[aff1]. If this changes in future, this shall be updated.
+ */
+static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
+{
+ u64 mpidr = read_cpuid_mpidr();
+
+ if (mpidr & MPIDR_MT_BITMASK) {
+ if (sccl_id)
+ *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+ if (ccl_id)
+ *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ } else {
+ if (sccl_id)
+ *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ if (ccl_id)
+ *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ }
+}
+
+/*
+ * Check whether the CPU is associated with this uncore PMU
+ */
+static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu)
+{
+ int sccl_id, ccl_id;
+
+ if (hisi_pmu->ccl_id == -1) {
+ /* If CCL_ID is -1, the PMU only shares the same SCCL */
+ hisi_read_sccl_and_ccl_id(&sccl_id, NULL);
+
+ return sccl_id == hisi_pmu->sccl_id;
+ }
+
+ hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id);
+
+ return sccl_id == hisi_pmu->sccl_id && ccl_id == hisi_pmu->ccl_id;
+}
+
+int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
+ node);
+
+ if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu))
+ return 0;
+
+ cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus);
+
+ /* If another CPU is already managing this PMU, simply return. */
+ if (hisi_pmu->on_cpu != -1)
+ return 0;
+
+ /* Use this CPU in cpumask for event counting */
+ hisi_pmu->on_cpu = cpu;
+
+ /* Overflow interrupt also should use the same CPU */
+ WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));
+
+ return 0;
+}
+
+int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
+ node);
+ cpumask_t pmu_online_cpus;
+ unsigned int target;
+
+ if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus))
+ return 0;
+
+ /* Nothing to do if this CPU doesn't own the PMU */
+ if (hisi_pmu->on_cpu != cpu)
+ return 0;
+
+ /* Give up ownership of the PMU */
+ hisi_pmu->on_cpu = -1;
+
+ /* Choose a new CPU to migrate ownership of the PMU to */
+ cpumask_and(&pmu_online_cpus, &hisi_pmu->associated_cpus,
+ cpu_online_mask);
+ target = cpumask_any_but(&pmu_online_cpus, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
+ /* Use this CPU for event counting */
+ hisi_pmu->on_cpu = target;
+ WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target)));
+
+ return 0;
+}
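
The period and delta handling in hisi_uncore_pmu_set_event_period() and hisi_uncore_pmu_event_update() above boil down to two pieces of modular arithmetic: start the counter at 2^(counter_bits - 1) so an overflow interrupt has headroom to arrive, and mask the difference of two reads to counter_bits so a single wraparound still yields the correct count. The following standalone sketch uses invented values and assumes a 48-bit counter.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define COUNTER_BITS 48
#define MAX_PERIOD   ((1ULL << COUNTER_BITS) - 1)

/* Delta of a COUNTER_BITS-wide counter, tolerating one wraparound. */
static uint64_t counter_delta(uint64_t prev, uint64_t now)
{
        return (now - prev) & MAX_PERIOD;
}

int main(void)
{
        /* Start value: half the range, as in hisi_uncore_pmu_set_event_period(). */
        uint64_t prev = 1ULL << (COUNTER_BITS - 1);

        /* Pretend the 48-bit counter wrapped and now reads a small value. */
        uint64_t now = 123;

        printf("delta = %" PRIu64 "\n", counter_delta(prev, now));
        return 0;
}

Even though now is numerically smaller than prev, the masked subtraction reports the 2^47 + 123 events that actually elapsed, which is exactly what local64_add() accumulates in the driver.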
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
new file mode 100644
index 000000000000..f21226a0e9c6
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -0,0 +1,102 @@
+/*
+ * HiSilicon SoC Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Anurup M <anurup.m@huawei.com>
+ * Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __HISI_UNCORE_PMU_H__
+#define __HISI_UNCORE_PMU_H__
+
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "hisi_pmu: " fmt
+
+#define HISI_MAX_COUNTERS 0x10
+#define to_hisi_pmu(p) (container_of(p, struct hisi_pmu, pmu))
+
+#define HISI_PMU_ATTR(_name, _func, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { __ATTR(_name, 0444, _func, NULL), (void *)_config } \
+ })[0].attr.attr)
+
+#define HISI_PMU_FORMAT_ATTR(_name, _config) \
+ HISI_PMU_ATTR(_name, hisi_format_sysfs_show, (void *)_config)
+#define HISI_PMU_EVENT_ATTR(_name, _config) \
+ HISI_PMU_ATTR(_name, hisi_event_sysfs_show, (unsigned long)_config)
+
+struct hisi_pmu;
+
+struct hisi_uncore_ops {
+ void (*write_evtype)(struct hisi_pmu *, int, u32);
+ int (*get_event_idx)(struct perf_event *);
+ u64 (*read_counter)(struct hisi_pmu *, struct hw_perf_event *);
+ void (*write_counter)(struct hisi_pmu *, struct hw_perf_event *, u64);
+ void (*enable_counter)(struct hisi_pmu *, struct hw_perf_event *);
+ void (*disable_counter)(struct hisi_pmu *, struct hw_perf_event *);
+ void (*enable_counter_int)(struct hisi_pmu *, struct hw_perf_event *);
+ void (*disable_counter_int)(struct hisi_pmu *, struct hw_perf_event *);
+ void (*start_counters)(struct hisi_pmu *);
+ void (*stop_counters)(struct hisi_pmu *);
+};
+
+struct hisi_pmu_hwevents {
+ struct perf_event *hw_events[HISI_MAX_COUNTERS];
+ DECLARE_BITMAP(used_mask, HISI_MAX_COUNTERS);
+};
+
+/* Generic pmu struct for different pmu types */
+struct hisi_pmu {
+ struct pmu pmu;
+ const struct hisi_uncore_ops *ops;
+ struct hisi_pmu_hwevents pmu_events;
+ /* associated_cpus: All CPUs associated with the PMU */
+ cpumask_t associated_cpus;
+ /* CPU used for counting */
+ int on_cpu;
+ int irq;
+ struct device *dev;
+ struct hlist_node node;
+ int sccl_id;
+ int ccl_id;
+ void __iomem *base;
+ /* the ID of the PMU modules */
+ u32 index_id;
+ int num_counters;
+ int counter_bits;
+ /* check event code range */
+ int check_event;
+};
+
+int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx);
+int hisi_uncore_pmu_get_event_idx(struct perf_event *event);
+void hisi_uncore_pmu_read(struct perf_event *event);
+int hisi_uncore_pmu_add(struct perf_event *event, int flags);
+void hisi_uncore_pmu_del(struct perf_event *event, int flags);
+void hisi_uncore_pmu_start(struct perf_event *event, int flags);
+void hisi_uncore_pmu_stop(struct perf_event *event, int flags);
+void hisi_uncore_pmu_set_event_period(struct perf_event *event);
+void hisi_uncore_pmu_event_update(struct perf_event *event);
+int hisi_uncore_pmu_event_init(struct perf_event *event);
+void hisi_uncore_pmu_enable(struct pmu *pmu);
+void hisi_uncore_pmu_disable(struct pmu *pmu);
+ssize_t hisi_event_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+ssize_t hisi_format_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+ssize_t hisi_cpumask_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node);
+int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);
+#endif /* __HISI_UNCORE_PMU_H__ */
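
The HISI_PMU_ATTR() macro in this header builds each sysfs attribute from an anonymous one-element dev_ext_attribute array and hands back a pointer to the embedded attribute, so the show routines can later recover the attached config value. Below is a userspace analogue of that compound-literal idiom; the struct names and printout are invented, only the pointer trick mirrors the header, and the kernel recovers the outer struct with container_of() rather than the cast used here.

#include <stdio.h>

/* Stand-ins for struct attribute / struct dev_ext_attribute. */
struct attr {
        const char *name;
};

struct ext_attr {
        struct attr attr;
        unsigned long var;   /* extra payload, like dev_ext_attribute::var */
};

/* Same shape as HISI_PMU_ATTR(): anonymous array, pointer to one member. */
#define MY_ATTR(_name, _config) \
        (&((struct ext_attr[]) { \
                { { #_name }, (_config) } \
        })[0].attr)

static struct attr *attrs[] = {
        MY_ATTR(rd_cpipe, 0x00),
        MY_ATTR(wr_cpipe, 0x01),
        NULL,
};

int main(void)
{
        for (int i = 0; attrs[i]; i++) {
                /* attr is the first member, so a cast recovers the payload. */
                struct ext_attr *ea = (struct ext_attr *)attrs[i];

                printf("%s -> config=0x%lx\n", ea->attr.name, ea->var);
        }
        return 0;
}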
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index b242cce10468..4fdc8486a8e4 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -92,6 +92,21 @@
#define reg_idx(reg, i) (((i) * IA_L2_REG_OFFSET) + reg##_BASE)
+/*
+ * Events
+ */
+#define L2_EVENT_CYCLES 0xfe
+#define L2_EVENT_DCACHE_OPS 0x400
+#define L2_EVENT_ICACHE_OPS 0x401
+#define L2_EVENT_TLBI 0x402
+#define L2_EVENT_BARRIERS 0x403
+#define L2_EVENT_TOTAL_READS 0x405
+#define L2_EVENT_TOTAL_WRITES 0x406
+#define L2_EVENT_TOTAL_REQUESTS 0x407
+#define L2_EVENT_LDREX 0x420
+#define L2_EVENT_STREX 0x421
+#define L2_EVENT_CLREX 0x422
+
static DEFINE_RAW_SPINLOCK(l2_access_lock);
/**
@@ -700,9 +715,12 @@ static struct attribute_group l2_cache_pmu_cpumask_group = {
/* CCG format for perf RAW codes. */
PMU_FORMAT_ATTR(l2_code, "config:4-11");
PMU_FORMAT_ATTR(l2_group, "config:0-3");
+PMU_FORMAT_ATTR(event, "config:0-11");
+
static struct attribute *l2_cache_pmu_formats[] = {
&format_attr_l2_code.attr,
&format_attr_l2_group.attr,
+ &format_attr_event.attr,
NULL,
};
@@ -711,9 +729,45 @@ static struct attribute_group l2_cache_pmu_format_group = {
.attrs = l2_cache_pmu_formats,
};
+static ssize_t l2cache_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define L2CACHE_EVENT_ATTR(_name, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, l2cache_pmu_event_show, NULL), \
+ .id = _id, } \
+ })[0].attr.attr)
+
+static struct attribute *l2_cache_pmu_events[] = {
+ L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLES),
+ L2CACHE_EVENT_ATTR(dcache-ops, L2_EVENT_DCACHE_OPS),
+ L2CACHE_EVENT_ATTR(icache-ops, L2_EVENT_ICACHE_OPS),
+ L2CACHE_EVENT_ATTR(tlbi, L2_EVENT_TLBI),
+ L2CACHE_EVENT_ATTR(barriers, L2_EVENT_BARRIERS),
+ L2CACHE_EVENT_ATTR(total-reads, L2_EVENT_TOTAL_READS),
+ L2CACHE_EVENT_ATTR(total-writes, L2_EVENT_TOTAL_WRITES),
+ L2CACHE_EVENT_ATTR(total-requests, L2_EVENT_TOTAL_REQUESTS),
+ L2CACHE_EVENT_ATTR(ldrex, L2_EVENT_LDREX),
+ L2CACHE_EVENT_ATTR(strex, L2_EVENT_STREX),
+ L2CACHE_EVENT_ATTR(clrex, L2_EVENT_CLREX),
+ NULL
+};
+
+static struct attribute_group l2_cache_pmu_events_group = {
+ .name = "events",
+ .attrs = l2_cache_pmu_events,
+};
+
static const struct attribute_group *l2_cache_pmu_attr_grps[] = {
&l2_cache_pmu_format_group,
&l2_cache_pmu_cpumask_group,
+ &l2_cache_pmu_events_group,
NULL,
};
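
Given the format strings added above (l2_group in config:0-3, l2_code in config:4-11, and the combined event field in config:0-11), a raw event id such as L2_EVENT_DCACHE_OPS (0x400) splits as in the small sketch below. This is illustrative only; the bit positions are read straight off the format attributes, and nothing further is implied about the hardware.

#include <stdio.h>

#define L2_EVENT_DCACHE_OPS 0x400

int main(void)
{
        unsigned int config = L2_EVENT_DCACHE_OPS;
        unsigned int group = config & 0xf;          /* config:0-3  */
        unsigned int code  = (config >> 4) & 0xff;  /* config:4-11 */

        printf("event=0x%03x -> l2_group=0x%x l2_code=0x%02x\n",
               config, group, code);
        return 0;
}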
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 1161e11fb3cf..aa857be692cf 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -24,7 +24,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -926,6 +926,7 @@ static const struct sun4i_usb_phy_cfg sun8i_v3s_cfg = {
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.enable_pmu_unk1 = true,
+ .phy0_dual_route = true,
};
static const struct sun4i_usb_phy_cfg sun50i_a64_cfg = {
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index 64fc59c3ae6d..97d27b0d5cc7 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -67,3 +67,16 @@ config PHY_BRCM_SATA
help
Enable this to support the Broadcom SATA PHY.
If unsure, say N.
+
+config PHY_BRCM_USB
+ tristate "Broadcom STB USB PHY driver"
+ depends on ARCH_BRCMSTB
+ depends on OF
+ select GENERIC_PHY
+ select SOC_BRCMSTB
+ default ARCH_BRCMSTB
+ help
+ Enable this to support the Broadcom STB USB PHY.
+ This driver is required by the USB XHCI, EHCI and OHCI
+ drivers.
+ If unsure, say N.
diff --git a/drivers/phy/broadcom/Makefile b/drivers/phy/broadcom/Makefile
index fdd9ec8fd70b..13e000c1a43a 100644
--- a/drivers/phy/broadcom/Makefile
+++ b/drivers/phy/broadcom/Makefile
@@ -6,3 +6,6 @@ obj-$(CONFIG_PHY_BCM_NS_USB3) += phy-bcm-ns-usb3.o
obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o
obj-$(CONFIG_PHY_NS2_USB_DRD) += phy-bcm-ns2-usbdrd.o
obj-$(CONFIG_PHY_BRCM_SATA) += phy-brcm-sata.o
+obj-$(CONFIG_PHY_BRCM_USB) += phy-brcm-usb-dvr.o
+
+phy-brcm-usb-dvr-objs := phy-brcm-usb.o phy-brcm-usb-init.o
diff --git a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
index d099a0c8cee5..7ceea5ae2704 100644
--- a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
+++ b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
@@ -12,7 +12,7 @@
*/
#include <linux/delay.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 9d7f74fe3d7c..3f953db70288 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -49,11 +49,29 @@ enum brcm_sata_phy_version {
BRCM_SATA_PHY_IPROC_SR,
};
+enum brcm_sata_phy_rxaeq_mode {
+ RXAEQ_MODE_OFF = 0,
+ RXAEQ_MODE_AUTO,
+ RXAEQ_MODE_MANUAL,
+};
+
+static enum brcm_sata_phy_rxaeq_mode rxaeq_to_val(const char *m)
+{
+ if (!strcmp(m, "auto"))
+ return RXAEQ_MODE_AUTO;
+ else if (!strcmp(m, "manual"))
+ return RXAEQ_MODE_MANUAL;
+ else
+ return RXAEQ_MODE_OFF;
+}
+
struct brcm_sata_port {
int portnum;
struct phy *phy;
struct brcm_sata_phy *phy_priv;
bool ssc_en;
+ enum brcm_sata_phy_rxaeq_mode rxaeq_mode;
+ u32 rxaeq_val;
};
struct brcm_sata_phy {
@@ -93,6 +111,15 @@ enum sata_phy_regs {
TX_ACTRL0 = 0x80,
TX_ACTRL0_TXPOL_FLIP = BIT(6),
+ AEQRX_REG_BANK_0 = 0xd0,
+ AEQ_CONTROL1 = 0x81,
+ AEQ_CONTROL1_ENABLE = BIT(2),
+ AEQ_CONTROL1_FREEZE = BIT(3),
+ AEQ_FRC_EQ = 0x83,
+ AEQ_FRC_EQ_FORCE = BIT(0),
+ AEQ_FRC_EQ_FORCE_VAL = BIT(1),
+ AEQRX_REG_BANK_1 = 0xe0,
+
OOB_REG_BANK = 0x150,
OOB1_REG_BANK = 0x160,
OOB_CTRL1 = 0x80,
@@ -190,7 +217,7 @@ static u32 brcm_sata_phy_rd(void __iomem *pcb_base, u32 bank, u32 ofs)
#define STB_FMAX_VAL_DEFAULT 0x3df
#define STB_FMAX_VAL_SSC 0x83
-static int brcm_stb_sata_init(struct brcm_sata_port *port)
+static void brcm_stb_sata_ssc_init(struct brcm_sata_port *port)
{
void __iomem *base = brcm_sata_pcb_base(port);
struct brcm_sata_phy *priv = port->phy_priv;
@@ -215,10 +242,47 @@ static int brcm_stb_sata_init(struct brcm_sata_port *port)
brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL3,
~TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK, tmp);
+}
+
+#define AEQ_FRC_EQ_VAL_SHIFT 2
+#define AEQ_FRC_EQ_VAL_MASK 0x3f
+
+static int brcm_stb_sata_rxaeq_init(struct brcm_sata_port *port)
+{
+ void __iomem *base = brcm_sata_pcb_base(port);
+ u32 tmp = 0, reg = 0;
+
+ switch (port->rxaeq_mode) {
+ case RXAEQ_MODE_OFF:
+ return 0;
+
+ case RXAEQ_MODE_AUTO:
+ reg = AEQ_CONTROL1;
+ tmp = AEQ_CONTROL1_ENABLE | AEQ_CONTROL1_FREEZE;
+ break;
+
+ case RXAEQ_MODE_MANUAL:
+ reg = AEQ_FRC_EQ;
+ tmp = AEQ_FRC_EQ_FORCE | AEQ_FRC_EQ_FORCE_VAL;
+ if (port->rxaeq_val > AEQ_FRC_EQ_VAL_MASK)
+ return -EINVAL;
+ tmp |= port->rxaeq_val << AEQ_FRC_EQ_VAL_SHIFT;
+ break;
+ }
+
+ brcm_sata_phy_wr(base, AEQRX_REG_BANK_0, reg, ~tmp, tmp);
+ brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, reg, ~tmp, tmp);
return 0;
}
+static int brcm_stb_sata_init(struct brcm_sata_port *port)
+{
+ brcm_stb_sata_ssc_init(port);
+
+ return brcm_stb_sata_rxaeq_init(port);
+}
+
/* NS2 SATA PLL1 defaults were characterized by H/W group */
#define NS2_PLL1_ACTRL2_MAGIC 0x1df8
#define NS2_PLL1_ACTRL3_MAGIC 0x2b00
@@ -463,6 +527,7 @@ MODULE_DEVICE_TABLE(of, brcm_sata_phy_of_match);
static int brcm_sata_phy_probe(struct platform_device *pdev)
{
+ const char *rxaeq_mode;
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node, *child;
const struct of_device_id *of_id;
@@ -525,6 +590,13 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
port->portnum = id;
port->phy_priv = priv;
port->phy = devm_phy_create(dev, child, &phy_ops);
+ port->rxaeq_mode = RXAEQ_MODE_OFF;
+ if (!of_property_read_string(child, "brcm,rxaeq-mode",
+ &rxaeq_mode))
+ port->rxaeq_mode = rxaeq_to_val(rxaeq_mode);
+ if (port->rxaeq_mode == RXAEQ_MODE_MANUAL)
+ of_property_read_u32(child, "brcm,rxaeq-value",
+ &port->rxaeq_val);
port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc");
if (IS_ERR(port->phy)) {
dev_err(dev, "failed to create PHY\n");
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
new file mode 100644
index 000000000000..1e7ce0b6f299
--- /dev/null
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -0,0 +1,1017 @@
+/*
+ * phy-brcm-usb-init.c - Broadcom USB Phy chip specific init functions
+ *
+ * Copyright (C) 2014-2017 Broadcom
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This module contains USB PHY initialization for power up and S3 resume
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/soc/brcmstb/brcmstb.h>
+#include "phy-brcm-usb-init.h"
+
+#define PHY_PORTS 2
+#define PHY_PORT_SELECT_0 0
+#define PHY_PORT_SELECT_1 0x1000
+
+/* Register definitions for the USB CTRL block */
+#define USB_CTRL_SETUP 0x00
+#define USB_CTRL_SETUP_IOC_MASK 0x00000010
+#define USB_CTRL_SETUP_IPP_MASK 0x00000020
+#define USB_CTRL_SETUP_BABO_MASK 0x00000001
+#define USB_CTRL_SETUP_FNHW_MASK 0x00000002
+#define USB_CTRL_SETUP_FNBO_MASK 0x00000004
+#define USB_CTRL_SETUP_WABO_MASK 0x00000008
+#define USB_CTRL_SETUP_SCB_CLIENT_SWAP_MASK 0x00002000 /* option */
+#define USB_CTRL_SETUP_SCB1_EN_MASK 0x00004000 /* option */
+#define USB_CTRL_SETUP_SCB2_EN_MASK 0x00008000 /* option */
+#define USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK 0x00020000 /* option */
+#define USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK 0x00010000 /* option */
+#define USB_CTRL_SETUP_STRAP_IPP_SEL_MASK 0x02000000 /* option */
+#define USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK 0x04000000 /* option */
+#define USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK 0x08000000 /* opt */
+#define USB_CTRL_SETUP_OC3_DISABLE_MASK 0xc0000000 /* option */
+#define USB_CTRL_PLL_CTL 0x04
+#define USB_CTRL_PLL_CTL_PLL_SUSPEND_EN_MASK 0x08000000
+#define USB_CTRL_PLL_CTL_PLL_RESETB_MASK 0x40000000
+#define USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK 0x80000000 /* option */
+#define USB_CTRL_EBRIDGE 0x0c
+#define USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK 0x00020000 /* option */
+#define USB_CTRL_MDIO 0x14
+#define USB_CTRL_MDIO2 0x18
+#define USB_CTRL_UTMI_CTL_1 0x2c
+#define USB_CTRL_UTMI_CTL_1_POWER_UP_FSM_EN_MASK 0x00000800
+#define USB_CTRL_UTMI_CTL_1_POWER_UP_FSM_EN_P1_MASK 0x08000000
+#define USB_CTRL_USB_PM 0x34
+#define USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK 0x00800000 /* option */
+#define USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK 0x00400000 /* option */
+#define USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK 0x40000000 /* option */
+#define USB_CTRL_USB_PM_USB_PWRDN_MASK 0x80000000 /* option */
+#define USB_CTRL_USB_PM_SOFT_RESET_MASK 0x40000000 /* option */
+#define USB_CTRL_USB_PM_USB20_HC_RESETB_MASK 0x30000000 /* option */
+#define USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK 0x00300000 /* option */
+#define USB_CTRL_USB30_CTL1 0x60
+#define USB_CTRL_USB30_CTL1_PHY3_PLL_SEQ_START_MASK 0x00000010
+#define USB_CTRL_USB30_CTL1_PHY3_RESETB_MASK 0x00010000
+#define USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK 0x00020000 /* option */
+#define USB_CTRL_USB30_CTL1_USB3_IOC_MASK 0x10000000 /* option */
+#define USB_CTRL_USB30_CTL1_USB3_IPP_MASK 0x20000000 /* option */
+#define USB_CTRL_USB30_PCTL 0x70
+#define USB_CTRL_USB30_PCTL_PHY3_SOFT_RESETB_MASK 0x00000002
+#define USB_CTRL_USB30_PCTL_PHY3_SOFT_RESETB_P1_MASK 0x00020000
+#define USB_CTRL_USB_DEVICE_CTL1 0x90
+#define USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK 0x00000003 /* option */
+
+/* Register definitions for the XHCI EC block */
+#define USB_XHCI_EC_IRAADR 0x658
+#define USB_XHCI_EC_IRADAT 0x65c
+
+enum brcm_family_type {
+ BRCM_FAMILY_3390A0,
+ BRCM_FAMILY_7250B0,
+ BRCM_FAMILY_7271A0,
+ BRCM_FAMILY_7364A0,
+ BRCM_FAMILY_7366C0,
+ BRCM_FAMILY_74371A0,
+ BRCM_FAMILY_7439B0,
+ BRCM_FAMILY_7445D0,
+ BRCM_FAMILY_7260A0,
+ BRCM_FAMILY_7278A0,
+ BRCM_FAMILY_COUNT,
+};
+
+#define USB_BRCM_FAMILY(chip) \
+ [BRCM_FAMILY_##chip] = __stringify(chip)
+
+static const char *family_names[BRCM_FAMILY_COUNT] = {
+ USB_BRCM_FAMILY(3390A0),
+ USB_BRCM_FAMILY(7250B0),
+ USB_BRCM_FAMILY(7271A0),
+ USB_BRCM_FAMILY(7364A0),
+ USB_BRCM_FAMILY(7366C0),
+ USB_BRCM_FAMILY(74371A0),
+ USB_BRCM_FAMILY(7439B0),
+ USB_BRCM_FAMILY(7445D0),
+ USB_BRCM_FAMILY(7260A0),
+ USB_BRCM_FAMILY(7278A0),
+};
+
+enum {
+ USB_CTRL_SETUP_SCB1_EN_SELECTOR,
+ USB_CTRL_SETUP_SCB2_EN_SELECTOR,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR,
+ USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR,
+ USB_CTRL_SETUP_OC3_DISABLE_SELECTOR,
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_SELECTOR,
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_SELECTOR,
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_SELECTOR,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR,
+ USB_CTRL_USB_PM_USB_PWRDN_SELECTOR,
+ USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_SELECTOR,
+ USB_CTRL_USB30_CTL1_USB3_IOC_SELECTOR,
+ USB_CTRL_USB30_CTL1_USB3_IPP_SELECTOR,
+ USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_SELECTOR,
+ USB_CTRL_USB_PM_SOFT_RESET_SELECTOR,
+ USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_SELECTOR,
+ USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_SELECTOR,
+ USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR,
+ USB_CTRL_SETUP_ENDIAN_SELECTOR,
+ USB_CTRL_SELECTOR_COUNT,
+};
+
+#define USB_CTRL_REG(base, reg) ((void *)base + USB_CTRL_##reg)
+#define USB_XHCI_EC_REG(base, reg) ((void *)base + USB_XHCI_EC_##reg)
+#define USB_CTRL_MASK(reg, field) \
+ USB_CTRL_##reg##_##field##_MASK
+#define USB_CTRL_MASK_FAMILY(params, reg, field) \
+ (params->usb_reg_bits_map[USB_CTRL_##reg##_##field##_SELECTOR])
+
+#define USB_CTRL_SET_FAMILY(params, reg, field) \
+ usb_ctrl_set_family(params, USB_CTRL_##reg, \
+ USB_CTRL_##reg##_##field##_SELECTOR)
+#define USB_CTRL_UNSET_FAMILY(params, reg, field) \
+ usb_ctrl_unset_family(params, USB_CTRL_##reg, \
+ USB_CTRL_##reg##_##field##_SELECTOR)
+
+#define USB_CTRL_SET(base, reg, field) \
+ usb_ctrl_set(USB_CTRL_REG(base, reg), \
+ USB_CTRL_##reg##_##field##_MASK)
+#define USB_CTRL_UNSET(base, reg, field) \
+ usb_ctrl_unset(USB_CTRL_REG(base, reg), \
+ USB_CTRL_##reg##_##field##_MASK)
+
+#define MDIO_USB2 0
+#define MDIO_USB3 BIT(31)
+
+#define USB_CTRL_SETUP_ENDIAN_BITS ( \
+ USB_CTRL_MASK(SETUP, BABO) | \
+ USB_CTRL_MASK(SETUP, FNHW) | \
+ USB_CTRL_MASK(SETUP, FNBO) | \
+ USB_CTRL_MASK(SETUP, WABO))
+
+#ifdef __LITTLE_ENDIAN
+#define ENDIAN_SETTINGS ( \
+ USB_CTRL_MASK(SETUP, BABO) | \
+ USB_CTRL_MASK(SETUP, FNHW))
+#else
+#define ENDIAN_SETTINGS ( \
+ USB_CTRL_MASK(SETUP, FNHW) | \
+ USB_CTRL_MASK(SETUP, FNBO) | \
+ USB_CTRL_MASK(SETUP, WABO))
+#endif
+
+struct id_to_type {
+ u32 id;
+ int type;
+};
+
+static const struct id_to_type id_to_type_table[] = {
+ { 0x33900000, BRCM_FAMILY_3390A0 },
+ { 0x72500010, BRCM_FAMILY_7250B0 },
+ { 0x72600000, BRCM_FAMILY_7260A0 },
+ { 0x72680000, BRCM_FAMILY_7271A0 },
+ { 0x72710000, BRCM_FAMILY_7271A0 },
+ { 0x73640000, BRCM_FAMILY_7364A0 },
+ { 0x73660020, BRCM_FAMILY_7366C0 },
+ { 0x07437100, BRCM_FAMILY_74371A0 },
+ { 0x74390010, BRCM_FAMILY_7439B0 },
+ { 0x74450030, BRCM_FAMILY_7445D0 },
+ { 0x72780000, BRCM_FAMILY_7278A0 },
+ { 0, BRCM_FAMILY_7271A0 }, /* default */
+};
+
+static const u32
+usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
+ /* 3390B0 */
+ [BRCM_FAMILY_3390A0] = {
+ USB_CTRL_SETUP_SCB1_EN_MASK,
+ USB_CTRL_SETUP_SCB2_EN_MASK,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK,
+ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7250b0 */
+ [BRCM_FAMILY_7250B0] = {
+ USB_CTRL_SETUP_SCB1_EN_MASK,
+ USB_CTRL_SETUP_SCB2_EN_MASK,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK,
+ 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */
+ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ USB_CTRL_USB_PM_USB20_HC_RESETB_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7271a0 */
+ [BRCM_FAMILY_7271A0] = {
+ 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */
+ 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK,
+ USB_CTRL_USB_PM_SOFT_RESET_MASK,
+ USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK,
+ USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK,
+ USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7364a0 */
+ [BRCM_FAMILY_7364A0] = {
+ USB_CTRL_SETUP_SCB1_EN_MASK,
+ USB_CTRL_SETUP_SCB2_EN_MASK,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK,
+ 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */
+ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ USB_CTRL_USB_PM_USB20_HC_RESETB_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7366c0 */
+ [BRCM_FAMILY_7366C0] = {
+ USB_CTRL_SETUP_SCB1_EN_MASK,
+ USB_CTRL_SETUP_SCB2_EN_MASK,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */
+ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ USB_CTRL_USB_PM_USB20_HC_RESETB_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 74371A0 */
+ [BRCM_FAMILY_74371A0] = {
+ USB_CTRL_SETUP_SCB1_EN_MASK,
+ USB_CTRL_SETUP_SCB2_EN_MASK,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK,
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ 0, /* USB_CTRL_SETUP_OC3_DISABLE_MASK */
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
+ 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
+ USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB30_CTL1_USB3_IOC_MASK,
+ USB_CTRL_USB30_CTL1_USB3_IPP_MASK,
+ 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */
+ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ 0, /* USB_CTRL_USB_PM_USB20_HC_RESETB_MASK */
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7439B0 */
+ [BRCM_FAMILY_7439B0] = {
+ USB_CTRL_SETUP_SCB1_EN_MASK,
+ USB_CTRL_SETUP_SCB2_EN_MASK,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+ 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK,
+ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7445d0 */
+ [BRCM_FAMILY_7445D0] = {
+ USB_CTRL_SETUP_SCB1_EN_MASK,
+ USB_CTRL_SETUP_SCB2_EN_MASK,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK,
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
+ 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
+ USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK,
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */
+ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7260a0 */
+ [BRCM_FAMILY_7260A0] = {
+ 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */
+ 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK,
+ USB_CTRL_USB_PM_SOFT_RESET_MASK,
+ USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK,
+ USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK,
+ USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7278a0 */
+ [BRCM_FAMILY_7278A0] = {
+ 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */
+ 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */
+ 0, /* USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK */
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK,
+ USB_CTRL_USB_PM_SOFT_RESET_MASK,
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ 0, /* USB_CTRL_USB_PM_USB20_HC_RESETB_MASK */
+ 0, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+};
+
+static inline u32 brcmusb_readl(void __iomem *addr)
+{
+ return readl(addr);
+}
+
+static inline void brcmusb_writel(u32 val, void __iomem *addr)
+{
+ writel(val, addr);
+}
+
+static inline
+void usb_ctrl_unset_family(struct brcm_usb_init_params *params,
+ u32 reg_offset, u32 field)
+{
+ u32 mask;
+ void __iomem *reg;
+
+ mask = params->usb_reg_bits_map[field];
+ reg = params->ctrl_regs + reg_offset;
+ brcmusb_writel(brcmusb_readl(reg) & ~mask, reg);
+}
+
+static inline
+void usb_ctrl_set_family(struct brcm_usb_init_params *params,
+ u32 reg_offset, u32 field)
+{
+ u32 mask;
+ void __iomem *reg;
+
+ mask = params->usb_reg_bits_map[field];
+ reg = params->ctrl_regs + reg_offset;
+ brcmusb_writel(brcmusb_readl(reg) | mask, reg);
+}
+
+static inline void usb_ctrl_set(void __iomem *reg, u32 field)
+{
+ u32 value;
+
+ value = brcmusb_readl(reg);
+ brcmusb_writel(value | field, reg);
+}
+
+static inline void usb_ctrl_unset(void __iomem *reg, u32 field)
+{
+ u32 value;
+
+ value = brcmusb_readl(reg);
+ brcmusb_writel(value & ~field, reg);
+}
+
+static u32 brcmusb_usb_mdio_read(void __iomem *ctrl_base, u32 reg, int mode)
+{
+ u32 data;
+
+ data = (reg << 16) | mode;
+ brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
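+ /* pulse bit 24 to trigger the MDIO read (set here, cleared again below) */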
+ data |= (1 << 24);
+ brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ data &= ~(1 << 24);
+ /* wait for the 60MHz parallel to serial shifter */
+ usleep_range(10, 20);
+ brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ /* wait for the 60MHz parallel to serial shifter */
+ usleep_range(10, 20);
+
+ return brcmusb_readl(USB_CTRL_REG(ctrl_base, MDIO2)) & 0xffff;
+}
+
+static void brcmusb_usb_mdio_write(void __iomem *ctrl_base, u32 reg,
+ u32 val, int mode)
+{
+ u32 data;
+
+ data = (reg << 16) | val | mode;
+ brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
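+ /* pulse bit 25 to trigger the MDIO write (set here, cleared again below) */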
+ data |= (1 << 25);
+ brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ data &= ~(1 << 25);
+
+ /* wait for the 60MHz parallel to serial shifter */
+ usleep_range(10, 20);
+ brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ /* wait for the 60MHz parallel to serial shifter */
+ usleep_range(10, 20);
+}
+
+static void brcmusb_usb_phy_ldo_fix(void __iomem *ctrl_base)
+{
+ /*
+ * First disable the FSM, but also leave it that way
+ * to allow normal suspend/resume.
+ */
+ USB_CTRL_UNSET(ctrl_base, UTMI_CTL_1, POWER_UP_FSM_EN);
+ USB_CTRL_UNSET(ctrl_base, UTMI_CTL_1, POWER_UP_FSM_EN_P1);
+
+ /* reset USB 2.0 PLL */
+ USB_CTRL_UNSET(ctrl_base, PLL_CTL, PLL_RESETB);
+ /* PLL reset period */
+ udelay(1);
+ USB_CTRL_SET(ctrl_base, PLL_CTL, PLL_RESETB);
+ /* Give PLL enough time to lock */
+ usleep_range(1000, 2000);
+}
+
+static void brcmusb_usb2_eye_fix(void __iomem *ctrl_base)
+{
+ /* Increase USB 2.0 TX level to meet spec requirement */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, 0x80a0, MDIO_USB2);
+ brcmusb_usb_mdio_write(ctrl_base, 0x0a, 0xc6a0, MDIO_USB2);
+}
+
+static void brcmusb_usb3_pll_fix(void __iomem *ctrl_base)
+{
+ /* Set correct window for PLL lock detect */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, 0x8000, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x07, 0x1503, MDIO_USB3);
+}
+
+static void brcmusb_usb3_enable_pipe_reset(void __iomem *ctrl_base)
+{
+ u32 val;
+
+ /* Re-enable USB 3.0 pipe reset */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, 0x8000, MDIO_USB3);
+ val = brcmusb_usb_mdio_read(ctrl_base, 0x0f, MDIO_USB3) | 0x200;
+ brcmusb_usb_mdio_write(ctrl_base, 0x0f, val, MDIO_USB3);
+}
+
+static void brcmusb_usb3_enable_sigdet(void __iomem *ctrl_base)
+{
+ u32 val, ofs;
+ int ii;
+
+ ofs = 0;
+ for (ii = 0; ii < PHY_PORTS; ++ii) {
+ /* Set correct default for sigdet */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, (0x8080 + ofs),
+ MDIO_USB3);
+ val = brcmusb_usb_mdio_read(ctrl_base, 0x05, MDIO_USB3);
+ val = (val & ~0x800f) | 0x800d;
+ brcmusb_usb_mdio_write(ctrl_base, 0x05, val, MDIO_USB3);
+ ofs = PHY_PORT_SELECT_1;
+ }
+}
+
+static void brcmusb_usb3_enable_skip_align(void __iomem *ctrl_base)
+{
+ u32 val, ofs;
+ int ii;
+
+ ofs = 0;
+ for (ii = 0; ii < PHY_PORTS; ++ii) {
+ /* Set correct default for SKIP align */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, (0x8060 + ofs),
+ MDIO_USB3);
+ val = brcmusb_usb_mdio_read(ctrl_base, 0x01, MDIO_USB3) | 0x200;
+ brcmusb_usb_mdio_write(ctrl_base, 0x01, val, MDIO_USB3);
+ ofs = PHY_PORT_SELECT_1;
+ }
+}
+
+static void brcmusb_usb3_unfreeze_aeq(void __iomem *ctrl_base)
+{
+ u32 val, ofs;
+ int ii;
+
+ ofs = 0;
+ for (ii = 0; ii < PHY_PORTS; ++ii) {
+ /* Let EQ freeze after TSEQ */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, (0x80e0 + ofs),
+ MDIO_USB3);
+ val = brcmusb_usb_mdio_read(ctrl_base, 0x01, MDIO_USB3);
+ val &= ~0x0008;
+ brcmusb_usb_mdio_write(ctrl_base, 0x01, val, MDIO_USB3);
+ ofs = PHY_PORT_SELECT_1;
+ }
+}
+
+static void brcmusb_usb3_pll_54mhz(struct brcm_usb_init_params *params)
+{
+ u32 ofs;
+ int ii;
+ void __iomem *ctrl_base = params->ctrl_regs;
+
+ /*
+ * On newer B53-based SoCs, the reference clock for the
+ * 3.0 PLL has been changed from 50MHz to 54MHz so the
+ * PLL needs to be reprogrammed.
+ * See SWLINUX-4006.
+ *
+ * On the 7364C0, the reference clock for the
+ * 3.0 PLL has been changed from 50MHz to 54MHz to
+ * work around a MOCA issue.
+ * See SWLINUX-4169.
+ */
+ switch (params->selected_family) {
+ case BRCM_FAMILY_3390A0:
+ case BRCM_FAMILY_7250B0:
+ case BRCM_FAMILY_7366C0:
+ case BRCM_FAMILY_74371A0:
+ case BRCM_FAMILY_7439B0:
+ case BRCM_FAMILY_7445D0:
+ case BRCM_FAMILY_7260A0:
+ return;
+ case BRCM_FAMILY_7364A0:
+ if (BRCM_REV(params->family_id) < 0x20)
+ return;
+ break;
+ }
+
+ /* set USB 3.0 PLL to accept 54MHz reference clock */
+ USB_CTRL_UNSET(ctrl_base, USB30_CTL1, PHY3_PLL_SEQ_START);
+
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, 0x8000, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x10, 0x5784, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x11, 0x01d0, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x12, 0x1DE8, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x13, 0xAA80, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x14, 0x8826, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x15, 0x0044, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x16, 0x8000, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x17, 0x0851, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x18, 0x0000, MDIO_USB3);
+
+ /* both ports */
+ ofs = 0;
+ for (ii = 0; ii < PHY_PORTS; ++ii) {
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, (0x8040 + ofs),
+ MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x03, 0x0090, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x04, 0x0134, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, (0x8020 + ofs),
+ MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x01, 0x00e2, MDIO_USB3);
+ ofs = PHY_PORT_SELECT_1;
+ }
+
+ /* restart PLL sequence */
+ USB_CTRL_SET(ctrl_base, USB30_CTL1, PHY3_PLL_SEQ_START);
+ /* Give PLL enough time to lock */
+ usleep_range(1000, 2000);
+}
+
+static void brcmusb_usb3_ssc_enable(void __iomem *ctrl_base)
+{
+ u32 val;
+
+ /* Enable USB 3.0 TX spread spectrum */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, 0x8040, MDIO_USB3);
+ val = brcmusb_usb_mdio_read(ctrl_base, 0x01, MDIO_USB3) | 0xf;
+ brcmusb_usb_mdio_write(ctrl_base, 0x01, val, MDIO_USB3);
+
+ /*
+ * Currently, USB 3.0 SSC is enabled via port 0 MDIO registers,
+ * which should have been adequate. However, due to a bug in the
+ * USB 3.0 PHY, it must be enabled via both ports (HWUSB3DVT-26).
+ */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, 0x9040, MDIO_USB3);
+ val = brcmusb_usb_mdio_read(ctrl_base, 0x01, MDIO_USB3) | 0xf;
+ brcmusb_usb_mdio_write(ctrl_base, 0x01, val, MDIO_USB3);
+}
+
+static void brcmusb_usb3_phy_workarounds(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl_base = params->ctrl_regs;
+
+ brcmusb_usb3_pll_fix(ctrl_base);
+ brcmusb_usb3_pll_54mhz(params);
+ brcmusb_usb3_ssc_enable(ctrl_base);
+ brcmusb_usb3_enable_pipe_reset(ctrl_base);
+ brcmusb_usb3_enable_sigdet(ctrl_base);
+ brcmusb_usb3_enable_skip_align(ctrl_base);
+ brcmusb_usb3_unfreeze_aeq(ctrl_base);
+}
+
+static void brcmusb_memc_fix(struct brcm_usb_init_params *params)
+{
+ u32 prid;
+
+ if (params->selected_family != BRCM_FAMILY_7445D0)
+ return;
+ /*
+ * This is a workaround for HW7445-1869 where a DMA write ends up
+ * doing a read pre-fetch after the end of the DMA buffer. This
+ * causes a problem when the DMA buffer is at the end of physical
+ * memory and the chip bondout has MEMC2 disabled: the pre-fetch
+ * read then accesses non-existent memory, and when it tries to use
+ * the disabled MEMC2 it hangs the bus. The workaround is to disable
+ * MEMC2 access in the USB controller, which avoids the hang.
+ */
+
+ prid = params->product_id & 0xfffff000;
+ switch (prid) {
+ case 0x72520000:
+ case 0x74480000:
+ case 0x74490000:
+ case 0x07252000:
+ case 0x07448000:
+ case 0x07449000:
+ USB_CTRL_UNSET_FAMILY(params, SETUP, SCB2_EN);
+ }
+}
+
+static void brcmusb_usb3_otp_fix(struct brcm_usb_init_params *params)
+{
+ void __iomem *xhci_ec_base = params->xhci_ec_regs;
+ u32 val;
+
+ if (params->family_id != 0x74371000 || !xhci_ec_base)
+ return;
+ brcmusb_writel(0xa20c, USB_XHCI_EC_REG(xhci_ec_base, IRAADR));
+ val = brcmusb_readl(USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
+
+ /* set cfg_pick_ss_lock */
+ val |= (1 << 27);
+ brcmusb_writel(val, USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
+
+ /* Reset USB 3.0 PHY for workaround to take effect */
+ USB_CTRL_UNSET(params->ctrl_regs, USB30_CTL1, PHY3_RESETB);
+ USB_CTRL_SET(params->ctrl_regs, USB30_CTL1, PHY3_RESETB);
+}
+
+static void brcmusb_xhci_soft_reset(struct brcm_usb_init_params *params,
+ int on_off)
+{
+ /* Assert reset */
+ if (on_off) {
+ if (USB_CTRL_MASK_FAMILY(params, USB_PM, XHC_SOFT_RESETB))
+ USB_CTRL_UNSET_FAMILY(params, USB_PM, XHC_SOFT_RESETB);
+ else
+ USB_CTRL_UNSET_FAMILY(params,
+ USB30_CTL1, XHC_SOFT_RESETB);
+ } else { /* De-assert reset */
+ if (USB_CTRL_MASK_FAMILY(params, USB_PM, XHC_SOFT_RESETB))
+ USB_CTRL_SET_FAMILY(params, USB_PM, XHC_SOFT_RESETB);
+ else
+ USB_CTRL_SET_FAMILY(params, USB30_CTL1,
+ XHC_SOFT_RESETB);
+ }
+}
+
+/*
+ * Return the best map table family. The order is:
+ * - exact match of chip and major rev
+ * - exact match of chip and closest older major rev
+ * - default chip/rev.
+ * NOTE: The minor rev is always ignored.
+ */
+static enum brcm_family_type brcmusb_get_family_type(
+ struct brcm_usb_init_params *params)
+{
+ int last_type = -1;
+ u32 last_family = 0;
+ u32 family_no_major;
+ unsigned int x;
+ u32 family;
+
+ family = params->family_id & 0xfffffff0;
+ family_no_major = params->family_id & 0xffffff00;
+ for (x = 0; id_to_type_table[x].id; x++) {
+ if (family == id_to_type_table[x].id)
+ return id_to_type_table[x].type;
+ if (family_no_major == (id_to_type_table[x].id & 0xffffff00))
+ if (family > id_to_type_table[x].id &&
+ last_family < id_to_type_table[x].id) {
+ last_family = id_to_type_table[x].id;
+ last_type = id_to_type_table[x].type;
+ }
+ }
+
+ /* If no match, return the default family */
+ if (last_type == -1)
+ return id_to_type_table[x].type;
+ return last_type;
+}
+
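For reference, here is a minimal stand-alone sketch of the rev-matching rule implemented by brcmusb_get_family_type() above. It is illustrative only, not part of the patch, and the table contents are hypothetical:

#include <stdio.h>

/* Hypothetical excerpt of id_to_type_table: chip/rev id -> map index, 0-id sentinel = default */
struct id_to_type { unsigned int id; int type; };
static const struct id_to_type table[] = {
        { 0x74390000, 4 },      /* 7439A0 */
        { 0x74390010, 5 },      /* 7439B0 */
        { 0, 6 },               /* default */
};

static int pick_family(unsigned int family_id)
{
        unsigned int family = family_id & 0xfffffff0;   /* ignore minor rev */
        unsigned int no_major = family_id & 0xffffff00; /* ignore major rev too */
        unsigned int last = 0;
        int last_type = -1, x;

        for (x = 0; table[x].id; x++) {
                if (family == table[x].id)
                        return table[x].type;           /* exact chip + major rev */
                if (no_major == (table[x].id & 0xffffff00) &&
                    family > table[x].id && last < table[x].id) {
                        last = table[x].id;             /* closest older major rev */
                        last_type = table[x].type;
                }
        }
        return last_type == -1 ? table[x].type : last_type;    /* default entry */
}

int main(void)
{
        /* A hypothetical 7439C0 (0x74390020) falls back to the 7439B0 entry: prints 5 */
        printf("%d\n", pick_family(0x74390020));
        return 0;
}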
+void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->ctrl_regs;
+ u32 reg;
+ u32 orig_reg;
+
+ /* Starting with the 7445d0, there are no longer separate 3.0
+ * versions of IOC and IPP.
+ */
+ if (USB_CTRL_MASK_FAMILY(params, USB30_CTL1, USB3_IOC)) {
+ if (params->ioc)
+ USB_CTRL_SET_FAMILY(params, USB30_CTL1, USB3_IOC);
+ if (params->ipp == 1)
+ USB_CTRL_SET_FAMILY(params, USB30_CTL1, USB3_IPP);
+ }
+
+ reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ orig_reg = reg;
+ if (USB_CTRL_MASK_FAMILY(params, SETUP, STRAP_CC_DRD_MODE_ENABLE_SEL))
+ /* Never use the strap, it's going away. */
+ reg &= ~(USB_CTRL_MASK_FAMILY(params,
+ SETUP,
+ STRAP_CC_DRD_MODE_ENABLE_SEL));
+ if (USB_CTRL_MASK_FAMILY(params, SETUP, STRAP_IPP_SEL))
+ if (params->ipp != 2)
+ /* override the IPP strap pin (if it exists) */
+ reg &= ~(USB_CTRL_MASK_FAMILY(params, SETUP,
+ STRAP_IPP_SEL));
+
+ /* Override the default OC and PP polarity */
+ reg &= ~(USB_CTRL_MASK(SETUP, IPP) | USB_CTRL_MASK(SETUP, IOC));
+ if (params->ioc)
+ reg |= USB_CTRL_MASK(SETUP, IOC);
+ if (params->ipp == 1 && ((reg & USB_CTRL_MASK(SETUP, IPP)) == 0))
+ reg |= USB_CTRL_MASK(SETUP, IPP);
+ brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+
+ /*
+ * If we're changing IPP, make sure power is off long enough
+ * to turn off any connected devices.
+ */
+ if (reg != orig_reg)
+ msleep(50);
+}
+
+int brcm_usb_init_get_dual_select(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->ctrl_regs;
+ u32 reg = 0;
+
+ if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
+ PORT_MODE);
+ }
+ return reg;
+}
+
+void brcm_usb_init_set_dual_select(struct brcm_usb_init_params *params,
+ int mode)
+{
+ void __iomem *ctrl = params->ctrl_regs;
+ u32 reg;
+
+ if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
+ PORT_MODE);
+ reg |= mode;
+ brcmusb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ }
+}
+
+void brcm_usb_init_common(struct brcm_usb_init_params *params)
+{
+ u32 reg;
+ void __iomem *ctrl = params->ctrl_regs;
+
+ /* Take USB out of power down */
+ if (USB_CTRL_MASK_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN)) {
+ USB_CTRL_UNSET_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN);
+ /* 1 millisecond - for USB clocks to settle down */
+ usleep_range(1000, 2000);
+ }
+
+ if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB_PWRDN)) {
+ USB_CTRL_UNSET_FAMILY(params, USB_PM, USB_PWRDN);
+ /* 1 millisecond - for USB clocks to settle down */
+ usleep_range(1000, 2000);
+ }
+
+ if (params->selected_family != BRCM_FAMILY_74371A0 &&
+ (BRCM_ID(params->family_id) != 0x7364))
+ /*
+ * HW7439-637: 7439a0 and its derivatives do not have large
+ * enough descriptor storage for this.
+ */
+ USB_CTRL_SET_FAMILY(params, SETUP, SS_EHCI64BIT_EN);
+
+ /* Block auto PLL suspend by USB2 PHY (Sasi) */
+ USB_CTRL_SET(ctrl, PLL_CTL, PLL_SUSPEND_EN);
+
+ reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ if (params->selected_family == BRCM_FAMILY_7364A0)
+ /* Suppress overcurrent indication from USB30 ports for A0 */
+ reg |= USB_CTRL_MASK_FAMILY(params, SETUP, OC3_DISABLE);
+
+ brcmusb_usb_phy_ldo_fix(ctrl);
+ brcmusb_usb2_eye_fix(ctrl);
+
+ /*
+ * Make sure the second and third memory controller
+ * interfaces are enabled if they exist.
+ */
+ if (USB_CTRL_MASK_FAMILY(params, SETUP, SCB1_EN))
+ reg |= USB_CTRL_MASK_FAMILY(params, SETUP, SCB1_EN);
+ if (USB_CTRL_MASK_FAMILY(params, SETUP, SCB2_EN))
+ reg |= USB_CTRL_MASK_FAMILY(params, SETUP, SCB2_EN);
+ brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+
+ brcmusb_memc_fix(params);
+
+ if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
+ PORT_MODE);
+ reg |= params->mode;
+ brcmusb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ }
+ if (USB_CTRL_MASK_FAMILY(params, USB_PM, BDC_SOFT_RESETB)) {
+ switch (params->mode) {
+ case USB_CTLR_MODE_HOST:
+ USB_CTRL_UNSET_FAMILY(params, USB_PM, BDC_SOFT_RESETB);
+ break;
+ default:
+ USB_CTRL_SET_FAMILY(params, USB_PM, BDC_SOFT_RESETB);
+ break;
+ }
+ }
+ if (USB_CTRL_MASK_FAMILY(params, SETUP, CC_DRD_MODE_ENABLE)) {
+ if (params->mode == USB_CTLR_MODE_TYPEC_PD)
+ USB_CTRL_SET_FAMILY(params, SETUP, CC_DRD_MODE_ENABLE);
+ else
+ USB_CTRL_UNSET_FAMILY(params, SETUP,
+ CC_DRD_MODE_ENABLE);
+ }
+}
+
+void brcm_usb_init_eohci(struct brcm_usb_init_params *params)
+{
+ u32 reg;
+ void __iomem *ctrl = params->ctrl_regs;
+
+ if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB20_HC_RESETB))
+ USB_CTRL_SET_FAMILY(params, USB_PM, USB20_HC_RESETB);
+
+ if (params->selected_family == BRCM_FAMILY_7366C0)
+ /*
+ * Don't enable this so the memory controller doesn't read
+ * into memory holes. NOTE: This bit is low true on 7366C0.
+ */
+ USB_CTRL_SET_FAMILY(params, EBRIDGE, ESTOP_SCB_REQ);
+
+ /* Setup the endian bits */
+ reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ reg &= ~USB_CTRL_SETUP_ENDIAN_BITS;
+ reg |= USB_CTRL_MASK_FAMILY(params, SETUP, ENDIAN);
+ brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+}
+
+void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->ctrl_regs;
+
+ if (BRCM_ID(params->family_id) == 0x7366) {
+ /*
+ * The PHY3_SOFT_RESETB bits default to the wrong state.
+ */
+ USB_CTRL_SET(ctrl, USB30_PCTL, PHY3_SOFT_RESETB);
+ USB_CTRL_SET(ctrl, USB30_PCTL, PHY3_SOFT_RESETB_P1);
+ }
+
+ /*
+ * Kick-start the USB3 PHY.
+ * Make sure it's low to ensure a rising edge.
+ */
+ USB_CTRL_UNSET(ctrl, USB30_CTL1, PHY3_PLL_SEQ_START);
+ USB_CTRL_SET(ctrl, USB30_CTL1, PHY3_PLL_SEQ_START);
+
+ brcmusb_usb3_phy_workarounds(params);
+ brcmusb_xhci_soft_reset(params, 0);
+ brcmusb_usb3_otp_fix(params);
+}
+
+void brcm_usb_uninit_common(struct brcm_usb_init_params *params)
+{
+ if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB_PWRDN))
+ USB_CTRL_SET_FAMILY(params, USB_PM, USB_PWRDN);
+
+ if (USB_CTRL_MASK_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN))
+ USB_CTRL_SET_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN);
+}
+
+void brcm_usb_uninit_eohci(struct brcm_usb_init_params *params)
+{
+ if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB20_HC_RESETB))
+ USB_CTRL_UNSET_FAMILY(params, USB_PM, USB20_HC_RESETB);
+}
+
+void brcm_usb_uninit_xhci(struct brcm_usb_init_params *params)
+{
+ brcmusb_xhci_soft_reset(params, 1);
+}
+
+void brcm_usb_set_family_map(struct brcm_usb_init_params *params)
+{
+ int fam;
+
+ fam = brcmusb_get_family_type(params);
+ params->selected_family = fam;
+ params->usb_reg_bits_map =
+ &usb_reg_bits_map_table[fam][0];
+ params->family_name = family_names[fam];
+}
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
new file mode 100644
index 000000000000..bb77b863885e
--- /dev/null
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2014-2017 Broadcom
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _USB_BRCM_COMMON_INIT_H
+#define _USB_BRCM_COMMON_INIT_H
+
+#define USB_CTLR_MODE_HOST 0
+#define USB_CTLR_MODE_DEVICE 1
+#define USB_CTLR_MODE_DRD 2
+#define USB_CTLR_MODE_TYPEC_PD 3
+
+struct brcm_usb_init_params;
+
+struct brcm_usb_init_params {
+ void __iomem *ctrl_regs;
+ void __iomem *xhci_ec_regs;
+ int ioc;
+ int ipp;
+ int mode;
+ u32 family_id;
+ u32 product_id;
+ int selected_family;
+ const char *family_name;
+ const u32 *usb_reg_bits_map;
+};
+
+void brcm_usb_set_family_map(struct brcm_usb_init_params *params);
+int brcm_usb_init_get_dual_select(struct brcm_usb_init_params *params);
+void brcm_usb_init_set_dual_select(struct brcm_usb_init_params *params,
+ int mode);
+
+void brcm_usb_init_ipp(struct brcm_usb_init_params *ini);
+void brcm_usb_init_common(struct brcm_usb_init_params *ini);
+void brcm_usb_init_eohci(struct brcm_usb_init_params *ini);
+void brcm_usb_init_xhci(struct brcm_usb_init_params *ini);
+void brcm_usb_uninit_common(struct brcm_usb_init_params *ini);
+void brcm_usb_uninit_eohci(struct brcm_usb_init_params *ini);
+void brcm_usb_uninit_xhci(struct brcm_usb_init_params *ini);
+
+#endif /* _USB_BRCM_COMMON_INIT_H */
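For orientation, the wrapper driver that follows consumes this API roughly in the order sketched below. This is a simplified illustration, not part of the patch; error handling, register mapping and clock management are omitted:

#include "phy-brcm-usb-init.h"

/* Simplified bring-up order, mirroring phy-brcm-usb.c below. */
static void example_bringup(struct brcm_usb_init_params *ini)
{
        brcm_usb_set_family_map(ini);   /* choose the per-family register bit map */
        brcm_usb_init_ipp(ini);         /* apply power polarity / overcurrent settings */
        brcm_usb_init_common(ini);      /* shared controller setup */
        brcm_usb_init_eohci(ini);       /* only if the EHCI/OHCI block is used */
        brcm_usb_init_xhci(ini);        /* only if the XHCI block is used */
}

static void example_teardown(struct brcm_usb_init_params *ini)
{
        brcm_usb_uninit_xhci(ini);
        brcm_usb_uninit_eohci(ini);
        brcm_usb_uninit_common(ini);
}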
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
new file mode 100644
index 000000000000..195b98139e5f
--- /dev/null
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -0,0 +1,459 @@
+/*
+ * phy-brcm-usb.c - Broadcom USB Phy Driver
+ *
+ * Copyright (C) 2015-2017 Broadcom
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/soc/brcmstb/brcmstb.h>
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-brcm-usb-init.h"
+
+static DEFINE_MUTEX(sysfs_lock);
+
+enum brcm_usb_phy_id {
+ BRCM_USB_PHY_2_0 = 0,
+ BRCM_USB_PHY_3_0,
+ BRCM_USB_PHY_ID_MAX
+};
+
+struct value_to_name_map {
+ int value;
+ const char *name;
+};
+
+static struct value_to_name_map brcm_dr_mode_to_name[] = {
+ { USB_CTLR_MODE_HOST, "host" },
+ { USB_CTLR_MODE_DEVICE, "peripheral" },
+ { USB_CTLR_MODE_DRD, "drd" },
+ { USB_CTLR_MODE_TYPEC_PD, "typec-pd" }
+};
+
+static struct value_to_name_map brcm_dual_mode_to_name[] = {
+ { 0, "host" },
+ { 1, "device" },
+ { 2, "auto" },
+};
+
+struct brcm_usb_phy {
+ struct phy *phy;
+ unsigned int id;
+ bool inited;
+};
+
+struct brcm_usb_phy_data {
+ struct brcm_usb_init_params ini;
+ bool has_eohci;
+ bool has_xhci;
+ struct clk *usb_20_clk;
+ struct clk *usb_30_clk;
+ struct mutex mutex; /* serialize phy init */
+ int init_count;
+ struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX];
+};
+
+static int brcm_usb_phy_init(struct phy *gphy)
+{
+ struct brcm_usb_phy *phy = phy_get_drvdata(gphy);
+ struct brcm_usb_phy_data *priv =
+ container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
+
+ /*
+ * Use a lock to make sure a second caller waits until
+ * the base phy is inited before using it.
+ */
+ mutex_lock(&priv->mutex);
+ if (priv->init_count++ == 0) {
+ clk_enable(priv->usb_20_clk);
+ clk_enable(priv->usb_30_clk);
+ brcm_usb_init_common(&priv->ini);
+ }
+ mutex_unlock(&priv->mutex);
+ if (phy->id == BRCM_USB_PHY_2_0)
+ brcm_usb_init_eohci(&priv->ini);
+ else if (phy->id == BRCM_USB_PHY_3_0)
+ brcm_usb_init_xhci(&priv->ini);
+ phy->inited = true;
+ dev_dbg(&gphy->dev, "INIT, id: %d, total: %d\n", phy->id,
+ priv->init_count);
+
+ return 0;
+}
+
+static int brcm_usb_phy_exit(struct phy *gphy)
+{
+ struct brcm_usb_phy *phy = phy_get_drvdata(gphy);
+ struct brcm_usb_phy_data *priv =
+ container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
+
+ dev_dbg(&gphy->dev, "EXIT\n");
+ if (phy->id == BRCM_USB_PHY_2_0)
+ brcm_usb_uninit_eohci(&priv->ini);
+ if (phy->id == BRCM_USB_PHY_3_0)
+ brcm_usb_uninit_xhci(&priv->ini);
+
+ /* If both xhci and eohci are gone, reset everything else */
+ mutex_lock(&priv->mutex);
+ if (--priv->init_count == 0) {
+ brcm_usb_uninit_common(&priv->ini);
+ clk_disable(priv->usb_20_clk);
+ clk_disable(priv->usb_30_clk);
+ }
+ mutex_unlock(&priv->mutex);
+ phy->inited = false;
+ return 0;
+}
+
+static struct phy_ops brcm_usb_phy_ops = {
+ .init = brcm_usb_phy_init,
+ .exit = brcm_usb_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *brcm_usb_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct brcm_usb_phy_data *data = dev_get_drvdata(dev);
+
+ /*
+ * values 0 and 1 are for backward compatibility with
+ * device tree nodes from older bootloaders.
+ */
+ switch (args->args[0]) {
+ case 0:
+ case PHY_TYPE_USB2:
+ if (data->phys[BRCM_USB_PHY_2_0].phy)
+ return data->phys[BRCM_USB_PHY_2_0].phy;
+ dev_warn(dev, "Error, 2.0 Phy not found\n");
+ break;
+ case 1:
+ case PHY_TYPE_USB3:
+ if (data->phys[BRCM_USB_PHY_3_0].phy)
+ return data->phys[BRCM_USB_PHY_3_0].phy;
+ dev_warn(dev, "Error, 3.0 Phy not found\n");
+ break;
+ }
+ return ERR_PTR(-ENODEV);
+}
+
+static int name_to_value(struct value_to_name_map *table, int count,
+ const char *name, int *value)
+{
+ int x;
+
+ *value = 0;
+ for (x = 0; x < count; x++) {
+ if (sysfs_streq(name, table[x].name)) {
+ *value = x;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static const char *value_to_name(struct value_to_name_map *table, int count,
+ int value)
+{
+ if (value >= count)
+ return "unknown";
+ return table[value].name;
+}
+
+static ssize_t dr_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n",
+ value_to_name(&brcm_dr_mode_to_name[0],
+ ARRAY_SIZE(brcm_dr_mode_to_name),
+ priv->ini.mode));
+}
+static DEVICE_ATTR_RO(dr_mode);
+
+static ssize_t dual_select_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
+ int value;
+ int res;
+
+ mutex_lock(&sysfs_lock);
+ res = name_to_value(&brcm_dual_mode_to_name[0],
+ ARRAY_SIZE(brcm_dual_mode_to_name), buf, &value);
+ if (!res) {
+ brcm_usb_init_set_dual_select(&priv->ini, value);
+ res = len;
+ }
+ mutex_unlock(&sysfs_lock);
+ return res;
+}
+
+static ssize_t dual_select_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
+ int value;
+
+ mutex_lock(&sysfs_lock);
+ value = brcm_usb_init_get_dual_select(&priv->ini);
+ mutex_unlock(&sysfs_lock);
+ return sprintf(buf, "%s\n",
+ value_to_name(&brcm_dual_mode_to_name[0],
+ ARRAY_SIZE(brcm_dual_mode_to_name),
+ value));
+}
+static DEVICE_ATTR_RW(dual_select);
+
+static struct attribute *brcm_usb_phy_attrs[] = {
+ &dev_attr_dr_mode.attr,
+ &dev_attr_dual_select.attr,
+ NULL
+};
+
+static const struct attribute_group brcm_usb_phy_group = {
+ .attrs = brcm_usb_phy_attrs,
+};
+
+static int brcm_usb_phy_dvr_init(struct device *dev,
+ struct brcm_usb_phy_data *priv,
+ struct device_node *dn)
+{
+ struct phy *gphy;
+ int err;
+
+ priv->usb_20_clk = of_clk_get_by_name(dn, "sw_usb");
+ if (IS_ERR(priv->usb_20_clk)) {
+ dev_info(dev, "Clock not found in Device Tree\n");
+ priv->usb_20_clk = NULL;
+ }
+ err = clk_prepare_enable(priv->usb_20_clk);
+ if (err)
+ return err;
+
+ if (priv->has_eohci) {
+ gphy = devm_phy_create(dev, NULL, &brcm_usb_phy_ops);
+ if (IS_ERR(gphy)) {
+ dev_err(dev, "failed to create EHCI/OHCI PHY\n");
+ return PTR_ERR(gphy);
+ }
+ priv->phys[BRCM_USB_PHY_2_0].phy = gphy;
+ priv->phys[BRCM_USB_PHY_2_0].id = BRCM_USB_PHY_2_0;
+ phy_set_drvdata(gphy, &priv->phys[BRCM_USB_PHY_2_0]);
+ }
+
+ if (priv->has_xhci) {
+ gphy = devm_phy_create(dev, NULL, &brcm_usb_phy_ops);
+ if (IS_ERR(gphy)) {
+ dev_err(dev, "failed to create XHCI PHY\n");
+ return PTR_ERR(gphy);
+ }
+ priv->phys[BRCM_USB_PHY_3_0].phy = gphy;
+ priv->phys[BRCM_USB_PHY_3_0].id = BRCM_USB_PHY_3_0;
+ phy_set_drvdata(gphy, &priv->phys[BRCM_USB_PHY_3_0]);
+
+ priv->usb_30_clk = of_clk_get_by_name(dn, "sw_usb3");
+ if (IS_ERR(priv->usb_30_clk)) {
+ dev_info(dev,
+ "USB3.0 clock not found in Device Tree\n");
+ priv->usb_30_clk = NULL;
+ }
+ err = clk_prepare_enable(priv->usb_30_clk);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int brcm_usb_phy_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct brcm_usb_phy_data *priv;
+ struct phy_provider *phy_provider;
+ struct device_node *dn = pdev->dev.of_node;
+ int err;
+ const char *mode;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
+ priv->ini.family_id = brcmstb_get_family_id();
+ priv->ini.product_id = brcmstb_get_product_id();
+ brcm_usb_set_family_map(&priv->ini);
+ dev_dbg(dev, "Best mapping table is for %s\n",
+ priv->ini.family_name);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "can't get USB_CTRL base address\n");
+ return -EINVAL;
+ }
+ priv->ini.ctrl_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->ini.ctrl_regs)) {
+ dev_err(dev, "can't map CTRL register space\n");
+ return -EINVAL;
+ }
+
+ /* The XHCI EC registers are optional */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ priv->ini.xhci_ec_regs =
+ devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->ini.xhci_ec_regs)) {
+ dev_err(dev, "can't map XHCI EC register space\n");
+ return -EINVAL;
+ }
+ }
+
+ of_property_read_u32(dn, "brcm,ipp", &priv->ini.ipp);
+ of_property_read_u32(dn, "brcm,ioc", &priv->ini.ioc);
+
+ priv->ini.mode = USB_CTLR_MODE_HOST;
+ err = of_property_read_string(dn, "dr_mode", &mode);
+ if (err == 0) {
+ name_to_value(&brcm_dr_mode_to_name[0],
+ ARRAY_SIZE(brcm_dr_mode_to_name),
+ mode, &priv->ini.mode);
+ }
+ if (of_property_read_bool(dn, "brcm,has_xhci"))
+ priv->has_xhci = true;
+ if (of_property_read_bool(dn, "brcm,has_eohci"))
+ priv->has_eohci = true;
+
+ err = brcm_usb_phy_dvr_init(dev, priv, dn);
+ if (err)
+ return err;
+
+ mutex_init(&priv->mutex);
+
+ /* make sure invert settings are correct */
+ brcm_usb_init_ipp(&priv->ini);
+
+ /*
+ * Create sysfs entries for mode.
+ * Remove "dual_select" attribute if not in dual mode
+ */
+ if (priv->ini.mode != USB_CTLR_MODE_DRD)
+ brcm_usb_phy_attrs[1] = NULL;
+ err = sysfs_create_group(&dev->kobj, &brcm_usb_phy_group);
+ if (err)
+ dev_warn(dev, "Error creating sysfs attributes\n");
+
+ /* start with everything off */
+ if (priv->has_xhci)
+ brcm_usb_uninit_xhci(&priv->ini);
+ if (priv->has_eohci)
+ brcm_usb_uninit_eohci(&priv->ini);
+ brcm_usb_uninit_common(&priv->ini);
+ clk_disable(priv->usb_20_clk);
+ clk_disable(priv->usb_30_clk);
+
+ phy_provider = devm_of_phy_provider_register(dev, brcm_usb_phy_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcm_usb_phy_suspend(struct device *dev)
+{
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
+
+ if (priv->init_count) {
+ clk_disable(priv->usb_20_clk);
+ clk_disable(priv->usb_30_clk);
+ }
+ return 0;
+}
+
+static int brcm_usb_phy_resume(struct device *dev)
+{
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
+
+ clk_enable(priv->usb_20_clk);
+ clk_enable(priv->usb_30_clk);
+ brcm_usb_init_ipp(&priv->ini);
+
+ /*
+ * Initialize anything that was previously initialized.
+ * Uninitialize anything that wasn't previously initialized.
+ */
+ if (priv->init_count) {
+ brcm_usb_init_common(&priv->ini);
+ if (priv->phys[BRCM_USB_PHY_2_0].inited) {
+ brcm_usb_init_eohci(&priv->ini);
+ } else if (priv->has_eohci) {
+ brcm_usb_uninit_eohci(&priv->ini);
+ clk_disable(priv->usb_20_clk);
+ }
+ if (priv->phys[BRCM_USB_PHY_3_0].inited) {
+ brcm_usb_init_xhci(&priv->ini);
+ } else if (priv->has_xhci) {
+ brcm_usb_uninit_xhci(&priv->ini);
+ clk_disable(priv->usb_30_clk);
+ }
+ } else {
+ if (priv->has_xhci)
+ brcm_usb_uninit_xhci(&priv->ini);
+ if (priv->has_eohci)
+ brcm_usb_uninit_eohci(&priv->ini);
+ brcm_usb_uninit_common(&priv->ini);
+ clk_disable(priv->usb_20_clk);
+ clk_disable(priv->usb_30_clk);
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops brcm_usb_phy_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(brcm_usb_phy_suspend, brcm_usb_phy_resume)
+};
+
+static const struct of_device_id brcm_usb_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-usb-phy" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, brcm_usb_dt_ids);
+
+static struct platform_driver brcm_usb_driver = {
+ .probe = brcm_usb_phy_probe,
+ .driver = {
+ .name = "brcmstb-usb-phy",
+ .owner = THIS_MODULE,
+ .pm = &brcm_usb_phy_pm_ops,
+ .of_match_table = brcm_usb_dt_ids,
+ },
+};
+
+module_platform_driver(brcm_usb_driver);
+
+MODULE_ALIAS("platform:brcmstb-usb-phy");
+MODULE_AUTHOR("Al Cooper <acooper@broadcom.com>");
+MODULE_DESCRIPTION("BRCM USB PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index 89c887ea5557..a0d522154cdf 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -154,7 +154,6 @@ struct mvebu_comphy_priv {
void __iomem *base;
struct regmap *regmap;
struct device *dev;
- int modes[MVEBU_COMPHY_LANES];
};
struct mvebu_comphy_lane {
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 721a2a1c97ef..402385f2562a 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -96,9 +96,11 @@
#define U3P_U2PHYDTM1 0x06C
#define P2C_RG_UART_EN BIT(16)
+#define P2C_FORCE_IDDIG BIT(9)
#define P2C_RG_VBUSVALID BIT(5)
#define P2C_RG_SESSEND BIT(4)
#define P2C_RG_AVALID BIT(2)
+#define P2C_RG_IDDIG BIT(1)
#define U3P_U3_CHIP_GPIO_CTLD 0x0c
#define P3C_REG_IP_SW_RST BIT(31)
@@ -585,6 +587,31 @@ static void u2_phy_instance_exit(struct mtk_tphy *tphy,
}
}
+static void u2_phy_instance_set_mode(struct mtk_tphy *tphy,
+ struct mtk_phy_instance *instance,
+ enum phy_mode mode)
+{
+ struct u2phy_banks *u2_banks = &instance->u2_banks;
+ u32 tmp;
+
+ tmp = readl(u2_banks->com + U3P_U2PHYDTM1);
+ switch (mode) {
+ case PHY_MODE_USB_DEVICE:
+ tmp |= P2C_FORCE_IDDIG | P2C_RG_IDDIG;
+ break;
+ case PHY_MODE_USB_HOST:
+ tmp |= P2C_FORCE_IDDIG;
+ tmp &= ~P2C_RG_IDDIG;
+ break;
+ case PHY_MODE_USB_OTG:
+ tmp &= ~(P2C_FORCE_IDDIG | P2C_RG_IDDIG);
+ break;
+ default:
+ return;
+ }
+ writel(tmp, u2_banks->com + U3P_U2PHYDTM1);
+}
+
static void pcie_phy_instance_init(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
@@ -881,6 +908,17 @@ static int mtk_phy_exit(struct phy *phy)
return 0;
}
+static int mtk_phy_set_mode(struct phy *phy, enum phy_mode mode)
+{
+ struct mtk_phy_instance *instance = phy_get_drvdata(phy);
+ struct mtk_tphy *tphy = dev_get_drvdata(phy->dev.parent);
+
+ if (instance->type == PHY_TYPE_USB2)
+ u2_phy_instance_set_mode(tphy, instance, mode);
+
+ return 0;
+}
+
static struct phy *mtk_phy_xlate(struct device *dev,
struct of_phandle_args *args)
{
@@ -931,6 +969,7 @@ static const struct phy_ops mtk_tphy_ops = {
.exit = mtk_phy_exit,
.power_on = mtk_phy_power_on,
.power_off = mtk_phy_power_off,
+ .set_mode = mtk_phy_set_mode,
.owner = THIS_MODULE,
};
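As a usage note, a controller driver would reach u2_phy_instance_set_mode() through the generic phy API. The following hypothetical call site is a sketch only and is not part of the patch:

#include <linux/phy/phy.h>

/* Hypothetical consumer: force the T-PHY USB2 instance into host mode. */
static int example_force_host(struct phy *u2_phy)
{
        int ret = phy_init(u2_phy);

        if (ret)
                return ret;
        /* Dispatches to mtk_phy_set_mode() -> u2_phy_instance_set_mode(). */
        return phy_set_mode(u2_phy, PHY_MODE_USB_HOST);
}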
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index a268f4d6f3e9..b4964b067aec 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -372,6 +372,21 @@ int phy_reset(struct phy *phy)
}
EXPORT_SYMBOL_GPL(phy_reset);
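+/**
+ * phy_calibrate() - Calibrate the phy
+ * @phy: the phy to calibrate
+ *
+ * Invokes the phy's ->calibrate() callback, if one is provided.
+ *
+ * Returns: 0 if @phy is NULL or has no calibrate op, otherwise the value
+ * returned by the callback.
+ */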
+int phy_calibrate(struct phy *phy)
+{
+ int ret;
+
+ if (!phy || !phy->ops->calibrate)
+ return 0;
+
+ mutex_lock(&phy->mutex);
+ ret = phy->ops->calibrate(phy);
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_calibrate);
+
/**
* _of_phy_get() - lookup and obtain a reference to a phy by phandle
* @np: device_node for which to get the phy
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-i.h b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
index 13b02b7de30b..822c83b8efcd 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-i.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
@@ -114,14 +114,16 @@ struct ufs_qcom_phy {
struct ufs_qcom_phy_calibration *cached_regs;
int cached_regs_table_size;
bool is_powered_on;
+ bool is_started;
struct ufs_qcom_phy_specific_ops *phy_spec_ops;
+
+ enum phy_mode mode;
};
/**
* struct ufs_qcom_phy_specific_ops - set of pointers to functions which have a
* specific implementation per phy. Each UFS phy, should implement
* those functions according to its spec and requirements
- * @calibrate_phy: pointer to a function that calibrate the phy
* @start_serdes: pointer to a function that starts the serdes
* @is_physical_coding_sublayer_ready: pointer to a function that
* checks pcs readiness. returns 0 for success and non-zero for error.
@@ -130,7 +132,6 @@ struct ufs_qcom_phy {
* and writes to QSERDES_RX_SIGDET_CNTRL attribute
*/
struct ufs_qcom_phy_specific_ops {
- int (*calibrate_phy)(struct ufs_qcom_phy *phy, bool is_rate_B);
void (*start_serdes)(struct ufs_qcom_phy *phy);
int (*is_physical_coding_sublayer_ready)(struct ufs_qcom_phy *phy);
void (*set_tx_lane_enable)(struct ufs_qcom_phy *phy, u32 val);
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
index 12a1b498dc4b..ba1895b76a5d 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
@@ -44,7 +44,19 @@ void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy)
{
- return 0;
+ struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+ bool is_rate_B = false;
+ int ret;
+
+ if (phy_common->mode == PHY_MODE_UFS_HS_B)
+ is_rate_B = true;
+
+ ret = ufs_qcom_phy_qmp_14nm_phy_calibrate(phy_common, is_rate_B);
+ if (!ret)
+ /* phy calibrated, but yet to be started */
+ phy_common->is_started = false;
+
+ return ret;
}
static int ufs_qcom_phy_qmp_14nm_exit(struct phy *generic_phy)
@@ -53,6 +65,19 @@ static int ufs_qcom_phy_qmp_14nm_exit(struct phy *generic_phy)
}
static
+int ufs_qcom_phy_qmp_14nm_set_mode(struct phy *generic_phy, enum phy_mode mode)
+{
+ struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+
+ phy_common->mode = PHY_MODE_INVALID;
+
+ if (mode > 0)
+ phy_common->mode = mode;
+
+ return 0;
+}
+
+static
void ufs_qcom_phy_qmp_14nm_power_control(struct ufs_qcom_phy *phy, bool val)
{
writel_relaxed(val ? 0x1 : 0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
@@ -102,11 +127,11 @@ static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
.exit = ufs_qcom_phy_qmp_14nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
+ .set_mode = ufs_qcom_phy_qmp_14nm_set_mode,
.owner = THIS_MODULE,
};
static struct ufs_qcom_phy_specific_ops phy_14nm_ops = {
- .calibrate_phy = ufs_qcom_phy_qmp_14nm_phy_calibrate,
.start_serdes = ufs_qcom_phy_qmp_14nm_start_serdes,
.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_14nm_is_pcs_ready,
.set_tx_lane_enable = ufs_qcom_phy_qmp_14nm_set_tx_lane_enable,
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
index 4f68acb58b73..49f435c71147 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
@@ -63,7 +63,19 @@ void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_20nm_init(struct phy *generic_phy)
{
- return 0;
+ struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+ bool is_rate_B = false;
+ int ret;
+
+ if (phy_common->mode == PHY_MODE_UFS_HS_B)
+ is_rate_B = true;
+
+ ret = ufs_qcom_phy_qmp_20nm_phy_calibrate(phy_common, is_rate_B);
+ if (!ret)
+ /* phy calibrated, but yet to be started */
+ phy_common->is_started = false;
+
+ return ret;
}
static int ufs_qcom_phy_qmp_20nm_exit(struct phy *generic_phy)
@@ -72,6 +84,19 @@ static int ufs_qcom_phy_qmp_20nm_exit(struct phy *generic_phy)
}
static
+int ufs_qcom_phy_qmp_20nm_set_mode(struct phy *generic_phy, enum phy_mode mode)
+{
+ struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+
+ phy_common->mode = PHY_MODE_INVALID;
+
+ if (mode > 0)
+ phy_common->mode = mode;
+
+ return 0;
+}
+
+static
void ufs_qcom_phy_qmp_20nm_power_control(struct ufs_qcom_phy *phy, bool val)
{
bool hibern8_exit_after_pwr_collapse = phy->quirks &
@@ -160,11 +185,11 @@ static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
.exit = ufs_qcom_phy_qmp_20nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
+ .set_mode = ufs_qcom_phy_qmp_20nm_set_mode,
.owner = THIS_MODULE,
};
static struct ufs_qcom_phy_specific_ops phy_20nm_ops = {
- .calibrate_phy = ufs_qcom_phy_qmp_20nm_phy_calibrate,
.start_serdes = ufs_qcom_phy_qmp_20nm_start_serdes,
.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_20nm_is_pcs_ready,
.set_tx_lane_enable = ufs_qcom_phy_qmp_20nm_set_tx_lane_enable,
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
index 43865ef340e2..c5ff4525edef 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
@@ -518,9 +518,8 @@ void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
}
}
-int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
+static int ufs_qcom_phy_start_serdes(struct ufs_qcom_phy *ufs_qcom_phy)
{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
int ret = 0;
if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
@@ -533,7 +532,6 @@ int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
return ret;
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);
int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
{
@@ -564,31 +562,8 @@ void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);
-int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
-{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
- int ret = 0;
-
- if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
- dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
- __func__);
- ret = -ENOTSUPP;
- } else {
- ret = ufs_qcom_phy->phy_spec_ops->
- calibrate_phy(ufs_qcom_phy, is_rate_B);
- if (ret)
- dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
- __func__, ret);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
-
-int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
+static int ufs_qcom_phy_is_pcs_ready(struct ufs_qcom_phy *ufs_qcom_phy)
{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
-
if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
__func__);
@@ -598,7 +573,6 @@ int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
return ufs_qcom_phy->phy_spec_ops->
is_physical_coding_sublayer_ready(ufs_qcom_phy);
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);
int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
@@ -609,6 +583,18 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
if (phy_common->is_powered_on)
return 0;
+ if (!phy_common->is_started) {
+ err = ufs_qcom_phy_start_serdes(phy_common);
+ if (err)
+ return err;
+
+ err = ufs_qcom_phy_is_pcs_ready(phy_common);
+ if (err)
+ return err;
+
+ phy_common->is_started = true;
+ }
+
err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
if (err) {
dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index 54c34298a000..9c90e7d67e0a 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -1,7 +1,7 @@
/*
* Renesas R-Car Gen3 for USB2.0 PHY driver
*
- * Copyright (C) 2015 Renesas Electronics Corporation
+ * Copyright (C) 2015-2017 Renesas Electronics Corporation
*
* This is based on the phy-rcar-gen2 driver:
* Copyright (C) 2014 Renesas Solutions Corp.
@@ -12,16 +12,18 @@
* published by the Free Software Foundation.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/usb/of.h>
#include <linux/workqueue.h>
/******* USB2.0 Host registers (original offset is +0x200) *******/
@@ -79,6 +81,8 @@
#define USB2_ADPCTRL_IDPULLUP BIT(5) /* 1 = ID sampling is enabled */
#define USB2_ADPCTRL_DRVVBUS BIT(4)
+#define RCAR_GEN3_PHY_HAS_DEDICATED_PINS 1
+
struct rcar_gen3_chan {
void __iomem *base;
struct extcon_dev *extcon;
@@ -86,7 +90,7 @@ struct rcar_gen3_chan {
struct regulator *vbus;
struct work_struct work;
bool extcon_host;
- bool has_otg;
+ bool has_otg_pins;
};
static void rcar_gen3_phy_usb2_work(struct work_struct *work)
@@ -218,33 +222,40 @@ static bool rcar_gen3_is_host(struct rcar_gen3_chan *ch)
return !(readl(ch->base + USB2_COMMCTRL) & USB2_COMMCTRL_OTG_PERI);
}
+static enum phy_mode rcar_gen3_get_phy_mode(struct rcar_gen3_chan *ch)
+{
+ if (rcar_gen3_is_host(ch))
+ return PHY_MODE_USB_HOST;
+
+ return PHY_MODE_USB_DEVICE;
+}
+
static ssize_t role_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
- bool is_b_device, is_host, new_mode_is_host;
+ bool is_b_device;
+ enum phy_mode cur_mode, new_mode;
- if (!ch->has_otg || !ch->phy->init_count)
+ if (!ch->has_otg_pins || !ch->phy->init_count)
return -EIO;
- /*
- * is_b_device: true is B-Device. false is A-Device.
- * If {new_mode_}is_host: true is Host mode. false is Peripheral mode.
- */
- is_b_device = rcar_gen3_check_id(ch);
- is_host = rcar_gen3_is_host(ch);
if (!strncmp(buf, "host", strlen("host")))
- new_mode_is_host = true;
+ new_mode = PHY_MODE_USB_HOST;
else if (!strncmp(buf, "peripheral", strlen("peripheral")))
- new_mode_is_host = false;
+ new_mode = PHY_MODE_USB_DEVICE;
else
return -EINVAL;
+ /* is_b_device: true is B-Device. false is A-Device. */
+ is_b_device = rcar_gen3_check_id(ch);
+ cur_mode = rcar_gen3_get_phy_mode(ch);
+
/* If current and new mode is the same, this returns the error */
- if (is_host == new_mode_is_host)
+ if (cur_mode == new_mode)
return -EINVAL;
- if (new_mode_is_host) { /* And is_host must be false */
+ if (new_mode == PHY_MODE_USB_HOST) { /* so cur_mode must be DEVICE */
if (!is_b_device) /* A-Peripheral */
rcar_gen3_init_from_a_peri_to_a_host(ch);
else /* B-Peripheral */
@@ -264,7 +275,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr,
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
- if (!ch->has_otg || !ch->phy->init_count)
+ if (!ch->has_otg_pins || !ch->phy->init_count)
return -EIO;
return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
@@ -303,7 +314,7 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
/* Initialize otg part */
- if (channel->has_otg)
+ if (channel->has_otg_pins)
rcar_gen3_init_otg(channel);
return 0;
@@ -377,9 +388,17 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
}
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
- { .compatible = "renesas,usb2-phy-r8a7795" },
- { .compatible = "renesas,usb2-phy-r8a7796" },
- { .compatible = "renesas,rcar-gen3-usb2-phy" },
+ {
+ .compatible = "renesas,usb2-phy-r8a7795",
+ .data = (void *)RCAR_GEN3_PHY_HAS_DEDICATED_PINS,
+ },
+ {
+ .compatible = "renesas,usb2-phy-r8a7796",
+ .data = (void *)RCAR_GEN3_PHY_HAS_DEDICATED_PINS,
+ },
+ {
+ .compatible = "renesas,rcar-gen3-usb2-phy",
+ },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_gen3_phy_usb2_match_table);
@@ -415,14 +434,17 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
/* call request_irq for OTG */
irq = platform_get_irq(pdev, 0);
if (irq >= 0) {
- int ret;
-
INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
IRQF_SHARED, dev_name(dev), channel);
if (irq < 0)
dev_err(dev, "No irq handler (%d)\n", irq);
- channel->has_otg = true;
+ }
+
+ if (of_usb_get_dr_mode_by_phy(dev->of_node, 0) == USB_DR_MODE_OTG) {
+ int ret;
+
+ channel->has_otg_pins = (uintptr_t)of_device_get_match_data(dev);
channel->extcon = devm_extcon_dev_allocate(dev,
rcar_gen3_phy_cable);
if (IS_ERR(channel->extcon))
@@ -464,7 +486,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
dev_err(dev, "Failed to register PHY provider\n");
ret = PTR_ERR(provider);
goto error;
- } else if (channel->has_otg) {
+ } else if (channel->has_otg_pins) {
int ret;
ret = device_create_file(dev, &dev_attr_role);
@@ -484,7 +506,7 @@ static int rcar_gen3_phy_usb2_remove(struct platform_device *pdev)
{
struct rcar_gen3_chan *channel = platform_get_drvdata(pdev);
- if (channel->has_otg)
+ if (channel->has_otg_pins)
device_remove_file(&pdev->dev, &dev_attr_role);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index ee7ce5ee53f9..5049dac79bd0 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -17,7 +17,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index a958c9bced01..ee85fa0ca4b0 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -102,9 +102,40 @@
#define CMN_PLL1_SS_CTRL1 (0xb8 << 2)
#define CMN_PLL1_SS_CTRL2 (0xb9 << 2)
#define CMN_RXCAL_OVRD (0xd1 << 2)
+
#define CMN_TXPUCAL_CTRL (0xe0 << 2)
#define CMN_TXPUCAL_OVRD (0xe1 << 2)
+#define CMN_TXPDCAL_CTRL (0xf0 << 2)
#define CMN_TXPDCAL_OVRD (0xf1 << 2)
+
+/* For CMN_TXPUCAL_CTRL, CMN_TXPDCAL_CTRL */
+#define CMN_TXPXCAL_START BIT(15)
+#define CMN_TXPXCAL_DONE BIT(14)
+#define CMN_TXPXCAL_NO_RESPONSE BIT(13)
+#define CMN_TXPXCAL_CURRENT_RESPONSE BIT(12)
+
+#define CMN_TXPU_ADJ_CTRL (0x108 << 2)
+#define CMN_TXPD_ADJ_CTRL (0x10c << 2)
+
+/*
+ * For CMN_TXPUCAL_CTRL, CMN_TXPDCAL_CTRL,
+ * CMN_TXPU_ADJ_CTRL, CMN_TXPD_ADJ_CTRL
+ *
+ * NOTE: some of these registers are documented to be 2's complement
+ * signed numbers, but then documented to be always positive. Weird.
+ * In such a case, using CMN_CALIB_CODE_POS() avoids the unnecessary
+ * sign extension.
+ */
+#define CMN_CALIB_CODE_WIDTH 7
+#define CMN_CALIB_CODE_OFFSET 0
+#define CMN_CALIB_CODE_MASK GENMASK(CMN_CALIB_CODE_WIDTH, 0)
+#define CMN_CALIB_CODE(x) \
+ sign_extend32((x) >> CMN_CALIB_CODE_OFFSET, CMN_CALIB_CODE_WIDTH)
+
+#define CMN_CALIB_CODE_POS_MASK GENMASK(CMN_CALIB_CODE_WIDTH - 1, 0)
+#define CMN_CALIB_CODE_POS(x) \
+ (((x) >> CMN_CALIB_CODE_OFFSET) & CMN_CALIB_CODE_POS_MASK)
+
#define CMN_DIAG_PLL0_FBH_OVRD (0x1c0 << 2)
#define CMN_DIAG_PLL0_FBL_OVRD (0x1c1 << 2)
#define CMN_DIAG_PLL0_OVRD (0x1c2 << 2)
@@ -138,6 +169,15 @@
#define TX_TXCC_MGNFS_MULT_101(n) ((0x4055 | ((n) << 9)) << 2)
#define TX_TXCC_MGNFS_MULT_110(n) ((0x4056 | ((n) << 9)) << 2)
#define TX_TXCC_MGNFS_MULT_111(n) ((0x4057 | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_000(n) ((0x4058 | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_001(n) ((0x4059 | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_010(n) ((0x405a | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_011(n) ((0x405b | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_100(n) ((0x405c | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_101(n) ((0x405d | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_110(n) ((0x405e | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_111(n) ((0x405f | ((n) << 9)) << 2)
+
#define XCVR_DIAG_PLLDRC_CTRL(n) ((0x40e0 | ((n) << 9)) << 2)
#define XCVR_DIAG_BIDI_CTRL(n) ((0x40e8 | ((n) << 9)) << 2)
#define XCVR_DIAG_LANE_FCM_EN_MGN(n) ((0x40f2 | ((n) << 9)) << 2)
@@ -150,10 +190,63 @@
#define TX_RCVDET_ST_TMR(n) ((0x4123 | ((n) << 9)) << 2)
#define TX_DIAG_TX_DRV(n) ((0x41e1 | ((n) << 9)) << 2)
#define TX_DIAG_BGREF_PREDRV_DELAY (0x41e7 << 2)
+
+/* Use this for "n" in macros like "_MULT_XXX" to target the aux channel */
+#define AUX_CH_LANE 8
+
#define TX_ANA_CTRL_REG_1 (0x5020 << 2)
+
+#define TXDA_DP_AUX_EN BIT(15)
+#define AUXDA_SE_EN BIT(14)
+#define TXDA_CAL_LATCH_EN BIT(13)
+#define AUXDA_POLARITY BIT(12)
+#define TXDA_DRV_POWER_ISOLATION_EN BIT(11)
+#define TXDA_DRV_POWER_EN_PH_2_N BIT(10)
+#define TXDA_DRV_POWER_EN_PH_1_N BIT(9)
+#define TXDA_BGREF_EN BIT(8)
+#define TXDA_DRV_LDO_EN BIT(7)
+#define TXDA_DECAP_EN_DEL BIT(6)
+#define TXDA_DECAP_EN BIT(5)
+#define TXDA_UPHY_SUPPLY_EN_DEL BIT(4)
+#define TXDA_UPHY_SUPPLY_EN BIT(3)
+#define TXDA_LOW_LEAKAGE_EN BIT(2)
+#define TXDA_DRV_IDLE_LOWI_EN BIT(1)
+#define TXDA_DRV_CMN_MODE_EN BIT(0)
+
#define TX_ANA_CTRL_REG_2 (0x5021 << 2)
+
+#define AUXDA_DEBOUNCING_CLK BIT(15)
+#define TXDA_LPBK_RECOVERED_CLK_EN BIT(14)
+#define TXDA_LPBK_ISI_GEN_EN BIT(13)
+#define TXDA_LPBK_SERIAL_EN BIT(12)
+#define TXDA_LPBK_LINE_EN BIT(11)
+#define TXDA_DRV_LDO_REDC_SINKIQ BIT(10)
+#define XCVR_DECAP_EN_DEL BIT(9)
+#define XCVR_DECAP_EN BIT(8)
+#define TXDA_MPHY_ENABLE_HS_NT BIT(7)
+#define TXDA_MPHY_SA_MODE BIT(6)
+#define TXDA_DRV_LDO_RBYR_FB_EN BIT(5)
+#define TXDA_DRV_RST_PULL_DOWN BIT(4)
+#define TXDA_DRV_LDO_BG_FB_EN BIT(3)
+#define TXDA_DRV_LDO_BG_REF_EN BIT(2)
+#define TXDA_DRV_PREDRV_EN_DEL BIT(1)
+#define TXDA_DRV_PREDRV_EN BIT(0)
+
#define TXDA_COEFF_CALC_CTRL (0x5022 << 2)
+
+#define TX_HIGH_Z BIT(6)
+#define TX_VMARGIN_OFFSET 3
+#define TX_VMARGIN_MASK 0x7
+#define LOW_POWER_SWING_EN BIT(2)
+#define TX_FCM_DRV_MAIN_EN BIT(1)
+#define TX_FCM_FULL_MARGIN BIT(0)
+
#define TX_DIG_CTRL_REG_2 (0x5024 << 2)
+
+#define TX_HIGH_Z_TM_EN BIT(15)
+#define TX_RESCAL_CODE_OFFSET 0
+#define TX_RESCAL_CODE_MASK 0x3f
+
#define TXDA_CYA_AUXDA_CYA (0x5025 << 2)
#define TX_ANA_CTRL_REG_3 (0x5026 << 2)
#define TX_ANA_CTRL_REG_4 (0x5027 << 2)
@@ -456,54 +549,72 @@ static void tcphy_dp_aux_set_flip(struct rockchip_typec_phy *tcphy)
*/
tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
if (!tcphy->flip)
- tx_ana_ctrl_reg_1 |= BIT(12);
+ tx_ana_ctrl_reg_1 |= AUXDA_POLARITY;
else
- tx_ana_ctrl_reg_1 &= ~BIT(12);
+ tx_ana_ctrl_reg_1 &= ~AUXDA_POLARITY;
writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
}
static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
{
+ u16 val;
u16 tx_ana_ctrl_reg_1;
- u16 rdata, rdata2, val;
-
- /* disable txda_cal_latch_en for rewrite the calibration values */
- tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
- tx_ana_ctrl_reg_1 &= ~BIT(13);
- writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+ u16 tx_ana_ctrl_reg_2;
+ s32 pu_calib_code, pd_calib_code;
+ s32 pu_adj, pd_adj;
+ u16 calib;
/*
- * read a resistor calibration code from CMN_TXPUCAL_CTRL[6:0] and
- * write it to TX_DIG_CTRL_REG_2[6:0], and delay 1ms to make sure it
- * works.
+ * Calculate calibration code as per docs: use an average of the
+ * pull down and pull up. Then add in adjustments.
*/
- rdata = readl(tcphy->base + TX_DIG_CTRL_REG_2);
- rdata = rdata & 0xffc0;
+ val = readl(tcphy->base + CMN_TXPUCAL_CTRL);
+ pu_calib_code = CMN_CALIB_CODE_POS(val);
+ val = readl(tcphy->base + CMN_TXPDCAL_CTRL);
+ pd_calib_code = CMN_CALIB_CODE_POS(val);
+ val = readl(tcphy->base + CMN_TXPU_ADJ_CTRL);
+ pu_adj = CMN_CALIB_CODE(val);
+ val = readl(tcphy->base + CMN_TXPD_ADJ_CTRL);
+ pd_adj = CMN_CALIB_CODE(val);
+ calib = (pu_calib_code + pd_calib_code) / 2 + pu_adj + pd_adj;
- rdata2 = readl(tcphy->base + CMN_TXPUCAL_CTRL);
- rdata2 = rdata2 & 0x3f;
+ /* disable txda_cal_latch_en for rewrite the calibration values */
+ tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 &= ~TXDA_CAL_LATCH_EN;
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
- val = rdata | rdata2;
+ /* write the calibration, then delay 10 ms as per the sample in the docs */
+ val = readl(tcphy->base + TX_DIG_CTRL_REG_2);
+ val &= ~(TX_RESCAL_CODE_MASK << TX_RESCAL_CODE_OFFSET);
+ val |= calib << TX_RESCAL_CODE_OFFSET;
writel(val, tcphy->base + TX_DIG_CTRL_REG_2);
- usleep_range(1000, 1050);
+ usleep_range(10000, 10050);
/*
* Enable signal for latch that sample and holds calibration values.
* Activate this signal for 1 clock cycle to sample new calibration
* values.
*/
- tx_ana_ctrl_reg_1 |= BIT(13);
+ tx_ana_ctrl_reg_1 |= TXDA_CAL_LATCH_EN;
writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
usleep_range(150, 200);
/* set TX Voltage Level and TX Deemphasis to 0 */
writel(0, tcphy->base + PHY_DP_TX_CTL);
+
/* re-enable decap */
- writel(0x100, tcphy->base + TX_ANA_CTRL_REG_2);
- writel(0x300, tcphy->base + TX_ANA_CTRL_REG_2);
- tx_ana_ctrl_reg_1 |= BIT(3);
+ tx_ana_ctrl_reg_2 = XCVR_DECAP_EN;
+ writel(tx_ana_ctrl_reg_2, tcphy->base + TX_ANA_CTRL_REG_2);
+ udelay(1);
+ tx_ana_ctrl_reg_2 |= XCVR_DECAP_EN_DEL;
+ writel(tx_ana_ctrl_reg_2, tcphy->base + TX_ANA_CTRL_REG_2);
+
+ writel(0, tcphy->base + TX_ANA_CTRL_REG_3);
+
+ tx_ana_ctrl_reg_1 |= TXDA_UPHY_SUPPLY_EN;
writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
- tx_ana_ctrl_reg_1 |= BIT(4);
+ udelay(1);
+ tx_ana_ctrl_reg_1 |= TXDA_UPHY_SUPPLY_EN_DEL;
writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
@@ -515,44 +626,66 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
writel(0x1001, tcphy->base + TX_ANA_CTRL_REG_4);
/* re-enables Bandgap reference for LDO */
- tx_ana_ctrl_reg_1 |= BIT(7);
+ tx_ana_ctrl_reg_1 |= TXDA_DRV_LDO_EN;
writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
- tx_ana_ctrl_reg_1 |= BIT(8);
+ udelay(5);
+ tx_ana_ctrl_reg_1 |= TXDA_BGREF_EN;
writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
/*
* re-enables the transmitter pre-driver, driver data selection MUX,
* and receiver detect circuits.
*/
- writel(0x301, tcphy->base + TX_ANA_CTRL_REG_2);
- writel(0x303, tcphy->base + TX_ANA_CTRL_REG_2);
+ tx_ana_ctrl_reg_2 |= TXDA_DRV_PREDRV_EN;
+ writel(tx_ana_ctrl_reg_2, tcphy->base + TX_ANA_CTRL_REG_2);
+ udelay(1);
+ tx_ana_ctrl_reg_2 |= TXDA_DRV_PREDRV_EN_DEL;
+ writel(tx_ana_ctrl_reg_2, tcphy->base + TX_ANA_CTRL_REG_2);
/*
- * Do some magic undocumented stuff, some of which appears to
- * undo the "re-enables Bandgap reference for LDO" above.
+ * Do all the undocumented magic:
+ * - Turn on TXDA_DP_AUX_EN, whatever that is, even though the sample
+ * code never shows this going on.
+ * - Turn on TXDA_DECAP_EN (and TXDA_DECAP_EN_DEL) even though the
+ * docs say that for aux it's always 0.
+ * - Turn off the LDO and BGREF, which we just spent time turning
+ * on above (???).
+ *
+ * Without this magic, things seem worse.
*/
- tx_ana_ctrl_reg_1 |= BIT(15);
- tx_ana_ctrl_reg_1 &= ~BIT(8);
- tx_ana_ctrl_reg_1 &= ~BIT(7);
- tx_ana_ctrl_reg_1 |= BIT(6);
- tx_ana_ctrl_reg_1 |= BIT(5);
+ tx_ana_ctrl_reg_1 |= TXDA_DP_AUX_EN;
+ tx_ana_ctrl_reg_1 |= TXDA_DECAP_EN;
+ tx_ana_ctrl_reg_1 &= ~TXDA_DRV_LDO_EN;
+ tx_ana_ctrl_reg_1 &= ~TXDA_BGREF_EN;
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+ udelay(1);
+ tx_ana_ctrl_reg_1 |= TXDA_DECAP_EN_DEL;
writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
-
- writel(0, tcphy->base + TX_ANA_CTRL_REG_3);
- writel(0, tcphy->base + TX_ANA_CTRL_REG_4);
- writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
/*
- * Controls low_power_swing_en, don't set the voltage swing of the
- * driver to 400mv. The values below are peak to peak (differential)
- * values.
+ * Undo the work we did to set the LDO voltage.
+ * This doesn't seem to help or hurt, but it kinda goes with the
+ * undocumented magic above.
*/
+ writel(0, tcphy->base + TX_ANA_CTRL_REG_4);
+
+ /* Don't set voltage swing to 400 mV peak to peak (differential) */
writel(0, tcphy->base + TXDA_COEFF_CALC_CTRL);
+
+ /* Init TXDA_CYA_AUXDA_CYA for unknown magic reasons */
writel(0, tcphy->base + TXDA_CYA_AUXDA_CYA);
- /* Controls tx_high_z_tm_en */
+ /*
+ * More undocumented magic, presumably the goal of which is to
+ * make the "auxda_source_aux_oen" be ignored and instead to decide
+ * about "high impedance state" based on what software puts in the
+ * register TXDA_COEFF_CALC_CTRL (see TX_HIGH_Z). Since we only
+ * program that register once and we don't set the bit TX_HIGH_Z,
+ * presumably the goal here is that we should never put the analog
+ * driver in high impedance state.
+ */
val = readl(tcphy->base + TX_DIG_CTRL_REG_2);
- val |= BIT(15);
+ val |= TX_HIGH_Z_TM_EN;
writel(val, tcphy->base + TX_DIG_CTRL_REG_2);
}
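As a cross-check of the conversion from magic numbers to named bits, the values the old code wrote to TX_ANA_CTRL_REG_2 decode exactly to the new flags (worked out from the BIT() definitions in this patch; illustration only):

/*
 *   0x100 = BIT(8)          = XCVR_DECAP_EN
 *   0x300 = BIT(9) | BIT(8) = XCVR_DECAP_EN_DEL | XCVR_DECAP_EN
 *   0x301 = 0x300 | BIT(0)  = ... | TXDA_DRV_PREDRV_EN
 *   0x303 = 0x301 | BIT(1)  = ... | TXDA_DRV_PREDRV_EN_DEL
 *
 * which is the same sequence the rewritten calibration performs, now
 * built up in tx_ana_ctrl_reg_2 with a udelay(1) between the "_EN"
 * and "_EN_DEL" writes.
 */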
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index 0e564f32749f..68ce4a082b9b 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -68,6 +68,40 @@
#define PCIE_PCS_MASK 0xFF0000
#define PCIE_PCS_DELAY_COUNT_SHIFT 0x10
+#define PCIEPHYRX_ANA_PROGRAMMABILITY 0x0000000C
+#define INTERFACE_MASK GENMASK(31, 27)
+#define INTERFACE_SHIFT 27
+#define LOSD_MASK GENMASK(17, 14)
+#define LOSD_SHIFT 14
+#define MEM_PLLDIV GENMASK(6, 5)
+
+#define PCIEPHYRX_TRIM 0x0000001C
+#define MEM_DLL_TRIM_SEL GENMASK(31, 30)
+#define MEM_DLL_TRIM_SHIFT 30
+
+#define PCIEPHYRX_DLL 0x00000024
+#define MEM_DLL_PHINT_RATE GENMASK(31, 30)
+
+#define PCIEPHYRX_DIGITAL_MODES 0x00000028
+#define MEM_CDR_FASTLOCK BIT(23)
+#define MEM_CDR_LBW GENMASK(22, 21)
+#define MEM_CDR_STEPCNT GENMASK(20, 19)
+#define MEM_CDR_STL_MASK GENMASK(18, 16)
+#define MEM_CDR_STL_SHIFT 16
+#define MEM_CDR_THR_MASK GENMASK(15, 13)
+#define MEM_CDR_THR_SHIFT 13
+#define MEM_CDR_THR_MODE BIT(12)
+#define MEM_CDR_CDR_2NDO_SDM_MODE BIT(11)
+#define MEM_OVRD_HS_RATE BIT(26)
+
+#define PCIEPHYRX_EQUALIZER 0x00000038
+#define MEM_EQLEV GENMASK(31, 16)
+#define MEM_EQFTC GENMASK(15, 11)
+#define MEM_EQCTL GENMASK(10, 7)
+#define MEM_EQCTL_SHIFT 7
+#define MEM_OVRD_EQLEV BIT(2)
+#define MEM_OVRD_EQFTC BIT(1)
+
/*
* This is an Empirical value that works, need to confirm the actual
* value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status
@@ -91,6 +125,8 @@ struct pipe3_dpll_map {
struct ti_pipe3 {
void __iomem *pll_ctrl_base;
+ void __iomem *phy_rx;
+ void __iomem *phy_tx;
struct device *dev;
struct device *control_dev;
struct clk *wkupclk;
@@ -261,6 +297,37 @@ static int ti_pipe3_dpll_program(struct ti_pipe3 *phy)
return ti_pipe3_dpll_wait_lock(phy);
}
+static void ti_pipe3_calibrate(struct ti_pipe3 *phy)
+{
+ u32 val;
+
+ val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY);
+ val &= ~(INTERFACE_MASK | LOSD_MASK | MEM_PLLDIV);
+ val = (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT);
+ ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY, val);
+
+ val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_DIGITAL_MODES);
+ val &= ~(MEM_CDR_STEPCNT | MEM_CDR_STL_MASK | MEM_CDR_THR_MASK |
+ MEM_CDR_CDR_2NDO_SDM_MODE | MEM_OVRD_HS_RATE);
+ val |= (MEM_CDR_FASTLOCK | MEM_CDR_LBW | 0x3 << MEM_CDR_STL_SHIFT |
+ 0x1 << MEM_CDR_THR_SHIFT | MEM_CDR_THR_MODE);
+ ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_DIGITAL_MODES, val);
+
+ val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_TRIM);
+ val &= ~MEM_DLL_TRIM_SEL;
+ val |= 0x2 << MEM_DLL_TRIM_SHIFT;
+ ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_TRIM, val);
+
+ val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_DLL);
+ val |= MEM_DLL_PHINT_RATE;
+ ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_DLL, val);
+
+ val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_EQUALIZER);
+ val &= ~(MEM_EQLEV | MEM_EQCTL | MEM_OVRD_EQLEV | MEM_OVRD_EQFTC);
+ val |= MEM_EQFTC | 0x1 << MEM_EQCTL_SHIFT;
+ ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_EQUALIZER, val);
+}
+
static int ti_pipe3_init(struct phy *x)
{
struct ti_pipe3 *phy = phy_get_drvdata(x);
@@ -282,7 +349,12 @@ static int ti_pipe3_init(struct phy *x)
val = 0x96 << OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT;
ret = regmap_update_bits(phy->pcs_syscon, phy->pcie_pcs_reg,
PCIE_PCS_MASK, val);
- return ret;
+ if (ret)
+ return ret;
+
+ ti_pipe3_calibrate(phy);
+
+ return 0;
}
/* Bring it out of IDLE if it is IDLE */
@@ -513,6 +585,29 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
return 0;
}
+static int ti_pipe3_get_tx_rx_base(struct ti_pipe3 *phy)
+{
+ struct resource *res;
+ struct device *dev = phy->dev;
+ struct device_node *node = dev->of_node;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie"))
+ return 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "phy_rx");
+ phy->phy_rx = devm_ioremap_resource(dev, res);
+ if (IS_ERR(phy->phy_rx))
+ return PTR_ERR(phy->phy_rx);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "phy_tx");
+ phy->phy_tx = devm_ioremap_resource(dev, res);
+
+ return PTR_ERR_OR_ZERO(phy->phy_tx);
+}
+
static int ti_pipe3_get_pll_base(struct ti_pipe3 *phy)
{
struct resource *res;
@@ -559,6 +654,10 @@ static int ti_pipe3_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = ti_pipe3_get_tx_rx_base(phy);
+ if (ret)
+ return ret;
+
ret = ti_pipe3_get_sysctrl(phy);
if (ret)
return ret;
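The new ti_pipe3_calibrate() describes multi-bit fields with GENMASK() and updates them with the usual read-modify-write pattern. A minimal sketch of one such update (hypothetical helper name; plain readl()/writel() used here instead of the driver's ti_pipe3_readl()/ti_pipe3_writel() wrappers):

#include <linux/bitops.h>	/* GENMASK(), e.g. GENMASK(31, 30) == 0xc0000000 */
#include <linux/io.h>

/*
 * Program MEM_DLL_TRIM_SEL (bits 31:30 of PCIEPHYRX_TRIM) to 0x2, as the
 * calibration routine above does.
 */
static void example_set_dll_trim(void __iomem *phy_rx)
{
	u32 val = readl(phy_rx + PCIEPHYRX_TRIM);

	val &= ~MEM_DLL_TRIM_SEL;		/* clear the 2-bit field */
	val |= 0x2 << MEM_DLL_TRIM_SHIFT;	/* write the new trim code */
	writel(val, phy_rx + PCIEPHYRX_TRIM);
}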
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 82cd8b08d71f..4571cc098b76 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -2,11 +2,10 @@
# PINCTRL infrastructure and drivers
#
-config PINCTRL
- bool
+menuconfig PINCTRL
+ bool "Pin controllers"
-menu "Pin controllers"
- depends on PINCTRL
+if PINCTRL
config GENERIC_PINCTRL_GROUPS
bool
@@ -33,7 +32,8 @@ config DEBUG_PINCTRL
config PINCTRL_ADI2
bool "ADI pin controller driver"
- depends on BLACKFIN
+ depends on (BF54x || BF60x)
+ depends on !GPIO_ADI
select PINMUX
select IRQ_DOMAIN
help
@@ -98,7 +98,8 @@ config PINCTRL_AT91PIO4
config PINCTRL_AMD
tristate "AMD GPIO pin control"
- depends on GPIOLIB
+ depends on HAS_IOMEM
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select PINMUX
select PINCONF
@@ -152,12 +153,14 @@ config PINCTRL_GEMINI
depends on ARCH_GEMINI
default ARCH_GEMINI
select PINMUX
+ select GENERIC_PINCONF
select MFD_SYSCON
config PINCTRL_MCP23S08
tristate "Microchip MCP23xxx I/O expander"
depends on SPI_MASTER || I2C
depends on I2C || I2C=n
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select REGMAP_I2C if I2C
select REGMAP_SPI if SPI_MASTER
@@ -168,16 +171,6 @@ config PINCTRL_MCP23S08
This provides a GPIO interface supporting inputs and outputs.
The I2C versions of the chips can be used as interrupt-controller.
-config PINCTRL_MESON
- bool
- depends on OF
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select GPIOLIB
- select OF_GPIO
- select REGMAP_MMIO
-
config PINCTRL_OXNAS
bool
depends on OF
@@ -210,6 +203,7 @@ config PINCTRL_RZA1
config PINCTRL_SINGLE
tristate "One-register-per-pin type device tree based pinctrl driver"
depends on OF
+ depends on HAS_IOMEM
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
@@ -226,10 +220,11 @@ config PINCTRL_SIRF
config PINCTRL_SX150X
bool "Semtech SX150x I2C GPIO expander pinctrl driver"
- depends on GPIOLIB && I2C=y
+ depends on I2C=y
select PINMUX
select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select REGMAP
help
@@ -369,6 +364,7 @@ source "drivers/pinctrl/uniphier/Kconfig"
source "drivers/pinctrl/vt8500/Kconfig"
source "drivers/pinctrl/mediatek/Kconfig"
source "drivers/pinctrl/zte/Kconfig"
+source "drivers/pinctrl/meson/Kconfig"
config PINCTRL_XWAY
bool
@@ -380,4 +376,4 @@ config PINCTRL_TB10X
depends on OF && ARC_PLAT_TB10X
select GPIOLIB
-endmenu
+endif
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index ff782445dfb7..785c366fd6d6 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -379,7 +379,7 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
events &= pc->enabled_irq_map[bank];
for_each_set_bit(offset, &events, 32) {
gpio = (32 * bank) + offset;
- generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain,
+ generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irq.domain,
gpio));
}
}
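This and the following GPIO drivers only chase a gpiolib rename: the IRQ domain (and, further down, the valid-mask bookkeeping) now lives under gpio_chip.irq instead of directly in gpio_chip. The dispatch pattern itself is unchanged; a minimal sketch with a hypothetical helper name:

#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Map a hardware GPIO offset to its Linux virq and dispatch it. */
static void example_handle_gpio(struct gpio_chip *gc, unsigned int offset)
{
	unsigned int virq = irq_find_mapping(gc->irq.domain, offset);

	if (virq)
		generic_handle_irq(virq);
}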
@@ -661,7 +661,7 @@ static void bcm2835_pctl_pin_dbg_show(struct pinctrl_dev *pctldev,
enum bcm2835_fsel fsel = bcm2835_pinctrl_fsel_get(pc, offset);
const char *fname = bcm2835_functions[fsel];
int value = bcm2835_gpio_get_bit(pc, GPLEV0, offset);
- int irq = irq_find_mapping(chip->irqdomain, offset);
+ int irq = irq_find_mapping(chip->irq.domain, offset);
seq_printf(s, "function %s in %s; irq %d (%s)",
fname, value ? "hi" : "lo",
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 85a8c97d9dfe..b70058caee50 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -172,7 +172,7 @@ static void iproc_gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(bit, &val, NGPIOS_PER_BANK) {
unsigned pin = NGPIOS_PER_BANK * i + bit;
- int child_irq = irq_find_mapping(gc->irqdomain, pin);
+ int child_irq = irq_find_mapping(gc->irq.domain, pin);
/*
* Clear the interrupt before invoking the
@@ -311,7 +311,7 @@ static int iproc_gpio_request(struct gpio_chip *gc, unsigned offset)
if (!chip->pinmux_is_supported)
return 0;
- return pinctrl_request_gpio(gpio);
+ return pinctrl_gpio_request(gpio);
}
static void iproc_gpio_free(struct gpio_chip *gc, unsigned offset)
@@ -322,7 +322,7 @@ static void iproc_gpio_free(struct gpio_chip *gc, unsigned offset)
if (!chip->pinmux_is_supported)
return;
- pinctrl_free_gpio(gpio);
+ pinctrl_gpio_free(gpio);
}
static int iproc_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index 1cfe45fd391f..e67ae52023ad 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -275,23 +275,6 @@ static struct irq_chip nsp_gpio_irq_chip = {
.irq_set_type = nsp_gpio_irq_set_type,
};
-/*
- * Request the nsp IOMUX pinmux controller to mux individual pins to GPIO
- */
-static int nsp_gpio_request(struct gpio_chip *gc, unsigned offset)
-{
- unsigned gpio = gc->base + offset;
-
- return pinctrl_request_gpio(gpio);
-}
-
-static void nsp_gpio_free(struct gpio_chip *gc, unsigned offset)
-{
- unsigned gpio = gc->base + offset;
-
- pinctrl_free_gpio(gpio);
-}
-
static int nsp_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
@@ -670,8 +653,8 @@ static int nsp_gpio_probe(struct platform_device *pdev)
gc->label = dev_name(dev);
gc->parent = dev;
gc->of_node = dev->of_node;
- gc->request = nsp_gpio_request;
- gc->free = nsp_gpio_free;
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
gc->direction_input = nsp_gpio_direction_input;
gc->direction_output = nsp_gpio_direction_output;
gc->set = nsp_gpio_set;
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 56fbe4c3e800..4c8d5b23e4d0 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -733,14 +733,14 @@ int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
}
/**
- * pinctrl_request_gpio() - request a single pin to be used as GPIO
+ * pinctrl_gpio_request() - request a single pin to be used as GPIO
* @gpio: the GPIO pin number from the GPIO subsystem number space
*
* This function should *ONLY* be used from gpiolib-based GPIO drivers,
* as part of their gpio_request() semantics, platforms and individual drivers
* shall *NOT* request GPIO pins to be muxed in.
*/
-int pinctrl_request_gpio(unsigned gpio)
+int pinctrl_gpio_request(unsigned gpio)
{
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range *range;
@@ -765,17 +765,17 @@ int pinctrl_request_gpio(unsigned gpio)
return ret;
}
-EXPORT_SYMBOL_GPL(pinctrl_request_gpio);
+EXPORT_SYMBOL_GPL(pinctrl_gpio_request);
/**
- * pinctrl_free_gpio() - free control on a single pin, currently used as GPIO
+ * pinctrl_gpio_free() - free control on a single pin, currently used as GPIO
* @gpio: the GPIO pin number from the GPIO subsystem number space
*
* This function should *ONLY* be used from gpiolib-based GPIO drivers,
* as part of their gpio_free() semantics, platforms and individual drivers
* shall *NOT* request GPIO pins to be muxed out.
*/
-void pinctrl_free_gpio(unsigned gpio)
+void pinctrl_gpio_free(unsigned gpio)
{
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range *range;
@@ -795,7 +795,7 @@ void pinctrl_free_gpio(unsigned gpio)
mutex_unlock(&pctldev->mutex);
}
-EXPORT_SYMBOL_GPL(pinctrl_free_gpio);
+EXPORT_SYMBOL_GPL(pinctrl_gpio_free);
static int pinctrl_gpio_direction(unsigned gpio, bool input)
{
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 7880c3adc450..8cf2eba17c8c 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -154,7 +154,7 @@ struct pinctrl_setting {
* or pin, and each of these will increment the @usecount.
* @mux_owner: The name of device that called pinctrl_get().
* @mux_setting: The most recent selected mux setting for this pin, if any.
- * @gpio_owner: If pinctrl_request_gpio() was called for this pin, this is
+ * @gpio_owner: If pinctrl_gpio_request() was called for this pin, this is
* the name of the GPIO that "owns" this pin.
*/
struct pin_desc {
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index f30720a752f3..4aea1b8504f7 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -5,7 +5,8 @@ if (X86 || COMPILE_TEST)
config PINCTRL_BAYTRAIL
bool "Intel Baytrail GPIO pin control"
- depends on GPIOLIB && ACPI
+ depends on ACPI
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select PINMUX
select PINCONF
@@ -65,6 +66,14 @@ config PINCTRL_CANNONLAKE
This pinctrl driver provides an interface that allows configuring
of Intel Cannon Lake PCH pins and using them as GPIOs.
+config PINCTRL_CEDARFORK
+ tristate "Intel Cedar Fork pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Cedar Fork PCH pins and using them as GPIOs.
+
config PINCTRL_DENVERTON
tristate "Intel Denverton pinctrl and GPIO driver"
depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 624d367caa09..fadfe3ea2b04 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_PINCTRL_MERRIFIELD) += pinctrl-merrifield.o
obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o
obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o
obj-$(CONFIG_PINCTRL_CANNONLAKE) += pinctrl-cannonlake.o
+obj-$(CONFIG_PINCTRL_CEDARFORK) += pinctrl-cedarfork.o
obj-$(CONFIG_PINCTRL_DENVERTON) += pinctrl-denverton.o
obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 0f3a02495aeb..9c1ca29c60b7 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1627,7 +1627,7 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
pending = readl(reg);
raw_spin_unlock(&vg->lock);
for_each_set_bit(pin, &pending, 32) {
- virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
+ virq = irq_find_mapping(vg->chip.irq.domain, base + pin);
generic_handle_irq(virq);
}
}
@@ -1660,7 +1660,7 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
value = readl(reg);
if (value & BYT_DIRECT_IRQ_EN) {
- clear_bit(i, gc->irq_valid_mask);
+ clear_bit(i, gc->irq.valid_mask);
dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
} else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
byt_gpio_clear_triggering(vg, i);
@@ -1703,7 +1703,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
gc->can_sleep = false;
gc->parent = &vg->pdev->dev;
gc->ngpio = vg->soc_data->npins;
- gc->irq_need_valid_mask = true;
+ gc->irq.need_valid_mask = true;
#ifdef CONFIG_PM_SLEEP
vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
diff --git a/drivers/pinctrl/intel/pinctrl-cedarfork.c b/drivers/pinctrl/intel/pinctrl-cedarfork.c
new file mode 100644
index 000000000000..59216b0533d9
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-cedarfork.c
@@ -0,0 +1,375 @@
+/*
+ * Intel Cedar Fork PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define CDF_PAD_OWN 0x020
+#define CDF_PADCFGLOCK 0x0c0
+#define CDF_HOSTSW_OWN 0x120
+#define CDF_GPI_IS 0x200
+#define CDF_GPI_IE 0x230
+
+#define CDF_GPP(r, s, e) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ }
+
+#define CDF_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = CDF_PAD_OWN, \
+ .padcfglock_offset = CDF_PADCFGLOCK, \
+ .hostown_offset = CDF_HOSTSW_OWN, \
+ .is_offset = CDF_GPI_IS, \
+ .ie_offset = CDF_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Cedar Fork PCH */
+static const struct pinctrl_pin_desc cdf_pins[] = {
+ /* WEST2 */
+ PINCTRL_PIN(0, "GBE_SDP_TIMESYNC0_S2N"),
+ PINCTRL_PIN(1, "GBE_SDP_TIMESYNC1_S2N"),
+ PINCTRL_PIN(2, "GBE_SDP_TIMESYNC2_S2N"),
+ PINCTRL_PIN(3, "GBE_SDP_TIMESYNC3_S2N"),
+ PINCTRL_PIN(4, "GBE0_I2C_CLK"),
+ PINCTRL_PIN(5, "GBE0_I2C_DATA"),
+ PINCTRL_PIN(6, "GBE1_I2C_CLK"),
+ PINCTRL_PIN(7, "GBE1_I2C_DATA"),
+ PINCTRL_PIN(8, "GBE2_I2C_CLK"),
+ PINCTRL_PIN(9, "GBE2_I2C_DATA"),
+ PINCTRL_PIN(10, "GBE3_I2C_CLK"),
+ PINCTRL_PIN(11, "GBE3_I2C_DATA"),
+ PINCTRL_PIN(12, "GBE0_LED0"),
+ PINCTRL_PIN(13, "GBE0_LED1"),
+ PINCTRL_PIN(14, "GBE0_LED2"),
+ PINCTRL_PIN(15, "GBE1_LED0"),
+ PINCTRL_PIN(16, "GBE1_LED1"),
+ PINCTRL_PIN(17, "GBE1_LED2"),
+ PINCTRL_PIN(18, "GBE2_LED0"),
+ PINCTRL_PIN(19, "GBE2_LED1"),
+ PINCTRL_PIN(20, "GBE2_LED2"),
+ PINCTRL_PIN(21, "GBE3_LED0"),
+ PINCTRL_PIN(22, "GBE3_LED1"),
+ PINCTRL_PIN(23, "GBE3_LED2"),
+ /* WEST3 */
+ PINCTRL_PIN(24, "NCSI_RXD0"),
+ PINCTRL_PIN(25, "NCSI_CLK_IN"),
+ PINCTRL_PIN(26, "NCSI_RXD1"),
+ PINCTRL_PIN(27, "NCSI_CRS_DV"),
+ PINCTRL_PIN(28, "NCSI_ARB_IN"),
+ PINCTRL_PIN(29, "NCSI_TX_EN"),
+ PINCTRL_PIN(30, "NCSI_TXD0"),
+ PINCTRL_PIN(31, "NCSI_TXD1"),
+ PINCTRL_PIN(32, "NCSI_ARB_OUT"),
+ PINCTRL_PIN(33, "GBE_SMB_CLK"),
+ PINCTRL_PIN(34, "GBE_SMB_DATA"),
+ PINCTRL_PIN(35, "GBE_SMB_ALRT_N"),
+ PINCTRL_PIN(36, "THERMTRIP_N"),
+ PINCTRL_PIN(37, "PCHHOT_N"),
+ PINCTRL_PIN(38, "ERROR0_N"),
+ PINCTRL_PIN(39, "ERROR1_N"),
+ PINCTRL_PIN(40, "ERROR2_N"),
+ PINCTRL_PIN(41, "MSMI_N"),
+ PINCTRL_PIN(42, "CATERR_N"),
+ PINCTRL_PIN(43, "MEMTRIP_N"),
+ PINCTRL_PIN(44, "UART0_RXD"),
+ PINCTRL_PIN(45, "UART0_TXD"),
+ PINCTRL_PIN(46, "UART1_RXD"),
+ PINCTRL_PIN(47, "UART1_TXD"),
+ /* WEST01 */
+ PINCTRL_PIN(48, "GBE_GPIO13"),
+ PINCTRL_PIN(49, "AUX_PWR"),
+ PINCTRL_PIN(50, "CPU_GP_2"),
+ PINCTRL_PIN(51, "CPU_GP_3"),
+ PINCTRL_PIN(52, "FAN_PWM_0"),
+ PINCTRL_PIN(53, "FAN_PWM_1"),
+ PINCTRL_PIN(54, "FAN_PWM_2"),
+ PINCTRL_PIN(55, "FAN_PWM_3"),
+ PINCTRL_PIN(56, "FAN_TACH_0"),
+ PINCTRL_PIN(57, "FAN_TACH_1"),
+ PINCTRL_PIN(58, "FAN_TACH_2"),
+ PINCTRL_PIN(59, "FAN_TACH_3"),
+ PINCTRL_PIN(60, "ME_SMB0_CLK"),
+ PINCTRL_PIN(61, "ME_SMB0_DATA"),
+ PINCTRL_PIN(62, "ME_SMB0_ALRT_N"),
+ PINCTRL_PIN(63, "ME_SMB1_CLK"),
+ PINCTRL_PIN(64, "ME_SMB1_DATA"),
+ PINCTRL_PIN(65, "ME_SMB1_ALRT_N"),
+ PINCTRL_PIN(66, "ME_SMB2_CLK"),
+ PINCTRL_PIN(67, "ME_SMB2_DATA"),
+ PINCTRL_PIN(68, "ME_SMB2_ALRT_N"),
+ PINCTRL_PIN(69, "GBE_MNG_I2C_CLK"),
+ PINCTRL_PIN(70, "GBE_MNG_I2C_DATA"),
+ /* WEST5 */
+ PINCTRL_PIN(71, "IE_UART_RXD"),
+ PINCTRL_PIN(72, "IE_UART_TXD"),
+ PINCTRL_PIN(73, "VPP_SMB_CLK"),
+ PINCTRL_PIN(74, "VPP_SMB_DATA"),
+ PINCTRL_PIN(75, "VPP_SMB_ALRT_N"),
+ PINCTRL_PIN(76, "PCIE_CLKREQ0_N"),
+ PINCTRL_PIN(77, "PCIE_CLKREQ1_N"),
+ PINCTRL_PIN(78, "PCIE_CLKREQ2_N"),
+ PINCTRL_PIN(79, "PCIE_CLKREQ3_N"),
+ PINCTRL_PIN(80, "PCIE_CLKREQ4_N"),
+ PINCTRL_PIN(81, "PCIE_CLKREQ5_N"),
+ PINCTRL_PIN(82, "PCIE_CLKREQ6_N"),
+ PINCTRL_PIN(83, "PCIE_CLKREQ7_N"),
+ PINCTRL_PIN(84, "PCIE_CLKREQ8_N"),
+ PINCTRL_PIN(85, "PCIE_CLKREQ9_N"),
+ PINCTRL_PIN(86, "FLEX_CLK_SE0"),
+ PINCTRL_PIN(87, "FLEX_CLK_SE1"),
+ PINCTRL_PIN(88, "FLEX_CLK1_50"),
+ PINCTRL_PIN(89, "FLEX_CLK2_50"),
+ PINCTRL_PIN(90, "FLEX_CLK_125"),
+ /* WESTC */
+ PINCTRL_PIN(91, "TCK_PCH"),
+ PINCTRL_PIN(92, "JTAGX_PCH"),
+ PINCTRL_PIN(93, "TRST_N_PCH"),
+ PINCTRL_PIN(94, "TMS_PCH"),
+ PINCTRL_PIN(95, "TDI_PCH"),
+ PINCTRL_PIN(96, "TDO_PCH"),
+ /* WESTC_DFX */
+ PINCTRL_PIN(97, "CX_PRDY_N"),
+ PINCTRL_PIN(98, "CX_PREQ_N"),
+ PINCTRL_PIN(99, "CPU_FBREAK_OUT_N"),
+ PINCTRL_PIN(100, "TRIGGER0_N"),
+ PINCTRL_PIN(101, "TRIGGER1_N"),
+ /* WESTA */
+ PINCTRL_PIN(102, "DBG_PTI_CLK0"),
+ PINCTRL_PIN(103, "DBG_PTI_CLK3"),
+ PINCTRL_PIN(104, "DBG_PTI_DATA0"),
+ PINCTRL_PIN(105, "DBG_PTI_DATA1"),
+ PINCTRL_PIN(106, "DBG_PTI_DATA2"),
+ PINCTRL_PIN(107, "DBG_PTI_DATA3"),
+ PINCTRL_PIN(108, "DBG_PTI_DATA4"),
+ PINCTRL_PIN(109, "DBG_PTI_DATA5"),
+ PINCTRL_PIN(110, "DBG_PTI_DATA6"),
+ PINCTRL_PIN(111, "DBG_PTI_DATA7"),
+ /* WESTB */
+ PINCTRL_PIN(112, "DBG_PTI_DATA8"),
+ PINCTRL_PIN(113, "DBG_PTI_DATA9"),
+ PINCTRL_PIN(114, "DBG_PTI_DATA10"),
+ PINCTRL_PIN(115, "DBG_PTI_DATA11"),
+ PINCTRL_PIN(116, "DBG_PTI_DATA12"),
+ PINCTRL_PIN(117, "DBG_PTI_DATA13"),
+ PINCTRL_PIN(118, "DBG_PTI_DATA14"),
+ PINCTRL_PIN(119, "DBG_PTI_DATA15"),
+ PINCTRL_PIN(120, "DBG_SPARE0"),
+ PINCTRL_PIN(121, "DBG_SPARE1"),
+ PINCTRL_PIN(122, "DBG_SPARE2"),
+ PINCTRL_PIN(123, "DBG_SPARE3"),
+ /* WESTD */
+ PINCTRL_PIN(124, "CPU_PWR_GOOD"),
+ PINCTRL_PIN(125, "PLTRST_CPU_N"),
+ PINCTRL_PIN(126, "NAC_RESET_NAC_N"),
+ PINCTRL_PIN(127, "PCH_SBLINK_RX"),
+ PINCTRL_PIN(128, "PCH_SBLINK_TX"),
+ PINCTRL_PIN(129, "PMSYNC_CLK"),
+ PINCTRL_PIN(130, "CPU_ERR0_N"),
+ PINCTRL_PIN(131, "CPU_ERR1_N"),
+ PINCTRL_PIN(132, "CPU_ERR2_N"),
+ PINCTRL_PIN(133, "CPU_THERMTRIP_N"),
+ PINCTRL_PIN(134, "CPU_MSMI_N"),
+ PINCTRL_PIN(135, "CPU_CATERR_N"),
+ PINCTRL_PIN(136, "CPU_MEMTRIP_N"),
+ PINCTRL_PIN(137, "NAC_GR_N"),
+ PINCTRL_PIN(138, "NAC_XTAL_VALID"),
+ PINCTRL_PIN(139, "NAC_WAKE_N"),
+ PINCTRL_PIN(140, "NAC_SBLINK_CLK_S2N"),
+ PINCTRL_PIN(141, "NAC_SBLINK_N2S"),
+ PINCTRL_PIN(142, "NAC_SBLINK_S2N"),
+ PINCTRL_PIN(143, "NAC_SBLINK_CLK_N2S"),
+ /* WESTD_PECI */
+ PINCTRL_PIN(144, "ME_PECI"),
+ /* WESTF */
+ PINCTRL_PIN(145, "NAC_RMII_CLK"),
+ PINCTRL_PIN(146, "NAC_RGMII_CLK"),
+ PINCTRL_PIN(147, "NAC_SPARE0"),
+ PINCTRL_PIN(148, "NAC_SPARE1"),
+ PINCTRL_PIN(149, "NAC_SPARE2"),
+ PINCTRL_PIN(150, "NAC_INIT_SX_WAKE_N"),
+ PINCTRL_PIN(151, "NAC_GBE_GPIO0_S2N"),
+ PINCTRL_PIN(152, "NAC_GBE_GPIO1_S2N"),
+ PINCTRL_PIN(153, "NAC_GBE_GPIO2_S2N"),
+ PINCTRL_PIN(154, "NAC_GBE_GPIO3_S2N"),
+ PINCTRL_PIN(155, "NAC_NCSI_RXD0"),
+ PINCTRL_PIN(156, "NAC_NCSI_CLK_IN"),
+ PINCTRL_PIN(157, "NAC_NCSI_RXD1"),
+ PINCTRL_PIN(158, "NAC_NCSI_CRS_DV"),
+ PINCTRL_PIN(159, "NAC_NCSI_ARB_IN"),
+ PINCTRL_PIN(160, "NAC_NCSI_TX_EN"),
+ PINCTRL_PIN(161, "NAC_NCSI_TXD0"),
+ PINCTRL_PIN(162, "NAC_NCSI_TXD1"),
+ PINCTRL_PIN(163, "NAC_NCSI_ARB_OUT"),
+ PINCTRL_PIN(164, "NAC_NCSI_OE_N"),
+ PINCTRL_PIN(165, "NAC_GBE_SMB_CLK"),
+ PINCTRL_PIN(166, "NAC_GBE_SMB_DATA"),
+ PINCTRL_PIN(167, "NAC_GBE_SMB_ALRT_N"),
+ /* EAST2 */
+ PINCTRL_PIN(168, "USB_OC0_N"),
+ PINCTRL_PIN(169, "GBE_GPIO0"),
+ PINCTRL_PIN(170, "GBE_GPIO1"),
+ PINCTRL_PIN(171, "GBE_GPIO2"),
+ PINCTRL_PIN(172, "GBE_GPIO3"),
+ PINCTRL_PIN(173, "GBE_GPIO4"),
+ PINCTRL_PIN(174, "GBE_GPIO5"),
+ PINCTRL_PIN(175, "GBE_GPIO6"),
+ PINCTRL_PIN(176, "GBE_GPIO7"),
+ PINCTRL_PIN(177, "GBE_GPIO8"),
+ PINCTRL_PIN(178, "GBE_GPIO9"),
+ PINCTRL_PIN(179, "GBE_GPIO10"),
+ PINCTRL_PIN(180, "GBE_GPIO11"),
+ PINCTRL_PIN(181, "GBE_GPIO12"),
+ PINCTRL_PIN(182, "SATA0_LED_N"),
+ PINCTRL_PIN(183, "SATA1_LED_N"),
+ PINCTRL_PIN(184, "SATA_PDETECT0"),
+ PINCTRL_PIN(185, "SATA_PDETECT1"),
+ PINCTRL_PIN(186, "SATA0_SDOUT"),
+ PINCTRL_PIN(187, "SATA1_SDOUT"),
+ PINCTRL_PIN(188, "SATA2_LED_N"),
+ PINCTRL_PIN(189, "SATA_PDETECT2"),
+ PINCTRL_PIN(190, "SATA2_SDOUT"),
+ /* EAST3 */
+ PINCTRL_PIN(191, "ESPI_IO0"),
+ PINCTRL_PIN(192, "ESPI_IO1"),
+ PINCTRL_PIN(193, "ESPI_IO2"),
+ PINCTRL_PIN(194, "ESPI_IO3"),
+ PINCTRL_PIN(195, "ESPI_CLK"),
+ PINCTRL_PIN(196, "ESPI_RST_N"),
+ PINCTRL_PIN(197, "ESPI_CS0_N"),
+ PINCTRL_PIN(198, "ESPI_ALRT0_N"),
+ PINCTRL_PIN(199, "ESPI_CS1_N"),
+ PINCTRL_PIN(200, "ESPI_ALRT1_N"),
+ PINCTRL_PIN(201, "ESPI_CLK_LOOPBK"),
+ /* EAST0 */
+ PINCTRL_PIN(202, "SPI_CS0_N"),
+ PINCTRL_PIN(203, "SPI_CS1_N"),
+ PINCTRL_PIN(204, "SPI_MOSI_IO0"),
+ PINCTRL_PIN(205, "SPI_MISO_IO1"),
+ PINCTRL_PIN(206, "SPI_IO2"),
+ PINCTRL_PIN(207, "SPI_IO3"),
+ PINCTRL_PIN(208, "SPI_CLK"),
+ PINCTRL_PIN(209, "SPI_CLK_LOOPBK"),
+ PINCTRL_PIN(210, "SUSPWRDNACK"),
+ PINCTRL_PIN(211, "PMU_SUSCLK"),
+ PINCTRL_PIN(212, "ADR_COMPLETE"),
+ PINCTRL_PIN(213, "ADR_TRIGGER_N"),
+ PINCTRL_PIN(214, "PMU_SLP_S45_N"),
+ PINCTRL_PIN(215, "PMU_SLP_S3_N"),
+ PINCTRL_PIN(216, "PMU_WAKE_N"),
+ PINCTRL_PIN(217, "PMU_PWRBTN_N"),
+ PINCTRL_PIN(218, "PMU_RESETBUTTON_N"),
+ PINCTRL_PIN(219, "PMU_PLTRST_N"),
+ PINCTRL_PIN(220, "SUS_STAT_N"),
+ PINCTRL_PIN(221, "PMU_I2C_CLK"),
+ PINCTRL_PIN(222, "PMU_I2C_DATA"),
+ PINCTRL_PIN(223, "PECI_SMB_CLK"),
+ PINCTRL_PIN(224, "PECI_SMB_DATA"),
+ PINCTRL_PIN(225, "PECI_SMB_ALRT_N"),
+ /* EMMC */
+ PINCTRL_PIN(226, "EMMC_CMD"),
+ PINCTRL_PIN(227, "EMMC_STROBE"),
+ PINCTRL_PIN(228, "EMMC_CLK"),
+ PINCTRL_PIN(229, "EMMC_D0"),
+ PINCTRL_PIN(230, "EMMC_D1"),
+ PINCTRL_PIN(231, "EMMC_D2"),
+ PINCTRL_PIN(232, "EMMC_D3"),
+ PINCTRL_PIN(233, "EMMC_D4"),
+ PINCTRL_PIN(234, "EMMC_D5"),
+ PINCTRL_PIN(235, "EMMC_D6"),
+ PINCTRL_PIN(236, "EMMC_D7"),
+};
+
+static const struct intel_padgroup cdf_community0_gpps[] = {
+ CDF_GPP(0, 0, 23), /* WEST2 */
+ CDF_GPP(1, 24, 47), /* WEST3 */
+ CDF_GPP(2, 48, 70), /* WEST01 */
+ CDF_GPP(3, 71, 90), /* WEST5 */
+ CDF_GPP(4, 91, 96), /* WESTC */
+ CDF_GPP(5, 97, 101), /* WESTC_DFX */
+ CDF_GPP(6, 102, 111), /* WESTA */
+ CDF_GPP(7, 112, 123), /* WESTB */
+ CDF_GPP(8, 124, 143), /* WESTD */
+ CDF_GPP(9, 144, 144), /* WESTD_PECI */
+ CDF_GPP(10, 145, 167), /* WESTF */
+};
+
+static const struct intel_padgroup cdf_community1_gpps[] = {
+ CDF_GPP(0, 168, 190), /* EAST2 */
+ CDF_GPP(1, 191, 201), /* EAST3 */
+ CDF_GPP(2, 202, 225), /* EAST0 */
+ CDF_GPP(3, 226, 236), /* EMMC */
+};
+
+static const struct intel_community cdf_communities[] = {
+ CDF_COMMUNITY(0, 0, 167, cdf_community0_gpps), /* West */
+ CDF_COMMUNITY(1, 168, 236, cdf_community1_gpps), /* East */
+};
+
+static const struct intel_pinctrl_soc_data cdf_soc_data = {
+ .pins = cdf_pins,
+ .npins = ARRAY_SIZE(cdf_pins),
+ .communities = cdf_communities,
+ .ncommunities = ARRAY_SIZE(cdf_communities),
+};
+
+static int cdf_pinctrl_probe(struct platform_device *pdev)
+{
+ return intel_pinctrl_probe(pdev, &cdf_soc_data);
+}
+
+static const struct dev_pm_ops cdf_pinctrl_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
+ intel_pinctrl_resume)
+};
+
+static const struct acpi_device_id cdf_pinctrl_acpi_match[] = {
+ { "INTC3001" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cdf_pinctrl_acpi_match);
+
+static struct platform_driver cdf_pinctrl_driver = {
+ .probe = cdf_pinctrl_probe,
+ .driver = {
+ .name = "cedarfork-pinctrl",
+ .acpi_match_table = cdf_pinctrl_acpi_match,
+ .pm = &cdf_pinctrl_pm_ops,
+ },
+};
+
+static int __init cdf_pinctrl_init(void)
+{
+ return platform_driver_register(&cdf_pinctrl_driver);
+}
+subsys_initcall(cdf_pinctrl_init);
+
+static void __exit cdf_pinctrl_exit(void)
+{
+ platform_driver_unregister(&cdf_pinctrl_driver);
+}
+module_exit(cdf_pinctrl_exit);
+
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Cedar Fork PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
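The CDF_GPP() and CDF_COMMUNITY() initialisers above take inclusive first/last pin numbers, so the sizes come out as last - first + 1. Expanded for illustration:

/* CDF_GPP(0, 0, 23) for WEST2 is equivalent to: */
{ .reg_num = 0, .base = 0, .size = 24 },	/* 23 - 0 + 1 pads */

/*
 * and CDF_COMMUNITY(0, 0, 167, cdf_community0_gpps) yields
 * .pin_base = 0, .npins = 168, .gpps = cdf_community0_gpps,
 * .ngpps = ARRAY_SIZE(cdf_community0_gpps) = 11, plus the fixed
 * CDF_* register offsets (PAD_OWN 0x020, GPI_IS 0x200, GPI_IE 0x230, ...).
 */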
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index fadbca907c7c..bdedb6325c72 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -491,7 +491,7 @@ static const struct chv_community north_community = {
.ngpio_ranges = ARRAY_SIZE(north_gpio_ranges),
.ngpios = ARRAY_SIZE(north_pins),
/*
- * North community can benerate GPIO interrupts only for the first
+ * North community can generate GPIO interrupts only for the first
* 8 interrupts. The upper half (8-15) can only be used to trigger
* GPEs.
*/
@@ -1523,7 +1523,7 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
unsigned irq, offset;
offset = pctrl->intr_lines[intr_line];
- irq = irq_find_mapping(gc->irqdomain, offset);
+ irq = irq_find_mapping(gc->irq.domain, offset);
generic_handle_irq(irq);
}
@@ -1585,7 +1585,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
chip->label = dev_name(pctrl->dev);
chip->parent = pctrl->dev;
chip->base = -1;
- chip->irq_need_valid_mask = need_valid_mask;
+ chip->irq.need_valid_mask = need_valid_mask;
ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
if (ret) {
@@ -1617,7 +1617,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
if (need_valid_mask && intsel >= pctrl->community->nirqs)
- clear_bit(i, chip->irq_valid_mask);
+ clear_bit(i, chip->irq.valid_mask);
}
/* Clear all interrupts */
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 71df0f70b61f..12a1af45acb9 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -30,8 +30,6 @@
#define PADBAR 0x00c
#define GPI_IS 0x100
-#define GPI_GPE_STS 0x140
-#define GPI_GPE_EN 0x160
#define PADOWN_BITS 4
#define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS)
@@ -818,7 +816,7 @@ static void intel_gpio_irq_ack(struct irq_data *d)
community = intel_get_community(pctrl, pin);
if (community) {
const struct intel_padgroup *padgrp;
- unsigned gpp, gpp_offset;
+ unsigned gpp, gpp_offset, is_offset;
padgrp = intel_community_get_padgroup(community, pin);
if (!padgrp)
@@ -826,9 +824,10 @@ static void intel_gpio_irq_ack(struct irq_data *d)
gpp = padgrp->reg_num;
gpp_offset = padgroup_offset(padgrp, pin);
+ is_offset = community->is_offset + gpp * 4;
raw_spin_lock(&pctrl->lock);
- writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
+ writel(BIT(gpp_offset), community->regs + is_offset);
raw_spin_unlock(&pctrl->lock);
}
}
@@ -843,7 +842,7 @@ static void intel_gpio_irq_enable(struct irq_data *d)
community = intel_get_community(pctrl, pin);
if (community) {
const struct intel_padgroup *padgrp;
- unsigned gpp, gpp_offset;
+ unsigned gpp, gpp_offset, is_offset;
unsigned long flags;
u32 value;
@@ -853,10 +852,11 @@ static void intel_gpio_irq_enable(struct irq_data *d)
gpp = padgrp->reg_num;
gpp_offset = padgroup_offset(padgrp, pin);
+ is_offset = community->is_offset + gpp * 4;
raw_spin_lock_irqsave(&pctrl->lock, flags);
/* Clear interrupt status first to avoid unexpected interrupt */
- writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
+ writel(BIT(gpp_offset), community->regs + is_offset);
value = readl(community->regs + community->ie_offset + gpp * 4);
value |= BIT(gpp_offset);
@@ -991,7 +991,8 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
const struct intel_padgroup *padgrp = &community->gpps[gpp];
unsigned long pending, enabled, gpp_offset;
- pending = readl(community->regs + GPI_IS + padgrp->reg_num * 4);
+ pending = readl(community->regs + community->is_offset +
+ padgrp->reg_num * 4);
enabled = readl(community->regs + community->ie_offset +
padgrp->reg_num * 4);
@@ -1005,7 +1006,7 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
if (padno >= community->npins)
break;
- irq = irq_find_mapping(gc->irqdomain,
+ irq = irq_find_mapping(gc->irq.domain,
community->pin_base + padno);
generic_handle_irq(irq);
@@ -1241,6 +1242,9 @@ int intel_pinctrl_probe(struct platform_device *pdev,
community->regs = regs;
community->pad_regs = regs + padbar;
+ if (!community->is_offset)
+ community->is_offset = GPI_IS;
+
ret = intel_pinctrl_add_padgroups(pctrl, community);
if (ret)
return ret;
@@ -1356,7 +1360,7 @@ static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
for (gpp = 0; gpp < community->ngpps; gpp++) {
/* Mask and clear all interrupts */
writel(0, base + community->ie_offset + gpp * 4);
- writel(0xffff, base + GPI_IS + gpp * 4);
+ writel(0xffff, base + community->is_offset + gpp * 4);
}
}
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 7fdb07753c2d..13b0bd6eb2a2 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -73,6 +73,8 @@ struct intel_padgroup {
* @hostown_offset: Register offset of HOSTSW_OWN from @regs. If %0 then it
* is assumed that the host owns the pin (rather than
* ACPI).
+ * @is_offset: Register offset of GPI_IS from @regs. If %0 then uses the
+ * default (%0x100).
* @ie_offset: Register offset of GPI_IE from @regs.
* @pin_base: Starting pin of pins in this community
* @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK,
@@ -98,6 +100,7 @@ struct intel_community {
unsigned padown_offset;
unsigned padcfglock_offset;
unsigned hostown_offset;
+ unsigned is_offset;
unsigned ie_offset;
unsigned pin_base;
unsigned gpp_size;
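With the new is_offset member (left at 0 by existing SoC drivers and defaulted to GPI_IS, 0x100, at probe time), every interrupt-status access is parameterised per community; Cedar Fork is the first user, with its 0x200 offset. The resulting address computation is roughly as below (sketch with a hypothetical helper name; the write-1-to-clear behaviour is inferred from the ack path above):

#include <linux/bitops.h>
#include <linux/io.h>

#include "pinctrl-intel.h"	/* struct intel_community */

/*
 * Ack one pad's interrupt: one 32-bit GPI_IS register per pad group,
 * with a stride of 4 bytes per group.
 */
static void example_ack_pad(struct intel_community *community,
			    unsigned int gpp, unsigned int gpp_offset)
{
	void __iomem *is = community->regs + community->is_offset + gpp * 4;

	writel(BIT(gpp_offset), is);
}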
diff --git a/drivers/pinctrl/meson/Kconfig b/drivers/pinctrl/meson/Kconfig
new file mode 100644
index 000000000000..1a51778759ea
--- /dev/null
+++ b/drivers/pinctrl/meson/Kconfig
@@ -0,0 +1,41 @@
+menuconfig PINCTRL_MESON
+ bool "Amlogic SoC pinctrl drivers"
+ depends on ARCH_MESON
+ depends on OF
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select OF_GPIO
+ select REGMAP_MMIO
+
+if PINCTRL_MESON
+
+config PINCTRL_MESON8
+ bool "Meson 8 SoC pinctrl driver"
+ depends on ARM
+ select PINCTRL_MESON8_PMX
+ default y
+
+config PINCTRL_MESON8B
+ bool "Meson 8b SoC pinctrl driver"
+ depends on ARM
+ select PINCTRL_MESON8_PMX
+ default y
+
+config PINCTRL_MESON_GXBB
+ bool "Meson gxbb SoC pinctrl driver"
+ depends on ARM64
+ select PINCTRL_MESON8_PMX
+ default y
+
+config PINCTRL_MESON_GXL
+ bool "Meson gxl SoC pinctrl driver"
+ depends on ARM64
+ select PINCTRL_MESON8_PMX
+ default y
+
+config PINCTRL_MESON8_PMX
+ bool
+
+endif
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
index 27c5b5126008..cbd47bb74549 100644
--- a/drivers/pinctrl/meson/Makefile
+++ b/drivers/pinctrl/meson/Makefile
@@ -1,3 +1,6 @@
-obj-y += pinctrl-meson8.o pinctrl-meson8b.o
-obj-y += pinctrl-meson-gxbb.o pinctrl-meson-gxl.o
-obj-y += pinctrl-meson.o
+obj-$(CONFIG_PINCTRL_MESON) += pinctrl-meson.o
+obj-$(CONFIG_PINCTRL_MESON8_PMX) += pinctrl-meson8-pmx.o
+obj-$(CONFIG_PINCTRL_MESON8) += pinctrl-meson8.o
+obj-$(CONFIG_PINCTRL_MESON8B) += pinctrl-meson8b.o
+obj-$(CONFIG_PINCTRL_MESON_GXBB) += pinctrl-meson-gxbb.o
+obj-$(CONFIG_PINCTRL_MESON_GXL) += pinctrl-meson-gxl.o
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 7bbc0d3cddcf..9079020259c5 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -14,418 +14,417 @@
#include <dt-bindings/gpio/meson-gxbb-gpio.h>
#include "pinctrl-meson.h"
-
-#define EE_OFF 14
+#include "pinctrl-meson8-pmx.h"
static const struct pinctrl_pin_desc meson_gxbb_periphs_pins[] = {
- MESON_PIN(GPIOZ_0, EE_OFF),
- MESON_PIN(GPIOZ_1, EE_OFF),
- MESON_PIN(GPIOZ_2, EE_OFF),
- MESON_PIN(GPIOZ_3, EE_OFF),
- MESON_PIN(GPIOZ_4, EE_OFF),
- MESON_PIN(GPIOZ_5, EE_OFF),
- MESON_PIN(GPIOZ_6, EE_OFF),
- MESON_PIN(GPIOZ_7, EE_OFF),
- MESON_PIN(GPIOZ_8, EE_OFF),
- MESON_PIN(GPIOZ_9, EE_OFF),
- MESON_PIN(GPIOZ_10, EE_OFF),
- MESON_PIN(GPIOZ_11, EE_OFF),
- MESON_PIN(GPIOZ_12, EE_OFF),
- MESON_PIN(GPIOZ_13, EE_OFF),
- MESON_PIN(GPIOZ_14, EE_OFF),
- MESON_PIN(GPIOZ_15, EE_OFF),
-
- MESON_PIN(GPIOH_0, EE_OFF),
- MESON_PIN(GPIOH_1, EE_OFF),
- MESON_PIN(GPIOH_2, EE_OFF),
- MESON_PIN(GPIOH_3, EE_OFF),
-
- MESON_PIN(BOOT_0, EE_OFF),
- MESON_PIN(BOOT_1, EE_OFF),
- MESON_PIN(BOOT_2, EE_OFF),
- MESON_PIN(BOOT_3, EE_OFF),
- MESON_PIN(BOOT_4, EE_OFF),
- MESON_PIN(BOOT_5, EE_OFF),
- MESON_PIN(BOOT_6, EE_OFF),
- MESON_PIN(BOOT_7, EE_OFF),
- MESON_PIN(BOOT_8, EE_OFF),
- MESON_PIN(BOOT_9, EE_OFF),
- MESON_PIN(BOOT_10, EE_OFF),
- MESON_PIN(BOOT_11, EE_OFF),
- MESON_PIN(BOOT_12, EE_OFF),
- MESON_PIN(BOOT_13, EE_OFF),
- MESON_PIN(BOOT_14, EE_OFF),
- MESON_PIN(BOOT_15, EE_OFF),
- MESON_PIN(BOOT_16, EE_OFF),
- MESON_PIN(BOOT_17, EE_OFF),
-
- MESON_PIN(CARD_0, EE_OFF),
- MESON_PIN(CARD_1, EE_OFF),
- MESON_PIN(CARD_2, EE_OFF),
- MESON_PIN(CARD_3, EE_OFF),
- MESON_PIN(CARD_4, EE_OFF),
- MESON_PIN(CARD_5, EE_OFF),
- MESON_PIN(CARD_6, EE_OFF),
-
- MESON_PIN(GPIODV_0, EE_OFF),
- MESON_PIN(GPIODV_1, EE_OFF),
- MESON_PIN(GPIODV_2, EE_OFF),
- MESON_PIN(GPIODV_3, EE_OFF),
- MESON_PIN(GPIODV_4, EE_OFF),
- MESON_PIN(GPIODV_5, EE_OFF),
- MESON_PIN(GPIODV_6, EE_OFF),
- MESON_PIN(GPIODV_7, EE_OFF),
- MESON_PIN(GPIODV_8, EE_OFF),
- MESON_PIN(GPIODV_9, EE_OFF),
- MESON_PIN(GPIODV_10, EE_OFF),
- MESON_PIN(GPIODV_11, EE_OFF),
- MESON_PIN(GPIODV_12, EE_OFF),
- MESON_PIN(GPIODV_13, EE_OFF),
- MESON_PIN(GPIODV_14, EE_OFF),
- MESON_PIN(GPIODV_15, EE_OFF),
- MESON_PIN(GPIODV_16, EE_OFF),
- MESON_PIN(GPIODV_17, EE_OFF),
- MESON_PIN(GPIODV_18, EE_OFF),
- MESON_PIN(GPIODV_19, EE_OFF),
- MESON_PIN(GPIODV_20, EE_OFF),
- MESON_PIN(GPIODV_21, EE_OFF),
- MESON_PIN(GPIODV_22, EE_OFF),
- MESON_PIN(GPIODV_23, EE_OFF),
- MESON_PIN(GPIODV_24, EE_OFF),
- MESON_PIN(GPIODV_25, EE_OFF),
- MESON_PIN(GPIODV_26, EE_OFF),
- MESON_PIN(GPIODV_27, EE_OFF),
- MESON_PIN(GPIODV_28, EE_OFF),
- MESON_PIN(GPIODV_29, EE_OFF),
-
- MESON_PIN(GPIOY_0, EE_OFF),
- MESON_PIN(GPIOY_1, EE_OFF),
- MESON_PIN(GPIOY_2, EE_OFF),
- MESON_PIN(GPIOY_3, EE_OFF),
- MESON_PIN(GPIOY_4, EE_OFF),
- MESON_PIN(GPIOY_5, EE_OFF),
- MESON_PIN(GPIOY_6, EE_OFF),
- MESON_PIN(GPIOY_7, EE_OFF),
- MESON_PIN(GPIOY_8, EE_OFF),
- MESON_PIN(GPIOY_9, EE_OFF),
- MESON_PIN(GPIOY_10, EE_OFF),
- MESON_PIN(GPIOY_11, EE_OFF),
- MESON_PIN(GPIOY_12, EE_OFF),
- MESON_PIN(GPIOY_13, EE_OFF),
- MESON_PIN(GPIOY_14, EE_OFF),
- MESON_PIN(GPIOY_15, EE_OFF),
- MESON_PIN(GPIOY_16, EE_OFF),
-
- MESON_PIN(GPIOX_0, EE_OFF),
- MESON_PIN(GPIOX_1, EE_OFF),
- MESON_PIN(GPIOX_2, EE_OFF),
- MESON_PIN(GPIOX_3, EE_OFF),
- MESON_PIN(GPIOX_4, EE_OFF),
- MESON_PIN(GPIOX_5, EE_OFF),
- MESON_PIN(GPIOX_6, EE_OFF),
- MESON_PIN(GPIOX_7, EE_OFF),
- MESON_PIN(GPIOX_8, EE_OFF),
- MESON_PIN(GPIOX_9, EE_OFF),
- MESON_PIN(GPIOX_10, EE_OFF),
- MESON_PIN(GPIOX_11, EE_OFF),
- MESON_PIN(GPIOX_12, EE_OFF),
- MESON_PIN(GPIOX_13, EE_OFF),
- MESON_PIN(GPIOX_14, EE_OFF),
- MESON_PIN(GPIOX_15, EE_OFF),
- MESON_PIN(GPIOX_16, EE_OFF),
- MESON_PIN(GPIOX_17, EE_OFF),
- MESON_PIN(GPIOX_18, EE_OFF),
- MESON_PIN(GPIOX_19, EE_OFF),
- MESON_PIN(GPIOX_20, EE_OFF),
- MESON_PIN(GPIOX_21, EE_OFF),
-
- MESON_PIN(GPIOCLK_0, EE_OFF),
- MESON_PIN(GPIOCLK_1, EE_OFF),
- MESON_PIN(GPIOCLK_2, EE_OFF),
- MESON_PIN(GPIOCLK_3, EE_OFF),
-
- MESON_PIN(GPIO_TEST_N, EE_OFF),
+ MESON_PIN(GPIOZ_0),
+ MESON_PIN(GPIOZ_1),
+ MESON_PIN(GPIOZ_2),
+ MESON_PIN(GPIOZ_3),
+ MESON_PIN(GPIOZ_4),
+ MESON_PIN(GPIOZ_5),
+ MESON_PIN(GPIOZ_6),
+ MESON_PIN(GPIOZ_7),
+ MESON_PIN(GPIOZ_8),
+ MESON_PIN(GPIOZ_9),
+ MESON_PIN(GPIOZ_10),
+ MESON_PIN(GPIOZ_11),
+ MESON_PIN(GPIOZ_12),
+ MESON_PIN(GPIOZ_13),
+ MESON_PIN(GPIOZ_14),
+ MESON_PIN(GPIOZ_15),
+
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+
+ MESON_PIN(BOOT_0),
+ MESON_PIN(BOOT_1),
+ MESON_PIN(BOOT_2),
+ MESON_PIN(BOOT_3),
+ MESON_PIN(BOOT_4),
+ MESON_PIN(BOOT_5),
+ MESON_PIN(BOOT_6),
+ MESON_PIN(BOOT_7),
+ MESON_PIN(BOOT_8),
+ MESON_PIN(BOOT_9),
+ MESON_PIN(BOOT_10),
+ MESON_PIN(BOOT_11),
+ MESON_PIN(BOOT_12),
+ MESON_PIN(BOOT_13),
+ MESON_PIN(BOOT_14),
+ MESON_PIN(BOOT_15),
+ MESON_PIN(BOOT_16),
+ MESON_PIN(BOOT_17),
+
+ MESON_PIN(CARD_0),
+ MESON_PIN(CARD_1),
+ MESON_PIN(CARD_2),
+ MESON_PIN(CARD_3),
+ MESON_PIN(CARD_4),
+ MESON_PIN(CARD_5),
+ MESON_PIN(CARD_6),
+
+ MESON_PIN(GPIODV_0),
+ MESON_PIN(GPIODV_1),
+ MESON_PIN(GPIODV_2),
+ MESON_PIN(GPIODV_3),
+ MESON_PIN(GPIODV_4),
+ MESON_PIN(GPIODV_5),
+ MESON_PIN(GPIODV_6),
+ MESON_PIN(GPIODV_7),
+ MESON_PIN(GPIODV_8),
+ MESON_PIN(GPIODV_9),
+ MESON_PIN(GPIODV_10),
+ MESON_PIN(GPIODV_11),
+ MESON_PIN(GPIODV_12),
+ MESON_PIN(GPIODV_13),
+ MESON_PIN(GPIODV_14),
+ MESON_PIN(GPIODV_15),
+ MESON_PIN(GPIODV_16),
+ MESON_PIN(GPIODV_17),
+ MESON_PIN(GPIODV_18),
+ MESON_PIN(GPIODV_19),
+ MESON_PIN(GPIODV_20),
+ MESON_PIN(GPIODV_21),
+ MESON_PIN(GPIODV_22),
+ MESON_PIN(GPIODV_23),
+ MESON_PIN(GPIODV_24),
+ MESON_PIN(GPIODV_25),
+ MESON_PIN(GPIODV_26),
+ MESON_PIN(GPIODV_27),
+ MESON_PIN(GPIODV_28),
+ MESON_PIN(GPIODV_29),
+
+ MESON_PIN(GPIOY_0),
+ MESON_PIN(GPIOY_1),
+ MESON_PIN(GPIOY_2),
+ MESON_PIN(GPIOY_3),
+ MESON_PIN(GPIOY_4),
+ MESON_PIN(GPIOY_5),
+ MESON_PIN(GPIOY_6),
+ MESON_PIN(GPIOY_7),
+ MESON_PIN(GPIOY_8),
+ MESON_PIN(GPIOY_9),
+ MESON_PIN(GPIOY_10),
+ MESON_PIN(GPIOY_11),
+ MESON_PIN(GPIOY_12),
+ MESON_PIN(GPIOY_13),
+ MESON_PIN(GPIOY_14),
+ MESON_PIN(GPIOY_15),
+ MESON_PIN(GPIOY_16),
+
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+ MESON_PIN(GPIOX_19),
+ MESON_PIN(GPIOX_20),
+ MESON_PIN(GPIOX_21),
+ MESON_PIN(GPIOX_22),
+
+ MESON_PIN(GPIOCLK_0),
+ MESON_PIN(GPIOCLK_1),
+ MESON_PIN(GPIOCLK_2),
+ MESON_PIN(GPIOCLK_3),
};
static const unsigned int emmc_nand_d07_pins[] = {
- PIN(BOOT_0, EE_OFF), PIN(BOOT_1, EE_OFF), PIN(BOOT_2, EE_OFF),
- PIN(BOOT_3, EE_OFF), PIN(BOOT_4, EE_OFF), PIN(BOOT_5, EE_OFF),
- PIN(BOOT_6, EE_OFF), PIN(BOOT_7, EE_OFF),
-};
-static const unsigned int emmc_clk_pins[] = { PIN(BOOT_8, EE_OFF) };
-static const unsigned int emmc_cmd_pins[] = { PIN(BOOT_10, EE_OFF) };
-static const unsigned int emmc_ds_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int nor_d_pins[] = { PIN(BOOT_11, EE_OFF) };
-static const unsigned int nor_q_pins[] = { PIN(BOOT_12, EE_OFF) };
-static const unsigned int nor_c_pins[] = { PIN(BOOT_13, EE_OFF) };
-static const unsigned int nor_cs_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int spi_sclk_pins[] = { PIN(GPIOZ_6, EE_OFF) };
-static const unsigned int spi_ss0_pins[] = { PIN(GPIOZ_7, EE_OFF) };
-static const unsigned int spi_miso_pins[] = { PIN(GPIOZ_12, EE_OFF) };
-static const unsigned int spi_mosi_pins[] = { PIN(GPIOZ_13, EE_OFF) };
-
-static const unsigned int sdcard_d0_pins[] = { PIN(CARD_1, EE_OFF) };
-static const unsigned int sdcard_d1_pins[] = { PIN(CARD_0, EE_OFF) };
-static const unsigned int sdcard_d2_pins[] = { PIN(CARD_5, EE_OFF) };
-static const unsigned int sdcard_d3_pins[] = { PIN(CARD_4, EE_OFF) };
-static const unsigned int sdcard_cmd_pins[] = { PIN(CARD_3, EE_OFF) };
-static const unsigned int sdcard_clk_pins[] = { PIN(CARD_2, EE_OFF) };
-
-static const unsigned int sdio_d0_pins[] = { PIN(GPIOX_0, EE_OFF) };
-static const unsigned int sdio_d1_pins[] = { PIN(GPIOX_1, EE_OFF) };
-static const unsigned int sdio_d2_pins[] = { PIN(GPIOX_2, EE_OFF) };
-static const unsigned int sdio_d3_pins[] = { PIN(GPIOX_3, EE_OFF) };
-static const unsigned int sdio_cmd_pins[] = { PIN(GPIOX_4, EE_OFF) };
-static const unsigned int sdio_clk_pins[] = { PIN(GPIOX_5, EE_OFF) };
-static const unsigned int sdio_irq_pins[] = { PIN(GPIOX_7, EE_OFF) };
-
-static const unsigned int nand_ce0_pins[] = { PIN(BOOT_8, EE_OFF) };
-static const unsigned int nand_ce1_pins[] = { PIN(BOOT_9, EE_OFF) };
-static const unsigned int nand_rb0_pins[] = { PIN(BOOT_10, EE_OFF) };
-static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, EE_OFF) };
-static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, EE_OFF) };
-static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, EE_OFF) };
-static const unsigned int nand_ren_wr_pins[] = { PIN(BOOT_14, EE_OFF) };
-static const unsigned int nand_dqs_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_12, EE_OFF) };
-static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_13, EE_OFF) };
-static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_14, EE_OFF) };
-static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_15, EE_OFF) };
-
-static const unsigned int uart_tx_b_pins[] = { PIN(GPIODV_24, EE_OFF) };
-static const unsigned int uart_rx_b_pins[] = { PIN(GPIODV_25, EE_OFF) };
-static const unsigned int uart_cts_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
-static const unsigned int uart_rts_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
-
-static const unsigned int uart_tx_c_pins[] = { PIN(GPIOY_13, EE_OFF) };
-static const unsigned int uart_rx_c_pins[] = { PIN(GPIOY_14, EE_OFF) };
-static const unsigned int uart_cts_c_pins[] = { PIN(GPIOX_11, EE_OFF) };
-static const unsigned int uart_rts_c_pins[] = { PIN(GPIOX_12, EE_OFF) };
-
-static const unsigned int i2c_sck_a_pins[] = { PIN(GPIODV_25, EE_OFF) };
-static const unsigned int i2c_sda_a_pins[] = { PIN(GPIODV_24, EE_OFF) };
-
-static const unsigned int i2c_sck_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
-static const unsigned int i2c_sda_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
-
-static const unsigned int i2c_sck_c_pins[] = { PIN(GPIODV_29, EE_OFF) };
-static const unsigned int i2c_sda_c_pins[] = { PIN(GPIODV_28, EE_OFF) };
-
-static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_0, EE_OFF) };
-static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_1, EE_OFF) };
-static const unsigned int eth_clk_rx_clk_pins[] = { PIN(GPIOZ_2, EE_OFF) };
-static const unsigned int eth_rx_dv_pins[] = { PIN(GPIOZ_3, EE_OFF) };
-static const unsigned int eth_rxd0_pins[] = { PIN(GPIOZ_4, EE_OFF) };
-static const unsigned int eth_rxd1_pins[] = { PIN(GPIOZ_5, EE_OFF) };
-static const unsigned int eth_rxd2_pins[] = { PIN(GPIOZ_6, EE_OFF) };
-static const unsigned int eth_rxd3_pins[] = { PIN(GPIOZ_7, EE_OFF) };
-static const unsigned int eth_rgmii_tx_clk_pins[] = { PIN(GPIOZ_8, EE_OFF) };
-static const unsigned int eth_tx_en_pins[] = { PIN(GPIOZ_9, EE_OFF) };
-static const unsigned int eth_txd0_pins[] = { PIN(GPIOZ_10, EE_OFF) };
-static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_11, EE_OFF) };
-static const unsigned int eth_txd2_pins[] = { PIN(GPIOZ_12, EE_OFF) };
-static const unsigned int eth_txd3_pins[] = { PIN(GPIOZ_13, EE_OFF) };
-
-static const unsigned int pwm_a_x_pins[] = { PIN(GPIOX_6, EE_OFF) };
-static const unsigned int pwm_a_y_pins[] = { PIN(GPIOY_16, EE_OFF) };
-static const unsigned int pwm_b_pins[] = { PIN(GPIODV_29, EE_OFF) };
-static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, EE_OFF) };
-static const unsigned int pwm_e_pins[] = { PIN(GPIOX_19, EE_OFF) };
-static const unsigned int pwm_f_x_pins[] = { PIN(GPIOX_7, EE_OFF) };
-static const unsigned int pwm_f_y_pins[] = { PIN(GPIOY_15, EE_OFF) };
-
-static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, EE_OFF) };
-static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, EE_OFF) };
-static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, EE_OFF) };
-
-static const unsigned int i2s_out_ch23_y_pins[] = { PIN(GPIOY_8, EE_OFF) };
-static const unsigned int i2s_out_ch45_y_pins[] = { PIN(GPIOY_9, EE_OFF) };
-static const unsigned int i2s_out_ch67_y_pins[] = { PIN(GPIOY_10, EE_OFF) };
-
-static const unsigned int spdif_out_y_pins[] = { PIN(GPIOY_12, EE_OFF) };
+ BOOT_0, BOOT_1, BOOT_2, BOOT_3, BOOT_4, BOOT_5, BOOT_6, BOOT_7,
+};
+static const unsigned int emmc_clk_pins[] = { BOOT_8 };
+static const unsigned int emmc_cmd_pins[] = { BOOT_10 };
+static const unsigned int emmc_ds_pins[] = { BOOT_15 };
+
+static const unsigned int nor_d_pins[] = { BOOT_11 };
+static const unsigned int nor_q_pins[] = { BOOT_12 };
+static const unsigned int nor_c_pins[] = { BOOT_13 };
+static const unsigned int nor_cs_pins[] = { BOOT_15 };
+
+static const unsigned int spi_sclk_pins[] = { GPIOZ_6 };
+static const unsigned int spi_ss0_pins[] = { GPIOZ_7 };
+static const unsigned int spi_miso_pins[] = { GPIOZ_12 };
+static const unsigned int spi_mosi_pins[] = { GPIOZ_13 };
+
+static const unsigned int sdcard_d0_pins[] = { CARD_1 };
+static const unsigned int sdcard_d1_pins[] = { CARD_0 };
+static const unsigned int sdcard_d2_pins[] = { CARD_5 };
+static const unsigned int sdcard_d3_pins[] = { CARD_4 };
+static const unsigned int sdcard_cmd_pins[] = { CARD_3 };
+static const unsigned int sdcard_clk_pins[] = { CARD_2 };
+
+static const unsigned int sdio_d0_pins[] = { GPIOX_0 };
+static const unsigned int sdio_d1_pins[] = { GPIOX_1 };
+static const unsigned int sdio_d2_pins[] = { GPIOX_2 };
+static const unsigned int sdio_d3_pins[] = { GPIOX_3 };
+static const unsigned int sdio_cmd_pins[] = { GPIOX_4 };
+static const unsigned int sdio_clk_pins[] = { GPIOX_5 };
+static const unsigned int sdio_irq_pins[] = { GPIOX_7 };
+
+static const unsigned int nand_ce0_pins[] = { BOOT_8 };
+static const unsigned int nand_ce1_pins[] = { BOOT_9 };
+static const unsigned int nand_rb0_pins[] = { BOOT_10 };
+static const unsigned int nand_ale_pins[] = { BOOT_11 };
+static const unsigned int nand_cle_pins[] = { BOOT_12 };
+static const unsigned int nand_wen_clk_pins[] = { BOOT_13 };
+static const unsigned int nand_ren_wr_pins[] = { BOOT_14 };
+static const unsigned int nand_dqs_pins[] = { BOOT_15 };
+
+static const unsigned int uart_tx_a_pins[] = { GPIOX_12 };
+static const unsigned int uart_rx_a_pins[] = { GPIOX_13 };
+static const unsigned int uart_cts_a_pins[] = { GPIOX_14 };
+static const unsigned int uart_rts_a_pins[] = { GPIOX_15 };
+
+static const unsigned int uart_tx_b_pins[] = { GPIODV_24 };
+static const unsigned int uart_rx_b_pins[] = { GPIODV_25 };
+static const unsigned int uart_cts_b_pins[] = { GPIODV_26 };
+static const unsigned int uart_rts_b_pins[] = { GPIODV_27 };
+
+static const unsigned int uart_tx_c_pins[] = { GPIOY_13 };
+static const unsigned int uart_rx_c_pins[] = { GPIOY_14 };
+static const unsigned int uart_cts_c_pins[] = { GPIOX_11 };
+static const unsigned int uart_rts_c_pins[] = { GPIOX_12 };
+
+static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 };
+static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 };
+
+static const unsigned int i2c_sck_b_pins[] = { GPIODV_27 };
+static const unsigned int i2c_sda_b_pins[] = { GPIODV_26 };
+
+static const unsigned int i2c_sck_c_pins[] = { GPIODV_29 };
+static const unsigned int i2c_sda_c_pins[] = { GPIODV_28 };
+
+static const unsigned int eth_mdio_pins[] = { GPIOZ_0 };
+static const unsigned int eth_mdc_pins[] = { GPIOZ_1 };
+static const unsigned int eth_clk_rx_clk_pins[] = { GPIOZ_2 };
+static const unsigned int eth_rx_dv_pins[] = { GPIOZ_3 };
+static const unsigned int eth_rxd0_pins[] = { GPIOZ_4 };
+static const unsigned int eth_rxd1_pins[] = { GPIOZ_5 };
+static const unsigned int eth_rxd2_pins[] = { GPIOZ_6 };
+static const unsigned int eth_rxd3_pins[] = { GPIOZ_7 };
+static const unsigned int eth_rgmii_tx_clk_pins[] = { GPIOZ_8 };
+static const unsigned int eth_tx_en_pins[] = { GPIOZ_9 };
+static const unsigned int eth_txd0_pins[] = { GPIOZ_10 };
+static const unsigned int eth_txd1_pins[] = { GPIOZ_11 };
+static const unsigned int eth_txd2_pins[] = { GPIOZ_12 };
+static const unsigned int eth_txd3_pins[] = { GPIOZ_13 };
+
+static const unsigned int pwm_a_x_pins[] = { GPIOX_6 };
+static const unsigned int pwm_a_y_pins[] = { GPIOY_16 };
+static const unsigned int pwm_b_pins[] = { GPIODV_29 };
+static const unsigned int pwm_d_pins[] = { GPIODV_28 };
+static const unsigned int pwm_e_pins[] = { GPIOX_19 };
+static const unsigned int pwm_f_x_pins[] = { GPIOX_7 };
+static const unsigned int pwm_f_y_pins[] = { GPIOY_15 };
+
+static const unsigned int hdmi_hpd_pins[] = { GPIOH_0 };
+static const unsigned int hdmi_sda_pins[] = { GPIOH_1 };
+static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
+
+static const unsigned int i2s_out_ch23_y_pins[] = { GPIOY_8 };
+static const unsigned int i2s_out_ch45_y_pins[] = { GPIOY_9 };
+static const unsigned int i2s_out_ch67_y_pins[] = { GPIOY_10 };
+
+static const unsigned int spdif_out_y_pins[] = { GPIOY_12 };
static const struct pinctrl_pin_desc meson_gxbb_aobus_pins[] = {
- MESON_PIN(GPIOAO_0, 0),
- MESON_PIN(GPIOAO_1, 0),
- MESON_PIN(GPIOAO_2, 0),
- MESON_PIN(GPIOAO_3, 0),
- MESON_PIN(GPIOAO_4, 0),
- MESON_PIN(GPIOAO_5, 0),
- MESON_PIN(GPIOAO_6, 0),
- MESON_PIN(GPIOAO_7, 0),
- MESON_PIN(GPIOAO_8, 0),
- MESON_PIN(GPIOAO_9, 0),
- MESON_PIN(GPIOAO_10, 0),
- MESON_PIN(GPIOAO_11, 0),
- MESON_PIN(GPIOAO_12, 0),
- MESON_PIN(GPIOAO_13, 0),
-};
-
-static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
-static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
-static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
-static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
-static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
-static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
-
-static const unsigned int i2c_sck_ao_pins[] = {PIN(GPIOAO_4, 0) };
-static const unsigned int i2c_sda_ao_pins[] = {PIN(GPIOAO_5, 0) };
-static const unsigned int i2c_slave_sck_ao_pins[] = {PIN(GPIOAO_4, 0) };
-static const unsigned int i2c_slave_sda_ao_pins[] = {PIN(GPIOAO_5, 0) };
-
-static const unsigned int remote_input_ao_pins[] = {PIN(GPIOAO_7, 0) };
-
-static const unsigned int pwm_ao_a_3_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int pwm_ao_a_6_pins[] = { PIN(GPIOAO_6, 0) };
-static const unsigned int pwm_ao_a_12_pins[] = { PIN(GPIOAO_12, 0) };
-static const unsigned int pwm_ao_b_pins[] = { PIN(GPIOAO_13, 0) };
-
-static const unsigned int i2s_am_clk_pins[] = { PIN(GPIOAO_8, 0) };
-static const unsigned int i2s_out_ao_clk_pins[] = { PIN(GPIOAO_9, 0) };
-static const unsigned int i2s_out_lr_clk_pins[] = { PIN(GPIOAO_10, 0) };
-static const unsigned int i2s_out_ch01_ao_pins[] = { PIN(GPIOAO_11, 0) };
-static const unsigned int i2s_out_ch23_ao_pins[] = { PIN(GPIOAO_12, 0) };
-static const unsigned int i2s_out_ch45_ao_pins[] = { PIN(GPIOAO_13, 0) };
-
-static const unsigned int spdif_out_ao_6_pins[] = { PIN(GPIOAO_6, 0) };
-static const unsigned int spdif_out_ao_13_pins[] = { PIN(GPIOAO_13, 0) };
-
-static const unsigned int ao_cec_pins[] = { PIN(GPIOAO_12, 0) };
-static const unsigned int ee_cec_pins[] = { PIN(GPIOAO_12, 0) };
+ MESON_PIN(GPIOAO_0),
+ MESON_PIN(GPIOAO_1),
+ MESON_PIN(GPIOAO_2),
+ MESON_PIN(GPIOAO_3),
+ MESON_PIN(GPIOAO_4),
+ MESON_PIN(GPIOAO_5),
+ MESON_PIN(GPIOAO_6),
+ MESON_PIN(GPIOAO_7),
+ MESON_PIN(GPIOAO_8),
+ MESON_PIN(GPIOAO_9),
+ MESON_PIN(GPIOAO_10),
+ MESON_PIN(GPIOAO_11),
+ MESON_PIN(GPIOAO_12),
+ MESON_PIN(GPIOAO_13),
+
+ MESON_PIN(GPIO_TEST_N),
+};
+
+static const unsigned int uart_tx_ao_a_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_a_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_a_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_a_pins[] = { GPIOAO_3 };
+static const unsigned int uart_tx_ao_b_pins[] = { GPIOAO_4 };
+static const unsigned int uart_rx_ao_b_pins[] = { GPIOAO_5 };
+static const unsigned int uart_cts_ao_b_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_b_pins[] = { GPIOAO_3 };
+
+static const unsigned int i2c_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_sda_ao_pins[] = { GPIOAO_5 };
+static const unsigned int i2c_slave_sck_ao_pins[] = {GPIOAO_4 };
+static const unsigned int i2c_slave_sda_ao_pins[] = {GPIOAO_5 };
+
+static const unsigned int remote_input_ao_pins[] = { GPIOAO_7 };
+
+static const unsigned int pwm_ao_a_3_pins[] = { GPIOAO_3 };
+static const unsigned int pwm_ao_a_6_pins[] = { GPIOAO_6 };
+static const unsigned int pwm_ao_a_12_pins[] = { GPIOAO_12 };
+static const unsigned int pwm_ao_b_pins[] = { GPIOAO_13 };
+
+static const unsigned int i2s_am_clk_pins[] = { GPIOAO_8 };
+static const unsigned int i2s_out_ao_clk_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_out_lr_clk_pins[] = { GPIOAO_10 };
+static const unsigned int i2s_out_ch01_ao_pins[] = { GPIOAO_11 };
+static const unsigned int i2s_out_ch23_ao_pins[] = { GPIOAO_12 };
+static const unsigned int i2s_out_ch45_ao_pins[] = { GPIOAO_13 };
+static const unsigned int i2s_out_ch67_ao_pins[] = { GPIO_TEST_N };
+
+static const unsigned int spdif_out_ao_6_pins[] = { GPIOAO_6 };
+static const unsigned int spdif_out_ao_13_pins[] = { GPIOAO_13 };
+
+static const unsigned int ao_cec_pins[] = { GPIOAO_12 };
+static const unsigned int ee_cec_pins[] = { GPIOAO_12 };
static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
- GPIO_GROUP(GPIOZ_0, EE_OFF),
- GPIO_GROUP(GPIOZ_1, EE_OFF),
- GPIO_GROUP(GPIOZ_2, EE_OFF),
- GPIO_GROUP(GPIOZ_3, EE_OFF),
- GPIO_GROUP(GPIOZ_4, EE_OFF),
- GPIO_GROUP(GPIOZ_5, EE_OFF),
- GPIO_GROUP(GPIOZ_6, EE_OFF),
- GPIO_GROUP(GPIOZ_7, EE_OFF),
- GPIO_GROUP(GPIOZ_8, EE_OFF),
- GPIO_GROUP(GPIOZ_9, EE_OFF),
- GPIO_GROUP(GPIOZ_10, EE_OFF),
- GPIO_GROUP(GPIOZ_11, EE_OFF),
- GPIO_GROUP(GPIOZ_12, EE_OFF),
- GPIO_GROUP(GPIOZ_13, EE_OFF),
- GPIO_GROUP(GPIOZ_14, EE_OFF),
- GPIO_GROUP(GPIOZ_15, EE_OFF),
-
- GPIO_GROUP(GPIOH_0, EE_OFF),
- GPIO_GROUP(GPIOH_1, EE_OFF),
- GPIO_GROUP(GPIOH_2, EE_OFF),
- GPIO_GROUP(GPIOH_3, EE_OFF),
-
- GPIO_GROUP(BOOT_0, EE_OFF),
- GPIO_GROUP(BOOT_1, EE_OFF),
- GPIO_GROUP(BOOT_2, EE_OFF),
- GPIO_GROUP(BOOT_3, EE_OFF),
- GPIO_GROUP(BOOT_4, EE_OFF),
- GPIO_GROUP(BOOT_5, EE_OFF),
- GPIO_GROUP(BOOT_6, EE_OFF),
- GPIO_GROUP(BOOT_7, EE_OFF),
- GPIO_GROUP(BOOT_8, EE_OFF),
- GPIO_GROUP(BOOT_9, EE_OFF),
- GPIO_GROUP(BOOT_10, EE_OFF),
- GPIO_GROUP(BOOT_11, EE_OFF),
- GPIO_GROUP(BOOT_12, EE_OFF),
- GPIO_GROUP(BOOT_13, EE_OFF),
- GPIO_GROUP(BOOT_14, EE_OFF),
- GPIO_GROUP(BOOT_15, EE_OFF),
- GPIO_GROUP(BOOT_16, EE_OFF),
- GPIO_GROUP(BOOT_17, EE_OFF),
-
- GPIO_GROUP(CARD_0, EE_OFF),
- GPIO_GROUP(CARD_1, EE_OFF),
- GPIO_GROUP(CARD_2, EE_OFF),
- GPIO_GROUP(CARD_3, EE_OFF),
- GPIO_GROUP(CARD_4, EE_OFF),
- GPIO_GROUP(CARD_5, EE_OFF),
- GPIO_GROUP(CARD_6, EE_OFF),
-
- GPIO_GROUP(GPIODV_0, EE_OFF),
- GPIO_GROUP(GPIODV_1, EE_OFF),
- GPIO_GROUP(GPIODV_2, EE_OFF),
- GPIO_GROUP(GPIODV_3, EE_OFF),
- GPIO_GROUP(GPIODV_4, EE_OFF),
- GPIO_GROUP(GPIODV_5, EE_OFF),
- GPIO_GROUP(GPIODV_6, EE_OFF),
- GPIO_GROUP(GPIODV_7, EE_OFF),
- GPIO_GROUP(GPIODV_8, EE_OFF),
- GPIO_GROUP(GPIODV_9, EE_OFF),
- GPIO_GROUP(GPIODV_10, EE_OFF),
- GPIO_GROUP(GPIODV_11, EE_OFF),
- GPIO_GROUP(GPIODV_12, EE_OFF),
- GPIO_GROUP(GPIODV_13, EE_OFF),
- GPIO_GROUP(GPIODV_14, EE_OFF),
- GPIO_GROUP(GPIODV_15, EE_OFF),
- GPIO_GROUP(GPIODV_16, EE_OFF),
- GPIO_GROUP(GPIODV_17, EE_OFF),
- GPIO_GROUP(GPIODV_19, EE_OFF),
- GPIO_GROUP(GPIODV_20, EE_OFF),
- GPIO_GROUP(GPIODV_21, EE_OFF),
- GPIO_GROUP(GPIODV_22, EE_OFF),
- GPIO_GROUP(GPIODV_23, EE_OFF),
- GPIO_GROUP(GPIODV_24, EE_OFF),
- GPIO_GROUP(GPIODV_25, EE_OFF),
- GPIO_GROUP(GPIODV_26, EE_OFF),
- GPIO_GROUP(GPIODV_27, EE_OFF),
- GPIO_GROUP(GPIODV_28, EE_OFF),
- GPIO_GROUP(GPIODV_29, EE_OFF),
-
- GPIO_GROUP(GPIOY_0, EE_OFF),
- GPIO_GROUP(GPIOY_1, EE_OFF),
- GPIO_GROUP(GPIOY_2, EE_OFF),
- GPIO_GROUP(GPIOY_3, EE_OFF),
- GPIO_GROUP(GPIOY_4, EE_OFF),
- GPIO_GROUP(GPIOY_5, EE_OFF),
- GPIO_GROUP(GPIOY_6, EE_OFF),
- GPIO_GROUP(GPIOY_7, EE_OFF),
- GPIO_GROUP(GPIOY_8, EE_OFF),
- GPIO_GROUP(GPIOY_9, EE_OFF),
- GPIO_GROUP(GPIOY_10, EE_OFF),
- GPIO_GROUP(GPIOY_11, EE_OFF),
- GPIO_GROUP(GPIOY_12, EE_OFF),
- GPIO_GROUP(GPIOY_13, EE_OFF),
- GPIO_GROUP(GPIOY_14, EE_OFF),
- GPIO_GROUP(GPIOY_15, EE_OFF),
- GPIO_GROUP(GPIOY_16, EE_OFF),
-
- GPIO_GROUP(GPIOX_0, EE_OFF),
- GPIO_GROUP(GPIOX_1, EE_OFF),
- GPIO_GROUP(GPIOX_2, EE_OFF),
- GPIO_GROUP(GPIOX_3, EE_OFF),
- GPIO_GROUP(GPIOX_4, EE_OFF),
- GPIO_GROUP(GPIOX_5, EE_OFF),
- GPIO_GROUP(GPIOX_6, EE_OFF),
- GPIO_GROUP(GPIOX_7, EE_OFF),
- GPIO_GROUP(GPIOX_8, EE_OFF),
- GPIO_GROUP(GPIOX_9, EE_OFF),
- GPIO_GROUP(GPIOX_10, EE_OFF),
- GPIO_GROUP(GPIOX_11, EE_OFF),
- GPIO_GROUP(GPIOX_12, EE_OFF),
- GPIO_GROUP(GPIOX_13, EE_OFF),
- GPIO_GROUP(GPIOX_14, EE_OFF),
- GPIO_GROUP(GPIOX_15, EE_OFF),
- GPIO_GROUP(GPIOX_16, EE_OFF),
- GPIO_GROUP(GPIOX_17, EE_OFF),
- GPIO_GROUP(GPIOX_18, EE_OFF),
- GPIO_GROUP(GPIOX_19, EE_OFF),
- GPIO_GROUP(GPIOX_20, EE_OFF),
- GPIO_GROUP(GPIOX_21, EE_OFF),
- GPIO_GROUP(GPIOX_22, EE_OFF),
-
- GPIO_GROUP(GPIOCLK_0, EE_OFF),
- GPIO_GROUP(GPIOCLK_1, EE_OFF),
- GPIO_GROUP(GPIOCLK_2, EE_OFF),
- GPIO_GROUP(GPIOCLK_3, EE_OFF),
-
- GPIO_GROUP(GPIO_TEST_N, EE_OFF),
+ GPIO_GROUP(GPIOZ_0),
+ GPIO_GROUP(GPIOZ_1),
+ GPIO_GROUP(GPIOZ_2),
+ GPIO_GROUP(GPIOZ_3),
+ GPIO_GROUP(GPIOZ_4),
+ GPIO_GROUP(GPIOZ_5),
+ GPIO_GROUP(GPIOZ_6),
+ GPIO_GROUP(GPIOZ_7),
+ GPIO_GROUP(GPIOZ_8),
+ GPIO_GROUP(GPIOZ_9),
+ GPIO_GROUP(GPIOZ_10),
+ GPIO_GROUP(GPIOZ_11),
+ GPIO_GROUP(GPIOZ_12),
+ GPIO_GROUP(GPIOZ_13),
+ GPIO_GROUP(GPIOZ_14),
+ GPIO_GROUP(GPIOZ_15),
+
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+
+ GPIO_GROUP(BOOT_0),
+ GPIO_GROUP(BOOT_1),
+ GPIO_GROUP(BOOT_2),
+ GPIO_GROUP(BOOT_3),
+ GPIO_GROUP(BOOT_4),
+ GPIO_GROUP(BOOT_5),
+ GPIO_GROUP(BOOT_6),
+ GPIO_GROUP(BOOT_7),
+ GPIO_GROUP(BOOT_8),
+ GPIO_GROUP(BOOT_9),
+ GPIO_GROUP(BOOT_10),
+ GPIO_GROUP(BOOT_11),
+ GPIO_GROUP(BOOT_12),
+ GPIO_GROUP(BOOT_13),
+ GPIO_GROUP(BOOT_14),
+ GPIO_GROUP(BOOT_15),
+ GPIO_GROUP(BOOT_16),
+ GPIO_GROUP(BOOT_17),
+
+ GPIO_GROUP(CARD_0),
+ GPIO_GROUP(CARD_1),
+ GPIO_GROUP(CARD_2),
+ GPIO_GROUP(CARD_3),
+ GPIO_GROUP(CARD_4),
+ GPIO_GROUP(CARD_5),
+ GPIO_GROUP(CARD_6),
+
+ GPIO_GROUP(GPIODV_0),
+ GPIO_GROUP(GPIODV_1),
+ GPIO_GROUP(GPIODV_2),
+ GPIO_GROUP(GPIODV_3),
+ GPIO_GROUP(GPIODV_4),
+ GPIO_GROUP(GPIODV_5),
+ GPIO_GROUP(GPIODV_6),
+ GPIO_GROUP(GPIODV_7),
+ GPIO_GROUP(GPIODV_8),
+ GPIO_GROUP(GPIODV_9),
+ GPIO_GROUP(GPIODV_10),
+ GPIO_GROUP(GPIODV_11),
+ GPIO_GROUP(GPIODV_12),
+ GPIO_GROUP(GPIODV_13),
+ GPIO_GROUP(GPIODV_14),
+ GPIO_GROUP(GPIODV_15),
+ GPIO_GROUP(GPIODV_16),
+ GPIO_GROUP(GPIODV_17),
+ GPIO_GROUP(GPIODV_19),
+ GPIO_GROUP(GPIODV_20),
+ GPIO_GROUP(GPIODV_21),
+ GPIO_GROUP(GPIODV_22),
+ GPIO_GROUP(GPIODV_23),
+ GPIO_GROUP(GPIODV_24),
+ GPIO_GROUP(GPIODV_25),
+ GPIO_GROUP(GPIODV_26),
+ GPIO_GROUP(GPIODV_27),
+ GPIO_GROUP(GPIODV_28),
+ GPIO_GROUP(GPIODV_29),
+
+ GPIO_GROUP(GPIOY_0),
+ GPIO_GROUP(GPIOY_1),
+ GPIO_GROUP(GPIOY_2),
+ GPIO_GROUP(GPIOY_3),
+ GPIO_GROUP(GPIOY_4),
+ GPIO_GROUP(GPIOY_5),
+ GPIO_GROUP(GPIOY_6),
+ GPIO_GROUP(GPIOY_7),
+ GPIO_GROUP(GPIOY_8),
+ GPIO_GROUP(GPIOY_9),
+ GPIO_GROUP(GPIOY_10),
+ GPIO_GROUP(GPIOY_11),
+ GPIO_GROUP(GPIOY_12),
+ GPIO_GROUP(GPIOY_13),
+ GPIO_GROUP(GPIOY_14),
+ GPIO_GROUP(GPIOY_15),
+ GPIO_GROUP(GPIOY_16),
+
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+ GPIO_GROUP(GPIOX_19),
+ GPIO_GROUP(GPIOX_20),
+ GPIO_GROUP(GPIOX_21),
+ GPIO_GROUP(GPIOX_22),
+
+ GPIO_GROUP(GPIOCLK_0),
+ GPIO_GROUP(GPIOCLK_1),
+ GPIO_GROUP(GPIOCLK_2),
+ GPIO_GROUP(GPIOCLK_3),
+
+ GPIO_GROUP(GPIO_TEST_N),
/* Bank X */
GROUP(sdio_d0, 8, 5),
@@ -522,20 +521,20 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
};
static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
- GPIO_GROUP(GPIOAO_0, 0),
- GPIO_GROUP(GPIOAO_1, 0),
- GPIO_GROUP(GPIOAO_2, 0),
- GPIO_GROUP(GPIOAO_3, 0),
- GPIO_GROUP(GPIOAO_4, 0),
- GPIO_GROUP(GPIOAO_5, 0),
- GPIO_GROUP(GPIOAO_6, 0),
- GPIO_GROUP(GPIOAO_7, 0),
- GPIO_GROUP(GPIOAO_8, 0),
- GPIO_GROUP(GPIOAO_9, 0),
- GPIO_GROUP(GPIOAO_10, 0),
- GPIO_GROUP(GPIOAO_11, 0),
- GPIO_GROUP(GPIOAO_12, 0),
- GPIO_GROUP(GPIOAO_13, 0),
+ GPIO_GROUP(GPIOAO_0),
+ GPIO_GROUP(GPIOAO_1),
+ GPIO_GROUP(GPIOAO_2),
+ GPIO_GROUP(GPIOAO_3),
+ GPIO_GROUP(GPIOAO_4),
+ GPIO_GROUP(GPIOAO_5),
+ GPIO_GROUP(GPIOAO_6),
+ GPIO_GROUP(GPIOAO_7),
+ GPIO_GROUP(GPIOAO_8),
+ GPIO_GROUP(GPIOAO_9),
+ GPIO_GROUP(GPIOAO_10),
+ GPIO_GROUP(GPIOAO_11),
+ GPIO_GROUP(GPIOAO_12),
+ GPIO_GROUP(GPIOAO_13),
/* bank AO */
GROUP(uart_tx_ao_b, 0, 24),
@@ -565,6 +564,9 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
GROUP(spdif_out_ao_13, 0, 4),
GROUP(ao_cec, 0, 15),
GROUP(ee_cec, 0, 14),
+
+ /* test n pin */
+ GROUP(i2s_out_ch67_ao, 1, 2),
};
static const char * const gpio_periphs_groups[] = {
@@ -600,8 +602,6 @@ static const char * const gpio_periphs_groups[] = {
"GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
"GPIOX_15", "GPIOX_16", "GPIOX_17", "GPIOX_18", "GPIOX_19",
"GPIOX_20", "GPIOX_21", "GPIOX_22",
-
- "GPIO_TEST_N",
};
static const char * const emmc_groups[] = {
@@ -710,6 +710,8 @@ static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
"GPIOAO_10", "GPIOAO_11", "GPIOAO_12", "GPIOAO_13",
+
+ "GPIO_TEST_N",
};
static const char * const uart_ao_groups[] = {
@@ -751,6 +753,7 @@ static const char * const pwm_ao_b_groups[] = {
static const char * const i2s_out_ao_groups[] = {
"i2s_am_clk", "i2s_out_ao_clk", "i2s_out_lr_clk",
"i2s_out_ch01_ao", "i2s_out_ch23_ao", "i2s_out_ch45_ao",
+ "i2s_out_ch67_ao",
};
static const char * const spdif_out_ao_groups[] = {
@@ -806,25 +809,24 @@ static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
};
static struct meson_bank meson_gxbb_periphs_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, EE_OFF), PIN(GPIOX_22, EE_OFF), 106, 128, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
- BANK("Y", PIN(GPIOY_0, EE_OFF), PIN(GPIOY_16, EE_OFF), 89, 105, 1, 0, 1, 0, 3, 0, 4, 0, 5, 0),
- BANK("DV", PIN(GPIODV_0, EE_OFF), PIN(GPIODV_29, EE_OFF), 59, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
- BANK("H", PIN(GPIOH_0, EE_OFF), PIN(GPIOH_3, EE_OFF), 30, 33, 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
- BANK("Z", PIN(GPIOZ_0, EE_OFF), PIN(GPIOZ_15, EE_OFF), 14, 29, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
- BANK("CARD", PIN(CARD_0, EE_OFF), PIN(CARD_6, EE_OFF), 52, 58, 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
- BANK("BOOT", PIN(BOOT_0, EE_OFF), PIN(BOOT_17, EE_OFF), 34, 51, 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
- BANK("CLK", PIN(GPIOCLK_0, EE_OFF), PIN(GPIOCLK_3, EE_OFF), 129, 132, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", GPIOX_0, GPIOX_22, 106, 128, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
+ BANK("Y", GPIOY_0, GPIOY_16, 89, 105, 1, 0, 1, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", GPIODV_0, GPIODV_29, 59, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
+ BANK("H", GPIOH_0, GPIOH_3, 30, 33, 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
+ BANK("Z", GPIOZ_0, GPIOZ_15, 14, 29, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
+ BANK("CARD", CARD_0, CARD_6, 52, 58, 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
+ BANK("BOOT", BOOT_0, BOOT_17, 34, 51, 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
+ BANK("CLK", GPIOCLK_0, GPIOCLK_3, 129, 132, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
};
static struct meson_bank meson_gxbb_aobus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_13, 0), 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
+static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
.name = "periphs-banks",
- .pin_base = 14,
.pins = meson_gxbb_periphs_pins,
.groups = meson_gxbb_periphs_groups,
.funcs = meson_gxbb_periphs_functions,
@@ -833,11 +835,11 @@ struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson_gxbb_periphs_groups),
.num_funcs = ARRAY_SIZE(meson_gxbb_periphs_functions),
.num_banks = ARRAY_SIZE(meson_gxbb_periphs_banks),
+ .pmx_ops = &meson8_pmx_ops,
};
-struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
+static struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
.name = "aobus-banks",
- .pin_base = 0,
.pins = meson_gxbb_aobus_pins,
.groups = meson_gxbb_aobus_groups,
.funcs = meson_gxbb_aobus_functions,
@@ -846,4 +848,26 @@ struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson_gxbb_aobus_groups),
.num_funcs = ARRAY_SIZE(meson_gxbb_aobus_functions),
.num_banks = ARRAY_SIZE(meson_gxbb_aobus_banks),
+ .pmx_ops = &meson8_pmx_ops,
+};
+
+static const struct of_device_id meson_gxbb_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson-gxbb-periphs-pinctrl",
+ .data = &meson_gxbb_periphs_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson-gxbb-aobus-pinctrl",
+ .data = &meson_gxbb_aobus_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson_gxbb_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson-gxbb-pinctrl",
+ .of_match_table = meson_gxbb_pinctrl_dt_match,
+ },
};
+builtin_platform_driver(meson_gxbb_pinctrl_driver);
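The hunks above make the GXBB per-SoC pinctrl data static to this file, match it through the new file-local of_device_id table, and register a dedicated platform driver with builtin_platform_driver(), reusing the shared meson_pinctrl_probe() as the probe callback. A minimal sketch of that registration pattern follows; the actual probe is meson_pinctrl_probe() in pinctrl-meson.c and is not shown in this diff, so only of_device_get_match_data() and builtin_platform_driver() are real kernel APIs here and the rest is illustrative.

	/* Sketch: how a per-SoC platform driver recovers the .data pointer
	 * that the of_device_id table above attaches to each compatible. */
	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	static int example_pinctrl_probe(struct platform_device *pdev)
	{
		/* Returns the .data field of the matching of_device_id entry,
		 * i.e. the meson_*_pinctrl_data defined in this file. */
		const void *data = of_device_get_match_data(&pdev->dev);

		if (!data)
			return -ENODEV;

		/* ...map registers, register the pinctrl device and gpiochip... */
		return 0;
	}

With the per-SoC drivers owning their match tables, pinctrl-meson.c no longer has to reference every SoC's data, which is what the removal of the large meson_pinctrl_dt_match[] array later in this diff reflects.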
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 36c14b85fc7c..b3786cde963d 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -14,408 +14,400 @@
#include <dt-bindings/gpio/meson-gxl-gpio.h>
#include "pinctrl-meson.h"
-
-#define EE_OFF 10
+#include "pinctrl-meson8-pmx.h"
static const struct pinctrl_pin_desc meson_gxl_periphs_pins[] = {
- MESON_PIN(GPIOZ_0, EE_OFF),
- MESON_PIN(GPIOZ_1, EE_OFF),
- MESON_PIN(GPIOZ_2, EE_OFF),
- MESON_PIN(GPIOZ_3, EE_OFF),
- MESON_PIN(GPIOZ_4, EE_OFF),
- MESON_PIN(GPIOZ_5, EE_OFF),
- MESON_PIN(GPIOZ_6, EE_OFF),
- MESON_PIN(GPIOZ_7, EE_OFF),
- MESON_PIN(GPIOZ_8, EE_OFF),
- MESON_PIN(GPIOZ_9, EE_OFF),
- MESON_PIN(GPIOZ_10, EE_OFF),
- MESON_PIN(GPIOZ_11, EE_OFF),
- MESON_PIN(GPIOZ_12, EE_OFF),
- MESON_PIN(GPIOZ_13, EE_OFF),
- MESON_PIN(GPIOZ_14, EE_OFF),
- MESON_PIN(GPIOZ_15, EE_OFF),
-
- MESON_PIN(GPIOH_0, EE_OFF),
- MESON_PIN(GPIOH_1, EE_OFF),
- MESON_PIN(GPIOH_2, EE_OFF),
- MESON_PIN(GPIOH_3, EE_OFF),
- MESON_PIN(GPIOH_4, EE_OFF),
- MESON_PIN(GPIOH_5, EE_OFF),
- MESON_PIN(GPIOH_6, EE_OFF),
- MESON_PIN(GPIOH_7, EE_OFF),
- MESON_PIN(GPIOH_8, EE_OFF),
- MESON_PIN(GPIOH_9, EE_OFF),
-
- MESON_PIN(BOOT_0, EE_OFF),
- MESON_PIN(BOOT_1, EE_OFF),
- MESON_PIN(BOOT_2, EE_OFF),
- MESON_PIN(BOOT_3, EE_OFF),
- MESON_PIN(BOOT_4, EE_OFF),
- MESON_PIN(BOOT_5, EE_OFF),
- MESON_PIN(BOOT_6, EE_OFF),
- MESON_PIN(BOOT_7, EE_OFF),
- MESON_PIN(BOOT_8, EE_OFF),
- MESON_PIN(BOOT_9, EE_OFF),
- MESON_PIN(BOOT_10, EE_OFF),
- MESON_PIN(BOOT_11, EE_OFF),
- MESON_PIN(BOOT_12, EE_OFF),
- MESON_PIN(BOOT_13, EE_OFF),
- MESON_PIN(BOOT_14, EE_OFF),
- MESON_PIN(BOOT_15, EE_OFF),
-
- MESON_PIN(CARD_0, EE_OFF),
- MESON_PIN(CARD_1, EE_OFF),
- MESON_PIN(CARD_2, EE_OFF),
- MESON_PIN(CARD_3, EE_OFF),
- MESON_PIN(CARD_4, EE_OFF),
- MESON_PIN(CARD_5, EE_OFF),
- MESON_PIN(CARD_6, EE_OFF),
-
- MESON_PIN(GPIODV_0, EE_OFF),
- MESON_PIN(GPIODV_1, EE_OFF),
- MESON_PIN(GPIODV_2, EE_OFF),
- MESON_PIN(GPIODV_3, EE_OFF),
- MESON_PIN(GPIODV_4, EE_OFF),
- MESON_PIN(GPIODV_5, EE_OFF),
- MESON_PIN(GPIODV_6, EE_OFF),
- MESON_PIN(GPIODV_7, EE_OFF),
- MESON_PIN(GPIODV_8, EE_OFF),
- MESON_PIN(GPIODV_9, EE_OFF),
- MESON_PIN(GPIODV_10, EE_OFF),
- MESON_PIN(GPIODV_11, EE_OFF),
- MESON_PIN(GPIODV_12, EE_OFF),
- MESON_PIN(GPIODV_13, EE_OFF),
- MESON_PIN(GPIODV_14, EE_OFF),
- MESON_PIN(GPIODV_15, EE_OFF),
- MESON_PIN(GPIODV_16, EE_OFF),
- MESON_PIN(GPIODV_17, EE_OFF),
- MESON_PIN(GPIODV_18, EE_OFF),
- MESON_PIN(GPIODV_19, EE_OFF),
- MESON_PIN(GPIODV_20, EE_OFF),
- MESON_PIN(GPIODV_21, EE_OFF),
- MESON_PIN(GPIODV_22, EE_OFF),
- MESON_PIN(GPIODV_23, EE_OFF),
- MESON_PIN(GPIODV_24, EE_OFF),
- MESON_PIN(GPIODV_25, EE_OFF),
- MESON_PIN(GPIODV_26, EE_OFF),
- MESON_PIN(GPIODV_27, EE_OFF),
- MESON_PIN(GPIODV_28, EE_OFF),
- MESON_PIN(GPIODV_29, EE_OFF),
-
- MESON_PIN(GPIOX_0, EE_OFF),
- MESON_PIN(GPIOX_1, EE_OFF),
- MESON_PIN(GPIOX_2, EE_OFF),
- MESON_PIN(GPIOX_3, EE_OFF),
- MESON_PIN(GPIOX_4, EE_OFF),
- MESON_PIN(GPIOX_5, EE_OFF),
- MESON_PIN(GPIOX_6, EE_OFF),
- MESON_PIN(GPIOX_7, EE_OFF),
- MESON_PIN(GPIOX_8, EE_OFF),
- MESON_PIN(GPIOX_9, EE_OFF),
- MESON_PIN(GPIOX_10, EE_OFF),
- MESON_PIN(GPIOX_11, EE_OFF),
- MESON_PIN(GPIOX_12, EE_OFF),
- MESON_PIN(GPIOX_13, EE_OFF),
- MESON_PIN(GPIOX_14, EE_OFF),
- MESON_PIN(GPIOX_15, EE_OFF),
- MESON_PIN(GPIOX_16, EE_OFF),
- MESON_PIN(GPIOX_17, EE_OFF),
- MESON_PIN(GPIOX_18, EE_OFF),
-
- MESON_PIN(GPIOCLK_0, EE_OFF),
- MESON_PIN(GPIOCLK_1, EE_OFF),
-
- MESON_PIN(GPIO_TEST_N, EE_OFF),
+ MESON_PIN(GPIOZ_0),
+ MESON_PIN(GPIOZ_1),
+ MESON_PIN(GPIOZ_2),
+ MESON_PIN(GPIOZ_3),
+ MESON_PIN(GPIOZ_4),
+ MESON_PIN(GPIOZ_5),
+ MESON_PIN(GPIOZ_6),
+ MESON_PIN(GPIOZ_7),
+ MESON_PIN(GPIOZ_8),
+ MESON_PIN(GPIOZ_9),
+ MESON_PIN(GPIOZ_10),
+ MESON_PIN(GPIOZ_11),
+ MESON_PIN(GPIOZ_12),
+ MESON_PIN(GPIOZ_13),
+ MESON_PIN(GPIOZ_14),
+ MESON_PIN(GPIOZ_15),
+
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+ MESON_PIN(GPIOH_4),
+ MESON_PIN(GPIOH_5),
+ MESON_PIN(GPIOH_6),
+ MESON_PIN(GPIOH_7),
+ MESON_PIN(GPIOH_8),
+ MESON_PIN(GPIOH_9),
+
+ MESON_PIN(BOOT_0),
+ MESON_PIN(BOOT_1),
+ MESON_PIN(BOOT_2),
+ MESON_PIN(BOOT_3),
+ MESON_PIN(BOOT_4),
+ MESON_PIN(BOOT_5),
+ MESON_PIN(BOOT_6),
+ MESON_PIN(BOOT_7),
+ MESON_PIN(BOOT_8),
+ MESON_PIN(BOOT_9),
+ MESON_PIN(BOOT_10),
+ MESON_PIN(BOOT_11),
+ MESON_PIN(BOOT_12),
+ MESON_PIN(BOOT_13),
+ MESON_PIN(BOOT_14),
+ MESON_PIN(BOOT_15),
+
+ MESON_PIN(CARD_0),
+ MESON_PIN(CARD_1),
+ MESON_PIN(CARD_2),
+ MESON_PIN(CARD_3),
+ MESON_PIN(CARD_4),
+ MESON_PIN(CARD_5),
+ MESON_PIN(CARD_6),
+
+ MESON_PIN(GPIODV_0),
+ MESON_PIN(GPIODV_1),
+ MESON_PIN(GPIODV_2),
+ MESON_PIN(GPIODV_3),
+ MESON_PIN(GPIODV_4),
+ MESON_PIN(GPIODV_5),
+ MESON_PIN(GPIODV_6),
+ MESON_PIN(GPIODV_7),
+ MESON_PIN(GPIODV_8),
+ MESON_PIN(GPIODV_9),
+ MESON_PIN(GPIODV_10),
+ MESON_PIN(GPIODV_11),
+ MESON_PIN(GPIODV_12),
+ MESON_PIN(GPIODV_13),
+ MESON_PIN(GPIODV_14),
+ MESON_PIN(GPIODV_15),
+ MESON_PIN(GPIODV_16),
+ MESON_PIN(GPIODV_17),
+ MESON_PIN(GPIODV_18),
+ MESON_PIN(GPIODV_19),
+ MESON_PIN(GPIODV_20),
+ MESON_PIN(GPIODV_21),
+ MESON_PIN(GPIODV_22),
+ MESON_PIN(GPIODV_23),
+ MESON_PIN(GPIODV_24),
+ MESON_PIN(GPIODV_25),
+ MESON_PIN(GPIODV_26),
+ MESON_PIN(GPIODV_27),
+ MESON_PIN(GPIODV_28),
+ MESON_PIN(GPIODV_29),
+
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+
+ MESON_PIN(GPIOCLK_0),
+ MESON_PIN(GPIOCLK_1),
};
static const unsigned int emmc_nand_d07_pins[] = {
- PIN(BOOT_0, EE_OFF), PIN(BOOT_1, EE_OFF), PIN(BOOT_2, EE_OFF),
- PIN(BOOT_3, EE_OFF), PIN(BOOT_4, EE_OFF), PIN(BOOT_5, EE_OFF),
- PIN(BOOT_6, EE_OFF), PIN(BOOT_7, EE_OFF),
-};
-static const unsigned int emmc_clk_pins[] = { PIN(BOOT_8, EE_OFF) };
-static const unsigned int emmc_cmd_pins[] = { PIN(BOOT_10, EE_OFF) };
-static const unsigned int emmc_ds_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int nor_d_pins[] = { PIN(BOOT_11, EE_OFF) };
-static const unsigned int nor_q_pins[] = { PIN(BOOT_12, EE_OFF) };
-static const unsigned int nor_c_pins[] = { PIN(BOOT_13, EE_OFF) };
-static const unsigned int nor_cs_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int spi_mosi_pins[] = { PIN(GPIOX_8, EE_OFF) };
-static const unsigned int spi_miso_pins[] = { PIN(GPIOX_9, EE_OFF) };
-static const unsigned int spi_ss0_pins[] = { PIN(GPIOX_10, EE_OFF) };
-static const unsigned int spi_sclk_pins[] = { PIN(GPIOX_11, EE_OFF) };
-
-static const unsigned int sdcard_d0_pins[] = { PIN(CARD_1, EE_OFF) };
-static const unsigned int sdcard_d1_pins[] = { PIN(CARD_0, EE_OFF) };
-static const unsigned int sdcard_d2_pins[] = { PIN(CARD_5, EE_OFF) };
-static const unsigned int sdcard_d3_pins[] = { PIN(CARD_4, EE_OFF) };
-static const unsigned int sdcard_cmd_pins[] = { PIN(CARD_3, EE_OFF) };
-static const unsigned int sdcard_clk_pins[] = { PIN(CARD_2, EE_OFF) };
-
-static const unsigned int sdio_d0_pins[] = { PIN(GPIOX_0, EE_OFF) };
-static const unsigned int sdio_d1_pins[] = { PIN(GPIOX_1, EE_OFF) };
-static const unsigned int sdio_d2_pins[] = { PIN(GPIOX_2, EE_OFF) };
-static const unsigned int sdio_d3_pins[] = { PIN(GPIOX_3, EE_OFF) };
-static const unsigned int sdio_cmd_pins[] = { PIN(GPIOX_4, EE_OFF) };
-static const unsigned int sdio_clk_pins[] = { PIN(GPIOX_5, EE_OFF) };
-static const unsigned int sdio_irq_pins[] = { PIN(GPIOX_7, EE_OFF) };
-
-static const unsigned int nand_ce0_pins[] = { PIN(BOOT_8, EE_OFF) };
-static const unsigned int nand_ce1_pins[] = { PIN(BOOT_9, EE_OFF) };
-static const unsigned int nand_rb0_pins[] = { PIN(BOOT_10, EE_OFF) };
-static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, EE_OFF) };
-static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, EE_OFF) };
-static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, EE_OFF) };
-static const unsigned int nand_ren_wr_pins[] = { PIN(BOOT_14, EE_OFF) };
-static const unsigned int nand_dqs_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_12, EE_OFF) };
-static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_13, EE_OFF) };
-static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_14, EE_OFF) };
-static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_15, EE_OFF) };
-
-static const unsigned int uart_tx_b_pins[] = { PIN(GPIODV_24, EE_OFF) };
-static const unsigned int uart_rx_b_pins[] = { PIN(GPIODV_25, EE_OFF) };
-static const unsigned int uart_cts_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
-static const unsigned int uart_rts_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
-
-static const unsigned int uart_tx_c_pins[] = { PIN(GPIOX_8, EE_OFF) };
-static const unsigned int uart_rx_c_pins[] = { PIN(GPIOX_9, EE_OFF) };
-static const unsigned int uart_cts_c_pins[] = { PIN(GPIOX_10, EE_OFF) };
-static const unsigned int uart_rts_c_pins[] = { PIN(GPIOX_11, EE_OFF) };
-
-static const unsigned int i2c_sck_a_pins[] = { PIN(GPIODV_25, EE_OFF) };
-static const unsigned int i2c_sda_a_pins[] = { PIN(GPIODV_24, EE_OFF) };
-
-static const unsigned int i2c_sck_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
-static const unsigned int i2c_sda_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
-
-static const unsigned int i2c_sck_c_pins[] = { PIN(GPIODV_29, EE_OFF) };
-static const unsigned int i2c_sda_c_pins[] = { PIN(GPIODV_28, EE_OFF) };
-
-static const unsigned int i2c_sck_c_dv19_pins[] = { PIN(GPIODV_19, EE_OFF) };
-static const unsigned int i2c_sda_c_dv18_pins[] = { PIN(GPIODV_18, EE_OFF) };
-
-static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_0, EE_OFF) };
-static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_1, EE_OFF) };
-static const unsigned int eth_clk_rx_clk_pins[] = { PIN(GPIOZ_2, EE_OFF) };
-static const unsigned int eth_rx_dv_pins[] = { PIN(GPIOZ_3, EE_OFF) };
-static const unsigned int eth_rxd0_pins[] = { PIN(GPIOZ_4, EE_OFF) };
-static const unsigned int eth_rxd1_pins[] = { PIN(GPIOZ_5, EE_OFF) };
-static const unsigned int eth_rxd2_pins[] = { PIN(GPIOZ_6, EE_OFF) };
-static const unsigned int eth_rxd3_pins[] = { PIN(GPIOZ_7, EE_OFF) };
-static const unsigned int eth_rgmii_tx_clk_pins[] = { PIN(GPIOZ_8, EE_OFF) };
-static const unsigned int eth_tx_en_pins[] = { PIN(GPIOZ_9, EE_OFF) };
-static const unsigned int eth_txd0_pins[] = { PIN(GPIOZ_10, EE_OFF) };
-static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_11, EE_OFF) };
-static const unsigned int eth_txd2_pins[] = { PIN(GPIOZ_12, EE_OFF) };
-static const unsigned int eth_txd3_pins[] = { PIN(GPIOZ_13, EE_OFF) };
-
-static const unsigned int pwm_a_pins[] = { PIN(GPIOX_6, EE_OFF) };
-
-static const unsigned int pwm_b_pins[] = { PIN(GPIODV_29, EE_OFF) };
-
-static const unsigned int pwm_c_pins[] = { PIN(GPIOZ_15, EE_OFF) };
-
-static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, EE_OFF) };
-
-static const unsigned int pwm_e_pins[] = { PIN(GPIOX_16, EE_OFF) };
-
-static const unsigned int pwm_f_clk_pins[] = { PIN(GPIOCLK_1, EE_OFF) };
-static const unsigned int pwm_f_x_pins[] = { PIN(GPIOX_7, EE_OFF) };
-
-static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, EE_OFF) };
-static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, EE_OFF) };
-static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, EE_OFF) };
-
-static const unsigned int i2s_am_clk_pins[] = { PIN(GPIOH_6, EE_OFF) };
-static const unsigned int i2s_out_ao_clk_pins[] = { PIN(GPIOH_7, EE_OFF) };
-static const unsigned int i2s_out_lr_clk_pins[] = { PIN(GPIOH_8, EE_OFF) };
-static const unsigned int i2s_out_ch01_pins[] = { PIN(GPIOH_9, EE_OFF) };
-static const unsigned int i2s_out_ch23_z_pins[] = { PIN(GPIOZ_5, EE_OFF) };
-static const unsigned int i2s_out_ch45_z_pins[] = { PIN(GPIOZ_6, EE_OFF) };
-static const unsigned int i2s_out_ch67_z_pins[] = { PIN(GPIOZ_7, EE_OFF) };
-
-static const unsigned int spdif_out_h_pins[] = { PIN(GPIOH_4, EE_OFF) };
-
-static const unsigned int eth_link_led_pins[] = { PIN(GPIOZ_14, EE_OFF) };
-static const unsigned int eth_act_led_pins[] = { PIN(GPIOZ_15, EE_OFF) };
-
-static const unsigned int tsin_a_d0_pins[] = { PIN(GPIODV_0, EE_OFF) };
-static const unsigned int tsin_a_d0_x_pins[] = { PIN(GPIOX_10, EE_OFF) };
-static const unsigned int tsin_a_clk_pins[] = { PIN(GPIODV_8, EE_OFF) };
-static const unsigned int tsin_a_clk_x_pins[] = { PIN(GPIOX_11, EE_OFF) };
-static const unsigned int tsin_a_sop_pins[] = { PIN(GPIODV_9, EE_OFF) };
-static const unsigned int tsin_a_sop_x_pins[] = { PIN(GPIOX_8, EE_OFF) };
-static const unsigned int tsin_a_d_valid_pins[] = { PIN(GPIODV_10, EE_OFF) };
-static const unsigned int tsin_a_d_valid_x_pins[] = { PIN(GPIOX_9, EE_OFF) };
-static const unsigned int tsin_a_fail_pins[] = { PIN(GPIODV_11, EE_OFF) };
+ BOOT_0, BOOT_1, BOOT_2, BOOT_3, BOOT_4, BOOT_5, BOOT_6, BOOT_7,
+};
+static const unsigned int emmc_clk_pins[] = { BOOT_8 };
+static const unsigned int emmc_cmd_pins[] = { BOOT_10 };
+static const unsigned int emmc_ds_pins[] = { BOOT_15 };
+
+static const unsigned int nor_d_pins[] = { BOOT_11 };
+static const unsigned int nor_q_pins[] = { BOOT_12 };
+static const unsigned int nor_c_pins[] = { BOOT_13 };
+static const unsigned int nor_cs_pins[] = { BOOT_15 };
+
+static const unsigned int spi_mosi_pins[] = { GPIOX_8 };
+static const unsigned int spi_miso_pins[] = { GPIOX_9 };
+static const unsigned int spi_ss0_pins[] = { GPIOX_10 };
+static const unsigned int spi_sclk_pins[] = { GPIOX_11 };
+
+static const unsigned int sdcard_d0_pins[] = { CARD_1 };
+static const unsigned int sdcard_d1_pins[] = { CARD_0 };
+static const unsigned int sdcard_d2_pins[] = { CARD_5 };
+static const unsigned int sdcard_d3_pins[] = { CARD_4 };
+static const unsigned int sdcard_cmd_pins[] = { CARD_3 };
+static const unsigned int sdcard_clk_pins[] = { CARD_2 };
+
+static const unsigned int sdio_d0_pins[] = { GPIOX_0 };
+static const unsigned int sdio_d1_pins[] = { GPIOX_1 };
+static const unsigned int sdio_d2_pins[] = { GPIOX_2 };
+static const unsigned int sdio_d3_pins[] = { GPIOX_3 };
+static const unsigned int sdio_cmd_pins[] = { GPIOX_4 };
+static const unsigned int sdio_clk_pins[] = { GPIOX_5 };
+static const unsigned int sdio_irq_pins[] = { GPIOX_7 };
+
+static const unsigned int nand_ce0_pins[] = { BOOT_8 };
+static const unsigned int nand_ce1_pins[] = { BOOT_9 };
+static const unsigned int nand_rb0_pins[] = { BOOT_10 };
+static const unsigned int nand_ale_pins[] = { BOOT_11 };
+static const unsigned int nand_cle_pins[] = { BOOT_12 };
+static const unsigned int nand_wen_clk_pins[] = { BOOT_13 };
+static const unsigned int nand_ren_wr_pins[] = { BOOT_14 };
+static const unsigned int nand_dqs_pins[] = { BOOT_15 };
+
+static const unsigned int uart_tx_a_pins[] = { GPIOX_12 };
+static const unsigned int uart_rx_a_pins[] = { GPIOX_13 };
+static const unsigned int uart_cts_a_pins[] = { GPIOX_14 };
+static const unsigned int uart_rts_a_pins[] = { GPIOX_15 };
+
+static const unsigned int uart_tx_b_pins[] = { GPIODV_24 };
+static const unsigned int uart_rx_b_pins[] = { GPIODV_25 };
+static const unsigned int uart_cts_b_pins[] = { GPIODV_26 };
+static const unsigned int uart_rts_b_pins[] = { GPIODV_27 };
+
+static const unsigned int uart_tx_c_pins[] = { GPIOX_8 };
+static const unsigned int uart_rx_c_pins[] = { GPIOX_9 };
+static const unsigned int uart_cts_c_pins[] = { GPIOX_10 };
+static const unsigned int uart_rts_c_pins[] = { GPIOX_11 };
+
+static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 };
+static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 };
+
+static const unsigned int i2c_sck_b_pins[] = { GPIODV_27 };
+static const unsigned int i2c_sda_b_pins[] = { GPIODV_26 };
+
+static const unsigned int i2c_sck_c_pins[] = { GPIODV_29 };
+static const unsigned int i2c_sda_c_pins[] = { GPIODV_28 };
+
+static const unsigned int i2c_sck_c_dv19_pins[] = { GPIODV_19 };
+static const unsigned int i2c_sda_c_dv18_pins[] = { GPIODV_18 };
+
+static const unsigned int eth_mdio_pins[] = { GPIOZ_0 };
+static const unsigned int eth_mdc_pins[] = { GPIOZ_1 };
+static const unsigned int eth_clk_rx_clk_pins[] = { GPIOZ_2 };
+static const unsigned int eth_rx_dv_pins[] = { GPIOZ_3 };
+static const unsigned int eth_rxd0_pins[] = { GPIOZ_4 };
+static const unsigned int eth_rxd1_pins[] = { GPIOZ_5 };
+static const unsigned int eth_rxd2_pins[] = { GPIOZ_6 };
+static const unsigned int eth_rxd3_pins[] = { GPIOZ_7 };
+static const unsigned int eth_rgmii_tx_clk_pins[] = { GPIOZ_8 };
+static const unsigned int eth_tx_en_pins[] = { GPIOZ_9 };
+static const unsigned int eth_txd0_pins[] = { GPIOZ_10 };
+static const unsigned int eth_txd1_pins[] = { GPIOZ_11 };
+static const unsigned int eth_txd2_pins[] = { GPIOZ_12 };
+static const unsigned int eth_txd3_pins[] = { GPIOZ_13 };
+
+static const unsigned int pwm_a_pins[] = { GPIOX_6 };
+
+static const unsigned int pwm_b_pins[] = { GPIODV_29 };
+
+static const unsigned int pwm_c_pins[] = { GPIOZ_15 };
+
+static const unsigned int pwm_d_pins[] = { GPIODV_28 };
+
+static const unsigned int pwm_e_pins[] = { GPIOX_16 };
+
+static const unsigned int pwm_f_clk_pins[] = { GPIOCLK_1 };
+static const unsigned int pwm_f_x_pins[] = { GPIOX_7 };
+
+static const unsigned int hdmi_hpd_pins[] = { GPIOH_0 };
+static const unsigned int hdmi_sda_pins[] = { GPIOH_1 };
+static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
+
+static const unsigned int i2s_am_clk_pins[] = { GPIOH_6 };
+static const unsigned int i2s_out_ao_clk_pins[] = { GPIOH_7 };
+static const unsigned int i2s_out_lr_clk_pins[] = { GPIOH_8 };
+static const unsigned int i2s_out_ch01_pins[] = { GPIOH_9 };
+static const unsigned int i2s_out_ch23_z_pins[] = { GPIOZ_5 };
+static const unsigned int i2s_out_ch45_z_pins[] = { GPIOZ_6 };
+static const unsigned int i2s_out_ch67_z_pins[] = { GPIOZ_7 };
+
+static const unsigned int spdif_out_h_pins[] = { GPIOH_4 };
+
+static const unsigned int eth_link_led_pins[] = { GPIOZ_14 };
+static const unsigned int eth_act_led_pins[] = { GPIOZ_15 };
+
+static const unsigned int tsin_a_d0_pins[] = { GPIODV_0 };
+static const unsigned int tsin_a_d0_x_pins[] = { GPIOX_10 };
+static const unsigned int tsin_a_clk_pins[] = { GPIODV_8 };
+static const unsigned int tsin_a_clk_x_pins[] = { GPIOX_11 };
+static const unsigned int tsin_a_sop_pins[] = { GPIODV_9 };
+static const unsigned int tsin_a_sop_x_pins[] = { GPIOX_8 };
+static const unsigned int tsin_a_d_valid_pins[] = { GPIODV_10 };
+static const unsigned int tsin_a_d_valid_x_pins[] = { GPIOX_9 };
+static const unsigned int tsin_a_fail_pins[] = { GPIODV_11 };
static const unsigned int tsin_a_dp_pins[] = {
- PIN(GPIODV_1, EE_OFF),
- PIN(GPIODV_2, EE_OFF),
- PIN(GPIODV_3, EE_OFF),
- PIN(GPIODV_4, EE_OFF),
- PIN(GPIODV_5, EE_OFF),
- PIN(GPIODV_6, EE_OFF),
- PIN(GPIODV_7, EE_OFF),
+ GPIODV_1, GPIODV_2, GPIODV_3, GPIODV_4, GPIODV_5, GPIODV_6, GPIODV_7,
};
static const struct pinctrl_pin_desc meson_gxl_aobus_pins[] = {
- MESON_PIN(GPIOAO_0, 0),
- MESON_PIN(GPIOAO_1, 0),
- MESON_PIN(GPIOAO_2, 0),
- MESON_PIN(GPIOAO_3, 0),
- MESON_PIN(GPIOAO_4, 0),
- MESON_PIN(GPIOAO_5, 0),
- MESON_PIN(GPIOAO_6, 0),
- MESON_PIN(GPIOAO_7, 0),
- MESON_PIN(GPIOAO_8, 0),
- MESON_PIN(GPIOAO_9, 0),
-};
-
-static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
-static const unsigned int uart_tx_ao_b_0_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_1_pins[] = { PIN(GPIOAO_1, 0) };
-static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
-static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
-static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
-static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
-static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
-
-static const unsigned int i2c_sck_ao_pins[] = {PIN(GPIOAO_4, 0) };
-static const unsigned int i2c_sda_ao_pins[] = {PIN(GPIOAO_5, 0) };
-static const unsigned int i2c_slave_sck_ao_pins[] = {PIN(GPIOAO_4, 0) };
-static const unsigned int i2c_slave_sda_ao_pins[] = {PIN(GPIOAO_5, 0) };
-
-static const unsigned int remote_input_ao_pins[] = {PIN(GPIOAO_7, 0) };
-
-static const unsigned int pwm_ao_a_3_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int pwm_ao_a_8_pins[] = { PIN(GPIOAO_8, 0) };
-
-static const unsigned int pwm_ao_b_pins[] = { PIN(GPIOAO_9, 0) };
-static const unsigned int pwm_ao_b_6_pins[] = { PIN(GPIOAO_6, 0) };
-
-static const unsigned int i2s_out_ch23_ao_pins[] = { PIN(GPIOAO_8, 0) };
-static const unsigned int i2s_out_ch45_ao_pins[] = { PIN(GPIOAO_9, 0) };
-
-static const unsigned int spdif_out_ao_6_pins[] = { PIN(GPIOAO_6, 0) };
-static const unsigned int spdif_out_ao_9_pins[] = { PIN(GPIOAO_9, 0) };
-
-static const unsigned int ao_cec_pins[] = { PIN(GPIOAO_8, 0) };
-static const unsigned int ee_cec_pins[] = { PIN(GPIOAO_8, 0) };
+ MESON_PIN(GPIOAO_0),
+ MESON_PIN(GPIOAO_1),
+ MESON_PIN(GPIOAO_2),
+ MESON_PIN(GPIOAO_3),
+ MESON_PIN(GPIOAO_4),
+ MESON_PIN(GPIOAO_5),
+ MESON_PIN(GPIOAO_6),
+ MESON_PIN(GPIOAO_7),
+ MESON_PIN(GPIOAO_8),
+ MESON_PIN(GPIOAO_9),
+
+ MESON_PIN(GPIO_TEST_N),
+};
+
+static const unsigned int uart_tx_ao_a_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_a_pins[] = { GPIOAO_1 };
+static const unsigned int uart_tx_ao_b_0_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_b_1_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_a_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_a_pins[] = { GPIOAO_3 };
+static const unsigned int uart_tx_ao_b_pins[] = { GPIOAO_4 };
+static const unsigned int uart_rx_ao_b_pins[] = { GPIOAO_5 };
+static const unsigned int uart_cts_ao_b_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_b_pins[] = { GPIOAO_3 };
+
+static const unsigned int i2c_sck_ao_pins[] = {GPIOAO_4 };
+static const unsigned int i2c_sda_ao_pins[] = {GPIOAO_5 };
+static const unsigned int i2c_slave_sck_ao_pins[] = {GPIOAO_4 };
+static const unsigned int i2c_slave_sda_ao_pins[] = {GPIOAO_5 };
+
+static const unsigned int remote_input_ao_pins[] = {GPIOAO_7 };
+
+static const unsigned int pwm_ao_a_3_pins[] = { GPIOAO_3 };
+static const unsigned int pwm_ao_a_8_pins[] = { GPIOAO_8 };
+
+static const unsigned int pwm_ao_b_pins[] = { GPIOAO_9 };
+static const unsigned int pwm_ao_b_6_pins[] = { GPIOAO_6 };
+
+static const unsigned int i2s_out_ch23_ao_pins[] = { GPIOAO_8 };
+static const unsigned int i2s_out_ch45_ao_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_out_ch67_ao_pins[] = { GPIO_TEST_N };
+
+static const unsigned int spdif_out_ao_6_pins[] = { GPIOAO_6 };
+static const unsigned int spdif_out_ao_9_pins[] = { GPIOAO_9 };
+
+static const unsigned int ao_cec_pins[] = { GPIOAO_8 };
+static const unsigned int ee_cec_pins[] = { GPIOAO_8 };
static struct meson_pmx_group meson_gxl_periphs_groups[] = {
- GPIO_GROUP(GPIOZ_0, EE_OFF),
- GPIO_GROUP(GPIOZ_1, EE_OFF),
- GPIO_GROUP(GPIOZ_2, EE_OFF),
- GPIO_GROUP(GPIOZ_3, EE_OFF),
- GPIO_GROUP(GPIOZ_4, EE_OFF),
- GPIO_GROUP(GPIOZ_5, EE_OFF),
- GPIO_GROUP(GPIOZ_6, EE_OFF),
- GPIO_GROUP(GPIOZ_7, EE_OFF),
- GPIO_GROUP(GPIOZ_8, EE_OFF),
- GPIO_GROUP(GPIOZ_9, EE_OFF),
- GPIO_GROUP(GPIOZ_10, EE_OFF),
- GPIO_GROUP(GPIOZ_11, EE_OFF),
- GPIO_GROUP(GPIOZ_12, EE_OFF),
- GPIO_GROUP(GPIOZ_13, EE_OFF),
- GPIO_GROUP(GPIOZ_14, EE_OFF),
- GPIO_GROUP(GPIOZ_15, EE_OFF),
-
- GPIO_GROUP(GPIOH_0, EE_OFF),
- GPIO_GROUP(GPIOH_1, EE_OFF),
- GPIO_GROUP(GPIOH_2, EE_OFF),
- GPIO_GROUP(GPIOH_3, EE_OFF),
- GPIO_GROUP(GPIOH_4, EE_OFF),
- GPIO_GROUP(GPIOH_5, EE_OFF),
- GPIO_GROUP(GPIOH_6, EE_OFF),
- GPIO_GROUP(GPIOH_7, EE_OFF),
- GPIO_GROUP(GPIOH_8, EE_OFF),
- GPIO_GROUP(GPIOH_9, EE_OFF),
-
- GPIO_GROUP(BOOT_0, EE_OFF),
- GPIO_GROUP(BOOT_1, EE_OFF),
- GPIO_GROUP(BOOT_2, EE_OFF),
- GPIO_GROUP(BOOT_3, EE_OFF),
- GPIO_GROUP(BOOT_4, EE_OFF),
- GPIO_GROUP(BOOT_5, EE_OFF),
- GPIO_GROUP(BOOT_6, EE_OFF),
- GPIO_GROUP(BOOT_7, EE_OFF),
- GPIO_GROUP(BOOT_8, EE_OFF),
- GPIO_GROUP(BOOT_9, EE_OFF),
- GPIO_GROUP(BOOT_10, EE_OFF),
- GPIO_GROUP(BOOT_11, EE_OFF),
- GPIO_GROUP(BOOT_12, EE_OFF),
- GPIO_GROUP(BOOT_13, EE_OFF),
- GPIO_GROUP(BOOT_14, EE_OFF),
- GPIO_GROUP(BOOT_15, EE_OFF),
-
- GPIO_GROUP(CARD_0, EE_OFF),
- GPIO_GROUP(CARD_1, EE_OFF),
- GPIO_GROUP(CARD_2, EE_OFF),
- GPIO_GROUP(CARD_3, EE_OFF),
- GPIO_GROUP(CARD_4, EE_OFF),
- GPIO_GROUP(CARD_5, EE_OFF),
- GPIO_GROUP(CARD_6, EE_OFF),
-
- GPIO_GROUP(GPIODV_0, EE_OFF),
- GPIO_GROUP(GPIODV_1, EE_OFF),
- GPIO_GROUP(GPIODV_2, EE_OFF),
- GPIO_GROUP(GPIODV_3, EE_OFF),
- GPIO_GROUP(GPIODV_4, EE_OFF),
- GPIO_GROUP(GPIODV_5, EE_OFF),
- GPIO_GROUP(GPIODV_6, EE_OFF),
- GPIO_GROUP(GPIODV_7, EE_OFF),
- GPIO_GROUP(GPIODV_8, EE_OFF),
- GPIO_GROUP(GPIODV_9, EE_OFF),
- GPIO_GROUP(GPIODV_10, EE_OFF),
- GPIO_GROUP(GPIODV_11, EE_OFF),
- GPIO_GROUP(GPIODV_12, EE_OFF),
- GPIO_GROUP(GPIODV_13, EE_OFF),
- GPIO_GROUP(GPIODV_14, EE_OFF),
- GPIO_GROUP(GPIODV_15, EE_OFF),
- GPIO_GROUP(GPIODV_16, EE_OFF),
- GPIO_GROUP(GPIODV_17, EE_OFF),
- GPIO_GROUP(GPIODV_19, EE_OFF),
- GPIO_GROUP(GPIODV_20, EE_OFF),
- GPIO_GROUP(GPIODV_21, EE_OFF),
- GPIO_GROUP(GPIODV_22, EE_OFF),
- GPIO_GROUP(GPIODV_23, EE_OFF),
- GPIO_GROUP(GPIODV_24, EE_OFF),
- GPIO_GROUP(GPIODV_25, EE_OFF),
- GPIO_GROUP(GPIODV_26, EE_OFF),
- GPIO_GROUP(GPIODV_27, EE_OFF),
- GPIO_GROUP(GPIODV_28, EE_OFF),
- GPIO_GROUP(GPIODV_29, EE_OFF),
-
- GPIO_GROUP(GPIOX_0, EE_OFF),
- GPIO_GROUP(GPIOX_1, EE_OFF),
- GPIO_GROUP(GPIOX_2, EE_OFF),
- GPIO_GROUP(GPIOX_3, EE_OFF),
- GPIO_GROUP(GPIOX_4, EE_OFF),
- GPIO_GROUP(GPIOX_5, EE_OFF),
- GPIO_GROUP(GPIOX_6, EE_OFF),
- GPIO_GROUP(GPIOX_7, EE_OFF),
- GPIO_GROUP(GPIOX_8, EE_OFF),
- GPIO_GROUP(GPIOX_9, EE_OFF),
- GPIO_GROUP(GPIOX_10, EE_OFF),
- GPIO_GROUP(GPIOX_11, EE_OFF),
- GPIO_GROUP(GPIOX_12, EE_OFF),
- GPIO_GROUP(GPIOX_13, EE_OFF),
- GPIO_GROUP(GPIOX_14, EE_OFF),
- GPIO_GROUP(GPIOX_15, EE_OFF),
- GPIO_GROUP(GPIOX_16, EE_OFF),
- GPIO_GROUP(GPIOX_17, EE_OFF),
- GPIO_GROUP(GPIOX_18, EE_OFF),
-
- GPIO_GROUP(GPIOCLK_0, EE_OFF),
- GPIO_GROUP(GPIOCLK_1, EE_OFF),
-
- GPIO_GROUP(GPIO_TEST_N, EE_OFF),
+ GPIO_GROUP(GPIOZ_0),
+ GPIO_GROUP(GPIOZ_1),
+ GPIO_GROUP(GPIOZ_2),
+ GPIO_GROUP(GPIOZ_3),
+ GPIO_GROUP(GPIOZ_4),
+ GPIO_GROUP(GPIOZ_5),
+ GPIO_GROUP(GPIOZ_6),
+ GPIO_GROUP(GPIOZ_7),
+ GPIO_GROUP(GPIOZ_8),
+ GPIO_GROUP(GPIOZ_9),
+ GPIO_GROUP(GPIOZ_10),
+ GPIO_GROUP(GPIOZ_11),
+ GPIO_GROUP(GPIOZ_12),
+ GPIO_GROUP(GPIOZ_13),
+ GPIO_GROUP(GPIOZ_14),
+ GPIO_GROUP(GPIOZ_15),
+
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+ GPIO_GROUP(GPIOH_4),
+ GPIO_GROUP(GPIOH_5),
+ GPIO_GROUP(GPIOH_6),
+ GPIO_GROUP(GPIOH_7),
+ GPIO_GROUP(GPIOH_8),
+ GPIO_GROUP(GPIOH_9),
+
+ GPIO_GROUP(BOOT_0),
+ GPIO_GROUP(BOOT_1),
+ GPIO_GROUP(BOOT_2),
+ GPIO_GROUP(BOOT_3),
+ GPIO_GROUP(BOOT_4),
+ GPIO_GROUP(BOOT_5),
+ GPIO_GROUP(BOOT_6),
+ GPIO_GROUP(BOOT_7),
+ GPIO_GROUP(BOOT_8),
+ GPIO_GROUP(BOOT_9),
+ GPIO_GROUP(BOOT_10),
+ GPIO_GROUP(BOOT_11),
+ GPIO_GROUP(BOOT_12),
+ GPIO_GROUP(BOOT_13),
+ GPIO_GROUP(BOOT_14),
+ GPIO_GROUP(BOOT_15),
+
+ GPIO_GROUP(CARD_0),
+ GPIO_GROUP(CARD_1),
+ GPIO_GROUP(CARD_2),
+ GPIO_GROUP(CARD_3),
+ GPIO_GROUP(CARD_4),
+ GPIO_GROUP(CARD_5),
+ GPIO_GROUP(CARD_6),
+
+ GPIO_GROUP(GPIODV_0),
+ GPIO_GROUP(GPIODV_1),
+ GPIO_GROUP(GPIODV_2),
+ GPIO_GROUP(GPIODV_3),
+ GPIO_GROUP(GPIODV_4),
+ GPIO_GROUP(GPIODV_5),
+ GPIO_GROUP(GPIODV_6),
+ GPIO_GROUP(GPIODV_7),
+ GPIO_GROUP(GPIODV_8),
+ GPIO_GROUP(GPIODV_9),
+ GPIO_GROUP(GPIODV_10),
+ GPIO_GROUP(GPIODV_11),
+ GPIO_GROUP(GPIODV_12),
+ GPIO_GROUP(GPIODV_13),
+ GPIO_GROUP(GPIODV_14),
+ GPIO_GROUP(GPIODV_15),
+ GPIO_GROUP(GPIODV_16),
+ GPIO_GROUP(GPIODV_17),
+ GPIO_GROUP(GPIODV_19),
+ GPIO_GROUP(GPIODV_20),
+ GPIO_GROUP(GPIODV_21),
+ GPIO_GROUP(GPIODV_22),
+ GPIO_GROUP(GPIODV_23),
+ GPIO_GROUP(GPIODV_24),
+ GPIO_GROUP(GPIODV_25),
+ GPIO_GROUP(GPIODV_26),
+ GPIO_GROUP(GPIODV_27),
+ GPIO_GROUP(GPIODV_28),
+ GPIO_GROUP(GPIODV_29),
+
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+
+ GPIO_GROUP(GPIOCLK_0),
+ GPIO_GROUP(GPIOCLK_1),
+
+ GPIO_GROUP(GPIO_TEST_N),
/* Bank X */
GROUP(sdio_d0, 5, 31),
@@ -530,16 +522,16 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
};
static struct meson_pmx_group meson_gxl_aobus_groups[] = {
- GPIO_GROUP(GPIOAO_0, 0),
- GPIO_GROUP(GPIOAO_1, 0),
- GPIO_GROUP(GPIOAO_2, 0),
- GPIO_GROUP(GPIOAO_3, 0),
- GPIO_GROUP(GPIOAO_4, 0),
- GPIO_GROUP(GPIOAO_5, 0),
- GPIO_GROUP(GPIOAO_6, 0),
- GPIO_GROUP(GPIOAO_7, 0),
- GPIO_GROUP(GPIOAO_8, 0),
- GPIO_GROUP(GPIOAO_9, 0),
+ GPIO_GROUP(GPIOAO_0),
+ GPIO_GROUP(GPIOAO_1),
+ GPIO_GROUP(GPIOAO_2),
+ GPIO_GROUP(GPIOAO_3),
+ GPIO_GROUP(GPIOAO_4),
+ GPIO_GROUP(GPIOAO_5),
+ GPIO_GROUP(GPIOAO_6),
+ GPIO_GROUP(GPIOAO_7),
+ GPIO_GROUP(GPIOAO_8),
+ GPIO_GROUP(GPIOAO_9),
/* bank AO */
GROUP(uart_tx_ao_b_0, 0, 26),
@@ -567,6 +559,9 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
GROUP(spdif_out_ao_9, 0, 4),
GROUP(ao_cec, 0, 15),
GROUP(ee_cec, 0, 14),
+
+ /* test n pin */
+ GROUP(i2s_out_ch67_ao, 1, 2),
};
static const char * const gpio_periphs_groups[] = {
@@ -597,8 +592,6 @@ static const char * const gpio_periphs_groups[] = {
"GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9",
"GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
"GPIOX_15", "GPIOX_16", "GPIOX_17", "GPIOX_18",
-
- "GPIO_TEST_N",
};
static const char * const emmc_groups[] = {
@@ -713,6 +706,8 @@ static const char * const tsin_a_groups[] = {
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
+
+ "GPIO_TEST_N",
};
static const char * const uart_ao_groups[] = {
@@ -745,7 +740,7 @@ static const char * const pwm_ao_b_groups[] = {
};
static const char * const i2s_out_ao_groups[] = {
- "i2s_out_ch23_ao", "i2s_out_ch45_ao",
+ "i2s_out_ch23_ao", "i2s_out_ch45_ao", "i2s_out_ch67_ao",
};
static const char * const spdif_out_ao_groups[] = {
@@ -800,24 +795,23 @@ static struct meson_pmx_func meson_gxl_aobus_functions[] = {
};
static struct meson_bank meson_gxl_periphs_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, EE_OFF), PIN(GPIOX_18, EE_OFF), 89, 107, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
- BANK("DV", PIN(GPIODV_0, EE_OFF), PIN(GPIODV_29, EE_OFF), 83, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
- BANK("H", PIN(GPIOH_0, EE_OFF), PIN(GPIOH_9, EE_OFF), 26, 35, 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
- BANK("Z", PIN(GPIOZ_0, EE_OFF), PIN(GPIOZ_15, EE_OFF), 10, 25, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
- BANK("CARD", PIN(CARD_0, EE_OFF), PIN(CARD_6, EE_OFF), 52, 58, 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
- BANK("BOOT", PIN(BOOT_0, EE_OFF), PIN(BOOT_15, EE_OFF), 36, 51, 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
- BANK("CLK", PIN(GPIOCLK_0, EE_OFF), PIN(GPIOCLK_1, EE_OFF), 108, 109, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", GPIOX_0, GPIOX_18, 89, 107, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
+ BANK("DV", GPIODV_0, GPIODV_29, 83, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
+ BANK("H", GPIOH_0, GPIOH_9, 26, 35, 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
+ BANK("Z", GPIOZ_0, GPIOZ_15, 10, 25, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
+ BANK("CARD", CARD_0, CARD_6, 52, 58, 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
+ BANK("BOOT", BOOT_0, BOOT_15, 36, 51, 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
+ BANK("CLK", GPIOCLK_0, GPIOCLK_1, 108, 109, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
};
static struct meson_bank meson_gxl_aobus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_9, 0), 0, 9, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
+static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
.name = "periphs-banks",
- .pin_base = 10,
.pins = meson_gxl_periphs_pins,
.groups = meson_gxl_periphs_groups,
.funcs = meson_gxl_periphs_functions,
@@ -826,11 +820,11 @@ struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson_gxl_periphs_groups),
.num_funcs = ARRAY_SIZE(meson_gxl_periphs_functions),
.num_banks = ARRAY_SIZE(meson_gxl_periphs_banks),
+ .pmx_ops = &meson8_pmx_ops,
};
-struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
+static struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
.name = "aobus-banks",
- .pin_base = 0,
.pins = meson_gxl_aobus_pins,
.groups = meson_gxl_aobus_groups,
.funcs = meson_gxl_aobus_functions,
@@ -839,4 +833,26 @@ struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson_gxl_aobus_groups),
.num_funcs = ARRAY_SIZE(meson_gxl_aobus_functions),
.num_banks = ARRAY_SIZE(meson_gxl_aobus_banks),
+ .pmx_ops = &meson8_pmx_ops,
+};
+
+static const struct of_device_id meson_gxl_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson-gxl-periphs-pinctrl",
+ .data = &meson_gxl_periphs_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson-gxl-aobus-pinctrl",
+ .data = &meson_gxl_aobus_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson_gxl_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson-gxl-pinctrl",
+ .of_match_table = meson_gxl_pinctrl_dt_match,
+ },
};
+builtin_platform_driver(meson_gxl_pinctrl_driver);
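Both converted files (GXBB above and GXL here) also drop the EE_OFF offset, the PIN() wrapper and the .pin_base field: each controller's pin numbering now starts at 0, which is why the gpio_chip callbacks in the next file stop translating the GPIO offset through pc->data->pin_base and pass it straight to meson_get_bank() and meson_calc_reg_and_bit(). A rough before/after sketch of the numbering macros, assuming the historical shapes of PIN()/MESON_PIN() from pinctrl-meson.h (approximate, renamed, not copied from the tree):

	#include <linux/pinctrl/pinctrl.h>	/* PINCTRL_PIN() */

	/* Before: every pin number carried a per-controller base offset,
	 * so a GPIO offset had to be rebased before any register lookup. */
	#define PIN_OLD(x, b)		((b) + (x))
	#define MESON_PIN_OLD(x, b)	PINCTRL_PIN(PIN_OLD(x, b), #x)

	/* After: the DT binding constant (GPIOAO_0, BOOT_8, ...) is the pin
	 * number itself, so GPIO offset and pin number are the same value. */
	#define MESON_PIN_NEW(x)	PINCTRL_PIN(x, #x)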
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 66ed70c12733..29a458da78db 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -31,10 +31,6 @@
* In some cases the register ranges for pull enable and pull
* direction are the same and thus there are only 3 register ranges.
*
- * Every pinmux group can be enabled by a specific bit in the first
- * register range; when all groups for a given pin are disabled the
- * pin acts as a GPIO.
- *
* For the pull and GPIO configuration every bank uses a contiguous
* set of bits in the register sets described above; the same register
* can be shared by more banks with different offsets.
@@ -50,6 +46,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
@@ -147,94 +144,24 @@ static const struct pinctrl_ops meson_pctrl_ops = {
.pin_dbg_show = meson_pin_dbg_show,
};
-/**
- * meson_pmx_disable_other_groups() - disable other groups using a given pin
- *
- * @pc: meson pin controller device
- * @pin: number of the pin
- * @sel_group: index of the selected group, or -1 if none
- *
- * The function disables all pinmux groups using a pin except the
- * selected one. If @sel_group is -1 all groups are disabled, leaving
- * the pin in GPIO mode.
- */
-static void meson_pmx_disable_other_groups(struct meson_pinctrl *pc,
- unsigned int pin, int sel_group)
-{
- struct meson_pmx_group *group;
- int i, j;
-
- for (i = 0; i < pc->data->num_groups; i++) {
- group = &pc->data->groups[i];
- if (group->is_gpio || i == sel_group)
- continue;
-
- for (j = 0; j < group->num_pins; j++) {
- if (group->pins[j] == pin) {
- /* We have found a group using the pin */
- regmap_update_bits(pc->reg_mux,
- group->reg * 4,
- BIT(group->bit), 0);
- }
- }
- }
-}
-
-static int meson_pmx_set_mux(struct pinctrl_dev *pcdev, unsigned func_num,
- unsigned group_num)
-{
- struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
- struct meson_pmx_func *func = &pc->data->funcs[func_num];
- struct meson_pmx_group *group = &pc->data->groups[group_num];
- int i, ret = 0;
-
- dev_dbg(pc->dev, "enable function %s, group %s\n", func->name,
- group->name);
-
- /*
- * Disable groups using the same pin.
- * The selected group is not disabled to avoid glitches.
- */
- for (i = 0; i < group->num_pins; i++)
- meson_pmx_disable_other_groups(pc, group->pins[i], group_num);
-
- /* Function 0 (GPIO) doesn't need any additional setting */
- if (func_num)
- ret = regmap_update_bits(pc->reg_mux, group->reg * 4,
- BIT(group->bit), BIT(group->bit));
-
- return ret;
-}
-
-static int meson_pmx_request_gpio(struct pinctrl_dev *pcdev,
- struct pinctrl_gpio_range *range,
- unsigned offset)
-{
- struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
-
- meson_pmx_disable_other_groups(pc, offset, -1);
-
- return 0;
-}
-
-static int meson_pmx_get_funcs_count(struct pinctrl_dev *pcdev)
+int meson_pmx_get_funcs_count(struct pinctrl_dev *pcdev)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
return pc->data->num_funcs;
}
-static const char *meson_pmx_get_func_name(struct pinctrl_dev *pcdev,
- unsigned selector)
+const char *meson_pmx_get_func_name(struct pinctrl_dev *pcdev,
+ unsigned selector)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
return pc->data->funcs[selector].name;
}
-static int meson_pmx_get_groups(struct pinctrl_dev *pcdev, unsigned selector,
- const char * const **groups,
- unsigned * const num_groups)
+int meson_pmx_get_groups(struct pinctrl_dev *pcdev, unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
@@ -244,14 +171,6 @@ static int meson_pmx_get_groups(struct pinctrl_dev *pcdev, unsigned selector,
return 0;
}
-static const struct pinmux_ops meson_pmx_ops = {
- .set_mux = meson_pmx_set_mux,
- .get_functions_count = meson_pmx_get_funcs_count,
- .get_function_name = meson_pmx_get_func_name,
- .get_function_groups = meson_pmx_get_groups,
- .gpio_request_enable = meson_pmx_request_gpio,
-};
-
static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
unsigned long *configs, unsigned num_configs)
{
@@ -399,7 +318,7 @@ static int meson_pinconf_group_set(struct pinctrl_dev *pcdev,
static int meson_pinconf_group_get(struct pinctrl_dev *pcdev,
unsigned int group, unsigned long *config)
{
- return -ENOSYS;
+ return -ENOTSUPP;
}
static const struct pinconf_ops meson_pinconf_ops = {
@@ -410,31 +329,18 @@ static const struct pinconf_ops meson_pinconf_ops = {
.is_generic = true,
};
-static int meson_gpio_request(struct gpio_chip *chip, unsigned gpio)
-{
- return pinctrl_request_gpio(chip->base + gpio);
-}
-
-static void meson_gpio_free(struct gpio_chip *chip, unsigned gpio)
-{
- struct meson_pinctrl *pc = gpiochip_get_data(chip);
-
- pinctrl_free_gpio(pc->data->pin_base + gpio);
-}
-
static int meson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
struct meson_pinctrl *pc = gpiochip_get_data(chip);
- unsigned int reg, bit, pin;
+ unsigned int reg, bit;
struct meson_bank *bank;
int ret;
- pin = pc->data->pin_base + gpio;
- ret = meson_get_bank(pc, pin, &bank);
+ ret = meson_get_bank(pc, gpio, &bank);
if (ret)
return ret;
- meson_calc_reg_and_bit(bank, pin, REG_DIR, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_DIR, &reg, &bit);
return regmap_update_bits(pc->reg_gpio, reg, BIT(bit), BIT(bit));
}
@@ -443,21 +349,20 @@ static int meson_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
int value)
{
struct meson_pinctrl *pc = gpiochip_get_data(chip);
- unsigned int reg, bit, pin;
+ unsigned int reg, bit;
struct meson_bank *bank;
int ret;
- pin = pc->data->pin_base + gpio;
- ret = meson_get_bank(pc, pin, &bank);
+ ret = meson_get_bank(pc, gpio, &bank);
if (ret)
return ret;
- meson_calc_reg_and_bit(bank, pin, REG_DIR, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_DIR, &reg, &bit);
ret = regmap_update_bits(pc->reg_gpio, reg, BIT(bit), 0);
if (ret)
return ret;
- meson_calc_reg_and_bit(bank, pin, REG_OUT, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_OUT, &reg, &bit);
return regmap_update_bits(pc->reg_gpio, reg, BIT(bit),
value ? BIT(bit) : 0);
}
@@ -465,16 +370,15 @@ static int meson_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
static void meson_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
{
struct meson_pinctrl *pc = gpiochip_get_data(chip);
- unsigned int reg, bit, pin;
+ unsigned int reg, bit;
struct meson_bank *bank;
int ret;
- pin = pc->data->pin_base + gpio;
- ret = meson_get_bank(pc, pin, &bank);
+ ret = meson_get_bank(pc, gpio, &bank);
if (ret)
return;
- meson_calc_reg_and_bit(bank, pin, REG_OUT, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_OUT, &reg, &bit);
regmap_update_bits(pc->reg_gpio, reg, BIT(bit),
value ? BIT(bit) : 0);
}
@@ -482,70 +386,33 @@ static void meson_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
static int meson_gpio_get(struct gpio_chip *chip, unsigned gpio)
{
struct meson_pinctrl *pc = gpiochip_get_data(chip);
- unsigned int reg, bit, val, pin;
+ unsigned int reg, bit, val;
struct meson_bank *bank;
int ret;
- pin = pc->data->pin_base + gpio;
- ret = meson_get_bank(pc, pin, &bank);
+ ret = meson_get_bank(pc, gpio, &bank);
if (ret)
return ret;
- meson_calc_reg_and_bit(bank, pin, REG_IN, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_IN, &reg, &bit);
regmap_read(pc->reg_gpio, reg, &val);
return !!(val & BIT(bit));
}
-static const struct of_device_id meson_pinctrl_dt_match[] = {
- {
- .compatible = "amlogic,meson8-cbus-pinctrl",
- .data = &meson8_cbus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson8b-cbus-pinctrl",
- .data = &meson8b_cbus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson8-aobus-pinctrl",
- .data = &meson8_aobus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson8b-aobus-pinctrl",
- .data = &meson8b_aobus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson-gxbb-periphs-pinctrl",
- .data = &meson_gxbb_periphs_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson-gxbb-aobus-pinctrl",
- .data = &meson_gxbb_aobus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson-gxl-periphs-pinctrl",
- .data = &meson_gxl_periphs_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson-gxl-aobus-pinctrl",
- .data = &meson_gxl_aobus_pinctrl_data,
- },
- { },
-};
-
static int meson_gpiolib_register(struct meson_pinctrl *pc)
{
int ret;
pc->chip.label = pc->data->name;
pc->chip.parent = pc->dev;
- pc->chip.request = meson_gpio_request;
- pc->chip.free = meson_gpio_free;
+ pc->chip.request = gpiochip_generic_request;
+ pc->chip.free = gpiochip_generic_free;
pc->chip.direction_input = meson_gpio_direction_input;
pc->chip.direction_output = meson_gpio_direction_output;
pc->chip.get = meson_gpio_get;
pc->chip.set = meson_gpio_set;
- pc->chip.base = pc->data->pin_base;
+ pc->chip.base = -1;
pc->chip.ngpio = pc->data->num_pins;
pc->chip.can_sleep = false;
pc->chip.of_node = pc->of_node;
@@ -640,9 +507,8 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
return 0;
}
-static int meson_pinctrl_probe(struct platform_device *pdev)
+int meson_pinctrl_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct meson_pinctrl *pc;
int ret;
@@ -652,17 +518,16 @@ static int meson_pinctrl_probe(struct platform_device *pdev)
return -ENOMEM;
pc->dev = dev;
- match = of_match_node(meson_pinctrl_dt_match, pdev->dev.of_node);
- pc->data = (struct meson_pinctrl_data *) match->data;
+ pc->data = (struct meson_pinctrl_data *) of_device_get_match_data(dev);
- ret = meson_pinctrl_parse_dt(pc, pdev->dev.of_node);
+ ret = meson_pinctrl_parse_dt(pc, dev->of_node);
if (ret)
return ret;
pc->desc.name = "pinctrl-meson";
pc->desc.owner = THIS_MODULE;
pc->desc.pctlops = &meson_pctrl_ops;
- pc->desc.pmxops = &meson_pmx_ops;
+ pc->desc.pmxops = pc->data->pmx_ops;
pc->desc.confops = &meson_pinconf_ops;
pc->desc.pins = pc->data->pins;
pc->desc.npins = pc->data->num_pins;
@@ -675,12 +540,3 @@ static int meson_pinctrl_probe(struct platform_device *pdev)
return meson_gpiolib_register(pc);
}
-
-static struct platform_driver meson_pinctrl_driver = {
- .probe = meson_pinctrl_probe,
- .driver = {
- .name = "meson-pinctrl",
- .of_match_table = meson_pinctrl_dt_match,
- },
-};
-builtin_platform_driver(meson_pinctrl_driver);
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index 890f296f5840..183b6e471635 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -13,6 +13,7 @@
#include <linux/gpio.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/types.h>
@@ -31,9 +32,7 @@ struct meson_pmx_group {
const char *name;
const unsigned int *pins;
unsigned int num_pins;
- bool is_gpio;
- unsigned int reg;
- unsigned int bit;
+ const void *data;
};
/**
@@ -103,12 +102,12 @@ struct meson_pinctrl_data {
const struct pinctrl_pin_desc *pins;
struct meson_pmx_group *groups;
struct meson_pmx_func *funcs;
- unsigned int pin_base;
unsigned int num_pins;
unsigned int num_groups;
unsigned int num_funcs;
struct meson_bank *banks;
unsigned int num_banks;
+ const struct pinmux_ops *pmx_ops;
};
struct meson_pinctrl {
@@ -124,25 +123,6 @@ struct meson_pinctrl {
struct device_node *of_node;
};
-#define PIN(x, b) (b + x)
-
-#define GROUP(grp, r, b) \
- { \
- .name = #grp, \
- .pins = grp ## _pins, \
- .num_pins = ARRAY_SIZE(grp ## _pins), \
- .reg = r, \
- .bit = b, \
- }
-
-#define GPIO_GROUP(gpio, b) \
- { \
- .name = #gpio, \
- .pins = (const unsigned int[]){ PIN(gpio, b) }, \
- .num_pins = 1, \
- .is_gpio = true, \
- }
-
#define FUNCTION(fn) \
{ \
.name = #fn, \
@@ -166,13 +146,16 @@ struct meson_pinctrl {
}, \
}
-#define MESON_PIN(x, b) PINCTRL_PIN(PIN(x, b), #x)
+#define MESON_PIN(x) PINCTRL_PIN(x, #x)
+
+/* Common pmx functions */
+int meson_pmx_get_funcs_count(struct pinctrl_dev *pcdev);
+const char *meson_pmx_get_func_name(struct pinctrl_dev *pcdev,
+ unsigned selector);
+int meson_pmx_get_groups(struct pinctrl_dev *pcdev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups);
-extern struct meson_pinctrl_data meson8_cbus_pinctrl_data;
-extern struct meson_pinctrl_data meson8_aobus_pinctrl_data;
-extern struct meson_pinctrl_data meson8b_cbus_pinctrl_data;
-extern struct meson_pinctrl_data meson8b_aobus_pinctrl_data;
-extern struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data;
-extern struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data;
-extern struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data;
-extern struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data;
+/* Common probe function */
+int meson_pinctrl_probe(struct platform_device *pdev);
diff --git a/drivers/pinctrl/meson/pinctrl-meson8-pmx.c b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
new file mode 100644
index 000000000000..b93b058c8a07
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
@@ -0,0 +1,108 @@
+/*
+ * First generation of pinmux driver for Amlogic Meson SoCs
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ * Copyright (C) 2017 Jerome Brunet <jbrunet@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* For this first generation of pinctrl drivers, every pinmux group can be
+ * enabled by a specific bit in the first register range. When all groups for
+ * a given pin are disabled, the pin acts as a GPIO.
+ */
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-meson.h"
+#include "pinctrl-meson8-pmx.h"
+
+/**
+ * meson8_pmx_disable_other_groups() - disable other groups using a given pin
+ *
+ * @pc: meson pin controller device
+ * @pin: number of the pin
+ * @sel_group: index of the selected group, or -1 if none
+ *
+ * The function disables all pinmux groups using a pin except the
+ * selected one. If @sel_group is -1, all groups are disabled, leaving
+ * the pin in GPIO mode.
+ */
+static void meson8_pmx_disable_other_groups(struct meson_pinctrl *pc,
+ unsigned int pin, int sel_group)
+{
+ struct meson_pmx_group *group;
+ struct meson8_pmx_data *pmx_data;
+ int i, j;
+
+ for (i = 0; i < pc->data->num_groups; i++) {
+ group = &pc->data->groups[i];
+ pmx_data = (struct meson8_pmx_data *)group->data;
+ if (pmx_data->is_gpio || i == sel_group)
+ continue;
+
+ for (j = 0; j < group->num_pins; j++) {
+ if (group->pins[j] == pin) {
+ /* We have found a group using the pin */
+ regmap_update_bits(pc->reg_mux,
+ pmx_data->reg * 4,
+ BIT(pmx_data->bit), 0);
+ }
+ }
+ }
+}
+
+static int meson8_pmx_set_mux(struct pinctrl_dev *pcdev, unsigned func_num,
+ unsigned group_num)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+ struct meson_pmx_func *func = &pc->data->funcs[func_num];
+ struct meson_pmx_group *group = &pc->data->groups[group_num];
+ struct meson8_pmx_data *pmx_data =
+ (struct meson8_pmx_data *)group->data;
+ int i, ret = 0;
+
+ dev_dbg(pc->dev, "enable function %s, group %s\n", func->name,
+ group->name);
+
+ /*
+ * Disable groups using the same pin.
+ * The selected group is not disabled to avoid glitches.
+ */
+ for (i = 0; i < group->num_pins; i++)
+ meson8_pmx_disable_other_groups(pc, group->pins[i], group_num);
+
+ /* Function 0 (GPIO) doesn't need any additional setting */
+ if (func_num)
+ ret = regmap_update_bits(pc->reg_mux, pmx_data->reg * 4,
+ BIT(pmx_data->bit),
+ BIT(pmx_data->bit));
+
+ return ret;
+}
+
+static int meson8_pmx_request_gpio(struct pinctrl_dev *pcdev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+
+ meson8_pmx_disable_other_groups(pc, offset, -1);
+
+ return 0;
+}
+
+const struct pinmux_ops meson8_pmx_ops = {
+ .set_mux = meson8_pmx_set_mux,
+ .get_functions_count = meson_pmx_get_funcs_count,
+ .get_function_name = meson_pmx_get_func_name,
+ .get_function_groups = meson_pmx_get_groups,
+ .gpio_request_enable = meson8_pmx_request_gpio,
+};
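
(Editorial aside, not part of the patch: a minimal, self-contained sketch of the register arithmetic that meson8_pmx_set_mux() and meson8_pmx_disable_other_groups() rely on. It assumes a group defined as GROUP(sd_d0_a, 8, 5) in pinctrl-meson8.c, i.e. mux register index 8 and bit 5; the reg * 4 byte offset reflects the 32-bit register width used by the driver.)

#include <stdio.h>

/* Simplified stand-in for the per-group fields of struct meson8_pmx_data. */
struct pmx_example {
	unsigned int reg;	/* mux register index */
	unsigned int bit;	/* bit that enables the group */
};

int main(void)
{
	/* GROUP(sd_d0_a, 8, 5): register index 8, bit 5 */
	struct pmx_example sd_d0_a = { .reg = 8, .bit = 5 };

	/* Registers are 32 bits wide, so the offset passed to regmap is reg * 4. */
	unsigned int offset = sd_d0_a.reg * 4;	/* 32 */
	unsigned int mask = 1u << sd_d0_a.bit;	/* BIT(5) == 0x20 */

	/* set_mux() writes 'mask' into the masked bits to enable the group;
	 * disable_other_groups() writes 0 there to release the pin. */
	printf("byte offset %u, mask 0x%x\n", offset, mask);

	return 0;
}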
diff --git a/drivers/pinctrl/meson/pinctrl-meson8-pmx.h b/drivers/pinctrl/meson/pinctrl-meson8-pmx.h
new file mode 100644
index 000000000000..47293c28f913
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson8-pmx.h
@@ -0,0 +1,48 @@
+/*
+ * First generation of pinmux driver for Amlogic Meson SoCs
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ * Copyright (C) 2017 Jerome Brunet <jbrunet@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+struct meson8_pmx_data {
+ bool is_gpio;
+ unsigned int reg;
+ unsigned int bit;
+};
+
+#define PMX_DATA(r, b, g) \
+ { \
+ .reg = r, \
+ .bit = b, \
+ .is_gpio = g, \
+ }
+
+#define GROUP(grp, r, b) \
+ { \
+ .name = #grp, \
+ .pins = grp ## _pins, \
+ .num_pins = ARRAY_SIZE(grp ## _pins), \
+ .data = (const struct meson8_pmx_data[]){ \
+ PMX_DATA(r, b, false), \
+ }, \
+ }
+
+#define GPIO_GROUP(gpio) \
+ { \
+ .name = #gpio, \
+ .pins = (const unsigned int[]){ gpio }, \
+ .num_pins = 1, \
+ .data = (const struct meson8_pmx_data[]){ \
+ PMX_DATA(0, 0, true), \
+ }, \
+ }
+
+extern const struct pinmux_ops meson8_pmx_ops;
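
(Editorial aside, not part of the patch: an illustrative, compilable expansion of one GROUP() and one GPIO_GROUP() entry under the macros defined above. The struct definitions below are local mirrors of the kernel ones and the GPIOX_0 value is a placeholder; only the shape of the initializers is the point.)

#include <stdbool.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Local mirrors of the kernel structures, trimmed to the relevant fields. */
struct meson8_pmx_data {
	bool is_gpio;
	unsigned int reg;
	unsigned int bit;
};

struct meson_pmx_group {
	const char *name;
	const unsigned int *pins;
	unsigned int num_pins;
	const void *data;
};

#define GPIOX_0 0	/* placeholder pin number, for illustration only */
static const unsigned int sd_d0_a_pins[] = { GPIOX_0 };

/* GROUP(sd_d0_a, 8, 5) and GPIO_GROUP(GPIOX_0) expand to entries like these: */
static const struct meson_pmx_group example_groups[] = {
	{
		.name = "sd_d0_a",
		.pins = sd_d0_a_pins,
		.num_pins = ARRAY_SIZE(sd_d0_a_pins),
		.data = (const struct meson8_pmx_data[]){
			{ .reg = 8, .bit = 5, .is_gpio = false },
		},
	},
	{
		.name = "GPIOX_0",
		.pins = (const unsigned int[]){ GPIOX_0 },
		.num_pins = 1,
		.data = (const struct meson8_pmx_data[]){
			{ .reg = 0, .bit = 0, .is_gpio = true },
		},
	},
};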
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 970f6f14502c..49c7ce03547b 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -13,506 +13,495 @@
#include <dt-bindings/gpio/meson8-gpio.h>
#include "pinctrl-meson.h"
-
-#define AO_OFF 120
+#include "pinctrl-meson8-pmx.h"
static const struct pinctrl_pin_desc meson8_cbus_pins[] = {
- MESON_PIN(GPIOX_0, 0),
- MESON_PIN(GPIOX_1, 0),
- MESON_PIN(GPIOX_2, 0),
- MESON_PIN(GPIOX_3, 0),
- MESON_PIN(GPIOX_4, 0),
- MESON_PIN(GPIOX_5, 0),
- MESON_PIN(GPIOX_6, 0),
- MESON_PIN(GPIOX_7, 0),
- MESON_PIN(GPIOX_8, 0),
- MESON_PIN(GPIOX_9, 0),
- MESON_PIN(GPIOX_10, 0),
- MESON_PIN(GPIOX_11, 0),
- MESON_PIN(GPIOX_12, 0),
- MESON_PIN(GPIOX_13, 0),
- MESON_PIN(GPIOX_14, 0),
- MESON_PIN(GPIOX_15, 0),
- MESON_PIN(GPIOX_16, 0),
- MESON_PIN(GPIOX_17, 0),
- MESON_PIN(GPIOX_18, 0),
- MESON_PIN(GPIOX_19, 0),
- MESON_PIN(GPIOX_20, 0),
- MESON_PIN(GPIOX_21, 0),
- MESON_PIN(GPIOY_0, 0),
- MESON_PIN(GPIOY_1, 0),
- MESON_PIN(GPIOY_2, 0),
- MESON_PIN(GPIOY_3, 0),
- MESON_PIN(GPIOY_4, 0),
- MESON_PIN(GPIOY_5, 0),
- MESON_PIN(GPIOY_6, 0),
- MESON_PIN(GPIOY_7, 0),
- MESON_PIN(GPIOY_8, 0),
- MESON_PIN(GPIOY_9, 0),
- MESON_PIN(GPIOY_10, 0),
- MESON_PIN(GPIOY_11, 0),
- MESON_PIN(GPIOY_12, 0),
- MESON_PIN(GPIOY_13, 0),
- MESON_PIN(GPIOY_14, 0),
- MESON_PIN(GPIOY_15, 0),
- MESON_PIN(GPIOY_16, 0),
- MESON_PIN(GPIODV_0, 0),
- MESON_PIN(GPIODV_1, 0),
- MESON_PIN(GPIODV_2, 0),
- MESON_PIN(GPIODV_3, 0),
- MESON_PIN(GPIODV_4, 0),
- MESON_PIN(GPIODV_5, 0),
- MESON_PIN(GPIODV_6, 0),
- MESON_PIN(GPIODV_7, 0),
- MESON_PIN(GPIODV_8, 0),
- MESON_PIN(GPIODV_9, 0),
- MESON_PIN(GPIODV_10, 0),
- MESON_PIN(GPIODV_11, 0),
- MESON_PIN(GPIODV_12, 0),
- MESON_PIN(GPIODV_13, 0),
- MESON_PIN(GPIODV_14, 0),
- MESON_PIN(GPIODV_15, 0),
- MESON_PIN(GPIODV_16, 0),
- MESON_PIN(GPIODV_17, 0),
- MESON_PIN(GPIODV_18, 0),
- MESON_PIN(GPIODV_19, 0),
- MESON_PIN(GPIODV_20, 0),
- MESON_PIN(GPIODV_21, 0),
- MESON_PIN(GPIODV_22, 0),
- MESON_PIN(GPIODV_23, 0),
- MESON_PIN(GPIODV_24, 0),
- MESON_PIN(GPIODV_25, 0),
- MESON_PIN(GPIODV_26, 0),
- MESON_PIN(GPIODV_27, 0),
- MESON_PIN(GPIODV_28, 0),
- MESON_PIN(GPIODV_29, 0),
- MESON_PIN(GPIOH_0, 0),
- MESON_PIN(GPIOH_1, 0),
- MESON_PIN(GPIOH_2, 0),
- MESON_PIN(GPIOH_3, 0),
- MESON_PIN(GPIOH_4, 0),
- MESON_PIN(GPIOH_5, 0),
- MESON_PIN(GPIOH_6, 0),
- MESON_PIN(GPIOH_7, 0),
- MESON_PIN(GPIOH_8, 0),
- MESON_PIN(GPIOH_9, 0),
- MESON_PIN(GPIOZ_0, 0),
- MESON_PIN(GPIOZ_1, 0),
- MESON_PIN(GPIOZ_2, 0),
- MESON_PIN(GPIOZ_3, 0),
- MESON_PIN(GPIOZ_4, 0),
- MESON_PIN(GPIOZ_5, 0),
- MESON_PIN(GPIOZ_6, 0),
- MESON_PIN(GPIOZ_7, 0),
- MESON_PIN(GPIOZ_8, 0),
- MESON_PIN(GPIOZ_9, 0),
- MESON_PIN(GPIOZ_10, 0),
- MESON_PIN(GPIOZ_11, 0),
- MESON_PIN(GPIOZ_12, 0),
- MESON_PIN(GPIOZ_13, 0),
- MESON_PIN(GPIOZ_14, 0),
- MESON_PIN(CARD_0, 0),
- MESON_PIN(CARD_1, 0),
- MESON_PIN(CARD_2, 0),
- MESON_PIN(CARD_3, 0),
- MESON_PIN(CARD_4, 0),
- MESON_PIN(CARD_5, 0),
- MESON_PIN(CARD_6, 0),
- MESON_PIN(BOOT_0, 0),
- MESON_PIN(BOOT_1, 0),
- MESON_PIN(BOOT_2, 0),
- MESON_PIN(BOOT_3, 0),
- MESON_PIN(BOOT_4, 0),
- MESON_PIN(BOOT_5, 0),
- MESON_PIN(BOOT_6, 0),
- MESON_PIN(BOOT_7, 0),
- MESON_PIN(BOOT_8, 0),
- MESON_PIN(BOOT_9, 0),
- MESON_PIN(BOOT_10, 0),
- MESON_PIN(BOOT_11, 0),
- MESON_PIN(BOOT_12, 0),
- MESON_PIN(BOOT_13, 0),
- MESON_PIN(BOOT_14, 0),
- MESON_PIN(BOOT_15, 0),
- MESON_PIN(BOOT_16, 0),
- MESON_PIN(BOOT_17, 0),
- MESON_PIN(BOOT_18, 0),
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+ MESON_PIN(GPIOX_19),
+ MESON_PIN(GPIOX_20),
+ MESON_PIN(GPIOX_21),
+ MESON_PIN(GPIOY_0),
+ MESON_PIN(GPIOY_1),
+ MESON_PIN(GPIOY_2),
+ MESON_PIN(GPIOY_3),
+ MESON_PIN(GPIOY_4),
+ MESON_PIN(GPIOY_5),
+ MESON_PIN(GPIOY_6),
+ MESON_PIN(GPIOY_7),
+ MESON_PIN(GPIOY_8),
+ MESON_PIN(GPIOY_9),
+ MESON_PIN(GPIOY_10),
+ MESON_PIN(GPIOY_11),
+ MESON_PIN(GPIOY_12),
+ MESON_PIN(GPIOY_13),
+ MESON_PIN(GPIOY_14),
+ MESON_PIN(GPIOY_15),
+ MESON_PIN(GPIOY_16),
+ MESON_PIN(GPIODV_0),
+ MESON_PIN(GPIODV_1),
+ MESON_PIN(GPIODV_2),
+ MESON_PIN(GPIODV_3),
+ MESON_PIN(GPIODV_4),
+ MESON_PIN(GPIODV_5),
+ MESON_PIN(GPIODV_6),
+ MESON_PIN(GPIODV_7),
+ MESON_PIN(GPIODV_8),
+ MESON_PIN(GPIODV_9),
+ MESON_PIN(GPIODV_10),
+ MESON_PIN(GPIODV_11),
+ MESON_PIN(GPIODV_12),
+ MESON_PIN(GPIODV_13),
+ MESON_PIN(GPIODV_14),
+ MESON_PIN(GPIODV_15),
+ MESON_PIN(GPIODV_16),
+ MESON_PIN(GPIODV_17),
+ MESON_PIN(GPIODV_18),
+ MESON_PIN(GPIODV_19),
+ MESON_PIN(GPIODV_20),
+ MESON_PIN(GPIODV_21),
+ MESON_PIN(GPIODV_22),
+ MESON_PIN(GPIODV_23),
+ MESON_PIN(GPIODV_24),
+ MESON_PIN(GPIODV_25),
+ MESON_PIN(GPIODV_26),
+ MESON_PIN(GPIODV_27),
+ MESON_PIN(GPIODV_28),
+ MESON_PIN(GPIODV_29),
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+ MESON_PIN(GPIOH_4),
+ MESON_PIN(GPIOH_5),
+ MESON_PIN(GPIOH_6),
+ MESON_PIN(GPIOH_7),
+ MESON_PIN(GPIOH_8),
+ MESON_PIN(GPIOH_9),
+ MESON_PIN(GPIOZ_0),
+ MESON_PIN(GPIOZ_1),
+ MESON_PIN(GPIOZ_2),
+ MESON_PIN(GPIOZ_3),
+ MESON_PIN(GPIOZ_4),
+ MESON_PIN(GPIOZ_5),
+ MESON_PIN(GPIOZ_6),
+ MESON_PIN(GPIOZ_7),
+ MESON_PIN(GPIOZ_8),
+ MESON_PIN(GPIOZ_9),
+ MESON_PIN(GPIOZ_10),
+ MESON_PIN(GPIOZ_11),
+ MESON_PIN(GPIOZ_12),
+ MESON_PIN(GPIOZ_13),
+ MESON_PIN(GPIOZ_14),
+ MESON_PIN(CARD_0),
+ MESON_PIN(CARD_1),
+ MESON_PIN(CARD_2),
+ MESON_PIN(CARD_3),
+ MESON_PIN(CARD_4),
+ MESON_PIN(CARD_5),
+ MESON_PIN(CARD_6),
+ MESON_PIN(BOOT_0),
+ MESON_PIN(BOOT_1),
+ MESON_PIN(BOOT_2),
+ MESON_PIN(BOOT_3),
+ MESON_PIN(BOOT_4),
+ MESON_PIN(BOOT_5),
+ MESON_PIN(BOOT_6),
+ MESON_PIN(BOOT_7),
+ MESON_PIN(BOOT_8),
+ MESON_PIN(BOOT_9),
+ MESON_PIN(BOOT_10),
+ MESON_PIN(BOOT_11),
+ MESON_PIN(BOOT_12),
+ MESON_PIN(BOOT_13),
+ MESON_PIN(BOOT_14),
+ MESON_PIN(BOOT_15),
+ MESON_PIN(BOOT_16),
+ MESON_PIN(BOOT_17),
+ MESON_PIN(BOOT_18),
};
static const struct pinctrl_pin_desc meson8_aobus_pins[] = {
- MESON_PIN(GPIOAO_0, AO_OFF),
- MESON_PIN(GPIOAO_1, AO_OFF),
- MESON_PIN(GPIOAO_2, AO_OFF),
- MESON_PIN(GPIOAO_3, AO_OFF),
- MESON_PIN(GPIOAO_4, AO_OFF),
- MESON_PIN(GPIOAO_5, AO_OFF),
- MESON_PIN(GPIOAO_6, AO_OFF),
- MESON_PIN(GPIOAO_7, AO_OFF),
- MESON_PIN(GPIOAO_8, AO_OFF),
- MESON_PIN(GPIOAO_9, AO_OFF),
- MESON_PIN(GPIOAO_10, AO_OFF),
- MESON_PIN(GPIOAO_11, AO_OFF),
- MESON_PIN(GPIOAO_12, AO_OFF),
- MESON_PIN(GPIOAO_13, AO_OFF),
- MESON_PIN(GPIO_BSD_EN, AO_OFF),
- MESON_PIN(GPIO_TEST_N, AO_OFF),
+ MESON_PIN(GPIOAO_0),
+ MESON_PIN(GPIOAO_1),
+ MESON_PIN(GPIOAO_2),
+ MESON_PIN(GPIOAO_3),
+ MESON_PIN(GPIOAO_4),
+ MESON_PIN(GPIOAO_5),
+ MESON_PIN(GPIOAO_6),
+ MESON_PIN(GPIOAO_7),
+ MESON_PIN(GPIOAO_8),
+ MESON_PIN(GPIOAO_9),
+ MESON_PIN(GPIOAO_10),
+ MESON_PIN(GPIOAO_11),
+ MESON_PIN(GPIOAO_12),
+ MESON_PIN(GPIOAO_13),
+ MESON_PIN(GPIO_BSD_EN),
+ MESON_PIN(GPIO_TEST_N),
};
/* bank X */
-static const unsigned int sd_d0_a_pins[] = { PIN(GPIOX_0, 0) };
-static const unsigned int sd_d1_a_pins[] = { PIN(GPIOX_1, 0) };
-static const unsigned int sd_d2_a_pins[] = { PIN(GPIOX_2, 0) };
-static const unsigned int sd_d3_a_pins[] = { PIN(GPIOX_3, 0) };
-static const unsigned int sd_clk_a_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int sd_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
-
-static const unsigned int sdxc_d0_a_pins[] = { PIN(GPIOX_0, 0) };
-static const unsigned int sdxc_d13_a_pins[] = { PIN(GPIOX_1, 0), PIN(GPIOX_2, 0),
- PIN(GPIOX_3, 0) };
-static const unsigned int sdxc_d47_a_pins[] = { PIN(GPIOX_4, 0), PIN(GPIOX_5, 0),
- PIN(GPIOX_6, 0), PIN(GPIOX_7, 0) };
-static const unsigned int sdxc_clk_a_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int sdxc_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
-
-static const unsigned int pcm_out_a_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int pcm_in_a_pins[] = { PIN(GPIOX_5, 0) };
-static const unsigned int pcm_fs_a_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int pcm_clk_a_pins[] = { PIN(GPIOX_7, 0) };
-
-static const unsigned int uart_tx_a0_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int uart_rx_a0_pins[] = { PIN(GPIOX_5, 0) };
-static const unsigned int uart_cts_a0_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int uart_rts_a0_pins[] = { PIN(GPIOX_7, 0) };
-
-static const unsigned int uart_tx_a1_pins[] = { PIN(GPIOX_12, 0) };
-static const unsigned int uart_rx_a1_pins[] = { PIN(GPIOX_13, 0) };
-static const unsigned int uart_cts_a1_pins[] = { PIN(GPIOX_14, 0) };
-static const unsigned int uart_rts_a1_pins[] = { PIN(GPIOX_15, 0) };
-
-static const unsigned int uart_tx_b0_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int uart_rx_b0_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int uart_cts_b0_pins[] = { PIN(GPIOX_18, 0) };
-static const unsigned int uart_rts_b0_pins[] = { PIN(GPIOX_19, 0) };
-
-static const unsigned int iso7816_det_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int iso7816_reset_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int iso7816_clk_pins[] = { PIN(GPIOX_18, 0) };
-static const unsigned int iso7816_data_pins[] = { PIN(GPIOX_19, 0) };
-
-static const unsigned int i2c_sda_d0_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int i2c_sck_d0_pins[] = { PIN(GPIOX_17, 0) };
-
-static const unsigned int xtal_32k_out_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int xtal_24m_out_pins[] = { PIN(GPIOX_11, 0) };
-
-static const unsigned int pwm_e_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int pwm_b_x_pins[] = { PIN(GPIOX_11, 0) };
+static const unsigned int sd_d0_a_pins[] = { GPIOX_0 };
+static const unsigned int sd_d1_a_pins[] = { GPIOX_1 };
+static const unsigned int sd_d2_a_pins[] = { GPIOX_2 };
+static const unsigned int sd_d3_a_pins[] = { GPIOX_3 };
+static const unsigned int sd_clk_a_pins[] = { GPIOX_8 };
+static const unsigned int sd_cmd_a_pins[] = { GPIOX_9 };
+
+static const unsigned int sdxc_d0_a_pins[] = { GPIOX_0 };
+static const unsigned int sdxc_d13_a_pins[] = { GPIOX_1, GPIOX_2, GPIOX_3 };
+static const unsigned int sdxc_d47_a_pins[] = { GPIOX_4, GPIOX_5, GPIOX_6,
+ GPIOX_7 };
+static const unsigned int sdxc_clk_a_pins[] = { GPIOX_8 };
+static const unsigned int sdxc_cmd_a_pins[] = { GPIOX_9 };
+
+static const unsigned int pcm_out_a_pins[] = { GPIOX_4 };
+static const unsigned int pcm_in_a_pins[] = { GPIOX_5 };
+static const unsigned int pcm_fs_a_pins[] = { GPIOX_6 };
+static const unsigned int pcm_clk_a_pins[] = { GPIOX_7 };
+
+static const unsigned int uart_tx_a0_pins[] = { GPIOX_4 };
+static const unsigned int uart_rx_a0_pins[] = { GPIOX_5 };
+static const unsigned int uart_cts_a0_pins[] = { GPIOX_6 };
+static const unsigned int uart_rts_a0_pins[] = { GPIOX_7 };
+
+static const unsigned int uart_tx_a1_pins[] = { GPIOX_12 };
+static const unsigned int uart_rx_a1_pins[] = { GPIOX_13 };
+static const unsigned int uart_cts_a1_pins[] = { GPIOX_14 };
+static const unsigned int uart_rts_a1_pins[] = { GPIOX_15 };
+
+static const unsigned int uart_tx_b0_pins[] = { GPIOX_16 };
+static const unsigned int uart_rx_b0_pins[] = { GPIOX_17 };
+static const unsigned int uart_cts_b0_pins[] = { GPIOX_18 };
+static const unsigned int uart_rts_b0_pins[] = { GPIOX_19 };
+
+static const unsigned int iso7816_det_pins[] = { GPIOX_16 };
+static const unsigned int iso7816_reset_pins[] = { GPIOX_17 };
+static const unsigned int iso7816_clk_pins[] = { GPIOX_18 };
+static const unsigned int iso7816_data_pins[] = { GPIOX_19 };
+
+static const unsigned int i2c_sda_d0_pins[] = { GPIOX_16 };
+static const unsigned int i2c_sck_d0_pins[] = { GPIOX_17 };
+
+static const unsigned int xtal_32k_out_pins[] = { GPIOX_10 };
+static const unsigned int xtal_24m_out_pins[] = { GPIOX_11 };
+
+static const unsigned int pwm_e_pins[] = { GPIOX_10 };
+static const unsigned int pwm_b_x_pins[] = { GPIOX_11 };
/* bank Y */
-static const unsigned int uart_tx_c_pins[] = { PIN(GPIOY_0, 0) };
-static const unsigned int uart_rx_c_pins[] = { PIN(GPIOY_1, 0) };
-static const unsigned int uart_cts_c_pins[] = { PIN(GPIOY_2, 0) };
-static const unsigned int uart_rts_c_pins[] = { PIN(GPIOY_3, 0) };
+static const unsigned int uart_tx_c_pins[] = { GPIOY_0 };
+static const unsigned int uart_rx_c_pins[] = { GPIOY_1 };
+static const unsigned int uart_cts_c_pins[] = { GPIOY_2 };
+static const unsigned int uart_rts_c_pins[] = { GPIOY_3 };
-static const unsigned int pcm_out_b_pins[] = { PIN(GPIOY_4, 0) };
-static const unsigned int pcm_in_b_pins[] = { PIN(GPIOY_5, 0) };
-static const unsigned int pcm_fs_b_pins[] = { PIN(GPIOY_6, 0) };
-static const unsigned int pcm_clk_b_pins[] = { PIN(GPIOY_7, 0) };
+static const unsigned int pcm_out_b_pins[] = { GPIOY_4 };
+static const unsigned int pcm_in_b_pins[] = { GPIOY_5 };
+static const unsigned int pcm_fs_b_pins[] = { GPIOY_6 };
+static const unsigned int pcm_clk_b_pins[] = { GPIOY_7 };
-static const unsigned int i2c_sda_c0_pins[] = { PIN(GPIOY_0, 0) };
-static const unsigned int i2c_sck_c0_pins[] = { PIN(GPIOY_1, 0) };
+static const unsigned int i2c_sda_c0_pins[] = { GPIOY_0 };
+static const unsigned int i2c_sck_c0_pins[] = { GPIOY_1 };
-static const unsigned int pwm_a_y_pins[] = { PIN(GPIOY_16, 0) };
+static const unsigned int pwm_a_y_pins[] = { GPIOY_16 };
-static const unsigned int i2s_out_ch45_pins[] = { PIN(GPIOY_0, 0) };
-static const unsigned int i2s_out_ch23_pins[] = { PIN(GPIOY_1, 0) };
-static const unsigned int i2s_out_ch01_pins[] = { PIN(GPIOY_4, 0) };
-static const unsigned int i2s_in_ch01_pins[] = { PIN(GPIOY_5, 0) };
-static const unsigned int i2s_lr_clk_in_pins[] = { PIN(GPIOY_6, 0) };
-static const unsigned int i2s_ao_clk_in_pins[] = { PIN(GPIOY_7, 0) };
-static const unsigned int i2s_am_clk_pins[] = { PIN(GPIOY_8, 0) };
-static const unsigned int i2s_out_ch78_pins[] = { PIN(GPIOY_9, 0) };
+static const unsigned int i2s_out_ch45_pins[] = { GPIOY_0 };
+static const unsigned int i2s_out_ch23_pins[] = { GPIOY_1 };
+static const unsigned int i2s_out_ch01_pins[] = { GPIOY_4 };
+static const unsigned int i2s_in_ch01_pins[] = { GPIOY_5 };
+static const unsigned int i2s_lr_clk_in_pins[] = { GPIOY_6 };
+static const unsigned int i2s_ao_clk_in_pins[] = { GPIOY_7 };
+static const unsigned int i2s_am_clk_pins[] = { GPIOY_8 };
+static const unsigned int i2s_out_ch78_pins[] = { GPIOY_9 };
-static const unsigned int spdif_in_pins[] = { PIN(GPIOY_2, 0) };
-static const unsigned int spdif_out_pins[] = { PIN(GPIOY_3, 0) };
+static const unsigned int spdif_in_pins[] = { GPIOY_2 };
+static const unsigned int spdif_out_pins[] = { GPIOY_3 };
/* bank DV */
-static const unsigned int dvin_rgb_pins[] = { PIN(GPIODV_0, 0), PIN(GPIODV_1, 0),
- PIN(GPIODV_2, 0), PIN(GPIODV_3, 0),
- PIN(GPIODV_4, 0), PIN(GPIODV_5, 0),
- PIN(GPIODV_6, 0), PIN(GPIODV_7, 0),
- PIN(GPIODV_8, 0), PIN(GPIODV_9, 0),
- PIN(GPIODV_10, 0), PIN(GPIODV_11, 0),
- PIN(GPIODV_12, 0), PIN(GPIODV_13, 0),
- PIN(GPIODV_14, 0), PIN(GPIODV_15, 0),
- PIN(GPIODV_16, 0), PIN(GPIODV_17, 0),
- PIN(GPIODV_18, 0), PIN(GPIODV_19, 0),
- PIN(GPIODV_20, 0), PIN(GPIODV_21, 0),
- PIN(GPIODV_22, 0), PIN(GPIODV_23, 0) };
-static const unsigned int dvin_vs_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int dvin_hs_pins[] = { PIN(GPIODV_25, 0) };
-static const unsigned int dvin_clk_pins[] = { PIN(GPIODV_26, 0) };
-static const unsigned int dvin_de_pins[] = { PIN(GPIODV_27, 0) };
-
-static const unsigned int enc_0_pins[] = { PIN(GPIODV_0, 0) };
-static const unsigned int enc_1_pins[] = { PIN(GPIODV_1, 0) };
-static const unsigned int enc_2_pins[] = { PIN(GPIODV_2, 0) };
-static const unsigned int enc_3_pins[] = { PIN(GPIODV_3, 0) };
-static const unsigned int enc_4_pins[] = { PIN(GPIODV_4, 0) };
-static const unsigned int enc_5_pins[] = { PIN(GPIODV_5, 0) };
-static const unsigned int enc_6_pins[] = { PIN(GPIODV_6, 0) };
-static const unsigned int enc_7_pins[] = { PIN(GPIODV_7, 0) };
-static const unsigned int enc_8_pins[] = { PIN(GPIODV_8, 0) };
-static const unsigned int enc_9_pins[] = { PIN(GPIODV_9, 0) };
-static const unsigned int enc_10_pins[] = { PIN(GPIODV_10, 0) };
-static const unsigned int enc_11_pins[] = { PIN(GPIODV_11, 0) };
-static const unsigned int enc_12_pins[] = { PIN(GPIODV_12, 0) };
-static const unsigned int enc_13_pins[] = { PIN(GPIODV_13, 0) };
-static const unsigned int enc_14_pins[] = { PIN(GPIODV_14, 0) };
-static const unsigned int enc_15_pins[] = { PIN(GPIODV_15, 0) };
-static const unsigned int enc_16_pins[] = { PIN(GPIODV_16, 0) };
-static const unsigned int enc_17_pins[] = { PIN(GPIODV_17, 0) };
-
-static const unsigned int uart_tx_b1_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int uart_rx_b1_pins[] = { PIN(GPIODV_25, 0) };
-static const unsigned int uart_cts_b1_pins[] = { PIN(GPIODV_26, 0) };
-static const unsigned int uart_rts_b1_pins[] = { PIN(GPIODV_27, 0) };
-
-static const unsigned int vga_vs_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int vga_hs_pins[] = { PIN(GPIODV_25, 0) };
-
-static const unsigned int pwm_c_dv9_pins[] = { PIN(GPIODV_9, 0) };
-static const unsigned int pwm_c_dv29_pins[] = { PIN(GPIODV_29, 0) };
-static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, 0) };
+static const unsigned int dvin_rgb_pins[] = {
+ GPIODV_0, GPIODV_1, GPIODV_2, GPIODV_3, GPIODV_4, GPIODV_5,
+ GPIODV_6, GPIODV_7, GPIODV_8, GPIODV_9, GPIODV_10, GPIODV_11,
+ GPIODV_12, GPIODV_13, GPIODV_14, GPIODV_15, GPIODV_16, GPIODV_17,
+ GPIODV_18, GPIODV_19, GPIODV_20, GPIODV_21, GPIODV_22, GPIODV_23
+};
+static const unsigned int dvin_vs_pins[] = { GPIODV_24 };
+static const unsigned int dvin_hs_pins[] = { GPIODV_25 };
+static const unsigned int dvin_clk_pins[] = { GPIODV_26 };
+static const unsigned int dvin_de_pins[] = { GPIODV_27 };
+
+static const unsigned int enc_0_pins[] = { GPIODV_0 };
+static const unsigned int enc_1_pins[] = { GPIODV_1 };
+static const unsigned int enc_2_pins[] = { GPIODV_2 };
+static const unsigned int enc_3_pins[] = { GPIODV_3 };
+static const unsigned int enc_4_pins[] = { GPIODV_4 };
+static const unsigned int enc_5_pins[] = { GPIODV_5 };
+static const unsigned int enc_6_pins[] = { GPIODV_6 };
+static const unsigned int enc_7_pins[] = { GPIODV_7 };
+static const unsigned int enc_8_pins[] = { GPIODV_8 };
+static const unsigned int enc_9_pins[] = { GPIODV_9 };
+static const unsigned int enc_10_pins[] = { GPIODV_10 };
+static const unsigned int enc_11_pins[] = { GPIODV_11 };
+static const unsigned int enc_12_pins[] = { GPIODV_12 };
+static const unsigned int enc_13_pins[] = { GPIODV_13 };
+static const unsigned int enc_14_pins[] = { GPIODV_14 };
+static const unsigned int enc_15_pins[] = { GPIODV_15 };
+static const unsigned int enc_16_pins[] = { GPIODV_16 };
+static const unsigned int enc_17_pins[] = { GPIODV_17 };
+
+static const unsigned int uart_tx_b1_pins[] = { GPIODV_24 };
+static const unsigned int uart_rx_b1_pins[] = { GPIODV_25 };
+static const unsigned int uart_cts_b1_pins[] = { GPIODV_26 };
+static const unsigned int uart_rts_b1_pins[] = { GPIODV_27 };
+
+static const unsigned int vga_vs_pins[] = { GPIODV_24 };
+static const unsigned int vga_hs_pins[] = { GPIODV_25 };
+
+static const unsigned int pwm_c_dv9_pins[] = { GPIODV_9 };
+static const unsigned int pwm_c_dv29_pins[] = { GPIODV_29 };
+static const unsigned int pwm_d_pins[] = { GPIODV_28 };
/* bank H */
-static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, 0) };
-static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, 0) };
-static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, 0) };
-static const unsigned int hdmi_cec_pins[] = { PIN(GPIOH_3, 0) };
+static const unsigned int hdmi_hpd_pins[] = { GPIOH_0 };
+static const unsigned int hdmi_sda_pins[] = { GPIOH_1 };
+static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
+static const unsigned int hdmi_cec_pins[] = { GPIOH_3 };
-static const unsigned int spi_ss0_0_pins[] = { PIN(GPIOH_3, 0) };
-static const unsigned int spi_miso_0_pins[] = { PIN(GPIOH_4, 0) };
-static const unsigned int spi_mosi_0_pins[] = { PIN(GPIOH_5, 0) };
-static const unsigned int spi_sclk_0_pins[] = { PIN(GPIOH_6, 0) };
+static const unsigned int spi_ss0_0_pins[] = { GPIOH_3 };
+static const unsigned int spi_miso_0_pins[] = { GPIOH_4 };
+static const unsigned int spi_mosi_0_pins[] = { GPIOH_5 };
+static const unsigned int spi_sclk_0_pins[] = { GPIOH_6 };
-static const unsigned int i2c_sda_d1_pins[] = { PIN(GPIOH_7, 0) };
-static const unsigned int i2c_sck_d1_pins[] = { PIN(GPIOH_8, 0) };
+static const unsigned int i2c_sda_d1_pins[] = { GPIOH_7 };
+static const unsigned int i2c_sck_d1_pins[] = { GPIOH_8 };
/* bank Z */
-static const unsigned int spi_ss0_1_pins[] = { PIN(GPIOZ_9, 0) };
-static const unsigned int spi_ss1_1_pins[] = { PIN(GPIOZ_10, 0) };
-static const unsigned int spi_sclk_1_pins[] = { PIN(GPIOZ_11, 0) };
-static const unsigned int spi_mosi_1_pins[] = { PIN(GPIOZ_12, 0) };
-static const unsigned int spi_miso_1_pins[] = { PIN(GPIOZ_13, 0) };
-static const unsigned int spi_ss2_1_pins[] = { PIN(GPIOZ_14, 0) };
-
-static const unsigned int eth_tx_clk_50m_pins[] = { PIN(GPIOZ_4, 0) };
-static const unsigned int eth_tx_en_pins[] = { PIN(GPIOZ_5, 0) };
-static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_6, 0) };
-static const unsigned int eth_txd0_pins[] = { PIN(GPIOZ_7, 0) };
-static const unsigned int eth_rx_clk_in_pins[] = { PIN(GPIOZ_8, 0) };
-static const unsigned int eth_rx_dv_pins[] = { PIN(GPIOZ_9, 0) };
-static const unsigned int eth_rxd1_pins[] = { PIN(GPIOZ_10, 0) };
-static const unsigned int eth_rxd0_pins[] = { PIN(GPIOZ_11, 0) };
-static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_12, 0) };
-static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_13, 0) };
-
-static const unsigned int i2c_sda_a0_pins[] = { PIN(GPIOZ_0, 0) };
-static const unsigned int i2c_sck_a0_pins[] = { PIN(GPIOZ_1, 0) };
-
-static const unsigned int i2c_sda_b_pins[] = { PIN(GPIOZ_2, 0) };
-static const unsigned int i2c_sck_b_pins[] = { PIN(GPIOZ_3, 0) };
-
-static const unsigned int i2c_sda_c1_pins[] = { PIN(GPIOZ_4, 0) };
-static const unsigned int i2c_sck_c1_pins[] = { PIN(GPIOZ_5, 0) };
-
-static const unsigned int i2c_sda_a1_pins[] = { PIN(GPIOZ_0, 0) };
-static const unsigned int i2c_sck_a1_pins[] = { PIN(GPIOZ_1, 0) };
-
-static const unsigned int i2c_sda_a2_pins[] = { PIN(GPIOZ_0, 0) };
-static const unsigned int i2c_sck_a2_pins[] = { PIN(GPIOZ_1, 0) };
-
-static const unsigned int pwm_a_z0_pins[] = { PIN(GPIOZ_0, 0) };
-static const unsigned int pwm_a_z7_pins[] = { PIN(GPIOZ_7, 0) };
-static const unsigned int pwm_b_z_pins[] = { PIN(GPIOZ_1, 0) };
-static const unsigned int pwm_c_z_pins[] = { PIN(GPIOZ_8, 0) };
+static const unsigned int spi_ss0_1_pins[] = { GPIOZ_9 };
+static const unsigned int spi_ss1_1_pins[] = { GPIOZ_10 };
+static const unsigned int spi_sclk_1_pins[] = { GPIOZ_11 };
+static const unsigned int spi_mosi_1_pins[] = { GPIOZ_12 };
+static const unsigned int spi_miso_1_pins[] = { GPIOZ_13 };
+static const unsigned int spi_ss2_1_pins[] = { GPIOZ_14 };
+
+static const unsigned int eth_tx_clk_50m_pins[] = { GPIOZ_4 };
+static const unsigned int eth_tx_en_pins[] = { GPIOZ_5 };
+static const unsigned int eth_txd1_pins[] = { GPIOZ_6 };
+static const unsigned int eth_txd0_pins[] = { GPIOZ_7 };
+static const unsigned int eth_rx_clk_in_pins[] = { GPIOZ_8 };
+static const unsigned int eth_rx_dv_pins[] = { GPIOZ_9 };
+static const unsigned int eth_rxd1_pins[] = { GPIOZ_10 };
+static const unsigned int eth_rxd0_pins[] = { GPIOZ_11 };
+static const unsigned int eth_mdio_pins[] = { GPIOZ_12 };
+static const unsigned int eth_mdc_pins[] = { GPIOZ_13 };
+
+static const unsigned int i2c_sda_a0_pins[] = { GPIOZ_0 };
+static const unsigned int i2c_sck_a0_pins[] = { GPIOZ_1 };
+
+static const unsigned int i2c_sda_b_pins[] = { GPIOZ_2 };
+static const unsigned int i2c_sck_b_pins[] = { GPIOZ_3 };
+
+static const unsigned int i2c_sda_c1_pins[] = { GPIOZ_4 };
+static const unsigned int i2c_sck_c1_pins[] = { GPIOZ_5 };
+
+static const unsigned int i2c_sda_a1_pins[] = { GPIOZ_0 };
+static const unsigned int i2c_sck_a1_pins[] = { GPIOZ_1 };
+
+static const unsigned int i2c_sda_a2_pins[] = { GPIOZ_0 };
+static const unsigned int i2c_sck_a2_pins[] = { GPIOZ_1 };
+
+static const unsigned int pwm_a_z0_pins[] = { GPIOZ_0 };
+static const unsigned int pwm_a_z7_pins[] = { GPIOZ_7 };
+static const unsigned int pwm_b_z_pins[] = { GPIOZ_1 };
+static const unsigned int pwm_c_z_pins[] = { GPIOZ_8 };
/* bank BOOT */
-static const unsigned int sd_d0_c_pins[] = { PIN(BOOT_0, 0) };
-static const unsigned int sd_d1_c_pins[] = { PIN(BOOT_1, 0) };
-static const unsigned int sd_d2_c_pins[] = { PIN(BOOT_2, 0) };
-static const unsigned int sd_d3_c_pins[] = { PIN(BOOT_3, 0) };
-static const unsigned int sd_cmd_c_pins[] = { PIN(BOOT_16, 0) };
-static const unsigned int sd_clk_c_pins[] = { PIN(BOOT_17, 0) };
-
-static const unsigned int sdxc_d0_c_pins[] = { PIN(BOOT_0, 0)};
-static const unsigned int sdxc_d13_c_pins[] = { PIN(BOOT_1, 0), PIN(BOOT_2, 0),
- PIN(BOOT_3, 0) };
-static const unsigned int sdxc_d47_c_pins[] = { PIN(BOOT_4, 0), PIN(BOOT_5, 0),
- PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
-static const unsigned int sdxc_cmd_c_pins[] = { PIN(BOOT_16, 0) };
-static const unsigned int sdxc_clk_c_pins[] = { PIN(BOOT_17, 0) };
-
-static const unsigned int nand_io_pins[] = { PIN(BOOT_0, 0), PIN(BOOT_1, 0),
- PIN(BOOT_2, 0), PIN(BOOT_3, 0),
- PIN(BOOT_4, 0), PIN(BOOT_5, 0),
- PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
-static const unsigned int nand_io_ce0_pins[] = { PIN(BOOT_8, 0) };
-static const unsigned int nand_io_ce1_pins[] = { PIN(BOOT_9, 0) };
-static const unsigned int nand_io_rb0_pins[] = { PIN(BOOT_10, 0) };
-static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, 0) };
-static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, 0) };
-static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, 0) };
-static const unsigned int nand_ren_clk_pins[] = { PIN(BOOT_14, 0) };
-static const unsigned int nand_dqs_pins[] = { PIN(BOOT_15, 0) };
-static const unsigned int nand_ce2_pins[] = { PIN(BOOT_16, 0) };
-static const unsigned int nand_ce3_pins[] = { PIN(BOOT_17, 0) };
-
-static const unsigned int nor_d_pins[] = { PIN(BOOT_11, 0) };
-static const unsigned int nor_q_pins[] = { PIN(BOOT_12, 0) };
-static const unsigned int nor_c_pins[] = { PIN(BOOT_13, 0) };
-static const unsigned int nor_cs_pins[] = { PIN(BOOT_18, 0) };
+static const unsigned int sd_d0_c_pins[] = { BOOT_0 };
+static const unsigned int sd_d1_c_pins[] = { BOOT_1 };
+static const unsigned int sd_d2_c_pins[] = { BOOT_2 };
+static const unsigned int sd_d3_c_pins[] = { BOOT_3 };
+static const unsigned int sd_cmd_c_pins[] = { BOOT_16 };
+static const unsigned int sd_clk_c_pins[] = { BOOT_17 };
+
+static const unsigned int sdxc_d0_c_pins[] = { BOOT_0};
+static const unsigned int sdxc_d13_c_pins[] = { BOOT_1, BOOT_2, BOOT_3 };
+static const unsigned int sdxc_d47_c_pins[] = { BOOT_4, BOOT_5, BOOT_6,
+ BOOT_7 };
+static const unsigned int sdxc_cmd_c_pins[] = { BOOT_16 };
+static const unsigned int sdxc_clk_c_pins[] = { BOOT_17 };
+
+static const unsigned int nand_io_pins[] = {
+ BOOT_0, BOOT_1, BOOT_2, BOOT_3, BOOT_4, BOOT_5, BOOT_6, BOOT_7
+};
+static const unsigned int nand_io_ce0_pins[] = { BOOT_8 };
+static const unsigned int nand_io_ce1_pins[] = { BOOT_9 };
+static const unsigned int nand_io_rb0_pins[] = { BOOT_10 };
+static const unsigned int nand_ale_pins[] = { BOOT_11 };
+static const unsigned int nand_cle_pins[] = { BOOT_12 };
+static const unsigned int nand_wen_clk_pins[] = { BOOT_13 };
+static const unsigned int nand_ren_clk_pins[] = { BOOT_14 };
+static const unsigned int nand_dqs_pins[] = { BOOT_15 };
+static const unsigned int nand_ce2_pins[] = { BOOT_16 };
+static const unsigned int nand_ce3_pins[] = { BOOT_17 };
+
+static const unsigned int nor_d_pins[] = { BOOT_11 };
+static const unsigned int nor_q_pins[] = { BOOT_12 };
+static const unsigned int nor_c_pins[] = { BOOT_13 };
+static const unsigned int nor_cs_pins[] = { BOOT_18 };
/* bank CARD */
-static const unsigned int sd_d1_b_pins[] = { PIN(CARD_0, 0) };
-static const unsigned int sd_d0_b_pins[] = { PIN(CARD_1, 0) };
-static const unsigned int sd_clk_b_pins[] = { PIN(CARD_2, 0) };
-static const unsigned int sd_cmd_b_pins[] = { PIN(CARD_3, 0) };
-static const unsigned int sd_d3_b_pins[] = { PIN(CARD_4, 0) };
-static const unsigned int sd_d2_b_pins[] = { PIN(CARD_5, 0) };
-
-static const unsigned int sdxc_d13_b_pins[] = { PIN(CARD_0, 0), PIN(CARD_4, 0),
- PIN(CARD_5, 0) };
-static const unsigned int sdxc_d0_b_pins[] = { PIN(CARD_1, 0) };
-static const unsigned int sdxc_clk_b_pins[] = { PIN(CARD_2, 0) };
-static const unsigned int sdxc_cmd_b_pins[] = { PIN(CARD_3, 0) };
+static const unsigned int sd_d1_b_pins[] = { CARD_0 };
+static const unsigned int sd_d0_b_pins[] = { CARD_1 };
+static const unsigned int sd_clk_b_pins[] = { CARD_2 };
+static const unsigned int sd_cmd_b_pins[] = { CARD_3 };
+static const unsigned int sd_d3_b_pins[] = { CARD_4 };
+static const unsigned int sd_d2_b_pins[] = { CARD_5 };
+
+static const unsigned int sdxc_d13_b_pins[] = { CARD_0, CARD_4, CARD_5 };
+static const unsigned int sdxc_d0_b_pins[] = { CARD_1 };
+static const unsigned int sdxc_clk_b_pins[] = { CARD_2 };
+static const unsigned int sdxc_cmd_b_pins[] = { CARD_3 };
/* bank AO */
-static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, AO_OFF) };
-static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, AO_OFF) };
-static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, AO_OFF) };
-static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, AO_OFF) };
+static const unsigned int uart_tx_ao_a_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_a_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_a_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_a_pins[] = { GPIOAO_3 };
-static const unsigned int remote_input_pins[] = { PIN(GPIOAO_7, AO_OFF) };
-static const unsigned int remote_output_ao_pins[] = { PIN(GPIOAO_13, AO_OFF) };
+static const unsigned int remote_input_pins[] = { GPIOAO_7 };
+static const unsigned int remote_output_ao_pins[] = { GPIOAO_13 };
-static const unsigned int i2c_slave_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int i2c_slave_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
+static const unsigned int i2c_slave_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_slave_sda_ao_pins[] = { GPIOAO_5 };
-static const unsigned int uart_tx_ao_b0_pins[] = { PIN(GPIOAO_0, AO_OFF) };
-static const unsigned int uart_rx_ao_b0_pins[] = { PIN(GPIOAO_1, AO_OFF) };
+static const unsigned int uart_tx_ao_b0_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_b0_pins[] = { GPIOAO_1 };
-static const unsigned int uart_tx_ao_b1_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int uart_rx_ao_b1_pins[] = { PIN(GPIOAO_5, AO_OFF) };
+static const unsigned int uart_tx_ao_b1_pins[] = { GPIOAO_4 };
+static const unsigned int uart_rx_ao_b1_pins[] = { GPIOAO_5 };
-static const unsigned int i2c_mst_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int i2c_mst_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
+static const unsigned int i2c_mst_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_mst_sda_ao_pins[] = { GPIOAO_5 };
-static const unsigned int pwm_f_ao_pins[] = { PIN(GPIO_TEST_N, AO_OFF) };
+static const unsigned int pwm_f_ao_pins[] = { GPIO_TEST_N };
-static const unsigned int i2s_am_clk_out_ao_pins[] = { PIN(GPIOAO_8, AO_OFF) };
-static const unsigned int i2s_ao_clk_out_ao_pins[] = { PIN(GPIOAO_9, AO_OFF) };
-static const unsigned int i2s_lr_clk_out_ao_pins[] = { PIN(GPIOAO_10, AO_OFF) };
-static const unsigned int i2s_out_ch01_ao_pins[] = { PIN(GPIOAO_11, AO_OFF) };
+static const unsigned int i2s_am_clk_out_ao_pins[] = { GPIOAO_8 };
+static const unsigned int i2s_ao_clk_out_ao_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_lr_clk_out_ao_pins[] = { GPIOAO_10 };
+static const unsigned int i2s_out_ch01_ao_pins[] = { GPIOAO_11 };
-static const unsigned int hdmi_cec_ao_pins[] = { PIN(GPIOAO_12, AO_OFF) };
+static const unsigned int hdmi_cec_ao_pins[] = { GPIOAO_12 };
static struct meson_pmx_group meson8_cbus_groups[] = {
- GPIO_GROUP(GPIOX_0, 0),
- GPIO_GROUP(GPIOX_1, 0),
- GPIO_GROUP(GPIOX_2, 0),
- GPIO_GROUP(GPIOX_3, 0),
- GPIO_GROUP(GPIOX_4, 0),
- GPIO_GROUP(GPIOX_5, 0),
- GPIO_GROUP(GPIOX_6, 0),
- GPIO_GROUP(GPIOX_7, 0),
- GPIO_GROUP(GPIOX_8, 0),
- GPIO_GROUP(GPIOX_9, 0),
- GPIO_GROUP(GPIOX_10, 0),
- GPIO_GROUP(GPIOX_11, 0),
- GPIO_GROUP(GPIOX_12, 0),
- GPIO_GROUP(GPIOX_13, 0),
- GPIO_GROUP(GPIOX_14, 0),
- GPIO_GROUP(GPIOX_15, 0),
- GPIO_GROUP(GPIOX_16, 0),
- GPIO_GROUP(GPIOX_17, 0),
- GPIO_GROUP(GPIOX_18, 0),
- GPIO_GROUP(GPIOX_19, 0),
- GPIO_GROUP(GPIOX_20, 0),
- GPIO_GROUP(GPIOX_21, 0),
- GPIO_GROUP(GPIOY_0, 0),
- GPIO_GROUP(GPIOY_1, 0),
- GPIO_GROUP(GPIOY_2, 0),
- GPIO_GROUP(GPIOY_3, 0),
- GPIO_GROUP(GPIOY_4, 0),
- GPIO_GROUP(GPIOY_5, 0),
- GPIO_GROUP(GPIOY_6, 0),
- GPIO_GROUP(GPIOY_7, 0),
- GPIO_GROUP(GPIOY_8, 0),
- GPIO_GROUP(GPIOY_9, 0),
- GPIO_GROUP(GPIOY_10, 0),
- GPIO_GROUP(GPIOY_11, 0),
- GPIO_GROUP(GPIOY_12, 0),
- GPIO_GROUP(GPIOY_13, 0),
- GPIO_GROUP(GPIOY_14, 0),
- GPIO_GROUP(GPIOY_15, 0),
- GPIO_GROUP(GPIOY_16, 0),
- GPIO_GROUP(GPIODV_0, 0),
- GPIO_GROUP(GPIODV_1, 0),
- GPIO_GROUP(GPIODV_2, 0),
- GPIO_GROUP(GPIODV_3, 0),
- GPIO_GROUP(GPIODV_4, 0),
- GPIO_GROUP(GPIODV_5, 0),
- GPIO_GROUP(GPIODV_6, 0),
- GPIO_GROUP(GPIODV_7, 0),
- GPIO_GROUP(GPIODV_8, 0),
- GPIO_GROUP(GPIODV_9, 0),
- GPIO_GROUP(GPIODV_10, 0),
- GPIO_GROUP(GPIODV_11, 0),
- GPIO_GROUP(GPIODV_12, 0),
- GPIO_GROUP(GPIODV_13, 0),
- GPIO_GROUP(GPIODV_14, 0),
- GPIO_GROUP(GPIODV_15, 0),
- GPIO_GROUP(GPIODV_16, 0),
- GPIO_GROUP(GPIODV_17, 0),
- GPIO_GROUP(GPIODV_18, 0),
- GPIO_GROUP(GPIODV_19, 0),
- GPIO_GROUP(GPIODV_20, 0),
- GPIO_GROUP(GPIODV_21, 0),
- GPIO_GROUP(GPIODV_22, 0),
- GPIO_GROUP(GPIODV_23, 0),
- GPIO_GROUP(GPIODV_24, 0),
- GPIO_GROUP(GPIODV_25, 0),
- GPIO_GROUP(GPIODV_26, 0),
- GPIO_GROUP(GPIODV_27, 0),
- GPIO_GROUP(GPIODV_28, 0),
- GPIO_GROUP(GPIODV_29, 0),
- GPIO_GROUP(GPIOH_0, 0),
- GPIO_GROUP(GPIOH_1, 0),
- GPIO_GROUP(GPIOH_2, 0),
- GPIO_GROUP(GPIOH_3, 0),
- GPIO_GROUP(GPIOH_4, 0),
- GPIO_GROUP(GPIOH_5, 0),
- GPIO_GROUP(GPIOH_6, 0),
- GPIO_GROUP(GPIOH_7, 0),
- GPIO_GROUP(GPIOH_8, 0),
- GPIO_GROUP(GPIOH_9, 0),
- GPIO_GROUP(GPIOZ_0, 0),
- GPIO_GROUP(GPIOZ_1, 0),
- GPIO_GROUP(GPIOZ_2, 0),
- GPIO_GROUP(GPIOZ_3, 0),
- GPIO_GROUP(GPIOZ_4, 0),
- GPIO_GROUP(GPIOZ_5, 0),
- GPIO_GROUP(GPIOZ_6, 0),
- GPIO_GROUP(GPIOZ_7, 0),
- GPIO_GROUP(GPIOZ_8, 0),
- GPIO_GROUP(GPIOZ_9, 0),
- GPIO_GROUP(GPIOZ_10, 0),
- GPIO_GROUP(GPIOZ_11, 0),
- GPIO_GROUP(GPIOZ_12, 0),
- GPIO_GROUP(GPIOZ_13, 0),
- GPIO_GROUP(GPIOZ_14, 0),
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+ GPIO_GROUP(GPIOX_19),
+ GPIO_GROUP(GPIOX_20),
+ GPIO_GROUP(GPIOX_21),
+ GPIO_GROUP(GPIOY_0),
+ GPIO_GROUP(GPIOY_1),
+ GPIO_GROUP(GPIOY_2),
+ GPIO_GROUP(GPIOY_3),
+ GPIO_GROUP(GPIOY_4),
+ GPIO_GROUP(GPIOY_5),
+ GPIO_GROUP(GPIOY_6),
+ GPIO_GROUP(GPIOY_7),
+ GPIO_GROUP(GPIOY_8),
+ GPIO_GROUP(GPIOY_9),
+ GPIO_GROUP(GPIOY_10),
+ GPIO_GROUP(GPIOY_11),
+ GPIO_GROUP(GPIOY_12),
+ GPIO_GROUP(GPIOY_13),
+ GPIO_GROUP(GPIOY_14),
+ GPIO_GROUP(GPIOY_15),
+ GPIO_GROUP(GPIOY_16),
+ GPIO_GROUP(GPIODV_0),
+ GPIO_GROUP(GPIODV_1),
+ GPIO_GROUP(GPIODV_2),
+ GPIO_GROUP(GPIODV_3),
+ GPIO_GROUP(GPIODV_4),
+ GPIO_GROUP(GPIODV_5),
+ GPIO_GROUP(GPIODV_6),
+ GPIO_GROUP(GPIODV_7),
+ GPIO_GROUP(GPIODV_8),
+ GPIO_GROUP(GPIODV_9),
+ GPIO_GROUP(GPIODV_10),
+ GPIO_GROUP(GPIODV_11),
+ GPIO_GROUP(GPIODV_12),
+ GPIO_GROUP(GPIODV_13),
+ GPIO_GROUP(GPIODV_14),
+ GPIO_GROUP(GPIODV_15),
+ GPIO_GROUP(GPIODV_16),
+ GPIO_GROUP(GPIODV_17),
+ GPIO_GROUP(GPIODV_18),
+ GPIO_GROUP(GPIODV_19),
+ GPIO_GROUP(GPIODV_20),
+ GPIO_GROUP(GPIODV_21),
+ GPIO_GROUP(GPIODV_22),
+ GPIO_GROUP(GPIODV_23),
+ GPIO_GROUP(GPIODV_24),
+ GPIO_GROUP(GPIODV_25),
+ GPIO_GROUP(GPIODV_26),
+ GPIO_GROUP(GPIODV_27),
+ GPIO_GROUP(GPIODV_28),
+ GPIO_GROUP(GPIODV_29),
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+ GPIO_GROUP(GPIOH_4),
+ GPIO_GROUP(GPIOH_5),
+ GPIO_GROUP(GPIOH_6),
+ GPIO_GROUP(GPIOH_7),
+ GPIO_GROUP(GPIOH_8),
+ GPIO_GROUP(GPIOH_9),
+ GPIO_GROUP(GPIOZ_0),
+ GPIO_GROUP(GPIOZ_1),
+ GPIO_GROUP(GPIOZ_2),
+ GPIO_GROUP(GPIOZ_3),
+ GPIO_GROUP(GPIOZ_4),
+ GPIO_GROUP(GPIOZ_5),
+ GPIO_GROUP(GPIOZ_6),
+ GPIO_GROUP(GPIOZ_7),
+ GPIO_GROUP(GPIOZ_8),
+ GPIO_GROUP(GPIOZ_9),
+ GPIO_GROUP(GPIOZ_10),
+ GPIO_GROUP(GPIOZ_11),
+ GPIO_GROUP(GPIOZ_12),
+ GPIO_GROUP(GPIOZ_13),
+ GPIO_GROUP(GPIOZ_14),
/* bank X */
GROUP(sd_d0_a, 8, 5),
@@ -727,22 +716,22 @@ static struct meson_pmx_group meson8_cbus_groups[] = {
};
static struct meson_pmx_group meson8_aobus_groups[] = {
- GPIO_GROUP(GPIOAO_0, AO_OFF),
- GPIO_GROUP(GPIOAO_1, AO_OFF),
- GPIO_GROUP(GPIOAO_2, AO_OFF),
- GPIO_GROUP(GPIOAO_3, AO_OFF),
- GPIO_GROUP(GPIOAO_4, AO_OFF),
- GPIO_GROUP(GPIOAO_5, AO_OFF),
- GPIO_GROUP(GPIOAO_6, AO_OFF),
- GPIO_GROUP(GPIOAO_7, AO_OFF),
- GPIO_GROUP(GPIOAO_8, AO_OFF),
- GPIO_GROUP(GPIOAO_9, AO_OFF),
- GPIO_GROUP(GPIOAO_10, AO_OFF),
- GPIO_GROUP(GPIOAO_11, AO_OFF),
- GPIO_GROUP(GPIOAO_12, AO_OFF),
- GPIO_GROUP(GPIOAO_13, AO_OFF),
- GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
- GPIO_GROUP(GPIO_TEST_N, AO_OFF),
+ GPIO_GROUP(GPIOAO_0),
+ GPIO_GROUP(GPIOAO_1),
+ GPIO_GROUP(GPIOAO_2),
+ GPIO_GROUP(GPIOAO_3),
+ GPIO_GROUP(GPIOAO_4),
+ GPIO_GROUP(GPIOAO_5),
+ GPIO_GROUP(GPIOAO_6),
+ GPIO_GROUP(GPIOAO_7),
+ GPIO_GROUP(GPIOAO_8),
+ GPIO_GROUP(GPIOAO_9),
+ GPIO_GROUP(GPIOAO_10),
+ GPIO_GROUP(GPIOAO_11),
+ GPIO_GROUP(GPIOAO_12),
+ GPIO_GROUP(GPIOAO_13),
+ GPIO_GROUP(GPIO_BSD_EN),
+ GPIO_GROUP(GPIO_TEST_N),
/* bank AO */
GROUP(uart_tx_ao_a, 0, 12),
@@ -1041,24 +1030,23 @@ static struct meson_pmx_func meson8_aobus_functions[] = {
};
static struct meson_bank meson8_cbus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 112, 133, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
- BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_16, 0), 95, 111, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
- BANK("DV", PIN(GPIODV_0, 0), PIN(GPIODV_29, 0), 65, 94, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
- BANK("H", PIN(GPIOH_0, 0), PIN(GPIOH_9, 0), 29, 38, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
- BANK("Z", PIN(GPIOZ_0, 0), PIN(GPIOZ_14, 0), 14, 28, 1, 0, 1, 0, 3, 17, 4, 17, 5, 17),
- BANK("CARD", PIN(CARD_0, 0), PIN(CARD_6, 0), 58, 64, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
- BANK("BOOT", PIN(BOOT_0, 0), PIN(BOOT_18, 0), 39, 57, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", GPIOX_0, GPIOX_21, 112, 133, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
+ BANK("Y", GPIOY_0, GPIOY_16, 95, 111, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", GPIODV_0, GPIODV_29, 65, 94, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
+ BANK("H", GPIOH_0, GPIOH_9, 29, 38, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
+ BANK("Z", GPIOZ_0, GPIOZ_14, 14, 28, 1, 0, 1, 0, 3, 17, 4, 17, 5, 17),
+ BANK("CARD", CARD_0, CARD_6, 58, 64, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
+ BANK("BOOT", BOOT_0, BOOT_18, 39, 57, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
};
static struct meson_bank meson8_aobus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
+static struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
.name = "cbus-banks",
- .pin_base = 0,
.pins = meson8_cbus_pins,
.groups = meson8_cbus_groups,
.funcs = meson8_cbus_functions,
@@ -1067,11 +1055,11 @@ struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson8_cbus_groups),
.num_funcs = ARRAY_SIZE(meson8_cbus_functions),
.num_banks = ARRAY_SIZE(meson8_cbus_banks),
+ .pmx_ops = &meson8_pmx_ops,
};
-struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
+static struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
.name = "ao-bank",
- .pin_base = 120,
.pins = meson8_aobus_pins,
.groups = meson8_aobus_groups,
.funcs = meson8_aobus_functions,
@@ -1080,4 +1068,26 @@ struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson8_aobus_groups),
.num_funcs = ARRAY_SIZE(meson8_aobus_functions),
.num_banks = ARRAY_SIZE(meson8_aobus_banks),
+ .pmx_ops = &meson8_pmx_ops,
+};
+
+static const struct of_device_id meson8_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson8-cbus-pinctrl",
+ .data = &meson8_cbus_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson8-aobus-pinctrl",
+ .data = &meson8_aobus_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson8_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson8-pinctrl",
+ .of_match_table = meson8_pinctrl_dt_match,
+ },
};
+builtin_platform_driver(meson8_pinctrl_driver);
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 71f216b5b0b9..5bd808dc81e1 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -14,408 +14,405 @@
#include <dt-bindings/gpio/meson8b-gpio.h>
#include "pinctrl-meson.h"
-
-#define AO_OFF 130
+#include "pinctrl-meson8-pmx.h"
static const struct pinctrl_pin_desc meson8b_cbus_pins[] = {
- MESON_PIN(GPIOX_0, 0),
- MESON_PIN(GPIOX_1, 0),
- MESON_PIN(GPIOX_2, 0),
- MESON_PIN(GPIOX_3, 0),
- MESON_PIN(GPIOX_4, 0),
- MESON_PIN(GPIOX_5, 0),
- MESON_PIN(GPIOX_6, 0),
- MESON_PIN(GPIOX_7, 0),
- MESON_PIN(GPIOX_8, 0),
- MESON_PIN(GPIOX_9, 0),
- MESON_PIN(GPIOX_10, 0),
- MESON_PIN(GPIOX_11, 0),
- MESON_PIN(GPIOX_16, 0),
- MESON_PIN(GPIOX_17, 0),
- MESON_PIN(GPIOX_18, 0),
- MESON_PIN(GPIOX_19, 0),
- MESON_PIN(GPIOX_20, 0),
- MESON_PIN(GPIOX_21, 0),
-
- MESON_PIN(GPIOY_0, 0),
- MESON_PIN(GPIOY_1, 0),
- MESON_PIN(GPIOY_3, 0),
- MESON_PIN(GPIOY_6, 0),
- MESON_PIN(GPIOY_7, 0),
- MESON_PIN(GPIOY_8, 0),
- MESON_PIN(GPIOY_9, 0),
- MESON_PIN(GPIOY_10, 0),
- MESON_PIN(GPIOY_11, 0),
- MESON_PIN(GPIOY_12, 0),
- MESON_PIN(GPIOY_13, 0),
- MESON_PIN(GPIOY_14, 0),
-
- MESON_PIN(GPIODV_9, 0),
- MESON_PIN(GPIODV_24, 0),
- MESON_PIN(GPIODV_25, 0),
- MESON_PIN(GPIODV_26, 0),
- MESON_PIN(GPIODV_27, 0),
- MESON_PIN(GPIODV_28, 0),
- MESON_PIN(GPIODV_29, 0),
-
- MESON_PIN(GPIOH_0, 0),
- MESON_PIN(GPIOH_1, 0),
- MESON_PIN(GPIOH_2, 0),
- MESON_PIN(GPIOH_3, 0),
- MESON_PIN(GPIOH_4, 0),
- MESON_PIN(GPIOH_5, 0),
- MESON_PIN(GPIOH_6, 0),
- MESON_PIN(GPIOH_7, 0),
- MESON_PIN(GPIOH_8, 0),
- MESON_PIN(GPIOH_9, 0),
-
- MESON_PIN(CARD_0, 0),
- MESON_PIN(CARD_1, 0),
- MESON_PIN(CARD_2, 0),
- MESON_PIN(CARD_3, 0),
- MESON_PIN(CARD_4, 0),
- MESON_PIN(CARD_5, 0),
- MESON_PIN(CARD_6, 0),
-
- MESON_PIN(BOOT_0, 0),
- MESON_PIN(BOOT_1, 0),
- MESON_PIN(BOOT_2, 0),
- MESON_PIN(BOOT_3, 0),
- MESON_PIN(BOOT_4, 0),
- MESON_PIN(BOOT_5, 0),
- MESON_PIN(BOOT_6, 0),
- MESON_PIN(BOOT_7, 0),
- MESON_PIN(BOOT_8, 0),
- MESON_PIN(BOOT_9, 0),
- MESON_PIN(BOOT_10, 0),
- MESON_PIN(BOOT_11, 0),
- MESON_PIN(BOOT_12, 0),
- MESON_PIN(BOOT_13, 0),
- MESON_PIN(BOOT_14, 0),
- MESON_PIN(BOOT_15, 0),
- MESON_PIN(BOOT_16, 0),
- MESON_PIN(BOOT_17, 0),
- MESON_PIN(BOOT_18, 0),
-
- MESON_PIN(DIF_0_P, 0),
- MESON_PIN(DIF_0_N, 0),
- MESON_PIN(DIF_1_P, 0),
- MESON_PIN(DIF_1_N, 0),
- MESON_PIN(DIF_2_P, 0),
- MESON_PIN(DIF_2_N, 0),
- MESON_PIN(DIF_3_P, 0),
- MESON_PIN(DIF_3_N, 0),
- MESON_PIN(DIF_4_P, 0),
- MESON_PIN(DIF_4_N, 0),
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+ MESON_PIN(GPIOX_19),
+ MESON_PIN(GPIOX_20),
+ MESON_PIN(GPIOX_21),
+
+ MESON_PIN(GPIOY_0),
+ MESON_PIN(GPIOY_1),
+ MESON_PIN(GPIOY_3),
+ MESON_PIN(GPIOY_6),
+ MESON_PIN(GPIOY_7),
+ MESON_PIN(GPIOY_8),
+ MESON_PIN(GPIOY_9),
+ MESON_PIN(GPIOY_10),
+ MESON_PIN(GPIOY_11),
+ MESON_PIN(GPIOY_12),
+ MESON_PIN(GPIOY_13),
+ MESON_PIN(GPIOY_14),
+
+ MESON_PIN(GPIODV_9),
+ MESON_PIN(GPIODV_24),
+ MESON_PIN(GPIODV_25),
+ MESON_PIN(GPIODV_26),
+ MESON_PIN(GPIODV_27),
+ MESON_PIN(GPIODV_28),
+ MESON_PIN(GPIODV_29),
+
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+ MESON_PIN(GPIOH_4),
+ MESON_PIN(GPIOH_5),
+ MESON_PIN(GPIOH_6),
+ MESON_PIN(GPIOH_7),
+ MESON_PIN(GPIOH_8),
+ MESON_PIN(GPIOH_9),
+
+ MESON_PIN(CARD_0),
+ MESON_PIN(CARD_1),
+ MESON_PIN(CARD_2),
+ MESON_PIN(CARD_3),
+ MESON_PIN(CARD_4),
+ MESON_PIN(CARD_5),
+ MESON_PIN(CARD_6),
+
+ MESON_PIN(BOOT_0),
+ MESON_PIN(BOOT_1),
+ MESON_PIN(BOOT_2),
+ MESON_PIN(BOOT_3),
+ MESON_PIN(BOOT_4),
+ MESON_PIN(BOOT_5),
+ MESON_PIN(BOOT_6),
+ MESON_PIN(BOOT_7),
+ MESON_PIN(BOOT_8),
+ MESON_PIN(BOOT_9),
+ MESON_PIN(BOOT_10),
+ MESON_PIN(BOOT_11),
+ MESON_PIN(BOOT_12),
+ MESON_PIN(BOOT_13),
+ MESON_PIN(BOOT_14),
+ MESON_PIN(BOOT_15),
+ MESON_PIN(BOOT_16),
+ MESON_PIN(BOOT_17),
+ MESON_PIN(BOOT_18),
+
+ MESON_PIN(DIF_0_P),
+ MESON_PIN(DIF_0_N),
+ MESON_PIN(DIF_1_P),
+ MESON_PIN(DIF_1_N),
+ MESON_PIN(DIF_2_P),
+ MESON_PIN(DIF_2_N),
+ MESON_PIN(DIF_3_P),
+ MESON_PIN(DIF_3_N),
+ MESON_PIN(DIF_4_P),
+ MESON_PIN(DIF_4_N),
};
static const struct pinctrl_pin_desc meson8b_aobus_pins[] = {
- MESON_PIN(GPIOAO_0, AO_OFF),
- MESON_PIN(GPIOAO_1, AO_OFF),
- MESON_PIN(GPIOAO_2, AO_OFF),
- MESON_PIN(GPIOAO_3, AO_OFF),
- MESON_PIN(GPIOAO_4, AO_OFF),
- MESON_PIN(GPIOAO_5, AO_OFF),
- MESON_PIN(GPIOAO_6, AO_OFF),
- MESON_PIN(GPIOAO_7, AO_OFF),
- MESON_PIN(GPIOAO_8, AO_OFF),
- MESON_PIN(GPIOAO_9, AO_OFF),
- MESON_PIN(GPIOAO_10, AO_OFF),
- MESON_PIN(GPIOAO_11, AO_OFF),
- MESON_PIN(GPIOAO_12, AO_OFF),
- MESON_PIN(GPIOAO_13, AO_OFF),
+ MESON_PIN(GPIOAO_0),
+ MESON_PIN(GPIOAO_1),
+ MESON_PIN(GPIOAO_2),
+ MESON_PIN(GPIOAO_3),
+ MESON_PIN(GPIOAO_4),
+ MESON_PIN(GPIOAO_5),
+ MESON_PIN(GPIOAO_6),
+ MESON_PIN(GPIOAO_7),
+ MESON_PIN(GPIOAO_8),
+ MESON_PIN(GPIOAO_9),
+ MESON_PIN(GPIOAO_10),
+ MESON_PIN(GPIOAO_11),
+ MESON_PIN(GPIOAO_12),
+ MESON_PIN(GPIOAO_13),
/*
	 * The following 2 pins are not mentioned in the public datasheet
* According to this datasheet, they can't be used with the gpio
* interrupt controller
*/
- MESON_PIN(GPIO_BSD_EN, AO_OFF),
- MESON_PIN(GPIO_TEST_N, AO_OFF),
+ MESON_PIN(GPIO_BSD_EN),
+ MESON_PIN(GPIO_TEST_N),
};
/* bank X */
-static const unsigned int sd_d0_a_pins[] = { PIN(GPIOX_0, 0) };
-static const unsigned int sd_d1_a_pins[] = { PIN(GPIOX_1, 0) };
-static const unsigned int sd_d2_a_pins[] = { PIN(GPIOX_2, 0) };
-static const unsigned int sd_d3_a_pins[] = { PIN(GPIOX_3, 0) };
-static const unsigned int sdxc_d0_0_a_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int sdxc_d47_a_pins[] = { PIN(GPIOX_4, 0), PIN(GPIOX_5, 0),
- PIN(GPIOX_6, 0), PIN(GPIOX_7, 0) };
-static const unsigned int sdxc_d13_0_a_pins[] = { PIN(GPIOX_5, 0), PIN(GPIOX_6, 0),
- PIN(GPIOX_7, 0) };
-static const unsigned int sd_clk_a_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int sd_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int xtal_32k_out_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int xtal_24m_out_pins[] = { PIN(GPIOX_11, 0) };
-static const unsigned int uart_tx_b0_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int uart_rx_b0_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int uart_cts_b0_pins[] = { PIN(GPIOX_18, 0) };
-static const unsigned int uart_rts_b0_pins[] = { PIN(GPIOX_19, 0) };
-
-static const unsigned int sdxc_d0_1_a_pins[] = { PIN(GPIOX_0, 0) };
-static const unsigned int sdxc_d13_1_a_pins[] = { PIN(GPIOX_1, 0), PIN(GPIOX_2, 0),
- PIN(GPIOX_3, 0) };
-static const unsigned int pcm_out_a_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int pcm_in_a_pins[] = { PIN(GPIOX_5, 0) };
-static const unsigned int pcm_fs_a_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int pcm_clk_a_pins[] = { PIN(GPIOX_7, 0) };
-static const unsigned int sdxc_clk_a_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int sdxc_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int pwm_vs_0_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int pwm_e_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int pwm_vs_1_pins[] = { PIN(GPIOX_11, 0) };
-
-static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_5, 0) };
-static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_7, 0) };
-static const unsigned int uart_tx_b1_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int uart_rx_b1_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int uart_cts_b1_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int uart_rts_b1_pins[] = { PIN(GPIOX_20, 0) };
-
-static const unsigned int iso7816_0_clk_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int iso7816_0_data_pins[] = { PIN(GPIOX_7, 0) };
-static const unsigned int spi_sclk_0_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int spi_miso_0_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int spi_mosi_0_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int iso7816_det_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int iso7816_reset_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int iso7816_1_clk_pins[] = { PIN(GPIOX_18, 0) };
-static const unsigned int iso7816_1_data_pins[] = { PIN(GPIOX_19, 0) };
-static const unsigned int spi_ss0_0_pins[] = { PIN(GPIOX_20, 0) };
-
-static const unsigned int tsin_clk_b_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int tsin_sop_b_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int tsin_d0_b_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int pwm_b_pins[] = { PIN(GPIOX_11, 0) };
-static const unsigned int i2c_sda_d0_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int i2c_sck_d0_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int tsin_d_valid_b_pins[] = { PIN(GPIOX_20, 0) };
+static const unsigned int sd_d0_a_pins[] = { GPIOX_0 };
+static const unsigned int sd_d1_a_pins[] = { GPIOX_1 };
+static const unsigned int sd_d2_a_pins[] = { GPIOX_2 };
+static const unsigned int sd_d3_a_pins[] = { GPIOX_3 };
+static const unsigned int sdxc_d0_0_a_pins[] = { GPIOX_4 };
+static const unsigned int sdxc_d47_a_pins[] = { GPIOX_4, GPIOX_5,
+ GPIOX_6, GPIOX_7 };
+static const unsigned int sdxc_d13_0_a_pins[] = { GPIOX_5, GPIOX_6,
+ GPIOX_7 };
+static const unsigned int sd_clk_a_pins[] = { GPIOX_8 };
+static const unsigned int sd_cmd_a_pins[] = { GPIOX_9 };
+static const unsigned int xtal_32k_out_pins[] = { GPIOX_10 };
+static const unsigned int xtal_24m_out_pins[] = { GPIOX_11 };
+static const unsigned int uart_tx_b0_pins[] = { GPIOX_16 };
+static const unsigned int uart_rx_b0_pins[] = { GPIOX_17 };
+static const unsigned int uart_cts_b0_pins[] = { GPIOX_18 };
+static const unsigned int uart_rts_b0_pins[] = { GPIOX_19 };
+
+static const unsigned int sdxc_d0_1_a_pins[] = { GPIOX_0 };
+static const unsigned int sdxc_d13_1_a_pins[] = { GPIOX_1, GPIOX_2,
+ GPIOX_3 };
+static const unsigned int pcm_out_a_pins[] = { GPIOX_4 };
+static const unsigned int pcm_in_a_pins[] = { GPIOX_5 };
+static const unsigned int pcm_fs_a_pins[] = { GPIOX_6 };
+static const unsigned int pcm_clk_a_pins[] = { GPIOX_7 };
+static const unsigned int sdxc_clk_a_pins[] = { GPIOX_8 };
+static const unsigned int sdxc_cmd_a_pins[] = { GPIOX_9 };
+static const unsigned int pwm_vs_0_pins[] = { GPIOX_10 };
+static const unsigned int pwm_e_pins[] = { GPIOX_10 };
+static const unsigned int pwm_vs_1_pins[] = { GPIOX_11 };
+
+static const unsigned int uart_tx_a_pins[] = { GPIOX_4 };
+static const unsigned int uart_rx_a_pins[] = { GPIOX_5 };
+static const unsigned int uart_cts_a_pins[] = { GPIOX_6 };
+static const unsigned int uart_rts_a_pins[] = { GPIOX_7 };
+static const unsigned int uart_tx_b1_pins[] = { GPIOX_8 };
+static const unsigned int uart_rx_b1_pins[] = { GPIOX_9 };
+static const unsigned int uart_cts_b1_pins[] = { GPIOX_10 };
+static const unsigned int uart_rts_b1_pins[] = { GPIOX_20 };
+
+static const unsigned int iso7816_0_clk_pins[] = { GPIOX_6 };
+static const unsigned int iso7816_0_data_pins[] = { GPIOX_7 };
+static const unsigned int spi_sclk_0_pins[] = { GPIOX_8 };
+static const unsigned int spi_miso_0_pins[] = { GPIOX_9 };
+static const unsigned int spi_mosi_0_pins[] = { GPIOX_10 };
+static const unsigned int iso7816_det_pins[] = { GPIOX_16 };
+static const unsigned int iso7816_reset_pins[] = { GPIOX_17 };
+static const unsigned int iso7816_1_clk_pins[] = { GPIOX_18 };
+static const unsigned int iso7816_1_data_pins[] = { GPIOX_19 };
+static const unsigned int spi_ss0_0_pins[] = { GPIOX_20 };
+
+static const unsigned int tsin_clk_b_pins[] = { GPIOX_8 };
+static const unsigned int tsin_sop_b_pins[] = { GPIOX_9 };
+static const unsigned int tsin_d0_b_pins[] = { GPIOX_10 };
+static const unsigned int pwm_b_pins[] = { GPIOX_11 };
+static const unsigned int i2c_sda_d0_pins[] = { GPIOX_16 };
+static const unsigned int i2c_sck_d0_pins[] = { GPIOX_17 };
+static const unsigned int tsin_d_valid_b_pins[] = { GPIOX_20 };
/* bank Y */
-static const unsigned int tsin_d_valid_a_pins[] = { PIN(GPIOY_0, 0) };
-static const unsigned int tsin_sop_a_pins[] = { PIN(GPIOY_1, 0) };
-static const unsigned int tsin_d17_a_pins[] = { PIN(GPIOY_6, 0), PIN(GPIOY_7, 0),
- PIN(GPIOY_10, 0), PIN(GPIOY_11, 0),
- PIN(GPIOY_12, 0), PIN(GPIOY_13, 0),
- PIN(GPIOY_14, 0) };
-static const unsigned int tsin_clk_a_pins[] = { PIN(GPIOY_8, 0) };
-static const unsigned int tsin_d0_a_pins[] = { PIN(GPIOY_9, 0) };
+static const unsigned int tsin_d_valid_a_pins[] = { GPIOY_0 };
+static const unsigned int tsin_sop_a_pins[] = { GPIOY_1 };
+static const unsigned int tsin_d17_a_pins[] = {
+ GPIOY_6, GPIOY_7, GPIOY_10, GPIOY_11, GPIOY_12, GPIOY_13, GPIOY_14,
+};
+static const unsigned int tsin_clk_a_pins[] = { GPIOY_8 };
+static const unsigned int tsin_d0_a_pins[] = { GPIOY_9 };
-static const unsigned int spdif_out_0_pins[] = { PIN(GPIOY_3, 0) };
+static const unsigned int spdif_out_0_pins[] = { GPIOY_3 };
-static const unsigned int xtal_24m_pins[] = { PIN(GPIOY_3, 0) };
-static const unsigned int iso7816_2_clk_pins[] = { PIN(GPIOY_13, 0) };
-static const unsigned int iso7816_2_data_pins[] = { PIN(GPIOY_14, 0) };
+static const unsigned int xtal_24m_pins[] = { GPIOY_3 };
+static const unsigned int iso7816_2_clk_pins[] = { GPIOY_13 };
+static const unsigned int iso7816_2_data_pins[] = { GPIOY_14 };
/* bank DV */
-static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, 0) };
-static const unsigned int pwm_c0_pins[] = { PIN(GPIODV_29, 0) };
+static const unsigned int pwm_d_pins[] = { GPIODV_28 };
+static const unsigned int pwm_c0_pins[] = { GPIODV_29 };
-static const unsigned int pwm_vs_2_pins[] = { PIN(GPIODV_9, 0) };
-static const unsigned int pwm_vs_3_pins[] = { PIN(GPIODV_28, 0) };
-static const unsigned int pwm_vs_4_pins[] = { PIN(GPIODV_29, 0) };
+static const unsigned int pwm_vs_2_pins[] = { GPIODV_9 };
+static const unsigned int pwm_vs_3_pins[] = { GPIODV_28 };
+static const unsigned int pwm_vs_4_pins[] = { GPIODV_29 };
-static const unsigned int xtal24_out_pins[] = { PIN(GPIODV_29, 0) };
+static const unsigned int xtal24_out_pins[] = { GPIODV_29 };
-static const unsigned int uart_tx_c_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int uart_rx_c_pins[] = { PIN(GPIODV_25, 0) };
-static const unsigned int uart_cts_c_pins[] = { PIN(GPIODV_26, 0) };
-static const unsigned int uart_rts_c_pins[] = { PIN(GPIODV_27, 0) };
+static const unsigned int uart_tx_c_pins[] = { GPIODV_24 };
+static const unsigned int uart_rx_c_pins[] = { GPIODV_25 };
+static const unsigned int uart_cts_c_pins[] = { GPIODV_26 };
+static const unsigned int uart_rts_c_pins[] = { GPIODV_27 };
-static const unsigned int pwm_c1_pins[] = { PIN(GPIODV_9, 0) };
+static const unsigned int pwm_c1_pins[] = { GPIODV_9 };
-static const unsigned int i2c_sda_a_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int i2c_sck_a_pins[] = { PIN(GPIODV_25, 0) };
-static const unsigned int i2c_sda_b0_pins[] = { PIN(GPIODV_26, 0) };
-static const unsigned int i2c_sck_b0_pins[] = { PIN(GPIODV_27, 0) };
-static const unsigned int i2c_sda_c0_pins[] = { PIN(GPIODV_28, 0) };
-static const unsigned int i2c_sck_c0_pins[] = { PIN(GPIODV_29, 0) };
+static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 };
+static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 };
+static const unsigned int i2c_sda_b0_pins[] = { GPIODV_26 };
+static const unsigned int i2c_sck_b0_pins[] = { GPIODV_27 };
+static const unsigned int i2c_sda_c0_pins[] = { GPIODV_28 };
+static const unsigned int i2c_sck_c0_pins[] = { GPIODV_29 };
/* bank H */
-static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, 0) };
-static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, 0) };
-static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, 0) };
-static const unsigned int hdmi_cec_0_pins[] = { PIN(GPIOH_3, 0) };
-static const unsigned int eth_txd1_0_pins[] = { PIN(GPIOH_5, 0) };
-static const unsigned int eth_txd0_0_pins[] = { PIN(GPIOH_6, 0) };
-static const unsigned int clk_24m_out_pins[] = { PIN(GPIOH_9, 0) };
-
-static const unsigned int spi_ss1_pins[] = { PIN(GPIOH_0, 0) };
-static const unsigned int spi_ss2_pins[] = { PIN(GPIOH_1, 0) };
-static const unsigned int spi_ss0_1_pins[] = { PIN(GPIOH_3, 0) };
-static const unsigned int spi_miso_1_pins[] = { PIN(GPIOH_4, 0) };
-static const unsigned int spi_mosi_1_pins[] = { PIN(GPIOH_5, 0) };
-static const unsigned int spi_sclk_1_pins[] = { PIN(GPIOH_6, 0) };
-
-static const unsigned int eth_txd3_pins[] = { PIN(GPIOH_7, 0) };
-static const unsigned int eth_txd2_pins[] = { PIN(GPIOH_8, 0) };
-static const unsigned int eth_tx_clk_pins[] = { PIN(GPIOH_9, 0) };
-
-static const unsigned int i2c_sda_b1_pins[] = { PIN(GPIOH_3, 0) };
-static const unsigned int i2c_sck_b1_pins[] = { PIN(GPIOH_4, 0) };
-static const unsigned int i2c_sda_c1_pins[] = { PIN(GPIOH_5, 0) };
-static const unsigned int i2c_sck_c1_pins[] = { PIN(GPIOH_6, 0) };
-static const unsigned int i2c_sda_d1_pins[] = { PIN(GPIOH_7, 0) };
-static const unsigned int i2c_sck_d1_pins[] = { PIN(GPIOH_8, 0) };
+static const unsigned int hdmi_hpd_pins[] = { GPIOH_0 };
+static const unsigned int hdmi_sda_pins[] = { GPIOH_1 };
+static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
+static const unsigned int hdmi_cec_0_pins[] = { GPIOH_3 };
+static const unsigned int eth_txd1_0_pins[] = { GPIOH_5 };
+static const unsigned int eth_txd0_0_pins[] = { GPIOH_6 };
+static const unsigned int clk_24m_out_pins[] = { GPIOH_9 };
+
+static const unsigned int spi_ss1_pins[] = { GPIOH_0 };
+static const unsigned int spi_ss2_pins[] = { GPIOH_1 };
+static const unsigned int spi_ss0_1_pins[] = { GPIOH_3 };
+static const unsigned int spi_miso_1_pins[] = { GPIOH_4 };
+static const unsigned int spi_mosi_1_pins[] = { GPIOH_5 };
+static const unsigned int spi_sclk_1_pins[] = { GPIOH_6 };
+
+static const unsigned int eth_txd3_pins[] = { GPIOH_7 };
+static const unsigned int eth_txd2_pins[] = { GPIOH_8 };
+static const unsigned int eth_tx_clk_pins[] = { GPIOH_9 };
+
+static const unsigned int i2c_sda_b1_pins[] = { GPIOH_3 };
+static const unsigned int i2c_sck_b1_pins[] = { GPIOH_4 };
+static const unsigned int i2c_sda_c1_pins[] = { GPIOH_5 };
+static const unsigned int i2c_sck_c1_pins[] = { GPIOH_6 };
+static const unsigned int i2c_sda_d1_pins[] = { GPIOH_7 };
+static const unsigned int i2c_sck_d1_pins[] = { GPIOH_8 };
/* bank BOOT */
-static const unsigned int nand_io_pins[] = { PIN(BOOT_0, 0), PIN(BOOT_1, 0),
- PIN(BOOT_2, 0), PIN(BOOT_3, 0),
- PIN(BOOT_4, 0), PIN(BOOT_5, 0),
- PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
-static const unsigned int nand_io_ce0_pins[] = { PIN(BOOT_8, 0) };
-static const unsigned int nand_io_ce1_pins[] = { PIN(BOOT_9, 0) };
-static const unsigned int nand_io_rb0_pins[] = { PIN(BOOT_10, 0) };
-static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, 0) };
-static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, 0) };
-static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, 0) };
-static const unsigned int nand_ren_clk_pins[] = { PIN(BOOT_14, 0) };
-static const unsigned int nand_dqs_15_pins[] = { PIN(BOOT_15, 0) };
-static const unsigned int nand_dqs_18_pins[] = { PIN(BOOT_18, 0) };
-
-static const unsigned int sdxc_d0_c_pins[] = { PIN(BOOT_0, 0)};
-static const unsigned int sdxc_d13_c_pins[] = { PIN(BOOT_1, 0), PIN(BOOT_2, 0),
- PIN(BOOT_3, 0) };
-static const unsigned int sdxc_d47_c_pins[] = { PIN(BOOT_4, 0), PIN(BOOT_5, 0),
- PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
-static const unsigned int sdxc_clk_c_pins[] = { PIN(BOOT_8, 0) };
-static const unsigned int sdxc_cmd_c_pins[] = { PIN(BOOT_10, 0) };
-static const unsigned int nor_d_pins[] = { PIN(BOOT_11, 0) };
-static const unsigned int nor_q_pins[] = { PIN(BOOT_12, 0) };
-static const unsigned int nor_c_pins[] = { PIN(BOOT_13, 0) };
-static const unsigned int nor_cs_pins[] = { PIN(BOOT_18, 0) };
-
-static const unsigned int sd_d0_c_pins[] = { PIN(BOOT_0, 0) };
-static const unsigned int sd_d1_c_pins[] = { PIN(BOOT_1, 0) };
-static const unsigned int sd_d2_c_pins[] = { PIN(BOOT_2, 0) };
-static const unsigned int sd_d3_c_pins[] = { PIN(BOOT_3, 0) };
-static const unsigned int sd_cmd_c_pins[] = { PIN(BOOT_8, 0) };
-static const unsigned int sd_clk_c_pins[] = { PIN(BOOT_10, 0) };
+static const unsigned int nand_io_pins[] = {
+ BOOT_0, BOOT_1, BOOT_2, BOOT_3, BOOT_4, BOOT_5, BOOT_6, BOOT_7
+};
+static const unsigned int nand_io_ce0_pins[] = { BOOT_8 };
+static const unsigned int nand_io_ce1_pins[] = { BOOT_9 };
+static const unsigned int nand_io_rb0_pins[] = { BOOT_10 };
+static const unsigned int nand_ale_pins[] = { BOOT_11 };
+static const unsigned int nand_cle_pins[] = { BOOT_12 };
+static const unsigned int nand_wen_clk_pins[] = { BOOT_13 };
+static const unsigned int nand_ren_clk_pins[] = { BOOT_14 };
+static const unsigned int nand_dqs_15_pins[] = { BOOT_15 };
+static const unsigned int nand_dqs_18_pins[] = { BOOT_18 };
+
+static const unsigned int sdxc_d0_c_pins[] = { BOOT_0 };
+static const unsigned int sdxc_d13_c_pins[] = { BOOT_1, BOOT_2,
+ BOOT_3 };
+static const unsigned int sdxc_d47_c_pins[] = { BOOT_4, BOOT_5,
+ BOOT_6, BOOT_7 };
+static const unsigned int sdxc_clk_c_pins[] = { BOOT_8 };
+static const unsigned int sdxc_cmd_c_pins[] = { BOOT_10 };
+static const unsigned int nor_d_pins[] = { BOOT_11 };
+static const unsigned int nor_q_pins[] = { BOOT_12 };
+static const unsigned int nor_c_pins[] = { BOOT_13 };
+static const unsigned int nor_cs_pins[] = { BOOT_18 };
+
+static const unsigned int sd_d0_c_pins[] = { BOOT_0 };
+static const unsigned int sd_d1_c_pins[] = { BOOT_1 };
+static const unsigned int sd_d2_c_pins[] = { BOOT_2 };
+static const unsigned int sd_d3_c_pins[] = { BOOT_3 };
+static const unsigned int sd_cmd_c_pins[] = { BOOT_8 };
+static const unsigned int sd_clk_c_pins[] = { BOOT_10 };
/* bank CARD */
-static const unsigned int sd_d1_b_pins[] = { PIN(CARD_0, 0) };
-static const unsigned int sd_d0_b_pins[] = { PIN(CARD_1, 0) };
-static const unsigned int sd_clk_b_pins[] = { PIN(CARD_2, 0) };
-static const unsigned int sd_cmd_b_pins[] = { PIN(CARD_3, 0) };
-static const unsigned int sd_d3_b_pins[] = { PIN(CARD_4, 0) };
-static const unsigned int sd_d2_b_pins[] = { PIN(CARD_5, 0) };
-
-static const unsigned int sdxc_d13_b_pins[] = { PIN(CARD_0, 0), PIN(CARD_4, 0),
- PIN(CARD_5, 0) };
-static const unsigned int sdxc_d0_b_pins[] = { PIN(CARD_1, 0) };
-static const unsigned int sdxc_clk_b_pins[] = { PIN(CARD_2, 0) };
-static const unsigned int sdxc_cmd_b_pins[] = { PIN(CARD_3, 0) };
+static const unsigned int sd_d1_b_pins[] = { CARD_0 };
+static const unsigned int sd_d0_b_pins[] = { CARD_1 };
+static const unsigned int sd_clk_b_pins[] = { CARD_2 };
+static const unsigned int sd_cmd_b_pins[] = { CARD_3 };
+static const unsigned int sd_d3_b_pins[] = { CARD_4 };
+static const unsigned int sd_d2_b_pins[] = { CARD_5 };
+
+static const unsigned int sdxc_d13_b_pins[] = { CARD_0, CARD_4,
+ CARD_5 };
+static const unsigned int sdxc_d0_b_pins[] = { CARD_1 };
+static const unsigned int sdxc_clk_b_pins[] = { CARD_2 };
+static const unsigned int sdxc_cmd_b_pins[] = { CARD_3 };
/* bank AO */
-static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, AO_OFF) };
-static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, AO_OFF) };
-static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, AO_OFF) };
-static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, AO_OFF) };
-static const unsigned int i2c_mst_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int i2c_mst_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
-static const unsigned int clk_32k_in_out_pins[] = { PIN(GPIOAO_6, AO_OFF) };
-static const unsigned int remote_input_pins[] = { PIN(GPIOAO_7, AO_OFF) };
-static const unsigned int hdmi_cec_1_pins[] = { PIN(GPIOAO_12, AO_OFF) };
-static const unsigned int ir_blaster_pins[] = { PIN(GPIOAO_13, AO_OFF) };
-
-static const unsigned int pwm_c2_pins[] = { PIN(GPIOAO_3, AO_OFF) };
-static const unsigned int i2c_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int i2c_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
-static const unsigned int ir_remote_out_pins[] = { PIN(GPIOAO_7, AO_OFF) };
-static const unsigned int i2s_am_clk_out_pins[] = { PIN(GPIOAO_8, AO_OFF) };
-static const unsigned int i2s_ao_clk_out_pins[] = { PIN(GPIOAO_9, AO_OFF) };
-static const unsigned int i2s_lr_clk_out_pins[] = { PIN(GPIOAO_10, AO_OFF) };
-static const unsigned int i2s_out_01_pins[] = { PIN(GPIOAO_11, AO_OFF) };
-
-static const unsigned int uart_tx_ao_b0_pins[] = { PIN(GPIOAO_0, AO_OFF) };
-static const unsigned int uart_rx_ao_b0_pins[] = { PIN(GPIOAO_1, AO_OFF) };
-static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, AO_OFF) };
-static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, AO_OFF) };
-static const unsigned int uart_tx_ao_b1_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int uart_rx_ao_b1_pins[] = { PIN(GPIOAO_5, AO_OFF) };
-static const unsigned int spdif_out_1_pins[] = { PIN(GPIOAO_6, AO_OFF) };
-
-static const unsigned int i2s_in_ch01_pins[] = { PIN(GPIOAO_6, AO_OFF) };
-static const unsigned int i2s_ao_clk_in_pins[] = { PIN(GPIOAO_9, AO_OFF) };
-static const unsigned int i2s_lr_clk_in_pins[] = { PIN(GPIOAO_10, AO_OFF) };
+static const unsigned int uart_tx_ao_a_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_a_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_a_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_a_pins[] = { GPIOAO_3 };
+static const unsigned int i2c_mst_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_mst_sda_ao_pins[] = { GPIOAO_5 };
+static const unsigned int clk_32k_in_out_pins[] = { GPIOAO_6 };
+static const unsigned int remote_input_pins[] = { GPIOAO_7 };
+static const unsigned int hdmi_cec_1_pins[] = { GPIOAO_12 };
+static const unsigned int ir_blaster_pins[] = { GPIOAO_13 };
+
+static const unsigned int pwm_c2_pins[] = { GPIOAO_3 };
+static const unsigned int i2c_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_sda_ao_pins[] = { GPIOAO_5 };
+static const unsigned int ir_remote_out_pins[] = { GPIOAO_7 };
+static const unsigned int i2s_am_clk_out_pins[] = { GPIOAO_8 };
+static const unsigned int i2s_ao_clk_out_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_lr_clk_out_pins[] = { GPIOAO_10 };
+static const unsigned int i2s_out_01_pins[] = { GPIOAO_11 };
+
+static const unsigned int uart_tx_ao_b0_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_b0_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_b_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_b_pins[] = { GPIOAO_3 };
+static const unsigned int uart_tx_ao_b1_pins[] = { GPIOAO_4 };
+static const unsigned int uart_rx_ao_b1_pins[] = { GPIOAO_5 };
+static const unsigned int spdif_out_1_pins[] = { GPIOAO_6 };
+
+static const unsigned int i2s_in_ch01_pins[] = { GPIOAO_6 };
+static const unsigned int i2s_ao_clk_in_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_lr_clk_in_pins[] = { GPIOAO_10 };
/* bank DIF */
-static const unsigned int eth_rxd1_pins[] = { PIN(DIF_0_P, 0) };
-static const unsigned int eth_rxd0_pins[] = { PIN(DIF_0_N, 0) };
-static const unsigned int eth_rx_dv_pins[] = { PIN(DIF_1_P, 0) };
-static const unsigned int eth_rx_clk_pins[] = { PIN(DIF_1_N, 0) };
-static const unsigned int eth_txd0_1_pins[] = { PIN(DIF_2_P, 0) };
-static const unsigned int eth_txd1_1_pins[] = { PIN(DIF_2_N, 0) };
-static const unsigned int eth_tx_en_pins[] = { PIN(DIF_3_P, 0) };
-static const unsigned int eth_ref_clk_pins[] = { PIN(DIF_3_N, 0) };
-static const unsigned int eth_mdc_pins[] = { PIN(DIF_4_P, 0) };
-static const unsigned int eth_mdio_en_pins[] = { PIN(DIF_4_N, 0) };
+static const unsigned int eth_rxd1_pins[] = { DIF_0_P };
+static const unsigned int eth_rxd0_pins[] = { DIF_0_N };
+static const unsigned int eth_rx_dv_pins[] = { DIF_1_P };
+static const unsigned int eth_rx_clk_pins[] = { DIF_1_N };
+static const unsigned int eth_txd0_1_pins[] = { DIF_2_P };
+static const unsigned int eth_txd1_1_pins[] = { DIF_2_N };
+static const unsigned int eth_tx_en_pins[] = { DIF_3_P };
+static const unsigned int eth_ref_clk_pins[] = { DIF_3_N };
+static const unsigned int eth_mdc_pins[] = { DIF_4_P };
+static const unsigned int eth_mdio_en_pins[] = { DIF_4_N };
static struct meson_pmx_group meson8b_cbus_groups[] = {
- GPIO_GROUP(GPIOX_0, 0),
- GPIO_GROUP(GPIOX_1, 0),
- GPIO_GROUP(GPIOX_2, 0),
- GPIO_GROUP(GPIOX_3, 0),
- GPIO_GROUP(GPIOX_4, 0),
- GPIO_GROUP(GPIOX_5, 0),
- GPIO_GROUP(GPIOX_6, 0),
- GPIO_GROUP(GPIOX_7, 0),
- GPIO_GROUP(GPIOX_8, 0),
- GPIO_GROUP(GPIOX_9, 0),
- GPIO_GROUP(GPIOX_10, 0),
- GPIO_GROUP(GPIOX_11, 0),
- GPIO_GROUP(GPIOX_16, 0),
- GPIO_GROUP(GPIOX_17, 0),
- GPIO_GROUP(GPIOX_18, 0),
- GPIO_GROUP(GPIOX_19, 0),
- GPIO_GROUP(GPIOX_20, 0),
- GPIO_GROUP(GPIOX_21, 0),
-
- GPIO_GROUP(GPIOY_0, 0),
- GPIO_GROUP(GPIOY_1, 0),
- GPIO_GROUP(GPIOY_3, 0),
- GPIO_GROUP(GPIOY_6, 0),
- GPIO_GROUP(GPIOY_7, 0),
- GPIO_GROUP(GPIOY_8, 0),
- GPIO_GROUP(GPIOY_9, 0),
- GPIO_GROUP(GPIOY_10, 0),
- GPIO_GROUP(GPIOY_11, 0),
- GPIO_GROUP(GPIOY_12, 0),
- GPIO_GROUP(GPIOY_13, 0),
- GPIO_GROUP(GPIOY_14, 0),
-
- GPIO_GROUP(GPIODV_9, 0),
- GPIO_GROUP(GPIODV_24, 0),
- GPIO_GROUP(GPIODV_25, 0),
- GPIO_GROUP(GPIODV_26, 0),
- GPIO_GROUP(GPIODV_27, 0),
- GPIO_GROUP(GPIODV_28, 0),
- GPIO_GROUP(GPIODV_29, 0),
-
- GPIO_GROUP(GPIOH_0, 0),
- GPIO_GROUP(GPIOH_1, 0),
- GPIO_GROUP(GPIOH_2, 0),
- GPIO_GROUP(GPIOH_3, 0),
- GPIO_GROUP(GPIOH_4, 0),
- GPIO_GROUP(GPIOH_5, 0),
- GPIO_GROUP(GPIOH_6, 0),
- GPIO_GROUP(GPIOH_7, 0),
- GPIO_GROUP(GPIOH_8, 0),
- GPIO_GROUP(GPIOH_9, 0),
-
- GPIO_GROUP(DIF_0_P, 0),
- GPIO_GROUP(DIF_0_N, 0),
- GPIO_GROUP(DIF_1_P, 0),
- GPIO_GROUP(DIF_1_N, 0),
- GPIO_GROUP(DIF_2_P, 0),
- GPIO_GROUP(DIF_2_N, 0),
- GPIO_GROUP(DIF_3_P, 0),
- GPIO_GROUP(DIF_3_N, 0),
- GPIO_GROUP(DIF_4_P, 0),
- GPIO_GROUP(DIF_4_N, 0),
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+ GPIO_GROUP(GPIOX_19),
+ GPIO_GROUP(GPIOX_20),
+ GPIO_GROUP(GPIOX_21),
+
+ GPIO_GROUP(GPIOY_0),
+ GPIO_GROUP(GPIOY_1),
+ GPIO_GROUP(GPIOY_3),
+ GPIO_GROUP(GPIOY_6),
+ GPIO_GROUP(GPIOY_7),
+ GPIO_GROUP(GPIOY_8),
+ GPIO_GROUP(GPIOY_9),
+ GPIO_GROUP(GPIOY_10),
+ GPIO_GROUP(GPIOY_11),
+ GPIO_GROUP(GPIOY_12),
+ GPIO_GROUP(GPIOY_13),
+ GPIO_GROUP(GPIOY_14),
+
+ GPIO_GROUP(GPIODV_9),
+ GPIO_GROUP(GPIODV_24),
+ GPIO_GROUP(GPIODV_25),
+ GPIO_GROUP(GPIODV_26),
+ GPIO_GROUP(GPIODV_27),
+ GPIO_GROUP(GPIODV_28),
+ GPIO_GROUP(GPIODV_29),
+
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+ GPIO_GROUP(GPIOH_4),
+ GPIO_GROUP(GPIOH_5),
+ GPIO_GROUP(GPIOH_6),
+ GPIO_GROUP(GPIOH_7),
+ GPIO_GROUP(GPIOH_8),
+ GPIO_GROUP(GPIOH_9),
+
+ GPIO_GROUP(DIF_0_P),
+ GPIO_GROUP(DIF_0_N),
+ GPIO_GROUP(DIF_1_P),
+ GPIO_GROUP(DIF_1_N),
+ GPIO_GROUP(DIF_2_P),
+ GPIO_GROUP(DIF_2_N),
+ GPIO_GROUP(DIF_3_P),
+ GPIO_GROUP(DIF_3_N),
+ GPIO_GROUP(DIF_4_P),
+ GPIO_GROUP(DIF_4_N),
/* bank X */
GROUP(sd_d0_a, 8, 5),
@@ -577,22 +574,22 @@ static struct meson_pmx_group meson8b_cbus_groups[] = {
};
static struct meson_pmx_group meson8b_aobus_groups[] = {
- GPIO_GROUP(GPIOAO_0, AO_OFF),
- GPIO_GROUP(GPIOAO_1, AO_OFF),
- GPIO_GROUP(GPIOAO_2, AO_OFF),
- GPIO_GROUP(GPIOAO_3, AO_OFF),
- GPIO_GROUP(GPIOAO_4, AO_OFF),
- GPIO_GROUP(GPIOAO_5, AO_OFF),
- GPIO_GROUP(GPIOAO_6, AO_OFF),
- GPIO_GROUP(GPIOAO_7, AO_OFF),
- GPIO_GROUP(GPIOAO_8, AO_OFF),
- GPIO_GROUP(GPIOAO_9, AO_OFF),
- GPIO_GROUP(GPIOAO_10, AO_OFF),
- GPIO_GROUP(GPIOAO_11, AO_OFF),
- GPIO_GROUP(GPIOAO_12, AO_OFF),
- GPIO_GROUP(GPIOAO_13, AO_OFF),
- GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
- GPIO_GROUP(GPIO_TEST_N, AO_OFF),
+ GPIO_GROUP(GPIOAO_0),
+ GPIO_GROUP(GPIOAO_1),
+ GPIO_GROUP(GPIOAO_2),
+ GPIO_GROUP(GPIOAO_3),
+ GPIO_GROUP(GPIOAO_4),
+ GPIO_GROUP(GPIOAO_5),
+ GPIO_GROUP(GPIOAO_6),
+ GPIO_GROUP(GPIOAO_7),
+ GPIO_GROUP(GPIOAO_8),
+ GPIO_GROUP(GPIOAO_9),
+ GPIO_GROUP(GPIOAO_10),
+ GPIO_GROUP(GPIOAO_11),
+ GPIO_GROUP(GPIOAO_12),
+ GPIO_GROUP(GPIOAO_13),
+ GPIO_GROUP(GPIO_BSD_EN),
+ GPIO_GROUP(GPIO_TEST_N),
/* bank AO */
GROUP(uart_tx_ao_a, 0, 12),
@@ -887,30 +884,29 @@ static struct meson_pmx_func meson8b_aobus_functions[] = {
};
static struct meson_bank meson8b_cbus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 97, 118, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
- BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_14, 0), 80, 96, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
- BANK("DV", PIN(GPIODV_9, 0), PIN(GPIODV_29, 0), 59, 79, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
- BANK("H", PIN(GPIOH_0, 0), PIN(GPIOH_9, 0), 14, 23, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
- BANK("CARD", PIN(CARD_0, 0), PIN(CARD_6, 0), 43, 49, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
- BANK("BOOT", PIN(BOOT_0, 0), PIN(BOOT_18, 0), 24, 42, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", GPIOX_0, GPIOX_21, 97, 118, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
+ BANK("Y", GPIOY_0, GPIOY_14, 80, 96, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", GPIODV_9, GPIODV_29, 59, 79, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
+ BANK("H", GPIOH_0, GPIOH_9, 14, 23, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
+ BANK("CARD", CARD_0, CARD_6, 43, 49, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
+ BANK("BOOT", BOOT_0, BOOT_18, 24, 42, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
/*
	 * The following bank is not mentioned in the public datasheet
* There is no information whether it can be used with the gpio
* interrupt controller
*/
- BANK("DIF", PIN(DIF_0_P, 0), PIN(DIF_4_N, 0), -1, -1, 5, 8, 5, 8, 12, 12, 13, 12, 14, 12),
+ BANK("DIF", DIF_0_P, DIF_4_N, -1, -1, 5, 8, 5, 8, 12, 12, 13, 12, 14, 12),
};
static struct meson_bank meson8b_aobus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+	/* name    first      last       irq    pullen  pull    dir     out     in  */
+ BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
+static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
.name = "cbus-banks",
- .pin_base = 0,
.pins = meson8b_cbus_pins,
.groups = meson8b_cbus_groups,
.funcs = meson8b_cbus_functions,
@@ -919,11 +915,11 @@ struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson8b_cbus_groups),
.num_funcs = ARRAY_SIZE(meson8b_cbus_functions),
.num_banks = ARRAY_SIZE(meson8b_cbus_banks),
+ .pmx_ops = &meson8_pmx_ops,
};
-struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
+static struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
.name = "aobus-banks",
- .pin_base = 130,
.pins = meson8b_aobus_pins,
.groups = meson8b_aobus_groups,
.funcs = meson8b_aobus_functions,
@@ -932,4 +928,26 @@ struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson8b_aobus_groups),
.num_funcs = ARRAY_SIZE(meson8b_aobus_functions),
.num_banks = ARRAY_SIZE(meson8b_aobus_banks),
+ .pmx_ops = &meson8_pmx_ops,
+};
+
+static const struct of_device_id meson8b_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson8b-cbus-pinctrl",
+ .data = &meson8b_cbus_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson8b-aobus-pinctrl",
+ .data = &meson8b_aobus_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson8b_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson8b-pinctrl",
+ .of_match_table = meson8b_pinctrl_dt_match,
+ },
};
+builtin_platform_driver(meson8b_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 71b944748304..d45af31b86b4 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -576,6 +576,19 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
case IRQ_TYPE_EDGE_FALLING:
val |= (BIT(d->hwirq % GPIO_PER_REG));
break;
+ case IRQ_TYPE_EDGE_BOTH: {
+ u32 in_val, in_reg = INPUT_VAL;
+
+ armada_37xx_irq_update_reg(&in_reg, d);
+ regmap_read(info->regmap, in_reg, &in_val);
+
+ /* Set initial polarity based on current input level. */
+ if (in_val & d->mask)
+ val |= d->mask; /* falling */
+ else
+ val &= ~d->mask; /* rising */
+ break;
+ }
default:
spin_unlock_irqrestore(&info->irq_lock, flags);
return -EINVAL;
@@ -586,13 +599,47 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
+static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
+ u32 pin_idx)
+{
+ u32 reg_idx = pin_idx / GPIO_PER_REG;
+ u32 bit_num = pin_idx % GPIO_PER_REG;
+	u32 p, l;
+	int ret;
+ unsigned long flags;
+
+	regmap_read(info->regmap, INPUT_VAL + 4 * reg_idx, &l);
+
+ spin_lock_irqsave(&info->irq_lock, flags);
+ p = readl(info->base + IRQ_POL + 4 * reg_idx);
+ if ((p ^ l) & (1 << bit_num)) {
+ /*
+		 * GPIOs used for both-edge interrupts change input level when
+		 * an interrupt fires, but their polarity setting still holds
+		 * the old value, so the polarity must be resynchronized here.
+		 * For example, if a gpio is low and its polarity control is
+		 * "Detect rising edge", then after an interrupt its level
+		 * turns high and the polarity control must be switched to
+		 * "Detect falling edge" accordingly.
+ */
+ p ^= 1 << bit_num;
+ writel(p, info->base + IRQ_POL + 4 * reg_idx);
+ ret = 0;
+ } else {
+ /* Spurious irq */
+ ret = -1;
+ }
+
+ spin_unlock_irqrestore(&info->irq_lock, flags);
+ return ret;
+}
static void armada_37xx_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
struct armada_37xx_pinctrl *info = gpiochip_get_data(gc);
- struct irq_domain *d = gc->irqdomain;
+ struct irq_domain *d = gc->irq.domain;
int i;
chained_irq_enter(chip, desc);
@@ -609,6 +656,23 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
u32 hwirq = ffs(status) - 1;
u32 virq = irq_find_mapping(d, hwirq +
i * GPIO_PER_REG);
+ u32 t = irq_get_trigger_type(virq);
+
+ if ((t & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ /* Swap polarity (race with GPIO line) */
+ if (armada_37xx_edge_both_irq_swap_pol(info,
+ hwirq + i * GPIO_PER_REG)) {
+ /*
+					 * For a spurious irq, where the gpio
+					 * level is not as expected after the
+					 * incoming edge, just ack the gpio irq.
+ */
+ writel(1 << hwirq,
+ info->base +
+ IRQ_STATUS + 4 * i);
+ continue;
+ }
+ }
generic_handle_irq(virq);
@@ -626,15 +690,13 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
static unsigned int armada_37xx_irq_startup(struct irq_data *d)
{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- int irq = d->hwirq - chip->irq_base;
/*
* The mask field is a "precomputed bitmask for accessing the
* chip registers" which was introduced for the generic
* irqchip framework. As we don't use this framework, we can
* reuse this field for our own usage.
*/
- d->mask = BIT(irq % GPIO_PER_REG);
+ d->mask = BIT(d->hwirq % GPIO_PER_REG);
armada_37xx_irq_unmask(d);
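The armada-37xx hunks above emulate IRQ_TYPE_EDGE_BOTH on hardware that can only detect one programmable edge: the polarity is re-armed after every interrupt so it tracks the current line level, and an interrupt whose polarity already agrees with the level is treated as spurious. A condensed, hedged sketch of that rule (illustrative helper and names, not the driver function):

#include <linux/types.h>

/*
 * Sketch only: "pol" is a shadow of the polarity register, "level" the
 * input-value register, "bit" the per-line mask. Returns true when a
 * real edge was consumed (and detection of the opposite edge has been
 * armed), false when the interrupt looks spurious because polarity and
 * level already agree.
 */
static bool example_sync_both_edge(u32 *pol, u32 level, u32 bit)
{
	if (!((*pol ^ level) & bit))
		return false;		/* spurious: nothing to re-arm */

	*pol ^= bit;			/* wait for the opposite edge next */
	return true;
}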
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index a53f1a9b1ed2..f0e7a8c114b2 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -413,7 +413,7 @@ nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset)
u32 falling = nmk_chip->fimsc & BIT(offset);
u32 rising = nmk_chip->rimsc & BIT(offset);
int gpio = nmk_chip->chip.base + offset;
- int irq = irq_find_mapping(nmk_chip->chip.irqdomain, offset);
+ int irq = irq_find_mapping(nmk_chip->chip.irq.domain, offset);
struct irq_data *d = irq_get_irq_data(irq);
if (!rising && !falling)
@@ -815,7 +815,7 @@ static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
while (status) {
int bit = __ffs(status);
- generic_handle_irq(irq_find_mapping(chip->irqdomain, bit));
+ generic_handle_irq(irq_find_mapping(chip->irq.domain, bit));
status &= ~BIT(bit);
}
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 8eaa25c3384f..b4f7f8a458ea 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -49,6 +49,7 @@ static const struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
PCONFDUMP(PIN_CONFIG_SLEEP_HARDWARE_STATE, "sleep hardware state", NULL, false),
PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL, true),
+ PCONFDUMP(PIN_CONFIG_SKEW_DELAY, "skew delay", NULL, true),
};
static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
@@ -181,6 +182,7 @@ static const struct pinconf_generic_params dt_params[] = {
{ "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
{ "sleep-hardware-state", PIN_CONFIG_SLEEP_HARDWARE_STATE, 0 },
{ "slew-rate", PIN_CONFIG_SLEW_RATE, 0 },
+ { "skew-delay", PIN_CONFIG_SKEW_DELAY, 0 },
};
/**
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 433af328d981..61d830c2bc17 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -532,7 +532,7 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
regval = readl(regs + i);
if (!(regval & PIN_IRQ_PENDING))
continue;
- irq = irq_find_mapping(gc->irqdomain, irqnr + i);
+ irq = irq_find_mapping(gc->irq.domain, irqnr + i);
generic_handle_irq(irq);
/* Clear interrupt.
@@ -753,7 +753,7 @@ static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
return false;
}
-int amd_gpio_suspend(struct device *dev)
+static int amd_gpio_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
@@ -772,7 +772,7 @@ int amd_gpio_suspend(struct device *dev)
return 0;
}
-int amd_gpio_resume(struct device *dev)
+static int amd_gpio_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 569bc28cb909..03492e3c09fa 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1603,7 +1603,7 @@ static void gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(n, &isr, BITS_PER_LONG) {
generic_handle_irq(irq_find_mapping(
- gpio_chip->irqdomain, n));
+ gpio_chip->irq.domain, n));
}
}
chained_irq_exit(chip, desc);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index ac155e7d3412..7939b178c6ae 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -517,7 +517,7 @@ static void u300_gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(irqoffset, &val, U300_GPIO_PINS_PER_PORT) {
int offset = pinoffset + irqoffset;
- int pin_irq = irq_find_mapping(chip->irqdomain, offset);
+ int pin_irq = irq_find_mapping(chip->irq.domain, offset);
dev_dbg(gpio->dev, "GPIO IRQ %d on pin %d\n",
pin_irq, offset);
diff --git a/drivers/pinctrl/pinctrl-gemini.c b/drivers/pinctrl/pinctrl-gemini.c
index 39e6221e7100..e9b83e291edf 100644
--- a/drivers/pinctrl/pinctrl-gemini.c
+++ b/drivers/pinctrl/pinctrl-gemini.c
@@ -13,6 +13,8 @@
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -22,6 +24,19 @@
#define DRIVER_NAME "pinctrl-gemini"
/**
+ * struct gemini_pin_conf - information about configuring a pin
+ * @pin: the pin number
+ * @reg: config register
+ * @mask: the bits affecting the configuration of the pin
+ */
+struct gemini_pin_conf {
+ unsigned int pin;
+ u32 reg;
+ u32 mask;
+};
+
+/**
+ * struct gemini_pmx - state holder for the gemini pin controller
* @dev: a pointer back to containing device
* @virtbase: the offset to the controller in virtual memory
* @map: regmap to access registers
@@ -29,6 +44,8 @@
* @is_3516: whether the SoC/package is the 3516 variant
* @flash_pin: whether the flash pin (extended pins for parallel
* flash) is set
+ * @confs: pin config information
+ * @nconfs: number of pin config information items
*/
struct gemini_pmx {
struct device *dev;
@@ -37,6 +54,8 @@ struct gemini_pmx {
bool is_3512;
bool is_3516;
bool flash_pin;
+ const struct gemini_pin_conf *confs;
+ unsigned int nconfs;
};
/**
@@ -57,6 +76,13 @@ struct gemini_pin_group {
u32 value;
};
+/* Some straight-forward control registers */
+#define GLOBAL_WORD_ID 0x00
+#define GLOBAL_STATUS 0x04
+#define GLOBAL_STATUS_FLPIN BIT(20)
+#define GLOBAL_GMAC_CTRL_SKEW 0x1c
+#define GLOBAL_GMAC0_DATA_SKEW 0x20
+#define GLOBAL_GMAC1_DATA_SKEW 0x24
/*
* Global Miscellaneous Control Register
* This register controls all Gemini pad/pin multiplexing
@@ -69,10 +95,14 @@ struct gemini_pin_group {
* DISABLED again. So you select a flash configuration once, and then
* you are stuck with it.
*/
-#define GLOBAL_WORD_ID 0x00
-#define GLOBAL_STATUS 0x04
-#define GLOBAL_STATUS_FLPIN BIT(20)
#define GLOBAL_MISC_CTRL 0x30
+#define GEMINI_GMAC_IOSEL_MASK GENMASK(28, 27)
+/* Not really used */
+#define GEMINI_GMAC_IOSEL_GMAC0_GMII BIT(28)
+/* Activated with GMAC1 */
+#define GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII BIT(27)
+/* This will be the default */
+#define GEMINI_GMAC_IOSEL_GMAC0_RGMII_GMAC1_GPIO2 0
#define TVC_CLK_PAD_ENABLE BIT(20)
#define PCI_CLK_PAD_ENABLE BIT(17)
#define LPC_CLK_PAD_ENABLE BIT(16)
@@ -86,8 +116,8 @@ struct gemini_pin_group {
#define NAND_PADS_DISABLE BIT(2)
#define PFLASH_PADS_DISABLE BIT(1)
#define SFLASH_PADS_DISABLE BIT(0)
-#define PADS_MASK (GENMASK(9, 0) | BIT(16) | BIT(17) | BIT(20))
-#define PADS_MAXBIT 20
+#define PADS_MASK (GENMASK(9, 0) | BIT(16) | BIT(17) | BIT(20) | BIT(27))
+#define PADS_MAXBIT 27
/* Ordered by bit index */
static const char * const gemini_padgroups[] = {
@@ -106,6 +136,8 @@ static const char * const gemini_padgroups[] = {
"PCI CLK",
NULL, NULL,
"TVC CLK",
+ NULL, NULL, NULL, NULL, NULL,
+ "GMAC1",
};
static const struct pinctrl_pin_desc gemini_3512_pins[] = {
@@ -493,9 +525,12 @@ static const unsigned int usb_3512_pins[] = {
};
/* GMII, ethernet pins */
-static const unsigned int gmii_3512_pins[] = {
- 311, 240, 258, 276, 294, 312, 241, 259, 277, 295, 313, 242, 260, 278, 296,
- 315, 297, 279, 261, 243, 316, 298, 280, 262, 244, 317, 299, 281
+static const unsigned int gmii_gmac0_3512_pins[] = {
+ 240, 241, 242, 258, 259, 260, 276, 277, 278, 294, 295, 311, 312, 313
+};
+
+static const unsigned int gmii_gmac1_3512_pins[] = {
+ 243, 244, 261, 262, 279, 280, 281, 296, 297, 298, 299, 315, 316, 317
};
static const unsigned int pci_3512_pins[] = {
@@ -645,10 +680,10 @@ static const unsigned int gpio1c_3512_pins[] = {
/* The GPIO1D (28-31) pins overlap with LCD and TVC */
static const unsigned int gpio1d_3512_pins[] = { 246, 319, 301, 283 };
-/* The GPIO2A (0-3) pins overlap with GMII and extended parallel flash */
+/* The GPIO2A (0-3) pins overlap with GMII GMAC1 and extended parallel flash */
static const unsigned int gpio2a_3512_pins[] = { 315, 297, 279, 261 };
-/* The GPIO2B (4-7) pins overlap with GMII, extended parallel flash and LCD */
+/* The GPIO2B (4-7) pins overlap with GMII GMAC1, extended parallel flash and LCD */
static const unsigned int gpio2b_3512_pins[] = { 262, 244, 317, 299 };
/* The GPIO2C (8-31) pins overlap with PCI */
@@ -715,9 +750,16 @@ static const struct gemini_pin_group gemini_3512_pin_groups[] = {
.num_pins = ARRAY_SIZE(usb_3512_pins),
},
{
- .name = "gmiigrp",
- .pins = gmii_3512_pins,
- .num_pins = ARRAY_SIZE(gmii_3512_pins),
+ .name = "gmii_gmac0_grp",
+ .pins = gmii_gmac0_3512_pins,
+ .num_pins = ARRAY_SIZE(gmii_gmac0_3512_pins),
+ },
+ {
+ .name = "gmii_gmac1_grp",
+ .pins = gmii_gmac1_3512_pins,
+ .num_pins = ARRAY_SIZE(gmii_gmac1_3512_pins),
+ /* Bring out RGMII on the GMAC1 pins */
+ .value = GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
},
{
.name = "pcigrp",
@@ -931,14 +973,15 @@ static const struct gemini_pin_group gemini_3512_pin_groups[] = {
.name = "gpio2agrp",
.pins = gpio2a_3512_pins,
.num_pins = ARRAY_SIZE(gpio2a_3512_pins),
- /* Conflict with GMII and extended parallel flash */
+ .mask = GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
+ /* Conflict with GMII GMAC1 and extended parallel flash */
},
{
.name = "gpio2bgrp",
.pins = gpio2b_3512_pins,
.num_pins = ARRAY_SIZE(gpio2b_3512_pins),
- /* Conflict with GMII, extended parallel flash and LCD */
- .mask = LCD_PADS_ENABLE,
+ /* Conflict with GMII GMAC1, extended parallel flash and LCD */
+ .mask = LCD_PADS_ENABLE | GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
},
{
.name = "gpio2cgrp",
@@ -1418,9 +1461,12 @@ static const unsigned int usb_3516_pins[] = {
};
/* GMII, ethernet pins */
-static const unsigned int gmii_3516_pins[] = {
- 306, 307, 308, 309, 310, 325, 326, 327, 328, 329, 330, 345, 346, 347,
- 348, 349, 350, 351, 367, 368, 369, 370, 371, 386, 387, 389, 390, 391
+static const unsigned int gmii_gmac0_3516_pins[] = {
+ 306, 307, 325, 326, 327, 328, 345, 346, 347, 348, 367, 368, 386, 387
+};
+
+static const unsigned int gmii_gmac1_3516_pins[] = {
+ 308, 309, 310, 329, 330, 349, 350, 351, 369, 370, 371, 389, 390, 391
};
static const unsigned int pci_3516_pins[] = {
@@ -1562,10 +1608,10 @@ static const unsigned int gpio1c_3516_pins[] = {
/* The GPIO1D (28-31) pins overlap with TVC */
static const unsigned int gpio1d_3516_pins[] = { 353, 311, 394, 374 };
-/* The GPIO2A (0-3) pins overlap with GMII and extended parallel flash */
+/* The GPIO2A (0-3) pins overlap with GMII GMAC1 and extended parallel flash */
static const unsigned int gpio2a_3516_pins[] = { 308, 369, 389, 329 };
-/* The GPIO2B (4-7) pins overlap with GMII, extended parallel flash and LCD */
+/* The GPIO2B (4-7) pins overlap with GMII GMAC1, extended parallel flash and LCD */
static const unsigned int gpio2b_3516_pins[] = { 391, 351, 310, 371 };
/* The GPIO2C (8-31) pins overlap with PCI */
@@ -1637,9 +1683,16 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = {
.num_pins = ARRAY_SIZE(usb_3516_pins),
},
{
- .name = "gmiigrp",
- .pins = gmii_3516_pins,
- .num_pins = ARRAY_SIZE(gmii_3516_pins),
+ .name = "gmii_gmac0_grp",
+ .pins = gmii_gmac0_3516_pins,
+ .num_pins = ARRAY_SIZE(gmii_gmac0_3516_pins),
+ },
+ {
+ .name = "gmii_gmac1_grp",
+ .pins = gmii_gmac1_3516_pins,
+ .num_pins = ARRAY_SIZE(gmii_gmac1_3516_pins),
+ /* Bring out RGMII on the GMAC1 pins */
+ .value = GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
},
{
.name = "pcigrp",
@@ -1838,14 +1891,15 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = {
.name = "gpio2agrp",
.pins = gpio2a_3516_pins,
.num_pins = ARRAY_SIZE(gpio2a_3516_pins),
- /* Conflict with GMII and extended parallel flash */
+ .mask = GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
+ /* Conflict with GMII GMAC1 and extended parallel flash */
},
{
.name = "gpio2bgrp",
.pins = gpio2b_3516_pins,
.num_pins = ARRAY_SIZE(gpio2b_3516_pins),
- /* Conflict with GMII, extended parallel flash and LCD */
- .mask = LCD_PADS_ENABLE,
+ /* Conflict with GMII GMAC1, extended parallel flash and LCD */
+ .mask = LCD_PADS_ENABLE | GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
},
{
.name = "gpio2cgrp",
@@ -1918,73 +1972,13 @@ static void gemini_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
seq_printf(s, " " DRIVER_NAME);
}
-static int gemini_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np,
- struct pinctrl_map **map,
- unsigned int *reserved_maps,
- unsigned int *num_maps)
-{
- int ret;
- const char *function = NULL;
- const char *group;
- struct property *prop;
-
- ret = of_property_read_string(np, "function", &function);
- if (ret < 0)
- return ret;
-
- ret = of_property_count_strings(np, "groups");
- if (ret < 0)
- return ret;
-
- ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps,
- num_maps, ret);
- if (ret < 0)
- return ret;
-
- of_property_for_each_string(np, "groups", prop, group) {
- ret = pinctrl_utils_add_map_mux(pctldev, map, reserved_maps,
- num_maps, group, function);
- if (ret < 0)
- return ret;
- pr_debug("ADDED FUNCTION %s <-> GROUP %s\n",
- function, group);
- }
-
- return 0;
-}
-
-static int gemini_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np_config,
- struct pinctrl_map **map,
- unsigned int *num_maps)
-{
- unsigned int reserved_maps = 0;
- struct device_node *np;
- int ret;
-
- *map = NULL;
- *num_maps = 0;
-
- for_each_child_of_node(np_config, np) {
- ret = gemini_pinctrl_dt_subnode_to_map(pctldev, np, map,
- &reserved_maps, num_maps);
- if (ret < 0) {
- pinctrl_utils_free_map(pctldev, *map, *num_maps);
- return ret;
- }
- }
-
- return 0;
-};
-
static const struct pinctrl_ops gemini_pctrl_ops = {
.get_groups_count = gemini_get_groups_count,
.get_group_name = gemini_get_group_name,
.get_group_pins = gemini_get_group_pins,
.pin_dbg_show = gemini_pin_dbg_show,
- .dt_node_to_map = gemini_pinctrl_dt_node_to_map,
- .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
};
/**
@@ -2008,7 +2002,7 @@ static const char * const icegrps[] = { "icegrp" };
static const char * const idegrps[] = { "idegrp" };
static const char * const satagrps[] = { "satagrp" };
static const char * const usbgrps[] = { "usbgrp" };
-static const char * const gmiigrps[] = { "gmiigrp" };
+static const char * const gmiigrps[] = { "gmii_gmac0_grp", "gmii_gmac1_grp" };
static const char * const pcigrps[] = { "pcigrp" };
static const char * const lpcgrps[] = { "lpcgrp" };
static const char * const lcdgrps[] = { "lcdgrp" };
@@ -2074,6 +2068,16 @@ static const struct gemini_pmx_func gemini_pmx_functions[] = {
.num_groups = ARRAY_SIZE(satagrps),
},
{
+ .name = "usb",
+ .groups = usbgrps,
+ .num_groups = ARRAY_SIZE(usbgrps),
+ },
+ {
+ .name = "gmii",
+ .groups = gmiigrps,
+ .num_groups = ARRAY_SIZE(gmiigrps),
+ },
+ {
.name = "pci",
.groups = pcigrps,
.num_groups = ARRAY_SIZE(pcigrps),
@@ -2251,10 +2255,155 @@ static const struct pinmux_ops gemini_pmx_ops = {
.set_mux = gemini_pmx_set_mux,
};
+#define GEMINI_CFGPIN(_n, _r, _lb, _hb) { \
+ .pin = _n, \
+ .reg = _r, \
+ .mask = GENMASK(_hb, _lb) \
+}
+
+static const struct gemini_pin_conf gemini_confs_3512[] = {
+ GEMINI_CFGPIN(259, GLOBAL_GMAC_CTRL_SKEW, 0, 3), /* GMAC0 RXDV */
+ GEMINI_CFGPIN(277, GLOBAL_GMAC_CTRL_SKEW, 4, 7), /* GMAC0 RXC */
+ GEMINI_CFGPIN(241, GLOBAL_GMAC_CTRL_SKEW, 8, 11), /* GMAC0 TXEN */
+ GEMINI_CFGPIN(312, GLOBAL_GMAC_CTRL_SKEW, 12, 15), /* GMAC0 TXC */
+ GEMINI_CFGPIN(298, GLOBAL_GMAC_CTRL_SKEW, 16, 19), /* GMAC1 RXDV */
+ GEMINI_CFGPIN(280, GLOBAL_GMAC_CTRL_SKEW, 20, 23), /* GMAC1 RXC */
+ GEMINI_CFGPIN(316, GLOBAL_GMAC_CTRL_SKEW, 24, 27), /* GMAC1 TXEN */
+ GEMINI_CFGPIN(243, GLOBAL_GMAC_CTRL_SKEW, 28, 31), /* GMAC1 TXC */
+ GEMINI_CFGPIN(295, GLOBAL_GMAC0_DATA_SKEW, 0, 3), /* GMAC0 RXD0 */
+ GEMINI_CFGPIN(313, GLOBAL_GMAC0_DATA_SKEW, 4, 7), /* GMAC0 RXD1 */
+ GEMINI_CFGPIN(242, GLOBAL_GMAC0_DATA_SKEW, 8, 11), /* GMAC0 RXD2 */
+ GEMINI_CFGPIN(260, GLOBAL_GMAC0_DATA_SKEW, 12, 15), /* GMAC0 RXD3 */
+ GEMINI_CFGPIN(294, GLOBAL_GMAC0_DATA_SKEW, 16, 19), /* GMAC0 TXD0 */
+ GEMINI_CFGPIN(276, GLOBAL_GMAC0_DATA_SKEW, 20, 23), /* GMAC0 TXD1 */
+ GEMINI_CFGPIN(258, GLOBAL_GMAC0_DATA_SKEW, 24, 27), /* GMAC0 TXD2 */
+ GEMINI_CFGPIN(240, GLOBAL_GMAC0_DATA_SKEW, 28, 31), /* GMAC0 TXD3 */
+ GEMINI_CFGPIN(262, GLOBAL_GMAC1_DATA_SKEW, 0, 3), /* GMAC1 RXD0 */
+ GEMINI_CFGPIN(244, GLOBAL_GMAC1_DATA_SKEW, 4, 7), /* GMAC1 RXD1 */
+ GEMINI_CFGPIN(317, GLOBAL_GMAC1_DATA_SKEW, 8, 11), /* GMAC1 RXD2 */
+ GEMINI_CFGPIN(299, GLOBAL_GMAC1_DATA_SKEW, 12, 15), /* GMAC1 RXD3 */
+ GEMINI_CFGPIN(261, GLOBAL_GMAC1_DATA_SKEW, 16, 19), /* GMAC1 TXD0 */
+ GEMINI_CFGPIN(279, GLOBAL_GMAC1_DATA_SKEW, 20, 23), /* GMAC1 TXD1 */
+ GEMINI_CFGPIN(297, GLOBAL_GMAC1_DATA_SKEW, 24, 27), /* GMAC1 TXD2 */
+ GEMINI_CFGPIN(315, GLOBAL_GMAC1_DATA_SKEW, 28, 31), /* GMAC1 TXD3 */
+};
+
+static const struct gemini_pin_conf gemini_confs_3516[] = {
+ GEMINI_CFGPIN(347, GLOBAL_GMAC_CTRL_SKEW, 0, 3), /* GMAC0 RXDV */
+ GEMINI_CFGPIN(386, GLOBAL_GMAC_CTRL_SKEW, 4, 7), /* GMAC0 RXC */
+ GEMINI_CFGPIN(307, GLOBAL_GMAC_CTRL_SKEW, 8, 11), /* GMAC0 TXEN */
+ GEMINI_CFGPIN(327, GLOBAL_GMAC_CTRL_SKEW, 12, 15), /* GMAC0 TXC */
+ GEMINI_CFGPIN(309, GLOBAL_GMAC_CTRL_SKEW, 16, 19), /* GMAC1 RXDV */
+ GEMINI_CFGPIN(390, GLOBAL_GMAC_CTRL_SKEW, 20, 23), /* GMAC1 RXC */
+ GEMINI_CFGPIN(370, GLOBAL_GMAC_CTRL_SKEW, 24, 27), /* GMAC1 TXEN */
+ GEMINI_CFGPIN(350, GLOBAL_GMAC_CTRL_SKEW, 28, 31), /* GMAC1 TXC */
+ GEMINI_CFGPIN(367, GLOBAL_GMAC0_DATA_SKEW, 0, 3), /* GMAC0 RXD0 */
+ GEMINI_CFGPIN(348, GLOBAL_GMAC0_DATA_SKEW, 4, 7), /* GMAC0 RXD1 */
+ GEMINI_CFGPIN(387, GLOBAL_GMAC0_DATA_SKEW, 8, 11), /* GMAC0 RXD2 */
+ GEMINI_CFGPIN(328, GLOBAL_GMAC0_DATA_SKEW, 12, 15), /* GMAC0 RXD3 */
+ GEMINI_CFGPIN(306, GLOBAL_GMAC0_DATA_SKEW, 16, 19), /* GMAC0 TXD0 */
+ GEMINI_CFGPIN(325, GLOBAL_GMAC0_DATA_SKEW, 20, 23), /* GMAC0 TXD1 */
+ GEMINI_CFGPIN(346, GLOBAL_GMAC0_DATA_SKEW, 24, 27), /* GMAC0 TXD2 */
+ GEMINI_CFGPIN(326, GLOBAL_GMAC0_DATA_SKEW, 28, 31), /* GMAC0 TXD3 */
+ GEMINI_CFGPIN(391, GLOBAL_GMAC1_DATA_SKEW, 0, 3), /* GMAC1 RXD0 */
+ GEMINI_CFGPIN(351, GLOBAL_GMAC1_DATA_SKEW, 4, 7), /* GMAC1 RXD1 */
+ GEMINI_CFGPIN(310, GLOBAL_GMAC1_DATA_SKEW, 8, 11), /* GMAC1 RXD2 */
+ GEMINI_CFGPIN(371, GLOBAL_GMAC1_DATA_SKEW, 12, 15), /* GMAC1 RXD3 */
+ GEMINI_CFGPIN(329, GLOBAL_GMAC1_DATA_SKEW, 16, 19), /* GMAC1 TXD0 */
+ GEMINI_CFGPIN(389, GLOBAL_GMAC1_DATA_SKEW, 20, 23), /* GMAC1 TXD1 */
+ GEMINI_CFGPIN(369, GLOBAL_GMAC1_DATA_SKEW, 24, 27), /* GMAC1 TXD2 */
+ GEMINI_CFGPIN(308, GLOBAL_GMAC1_DATA_SKEW, 28, 31), /* GMAC1 TXD3 */
+};
+
+static const struct gemini_pin_conf *gemini_get_pin_conf(struct gemini_pmx *pmx,
+ unsigned int pin)
+{
+ const struct gemini_pin_conf *retconf;
+ int i;
+
+ for (i = 0; i < pmx->nconfs; i++) {
+		retconf = &pmx->confs[i];
+ if (retconf->pin == pin)
+ return retconf;
+ }
+ return NULL;
+}
+
+static int gemini_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct gemini_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ const struct gemini_pin_conf *conf;
+ u32 val;
+
+ switch (param) {
+ case PIN_CONFIG_SKEW_DELAY:
+ conf = gemini_get_pin_conf(pmx, pin);
+ if (!conf)
+ return -ENOTSUPP;
+ regmap_read(pmx->map, conf->reg, &val);
+ val &= conf->mask;
+ val >>= (ffs(conf->mask) - 1);
+ *config = pinconf_to_config_packed(PIN_CONFIG_SKEW_DELAY, val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int gemini_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct gemini_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ const struct gemini_pin_conf *conf;
+ enum pin_config_param param;
+ u32 arg;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_SKEW_DELAY:
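+ /* The skew delay is a 4-bit field, so values above 0xf are rejected */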
+ if (arg > 0xf)
+ return -EINVAL;
+ conf = gemini_get_pin_conf(pmx, pin);
+ if (!conf) {
+ dev_err(pmx->dev,
+ "invalid pin for skew delay %d\n", pin);
+ return -ENOTSUPP;
+ }
+ arg <<= (ffs(conf->mask) - 1);
+ dev_dbg(pmx->dev,
+ "set pin %d to skew delay mask %08x, val %08x\n",
+ pin, conf->mask, arg);
+ regmap_update_bits(pmx->map, conf->reg, conf->mask, arg);
+ break;
+ default:
+ dev_err(pmx->dev, "Invalid config param %04x\n", param);
+ return -ENOTSUPP;
+ }
+ }
+
+ return ret;
+}
+
+static const struct pinconf_ops gemini_pinconf_ops = {
+ .pin_config_get = gemini_pinconf_get,
+ .pin_config_set = gemini_pinconf_set,
+ .is_generic = true,
+};
+
static struct pinctrl_desc gemini_pmx_desc = {
.name = DRIVER_NAME,
.pctlops = &gemini_pctrl_ops,
.pmxops = &gemini_pmx_ops,
+ .confops = &gemini_pinconf_ops,
.owner = THIS_MODULE,
};
@@ -2297,11 +2446,15 @@ static int gemini_pmx_probe(struct platform_device *pdev)
val &= 0xffff;
if (val == 0x3512) {
pmx->is_3512 = true;
+ pmx->confs = gemini_confs_3512;
+ pmx->nconfs = ARRAY_SIZE(gemini_confs_3512);
gemini_pmx_desc.pins = gemini_3512_pins;
gemini_pmx_desc.npins = ARRAY_SIZE(gemini_3512_pins);
dev_info(dev, "detected 3512 chip variant\n");
} else if (val == 0x3516) {
pmx->is_3516 = true;
+ pmx->confs = gemini_confs_3516;
+ pmx->nconfs = ARRAY_SIZE(gemini_confs_3516);
gemini_pmx_desc.pins = gemini_3516_pins;
gemini_pmx_desc.npins = ARRAY_SIZE(gemini_3516_pins);
dev_info(dev, "detected 3516 chip variant\n");
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index d84761822243..372ddf386bdb 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -717,7 +717,7 @@ static const struct of_device_id ingenic_pinctrl_of_match[] = {
{},
};
-int ingenic_pinctrl_probe(struct platform_device *pdev)
+static int ingenic_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ingenic_pinctrl *jzpc;
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
index b8d2180a2bea..a7f37063518e 100644
--- a/drivers/pinctrl/pinctrl-max77620.c
+++ b/drivers/pinctrl/pinctrl-max77620.c
@@ -420,11 +420,9 @@ static int max77620_pinconf_set(struct pinctrl_dev *pctldev,
MAX77620_REG_GPIO0 + pin,
MAX77620_CNFG_GPIO_DRV_MASK,
val);
- if (ret < 0) {
- dev_err(dev, "Reg 0x%02x update failed %d\n",
- MAX77620_REG_GPIO0 + pin, ret);
- return ret;
- }
+ if (ret)
+ goto report_update_failure;
+
mpci->pin_info[pin].drv_type = val ?
MAX77620_PIN_PP_DRV : MAX77620_PIN_OD_DRV;
break;
@@ -435,11 +433,9 @@ static int max77620_pinconf_set(struct pinctrl_dev *pctldev,
MAX77620_REG_GPIO0 + pin,
MAX77620_CNFG_GPIO_DRV_MASK,
val);
- if (ret < 0) {
- dev_err(dev, "Reg 0x%02x update failed %d\n",
- MAX77620_REG_GPIO0 + pin, ret);
- return ret;
- }
+ if (ret)
+ goto report_update_failure;
+
mpci->pin_info[pin].drv_type = val ?
MAX77620_PIN_PP_DRV : MAX77620_PIN_OD_DRV;
break;
@@ -536,6 +532,11 @@ static int max77620_pinconf_set(struct pinctrl_dev *pctldev,
}
return 0;
+
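+/* Common error path for failed GPIO configuration register updates */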
+report_update_failure:
+ dev_err(dev, "Reg 0x%02x update failed %d\n",
+ MAX77620_REG_GPIO0 + pin, ret);
+ return ret;
}
static const struct pinconf_ops max77620_pinconf_ops = {
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 9c950bbf07ba..4a6ea159c65d 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -25,6 +25,7 @@
#define MCP_TYPE_008 2
#define MCP_TYPE_017 3
#define MCP_TYPE_S18 4
+#define MCP_TYPE_018 5
#define MCP_MAX_DEV_PER_CS 8
@@ -278,8 +279,7 @@ static int mcp_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
{
struct mcp23s08 *mcp = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
- u32 arg, mask;
- u16 val;
+ u32 arg;
int ret = 0;
int i;
@@ -289,8 +289,6 @@ static int mcp_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
switch (param) {
case PIN_CONFIG_BIAS_PULL_UP:
- val = arg ? 0xFFFF : 0x0000;
- mask = BIT(pin);
ret = mcp_set_bit(mcp, MCP_GPPU, pin, arg);
break;
default:
@@ -537,7 +535,7 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
((gpio_bit_changed || intcap_changed) &&
(BIT(i) & mcp->irq_fall) && !gpio_set) ||
defval_changed) {
- child_irq = irq_find_mapping(mcp->chip.irqdomain, i);
+ child_irq = irq_find_mapping(mcp->chip.irq.domain, i);
handle_nested_irq(child_irq);
}
}
@@ -837,6 +835,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.ngpio = 16;
mcp->chip.label = "mcp23017";
break;
+
+ case MCP_TYPE_018:
+ mcp->regmap = devm_regmap_init_i2c(data, &mcp23x17_regmap);
+ mcp->reg_shift = 1;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23018";
+ break;
#endif /* CONFIG_I2C */
default:
@@ -883,7 +888,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
if (mirror)
status |= IOCON_MIRROR | (IOCON_MIRROR << 8);
- if (type == MCP_TYPE_S18)
+ if (type == MCP_TYPE_S18 || type == MCP_TYPE_018)
status |= IOCON_INTCC | (IOCON_INTCC << 8);
ret = mcp_write(mcp, MCP_IOCON, status);
@@ -964,6 +969,10 @@ static const struct of_device_id mcp23s08_i2c_of_match[] = {
.compatible = "microchip,mcp23017",
.data = (void *) MCP_TYPE_017,
},
+ {
+ .compatible = "microchip,mcp23018",
+ .data = (void *) MCP_TYPE_018,
+ },
/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
{
.compatible = "mcp,mcp23008",
@@ -1013,6 +1022,7 @@ static int mcp230xx_probe(struct i2c_client *client,
static const struct i2c_device_id mcp230xx_id[] = {
{ "mcp23008", MCP_TYPE_008 },
{ "mcp23017", MCP_TYPE_017 },
+ { "mcp23018", MCP_TYPE_018 },
{ },
};
MODULE_DEVICE_TABLE(i2c, mcp230xx_id);
diff --git a/drivers/pinctrl/pinctrl-oxnas.c b/drivers/pinctrl/pinctrl-oxnas.c
index 494ec9a7573a..53ec22a51f5c 100644
--- a/drivers/pinctrl/pinctrl-oxnas.c
+++ b/drivers/pinctrl/pinctrl-oxnas.c
@@ -1064,7 +1064,7 @@ static void oxnas_gpio_irq_handler(struct irq_desc *desc)
stat = readl(bank->reg_base + IRQ_PENDING);
for_each_set_bit(pin, &stat, BITS_PER_LONG)
- generic_handle_irq(irq_linear_revmap(gc->irqdomain, pin));
+ generic_handle_irq(irq_linear_revmap(gc->irq.domain, pin));
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index 31ceb958b3fe..96390228d388 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -2106,7 +2106,7 @@ static void pic32_gpio_irq_handler(struct irq_desc *desc)
pending = pic32_gpio_get_pending(gc, stat);
for_each_set_bit(pin, &pending, BITS_PER_LONG)
- generic_handle_irq(irq_linear_revmap(gc->irqdomain, pin));
+ generic_handle_irq(irq_linear_revmap(gc->irq.domain, pin));
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 55375b1b3cc8..302190d1558d 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1307,7 +1307,7 @@ static void pistachio_gpio_irq_handler(struct irq_desc *desc)
pending = gpio_readl(bank, GPIO_INTERRUPT_STATUS) &
gpio_readl(bank, GPIO_INTERRUPT_EN);
for_each_set_bit(pin, &pending, 16)
- generic_handle_irq(irq_linear_revmap(gc->irqdomain, pin));
+ generic_handle_irq(irq_linear_revmap(gc->irq.domain, pin));
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index b5cb7858ffdc..2ba17548ad5b 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -884,6 +884,24 @@ static struct rockchip_mux_route_data rk3228_mux_route_data[] = {
},
};
+static struct rockchip_mux_route_data rk3288_mux_route_data[] = {
+ {
+ /* edphdmi_cecinoutt1 */
+ .bank_num = 7,
+ .pin = 16,
+ .func = 2,
+ .route_offset = 0x264,
+ .route_val = BIT(16 + 12) | BIT(12),
+ }, {
+ /* edphdmi_cecinout */
+ .bank_num = 7,
+ .pin = 23,
+ .func = 4,
+ .route_offset = 0x264,
+ .route_val = BIT(16 + 12),
+ },
+};
+
static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
{
/* uart2dbg_rxm0 */
@@ -900,12 +918,19 @@ static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
.route_offset = 0x50,
.route_val = BIT(16) | BIT(16 + 1) | BIT(0),
}, {
- /* gmac-m1-optimized_rxd0 */
+ /* gmac-m1_rxd0 */
.bank_num = 1,
.pin = 11,
.func = 2,
.route_offset = 0x50,
- .route_val = BIT(16 + 2) | BIT(16 + 10) | BIT(2) | BIT(10),
+ .route_val = BIT(16 + 2) | BIT(2),
+ }, {
+ /* gmac-m1-optimized_rxd3 */
+ .bank_num = 1,
+ .pin = 14,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 10) | BIT(10),
}, {
/* pdm_sdi0m0 */
.bank_num = 2,
@@ -3391,6 +3416,8 @@ static struct rockchip_pin_ctrl rk3288_pin_ctrl = {
.type = RK3288,
.grf_mux_offset = 0x0,
.pmu_mux_offset = 0x84,
+ .iomux_routes = rk3288_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(rk3288_mux_route_data),
.pull_calc_reg = rk3288_calc_pull_reg_and_bit,
.drv_calc_reg = rk3288_calc_drv_reg_and_bit,
};
@@ -3456,8 +3483,8 @@ static struct rockchip_pin_bank rk3399_pin_banks[] = {
DRV_TYPE_IO_1V8_ONLY,
DRV_TYPE_IO_DEFAULT,
DRV_TYPE_IO_DEFAULT,
- 0x0,
- 0x8,
+ 0x80,
+ 0x88,
-1,
-1,
PULL_TYPE_IO_1V8_ONLY,
@@ -3473,10 +3500,10 @@ static struct rockchip_pin_bank rk3399_pin_banks[] = {
DRV_TYPE_IO_1V8_OR_3V0,
DRV_TYPE_IO_1V8_OR_3V0,
DRV_TYPE_IO_1V8_OR_3V0,
- 0x20,
- 0x28,
- 0x30,
- 0x38
+ 0xa0,
+ 0xa8,
+ 0xb0,
+ 0xb8
),
PIN_BANK_DRV_FLAGS_PULL_FLAGS(2, 32, "gpio2", DRV_TYPE_IO_1V8_OR_3V0,
DRV_TYPE_IO_1V8_OR_3V0,
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 04d058706b80..717c0f4449a0 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -303,6 +303,134 @@ static const struct rza1_pinmux_conf rza1h_pmx_conf = {
};
/* ----------------------------------------------------------------------------
+ * RZ/A1L (r7s72102) pinmux flags
+ */
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p1[] = {
+ { .pin = 0, .func = 1 },
+ { .pin = 1, .func = 1 },
+ { .pin = 2, .func = 1 },
+ { .pin = 3, .func = 1 },
+ { .pin = 4, .func = 1 },
+ { .pin = 5, .func = 1 },
+ { .pin = 6, .func = 1 },
+ { .pin = 7, .func = 1 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p3[] = {
+ { .pin = 0, .func = 2 },
+ { .pin = 1, .func = 2 },
+ { .pin = 2, .func = 2 },
+ { .pin = 4, .func = 2 },
+ { .pin = 5, .func = 2 },
+ { .pin = 10, .func = 2 },
+ { .pin = 11, .func = 2 },
+ { .pin = 12, .func = 2 },
+ { .pin = 13, .func = 2 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p4[] = {
+ { .pin = 1, .func = 4 },
+ { .pin = 2, .func = 2 },
+ { .pin = 3, .func = 2 },
+ { .pin = 6, .func = 2 },
+ { .pin = 7, .func = 2 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p5[] = {
+ { .pin = 0, .func = 1 },
+ { .pin = 1, .func = 1 },
+ { .pin = 2, .func = 1 },
+ { .pin = 3, .func = 1 },
+ { .pin = 4, .func = 1 },
+ { .pin = 5, .func = 1 },
+ { .pin = 6, .func = 1 },
+ { .pin = 7, .func = 1 },
+ { .pin = 8, .func = 1 },
+ { .pin = 9, .func = 1 },
+ { .pin = 10, .func = 1 },
+ { .pin = 11, .func = 1 },
+ { .pin = 12, .func = 1 },
+ { .pin = 13, .func = 1 },
+ { .pin = 14, .func = 1 },
+ { .pin = 15, .func = 1 },
+ { .pin = 0, .func = 2 },
+ { .pin = 1, .func = 2 },
+ { .pin = 2, .func = 2 },
+ { .pin = 3, .func = 2 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p6[] = {
+ { .pin = 0, .func = 1 },
+ { .pin = 1, .func = 1 },
+ { .pin = 2, .func = 1 },
+ { .pin = 3, .func = 1 },
+ { .pin = 4, .func = 1 },
+ { .pin = 5, .func = 1 },
+ { .pin = 6, .func = 1 },
+ { .pin = 7, .func = 1 },
+ { .pin = 8, .func = 1 },
+ { .pin = 9, .func = 1 },
+ { .pin = 10, .func = 1 },
+ { .pin = 11, .func = 1 },
+ { .pin = 12, .func = 1 },
+ { .pin = 13, .func = 1 },
+ { .pin = 14, .func = 1 },
+ { .pin = 15, .func = 1 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p7[] = {
+ { .pin = 2, .func = 2 },
+ { .pin = 3, .func = 2 },
+ { .pin = 5, .func = 2 },
+ { .pin = 6, .func = 2 },
+ { .pin = 7, .func = 2 },
+ { .pin = 2, .func = 3 },
+ { .pin = 3, .func = 3 },
+ { .pin = 5, .func = 3 },
+ { .pin = 6, .func = 3 },
+ { .pin = 7, .func = 3 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p9[] = {
+ { .pin = 1, .func = 2 },
+ { .pin = 0, .func = 3 },
+ { .pin = 1, .func = 3 },
+ { .pin = 3, .func = 3 },
+ { .pin = 4, .func = 3 },
+ { .pin = 5, .func = 3 },
+};
+
+static const struct rza1_swio_pin rza1l_swio_pins[] = {
+ { .port = 2, .pin = 8, .func = 2, .input = 0 },
+ { .port = 5, .pin = 6, .func = 3, .input = 0 },
+ { .port = 6, .pin = 6, .func = 3, .input = 0 },
+ { .port = 6, .pin = 10, .func = 3, .input = 0 },
+ { .port = 7, .pin = 10, .func = 2, .input = 0 },
+ { .port = 8, .pin = 2, .func = 3, .input = 0 },
+};
+
+static const struct rza1_bidir_entry rza1l_bidir_entries[RZA1_NPORTS] = {
+ [1] = { ARRAY_SIZE(rza1l_bidir_pins_p1), rza1l_bidir_pins_p1 },
+ [3] = { ARRAY_SIZE(rza1l_bidir_pins_p3), rza1l_bidir_pins_p3 },
+ [4] = { ARRAY_SIZE(rza1l_bidir_pins_p4), rza1l_bidir_pins_p4 },
+ [5] = { ARRAY_SIZE(rza1l_bidir_pins_p5), rza1l_bidir_pins_p5 },
+ [6] = { ARRAY_SIZE(rza1l_bidir_pins_p6), rza1l_bidir_pins_p6 },
+ [7] = { ARRAY_SIZE(rza1l_bidir_pins_p7), rza1l_bidir_pins_p7 },
+ [9] = { ARRAY_SIZE(rza1l_bidir_pins_p9), rza1l_bidir_pins_p9 },
+};
+
+static const struct rza1_swio_entry rza1l_swio_entries[] = {
+ [0] = { ARRAY_SIZE(rza1l_swio_pins), rza1l_swio_pins },
+};
+
+/* RZ/A1L (r7s72102x) pinmux flags table */
+static const struct rza1_pinmux_conf rza1l_pmx_conf = {
+ .bidir_entries = rza1l_bidir_entries,
+ .swio_entries = rza1l_swio_entries,
+};
+
+/* ----------------------------------------------------------------------------
* RZ/A1 types
*/
/**
@@ -1283,9 +1411,15 @@ static int rza1_pinctrl_probe(struct platform_device *pdev)
static const struct of_device_id rza1_pinctrl_of_match[] = {
{
+ /* RZ/A1H, RZ/A1M */
.compatible = "renesas,r7s72100-ports",
.data = &rza1h_pmx_conf,
},
+ {
+ /* RZ/A1L */
+ .compatible = "renesas,r7s72102-ports",
+ .data = &rza1l_pmx_conf,
+ },
{ }
};
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index b8b3d932cd73..e6cd8de793e2 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -873,13 +873,13 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
int i = 0, nconfs = 0;
unsigned long *settings = NULL, *s = NULL;
struct pcs_conf_vals *conf = NULL;
- struct pcs_conf_type prop2[] = {
+ static const struct pcs_conf_type prop2[] = {
{ "pinctrl-single,drive-strength", PIN_CONFIG_DRIVE_STRENGTH, },
{ "pinctrl-single,slew-rate", PIN_CONFIG_SLEW_RATE, },
{ "pinctrl-single,input-schmitt", PIN_CONFIG_INPUT_SCHMITT, },
{ "pinctrl-single,low-power-mode", PIN_CONFIG_LOW_POWER_MODE, },
};
- struct pcs_conf_type prop4[] = {
+ static const struct pcs_conf_type prop4[] = {
{ "pinctrl-single,bias-pullup", PIN_CONFIG_BIAS_PULL_UP, },
{ "pinctrl-single,bias-pulldown", PIN_CONFIG_BIAS_PULL_DOWN, },
{ "pinctrl-single,input-schmitt-enable",
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index a5205b94b2e6..2081c67667a8 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1408,7 +1408,7 @@ static void __gpio_irq_handler(struct st_gpio_bank *bank)
continue;
}
- generic_handle_irq(irq_find_mapping(bank->gpio_chip.irqdomain, n));
+ generic_handle_irq(irq_find_mapping(bank->gpio_chip.irq.domain, n));
}
}
}
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index 7450f5118445..fb242c542dc9 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -561,7 +561,7 @@ static irqreturn_t sx150x_irq_thread_fn(int irq, void *dev_id)
status = val;
for_each_set_bit(n, &status, pctl->data->ngpios)
- handle_nested_irq(irq_find_mapping(pctl->gpio.irqdomain, n));
+ handle_nested_irq(irq_find_mapping(pctl->gpio.irq.domain, n));
return IRQ_HANDLED;
}
@@ -1087,7 +1087,7 @@ static bool sx150x_reg_volatile(struct device *dev, unsigned int reg)
return reg == pctl->data->reg_irq_src || reg == pctl->data->reg_data;
}
-const struct regmap_config sx150x_regmap_config = {
+static const struct regmap_config sx150x_regmap_config = {
.reg_bits = 8,
.val_bits = 32,
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index ff491da64dab..7a960590ecaa 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -795,7 +795,7 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
g = &pctrl->soc->groups[i];
val = readl(pctrl->regs + g->intr_status_reg);
if (val & BIT(g->intr_status_bit)) {
- irq_pin = irq_find_mapping(gc->irqdomain, i);
+ irq_pin = irq_find_mapping(gc->irq.domain, i);
generic_handle_irq(irq_pin);
handled++;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index c2c0bab04257..3e66e0d10010 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -453,6 +453,7 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
pad = pctldev->desc->pins[pin].drv_data;
+ pad->is_enabled = true;
for (i = 0; i < nconfs; i++) {
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
@@ -600,6 +601,10 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
return ret;
}
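+ /* Apply the resulting master enable state to the EN_CTL register */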
+ val = pad->is_enabled << PMIC_GPIO_REG_MASTER_EN_SHIFT;
+
+ ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_EN_CTL, val);
+
return ret;
}
@@ -1032,6 +1037,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8916-gpio" }, /* 4 GPIO's */
{ .compatible = "qcom,pm8941-gpio" }, /* 36 GPIO's */
{ .compatible = "qcom,pm8994-gpio" }, /* 22 GPIO's */
+ { .compatible = "qcom,pmi8994-gpio" }, /* 10 GPIO's */
{ .compatible = "qcom,pma8084-gpio" }, /* 22 GPIO's */
{ .compatible = "qcom,spmi-gpio" }, /* Generic */
{ },
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index 0357f9701eb9..ecfb90059eeb 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -29,7 +29,7 @@ config PINCTRL_EXYNOS5440
config PINCTRL_S3C24XX
bool "Samsung S3C24XX SoC pinctrl driver"
- depends on ARCH_S3C24XX
+ depends on ARCH_S3C24XX && OF
select PINCTRL_SAMSUNG
config PINCTRL_S3C64XX
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 0c5e952461fd..cf4ae4bc9115 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -24,6 +24,7 @@
#include <linux/of_device.h>
#include <linux/pinctrl/machine.h>
#include <linux/platform_device.h>
+#include <linux/psci.h>
#include <linux/slab.h>
#include "core.h"
@@ -175,19 +176,19 @@ void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned int reg_width,
BUG();
}
-u32 sh_pfc_read_reg(struct sh_pfc *pfc, u32 reg, unsigned int width)
+u32 sh_pfc_read(struct sh_pfc *pfc, u32 reg)
{
- return sh_pfc_read_raw_reg(sh_pfc_phys_to_virt(pfc, reg), width);
+ return sh_pfc_read_raw_reg(sh_pfc_phys_to_virt(pfc, reg), 32);
}
-void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width, u32 data)
+void sh_pfc_write(struct sh_pfc *pfc, u32 reg, u32 data)
{
if (pfc->info->unlock_reg)
sh_pfc_write_raw_reg(
sh_pfc_phys_to_virt(pfc, pfc->info->unlock_reg), 32,
~data);
- sh_pfc_write_raw_reg(sh_pfc_phys_to_virt(pfc, reg), width, data);
+ sh_pfc_write_raw_reg(sh_pfc_phys_to_virt(pfc, reg), 32, data);
}
static void sh_pfc_config_reg_helper(struct sh_pfc *pfc,
@@ -389,15 +390,20 @@ int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
return 0;
}
-const struct sh_pfc_bias_info *
-sh_pfc_pin_to_bias_info(const struct sh_pfc_bias_info *info,
- unsigned int num, unsigned int pin)
+const struct pinmux_bias_reg *
+sh_pfc_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
+ unsigned int *bit)
{
- unsigned int i;
+ unsigned int i, j;
- for (i = 0; i < num; i++)
- if (info[i].pin == pin)
- return &info[i];
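+ /* Walk the bias register table, which is terminated by an entry with no puen register */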
+ for (i = 0; pfc->info->bias_regs[i].puen; i++) {
+ for (j = 0; j < ARRAY_SIZE(pfc->info->bias_regs[i].pins); j++) {
+ if (pfc->info->bias_regs[i].pins[j] == pin) {
+ *bit = j;
+ return &pfc->info->bias_regs[i];
+ }
+ }
+ }
WARN_ONCE(1, "Pin %u is not in bias info list\n", pin);
@@ -567,9 +573,99 @@ static const struct of_device_id sh_pfc_of_table[] = {
};
#endif
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
+static void sh_pfc_nop_reg(struct sh_pfc *pfc, u32 reg, unsigned int idx)
+{
+}
+
+static void sh_pfc_save_reg(struct sh_pfc *pfc, u32 reg, unsigned int idx)
+{
+ pfc->saved_regs[idx] = sh_pfc_read(pfc, reg);
+}
+
+static void sh_pfc_restore_reg(struct sh_pfc *pfc, u32 reg, unsigned int idx)
+{
+ sh_pfc_write(pfc, reg, pfc->saved_regs[idx]);
+}
+
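+/* Invoke do_reg() on every cfg, drive, bias and ioctrl register and return the count */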
+static unsigned int sh_pfc_walk_regs(struct sh_pfc *pfc,
+ void (*do_reg)(struct sh_pfc *pfc, u32 reg, unsigned int idx))
+{
+ unsigned int i, n = 0;
+
+ if (pfc->info->cfg_regs)
+ for (i = 0; pfc->info->cfg_regs[i].reg; i++)
+ do_reg(pfc, pfc->info->cfg_regs[i].reg, n++);
+
+ if (pfc->info->drive_regs)
+ for (i = 0; pfc->info->drive_regs[i].reg; i++)
+ do_reg(pfc, pfc->info->drive_regs[i].reg, n++);
+
+ if (pfc->info->bias_regs)
+ for (i = 0; pfc->info->bias_regs[i].puen; i++) {
+ do_reg(pfc, pfc->info->bias_regs[i].puen, n++);
+ if (pfc->info->bias_regs[i].pud)
+ do_reg(pfc, pfc->info->bias_regs[i].pud, n++);
+ }
+
+ if (pfc->info->ioctrl_regs)
+ for (i = 0; pfc->info->ioctrl_regs[i].reg; i++)
+ do_reg(pfc, pfc->info->ioctrl_regs[i].reg, n++);
+
+ return n;
+}
+
+static int sh_pfc_suspend_init(struct sh_pfc *pfc)
+{
+ unsigned int n;
+
+ /* This is the best we can do to check for the presence of PSCI */
+ if (!psci_ops.cpu_suspend)
+ return 0;
+
+ n = sh_pfc_walk_regs(pfc, sh_pfc_nop_reg);
+ if (!n)
+ return 0;
+
+ pfc->saved_regs = devm_kmalloc_array(pfc->dev, n,
+ sizeof(*pfc->saved_regs),
+ GFP_KERNEL);
+ if (!pfc->saved_regs)
+ return -ENOMEM;
+
+ dev_dbg(pfc->dev, "Allocated space to save %u regs\n", n);
+ return 0;
+}
+
+static int sh_pfc_suspend_noirq(struct device *dev)
+{
+ struct sh_pfc *pfc = dev_get_drvdata(dev);
+
+ if (pfc->saved_regs)
+ sh_pfc_walk_regs(pfc, sh_pfc_save_reg);
+ return 0;
+}
+
+static int sh_pfc_resume_noirq(struct device *dev)
+{
+ struct sh_pfc *pfc = dev_get_drvdata(dev);
+
+ if (pfc->saved_regs)
+ sh_pfc_walk_regs(pfc, sh_pfc_restore_reg);
+ return 0;
+}
+
+static const struct dev_pm_ops sh_pfc_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sh_pfc_suspend_noirq, sh_pfc_resume_noirq)
+};
+#define DEV_PM_OPS &sh_pfc_pm
+#else
+static int sh_pfc_suspend_init(struct sh_pfc *pfc) { return 0; }
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */
+
static int sh_pfc_probe(struct platform_device *pdev)
{
- const struct platform_device_id *platid = platform_get_device_id(pdev);
#ifdef CONFIG_OF
struct device_node *np = pdev->dev.of_node;
#endif
@@ -582,10 +678,7 @@ static int sh_pfc_probe(struct platform_device *pdev)
info = of_device_get_match_data(&pdev->dev);
else
#endif
- info = platid ? (const void *)platid->driver_data : NULL;
-
- if (info == NULL)
- return -ENODEV;
+ info = (const void *)platform_get_device_id(pdev)->driver_data;
pfc = devm_kzalloc(&pdev->dev, sizeof(*pfc), GFP_KERNEL);
if (pfc == NULL)
@@ -609,6 +702,10 @@ static int sh_pfc_probe(struct platform_device *pdev)
info = pfc->info;
}
+ ret = sh_pfc_suspend_init(pfc);
+ if (ret)
+ return ret;
+
/* Enable dummy states for those platforms without pinctrl support */
if (!of_have_populated_dt())
pinctrl_provide_dummies();
@@ -683,7 +780,6 @@ static const struct platform_device_id sh_pfc_id_table[] = {
#ifdef CONFIG_PINCTRL_PFC_SHX3
{ "pfc-shx3", (kernel_ulong_t)&shx3_pinmux_info },
#endif
- { "sh-pfc", 0 },
{ },
};
@@ -693,6 +789,7 @@ static struct platform_driver sh_pfc_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(sh_pfc_of_table),
+ .pm = DEV_PM_OPS,
},
};
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index 6d598dd63720..5af8ee26c03e 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -26,15 +26,14 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc);
u32 sh_pfc_read_raw_reg(void __iomem *mapped_reg, unsigned int reg_width);
void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned int reg_width,
u32 data);
-u32 sh_pfc_read_reg(struct sh_pfc *pfc, u32 reg, unsigned int width);
-void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width,
- u32 data);
+u32 sh_pfc_read(struct sh_pfc *pfc, u32 reg);
+void sh_pfc_write(struct sh_pfc *pfc, u32 reg, u32 data);
int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin);
int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type);
-const struct sh_pfc_bias_info *
-sh_pfc_pin_to_bias_info(const struct sh_pfc_bias_info *info,
- unsigned int num, unsigned int pin);
+const struct pinmux_bias_reg *
+sh_pfc_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
+ unsigned int *bit);
#endif /* __SH_PFC_CORE_H__ */
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 6b5422766f13..946d9be50b62 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -139,12 +139,12 @@ static int gpio_pin_request(struct gpio_chip *gc, unsigned offset)
if (idx < 0 || pfc->info->pins[idx].enum_id == 0)
return -EINVAL;
- return pinctrl_request_gpio(offset);
+ return pinctrl_gpio_request(offset);
}
static void gpio_pin_free(struct gpio_chip *gc, unsigned offset)
{
- return pinctrl_free_gpio(offset);
+ return pinctrl_gpio_free(offset);
}
static void gpio_pin_set_value(struct sh_pfc_chip *chip, unsigned offset,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index c3af9ebee4af..00d61d175249 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -2912,189 +2912,230 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
-#define PUPR0 0x100
-#define PUPR1 0x104
-#define PUPR2 0x108
-#define PUPR3 0x10c
-#define PUPR4 0x110
-#define PUPR5 0x114
-
-static const struct sh_pfc_bias_info bias_info[] = {
- { RCAR_GP_PIN(0, 6), PUPR0, 0 }, /* A0 */
- { RCAR_GP_PIN(0, 7), PUPR0, 1 }, /* A1 */
- { RCAR_GP_PIN(0, 8), PUPR0, 2 }, /* A2 */
- { RCAR_GP_PIN(0, 9), PUPR0, 3 }, /* A3 */
- { RCAR_GP_PIN(0, 10), PUPR0, 4 }, /* A4 */
- { RCAR_GP_PIN(0, 11), PUPR0, 5 }, /* A5 */
- { RCAR_GP_PIN(0, 12), PUPR0, 6 }, /* A6 */
- { RCAR_GP_PIN(0, 13), PUPR0, 7 }, /* A7 */
- { RCAR_GP_PIN(0, 14), PUPR0, 8 }, /* A8 */
- { RCAR_GP_PIN(0, 15), PUPR0, 9 }, /* A9 */
- { RCAR_GP_PIN(0, 16), PUPR0, 10 }, /* A10 */
- { RCAR_GP_PIN(0, 17), PUPR0, 11 }, /* A11 */
- { RCAR_GP_PIN(0, 18), PUPR0, 12 }, /* A12 */
- { RCAR_GP_PIN(0, 19), PUPR0, 13 }, /* A13 */
- { RCAR_GP_PIN(0, 20), PUPR0, 14 }, /* A14 */
- { RCAR_GP_PIN(0, 21), PUPR0, 15 }, /* A15 */
- { RCAR_GP_PIN(0, 22), PUPR0, 16 }, /* A16 */
- { RCAR_GP_PIN(0, 23), PUPR0, 17 }, /* A17 */
- { RCAR_GP_PIN(0, 24), PUPR0, 18 }, /* A18 */
- { RCAR_GP_PIN(0, 25), PUPR0, 19 }, /* A19 */
- { RCAR_GP_PIN(0, 26), PUPR0, 20 }, /* A20 */
- { RCAR_GP_PIN(0, 27), PUPR0, 21 }, /* A21 */
- { RCAR_GP_PIN(0, 28), PUPR0, 22 }, /* A22 */
- { RCAR_GP_PIN(0, 29), PUPR0, 23 }, /* A23 */
- { RCAR_GP_PIN(0, 30), PUPR0, 24 }, /* A24 */
- { RCAR_GP_PIN(0, 31), PUPR0, 25 }, /* A25 */
- { RCAR_GP_PIN(1, 3), PUPR0, 26 }, /* /EX_CS0 */
- { RCAR_GP_PIN(1, 4), PUPR0, 27 }, /* /EX_CS1 */
- { RCAR_GP_PIN(1, 5), PUPR0, 28 }, /* /EX_CS2 */
- { RCAR_GP_PIN(1, 6), PUPR0, 29 }, /* /EX_CS3 */
- { RCAR_GP_PIN(1, 7), PUPR0, 30 }, /* /EX_CS4 */
- { RCAR_GP_PIN(1, 8), PUPR0, 31 }, /* /EX_CS5 */
-
- { RCAR_GP_PIN(0, 0), PUPR1, 0 }, /* /PRESETOUT */
- { RCAR_GP_PIN(0, 5), PUPR1, 1 }, /* /BS */
- { RCAR_GP_PIN(1, 0), PUPR1, 2 }, /* RD//WR */
- { RCAR_GP_PIN(1, 1), PUPR1, 3 }, /* /WE0 */
- { RCAR_GP_PIN(1, 2), PUPR1, 4 }, /* /WE1 */
- { RCAR_GP_PIN(1, 11), PUPR1, 5 }, /* EX_WAIT0 */
- { RCAR_GP_PIN(1, 9), PUPR1, 6 }, /* DREQ0 */
- { RCAR_GP_PIN(1, 10), PUPR1, 7 }, /* DACK0 */
- { RCAR_GP_PIN(1, 12), PUPR1, 8 }, /* IRQ0 */
- { RCAR_GP_PIN(1, 13), PUPR1, 9 }, /* IRQ1 */
-
- { RCAR_GP_PIN(1, 22), PUPR2, 0 }, /* DU0_DR0 */
- { RCAR_GP_PIN(1, 23), PUPR2, 1 }, /* DU0_DR1 */
- { RCAR_GP_PIN(1, 24), PUPR2, 2 }, /* DU0_DR2 */
- { RCAR_GP_PIN(1, 25), PUPR2, 3 }, /* DU0_DR3 */
- { RCAR_GP_PIN(1, 26), PUPR2, 4 }, /* DU0_DR4 */
- { RCAR_GP_PIN(1, 27), PUPR2, 5 }, /* DU0_DR5 */
- { RCAR_GP_PIN(1, 28), PUPR2, 6 }, /* DU0_DR6 */
- { RCAR_GP_PIN(1, 29), PUPR2, 7 }, /* DU0_DR7 */
- { RCAR_GP_PIN(1, 30), PUPR2, 8 }, /* DU0_DG0 */
- { RCAR_GP_PIN(1, 31), PUPR2, 9 }, /* DU0_DG1 */
- { RCAR_GP_PIN(2, 0), PUPR2, 10 }, /* DU0_DG2 */
- { RCAR_GP_PIN(2, 1), PUPR2, 11 }, /* DU0_DG3 */
- { RCAR_GP_PIN(2, 2), PUPR2, 12 }, /* DU0_DG4 */
- { RCAR_GP_PIN(2, 3), PUPR2, 13 }, /* DU0_DG5 */
- { RCAR_GP_PIN(2, 4), PUPR2, 14 }, /* DU0_DG6 */
- { RCAR_GP_PIN(2, 5), PUPR2, 15 }, /* DU0_DG7 */
- { RCAR_GP_PIN(2, 6), PUPR2, 16 }, /* DU0_DB0 */
- { RCAR_GP_PIN(2, 7), PUPR2, 17 }, /* DU0_DB1 */
- { RCAR_GP_PIN(2, 8), PUPR2, 18 }, /* DU0_DB2 */
- { RCAR_GP_PIN(2, 9), PUPR2, 19 }, /* DU0_DB3 */
- { RCAR_GP_PIN(2, 10), PUPR2, 20 }, /* DU0_DB4 */
- { RCAR_GP_PIN(2, 11), PUPR2, 21 }, /* DU0_DB5 */
- { RCAR_GP_PIN(2, 12), PUPR2, 22 }, /* DU0_DB6 */
- { RCAR_GP_PIN(2, 13), PUPR2, 23 }, /* DU0_DB7 */
- { RCAR_GP_PIN(2, 14), PUPR2, 24 }, /* DU0_DOTCLKIN */
- { RCAR_GP_PIN(2, 15), PUPR2, 25 }, /* DU0_DOTCLKOUT0 */
- { RCAR_GP_PIN(2, 17), PUPR2, 26 }, /* DU0_HSYNC */
- { RCAR_GP_PIN(2, 18), PUPR2, 27 }, /* DU0_VSYNC */
- { RCAR_GP_PIN(2, 19), PUPR2, 28 }, /* DU0_EXODDF */
- { RCAR_GP_PIN(2, 20), PUPR2, 29 }, /* DU0_DISP */
- { RCAR_GP_PIN(2, 21), PUPR2, 30 }, /* DU0_CDE */
- { RCAR_GP_PIN(2, 16), PUPR2, 31 }, /* DU0_DOTCLKOUT1 */
-
- { RCAR_GP_PIN(3, 24), PUPR3, 0 }, /* VI0_CLK */
- { RCAR_GP_PIN(3, 25), PUPR3, 1 }, /* VI0_CLKENB */
- { RCAR_GP_PIN(3, 26), PUPR3, 2 }, /* VI0_FIELD */
- { RCAR_GP_PIN(3, 27), PUPR3, 3 }, /* /VI0_HSYNC */
- { RCAR_GP_PIN(3, 28), PUPR3, 4 }, /* /VI0_VSYNC */
- { RCAR_GP_PIN(3, 29), PUPR3, 5 }, /* VI0_DATA0 */
- { RCAR_GP_PIN(3, 30), PUPR3, 6 }, /* VI0_DATA1 */
- { RCAR_GP_PIN(3, 31), PUPR3, 7 }, /* VI0_DATA2 */
- { RCAR_GP_PIN(4, 0), PUPR3, 8 }, /* VI0_DATA3 */
- { RCAR_GP_PIN(4, 1), PUPR3, 9 }, /* VI0_DATA4 */
- { RCAR_GP_PIN(4, 2), PUPR3, 10 }, /* VI0_DATA5 */
- { RCAR_GP_PIN(4, 3), PUPR3, 11 }, /* VI0_DATA6 */
- { RCAR_GP_PIN(4, 4), PUPR3, 12 }, /* VI0_DATA7 */
- { RCAR_GP_PIN(4, 5), PUPR3, 13 }, /* VI0_G2 */
- { RCAR_GP_PIN(4, 6), PUPR3, 14 }, /* VI0_G3 */
- { RCAR_GP_PIN(4, 7), PUPR3, 15 }, /* VI0_G4 */
- { RCAR_GP_PIN(4, 8), PUPR3, 16 }, /* VI0_G5 */
- { RCAR_GP_PIN(4, 21), PUPR3, 17 }, /* VI1_DATA12 */
- { RCAR_GP_PIN(4, 22), PUPR3, 18 }, /* VI1_DATA13 */
- { RCAR_GP_PIN(4, 23), PUPR3, 19 }, /* VI1_DATA14 */
- { RCAR_GP_PIN(4, 24), PUPR3, 20 }, /* VI1_DATA15 */
- { RCAR_GP_PIN(4, 9), PUPR3, 21 }, /* ETH_REF_CLK */
- { RCAR_GP_PIN(4, 10), PUPR3, 22 }, /* ETH_TXD0 */
- { RCAR_GP_PIN(4, 11), PUPR3, 23 }, /* ETH_TXD1 */
- { RCAR_GP_PIN(4, 12), PUPR3, 24 }, /* ETH_CRS_DV */
- { RCAR_GP_PIN(4, 13), PUPR3, 25 }, /* ETH_TX_EN */
- { RCAR_GP_PIN(4, 14), PUPR3, 26 }, /* ETH_RX_ER */
- { RCAR_GP_PIN(4, 15), PUPR3, 27 }, /* ETH_RXD0 */
- { RCAR_GP_PIN(4, 16), PUPR3, 28 }, /* ETH_RXD1 */
- { RCAR_GP_PIN(4, 17), PUPR3, 29 }, /* ETH_MDC */
- { RCAR_GP_PIN(4, 18), PUPR3, 30 }, /* ETH_MDIO */
- { RCAR_GP_PIN(4, 19), PUPR3, 31 }, /* ETH_LINK */
-
- { RCAR_GP_PIN(3, 6), PUPR4, 0 }, /* SSI_SCK012 */
- { RCAR_GP_PIN(3, 7), PUPR4, 1 }, /* SSI_WS012 */
- { RCAR_GP_PIN(3, 10), PUPR4, 2 }, /* SSI_SDATA0 */
- { RCAR_GP_PIN(3, 9), PUPR4, 3 }, /* SSI_SDATA1 */
- { RCAR_GP_PIN(3, 8), PUPR4, 4 }, /* SSI_SDATA2 */
- { RCAR_GP_PIN(3, 2), PUPR4, 5 }, /* SSI_SCK34 */
- { RCAR_GP_PIN(3, 3), PUPR4, 6 }, /* SSI_WS34 */
- { RCAR_GP_PIN(3, 5), PUPR4, 7 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(3, 4), PUPR4, 8 }, /* SSI_SDATA4 */
- { RCAR_GP_PIN(2, 31), PUPR4, 9 }, /* SSI_SCK5 */
- { RCAR_GP_PIN(3, 0), PUPR4, 10 }, /* SSI_WS5 */
- { RCAR_GP_PIN(3, 1), PUPR4, 11 }, /* SSI_SDATA5 */
- { RCAR_GP_PIN(2, 28), PUPR4, 12 }, /* SSI_SCK6 */
- { RCAR_GP_PIN(2, 29), PUPR4, 13 }, /* SSI_WS6 */
- { RCAR_GP_PIN(2, 30), PUPR4, 14 }, /* SSI_SDATA6 */
- { RCAR_GP_PIN(2, 24), PUPR4, 15 }, /* SSI_SCK78 */
- { RCAR_GP_PIN(2, 25), PUPR4, 16 }, /* SSI_WS78 */
- { RCAR_GP_PIN(2, 27), PUPR4, 17 }, /* SSI_SDATA7 */
- { RCAR_GP_PIN(2, 26), PUPR4, 18 }, /* SSI_SDATA8 */
- { RCAR_GP_PIN(3, 23), PUPR4, 19 }, /* TCLK0 */
- { RCAR_GP_PIN(3, 11), PUPR4, 20 }, /* SD0_CLK */
- { RCAR_GP_PIN(3, 12), PUPR4, 21 }, /* SD0_CMD */
- { RCAR_GP_PIN(3, 13), PUPR4, 22 }, /* SD0_DAT0 */
- { RCAR_GP_PIN(3, 14), PUPR4, 23 }, /* SD0_DAT1 */
- { RCAR_GP_PIN(3, 15), PUPR4, 24 }, /* SD0_DAT2 */
- { RCAR_GP_PIN(3, 16), PUPR4, 25 }, /* SD0_DAT3 */
- { RCAR_GP_PIN(3, 17), PUPR4, 26 }, /* SD0_CD */
- { RCAR_GP_PIN(3, 18), PUPR4, 27 }, /* SD0_WP */
- { RCAR_GP_PIN(2, 22), PUPR4, 28 }, /* AUDIO_CLKA */
- { RCAR_GP_PIN(2, 23), PUPR4, 29 }, /* AUDIO_CLKB */
- { RCAR_GP_PIN(1, 14), PUPR4, 30 }, /* IRQ2 */
- { RCAR_GP_PIN(1, 15), PUPR4, 31 }, /* IRQ3 */
-
- { RCAR_GP_PIN(0, 1), PUPR5, 0 }, /* PENC0 */
- { RCAR_GP_PIN(0, 2), PUPR5, 1 }, /* PENC1 */
- { RCAR_GP_PIN(0, 3), PUPR5, 2 }, /* USB_OVC0 */
- { RCAR_GP_PIN(0, 4), PUPR5, 3 }, /* USB_OVC1 */
- { RCAR_GP_PIN(1, 16), PUPR5, 4 }, /* SCIF_CLK */
- { RCAR_GP_PIN(1, 17), PUPR5, 5 }, /* TX0 */
- { RCAR_GP_PIN(1, 18), PUPR5, 6 }, /* RX0 */
- { RCAR_GP_PIN(1, 19), PUPR5, 7 }, /* SCK0 */
- { RCAR_GP_PIN(1, 20), PUPR5, 8 }, /* /CTS0 */
- { RCAR_GP_PIN(1, 21), PUPR5, 9 }, /* /RTS0 */
- { RCAR_GP_PIN(3, 19), PUPR5, 10 }, /* HSPI_CLK0 */
- { RCAR_GP_PIN(3, 20), PUPR5, 11 }, /* /HSPI_CS0 */
- { RCAR_GP_PIN(3, 21), PUPR5, 12 }, /* HSPI_RX0 */
- { RCAR_GP_PIN(3, 22), PUPR5, 13 }, /* HSPI_TX0 */
- { RCAR_GP_PIN(4, 20), PUPR5, 14 }, /* ETH_MAGIC */
- { RCAR_GP_PIN(4, 25), PUPR5, 15 }, /* AVS1 */
- { RCAR_GP_PIN(4, 26), PUPR5, 16 }, /* AVS2 */
+#define PIN_NONE U16_MAX
+
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUPR0", 0x100, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(0, 6), /* A0 */
+ [ 1] = RCAR_GP_PIN(0, 7), /* A1 */
+ [ 2] = RCAR_GP_PIN(0, 8), /* A2 */
+ [ 3] = RCAR_GP_PIN(0, 9), /* A3 */
+ [ 4] = RCAR_GP_PIN(0, 10), /* A4 */
+ [ 5] = RCAR_GP_PIN(0, 11), /* A5 */
+ [ 6] = RCAR_GP_PIN(0, 12), /* A6 */
+ [ 7] = RCAR_GP_PIN(0, 13), /* A7 */
+ [ 8] = RCAR_GP_PIN(0, 14), /* A8 */
+ [ 9] = RCAR_GP_PIN(0, 15), /* A9 */
+ [10] = RCAR_GP_PIN(0, 16), /* A10 */
+ [11] = RCAR_GP_PIN(0, 17), /* A11 */
+ [12] = RCAR_GP_PIN(0, 18), /* A12 */
+ [13] = RCAR_GP_PIN(0, 19), /* A13 */
+ [14] = RCAR_GP_PIN(0, 20), /* A14 */
+ [15] = RCAR_GP_PIN(0, 21), /* A15 */
+ [16] = RCAR_GP_PIN(0, 22), /* A16 */
+ [17] = RCAR_GP_PIN(0, 23), /* A17 */
+ [18] = RCAR_GP_PIN(0, 24), /* A18 */
+ [19] = RCAR_GP_PIN(0, 25), /* A19 */
+ [20] = RCAR_GP_PIN(0, 26), /* A20 */
+ [21] = RCAR_GP_PIN(0, 27), /* A21 */
+ [22] = RCAR_GP_PIN(0, 28), /* A22 */
+ [23] = RCAR_GP_PIN(0, 29), /* A23 */
+ [24] = RCAR_GP_PIN(0, 30), /* A24 */
+ [25] = RCAR_GP_PIN(0, 31), /* A25 */
+ [26] = RCAR_GP_PIN(1, 3), /* /EX_CS0 */
+ [27] = RCAR_GP_PIN(1, 4), /* /EX_CS1 */
+ [28] = RCAR_GP_PIN(1, 5), /* /EX_CS2 */
+ [29] = RCAR_GP_PIN(1, 6), /* /EX_CS3 */
+ [30] = RCAR_GP_PIN(1, 7), /* /EX_CS4 */
+ [31] = RCAR_GP_PIN(1, 8), /* /EX_CS5 */
+ } },
+ { PINMUX_BIAS_REG("PUPR1", 0x104, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(0, 0), /* /PRESETOUT */
+ [ 1] = RCAR_GP_PIN(0, 5), /* /BS */
+ [ 2] = RCAR_GP_PIN(1, 0), /* RD//WR */
+ [ 3] = RCAR_GP_PIN(1, 1), /* /WE0 */
+ [ 4] = RCAR_GP_PIN(1, 2), /* /WE1 */
+ [ 5] = RCAR_GP_PIN(1, 11), /* EX_WAIT0 */
+ [ 6] = RCAR_GP_PIN(1, 9), /* DREQ0 */
+ [ 7] = RCAR_GP_PIN(1, 10), /* DACK0 */
+ [ 8] = RCAR_GP_PIN(1, 12), /* IRQ0 */
+ [ 9] = RCAR_GP_PIN(1, 13), /* IRQ1 */
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NONE,
+ [16] = PIN_NONE,
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUPR2", 0x108, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(1, 22), /* DU0_DR0 */
+ [ 1] = RCAR_GP_PIN(1, 23), /* DU0_DR1 */
+ [ 2] = RCAR_GP_PIN(1, 24), /* DU0_DR2 */
+ [ 3] = RCAR_GP_PIN(1, 25), /* DU0_DR3 */
+ [ 4] = RCAR_GP_PIN(1, 26), /* DU0_DR4 */
+ [ 5] = RCAR_GP_PIN(1, 27), /* DU0_DR5 */
+ [ 6] = RCAR_GP_PIN(1, 28), /* DU0_DR6 */
+ [ 7] = RCAR_GP_PIN(1, 29), /* DU0_DR7 */
+ [ 8] = RCAR_GP_PIN(1, 30), /* DU0_DG0 */
+ [ 9] = RCAR_GP_PIN(1, 31), /* DU0_DG1 */
+ [10] = RCAR_GP_PIN(2, 0), /* DU0_DG2 */
+ [11] = RCAR_GP_PIN(2, 1), /* DU0_DG3 */
+ [12] = RCAR_GP_PIN(2, 2), /* DU0_DG4 */
+ [13] = RCAR_GP_PIN(2, 3), /* DU0_DG5 */
+ [14] = RCAR_GP_PIN(2, 4), /* DU0_DG6 */
+ [15] = RCAR_GP_PIN(2, 5), /* DU0_DG7 */
+ [16] = RCAR_GP_PIN(2, 6), /* DU0_DB0 */
+ [17] = RCAR_GP_PIN(2, 7), /* DU0_DB1 */
+ [18] = RCAR_GP_PIN(2, 8), /* DU0_DB2 */
+ [19] = RCAR_GP_PIN(2, 9), /* DU0_DB3 */
+ [20] = RCAR_GP_PIN(2, 10), /* DU0_DB4 */
+ [21] = RCAR_GP_PIN(2, 11), /* DU0_DB5 */
+ [22] = RCAR_GP_PIN(2, 12), /* DU0_DB6 */
+ [23] = RCAR_GP_PIN(2, 13), /* DU0_DB7 */
+ [24] = RCAR_GP_PIN(2, 14), /* DU0_DOTCLKIN */
+ [25] = RCAR_GP_PIN(2, 15), /* DU0_DOTCLKOUT0 */
+ [26] = RCAR_GP_PIN(2, 17), /* DU0_HSYNC */
+ [27] = RCAR_GP_PIN(2, 18), /* DU0_VSYNC */
+ [28] = RCAR_GP_PIN(2, 19), /* DU0_EXODDF */
+ [29] = RCAR_GP_PIN(2, 20), /* DU0_DISP */
+ [30] = RCAR_GP_PIN(2, 21), /* DU0_CDE */
+ [31] = RCAR_GP_PIN(2, 16), /* DU0_DOTCLKOUT1 */
+ } },
+ { PINMUX_BIAS_REG("PUPR3", 0x10c, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(3, 24), /* VI0_CLK */
+ [ 1] = RCAR_GP_PIN(3, 25), /* VI0_CLKENB */
+ [ 2] = RCAR_GP_PIN(3, 26), /* VI0_FIELD */
+ [ 3] = RCAR_GP_PIN(3, 27), /* /VI0_HSYNC */
+ [ 4] = RCAR_GP_PIN(3, 28), /* /VI0_VSYNC */
+ [ 5] = RCAR_GP_PIN(3, 29), /* VI0_DATA0 */
+ [ 6] = RCAR_GP_PIN(3, 30), /* VI0_DATA1 */
+ [ 7] = RCAR_GP_PIN(3, 31), /* VI0_DATA2 */
+ [ 8] = RCAR_GP_PIN(4, 0), /* VI0_DATA3 */
+ [ 9] = RCAR_GP_PIN(4, 1), /* VI0_DATA4 */
+ [10] = RCAR_GP_PIN(4, 2), /* VI0_DATA5 */
+ [11] = RCAR_GP_PIN(4, 3), /* VI0_DATA6 */
+ [12] = RCAR_GP_PIN(4, 4), /* VI0_DATA7 */
+ [13] = RCAR_GP_PIN(4, 5), /* VI0_G2 */
+ [14] = RCAR_GP_PIN(4, 6), /* VI0_G3 */
+ [15] = RCAR_GP_PIN(4, 7), /* VI0_G4 */
+ [16] = RCAR_GP_PIN(4, 8), /* VI0_G5 */
+ [17] = RCAR_GP_PIN(4, 21), /* VI1_DATA12 */
+ [18] = RCAR_GP_PIN(4, 22), /* VI1_DATA13 */
+ [19] = RCAR_GP_PIN(4, 23), /* VI1_DATA14 */
+ [20] = RCAR_GP_PIN(4, 24), /* VI1_DATA15 */
+ [21] = RCAR_GP_PIN(4, 9), /* ETH_REF_CLK */
+ [22] = RCAR_GP_PIN(4, 10), /* ETH_TXD0 */
+ [23] = RCAR_GP_PIN(4, 11), /* ETH_TXD1 */
+ [24] = RCAR_GP_PIN(4, 12), /* ETH_CRS_DV */
+ [25] = RCAR_GP_PIN(4, 13), /* ETH_TX_EN */
+ [26] = RCAR_GP_PIN(4, 14), /* ETH_RX_ER */
+ [27] = RCAR_GP_PIN(4, 15), /* ETH_RXD0 */
+ [28] = RCAR_GP_PIN(4, 16), /* ETH_RXD1 */
+ [29] = RCAR_GP_PIN(4, 17), /* ETH_MDC */
+ [30] = RCAR_GP_PIN(4, 18), /* ETH_MDIO */
+ [31] = RCAR_GP_PIN(4, 19), /* ETH_LINK */
+ } },
+ { PINMUX_BIAS_REG("PUPR4", 0x110, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(3, 6), /* SSI_SCK012 */
+ [ 1] = RCAR_GP_PIN(3, 7), /* SSI_WS012 */
+ [ 2] = RCAR_GP_PIN(3, 10), /* SSI_SDATA0 */
+ [ 3] = RCAR_GP_PIN(3, 9), /* SSI_SDATA1 */
+ [ 4] = RCAR_GP_PIN(3, 8), /* SSI_SDATA2 */
+ [ 5] = RCAR_GP_PIN(3, 2), /* SSI_SCK34 */
+ [ 6] = RCAR_GP_PIN(3, 3), /* SSI_WS34 */
+ [ 7] = RCAR_GP_PIN(3, 5), /* SSI_SDATA3 */
+ [ 8] = RCAR_GP_PIN(3, 4), /* SSI_SDATA4 */
+ [ 9] = RCAR_GP_PIN(2, 31), /* SSI_SCK5 */
+ [10] = RCAR_GP_PIN(3, 0), /* SSI_WS5 */
+ [11] = RCAR_GP_PIN(3, 1), /* SSI_SDATA5 */
+ [12] = RCAR_GP_PIN(2, 28), /* SSI_SCK6 */
+ [13] = RCAR_GP_PIN(2, 29), /* SSI_WS6 */
+ [14] = RCAR_GP_PIN(2, 30), /* SSI_SDATA6 */
+ [15] = RCAR_GP_PIN(2, 24), /* SSI_SCK78 */
+ [16] = RCAR_GP_PIN(2, 25), /* SSI_WS78 */
+ [17] = RCAR_GP_PIN(2, 27), /* SSI_SDATA7 */
+ [18] = RCAR_GP_PIN(2, 26), /* SSI_SDATA8 */
+ [19] = RCAR_GP_PIN(3, 23), /* TCLK0 */
+ [20] = RCAR_GP_PIN(3, 11), /* SD0_CLK */
+ [21] = RCAR_GP_PIN(3, 12), /* SD0_CMD */
+ [22] = RCAR_GP_PIN(3, 13), /* SD0_DAT0 */
+ [23] = RCAR_GP_PIN(3, 14), /* SD0_DAT1 */
+ [24] = RCAR_GP_PIN(3, 15), /* SD0_DAT2 */
+ [25] = RCAR_GP_PIN(3, 16), /* SD0_DAT3 */
+ [26] = RCAR_GP_PIN(3, 17), /* SD0_CD */
+ [27] = RCAR_GP_PIN(3, 18), /* SD0_WP */
+ [28] = RCAR_GP_PIN(2, 22), /* AUDIO_CLKA */
+ [29] = RCAR_GP_PIN(2, 23), /* AUDIO_CLKB */
+ [30] = RCAR_GP_PIN(1, 14), /* IRQ2 */
+ [31] = RCAR_GP_PIN(1, 15), /* IRQ3 */
+ } },
+ { PINMUX_BIAS_REG("PUPR5", 0x114, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(0, 1), /* PENC0 */
+ [ 1] = RCAR_GP_PIN(0, 2), /* PENC1 */
+ [ 2] = RCAR_GP_PIN(0, 3), /* USB_OVC0 */
+ [ 3] = RCAR_GP_PIN(0, 4), /* USB_OVC1 */
+ [ 4] = RCAR_GP_PIN(1, 16), /* SCIF_CLK */
+ [ 5] = RCAR_GP_PIN(1, 17), /* TX0 */
+ [ 6] = RCAR_GP_PIN(1, 18), /* RX0 */
+ [ 7] = RCAR_GP_PIN(1, 19), /* SCK0 */
+ [ 8] = RCAR_GP_PIN(1, 20), /* /CTS0 */
+ [ 9] = RCAR_GP_PIN(1, 21), /* /RTS0 */
+ [10] = RCAR_GP_PIN(3, 19), /* HSPI_CLK0 */
+ [11] = RCAR_GP_PIN(3, 20), /* /HSPI_CS0 */
+ [12] = RCAR_GP_PIN(3, 21), /* HSPI_RX0 */
+ [13] = RCAR_GP_PIN(3, 22), /* HSPI_TX0 */
+ [14] = RCAR_GP_PIN(4, 20), /* ETH_MAGIC */
+ [15] = RCAR_GP_PIN(4, 25), /* AVS1 */
+ [16] = RCAR_GP_PIN(4, 26), /* AVS2 */
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { /* sentinel */ },
};
static unsigned int r8a7778_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
void __iomem *addr;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return PIN_CONFIG_BIAS_DISABLE;
- addr = pfc->windows->virt + info->reg;
+ addr = pfc->windows->virt + reg->puen;
- if (ioread32(addr) & BIT(info->bit))
+ if (ioread32(addr) & BIT(bit))
return PIN_CONFIG_BIAS_PULL_UP;
else
return PIN_CONFIG_BIAS_DISABLE;
@@ -3103,21 +3144,20 @@ static unsigned int r8a7778_pinmux_get_bias(struct sh_pfc *pfc,
static void r8a7778_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
void __iomem *addr;
+ unsigned int bit;
u32 value;
- u32 bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return;
- addr = pfc->windows->virt + info->reg;
- bit = BIT(info->bit);
+ addr = pfc->windows->virt + reg->puen;
- value = ioread32(addr) & ~bit;
+ value = ioread32(addr) & ~BIT(bit);
if (bias == PIN_CONFIG_BIAS_PULL_UP)
- value |= bit;
+ value |= BIT(bit);
iowrite32(value, addr);
}
@@ -3144,6 +3184,7 @@ const struct sh_pfc_soc_info r8a7778_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions),
.cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index a0ed220071f5..333a3470e842 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -5097,6 +5097,7 @@ static const struct sh_pfc_soc_operations r8a7794_pinmux_ops = {
#ifdef CONFIG_PINCTRL_PFC_R8A7745
const struct sh_pfc_soc_info r8a7745_pinmux_info = {
.name = "r8a77450_pfc",
+ .ops = &r8a7794_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
index 95fd0994893a..1d4d84f34d60 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
@@ -1443,12 +1443,13 @@ static const u16 pinmux_data[] = {
};
/*
- * R8A7795 has 8 banks with 32 PGIOS in each => 256 GPIOs.
+ * R8A7795 has 8 banks with 32 GPIOs in each => 256 GPIOs.
* Physical layout rows: A - AW, cols: 1 - 39.
*/
#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r))
#define PIN_NUMBER(r, c) (((r) - 'A') * 39 + (c) + 300)
#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
+#define PIN_NONE U16_MAX
static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
@@ -3774,6 +3775,23 @@ static const unsigned int usb2_mux[] = {
USB2_PWEN_MARK, USB2_OVC_MARK,
};
+/* - USB30 ------------------------------------------------------------------ */
+static const unsigned int usb30_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int usb30_mux[] = {
+ USB30_PWEN_MARK, USB30_OVC_MARK,
+};
+/* - USB31 ------------------------------------------------------------------ */
+static const unsigned int usb31_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
+};
+static const unsigned int usb31_mux[] = {
+ USB31_PWEN_MARK, USB31_OVC_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(audio_clk_a_a),
SH_PFC_PIN_GROUP(audio_clk_a_b),
@@ -4080,6 +4098,8 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb1),
SH_PFC_PIN_GROUP(usb2),
+ SH_PFC_PIN_GROUP(usb30),
+ SH_PFC_PIN_GROUP(usb31),
};
static const char * const audio_clk_groups[] = {
@@ -4537,6 +4557,14 @@ static const char * const usb2_groups[] = {
"usb2",
};
+static const char * const usb30_groups[] = {
+ "usb30",
+};
+
+static const char * const usb31_groups[] = {
+ "usb31",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
@@ -4588,6 +4616,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
SH_PFC_FUNCTION(usb2),
+ SH_PFC_FUNCTION(usb30),
+ SH_PFC_FUNCTION(usb31),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -5393,12 +5423,21 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ },
};
+enum ioctrl_regs {
+ POCCTRL,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POCCTRL] = { 0xe6060380, },
+ { /* sentinel */ },
+};
+
static int r8a7795es1_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
u32 *pocctrl)
{
int bit = -EINVAL;
- *pocctrl = 0xe6060380;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL].reg;
if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
bit = pin & 0x1f;
@@ -5409,242 +5448,261 @@ static int r8a7795es1_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
return bit;
}
-#define PUEN 0xe6060400
-#define PUD 0xe6060440
-
-#define PU0 0x00
-#define PU1 0x04
-#define PU2 0x08
-#define PU3 0x0c
-#define PU4 0x10
-#define PU5 0x14
-#define PU6 0x18
-
-static const struct sh_pfc_bias_info bias_info[] = {
- { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */
- { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */
- { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */
- { PIN_NUMBER('A', 9), PU0, 28 }, /* AVB_MDIO */
- { PIN_NUMBER('A', 12), PU0, 27 }, /* AVB_TXCREFCLK */
- { PIN_NUMBER('B', 17), PU0, 26 }, /* AVB_TD3 */
- { PIN_NUMBER('A', 17), PU0, 25 }, /* AVB_TD2 */
- { PIN_NUMBER('B', 18), PU0, 24 }, /* AVB_TD1 */
- { PIN_NUMBER('A', 18), PU0, 23 }, /* AVB_TD0 */
- { PIN_NUMBER('A', 19), PU0, 22 }, /* AVB_TXC */
- { PIN_NUMBER('A', 8), PU0, 21 }, /* AVB_TX_CTL */
- { PIN_NUMBER('B', 14), PU0, 20 }, /* AVB_RD3 */
- { PIN_NUMBER('A', 14), PU0, 19 }, /* AVB_RD2 */
- { PIN_NUMBER('B', 13), PU0, 18 }, /* AVB_RD1 */
- { PIN_NUMBER('A', 13), PU0, 17 }, /* AVB_RD0 */
- { PIN_NUMBER('B', 19), PU0, 16 }, /* AVB_RXC */
- { PIN_NUMBER('A', 16), PU0, 15 }, /* AVB_RX_CTL */
- { PIN_NUMBER('V', 7), PU0, 14 }, /* RPC_RESET# */
- { PIN_NUMBER('V', 6), PU0, 13 }, /* RPC_WP# */
- { PIN_NUMBER('Y', 7), PU0, 12 }, /* RPC_INT# */
- { PIN_NUMBER('V', 5), PU0, 11 }, /* QSPI1_SSL */
- { PIN_A_NUMBER('C', 3), PU0, 10 }, /* QSPI1_IO3 */
- { PIN_A_NUMBER('E', 4), PU0, 9 }, /* QSPI1_IO2 */
- { PIN_A_NUMBER('E', 5), PU0, 8 }, /* QSPI1_MISO_IO1 */
- { PIN_A_NUMBER('C', 7), PU0, 7 }, /* QSPI1_MOSI_IO0 */
- { PIN_NUMBER('V', 3), PU0, 6 }, /* QSPI1_SPCLK */
- { PIN_NUMBER('Y', 3), PU0, 5 }, /* QSPI0_SSL */
- { PIN_A_NUMBER('B', 6), PU0, 4 }, /* QSPI0_IO3 */
- { PIN_NUMBER('Y', 6), PU0, 3 }, /* QSPI0_IO2 */
- { PIN_A_NUMBER('B', 4), PU0, 2 }, /* QSPI0_MISO_IO1 */
- { PIN_A_NUMBER('C', 5), PU0, 1 }, /* QSPI0_MOSI_IO0 */
- { PIN_NUMBER('W', 3), PU0, 0 }, /* QSPI0_SPCLK */
-
- { RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */
- { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */
- { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */
- { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */
- { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */
- { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */
- { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */
- { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */
- { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */
- { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */
- { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */
- { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */
- { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */
- { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */
- { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */
- { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */
- { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */
- { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */
- { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */
- { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */
- { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */
- { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */
- { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */
- { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */
- { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */
- { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */
- { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */
- { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */
- { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */
- { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
- { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */
- { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */
-
- { PIN_A_NUMBER('P', 8), PU2, 31 }, /* DU_DOTCLKIN1 */
- { PIN_A_NUMBER('P', 7), PU2, 30 }, /* DU_DOTCLKIN0 */
- { RCAR_GP_PIN(7, 3), PU2, 29 }, /* HDMI1_CEC */
- { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */
- { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */
- { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */
- { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */
- { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */
- { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */
- { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */
- { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */
- { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */
- { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */
- { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */
- { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */
- { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */
- { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */
- { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */
- { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */
- { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */
- { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */
- { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */
- { PIN_NUMBER('C', 1), PU2, 9 }, /* PRESETOUT# */
- { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */
- { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */
- { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */
- { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */
- { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */
- { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */
- { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N_A26 */
- { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */
- { PIN_NUMBER('F', 1), PU2, 0 }, /* CLKOUT */
-
- { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */
- { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */
- { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */
- { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */
- { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */
- { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */
- { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */
- { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */
- { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */
- { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */
- { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */
- { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */
- { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */
- { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */
- { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */
- { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */
- { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */
- { RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */
- { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */
- { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */
- { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */
- { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */
- { PIN_A_NUMBER('T', 30), PU3, 9 }, /* ASEBRK */
- /* bit 8 n/a */
- { PIN_A_NUMBER('R', 29), PU3, 7 }, /* TDI */
- { PIN_A_NUMBER('R', 30), PU3, 6 }, /* TMS */
- { PIN_A_NUMBER('T', 27), PU3, 5 }, /* TCK */
- { PIN_A_NUMBER('R', 26), PU3, 4 }, /* TRST# */
- { PIN_A_NUMBER('D', 39), PU3, 3 }, /* EXTALR*/
- { PIN_A_NUMBER('D', 38), PU3, 2 }, /* FSCLKST# */
- { PIN_A_NUMBER('R', 8), PU3, 1 }, /* DU_DOTCLKIN3 */
- { PIN_A_NUMBER('R', 7), PU3, 0 }, /* DU_DOTCLKIN2 */
-
- { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */
- { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */
- { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */
- { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */
- { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */
- { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */
- { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */
- { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */
- { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */
- { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */
- { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */
- { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */
- { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */
- { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */
- { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */
- { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */
- { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */
- { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */
- { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */
- { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */
- { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */
- { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */
- { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */
- { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */
- { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */
- { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */
- { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */
- { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */
- { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */
- { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */
- { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */
- { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */
-
- { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */
- { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */
- { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */
- { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */
- { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */
- { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */
- { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */
- { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */
- { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */
- { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */
- { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */
- { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */
- { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */
- { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */
- { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */
- { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
- { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
- { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS349 */
- { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK349 */
- { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
- { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
- { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
- { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */
- { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */
- { PIN_NUMBER('H', 37), PU5, 6 }, /* MLB_REF */
- { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */
- { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */
- { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */
- { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */
- { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */
- { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */
-
- { RCAR_GP_PIN(6, 31), PU6, 6 }, /* USB31_OVC */
- { RCAR_GP_PIN(6, 30), PU6, 5 }, /* USB31_PWEN */
- { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */
- { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN */
- { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */
- { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */
- { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
+ [ 0] = PIN_NUMBER('W', 3), /* QSPI0_SPCLK */
+ [ 1] = PIN_A_NUMBER('C', 5), /* QSPI0_MOSI_IO0 */
+ [ 2] = PIN_A_NUMBER('B', 4), /* QSPI0_MISO_IO1 */
+ [ 3] = PIN_NUMBER('Y', 6), /* QSPI0_IO2 */
+ [ 4] = PIN_A_NUMBER('B', 6), /* QSPI0_IO3 */
+ [ 5] = PIN_NUMBER('Y', 3), /* QSPI0_SSL */
+ [ 6] = PIN_NUMBER('V', 3), /* QSPI1_SPCLK */
+ [ 7] = PIN_A_NUMBER('C', 7), /* QSPI1_MOSI_IO0 */
+ [ 8] = PIN_A_NUMBER('E', 5), /* QSPI1_MISO_IO1 */
+ [ 9] = PIN_A_NUMBER('E', 4), /* QSPI1_IO2 */
+ [10] = PIN_A_NUMBER('C', 3), /* QSPI1_IO3 */
+ [11] = PIN_NUMBER('V', 5), /* QSPI1_SSL */
+ [12] = PIN_NUMBER('Y', 7), /* RPC_INT# */
+ [13] = PIN_NUMBER('V', 6), /* RPC_WP# */
+ [14] = PIN_NUMBER('V', 7), /* RPC_RESET# */
+ [15] = PIN_NUMBER('A', 16), /* AVB_RX_CTL */
+ [16] = PIN_NUMBER('B', 19), /* AVB_RXC */
+ [17] = PIN_NUMBER('A', 13), /* AVB_RD0 */
+ [18] = PIN_NUMBER('B', 13), /* AVB_RD1 */
+ [19] = PIN_NUMBER('A', 14), /* AVB_RD2 */
+ [20] = PIN_NUMBER('B', 14), /* AVB_RD3 */
+ [21] = PIN_NUMBER('A', 8), /* AVB_TX_CTL */
+ [22] = PIN_NUMBER('A', 19), /* AVB_TXC */
+ [23] = PIN_NUMBER('A', 18), /* AVB_TD0 */
+ [24] = PIN_NUMBER('B', 18), /* AVB_TD1 */
+ [25] = PIN_NUMBER('A', 17), /* AVB_TD2 */
+ [26] = PIN_NUMBER('B', 17), /* AVB_TD3 */
+ [27] = PIN_NUMBER('A', 12), /* AVB_TXCREFCLK */
+ [28] = PIN_NUMBER('A', 9), /* AVB_MDIO */
+ [29] = RCAR_GP_PIN(2, 9), /* AVB_MDC */
+ [30] = RCAR_GP_PIN(2, 10), /* AVB_MAGIC */
+ [31] = RCAR_GP_PIN(2, 11), /* AVB_PHY_INT */
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
+ [ 0] = RCAR_GP_PIN(2, 12), /* AVB_LINK */
+ [ 1] = RCAR_GP_PIN(2, 13), /* AVB_AVTP_MATCH_A */
+ [ 2] = RCAR_GP_PIN(2, 14), /* AVB_AVTP_CAPTURE_A */
+ [ 3] = RCAR_GP_PIN(2, 0), /* IRQ0 */
+ [ 4] = RCAR_GP_PIN(2, 1), /* IRQ1 */
+ [ 5] = RCAR_GP_PIN(2, 2), /* IRQ2 */
+ [ 6] = RCAR_GP_PIN(2, 3), /* IRQ3 */
+ [ 7] = RCAR_GP_PIN(2, 4), /* IRQ4 */
+ [ 8] = RCAR_GP_PIN(2, 5), /* IRQ5 */
+ [ 9] = RCAR_GP_PIN(2, 6), /* PWM0 */
+ [10] = RCAR_GP_PIN(2, 7), /* PWM1_A */
+ [11] = RCAR_GP_PIN(2, 8), /* PWM2_A */
+ [12] = RCAR_GP_PIN(1, 0), /* A0 */
+ [13] = RCAR_GP_PIN(1, 1), /* A1 */
+ [14] = RCAR_GP_PIN(1, 2), /* A2 */
+ [15] = RCAR_GP_PIN(1, 3), /* A3 */
+ [16] = RCAR_GP_PIN(1, 4), /* A4 */
+ [17] = RCAR_GP_PIN(1, 5), /* A5 */
+ [18] = RCAR_GP_PIN(1, 6), /* A6 */
+ [19] = RCAR_GP_PIN(1, 7), /* A7 */
+ [20] = RCAR_GP_PIN(1, 8), /* A8 */
+ [21] = RCAR_GP_PIN(1, 9), /* A9 */
+ [22] = RCAR_GP_PIN(1, 10), /* A10 */
+ [23] = RCAR_GP_PIN(1, 11), /* A11 */
+ [24] = RCAR_GP_PIN(1, 12), /* A12 */
+ [25] = RCAR_GP_PIN(1, 13), /* A13 */
+ [26] = RCAR_GP_PIN(1, 14), /* A14 */
+ [27] = RCAR_GP_PIN(1, 15), /* A15 */
+ [28] = RCAR_GP_PIN(1, 16), /* A16 */
+ [29] = RCAR_GP_PIN(1, 17), /* A17 */
+ [30] = RCAR_GP_PIN(1, 18), /* A18 */
+ [31] = RCAR_GP_PIN(1, 19), /* A19 */
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
+ [ 0] = PIN_NUMBER('F', 1), /* CLKOUT */
+ [ 1] = RCAR_GP_PIN(1, 20), /* CS0_N */
+ [ 2] = RCAR_GP_PIN(1, 21), /* CS1_N_A26 */
+ [ 3] = RCAR_GP_PIN(1, 22), /* BS_N */
+ [ 4] = RCAR_GP_PIN(1, 23), /* RD_N */
+ [ 5] = RCAR_GP_PIN(1, 24), /* RD_WR_N */
+ [ 6] = RCAR_GP_PIN(1, 25), /* WE0_N */
+ [ 7] = RCAR_GP_PIN(1, 26), /* WE1_N */
+ [ 8] = RCAR_GP_PIN(1, 27), /* EX_WAIT0_A */
+ [ 9] = PIN_NUMBER('C', 1), /* PRESETOUT# */
+ [10] = RCAR_GP_PIN(0, 0), /* D0 */
+ [11] = RCAR_GP_PIN(0, 1), /* D1 */
+ [12] = RCAR_GP_PIN(0, 2), /* D2 */
+ [13] = RCAR_GP_PIN(0, 3), /* D3 */
+ [14] = RCAR_GP_PIN(0, 4), /* D4 */
+ [15] = RCAR_GP_PIN(0, 5), /* D5 */
+ [16] = RCAR_GP_PIN(0, 6), /* D6 */
+ [17] = RCAR_GP_PIN(0, 7), /* D7 */
+ [18] = RCAR_GP_PIN(0, 8), /* D8 */
+ [19] = RCAR_GP_PIN(0, 9), /* D9 */
+ [20] = RCAR_GP_PIN(0, 10), /* D10 */
+ [21] = RCAR_GP_PIN(0, 11), /* D11 */
+ [22] = RCAR_GP_PIN(0, 12), /* D12 */
+ [23] = RCAR_GP_PIN(0, 13), /* D13 */
+ [24] = RCAR_GP_PIN(0, 14), /* D14 */
+ [25] = RCAR_GP_PIN(0, 15), /* D15 */
+ [26] = RCAR_GP_PIN(7, 0), /* AVS1 */
+ [27] = RCAR_GP_PIN(7, 1), /* AVS2 */
+ [28] = RCAR_GP_PIN(7, 2), /* HDMI0_CEC */
+ [29] = RCAR_GP_PIN(7, 3), /* HDMI1_CEC */
+ [30] = PIN_A_NUMBER('P', 7), /* DU_DOTCLKIN0 */
+ [31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+ [ 0] = PIN_A_NUMBER('R', 7), /* DU_DOTCLKIN2 */
+ [ 1] = PIN_A_NUMBER('R', 8), /* DU_DOTCLKIN3 */
+ [ 2] = PIN_A_NUMBER('D', 38), /* FSCLKST# */
+	[ 3] = PIN_A_NUMBER('D', 39),	/* EXTALR */
+ [ 4] = PIN_A_NUMBER('R', 26), /* TRST# */
+ [ 5] = PIN_A_NUMBER('T', 27), /* TCK */
+ [ 6] = PIN_A_NUMBER('R', 30), /* TMS */
+ [ 7] = PIN_A_NUMBER('R', 29), /* TDI */
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_A_NUMBER('T', 30), /* ASEBRK */
+ [10] = RCAR_GP_PIN(3, 0), /* SD0_CLK */
+ [11] = RCAR_GP_PIN(3, 1), /* SD0_CMD */
+ [12] = RCAR_GP_PIN(3, 2), /* SD0_DAT0 */
+ [13] = RCAR_GP_PIN(3, 3), /* SD0_DAT1 */
+ [14] = RCAR_GP_PIN(3, 4), /* SD0_DAT2 */
+ [15] = RCAR_GP_PIN(3, 5), /* SD0_DAT3 */
+ [16] = RCAR_GP_PIN(3, 6), /* SD1_CLK */
+ [17] = RCAR_GP_PIN(3, 7), /* SD1_CMD */
+ [18] = RCAR_GP_PIN(3, 8), /* SD1_DAT0 */
+ [19] = RCAR_GP_PIN(3, 9), /* SD1_DAT1 */
+ [20] = RCAR_GP_PIN(3, 10), /* SD1_DAT2 */
+ [21] = RCAR_GP_PIN(3, 11), /* SD1_DAT3 */
+ [22] = RCAR_GP_PIN(4, 0), /* SD2_CLK */
+ [23] = RCAR_GP_PIN(4, 1), /* SD2_CMD */
+ [24] = RCAR_GP_PIN(4, 2), /* SD2_DAT0 */
+ [25] = RCAR_GP_PIN(4, 3), /* SD2_DAT1 */
+ [26] = RCAR_GP_PIN(4, 4), /* SD2_DAT2 */
+ [27] = RCAR_GP_PIN(4, 5), /* SD2_DAT3 */
+ [28] = RCAR_GP_PIN(4, 6), /* SD2_DS */
+ [29] = RCAR_GP_PIN(4, 7), /* SD3_CLK */
+ [30] = RCAR_GP_PIN(4, 8), /* SD3_CMD */
+ [31] = RCAR_GP_PIN(4, 9), /* SD3_DAT0 */
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
+ [ 0] = RCAR_GP_PIN(4, 10), /* SD3_DAT1 */
+ [ 1] = RCAR_GP_PIN(4, 11), /* SD3_DAT2 */
+ [ 2] = RCAR_GP_PIN(4, 12), /* SD3_DAT3 */
+ [ 3] = RCAR_GP_PIN(4, 13), /* SD3_DAT4 */
+ [ 4] = RCAR_GP_PIN(4, 14), /* SD3_DAT5 */
+ [ 5] = RCAR_GP_PIN(4, 15), /* SD3_DAT6 */
+ [ 6] = RCAR_GP_PIN(4, 16), /* SD3_DAT7 */
+ [ 7] = RCAR_GP_PIN(4, 17), /* SD3_DS */
+ [ 8] = RCAR_GP_PIN(3, 12), /* SD0_CD */
+ [ 9] = RCAR_GP_PIN(3, 13), /* SD0_WP */
+ [10] = RCAR_GP_PIN(3, 14), /* SD1_CD */
+ [11] = RCAR_GP_PIN(3, 15), /* SD1_WP */
+ [12] = RCAR_GP_PIN(5, 0), /* SCK0 */
+ [13] = RCAR_GP_PIN(5, 1), /* RX0 */
+ [14] = RCAR_GP_PIN(5, 2), /* TX0 */
+ [15] = RCAR_GP_PIN(5, 3), /* CTS0_N */
+ [16] = RCAR_GP_PIN(5, 4), /* RTS0_N_TANS */
+ [17] = RCAR_GP_PIN(5, 5), /* RX1_A */
+ [18] = RCAR_GP_PIN(5, 6), /* TX1_A */
+ [19] = RCAR_GP_PIN(5, 7), /* CTS1_N */
+ [20] = RCAR_GP_PIN(5, 8), /* RTS1_N_TANS */
+ [21] = RCAR_GP_PIN(5, 9), /* SCK2 */
+ [22] = RCAR_GP_PIN(5, 10), /* TX2_A */
+ [23] = RCAR_GP_PIN(5, 11), /* RX2_A */
+ [24] = RCAR_GP_PIN(5, 12), /* HSCK0 */
+ [25] = RCAR_GP_PIN(5, 13), /* HRX0 */
+ [26] = RCAR_GP_PIN(5, 14), /* HTX0 */
+ [27] = RCAR_GP_PIN(5, 15), /* HCTS0_N */
+ [28] = RCAR_GP_PIN(5, 16), /* HRTS0_N */
+ [29] = RCAR_GP_PIN(5, 17), /* MSIOF0_SCK */
+ [30] = RCAR_GP_PIN(5, 18), /* MSIOF0_SYNC */
+ [31] = RCAR_GP_PIN(5, 19), /* MSIOF0_SS1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
+ [ 0] = RCAR_GP_PIN(5, 20), /* MSIOF0_TXD */
+ [ 1] = RCAR_GP_PIN(5, 21), /* MSIOF0_SS2 */
+ [ 2] = RCAR_GP_PIN(5, 22), /* MSIOF0_RXD */
+ [ 3] = RCAR_GP_PIN(5, 23), /* MLB_CLK */
+ [ 4] = RCAR_GP_PIN(5, 24), /* MLB_SIG */
+ [ 5] = RCAR_GP_PIN(5, 25), /* MLB_DAT */
+ [ 6] = PIN_NUMBER('H', 37), /* MLB_REF */
+ [ 7] = RCAR_GP_PIN(6, 0), /* SSI_SCK01239 */
+ [ 8] = RCAR_GP_PIN(6, 1), /* SSI_WS01239 */
+ [ 9] = RCAR_GP_PIN(6, 2), /* SSI_SDATA0 */
+ [10] = RCAR_GP_PIN(6, 3), /* SSI_SDATA1_A */
+ [11] = RCAR_GP_PIN(6, 4), /* SSI_SDATA2_A */
+ [12] = RCAR_GP_PIN(6, 5), /* SSI_SCK349 */
+ [13] = RCAR_GP_PIN(6, 6), /* SSI_WS349 */
+ [14] = RCAR_GP_PIN(6, 7), /* SSI_SDATA3 */
+ [15] = RCAR_GP_PIN(6, 8), /* SSI_SCK4 */
+ [16] = RCAR_GP_PIN(6, 9), /* SSI_WS4 */
+ [17] = RCAR_GP_PIN(6, 10), /* SSI_SDATA4 */
+ [18] = RCAR_GP_PIN(6, 11), /* SSI_SCK5 */
+ [19] = RCAR_GP_PIN(6, 12), /* SSI_WS5 */
+ [20] = RCAR_GP_PIN(6, 13), /* SSI_SDATA5 */
+ [21] = RCAR_GP_PIN(6, 14), /* SSI_SCK6 */
+ [22] = RCAR_GP_PIN(6, 15), /* SSI_WS6 */
+ [23] = RCAR_GP_PIN(6, 16), /* SSI_SDATA6 */
+ [24] = RCAR_GP_PIN(6, 17), /* SSI_SCK78 */
+ [25] = RCAR_GP_PIN(6, 18), /* SSI_WS78 */
+ [26] = RCAR_GP_PIN(6, 19), /* SSI_SDATA7 */
+ [27] = RCAR_GP_PIN(6, 20), /* SSI_SDATA8 */
+ [28] = RCAR_GP_PIN(6, 21), /* SSI_SDATA9_A */
+ [29] = RCAR_GP_PIN(6, 22), /* AUDIO_CLKA_A */
+ [30] = RCAR_GP_PIN(6, 23), /* AUDIO_CLKB_B */
+ [31] = RCAR_GP_PIN(6, 24), /* USB0_PWEN */
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xe6060418, "PUD6", 0xe6060458) {
+ [ 0] = RCAR_GP_PIN(6, 25), /* USB0_OVC */
+ [ 1] = RCAR_GP_PIN(6, 26), /* USB1_PWEN */
+ [ 2] = RCAR_GP_PIN(6, 27), /* USB1_OVC */
+ [ 3] = RCAR_GP_PIN(6, 28), /* USB30_PWEN */
+ [ 4] = RCAR_GP_PIN(6, 29), /* USB30_OVC */
+ [ 5] = RCAR_GP_PIN(6, 30), /* USB31_PWEN */
+ [ 6] = RCAR_GP_PIN(6, 31), /* USB31_OVC */
+ [ 7] = PIN_NONE,
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_NONE,
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NONE,
+ [16] = PIN_NONE,
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { /* sentinel */ },
};
static unsigned int r8a7795es1_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
- const struct sh_pfc_bias_info *info;
- u32 reg;
- u32 bit;
+ const struct pinmux_bias_reg *reg;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return PIN_CONFIG_BIAS_DISABLE;
- reg = info->reg;
- bit = BIT(info->bit);
-
- if (!(sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit))
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
return PIN_CONFIG_BIAS_DISABLE;
- else if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit)
+ else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
return PIN_CONFIG_BIAS_PULL_UP;
else
return PIN_CONFIG_BIAS_PULL_DOWN;
@@ -5653,28 +5711,24 @@ static unsigned int r8a7795es1_pinmux_get_bias(struct sh_pfc *pfc,
static void r8a7795es1_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
u32 enable, updown;
- u32 reg;
- u32 bit;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return;
- reg = info->reg;
- bit = BIT(info->bit);
-
- enable = sh_pfc_read_reg(pfc, PUEN + reg, 32) & ~bit;
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= bit;
+ enable |= BIT(bit);
- updown = sh_pfc_read_reg(pfc, PUD + reg, 32) & ~bit;
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= bit;
+ updown |= BIT(bit);
- sh_pfc_write_reg(pfc, PUD + reg, 32, updown);
- sh_pfc_write_reg(pfc, PUEN + reg, 32, enable);
+ sh_pfc_write(pfc, reg->pud, updown);
+ sh_pfc_write(pfc, reg->puen, enable);
}
static const struct sh_pfc_soc_operations r8a7795es1_pinmux_ops = {
@@ -5699,6 +5753,8 @@ const struct sh_pfc_soc_info r8a7795es1_pinmux_info = {
.cfg_regs = pinmux_config_regs,
.drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 8b35772cda98..d1cec6d12e81 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -1508,12 +1508,13 @@ static const u16 pinmux_data[] = {
};
/*
- * R8A7795 has 8 banks with 32 PGIOS in each => 256 GPIOs.
+ * R8A7795 has 8 banks with 32 GPIOs in each => 256 GPIOs.
* Physical layout rows: A - AW, cols: 1 - 39.
*/
#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r))
#define PIN_NUMBER(r, c) (((r) - 'A') * 39 + (c) + 300)
#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
+#define PIN_NONE U16_MAX
static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
@@ -1572,6 +1573,127 @@ static const struct sh_pfc_pin pinmux_pins[] = {
SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 30, ASEBRK, CFG_FLAGS),
};
+/* - AUDIO CLOCK ------------------------------------------------------------ */
+static const unsigned int audio_clk_a_a_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(6, 22),
+};
+static const unsigned int audio_clk_a_a_mux[] = {
+ AUDIO_CLKA_A_MARK,
+};
+static const unsigned int audio_clk_a_b_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int audio_clk_a_b_mux[] = {
+ AUDIO_CLKA_B_MARK,
+};
+static const unsigned int audio_clk_a_c_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clk_a_c_mux[] = {
+ AUDIO_CLKA_C_MARK,
+};
+static const unsigned int audio_clk_b_a_pins[] = {
+ /* CLK B */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int audio_clk_b_a_mux[] = {
+ AUDIO_CLKB_A_MARK,
+};
+static const unsigned int audio_clk_b_b_pins[] = {
+ /* CLK B */
+ RCAR_GP_PIN(6, 23),
+};
+static const unsigned int audio_clk_b_b_mux[] = {
+ AUDIO_CLKB_B_MARK,
+};
+static const unsigned int audio_clk_c_a_pins[] = {
+ /* CLK C */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clk_c_a_mux[] = {
+ AUDIO_CLKC_A_MARK,
+};
+static const unsigned int audio_clk_c_b_pins[] = {
+ /* CLK C */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int audio_clk_c_b_mux[] = {
+ AUDIO_CLKC_B_MARK,
+};
+static const unsigned int audio_clkout_a_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int audio_clkout_a_mux[] = {
+ AUDIO_CLKOUT_A_MARK,
+};
+static const unsigned int audio_clkout_b_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(6, 28),
+};
+static const unsigned int audio_clkout_b_mux[] = {
+ AUDIO_CLKOUT_B_MARK,
+};
+static const unsigned int audio_clkout_c_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int audio_clkout_c_mux[] = {
+ AUDIO_CLKOUT_C_MARK,
+};
+static const unsigned int audio_clkout_d_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clkout_d_mux[] = {
+ AUDIO_CLKOUT_D_MARK,
+};
+static const unsigned int audio_clkout1_a_pins[] = {
+ /* CLKOUT1 */
+ RCAR_GP_PIN(5, 15),
+};
+static const unsigned int audio_clkout1_a_mux[] = {
+ AUDIO_CLKOUT1_A_MARK,
+};
+static const unsigned int audio_clkout1_b_pins[] = {
+ /* CLKOUT1 */
+ RCAR_GP_PIN(6, 29),
+};
+static const unsigned int audio_clkout1_b_mux[] = {
+ AUDIO_CLKOUT1_B_MARK,
+};
+static const unsigned int audio_clkout2_a_pins[] = {
+ /* CLKOUT2 */
+ RCAR_GP_PIN(5, 16),
+};
+static const unsigned int audio_clkout2_a_mux[] = {
+ AUDIO_CLKOUT2_A_MARK,
+};
+static const unsigned int audio_clkout2_b_pins[] = {
+ /* CLKOUT2 */
+ RCAR_GP_PIN(6, 30),
+};
+static const unsigned int audio_clkout2_b_mux[] = {
+ AUDIO_CLKOUT2_B_MARK,
+};
+static const unsigned int audio_clkout3_a_pins[] = {
+ /* CLKOUT3 */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clkout3_a_mux[] = {
+ AUDIO_CLKOUT3_A_MARK,
+};
+static const unsigned int audio_clkout3_b_pins[] = {
+ /* CLKOUT3 */
+ RCAR_GP_PIN(6, 31),
+};
+static const unsigned int audio_clkout3_b_mux[] = {
+ AUDIO_CLKOUT3_B_MARK,
+};
+
/* - EtherAVB --------------------------------------------------------------- */
static const unsigned int avb_link_pins[] = {
/* AVB_LINK */
@@ -1659,6 +1781,221 @@ static const unsigned int avb_avtp_capture_b_mux[] = {
AVB_AVTP_CAPTURE_B_MARK,
};
+/* - DRIF0 --------------------------------------------------------------- */
+static const unsigned int drif0_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int drif0_ctrl_a_mux[] = {
+ RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK,
+};
+static const unsigned int drif0_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int drif0_data0_a_mux[] = {
+ RIF0_D0_A_MARK,
+};
+static const unsigned int drif0_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int drif0_data1_a_mux[] = {
+ RIF0_D1_A_MARK,
+};
+static const unsigned int drif0_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int drif0_ctrl_b_mux[] = {
+ RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK,
+};
+static const unsigned int drif0_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int drif0_data0_b_mux[] = {
+ RIF0_D0_B_MARK,
+};
+static const unsigned int drif0_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int drif0_data1_b_mux[] = {
+ RIF0_D1_B_MARK,
+};
+static const unsigned int drif0_ctrl_c_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int drif0_ctrl_c_mux[] = {
+ RIF0_CLK_C_MARK, RIF0_SYNC_C_MARK,
+};
+static const unsigned int drif0_data0_c_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int drif0_data0_c_mux[] = {
+ RIF0_D0_C_MARK,
+};
+static const unsigned int drif0_data1_c_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int drif0_data1_c_mux[] = {
+ RIF0_D1_C_MARK,
+};
+/* - DRIF1 --------------------------------------------------------------- */
+static const unsigned int drif1_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int drif1_ctrl_a_mux[] = {
+ RIF1_CLK_A_MARK, RIF1_SYNC_A_MARK,
+};
+static const unsigned int drif1_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int drif1_data0_a_mux[] = {
+ RIF1_D0_A_MARK,
+};
+static const unsigned int drif1_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int drif1_data1_a_mux[] = {
+ RIF1_D1_A_MARK,
+};
+static const unsigned int drif1_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int drif1_ctrl_b_mux[] = {
+ RIF1_CLK_B_MARK, RIF1_SYNC_B_MARK,
+};
+static const unsigned int drif1_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 7),
+};
+static const unsigned int drif1_data0_b_mux[] = {
+ RIF1_D0_B_MARK,
+};
+static const unsigned int drif1_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 8),
+};
+static const unsigned int drif1_data1_b_mux[] = {
+ RIF1_D1_B_MARK,
+};
+static const unsigned int drif1_ctrl_c_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11),
+};
+static const unsigned int drif1_ctrl_c_mux[] = {
+ RIF1_CLK_C_MARK, RIF1_SYNC_C_MARK,
+};
+static const unsigned int drif1_data0_c_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 6),
+};
+static const unsigned int drif1_data0_c_mux[] = {
+ RIF1_D0_C_MARK,
+};
+static const unsigned int drif1_data1_c_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 10),
+};
+static const unsigned int drif1_data1_c_mux[] = {
+ RIF1_D1_C_MARK,
+};
+/* - DRIF2 --------------------------------------------------------------- */
+static const unsigned int drif2_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int drif2_ctrl_a_mux[] = {
+ RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK,
+};
+static const unsigned int drif2_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int drif2_data0_a_mux[] = {
+ RIF2_D0_A_MARK,
+};
+static const unsigned int drif2_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int drif2_data1_a_mux[] = {
+ RIF2_D1_A_MARK,
+};
+static const unsigned int drif2_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int drif2_ctrl_b_mux[] = {
+ RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK,
+};
+static const unsigned int drif2_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 30),
+};
+static const unsigned int drif2_data0_b_mux[] = {
+ RIF2_D0_B_MARK,
+};
+static const unsigned int drif2_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 31),
+};
+static const unsigned int drif2_data1_b_mux[] = {
+ RIF2_D1_B_MARK,
+};
+/* - DRIF3 --------------------------------------------------------------- */
+static const unsigned int drif3_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int drif3_ctrl_a_mux[] = {
+ RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK,
+};
+static const unsigned int drif3_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int drif3_data0_a_mux[] = {
+ RIF3_D0_A_MARK,
+};
+static const unsigned int drif3_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int drif3_data1_a_mux[] = {
+ RIF3_D1_A_MARK,
+};
+static const unsigned int drif3_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
+};
+static const unsigned int drif3_ctrl_b_mux[] = {
+ RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK,
+};
+static const unsigned int drif3_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 28),
+};
+static const unsigned int drif3_data0_b_mux[] = {
+ RIF3_D0_B_MARK,
+};
+static const unsigned int drif3_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 29),
+};
+static const unsigned int drif3_data1_b_mux[] = {
+ RIF3_D1_B_MARK,
+};
+
/* - DU --------------------------------------------------------------------- */
static const unsigned int du_rgb666_pins[] = {
/* R[7:2], G[7:2], B[7:2] */
@@ -1740,6 +2077,308 @@ static const unsigned int du_disp_mux[] = {
DU_DISP_MARK,
};
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
+};
+static const unsigned int hscif0_data_mux[] = {
+ HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 16), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+ HRTS0_N_MARK, HCTS0_N_MARK,
+};
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+static const unsigned int hscif1_data_a_mux[] = {
+ HRX1_A_MARK, HTX1_A_MARK,
+};
+static const unsigned int hscif1_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int hscif1_clk_a_mux[] = {
+ HSCK1_A_MARK,
+};
+static const unsigned int hscif1_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
+};
+static const unsigned int hscif1_ctrl_a_mux[] = {
+ HRTS1_N_A_MARK, HCTS1_N_A_MARK,
+};
+
+static const unsigned int hscif1_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+static const unsigned int hscif1_data_b_mux[] = {
+ HRX1_B_MARK, HTX1_B_MARK,
+};
+static const unsigned int hscif1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int hscif1_clk_b_mux[] = {
+ HSCK1_B_MARK,
+};
+static const unsigned int hscif1_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int hscif1_ctrl_b_mux[] = {
+ HRTS1_N_B_MARK, HCTS1_N_B_MARK,
+};
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int hscif2_data_a_mux[] = {
+ HRX2_A_MARK, HTX2_A_MARK,
+};
+static const unsigned int hscif2_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int hscif2_clk_a_mux[] = {
+ HSCK2_A_MARK,
+};
+static const unsigned int hscif2_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int hscif2_ctrl_a_mux[] = {
+ HRTS2_N_A_MARK, HCTS2_N_A_MARK,
+};
+
+static const unsigned int hscif2_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int hscif2_data_b_mux[] = {
+ HRX2_B_MARK, HTX2_B_MARK,
+};
+static const unsigned int hscif2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int hscif2_clk_b_mux[] = {
+ HSCK2_B_MARK,
+};
+static const unsigned int hscif2_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 20), RCAR_GP_PIN(6, 19),
+};
+static const unsigned int hscif2_ctrl_b_mux[] = {
+ HRTS2_N_B_MARK, HCTS2_N_B_MARK,
+};
+
+static const unsigned int hscif2_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 25), RCAR_GP_PIN(6, 26),
+};
+static const unsigned int hscif2_data_c_mux[] = {
+ HRX2_C_MARK, HTX2_C_MARK,
+};
+static const unsigned int hscif2_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 24),
+};
+static const unsigned int hscif2_clk_c_mux[] = {
+ HSCK2_C_MARK,
+};
+static const unsigned int hscif2_ctrl_c_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int hscif2_ctrl_c_mux[] = {
+ HRTS2_N_C_MARK, HCTS2_N_C_MARK,
+};
+/* - HSCIF3 ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int hscif3_data_a_mux[] = {
+ HRX3_A_MARK, HTX3_A_MARK,
+};
+static const unsigned int hscif3_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int hscif3_clk_mux[] = {
+ HSCK3_MARK,
+};
+static const unsigned int hscif3_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int hscif3_ctrl_mux[] = {
+ HRTS3_N_MARK, HCTS3_N_MARK,
+};
+
+static const unsigned int hscif3_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+};
+static const unsigned int hscif3_data_b_mux[] = {
+ HRX3_B_MARK, HTX3_B_MARK,
+};
+static const unsigned int hscif3_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+};
+static const unsigned int hscif3_data_c_mux[] = {
+ HRX3_C_MARK, HTX3_C_MARK,
+};
+static const unsigned int hscif3_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int hscif3_data_d_mux[] = {
+ HRX3_D_MARK, HTX3_D_MARK,
+};
+/* - HSCIF4 ----------------------------------------------------------------- */
+static const unsigned int hscif4_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int hscif4_data_a_mux[] = {
+ HRX4_A_MARK, HTX4_A_MARK,
+};
+static const unsigned int hscif4_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int hscif4_clk_mux[] = {
+ HSCK4_MARK,
+};
+static const unsigned int hscif4_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14),
+};
+static const unsigned int hscif4_ctrl_mux[] = {
+ HRTS4_N_MARK, HCTS4_N_MARK,
+};
+
+static const unsigned int hscif4_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int hscif4_data_b_mux[] = {
+ HRX4_B_MARK, HTX4_B_MARK,
+};
+
+/* - I2C -------------------------------------------------------------------- */
+static const unsigned int i2c1_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int i2c1_a_mux[] = {
+ SDA1_A_MARK, SCL1_A_MARK,
+};
+static const unsigned int i2c1_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 23),
+};
+static const unsigned int i2c1_b_mux[] = {
+ SDA1_B_MARK, SCL1_B_MARK,
+};
+static const unsigned int i2c2_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int i2c2_a_mux[] = {
+ SDA2_A_MARK, SCL2_A_MARK,
+};
+static const unsigned int i2c2_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12),
+};
+static const unsigned int i2c2_b_mux[] = {
+ SDA2_B_MARK, SCL2_B_MARK,
+};
+static const unsigned int i2c6_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int i2c6_a_mux[] = {
+ SDA6_A_MARK, SCL6_A_MARK,
+};
+static const unsigned int i2c6_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int i2c6_b_mux[] = {
+ SDA6_B_MARK, SCL6_B_MARK,
+};
+static const unsigned int i2c6_c_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int i2c6_c_mux[] = {
+ SDA6_C_MARK, SCL6_C_MARK,
+};
+
+/* - INTC-EX ---------------------------------------------------------------- */
+static const unsigned int intc_ex_irq0_pins[] = {
+ /* IRQ0 */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int intc_ex_irq0_mux[] = {
+ IRQ0_MARK,
+};
+static const unsigned int intc_ex_irq1_pins[] = {
+ /* IRQ1 */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int intc_ex_irq1_mux[] = {
+ IRQ1_MARK,
+};
+static const unsigned int intc_ex_irq2_pins[] = {
+ /* IRQ2 */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int intc_ex_irq2_mux[] = {
+ IRQ2_MARK,
+};
+static const unsigned int intc_ex_irq3_pins[] = {
+ /* IRQ3 */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int intc_ex_irq3_mux[] = {
+ IRQ3_MARK,
+};
+static const unsigned int intc_ex_irq4_pins[] = {
+ /* IRQ4 */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int intc_ex_irq4_mux[] = {
+ IRQ4_MARK,
+};
+static const unsigned int intc_ex_irq5_pins[] = {
+ /* IRQ5 */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int intc_ex_irq5_mux[] = {
+ IRQ5_MARK,
+};
+
/* - MSIOF0 ----------------------------------------------------------------- */
static const unsigned int msiof0_clk_pins[] = {
/* SCK */
@@ -2750,6 +3389,390 @@ static const unsigned int scif_clk_b_mux[] = {
SCIF_CLK_B_MARK,
};
+/* - SDHI0 ------------------------------------------------------------------ */
+static const unsigned int sdhi0_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(3, 2),
+};
+static const unsigned int sdhi0_data1_mux[] = {
+ SD0_DAT0_MARK,
+};
+static const unsigned int sdhi0_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3),
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
+};
+static const unsigned int sdhi0_data4_mux[] = {
+ SD0_DAT0_MARK, SD0_DAT1_MARK,
+ SD0_DAT2_MARK, SD0_DAT3_MARK,
+};
+static const unsigned int sdhi0_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
+};
+static const unsigned int sdhi0_ctrl_mux[] = {
+ SD0_CLK_MARK, SD0_CMD_MARK,
+};
+static const unsigned int sdhi0_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(3, 12),
+};
+static const unsigned int sdhi0_cd_mux[] = {
+ SD0_CD_MARK,
+};
+static const unsigned int sdhi0_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(3, 13),
+};
+static const unsigned int sdhi0_wp_mux[] = {
+ SD0_WP_MARK,
+};
+/* - SDHI1 ------------------------------------------------------------------ */
+static const unsigned int sdhi1_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(3, 8),
+};
+static const unsigned int sdhi1_data1_mux[] = {
+ SD1_DAT0_MARK,
+};
+static const unsigned int sdhi1_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+static const unsigned int sdhi1_data4_mux[] = {
+ SD1_DAT0_MARK, SD1_DAT1_MARK,
+ SD1_DAT2_MARK, SD1_DAT3_MARK,
+};
+static const unsigned int sdhi1_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+};
+static const unsigned int sdhi1_ctrl_mux[] = {
+ SD1_CLK_MARK, SD1_CMD_MARK,
+};
+static const unsigned int sdhi1_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(3, 14),
+};
+static const unsigned int sdhi1_cd_mux[] = {
+ SD1_CD_MARK,
+};
+static const unsigned int sdhi1_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(3, 15),
+};
+static const unsigned int sdhi1_wp_mux[] = {
+ SD1_WP_MARK,
+};
+/* - SDHI2 ------------------------------------------------------------------ */
+static const unsigned int sdhi2_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(4, 2),
+};
+static const unsigned int sdhi2_data1_mux[] = {
+ SD2_DAT0_MARK,
+};
+static const unsigned int sdhi2_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+};
+static const unsigned int sdhi2_data4_mux[] = {
+ SD2_DAT0_MARK, SD2_DAT1_MARK,
+ SD2_DAT2_MARK, SD2_DAT3_MARK,
+};
+static const unsigned int sdhi2_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+static const unsigned int sdhi2_data8_mux[] = {
+ SD2_DAT0_MARK, SD2_DAT1_MARK,
+ SD2_DAT2_MARK, SD2_DAT3_MARK,
+ SD2_DAT4_MARK, SD2_DAT5_MARK,
+ SD2_DAT6_MARK, SD2_DAT7_MARK,
+};
+static const unsigned int sdhi2_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1),
+};
+static const unsigned int sdhi2_ctrl_mux[] = {
+ SD2_CLK_MARK, SD2_CMD_MARK,
+};
+static const unsigned int sdhi2_cd_a_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(4, 13),
+};
+static const unsigned int sdhi2_cd_a_mux[] = {
+ SD2_CD_A_MARK,
+};
+static const unsigned int sdhi2_cd_b_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(5, 10),
+};
+static const unsigned int sdhi2_cd_b_mux[] = {
+ SD2_CD_B_MARK,
+};
+static const unsigned int sdhi2_wp_a_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(4, 14),
+};
+static const unsigned int sdhi2_wp_a_mux[] = {
+ SD2_WP_A_MARK,
+};
+static const unsigned int sdhi2_wp_b_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(5, 11),
+};
+static const unsigned int sdhi2_wp_b_mux[] = {
+ SD2_WP_B_MARK,
+};
+static const unsigned int sdhi2_ds_pins[] = {
+ /* DS */
+ RCAR_GP_PIN(4, 6),
+};
+static const unsigned int sdhi2_ds_mux[] = {
+ SD2_DS_MARK,
+};
+/* - SDHI3 ------------------------------------------------------------------ */
+static const unsigned int sdhi3_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(4, 9),
+};
+static const unsigned int sdhi3_data1_mux[] = {
+ SD3_DAT0_MARK,
+};
+static const unsigned int sdhi3_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+};
+static const unsigned int sdhi3_data4_mux[] = {
+ SD3_DAT0_MARK, SD3_DAT1_MARK,
+ SD3_DAT2_MARK, SD3_DAT3_MARK,
+};
+static const unsigned int sdhi3_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+ RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16),
+};
+static const unsigned int sdhi3_data8_mux[] = {
+ SD3_DAT0_MARK, SD3_DAT1_MARK,
+ SD3_DAT2_MARK, SD3_DAT3_MARK,
+ SD3_DAT4_MARK, SD3_DAT5_MARK,
+ SD3_DAT6_MARK, SD3_DAT7_MARK,
+};
+static const unsigned int sdhi3_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8),
+};
+static const unsigned int sdhi3_ctrl_mux[] = {
+ SD3_CLK_MARK, SD3_CMD_MARK,
+};
+static const unsigned int sdhi3_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(4, 15),
+};
+static const unsigned int sdhi3_cd_mux[] = {
+ SD3_CD_MARK,
+};
+static const unsigned int sdhi3_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(4, 16),
+};
+static const unsigned int sdhi3_wp_mux[] = {
+ SD3_WP_MARK,
+};
+static const unsigned int sdhi3_ds_pins[] = {
+ /* DS */
+ RCAR_GP_PIN(4, 17),
+};
+static const unsigned int sdhi3_ds_mux[] = {
+ SD3_DS_MARK,
+};
+
+/* - SSI -------------------------------------------------------------------- */
+static const unsigned int ssi0_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 2),
+};
+static const unsigned int ssi0_data_mux[] = {
+ SSI_SDATA0_MARK,
+};
+static const unsigned int ssi01239_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
+};
+static const unsigned int ssi01239_ctrl_mux[] = {
+ SSI_SCK01239_MARK, SSI_WS01239_MARK,
+};
+static const unsigned int ssi1_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 3),
+};
+static const unsigned int ssi1_data_a_mux[] = {
+ SSI_SDATA1_A_MARK,
+};
+static const unsigned int ssi1_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int ssi1_data_b_mux[] = {
+ SSI_SDATA1_B_MARK,
+};
+static const unsigned int ssi1_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int ssi1_ctrl_a_mux[] = {
+ SSI_SCK1_A_MARK, SSI_WS1_A_MARK,
+};
+static const unsigned int ssi1_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi1_ctrl_b_mux[] = {
+ SSI_SCK1_B_MARK, SSI_WS1_B_MARK,
+};
+static const unsigned int ssi2_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 4),
+};
+static const unsigned int ssi2_data_a_mux[] = {
+ SSI_SDATA2_A_MARK,
+};
+static const unsigned int ssi2_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int ssi2_data_b_mux[] = {
+ SSI_SDATA2_B_MARK,
+};
+static const unsigned int ssi2_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
+};
+static const unsigned int ssi2_ctrl_a_mux[] = {
+ SSI_SCK2_A_MARK, SSI_WS2_A_MARK,
+};
+static const unsigned int ssi2_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int ssi2_ctrl_b_mux[] = {
+ SSI_SCK2_B_MARK, SSI_WS2_B_MARK,
+};
+static const unsigned int ssi3_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int ssi3_data_mux[] = {
+ SSI_SDATA3_MARK,
+};
+static const unsigned int ssi349_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int ssi349_ctrl_mux[] = {
+ SSI_SCK349_MARK, SSI_WS349_MARK,
+};
+static const unsigned int ssi4_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int ssi4_data_mux[] = {
+ SSI_SDATA4_MARK,
+};
+static const unsigned int ssi4_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int ssi4_ctrl_mux[] = {
+ SSI_SCK4_MARK, SSI_WS4_MARK,
+};
+static const unsigned int ssi5_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 13),
+};
+static const unsigned int ssi5_data_mux[] = {
+ SSI_SDATA5_MARK,
+};
+static const unsigned int ssi5_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
+};
+static const unsigned int ssi5_ctrl_mux[] = {
+ SSI_SCK5_MARK, SSI_WS5_MARK,
+};
+static const unsigned int ssi6_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 16),
+};
+static const unsigned int ssi6_data_mux[] = {
+ SSI_SDATA6_MARK,
+};
+static const unsigned int ssi6_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
+};
+static const unsigned int ssi6_ctrl_mux[] = {
+ SSI_SCK6_MARK, SSI_WS6_MARK,
+};
+static const unsigned int ssi7_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int ssi7_data_mux[] = {
+ SSI_SDATA7_MARK,
+};
+static const unsigned int ssi78_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int ssi78_ctrl_mux[] = {
+ SSI_SCK78_MARK, SSI_WS78_MARK,
+};
+static const unsigned int ssi8_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int ssi8_data_mux[] = {
+ SSI_SDATA8_MARK,
+};
+static const unsigned int ssi9_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi9_data_a_mux[] = {
+ SSI_SDATA9_A_MARK,
+};
+static const unsigned int ssi9_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int ssi9_data_b_mux[] = {
+ SSI_SDATA9_B_MARK,
+};
+static const unsigned int ssi9_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int ssi9_ctrl_a_mux[] = {
+ SSI_SCK9_A_MARK, SSI_WS9_A_MARK,
+};
+static const unsigned int ssi9_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
+};
+static const unsigned int ssi9_ctrl_b_mux[] = {
+ SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
+};
+
/* - USB0 ------------------------------------------------------------------- */
static const unsigned int usb0_pins[] = {
/* PWEN, OVC */
@@ -2783,7 +3806,33 @@ static const unsigned int usb2_ch3_mux[] = {
USB2_CH3_PWEN_MARK, USB2_CH3_OVC_MARK,
};
+/* - USB30 ------------------------------------------------------------------ */
+static const unsigned int usb30_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int usb30_mux[] = {
+ USB30_PWEN_MARK, USB30_OVC_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(audio_clk_a_a),
+ SH_PFC_PIN_GROUP(audio_clk_a_b),
+ SH_PFC_PIN_GROUP(audio_clk_a_c),
+ SH_PFC_PIN_GROUP(audio_clk_b_a),
+ SH_PFC_PIN_GROUP(audio_clk_b_b),
+ SH_PFC_PIN_GROUP(audio_clk_c_a),
+ SH_PFC_PIN_GROUP(audio_clk_c_b),
+ SH_PFC_PIN_GROUP(audio_clkout_a),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(audio_clkout_c),
+ SH_PFC_PIN_GROUP(audio_clkout_d),
+ SH_PFC_PIN_GROUP(audio_clkout1_a),
+ SH_PFC_PIN_GROUP(audio_clkout1_b),
+ SH_PFC_PIN_GROUP(audio_clkout2_a),
+ SH_PFC_PIN_GROUP(audio_clkout2_b),
+ SH_PFC_PIN_GROUP(audio_clkout3_a),
+ SH_PFC_PIN_GROUP(audio_clkout3_b),
SH_PFC_PIN_GROUP(avb_link),
SH_PFC_PIN_GROUP(avb_magic),
SH_PFC_PIN_GROUP(avb_phy_int),
@@ -2794,6 +3843,36 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(avb_avtp_capture_a),
SH_PFC_PIN_GROUP(avb_avtp_match_b),
SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_a),
+ SH_PFC_PIN_GROUP(drif0_data0_a),
+ SH_PFC_PIN_GROUP(drif0_data1_a),
+ SH_PFC_PIN_GROUP(drif0_ctrl_b),
+ SH_PFC_PIN_GROUP(drif0_data0_b),
+ SH_PFC_PIN_GROUP(drif0_data1_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_c),
+ SH_PFC_PIN_GROUP(drif0_data0_c),
+ SH_PFC_PIN_GROUP(drif0_data1_c),
+ SH_PFC_PIN_GROUP(drif1_ctrl_a),
+ SH_PFC_PIN_GROUP(drif1_data0_a),
+ SH_PFC_PIN_GROUP(drif1_data1_a),
+ SH_PFC_PIN_GROUP(drif1_ctrl_b),
+ SH_PFC_PIN_GROUP(drif1_data0_b),
+ SH_PFC_PIN_GROUP(drif1_data1_b),
+ SH_PFC_PIN_GROUP(drif1_ctrl_c),
+ SH_PFC_PIN_GROUP(drif1_data0_c),
+ SH_PFC_PIN_GROUP(drif1_data1_c),
+ SH_PFC_PIN_GROUP(drif2_ctrl_a),
+ SH_PFC_PIN_GROUP(drif2_data0_a),
+ SH_PFC_PIN_GROUP(drif2_data1_a),
+ SH_PFC_PIN_GROUP(drif2_ctrl_b),
+ SH_PFC_PIN_GROUP(drif2_data0_b),
+ SH_PFC_PIN_GROUP(drif2_data1_b),
+ SH_PFC_PIN_GROUP(drif3_ctrl_a),
+ SH_PFC_PIN_GROUP(drif3_data0_a),
+ SH_PFC_PIN_GROUP(drif3_data1_a),
+ SH_PFC_PIN_GROUP(drif3_ctrl_b),
+ SH_PFC_PIN_GROUP(drif3_data0_b),
+ SH_PFC_PIN_GROUP(drif3_data1_b),
SH_PFC_PIN_GROUP(du_rgb666),
SH_PFC_PIN_GROUP(du_rgb888),
SH_PFC_PIN_GROUP(du_clk_out_0),
@@ -2802,6 +3881,47 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(du_oddf),
SH_PFC_PIN_GROUP(du_cde),
SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_a),
+ SH_PFC_PIN_GROUP(hscif1_clk_a),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_a),
+ SH_PFC_PIN_GROUP(hscif2_clk_a),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif2_data_b),
+ SH_PFC_PIN_GROUP(hscif2_clk_b),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_c),
+ SH_PFC_PIN_GROUP(hscif2_clk_c),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif3_data_a),
+ SH_PFC_PIN_GROUP(hscif3_clk),
+ SH_PFC_PIN_GROUP(hscif3_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data_b),
+ SH_PFC_PIN_GROUP(hscif3_data_c),
+ SH_PFC_PIN_GROUP(hscif3_data_d),
+ SH_PFC_PIN_GROUP(hscif4_data_a),
+ SH_PFC_PIN_GROUP(hscif4_clk),
+ SH_PFC_PIN_GROUP(hscif4_ctrl),
+ SH_PFC_PIN_GROUP(hscif4_data_b),
+ SH_PFC_PIN_GROUP(i2c1_a),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c2_a),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c6_a),
+ SH_PFC_PIN_GROUP(i2c6_b),
+ SH_PFC_PIN_GROUP(i2c6_c),
+ SH_PFC_PIN_GROUP(intc_ex_irq0),
+ SH_PFC_PIN_GROUP(intc_ex_irq1),
+ SH_PFC_PIN_GROUP(intc_ex_irq2),
+ SH_PFC_PIN_GROUP(intc_ex_irq3),
+ SH_PFC_PIN_GROUP(intc_ex_irq4),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
SH_PFC_PIN_GROUP(msiof0_clk),
SH_PFC_PIN_GROUP(msiof0_sync),
SH_PFC_PIN_GROUP(msiof0_ss1),
@@ -2943,10 +4063,82 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif5_clk_b),
SH_PFC_PIN_GROUP(scif_clk_a),
SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_data8),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd_a),
+ SH_PFC_PIN_GROUP(sdhi2_wp_a),
+ SH_PFC_PIN_GROUP(sdhi2_cd_b),
+ SH_PFC_PIN_GROUP(sdhi2_wp_b),
+ SH_PFC_PIN_GROUP(sdhi2_ds),
+ SH_PFC_PIN_GROUP(sdhi3_data1),
+ SH_PFC_PIN_GROUP(sdhi3_data4),
+ SH_PFC_PIN_GROUP(sdhi3_data8),
+ SH_PFC_PIN_GROUP(sdhi3_ctrl),
+ SH_PFC_PIN_GROUP(sdhi3_cd),
+ SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(sdhi3_ds),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi01239_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data_a),
+ SH_PFC_PIN_GROUP(ssi1_data_b),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi2_data_a),
+ SH_PFC_PIN_GROUP(ssi2_data_b),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi349_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi5_data),
+ SH_PFC_PIN_GROUP(ssi5_ctrl),
+ SH_PFC_PIN_GROUP(ssi6_data),
+ SH_PFC_PIN_GROUP(ssi6_ctrl),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi9_data_a),
+ SH_PFC_PIN_GROUP(ssi9_data_b),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_b),
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb1),
SH_PFC_PIN_GROUP(usb2),
SH_PFC_PIN_GROUP(usb2_ch3),
+ SH_PFC_PIN_GROUP(usb30),
+};
+
+static const char * const audio_clk_groups[] = {
+ "audio_clk_a_a",
+ "audio_clk_a_b",
+ "audio_clk_a_c",
+ "audio_clk_b_a",
+ "audio_clk_b_b",
+ "audio_clk_c_a",
+ "audio_clk_c_b",
+ "audio_clkout_a",
+ "audio_clkout_b",
+ "audio_clkout_c",
+ "audio_clkout_d",
+ "audio_clkout1_a",
+ "audio_clkout1_b",
+ "audio_clkout2_a",
+ "audio_clkout2_b",
+ "audio_clkout3_a",
+ "audio_clkout3_b",
};
static const char * const avb_groups[] = {
@@ -2962,6 +4154,48 @@ static const char * const avb_groups[] = {
"avb_avtp_capture_b",
};
+static const char * const drif0_groups[] = {
+ "drif0_ctrl_a",
+ "drif0_data0_a",
+ "drif0_data1_a",
+ "drif0_ctrl_b",
+ "drif0_data0_b",
+ "drif0_data1_b",
+ "drif0_ctrl_c",
+ "drif0_data0_c",
+ "drif0_data1_c",
+};
+
+static const char * const drif1_groups[] = {
+ "drif1_ctrl_a",
+ "drif1_data0_a",
+ "drif1_data1_a",
+ "drif1_ctrl_b",
+ "drif1_data0_b",
+ "drif1_data1_b",
+ "drif1_ctrl_c",
+ "drif1_data0_c",
+ "drif1_data1_c",
+};
+
+static const char * const drif2_groups[] = {
+ "drif2_ctrl_a",
+ "drif2_data0_a",
+ "drif2_data1_a",
+ "drif2_ctrl_b",
+ "drif2_data0_b",
+ "drif2_data1_b",
+};
+
+static const char * const drif3_groups[] = {
+ "drif3_ctrl_a",
+ "drif3_data0_a",
+ "drif3_data1_a",
+ "drif3_ctrl_b",
+ "drif3_data0_b",
+ "drif3_data1_b",
+};
+
static const char * const du_groups[] = {
"du_rgb666",
"du_rgb888",
@@ -2973,6 +4207,74 @@ static const char * const du_groups[] = {
"du_disp",
};
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+ "hscif1_data_a",
+ "hscif1_clk_a",
+ "hscif1_ctrl_a",
+ "hscif1_data_b",
+ "hscif1_clk_b",
+ "hscif1_ctrl_b",
+};
+
+static const char * const hscif2_groups[] = {
+ "hscif2_data_a",
+ "hscif2_clk_a",
+ "hscif2_ctrl_a",
+ "hscif2_data_b",
+ "hscif2_clk_b",
+ "hscif2_ctrl_b",
+ "hscif2_data_c",
+ "hscif2_clk_c",
+ "hscif2_ctrl_c",
+};
+
+static const char * const hscif3_groups[] = {
+ "hscif3_data_a",
+ "hscif3_clk",
+ "hscif3_ctrl",
+ "hscif3_data_b",
+ "hscif3_data_c",
+ "hscif3_data_d",
+};
+
+static const char * const hscif4_groups[] = {
+ "hscif4_data_a",
+ "hscif4_clk",
+ "hscif4_ctrl",
+ "hscif4_data_b",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1_a",
+ "i2c1_b",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_a",
+ "i2c2_b",
+};
+
+static const char * const i2c6_groups[] = {
+ "i2c6_a",
+ "i2c6_b",
+ "i2c6_c",
+};
+
+static const char * const intc_ex_groups[] = {
+ "intc_ex_irq0",
+ "intc_ex_irq1",
+ "intc_ex_irq2",
+ "intc_ex_irq3",
+ "intc_ex_irq4",
+ "intc_ex_irq5",
+};
+
static const char * const msiof0_groups[] = {
"msiof0_clk",
"msiof0_sync",
@@ -3168,6 +4470,72 @@ static const char * const scif_clk_groups[] = {
"scif_clk_b",
};
+static const char * const sdhi0_groups[] = {
+ "sdhi0_data1",
+ "sdhi0_data4",
+ "sdhi0_ctrl",
+ "sdhi0_cd",
+ "sdhi0_wp",
+};
+
+static const char * const sdhi1_groups[] = {
+ "sdhi1_data1",
+ "sdhi1_data4",
+ "sdhi1_ctrl",
+ "sdhi1_cd",
+ "sdhi1_wp",
+};
+
+static const char * const sdhi2_groups[] = {
+ "sdhi2_data1",
+ "sdhi2_data4",
+ "sdhi2_data8",
+ "sdhi2_ctrl",
+ "sdhi2_cd_a",
+ "sdhi2_wp_a",
+ "sdhi2_cd_b",
+ "sdhi2_wp_b",
+ "sdhi2_ds",
+};
+
+static const char * const sdhi3_groups[] = {
+ "sdhi3_data1",
+ "sdhi3_data4",
+ "sdhi3_data8",
+ "sdhi3_ctrl",
+ "sdhi3_cd",
+ "sdhi3_wp",
+ "sdhi3_ds",
+};
+
+static const char * const ssi_groups[] = {
+ "ssi0_data",
+ "ssi01239_ctrl",
+ "ssi1_data_a",
+ "ssi1_data_b",
+ "ssi1_ctrl_a",
+ "ssi1_ctrl_b",
+ "ssi2_data_a",
+ "ssi2_data_b",
+ "ssi2_ctrl_a",
+ "ssi2_ctrl_b",
+ "ssi3_data",
+ "ssi349_ctrl",
+ "ssi4_data",
+ "ssi4_ctrl",
+ "ssi5_data",
+ "ssi5_ctrl",
+ "ssi6_data",
+ "ssi6_ctrl",
+ "ssi7_data",
+ "ssi78_ctrl",
+ "ssi8_data",
+ "ssi9_data_a",
+ "ssi9_data_b",
+ "ssi9_ctrl_a",
+ "ssi9_ctrl_b",
+};
+
static const char * const usb0_groups[] = {
"usb0",
};
@@ -3184,9 +4552,27 @@ static const char * const usb2_ch3_groups[] = {
"usb2_ch3",
};
+static const char * const usb30_groups[] = {
+ "usb30",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(drif0),
+ SH_PFC_FUNCTION(drif1),
+ SH_PFC_FUNCTION(drif2),
+ SH_PFC_FUNCTION(drif3),
SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+ SH_PFC_FUNCTION(hscif4),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(intc_ex),
SH_PFC_FUNCTION(msiof0),
SH_PFC_FUNCTION(msiof1),
SH_PFC_FUNCTION(msiof2),
@@ -3205,10 +4591,16 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(scif4),
SH_PFC_FUNCTION(scif5),
SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(sdhi3),
+ SH_PFC_FUNCTION(ssi),
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
SH_PFC_FUNCTION(usb2),
SH_PFC_FUNCTION(usb2_ch3),
+ SH_PFC_FUNCTION(usb30),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -4021,11 +5413,20 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ },
};
+enum ioctrl_regs {
+ POCCTRL,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POCCTRL] = { 0xe6060380, },
+ { /* sentinel */ },
+};
+
static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
{
int bit = -EINVAL;
- *pocctrl = 0xe6060380;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL].reg;
if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
bit = pin & 0x1f;
@@ -4036,242 +5437,261 @@ static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
return bit;
}
-#define PUEN 0xe6060400
-#define PUD 0xe6060440
-
-#define PU0 0x00
-#define PU1 0x04
-#define PU2 0x08
-#define PU3 0x0c
-#define PU4 0x10
-#define PU5 0x14
-#define PU6 0x18
-
-static const struct sh_pfc_bias_info bias_info[] = {
- { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */
- { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */
- { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */
- { PIN_NUMBER('A', 9), PU0, 28 }, /* AVB_MDIO */
- { PIN_NUMBER('A', 12), PU0, 27 }, /* AVB_TXCREFCLK */
- { PIN_NUMBER('B', 17), PU0, 26 }, /* AVB_TD3 */
- { PIN_NUMBER('A', 17), PU0, 25 }, /* AVB_TD2 */
- { PIN_NUMBER('B', 18), PU0, 24 }, /* AVB_TD1 */
- { PIN_NUMBER('A', 18), PU0, 23 }, /* AVB_TD0 */
- { PIN_NUMBER('A', 19), PU0, 22 }, /* AVB_TXC */
- { PIN_NUMBER('A', 8), PU0, 21 }, /* AVB_TX_CTL */
- { PIN_NUMBER('B', 14), PU0, 20 }, /* AVB_RD3 */
- { PIN_NUMBER('A', 14), PU0, 19 }, /* AVB_RD2 */
- { PIN_NUMBER('B', 13), PU0, 18 }, /* AVB_RD1 */
- { PIN_NUMBER('A', 13), PU0, 17 }, /* AVB_RD0 */
- { PIN_NUMBER('B', 19), PU0, 16 }, /* AVB_RXC */
- { PIN_NUMBER('A', 16), PU0, 15 }, /* AVB_RX_CTL */
- { PIN_NUMBER('V', 7), PU0, 14 }, /* RPC_RESET# */
- { PIN_NUMBER('V', 6), PU0, 13 }, /* RPC_WP# */
- { PIN_NUMBER('Y', 7), PU0, 12 }, /* RPC_INT# */
- { PIN_NUMBER('V', 5), PU0, 11 }, /* QSPI1_SSL */
- { PIN_A_NUMBER('C', 3), PU0, 10 }, /* QSPI1_IO3 */
- { PIN_A_NUMBER('E', 4), PU0, 9 }, /* QSPI1_IO2 */
- { PIN_A_NUMBER('E', 5), PU0, 8 }, /* QSPI1_MISO_IO1 */
- { PIN_A_NUMBER('C', 7), PU0, 7 }, /* QSPI1_MOSI_IO0 */
- { PIN_NUMBER('V', 3), PU0, 6 }, /* QSPI1_SPCLK */
- { PIN_NUMBER('Y', 3), PU0, 5 }, /* QSPI0_SSL */
- { PIN_A_NUMBER('B', 6), PU0, 4 }, /* QSPI0_IO3 */
- { PIN_NUMBER('Y', 6), PU0, 3 }, /* QSPI0_IO2 */
- { PIN_A_NUMBER('B', 4), PU0, 2 }, /* QSPI0_MISO_IO1 */
- { PIN_A_NUMBER('C', 5), PU0, 1 }, /* QSPI0_MOSI_IO0 */
- { PIN_NUMBER('W', 3), PU0, 0 }, /* QSPI0_SPCLK */
-
- { RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */
- { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */
- { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */
- { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */
- { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */
- { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */
- { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */
- { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */
- { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */
- { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */
- { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */
- { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */
- { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */
- { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */
- { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */
- { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */
- { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */
- { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */
- { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */
- { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */
- { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */
- { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */
- { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */
- { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */
- { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */
- { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */
- { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */
- { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */
- { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */
- { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
- { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */
- { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */
-
- { PIN_A_NUMBER('P', 8), PU2, 31 }, /* DU_DOTCLKIN1 */
- { PIN_A_NUMBER('P', 7), PU2, 30 }, /* DU_DOTCLKIN0 */
- { RCAR_GP_PIN(7, 3), PU2, 29 }, /* HDMI1_CEC */
- { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */
- { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */
- { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */
- { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */
- { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */
- { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */
- { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */
- { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */
- { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */
- { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */
- { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */
- { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */
- { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */
- { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */
- { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */
- { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */
- { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */
- { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */
- { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */
- { PIN_NUMBER('C', 1), PU2, 9 }, /* PRESETOUT# */
- { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */
- { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */
- { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */
- { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */
- { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */
- { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */
- { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N */
- { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */
- { PIN_NUMBER('F', 1), PU2, 0 }, /* CLKOUT */
-
- { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */
- { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */
- { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */
- { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */
- { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */
- { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */
- { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */
- { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */
- { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */
- { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */
- { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */
- { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */
- { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */
- { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */
- { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */
- { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */
- { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */
- { RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */
- { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */
- { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */
- { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */
- { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */
- { PIN_A_NUMBER('T', 30), PU3, 9 }, /* ASEBRK */
- /* bit 8 n/a */
- { PIN_A_NUMBER('R', 29), PU3, 7 }, /* TDI */
- { PIN_A_NUMBER('R', 30), PU3, 6 }, /* TMS */
- { PIN_A_NUMBER('T', 27), PU3, 5 }, /* TCK */
- { PIN_A_NUMBER('R', 26), PU3, 4 }, /* TRST# */
- { PIN_A_NUMBER('D', 39), PU3, 3 }, /* EXTALR*/
- { PIN_A_NUMBER('D', 38), PU3, 2 }, /* FSCLKST# */
- { PIN_A_NUMBER('R', 8), PU3, 1 }, /* DU_DOTCLKIN3 */
- { PIN_A_NUMBER('R', 7), PU3, 0 }, /* DU_DOTCLKIN2 */
-
- { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */
- { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */
- { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */
- { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */
- { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */
- { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */
- { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */
- { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */
- { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */
- { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */
- { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */
- { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */
- { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */
- { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */
- { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */
- { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */
- { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */
- { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */
- { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */
- { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */
- { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */
- { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */
- { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */
- { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */
- { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */
- { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */
- { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */
- { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */
- { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */
- { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */
- { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */
- { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */
-
- { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */
- { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */
- { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */
- { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */
- { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */
- { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */
- { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */
- { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */
- { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */
- { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */
- { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */
- { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */
- { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */
- { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */
- { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */
- { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
- { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
- { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS349 */
- { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK349 */
- { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
- { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
- { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
- { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */
- { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */
- { PIN_NUMBER('H', 37), PU5, 6 }, /* MLB_REF */
- { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */
- { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */
- { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */
- { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */
- { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */
- { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */
-
- { RCAR_GP_PIN(6, 31), PU6, 6 }, /* USB2_CH3_OVC */
- { RCAR_GP_PIN(6, 30), PU6, 5 }, /* USB2_CH3_PWEN */
- { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */
- { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN */
- { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */
- { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */
- { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
+ [ 0] = PIN_NUMBER('W', 3), /* QSPI0_SPCLK */
+ [ 1] = PIN_A_NUMBER('C', 5), /* QSPI0_MOSI_IO0 */
+ [ 2] = PIN_A_NUMBER('B', 4), /* QSPI0_MISO_IO1 */
+ [ 3] = PIN_NUMBER('Y', 6), /* QSPI0_IO2 */
+ [ 4] = PIN_A_NUMBER('B', 6), /* QSPI0_IO3 */
+ [ 5] = PIN_NUMBER('Y', 3), /* QSPI0_SSL */
+ [ 6] = PIN_NUMBER('V', 3), /* QSPI1_SPCLK */
+ [ 7] = PIN_A_NUMBER('C', 7), /* QSPI1_MOSI_IO0 */
+ [ 8] = PIN_A_NUMBER('E', 5), /* QSPI1_MISO_IO1 */
+ [ 9] = PIN_A_NUMBER('E', 4), /* QSPI1_IO2 */
+ [10] = PIN_A_NUMBER('C', 3), /* QSPI1_IO3 */
+ [11] = PIN_NUMBER('V', 5), /* QSPI1_SSL */
+ [12] = PIN_NUMBER('Y', 7), /* RPC_INT# */
+ [13] = PIN_NUMBER('V', 6), /* RPC_WP# */
+ [14] = PIN_NUMBER('V', 7), /* RPC_RESET# */
+ [15] = PIN_NUMBER('A', 16), /* AVB_RX_CTL */
+ [16] = PIN_NUMBER('B', 19), /* AVB_RXC */
+ [17] = PIN_NUMBER('A', 13), /* AVB_RD0 */
+ [18] = PIN_NUMBER('B', 13), /* AVB_RD1 */
+ [19] = PIN_NUMBER('A', 14), /* AVB_RD2 */
+ [20] = PIN_NUMBER('B', 14), /* AVB_RD3 */
+ [21] = PIN_NUMBER('A', 8), /* AVB_TX_CTL */
+ [22] = PIN_NUMBER('A', 19), /* AVB_TXC */
+ [23] = PIN_NUMBER('A', 18), /* AVB_TD0 */
+ [24] = PIN_NUMBER('B', 18), /* AVB_TD1 */
+ [25] = PIN_NUMBER('A', 17), /* AVB_TD2 */
+ [26] = PIN_NUMBER('B', 17), /* AVB_TD3 */
+ [27] = PIN_NUMBER('A', 12), /* AVB_TXCREFCLK */
+ [28] = PIN_NUMBER('A', 9), /* AVB_MDIO */
+ [29] = RCAR_GP_PIN(2, 9), /* AVB_MDC */
+ [30] = RCAR_GP_PIN(2, 10), /* AVB_MAGIC */
+ [31] = RCAR_GP_PIN(2, 11), /* AVB_PHY_INT */
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
+ [ 0] = RCAR_GP_PIN(2, 12), /* AVB_LINK */
+ [ 1] = RCAR_GP_PIN(2, 13), /* AVB_AVTP_MATCH_A */
+ [ 2] = RCAR_GP_PIN(2, 14), /* AVB_AVTP_CAPTURE_A */
+ [ 3] = RCAR_GP_PIN(2, 0), /* IRQ0 */
+ [ 4] = RCAR_GP_PIN(2, 1), /* IRQ1 */
+ [ 5] = RCAR_GP_PIN(2, 2), /* IRQ2 */
+ [ 6] = RCAR_GP_PIN(2, 3), /* IRQ3 */
+ [ 7] = RCAR_GP_PIN(2, 4), /* IRQ4 */
+ [ 8] = RCAR_GP_PIN(2, 5), /* IRQ5 */
+ [ 9] = RCAR_GP_PIN(2, 6), /* PWM0 */
+ [10] = RCAR_GP_PIN(2, 7), /* PWM1_A */
+ [11] = RCAR_GP_PIN(2, 8), /* PWM2_A */
+ [12] = RCAR_GP_PIN(1, 0), /* A0 */
+ [13] = RCAR_GP_PIN(1, 1), /* A1 */
+ [14] = RCAR_GP_PIN(1, 2), /* A2 */
+ [15] = RCAR_GP_PIN(1, 3), /* A3 */
+ [16] = RCAR_GP_PIN(1, 4), /* A4 */
+ [17] = RCAR_GP_PIN(1, 5), /* A5 */
+ [18] = RCAR_GP_PIN(1, 6), /* A6 */
+ [19] = RCAR_GP_PIN(1, 7), /* A7 */
+ [20] = RCAR_GP_PIN(1, 8), /* A8 */
+ [21] = RCAR_GP_PIN(1, 9), /* A9 */
+ [22] = RCAR_GP_PIN(1, 10), /* A10 */
+ [23] = RCAR_GP_PIN(1, 11), /* A11 */
+ [24] = RCAR_GP_PIN(1, 12), /* A12 */
+ [25] = RCAR_GP_PIN(1, 13), /* A13 */
+ [26] = RCAR_GP_PIN(1, 14), /* A14 */
+ [27] = RCAR_GP_PIN(1, 15), /* A15 */
+ [28] = RCAR_GP_PIN(1, 16), /* A16 */
+ [29] = RCAR_GP_PIN(1, 17), /* A17 */
+ [30] = RCAR_GP_PIN(1, 18), /* A18 */
+ [31] = RCAR_GP_PIN(1, 19), /* A19 */
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
+ [ 0] = PIN_NUMBER('F', 1), /* CLKOUT */
+ [ 1] = RCAR_GP_PIN(1, 20), /* CS0_N */
+ [ 2] = RCAR_GP_PIN(1, 21), /* CS1_N */
+ [ 3] = RCAR_GP_PIN(1, 22), /* BS_N */
+ [ 4] = RCAR_GP_PIN(1, 23), /* RD_N */
+ [ 5] = RCAR_GP_PIN(1, 24), /* RD_WR_N */
+ [ 6] = RCAR_GP_PIN(1, 25), /* WE0_N */
+ [ 7] = RCAR_GP_PIN(1, 26), /* WE1_N */
+ [ 8] = RCAR_GP_PIN(1, 27), /* EX_WAIT0_A */
+ [ 9] = PIN_NUMBER('C', 1), /* PRESETOUT# */
+ [10] = RCAR_GP_PIN(0, 0), /* D0 */
+ [11] = RCAR_GP_PIN(0, 1), /* D1 */
+ [12] = RCAR_GP_PIN(0, 2), /* D2 */
+ [13] = RCAR_GP_PIN(0, 3), /* D3 */
+ [14] = RCAR_GP_PIN(0, 4), /* D4 */
+ [15] = RCAR_GP_PIN(0, 5), /* D5 */
+ [16] = RCAR_GP_PIN(0, 6), /* D6 */
+ [17] = RCAR_GP_PIN(0, 7), /* D7 */
+ [18] = RCAR_GP_PIN(0, 8), /* D8 */
+ [19] = RCAR_GP_PIN(0, 9), /* D9 */
+ [20] = RCAR_GP_PIN(0, 10), /* D10 */
+ [21] = RCAR_GP_PIN(0, 11), /* D11 */
+ [22] = RCAR_GP_PIN(0, 12), /* D12 */
+ [23] = RCAR_GP_PIN(0, 13), /* D13 */
+ [24] = RCAR_GP_PIN(0, 14), /* D14 */
+ [25] = RCAR_GP_PIN(0, 15), /* D15 */
+ [26] = RCAR_GP_PIN(7, 0), /* AVS1 */
+ [27] = RCAR_GP_PIN(7, 1), /* AVS2 */
+ [28] = RCAR_GP_PIN(7, 2), /* HDMI0_CEC */
+ [29] = RCAR_GP_PIN(7, 3), /* HDMI1_CEC */
+ [30] = PIN_A_NUMBER('P', 7), /* DU_DOTCLKIN0 */
+ [31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+ [ 0] = PIN_A_NUMBER('R', 7), /* DU_DOTCLKIN2 */
+ [ 1] = PIN_A_NUMBER('R', 8), /* DU_DOTCLKIN3 */
+ [ 2] = PIN_A_NUMBER('D', 38), /* FSCLKST# */
+ [ 3] = PIN_A_NUMBER('D', 39), /* EXTALR*/
+ [ 4] = PIN_A_NUMBER('R', 26), /* TRST# */
+ [ 5] = PIN_A_NUMBER('T', 27), /* TCK */
+ [ 6] = PIN_A_NUMBER('R', 30), /* TMS */
+ [ 7] = PIN_A_NUMBER('R', 29), /* TDI */
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_A_NUMBER('T', 30), /* ASEBRK */
+ [10] = RCAR_GP_PIN(3, 0), /* SD0_CLK */
+ [11] = RCAR_GP_PIN(3, 1), /* SD0_CMD */
+ [12] = RCAR_GP_PIN(3, 2), /* SD0_DAT0 */
+ [13] = RCAR_GP_PIN(3, 3), /* SD0_DAT1 */
+ [14] = RCAR_GP_PIN(3, 4), /* SD0_DAT2 */
+ [15] = RCAR_GP_PIN(3, 5), /* SD0_DAT3 */
+ [16] = RCAR_GP_PIN(3, 6), /* SD1_CLK */
+ [17] = RCAR_GP_PIN(3, 7), /* SD1_CMD */
+ [18] = RCAR_GP_PIN(3, 8), /* SD1_DAT0 */
+ [19] = RCAR_GP_PIN(3, 9), /* SD1_DAT1 */
+ [20] = RCAR_GP_PIN(3, 10), /* SD1_DAT2 */
+ [21] = RCAR_GP_PIN(3, 11), /* SD1_DAT3 */
+ [22] = RCAR_GP_PIN(4, 0), /* SD2_CLK */
+ [23] = RCAR_GP_PIN(4, 1), /* SD2_CMD */
+ [24] = RCAR_GP_PIN(4, 2), /* SD2_DAT0 */
+ [25] = RCAR_GP_PIN(4, 3), /* SD2_DAT1 */
+ [26] = RCAR_GP_PIN(4, 4), /* SD2_DAT2 */
+ [27] = RCAR_GP_PIN(4, 5), /* SD2_DAT3 */
+ [28] = RCAR_GP_PIN(4, 6), /* SD2_DS */
+ [29] = RCAR_GP_PIN(4, 7), /* SD3_CLK */
+ [30] = RCAR_GP_PIN(4, 8), /* SD3_CMD */
+ [31] = RCAR_GP_PIN(4, 9), /* SD3_DAT0 */
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
+ [ 0] = RCAR_GP_PIN(4, 10), /* SD3_DAT1 */
+ [ 1] = RCAR_GP_PIN(4, 11), /* SD3_DAT2 */
+ [ 2] = RCAR_GP_PIN(4, 12), /* SD3_DAT3 */
+ [ 3] = RCAR_GP_PIN(4, 13), /* SD3_DAT4 */
+ [ 4] = RCAR_GP_PIN(4, 14), /* SD3_DAT5 */
+ [ 5] = RCAR_GP_PIN(4, 15), /* SD3_DAT6 */
+ [ 6] = RCAR_GP_PIN(4, 16), /* SD3_DAT7 */
+ [ 7] = RCAR_GP_PIN(4, 17), /* SD3_DS */
+ [ 8] = RCAR_GP_PIN(3, 12), /* SD0_CD */
+ [ 9] = RCAR_GP_PIN(3, 13), /* SD0_WP */
+ [10] = RCAR_GP_PIN(3, 14), /* SD1_CD */
+ [11] = RCAR_GP_PIN(3, 15), /* SD1_WP */
+ [12] = RCAR_GP_PIN(5, 0), /* SCK0 */
+ [13] = RCAR_GP_PIN(5, 1), /* RX0 */
+ [14] = RCAR_GP_PIN(5, 2), /* TX0 */
+ [15] = RCAR_GP_PIN(5, 3), /* CTS0_N */
+ [16] = RCAR_GP_PIN(5, 4), /* RTS0_N_TANS */
+ [17] = RCAR_GP_PIN(5, 5), /* RX1_A */
+ [18] = RCAR_GP_PIN(5, 6), /* TX1_A */
+ [19] = RCAR_GP_PIN(5, 7), /* CTS1_N */
+ [20] = RCAR_GP_PIN(5, 8), /* RTS1_N_TANS */
+ [21] = RCAR_GP_PIN(5, 9), /* SCK2 */
+ [22] = RCAR_GP_PIN(5, 10), /* TX2_A */
+ [23] = RCAR_GP_PIN(5, 11), /* RX2_A */
+ [24] = RCAR_GP_PIN(5, 12), /* HSCK0 */
+ [25] = RCAR_GP_PIN(5, 13), /* HRX0 */
+ [26] = RCAR_GP_PIN(5, 14), /* HTX0 */
+ [27] = RCAR_GP_PIN(5, 15), /* HCTS0_N */
+ [28] = RCAR_GP_PIN(5, 16), /* HRTS0_N */
+ [29] = RCAR_GP_PIN(5, 17), /* MSIOF0_SCK */
+ [30] = RCAR_GP_PIN(5, 18), /* MSIOF0_SYNC */
+ [31] = RCAR_GP_PIN(5, 19), /* MSIOF0_SS1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
+ [ 0] = RCAR_GP_PIN(5, 20), /* MSIOF0_TXD */
+ [ 1] = RCAR_GP_PIN(5, 21), /* MSIOF0_SS2 */
+ [ 2] = RCAR_GP_PIN(5, 22), /* MSIOF0_RXD */
+ [ 3] = RCAR_GP_PIN(5, 23), /* MLB_CLK */
+ [ 4] = RCAR_GP_PIN(5, 24), /* MLB_SIG */
+ [ 5] = RCAR_GP_PIN(5, 25), /* MLB_DAT */
+ [ 6] = PIN_NUMBER('H', 37), /* MLB_REF */
+ [ 7] = RCAR_GP_PIN(6, 0), /* SSI_SCK01239 */
+ [ 8] = RCAR_GP_PIN(6, 1), /* SSI_WS01239 */
+ [ 9] = RCAR_GP_PIN(6, 2), /* SSI_SDATA0 */
+ [10] = RCAR_GP_PIN(6, 3), /* SSI_SDATA1_A */
+ [11] = RCAR_GP_PIN(6, 4), /* SSI_SDATA2_A */
+ [12] = RCAR_GP_PIN(6, 5), /* SSI_SCK349 */
+ [13] = RCAR_GP_PIN(6, 6), /* SSI_WS349 */
+ [14] = RCAR_GP_PIN(6, 7), /* SSI_SDATA3 */
+ [15] = RCAR_GP_PIN(6, 8), /* SSI_SCK4 */
+ [16] = RCAR_GP_PIN(6, 9), /* SSI_WS4 */
+ [17] = RCAR_GP_PIN(6, 10), /* SSI_SDATA4 */
+ [18] = RCAR_GP_PIN(6, 11), /* SSI_SCK5 */
+ [19] = RCAR_GP_PIN(6, 12), /* SSI_WS5 */
+ [20] = RCAR_GP_PIN(6, 13), /* SSI_SDATA5 */
+ [21] = RCAR_GP_PIN(6, 14), /* SSI_SCK6 */
+ [22] = RCAR_GP_PIN(6, 15), /* SSI_WS6 */
+ [23] = RCAR_GP_PIN(6, 16), /* SSI_SDATA6 */
+ [24] = RCAR_GP_PIN(6, 17), /* SSI_SCK78 */
+ [25] = RCAR_GP_PIN(6, 18), /* SSI_WS78 */
+ [26] = RCAR_GP_PIN(6, 19), /* SSI_SDATA7 */
+ [27] = RCAR_GP_PIN(6, 20), /* SSI_SDATA8 */
+ [28] = RCAR_GP_PIN(6, 21), /* SSI_SDATA9_A */
+ [29] = RCAR_GP_PIN(6, 22), /* AUDIO_CLKA_A */
+ [30] = RCAR_GP_PIN(6, 23), /* AUDIO_CLKB_B */
+ [31] = RCAR_GP_PIN(6, 24), /* USB0_PWEN */
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xe6060418, "PUD6", 0xe6060458) {
+ [ 0] = RCAR_GP_PIN(6, 25), /* USB0_OVC */
+ [ 1] = RCAR_GP_PIN(6, 26), /* USB1_PWEN */
+ [ 2] = RCAR_GP_PIN(6, 27), /* USB1_OVC */
+ [ 3] = RCAR_GP_PIN(6, 28), /* USB30_PWEN */
+ [ 4] = RCAR_GP_PIN(6, 29), /* USB30_OVC */
+ [ 5] = RCAR_GP_PIN(6, 30), /* USB2_CH3_PWEN */
+ [ 6] = RCAR_GP_PIN(6, 31), /* USB2_CH3_OVC */
+ [ 7] = PIN_NONE,
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_NONE,
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NONE,
+ [16] = PIN_NONE,
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { /* sentinel */ },
};
static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
- const struct sh_pfc_bias_info *info;
- u32 reg;
- u32 bit;
+ const struct pinmux_bias_reg *reg;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return PIN_CONFIG_BIAS_DISABLE;
- reg = info->reg;
- bit = BIT(info->bit);
-
- if (!(sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit))
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
return PIN_CONFIG_BIAS_DISABLE;
- else if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit)
+ else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
return PIN_CONFIG_BIAS_PULL_UP;
else
return PIN_CONFIG_BIAS_PULL_DOWN;
@@ -4280,28 +5700,24 @@ static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
static void r8a7795_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
u32 enable, updown;
- u32 reg;
- u32 bit;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return;
- reg = info->reg;
- bit = BIT(info->bit);
-
- enable = sh_pfc_read_reg(pfc, PUEN + reg, 32) & ~bit;
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= bit;
+ enable |= BIT(bit);
- updown = sh_pfc_read_reg(pfc, PUD + reg, 32) & ~bit;
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= bit;
+ updown |= BIT(bit);
- sh_pfc_write_reg(pfc, PUD + reg, 32, updown);
- sh_pfc_write_reg(pfc, PUEN + reg, 32, enable);
+ sh_pfc_write(pfc, reg->pud, updown);
+ sh_pfc_write(pfc, reg->puen, enable);
}
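The r8a7795 get/set bias callbacks above no longer carry their own pin-to-register table; they call the shared sh_pfc_pin_to_bias_reg() helper against the new pinmux_bias_regs[] array. Below is a minimal sketch of the kind of lookup such a helper performs, assuming the sentinel entry is recognised by its zero puen address; the real helper lives in the sh-pfc core and may differ in detail.

/*
 * Sketch only: walk the per-SoC bias register table and return the
 * register pair plus bit position for a given pin, or NULL if the pin
 * has no bias control.
 */
static const struct pinmux_bias_reg *
example_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
			unsigned int *bit)
{
	const struct pinmux_bias_reg *reg;
	unsigned int i;

	for (reg = pfc->info->bias_regs; reg->puen; reg++) {
		for (i = 0; i < ARRAY_SIZE(reg->pins); i++) {
			if (reg->pins[i] == pin) {
				*bit = i;	/* bit index == position in pins[] */
				return reg;
			}
		}
	}

	return NULL;
}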
static const struct soc_device_attribute r8a7795es1[] = {
@@ -4340,6 +5756,8 @@ const struct sh_pfc_soc_info r8a7795_pinmux_info = {
.cfg_regs = pinmux_config_regs,
.drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index 200e1f4f6db9..73ed9c74c137 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -495,7 +495,7 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28
#define MOD_SEL1_1 FM(SEL_PWM2_0) FM(SEL_PWM2_1)
#define MOD_SEL1_0 FM(SEL_PWM1_0) FM(SEL_PWM1_1)
-/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
+/* MOD_SEL2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
#define MOD_SEL2_31 FM(I2C_SEL_5_0) FM(I2C_SEL_5_1)
#define MOD_SEL2_30 FM(I2C_SEL_3_0) FM(I2C_SEL_3_1)
#define MOD_SEL2_29 FM(I2C_SEL_0_0) FM(I2C_SEL_0_1)
@@ -1518,6 +1518,7 @@ static const u16 pinmux_data[] = {
#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r))
#define PIN_NUMBER(r, c) (((r) - 'A') * 39 + (c) + 300)
#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
+#define PIN_NONE U16_MAX
static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
@@ -2392,6 +2393,50 @@ static const unsigned int i2c6_c_mux[] = {
SDA6_C_MARK, SCL6_C_MARK,
};
+/* - INTC-EX ---------------------------------------------------------------- */
+static const unsigned int intc_ex_irq0_pins[] = {
+ /* IRQ0 */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int intc_ex_irq0_mux[] = {
+ IRQ0_MARK,
+};
+static const unsigned int intc_ex_irq1_pins[] = {
+ /* IRQ1 */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int intc_ex_irq1_mux[] = {
+ IRQ1_MARK,
+};
+static const unsigned int intc_ex_irq2_pins[] = {
+ /* IRQ2 */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int intc_ex_irq2_mux[] = {
+ IRQ2_MARK,
+};
+static const unsigned int intc_ex_irq3_pins[] = {
+ /* IRQ3 */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int intc_ex_irq3_mux[] = {
+ IRQ3_MARK,
+};
+static const unsigned int intc_ex_irq4_pins[] = {
+ /* IRQ4 */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int intc_ex_irq4_mux[] = {
+ IRQ4_MARK,
+};
+static const unsigned int intc_ex_irq5_pins[] = {
+ /* IRQ5 */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int intc_ex_irq5_mux[] = {
+ IRQ5_MARK,
+};
+
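Each intc_ex_irqN entry above is a pins/mux array pair; the driver ties the two together by name when it builds its group table. A hedged sketch of that convention follows, with the struct and macro deliberately renamed (the real SH_PFC_PIN_GROUP() and struct sh_pfc_pin_group in sh_pfc.h may use different field names). The pinmux_groups[] additions later in this hunk use exactly this pattern.

/* Sketch: how a foo_pins[]/foo_mux[] pair becomes one group entry. */
struct example_pin_group {
	const char *name;
	const unsigned int *pins;
	const unsigned int *mux;
	unsigned int nr_pins;
};

#define EXAMPLE_PIN_GROUP(n)				\
	{						\
		.name = #n,				\
		.pins = n##_pins,			\
		.mux = n##_mux,				\
		.nr_pins = ARRAY_SIZE(n##_pins),	\
	}

/* Token pasting picks up the arrays declared above by name. */
static const struct example_pin_group example_groups[] = {
	EXAMPLE_PIN_GROUP(intc_ex_irq0),	/* intc_ex_irq0_pins + intc_ex_irq0_mux */
};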
/* - MSIOF0 ----------------------------------------------------------------- */
static const unsigned int msiof0_clk_pins[] = {
/* SCK */
@@ -3922,6 +3967,12 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(i2c6_a),
SH_PFC_PIN_GROUP(i2c6_b),
SH_PFC_PIN_GROUP(i2c6_c),
+ SH_PFC_PIN_GROUP(intc_ex_irq0),
+ SH_PFC_PIN_GROUP(intc_ex_irq1),
+ SH_PFC_PIN_GROUP(intc_ex_irq2),
+ SH_PFC_PIN_GROUP(intc_ex_irq3),
+ SH_PFC_PIN_GROUP(intc_ex_irq4),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
SH_PFC_PIN_GROUP(msiof0_clk),
SH_PFC_PIN_GROUP(msiof0_sync),
SH_PFC_PIN_GROUP(msiof0_ss1),
@@ -4286,6 +4337,15 @@ static const char * const i2c6_groups[] = {
"i2c6_c",
};
+static const char * const intc_ex_groups[] = {
+ "intc_ex_irq0",
+ "intc_ex_irq1",
+ "intc_ex_irq2",
+ "intc_ex_irq3",
+ "intc_ex_irq4",
+ "intc_ex_irq5",
+};
+
static const char * const msiof0_groups[] = {
"msiof0_clk",
"msiof0_sync",
@@ -4580,6 +4640,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(i2c1),
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(intc_ex),
SH_PFC_FUNCTION(msiof0),
SH_PFC_FUNCTION(msiof1),
SH_PFC_FUNCTION(msiof2),
@@ -5416,11 +5477,20 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ },
};
+enum ioctrl_regs {
+ POCCTRL,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POCCTRL] = { 0xe6060380, },
+ { /* sentinel */ },
+};
+
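The hard-coded POCCTRL address in r8a7796_pin_to_pocctrl() below now comes from this table, indexed through the ioctrl_regs enum. Extending the scheme to a further I/O control register would only need one more enum entry plus a table slot; a purely illustrative sketch (the DEMO_ names and the second address are assumptions, not part of this patch):

enum demo_ioctrl_regs {
	DEMO_POCCTRL,
	DEMO_TDSELCTRL,				/* hypothetical second register */
};

static const struct pinmux_ioctrl_reg demo_ioctrl_regs[] = {
	[DEMO_POCCTRL]   = { 0xe6060380, },	/* same address as the entry above */
	[DEMO_TDSELCTRL] = { 0xe60603c0, },	/* illustrative address only */
	{ /* sentinel */ },
};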
static int r8a7796_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
{
int bit = -EINVAL;
- *pocctrl = 0xe6060380;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL].reg;
if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
bit = pin & 0x1f;
@@ -5431,242 +5501,261 @@ static int r8a7796_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
return bit;
}
-#define PUEN 0xe6060400
-#define PUD 0xe6060440
-
-#define PU0 0x00
-#define PU1 0x04
-#define PU2 0x08
-#define PU3 0x0c
-#define PU4 0x10
-#define PU5 0x14
-#define PU6 0x18
-
-static const struct sh_pfc_bias_info bias_info[] = {
- { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */
- { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */
- { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */
- { PIN_NUMBER('A', 9), PU0, 28 }, /* AVB_MDIO */
- { PIN_NUMBER('A', 12), PU0, 27 }, /* AVB_TXCREFCLK */
- { PIN_NUMBER('B', 17), PU0, 26 }, /* AVB_TD3 */
- { PIN_NUMBER('A', 17), PU0, 25 }, /* AVB_TD2 */
- { PIN_NUMBER('B', 18), PU0, 24 }, /* AVB_TD1 */
- { PIN_NUMBER('A', 18), PU0, 23 }, /* AVB_TD0 */
- { PIN_NUMBER('A', 19), PU0, 22 }, /* AVB_TXC */
- { PIN_NUMBER('A', 8), PU0, 21 }, /* AVB_TX_CTL */
- { PIN_NUMBER('B', 14), PU0, 20 }, /* AVB_RD3 */
- { PIN_NUMBER('A', 14), PU0, 19 }, /* AVB_RD2 */
- { PIN_NUMBER('B', 13), PU0, 18 }, /* AVB_RD1 */
- { PIN_NUMBER('A', 13), PU0, 17 }, /* AVB_RD0 */
- { PIN_NUMBER('B', 19), PU0, 16 }, /* AVB_RXC */
- { PIN_NUMBER('A', 16), PU0, 15 }, /* AVB_RX_CTL */
- { PIN_NUMBER('V', 7), PU0, 14 }, /* RPC_RESET# */
- { PIN_NUMBER('V', 6), PU0, 13 }, /* RPC_WP# */
- { PIN_NUMBER('Y', 7), PU0, 12 }, /* RPC_INT# */
- { PIN_NUMBER('V', 5), PU0, 11 }, /* QSPI1_SSL */
- { PIN_A_NUMBER('C', 3), PU0, 10 }, /* QSPI1_IO3 */
- { PIN_A_NUMBER('E', 4), PU0, 9 }, /* QSPI1_IO2 */
- { PIN_A_NUMBER('E', 5), PU0, 8 }, /* QSPI1_MISO_IO1 */
- { PIN_A_NUMBER('C', 7), PU0, 7 }, /* QSPI1_MOSI_IO0 */
- { PIN_NUMBER('V', 3), PU0, 6 }, /* QSPI1_SPCLK */
- { PIN_NUMBER('Y', 3), PU0, 5 }, /* QSPI0_SSL */
- { PIN_A_NUMBER('B', 6), PU0, 4 }, /* QSPI0_IO3 */
- { PIN_NUMBER('Y', 6), PU0, 3 }, /* QSPI0_IO2 */
- { PIN_A_NUMBER('B', 4), PU0, 2 }, /* QSPI0_MISO_IO1 */
- { PIN_A_NUMBER('C', 5), PU0, 1 }, /* QSPI0_MOSI_IO0 */
- { PIN_NUMBER('W', 3), PU0, 0 }, /* QSPI0_SPCLK */
-
- { RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */
- { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */
- { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */
- { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */
- { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */
- { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */
- { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */
- { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */
- { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */
- { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */
- { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */
- { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */
- { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */
- { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */
- { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */
- { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */
- { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */
- { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */
- { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */
- { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */
- { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */
- { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */
- { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */
- { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */
- { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */
- { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */
- { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */
- { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */
- { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */
- { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
- { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */
- { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */
-
- { PIN_A_NUMBER('P', 8), PU2, 31 }, /* DU_DOTCLKIN1 */
- { PIN_A_NUMBER('P', 7), PU2, 30 }, /* DU_DOTCLKIN0 */
- { RCAR_GP_PIN(7, 3), PU2, 29 }, /* GP7_03 */
- { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */
- { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */
- { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */
- { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */
- { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */
- { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */
- { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */
- { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */
- { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */
- { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */
- { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */
- { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */
- { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */
- { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */
- { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */
- { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */
- { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */
- { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */
- { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */
- { PIN_NUMBER('C', 1), PU2, 9 }, /* PRESETOUT# */
- { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */
- { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */
- { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */
- { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */
- { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */
- { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */
- { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N */
- { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */
- { RCAR_GP_PIN(1, 28), PU2, 0 }, /* CLKOUT */
-
- { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */
- { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */
- { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */
- { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */
- { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */
- { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */
- { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */
- { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */
- { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */
- { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */
- { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */
- { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */
- { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */
- { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */
- { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */
- { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */
- { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */
- { RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */
- { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */
- { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */
- { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */
- { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */
- { PIN_A_NUMBER('T', 30), PU3, 9 }, /* ASEBRK */
- /* bit 8 n/a */
- { PIN_A_NUMBER('R', 29), PU3, 7 }, /* TDI */
- { PIN_A_NUMBER('R', 30), PU3, 6 }, /* TMS */
- { PIN_A_NUMBER('T', 27), PU3, 5 }, /* TCK */
- { PIN_A_NUMBER('R', 26), PU3, 4 }, /* TRST# */
- { PIN_A_NUMBER('D', 39), PU3, 3 }, /* EXTALR*/
- { PIN_A_NUMBER('D', 38), PU3, 2 }, /* FSCLKST */
- /* bit 1 n/a on M3*/
- { PIN_A_NUMBER('R', 8), PU3, 0 }, /* DU_DOTCLKIN2 */
-
- { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */
- { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */
- { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */
- { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */
- { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */
- { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */
- { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */
- { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */
- { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */
- { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */
- { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */
- { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */
- { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */
- { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */
- { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */
- { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */
- { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */
- { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */
- { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */
- { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */
- { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */
- { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */
- { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */
- { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */
- { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */
- { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */
- { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */
- { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */
- { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */
- { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */
- { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */
- { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */
-
- { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */
- { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */
- { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */
- { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */
- { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */
- { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */
- { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */
- { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */
- { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */
- { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */
- { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */
- { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */
- { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */
- { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */
- { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */
- { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
- { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
- { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS349 */
- { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK349 */
- { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
- { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
- { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
- { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */
- { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */
- { PIN_NUMBER('H', 37), PU5, 6 }, /* MLB_REF */
- { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */
- { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */
- { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */
- { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */
- { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */
- { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */
-
- { RCAR_GP_PIN(6, 31), PU6, 6 }, /* GP6_31 */
- { RCAR_GP_PIN(6, 30), PU6, 5 }, /* GP6_30 */
- { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */
- { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN */
- { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */
- { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */
- { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
+ [ 0] = PIN_NUMBER('W', 3), /* QSPI0_SPCLK */
+ [ 1] = PIN_A_NUMBER('C', 5), /* QSPI0_MOSI_IO0 */
+ [ 2] = PIN_A_NUMBER('B', 4), /* QSPI0_MISO_IO1 */
+ [ 3] = PIN_NUMBER('Y', 6), /* QSPI0_IO2 */
+ [ 4] = PIN_A_NUMBER('B', 6), /* QSPI0_IO3 */
+ [ 5] = PIN_NUMBER('Y', 3), /* QSPI0_SSL */
+ [ 6] = PIN_NUMBER('V', 3), /* QSPI1_SPCLK */
+ [ 7] = PIN_A_NUMBER('C', 7), /* QSPI1_MOSI_IO0 */
+ [ 8] = PIN_A_NUMBER('E', 5), /* QSPI1_MISO_IO1 */
+ [ 9] = PIN_A_NUMBER('E', 4), /* QSPI1_IO2 */
+ [10] = PIN_A_NUMBER('C', 3), /* QSPI1_IO3 */
+ [11] = PIN_NUMBER('V', 5), /* QSPI1_SSL */
+ [12] = PIN_NUMBER('Y', 7), /* RPC_INT# */
+ [13] = PIN_NUMBER('V', 6), /* RPC_WP# */
+ [14] = PIN_NUMBER('V', 7), /* RPC_RESET# */
+ [15] = PIN_NUMBER('A', 16), /* AVB_RX_CTL */
+ [16] = PIN_NUMBER('B', 19), /* AVB_RXC */
+ [17] = PIN_NUMBER('A', 13), /* AVB_RD0 */
+ [18] = PIN_NUMBER('B', 13), /* AVB_RD1 */
+ [19] = PIN_NUMBER('A', 14), /* AVB_RD2 */
+ [20] = PIN_NUMBER('B', 14), /* AVB_RD3 */
+ [21] = PIN_NUMBER('A', 8), /* AVB_TX_CTL */
+ [22] = PIN_NUMBER('A', 19), /* AVB_TXC */
+ [23] = PIN_NUMBER('A', 18), /* AVB_TD0 */
+ [24] = PIN_NUMBER('B', 18), /* AVB_TD1 */
+ [25] = PIN_NUMBER('A', 17), /* AVB_TD2 */
+ [26] = PIN_NUMBER('B', 17), /* AVB_TD3 */
+ [27] = PIN_NUMBER('A', 12), /* AVB_TXCREFCLK */
+ [28] = PIN_NUMBER('A', 9), /* AVB_MDIO */
+ [29] = RCAR_GP_PIN(2, 9), /* AVB_MDC */
+ [30] = RCAR_GP_PIN(2, 10), /* AVB_MAGIC */
+ [31] = RCAR_GP_PIN(2, 11), /* AVB_PHY_INT */
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
+ [ 0] = RCAR_GP_PIN(2, 12), /* AVB_LINK */
+ [ 1] = RCAR_GP_PIN(2, 13), /* AVB_AVTP_MATCH_A */
+ [ 2] = RCAR_GP_PIN(2, 14), /* AVB_AVTP_CAPTURE_A */
+ [ 3] = RCAR_GP_PIN(2, 0), /* IRQ0 */
+ [ 4] = RCAR_GP_PIN(2, 1), /* IRQ1 */
+ [ 5] = RCAR_GP_PIN(2, 2), /* IRQ2 */
+ [ 6] = RCAR_GP_PIN(2, 3), /* IRQ3 */
+ [ 7] = RCAR_GP_PIN(2, 4), /* IRQ4 */
+ [ 8] = RCAR_GP_PIN(2, 5), /* IRQ5 */
+ [ 9] = RCAR_GP_PIN(2, 6), /* PWM0 */
+ [10] = RCAR_GP_PIN(2, 7), /* PWM1_A */
+ [11] = RCAR_GP_PIN(2, 8), /* PWM2_A */
+ [12] = RCAR_GP_PIN(1, 0), /* A0 */
+ [13] = RCAR_GP_PIN(1, 1), /* A1 */
+ [14] = RCAR_GP_PIN(1, 2), /* A2 */
+ [15] = RCAR_GP_PIN(1, 3), /* A3 */
+ [16] = RCAR_GP_PIN(1, 4), /* A4 */
+ [17] = RCAR_GP_PIN(1, 5), /* A5 */
+ [18] = RCAR_GP_PIN(1, 6), /* A6 */
+ [19] = RCAR_GP_PIN(1, 7), /* A7 */
+ [20] = RCAR_GP_PIN(1, 8), /* A8 */
+ [21] = RCAR_GP_PIN(1, 9), /* A9 */
+ [22] = RCAR_GP_PIN(1, 10), /* A10 */
+ [23] = RCAR_GP_PIN(1, 11), /* A11 */
+ [24] = RCAR_GP_PIN(1, 12), /* A12 */
+ [25] = RCAR_GP_PIN(1, 13), /* A13 */
+ [26] = RCAR_GP_PIN(1, 14), /* A14 */
+ [27] = RCAR_GP_PIN(1, 15), /* A15 */
+ [28] = RCAR_GP_PIN(1, 16), /* A16 */
+ [29] = RCAR_GP_PIN(1, 17), /* A17 */
+ [30] = RCAR_GP_PIN(1, 18), /* A18 */
+ [31] = RCAR_GP_PIN(1, 19), /* A19 */
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
+ [ 0] = RCAR_GP_PIN(1, 28), /* CLKOUT */
+ [ 1] = RCAR_GP_PIN(1, 20), /* CS0_N */
+ [ 2] = RCAR_GP_PIN(1, 21), /* CS1_N */
+ [ 3] = RCAR_GP_PIN(1, 22), /* BS_N */
+ [ 4] = RCAR_GP_PIN(1, 23), /* RD_N */
+ [ 5] = RCAR_GP_PIN(1, 24), /* RD_WR_N */
+ [ 6] = RCAR_GP_PIN(1, 25), /* WE0_N */
+ [ 7] = RCAR_GP_PIN(1, 26), /* WE1_N */
+ [ 8] = RCAR_GP_PIN(1, 27), /* EX_WAIT0_A */
+ [ 9] = PIN_NUMBER('C', 1), /* PRESETOUT# */
+ [10] = RCAR_GP_PIN(0, 0), /* D0 */
+ [11] = RCAR_GP_PIN(0, 1), /* D1 */
+ [12] = RCAR_GP_PIN(0, 2), /* D2 */
+ [13] = RCAR_GP_PIN(0, 3), /* D3 */
+ [14] = RCAR_GP_PIN(0, 4), /* D4 */
+ [15] = RCAR_GP_PIN(0, 5), /* D5 */
+ [16] = RCAR_GP_PIN(0, 6), /* D6 */
+ [17] = RCAR_GP_PIN(0, 7), /* D7 */
+ [18] = RCAR_GP_PIN(0, 8), /* D8 */
+ [19] = RCAR_GP_PIN(0, 9), /* D9 */
+ [20] = RCAR_GP_PIN(0, 10), /* D10 */
+ [21] = RCAR_GP_PIN(0, 11), /* D11 */
+ [22] = RCAR_GP_PIN(0, 12), /* D12 */
+ [23] = RCAR_GP_PIN(0, 13), /* D13 */
+ [24] = RCAR_GP_PIN(0, 14), /* D14 */
+ [25] = RCAR_GP_PIN(0, 15), /* D15 */
+ [26] = RCAR_GP_PIN(7, 0), /* AVS1 */
+ [27] = RCAR_GP_PIN(7, 1), /* AVS2 */
+ [28] = RCAR_GP_PIN(7, 2), /* HDMI0_CEC */
+ [29] = RCAR_GP_PIN(7, 3), /* GP7_03 */
+ [30] = PIN_A_NUMBER('P', 7), /* DU_DOTCLKIN0 */
+ [31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+ [ 0] = PIN_A_NUMBER('R', 8), /* DU_DOTCLKIN2 */
+ [ 1] = PIN_NONE,
+ [ 2] = PIN_A_NUMBER('D', 38), /* FSCLKST */
+ [ 3] = PIN_A_NUMBER('D', 39), /* EXTALR*/
+ [ 4] = PIN_A_NUMBER('R', 26), /* TRST# */
+ [ 5] = PIN_A_NUMBER('T', 27), /* TCK */
+ [ 6] = PIN_A_NUMBER('R', 30), /* TMS */
+ [ 7] = PIN_A_NUMBER('R', 29), /* TDI */
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_A_NUMBER('T', 30), /* ASEBRK */
+ [10] = RCAR_GP_PIN(3, 0), /* SD0_CLK */
+ [11] = RCAR_GP_PIN(3, 1), /* SD0_CMD */
+ [12] = RCAR_GP_PIN(3, 2), /* SD0_DAT0 */
+ [13] = RCAR_GP_PIN(3, 3), /* SD0_DAT1 */
+ [14] = RCAR_GP_PIN(3, 4), /* SD0_DAT2 */
+ [15] = RCAR_GP_PIN(3, 5), /* SD0_DAT3 */
+ [16] = RCAR_GP_PIN(3, 6), /* SD1_CLK */
+ [17] = RCAR_GP_PIN(3, 7), /* SD1_CMD */
+ [18] = RCAR_GP_PIN(3, 8), /* SD1_DAT0 */
+ [19] = RCAR_GP_PIN(3, 9), /* SD1_DAT1 */
+ [20] = RCAR_GP_PIN(3, 10), /* SD1_DAT2 */
+ [21] = RCAR_GP_PIN(3, 11), /* SD1_DAT3 */
+ [22] = RCAR_GP_PIN(4, 0), /* SD2_CLK */
+ [23] = RCAR_GP_PIN(4, 1), /* SD2_CMD */
+ [24] = RCAR_GP_PIN(4, 2), /* SD2_DAT0 */
+ [25] = RCAR_GP_PIN(4, 3), /* SD2_DAT1 */
+ [26] = RCAR_GP_PIN(4, 4), /* SD2_DAT2 */
+ [27] = RCAR_GP_PIN(4, 5), /* SD2_DAT3 */
+ [28] = RCAR_GP_PIN(4, 6), /* SD2_DS */
+ [29] = RCAR_GP_PIN(4, 7), /* SD3_CLK */
+ [30] = RCAR_GP_PIN(4, 8), /* SD3_CMD */
+ [31] = RCAR_GP_PIN(4, 9), /* SD3_DAT0 */
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
+ [ 0] = RCAR_GP_PIN(4, 10), /* SD3_DAT1 */
+ [ 1] = RCAR_GP_PIN(4, 11), /* SD3_DAT2 */
+ [ 2] = RCAR_GP_PIN(4, 12), /* SD3_DAT3 */
+ [ 3] = RCAR_GP_PIN(4, 13), /* SD3_DAT4 */
+ [ 4] = RCAR_GP_PIN(4, 14), /* SD3_DAT5 */
+ [ 5] = RCAR_GP_PIN(4, 15), /* SD3_DAT6 */
+ [ 6] = RCAR_GP_PIN(4, 16), /* SD3_DAT7 */
+ [ 7] = RCAR_GP_PIN(4, 17), /* SD3_DS */
+ [ 8] = RCAR_GP_PIN(3, 12), /* SD0_CD */
+ [ 9] = RCAR_GP_PIN(3, 13), /* SD0_WP */
+ [10] = RCAR_GP_PIN(3, 14), /* SD1_CD */
+ [11] = RCAR_GP_PIN(3, 15), /* SD1_WP */
+ [12] = RCAR_GP_PIN(5, 0), /* SCK0 */
+ [13] = RCAR_GP_PIN(5, 1), /* RX0 */
+ [14] = RCAR_GP_PIN(5, 2), /* TX0 */
+ [15] = RCAR_GP_PIN(5, 3), /* CTS0_N */
+ [16] = RCAR_GP_PIN(5, 4), /* RTS0_N_TANS */
+ [17] = RCAR_GP_PIN(5, 5), /* RX1_A */
+ [18] = RCAR_GP_PIN(5, 6), /* TX1_A */
+ [19] = RCAR_GP_PIN(5, 7), /* CTS1_N */
+ [20] = RCAR_GP_PIN(5, 8), /* RTS1_N_TANS */
+ [21] = RCAR_GP_PIN(5, 9), /* SCK2 */
+ [22] = RCAR_GP_PIN(5, 10), /* TX2_A */
+ [23] = RCAR_GP_PIN(5, 11), /* RX2_A */
+ [24] = RCAR_GP_PIN(5, 12), /* HSCK0 */
+ [25] = RCAR_GP_PIN(5, 13), /* HRX0 */
+ [26] = RCAR_GP_PIN(5, 14), /* HTX0 */
+ [27] = RCAR_GP_PIN(5, 15), /* HCTS0_N */
+ [28] = RCAR_GP_PIN(5, 16), /* HRTS0_N */
+ [29] = RCAR_GP_PIN(5, 17), /* MSIOF0_SCK */
+ [30] = RCAR_GP_PIN(5, 18), /* MSIOF0_SYNC */
+ [31] = RCAR_GP_PIN(5, 19), /* MSIOF0_SS1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
+ [ 0] = RCAR_GP_PIN(5, 20), /* MSIOF0_TXD */
+ [ 1] = RCAR_GP_PIN(5, 21), /* MSIOF0_SS2 */
+ [ 2] = RCAR_GP_PIN(5, 22), /* MSIOF0_RXD */
+ [ 3] = RCAR_GP_PIN(5, 23), /* MLB_CLK */
+ [ 4] = RCAR_GP_PIN(5, 24), /* MLB_SIG */
+ [ 5] = RCAR_GP_PIN(5, 25), /* MLB_DAT */
+ [ 6] = PIN_NUMBER('H', 37), /* MLB_REF */
+ [ 7] = RCAR_GP_PIN(6, 0), /* SSI_SCK01239 */
+ [ 8] = RCAR_GP_PIN(6, 1), /* SSI_WS01239 */
+ [ 9] = RCAR_GP_PIN(6, 2), /* SSI_SDATA0 */
+ [10] = RCAR_GP_PIN(6, 3), /* SSI_SDATA1_A */
+ [11] = RCAR_GP_PIN(6, 4), /* SSI_SDATA2_A */
+ [12] = RCAR_GP_PIN(6, 5), /* SSI_SCK349 */
+ [13] = RCAR_GP_PIN(6, 6), /* SSI_WS349 */
+ [14] = RCAR_GP_PIN(6, 7), /* SSI_SDATA3 */
+ [15] = RCAR_GP_PIN(6, 8), /* SSI_SCK4 */
+ [16] = RCAR_GP_PIN(6, 9), /* SSI_WS4 */
+ [17] = RCAR_GP_PIN(6, 10), /* SSI_SDATA4 */
+ [18] = RCAR_GP_PIN(6, 11), /* SSI_SCK5 */
+ [19] = RCAR_GP_PIN(6, 12), /* SSI_WS5 */
+ [20] = RCAR_GP_PIN(6, 13), /* SSI_SDATA5 */
+ [21] = RCAR_GP_PIN(6, 14), /* SSI_SCK6 */
+ [22] = RCAR_GP_PIN(6, 15), /* SSI_WS6 */
+ [23] = RCAR_GP_PIN(6, 16), /* SSI_SDATA6 */
+ [24] = RCAR_GP_PIN(6, 17), /* SSI_SCK78 */
+ [25] = RCAR_GP_PIN(6, 18), /* SSI_WS78 */
+ [26] = RCAR_GP_PIN(6, 19), /* SSI_SDATA7 */
+ [27] = RCAR_GP_PIN(6, 20), /* SSI_SDATA8 */
+ [28] = RCAR_GP_PIN(6, 21), /* SSI_SDATA9_A */
+ [29] = RCAR_GP_PIN(6, 22), /* AUDIO_CLKA_A */
+ [30] = RCAR_GP_PIN(6, 23), /* AUDIO_CLKB_B */
+ [31] = RCAR_GP_PIN(6, 24), /* USB0_PWEN */
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xe6060418, "PUD6", 0xe6060458) {
+ [ 0] = RCAR_GP_PIN(6, 25), /* USB0_OVC */
+ [ 1] = RCAR_GP_PIN(6, 26), /* USB1_PWEN */
+ [ 2] = RCAR_GP_PIN(6, 27), /* USB1_OVC */
+ [ 3] = RCAR_GP_PIN(6, 28), /* USB30_PWEN */
+ [ 4] = RCAR_GP_PIN(6, 29), /* USB30_OVC */
+ [ 5] = RCAR_GP_PIN(6, 30), /* GP6_30 */
+ [ 6] = RCAR_GP_PIN(6, 31), /* GP6_31 */
+ [ 7] = PIN_NONE,
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_NONE,
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NONE,
+ [16] = PIN_NONE,
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { /* sentinel */ },
};
static unsigned int r8a7796_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
- const struct sh_pfc_bias_info *info;
- u32 reg;
- u32 bit;
+ const struct pinmux_bias_reg *reg;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return PIN_CONFIG_BIAS_DISABLE;
- reg = info->reg;
- bit = BIT(info->bit);
-
- if (!(sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit))
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
return PIN_CONFIG_BIAS_DISABLE;
- else if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit)
+ else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
return PIN_CONFIG_BIAS_PULL_UP;
else
return PIN_CONFIG_BIAS_PULL_DOWN;
@@ -5675,28 +5764,24 @@ static unsigned int r8a7796_pinmux_get_bias(struct sh_pfc *pfc,
static void r8a7796_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
u32 enable, updown;
- u32 reg;
- u32 bit;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return;
- reg = info->reg;
- bit = BIT(info->bit);
-
- enable = sh_pfc_read_reg(pfc, PUEN + reg, 32) & ~bit;
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= bit;
+ enable |= BIT(bit);
- updown = sh_pfc_read_reg(pfc, PUD + reg, 32) & ~bit;
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= bit;
+ updown |= BIT(bit);
- sh_pfc_write_reg(pfc, PUD + reg, 32, updown);
- sh_pfc_write_reg(pfc, PUEN + reg, 32, enable);
+ sh_pfc_write(pfc, reg->pud, updown);
+ sh_pfc_write(pfc, reg->puen, enable);
}
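As on r8a7795, the two callbacks above implement a simple two-bit encoding: PUEN gates whether any bias is applied and PUD picks the direction. The mapping they realise can be restated as a small helper; this is a sketch derived from the code above, not driver code.

/*
 * PUEN bit	PUD bit		resulting bias
 *    0		   x		PIN_CONFIG_BIAS_DISABLE
 *    1		   1		PIN_CONFIG_BIAS_PULL_UP
 *    1		   0		PIN_CONFIG_BIAS_PULL_DOWN
 */
static unsigned int demo_decode_bias(u32 puen, u32 pud, unsigned int bit)
{
	if (!(puen & BIT(bit)))
		return PIN_CONFIG_BIAS_DISABLE;

	return (pud & BIT(bit)) ? PIN_CONFIG_BIAS_PULL_UP
				: PIN_CONFIG_BIAS_PULL_DOWN;
}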
static const struct sh_pfc_soc_operations r8a7796_pinmux_ops = {
@@ -5721,6 +5806,8 @@ const struct sh_pfc_soc_info r8a7796_pinmux_info = {
.cfg_regs = pinmux_config_regs,
.drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
index 4f5ee1d7317d..89b7541ab1ed 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
@@ -198,8 +198,8 @@
#define GPSR6_0 FM(QSPI0_SPCLK)
/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
-#define IP0_3_0 FM(IRQ0_A) FM(MSIOF2_SYNC_B) FM(USB0_IDIN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_7_4 FM(MSIOF2_SCK) F_(0, 0) FM(USB0_IDPU) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_3_0 FM(IRQ0_A) FM(MSIOF2_SYNC_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_7_4 FM(MSIOF2_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_11_8 FM(MSIOF2_TXD) FM(SCL3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_15_12 FM(MSIOF2_RXD) FM(SDA3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_19_16 FM(MLB_CLK) FM(MSIOF2_SYNC_A) FM(SCK5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -522,10 +522,8 @@ static const u16 pinmux_data[] = {
/* IPSR0 */
PINMUX_IPSR_MSEL(IP0_3_0, IRQ0_A, SEL_IRQ_0_0),
PINMUX_IPSR_MSEL(IP0_3_0, MSIOF2_SYNC_B, SEL_MSIOF2_1),
- PINMUX_IPSR_GPSR(IP0_3_0, USB0_IDIN),
PINMUX_IPSR_GPSR(IP0_7_4, MSIOF2_SCK),
- PINMUX_IPSR_GPSR(IP0_7_4, USB0_IDPU),
PINMUX_IPSR_GPSR(IP0_11_8, MSIOF2_TXD),
PINMUX_IPSR_MSEL(IP0_11_8, SCL3_A, SEL_I2C3_0),
@@ -936,6 +934,129 @@ static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
};
+/* - AUDIO CLOCK ------------------------------------------------------------- */
+static const unsigned int audio_clk_a_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(4, 1),
+};
+static const unsigned int audio_clk_a_mux[] = {
+ AUDIO_CLKA_MARK,
+};
+static const unsigned int audio_clk_b_pins[] = {
+ /* CLK B */
+ RCAR_GP_PIN(2, 27),
+};
+static const unsigned int audio_clk_b_mux[] = {
+ AUDIO_CLKB_MARK,
+};
+static const unsigned int audio_clkout_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(4, 5),
+};
+static const unsigned int audio_clkout_mux[] = {
+ AUDIO_CLKOUT_MARK,
+};
+static const unsigned int audio_clkout1_pins[] = {
+ /* CLKOUT1 */
+ RCAR_GP_PIN(4, 22),
+};
+static const unsigned int audio_clkout1_mux[] = {
+ AUDIO_CLKOUT1_MARK,
+};
+
+/* - EtherAVB --------------------------------------------------------------- */
+static const unsigned int avb0_link_pins[] = {
+ /* AVB0_LINK */
+ RCAR_GP_PIN(5, 20),
+};
+static const unsigned int avb0_link_mux[] = {
+ AVB0_LINK_MARK,
+};
+static const unsigned int avb0_magic_pins[] = {
+ /* AVB0_MAGIC */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int avb0_magic_mux[] = {
+ AVB0_MAGIC_MARK,
+};
+static const unsigned int avb0_phy_int_pins[] = {
+ /* AVB0_PHY_INT */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int avb0_phy_int_mux[] = {
+ AVB0_PHY_INT_MARK,
+};
+static const unsigned int avb0_mdc_pins[] = {
+ /* AVB0_MDC, AVB0_MDIO */
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int avb0_mdc_mux[] = {
+ AVB0_MDC_MARK, AVB0_MDIO_MARK,
+};
+static const unsigned int avb0_mii_pins[] = {
+ /*
+ * AVB0_TX_CTL, AVB0_TXC, AVB0_TD0,
+ * AVB0_TD1, AVB0_TD2, AVB0_TD3,
+ * AVB0_RX_CTL, AVB0_RXC, AVB0_RD0,
+ * AVB0_RD1, AVB0_RD2, AVB0_RD3,
+ * AVB0_TXCREFCLK
+ */
+ RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 11),
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
+ RCAR_GP_PIN(5, 3), RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 5),
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 8),
+ RCAR_GP_PIN(5, 15),
+};
+static const unsigned int avb0_mii_mux[] = {
+ AVB0_TX_CTL_MARK, AVB0_TXC_MARK, AVB0_TD0_MARK,
+ AVB0_TD1_MARK, AVB0_TD2_MARK, AVB0_TD3_MARK,
+ AVB0_RX_CTL_MARK, AVB0_RXC_MARK, AVB0_RD0_MARK,
+ AVB0_RD1_MARK, AVB0_RD2_MARK, AVB0_RD3_MARK,
+ AVB0_TXCREFCLK_MARK,
+};
+static const unsigned int avb0_avtp_pps_a_pins[] = {
+ /* AVB0_AVTP_PPS_A */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int avb0_avtp_pps_a_mux[] = {
+ AVB0_AVTP_PPS_A_MARK,
+};
+static const unsigned int avb0_avtp_match_a_pins[] = {
+ /* AVB0_AVTP_MATCH_A */
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int avb0_avtp_match_a_mux[] = {
+ AVB0_AVTP_MATCH_A_MARK,
+};
+static const unsigned int avb0_avtp_capture_a_pins[] = {
+ /* AVB0_AVTP_CAPTURE_A */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int avb0_avtp_capture_a_mux[] = {
+ AVB0_AVTP_CAPTURE_A_MARK,
+};
+static const unsigned int avb0_avtp_pps_b_pins[] = {
+ /* AVB0_AVTP_PPS_B */
+ RCAR_GP_PIN(4, 16),
+};
+static const unsigned int avb0_avtp_pps_b_mux[] = {
+ AVB0_AVTP_PPS_B_MARK,
+};
+static const unsigned int avb0_avtp_match_b_pins[] = {
+ /* AVB0_AVTP_MATCH_B */
+ RCAR_GP_PIN(4, 18),
+};
+static const unsigned int avb0_avtp_match_b_mux[] = {
+ AVB0_AVTP_MATCH_B_MARK,
+};
+static const unsigned int avb0_avtp_capture_b_pins[] = {
+ /* AVB0_AVTP_CAPTURE_B */
+ RCAR_GP_PIN(4, 17),
+};
+static const unsigned int avb0_avtp_capture_b_mux[] = {
+ AVB0_AVTP_CAPTURE_B_MARK,
+};
+
/* - I2C -------------------------------------------------------------------- */
static const unsigned int i2c0_pins[] = {
/* SCL, SDA */
@@ -1018,6 +1139,118 @@ static const unsigned int mmc_ctrl_mux[] = {
MMC_CLK_MARK, MMC_CMD_MARK,
};
+/* - PWM0 ------------------------------------------------------------------ */
+static const unsigned int pwm0_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 1),
+};
+
+static const unsigned int pwm0_a_mux[] = {
+ PWM0_A_MARK,
+};
+
+static const unsigned int pwm0_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 18),
+};
+
+static const unsigned int pwm0_b_mux[] = {
+ PWM0_B_MARK,
+};
+
+static const unsigned int pwm0_c_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 29),
+};
+
+static const unsigned int pwm0_c_mux[] = {
+ PWM0_C_MARK,
+};
+
+/* - PWM1 ------------------------------------------------------------------ */
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 2),
+};
+
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 19),
+};
+
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+
+static const unsigned int pwm1_c_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 30),
+};
+
+static const unsigned int pwm1_c_mux[] = {
+ PWM1_C_MARK,
+};
+
+/* - PWM2 ------------------------------------------------------------------ */
+static const unsigned int pwm2_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 3),
+};
+
+static const unsigned int pwm2_a_mux[] = {
+ PWM2_A_MARK,
+};
+
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 22),
+};
+
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+
+static const unsigned int pwm2_c_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 31),
+};
+
+static const unsigned int pwm2_c_mux[] = {
+ PWM2_C_MARK,
+};
+
+/* - PWM3 ------------------------------------------------------------------ */
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 4),
+};
+
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 27),
+};
+
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+
+static const unsigned int pwm3_c_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(4, 0),
+};
+
+static const unsigned int pwm3_c_mux[] = {
+ PWM3_C_MARK,
+};
+
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_a_pins[] = {
/* RX, TX */
@@ -1202,7 +1435,75 @@ static const unsigned int scif_clk_mux[] = {
SCIF_CLK_MARK,
};
+/* - SSI ---------------------------------------------------------------*/
+static const unsigned int ssi3_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(4, 3),
+};
+static const unsigned int ssi3_data_mux[] = {
+ SSI_SDATA3_MARK,
+};
+static const unsigned int ssi34_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 4),
+};
+static const unsigned int ssi34_ctrl_mux[] = {
+ SSI_SCK34_MARK, SSI_WS34_MARK,
+};
+static const unsigned int ssi4_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(4, 5), RCAR_GP_PIN(4, 7),
+};
+static const unsigned int ssi4_ctrl_a_mux[] = {
+ SSI_SCK4_A_MARK, SSI_WS4_A_MARK,
+};
+static const unsigned int ssi4_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(4, 6),
+};
+static const unsigned int ssi4_data_a_mux[] = {
+ SSI_SDATA4_A_MARK,
+};
+static const unsigned int ssi4_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(2, 15), RCAR_GP_PIN(2, 20),
+};
+static const unsigned int ssi4_ctrl_b_mux[] = {
+ SSI_SCK4_B_MARK, SSI_WS4_B_MARK,
+};
+static const unsigned int ssi4_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(2, 16),
+};
+static const unsigned int ssi4_data_b_mux[] = {
+ SSI_SDATA4_B_MARK,
+};
+
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+};
+static const unsigned int usb0_mux[] = {
+ USB0_PWEN_MARK, USB0_OVC_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(audio_clk_a),
+ SH_PFC_PIN_GROUP(audio_clk_b),
+ SH_PFC_PIN_GROUP(audio_clkout),
+ SH_PFC_PIN_GROUP(audio_clkout1),
+ SH_PFC_PIN_GROUP(avb0_link),
+ SH_PFC_PIN_GROUP(avb0_magic),
+ SH_PFC_PIN_GROUP(avb0_phy_int),
+ SH_PFC_PIN_GROUP(avb0_mdc),
+ SH_PFC_PIN_GROUP(avb0_mii),
+ SH_PFC_PIN_GROUP(avb0_avtp_pps_a),
+ SH_PFC_PIN_GROUP(avb0_avtp_match_a),
+ SH_PFC_PIN_GROUP(avb0_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb0_avtp_pps_b),
+ SH_PFC_PIN_GROUP(avb0_avtp_match_b),
+ SH_PFC_PIN_GROUP(avb0_avtp_capture_b),
SH_PFC_PIN_GROUP(i2c0),
SH_PFC_PIN_GROUP(i2c1),
SH_PFC_PIN_GROUP(i2c2_a),
@@ -1213,6 +1514,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(mmc_data4),
SH_PFC_PIN_GROUP(mmc_data8),
SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(pwm0_a),
+ SH_PFC_PIN_GROUP(pwm0_b),
+ SH_PFC_PIN_GROUP(pwm0_c),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm1_c),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm2_c),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm3_c),
SH_PFC_PIN_GROUP(scif0_data_a),
SH_PFC_PIN_GROUP(scif0_clk_a),
SH_PFC_PIN_GROUP(scif0_data_b),
@@ -1238,6 +1551,34 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif5_data_b),
SH_PFC_PIN_GROUP(scif5_clk_b),
SH_PFC_PIN_GROUP(scif_clk),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi34_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi4_data_a),
+ SH_PFC_PIN_GROUP(ssi4_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi4_data_b),
+ SH_PFC_PIN_GROUP(usb0),
+};
+
+static const char * const audio_clk_groups[] = {
+ "audio_clk_a",
+ "audio_clk_b",
+ "audio_clkout",
+ "audio_clkout1",
+};
+
+static const char * const avb0_groups[] = {
+ "avb0_link",
+ "avb0_magic",
+ "avb0_phy_int",
+ "avb0_mdc",
+ "avb0_mii",
+ "avb0_avtp_pps_a",
+ "avb0_avtp_match_a",
+ "avb0_avtp_capture_a",
+ "avb0_avtp_pps_b",
+ "avb0_avtp_match_b",
+ "avb0_avtp_capture_b",
};
static const char * const i2c0_groups[] = {
@@ -1264,6 +1605,30 @@ static const char * const mmc_groups[] = {
"mmc_ctrl",
};
+static const char * const pwm0_groups[] = {
+ "pwm0_a",
+ "pwm0_b",
+ "pwm0_c",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+ "pwm1_c",
+};
+
+static const char * const pwm2_groups[] = {
+ "pwm2_a",
+ "pwm2_b",
+ "pwm2_c",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+ "pwm3_c",
+};
+
static const char * const scif0_groups[] = {
"scif0_data_a",
"scif0_clk_a",
@@ -1310,12 +1675,31 @@ static const char * const scif_clk_groups[] = {
"scif_clk",
};
+static const char * const ssi_groups[] = {
+ "ssi3_data",
+ "ssi34_ctrl",
+ "ssi4_ctrl_a",
+ "ssi4_data_a",
+ "ssi4_ctrl_b",
+ "ssi4_data_b",
+};
+
+static const char * const usb0_groups[] = {
+ "usb0",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(avb0),
SH_PFC_FUNCTION(i2c0),
SH_PFC_FUNCTION(i2c1),
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c3),
SH_PFC_FUNCTION(mmc),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -1323,6 +1707,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(scif4),
SH_PFC_FUNCTION(scif5),
SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(ssi),
+ SH_PFC_FUNCTION(usb0),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 5c9d79981e6d..736634aee500 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -513,7 +513,7 @@ static int sh_pfc_pinconf_get_drive_strength(struct sh_pfc *pfc,
return -EINVAL;
spin_lock_irqsave(&pfc->lock, flags);
- val = sh_pfc_read_reg(pfc, reg, 32);
+ val = sh_pfc_read(pfc, reg);
spin_unlock_irqrestore(&pfc->lock, flags);
val = (val >> offset) & GENMASK(size - 1, 0);
@@ -550,11 +550,11 @@ static int sh_pfc_pinconf_set_drive_strength(struct sh_pfc *pfc,
spin_lock_irqsave(&pfc->lock, flags);
- val = sh_pfc_read_reg(pfc, reg, 32);
+ val = sh_pfc_read(pfc, reg);
val &= ~GENMASK(offset + size - 1, offset);
val |= strength << offset;
- sh_pfc_write_reg(pfc, reg, 32, val);
+ sh_pfc_write(pfc, reg, val);
spin_unlock_irqrestore(&pfc->lock, flags);
@@ -645,7 +645,7 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
return bit;
spin_lock_irqsave(&pfc->lock, flags);
- val = sh_pfc_read_reg(pfc, pocctrl, 32);
+ val = sh_pfc_read(pfc, pocctrl);
spin_unlock_irqrestore(&pfc->lock, flags);
arg = (val & BIT(bit)) ? 3300 : 1800;
@@ -716,12 +716,12 @@ static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned _pin,
return -EINVAL;
spin_lock_irqsave(&pfc->lock, flags);
- val = sh_pfc_read_reg(pfc, pocctrl, 32);
+ val = sh_pfc_read(pfc, pocctrl);
if (mV == 3300)
val |= BIT(bit);
else
val &= ~BIT(bit);
- sh_pfc_write_reg(pfc, pocctrl, 32, val);
+ sh_pfc_write(pfc, pocctrl, val);
spin_unlock_irqrestore(&pfc->lock, flags);
break;
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 8688b405e081..213108a058fe 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -148,6 +148,21 @@ struct pinmux_drive_reg {
.reg = r, \
.fields =
+struct pinmux_bias_reg {
+ u32 puen; /* Pull-enable or pull-up control register */
+ u32 pud; /* Pull-up/down control register (optional) */
+ const u16 pins[32];
+};
+
+#define PINMUX_BIAS_REG(name1, r1, name2, r2) \
+ .puen = r1, \
+ .pud = r2, \
+ .pins =
+
+struct pinmux_ioctrl_reg {
+ u32 reg;
+};
+
struct pinmux_data_reg {
u32 reg;
u8 reg_width;
@@ -189,12 +204,6 @@ struct sh_pfc_window {
unsigned long size;
};
-struct sh_pfc_bias_info {
- u16 pin;
- u16 reg : 11;
- u16 bit : 5;
-};
-
struct sh_pfc_pin_range;
struct sh_pfc {
@@ -213,6 +222,7 @@ struct sh_pfc {
unsigned int nr_gpio_pins;
struct sh_pfc_chip *gpio;
+ u32 *saved_regs;
};
struct sh_pfc_soc_operations {
@@ -245,6 +255,8 @@ struct sh_pfc_soc_info {
const struct pinmux_cfg_reg *cfg_regs;
const struct pinmux_drive_reg *drive_regs;
+ const struct pinmux_bias_reg *bias_regs;
+ const struct pinmux_ioctrl_reg *ioctrl_regs;
const struct pinmux_data_reg *data_regs;
const u16 *pinmux_data;
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 4db9323251e3..3abb028f6158 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -5820,7 +5820,7 @@ static void atlas7_gpio_handle_irq(struct irq_desc *desc)
__func__, gc->label,
bank->gpio_offset + pin_in_bank);
generic_handle_irq(
- irq_find_mapping(gc->irqdomain,
+ irq_find_mapping(gc->irq.domain,
bank->gpio_offset + pin_in_bank));
}
@@ -5860,7 +5860,7 @@ static int atlas7_gpio_request(struct gpio_chip *chip,
if (ret < 0)
return ret;
- if (pinctrl_request_gpio(chip->base + gpio))
+ if (pinctrl_gpio_request(chip->base + gpio))
return -ENODEV;
raw_spin_lock_irqsave(&a7gc->lock, flags);
@@ -5890,7 +5890,7 @@ static void atlas7_gpio_free(struct gpio_chip *chip,
raw_spin_unlock_irqrestore(&a7gc->lock, flags);
- pinctrl_free_gpio(chip->base + gpio);
+ pinctrl_gpio_free(chip->base + gpio);
}
static int atlas7_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index d3ef05973901..ca2347d0d579 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -587,7 +587,7 @@ static void sirfsoc_gpio_handle_irq(struct irq_desc *desc)
if ((status & 0x1) && (ctrl & SIRFSOC_GPIO_CTL_INTR_EN_MASK)) {
pr_debug("%s: gpio id %d idx %d happens\n",
__func__, bank->id, idx);
- generic_handle_irq(irq_find_mapping(gc->irqdomain, idx +
+ generic_handle_irq(irq_find_mapping(gc->irq.domain, idx +
bank->id * SIRFSOC_GPIO_BANK_SIZE));
}
@@ -614,7 +614,7 @@ static int sirfsoc_gpio_request(struct gpio_chip *chip, unsigned offset)
struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, offset);
unsigned long flags;
- if (pinctrl_request_gpio(chip->base + offset))
+ if (pinctrl_gpio_request(chip->base + offset))
return -ENODEV;
spin_lock_irqsave(&bank->lock, flags);
@@ -644,7 +644,7 @@ static void sirfsoc_gpio_free(struct gpio_chip *chip, unsigned offset)
spin_unlock_irqrestore(&bank->lock, flags);
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
}
static int sirfsoc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index cf6d68c7345b..6a0ed8ab33b9 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -204,7 +204,7 @@ static int plgpio_request(struct gpio_chip *chip, unsigned offset)
if (offset >= chip->ngpio)
return -EINVAL;
- ret = pinctrl_request_gpio(gpio);
+ ret = pinctrl_gpio_request(gpio);
if (ret)
return ret;
@@ -242,7 +242,7 @@ err1:
if (!IS_ERR(plgpio->clk))
clk_disable(plgpio->clk);
err0:
- pinctrl_free_gpio(gpio);
+ pinctrl_gpio_free(gpio);
return ret;
}
@@ -273,7 +273,7 @@ disable_clk:
if (!IS_ERR(plgpio->clk))
clk_disable(plgpio->clk);
- pinctrl_free_gpio(gpio);
+ pinctrl_gpio_free(gpio);
}
/* PLGPIO IRQ */
@@ -401,7 +401,7 @@ static void plgpio_irq_handler(struct irq_desc *desc)
/* get correct irq line number */
pin = i * MAX_GPIO_PER_REG + pin;
generic_handle_irq(
- irq_find_mapping(gc->irqdomain, pin));
+ irq_find_mapping(gc->irq.domain, pin));
}
}
chained_irq_exit(irqchip, desc);
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 50299ad96659..a276c61be217 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -150,12 +150,12 @@ static int stm32_gpio_request(struct gpio_chip *chip, unsigned offset)
return -EINVAL;
}
- return pinctrl_request_gpio(chip->base + offset);
+ return pinctrl_gpio_request(chip->base + offset);
}
static void stm32_gpio_free(struct gpio_chip *chip, unsigned offset)
{
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
}
static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -289,13 +289,14 @@ static int stm32_gpio_domain_translate(struct irq_domain *d,
return 0;
}
-static void stm32_gpio_domain_activate(struct irq_domain *d,
- struct irq_data *irq_data)
+static int stm32_gpio_domain_activate(struct irq_domain *d,
+ struct irq_data *irq_data, bool early)
{
struct stm32_gpio_bank *bank = d->host_data;
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->bank_nr);
+ return 0;
}
static int stm32_gpio_domain_alloc(struct irq_domain *d,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index f763d8d62d6e..295e48fc94bc 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -1289,6 +1289,7 @@ static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = {
.npins = ARRAY_SIZE(sun4i_a10_pins),
.irq_banks = 1,
.irq_read_needs_mux = true,
+ .disable_strict_mode = true,
};
static int sun4i_a10_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i.c b/drivers/pinctrl/sunxi/pinctrl-sun5i.c
index 47afd558b114..27ec99e81c4c 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i.c
@@ -713,6 +713,7 @@ static const struct sunxi_pinctrl_desc sun5i_pinctrl_data = {
.pins = sun5i_pins,
.npins = ARRAY_SIZE(sun5i_pins),
.irq_banks = 1,
+ .disable_strict_mode = true,
};
static int sun5i_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
index 49a1deb97bb7..a00246d3dd49 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
@@ -106,6 +106,7 @@ static const struct sunxi_pinctrl_desc sun6i_a31_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun6i_a31_r_pins),
.pin_base = PL_BASE,
.irq_banks = 2,
+ .disable_strict_mode = true,
};
static int sun6i_a31_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
index 951a25c18815..82ffaf466892 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
@@ -965,6 +965,7 @@ static const struct sunxi_pinctrl_desc sun6i_a31_pinctrl_data = {
.pins = sun6i_a31_pins,
.npins = ARRAY_SIZE(sun6i_a31_pins),
.irq_banks = 4,
+ .disable_strict_mode = true,
};
static int sun6i_a31_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
index 67ee6f9b3b68..8a08c4afc6a8 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
@@ -93,6 +93,7 @@ static const struct sunxi_pinctrl_desc sun8i_a23_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun8i_a23_r_pins),
.pin_base = PL_BASE,
.irq_banks = 1,
+ .disable_strict_mode = true,
};
static int sun8i_a23_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
index 721b6935baf3..402fd7d21e7b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
@@ -563,6 +563,7 @@ static const struct sunxi_pinctrl_desc sun8i_a23_pinctrl_data = {
.pins = sun8i_a23_pins,
.npins = ARRAY_SIZE(sun8i_a23_pins),
.irq_banks = 3,
+ .disable_strict_mode = true,
};
static int sun8i_a23_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index ef1e0bef4099..da387211a75e 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -486,6 +486,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
.npins = ARRAY_SIZE(sun8i_a33_pins),
.irq_banks = 2,
.irq_bank_base = 1,
+ .disable_strict_mode = true,
};
static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c
index ebfd9a26628c..b795a199e240 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c
@@ -82,7 +82,8 @@ static const struct sunxi_pinctrl_desc sun8i_h3_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun8i_h3_r_pins),
.irq_banks = 1,
.pin_base = PL_BASE,
- .irq_read_needs_mux = true
+ .irq_read_needs_mux = true,
+ .disable_strict_mode = true,
};
static int sun8i_h3_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
index 518a92df4418..d1719a738c20 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
@@ -491,7 +491,8 @@ static const struct sunxi_pinctrl_desc sun8i_h3_pinctrl_data = {
.pins = sun8i_h3_pins,
.npins = ARRAY_SIZE(sun8i_h3_pins),
.irq_banks = 2,
- .irq_read_needs_mux = true
+ .irq_read_needs_mux = true,
+ .disable_strict_mode = true,
};
static int sun8i_h3_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
index 92a873f73697..c63086c98335 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
@@ -152,6 +152,7 @@ static const struct sunxi_pinctrl_desc sun9i_a80_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun9i_a80_r_pins),
.pin_base = PL_BASE,
.irq_banks = 2,
+ .disable_strict_mode = true,
};
static int sun9i_a80_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
index bc14e954d7a2..472ef0d91b99 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
@@ -721,6 +721,7 @@ static const struct sunxi_pinctrl_desc sun9i_a80_pinctrl_data = {
.pins = sun9i_a80_pins,
.npins = ARRAY_SIZE(sun9i_a80_pins),
.irq_banks = 5,
+ .disable_strict_mode = true,
};
static int sun9i_a80_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 52edf3b5988d..4b6cb25bc796 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -696,6 +696,7 @@ static const struct pinmux_ops sunxi_pmx_ops = {
.get_function_groups = sunxi_pmx_get_func_groups,
.set_mux = sunxi_pmx_set_mux,
.gpio_set_direction = sunxi_pmx_gpio_set_direction,
+ .strict = true,
};
static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
@@ -1245,6 +1246,7 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
struct pinctrl_desc *pctrl_desc;
struct pinctrl_pin_desc *pins;
struct sunxi_pinctrl *pctl;
+ struct pinmux_ops *pmxops;
struct resource *res;
int i, ret, last_pin, pin_idx;
struct clk *clk;
@@ -1305,7 +1307,16 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
pctrl_desc->npins = pctl->ngroups;
pctrl_desc->confops = &sunxi_pconf_ops;
pctrl_desc->pctlops = &sunxi_pctrl_ops;
- pctrl_desc->pmxops = &sunxi_pmx_ops;
+
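+ /* Duplicate the shared const ops so that clearing .strict below only affects this pinctrl instance */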
+ pmxops = devm_kmemdup(&pdev->dev, &sunxi_pmx_ops, sizeof(sunxi_pmx_ops),
+ GFP_KERNEL);
+ if (!pmxops)
+ return -ENOMEM;
+
+ if (desc->disable_strict_mode)
+ pmxops->strict = false;
+
+ pctrl_desc->pmxops = pmxops;
pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, pctrl_desc, pctl);
if (IS_ERR(pctl->pctl_dev)) {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index 1bfc0d8a55df..11b128f54ed2 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -112,6 +112,7 @@ struct sunxi_pinctrl_desc {
unsigned irq_banks;
unsigned irq_bank_base;
bool irq_read_needs_mux;
+ bool disable_strict_mode;
};
struct sunxi_pinctrl_function {
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index 5c1b6325d80d..a8a6510183b6 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -575,11 +575,9 @@ static int ti_iodelay_pinconf_group_get(struct pinctrl_dev *pctldev,
unsigned long *config)
{
struct ti_iodelay_device *iod;
- struct device *dev;
struct ti_iodelay_pingroup *group;
iod = pinctrl_dev_get_drvdata(pctldev);
- dev = iod->dev;
group = ti_iodelay_get_pingroup(iod, selector);
if (!group)
@@ -693,12 +691,10 @@ static void ti_iodelay_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
unsigned int selector)
{
struct ti_iodelay_device *iod;
- struct device *dev;
struct ti_iodelay_pingroup *group;
int i;
iod = pinctrl_dev_get_drvdata(pctldev);
- dev = iod->dev;
group = ti_iodelay_get_pingroup(iod, selector);
if (!group)
return;
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
index f9267fabe6b0..26fda5c53e65 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -204,9 +204,10 @@ static int uniphier_conf_pin_drive_get(struct pinctrl_dev *pctldev,
const struct pin_desc *desc = pin_desc_get(pctldev, pin);
enum uniphier_pin_drv_type type =
uniphier_pin_get_drv_type(desc->drv_data);
- const unsigned int strength_1bit[] = {4, 8};
- const unsigned int strength_2bit[] = {8, 12, 16, 20};
- const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12, 14, 16};
+ static const unsigned int strength_1bit[] = {4, 8};
+ static const unsigned int strength_2bit[] = {8, 12, 16, 20};
+ static const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12,
+ 14, 16};
const unsigned int *supported_strength;
unsigned int drvctrl, reg, shift, mask, width, val;
int ret;
@@ -399,9 +400,10 @@ static int uniphier_conf_pin_drive_set(struct pinctrl_dev *pctldev,
const struct pin_desc *desc = pin_desc_get(pctldev, pin);
enum uniphier_pin_drv_type type =
uniphier_pin_get_drv_type(desc->drv_data);
- const unsigned int strength_1bit[] = {4, 8, -1};
- const unsigned int strength_2bit[] = {8, 12, 16, 20, -1};
- const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12, 14, 16, -1};
+ static const unsigned int strength_1bit[] = {4, 8, -1};
+ static const unsigned int strength_2bit[] = {8, 12, 16, 20, -1};
+ static const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12, 14,
+ 16, -1};
const unsigned int *supported_strength;
unsigned int drvctrl, reg, shift, mask, width, val;
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
index 9c5e359a63de..8a5ecd6277d8 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
@@ -472,8 +472,8 @@ static const struct pinctrl_pin_desc uniphier_ld11_pins[] = {
static const unsigned aout_pins[] = {135, 136, 137, 138, 139, 140, 141, 142};
static const int aout_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
-static const unsigned emmc_pins[] = {18, 19, 20, 21, 22, 23, 24, 25};
-static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned int emmc_pins[] = {19, 20, 21, 22, 23, 24, 25};
+static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
static const unsigned emmc_dat8_pins[] = {26, 27, 28, 29};
static const int emmc_dat8_muxvals[] = {0, 0, 0, 0};
static const unsigned ether_rmii_pins[] = {6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index 83341284dc44..3be7967edae0 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -553,8 +553,8 @@ static const struct pinctrl_pin_desc uniphier_ld20_pins[] = {
static const unsigned aout_pins[] = {135, 136, 137, 138, 139, 140, 141, 142};
static const int aout_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
-static const unsigned emmc_pins[] = {18, 19, 20, 21, 22, 23, 24, 25};
-static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned int emmc_pins[] = {19, 20, 21, 22, 23, 24, 25};
+static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
static const unsigned emmc_dat8_pins[] = {26, 27, 28, 29};
static const int emmc_dat8_muxvals[] = {0, 0, 0, 0};
static const unsigned ether_rgmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 38,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
index d9f166f0cc86..dbe94a9a0353 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
@@ -776,8 +776,8 @@ static const struct pinctrl_pin_desc uniphier_pxs3_pins[] = {
250, UNIPHIER_PIN_PULL_DOWN),
};
-static const unsigned int emmc_pins[] = {31, 32, 33, 34, 35, 36, 37, 38};
-static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned int emmc_pins[] = {32, 33, 34, 35, 36, 37, 38};
+static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
static const unsigned int emmc_dat8_pins[] = {39, 40, 41, 42};
static const int emmc_dat8_muxvals[] = {0, 0, 0, 0};
static const unsigned int ether_rgmii_pins[] = {52, 53, 54, 55, 56, 57, 58, 59,
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 80b87954f6dd..09dac11337d1 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -774,7 +774,7 @@ config TOSHIBA_WMI
WARNING: This driver is incomplete as it lacks a proper keymap and the
*notify function only prints the ACPI event type value. Be warned that
you will need to provide some information if you have a Toshiba model
- with WMI event hotkeys and want to help with the develpment of this
+ with WMI event hotkeys and want to help with the development of this
driver.
If you have a Toshiba laptop with WMI-based hotkeys, say Y or M here.
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index 92dc230ef5b2..f7b67e898abc 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -119,7 +119,7 @@ static irqreturn_t int0002_irq(int irq, void *data)
if (!(gpe_sts_reg & GPE0A_PME_B0_STS_BIT))
return IRQ_NONE;
- generic_handle_irq(irq_find_mapping(chip->irqdomain,
+ generic_handle_irq(irq_find_mapping(chip->irq.domain,
GPE0A_PME_B0_VIRT_GPIO_PIN));
pm_system_wakeup();
@@ -165,7 +165,7 @@ static int int0002_probe(struct platform_device *pdev)
chip->direction_output = int0002_gpio_direction_output;
chip->base = -1;
chip->ngpio = GPE0A_PME_B0_VIRT_GPIO_PIN + 1;
- chip->irq_need_valid_mask = true;
+ chip->irq.need_valid_mask = true;
ret = devm_gpiochip_add_data(&pdev->dev, chip, NULL);
if (ret) {
@@ -173,7 +173,7 @@ static int int0002_probe(struct platform_device *pdev)
return ret;
}
- bitmap_clear(chip->irq_valid_mask, 0, GPE0A_PME_B0_VIRT_GPIO_PIN);
+ bitmap_clear(chip->irq.valid_mask, 0, GPE0A_PME_B0_VIRT_GPIO_PIN);
/*
* We manually request the irq here instead of passing a flow-handler
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 2242d6035d9e..3887dfeafc96 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -9543,7 +9543,7 @@ static struct ibm_init_struct ibms_init[] __initdata = {
},
};
-static int __init set_ibm_param(const char *val, struct kernel_param *kp)
+static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
{
unsigned int i;
struct ibm_struct *ibm;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index bb1dcd7fbdeb..e8d058c5ef21 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -2510,7 +2510,6 @@ static const struct iio_chan_spec toshiba_iio_accel_channels[] = {
};
static const struct iio_info toshiba_iio_accel_info = {
- .driver_module = THIS_MODULE,
.read_raw = &toshiba_iio_accel_read_raw,
};
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index 974fd684bab2..89bf4d6cb486 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -355,7 +355,7 @@ int sr_configure_errgen(struct omap_sr *sr)
u8 senp_shift, senn_shift;
if (!sr) {
- pr_warn("%s: NULL omap_sr from %pF\n",
+ pr_warn("%s: NULL omap_sr from %pS\n",
__func__, (void *)_RET_IP_);
return -EINVAL;
}
@@ -422,7 +422,7 @@ int sr_disable_errgen(struct omap_sr *sr)
u32 vpboundint_en, vpboundint_st;
if (!sr) {
- pr_warn("%s: NULL omap_sr from %pF\n",
+ pr_warn("%s: NULL omap_sr from %pS\n",
__func__, (void *)_RET_IP_);
return -EINVAL;
}
@@ -477,7 +477,7 @@ int sr_configure_minmax(struct omap_sr *sr)
u8 senp_shift, senn_shift;
if (!sr) {
- pr_warn("%s: NULL omap_sr from %pF\n",
+ pr_warn("%s: NULL omap_sr from %pS\n",
__func__, (void *)_RET_IP_);
return -EINVAL;
}
@@ -562,7 +562,7 @@ int sr_enable(struct omap_sr *sr, unsigned long volt)
int ret;
if (!sr) {
- pr_warn("%s: NULL omap_sr from %pF\n",
+ pr_warn("%s: NULL omap_sr from %pS\n",
__func__, (void *)_RET_IP_);
return -EINVAL;
}
@@ -614,7 +614,7 @@ int sr_enable(struct omap_sr *sr, unsigned long volt)
void sr_disable(struct omap_sr *sr)
{
if (!sr) {
- pr_warn("%s: NULL omap_sr from %pF\n",
+ pr_warn("%s: NULL omap_sr from %pS\n",
__func__, (void *)_RET_IP_);
return;
}
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 5ab90c1f3f7c..428b426842f4 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -182,7 +182,21 @@ config CHARGER_SBS
tristate "SBS Compliant charger"
depends on I2C
help
- Say Y to include support for SBS compilant battery chargers.
+ Say Y to include support for SBS compliant battery chargers.
+
+config MANAGER_SBS
+ tristate "Smart Battery System Manager"
+ depends on I2C && I2C_MUX && GPIOLIB
+ select I2C_SMBUS
+ help
+ Say Y here to include support for Smart Battery System Manager
+ ICs. The driver reports online and charging status via sysfs.
+ It also presents itself as an I2C mux, which allows a smart
+ battery driver to be bound to each of its ports.
+ The LTC1760 is one example of a supported device.
+
+ This driver can also be built as a module. If so, the module will be
+ called sbs-manager.
config BATTERY_BQ27XXX
tristate "BQ27xxx battery driver"
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index aae4e4a8bbb3..e83aa843bcc6 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_BATTERY_IPAQ_MICRO) += ipaq_micro_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o
obj-$(CONFIG_CHARGER_SBS) += sbs-charger.o
+obj-$(CONFIG_MANAGER_SBS) += sbs-manager.o
obj-$(CONFIG_BATTERY_BQ27XXX) += bq27xxx_battery.o
obj-$(CONFIG_BATTERY_BQ27XXX_I2C) += bq27xxx_battery_i2c.o
obj-$(CONFIG_BATTERY_BQ27XXX_HDQ) += bq27xxx_battery_hdq.o
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index 11a07633de6c..e4905bef2663 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -484,7 +484,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev,
int irq, error;
irq = platform_get_irq_byname(pdev, name);
- if (!irq)
+ if (irq < 0)
return -ENODEV;
error = devm_request_threaded_irq(ddata->dev, irq, NULL,
diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
index 37e523374fe0..28dc056eaafa 100644
--- a/drivers/power/supply/generic-adc-battery.c
+++ b/drivers/power/supply/generic-adc-battery.c
@@ -201,14 +201,12 @@ err:
static void gab_work(struct work_struct *work)
{
struct gab *adc_bat;
- struct gab_platform_data *pdata;
struct delayed_work *delayed_work;
bool is_plugged;
int status;
delayed_work = to_delayed_work(work);
adc_bat = container_of(delayed_work, struct gab, bat_work);
- pdata = adc_bat->pdata;
status = adc_bat->status;
is_plugged = power_supply_am_i_supplied(adc_bat->psy);
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index fa861003fece..c73fb4221695 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -146,8 +146,7 @@ static int max8997_battery_probe(struct platform_device *pdev)
return ret;
}
- charger = devm_kzalloc(&pdev->dev, sizeof(struct charger_data),
- GFP_KERNEL);
+ charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
if (!charger)
return -ENOMEM;
diff --git a/drivers/power/supply/pcf50633-charger.c b/drivers/power/supply/pcf50633-charger.c
index 1ad7ccce6075..1aba14046a83 100644
--- a/drivers/power/supply/pcf50633-charger.c
+++ b/drivers/power/supply/pcf50633-charger.c
@@ -43,7 +43,6 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
int ret = 0;
u8 bits;
- int charging_start = 1;
u8 mbcs2, chgmod;
unsigned int mbcc5;
@@ -58,7 +57,6 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
ma = 100;
} else {
bits = PCF50633_MBCC7_USB_SUSPEND;
- charging_start = 0;
ma = 0;
}
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 02c6340ae36f..82f998ab5a52 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -669,7 +669,7 @@ EXPORT_SYMBOL_GPL(power_supply_powers);
static void power_supply_dev_release(struct device *dev)
{
struct power_supply *psy = container_of(dev, struct power_supply, dev);
- pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+ dev_dbg(dev, "%s\n", __func__);
kfree(psy);
}
diff --git a/drivers/power/supply/qcom_smbb.c b/drivers/power/supply/qcom_smbb.c
index f6a0d245731d..11de691b9a71 100644
--- a/drivers/power/supply/qcom_smbb.c
+++ b/drivers/power/supply/qcom_smbb.c
@@ -34,7 +34,7 @@
#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/regulator/driver.h>
#define SMBB_CHG_VMAX 0x040
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index b19a73176910..83d7b4115857 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -177,10 +177,8 @@ static bool force_load;
static int sbs_read_word_data(struct i2c_client *client, u8 address)
{
struct sbs_info *chip = i2c_get_clientdata(client);
+ int retries = chip->i2c_retry_count;
s32 ret = 0;
- int retries = 1;
-
- retries = chip->i2c_retry_count;
while (retries > 0) {
ret = i2c_smbus_read_word_data(client, address);
@@ -204,7 +202,7 @@ static int sbs_read_string_data(struct i2c_client *client, u8 address,
{
struct sbs_info *chip = i2c_get_clientdata(client);
s32 ret = 0, block_length = 0;
- int retries_length = 1, retries_block = 1;
+ int retries_length, retries_block;
u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
retries_length = chip->i2c_retry_count;
@@ -269,10 +267,8 @@ static int sbs_write_word_data(struct i2c_client *client, u8 address,
u16 value)
{
struct sbs_info *chip = i2c_get_clientdata(client);
+ int retries = chip->i2c_retry_count;
s32 ret = 0;
- int retries = 1;
-
- retries = chip->i2c_retry_count;
while (retries > 0) {
ret = i2c_smbus_write_word_data(client, address, value);
@@ -321,16 +317,6 @@ static int sbs_get_battery_presence_and_health(
union power_supply_propval *val)
{
s32 ret;
- struct sbs_info *chip = i2c_get_clientdata(client);
-
- if (psp == POWER_SUPPLY_PROP_PRESENT && chip->gpio_detect) {
- ret = gpiod_get_value_cansleep(chip->gpio_detect);
- if (ret < 0)
- return ret;
- val->intval = ret;
- chip->is_present = val->intval;
- return ret;
- }
/*
* Write to ManufacturerAccess with ManufacturerAccess command
@@ -570,7 +556,7 @@ static int sbs_get_battery_serial_number(struct i2c_client *client,
if (ret < 0)
return ret;
- ret = sprintf(sbs_serial, "%04x", ret);
+ sprintf(sbs_serial, "%04x", ret);
val->strval = sbs_serial;
return 0;
@@ -598,6 +584,19 @@ static int sbs_get_property(struct power_supply *psy,
struct sbs_info *chip = power_supply_get_drvdata(psy);
struct i2c_client *client = chip->client;
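+ /*
+ * When a presence GPIO is provided, answer PRESENT from it directly and
+ * short-circuit all other properties with -ENODATA while no pack is
+ * inserted.
+ */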
+ if (chip->gpio_detect) {
+ ret = gpiod_get_value_cansleep(chip->gpio_detect);
+ if (ret < 0)
+ return ret;
+ if (psp == POWER_SUPPLY_PROP_PRESENT) {
+ val->intval = ret;
+ chip->is_present = val->intval;
+ return 0;
+ }
+ if (ret == 0)
+ return -ENODATA;
+ }
+
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
case POWER_SUPPLY_PROP_HEALTH:
diff --git a/drivers/power/supply/sbs-manager.c b/drivers/power/supply/sbs-manager.c
new file mode 100644
index 000000000000..ccb4217b9638
--- /dev/null
+++ b/drivers/power/supply/sbs-manager.c
@@ -0,0 +1,445 @@
+/*
+ * Driver for SBS compliant Smart Battery System Managers
+ *
+ * The device communicates via i2c at address 0x0a and multiplexes access to up
+ * to four smart batteries at address 0x0b.
+ *
+ * Via sysfs interface the online state and charge type are presented.
+ *
+ * Datasheet SBSM: http://sbs-forum.org/specs/sbsm100b.pdf
+ * Datasheet LTC1760: http://cds.linear.com/docs/en/datasheet/1760fb.pdf
+ *
+ * Karl-Heinz Schneider <karl-heinz@schneider-inet.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+
+#define SBSM_MAX_BATS 4
+#define SBSM_RETRY_CNT 3
+
+/* registers addresses */
+#define SBSM_CMD_BATSYSSTATE 0x01
+#define SBSM_CMD_BATSYSSTATECONT 0x02
+#define SBSM_CMD_BATSYSINFO 0x04
+#define SBSM_CMD_LTC 0x3c
+
+#define SBSM_MASK_BAT_SUPPORTED GENMASK(3, 0)
+#define SBSM_MASK_CHARGE_BAT GENMASK(7, 4)
+#define SBSM_BIT_AC_PRESENT BIT(0)
+#define SBSM_BIT_TURBO BIT(7)
+
+#define SBSM_SMB_BAT_OFFSET 11
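+/*
+ * The SMB_BAT1..SMB_BAT4 select bits in BatterySystemState start at bit 12:
+ * BIT(SBSM_SMB_BAT_OFFSET + chan), with chan counted from 1.
+ */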
+struct sbsm_data {
+ struct i2c_client *client;
+ struct i2c_mux_core *muxc;
+
+ struct power_supply *psy;
+
+ u8 cur_chan; /* currently selected channel */
+ struct gpio_chip chip;
+ bool is_ltc1760; /* special capabilities */
+
+ unsigned int supported_bats;
+ unsigned int last_state;
+ unsigned int last_state_cont;
+};
+
+static enum power_supply_property sbsm_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+};
+
+static int sbsm_read_word(struct i2c_client *client, u8 address)
+{
+ int reg, retries;
+
+ for (retries = SBSM_RETRY_CNT; retries > 0; retries--) {
+ reg = i2c_smbus_read_word_data(client, address);
+ if (reg >= 0)
+ break;
+ }
+
+ if (reg < 0) {
+ dev_err(&client->dev, "failed to read register 0x%02x\n",
+ address);
+ }
+
+ return reg;
+}
+
+static int sbsm_write_word(struct i2c_client *client, u8 address, u16 word)
+{
+ int ret, retries;
+
+ for (retries = SBSM_RETRY_CNT; retries > 0; retries--) {
+ ret = i2c_smbus_write_word_data(client, address, word);
+ if (ret >= 0)
+ break;
+ }
+ if (ret < 0)
+ dev_err(&client->dev, "failed to write to register 0x%02x\n",
+ address);
+
+ return ret;
+}
+
+static int sbsm_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct sbsm_data *data = power_supply_get_drvdata(psy);
+ int regval = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ regval = sbsm_read_word(data->client, SBSM_CMD_BATSYSSTATECONT);
+ if (regval < 0)
+ return regval;
+ val->intval = !!(regval & SBSM_BIT_AC_PRESENT);
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ regval = sbsm_read_word(data->client, SBSM_CMD_BATSYSSTATE);
+ if (regval < 0)
+ return regval;
+
+ if ((regval & SBSM_MASK_CHARGE_BAT) == 0) {
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ return 0;
+ }
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+
+ if (data->is_ltc1760) {
+ /* charge mode fast if turbo is active */
+ regval = sbsm_read_word(data->client, SBSM_CMD_LTC);
+ if (regval < 0)
+ return regval;
+ else if (regval & SBSM_BIT_TURBO)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sbsm_prop_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ struct sbsm_data *data = power_supply_get_drvdata(psy);
+
+ return (psp == POWER_SUPPLY_PROP_CHARGE_TYPE) && data->is_ltc1760;
+}
+
+static int sbsm_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct sbsm_data *data = power_supply_get_drvdata(psy);
+ int ret = -EINVAL;
+ u16 regval;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ /* write 1 to TURBO if type fast is given */
+ if (!data->is_ltc1760)
+ break;
+ regval = val->intval ==
+ POWER_SUPPLY_CHARGE_TYPE_FAST ? SBSM_BIT_TURBO : 0;
+ ret = sbsm_write_word(data->client, SBSM_CMD_LTC, regval);
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Switch to battery
+ * Parameter chan is directly the content of SMB_BAT* nibble
+ */
+static int sbsm_select(struct i2c_mux_core *muxc, u32 chan)
+{
+ struct sbsm_data *data = i2c_mux_priv(muxc);
+ struct device *dev = &data->client->dev;
+ int ret = 0;
+ u16 reg;
+
+ if (data->cur_chan == chan)
+ return ret;
+
+ /* chan goes from 1 ... 4 */
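+ /* e.g. chan == 1 selects SMB_BAT1 via bit 12 (reg == 0x1000) */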
+ reg = BIT(SBSM_SMB_BAT_OFFSET + chan);
+ ret = sbsm_write_word(data->client, SBSM_CMD_BATSYSSTATE, reg);
+ if (ret)
+ dev_err(dev, "Failed to select channel %i\n", chan);
+ else
+ data->cur_chan = chan;
+
+ return ret;
+}
+
+static int sbsm_gpio_get_value(struct gpio_chip *gc, unsigned int off)
+{
+ struct sbsm_data *data = gpiochip_get_data(gc);
+ int ret;
+
+ ret = sbsm_read_word(data->client, SBSM_CMD_BATSYSSTATE);
+ if (ret < 0)
+ return ret;
+
+ return ret & BIT(off);
+}
+
+/*
+ * This needs to be defined or the GPIO lib fails to register the pin.
+ * But the 'gpio' is always an input.
+ */
+static int sbsm_gpio_direction_input(struct gpio_chip *gc, unsigned int off)
+{
+ return 0;
+}
+
+static int sbsm_do_alert(struct device *dev, void *d)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct i2c_driver *driver;
+
+ if (!client || client->addr != 0x0b)
+ return 0;
+
+ device_lock(dev);
+ if (client->dev.driver) {
+ driver = to_i2c_driver(client->dev.driver);
+ if (driver->alert)
+ driver->alert(client, I2C_PROTOCOL_SMBUS_ALERT, 0);
+ else
+ dev_warn(&client->dev, "no driver alert()!\n");
+ } else {
+ dev_dbg(&client->dev, "alert with no driver\n");
+ }
+ device_unlock(dev);
+
+ return -EBUSY;
+}
+
+static void sbsm_alert(struct i2c_client *client, enum i2c_alert_protocol prot,
+ unsigned int d)
+{
+ struct sbsm_data *sbsm = i2c_get_clientdata(client);
+
+ int ret, i, irq_bat = 0, state = 0;
+
+ ret = sbsm_read_word(sbsm->client, SBSM_CMD_BATSYSSTATE);
+ if (ret >= 0) {
+ irq_bat = ret ^ sbsm->last_state;
+ sbsm->last_state = ret;
+ state = ret;
+ }
+
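+ /*
+ * A change in AC presence concerns every supported battery that is
+ * present, so fan the alert out to those ports as well and notify the
+ * power supply core.
+ */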
+ ret = sbsm_read_word(sbsm->client, SBSM_CMD_BATSYSSTATECONT);
+ if ((ret >= 0) &&
+ ((ret ^ sbsm->last_state_cont) & SBSM_BIT_AC_PRESENT)) {
+ irq_bat |= sbsm->supported_bats & state;
+ power_supply_changed(sbsm->psy);
+ }
+ sbsm->last_state_cont = ret;
+
+ for (i = 0; i < SBSM_MAX_BATS; i++) {
+ if (irq_bat & BIT(i)) {
+ device_for_each_child(&sbsm->muxc->adapter[i]->dev,
+ NULL, sbsm_do_alert);
+ }
+ }
+}
+
+static int sbsm_gpio_setup(struct sbsm_data *data)
+{
+ struct gpio_chip *gc = &data->chip;
+ struct i2c_client *client = data->client;
+ struct device *dev = &client->dev;
+ int ret;
+
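+ /*
+ * Optionally expose the low nibble of BatterySystemState (per-port
+ * battery status) as read-only GPIO lines, one per port.
+ */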
+ if (!device_property_present(dev, "gpio-controller"))
+ return 0;
+
+ ret = sbsm_read_word(client, SBSM_CMD_BATSYSSTATE);
+ if (ret < 0)
+ return ret;
+ data->last_state = ret;
+
+ ret = sbsm_read_word(client, SBSM_CMD_BATSYSSTATECONT);
+ if (ret < 0)
+ return ret;
+ data->last_state_cont = ret;
+
+ gc->get = sbsm_gpio_get_value;
+ gc->direction_input = sbsm_gpio_direction_input;
+ gc->can_sleep = true;
+ gc->base = -1;
+ gc->ngpio = SBSM_MAX_BATS;
+ gc->label = client->name;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+
+ ret = devm_gpiochip_add_data(dev, gc, data);
+ if (ret) {
+ dev_err(dev, "devm_gpiochip_add_data failed: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct power_supply_desc sbsm_default_psy_desc = {
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = sbsm_props,
+ .num_properties = ARRAY_SIZE(sbsm_props),
+ .get_property = &sbsm_get_property,
+ .set_property = &sbsm_set_property,
+ .property_is_writeable = &sbsm_prop_is_writeable,
+};
+
+static int sbsm_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct sbsm_data *data;
+ struct device *dev = &client->dev;
+ struct power_supply_desc *psy_desc;
+ struct power_supply_config psy_cfg = {};
+ int ret = 0, i;
+
+ /* Device listens only at address 0x0a */
+ if (client->addr != 0x0a)
+ return -EINVAL;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -EPFNOSUPPORT;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+
+ data->client = client;
+ data->is_ltc1760 = !!strstr(id->name, "ltc1760");
+
+ ret = sbsm_read_word(client, SBSM_CMD_BATSYSINFO);
+ if (ret < 0)
+ return ret;
+ data->supported_bats = ret & SBSM_MASK_BAT_SUPPORTED;
+ data->muxc = i2c_mux_alloc(adapter, dev, SBSM_MAX_BATS, 0,
+ I2C_MUX_LOCKED, &sbsm_select, NULL);
+ if (!data->muxc) {
+ dev_err(dev, "failed to alloc i2c mux\n");
+ ret = -ENOMEM;
+ goto err_mux_alloc;
+ }
+ data->muxc->priv = data;
+
+ /* register muxed i2c channels. One for each supported battery */
+ for (i = 0; i < SBSM_MAX_BATS; ++i) {
+ if (data->supported_bats & BIT(i)) {
+ ret = i2c_mux_add_adapter(data->muxc, 0, i + 1, 0);
+ if (ret)
+ break;
+ }
+ }
+ if (ret) {
+ dev_err(dev, "failed to register i2c mux channel %d\n", i + 1);
+ goto err_mux_register;
+ }
+
+ psy_desc = devm_kmemdup(dev, &sbsm_default_psy_desc,
+ sizeof(struct power_supply_desc),
+ GFP_KERNEL);
+ if (!psy_desc) {
+ ret = -ENOMEM;
+ goto err_psy;
+ }
+
+ psy_desc->name = devm_kasprintf(dev, GFP_KERNEL, "sbsm-%s",
+ dev_name(&client->dev));
+ if (!psy_desc->name) {
+ ret = -ENOMEM;
+ goto err_psy;
+ }
+ ret = sbsm_gpio_setup(data);
+ if (ret < 0)
+ goto err_psy;
+
+ psy_cfg.drv_data = data;
+ psy_cfg.of_node = dev->of_node;
+ data->psy = devm_power_supply_register(dev, psy_desc, &psy_cfg);
+ if (IS_ERR(data->psy)) {
+ ret = PTR_ERR(data->psy);
+ dev_err(dev, "failed to register power supply %s\n",
+ psy_desc->name);
+ goto err_psy;
+ }
+
+ return 0;
+
+err_psy:
+err_mux_register:
+ i2c_mux_del_adapters(data->muxc);
+
+err_mux_alloc:
+ return ret;
+}
+
+static int sbsm_remove(struct i2c_client *client)
+{
+ struct sbsm_data *data = i2c_get_clientdata(client);
+
+ i2c_mux_del_adapters(data->muxc);
+ return 0;
+}
+
+static const struct i2c_device_id sbsm_ids[] = {
+ { "sbs-manager", 0 },
+ { "ltc1760", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sbsm_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id sbsm_dt_ids[] = {
+ { .compatible = "sbs,sbs-manager" },
+ { .compatible = "lltc,ltc1760" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sbsm_dt_ids);
+#endif
+
+static struct i2c_driver sbsm_driver = {
+ .driver = {
+ .name = "sbsm",
+ .of_match_table = of_match_ptr(sbsm_dt_ids),
+ },
+ .probe = sbsm_probe,
+ .remove = sbsm_remove,
+ .alert = sbsm_alert,
+ .id_table = sbsm_ids
+};
+module_i2c_driver(sbsm_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Karl-Heinz Schneider <karl-heinz@schneider-inet.de>");
+MODULE_DESCRIPTION("SBSM Smart Battery System Manager");
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index a5915f498eea..bbcaee56db9d 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -743,7 +743,7 @@ static int twl4030bci_state(struct twl4030_bci *bci)
ret = twl4030_bci_read(TWL4030_BCIMSTATEC, &state);
if (ret) {
- pr_err("twl4030_bci: error reading BCIMSTATEC\n");
+ dev_err(bci->dev, "error reading BCIMSTATEC\n");
return ret;
}
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 5beb0c361076..5c1b6388122a 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -876,10 +876,10 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
* offset within the internal buffer specified by handle parameter.
*/
if (xfer->loc_addr) {
- unsigned long offset;
+ unsigned int offset;
long pinned;
- offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK;
+ offset = lower_32_bits(offset_in_page(xfer->loc_addr));
nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
page_list = kmalloc_array(nr_pages,
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 0fd6195601ba..96cd55f9e3c5 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -244,7 +244,7 @@ config REGULATOR_DA9210
interface.
config REGULATOR_DA9211
- tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9214/DA9215 regulator"
+ tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 regulator"
depends on I2C
select REGMAP_I2C
help
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 376a99b7cf5d..181622b2813d 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -244,6 +244,7 @@ static const struct regulator_desc axp22x_drivevbus_regulator = {
.ops = &axp20x_ops_sw,
};
+/* DCDC ranges shared with AXP813 */
static const struct regulator_linear_range axp803_dcdc234_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 0x0, 0x46, 10000),
REGULATOR_LINEAR_RANGE(1220000, 0x47, 0x4b, 20000),
@@ -426,6 +427,69 @@ static const struct regulator_desc axp809_regulators[] = {
AXP_DESC_SW(AXP809, SW, "sw", "swin", AXP22X_PWR_OUT_CTRL2, BIT(6)),
};
+static const struct regulator_desc axp813_regulators[] = {
+ AXP_DESC(AXP813, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
+ AXP803_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(0)),
+ AXP_DESC_RANGES(AXP813, DCDC2, "dcdc2", "vin2", axp803_dcdc234_ranges,
+ 76, AXP803_DCDC2_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(1)),
+ AXP_DESC_RANGES(AXP813, DCDC3, "dcdc3", "vin3", axp803_dcdc234_ranges,
+ 76, AXP803_DCDC3_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(2)),
+ AXP_DESC_RANGES(AXP813, DCDC4, "dcdc4", "vin4", axp803_dcdc234_ranges,
+ 76, AXP803_DCDC4_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(3)),
+ AXP_DESC_RANGES(AXP813, DCDC5, "dcdc5", "vin5", axp803_dcdc5_ranges,
+ 68, AXP803_DCDC5_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(4)),
+ AXP_DESC_RANGES(AXP813, DCDC6, "dcdc6", "vin6", axp803_dcdc6_ranges,
+ 72, AXP803_DCDC6_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(5)),
+ AXP_DESC_RANGES(AXP813, DCDC7, "dcdc7", "vin7", axp803_dcdc6_ranges,
+ 72, AXP813_DCDC7_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(6)),
+ AXP_DESC(AXP813, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(5)),
+ AXP_DESC(AXP813, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(6)),
+ AXP_DESC(AXP813, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(7)),
+ AXP_DESC(AXP813, DLDO1, "dldo1", "dldoin", 700, 3300, 100,
+ AXP22X_DLDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(3)),
+ AXP_DESC_RANGES(AXP813, DLDO2, "dldo2", "dldoin", axp803_dldo2_ranges,
+ 32, AXP22X_DLDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2,
+ BIT(4)),
+ AXP_DESC(AXP813, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
+ AXP22X_DLDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(5)),
+ AXP_DESC(AXP813, DLDO4, "dldo4", "dldoin", 700, 3300, 100,
+ AXP22X_DLDO4_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(6)),
+ AXP_DESC(AXP813, ELDO1, "eldo1", "eldoin", 700, 1900, 50,
+ AXP22X_ELDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(0)),
+ AXP_DESC(AXP813, ELDO2, "eldo2", "eldoin", 700, 1900, 50,
+ AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
+ AXP_DESC(AXP813, ELDO3, "eldo3", "eldoin", 700, 1900, 50,
+ AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
+ /* to do / check ... */
+ AXP_DESC(AXP813, FLDO1, "fldo1", "fldoin", 700, 1450, 50,
+ AXP803_FLDO1_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(2)),
+ AXP_DESC(AXP813, FLDO2, "fldo2", "fldoin", 700, 1450, 50,
+ AXP803_FLDO2_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(3)),
+ /*
+ * TODO: FLDO3 = {DCDC5, FLDOIN} / 2
+ *
+ * This means FLDO3 effectively switches supplies at runtime,
+ * something the regulator subsystem does not support.
+ */
+ AXP_DESC_FIXED(AXP813, RTC_LDO, "rtc-ldo", "ips", 1800),
+ AXP_DESC_IO(AXP813, LDO_IO0, "ldo-io0", "ips", 700, 3300, 100,
+ AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
+ AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+ AXP_DESC_IO(AXP813, LDO_IO1, "ldo-io1", "ips", 700, 3300, 100,
+ AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
+ AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+ AXP_DESC_SW(AXP813, SW, "sw", "swin", AXP22X_PWR_OUT_CTRL2, BIT(7)),
+};
+
static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
{
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
@@ -441,9 +505,10 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
step = 75;
break;
case AXP803_ID:
+ case AXP813_ID:
/*
- * AXP803 DCDC work frequency setting has the same range and
- * step as AXP22X, but at a different register.
+ * AXP803/AXP813 DCDC work frequency setting has the same
+ * range and step as AXP22X, but at a different register.
* Fall through to the check below.
* (See include/linux/mfd/axp20x.h)
*/
@@ -561,6 +626,14 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work
workmode <<= id - AXP803_DCDC1;
break;
+ case AXP813_ID:
+ if (id < AXP813_DCDC1 || id > AXP813_DCDC7)
+ return -EINVAL;
+
+ mask = AXP22X_WORKMODE_DCDCX_MASK(id - AXP813_DCDC1);
+ workmode <<= id - AXP813_DCDC1;
+ break;
+
default:
/* should not happen */
WARN_ON(1);
@@ -579,11 +652,12 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
u32 reg = 0;
/*
- * Currently in our supported AXP variants, only AXP803 and AXP806
- * have polyphase regulators.
+ * Currently in our supported AXP variants, only AXP803, AXP806,
+ * and AXP813 have polyphase regulators.
*/
switch (axp20x->variant) {
case AXP803_ID:
+ case AXP813_ID:
regmap_read(axp20x->regmap, AXP803_POLYPHASE_CTRL, &reg);
switch (id) {
@@ -656,6 +730,12 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
regulators = axp809_regulators;
nregulators = AXP809_REG_ID_MAX;
break;
+ case AXP813_ID:
+ regulators = axp813_regulators;
+ nregulators = AXP813_REG_ID_MAX;
+ drivevbus = of_property_read_bool(pdev->dev.parent->of_node,
+ "x-powers,drive-vbus-en");
+ break;
default:
dev_err(&pdev->dev, "Unsupported AXP variant: %ld\n",
axp20x->variant);
@@ -677,6 +757,10 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
if (axp20x_is_polyphase_slave(axp20x, i))
continue;
+ /* Support for AXP813's FLDO3 is not implemented */
+ if (axp20x->variant == AXP813_ID && i == AXP813_FLDO3)
+ continue;
+
/*
* Regulators DC1SW and DC5LDO are connected internally,
* so we have to handle their supply names separately.
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index aa47280efd32..9b8f47617724 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -1,6 +1,6 @@
/*
* da9211-regulator.c - Regulator device driver for DA9211/DA9212
- * /DA9213/DA9214/DA9215
+ * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This library is free software; you can redistribute it and/or
@@ -496,8 +496,11 @@ static const struct i2c_device_id da9211_i2c_id[] = {
{"da9211", DA9211},
{"da9212", DA9212},
{"da9213", DA9213},
+ {"da9223", DA9223},
{"da9214", DA9214},
+ {"da9224", DA9224},
{"da9215", DA9215},
+ {"da9225", DA9225},
{},
};
MODULE_DEVICE_TABLE(i2c, da9211_i2c_id);
@@ -507,8 +510,11 @@ static const struct of_device_id da9211_dt_ids[] = {
{ .compatible = "dlg,da9211", .data = &da9211_i2c_id[0] },
{ .compatible = "dlg,da9212", .data = &da9211_i2c_id[1] },
{ .compatible = "dlg,da9213", .data = &da9211_i2c_id[2] },
- { .compatible = "dlg,da9214", .data = &da9211_i2c_id[3] },
- { .compatible = "dlg,da9215", .data = &da9211_i2c_id[4] },
+ { .compatible = "dlg,da9223", .data = &da9211_i2c_id[3] },
+ { .compatible = "dlg,da9214", .data = &da9211_i2c_id[4] },
+ { .compatible = "dlg,da9224", .data = &da9211_i2c_id[5] },
+ { .compatible = "dlg,da9215", .data = &da9211_i2c_id[6] },
+ { .compatible = "dlg,da9225", .data = &da9211_i2c_id[7] },
{},
};
MODULE_DEVICE_TABLE(of, da9211_dt_ids);
@@ -526,5 +532,5 @@ static struct i2c_driver da9211_regulator_driver = {
module_i2c_driver(da9211_regulator_driver);
MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>");
-MODULE_DESCRIPTION("DA9211/DA9212/DA9213/DA9214/DA9215 regulator driver");
+MODULE_DESCRIPTION("DA9211/DA9212/DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 regulator driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/da9211-regulator.h b/drivers/regulator/da9211-regulator.h
index b841bbf330cc..2cb32aab4f82 100644
--- a/drivers/regulator/da9211-regulator.h
+++ b/drivers/regulator/da9211-regulator.h
@@ -1,6 +1,6 @@
/*
* da9211-regulator.h - Regulator definitions for DA9211/DA9212
- * /DA9213/DA9214/DA9215
+ * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index 0cb76ba29e84..8f782d22fdbe 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -34,6 +34,8 @@ struct pbias_reg_info {
u32 vmode;
unsigned int enable_time;
char *name;
+ const unsigned int *pbias_volt_table;
+ int n_voltages;
};
struct pbias_regulator_data {
@@ -49,11 +51,16 @@ struct pbias_of_data {
unsigned int offset;
};
-static const unsigned int pbias_volt_table[] = {
+static const unsigned int pbias_volt_table_3_0V[] = {
1800000,
3000000
};
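+/* The OMAP5 variant below selects 3.3 V as its high setting; the earlier SoCs use the 3.0 V table above */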
+static const unsigned int pbias_volt_table_3_3V[] = {
+ 1800000,
+ 3300000
+};
+
static const struct regulator_ops pbias_regulator_voltage_ops = {
.list_voltage = regulator_list_voltage_table,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -69,6 +76,8 @@ static const struct pbias_reg_info pbias_mmc_omap2430 = {
.vmode = BIT(0),
.disable_val = 0,
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_0V,
+ .n_voltages = 2,
.name = "pbias_mmc_omap2430"
};
@@ -77,6 +86,8 @@ static const struct pbias_reg_info pbias_sim_omap3 = {
.enable_mask = BIT(9),
.vmode = BIT(8),
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_0V,
+ .n_voltages = 2,
.name = "pbias_sim_omap3"
};
@@ -86,6 +97,8 @@ static const struct pbias_reg_info pbias_mmc_omap4 = {
.disable_val = BIT(25),
.vmode = BIT(21),
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_0V,
+ .n_voltages = 2,
.name = "pbias_mmc_omap4"
};
@@ -95,6 +108,8 @@ static const struct pbias_reg_info pbias_mmc_omap5 = {
.disable_val = BIT(25),
.vmode = BIT(21),
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_3V,
+ .n_voltages = 2,
.name = "pbias_mmc_omap5"
};
@@ -199,8 +214,8 @@ static int pbias_regulator_probe(struct platform_device *pdev)
drvdata[data_idx].desc.owner = THIS_MODULE;
drvdata[data_idx].desc.type = REGULATOR_VOLTAGE;
drvdata[data_idx].desc.ops = &pbias_regulator_voltage_ops;
- drvdata[data_idx].desc.volt_table = pbias_volt_table;
- drvdata[data_idx].desc.n_voltages = 2;
+ drvdata[data_idx].desc.volt_table = info->pbias_volt_table;
+ drvdata[data_idx].desc.n_voltages = info->n_voltages;
drvdata[data_idx].desc.enable_time = info->enable_time;
drvdata[data_idx].desc.vsel_reg = offset;
drvdata[data_idx].desc.vsel_mask = info->vmode;
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 16c5f84e06a7..0241ada47d04 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -593,13 +593,20 @@ static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg,
u8 *voltage_sel)
{
const struct spmi_voltage_range *range, *end;
+ unsigned offset;
range = vreg->set_points->range;
end = range + vreg->set_points->count;
for (; range < end; range++) {
if (selector < range->n_voltages) {
- *voltage_sel = selector;
+ /*
+ * hardware selectors between set point min and real
+ * min are invalid so we ignore them
+ */
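+ /* e.g. with hypothetical values min_uV = 1375000, set_point_min_uV = 1500000 and step_uV = 25000, the offset below is 5 */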
+ offset = range->set_point_min_uV - range->min_uV;
+ offset /= range->step_uV;
+ *voltage_sel = selector + offset;
*range_sel = range->range_sel;
return 0;
}
@@ -613,15 +620,35 @@ static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg,
static int spmi_hw_selector_to_sw(struct spmi_regulator *vreg, u8 hw_sel,
const struct spmi_voltage_range *range)
{
- int sw_sel = hw_sel;
+ unsigned sw_sel = 0;
+ unsigned offset, max_hw_sel;
const struct spmi_voltage_range *r = vreg->set_points->range;
-
- while (r != range) {
+ const struct spmi_voltage_range *end = r + vreg->set_points->count;
+
+ for (; r < end; r++) {
+ if (r == range && range->n_voltages) {
+ /*
+ * hardware selectors between set point min and real
+ * min and between set point max and real max are
+ * invalid so we return an error if they're
+ * programmed into the hardware
+ */
+ offset = range->set_point_min_uV - range->min_uV;
+ offset /= range->step_uV;
+ if (hw_sel < offset)
+ return -EINVAL;
+
+ max_hw_sel = range->set_point_max_uV - range->min_uV;
+ max_hw_sel /= range->step_uV;
+ if (hw_sel > max_hw_sel)
+ return -EINVAL;
+
+ return sw_sel + hw_sel - offset;
+ }
sw_sel += r->n_voltages;
- r++;
}
- return sw_sel;
+ return -EINVAL;
}
static const struct spmi_voltage_range *
@@ -1619,11 +1646,20 @@ static const struct spmi_regulator_data pm8994_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pmi8994_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "l1", 0x4000, "vdd_l1", },
+ { }
+};
+
static const struct of_device_id qcom_spmi_regulator_match[] = {
{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
{ .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
+ { .compatible = "qcom,pmi8994-regulators", .data = &pmi8994_regulators },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_spmi_regulator_match);
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index 5324dc9e6d6e..7b12e880d1ea 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -228,11 +228,6 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
int i, ret;
unsigned int val;
- if (tps65217_chip_id(tps) != TPS65217) {
- dev_err(&pdev->dev, "Invalid tps chip version\n");
- return -ENODEV;
- }
-
/* Allocate memory for strobes */
tps->strobes = devm_kzalloc(&pdev->dev, sizeof(u8) *
TPS65217_NUM_REGULATOR, GFP_KERNEL);
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 9aafbb03482d..bc489958fed7 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -154,7 +154,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
if (!tps->strobes[rid]) {
if (rid == TPS65218_DCDC_3)
- tps->info[rid]->strobe = 3;
+ tps->strobes[rid] = 3;
else
return -EINVAL;
}
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 4630782b5456..a7917d473774 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -296,7 +296,7 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
{
struct dasd_ccw_req *temp_cqr;
int data_size;
- struct timeval tv;
+ struct timespec64 ts;
struct dasd_eer_header header;
unsigned long flags;
struct eerbuffer *eerb;
@@ -310,9 +310,9 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
header.trigger = trigger;
- do_gettimeofday(&tv);
- header.tv_sec = tv.tv_sec;
- header.tv_usec = tv.tv_usec;
+ ktime_get_real_ts64(&ts);
+ header.tv_sec = ts.tv_sec;
+ header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
strncpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
@@ -340,7 +340,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
{
int data_size;
int snss_rc;
- struct timeval tv;
+ struct timespec64 ts;
struct dasd_eer_header header;
unsigned long flags;
struct eerbuffer *eerb;
@@ -353,9 +353,9 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
header.trigger = DASD_EER_STATECHANGE;
- do_gettimeofday(&tv);
- header.tv_sec = tv.tv_sec;
- header.tv_usec = tv.tv_usec;
+ ktime_get_real_ts64(&ts);
+ header.tv_sec = ts.tv_sec;
+ header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
strncpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
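The dasd_eer conversion above replaces do_gettimeofday() with ktime_get_real_ts64() and derives the microsecond field by dividing tv_nsec by NSEC_PER_USEC. The same second/microsecond split can be reproduced in userspace (a rough sketch, not from the patch):

#include <stdio.h>
#include <time.h>

#define NSEC_PER_USEC 1000L

int main(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);

	/* seconds stay as-is; nanoseconds are scaled down to microseconds */
	printf("%lld.%06ld\n", (long long)ts.tv_sec,
	       ts.tv_nsec / NSEC_PER_USEC);
	return 0;
}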
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index e94080a5196f..b095a23bcc0c 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -96,14 +96,6 @@ do { \
d_data); \
} while(0)
-#define DBF_DEV_EXC(d_level, d_device, d_str, d_data...) \
-do { \
- debug_sprintf_exception(d_device->debug_area, \
- d_level, \
- d_str "\n", \
- d_data); \
-} while(0)
-
#define DBF_EVENT(d_level, d_str, d_data...)\
do { \
debug_sprintf_event(dasd_debug_area, \
@@ -122,14 +114,6 @@ do { \
__dev_id.ssid, __dev_id.devno, d_data); \
} while (0)
-#define DBF_EXC(d_level, d_str, d_data...)\
-do { \
- debug_sprintf_exception(dasd_debug_area, \
- d_level,\
- d_str "\n", \
- d_data); \
-} while(0)
-
/* limit size for an errorstring */
#define ERRORLENGTH 30
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index aa42c3a2c90a..a05a4297cfae 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -56,13 +56,7 @@ extern debug_info_t *scm_debug;
static inline void SCM_LOG_HEX(int level, void *data, int length)
{
- if (!debug_level_enabled(scm_debug, level))
- return;
- while (length > 0) {
- debug_event(scm_debug, level, data, length);
- length -= scm_debug->buf_size;
- data += scm_debug->buf_size;
- }
+ debug_event(scm_debug, level, data, length);
}
static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index d247f238faf8..7027e61a6931 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -211,11 +211,8 @@ sclp_console_write(struct console *console, const char *message,
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
!timer_pending(&sclp_con_timer)) {
- init_timer(&sclp_con_timer);
- sclp_con_timer.function = sclp_console_timeout;
- sclp_con_timer.data = 0UL;
- sclp_con_timer.expires = jiffies + HZ/10;
- add_timer(&sclp_con_timer);
+ setup_timer(&sclp_con_timer, sclp_console_timeout, 0UL);
+ mod_timer(&sclp_con_timer, jiffies + HZ / 10);
}
out:
spin_unlock_irqrestore(&sclp_con_lock, flags);
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 875628dab419..1cceefdc03e0 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -218,11 +218,8 @@ static int sclp_tty_write_string(const unsigned char *str, int count, int may_fa
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
!timer_pending(&sclp_tty_timer)) {
- init_timer(&sclp_tty_timer);
- sclp_tty_timer.function = sclp_tty_timeout;
- sclp_tty_timer.data = 0UL;
- sclp_tty_timer.expires = jiffies + HZ/10;
- add_timer(&sclp_tty_timer);
+ setup_timer(&sclp_tty_timer, sclp_tty_timeout, 0UL);
+ mod_timer(&sclp_tty_timer, jiffies + HZ / 10);
}
spin_unlock_irqrestore(&sclp_tty_lock, flags);
out:
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index 91c3c642c76e..e7d23048d3f0 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -68,9 +68,8 @@ struct tape_class_device *register_tape_dev(
tcd->char_device->owner = fops->owner;
tcd->char_device->ops = fops;
- tcd->char_device->dev = dev;
- rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
+ rc = cdev_add(tcd->char_device, dev, 1);
if (rc)
goto fail_with_cdev;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index b19020b9efff..62559dc0169f 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -812,8 +812,7 @@ static int vmlogrdr_register_cdev(dev_t dev)
}
vmlogrdr_cdev->owner = THIS_MODULE;
vmlogrdr_cdev->ops = &vmlogrdr_fops;
- vmlogrdr_cdev->dev = dev;
- rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
+ rc = cdev_add(vmlogrdr_cdev, dev, MAXMINOR);
if (!rc)
return 0;
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 04aceb694d51..fa90ef05afc0 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -110,7 +110,7 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
mutex_init(&urd->io_mutex);
init_waitqueue_head(&urd->wait);
spin_lock_init(&urd->open_lock);
- atomic_set(&urd->ref_count, 1);
+ refcount_set(&urd->ref_count, 1);
urd->cdev = cdev;
get_device(&cdev->dev);
return urd;
@@ -126,7 +126,7 @@ static void urdev_free(struct urdev *urd)
static void urdev_get(struct urdev *urd)
{
- atomic_inc(&urd->ref_count);
+ refcount_inc(&urd->ref_count);
}
static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
@@ -159,7 +159,7 @@ static struct urdev *urdev_get_from_devno(u16 devno)
static void urdev_put(struct urdev *urd)
{
- if (atomic_dec_and_test(&urd->ref_count))
+ if (refcount_dec_and_test(&urd->ref_count))
urdev_free(urd);
}
@@ -892,10 +892,9 @@ static int ur_set_online(struct ccw_device *cdev)
}
urd->char_device->ops = &ur_fops;
- urd->char_device->dev = MKDEV(major, minor);
urd->char_device->owner = ur_fops.owner;
- rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
+ rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
if (rc)
goto fail_free_cdev;
if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
@@ -946,7 +945,7 @@ static int ur_set_offline_force(struct ccw_device *cdev, int force)
rc = -EBUSY;
goto fail_urdev_put;
}
- if (!force && (atomic_read(&urd->ref_count) > 2)) {
+ if (!force && (refcount_read(&urd->ref_count) > 2)) {
/* There is still a user of urd (e.g. ur_open) */
TRACE("ur_set_offline: BUSY\n");
rc = -EBUSY;
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
index 67164ba22f11..608b0719ce17 100644
--- a/drivers/s390/char/vmur.h
+++ b/drivers/s390/char/vmur.h
@@ -12,6 +12,8 @@
#ifndef _VMUR_H_
#define _VMUR_H_
+#include <linux/refcount.h>
+
#define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */
#define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */
/*
@@ -70,7 +72,7 @@ struct urdev {
size_t reclen; /* Record length for *write* CCWs */
int class; /* VM device class */
int io_request_rc; /* return code from I/O request */
- atomic_t ref_count; /* reference counter */
+ refcount_t ref_count; /* reference counter */
wait_queue_head_t wait; /* wait queue to serialize open */
int open_flag; /* "urdev is open" flag */
spinlock_t open_lock; /* serialize critical sections */
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 34b9ad6b3143..e2f7b6e93efd 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -373,6 +373,12 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
rc = -EINVAL;
goto error;
}
+ /* Check if the devices are bound to the required ccw driver. */
+ if (gdev->count && gdrv && gdrv->ccw_driver &&
+ gdev->cdev[0]->drv != gdrv->ccw_driver) {
+ rc = -EINVAL;
+ goto error;
+ }
dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
gdev->dev.groups = ccwgroup_attr_groups;
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 735052ecd3e5..8e7e19b9e92c 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -43,11 +43,7 @@ static DEFINE_MUTEX(on_close_mutex);
static void CHSC_LOG_HEX(int level, void *data, int length)
{
- while (length > 0) {
- debug_event(chsc_debug_log_id, level, data, length);
- length -= chsc_debug_log_id->buf_size;
- data += chsc_debug_log_id->buf_size;
- }
+ debug_event(chsc_debug_log_id, level, data, length);
}
MODULE_AUTHOR("IBM Corporation");
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
index fa817efcec8f..7bdbe73707c2 100644
--- a/drivers/s390/cio/cio_debug.h
+++ b/drivers/s390/cio/cio_debug.h
@@ -23,13 +23,7 @@ extern debug_info_t *cio_debug_crw_id;
static inline void CIO_HEX_EVENT(int level, void *data, int length)
{
- if (unlikely(!cio_debug_trace_id))
- return;
- while (length > 0) {
- debug_event(cio_debug_trace_id, level, data, length);
- length -= cio_debug_trace_id->buf_size;
- data += cio_debug_trace_id->buf_size;
- }
+ debug_event(cio_debug_trace_id, level, data, length);
}
#endif
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 220491d27ef4..7d59230e88bb 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -58,8 +58,9 @@
/* indices for READCMB */
enum cmb_index {
+ avg_utilization = -1,
/* basic and extended format: */
- cmb_ssch_rsch_count,
+ cmb_ssch_rsch_count = 0,
cmb_sample_count,
cmb_device_connect_time,
cmb_function_pending_time,
@@ -215,71 +216,52 @@ struct set_schib_struct {
unsigned long address;
wait_queue_head_t wait;
int ret;
- struct kref kref;
};
-static void cmf_set_schib_release(struct kref *kref)
-{
- struct set_schib_struct *set_data;
-
- set_data = container_of(kref, struct set_schib_struct, kref);
- kfree(set_data);
-}
-
#define CMF_PENDING 1
+#define SET_SCHIB_TIMEOUT (10 * HZ)
static int set_schib_wait(struct ccw_device *cdev, u32 mme,
- int mbfc, unsigned long address)
+ int mbfc, unsigned long address)
{
- struct set_schib_struct *set_data;
- int ret;
+ struct set_schib_struct set_data;
+ int ret = -ENODEV;
spin_lock_irq(cdev->ccwlock);
- if (!cdev->private->cmb) {
- ret = -ENODEV;
+ if (!cdev->private->cmb)
goto out;
- }
- set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC);
- if (!set_data) {
- ret = -ENOMEM;
- goto out;
- }
- init_waitqueue_head(&set_data->wait);
- kref_init(&set_data->kref);
- set_data->mme = mme;
- set_data->mbfc = mbfc;
- set_data->address = address;
ret = set_schib(cdev, mme, mbfc, address);
if (ret != -EBUSY)
- goto out_put;
+ goto out;
- if (cdev->private->state != DEV_STATE_ONLINE) {
- /* if the device is not online, don't even try again */
- ret = -EBUSY;
- goto out_put;
- }
+ /* if the device is not online, don't even try again */
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ goto out;
- cdev->private->state = DEV_STATE_CMFCHANGE;
- set_data->ret = CMF_PENDING;
- cdev->private->cmb_wait = set_data;
+ init_waitqueue_head(&set_data.wait);
+ set_data.mme = mme;
+ set_data.mbfc = mbfc;
+ set_data.address = address;
+ set_data.ret = CMF_PENDING;
+ cdev->private->state = DEV_STATE_CMFCHANGE;
+ cdev->private->cmb_wait = &set_data;
spin_unlock_irq(cdev->ccwlock);
- if (wait_event_interruptible(set_data->wait,
- set_data->ret != CMF_PENDING)) {
- spin_lock_irq(cdev->ccwlock);
- if (set_data->ret == CMF_PENDING) {
- set_data->ret = -ERESTARTSYS;
+
+ ret = wait_event_interruptible_timeout(set_data.wait,
+ set_data.ret != CMF_PENDING,
+ SET_SCHIB_TIMEOUT);
+ spin_lock_irq(cdev->ccwlock);
+ if (ret <= 0) {
+ if (set_data.ret == CMF_PENDING) {
+ set_data.ret = (ret == 0) ? -ETIME : ret;
if (cdev->private->state == DEV_STATE_CMFCHANGE)
cdev->private->state = DEV_STATE_ONLINE;
}
- spin_unlock_irq(cdev->ccwlock);
}
- spin_lock_irq(cdev->ccwlock);
cdev->private->cmb_wait = NULL;
- ret = set_data->ret;
-out_put:
- kref_put(&set_data->kref, cmf_set_schib_release);
+ ret = set_data.ret;
out:
spin_unlock_irq(cdev->ccwlock);
return ret;
@@ -287,28 +269,21 @@ out:
void retry_set_schib(struct ccw_device *cdev)
{
- struct set_schib_struct *set_data;
+ struct set_schib_struct *set_data = cdev->private->cmb_wait;
- set_data = cdev->private->cmb_wait;
- if (!set_data) {
- WARN_ON(1);
+ if (!set_data)
return;
- }
- kref_get(&set_data->kref);
+
set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
set_data->address);
wake_up(&set_data->wait);
- kref_put(&set_data->kref, cmf_set_schib_release);
}
static int cmf_copy_block(struct ccw_device *cdev)
{
- struct subchannel *sch;
- void *reference_buf;
- void *hw_block;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct cmb_data *cmb_data;
-
- sch = to_subchannel(cdev->dev.parent);
+ void *hw_block;
if (cio_update_schib(sch))
return -ENODEV;
@@ -323,102 +298,65 @@ static int cmf_copy_block(struct ccw_device *cdev)
}
cmb_data = cdev->private->cmb;
hw_block = cmb_data->hw_block;
- if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
- /* No need to copy. */
- return 0;
- reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC);
- if (!reference_buf)
- return -ENOMEM;
- /* Ensure consistency of block copied from hardware. */
- do {
- memcpy(cmb_data->last_block, hw_block, cmb_data->size);
- memcpy(reference_buf, hw_block, cmb_data->size);
- } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
+ memcpy(cmb_data->last_block, hw_block, cmb_data->size);
cmb_data->last_update = get_tod_clock();
- kfree(reference_buf);
return 0;
}
struct copy_block_struct {
wait_queue_head_t wait;
int ret;
- struct kref kref;
};
-static void cmf_copy_block_release(struct kref *kref)
-{
- struct copy_block_struct *copy_block;
-
- copy_block = container_of(kref, struct copy_block_struct, kref);
- kfree(copy_block);
-}
-
static int cmf_cmb_copy_wait(struct ccw_device *cdev)
{
- struct copy_block_struct *copy_block;
- int ret;
- unsigned long flags;
+ struct copy_block_struct copy_block;
+ int ret = -ENODEV;
- spin_lock_irqsave(cdev->ccwlock, flags);
- if (!cdev->private->cmb) {
- ret = -ENODEV;
- goto out;
- }
- copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC);
- if (!copy_block) {
- ret = -ENOMEM;
+ spin_lock_irq(cdev->ccwlock);
+ if (!cdev->private->cmb)
goto out;
- }
- init_waitqueue_head(&copy_block->wait);
- kref_init(&copy_block->kref);
ret = cmf_copy_block(cdev);
if (ret != -EBUSY)
- goto out_put;
+ goto out;
- if (cdev->private->state != DEV_STATE_ONLINE) {
- ret = -EBUSY;
- goto out_put;
- }
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ goto out;
+
+ init_waitqueue_head(&copy_block.wait);
+ copy_block.ret = CMF_PENDING;
cdev->private->state = DEV_STATE_CMFUPDATE;
- copy_block->ret = CMF_PENDING;
- cdev->private->cmb_wait = copy_block;
+ cdev->private->cmb_wait = &copy_block;
+ spin_unlock_irq(cdev->ccwlock);
- spin_unlock_irqrestore(cdev->ccwlock, flags);
- if (wait_event_interruptible(copy_block->wait,
- copy_block->ret != CMF_PENDING)) {
- spin_lock_irqsave(cdev->ccwlock, flags);
- if (copy_block->ret == CMF_PENDING) {
- copy_block->ret = -ERESTARTSYS;
+ ret = wait_event_interruptible(copy_block.wait,
+ copy_block.ret != CMF_PENDING);
+ spin_lock_irq(cdev->ccwlock);
+ if (ret) {
+ if (copy_block.ret == CMF_PENDING) {
+ copy_block.ret = -ERESTARTSYS;
if (cdev->private->state == DEV_STATE_CMFUPDATE)
cdev->private->state = DEV_STATE_ONLINE;
}
- spin_unlock_irqrestore(cdev->ccwlock, flags);
}
- spin_lock_irqsave(cdev->ccwlock, flags);
cdev->private->cmb_wait = NULL;
- ret = copy_block->ret;
-out_put:
- kref_put(&copy_block->kref, cmf_copy_block_release);
+ ret = copy_block.ret;
out:
- spin_unlock_irqrestore(cdev->ccwlock, flags);
+ spin_unlock_irq(cdev->ccwlock);
return ret;
}
void cmf_retry_copy_block(struct ccw_device *cdev)
{
- struct copy_block_struct *copy_block;
+ struct copy_block_struct *copy_block = cdev->private->cmb_wait;
- copy_block = cdev->private->cmb_wait;
- if (!copy_block) {
- WARN_ON(1);
+ if (!copy_block)
return;
- }
- kref_get(&copy_block->kref);
+
copy_block->ret = cmf_copy_block(cdev);
wake_up(&copy_block->wait);
- kref_put(&copy_block->kref, cmf_copy_block_release);
}
static void cmf_generic_reset(struct ccw_device *cdev)
@@ -650,25 +588,44 @@ static int set_cmb(struct ccw_device *cdev, u32 mme)
return set_schib_wait(cdev, mme, 0, offset);
}
+/* calculate utilization in 0.1 percent units */
+static u64 __cmb_utilization(u64 device_connect_time, u64 function_pending_time,
+ u64 device_disconnect_time, u64 start_time)
+{
+ u64 utilization, elapsed_time;
+
+ utilization = time_to_nsec(device_connect_time +
+ function_pending_time +
+ device_disconnect_time);
+
+ elapsed_time = get_tod_clock() - start_time;
+ elapsed_time = tod_to_ns(elapsed_time);
+ elapsed_time /= 1000;
+
+ return elapsed_time ? (utilization / elapsed_time) : 0;
+}
+
static u64 read_cmb(struct ccw_device *cdev, int index)
{
+ struct cmb_data *cmb_data;
+ unsigned long flags;
struct cmb *cmb;
+ u64 ret = 0;
u32 val;
- int ret;
- unsigned long flags;
-
- ret = cmf_cmb_copy_wait(cdev);
- if (ret < 0)
- return 0;
spin_lock_irqsave(cdev->ccwlock, flags);
- if (!cdev->private->cmb) {
- ret = 0;
+ cmb_data = cdev->private->cmb;
+ if (!cmb_data)
goto out;
- }
- cmb = ((struct cmb_data *)cdev->private->cmb)->last_block;
+ cmb = cmb_data->hw_block;
switch (index) {
+ case avg_utilization:
+ ret = __cmb_utilization(cmb->device_connect_time,
+ cmb->function_pending_time,
+ cmb->device_disconnect_time,
+ cdev->private->cmb_start_time);
+ goto out;
case cmb_ssch_rsch_count:
ret = cmb->ssch_rsch_count;
goto out;
@@ -691,7 +648,6 @@ static u64 read_cmb(struct ccw_device *cdev, int index)
val = cmb->device_active_only_time;
break;
default:
- ret = 0;
goto out;
}
ret = time_to_avg_nsec(val, cmb->sample_count);
@@ -729,8 +685,7 @@ static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
/* we only know values before device_busy_time */
data->size = offsetof(struct cmbdata, device_busy_time);
- /* convert to nanoseconds */
- data->elapsed_time = (time * 1000) >> 12;
+ data->elapsed_time = tod_to_ns(time);
/* copy data to new structure */
data->ssch_rsch_count = cmb->ssch_rsch_count;
@@ -904,28 +859,27 @@ static int set_cmbe(struct ccw_device *cdev, u32 mme)
return set_schib_wait(cdev, mme, 1, mba);
}
-
static u64 read_cmbe(struct ccw_device *cdev, int index)
{
- struct cmbe *cmb;
struct cmb_data *cmb_data;
- u32 val;
- int ret;
unsigned long flags;
-
- ret = cmf_cmb_copy_wait(cdev);
- if (ret < 0)
- return 0;
+ struct cmbe *cmb;
+ u64 ret = 0;
+ u32 val;
spin_lock_irqsave(cdev->ccwlock, flags);
cmb_data = cdev->private->cmb;
- if (!cmb_data) {
- ret = 0;
+ if (!cmb_data)
goto out;
- }
- cmb = cmb_data->last_block;
+ cmb = cmb_data->hw_block;
switch (index) {
+ case avg_utilization:
+ ret = __cmb_utilization(cmb->device_connect_time,
+ cmb->function_pending_time,
+ cmb->device_disconnect_time,
+ cdev->private->cmb_start_time);
+ goto out;
case cmb_ssch_rsch_count:
ret = cmb->ssch_rsch_count;
goto out;
@@ -954,7 +908,6 @@ static u64 read_cmbe(struct ccw_device *cdev, int index)
val = cmb->initial_command_response_time;
break;
default:
- ret = 0;
goto out;
}
ret = time_to_avg_nsec(val, cmb->sample_count);
@@ -991,8 +944,7 @@ static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
/* we only know values before device_busy_time */
data->size = offsetof(struct cmbdata, device_busy_time);
- /* conver to nanoseconds */
- data->elapsed_time = (time * 1000) >> 12;
+ data->elapsed_time = tod_to_ns(time);
cmb = cmb_data->last_block;
/* copy data to new structure */
@@ -1045,19 +997,15 @@ static ssize_t cmb_show_avg_sample_interval(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct ccw_device *cdev;
- long interval;
+ struct ccw_device *cdev = to_ccwdev(dev);
unsigned long count;
- struct cmb_data *cmb_data;
+ long interval;
- cdev = to_ccwdev(dev);
count = cmf_read(cdev, cmb_sample_count);
spin_lock_irq(cdev->ccwlock);
- cmb_data = cdev->private->cmb;
if (count) {
- interval = cmb_data->last_update -
- cdev->private->cmb_start_time;
- interval = (interval * 1000) >> 12;
+ interval = get_tod_clock() - cdev->private->cmb_start_time;
+ interval = tod_to_ns(interval);
interval /= count;
} else
interval = -1;
@@ -1069,27 +1017,9 @@ static ssize_t cmb_show_avg_utilization(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct cmbdata data;
- u64 utilization;
- unsigned long t, u;
- int ret;
-
- ret = cmf_readall(to_ccwdev(dev), &data);
- if (ret == -EAGAIN || ret == -ENODEV)
- /* No data (yet/currently) available to use for calculation. */
- return sprintf(buf, "n/a\n");
- else if (ret)
- return ret;
-
- utilization = data.device_connect_time +
- data.function_pending_time +
- data.device_disconnect_time;
-
- /* calculate value in 0.1 percent units */
- t = data.elapsed_time / 1000;
- u = utilization / t;
+ unsigned long u = cmf_read(to_ccwdev(dev), avg_utilization);
- return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10);
+ return sprintf(buf, "%02lu.%01lu%%\n", u / 10, u % 10);
}
#define cmf_attr(name) \
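The cmf rework above folds the utilization calculation into __cmb_utilization(), which reports busy time over elapsed time in 0.1 percent units. Stripped of the TOD clock, the arithmetic reduces to this userspace sketch (not from the patch; the sample values are made up):

#include <stdio.h>
#include <inttypes.h>

/* busy and elapsed times in nanoseconds; returns utilization in 0.1% units */
static uint64_t utilization_tenths(uint64_t busy_ns, uint64_t elapsed_ns)
{
	uint64_t elapsed = elapsed_ns / 1000;	/* scale so the result is in 0.1% */

	return elapsed ? busy_ns / elapsed : 0;
}

int main(void)
{
	/* 2.5 s busy over a 10 s interval -> 25.0% */
	uint64_t u = utilization_tenths(2500000000ULL, 10000000000ULL);

	printf("%02" PRIu64 ".%01" PRIu64 "%%\n", u / 10, u % 10);
	return 0;
}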
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 0f11f3bcac82..d14795f7110b 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -43,13 +43,7 @@ static debug_info_t *eadm_debug;
static void EADM_LOG_HEX(int level, void *data, int length)
{
- if (!debug_level_enabled(eadm_debug, level))
- return;
- while (length > 0) {
- debug_event(eadm_debug, level, data, length);
- length -= eadm_debug->buf_size;
- data += eadm_debug->buf_size;
- }
+ debug_event(eadm_debug, level, data, length);
}
static void orb_init(union orb *orb)
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index e06496ab0036..f85f5fa7cefc 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -34,11 +34,7 @@ extern debug_info_t *qdio_dbf_error;
static inline void DBF_HEX(void *addr, int len)
{
- while (len > 0) {
- debug_event(qdio_dbf_setup, DBF_ERR, addr, len);
- len -= qdio_dbf_setup->buf_size;
- addr += qdio_dbf_setup->buf_size;
- }
+ debug_event(qdio_dbf_setup, DBF_ERR, addr, len);
}
#define DBF_ERROR(text...) \
@@ -50,11 +46,7 @@ static inline void DBF_HEX(void *addr, int len)
static inline void DBF_ERROR_HEX(void *addr, int len)
{
- while (len > 0) {
- debug_event(qdio_dbf_error, DBF_ERR, addr, len);
- len -= qdio_dbf_error->buf_size;
- addr += qdio_dbf_error->buf_size;
- }
+ debug_event(qdio_dbf_error, DBF_ERR, addr, len);
}
#define DBF_DEV_EVENT(level, device, text...) \
@@ -69,11 +61,7 @@ static inline void DBF_ERROR_HEX(void *addr, int len)
static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
int len, int level)
{
- while (len > 0) {
- debug_event(dev->debug_area, level, addr, len);
- len -= dev->debug_area->buf_size;
- addr += dev->debug_area->buf_size;
- }
+ debug_event(dev->debug_area, level, addr, len);
}
int qdio_allocate_dbf(struct qdio_initialize *init_data,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index a739bdf9630e..0787b587e4b8 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -57,10 +57,8 @@ static u32 *get_indicator(void)
int i;
for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
- if (!atomic_read(&q_indicators[i].count)) {
- atomic_set(&q_indicators[i].count, 1);
+ if (!atomic_cmpxchg(&q_indicators[i].count, 0, 1))
return &q_indicators[i].ind;
- }
/* use the shared indicator */
atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
@@ -69,13 +67,11 @@ static u32 *get_indicator(void)
static void put_indicator(u32 *addr)
{
- int i;
+ struct indicator_t *ind = container_of(addr, struct indicator_t, ind);
if (!addr)
return;
- i = ((unsigned long)addr - (unsigned long)q_indicators) /
- sizeof(struct indicator_t);
- atomic_dec(&q_indicators[i].count);
+ atomic_dec(&ind->count);
}
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
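The qdio_thinint change above replaces a racy read-then-set pair with atomic_cmpxchg(), so testing a slot for zero and claiming it become one atomic step and two CPUs can no longer grab the same indicator. A userspace model of that claim/release pattern, using C11 atomics in place of the kernel's atomic_t (not from the patch):

#include <stdio.h>
#include <stdatomic.h>

#define NR_IND 4

static atomic_int count[NR_IND];

/* Return the index of a claimed slot, or -1 if all are in use. */
static int get_indicator(void)
{
	int i, expected;

	for (i = 0; i < NR_IND; i++) {
		expected = 0;
		/* atomically: if count[i] == 0, set it to 1 and claim the slot */
		if (atomic_compare_exchange_strong(&count[i], &expected, 1))
			return i;
	}
	return -1;
}

static void put_indicator(int i)
{
	atomic_fetch_sub(&count[i], 1);
}

int main(void)
{
	int a = get_indicator();
	int b = get_indicator();

	printf("claimed %d and %d\n", a, b);	/* two distinct slots */
	put_indicator(a);
	put_indicator(b);
	return 0;
}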
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index f20b4d66c75f..d9a2fffd034b 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -106,7 +106,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
{
int ret = 0;
- if (!len || pa->pa_nr)
+ if (!len)
+ return 0;
+
+ if (pa->pa_nr)
return -EINVAL;
pa->pa_iova = iova;
@@ -330,6 +333,8 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
struct ccw1 *ccw = chain->ch_ccw + idx;
+ if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw))
+ return;
if (!ccw->count)
return;
@@ -502,6 +507,16 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
ccw = chain->ch_ccw + idx;
+ if (!ccw->count) {
+ /*
+ * We just want the translation result of any direct ccw
+ * to be an IDA ccw, so let's add the IDA flag for it,
+ * even though the flag will be ignored by firmware.
+ */
+ ccw->flags |= CCW_FLAG_IDA;
+ return 0;
+ }
+
/*
* Pin data page(s) in memory.
* The number of pages actually is the count of the idaws which will be
@@ -542,6 +557,9 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
ccw = chain->ch_ccw + idx;
+ if (!ccw->count)
+ return 0;
+
/* Calculate size of idaws. */
ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova));
if (ret)
@@ -570,10 +588,6 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
for (i = 0; i < idaw_nr; i++) {
idaw_iova = *(idaws + i);
- if (IS_ERR_VALUE(idaw_iova)) {
- ret = -EFAULT;
- goto out_free_idaws;
- }
ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev,
idaw_iova, 1);
diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h
index 6c0474c834d4..16b59ce5e01d 100644
--- a/drivers/s390/crypto/ap_asm.h
+++ b/drivers/s390/crypto/ap_asm.h
@@ -117,6 +117,49 @@ static inline int ap_qci(void *config)
return reg1;
}
+/*
+ * union ap_qact_ap_info - used together with the
+ * ap_aqic() function to provide a convenient way
+ * to handle the ap info needed by the qact function.
+ */
+union ap_qact_ap_info {
+ unsigned long val;
+ struct {
+ unsigned int : 3;
+ unsigned int mode : 3;
+ unsigned int : 26;
+ unsigned int cat : 8;
+ unsigned int : 8;
+ unsigned char ver[2];
+ };
+};
+
+/**
+ * ap_qact(): Query AP compatibility type.
+ * @qid: The AP queue number
+ * @apinfo: On input the info about the AP queue. On output the
+ * alternate AP queue info provided by the qact function
+ * in GR2 is stored here.
+ *
+ * Returns AP queue status. Check response_code field for failures.
+ */
+static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
+ union ap_qact_ap_info *apinfo)
+{
+ register unsigned long reg0 asm ("0") = qid | (5UL << 24)
+ | ((ifbit & 0x01) << 22);
+ register unsigned long reg1_in asm ("1") = apinfo->val;
+ register struct ap_queue_status reg1_out asm ("1");
+ register unsigned long reg2 asm ("2") = 0;
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(QACT) */
+ : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
+ : : "cc");
+ apinfo->val = reg2;
+ return reg1_out;
+}
+
/**
* ap_nqap(): Send message to adjunct processor queue.
* @qid: The AP queue number
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 5f0be2040272..8b5658b0bec3 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -176,6 +176,18 @@ static int ap_apft_available(void)
return test_facility(15);
}
+/*
+ * ap_qact_available(): Test if the PQAP(QACT) subfunction is available.
+ *
+ * Returns 1 if the QACT subfunction is available.
+ */
+static inline int ap_qact_available(void)
+{
+ if (ap_configuration)
+ return ap_configuration->qact;
+ return 0;
+}
+
/**
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
@@ -988,6 +1000,47 @@ static int ap_select_domain(void)
}
/*
+ * This function checks the type and returns either 0 for not
+ * supported or the highest compatible type value (which may
+ * include the input type value).
+ */
+static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
+{
+ int comp_type = 0;
+
+ /* < CEX2A is not supported */
+ if (rawtype < AP_DEVICE_TYPE_CEX2A)
+ return 0;
+ /* up to CEX6 known and fully supported */
+ if (rawtype <= AP_DEVICE_TYPE_CEX6)
+ return rawtype;
+ /*
+ * unknown new type > CEX6, check for compatibility
+ * to the highest known and supported type which is
+ * currently CEX6 with the help of the QACT function.
+ */
+ if (ap_qact_available()) {
+ struct ap_queue_status status;
+ union ap_qact_ap_info apinfo = {0};
+
+ apinfo.mode = (func >> 26) & 0x07;
+ apinfo.cat = AP_DEVICE_TYPE_CEX6;
+ status = ap_qact(qid, 0, &apinfo);
+ if (status.response_code == AP_RESPONSE_NORMAL
+ && apinfo.cat >= AP_DEVICE_TYPE_CEX2A
+ && apinfo.cat <= AP_DEVICE_TYPE_CEX6)
+ comp_type = apinfo.cat;
+ }
+ if (!comp_type)
+ AP_DBF(DBF_WARN, "queue=%02x.%04x unable to map type %d\n",
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
+ else if (comp_type != rawtype)
+ AP_DBF(DBF_INFO, "queue=%02x.%04x map type %d to %d\n",
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype, comp_type);
+ return comp_type;
+}
+
+/*
* helper function to be used with bus_find_dev
* matches for the card device with the given id
*/
@@ -1014,8 +1067,8 @@ static void ap_scan_bus(struct work_struct *unused)
struct ap_card *ac;
struct device *dev;
ap_qid_t qid;
- int depth = 0, type = 0;
- unsigned int functions = 0;
+ int comp_type, depth = 0, type = 0;
+ unsigned int func = 0;
int rc, id, dom, borked, domains, defdomdevs = 0;
AP_DBF(DBF_DEBUG, "ap_scan_bus running\n");
@@ -1066,12 +1119,12 @@ static void ap_scan_bus(struct work_struct *unused)
}
continue;
}
- rc = ap_query_queue(qid, &depth, &type, &functions);
+ rc = ap_query_queue(qid, &depth, &type, &func);
if (dev) {
spin_lock_bh(&aq->lock);
if (rc == -ENODEV ||
/* adapter reconfiguration */
- (ac && ac->functions != functions))
+ (ac && ac->functions != func))
aq->state = AP_STATE_BORKED;
borked = aq->state == AP_STATE_BORKED;
spin_unlock_bh(&aq->lock);
@@ -1087,11 +1140,14 @@ static void ap_scan_bus(struct work_struct *unused)
}
if (rc)
continue;
- /* new queue device needed */
+ /* a new queue device is needed, check out comp type */
+ comp_type = ap_get_compatible_type(qid, type, func);
+ if (!comp_type)
+ continue;
+ /* maybe a card device needs to be created first */
if (!ac) {
- /* but first create the card device */
- ac = ap_card_create(id, depth,
- type, functions);
+ ac = ap_card_create(id, depth, type,
+ comp_type, func);
if (!ac)
continue;
ac->ap_dev.device.bus = &ap_bus_type;
@@ -1109,7 +1165,7 @@ static void ap_scan_bus(struct work_struct *unused)
get_device(&ac->ap_dev.device);
}
/* now create the new queue device */
- aq = ap_queue_create(qid, type);
+ aq = ap_queue_create(qid, comp_type);
if (!aq)
continue;
aq->card = ac;
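The ap_bus changes above introduce ap_get_compatible_type(): known types are used as-is, anything older than CEX2A is rejected, and an unknown newer type is mapped to a compatible known one via PQAP(QACT). A userspace sketch of that fallback, with the QACT query replaced by a hypothetical query_compat() stub and purely illustrative type values (not from the patch):

#include <stdio.h>

enum { TYPE_CEX2A = 6, TYPE_CEX6 = 12 };	/* illustrative values only */

/* Stand-in for the PQAP(QACT) query; here it simply reports CEX6. */
static int query_compat(int rawtype)
{
	(void)rawtype;
	return TYPE_CEX6;
}

/* Map a raw hardware type to a type the driver knows how to handle. */
static int compatible_type(int rawtype)
{
	int comp;

	if (rawtype < TYPE_CEX2A)	/* too old, not supported */
		return 0;
	if (rawtype <= TYPE_CEX6)	/* known type, use as-is */
		return rawtype;

	/* unknown newer type: ask the hardware for a compatible one */
	comp = query_compat(rawtype);
	if (comp >= TYPE_CEX2A && comp <= TYPE_CEX6)
		return comp;
	return 0;			/* no usable mapping */
}

int main(void)
{
	printf("type 13 -> %d\n", compatible_type(13));	/* maps to 12 */
	return 0;
}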
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 754cf2223cfb..3a0e19d87e7c 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -250,8 +250,8 @@ void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
-struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
- unsigned int device_functions);
+struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
+ int comp_device_type, unsigned int functions);
int ap_module_init(void);
void ap_module_exit(void);
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 8a31c9e95430..97a8cf578116 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -171,22 +171,20 @@ static void ap_card_device_release(struct device *dev)
kfree(ac);
}
-struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
- unsigned int functions)
+struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
+ int comp_type, unsigned int functions)
{
struct ap_card *ac;
ac = kzalloc(sizeof(*ac), GFP_KERNEL);
if (!ac)
return NULL;
+ INIT_LIST_HEAD(&ac->list);
INIT_LIST_HEAD(&ac->queues);
ac->ap_dev.device.release = ap_card_device_release;
ac->ap_dev.device.type = &ap_card_type;
- ac->ap_dev.device_type = device_type;
- /* CEX6 toleration: map to CEX5 */
- if (device_type == AP_DEVICE_TYPE_CEX6)
- ac->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
- ac->raw_hwtype = device_type;
+ ac->ap_dev.device_type = comp_type;
+ ac->raw_hwtype = raw_type;
ac->queue_depth = queue_depth;
ac->functions = functions;
ac->id = id;
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 6c8bd8ad6185..a550d40921e7 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -627,13 +627,11 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->ap_dev.device.release = ap_queue_device_release;
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
- /* CEX6 toleration: map to CEX5 */
- if (device_type == AP_DEVICE_TYPE_CEX6)
- aq->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
aq->qid = qid;
aq->state = AP_STATE_RESET_START;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
+ INIT_LIST_HEAD(&aq->list);
INIT_LIST_HEAD(&aq->pendingq);
INIT_LIST_HEAD(&aq->requestq);
setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq);
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index f61fa47135a6..8dda5bb34a2f 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -125,10 +125,9 @@ static int alloc_and_prep_cprbmem(size_t paramblen,
* allocate consecutive memory for request CPRB, request param
* block, reply CPRB and reply param block
*/
- cprbmem = kmalloc(2 * cprbplusparamblen, GFP_KERNEL);
+ cprbmem = kzalloc(2 * cprbplusparamblen, GFP_KERNEL);
if (!cprbmem)
return -ENOMEM;
- memset(cprbmem, 0, 2 * cprbplusparamblen);
preqcblk = (struct CPRBX *) cprbmem;
prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 6c94efd23eac..73541a798db7 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -76,6 +76,7 @@ struct ica_z90_status {
#define ZCRYPT_CEX3A 8
#define ZCRYPT_CEX4 10
#define ZCRYPT_CEX5 11
+#define ZCRYPT_CEX6 12
/**
* Large random numbers are pulled in 4096 byte chunks from the crypto cards
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 4e91163d70a6..e2eebc775a37 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -45,6 +45,8 @@ static struct ap_device_id zcrypt_cex4_card_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX5,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX6,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ /* end of list */ },
};
@@ -55,6 +57,8 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX5,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX6,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ /* end of list */ },
};
@@ -72,17 +76,25 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
* MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
*/
static const int CEX4A_SPEED_IDX[] = {
- 5, 6, 59, 20, 115, 581, 0, 0};
+ 14, 19, 249, 42, 228, 1458, 0, 0};
static const int CEX5A_SPEED_IDX[] = {
- 3, 3, 6, 8, 32, 218, 0, 0};
+ 8, 9, 20, 18, 66, 458, 0, 0};
+ static const int CEX6A_SPEED_IDX[] = {
+ 6, 9, 20, 17, 65, 438, 0, 0};
+
static const int CEX4C_SPEED_IDX[] = {
- 24, 25, 82, 41, 138, 1111, 79, 8};
+ 59, 69, 308, 83, 278, 2204, 209, 40};
static const int CEX5C_SPEED_IDX[] = {
- 10, 14, 23, 17, 45, 242, 63, 4};
+ 24, 31, 50, 37, 90, 479, 27, 10};
+ static const int CEX6C_SPEED_IDX[] = {
+ 16, 20, 32, 27, 77, 455, 23, 9};
+
static const int CEX4P_SPEED_IDX[] = {
- 142, 198, 1852, 203, 331, 1563, 0, 8};
+ 224, 313, 3560, 359, 605, 2827, 0, 50};
static const int CEX5P_SPEED_IDX[] = {
- 49, 67, 131, 52, 85, 287, 0, 4};
+ 63, 84, 156, 83, 142, 533, 0, 10};
+ static const int CEX6P_SPEED_IDX[] = {
+ 55, 70, 121, 73, 129, 522, 0, 9};
struct ap_card *ac = to_ap_card(&ap_dev->device);
struct zcrypt_card *zc;
@@ -99,11 +111,16 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX4;
memcpy(zc->speed_rating, CEX4A_SPEED_IDX,
sizeof(CEX4A_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5A";
zc->user_space_type = ZCRYPT_CEX5;
memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
sizeof(CEX5A_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX6A";
+ zc->user_space_type = ZCRYPT_CEX6;
+ memcpy(zc->speed_rating, CEX6A_SPEED_IDX,
+ sizeof(CEX6A_SPEED_IDX));
}
zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
@@ -125,7 +142,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX3C;
memcpy(zc->speed_rating, CEX4C_SPEED_IDX,
sizeof(CEX4C_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5C";
/* wrong user space type, must be CEX5
* just keep it for cca compatibility
@@ -133,6 +150,14 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX3C;
memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
sizeof(CEX5C_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX6C";
+ /* wrong user space type, must be CEX6
+ * just keep it for cca compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ memcpy(zc->speed_rating, CEX6C_SPEED_IDX,
+ sizeof(CEX6C_SPEED_IDX));
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -143,11 +168,16 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX4;
memcpy(zc->speed_rating, CEX4P_SPEED_IDX,
sizeof(CEX4P_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5P";
zc->user_space_type = ZCRYPT_CEX5;
memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
sizeof(CEX5P_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX6P";
+ zc->user_space_type = ZCRYPT_CEX6;
+ memcpy(zc->speed_rating, CEX6P_SPEED_IDX,
+ sizeof(CEX6P_SPEED_IDX));
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 6dd5d7c58dd0..db5bde47dfb0 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -240,8 +240,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
inp = meb2->message + sizeof(meb2->message) - mod_len;
- } else {
- /* mod_len > 256 = 4096 bit RSA Key */
+ } else if (mod_len <= 512) {
struct type50_meb3_msg *meb3 = ap_msg->message;
memset(meb3, 0, sizeof(*meb3));
ap_msg->length = sizeof(*meb3);
@@ -251,7 +250,8 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
inp = meb3->message + sizeof(meb3->message) - mod_len;
- }
+ } else
+ return -EINVAL;
if (copy_from_user(mod, mex->n_modulus, mod_len) ||
copy_from_user(exp, mex->b_key, mod_len) ||
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index afd20cee7ea0..785620d30504 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -474,7 +474,8 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
*fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1];
*dom = (unsigned short *)&msg->cprbx.domain;
- if (memcmp(function_code, "US", 2) == 0)
+ if (memcmp(function_code, "US", 2) == 0
+ || memcmp(function_code, "AU", 2) == 0)
ap_msg->special = 1;
else
ap_msg->special = 0;
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 26363e0816fe..be9f17218531 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1761,6 +1761,7 @@ static struct ccwgroup_driver ctcm_group_driver = {
.owner = THIS_MODULE,
.name = CTC_DRIVER_NAME,
},
+ .ccw_driver = &ctcm_ccw_driver,
.setup = ctcm_probe_device,
.remove = ctcm_remove_device,
.set_online = ctcm_new_device,
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index b855c6f08e96..e131a03262ad 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2391,6 +2391,7 @@ static struct ccwgroup_driver lcs_group_driver = {
.owner = THIS_MODULE,
.name = "lcs",
},
+ .ccw_driver = &lcs_ccw_driver,
.setup = lcs_probe_device,
.remove = lcs_remove_device,
.set_online = lcs_new_device,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 457a4b4e8212..49b9efeba1bd 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5811,6 +5811,7 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.owner = THIS_MODULE,
.name = "qeth",
},
+ .ccw_driver = &qeth_ccw_driver,
.setup = qeth_core_probe_device,
.remove = qeth_core_remove_device,
.set_online = qeth_core_set_online,
diff --git a/drivers/s390/virtio/Makefile b/drivers/s390/virtio/Makefile
index df40692a9011..f68af1f317f1 100644
--- a/drivers/s390/virtio/Makefile
+++ b/drivers/s390/virtio/Makefile
@@ -6,8 +6,4 @@
# it under the terms of the GNU General Public License (version 2 only)
# as published by the Free Software Foundation.
-s390-virtio-objs := virtio_ccw.o
-ifdef CONFIG_S390_GUEST_OLD_TRANSPORT
-s390-virtio-objs += kvm_virtio.o
-endif
-obj-$(CONFIG_S390_GUEST) += $(s390-virtio-objs)
+obj-$(CONFIG_S390_GUEST) += virtio_ccw.o
diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
deleted file mode 100644
index a99d09a11f05..000000000000
--- a/drivers/s390/virtio/kvm_virtio.c
+++ /dev/null
@@ -1,515 +0,0 @@
-/*
- * virtio for kvm on s390
- *
- * Copyright IBM Corp. 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- */
-
-#include <linux/kernel_stat.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/err.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/slab.h>
-#include <linux/virtio_console.h>
-#include <linux/interrupt.h>
-#include <linux/virtio_ring.h>
-#include <linux/export.h>
-#include <linux/pfn.h>
-#include <asm/io.h>
-#include <asm/kvm_para.h>
-#include <asm/kvm_virtio.h>
-#include <asm/sclp.h>
-#include <asm/setup.h>
-#include <asm/irq.h>
-
-#define VIRTIO_SUBCODE_64 0x0D00
-
-/*
- * The pointer to our (page) of device descriptions.
- */
-static void *kvm_devices;
-static struct work_struct hotplug_work;
-
-struct kvm_device {
- struct virtio_device vdev;
- struct kvm_device_desc *desc;
-};
-
-#define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev)
-
-/*
- * memory layout:
- * - kvm_device_descriptor
- * struct kvm_device_desc
- * - configuration
- * struct kvm_vqconfig
- * - feature bits
- * - config space
- */
-static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc)
-{
- return (struct kvm_vqconfig *)(desc + 1);
-}
-
-static u8 *kvm_vq_features(const struct kvm_device_desc *desc)
-{
- return (u8 *)(kvm_vq_config(desc) + desc->num_vq);
-}
-
-static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc)
-{
- return kvm_vq_features(desc) + desc->feature_len * 2;
-}
-
-/*
- * The total size of the config page used by this device (incl. desc)
- */
-static unsigned desc_size(const struct kvm_device_desc *desc)
-{
- return sizeof(*desc)
- + desc->num_vq * sizeof(struct kvm_vqconfig)
- + desc->feature_len * 2
- + desc->config_len;
-}
-
-/* This gets the device's feature bits. */
-static u64 kvm_get_features(struct virtio_device *vdev)
-{
- unsigned int i;
- u32 features = 0;
- struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
- u8 *in_features = kvm_vq_features(desc);
-
- for (i = 0; i < min(desc->feature_len * 8, 32); i++)
- if (in_features[i / 8] & (1 << (i % 8)))
- features |= (1 << i);
- return features;
-}
-
-static int kvm_finalize_features(struct virtio_device *vdev)
-{
- unsigned int i, bits;
- struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
- /* Second half of bitmap is features we accept. */
- u8 *out_features = kvm_vq_features(desc) + desc->feature_len;
-
- /* Give virtio_ring a chance to accept features. */
- vring_transport_features(vdev);
-
- /* Make sure we don't have any features > 32 bits! */
- BUG_ON((u32)vdev->features != vdev->features);
-
- memset(out_features, 0, desc->feature_len);
- bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
- for (i = 0; i < bits; i++) {
- if (__virtio_test_bit(vdev, i))
- out_features[i / 8] |= (1 << (i % 8));
- }
-
- return 0;
-}
-
-/*
- * Reading and writing elements in config space
- */
-static void kvm_get(struct virtio_device *vdev, unsigned int offset,
- void *buf, unsigned len)
-{
- struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
-
- BUG_ON(offset + len > desc->config_len);
- memcpy(buf, kvm_vq_configspace(desc) + offset, len);
-}
-
-static void kvm_set(struct virtio_device *vdev, unsigned int offset,
- const void *buf, unsigned len)
-{
- struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
-
- BUG_ON(offset + len > desc->config_len);
- memcpy(kvm_vq_configspace(desc) + offset, buf, len);
-}
-
-/*
- * The operations to get and set the status word just access
- * the status field of the device descriptor. set_status will also
- * make a hypercall to the host, to tell about status changes
- */
-static u8 kvm_get_status(struct virtio_device *vdev)
-{
- return to_kvmdev(vdev)->desc->status;
-}
-
-static void kvm_set_status(struct virtio_device *vdev, u8 status)
-{
- BUG_ON(!status);
- to_kvmdev(vdev)->desc->status = status;
- kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS,
- (unsigned long) to_kvmdev(vdev)->desc);
-}
-
-/*
- * To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the
- * descriptor address. The Host will zero the status and all the
- * features.
- */
-static void kvm_reset(struct virtio_device *vdev)
-{
- kvm_hypercall1(KVM_S390_VIRTIO_RESET,
- (unsigned long) to_kvmdev(vdev)->desc);
-}
-
-/*
- * When the virtio_ring code wants to notify the Host, it calls us here and we
- * make a hypercall. We hand the address of the virtqueue so the Host
- * knows which virtqueue we're talking about.
- */
-static bool kvm_notify(struct virtqueue *vq)
-{
- long rc;
- struct kvm_vqconfig *config = vq->priv;
-
- rc = kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
- if (rc < 0)
- return false;
- return true;
-}
-
-/*
- * This routine finds the first virtqueue described in the configuration of
- * this device and sets it up.
- */
-static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
- unsigned index,
- void (*callback)(struct virtqueue *vq),
- const char *name, bool ctx)
-{
- struct kvm_device *kdev = to_kvmdev(vdev);
- struct kvm_vqconfig *config;
- struct virtqueue *vq;
- int err;
-
- if (index >= kdev->desc->num_vq)
- return ERR_PTR(-ENOENT);
-
- if (!name)
- return NULL;
-
- config = kvm_vq_config(kdev->desc)+index;
-
- err = vmem_add_mapping(config->address,
- vring_size(config->num,
- KVM_S390_VIRTIO_RING_ALIGN));
- if (err)
- goto out;
-
- vq = vring_new_virtqueue(index, config->num, KVM_S390_VIRTIO_RING_ALIGN,
- vdev, true, ctx, (void *) config->address,
- kvm_notify, callback, name);
- if (!vq) {
- err = -ENOMEM;
- goto unmap;
- }
-
- /*
- * register a callback token
- * The host will sent this via the external interrupt parameter
- */
- config->token = (u64) vq;
-
- vq->priv = config;
- return vq;
-unmap:
- vmem_remove_mapping(config->address,
- vring_size(config->num,
- KVM_S390_VIRTIO_RING_ALIGN));
-out:
- return ERR_PTR(err);
-}
-
-static void kvm_del_vq(struct virtqueue *vq)
-{
- struct kvm_vqconfig *config = vq->priv;
-
- vring_del_virtqueue(vq);
- vmem_remove_mapping(config->address,
- vring_size(config->num,
- KVM_S390_VIRTIO_RING_ALIGN));
-}
-
-static void kvm_del_vqs(struct virtio_device *vdev)
-{
- struct virtqueue *vq, *n;
-
- list_for_each_entry_safe(vq, n, &vdev->vqs, list)
- kvm_del_vq(vq);
-}
-
-static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[],
- const bool *ctx,
- struct irq_affinity *desc)
-{
- struct kvm_device *kdev = to_kvmdev(vdev);
- int i;
-
- /* We must have this many virtqueues. */
- if (nvqs > kdev->desc->num_vq)
- return -ENOENT;
-
- for (i = 0; i < nvqs; ++i) {
- vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i],
- ctx ? ctx[i] : false);
- if (IS_ERR(vqs[i]))
- goto error;
- }
- return 0;
-
-error:
- kvm_del_vqs(vdev);
- return PTR_ERR(vqs[i]);
-}
-
-static const char *kvm_bus_name(struct virtio_device *vdev)
-{
- return "";
-}
-
-/*
- * The config ops structure as defined by virtio config
- */
-static const struct virtio_config_ops kvm_vq_configspace_ops = {
- .get_features = kvm_get_features,
- .finalize_features = kvm_finalize_features,
- .get = kvm_get,
- .set = kvm_set,
- .get_status = kvm_get_status,
- .set_status = kvm_set_status,
- .reset = kvm_reset,
- .find_vqs = kvm_find_vqs,
- .del_vqs = kvm_del_vqs,
- .bus_name = kvm_bus_name,
-};
-
-/*
- * The root device for the kvm virtio devices.
- * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2.
- */
-static struct device *kvm_root;
-
-/*
- * adds a new device and register it with virtio
- * appropriate drivers are loaded by the device model
- */
-static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset)
-{
- struct kvm_device *kdev;
-
- kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
- if (!kdev) {
- printk(KERN_EMERG "Cannot allocate kvm dev %u type %u\n",
- offset, d->type);
- return;
- }
-
- kdev->vdev.dev.parent = kvm_root;
- kdev->vdev.id.device = d->type;
- kdev->vdev.config = &kvm_vq_configspace_ops;
- kdev->desc = d;
-
- if (register_virtio_device(&kdev->vdev) != 0) {
- printk(KERN_ERR "Failed to register kvm device %u type %u\n",
- offset, d->type);
- kfree(kdev);
- }
-}
-
-/*
- * scan_devices() simply iterates through the device page.
- * The type 0 is reserved to mean "end of devices".
- */
-static void scan_devices(void)
-{
- unsigned int i;
- struct kvm_device_desc *d;
-
- for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
- d = kvm_devices + i;
-
- if (d->type == 0)
- break;
-
- add_kvm_device(d, i);
- }
-}
-
-/*
- * match for a kvm device with a specific desc pointer
- */
-static int match_desc(struct device *dev, void *data)
-{
- struct virtio_device *vdev = dev_to_virtio(dev);
- struct kvm_device *kdev = to_kvmdev(vdev);
-
- return kdev->desc == data;
-}
-
-/*
- * hotplug_device tries to find changes in the device page.
- */
-static void hotplug_devices(struct work_struct *dummy)
-{
- unsigned int i;
- struct kvm_device_desc *d;
- struct device *dev;
-
- for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
- d = kvm_devices + i;
-
- /* end of list */
- if (d->type == 0)
- break;
-
- /* device already exists */
- dev = device_find_child(kvm_root, d, match_desc);
- if (dev) {
- /* XXX check for hotplug remove */
- put_device(dev);
- continue;
- }
-
- /* new device */
- printk(KERN_INFO "Adding new virtio device %p\n", d);
- add_kvm_device(d, i);
- }
-}
-
-/*
- * we emulate the request_irq behaviour on top of s390 extints
- */
-static void kvm_extint_handler(struct ext_code ext_code,
- unsigned int param32, unsigned long param64)
-{
- struct virtqueue *vq;
- u32 param;
-
- if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64)
- return;
- inc_irq_stat(IRQEXT_VRT);
-
- /* The LSB might be overloaded, we have to mask it */
- vq = (struct virtqueue *)(param64 & ~1UL);
-
- /* We use ext_params to decide what this interrupt means */
- param = param32 & VIRTIO_PARAM_MASK;
-
- switch (param) {
- case VIRTIO_PARAM_CONFIG_CHANGED:
- virtio_config_changed(vq->vdev);
- break;
- case VIRTIO_PARAM_DEV_ADD:
- schedule_work(&hotplug_work);
- break;
- case VIRTIO_PARAM_VRING_INTERRUPT:
- default:
- vring_interrupt(0, vq);
- break;
- }
-}
-
-/*
- * For s390-virtio, we expect a page above main storage containing
- * the virtio configuration. Try to actually load from this area
- * in order to figure out if the host provides this page.
- */
-static int __init test_devices_support(unsigned long addr)
-{
- int ret = -EIO;
-
- asm volatile(
- "0: lura 0,%1\n"
- "1: xgr %0,%0\n"
- "2:\n"
- EX_TABLE(0b,2b)
- EX_TABLE(1b,2b)
- : "+d" (ret)
- : "a" (addr)
- : "0", "cc");
- return ret;
-}
-/*
- * Init function for virtio
- * devices are in a single page above top of "normal" + standby mem
- */
-static int __init kvm_devices_init(void)
-{
- int rc;
- unsigned long total_memory_size = sclp.rzm * sclp.rnmax;
-
- if (!MACHINE_IS_KVM)
- return -ENODEV;
-
- if (test_devices_support(total_memory_size) < 0)
- return -ENODEV;
-
- pr_warn("The s390-virtio transport is deprecated. Please switch to a modern host providing virtio-ccw.\n");
-
- rc = vmem_add_mapping(total_memory_size, PAGE_SIZE);
- if (rc)
- return rc;
-
- kvm_devices = (void *) total_memory_size;
-
- kvm_root = root_device_register("kvm_s390");
- if (IS_ERR(kvm_root)) {
- rc = PTR_ERR(kvm_root);
- printk(KERN_ERR "Could not register kvm_s390 root device");
- vmem_remove_mapping(total_memory_size, PAGE_SIZE);
- return rc;
- }
-
- INIT_WORK(&hotplug_work, hotplug_devices);
-
- irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
- register_external_irq(EXT_IRQ_CP_SERVICE, kvm_extint_handler);
-
- scan_devices();
- return 0;
-}
-
-/* code for early console output with virtio_console */
-static int early_put_chars(u32 vtermno, const char *buf, int count)
-{
- char scratch[17];
- unsigned int len = count;
-
- if (len > sizeof(scratch) - 1)
- len = sizeof(scratch) - 1;
- scratch[len] = '\0';
- memcpy(scratch, buf, len);
- kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, __pa(scratch));
- return len;
-}
-
-static int __init s390_virtio_console_init(void)
-{
- if (sclp.has_vt220 || sclp.has_linemode)
- return -ENODEV;
- return virtio_cons_early_init(early_put_chars);
-}
-console_initcall(s390_virtio_console_init);
-
-
-/*
- * We do this after core stuff, but before the drivers.
- */
-postcore_initcall(kvm_devices_init);
diff --git a/drivers/scsi/.gitignore b/drivers/scsi/.gitignore
index c89ae9a04399..e2956741fbd1 100644
--- a/drivers/scsi/.gitignore
+++ b/drivers/scsi/.gitignore
@@ -1 +1,2 @@
53c700_d.h
+scsi_devinfo_tbl.c
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 41366339b950..8a739b74cfb7 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -130,7 +130,8 @@ config CHR_DEV_OSST
config BLK_DEV_SR
tristate "SCSI CDROM support"
- depends on SCSI
+ depends on SCSI && BLK_DEV
+ select CDROM
---help---
If you want to use a CD or DVD drive attached to your computer
by SCSI, FireWire, USB or ATAPI, say Y and read the SCSI-HOWTO
@@ -786,7 +787,7 @@ config SCSI_IBMVSCSIS
depends on PPC_PSERIES && TARGET_CORE && SCSI && PCI
help
This is the IBM POWER Virtual SCSI Target Server
- This driver uses the SRP protocol for communication betwen servers
+ This driver uses the SRP protocol for communication between servers
guest and/or the host that run on the same server.
More information on VSCSI protocol can be found at www.power.org
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1639bf8b1ab6..fcfd28d2884c 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -192,6 +192,14 @@ clean-files := 53c700_d.h 53c700_u.h
$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
+$(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
+
+quiet_cmd_bflags = GEN $@
+ cmd_bflags = sed -n 's/.*BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
+
+$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h
+ $(call if_changed,bflags)
+
# If you want to play with the firmware, uncomment
# GENERATE_FIRMWARE := 1
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 777b0222d021..90ea0f5d9bdb 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -1908,8 +1908,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
switch (extended_msg[2]) {
case EXTENDED_SDTR:
case EXTENDED_WDTR:
- case EXTENDED_MODIFY_DATA_POINTER:
- case EXTENDED_EXTENDED_IDENTIFY:
tmp = 0;
}
} else if (len) {
@@ -1932,18 +1930,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
* reject it.
*/
default:
- if (!tmp) {
- shost_printk(KERN_ERR, instance, "rejecting message ");
- spi_print_msg(extended_msg);
- printk("\n");
- } else if (tmp != EXTENDED_MESSAGE)
- scmd_printk(KERN_INFO, cmd,
- "rejecting unknown message %02x\n",
- tmp);
- else
+ if (tmp == EXTENDED_MESSAGE)
scmd_printk(KERN_INFO, cmd,
"rejecting unknown extended message code %02x, length %d\n",
- extended_msg[1], extended_msg[0]);
+ extended_msg[2], extended_msg[1]);
+ else if (tmp)
+ scmd_printk(KERN_INFO, cmd,
+ "rejecting unknown message code %02x\n",
+ tmp);
msgout = MESSAGE_REJECT;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index dfe8e70f8d99..525a652dab48 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2383,19 +2383,19 @@ fib_free_out:
goto out;
}
-int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now)
+int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
{
struct tm cur_tm;
char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
u32 datasize = sizeof(wellness_str);
- unsigned long local_time;
+ time64_t local_time;
int ret = -ENODEV;
if (!dev->sa_firmware)
goto out;
- local_time = (u32)(now->tv_sec - (sys_tz.tz_minuteswest * 60));
- time_to_tm(local_time, 0, &cur_tm);
+ local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
+ time64_to_tm(local_time, 0, &cur_tm);
cur_tm.tm_mon += 1;
cur_tm.tm_year += 1900;
wellness_str[8] = bin2bcd(cur_tm.tm_hour);
@@ -2412,7 +2412,7 @@ out:
return ret;
}
-int aac_send_hosttime(struct aac_dev *dev, struct timeval *now)
+int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
{
int ret = -ENOMEM;
struct fib *fibptr;
@@ -2424,7 +2424,7 @@ int aac_send_hosttime(struct aac_dev *dev, struct timeval *now)
aac_fib_init(fibptr);
info = (__le32 *)fib_data(fibptr);
- *info = cpu_to_le32(now->tv_sec);
+ *info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
1, 1, NULL, NULL);
@@ -2496,7 +2496,7 @@ int aac_command_thread(void *data)
}
if (!time_before(next_check_jiffies,next_jiffies)
&& ((difference = next_jiffies - jiffies) <= 0)) {
- struct timeval now;
+ struct timespec64 now;
int ret;
/* Don't even try to talk to adapter if its sick */
@@ -2506,15 +2506,15 @@ int aac_command_thread(void *data)
next_check_jiffies = jiffies
+ ((long)(unsigned)check_interval)
* HZ;
- do_gettimeofday(&now);
+ ktime_get_real_ts64(&now);
/* Synchronize our watches */
- if (((1000000 - (1000000 / HZ)) > now.tv_usec)
- && (now.tv_usec > (1000000 / HZ)))
- difference = (((1000000 - now.tv_usec) * HZ)
- + 500000) / 1000000;
+ if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
+ && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
+ difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ)
+ + NSEC_PER_SEC / 2) / NSEC_PER_SEC;
else {
- if (now.tv_usec > 500000)
+ if (now.tv_nsec > NSEC_PER_SEC / 2)
++now.tv_sec;
if (dev->sa_firmware)
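Note on the timespec64 conversion above: the resynchronisation math is unchanged, only restated in nanosecond units. When tv_nsec lies between one tick and one second minus one tick, the next wakeup is rounded to the nearest jiffy of the time left until the next full second. A worked example, assuming HZ=100 and now.tv_nsec = 300,000,000: difference = ((1,000,000,000 - 300,000,000) * 100 + 500,000,000) / 1,000,000,000 = 70 jiffies, i.e. roughly the 0.7 s remaining in the current second.
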
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 381846164003..6612ff3b2e83 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2212,7 +2212,7 @@ ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
* by the capabilities of the bus connectivity of and sync settings for
* the target.
*/
-const struct ahc_syncrate *
+static const struct ahc_syncrate *
ahc_devlimited_syncrate(struct ahc_softc *ahc,
struct ahc_initiator_tinfo *tinfo,
u_int *period, u_int *ppr_options, role_t role)
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index acd687f4554e..c6be3aeb302b 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1141,7 +1141,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
* or forcing transfer negotiations on the next command to any
* target.
*/
-void
+static void
ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc)
{
int i;
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 7cbc7213b2b2..5402b85b0bdc 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -721,11 +721,8 @@ Out:
*/
static void asd_chip_reset(struct asd_ha_struct *asd_ha)
{
- struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
-
ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev));
asd_chip_hardrst(asd_ha);
- sas_ha->notify_ha_event(sas_ha, HAE_RESET);
}
/* ---------- Done List Routines ---------- */
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 55e3f8b40eb3..e035acf56652 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -81,12 +81,12 @@ static inline void queue_tail_inc(struct be_queue_info *q)
/*ISCSI */
struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
- u32 min_eqd; /* in usecs */
- u32 max_eqd; /* in usecs */
- u32 prev_eqd; /* in usecs */
- u32 et_eqd; /* configured val when aic is off */
- ulong jiffies;
- u64 eq_prev; /* Used to calculate eqe */
+ unsigned long jiffies;
+ u32 eq_prev; /* Used to calculate eqe */
+ u32 prev_eqd;
+#define BEISCSI_EQ_DELAY_MIN 0
+#define BEISCSI_EQ_DELAY_DEF 32
+#define BEISCSI_EQ_DELAY_MAX 128
};
struct be_eq_obj {
@@ -148,9 +148,8 @@ struct be_ctrl_info {
/* TAG is from 1...MAX_MCC_CMD, MASK includes MAX_MCC_CMD */
#define MCC_Q_CMD_TAG_MASK ((MAX_MCC_CMD << 1) - 1)
-#define PAGE_SHIFT_4K 12
-#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
-#define mcc_timeout 120000 /* 12s timeout */
+#define PAGE_SHIFT_4K 12
+#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) \
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index a79a5e72c777..2eb66df3e3d6 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -675,8 +675,8 @@ static int be_mbox_notify(struct be_ctrl_info *ctrl)
return status;
}
-void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
- bool embedded, u8 sge_cnt)
+void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, u32 payload_len,
+ bool embedded, u8 sge_cnt)
{
if (embedded)
wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;
@@ -688,7 +688,7 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
}
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
- u8 subsystem, u8 opcode, int cmd_len)
+ u8 subsystem, u8 opcode, u32 cmd_len)
{
req_hdr->opcode = opcode;
req_hdr->subsystem = subsystem;
@@ -947,7 +947,6 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
default:
mutex_unlock(&ctrl->mbox_lock);
BUG();
- return -ENXIO;
}
be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
if (queue_type != QTYPE_SGL)
@@ -1522,6 +1521,52 @@ int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
return ret;
}
+int beiscsi_set_host_data(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_cmd_set_host_data *ioctl;
+ struct be_mcc_wrb *wrb;
+ int ret = 0;
+
+ if (is_chip_be2_be3r(phba))
+ return ret;
+
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ memset(wrb, 0, sizeof(*wrb));
+ ioctl = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
+ be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_HOST_DATA,
+ EMBED_MBX_MAX_PAYLOAD_SIZE);
+ ioctl->param.req.param_id = BE_CMD_SET_HOST_PARAM_ID;
+ ioctl->param.req.param_len =
+ snprintf((char *)ioctl->param.req.param_data,
+ sizeof(ioctl->param.req.param_data),
+ "Linux iSCSI v%s", BUILD_STR);
+ ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len, 4);
+ if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION)
+ ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION;
+ ret = be_mbox_notify(ctrl);
+ if (!ret) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : HBA set host driver version\n");
+ } else {
+ /**
+ * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
+ * Older FW versions return this error.
+ */
+ if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
+ ret == MCC_STATUS_INVALID_LENGTH)
+ __beiscsi_log(phba, KERN_INFO,
+ "BG_%d : HBA failed to set host driver version\n");
+ }
+
+ mutex_unlock(&ctrl->mbox_lock);
+ return ret;
+}
+
int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index d9b6773facdb..6f05d1dfa10a 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -230,6 +230,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
#define OPCODE_COMMON_FUNCTION_RESET 61
#define OPCODE_COMMON_GET_PORT_NAME 77
+#define OPCODE_COMMON_SET_HOST_DATA 93
#define OPCODE_COMMON_SET_FEATURES 191
/**
@@ -737,6 +738,30 @@ struct be_cmd_hba_name {
u8 initiator_alias[BE_INI_ALIAS_LEN];
} __packed;
+/******************** COMMON SET HOST DATA *******************/
+#define BE_CMD_SET_HOST_PARAM_ID 0x2
+#define BE_CMD_MAX_DRV_VERSION 0x30
+struct be_sethost_req {
+ u32 param_id;
+ u32 param_len;
+ u32 param_data[32];
+};
+
+struct be_sethost_resp {
+ u32 rsvd0;
+};
+
+struct be_cmd_set_host_data {
+ union {
+ struct be_cmd_req_hdr req_hdr;
+ struct be_cmd_resp_hdr resp_hdr;
+ } h;
+ union {
+ struct be_sethost_req req;
+ struct be_sethost_resp resp;
+ } param;
+} __packed;
+
/******************** COMMON SET Features *******************/
#define BE_CMD_SET_FEATURE_UER 0x10
#define BE_CMD_UER_SUPP_BIT 0x1
@@ -793,8 +818,6 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
struct be_queue_info *mccq,
struct be_queue_info *cq);
-unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
-
void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag);
int beiscsi_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
@@ -847,6 +870,7 @@ int beiscsi_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba);
int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba);
int beiscsi_set_uer_feature(struct beiscsi_hba *phba);
+int beiscsi_set_host_data(struct beiscsi_hba *phba);
struct be_default_pdu_context {
u32 dw[4];
@@ -1274,19 +1298,9 @@ struct be_cmd_get_port_name {
* a read command
*/
#define TGT_CTX_UPDT_CMD 7 /* Target context update */
-#define TGT_STS_CMD 8 /* Target R2T and other BHS
- * where only the status number
- * need to be updated
- */
-#define TGT_DATAIN_CMD 9 /* Target Data-Ins in response
- * to read command
- */
-#define TGT_SOS_PDU 10 /* Target:standalone status
- * response
- */
#define TGT_DM_CMD 11 /* Indicates that the bhs
- * preparedby
- * driver should not be touched
+ * prepared by driver should not
+ * be touched.
*/
/* Returns the number of items in the field array. */
@@ -1444,9 +1458,9 @@ struct be_cmd_get_port_name {
* the cxn
*/
-void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
+void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, u32 payload_len,
bool embedded, u8 sge_cnt);
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
- u8 subsystem, u8 opcode, int cmd_len);
+ u8 subsystem, u8 opcode, u32 cmd_len);
#endif /* !BEISCSI_CMDS_H */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 43a80ce5ce6a..a398c54139aa 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -684,41 +684,6 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
}
/**
- * beiscsi_get_initname - Read Initiator Name from flash
- * @buf: buffer bointer
- * @phba: The device priv structure instance
- *
- * returns number of bytes
- */
-static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
-{
- int rc;
- unsigned int tag;
- struct be_mcc_wrb *wrb;
- struct be_cmd_hba_name *resp;
-
- tag = be_cmd_get_initname(phba);
- if (!tag) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Getting Initiator Name Failed\n");
-
- return -EBUSY;
- }
-
- rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
- if (rc) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BS_%d : Initiator Name MBX Failed\n");
- return rc;
- }
-
- resp = embedded_payload(wrb);
- rc = sprintf(buf, "%s\n", resp->initiator_name);
- return rc;
-}
-
-/**
* beiscsi_get_port_state - Get the Port State
* @shost : pointer to scsi_host structure
*
@@ -772,7 +737,6 @@ static void beiscsi_get_port_speed(struct Scsi_Host *shost)
* @param: parameter type identifier
* @buf: buffer pointer
*
- * returns host parameter
*/
int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
@@ -783,7 +747,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
if (!beiscsi_hba_is_online(phba)) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : HBA in error 0x%lx\n", phba->state);
- return -EBUSY;
+ return 0;
}
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_get_host_param, param = %d\n", param);
@@ -794,15 +758,19 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
if (status < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : beiscsi_get_macaddr Failed\n");
- return status;
+ return 0;
}
break;
case ISCSI_HOST_PARAM_INITIATOR_NAME:
- status = beiscsi_get_initname(buf, phba);
+ /* try fetching user configured name first */
+ status = beiscsi_get_initiator_name(phba, buf, true);
if (status < 0) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Retreiving Initiator Name Failed\n");
- return status;
+ status = beiscsi_get_initiator_name(phba, buf, false);
+ if (status < 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Retreiving Initiator Name Failed\n");
+ status = 0;
+ }
}
break;
case ISCSI_HOST_PARAM_PORT_STATE:
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index b9d459a21f25..f41dfda97e17 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index d8bd6f2c9c83..be96aa1e5077 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -455,14 +455,12 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
return -ENOMEM;
phba->ctrl.csr = addr;
phba->csr_va = addr;
- phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
if (addr == NULL)
goto pci_map_err;
phba->ctrl.db = addr;
phba->db_va = addr;
- phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
if (phba->generation == BE_GEN2)
pcicfg_reg = 1;
@@ -476,7 +474,6 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
goto pci_map_err;
phba->ctrl.pcicfg = addr;
phba->pci_va = addr;
- phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
return 0;
pci_map_err:
@@ -790,6 +787,24 @@ static irqreturn_t be_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void beiscsi_free_irqs(struct beiscsi_hba *phba)
+{
+ struct hwi_context_memory *phwi_context;
+ int i;
+
+ if (!phba->pcidev->msix_enabled) {
+ if (phba->pcidev->irq)
+ free_irq(phba->pcidev->irq, phba);
+ return;
+ }
+
+ phwi_context = phba->phwi_ctrlr->phwi_ctxt;
+ for (i = 0; i <= phba->num_cpus; i++) {
+ free_irq(pci_irq_vector(phba->pcidev, i),
+ &phwi_context->be_eq[i]);
+ kfree(phba->msi_name[i]);
+ }
+}
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
@@ -803,15 +818,14 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
if (pcidev->msix_enabled) {
for (i = 0; i < phba->num_cpus; i++) {
- phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
- GFP_KERNEL);
+ phba->msi_name[i] = kasprintf(GFP_KERNEL,
+ "beiscsi_%02x_%02x",
+ phba->shost->host_no, i);
if (!phba->msi_name[i]) {
ret = -ENOMEM;
goto free_msix_irqs;
}
- sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
- phba->shost->host_no, i);
ret = request_irq(pci_irq_vector(pcidev, i),
be_isr_msix, 0, phba->msi_name[i],
&phwi_context->be_eq[i]);
@@ -824,13 +838,12 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
goto free_msix_irqs;
}
}
- phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
+ phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x",
+ phba->shost->host_no);
if (!phba->msi_name[i]) {
ret = -ENOMEM;
goto free_msix_irqs;
}
- sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
- phba->shost->host_no);
ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
phba->msi_name[i], &phwi_context->be_eq[i]);
if (ret) {
@@ -924,12 +937,11 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
* this can happen if clean_task is called on a task that
* failed in xmit_task or alloc_pdu.
*/
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
- "BM_%d : Double Free in IO SGL io_sgl_free_index=%d,"
- "value there=%p\n", phba->io_sgl_free_index,
- phba->io_sgl_hndl_base
- [phba->io_sgl_free_index]);
- spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
+ "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n",
+ phba->io_sgl_free_index,
+ phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
return;
}
phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -1864,8 +1876,8 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
- code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
- 32] & CQE_CODE_MASK);
+ code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] &
+ CQE_CODE_MASK);
/* Get the CID */
if (is_chip_be2_be3r(phba)) {
@@ -3024,7 +3036,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
mem->dma = paddr;
ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
- phwi_context->cur_eqd);
+ BEISCSI_EQ_DELAY_DEF);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : beiscsi_cmd_eq_create"
@@ -3508,13 +3520,14 @@ static int be_mcc_queues_create(struct beiscsi_hba *phba,
goto err;
/* Ask BE to create MCC compl queue; */
if (phba->pcidev->msix_enabled) {
- if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
- [phba->num_cpus].q, false, true, 0))
- goto mcc_cq_free;
+ if (beiscsi_cmd_cq_create(ctrl, cq,
+ &phwi_context->be_eq[phba->num_cpus].q,
+ false, true, 0))
+ goto mcc_cq_free;
} else {
if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
false, true, 0))
- goto mcc_cq_free;
+ goto mcc_cq_free;
}
/* Alloc MCC queue */
@@ -3689,9 +3702,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- phwi_context->max_eqd = 128;
- phwi_context->min_eqd = 0;
- phwi_context->cur_eqd = 32;
/* set port optic state to unknown */
phba->optic_state = 0xff;
@@ -4792,10 +4802,10 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
sg = scsi_sglist(sc);
if (sc->sc_data_direction == DMA_TO_DEVICE)
writedir = 1;
- else
+ else
writedir = 0;
- return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
+ return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
}
/**
@@ -4917,6 +4927,13 @@ void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
schedule_work(&phba->boot_work);
}
+/**
+ * Boot flag info for iscsi-utilities
+ * Bit 0 Block valid flag
+ * Bit 1 Firmware booting selected
+ */
+#define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3
+
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
struct beiscsi_hba *phba = data;
@@ -4972,7 +4989,7 @@ static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
auth_data.chap.intr_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = sprintf(str, "2\n");
+ rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = sprintf(str, "0\n");
@@ -5004,7 +5021,7 @@ static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
- rc = sprintf(str, "2\n");
+ rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS);
break;
case ISCSI_BOOT_ETH_INDEX:
rc = sprintf(str, "0\n");
@@ -5209,8 +5226,8 @@ static void beiscsi_eqd_update_work(struct work_struct *work)
if (eqd < 8)
eqd = 0;
- eqd = min_t(u32, eqd, phwi_context->max_eqd);
- eqd = max_t(u32, eqd, phwi_context->min_eqd);
+ eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX);
+ eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN);
aic->jiffies = now;
aic->eq_prev = pbe_eq->cq_count;
@@ -5298,6 +5315,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
be2iscsi_enable_msix(phba);
beiscsi_get_params(phba);
+ beiscsi_set_host_data(phba);
/* Re-enable UER. If different TPE occurs then it is recoverable. */
beiscsi_set_uer_feature(phba);
@@ -5387,15 +5405,7 @@ static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
hwi_disable_intr(phba);
- if (phba->pcidev->msix_enabled) {
- for (i = 0; i <= phba->num_cpus; i++) {
- free_irq(pci_irq_vector(phba->pcidev, i),
- &phwi_context->be_eq[i]);
- kfree(phba->msi_name[i]);
- }
- } else
- if (phba->pcidev->irq)
- free_irq(phba->pcidev->irq, phba);
+ beiscsi_free_irqs(phba);
pci_free_irq_vectors(phba->pcidev);
for (i = 0; i < phba->num_cpus; i++) {
@@ -5586,12 +5596,12 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : be_ctrl_init failed\n");
- goto hba_free;
+ goto free_hba;
}
ret = beiscsi_init_sliport(phba);
if (ret)
- goto hba_free;
+ goto free_hba;
spin_lock_init(&phba->io_sgl_lock);
spin_lock_init(&phba->mgmt_sgl_lock);
@@ -5604,6 +5614,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
}
beiscsi_get_port_name(&phba->ctrl, phba);
beiscsi_get_params(phba);
+ beiscsi_set_host_data(phba);
beiscsi_set_uer_feature(phba);
be2iscsi_enable_msix(phba);
@@ -5671,13 +5682,13 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : beiscsi_dev_probe-"
"Failed to beiscsi_init_irqs\n");
- goto free_blkenbld;
+ goto disable_iopoll;
}
hwi_enable_intr(phba);
ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
if (ret)
- goto free_blkenbld;
+ goto free_irqs;
/* set online bit after port is operational */
set_bit(BEISCSI_HBA_ONLINE, &phba->state);
@@ -5713,12 +5724,15 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
"\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
return 0;
-free_blkenbld:
- destroy_workqueue(phba->wq);
+free_irqs:
+ hwi_disable_intr(phba);
+ beiscsi_free_irqs(phba);
+disable_iopoll:
for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
irq_poll_disable(&pbe_eq->iopoll);
}
+ destroy_workqueue(phba->wq);
free_twq:
hwi_cleanup_port(phba);
beiscsi_cleanup_port(phba);
@@ -5727,9 +5741,9 @@ free_port:
pci_free_consistent(phba->pcidev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
- phba->ctrl.mbox_mem_alloced.dma);
+ phba->ctrl.mbox_mem_alloced.dma);
beiscsi_unmap_pci_function(phba);
-hba_free:
+free_hba:
pci_disable_msix(phba->pcidev);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 81ce3ffda968..42bb6bdb68bd 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -31,7 +31,7 @@
#include <scsi/scsi_transport_iscsi.h>
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "11.4.0.0"
+#define BUILD_STR "11.4.0.1"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -59,7 +59,7 @@
#define BE2_DEFPDU_DATA_SZ 8192
#define BE2_MAX_NUM_CQ_PROC 512
-#define MAX_CPUS 64
+#define MAX_CPUS 64U
#define BEISCSI_MAX_NUM_CPUS 7
#define BEISCSI_VER_STRLEN 32
@@ -77,9 +77,7 @@
#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
-#define BEISCSI_NUM_DEVICES_SUPPORTED 0x01
#define BEISCSI_MAX_FRAGS_INIT 192
-#define BE_NUM_MSIX_ENTRIES 1
#define BE_SENSE_INFO_SIZE 258
#define BE_ISCSI_PDU_HEADER_SIZE 64
@@ -155,8 +153,6 @@
#define PAGES_REQUIRED(x) \
((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE))
-#define BEISCSI_MSI_NAME 20 /* size of msi_name string */
-
#define MEM_DESCR_OFFSET 8
#define BEISCSI_DEFQ_HDR 1
#define BEISCSI_DEFQ_DATA 0
@@ -209,13 +205,8 @@ struct mem_array {
};
struct be_mem_descriptor {
- unsigned int index; /* Index of this memory parameter */
- unsigned int category; /* type indicates cached/non-cached */
- unsigned int num_elements; /* number of elements in this
- * descriptor
- */
- unsigned int alignment_mask; /* Alignment mask for this block */
unsigned int size_in_bytes; /* Size required by memory block */
+ unsigned int num_elements;
struct mem_array *mem_array;
};
@@ -238,32 +229,12 @@ struct hba_parameters {
unsigned int num_eq_entries;
unsigned int wrbs_per_cxn;
unsigned int hwi_ws_sz;
- /**
- * These are calculated from other params. They're here
- * for debug purposes
- */
- unsigned int num_mcc_pages;
- unsigned int num_mcc_cq_pages;
- unsigned int num_cq_pages;
- unsigned int num_eq_pages;
-
- unsigned int num_async_pdu_buf_pages;
- unsigned int num_async_pdu_buf_sgl_pages;
- unsigned int num_async_pdu_buf_cq_pages;
-
- unsigned int num_async_pdu_hdr_pages;
- unsigned int num_async_pdu_hdr_sgl_pages;
- unsigned int num_async_pdu_hdr_cq_pages;
-
- unsigned int num_sge;
};
#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
(phwi_ctrlr->wrb_context[cri].ulp_num)
struct hwi_wrb_context {
spinlock_t wrb_lock;
- struct list_head wrb_handle_list;
- struct list_head wrb_handle_drvr_list;
struct wrb_handle **pwrb_handle_base;
struct wrb_handle **pwrb_handle_basestd;
struct iscsi_wrb *plast_wrb;
@@ -272,8 +243,6 @@ struct hwi_wrb_context {
unsigned short wrb_handles_available;
unsigned short cid;
uint8_t ulp_num; /* ULP to which CID binded */
- uint16_t register_set;
- uint16_t doorbell_format;
uint32_t doorbell_offset;
};
@@ -310,9 +279,6 @@ struct beiscsi_hba {
u8 __iomem *csr_va; /* CSR */
u8 __iomem *db_va; /* Door Bell */
u8 __iomem *pci_va; /* PCI Config */
- struct be_bus_address csr_pa; /* CSR */
- struct be_bus_address db_pa; /* CSR */
- struct be_bus_address pci_pa; /* CSR */
/* PCI representation of our HBA */
struct pci_dev *pcidev;
unsigned int num_cpus;
@@ -324,7 +290,6 @@ struct beiscsi_hba {
unsigned short io_sgl_free_index;
unsigned short io_sgl_hndl_avbl;
struct sgl_handle **io_sgl_hndl_base;
- struct sgl_handle **sgl_hndl_array;
unsigned short eh_sgl_alloc_index;
unsigned short eh_sgl_free_index;
@@ -1009,10 +974,6 @@ struct be_ring {
};
struct hwi_controller {
- struct list_head io_sgl_list;
- struct list_head eh_sgl_list;
- struct sgl_handle *psgl_handle_base;
-
struct hwi_wrb_context *wrb_context;
struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT];
struct be_ring default_pdu_data[BEISCSI_ULP_COUNT];
@@ -1036,10 +997,6 @@ struct wrb_handle {
};
struct hwi_context_memory {
- /* Adaptive interrupt coalescing (AIC) info */
- u16 min_eqd; /* in usecs */
- u16 max_eqd; /* in usecs */
- u16 cur_eqd; /* in usecs */
struct be_eq_obj be_eq[MAX_CPUS];
struct be_queue_info be_cq[MAX_CPUS - 1];
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index c73775368d09..66ca967f2850 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -19,43 +19,6 @@
#include "be_iscsi.h"
#include "be_main.h"
-int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
- struct be_set_eqd *set_eqd,
- int num)
-{
- struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_modify_eq_delay *req;
- unsigned int tag;
- int i;
-
- mutex_lock(&ctrl->mbox_lock);
- wrb = alloc_mcc_wrb(phba, &tag);
- if (!wrb) {
- mutex_unlock(&ctrl->mbox_lock);
- return 0;
- }
-
- req = embedded_payload(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
-
- req->num_eq = cpu_to_le32(num);
- for (i = 0; i < num; i++) {
- req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
- req->delay[i].phase = 0;
- req->delay[i].delay_multiplier =
- cpu_to_le32(set_eqd[i].delay_multiplier);
- }
-
- /* ignore the completion of this mbox command */
- set_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state);
- be_mcc_notify(phba, tag);
- mutex_unlock(&ctrl->mbox_lock);
- return tag;
-}
-
unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba,
struct bsg_job *job,
@@ -156,7 +119,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BG_%d : unknown addr family %d\n",
dst_addr->sa_family);
- return -EINVAL;
+ return 0;
}
phwi_ctrlr = phba->phwi_ctrlr;
@@ -236,16 +199,19 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
}
/*
- * mgmt_exec_nonemb_cmd()- Execute Non Embedded MBX Cmd
- * @phba: Driver priv structure
- * @nonemb_cmd: Address of the MBX command issued
- * @resp_buf: Buffer to copy the MBX cmd response
- * @resp_buf_len: respone lenght to be copied
+ * beiscsi_exec_nemb_cmd()- execute non-embedded MBX cmd
+ * @phba: driver priv structure
+ * @nonemb_cmd: DMA address of the MBX command to be issued
+ * @cbfn: callback func on MCC completion
+ * @resp_buf: buffer to copy the MBX cmd response
+ * @resp_buf_len: response length to be copied
*
**/
-static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
- struct be_dma_mem *nonemb_cmd, void *resp_buf,
- int resp_buf_len)
+static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
+ struct be_dma_mem *nonemb_cmd,
+ void (*cbfn)(struct beiscsi_hba *,
+ unsigned int),
+ void *resp_buf, u32 resp_buf_len)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
@@ -267,36 +233,54 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma));
sge->len = cpu_to_le32(nonemb_cmd->size);
+ if (cbfn) {
+ struct be_dma_mem *tag_mem;
+
+ set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
+ ctrl->ptag_state[tag].cbfn = cbfn;
+ tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
+
+ /* store DMA mem to be freed in callback */
+ tag_mem->size = nonemb_cmd->size;
+ tag_mem->va = nonemb_cmd->va;
+ tag_mem->dma = nonemb_cmd->dma;
+ }
be_mcc_notify(phba, tag);
mutex_unlock(&ctrl->mbox_lock);
+ /* with cbfn set, its async cmd, don't wait */
+ if (cbfn)
+ return 0;
+
rc = beiscsi_mccq_compl_wait(phba, tag, NULL, nonemb_cmd);
+ /* copy the response, if any */
if (resp_buf)
memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
+ /**
+ * This is special case of NTWK_GET_IF_INFO where the size of
+ * response is not known. beiscsi_if_get_info checks the return
+ * value to free DMA buffer.
+ */
+ if (rc == -EAGAIN)
+ return rc;
- if (rc) {
- /* Check if the MBX Cmd needs to be re-issued */
- if (rc == -EAGAIN)
- return rc;
-
- beiscsi_log(phba, KERN_WARNING,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
+ /**
+ * If FW is busy that is driver timed out, DMA buffer is saved with
+ * the tag, only when the cmd completes this buffer is freed.
+ */
+ if (rc == -EBUSY)
+ return rc;
- if (rc != -EBUSY)
- goto free_cmd;
- else
- return rc;
- }
free_cmd:
pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
nonemb_cmd->va, nonemb_cmd->dma);
return rc;
}
-static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
- int iscsi_cmd, int size)
+static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
+ struct be_dma_mem *cmd,
+ u8 subsystem, u8 opcode, u32 size)
{
cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
if (!cmd->va) {
@@ -305,13 +289,86 @@ static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
return -ENOMEM;
}
cmd->size = size;
- be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
+ be_cmd_hdr_prepare(cmd->va, subsystem, opcode, size);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BG_%d : subsystem iSCSI cmd %d size %d\n",
- iscsi_cmd, size);
+ "BG_%d : subsystem %u cmd %u size %u\n",
+ subsystem, opcode, size);
return 0;
}
+static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag)
+{
+ struct be_dma_mem *tag_mem;
+
+ /* status is ignored */
+ __beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
+ tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
+ if (tag_mem->size) {
+ pci_free_consistent(phba->pcidev, tag_mem->size,
+ tag_mem->va, tag_mem->dma);
+ tag_mem->size = 0;
+ }
+}
+
+int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
+ struct be_set_eqd *set_eqd, int num)
+{
+ struct be_cmd_req_modify_eq_delay *req;
+ struct be_dma_mem nonemb_cmd;
+ int i, rc;
+
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
+ if (rc)
+ return rc;
+
+ req = nonemb_cmd.va;
+ req->num_eq = cpu_to_le32(num);
+ for (i = 0; i < num; i++) {
+ req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
+ req->delay[i].phase = 0;
+ req->delay[i].delay_multiplier =
+ cpu_to_le32(set_eqd[i].delay_multiplier);
+ }
+
+ return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd,
+ __beiscsi_eq_delay_compl, NULL, 0);
+}
+
+/**
+ * beiscsi_get_initiator_name - read initiator name from flash
+ * @phba: device priv structure
+ * @name: buffer pointer
+ * @cfg: fetch user configured
+ *
+ */
+int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg)
+{
+ struct be_dma_mem nonemb_cmd;
+ struct be_cmd_hba_name resp;
+ struct be_cmd_hba_name *req;
+ int rc;
+
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_CFG_GET_HBA_NAME, sizeof(resp));
+ if (rc)
+ return rc;
+
+ req = nonemb_cmd.va;
+ if (cfg)
+ req->hdr.version = 1;
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
+ &resp, sizeof(resp));
+ if (rc) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BS_%d : Initiator Name MBX Failed\n");
+ return rc;
+ }
+ rc = sprintf(name, "%s\n", resp.initiator_name);
+ return rc;
+}
+
unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
@@ -368,9 +425,9 @@ static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
struct be_dma_mem nonemb_cmd;
int rt_val;
- rt_val = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY,
- sizeof(*req));
+ rt_val = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY,
+ sizeof(*req));
if (rt_val)
return rt_val;
@@ -379,7 +436,7 @@ static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
req->ip_addr.ip_type = ip_type;
memcpy(req->ip_addr.addr, gw,
(ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN);
- return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
}
int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw)
@@ -420,17 +477,17 @@ int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
struct be_dma_mem nonemb_cmd;
int rc;
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
- sizeof(*resp));
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
+ sizeof(*resp));
if (rc)
return rc;
req = nonemb_cmd.va;
req->ip_type = ip_type;
- return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, resp,
- sizeof(*resp));
+ return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
+ resp, sizeof(*resp));
}
static int
@@ -441,9 +498,9 @@ beiscsi_if_clr_ip(struct beiscsi_hba *phba,
struct be_dma_mem nonemb_cmd;
int rc;
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
- sizeof(*req));
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
+ sizeof(*req));
if (rc)
return rc;
@@ -461,7 +518,7 @@ beiscsi_if_clr_ip(struct beiscsi_hba *phba,
memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
if_info->ip_addr.subnet_mask,
sizeof(if_info->ip_addr.subnet_mask));
- rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
if (rc < 0 || req->ip_params.ip_record.status) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BG_%d : failed to clear IP: rc %d status %d\n",
@@ -479,9 +536,9 @@ beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
uint32_t ip_len;
int rc;
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
- sizeof(*req));
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
+ sizeof(*req));
if (rc)
return rc;
@@ -499,7 +556,7 @@ beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
subnet, ip_len);
- rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
/**
* In some cases, host needs to look into individual record status
* even though FW reported success for that IOCTL.
@@ -527,7 +584,8 @@ int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
return rc;
if (if_info->dhcp_state) {
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd,
+ CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
sizeof(*reldhcp));
if (rc)
@@ -536,7 +594,7 @@ int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
reldhcp = nonemb_cmd.va;
reldhcp->interface_hndl = phba->interface_handle;
reldhcp->ip_type = ip_type;
- rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
if (rc < 0) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : failed to release existing DHCP: %d\n",
@@ -606,7 +664,7 @@ int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
}
}
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
sizeof(*dhcpreq));
if (rc)
@@ -617,7 +675,7 @@ int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
dhcpreq->retry_count = 1;
dhcpreq->interface_hndl = phba->interface_handle;
dhcpreq->ip_type = ip_type;
- rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
exit:
kfree(if_info);
@@ -673,9 +731,10 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
return rc;
do {
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
- ioctl_size);
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd,
+ CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
+ ioctl_size);
if (rc)
return rc;
@@ -698,8 +757,8 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
return -ENOMEM;
}
- rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, *if_info,
- ioctl_size);
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, *if_info,
+ ioctl_size);
/* Check if the error is because of Insufficent_Buffer */
if (rc == -EAGAIN) {
@@ -728,41 +787,14 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
struct be_dma_mem nonemb_cmd;
int rc;
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
- sizeof(*nic));
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
+ sizeof(*nic));
if (rc)
return rc;
- return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, nic, sizeof(*nic));
-}
-
-
-
-unsigned int be_cmd_get_initname(struct beiscsi_hba *phba)
-{
- unsigned int tag;
- struct be_mcc_wrb *wrb;
- struct be_cmd_hba_name *req;
- struct be_ctrl_info *ctrl = &phba->ctrl;
-
- if (mutex_lock_interruptible(&ctrl->mbox_lock))
- return 0;
- wrb = alloc_mcc_wrb(phba, &tag);
- if (!wrb) {
- mutex_unlock(&ctrl->mbox_lock);
- return 0;
- }
-
- req = embedded_payload(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
- OPCODE_ISCSI_INI_CFG_GET_HBA_NAME,
- sizeof(*req));
-
- be_mcc_notify(phba, tag);
- mutex_unlock(&ctrl->mbox_lock);
- return tag;
+ return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
+ nic, sizeof(*nic));
}
static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 06ddc5ad6874..0b22c99a7a22 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -157,7 +157,6 @@ struct be_bsg_vendor_cmd {
struct beiscsi_endpoint {
struct beiscsi_hba *phba;
- struct beiscsi_sess *sess;
struct beiscsi_conn *conn;
struct iscsi_endpoint *openiscsi_ep;
unsigned short ip_type;
@@ -169,15 +168,12 @@ struct beiscsi_endpoint {
u16 cid_vld;
};
-unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
- struct beiscsi_endpoint *beiscsi_ep,
- unsigned short cid,
- unsigned short issue_reset,
- unsigned short savecfg_flag);
int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
struct invldt_cmd_tbl *inv_tbl,
unsigned int nents);
+int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg);
+
int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type);
int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index b2e8c0dfc79c..72ca2a2e08e2 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3137,16 +3137,9 @@ bfad_im_bsg_vendor_request(struct bsg_job *job)
uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
struct bfad_s *bfad = im_port->bfad;
- struct request_queue *request_q = job->req->q;
void *payload_kbuf;
int rc = -EINVAL;
- /*
- * Set the BSG device request_queue size to 256 to support
- * payloads larger than 512*1024K bytes.
- */
- blk_queue_max_segments(request_q, 256);
-
/* Allocate a temp buffer to hold the passed in user space command */
payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
if (!payload_kbuf) {
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 8dcd8c70c7ee..05f523971348 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -255,7 +255,8 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
struct bfad_s *bfad = port->bfad;
struct bfa_s *bfa = &bfad->bfa;
struct bfa_ioc_s *ioc = &bfa->ioc;
- int addr, len, rc, i;
+ int addr, rc, i;
+ u32 len;
u32 *regbuf;
void __iomem *rb, *reg_addr;
unsigned long flags;
@@ -266,7 +267,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
return PTR_ERR(kern_buf);
rc = sscanf(kern_buf, "%x:%x", &addr, &len);
- if (rc < 2) {
+ if (rc < 2 || len > (UINT_MAX >> 2)) {
printk(KERN_INFO
"bfad[%d]: %s failed to read user buf\n",
bfad->inst_no, __func__);
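Note on the added bound above: making len a u32 and rejecting len > (UINT_MAX >> 2) limits the user-supplied word count before it sizes the register read, presumably because the buffer is later allocated as len << 2 bytes (len 32-bit registers), which the guard keeps from overflowing a 32-bit size.
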
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 61a93994d5ed..e0640e0f259f 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -332,12 +332,10 @@ static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
struct iscsi_task *task)
{
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_login_request *login_wqe;
struct iscsi_login_req *login_hdr;
u32 dword;
- bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
login_hdr = (struct iscsi_login_req *)task->hdr;
login_wqe = (struct bnx2i_login_request *)
bnx2i_conn->ep->qp.sq_prod_qe;
@@ -391,12 +389,10 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
struct iscsi_tm *tmfabort_hdr;
struct scsi_cmnd *ref_sc;
struct iscsi_task *ctask;
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_tmf_request *tmfabort_wqe;
u32 dword;
u32 scsi_lun[2];
- bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
tmfabort_wqe = (struct bnx2i_tmf_request *)
bnx2i_conn->ep->qp.sq_prod_qe;
@@ -463,12 +459,10 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn,
struct iscsi_task *mtask)
{
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_text_request *text_wqe;
struct iscsi_text *text_hdr;
u32 dword;
- bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
text_hdr = (struct iscsi_text *)mtask->hdr;
text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe;
@@ -541,11 +535,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
char *datap, int data_len, int unsol)
{
struct bnx2i_endpoint *ep = bnx2i_conn->ep;
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_nop_out_request *nopout_wqe;
struct iscsi_nopout *nopout_hdr;
- bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
nopout_hdr = (struct iscsi_nopout *)task->hdr;
nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
@@ -602,11 +594,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
struct iscsi_task *task)
{
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_logout_request *logout_wqe;
struct iscsi_logout *logout_hdr;
- bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
logout_hdr = (struct iscsi_logout *)task->hdr;
logout_wqe = (struct bnx2i_logout_request *)
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 667046419b19..30f5f523c8cc 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -368,6 +368,9 @@ struct csio_hw_stats {
#define CSIO_HWF_HOST_INTR_ENABLED 0x00000200 /* Are host interrupts
* enabled?
*/
+#define CSIO_HWF_ROOT_NO_RELAXED_ORDERING 0x00000400 /* Is PCIe relaxed
+ * ordering enabled
+ */
#define csio_is_hw_intr_enabled(__hw) \
((__hw)->flags & CSIO_HWF_HW_INTR_ENABLED)
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 28a9c7d706cb..cb1711a5d7a3 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -968,6 +968,9 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_pci_exit;
}
+ if (!pcie_relaxed_ordering_enabled(pdev))
+ hw->flags |= CSIO_HWF_ROOT_NO_RELAXED_ORDERING;
+
pci_set_drvdata(pdev, hw);
rv = csio_hw_start(hw);
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index abcedfbcecda..931b1d8f9f3e 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -491,6 +491,7 @@ csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
uint32_t iq_start_stop = (iq_params->iq_start) ?
FW_IQ_CMD_IQSTART_F :
FW_IQ_CMD_IQSTOP_F;
+ int relaxed = !(hw->flags & CSIO_HWF_ROOT_NO_RELAXED_ORDERING);
/*
* If this IQ write is cascaded with IQ alloc request, do not
@@ -537,6 +538,8 @@ csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
cmdp->iqns_to_fl0congen |= htonl(
FW_IQ_CMD_FL0HOSTFCMODE_V(iq_params->fl0hostfcmode)|
FW_IQ_CMD_FL0CPRIO_V(iq_params->fl0cprio) |
+ FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+ FW_IQ_CMD_FL0DATARO_V(relaxed) |
FW_IQ_CMD_FL0PADEN_V(iq_params->fl0paden) |
FW_IQ_CMD_FL0PACKEN_V(iq_params->fl0packen));
cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 1bef2724eb78..266eddf17a99 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1575,6 +1575,7 @@ static void release_offload_resources(struct cxgbi_sock *csk)
csk, csk->state, csk->flags, csk->tid);
cxgbi_sock_free_cpl_skbs(csk);
+ cxgbi_sock_purge_write_queue(csk);
if (csk->wr_cred != csk->wr_max_cred) {
cxgbi_sock_purge_wr_queue(csk);
cxgbi_sock_reset_wr_list(csk);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index a61a152136a3..ce1336414e0a 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -688,8 +688,6 @@ rel_neigh:
rel_rt:
ip_rt_put(rt);
- if (csk)
- cxgbi_sock_closed(csk);
err_out:
return ERR_PTR(err);
}
@@ -1889,16 +1887,13 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
struct scsi_cmnd *sc = task->sc;
+ struct cxgbi_sock *csk = cconn->cep->csk;
+ struct net_device *ndev = cdev->ports[csk->port_id];
int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;
tcp_task->dd_data = tdata;
task->hdr = NULL;
- if (tdata->skb) {
- kfree_skb(tdata->skb);
- tdata->skb = NULL;
- }
-
if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
(opcode == ISCSI_OP_SCSI_DATA_OUT ||
(opcode == ISCSI_OP_SCSI_CMD &&
@@ -1910,15 +1905,23 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
if (!tdata->skb) {
- struct cxgbi_sock *csk = cconn->cep->csk;
- struct net_device *ndev = cdev->ports[csk->port_id];
ndev->stats.tx_dropped++;
return -ENOMEM;
}
- skb_get(tdata->skb);
skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
- task->hdr = (struct iscsi_hdr *)tdata->skb->data;
+
+ if (task->sc) {
+ task->hdr = (struct iscsi_hdr *)tdata->skb->data;
+ } else {
+ task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_KERNEL);
+ if (!task->hdr) {
+ __kfree_skb(tdata->skb);
+ tdata->skb = NULL;
+ ndev->stats.tx_dropped++;
+ return -ENOMEM;
+ }
+ }
task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
/* data_out uses scsi_cmd's itt */
@@ -2062,9 +2065,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
unsigned int datalen;
int err;
- if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) {
+ if (!skb) {
log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
- "task 0x%p, skb 0x%p\n", task, skb);
+ "task 0x%p\n", task);
return 0;
}
@@ -2076,6 +2079,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
return -EPIPE;
}
+ tdata->skb = NULL;
datalen = skb->data_len;
/* write ppod first if using ofldq to write ppod */
@@ -2089,6 +2093,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
/* continue. Let fl get the data */
}
+ if (!task->sc)
+ memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX);
+
err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
if (err > 0) {
int pdulen = err;
@@ -2104,7 +2111,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
pdulen += ISCSI_DIGEST_SIZE;
task->conn->txdata_octets += pdulen;
- cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE);
return 0;
}
@@ -2113,6 +2119,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
"task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
task, skb, skb->len, skb->data_len, err);
/* reset skb to send when we are called again */
+ tdata->skb = skb;
return err;
}
@@ -2120,8 +2127,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
task->itt, skb, skb->len, skb->data_len, err);
- __kfree_skb(tdata->skb);
- tdata->skb = NULL;
+ __kfree_skb(skb);
iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
@@ -2146,9 +2152,14 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
task, tdata->skb, task->hdr_itt);
tcp_task->dd_data = NULL;
+
+ if (!task->sc)
+ kfree(task->hdr);
+ task->hdr = NULL;
+
/* never reached the xmit task callout */
if (tdata->skb) {
- kfree_skb(tdata->skb);
+ __kfree_skb(tdata->skb);
tdata->skb = NULL;
}
@@ -2556,7 +2567,10 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
goto err_out;
}
- ifindex = hba->ndev->ifindex;
+ rtnl_lock();
+ if (!vlan_uses_dev(hba->ndev))
+ ifindex = hba->ndev->ifindex;
+ rtnl_unlock();
}
if (dst_addr->sa_family == AF_INET) {
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 31a5816c2e8d..dcb190e75343 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -205,7 +205,6 @@ enum cxgbi_skcb_flags {
SKCBF_TX_NEED_HDR, /* packet needs a header */
SKCBF_TX_MEM_WRITE, /* memory write */
SKCBF_TX_FLAG_COMPL, /* wr completion flag */
- SKCBF_TX_DONE, /* skb tx done */
SKCBF_RX_COALESCED, /* received whole pdu */
SKCBF_RX_HDR, /* received pdu header */
SKCBF_RX_DATA, /* received pdu payload */
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 76b8b7eed0c0..617802855233 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -1634,7 +1634,10 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
ssize_t vpd_size;
char vpd_data[CXLFLASH_VPD_LEN];
char tmp_buf[WWPN_BUF_LEN] = { 0 };
- char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
+ const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
+ cfg->dev_id->driver_data;
+ const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
+ const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
/* Get the VPD data from the device */
vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
@@ -1671,17 +1674,24 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
* value. Note that we must copy to a temporary buffer
* because the conversion service requires that the ASCII
* string be terminated.
+ *
+ * Allow for WWPN not being found for all devices, setting
+ * the returned WWPN to zero when not found. Notify with a
+ * log error for cards that should have had WWPN keywords
+ * in the VPD - cards requiring WWPN will not have their
+ * ports programmed and operate in an undefined state.
*/
for (k = 0; k < cfg->num_fc_ports; k++) {
j = ro_size;
i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
- if (unlikely(i < 0)) {
- dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
- __func__, k);
- rc = -ENODEV;
- goto out;
+ if (i < 0) {
+ if (wwpn_vpd_required)
+ dev_err(dev, "%s: Port %d WWPN not found\n",
+ __func__, k);
+ wwpn[k] = 0ULL;
+ continue;
}
j = pci_vpd_info_field_size(&vpd_data[i]);
@@ -3145,7 +3155,7 @@ static struct scsi_host_template driver_template = {
* Device dependent values
*/
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
- 0ULL };
+ CXLFLASH_WWPN_VPD_REQUIRED };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 880e348ed5c9..ba0108a7a9c2 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -95,7 +95,8 @@ enum undo_level {
struct dev_dependent_vals {
u64 max_sectors;
u64 flags;
-#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL
+#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL
+#define CXLFLASH_WWPN_VPD_REQUIRED 0x0000000000000002ULL
};
struct asyc_intr_info {
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 09daa86670fc..bedf1ce2f33c 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -548,7 +548,4 @@ struct sisl_rht_entry_f1 {
#define TMF_LUN_RESET 0x1U
#define TMF_CLEAR_ACA 0x2U
-
-#define SISLITE_MAX_WS_BLOCKS 512
-
#endif /* _SISLITE_H */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index ed46e8df2e42..170fff5aeff6 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -165,7 +165,7 @@ struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
struct llun_info *lli = arg;
u64 ctxid = DECODE_CTXID(rctxid);
int rc;
- pid_t pid = current->tgid, ctxpid = 0;
+ pid_t pid = task_tgid_nr(current), ctxpid = 0;
if (ctx_ctrl & CTX_CTRL_FILE) {
lli = NULL;
@@ -173,7 +173,7 @@ struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
}
if (ctx_ctrl & CTX_CTRL_CLONE)
- pid = current->parent->tgid;
+ pid = task_ppid_nr(current);
if (likely(ctxid < MAX_CONTEXT)) {
while (true) {
@@ -824,7 +824,7 @@ static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
ctxi->rht_perms = perms;
ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
- ctxi->pid = current->tgid; /* tgid = pid */
+ ctxi->pid = task_tgid_nr(current); /* tgid = pid */
ctxi->ctx = ctx;
ctxi->cfg = cfg;
ctxi->file = file;
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 703bf1e9a64a..5deef57a7834 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -428,12 +428,14 @@ static int write_same16(struct scsi_device *sdev,
u8 *sense_buf = NULL;
int rc = 0;
int result = 0;
- int ws_limit = SISLITE_MAX_WS_BLOCKS;
u64 offset = lba;
int left = nblks;
- u32 to = sdev->request_queue->rq_timeout;
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
+ const u32 s = ilog2(sdev->sector_size) - 9;
+ const u32 to = sdev->request_queue->rq_timeout;
+ const u32 ws_limit = blk_queue_get_max_sectors(sdev->request_queue,
+ REQ_OP_WRITE_SAME) >> s;
cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 0962fd544401..fd22dc6ab5d9 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1085,11 +1085,11 @@ static void alua_rescan(struct scsi_device *sdev)
static int alua_bus_attach(struct scsi_device *sdev)
{
struct alua_dh_data *h;
- int err, ret = -EINVAL;
+ int err;
h = kzalloc(sizeof(*h) , GFP_KERNEL);
if (!h)
- return -ENOMEM;
+ return SCSI_DH_NOMEM;
spin_lock_init(&h->pg_lock);
rcu_assign_pointer(h->pg, NULL);
h->init_error = SCSI_DH_OK;
@@ -1098,16 +1098,14 @@ static int alua_bus_attach(struct scsi_device *sdev)
mutex_init(&h->init_mutex);
err = alua_initialize(sdev, h);
- if (err == SCSI_DH_NOMEM)
- ret = -ENOMEM;
if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
goto failed;
sdev->handler_data = h;
- return 0;
+ return SCSI_DH_OK;
failed:
kfree(h);
- return ret;
+ return err;
}
/*
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 8654e940e1a8..6a2792f3a37e 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -490,7 +490,7 @@ static int clariion_bus_attach(struct scsi_device *sdev)
h = kzalloc(sizeof(*h) , GFP_KERNEL);
if (!h)
- return -ENOMEM;
+ return SCSI_DH_NOMEM;
h->lun_state = CLARIION_LUN_UNINITIALIZED;
h->default_sp = CLARIION_UNBOUND_LU;
h->current_sp = CLARIION_UNBOUND_LU;
@@ -510,11 +510,11 @@ static int clariion_bus_attach(struct scsi_device *sdev)
h->default_sp + 'A');
sdev->handler_data = h;
- return 0;
+ return SCSI_DH_OK;
failed:
kfree(h);
- return -EINVAL;
+ return err;
}
static void clariion_bus_detach(struct scsi_device *sdev)
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 62d314e07d11..e65a0ebb4b54 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -218,24 +218,28 @@ static int hp_sw_bus_attach(struct scsi_device *sdev)
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
- return -ENOMEM;
+ return SCSI_DH_NOMEM;
h->path_state = HP_SW_PATH_UNINITIALIZED;
h->retries = HP_SW_RETRIES;
h->sdev = sdev;
ret = hp_sw_tur(sdev, h);
- if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED)
+ if (ret != SCSI_DH_OK)
goto failed;
+ if (h->path_state == HP_SW_PATH_UNINITIALIZED) {
+ ret = SCSI_DH_NOSYS;
+ goto failed;
+ }
sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n",
HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
"active":"passive");
sdev->handler_data = h;
- return 0;
+ return SCSI_DH_OK;
failed:
kfree(h);
- return -EINVAL;
+ return ret;
}
static void hp_sw_bus_detach( struct scsi_device *sdev )
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 2ceff585f189..7af31a1247ee 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -729,7 +729,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
h = kzalloc(sizeof(*h) , GFP_KERNEL);
if (!h)
- return -ENOMEM;
+ return SCSI_DH_NOMEM;
h->lun = UNINITIALIZED_LUN;
h->state = RDAC_STATE_ACTIVE;
@@ -755,7 +755,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
lun_state[(int)h->lun_state]);
sdev->handler_data = h;
- return 0;
+ return SCSI_DH_OK;
clean_ctlr:
spin_lock(&list_lock);
@@ -764,7 +764,7 @@ clean_ctlr:
failed:
kfree(h);
- return -EINVAL;
+ return err;
}
static void rdac_bus_detach( struct scsi_device *sdev )
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 5cc09dce4d25..f46b312d04bc 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -155,7 +155,7 @@ static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
-
+static void fcoe_vport_remove(struct fc_lport *);
static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
.set_fcoe_ctlr_mode = fcoe_ctlr_mode,
@@ -501,11 +501,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
struct net_device *netdev = fcoe->netdev;
struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
- rtnl_lock();
- if (!fcoe->removed)
- fcoe_interface_remove(fcoe);
- rtnl_unlock();
-
/* Release the self-reference taken during fcoe_interface_create() */
/* tear-down the FCoE controller */
fcoe_ctlr_destroy(fip);
@@ -1014,6 +1009,8 @@ skip_oem:
* fcoe_if_destroy() - Tear down a SW FCoE instance
* @lport: The local port to be destroyed
*
+ * Locking: Must be called with the RTNL mutex held.
+ *
*/
static void fcoe_if_destroy(struct fc_lport *lport)
{
@@ -1035,14 +1032,12 @@ static void fcoe_if_destroy(struct fc_lport *lport)
/* Free existing transmit skbs */
fcoe_clean_pending_queue(lport);
- rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
dev_uc_del(netdev, port->data_src_addr);
if (lport->vport)
synchronize_net();
else
fcoe_interface_remove(fcoe);
- rtnl_unlock();
/* Free queued packets for the per-CPU receive threads */
fcoe_percpu_clean(lport);
@@ -1903,7 +1898,14 @@ static int fcoe_device_notification(struct notifier_block *notifier,
case NETDEV_UNREGISTER:
list_del(&fcoe->list);
port = lport_priv(ctlr->lp);
- queue_work(fcoe_wq, &port->destroy_work);
+ fcoe_vport_remove(lport);
+ mutex_lock(&fcoe_config_mutex);
+ fcoe_if_destroy(lport);
+ if (!fcoe->removed)
+ fcoe_interface_remove(fcoe);
+ fcoe_interface_cleanup(fcoe);
+ mutex_unlock(&fcoe_config_mutex);
+ fcoe_ctlr_device_delete(fcoe_ctlr_to_ctlr_dev(ctlr));
goto out;
break;
case NETDEV_FEAT_CHANGE:
@@ -2108,30 +2110,10 @@ static void fcoe_destroy_work(struct work_struct *work)
struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
struct fcoe_interface *fcoe;
- struct Scsi_Host *shost;
- struct fc_host_attrs *fc_host;
- unsigned long flags;
- struct fc_vport *vport;
- struct fc_vport *next_vport;
port = container_of(work, struct fcoe_port, destroy_work);
- shost = port->lport->host;
- fc_host = shost_to_fc_host(shost);
-
- /* Loop through all the vports and mark them for deletion */
- spin_lock_irqsave(shost->host_lock, flags);
- list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
- if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
- continue;
- } else {
- vport->flags |= FC_VPORT_DELETING;
- queue_work(fc_host_work_q(shost),
- &vport->vport_delete_work);
- }
- }
- spin_unlock_irqrestore(shost->host_lock, flags);
- flush_workqueue(fc_host_work_q(shost));
+ fcoe_vport_remove(port->lport);
mutex_lock(&fcoe_config_mutex);
@@ -2139,7 +2121,11 @@ static void fcoe_destroy_work(struct work_struct *work)
ctlr = fcoe_to_ctlr(fcoe);
cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+ rtnl_lock();
fcoe_if_destroy(port->lport);
+ if (!fcoe->removed)
+ fcoe_interface_remove(fcoe);
+ rtnl_unlock();
fcoe_interface_cleanup(fcoe);
mutex_unlock(&fcoe_config_mutex);
@@ -2254,6 +2240,8 @@ static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode,
printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
netdev->name);
rc = -EIO;
+ if (!fcoe->removed)
+ fcoe_interface_remove(fcoe);
rtnl_unlock();
fcoe_interface_cleanup(fcoe);
mutex_unlock(&fcoe_config_mutex);
@@ -2738,13 +2726,46 @@ static int fcoe_vport_destroy(struct fc_vport *vport)
mutex_unlock(&n_port->lp_mutex);
mutex_lock(&fcoe_config_mutex);
+ rtnl_lock();
fcoe_if_destroy(vn_port);
+ rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
return 0;
}
/**
+ * fcoe_vport_remove() - remove attached vports
+ * @lport: lport for which the vports should be removed
+ */
+static void fcoe_vport_remove(struct fc_lport *lport)
+{
+ struct Scsi_Host *shost;
+ struct fc_host_attrs *fc_host;
+ unsigned long flags;
+ struct fc_vport *vport;
+ struct fc_vport *next_vport;
+
+ shost = lport->host;
+ fc_host = shost_to_fc_host(shost);
+
+ /* Loop through all the vports and mark them for deletion */
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+ continue;
+ } else {
+ vport->flags |= FC_VPORT_DELETING;
+ queue_work(fc_host_work_q(shost),
+ &vport->vport_delete_work);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ flush_workqueue(fc_host_work_q(shost));
+}
+
+/**
* fcoe_vport_disable() - change vport state
* @vport: vport to bring online/offline
* @disable: should the vport be disabled?
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 1ba5f51713a3..f4909cd206d3 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -32,13 +32,13 @@ MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs");
MODULE_LICENSE("GPL v2");
-static int fcoe_transport_create(const char *, struct kernel_param *);
-static int fcoe_transport_destroy(const char *, struct kernel_param *);
+static int fcoe_transport_create(const char *, const struct kernel_param *);
+static int fcoe_transport_destroy(const char *, const struct kernel_param *);
static int fcoe_transport_show(char *buffer, const struct kernel_param *kp);
static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device);
static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device);
-static int fcoe_transport_enable(const char *, struct kernel_param *);
-static int fcoe_transport_disable(const char *, struct kernel_param *);
+static int fcoe_transport_enable(const char *, const struct kernel_param *);
+static int fcoe_transport_disable(const char *, const struct kernel_param *);
static int libfcoe_device_notification(struct notifier_block *notifier,
ulong event, void *ptr);
@@ -867,7 +867,8 @@ EXPORT_SYMBOL(fcoe_ctlr_destroy_store);
*
* Returns: 0 for success
*/
-static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
+static int fcoe_transport_create(const char *buffer,
+ const struct kernel_param *kp)
{
int rc = -ENODEV;
struct net_device *netdev = NULL;
@@ -932,7 +933,8 @@ out_nodev:
*
* Returns: 0 for success
*/
-static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
+static int fcoe_transport_destroy(const char *buffer,
+ const struct kernel_param *kp)
{
int rc = -ENODEV;
struct net_device *netdev = NULL;
@@ -976,7 +978,8 @@ out_nodev:
*
* Returns: 0 for success
*/
-static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
+static int fcoe_transport_disable(const char *buffer,
+ const struct kernel_param *kp)
{
int rc = -ENODEV;
struct net_device *netdev = NULL;
@@ -1010,7 +1013,8 @@ out_nodev:
*
* Returns: 0 for success
*/
-static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
+static int fcoe_transport_enable(const char *buffer,
+ const struct kernel_param *kp)
{
int rc = -ENODEV;
struct net_device *netdev = NULL;
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 15692ea05ced..83357b0367d8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -29,7 +29,7 @@
#define HISI_SAS_MAX_PHYS 9
#define HISI_SAS_MAX_QUEUES 32
#define HISI_SAS_QUEUE_SLOTS 512
-#define HISI_SAS_MAX_ITCT_ENTRIES 2048
+#define HISI_SAS_MAX_ITCT_ENTRIES 1024
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
#define HISI_SAS_RESET_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
@@ -96,6 +96,7 @@ struct hisi_sas_hw_error {
int shift;
const char *msg;
int reg;
+ const struct hisi_sas_hw_error *sub;
};
struct hisi_sas_phy {
@@ -197,7 +198,7 @@ struct hisi_sas_hw {
int (*slot_complete)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
void (*phys_init)(struct hisi_hba *hisi_hba);
- void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
+ void (*phy_start)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no);
void (*get_events)(struct hisi_hba *hisi_hba, int phy_no);
@@ -341,7 +342,11 @@ struct hisi_sas_initial_fis {
};
struct hisi_sas_breakpoint {
- u8 data[128]; /*io128 byte*/
+ u8 data[128];
+};
+
+struct hisi_sas_sata_breakpoint {
+ struct hisi_sas_breakpoint tag[32];
};
struct hisi_sas_sge {
@@ -419,4 +424,6 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct sas_task *task,
struct hisi_sas_slot *slot);
extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
+extern void hisi_sas_rst_work_handler(struct work_struct *work);
+extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
#endif
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 37c838be4757..61a85ff8e459 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -185,13 +185,16 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
+ if (!task->lldd_task)
+ return;
+
+ task->lldd_task = NULL;
+
if (!sas_protocol_ata(task->task_proto))
if (slot->n_elem)
dma_unmap_sg(dev, task->scatter, slot->n_elem,
task->data_dir);
- task->lldd_task = NULL;
-
if (sas_dev)
atomic64_dec(&sas_dev->running_req);
}
@@ -199,8 +202,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
if (slot->buf)
dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
-
list_del_init(&slot->entry);
+ slot->buf = NULL;
slot->task = NULL;
slot->port = NULL;
hisi_sas_slot_index_free(hisi_hba, slot->idx);
@@ -401,7 +404,9 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
goto err_out_buf;
}
+ spin_lock_irqsave(&hisi_hba->lock, flags);
list_add_tail(&slot->entry, &sas_dev->list);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -505,9 +510,10 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct hisi_sas_device *sas_dev = NULL;
+ unsigned long flags;
int i;
- spin_lock(&hisi_hba->lock);
+ spin_lock_irqsave(&hisi_hba->lock, flags);
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
int queue = i % hisi_hba->queue_count;
@@ -524,7 +530,7 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
break;
}
}
- spin_unlock(&hisi_hba->lock);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return sas_dev;
}
@@ -761,7 +767,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
case PHY_FUNC_LINK_RESET:
hisi_hba->hw->phy_disable(hisi_hba, phy_no);
msleep(100);
- hisi_hba->hw->phy_enable(hisi_hba, phy_no);
+ hisi_hba->hw->phy_start(hisi_hba, phy_no);
break;
case PHY_FUNC_DISABLE:
@@ -1045,7 +1051,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
- struct sas_ha_struct *sas_ha = &hisi_hba->sha;
struct device *dev = hisi_hba->dev;
struct Scsi_Host *shost = hisi_hba->shost;
u32 old_state, state;
@@ -1073,7 +1078,6 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
hisi_sas_release_tasks(hisi_hba);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
- sas_ha->notify_ha_event(sas_ha, HAE_RESET);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
/* Init and wait for PHYs to come up and all libsas event finished. */
@@ -1159,7 +1163,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
- if (rc == TMF_RESP_FUNC_FAILED) {
+ if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_do_release_task(hisi_hba, task, slot);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1387,8 +1391,9 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
if (rc)
goto err_out_buf;
-
+ spin_lock_irqsave(&hisi_hba->lock, flags);
list_add_tail(&slot->entry, &sas_dev->list);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1469,6 +1474,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
if (slot)
slot->task = NULL;
dev_err(dev, "internal task abort: timeout.\n");
+ goto exit;
}
}
@@ -1540,6 +1546,17 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
+void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+
+ tasklet_kill(&cq->tasklet);
+ }
+}
+EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
@@ -1608,7 +1625,7 @@ void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
memset(hisi_hba->breakpoint, 0, s);
- s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
+ s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
memset(hisi_hba->sata_breakpoint, 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
@@ -1701,7 +1718,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
if (!hisi_hba->initial_fis)
goto err_out;
- s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
+ s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
if (!hisi_hba->sata_breakpoint)
@@ -1766,7 +1783,7 @@ void hisi_sas_free(struct hisi_hba *hisi_hba)
hisi_hba->initial_fis,
hisi_hba->initial_fis_dma);
- s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
+ s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
if (hisi_hba->sata_breakpoint)
dma_free_coherent(dev, s,
hisi_hba->sata_breakpoint,
@@ -1777,13 +1794,14 @@ void hisi_sas_free(struct hisi_hba *hisi_hba)
}
EXPORT_SYMBOL_GPL(hisi_sas_free);
-static void hisi_sas_rst_work_handler(struct work_struct *work)
+void hisi_sas_rst_work_handler(struct work_struct *work)
{
struct hisi_hba *hisi_hba =
container_of(work, struct hisi_hba, rst_work);
hisi_sas_controller_reset(hisi_hba);
}
+EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 9385554e43a6..dc6eca8d6afd 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1857,7 +1857,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.start_delivery = start_delivery_v1_hw,
.slot_complete = slot_complete_v1_hw,
.phys_init = phys_init_v1_hw,
- .phy_enable = enable_phy_v1_hw,
+ .phy_start = start_phy_v1_hw,
.phy_disable = disable_phy_v1_hw,
.phy_hard_reset = phy_hard_reset_v1_hw,
.phy_set_linkrate = phy_set_linkrate_v1_hw,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index b1f097dabd01..d02c2a791981 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -406,80 +406,70 @@ static const struct hisi_sas_hw_error one_bit_ecc_errors[] = {
.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF),
.msk = HGC_DQE_ECC_1B_ADDR_MSK,
.shift = HGC_DQE_ECC_1B_ADDR_OFF,
- .msg = "hgc_dqe_acc1b_intr found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_dqe_acc1b_intr found: Ram address is 0x%08X\n",
.reg = HGC_DQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF),
.msk = HGC_IOST_ECC_1B_ADDR_MSK,
.shift = HGC_IOST_ECC_1B_ADDR_OFF,
- .msg = "hgc_iost_acc1b_intr found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_iost_acc1b_intr found: Ram address is 0x%08X\n",
.reg = HGC_IOST_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF),
.msk = HGC_ITCT_ECC_1B_ADDR_MSK,
.shift = HGC_ITCT_ECC_1B_ADDR_OFF,
- .msg = "hgc_itct_acc1b_intr found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_itct_acc1b_intr found: Ram address is 0x%08X\n",
.reg = HGC_ITCT_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF),
.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
- .msg = "hgc_iostl_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "hgc_iostl_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF),
.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
- .msg = "hgc_itctl_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "hgc_itctl_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF),
.msk = HGC_CQE_ECC_1B_ADDR_MSK,
.shift = HGC_CQE_ECC_1B_ADDR_OFF,
- .msg = "hgc_cqe_acc1b_intr found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_cqe_acc1b_intr found: Ram address is 0x%08X\n",
.reg = HGC_CQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
- .msg = "rxm_mem0_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem0_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
- .msg = "rxm_mem1_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem1_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
- .msg = "rxm_mem2_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem2_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
- .msg = "rxm_mem3_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem3_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS15,
},
};
@@ -489,80 +479,70 @@ static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
.msk = HGC_DQE_ECC_MB_ADDR_MSK,
.shift = HGC_DQE_ECC_MB_ADDR_OFF,
- .msg = "hgc_dqe_accbad_intr (0x%x) found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_dqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
.reg = HGC_DQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
.msk = HGC_IOST_ECC_MB_ADDR_MSK,
.shift = HGC_IOST_ECC_MB_ADDR_OFF,
- .msg = "hgc_iost_accbad_intr (0x%x) found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_iost_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
.reg = HGC_IOST_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
.msk = HGC_ITCT_ECC_MB_ADDR_MSK,
.shift = HGC_ITCT_ECC_MB_ADDR_OFF,
- .msg = "hgc_itct_accbad_intr (0x%x) found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_itct_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
.reg = HGC_ITCT_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
- .msg = "hgc_iostl_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "hgc_iostl_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
- .msg = "hgc_itctl_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "hgc_itctl_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
.msk = HGC_CQE_ECC_MB_ADDR_MSK,
.shift = HGC_CQE_ECC_MB_ADDR_OFF,
- .msg = "hgc_cqe_accbad_intr (0x%x) found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_cqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
.reg = HGC_CQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
- .msg = "rxm_mem0_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem0_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
- .msg = "rxm_mem1_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem1_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
- .msg = "rxm_mem2_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem2_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
- .msg = "rxm_mem3_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem3_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS15,
},
};
@@ -843,8 +823,9 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
struct hisi_sas_device *sas_dev = NULL;
int i, sata_dev = dev_is_sata(device);
int sata_idx = -1;
+ unsigned long flags;
- spin_lock(&hisi_hba->lock);
+ spin_lock_irqsave(&hisi_hba->lock, flags);
if (sata_dev)
if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx))
@@ -874,7 +855,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
}
out:
- spin_unlock(&hisi_hba->lock);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return sas_dev;
}
@@ -2376,7 +2357,9 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
if (unlikely(aborted)) {
ts->stat = SAS_ABORTED_TASK;
+ spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return -1;
}
@@ -2951,25 +2934,58 @@ static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
-#define AXI_ERR_NR 8
-static const char axi_err_info[AXI_ERR_NR][32] = {
- "IOST_AXI_W_ERR",
- "IOST_AXI_R_ERR",
- "ITCT_AXI_W_ERR",
- "ITCT_AXI_R_ERR",
- "SATA_AXI_W_ERR",
- "SATA_AXI_R_ERR",
- "DQE_AXI_R_ERR",
- "CQE_AXI_W_ERR"
+static const struct hisi_sas_hw_error axi_error[] = {
+ { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
+ { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
+ { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
+ { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
+ { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
+ { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
+ { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
+ { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
+ {},
+};
+
+static const struct hisi_sas_hw_error fifo_error[] = {
+ { .msk = BIT(8), .msg = "CQE_WINFO_FIFO" },
+ { .msk = BIT(9), .msg = "CQE_MSG_FIFO" },
+ { .msk = BIT(10), .msg = "GETDQE_FIFO" },
+ { .msk = BIT(11), .msg = "CMDP_FIFO" },
+ { .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
+ {},
};
-#define FIFO_ERR_NR 5
-static const char fifo_err_info[FIFO_ERR_NR][32] = {
- "CQE_WINFO_FIFO",
- "CQE_MSG_FIFIO",
- "GETDQE_FIFO",
- "CMDP_FIFO",
- "AWTCTRL_FIFO"
+static const struct hisi_sas_hw_error fatal_axi_errors[] = {
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
+ .msg = "write pointer and depth",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
+ .msg = "iptt no match slot",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
+ .msg = "read pointer and depth",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
+ .reg = HGC_AXI_FIFO_ERR_INFO,
+ .sub = axi_error,
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
+ .reg = HGC_AXI_FIFO_ERR_INFO,
+ .sub = fifo_error,
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
+ .msg = "LM add/fetch list",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
+ .msg = "SAS_HGC_ABT fetch LM list",
+ },
};
static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
@@ -2977,98 +2993,47 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
struct hisi_hba *hisi_hba = p;
u32 irq_value, irq_msk, err_value;
struct device *dev = hisi_hba->dev;
+ const struct hisi_sas_hw_error *axi_error;
+ int i;
irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe);
irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
- if (irq_value) {
- if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) {
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_WP_DEPTH_OFF);
- dev_warn(dev, "write pointer and depth error (0x%x) \
- found!\n",
- irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) {
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 <<
- ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF);
- dev_warn(dev, "iptt no match slot error (0x%x) found!\n",
- irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
- if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF)) {
- dev_warn(dev, "read pointer and depth error (0x%x) \
- found!\n",
- irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) {
- int i;
-
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_AXI_OFF);
- err_value = hisi_sas_read32(hisi_hba,
- HGC_AXI_FIFO_ERR_INFO);
-
- for (i = 0; i < AXI_ERR_NR; i++) {
- if (err_value & BIT(i)) {
- dev_warn(dev, "%s (0x%x) found!\n",
- axi_err_info[i], irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
- }
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) {
- int i;
-
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_FIFO_OFF);
- err_value = hisi_sas_read32(hisi_hba,
- HGC_AXI_FIFO_ERR_INFO);
+ for (i = 0; i < ARRAY_SIZE(fatal_axi_errors); i++) {
+ axi_error = &fatal_axi_errors[i];
+ if (!(irq_value & axi_error->irq_msk))
+ continue;
- for (i = 0; i < FIFO_ERR_NR; i++) {
- if (err_value & BIT(AXI_ERR_NR + i)) {
- dev_warn(dev, "%s (0x%x) found!\n",
- fifo_err_info[i], irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << axi_error->shift);
+ if (axi_error->sub) {
+ const struct hisi_sas_hw_error *sub = axi_error->sub;
+
+ err_value = hisi_sas_read32(hisi_hba, axi_error->reg);
+ for (; sub->msk || sub->msg; sub++) {
+ if (!(err_value & sub->msk))
+ continue;
+ dev_warn(dev, "%s (0x%x) found!\n",
+ sub->msg, irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
-
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) {
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_LM_OFF);
- dev_warn(dev, "LM add/fetch list error (0x%x) found!\n",
- irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) {
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_ABT_OFF);
- dev_warn(dev, "SAS_HGC_ABT fetch LM list error (0x%x) found!\n",
- irq_value);
+ } else {
+ dev_warn(dev, "%s (0x%x) found!\n",
+ axi_error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
+ }
- if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
- u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
- u32 dev_id = reg_val & ITCT_DEV_MSK;
- struct hisi_sas_device *sas_dev =
- &hisi_hba->devices[dev_id];
+ if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
+ u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
+ u32 dev_id = reg_val & ITCT_DEV_MSK;
+ struct hisi_sas_device *sas_dev = &hisi_hba->devices[dev_id];
- hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
- dev_dbg(dev, "clear ITCT ok\n");
- complete(sas_dev->completion);
- }
+ hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
+ dev_dbg(dev, "clear ITCT ok\n");
+ complete(sas_dev->completion);
}
hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value);
@@ -3408,6 +3373,7 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
interrupt_disable_v2_hw(hisi_hba);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
+ hisi_sas_kill_tasklets(hisi_hba);
hisi_sas_stop_phys(hisi_hba);
@@ -3458,7 +3424,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.start_delivery = start_delivery_v2_hw,
.slot_complete = slot_complete_v2_hw,
.phys_init = phys_init_v2_hw,
- .phy_enable = enable_phy_v2_hw,
+ .phy_start = start_phy_v2_hw,
.phy_disable = disable_phy_v2_hw,
.phy_hard_reset = phy_hard_reset_v2_hw,
.get_events = phy_get_events_v2_hw,
@@ -3491,16 +3457,11 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
{
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
- int i;
if (timer_pending(&hisi_hba->timer))
del_timer(&hisi_hba->timer);
- for (i = 0; i < hisi_hba->queue_count; i++) {
- struct hisi_sas_cq *cq = &hisi_hba->cq[i];
-
- tasklet_kill(&cq->tasklet);
- }
+ hisi_sas_kill_tasklets(hisi_hba);
return hisi_sas_remove(pdev);
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 3f2f0baf2a5e..19b1f2ffec17 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -53,6 +53,11 @@
#define HGC_IOMB_PROC1_STATUS 0x104
#define CFG_1US_TIMER_TRSH 0xcc
#define CHNL_INT_STATUS 0x148
+#define HGC_AXI_FIFO_ERR_INFO 0x154
+#define AXI_ERR_INFO_OFF 0
+#define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
+#define FIFO_ERR_INFO_OFF 8
+#define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF)
#define INT_COAL_EN 0x19c
#define OQ_INT_COAL_TIME 0x1a0
#define OQ_INT_COAL_CNT 0x1a4
@@ -135,6 +140,7 @@
#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
#define STP_LINK_TIMER (PORT_BASE + 0x120)
+#define CON_CFG_DRIVER (PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
#define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c)
@@ -154,6 +160,10 @@
#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
+#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
+#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
+#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
@@ -171,8 +181,11 @@
#define DMA_RX_STATUS (PORT_BASE + 0x2e8)
#define DMA_RX_STATUS_BUSY_OFF 0
#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)
+#define ERR_CNT_DWS_LOST (PORT_BASE + 0x380)
+#define ERR_CNT_RESET_PROB (PORT_BASE + 0x384)
+#define ERR_CNT_INVLD_DW (PORT_BASE + 0x390)
+#define ERR_CNT_DISP_ERR (PORT_BASE + 0x398)
-#define MAX_ITCT_HW 4096 /* max the hw can support */
#define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */
#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
#error Max ITCT exceeded
@@ -377,6 +390,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
/* Global registers init */
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
(u32)((1ULL << hisi_hba->queue_count) - 1));
+ hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
@@ -388,7 +402,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
@@ -407,7 +421,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
@@ -422,6 +436,8 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
0xa03e8);
hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER,
0x7f7a120);
+ hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER,
+ 0x2a0a80);
}
for (i = 0; i < hisi_hba->queue_count; i++) {
/* Delivery queue */
@@ -575,35 +591,24 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
static void free_device_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
+ DECLARE_COMPLETION_ONSTACK(completion);
u64 dev_id = sas_dev->device_id;
- struct device *dev = hisi_hba->dev;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ sas_dev->completion = &completion;
+
/* clear the itct interrupt state */
if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
ENT_INT_SRC3_ITC_INT_MSK);
/* clear the itct table*/
- reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
- reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
+ reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
- udelay(10);
- reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
- if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
- dev_dbg(dev, "got clear ITCT done interrupt\n");
-
- /* invalid the itct state*/
- memset(itct, 0, sizeof(struct hisi_sas_itct));
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- ENT_INT_SRC3_ITC_INT_MSK);
-
- /* clear the itct */
- hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
- dev_dbg(dev, "clear ITCT ok\n");
- }
+ wait_for_completion(sas_dev->completion);
+ memset(itct, 0, sizeof(struct hisi_sas_itct));
}
static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
@@ -755,10 +760,12 @@ static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
{
int i, bitmap = 0;
u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+ u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
for (i = 0; i < hisi_hba->n_phy; i++)
- if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
- bitmap |= 1 << i;
+ if (phy_state & BIT(i))
+ if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
+ bitmap |= BIT(i);
return bitmap;
}
@@ -988,20 +995,6 @@ err_out_req:
return rc;
}
-static int get_ncq_tag_v3_hw(struct sas_task *task, u32 *tag)
-{
- struct ata_queued_cmd *qc = task->uldd_task;
-
- if (qc) {
- if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ) {
- *tag = qc->tag;
- return 1;
- }
- }
- return 0;
-}
-
static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
@@ -1050,7 +1043,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
hdr->dw1 = cpu_to_le32(dw1);
/* dw2 */
- if (task->ata_task.use_ncq && get_ncq_tag_v3_hw(task, &hdr_tag)) {
+ if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
}
@@ -1276,6 +1269,25 @@ static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
return res;
}
+static const struct hisi_sas_hw_error port_axi_error[] = {
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
+ .msg = "dma_tx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
+ .msg = "dma_tx_axi_rd_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
+ .msg = "dma_rx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
+ .msg = "dma_rx_axi_rd_err",
+ },
+};
+
static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
@@ -1301,10 +1313,19 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
if ((irq_msk & (4 << (phy_no * 4))) &&
irq_value1) {
- if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
- CHL_INT1_DMAC_TX_ECC_ERR_MSK))
- panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
- dev_name(dev), irq_value1);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
+ const struct hisi_sas_hw_error *error =
+ &port_axi_error[i];
+
+ if (!(irq_value1 & error->irq_msk))
+ continue;
+
+ dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
+ error->msg, phy_no, irq_value1);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT1, irq_value1);
@@ -1331,6 +1352,114 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
+static const struct hisi_sas_hw_error axi_error[] = {
+ { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
+ { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
+ { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
+ { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
+ { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
+ { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
+ { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
+ { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
+ {},
+};
+
+static const struct hisi_sas_hw_error fifo_error[] = {
+ { .msk = BIT(8), .msg = "CQE_WINFO_FIFO" },
+ { .msk = BIT(9), .msg = "CQE_MSG_FIFO" },
+ { .msk = BIT(10), .msg = "GETDQE_FIFO" },
+ { .msk = BIT(11), .msg = "CMDP_FIFO" },
+ { .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
+ {},
+};
+
+static const struct hisi_sas_hw_error fatal_axi_error[] = {
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
+ .msg = "write pointer and depth",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
+ .msg = "iptt no match slot",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
+ .msg = "read pointer and depth",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
+ .reg = HGC_AXI_FIFO_ERR_INFO,
+ .sub = axi_error,
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
+ .reg = HGC_AXI_FIFO_ERR_INFO,
+ .sub = fifo_error,
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
+ .msg = "LM add/fetch list",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
+ .msg = "SAS_HGC_ABT fetch LM list",
+ },
+};
+
+static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
+{
+ u32 irq_value, irq_msk;
+ struct hisi_hba *hisi_hba = p;
+ struct device *dev = hisi_hba->dev;
+ int i;
+
+ irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);
+
+ irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+
+ for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
+ const struct hisi_sas_hw_error *error = &fatal_axi_error[i];
+
+ if (!(irq_value & error->irq_msk))
+ continue;
+
+ if (error->sub) {
+ const struct hisi_sas_hw_error *sub = error->sub;
+ u32 err_value = hisi_sas_read32(hisi_hba, error->reg);
+
+ for (; sub->msk || sub->msg; sub++) {
+ if (!(err_value & sub->msk))
+ continue;
+
+ dev_warn(dev, "%s error (0x%x) found!\n",
+ sub->msg, irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+ } else {
+ dev_warn(dev, "%s error (0x%x) found!\n",
+ error->msg, irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
+ u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
+ u32 dev_id = reg_val & ITCT_DEV_MSK;
+ struct hisi_sas_device *sas_dev =
+ &hisi_hba->devices[dev_id];
+
+ hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
+ dev_dbg(dev, "clear ITCT ok\n");
+ complete(sas_dev->completion);
+ }
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);
+
+ return IRQ_HANDLED;
+}
+
static void
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
@@ -1414,7 +1543,9 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
ts->stat = SAS_ABORTED_TASK;
+ spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return -1;
}
@@ -1629,6 +1760,15 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
goto free_phy_irq;
}
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
+ fatal_axi_int_v3_hw, 0,
+ DRV_NAME " fatal", hisi_hba);
+ if (rc) {
+ dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
+ rc = -ENOENT;
+ goto free_chnl_interrupt;
+ }
+
/* Init tasklets for cq only */
for (i = 0; i < hisi_hba->queue_count; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
@@ -1656,6 +1796,8 @@ free_cq_irqs:
free_irq(pci_irq_vector(pdev, k+16), cq);
}
+ free_irq(pci_irq_vector(pdev, 11), hisi_hba);
+free_chnl_interrupt:
free_irq(pci_irq_vector(pdev, 2), hisi_hba);
free_phy_irq:
free_irq(pci_irq_vector(pdev, 1), hisi_hba);
@@ -1749,6 +1891,31 @@ static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
return hisi_sas_read32(hisi_hba, PHY_STATE);
}
+static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct sas_phy *sphy = sas_phy->phy;
+ u32 reg_value;
+
+ /* loss dword sync */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
+ sphy->loss_of_dword_sync_count += reg_value;
+
+ /* phy reset problem */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
+ sphy->phy_reset_problem_count += reg_value;
+
+ /* invalid dword */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
+ sphy->invalid_dword_count += reg_value;
+
+ /* disparity err */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
+ sphy->running_disparity_error_count += reg_value;
+
+}
+
static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
@@ -1757,6 +1924,7 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
interrupt_disable_v3_hw(hisi_hba);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
+ hisi_sas_kill_tasklets(hisi_hba);
hisi_sas_stop_phys(hisi_hba);
@@ -1793,7 +1961,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.start_delivery = start_delivery_v3_hw,
.slot_complete = slot_complete_v3_hw,
.phys_init = phys_init_v3_hw,
- .phy_enable = enable_phy_v3_hw,
+ .phy_start = start_phy_v3_hw,
.phy_disable = disable_phy_v3_hw,
.phy_hard_reset = phy_hard_reset_v3_hw,
.phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
@@ -1801,6 +1969,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.dereg_device = dereg_device_v3_hw,
.soft_reset = soft_reset_v3_hw,
.get_phys_state = get_phys_state_v3_hw,
+ .get_events = phy_get_events_v3_hw,
};
static struct Scsi_Host *
@@ -1817,6 +1986,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
}
hisi_hba = shost_priv(shost);
+ INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
hisi_hba->hw = &hisi_sas_v3_hw;
hisi_hba->pci_dev = pdev;
hisi_hba->dev = dev;
@@ -1960,11 +2130,11 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
free_irq(pci_irq_vector(pdev, 1), hisi_hba);
free_irq(pci_irq_vector(pdev, 2), hisi_hba);
+ free_irq(pci_irq_vector(pdev, 11), hisi_hba);
for (i = 0; i < hisi_hba->queue_count; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
free_irq(pci_irq_vector(pdev, i+16), cq);
- tasklet_kill(&cq->tasklet);
}
pci_free_irq_vectors(pdev);
}
@@ -1980,6 +2150,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
sas_remove_host(sha->core.shost);
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
+ hisi_sas_kill_tasklets(hisi_hba);
pci_release_regions(pdev);
pci_disable_device(pdev);
hisi_sas_free(hisi_hba);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4ed3d26ffdde..287e5eb0723f 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -60,7 +60,7 @@
* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
* with an optional trailing '-' followed by a byte value (0-255).
*/
-#define HPSA_DRIVER_VERSION "3.4.20-0"
+#define HPSA_DRIVER_VERSION "3.4.20-125"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -787,7 +787,12 @@ static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
}
offload_enabled = hdev->offload_enabled;
spin_unlock_irqrestore(&h->lock, flags);
- return snprintf(buf, 20, "%d\n", offload_enabled);
+
+ if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
+ return snprintf(buf, 20, "%d\n", offload_enabled);
+ else
+ return snprintf(buf, 40, "%s\n",
+ "Not applicable for a controller");
}
#define MAX_PATHS 8
@@ -1270,7 +1275,7 @@ static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
dev->model,
label,
dev->offload_config ? '+' : '-',
- dev->offload_enabled ? '+' : '-',
+ dev->offload_to_be_enabled ? '+' : '-',
dev->expose_device);
}
@@ -1345,36 +1350,42 @@ lun_assigned:
(*nadded)++;
hpsa_show_dev_msg(KERN_INFO, h, device,
device->expose_device ? "added" : "masked");
- device->offload_to_be_enabled = device->offload_enabled;
- device->offload_enabled = 0;
return 0;
}
-/* Update an entry in h->dev[] array. */
+/*
+ * Called during a scan operation.
+ *
+ * Update an entry in h->dev[] array.
+ */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
int entry, struct hpsa_scsi_dev_t *new_entry)
{
- int offload_enabled;
/* assumes h->devlock is held */
BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
/* Raid level changed. */
h->dev[entry]->raid_level = new_entry->raid_level;
+ /*
+ * ioaccel_handle may have changed for a dual domain disk
+ */
+ h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+
/* Raid offload parameters changed. Careful about the ordering. */
- if (new_entry->offload_config && new_entry->offload_enabled) {
+ if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
/*
* if drive is newly offload_enabled, we want to copy the
* raid map data first. If previously offload_enabled and
* offload_config were set, raid map data had better be
- * the same as it was before. if raid map data is changed
+ * the same as it was before. If raid map data has changed
* then it had better be the case that
* h->dev[entry]->offload_enabled is currently 0.
*/
h->dev[entry]->raid_map = new_entry->raid_map;
h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
}
- if (new_entry->hba_ioaccel_enabled) {
+ if (new_entry->offload_to_be_enabled) {
h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
}
@@ -1385,17 +1396,18 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h,
/*
* We can turn off ioaccel offload now, but need to delay turning
- * it on until we can update h->dev[entry]->phys_disk[], but we
+ * ioaccel on until we can update h->dev[entry]->phys_disk[], but we
* can't do that until all the devices are updated.
*/
- h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
- if (!new_entry->offload_enabled)
+ h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
+
+ /*
+ * turn ioaccel off immediately if told to do so.
+ */
+ if (!new_entry->offload_to_be_enabled)
h->dev[entry]->offload_enabled = 0;
- offload_enabled = h->dev[entry]->offload_enabled;
- h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
- h->dev[entry]->offload_enabled = offload_enabled;
}
/* Replace an entry from h->dev[] array. */
@@ -1421,9 +1433,8 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h,
h->dev[entry] = new_entry;
added[*nadded] = new_entry;
(*nadded)++;
+
hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
- new_entry->offload_to_be_enabled = new_entry->offload_enabled;
- new_entry->offload_enabled = 0;
}
/* Remove an entry from h->dev[] array. */
@@ -1513,11 +1524,22 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
return 1;
if (dev1->offload_config != dev2->offload_config)
return 1;
- if (dev1->offload_enabled != dev2->offload_enabled)
+ if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
return 1;
if (!is_logical_dev_addr_mode(dev1->scsi3addr))
if (dev1->queue_depth != dev2->queue_depth)
return 1;
+ /*
+ * This can happen for dual domain devices. An active
+ * path change causes the ioaccel handle to change
+ *
+ * For example, note the handle differences between p0 and p1:
+ * Device WWN ,WWN hash,Handle
+ * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
+ * p1 0x5000C5005FC4DAC9,0x6798C0,0x00040004
+ */
+ if (dev1->ioaccel_handle != dev2->ioaccel_handle)
+ return 1;
return 0;
}
@@ -1727,6 +1749,11 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
* be 0, but we'll turn it off here just in case
*/
if (!logical_drive->phys_disk[i]) {
+ dev_warn(&h->pdev->dev,
+ "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
+ __func__,
+ h->scsi_host->host_no, logical_drive->bus,
+ logical_drive->target, logical_drive->lun);
logical_drive->offload_enabled = 0;
logical_drive->offload_to_be_enabled = 0;
logical_drive->queue_depth = 8;
@@ -1738,8 +1765,12 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
* way too high for partial stripe writes
*/
logical_drive->queue_depth = qdepth;
- else
- logical_drive->queue_depth = h->nr_cmds;
+ else {
+ if (logical_drive->external)
+ logical_drive->queue_depth = EXTERNAL_QD;
+ else
+ logical_drive->queue_depth = h->nr_cmds;
+ }
}
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
@@ -1759,13 +1790,24 @@ static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
/*
* If offload is currently enabled, the RAID map and
* phys_disk[] assignment *better* not be changing
- * and since it isn't changing, we do not need to
- * update it.
+ * because we would be changing ioaccel phys_disk[] pointers
+ * on an ioaccel volume processing I/O requests.
+ *
+ * If an ioaccel volume status changed, initially because it was
+ * re-configured and thus underwent a transformation, or
+ * a drive failed, we would have received a state change
+ * request and ioaccel should have been turned off. When the
+ * transformation completes, we get another state change
+ * request to turn ioaccel back on. In this case, we need
+ * to update the ioaccel information.
+ *
+ * Thus: If it is not currently enabled, but will be after
+ * the scan completes, make sure the ioaccel pointers
+ * are up to date.
*/
- if (dev[i]->offload_enabled)
- continue;
- hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
+ if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
+ hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
}
}
@@ -1823,11 +1865,13 @@ static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
break;
if (++waits > 20)
break;
+ msleep(1000);
+ }
+
+ if (waits > 20)
dev_warn(&h->pdev->dev,
"%s: removing device with %d outstanding commands!\n",
__func__, cmds);
- msleep(1000);
- }
}
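
The rework above moves the warning out of the polling loop: the function now sleeps between samples and complains once, only after the retry budget is exhausted with commands still outstanding. A generic sketch of that drain-with-timeout shape (userspace stand-ins, not the driver's helpers):

#include <stdio.h>
#include <unistd.h>

/* Stand-in for "how many commands are still outstanding on this device". */
static int outstanding_cmds(void)
{
	static int cmds = 3;
	return cmds > 0 ? cmds-- : 0;
}

static void wait_for_drain(void)
{
	int waits = 0, cmds;

	while ((cmds = outstanding_cmds()) != 0) {
		if (++waits > 20)
			break;
		sleep(1);              /* msleep(1000) in the driver */
	}

	if (waits > 20)
		fprintf(stderr, "removing device with %d outstanding commands!\n", cmds);
}

int main(void)
{
	wait_for_drain();
	return 0;
}
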
static void hpsa_remove_device(struct ctlr_info *h,
@@ -1838,6 +1882,12 @@ static void hpsa_remove_device(struct ctlr_info *h,
if (!h->scsi_host)
return;
+ /*
+ * Allow for commands to drain
+ */
+ device->removed = 1;
+ hpsa_wait_for_outstanding_commands_for_dev(h, device);
+
if (is_logical_device(device)) { /* RAID */
sdev = scsi_device_lookup(h->scsi_host, device->bus,
device->target, device->lun);
@@ -1855,9 +1905,6 @@ static void hpsa_remove_device(struct ctlr_info *h,
}
} else { /* HBA */
- device->removed = 1;
- hpsa_wait_for_outstanding_commands_for_dev(h, device);
-
hpsa_remove_sas_device(device);
}
}
@@ -1965,8 +2012,13 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h,
}
hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
- /* Now that h->dev[]->phys_disk[] is coherent, we can enable
+ /*
+ * Now that h->dev[]->phys_disk[] is coherent, we can enable
* any logical drives that need it enabled.
+ *
+ * The raid map should be current by now.
+ *
+ * We are updating the device list used for I/O requests.
*/
for (i = 0; i < h->ndevices; i++) {
if (h->dev[i] == NULL)
@@ -2441,7 +2493,7 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
/*
* Any RAID offload error results in retry which will use
- * the normal I/O path so the controller can handle whatever's
+ * the normal I/O path so the controller can handle whatever is
* wrong.
*/
if (is_logical_device(dev) &&
@@ -2913,6 +2965,57 @@ static void hpsa_scsi_interpret_error(struct ctlr_info *h,
}
}
+static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
+ u8 page, u8 *buf, size_t bufsize)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_alloc(h);
+ if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
+ page, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ if (rc)
+ goto out;
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ rc = -1;
+ }
+out:
+ cmd_free(h, c);
+ return rc;
+}
+
+static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
+ u8 *scsi3addr)
+{
+ u8 *buf;
+ u64 sa = 0;
+ int rc = 0;
+
+ buf = kzalloc(1024, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
+ buf, 1024);
+
+ if (rc)
+ goto out;
+
+ sa = get_unaligned_be64(buf+12);
+
+out:
+ kfree(buf);
+ return sa;
+}
+
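
hpsa_get_enclosure_logical_identifier() above issues a 6-byte RECEIVE DIAGNOSTIC RESULTS CDB (opcode 0x1C, PCV set, a page code, and a big-endian allocation length in bytes 3-4, matching the fill_cmd case added later in this patch) and reads the enclosure logical identifier as a big-endian 64-bit value at byte offset 12 of the returned page. A small sketch of the byte handling, with a made-up response buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Big-endian 64-bit load, equivalent to the kernel's get_unaligned_be64(). */
static uint64_t be64_at(const uint8_t *p)
{
	uint64_t v = 0;
	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	uint8_t cdb[6] = { 0x1C, 0x01, 0x01, 0, 0, 0 };   /* RECEIVE DIAGNOSTIC RESULTS */
	uint16_t alloc_len = 1024;
	uint8_t page[1024] = { 0 };

	cdb[3] = (alloc_len >> 8) & 0xFF;   /* allocation length, big endian */
	cdb[4] = alloc_len & 0xFF;

	/* Pretend the enclosure returned this page; offset 12 holds the identifier. */
	uint8_t ident[8] = { 0x50, 0x00, 0xC5, 0x00, 0x5F, 0xC4, 0xDA, 0xCA };
	memcpy(&page[12], ident, sizeof(ident));

	printf("CDB: %02x %02x %02x %02x %02x %02x\n",
	       cdb[0], cdb[1], cdb[2], cdb[3], cdb[4], cdb[5]);
	printf("enclosure logical identifier: 0x%016llx\n",
	       (unsigned long long)be64_at(&page[12]));
	return 0;
}
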
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
u16 page, unsigned char *buf,
unsigned char bufsize)
@@ -2929,7 +3032,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
goto out;
}
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3213,7 +3316,7 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
return -1;
}
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3256,7 +3359,7 @@ static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3284,7 +3387,7 @@ static int hpsa_bmic_id_controller(struct ctlr_info *h,
goto out;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3315,7 +3418,7 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
- DEFAULT_TIMEOUT);
+ NO_TIMEOUT);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(h, c);
@@ -3348,6 +3451,9 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
+ encl_dev->sas_address =
+ hpsa_get_enclosure_logical_identifier(h, scsi3addr);
+
if (encl_dev->target == -1 || encl_dev->lun == -1) {
rc = IO_OK;
goto out;
@@ -3388,7 +3494,7 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
c->Request.CDB[5] = 0;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
- DEFAULT_TIMEOUT);
+ NO_TIMEOUT);
if (rc)
goto out;
@@ -3472,6 +3578,30 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
dev->sas_address = sa;
}
+static void hpsa_ext_ctrl_present(struct ctlr_info *h,
+ struct ReportExtendedLUNdata *physdev)
+{
+ u32 nphysicals;
+ int i;
+
+ if (h->discovery_polling)
+ return;
+
+ nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
+
+ for (i = 0; i < nphysicals; i++) {
+ if (physdev->LUN[i].device_type ==
+ BMIC_DEVICE_TYPE_CONTROLLER
+ && !is_hba_lunid(physdev->LUN[i].lunid)) {
+ dev_info(&h->pdev->dev,
+ "External controller present, activate discovery polling and disable rld caching\n");
+ hpsa_disable_rld_caching(h);
+ h->discovery_polling = 1;
+ break;
+ }
+ }
+}
+
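
hpsa_ext_ctrl_present() derives the number of physical entries from the big-endian LUNListLength field of the extended REPORT PHYSICAL LUNS response (24 bytes per entry, plus one in this driver's accounting) and turns on discovery polling the first time it sees a controller-type device that is not the local HBA. A compact sketch of that walk, with hypothetical entry data:

#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE             24
#define DEVICE_TYPE_CONTROLLER 0x07   /* BMIC_DEVICE_TYPE_CONTROLLER in this patch */

struct ext_lun_entry {
	uint8_t lunid[8];
	uint8_t device_type;
	/* ...remaining bytes of the 24-byte entry omitted... */
};

static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static int is_local_hba(const struct ext_lun_entry *e)
{
	return e->lunid[0] == 0;   /* placeholder for is_hba_lunid() */
}

int main(void)
{
	uint8_t lun_list_length[4] = { 0, 0, 0, 48 };   /* two 24-byte entries */
	struct ext_lun_entry entries[2] = {
		{ .lunid = { 0 }, .device_type = 0x00 },                    /* local */
		{ .lunid = { 1 }, .device_type = DEVICE_TYPE_CONTROLLER },  /* external */
	};
	uint32_t n = be32(lun_list_length) / ENTRY_SIZE;
	int polling = 0;

	for (uint32_t i = 0; i < n && !polling; i++) {
		if (entries[i].device_type == DEVICE_TYPE_CONTROLLER &&
		    !is_local_hba(&entries[i])) {
			printf("External controller present, enable discovery polling\n");
			polling = 1;
		}
	}
	return 0;
}
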
/* Get a device id from inquiry page 0x83 */
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
unsigned char scsi3addr[], u8 page)
@@ -3516,6 +3646,13 @@ exit_supported:
return true;
}
+/*
+ * Called during a scan operation.
+ * Sets ioaccel status on the new device list, not the existing device list.
+ *
+ * The device list used during I/O will be updated later in
+ * adjust_hpsa_scsi_table.
+ */
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
@@ -3544,12 +3681,12 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h,
this_device->offload_config =
!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
if (this_device->offload_config) {
- this_device->offload_enabled =
+ this_device->offload_to_be_enabled =
!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
if (hpsa_get_raid_map(h, scsi3addr, this_device))
- this_device->offload_enabled = 0;
+ this_device->offload_to_be_enabled = 0;
}
- this_device->offload_to_be_enabled = this_device->offload_enabled;
+
out:
kfree(buf);
return;
@@ -3604,7 +3741,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
if (extended_response)
c->Request.CDB[1] = extended_response;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3739,7 +3876,7 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h,
(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
- DEFAULT_TIMEOUT);
+ NO_TIMEOUT);
if (rc) {
cmd_free(h, c);
return HPSA_VPD_LV_STATUS_UNSUPPORTED;
@@ -4228,6 +4365,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
*/
ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
+ hpsa_ext_ctrl_present(h, physdev_list);
+
/* Allocate the per device structures */
for (i = 0; i < ndevs_to_allocate; i++) {
if (i >= HPSA_MAX_DEVICES) {
@@ -4258,6 +4397,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
int phys_dev_index = i - (raid_ctlr_position == 0);
bool skip_device = false;
+ memset(tmpdevice, 0, sizeof(*tmpdevice));
+
physical_device = i < nphysicals + (raid_ctlr_position == 0);
/* Figure out where the LUN ID info is coming from */
@@ -4279,7 +4420,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
continue;
}
- /* Get device type, vendor, model, device id */
+ /* Get device type, vendor, model, device id, raid_map */
rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
&is_OBDR);
if (rc == -ENOMEM) {
@@ -4296,18 +4437,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
this_device = currentsd[ncurrent];
- /* Turn on discovery_polling if there are ext target devices.
- * Event-based change notification is unreliable for those.
- */
- if (!h->discovery_polling) {
- if (tmpdevice->external) {
- h->discovery_polling = 1;
- dev_info(&h->pdev->dev,
- "External target, activate discovery polling.\n");
- }
- }
-
-
*this_device = *tmpdevice;
this_device->physical_device = physical_device;
@@ -6496,6 +6625,17 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.CDB[0] = HPSA_INQUIRY;
c->Request.CDB[4] = size & 0xFF;
break;
+ case RECEIVE_DIAGNOSTIC:
+ c->Request.CDBLen = 6;
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = cmd;
+ c->Request.CDB[1] = 1;
+ c->Request.CDB[2] = 1;
+ c->Request.CDB[3] = (size >> 8) & 0xFF;
+ c->Request.CDB[4] = size & 0xFF;
+ break;
case HPSA_REPORT_LOG:
case HPSA_REPORT_PHYS:
/* Talking to controller so It's a physical command
@@ -8007,6 +8147,10 @@ static void controller_lockup_detected(struct ctlr_info *h)
spin_unlock_irqrestore(&h->lock, flags);
dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
lockup_detected, h->heartbeat_sample_interval / HZ);
+ if (lockup_detected == 0xffff0000) {
+ dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
+ writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
+ }
pci_disable_device(h->pdev);
fail_all_outstanding_cmds(h);
}
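
When the lockup value read back is 0xffff0000, the patch additionally writes the new DOORBELL_GENERATE_CHKPT bit (0x00000080, added to hpsa_cmd.h later in this series) to the SA5 doorbell so the firmware can capture a checkpoint before the PCI device is disabled. A hedged sketch of the conditional doorbell write, with the MMIO access stubbed out for illustration:

#include <stdint.h>
#include <stdio.h>

#define DOORBELL_CTLR_RESET     0x00000004u
#define DOORBELL_CLEAR_EVENTS   0x00000040u
#define DOORBELL_GENERATE_CHKPT 0x00000080u   /* new bit from this patch */

/* Stand-in for writel() to the mapped SA5 doorbell register. */
static void doorbell_write(volatile uint32_t *doorbell, uint32_t val)
{
	*doorbell = val;
}

static void handle_lockup(volatile uint32_t *doorbell, uint32_t lockup_detected)
{
	fprintf(stderr, "Controller lockup detected: 0x%08x\n", lockup_detected);
	if (lockup_detected == 0xffff0000) {
		fprintf(stderr, "Telling controller to do a CHKPT\n");
		doorbell_write(doorbell, DOORBELL_GENERATE_CHKPT);
	}
	/* ...then disable the device and fail outstanding commands... */
}

int main(void)
{
	uint32_t fake_doorbell = 0;

	handle_lockup(&fake_doorbell, 0xffff0000);
	printf("doorbell now 0x%08x\n", fake_doorbell);
	return 0;
}
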
@@ -8047,9 +8191,79 @@ static int detect_controller_lockup(struct ctlr_info *h)
return false;
}
-static void hpsa_ack_ctlr_events(struct ctlr_info *h)
+/*
+ * Set ioaccel status for all ioaccel volumes.
+ *
+ * Called from monitor controller worker (hpsa_event_monitor_worker)
+ *
+ * A Volume (or the Volumes that comprise an Array set) may be undergoing a
+ * transformation, so we will be turning off ioaccel for all volumes that
+ * make up the Array.
+ */
+static void hpsa_set_ioaccel_status(struct ctlr_info *h)
{
+ int rc;
int i;
+ u8 ioaccel_status;
+ unsigned char *buf;
+ struct hpsa_scsi_dev_t *device;
+
+ if (!h)
+ return;
+
+ buf = kmalloc(64, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ /*
+ * Run through current device list used during I/O requests.
+ */
+ for (i = 0; i < h->ndevices; i++) {
+ device = h->dev[i];
+
+ if (!device)
+ continue;
+ if (!device->scsi3addr)
+ continue;
+ if (!hpsa_vpd_page_supported(h, device->scsi3addr,
+ HPSA_VPD_LV_IOACCEL_STATUS))
+ continue;
+
+ memset(buf, 0, 64);
+
+ rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
+ VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
+ buf, 64);
+ if (rc != 0)
+ continue;
+
+ ioaccel_status = buf[IOACCEL_STATUS_BYTE];
+ device->offload_config =
+ !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+ if (device->offload_config)
+ device->offload_to_be_enabled =
+ !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+
+ /*
+ * Immediately turn off ioaccel for any volume the
+ * controller tells us to. Some of the reasons could be:
+ * transformation - change to the LVs of an Array.
+ * degraded volume - component failure
+ *
+ * If ioaccel is to be re-enabled, re-enable later during the
+ * scan operation so the driver can get a fresh raidmap
+ * before turning ioaccel back on.
+ *
+ */
+ if (!device->offload_to_be_enabled)
+ device->offload_enabled = 0;
+ }
+
+ kfree(buf);
+}
+
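
hpsa_set_ioaccel_status() above polls the ioaccel VPD page for every volume on the live device list and copies the configured/enabled bits into offload_config and offload_to_be_enabled, clearing offload_enabled at once only when the controller says it must come off. A sketch of the per-device decode; the byte offset and bit values below are placeholders, the real IOACCEL_STATUS_BYTE/OFFLOAD_*_BIT definitions live in the driver headers and are not shown in this diff:

#include <stdint.h>
#include <stdio.h>

/* Placeholder positions; the driver's header defines the real ones. */
#define IOACCEL_STATUS_BYTE     4
#define OFFLOAD_CONFIGURED_BIT  0x04
#define OFFLOAD_ENABLED_BIT     0x02

struct vol {
	int offload_config;
	int offload_enabled;
	int offload_to_be_enabled;
};

static void decode_ioaccel_status(struct vol *v, const uint8_t *vpd)
{
	uint8_t status = vpd[IOACCEL_STATUS_BYTE];

	v->offload_config = !!(status & OFFLOAD_CONFIGURED_BIT);
	if (v->offload_config)
		v->offload_to_be_enabled = !!(status & OFFLOAD_ENABLED_BIT);

	/* Disables take effect immediately; enables wait for the next rescan. */
	if (!v->offload_to_be_enabled)
		v->offload_enabled = 0;
}

int main(void)
{
	uint8_t vpd[64] = { 0 };
	struct vol v = { .offload_config = 1, .offload_enabled = 1,
			 .offload_to_be_enabled = 1 };

	vpd[IOACCEL_STATUS_BYTE] = OFFLOAD_CONFIGURED_BIT;   /* configured, not enabled */
	decode_ioaccel_status(&v, vpd);
	printf("config=%d enabled=%d to_be_enabled=%d\n",
	       v.offload_config, v.offload_enabled, v.offload_to_be_enabled);
	return 0;
}
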
+static void hpsa_ack_ctlr_events(struct ctlr_info *h)
+{
char *event_type;
if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
@@ -8067,10 +8281,7 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h)
event_type = "configuration change";
/* Stop sending new RAID offload reqs via the IO accelerator */
scsi_block_requests(h->scsi_host);
- for (i = 0; i < h->ndevices; i++) {
- h->dev[i]->offload_enabled = 0;
- h->dev[i]->offload_to_be_enabled = 0;
- }
+ hpsa_set_ioaccel_status(h);
hpsa_drain_accel_commands(h);
/* Set 'accelerator path config change' bit */
dev_warn(&h->pdev->dev,
@@ -8087,10 +8298,6 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h)
writel(h->events, &(h->cfgtable->clear_event_notify));
writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_clear_event_notify_ack(h);
-#if 0
- writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
- hpsa_wait_for_mode_change_ack(h);
-#endif
}
return;
}
@@ -8241,7 +8448,6 @@ static void hpsa_rescan_ctlr_worker(struct work_struct *work)
if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
hpsa_perform_rescan(h);
} else if (h->discovery_polling) {
- hpsa_disable_rld_caching(h);
if (hpsa_luns_changed(h)) {
dev_info(&h->pdev->dev,
"driver discovery polling rescan.\n");
@@ -8601,7 +8807,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -8613,7 +8819,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_TODEVICE, NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -8623,7 +8829,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -8684,6 +8890,8 @@ static void hpsa_remove_one(struct pci_dev *pdev)
destroy_workqueue(h->rescan_ctlr_wq);
destroy_workqueue(h->resubmit_wq);
+ hpsa_delete_sas_host(h);
+
/*
* Call before disabling interrupts.
* scsi_remove_host can trigger I/O operations especially
@@ -8718,8 +8926,6 @@ static void hpsa_remove_one(struct pci_dev *pdev)
h->lockup_detected = NULL; /* init_one 2 */
/* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
- hpsa_delete_sas_host(h);
-
kfree(h); /* init_one 1 */
}
@@ -9207,9 +9413,9 @@ static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
struct sas_phy *phy = hpsa_sas_phy->phy;
sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
- sas_phy_free(phy);
if (hpsa_sas_phy->added_to_port)
list_del(&hpsa_sas_phy->phy_list_entry);
+ sas_phy_delete(phy);
kfree(hpsa_sas_phy);
}
@@ -9367,7 +9573,7 @@ static int hpsa_add_sas_host(struct ctlr_info *h)
struct hpsa_sas_port *hpsa_sas_port;
struct hpsa_sas_phy *hpsa_sas_phy;
- parent_dev = &h->scsi_host->shost_gendev;
+ parent_dev = &h->scsi_host->shost_dev;
hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
if (!hpsa_sas_node)
@@ -9458,7 +9664,7 @@ hpsa_sas_get_linkerrors(struct sas_phy *phy)
static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
- *identifier = 0;
+ *identifier = rphy->identify.sas_address;
return 0;
}
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 078afe448115..21a726e2eec6 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -142,6 +142,7 @@
#define DOORBELL_CTLR_RESET 0x00000004l
#define DOORBELL_CTLR_RESET2 0x00000020l
#define DOORBELL_CLEAR_EVENTS 0x00000040l
+#define DOORBELL_GENERATE_CHKPT 0x00000080l
#define CFGTBL_Trans_Simple 0x00000002l
#define CFGTBL_Trans_Performant 0x00000004l
@@ -779,6 +780,8 @@ struct bmic_identify_physical_device {
u8 phys_bay_in_box; /* phys drv bay this drive resides */
__le32 rpm; /* Drive rotational speed in rpm */
u8 device_type; /* type of drive */
+#define BMIC_DEVICE_TYPE_CONTROLLER 0x07
+
u8 sata_version; /* only valid when drive_type is SATA */
__le64 big_total_block_count;
__le64 ris_starting_lba;
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 2fd0ec651170..5da46052e179 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -2083,7 +2083,6 @@ int fc_lport_bsg_request(struct bsg_job *job)
{
struct fc_bsg_request *bsg_request = job->request;
struct fc_bsg_reply *bsg_reply = job->reply;
- struct request *rsp = job->req->next_rq;
struct Scsi_Host *shost = fc_bsg_to_shost(job);
struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport;
@@ -2092,8 +2091,6 @@ int fc_lport_bsg_request(struct bsg_job *job)
u32 did, tov;
bsg_reply->reply_payload_rcv_len = 0;
- if (rsp)
- scsi_req(rsp)->resid_len = job->reply_payload.payload_len;
mutex_lock(&lport->lp_mutex);
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
index cd6f99c1ae7e..7e5d262e7a7d 100644
--- a/drivers/scsi/libsas/sas_dump.c
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -24,10 +24,6 @@
#include "sas_dump.h"
-static const char *sas_hae_str[] = {
- [0] = "HAE_RESET",
-};
-
static const char *sas_porte_str[] = {
[0] = "PORTE_BYTES_DMAED",
[1] = "PORTE_BROADCAST_RCVD",
@@ -53,12 +49,6 @@ void sas_dprint_phye(int phyid, enum phy_event pe)
SAS_DPRINTK("phy%d: phy event: %s\n", phyid, sas_phye_str[pe]);
}
-void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he)
-{
- SAS_DPRINTK("ha %s: %s event\n", dev_name(sas_ha->dev),
- sas_hae_str[he]);
-}
-
void sas_dump_port(struct asd_sas_port *port)
{
SAS_DPRINTK("port%d: class:0x%x\n", port->id, port->class);
diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h
index 800e4c69093f..6aaee6b0fcdb 100644
--- a/drivers/scsi/libsas/sas_dump.h
+++ b/drivers/scsi/libsas/sas_dump.h
@@ -26,5 +26,4 @@
void sas_dprint_porte(int phyid, enum port_event pe);
void sas_dprint_phye(int phyid, enum phy_event pe);
-void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he);
void sas_dump_port(struct asd_sas_port *port);
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index c0d0d979b76d..0bb9eefc08c8 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -37,7 +37,7 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
if (test_bit(SAS_HA_DRAINING, &ha->state)) {
/* add it to the defer list, if not already pending */
if (list_empty(&sw->drain_node))
- list_add(&sw->drain_node, &ha->defer_q);
+ list_add_tail(&sw->drain_node, &ha->defer_q);
} else
rc = scsi_queue_work(ha->core.shost, &sw->work);
@@ -124,15 +124,7 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
mutex_unlock(&ha->disco_mutex);
}
-static int notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
-{
- BUG_ON(event >= HA_NUM_EVENTS);
-
- return sas_queue_event(event, &sas_ha->pending,
- &sas_ha->ha_events[event].work, sas_ha);
-}
-
-static int notify_port_event(struct asd_sas_phy *phy, enum port_event event)
+static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
{
struct sas_ha_struct *ha = phy->ha;
@@ -154,19 +146,7 @@ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
int sas_init_events(struct sas_ha_struct *sas_ha)
{
- static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = {
- [HAE_RESET] = sas_hae_reset,
- };
-
- int i;
-
- for (i = 0; i < HA_NUM_EVENTS; i++) {
- INIT_SAS_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
- sas_ha->ha_events[i].ha = sas_ha;
- }
-
- sas_ha->notify_ha_event = notify_ha_event;
- sas_ha->notify_port_event = notify_port_event;
+ sas_ha->notify_port_event = sas_notify_port_event;
sas_ha->notify_phy_event = sas_notify_phy_event;
return 0;
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 681fcb837354..64fa6f53cb8b 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -107,17 +107,6 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
hashed[2] = r & 0xFF;
}
-
-/* ---------- HA events ---------- */
-
-void sas_hae_reset(struct work_struct *work)
-{
- struct sas_ha_event *ev = to_sas_ha_event(work);
- struct sas_ha_struct *ha = ev->ha;
-
- clear_bit(HAE_RESET, &ha->pending);
-}
-
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
int error = 0;
@@ -155,7 +144,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
INIT_LIST_HEAD(&sas_ha->eh_ata_q);
return 0;
-
Undo_ports:
sas_unregister_ports(sas_ha);
Undo_phys:
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 8eb3f96fe068..231302273257 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -23,6 +23,7 @@
#include <scsi/scsi_host.h>
#include <linux/ktime.h>
+#include <linux/workqueue.h>
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
#define CONFIG_SCSI_LPFC_DEBUG_FS
@@ -653,6 +654,8 @@ struct lpfc_hba {
/* SLI4 specific HBA data structure */
struct lpfc_sli4_hba sli4_hba;
+ struct workqueue_struct *wq;
+
struct lpfc_sli sli;
uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c17677f494af..82f6e219ee34 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3134,7 +3134,8 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
- return snprintf(buf, PAGE_SIZE, "%d\n", pring->txq_max);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pring ? pring->txq_max : 0);
}
static DEVICE_ATTR(txq_hw, S_IRUGO,
@@ -3147,7 +3148,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
- return snprintf(buf, PAGE_SIZE, "%d\n", pring->txcmplq_max);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pring ? pring->txcmplq_max : 0);
}
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
@@ -3246,6 +3248,11 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
continue;
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ if (ndlp->nrport)
+ nvme_fc_set_remoteport_devloss(ndlp->nrport->remoteport,
+ vport->cfg_devloss_tmo);
+#endif
}
spin_unlock_irq(shost->host_lock);
}
@@ -3375,7 +3382,7 @@ LPFC_ATTR_R(nvmet_mrq,
*/
LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
- "Define fc4 type to register with fabric.");
+ "Enable FC4 Protocol support - FCP / NVME");
/*
* lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME
@@ -3391,7 +3398,7 @@ LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
* percentage will go to NVME.
*/
LPFC_ATTR_R(xri_split, 50, 10, 90,
- "Division of XRI resources between SCSI and NVME");
+ "Percentage of FCP XRI resources versus NVME");
/*
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index fe9e1c079c20..d89816222b23 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2911,7 +2911,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
}
}
- if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
+ if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
ret_val = -ENOMEM;
goto err_post_rxbufs_exit;
}
@@ -5421,6 +5421,8 @@ lpfc_bsg_timeout(struct bsg_job *job)
struct lpfc_iocbq *check_iocb, *next_iocb;
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return -EIO;
/* if job's driver data is NULL, the command completed or is in the
* the process of completing. In this case, return status to request
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index d50c481ec41c..2bf5ad3b1512 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2227,7 +2227,7 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
kfree(phba->nvmeio_trc);
/* Allocate new trace buffer and initialize */
- phba->nvmeio_trc = kmalloc((sizeof(struct lpfc_debugfs_nvmeio_trc) *
+ phba->nvmeio_trc = kzalloc((sizeof(struct lpfc_debugfs_nvmeio_trc) *
sz), GFP_KERNEL);
if (!phba->nvmeio_trc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -2235,8 +2235,6 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
"nvmeio_trc buffer\n");
return -ENOMEM;
}
- memset(phba->nvmeio_trc, 0,
- (sizeof(struct lpfc_debugfs_nvmeio_trc) * sz));
atomic_set(&phba->nvmeio_trc_cnt, 0);
phba->nvmeio_trc_on = 0;
phba->nvmeio_trc_output_idx = 0;
@@ -5457,7 +5455,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba->nvmeio_trc_size = lpfc_debugfs_max_nvmeio_trc;
/* Allocate trace buffer and initialize */
- phba->nvmeio_trc = kmalloc(
+ phba->nvmeio_trc = kzalloc(
(sizeof(struct lpfc_debugfs_nvmeio_trc) *
phba->nvmeio_trc_size), GFP_KERNEL);
@@ -5467,9 +5465,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
"nvmeio_trc buffer\n");
goto nvmeio_off;
}
- memset(phba->nvmeio_trc, 0,
- (sizeof(struct lpfc_debugfs_nvmeio_trc) *
- phba->nvmeio_trc_size));
phba->nvmeio_trc_on = 1;
phba->nvmeio_trc_output_idx = 0;
phba->nvmeio_trc = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 0dd6c21433fe..39d5b146202e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -5394,10 +5394,6 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
(len + pcmd), vport, ndlp);
len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
&rdp_context->link_stat);
- /* Check if nport is logged, BZ190632 */
- if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
- goto lpfc_skip_descriptor;
-
len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
&rdp_context->link_stat, vport);
len += lpfc_rdp_res_oed_temp_desc(phba,
@@ -5418,7 +5414,6 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
rdp_context->page_a0, vport);
-lpfc_skip_descriptor:
rdp_res->length = cpu_to_be32(len - 8);
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
@@ -5540,7 +5535,6 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
-
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2422 ELS RDP Request "
"dec len %d tag x%x port_id %d len %d\n",
@@ -5549,12 +5543,6 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
be32_to_cpu(rdp_req->nport_id_desc.nport_id),
be32_to_cpu(rdp_req->nport_id_desc.length));
- if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
- !phba->cfg_enable_SmartSAN) {
- rjt_err = LSRJT_UNABLE_TPC;
- rjt_expl = LSEXP_PORT_LOGIN_REQ;
- goto error;
- }
if (sizeof(struct fc_rdp_nport_desc) !=
be32_to_cpu(rdp_req->rdp_des_length))
goto rjt_logerr;
@@ -7430,6 +7418,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
timeout = (uint32_t)(phba->fc_ratov << 1);
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return;
if ((phba->pport->load_flag & FC_UNLOADING))
return;
@@ -9310,6 +9300,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return;
+
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
list) {
@@ -9416,7 +9409,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
rxid, 1);
/* Check if TXQ queue needs to be serviced */
- if (!(list_empty(&pring->txq)))
+ if (pring && !list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 8d491084eb5d..2bafde2b7cfe 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -3324,7 +3324,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Unblock ELS traffic */
pring = lpfc_phba_elsring(phba);
- pring->flag &= ~LPFC_STOP_IOCB_EVENT;
+ if (pring)
+ pring->flag &= ~LPFC_STOP_IOCB_EVENT;
/* Check for error */
if (mb->mbxStatus) {
@@ -4981,7 +4982,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_cancel_retry_delay_tmo(vport, ndlp);
if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
!(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
- !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+ !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
+ phba->sli_rev != LPFC_SLI_REV4) {
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
*/
@@ -5429,6 +5431,8 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
psli = &phba->sli;
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return;
/* Error matching iocb on txq or txcmplq
* First check the txq.
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1db0a38683f4..2b145966c73f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3636,7 +3636,7 @@ struct lpfc_mbx_get_port_name {
#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
#define MB_CQE_STATUS_DMA_FAILED 0x5
-#define LPFC_MBX_WR_CONFIG_MAX_BDE 8
+#define LPFC_MBX_WR_CONFIG_MAX_BDE 1
struct lpfc_mbx_wr_object {
struct mbox_header header;
union {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6a1e28ba9258..2b7ea7e53e12 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3216,6 +3216,9 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_destroy_vport_work_array(phba, vports);
lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
+
+ if (phba->wq)
+ flush_workqueue(phba->wq);
}
/**
@@ -4173,6 +4176,9 @@ void
lpfc_stop_port(struct lpfc_hba *phba)
{
phba->lpfc_stop_port(phba);
+
+ if (phba->wq)
+ flush_workqueue(phba->wq);
}
/**
@@ -6363,6 +6369,9 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
return error;
}
+ /* workqueue for deferred irq use */
+ phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+
return 0;
}
@@ -6377,6 +6386,12 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
+ if (phba->wq) {
+ flush_workqueue(phba->wq);
+ destroy_workqueue(phba->wq);
+ phba->wq = NULL;
+ }
+
/* Stop kernel worker thread */
kthread_stop(phba->worker_thread);
}
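
The lpfc changes in this series route deferred interrupt work through a driver-owned workqueue: phba->wq is created with WQ_MEM_RECLAIM in phase-2 setup, flushed whenever the port is stopped or taken offline, and flushed plus destroyed in the phase-2 teardown above. A minimal kernel-module-style sketch of that lifecycle using the stock workqueue API (not lpfc's actual structures):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	pr_info("deferred work running\n");
}

static int __init example_init(void)
{
	/* WQ_MEM_RECLAIM guarantees forward progress under memory pressure. */
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_work_fn);
	queue_work(example_wq, &example_work);   /* e.g. deferred from an IRQ handler */
	return 0;
}

static void __exit example_exit(void)
{
	if (example_wq) {
		flush_workqueue(example_wq);     /* wait for queued items to finish */
		destroy_workqueue(example_wq);
		example_wq = NULL;
	}
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
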
@@ -11397,6 +11412,13 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
/* Remove FC host and then SCSI host with the physical port */
fc_remove_host(shost);
scsi_remove_host(shost);
+ /*
+ * Bring down the SLI Layer. This step disables all interrupts,
+ * clears the rings, discards all mailbox commands, and resets
+ * the HBA FCoE function.
+ */
+ lpfc_debugfs_terminate(vport);
+ lpfc_sli4_hba_unset(phba);
/* Perform ndlp cleanup on the physical port. The nvme and nvmet
* localports are destroyed after to cleanup all transport memory.
@@ -11405,14 +11427,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
lpfc_nvmet_destroy_targetport(phba);
lpfc_nvme_destroy_localport(vport);
- /*
- * Bring down the SLI Layer. This step disables all interrupts,
- * clears the rings, discards all mailbox commands, and resets
- * the HBA FCoE function.
- */
- lpfc_debugfs_terminate(vport);
- lpfc_sli4_hba_unset(phba);
+ lpfc_stop_hba_timers(phba);
spin_lock_irq(&phba->hbalock);
list_del_init(&vport->listentry);
spin_unlock_irq(&phba->hbalock);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index f3ad7cac355d..b6957d944b9a 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -216,7 +216,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
pring = lpfc_phba_elsring(phba);
/* In case of error recovery path, we might have a NULL pring here */
- if (!pring)
+ if (unlikely(!pring))
return;
/* Abort outstanding I/O on NPort <nlp_DID> */
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 23bdb1ca106e..517ae570e507 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -416,6 +416,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
+ if (vport->load_flag & FC_UNLOADING)
+ return -ENODEV;
+
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
@@ -667,15 +670,17 @@ lpfc_nvme_ktime(struct lpfc_hba *phba,
struct lpfc_nvme_buf *lpfc_ncmd)
{
uint64_t seg1, seg2, seg3, seg4;
+ uint64_t segsum;
- if (!phba->ktime_on)
- return;
if (!lpfc_ncmd->ts_last_cmd ||
!lpfc_ncmd->ts_cmd_start ||
!lpfc_ncmd->ts_cmd_wqput ||
!lpfc_ncmd->ts_isr_cmpl ||
!lpfc_ncmd->ts_data_nvme)
return;
+
+ if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
+ return;
if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
return;
if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
@@ -695,15 +700,23 @@ lpfc_nvme_ktime(struct lpfc_hba *phba,
* cmpl is handled off to the NVME Layer.
*/
seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
- if (seg1 > 5000000) /* 5 ms - for sequential IOs */
- return;
+ if (seg1 > 5000000) /* 5 ms - for sequential IOs only */
+ seg1 = 0;
/* Calculate times relative to start of IO */
seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
- seg3 = (lpfc_ncmd->ts_isr_cmpl -
- lpfc_ncmd->ts_cmd_start) - seg2;
- seg4 = (lpfc_ncmd->ts_data_nvme -
- lpfc_ncmd->ts_cmd_start) - seg2 - seg3;
+ segsum = seg2;
+ seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
+ if (segsum > seg3)
+ return;
+ seg3 -= segsum;
+ segsum += seg3;
+
+ seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
+ if (segsum > seg4)
+ return;
+ seg4 -= segsum;
+
phba->ktime_data_samples++;
phba->ktime_seg1_total += seg1;
if (seg1 < phba->ktime_seg1_min)
@@ -840,7 +853,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
} else {
lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
LPFC_IOCB_STATUS_MASK);
- lpfc_ncmd->result = wcqe->parameter;
+ lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
/* For NVME, the only failure path that results in an
* IO error is when the adapter rejects it. All other
@@ -874,9 +887,20 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_ncmd->status, lpfc_ncmd->result,
wcqe->total_data_placed);
break;
+ case IOSTAT_LOCAL_REJECT:
+ /* Let fall through to set command final state. */
+ if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NVME_IOERR,
+ "6032 Delay Aborted cmd %p "
+ "nvme cmd %p, xri x%x, "
+ "xb %d\n",
+ lpfc_ncmd, nCmd,
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
+ bf_get(lpfc_wcqe_c_xb, wcqe));
default:
out_err:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6072 NVME Completion Error: xri %x "
"status x%x result x%x placed x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
@@ -902,7 +926,7 @@ out_err:
* owns the dma address.
*/
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (lpfc_ncmd->ts_cmd_start) {
lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
lpfc_ncmd->ts_data_nvme = ktime_get_ns();
phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
@@ -920,12 +944,18 @@ out_err:
#endif
freqpriv = nCmd->private;
freqpriv->nvme_buf = NULL;
- nCmd->done(nCmd);
+
+ /* NVME targets need completion held off until the abort exchange
+ * completes.
+ */
+ if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY))
+ nCmd->done(nCmd);
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_ncmd->nrport = NULL;
spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* Call release with XB=1 to queue the IO into the abort list. */
lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
@@ -1119,12 +1149,12 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
first_data_sgl = sgl;
lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
- if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt) {
+ if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt + 1) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6058 Too many sg segments from "
"NVME Transport. Max %d, "
"nvmeIO sg_cnt %d\n",
- phba->cfg_nvme_seg_cnt,
+ phba->cfg_nvme_seg_cnt + 1,
lpfc_ncmd->seg_cnt);
lpfc_ncmd->seg_cnt = 0;
return 1;
@@ -1225,6 +1255,21 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
phba = vport->phba;
+ if (vport->load_flag & FC_UNLOADING) {
+ ret = -ENODEV;
+ goto out_fail;
+ }
+
+ /* Validate pointers. */
+ if (!pnvme_lport || !pnvme_rport || !freqpriv) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR | LOG_NODE,
+ "6117 No Send:IO submit ptrs NULL, lport %p, "
+ "rport %p fcreq_priv %p\n",
+ pnvme_lport, pnvme_rport, freqpriv);
+ ret = -ENODEV;
+ goto out_fail;
+ }
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on)
start = ktime_get_ns();
@@ -1283,9 +1328,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_fail;
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (start) {
lpfc_ncmd->ts_cmd_start = start;
lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
+ } else {
+ lpfc_ncmd->ts_cmd_start = 0;
}
#endif
@@ -1327,7 +1374,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
if (ret) {
atomic_dec(&ndlp->cmd_pending);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6113 FCP could not issue WQE err %x "
"sid: x%x did: x%x oxid: x%x\n",
ret, vport->fc_myDID, ndlp->nlp_DID,
@@ -1336,7 +1383,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on)
+ if (lpfc_ncmd->ts_cmd_start)
lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
@@ -1387,7 +1434,7 @@ void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_wcqe_complete *abts_cmpl)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6145 ABORT_XRI_CN completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x "
"req_tag x%x, status x%x, hwstatus x%x\n",
@@ -1938,14 +1985,13 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
* pci bus space for an I/O. The DMA buffer includes the
* number of SGE's necessary to support the sg_tablesize.
*/
- lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
- GFP_KERNEL,
- &lpfc_ncmd->dma_handle);
+ lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+ GFP_KERNEL,
+ &lpfc_ncmd->dma_handle);
if (!lpfc_ncmd->data) {
kfree(lpfc_ncmd);
break;
}
- memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
@@ -2042,9 +2088,6 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
- if (lpfc_test_rrq_active(phba, ndlp,
- lpfc_ncmd->cur_iocbq.sli4_lxritag))
- continue;
list_del_init(&lpfc_ncmd->list);
found = 1;
break;
@@ -2057,9 +2100,6 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_unlock(&phba->nvme_buf_list_put_lock);
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
- if (lpfc_test_rrq_active(
- phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
- continue;
list_del_init(&lpfc_ncmd->list);
found = 1;
break;
@@ -2096,7 +2136,6 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
iflag);
- lpfc_ncmd->nvmeCmd = NULL;
list_add_tail(&lpfc_ncmd->list,
&phba->sli4_hba.lpfc_abts_nvme_buf_list);
spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
@@ -2296,6 +2335,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_nvme_rport *rport;
struct nvme_fc_remote_port *remote_port;
struct nvme_fc_port_info rpinfo;
+ struct lpfc_nodelist *prev_ndlp;
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
"6006 Register NVME PORT. DID x%06x nlptype x%x\n",
@@ -2332,7 +2372,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* new rport.
*/
rport = remote_port->private;
- if (ndlp->nrport == rport) {
+ if (ndlp->nrport) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NVME_DISC,
"6014 Rebinding lport to "
@@ -2343,24 +2383,33 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
remote_port->port_role,
ndlp->nlp_type,
ndlp->nlp_DID);
- } else {
- /* New rport. */
- rport->remoteport = remote_port;
- rport->lport = lport;
- rport->ndlp = lpfc_nlp_get(ndlp);
- if (!rport->ndlp)
- return -1;
- ndlp->nrport = rport;
- lpfc_printf_vlog(vport, KERN_INFO,
- LOG_NVME_DISC | LOG_NODE,
- "6022 Binding new rport to "
- "lport %p Rport WWNN 0x%llx, "
- "Rport WWPN 0x%llx DID "
- "x%06x Role x%x\n",
- lport,
- rpinfo.node_name, rpinfo.port_name,
- rpinfo.port_id, rpinfo.port_role);
+ prev_ndlp = rport->ndlp;
+
+ /* Sever the ndlp<->rport connection before dropping
+ * the ndlp ref from register.
+ */
+ ndlp->nrport = NULL;
+ rport->ndlp = NULL;
+ if (prev_ndlp)
+ lpfc_nlp_put(ndlp);
}
+
+ /* Clean bind the rport to the ndlp. */
+ rport->remoteport = remote_port;
+ rport->lport = lport;
+ rport->ndlp = lpfc_nlp_get(ndlp);
+ if (!rport->ndlp)
+ return -1;
+ ndlp->nrport = rport;
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NVME_DISC | LOG_NODE,
+ "6022 Binding new rport to "
+ "lport %p Rport WWNN 0x%llx, "
+ "Rport WWPN 0x%llx DID "
+ "x%06x Role x%x\n",
+ lport,
+ rpinfo.node_name, rpinfo.port_name,
+ rpinfo.port_id, rpinfo.port_role);
} else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE,
@@ -2454,18 +2503,18 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* @axri: pointer to the fcp xri abort wcqe structure.
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
- * FCP aborted xri.
+ * NVME aborted xri. Aborted NVME IO commands are completed to the transport
+ * here.
**/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
- uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
+ struct nvmefc_fcp_req *nvme_cmd = NULL;
struct lpfc_nodelist *ndlp;
unsigned long iflag = 0;
- int rrq_empty = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
@@ -2481,25 +2530,24 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
spin_unlock(
&phba->sli4_hba.abts_nvme_buf_list_lock);
- rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
ndlp = lpfc_ncmd->ndlp;
- if (ndlp) {
- lpfc_set_rrq_active(
- phba, ndlp,
- lpfc_ncmd->cur_iocbq.sli4_lxritag,
- rxid, 1);
+ if (ndlp)
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
- }
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6311 XRI Aborted xri x%x tag x%x "
- "released\n",
- xri, lpfc_ncmd->cur_iocbq.iotag);
-
+ "6311 nvme_cmd %p xri x%x tag x%x "
+ "abort complete and xri released\n",
+ lpfc_ncmd->nvmeCmd, xri,
+ lpfc_ncmd->cur_iocbq.iotag);
+
+ /* Aborted NVME commands are required to not complete
+ * before the abort exchange command fully completes.
+ * Once completed, it is available via the put list.
+ */
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->done(nvme_cmd);
lpfc_release_nvme_buf(phba, lpfc_ncmd);
- if (rrq_empty)
- lpfc_worker_wake_up(phba);
return;
}
}
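
With this change an aborted NVME command is no longer completed from the normal WQE-completion path while the exchange is still busy (LPFC_SBUF_XBUSY); nCmd->done() is deferred until the XRI-aborted event arrives, where the buffer is then released to the put list. A small sketch of that deferred-completion pattern (illustrative types, not the lpfc structures):

#include <stdbool.h>
#include <stdio.h>

struct io_buf {
	bool xbusy;                      /* abort exchange still outstanding */
	void (*done)(struct io_buf *);   /* upper-layer completion */
};

static void transport_done(struct io_buf *io)
{
	printf("completed to transport\n");
}

/* Normal completion path: hold the upper-layer callback while XB is set. */
static void io_cmpl(struct io_buf *io)
{
	if (!io->xbusy)
		io->done(io);
	/* buffer release happens either way; XB=1 parks it on the abort list */
}

/* XRI-aborted event: the abort exchange is finished, complete and release. */
static void xri_aborted(struct io_buf *io)
{
	io->xbusy = false;
	io->done(io);
}

int main(void)
{
	struct io_buf io = { .xbusy = true, .done = transport_done };

	io_cmpl(&io);       /* nothing yet: abort still outstanding */
	xri_aborted(&io);   /* now the transport sees the completion */
	return 0;
}
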
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 0b7c1a49e203..84cf1b9079f7 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -76,7 +76,7 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
unsigned long iflag;
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6313 NVMET Defer ctx release xri x%x flg x%x\n",
ctxp->oxid, ctxp->flag);
@@ -221,9 +221,8 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (ctxp->ts_cmd_nvme) {
ctxp->ts_cmd_nvme = ktime_get_ns();
- ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
ctxp->ts_nvme_data = 0;
ctxp->ts_data_wqput = 0;
ctxp->ts_isr_data = 0;
@@ -289,9 +288,7 @@ lpfc_nvmet_ktime(struct lpfc_hba *phba,
{
uint64_t seg1, seg2, seg3, seg4, seg5;
uint64_t seg6, seg7, seg8, seg9, seg10;
-
- if (!phba->ktime_on)
- return;
+ uint64_t segsum;
if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
!ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
@@ -300,6 +297,8 @@ lpfc_nvmet_ktime(struct lpfc_hba *phba,
!ctxp->ts_isr_status || !ctxp->ts_status_nvme)
return;
+ if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
+ return;
if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
return;
if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
@@ -344,34 +343,66 @@ lpfc_nvmet_ktime(struct lpfc_hba *phba,
* (Segments 1 thru 4) for READDATA_RSP
*/
seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
- seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
- seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
- seg1 - seg2;
- seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
- seg1 - seg2 - seg3;
- seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
- seg1 - seg2 - seg3 - seg4;
+ segsum = seg1;
+
+ seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
+ if (segsum > seg2)
+ return;
+ seg2 -= segsum;
+ segsum += seg2;
+
+ seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
+ if (segsum > seg3)
+ return;
+ seg3 -= segsum;
+ segsum += seg3;
+
+ seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
+ if (segsum > seg4)
+ return;
+ seg4 -= segsum;
+ segsum += seg4;
+
+ seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
+ if (segsum > seg5)
+ return;
+ seg5 -= segsum;
+ segsum += seg5;
+
/* For auto rsp commands seg6 thru seg10 will be 0 */
if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
- seg6 = (ctxp->ts_nvme_status -
- ctxp->ts_isr_cmd) -
- seg1 - seg2 - seg3 - seg4 - seg5;
- seg7 = (ctxp->ts_status_wqput -
- ctxp->ts_isr_cmd) -
- seg1 - seg2 - seg3 -
- seg4 - seg5 - seg6;
- seg8 = (ctxp->ts_isr_status -
- ctxp->ts_isr_cmd) -
- seg1 - seg2 - seg3 - seg4 -
- seg5 - seg6 - seg7;
- seg9 = (ctxp->ts_status_nvme -
- ctxp->ts_isr_cmd) -
- seg1 - seg2 - seg3 - seg4 -
- seg5 - seg6 - seg7 - seg8;
+ seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
+ if (segsum > seg6)
+ return;
+ seg6 -= segsum;
+ segsum += seg6;
+
+ seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
+ if (segsum > seg7)
+ return;
+ seg7 -= segsum;
+ segsum += seg7;
+
+ seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
+ if (segsum > seg8)
+ return;
+ seg8 -= segsum;
+ segsum += seg8;
+
+ seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
+ if (segsum > seg9)
+ return;
+ seg9 -= segsum;
+ segsum += seg9;
+
+ if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
+ return;
seg10 = (ctxp->ts_isr_status -
ctxp->ts_isr_cmd);
} else {
+ if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
+ return;
seg6 = 0;
seg7 = 0;
seg8 = 0;
@@ -463,7 +494,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_nvmet_tgtport *tgtp;
struct nvmefc_tgt_fcp_req *rsp;
struct lpfc_nvmet_rcv_ctx *ctxp;
- uint32_t status, result, op, start_clean;
+ uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t id;
#endif
@@ -491,17 +522,21 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if (tgtp)
atomic_inc(&tgtp->xmt_fcp_rsp_error);
+ logerr = LOG_NVME_IOERR;
+
/* pick up SLI4 exchange busy condition */
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
ctxp->flag |= LPFC_NVMET_XBUSY;
+ logerr |= LOG_NVME_ABTS;
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
- ctxp->oxid, status, result);
} else {
ctxp->flag &= ~LPFC_NVMET_XBUSY;
}
+ lpfc_printf_log(phba, KERN_INFO, logerr,
+ "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
+ ctxp->oxid, status, result, ctxp->flag);
+
} else {
rsp->fcp_error = NVME_SC_SUCCESS;
if (op == NVMET_FCOP_RSP)
@@ -519,7 +554,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
ctxp->entry_cnt++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (ctxp->ts_cmd_nvme) {
if (rsp->op == NVMET_FCOP_READDATA_RSP) {
ctxp->ts_isr_data =
cmdwqe->isr_timestamp;
@@ -553,7 +588,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
#endif
rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on)
+ if (ctxp->ts_cmd_nvme)
lpfc_nvmet_ktime(phba, ctxp);
#endif
/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
@@ -563,7 +598,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
memset(((char *)cmdwqe) + start_clean, 0,
(sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (ctxp->ts_cmd_nvme) {
ctxp->ts_isr_data = cmdwqe->isr_timestamp;
ctxp->ts_data_nvme = ktime_get_ns();
}
@@ -597,6 +632,9 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
struct ulp_bde64 bpl;
int rc;
+ if (phba->pport->load_flag & FC_UNLOADING)
+ return -ENODEV;
+
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
@@ -678,8 +716,13 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
struct lpfc_iocbq *nvmewqeq;
int rc;
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ rc = -ENODEV;
+ goto aerr;
+ }
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (ctxp->ts_cmd_nvme) {
if (rsp->op == NVMET_FCOP_RSP)
ctxp->ts_nvme_status = ktime_get_ns();
else
@@ -734,7 +777,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (!phba->ktime_on)
+ if (!ctxp->ts_cmd_nvme)
return 0;
if (rsp->op == NVMET_FCOP_RSP)
ctxp->ts_status_wqput = ktime_get_ns();
@@ -777,6 +820,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct lpfc_hba *phba = ctxp->phba;
unsigned long flags;
+ if (phba->pport->load_flag & FC_UNLOADING)
+ return;
+
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
ctxp->oxid, ctxp->flag, ctxp->state);
@@ -787,6 +833,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
spin_lock_irqsave(&ctxp->ctxlock, flags);
+ ctxp->state = LPFC_NVMET_STE_ABORT;
/* Since iaab/iaar are NOT set, we need to check
* if the firmware is in process of aborting IO
@@ -1125,9 +1172,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
}
lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
- lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
- NVMET_FCTGTFEAT_CMD_IN_ISR |
- NVMET_FCTGTFEAT_OPDONE_IN_ISR;
+ lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
@@ -1138,9 +1183,14 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
#endif
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
- "6025 Cannot register NVME targetport "
- "x%x\n", error);
+ "6025 Cannot register NVME targetport x%x: "
+ "portnm %llx nodenm %llx segs %d qs %d\n",
+ error,
+ pinfo.port_name, pinfo.node_name,
+ lpfc_tgttemplate.max_sgl_segments,
+ lpfc_tgttemplate.max_hw_queues);
phba->targetport = NULL;
+ phba->nvmet_support = 0;
lpfc_nvmet_cleanup_io_context(phba);
@@ -1152,9 +1202,11 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6026 Registered NVME "
"targetport: %p, private %p "
- "portnm %llx nodenm %llx\n",
+ "portnm %llx nodenm %llx segs %d qs %d\n",
phba->targetport, tgtp,
- pinfo.port_name, pinfo.node_name);
+ pinfo.port_name, pinfo.node_name,
+ lpfc_tgttemplate.max_sgl_segments,
+ lpfc_tgttemplate.max_hw_queues);
atomic_set(&tgtp->rcv_ls_req_in, 0);
atomic_set(&tgtp->rcv_ls_req_out, 0);
@@ -1457,6 +1509,7 @@ static struct lpfc_nvmet_ctxbuf *
lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
struct lpfc_nvmet_ctx_info *current_infop)
{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
struct lpfc_nvmet_ctx_info *get_infop;
int i;
@@ -1504,6 +1557,7 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
get_infop = get_infop->nvmet_ctx_next_cpu;
}
+#endif
/* Nothing found, all contexts for the MRQ are in-flight */
return NULL;
}
@@ -1631,7 +1685,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (isr_timestamp) {
ctxp->ts_isr_cmd = isr_timestamp;
ctxp->ts_cmd_nvme = ktime_get_ns();
ctxp->ts_nvme_data = 0;
@@ -1642,6 +1696,8 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->ts_status_wqput = 0;
ctxp->ts_isr_status = 0;
ctxp->ts_status_nvme = 0;
+ } else {
+ ctxp->ts_cmd_nvme = 0;
}
#endif
@@ -2320,7 +2376,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
atomic_inc(&tgtp->xmt_abort_rsp);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6165 ABORT cmpl: xri x%x flg x%x (%d) "
"WCQE: %08x %08x %08x %08x\n",
ctxp->oxid, ctxp->flag, released,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4edb81073409..aecd2399005d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -80,8 +80,8 @@ static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
int);
-static int lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
- struct lpfc_eqe *eqe, uint32_t qidx);
+static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
+ struct lpfc_eqe *eqe, uint32_t qidx);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
@@ -2732,7 +2732,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*
* This function looks up the iocb_lookup table to get the command iocb
* corresponding to the given response iocb using the iotag of the
- * response iocb. This function is called with the hbalock held.
+ * response iocb. This function is called with the hbalock held
+ * for sli3 devices or the ring_lock for sli4 devices.
* This function returns the command iocb object if it finds the command
* iocb else returns NULL.
**/
@@ -2828,9 +2829,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
unsigned long iflag;
/* Based on the iotag field, get the cmd IOCB from the txcmplq */
- spin_lock_irqsave(&phba->hbalock, iflag);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock_irqsave(&pring->ring_lock, iflag);
+ else
+ spin_lock_irqsave(&phba->hbalock, iflag);
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock_irqrestore(&pring->ring_lock, iflag);
+ else
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
if (cmdiocbp) {
if (cmdiocbp->iocb_cmpl) {
@@ -9396,10 +9403,13 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
* for abort iocb hba_wqidx should already
* be setup based on what work queue we used.
*/
- if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
+ if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
piocb->hba_wqidx =
lpfc_sli4_scmd_to_wqidx_distr(phba,
piocb->context1);
+ piocb->hba_wqidx = piocb->hba_wqidx %
+ phba->cfg_fcp_io_channel;
+ }
return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
} else {
if (unlikely(!phba->sli4_hba.oas_wq))
@@ -10632,6 +10642,14 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
(cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
return 0;
+ if (!pring) {
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ goto abort_iotag_exit;
+ }
+
/*
* If we're unloading, don't abort iocb on the ELS ring, but change
* the callback so that nothing happens when it finishes.
@@ -12500,6 +12518,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
unsigned long iflags;
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return NULL;
wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
spin_lock_irqsave(&pring->ring_lock, iflags);
@@ -12507,19 +12527,21 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
/* Look up the ELS command IOCB and create pseudo response IOCB */
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
- /* Put the iocb back on the txcmplq */
- lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
-
if (unlikely(!cmdiocbq)) {
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0386 ELS complete with no corresponding "
- "cmdiocb: iotag (%d)\n",
- bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
+ wcqe->word0, wcqe->total_data_placed,
+ wcqe->parameter, wcqe->word3);
lpfc_sli_release_iocbq(phba, irspiocbq);
return NULL;
}
+ /* Put the iocb back on the txcmplq */
+ lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
/* Fake the irspiocbq and copy necessary response information */
lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
@@ -13010,14 +13032,11 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
* completion queue, and then return.
*
**/
-static int
+static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
struct lpfc_queue *speq)
{
struct lpfc_queue *cq = NULL, *childq;
- struct lpfc_cqe *cqe;
- bool workposted = false;
- int ecount = 0;
uint16_t cqid;
/* Get the reference to the corresponding CQ */
@@ -13034,48 +13053,84 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0365 Slow-path CQ identifier "
"(%d) does not exist\n", cqid);
- return 0;
+ return;
}
/* Save EQ associated with this CQ */
cq->assoc_qp = speq;
+ if (!queue_work(phba->wq, &cq->spwork))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0390 Cannot schedule soft IRQ "
+ "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
+ cqid, cq->queue_id, smp_processor_id());
+}
+
+/**
+ * lpfc_sli4_sp_process_cq - Process a slow-path completion queue
+ * @work: Pointer to the work_struct embedded in the slow-path queue.
+ *
+ * This routine processes the entries of a slow-path completion queue that
+ * lpfc_sli4_sp_handle_eqe() scheduled onto the driver workqueue. It walks
+ * every entry on the completion queue, dispatches each one to the handler
+ * that matches the queue type, rearms the completion queue, and then
+ * returns.
+ *
+ **/
+static void
+lpfc_sli4_sp_process_cq(struct work_struct *work)
+{
+ struct lpfc_queue *cq =
+ container_of(work, struct lpfc_queue, spwork);
+ struct lpfc_hba *phba = cq->phba;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ int ccount = 0;
+
/* Process all the entries to the CQ */
switch (cq->type) {
case LPFC_MCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
- if (!(++ecount % cq->entry_repost))
+ if (!(++ccount % cq->entry_repost))
break;
cq->CQ_mbox++;
}
break;
case LPFC_WCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
- if ((cq->subtype == LPFC_FCP) ||
- (cq->subtype == LPFC_NVME))
+ if (cq->subtype == LPFC_FCP ||
+ cq->subtype == LPFC_NVME) {
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on)
+ cq->isr_timestamp = ktime_get_ns();
+ else
+ cq->isr_timestamp = 0;
+#endif
workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
cqe);
- else
+ } else {
workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
cqe);
- if (!(++ecount % cq->entry_repost))
+ }
+ if (!(++ccount % cq->entry_repost))
break;
}
/* Track the max number of CQEs processed in 1 EQ */
- if (ecount > cq->CQ_max_cqe)
- cq->CQ_max_cqe = ecount;
+ if (ccount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ccount;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0370 Invalid completion queue type (%d)\n",
cq->type);
- return 0;
+ return;
}
/* Catch the no cq entry condition, log an error */
- if (unlikely(ecount == 0))
+ if (unlikely(ccount == 0))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0371 No entry from the CQ: identifier "
"(x%x), type (%d)\n", cq->queue_id, cq->type);
@@ -13086,8 +13141,6 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
/* wake up worker thread if there are works to be done */
if (workposted)
lpfc_worker_wake_up(phba);
-
- return ecount;
}
/**
@@ -13143,11 +13196,9 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
return;
}
-
- if (cq->assoc_qp)
- cmdiocbq->isr_timestamp =
- cq->assoc_qp->isr_timestamp;
-
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ cmdiocbq->isr_timestamp = cq->isr_timestamp;
+#endif
if (cmdiocbq->iocb_cmpl == NULL) {
if (cmdiocbq->wqe_cmpl) {
if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
@@ -13292,7 +13343,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
lpfc_nvmet_unsol_fcp_event(
phba, idx, dma_buf,
- cq->assoc_qp->isr_timestamp);
+ cq->isr_timestamp);
return false;
}
drop:
@@ -13395,15 +13446,12 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
* queue and process all the entries on the completion queue, rearm the
* completion queue, and then return.
**/
-static int
+static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
uint32_t qidx)
{
struct lpfc_queue *cq = NULL;
- struct lpfc_cqe *cqe;
- bool workposted = false;
uint16_t cqid, id;
- int ecount = 0;
if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -13411,7 +13459,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
"event: majorcode=x%x, minorcode=x%x\n",
bf_get_le32(lpfc_eqe_major_code, eqe),
bf_get_le32(lpfc_eqe_minor_code, eqe));
- return 0;
+ return;
}
/* Get the reference to the corresponding CQ */
@@ -13448,9 +13496,8 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
/* Otherwise this is a Slow path event */
if (cq == NULL) {
- ecount = lpfc_sli4_sp_handle_eqe(phba, eqe,
- phba->sli4_hba.hba_eq[qidx]);
- return ecount;
+ lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
+ return;
}
process_cq:
@@ -13459,26 +13506,61 @@ process_cq:
"0368 Miss-matched fast-path completion "
"queue identifier: eqcqid=%d, fcpcqid=%d\n",
cqid, cq->queue_id);
- return 0;
+ return;
}
/* Save EQ associated with this CQ */
cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
+ if (!queue_work(phba->wq, &cq->irqwork))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0363 Cannot schedule soft IRQ "
+ "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
+ cqid, cq->queue_id, smp_processor_id());
+}
+
+/**
+ * lpfc_sli4_hba_process_cq - Process a fast-path completion queue
+ * @work: Pointer to the work_struct embedded in the fast-path queue.
+ *
+ * This routine processes the entries of a fast-path completion queue that
+ * lpfc_sli4_hba_handle_eqe() scheduled onto the driver workqueue. It walks
+ * every entry on the completion queue, dispatches each one to the fast-path
+ * handler, rearms the completion queue, and then returns.
+ **/
+static void
+lpfc_sli4_hba_process_cq(struct work_struct *work)
+{
+ struct lpfc_queue *cq =
+ container_of(work, struct lpfc_queue, irqwork);
+ struct lpfc_hba *phba = cq->phba;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ int ccount = 0;
+
/* Process all the entries to the CQ */
while ((cqe = lpfc_sli4_cq_get(cq))) {
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on)
+ cq->isr_timestamp = ktime_get_ns();
+ else
+ cq->isr_timestamp = 0;
+#endif
workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
- if (!(++ecount % cq->entry_repost))
+ if (!(++ccount % cq->entry_repost))
break;
}
/* Track the max number of CQEs processed in 1 EQ */
- if (ecount > cq->CQ_max_cqe)
- cq->CQ_max_cqe = ecount;
- cq->assoc_qp->EQ_cqe_cnt += ecount;
+ if (ccount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ccount;
+ cq->assoc_qp->EQ_cqe_cnt += ccount;
/* Catch the no cq entry condition */
- if (unlikely(ecount == 0))
+ if (unlikely(ccount == 0))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0369 No entry from fast-path completion "
"queue fcpcqid=%d\n", cq->queue_id);
@@ -13489,8 +13571,6 @@ process_cq:
/* wake up worker thread if there are works to be done */
if (workposted)
lpfc_worker_wake_up(phba);
-
- return ecount;
}
static void
@@ -13524,10 +13604,7 @@ static void
lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
{
struct lpfc_queue *cq;
- struct lpfc_cqe *cqe;
- bool workposted = false;
uint16_t cqid;
- int ecount = 0;
if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -13562,30 +13639,12 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
/* Save EQ associated with this CQ */
cq->assoc_qp = phba->sli4_hba.fof_eq;
- /* Process all the entries to the OAS CQ */
- while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
- if (!(++ecount % cq->entry_repost))
- break;
- }
-
- /* Track the max number of CQEs processed in 1 EQ */
- if (ecount > cq->CQ_max_cqe)
- cq->CQ_max_cqe = ecount;
- cq->assoc_qp->EQ_cqe_cnt += ecount;
-
- /* Catch the no cq entry condition */
- if (unlikely(ecount == 0))
+ /* CQ work will be processed on CPU affinitized to this IRQ */
+ if (!queue_work(phba->wq, &cq->irqwork))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "9153 No entry from fast-path completion "
- "queue fcpcqid=%d\n", cq->queue_id);
-
- /* In any case, flash and re-arm the CQ */
- lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
-
- /* wake up worker thread if there are works to be done */
- if (workposted)
- lpfc_worker_wake_up(phba);
+ "0367 Cannot schedule soft IRQ "
+ "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
+ cqid, cq->queue_id, smp_processor_id());
}
/**
@@ -13711,7 +13770,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
struct lpfc_eqe *eqe;
unsigned long iflag;
int ecount = 0;
- int ccount = 0;
int hba_eqidx;
/* Get the driver's phba structure from the dev_id */
@@ -13729,11 +13787,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
if (unlikely(!fpeq))
return IRQ_NONE;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on)
- fpeq->isr_timestamp = ktime_get_ns();
-#endif
-
if (lpfc_fcp_look_ahead) {
if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
lpfc_sli4_eq_clr_intr(fpeq);
@@ -13760,12 +13813,8 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
* Process all the event on FCP fast-path EQ
*/
while ((eqe = lpfc_sli4_eq_get(fpeq))) {
- if (eqe == NULL)
- break;
-
- ccount += lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
- if (!(++ecount % fpeq->entry_repost) ||
- ccount > LPFC_MAX_ISR_CQE)
+ lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
+ if (!(++ecount % fpeq->entry_repost))
break;
fpeq->EQ_processed++;
}
@@ -13948,6 +13997,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
queue->entry_size = entry_size;
queue->entry_count = entry_count;
queue->phba = phba;
+ INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
+ INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
/* entry_repost will be set during q creation */
@@ -17137,7 +17188,8 @@ exit:
if (pcmd && pcmd->virt)
dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
kfree(pcmd);
- lpfc_sli_release_iocbq(phba, iocbq);
+ if (iocbq)
+ lpfc_sli_release_iocbq(phba, iocbq);
lpfc_in_buf_free(phba, &dmabuf->dbuf);
}
@@ -18691,6 +18743,8 @@ lpfc_drain_txq(struct lpfc_hba *phba)
uint32_t txq_cnt = 0;
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return 0;
spin_lock_irqsave(&pring->ring_lock, iflags);
list_for_each_entry(piocbq, &pring->txq, list) {
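
The lpfc_sli.c changes above all serve one pattern: the EQ handlers no longer drain completion queues in hard-IRQ context, but queue per-CQ work items (irqwork/spwork) on the driver workqueue, and the work handler recovers its queue with container_of() before walking the CQEs. Below is a minimal, generic sketch of that pattern; every name here (demo_*, the choice of system_highpri_wq) is an illustrative assumption, not lpfc code.

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/printk.h>

/* Hypothetical per-queue state; none of these names exist in lpfc. */
struct demo_cq {
	struct work_struct work;	/* scheduled from the hard IRQ handler */
	atomic_t pending;		/* interrupts seen since the last drain */
	u32 qid;
};

static void demo_cq_work(struct work_struct *work)
{
	/* Recover the queue from its embedded work_struct. */
	struct demo_cq *cq = container_of(work, struct demo_cq, work);
	int n = atomic_xchg(&cq->pending, 0);

	pr_info("demo: draining CQ %u after %d interrupt(s) in process context\n",
		cq->qid, n);
	/* ...walk the CQEs and re-arm the completion queue here... */
}

static irqreturn_t demo_hard_irq(int irq, void *dev_id)
{
	struct demo_cq *cq = dev_id;

	atomic_inc(&cq->pending);
	/* Keep the hard IRQ handler short: hand the drain off to a workqueue. */
	if (!queue_work(system_highpri_wq, &cq->work))
		pr_warn_ratelimited("demo: CQ %u work already queued\n", cq->qid);

	return IRQ_HANDLED;
}

static void demo_cq_init(struct demo_cq *cq, u32 qid)
{
	cq->qid = qid;
	atomic_set(&cq->pending, 0);
	INIT_WORK(&cq->work, demo_cq_work);
}
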
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 60200385fe00..13b8f4d4da34 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -158,7 +158,6 @@ struct lpfc_queue {
#define LPFC_MQ_REPOST 8
#define LPFC_CQ_REPOST 64
#define LPFC_RQ_REPOST 64
-#define LPFC_MAX_ISR_CQE 64
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */
uint32_t queue_id; /* Queue ID assigned by the hardware */
uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
@@ -202,6 +201,9 @@ struct lpfc_queue {
#define RQ_buf_posted q_cnt_3
#define RQ_rcv_buf q_cnt_4
+ struct work_struct irqwork;
+ struct work_struct spwork;
+
uint64_t isr_timestamp;
struct lpfc_queue *assoc_qp;
union sli4_qe qe[1]; /* array to index entries (must be last) */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6aa192b3e4bf..e0181371af09 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.4.0.3"
+#define LPFC_DRIVER_VERSION "11.4.0.4"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index c714482bf4c5..c9d33b1268cb 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -313,6 +313,15 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
goto error_out;
}
+ /* NPIV is not supported if HBA has NVME enabled */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "3189 Create VPORT failed: "
+ "NPIV is not supported on NVME\n");
+ rc = VPORT_INVAL;
+ goto error_out;
+ }
+
vpi = lpfc_alloc_vpi(phba);
if (vpi == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index a6722c93a295..f5a36ccb8606 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.702.06.00-rc1"
-#define MEGASAS_RELDATE "June 21, 2017"
+#define MEGASAS_VERSION "07.703.05.00-rc1"
+#define MEGASAS_RELDATE "October 5, 2017"
/*
* Device IDs
@@ -57,6 +57,7 @@
#define PCI_DEVICE_ID_LSI_CUTLASS_52 0x0052
#define PCI_DEVICE_ID_LSI_CUTLASS_53 0x0053
#define PCI_DEVICE_ID_LSI_VENTURA 0x0014
+#define PCI_DEVICE_ID_LSI_CRUSADER 0x0015
#define PCI_DEVICE_ID_LSI_HARPOON 0x0016
#define PCI_DEVICE_ID_LSI_TOMCAT 0x0017
#define PCI_DEVICE_ID_LSI_VENTURA_4PORT 0x001B
@@ -186,16 +187,19 @@
/*
* MFI command opcodes
*/
-#define MFI_CMD_INIT 0x00
-#define MFI_CMD_LD_READ 0x01
-#define MFI_CMD_LD_WRITE 0x02
-#define MFI_CMD_LD_SCSI_IO 0x03
-#define MFI_CMD_PD_SCSI_IO 0x04
-#define MFI_CMD_DCMD 0x05
-#define MFI_CMD_ABORT 0x06
-#define MFI_CMD_SMP 0x07
-#define MFI_CMD_STP 0x08
-#define MFI_CMD_INVALID 0xff
+enum MFI_CMD_OP {
+ MFI_CMD_INIT = 0x0,
+ MFI_CMD_LD_READ = 0x1,
+ MFI_CMD_LD_WRITE = 0x2,
+ MFI_CMD_LD_SCSI_IO = 0x3,
+ MFI_CMD_PD_SCSI_IO = 0x4,
+ MFI_CMD_DCMD = 0x5,
+ MFI_CMD_ABORT = 0x6,
+ MFI_CMD_SMP = 0x7,
+ MFI_CMD_STP = 0x8,
+ MFI_CMD_OP_COUNT,
+ MFI_CMD_INVALID = 0xff
+};
#define MR_DCMD_CTRL_GET_INFO 0x01010000
#define MR_DCMD_LD_GET_LIST 0x03010000
@@ -1504,6 +1508,15 @@ enum FW_BOOT_CONTEXT {
#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
+#define MR_CAN_HANDLE_64_BIT_DMA_OFFSET (1 << 25)
+
+enum MR_ADAPTER_TYPE {
+ MFI_SERIES = 1,
+ THUNDERBOLT_SERIES = 2,
+ INVADER_SERIES = 3,
+ VENTURA_SERIES = 4,
+};
+
/*
* register set for both 1068 and 1078 controllers
* structure extended for 1078 registers
@@ -1617,7 +1630,8 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:19;
+ u32 reserved:18;
+ u32 support_64bit_mode:1;
u32 support_pd_map_target_id:1;
u32 support_qd_throttling:1;
u32 support_fp_rlbypass:1;
@@ -1645,7 +1659,8 @@ typedef union _MFI_CAPABILITIES {
u32 support_fp_rlbypass:1;
u32 support_qd_throttling:1;
u32 support_pd_map_target_id:1;
- u32 reserved:19;
+ u32 support_64bit_mode:1;
+ u32 reserved:18;
#endif
} mfi_capabilities;
__le32 reg;
@@ -2114,6 +2129,19 @@ struct megasas_instance {
u32 *crash_dump_buf;
dma_addr_t crash_dump_h;
+
+ struct MR_PD_LIST *pd_list_buf;
+ dma_addr_t pd_list_buf_h;
+
+ struct megasas_ctrl_info *ctrl_info_buf;
+ dma_addr_t ctrl_info_buf_h;
+
+ struct MR_LD_LIST *ld_list_buf;
+ dma_addr_t ld_list_buf_h;
+
+ struct MR_LD_TARGETID_LIST *ld_targetid_list_buf;
+ dma_addr_t ld_targetid_list_buf_h;
+
void *crash_buf[MAX_CRASH_DUMP_SIZE];
unsigned int fw_crash_buffer_size;
unsigned int fw_crash_state;
@@ -2210,8 +2238,6 @@ struct megasas_instance {
/* Ptr to hba specific information */
void *ctrl_context;
- u32 ctrl_context_pages;
- struct megasas_ctrl_info *ctrl_info;
unsigned int msix_vectors;
struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
u64 map_id;
@@ -2236,12 +2262,13 @@ struct megasas_instance {
bool dev_handle;
bool fw_sync_cache_support;
u32 mfi_frame_size;
- bool is_ventura;
bool msix_combined;
u16 max_raid_mapsize;
/* preferred count to send as LDIO irrespective of FP capable. */
u8 r1_ldio_hint_default;
u32 nvme_page_size;
+ u8 adapter_type;
+ bool consistent_mask_64bit;
};
struct MR_LD_VF_MAP {
u32 size;
@@ -2488,4 +2515,7 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd);
u32 mega_mod64(u64 dividend, u32 divisor);
int megasas_alloc_fusion_context(struct megasas_instance *instance);
void megasas_free_fusion_context(struct megasas_instance *instance);
+void megasas_set_dma_settings(struct megasas_instance *instance,
+ struct megasas_dcmd_frame *dcmd,
+ dma_addr_t dma_addr, u32 dma_len);
#endif /*LSI_MEGARAID_SAS_H */
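
One detail of the header changes above: the MFI opcode #defines become an enum that ends in an MFI_CMD_OP_COUNT sentinel. The usual benefit of such a sentinel, and presumably the intent here, is that opcode-indexed tables and bounds checks track new opcodes automatically; a small hypothetical illustration with demo_* names only:

#include <linux/types.h>

/* Hypothetical opcode set; not the driver's MFI_CMD_OP values. */
enum demo_op {
	DEMO_OP_INIT = 0x0,
	DEMO_OP_READ = 0x1,
	DEMO_OP_WRITE = 0x2,
	DEMO_OP_COUNT,			/* number of contiguous, valid opcodes */
	DEMO_OP_INVALID = 0xff,
};

static const char * const demo_op_names[DEMO_OP_COUNT] = {
	[DEMO_OP_INIT]	= "init",
	[DEMO_OP_READ]	= "read",
	[DEMO_OP_WRITE]	= "write",
};

static const char *demo_op_name(u8 op)
{
	/* The COUNT sentinel makes this bounds check self-maintaining. */
	if (op >= DEMO_OP_COUNT || !demo_op_names[op])
		return "invalid";
	return demo_op_names[op];
}
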
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index a36e18156e49..cc54bdb5c712 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -161,6 +161,7 @@ static struct pci_device_id megasas_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
/* VENTURA */
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
@@ -205,6 +206,43 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
int initial);
+static int
+megasas_set_dma_mask(struct megasas_instance *instance);
+static int
+megasas_alloc_ctrl_mem(struct megasas_instance *instance);
+static inline void
+megasas_free_ctrl_mem(struct megasas_instance *instance);
+static inline int
+megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
+static inline void
+megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
+static inline void
+megasas_init_ctrl_params(struct megasas_instance *instance);
+
+/**
+ * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
+ * @instance: Adapter soft state
+ * @dcmd: DCMD frame inside MFI command
+ * @dma_addr: DMA address of buffer to be passed to FW
+ * @dma_len: Length of DMA buffer to be passed to FW
+ * @return: void
+ */
+void megasas_set_dma_settings(struct megasas_instance *instance,
+ struct megasas_dcmd_frame *dcmd,
+ dma_addr_t dma_addr, u32 dma_len)
+{
+ if (instance->consistent_mask_64bit) {
+ dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
+ dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
+ dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
+
+ } else {
+ dcmd->sgl.sge32[0].phys_addr =
+ cpu_to_le32(lower_32_bits(dma_addr));
+ dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
+ dcmd->flags = cpu_to_le16(dcmd->flags);
+ }
+}
void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
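
megasas_set_dma_settings() centralizes the choice between 32-bit and 64-bit scatter/gather entries and performs the final cpu_to_le16() on the frame flags, which is why later hunks drop the open-coded sge32 assignments and the per-caller flag conversion. A simplified sketch of the same decision, using stand-in types rather than the driver's real MFI frame layout:

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Stand-in SGE layouts; the real MFI frame definitions differ. */
struct demo_sge32 { __le32 phys_addr; __le32 length; };
struct demo_sge64 { __le64 phys_addr; __le32 length; };

union demo_sgl {
	struct demo_sge32 sge32[1];
	struct demo_sge64 sge64[1];
};

static void demo_fill_sgl(union demo_sgl *sgl, bool coherent_mask_64bit,
			  dma_addr_t addr, u32 len)
{
	if (coherent_mask_64bit) {
		/* FW negotiated a 64-bit coherent mask: use 64-bit SGEs. */
		sgl->sge64[0].phys_addr = cpu_to_le64(addr);
		sgl->sge64[0].length = cpu_to_le32(len);
	} else {
		/* Otherwise stay with 32-bit SGEs and pass the low address bits. */
		sgl->sge32[0].phys_addr = cpu_to_le32(lower_32_bits(addr));
		sgl->sge32[0].length = cpu_to_le32(len);
	}
}
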
@@ -2023,7 +2061,7 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
msleep(1000);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context)) {
+ (instance->adapter_type != MFI_SERIES)) {
writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
/* Flush */
readl(&instance->reg_set->doorbell);
@@ -2485,13 +2523,15 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
+
+ megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
+ sizeof(struct MR_CTRL_HB_HOST_MEM));
dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
instance->host->host_no);
- if (instance->ctrl_context && !instance->mask_interrupts)
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
retval = megasas_issue_blocked_cmd(instance, cmd,
MEGASAS_ROUTINE_WAIT_TIME_VF);
else
@@ -2787,7 +2827,9 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
/*
* First wait for all commands to complete
*/
- if (instance->ctrl_context) {
+ if (instance->adapter_type == MFI_SERIES) {
+ ret = megasas_generic_reset(scmd);
+ } else {
struct megasas_cmd_fusion *cmd;
cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
if (cmd)
@@ -2795,8 +2837,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
ret = megasas_reset_fusion(scmd->device->host,
SCSIIO_TIMEOUT_OCR);
- } else
- ret = megasas_generic_reset(scmd);
+ }
return ret;
}
@@ -2813,7 +2854,7 @@ static int megasas_task_abort(struct scsi_cmnd *scmd)
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
ret = megasas_task_abort_fusion(scmd);
else {
sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
@@ -2835,7 +2876,7 @@ static int megasas_reset_target(struct scsi_cmnd *scmd)
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
ret = megasas_reset_target_fusion(scmd);
else {
sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
@@ -3280,6 +3321,9 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
case MFI_CMD_SMP:
case MFI_CMD_STP:
+ megasas_complete_int_cmd(instance, cmd);
+ break;
+
case MFI_CMD_DCMD:
opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
/* Check for LD map update */
@@ -3366,6 +3410,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
default:
dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
hdr->cmd);
+ megasas_complete_int_cmd(instance, cmd);
break;
}
}
@@ -3712,7 +3757,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context))
+ (instance->adapter_type != MFI_SERIES))
writel(
MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
&instance->reg_set->doorbell);
@@ -3730,7 +3775,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context))
+ (instance->adapter_type != MFI_SERIES))
writel(MFI_INIT_HOTPLUG,
&instance->reg_set->doorbell);
else
@@ -3750,11 +3795,11 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context)) {
+ (instance->adapter_type != MFI_SERIES)) {
writel(MFI_RESET_FLAGS,
&instance->reg_set->doorbell);
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
for (i = 0; i < (10 * 1000); i += 20) {
if (readl(
&instance->
@@ -3921,7 +3966,8 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
* max_sge_sz = 12 byte (sizeof megasas_sge64)
* Total 192 byte (3 MFI frame of 64 byte)
*/
- frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
+ frame_count = (instance->adapter_type == MFI_SERIES) ?
+ (15 + 1) : (3 + 1);
instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
/*
* Use DMA pool facility provided by PCI layer
@@ -3976,7 +4022,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
memset(cmd->frame, 0, instance->mfi_frame_size);
cmd->frame->io.context = cpu_to_le32(cmd->index);
cmd->frame->io.pad_0 = 0;
- if (!instance->ctrl_context && reset_devices)
+ if ((instance->adapter_type == MFI_SERIES) && reset_devices)
cmd->frame->hdr.cmd = MFI_CMD_INVALID;
}
@@ -4030,9 +4076,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
int j;
u16 max_cmd;
struct megasas_cmd *cmd;
- struct fusion_context *fusion;
- fusion = instance->ctrl_context;
max_cmd = instance->max_mfi_cmds;
/*
@@ -4096,7 +4140,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
inline int
dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
- if (!instance->ctrl_context)
+ if (instance->adapter_type == MFI_SERIES)
return KILL_ADAPTER;
else if (instance->unload ||
test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
@@ -4132,15 +4176,17 @@ megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
+ sizeof(struct MR_PD_INFO));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
@@ -4203,6 +4249,9 @@ megasas_get_pd_list(struct megasas_instance *instance)
return ret;
}
+ ci = instance->pd_list_buf;
+ ci_h = instance->pd_list_buf_h;
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
@@ -4212,15 +4261,6 @@ megasas_get_pd_list(struct megasas_instance *instance)
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
-
- if (!ci) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4229,15 +4269,17 @@ megasas_get_pd_list(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
+ (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
@@ -4248,7 +4290,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
"failed/not supported by firmware\n");
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
megaraid_sas_kill_hba(instance);
else
instance->pd_list_not_supported = 1;
@@ -4305,10 +4347,6 @@ megasas_get_pd_list(struct megasas_instance *instance)
}
- pci_free_consistent(instance->pdev,
- MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
- ci, ci_h);
-
if (ret != DCMD_TIMEOUT)
megasas_return_cmd(instance, cmd);
@@ -4334,6 +4372,9 @@ megasas_get_ld_list(struct megasas_instance *instance)
dma_addr_t ci_h = 0;
u32 ld_count;
+ ci = instance->ld_list_buf;
+ ci_h = instance->ld_list_buf_h;
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
@@ -4343,16 +4384,6 @@ megasas_get_ld_list(struct megasas_instance *instance)
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- sizeof(struct MR_LD_LIST),
- &ci_h);
-
- if (!ci) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4361,15 +4392,17 @@ megasas_get_ld_list(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
dcmd->pad_0 = 0;
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h,
+ sizeof(struct MR_LD_LIST));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
@@ -4423,8 +4456,6 @@ megasas_get_ld_list(struct megasas_instance *instance)
break;
}
- pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
-
if (ret != DCMD_TIMEOUT)
megasas_return_cmd(instance, cmd);
@@ -4450,6 +4481,9 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
dma_addr_t ci_h = 0;
u32 tgtid_count;
+ ci = instance->ld_targetid_list_buf;
+ ci_h = instance->ld_targetid_list_buf_h;
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
@@ -4460,16 +4494,6 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
-
- if (!ci) {
- dev_warn(&instance->pdev->dev,
- "Failed to alloc mem for ld_list_query\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4480,15 +4504,17 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
dcmd->pad_0 = 0;
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h,
+ sizeof(struct MR_LD_TARGETID_LIST));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
@@ -4539,9 +4565,6 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
break;
}
- pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
- ci, ci_h);
-
if (ret != DCMD_TIMEOUT)
megasas_return_cmd(instance, cmd);
@@ -4563,9 +4586,9 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
return;
instance->supportmax256vd =
- instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
+ instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
/* Below is additional check to address future FW enhancement */
- if (instance->ctrl_info->max_lds > 64)
+ if (instance->ctrl_info_buf->max_lds > 64)
instance->supportmax256vd = 1;
instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
@@ -4623,10 +4646,10 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
struct megasas_ctrl_info *ci;
- struct megasas_ctrl_info *ctrl_info;
dma_addr_t ci_h = 0;
- ctrl_info = instance->ctrl_info;
+ ci = instance->ctrl_info_buf;
+ ci_h = instance->ctrl_info_buf_h;
cmd = megasas_get_cmd(instance);
@@ -4637,45 +4660,37 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- sizeof(struct megasas_ctrl_info), &ci_h);
-
- if (!ci) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
dcmd->mbox.b[0] = 1;
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h,
+ sizeof(struct megasas_ctrl_info));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
switch (ret) {
case DCMD_SUCCESS:
- memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
/* Save required controller information in
* CPU endianness format.
*/
- le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
- le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
- le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
- le16_to_cpus((u16 *)&ctrl_info->adapter_operations4);
+ le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
+ le32_to_cpus((u32 *)&ci->adapterOperations2);
+ le32_to_cpus((u32 *)&ci->adapterOperations3);
+ le16_to_cpus((u16 *)&ci->adapter_operations4);
/* Update the latest Ext VD info.
* From Init path, store current firmware details.
@@ -4684,21 +4699,21 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
*/
megasas_update_ext_vd_details(instance);
instance->use_seqnum_jbod_fp =
- ctrl_info->adapterOperations3.useSeqNumJbodFP;
+ ci->adapterOperations3.useSeqNumJbodFP;
instance->support_morethan256jbod =
- ctrl_info->adapter_operations4.support_pd_map_target_id;
+ ci->adapter_operations4.support_pd_map_target_id;
/*Check whether controller is iMR or MR */
- instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
+ instance->is_imr = (ci->memory_size ? 0 : 1);
dev_info(&instance->pdev->dev,
"controller type\t: %s(%dMB)\n",
instance->is_imr ? "iMR" : "MR",
- le16_to_cpu(ctrl_info->memory_size));
+ le16_to_cpu(ci->memory_size));
instance->disableOnlineCtrlReset =
- ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+ ci->properties.OnOffProperties.disableOnlineCtrlReset;
instance->secure_jbod_support =
- ctrl_info->adapterOperations3.supportSecurityonJBOD;
+ ci->adapterOperations3.supportSecurityonJBOD;
dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
@@ -4726,9 +4741,6 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
}
- pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
- ci, ci_h);
-
megasas_return_cmd(instance, cmd);
@@ -4772,15 +4784,17 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+ dcmd->flags = MFI_FRAME_DIR_NONE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
+ CRASH_DMA_BUF_SIZE);
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
@@ -5088,7 +5102,7 @@ megasas_setup_jbod_map(struct megasas_instance *instance)
(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
if (reset_devices || !fusion ||
- !instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
+ !instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) {
dev_info(&instance->pdev->dev,
"Jbod map is not supported %s %d\n",
__func__, __LINE__);
@@ -5167,7 +5181,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
reg_set = instance->reg_set;
- if (fusion)
+ if (instance->adapter_type != MFI_SERIES)
instance->instancet = &megasas_instance_template_fusion;
else {
switch (instance->pdev->device) {
@@ -5208,7 +5222,20 @@ static int megasas_init_fw(struct megasas_instance *instance)
goto fail_ready_state;
}
- if (instance->is_ventura) {
+ megasas_init_ctrl_params(instance);
+
+ if (megasas_set_dma_mask(instance))
+ goto fail_ready_state;
+
+ if (megasas_alloc_ctrl_mem(instance))
+ goto fail_alloc_dma_buf;
+
+ if (megasas_alloc_ctrl_dma_buffers(instance))
+ goto fail_alloc_dma_buf;
+
+ fusion = instance->ctrl_context;
+
+ if (instance->adapter_type == VENTURA_SERIES) {
scratch_pad_3 =
readl(&instance->reg_set->outbound_scratch_pad_3);
instance->max_raid_mapsize = ((scratch_pad_3 >>
@@ -5226,7 +5253,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
(&instance->reg_set->outbound_scratch_pad_2);
/* Check max MSI-X vectors */
if (fusion) {
- if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/
+ if (instance->adapter_type == THUNDERBOLT_SERIES) {
+ /* Thunderbolt Series*/
instance->msix_vectors = (scratch_pad_2
& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
fw_msix_count = instance->msix_vectors;
@@ -5301,11 +5329,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
- GFP_KERNEL);
- if (instance->ctrl_info == NULL)
- goto fail_init_adapter;
-
/*
* Below are default value for legacy Firmware.
* non-fusion based controllers
@@ -5316,7 +5339,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
scratch_pad_4 =
readl(&instance->reg_set->outbound_scratch_pad_4);
if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
@@ -5352,7 +5375,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
/* stream detection initialization */
- if (instance->is_ventura && fusion) {
+ if (instance->adapter_type == VENTURA_SERIES) {
fusion->stream_detect_by_ld =
kzalloc(sizeof(struct LD_STREAM_DETECT *)
* MAX_LOGICAL_DRIVES_EXT,
@@ -5394,7 +5417,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
* to calculate max_sectors_1. So the number ended up as zero always.
*/
tmp_sectors = 0;
- ctrl_info = instance->ctrl_info;
+ ctrl_info = instance->ctrl_info_buf;
max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
le16_to_cpu(ctrl_info->max_strips_per_io);
@@ -5505,9 +5528,10 @@ fail_setup_irqs:
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
instance->msix_vectors = 0;
+fail_alloc_dma_buf:
+ megasas_free_ctrl_dma_buffers(instance);
+ megasas_free_ctrl_mem(instance);
fail_ready_state:
- kfree(instance->ctrl_info);
- instance->ctrl_info = NULL;
iounmap(instance->reg_set);
fail_ioremap:
@@ -5580,13 +5604,14 @@ megasas_get_seq_num(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+
+ megasas_set_dma_settings(instance, dcmd, el_info_h,
+ sizeof(struct megasas_evt_log_info));
if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
DCMD_SUCCESS) {
@@ -5711,7 +5736,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
@@ -5719,8 +5744,9 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
dcmd->mbox.w[0] = cpu_to_le32(seq_num);
instance->last_seq_num = seq_num;
dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
+
+ megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
+ sizeof(struct megasas_evt_detail));
if (instance->aen_cmd != NULL) {
megasas_return_cmd(instance, cmd);
@@ -5787,18 +5813,18 @@ megasas_get_target_prop(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len =
cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
- dcmd->sgl.sge32[0].phys_addr =
- cpu_to_le32(instance->tgt_prop_h);
- dcmd->sgl.sge32[0].length =
- cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
+ sizeof(struct MR_TARGET_PROPERTIES));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance,
cmd, MFI_IO_TIMEOUT_SECS);
else
@@ -5923,234 +5949,408 @@ static int megasas_io_attach(struct megasas_instance *instance)
return 0;
}
+/**
+ * megasas_set_dma_mask - Set DMA mask for supported controllers
+ *
+ * @instance: Adapter soft state
+ * Description:
+ *
+ * For Ventura, driver/FW will operate with 64-bit DMA addresses.
+ *
+ * For Invader:
+ * By default, driver/FW will operate with 32-bit consistent DMA
+ * addresses, but if setting the 32-bit consistent DMA mask fails,
+ * the driver retries with a 64-bit consistent mask, provided the
+ * FW is truly 64-bit DMA capable.
+ *
+ * For older controllers (Thunderbolt and MFI based adapters):
+ * driver/FW will operate with 32-bit consistent DMA addresses.
+ */
static int
-megasas_set_dma_mask(struct pci_dev *pdev)
+megasas_set_dma_mask(struct megasas_instance *instance)
{
- /*
- * All our controllers are capable of performing 64-bit DMA
- */
+ u64 consistent_mask;
+ struct pci_dev *pdev;
+ u32 scratch_pad_2;
+
+ pdev = instance->pdev;
+ consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
+ DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+
if (IS_DMA64) {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ goto fail_set_dma_mask;
+
+ if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
+ (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
+ /*
+ * If 32 bit DMA mask fails, then try for 64 bit mask
+ * for FW capable of handling 64 bit DMA.
+ */
+ scratch_pad_2 = readl
+ (&instance->reg_set->outbound_scratch_pad_2);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
+ goto fail_set_dma_mask;
+ else if (dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(64)))
goto fail_set_dma_mask;
}
- } else {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
- goto fail_set_dma_mask;
- }
- /*
- * Ensure that all data structures are allocated in 32-bit
- * memory.
- */
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
- /* Try 32bit DMA mask and 32 bit Consistent dma mask */
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
- dev_info(&pdev->dev, "set 32bit DMA mask"
- "and 32 bit consistent mask\n");
- else
- goto fail_set_dma_mask;
- }
+ } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ goto fail_set_dma_mask;
+
+ if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
+ instance->consistent_mask_64bit = false;
+ else
+ instance->consistent_mask_64bit = true;
+
+ dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
+ ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
+ (instance->consistent_mask_64bit ? "64" : "32"));
return 0;
fail_set_dma_mask:
- return 1;
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
+ return -1;
+
}
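
For comparison, the common shape of this mask negotiation with the generic DMA API, prefer the widest mask and fall back to 32-bit, is sketched below. It is only an outline of the idea, not a drop-in replacement for the adapter-specific checks above (scratch-pad capability bit, per-family coherent mask):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int demo_setup_dma(struct pci_dev *pdev)
{
	int rc;

	/* Try streaming + coherent 64-bit masks first... */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		/* ...and fall back to 32-bit if the platform refuses. */
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_err(&pdev->dev, "no usable DMA configuration\n");

	return rc;
}
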
-/**
- * megasas_probe_one - PCI hotplug entry point
- * @pdev: PCI device structure
- * @id: PCI ids of supported hotplugged adapter
+/*
+ * megasas_set_adapter_type - Set adapter type.
+ * Supported controllers can be divided into
+ * four categories: enum MR_ADAPTER_TYPE {
+ * MFI_SERIES = 1,
+ * THUNDERBOLT_SERIES = 2,
+ * INVADER_SERIES = 3,
+ * VENTURA_SERIES = 4,
+ * };
+ * @instance: Adapter soft state
+ * return: void
*/
-static int megasas_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static inline void megasas_set_adapter_type(struct megasas_instance *instance)
{
- int rval, pos;
- struct Scsi_Host *host;
- struct megasas_instance *instance;
- u16 control = 0;
- struct fusion_context *fusion = NULL;
-
- /* Reset MSI-X in the kdump kernel */
- if (reset_devices) {
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
- &control);
- if (control & PCI_MSIX_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI-X\n");
- pci_write_config_word(pdev,
- pos + PCI_MSIX_FLAGS,
- control &
- ~PCI_MSIX_FLAGS_ENABLE);
- }
+ if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
+ (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
+ instance->adapter_type = MFI_SERIES;
+ } else {
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_VENTURA:
+ case PCI_DEVICE_ID_LSI_CRUSADER:
+ case PCI_DEVICE_ID_LSI_HARPOON:
+ case PCI_DEVICE_ID_LSI_TOMCAT:
+ case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
+ case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
+ instance->adapter_type = VENTURA_SERIES;
+ break;
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ instance->adapter_type = THUNDERBOLT_SERIES;
+ break;
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_INTRUDER:
+ case PCI_DEVICE_ID_LSI_INTRUDER_24:
+ case PCI_DEVICE_ID_LSI_CUTLASS_52:
+ case PCI_DEVICE_ID_LSI_CUTLASS_53:
+ case PCI_DEVICE_ID_LSI_FURY:
+ instance->adapter_type = INVADER_SERIES;
+ break;
+ default: /* For all other supported controllers */
+ instance->adapter_type = MFI_SERIES;
+ break;
}
}
+}
- /*
- * PCI prepping: enable device set bus mastering and dma mask
- */
- rval = pci_enable_device_mem(pdev);
+static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
+{
+ instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
+ &instance->producer_h);
+ instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
+ &instance->consumer_h);
- if (rval) {
- return rval;
+ if (!instance->producer || !instance->consumer) {
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate memory for producer, consumer\n");
+ return -1;
}
- pci_set_master(pdev);
+ *instance->producer = 0;
+ *instance->consumer = 0;
+ return 0;
+}
- if (megasas_set_dma_mask(pdev))
- goto fail_set_dma_mask;
+/**
+ * megasas_alloc_ctrl_mem - Allocate per controller memory for core data
+ * structures which are not common across MFI
+ * adapters and fusion adapters.
+ * For MFI based adapters, allocate producer and
+ * consumer buffers. For fusion adapters, allocate
+ * memory for fusion context.
+ * @instance: Adapter soft state
+ * return: 0 for SUCCESS
+ */
+static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
+{
+ switch (instance->adapter_type) {
+ case MFI_SERIES:
+ if (megasas_alloc_mfi_ctrl_mem(instance))
+ return -ENOMEM;
+ break;
+ case VENTURA_SERIES:
+ case THUNDERBOLT_SERIES:
+ case INVADER_SERIES:
+ if (megasas_alloc_fusion_context(instance))
+ return -ENOMEM;
+ break;
+ }
- host = scsi_host_alloc(&megasas_template,
- sizeof(struct megasas_instance));
+ return 0;
+}
- if (!host) {
- dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
- goto fail_alloc_instance;
+/*
+ * megasas_free_ctrl_mem - Free fusion context for fusion adapters and
+ * producer, consumer buffers for MFI adapters
+ *
+ * @instance - Adapter soft instance
+ *
+ */
+static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
+{
+ if (instance->adapter_type == MFI_SERIES) {
+ if (instance->producer)
+ pci_free_consistent(instance->pdev, sizeof(u32),
+ instance->producer,
+ instance->producer_h);
+ if (instance->consumer)
+ pci_free_consistent(instance->pdev, sizeof(u32),
+ instance->consumer,
+ instance->consumer_h);
+ } else {
+ megasas_free_fusion_context(instance);
}
+}
- instance = (struct megasas_instance *)host->hostdata;
- memset(instance, 0, sizeof(*instance));
- atomic_set(&instance->fw_reset_no_pci_access, 0);
- instance->pdev = pdev;
+/**
+ * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during
+ * driver load time
+ *
+ * @instance: Adapter soft state
+ * @return: 0 for SUCCESS
+ */
+static inline
+int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
+{
+ struct pci_dev *pdev = instance->pdev;
+ struct fusion_context *fusion = instance->ctrl_context;
- switch (instance->pdev->device) {
- case PCI_DEVICE_ID_LSI_VENTURA:
- case PCI_DEVICE_ID_LSI_HARPOON:
- case PCI_DEVICE_ID_LSI_TOMCAT:
- case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
- case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
- instance->is_ventura = true;
- case PCI_DEVICE_ID_LSI_FUSION:
- case PCI_DEVICE_ID_LSI_PLASMA:
- case PCI_DEVICE_ID_LSI_INVADER:
- case PCI_DEVICE_ID_LSI_FURY:
- case PCI_DEVICE_ID_LSI_INTRUDER:
- case PCI_DEVICE_ID_LSI_INTRUDER_24:
- case PCI_DEVICE_ID_LSI_CUTLASS_52:
- case PCI_DEVICE_ID_LSI_CUTLASS_53:
- {
- if (megasas_alloc_fusion_context(instance)) {
- megasas_free_fusion_context(instance);
- goto fail_alloc_dma_buf;
- }
- fusion = instance->ctrl_context;
+ instance->evt_detail =
+ pci_alloc_consistent(pdev,
+ sizeof(struct megasas_evt_detail),
+ &instance->evt_detail_h);
- if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA))
- fusion->adapter_type = THUNDERBOLT_SERIES;
- else if (instance->is_ventura)
- fusion->adapter_type = VENTURA_SERIES;
- else
- fusion->adapter_type = INVADER_SERIES;
- }
- break;
- default: /* For all other supported controllers */
-
- instance->producer =
- pci_alloc_consistent(pdev, sizeof(u32),
- &instance->producer_h);
- instance->consumer =
- pci_alloc_consistent(pdev, sizeof(u32),
- &instance->consumer_h);
-
- if (!instance->producer || !instance->consumer) {
- dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
- "memory for producer, consumer\n");
- goto fail_alloc_dma_buf;
+ if (!instance->evt_detail) {
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate event detail buffer\n");
+ return -ENOMEM;
+ }
+
+ if (fusion) {
+ fusion->ioc_init_request =
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MPI2_IOC_INIT_REQUEST),
+ &fusion->ioc_init_request_phys,
+ GFP_KERNEL);
+
+ if (!fusion->ioc_init_request) {
+ dev_err(&pdev->dev,
+ "Failed to allocate PD list buffer\n");
+ return -ENOMEM;
}
+ }
- *instance->producer = 0;
- *instance->consumer = 0;
- break;
+ instance->pd_list_buf =
+ pci_alloc_consistent(pdev,
+ MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
+ &instance->pd_list_buf_h);
+
+ if (!instance->pd_list_buf) {
+ dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
+ return -ENOMEM;
}
- /* Crash dump feature related initialisation*/
- instance->drv_buf_index = 0;
- instance->drv_buf_alloc = 0;
- instance->crash_dump_fw_support = 0;
- instance->crash_dump_app_support = 0;
- instance->fw_crash_state = UNAVAILABLE;
- spin_lock_init(&instance->crashdump_lock);
- instance->crash_dump_buf = NULL;
+ instance->ctrl_info_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct megasas_ctrl_info),
+ &instance->ctrl_info_buf_h);
- megasas_poll_wait_aen = 0;
- instance->flag_ieee = 0;
- instance->ev = NULL;
- instance->issuepend_done = 1;
- atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
- instance->is_imr = 0;
+ if (!instance->ctrl_info_buf) {
+ dev_err(&pdev->dev,
+ "Failed to allocate controller info buffer\n");
+ return -ENOMEM;
+ }
- instance->evt_detail = pci_alloc_consistent(pdev,
- sizeof(struct
- megasas_evt_detail),
- &instance->evt_detail_h);
+ instance->ld_list_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_LD_LIST),
+ &instance->ld_list_buf_h);
- if (!instance->evt_detail) {
- dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
- "event detail structure\n");
- goto fail_alloc_dma_buf;
+ if (!instance->ld_list_buf) {
+ dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
+ return -ENOMEM;
+ }
+
+ instance->ld_targetid_list_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_LD_TARGETID_LIST),
+ &instance->ld_targetid_list_buf_h);
+
+ if (!instance->ld_targetid_list_buf) {
+ dev_err(&pdev->dev,
+ "Failed to allocate LD targetid list buffer\n");
+ return -ENOMEM;
}
if (!reset_devices) {
- instance->system_info_buf = pci_zalloc_consistent(pdev,
- sizeof(struct MR_DRV_SYSTEM_INFO),
- &instance->system_info_h);
- if (!instance->system_info_buf)
- dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
+ instance->system_info_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_DRV_SYSTEM_INFO),
+ &instance->system_info_h);
+ instance->pd_info =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_PD_INFO),
+ &instance->pd_info_h);
+ instance->tgt_prop =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_TARGET_PROPERTIES),
+ &instance->tgt_prop_h);
+ instance->crash_dump_buf =
+ pci_alloc_consistent(pdev,
+ CRASH_DMA_BUF_SIZE,
+ &instance->crash_dump_h);
- instance->pd_info = pci_alloc_consistent(pdev,
- sizeof(struct MR_PD_INFO), &instance->pd_info_h);
+ if (!instance->system_info_buf)
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate system info buffer\n");
if (!instance->pd_info)
- dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
-
- instance->tgt_prop = pci_alloc_consistent(pdev,
- sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate pd_info buffer\n");
if (!instance->tgt_prop)
- dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate tgt_prop buffer\n");
- instance->crash_dump_buf = pci_alloc_consistent(pdev,
- CRASH_DMA_BUF_SIZE,
- &instance->crash_dump_h);
if (!instance->crash_dump_buf)
- dev_err(&pdev->dev, "Can't allocate Firmware "
- "crash dump DMA buffer\n");
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate crash dump buffer\n");
}
+ return 0;
+}
+
+/*
+ * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated
+ * during driver load time
+ *
+ * @instance- Adapter soft instance
+ *
+ */
+static inline
+void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
+{
+ struct pci_dev *pdev = instance->pdev;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail,
+ instance->evt_detail_h);
+
+ if (fusion && fusion->ioc_init_request)
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct MPI2_IOC_INIT_REQUEST),
+ fusion->ioc_init_request,
+ fusion->ioc_init_request_phys);
+
+ if (instance->pd_list_buf)
+ pci_free_consistent(pdev,
+ MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
+ instance->pd_list_buf,
+ instance->pd_list_buf_h);
+
+ if (instance->ld_list_buf)
+ pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
+ instance->ld_list_buf,
+ instance->ld_list_buf_h);
+
+ if (instance->ld_targetid_list_buf)
+ pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
+ instance->ld_targetid_list_buf,
+ instance->ld_targetid_list_buf_h);
+
+ if (instance->ctrl_info_buf)
+ pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
+ instance->ctrl_info_buf,
+ instance->ctrl_info_buf_h);
+
+ if (instance->system_info_buf)
+ pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
+ instance->system_info_buf,
+ instance->system_info_h);
+
+ if (instance->pd_info)
+ pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+ instance->pd_info, instance->pd_info_h);
+
+ if (instance->tgt_prop)
+ pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+ instance->tgt_prop, instance->tgt_prop_h);
+
+ if (instance->crash_dump_buf)
+ pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+ instance->crash_dump_buf,
+ instance->crash_dump_h);
+}
+
+/*
+ * megasas_init_ctrl_params - Initialize controller's instance
+ * parameters before FW init
+ * @instance - Adapter soft instance
+ * @return - void
+ */
+static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
+{
+ instance->fw_crash_state = UNAVAILABLE;
+
+ megasas_poll_wait_aen = 0;
+ instance->issuepend_done = 1;
+ atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
+
/*
* Initialize locks and queues
*/
INIT_LIST_HEAD(&instance->cmd_pool);
INIT_LIST_HEAD(&instance->internal_reset_pending_q);
- atomic_set(&instance->fw_outstanding,0);
+ atomic_set(&instance->fw_outstanding, 0);
init_waitqueue_head(&instance->int_cmd_wait_q);
init_waitqueue_head(&instance->abort_cmd_wait_q);
+ spin_lock_init(&instance->crashdump_lock);
spin_lock_init(&instance->mfi_pool_lock);
spin_lock_init(&instance->hba_lock);
spin_lock_init(&instance->stream_lock);
spin_lock_init(&instance->completion_lock);
- mutex_init(&instance->reset_mutex);
mutex_init(&instance->hba_mutex);
-
- /*
- * Initialize PCI related and misc parameters
- */
- instance->host = host;
- instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
- instance->init_id = MEGASAS_DEFAULT_INIT_ID;
- instance->ctrl_info = NULL;
-
+ mutex_init(&instance->reset_mutex);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
instance->flag_ieee = 1;
megasas_dbg_lvl = 0;
@@ -6160,11 +6360,75 @@ static int megasas_probe_one(struct pci_dev *pdev,
instance->disableOnlineCtrlReset = 1;
instance->UnevenSpanSupport = 0;
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
- } else
+ } else {
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
+ }
+}
+
+/**
+ * megasas_probe_one - PCI hotplug entry point
+ * @pdev: PCI device structure
+ * @id: PCI ids of supported hotplugged adapter
+ */
+static int megasas_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int rval, pos;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+ u16 control = 0;
+
+ /* Reset MSI-X in the kdump kernel */
+ if (reset_devices) {
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
+ &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI-X\n");
+ pci_write_config_word(pdev,
+ pos + PCI_MSIX_FLAGS,
+ control &
+ ~PCI_MSIX_FLAGS_ENABLE);
+ }
+ }
+ }
+
+ /*
+ * PCI prepping: enable the device, set bus mastering and the DMA mask
+ */
+ rval = pci_enable_device_mem(pdev);
+
+ if (rval) {
+ return rval;
+ }
+
+ pci_set_master(pdev);
+
+ host = scsi_host_alloc(&megasas_template,
+ sizeof(struct megasas_instance));
+
+ if (!host) {
+ dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
+ goto fail_alloc_instance;
+ }
+
+ instance = (struct megasas_instance *)host->hostdata;
+ memset(instance, 0, sizeof(*instance));
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+
+ /*
+ * Initialize PCI related and misc parameters
+ */
+ instance->pdev = pdev;
+ instance->host = host;
+ instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ instance->init_id = MEGASAS_DEFAULT_INIT_ID;
+
+ megasas_set_adapter_type(instance);
/*
* Initialize MFI Firmware
@@ -6240,37 +6504,16 @@ fail_io_attach:
instance->instancet->disable_intr(instance);
megasas_destroy_irqs(instance);
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
megasas_release_fusion(instance);
else
megasas_release_mfi(instance);
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
fail_init_mfi:
-fail_alloc_dma_buf:
- if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail,
- instance->evt_detail_h);
-
- if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
- instance->pd_info,
- instance->pd_info_h);
- if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
- instance->tgt_prop,
- instance->tgt_prop_h);
- if (instance->producer)
- pci_free_consistent(pdev, sizeof(u32), instance->producer,
- instance->producer_h);
- if (instance->consumer)
- pci_free_consistent(pdev, sizeof(u32), instance->consumer,
- instance->consumer_h);
scsi_host_put(host);
fail_alloc_instance:
-fail_set_dma_mask:
pci_disable_device(pdev);
return -ENODEV;
@@ -6447,7 +6690,13 @@ megasas_resume(struct pci_dev *pdev)
pci_set_master(pdev);
- if (megasas_set_dma_mask(pdev))
+ /*
+ * We expect the FW state to be READY
+ */
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+
+ if (megasas_set_dma_mask(instance))
goto fail_set_dma_mask;
/*
@@ -6456,12 +6705,6 @@ megasas_resume(struct pci_dev *pdev)
atomic_set(&instance->fw_outstanding, 0);
- /*
- * We expect the FW state to be READY
- */
- if (megasas_transition_to_ready(instance, 0))
- goto fail_ready_state;
-
/* Now re-enable MSI-X */
if (instance->msix_vectors) {
irq_flags = PCI_IRQ_MSIX;
@@ -6474,7 +6717,7 @@ megasas_resume(struct pci_dev *pdev)
if (rval < 0)
goto fail_reenable_msix;
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
megasas_reset_reply_desc(instance);
if (megasas_ioc_init_fusion(instance)) {
megasas_free_cmds(instance);
@@ -6521,30 +6764,13 @@ megasas_resume(struct pci_dev *pdev)
return 0;
fail_init_mfi:
- if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail,
- instance->evt_detail_h);
-
- if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
- instance->pd_info,
- instance->pd_info_h);
- if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
- instance->tgt_prop,
- instance->tgt_prop_h);
- if (instance->producer)
- pci_free_consistent(pdev, sizeof(u32), instance->producer,
- instance->producer_h);
- if (instance->consumer)
- pci_free_consistent(pdev, sizeof(u32), instance->consumer,
- instance->consumer_h);
+ megasas_free_ctrl_dma_buffers(instance);
+ megasas_free_ctrl_mem(instance);
scsi_host_put(host);
+fail_reenable_msix:
fail_set_dma_mask:
fail_ready_state:
-fail_reenable_msix:
pci_disable_device(pdev);
@@ -6647,7 +6873,7 @@ skip_firing_dcmds:
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
kfree(fusion->stream_detect_by_ld[i]);
kfree(fusion->stream_detect_by_ld);
@@ -6655,7 +6881,7 @@ skip_firing_dcmds:
}
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
megasas_release_fusion(instance);
pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
(sizeof(struct MR_PD_CFG_SEQ) *
@@ -6680,30 +6906,10 @@ skip_firing_dcmds:
fusion->pd_seq_sync[i],
fusion->pd_seq_phys[i]);
}
- megasas_free_fusion_context(instance);
} else {
megasas_release_mfi(instance);
- pci_free_consistent(pdev, sizeof(u32),
- instance->producer,
- instance->producer_h);
- pci_free_consistent(pdev, sizeof(u32),
- instance->consumer,
- instance->consumer_h);
}
- kfree(instance->ctrl_info);
-
- if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail, instance->evt_detail_h);
- if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
- instance->pd_info,
- instance->pd_info_h);
- if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
- instance->tgt_prop,
- instance->tgt_prop_h);
if (instance->vf_affiliation)
pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
@@ -6721,13 +6927,9 @@ skip_firing_dcmds:
instance->hb_host_mem,
instance->hb_host_mem_h);
- if (instance->crash_dump_buf)
- pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
- instance->crash_dump_buf, instance->crash_dump_h);
+ megasas_free_ctrl_dma_buffers(instance);
- if (instance->system_info_buf)
- pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
- instance->system_info_buf, instance->system_info_h);
+ megasas_free_ctrl_mem(instance);
scsi_host_put(host);
@@ -6866,7 +7068,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
struct megasas_iocpacket __user * user_ioc,
struct megasas_iocpacket *ioc)
{
- struct megasas_sge32 *kern_sge32;
+ struct megasas_sge64 *kern_sge64 = NULL;
+ struct megasas_sge32 *kern_sge32 = NULL;
struct megasas_cmd *cmd;
void *kbuff_arr[MAX_IOCTL_SGE];
dma_addr_t buf_handle = 0;
@@ -6874,7 +7077,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
void *sense = NULL;
dma_addr_t sense_handle;
unsigned long *sense_ptr;
- u32 opcode;
+ u32 opcode = 0;
memset(kbuff_arr, 0, sizeof(kbuff_arr));
@@ -6884,6 +7087,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
return -EINVAL;
}
+ if (ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) {
+ dev_err(&instance->pdev->dev,
+ "Received invalid ioctl command 0x%x\n",
+ ioc->frame.hdr.cmd);
+ return -ENOTSUPP;
+ }
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
@@ -6899,10 +7109,18 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
cmd->frame->hdr.context = cpu_to_le32(cmd->index);
cmd->frame->hdr.pad_0 = 0;
- cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
- MFI_FRAME_SGL64 |
+
+ cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
+
+ if (instance->consistent_mask_64bit)
+ cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64));
+ else
+ cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
MFI_FRAME_SENSE64));
- opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
+
+ if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
+ opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
@@ -6925,8 +7143,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* kernel buffers in SGLs. The location of SGL is embedded in the
* struct iocpacket itself.
*/
- kern_sge32 = (struct megasas_sge32 *)
- ((unsigned long)cmd->frame + ioc->sgl_off);
+ if (instance->consistent_mask_64bit)
+ kern_sge64 = (struct megasas_sge64 *)
+ ((unsigned long)cmd->frame + ioc->sgl_off);
+ else
+ kern_sge32 = (struct megasas_sge32 *)
+ ((unsigned long)cmd->frame + ioc->sgl_off);
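For orientation, a standalone sketch of the difference between the 32-bit and 64-bit SGE formats being selected here; the field layout is assumed for illustration and is not copied from the driver headers:

#include <stdint.h>
#include <stdio.h>

/* Rough approximations of the two SGE layouts; the real driver
 * structures may be packed differently. */
struct sge32_sketch {
	uint32_t phys_addr;	/* 32-bit DMA address */
	uint32_t length;
};

struct sge64_sketch {
	uint64_t phys_addr;	/* 64-bit DMA address */
	uint32_t length;
};

int main(void)
{
	printf("sge32: %zu bytes, sge64: %zu bytes\n",
	       sizeof(struct sge32_sketch), sizeof(struct sge64_sketch));
	return 0;
}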
/*
* For each user buffer, create a mirror buffer and copy in
@@ -6949,8 +7171,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* We don't change the dma_coherent_mask, so
* pci_alloc_consistent only returns 32bit addresses
*/
- kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
- kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+ if (instance->consistent_mask_64bit) {
+ kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
+ kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+ } else {
+ kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
+ kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+ }
/*
* We created a kernel buffer corresponding to the
@@ -6973,7 +7200,10 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
sense_ptr =
(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
- *sense_ptr = cpu_to_le32(sense_handle);
+ if (instance->consistent_mask_64bit)
+ *sense_ptr = cpu_to_le64(sense_handle);
+ else
+ *sense_ptr = cpu_to_le32(sense_handle);
}
/*
@@ -6984,8 +7214,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
cmd->sync_cmd = 0;
dev_err(&instance->pdev->dev,
- "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
- __func__, __LINE__, opcode, cmd->cmd_status_drv);
+ "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
+ __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
+ cmd->cmd_status_drv);
return -EBUSY;
}
@@ -7045,10 +7276,16 @@ out:
for (i = 0; i < ioc->sge_count; i++) {
if (kbuff_arr[i]) {
- dma_free_coherent(&instance->pdev->dev,
- le32_to_cpu(kern_sge32[i].length),
- kbuff_arr[i],
- le32_to_cpu(kern_sge32[i].phys_addr));
+ if (instance->consistent_mask_64bit)
+ dma_free_coherent(&instance->pdev->dev,
+ le32_to_cpu(kern_sge64[i].length),
+ kbuff_arr[i],
+ le64_to_cpu(kern_sge64[i].phys_addr));
+ else
+ dma_free_coherent(&instance->pdev->dev,
+ le32_to_cpu(kern_sge32[i].length),
+ kbuff_arr[i],
+ le32_to_cpu(kern_sge32[i].phys_addr));
kbuff_arr[i] = NULL;
}
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index ecc699a65bac..bfad9bfc313f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -737,7 +737,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
*pDevHandle = MR_PdDevHandleGet(pd, map);
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
/* get second pd also for raid 1/10 fast path writes*/
- if (instance->is_ventura &&
+ if ((instance->adapter_type == VENTURA_SERIES) &&
(raid->level == 1) &&
!io_info->isRead) {
r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -747,8 +747,8 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
}
} else {
if ((raid->level >= 5) &&
- ((fusion->adapter_type == THUNDERBOLT_SERIES) ||
- ((fusion->adapter_type == INVADER_SERIES) &&
+ ((instance->adapter_type == THUNDERBOLT_SERIES) ||
+ ((instance->adapter_type == INVADER_SERIES) &&
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
@@ -762,7 +762,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
}
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
io_info->span_arm =
@@ -853,7 +853,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
*pDevHandle = MR_PdDevHandleGet(pd, map);
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
/* get second pd also for raid 1/10 fast path writes*/
- if (instance->is_ventura &&
+ if ((instance->adapter_type == VENTURA_SERIES) &&
(raid->level == 1) &&
!io_info->isRead) {
r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -863,8 +863,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
}
} else {
if ((raid->level >= 5) &&
- ((fusion->adapter_type == THUNDERBOLT_SERIES) ||
- ((fusion->adapter_type == INVADER_SERIES) &&
+ ((instance->adapter_type == THUNDERBOLT_SERIES) ||
+ ((instance->adapter_type == INVADER_SERIES) &&
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
@@ -880,7 +880,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
}
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
io_info->span_arm =
@@ -1088,10 +1088,10 @@ MR_BuildRaidContext(struct megasas_instance *instance,
cpu_to_le16(raid->fpIoTimeoutForLd ?
raid->fpIoTimeoutForLd :
map->raidMap.fpPdIoTimeoutSec);
- if (fusion->adapter_type == INVADER_SERIES)
+ if (instance->adapter_type == INVADER_SERIES)
pRAID_Context->reg_lock_flags = (isRead) ?
raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
- else if (!instance->is_ventura)
+ else if (instance->adapter_type == THUNDERBOLT_SERIES)
pRAID_Context->reg_lock_flags = (isRead) ?
REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
pRAID_Context->virtual_disk_tgt_id = raid->targetId;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 3c399e7b3fe1..65dc4fea6352 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -93,8 +93,37 @@ extern unsigned int resetwaittime;
extern unsigned int dual_qdepth_disable;
static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
static void megasas_free_reply_fusion(struct megasas_instance *instance);
+static inline
+void megasas_configure_queue_sizes(struct megasas_instance *instance);
+/**
+ * megasas_check_same_4gb_region - check whether a DMA allocation
+ * stays within a single 4GB region
+ * @instance - adapter's soft instance
+ * @start_addr - start address of DMA allocation
+ * @size - size of allocation in bytes
+ * @return - true : allocation does not cross a
+ * 4GB boundary
+ * false: allocation crosses a
+ * 4GB boundary
+ */
+static inline bool megasas_check_same_4gb_region
+ (struct megasas_instance *instance, dma_addr_t start_addr, size_t size)
+{
+ dma_addr_t end_addr;
+
+ end_addr = start_addr + size;
+ if (upper_32_bits(start_addr) != upper_32_bits(end_addr)) {
+ dev_err(&instance->pdev->dev,
+ "Failed to get same 4GB boundary: start_addr: 0x%llx end_addr: 0x%llx\n",
+ (unsigned long long)start_addr,
+ (unsigned long long)end_addr);
+ return false;
+ }
+
+ return true;
+}
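As a standalone illustration of the predicate above (not part of the patch), the check reduces to comparing the upper 32 bits of the start and end addresses; the addresses below are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal user-space sketch of the same-4GB-region check. */
static bool same_4gb_region(uint64_t start, uint64_t size)
{
	uint64_t end = start + size;

	/* The allocation is acceptable only when start and end share
	 * the same upper 32 bits, i.e. no 4GB boundary is crossed. */
	return (start >> 32) == (end >> 32);
}

int main(void)
{
	/* Hypothetical DMA addresses, for demonstration only. */
	printf("%d\n", same_4gb_region(0x00000000fff00000ULL, 0x200000)); /* 0: crosses */
	printf("%d\n", same_4gb_region(0x0000000100100000ULL, 0x200000)); /* 1: fits */
	return 0;
}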
/**
* megasas_enable_intr_fusion - Enables interrupts
@@ -197,7 +226,7 @@ static void
megasas_fire_cmd_fusion(struct megasas_instance *instance,
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
- if (instance->is_ventura)
+ if (instance->adapter_type == VENTURA_SERIES)
writel(le32_to_cpu(req_desc->u.low),
&instance->reg_set->inbound_single_queue_port);
else {
@@ -240,7 +269,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
reg_set = instance->reg_set;
/* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
- if (!instance->is_ventura)
+ if (instance->adapter_type < VENTURA_SERIES)
cur_max_fw_cmds =
readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
@@ -251,8 +280,8 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
(instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
dev_info(&instance->pdev->dev,
- "Current firmware maximum commands: %d\t LDIO threshold: %d\n",
- cur_max_fw_cmds, ldio_threshold);
+ "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n",
+ cur_max_fw_cmds, ldio_threshold);
if (fw_boot_context == OCR_CONTEXT) {
cur_max_fw_cmds = cur_max_fw_cmds - 1;
@@ -267,10 +296,6 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
instance->max_fw_cmds = cur_max_fw_cmds;
instance->ldio_threshold = ldio_threshold;
- if (!instance->is_rdpq)
- instance->max_fw_cmds =
- min_t(u16, instance->max_fw_cmds, 1024);
-
if (reset_devices)
instance->max_fw_cmds = min(instance->max_fw_cmds,
(u16)MEGASAS_KDUMP_QUEUE_DEPTH);
@@ -280,19 +305,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
* does not exceed max cmds that the FW can support
*/
instance->max_fw_cmds = instance->max_fw_cmds-1;
-
- instance->max_scsi_cmds = instance->max_fw_cmds -
- (MEGASAS_FUSION_INTERNAL_CMDS +
- MEGASAS_FUSION_IOCTL_CMDS);
- instance->cur_can_queue = instance->max_scsi_cmds;
- instance->host->can_queue = instance->cur_can_queue;
}
-
- if (instance->is_ventura)
- instance->max_mpt_cmds =
- instance->max_fw_cmds * RAID_1_PEER_CMDS;
- else
- instance->max_mpt_cmds = instance->max_fw_cmds;
}
/**
* megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
@@ -305,17 +318,23 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
struct fusion_context *fusion = instance->ctrl_context;
struct megasas_cmd_fusion *cmd;
- /* SG, Sense */
- for (i = 0; i < instance->max_mpt_cmds; i++) {
- cmd = fusion->cmd_list[i];
- if (cmd) {
- if (cmd->sg_frame)
- dma_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
- cmd->sg_frame_phys_addr);
- if (cmd->sense)
- dma_pool_free(fusion->sense_dma_pool, cmd->sense,
- cmd->sense_phys_addr);
+ if (fusion->sense)
+ dma_pool_free(fusion->sense_dma_pool, fusion->sense,
+ fusion->sense_phys_addr);
+
+ /* SG */
+ if (fusion->cmd_list) {
+ for (i = 0; i < instance->max_mpt_cmds; i++) {
+ cmd = fusion->cmd_list[i];
+ if (cmd) {
+ if (cmd->sg_frame)
+ dma_pool_free(fusion->sg_dma_pool,
+ cmd->sg_frame,
+ cmd->sg_frame_phys_addr);
+ }
+ kfree(cmd);
}
+ kfree(fusion->cmd_list);
}
if (fusion->sg_dma_pool) {
@@ -347,13 +366,6 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
dma_pool_destroy(fusion->io_request_frames_pool);
fusion->io_request_frames_pool = NULL;
}
-
-
- /* cmd_list */
- for (i = 0; i < instance->max_mpt_cmds; i++)
- kfree(fusion->cmd_list[i]);
-
- kfree(fusion->cmd_list);
}
/**
@@ -367,10 +379,12 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
u16 max_cmd;
struct fusion_context *fusion;
struct megasas_cmd_fusion *cmd;
+ int sense_sz;
+ u32 offset;
fusion = instance->ctrl_context;
max_cmd = instance->max_fw_cmds;
-
+ sense_sz = instance->max_mpt_cmds * SCSI_SENSE_BUFFERSIZE;
fusion->sg_dma_pool =
dma_pool_create("mr_sg", &instance->pdev->dev,
@@ -379,7 +393,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
/* SCSI_SENSE_BUFFERSIZE = 96 bytes */
fusion->sense_dma_pool =
dma_pool_create("mr_sense", &instance->pdev->dev,
- SCSI_SENSE_BUFFERSIZE, 64, 0);
+ sense_sz, 64, 0);
if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
dev_err(&instance->pdev->dev,
@@ -387,6 +401,51 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
return -ENOMEM;
}
+ fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
+ GFP_KERNEL, &fusion->sense_phys_addr);
+ if (!fusion->sense) {
+ dev_err(&instance->pdev->dev,
+ "failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ /* The sense buffer, request frame and reply descriptor pools must
+ * each reside within a single 4GB region; the function below checks
+ * this. On failure, the old allocation and pool are destroyed and a
+ * new pci pool is created with updated alignment.
+ * The alignment is chosen so that the next allocation, if it
+ * succeeds, is guaranteed to meet the same 4GB region requirement.
+ * The real requirement is not alignment as such, but that the start
+ * and end of the DMA address share the same upper 32 bits.
+ */
+
+ if (!megasas_check_same_4gb_region(instance, fusion->sense_phys_addr,
+ sense_sz)) {
+ dma_pool_free(fusion->sense_dma_pool, fusion->sense,
+ fusion->sense_phys_addr);
+ fusion->sense = NULL;
+ dma_pool_destroy(fusion->sense_dma_pool);
+
+ fusion->sense_dma_pool =
+ dma_pool_create("mr_sense_align", &instance->pdev->dev,
+ sense_sz, roundup_pow_of_two(sense_sz),
+ 0);
+ if (!fusion->sense_dma_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
+ GFP_KERNEL,
+ &fusion->sense_phys_addr);
+ if (!fusion->sense) {
+ dev_err(&instance->pdev->dev,
+ "failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
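To see why the retry pool above uses roundup_pow_of_two() of the buffer size as its alignment: a buffer aligned to a power of two at least as large as itself can never straddle a 4GB boundary (for sizes up to 4GB), since 4GB is a multiple of that alignment. A standalone sketch of that property, with assumed sizes:

#include <assert.h>
#include <stdint.h>

/* User-space stand-in for roundup_pow_of_two(). */
static uint64_t next_pow2(uint64_t x)
{
	uint64_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	uint64_t size = 12288;			/* example buffer size */
	uint64_t align = next_pow2(size);	/* 16384 */
	uint64_t addr;

	/* Every address aligned to 'align' keeps the buffer within one
	 * 4GB region, because 1ULL << 32 is a multiple of 'align'. */
	for (addr = 0; addr < (2ULL << 32); addr += align)
		assert((addr >> 32) == ((addr + size - 1) >> 32));
	return 0;
}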
+
/*
* Allocate and attach a frame to each of the commands in cmd_list
*/
@@ -395,9 +454,11 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
GFP_KERNEL, &cmd->sg_frame_phys_addr);
- cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
- GFP_KERNEL, &cmd->sense_phys_addr);
- if (!cmd->sg_frame || !cmd->sense) {
+ offset = SCSI_SENSE_BUFFERSIZE * i;
+ cmd->sense = (u8 *)fusion->sense + offset;
+ cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
+
+ if (!cmd->sg_frame) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
@@ -407,13 +468,10 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
/* create sense buffer for the raid 1/10 fp */
for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
cmd = fusion->cmd_list[i];
- cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
- GFP_KERNEL, &cmd->sense_phys_addr);
- if (!cmd->sense) {
- dev_err(&instance->pdev->dev,
- "Failed from %s %d\n", __func__, __LINE__);
- return -ENOMEM;
- }
+ offset = SCSI_SENSE_BUFFERSIZE * i;
+ cmd->sense = (u8 *)fusion->sense + offset;
+ cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
+
}
return 0;
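For context, a standalone sketch (sizes assumed) of the carving pattern used above: one contiguous allocation is split into fixed-size per-command sense slots, so every command's sense buffer automatically lives in the region already validated by megasas_check_same_4gb_region():

#include <stdint.h>
#include <stdio.h>

#define SENSE_SLOT_SIZE	96	/* SCSI_SENSE_BUFFERSIZE */
#define MAX_CMDS	8	/* example command count only */

int main(void)
{
	/* Stands in for the single DMA allocation fusion->sense. */
	static uint8_t pool[MAX_CMDS * SENSE_SLOT_SIZE];
	unsigned int i;

	for (i = 0; i < MAX_CMDS; i++) {
		uint8_t *sense = pool + (size_t)SENSE_SLOT_SIZE * i;

		/* Each command gets its own 96-byte slice of the pool. */
		printf("cmd %u -> sense offset %u\n",
		       i, (unsigned int)(sense - pool));
	}
	return 0;
}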
@@ -465,16 +523,7 @@ megasas_alloc_request_fusion(struct megasas_instance *instance)
fusion = instance->ctrl_context;
- fusion->req_frames_desc =
- dma_alloc_coherent(&instance->pdev->dev,
- fusion->request_alloc_sz,
- &fusion->req_frames_desc_phys, GFP_KERNEL);
- if (!fusion->req_frames_desc) {
- dev_err(&instance->pdev->dev,
- "Failed from %s %d\n", __func__, __LINE__);
- return -ENOMEM;
- }
-
+retry_alloc:
fusion->io_request_frames_pool =
dma_pool_create("mr_ioreq", &instance->pdev->dev,
fusion->io_frames_alloc_sz, 16, 0);
@@ -489,10 +538,62 @@ megasas_alloc_request_fusion(struct megasas_instance *instance)
dma_pool_alloc(fusion->io_request_frames_pool,
GFP_KERNEL, &fusion->io_request_frames_phys);
if (!fusion->io_request_frames) {
+ if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
+ instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
+ dma_pool_destroy(fusion->io_request_frames_pool);
+ megasas_configure_queue_sizes(instance);
+ goto retry_alloc;
+ } else {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
+ if (!megasas_check_same_4gb_region(instance,
+ fusion->io_request_frames_phys,
+ fusion->io_frames_alloc_sz)) {
+ dma_pool_free(fusion->io_request_frames_pool,
+ fusion->io_request_frames,
+ fusion->io_request_frames_phys);
+ fusion->io_request_frames = NULL;
+ dma_pool_destroy(fusion->io_request_frames_pool);
+
+ fusion->io_request_frames_pool =
+ dma_pool_create("mr_ioreq_align",
+ &instance->pdev->dev,
+ fusion->io_frames_alloc_sz,
+ roundup_pow_of_two(fusion->io_frames_alloc_sz),
+ 0);
+
+ if (!fusion->io_request_frames_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ fusion->io_request_frames =
+ dma_pool_alloc(fusion->io_request_frames_pool,
+ GFP_KERNEL,
+ &fusion->io_request_frames_phys);
+
+ if (!fusion->io_request_frames) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
+ fusion->req_frames_desc =
+ dma_alloc_coherent(&instance->pdev->dev,
+ fusion->request_alloc_sz,
+ &fusion->req_frames_desc_phys, GFP_KERNEL);
+ if (!fusion->req_frames_desc) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
+
return 0;
}
@@ -523,6 +624,41 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
+
+ if (!megasas_check_same_4gb_region(instance,
+ fusion->reply_frames_desc_phys[0],
+ (fusion->reply_alloc_sz * count))) {
+ dma_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc[0],
+ fusion->reply_frames_desc_phys[0]);
+ fusion->reply_frames_desc[0] = NULL;
+ dma_pool_destroy(fusion->reply_frames_desc_pool);
+
+ fusion->reply_frames_desc_pool =
+ dma_pool_create("mr_reply_align",
+ &instance->pdev->dev,
+ fusion->reply_alloc_sz * count,
+ roundup_pow_of_two(fusion->reply_alloc_sz * count),
+ 0);
+
+ if (!fusion->reply_frames_desc_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ fusion->reply_frames_desc[0] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL,
+ &fusion->reply_frames_desc_phys[0]);
+
+ if (!fusion->reply_frames_desc[0]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
reply_desc = fusion->reply_frames_desc[0];
for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
reply_desc->Words = cpu_to_le64(ULLONG_MAX);
@@ -541,52 +677,124 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
int
megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
{
- int i, j, count;
+ int i, j, k, msix_count;
struct fusion_context *fusion;
union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+ union MPI2_REPLY_DESCRIPTORS_UNION *rdpq_chunk_virt[RDPQ_MAX_CHUNK_COUNT];
+ dma_addr_t rdpq_chunk_phys[RDPQ_MAX_CHUNK_COUNT];
+ u8 dma_alloc_count, abs_index;
+ u32 chunk_size, array_size, offset;
fusion = instance->ctrl_context;
+ chunk_size = fusion->reply_alloc_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
+ array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
+ MAX_MSIX_QUEUES_FUSION;
- fusion->rdpq_virt = pci_alloc_consistent(instance->pdev,
- sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
- &fusion->rdpq_phys);
+ fusion->rdpq_virt = pci_alloc_consistent(instance->pdev, array_size,
+ &fusion->rdpq_phys);
if (!fusion->rdpq_virt) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
- memset(fusion->rdpq_virt, 0,
- sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
- count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ memset(fusion->rdpq_virt, 0, array_size);
+ msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
&instance->pdev->dev,
- fusion->reply_alloc_sz,
- 16, 0);
-
- if (!fusion->reply_frames_desc_pool) {
+ chunk_size, 16, 0);
+ fusion->reply_frames_desc_pool_align =
+ dma_pool_create("mr_rdpq_align",
+ &instance->pdev->dev,
+ chunk_size,
+ roundup_pow_of_two(chunk_size),
+ 0);
+
+ if (!fusion->reply_frames_desc_pool ||
+ !fusion->reply_frames_desc_pool_align) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
- for (i = 0; i < count; i++) {
- fusion->reply_frames_desc[i] =
- dma_pool_alloc(fusion->reply_frames_desc_pool,
- GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
- if (!fusion->reply_frames_desc[i]) {
+/*
+ * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ..) and for
+ * VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ..) must lie
+ * within a single 4GB boundary, and all reply queues in a set must share
+ * the same upper 32 bits of their memory address. The driver therefore
+ * allocates the DMA-able memory for the reply queues accordingly, and
+ * applies the VENTURA_SERIES limitation to INVADER_SERIES as well.
+ */
+ dma_alloc_count = DIV_ROUND_UP(msix_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK);
+
+ for (i = 0; i < dma_alloc_count; i++) {
+ rdpq_chunk_virt[i] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL, &rdpq_chunk_phys[i]);
+ if (!rdpq_chunk_virt[i]) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
+ /* The reply descriptor pool must reside within a single 4GB
+ * region; the function below checks this.
+ * On failure, a new pci pool is created with updated alignment.
+ * For RDPQ buffers, the driver always creates two separate pci
+ * pools. The alignment is chosen so that the next allocation, if
+ * it succeeds, is guaranteed to meet the same 4GB region
+ * requirement.
+ * rdpq_tracker keeps track of each buffer's physical and virtual
+ * address and its pci pool descriptor, which helps the driver
+ * when freeing the resources.
+ */
+ if (!megasas_check_same_4gb_region(instance, rdpq_chunk_phys[i],
+ chunk_size)) {
+ dma_pool_free(fusion->reply_frames_desc_pool,
+ rdpq_chunk_virt[i],
+ rdpq_chunk_phys[i]);
- fusion->rdpq_virt[i].RDPQBaseAddress =
- cpu_to_le64(fusion->reply_frames_desc_phys[i]);
+ rdpq_chunk_virt[i] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool_align,
+ GFP_KERNEL, &rdpq_chunk_phys[i]);
+ if (!rdpq_chunk_virt[i]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+ fusion->rdpq_tracker[i].dma_pool_ptr =
+ fusion->reply_frames_desc_pool_align;
+ } else {
+ fusion->rdpq_tracker[i].dma_pool_ptr =
+ fusion->reply_frames_desc_pool;
+ }
- reply_desc = fusion->reply_frames_desc[i];
- for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
- reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+ fusion->rdpq_tracker[i].pool_entry_phys = rdpq_chunk_phys[i];
+ fusion->rdpq_tracker[i].pool_entry_virt = rdpq_chunk_virt[i];
}
+
+ for (k = 0; k < dma_alloc_count; k++) {
+ for (i = 0; i < RDPQ_MAX_INDEX_IN_ONE_CHUNK; i++) {
+ abs_index = (k * RDPQ_MAX_INDEX_IN_ONE_CHUNK) + i;
+
+ if (abs_index == msix_count)
+ break;
+ offset = fusion->reply_alloc_sz * i;
+ fusion->rdpq_virt[abs_index].RDPQBaseAddress =
+ cpu_to_le64(rdpq_chunk_phys[k] + offset);
+ fusion->reply_frames_desc_phys[abs_index] =
+ rdpq_chunk_phys[k] + offset;
+ fusion->reply_frames_desc[abs_index] =
+ (union MPI2_REPLY_DESCRIPTORS_UNION *)((u8 *)rdpq_chunk_virt[k] + offset);
+
+ reply_desc = fusion->reply_frames_desc[abs_index];
+ for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
+ reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+ }
+ }
+
return 0;
}
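A standalone illustration (values assumed) of the index arithmetic used in the loops above: a given MSI-X reply queue maps onto one of the RDPQ chunks and a byte offset within it:

#include <stdio.h>

#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16	/* queues per DMA chunk */

int main(void)
{
	unsigned int reply_alloc_sz = 4096;	/* example size only */
	unsigned int abs_index = 37;		/* example MSI-X queue */
	unsigned int chunk = abs_index / RDPQ_MAX_INDEX_IN_ONE_CHUNK;
	unsigned int offset = (abs_index % RDPQ_MAX_INDEX_IN_ONE_CHUNK) *
			      reply_alloc_sz;

	/* Queue 37 lands in chunk 2 at offset 5 * reply_alloc_sz. */
	printf("chunk=%u offset=%u\n", chunk, offset);
	return 0;
}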
@@ -598,15 +806,18 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) {
fusion = instance->ctrl_context;
- for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
- if (fusion->reply_frames_desc[i])
- dma_pool_free(fusion->reply_frames_desc_pool,
- fusion->reply_frames_desc[i],
- fusion->reply_frames_desc_phys[i]);
+ for (i = 0; i < RDPQ_MAX_CHUNK_COUNT; i++) {
+ if (fusion->rdpq_tracker[i].pool_entry_virt)
+ dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr,
+ fusion->rdpq_tracker[i].pool_entry_virt,
+ fusion->rdpq_tracker[i].pool_entry_phys);
+
}
if (fusion->reply_frames_desc_pool)
dma_pool_destroy(fusion->reply_frames_desc_pool);
+ if (fusion->reply_frames_desc_pool_align)
+ dma_pool_destroy(fusion->reply_frames_desc_pool_align);
if (fusion->rdpq_virt)
pci_free_consistent(instance->pdev,
@@ -661,9 +872,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
fusion = instance->ctrl_context;
- if (megasas_alloc_cmdlist_fusion(instance))
- goto fail_exit;
-
if (megasas_alloc_request_fusion(instance))
goto fail_exit;
@@ -674,6 +882,11 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
if (megasas_alloc_reply_fusion(instance))
goto fail_exit;
+ if (megasas_alloc_cmdlist_fusion(instance))
+ goto fail_exit;
+
+ dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
+ instance->max_fw_cmds);
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -770,22 +983,34 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MFI_CAPABILITIES *drv_ops;
u32 scratch_pad_2;
unsigned long flags;
+ struct timeval tv;
+ bool cur_fw_64bit_dma_capable;
fusion = instance->ctrl_context;
- cmd = megasas_get_cmd(instance);
+ ioc_init_handle = fusion->ioc_init_request_phys;
+ IOCInitMessage = fusion->ioc_init_request;
- if (!cmd) {
- dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
- ret = 1;
- goto fail_get_cmd;
- }
+ cmd = fusion->ioc_init_cmd;
scratch_pad_2 = readl
(&instance->reg_set->outbound_scratch_pad_2);
cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
+ if (instance->adapter_type == INVADER_SERIES) {
+ cur_fw_64bit_dma_capable =
+ (scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
+
+ if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
+ dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
+ "DMA mask, but upcoming FW does not support 64bit DMA mask\n");
+ megaraid_sas_kill_hba(instance);
+ ret = 1;
+ goto fail_fw_init;
+ }
+ }
+
if (instance->is_rdpq && !cur_rdpq_mode) {
dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
" from RDPQ mode to non RDPQ mode\n");
@@ -798,18 +1023,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
instance->fw_sync_cache_support ? "Yes" : "No");
- IOCInitMessage =
- dma_alloc_coherent(&instance->pdev->dev,
- sizeof(struct MPI2_IOC_INIT_REQUEST),
- &ioc_init_handle, GFP_KERNEL);
-
- if (!IOCInitMessage) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "IOCInitMessage\n");
- ret = 1;
- goto fail_fw_init;
- }
-
memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
@@ -825,8 +1038,15 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
IOCInitMessage->MsgFlags = instance->is_rdpq ?
MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
+ IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr));
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
+
+ do_gettimeofday(&tv);
+ /* Convert to milliseconds as per FW requirement */
+ IOCInitMessage->TimeStamp = cpu_to_le64((tv.tv_sec * 1000) +
+ (tv.tv_usec / 1000));
+
init_frame = (struct megasas_init_frame *)cmd->frame;
memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
@@ -842,7 +1062,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
/* driver support Extended MSIX */
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
drv_ops->mfi_capabilities.support_additional_msix = 1;
/* driver supports HA / Remote LUN over Fast Path interface */
drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
@@ -860,6 +1080,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops->mfi_capabilities.support_qd_throttling = 1;
drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
+
+ if (instance->consistent_mask_64bit)
+ drv_ops->mfi_capabilities.support_64bit_mode = 1;
+
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
@@ -869,8 +1093,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
strlen(sys_info) > 64 ? 64 : strlen(sys_info));
instance->system_info_buf->systemIdLength =
strlen(sys_info) > 64 ? 64 : strlen(sys_info);
- init_frame->system_info_lo = instance->system_info_h;
- init_frame->system_info_hi = 0;
+ init_frame->system_info_lo = cpu_to_le32(lower_32_bits(instance->system_info_h));
+ init_frame->system_info_hi = cpu_to_le32(upper_32_bits(instance->system_info_h));
}
init_frame->queue_info_new_phys_addr_hi =
@@ -917,12 +1141,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
ret = 0;
fail_fw_init:
- megasas_return_cmd(instance, cmd);
- if (IOCInitMessage)
- dma_free_coherent(&instance->pdev->dev,
- sizeof(struct MPI2_IOC_INIT_REQUEST),
- IOCInitMessage, ioc_init_handle);
-fail_get_cmd:
dev_err(&instance->pdev->dev,
"Init cmd return status %s for SCSI host %d\n",
ret ? "FAILED" : "SUCCESS", instance->host->host_no);
@@ -967,6 +1185,15 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
memset(pd_sync, 0, pd_seq_map_sz);
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ if (pend) {
+ dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
+ dcmd->flags = MFI_FRAME_DIR_WRITE;
+ instance->jbod_seq_cmd = cmd;
+ } else {
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ }
+
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
@@ -974,21 +1201,16 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz);
+
+ megasas_set_dma_settings(instance, dcmd, pd_seq_h, pd_seq_map_sz);
if (pend) {
- dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
- instance->jbod_seq_cmd = cmd;
instance->instancet->issue_dcmd(instance, cmd);
return 0;
}
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
-
/* Below code is only for non pended DCMD */
- if (instance->ctrl_context && !instance->mask_interrupts)
+ if (!instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
@@ -1001,7 +1223,7 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
ret = -EINVAL;
}
- if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+ if (ret == DCMD_TIMEOUT)
megaraid_sas_kill_hba(instance);
if (ret == DCMD_SUCCESS)
@@ -1069,21 +1291,21 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(size_map_info);
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
+
+ if (!instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
- if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+ if (ret == DCMD_TIMEOUT)
megaraid_sas_kill_hba(instance);
megasas_return_cmd(instance, cmd);
@@ -1173,15 +1395,15 @@ megasas_sync_map_info(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
+ dcmd->flags = MFI_FRAME_DIR_WRITE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(size_map_info);
dcmd->mbox.b[0] = num_lds;
dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
+
+ megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
instance->map_update_cmd = cmd;
@@ -1337,6 +1559,94 @@ ld_drv_map_alloc_fail:
}
/**
+ * megasas_configure_queue_sizes - Calculate size of request desc queue,
+ * reply desc queue,
+ * IO request frame queue, set can_queue.
+ * @instance: Adapter soft state
+ * @return: void
+ */
+static inline
+void megasas_configure_queue_sizes(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+ u16 max_cmd;
+
+ fusion = instance->ctrl_context;
+ max_cmd = instance->max_fw_cmds;
+
+ if (instance->adapter_type == VENTURA_SERIES)
+ instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS;
+ else
+ instance->max_mpt_cmds = instance->max_fw_cmds;
+
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ (MEGASAS_FUSION_INTERNAL_CMDS +
+ MEGASAS_FUSION_IOCTL_CMDS);
+ instance->cur_can_queue = instance->max_scsi_cmds;
+ instance->host->can_queue = instance->cur_can_queue;
+
+ fusion->reply_q_depth = 2 * ((max_cmd + 1 + 15) / 16) * 16;
+
+ fusion->request_alloc_sz = sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *
+ instance->max_mpt_cmds;
+ fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) *
+ (fusion->reply_q_depth);
+ fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
+ (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
+ * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
+}
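A rough, standalone sketch of the sizing rules in megasas_configure_queue_sizes(); the firmware queue depth and the internal/ioctl command counts below are placeholders, not the driver's actual constants:

#include <stdio.h>

int main(void)
{
	unsigned int max_fw_cmds = 1008;	/* example firmware depth */
	unsigned int raid_1_peer_cmds = 2;	/* extra cmds on VENTURA */
	unsigned int internal_cmds = 5;		/* assumed value */
	unsigned int ioctl_cmds = 3;		/* assumed value */

	unsigned int max_mpt_cmds = max_fw_cmds * raid_1_peer_cmds;
	unsigned int max_scsi_cmds = max_fw_cmds -
				     (internal_cmds + ioctl_cmds);
	/* Reply queue depth: twice (max_fw_cmds + 1), rounded up to a
	 * multiple of 16. */
	unsigned int reply_q_depth = 2 * ((max_fw_cmds + 1 + 15) / 16) * 16;

	printf("mpt=%u scsi=%u reply_q_depth=%u\n",
	       max_mpt_cmds, max_scsi_cmds, reply_q_depth);
	return 0;
}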
+
+static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+ struct megasas_cmd *cmd;
+
+ fusion = instance->ctrl_context;
+
+ cmd = kmalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
+
+ if (!cmd) {
+ dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ cmd->frame = dma_alloc_coherent(&instance->pdev->dev,
+ IOC_INIT_FRAME_SIZE,
+ &cmd->frame_phys_addr, GFP_KERNEL);
+
+ if (!cmd->frame) {
+ dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
+ __func__, __LINE__);
+ kfree(cmd);
+ return -ENOMEM;
+ }
+
+ fusion->ioc_init_cmd = cmd;
+ return 0;
+}
+
+/**
+ * megasas_free_ioc_init_cmd - Free IOC INIT command frame
+ * @instance: Adapter soft state
+ */
+static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ if (fusion->ioc_init_cmd && fusion->ioc_init_cmd->frame)
+ dma_free_coherent(&instance->pdev->dev,
+ IOC_INIT_FRAME_SIZE,
+ fusion->ioc_init_cmd->frame,
+ fusion->ioc_init_cmd->frame_phys_addr);
+
+ if (fusion->ioc_init_cmd)
+ kfree(fusion->ioc_init_cmd);
+}
+
+/**
* megasas_init_adapter_fusion - Initializes the FW
* @instance: Adapter soft state
*
@@ -1347,7 +1657,6 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *reg_set;
struct fusion_context *fusion;
- u16 max_cmd;
u32 scratch_pad_2;
int i = 0, count;
@@ -1363,17 +1672,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
instance->max_mfi_cmds =
MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
- max_cmd = instance->max_fw_cmds;
-
- fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
-
- fusion->request_alloc_sz =
- sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * instance->max_mpt_cmds;
- fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
- *(fusion->reply_q_depth);
- fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
- (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
- * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
+ megasas_configure_queue_sizes(instance);
scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
/* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
@@ -1431,6 +1730,9 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
MEGASAS_FUSION_IOCTL_CMDS);
sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
+ if (megasas_alloc_ioc_init_frame(instance))
+ return 1;
+
/*
* Allocate memory for descriptors
* Create a pool of commands
@@ -1468,6 +1770,7 @@ fail_ioc_init:
fail_alloc_cmds:
megasas_free_cmds(instance);
fail_alloc_mfi_cmds:
+ megasas_free_ioc_init_cmd(instance);
return 1;
}
@@ -1800,7 +2103,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
fusion = instance->ctrl_context;
- if (fusion->adapter_type >= INVADER_SERIES) {
+ if (instance->adapter_type >= INVADER_SERIES) {
struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
sgl_ptr_end->Flags = 0;
@@ -1810,7 +2113,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
sgl_ptr->Flags = 0;
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
if (i == sge_count - 1)
sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
sgl_ptr++;
@@ -1820,7 +2123,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
(sge_count > fusion->max_sge_in_main_msg)) {
struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
- if (fusion->adapter_type >= INVADER_SERIES) {
+ if (instance->adapter_type >= INVADER_SERIES) {
if ((le16_to_cpu(cmd->io_request->IoFlags) &
MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
@@ -1836,7 +2139,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
sg_chain = sgl_ptr;
/* Prepare chain element */
sg_chain->NextChainOffset = 0;
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
else
sg_chain->Flags =
@@ -2360,7 +2663,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
praid_context = &io_request->RaidContext;
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
megasas_stream_detect(instance, cmd, &io_info);
spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
@@ -2413,7 +2716,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if (fusion->adapter_type == INVADER_SERIES) {
+ if (instance->adapter_type == INVADER_SERIES) {
if (io_request->RaidContext.raid_context.reg_lock_flags ==
REGION_TYPE_UNUSED)
cmd->request_desc->SCSIIO.RequestFlags =
@@ -2426,7 +2729,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
io_request->RaidContext.raid_context.reg_lock_flags |=
(MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
MR_RL_FLAGS_SEQ_NUM_ENABLE);
- } else if (instance->is_ventura) {
+ } else if (instance->adapter_type == VENTURA_SERIES) {
io_request->RaidContext.raid_context_g35.nseg_type |=
(1 << RAID_CONTEXT_NSEG_SHIFT);
io_request->RaidContext.raid_context_g35.nseg_type |=
@@ -2445,7 +2748,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
&io_info, local_map_ptr);
scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
cmd->pd_r1_lb = io_info.pd_after_lb;
- if (instance->is_ventura)
+ if (instance->adapter_type == VENTURA_SERIES)
io_request->RaidContext.raid_context_g35.span_arm
= io_info.span_arm;
else
@@ -2455,7 +2758,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
} else
scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
- if (instance->is_ventura)
+ if (instance->adapter_type == VENTURA_SERIES)
cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
else
cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
@@ -2478,7 +2781,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if (fusion->adapter_type == INVADER_SERIES) {
+ if (instance->adapter_type == INVADER_SERIES) {
if (io_info.do_fp_rlbypass ||
(io_request->RaidContext.raid_context.reg_lock_flags
== REGION_TYPE_UNUSED))
@@ -2491,7 +2794,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
MR_RL_FLAGS_SEQ_NUM_ENABLE);
io_request->RaidContext.raid_context.nseg = 0x1;
- } else if (instance->is_ventura) {
+ } else if (instance->adapter_type == VENTURA_SERIES) {
io_request->RaidContext.raid_context_g35.routing_flags |=
(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
io_request->RaidContext.raid_context_g35.nseg_type |=
@@ -2566,7 +2869,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
/* set RAID context values */
pRAID_Context->config_seq_num = raid->seqNum;
- if (!instance->is_ventura)
+ if (instance->adapter_type != VENTURA_SERIES)
pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
pRAID_Context->timeout_value =
cpu_to_le16(raid->fpIoTimeoutForLd);
@@ -2651,7 +2954,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
io_request->RaidContext.raid_context_g35.routing_flags |=
(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
io_request->RaidContext.raid_context_g35.nseg_type |=
@@ -2699,7 +3002,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
pRAID_Context->timeout_value =
cpu_to_le16((os_timeout_value > timeout_limit) ?
timeout_limit : os_timeout_value);
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
io_request->IoFlags |=
cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
@@ -2782,7 +3085,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
return 1;
}
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
@@ -2805,7 +3108,8 @@ megasas_build_io_fusion(struct megasas_instance *instance,
io_request->SGLOffset0 =
offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
- io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
+ io_request->SenseBufferLowAddress =
+ cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
cmd->scmd = scp;
@@ -2846,7 +3150,7 @@ void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
(fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
/*sense buffer is different for r1 command*/
r1_cmd->io_request->SenseBufferLowAddress =
- cpu_to_le32(r1_cmd->sense_phys_addr);
+ cpu_to_le32(lower_32_bits(r1_cmd->sense_phys_addr));
r1_cmd->scmd = cmd->scmd;
req_desc2 = megasas_get_request_descriptor(instance,
(r1_cmd->index - 1));
@@ -3312,7 +3616,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
io_req = cmd->io_request;
- if (fusion->adapter_type >= INVADER_SERIES) {
+ if (instance->adapter_type >= INVADER_SERIES) {
struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
@@ -3386,6 +3690,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
void
megasas_release_fusion(struct megasas_instance *instance)
{
+ megasas_free_ioc_init_cmd(instance);
megasas_free_cmds(instance);
megasas_free_cmds_fusion(instance);
@@ -4244,7 +4549,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
for (i = 0 ; i < instance->max_scsi_cmds; i++) {
cmd_fusion = fusion->cmd_list[i];
/*check for extra commands issued by driver*/
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
megasas_return_cmd_fusion(instance, r1_cmd);
}
@@ -4345,7 +4650,7 @@ transition_to_ready:
megasas_set_dynamic_target_properties(sdev);
/* reset stream detection array */
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
memset(fusion->stream_detect_by_ld[j],
0, sizeof(struct LD_STREAM_DETECT));
@@ -4493,20 +4798,31 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
{
struct fusion_context *fusion;
- instance->ctrl_context_pages = get_order(sizeof(struct fusion_context));
- instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- instance->ctrl_context_pages);
+ instance->ctrl_context = kzalloc(sizeof(struct fusion_context),
+ GFP_KERNEL);
if (!instance->ctrl_context) {
- /* fall back to using vmalloc for fusion_context */
- instance->ctrl_context = vzalloc(sizeof(struct fusion_context));
- if (!instance->ctrl_context) {
- dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__);
- return -ENOMEM;
- }
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
}
fusion = instance->ctrl_context;
+ fusion->log_to_span_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
+ sizeof(LD_SPAN_INFO));
+ fusion->log_to_span =
+ (PLD_SPAN_INFO)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ fusion->log_to_span_pages);
+ if (!fusion->log_to_span) {
+ fusion->log_to_span = vzalloc(MAX_LOGICAL_DRIVES_EXT *
+ sizeof(LD_SPAN_INFO));
+ if (!fusion->log_to_span) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
sizeof(struct LD_LOAD_BALANCE_INFO));
fusion->load_balance_info =
@@ -4537,11 +4853,15 @@ megasas_free_fusion_context(struct megasas_instance *instance)
fusion->load_balance_info_pages);
}
- if (is_vmalloc_addr(fusion))
- vfree(fusion);
- else
- free_pages((ulong)fusion,
- instance->ctrl_context_pages);
+ if (fusion->log_to_span) {
+ if (is_vmalloc_addr(fusion->log_to_span))
+ vfree(fusion->log_to_span);
+ else
+ free_pages((ulong)fusion->log_to_span,
+ fusion->log_to_span_pages);
+ }
+
+ kfree(fusion);
}
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index d78d76112501..1814d79cb98d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -51,6 +51,8 @@
#define HOST_DIAG_RESET_ADAPTER 0x4
#define MEGASAS_FUSION_MAX_RESET_TRIES 3
#define MAX_MSIX_QUEUES_FUSION 128
+#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
+#define RDPQ_MAX_CHUNK_COUNT (MAX_MSIX_QUEUES_FUSION / RDPQ_MAX_INDEX_IN_ONE_CHUNK)
/* Invader defines */
#define MPI2_TYPE_CUDA 0x2
@@ -103,12 +105,8 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
#define THRESHOLD_REPLY_COUNT 50
#define RAID_1_PEER_CMDS 2
#define JBOD_MAPS_COUNT 2
-
-enum MR_FUSION_ADAPTER_TYPE {
- THUNDERBOLT_SERIES = 0,
- INVADER_SERIES = 1,
- VENTURA_SERIES = 2,
-};
+#define MEGASAS_REDUCE_QD_COUNT 64
+#define IOC_INIT_FRAME_SIZE 4096
/*
* Raid Context structure which describes MegaRAID specific IO Parameters
@@ -1270,6 +1268,12 @@ struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
u32 Reserved2;
};
+struct rdpq_alloc_detail {
+ struct dma_pool *dma_pool_ptr;
+ dma_addr_t pool_entry_phys;
+ union MPI2_REPLY_DESCRIPTORS_UNION *pool_entry_virt;
+};
+
struct fusion_context {
struct megasas_cmd_fusion **cmd_list;
dma_addr_t req_frames_desc_phys;
@@ -1282,9 +1286,14 @@ struct fusion_context {
struct dma_pool *sg_dma_pool;
struct dma_pool *sense_dma_pool;
+ u8 *sense;
+ dma_addr_t sense_phys_addr;
+
dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
+ struct rdpq_alloc_detail rdpq_tracker[RDPQ_MAX_CHUNK_COUNT];
struct dma_pool *reply_frames_desc_pool;
+ struct dma_pool *reply_frames_desc_pool_align;
u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
@@ -1318,9 +1327,13 @@ struct fusion_context {
u8 fast_path_io;
struct LD_LOAD_BALANCE_INFO *load_balance_info;
u32 load_balance_info_pages;
- LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
- u8 adapter_type;
+ LD_SPAN_INFO *log_to_span;
+ u32 log_to_span_pages;
struct LD_STREAM_DETECT **stream_detect_by_ld;
+ dma_addr_t ioc_init_request_phys;
+ struct MPI2_IOC_INIT_REQUEST *ioc_init_request;
+ struct megasas_cmd *ioc_init_cmd;
+
};
union desc_value {
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 2608011cc7f1..b015c30d2c32 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -9,7 +9,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.42
+ * mpi2.h Version: 02.00.48
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -104,6 +104,16 @@
* 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT.
* 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT
* 01-01-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT
+ * 04-05-16 02.00.43 Modified MPI26_DIAG_BOOT_DEVICE_SELECT defines
+ * to be unique within first 32 characters.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * Bumped MPI2_HEADER_VERSION_UNIT.
+ * 04-10-16 02.00.44 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 07-06-16 02.00.45 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -143,7 +153,7 @@
#define MPI2_VERSION_02_06 (0x0206)
/*Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x2A)
+#define MPI2_HEADER_VERSION_UNIT (0x30)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -250,6 +260,12 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000)
#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800)
+/* Defines for V7A/V7R HostDiagnostic Register */
+#define MPI26_DIAG_BOOT_DEVICE_SEL_64FLASH (0x00000000)
+#define MPI26_DIAG_BOOT_DEVICE_SEL_64HCDW (0x00000800)
+#define MPI26_DIAG_BOOT_DEVICE_SEL_32FLASH (0x00001000)
+#define MPI26_DIAG_BOOT_DEVICE_SEL_32HCDW (0x00001800)
+
#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400)
#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200)
#define MPI2_DIAG_HCB_MODE (0x00000100)
@@ -368,6 +384,7 @@ typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR {
#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08)
#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A)
#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C)
+#define MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED (0x10)
#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
@@ -426,6 +443,13 @@ typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR
Mpi25FastPathSCSIIORequestDescriptor_t,
*pMpi25FastPathSCSIIORequestDescriptor_t;
+/*PCIe Encapsulated Request Descriptor */
+typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR
+ MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR,
+ *PTR_MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR,
+ Mpi26PCIeEncapsulatedRequestDescriptor_t,
+ *pMpi26PCIeEncapsulatedRequestDescriptor_t;
+
/*union of Request Descriptors */
typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
@@ -434,6 +458,7 @@ typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO;
+ MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR PCIeEncapsulated;
U64 Words;
} MPI2_REQUEST_DESCRIPTOR_UNION,
*PTR_MPI2_REQUEST_DESCRIPTOR_UNION,
@@ -451,6 +476,7 @@ typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
* Atomic SCSI Target Request Descriptor
* Atomic RAID Accelerator Request Descriptor
* Atomic Fast Path SCSI IO Request Descriptor
+ * Atomic PCIe Encapsulated Request Descriptor
*/
/*Atomic Request Descriptor */
@@ -488,6 +514,7 @@ typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR {
#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03)
#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05)
#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06)
+#define MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS (0x08)
#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
/*values for marking a reply descriptor as unused */
@@ -566,6 +593,13 @@ typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
Mpi25FastPathSCSIIOSuccessReplyDescriptor_t,
*pMpi25FastPathSCSIIOSuccessReplyDescriptor_t;
+/*PCIe Encapsulated Success Reply Descriptor */
+typedef MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
+ MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi26PCIeEncapsulatedSuccessReplyDescriptor_t,
+ *pMpi26PCIeEncapsulatedSuccessReplyDescriptor_t;
+
/*union of Reply Descriptors */
typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
@@ -575,6 +609,8 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess;
+ MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR
+ PCIeEncapsulatedSuccess;
U64 Words;
} MPI2_REPLY_DESCRIPTORS_UNION,
*PTR_MPI2_REPLY_DESCRIPTORS_UNION,
@@ -617,6 +653,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F)
#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31)
+#define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33)
#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF)
@@ -1163,6 +1200,8 @@ typedef union _MPI25_SGE_IO_UNION {
#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C)
#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00)
+#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08)
+#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10)
/*Data Location Address Space */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 036c9cf61032..ee117106d0f7 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -7,7 +7,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.35
+ * mpi2_cnfg.h Version: 02.00.40
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -190,6 +190,35 @@
* MPI2_CONFIG_PAGE_BIOS_1.
* 08-25-15 02.00.34 Bumped Header Version.
* 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4.
+ * 01-21-16 02.00.36 Added/modified MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added Link field to PCIe Link Pages
+ * Added EnclosureLevel and ConnectorName to PCIe
+ * Device Page 0.
+ * Added define for PCIE IoUnit page 1 max rate shift.
+ * Added comment for reserved ExtPageTypes.
+ * Added SAS 4 22.5 Gb/s speed support.
+ * Added PCIe 4 16.0 GT/sec speed support.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * Added NegotiatedLinkRate and NegotiatedPortWidth to
+ * PCIe device page 0.
+ * 04-10-16 02.00.37 Fixed MPI2_MFGPAGE_DEVID_SAS3616/3708 defines
+ * 07-01-16 02.00.38 Added Manufacturing page 7 Connector types.
+ * Changed declaration of ConnectorName in PCIe DevicePage0
+ * to match SAS DevicePage 0.
+ * Added SATADeviceWaitTime to IO Unit Page 11.
+ * Added MPI26_MFGPAGE_DEVID_SAS4008
+ * Added x16 PCIe width to IO Unit Page 7
+ * Added LINKFLAGS to control SRIS in PCIe IO Unit page 1
+ * phy data.
+ * Added InitStatus to PCIe IO Unit Page 1 header.
+ * 09-01-16 02.00.39 Added MPI26_CONFIG_PAGE_ENCLOSURE_0 and related defines.
+ * Added MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE and
+ * MPI26_ENCLOS_PGAD_FORM_HANDLE page address formats.
+ * 02-02-17 02.00.40 Added MPI2_MANPAGE7_SLOT_UNKNOWN.
+ * Added ChassisSlot field to SAS Enclosure Page 0.
+ * Added ChassisSlot Valid bit (bit 5) to the Flags field
+ * in SAS Enclosure Page 0.
* --------------------------------------------------------------------------
*/
@@ -273,6 +302,10 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19)
#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A)
+#define MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT (0x1B)
+#define MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH (0x1C)
+#define MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE (0x1D)
+#define MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK (0x1E)
/*****************************************************************************
@@ -340,6 +373,12 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
+/*Enclosure PageAddress format */
+#define MPI26_ENCLOS_PGAD_FORM_MASK (0xF0000000)
+#define MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI26_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
+
+#define MPI26_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
/*RAID Configuration PageAddress format */
#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000)
@@ -366,6 +405,33 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF)
+/*PCIe Switch PageAddress format */
+#define MPI26_PCIE_SWITCH_PGAD_FORM_MASK (0xF0000000)
+#define MPI26_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HNDL (0x00000000)
+#define MPI26_PCIE_SWITCH_PGAD_FORM_HNDL_PORTNUM (0x10000000)
+#define MPI26_PCIE_SWITCH_EXPAND_PGAD_FORM_HNDL (0x20000000)
+
+#define MPI26_PCIE_SWITCH_PGAD_HANDLE_MASK (0x0000FFFF)
+#define MPI26_PCIE_SWITCH_PGAD_PORTNUM_MASK (0x00FF0000)
+#define MPI26_PCIE_SWITCH_PGAD_PORTNUM_SHIFT (16)
+
+
+/*PCIe Device PageAddress format */
+#define MPI26_PCIE_DEVICE_PGAD_FORM_MASK (0xF0000000)
+#define MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE (0x20000000)
+
+#define MPI26_PCIE_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF)
+
+/*PCIe Link PageAddress format */
+#define MPI26_PCIE_LINK_PGAD_FORM_MASK (0xF0000000)
+#define MPI26_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK (0x00000000)
+#define MPI26_PCIE_LINK_PGAD_FORM_LINK_NUM (0x10000000)
+
+#define MPI26_PCIE_DEVICE_PGAD_LINKNUM_MASK (0x000000FF)
+
+
+
/****************************************************************************
* Configuration messages
****************************************************************************/
@@ -485,6 +551,12 @@ typedef struct _MPI2_CONFIG_REPLY {
#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD)
#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE)
#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF)
+#define MPI26_MFGPAGE_DEVID_SAS3716 (0x00D0)
+#define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1)
+#define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2)
+
+#define MPI26_MFGPAGE_DEVID_SAS4008 (0x00A1)
+
/*Manufacturing Page 0 */
@@ -727,6 +799,12 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO {
#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B)
#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C)
#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D)
+#define MPI2_MANPAGE7_PINOUT_SFF_8088_A (0x0E)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_16i (0x0F)
+#define MPI2_MANPAGE7_PINOUT_SFF_8654_4i (0x10)
+#define MPI2_MANPAGE7_PINOUT_SFF_8654_8i (0x11)
+#define MPI2_MANPAGE7_PINOUT_SFF_8611_4i (0x12)
+#define MPI2_MANPAGE7_PINOUT_SFF_8611_8i (0x13)
/*defines for the Location field */
#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01)
@@ -737,6 +815,9 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO {
#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20)
#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80)
+/*defines for the Slot field */
+#define MPI2_MANPAGE7_SLOT_UNKNOWN (0xFFFF)
+
/*
*Host code (drivers, BIOS, utilities, etc.) should leave this define set to
*one and check the value returned for NumPhys at runtime.
@@ -1000,11 +1081,13 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02)
#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04)
#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X16 (0x10)
/*defines for IO Unit Page 7 PCIeSpeed field */
#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00)
#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01)
#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_16_0_GBPS (0x03)
/*defines for IO Unit Page 7 ProcessorState field */
#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F)
@@ -1971,6 +2054,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 {
#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09)
#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A)
#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B)
+#define MPI26_SAS_NEG_LINK_RATE_22_5 (0x0C)
/*values for AttachedPhyInfo fields */
@@ -2038,12 +2122,14 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 {
#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90)
#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0)
#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0)
+#define MPI26_SAS_PRATE_MAX_RATE_22_5 (0xC0)
#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F)
#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08)
#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09)
#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A)
#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B)
+#define MPI26_SAS_PRATE_MIN_RATE_22_5 (0x0C)
/*values for SAS HwLinkRate fields */
@@ -2052,11 +2138,13 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 {
#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90)
#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0)
#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0)
+#define MPI26_SAS_HWRATE_MAX_RATE_22_5 (0xC0)
#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F)
#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08)
#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09)
#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A)
#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B)
+#define MPI26_SAS_HWRATE_MIN_RATE_22_5 (0x0C)
@@ -2241,11 +2329,13 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90)
#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0)
#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0)
+#define MPI26_SASIOUNIT1_MAX_RATE_22_5 (0xC0)
#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F)
#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08)
#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09)
#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A)
#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B)
+#define MPI26_SASIOUNIT1_MIN_RATE_22_5 (0x0C)
/*see mpi2_sas.h for values for
*SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
@@ -3159,37 +3249,29 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 {
/*SAS Enclosure Page 0 */
typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
- MPI2_CONFIG_EXTENDED_PAGE_HEADER
- Header; /*0x00 */
- U32
- Reserved1; /*0x08 */
- U64
- EnclosureLogicalID; /*0x0C */
- U16
- Flags; /*0x14 */
- U16
- EnclosureHandle; /*0x16 */
- U16
- NumSlots; /*0x18 */
- U16
- StartSlot; /*0x1A */
- U8
- Reserved2; /*0x1C */
- U8
- EnclosureLevel; /*0x1D */
- U16
- SEPDevHandle; /*0x1E */
- U32
- Reserved3; /*0x20 */
- U32
- Reserved4; /*0x24 */
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U64 EnclosureLogicalID; /*0x0C */
+ U16 Flags; /*0x14 */
+ U16 EnclosureHandle; /*0x16 */
+ U16 NumSlots; /*0x18 */
+ U16 StartSlot; /*0x1A */
+ U8 ChassisSlot; /*0x1C */
+ U8 EnclosureLevel; /*0x1D */
+ U16 SEPDevHandle; /*0x1E */
+ U32 Reserved3; /*0x20 */
+ U32 Reserved4; /*0x24 */
} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
*PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
- Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t;
+ Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t,
+ MPI26_CONFIG_PAGE_ENCLOSURE_0,
+ *PTR_MPI26_CONFIG_PAGE_ENCLOSURE_0,
+ Mpi26EnclosurePage0_t, *pMpi26EnclosurePage0_t;
#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04)
/*values for SAS Enclosure Page 0 Flags field */
+#define MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
@@ -3199,6 +3281,18 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
+#define MPI26_ENCLOSURE0_PAGEVERSION (0x04)
+
+/*Values for Enclosure Page 0 Flags field */
+#define MPI26_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
+#define MPI26_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
+#define MPI26_ENCLS0_FLAGS_MNG_MASK (0x000F)
+#define MPI26_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
+#define MPI26_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+#define MPI26_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002)
+#define MPI26_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003)
+#define MPI26_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
+#define MPI26_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
/****************************************************************************
* Log Config Page
@@ -3498,4 +3592,422 @@ typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
/*PageVersion should be provided by product-specific code */
+
+
+/****************************************************************************
+* values for fields used by several types of PCIe Config Pages
+****************************************************************************/
+
+/*values for NegotiatedLinkRates fields */
+#define MPI26_PCIE_NEG_LINK_RATE_MASK_PHYSICAL (0x0F)
+/*link rates used for Negotiated Physical Link Rate */
+#define MPI26_PCIE_NEG_LINK_RATE_UNKNOWN (0x00)
+#define MPI26_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI26_PCIE_NEG_LINK_RATE_2_5 (0x02)
+#define MPI26_PCIE_NEG_LINK_RATE_5_0 (0x03)
+#define MPI26_PCIE_NEG_LINK_RATE_8_0 (0x04)
+#define MPI26_PCIE_NEG_LINK_RATE_16_0 (0x05)
+
+
+/****************************************************************************
+* PCIe IO Unit Config Pages (MPI v2.6 and later)
+****************************************************************************/
+
+/*PCIe IO Unit Page 0 */
+
+typedef struct _MPI26_PCIE_IO_UNIT0_PHY_DATA {
+ U8 Link; /*0x00 */
+ U8 LinkFlags; /*0x01 */
+ U8 PhyFlags; /*0x02 */
+ U8 NegotiatedLinkRate; /*0x03 */
+ U32 ControllerPhyDeviceInfo;/*0x04 */
+ U16 AttachedDevHandle; /*0x08 */
+ U16 ControllerDevHandle; /*0x0A */
+ U32 EnumerationStatus; /*0x0C */
+ U32 Reserved1; /*0x10 */
+} MPI26_PCIE_IO_UNIT0_PHY_DATA,
+ *PTR_MPI26_PCIE_IO_UNIT0_PHY_DATA,
+ Mpi26PCIeIOUnit0PhyData_t, *pMpi26PCIeIOUnit0PhyData_t;
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI26_PCIE_IOUNIT0_PHY_MAX
+#define MPI26_PCIE_IOUNIT0_PHY_MAX (1)
+#endif
+
+typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U8 NumPhys; /*0x0C */
+ U8 InitStatus; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ MPI26_PCIE_IO_UNIT0_PHY_DATA
+ PhyData[MPI26_PCIE_IOUNIT0_PHY_MAX]; /*0x10 */
+} MPI26_CONFIG_PAGE_PIOUNIT_0,
+ *PTR_MPI26_CONFIG_PAGE_PIOUNIT_0,
+ Mpi26PCIeIOUnitPage0_t, *pMpi26PCIeIOUnitPage0_t;
+
+#define MPI26_PCIEIOUNITPAGE0_PAGEVERSION (0x00)
+
+/*values for PCIe IO Unit Page 0 LinkFlags */
+#define MPI26_PCIEIOUNIT0_LINKFLAGS_ENUMERATION_IN_PROGRESS (0x08)
+
+/*values for PCIe IO Unit Page 0 PhyFlags */
+#define MPI26_PCIEIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+
+/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo
+ *values
+ */
+
+/*values for PCIe IO Unit Page 0 EnumerationStatus */
+#define MPI26_PCIEIOUNIT0_ES_MAX_SWITCHES_EXCEEDED (0x40000000)
+#define MPI26_PCIEIOUNIT0_ES_MAX_DEVICES_EXCEEDED (0x20000000)
+
+
+/*PCIe IO Unit Page 1 */
+
+typedef struct _MPI26_PCIE_IO_UNIT1_PHY_DATA {
+ U8 Link; /*0x00 */
+ U8 LinkFlags; /*0x01 */
+ U8 PhyFlags; /*0x02 */
+ U8 MaxMinLinkRate; /*0x03 */
+ U32 ControllerPhyDeviceInfo; /*0x04 */
+ U32 Reserved1; /*0x08 */
+} MPI26_PCIE_IO_UNIT1_PHY_DATA,
+ *PTR_MPI26_PCIE_IO_UNIT1_PHY_DATA,
+ Mpi26PCIeIOUnit1PhyData_t, *pMpi26PCIeIOUnit1PhyData_t;
+
+/*values for LinkFlags */
+#define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS (0x00)
+#define MPI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS (0x01)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI26_PCIE_IOUNIT1_PHY_MAX
+#define MPI26_PCIE_IOUNIT1_PHY_MAX (1)
+#endif
+
+typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16 ControlFlags; /*0x08 */
+ U16 Reserved; /*0x0A */
+ U16 AdditionalControlFlags; /*0x0C */
+ U16 NVMeMaxQueueDepth; /*0x0E */
+ U8 NumPhys; /*0x10 */
+ U8 Reserved1; /*0x11 */
+ U16 Reserved2; /*0x12 */
+ MPI26_PCIE_IO_UNIT1_PHY_DATA
+ PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/*0x14 */
+} MPI26_CONFIG_PAGE_PIOUNIT_1,
+ *PTR_MPI26_CONFIG_PAGE_PIOUNIT_1,
+ Mpi26PCIeIOUnitPage1_t, *pMpi26PCIeIOUnitPage1_t;
+
+#define MPI26_PCIEIOUNITPAGE1_PAGEVERSION (0x00)
+
+/*values for PCIe IO Unit Page 1 PhyFlags */
+#define MPI26_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+#define MPI26_PCIEIOUNIT1_PHYFLAGS_ENDPOINT_ONLY (0x01)
+
+/*values for PCIe IO Unit Page 1 MaxMinLinkRate */
+#define MPI26_PCIEIOUNIT1_MAX_RATE_MASK (0xF0)
+#define MPI26_PCIEIOUNIT1_MAX_RATE_SHIFT (4)
+#define MPI26_PCIEIOUNIT1_MAX_RATE_2_5 (0x20)
+#define MPI26_PCIEIOUNIT1_MAX_RATE_5_0 (0x30)
+#define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40)
+#define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50)
+
+/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo
+ *values
+ */
+
+
+/****************************************************************************
+* PCIe Switch Config Pages (MPI v2.6 and later)
+****************************************************************************/
+
+/*PCIe Switch Page 0 */
+
+typedef struct _MPI26_CONFIG_PAGE_PSWITCH_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 PhysicalPort; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 DevHandle; /*0x0C */
+ U16 ParentDevHandle; /*0x0E */
+ U8 NumPorts; /*0x10 */
+ U8 PCIeLevel; /*0x11 */
+ U16 Reserved3; /*0x12 */
+ U32 Reserved4; /*0x14 */
+ U32 Reserved5; /*0x18 */
+ U32 Reserved6; /*0x1C */
+} MPI26_CONFIG_PAGE_PSWITCH_0, *PTR_MPI26_CONFIG_PAGE_PSWITCH_0,
+ Mpi26PCIeSwitchPage0_t, *pMpi26PCIeSwitchPage0_t;
+
+#define MPI26_PCIESWITCH0_PAGEVERSION (0x00)
+
+
+/*PCIe Switch Page 1 */
+
+typedef struct _MPI26_CONFIG_PAGE_PSWITCH_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 PhysicalPort; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U8 NumPorts; /*0x0C */
+ U8 PortNum; /*0x0D */
+ U16 AttachedDevHandle; /*0x0E */
+ U16 SwitchDevHandle; /*0x10 */
+ U8 NegotiatedPortWidth; /*0x12 */
+ U8 NegotiatedLinkRate; /*0x13 */
+ U32 Reserved4; /*0x14 */
+ U32 Reserved5; /*0x18 */
+} MPI26_CONFIG_PAGE_PSWITCH_1, *PTR_MPI26_CONFIG_PAGE_PSWITCH_1,
+ Mpi26PCIeSwitchPage1_t, *pMpi26PCIeSwitchPage1_t;
+
+#define MPI26_PCIESWITCH1_PAGEVERSION (0x00)
+
+/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+
+/****************************************************************************
+* PCIe Device Config Pages (MPI v2.6 and later)
+****************************************************************************/
+
+/*PCIe Device Page 0 */
+
+typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16 Slot; /*0x08 */
+ U16 EnclosureHandle; /*0x0A */
+ U64 WWID; /*0x0C */
+ U16 ParentDevHandle; /*0x14 */
+ U8 PortNum; /*0x16 */
+ U8 AccessStatus; /*0x17 */
+ U16 DevHandle; /*0x18 */
+ U8 PhysicalPort; /*0x1A */
+ U8 Reserved1; /*0x1B */
+ U32 DeviceInfo; /*0x1C */
+ U32 Flags; /*0x20 */
+ U8 SupportedLinkRates; /*0x24 */
+ U8 MaxPortWidth; /*0x25 */
+ U8 NegotiatedPortWidth; /*0x26 */
+ U8 NegotiatedLinkRate; /*0x27 */
+ U8 EnclosureLevel; /*0x28 */
+ U8 Reserved2; /*0x29 */
+ U16 Reserved3; /*0x2A */
+ U8 ConnectorName[4]; /*0x2C */
+ U32 Reserved4; /*0x30 */
+ U32 Reserved5; /*0x34 */
+} MPI26_CONFIG_PAGE_PCIEDEV_0, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_0,
+ Mpi26PCIeDevicePage0_t, *pMpi26PCIeDevicePage0_t;
+
+#define MPI26_PCIEDEVICE0_PAGEVERSION (0x01)
+
+/*values for PCIe Device Page 0 AccessStatus field */
+#define MPI26_PCIEDEV0_ASTATUS_NO_ERRORS (0x00)
+#define MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION (0x04)
+#define MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED (0x02)
+#define MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED (0x07)
+#define MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED (0x08)
+#define MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE (0x09)
+#define MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED (0x0A)
+#define MPI26_PCIEDEV0_ASTATUS_UNKNOWN (0x10)
+
+#define MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT (0x30)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x31)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED (0x32)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED (0x33)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED (0x34)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED (0x35)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x36)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT (0x37)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS (0x38)
+
+#define MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX (0x3F)
+
+/*see mpi2_pci.h for the MPI26_PCIE_DEVINFO_ defines used for the DeviceInfo
+ *field
+ */
+
+/*values for PCIe Device Page 0 Flags field */
+#define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE (0x8000)
+#define MPI26_PCIEDEV0_FLAGS_ENABLED_FAST_PATH (0x4000)
+#define MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE (0x2000)
+#define MPI26_PCIEDEV0_FLAGS_ASYNCHRONOUS_NOTIFICATION (0x0400)
+#define MPI26_PCIEDEV0_FLAGS_ATA_SW_PRESERVATION (0x0200)
+#define MPI26_PCIEDEV0_FLAGS_UNSUPPORTED_DEVICE (0x0100)
+#define MPI26_PCIEDEV0_FLAGS_ATA_48BIT_LBA_SUPPORTED (0x0080)
+#define MPI26_PCIEDEV0_FLAGS_ATA_SMART_SUPPORTED (0x0040)
+#define MPI26_PCIEDEV0_FLAGS_ATA_NCQ_SUPPORTED (0x0020)
+#define MPI26_PCIEDEV0_FLAGS_ATA_FUA_SUPPORTED (0x0010)
+#define MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID (0x0002)
+#define MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT (0x0001)
+
+/* values for PCIe Device Page 0 SupportedLinkRates field */
+#define MPI26_PCIEDEV0_LINK_RATE_16_0_SUPPORTED (0x08)
+#define MPI26_PCIEDEV0_LINK_RATE_8_0_SUPPORTED (0x04)
+#define MPI26_PCIEDEV0_LINK_RATE_5_0_SUPPORTED (0x02)
+#define MPI26_PCIEDEV0_LINK_RATE_2_5_SUPPORTED (0x01)
+
+/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+
+/*PCIe Device Page 2 */
+
+typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x08 */
+ U16 Reserved1; /*0x0A */
+ U32 MaximumDataTransferSize;/*0x0C */
+ U32 Capabilities; /*0x10 */
+ U32 Reserved2; /*0x14 */
+} MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2,
+ Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t;
+
+#define MPI26_PCIEDEVICE2_PAGEVERSION (0x00)
+
+/*defines for PCIe Device Page 2 Capabilities field */
+#define MPI26_PCIEDEV2_CAP_SGL_FORMAT (0x00000004)
+#define MPI26_PCIEDEV2_CAP_BIT_BUCKET_SUPPORT (0x00000002)
+#define MPI26_PCIEDEV2_CAP_SGL_SUPPORT (0x00000001)
+
+
+/****************************************************************************
+* PCIe Link Config Pages (MPI v2.6 and later)
+****************************************************************************/
+
+/*PCIe Link Page 1 */
+
+typedef struct _MPI26_CONFIG_PAGE_PCIELINK_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 Link; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 CorrectableErrorCount; /*0x0C */
+ U16 NonFatalErrorCount; /*0x10 */
+ U16 Reserved3; /*0x12 */
+ U16 FatalErrorCount; /*0x14 */
+ U16 Reserved4; /*0x16 */
+} MPI26_CONFIG_PAGE_PCIELINK_1, *PTR_MPI26_CONFIG_PAGE_PCIELINK_1,
+ Mpi26PcieLinkPage1_t, *pMpi26PcieLinkPage1_t;
+
+#define MPI26_PCIELINK1_PAGEVERSION (0x00)
+
+/*PCIe Link Page 2 */
+
+typedef struct _MPI26_PCIELINK2_LINK_EVENT {
+ U8 LinkEventCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 LinkEventInfo; /*0x04 */
+} MPI26_PCIELINK2_LINK_EVENT, *PTR_MPI26_PCIELINK2_LINK_EVENT,
+ Mpi26PcieLink2LinkEvent_t, *pMpi26PcieLink2LinkEvent_t;
+
+/*use MPI26_PCIELINK3_EVTCODE_ for the LinkEventCode field */
+
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumLinkEvents at runtime.
+ */
+#ifndef MPI26_PCIELINK2_LINK_EVENT_MAX
+#define MPI26_PCIELINK2_LINK_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI26_CONFIG_PAGE_PCIELINK_2 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 Link; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U8 NumLinkEvents; /*0x0C */
+ U8 Reserved3; /*0x0D */
+ U16 Reserved4; /*0x0E */
+ MPI26_PCIELINK2_LINK_EVENT
+ LinkEvent[MPI26_PCIELINK2_LINK_EVENT_MAX]; /*0x10 */
+} MPI26_CONFIG_PAGE_PCIELINK_2, *PTR_MPI26_CONFIG_PAGE_PCIELINK_2,
+ Mpi26PcieLinkPage2_t, *pMpi26PcieLinkPage2_t;
+
+#define MPI26_PCIELINK2_PAGEVERSION (0x00)
+
+/*PCIe Link Page 3 */
+
+typedef struct _MPI26_PCIELINK3_LINK_EVENT_CONFIG {
+ U8 LinkEventCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U8 CounterType; /*0x04 */
+ U8 ThresholdWindow; /*0x05 */
+ U8 TimeUnits; /*0x06 */
+ U8 Reserved3; /*0x07 */
+ U32 EventThreshold; /*0x08 */
+ U16 ThresholdFlags; /*0x0C */
+ U16 Reserved4; /*0x0E */
+} MPI26_PCIELINK3_LINK_EVENT_CONFIG, *PTR_MPI26_PCIELINK3_LINK_EVENT_CONFIG,
+ Mpi26PcieLink3LinkEventConfig_t, *pMpi26PcieLink3LinkEventConfig_t;
+
+/*values for LinkEventCode field */
+#define MPI26_PCIELINK3_EVTCODE_NO_EVENT (0x00)
+#define MPI26_PCIELINK3_EVTCODE_CORRECTABLE_ERROR_RECEIVED (0x01)
+#define MPI26_PCIELINK3_EVTCODE_NON_FATAL_ERROR_RECEIVED (0x02)
+#define MPI26_PCIELINK3_EVTCODE_FATAL_ERROR_RECEIVED (0x03)
+#define MPI26_PCIELINK3_EVTCODE_DATA_LINK_ERROR_DETECTED (0x04)
+#define MPI26_PCIELINK3_EVTCODE_TRANSACTION_LAYER_ERROR_DETECTED (0x05)
+#define MPI26_PCIELINK3_EVTCODE_TLP_ECRC_ERROR_DETECTED (0x06)
+#define MPI26_PCIELINK3_EVTCODE_POISONED_TLP (0x07)
+#define MPI26_PCIELINK3_EVTCODE_RECEIVED_NAK_DLLP (0x08)
+#define MPI26_PCIELINK3_EVTCODE_SENT_NAK_DLLP (0x09)
+#define MPI26_PCIELINK3_EVTCODE_LTSSM_RECOVERY_STATE (0x0A)
+#define MPI26_PCIELINK3_EVTCODE_LTSSM_RXL0S_STATE (0x0B)
+#define MPI26_PCIELINK3_EVTCODE_LTSSM_TXL0S_STATE (0x0C)
+#define MPI26_PCIELINK3_EVTCODE_LTSSM_L1_STATE (0x0D)
+#define MPI26_PCIELINK3_EVTCODE_LTSSM_DISABLED_STATE (0x0E)
+#define MPI26_PCIELINK3_EVTCODE_LTSSM_HOT_RESET_STATE (0x0F)
+#define MPI26_PCIELINK3_EVTCODE_SYSTEM_ERROR (0x10)
+#define MPI26_PCIELINK3_EVTCODE_DECODE_ERROR (0x11)
+#define MPI26_PCIELINK3_EVTCODE_DISPARITY_ERROR (0x12)
+
+/*values for the CounterType field */
+#define MPI26_PCIELINK3_COUNTER_TYPE_WRAPPING (0x00)
+#define MPI26_PCIELINK3_COUNTER_TYPE_SATURATING (0x01)
+#define MPI26_PCIELINK3_COUNTER_TYPE_PEAK_VALUE (0x02)
+
+/*values for the TimeUnits field */
+#define MPI26_PCIELINK3_TM_UNITS_10_MICROSECONDS (0x00)
+#define MPI26_PCIELINK3_TM_UNITS_100_MICROSECONDS (0x01)
+#define MPI26_PCIELINK3_TM_UNITS_1_MILLISECOND (0x02)
+#define MPI26_PCIELINK3_TM_UNITS_10_MILLISECONDS (0x03)
+
+/*values for the ThresholdFlags field */
+#define MPI26_PCIELINK3_TFLAGS_EVENT_NOTIFY (0x0001)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumLinkEvents at runtime.
+ */
+#ifndef MPI26_PCIELINK3_LINK_EVENT_MAX
+#define MPI26_PCIELINK3_LINK_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI26_CONFIG_PAGE_PCIELINK_3 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 Link; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U8 NumLinkEvents; /*0x0C */
+ U8 Reserved3; /*0x0D */
+ U16 Reserved4; /*0x0E */
+ MPI26_PCIELINK3_LINK_EVENT_CONFIG
+ LinkEventConfig[MPI26_PCIELINK3_LINK_EVENT_MAX]; /*0x10 */
+} MPI26_CONFIG_PAGE_PCIELINK_3, *PTR_MPI26_CONFIG_PAGE_PCIELINK_3,
+ Mpi26PcieLinkPage3_t, *pMpi26PcieLinkPage3_t;
+
+#define MPI26_PCIELINK3_PAGEVERSION (0x00)
+
+
#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index 38b2c879bf0f..948a3ba682d7 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -7,7 +7,7 @@
* Title: MPI SCSI initiator mode messages and structures
* Creation Date: June 23, 2006
*
- * mpi2_init.h Version: 02.00.20
+ * mpi2_init.h Version: 02.00.21
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -55,6 +55,8 @@
* 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset.
* 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message.
* 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message.
+ * 01-21-16 02.00.21 Modified MPI26_SCSITASKMGMT_MSGFLAGS_PCIE* defines to
+ * be unique within first 32 characters.
* --------------------------------------------------------------------------
*/
@@ -374,6 +376,11 @@ typedef struct _MPI2_SCSI_IO_REPLY {
} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY,
Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t;
+/*SCSI IO Reply MsgFlags bits */
+#define MPI26_SCSIIO_REPLY_MSGFLAGS_REFTAG_OBSERVED_VALID (0x01)
+#define MPI26_SCSIIO_REPLY_MSGFLAGS_GUARD_OBSERVED_VALID (0x02)
+#define MPI26_SCSIIO_REPLY_MSGFLAGS_APPTAG_OBSERVED_VALID (0x04)
+
/*SCSI IO Reply SCSIStatus values (SAM-4 status codes) */
#define MPI2_SCSI_STATUS_GOOD (0x00)
@@ -447,11 +454,13 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST {
/*MsgFlags bits */
#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18)
+#define MPI26_SCSITASKMGMT_MSGFLAGS_HOT_RESET_PCIE (0x00)
#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00)
#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08)
#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10)
#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01)
+#define MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE (0x18)
/*SCSI Task Management Reply Message */
typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY {
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index 673cf05f94dc..cc2aff7aa67b 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -7,7 +7,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.27
+ * mpi2_ioc.h Version: 02.00.32
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -141,7 +141,32 @@
* Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and
* MPI26_FW_HEADER_PID_FAMILY_3516_SAS.
* Added MPI26_CTRL_OP_SHUTDOWN.
- * 08-25-15 02.00.27 Added IC ARCH Class based signature defines
+ * 08-25-15 02.00.27 Added IC ARCH Class based signature defines.
+ * Added MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED event.
+ * Added ConfigurationFlags field to IOCInit message to
+ * support NVMe SGL format control.
+ * Added PCIe SRIOV support.
+ * 02-17-16 02.00.28 Added SAS 4 22.5 Gb/s speed support.
+ * Added PCIe 4 16.0 GT/sec speed support.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * 07-01-16 02.00.29 Added Archclass for 4008 product.
+ * Added IOCException MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED.
+ * 08-23-16 02.00.30 Added new defines for the ImageType field of FWDownload
+ * Request Message.
+ * Added new defines for the ImageType field of FWUpload
+ * Request Message.
+ * Added new values for the RegionType field in the Layout
+ * Data sections of the FLASH Layout Extended Image Data.
+ * Added new defines for the ReasonCode field of
+ * Active Cable Exception Event.
+ * Added MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE and
+ * MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE.
+ * 11-23-16 02.00.31 Added MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR and
+ * MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR.
+ * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP.
+ * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related
+ * defines for the ReasonCode field.
* --------------------------------------------------------------------------
*/
@@ -213,6 +238,9 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF)
#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0)
+/*ConfigurationFlags */
+#define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT (0x0001)
+
/*minimum depth for a Reply Descriptor Post Queue */
#define MPI2_RDPQ_DEPTH_MIN (16)
@@ -300,6 +328,10 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
U16 MinDevHandle; /*0x3C */
U8 CurrentHostPageSize; /* 0x3E */
U8 Reserved4; /* 0x3F */
+ U8 SGEModifierMask; /*0x40 */
+ U8 SGEModifierValue; /*0x41 */
+ U8 SGEModifierShift; /*0x42 */
+ U8 Reserved5; /*0x43 */
} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY,
Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t;
@@ -316,6 +348,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0)
/*IOCExceptions */
+#define MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0400)
#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200)
#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100)
@@ -336,6 +369,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
/*ProductID field uses MPI2_FW_HEADER_PID_ */
/*IOCCapabilities */
+#define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000)
#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000)
#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000)
@@ -354,6 +388,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
/*ProtocolFlags */
+#define MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES (0x0008)
#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
@@ -403,6 +438,8 @@ typedef struct _MPI2_PORT_FACTS_REPLY {
#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20)
#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30)
#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31)
+#define MPI2_PORTFACTS_PORTTYPE_TRI_MODE (0x40)
+
/****************************************************************************
* PortEnable message
@@ -509,6 +546,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY {
#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019)
#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C)
#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D)
+#define MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x001D)
#define MPI2_EVENT_IR_VOLUME (0x001E)
#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F)
#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020)
@@ -521,7 +559,12 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY {
#define MPI2_EVENT_TEMP_THRESHOLD (0x0027)
#define MPI2_EVENT_HOST_MESSAGE (0x0028)
#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029)
+#define MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE (0x0030)
+#define MPI2_EVENT_PCIE_ENUMERATION (0x0031)
+#define MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x0032)
+#define MPI2_EVENT_PCIE_LINK_COUNTER (0x0033)
#define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034)
+#define MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x0035)
#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E)
#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F)
@@ -618,11 +661,20 @@ typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT {
U8 ReasonCode; /* 0x04 */
U8 ReceptacleID; /* 0x05 */
U16 Reserved1; /* 0x06 */
-} MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
+} MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
+ *PTR_MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
+ Mpi25EventDataActiveCableExcept_t,
+ *pMpi25EventDataActiveCableExcept_t,
+ MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
*PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
Mpi26EventDataActiveCableExcept_t,
*pMpi26EventDataActiveCableExcept_t;
+/*MPI2.5 defines for the ReasonCode field */
+#define MPI25_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00)
+#define MPI25_EVENT_ACTIVE_CABLE_PRESENT (0x01)
+#define MPI25_EVENT_ACTIVE_CABLE_DEGRADED (0x02)
+
/* defines for ReasonCode field */
#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00)
#define MPI26_EVENT_ACTIVE_CABLE_PRESENT (0x01)
@@ -958,6 +1010,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST {
#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09)
#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A)
#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B)
+#define MPI26_EVENT_SAS_TOPO_LR_RATE_22_5 (0x0C)
/*values for the PhyStatus field */
#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80)
@@ -983,12 +1036,37 @@ typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE {
} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
*PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
Mpi2EventDataSasEnclDevStatusChange_t,
- *pMpi2EventDataSasEnclDevStatusChange_t;
+ *pMpi2EventDataSasEnclDevStatusChange_t,
+ MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE,
+ *PTR_MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE,
+ Mpi26EventDataEnclDevStatusChange_t,
+ *pMpi26EventDataEnclDevStatusChange_t;
/*SAS Enclosure Device Status Change event ReasonCode values */
#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01)
#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02)
+/*Enclosure Device Status Change event ReasonCode values */
+#define MPI26_EVENT_ENCL_RC_ADDED (0x01)
+#define MPI26_EVENT_ENCL_RC_NOT_RESPONDING (0x02)
+
+
+typedef struct _MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR {
+ U16 DevHandle; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U32 Reserved1[2]; /*0x04 */
+ U64 SASAddress; /*0x0C */
+ U32 Reserved2[2]; /*0x14 */
+} MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR,
+ *PTR_MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR,
+ Mpi25EventDataSasDeviceDiscoveryError_t,
+ *pMpi25EventDataSasDeviceDiscoveryError_t;
+
+/*SAS Device Discovery Error Event data ReasonCode values */
+#define MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED (0x01)
+#define MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT (0x02)
+
/*SAS PHY Counter Event data */
typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
@@ -1074,6 +1152,174 @@ typedef struct _MPI2_EVENT_DATA_HBD_PHY {
/*values for the DescriptorType field */
#define MPI2_EVENT_HBD_DT_SAS (0x01)
+
+/*PCIe Device Status Change Event data (MPI v2.6 and later) */
+
+typedef struct _MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE {
+ U16 TaskTag; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U8 ASC; /*0x04 */
+ U8 ASCQ; /*0x05 */
+ U16 DevHandle; /*0x06 */
+ U32 Reserved2; /*0x08 */
+ U64 WWID; /*0x0C */
+ U8 LUN[8]; /*0x14 */
+} MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE,
+ *PTR_MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE,
+ Mpi26EventDataPCIeDeviceStatusChange_t,
+ *pMpi26EventDataPCIeDeviceStatusChange_t;
+
+/*PCIe Device Status Change Event data ReasonCode values */
+#define MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA (0x05)
+#define MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED (0x07)
+#define MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
+#define MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
+#define MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
+#define MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
+#define MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
+#define MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
+#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
+#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
+#define MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE (0x10)
+
+
+/*PCIe Enumeration Event data (MPI v2.6 and later) */
+
+typedef struct _MPI26_EVENT_DATA_PCIE_ENUMERATION {
+ U8 Flags; /*0x00 */
+ U8 ReasonCode; /*0x01 */
+ U8 PhysicalPort; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 EnumerationStatus; /*0x04 */
+} MPI26_EVENT_DATA_PCIE_ENUMERATION,
+ *PTR_MPI26_EVENT_DATA_PCIE_ENUMERATION,
+ Mpi26EventDataPCIeEnumeration_t,
+ *pMpi26EventDataPCIeEnumeration_t;
+
+/*PCIe Enumeration Event data Flags values */
+#define MPI26_EVENT_PCIE_ENUM_DEVICE_CHANGE (0x02)
+#define MPI26_EVENT_PCIE_ENUM_IN_PROGRESS (0x01)
+
+/*PCIe Enumeration Event data ReasonCode values */
+#define MPI26_EVENT_PCIE_ENUM_RC_STARTED (0x01)
+#define MPI26_EVENT_PCIE_ENUM_RC_COMPLETED (0x02)
+
+/*PCIe Enumeration Event data EnumerationStatus values */
+#define MPI26_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000)
+#define MPI26_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000)
+
+
+/*PCIe Topology Change List Event data (MPI v2.6 and later) */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumEntries at runtime.
+ */
+#ifndef MPI26_EVENT_PCIE_TOPO_PORT_COUNT
+#define MPI26_EVENT_PCIE_TOPO_PORT_COUNT (1)
+#endif
+
+typedef struct _MPI26_EVENT_PCIE_TOPO_PORT_ENTRY {
+ U16 AttachedDevHandle; /*0x00 */
+ U8 PortStatus; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U8 CurrentPortInfo; /*0x04 */
+ U8 Reserved2; /*0x05 */
+ U8 PreviousPortInfo; /*0x06 */
+ U8 Reserved3; /*0x07 */
+} MPI26_EVENT_PCIE_TOPO_PORT_ENTRY,
+ *PTR_MPI26_EVENT_PCIE_TOPO_PORT_ENTRY,
+ Mpi26EventPCIeTopoPortEntry_t,
+ *pMpi26EventPCIeTopoPortEntry_t;
+
+/*PCIe Topology Change List Event data PortStatus values */
+#define MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED (0x01)
+#define MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING (0x02)
+#define MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED (0x03)
+#define MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE (0x04)
+#define MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05)
+
+/*PCIe Topology Change List Event data defines for CurrentPortInfo and
+ *PreviousPortInfo
+ */
+#define MPI26_EVENT_PCIE_TOPO_PI_LANE_MASK (0xF0)
+#define MPI26_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00)
+#define MPI26_EVENT_PCIE_TOPO_PI_1_LANE (0x10)
+#define MPI26_EVENT_PCIE_TOPO_PI_2_LANES (0x20)
+#define MPI26_EVENT_PCIE_TOPO_PI_4_LANES (0x30)
+#define MPI26_EVENT_PCIE_TOPO_PI_8_LANES (0x40)
+
+#define MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0F)
+#define MPI26_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
+#define MPI26_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01)
+#define MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02)
+#define MPI26_EVENT_PCIE_TOPO_PI_RATE_5_0 (0x03)
+#define MPI26_EVENT_PCIE_TOPO_PI_RATE_8_0 (0x04)
+#define MPI26_EVENT_PCIE_TOPO_PI_RATE_16_0 (0x05)
+
+typedef struct _MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST {
+ U16 EnclosureHandle; /*0x00 */
+ U16 SwitchDevHandle; /*0x02 */
+ U8 NumPorts; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U8 NumEntries; /*0x08 */
+ U8 StartPortNum; /*0x09 */
+ U8 SwitchStatus; /*0x0A */
+ U8 PhysicalPort; /*0x0B */
+ MPI26_EVENT_PCIE_TOPO_PORT_ENTRY
+ PortEntry[MPI26_EVENT_PCIE_TOPO_PORT_COUNT]; /*0x0C */
+} MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST,
+ *PTR_MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST,
+ Mpi26EventDataPCIeTopologyChangeList_t,
+ *pMpi26EventDataPCIeTopologyChangeList_t;
+
+/*PCIe Topology Change List Event data SwitchStatus values */
+#define MPI26_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00)
+#define MPI26_EVENT_PCIE_TOPO_SS_ADDED (0x01)
+#define MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02)
+#define MPI26_EVENT_PCIE_TOPO_SS_RESPONDING (0x03)
+#define MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04)
+
+/*PCIe Link Counter Event data (MPI v2.6 and later) */
+
+typedef struct _MPI26_EVENT_DATA_PCIE_LINK_COUNTER {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U8 LinkEventCode; /*0x0C */
+ U8 LinkNum; /*0x0D */
+ U16 Reserved2; /*0x0E */
+ U32 LinkEventInfo; /*0x10 */
+ U8 CounterType; /*0x14 */
+ U8 ThresholdWindow; /*0x15 */
+ U8 TimeUnits; /*0x16 */
+ U8 Reserved3; /*0x17 */
+ U32 EventThreshold; /*0x18 */
+ U16 ThresholdFlags; /*0x1C */
+ U16 Reserved4; /*0x1E */
+} MPI26_EVENT_DATA_PCIE_LINK_COUNTER,
+ *PTR_MPI26_EVENT_DATA_PCIE_LINK_COUNTER,
+ Mpi26EventDataPcieLinkCounter_t, *pMpi26EventDataPcieLinkCounter_t;
+
+
+/*use MPI26_PCIELINK3_EVTCODE_ values from mpi2_cnfg.h for the LinkEventCode
+ *field
+ */
+
+/*use MPI26_PCIELINK3_COUNTER_TYPE_ values from mpi2_cnfg.h for the CounterType
+ *field
+ */
+
+/*use MPI26_PCIELINK3_TM_UNITS_ values from mpi2_cnfg.h for the TimeUnits
+ *field
+ */
+
+/*use MPI26_PCIELINK3_TFLAGS_ values from mpi2_cnfg.h for the ThresholdFlags
+ *field
+ */
+
/****************************************************************************
* EventAck message
****************************************************************************/
@@ -1191,6 +1437,14 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C)
+#define MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP (0x0D)
+#define MPI2_FW_DOWNLOAD_ITYPE_SBR (0x0E)
+#define MPI2_FW_DOWNLOAD_ITYPE_SBR_BACKUP (0x0F)
+#define MPI2_FW_DOWNLOAD_ITYPE_HIIM (0x10)
+#define MPI2_FW_DOWNLOAD_ITYPE_HIIA (0x11)
+#define MPI2_FW_DOWNLOAD_ITYPE_CTLR (0x12)
+#define MPI2_FW_DOWNLOAD_ITYPE_IMR_FIRMWARE (0x13)
+#define MPI2_FW_DOWNLOAD_ITYPE_MR_NVDATA (0x14)
#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
/*MPI v2.0 FWDownload TransactionContext Element */
@@ -1277,6 +1531,14 @@ typedef struct _MPI2_FW_UPLOAD_REQUEST {
#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
#define MPI2_FW_UPLOAD_ITYPE_CBB_BACKUP (0x0D)
+#define MPI2_FW_UPLOAD_ITYPE_SBR (0x0E)
+#define MPI2_FW_UPLOAD_ITYPE_SBR_BACKUP (0x0F)
+#define MPI2_FW_UPLOAD_ITYPE_HIIM (0x10)
+#define MPI2_FW_UPLOAD_ITYPE_HIIA (0x11)
+#define MPI2_FW_UPLOAD_ITYPE_CTLR (0x12)
+#define MPI2_FW_UPLOAD_ITYPE_IMR_FIRMWARE (0x13)
+#define MPI2_FW_UPLOAD_ITYPE_MR_NVDATA (0x14)
+
/*MPI v2.0 FWUpload TransactionContext Element */
typedef struct _MPI2_FW_UPLOAD_TCSGE {
@@ -1395,10 +1657,13 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
#define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00)
#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01)
/* legacy (0x5AEAA55A) */
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_3 (0x02)
#define MPI26_FW_HEADER_SIGNATURE0 \
(MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0)
#define MPI26_FW_HEADER_SIGNATURE0_3516 \
(MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1)
+#define MPI26_FW_HEADER_SIGNATURE0_4008 \
+ (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_3)
/*Signature1 field */
#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
@@ -1542,6 +1807,13 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA {
#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A)
#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK)
#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D)
+#define MPI2_FLASH_REGION_SBR (0x0E)
+#define MPI2_FLASH_REGION_SBR_BACKUP (0x0F)
+#define MPI2_FLASH_REGION_HIIM (0x10)
+#define MPI2_FLASH_REGION_HIIA (0x11)
+#define MPI2_FLASH_REGION_CTLR (0x12)
+#define MPI2_FLASH_REGION_IMR_FIRMWARE (0x13)
+#define MPI2_FLASH_REGION_MR_NVDATA (0x14)
/*ImageRevision */
#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
@@ -1826,6 +2098,8 @@ typedef struct _MPI26_IOUNIT_CONTROL_REQUEST {
#define MPI26_CTRL_OP_DEV_ENABLE_PERSIST_CONNECTION (0x17)
#define MPI26_CTRL_OP_DEV_DISABLE_PERSIST_CONNECTION (0x18)
#define MPI26_CTRL_OP_DEV_CLOSE_PERSIST_CONNECTION (0x19)
+#define MPI26_CTRL_OP_ENABLE_NVME_SGL_FORMAT (0x1A)
+#define MPI26_CTRL_OP_DISABLE_NVME_SGL_FORMAT (0x1B)
#define MPI26_CTRL_OP_PRODUCT_SPECIFIC_MIN (0x80)
/* values for the PrimFlags field */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
new file mode 100644
index 000000000000..f0281f943ec9
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012-2015 Avago Technologies. All rights reserved.
+ *
+ *
+ * Name: mpi2_pci.h
+ * Title: MPI PCIe Attached Devices structures and definitions.
+ * Creation Date: October 9, 2012
+ *
+ * mpi2_pci.h Version: 02.00.02
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 03-16-15 02.00.00 Initial version.
+ * 02-17-16 02.00.01 Removed AHCI support.
+ * Removed SOP support.
+ * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to
+ * NVME Encapsulated Request.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_PCI_H
+#define MPI2_PCI_H
+
+
+/*
+ *Values for the PCIe DeviceInfo field used in PCIe Device Status Change Event
+ *data and PCIe Configuration pages.
+ */
+#define MPI26_PCIE_DEVINFO_DIRECT_ATTACH (0x00000010)
+
+#define MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE (0x0000000F)
+#define MPI26_PCIE_DEVINFO_NO_DEVICE (0x00000000)
+#define MPI26_PCIE_DEVINFO_PCI_SWITCH (0x00000001)
+#define MPI26_PCIE_DEVINFO_NVME (0x00000003)
+
+
+/****************************************************************************
+* NVMe Encapsulated message
+****************************************************************************/
+
+/*NVME Encapsulated Request Message */
+typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 EncapsulatedCommandLength; /*0x04 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 Reserved3; /*0x0C */
+ U64 ErrorResponseBaseAddress; /*0x10 */
+ U16 ErrorResponseAllocationLength; /*0x18 */
+ U16 Flags; /*0x1A */
+ U32 DataLength; /*0x1C */
+ U8 NVMe_Command[4]; /*0x20 */
+
+} MPI26_NVME_ENCAPSULATED_REQUEST, *PTR_MPI26_NVME_ENCAPSULATED_REQUEST,
+ Mpi26NVMeEncapsulatedRequest_t, *pMpi26NVMeEncapsulatedRequest_t;
+
+/*defines for the Flags field */
+#define MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP (0x0020)
+/*Submission Queue Type*/
+#define MPI26_NVME_FLAGS_SUBMISSIONQ_MASK (0x0010)
+#define MPI26_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
+#define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010)
+/*Error Response Address Space */
+#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR (0x000C)
+#define MPI26_NVME_FLAGS_SYSTEM_RSP_ADDR (0x0000)
+#define MPI26_NVME_FLAGS_IOCPLB_RSP_ADDR (0x0008)
+#define MPI26_NVME_FLAGS_IOCPLBNTA_RSP_ADDR (0x000C)
+/*Data Direction*/
+#define MPI26_NVME_FLAGS_DATADIRECTION_MASK (0x0003)
+#define MPI26_NVME_FLAGS_NODATATRANSFER (0x0000)
+#define MPI26_NVME_FLAGS_WRITE (0x0001)
+#define MPI26_NVME_FLAGS_READ (0x0002)
+#define MPI26_NVME_FLAGS_BIDIRECTIONAL (0x0003)
+
+
+/*NVMe Encapsulated Reply Message */
+typedef struct _MPI26_NVME_ENCAPSULATED_ERROR_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 EncapsulatedCommandLength; /*0x04 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U16 ErrorResponseCount; /*0x14 */
+ U16 Reserved4; /*0x16 */
+} MPI26_NVME_ENCAPSULATED_ERROR_REPLY,
+ *PTR_MPI26_NVME_ENCAPSULATED_ERROR_REPLY,
+ Mpi26NVMeEncapsulatedErrorReply_t,
+ *pMpi26NVMeEncapsulatedErrorReply_t;
+
+
+#endif
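Editorial aside (not part of the patch): the Flags field of the NVMe Encapsulated Request is composed by OR-ing one value from each group of defines above. A minimal sketch, assuming an admin command that writes data to the device and wants its error response placed in system memory; 'req' is a hypothetical Mpi26NVMeEncapsulatedRequest_t pointer:

	u16 flags = MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN |
		    MPI26_NVME_FLAGS_SYSTEM_RSP_ADDR |
		    MPI26_NVME_FLAGS_WRITE;

	req->Flags = cpu_to_le16(flags);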
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 593765a4ddb8..629296ee9236 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -7,7 +7,7 @@
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.13
+ * mpi2_tool.h Version: 02.00.14
*
* Version History
* ---------------
@@ -36,6 +36,8 @@
* 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
* 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC.
* 11-18-14 02.00.13 Updated copyright information.
+ * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean
+ * Tool Request Message.
* --------------------------------------------------------------------------
*/
@@ -106,6 +108,16 @@ typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST {
#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000)
#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000)
#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000)
+#define MPI2_TOOLBOX_CLEAN_SBR (0x00800000)
+#define MPI2_TOOLBOX_CLEAN_SBR_BACKUP (0x00400000)
+#define MPI2_TOOLBOX_CLEAN_HIIM (0x00200000)
+#define MPI2_TOOLBOX_CLEAN_HIIA (0x00100000)
+#define MPI2_TOOLBOX_CLEAN_CTLR (0x00080000)
+#define MPI2_TOOLBOX_CLEAN_IMR_FIRMWARE (0x00040000)
+#define MPI2_TOOLBOX_CLEAN_MR_NVDATA (0x00020000)
+#define MPI2_TOOLBOX_CLEAN_RESERVED_5_16 (0x0001FFE0)
+#define MPI2_TOOLBOX_CLEAN_ALL_BUT_MPB (0x00000010)
+#define MPI2_TOOLBOX_CLEAN_ENTIRE_FLASH (0x00000008)
#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002)
#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 87999905bca3..8027de465d47 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -59,6 +59,7 @@
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
+#include <asm/page.h> /* To get host page size per arch */
#include <linux/aer.h>
@@ -105,7 +106,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
*
*/
static int
-_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
+_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
int ret = param_set_int(val, kp);
struct MPT3SAS_ADAPTER *ioc;
@@ -556,6 +557,11 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
func_str = "smp_passthru";
break;
+ case MPI2_FUNCTION_NVME_ENCAPSULATED:
+ frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
+ ioc->sge_size;
+ func_str = "nvme_encapsulated";
+ break;
default:
frame_sz = 32;
func_str = "unknown";
@@ -655,7 +661,27 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
desc = "Temperature Threshold";
break;
case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
- desc = "Active cable exception";
+ desc = "Cable Event";
+ break;
+ case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
+ desc = "PCIE Device Status Change";
+ break;
+ case MPI2_EVENT_PCIE_ENUMERATION:
+ {
+ Mpi26EventDataPCIeEnumeration_t *event_data =
+ (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
+ pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
+ (event_data->ReasonCode ==
+ MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->EnumerationStatus)
+ pr_info("enumeration_status(0x%08x)",
+ le32_to_cpu(event_data->EnumerationStatus));
+ pr_info("\n");
+ return;
+ }
+ case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ desc = "PCIE Topology Change List";
break;
}
@@ -984,7 +1010,9 @@ _base_interrupt(int irq, void *bus_id)
if (request_desript_type ==
MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
request_desript_type ==
- MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
+ MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
+ request_desript_type ==
+ MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
cb_idx = _base_get_cb_idx(ioc, smid);
if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
(likely(mpt_callbacks[cb_idx] != NULL))) {
@@ -1347,6 +1375,433 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
/* IEEE format sgls */
/**
+ * _base_build_nvme_prp - This function is called for NVMe end devices to build
+ * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
+ * entry of the NVMe message (PRP1). If the data buffer is small enough to be
+ * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
+ * used to describe a larger data buffer. If the data buffer is too large to
+ * describe using the two PRP entries inside the NVMe message, then PRP1
+ * describes the first data memory segment, and PRP2 contains a pointer to a PRP
+ * list located elsewhere in memory to describe the remaining data memory
+ * segments. The PRP list will be contiguous.
+ *
+ * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
+ * consists of a list of PRP entries to describe a number of noncontiguous
+ * physical memory segments as a single memory buffer, just as a SGL does. Note
+ * however, that this function is only used by the IOCTL call, so the memory
+ * given will be guaranteed to be contiguous. There is no need to translate
+ * non-contiguous SGL into a PRP in this case. All PRPs will describe
+ * contiguous space that is one page size each.
+ *
+ * Each NVMe message contains two PRP entries. The first (PRP1) either contains
+ * a PRP list pointer or a PRP element, depending upon the command. PRP2
+ * contains the second PRP element if the memory being described fits within 2
+ * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
+ *
+ * A PRP list pointer contains the address of a PRP list, structured as a linear
+ * array of PRP entries. Each PRP entry in this list describes a segment of
+ * physical memory.
+ *
+ * Each 64-bit PRP entry comprises an address and an offset field. The address
+ * always points at the beginning of a 4KB physical memory page, and the offset
+ * describes where within that 4KB page the memory segment begins. Only the
+ * first element in a PRP list may contain a non-zero offset, implying that all
+ * memory segments following the first begin at the start of a 4KB page.
+ *
+ * Each PRP element normally describes 4KB of physical memory, with exceptions
+ * for the first and last elements in the list. If the memory being described
+ * by the list begins at a non-zero offset within the first 4KB page, then the
+ * first PRP element will contain a non-zero offset indicating where the region
+ * begins within the 4KB page. The last memory segment may end before the end
+ * of the 4KB segment, depending upon the overall size of the memory being
+ * described by the PRP list.
+ *
+ * Since PRP entries lack any indication of size, the overall data buffer length
+ * is used to determine where the end of the data memory buffer is located, and
+ * how many PRP entries are required to describe it.
+ *
+ * @ioc: per adapter object
+ * @smid: system request message index for getting associated SGL
+ * @nvme_encap_request: the NVMe request msg frame pointer
+ * @data_out_dma: physical address for WRITES
+ * @data_out_sz: data xfer size for WRITES
+ * @data_in_dma: physical address for READS
+ * @data_in_sz: data xfer size for READS
+ *
+ * Returns nothing.
+ */
+static void
+_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
+ dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
+ size_t data_in_sz)
+{
+ int prp_size = NVME_PRP_SIZE;
+ __le64 *prp_entry, *prp1_entry, *prp2_entry;
+ __le64 *prp_page;
+ dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
+ u32 offset, entry_len;
+ u32 page_mask_result, page_mask;
+ size_t length;
+
+ /*
+ * Not all commands require a data transfer. If no data, just return
+ * without constructing any PRP.
+ */
+ if (!data_in_sz && !data_out_sz)
+ return;
+ /*
+ * Set pointers to PRP1 and PRP2, which are in the NVMe command.
+ * PRP1 is located at a 24 byte offset from the start of the NVMe
+ * command. Then set the current PRP entry pointer to PRP1.
+ */
+ prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
+ NVME_CMD_PRP1_OFFSET);
+ prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
+ NVME_CMD_PRP2_OFFSET);
+ prp_entry = prp1_entry;
+ /*
+ * For the PRP entries, use the specially allocated buffer of
+ * contiguous memory.
+ */
+ prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
+ prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
+
+ /*
+ * Check if we are within 1 entry of a page boundary; we don't
+ * want our first entry to be a PRP List entry.
+ */
+ page_mask = ioc->page_size - 1;
+ page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
+ if (!page_mask_result) {
+ /* Bump up to next page boundary. */
+ prp_page = (__le64 *)((u8 *)prp_page + prp_size);
+ prp_page_dma = prp_page_dma + prp_size;
+ }
+
+ /*
+ * Set PRP physical pointer, which initially points to the current PRP
+ * DMA memory page.
+ */
+ prp_entry_dma = prp_page_dma;
+
+ /* Get physical address and length of the data buffer. */
+ if (data_in_sz) {
+ dma_addr = data_in_dma;
+ length = data_in_sz;
+ } else {
+ dma_addr = data_out_dma;
+ length = data_out_sz;
+ }
+
+ /* Loop while the length is not zero. */
+ while (length) {
+ /*
+ * Check if we need to put a list pointer here if we are at
+ * page boundary - prp_size (8 bytes).
+ */
+ page_mask_result = (prp_entry_dma + prp_size) & page_mask;
+ if (!page_mask_result) {
+ /*
+ * This is the last entry in a PRP List, so we need to
+ * put a PRP list pointer here. What this does is:
+ * - bump the current memory pointer to the next
+ * address, which will be the next full page.
+ * - set the PRP Entry to point to that page. This
+ * is now the PRP List pointer.
+ * - bump the PRP Entry pointer to the start of the
+ * next page. Since all of this PRP memory is
+ * contiguous, no need to get a new page - it's
+ * just the next address.
+ */
+ prp_entry_dma++;
+ *prp_entry = cpu_to_le64(prp_entry_dma);
+ prp_entry++;
+ }
+
+ /* Need to handle if entry will be part of a page. */
+ offset = dma_addr & page_mask;
+ entry_len = ioc->page_size - offset;
+
+ if (prp_entry == prp1_entry) {
+ /*
+ * Must fill in the first PRP pointer (PRP1) before
+ * moving on.
+ */
+ *prp1_entry = cpu_to_le64(dma_addr);
+
+ /*
+ * Now point to the second PRP entry within the
+ * command (PRP2).
+ */
+ prp_entry = prp2_entry;
+ } else if (prp_entry == prp2_entry) {
+ /*
+ * Should the PRP2 entry be a PRP List pointer or just
+ * a regular PRP pointer? If there is more than one
+ * more page of data, must use a PRP List pointer.
+ */
+ if (length > ioc->page_size) {
+ /*
+ * PRP2 will contain a PRP List pointer because
+ * more PRP's are needed with this command. The
+ * list will start at the beginning of the
+ * contiguous buffer.
+ */
+ *prp2_entry = cpu_to_le64(prp_entry_dma);
+
+ /*
+ * The next PRP Entry will be the start of the
+ * first PRP List.
+ */
+ prp_entry = prp_page;
+ } else {
+ /*
+ * After this, the PRP Entries are complete.
+ * This command uses 2 PRP's and no PRP list.
+ */
+ *prp2_entry = cpu_to_le64(dma_addr);
+ }
+ } else {
+ /*
+ * Put entry in list and bump the addresses.
+ *
+ * After PRP1 and PRP2 are filled in, this will fill in
+ * all remaining PRP entries in a PRP List, one per
+ * each time through the loop.
+ */
+ *prp_entry = cpu_to_le64(dma_addr);
+ prp_entry++;
+ prp_entry_dma++;
+ }
+
+ /*
+ * Bump the phys address of the command's data buffer by the
+ * entry_len.
+ */
+ dma_addr += entry_len;
+
+ /* Decrement length accounting for last partial page. */
+ if (entry_len > length)
+ length = 0;
+ else
+ length -= entry_len;
+ }
+}
+
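Editorial aside: a minimal sketch (not in the driver) of the PRP accounting the comment above describes, assuming 4 KiB pages and 8-byte entries. Only the first entry may carry a non-zero offset, so the entry count is simply the number of pages the buffer touches; anything beyond two entries forces PRP2 to become a PRP list pointer. The helper name is hypothetical:

	#include <linux/kernel.h>
	#include <linux/sizes.h>

	/* Hypothetical helper: PRP entries needed for 'len' bytes at DMA address 'addr'. */
	static unsigned int prp_entries_needed(dma_addr_t addr, size_t len)
	{
		size_t offset = addr & (SZ_4K - 1);	/* offset within the first page */

		return DIV_ROUND_UP(offset + len, SZ_4K);
	}

For example, 8 KiB starting 0x200 bytes into a page touches three pages, so it needs three entries: PRP1 holds the first, and PRP2 holds a pointer to a PRP list containing the other two.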
+/**
+ * base_make_prp_nvme -
+ * Prepare PRPs (Physical Region Pages): SGLs specific to NVMe drives only
+ *
+ * @ioc: per adapter object
+ * @scmd: SCSI command from the mid-layer
+ * @mpi_request: mpi request
+ * @smid: msg Index
+ * @sge_count: scatter gather element count.
+ *
+ * Returns nothing; the PRP entries are built in place (the first in the
+ * main message frame, the rest in the contiguous PCIe SGL buffer).
+ */
+static void
+base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd,
+ Mpi25SCSIIORequest_t *mpi_request,
+ u16 smid, int sge_count)
+{
+ int sge_len, num_prp_in_chain = 0;
+ Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
+ __le64 *curr_buff;
+ dma_addr_t msg_dma, sge_addr, offset;
+ u32 page_mask, page_mask_result;
+ struct scatterlist *sg_scmd;
+ u32 first_prp_len;
+ int data_len = scsi_bufflen(scmd);
+ u32 nvme_pg_size;
+
+ nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
+ /*
+ * NVMe has a fairly convoluted PRP format: one PRP is required for
+ * each page or partial page. The driver needs to split up OS sg_list
+ * entries if an entry is longer than one page or crosses a page
+ * boundary. The driver also has to insert a PRP list pointer entry as
+ * the last entry in each physical page of the PRP list.
+ *
+ * NOTE: The first PRP "entry" is actually placed in the first
+ * SGL entry in the main message as IEEE 64 format. The 2nd
+ * entry in the main message is the chain element, and the rest
+ * of the PRP entries are built in the contiguous pcie buffer.
+ */
+ page_mask = nvme_pg_size - 1;
+
+ /*
+ * Native SGL is needed.
+ * Put a chain element in main message frame that points to the first
+ * chain buffer.
+ *
+ * NOTE: The ChainOffset field must be 0 when using a chain pointer to
+ * a native SGL.
+ */
+
+ /* Set main message chain element pointer */
+ main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
+ /*
+ * For NVMe the chain element needs to be the 2nd SG entry in the main
+ * message.
+ */
+ main_chain_element = (Mpi25IeeeSgeChain64_t *)
+ ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
+
+ /*
+ * For the PRP entries, use the specially allocated buffer of
+ * contiguous memory. Normal chain buffers can't be used
+ * because each chain buffer would need to be the size of an OS
+ * page (4k).
+ */
+ curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
+ msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
+
+ main_chain_element->Address = cpu_to_le64(msg_dma);
+ main_chain_element->NextChainOffset = 0;
+ main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
+ MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
+
+ /* Build the first PRP; the SGE need not be page aligned */
+ ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
+ sg_scmd = scsi_sglist(scmd);
+ sge_addr = sg_dma_address(sg_scmd);
+ sge_len = sg_dma_len(sg_scmd);
+
+ offset = sge_addr & page_mask;
+ first_prp_len = nvme_pg_size - offset;
+
+ ptr_first_sgl->Address = cpu_to_le64(sge_addr);
+ ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
+
+ data_len -= first_prp_len;
+
+ if (sge_len > first_prp_len) {
+ sge_addr += first_prp_len;
+ sge_len -= first_prp_len;
+ } else if (data_len && (sge_len == first_prp_len)) {
+ sg_scmd = sg_next(sg_scmd);
+ sge_addr = sg_dma_address(sg_scmd);
+ sge_len = sg_dma_len(sg_scmd);
+ }
+
+ for (;;) {
+ offset = sge_addr & page_mask;
+
+ /* Put a PRP list pointer here when the next slot hits a page boundary */
+ page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
+ if (unlikely(!page_mask_result)) {
+ scmd_printk(KERN_NOTICE,
+ scmd, "page boundary curr_buff: 0x%p\n",
+ curr_buff);
+ msg_dma += 8;
+ *curr_buff = cpu_to_le64(msg_dma);
+ curr_buff++;
+ num_prp_in_chain++;
+ }
+
+ *curr_buff = cpu_to_le64(sge_addr);
+ curr_buff++;
+ msg_dma += 8;
+ num_prp_in_chain++;
+
+ sge_addr += nvme_pg_size;
+ sge_len -= nvme_pg_size;
+ data_len -= nvme_pg_size;
+
+ if (data_len <= 0)
+ break;
+
+ if (sge_len > 0)
+ continue;
+
+ sg_scmd = sg_next(sg_scmd);
+ sge_addr = sg_dma_address(sg_scmd);
+ sge_len = sg_dma_len(sg_scmd);
+ }
+
+ main_chain_element->Length =
+ cpu_to_le32(num_prp_in_chain * sizeof(u64));
+ return;
+}
+
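Editorial aside, a worked instance of the first-PRP calculation above (the address is made up): with nvme_pg_size = 4096 and a first SGE at DMA address 0x10000600,

	offset        = 0x10000600 & 0xFFF = 0x600
	first_prp_len = 4096 - 0x600       = 0xA00  (2560 bytes)

so the first IEEE SGE in the main message covers 2560 bytes, and every later PRP entry written into the contiguous PCIe buffer covers a full 4 KiB page.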
+static bool
+base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
+{
+ u32 data_length = 0;
+ struct scatterlist *sg_scmd;
+ bool build_prp = true;
+
+ data_length = scsi_bufflen(scmd);
+ sg_scmd = scsi_sglist(scmd);
+
+ /* If the data length is <= 16K and the number of SGEs is <= 2,
+ * build an IEEE SGL instead of a PRP.
+ */
+ if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
+ build_prp = false;
+
+ return build_prp;
+}
+
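Editorial illustration of the threshold above (the values are examples, not from the patch):

	data_length   sge_count   chosen path
	8 KiB         2           IEEE SGL   (<= 16 KiB and <= 2 SGEs)
	8 KiB         3           NVMe PRP   (too many SGEs)
	64 KiB        2           NVMe PRP   (more than 16 KiB)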
+/**
+ * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
+ * determine if the driver needs to build a native SGL. If so, that native
+ * SGL is built in the special contiguous buffers allocated especially for
+ * PCIe SGL creation. If the driver will not build a native SGL, return
+ * 1 and a normal IEEE SGL will be built. Currently this routine
+ * supports NVMe.
+ * @ioc: per adapter object
+ * @mpi_request: mf request pointer
+ * @smid: system request message index
+ * @scmd: scsi command
+ * @pcie_device: points to the PCIe device's info
+ *
+ * Returns 0 if native SGL was built, 1 if no SGL was built
+ */
+static int
+_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
+ Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
+ struct _pcie_device *pcie_device)
+{
+ struct scatterlist *sg_scmd;
+ int sges_left;
+
+ /* Get the SG list pointer and info. */
+ sg_scmd = scsi_sglist(scmd);
+ sges_left = scsi_dma_map(scmd);
+ if (sges_left < 0) {
+ sdev_printk(KERN_ERR, scmd->device,
+ "scsi_dma_map failed: request for %d bytes!\n",
+ scsi_bufflen(scmd));
+ return 1;
+ }
+
+ /* Check if we need to build a native SG list. */
+ if (base_is_prp_possible(ioc, pcie_device,
+ scmd, sges_left) == 0) {
+ /* A native SG list is not needed; fall back to the IEEE SGL path. */
+ goto out;
+ }
+
+ /*
+ * Build native NVMe PRP.
+ */
+ base_make_prp_nvme(ioc, scmd, mpi_request,
+ smid, sges_left);
+
+ return 0;
+out:
+ scsi_dma_unmap(scmd);
+ return 1;
+}
+
+/**
* _base_add_sg_single_ieee - add sg element for IEEE format
* @paddr: virtual address for SGE
* @flags: SGE flags
@@ -1391,9 +1846,11 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
/**
* _base_build_sg_scmd - main sg creation routine
+ * pcie_device is unused here!
* @ioc: per adapter object
* @scmd: scsi command
* @smid: system request message index
+ * @unused: unused pcie_device pointer
* Context: none.
*
* The main routine that builds scatter gather table from a given
@@ -1403,7 +1860,7 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
*/
static int
_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
- struct scsi_cmnd *scmd, u16 smid)
+ struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
{
Mpi2SCSIIORequest_t *mpi_request;
dma_addr_t chain_dma;
@@ -1537,6 +1994,8 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @scmd: scsi command
* @smid: system request message index
+ * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
+ * constructed on need.
* Context: none.
*
* The main routine that builds scatter gather table from a given
@@ -1546,9 +2005,9 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
*/
static int
_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
- struct scsi_cmnd *scmd, u16 smid)
+ struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
{
- Mpi2SCSIIORequest_t *mpi_request;
+ Mpi25SCSIIORequest_t *mpi_request;
dma_addr_t chain_dma;
struct scatterlist *sg_scmd;
void *sg_local, *chain;
@@ -1571,6 +2030,13 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ /* Check if we need to build a native SG list. */
+ if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
+ smid, scmd, pcie_device) == 0)) {
+ /* We built a native SG list, just return. */
+ return 0;
+ }
+
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
if (sges_left < 0) {
@@ -1582,12 +2048,12 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sg_local = &mpi_request->SGL;
sges_in_segment = (ioc->request_sz -
- offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
+ offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
if (sges_left <= sges_in_segment)
goto fill_in_last_segment;
mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
- (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
+ (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
/* fill in main message segment when there is a chain following */
while (sges_in_segment > 1) {
@@ -1990,7 +2456,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->cpu_count, max_msix_vectors);
if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
- local_max_msix_vectors = 8;
+ local_max_msix_vectors = (reset_devices) ? 1 : 8;
else
local_max_msix_vectors = max_msix_vectors;
@@ -2267,6 +2733,32 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
/**
+ * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns virt pointer to a PCIe SGL.
+ */
+void *
+mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return (void *)(ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl);
+}
+
+/**
+ * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns phys pointer to the address of the PCIe buffer.
+ */
+dma_addr_t
+mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl_dma;
+}
+
+/**
* mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
* @ioc: per adapter object
* @phys_addr: lower 32 physical addr of the reply
@@ -2544,6 +3036,30 @@ _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
+ * _base_put_smid_nvme_encap - send NVMe encapsulated request to
+ * firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.Default.RequestFlags =
+ MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
+ descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.Default.SMID = cpu_to_le16(smid);
+ descriptor.Default.LMID = 0;
+ descriptor.Default.DescriptorTypeDependent = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
* _base_put_smid_default - Default, primarily used for config pages
* @ioc: per adapter object
* @smid: system request message index
@@ -2634,6 +3150,27 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
+ * _base_put_smid_nvme_encap_atomic - send NVMe encapsulated request to
+ * firmware using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_nvme_encap_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
+ descriptor.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
* _base_put_smid_default - Default, primarily used for config pages
* use Atomic Request Descriptor
* @ioc: per adapter object
@@ -2945,6 +3482,11 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
_base_display_OEMs_branding(ioc);
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
+ pr_info("%sNVMe", i ? "," : "");
+ i++;
+ }
+
pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
@@ -3245,6 +3787,17 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
kfree(ioc->reply_post);
}
+ if (ioc->pcie_sgl_dma_pool) {
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl)
+ pci_pool_free(ioc->pcie_sgl_dma_pool,
+ ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl,
+ ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
+ }
+ if (ioc->pcie_sgl_dma_pool)
+ pci_pool_destroy(ioc->pcie_sgl_dma_pool);
+ }
+
if (ioc->config_page) {
dexitprintk(ioc, pr_info(MPT3SAS_FMT
"config_page(0x%p): free\n", ioc->name,
@@ -3286,7 +3839,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
u16 chains_needed_per_io;
u32 sz, total_sz, reply_post_free_sz;
u32 retry_sz;
- u16 max_request_credit;
+ u16 max_request_credit, nvme_blocks_needed;
unsigned short sg_tablesize;
u16 sge_size;
int i;
@@ -3308,6 +3861,11 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
sg_tablesize = MPT3SAS_SG_DEPTH;
}
+ /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
+ if (reset_devices)
+ sg_tablesize = min_t(unsigned short, sg_tablesize,
+ MPT_KDUMP_MIN_PHYS_SEGMENTS);
+
if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
@@ -3340,7 +3898,10 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->internal_depth, facts->RequestCredit);
if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
max_request_credit = MAX_HBA_QUEUE_DEPTH;
- } else
+ } else if (reset_devices)
+ max_request_credit = min_t(u16, facts->RequestCredit,
+ (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
+ else
max_request_credit = min_t(u16, facts->RequestCredit,
MAX_HBA_QUEUE_DEPTH);
@@ -3622,7 +4183,52 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
"internal(0x%p): depth(%d), start smid(%d)\n",
ioc->name, ioc->internal,
ioc->internal_depth, ioc->internal_smid));
+ /*
+ * The number of NVMe page sized blocks needed is:
+ * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
+ * ((sg_tablesize * 8) - 1) is the maximum number of PRPs minus the first
+ * PRP entry, which is placed in the main message frame. 8 is the size of
+ * each PRP entry or PRP list pointer entry. 8 is subtracted from page_size
+ * because the PRP list pointer entry at the end of a page is not counted
+ * as a PRP entry. The final + 1 rounds up.
+ *
+ * To avoid allocation failures due to the amount of memory that could
+ * be required for NVMe PRPs, only each set of NVMe blocks will be
+ * contiguous, so a new set is allocated for each possible I/O.
+ */
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
+ nvme_blocks_needed =
+ (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
+ nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
+ nvme_blocks_needed++;
+
+ sz = nvme_blocks_needed * ioc->page_size;
+ ioc->pcie_sgl_dma_pool =
+ pci_pool_create("PCIe SGL pool", ioc->pdev, sz, 16, 0);
+ if (!ioc->pcie_sgl_dma_pool) {
+ pr_info(MPT3SAS_FMT
+ "PCIe SGL pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl =
+ pci_pool_alloc(ioc->pcie_sgl_dma_pool,
+ GFP_KERNEL,
+ &ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
+ if (!ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl) {
+ pr_info(MPT3SAS_FMT
+ "PCIe SGL pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ }
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
+ "element_size(%d), pool_size(%d kB)\n", ioc->name,
+ ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
+ total_sz += sz * ioc->scsiio_depth;
+ }
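Editorial aside, a worked instance of the sizing formula above (128 scatter/gather entries and a 4 KiB host page are assumed values, not taken from the patch):

	nvme_blocks_needed = ((128 * 8) - 1) / (4096 - 8) + 1
	                   = 1023 / 4088 + 1
	                   = 0 + 1
	                   = 1

	sz = 1 * 4096 = 4 KiB of PRP space per outstanding I/O,

so the pool costs roughly scsiio_depth * 4 KiB in total.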
/* sense buffers, 4 byte align */
sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
@@ -4446,7 +5052,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
ioc->ir_firmware = 1;
if ((facts->IOCCapabilities &
- MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
+ MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
ioc->rdpq_array_capable = 1;
if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
ioc->atomic_desc_capable = 1;
@@ -4467,6 +5073,19 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
le16_to_cpu(mpi_reply.HighPriorityCredit);
facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
+ facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
+
+ /*
+ * Get the Page Size from IOC Facts. If it's 0, default to 4k.
+ */
+ ioc->page_size = 1 << facts->CurrentHostPageSize;
+ if (ioc->page_size == 1) {
+ pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
+ "default host page size to 4k\n", ioc->name);
+ ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
+ }
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
+ ioc->name, facts->CurrentHostPageSize));
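Editorial aside: CurrentHostPageSize is an exponent, not a byte count. For instance (values assumed for illustration):

	CurrentHostPageSize = 12  ->  ioc->page_size = 1 << 12 = 4096 bytes
	CurrentHostPageSize = 0   ->  1 << 0 = 1, treated as "not reported",
	                              so the default 1 << MPT3SAS_HOST_PAGE_SIZE_4K = 4096 is used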
dinitprintk(ioc, pr_info(MPT3SAS_FMT
"hba queue depth(%d), max chains per io(%d)\n",
@@ -4506,6 +5125,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
mpi_request.VP_ID = 0;
mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+ mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
if (_base_is_controller_msix_enabled(ioc))
mpi_request.HostMSIxVectors = ioc->reply_queue_count;
@@ -5374,6 +5994,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
*/
ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
ioc->build_sg = &_base_build_sg_ieee;
+ ioc->build_nvme_prp = &_base_build_nvme_prp;
ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
@@ -5385,11 +6006,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
+ ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap_atomic;
} else {
ioc->put_smid_default = &_base_put_smid_default;
ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
ioc->put_smid_fast_path = &_base_put_smid_fast_path;
ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
+ ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap;
}
@@ -5517,9 +6140,16 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
- if (ioc->hba_mpi_version_belonged == MPI26_VERSION)
- _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
-
+ _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
+ if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
+ if (ioc->is_gen35_ioc) {
+ _base_unmask_events(ioc,
+ MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
+ _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
+ _base_unmask_events(ioc,
+ MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
+ }
+ }
r = _base_make_ioc_operational(ioc);
if (r)
goto out_free_resources;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index a77bb7dc12b1..60f42ca3954f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -54,6 +54,7 @@
#include "mpi/mpi2_raid.h"
#include "mpi/mpi2_tool.h"
#include "mpi/mpi2_sas.h"
+#include "mpi/mpi2_pci.h"
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -73,8 +74,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "15.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 15
+#define MPT3SAS_DRIVER_VERSION "17.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 17
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -92,6 +93,7 @@
*/
#define MPT_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE
#define MPT_MIN_PHYS_SEGMENTS 16
+#define MPT_KDUMP_MIN_PHYS_SEGMENTS 32
#ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE
#define MPT3SAS_SG_DEPTH CONFIG_SCSI_MPT3SAS_MAX_SGE
@@ -111,9 +113,11 @@
#define MPT3SAS_SATA_QUEUE_DEPTH 32
#define MPT3SAS_SAS_QUEUE_DEPTH 254
#define MPT3SAS_RAID_QUEUE_DEPTH 128
+#define MPT3SAS_KDUMP_SCSI_IO_DEPTH 200
#define MPT3SAS_RAID_MAX_SECTORS 8192
-
+#define MPT3SAS_HOST_PAGE_SIZE_4K 12
+#define MPT3SAS_NVME_QUEUE_DEPTH 128
#define MPT_NAME_LENGTH 32 /* generic length of strings */
#define MPT_STRING_LENGTH 64
@@ -131,6 +135,15 @@
#define DEFAULT_NUM_FWCHAIN_ELEMTS 8
/*
+ * NVMe defines
+ */
+#define NVME_PRP_SIZE 8 /* PRP size */
+#define NVME_CMD_PRP1_OFFSET 24 /* PRP1 offset in NVMe cmd */
+#define NVME_CMD_PRP2_OFFSET 32 /* PRP2 offset in NVMe cmd */
+#define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */
+#define NVME_PRP_PAGE_SIZE 4096 /* Page size */
+
+/*
* reset phases
*/
#define MPT3_IOC_PRE_RESET 1 /* prior to host reset */
@@ -159,6 +172,7 @@
#define MPT_TARGET_FLAGS_VOLUME 0x02
#define MPT_TARGET_FLAGS_DELETED 0x04
#define MPT_TARGET_FASTPATH_IO 0x08
+#define MPT_TARGET_FLAGS_PCIE_DEVICE 0x10
#define SAS2_PCI_DEVICE_B0_REVISION (0x01)
#define SAS3_PCI_DEVICE_C0_REVISION (0x02)
@@ -357,7 +371,8 @@ struct Mpi2ManufacturingPage11_t {
* @flags: MPT_TARGET_FLAGS_XXX flags
* @deleted: target flaged for deletion
* @tm_busy: target is busy with TM request.
- * @sdev: The sas_device associated with this target
+ * @sas_dev: The sas_device associated with this target
+ * @pcie_dev: The pcie device associated with this target
*/
struct MPT3SAS_TARGET {
struct scsi_target *starget;
@@ -368,7 +383,8 @@ struct MPT3SAS_TARGET {
u32 flags;
u8 deleted;
u8 tm_busy;
- struct _sas_device *sdev;
+ struct _sas_device *sas_dev;
+ struct _pcie_device *pcie_dev;
};
@@ -467,6 +483,8 @@ struct _internal_cmd {
* @pfa_led_on: flag for PFA LED status
* @pend_sas_rphy_add: flag to check if device is in sas_rphy_add()
* addition routine.
+ * @chassis_slot: chassis slot
+ * @is_chassis_slot_valid: chassis slot valid or not
*/
struct _sas_device {
struct list_head list;
@@ -489,6 +507,8 @@ struct _sas_device {
u8 pfa_led_on;
u8 pend_sas_rphy_add;
u8 enclosure_level;
+ u8 chassis_slot;
+ u8 is_chassis_slot_valid;
u8 connector_name[5];
struct kref refcount;
};
@@ -508,6 +528,89 @@ static inline void sas_device_put(struct _sas_device *s)
kref_put(&s->refcount, sas_device_free);
}
+/*
+ * struct _pcie_device - attached PCIe device information
+ * @list: pcie device list
+ * @starget: starget object
+ * @wwid: device WWID
+ * @handle: device handle
+ * @device_info: bitfield provides detailed info about the device
+ * @id: target id
+ * @channel: target channel
+ * @slot: slot number
+ * @port_num: port number
+ * @responding: used in _scsih_pcie_device_mark_responding
+ * @fast_path: fast path feature enable bit
+ * @nvme_mdts: MaximumDataTransferSize from PCIe Device Page 2 for
+ * NVMe device only
+ * @enclosure_handle: enclosure handle
+ * @enclosure_logical_id: enclosure logical identifier
+ * @enclosure_level: The level of device's enclosure from the controller
+ * @connector_name: ASCII value of the Connector's name
+ * @serial_number: pointer to the serial number string, allocated at runtime
+ * @refcount: reference count for deletion
+ */
+struct _pcie_device {
+ struct list_head list;
+ struct scsi_target *starget;
+ u64 wwid;
+ u16 handle;
+ u32 device_info;
+ int id;
+ int channel;
+ u16 slot;
+ u8 port_num;
+ u8 responding;
+ u8 fast_path;
+ u32 nvme_mdts;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u8 enclosure_level;
+ u8 connector_name[4];
+ u8 *serial_number;
+ struct kref refcount;
+};
+/**
+ * pcie_device_get - Increment the pcie device reference count
+ *
+ * @p: pcie_device object
+ *
+ * Whenever this function is called, it increments the reference count
+ * of the pcie device for which it is called.
+ *
+ */
+static inline void pcie_device_get(struct _pcie_device *p)
+{
+ kref_get(&p->refcount);
+}
+
+/**
+ * pcie_device_free - Release the pcie device object
+ * @r: kref object
+ *
+ * Frees the pcie device object. It is called when the reference count
+ * reaches zero.
+ */
+static inline void pcie_device_free(struct kref *r)
+{
+ kfree(container_of(r, struct _pcie_device, refcount));
+}
+
+/**
+ * pcie_device_put - Decrement the pcie device reference count
+ *
+ * @p: pcie_device object
+ *
+ * Whenever this function is called, it decrements the reference count
+ * of the pcie device for which it is called.
+ *
+ * When the reference count reaches zero, pcie_device_free() is called to
+ * free the pcie_device object.
+ */
+static inline void pcie_device_put(struct _pcie_device *p)
+{
+ kref_put(&p->refcount, pcie_device_free);
+}
/**
* struct _raid_device - raid volume link list
* @list: sas device list
@@ -556,12 +659,13 @@ struct _raid_device {
/**
* struct _boot_device - boot device info
- * @is_raid: flag to indicate whether this is volume
- * @device: holds pointer for either struct _sas_device or
- * struct _raid_device
+ *
+ * @channel: sas, raid, or pcie channel
+ * @device: holds pointer for struct _sas_device, struct _raid_device or
+ * struct _pcie_device
*/
struct _boot_device {
- u8 is_raid;
+ int channel;
void *device;
};
@@ -644,6 +748,16 @@ enum reset_type {
};
/**
+ * struct pcie_sg_list - PCIe SGL buffer (contiguous per I/O)
+ * @pcie_sgl: PCIe native SGL for NVMe devices
+ * @pcie_sgl_dma: physical address
+ */
+struct pcie_sg_list {
+ void *pcie_sgl;
+ dma_addr_t pcie_sgl_dma;
+};
+
+/**
* struct chain_tracker - firmware chain tracker
* @chain_buffer: chain buffer
* @chain_buffer_dma: physical address
@@ -669,6 +783,7 @@ struct scsiio_tracker {
struct scsi_cmnd *scmd;
u8 cb_idx;
u8 direct_io;
+ struct pcie_sg_list pcie_sg_list;
struct list_head chain_list;
struct list_head tracker_list;
u16 msix_io;
@@ -742,13 +857,19 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
/* SAS3.0 support */
typedef int (*MPT_BUILD_SG_SCMD)(struct MPT3SAS_ADAPTER *ioc,
- struct scsi_cmnd *scmd, u16 smid);
+ struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device);
typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
dma_addr_t data_out_dma, size_t data_out_sz,
dma_addr_t data_in_dma, size_t data_in_sz);
typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
void *paddr);
+/* SAS3.5 support */
+typedef void (*NVME_BUILD_PRP)(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
+ dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
+ size_t data_in_sz);
+
/* To support atomic and non atomic descriptors*/
typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 funcdep);
@@ -791,6 +912,7 @@ struct mpt3sas_facts {
u16 MaxDevHandle;
u16 MaxPersistentEntries;
u16 MinDevHandle;
+ u8 CurrentHostPageSize;
};
struct mpt3sas_port_facts {
@@ -825,6 +947,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @bars: bitmask of BAR's that must be configured
* @mask_interrupts: ignore interrupt
* @dma_mask: used to set the consistent dma mask
+ * @pci_access_mutex: Mutex to synchronize ioctl, sysfs show path and
+ * pci resource handling
* @fault_reset_work_q_name: fw fault work queue
* @fault_reset_work_q: ""
* @fault_reset_work: ""
@@ -888,9 +1012,13 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @sas_device_list: sas device object list
* @sas_device_init_list: sas device object list (used only at init time)
* @sas_device_lock:
+ * @pcie_device_list: pcie device object list
+ * @pcie_device_init_list: pcie device object list (used only at init time)
+ * @pcie_device_lock:
* @io_missing_delay: time for IO completed by fw when PDR enabled
* @device_missing_delay: time for device missing by fw when PDR enabled
* @sas_id : used for setting volume target IDs
+ * @pcie_target_id: used for setting pcie target IDs
* @blocking_handles: bitmask used to identify which devices need blocking
* @pd_handles : bitmask for PD handles
* @pd_handles_sz : size of pd_handle bitmask
@@ -1056,6 +1184,9 @@ struct MPT3SAS_ADAPTER {
MPT_BUILD_SG build_sg_mpi;
MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi;
+ /* function ptr for NVMe PRP elements only */
+ NVME_BUILD_PRP build_nvme_prp;
+
/* event log */
u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
u32 event_context;
@@ -1086,11 +1217,16 @@ struct MPT3SAS_ADAPTER {
struct list_head sas_device_list;
struct list_head sas_device_init_list;
spinlock_t sas_device_lock;
+ struct list_head pcie_device_list;
+ struct list_head pcie_device_init_list;
+ spinlock_t pcie_device_lock;
+
struct list_head raid_device_list;
spinlock_t raid_device_lock;
u8 io_missing_delay;
u16 device_missing_delay;
int sas_id;
+ int pcie_target_id;
void *blocking_handles;
void *pd_handles;
@@ -1119,6 +1255,11 @@ struct MPT3SAS_ADAPTER {
int pending_io_count;
wait_queue_head_t reset_wq;
+ /* PCIe SGL */
+ struct dma_pool *pcie_sgl_dma_pool;
+ /* Host Page Size */
+ u32 page_size;
+
/* chain */
struct chain_tracker *chain_lookup;
struct list_head free_chain_list;
@@ -1216,6 +1357,7 @@ struct MPT3SAS_ADAPTER {
PUT_SMID_IO_FP_HIP put_smid_fast_path;
PUT_SMID_IO_FP_HIP put_smid_hi_priority;
PUT_SMID_DEFAULT put_smid_default;
+ PUT_SMID_DEFAULT put_smid_nvme_encap;
};
@@ -1252,7 +1394,8 @@ void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid);
__le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc,
u16 smid);
-
+void *mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+dma_addr_t mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
/* hi-priority queue */
@@ -1321,6 +1464,10 @@ struct _sas_device *mpt3sas_get_sdev_by_addr(
struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
struct _sas_device *__mpt3sas_get_sdev_by_addr(
struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+struct _sas_device *mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc,
+ u16 handle);
+struct _pcie_device *mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc,
+ u16 handle);
void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc);
struct _raid_device *
@@ -1359,6 +1506,12 @@ int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
u32 form, u32 handle);
+int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page,
+ u32 form, u32 handle);
int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
u16 sz);
@@ -1466,7 +1619,7 @@ void
mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io);
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
u16 smid);
/* NCQ Prio Handling Check */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index dd6270125614..1c747cf419d5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -150,6 +150,24 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING:
desc = "driver_mapping";
break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_PORT:
+ desc = "sas_port";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING:
+ desc = "ext_manufacturing";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT:
+ desc = "pcie_io_unit";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH:
+ desc = "pcie_switch";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE:
+ desc = "pcie_device";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK:
+ desc = "pcie_link";
+ break;
}
break;
}
@@ -1053,6 +1071,88 @@ mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * mpt3sas_config_get_pcie_device_pg0 - obtain pcie device page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page,
+ u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE;
+ mpi_request.Header.PageVersion = MPI26_PCIEDEVICE0_PAGEVERSION;
+ mpi_request.Header.PageNumber = 0;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_pcie_device_pg2 - obtain pcie device page 2
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page,
+ u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE;
+ mpi_request.Header.PageVersion = MPI26_PCIEDEVICE2_PAGEVERSION;
+ mpi_request.Header.PageNumber = 2;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+out:
+ return r;
+}
+
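Editorial aside: a caller sketch for the new config-page reader (the PGAD form constant is assumed to come from the companion mpi2_cnfg.h update; 'handle' is a firmware device handle obtained elsewhere):

	Mpi2ConfigReply_t mpi_reply;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;

	if (mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, &pcie_device_pg0,
	    MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)) {
		pr_err(MPT3SAS_FMT "failure reading PCIe device page 0\n", ioc->name);
		return;
	}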
+/**
* mpt3sas_config_get_number_hba_phys - obtain number of phys on the host
* @ioc: per adapter object
* @num_phys: pointer returned with the number of phys
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index bdffb692bded..b4c374b08e5e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -79,32 +79,6 @@ enum block_state {
};
/**
- * _ctl_sas_device_find_by_handle - sas device search
- * @ioc: per adapter object
- * @handle: sas device handle (assigned by firmware)
- * Context: Calling function should acquire ioc->sas_device_lock
- *
- * This searches for sas_device based on sas_address, then return sas_device
- * object.
- */
-static struct _sas_device *
-_ctl_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
-{
- struct _sas_device *sas_device, *r;
-
- r = NULL;
- list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if (sas_device->handle != handle)
- continue;
- r = sas_device;
- goto out;
- }
-
- out:
- return r;
-}
-
-/**
* _ctl_display_some_debug - debug routine
* @ioc: per adapter object
* @smid: system request message index
@@ -229,10 +203,9 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
Mpi2SCSIIOReply_t *scsi_reply =
(Mpi2SCSIIOReply_t *)mpi_reply;
struct _sas_device *sas_device = NULL;
- unsigned long flags;
+ struct _pcie_device *pcie_device = NULL;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _ctl_sas_device_find_by_handle(ioc,
+ sas_device = mpt3sas_get_sdev_by_handle(ioc,
le16_to_cpu(scsi_reply->DevHandle));
if (sas_device) {
pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
@@ -242,8 +215,25 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
"\tenclosure_logical_id(0x%016llx), slot(%d)\n",
ioc->name, (unsigned long long)
sas_device->enclosure_logical_id, sas_device->slot);
+ sas_device_put(sas_device);
+ }
+ if (!sas_device) {
+ pcie_device = mpt3sas_get_pdev_by_handle(ioc,
+ le16_to_cpu(scsi_reply->DevHandle));
+ if (pcie_device) {
+ pr_warn(MPT3SAS_FMT
+ "\tWWID(0x%016llx), port(%d)\n", ioc->name,
+ (unsigned long long)pcie_device->wwid,
+ pcie_device->port_num);
+ if (pcie_device->enclosure_handle != 0)
+ pr_warn(MPT3SAS_FMT
+ "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ ioc->name, (unsigned long long)
+ pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ pcie_device_put(pcie_device);
+ }
}
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
pr_info(MPT3SAS_FMT
"\tscsi_state(0x%02x), scsi_status"
@@ -272,6 +262,7 @@ mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
{
MPI2DefaultReply_t *mpi_reply;
Mpi2SCSIIOReply_t *scsiio_reply;
+ Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply;
const void *sense_data;
u32 sz;
@@ -298,7 +289,20 @@ mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
memcpy(ioc->ctl_cmds.sense, sense_data, sz);
}
}
+ /*
+ * Get Error Response data for NVMe device. The ctl_cmds.sense
+ * buffer is used to store the Error Response data.
+ */
+ if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
+ nvme_error_reply =
+ (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
+ sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
+ le32_to_cpu(nvme_error_reply->ErrorResponseCount));
+ sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
+ memcpy(ioc->ctl_cmds.sense, sense_data, sz);
+ }
}
+
_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
complete(&ioc->ctl_cmds.done);
@@ -640,11 +644,12 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
{
MPI2RequestHeader_t *mpi_request = NULL, *request;
MPI2DefaultReply_t *mpi_reply;
+ Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
u32 ioc_state;
u16 smid;
unsigned long timeout;
u8 issue_reset;
- u32 sz;
+ u32 sz, sz_arg;
void *psge;
void *data_out = NULL;
dma_addr_t data_out_dma = 0;
@@ -741,7 +746,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
- mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH) {
+ mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
if (!device_handle || (device_handle >
@@ -792,6 +798,38 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
init_completion(&ioc->ctl_cmds.done);
switch (mpi_request->Function) {
+ case MPI2_FUNCTION_NVME_ENCAPSULATED:
+ {
+ nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
+ /*
+ * Get the Physical Address of the sense buffer.
+ * Use Error Response buffer address field to hold the sense
+ * buffer address.
+ * Clear the internal sense buffer, which will potentially hold
+ * the Completion Queue Entry on return, or 0 if no Entry.
+ * Build the PRPs and set direction bits.
+ * Send the request.
+ */
+ nvme_encap_request->ErrorResponseBaseAddress = ioc->sense_dma &
+ 0xFFFFFFFF00000000;
+ nvme_encap_request->ErrorResponseBaseAddress |=
+ (U64)mpt3sas_base_get_sense_buffer_dma(ioc, smid);
+ nvme_encap_request->ErrorResponseAllocationLength =
+ NVME_ERROR_RESPONSE_SIZE;
+ memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
+ ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
+ data_out_dma, data_out_sz, data_in_dma, data_in_sz);
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "handle(0x%04x) :"
+ "ioctl failed due to device removal in progress\n",
+ ioc->name, device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ ioc->put_smid_nvme_encap(ioc, smid);
+ break;
+ }
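Editorial note on the address splice above: mpt3sas_base_get_sense_buffer_dma() returns only the lower 32 bits of the per-smid sense buffer address (its return type is __le32), so the upper 32 bits are taken from the sense pool base in ioc->sense_dma. The resulting 64-bit value is

	ErrorResponseBaseAddress = (ioc->sense_dma & 0xFFFFFFFF00000000)
	                         | mpt3sas_base_get_sense_buffer_dma(ioc, smid)

which works because every per-smid sense buffer lives inside the single sense pool allocation and so shares its upper address bits (assuming the pool does not straddle a 4 GiB boundary, which the driver relies on).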
case MPI2_FUNCTION_SCSI_IO_REQUEST:
case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
{
@@ -1007,15 +1045,25 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
}
- /* copy out sense to user */
+ /* copy out sense/NVMe Error Response to user */
if (karg.max_sense_bytes && (mpi_request->Function ==
MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
- MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
- sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
+ MPI2_FUNCTION_NVME_ENCAPSULATED)) {
+ if (karg.sense_data_ptr == NULL) {
+ pr_info(MPT3SAS_FMT "Response buffer provided"
+ " by application is NULL; Response data will"
+ " not be returned.\n", ioc->name);
+ goto out;
+ }
+ sz_arg = (mpi_request->Function ==
+ MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE :
+ SCSI_SENSE_BUFFERSIZE;
+ sz = min_t(u32, karg.max_sense_bytes, sz_arg);
if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
sz)) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
- __LINE__, __func__);
+ __LINE__, __func__);
ret = -ENODATA;
goto out;
}
@@ -1065,12 +1113,6 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt3_ioctl_iocinfo karg;
- if (copy_from_user(&karg, arg, sizeof(karg))) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
-
dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
__func__));
@@ -1295,6 +1337,42 @@ _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _ctl_btdh_search_pcie_device - searching for pcie device
+ * @ioc: per adapter object
+ * @btdh: btdh ioctl payload
+ */
+static int
+_ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc,
+ struct mpt3_ioctl_btdh_mapping *btdh)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+ int rc = 0;
+
+ if (list_empty(&ioc->pcie_device_list))
+ return rc;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
+ if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
+ btdh->handle == pcie_device->handle) {
+ btdh->bus = pcie_device->channel;
+ btdh->id = pcie_device->id;
+ rc = 1;
+ goto out;
+ } else if (btdh->bus == pcie_device->channel && btdh->id ==
+ pcie_device->id && btdh->handle == 0xFFFF) {
+ btdh->handle = pcie_device->handle;
+ rc = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ return rc;
+}
+
+/**
* _ctl_btdh_search_raid_device - searching for raid device
* @ioc: per adapter object
* @btdh: btdh ioctl payload
@@ -1352,6 +1430,8 @@ _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
rc = _ctl_btdh_search_sas_device(ioc, &karg);
if (!rc)
+ rc = _ctl_btdh_search_pcie_device(ioc, &karg);
+ if (!rc)
_ctl_btdh_search_raid_device(ioc, &karg);
if (copy_to_user(arg, &karg, sizeof(karg))) {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 22998cbd538f..b258f210120a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -60,6 +60,9 @@
#include "mpt3sas_base.h"
#define RAID_CHANNEL 1
+
+#define PCIE_CHANNEL 2
+
/* forward proto's */
static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
struct _sas_node *sas_expander);
@@ -69,7 +72,11 @@ static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
struct _sas_device *sas_device);
static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
u8 retry_count, u8 is_pd);
-
+static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device);
+static void
+_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
/* global parameters */
@@ -281,7 +288,7 @@ struct _scsi_io_transfer {
* Note: The logging levels are defined in mpt3sas_debug.h.
*/
static int
-_scsih_set_debug_level(const char *val, struct kernel_param *kp)
+_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
{
int ret = param_set_int(val, kp);
struct MPT3SAS_ADAPTER *ioc;
@@ -406,11 +413,6 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
*sas_address = 0;
- if (handle <= ioc->sas_hba.num_phys) {
- *sas_address = ioc->sas_hba.sas_address;
- return 0;
- }
-
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
@@ -420,7 +422,15 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
- *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ /* For handles on the HBA itself, return the HBA SAS address,
+ * except for the vSES device (a SEP), which returns its own
+ * SAS address instead of the HBA's.
+ */
+ if ((handle <= ioc->sas_hba.num_phys) &&
+ (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
+ MPI2_SAS_DEVICE_INFO_SEP)))
+ *sas_address = ioc->sas_hba.sas_address;
+ else
+ *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
return 0;
}
@@ -439,21 +449,22 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
/**
* _scsih_determine_boot_device - determine boot device.
* @ioc: per adapter object
- * @device: either sas_device or raid_device object
- * @is_raid: [flag] 1 = raid object, 0 = sas object
+ * @device: sas_device or pcie_device object
+ * @channel: SAS or PCIe channel
*
* Determines whether this device should be first reported device to
* scsi-ml or the sas transport; this is for persistent boot device support.
* There are primary, alternate, and current entries in bios page 2. The order
* priority is primary, alternate, then current. This routine saves
- * the corresponding device object and is_raid flag in the ioc object.
+ * the corresponding device object.
* The saved data to be used later in _scsih_probe_boot_devices().
*/
static void
-_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
- void *device, u8 is_raid)
+_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
+ u32 channel)
{
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
struct _raid_device *raid_device;
u64 sas_address;
u64 device_name;
@@ -468,18 +479,24 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
if (!ioc->bios_pg3.BiosVersion)
return;
- if (!is_raid) {
- sas_device = device;
- sas_address = sas_device->sas_address;
- device_name = sas_device->device_name;
- enclosure_logical_id = sas_device->enclosure_logical_id;
- slot = sas_device->slot;
- } else {
+ if (channel == RAID_CHANNEL) {
raid_device = device;
sas_address = raid_device->wwid;
device_name = 0;
enclosure_logical_id = 0;
slot = 0;
+ } else if (channel == PCIE_CHANNEL) {
+ pcie_device = device;
+ sas_address = pcie_device->wwid;
+ device_name = 0;
+ enclosure_logical_id = 0;
+ slot = 0;
+ } else {
+ sas_device = device;
+ sas_address = sas_device->sas_address;
+ device_name = sas_device->device_name;
+ enclosure_logical_id = sas_device->enclosure_logical_id;
+ slot = sas_device->slot;
}
if (!ioc->req_boot_device.device) {
@@ -493,7 +510,7 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
ioc->name, __func__,
(unsigned long long)sas_address));
ioc->req_boot_device.device = device;
- ioc->req_boot_device.is_raid = is_raid;
+ ioc->req_boot_device.channel = channel;
}
}
@@ -508,7 +525,7 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
ioc->name, __func__,
(unsigned long long)sas_address));
ioc->req_alt_boot_device.device = device;
- ioc->req_alt_boot_device.is_raid = is_raid;
+ ioc->req_alt_boot_device.channel = channel;
}
}
@@ -523,7 +540,7 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
ioc->name, __func__,
(unsigned long long)sas_address));
ioc->current_boot_device.device = device;
- ioc->current_boot_device.is_raid = is_raid;
+ ioc->current_boot_device.channel = channel;
}
}
}
@@ -536,7 +553,7 @@ __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
assert_spin_locked(&ioc->sas_device_lock);
- ret = tgt_priv->sdev;
+ ret = tgt_priv->sas_dev;
if (ret)
sas_device_get(ret);
@@ -557,6 +574,44 @@ mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
return ret;
}
+static struct _pcie_device *
+__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
+ struct MPT3SAS_TARGET *tgt_priv)
+{
+ struct _pcie_device *ret;
+
+ assert_spin_locked(&ioc->pcie_device_lock);
+
+ ret = tgt_priv->pcie_dev;
+ if (ret)
+ pcie_device_get(ret);
+
+ return ret;
+}
+
+/**
+ * mpt3sas_get_pdev_from_target - pcie device search
+ * @ioc: per adapter object
+ * @tgt_priv: starget private object
+ *
+ * Context: This function will acquire ioc->pcie_device_lock and will release
+ * before returning the pcie_device object.
+ *
+ * This searches for the pcie_device from the target, then returns the
+ * pcie_device object.
+ */
+static struct _pcie_device *
+mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
+ struct MPT3SAS_TARGET *tgt_priv)
+{
+ struct _pcie_device *ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ return ret;
+}
struct _sas_device *
__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
@@ -636,7 +691,7 @@ found_device:
* This searches for sas_device based on sas_address, then return sas_device
* object.
*/
-static struct _sas_device *
+struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
struct _sas_device *sas_device;
@@ -650,6 +705,69 @@ mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/**
+ * _scsih_display_enclosure_chassis_info - display device location info
+ * @ioc: per adapter object
+ * @sas_device: per sas device object
+ * @sdev: scsi device struct
+ * @starget: scsi target struct
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device, struct scsi_device *sdev,
+ struct scsi_target *starget)
+{
+ if (sdev) {
+ if (sas_device->enclosure_handle != 0)
+ sdev_printk(KERN_INFO, sdev,
+ "enclosure logical id (0x%016llx), slot(%d) \n",
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->connector_name[0] != '\0')
+ sdev_printk(KERN_INFO, sdev,
+ "enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name);
+ if (sas_device->is_chassis_slot_valid)
+ sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
+ sas_device->chassis_slot);
+ } else if (starget) {
+ if (sas_device->enclosure_handle != 0)
+ starget_printk(KERN_INFO, starget,
+ "enclosure logical id(0x%016llx), slot(%d) \n",
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->connector_name[0] != '\0')
+ starget_printk(KERN_INFO, starget,
+ "enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name);
+ if (sas_device->is_chassis_slot_valid)
+ starget_printk(KERN_INFO, starget,
+ "chassis slot(0x%04x)\n",
+ sas_device->chassis_slot);
+ } else {
+ if (sas_device->enclosure_handle != 0)
+ pr_info(MPT3SAS_FMT
+ "enclosure logical id(0x%016llx), slot(%d) \n",
+ ioc->name, (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->connector_name[0] != '\0')
+ pr_info(MPT3SAS_FMT
+ "enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, sas_device->enclosure_level,
+ sas_device->connector_name);
+ if (sas_device->is_chassis_slot_valid)
+ pr_info(MPT3SAS_FMT "chassis slot(0x%04x)\n",
+ ioc->name, sas_device->chassis_slot);
+ }
+}
+
+/**
* _scsih_sas_device_remove - remove sas_device from list.
* @ioc: per adapter object
* @sas_device: the sas_device object
@@ -670,17 +788,7 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
ioc->name, sas_device->handle,
(unsigned long long) sas_device->sas_address);
- if (sas_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "removing enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot);
-
- if (sas_device->connector_name[0] != '\0')
- pr_info(MPT3SAS_FMT
- "removing enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, sas_device->enclosure_level,
- sas_device->connector_name);
+ _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
/*
* The lock serializes access to the list, but we still need to verify
@@ -772,17 +880,8 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
ioc->name, __func__, sas_device->handle,
(unsigned long long)sas_device->sas_address));
- if (sas_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure logical id(0x%016llx), slot( %d)\n",
- ioc->name, __func__, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot));
-
- if (sas_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__,
- sas_device->enclosure_level, sas_device->connector_name));
+ dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL));
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device_get(sas_device);
@@ -832,17 +931,8 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
__func__, sas_device->handle,
(unsigned long long)sas_device->sas_address));
- if (sas_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure logical id(0x%016llx), slot( %d)\n",
- ioc->name, __func__, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot));
-
- if (sas_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__, sas_device->enclosure_level,
- sas_device->connector_name));
+ dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL));
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device_get(sas_device);
@@ -851,6 +941,282 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
+
+static struct _pcie_device *
+__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
+{
+ struct _pcie_device *pcie_device;
+
+ assert_spin_locked(&ioc->pcie_device_lock);
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
+ if (pcie_device->wwid == wwid)
+ goto found_device;
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
+ if (pcie_device->wwid == wwid)
+ goto found_device;
+
+ return NULL;
+
+found_device:
+ pcie_device_get(pcie_device);
+ return pcie_device;
+}
+
+
+/**
+ * mpt3sas_get_pdev_by_wwid - pcie device search
+ * @ioc: per adapter object
+ * @wwid: wwid
+ *
+ * Context: This function will acquire ioc->pcie_device_lock and will release
+ * before returning the pcie_device object.
+ *
+ * This searches for the pcie_device based on wwid, then returns the
+ * pcie_device object.
+ */
+static struct _pcie_device *
+mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ return pcie_device;
+}
+
+
+static struct _pcie_device *
+__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
+ int channel)
+{
+ struct _pcie_device *pcie_device;
+
+ assert_spin_locked(&ioc->pcie_device_lock);
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
+ if (pcie_device->id == id && pcie_device->channel == channel)
+ goto found_device;
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
+ if (pcie_device->id == id && pcie_device->channel == channel)
+ goto found_device;
+
+ return NULL;
+
+found_device:
+ pcie_device_get(pcie_device);
+ return pcie_device;
+}
+
+static struct _pcie_device *
+__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _pcie_device *pcie_device;
+
+ assert_spin_locked(&ioc->pcie_device_lock);
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
+ if (pcie_device->handle == handle)
+ goto found_device;
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
+ if (pcie_device->handle == handle)
+ goto found_device;
+
+ return NULL;
+
+found_device:
+ pcie_device_get(pcie_device);
+ return pcie_device;
+}
+
+
+/**
+ * mpt3sas_get_pdev_by_handle - pcie device search
+ * @ioc: per adapter object
+ * @handle: Firmware device handle
+ *
+ * Context: This function will acquire ioc->pcie_device_lock and will release
+ * before returning the pcie_device object.
+ *
+ * This searches for the pcie_device based on handle, then returns the
+ * pcie_device object.
+ */
+struct _pcie_device *
+mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ return pcie_device;
+}
+
+/**
+ * _scsih_pcie_device_remove - remove pcie_device from list.
+ * @ioc: per adapter object
+ * @pcie_device: the pcie_device object
+ * Context: This function will acquire ioc->pcie_device_lock.
+ *
+ * If pcie_device is on the list, remove it and decrement its reference count.
+ */
+static void
+_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ unsigned long flags;
+ int was_on_pcie_device_list = 0;
+
+ if (!pcie_device)
+ return;
+ pr_info(MPT3SAS_FMT
+ "removing handle(0x%04x), wwid(0x%016llx)\n",
+ ioc->name, pcie_device->handle,
+ (unsigned long long) pcie_device->wwid);
+ if (pcie_device->enclosure_handle != 0)
+ pr_info(MPT3SAS_FMT
+ "removing enclosure logical id(0x%016llx), slot(%d)\n",
+ ioc->name,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0] != '\0')
+ pr_info(MPT3SAS_FMT
+ "removing enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, pcie_device->enclosure_level,
+ pcie_device->connector_name);
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ if (!list_empty(&pcie_device->list)) {
+ list_del_init(&pcie_device->list);
+ was_on_pcie_device_list = 1;
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ if (was_on_pcie_device_list) {
+ kfree(pcie_device->serial_number);
+ pcie_device_put(pcie_device);
+ }
+}
+
+
+/**
+ * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+static void
+_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+ int was_on_pcie_device_list = 0;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+ if (pcie_device) {
+ if (!list_empty(&pcie_device->list)) {
+ list_del_init(&pcie_device->list);
+ was_on_pcie_device_list = 1;
+ pcie_device_put(pcie_device);
+ }
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ if (was_on_pcie_device_list) {
+ _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
+ pcie_device_put(pcie_device);
+ }
+}
+
+/**
+ * _scsih_pcie_device_add - add pcie_device object
+ * @ioc: per adapter object
+ * @pcie_device: pcie_device object
+ *
+ * This is added to the pcie_device_list linked list.
+ */
+static void
+_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
+ pcie_device->handle, (unsigned long long)pcie_device->wwid));
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+ ioc->name, __func__,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, __func__, pcie_device->enclosure_level,
+ pcie_device->connector_name));
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device_get(pcie_device);
+ list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
+ _scsih_pcie_device_remove(ioc, pcie_device);
+ } else if (!pcie_device->starget) {
+ if (!ioc->is_driver_loading) {
+/*TODO-- Need to find out whether this condition will occur or not*/
+ clear_bit(pcie_device->handle, ioc->pend_os_device_add);
+ }
+ } else
+ clear_bit(pcie_device->handle, ioc->pend_os_device_add);
+}
+
+/**
+ * _scsih_pcie_device_init_add - insert pcie_device to the init list.
+ * @ioc: per adapter object
+ * @pcie_device: the pcie_device object
+ * Context: This function will acquire ioc->pcie_device_lock.
+ *
+ * Adds the new object to the ioc->pcie_device_init_list at driver load time.
+ */
+static void
+_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
+ pcie_device->handle, (unsigned long long)pcie_device->wwid));
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+ ioc->name, __func__,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, __func__, pcie_device->enclosure_level,
+ pcie_device->connector_name));
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device_get(pcie_device);
+ list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
+ _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
/**
* _scsih_raid_device_find_by_id - raid device search
* @ioc: per adapter object
@@ -1062,6 +1428,23 @@ _scsih_is_end_device(u32 device_info)
}
/**
+ * _scsih_is_nvme_device - determines if device is an nvme device
+ * @device_info: bitfield providing information about the device.
+ * Context: none
+ *
+ * Returns 1 if nvme device.
+ */
+static int
+_scsih_is_nvme_device(u32 device_info)
+{
+ if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
+ == MPI26_PCIE_DEVINFO_NVME)
+ return 1;
+ else
+ return 0;
+}
+
+/**
* _scsih_scsi_lookup_get - returns scmd entry
* @ioc: per adapter object
* @smid: system request message index
@@ -1278,6 +1661,7 @@ scsih_target_alloc(struct scsi_target *starget)
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _sas_device *sas_device;
struct _raid_device *raid_device;
+ struct _pcie_device *pcie_device;
unsigned long flags;
struct sas_rphy *rphy;
@@ -1307,6 +1691,28 @@ scsih_target_alloc(struct scsi_target *starget)
return 0;
}
+ /* PCIe devices */
+ if (starget->channel == PCIE_CHANNEL) {
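+ /* NVMe devices are exposed on the dedicated PCIE_CHANNEL, so
+ * the lookup is by id and channel rather than by sas_rphy.
+ */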
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
+ starget->channel);
+ if (pcie_device) {
+ sas_target_priv_data->handle = pcie_device->handle;
+ sas_target_priv_data->sas_address = pcie_device->wwid;
+ sas_target_priv_data->pcie_dev = pcie_device;
+ pcie_device->starget = starget;
+ pcie_device->id = starget->id;
+ pcie_device->channel = starget->channel;
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_PCIE_DEVICE;
+ if (pcie_device->fast_path)
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FASTPATH_IO;
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ return 0;
+ }
+
/* sas/sata devices */
spin_lock_irqsave(&ioc->sas_device_lock, flags);
rphy = dev_to_rphy(starget->dev.parent);
@@ -1316,7 +1722,7 @@ scsih_target_alloc(struct scsi_target *starget)
if (sas_device) {
sas_target_priv_data->handle = sas_device->handle;
sas_target_priv_data->sas_address = sas_device->sas_address;
- sas_target_priv_data->sdev = sas_device;
+ sas_target_priv_data->sas_dev = sas_device;
sas_device->starget = starget;
sas_device->id = starget->id;
sas_device->channel = starget->channel;
@@ -1324,7 +1730,8 @@ scsih_target_alloc(struct scsi_target *starget)
sas_target_priv_data->flags |=
MPT_TARGET_FLAGS_RAID_COMPONENT;
if (sas_device->fast_path)
- sas_target_priv_data->flags |= MPT_TARGET_FASTPATH_IO;
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FASTPATH_IO;
}
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -1345,7 +1752,9 @@ scsih_target_destroy(struct scsi_target *starget)
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _sas_device *sas_device;
struct _raid_device *raid_device;
+ struct _pcie_device *pcie_device;
unsigned long flags;
+ struct sas_rphy *rphy;
sas_target_priv_data = starget->hostdata;
if (!sas_target_priv_data)
@@ -1363,7 +1772,29 @@ scsih_target_destroy(struct scsi_target *starget)
goto out;
}
+ if (starget->channel == PCIE_CHANNEL) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_from_target(ioc,
+ sas_target_priv_data);
+ if (pcie_device && (pcie_device->starget == starget) &&
+ (pcie_device->id == starget->id) &&
+ (pcie_device->channel == starget->channel))
+ pcie_device->starget = NULL;
+
+ if (pcie_device) {
+ /*
+ * Corresponding get() is in _scsih_target_alloc()
+ */
+ sas_target_priv_data->pcie_dev = NULL;
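+ /* Two puts: one for the __mpt3sas_get_pdev_from_target()
+ * lookup above, one for the reference taken when the target
+ * was allocated.
+ */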
+ pcie_device_put(pcie_device);
+ pcie_device_put(pcie_device);
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ goto out;
+ }
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ rphy = dev_to_rphy(starget->dev.parent);
sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
if (sas_device && (sas_device->starget == starget) &&
(sas_device->id == starget->id) &&
@@ -1374,7 +1805,7 @@ scsih_target_destroy(struct scsi_target *starget)
/*
* Corresponding get() is in _scsih_target_alloc()
*/
- sas_target_priv_data->sdev = NULL;
+ sas_target_priv_data->sas_dev = NULL;
sas_device_put(sas_device);
sas_device_put(sas_device);
@@ -1403,6 +1834,7 @@ scsih_slave_alloc(struct scsi_device *sdev)
struct scsi_target *starget;
struct _raid_device *raid_device;
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
unsigned long flags;
sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
@@ -1431,8 +1863,22 @@ scsih_slave_alloc(struct scsi_device *sdev)
raid_device->sdev = sdev; /* raid is single lun */
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
+ if (starget->channel == PCIE_CHANNEL) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
+ sas_target_priv_data->sas_address);
+ if (pcie_device && (pcie_device->starget == NULL)) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s : pcie_device->starget set to starget @ %d\n",
+ __func__, __LINE__);
+ pcie_device->starget = starget;
+ }
+
+ if (pcie_device)
+ pcie_device_put(pcie_device);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
- if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
sas_target_priv_data->sas_address);
@@ -1466,6 +1912,7 @@ scsih_slave_destroy(struct scsi_device *sdev)
struct Scsi_Host *shost;
struct MPT3SAS_ADAPTER *ioc;
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
unsigned long flags;
if (!sdev->hostdata)
@@ -1478,7 +1925,19 @@ scsih_slave_destroy(struct scsi_device *sdev)
shost = dev_to_shost(&starget->dev);
ioc = shost_priv(shost);
- if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_from_target(ioc,
+ sas_target_priv_data);
+ if (pcie_device && !sas_target_priv_data->num_luns)
+ pcie_device->starget = NULL;
+
+ if (pcie_device)
+ pcie_device_put(pcie_device);
+
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_from_target(ioc,
sas_target_priv_data);
@@ -1562,6 +2021,14 @@ scsih_is_raid(struct device *dev)
return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
}
+static int
+scsih_is_nvme(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
+}
+
/**
* scsih_get_resync - get raid volume resync percent complete
* @dev the device struct object
@@ -1837,6 +2304,7 @@ scsih_slave_configure(struct scsi_device *sdev)
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
struct _raid_device *raid_device;
unsigned long flags;
int qdepth;
@@ -1967,6 +2435,55 @@ scsih_slave_configure(struct scsi_device *sdev)
}
}
+ /* PCIe handling */
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
+ sas_device_priv_data->sas_target->sas_address);
+ if (!pcie_device) {
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
+ }
+
+ qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
+ ds = "NVMe";
+ sdev_printk(KERN_INFO, sdev,
+ "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
+ ds, handle, (unsigned long long)pcie_device->wwid,
+ pcie_device->port_num);
+ if (pcie_device->enclosure_handle != 0)
+ sdev_printk(KERN_INFO, sdev,
+ "%s: enclosure logical id(0x%016llx), slot(%d)\n",
+ ds,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0] != '\0')
+ sdev_printk(KERN_INFO, sdev,
+ "%s: enclosure level(0x%04x),"
+ "connector name( %s)\n", ds,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ scsih_change_queue_depth(sdev, qdepth);
+
+ if (pcie_device->nvme_mdts)
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ pcie_device->nvme_mdts/512);
+ pcie_device_put(pcie_device);
+ /* Enable the QUEUE_FLAG_NOMERGES flag so that IOs are not
+ * merged; this eliminates the holes a merge could otherwise
+ * create in the transfer.
+ */
+ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES,
+ sdev->request_queue);
+ blk_queue_virt_boundary(sdev->request_queue,
+ ioc->page_size - 1);
+ return 0;
+ }
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
sas_device_priv_data->sas_target->sas_address);
@@ -2005,16 +2522,8 @@ scsih_slave_configure(struct scsi_device *sdev)
"sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
ds, handle, (unsigned long long)sas_device->sas_address,
sas_device->phy, (unsigned long long)sas_device->device_name);
- if (sas_device->enclosure_handle != 0)
- sdev_printk(KERN_INFO, sdev,
- "%s: enclosure_logical_id(0x%016llx), slot(%d)\n",
- ds, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot);
- if (sas_device->connector_name[0] != '\0')
- sdev_printk(KERN_INFO, sdev,
- "%s: enclosure level(0x%04x), connector name( %s)\n",
- ds, sas_device->enclosure_level,
- sas_device->connector_name);
+
+ _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
sas_device_put(sas_device);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -2400,6 +2909,7 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
struct scsi_target *starget = scmd->device->sdev_target;
struct MPT3SAS_TARGET *priv_target = starget->hostdata;
struct _sas_device *sas_device = NULL;
+ struct _pcie_device *pcie_device = NULL;
unsigned long flags;
char *device_str = NULL;
@@ -2416,6 +2926,31 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
"%s handle(0x%04x), %s wwid(0x%016llx)\n",
device_str, priv_target->handle,
device_str, (unsigned long long)priv_target->sas_address);
+
+ } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
+ if (pcie_device) {
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
+ pcie_device->handle,
+ (unsigned long long)pcie_device->wwid,
+ pcie_device->port_num);
+ if (pcie_device->enclosure_handle != 0)
+ starget_printk(KERN_INFO, starget,
+ "enclosure logical id(0x%016llx), slot(%d)\n",
+ (unsigned long long)
+ pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0] != '\0')
+ starget_printk(KERN_INFO, starget,
+ "enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
+ pcie_device_put(pcie_device);
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
} else {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
@@ -2433,17 +2968,9 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
sas_device->handle,
(unsigned long long)sas_device->sas_address,
sas_device->phy);
- if (sas_device->enclosure_handle != 0)
- starget_printk(KERN_INFO, starget,
- "enclosure_logical_id(0x%016llx), slot(%d)\n",
- (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot);
- if (sas_device->connector_name[0] != '\0')
- starget_printk(KERN_INFO, starget,
- "enclosure level(0x%04x),connector name(%s)\n",
- sas_device->enclosure_level,
- sas_device->connector_name);
+
+ _scsih_display_enclosure_chassis_info(NULL, sas_device,
+ NULL, starget);
sas_device_put(sas_device);
}
@@ -3007,8 +3534,6 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
struct _sas_device *sas_device;
sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
- if (!sas_device)
- return;
shost_for_each_device(sdev, ioc->shost) {
sas_device_priv_data = sdev->hostdata;
@@ -3018,7 +3543,7 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
continue;
if (sas_device_priv_data->block)
continue;
- if (sas_device->pend_sas_rphy_add)
+ if (sas_device && sas_device->pend_sas_rphy_add)
continue;
if (sas_device_priv_data->ignore_delay_remove) {
sdev_printk(KERN_INFO, sdev,
@@ -3029,7 +3554,8 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
_scsih_internal_device_block(sdev, sas_device_priv_data);
}
- sas_device_put(sas_device);
+ if (sas_device)
+ sas_device_put(sas_device);
}
/**
@@ -3113,6 +3639,33 @@ _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _scsih_block_io_to_pcie_children_attached_directly
+ * @ioc: per adapter object
+ * @event_data: topology change event data
+ *
+ * This routine sets the sdev state to SDEV_BLOCK for all devices
+ * directly attached during device pull/reconnect.
+ */
+static void
+_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26EventDataPCIeTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle =
+ le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ reason_code = event_data->PortEntry[i].PortStatus;
+ if (reason_code ==
+ MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
+ _scsih_block_io_device(ioc, handle);
+ }
+}
+/**
* _scsih_tm_tr_send - send task management request
* @ioc: per adapter object
* @handle: device handle
@@ -3133,18 +3686,14 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
Mpi2SCSITaskManagementRequest_t *mpi_request;
u16 smid;
struct _sas_device *sas_device = NULL;
+ struct _pcie_device *pcie_device = NULL;
struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
u64 sas_address = 0;
unsigned long flags;
struct _tr_list *delayed_tr;
u32 ioc_state;
- if (ioc->remove_host) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host has been removed: handle(0x%04x)\n",
- __func__, ioc->name, handle));
- return;
- } else if (ioc->pci_error_recovery) {
+ if (ioc->pci_error_recovery) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host in pci error recovery: handle(0x%04x)\n",
__func__, ioc->name,
@@ -3175,24 +3724,52 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_address = sas_device->sas_address;
}
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
+ if (!sas_device) {
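+ /* No SAS device matched this handle; check whether it is an
+ * NVMe (PCIe) device instead.
+ */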
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+ if (pcie_device && pcie_device->starget &&
+ pcie_device->starget->hostdata) {
+ sas_target_priv_data = pcie_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ sas_address = pcie_device->wwid;
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ }
if (sas_target_priv_data) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
ioc->name, handle,
(unsigned long long)sas_address));
- if (sas_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag:enclosure logical id(0x%016llx),"
- " slot(%d)\n", ioc->name, (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot));
- if (sas_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: enclosure level(0x%04x),"
- " connector name( %s)\n", ioc->name,
- sas_device->enclosure_level,
- sas_device->connector_name));
+ if (sas_device) {
+ if (sas_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag:enclosure logical "
+ "id(0x%016llx), slot(%d)\n", ioc->name,
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot));
+ if (sas_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag: enclosure "
+ "level(0x%04x), connector name( %s)\n",
+ ioc->name, sas_device->enclosure_level,
+ sas_device->connector_name));
+ } else if (pcie_device) {
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag: logical "
+ "id(0x%016llx), slot(%d)\n", ioc->name,
+ (unsigned long long)
+ pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag:, enclosure "
+ "level(0x%04x), "
+ "connector name( %s)\n", ioc->name,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
+ }
_scsih_ublock_io_device(ioc, sas_address);
sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
}
@@ -3227,6 +3804,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
out:
if (sas_device)
sas_device_put(sas_device);
+ if (pcie_device)
+ pcie_device_put(pcie_device);
}
/**
@@ -3731,6 +4310,81 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _scsih_check_pcie_topo_remove_events - sanity check on topo
+ * events
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ *
+ * This handles the case where the driver receives multiple switch
+ * or device add and delete events in a single shot. When there
+ * is a delete event the routine will void any pending add
+ * events waiting in the event queue.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26EventDataPCIeTopologyChangeList_t *event_data)
+{
+ struct fw_event_work *fw_event;
+ Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
+ unsigned long flags;
+ int i, reason_code;
+ u16 handle, switch_handle;
+
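+ /* Send a target reset (device removal handshake) for every
+ * attached device reported as not responding.
+ */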
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle =
+ le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ reason_code = event_data->PortEntry[i].PortStatus;
+ if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
+ _scsih_tm_tr_send(ioc, handle);
+ }
+
+ switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
+ if (!switch_handle) {
+ _scsih_block_io_to_pcie_children_attached_directly(
+ ioc, event_data);
+ return;
+ }
+ /* TODO We are not supporting cascaded PCIe Switch removal yet*/
+ if ((event_data->SwitchStatus
+ == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
+ (event_data->SwitchStatus ==
+ MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
+ _scsih_block_io_to_pcie_children_attached_directly(
+ ioc, event_data);
+
+ if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
+ return;
+
+ /* mark ignore flag for pending events */
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
+ if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
+ fw_event->ignore)
+ continue;
+ local_event_data =
+ (Mpi26EventDataPCIeTopologyChangeList_t *)
+ fw_event->event_data;
+ if (local_event_data->SwitchStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_ADDED ||
+ local_event_data->SwitchStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
+ if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
+ switch_handle) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting ignoring flag for switch event\n",
+ ioc->name));
+ fw_event->ignore = 1;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/**
* _scsih_set_volume_delete_flag - setting volume delete flag
* @ioc: per adapter object
* @handle: device handle
@@ -3979,7 +4633,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
*/
static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- Mpi2SCSIIORequest_t *mpi_request)
+ Mpi25SCSIIORequest_t *mpi_request)
{
u16 eedp_flags;
unsigned char prot_op = scsi_get_prot_op(scmd);
@@ -4082,7 +4736,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
struct _raid_device *raid_device;
struct request *rq = scmd->request;
int class;
- Mpi2SCSIIORequest_t *mpi_request;
+ Mpi25SCSIIORequest_t *mpi_request;
+ struct _pcie_device *pcie_device = NULL;
u32 mpi_control;
u16 smid;
u16 handle;
@@ -4159,8 +4814,9 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
/* Make sure Device is not raid volume.
* We do not expose raid functionality to upper layer for warpdrive.
*/
- if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)
- && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
+ if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
+ && !scsih_is_nvme(&scmd->device->sdev_gendev))
+ && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
@@ -4170,7 +4826,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
goto out;
}
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
- memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
+ memset(mpi_request, 0, ioc->request_sz);
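+ /* Clear the entire request frame, not just the Mpi2 SCSI IO
+ * layout, so no stale data remains in the portion an
+ * NVMe-capable request may use.
+ */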
_scsih_setup_eedp(ioc, scmd, mpi_request);
if (scmd->cmd_len == 32)
@@ -4189,13 +4845,14 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
mpi_request->SenseBufferLowAddress =
mpt3sas_base_get_sense_buffer_dma(ioc, smid);
- mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
+ mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
mpi_request->LUN);
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
if (mpi_request->DataLength) {
- if (ioc->build_sg_scmd(ioc, scmd, smid)) {
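+ /* Pass the pcie_device so the SG build routine can set up the
+ * transfer appropriately (e.g. NVMe PRPs) for PCIe devices.
+ */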
+ pcie_device = sas_target_priv_data->pcie_dev;
+ if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
mpt3sas_base_free_smid(ioc, smid);
goto out;
}
@@ -4204,8 +4861,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
raid_device = sas_target_priv_data->raid_device;
if (raid_device && raid_device->direct_io_enabled)
- mpt3sas_setup_direct_io(ioc, scmd, raid_device, mpi_request,
- smid);
+ mpt3sas_setup_direct_io(ioc, scmd,
+ raid_device, mpi_request, smid);
if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
@@ -4273,6 +4930,7 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
char *desc_scsi_state = ioc->tmp_string;
u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
struct _sas_device *sas_device = NULL;
+ struct _pcie_device *pcie_device = NULL;
struct scsi_target *starget = scmd->device->sdev_target;
struct MPT3SAS_TARGET *priv_target = starget->hostdata;
char *device_str = NULL;
@@ -4405,6 +5063,28 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
device_str, (unsigned long long)priv_target->sas_address);
+ } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
+ pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
+ if (pcie_device) {
+ pr_info(MPT3SAS_FMT "\twwid(0x%016llx), port(%d)\n",
+ ioc->name,
+ (unsigned long long)pcie_device->wwid,
+ pcie_device->port_num);
+ if (pcie_device->enclosure_handle != 0)
+ pr_info(MPT3SAS_FMT
+ "\tenclosure logical id(0x%016llx), "
+ "slot(%d)\n", ioc->name,
+ (unsigned long long)
+ pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0])
+ pr_info(MPT3SAS_FMT
+ "\tenclosure level(0x%04x),"
+ "connector name( %s)\n",
+ ioc->name, pcie_device->enclosure_level,
+ pcie_device->connector_name);
+ pcie_device_put(pcie_device);
+ }
} else {
sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
if (sas_device) {
@@ -4412,19 +5092,9 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
"\tsas_address(0x%016llx), phy(%d)\n",
ioc->name, (unsigned long long)
sas_device->sas_address, sas_device->phy);
- if (sas_device->enclosure_handle != 0)
- pr_warn(MPT3SAS_FMT
- "\tenclosure_logical_id(0x%016llx),"
- "slot(%d)\n", ioc->name,
- (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot);
- if (sas_device->connector_name[0])
- pr_warn(MPT3SAS_FMT
- "\tenclosure level(0x%04x),"
- " connector name( %s)\n", ioc->name,
- sas_device->enclosure_level,
- sas_device->connector_name);
+
+ _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL);
sas_device_put(sas_device);
}
@@ -4451,11 +5121,10 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct sense_info data;
_scsih_normalize_sense(scmd->sense_buffer, &data);
pr_warn(MPT3SAS_FMT
- "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
- ioc->name, data.skey,
- data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
+ "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
+ ioc->name, data.skey,
+ data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
}
-
if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
response_info = le32_to_cpu(mpi_reply->ResponseInfo);
response_bytes = (u8 *)&response_info;
@@ -4602,16 +5271,8 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
goto out_unlock;
- if (sas_device->enclosure_handle != 0)
- starget_printk(KERN_INFO, starget, "predicted fault, "
- "enclosure logical id(0x%016llx), slot(%d)\n",
- (unsigned long long)sas_device->enclosure_logical_id,
- sas_device->slot);
- if (sas_device->connector_name[0] != '\0')
- starget_printk(KERN_WARNING, starget, "predicted fault, "
- "enclosure level(0x%04x), connector name( %s)\n",
- sas_device->enclosure_level,
- sas_device->connector_name);
+ _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
+
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
@@ -4666,7 +5327,7 @@ out_unlock:
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
- Mpi2SCSIIORequest_t *mpi_request;
+ Mpi25SCSIIORequest_t *mpi_request;
Mpi2SCSIIOReply_t *mpi_reply;
struct scsi_cmnd *scmd;
u16 ioc_status;
@@ -4731,9 +5392,10 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
if (!sas_device_priv_data->tlr_snoop_check) {
sas_device_priv_data->tlr_snoop_check++;
- if (!ioc->is_warpdrive &&
+ if ((!ioc->is_warpdrive &&
!scsih_is_raid(&scmd->device->sdev_gendev) &&
- sas_is_tlr_enabled(scmd->device) &&
+ !scsih_is_nvme(&scmd->device->sdev_gendev))
+ && sas_is_tlr_enabled(scmd->device) &&
response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
sas_disable_tlr(scmd->device);
sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
@@ -4804,6 +5466,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
scmd->result = DID_RESET << 16;
break;
+ } else if ((scmd->device->channel == RAID_CHANNEL) &&
+ (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
+ MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
+ scmd->result = DID_RESET << 16;
+ break;
}
scmd->result = DID_SOFT_ERROR << 16;
break;
@@ -5274,8 +5941,6 @@ mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
sas_address);
- if (sas_expander)
- list_del(&sas_expander->list);
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (sas_expander)
_scsih_expander_node_remove(ioc, sas_expander);
@@ -5386,6 +6051,52 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
}
/**
+ * _scsih_get_enclosure_logicalid_chassis_slot - get device's
+ * EnclosureLogicalID and ChassisSlot information.
+ * @ioc: per adapter object
+ * @sas_device_pg0: SAS device page0
+ * @sas_device: per sas device object
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_get_enclosure_logicalid_chassis_slot(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SasDevicePage0_t *sas_device_pg0, struct _sas_device *sas_device)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+
+ if (!sas_device_pg0 || !sas_device)
+ return;
+
+ sas_device->enclosure_handle =
+ le16_to_cpu(sas_device_pg0->EnclosureHandle);
+ sas_device->is_chassis_slot_valid = 0;
+
+ if (!le16_to_cpu(sas_device_pg0->EnclosureHandle))
+ return;
+
+ if (mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ le16_to_cpu(sas_device_pg0->EnclosureHandle))) {
+ pr_err(MPT3SAS_FMT
+ "Enclosure Pg0 read failed for handle(0x%04x)\n",
+ ioc->name, le16_to_cpu(sas_device_pg0->EnclosureHandle));
+ return;
+ }
+
+ sas_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+
+ if (le16_to_cpu(enclosure_pg0.Flags) &
+ MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
+ sas_device->is_chassis_slot_valid = 1;
+ sas_device->chassis_slot = enclosure_pg0.ChassisSlot;
+ }
+}
+
+
+/**
* _scsih_check_device - checking device responsiveness
* @ioc: per adapter object
* @parent_sas_address: sas address of parent expander or sas host
@@ -5409,7 +6120,6 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
struct MPT3SAS_TARGET *sas_target_priv_data;
u32 device_info;
-
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
return;
@@ -5456,6 +6166,9 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->enclosure_level = 0;
sas_device->connector_name[0] = '\0';
}
+
+ _scsih_get_enclosure_logicalid_chassis_slot(ioc,
+ &sas_device_pg0, sas_device);
}
/* check if device is present */
@@ -5507,6 +6220,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
u32 ioc_status;
u64 sas_address;
u32 device_info;
+ int encl_pg0_rc = -1;
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@@ -5551,6 +6265,16 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
return -1;
}
+ if (sas_device_pg0.EnclosureHandle) {
+ encl_pg0_rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ le16_to_cpu(sas_device_pg0.EnclosureHandle));
+ if (encl_pg0_rc)
+ pr_info(MPT3SAS_FMT
+ "Enclosure Pg0 read failed for handle(0x%04x)\n",
+ ioc->name, le16_to_cpu(sas_device_pg0.EnclosureHandle));
+ }
+
sas_device = kzalloc(sizeof(struct _sas_device),
GFP_KERNEL);
if (!sas_device) {
@@ -5588,13 +6312,21 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device->enclosure_level = 0;
sas_device->connector_name[0] = '\0';
}
- /* get enclosure_logical_id */
- if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0(
- ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
- sas_device->enclosure_handle)))
+
+ /* get enclosure_logical_id & chassis_slot */
+ sas_device->is_chassis_slot_valid = 0;
+ if (encl_pg0_rc == 0) {
sas_device->enclosure_logical_id =
le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ if (le16_to_cpu(enclosure_pg0.Flags) &
+ MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
+ sas_device->is_chassis_slot_valid = 1;
+ sas_device->chassis_slot =
+ enclosure_pg0.ChassisSlot;
+ }
+ }
+
/* get device name */
sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
@@ -5625,23 +6357,15 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
_scsih_turn_off_pfa_led(ioc, sas_device);
sas_device->pfa_led_on = 0;
}
+
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
ioc->name, __func__,
sas_device->handle, (unsigned long long)
sas_device->sas_address));
- if (sas_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name, __func__,
- (unsigned long long)sas_device->enclosure_logical_id,
- sas_device->slot));
- if (sas_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__,
- sas_device->enclosure_level,
- sas_device->connector_name));
+
+ dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL));
if (sas_device->starget && sas_device->starget->hostdata) {
sas_target_priv_data = sas_device->starget->hostdata;
@@ -5660,34 +6384,16 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
"removing handle(0x%04x), sas_addr(0x%016llx)\n",
ioc->name, sas_device->handle,
(unsigned long long) sas_device->sas_address);
- if (sas_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "removing : enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name,
- (unsigned long long)sas_device->enclosure_logical_id,
- sas_device->slot);
- if (sas_device->connector_name[0] != '\0')
- pr_info(MPT3SAS_FMT
- "removing enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, sas_device->enclosure_level,
- sas_device->connector_name);
+
+ _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
ioc->name, __func__,
sas_device->handle, (unsigned long long)
sas_device->sas_address));
- if (sas_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name, __func__,
- (unsigned long long)sas_device->enclosure_logical_id,
- sas_device->slot));
- if (sas_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: enclosure level(0x%04x), connector name(%s)\n",
- ioc->name, __func__, sas_device->enclosure_level,
- sas_device->connector_name));
+ dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL));
}
/**
@@ -6028,7 +6734,705 @@ out:
sas_device_put(sas_device);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+
+/**
+ * _scsih_check_pcie_access_status - check access flags
+ * @ioc: per adapter object
+ * @wwid: wwid
+ * @handle: device handle
+ * @access_status: errors returned during discovery of the device
+ *
+ * Return 0 for success, else failure
+ */
+static u8
+_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
+ u16 handle, u8 access_status)
+{
+ u8 rc = 1;
+ char *desc = NULL;
+
+ switch (access_status) {
+ case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
+ case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
+ rc = 0;
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
+ desc = "PCIe device capability failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
+ desc = "PCIe device blocked";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
+ desc = "PCIe device mem space access failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
+ desc = "PCIe device unsupported";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
+ desc = "PCIe device MSIx Required";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
+ desc = "PCIe device init fail max";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
+ desc = "PCIe device status unknown";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
+ desc = "nvme ready timeout";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
+ desc = "nvme device configuration unsupported";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
+ desc = "nvme identify failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
+ desc = "nvme qconfig failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
+ desc = "nvme qcreation failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
+ desc = "nvme eventcfg failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
+ desc = "nvme get feature stat failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
+ desc = "nvme idle timeout";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
+ desc = "nvme failure status";
+ break;
+ default:
+ pr_err(MPT3SAS_FMT
+ " NVMe discovery error(0x%02x): wwid(0x%016llx),"
+ "handle(0x%04x)\n", ioc->name, access_status,
+ (unsigned long long)wwid, handle);
+ return rc;
+ }
+
+ if (!rc)
+ return rc;
+
+ pr_info(MPT3SAS_FMT
+ "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
+ ioc->name, desc,
+ (unsigned long long)wwid, handle);
+ return rc;
+}
+
+/**
+ * _scsih_pcie_device_remove_from_sml - removing pcie device
+ * from SML and freeing up associated memory
+ * @ioc: per adapter object
+ * @pcie_device: the pcie_device object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
+ pcie_device->handle, (unsigned long long)
+ pcie_device->wwid));
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
+ ioc->name, __func__,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, __func__,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
+
+ if (pcie_device->starget && pcie_device->starget->hostdata) {
+ sas_target_priv_data = pcie_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ _scsih_ublock_io_device(ioc, pcie_device->wwid);
+ sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+ }
+
+ pr_info(MPT3SAS_FMT
+ "removing handle(0x%04x), wwid (0x%016llx)\n",
+ ioc->name, pcie_device->handle,
+ (unsigned long long) pcie_device->wwid);
+ if (pcie_device->enclosure_handle != 0)
+ pr_info(MPT3SAS_FMT
+ "removing : enclosure logical id(0x%016llx), slot(%d)\n",
+ ioc->name,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0] != '\0')
+ pr_info(MPT3SAS_FMT
+ "removing: enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, pcie_device->enclosure_level,
+ pcie_device->connector_name);
+
+ if (pcie_device->starget)
+ scsi_remove_target(&pcie_device->starget->dev);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
+ pcie_device->handle, (unsigned long long)
+ pcie_device->wwid));
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
+ ioc->name, __func__,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, __func__, pcie_device->enclosure_level,
+ pcie_device->connector_name));
+
+ kfree(pcie_device->serial_number);
+}
+
+
+/**
+ * _scsih_pcie_check_device - checking device responsiveness
+ * @ioc: per adapter object
+ * @handle: attached device handle
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi26PCIeDevicePage0_t pcie_device_pg0;
+ u32 ioc_status;
+ struct _pcie_device *pcie_device;
+ u64 wwid;
+ unsigned long flags;
+ struct scsi_target *starget;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ u32 device_info;
+
+ if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+ &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ return;
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
+ if (!(_scsih_is_nvme_device(device_info)))
+ return;
+
+ wwid = le64_to_cpu(pcie_device_pg0.WWID);
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
+
+ if (!pcie_device) {
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ return;
+ }
+
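+ /* the firmware may have assigned a new handle to this device;
+ * sync our cached handle and the target's private data
+ */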
+ if (unlikely(pcie_device->handle != handle)) {
+ starget = pcie_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ starget_printk(KERN_INFO, starget,
+ "handle changed from(0x%04x) to (0x%04x)!!!\n",
+ pcie_device->handle, handle);
+ sas_target_priv_data->handle = handle;
+ pcie_device->handle = handle;
+
+ if (le32_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
+ pcie_device->enclosure_level =
+ pcie_device_pg0.EnclosureLevel;
+ memcpy(&pcie_device->connector_name[0],
+ &pcie_device_pg0.ConnectorName[0], 4);
+ } else {
+ pcie_device->enclosure_level = 0;
+ pcie_device->connector_name[0] = '\0';
+ }
+ }
+
+ /* check if device is present */
+ if (!(le32_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
+ pr_info(MPT3SAS_FMT
+ "device is not present handle(0x%04x), flags!!!\n",
+ ioc->name, handle);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ pcie_device_put(pcie_device);
+ return;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_pcie_access_status(ioc, wwid, handle,
+ pcie_device_pg0.AccessStatus)) {
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ pcie_device_put(pcie_device);
+ return;
+ }
+
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ pcie_device_put(pcie_device);
+
+ _scsih_ublock_io_device(ioc, wwid);
+
+ return;
+}
+
+/**
+ * _scsih_pcie_add_device - creating pcie device object
+ * @ioc: per adapter object
+ * @handle: pcie device handle
+ *
+ * Creating end device object, stored in ioc->pcie_device_list.
+ *
+ * Return 1 means queue the event later, 0 means complete the event
+ */
+static int
+_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi26PCIeDevicePage0_t pcie_device_pg0;
+ Mpi26PCIeDevicePage2_t pcie_device_pg2;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ struct _pcie_device *pcie_device;
+ u32 pcie_device_type;
+ u32 ioc_status;
+ u64 wwid;
+
+ if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+ &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 0;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 0;
+ }
+
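+ /* mark this handle as pending an OS device add until it is exposed */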
+ set_bit(handle, ioc->pend_os_device_add);
+ wwid = le64_to_cpu(pcie_device_pg0.WWID);
+
+ /* check if device is present */
+ if (!(le32_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
+ pr_err(MPT3SAS_FMT
+ "device is not present handle(0x04%x)!!!\n",
+ ioc->name, handle);
+ return 0;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_pcie_access_status(ioc, wwid, handle,
+ pcie_device_pg0.AccessStatus))
+ return 0;
+
+ if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo))))
+ return 0;
+
+ pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
+ if (pcie_device) {
+ clear_bit(handle, ioc->pend_os_device_add);
+ pcie_device_put(pcie_device);
+ return 0;
+ }
+
+ pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
+ if (!pcie_device) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 0;
+ }
+
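+ /* initial reference; dropped by the pcie_device_put() at the end of this function */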
+ kref_init(&pcie_device->refcount);
+ pcie_device->id = ioc->pcie_target_id++;
+ pcie_device->channel = PCIE_CHANNEL;
+ pcie_device->handle = handle;
+ pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
+ pcie_device->wwid = wwid;
+ pcie_device->port_num = pcie_device_pg0.PortNum;
+ pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
+ pcie_device_type = pcie_device->device_info &
+ MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE;
+
+ pcie_device->enclosure_handle =
+ le16_to_cpu(pcie_device_pg0.EnclosureHandle);
+ if (pcie_device->enclosure_handle != 0)
+ pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
+
+ if (le16_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
+ pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
+ memcpy(&pcie_device->connector_name[0],
+ &pcie_device_pg0.ConnectorName[0], 4);
+ } else {
+ pcie_device->enclosure_level = 0;
+ pcie_device->connector_name[0] = '\0';
+ }
+
+ /* get enclosure_logical_id */
+ if (pcie_device->enclosure_handle &&
+ !(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ pcie_device->enclosure_handle)))
+ pcie_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+
+ /* TODO -- Add device name once FW supports it */
+ if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
+ &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ kfree(pcie_device);
+ return 0;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ kfree(pcie_device);
+ return 0;
+ }
+ pcie_device->nvme_mdts =
+ le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
+
+ if (ioc->wait_for_discovery_to_complete)
+ _scsih_pcie_device_init_add(ioc, pcie_device);
+ else
+ _scsih_pcie_device_add(ioc, pcie_device);
+
+ pcie_device_put(pcie_device);
+ return 0;
+}
+
+/**
+ * _scsih_pcie_topology_change_event_debug - debug for topology
+ * event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ */
+static void
+_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26EventDataPCIeTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 port_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->SwitchStatus) {
+ case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
+ status_str = "add";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
+ case 0:
+ status_str = "responding";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ pr_info(MPT3SAS_FMT "pcie topology change: (%s)\n",
+ ioc->name, status_str);
+ pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
+ "start_port(%02d), count(%d)\n",
+ le16_to_cpu(event_data->SwitchDevHandle),
+ le16_to_cpu(event_data->EnclosureHandle),
+ event_data->StartPortNum, event_data->NumEntries);
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle =
+ le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ port_number = event_data->StartPortNum + i;
+ reason_code = event_data->PortEntry[i].PortStatus;
+ switch (reason_code) {
+ case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
+ status_str = "target add";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+ status_str = "link rate change";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->PortEntry[i].CurrentPortInfo &
+ MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
+ MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
+ " link rate: new(0x%02x), old(0x%02x)\n", port_number,
+ handle, status_str, link_rate, prev_link_rate);
+ }
+}
+
+/**
+ * _scsih_pcie_topology_change_event - handle PCIe topology
+ * changes
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ */
+static int
+_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 link_rate, prev_link_rate;
+ unsigned long flags;
+ int rc;
+ int requeue_event = 0;
+ Mpi26EventDataPCIeTopologyChangeList_t *event_data =
+ (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
+ struct _pcie_device *pcie_device;
+
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_pcie_topology_change_event_debug(ioc, event_data);
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery)
+ return 0;
+
+ if (fw_event->ignore) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
+ ioc->name));
+ return 0;
+ }
+
+ /* handle siblings events */
+ for (i = 0; i < event_data->NumEntries; i++) {
+ if (fw_event->ignore) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "ignoring switch event\n", ioc->name));
+ return 0;
+ }
+ if (ioc->remove_host || ioc->pci_error_recovery)
+ return 0;
+ reason_code = event_data->PortEntry[i].PortStatus;
+ handle =
+ le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+
+ link_rate = event_data->PortEntry[i].CurrentPortInfo
+ & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
+ & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+
+ switch (reason_code) {
+ case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+ if (ioc->shost_recovery)
+ break;
+ if (link_rate == prev_link_rate)
+ break;
+ if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
+ break;
+
+ _scsih_pcie_check_device(ioc, handle);
+ /* The code below handles the case where a device has been
+ * added but keeps returning BUSY for some time. Then, before
+ * the Device Missing Delay expires and the device becomes
+ * READY, the device is removed and added back.
+ */
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ if (pcie_device) {
+ pcie_device_put(pcie_device);
+ break;
+ }
+
+ if (!test_bit(handle, ioc->pend_os_device_add))
+ break;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "handle(0x%04x) device not found: convert "
+ "event to a device add\n", ioc->name, handle));
+ event_data->PortEntry[i].PortStatus &= 0xF0;
+ event_data->PortEntry[i].PortStatus |=
+ MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
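+ /* fall through: handle the converted event as a device add */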
+ case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
+ if (ioc->shost_recovery)
+ break;
+ if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
+ break;
+
+ rc = _scsih_pcie_add_device(ioc, handle);
+ if (!rc) {
+ /* mark entry vacant */
+ /* TODO This needs to be reviewed and fixed,
+ * we dont have an entry
+ * to make an event void like vacant
+ */
+ event_data->PortEntry[i].PortStatus |=
+ MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
+ }
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ _scsih_pcie_device_remove_by_handle(ioc, handle);
+ break;
+ }
+ }
+ return requeue_event;
+}
+
+/**
+ * _scsih_pcie_device_status_change_event_debug - debug for
+ * device event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->ReasonCode) {
+ case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
+ reason_str = "smart data";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
+ reason_str = "unsupported device discovered";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
+ reason_str = "internal device reset";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
+ reason_str = "internal task abort set";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
+ reason_str = "internal clear task set";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
+ reason_str = "internal query task";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
+ reason_str = "device init failure";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+ reason_str = "internal device reset complete";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort complete";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
+ reason_str = "internal async notification";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+
+ pr_info(MPT3SAS_FMT "PCIE device status change: (%s)\n"
+ "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
+ ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
+ (unsigned long long)le64_to_cpu(event_data->WWID),
+ le16_to_cpu(event_data->TaskTag));
+ if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
+ pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
+ event_data->ASC, event_data->ASCQ);
+ pr_info("\n");
+}
+
+/**
+ * _scsih_pcie_device_status_change_event - handle device status
+ * change
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ struct MPT3SAS_TARGET *target_priv_data;
+ struct _pcie_device *pcie_device;
+ u64 wwid;
+ unsigned long flags;
+ Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
+ (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_pcie_device_status_change_event_debug(ioc,
+ event_data);
+
+ if (event_data->ReasonCode !=
+ MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
+ event_data->ReasonCode !=
+ MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
+ return;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ wwid = le64_to_cpu(event_data->WWID);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
+
+ if (!pcie_device || !pcie_device->starget)
+ goto out;
+
+ target_priv_data = pcie_device->starget->hostdata;
+ if (!target_priv_data)
+ goto out;
+
+ if (event_data->ReasonCode ==
+ MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
+ target_priv_data->tm_busy = 1;
+ else
+ target_priv_data->tm_busy = 0;
+out:
+ if (pcie_device)
+ pcie_device_put(pcie_device);
+
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
/**
@@ -6282,6 +7686,35 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _scsih_pcie_enumeration_event - handle enumeration events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi26EventDataPCIeEnumeration_t *event_data =
+ (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
+
+ if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
+ return;
+
+ pr_info(MPT3SAS_FMT "pcie enumeration event: (%s) Flag 0x%02x",
+ ioc->name,
+ (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
+ "started" : "completed",
+ event_data->Flags);
+ if (event_data->EnumerationStatus)
+ pr_cont("enumeration_status(0x%08x)",
+ le32_to_cpu(event_data->EnumerationStatus));
+ pr_cont("\n");
+}
+
+/**
* _scsih_ir_fastpath - turn on fastpath for IR physdisk
* @ioc: per adapter object
* @handle: device handle for physical disk
@@ -7085,7 +8518,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
{
struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
struct scsi_target *starget;
- struct _sas_device *sas_device;
+ struct _sas_device *sas_device = NULL;
unsigned long flags;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
@@ -7126,6 +8559,9 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
sas_device->connector_name[0] = '\0';
}
+ _scsih_get_enclosure_logicalid_chassis_slot(ioc,
+ sas_device_pg0, sas_device);
+
if (sas_device->handle == sas_device_pg0->DevHandle)
goto out;
pr_info("\thandle changed from(0x%04x)!!!\n",
@@ -7190,6 +8626,130 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
+ * @ioc: per adapter object
+ * @pcie_device_pg0: PCIe Device page 0
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26PCIeDevicePage0_t *pcie_device_pg0)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+ struct scsi_target *starget;
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
+ if ((pcie_device->wwid == pcie_device_pg0->WWID) &&
+ (pcie_device->slot == pcie_device_pg0->Slot)) {
+ pcie_device->responding = 1;
+ starget = pcie_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ if (starget) {
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), wwid(0x%016llx) ",
+ pcie_device->handle,
+ (unsigned long long)pcie_device->wwid);
+ if (pcie_device->enclosure_handle != 0)
+ starget_printk(KERN_INFO, starget,
+ "enclosure logical id(0x%016llx), "
+ "slot(%d)\n",
+ (unsigned long long)
+ pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ }
+
+ if (((le32_to_cpu(pcie_device_pg0->Flags)) &
+ MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
+ (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
+ pcie_device->enclosure_level =
+ pcie_device_pg0->EnclosureLevel;
+ memcpy(&pcie_device->connector_name[0],
+ &pcie_device_pg0->ConnectorName[0], 4);
+ } else {
+ pcie_device->enclosure_level = 0;
+ pcie_device->connector_name[0] = '\0';
+ }
+
+ if (pcie_device->handle == pcie_device_pg0->DevHandle)
+ goto out;
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ pcie_device->handle);
+ pcie_device->handle = pcie_device_pg0->DevHandle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle =
+ pcie_device_pg0->DevHandle;
+ goto out;
+ }
+ }
+
+ out:
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_pcie_devices - search for responding PCIe devices
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26PCIeDevicePage0_t pcie_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 handle;
+ u32 device_info;
+
+ pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+
+ if (list_empty(&ioc->pcie_device_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+ &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from %s: "
+ "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name,
+ __func__, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(pcie_device_pg0.DevHandle);
+ device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
+ if (!(_scsih_is_nvme_device(device_info)))
+ continue;
+ pcie_device_pg0.WWID = le64_to_cpu(pcie_device_pg0.WWID);
+ pcie_device_pg0.Slot = le16_to_cpu(pcie_device_pg0.Slot);
+ pcie_device_pg0.Flags = le32_to_cpu(pcie_device_pg0.Flags);
+ pcie_device_pg0.DevHandle = handle;
+ _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
+ }
+out:
+ pr_info(MPT3SAS_FMT "search for PCIe end-devices: complete\n",
+ ioc->name);
+}
+
+/**
* _scsih_mark_responding_raid_device - mark a raid_device as responding
* @ioc: per adapter object
* @wwid: world wide identifier for raid volume
@@ -7322,8 +8882,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
/**
* _scsih_mark_responding_expander - mark a expander as responding
* @ioc: per adapter object
- * @sas_address: sas address
- * @handle:
+ * @expander_pg0: SAS Expander Config Page 0
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_expanders.
@@ -7331,18 +8890,41 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
* Return nothing.
*/
static void
-_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
- u16 handle)
+_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ExpanderPage0_t *expander_pg0)
{
- struct _sas_node *sas_expander;
+ struct _sas_node *sas_expander = NULL;
unsigned long flags;
- int i;
+ int i, encl_pg0_rc = -1;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ u16 handle = le16_to_cpu(expander_pg0->DevHandle);
+ u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
+
+ if (le16_to_cpu(expander_pg0->EnclosureHandle)) {
+ encl_pg0_rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ le16_to_cpu(expander_pg0->EnclosureHandle));
+ if (encl_pg0_rc)
+ pr_info(MPT3SAS_FMT
+ "Enclosure Pg0 read failed for handle(0x%04x)\n",
+ ioc->name,
+ le16_to_cpu(expander_pg0->EnclosureHandle));
+ }
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
if (sas_expander->sas_address != sas_address)
continue;
sas_expander->responding = 1;
+
+ if (!encl_pg0_rc)
+ sas_expander->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+
+ sas_expander->enclosure_handle =
+ le16_to_cpu(expander_pg0->EnclosureHandle);
+
if (sas_expander->handle == handle)
goto out;
pr_info("\texpander(0x%016llx): handle changed" \
@@ -7395,7 +8977,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
handle,
(unsigned long long)sas_address);
- _scsih_mark_responding_expander(ioc, sas_address, handle);
+ _scsih_mark_responding_expander(ioc, &expander_pg0);
}
out:
@@ -7403,17 +8985,18 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _scsih_remove_unresponding_sas_devices - removing unresponding devices
+ * _scsih_remove_unresponding_devices - removing unresponding devices
* @ioc: per adapter object
*
* Return nothing.
*/
static void
-_scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
+_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
struct _sas_device *sas_device, *sas_device_next;
struct _sas_node *sas_expander, *sas_expander_next;
struct _raid_device *raid_device, *raid_device_next;
+ struct _pcie_device *pcie_device, *pcie_device_next;
struct list_head tmp_list;
unsigned long flags;
LIST_HEAD(head);
@@ -7447,6 +9030,26 @@ _scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
sas_device_put(sas_device);
}
+ pr_info(MPT3SAS_FMT
+ "removing unresponding devices: pcie end-devices\n",
+ ioc->name);
+ INIT_LIST_HEAD(&head);
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ list_for_each_entry_safe(pcie_device, pcie_device_next,
+ &ioc->pcie_device_list, list) {
+ if (!pcie_device->responding)
+ list_move_tail(&pcie_device->list, &head);
+ else
+ pcie_device->responding = 0;
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
+ _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
+ list_del_init(&pcie_device->list);
+ pcie_device_put(pcie_device);
+ }
+
/* removing unresponding volumes */
if (ioc->ir_firmware) {
pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
@@ -7476,7 +9079,6 @@ _scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
list) {
- list_del(&sas_expander->list);
_scsih_expander_node_remove(ioc, sas_expander);
}
@@ -7520,6 +9122,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2ExpanderPage0_t expander_pg0;
Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi26PCIeDevicePage0_t pcie_device_pg0;
Mpi2RaidVolPage1_t volume_pg1;
Mpi2RaidVolPage0_t volume_pg0;
Mpi2RaidPhysDiskPage0_t pd_pg0;
@@ -7530,6 +9133,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
u16 handle, parent_handle;
u64 sas_address;
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
struct _sas_node *expander_device;
static struct _raid_device *raid_device;
u8 retry_count;
@@ -7755,7 +9359,44 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
}
pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
ioc->name);
+ pr_info(MPT3SAS_FMT "\tscan devices: pcie end devices start\n",
+ ioc->name);
+ /* pcie devices */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+ &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
+ & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from pcie end device"
+ " scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(pcie_device_pg0.DevHandle);
+ if (!(_scsih_is_nvme_device(
+ le32_to_cpu(pcie_device_pg0.DeviceInfo))))
+ continue;
+ pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
+ le64_to_cpu(pcie_device_pg0.WWID));
+ if (pcie_device) {
+ pcie_device_put(pcie_device);
+ continue;
+ }
+ retry_count = 0;
+ parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
+ _scsih_pcie_add_device(ioc, handle);
+
+ pr_info(MPT3SAS_FMT "\tAFTER adding pcie end device: "
+ "handle (0x%04x), wwid(0x%016llx)\n", ioc->name,
+ handle,
+ (unsigned long long) le64_to_cpu(pcie_device_pg0.WWID));
+ }
+ pr_info(MPT3SAS_FMT "\tpcie devices: pcie end devices complete\n",
+ ioc->name);
pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
}
/**
@@ -7805,6 +9446,7 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
!ioc->sas_hba.num_phys)) {
_scsih_prep_device_scan(ioc);
_scsih_search_responding_sas_devices(ioc);
+ _scsih_search_responding_pcie_devices(ioc);
_scsih_search_responding_raid_devices(ioc);
_scsih_search_responding_expanders(ioc);
_scsih_error_recovery_delete_devices(ioc);
@@ -7849,7 +9491,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
goto out;
ssleep(1);
}
- _scsih_remove_unresponding_sas_devices(ioc);
+ _scsih_remove_unresponding_devices(ioc);
_scsih_scan_for_devices_after_reset(ioc);
break;
case MPT3SAS_PORT_ENABLE_COMPLETE:
@@ -7892,6 +9534,16 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
case MPI2_EVENT_IR_OPERATION_STATUS:
_scsih_sas_ir_operation_status_event(ioc, fw_event);
break;
+ case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
+ _scsih_pcie_device_status_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_PCIE_ENUMERATION:
+ _scsih_pcie_enumeration_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ _scsih_pcie_topology_change_event(ioc, fw_event);
+ return;
+ break;
}
out:
fw_event_work_put(fw_event);
@@ -7982,6 +9634,11 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
(Mpi2EventDataSasTopologyChangeList_t *)
mpi_reply->EventData);
break;
+ case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ _scsih_check_pcie_topo_remove_events(ioc,
+ (Mpi26EventDataPCIeTopologyChangeList_t *)
+ mpi_reply->EventData);
+ break;
case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
_scsih_check_ir_config_unhide_events(ioc,
(Mpi2EventDataIrConfigChangeList_t *)
@@ -8044,6 +9701,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
case MPI2_EVENT_SAS_DISCOVERY:
case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
case MPI2_EVENT_IR_PHYSICAL_DISK:
+ case MPI2_EVENT_PCIE_ENUMERATION:
+ case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
break;
case MPI2_EVENT_TEMP_THRESHOLD:
@@ -8056,19 +9715,21 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
(Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
switch (ActiveCableEventData->ReasonCode) {
case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
- pr_notice(MPT3SAS_FMT "Receptacle ID %d: This active cable"
- " requires %d mW of power\n", ioc->name,
- ActiveCableEventData->ReceptacleID,
+ pr_notice(MPT3SAS_FMT
+ "Currently an active cable with ReceptacleID %d\n",
+ ioc->name, ActiveCableEventData->ReceptacleID);
+ pr_notice("cannot be powered and devices connected\n");
+ pr_notice("to this active cable will not be seen\n");
+ pr_notice("This active cable requires %d mW of power\n",
ActiveCableEventData->ActiveCablePowerRequirement);
- pr_notice(MPT3SAS_FMT "Receptacle ID %d: Devices connected"
- " to this active cable will not be seen\n",
- ioc->name, ActiveCableEventData->ReceptacleID);
break;
case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
- pr_notice(MPT3SAS_FMT "ReceptacleID %d: This cable",
- ioc->name, ActiveCableEventData->ReceptacleID);
- pr_notice(" is not running at an optimal speed(12 Gb/s)\n");
+ pr_notice(MPT3SAS_FMT
+ "Currently a cable with ReceptacleID %d\n",
+ ioc->name, ActiveCableEventData->ReceptacleID);
+ pr_notice(
+ "is not running at optimal speed(12 Gb/s rate)\n");
break;
}
@@ -8100,7 +9761,6 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
* _scsih_expander_node_remove - removing expander device from list.
* @ioc: per adapter object
* @sas_expander: the sas_device object
- * Context: Calling function should acquire ioc->sas_node_lock.
*
* Removing object and freeing associated memory from the
* ioc->sas_expander_list.
@@ -8112,6 +9772,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
struct _sas_node *sas_expander)
{
struct _sas_port *mpt3sas_port, *next;
+ unsigned long flags;
/* remove sibling ports attached to this expander */
list_for_each_entry_safe(mpt3sas_port, next,
@@ -8139,6 +9800,10 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
sas_expander->handle, (unsigned long long)
sas_expander->sas_address);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_del(&sas_expander->list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
kfree(sas_expander->phy);
kfree(sas_expander);
}
@@ -8231,6 +9896,7 @@ static void scsih_remove(struct pci_dev *pdev)
struct _sas_port *mpt3sas_port, *next_port;
struct _raid_device *raid_device, *next;
struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _pcie_device *pcie_device, *pcienext;
struct workqueue_struct *wq;
unsigned long flags;
@@ -8259,6 +9925,12 @@ static void scsih_remove(struct pci_dev *pdev)
(unsigned long long) raid_device->wwid);
_scsih_raid_device_remove(ioc, raid_device);
}
+ list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
+ list) {
+ _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
+ list_del_init(&pcie_device->list);
+ pcie_device_put(pcie_device);
+ }
/* free ports attached to the sas_host */
list_for_each_entry_safe(mpt3sas_port, next_port,
@@ -8330,42 +10002,52 @@ scsih_shutdown(struct pci_dev *pdev)
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
- u8 is_raid;
+ u32 channel;
void *device;
struct _sas_device *sas_device;
struct _raid_device *raid_device;
+ struct _pcie_device *pcie_device;
u16 handle;
u64 sas_address_parent;
u64 sas_address;
unsigned long flags;
int rc;
+ int tid;
/* no Bios, return immediately */
if (!ioc->bios_pg3.BiosVersion)
return;
device = NULL;
- is_raid = 0;
if (ioc->req_boot_device.device) {
device = ioc->req_boot_device.device;
- is_raid = ioc->req_boot_device.is_raid;
+ channel = ioc->req_boot_device.channel;
} else if (ioc->req_alt_boot_device.device) {
device = ioc->req_alt_boot_device.device;
- is_raid = ioc->req_alt_boot_device.is_raid;
+ channel = ioc->req_alt_boot_device.channel;
} else if (ioc->current_boot_device.device) {
device = ioc->current_boot_device.device;
- is_raid = ioc->current_boot_device.is_raid;
+ channel = ioc->current_boot_device.channel;
}
if (!device)
return;
- if (is_raid) {
+ if (channel == RAID_CHANNEL) {
raid_device = device;
rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
raid_device->id, 0);
if (rc)
_scsih_raid_device_remove(ioc, raid_device);
+ } else if (channel == PCIE_CHANNEL) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = device;
+ tid = pcie_device->id;
+ list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
+ if (rc)
+ _scsih_pcie_device_remove(ioc, pcie_device);
} else {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = device;
@@ -8498,6 +10180,101 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * get_next_pcie_device - Get the next pcie device
+ * @ioc: per adapter object
+ *
+ * Get the next pcie device from pcie_device_init_list list.
+ *
+ * Returns the pcie device structure if the pcie_device_init_list is not
+ * empty, otherwise returns NULL.
+ */
+static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _pcie_device *pcie_device = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ if (!list_empty(&ioc->pcie_device_init_list)) {
+ pcie_device = list_first_entry(&ioc->pcie_device_init_list,
+ struct _pcie_device, list);
+ pcie_device_get(pcie_device);
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ return pcie_device;
+}
+
+/**
+ * pcie_device_make_active - Add pcie device to pcie_device_list list
+ * @ioc: per adapter object
+ * @pcie_device: pcie device object
+ *
+ * Add the pcie device, which has registered with the SCSI Transport Layer,
+ * to the pcie_device_list.
+ */
+static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+
+ if (!list_empty(&pcie_device->list)) {
+ list_del_init(&pcie_device->list);
+ pcie_device_put(pcie_device);
+ }
+ pcie_device_get(pcie_device);
+ list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
+
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
+
+/**
+ * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _pcie_device *pcie_device;
+ int rc;
+
+ /* PCIe Device List */
+ while ((pcie_device = get_next_pcie_device(ioc))) {
+ if (pcie_device->starget) {
+ pcie_device_put(pcie_device);
+ continue;
+ }
+ rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
+ pcie_device->id, 0);
+ if (rc) {
+ _scsih_pcie_device_remove(ioc, pcie_device);
+ pcie_device_put(pcie_device);
+ continue;
+ } else if (!pcie_device->starget) {
+ /*
+ * When async scanning is enabled, it's not possible to
+ * remove devices while scanning is turned on due to an
+ * oops in scsi_sysfs_add_sdev()->add_device()->
+ * sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading) {
+ /* TODO-- Need to find out whether this condition will
+ * occur or not
+ */
+ _scsih_pcie_device_remove(ioc, pcie_device);
+ pcie_device_put(pcie_device);
+ continue;
+ }
+ }
+ pcie_device_make_active(ioc, pcie_device);
+ pcie_device_put(pcie_device);
+ }
+}
+
+/**
* _scsih_probe_devices - probing for devices
* @ioc: per adapter object
*
@@ -8525,8 +10302,10 @@ _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
_scsih_probe_sas(ioc);
_scsih_probe_raid(ioc);
}
- } else
+ } else {
_scsih_probe_sas(ioc);
+ _scsih_probe_pcie(ioc);
+ }
}
/**
@@ -8740,6 +10519,7 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
case MPI26_MFGPAGE_DEVID_SAS3516:
case MPI26_MFGPAGE_DEVID_SAS3516_1:
case MPI26_MFGPAGE_DEVID_SAS3416:
+ case MPI26_MFGPAGE_DEVID_SAS3616:
return MPI26_VERSION;
}
return 0;
@@ -8817,6 +10597,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case MPI26_MFGPAGE_DEVID_SAS3516:
case MPI26_MFGPAGE_DEVID_SAS3516_1:
case MPI26_MFGPAGE_DEVID_SAS3416:
+ case MPI26_MFGPAGE_DEVID_SAS3616:
ioc->is_gen35_ioc = 1;
break;
default:
@@ -8867,11 +10648,14 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&ioc->sas_node_lock);
spin_lock_init(&ioc->fw_event_lock);
spin_lock_init(&ioc->raid_device_lock);
+ spin_lock_init(&ioc->pcie_device_lock);
spin_lock_init(&ioc->diag_trigger_lock);
INIT_LIST_HEAD(&ioc->sas_device_list);
INIT_LIST_HEAD(&ioc->sas_device_init_list);
INIT_LIST_HEAD(&ioc->sas_expander_list);
+ INIT_LIST_HEAD(&ioc->pcie_device_list);
+ INIT_LIST_HEAD(&ioc->pcie_device_init_list);
INIT_LIST_HEAD(&ioc->fw_event_list);
INIT_LIST_HEAD(&ioc->raid_device_list);
INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
@@ -9273,6 +11057,9 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
PCI_ANY_ID, PCI_ANY_ID },
+ /* Mercator ~ 3616 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
+ PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index 540bd5005149..ced7d9f6274c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -299,7 +299,7 @@ mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
*/
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
u16 smid)
{
sector_t v_lba, p_lba, stripe_off, column, io_size;
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 107e191bf023..8620ac5d6e41 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -604,7 +604,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
* check bus line
*/
phase = nsp32_read1(base, SCSI_BUS_MONITOR);
- if(((phase & BUSMON_BSY) == 1) || (phase & BUSMON_SEL) == 1) {
+ if ((phase & BUSMON_BSY) || (phase & BUSMON_SEL)) {
nsp32_msg(KERN_WARNING, "bus busy");
SCpnt->result = DID_BUS_BUSY << 16;
status = 1;
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index be8269c8d127..596f3ff965f5 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -98,6 +98,58 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
}
}
static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
+
+/**
+ * pm8001_ctl_ila_version_show - ila version
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_ila_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ if (pm8001_ha->chip_id != chip_8001) {
+ return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 24),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 16),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 8),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version));
+ }
+ return 0;
+}
+static DEVICE_ATTR(ila_version, 0444, pm8001_ctl_ila_version_show, NULL);
+
+/**
+ * pm8001_ctl_inactive_fw_version_show - inactive firmware version number
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_inactive_fw_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ if (pm8001_ha->chip_id != chip_8001) {
+ return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 24),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 16),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 8),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version));
+ }
+ return 0;
+}
+static
+DEVICE_ATTR(inc_fw_ver, 0444, pm8001_ctl_inactive_fw_version_show, NULL);
+
/**
* pm8001_ctl_max_out_io_show - max outstanding io supported
* @cdev: pointer to embedded class device
@@ -748,6 +800,8 @@ struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_bios_version,
&dev_attr_ib_log,
&dev_attr_ob_log,
+ &dev_attr_ila_version,
+ &dev_attr_inc_fw_ver,
NULL,
};
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 10546faac58c..db88a8e7ee0e 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3198,19 +3198,28 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
+ u32 tag;
struct local_phy_ctl_resp *pPayload =
(struct local_phy_ctl_resp *)(piomb + 4);
u32 status = le32_to_cpu(pPayload->status);
u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS;
u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
+ tag = le32_to_cpu(pPayload->tag);
if (status != 0) {
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("%x phy execute %x phy op failed!\n",
phy_id, phy_op));
- } else
+ } else {
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("%x phy execute %x phy op success!\n",
phy_id, phy_op));
+ pm8001_ha->phy[phy_id].reset_success = true;
+ }
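+ /* wake up any waiter registered for this phy control request */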
+ if (pm8001_ha->phy[phy_id].enable_completion) {
+ complete(pm8001_ha->phy[phy_id].enable_completion);
+ pm8001_ha->phy[phy_id].enable_completion = NULL;
+ }
+ pm8001_tag_free(pm8001_ha, tag);
return 0;
}
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 0e013f76b582..7a697ca68501 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -132,7 +132,7 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
sas_phy->id = phy_id;
- sas_phy->sas_addr = &pm8001_ha->sas_addr[0];
+ sas_phy->sas_addr = (u8 *)&phy->dev_sas_addr;
sas_phy->frame_rcvd = &phy->frame_rcvd[0];
sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata;
sas_phy->lldd_phy = phy;
@@ -591,10 +591,12 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
for (i = 0; i < chip_info->n_phy; i++) {
sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy;
sha->sas_port[i] = &pm8001_ha->port[i].sas_port;
+ sha->sas_phy[i]->sas_addr =
+ (u8 *)&pm8001_ha->phy[i].dev_sas_addr;
}
sha->sas_ha_name = DRV_NAME;
sha->dev = pm8001_ha->dev;
-
+ sha->strict_wide_ports = 1;
sha->lldd_module = THIS_MODULE;
sha->sas_addr = &pm8001_ha->sas_addr[0];
sha->num_phys = chip_info->n_phy;
@@ -611,6 +613,7 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
{
u8 i, j;
+ u8 sas_add[8];
#ifdef PM8001_READ_VPD
/* For new SPC controllers WWN is stored in flash vpd
* For SPC/SPCve controllers WWN is stored in EEPROM
@@ -672,10 +675,12 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->sas_addr[j] =
payload.func_specific[0x804 + i];
}
-
+ memcpy(sas_add, pm8001_ha->sas_addr, SAS_ADDR_SIZE);
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
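+ /* bump the last byte of the SAS address for every group of
+ * 4 phys so each quad reports a distinct address
+ */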
+ if (i && ((i % 4) == 0))
+ sas_add[7] = sas_add[7] + 4;
memcpy(&pm8001_ha->phy[i].dev_sas_addr,
- pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+ sas_add, SAS_ADDR_SIZE);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("phy %d sas_addr = %016llx\n", i,
pm8001_ha->phy[i].dev_sas_addr));
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 7b2f92ae9866..0e294e80c169 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -1158,40 +1158,42 @@ int pm8001_query_task(struct sas_task *task)
int pm8001_abort_task(struct sas_task *task)
{
unsigned long flags;
- u32 tag = 0xdeadbeef;
+ u32 tag;
u32 device_id;
struct domain_device *dev ;
- struct pm8001_hba_info *pm8001_ha = NULL;
- struct pm8001_ccb_info *ccb;
+ struct pm8001_hba_info *pm8001_ha;
struct scsi_lun lun;
struct pm8001_device *pm8001_dev;
struct pm8001_tmf_task tmf_task;
- int rc = TMF_RESP_FUNC_FAILED;
+ int rc = TMF_RESP_FUNC_FAILED, ret;
+ u32 phy_id;
+ struct sas_task_slow slow_task;
if (unlikely(!task || !task->lldd_task || !task->dev))
- return rc;
+ return TMF_RESP_FUNC_FAILED;
+ dev = task->dev;
+ pm8001_dev = dev->lldd_dev;
+ pm8001_ha = pm8001_find_ha_by_dev(dev);
+ device_id = pm8001_dev->device_id;
+ phy_id = pm8001_dev->attached_phy;
+ rc = pm8001_find_tag(task, &tag);
+ if (rc == 0) {
+ pm8001_printk("no tag for task:%p\n", task);
+ return TMF_RESP_FUNC_FAILED;
+ }
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
- rc = TMF_RESP_FUNC_COMPLETE;
- goto out;
+ return TMF_RESP_FUNC_COMPLETE;
+ }
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
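+ /* borrow an on-stack slow_task when the task has none, so we
+ * can wait for the abort completion below
+ */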
+ if (task->slow_task == NULL) {
+ init_completion(&slow_task.completion);
+ task->slow_task = &slow_task;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (task->task_proto & SAS_PROTOCOL_SSP) {
struct scsi_cmnd *cmnd = task->uldd_task;
- dev = task->dev;
- ccb = task->lldd_task;
- pm8001_dev = dev->lldd_dev;
- pm8001_ha = pm8001_find_ha_by_dev(dev);
int_to_scsilun(cmnd->device->lun, &lun);
- rc = pm8001_find_tag(task, &tag);
- if (rc == 0) {
- printk(KERN_INFO "No such tag in %s\n", __func__);
- rc = TMF_RESP_FUNC_FAILED;
- return rc;
- }
- device_id = pm8001_dev->device_id;
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("abort io to deviceid= %d\n", device_id));
tmf_task.tmf = TMF_ABORT_TASK;
tmf_task.tag_of_task_to_be_managed = tag;
rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
@@ -1199,33 +1201,77 @@ int pm8001_abort_task(struct sas_task *task)
pm8001_dev->sas_device, 0, tag);
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
- dev = task->dev;
- pm8001_dev = dev->lldd_dev;
- pm8001_ha = pm8001_find_ha_by_dev(dev);
- rc = pm8001_find_tag(task, &tag);
- if (rc == 0) {
- printk(KERN_INFO "No such tag in %s\n", __func__);
- rc = TMF_RESP_FUNC_FAILED;
- return rc;
+ if (pm8001_ha->chip_id == chip_8006) {
+ DECLARE_COMPLETION_ONSTACK(completion_reset);
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
+
+ /* 1. Set Device state as Recovery */
+ pm8001_dev->setds_completion = &completion;
+ PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, 0x03);
+ wait_for_completion(&completion);
+
+ /* 2. Send Phy Control Hard Reset */
+ reinit_completion(&completion);
+ phy->reset_success = false;
+ phy->enable_completion = &completion;
+ phy->reset_completion = &completion_reset;
+ ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
+ PHY_HARD_RESET);
+ if (ret)
+ goto out;
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Waiting for local phy ctl\n"));
+ wait_for_completion(&completion);
+ if (!phy->reset_success)
+ goto out;
+
+ /* 3. Wait for Port Reset complete / Port reset TMO */
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Waiting for Port reset\n"));
+ wait_for_completion(&completion_reset);
+ if (phy->port_reset_status)
+ goto out;
+
+ /*
+ * 4. SATA Abort ALL
+ * We wait for the task to be aborted so that it is
+ * removed from the ccb. On success the caller will
+ * free the task.
+ */
+ ret = pm8001_exec_internal_task_abort(pm8001_ha,
+ pm8001_dev, pm8001_dev->sas_device, 1, tag);
+ if (ret)
+ goto out;
+ ret = wait_for_completion_timeout(
+ &task->slow_task->completion,
+ PM8001_TASK_TIMEOUT * HZ);
+ if (!ret)
+ goto out;
+
+ /* 5. Set Device State as Operational */
+ reinit_completion(&completion);
+ pm8001_dev->setds_completion = &completion;
+ PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, 0x01);
+ wait_for_completion(&completion);
+ } else {
+ rc = pm8001_exec_internal_task_abort(pm8001_ha,
+ pm8001_dev, pm8001_dev->sas_device, 0, tag);
}
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- pm8001_dev->sas_device, 0, tag);
+ rc = TMF_RESP_FUNC_COMPLETE;
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
/* SMP */
- dev = task->dev;
- pm8001_dev = dev->lldd_dev;
- pm8001_ha = pm8001_find_ha_by_dev(dev);
- rc = pm8001_find_tag(task, &tag);
- if (rc == 0) {
- printk(KERN_INFO "No such tag in %s\n", __func__);
- rc = TMF_RESP_FUNC_FAILED;
- return rc;
- }
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
pm8001_dev->sas_device, 0, tag);
}
out:
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->slow_task == &slow_task)
+ task->slow_task = NULL;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
if (rc != TMF_RESP_FUNC_COMPLETE)
pm8001_printk("rc= %d\n", rc);
return rc;
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index e81a8fa7ef1a..80b4dd6df0c2 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -263,8 +263,15 @@ struct pm8001_phy {
u8 phy_state;
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
+ struct completion *reset_completion;
+ bool port_reset_status;
+ bool reset_success;
};
+/* port reset status */
+#define PORT_RESET_SUCCESS 0x00
+#define PORT_RESET_TMO 0x01
+
struct pm8001_device {
enum sas_device_type dev_type;
struct domain_device *sas_device;
@@ -404,6 +411,8 @@ union main_cfg_table {
u32 port_recovery_timer;
u32 interrupt_reassertion_delay;
u32 fatal_n_non_fatal_dump; /* 0x28 */
+ u32 ila_version;
+ u32 inc_fw_version;
} pm80xx_tbl;
};
@@ -531,6 +540,7 @@ struct pm8001_hba_info {
u32 smp_exp_mode;
const struct firmware *fw_image;
struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
+ u32 reset_in_progress;
};
struct pm8001_work {
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index eb4fee61df72..42f0405601ad 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -312,6 +312,11 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
/* read port recover and reset timeout */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer =
pm8001_mr32(address, MAIN_PORT_RECOVERY_TIMER);
+ /* read ILA and inactive firmware version */
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version =
+ pm8001_mr32(address, MAIN_MPI_ILA_RELEASE_TYPE);
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version =
+ pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION);
}
/**
@@ -592,6 +597,12 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &= 0xffff0000;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
PORT_RECOVERY_TIMEOUT;
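+ /* 8006 controllers: override the upper 16 bits of the port recovery timer */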
+ if (pm8001_ha->chip_id == chip_8006) {
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &=
+ 0x0000ffff;
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
+ 0x140000;
+ }
pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
}
@@ -1478,6 +1489,7 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
ccb->device = pm8001_ha_dev;
ccb->ccb_tag = ccb_tag;
ccb->task = task;
+ ccb->n_elem = 0;
pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
@@ -1770,6 +1782,8 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
"task 0x%p done with io_status 0x%x resp 0x%x "
"stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
+ if (t->slow_task)
+ complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
@@ -3033,10 +3047,10 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_port *port = &pm8001_ha->port[port_id];
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ u32 port_sata = (phy->phy_type & PORT_TYPE_SATA);
port->port_state = portstate;
phy->identify.device_type = 0;
phy->phy_attached = 0;
- memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
switch (portstate) {
case PORT_VALID:
break;
@@ -3045,7 +3059,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk(" PortInvalid portID %d\n", port_id));
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" Last phy Down and port invalid\n"));
- if (phy->phy_type & PORT_TYPE_SATA) {
+ if (port_sata) {
phy->phy_type = 0;
port->port_attached = 0;
pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
@@ -3067,7 +3081,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk(" Phy Down and PORT_LOSTCOMM\n"));
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" Last phy Down and port invalid\n"));
- if (phy->phy_type & PORT_TYPE_SATA) {
+ if (port_sata) {
port->port_attached = 0;
phy->phy_type = 0;
pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
@@ -3083,6 +3097,11 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
}
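+ /* for SATA ports, report loss of signal to libsas unless the port is in reset */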
+ if (port_sata && (portstate != PORT_IN_RESET)) {
+ struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ }
}
static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
@@ -3185,12 +3204,14 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
case HW_EVENT_PHY_DOWN:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("HW_EVENT_PHY_DOWN\n"));
- if (phy->phy_type & PORT_TYPE_SATA)
- sas_ha->notify_phy_event(&phy->sas_phy,
- PHYE_LOSS_OF_SIGNAL);
+ hw_event_phy_down(pm8001_ha, piomb);
+ if (pm8001_ha->reset_in_progress) {
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Reset in progress\n"));
+ return 0;
+ }
phy->phy_attached = 0;
phy->phy_state = 0;
- hw_event_phy_down(pm8001_ha, piomb);
break;
case HW_EVENT_PORT_INVALID:
PM8001_MSG_DBG(pm8001_ha,
@@ -3297,9 +3318,17 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
case HW_EVENT_PORT_RESET_TIMER_TMO:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
+ pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ if (pm8001_ha->phy[phy_id].reset_completion) {
+ pm8001_ha->phy[phy_id].port_reset_status =
+ PORT_RESET_TMO;
+ complete(pm8001_ha->phy[phy_id].reset_completion);
+ pm8001_ha->phy[phy_id].reset_completion = NULL;
+ }
break;
case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
PM8001_MSG_DBG(pm8001_ha,
@@ -3324,6 +3353,12 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
case HW_EVENT_PORT_RESET_COMPLETE:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
+ if (pm8001_ha->phy[phy_id].reset_completion) {
+ pm8001_ha->phy[phy_id].port_reset_status =
+ PORT_RESET_SUCCESS;
+ complete(pm8001_ha->phy[phy_id].reset_completion);
+ pm8001_ha->phy[phy_id].reset_completion = NULL;
+ }
break;
case EVENT_BROADCAST_ASYNCH_EVENT:
PM8001_MSG_DBG(pm8001_ha,
@@ -4389,7 +4424,7 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
payload.sas_identify.dev_type = SAS_END_DEVICE;
payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
memcpy(payload.sas_identify.sas_addr,
- pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+ &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
return ret;
@@ -4496,17 +4531,20 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
u32 phyId, u32 phy_op)
{
+ u32 tag;
+ int rc;
struct local_phy_ctl_req payload;
struct inbound_queue_table *circularQ;
- int ret;
u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
memset(&payload, 0, sizeof(payload));
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc)
+ return rc;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
- payload.tag = cpu_to_le32(1);
+ payload.tag = cpu_to_le32(tag);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
- return ret;
+ return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
}
static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 7a443bad6163..889e69ce3689 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -167,7 +167,7 @@
#define LINKMODE_AUTO (0x03 << 12)
#define LINKRATE_15 (0x01 << 8)
#define LINKRATE_30 (0x02 << 8)
-#define LINKRATE_60 (0x06 << 8)
+#define LINKRATE_60 (0x04 << 8)
#define LINKRATE_120 (0x08 << 8)
/* phy_profile */
@@ -229,6 +229,102 @@
#define IT_NEXUS_TIMEOUT 0x7D0
#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30)
+#ifdef __LITTLE_ENDIAN_BITFIELD
+struct sas_identify_frame_local {
+ /* Byte 0 */
+ u8 frame_type:4;
+ u8 dev_type:3;
+ u8 _un0:1;
+
+ /* Byte 1 */
+ u8 _un1;
+
+ /* Byte 2 */
+ union {
+ struct {
+ u8 _un20:1;
+ u8 smp_iport:1;
+ u8 stp_iport:1;
+ u8 ssp_iport:1;
+ u8 _un247:4;
+ };
+ u8 initiator_bits;
+ };
+
+ /* Byte 3 */
+ union {
+ struct {
+ u8 _un30:1;
+ u8 smp_tport:1;
+ u8 stp_tport:1;
+ u8 ssp_tport:1;
+ u8 _un347:4;
+ };
+ u8 target_bits;
+ };
+
+ /* Byte 4 - 11 */
+ u8 _un4_11[8];
+
+ /* Byte 12 - 19 */
+ u8 sas_addr[SAS_ADDR_SIZE];
+
+ /* Byte 20 */
+ u8 phy_id;
+
+ u8 _un21_27[7];
+
+} __packed;
+
+#elif defined(__BIG_ENDIAN_BITFIELD)
+struct sas_identify_frame_local {
+ /* Byte 0 */
+ u8 _un0:1;
+ u8 dev_type:3;
+ u8 frame_type:4;
+
+ /* Byte 1 */
+ u8 _un1;
+
+ /* Byte 2 */
+ union {
+ struct {
+ u8 _un247:4;
+ u8 ssp_iport:1;
+ u8 stp_iport:1;
+ u8 smp_iport:1;
+ u8 _un20:1;
+ };
+ u8 initiator_bits;
+ };
+
+ /* Byte 3 */
+ union {
+ struct {
+ u8 _un347:4;
+ u8 ssp_tport:1;
+ u8 stp_tport:1;
+ u8 smp_tport:1;
+ u8 _un30:1;
+ };
+ u8 target_bits;
+ };
+
+ /* Byte 4 - 11 */
+ u8 _un4_11[8];
+
+ /* Byte 12 - 19 */
+ u8 sas_addr[SAS_ADDR_SIZE];
+
+ /* Byte 20 */
+ u8 phy_id;
+
+ u8 _un21_27[7];
+} __packed;
+#else
+#error "Bitfield order not defined!"
+#endif
+
struct mpi_msg_hdr {
__le32 header; /* Bits [11:0] - Message operation code */
/* Bits [15:12] - Message Category */
@@ -248,7 +344,7 @@ struct mpi_msg_hdr {
struct phy_start_req {
__le32 tag;
__le32 ase_sh_lm_slr_phyid;
- struct sas_identify_frame sas_identify; /* 28 Bytes */
+ struct sas_identify_frame_local sas_identify; /* 28 Bytes */
__le32 spasti;
u32 reserved[21];
} __attribute__((packed, aligned(4)));
@@ -1349,6 +1445,8 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */
#define MAIN_PORT_RECOVERY_TIMER 0x94 /* DWORD 0x25 */
#define MAIN_INT_REASSERTION_DELAY 0x98 /* DWORD 0x26 */
+#define MAIN_MPI_ILA_RELEASE_TYPE 0xA4 /* DWORD 0x29 */
+#define MAIN_MPI_INACTIVE_FW_VERSION 0XB0 /* DWORD 0x2C */
/* General Status Table offset - byte offset */
#define GST_GSTLEN_MPIS_OFFSET 0x00
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 93d54acd4a22..bd302d3cb9af 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -92,7 +92,6 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
struct iscsi_text_response_hdr *cqe_text_response;
struct qedi_cmd *cmd;
int pld_len;
- u32 *tmp;
cmd = (struct qedi_cmd *)task->dd_data;
task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
@@ -108,7 +107,6 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
hton24(resp_hdr_ptr->dlength,
(cqe_text_response->hdr_second_dword &
ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));
- tmp = (u32 *)resp_hdr_ptr->dlength;
resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
conn->session->age);
@@ -196,7 +194,6 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
struct iscsi_tm_rsp *resp_hdr_ptr;
struct iscsi_tm *tmf_hdr;
struct qedi_cmd *qedi_cmd = NULL;
- u32 *tmp;
cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
@@ -222,7 +219,6 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
hton24(resp_hdr_ptr->dlength,
(cqe_tmp_response->hdr_second_dword &
ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
- tmp = (u32 *)resp_hdr_ptr->dlength;
resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
conn->session->age);
resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
@@ -269,7 +265,6 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
struct iscsi_login_response_hdr *cqe_login_response;
struct qedi_cmd *cmd;
int pld_len;
- u32 *tmp;
cmd = (struct qedi_cmd *)task->dd_data;
@@ -286,7 +281,6 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
hton24(resp_hdr_ptr->dlength,
(cqe_login_response->hdr_second_dword &
ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
- tmp = (u32 *)resp_hdr_ptr->dlength;
resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
conn->session->age);
resp_hdr_ptr->tsih = cqe_login_response->tsih;
@@ -590,7 +584,6 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
int datalen = 0;
struct qedi_conn *qedi_conn;
u32 iscsi_cid;
- bool mark_cmd_node_deleted = false;
u8 cqe_err_bits = 0;
iscsi_cid = cqe->cqe_common.conn_id;
@@ -674,7 +667,6 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
- mark_cmd_node_deleted = true;
}
spin_unlock(&qedi_conn->list_lock);
@@ -763,7 +755,7 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
u32 rtid = 0;
u32 iscsi_cid;
struct qedi_conn *qedi_conn;
- struct qedi_cmd *cmd_new, *dbg_cmd;
+ struct qedi_cmd *dbg_cmd;
struct iscsi_task *mtask;
struct iscsi_tm *tmf_hdr = NULL;
@@ -856,7 +848,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
}
qedi_conn->cmd_cleanup_cmpl++;
wake_up(&qedi_conn->wait_queue);
- cmd_new = task->dd_data;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
"Freeing tid=0x%x for cid=0x%x\n",
@@ -1029,7 +1020,6 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_login_req *login_hdr;
- struct scsi_sge *req_sge = NULL;
struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
struct qedi_endpoint *ep;
@@ -1037,7 +1027,6 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
u16 sq_idx = 0;
int rval = 0;
- req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
ep = qedi_conn->ep;
@@ -1718,7 +1707,6 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_task_context *fw_task_ctx;
struct iscsi_nopout *nopout_hdr;
- struct scsi_sge *req_sge = NULL;
struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
struct qedi_endpoint *ep;
@@ -1727,7 +1715,6 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
u16 sq_idx = 0;
int rval = 0;
- req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
nopout_hdr = (struct iscsi_nopout *)task->hdr;
@@ -1995,7 +1982,6 @@ void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
struct qedi_conn *qedi_conn = conn->dd_data;
struct scsi_cmnd *sc_cmd = task->sc;
unsigned long flags;
- u8 op;
spin_lock_irqsave(&qedi->io_trace_lock, flags);
@@ -2005,7 +1991,6 @@ void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
io_log->cid = qedi_conn->iscsi_conn_id;
io_log->lun = sc_cmd->device->lun;
io_log->op = sc_cmd->cmnd[0];
- op = sc_cmd->cmnd[0];
io_log->lba[0] = sc_cmd->cmnd[2];
io_log->lba[1] = sc_cmd->cmnd[3];
io_log->lba[2] = sc_cmd->cmnd[4];
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 2ea0ef93f5cb..e3ac7078d2aa 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -919,9 +919,9 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
sizeof(response) + sizeof(uint8_t);
- fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
- sizeof(struct fc_bsg_reply);
- memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
+ memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
+ sizeof(response));
fw_sts_ptr += sizeof(response);
*fw_sts_ptr = command_sent;
@@ -1116,14 +1116,13 @@ qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
return -EINVAL;
}
- mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+ mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
if (!mn) {
ql_log(ql_log_warn, vha, 0x703c,
"DMA alloc failed for fw buffer.\n");
return -ENOMEM;
}
- memset(mn, 0, sizeof(struct access_chip_84xx));
mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
mn->entry_count = 1;
ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
@@ -2554,13 +2553,11 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7089,
"mbx abort_command "
"failed.\n");
- scsi_req(bsg_job->req)->result =
bsg_reply->result = -EIO;
} else {
ql_dbg(ql_dbg_user, vha, 0x708a,
"mbx abort_command "
"success.\n");
- scsi_req(bsg_job->req)->result =
bsg_reply->result = 0;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -2571,7 +2568,7 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
- scsi_req(bsg_job->req)->result = bsg_reply->result = -ENXIO;
+ bsg_reply->result = -ENXIO;
return 0;
done:
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 486c075998f6..01a9b8971e88 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -323,6 +323,12 @@ struct els_logo_payload {
uint8_t wwpn[WWN_SIZE];
};
+struct els_plogi_payload {
+ uint8_t opcode;
+ uint8_t rsvd[3];
+ uint8_t data[112];
+};
+
struct ct_arg {
void *iocb;
u16 nport_handle;
@@ -358,6 +364,19 @@ struct srb_iocb {
dma_addr_t els_logo_pyld_dma;
} els_logo;
struct {
+#define ELS_DCMD_PLOGI 0x3
+ uint32_t flags;
+ uint32_t els_cmd;
+ struct completion comp;
+ struct els_plogi_payload *els_plogi_pyld;
+ struct els_plogi_payload *els_resp_pyld;
+ dma_addr_t els_plogi_pyld_dma;
+ dma_addr_t els_resp_pyld_dma;
+ uint32_t fw_status[3];
+ __le16 comp_status;
+ __le16 len;
+ } els_plogi;
+ struct {
/*
* Values for flags field below are as
* defined in tsk_mgmt_entry struct
@@ -922,6 +941,7 @@ struct mbx_cmd_32 {
#define INTR_RSP_QUE_UPDATE_83XX 0x14
#define INTR_ATIO_QUE_UPDATE 0x1C
#define INTR_ATIO_RSP_QUE_UPDATE 0x1D
+#define INTR_ATIO_QUE_UPDATE_27XX 0x1E
/* ISP mailbox loopback echo diagnostic error code */
#define MBS_LB_RESET 0x17
@@ -2302,6 +2322,7 @@ typedef struct fc_port {
unsigned int send_els_logo:1;
unsigned int login_pause:1;
unsigned int login_succ:1;
+ unsigned int query:1;
struct work_struct nvme_del_work;
struct completion nvme_del_done;
@@ -2347,6 +2368,7 @@ typedef struct fc_port {
uint8_t fc4_type;
uint8_t fc4f_nvme;
uint8_t scan_state;
+ uint8_t n2n_flag;
unsigned long last_queue_full;
unsigned long last_ramp_up;
@@ -2368,6 +2390,9 @@ typedef struct fc_port {
struct list_head gnl_entry;
struct work_struct del_work;
u8 iocb[IOCB_SIZE];
+ u8 current_login_state;
+ u8 last_login_state;
+ struct completion n2n_done;
} fc_port_t;
#define QLA_FCPORT_SCAN 1
@@ -4113,6 +4138,7 @@ typedef struct scsi_qla_host {
#define QPAIR_ONLINE_CHECK_NEEDED 27
#define SET_ZIO_THRESHOLD_NEEDED 28
#define DETECT_SFP_CHANGE 29
+#define N2N_LOGIN_NEEDED 30
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -4223,6 +4249,9 @@ typedef struct scsi_qla_host {
wait_queue_head_t fcport_waitQ;
wait_queue_head_t vref_waitq;
uint8_t min_link_speed_feat;
+ uint8_t n2n_node_name[WWN_SIZE];
+ uint8_t n2n_port_name[WWN_SIZE];
+ uint16_t n2n_id;
} scsi_qla_host_t;
struct qla27xx_image_status {
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index bec641aae7b3..d5cef0727e72 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -753,9 +753,7 @@ struct els_entry_24xx {
uint8_t reserved_2;
uint8_t port_id[3];
- uint8_t reserved_3;
-
- uint16_t reserved_4;
+ uint8_t s_id[3];
uint16_t control_flags; /* Control flags. */
#define ECF_PAYLOAD_DESCR_MASK (BIT_15|BIT_14|BIT_13)
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3ad375f85b59..fa115c7433e5 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -45,6 +45,8 @@ extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
+extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *,
+ port_id_t);
extern void qla2x00_update_fcports(scsi_qla_host_t *);
@@ -145,6 +147,7 @@ extern int ql2xmvasynctoatio;
extern int ql2xuctrlirq;
extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
+extern int ql2xenablemsix;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -486,6 +489,8 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
uint16_t *);
int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
struct port_database_24xx *);
+int qla24xx_get_port_login_templ(scsi_qla_host_t *, dma_addr_t,
+ void *, uint16_t);
extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 44cf875a484a..1bafa043f9f1 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -812,13 +812,12 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
sp->gen2 = fcport->login_gen;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
- pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+ pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
ql_log(ql_log_warn, vha, 0xd043,
"Failed to allocate port database structure.\n");
goto done_free_sp;
}
- memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
mb = sp->u.iocb_cmd.u.mbx.out_mb;
mb[0] = MBC_GET_PORT_DATABASE;
@@ -1434,6 +1433,14 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
break;
default:
+ if (ea->fcport->n2n_flag) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post fc4 prli\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ ea->fcport->fc4f_nvme = 0;
+ ea->fcport->n2n_flag = 0;
+ qla24xx_post_prli_work(vha, ea->fcport);
+ }
ql_dbg(ql_dbg_disc, vha, 0x2119,
"%s %d %8phC unhandle event of %x\n",
__func__, __LINE__, ea->fcport->port_name, ea->data[0]);
@@ -4367,7 +4374,109 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
return (rval);
}
+/*
+ * N2N Login
+ * Handle N2N (point-to-point direct-attach) login for the local port.
+ *
+ * Input:
+ * vha = adapter block pointer.
+ * fcport = remote port to log in to.
+ *
+ * Returns:
+ */
+static int qla24xx_n2n_handle_login(struct scsi_qla_host *vha,
+ fc_port_t *fcport)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int res = QLA_SUCCESS, rval;
+ int greater_wwpn = 0;
+ int logged_in = 0;
+
+ if (ha->current_topology != ISP_CFG_N)
+ return res;
+
+ if (wwn_to_u64(vha->port_name) >
+ wwn_to_u64(vha->n2n_port_name)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2002,
+ "HBA WWPN is greater %llx > target %llx\n",
+ wwn_to_u64(vha->port_name),
+ wwn_to_u64(vha->n2n_port_name));
+ greater_wwpn = 1;
+ fcport->d_id.b24 = vha->n2n_id;
+ }
+
+ fcport->loop_id = vha->loop_id;
+ fcport->fc4f_nvme = 0;
+ fcport->query = 1;
+
+ ql_dbg(ql_dbg_disc, vha, 0x4001,
+ "Initiate N2N login handler: HBA port_id=%06x loopid=%d\n",
+ fcport->d_id.b24, vha->loop_id);
+
+ /* Fill in member data. */
+ if (!greater_wwpn) {
+ rval = qla2x00_get_port_database(vha, fcport, 0);
+ ql_dbg(ql_dbg_disc, vha, 0x1051,
+ "Remote login-state (%x/%x) port_id=%06x loop_id=%x, rval=%d\n",
+ fcport->current_login_state, fcport->last_login_state,
+ fcport->d_id.b24, fcport->loop_id, rval);
+
+ if (((fcport->current_login_state & 0xf) == 0x4) ||
+ ((fcport->current_login_state & 0xf) == 0x6))
+ logged_in = 1;
+ }
+
+ if (logged_in || greater_wwpn) {
+ if (!vha->nvme_local_port && vha->flags.nvme_enabled)
+ qla_nvme_register_hba(vha);
+
+ /* Set connected N_Port d_id */
+ if (vha->flags.nvme_enabled)
+ fcport->fc4f_nvme = 1;
+
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ fcport->disc_state = DSC_GNL;
+ fcport->n2n_flag = 1;
+ fcport->flags = 3;
+ vha->hw->flags.gpsc_supported = 0;
+
+ if (greater_wwpn) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e5,
+ "%s %d PLOGI ELS %8phC\n",
+ __func__, __LINE__, fcport->port_name);
+
+ res = qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
+ fcport, fcport->d_id);
+ }
+
+ if (res != QLA_SUCCESS) {
+ ql_log(ql_log_info, vha, 0xd04d,
+ "PLOGI Failed: portid=%06x - retrying\n",
+ fcport->d_id.b24);
+ res = QLA_SUCCESS;
+ } else {
+ /* State 0x6 means FCP PRLI complete */
+ if ((fcport->current_login_state & 0xf) == 0x6) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post GPDB work\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->chip_reset =
+ vha->hw->base_qpair->chip_reset;
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post NVMe PRLI\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_prli_work(vha, fcport);
+ }
+ }
+ } else {
+ /* Wait for next database change */
+ set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
+ }
+ return res;
+}
/*
* qla2x00_configure_local_loop
@@ -4438,6 +4547,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
}
}
+ /* Initiate N2N login. */
+ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+ rval = qla24xx_n2n_handle_login(vha, new_fcport);
+ if (rval != QLA_SUCCESS)
+ goto cleanup_allocation;
+ return QLA_SUCCESS;
+ }
+
/* Add devices to port list. */
id_iter = (char *)ha->gid_list;
for (index = 0; index < entries; index++) {
@@ -4479,10 +4596,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
"Failed to retrieve fcport information "
"-- get_port_database=%x, loop_id=0x%04x.\n",
rval2, new_fcport->loop_id);
- ql_dbg(ql_dbg_disc, vha, 0x2105,
- "Scheduling resync.\n");
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- continue;
+ /* Skip retry if N2N */
+ if (ha->current_topology != ISP_CFG_N) {
+ ql_dbg(ql_dbg_disc, vha, 0x2105,
+ "Scheduling resync.\n");
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ continue;
+ }
}
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
@@ -7555,6 +7675,12 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
icb->firmware_options_3 |= BIT_0;
+ if (IS_QLA27XX(ha)) {
+ icb->firmware_options_3 |= BIT_8;
+ ql_dbg(ql_log_info, vha, 0x0075,
+ "Enabling direct connection.\n");
+ }
+
if (rval) {
ql_log(ql_log_warn, vha, 0x0076,
"NVRAM configuration failed.\n");
@@ -7910,7 +8036,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
return NULL;
}
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
if (qpair == NULL) {
ql_log(ql_log_warn, vha, 0x0182,
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 2f94159186d7..d810a447cb4a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2518,6 +2518,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
scsi_qla_host_t *vha = sp->vha;
struct srb_iocb *elsio = &sp->u.iocb_cmd;
+ uint32_t dsd_len = 24;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
@@ -2534,24 +2535,198 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
els_iocb->port_id[1] = sp->fcport->d_id.b.area;
els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ els_iocb->s_id[0] = vha->d_id.b.al_pa;
+ els_iocb->s_id[1] = vha->d_id.b.area;
+ els_iocb->s_id[2] = vha->d_id.b.domain;
els_iocb->control_flags = 0;
- els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
- els_iocb->tx_address[0] =
- cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
- els_iocb->tx_address[1] =
- cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
- els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
+ if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
+ els_iocb->tx_byte_count = sizeof(struct els_plogi_payload);
+ els_iocb->tx_address[0] =
+ cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
+ els_iocb->tx_address[1] =
+ cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
+ els_iocb->tx_len = dsd_len;
+
+ els_iocb->rx_dsd_count = 1;
+ els_iocb->rx_byte_count = sizeof(struct els_plogi_payload);
+ els_iocb->rx_address[0] =
+ cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
+ els_iocb->rx_address[1] =
+ cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
+ els_iocb->rx_len = dsd_len;
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
+ "PLOGI ELS IOCB:\n");
+ ql_dump_buffer(ql_log_info, vha, 0x0109,
+ (uint8_t *)els_iocb, 0x70);
+ } else {
+ els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
+ els_iocb->tx_address[0] =
+ cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
+ els_iocb->tx_address[1] =
+ cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
+ els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
- els_iocb->rx_byte_count = 0;
- els_iocb->rx_address[0] = 0;
- els_iocb->rx_address[1] = 0;
- els_iocb->rx_len = 0;
+ els_iocb->rx_byte_count = 0;
+ els_iocb->rx_address[0] = 0;
+ els_iocb->rx_address[1] = 0;
+ els_iocb->rx_len = 0;
+ }
sp->vha->qla_stats.control_requests++;
}
static void
+qla2x00_els_dcmd2_sp_free(void *data)
+{
+ srb_t *sp = data;
+ struct srb_iocb *elsio = &sp->u.iocb_cmd;
+
+ if (elsio->u.els_plogi.els_plogi_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
+ elsio->u.els_plogi.els_plogi_pyld,
+ elsio->u.els_plogi.els_plogi_pyld_dma);
+
+ if (elsio->u.els_plogi.els_resp_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
+ elsio->u.els_plogi.els_resp_pyld,
+ elsio->u.els_plogi.els_resp_pyld_dma);
+
+ del_timer(&elsio->timer);
+ qla2x00_rel_sp(sp);
+}
+
+static void
+qla2x00_els_dcmd2_iocb_timeout(void *data)
+{
+ srb_t *sp = data;
+ fc_port_t *fcport = sp->fcport;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ unsigned long flags = 0;
+ int res;
+
+ ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
+ "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
+ sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
+
+ /* Abort the exchange */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ res = ha->isp_ops->abort_command(sp);
+ ql_dbg(ql_dbg_io, vha, 0x3070,
+ "mbx abort_command %s\n",
+ (res == QLA_SUCCESS) ? "successful" : "failed");
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ complete(&lio->u.els_plogi.comp);
+}
+
+static void
+qla2x00_els_dcmd2_sp_done(void *ptr, int res)
+{
+ srb_t *sp = ptr;
+ fc_port_t *fcport = sp->fcport;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = sp->vha;
+
+ ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
+ "%s ELS hdl=%x, portid=%06x done %8pC\n",
+ sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
+
+ complete(&lio->u.els_plogi.comp);
+}
+
+int
+qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+ fc_port_t *fcport, port_id_t remote_did)
+{
+ srb_t *sp;
+ struct srb_iocb *elsio = NULL;
+ struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_SUCCESS;
+ void *ptr, *resp_ptr;
+ dma_addr_t ptr_dma;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ ql_log(ql_log_info, vha, 0x70e6,
+ "SRB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ elsio = &sp->u.iocb_cmd;
+ fcport->d_id.b.domain = remote_did.b.domain;
+ fcport->d_id.b.area = remote_did.b.area;
+ fcport->d_id.b.al_pa = remote_did.b.al_pa;
+
+ ql_dbg(ql_dbg_io, vha, 0x3073,
+ "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
+
+ sp->type = SRB_ELS_DCMD;
+ sp->name = "ELS_DCMD";
+ sp->fcport = fcport;
+ qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+ elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
+ sp->done = qla2x00_els_dcmd2_sp_done;
+ sp->free = qla2x00_els_dcmd2_sp_free;
+
+ ptr = elsio->u.els_plogi.els_plogi_pyld =
+ dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
+ ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
+
+ if (!elsio->u.els_plogi.els_plogi_pyld) {
+ rval = QLA_FUNCTION_FAILED;
+ goto out;
+ }
+
+ resp_ptr = elsio->u.els_plogi.els_resp_pyld =
+ dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
+
+ if (!elsio->u.els_plogi.els_resp_pyld) {
+ rval = QLA_FUNCTION_FAILED;
+ goto out;
+ }
+
+ ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
+
+ memset(ptr, 0, sizeof(struct els_plogi_payload));
+ memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
+ elsio->u.els_plogi.els_cmd = els_opcode;
+ elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
+ qla24xx_get_port_login_templ(vha, ptr_dma + 4,
+ &elsio->u.els_plogi.els_plogi_pyld->data[0],
+ sizeof(struct els_plogi_payload));
+
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
+ (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
+
+ init_completion(&elsio->u.els_plogi.comp);
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ goto out;
+ }
+
+ ql_dbg(ql_dbg_io, vha, 0x3074,
+ "%s PLOGI sent, hdl=%x, loopid=%x, portid=%06x\n",
+ sp->name, sp->handle, fcport->loop_id, fcport->d_id.b24);
+
+ wait_for_completion(&elsio->u.els_plogi.comp);
+
+ if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
+ rval = QLA_FUNCTION_FAILED;
+
+out:
+ sp->free(sp);
+ return rval;
+}
+
+static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
struct bsg_job *bsg_job = sp->u.bsg_job;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 9d9668aac6f6..2fd79129bb2a 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1041,6 +1041,7 @@ global_port_update:
*/
atomic_set(&vha->loop_down_timer, 0);
if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
+ !ha->flags.n2n_ae &&
atomic_read(&vha->loop_state) != LOOP_DEAD) {
ql_dbg(ql_dbg_async, vha, 0x5011,
"Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
@@ -1543,8 +1544,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
struct fc_bsg_reply *bsg_reply;
uint16_t comp_status;
uint32_t fw_status[3];
- uint8_t* fw_sts_ptr;
int res;
+ struct srb_iocb *els;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (!sp)
@@ -1561,10 +1562,14 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
case SRB_ELS_DCMD:
type = "Driver ELS logo";
- ql_dbg(ql_dbg_user, vha, 0x5047,
- "Completing %s: (%p) type=%d.\n", type, sp, sp->type);
- sp->done(sp, 0);
- return;
+ if (iocb_type != ELS_IOCB_TYPE) {
+ ql_dbg(ql_dbg_user, vha, 0x5047,
+ "Completing %s: (%p) type=%d.\n",
+ type, sp, sp->type);
+ sp->done(sp, 0);
+ return;
+ }
+ break;
case SRB_CT_PTHRU_CMD:
/* borrowing sts_entry_24xx.comp_status.
same location as ct_entry_24xx.comp_status
@@ -1584,6 +1589,33 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
+ if (iocb_type == ELS_IOCB_TYPE) {
+ els = &sp->u.iocb_cmd;
+ els->u.els_plogi.fw_status[0] = fw_status[0];
+ els->u.els_plogi.fw_status[1] = fw_status[1];
+ els->u.els_plogi.fw_status[2] = fw_status[2];
+ els->u.els_plogi.comp_status = fw_status[0];
+ if (comp_status == CS_COMPLETE) {
+ res = DID_OK << 16;
+ } else {
+ if (comp_status == CS_DATA_UNDERRUN) {
+ res = DID_OK << 16;
+ els->u.els_plogi.len =
+ le16_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count);
+ } else {
+ els->u.els_plogi.len = 0;
+ res = DID_ERROR << 16;
+ }
+ }
+ ql_log(ql_log_info, vha, 0x503f,
+ "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
+ type, sp->handle, comp_status, fw_status[1], fw_status[2],
+ le16_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count));
+ goto els_ct_done;
+ }
+
/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
* fc payload to the caller
*/
@@ -1604,11 +1636,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
type, sp->handle, comp_status, fw_status[1], fw_status[2],
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->total_byte_count));
- fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) +
- sizeof(struct fc_bsg_reply);
- memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
- }
- else {
+ } else {
ql_dbg(ql_dbg_user, vha, 0x5040,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x.\n",
@@ -1619,10 +1647,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
pkt)->error_subcode_2));
res = DID_ERROR << 16;
bsg_reply->reply_payload_rcv_len = 0;
- fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) +
- sizeof(struct fc_bsg_reply);
- memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
+ memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
+ fw_status, sizeof(fw_status));
ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
(uint8_t *)pkt, sizeof(*pkt));
}
@@ -1631,6 +1658,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
+els_ct_done:
sp->done(sp, res);
}
@@ -3129,6 +3157,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
case INTR_RSP_QUE_UPDATE_83XX:
qla24xx_process_response_queue(vha, rsp);
break;
+ case INTR_ATIO_QUE_UPDATE_27XX:
case INTR_ATIO_QUE_UPDATE:{
unsigned long flags2;
spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
@@ -3259,6 +3288,7 @@ qla24xx_msix_default(int irq, void *dev_id)
case INTR_RSP_QUE_UPDATE_83XX:
qla24xx_process_response_queue(vha, rsp);
break;
+ case INTR_ATIO_QUE_UPDATE_27XX:
case INTR_ATIO_QUE_UPDATE:{
unsigned long flags2;
spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
@@ -3347,7 +3377,8 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
.pre_vectors = QLA_BASE_VECTORS,
};
- if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+ if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
+ IS_ATIO_MSIX_CAPABLE(ha)) {
desc.pre_vectors++;
min_vecs++;
}
@@ -3374,7 +3405,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
ha->msix_count, ret);
ha->msix_count = ret;
/* Recalculate queue values */
- if (ha->mqiobase && ql2xmqsupport) {
+ if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
ha->max_req_queues = ha->msix_count - 1;
/* ATIOQ needs 1 vector. That's 1 less QPair */
@@ -3432,7 +3463,8 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
* If target mode is enable, also request the vector for the ATIO
* queue.
*/
- if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+ if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
+ IS_ATIO_MSIX_CAPABLE(ha)) {
qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
rsp->msix = qentry;
qentry->handle = rsp;
@@ -3486,11 +3518,14 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* If possible, enable MSI-X. */
- if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
- !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
- !IS_QLA27XX(ha))
+ if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
+ !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
+ !IS_QLAFX00(ha) && !IS_QLA27XX(ha)))
goto skip_msi;
+ if (ql2xenablemsix == 2)
+ goto skip_msix;
+
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
(ha->pdev->subsystem_device == 0x7040 ||
ha->pdev->subsystem_device == 0x7041 ||
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 99502fa90810..cb717d47339f 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1782,13 +1782,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
"Entered %s.\n", __func__);
pd24 = NULL;
- pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+ pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
ql_log(ql_log_warn, vha, 0x1050,
"Failed to allocate port database structure.\n");
+ fcport->query = 0;
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
mcp->mb[0] = MBC_GET_PORT_DATABASE;
if (opt != 0 && !IS_FWI2_CAPABLE(ha))
@@ -1823,17 +1823,32 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
if (IS_FWI2_CAPABLE(ha)) {
uint64_t zero = 0;
+ u8 current_login_state, last_login_state;
+
pd24 = (struct port_database_24xx *) pd;
/* Check for logged in state. */
- if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
- pd24->last_login_state != PDS_PRLI_COMPLETE) {
- ql_dbg(ql_dbg_mbx, vha, 0x1051,
- "Unable to verify login-state (%x/%x) for "
- "loop_id %x.\n", pd24->current_login_state,
- pd24->last_login_state, fcport->loop_id);
+ if (fcport->fc4f_nvme) {
+ current_login_state = pd24->current_login_state >> 4;
+ last_login_state = pd24->last_login_state >> 4;
+ } else {
+ current_login_state = pd24->current_login_state & 0xf;
+ last_login_state = pd24->last_login_state & 0xf;
+ }
+ fcport->current_login_state = pd24->current_login_state;
+ fcport->last_login_state = pd24->last_login_state;
+
+ /* Check for logged in state. */
+ if (current_login_state != PDS_PRLI_COMPLETE &&
+ last_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0x119a,
+ "Unable to verify login-state (%x/%x) for loop_id %x.\n",
+ current_login_state, last_login_state,
+ fcport->loop_id);
rval = QLA_FUNCTION_FAILED;
- goto gpd_error_out;
+
+ if (!fcport->query)
+ goto gpd_error_out;
}
if (fcport->loop_id == FC_NO_LOOP_ID ||
@@ -1912,6 +1927,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
gpd_error_out:
dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+ fcport->query = 0;
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1052,
@@ -2255,13 +2271,12 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
else
req = ha->req_q_map[0];
- lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
+ lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
ql_log(ql_log_warn, vha, 0x1062,
"Failed to allocate login IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(lg, 0, sizeof(struct logio_entry_24xx));
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
@@ -2525,13 +2540,12 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
"Entered %s.\n", __func__);
- lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
+ lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
ql_log(ql_log_warn, vha, 0x106e,
"Failed to allocate logout IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(lg, 0, sizeof(struct logio_entry_24xx));
req = vha->req;
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
@@ -2820,13 +2834,12 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
"Entered %s.\n", __func__);
- pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
+ pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
if (pmap == NULL) {
ql_log(ql_log_warn, vha, 0x1080,
"Memory alloc failed.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(pmap, 0, FCAL_MAP_SIZE);
mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
mcp->mb[2] = MSW(pmap_dma);
@@ -3014,13 +3027,12 @@ qla24xx_abort_command(srb_t *sp)
return QLA_FUNCTION_FAILED;
}
- abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
+ abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
if (abt == NULL) {
ql_log(ql_log_warn, vha, 0x108d,
"Failed to allocate abort IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(abt, 0, sizeof(struct abort_entry_24xx));
abt->entry_type = ABORT_IOCB_TYPE;
abt->entry_count = 1;
@@ -3098,13 +3110,12 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
rsp = req->rsp;
}
- tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
+ tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
if (tsk == NULL) {
ql_log(ql_log_warn, vha, 0x1093,
"Failed to allocate task management IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
tsk->p.tsk.entry_count = 1;
@@ -3753,6 +3764,38 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->vp_status,
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: Remote WWPN %8phC.\n",
+ rptid_entry->u.f1.port_name);
+
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: WWPN %8phC.\n",
+ vha->port_name);
+
+ /* N2N. direct connect */
+ if (IS_QLA27XX(ha) &&
+ ((rptid_entry->u.f1.flags>>1) & 0x7) == 2) {
+ /* if our portname is higher, then initiate N2N login */
+ if (wwn_to_u64(vha->port_name) >
+ wwn_to_u64(rptid_entry->u.f1.port_name)) {
+ // ??? qlt_update_host_map(vha, id);
+ vha->n2n_id = 0x1;
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: Setting n2n_update_needed for id %d\n",
+ vha->n2n_id);
+ } else {
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: Remote login - Waiting for WWPN %8phC.\n",
+ rptid_entry->u.f1.port_name);
+ }
+
+ memcpy(vha->n2n_port_name, rptid_entry->u.f1.port_name,
+ WWN_SIZE);
+ set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+ return;
+ }
/* buffer to buffer credit flag */
vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
@@ -3856,14 +3899,13 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
"Entered %s.\n", __func__);
- vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
+ vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
if (!vpmod) {
ql_log(ql_log_warn, vha, 0x10bc,
"Failed to allocate modify VP IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
vpmod->entry_count = 1;
vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
@@ -3934,13 +3976,12 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
return QLA_PARAMETER_ERROR;
- vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
+ vce = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
if (!vce) {
ql_log(ql_log_warn, vha, 0x10c2,
"Failed to allocate VP control IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
vce->entry_type = VP_CTRL_IOCB_TYPE;
vce->entry_count = 1;
@@ -4592,6 +4633,48 @@ qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
return rval;
}
+int
+qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
+ void *buf, uint16_t bufsiz)
+{
+ int rval, i;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint32_t *bp;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
+ mcp->mb[2] = MSW(buf_dma);
+ mcp->mb[3] = LSW(buf_dma);
+ mcp->mb[6] = MSW(MSD(buf_dma));
+ mcp->mb[7] = LSW(MSD(buf_dma));
+ mcp->mb[8] = bufsiz/4;
+ mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x115a,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
+ "Done %s.\n", __func__);
+ bp = (uint32_t *) buf;
+ for (i = 0; i < (bufsiz-4)/4; i++, bp++)
+ *bp = cpu_to_be32(*bp);
+ }
+
+ return rval;
+}
+
static int
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
{
@@ -6025,13 +6108,12 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
if (!vha->hw->flags.fw_started)
goto done;
- pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+ pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
ql_log(ql_log_warn, vha, 0xd047,
"Failed to allocate port database structure.\n");
goto done_free_sp;
}
- memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
memset(&mc, 0, sizeof(mc));
mc.mb[0] = MBC_GET_PORT_DATABASE;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cbf544dbf883..bd9f14bf7ac2 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -606,7 +606,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair, *tqpair;
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
qp_list_elem)
qla2xxx_delete_qpair(vha, qpair);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index e23a3d4c36f3..d5da3981cefe 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -2245,8 +2245,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
memcpy(fstatus.reserved_3,
pkt->reserved_2, 20 * sizeof(uint8_t));
- fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
- sizeof(struct fc_bsg_reply);
+ fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
sizeof(struct qla_mt_iocb_rsp_fx00));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 50286cf02eca..46f2d0cf7c0d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -268,6 +268,15 @@ MODULE_PARM_DESC(ql2xautodetectsfp,
"Detect SFP range and set appropriate distance.\n"
"1 (Default): Enable\n");
+int ql2xenablemsix = 1;
+module_param(ql2xenablemsix, int, 0444);
+MODULE_PARM_DESC(ql2xenablemsix,
+ "Set to enable MSI or MSI-X interrupt mechanism.\n"
+ " Default is 1, enable MSI-X interrupt mechanism.\n"
+ " 0 -- enable traditional pin-based mechanism.\n"
+ " 1 -- enable MSI-X interrupt mechanism.\n"
+ " 2 -- enable MSI interrupt mechanism.\n");
+
/*
* SCSI host template entry points
*/
@@ -386,7 +395,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
/* init qpair to this cpu. Will adjust at run time. */
- qla_cpu_update(rsp->qpair, smp_processor_id());
+ qla_cpu_update(rsp->qpair, raw_smp_processor_id());
ha->base_qpair->pdev = ha->pdev;
if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
@@ -422,7 +431,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
qla_init_base_qpair(vha, req, rsp);
- if (ql2xmqsupport && ha->max_qpairs) {
+ if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
GFP_KERNEL);
if (!ha->queue_pair_map) {
@@ -1965,7 +1974,8 @@ skip_pio:
/* Determine queue resources */
ha->max_req_queues = ha->max_rsp_queues = 1;
ha->msix_count = QLA_BASE_VECTORS;
- if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+ if (!ql2xmqsupport || !ql2xnvmeenable ||
+ (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
@@ -2062,7 +2072,7 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
* By default, driver uses at least two msix vectors
* (default & rspq)
*/
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
/* MB interrupt uses 1 vector */
ha->max_req_queues = ha->msix_count - 1;
@@ -3080,9 +3090,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ql_dbg(ql_dbg_init, base_vha, 0x0192,
"blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
- } else
- ql_dbg(ql_dbg_init, base_vha, 0x0193,
- "blk/scsi-mq disabled.\n");
+ } else {
+ if (ql2xnvmeenable) {
+ host->nr_hw_queues = ha->max_qpairs;
+ ql_dbg(ql_dbg_init, base_vha, 0x0194,
+ "FC-NVMe support is enabled, HW queues=%d\n",
+ host->nr_hw_queues);
+ } else {
+ ql_dbg(ql_dbg_init, base_vha, 0x0193,
+ "blk/scsi-mq disabled.\n");
+ }
+ }
qlt_probe_one_stage1(base_vha, ha);
@@ -4743,7 +4761,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
if (pla)
qlt_plogi_ack_unref(vha, pla);
else
- qla24xx_async_gnl(vha, fcport);
+ qla24xx_async_gffid(vha, fcport);
}
if (free_fcport) {
@@ -6292,7 +6310,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
switch (state) {
case pci_channel_io_normal:
ha->flags.eeh_busy = 0;
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
@@ -6309,7 +6327,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
pci_disable_device(pdev);
/* Return back all IOs */
qla2x00_abort_all_cmds(vha, DID_RESET << 16);
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
@@ -6317,7 +6335,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
case pci_channel_io_perm_failure:
ha->flags.pci_channel_io_perm_failure = 1;
qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index f05cfc83c9c8..18069edd4773 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -996,7 +996,7 @@ static void qlt_free_session_done(struct work_struct *work)
if (logout_started) {
bool traced = false;
- while (!ACCESS_ONCE(sess->logout_completed)) {
+ while (!READ_ONCE(sess->logout_completed)) {
if (!traced) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
"%s: waiting for sess %p logout\n",
@@ -6546,6 +6546,7 @@ void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
+ struct init_cb_24xx *icb;
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6553,14 +6554,19 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
- if (IS_ATIO_MSIX_CAPABLE(ha)) {
+ icb = (struct init_cb_24xx *)ha->init_cb;
+
+ if ((ql2xenablemsix != 0) && IS_ATIO_MSIX_CAPABLE(ha)) {
struct qla_msix_entry *msix = &ha->msix_entries[2];
- struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
icb->msix_atio = cpu_to_le16(msix->entry);
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
msix->entry);
+ } else if (ql2xenablemsix == 0) {
+ icb->firmware_options_2 |= cpu_to_le32(BIT_26);
+ ql_dbg(ql_dbg_init, vha, 0xf07f,
+ "Registering INTx vector for ATIO.\n");
}
}
@@ -6805,7 +6811,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
if (!QLA_TGT_MODE_ENABLED())
return;
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
} else {
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 8c4b505c9f66..b6ec02b96d3d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.01-k"
+#define QLA2XXX_VERSION "10.00.00.02-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 09ba494f8896..e4f037f0f38b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -953,9 +953,9 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
}
-static const char * inq_vendor_id = "Linux ";
-static const char * inq_product_id = "scsi_debug ";
-static const char *inq_product_rev = "0186"; /* version less '.' */
+static char sdebug_inq_vendor_id[9] = "Linux ";
+static char sdebug_inq_product_id[17] = "scsi_debug ";
+static char sdebug_inq_product_rev[5] = "0186"; /* version less '.' */
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
@@ -975,8 +975,8 @@ static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
arr[0] = 0x2; /* ASCII */
arr[1] = 0x1;
arr[2] = 0x0;
- memcpy(&arr[4], inq_vendor_id, 8);
- memcpy(&arr[12], inq_product_id, 16);
+ memcpy(&arr[4], sdebug_inq_vendor_id, 8);
+ memcpy(&arr[12], sdebug_inq_product_id, 16);
memcpy(&arr[28], dev_id_str, dev_id_str_len);
num = 8 + 16 + dev_id_str_len;
arr[3] = num;
@@ -1408,9 +1408,9 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[6] = 0x10; /* claim: MultiP */
/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
arr[7] = 0xa; /* claim: LINKED + CMDQUE */
- memcpy(&arr[8], inq_vendor_id, 8);
- memcpy(&arr[16], inq_product_id, 16);
- memcpy(&arr[32], inq_product_rev, 4);
+ memcpy(&arr[8], sdebug_inq_vendor_id, 8);
+ memcpy(&arr[16], sdebug_inq_product_id, 16);
+ memcpy(&arr[32], sdebug_inq_product_rev, 4);
/* version descriptors (2 bytes each) follow */
put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
@@ -3001,11 +3001,11 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
if (-1 == ret) {
write_unlock_irqrestore(&atomic_rw, iflags);
return DID_ERROR << 16;
- } else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
+ } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
sdev_printk(KERN_INFO, scp->device,
- "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
+ "%s: %s: lb size=%u, IO sent=%d bytes\n",
my_name, "write same",
- num * sdebug_sector_size, ret);
+ sdebug_sector_size, ret);
/* Copy first sector to remaining blocks */
for (i = 1 ; i < num ; i++)
@@ -4151,6 +4151,12 @@ module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
+module_param_string(inq_vendor, sdebug_inq_vendor_id,
+ sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
+module_param_string(inq_product, sdebug_inq_product_id,
+ sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
+module_param_string(inq_rev, sdebug_inq_product_rev,
+ sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
@@ -4202,6 +4208,9 @@ MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
+MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
+MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
+MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\"0186\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 6bf43d94cdc0..fe5a9ea27b5e 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -135,6 +135,7 @@ static struct {
{"3PARdata", "VV", NULL, BLIST_REPORTLUN2},
{"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN},
{"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
+ {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES},
{"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
{"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
{"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
@@ -161,7 +162,7 @@ static struct {
{"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */
{"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */
{"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN},
+ {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
{"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
{"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
{"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},
@@ -174,7 +175,7 @@ static struct {
{"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
{"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
{"HITACHI", "HUS1530", "*", BLIST_NO_DIF},
- {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2},
+ {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
{"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
@@ -305,8 +306,8 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
*/
to[from_length] = '\0';
} else {
- /*
- * space pad the string if it is short.
+ /*
+ * space pad the string if it is short.
*/
strncpy(&to[from_length], spaces,
to_length - from_length);
@@ -326,10 +327,10 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
* @flags: if strflags NULL, use this flag value
*
* Description:
- * Create and add one dev_info entry for @vendor, @model, @strflags or
- * @flag. If @compatible, add to the tail of the list, do not space
- * pad, and set devinfo->compatible. The scsi_static_device_list entries
- * are added with @compatible 1 and @clfags NULL.
+ * Create and add one dev_info entry for @vendor, @model, @strflags or
+ * @flag. If @compatible, add to the tail of the list, do not space
+ * pad, and set devinfo->compatible. The scsi_static_device_list entries
+ * are added with @compatible 1 and @clfags NULL.
*
* Returns: 0 OK, -error on failure.
**/
@@ -351,11 +352,11 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
* @key: specify list to use
*
* Description:
- * Create and add one dev_info entry for @vendor, @model,
- * @strflags or @flag in list specified by @key. If @compatible,
- * add to the tail of the list, do not space pad, and set
- * devinfo->compatible. The scsi_static_device_list entries are
- * added with @compatible 1 and @clfags NULL.
+ * Create and add one dev_info entry for @vendor, @model,
+ * @strflags or @flag in list specified by @key. If @compatible,
+ * add to the tail of the list, do not space pad, and set
+ * devinfo->compatible. The scsi_static_device_list entries are
+ * added with @compatible 1 and @clfags NULL.
*
* Returns: 0 OK, -error on failure.
**/
@@ -400,13 +401,13 @@ EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
/**
* scsi_dev_info_list_find - find a matching dev_info list entry.
- * @vendor: vendor string
- * @model: model (product) string
+ * @vendor: full vendor string
+ * @model: full model (product) string
* @key: specify list to use
*
* Description:
* Finds the first dev_info entry matching @vendor, @model
- * in list specified by @key.
+ * in list specified by @key.
*
* Returns: pointer to matching entry, or ERR_PTR on failure.
**/
@@ -416,7 +417,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
scsi_devinfo_lookup_by_key(key);
- size_t vmax, mmax;
+ size_t vmax, mmax, mlen;
const char *vskip, *mskip;
if (IS_ERR(devinfo_table))
@@ -455,22 +456,25 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
dev_info_list) {
if (devinfo->compatible) {
/*
- * Behave like the older version of get_device_flags.
+ * vendor strings must be an exact match
*/
- if (memcmp(devinfo->vendor, vskip, vmax) ||
- (vmax < sizeof(devinfo->vendor) &&
- devinfo->vendor[vmax]))
+ if (vmax != strlen(devinfo->vendor) ||
+ memcmp(devinfo->vendor, vskip, vmax))
continue;
- if (memcmp(devinfo->model, mskip, mmax) ||
- (mmax < sizeof(devinfo->model) &&
- devinfo->model[mmax]))
+
+ /*
+ * @model specifies the full string, and
+ * must be longer than or equal to devinfo->model
+ */
+ mlen = strlen(devinfo->model);
+ if (mmax < mlen || memcmp(devinfo->model, mskip, mlen))
continue;
return devinfo;
} else {
if (!memcmp(devinfo->vendor, vendor,
- sizeof(devinfo->vendor)) &&
- !memcmp(devinfo->model, model,
- sizeof(devinfo->model)))
+ sizeof(devinfo->vendor)) &&
+ !memcmp(devinfo->model, model,
+ sizeof(devinfo->model)))
return devinfo;
}
}
@@ -509,10 +513,10 @@ EXPORT_SYMBOL(scsi_dev_info_list_del_keyed);
* @dev_list: string of device flags to add
*
* Description:
- * Parse dev_list, and add entries to the scsi_dev_info_list.
- * dev_list is of the form "vendor:product:flag,vendor:product:flag".
- * dev_list is modified via strsep. Can be called for command line
- * addition, for proc or mabye a sysfs interface.
+ * Parse dev_list, and add entries to the scsi_dev_info_list.
+ * dev_list is of the form "vendor:product:flag,vendor:product:flag".
+ * dev_list is modified via strsep. Can be called for command line
+ * addition, for proc or maybe a sysfs interface.
*
* Returns: 0 if OK, -error on failure.
**/
@@ -702,7 +706,7 @@ static int proc_scsi_devinfo_open(struct inode *inode, struct file *file)
return seq_open(file, &scsi_devinfo_seq_ops);
}
-/*
+/*
* proc_scsi_dev_info_write - allow additions to scsi_dev_info_list via /proc.
*
* Description: Adds a black/white list entry for vendor and model with an
@@ -841,8 +845,8 @@ EXPORT_SYMBOL(scsi_dev_info_remove_list);
* scsi_init_devinfo - set up the dynamic device list.
*
* Description:
- * Add command line entries from scsi_dev_flags, then add
- * scsi_static_device_list entries to the scsi device info list.
+ * Add command line entries from scsi_dev_flags, then add
+ * scsi_static_device_list entries to the scsi device info list.
*/
int __init scsi_init_devinfo(void)
{
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 84addee05be6..2b785d09d5bd 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -126,20 +126,36 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name)
static int scsi_dh_handler_attach(struct scsi_device *sdev,
struct scsi_device_handler *scsi_dh)
{
- int error;
+ int error, ret = 0;
if (!try_module_get(scsi_dh->module))
return -EINVAL;
error = scsi_dh->attach(sdev);
- if (error) {
- sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%d)\n",
- scsi_dh->name, error);
+ if (error != SCSI_DH_OK) {
+ switch (error) {
+ case SCSI_DH_NOMEM:
+ ret = -ENOMEM;
+ break;
+ case SCSI_DH_RES_TEMP_UNAVAIL:
+ ret = -EAGAIN;
+ break;
+ case SCSI_DH_DEV_UNSUPP:
+ case SCSI_DH_NOSYS:
+ ret = -ENODEV;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret != -ENODEV)
+ sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%d)\n",
+ scsi_dh->name, error);
module_put(scsi_dh->module);
} else
sdev->handler = scsi_dh;
- return error;
+ return ret;
}
/*
@@ -153,18 +169,20 @@ static void scsi_dh_handler_detach(struct scsi_device *sdev)
module_put(sdev->handler->module);
}
-int scsi_dh_add_device(struct scsi_device *sdev)
+void scsi_dh_add_device(struct scsi_device *sdev)
{
struct scsi_device_handler *devinfo = NULL;
const char *drv;
- int err = 0;
drv = scsi_dh_find_driver(sdev);
if (drv)
devinfo = __scsi_dh_lookup(drv);
+ /*
+ * device_handler is optional, so ignore errors
+ * from scsi_dh_handler_attach()
+ */
if (devinfo)
- err = scsi_dh_handler_attach(sdev, devinfo);
- return err;
+ (void)scsi_dh_handler_attach(sdev, devinfo);
}
void scsi_dh_release_device(struct scsi_device *sdev)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index dab876c65473..62b56de38ae8 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -403,6 +403,12 @@ static void scsi_report_sense(struct scsi_device *sdev,
"threshold.\n");
}
+ if (sshdr->asc == 0x29) {
+ evt_type = SDEV_EVT_POWER_ON_RESET_OCCURRED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Power-on or device reset occurred\n");
+ }
+
if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
sdev_printk(KERN_WARNING, sdev,
@@ -579,6 +585,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
case ILLEGAL_REQUEST:
if (sshdr.asc == 0x20 || /* Invalid command operation code */
sshdr.asc == 0x21 || /* Logical block address out of range */
+ sshdr.asc == 0x22 || /* Invalid function */
sshdr.asc == 0x24 || /* Invalid field in cdb */
sshdr.asc == 0x26 || /* Parameter value invalid */
sshdr.asc == 0x27) { /* Write protected */
@@ -1747,16 +1754,12 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
* that it indicates SUCCESS.
*/
return SUCCESS;
+ case DID_SOFT_ERROR:
/*
* when the low level driver returns did_soft_error,
* it is responsible for keeping an internal retry counter
* in order to avoid endless loops (db)
- *
- * actually this is a bug in this function here. we should
- * be mindful of the maximum number of retries specified
- * and not get stuck in a loop.
*/
- case DID_SOFT_ERROR:
goto maybe_retry;
case DID_IMM_RETRY:
return NEEDS_RETRY;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index bcc1694cebcd..1cbc497e00bd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -252,9 +252,9 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
struct scsi_request *rq;
int ret = DRIVER_ERROR << 24;
- req = blk_get_request(sdev->request_queue,
+ req = blk_get_request_flags(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
if (IS_ERR(req))
return ret;
rq = scsi_req(req);
@@ -268,7 +268,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
rq->retries = retries;
req->timeout = timeout;
req->cmd_flags |= flags;
- req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT;
+ req->rq_flags |= rq_flags | RQF_QUIET;
/*
* head injection *required* here otherwise quiesce won't work
@@ -1301,7 +1301,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
/*
* If the devices is blocked we defer normal commands.
*/
- if (!(req->rq_flags & RQF_PREEMPT))
+ if (req && !(req->rq_flags & RQF_PREEMPT))
ret = BLKPREP_DEFER;
break;
default:
@@ -1310,7 +1310,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
* special commands. In particular any user initiated
* command is not allowed.
*/
- if (!(req->rq_flags & RQF_PREEMPT))
+ if (req && !(req->rq_flags & RQF_PREEMPT))
ret = BLKPREP_KILL;
break;
}
@@ -1750,7 +1750,10 @@ static void scsi_done(struct scsi_cmnd *cmd)
*
* Returns: Nothing
*
- * Lock status: IO request lock assumed to be held when called.
+ * Lock status: request queue lock assumed to be held when called.
+ *
+ * Note: See sd_zbc.c sd_zbc_write_lock_zone() for write order
+ * protection for ZBC disks.
*/
static void scsi_request_fn(struct request_queue *q)
__releases(q->queue_lock)
@@ -1940,6 +1943,33 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
blk_mq_complete_request(cmd->request);
}
+static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct scsi_device *sdev = q->queuedata;
+
+ atomic_dec(&sdev->device_busy);
+ put_device(&sdev->sdev_gendev);
+}
+
+static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct scsi_device *sdev = q->queuedata;
+
+ if (!get_device(&sdev->sdev_gendev))
+ goto out;
+ if (!scsi_dev_queue_ready(q, sdev))
+ goto out_put_device;
+
+ return true;
+
+out_put_device:
+ put_device(&sdev->sdev_gendev);
+out:
+ return false;
+}
+
static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1953,16 +1983,11 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
ret = prep_to_mq(scsi_prep_state_check(sdev, req));
if (ret != BLK_STS_OK)
- goto out;
+ goto out_put_budget;
ret = BLK_STS_RESOURCE;
- if (!get_device(&sdev->sdev_gendev))
- goto out;
-
- if (!scsi_dev_queue_ready(q, sdev))
- goto out_put_device;
if (!scsi_target_queue_ready(shost, sdev))
- goto out_dec_device_busy;
+ goto out_put_budget;
if (!scsi_host_queue_ready(q, shost, sdev))
goto out_dec_target_busy;
@@ -1993,15 +2018,12 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
out_dec_host_busy:
- atomic_dec(&shost->host_busy);
+ atomic_dec(&shost->host_busy);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
-out_dec_device_busy:
- atomic_dec(&sdev->device_busy);
-out_put_device:
- put_device(&sdev->sdev_gendev);
-out:
+out_put_budget:
+ scsi_mq_put_budget(hctx);
switch (ret) {
case BLK_STS_OK:
break;
@@ -2205,6 +2227,8 @@ struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
}
static const struct blk_mq_ops scsi_mq_ops = {
+ .get_budget = scsi_mq_get_budget,
+ .put_budget = scsi_mq_put_budget,
.queue_rq = scsi_queue_rq,
.complete = scsi_softirq_done,
.timeout = scsi_timeout,
@@ -2733,6 +2757,9 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
break;
+ case SDEV_EVT_POWER_ON_RESET_OCCURRED:
+ envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
+ break;
default:
/* do nothing */
break;
@@ -2837,6 +2864,7 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
case SDEV_EVT_LUN_CHANGE_REPORTED:
case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
+ case SDEV_EVT_POWER_ON_RESET_OCCURRED:
default:
/* do nothing */
break;
@@ -2919,21 +2947,37 @@ static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
int
scsi_device_quiesce(struct scsi_device *sdev)
{
+ struct request_queue *q = sdev->request_queue;
int err;
+ /*
+ * It is allowed to call scsi_device_quiesce() multiple times from
+ * the same context but concurrent scsi_device_quiesce() calls are
+ * not allowed.
+ */
+ WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
+
+ blk_set_preempt_only(q);
+
+ blk_mq_freeze_queue(q);
+ /*
+ * Ensure that the effect of blk_set_preempt_only() will be visible
+ * for percpu_ref_tryget() callers that occur after the queue
+ * unfreeze even if the queue was already frozen before this function
+ * was called. See also https://lwn.net/Articles/573497/.
+ */
+ synchronize_rcu();
+ blk_mq_unfreeze_queue(q);
+
mutex_lock(&sdev->state_mutex);
err = scsi_device_set_state(sdev, SDEV_QUIESCE);
+ if (err == 0)
+ sdev->quiesced_by = current;
+ else
+ blk_clear_preempt_only(q);
mutex_unlock(&sdev->state_mutex);
- if (err)
- return err;
-
- scsi_run_queue(sdev->request_queue);
- while (atomic_read(&sdev->device_busy)) {
- msleep_interruptible(200);
- scsi_run_queue(sdev->request_queue);
- }
- return 0;
+ return err;
}
EXPORT_SYMBOL(scsi_device_quiesce);
@@ -2953,9 +2997,11 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
- if (sdev->sdev_state == SDEV_QUIESCE &&
- scsi_device_set_state(sdev, SDEV_RUNNING) == 0)
- scsi_run_queue(sdev->request_queue);
+ WARN_ON_ONCE(!sdev->quiesced_by);
+ sdev->quiesced_by = NULL;
+ blk_clear_preempt_only(sdev->request_queue);
+ if (sdev->sdev_state == SDEV_QUIESCE)
+ scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);
diff --git a/drivers/scsi/scsi_logging.h b/drivers/scsi/scsi_logging.h
index 6907c924df72..836185de28c4 100644
--- a/drivers/scsi/scsi_logging.h
+++ b/drivers/scsi/scsi_logging.h
@@ -4,10 +4,10 @@
/*
- * This defines the scsi logging feature. It is a means by which the user
- * can select how much information they get about various goings on, and it
- * can be really useful for fault tracing. The logging word is divided into
- * 8 nibbles, each of which describes a loglevel. The division of things is
+ * This defines the scsi logging feature. It is a means by which the user can
+ * select how much information they get about various goings on, and it can be
+ * really useful for fault tracing. The logging word is divided into 10 3-bit
+ * bitfields, each of which describes a loglevel. The division of things is
* somewhat arbitrary, and the division of the word could be changed if it
* were really needed for any reason. The numbers below are the only place
* where these are specified. For a first go-around, 3 bits is more than
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index d7669caa9893..df1368aea9a3 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -177,10 +177,10 @@ extern struct async_domain scsi_sd_probe_domain;
/* scsi_dh.c */
#ifdef CONFIG_SCSI_DH
-int scsi_dh_add_device(struct scsi_device *sdev);
+void scsi_dh_add_device(struct scsi_device *sdev);
void scsi_dh_release_device(struct scsi_device *sdev);
#else
-static inline int scsi_dh_add_device(struct scsi_device *sdev) { return 0; }
+static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
#endif
static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 40124648a07b..a0f2a20ea9e9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -988,6 +988,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
scsi_attach_vpd(sdev);
sdev->max_queue_depth = sdev->queue_depth;
+ sdev->sdev_bflags = *bflags;
/*
* Ok, the device is now all set up, we can
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index f796bd61f3f0..50e7d7e4a861 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -20,6 +20,7 @@
#include <scsi/scsi_dh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_driver.h>
+#include <scsi/scsi_devinfo.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
@@ -966,6 +967,41 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
+#define BLIST_FLAG_NAME(name) [ilog2(BLIST_##name)] = #name
+static const char *const sdev_bflags_name[] = {
+#include "scsi_devinfo_tbl.c"
+};
+#undef BLIST_FLAG_NAME
+
+static ssize_t
+sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ int i;
+ ssize_t len = 0;
+
+ for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) {
+ const char *name = NULL;
+
+ if (!(sdev->sdev_bflags & BIT(i)))
+ continue;
+ if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i])
+ name = sdev_bflags_name[i];
+
+ if (name)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%s%s", len ? " " : "", name);
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%sINVALID_BIT(%d)", len ? " " : "", i);
+ }
+ if (len)
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ return len;
+}
+static DEVICE_ATTR(blacklist, S_IRUGO, sdev_show_blacklist, NULL);
+
#ifdef CONFIG_SCSI_DH
static ssize_t
sdev_show_dh_state(struct device *dev, struct device_attribute *attr,
@@ -1151,6 +1187,7 @@ static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_queue_depth.attr,
&dev_attr_queue_type.attr,
&dev_attr_wwid.attr,
+ &dev_attr_blacklist.attr,
#ifdef CONFIG_SCSI_DH
&dev_attr_dh_state.attr,
&dev_attr_access_state.attr,
@@ -1234,13 +1271,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
scsi_autopm_get_device(sdev);
- error = scsi_dh_add_device(sdev);
- if (error)
- /*
- * device_handler is optional, so any error can be ignored
- */
- sdev_printk(KERN_INFO, sdev,
- "failed to add device handler: %d\n", error);
+ scsi_dh_add_device(sdev);
error = device_add(&sdev->sdev_gendev);
if (error) {
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 8c46a6d536af..4664024bd5d3 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -267,6 +267,8 @@ static const struct {
{ FC_PORTSPEED_50GBIT, "50 Gbit" },
{ FC_PORTSPEED_100GBIT, "100 Gbit" },
{ FC_PORTSPEED_25GBIT, "25 Gbit" },
+ { FC_PORTSPEED_64BIT, "64 Gbit" },
+ { FC_PORTSPEED_128BIT, "128 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 7404d26895f5..f4b52b44b966 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3420,7 +3420,7 @@ iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
shost = scsi_host_lookup(ev->u.get_host_stats.host_no);
if (!shost) {
- pr_err("%s: failed. Cound not find host no %u\n",
+ pr_err("%s: failed. Could not find host no %u\n",
__func__, ev->u.get_host_stats.host_no);
return -ENODEV;
}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 319dff970237..736a1f4f9676 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -177,7 +177,7 @@ static int sas_smp_dispatch(struct bsg_job *job)
if (!scsi_is_host_device(job->dev))
rphy = dev_to_rphy(job->dev);
- if (!job->req->next_rq) {
+ if (!job->reply_payload.payload_len) {
dev_warn(job->dev, "space for a smp response is missing\n");
bsg_job_done(job, -EINVAL, 0);
return 0;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d175c5c5ccf8..24fe68522716 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -231,11 +231,15 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
+ bool v;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdp->manage_start_stop = v;
return count;
}
@@ -253,6 +257,7 @@ static ssize_t
allow_restart_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ bool v;
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@@ -262,7 +267,10 @@ allow_restart_store(struct device *dev, struct device_attribute *attr,
if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
return -EINVAL;
- sdp->allow_restart = simple_strtoul(buf, NULL, 10);
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdp->allow_restart = v;
return count;
}
@@ -906,6 +914,26 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
else
sdkp->zeroing_mode = SD_ZERO_WRITE;
+ if (sdkp->max_ws_blocks &&
+ sdkp->physical_block_size > logical_block_size) {
+ /*
+ * Reporting a maximum number of blocks that is not aligned
+ * on the device physical size would cause a large write same
+ * request to be split into physically unaligned chunks by
+ * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
+ * even if the caller of these functions took care to align the
+ * large request. So make sure the maximum reported is aligned
+ * to the device physical block size. This is only an optional
+ * optimization for regular disks, but this is mandatory to
+ * avoid failure of large write same requests directed at
+ * sequential write required zones of host-managed ZBC disks.
+ */
+ sdkp->max_ws_blocks =
+ round_down(sdkp->max_ws_blocks,
+ bytes_to_logical(sdkp->device,
+ sdkp->physical_block_size));
+ }
+
out:
blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
(logical_block_size >> 9));
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 8aa54779aac1..27793b9f54c0 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -28,38 +28,18 @@
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_driver.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_eh.h>
#include "sd.h"
-#include "scsi_priv.h"
-
-enum zbc_zone_type {
- ZBC_ZONE_TYPE_CONV = 0x1,
- ZBC_ZONE_TYPE_SEQWRITE_REQ,
- ZBC_ZONE_TYPE_SEQWRITE_PREF,
- ZBC_ZONE_TYPE_RESERVED,
-};
-
-enum zbc_zone_cond {
- ZBC_ZONE_COND_NO_WP,
- ZBC_ZONE_COND_EMPTY,
- ZBC_ZONE_COND_IMP_OPEN,
- ZBC_ZONE_COND_EXP_OPEN,
- ZBC_ZONE_COND_CLOSED,
- ZBC_ZONE_COND_READONLY = 0xd,
- ZBC_ZONE_COND_FULL,
- ZBC_ZONE_COND_OFFLINE,
-};
/**
- * Convert a zone descriptor to a zone struct.
+ * sd_zbc_parse_report - Convert a zone descriptor to a struct blk_zone.
+ * @sdkp: The disk the report originated from
+ * @buf: Address of the report zone descriptor
+ * @zone: the destination zone structure
+ *
+ * All LBA-sized values are converted to 512B sector units.
*/
-static void sd_zbc_parse_report(struct scsi_disk *sdkp,
- u8 *buf,
+static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
struct blk_zone *zone)
{
struct scsi_device *sdp = sdkp->device;
@@ -82,7 +62,13 @@ static void sd_zbc_parse_report(struct scsi_disk *sdkp,
}
/**
- * Issue a REPORT ZONES scsi command.
+ * sd_zbc_report_zones - Issue a REPORT ZONES scsi command.
+ * @sdkp: The target disk
+ * @buf: Buffer to use for the reply
+ * @buflen: the buffer size
+ * @lba: Start LBA of the report
+ *
+ * For internal use during device validation.
*/
static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
unsigned int buflen, sector_t lba)
@@ -123,6 +109,12 @@ static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
return 0;
}
+/**
+ * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command
+ * @cmd: The command to setup
+ *
+ * Called from sd_init_command() for a REQ_OP_ZONE_REPORT request.
+ */
int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
@@ -165,6 +157,14 @@ int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
return BLKPREP_OK;
}
+/**
+ * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply.
+ * @scmd: The completed report zones command
+ * @good_bytes: reply size in bytes
+ *
+ * Convert all reported zone descriptors to struct blk_zone. The conversion
+ * is done in-place, directly in the request specified sg buffer.
+ */
static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
unsigned int good_bytes)
{
@@ -220,17 +220,32 @@ static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
local_irq_restore(flags);
}
+/**
+ * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
+ * @sdkp: The target disk
+ */
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
}
+/**
+ * sd_zbc_zone_no - Get the number of the zone containing a sector.
+ * @sdkp: The target disk
+ * @sector: 512B sector address contained in the zone
+ */
static inline unsigned int sd_zbc_zone_no(struct scsi_disk *sdkp,
sector_t sector)
{
return sectors_to_logical(sdkp->device, sector) >> sdkp->zone_shift;
}
+/**
+ * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
+ * @cmd: the command to setup
+ *
+ * Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
+ */
int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
@@ -263,6 +278,23 @@ int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
return BLKPREP_OK;
}
+/**
+ * sd_zbc_write_lock_zone - Write lock a sequential zone.
+ * @cmd: write command
+ *
+ * Called from sd_init_command() for write requests (standard write, write same or
+ * write zeroes operations). If the request target zone is not already locked,
+ * the zone is locked and BLKPREP_OK returned, allowing the request to proceed
+ * through dispatch in scsi_request_fn(). Otherwise, BLKPREP_DEFER is returned,
+ * forcing the request to wait for the zone to be unlocked, that is, for the
+ * previously issued write request targeting the same zone to complete.
+ *
+ * This is called from blk_peek_request() context with the queue lock held and
+ * before the request is removed from the scheduler. As a result, multiple
+ * contexts concurrently executing scsi_request_fn() cannot result in write
+ * sequence reordering as only a single write request per zone is allowed to
+ * proceed.
+ */
int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
@@ -285,10 +317,7 @@ int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
* Do not issue more than one write at a time per
* zone. This solves write ordering problems due to
* the unlocking of the request queue in the dispatch
- * path in the non scsi-mq case. For scsi-mq, this
- * also avoids potential write reordering when multiple
- * threads running on different CPUs write to the same
- * zone (with a synchronized sequential pattern).
+ * path in the non scsi-mq case.
*/
if (sdkp->zones_wlock &&
test_and_set_bit(zno, sdkp->zones_wlock))
@@ -300,6 +329,13 @@ int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
return BLKPREP_OK;
}
+/**
+ * sd_zbc_write_unlock_zone - Write unlock a sequential zone.
+ * @cmd: write command
+ *
+ * Called from sd_uninit_cmd(). Unlocking the request target zone will allow
+ * dispatching the next write request for the zone.
+ */
void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
@@ -314,8 +350,16 @@ void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd)
}
}
-void sd_zbc_complete(struct scsi_cmnd *cmd,
- unsigned int good_bytes,
+/**
+ * sd_zbc_complete - ZBC command post processing.
+ * @cmd: Completed command
+ * @good_bytes: Command reply bytes
+ * @sshdr: command sense header
+ *
+ * Called from sd_done(). Process report zones reply and handle reset zone
+ * and write commands errors.
+ */
+void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr)
{
int result = cmd->result;
@@ -360,7 +404,11 @@ void sd_zbc_complete(struct scsi_cmnd *cmd,
}
/**
- * Read zoned block device characteristics (VPD page B6).
+ * sd_zbc_read_zoned_characteristics - Read zoned block device characteristics
+ * @sdkp: Target disk
+ * @buf: Buffer in which to store the VPD page data
+ *
+ * Read VPD page B6.
*/
static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
unsigned char *buf)
@@ -375,25 +423,31 @@ static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
if (sdkp->device->type != TYPE_ZBC) {
/* Host-aware */
sdkp->urswrz = 1;
- sdkp->zones_optimal_open = get_unaligned_be64(&buf[8]);
- sdkp->zones_optimal_nonseq = get_unaligned_be64(&buf[12]);
+ sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
+ sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
sdkp->zones_max_open = 0;
} else {
/* Host-managed */
sdkp->urswrz = buf[4] & 1;
sdkp->zones_optimal_open = 0;
sdkp->zones_optimal_nonseq = 0;
- sdkp->zones_max_open = get_unaligned_be64(&buf[16]);
+ sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
}
return 0;
}
/**
- * Check reported capacity.
+ * sd_zbc_check_capacity - Check reported capacity.
+ * @sdkp: Target disk
+ * @buf: Buffer to use for commands
+ *
+ * A ZBC drive may report only the capacity of its conventional zones starting at
+ * LBA 0. This is indicated by the RC_BASIS field of the read capacity reply.
+ * Check this here. If the disk reported only its conventional zones capacity,
+ * get the total capacity by doing a report zones.
*/
-static int sd_zbc_check_capacity(struct scsi_disk *sdkp,
- unsigned char *buf)
+static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
{
sector_t lba;
int ret;
@@ -421,8 +475,15 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp,
return 0;
}
-#define SD_ZBC_BUF_SIZE 131072
+#define SD_ZBC_BUF_SIZE 131072U
+/**
+ * sd_zbc_check_zone_size - Check the device zone sizes
+ * @sdkp: Target disk
+ *
+ * Check that all zones of the device are the same size. The last zone can
+ * however be smaller. The zone size must also be a power-of-two number of LBAs.
+ */
static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
{
u64 zone_blocks;
@@ -465,10 +526,7 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
/* Parse REPORT ZONES header */
list_length = get_unaligned_be32(&buf[0]) + 64;
rec = buf + 64;
- if (list_length < SD_ZBC_BUF_SIZE)
- buf_len = list_length;
- else
- buf_len = SD_ZBC_BUF_SIZE;
+ buf_len = min(list_length, SD_ZBC_BUF_SIZE);
/* Parse zone descriptors */
while (rec < buf + buf_len) {
@@ -523,6 +581,7 @@ out:
}
sdkp->zone_blocks = zone_blocks;
+ sdkp->zone_shift = ilog2(zone_blocks);
return 0;
}
@@ -530,13 +589,15 @@ out:
static int sd_zbc_setup(struct scsi_disk *sdkp)
{
+ /* READ16/WRITE16 is mandatory for ZBC disks */
+ sdkp->device->use_16_for_rw = 1;
+ sdkp->device->use_10_for_rw = 0;
+
/* chunk_sectors indicates the zone size */
blk_queue_chunk_sectors(sdkp->disk->queue,
logical_to_sectors(sdkp->device, sdkp->zone_blocks));
- sdkp->zone_shift = ilog2(sdkp->zone_blocks);
- sdkp->nr_zones = sdkp->capacity >> sdkp->zone_shift;
- if (sdkp->capacity & (sdkp->zone_blocks - 1))
- sdkp->nr_zones++;
+ sdkp->nr_zones =
+ round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
if (!sdkp->zones_wlock) {
sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones),
@@ -549,8 +610,7 @@ static int sd_zbc_setup(struct scsi_disk *sdkp)
return 0;
}
-int sd_zbc_read_zones(struct scsi_disk *sdkp,
- unsigned char *buf)
+int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
int ret;
@@ -561,7 +621,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp,
*/
return 0;
-
/* Get zoned block device characteristics */
ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
if (ret)
@@ -598,10 +657,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp,
if (ret)
goto err;
- /* READ16/WRITE16 is mandatory for ZBC disks */
- sdkp->device->use_16_for_rw = 1;
- sdkp->device->use_10_for_rw = 0;
-
return 0;
err:
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index aa28874e8fb9..f098877eed4a 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -217,7 +217,7 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
if (sfp->parentdp->device->type == TYPE_SCANNER)
return 0;
- return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
+ return blk_verify_command(cmd, filp->f_mode);
}
static int
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 90f6effc32b4..b2880c7709e6 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -40,11 +40,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.1.2-125"
+#define DRIVER_VERSION "1.1.2-126"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
#define DRIVER_RELEASE 2
-#define DRIVER_REVISION 125
+#define DRIVER_REVISION 126
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -1078,9 +1078,9 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
bad_raid_map:
dev_warn(&ctrl_info->pci_dev->dev,
- "scsi %d:%d:%d:%d %s\n",
- ctrl_info->scsi_host->host_no,
- device->bus, device->target, device->lun, err_msg);
+ "logical device %08x%08x %s\n",
+ *((u32 *)&device->scsi3addr),
+ *((u32 *)&device->scsi3addr[4]), err_msg);
return -EINVAL;
}
@@ -6925,6 +6925,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1302)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1303)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1380)
},
{
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 5e7200f05873..1b06cf0375dc 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -486,6 +486,9 @@ struct hv_host_device {
unsigned int port;
unsigned char path;
unsigned char target;
+ struct workqueue_struct *handle_error_wq;
+ struct work_struct host_scan_work;
+ struct Scsi_Host *host;
};
struct storvsc_scan_work {
@@ -514,13 +517,12 @@ done:
static void storvsc_host_scan(struct work_struct *work)
{
- struct storvsc_scan_work *wrk;
struct Scsi_Host *host;
struct scsi_device *sdev;
+ struct hv_host_device *host_device =
+ container_of(work, struct hv_host_device, host_scan_work);
- wrk = container_of(work, struct storvsc_scan_work, work);
- host = wrk->host;
-
+ host = host_device->host;
/*
* Before scanning the host, first check to see if any of the
* currrently known devices have been hot removed. We issue a
@@ -540,8 +542,6 @@ static void storvsc_host_scan(struct work_struct *work)
* Now scan the host to discover LUNs that may have been added.
*/
scsi_scan_host(host);
-
- kfree(wrk);
}
static void storvsc_remove_lun(struct work_struct *work)
@@ -922,6 +922,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
{
struct storvsc_scan_work *wrk;
void (*process_err_fn)(struct work_struct *work);
+ struct hv_host_device *host_dev = shost_priv(host);
bool do_work = false;
switch (SRB_STATUS(vm_srb->srb_status)) {
@@ -988,7 +989,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
wrk->lun = vm_srb->lun;
wrk->tgt_id = vm_srb->target_id;
INIT_WORK(&wrk->work, process_err_fn);
- schedule_work(&wrk->work);
+ queue_work(host_dev->handle_error_wq, &wrk->work);
}
@@ -1116,8 +1117,7 @@ static void storvsc_on_receive(struct storvsc_device *stor_device,
struct vstor_packet *vstor_packet,
struct storvsc_cmd_request *request)
{
- struct storvsc_scan_work *work;
-
+ struct hv_host_device *host_dev;
switch (vstor_packet->operation) {
case VSTOR_OPERATION_COMPLETE_IO:
storvsc_on_io_completion(stor_device, vstor_packet, request);
@@ -1125,13 +1125,9 @@ static void storvsc_on_receive(struct storvsc_device *stor_device,
case VSTOR_OPERATION_REMOVE_DEVICE:
case VSTOR_OPERATION_ENUMERATE_BUS:
- work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
- if (!work)
- return;
-
- INIT_WORK(&work->work, storvsc_host_scan);
- work->host = stor_device->host;
- schedule_work(&work->work);
+ host_dev = shost_priv(stor_device->host);
+ queue_work(
+ host_dev->handle_error_wq, &host_dev->host_scan_work);
break;
case VSTOR_OPERATION_FCHBA_DATA:
@@ -1744,6 +1740,7 @@ static int storvsc_probe(struct hv_device *device,
host_dev->port = host->host_no;
host_dev->dev = device;
+ host_dev->host = host;
stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
@@ -1803,10 +1800,20 @@ static int storvsc_probe(struct hv_device *device,
if (stor_device->num_sc != 0)
host->nr_hw_queues = stor_device->num_sc + 1;
+ /*
+ * Set the error handler work queue.
+ */
+ host_dev->handle_error_wq =
+ alloc_ordered_workqueue("storvsc_error_wq_%d",
+ WQ_MEM_RECLAIM,
+ host->host_no);
+ if (!host_dev->handle_error_wq)
+ goto err_out2;
+ INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan);
/* Register the HBA and start the scsi bus scan */
ret = scsi_add_host(host, &device->device);
if (ret != 0)
- goto err_out2;
+ goto err_out3;
if (!dev_is_ide) {
scsi_scan_host(host);
@@ -1815,7 +1822,7 @@ static int storvsc_probe(struct hv_device *device,
device->dev_instance.b[4]);
ret = scsi_add_device(host, 0, target, 0);
if (ret)
- goto err_out3;
+ goto err_out4;
}
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
if (host->transportt == fc_transport_template) {
@@ -1827,14 +1834,17 @@ static int storvsc_probe(struct hv_device *device,
fc_host_port_name(host) = stor_device->port_name;
stor_device->rport = fc_remote_port_add(host, 0, &ids);
if (!stor_device->rport)
- goto err_out3;
+ goto err_out4;
}
#endif
return 0;
-err_out3:
+err_out4:
scsi_remove_host(host);
+err_out3:
+ destroy_workqueue(host_dev->handle_error_wq);
+
err_out2:
/*
* Once we have connected with the host, we would need to
@@ -1858,6 +1868,7 @@ static int storvsc_remove(struct hv_device *dev)
{
struct storvsc_device *stor_device = hv_get_drvdata(dev);
struct Scsi_Host *host = stor_device->host;
+ struct hv_host_device *host_dev = shost_priv(host);
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
if (host->transportt == fc_transport_template) {
@@ -1865,6 +1876,7 @@ static int storvsc_remove(struct hv_device *dev)
fc_remove_host(host);
}
#endif
+ destroy_workqueue(host_dev->handle_error_wq);
scsi_remove_host(host);
storvsc_dev_remove(dev);
scsi_host_put(host);
diff --git a/drivers/scsi/ufs/tc-dwc-g210.c b/drivers/scsi/ufs/tc-dwc-g210.c
index dc03e47f7c58..3a8bc6d9cb5b 100644
--- a/drivers/scsi/ufs/tc-dwc-g210.c
+++ b/drivers/scsi/ufs/tc-dwc-g210.c
@@ -26,7 +26,7 @@
*/
static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
{
- const struct ufshcd_dme_attr_val setup_attrs[] = {
+ static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
{ UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
{ UIC_ARG_MIB(CDIRECTCTRL6), 0x80, DME_LOCAL },
@@ -90,7 +90,7 @@ static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
*/
static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba)
{
- const struct ufshcd_dme_attr_val setup_attrs[] = {
+ static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
@@ -147,7 +147,7 @@ static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
int connected_tx_lanes = 0;
int ret = 0;
- const struct ufshcd_dme_attr_val setup_tx_attrs[] = {
+ static const struct ufshcd_dme_attr_val setup_tx_attrs[] = {
{ UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN1_TX), 0x0d,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN1_TX), 0x19,
@@ -158,7 +158,7 @@ static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
DME_LOCAL },
};
- const struct ufshcd_dme_attr_val setup_rx_attrs[] = {
+ static const struct ufshcd_dme_attr_val setup_rx_attrs[] = {
{ UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN1_RX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN1_RX), 0x19,
@@ -222,7 +222,7 @@ static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba)
{
int ret = 0;
- const struct ufshcd_dme_attr_val setup_attrs[] = {
+ static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
{ UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
{ UIC_ARG_MIB(CDIRECTCTRL6), 0xc0, DME_LOCAL },
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index c87d770b519a..2b38db2eeafa 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -273,15 +273,18 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
? true : false;
+ if (is_rate_B)
+ phy_set_mode(phy, PHY_MODE_UFS_HS_B);
+
/* Assert PHY reset and apply PHY calibration values */
ufs_qcom_assert_reset(hba);
/* provide 1ms delay to let the reset pulse propagate */
usleep_range(1000, 1100);
- ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
-
+ /* phy initialization - calibrate the phy */
+ ret = phy_init(phy);
if (ret) {
- dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
+ dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret);
goto out;
}
@@ -294,21 +297,22 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
* voltage, current to settle down before starting serdes.
*/
usleep_range(1000, 1100);
- ret = ufs_qcom_phy_start_serdes(phy);
+
+ /* power on phy - start serdes and phy's power and clocks */
+ ret = phy_power_on(phy);
if (ret) {
- dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
+ dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
__func__, ret);
- goto out;
+ goto out_disable_phy;
}
- ret = ufs_qcom_phy_is_pcs_ready(phy);
- if (ret)
- dev_err(hba->dev,
- "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
- __func__, ret);
-
ufs_qcom_select_unipro_mode(host);
+ return 0;
+
+out_disable_phy:
+ ufs_qcom_assert_reset(hba);
+ phy_exit(phy);
out:
return ret;
}
@@ -1273,14 +1277,9 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_phy_save_controller_version(host->generic_phy,
host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
- phy_init(host->generic_phy);
- err = phy_power_on(host->generic_phy);
- if (err)
- goto out_unregister_bus;
-
err = ufs_qcom_init_lane_clks(host);
if (err)
- goto out_disable_phy;
+ goto out_variant_clear;
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
@@ -1301,10 +1300,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
goto out;
-out_disable_phy:
- phy_power_off(host->generic_phy);
-out_unregister_bus:
- phy_exit(host->generic_phy);
out_variant_clear:
ufshcd_set_variant(hba, NULL);
out:
@@ -1458,7 +1453,7 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
reg = ufshcd_readl(hba, REG_UFS_CFG1);
- reg |= UFS_BIT(17);
+ reg |= UTP_DBG_RAMS_EN;
ufshcd_writel(hba, reg, REG_UFS_CFG1);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
@@ -1471,7 +1466,7 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
/* clear bit 17 - UTP_DBG_RAMS_EN */
- ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 076f52813a4c..295f4bef6a0e 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -92,7 +92,8 @@ enum {
#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)
/* bit definitions for REG_UFS_CFG1 register */
-#define QUNIPRO_SEL UFS_BIT(0)
+#define QUNIPRO_SEL 0x1
+#define UTP_DBG_RAMS_EN 0x20000
#define TEST_BUS_EN BIT(18)
#define TEST_BUS_SEL GENMASK(22, 19)
#define UFS_REG_TEST_BUS_EN BIT(30)
@@ -213,13 +214,13 @@ struct ufs_qcom_host {
* Note: By default this capability will be kept enabled if host
* controller supports the QUniPro mode.
*/
- #define UFS_QCOM_CAP_QUNIPRO UFS_BIT(0)
+ #define UFS_QCOM_CAP_QUNIPRO 0x1
/*
* Set this capability if host controller can retain the secure
* configuration even after UFS controller core power collapse.
*/
- #define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE UFS_BIT(1)
+ #define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE 0x2
u32 caps;
struct phy *generic_phy;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 794a4600e952..011c3369082c 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -385,6 +385,8 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
tag, ktime_to_us(lrbp->issue_time_stamp));
+ dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
+ tag, ktime_to_us(lrbp->compl_time_stamp));
dev_err(hba->dev,
"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
tag, (u64)lrbp->utrd_dma_addr);
@@ -1746,6 +1748,7 @@ static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
hba->lrb[task_tag].issue_time_stamp = ktime_get();
+ hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -2195,10 +2198,11 @@ static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
u32 upiu_flags;
int ret = 0;
- if (hba->ufs_version == UFSHCI_VERSION_20)
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- else
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+ else
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
@@ -2222,10 +2226,11 @@ static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
u32 upiu_flags;
int ret = 0;
- if (hba->ufs_version == UFSHCI_VERSION_20)
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- else
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
lrbp->command_type = UTP_CMD_TYPE_SCSI;
+ else
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
if (likely(lrbp->cmd)) {
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
@@ -3586,7 +3591,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
status = ufshcd_get_upmcrs(hba);
if (status != PWR_LOCAL) {
dev_err(hba->dev,
- "pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
+ "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
cmd->command, status);
ret = (status != PWR_OK) ? status : -1;
}
@@ -4627,6 +4632,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
}
if (ufshcd_is_clkscaling_supported(hba))
hba->clk_scaling.active_reqs--;
+
+ lrbp->compl_time_stamp = ktime_get();
}
/* clear corresponding bits of completed commands */
@@ -5998,25 +6005,22 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
}
scsi_device_put(hba->sdev_ufs_device);
- sdev_boot = __scsi_add_device(hba->host, 0, 0,
- ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
- if (IS_ERR(sdev_boot)) {
- ret = PTR_ERR(sdev_boot);
- goto remove_sdev_ufs_device;
- }
- scsi_device_put(sdev_boot);
-
sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
if (IS_ERR(sdev_rpmb)) {
ret = PTR_ERR(sdev_rpmb);
- goto remove_sdev_boot;
+ goto remove_sdev_ufs_device;
}
scsi_device_put(sdev_rpmb);
+
+ sdev_boot = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
+ if (IS_ERR(sdev_boot))
+ dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
+ else
+ scsi_device_put(sdev_boot);
goto out;
-remove_sdev_boot:
- scsi_remove_device(sdev_boot);
remove_sdev_ufs_device:
scsi_remove_device(hba->sdev_ufs_device);
out:
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index cdc8bd05f7df..1332e544da92 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -166,6 +166,7 @@ struct ufs_pm_lvl_states {
* @lun: LUN of the command
* @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
* @issue_time_stamp: time stamp for debug purposes
+ * @compl_time_stamp: time stamp for statistics
* @req_abort_skip: skip request abort task flag
*/
struct ufshcd_lrb {
@@ -189,6 +190,7 @@ struct ufshcd_lrb {
u8 lun; /* UPIU LUN id field is only 8-bit wide */
bool intr_cmd;
ktime_t issue_time_stamp;
+ ktime_t compl_time_stamp;
bool req_abort_skip;
};
@@ -544,13 +546,13 @@ struct ufs_hba {
bool is_irq_enabled;
/* Interrupt aggregation support is broken */
- #define UFSHCD_QUIRK_BROKEN_INTR_AGGR UFS_BIT(0)
+ #define UFSHCD_QUIRK_BROKEN_INTR_AGGR 0x1
/*
* delay before each dme command is required as the unipro
* layer has shown instabilities
*/
- #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS UFS_BIT(1)
+ #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS 0x2
/*
* If UFS host controller is having issue in processing LCC (Line
@@ -559,21 +561,21 @@ struct ufs_hba {
* the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
* attribute of device to 0).
*/
- #define UFSHCD_QUIRK_BROKEN_LCC UFS_BIT(2)
+ #define UFSHCD_QUIRK_BROKEN_LCC 0x4
/*
* The attribute PA_RXHSUNTERMCAP specifies whether or not the
* inbound Link supports unterminated line in HS mode. Setting this
* attribute to 1 fixes moving to HS gear.
*/
- #define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP UFS_BIT(3)
+ #define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP 0x8
/*
* This quirk needs to be enabled if the host contoller only allows
* accessing the peer dme attributes in AUTO mode (FAST AUTO or
* SLOW AUTO).
*/
- #define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE UFS_BIT(4)
+ #define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE 0x10
/*
* This quirk needs to be enabled if the host contoller doesn't
@@ -581,13 +583,13 @@ struct ufs_hba {
* is enabled, standard UFS host driver will call the vendor specific
* ops (get_ufs_hci_version) to get the correct version.
*/
- #define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5)
+ #define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION 0x20
/*
* This quirk needs to be enabled if the host contoller regards
* resolution of the values of PRDTO and PRDTL in UTRD as byte.
*/
- #define UFSHCD_QUIRK_PRDT_BYTE_GRAN UFS_BIT(7)
+ #define UFSHCD_QUIRK_PRDT_BYTE_GRAN 0x80
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index f60145d4a66e..277752b0fc6f 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -119,22 +119,23 @@ enum {
#define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0)
#define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16)
-#define UFS_BIT(x) (1L << (x))
-
-#define UTP_TRANSFER_REQ_COMPL UFS_BIT(0)
-#define UIC_DME_END_PT_RESET UFS_BIT(1)
-#define UIC_ERROR UFS_BIT(2)
-#define UIC_TEST_MODE UFS_BIT(3)
-#define UIC_POWER_MODE UFS_BIT(4)
-#define UIC_HIBERNATE_EXIT UFS_BIT(5)
-#define UIC_HIBERNATE_ENTER UFS_BIT(6)
-#define UIC_LINK_LOST UFS_BIT(7)
-#define UIC_LINK_STARTUP UFS_BIT(8)
-#define UTP_TASK_REQ_COMPL UFS_BIT(9)
-#define UIC_COMMAND_COMPL UFS_BIT(10)
-#define DEVICE_FATAL_ERROR UFS_BIT(11)
-#define CONTROLLER_FATAL_ERROR UFS_BIT(16)
-#define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17)
+/*
+ * IS - Interrupt Status - 20h
+ */
+#define UTP_TRANSFER_REQ_COMPL 0x1
+#define UIC_DME_END_PT_RESET 0x2
+#define UIC_ERROR 0x4
+#define UIC_TEST_MODE 0x8
+#define UIC_POWER_MODE 0x10
+#define UIC_HIBERNATE_EXIT 0x20
+#define UIC_HIBERNATE_ENTER 0x40
+#define UIC_LINK_LOST 0x80
+#define UIC_LINK_STARTUP 0x100
+#define UTP_TASK_REQ_COMPL 0x200
+#define UIC_COMMAND_COMPL 0x400
+#define DEVICE_FATAL_ERROR 0x800
+#define CONTROLLER_FATAL_ERROR 0x10000
+#define SYSTEM_BUS_FATAL_ERROR 0x20000
#define UFSHCD_UIC_PWR_MASK (UIC_HIBERNATE_ENTER |\
UIC_HIBERNATE_EXIT |\
@@ -152,12 +153,10 @@ enum {
SYSTEM_BUS_FATAL_ERROR)
/* HCS - Host Controller Status 30h */
-#define DEVICE_PRESENT UFS_BIT(0)
-#define UTP_TRANSFER_REQ_LIST_READY UFS_BIT(1)
-#define UTP_TASK_REQ_LIST_READY UFS_BIT(2)
-#define UIC_COMMAND_READY UFS_BIT(3)
-#define HOST_ERROR_INDICATOR UFS_BIT(4)
-#define DEVICE_ERROR_INDICATOR UFS_BIT(5)
+#define DEVICE_PRESENT 0x1
+#define UTP_TRANSFER_REQ_LIST_READY 0x2
+#define UTP_TASK_REQ_LIST_READY 0x4
+#define UIC_COMMAND_READY 0x8
#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
@@ -174,46 +173,47 @@ enum {
};
/* HCE - Host Controller Enable 34h */
-#define CONTROLLER_ENABLE UFS_BIT(0)
+#define CONTROLLER_ENABLE 0x1
#define CONTROLLER_DISABLE 0x0
-#define CRYPTO_GENERAL_ENABLE UFS_BIT(1)
+#define CRYPTO_GENERAL_ENABLE 0x2
/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
-#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
+#define UIC_PHY_ADAPTER_LAYER_ERROR 0x80000000
#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
-#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
+#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
/* UECN - Host UIC Error Code Network Layer 40h */
-#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31)
+#define UIC_NETWORK_LAYER_ERROR 0x80000000
#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
/* UECT - Host UIC Error Code Transport Layer 44h */
-#define UIC_TRANSPORT_LAYER_ERROR UFS_BIT(31)
+#define UIC_TRANSPORT_LAYER_ERROR 0x80000000
#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
/* UECDME - Host UIC Error Code DME 48h */
-#define UIC_DME_ERROR UFS_BIT(31)
+#define UIC_DME_ERROR 0x80000000
#define UIC_DME_ERROR_CODE_MASK 0x1
+/* UTRIACR - Interrupt Aggregation control register - 0x4Ch */
#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF
#define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8)
-#define INT_AGGR_COUNTER_AND_TIMER_RESET UFS_BIT(16)
-#define INT_AGGR_STATUS_BIT UFS_BIT(20)
-#define INT_AGGR_PARAM_WRITE UFS_BIT(24)
-#define INT_AGGR_ENABLE UFS_BIT(31)
+#define INT_AGGR_COUNTER_AND_TIMER_RESET 0x10000
+#define INT_AGGR_STATUS_BIT 0x100000
+#define INT_AGGR_PARAM_WRITE 0x1000000
+#define INT_AGGR_ENABLE 0x80000000
/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
-#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT UFS_BIT(0)
+#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT 0x1
/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
-#define UTP_TASK_REQ_LIST_RUN_STOP_BIT UFS_BIT(0)
+#define UTP_TASK_REQ_LIST_RUN_STOP_BIT 0x1
/* UICCMD - UIC Command */
#define COMMAND_OPCODE_MASK 0xFF
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index bec81c2404f7..7525039d812c 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -300,7 +300,7 @@ static void maple_send(void)
mutex_unlock(&maple_wlist_lock);
if (maple_packets > 0) {
for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
- dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
+ sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE,
PAGE_SIZE, DMA_BIDIRECTIONAL);
}
@@ -642,8 +642,7 @@ static void maple_dma_handler(struct work_struct *work)
list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
mdev = mq->dev;
recvbuf = mq->recvbuf->buf;
- dma_cache_sync(&mdev->dev, recvbuf, 0x400,
- DMA_FROM_DEVICE);
+ sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE);
code = recvbuf[0];
kfree(mq->sendbuf);
list_del_init(&mq->list);
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
index 22e98a90468c..a71730da6385 100644
--- a/drivers/soc/bcm/brcmstb/common.c
+++ b/drivers/soc/bcm/brcmstb/common.c
@@ -40,6 +40,18 @@ bool soc_is_brcmstb(void)
return of_match_node(brcmstb_machine_match, root) != NULL;
}
+u32 brcmstb_get_family_id(void)
+{
+ return family_id;
+}
+EXPORT_SYMBOL(brcmstb_get_family_id);
+
+u32 brcmstb_get_product_id(void)
+{
+ return product_id;
+}
+EXPORT_SYMBOL(brcmstb_get_product_id);
+
static const struct of_device_id sun_top_ctrl_match[] = {
{ .compatible = "brcm,bcm7125-sun-top-ctrl", },
{ .compatible = "brcm,bcm7346-sun-top-ctrl", },
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index e1ce8b1b5090..e570b6af2e6f 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -361,17 +361,6 @@ out:
return ret;
}
-static bool scpsys_active_wakeup(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- struct scp_domain *scpd;
-
- genpd = pd_to_genpd(dev->pm_domain);
- scpd = container_of(genpd, struct scp_domain, genpd);
-
- return scpd->data->active_wakeup;
-}
-
static void init_clks(struct platform_device *pdev, struct clk **clk)
{
int i;
@@ -466,7 +455,8 @@ static struct scp *init_scp(struct platform_device *pdev,
genpd->name = data->name;
genpd->power_off = scpsys_power_off;
genpd->power_on = scpsys_power_on;
- genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
+ if (scpd->data->active_wakeup)
+ genpd->flags |= GENPD_FLAG_ACTIVE_WAKEUP;
}
return scp;
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 40b75748835f..5c342167b9db 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -358,17 +358,6 @@ static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd,
pm_clk_destroy(dev);
}
-static bool rockchip_active_wakeup(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- struct rockchip_pm_domain *pd;
-
- genpd = pd_to_genpd(dev->pm_domain);
- pd = container_of(genpd, struct rockchip_pm_domain, genpd);
-
- return pd->info->active_wakeup;
-}
-
static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
struct device_node *node)
{
@@ -489,8 +478,9 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pd->genpd.power_on = rockchip_pd_power_on;
pd->genpd.attach_dev = rockchip_pd_attach_dev;
pd->genpd.detach_dev = rockchip_pd_detach_dev;
- pd->genpd.dev_ops.active_wakeup = rockchip_active_wakeup;
pd->genpd.flags = GENPD_FLAG_PM_CLK;
+ if (pd_info->active_wakeup)
+ pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&pd->genpd, NULL, false);
pmu->genpd_data.domains[id] = &pd->genpd;
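Both the MediaTek and Rockchip conversions above replace a per-device dev_ops.active_wakeup() callback with the per-domain GENPD_FLAG_ACTIVE_WAKEUP flag, set once at domain registration. A condensed sketch of the pattern (the domain structure and field names are illustrative):

    #include <linux/pm_domain.h>

    struct example_pm_domain {
            struct generic_pm_domain genpd;
            bool active_wakeup;             /* static per-domain property */
    };

    static void example_domain_setup(struct example_pm_domain *pd)
    {
            /* Instead of installing genpd->dev_ops.active_wakeup ... */
            if (pd->active_wakeup)
                    pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
    }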
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a75f2a2cf780..603783976b81 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -1,10 +1,6 @@
#
# SPI driver configuration
#
-# NOTE: the reason this doesn't show SPI slave support is mostly that
-# nobody's needed a slave side API yet. The master-role API is not
-# fully appropriate there, so it'd need some thought to do well.
-#
menuconfig SPI
bool "SPI support"
depends on HAS_IOMEM
@@ -379,7 +375,7 @@ config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select REGMAP_MMIO
depends on HAS_DMA
- depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
+ depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
mode. VF610 platform uses the controller.
@@ -626,6 +622,13 @@ config SPI_SIRF
help
SPI driver for CSR SiRFprimaII SoCs
+config SPI_SPRD_ADI
+ tristate "Spreadtrum ADI controller"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on HWSPINLOCK || (COMPILE_TEST && !HWSPINLOCK)
+ help
+ ADI driver based on SPI for Spreadtrum SoCs.
+
config SPI_STM32
tristate "STMicroelectronics STM32 SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 8e0cda73b324..34c5f2832ddf 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
+obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o
obj-$(CONFIG_SPI_STM32) += spi-stm32.o
obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 568e1c65aa82..77fe55ce790c 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -213,7 +213,7 @@ static void a3700_spi_mode_set(struct a3700_spi *a3700_spi,
}
static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
- unsigned int speed_hz, u16 mode)
+ unsigned int speed_hz)
{
u32 val;
u32 prescale;
@@ -231,17 +231,6 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
val |= A3700_SPI_CLK_CAPT_EDGE;
spireg_write(a3700_spi, A3700_SPI_IF_TIME_REG, val);
}
-
- val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
- val &= ~(A3700_SPI_CLK_POL | A3700_SPI_CLK_PHA);
-
- if (mode & SPI_CPOL)
- val |= A3700_SPI_CLK_POL;
-
- if (mode & SPI_CPHA)
- val |= A3700_SPI_CLK_PHA;
-
- spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
static void a3700_spi_bytelen_set(struct a3700_spi *a3700_spi, unsigned int len)
@@ -423,7 +412,7 @@ static void a3700_spi_transfer_setup(struct spi_device *spi,
a3700_spi = spi_master_get_devdata(spi->master);
- a3700_spi_clock_set(a3700_spi, xfer->speed_hz, spi->mode);
+ a3700_spi_clock_set(a3700_spi, xfer->speed_hz);
byte_len = xfer->bits_per_word >> 3;
@@ -584,6 +573,8 @@ static int a3700_spi_prepare_message(struct spi_master *master,
a3700_spi_bytelen_set(a3700_spi, 4);
+ a3700_spi_mode_set(a3700_spi, spi->mode);
+
return 0;
}
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 6ab4c7700228..68cfc351b47f 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -553,7 +553,7 @@ err_put_master:
static int spi_engine_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct spi_engine *spi_engine = spi_master_get_devdata(master);
int irq = platform_get_irq(pdev, 0);
@@ -561,6 +561,8 @@ static int spi_engine_remove(struct platform_device *pdev)
free_irq(irq, master);
+ spi_master_put(master);
+
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
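The remove() fix above relies on spi_master_get()/spi_master_put() reference counting: the devm-managed unregister drops a reference around ->remove(), so the driver takes its own while it still dereferences the controller's driver data. A hedged sketch of the pattern (the private structure is hypothetical):

    static int example_remove(struct platform_device *pdev)
    {
            /* Hold our own reference while we still use the devdata. */
            struct spi_master *master =
                    spi_master_get(platform_get_drvdata(pdev));
            struct example_priv *priv = spi_master_get_devdata(master);

            /* ... quiesce the hardware using priv ... */

            spi_master_put(master);
            return 0;
    }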
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index d89127f4a46d..f652f70cb8db 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -32,6 +32,7 @@
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
+#include <linux/spi/spi-fsl-dspi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/time.h>
@@ -151,6 +152,11 @@ static const struct fsl_dspi_devtype_data ls2085a_data = {
.max_clock_factor = 8,
};
+static const struct fsl_dspi_devtype_data coldfire_data = {
+ .trans_mode = DSPI_EOQ_MODE,
+ .max_clock_factor = 8,
+};
+
struct fsl_dspi_dma {
/* Length of transfer in words of DSPI_FIFO_SIZE */
u32 curr_xfer_len;
@@ -741,6 +747,7 @@ static int dspi_setup(struct spi_device *spi)
{
struct chip_data *chip;
struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
+ struct fsl_dspi_platform_data *pdata;
u32 cs_sck_delay = 0, sck_cs_delay = 0;
unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
unsigned char pasc = 0, asc = 0, fmsz = 0;
@@ -761,11 +768,18 @@ static int dspi_setup(struct spi_device *spi)
return -ENOMEM;
}
- of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
- &cs_sck_delay);
+ pdata = dev_get_platdata(&dspi->pdev->dev);
- of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
- &sck_cs_delay);
+ if (!pdata) {
+ of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
+ &cs_sck_delay);
+
+ of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
+ &sck_cs_delay);
+ } else {
+ cs_sck_delay = pdata->cs_sck_delay;
+ sck_cs_delay = pdata->sck_cs_delay;
+ }
chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
@@ -949,6 +963,7 @@ static int dspi_probe(struct platform_device *pdev)
struct fsl_dspi *dspi;
struct resource *res;
void __iomem *base;
+ struct fsl_dspi_platform_data *pdata;
int ret = 0, cs_num, bus_num;
master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
@@ -969,25 +984,34 @@ static int dspi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
SPI_BPW_MASK(16);
- ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
- goto out_master_put;
- }
- master->num_chipselect = cs_num;
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ master->num_chipselect = pdata->cs_num;
+ master->bus_num = pdata->bus_num;
- ret = of_property_read_u32(np, "bus-num", &bus_num);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get bus-num\n");
- goto out_master_put;
- }
- master->bus_num = bus_num;
+ dspi->devtype_data = &coldfire_data;
+ } else {
- dspi->devtype_data = of_device_get_match_data(&pdev->dev);
- if (!dspi->devtype_data) {
- dev_err(&pdev->dev, "can't get devtype_data\n");
- ret = -EFAULT;
- goto out_master_put;
+ ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
+ goto out_master_put;
+ }
+ master->num_chipselect = cs_num;
+
+ ret = of_property_read_u32(np, "bus-num", &bus_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get bus-num\n");
+ goto out_master_put;
+ }
+ master->bus_num = bus_num;
+
+ dspi->devtype_data = of_device_get_match_data(&pdev->dev);
+ if (!dspi->devtype_data) {
+ dev_err(&pdev->dev, "can't get devtype_data\n");
+ ret = -EFAULT;
+ goto out_master_put;
+ }
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
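The new platform-data path above lets non-DT ColdFire (M5441x) boards describe the controller from board code; a hedged sketch using the fields consumed by dspi_probe()/dspi_setup() (the device name, id and delay values are illustrative):

    #include <linux/platform_device.h>
    #include <linux/spi/spi-fsl-dspi.h>

    static struct fsl_dspi_platform_data example_dspi_pdata = {
            .bus_num      = 0,
            .cs_num       = 4,
            .cs_sck_delay = 100,    /* illustrative delay value */
            .sck_cs_delay = 100,    /* illustrative delay value */
    };

    static struct platform_device example_dspi_device = {
            .name = "fsl-dspi",     /* assumed platform driver name */
            .id   = 0,
            .dev  = { .platform_data = &example_dspi_pdata },
    };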
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index babb15f07995..79ddefe4180d 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -53,10 +53,13 @@
/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
+#define MXC_INT_RDR BIT(4) /* Receive data threshold interrupt */
/* The maximum bytes that a sdma BD can transfer.*/
#define MAX_SDMA_BD_BYTES (1 << 15)
#define MX51_ECSPI_CTRL_MAX_BURST 512
+/* The maximum number of bytes that IMX53_ECSPI can transfer in slave mode. */
+#define MX53_MAX_TRANSFER_BYTES 512
enum spi_imx_devtype {
IMX1_CSPI,
@@ -76,7 +79,9 @@ struct spi_imx_devtype_data {
void (*trigger)(struct spi_imx_data *);
int (*rx_available)(struct spi_imx_data *);
void (*reset)(struct spi_imx_data *);
+ void (*disable)(struct spi_imx_data *);
bool has_dmamode;
+ bool has_slavemode;
unsigned int fifo_size;
bool dynamic_burst;
enum spi_imx_devtype devtype;
@@ -108,6 +113,11 @@ struct spi_imx_data {
unsigned int dynamic_burst, read_u32;
unsigned int word_mask;
+ /* Slave mode */
+ bool slave_mode;
+ bool slave_aborted;
+ unsigned int slave_burst;
+
/* DMA */
bool usedma;
u32 wml;
@@ -221,6 +231,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
if (!master->dma_rx)
return false;
+ if (spi_imx->slave_mode)
+ return false;
+
bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
if (bytes_per_word != 1 && bytes_per_word != 2 && bytes_per_word != 4)
@@ -262,6 +275,7 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
#define MX51_ECSPI_INT 0x10
#define MX51_ECSPI_INT_TEEN (1 << 0)
#define MX51_ECSPI_INT_RREN (1 << 3)
+#define MX51_ECSPI_INT_RDREN (1 << 4)
#define MX51_ECSPI_DMA 0x14
#define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
@@ -378,6 +392,44 @@ static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
spi_imx_buf_tx_u16(spi_imx);
}
+static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
+{
+ u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));
+
+ if (spi_imx->rx_buf) {
+ int n_bytes = spi_imx->slave_burst % sizeof(val);
+
+ if (!n_bytes)
+ n_bytes = sizeof(val);
+
+ memcpy(spi_imx->rx_buf,
+ ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);
+
+ spi_imx->rx_buf += n_bytes;
+ spi_imx->slave_burst -= n_bytes;
+ }
+}
+
+static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
+{
+ u32 val = 0;
+ int n_bytes = spi_imx->count % sizeof(val);
+
+ if (!n_bytes)
+ n_bytes = sizeof(val);
+
+ if (spi_imx->tx_buf) {
+ memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
+ spi_imx->tx_buf, n_bytes);
+ val = cpu_to_be32(val);
+ spi_imx->tx_buf += n_bytes;
+ }
+
+ spi_imx->count -= n_bytes;
+
+ writel(val, spi_imx->base + MXC_CSPITXDATA);
+}
+
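mx53_ecspi_tx_slave() above packs a possibly partial final word into a 32-bit FIFO entry by copying the remaining bytes to the tail of the word before the cpu_to_be32() swap. A standalone sketch mirroring that byte placement (user-space C; a little-endian host is assumed so the byte swap stands in for cpu_to_be32()):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirrors the packing done by mx53_ecspi_tx_slave() for one FIFO word. */
    static uint32_t pack_slave_word(const uint8_t *buf, size_t remaining)
    {
            uint32_t val = 0;
            size_t n = remaining % sizeof(val);

            if (!n)
                    n = sizeof(val);

            memcpy((uint8_t *)&val + sizeof(val) - n, buf, n);
            return __builtin_bswap32(val);   /* stand-in for cpu_to_be32() */
    }

    int main(void)
    {
            const uint8_t msg[] = { 0xaa, 0xbb, 0xcc };   /* 3-byte tail */

            printf("0x%08x\n", (unsigned int)pack_slave_word(msg, sizeof(msg)));
            return 0;
    }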
/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
unsigned int fspi, unsigned int *fres)
@@ -427,6 +479,9 @@ static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
if (enable & MXC_INT_RR)
val |= MX51_ECSPI_INT_RREN;
+ if (enable & MXC_INT_RDR)
+ val |= MX51_ECSPI_INT_RDREN;
+
writel(val, spi_imx->base + MX51_ECSPI_INT);
}
@@ -439,6 +494,15 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
+static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
+{
+ u32 ctrl;
+
+ ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+}
+
static int mx51_ecspi_config(struct spi_device *spi)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
@@ -446,14 +510,11 @@ static int mx51_ecspi_config(struct spi_device *spi)
u32 clk = spi_imx->speed_hz, delay, reg;
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
- /*
- * The hardware seems to have a race condition when changing modes. The
- * current assumption is that the selection of the channel arrives
- * earlier in the hardware than the mode bits when they are written at
- * the same time.
- * So set master mode for all channels as we do not support slave mode.
- */
- ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
+ /* set Master or Slave mode */
+ if (spi_imx->slave_mode)
+ ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
+ else
+ ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
/*
* Enable SPI_RDY handling (falling edge/level triggered).
@@ -468,9 +529,22 @@ static int mx51_ecspi_config(struct spi_device *spi)
/* set chip select to use */
ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
- ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
+ if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
+ ctrl |= (spi_imx->slave_burst * 8 - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
+ else
+ ctrl |= (spi_imx->bits_per_word - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
- cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
+ /*
+ * eCSPI burst completion by the Chip Select signal in Slave mode
+ * is not functional on the i.MX53 SoC, so configure the SPI burst to
+ * complete when BURST_LENGTH + 1 bits have been received
+ */
+ if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
+ cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
+ else
+ cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
if (spi->mode & SPI_CPHA)
cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
@@ -805,6 +879,7 @@ static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX1_CSPI,
};
@@ -817,6 +892,7 @@ static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX21_CSPI,
};
@@ -830,6 +906,7 @@ static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX27_CSPI,
};
@@ -842,6 +919,7 @@ static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX31_CSPI,
};
@@ -855,6 +933,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = true,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX35_CSPI,
};
@@ -867,6 +946,8 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
.fifo_size = 64,
.has_dmamode = true,
.dynamic_burst = true,
+ .has_slavemode = true,
+ .disable = mx51_ecspi_disable,
.devtype = IMX51_ECSPI,
};
@@ -878,6 +959,8 @@ static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
.reset = mx51_ecspi_reset,
.fifo_size = 64,
.has_dmamode = true,
+ .has_slavemode = true,
+ .disable = mx51_ecspi_disable,
.devtype = IMX53_ECSPI,
};
@@ -945,14 +1028,16 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
spi_imx->txfifo++;
}
- spi_imx->devtype_data->trigger(spi_imx);
+ if (!spi_imx->slave_mode)
+ spi_imx->devtype_data->trigger(spi_imx);
}
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
struct spi_imx_data *spi_imx = dev_id;
- while (spi_imx->devtype_data->rx_available(spi_imx)) {
+ while (spi_imx->txfifo &&
+ spi_imx->devtype_data->rx_available(spi_imx)) {
spi_imx->rx(spi_imx);
spi_imx->txfifo--;
}
@@ -1034,7 +1119,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->speed_hz = t->speed_hz;
/* Initialize the functions for transfer */
- if (spi_imx->devtype_data->dynamic_burst) {
+ if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode) {
u32 mask;
spi_imx->dynamic_burst = 0;
@@ -1078,6 +1163,12 @@ static int spi_imx_setupxfer(struct spi_device *spi,
return ret;
}
+ if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
+ spi_imx->rx = mx53_ecspi_rx_slave;
+ spi_imx->tx = mx53_ecspi_tx_slave;
+ spi_imx->slave_burst = t->len;
+ }
+
spi_imx->devtype_data->config(spi);
return 0;
@@ -1262,11 +1353,61 @@ static int spi_imx_pio_transfer(struct spi_device *spi,
return transfer->len;
}
+static int spi_imx_pio_transfer_slave(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ int ret = transfer->len;
+
+ if (is_imx53_ecspi(spi_imx) &&
+ transfer->len > MX53_MAX_TRANSFER_BYTES) {
+ dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
+ MX53_MAX_TRANSFER_BYTES);
+ return -EMSGSIZE;
+ }
+
+ spi_imx->tx_buf = transfer->tx_buf;
+ spi_imx->rx_buf = transfer->rx_buf;
+ spi_imx->count = transfer->len;
+ spi_imx->txfifo = 0;
+
+ reinit_completion(&spi_imx->xfer_done);
+ spi_imx->slave_aborted = false;
+
+ spi_imx_push(spi_imx);
+
+ spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);
+
+ if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
+ spi_imx->slave_aborted) {
+ dev_dbg(&spi->dev, "interrupted\n");
+ ret = -EINTR;
+ }
+
+ /* The eCSPI has a HW issue when working in Slave mode:
+ * after 64 words have been written to the TXFIFO, even once the
+ * TXFIFO becomes empty, ECSPI_TXDATA keeps shifting out the last
+ * word, so we have to disable the eCSPI when in slave mode after
+ * the transfer completes.
+ */
+ if (spi_imx->devtype_data->disable)
+ spi_imx->devtype_data->disable(spi_imx);
+
+ return ret;
+}
+
static int spi_imx_transfer(struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ /* flush rxfifo before transfer */
+ while (spi_imx->devtype_data->rx_available(spi_imx))
+ spi_imx->rx(spi_imx);
+
+ if (spi_imx->slave_mode)
+ return spi_imx_pio_transfer_slave(spi, transfer);
+
if (spi_imx->usedma)
return spi_imx_dma_transfer(spi_imx, transfer);
else
@@ -1323,6 +1464,16 @@ spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
return 0;
}
+static int spi_imx_slave_abort(struct spi_master *master)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+ spi_imx->slave_aborted = true;
+ complete(&spi_imx->xfer_done);
+
+ return 0;
+}
+
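The slave_abort hook above is the controller-side half of the SPI core's slave-abort support: an in-kernel protocol driver can cancel a transfer the remote master never clocks by calling spi_slave_abort(), which makes the pending transfer above complete with -EINTR. A minimal sketch (the caller is hypothetical):

    #include <linux/spi/spi.h>

    /* Hypothetical helper in a slave protocol driver. */
    static void example_cancel_pending(struct spi_device *spi)
    {
            /* Wakes the thread blocked in the in-flight transfer. */
            spi_slave_abort(spi);
    }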
static int spi_imx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1334,13 +1485,23 @@ static int spi_imx_probe(struct platform_device *pdev)
struct spi_imx_data *spi_imx;
struct resource *res;
int i, ret, irq, spi_drctl;
+ const struct spi_imx_devtype_data *devtype_data = of_id ? of_id->data :
+ (struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
+ bool slave_mode;
if (!np && !mxc_platform_info) {
dev_err(&pdev->dev, "can't get the platform data\n");
return -EINVAL;
}
- master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
+ slave_mode = devtype_data->has_slavemode &&
+ of_property_read_bool(np, "spi-slave");
+ if (slave_mode)
+ master = spi_alloc_slave(&pdev->dev,
+ sizeof(struct spi_imx_data));
+ else
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(struct spi_imx_data));
if (!master)
return -ENOMEM;
@@ -1358,20 +1519,29 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx = spi_master_get_devdata(master);
spi_imx->bitbang.master = master;
spi_imx->dev = &pdev->dev;
+ spi_imx->slave_mode = slave_mode;
- spi_imx->devtype_data = of_id ? of_id->data :
- (struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
+ spi_imx->devtype_data = devtype_data;
+ /* Get number of chip selects, either platform data or OF */
if (mxc_platform_info) {
master->num_chipselect = mxc_platform_info->num_chipselect;
- master->cs_gpios = devm_kzalloc(&master->dev,
- sizeof(int) * master->num_chipselect, GFP_KERNEL);
- if (!master->cs_gpios)
- return -ENOMEM;
+ if (mxc_platform_info->chipselect) {
+ master->cs_gpios = devm_kzalloc(&master->dev,
+ sizeof(int) * master->num_chipselect, GFP_KERNEL);
+ if (!master->cs_gpios)
+ return -ENOMEM;
+
+ for (i = 0; i < master->num_chipselect; i++)
+ master->cs_gpios[i] = mxc_platform_info->chipselect[i];
+ }
+ } else {
+ u32 num_cs;
- for (i = 0; i < master->num_chipselect; i++)
- master->cs_gpios[i] = mxc_platform_info->chipselect[i];
- }
+ if (!of_property_read_u32(np, "num-cs", &num_cs))
+ master->num_chipselect = num_cs;
+ /* If not preset, default value of 1 is used */
+ }
spi_imx->bitbang.chipselect = spi_imx_chipselect;
spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
@@ -1380,6 +1550,7 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
+ spi_imx->bitbang.master->slave_abort = spi_imx_slave_abort;
spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS;
if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
@@ -1451,37 +1622,38 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->devtype_data->intctrl(spi_imx, 0);
master->dev.of_node = pdev->dev.of_node;
+
+ /* Request GPIO CS lines, if any */
+ if (!spi_imx->slave_mode && master->cs_gpios) {
+ for (i = 0; i < master->num_chipselect; i++) {
+ if (!gpio_is_valid(master->cs_gpios[i]))
+ continue;
+
+ ret = devm_gpio_request(&pdev->dev,
+ master->cs_gpios[i],
+ DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
+ master->cs_gpios[i]);
+ goto out_spi_bitbang;
+ }
+ }
+ }
+
ret = spi_bitbang_start(&spi_imx->bitbang);
if (ret) {
dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
goto out_clk_put;
}
- if (!master->cs_gpios) {
- dev_err(&pdev->dev, "No CS GPIOs available\n");
- ret = -EINVAL;
- goto out_clk_put;
- }
-
- for (i = 0; i < master->num_chipselect; i++) {
- if (!gpio_is_valid(master->cs_gpios[i]))
- continue;
-
- ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
- DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
- master->cs_gpios[i]);
- goto out_clk_put;
- }
- }
-
dev_info(&pdev->dev, "probed\n");
clk_disable(spi_imx->clk_ipg);
clk_disable(spi_imx->clk_per);
return ret;
+out_spi_bitbang:
+ spi_bitbang_stop(&spi_imx->bitbang);
out_clk_put:
clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 5b0e9a3e83f6..3d216b950b41 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -44,6 +44,7 @@
#include <linux/completion.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
@@ -442,6 +443,85 @@ static int mxs_spi_transfer_one(struct spi_master *master,
return status;
}
+static int mxs_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mxs_spi *spi = spi_master_get_devdata(master);
+ struct mxs_ssp *ssp = &spi->ssp;
+ int ret;
+
+ clk_disable_unprepare(ssp->clk);
+
+ ret = pinctrl_pm_select_idle_state(dev);
+ if (ret) {
+ int ret2 = clk_prepare_enable(ssp->clk);
+
+ if (ret2)
+ dev_warn(dev, "Failed to reenable clock after failing pinctrl request (pinctrl: %d, clk: %d)\n",
+ ret, ret2);
+ }
+
+ return ret;
+}
+
+static int mxs_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mxs_spi *spi = spi_master_get_devdata(master);
+ struct mxs_ssp *ssp = &spi->ssp;
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(ssp->clk);
+ if (ret)
+ pinctrl_pm_select_idle_state(dev);
+
+ return ret;
+}
+
+static int __maybe_unused mxs_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ return mxs_spi_runtime_suspend(dev);
+ else
+ return 0;
+}
+
+static int __maybe_unused mxs_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pm_runtime_suspended(dev))
+ ret = mxs_spi_runtime_resume(dev);
+ else
+ ret = 0;
+ if (ret)
+ return ret;
+
+ ret = spi_master_resume(master);
+ if (ret < 0 && !pm_runtime_suspended(dev))
+ mxs_spi_runtime_suspend(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops mxs_spi_pm = {
+ SET_RUNTIME_PM_OPS(mxs_spi_runtime_suspend,
+ mxs_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(mxs_spi_suspend, mxs_spi_resume)
+};
+
static const struct of_device_id mxs_spi_dt_ids[] = {
{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
@@ -493,12 +573,15 @@ static int mxs_spi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
+ platform_set_drvdata(pdev, master);
+
master->transfer_one_message = mxs_spi_transfer_one;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->num_chipselect = 3;
master->dev.of_node = np;
master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->auto_runtime_pm = true;
spi = spi_master_get_devdata(master);
ssp = &spi->ssp;
@@ -521,28 +604,41 @@ static int mxs_spi_probe(struct platform_device *pdev)
goto out_master_free;
}
- ret = clk_prepare_enable(ssp->clk);
- if (ret)
- goto out_dma_release;
+ pm_runtime_enable(ssp->dev);
+ if (!pm_runtime_enabled(ssp->dev)) {
+ ret = mxs_spi_runtime_resume(ssp->dev);
+ if (ret < 0) {
+ dev_err(ssp->dev, "runtime resume failed\n");
+ goto out_dma_release;
+ }
+ }
+
+ ret = pm_runtime_get_sync(ssp->dev);
+ if (ret < 0) {
+ dev_err(ssp->dev, "runtime_get_sync failed\n");
+ goto out_pm_runtime_disable;
+ }
clk_set_rate(ssp->clk, clk_freq);
ret = stmp_reset_block(ssp->base);
if (ret)
- goto out_disable_clk;
-
- platform_set_drvdata(pdev, master);
+ goto out_pm_runtime_put;
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
- goto out_disable_clk;
+ goto out_pm_runtime_put;
}
+ pm_runtime_put(ssp->dev);
+
return 0;
-out_disable_clk:
- clk_disable_unprepare(ssp->clk);
+out_pm_runtime_put:
+ pm_runtime_put(ssp->dev);
+out_pm_runtime_disable:
+ pm_runtime_disable(ssp->dev);
out_dma_release:
dma_release_channel(ssp->dmach);
out_master_free:
@@ -560,7 +656,10 @@ static int mxs_spi_remove(struct platform_device *pdev)
spi = spi_master_get_devdata(master);
ssp = &spi->ssp;
- clk_disable_unprepare(ssp->clk);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ mxs_spi_runtime_suspend(&pdev->dev);
+
dma_release_channel(ssp->dmach);
return 0;
@@ -572,6 +671,7 @@ static struct platform_driver mxs_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = mxs_spi_dt_ids,
+ .pm = &mxs_spi_pm,
},
};
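The probe rework above follows a common runtime-PM pattern: enable runtime PM, but if the kernel was built without CONFIG_PM, power the block up by calling the runtime-resume handler directly. A condensed sketch of that pattern (function names are hypothetical):

    #include <linux/pm_runtime.h>

    static int example_runtime_resume(struct device *dev);  /* hypothetical handler */

    static int example_power_up(struct device *dev)
    {
            int ret;

            pm_runtime_enable(dev);
            if (!pm_runtime_enabled(dev)) {
                    /* !CONFIG_PM: do by hand what runtime PM would have done. */
                    ret = example_runtime_resume(dev);
                    if (ret < 0)
                            return ret;
            }

            ret = pm_runtime_get_sync(dev);
            return ret < 0 ? ret : 0;
    }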
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 4b6dd73b80da..8974bb340b3a 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -671,7 +671,6 @@ static int orion_spi_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"%pOF has no valid 'reg' property (%d)\n",
np, status);
- status = 0;
continue;
}
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 2a10b3f94ff7..2ce875764ca6 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1221,7 +1221,6 @@ static int rspi_probe(struct platform_device *pdev)
struct spi_master *master;
struct rspi_data *rspi;
int ret;
- const struct of_device_id *of_id;
const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops;
@@ -1229,9 +1228,8 @@ static int rspi_probe(struct platform_device *pdev)
if (master == NULL)
return -ENOMEM;
- of_id = of_match_device(rspi_of_match, &pdev->dev);
- if (of_id) {
- ops = of_id->data;
+ ops = of_device_get_match_data(&pdev->dev);
+ if (ops) {
ret = rspi_parse_dt(&pdev->dev, master);
if (ret)
goto error1;
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index b392cca8fa4f..de7df20f8712 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -752,7 +752,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct s3c64xx_spi_driver_data *sdd;
- struct s3c64xx_spi_info *sci;
int err;
sdd = spi_master_get_devdata(spi->master);
@@ -788,8 +787,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
spi_set_ctldata(spi, cs);
}
- sci = sdd->cntrlr_info;
-
pm_runtime_get_sync(&sdd->pdev->dev);
/* Check if we can provide the requested rate */
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0eb1e9583485..fcd261f98b9f 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -900,7 +900,7 @@ static int sh_msiof_transfer_one(struct spi_master *master,
break;
copy32 = copy_bswap32;
} else if (bits <= 16) {
- if (l & 1)
+ if (l & 3)
break;
copy32 = copy_wswap32;
} else {
@@ -1021,6 +1021,8 @@ static const struct sh_msiof_chipdata rcar_gen3_data = {
static const struct of_device_id sh_msiof_match[] = {
{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
+ { .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7745", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7790", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7791", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7792", .data = &rcar_gen2_data },
@@ -1188,12 +1190,10 @@ free_tx_chan:
static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
{
struct spi_master *master = p->master;
- struct device *dev;
if (!master->dma_tx)
return;
- dev = &p->pdev->dev;
dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr,
PAGE_SIZE, DMA_FROM_DEVICE);
dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr,
@@ -1209,15 +1209,13 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
struct resource *r;
struct spi_master *master;
const struct sh_msiof_chipdata *chipdata;
- const struct of_device_id *of_id;
struct sh_msiof_spi_info *info;
struct sh_msiof_spi_priv *p;
int i;
int ret;
- of_id = of_match_device(sh_msiof_match, &pdev->dev);
- if (of_id) {
- chipdata = of_id->data;
+ chipdata = of_device_get_match_data(&pdev->dev);
+ if (chipdata) {
info = sh_msiof_spi_parse_dt(&pdev->dev);
} else {
chipdata = (const void *)pdev->id_entry->driver_data;
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
new file mode 100644
index 000000000000..5993bdbf79e4
--- /dev/null
+++ b/drivers/spi/spi-sprd-adi.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/sizes.h>
+
+/* Registers definitions for ADI controller */
+#define REG_ADI_CTRL0 0x4
+#define REG_ADI_CHN_PRIL 0x8
+#define REG_ADI_CHN_PRIH 0xc
+#define REG_ADI_INT_EN 0x10
+#define REG_ADI_INT_RAW 0x14
+#define REG_ADI_INT_MASK 0x18
+#define REG_ADI_INT_CLR 0x1c
+#define REG_ADI_GSSI_CFG0 0x20
+#define REG_ADI_GSSI_CFG1 0x24
+#define REG_ADI_RD_CMD 0x28
+#define REG_ADI_RD_DATA 0x2c
+#define REG_ADI_ARM_FIFO_STS 0x30
+#define REG_ADI_STS 0x34
+#define REG_ADI_EVT_FIFO_STS 0x38
+#define REG_ADI_ARM_CMD_STS 0x3c
+#define REG_ADI_CHN_EN 0x40
+#define REG_ADI_CHN_ADDR(id) (0x44 + (id - 2) * 4)
+#define REG_ADI_CHN_EN1 0x20c
+
+/* Bits definitions for register REG_ADI_GSSI_CFG0 */
+#define BIT_CLK_ALL_ON BIT(30)
+
+/* Bits definitions for register REG_ADI_RD_DATA */
+#define BIT_RD_CMD_BUSY BIT(31)
+#define RD_ADDR_SHIFT 16
+#define RD_VALUE_MASK GENMASK(15, 0)
+#define RD_ADDR_MASK GENMASK(30, 16)
+
+/* Bits definitions for register REG_ADI_ARM_FIFO_STS */
+#define BIT_FIFO_FULL BIT(11)
+#define BIT_FIFO_EMPTY BIT(10)
+
+/*
+ * ADI slave devices include RTC, ADC, regulator, charger, thermal and so on.
+ * The slave devices' address offset is always 0x8000 and their size is 4K.
+ */
+#define ADI_SLAVE_ADDR_SIZE SZ_4K
+#define ADI_SLAVE_OFFSET 0x8000
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define ADI_HWSPINLOCK_TIMEOUT 5000
+/*
+ * ADI controller has 50 channels including 2 software channels
+ * and 48 hardware channels.
+ */
+#define ADI_HW_CHNS 50
+
+#define ADI_FIFO_DRAIN_TIMEOUT 1000
+#define ADI_READ_TIMEOUT 2000
+#define REG_ADDR_LOW_MASK GENMASK(11, 0)
+
+struct sprd_adi {
+ struct spi_controller *ctlr;
+ struct device *dev;
+ void __iomem *base;
+ struct hwspinlock *hwlock;
+ unsigned long slave_vbase;
+ unsigned long slave_pbase;
+};
+
+static int sprd_adi_check_paddr(struct sprd_adi *sadi, u32 paddr)
+{
+ if (paddr < sadi->slave_pbase || paddr >
+ (sadi->slave_pbase + ADI_SLAVE_ADDR_SIZE)) {
+ dev_err(sadi->dev,
+ "slave physical address is incorrect, addr = 0x%x\n",
+ paddr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned long sprd_adi_to_vaddr(struct sprd_adi *sadi, u32 paddr)
+{
+ return (paddr - sadi->slave_pbase + sadi->slave_vbase);
+}
+
+static int sprd_adi_drain_fifo(struct sprd_adi *sadi)
+{
+ u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
+ u32 sts;
+
+ do {
+ sts = readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS);
+ if (sts & BIT_FIFO_EMPTY)
+ break;
+
+ cpu_relax();
+ } while (--timeout);
+
+ if (timeout == 0) {
+ dev_err(sadi->dev, "drain write fifo timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sprd_adi_fifo_is_full(struct sprd_adi *sadi)
+{
+ return readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS) & BIT_FIFO_FULL;
+}
+
+static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
+{
+ int read_timeout = ADI_READ_TIMEOUT;
+ u32 val, rd_addr;
+
+ /*
+ * Set the physical register address that needs to be read into the
+ * RD_CMD register, then the ADI controller will start the transfer
+ * automatically.
+ */
+ writel_relaxed(reg_paddr, sadi->base + REG_ADI_RD_CMD);
+
+ /*
+ * Wait for the read operation to complete: BIT_RD_CMD_BUSY is set
+ * as soon as the read command is written to the register, and it is
+ * cleared again once the read operation has completed.
+ */
+ do {
+ val = readl_relaxed(sadi->base + REG_ADI_RD_DATA);
+ if (!(val & BIT_RD_CMD_BUSY))
+ break;
+
+ cpu_relax();
+ } while (--read_timeout);
+
+ if (read_timeout == 0) {
+ dev_err(sadi->dev, "ADI read timeout\n");
+ return -EBUSY;
+ }
+
+ /*
+ * The returned value contains both the data and the register address
+ * that was read: bits 0 to 15 hold the data and bits 16 to 30 hold
+ * the register address. We can therefore check the returned address
+ * to validate the data.
+ */
+ rd_addr = (val & RD_ADDR_MASK ) >> RD_ADDR_SHIFT;
+
+ if (rd_addr != (reg_paddr & REG_ADDR_LOW_MASK)) {
+ dev_err(sadi->dev, "read error, reg addr = 0x%x, val = 0x%x\n",
+ reg_paddr, val);
+ return -EIO;
+ }
+
+ *read_val = val & RD_VALUE_MASK;
+ return 0;
+}
+
+static int sprd_adi_write(struct sprd_adi *sadi, unsigned long reg, u32 val)
+{
+ u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
+ int ret;
+
+ ret = sprd_adi_drain_fifo(sadi);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We should wait until there is room in the write FIFO before
+ * writing data to the PMIC registers.
+ */
+ do {
+ if (!sprd_adi_fifo_is_full(sadi)) {
+ writel_relaxed(val, (void __iomem *)reg);
+ break;
+ }
+
+ cpu_relax();
+ } while (--timeout);
+
+ if (timeout == 0) {
+ dev_err(sadi->dev, "write fifo is full\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sprd_adi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi_dev,
+ struct spi_transfer *t)
+{
+ struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
+ unsigned long flags, virt_reg;
+ u32 phy_reg, val;
+ int ret;
+
+ if (t->rx_buf) {
+ phy_reg = *(u32 *)t->rx_buf + sadi->slave_pbase;
+
+ ret = sprd_adi_check_paddr(sadi, phy_reg);
+ if (ret)
+ return ret;
+
+ ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
+ ADI_HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret) {
+ dev_err(sadi->dev, "get the hw lock failed\n");
+ return ret;
+ }
+
+ ret = sprd_adi_read(sadi, phy_reg, &val);
+ hwspin_unlock_irqrestore(sadi->hwlock, &flags);
+ if (ret)
+ return ret;
+
+ *(u32 *)t->rx_buf = val;
+ } else if (t->tx_buf) {
+ u32 *p = (u32 *)t->tx_buf;
+
+ /*
+ * Get the physical register address that needs to be written and
+ * convert it to a virtual address, since the write path needs the
+ * virtual register address.
+ */
+ phy_reg = *p++ + sadi->slave_pbase;
+ ret = sprd_adi_check_paddr(sadi, phy_reg);
+ if (ret)
+ return ret;
+
+ virt_reg = sprd_adi_to_vaddr(sadi, phy_reg);
+ val = *p;
+
+ ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
+ ADI_HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret) {
+ dev_err(sadi->dev, "get the hw lock failed\n");
+ return ret;
+ }
+
+ ret = sprd_adi_write(sadi, virt_reg, val);
+ hwspin_unlock_irqrestore(sadi->hwlock, &flags);
+ if (ret)
+ return ret;
+ } else {
+ dev_err(sadi->dev, "no buffer for transfer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void sprd_adi_hw_init(struct sprd_adi *sadi)
+{
+ struct device_node *np = sadi->dev->of_node;
+ int i, size, chn_cnt;
+ const __be32 *list;
+ u32 tmp;
+
+ /* Address bits select default 12 bits */
+ writel_relaxed(0, sadi->base + REG_ADI_CTRL0);
+
+ /* Set all channels as default priority */
+ writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIL);
+ writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIH);
+
+ /* Set clock auto gate mode */
+ tmp = readl_relaxed(sadi->base + REG_ADI_GSSI_CFG0);
+ tmp &= ~BIT_CLK_ALL_ON;
+ writel_relaxed(tmp, sadi->base + REG_ADI_GSSI_CFG0);
+
+ /* Apply the hardware channel settings */
+ list = of_get_property(np, "sprd,hw-channels", &size);
+ if (!list || !size) {
+ dev_info(sadi->dev, "no hw channels setting in node\n");
+ return;
+ }
+
+ chn_cnt = size / 8;
+ for (i = 0; i < chn_cnt; i++) {
+ u32 value;
+ u32 chn_id = be32_to_cpu(*list++);
+ u32 chn_config = be32_to_cpu(*list++);
+
+ /* Channel 0 and 1 are software channels */
+ if (chn_id < 2)
+ continue;
+
+ writel_relaxed(chn_config, sadi->base +
+ REG_ADI_CHN_ADDR(chn_id));
+
+ if (chn_id < 32) {
+ value = readl_relaxed(sadi->base + REG_ADI_CHN_EN);
+ value |= BIT(chn_id);
+ writel_relaxed(value, sadi->base + REG_ADI_CHN_EN);
+ } else if (chn_id < ADI_HW_CHNS) {
+ value = readl_relaxed(sadi->base + REG_ADI_CHN_EN1);
+ value |= BIT(chn_id - 32);
+ writel_relaxed(value, sadi->base + REG_ADI_CHN_EN1);
+ }
+ }
+}
+
+static int sprd_adi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_controller *ctlr;
+ struct sprd_adi *sadi;
+ struct resource *res;
+ u32 num_chipselect;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "can not find the adi bus node\n");
+ return -ENODEV;
+ }
+
+ pdev->id = of_alias_get_id(np, "spi");
+ num_chipselect = of_get_child_count(np);
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(struct sprd_adi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, ctlr);
+ sadi = spi_controller_get_devdata(ctlr);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sadi->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sadi->base)) {
+ ret = PTR_ERR(sadi->base);
+ goto put_ctlr;
+ }
+
+ sadi->slave_vbase = (unsigned long)sadi->base + ADI_SLAVE_OFFSET;
+ sadi->slave_pbase = res->start + ADI_SLAVE_OFFSET;
+ sadi->ctlr = ctlr;
+ sadi->dev = &pdev->dev;
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not get the hardware spinlock\n");
+ goto put_ctlr;
+ }
+
+ sadi->hwlock = hwspin_lock_request_specific(ret);
+ if (!sadi->hwlock) {
+ ret = -ENXIO;
+ goto put_ctlr;
+ }
+
+ sprd_adi_hw_init(sadi);
+
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->bus_num = pdev->id;
+ ctlr->num_chipselect = num_chipselect;
+ ctlr->flags = SPI_MASTER_HALF_DUPLEX;
+ ctlr->bits_per_word_mask = 0;
+ ctlr->transfer_one = sprd_adi_transfer_one;
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register SPI controller\n");
+ goto free_hwlock;
+ }
+
+ return 0;
+
+free_hwlock:
+ hwspin_lock_free(sadi->hwlock);
+put_ctlr:
+ spi_controller_put(ctlr);
+ return ret;
+}
+
+static int sprd_adi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
+ struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
+
+ hwspin_lock_free(sadi->hwlock);
+ return 0;
+}
+
+static const struct of_device_id sprd_adi_of_match[] = {
+ {
+ .compatible = "sprd,sc9860-adi",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_adi_of_match);
+
+static struct platform_driver sprd_adi_driver = {
+ .driver = {
+ .name = "sprd-adi",
+ .of_match_table = sprd_adi_of_match,
+ },
+ .probe = sprd_adi_probe,
+ .remove = sprd_adi_remove,
+};
+module_platform_driver(sprd_adi_driver);
+
+MODULE_DESCRIPTION("Spreadtrum ADI Controller Driver");
+MODULE_AUTHOR("Baolin Wang <Baolin.Wang@spreadtrum.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 44550182a4a3..a76acedd7e2f 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -50,7 +50,7 @@
#define SPI_IDLE_SDA_PULL_LOW (2 << 18)
#define SPI_IDLE_SDA_PULL_HIGH (3 << 18)
#define SPI_IDLE_SDA_MASK (3 << 18)
-#define SPI_CS_SS_VAL (1 << 20)
+#define SPI_CS_SW_VAL (1 << 20)
#define SPI_CS_SW_HW (1 << 21)
/* SPI_CS_POL_INACTIVE bits are default high */
/* n from 0 to 3 */
@@ -705,9 +705,9 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
command1 |= SPI_CS_SW_HW;
if (spi->mode & SPI_CS_HIGH)
- command1 |= SPI_CS_SS_VAL;
+ command1 |= SPI_CS_SW_VAL;
else
- command1 &= ~SPI_CS_SS_VAL;
+ command1 &= ~SPI_CS_SW_VAL;
tegra_spi_writel(tspi, 0, SPI_COMMAND2);
} else {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index e8b5a5e21b2e..b33a727a0158 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2200,7 +2200,7 @@ static void devm_spi_unregister(struct device *dev, void *res)
* Context: can sleep
*
* Register a SPI device as with spi_register_controller() which will
- * automatically be unregister
+ * automatically be unregistered and freed.
*
* Return: zero on success, else a negative error code.
*/
@@ -2241,15 +2241,18 @@ static int __unregister(struct device *dev, void *null)
* only ones directly touching chip registers.
*
* This must be called from context that can sleep.
+ *
+ * Note that this function also drops a reference to the controller.
*/
void spi_unregister_controller(struct spi_controller *ctlr)
{
struct spi_controller *found;
+ int id = ctlr->bus_num;
int dummy;
/* First make sure that this controller was ever added */
mutex_lock(&board_lock);
- found = idr_find(&spi_master_idr, ctlr->bus_num);
+ found = idr_find(&spi_master_idr, id);
mutex_unlock(&board_lock);
if (found != ctlr) {
dev_dbg(&ctlr->dev,
@@ -2269,7 +2272,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
device_unregister(&ctlr->dev);
/* free bus id */
mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
+ idr_remove(&spi_master_idr, id);
mutex_unlock(&board_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 5f14247392bf..687e0eac85bf 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -1,5 +1,4 @@
TODO:
- - checkpatch.pl cleanups
- sparse fixes
- rename files to be not so "generic"
- add proper arch dependencies as needed
diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
index d9f8b1424da1..c78989351f9c 100644
--- a/drivers/staging/android/ion/ion-ioctl.c
+++ b/drivers/staging/android/ion/ion-ioctl.c
@@ -27,19 +27,18 @@ union ion_ioctl_arg {
static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
{
- int ret = 0;
-
switch (cmd) {
case ION_IOC_HEAP_QUERY:
- ret = arg->query.reserved0 != 0;
- ret |= arg->query.reserved1 != 0;
- ret |= arg->query.reserved2 != 0;
+ if (arg->query.reserved0 ||
+ arg->query.reserved1 ||
+ arg->query.reserved2)
+ return -EINVAL;
break;
default:
break;
}
- return ret ? -EINVAL : 0;
+ return 0;
}
/* fix up the cases where the ioctl direction bits are incorrect */
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 93e2c90fa77d..a7d9b0e98572 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -81,7 +81,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
unsigned long flags)
{
struct ion_buffer *buffer;
- struct sg_table *table;
int ret;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -109,7 +108,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
goto err1;
}
- table = buffer->sg_table;
buffer->dev = dev;
buffer->size = len;
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 621e5f7ceacb..f5f9cd63f8e9 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -86,6 +86,7 @@ struct ion_buffer {
struct sg_table *sg_table;
struct list_head attachments;
};
+
void ion_buffer_destroy(struct ion_buffer *buffer);
/**
@@ -299,7 +300,6 @@ size_t ion_heap_freelist_shrink(struct ion_heap *heap,
*/
size_t ion_heap_freelist_size(struct ion_heap *heap);
-
/**
* functions for creating and destroying a heap pool -- allows you
* to keep a pool of pre allocated memory to use from your heap. Keeping
diff --git a/drivers/staging/ccree/cc_hal.h b/drivers/staging/ccree/cc_hal.h
deleted file mode 100644
index eecc866dfc74..000000000000
--- a/drivers/staging/ccree/cc_hal.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* pseudo cc_hal.h for cc7x_perf_test_driver (to be able to include code from
- * CC drivers).
- */
-
-#ifndef __CC_HAL_H__
-#define __CC_HAL_H__
-
-#include <linux/io.h>
-
-#define READ_REGISTER(_addr) ioread32((_addr))
-#define WRITE_REGISTER(_addr, _data) iowrite32((_data), (_addr))
-
-#define CC_HAL_WRITE_REGISTER(offset, val) \
- WRITE_REGISTER(cc_base + (offset), val)
-#define CC_HAL_READ_REGISTER(offset) READ_REGISTER(cc_base + (offset))
-
-#endif
diff --git a/drivers/staging/ccree/cc_lli_defs.h b/drivers/staging/ccree/cc_lli_defs.h
index 851d3907167e..a9c417b07b04 100644
--- a/drivers/staging/ccree/cc_lli_defs.h
+++ b/drivers/staging/ccree/cc_lli_defs.h
@@ -59,7 +59,7 @@ static inline void cc_lli_set_addr(u32 *lli_p, dma_addr_t addr)
lli_p[LLI_WORD0_OFFSET] = (addr & U32_MAX);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
lli_p[LLI_WORD1_OFFSET] &= ~LLI_HADDR_MASK;
- lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 16));
+ lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 32));
#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
}
diff --git a/drivers/staging/ccree/cc_regs.h b/drivers/staging/ccree/cc_regs.h
deleted file mode 100644
index 4a893a6ba6ef..000000000000
--- a/drivers/staging/ccree/cc_regs.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/*!
- * @file
- * @brief This file contains macro definitions for accessing ARM TrustZone
- * CryptoCell register space.
- */
-
-#ifndef _CC_REGS_H_
-#define _CC_REGS_H_
-
-#include <linux/bitfield.h>
-
-#define AXIM_MON_BASE_OFFSET CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_COMP)
-#define AXIM_MON_COMP_VALUE GENMASK(DX_AXIM_MON_COMP_VALUE_BIT_SIZE + \
- DX_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
- DX_AXIM_MON_COMP_VALUE_BIT_SHIFT)
-
-#define AXIM_MON_BASE_OFFSET CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_COMP)
-#define AXIM_MON_COMP_VALUE GENMASK(DX_AXIM_MON_COMP_VALUE_BIT_SIZE + \
- DX_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
- DX_AXIM_MON_COMP_VALUE_BIT_SHIFT)
-
-/* Register Offset macro */
-#define CC_REG_OFFSET(unit_name, reg_name) \
- (DX_BASE_ ## unit_name + DX_ ## reg_name ## _REG_OFFSET)
-
-#endif /*_CC_REGS_H_*/
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
index 5abe6b24ff8c..ba0954e4d2e5 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/ssi_aead.c
@@ -92,18 +92,17 @@ static inline bool valid_assoclen(struct aead_request *req)
static void ssi_aead_exit(struct crypto_aead *tfm)
{
- struct device *dev = NULL;
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
- SSI_LOG_DEBUG("Clearing context @%p for %s\n",
- crypto_aead_ctx(tfm), crypto_tfm_alg_name(&tfm->base));
+ dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
+ crypto_tfm_alg_name(&tfm->base));
- dev = &ctx->drvdata->plat_dev->dev;
/* Unmap enckey buffer */
if (ctx->enckey) {
dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
- SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=%pad\n",
- ctx->enckey_dma_addr);
+ dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
+ &ctx->enckey_dma_addr);
ctx->enckey_dma_addr = 0;
ctx->enckey = NULL;
}
@@ -116,8 +115,8 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
xcbc->xcbc_keys,
xcbc->xcbc_keys_dma_addr);
}
- SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
- xcbc->xcbc_keys_dma_addr);
+ dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
+ &xcbc->xcbc_keys_dma_addr);
xcbc->xcbc_keys_dma_addr = 0;
xcbc->xcbc_keys = NULL;
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
@@ -127,8 +126,8 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
hmac->ipad_opad,
hmac->ipad_opad_dma_addr);
- SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
- hmac->ipad_opad_dma_addr);
+ dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
+ &hmac->ipad_opad_dma_addr);
hmac->ipad_opad_dma_addr = 0;
hmac->ipad_opad = NULL;
}
@@ -136,8 +135,8 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
hmac->padded_authkey,
hmac->padded_authkey_dma_addr);
- SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
- hmac->padded_authkey_dma_addr);
+ dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
+ &hmac->padded_authkey_dma_addr);
hmac->padded_authkey_dma_addr = 0;
hmac->padded_authkey = NULL;
}
@@ -146,29 +145,31 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
static int ssi_aead_init(struct crypto_aead *tfm)
{
- struct device *dev;
struct aead_alg *alg = crypto_aead_alg(tfm);
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct ssi_crypto_alg *ssi_alg =
container_of(alg, struct ssi_crypto_alg, aead_alg);
- SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&tfm->base));
+ struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
+
+ dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
+ crypto_tfm_alg_name(&tfm->base));
/* Initialize modes in instance */
ctx->cipher_mode = ssi_alg->cipher_mode;
ctx->flow_mode = ssi_alg->flow_mode;
ctx->auth_mode = ssi_alg->auth_mode;
ctx->drvdata = ssi_alg->drvdata;
- dev = &ctx->drvdata->plat_dev->dev;
crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
/* Allocate key buffer, cache line aligned */
ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
&ctx->enckey_dma_addr, GFP_KERNEL);
if (!ctx->enckey) {
- SSI_LOG_ERR("Failed allocating key buffer\n");
+ dev_err(dev, "Failed allocating key buffer\n");
goto init_failed;
}
- SSI_LOG_DEBUG("Allocated enckey buffer in context ctx->enckey=@%p\n", ctx->enckey);
+ dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
+ ctx->enckey);
/* Set default authlen value */
@@ -182,7 +183,7 @@ static int ssi_aead_init(struct crypto_aead *tfm)
&xcbc->xcbc_keys_dma_addr,
GFP_KERNEL);
if (!xcbc->xcbc_keys) {
- SSI_LOG_ERR("Failed allocating buffer for XCBC keys\n");
+ dev_err(dev, "Failed allocating buffer for XCBC keys\n");
goto init_failed;
}
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
@@ -196,12 +197,12 @@ static int ssi_aead_init(struct crypto_aead *tfm)
GFP_KERNEL);
if (!hmac->ipad_opad) {
- SSI_LOG_ERR("Failed allocating IPAD/OPAD buffer\n");
+ dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
goto init_failed;
}
- SSI_LOG_DEBUG("Allocated authkey buffer in context ctx->authkey=@%p\n",
- hmac->ipad_opad);
+ dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
+ hmac->ipad_opad);
hmac->padded_authkey = dma_alloc_coherent(dev,
MAX_HMAC_BLOCK_SIZE,
@@ -209,7 +210,7 @@ static int ssi_aead_init(struct crypto_aead *tfm)
GFP_KERNEL);
if (!hmac->padded_authkey) {
- SSI_LOG_ERR("failed to allocate padded_authkey\n");
+ dev_err(dev, "failed to allocate padded_authkey\n");
goto init_failed;
}
} else {
@@ -240,8 +241,7 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
ctx->authsize) != 0) {
- SSI_LOG_DEBUG("Payload authentication failure, "
- "(auth-size=%d, cipher=%d).\n",
+ dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
ctx->authsize, ctx->cipher_mode);
/* In case of payload authentication failure, MUST NOT
* revealed the decrypted message --> zero its memory.
@@ -252,8 +252,11 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
} else { /*ENCRYPT*/
if (unlikely(areq_ctx->is_icv_fragmented))
ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->mac_buf, areq_ctx->dst_sgl, areq->cryptlen + areq_ctx->dst_offset,
- areq->cryptlen + areq_ctx->dst_offset + ctx->authsize, SSI_SG_FROM_BUF);
+ dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
+ areq->cryptlen + areq_ctx->dst_offset,
+ (areq->cryptlen + areq_ctx->dst_offset +
+ ctx->authsize),
+ SSI_SG_FROM_BUF);
/* If an IV was generated, copy it back to the user provided buffer. */
if (areq_ctx->backup_giv) {
@@ -377,8 +380,10 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
{
- SSI_LOG_DEBUG("enc_keylen=%u authkeylen=%u\n",
- ctx->enc_keylen, ctx->auth_keylen);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n",
+ ctx->enc_keylen, ctx->auth_keylen);
switch (ctx->auth_mode) {
case DRV_HASH_SHA1:
@@ -395,22 +400,22 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
return -EINVAL;
break;
default:
- SSI_LOG_ERR("Invalid auth_mode=%d\n", ctx->auth_mode);
+ dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
return -EINVAL;
}
/* Check cipher key size */
if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
- SSI_LOG_ERR("Invalid cipher(3DES) key size: %u\n",
- ctx->enc_keylen);
+ dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
+ ctx->enc_keylen);
return -EINVAL;
}
} else { /* Default assumed to be AES ciphers */
if ((ctx->enc_keylen != AES_KEYSIZE_128) &&
(ctx->enc_keylen != AES_KEYSIZE_192) &&
(ctx->enc_keylen != AES_KEYSIZE_256)) {
- SSI_LOG_ERR("Invalid cipher(AES) key size: %u\n",
- ctx->enc_keylen);
+ dev_err(dev, "Invalid cipher(AES) key size: %u\n",
+ ctx->enc_keylen);
return -EINVAL;
}
}
@@ -426,7 +431,7 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
{
dma_addr_t key_dma_addr = 0;
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
u32 larval_addr = ssi_ahash_get_larval_digest_sram_addr(
ctx->drvdata, ctx->auth_mode);
struct ssi_crypto_req ssi_req = {};
@@ -455,8 +460,8 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
if (likely(keylen != 0)) {
key_dma_addr = dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, key_dma_addr))) {
- SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
- " DMA failed\n", key, keylen);
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
return -ENOMEM;
}
if (keylen > blocksize) {
@@ -534,7 +539,7 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (unlikely(rc != 0))
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
if (likely(key_dma_addr != 0))
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
@@ -551,10 +556,10 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
struct crypto_authenc_key_param *param;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
int seq_len = 0, rc = -EINVAL;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
- SSI_LOG_DEBUG("Setting key in context @%p for %s. key=%p keylen=%u\n",
- ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)),
- key, keylen);
+ dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
+ ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
/* STAT_PHASE_0: Init and sanity checks */
@@ -622,7 +627,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
break; /* No auth. key setup */
default:
- SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
+ dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
rc = -ENOTSUPP;
goto badkey;
}
@@ -632,7 +637,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto setkey_error;
}
}
@@ -651,7 +656,6 @@ setkey_error:
static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- int rc = 0;
if (keylen < 3)
return -EINVAL;
@@ -659,9 +663,7 @@ static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
keylen -= 3;
memcpy(ctx->ctr_nonce, key + keylen, 3);
- rc = ssi_aead_setkey(tfm, key, keylen);
-
- return rc;
+ return ssi_aead_setkey(tfm, key, keylen);
}
#endif /*SSI_CC_HAS_AES_CCM*/
@@ -670,6 +672,7 @@ static int ssi_aead_setauthsize(
unsigned int authsize)
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
/* Unsupported auth. sizes */
if ((authsize == 0) ||
@@ -678,7 +681,7 @@ static int ssi_aead_setauthsize(
}
ctx->authsize = authsize;
- SSI_LOG_DEBUG("authlen=%d\n", ctx->authsize);
+ dev_dbg(dev, "authlen=%d\n", ctx->authsize);
return 0;
}
@@ -731,10 +734,11 @@ ssi_aead_create_assoc_desc(
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
enum ssi_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
unsigned int idx = *seq_size;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
switch (assoc_dma_type) {
case SSI_DMA_BUF_DLLI:
- SSI_LOG_DEBUG("ASSOC buffer type DLLI\n");
+ dev_dbg(dev, "ASSOC buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
areq->assoclen, NS_BIT); set_flow_mode(&desc[idx],
@@ -744,7 +748,7 @@ ssi_aead_create_assoc_desc(
set_din_not_last_indication(&desc[idx]);
break;
case SSI_DMA_BUF_MLLI:
- SSI_LOG_DEBUG("ASSOC buffer type MLLI\n");
+ dev_dbg(dev, "ASSOC buffer type MLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
areq_ctx->assoc.mlli_nents, NS_BIT);
@@ -755,7 +759,7 @@ ssi_aead_create_assoc_desc(
break;
case SSI_DMA_BUF_NULL:
default:
- SSI_LOG_ERR("Invalid ASSOC buffer type\n");
+ dev_err(dev, "Invalid ASSOC buffer type\n");
}
*seq_size = (++idx);
@@ -772,6 +776,9 @@ ssi_aead_process_authenc_data_desc(
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
unsigned int idx = *seq_size;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
switch (data_dma_type) {
case SSI_DMA_BUF_DLLI:
@@ -783,7 +790,7 @@ ssi_aead_process_authenc_data_desc(
unsigned int offset =
(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
areq_ctx->dst_offset : areq_ctx->src_offset;
- SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type DLLI\n");
+ dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
(sg_dma_address(cipher) + offset),
@@ -810,7 +817,7 @@ ssi_aead_process_authenc_data_desc(
}
}
- SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type MLLI\n");
+ dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
NS_BIT);
@@ -819,7 +826,7 @@ ssi_aead_process_authenc_data_desc(
}
case SSI_DMA_BUF_NULL:
default:
- SSI_LOG_ERR("AUTHENC: Invalid SRC/DST buffer type\n");
+ dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
}
*seq_size = (++idx);
@@ -835,13 +842,16 @@ ssi_aead_process_cipher_data_desc(
unsigned int idx = *seq_size;
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
if (areq_ctx->cryptlen == 0)
return; /*null processing*/
switch (data_dma_type) {
case SSI_DMA_BUF_DLLI:
- SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type DLLI\n");
+ dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
(sg_dma_address(areq_ctx->src_sgl) +
@@ -853,7 +863,7 @@ ssi_aead_process_cipher_data_desc(
set_flow_mode(&desc[idx], flow_mode);
break;
case SSI_DMA_BUF_MLLI:
- SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type MLLI\n");
+ dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
areq_ctx->src.mlli_nents, NS_BIT);
@@ -863,7 +873,7 @@ ssi_aead_process_cipher_data_desc(
break;
case SSI_DMA_BUF_NULL:
default:
- SSI_LOG_ERR("CIPHER: Invalid SRC/DST buffer type\n");
+ dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
}
*seq_size = (++idx);
@@ -1178,14 +1188,15 @@ static inline void ssi_aead_load_mlli_to_sram(
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
if (unlikely(
(req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
(req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) ||
!req_ctx->is_single_pass)) {
- SSI_LOG_DEBUG("Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
- (unsigned int)ctx->drvdata->mlli_sram_addr,
- req_ctx->mlli_params.mlli_len);
+ dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
+ (unsigned int)ctx->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
/* Copy MLLI table host-to-sram */
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI,
@@ -1333,6 +1344,7 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int assoclen = req->assoclen;
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
@@ -1371,7 +1383,7 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
areq_ctx->is_single_pass = false;
break;
default:
- SSI_LOG_ERR("Unexpected flow mode (%d)\n", ctx->flow_mode);
+ dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
goto data_size_err;
}
@@ -1554,6 +1566,7 @@ static int config_ccm_adata(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
//unsigned int size_of_a = 0, rem_a_size = 0;
unsigned int lp = req->iv[0];
@@ -1575,7 +1588,7 @@ static int config_ccm_adata(struct aead_request *req)
/* taken from crypto/ccm.c */
/* 2 <= L <= 8, so 1 <= L' <= 7. */
if (l < 2 || l > 8) {
- SSI_LOG_ERR("illegal iv value %X\n", req->iv[0]);
+ dev_err(dev, "illegal iv value %X\n", req->iv[0]);
return -EINVAL;
}
memcpy(b0, req->iv, AES_BLOCK_SIZE);
@@ -1588,8 +1601,10 @@ static int config_ccm_adata(struct aead_request *req)
*b0 |= 64; /* Enable bit 6 if Adata exists. */
rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
- if (rc != 0)
+ if (rc != 0) {
+ dev_err(dev, "message len overflow detected");
return rc;
+ }
/* END of "taken from crypto/ccm.c" */
/* l(a) - size of associated data. */
@@ -1812,7 +1827,6 @@ static inline int ssi_aead_gcm(
unsigned int *seq_size)
{
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
- unsigned int idx = *seq_size;
unsigned int cipher_flow_mode;
if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
@@ -1829,7 +1843,6 @@ static inline int ssi_aead_gcm(
ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
ssi_aead_process_gcm_result_desc(req, desc, seq_size);
- idx = *seq_size;
return 0;
}
@@ -1844,7 +1857,6 @@ static inline int ssi_aead_gcm(
ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
ssi_aead_process_gcm_result_desc(req, desc, seq_size);
- idx = *seq_size;
return 0;
}
@@ -1861,13 +1873,13 @@ static inline void ssi_aead_dump_gcm(
return;
if (title) {
- SSI_LOG_DEBUG("----------------------------------------------------------------------------------");
- SSI_LOG_DEBUG("%s\n", title);
+ dev_dbg(dev, "----------------------------------------------------------------------------------");
+ dev_dbg(dev, "%s\n", title);
}
- SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n",
- ctx->cipher_mode, ctx->authsize, ctx->enc_keylen,
- req->assoclen, req_ctx->cryptlen);
+ dev_dbg(dev, "cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n",
+ ctx->cipher_mode, ctx->authsize, ctx->enc_keylen,
+ req->assoclen, req_ctx->cryptlen);
if (ctx->enckey)
dump_byte_array("mac key", ctx->enckey, 16);
@@ -1897,6 +1909,7 @@ static int config_gcm_context(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
DRV_CRYPTO_DIRECTION_ENCRYPT) ?
@@ -1904,7 +1917,8 @@ static int config_gcm_context(struct aead_request *req)
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
- SSI_LOG_DEBUG("%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", __func__, cryptlen, req->assoclen, ctx->authsize);
+ dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
+ __func__, cryptlen, req->assoclen, ctx->authsize);
memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
@@ -1958,20 +1972,20 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ssi_crypto_req ssi_req = {};
- SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
- ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
- ctx, req, req->iv, sg_virt(req->src), req->src->offset,
- sg_virt(req->dst), req->dst->offset, req->cryptlen);
+ dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
+ ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
+ ctx, req, req->iv, sg_virt(req->src), req->src->offset,
+ sg_virt(req->dst), req->dst->offset, req->cryptlen);
/* STAT_PHASE_0: Init and sanity checks */
/* Check data length according to mode */
if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
- SSI_LOG_ERR("Unsupported crypt/assoc len %d/%d.\n",
- req->cryptlen, req->assoclen);
+ dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
+ req->cryptlen, req->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -2017,7 +2031,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
if (ctx->cipher_mode == DRV_CIPHER_CCM) {
rc = config_ccm_adata(req);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("config_ccm_adata() returned with a failure %d!", rc);
+ dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
+ rc);
goto exit;
}
} else {
@@ -2031,7 +2046,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
rc = config_gcm_context(req);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("config_gcm_context() returned with a failure %d!", rc);
+ dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
+ rc);
goto exit;
}
}
@@ -2039,7 +2055,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
rc = ssi_buffer_mgr_map_aead_request(ctx->drvdata, req);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("map_request() failed\n");
+ dev_err(dev, "map_request() failed\n");
goto exit;
}
@@ -2095,7 +2111,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
break;
#endif
default:
- SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
+ dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
ssi_buffer_mgr_unmap_aead_request(dev, req);
rc = -ENOTSUPP;
goto exit;
@@ -2106,7 +2122,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_aead_request(dev, req);
}
@@ -2139,10 +2155,13 @@ static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
/* Very similar to ssi_aead_encrypt() above. */
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2183,13 +2202,14 @@ static int ssi_aead_decrypt(struct aead_request *req)
#if SSI_CC_HAS_AES_CCM
static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_decrypt() above. */
-
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2214,9 +2234,9 @@ out:
static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- int rc = 0;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
- SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key);
+ dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
if (keylen < 4)
return -EINVAL;
@@ -2224,17 +2244,15 @@ static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
keylen -= 4;
memcpy(ctx->ctr_nonce, key + keylen, 4);
- rc = ssi_aead_setkey(tfm, key, keylen);
-
- return rc;
+ return ssi_aead_setkey(tfm, key, keylen);
}
static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- int rc = 0;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
- SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key);
+ dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
if (keylen < 4)
return -EINVAL;
@@ -2242,9 +2260,7 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
keylen -= 4;
memcpy(ctx->ctr_nonce, key + keylen, 4);
- rc = ssi_aead_setkey(tfm, key, keylen);
-
- return rc;
+ return ssi_aead_setkey(tfm, key, keylen);
}
static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
@@ -2269,7 +2285,10 @@ static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- SSI_LOG_DEBUG("authsize %d\n", authsize);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "authsize %d\n", authsize);
switch (authsize) {
case 8:
@@ -2286,7 +2305,10 @@ static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- SSI_LOG_DEBUG("authsize %d\n", authsize);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "authsize %d\n", authsize);
if (authsize != 16)
return -EINVAL;
@@ -2298,11 +2320,14 @@ static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
{
/* Very similar to ssi_aead_encrypt() above. */
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2350,11 +2375,14 @@ static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
{
/* Very similar to ssi_aead_decrypt() above. */
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2654,16 +2682,17 @@ static struct ssi_alg_template aead_algs[] = {
#endif /*SSI_CC_HAS_AES_GCM*/
};
-static struct ssi_crypto_alg *ssi_aead_create_alg(struct ssi_alg_template *template)
+static struct ssi_crypto_alg *ssi_aead_create_alg(
+ struct ssi_alg_template *template,
+ struct device *dev)
{
struct ssi_crypto_alg *t_alg;
struct aead_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg) {
- SSI_LOG_ERR("failed to allocate t_alg\n");
+ if (!t_alg)
return ERR_PTR(-ENOMEM);
- }
+
alg = &template->template_aead;
snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
@@ -2713,6 +2742,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
struct ssi_crypto_alg *t_alg;
int rc = -ENOMEM;
int alg;
+ struct device *dev = drvdata_to_dev(drvdata);
aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
if (!aead_handle) {
@@ -2720,36 +2750,36 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
goto fail0;
}
+ INIT_LIST_HEAD(&aead_handle->aead_list);
drvdata->aead_handle = aead_handle;
aead_handle->sram_workspace_addr = ssi_sram_mgr_alloc(
drvdata, MAX_HMAC_DIGEST_SIZE);
if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
- SSI_LOG_ERR("SRAM pool exhausted\n");
+ dev_err(dev, "SRAM pool exhausted\n");
rc = -ENOMEM;
goto fail1;
}
- INIT_LIST_HEAD(&aead_handle->aead_list);
-
/* Linux crypto */
for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
- t_alg = ssi_aead_create_alg(&aead_algs[alg]);
+ t_alg = ssi_aead_create_alg(&aead_algs[alg], dev);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
- SSI_LOG_ERR("%s alg allocation failed\n",
- aead_algs[alg].driver_name);
+ dev_err(dev, "%s alg allocation failed\n",
+ aead_algs[alg].driver_name);
goto fail1;
}
t_alg->drvdata = drvdata;
rc = crypto_register_aead(&t_alg->aead_alg);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("%s alg registration failed\n",
- t_alg->aead_alg.base.cra_driver_name);
+ dev_err(dev, "%s alg registration failed\n",
+ t_alg->aead_alg.base.cra_driver_name);
goto fail2;
} else {
list_add_tail(&t_alg->entry, &aead_handle->aead_list);
- SSI_LOG_DEBUG("Registered %s\n", t_alg->aead_alg.base.cra_driver_name);
+ dev_dbg(dev, "Registered %s\n",
+ t_alg->aead_alg.base.cra_driver_name);
}
}
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index 63936091d524..1f8a225530a8 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -33,14 +33,10 @@
#include "ssi_hash.h"
#include "ssi_aead.h"
-#ifdef CC_DEBUG
#define GET_DMA_BUFFER_TYPE(buff_type) ( \
((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
-#else
-#define GET_DMA_BUFFER_TYPE(buff_type)
-#endif
enum dma_buffer_type {
DMA_NULL_TYPE = -1,
@@ -76,16 +72,12 @@ struct buffer_array {
* @lbytes: [OUT] Returns the amount of bytes at the last entry
*/
static unsigned int ssi_buffer_mgr_get_sgl_nents(
- struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes, bool *is_chained)
+ struct device *dev, struct scatterlist *sg_list,
+ unsigned int nbytes, u32 *lbytes, bool *is_chained)
{
unsigned int nents = 0;
while (nbytes != 0) {
- if (sg_is_chain(sg_list)) {
- SSI_LOG_ERR("Unexpected chained entry "
- "in sg (entry =0x%X)\n", nents);
- BUG();
- }
if (sg_list->length != 0) {
nents++;
/* get the number of bytes in the last entry */
@@ -98,7 +90,7 @@ static unsigned int ssi_buffer_mgr_get_sgl_nents(
*is_chained = true;
}
}
- SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
+ dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
return nents;
}
@@ -134,20 +126,20 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
* @direct:
*/
void ssi_buffer_mgr_copy_scatterlist_portion(
- u8 *dest, struct scatterlist *sg,
- u32 to_skip, u32 end,
- enum ssi_sg_cpy_direct direct)
+ struct device *dev, u8 *dest,
+ struct scatterlist *sg, u32 to_skip,
+ u32 end, enum ssi_sg_cpy_direct direct)
{
u32 nents, lbytes;
- nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
+ nents = ssi_buffer_mgr_get_sgl_nents(dev, sg, end, &lbytes, NULL);
sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
(direct == SSI_SG_TO_BUF));
}
static inline int ssi_buffer_mgr_render_buff_to_mlli(
- dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents,
- u32 **mlli_entry_pp)
+ struct device *dev, dma_addr_t buff_dma, u32 buff_size,
+ u32 *curr_nents, u32 **mlli_entry_pp)
{
u32 *mlli_entry_p = *mlli_entry_pp;
u32 new_nents;
@@ -161,9 +153,9 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
cc_lli_set_addr(mlli_entry_p, buff_dma);
cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
- SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
- mlli_entry_p[LLI_WORD0_OFFSET],
- mlli_entry_p[LLI_WORD1_OFFSET]);
+ dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
+ *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
mlli_entry_p = mlli_entry_p + 2;
@@ -172,9 +164,9 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
/*Last entry */
cc_lli_set_addr(mlli_entry_p, buff_dma);
cc_lli_set_size(mlli_entry_p, buff_size);
- SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
- mlli_entry_p[LLI_WORD0_OFFSET],
- mlli_entry_p[LLI_WORD1_OFFSET]);
+ dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
+ *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
mlli_entry_p = mlli_entry_p + 2;
*mlli_entry_pp = mlli_entry_p;
(*curr_nents)++;
@@ -182,8 +174,9 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
}
static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
- struct scatterlist *sgl, u32 sgl_data_len, u32 sgl_offset,
- u32 *curr_nents, u32 **mlli_entry_pp)
+ struct device *dev, struct scatterlist *sgl,
+ u32 sgl_data_len, u32 sgl_offset, u32 *curr_nents,
+ u32 **mlli_entry_pp)
{
struct scatterlist *curr_sgl = sgl;
u32 *mlli_entry_p = *mlli_entry_pp;
@@ -197,8 +190,8 @@ static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
sgl_data_len;
sgl_data_len -= entry_data_len;
rc = ssi_buffer_mgr_render_buff_to_mlli(
- sg_dma_address(curr_sgl) + sgl_offset, entry_data_len,
- curr_nents, &mlli_entry_p);
+ dev, sg_dma_address(curr_sgl) + sgl_offset,
+ entry_data_len, curr_nents, &mlli_entry_p);
if (rc != 0)
return rc;
@@ -217,14 +210,14 @@ static int ssi_buffer_mgr_generate_mlli(
u32 total_nents = 0, prev_total_nents = 0;
int rc = 0, i;
- SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);
+ dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
/* Allocate memory from the pointed pool */
mlli_params->mlli_virt_addr = dma_pool_alloc(
mlli_params->curr_pool, GFP_KERNEL,
&mlli_params->mlli_dma_addr);
if (unlikely(!mlli_params->mlli_virt_addr)) {
- SSI_LOG_ERR("dma_pool_alloc() failed\n");
+ dev_err(dev, "dma_pool_alloc() failed\n");
rc = -ENOMEM;
goto build_mlli_exit;
}
@@ -234,12 +227,12 @@ static int ssi_buffer_mgr_generate_mlli(
for (i = 0; i < sg_data->num_of_buffers; i++) {
if (sg_data->type[i] == DMA_SGL_TYPE)
rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
- sg_data->entry[i].sgl,
- sg_data->total_data_len[i], sg_data->offset[i], &total_nents,
- &mlli_p);
+ dev, sg_data->entry[i].sgl,
+ sg_data->total_data_len[i], sg_data->offset[i],
+ &total_nents, &mlli_p);
else /*DMA_BUFF_TYPE*/
rc = ssi_buffer_mgr_render_buff_to_mlli(
- sg_data->entry[i].buffer_dma,
+ dev, sg_data->entry[i].buffer_dma,
sg_data->total_data_len[i], &total_nents,
&mlli_p);
if (rc != 0)
@@ -259,26 +252,23 @@ static int ssi_buffer_mgr_generate_mlli(
/* Set MLLI size for the bypass operation */
mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
- SSI_LOG_DEBUG("MLLI params: "
- "virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
- mlli_params->mlli_virt_addr,
- mlli_params->mlli_dma_addr,
- mlli_params->mlli_len);
+ dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
+ mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
+ mlli_params->mlli_len);
build_mlli_exit:
return rc;
}
static inline void ssi_buffer_mgr_add_buffer_entry(
- struct buffer_array *sgl_data,
+ struct device *dev, struct buffer_array *sgl_data,
dma_addr_t buffer_dma, unsigned int buffer_len,
bool is_last_entry, u32 *mlli_nents)
{
unsigned int index = sgl_data->num_of_buffers;
- SSI_LOG_DEBUG("index=%u single_buff=%pad "
- "buffer_len=0x%08X is_last=%d\n",
- index, buffer_dma, buffer_len, is_last_entry);
+ dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
+ index, &buffer_dma, buffer_len, is_last_entry);
sgl_data->nents[index] = 1;
sgl_data->entry[index].buffer_dma = buffer_dma;
sgl_data->offset[index] = 0;
@@ -292,6 +282,7 @@ static inline void ssi_buffer_mgr_add_buffer_entry(
}
static inline void ssi_buffer_mgr_add_scatterlist_entry(
+ struct device *dev,
struct buffer_array *sgl_data,
unsigned int nents,
struct scatterlist *sgl,
@@ -302,8 +293,8 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
{
unsigned int index = sgl_data->num_of_buffers;
- SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
- index, nents, sgl, data_len, is_last_table);
+ dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
+ index, nents, sgl, data_len, is_last_table);
sgl_data->nents[index] = nents;
sgl_data->entry[index].sgl = sgl;
sgl_data->offset[index] = data_offset;
@@ -327,7 +318,7 @@ ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
if (!l_sg)
break;
if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
- SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
+ dev_err(dev, "dma_map_page() sg buffer failed\n");
goto err;
}
l_sg = sg_next(l_sg);
@@ -356,26 +347,22 @@ static int ssi_buffer_mgr_map_scatterlist(
if (sg_is_last(sg)) {
/* One entry only case -set to DLLI */
if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
- SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
+ dev_err(dev, "dma_map_sg() single buffer failed\n");
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped sg: dma_address=%pad "
- "page=%p addr=%pK offset=%u "
- "length=%u\n",
- sg_dma_address(sg),
- sg_page(sg),
- sg_virt(sg),
- sg->offset, sg->length);
+ dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
+ sg->offset, sg->length);
*lbytes = nbytes;
*nents = 1;
*mapped_nents = 1;
} else { /*sg_is_last*/
- *nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
+ *nents = ssi_buffer_mgr_get_sgl_nents(dev, sg, nbytes, lbytes,
&is_chained);
if (*nents > max_sg_nents) {
*nents = 0;
- SSI_LOG_ERR("Too many fragments. current %d max %d\n",
- *nents, max_sg_nents);
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ *nents, max_sg_nents);
return -ENOMEM;
}
if (!is_chained) {
@@ -385,7 +372,7 @@ static int ssi_buffer_mgr_map_scatterlist(
*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
if (unlikely(*mapped_nents == 0)) {
*nents = 0;
- SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
+ dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
}
} else {
@@ -398,7 +385,7 @@ static int ssi_buffer_mgr_map_scatterlist(
direction);
if (unlikely(*mapped_nents != *nents)) {
*nents = *mapped_nents;
- SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
+ dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
}
}
@@ -414,26 +401,22 @@ ssi_aead_handle_config_buf(struct device *dev,
struct buffer_array *sg_data,
unsigned int assoclen)
{
- SSI_LOG_DEBUG(" handle additional data config set to DLLI\n");
+ dev_dbg(dev, " handle additional data config set to DLLI\n");
/* create sg for the current buffer */
sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
DMA_TO_DEVICE) != 1)) {
- SSI_LOG_ERR("dma_map_sg() "
- "config buffer failed\n");
- return -ENOMEM;
+ dev_err(dev, "dma_map_sg() config buffer failed\n");
+ return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped curr_buff: dma_address=%pad "
- "page=%p addr=%pK "
- "offset=%u length=%u\n",
- sg_dma_address(&areq_ctx->ccm_adata_sg),
- sg_page(&areq_ctx->ccm_adata_sg),
- sg_virt(&areq_ctx->ccm_adata_sg),
- areq_ctx->ccm_adata_sg.offset,
- areq_ctx->ccm_adata_sg.length);
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(&areq_ctx->ccm_adata_sg),
+ sg_page(&areq_ctx->ccm_adata_sg),
+ sg_virt(&areq_ctx->ccm_adata_sg),
+ areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
/* prepare for case of MLLI */
if (assoclen > 0) {
- ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data, 1,
&areq_ctx->ccm_adata_sg,
(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
0, false, NULL);
@@ -447,28 +430,23 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
u32 curr_buff_cnt,
struct buffer_array *sg_data)
{
- SSI_LOG_DEBUG(" handle curr buff %x set to DLLI\n", curr_buff_cnt);
+ dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
/* create sg for the current buffer */
sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
DMA_TO_DEVICE) != 1)) {
- SSI_LOG_ERR("dma_map_sg() "
- "src buffer failed\n");
- return -ENOMEM;
+ dev_err(dev, "dma_map_sg() src buffer failed\n");
+ return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped curr_buff: dma_address=%pad "
- "page=%p addr=%pK "
- "offset=%u length=%u\n",
- sg_dma_address(areq_ctx->buff_sg),
- sg_page(areq_ctx->buff_sg),
- sg_virt(areq_ctx->buff_sg),
- areq_ctx->buff_sg->offset,
- areq_ctx->buff_sg->length);
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
+ sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
+ areq_ctx->buff_sg->length);
areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
areq_ctx->curr_sg = areq_ctx->buff_sg;
areq_ctx->in_nents = 0;
/* prepare for case of MLLI */
- ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data, 1, areq_ctx->buff_sg,
curr_buff_cnt, 0, false, NULL);
return 0;
}
@@ -483,9 +461,8 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
- SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
- req_ctx->gen_ctx.iv_dma_addr,
- ivsize);
+ dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
+ &req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
ivsize,
req_ctx->is_giv ? DMA_BIDIRECTIONAL :
@@ -499,11 +476,11 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
}
dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped req->src=%pK\n", sg_virt(src));
+ dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
if (src != dst) {
dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped req->dst=%pK\n", sg_virt(dst));
+ dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
}
}
@@ -519,7 +496,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
struct mlli_params *mlli_params = &req_ctx->mlli_params;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
- struct device *dev = &drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(drvdata);
struct buffer_array sg_data;
u32 dummy = 0;
int rc = 0;
@@ -539,13 +516,12 @@ int ssi_buffer_mgr_map_blkcipher_request(
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev,
req_ctx->gen_ctx.iv_dma_addr))) {
- SSI_LOG_ERR("Mapping iv %u B at va=%pK "
- "for DMA failed\n", ivsize, info);
+ dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ ivsize, info);
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=%pad\n",
- ivsize, info,
- req_ctx->gen_ctx.iv_dma_addr);
+ dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
} else {
req_ctx->gen_ctx.iv_dma_addr = 0;
}
@@ -567,7 +543,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
/* Handle inplace operation */
if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
req_ctx->out_nents = 0;
- ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
req_ctx->in_nents,
src, nbytes, 0,
true,
@@ -587,12 +563,12 @@ int ssi_buffer_mgr_map_blkcipher_request(
req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
- ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
req_ctx->in_nents,
src, nbytes, 0,
true,
&req_ctx->in_mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
req_ctx->out_nents,
dst, nbytes, 0,
true,
@@ -607,8 +583,8 @@ int ssi_buffer_mgr_map_blkcipher_request(
goto ablkcipher_exit;
}
- SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
- GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
+ dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
+ GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
return 0;
@@ -674,30 +650,34 @@ void ssi_buffer_mgr_unmap_aead_request(
*allocated and should be released
*/
if (areq_ctx->mlli_params.curr_pool) {
- SSI_LOG_DEBUG("free MLLI buffer: dma=%pad virt=%pK\n",
- areq_ctx->mlli_params.mlli_dma_addr,
- areq_ctx->mlli_params.mlli_virt_addr);
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ &areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
areq_ctx->mlli_params.mlli_virt_addr,
areq_ctx->mlli_params.mlli_dma_addr);
}
- SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, req->assoclen, req->cryptlen);
+ dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
+ sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
+ req->assoclen, req->cryptlen);
size_to_unmap = req->assoclen + req->cryptlen;
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
size_to_unmap += areq_ctx->req_authsize;
if (areq_ctx->is_gcm4543)
size_to_unmap += crypto_aead_ivsize(tfm);
- dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap, &dummy, &chained), DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src,
+ ssi_buffer_mgr_get_sgl_nents(dev, req->src, size_to_unmap,
+ &dummy, &chained),
+ DMA_BIDIRECTIONAL);
if (unlikely(req->src != req->dst)) {
- SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
- sg_virt(req->dst));
+ dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
+ sg_virt(req->dst));
dma_unmap_sg(dev, req->dst,
- ssi_buffer_mgr_get_sgl_nents(req->dst,
+ ssi_buffer_mgr_get_sgl_nents(dev, req->dst,
size_to_unmap,
- &dummy,
- &chained),
+ &dummy, &chained),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
@@ -712,13 +692,14 @@ void ssi_buffer_mgr_unmap_aead_request(
* data memory overriding that caused by cache coherence problem.
*/
ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->backup_mac, req->src,
+ dev, areq_ctx->backup_mac, req->src,
size_to_skip + req->cryptlen - areq_ctx->req_authsize,
size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
}
}
static inline int ssi_buffer_mgr_get_aead_icv_nents(
+ struct device *dev,
struct scatterlist *sgl,
unsigned int sgl_nents,
unsigned int authsize,
@@ -757,12 +738,12 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
nents = 2;
*is_icv_fragmented = true;
} else {
- SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n",
- MAX_ICV_NENTS_SUPPORTED);
+ dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
+ MAX_ICV_NENTS_SUPPORTED);
nents = -1; /*unsupported*/
}
- SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n",
- (*is_icv_fragmented ? "true" : "false"), nents);
+ dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
+ (*is_icv_fragmented ? "true" : "false"), nents);
return nents;
}
@@ -775,7 +756,7 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
- struct device *dev = &drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
if (unlikely(!req->iv)) {
@@ -786,22 +767,22 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, hw_iv_size,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
- SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
- hw_iv_size, req->iv);
+ dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ hw_iv_size, req->iv);
rc = -ENOMEM;
goto chain_iv_exit;
}
- SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=%pad\n",
- hw_iv_size, req->iv,
- areq_ctx->gen_ctx.iv_dma_addr);
+ dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
if (do_chain && areq_ctx->plaintext_authenticate_only) { // TODO: what about CTR?? ask Ron
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
/* Chain to given list */
ssi_buffer_mgr_add_buffer_entry(
- sg_data, areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
+ dev, sg_data,
+ areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
iv_size_to_authenc, is_last,
&areq_ctx->assoc.mlli_nents);
areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
@@ -824,6 +805,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int sg_index = 0;
u32 size_of_assoc = req->assoclen;
+ struct device *dev = drvdata_to_dev(drvdata);
if (areq_ctx->is_gcm4543)
size_of_assoc += crypto_aead_ivsize(tfm);
@@ -837,9 +819,9 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
- SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
- areq_ctx->assoc.nents);
+ dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
+ GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
goto chain_assoc_exit;
}
@@ -853,16 +835,16 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
current_sg = sg_next(current_sg);
//if have reached the end of the sgl, then this is unexpected
if (!current_sg) {
- SSI_LOG_ERR("reached end of sg list. unexpected\n");
- BUG();
+ dev_err(dev, "reached end of sg list. unexpected\n");
+ return -EINVAL;
}
sg_index += current_sg->length;
mapped_nents++;
}
}
if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
- SSI_LOG_ERR("Too many fragments. current %d max %d\n",
- mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
return -ENOMEM;
}
areq_ctx->assoc.nents = mapped_nents;
@@ -873,9 +855,9 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
if (unlikely((mapped_nents + 1) >
LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
- SSI_LOG_ERR("CCM case.Too many fragments. Current %d max %d\n",
- (areq_ctx->assoc.nents + 1),
- LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+ dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
+ (areq_ctx->assoc.nents + 1),
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
rc = -ENOMEM;
goto chain_assoc_exit;
}
@@ -889,11 +871,11 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
if (unlikely((do_chain) ||
(areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
- SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
- areq_ctx->assoc.nents);
+ dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
+ GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
ssi_buffer_mgr_add_scatterlist_entry(
- sg_data, areq_ctx->assoc.nents,
+ dev, sg_data, areq_ctx->assoc.nents,
req->src, req->assoclen, 0, is_last,
&areq_ctx->assoc.mlli_nents);
areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
@@ -951,10 +933,11 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
unsigned int authsize = areq_ctx->req_authsize;
int rc = 0, icv_nents;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct device *dev = drvdata_to_dev(drvdata);
if (likely(req->src == req->dst)) {
/*INPLACE*/
- ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
areq_ctx->src.nents,
areq_ctx->src_sgl,
areq_ctx->cryptlen,
@@ -962,7 +945,8 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
is_last_table,
&areq_ctx->src.mlli_nents);
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->src_sgl,
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
+ areq_ctx->src_sgl,
areq_ctx->src.nents,
authsize,
*src_last_bytes,
@@ -990,7 +974,8 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
skip += crypto_aead_ivsize(tfm);
ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->backup_mac, req->src,
+ dev, areq_ctx->backup_mac,
+ req->src,
(skip + req->cryptlen -
areq_ctx->req_authsize),
skip + req->cryptlen,
@@ -1013,14 +998,14 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
/*NON-INPLACE and DECRYPT*/
- ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
areq_ctx->src.nents,
areq_ctx->src_sgl,
areq_ctx->cryptlen,
areq_ctx->src_offset,
is_last_table,
&areq_ctx->src.mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
areq_ctx->dst.nents,
areq_ctx->dst_sgl,
areq_ctx->cryptlen,
@@ -1028,7 +1013,8 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
is_last_table,
&areq_ctx->dst.mlli_nents);
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->src_sgl,
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
+ areq_ctx->src_sgl,
areq_ctx->src.nents,
authsize,
*src_last_bytes,
@@ -1043,15 +1029,15 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
* verification is made by CPU compare in order to simplify
* MAC verification upon request completion
*/
- u32 size_to_skip = req->assoclen;
+ u32 size_to_skip = req->assoclen;
- if (areq_ctx->is_gcm4543)
- size_to_skip += crypto_aead_ivsize(tfm);
+ if (areq_ctx->is_gcm4543)
+ size_to_skip += crypto_aead_ivsize(tfm);
- ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->backup_mac, req->src,
- size_to_skip + req->cryptlen - areq_ctx->req_authsize,
- size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
+ ssi_buffer_mgr_copy_scatterlist_portion(
+ dev, areq_ctx->backup_mac, req->src,
+ size_to_skip + req->cryptlen - areq_ctx->req_authsize,
+ size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
} else { /* Contig. ICV */
/*Should hanlde if the sg is not contig.*/
@@ -1065,14 +1051,14 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
} else {
/*NON-INPLACE and ENCRYPT*/
- ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
areq_ctx->dst.nents,
areq_ctx->dst_sgl,
areq_ctx->cryptlen,
areq_ctx->dst_offset,
is_last_table,
&areq_ctx->dst.mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
areq_ctx->src.nents,
areq_ctx->src_sgl,
areq_ctx->cryptlen,
@@ -1080,7 +1066,8 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
is_last_table,
&areq_ctx->src.mlli_nents);
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dst_sgl,
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
+ areq_ctx->dst_sgl,
areq_ctx->dst.nents,
authsize,
*dst_last_bytes,
@@ -1115,7 +1102,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
bool is_last_table, bool do_chain)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- struct device *dev = &drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(drvdata);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
int src_last_bytes = 0, dst_last_bytes = 0;
@@ -1134,10 +1121,9 @@ static inline int ssi_buffer_mgr_aead_chain_data(
offset = size_to_skip;
- if (!sg_data) {
- rc = -EINVAL;
- goto chain_data_exit;
- }
+ if (!sg_data)
+ return -EINVAL;
+
areq_ctx->src_sgl = req->src;
areq_ctx->dst_sgl = req->dst;
@@ -1145,7 +1131,10 @@ static inline int ssi_buffer_mgr_aead_chain_data(
size_for_map += crypto_aead_ivsize(tfm);
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
- src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map, &src_last_bytes, &chained);
+ src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(dev, req->src,
+ size_for_map,
+ &src_last_bytes,
+ &chained);
sg_index = areq_ctx->src_sgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
@@ -1153,15 +1142,15 @@ static inline int ssi_buffer_mgr_aead_chain_data(
areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
//if have reached the end of the sgl, then this is unexpected
if (!areq_ctx->src_sgl) {
- SSI_LOG_ERR("reached end of sg list. unexpected\n");
- BUG();
+ dev_err(dev, "reached end of sg list. unexpected\n");
+ return -EINVAL;
}
sg_index += areq_ctx->src_sgl->length;
src_mapped_nents--;
}
if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
- SSI_LOG_ERR("Too many fragments. current %d max %d\n",
- src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
}
@@ -1187,7 +1176,10 @@ static inline int ssi_buffer_mgr_aead_chain_data(
}
}
- dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map, &dst_last_bytes, &chained);
+ dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(dev, req->dst,
+ size_for_map,
+ &dst_last_bytes,
+ &chained);
sg_index = areq_ctx->dst_sgl->length;
offset = size_to_skip;
@@ -1197,15 +1189,15 @@ static inline int ssi_buffer_mgr_aead_chain_data(
areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
//if have reached the end of the sgl, then this is unexpected
if (!areq_ctx->dst_sgl) {
- SSI_LOG_ERR("reached end of sg list. unexpected\n");
- BUG();
+ dev_err(dev, "reached end of sg list. unexpected\n");
+ return -EINVAL;
}
sg_index += areq_ctx->dst_sgl->length;
dst_mapped_nents--;
}
if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
- SSI_LOG_ERR("Too many fragments. current %d max %d\n",
- dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
}
areq_ctx->dst.nents = dst_mapped_nents;
@@ -1285,7 +1277,7 @@ int ssi_buffer_mgr_map_aead_request(
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
- struct device *dev = &drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(drvdata);
struct buffer_array sg_data;
unsigned int authsize = areq_ctx->req_authsize;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
@@ -1312,7 +1304,7 @@ int ssi_buffer_mgr_map_aead_request(
* data memory overriding that caused by cache coherence problem.
*/
ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->backup_mac, req->src,
+ dev, areq_ctx->backup_mac, req->src,
size_to_skip + req->cryptlen - areq_ctx->req_authsize,
size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
}
@@ -1327,8 +1319,8 @@ int ssi_buffer_mgr_map_aead_request(
MAX_MAC_SIZE,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
- SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
- MAX_MAC_SIZE, areq_ctx->mac_buf);
+ dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ MAX_MAC_SIZE, areq_ctx->mac_buf);
rc = -ENOMEM;
goto aead_map_failure;
}
@@ -1340,9 +1332,10 @@ int ssi_buffer_mgr_map_aead_request(
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
- SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK "
- "for DMA failed\n", AES_BLOCK_SIZE,
- (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET));
+ dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE,
+ (areq_ctx->ccm_config +
+ CCM_CTR_COUNT_0_OFFSET));
areq_ctx->ccm_iv0_dma_addr = 0;
rc = -ENOMEM;
goto aead_map_failure;
@@ -1362,8 +1355,8 @@ int ssi_buffer_mgr_map_aead_request(
AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
- SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
- AES_BLOCK_SIZE, areq_ctx->hkey);
+ dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, areq_ctx->hkey);
rc = -ENOMEM;
goto aead_map_failure;
}
@@ -1373,8 +1366,8 @@ int ssi_buffer_mgr_map_aead_request(
AES_BLOCK_SIZE,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
- SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
- AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
+ dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
rc = -ENOMEM;
goto aead_map_failure;
}
@@ -1385,9 +1378,8 @@ int ssi_buffer_mgr_map_aead_request(
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
- SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK "
- "for DMA failed\n", AES_BLOCK_SIZE,
- (areq_ctx->gcm_iv_inc1));
+ dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
areq_ctx->gcm_iv_inc1_dma_addr = 0;
rc = -ENOMEM;
goto aead_map_failure;
@@ -1399,9 +1391,8 @@ int ssi_buffer_mgr_map_aead_request(
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
- SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK "
- "for DMA failed\n", AES_BLOCK_SIZE,
- (areq_ctx->gcm_iv_inc2));
+ dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
areq_ctx->gcm_iv_inc2_dma_addr = 0;
rc = -ENOMEM;
goto aead_map_failure;
@@ -1481,9 +1472,10 @@ int ssi_buffer_mgr_map_aead_request(
goto aead_map_failure;
ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
- SSI_LOG_DEBUG("assoc params mn %d\n", areq_ctx->assoc.mlli_nents);
- SSI_LOG_DEBUG("src params mn %d\n", areq_ctx->src.mlli_nents);
- SSI_LOG_DEBUG("dst params mn %d\n", areq_ctx->dst.mlli_nents);
+ dev_dbg(dev, "assoc params mn %d\n",
+ areq_ctx->assoc.mlli_nents);
+ dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
+ dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
}
return 0;
@@ -1496,7 +1488,7 @@ int ssi_buffer_mgr_map_hash_request_final(
struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
- struct device *dev = &drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(drvdata);
u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
areq_ctx->buff0;
u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
@@ -1507,11 +1499,8 @@ int ssi_buffer_mgr_map_hash_request_final(
u32 dummy = 0;
u32 mapped_nents = 0;
- SSI_LOG_DEBUG(" final params : curr_buff=%pK "
- "curr_buff_cnt=0x%X nbytes = 0x%X "
- "src=%pK curr_index=%u\n",
- curr_buff, *curr_buff_cnt, nbytes,
- src, areq_ctx->buff_index);
+ dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
+ curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
mlli_params->curr_pool = NULL;
@@ -1557,7 +1546,7 @@ int ssi_buffer_mgr_map_hash_request_final(
if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
- ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
areq_ctx->in_nents,
src, nbytes, 0, true,
&areq_ctx->mlli_nents);
@@ -1568,8 +1557,8 @@ int ssi_buffer_mgr_map_hash_request_final(
}
/* change the buffer index for the unmap function */
areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
- SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
+ dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
+ GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
return 0;
fail_unmap_din:
@@ -1586,7 +1575,7 @@ int ssi_buffer_mgr_map_hash_request_update(
struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
- struct device *dev = &drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(drvdata);
u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
areq_ctx->buff0;
u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
@@ -1604,11 +1593,8 @@ int ssi_buffer_mgr_map_hash_request_update(
u32 dummy = 0;
u32 mapped_nents = 0;
- SSI_LOG_DEBUG(" update params : curr_buff=%pK "
- "curr_buff_cnt=0x%X nbytes=0x%X "
- "src=%pK curr_index=%u\n",
- curr_buff, *curr_buff_cnt, nbytes,
- src, areq_ctx->buff_index);
+ dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
+ curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
mlli_params->curr_pool = NULL;
@@ -1617,14 +1603,11 @@ int ssi_buffer_mgr_map_hash_request_update(
areq_ctx->in_nents = 0;
if (unlikely(total_in_len < block_size)) {
- SSI_LOG_DEBUG(" less than one block: curr_buff=%pK "
- "*curr_buff_cnt=0x%X copy_to=%pK\n",
- curr_buff, *curr_buff_cnt,
- &curr_buff[*curr_buff_cnt]);
+ dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
+ curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents =
- ssi_buffer_mgr_get_sgl_nents(src,
- nbytes,
- &dummy, NULL);
+ ssi_buffer_mgr_get_sgl_nents(dev, src, nbytes, &dummy,
+ NULL);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
@@ -1636,17 +1619,15 @@ int ssi_buffer_mgr_map_hash_request_update(
/* update data len */
update_data_len = total_in_len - *next_buff_cnt;
- SSI_LOG_DEBUG(" temp length : *next_buff_cnt=0x%X "
- "update_data_len=0x%X\n",
+ dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
*next_buff_cnt, update_data_len);
/* Copy the new residue to next buffer */
if (*next_buff_cnt != 0) {
- SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u"
- " residue %u\n", next_buff,
- (update_data_len - *curr_buff_cnt),
- *next_buff_cnt);
- ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
+ dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
+ next_buff, (update_data_len - *curr_buff_cnt),
+ *next_buff_cnt);
+ ssi_buffer_mgr_copy_scatterlist_portion(dev, next_buff, src,
(update_data_len - *curr_buff_cnt),
nbytes, SSI_SG_TO_BUF);
/* change the buffer index for next operation */
@@ -1688,7 +1669,7 @@ int ssi_buffer_mgr_map_hash_request_update(
if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
- ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
areq_ctx->in_nents,
src,
(update_data_len - *curr_buff_cnt),
@@ -1725,29 +1706,26 @@ void ssi_buffer_mgr_unmap_hash_request(
*allocated and should be released
*/
if (areq_ctx->mlli_params.curr_pool) {
- SSI_LOG_DEBUG("free MLLI buffer: dma=%pad virt=%pK\n",
- areq_ctx->mlli_params.mlli_dma_addr,
- areq_ctx->mlli_params.mlli_virt_addr);
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ &areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
areq_ctx->mlli_params.mlli_virt_addr,
areq_ctx->mlli_params.mlli_dma_addr);
}
if ((src) && likely(areq_ctx->in_nents != 0)) {
- SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
- sg_virt(src),
- sg_dma_address(src),
- sg_dma_len(src));
+ dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+ sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
dma_unmap_sg(dev, src,
areq_ctx->in_nents, DMA_TO_DEVICE);
}
if (*prev_len != 0) {
- SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK"
- " dma=%pad len 0x%X\n",
- sg_virt(areq_ctx->buff_sg),
- sg_dma_address(areq_ctx->buff_sg),
- sg_dma_len(areq_ctx->buff_sg));
+ dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
+ sg_virt(areq_ctx->buff_sg),
+ &sg_dma_address(areq_ctx->buff_sg),
+ sg_dma_len(areq_ctx->buff_sg));
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
if (!do_revert) {
/* clean the previous data length for update operation */
@@ -1761,7 +1739,7 @@ void ssi_buffer_mgr_unmap_hash_request(
int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
{
struct buff_mgr_handle *buff_mgr_handle;
- struct device *dev = &drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(drvdata);
buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
if (!buff_mgr_handle)
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.h b/drivers/staging/ccree/ssi_buffer_mgr.h
index 41f5223730f8..1032f25edcab 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.h
+++ b/drivers/staging/ccree/ssi_buffer_mgr.h
@@ -80,7 +80,10 @@ int ssi_buffer_mgr_map_hash_request_update(struct ssi_drvdata *drvdata, void *ct
void ssi_buffer_mgr_unmap_hash_request(struct device *dev, void *ctx, struct scatterlist *src, bool do_revert);
-void ssi_buffer_mgr_copy_scatterlist_portion(u8 *dest, struct scatterlist *sg, u32 to_skip, u32 end, enum ssi_sg_cpy_direct direct);
+void ssi_buffer_mgr_copy_scatterlist_portion(struct device *dev, u8 *dest,
+ struct scatterlist *sg,
+ u32 to_skip, u32 end,
+ enum ssi_sg_cpy_direct direct);
void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len);
diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
index 8d31a93fd8b7..ee85cbf7c9ae 100644
--- a/drivers/staging/ccree/ssi_cipher.c
+++ b/drivers/staging/ccree/ssi_cipher.c
@@ -181,45 +181,42 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
struct crypto_alg *alg = tfm->__crt_alg;
struct ssi_crypto_alg *ssi_alg =
container_of(alg, struct ssi_crypto_alg, crypto_alg);
- struct device *dev;
+ struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
int rc = 0;
unsigned int max_key_buf_size = get_max_keysize(tfm);
- SSI_LOG_DEBUG("Initializing context @%p for %s\n",
- ctx_p, crypto_tfm_alg_name(tfm));
+ dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
+ crypto_tfm_alg_name(tfm));
ctx_p->cipher_mode = ssi_alg->cipher_mode;
ctx_p->flow_mode = ssi_alg->flow_mode;
ctx_p->drvdata = ssi_alg->drvdata;
- dev = &ctx_p->drvdata->plat_dev->dev;
/* Allocate key buffer, cache line aligned */
ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL | GFP_DMA);
- if (!ctx_p->user.key) {
- SSI_LOG_ERR("Allocating key buffer in context failed\n");
- rc = -ENOMEM;
- }
- SSI_LOG_DEBUG("Allocated key buffer in context. key=@%p\n",
- ctx_p->user.key);
+ if (!ctx_p->user.key)
+ return -ENOMEM;
+
+ dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
+ ctx_p->user.key);
/* Map key buffer */
ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
max_key_buf_size,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
- SSI_LOG_ERR("Mapping Key %u B at va=%pK for DMA failed\n",
- max_key_buf_size, ctx_p->user.key);
+ dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+ max_key_buf_size, ctx_p->user.key);
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped key %u B at va=%pK to dma=%pad\n",
- max_key_buf_size, ctx_p->user.key,
- ctx_p->user.key_dma_addr);
+ dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+ max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
/* Alloc hash tfm for essiv */
ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
if (IS_ERR(ctx_p->shash_tfm)) {
- SSI_LOG_ERR("Error allocating hash tfm for ESSIV.\n");
+ dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
return PTR_ERR(ctx_p->shash_tfm);
}
}
@@ -230,11 +227,11 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
- struct device *dev = &ctx_p->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
unsigned int max_key_buf_size = get_max_keysize(tfm);
- SSI_LOG_DEBUG("Clearing context @%p for %s\n",
- crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
+ dev_dbg(dev, "Clearing context @%p for %s\n",
+ crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
/* Free hash tfm for essiv */
@@ -245,12 +242,12 @@ static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
/* Unmap key buffer */
dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
DMA_TO_DEVICE);
- SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=%pad\n",
- ctx_p->user.key_dma_addr);
+ dev_dbg(dev, "Unmapped key buffer key_dma_addr=%pad\n",
+ &ctx_p->user.key_dma_addr);
/* Free key buffer in context */
kfree(ctx_p->user.key);
- SSI_LOG_DEBUG("Free key buffer in context. key=@%p\n", ctx_p->user.key);
+ dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
}
struct tdes_keys {
@@ -298,16 +295,14 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
unsigned int keylen)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
- struct device *dev = &ctx_p->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
u32 tmp[DES_EXPKEY_WORDS];
unsigned int max_key_buf_size = get_max_keysize(tfm);
- SSI_LOG_DEBUG("Setting key in context @%p for %s. keylen=%u\n",
- ctx_p, crypto_tfm_alg_name(tfm), keylen);
+ dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
+ ctx_p, crypto_tfm_alg_name(tfm), keylen);
dump_byte_array("key", (u8 *)key, keylen);
- SSI_LOG_DEBUG("after FIPS check");
-
/* STAT_PHASE_0: Init and sanity checks */
#if SSI_CC_HAS_MULTI2
@@ -317,7 +312,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
#endif /*SSI_CC_HAS_MULTI2*/
if (unlikely(validate_keys_sizes(ctx_p, keylen) != 0)) {
- SSI_LOG_ERR("Unsupported key size %d.\n", keylen);
+ dev_err(dev, "Unsupported key size %d.\n", keylen);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -327,13 +322,14 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
if (unlikely(ctx_p->flow_mode != S_DIN_to_AES)) {
- SSI_LOG_ERR("HW key not supported for non-AES flows\n");
+ dev_err(dev, "HW key not supported for non-AES flows\n");
return -EINVAL;
}
ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
if (unlikely(ctx_p->hw.key1_slot == END_OF_KEYS)) {
- SSI_LOG_ERR("Unsupported hw key1 number (%d)\n", hki->hw_key1);
+ dev_err(dev, "Unsupported hw key1 number (%d)\n",
+ hki->hw_key1);
return -EINVAL;
}
@@ -341,18 +337,20 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
(ctx_p->cipher_mode == DRV_CIPHER_ESSIV) ||
(ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)) {
if (unlikely(hki->hw_key1 == hki->hw_key2)) {
- SSI_LOG_ERR("Illegal hw key numbers (%d,%d)\n", hki->hw_key1, hki->hw_key2);
+ dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+ hki->hw_key1, hki->hw_key2);
return -EINVAL;
}
ctx_p->hw.key2_slot = hw_key_to_cc_hw_key(hki->hw_key2);
if (unlikely(ctx_p->hw.key2_slot == END_OF_KEYS)) {
- SSI_LOG_ERR("Unsupported hw key2 number (%d)\n", hki->hw_key2);
+ dev_err(dev, "Unsupported hw key2 number (%d)\n",
+ hki->hw_key2);
return -EINVAL;
}
}
ctx_p->keylen = keylen;
- SSI_LOG_DEBUG("ssi_is_hw_key ret 0");
+ dev_dbg(dev, "ssi_is_hw_key ret 0");
return 0;
}
@@ -362,19 +360,19 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
if (unlikely(!des_ekey(tmp, key)) &&
(crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
- SSI_LOG_DEBUG("weak DES key");
+ dev_dbg(dev, "weak DES key");
return -EINVAL;
}
}
if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) &&
xts_check_key(tfm, key, keylen) != 0) {
- SSI_LOG_DEBUG("weak XTS key");
+ dev_dbg(dev, "weak XTS key");
return -EINVAL;
}
if ((ctx_p->flow_mode == S_DIN_to_DES) &&
(keylen == DES3_EDE_KEY_SIZE) &&
ssi_verify_3des_keys(key, keylen) != 0) {
- SSI_LOG_DEBUG("weak 3DES key");
+ dev_dbg(dev, "weak 3DES key");
return -EINVAL;
}
@@ -389,7 +387,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
if (ctx_p->key_round_number < CC_MULTI2_MIN_NUM_ROUNDS ||
ctx_p->key_round_number > CC_MULTI2_MAX_NUM_ROUNDS) {
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- SSI_LOG_DEBUG("SSI_CC_HAS_MULTI2 einval");
+ dev_dbg(dev, "SSI_CC_HAS_MULTI2 einval");
return -EINVAL;
#endif /*SSI_CC_HAS_MULTI2*/
} else {
@@ -407,7 +405,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
err = crypto_shash_digest(desc, ctx_p->user.key, key_len, ctx_p->user.key + key_len);
if (err) {
- SSI_LOG_ERR("Failed to hash ESSIV key.\n");
+ dev_err(dev, "Failed to hash ESSIV key.\n");
return err;
}
}
@@ -416,7 +414,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
max_key_buf_size, DMA_TO_DEVICE);
ctx_p->keylen = keylen;
- SSI_LOG_DEBUG("return safely");
+ dev_dbg(dev, "return safely");
return 0;
}
@@ -430,6 +428,7 @@ ssi_blkcipher_create_setup_desc(
unsigned int *seq_size)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
int cipher_mode = ctx_p->cipher_mode;
int flow_mode = ctx_p->flow_mode;
int direction = req_ctx->gen_ctx.op_type;
@@ -540,8 +539,7 @@ ssi_blkcipher_create_setup_desc(
(*seq_size)++;
break;
default:
- SSI_LOG_ERR("Unsupported cipher mode (%d)\n", cipher_mode);
- BUG();
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
}
}
@@ -601,6 +599,7 @@ ssi_blkcipher_create_data_desc(
unsigned int *seq_size)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
unsigned int flow_mode = ctx_p->flow_mode;
switch (ctx_p->flow_mode) {
@@ -616,15 +615,15 @@ ssi_blkcipher_create_data_desc(
break;
#endif /*SSI_CC_HAS_MULTI2*/
default:
- SSI_LOG_ERR("invalid flow mode, flow_mode = %d\n", flow_mode);
+ dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
return;
}
/* Process */
if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)) {
- SSI_LOG_DEBUG(" data params addr %pad length 0x%X\n",
- sg_dma_address(src), nbytes);
- SSI_LOG_DEBUG(" data params addr %pad length 0x%X\n",
- sg_dma_address(dst), nbytes);
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(src), nbytes);
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(dst), nbytes);
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
nbytes, NS_BIT);
@@ -637,9 +636,8 @@ ssi_blkcipher_create_data_desc(
(*seq_size)++;
} else {
/* bypass */
- SSI_LOG_DEBUG(" bypass params addr %pad "
- "length 0x%X addr 0x%08X\n",
- req_ctx->mlli_params.mlli_dma_addr,
+ dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
+ &req_ctx->mlli_params.mlli_dma_addr,
req_ctx->mlli_params.mlli_len,
(unsigned int)ctx_p->drvdata->mlli_sram_addr);
hw_desc_init(&desc[*seq_size]);
@@ -657,21 +655,18 @@ ssi_blkcipher_create_data_desc(
ctx_p->drvdata->mlli_sram_addr,
req_ctx->in_mlli_nents, NS_BIT);
if (req_ctx->out_nents == 0) {
- SSI_LOG_DEBUG(" din/dout params addr 0x%08X "
- "addr 0x%08X\n",
- (unsigned int)ctx_p->drvdata->mlli_sram_addr,
- (unsigned int)ctx_p->drvdata->mlli_sram_addr);
+ dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr,
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr);
set_dout_mlli(&desc[*seq_size],
ctx_p->drvdata->mlli_sram_addr,
req_ctx->in_mlli_nents, NS_BIT,
(!areq ? 0 : 1));
} else {
- SSI_LOG_DEBUG(" din/dout params "
- "addr 0x%08X addr 0x%08X\n",
+ dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
(unsigned int)ctx_p->drvdata->mlli_sram_addr,
(unsigned int)ctx_p->drvdata->mlli_sram_addr +
- (u32)LLI_ENTRY_BYTE_SIZE *
- req_ctx->in_nents);
+ (u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents);
set_dout_mlli(&desc[*seq_size],
(ctx_p->drvdata->mlli_sram_addr +
(LLI_ENTRY_BYTE_SIZE *
@@ -697,16 +692,10 @@ static int ssi_blkcipher_complete(struct device *dev,
void __iomem *cc_base)
{
int completion_error = 0;
- u32 inflight_counter;
struct ablkcipher_request *req = (struct ablkcipher_request *)areq;
ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
-
- /*Set the inflight couter value to local variable*/
- inflight_counter = ctx_p->drvdata->inflight_counter;
- /*Decrease the inflight counter*/
- if (ctx_p->flow_mode == BYPASS && ctx_p->drvdata->inflight_counter > 0)
- ctx_p->drvdata->inflight_counter--;
+ kfree(req_ctx->iv);
if (areq) {
/*
@@ -742,20 +731,20 @@ static int ssi_blkcipher_process(
enum drv_crypto_direction direction)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
- struct device *dev = &ctx_p->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
struct ssi_crypto_req ssi_req = {};
int rc, seq_len = 0, cts_restore_flag = 0;
- SSI_LOG_DEBUG("%s areq=%p info=%p nbytes=%d\n",
- ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
- areq, info, nbytes);
+ dev_dbg(dev, "%s areq=%p info=%p nbytes=%d\n",
+ ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ "Encrypt" : "Decrypt"), areq, info, nbytes);
/* STAT_PHASE_0: Init and sanity checks */
/* TODO: check data length according to mode */
if (unlikely(validate_data_size(ctx_p, nbytes))) {
- SSI_LOG_ERR("Unsupported data size %d.\n", nbytes);
+ dev_err(dev, "Unsupported data size %d.\n", nbytes);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
rc = -EINVAL;
goto exit_process;
@@ -765,6 +754,17 @@ static int ssi_blkcipher_process(
rc = 0;
goto exit_process;
}
+
+ /* The IV we are handed may be allocated from the stack so
+ * we must copy it to a DMAable buffer before use.
+ */
+ req_ctx->iv = kmalloc(ivsize, GFP_KERNEL);
+ if (!req_ctx->iv) {
+ rc = -ENOMEM;
+ goto exit_process;
+ }
+ memcpy(req_ctx->iv, info, ivsize);
+
/*For CTS in case of data size aligned to 16 use CBC mode*/
if (((nbytes % AES_BLOCK_SIZE) == 0) && (ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS)) {
ctx_p->cipher_mode = DRV_CIPHER_CBC;
@@ -786,9 +786,11 @@ static int ssi_blkcipher_process(
/* STAT_PHASE_1: Map buffers */
- rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes, info, src, dst);
+ rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx,
+ ivsize, nbytes, req_ctx->iv,
+ src, dst);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("map_request() failed\n");
+ dev_err(dev, "map_request() failed\n");
goto exit_process;
}
@@ -838,8 +840,10 @@ exit_process:
if (cts_restore_flag != 0)
ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
- if (rc != -EINPROGRESS)
+ if (rc != -EINPROGRESS) {
kfree(req_ctx->backup_info);
+ kfree(req_ctx->iv);
+ }
return rc;
}
@@ -1245,16 +1249,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
};
static
-struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template *template)
+struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template
+ *template, struct device *dev)
{
struct ssi_crypto_alg *t_alg;
struct crypto_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg) {
- SSI_LOG_ERR("failed to allocate t_alg\n");
+ if (!t_alg)
return ERR_PTR(-ENOMEM);
- }
alg = &t_alg->crypto_alg;
@@ -1285,10 +1288,6 @@ int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
struct ssi_crypto_alg *t_alg, *n;
struct ssi_blkcipher_handle *blkcipher_handle =
drvdata->blkcipher_handle;
- struct device *dev;
-
- dev = &drvdata->plat_dev->dev;
-
if (blkcipher_handle) {
/* Remove registered algs */
list_for_each_entry_safe(t_alg, n,
@@ -1308,6 +1307,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
{
struct ssi_blkcipher_handle *ablkcipher_handle;
struct ssi_crypto_alg *t_alg;
+ struct device *dev = drvdata_to_dev(drvdata);
int rc = -ENOMEM;
int alg;
@@ -1315,37 +1315,38 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
if (!ablkcipher_handle)
return -ENOMEM;
- drvdata->blkcipher_handle = ablkcipher_handle;
-
INIT_LIST_HEAD(&ablkcipher_handle->blkcipher_alg_list);
+ drvdata->blkcipher_handle = ablkcipher_handle;
/* Linux crypto */
- SSI_LOG_DEBUG("Number of algorithms = %zu\n", ARRAY_SIZE(blkcipher_algs));
+ dev_dbg(dev, "Number of algorithms = %zu\n",
+ ARRAY_SIZE(blkcipher_algs));
for (alg = 0; alg < ARRAY_SIZE(blkcipher_algs); alg++) {
- SSI_LOG_DEBUG("creating %s\n", blkcipher_algs[alg].driver_name);
- t_alg = ssi_ablkcipher_create_alg(&blkcipher_algs[alg]);
+ dev_dbg(dev, "creating %s\n", blkcipher_algs[alg].driver_name);
+ t_alg = ssi_ablkcipher_create_alg(&blkcipher_algs[alg], dev);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
- SSI_LOG_ERR("%s alg allocation failed\n",
- blkcipher_algs[alg].driver_name);
+ dev_err(dev, "%s alg allocation failed\n",
+ blkcipher_algs[alg].driver_name);
goto fail0;
}
t_alg->drvdata = drvdata;
- SSI_LOG_DEBUG("registering %s\n", blkcipher_algs[alg].driver_name);
+ dev_dbg(dev, "registering %s\n",
+ blkcipher_algs[alg].driver_name);
rc = crypto_register_alg(&t_alg->crypto_alg);
- SSI_LOG_DEBUG("%s alg registration rc = %x\n",
- t_alg->crypto_alg.cra_driver_name, rc);
+ dev_dbg(dev, "%s alg registration rc = %x\n",
+ t_alg->crypto_alg.cra_driver_name, rc);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
+ dev_err(dev, "%s alg registration failed\n",
+ t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
goto fail0;
} else {
list_add_tail(&t_alg->entry,
&ablkcipher_handle->blkcipher_alg_list);
- SSI_LOG_DEBUG("Registered %s\n",
- t_alg->crypto_alg.cra_driver_name);
+ dev_dbg(dev, "Registered %s\n",
+ t_alg->crypto_alg.cra_driver_name);
}
}
return 0;
diff --git a/drivers/staging/ccree/ssi_cipher.h b/drivers/staging/ccree/ssi_cipher.h
index 296b375d5d89..25e6335c0d94 100644
--- a/drivers/staging/ccree/ssi_cipher.h
+++ b/drivers/staging/ccree/ssi_cipher.h
@@ -27,11 +27,11 @@
#include "ssi_buffer_mgr.h"
/* Crypto cipher flags */
-#define CC_CRYPTO_CIPHER_KEY_KFDE0 (1 << 0)
-#define CC_CRYPTO_CIPHER_KEY_KFDE1 (1 << 1)
-#define CC_CRYPTO_CIPHER_KEY_KFDE2 (1 << 2)
-#define CC_CRYPTO_CIPHER_KEY_KFDE3 (1 << 3)
-#define CC_CRYPTO_CIPHER_DU_SIZE_512B (1 << 4)
+#define CC_CRYPTO_CIPHER_KEY_KFDE0 BIT(0)
+#define CC_CRYPTO_CIPHER_KEY_KFDE1 BIT(1)
+#define CC_CRYPTO_CIPHER_KEY_KFDE2 BIT(2)
+#define CC_CRYPTO_CIPHER_KEY_KFDE3 BIT(3)
+#define CC_CRYPTO_CIPHER_DU_SIZE_512B BIT(4)
#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | CC_CRYPTO_CIPHER_KEY_KFDE1 | CC_CRYPTO_CIPHER_KEY_KFDE2 | CC_CRYPTO_CIPHER_KEY_KFDE3)
@@ -43,6 +43,7 @@ struct blkcipher_req_ctx {
u32 out_nents;
u32 out_mlli_nents;
u8 *backup_info; /*store iv for generated IV flow*/
+ u8 *iv;
bool is_giv;
struct mlli_params mlli_params;
};
@@ -75,7 +76,7 @@ struct arm_hw_key_info {
static inline bool ssi_is_hw_key(struct crypto_tfm *tfm)
{
- return 0;
+ return false;
}
#endif /* CRYPTO_TFM_REQ_HW_KEY */
diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c
index 9c6f1200c130..1a3c481fa92a 100644
--- a/drivers/staging/ccree/ssi_driver.c
+++ b/drivers/staging/ccree/ssi_driver.c
@@ -74,70 +74,46 @@
#include "ssi_fips.h"
#ifdef DX_DUMP_BYTES
-void dump_byte_array(const char *name, const u8 *the_array, unsigned long size)
+void dump_byte_array(const char *name, const u8 *buf, size_t len)
{
- int i, line_offset = 0, ret = 0;
- const u8 *cur_byte;
- char line_buf[80];
+ char prefix[NAME_LEN];
- if (!the_array) {
- SSI_LOG_ERR("cannot dump array - NULL pointer\n");
+ if (!buf)
return;
- }
- ret = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ", name, size);
- if (ret < 0) {
- SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n", ret);
- return;
- }
- line_offset = ret;
- for (i = 0, cur_byte = the_array;
- (i < size) && (line_offset < sizeof(line_buf)); i++, cur_byte++) {
- ret = snprintf(line_buf + line_offset,
- sizeof(line_buf) - line_offset,
- "0x%02X ", *cur_byte);
- if (ret < 0) {
- SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n", ret);
- return;
- }
- line_offset += ret;
- if (line_offset > 75) { /* Cut before line end */
- SSI_LOG_DEBUG("%s\n", line_buf);
- line_offset = 0;
- }
- }
+ snprintf(prefix, sizeof(prefix), "%s[%lu]: ", name, len);
- if (line_offset > 0) /* Dump remaining line */
- SSI_LOG_DEBUG("%s\n", line_buf);
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
+ len, false);
}
#endif
static irqreturn_t cc_isr(int irq, void *dev_id)
{
struct ssi_drvdata *drvdata = (struct ssi_drvdata *)dev_id;
- void __iomem *cc_base = drvdata->cc_base;
+ struct device *dev = drvdata_to_dev(drvdata);
u32 irr;
u32 imr;
/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
/* read the interrupt status */
- irr = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
- SSI_LOG_DEBUG("Got IRR=0x%08X\n", irr);
+ irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
+ dev_dbg(dev, "Got IRR=0x%08X\n", irr);
if (unlikely(irr == 0)) { /* Probably shared interrupt line */
- SSI_LOG_ERR("Got interrupt with empty IRR\n");
+ dev_err(dev, "Got interrupt with empty IRR\n");
return IRQ_NONE;
}
- imr = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR));
+ imr = cc_ioread(drvdata, CC_REG(HOST_IMR));
/* clear interrupt - must be before processing events */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), irr);
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irr);
drvdata->irq = irr;
/* Completion interrupt - most probable */
if (likely((irr & SSI_COMP_IRQ_MASK) != 0)) {
/* Mask AXI completion interrupt - will be unmasked in Deferred service handler */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), imr | SSI_COMP_IRQ_MASK);
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | SSI_COMP_IRQ_MASK);
irr &= ~SSI_COMP_IRQ_MASK;
complete_request(drvdata);
}
@@ -145,7 +121,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
/* TEE FIPS interrupt */
if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) {
/* Mask interrupt - will be unmasked in Deferred service handler */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), imr | SSI_GPR0_IRQ_MASK);
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | SSI_GPR0_IRQ_MASK);
irr &= ~SSI_GPR0_IRQ_MASK;
fips_handler(drvdata);
}
@@ -155,14 +131,16 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
u32 axi_err;
/* Read the AXI error ID */
- axi_err = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
- SSI_LOG_DEBUG("AXI completion error: axim_mon_err=0x%08X\n", axi_err);
+ axi_err = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
+ dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
+ axi_err);
irr &= ~SSI_AXI_ERR_IRQ_MASK;
}
if (unlikely(irr != 0)) {
- SSI_LOG_DEBUG("IRR includes unknown cause bits (0x%08X)\n", irr);
+ dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
+ irr);
/* Just warning */
}
@@ -172,48 +150,48 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
{
unsigned int val, cache_params;
- void __iomem *cc_base = drvdata->cc_base;
+ struct device *dev = drvdata_to_dev(drvdata);
/* Unmask all AXI interrupt sources AXI_CFG1 register */
- val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG));
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG), val & ~SSI_AXI_IRQ_MASK);
- SSI_LOG_DEBUG("AXIM_CFG=0x%08X\n", CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG)));
+ val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
+ cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~SSI_AXI_IRQ_MASK);
+ dev_dbg(dev, "AXIM_CFG=0x%08X\n",
+ cc_ioread(drvdata, CC_REG(AXIM_CFG)));
/* Clear all pending interrupts */
- val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
- SSI_LOG_DEBUG("IRR=0x%08X\n", val);
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), val);
+ val = cc_ioread(drvdata, CC_REG(HOST_IRR));
+ dev_dbg(dev, "IRR=0x%08X\n", val);
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
/* Unmask relevant interrupt cause */
- val = (~(SSI_COMP_IRQ_MASK | SSI_AXI_ERR_IRQ_MASK | SSI_GPR0_IRQ_MASK));
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), val);
+ val = (unsigned int)(~(SSI_COMP_IRQ_MASK | SSI_AXI_ERR_IRQ_MASK |
+ SSI_GPR0_IRQ_MASK));
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), val);
#ifdef DX_HOST_IRQ_TIMER_INIT_VAL_REG_OFFSET
#ifdef DX_IRQ_DELAY
/* Set CC IRQ delay */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL),
- DX_IRQ_DELAY);
+ cc_iowrite(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL), DX_IRQ_DELAY);
#endif
- if (CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)) > 0) {
- SSI_LOG_DEBUG("irq_delay=%d CC cycles\n",
- CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)));
+ if (cc_ioread(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL)) > 0) {
+ dev_dbg(dev, "irq_delay=%d CC cycles\n",
+ cc_ioread(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL)));
}
#endif
cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);
- val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
+ val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
if (is_probe)
- SSI_LOG_INFO("Cache params previous: 0x%08X\n", val);
+ dev_info(dev, "Cache params previous: 0x%08X\n", val);
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS),
- cache_params);
- val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
+ cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), cache_params);
+ val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
if (is_probe)
- SSI_LOG_INFO("Cache params current: 0x%08X (expect: 0x%08X)\n",
- val, cache_params);
+ dev_info(dev, "Cache params current: 0x%08X (expect: 0x%08X)\n",
+ val, cache_params);
return 0;
}
@@ -222,181 +200,172 @@ static int init_cc_resources(struct platform_device *plat_dev)
{
struct resource *req_mem_cc_regs = NULL;
void __iomem *cc_base = NULL;
- bool irq_registered = false;
- struct ssi_drvdata *new_drvdata = kzalloc(sizeof(*new_drvdata),
- GFP_KERNEL);
+ struct ssi_drvdata *new_drvdata;
struct device *dev = &plat_dev->dev;
struct device_node *np = dev->of_node;
u32 signature_val;
+ dma_addr_t dma_mask;
int rc = 0;
- if (unlikely(!new_drvdata)) {
- SSI_LOG_ERR("Failed to allocate drvdata");
- rc = -ENOMEM;
- goto init_cc_res_err;
- }
+ new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
+ if (!new_drvdata)
+ return -ENOMEM;
+
+ platform_set_drvdata(plat_dev, new_drvdata);
+ new_drvdata->plat_dev = plat_dev;
new_drvdata->clk = of_clk_get(np, 0);
new_drvdata->coherent = of_dma_is_coherent(np);
- /*Initialize inflight counter used in dx_ablkcipher_secure_complete used for count of BYSPASS blocks operations*/
- new_drvdata->inflight_counter = 0;
-
- dev_set_drvdata(&plat_dev->dev, new_drvdata);
/* Get device resources */
/* First CC registers space */
- new_drvdata->res_mem = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
- if (unlikely(!new_drvdata->res_mem)) {
- SSI_LOG_ERR("Failed getting IO memory resource\n");
- rc = -ENODEV;
- goto init_cc_res_err;
- }
- SSI_LOG_DEBUG("Got MEM resource (%s): start=%pad end=%pad\n",
- new_drvdata->res_mem->name,
- new_drvdata->res_mem->start,
- new_drvdata->res_mem->end);
+ req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
/* Map registers space */
- req_mem_cc_regs = request_mem_region(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem), "arm_cc7x_regs");
- if (unlikely(!req_mem_cc_regs)) {
- SSI_LOG_ERR("Couldn't allocate registers memory region at "
- "0x%08X\n", (unsigned int)new_drvdata->res_mem->start);
- rc = -EBUSY;
- goto init_cc_res_err;
+ new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+ if (IS_ERR(new_drvdata->cc_base)) {
+ dev_err(dev, "Failed to ioremap registers");
+ return PTR_ERR(new_drvdata->cc_base);
}
- cc_base = ioremap(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem));
- if (unlikely(!cc_base)) {
- SSI_LOG_ERR("ioremap[CC](0x%08X,0x%08X) failed\n",
- (unsigned int)new_drvdata->res_mem->start,
- (unsigned int)resource_size(new_drvdata->res_mem));
- rc = -ENOMEM;
- goto init_cc_res_err;
- }
- SSI_LOG_DEBUG("CC registers mapped from %pa to 0x%p\n", &new_drvdata->res_mem->start, cc_base);
- new_drvdata->cc_base = cc_base;
+
+ dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
+ req_mem_cc_regs);
+ dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
+ &req_mem_cc_regs->start, new_drvdata->cc_base);
+
+ cc_base = new_drvdata->cc_base;
/* Then IRQ */
- new_drvdata->res_irq = platform_get_resource(plat_dev, IORESOURCE_IRQ, 0);
- if (unlikely(!new_drvdata->res_irq)) {
- SSI_LOG_ERR("Failed getting IRQ resource\n");
- rc = -ENODEV;
- goto init_cc_res_err;
- }
- rc = request_irq(new_drvdata->res_irq->start, cc_isr,
- IRQF_SHARED, "arm_cc7x", new_drvdata);
- if (unlikely(rc != 0)) {
- SSI_LOG_ERR("Could not register to interrupt %llu\n",
- (unsigned long long)new_drvdata->res_irq->start);
- goto init_cc_res_err;
+ new_drvdata->irq = platform_get_irq(plat_dev, 0);
+ if (new_drvdata->irq < 0) {
+ dev_err(dev, "Failed getting IRQ resource\n");
+ return new_drvdata->irq;
}
- init_completion(&new_drvdata->icache_setup_completion);
- irq_registered = true;
- SSI_LOG_DEBUG("Registered to IRQ (%s) %llu\n",
- new_drvdata->res_irq->name,
- (unsigned long long)new_drvdata->res_irq->start);
+ rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
+ IRQF_SHARED, "arm_cc7x", new_drvdata);
+ if (rc) {
+ dev_err(dev, "Could not register to interrupt %d\n",
+ new_drvdata->irq);
+ return rc;
+ }
+ dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
- new_drvdata->plat_dev = plat_dev;
+ if (!plat_dev->dev.dma_mask)
+ plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;
- rc = cc_clk_on(new_drvdata);
- if (rc)
- goto init_cc_res_err;
+ dma_mask = (dma_addr_t)(DMA_BIT_MASK(DMA_BIT_MASK_LEN));
+ while (dma_mask > 0x7fffffffUL) {
+ if (dma_supported(&plat_dev->dev, dma_mask)) {
+ rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
+ if (!rc)
+ break;
+ }
+ dma_mask >>= 1;
+ }
- if (!new_drvdata->plat_dev->dev.dma_mask)
- new_drvdata->plat_dev->dev.dma_mask = &new_drvdata->plat_dev->dev.coherent_dma_mask;
+ if (rc) {
+ dev_err(dev, "Failed in dma_set_mask, mask=%par\n",
+ &dma_mask);
+ return rc;
+ }
- if (!new_drvdata->plat_dev->dev.coherent_dma_mask)
- new_drvdata->plat_dev->dev.coherent_dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
+ rc = cc_clk_on(new_drvdata);
+ if (rc) {
+ dev_err(dev, "Failed to enable clock");
+ return rc;
+ }
/* Verify correct mapping */
- signature_val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
+ signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
if (signature_val != DX_DEV_SIGNATURE) {
- SSI_LOG_ERR("Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
- signature_val, (u32)DX_DEV_SIGNATURE);
+ dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
+ signature_val, (u32)DX_DEV_SIGNATURE);
rc = -EINVAL;
- goto init_cc_res_err;
+ goto post_clk_err;
}
- SSI_LOG_DEBUG("CC SIGNATURE=0x%08X\n", signature_val);
+ dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
/* Display HW versions */
- SSI_LOG(KERN_INFO, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n", SSI_DEV_NAME_STR,
- CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_VERSION)), DRV_MODULE_VERSION);
+ dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
+ SSI_DEV_NAME_STR,
+ cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
+ DRV_MODULE_VERSION);
rc = init_cc_regs(new_drvdata, true);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("init_cc_regs failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "init_cc_regs failed\n");
+ goto post_clk_err;
}
#ifdef ENABLE_CC_SYSFS
- rc = ssi_sysfs_init(&plat_dev->dev.kobj, new_drvdata);
+ rc = ssi_sysfs_init(&dev->kobj, new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("init_stat_db failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "init_stat_db failed\n");
+ goto post_regs_err;
}
#endif
+ rc = ssi_fips_init(new_drvdata);
+ if (unlikely(rc != 0)) {
+ dev_err(dev, "SSI_FIPS_INIT failed 0x%x\n", rc);
+ goto post_sysfs_err;
+ }
rc = ssi_sram_mgr_init(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("ssi_sram_mgr_init failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "ssi_sram_mgr_init failed\n");
+ goto post_fips_init_err;
}
new_drvdata->mlli_sram_addr =
ssi_sram_mgr_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
if (unlikely(new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR)) {
- SSI_LOG_ERR("Failed to alloc MLLI Sram buffer\n");
+ dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
rc = -ENOMEM;
- goto init_cc_res_err;
+ goto post_sram_mgr_err;
}
rc = request_mgr_init(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("request_mgr_init failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "request_mgr_init failed\n");
+ goto post_sram_mgr_err;
}
rc = ssi_buffer_mgr_init(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("buffer_mgr_init failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "buffer_mgr_init failed\n");
+ goto post_req_mgr_err;
}
rc = ssi_power_mgr_init(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("ssi_power_mgr_init failed\n");
- goto init_cc_res_err;
- }
-
- rc = ssi_fips_init(new_drvdata);
- if (unlikely(rc != 0)) {
- SSI_LOG_ERR("SSI_FIPS_INIT failed 0x%x\n", rc);
- goto init_cc_res_err;
+ dev_err(dev, "ssi_power_mgr_init failed\n");
+ goto post_buf_mgr_err;
}
rc = ssi_ivgen_init(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("ssi_ivgen_init failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "ssi_ivgen_init failed\n");
+ goto post_power_mgr_err;
}
/* Allocate crypto algs */
rc = ssi_ablkcipher_alloc(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("ssi_ablkcipher_alloc failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "ssi_ablkcipher_alloc failed\n");
+ goto post_ivgen_err;
}
/* hash must be allocated before aead since hash exports APIs */
rc = ssi_hash_alloc(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("ssi_hash_alloc failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "ssi_hash_alloc failed\n");
+ goto post_cipher_err;
}
rc = ssi_aead_alloc(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("ssi_aead_alloc failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "ssi_aead_alloc failed\n");
+ goto post_hash_err;
}
/* If we got here and FIPS mode is enabled
@@ -407,52 +376,43 @@ static int init_cc_resources(struct platform_device *plat_dev)
return 0;
-init_cc_res_err:
- SSI_LOG_ERR("Freeing CC HW resources!\n");
-
- if (new_drvdata) {
- ssi_aead_free(new_drvdata);
- ssi_hash_free(new_drvdata);
- ssi_ablkcipher_free(new_drvdata);
- ssi_ivgen_fini(new_drvdata);
- ssi_power_mgr_fini(new_drvdata);
- ssi_buffer_mgr_fini(new_drvdata);
- request_mgr_fini(new_drvdata);
- ssi_sram_mgr_fini(new_drvdata);
- ssi_fips_fini(new_drvdata);
+post_hash_err:
+ ssi_hash_free(new_drvdata);
+post_cipher_err:
+ ssi_ablkcipher_free(new_drvdata);
+post_ivgen_err:
+ ssi_ivgen_fini(new_drvdata);
+post_power_mgr_err:
+ ssi_power_mgr_fini(new_drvdata);
+post_buf_mgr_err:
+ ssi_buffer_mgr_fini(new_drvdata);
+post_req_mgr_err:
+ request_mgr_fini(new_drvdata);
+post_sram_mgr_err:
+ ssi_sram_mgr_fini(new_drvdata);
+post_fips_init_err:
+ ssi_fips_fini(new_drvdata);
+post_sysfs_err:
#ifdef ENABLE_CC_SYSFS
- ssi_sysfs_fini();
+ ssi_sysfs_fini();
#endif
-
- if (req_mem_cc_regs) {
- if (irq_registered) {
- free_irq(new_drvdata->res_irq->start, new_drvdata);
- new_drvdata->res_irq = NULL;
- iounmap(cc_base);
- new_drvdata->cc_base = NULL;
- }
- release_mem_region(new_drvdata->res_mem->start,
- resource_size(new_drvdata->res_mem));
- new_drvdata->res_mem = NULL;
- }
- kfree(new_drvdata);
- dev_set_drvdata(&plat_dev->dev, NULL);
- }
-
+post_regs_err:
+ fini_cc_regs(new_drvdata);
+post_clk_err:
+ cc_clk_off(new_drvdata);
return rc;
}
void fini_cc_regs(struct ssi_drvdata *drvdata)
{
/* Mask all interrupts */
- WRITE_REGISTER(drvdata->cc_base +
- CC_REG_OFFSET(HOST_RGF, HOST_IMR), 0xFFFFFFFF);
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
}
static void cleanup_cc_resources(struct platform_device *plat_dev)
{
struct ssi_drvdata *drvdata =
- (struct ssi_drvdata *)dev_get_drvdata(&plat_dev->dev);
+ (struct ssi_drvdata *)platform_get_drvdata(plat_dev);
ssi_aead_free(drvdata);
ssi_hash_free(drvdata);
@@ -466,22 +426,8 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
#ifdef ENABLE_CC_SYSFS
ssi_sysfs_fini();
#endif
-
fini_cc_regs(drvdata);
cc_clk_off(drvdata);
- free_irq(drvdata->res_irq->start, drvdata);
- drvdata->res_irq = NULL;
-
- if (drvdata->cc_base) {
- iounmap(drvdata->cc_base);
- release_mem_region(drvdata->res_mem->start,
- resource_size(drvdata->res_mem));
- drvdata->cc_base = NULL;
- drvdata->res_mem = NULL;
- }
-
- kfree(drvdata);
- dev_set_drvdata(&plat_dev->dev, NULL);
}
int cc_clk_on(struct ssi_drvdata *drvdata)
@@ -514,18 +460,19 @@ void cc_clk_off(struct ssi_drvdata *drvdata)
static int cc7x_probe(struct platform_device *plat_dev)
{
int rc;
+ struct device *dev = &plat_dev->dev;
#if defined(CONFIG_ARM) && defined(CC_DEBUG)
u32 ctr, cacheline_size;
asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
cacheline_size = 4 << ((ctr >> 16) & 0xf);
- SSI_LOG_DEBUG("CP15(L1_CACHE_BYTES) = %u , Kconfig(L1_CACHE_BYTES) = %u\n",
- cacheline_size, L1_CACHE_BYTES);
+ dev_dbg(dev, "CP15(L1_CACHE_BYTES) = %u , Kconfig(L1_CACHE_BYTES) = %u\n",
+ cacheline_size, L1_CACHE_BYTES);
asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (ctr));
- SSI_LOG_DEBUG("Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X, Part 0x%03X, Rev r%dp%d\n",
- (ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF,
- (ctr >> 20) & 0xF, ctr & 0xF);
+ dev_dbg(dev, "Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X, Part 0x%03X, Rev r%dp%d\n",
+ (ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF,
+ (ctr >> 20) & 0xF, ctr & 0xF);
#endif
/* Map registers space */
@@ -533,18 +480,20 @@ static int cc7x_probe(struct platform_device *plat_dev)
if (rc != 0)
return rc;
- SSI_LOG(KERN_INFO, "ARM cc7x_ree device initialized\n");
+ dev_info(dev, "ARM ccree device initialized\n");
return 0;
}
static int cc7x_remove(struct platform_device *plat_dev)
{
- SSI_LOG_DEBUG("Releasing cc7x resources...\n");
+ struct device *dev = &plat_dev->dev;
+
+ dev_dbg(dev, "Releasing cc7x resources...\n");
cleanup_cc_resources(plat_dev);
- SSI_LOG(KERN_INFO, "ARM cc7x_ree device terminated\n");
+ dev_info(dev, "ARM ccree device terminated\n");
return 0;
}
diff --git a/drivers/staging/ccree/ssi_driver.h b/drivers/staging/ccree/ssi_driver.h
index b6ad89ae9bee..94c755cafb47 100644
--- a/drivers/staging/ccree/ssi_driver.h
+++ b/drivers/staging/ccree/ssi_driver.h
@@ -37,13 +37,11 @@
#include <crypto/hash.h>
#include <linux/version.h>
#include <linux/clk.h>
+#include <linux/platform_device.h>
/* Registers definitions from shared/hw/ree_include */
-#include "dx_reg_base_host.h"
#include "dx_host.h"
-#include "cc_regs.h"
#include "dx_reg_common.h"
-#include "cc_hal.h"
#define CC_SUPPORT_SHA DX_DEV_SHA_MAX
#include "cc_crypto_ctx.h"
#include "ssi_sysfs.h"
@@ -68,12 +66,19 @@
#define SSI_AXI_IRQ_MASK ((1 << DX_AXIM_CFG_BRESPMASK_BIT_SHIFT) | (1 << DX_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
(1 << DX_AXIM_CFG_INFLTMASK_BIT_SHIFT) | (1 << DX_AXIM_CFG_COMPMASK_BIT_SHIFT))
-#define SSI_AXI_ERR_IRQ_MASK (1 << DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
+#define SSI_AXI_ERR_IRQ_MASK BIT(DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
-#define SSI_COMP_IRQ_MASK (1 << DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+#define SSI_COMP_IRQ_MASK BIT(DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+
+#define AXIM_MON_COMP_VALUE GENMASK(DX_AXIM_MON_COMP_VALUE_BIT_SIZE + \
+ DX_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
+ DX_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+
+/* Register name mangling macro */
+#define CC_REG(reg_name) DX_ ## reg_name ## _REG_OFFSET
/* TEE FIPS status interrupt */
-#define SSI_GPR0_IRQ_MASK (1 << DX_HOST_IRR_GPR0_BIT_SHIFT)
+#define SSI_GPR0_IRQ_MASK BIT(DX_HOST_IRR_GPR0_BIT_SHIFT)
#define SSI_CRA_PRIO 3000
@@ -90,19 +95,6 @@
* field in the HW descriptor. The DMA engine +8 that value.
*/
-/* Logging macros */
-#define SSI_LOG(level, format, ...) \
- printk(level "cc715ree::%s: " format, __func__, ##__VA_ARGS__)
-#define SSI_LOG_ERR(format, ...) SSI_LOG(KERN_ERR, format, ##__VA_ARGS__)
-#define SSI_LOG_WARNING(format, ...) SSI_LOG(KERN_WARNING, format, ##__VA_ARGS__)
-#define SSI_LOG_NOTICE(format, ...) SSI_LOG(KERN_NOTICE, format, ##__VA_ARGS__)
-#define SSI_LOG_INFO(format, ...) SSI_LOG(KERN_INFO, format, ##__VA_ARGS__)
-#ifdef CC_DEBUG
-#define SSI_LOG_DEBUG(format, ...) SSI_LOG(KERN_DEBUG, format, ##__VA_ARGS__)
-#else /* Debug log messages are removed at compile time for non-DEBUG config. */
-#define SSI_LOG_DEBUG(format, ...) do {} while (0)
-#endif
-
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
@@ -128,10 +120,8 @@ struct ssi_crypto_req {
* @fw_ver: SeP loaded firmware version
*/
struct ssi_drvdata {
- struct resource *res_mem;
- struct resource *res_irq;
void __iomem *cc_base;
- unsigned int irq;
+ int irq;
u32 irq_mask;
u32 fw_ver;
/* Calibration time of start/stop
@@ -140,7 +130,6 @@ struct ssi_drvdata {
u32 monitor_null_cycles;
struct platform_device *plat_dev;
ssi_sram_addr_t mlli_sram_addr;
- struct completion icache_setup_completion;
void *buff_mgr_handle;
void *hash_handle;
void *aead_handle;
@@ -149,7 +138,6 @@ struct ssi_drvdata {
void *fips_handle;
void *ivgen_handle;
void *sram_mgr_handle;
- u32 inflight_counter;
struct clk *clk;
bool coherent;
};
@@ -187,11 +175,16 @@ struct async_gen_req_ctx {
enum drv_crypto_direction op_type;
};
+static inline struct device *drvdata_to_dev(struct ssi_drvdata *drvdata)
+{
+ return &drvdata->plat_dev->dev;
+}
+
#ifdef DX_DUMP_BYTES
void dump_byte_array(const char *name, const u8 *the_array, unsigned long size);
#else
-#define dump_byte_array(name, array, size) do { \
-} while (0);
+static inline void dump_byte_array(const char *name, const u8 *the_array,
+ unsigned long size) {};
#endif
int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe);
@@ -199,5 +192,15 @@ void fini_cc_regs(struct ssi_drvdata *drvdata);
int cc_clk_on(struct ssi_drvdata *drvdata);
void cc_clk_off(struct ssi_drvdata *drvdata);
+static inline void cc_iowrite(struct ssi_drvdata *drvdata, u32 reg, u32 val)
+{
+ iowrite32(val, (drvdata->cc_base + reg));
+}
+
+static inline u32 cc_ioread(struct ssi_drvdata *drvdata, u32 reg)
+{
+ return ioread32(drvdata->cc_base + reg);
+}
+
#endif /*__SSI_DRIVER_H__*/
diff --git a/drivers/staging/ccree/ssi_fips.c b/drivers/staging/ccree/ssi_fips.c
index 33d53d64603d..4aea99fa129f 100644
--- a/drivers/staging/ccree/ssi_fips.c
+++ b/drivers/staging/ccree/ssi_fips.c
@@ -19,7 +19,6 @@
#include "ssi_config.h"
#include "ssi_driver.h"
-#include "cc_hal.h"
#include "ssi_fips.h"
static void fips_dsr(unsigned long devarg);
@@ -34,9 +33,8 @@ struct ssi_fips_handle {
static bool cc_get_tee_fips_status(struct ssi_drvdata *drvdata)
{
u32 reg;
- void __iomem *cc_base = drvdata->cc_base;
- reg = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
+ reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
}
@@ -46,12 +44,11 @@ static bool cc_get_tee_fips_status(struct ssi_drvdata *drvdata)
*/
void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool status)
{
- void __iomem *cc_base = drvdata->cc_base;
int val = CC_FIPS_SYNC_REE_STATUS;
val |= (status ? CC_FIPS_SYNC_MODULE_OK : CC_FIPS_SYNC_MODULE_ERROR);
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), val);
+ cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
}
void ssi_fips_fini(struct ssi_drvdata *drvdata)
@@ -76,41 +73,42 @@ void fips_handler(struct ssi_drvdata *drvdata)
tasklet_schedule(&fips_handle_ptr->tasklet);
}
-static inline void tee_fips_error(void)
+static inline void tee_fips_error(struct device *dev)
{
if (fips_enabled)
panic("ccree: TEE reported cryptographic error in fips mode!\n");
else
- SSI_LOG_ERR("TEE reported error!\n");
+ dev_err(dev, "TEE reported error!\n");
}
/* Deferred service handler, run as interrupt-fired tasklet */
static void fips_dsr(unsigned long devarg)
{
struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
- void __iomem *cc_base = drvdata->cc_base;
+ struct device *dev = drvdata_to_dev(drvdata);
u32 irq, state, val;
irq = (drvdata->irq & (SSI_GPR0_IRQ_MASK));
if (irq) {
- state = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
+ state = cc_ioread(drvdata, CC_REG(GPR_HOST));
if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
- tee_fips_error();
+ tee_fips_error(dev);
}
/* after verifying that there is nothing to do,
* unmask AXI completion interrupt.
*/
- val = (CC_REG_OFFSET(HOST_RGF, HOST_IMR) & ~irq);
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), val);
+ val = (CC_REG(HOST_IMR) & ~irq);
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), val);
}
/* The function called once at driver entry point .*/
int ssi_fips_init(struct ssi_drvdata *p_drvdata)
{
struct ssi_fips_handle *fips_h;
+ struct device *dev = drvdata_to_dev(p_drvdata);
fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
if (!fips_h)
@@ -118,11 +116,11 @@ int ssi_fips_init(struct ssi_drvdata *p_drvdata)
p_drvdata->fips_handle = fips_h;
- SSI_LOG_DEBUG("Initializing fips tasklet\n");
+ dev_dbg(dev, "Initializing fips tasklet\n");
tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
if (!cc_get_tee_fips_status(p_drvdata))
- tee_fips_error();
+ tee_fips_error(dev);
return 0;
}
diff --git a/drivers/staging/ccree/ssi_fips.h b/drivers/staging/ccree/ssi_fips.h
index 369ddf9478e7..63bcca7f3af9 100644
--- a/drivers/staging/ccree/ssi_fips.h
+++ b/drivers/staging/ccree/ssi_fips.h
@@ -40,8 +40,8 @@ static inline int ssi_fips_init(struct ssi_drvdata *p_drvdata)
}
static inline void ssi_fips_fini(struct ssi_drvdata *drvdata) {}
-void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool ok) {}
-void fips_handler(struct ssi_drvdata *drvdata) {}
+static inline void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool ok) {}
+static inline void fips_handler(struct ssi_drvdata *drvdata) {}
#endif /* CONFIG_CRYPTO_FIPS */
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index 13291aeaf350..d79090ed7f9c 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -134,14 +134,13 @@ static int ssi_hash_map_result(struct device *dev,
digestsize,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
- SSI_LOG_ERR("Mapping digest result buffer %u B for DMA failed\n",
- digestsize);
+ dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
+ digestsize);
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped digest result buffer %u B "
- "at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
digestsize, state->digest_result_buff,
- state->digest_result_dma_addr);
+ &state->digest_result_dma_addr);
return 0;
}
@@ -158,54 +157,50 @@ static int ssi_hash_map_request(struct device *dev,
int rc = -ENOMEM;
state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->buff0) {
- SSI_LOG_ERR("Allocating buff0 in context failed\n");
+ if (!state->buff0)
goto fail0;
- }
+
state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->buff1) {
- SSI_LOG_ERR("Allocating buff1 in context failed\n");
+ if (!state->buff1)
goto fail_buff0;
- }
+
state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->digest_result_buff) {
- SSI_LOG_ERR("Allocating digest_result_buff in context failed\n");
+ if (!state->digest_result_buff)
goto fail_buff1;
- }
+
state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
- if (!state->digest_buff) {
- SSI_LOG_ERR("Allocating digest-buffer in context failed\n");
+ if (!state->digest_buff)
goto fail_digest_result_buff;
- }
- SSI_LOG_DEBUG("Allocated digest-buffer in context ctx->digest_buff=@%p\n", state->digest_buff);
+ dev_dbg(dev, "Allocated digest-buffer in context ctx->digest_buff=@%p\n",
+ state->digest_buff);
if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->digest_bytes_len) {
- SSI_LOG_ERR("Allocating digest-bytes-len in context failed\n");
+ if (!state->digest_bytes_len)
goto fail1;
- }
- SSI_LOG_DEBUG("Allocated digest-bytes-len in context state->>digest_bytes_len=@%p\n", state->digest_bytes_len);
+
+ dev_dbg(dev, "Allocated digest-bytes-len in context state->>digest_bytes_len=@%p\n",
+ state->digest_bytes_len);
} else {
state->digest_bytes_len = NULL;
}
state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
- if (!state->opad_digest_buff) {
- SSI_LOG_ERR("Allocating opad-digest-buffer in context failed\n");
+ if (!state->opad_digest_buff)
goto fail2;
- }
- SSI_LOG_DEBUG("Allocated opad-digest-buffer in context state->digest_bytes_len=@%p\n", state->opad_digest_buff);
+
+ dev_dbg(dev, "Allocated opad-digest-buffer in context state->digest_bytes_len=@%p\n",
+ state->opad_digest_buff);
state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
- SSI_LOG_ERR("Mapping digest len %d B at va=%pK for DMA failed\n",
- ctx->inter_digestsize, state->digest_buff);
+ dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize, state->digest_buff);
goto fail3;
}
- SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=%pad\n",
- ctx->inter_digestsize, state->digest_buff,
- state->digest_buff_dma_addr);
+ dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->digest_buff,
+ &state->digest_buff_dma_addr);
if (is_hmac) {
dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
@@ -240,7 +235,7 @@ static int ssi_hash_map_request(struct device *dev,
rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto fail4;
}
}
@@ -248,13 +243,13 @@ static int ssi_hash_map_request(struct device *dev,
if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
- SSI_LOG_ERR("Mapping digest len %u B at va=%pK for DMA failed\n",
- HASH_LEN_SIZE, state->digest_bytes_len);
+ dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
+ HASH_LEN_SIZE, state->digest_bytes_len);
goto fail4;
}
- SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=%pad\n",
- HASH_LEN_SIZE, state->digest_bytes_len,
- state->digest_bytes_len_dma_addr);
+ dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
+ HASH_LEN_SIZE, state->digest_bytes_len,
+ &state->digest_bytes_len_dma_addr);
} else {
state->digest_bytes_len_dma_addr = 0;
}
@@ -262,14 +257,14 @@ static int ssi_hash_map_request(struct device *dev,
if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
- SSI_LOG_ERR("Mapping opad digest %d B at va=%pK for DMA failed\n",
- ctx->inter_digestsize,
- state->opad_digest_buff);
+ dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize,
+ state->opad_digest_buff);
goto fail5;
}
- SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=%pad\n",
- ctx->inter_digestsize, state->opad_digest_buff,
- state->opad_digest_dma_addr);
+ dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->opad_digest_buff,
+ &state->opad_digest_dma_addr);
} else {
state->opad_digest_dma_addr = 0;
}
@@ -316,22 +311,22 @@ static void ssi_hash_unmap_request(struct device *dev,
if (state->digest_buff_dma_addr != 0) {
dma_unmap_single(dev, state->digest_buff_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
- state->digest_buff_dma_addr);
+ dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+ &state->digest_buff_dma_addr);
state->digest_buff_dma_addr = 0;
}
if (state->digest_bytes_len_dma_addr != 0) {
dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
- state->digest_bytes_len_dma_addr);
+ dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
+ &state->digest_bytes_len_dma_addr);
state->digest_bytes_len_dma_addr = 0;
}
if (state->opad_digest_dma_addr != 0) {
dma_unmap_single(dev, state->opad_digest_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
- state->opad_digest_dma_addr);
+ dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
+ &state->opad_digest_dma_addr);
state->opad_digest_dma_addr = 0;
}
@@ -352,11 +347,9 @@ static void ssi_hash_unmap_result(struct device *dev,
state->digest_result_dma_addr,
digestsize,
DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("unmpa digest result buffer "
- "va (%pK) pa (%pad) len %u\n",
- state->digest_result_buff,
- state->digest_result_dma_addr,
- digestsize);
+ dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
+ state->digest_result_buff,
+ &state->digest_result_dma_addr, digestsize);
memcpy(result,
state->digest_result_buff,
digestsize);
@@ -369,7 +362,7 @@ static void ssi_hash_update_complete(struct device *dev, void *ssi_req, void __i
struct ahash_request *req = (struct ahash_request *)ssi_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
- SSI_LOG_DEBUG("req=%pK\n", req);
+ dev_dbg(dev, "req=%pK\n", req);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
req->base.complete(&req->base, 0);
@@ -383,7 +376,7 @@ static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __i
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
- SSI_LOG_DEBUG("req=%pK\n", req);
+ dev_dbg(dev, "req=%pK\n", req);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
@@ -399,7 +392,7 @@ static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *c
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
- SSI_LOG_DEBUG("req=%pK\n", req);
+ dev_dbg(dev, "req=%pK\n", req);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
@@ -414,7 +407,7 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
unsigned int nbytes, u8 *result,
void *async_req)
{
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
@@ -423,20 +416,21 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
int idx = 0;
int rc = 0;
- SSI_LOG_DEBUG("===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
+ dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
- SSI_LOG_ERR("map_ahash_source() failed\n");
+ dev_err(dev, "map_ahash_source() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
- SSI_LOG_ERR("map_ahash_digest() failed\n");
+ dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
- SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
@@ -553,7 +547,7 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
ssi_hash_unmap_request(dev, state, ctx);
@@ -561,7 +555,7 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
} else {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (rc != 0) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
} else {
ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
@@ -579,14 +573,14 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
unsigned int nbytes,
void *async_req)
{
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
u32 idx = 0;
int rc;
- SSI_LOG_DEBUG("===== %s-update (%d) ====\n", ctx->is_hmac ?
- "hmac" : "hash", nbytes);
+ dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
+ "hmac" : "hash", nbytes);
if (nbytes == 0) {
/* no real updates required */
@@ -596,12 +590,12 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size);
if (unlikely(rc)) {
if (rc == 1) {
- SSI_LOG_DEBUG(" data size not require HW update %x\n",
- nbytes);
+ dev_dbg(dev, " data size not require HW update %x\n",
+ nbytes);
/* No hardware updates are required */
return 0;
}
- SSI_LOG_ERR("map_ahash_request_update() failed\n");
+ dev_err(dev, "map_ahash_request_update() failed\n");
return -ENOMEM;
}
@@ -653,13 +647,13 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
}
} else {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (rc != 0) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
} else {
ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
@@ -676,21 +670,22 @@ static int ssi_hash_finup(struct ahash_req_ctx *state,
u8 *result,
void *async_req)
{
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
int idx = 0;
int rc;
- SSI_LOG_DEBUG("===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
+ dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
- SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
- SSI_LOG_ERR("map_ahash_digest() failed\n");
+ dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -783,14 +778,14 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
}
} else {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (rc != 0) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
} else {
@@ -810,22 +805,23 @@ static int ssi_hash_final(struct ahash_req_ctx *state,
u8 *result,
void *async_req)
{
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
int idx = 0;
int rc;
- SSI_LOG_DEBUG("===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
+ dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
- SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
- SSI_LOG_ERR("map_ahash_digest() failed\n");
+ dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -927,14 +923,14 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
}
} else {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (rc != 0) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
} else {
@@ -948,7 +944,7 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
{
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
state->xcbc_count = 0;
@@ -970,10 +966,12 @@ static int ssi_hash_setkey(void *hash,
int i, idx = 0, rc = 0;
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
ssi_sram_addr_t larval_addr;
-
- SSI_LOG_DEBUG("start keylen: %d", keylen);
+ struct device *dev;
ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
+ dev = drvdata_to_dev(ctx->drvdata);
+ dev_dbg(dev, "start keylen: %d", keylen);
+
blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
@@ -989,18 +987,16 @@ static int ssi_hash_setkey(void *hash,
if (keylen != 0) {
ctx->key_params.key_dma_addr = dma_map_single(
- &ctx->drvdata->plat_dev->dev,
- (void *)key,
+ dev, (void *)key,
keylen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev,
+ if (unlikely(dma_mapping_error(dev,
ctx->key_params.key_dma_addr))) {
- SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
- " DMA failed\n", key, keylen);
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
return -ENOMEM;
}
- SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=%pad "
- "keylen=%u\n", ctx->key_params.key_dma_addr,
- ctx->key_params.keylen);
+ dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
if (keylen > blocksize) {
/* Load hash initial state */
@@ -1079,7 +1075,7 @@ static int ssi_hash_setkey(void *hash,
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto out;
}
@@ -1139,12 +1135,10 @@ out:
crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
if (ctx->key_params.key_dma_addr) {
- dma_unmap_single(&ctx->drvdata->plat_dev->dev,
- ctx->key_params.key_dma_addr,
+ dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
- SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
- ctx->key_params.key_dma_addr,
- ctx->key_params.keylen);
+ dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
}
return rc;
}
@@ -1154,10 +1148,11 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
{
struct ssi_crypto_req ssi_req = {};
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
int idx = 0, rc = 0;
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
- SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
+ dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
switch (keylen) {
case AES_KEYSIZE_128:
@@ -1171,19 +1166,15 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
ctx->key_params.keylen = keylen;
ctx->key_params.key_dma_addr = dma_map_single(
- &ctx->drvdata->plat_dev->dev,
- (void *)key,
+ dev, (void *)key,
keylen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev,
- ctx->key_params.key_dma_addr))) {
- SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
- " DMA failed\n", key, keylen);
+ if (unlikely(dma_mapping_error(dev, ctx->key_params.key_dma_addr))) {
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
return -ENOMEM;
}
- SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=%pad "
- "keylen=%u\n",
- ctx->key_params.key_dma_addr,
- ctx->key_params.keylen);
+ dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
ctx->is_hmac = true;
/* 1. Load the AES key */
@@ -1226,12 +1217,10 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
if (rc != 0)
crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
- dma_unmap_single(&ctx->drvdata->plat_dev->dev,
- ctx->key_params.key_dma_addr,
+ dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
- SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
- ctx->key_params.key_dma_addr,
- ctx->key_params.keylen);
+ dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
return rc;
}
@@ -1241,8 +1230,9 @@ static int ssi_cmac_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
- SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
+ dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
ctx->is_hmac = true;
@@ -1259,16 +1249,14 @@ static int ssi_cmac_setkey(struct crypto_ahash *ahash,
/* STAT_PHASE_1: Copy key to ctx */
- dma_sync_single_for_cpu(&ctx->drvdata->plat_dev->dev,
- ctx->opad_tmp_keys_dma_addr,
+ dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
keylen, DMA_TO_DEVICE);
memcpy(ctx->opad_tmp_keys_buff, key, keylen);
if (keylen == 24)
memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
- dma_sync_single_for_device(&ctx->drvdata->plat_dev->dev,
- ctx->opad_tmp_keys_dma_addr,
+ dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
keylen, DMA_TO_DEVICE);
ctx->key_params.keylen = keylen;
@@ -1279,23 +1267,21 @@ static int ssi_cmac_setkey(struct crypto_ahash *ahash,
static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
{
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
if (ctx->digest_buff_dma_addr != 0) {
dma_unmap_single(dev, ctx->digest_buff_dma_addr,
sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped digest-buffer: "
- "digest_buff_dma_addr=%pad\n",
- ctx->digest_buff_dma_addr);
+ dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+ &ctx->digest_buff_dma_addr);
ctx->digest_buff_dma_addr = 0;
}
if (ctx->opad_tmp_keys_dma_addr != 0) {
dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
sizeof(ctx->opad_tmp_keys_buff),
DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped opad-digest: "
- "opad_tmp_keys_dma_addr=%pad\n",
- ctx->opad_tmp_keys_dma_addr);
+ dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
+ &ctx->opad_tmp_keys_dma_addr);
ctx->opad_tmp_keys_dma_addr = 0;
}
@@ -1304,30 +1290,30 @@ static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
{
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
ctx->key_params.keylen = 0;
ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
- SSI_LOG_ERR("Mapping digest len %zu B at va=%pK for DMA failed\n",
- sizeof(ctx->digest_buff), ctx->digest_buff);
+ dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff);
goto fail;
}
- SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=%pad\n",
- sizeof(ctx->digest_buff), ctx->digest_buff,
- ctx->digest_buff_dma_addr);
+ dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff,
+ &ctx->digest_buff_dma_addr);
ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
- SSI_LOG_ERR("Mapping opad digest %zu B at va=%pK for DMA failed\n",
- sizeof(ctx->opad_tmp_keys_buff),
- ctx->opad_tmp_keys_buff);
+ dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
+ sizeof(ctx->opad_tmp_keys_buff),
+ ctx->opad_tmp_keys_buff);
goto fail;
}
- SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
- sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
- ctx->opad_tmp_keys_dma_addr);
+ dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
+ sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
+ &ctx->opad_tmp_keys_dma_addr);
ctx->is_hmac = false;
return 0;
@@ -1361,8 +1347,9 @@ static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
{
struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
- SSI_LOG_DEBUG("ssi_hash_cra_exit");
+ dev_dbg(dev, "ssi_hash_cra_exit");
ssi_hash_free_ctx(ctx);
}
@@ -1371,7 +1358,7 @@ static int ssi_mac_update(struct ahash_request *req)
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
@@ -1388,12 +1375,12 @@ static int ssi_mac_update(struct ahash_request *req)
rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size);
if (unlikely(rc)) {
if (rc == 1) {
- SSI_LOG_DEBUG(" data size not require HW update %x\n",
- req->nbytes);
+ dev_dbg(dev, " data size not require HW update %x\n",
+ req->nbytes);
/* No hardware updates are required */
return 0;
}
- SSI_LOG_ERR("map_ahash_request_update() failed\n");
+ dev_err(dev, "map_ahash_request_update() failed\n");
return -ENOMEM;
}
@@ -1420,7 +1407,7 @@ static int ssi_mac_update(struct ahash_request *req)
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
}
return rc;
@@ -1431,7 +1418,7 @@ static int ssi_mac_final(struct ahash_request *req)
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
int idx = 0;
@@ -1451,15 +1438,15 @@ static int ssi_mac_final(struct ahash_request *req)
key_len = ctx->key_params.keylen;
}
- SSI_LOG_DEBUG("===== final xcbc reminder (%d) ====\n", rem_cnt);
+ dev_dbg(dev, "===== final xcbc reminder (%d) ====\n", rem_cnt);
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) {
- SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
- SSI_LOG_ERR("map_ahash_digest() failed\n");
+ dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -1530,7 +1517,7 @@ static int ssi_mac_final(struct ahash_request *req)
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
}
@@ -1542,7 +1529,7 @@ static int ssi_mac_finup(struct ahash_request *req)
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
int idx = 0;
@@ -1550,18 +1537,18 @@ static int ssi_mac_finup(struct ahash_request *req)
u32 key_len = 0;
u32 digestsize = crypto_ahash_digestsize(tfm);
- SSI_LOG_DEBUG("===== finup xcbc(%d) ====\n", req->nbytes);
+ dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
if (state->xcbc_count > 0 && req->nbytes == 0) {
- SSI_LOG_DEBUG("No data to update. Call to fdx_mac_final\n");
+ dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
return ssi_mac_final(req);
}
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
- SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
- SSI_LOG_ERR("map_ahash_digest() failed\n");
+ dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -1601,7 +1588,7 @@ static int ssi_mac_finup(struct ahash_request *req)
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
}
@@ -1613,7 +1600,7 @@ static int ssi_mac_digest(struct ahash_request *req)
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
u32 digestsize = crypto_ahash_digestsize(tfm);
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
@@ -1621,19 +1608,19 @@ static int ssi_mac_digest(struct ahash_request *req)
int idx = 0;
int rc;
- SSI_LOG_DEBUG("===== -digest mac (%d) ====\n", req->nbytes);
+ dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);
if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
- SSI_LOG_ERR("map_ahash_source() failed\n");
+ dev_err(dev, "map_ahash_source() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
- SSI_LOG_ERR("map_ahash_digest() failed\n");
+ dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
- SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
@@ -1673,7 +1660,7 @@ static int ssi_mac_digest(struct ahash_request *req)
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
ssi_hash_unmap_request(dev, state, ctx);
@@ -1727,8 +1714,9 @@ static int ssi_ahash_init(struct ahash_request *req)
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
- SSI_LOG_DEBUG("===== init (%d) ====\n", req->nbytes);
+ dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
return ssi_hash_init(state, ctx);
}
@@ -1737,7 +1725,7 @@ static int ssi_ahash_export(struct ahash_request *req, void *out)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ahash_req_ctx *state = ahash_request_ctx(req);
u8 *curr_buff = state->buff_index ? state->buff1 : state->buff0;
u32 curr_buff_cnt = state->buff_index ? state->buff1_cnt :
@@ -1778,7 +1766,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ahash_req_ctx *state = ahash_request_ctx(req);
u32 tmp;
int rc;
@@ -2054,17 +2042,17 @@ static struct ssi_hash_template driver_hash[] = {
};
static struct ssi_hash_alg *
-ssi_hash_create_alg(struct ssi_hash_template *template, bool keyed)
+ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
+ bool keyed)
{
struct ssi_hash_alg *t_crypto_alg;
struct crypto_alg *alg;
struct ahash_alg *halg;
t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
- if (!t_crypto_alg) {
- SSI_LOG_ERR("failed to allocate t_crypto_alg\n");
+ if (!t_crypto_alg)
return ERR_PTR(-ENOMEM);
- }
+
t_crypto_alg->ahash_alg = template->template_ahash;
halg = &t_crypto_alg->ahash_alg;
@@ -2107,6 +2095,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
unsigned int larval_seq_len = 0;
struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
+ struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
#if (DX_DEV_SHA_MAX > 256)
int i;
@@ -2191,7 +2180,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
}
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc);
+ dev_err(dev, "send_request() failed (rc = %d)\n", rc);
goto init_digest_const_err;
}
larval_seq_len = 0;
@@ -2209,7 +2198,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
}
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc);
+ dev_err(dev, "send_request() failed (rc = %d)\n", rc);
goto init_digest_const_err;
}
#endif
@@ -2223,17 +2212,15 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
struct ssi_hash_handle *hash_handle;
ssi_sram_addr_t sram_buff;
u32 sram_size_to_alloc;
+ struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
int alg;
hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
- if (!hash_handle) {
- SSI_LOG_ERR("kzalloc failed to allocate %zu B\n",
- sizeof(*hash_handle));
- rc = -ENOMEM;
- goto fail;
- }
+ if (!hash_handle)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&hash_handle->hash_list);
drvdata->hash_handle = hash_handle;
sram_size_to_alloc = sizeof(digest_len_init) +
@@ -2249,7 +2236,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
if (sram_buff == NULL_SRAM_ADDR) {
- SSI_LOG_ERR("SRAM pool exhausted\n");
+ dev_err(dev, "SRAM pool exhausted\n");
rc = -ENOMEM;
goto fail;
}
@@ -2260,31 +2247,29 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
/*must be set before the alg registration as it is being used there*/
rc = ssi_hash_init_sram_digest_consts(drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("Init digest CONST failed (rc=%d)\n", rc);
+ dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
goto fail;
}
- INIT_LIST_HEAD(&hash_handle->hash_list);
-
/* ahash registration */
for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
struct ssi_hash_alg *t_alg;
int hw_mode = driver_hash[alg].hw_mode;
/* register hmac version */
- t_alg = ssi_hash_create_alg(&driver_hash[alg], true);
+ t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, true);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
- SSI_LOG_ERR("%s alg allocation failed\n",
- driver_hash[alg].driver_name);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
goto fail;
}
t_alg->drvdata = drvdata;
rc = crypto_register_ahash(&t_alg->ahash_alg);
if (unlikely(rc)) {
- SSI_LOG_ERR("%s alg registration failed\n",
- driver_hash[alg].driver_name);
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
kfree(t_alg);
goto fail;
} else {
@@ -2297,19 +2282,19 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
continue;
/* register hash version */
- t_alg = ssi_hash_create_alg(&driver_hash[alg], false);
+ t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, false);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
- SSI_LOG_ERR("%s alg allocation failed\n",
- driver_hash[alg].driver_name);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
goto fail;
}
t_alg->drvdata = drvdata;
rc = crypto_register_ahash(&t_alg->ahash_alg);
if (unlikely(rc)) {
- SSI_LOG_ERR("%s alg registration failed\n",
- driver_hash[alg].driver_name);
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
kfree(t_alg);
goto fail;
} else {
@@ -2443,6 +2428,7 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
unsigned int *seq_size)
{
unsigned int idx = *seq_size;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
hw_desc_init(&desc[idx]);
@@ -2453,7 +2439,7 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
idx++;
} else {
if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
- SSI_LOG_DEBUG(" NULL mode\n");
+ dev_dbg(dev, " NULL mode\n");
/* nothing to build */
return;
}
@@ -2493,6 +2479,7 @@ ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
{
struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
+ struct device *dev = drvdata_to_dev(_drvdata);
switch (mode) {
case DRV_HASH_NULL:
@@ -2527,7 +2514,7 @@ ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
sizeof(sha384_init));
#endif
default:
- SSI_LOG_ERR("Invalid hash mode (%d)\n", mode);
+ dev_err(dev, "Invalid hash mode (%d)\n", mode);
}
/*This is valid wrong value to avoid kernel crash*/
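Note on the hash conversion above: every open-coded &ctx->drvdata->plat_dev->dev lookup is replaced by the drvdata_to_dev() helper, and each dma_addr_t printed with %pad is now passed by address, since the %pad printk specifier dereferences a pointer to the dma_addr_t rather than taking its value. A minimal sketch of the helper, assuming it is the direct equivalent of the expression the patch removes (its exact home header is an assumption):

/* Sketch only: assumed equivalent of the removed open-coded lookup */
static inline struct device *drvdata_to_dev(struct ssi_drvdata *drvdata)
{
	return &drvdata->plat_dev->dev;
}

/* %pad takes a pointer, hence the added '&' throughout the patch */
dev_dbg(dev, "Mapped digest to dma=%pad\n", &state->digest_buff_dma_addr);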
diff --git a/drivers/staging/ccree/ssi_ivgen.c b/drivers/staging/ccree/ssi_ivgen.c
index b01e03231947..3f082f41ae8f 100644
--- a/drivers/staging/ccree/ssi_ivgen.c
+++ b/drivers/staging/ccree/ssi_ivgen.c
@@ -193,12 +193,9 @@ int ssi_ivgen_init(struct ssi_drvdata *drvdata)
/* Allocate "this" context */
drvdata->ivgen_handle = kzalloc(sizeof(*drvdata->ivgen_handle),
GFP_KERNEL);
- if (!drvdata->ivgen_handle) {
- SSI_LOG_ERR("Not enough memory to allocate IVGEN context "
- "(%zu B)\n", sizeof(*drvdata->ivgen_handle));
- rc = -ENOMEM;
- goto out;
- }
+ if (!drvdata->ivgen_handle)
+ return -ENOMEM;
+
ivgen_ctx = drvdata->ivgen_handle;
/* Allocate pool's header for intial enc. key/IV */
@@ -206,15 +203,15 @@ int ssi_ivgen_init(struct ssi_drvdata *drvdata)
&ivgen_ctx->pool_meta_dma,
GFP_KERNEL);
if (!ivgen_ctx->pool_meta) {
- SSI_LOG_ERR("Not enough memory to allocate DMA of pool_meta "
- "(%u B)\n", SSI_IVPOOL_META_SIZE);
+ dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
+ SSI_IVPOOL_META_SIZE);
rc = -ENOMEM;
goto out;
}
/* Allocate IV pool in SRAM */
ivgen_ctx->pool = ssi_sram_mgr_alloc(drvdata, SSI_IVPOOL_SIZE);
if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
- SSI_LOG_ERR("SRAM pool exhausted\n");
+ dev_err(device, "SRAM pool exhausted\n");
rc = -ENOMEM;
goto out;
}
@@ -248,6 +245,7 @@ int ssi_ivgen_getiv(
{
struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
unsigned int idx = *iv_seq_len;
+ struct device *dev = drvdata_to_dev(drvdata);
unsigned int t;
if ((iv_out_size != CC_AES_IV_SIZE) &&
@@ -291,7 +289,7 @@ int ssi_ivgen_getiv(
ivgen_ctx->next_iv_ofs += iv_out_size;
if ((SSI_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
- SSI_LOG_DEBUG("Pool exhausted, regenerating iv-pool\n");
+ dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
/* pool is drained -regenerate it! */
return ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, iv_seq_len);
}
diff --git a/drivers/staging/ccree/ssi_pm.c b/drivers/staging/ccree/ssi_pm.c
index 31325e6cd4b4..36a498098a70 100644
--- a/drivers/staging/ccree/ssi_pm.c
+++ b/drivers/staging/ccree/ssi_pm.c
@@ -40,11 +40,12 @@ int ssi_power_mgr_runtime_suspend(struct device *dev)
(struct ssi_drvdata *)dev_get_drvdata(dev);
int rc;
- SSI_LOG_DEBUG("set HOST_POWER_DOWN_EN\n");
- WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
+ dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
rc = ssi_request_mgr_runtime_suspend_queue(drvdata);
if (rc != 0) {
- SSI_LOG_ERR("ssi_request_mgr_runtime_suspend_queue (%x)\n", rc);
+ dev_err(dev, "ssi_request_mgr_runtime_suspend_queue (%x)\n",
+ rc);
return rc;
}
fini_cc_regs(drvdata);
@@ -58,24 +59,24 @@ int ssi_power_mgr_runtime_resume(struct device *dev)
struct ssi_drvdata *drvdata =
(struct ssi_drvdata *)dev_get_drvdata(dev);
- SSI_LOG_DEBUG("unset HOST_POWER_DOWN_EN\n");
- WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
+ dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
rc = cc_clk_on(drvdata);
if (rc) {
- SSI_LOG_ERR("failed getting clock back on. We're toast.\n");
+ dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
rc = init_cc_regs(drvdata, false);
if (rc != 0) {
- SSI_LOG_ERR("init_cc_regs (%x)\n", rc);
+ dev_err(dev, "init_cc_regs (%x)\n", rc);
return rc;
}
rc = ssi_request_mgr_runtime_resume_queue(drvdata);
if (rc != 0) {
- SSI_LOG_ERR("ssi_request_mgr_runtime_resume_queue (%x)\n", rc);
+ dev_err(dev, "ssi_request_mgr_runtime_resume_queue (%x)\n", rc);
return rc;
}
@@ -109,7 +110,8 @@ int ssi_power_mgr_runtime_put_suspend(struct device *dev)
rc = pm_runtime_put_autosuspend(dev);
} else {
/* Something wrong happens*/
- BUG();
+ dev_err(dev, "request to suspend already suspended queue");
+ rc = -EBUSY;
}
return rc;
}
@@ -120,16 +122,17 @@ int ssi_power_mgr_init(struct ssi_drvdata *drvdata)
{
int rc = 0;
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- struct platform_device *plat_dev = drvdata->plat_dev;
+ struct device *dev = drvdata_to_dev(drvdata);
+
/* must be before the enabling to avoid resdundent suspending */
- pm_runtime_set_autosuspend_delay(&plat_dev->dev, SSI_SUSPEND_TIMEOUT);
- pm_runtime_use_autosuspend(&plat_dev->dev);
+ pm_runtime_set_autosuspend_delay(dev, SSI_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
/* activate the PM module */
- rc = pm_runtime_set_active(&plat_dev->dev);
+ rc = pm_runtime_set_active(dev);
if (rc != 0)
return rc;
/* enable the PM module*/
- pm_runtime_enable(&plat_dev->dev);
+ pm_runtime_enable(dev);
#endif
return rc;
}
@@ -137,8 +140,6 @@ int ssi_power_mgr_init(struct ssi_drvdata *drvdata)
void ssi_power_mgr_fini(struct ssi_drvdata *drvdata)
{
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- struct platform_device *plat_dev = drvdata->plat_dev;
-
- pm_runtime_disable(&plat_dev->dev);
+ pm_runtime_disable(drvdata_to_dev(drvdata));
#endif
}
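The ssi_pm.c hunks also switch the raw WRITE_REGISTER()/READ_REGISTER() accesses to cc_iowrite()/cc_ioread() wrappers keyed by drvdata. A minimal sketch of what those wrappers are assumed to look like, given that the removed code computed offsets against drvdata->cc_base:

/* Sketch under the assumption that CC_REG() resolves to a byte offset
 * from the CryptoCell MMIO base held in drvdata->cc_base.
 */
static inline void cc_iowrite(struct ssi_drvdata *drvdata, u32 reg, u32 val)
{
	iowrite32(val, drvdata->cc_base + reg);
}

static inline u32 cc_ioread(struct ssi_drvdata *drvdata, u32 reg)
{
	return ioread32(drvdata->cc_base + reg);
}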
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
index e5c2f92857f6..a8a7dc672d4c 100644
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ b/drivers/staging/ccree/ssi_request_mgr.c
@@ -49,7 +49,6 @@ struct ssi_request_mgr_handle {
dma_addr_t dummy_comp_buff_dma;
struct cc_hw_desc monitor_desc;
- volatile unsigned long monitor_lock;
#ifdef COMP_IN_WQ
struct workqueue_struct *workq;
struct delayed_work compwork;
@@ -69,19 +68,19 @@ static void comp_work_handler(struct work_struct *work);
void request_mgr_fini(struct ssi_drvdata *drvdata)
{
struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
if (!req_mgr_h)
return; /* Not allocated */
if (req_mgr_h->dummy_comp_buff_dma != 0) {
- dma_free_coherent(&drvdata->plat_dev->dev,
- sizeof(u32), req_mgr_h->dummy_comp_buff,
+ dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
req_mgr_h->dummy_comp_buff_dma);
}
- SSI_LOG_DEBUG("max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
+ dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
req_mgr_h->min_free_hw_slots));
- SSI_LOG_DEBUG("max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
+ dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
#ifdef COMP_IN_WQ
flush_workqueue(req_mgr_h->workq);
@@ -98,6 +97,7 @@ void request_mgr_fini(struct ssi_drvdata *drvdata)
int request_mgr_init(struct ssi_drvdata *drvdata)
{
struct ssi_request_mgr_handle *req_mgr_h;
+ struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
@@ -110,24 +110,24 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
spin_lock_init(&req_mgr_h->hw_lock);
#ifdef COMP_IN_WQ
- SSI_LOG_DEBUG("Initializing completion workqueue\n");
+ dev_dbg(dev, "Initializing completion workqueue\n");
req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
if (unlikely(!req_mgr_h->workq)) {
- SSI_LOG_ERR("Failed creating work queue\n");
+ dev_err(dev, "Failed creating work queue\n");
rc = -ENOMEM;
goto req_mgr_init_err;
}
INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
- SSI_LOG_DEBUG("Initializing completion tasklet\n");
+ dev_dbg(dev, "Initializing completion tasklet\n");
tasklet_init(&req_mgr_h->comptask, comp_handler, (unsigned long)drvdata);
#endif
- req_mgr_h->hw_queue_size = READ_REGISTER(drvdata->cc_base +
- CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_SRAM_SIZE));
- SSI_LOG_DEBUG("hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
+ req_mgr_h->hw_queue_size = cc_ioread(drvdata,
+ CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
+ dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
- SSI_LOG_ERR("Invalid HW queue size = %u (Min. required is %u)\n",
- req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
+ dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
+ req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
rc = -ENOMEM;
goto req_mgr_init_err;
}
@@ -135,13 +135,12 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
req_mgr_h->max_used_sw_slots = 0;
/* Allocate DMA word for "dummy" completion descriptor use */
- req_mgr_h->dummy_comp_buff = dma_alloc_coherent(&drvdata->plat_dev->dev,
- sizeof(u32),
+ req_mgr_h->dummy_comp_buff = dma_alloc_coherent(dev, sizeof(u32),
&req_mgr_h->dummy_comp_buff_dma,
GFP_KERNEL);
if (!req_mgr_h->dummy_comp_buff) {
- SSI_LOG_ERR("Not enough memory to allocate DMA (%zu) dropped "
- "buffer\n", sizeof(u32));
+ dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
+ sizeof(u32));
rc = -ENOMEM;
goto req_mgr_init_err;
}
@@ -168,17 +167,17 @@ static inline void enqueue_seq(
int i;
for (i = 0; i < seq_len; i++) {
- writel_relaxed(seq[i].word[0], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[1], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[2], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[3], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[4], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[0], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[1], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[2], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[3], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[4], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
wmb();
- writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
#ifdef DX_DUMP_DESCS
- SSI_LOG_DEBUG("desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
- seq[i].word[0], seq[i].word[1], seq[i].word[2],
- seq[i].word[3], seq[i].word[4], seq[i].word[5]);
+ dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i, seq[i].word[0], seq[i].word[1], seq[i].word[2],
+ seq[i].word[3], seq[i].word[4], seq[i].word[5]);
#endif
}
}
@@ -198,11 +197,12 @@ static void request_mgr_complete(struct device *dev, void *dx_compl_h, void __io
}
static inline int request_mgr_queues_status_check(
+ struct ssi_drvdata *drvdata,
struct ssi_request_mgr_handle *req_mgr_h,
- void __iomem *cc_base,
unsigned int total_seq_len)
{
unsigned long poll_queue;
+ struct device *dev = drvdata_to_dev(drvdata);
/* SW queue is checked only once as it will not
* be chaned during the poll becasue the spinlock_bh
@@ -211,8 +211,8 @@ static inline int request_mgr_queues_status_check(
if (unlikely(((req_mgr_h->req_queue_head + 1) &
(MAX_REQUEST_QUEUE_SIZE - 1)) ==
req_mgr_h->req_queue_tail)) {
- SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
- req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
+ dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
return -EBUSY;
}
@@ -222,8 +222,7 @@ static inline int request_mgr_queues_status_check(
/* Wait for space in HW queue. Poll constant num of iterations. */
for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue++) {
req_mgr_h->q_free_slots =
- CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL,
- DSCRPTR_QUEUE_CONTENT));
+ cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
if (unlikely(req_mgr_h->q_free_slots <
req_mgr_h->min_free_hw_slots)) {
req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
@@ -234,16 +233,13 @@ static inline int request_mgr_queues_status_check(
return 0;
}
- SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
- req_mgr_h->q_free_slots, total_seq_len);
+ dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->q_free_slots, total_seq_len);
}
/* No room in the HW queue try again later */
- SSI_LOG_DEBUG("HW FIFO full, timeout. req_queue_head=%d "
- "sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
- req_mgr_h->req_queue_head,
- MAX_REQUEST_QUEUE_SIZE,
- req_mgr_h->q_free_slots,
- total_seq_len);
+ dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
+ req_mgr_h->q_free_slots, total_seq_len);
return -EAGAIN;
}
@@ -270,16 +266,17 @@ int send_request(
unsigned int iv_seq_len = 0;
unsigned int total_seq_len = len; /*initial sequence length*/
struct cc_hw_desc iv_seq[SSI_IVPOOL_SEQ_LEN];
+ struct device *dev = drvdata_to_dev(drvdata);
int rc;
unsigned int max_required_seq_len = (total_seq_len +
((ssi_req->ivgen_dma_addr_len == 0) ? 0 :
SSI_IVPOOL_SEQ_LEN) +
- ((is_dout == 0) ? 1 : 0));
+ (!is_dout ? 1 : 0));
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev);
+ rc = ssi_power_mgr_runtime_get(dev);
if (rc != 0) {
- SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n", rc);
+ dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
return rc;
}
#endif
@@ -291,7 +288,7 @@ int send_request(
* in case iv gen add the max size and in case of no dout add 1
* for the internal completion descriptor
*/
- rc = request_mgr_queues_status_check(req_mgr_h, cc_base,
+ rc = request_mgr_queues_status_check(drvdata, req_mgr_h,
max_required_seq_len);
if (likely(rc == 0))
/* There is enough place in the queue */
@@ -304,7 +301,7 @@ int send_request(
* (SW queue is full)
*/
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
+ ssi_power_mgr_runtime_put_suspend(dev);
#endif
return rc;
}
@@ -324,12 +321,12 @@ int send_request(
}
if (ssi_req->ivgen_dma_addr_len > 0) {
- SSI_LOG_DEBUG("Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
- ssi_req->ivgen_dma_addr_len,
- ssi_req->ivgen_dma_addr[0],
- ssi_req->ivgen_dma_addr[1],
- ssi_req->ivgen_dma_addr[2],
- ssi_req->ivgen_size);
+ dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
+ ssi_req->ivgen_dma_addr_len,
+ &ssi_req->ivgen_dma_addr[0],
+ &ssi_req->ivgen_dma_addr[1],
+ &ssi_req->ivgen_dma_addr[2],
+ ssi_req->ivgen_size);
/* Acquire IV from pool */
rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr,
@@ -337,10 +334,10 @@ int send_request(
ssi_req->ivgen_size, iv_seq, &iv_seq_len);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("Failed to generate IV (rc=%d)\n", rc);
+ dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
spin_unlock_bh(&req_mgr_h->hw_lock);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
+ ssi_power_mgr_runtime_put_suspend(dev);
#endif
return rc;
}
@@ -357,7 +354,7 @@ int send_request(
req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
/* TODO: Use circ_buf.h ? */
- SSI_LOG_DEBUG("Enqueue request head=%u\n", req_mgr_h->req_queue_head);
+ dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
#ifdef FLUSH_CACHE_ALL
flush_cache_all();
@@ -369,11 +366,16 @@ int send_request(
enqueue_seq(cc_base, &req_mgr_h->compl_desc, (is_dout ? 0 : 1));
if (unlikely(req_mgr_h->q_free_slots < total_seq_len)) {
- /*This means that there was a problem with the resume*/
- BUG();
+ /* This situation should never occur. Maybe indicating problem
+ * with resuming power. Set the free slot count to 0 and hope
+ * for the best.
+ */
+ dev_err(dev, "HW free slot count mismatch.");
+ req_mgr_h->q_free_slots = 0;
+ } else {
+ /* Update the free slots in HW queue */
+ req_mgr_h->q_free_slots -= total_seq_len;
}
- /* Update the free slots in HW queue */
- req_mgr_h->q_free_slots -= total_seq_len;
spin_unlock_bh(&req_mgr_h->hw_lock);
@@ -383,10 +385,9 @@ int send_request(
*/
wait_for_completion(&ssi_req->seq_compl);
return 0;
- } else {
- /* Operation still in process */
- return -EINPROGRESS;
}
+ /* Operation still in process */
+ return -EINPROGRESS;
}
/*!
@@ -409,7 +410,8 @@ int send_request_init(
int rc = 0;
/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT. */
- rc = request_mgr_queues_status_check(req_mgr_h, cc_base, total_seq_len);
+ rc = request_mgr_queues_status_check(drvdata, req_mgr_h,
+ total_seq_len);
if (unlikely(rc != 0))
return rc;
@@ -418,8 +420,8 @@ int send_request_init(
enqueue_seq(cc_base, desc, len);
/* Update the free slots in HW queue */
- req_mgr_h->q_free_slots = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL,
- DSCRPTR_QUEUE_CONTENT));
+ req_mgr_h->q_free_slots =
+ cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
return 0;
}
@@ -448,7 +450,7 @@ static void comp_work_handler(struct work_struct *work)
static void proc_completions(struct ssi_drvdata *drvdata)
{
struct ssi_crypto_req *ssi_req;
- struct platform_device *plat_dev = drvdata->plat_dev;
+ struct device *dev = drvdata_to_dev(drvdata);
struct ssi_request_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
@@ -460,8 +462,13 @@ static void proc_completions(struct ssi_drvdata *drvdata)
/* Dequeue request */
if (unlikely(request_mgr_handle->req_queue_head == request_mgr_handle->req_queue_tail)) {
- SSI_LOG_ERR("Request queue is empty req_queue_head==req_queue_tail==%u\n", request_mgr_handle->req_queue_head);
- BUG();
+ /* We are supposed to handle a completion but our
+ * queue is empty. This is not normal. Return and
+ * hope for the best.
+ */
+ dev_err(dev, "Request queue is empty head == tail %u\n",
+ request_mgr_handle->req_queue_head);
+ break;
}
ssi_req = &request_mgr_handle->req_queue[request_mgr_handle->req_queue_tail];
@@ -476,39 +483,40 @@ static void proc_completions(struct ssi_drvdata *drvdata)
u32 axi_err;
int i;
- SSI_LOG_INFO("Delay\n");
+ dev_info(dev, "Delay\n");
for (i = 0; i < 1000000; i++)
- axi_err = READ_REGISTER(drvdata->cc_base + CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
+ axi_err = cc_ioread(drvdata,
+ CC_REG(AXIM_MON_ERR));
}
#endif /* COMPLETION_DELAY */
if (likely(ssi_req->user_cb))
- ssi_req->user_cb(&plat_dev->dev, ssi_req->user_arg, drvdata->cc_base);
+ ssi_req->user_cb(dev, ssi_req->user_arg,
+ drvdata->cc_base);
request_mgr_handle->req_queue_tail = (request_mgr_handle->req_queue_tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
- SSI_LOG_DEBUG("Dequeue request tail=%u\n", request_mgr_handle->req_queue_tail);
- SSI_LOG_DEBUG("Request completed. axi_completed=%d\n", request_mgr_handle->axi_completed);
+ dev_dbg(dev, "Dequeue request tail=%u\n",
+ request_mgr_handle->req_queue_tail);
+ dev_dbg(dev, "Request completed. axi_completed=%d\n",
+ request_mgr_handle->axi_completed);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- rc = ssi_power_mgr_runtime_put_suspend(&plat_dev->dev);
+ rc = ssi_power_mgr_runtime_put_suspend(dev);
if (rc != 0)
- SSI_LOG_ERR("Failed to set runtime suspension %d\n", rc);
+ dev_err(dev, "Failed to set runtime suspension %d\n",
+ rc);
#endif
}
}
-static inline u32 cc_axi_comp_count(void __iomem *cc_base)
+static inline u32 cc_axi_comp_count(struct ssi_drvdata *drvdata)
{
- /* The CC_HAL_READ_REGISTER macro implictly requires and uses
- * a base MMIO register address variable named cc_base.
- */
return FIELD_GET(AXIM_MON_COMP_VALUE,
- CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
+ cc_ioread(drvdata, CC_REG(AXIM_MON_COMP)));
}
/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
- void __iomem *cc_base = drvdata->cc_base;
struct ssi_request_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
@@ -517,12 +525,16 @@ static void comp_handler(unsigned long devarg)
irq = (drvdata->irq & SSI_COMP_IRQ_MASK);
if (irq & SSI_COMP_IRQ_MASK) {
- /* To avoid the interrupt from firing as we unmask it, we clear it now */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);
+ /* To avoid the interrupt from firing as we unmask it,
+ * we clear it now
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), SSI_COMP_IRQ_MASK);
- /* Avoid race with above clear: Test completion counter once more */
+ /* Avoid race with above clear: Test completion counter
+ * once more
+ */
request_mgr_handle->axi_completed +=
- cc_axi_comp_count(cc_base);
+ cc_axi_comp_count(drvdata);
while (request_mgr_handle->axi_completed) {
do {
@@ -531,20 +543,21 @@ static void comp_handler(unsigned long devarg)
* request_mgr_handle->axi_completed is 0.
*/
request_mgr_handle->axi_completed =
- cc_axi_comp_count(cc_base);
+ cc_axi_comp_count(drvdata);
} while (request_mgr_handle->axi_completed > 0);
- /* To avoid the interrupt from firing as we unmask it, we clear it now */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);
+ cc_iowrite(drvdata, CC_REG(HOST_ICR),
+ SSI_COMP_IRQ_MASK);
- /* Avoid race with above clear: Test completion counter once more */
request_mgr_handle->axi_completed +=
- cc_axi_comp_count(cc_base);
+ cc_axi_comp_count(drvdata);
}
}
- /* after verifing that there is nothing to do, Unmask AXI completion interrupt */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
- CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
+ /* after verifing that there is nothing to do,
+ * unmask AXI completion interrupt
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR),
+ cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
}
/*
diff --git a/drivers/staging/ccree/ssi_sram_mgr.c b/drivers/staging/ccree/ssi_sram_mgr.c
index f11116afe89a..07260d168c91 100644
--- a/drivers/staging/ccree/ssi_sram_mgr.c
+++ b/drivers/staging/ccree/ssi_sram_mgr.c
@@ -50,28 +50,14 @@ void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata)
*/
int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
{
- struct ssi_sram_mgr_ctx *smgr_ctx;
- int rc;
-
/* Allocate "this" context */
- drvdata->sram_mgr_handle = kzalloc(
- sizeof(struct ssi_sram_mgr_ctx), GFP_KERNEL);
- if (!drvdata->sram_mgr_handle) {
- SSI_LOG_ERR("Not enough memory to allocate SRAM_MGR ctx (%zu)\n",
- sizeof(struct ssi_sram_mgr_ctx));
- rc = -ENOMEM;
- goto out;
- }
- smgr_ctx = drvdata->sram_mgr_handle;
+ drvdata->sram_mgr_handle = kzalloc(sizeof(struct ssi_sram_mgr_ctx),
+ GFP_KERNEL);
- /* Pool starts at start of SRAM */
- smgr_ctx->sram_free_offset = 0;
+ if (!drvdata->sram_mgr_handle)
+ return -ENOMEM;
return 0;
-
-out:
- ssi_sram_mgr_fini(drvdata);
- return rc;
}
/*!
@@ -86,22 +72,23 @@ out:
ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
{
struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
ssi_sram_addr_t p;
if (unlikely((size & 0x3) != 0)) {
- SSI_LOG_ERR("Requested buffer size (%u) is not multiple of 4",
- size);
+ dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
+ size);
return NULL_SRAM_ADDR;
}
if (unlikely(size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset))) {
- SSI_LOG_ERR("Not enough space to allocate %u B (at offset %llu)\n",
- size, smgr_ctx->sram_free_offset);
+ dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
+ size, smgr_ctx->sram_free_offset);
return NULL_SRAM_ADDR;
}
p = smgr_ctx->sram_free_offset;
smgr_ctx->sram_free_offset += size;
- SSI_LOG_DEBUG("Allocated %u B @ %u\n", size, (unsigned int)p);
+ dev_dbg(dev, "Allocated %u B @ %u\n", size, (unsigned int)p);
return p;
}
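For context on the ssi_sram_mgr_alloc() error paths above: the callers touched by this patch (ssi_hash_alloc() and ssi_ivgen_init()) check for NULL_SRAM_ADDR in the same way. A short usage sketch, with the size argument purely illustrative:

/* Illustrative caller; size and message are placeholders */
ssi_sram_addr_t pool = ssi_sram_mgr_alloc(drvdata, SSI_IVPOOL_SIZE);
if (pool == NULL_SRAM_ADDR) {
	dev_err(dev, "SRAM pool exhausted\n");
	return -ENOMEM;
}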
diff --git a/drivers/staging/ccree/ssi_sysfs.c b/drivers/staging/ccree/ssi_sysfs.c
index 0655658bba4d..5d39f15cdb59 100644
--- a/drivers/staging/ccree/ssi_sysfs.c
+++ b/drivers/staging/ccree/ssi_sysfs.c
@@ -24,277 +24,22 @@
static struct ssi_drvdata *sys_get_drvdata(void);
-#ifdef CC_CYCLE_COUNT
-
-#include <asm/timex.h>
-
-struct stat_item {
- unsigned int min;
- unsigned int max;
- cycles_t sum;
- unsigned int count;
-};
-
-struct stat_name {
- const char *op_type_name;
- const char *stat_phase_name[MAX_STAT_PHASES];
-};
-
-static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] = {
- {
- /* STAT_OP_TYPE_NULL */
- .op_type_name = "NULL",
- .stat_phase_name = {NULL},
- },
- {
- .op_type_name = "Encode",
- .stat_phase_name[STAT_PHASE_0] = "Init and sanity checks",
- .stat_phase_name[STAT_PHASE_1] = "Map buffers",
- .stat_phase_name[STAT_PHASE_2] = "Create sequence",
- .stat_phase_name[STAT_PHASE_3] = "Send Request",
- .stat_phase_name[STAT_PHASE_4] = "HW-Q push",
- .stat_phase_name[STAT_PHASE_5] = "Sequence completion",
- .stat_phase_name[STAT_PHASE_6] = "HW cycles",
- },
- { .op_type_name = "Decode",
- .stat_phase_name[STAT_PHASE_0] = "Init and sanity checks",
- .stat_phase_name[STAT_PHASE_1] = "Map buffers",
- .stat_phase_name[STAT_PHASE_2] = "Create sequence",
- .stat_phase_name[STAT_PHASE_3] = "Send Request",
- .stat_phase_name[STAT_PHASE_4] = "HW-Q push",
- .stat_phase_name[STAT_PHASE_5] = "Sequence completion",
- .stat_phase_name[STAT_PHASE_6] = "HW cycles",
- },
- { .op_type_name = "Setkey",
- .stat_phase_name[STAT_PHASE_0] = "Init and sanity checks",
- .stat_phase_name[STAT_PHASE_1] = "Copy key to ctx",
- .stat_phase_name[STAT_PHASE_2] = "Create sequence",
- .stat_phase_name[STAT_PHASE_3] = "Send Request",
- .stat_phase_name[STAT_PHASE_4] = "HW-Q push",
- .stat_phase_name[STAT_PHASE_5] = "Sequence completion",
- .stat_phase_name[STAT_PHASE_6] = "HW cycles",
- },
- {
- .op_type_name = "Generic",
- .stat_phase_name[STAT_PHASE_0] = "Interrupt",
- .stat_phase_name[STAT_PHASE_1] = "ISR-to-Tasklet",
- .stat_phase_name[STAT_PHASE_2] = "Tasklet start-to-end",
- .stat_phase_name[STAT_PHASE_3] = "Tasklet:user_cb()",
- .stat_phase_name[STAT_PHASE_4] = "Tasklet:dx_X_complete() - w/o X_complete()",
- .stat_phase_name[STAT_PHASE_5] = "",
- .stat_phase_name[STAT_PHASE_6] = "HW cycles",
- }
-};
-
-/*
- * Structure used to create a directory
- * and its attributes in sysfs.
- */
-struct sys_dir {
- struct kobject *sys_dir_kobj;
- struct attribute_group sys_dir_attr_group;
- struct attribute **sys_dir_attr_list;
- u32 num_of_attrs;
- struct ssi_drvdata *drvdata; /* Associated driver context */
-};
-
-/* top level directory structures */
-struct sys_dir sys_top_dir;
-
-static DEFINE_SPINLOCK(stat_lock);
-
-/* List of DBs */
-static struct stat_item stat_host_db[MAX_STAT_OP_TYPES][MAX_STAT_PHASES];
-static struct stat_item stat_cc_db[MAX_STAT_OP_TYPES][MAX_STAT_PHASES];
-
-static void init_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES])
-{
- unsigned int i, j;
-
- /* Clear db */
- for (i = 0; i < MAX_STAT_OP_TYPES; i++) {
- for (j = 0; j < MAX_STAT_PHASES; j++) {
- item[i][j].min = 0xFFFFFFFF;
- item[i][j].max = 0;
- item[i][j].sum = 0;
- item[i][j].count = 0;
- }
- }
-}
-
-static void update_db(struct stat_item *item, unsigned int result)
-{
- item->count++;
- item->sum += result;
- if (result < item->min)
- item->min = result;
- if (result > item->max)
- item->max = result;
-}
-
-static void display_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES])
-{
- unsigned int i, j;
- u64 avg;
-
- for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) {
- for (j = 0; j < MAX_STAT_PHASES; j++) {
- if (item[i][j].count > 0) {
- avg = (u64)item[i][j].sum;
- do_div(avg, item[i][j].count);
- SSI_LOG_ERR("%s, %s: min=%d avg=%d max=%d sum=%lld count=%d\n",
- stat_name_db[i].op_type_name,
- stat_name_db[i].stat_phase_name[j],
- item[i][j].min, (int)avg,
- item[i][j].max,
- (long long)item[i][j].sum,
- item[i][j].count);
- }
- }
- }
-}
-
-/**************************************
- * Attributes show functions section *
- **************************************/
-
-static ssize_t ssi_sys_stats_host_db_clear(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- init_db(stat_host_db);
- return count;
-}
-
-static ssize_t ssi_sys_stats_cc_db_clear(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- init_db(stat_cc_db);
- return count;
-}
-
-static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- int i, j;
- char line[512];
- u32 min_cyc, max_cyc;
- u64 avg;
- ssize_t buf_len, tmp_len = 0;
-
- buf_len = scnprintf(buf, PAGE_SIZE,
- "phase\t\t\t\t\t\t\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
- if (buf_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
- return buf_len;
- for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) {
- for (j = 0; j < MAX_STAT_PHASES - 1; j++) {
- if (stat_host_db[i][j].count > 0) {
- avg = (u64)stat_host_db[i][j].sum;
- do_div(avg, stat_host_db[i][j].count);
- min_cyc = stat_host_db[i][j].min;
- max_cyc = stat_host_db[i][j].max;
- } else {
- avg = min_cyc = max_cyc = 0;
- }
- tmp_len = scnprintf(line, 512,
- "%s::%s\t\t\t\t\t%6u\t%6u\t%6u\t%7u\n",
- stat_name_db[i].op_type_name,
- stat_name_db[i].stat_phase_name[j],
- min_cyc, (unsigned int)avg, max_cyc,
- stat_host_db[i][j].count);
- if (tmp_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
- return buf_len;
- if (buf_len + tmp_len >= PAGE_SIZE)
- return buf_len;
- buf_len += tmp_len;
- strncat(buf, line, 512);
- }
- }
- return buf_len;
-}
-
-static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- int i;
- char line[256];
- u32 min_cyc, max_cyc;
- u64 avg;
- ssize_t buf_len, tmp_len = 0;
-
- buf_len = scnprintf(buf, PAGE_SIZE,
- "phase\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
- if (buf_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
- return buf_len;
- for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) {
- if (stat_cc_db[i][STAT_PHASE_6].count > 0) {
- avg = (u64)stat_cc_db[i][STAT_PHASE_6].sum;
- do_div(avg, stat_cc_db[i][STAT_PHASE_6].count);
- min_cyc = stat_cc_db[i][STAT_PHASE_6].min;
- max_cyc = stat_cc_db[i][STAT_PHASE_6].max;
- } else {
- avg = min_cyc = max_cyc = 0;
- }
- tmp_len = scnprintf(line, 256, "%s\t%6u\t%6u\t%6u\t%7u\n",
- stat_name_db[i].op_type_name, min_cyc,
- (unsigned int)avg, max_cyc,
- stat_cc_db[i][STAT_PHASE_6].count);
-
- if (tmp_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
- return buf_len;
-
- if (buf_len + tmp_len >= PAGE_SIZE)
- return buf_len;
- buf_len += tmp_len;
- strncat(buf, line, 256);
- }
- return buf_len;
-}
-
-void update_host_stat(unsigned int op_type, unsigned int phase, cycles_t result)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&stat_lock, flags);
- update_db(&stat_host_db[op_type][phase], (unsigned int)result);
- spin_unlock_irqrestore(&stat_lock, flags);
-}
-
-void update_cc_stat(
- unsigned int op_type,
- unsigned int phase,
- unsigned int elapsed_cycles)
-{
- update_db(&stat_cc_db[op_type][phase], elapsed_cycles);
-}
-
-void display_all_stat_db(void)
-{
- SSI_LOG_ERR("\n======= CYCLE COUNT STATS =======\n");
- display_db(stat_host_db);
- SSI_LOG_ERR("\n======= CC HW CYCLE COUNT STATS =======\n");
- display_db(stat_cc_db);
-}
-#endif /*CC_CYCLE_COUNT*/
-
static ssize_t ssi_sys_regdump_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct ssi_drvdata *drvdata = sys_get_drvdata();
u32 register_value;
- void __iomem *cc_base = drvdata->cc_base;
int offset = 0;
- register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
+ register_value = cc_ioread(drvdata, CC_REG(HOST_SIGNATURE));
offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_SIGNATURE ", DX_HOST_SIGNATURE_REG_OFFSET, register_value);
- register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
+ register_value = cc_ioread(drvdata, CC_REG(HOST_IRR));
offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_IRR ", DX_HOST_IRR_REG_OFFSET, register_value);
- register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN));
+ register_value = cc_ioread(drvdata, CC_REG(HOST_POWER_DOWN_EN));
offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_POWER_DOWN_EN ", DX_HOST_POWER_DOWN_EN_REG_OFFSET, register_value);
- register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
+ register_value = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "AXIM_MON_ERR ", DX_AXIM_MON_ERR_REG_OFFSET, register_value);
- register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_CONTENT));
+ register_value = cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "DSCRPTR_QUEUE_CONTENT", DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET, register_value);
return offset;
}
@@ -304,12 +49,6 @@ static ssize_t ssi_sys_help_show(struct kobject *kobj,
{
char *help_str[] = {
"cat reg_dump ", "Print several of CC register values",
- #if defined CC_CYCLE_COUNT
- "cat stats_host ", "Print host statistics",
- "echo <number> > stats_host", "Clear host statistics database",
- "cat stats_cc ", "Print CC statistics",
- "echo <number> > stats_cc ", "Clear CC statistics database",
- #endif
};
int i = 0, offset = 0;
@@ -376,7 +115,7 @@ static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
return -ENOMEM;
/* allocate memory for directory's attributes list */
sys_dir->sys_dir_attr_list =
- kzalloc(sizeof(struct attribute *) * (num_of_attrs + 1),
+ kcalloc(num_of_attrs + 1, sizeof(struct attribute *),
GFP_KERNEL);
if (!(sys_dir->sys_dir_attr_list)) {
@@ -413,14 +152,9 @@ static void sys_free_dir(struct sys_dir *sys_dir)
int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata)
{
int retval;
+ struct device *dev = drvdata_to_dev(drvdata);
-#if defined CC_CYCLE_COUNT
- /* Init. statistics */
- init_db(stat_host_db);
- init_db(stat_cc_db);
-#endif
-
- SSI_LOG_ERR("setup sysfs under %s\n", sys_dev_obj->name);
+ dev_info(dev, "setup sysfs under %s\n", sys_dev_obj->name);
/* Initialize top directory */
retval = sys_init_dir(&sys_top_dir, drvdata, sys_dev_obj, "cc_info",
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 7a655ed071a3..4218fc0e17f1 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -785,8 +785,8 @@ config COMEDI_ADV_PCI_DIO
---help---
Enable support for Advantech PCI DIO cards
PCI-1730, PCI-1733, PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U,
- PCI-1750, PCI-1751, PCI-1752, PCI-1753/E, PCI-1754, PCI-1756 and
- PCI-1762
+ PCI-1750, PCI-1751, PCI-1752, PCI-1753/E, PCI-1754, PCI-1756,
+ PCI-1761 and PCI-1762
To compile this driver as a module, choose M here: the module will be
called adv_pci_dio.
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 620cec13d74c..a8186687ca2c 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -12,9 +12,9 @@
* Devices: [Advantech] PCI-1730 (adv_pci_dio), PCI-1733,
* PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U, PCI-1750,
* PCI-1751, PCI-1752, PCI-1753, PCI-1753+PCI-1753E,
- * PCI-1754, PCI-1756, PCI-1762
+ * PCI-1754, PCI-1756, PCI-1761, PCI-1762
* Author: Michal Dobes <dobes@tesnet.cz>
- * Updated: Mon, 09 Jan 2012 12:40:46 +0000
+ * Updated: Fri, 25 Aug 2017 07:23:06 +0300
* Status: untested
*
* Configuration Options: not applicable, uses PCI auto config
@@ -50,6 +50,11 @@
/* PCI-1752, PCI-1756 special registers */
#define PCI1752_CFC_REG 0x12 /* R/W: channel freeze function */
+/* PCI-1761 interrupt control registers */
+#define PCI1761_INT_EN_REG 0x03 /* R/W: enable/disable interrupts */
+#define PCI1761_INT_RF_REG 0x04 /* R/W: falling/rising edge */
+#define PCI1761_INT_CLR_REG 0x05 /* R/W: clear interrupts */
+
/* PCI-1762 interrupt control registers */
#define PCI1762_INT_REG 0x06 /* R/W: status/control */
@@ -72,6 +77,7 @@ enum pci_dio_boardid {
TYPE_PCI1753E,
TYPE_PCI1754,
TYPE_PCI1756,
+ TYPE_PCI1761,
TYPE_PCI1762
};
@@ -181,6 +187,13 @@ static const struct dio_boardtype boardtypes[] = {
.id_reg = 0x10,
.is_16bit = 1,
},
+ [TYPE_PCI1761] = {
+ .name = "pci1761",
+ .nsubdevs = 3,
+ .sdi[1] = { 8, 0x01 }, /* ISO DI 0-7 */
+ .sdo[1] = { 8, 0x00 }, /* RELAY DO 0-7 */
+ .id_reg = 0x02,
+ },
[TYPE_PCI1762] = {
.name = "pci1762",
.nsubdevs = 3,
@@ -309,6 +322,14 @@ static int pci_dio_reset(struct comedi_device *dev, unsigned long cardtype)
outw(0x08, dev->iobase + PCI1754_INT_REG(3));
}
break;
+ case TYPE_PCI1761:
+ /* disable interrupts */
+ outb(0, dev->iobase + PCI1761_INT_EN_REG);
+ /* clear interrupts */
+ outb(0xff, dev->iobase + PCI1761_INT_CLR_REG);
+ /* set rising edge trigger */
+ outb(0, dev->iobase + PCI1761_INT_RF_REG);
+ break;
case TYPE_PCI1762:
outw(0x0101, dev->iobase + PCI1762_INT_REG);
break;
@@ -496,6 +517,7 @@ static const struct pci_device_id adv_pci_dio_pci_table[] = {
{ PCI_VDEVICE(ADVANTECH, 0x1753), TYPE_PCI1753 },
{ PCI_VDEVICE(ADVANTECH, 0x1754), TYPE_PCI1754 },
{ PCI_VDEVICE(ADVANTECH, 0x1756), TYPE_PCI1756 },
+ { PCI_VDEVICE(ADVANTECH, 0x1761), TYPE_PCI1761 },
{ PCI_VDEVICE(ADVANTECH, 0x1762), TYPE_PCI1762 },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index ccfd642998be..c7e8194984e5 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -93,6 +93,7 @@ struct waveform_private {
unsigned int ai_scan_period; /* AI scan period in usec */
unsigned int ai_convert_period; /* AI conversion period in usec */
struct timer_list ao_timer; /* timer for AO commands */
+ struct comedi_device *dev; /* parent comedi device */
u64 ao_last_scan_time; /* time of previous AO scan in usec */
unsigned int ao_scan_period; /* AO scan period in usec */
unsigned short ao_loopbacks[N_CHANS];
@@ -201,10 +202,10 @@ static unsigned short fake_waveform(struct comedi_device *dev,
* It should run in the background; therefore it is scheduled by
* a timer mechanism.
*/
-static void waveform_ai_timer(unsigned long arg)
+static void waveform_ai_timer(struct timer_list *t)
{
- struct comedi_device *dev = (struct comedi_device *)arg;
- struct waveform_private *devpriv = dev->private;
+ struct waveform_private *devpriv = from_timer(devpriv, t, ai_timer);
+ struct comedi_device *dev = devpriv->dev;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
@@ -438,10 +439,10 @@ static int waveform_ai_insn_read(struct comedi_device *dev,
* This is the background routine to handle AO commands, scheduled by
* a timer mechanism.
*/
-static void waveform_ao_timer(unsigned long arg)
+static void waveform_ao_timer(struct timer_list *t)
{
- struct comedi_device *dev = (struct comedi_device *)arg;
- struct waveform_private *devpriv = dev->private;
+ struct waveform_private *devpriv = from_timer(devpriv, t, ao_timer);
+ struct comedi_device *dev = devpriv->dev;
struct comedi_subdevice *s = dev->write_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
@@ -686,8 +687,9 @@ static int waveform_common_attach(struct comedi_device *dev,
for (i = 0; i < s->n_chan; i++)
devpriv->ao_loopbacks[i] = s->maxdata / 2;
- setup_timer(&devpriv->ai_timer, waveform_ai_timer, (unsigned long)dev);
- setup_timer(&devpriv->ao_timer, waveform_ao_timer, (unsigned long)dev);
+ devpriv->dev = dev;
+ timer_setup(&devpriv->ai_timer, waveform_ai_timer, 0);
+ timer_setup(&devpriv->ao_timer, waveform_ao_timer, 0);
dev_info(dev->class_dev,
"%s: %u microvolt, %u microsecond waveform attached\n",
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index 5d157951f63f..ddd4aeab6365 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -440,6 +440,7 @@ static inline int timer_period(void)
struct das16_private_struct {
struct comedi_isadma *dma;
+ struct comedi_device *dev;
unsigned int clockbase;
unsigned int ctrl_reg;
unsigned int divisor1;
@@ -525,10 +526,10 @@ static void das16_interrupt(struct comedi_device *dev)
comedi_handle_events(dev, s);
}
-static void das16_timer_interrupt(unsigned long arg)
+static void das16_timer_interrupt(struct timer_list *t)
{
- struct comedi_device *dev = (struct comedi_device *)arg;
- struct das16_private_struct *devpriv = dev->private;
+ struct das16_private_struct *devpriv = from_timer(devpriv, t, timer);
+ struct comedi_device *dev = devpriv->dev;
unsigned long flags;
das16_interrupt(dev);
@@ -934,6 +935,8 @@ static void das16_alloc_dma(struct comedi_device *dev, unsigned int dma_chan)
{
struct das16_private_struct *devpriv = dev->private;
+ timer_setup(&devpriv->timer, das16_timer_interrupt, 0);
+
/* only DMA channels 3 and 1 are valid */
if (!(dma_chan == 1 || dma_chan == 3))
return;
@@ -941,10 +944,6 @@ static void das16_alloc_dma(struct comedi_device *dev, unsigned int dma_chan)
/* DMA uses two buffers */
devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
DAS16_DMA_SIZE, COMEDI_ISADMA_READ);
- if (devpriv->dma) {
- setup_timer(&devpriv->timer, das16_timer_interrupt,
- (unsigned long)dev);
- }
}
static void das16_free_dma(struct comedi_device *dev)
@@ -952,8 +951,7 @@ static void das16_free_dma(struct comedi_device *dev)
struct das16_private_struct *devpriv = dev->private;
if (devpriv) {
- if (devpriv->timer.data)
- del_timer_sync(&devpriv->timer);
+ del_timer_sync(&devpriv->timer);
comedi_isadma_free(devpriv->dma);
}
}
@@ -1046,6 +1044,7 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
+ devpriv->dev = dev;
if (board->size < 0x400) {
ret = comedi_request_region(dev, it->options[0], board->size);
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index d5295bbdd28c..217a4b884689 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -1062,6 +1062,8 @@ static void dt282x_alloc_dma(struct comedi_device *dev,
PAGE_SIZE, 0);
if (!devpriv->dma)
free_irq(irq_num, dev);
+ else
+ dev->irq = irq_num;
}
static void dt282x_free_dma(struct comedi_device *dev)
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index f1c2a20a7d4d..cbff3b41bb45 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -96,6 +96,7 @@ struct jr3_pci_poll_delay {
struct jr3_pci_dev_private {
struct timer_list timer;
+ struct comedi_device *dev;
};
union jr3_pci_single_range {
@@ -585,10 +586,10 @@ jr3_pci_poll_subdevice(struct comedi_subdevice *s)
return result;
}
-static void jr3_pci_poll_dev(unsigned long data)
+static void jr3_pci_poll_dev(struct timer_list *t)
{
- struct comedi_device *dev = (struct comedi_device *)data;
- struct jr3_pci_dev_private *devpriv = dev->private;
+ struct jr3_pci_dev_private *devpriv = from_timer(devpriv, t, timer);
+ struct comedi_device *dev = devpriv->dev;
struct jr3_pci_subdev_private *spriv;
struct comedi_subdevice *s;
unsigned long flags;
@@ -770,7 +771,8 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
spriv->next_time_min = jiffies + msecs_to_jiffies(500);
}
- setup_timer(&devpriv->timer, jr3_pci_poll_dev, (unsigned long)dev);
+ devpriv->dev = dev;
+ timer_setup(&devpriv->timer, jr3_pci_poll_dev, 0);
devpriv->timer.expires = jiffies + msecs_to_jiffies(1000);
add_timer(&devpriv->timer);
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
index c80527db9c19..e226275972c0 100644
--- a/drivers/staging/comedi/drivers/s526.c
+++ b/drivers/staging/comedi/drivers/s526.c
@@ -408,9 +408,8 @@ static int s526_gpct_winsn(struct comedi_device *dev,
*/
if ((data[1] <= data[0]) || !data[0])
return -EINVAL;
-
- /* Fall thru to write the PULSE_WIDTH */
-
+ /* to write the PULSE_WIDTH */
+ /* fall through */
case INSN_CONFIG_GPCT_QUADRATURE_ENCODER:
case INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR:
s526_gpct_write(dev, chan, data[0]);
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index c1b6079384e9..d9bf5da1b8e5 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -291,7 +291,7 @@ static void dgnc_free_irq(struct dgnc_board *brd)
* waiter needs to be woken up, and (b) whether the poller needs to
* be rescheduled.
*/
-static void dgnc_poll_handler(ulong dummy)
+static void dgnc_poll_handler(struct timer_list *unused)
{
struct dgnc_board *brd;
unsigned long flags;
@@ -323,7 +323,7 @@ static void dgnc_poll_handler(ulong dummy)
if ((ulong)new_time >= 2 * dgnc_poll_tick)
dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
- setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0);
+ timer_setup(&dgnc_poll_timer, dgnc_poll_handler, 0);
dgnc_poll_timer.expires = dgnc_poll_time;
spin_unlock_irqrestore(&dgnc_poll_lock, flags);
@@ -392,8 +392,6 @@ static int dgnc_start(void)
unsigned long flags;
struct device *dev;
- init_timer(&dgnc_poll_timer);
-
rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
if (rc < 0) {
pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
@@ -419,7 +417,7 @@ static int dgnc_start(void)
/* Start the poller */
spin_lock_irqsave(&dgnc_poll_lock, flags);
- setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0);
+ timer_setup(&dgnc_poll_timer, dgnc_poll_handler, 0);
dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
dgnc_poll_timer.expires = dgnc_poll_time;
spin_unlock_irqrestore(&dgnc_poll_lock, flags);
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index 764d6fe0d030..efdb11a5e27f 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -104,7 +104,6 @@ struct board_ops {
* struct dgnc_board - Per board information.
* @boardnum: Board number (0 - 32).
*
- * @type: Type of board.
* @name: Product name.
* @pdev: Pointer to the pci_dev structure.
* @bd_flags: Board flags.
@@ -140,13 +139,9 @@ struct board_ops {
* @dpastatus: Board status as defined by DPA.
* @bd_dividend: Board/UART's specific dividend.
* @bd_ops: Pointer to board operations structure.
- * @proc_entry_pointer: Proc/<board> entry
- * @dgnc_board_table: Proc/<board> entry
*/
struct dgnc_board {
int boardnum;
-
- int type;
char *name;
struct pci_dev *pdev;
unsigned long bd_flags;
@@ -200,10 +195,6 @@ struct dgnc_board {
uint bd_dividend;
struct board_ops *bd_ops;
-
- struct proc_dir_entry *proc_entry_pointer;
- struct dgnc_proc_entry *dgnc_board_table;
-
};
/* Unit flag definitions for un_flags. */
@@ -233,7 +224,6 @@ struct device;
*/
struct un_t {
struct channel_t *un_ch;
- ulong un_time;
uint un_type;
uint un_open_count;
struct tty_struct *un_tty;
@@ -321,8 +311,6 @@ struct un_t {
* @ch_err_overrun: Count of overruns on channel.
* @ch_xon_sends: Count of xons transmitted.
* @ch_xoff_sends: Count of xoffs transmitted.
- * @proc_entry_pointer: Proc/<board>/<channel> entry.
- * @dgnc_channel_table: Proc/<board>/<channel> entry.
*/
struct channel_t {
struct dgnc_board *ch_bd;
@@ -391,10 +379,6 @@ struct channel_t {
ulong ch_xon_sends;
ulong ch_xoff_sends;
-
- struct proc_dir_entry *proc_entry_pointer;
- struct dgnc_proc_entry *dgnc_channel_table;
-
};
extern uint dgnc_major; /* Our driver/mgmt major */
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index 1943e66fec57..0ae229c3aaaa 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -1182,7 +1182,6 @@ static int neo_drain(struct tty_struct *tty, uint seconds)
unsigned long flags;
struct channel_t *ch;
struct un_t *un;
- int rc = 0;
if (!tty)
return -ENXIO;
@@ -1199,12 +1198,10 @@ static int neo_drain(struct tty_struct *tty, uint seconds)
un->un_flags |= UN_EMPTY;
spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = wait_event_interruptible_timeout(un->un_flags_wait,
- ((un->un_flags & UN_EMPTY) == 0),
- msecs_to_jiffies(seconds * 1000));
-
- /* If ret is non-zero, user ctrl-c'ed us */
- return rc;
+ /* If returned value is non-zero, user ctrl-c'ed us */
+ return wait_event_interruptible_timeout(un->un_flags_wait,
+ ((un->un_flags & UN_EMPTY) == 0),
+ msecs_to_jiffies(seconds * 1000));
}
/*
diff --git a/drivers/staging/fbtft/fb_ra8875.c b/drivers/staging/fbtft/fb_ra8875.c
index a899614ce829..6d1cad85957b 100644
--- a/drivers/staging/fbtft/fb_ra8875.c
+++ b/drivers/staging/fbtft/fb_ra8875.c
@@ -253,7 +253,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
static int write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16;
- __be16 *txbuf16 = par->txbuf.buf;
+ __be16 *txbuf16;
size_t remain;
size_t to_copy;
size_t tx_array_size;
@@ -267,10 +267,10 @@ static int write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
remain = len / 2;
vmem16 = (u16 *)(par->info->screen_buffer + offset);
tx_array_size = par->txbuf.len / 2;
- txbuf16 = par->txbuf.buf + 1;
- tx_array_size -= 2;
- *(u8 *)(par->txbuf.buf) = 0x00;
- startbyte_size = 1;
+ txbuf16 = par->txbuf.buf + 1;
+ tx_array_size -= 2;
+ *(u8 *)(par->txbuf.buf) = 0x00;
+ startbyte_size = 1;
while (remain) {
to_copy = min(tx_array_size, remain);
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
index 9aa9864fcf30..e4a759b54ba0 100644
--- a/drivers/staging/fbtft/fb_ssd1331.c
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -26,7 +26,13 @@ static int init_display(struct fbtft_par *par)
par->fbtftops.reset(par);
write_reg(par, 0xae); /* Display Off */
- write_reg(par, 0xa0, 0x70 | (par->bgr << 2)); /* Set Colour Depth */
+
+ /* Set Column Address Mapping, COM Scan Direction and Colour Depth */
+ if (par->info->var.rotate == 180)
+ write_reg(par, 0xa0, 0x60 | (par->bgr << 2));
+ else
+ write_reg(par, 0xa0, 0x72 | (par->bgr << 2));
+
write_reg(par, 0x72); /* RGB colour */
write_reg(par, 0xa1, 0x00); /* Set Display Start Line */
write_reg(par, 0xa2, 0x00); /* Set Display Offset */
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index 48e3b3fd9fed..8eb5e7f10fb5 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -225,7 +225,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
u8 *buf8 = par->txbuf.buf;
u16 *buf16 = par->txbuf.buf;
int line_length = par->info->fix.line_length;
- int y_start = (offset / line_length);
+ int y_start = offset / line_length;
int y_end = (offset + len - 1) / line_length;
int x, y, i;
int ret = 0;
diff --git a/drivers/staging/fbtft/fb_uc1701.c b/drivers/staging/fbtft/fb_uc1701.c
index b78045fe5393..78899a172c7e 100644
--- a/drivers/staging/fbtft/fb_uc1701.c
+++ b/drivers/staging/fbtft/fb_uc1701.c
@@ -127,7 +127,7 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16 = (u16 *)par->info->screen_buffer;
- u8 *buf = par->txbuf.buf;
+ u8 *buf;
int x, y, i;
int ret = 0;
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
index 2a8eef15c439..a263bce260c9 100644
--- a/drivers/staging/fbtft/fbtft-bus.c
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -11,33 +11,33 @@
*
*****************************************************************************/
-#define define_fbtft_write_reg(func, type, modifier) \
+#define define_fbtft_write_reg(func, buffer_type, data_type, modifier) \
void func(struct fbtft_par *par, int len, ...) \
{ \
va_list args; \
int i, ret; \
int offset = 0; \
- type *buf = (type *)par->buf; \
+ buffer_type *buf = (buffer_type *)par->buf; \
\
if (unlikely(par->debug & DEBUG_WRITE_REGISTER)) { \
va_start(args, len); \
for (i = 0; i < len; i++) { \
- buf[i] = (type)va_arg(args, unsigned int); \
+ buf[i] = modifier((data_type)va_arg(args, unsigned int)); \
} \
va_end(args); \
- fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device, type, buf, len, "%s: ", __func__); \
+ fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device, buffer_type, buf, len, "%s: ", __func__); \
} \
\
va_start(args, len); \
\
if (par->startbyte) { \
*(u8 *)par->buf = par->startbyte; \
- buf = (type *)(par->buf + 1); \
+ buf = (buffer_type *)(par->buf + 1); \
offset = 1; \
} \
\
- *buf = modifier((type)va_arg(args, unsigned int)); \
- ret = fbtft_write_buf_dc(par, par->buf, sizeof(type) + offset, 0); \
+ *buf = modifier((data_type)va_arg(args, unsigned int)); \
+ ret = fbtft_write_buf_dc(par, par->buf, sizeof(data_type) + offset, 0); \
if (ret < 0) \
goto out; \
len--; \
@@ -48,18 +48,18 @@ void func(struct fbtft_par *par, int len, ...) \
if (len) { \
i = len; \
while (i--) \
- *buf++ = modifier((type)va_arg(args, unsigned int)); \
+ *buf++ = modifier((data_type)va_arg(args, unsigned int)); \
fbtft_write_buf_dc(par, par->buf, \
- len * (sizeof(type) + offset), 1); \
+ len * (sizeof(data_type) + offset), 1); \
} \
out: \
va_end(args); \
} \
EXPORT_SYMBOL(func);
-define_fbtft_write_reg(fbtft_write_reg8_bus8, u8, )
-define_fbtft_write_reg(fbtft_write_reg16_bus8, u16, cpu_to_be16)
-define_fbtft_write_reg(fbtft_write_reg16_bus16, u16, )
+define_fbtft_write_reg(fbtft_write_reg8_bus8, u8, u8, )
+define_fbtft_write_reg(fbtft_write_reg16_bus8, __be16, u16, cpu_to_be16)
+define_fbtft_write_reg(fbtft_write_reg16_bus16, u16, u16, )
void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...)
{
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
index 26017fe9df93..0d8ed002adcb 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
@@ -104,9 +104,11 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
/* We don't support any other format */
return;
- /* For S/G frames, we first need to free all SG entries */
+ /* For S/G frames, we first need to free all SG entries
+ * except the first one, which was taken care of already
+ */
sgt = vaddr + dpaa2_fd_get_offset(fd);
- for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+ for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
addr = dpaa2_sg_get_addr(&sgt[i]);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
@@ -131,16 +133,15 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
u16 fd_offset = dpaa2_fd_get_offset(fd);
u32 fd_length = dpaa2_fd_get_len(fd);
- skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ ch->buf_count--;
+
+ skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
if (unlikely(!skb))
return NULL;
skb_reserve(skb, fd_offset);
skb_put(skb, fd_length);
- ch->buf_count--;
-
return skb;
}
@@ -176,10 +177,21 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
if (i == 0) {
/* We build the skb around the first data buffer */
- skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
- if (unlikely(!skb))
- return NULL;
+ skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
+ if (unlikely(!skb)) {
+ /* Free the first SG entry now, since we already
+ * unmapped it and obtained the virtual address
+ */
+ skb_free_frag(sg_vaddr);
+
+ /* We still need to subtract the buffers used
+ * by this FD from our software counter
+ */
+ while (!dpaa2_sg_is_final(&sgt[i]) &&
+ i < DPAA2_ETH_MAX_SG_ENTRIES)
+ i++;
+ break;
+ }
sg_offset = dpaa2_sg_get_offset(sge);
skb_reserve(skb, sg_offset);
@@ -206,6 +218,8 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
break;
}
+ WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
+
/* Count all data buffers + SG table buffer */
ch->buf_count -= i + 2;
@@ -557,10 +571,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
- if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
+ if (unlikely(skb_headroom(skb) < dpaa2_eth_needed_headroom(priv))) {
struct sk_buff *ns;
- ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
+ ns = skb_realloc_headroom(skb, dpaa2_eth_needed_headroom(priv));
if (unlikely(!ns)) {
percpu_stats->tx_dropped++;
goto err_alloc_headroom;
@@ -718,6 +732,23 @@ static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
return 0;
}
+/* Free buffers acquired from the buffer pool or which were meant to
+ * be released in the pool
+ */
+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ void *vaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
+ dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ skb_free_frag(vaddr);
+ }
+}
+
/* Perform a single release command to add buffers
* to the specified buffer pool
*/
@@ -727,17 +758,17 @@ static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
void *buf;
dma_addr_t addr;
- int i;
+ int i, err;
for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
/* Allocate buffer visible to WRIOP + skb shared info +
* alignment padding
*/
- buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
+ buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
if (unlikely(!buf))
goto err_alloc;
- buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
+ buf = PTR_ALIGN(buf, priv->rx_buf_align);
addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
DMA_FROM_DEVICE);
@@ -748,28 +779,33 @@ static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
/* tracing point */
trace_dpaa2_eth_buf_seed(priv->net_dev,
- buf, DPAA2_ETH_BUF_RAW_SIZE,
+ buf, dpaa2_eth_buf_raw_size(priv),
addr, DPAA2_ETH_RX_BUF_SIZE,
bpid);
}
release_bufs:
- /* In case the portal is busy, retry until successful.
- * The buffer release function would only fail if the QBMan portal
- * was busy, which implies portal contention (i.e. more CPUs than
- * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
- * there is little we can realistically do, short of giving up -
- * in which case we'd risk depleting the buffer pool and never again
- * receiving the Rx interrupt which would kick-start the refill logic.
- * So just keep retrying, at the risk of being moved to ksoftirqd.
- */
- while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
+ /* In case the portal is busy, retry until successful */
+ while ((err = dpaa2_io_service_release(NULL, bpid,
+ buf_array, i)) == -EBUSY)
cpu_relax();
+
+ /* If release command failed, clean up and bail out;
+ * not much else we can do about it
+ */
+ if (err) {
+ free_bufs(priv, buf_array, i);
+ return 0;
+ }
+
return i;
err_map:
skb_free_frag(buf);
err_alloc:
+ /* If we managed to allocate at least some buffers,
+ * release them to hardware
+ */
if (i)
goto release_bufs;
@@ -811,10 +847,8 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
*/
static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
- struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
- void *vaddr;
- int ret, i;
+ int ret;
do {
ret = dpaa2_io_service_acquire(NULL, priv->bpid,
@@ -823,15 +857,7 @@ static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
return;
}
- for (i = 0; i < ret; i++) {
- /* Same logic as on regular Rx path */
- vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
- buf_array[i]);
- dma_unmap_single(dev, buf_array[i],
- DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
- skb_free_frag(vaddr);
- }
+ free_bufs(priv, buf_array, ret);
} while (ret);
}
@@ -927,13 +953,14 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
break;
}
- if (cleaned < budget) {
- napi_complete_done(napi, cleaned);
+ if (cleaned < budget && napi_complete_done(napi, cleaned)) {
/* Re-enable data available notifications */
do {
err = dpaa2_io_service_rearm(NULL, &ch->nctx);
cpu_relax();
} while (err == -EBUSY);
+ WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
+ ch->nctx.desired_cpu);
}
ch->stats.frames += cleaned;
@@ -1415,34 +1442,32 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
if (err) {
dev_err(dev, "dpcon_open() failed\n");
- goto err_open;
+ goto free;
}
err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
if (err) {
dev_err(dev, "dpcon_reset() failed\n");
- goto err_reset;
+ goto close;
}
err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
if (err) {
dev_err(dev, "dpcon_get_attributes() failed\n");
- goto err_get_attr;
+ goto close;
}
err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
if (err) {
dev_err(dev, "dpcon_enable() failed\n");
- goto err_enable;
+ goto close;
}
return dpcon;
-err_enable:
-err_get_attr:
-err_reset:
+close:
dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
-err_open:
+free:
fsl_mc_object_free(dpcon);
return NULL;
@@ -1749,66 +1774,32 @@ static void free_dpbp(struct dpaa2_eth_priv *priv)
fsl_mc_object_free(priv->dpbp_dev);
}
-/* Configure the DPNI object this interface is associated with */
-static int setup_dpni(struct fsl_mc_device *ls_dev)
+static int set_buffer_layout(struct dpaa2_eth_priv *priv)
{
- struct device *dev = &ls_dev->dev;
- struct dpaa2_eth_priv *priv;
- struct net_device *net_dev;
+ struct device *dev = priv->net_dev->dev.parent;
struct dpni_buffer_layout buf_layout = {0};
int err;
- net_dev = dev_get_drvdata(dev);
- priv = netdev_priv(net_dev);
-
- /* get a handle for the DPNI object */
- err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
- if (err) {
- dev_err(dev, "dpni_open() failed\n");
- goto err_open;
- }
-
- ls_dev->mc_io = priv->mc_io;
- ls_dev->mc_handle = priv->mc_token;
-
- err = dpni_reset(priv->mc_io, 0, priv->mc_token);
- if (err) {
- dev_err(dev, "dpni_reset() failed\n");
- goto err_reset;
- }
-
- err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
- &priv->dpni_attrs);
- if (err) {
- dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
- goto err_get_attr;
- }
+ /* We need to check for WRIOP version 1.0.0, but depending on the MC
+ * version, this number is not always provided correctly on rev1.
+ * We need to check for both alternatives in this situation.
+ */
+ if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
+ priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
+ priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
+ else
+ priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
- /* Configure buffer layouts */
- /* rx buffer */
- buf_layout.pass_parser_result = true;
+ /* tx buffer */
buf_layout.pass_frame_status = true;
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
- buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
- buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
- DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
- DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
- DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
- err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
- DPNI_QUEUE_RX, &buf_layout);
- if (err) {
- dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
- goto err_buf_layout;
- }
-
- /* tx buffer */
buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX, &buf_layout);
if (err) {
dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
- goto err_buf_layout;
+ return err;
}
/* tx-confirm buffer */
@@ -1817,7 +1808,7 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
DPNI_QUEUE_TX_CONFIRM, &buf_layout);
if (err) {
dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
- goto err_buf_layout;
+ return err;
}
/* Now that we've set our tx buffer layout, retrieve the minimum
@@ -1827,24 +1818,76 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
&priv->tx_data_offset);
if (err) {
dev_err(dev, "dpni_get_tx_data_offset() failed\n");
- goto err_data_offset;
+ return err;
}
if ((priv->tx_data_offset % 64) != 0)
dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
priv->tx_data_offset);
- /* Accommodate software annotation space (SWA) */
- priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
+ /* rx buffer */
+ buf_layout.pass_parser_result = true;
+ buf_layout.data_align = priv->rx_buf_align;
+ buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
+ buf_layout.private_data_size = 0;
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
+ dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
+ return err;
+ }
return 0;
+}
-err_data_offset:
-err_buf_layout:
-err_get_attr:
-err_reset:
+/* Configure the DPNI object this interface is associated with */
+static int setup_dpni(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_eth_priv *priv;
+ struct net_device *net_dev;
+ int err;
+
+ net_dev = dev_get_drvdata(dev);
+ priv = netdev_priv(net_dev);
+
+ /* get a handle for the DPNI object */
+ err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
+ if (err) {
+ dev_err(dev, "dpni_open() failed\n");
+ return err;
+ }
+
+ ls_dev->mc_io = priv->mc_io;
+ ls_dev->mc_handle = priv->mc_token;
+
+ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
+ if (err) {
+ dev_err(dev, "dpni_reset() failed\n");
+ goto close;
+ }
+
+ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
+ &priv->dpni_attrs);
+ if (err) {
+ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
+ goto close;
+ }
+
+ err = set_buffer_layout(priv);
+ if (err)
+ goto close;
+
+
+ return 0;
+
+close:
dpni_close(priv->mc_io, 0, priv->mc_token);
-err_open:
+
return err;
}
@@ -2085,7 +2128,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
*/
err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
if (err)
- netdev_err(net_dev, "Failed to configure hashing\n");
+ dev_err(dev, "Failed to configure hashing\n");
/* Configure handling of error frames */
err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
@@ -2230,6 +2273,7 @@ static int netdev_init(struct net_device *net_dev)
{
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u16 rx_headroom, req_headroom;
u8 bcast_addr[ETH_ALEN];
u8 num_queues;
int err;
@@ -2251,7 +2295,20 @@ static int netdev_init(struct net_device *net_dev)
/* Reserve enough space to align buffer as per hardware requirement;
* NOTE: priv->tx_data_offset MUST be initialized at this point.
*/
- net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
+ net_dev->needed_headroom = dpaa2_eth_needed_headroom(priv);
+
+ /* If headroom guaranteed by hardware in the Rx frame buffer is
+ * smaller than the Tx headroom required by the stack, issue a
+ * one time warning. This will most likely mean skbs forwarded to
+ * another DPAA2 network interface will get reallocated, with a
+ * significant performance impact.
+ */
+ req_headroom = LL_RESERVED_SPACE(net_dev) - ETH_HLEN;
+ rx_headroom = ALIGN(DPAA2_ETH_RX_HWA_SIZE +
+ dpaa2_eth_rx_head_room(priv), priv->rx_buf_align);
+ if (req_headroom > rx_headroom)
+ dev_info_once(dev, "Required headroom (%d) greater than available (%d)\n",
+ req_headroom, rx_headroom);
/* Set MTU limits */
net_dev->min_mtu = 68;
@@ -2303,7 +2360,7 @@ static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
- u32 status = 0, clear = 0;
+ u32 status = ~0;
struct device *dev = (struct device *)arg;
struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
struct net_device *net_dev = dev_get_drvdata(dev);
@@ -2313,18 +2370,12 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
DPNI_IRQ_INDEX, &status);
if (unlikely(err)) {
netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
- clear = 0xffffffff;
- goto out;
+ return IRQ_HANDLED;
}
- if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
- clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
link_state_update(netdev_priv(net_dev));
- }
-out:
- dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
- DPNI_IRQ_INDEX, clear);
return IRQ_HANDLED;
}
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
index bfbabae1aad8..5b3ab9f62d5e 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
@@ -45,6 +45,8 @@
#include "dpaa2-eth-trace.h"
+#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
+
#define DPAA2_ETH_STORE_SIZE 16
/* Maximum number of scatter-gather entries in an ingress frame,
@@ -80,23 +82,21 @@
*/
#define DPAA2_ETH_BUFS_PER_CMD 7
-/* Hardware requires alignment for ingress/egress buffer addresses
- * and ingress buffer lengths.
- */
-#define DPAA2_ETH_RX_BUF_SIZE 2048
+/* Hardware requires alignment for ingress/egress buffer addresses */
#define DPAA2_ETH_TX_BUF_ALIGN 64
-#define DPAA2_ETH_RX_BUF_ALIGN 256
-#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
- ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
-/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
- * buffers large enough to allow building an skb around them and also account
- * for alignment restrictions
+#define DPAA2_ETH_RX_BUF_SIZE 2048
+#define DPAA2_ETH_SKB_SIZE \
+ (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* Hardware annotation area in RX buffers */
+#define DPAA2_ETH_RX_HWA_SIZE 64
+
+/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
+ * to 256B. For newer revisions, the requirement is only for 64B alignment
*/
-#define DPAA2_ETH_BUF_RAW_SIZE \
- (DPAA2_ETH_RX_BUF_SIZE + \
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
- DPAA2_ETH_RX_BUF_ALIGN)
+#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
+#define DPAA2_ETH_RX_BUF_ALIGN 64
/* We are accommodating a skb backpointer and some S/G info
* in the frame's software annotation. The hardware
@@ -134,7 +134,7 @@ struct dpaa2_eth_swa {
DPAA2_FD_CTRL_FAERR)
/* Annotation bits in FD CTRL */
-#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
+#define DPAA2_FD_CTRL_ASAL 0x00010000 /* ASAL = 64 */
#define DPAA2_FD_CTRL_PTA 0x00800000
#define DPAA2_FD_CTRL_PTV1 0x00400000
@@ -318,6 +318,7 @@ struct dpaa2_eth_priv {
struct iommu_domain *iommu_domain;
u16 tx_qdid;
+ u16 rx_buf_align;
struct fsl_mc_io *mc_io;
/* Cores which have an affine DPIO/DPCON.
* This is the cpu set on which Rx and Tx conf frames are processed
@@ -353,6 +354,29 @@ struct dpaa2_eth_priv {
extern const struct ethtool_ops dpaa2_ethtool_ops;
extern const char dpaa2_eth_drv_version[];
+/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
+ * the buffer also needs space for its shared info struct, and we need
+ * to allocate enough to accommodate hardware alignment restrictions
+ */
+static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv)
+{
+ return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align;
+}
+
+static inline
+unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv)
+{
+ return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - HH_DATA_MOD;
+}
+
+/* Extra headroom space requested to hardware, in order to make sure there's
+ * no realloc'ing in forwarding scenarios
+ */
+static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
+{
+ return dpaa2_eth_needed_headroom(priv) - DPAA2_ETH_RX_HWA_SIZE;
+}
+
static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
{
return priv->dpni_attrs.num_queues;
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
index 031179ab3a22..ebe8fd6ccf2c 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
@@ -76,10 +76,22 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *drvinfo)
{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u16 fw_major, fw_minor;
+ int err;
+
strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, dpaa2_eth_drv_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+
+ err = dpni_get_api_version(priv->mc_io, 0, &fw_major, &fw_minor);
+ if (!err)
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u", fw_major, fw_minor);
+ else
+ strlcpy(drvinfo->fw_version, "N/A",
+ sizeof(drvinfo->fw_version));
+
strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
index 57df22292233..3120e22496d0 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
@@ -538,4 +538,9 @@ struct dpni_rsp_get_taildrop {
__le32 threshold;
};
+struct dpni_rsp_get_api_version {
+ u16 major;
+ u16 minor;
+};
+
#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
index 04a5b14bc1c5..e8be76181c36 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
@@ -1594,3 +1594,35 @@ int dpni_get_taildrop(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dpni_get_api_version() - Get Data Path Network Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path network interface API
+ * @minor_ver: Minor version of data path network interface API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct dpni_rsp_get_api_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.h b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
index 282e5e85ffa7..ce86a816af45 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
@@ -829,4 +829,9 @@ struct dpni_rule_cfg {
u8 key_size;
};
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
#endif /* __FSL_DPNI_H */
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
index f8096828f5b7..a609ec82daf3 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
@@ -76,7 +76,7 @@ static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
if (d)
return d;
- if (unlikely(cpu >= num_possible_cpus()))
+ if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
return NULL;
/*
@@ -121,7 +121,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
return NULL;
/* check if CPU is out of range (-1 means any cpu) */
- if (desc->cpu >= num_possible_cpus()) {
+ if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
kfree(obj);
return NULL;
}
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
index 038da4d1ebd0..f74a6f1764bb 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
@@ -137,7 +137,7 @@ static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
{
struct irq_chip *chip = info->chip;
- if (WARN_ON((!chip)))
+ if (WARN_ON(!chip))
return;
/*
diff --git a/drivers/staging/fsl-mc/include/dpaa2-io.h b/drivers/staging/fsl-mc/include/dpaa2-io.h
index c5646096c5d4..afc2d060d077 100644
--- a/drivers/staging/fsl-mc/include/dpaa2-io.h
+++ b/drivers/staging/fsl-mc/include/dpaa2-io.h
@@ -54,6 +54,8 @@ struct device;
* for dequeue.
*/
+#define DPAA2_IO_ANY_CPU -1
+
/**
* struct dpaa2_io_desc - The DPIO descriptor
 * @receives_notifications: Use notification mode. Non-zero if the DPIO
@@ -91,8 +93,8 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
* @cb: The callback to be invoked when the notification arrives
* @is_cdan: Zero for FQDAN, non-zero for CDAN
* @id: FQID or channel ID, needed for rearm
- * @desired_cpu: The cpu on which the notifications will show up. -1 means
- * any CPU.
+ * @desired_cpu: The cpu on which the notifications will show up. Use
+ * DPAA2_IO_ANY_CPU if you don't care
* @dpio_id: The dpio index
* @qman64: The 64-bit context value shows up in the FQDAN/CDAN.
* @node: The list node
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 41a49c8194e5..bba7e9c888b3 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -217,13 +217,6 @@ static void fwtty_log_tx_error(struct fwtty_port *port, int rcode)
}
}
-static void fwtty_txn_constructor(void *this)
-{
- struct fwtty_transaction *txn = this;
-
- init_timer(&txn->fw_txn.split_timeout_timer);
-}
-
static void fwtty_common_callback(struct fw_card *card, int rcode,
void *payload, size_t len, void *cb_data)
{
@@ -1806,9 +1799,9 @@ static void fwserial_release_port(struct fwtty_port *port, bool reset)
(*port->fwcon_ops->notify)(FWCON_NOTIFY_DETACH, port->con_data);
}
-static void fwserial_plug_timeout(unsigned long data)
+static void fwserial_plug_timeout(struct timer_list *t)
{
- struct fwtty_peer *peer = (struct fwtty_peer *)data;
+ struct fwtty_peer *peer = from_timer(peer, t, timer);
struct fwtty_port *port;
spin_lock_bh(&peer->lock);
@@ -1860,7 +1853,6 @@ static int fwserial_connect_peer(struct fwtty_peer *peer)
fill_plug_req(pkt, peer->port);
- setup_timer(&peer->timer, fwserial_plug_timeout, (unsigned long)peer);
mod_timer(&peer->timer, jiffies + VIRT_CABLE_PLUG_TIMEOUT);
spin_unlock_bh(&peer->lock);
@@ -2098,7 +2090,7 @@ static int fwserial_add_peer(struct fw_serial *serial, struct fw_unit *unit)
spin_lock_init(&peer->lock);
peer->port = NULL;
- init_timer(&peer->timer);
+ timer_setup(&peer->timer, fwserial_plug_timeout, 0);
INIT_WORK(&peer->work, fwserial_peer_workfn);
INIT_DELAYED_WORK(&peer->connect, fwserial_auto_connect);
@@ -2863,7 +2855,7 @@ static int __init fwserial_init(void)
fwtty_txn_cache = kmem_cache_create("fwtty_txn_cache",
sizeof(struct fwtty_transaction),
- 0, 0, fwtty_txn_constructor);
+ 0, 0, NULL);
if (!fwtty_txn_cache) {
err = -ENOMEM;
goto unregister_loop;
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 9ab6ce231f11..0527b0d1c1d0 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -26,6 +26,7 @@
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/uaccess.h>
+#include <linux/errno.h>
#include <net/ndisc.h>
#include "gdm_lte.h"
@@ -118,6 +119,10 @@ static int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
void *mac_header_data;
u32 mac_header_len;
+ /* Check for skb->len, discard if empty */
+ if (skb_in->len == 0)
+ return -ENODATA;
+
/* Format the mac header so that it can be put to skb */
if (ntohs(((struct ethhdr *)skb_in->data)->h_proto) == ETH_P_8021Q) {
memcpy(&vlan_eth, skb_in->data, sizeof(struct vlan_ethhdr));
@@ -241,13 +246,13 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
if (ntohs(((struct ethhdr *)skb_in->data)->h_proto) == ETH_P_8021Q) {
memcpy(&vlan_eth, skb_in->data, sizeof(struct vlan_ethhdr));
if (ntohs(vlan_eth.h_vlan_encapsulated_proto) != ETH_P_IPV6)
- return -1;
+ return -EPROTONOSUPPORT;
mac_header_data = &vlan_eth;
mac_header_len = VLAN_ETH_HLEN;
} else {
memcpy(&eth, skb_in->data, sizeof(struct ethhdr));
if (ntohs(eth.h_proto) != ETH_P_IPV6)
- return -1;
+ return -EPROTONOSUPPORT;
mac_header_data = &eth;
mac_header_len = ETH_HLEN;
}
@@ -255,13 +260,13 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
/* Check if this is IPv6 ICMP packet */
ipv6_in = (struct ipv6hdr *)(skb_in->data + mac_header_len);
if (ipv6_in->version != 6 || ipv6_in->nexthdr != IPPROTO_ICMPV6)
- return -1;
+ return -EPROTONOSUPPORT;
/* Check if this is NDP packet */
icmp6_in = (struct icmp6hdr *)(skb_in->data + mac_header_len +
sizeof(struct ipv6hdr));
if (icmp6_in->icmp6_type == NDISC_ROUTER_SOLICITATION) { /* Check RS */
- return -1;
+ return -EPROTONOSUPPORT;
} else if (icmp6_in->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
/* Check NS */
u8 icmp_na[sizeof(struct icmp6hdr) +
@@ -305,7 +310,7 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
icmp6_out.icmp6_cksum = icmp6_checksum(&ipv6_out,
(u16 *)icmp_na, sizeof(icmp_na));
} else {
- return -1;
+ return -EINVAL;
}
/* Fill the destination mac with source mac of the received packet */
@@ -412,7 +417,7 @@ static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
nic_type = gdm_lte_tx_nic_type(dev, skb);
if (nic_type == 0) {
netdev_err(dev, "tx - invalid nic_type\n");
- return -1;
+ return -EMEDIUMTYPE;
}
if (nic_type & NIC_TYPE_ARP) {
@@ -539,7 +544,7 @@ int gdm_lte_event_init(void)
}
pr_err("event init failed\n");
- return -1;
+ return -ENODATA;
}
void gdm_lte_event_exit(void)
diff --git a/drivers/staging/greybus/Documentation/firmware/authenticate.c b/drivers/staging/greybus/Documentation/firmware/authenticate.c
index b836f0a20c36..806e75b7f405 100644
--- a/drivers/staging/greybus/Documentation/firmware/authenticate.c
+++ b/drivers/staging/greybus/Documentation/firmware/authenticate.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* Sample code to test CAP protocol
*
diff --git a/drivers/staging/greybus/Documentation/firmware/firmware.c b/drivers/staging/greybus/Documentation/firmware/firmware.c
index c73dee9d13c1..31d9c23e2eeb 100644
--- a/drivers/staging/greybus/Documentation/firmware/firmware.c
+++ b/drivers/staging/greybus/Documentation/firmware/firmware.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* Sample code to test firmware-management protocol
*
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
index 0412f3d06efb..b0c66112eb18 100644
--- a/drivers/staging/greybus/arche-apb-ctrl.c
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Arche Platform driver to control APB.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/clk.h>
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index 21ac92d0f533..ace4eb365c0e 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Arche Platform driver to enable Unipro link.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/clk.h>
diff --git a/drivers/staging/greybus/arche_platform.h b/drivers/staging/greybus/arche_platform.h
index bcffc69d0960..02056351d25a 100644
--- a/drivers/staging/greybus/arche_platform.h
+++ b/drivers/staging/greybus/arche_platform.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Arche Platform driver to enable Unipro link.
*
* Copyright 2015-2016 Google Inc.
* Copyright 2015-2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __ARCHE_PLATFORM_H
diff --git a/drivers/staging/greybus/arpc.h b/drivers/staging/greybus/arpc.h
index c0b63c0130c5..3534ba1a4e6c 100644
--- a/drivers/staging/greybus/arpc.h
+++ b/drivers/staging/greybus/arpc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/drivers/staging/greybus/audio_apbridgea.c b/drivers/staging/greybus/audio_apbridgea.c
index 1b4252d5d255..7ebb1bde5cb7 100644
--- a/drivers/staging/greybus/audio_apbridgea.c
+++ b/drivers/staging/greybus/audio_apbridgea.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Audio Device Class Protocol helpers
*
* Copyright 2015-2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#include "greybus.h"
diff --git a/drivers/staging/greybus/audio_apbridgea.h b/drivers/staging/greybus/audio_apbridgea.h
index b94cb05c89e4..42ac6059bfc7 100644
--- a/drivers/staging/greybus/audio_apbridgea.h
+++ b/drivers/staging/greybus/audio_apbridgea.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: BSD-3-Clause
/**
* Copyright (c) 2015-2016 Google Inc.
* All rights reserved.
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
index a6d01f0761f3..fdb9e83cc34b 100644
--- a/drivers/staging/greybus/audio_codec.c
+++ b/drivers/staging/greybus/audio_codec.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* APBridge ALSA SoC dummy codec driver
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
index 6fb064c69a36..161b37c8ef17 100644
--- a/drivers/staging/greybus/audio_codec.h
+++ b/drivers/staging/greybus/audio_codec.h
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus audio driver
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __LINUX_GBAUDIO_CODEC_H
diff --git a/drivers/staging/greybus/audio_gb.c b/drivers/staging/greybus/audio_gb.c
index 7884d8482dc0..8894f1c87d48 100644
--- a/drivers/staging/greybus/audio_gb.c
+++ b/drivers/staging/greybus/audio_gb.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Audio Device Class Protocol helpers
*
* Copyright 2015-2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#include "greybus.h"
diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
index aa6508b44fab..d44b070d8862 100644
--- a/drivers/staging/greybus/audio_manager.c
+++ b/drivers/staging/greybus/audio_manager.c
@@ -1,16 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/rwlock.h>
+#include <linux/spinlock.h>
#include <linux/idr.h>
#include "audio_manager.h"
diff --git a/drivers/staging/greybus/audio_manager.h b/drivers/staging/greybus/audio_manager.h
index 5ab8f5e0ed3f..dcb1a20f5ff1 100644
--- a/drivers/staging/greybus/audio_manager.h
+++ b/drivers/staging/greybus/audio_manager.h
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#ifndef _GB_AUDIO_MANAGER_H_
diff --git a/drivers/staging/greybus/audio_manager_module.c b/drivers/staging/greybus/audio_manager_module.c
index adc16977452d..52342e832e3b 100644
--- a/drivers/staging/greybus/audio_manager_module.c
+++ b/drivers/staging/greybus/audio_manager_module.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#include <linux/slab.h>
diff --git a/drivers/staging/greybus/audio_manager_private.h b/drivers/staging/greybus/audio_manager_private.h
index 079ce953c256..9d9b58fc54c4 100644
--- a/drivers/staging/greybus/audio_manager_private.h
+++ b/drivers/staging/greybus/audio_manager_private.h
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#ifndef _GB_AUDIO_MANAGER_PRIVATE_H_
diff --git a/drivers/staging/greybus/audio_manager_sysfs.c b/drivers/staging/greybus/audio_manager_sysfs.c
index 34ebd147052f..283fbed0c8ed 100644
--- a/drivers/staging/greybus/audio_manager_sysfs.c
+++ b/drivers/staging/greybus/audio_manager_sysfs.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#include <linux/string.h>
diff --git a/drivers/staging/greybus/audio_module.c b/drivers/staging/greybus/audio_module.c
index 094c3be79b33..d065334efa23 100644
--- a/drivers/staging/greybus/audio_module.c
+++ b/drivers/staging/greybus/audio_module.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus audio driver
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index 07fac3948f3a..de4b1b2b12f3 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus audio driver
* Copyright 2015-2016 Google Inc.
* Copyright 2015-2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include "audio_codec.h"
diff --git a/drivers/staging/greybus/authentication.c b/drivers/staging/greybus/authentication.c
index 6c5dcb1c226b..16cc65e1472b 100644
--- a/drivers/staging/greybus/authentication.c
+++ b/drivers/staging/greybus/authentication.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Component Authentication Protocol (CAP) Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include "greybus.h"
diff --git a/drivers/staging/greybus/bootrom.c b/drivers/staging/greybus/bootrom.c
index 06df0ce03150..e85ffae85dff 100644
--- a/drivers/staging/greybus/bootrom.c
+++ b/drivers/staging/greybus/bootrom.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* BOOTROM Greybus driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/firmware.h>
diff --git a/drivers/staging/greybus/bundle.c b/drivers/staging/greybus/bundle.c
index d2ef57d090be..81c018da1248 100644
--- a/drivers/staging/greybus/bundle.c
+++ b/drivers/staging/greybus/bundle.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus bundles
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include "greybus.h"
diff --git a/drivers/staging/greybus/bundle.h b/drivers/staging/greybus/bundle.h
index 0c3491def96c..ffcfd43802cc 100644
--- a/drivers/staging/greybus/bundle.h
+++ b/drivers/staging/greybus/bundle.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus bundles
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __BUNDLE_H
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index a64517eabff4..f13f16b63d7e 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Camera protocol driver.
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/debugfs.h>
diff --git a/drivers/staging/greybus/connection.c b/drivers/staging/greybus/connection.c
index 2cf64640e8ec..2103168b585e 100644
--- a/drivers/staging/greybus/connection.c
+++ b/drivers/staging/greybus/connection.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus connections
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/workqueue.h>
diff --git a/drivers/staging/greybus/connection.h b/drivers/staging/greybus/connection.h
index 4d9f4c64176c..ec3f1d3ef3b9 100644
--- a/drivers/staging/greybus/connection.h
+++ b/drivers/staging/greybus/connection.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus connections
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __CONNECTION_H
diff --git a/drivers/staging/greybus/control.c b/drivers/staging/greybus/control.c
index 5b30be30a3a4..35f945a12b11 100644
--- a/drivers/staging/greybus/control.c
+++ b/drivers/staging/greybus/control.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus CPort control protocol.
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/control.h b/drivers/staging/greybus/control.h
index 4dcaec8b9cfe..643ddb9e0f92 100644
--- a/drivers/staging/greybus/control.h
+++ b/drivers/staging/greybus/control.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus CPort control protocol
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __CONTROL_H
diff --git a/drivers/staging/greybus/core.c b/drivers/staging/greybus/core.c
index ba761905b790..dafa430d176e 100644
--- a/drivers/staging/greybus/core.c
+++ b/drivers/staging/greybus/core.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus "Core"
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/staging/greybus/debugfs.c b/drivers/staging/greybus/debugfs.c
index a9d4d3da99a0..56e20c30feb5 100644
--- a/drivers/staging/greybus/debugfs.c
+++ b/drivers/staging/greybus/debugfs.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus debugfs code
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/debugfs.h>
diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c
index f7b24e0eaa6f..b082d81833a0 100644
--- a/drivers/staging/greybus/es2.c
+++ b/drivers/staging/greybus/es2.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus "AP" USB driver for "ES2" controller chips
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kthread.h>
#include <linux/sizes.h>
@@ -761,6 +760,7 @@ static int check_urb_status(struct urb *urb)
case -EOVERFLOW:
dev_err(dev, "%s: overflow actual length is %d\n",
__func__, urb->actual_length);
+ /* fall through */
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
diff --git a/drivers/staging/greybus/firmware.h b/drivers/staging/greybus/firmware.h
index f4f0db1cefe8..946221307ef6 100644
--- a/drivers/staging/greybus/firmware.h
+++ b/drivers/staging/greybus/firmware.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Firmware Management Header
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __FIRMWARE_H
diff --git a/drivers/staging/greybus/fw-core.c b/drivers/staging/greybus/fw-core.c
index 454a98957ba5..388866d92f5b 100644
--- a/drivers/staging/greybus/fw-core.c
+++ b/drivers/staging/greybus/fw-core.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Firmware Core Bundle Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/staging/greybus/fw-download.c b/drivers/staging/greybus/fw-download.c
index 8a1a413c6cb3..d3b7cccbc10d 100644
--- a/drivers/staging/greybus/fw-download.c
+++ b/drivers/staging/greybus/fw-download.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Firmware Download Protocol Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/firmware.h>
diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c
index 3cd6cf0a656b..71aec14f8181 100644
--- a/drivers/staging/greybus/fw-management.c
+++ b/drivers/staging/greybus/fw-management.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Firmware Management Protocol Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/cdev.h>
diff --git a/drivers/staging/greybus/gb-camera.h b/drivers/staging/greybus/gb-camera.h
index d45dabc5b367..ee293e461fc3 100644
--- a/drivers/staging/greybus/gb-camera.h
+++ b/drivers/staging/greybus/gb-camera.h
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Camera protocol driver.
*
* Copyright 2015 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#ifndef __GB_CAMERA_H
#define __GB_CAMERA_H
diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
index 80c1da8224e6..6cb85c3d3572 100644
--- a/drivers/staging/greybus/gbphy.c
+++ b/drivers/staging/greybus/gbphy.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Bridged-Phy Bus driver
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/staging/greybus/gbphy.h b/drivers/staging/greybus/gbphy.h
index 8ee68055ccc4..99463489d7d6 100644
--- a/drivers/staging/greybus/gbphy.h
+++ b/drivers/staging/greybus/gbphy.h
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Bridged-Phy Bus driver
*
* Copyright 2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#ifndef __GBPHY_H
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index ee5f998b174f..b1d4698019a1 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPIO Greybus driver.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/greybus.h b/drivers/staging/greybus/greybus.h
index c9bb93f23927..d03ddb7c9df0 100644
--- a/drivers/staging/greybus/greybus.h
+++ b/drivers/staging/greybus/greybus.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus driver and device API
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __LINUX_GREYBUS_H
diff --git a/drivers/staging/greybus/greybus_authentication.h b/drivers/staging/greybus/greybus_authentication.h
index 4784ed98e8a3..03ea9615b217 100644
--- a/drivers/staging/greybus/greybus_authentication.h
+++ b/drivers/staging/greybus/greybus_authentication.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* Greybus Component Authentication User Header
*
diff --git a/drivers/staging/greybus/greybus_firmware.h b/drivers/staging/greybus/greybus_firmware.h
index 277a2acce6fd..b58281a63ba4 100644
--- a/drivers/staging/greybus/greybus_firmware.h
+++ b/drivers/staging/greybus/greybus_firmware.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* Greybus Firmware Management User Header
*
diff --git a/drivers/staging/greybus/greybus_manifest.h b/drivers/staging/greybus/greybus_manifest.h
index d135945cefe1..2cec5cf7a846 100644
--- a/drivers/staging/greybus/greybus_manifest.h
+++ b/drivers/staging/greybus/greybus_manifest.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus manifest definition
*
diff --git a/drivers/staging/greybus/greybus_protocols.h b/drivers/staging/greybus/greybus_protocols.h
index b1be0b0af464..9bd7b6dfb476 100644
--- a/drivers/staging/greybus/greybus_protocols.h
+++ b/drivers/staging/greybus/greybus_protocols.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/drivers/staging/greybus/greybus_trace.h b/drivers/staging/greybus/greybus_trace.h
index f8feae4dc3b5..7b5e2c6b1f6b 100644
--- a/drivers/staging/greybus/greybus_trace.h
+++ b/drivers/staging/greybus/greybus_trace.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus driver and device API
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM greybus
diff --git a/drivers/staging/greybus/hd.c b/drivers/staging/greybus/hd.c
index 185ae3fa10fd..969f86697673 100644
--- a/drivers/staging/greybus/hd.c
+++ b/drivers/staging/greybus/hd.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Host Device
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/hd.h b/drivers/staging/greybus/hd.h
index e7927bb1761c..6cf024a20a58 100644
--- a/drivers/staging/greybus/hd.h
+++ b/drivers/staging/greybus/hd.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Host Device
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __HD_H
diff --git a/drivers/staging/greybus/hid.c b/drivers/staging/greybus/hid.c
index 465101bbab69..04053ff075a6 100644
--- a/drivers/staging/greybus/hid.c
+++ b/drivers/staging/greybus/hid.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* HID class driver for the Greybus.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/bitops.h>
diff --git a/drivers/staging/greybus/i2c.c b/drivers/staging/greybus/i2c.c
index c2a50087000c..58a37deb6579 100644
--- a/drivers/staging/greybus/i2c.c
+++ b/drivers/staging/greybus/i2c.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* I2C bridge driver for the Greybus "generic" I2C module.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/interface.c b/drivers/staging/greybus/interface.c
index 71e5cc234e78..d7b5b89a2f40 100644
--- a/drivers/staging/greybus/interface.c
+++ b/drivers/staging/greybus/interface.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus interface code
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/delay.h>
diff --git a/drivers/staging/greybus/interface.h b/drivers/staging/greybus/interface.h
index bd31b8c18d5b..1c00c5bb3ec9 100644
--- a/drivers/staging/greybus/interface.h
+++ b/drivers/staging/greybus/interface.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Interface Block code
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __INTERFACE_H
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index 3f4148c92308..010ae1e9c7fb 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Lights protocol driver.
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
@@ -925,6 +924,8 @@ static void __gb_lights_led_unregister(struct gb_channel *channel)
return;
led_classdev_unregister(cdev);
+ kfree(cdev->name);
+ cdev->name = NULL;
channel->led = NULL;
}
@@ -998,11 +999,7 @@ static int gb_lights_channel_config(struct gb_light *light,
light->has_flash = true;
- ret = gb_lights_channel_flash_config(channel);
- if (ret < 0)
- return ret;
-
- return ret;
+ return gb_lights_channel_flash_config(channel);
}
static int gb_lights_light_config(struct gb_lights *glights, u8 id)
diff --git a/drivers/staging/greybus/log.c b/drivers/staging/greybus/log.c
index 5c5bedaf69a6..15a88574dbb0 100644
--- a/drivers/staging/greybus/log.c
+++ b/drivers/staging/greybus/log.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus driver for the log protocol
*
* Copyright 2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
index 08e255884206..42f6f3de967c 100644
--- a/drivers/staging/greybus/loopback.c
+++ b/drivers/staging/greybus/loopback.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Loopback bridge driver for the Greybus loopback module.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -58,12 +57,7 @@ static struct gb_loopback_device gb_dev;
struct gb_loopback_async_operation {
struct gb_loopback *gb;
struct gb_operation *operation;
- struct timeval ts;
- struct timer_list timer;
- struct list_head entry;
- struct work_struct work;
- struct kref kref;
- bool pending;
+ ktime_t ts;
int (*completion)(struct gb_loopback_async_operation *op_async);
};
@@ -72,7 +66,6 @@ struct gb_loopback {
struct dentry *file;
struct kfifo kfifo_lat;
- struct kfifo kfifo_ts;
struct mutex mutex;
struct task_struct *task;
struct list_head entry;
@@ -82,7 +75,7 @@ struct gb_loopback {
atomic_t outstanding_operations;
/* Per connection stats */
- struct timeval ts;
+ ktime_t ts;
struct gb_loopback_stats latency;
struct gb_loopback_stats throughput;
struct gb_loopback_stats requests_per_second;
@@ -262,7 +255,6 @@ static void gb_loopback_check_attr(struct gb_loopback *gb)
gb->iteration_max, kfifo_depth);
}
kfifo_reset_out(&gb->kfifo_lat);
- kfifo_reset_out(&gb->kfifo_ts);
switch (gb->type) {
case GB_LOOPBACK_TYPE_PING:
@@ -377,21 +369,9 @@ static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
return NSEC_PER_DAY - t2 + t1;
}
-static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
-{
- u64 t1, t2;
-
- t1 = timeval_to_ns(ts);
- t2 = timeval_to_ns(te);
-
- return __gb_loopback_calc_latency(t1, t2);
-}
-
-static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
- struct timeval *ts, struct timeval *te)
+static u64 gb_loopback_calc_latency(ktime_t ts, ktime_t te)
{
- kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
- kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
+ return __gb_loopback_calc_latency(ktime_to_ns(ts), ktime_to_ns(te));
}
static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
@@ -399,10 +379,10 @@ static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
void *response, int response_size)
{
struct gb_operation *operation;
- struct timeval ts, te;
+ ktime_t ts, te;
int ret;
- do_gettimeofday(&ts);
+ ts = ktime_get();
operation = gb_operation_create(gb->connection, type, request_size,
response_size, GFP_KERNEL);
if (!operation)
@@ -430,11 +410,10 @@ static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
}
}
- do_gettimeofday(&te);
+ te = ktime_get();
/* Calculate the total time the message took */
- gb_loopback_push_latency_ts(gb, &ts, &te);
- gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
+ gb->elapsed_nsecs = gb_loopback_calc_latency(ts, te);
out_put_operation:
gb_operation_put(operation);
@@ -442,56 +421,6 @@ out_put_operation:
return ret;
}
-static void __gb_loopback_async_operation_destroy(struct kref *kref)
-{
- struct gb_loopback_async_operation *op_async;
-
- op_async = container_of(kref, struct gb_loopback_async_operation, kref);
-
- list_del(&op_async->entry);
- if (op_async->operation)
- gb_operation_put(op_async->operation);
- atomic_dec(&op_async->gb->outstanding_operations);
- wake_up(&op_async->gb->wq_completion);
- kfree(op_async);
-}
-
-static void gb_loopback_async_operation_get(struct gb_loopback_async_operation
- *op_async)
-{
- kref_get(&op_async->kref);
-}
-
-static void gb_loopback_async_operation_put(struct gb_loopback_async_operation
- *op_async)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&gb_dev.lock, flags);
- kref_put(&op_async->kref, __gb_loopback_async_operation_destroy);
- spin_unlock_irqrestore(&gb_dev.lock, flags);
-}
-
-static struct gb_loopback_async_operation *
- gb_loopback_operation_find(u16 id)
-{
- struct gb_loopback_async_operation *op_async;
- bool found = false;
- unsigned long flags;
-
- spin_lock_irqsave(&gb_dev.lock, flags);
- list_for_each_entry(op_async, &gb_dev.list_op_async, entry) {
- if (op_async->operation->id == id) {
- gb_loopback_async_operation_get(op_async);
- found = true;
- break;
- }
- }
- spin_unlock_irqrestore(&gb_dev.lock, flags);
-
- return found ? op_async : NULL;
-}
-
static void gb_loopback_async_wait_all(struct gb_loopback *gb)
{
wait_event(gb->wq_completion,
@@ -502,87 +431,42 @@ static void gb_loopback_async_operation_callback(struct gb_operation *operation)
{
struct gb_loopback_async_operation *op_async;
struct gb_loopback *gb;
- struct timeval te;
- bool err = false;
-
- do_gettimeofday(&te);
- op_async = gb_loopback_operation_find(operation->id);
- if (!op_async)
- return;
+ ktime_t te;
+ int result;
+ te = ktime_get();
+ result = gb_operation_result(operation);
+ op_async = gb_operation_get_data(operation);
gb = op_async->gb;
+
mutex_lock(&gb->mutex);
- if (!op_async->pending || gb_operation_result(operation)) {
- err = true;
+ if (!result && op_async->completion)
+ result = op_async->completion(op_async);
+
+ if (!result) {
+ gb->elapsed_nsecs = gb_loopback_calc_latency(op_async->ts, te);
} else {
- if (op_async->completion)
- if (op_async->completion(op_async))
- err = true;
+ gb->error++;
+ if (result == -ETIMEDOUT)
+ gb->requests_timedout++;
}
- if (!err) {
- gb_loopback_push_latency_ts(gb, &op_async->ts, &te);
- gb->elapsed_nsecs = gb_loopback_calc_latency(&op_async->ts,
- &te);
- }
+ gb->iteration_count++;
+ gb_loopback_calculate_stats(gb, result);
- if (op_async->pending) {
- if (err)
- gb->error++;
- gb->iteration_count++;
- op_async->pending = false;
- del_timer_sync(&op_async->timer);
- gb_loopback_async_operation_put(op_async);
- gb_loopback_calculate_stats(gb, err);
- }
mutex_unlock(&gb->mutex);
dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
operation->id);
- gb_loopback_async_operation_put(op_async);
-}
-
-static void gb_loopback_async_operation_work(struct work_struct *work)
-{
- struct gb_loopback *gb;
- struct gb_operation *operation;
- struct gb_loopback_async_operation *op_async;
-
- op_async = container_of(work, struct gb_loopback_async_operation, work);
- gb = op_async->gb;
- operation = op_async->operation;
-
- mutex_lock(&gb->mutex);
- if (op_async->pending) {
- gb->requests_timedout++;
- gb->error++;
- gb->iteration_count++;
- op_async->pending = false;
- gb_loopback_async_operation_put(op_async);
- gb_loopback_calculate_stats(gb, true);
- }
- mutex_unlock(&gb->mutex);
-
- dev_dbg(&gb->connection->bundle->dev, "timeout operation %d\n",
- operation->id);
-
- gb_operation_cancel(operation, -ETIMEDOUT);
- gb_loopback_async_operation_put(op_async);
-}
-
-static void gb_loopback_async_operation_timeout(unsigned long data)
-{
- struct gb_loopback_async_operation *op_async;
- u16 id = data;
+ /* Wake up waiters */
+ atomic_dec(&op_async->gb->outstanding_operations);
+ wake_up(&gb->wq_completion);
- op_async = gb_loopback_operation_find(id);
- if (!op_async) {
- pr_err("operation %d not found - time out ?\n", id);
- return;
- }
- schedule_work(&op_async->work);
+ /* Release resources */
+ gb_operation_put(operation);
+ kfree(op_async);
}
static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
@@ -593,15 +477,11 @@ static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
struct gb_loopback_async_operation *op_async;
struct gb_operation *operation;
int ret;
- unsigned long flags;
op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
if (!op_async)
return -ENOMEM;
- INIT_WORK(&op_async->work, gb_loopback_async_operation_work);
- kref_init(&op_async->kref);
-
operation = gb_operation_create(gb->connection, type, request_size,
response_size, GFP_KERNEL);
if (!operation) {
@@ -612,35 +492,24 @@ static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
if (request_size)
memcpy(operation->request->payload, request, request_size);
+ gb_operation_set_data(operation, op_async);
+
op_async->gb = gb;
op_async->operation = operation;
op_async->completion = completion;
- spin_lock_irqsave(&gb_dev.lock, flags);
- list_add_tail(&op_async->entry, &gb_dev.list_op_async);
- spin_unlock_irqrestore(&gb_dev.lock, flags);
+ op_async->ts = ktime_get();
- do_gettimeofday(&op_async->ts);
- op_async->pending = true;
atomic_inc(&gb->outstanding_operations);
- mutex_lock(&gb->mutex);
ret = gb_operation_request_send(operation,
gb_loopback_async_operation_callback,
- 0,
+ jiffies_to_msecs(gb->jiffy_timeout),
GFP_KERNEL);
- if (ret)
- goto error;
-
- setup_timer(&op_async->timer, gb_loopback_async_operation_timeout,
- (unsigned long)operation->id);
- op_async->timer.expires = jiffies + gb->jiffy_timeout;
- add_timer(&op_async->timer);
-
- goto done;
-error:
- gb_loopback_async_operation_put(op_async);
-done:
- mutex_unlock(&gb->mutex);
+ if (ret) {
+ atomic_dec(&gb->outstanding_operations);
+ gb_operation_put(operation);
+ kfree(op_async);
+ }
return ret;
}
@@ -854,7 +723,7 @@ static void gb_loopback_reset_stats(struct gb_loopback *gb)
/* Should be initialized at least once per transaction set */
gb->apbridge_latency_ts = 0;
gb->gbphy_latency_ts = 0;
- memset(&gb->ts, 0, sizeof(struct timeval));
+ gb->ts = ktime_set(0, 0);
}
static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
@@ -937,15 +806,15 @@ static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
{
u64 nlat;
u32 lat;
- struct timeval te;
+ ktime_t te;
if (!error) {
gb->requests_completed++;
gb_loopback_calculate_latency_stats(gb);
}
- do_gettimeofday(&te);
- nlat = gb_loopback_calc_latency(&gb->ts, &te);
+ te = ktime_get();
+ nlat = gb_loopback_calc_latency(gb->ts, te);
if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
lat = gb_loopback_nsec_to_usec_latency(nlat);
@@ -1029,9 +898,8 @@ static int gb_loopback_fn(void *data)
size = gb->size;
us_wait = gb->us_wait;
type = gb->type;
- if (gb->ts.tv_usec == 0 && gb->ts.tv_sec == 0)
- do_gettimeofday(&gb->ts);
- mutex_unlock(&gb->mutex);
+ if (ktime_to_ns(gb->ts) == 0)
+ gb->ts = ktime_get();
/* Else operations to perform */
if (gb->async) {
@@ -1042,8 +910,10 @@ static int gb_loopback_fn(void *data)
else if (type == GB_LOOPBACK_TYPE_SINK)
error = gb_loopback_async_sink(gb, size);
- if (error)
+ if (error) {
gb->error++;
+ gb->iteration_count++;
+ }
} else {
/* We are effectively single threaded here */
if (type == GB_LOOPBACK_TYPE_PING)
@@ -1059,6 +929,7 @@ static int gb_loopback_fn(void *data)
gb_loopback_calculate_stats(gb, !!error);
}
gb->send_count++;
+ mutex_unlock(&gb->mutex);
if (us_wait) {
if (us_wait < 20000)
@@ -1241,18 +1112,12 @@ static int gb_loopback_probe(struct gb_bundle *bundle,
retval = -ENOMEM;
goto out_conn;
}
- if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,
- GFP_KERNEL)) {
- retval = -ENOMEM;
- goto out_kfifo0;
- }
-
/* Fork worker thread */
mutex_init(&gb->mutex);
gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
if (IS_ERR(gb->task)) {
retval = PTR_ERR(gb->task);
- goto out_kfifo1;
+ goto out_kfifo;
}
spin_lock_irqsave(&gb_dev.lock, flags);
@@ -1266,9 +1131,7 @@ static int gb_loopback_probe(struct gb_bundle *bundle,
return 0;
-out_kfifo1:
- kfifo_free(&gb->kfifo_ts);
-out_kfifo0:
+out_kfifo:
kfifo_free(&gb->kfifo_lat);
out_conn:
device_unregister(dev);
@@ -1302,7 +1165,6 @@ static void gb_loopback_disconnect(struct gb_bundle *bundle)
kthread_stop(gb->task);
kfifo_free(&gb->kfifo_lat);
- kfifo_free(&gb->kfifo_ts);
gb_connection_latency_tag_disable(gb->connection);
debugfs_remove(gb->file);
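
For context on the loopback conversion above: the driver now samples ktime_get() around each transaction and derives nanoseconds with ktime_to_ns() instead of using struct timeval and do_gettimeofday(). A minimal sketch of that measurement pattern follows; measure_ns() is illustrative only and omits the driver's NSEC_PER_DAY wrap handling.

/* Sketch only; measure_ns() is not part of the patch. */
static u64 measure_ns(void)
{
	ktime_t ts, te;

	ts = ktime_get();
	/* ... perform the synchronous operation here ... */
	te = ktime_get();

	return ktime_to_ns(ktime_sub(te, ts));
}
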
diff --git a/drivers/staging/greybus/manifest.c b/drivers/staging/greybus/manifest.c
index 7b903770a684..08db49264f2b 100644
--- a/drivers/staging/greybus/manifest.c
+++ b/drivers/staging/greybus/manifest.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus manifest parsing
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include "greybus.h"
diff --git a/drivers/staging/greybus/manifest.h b/drivers/staging/greybus/manifest.h
index d96428407cd7..f3c95a255631 100644
--- a/drivers/staging/greybus/manifest.h
+++ b/drivers/staging/greybus/manifest.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus manifest parsing
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __MANIFEST_H
diff --git a/drivers/staging/greybus/module.c b/drivers/staging/greybus/module.c
index 660b4674a76f..b785382192de 100644
--- a/drivers/staging/greybus/module.c
+++ b/drivers/staging/greybus/module.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Module code
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include "greybus.h"
diff --git a/drivers/staging/greybus/module.h b/drivers/staging/greybus/module.h
index 88a97ce04243..b1ebcc6636db 100644
--- a/drivers/staging/greybus/module.h
+++ b/drivers/staging/greybus/module.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Module code
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __MODULE_H
diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c
index 3023012808d9..609332b3e15b 100644
--- a/drivers/staging/greybus/operation.c
+++ b/drivers/staging/greybus/operation.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus operations
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/operation.h b/drivers/staging/greybus/operation.h
index 7529f01b2529..40b7b02fff88 100644
--- a/drivers/staging/greybus/operation.h
+++ b/drivers/staging/greybus/operation.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus operations
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __OPERATION_H
@@ -105,6 +104,8 @@ struct gb_operation {
int active;
struct list_head links; /* connection->operations */
+
+ void *private;
};
static inline bool
@@ -206,6 +207,17 @@ static inline int gb_operation_unidirectional(struct gb_connection *connection,
request, request_size, GB_OPERATION_TIMEOUT_DEFAULT);
}
+static inline void *gb_operation_get_data(struct gb_operation *operation)
+{
+ return operation->private;
+}
+
+static inline void gb_operation_set_data(struct gb_operation *operation,
+ void *data)
+{
+ operation->private = data;
+}
+
int gb_operation_init(void);
void gb_operation_exit(void);
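
For reference, a minimal sketch (not part of the patch) of how a driver might use the new gb_operation_get_data()/gb_operation_set_data() helpers to carry per-operation state into an asynchronous completion callback, mirroring the loopback.c conversion above. struct my_ctx, my_complete() and my_send() are illustrative names only.

/* Hypothetical example; my_ctx, my_complete() and my_send() are not in the tree. */
struct my_ctx {
	ktime_t ts;
};

static void my_complete(struct gb_operation *operation)
{
	struct my_ctx *ctx = gb_operation_get_data(operation);

	/* ... consume ctx->ts, e.g. compute a latency ... */
	gb_operation_put(operation);
	kfree(ctx);
}

static int my_send(struct gb_connection *connection, int type)
{
	struct gb_operation *operation;
	struct my_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	operation = gb_operation_create(connection, type, 0, 0, GFP_KERNEL);
	if (!operation) {
		kfree(ctx);
		return -ENOMEM;
	}

	ctx->ts = ktime_get();
	gb_operation_set_data(operation, ctx);

	ret = gb_operation_request_send(operation, my_complete, 0, GFP_KERNEL);
	if (ret) {
		gb_operation_put(operation);
		kfree(ctx);
	}

	return ret;
}
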
diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c
index 20cac20518d7..0529e5628c24 100644
--- a/drivers/staging/greybus/power_supply.c
+++ b/drivers/staging/greybus/power_supply.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Power Supply driver for a Greybus module.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
index f0404bc37123..4a6d394b6c44 100644
--- a/drivers/staging/greybus/pwm.c
+++ b/drivers/staging/greybus/pwm.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PWM Greybus driver.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/raw.c b/drivers/staging/greybus/raw.c
index 729d25811568..838acbe84ca0 100644
--- a/drivers/staging/greybus/raw.c
+++ b/drivers/staging/greybus/raw.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus driver for the Raw protocol
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
index 101ca5097fc9..38e85033fc4b 100644
--- a/drivers/staging/greybus/sdio.c
+++ b/drivers/staging/greybus/sdio.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SD/MMC Greybus driver.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/spi.c b/drivers/staging/greybus/spi.c
index c893552b5c0b..47d896992b35 100644
--- a/drivers/staging/greybus/spi.c
+++ b/drivers/staging/greybus/spi.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SPI bridge PHY driver.
*
* Copyright 2014-2016 Google Inc.
* Copyright 2014-2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/module.h>
diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c
index e97b19148497..2e07c6b41334 100644
--- a/drivers/staging/greybus/spilib.c
+++ b/drivers/staging/greybus/spilib.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus SPI library
*
* Copyright 2014-2016 Google Inc.
* Copyright 2014-2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/bitops.h>
@@ -544,12 +543,15 @@ int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
return 0;
-exit_spi_unregister:
- spi_unregister_master(master);
exit_spi_put:
spi_master_put(master);
return ret;
+
+exit_spi_unregister:
+ spi_unregister_master(master);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(gb_spilib_master_init);
@@ -558,7 +560,6 @@ void gb_spilib_master_exit(struct gb_connection *connection)
struct spi_master *master = gb_connection_get_data(connection);
spi_unregister_master(master);
- spi_master_put(master);
}
EXPORT_SYMBOL_GPL(gb_spilib_master_exit);
diff --git a/drivers/staging/greybus/spilib.h b/drivers/staging/greybus/spilib.h
index cb6092578a92..043d4d32c3ee 100644
--- a/drivers/staging/greybus/spilib.h
+++ b/drivers/staging/greybus/spilib.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus SPI library header
*
diff --git a/drivers/staging/greybus/svc.c b/drivers/staging/greybus/svc.c
index 516f827e5ed9..a874fed761a1 100644
--- a/drivers/staging/greybus/svc.c
+++ b/drivers/staging/greybus/svc.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SVC Greybus driver.
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/debugfs.h>
diff --git a/drivers/staging/greybus/svc.h b/drivers/staging/greybus/svc.h
index 226c2a396fc8..ad01783bac9c 100644
--- a/drivers/staging/greybus/svc.h
+++ b/drivers/staging/greybus/svc.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus SVC code
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __SVC_H
diff --git a/drivers/staging/greybus/svc_watchdog.c b/drivers/staging/greybus/svc_watchdog.c
index 779fbea5d4ba..7868ad8211c5 100644
--- a/drivers/staging/greybus/svc_watchdog.c
+++ b/drivers/staging/greybus/svc_watchdog.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SVC Greybus "watchdog" driver.
*
* Copyright 2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#include <linux/delay.h>
diff --git a/drivers/staging/greybus/tools/lbtest b/drivers/staging/greybus/tools/lbtest
index d7353f1a2a6f..47c481239e98 100755
--- a/drivers/staging/greybus/tools/lbtest
+++ b/drivers/staging/greybus/tools/lbtest
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2015 Google, Inc.
# Copyright (c) 2015 Linaro, Ltd.
diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c
index fbe589fca840..c51610ce24af 100644
--- a/drivers/staging/greybus/tools/loopback_test.c
+++ b/drivers/staging/greybus/tools/loopback_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: BSD-3-Clause
/*
* Loopback test application
*
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index c6d01b800d3c..8a006323c3c1 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UART driver for the Greybus "generic" UART module.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
- * Released under the GPLv2 only.
- *
* Heavily based on drivers/usb/class/cdc-acm.c and
* drivers/usb/serial/usb-serial.c.
*/
diff --git a/drivers/staging/greybus/usb.c b/drivers/staging/greybus/usb.c
index f93a76d02de6..1c246c73a085 100644
--- a/drivers/staging/greybus/usb.c
+++ b/drivers/staging/greybus/usb.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB host driver for the Greybus "generic" USB module.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
- *
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/staging/greybus/vibrator.c b/drivers/staging/greybus/vibrator.c
index 5cd8a50d41ad..3e5dedeacd5c 100644
--- a/drivers/staging/greybus/vibrator.c
+++ b/drivers/staging/greybus/vibrator.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Vibrator protocol driver.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index bcbdc7340b55..fa8b27e091a2 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -106,7 +106,7 @@ static int readmagic_bitstream(u8 *bitdata, int *offset)
read_bitstream(bitdata, buf, offset, 13);
r = memcmp(buf, bits_magic, 13);
if (r) {
- pr_err("error: corrupted header");
+ pr_err("error: corrupted header\n");
return -EINVAL;
}
pr_info("bitstream file magic number Ok\n");
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
index 54ef0deed28f..ec42544a46aa 100644
--- a/drivers/staging/iio/Documentation/device.txt
+++ b/drivers/staging/iio/Documentation/device.txt
@@ -26,9 +26,6 @@ Then fill in the following:
pointer to a structure with elements that tend to be fixed for
large sets of different parts supported by a given driver.
This contains:
- * info->driver_module:
- Set to THIS_MODULE. Used to ensure correct ownership
- of various resources allocate by the core.
* info->event_attrs:
Attributes used to enable / disable hardware events.
* info->attrs:
diff --git a/drivers/staging/iio/Documentation/trigger.txt b/drivers/staging/iio/Documentation/trigger.txt
index 7c0e505e4f04..299a1add98bf 100644
--- a/drivers/staging/iio/Documentation/trigger.txt
+++ b/drivers/staging/iio/Documentation/trigger.txt
@@ -10,10 +10,6 @@ struct iio_trig *trig = iio_trigger_alloc("<trigger format string>", ...);
allocates a trigger structure. The key elements to then fill in within
a driver are:
-trig->owner
- Typically set to THIS_MODULE. Used to ensure correct
- ownership of core allocated resources.
-
trig->set_trigger_state:
Function that enables / disables the underlying source of the trigger.
diff --git a/drivers/staging/iio/accel/adis16201.c b/drivers/staging/iio/accel/adis16201.c
index fbc240663621..2ebd27536216 100644
--- a/drivers/staging/iio/accel/adis16201.c
+++ b/drivers/staging/iio/accel/adis16201.c
@@ -284,7 +284,6 @@ static const struct iio_info adis16201_info = {
.read_raw = adis16201_read_raw,
.write_raw = adis16201_write_raw,
.update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static const char * const adis16201_status_error_msgs[] = {
diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c
index 4e3fa7592d3f..b3e4571340ab 100644
--- a/drivers/staging/iio/accel/adis16203.c
+++ b/drivers/staging/iio/accel/adis16203.c
@@ -232,7 +232,6 @@ static const struct iio_info adis16203_info = {
.read_raw = adis16203_read_raw,
.write_raw = adis16203_write_raw,
.update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static const char * const adis16203_status_error_msgs[] = {
diff --git a/drivers/staging/iio/accel/adis16209.c b/drivers/staging/iio/accel/adis16209.c
index 8485c024e3f5..7fcef9a2590a 100644
--- a/drivers/staging/iio/accel/adis16209.c
+++ b/drivers/staging/iio/accel/adis16209.c
@@ -285,7 +285,6 @@ static const struct iio_info adis16209_info = {
.read_raw = adis16209_read_raw,
.write_raw = adis16209_write_raw,
.update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static const char * const adis16209_status_error_msgs[] = {
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
index 109cd94b5ac3..fff6d99089cc 100644
--- a/drivers/staging/iio/accel/adis16240.c
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -361,7 +361,6 @@ static const struct iio_info adis16240_info = {
.read_raw = adis16240_read_raw,
.write_raw = adis16240_write_raw,
.update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static const char * const adis16240_status_error_msgs[] = {
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 6150d2780e22..cadfb96734ed 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -162,6 +162,7 @@ struct ad7192_state {
u32 scale_avail[8][2];
u8 gpocon;
u8 devid;
+ struct mutex lock; /* protect sensor state */
struct ad_sigma_delta sd;
};
@@ -461,10 +462,10 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
*val = st->scale_avail[AD7192_CONF_GAIN(st->conf)][0];
*val2 = st->scale_avail[AD7192_CONF_GAIN(st->conf)][1];
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return IIO_VAL_INT_PLUS_NANO;
case IIO_TEMP:
*val = 0;
@@ -508,6 +509,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SCALE:
ret = -EINVAL;
+ mutex_lock(&st->lock);
for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
if (val2 == st->scale_avail[i][1]) {
ret = 0;
@@ -521,6 +523,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
ad7192_calibrate_all(st);
break;
}
+ mutex_unlock(&st->lock);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
if (!val) {
@@ -567,7 +570,6 @@ static const struct iio_info ad7192_info = {
.write_raw_get_fmt = ad7192_write_raw_get_fmt,
.attrs = &ad7192_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ad7195_info = {
@@ -576,7 +578,6 @@ static const struct iio_info ad7195_info = {
.write_raw_get_fmt = ad7192_write_raw_get_fmt,
.attrs = &ad7195_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec ad7192_channels[] = {
@@ -632,6 +633,8 @@ static int ad7192_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
+
st->avdd = devm_regulator_get(&spi->dev, "avdd");
if (IS_ERR(st->avdd))
return PTR_ERR(st->avdd);
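
The ad7192 change above replaces uses of indio_dev->mlock with a driver-private mutex initialized in probe. A minimal sketch of that pattern is below; struct my_state and my_read_scale() are illustrative only and not part of the driver.

/* Illustrative only; my_state and my_read_scale() are hypothetical names. */
struct my_state {
	struct mutex lock;	/* protects the cached configuration below */
	u32 scale_avail[8][2];
	u32 conf;
};

static int my_read_scale(struct my_state *st, int gain, int *val, int *val2)
{
	mutex_lock(&st->lock);
	*val = st->scale_avail[gain][0];
	*val2 = st->scale_avail[gain][1];
	mutex_unlock(&st->lock);

	return 0;
}

/* At probe time, before the first use: mutex_init(&st->lock); */
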
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index f85dde9805e0..b736275c10f5 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -828,7 +828,6 @@ static const struct iio_info ad7280_info = {
.read_raw = ad7280_read_raw,
.event_attrs = &ad7280_event_attrs_group,
.attrs = &ad7280_attrs_group,
- .driver_module = THIS_MODULE,
};
static const struct ad7280_platform_data ad7793_default_pdata = {
diff --git a/drivers/staging/iio/adc/ad7606.c b/drivers/staging/iio/adc/ad7606.c
index 18f5f139117e..25b9fcd5e3a4 100644
--- a/drivers/staging/iio/adc/ad7606.c
+++ b/drivers/staging/iio/adc/ad7606.c
@@ -373,26 +373,22 @@ static irqreturn_t ad7606_interrupt(int irq, void *dev_id)
};
static const struct iio_info ad7606_info_no_os_or_range = {
- .driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
};
static const struct iio_info ad7606_info_os_and_range = {
- .driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
.write_raw = &ad7606_write_raw,
.attrs = &ad7606_attribute_group_os_and_range,
};
static const struct iio_info ad7606_info_os = {
- .driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
.write_raw = &ad7606_write_raw,
.attrs = &ad7606_attribute_group_os,
};
static const struct iio_info ad7606_info_range = {
- .driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
.write_raw = &ad7606_write_raw,
.attrs = &ad7606_attribute_group_range,
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index dec3ba6eba8a..a7797af579b9 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -155,7 +155,6 @@ static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
static const struct iio_info ad7780_info = {
.read_raw = ad7780_read_raw,
- .driver_module = THIS_MODULE,
};
static int ad7780_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 17d280581e24..bfe180a475ee 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -335,7 +335,6 @@ static const struct attribute_group ad7816_event_attribute_group = {
static const struct iio_info ad7816_info = {
.attrs = &ad7816_attribute_group,
.event_attrs = &ad7816_event_attribute_group,
- .driver_module = THIS_MODULE,
};
/*
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index b2bce26499f5..2d33632c00e8 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -2087,13 +2087,11 @@ EXPORT_SYMBOL_GPL(adt7316_pm_ops);
static const struct iio_info adt7316_info = {
.attrs = &adt7316_attribute_group,
.event_attrs = &adt7316_event_attribute_group,
- .driver_module = THIS_MODULE,
};
static const struct iio_info adt7516_info = {
.attrs = &adt7516_attribute_group,
.event_attrs = &adt7516_event_attribute_group,
- .driver_module = THIS_MODULE,
};
/*
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index a6f249e9c1e1..2fe916c48848 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -569,7 +569,6 @@ static const struct attribute_group ad7150_event_attribute_group = {
static const struct iio_info ad7150_info = {
.event_attrs = &ad7150_event_attribute_group,
- .driver_module = THIS_MODULE,
.read_raw = &ad7150_read_raw,
.read_event_config = &ad7150_read_event_config,
.write_event_config = &ad7150_write_event_config,
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index ff10d1f0a7e4..61377ca444de 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -441,7 +441,6 @@ static const struct iio_info ad7152_info = {
.read_raw = ad7152_read_raw,
.write_raw = ad7152_write_raw,
.write_raw_get_fmt = ad7152_write_raw_get_fmt,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec ad7152_channels[] = {
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index cdcb4fccf3fe..a124853a05f0 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -667,7 +667,6 @@ static const struct iio_info ad7746_info = {
.attrs = &ad7746_attribute_group,
.read_raw = ad7746_read_raw,
.write_raw = ad7746_write_raw,
- .driver_module = THIS_MODULE,
};
/*
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 6da46ede7ee0..c73eff1f8d73 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -290,7 +290,6 @@ static const struct attribute_group ad9832_attribute_group = {
static const struct iio_info ad9832_info = {
.attrs = &ad9832_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ad9832_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 995acdd7c942..4c6d4043903e 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -381,12 +381,10 @@ static const struct attribute_group ad9833_attribute_group = {
static const struct iio_info ad9834_info = {
.attrs = &ad9834_attribute_group,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ad9833_info = {
.attrs = &ad9833_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ad9834_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
index 967524583d8a..4e7630caf7d3 100644
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -103,7 +103,6 @@ static int adis16060_read_raw(struct iio_dev *indio_dev,
static const struct iio_info adis16060_info = {
.read_raw = adis16060_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec adis16060_channels[] = {
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 3d539eeb0e26..2b28fb9c0048 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -575,7 +575,6 @@ out:
static const struct iio_info ad5933_info = {
.read_raw = ad5933_read_raw,
.attrs = &ad5933_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ad5933_ring_preenable(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/light/tsl2x7x.c b/drivers/staging/iio/light/tsl2x7x.c
index 786e93f16ce9..42ed9c015aaf 100644
--- a/drivers/staging/iio/light/tsl2x7x.c
+++ b/drivers/staging/iio/light/tsl2x7x.c
@@ -15,112 +15,112 @@
* more details.
*/
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include "tsl2x7x.h"
/* Cal defs*/
-#define PROX_STAT_CAL 0
-#define PROX_STAT_SAMP 1
-#define MAX_SAMPLES_CAL 200
+#define PROX_STAT_CAL 0
+#define PROX_STAT_SAMP 1
+#define MAX_SAMPLES_CAL 200
/* TSL2X7X Device ID */
-#define TRITON_ID 0x00
-#define SWORDFISH_ID 0x30
-#define HALIBUT_ID 0x20
+#define TRITON_ID 0x00
+#define SWORDFISH_ID 0x30
+#define HALIBUT_ID 0x20
/* Lux calculation constants */
-#define TSL2X7X_LUX_CALC_OVER_FLOW 65535
+#define TSL2X7X_LUX_CALC_OVER_FLOW 65535
/* TAOS Register definitions - note:
* depending on device, some of these register are not used and the
* register address is benign.
*/
/* 2X7X register offsets */
-#define TSL2X7X_MAX_CONFIG_REG 16
+#define TSL2X7X_MAX_CONFIG_REG 16
/* Device Registers and Masks */
-#define TSL2X7X_CNTRL 0x00
-#define TSL2X7X_ALS_TIME 0X01
-#define TSL2X7X_PRX_TIME 0x02
-#define TSL2X7X_WAIT_TIME 0x03
-#define TSL2X7X_ALS_MINTHRESHLO 0X04
-#define TSL2X7X_ALS_MINTHRESHHI 0X05
-#define TSL2X7X_ALS_MAXTHRESHLO 0X06
-#define TSL2X7X_ALS_MAXTHRESHHI 0X07
-#define TSL2X7X_PRX_MINTHRESHLO 0X08
-#define TSL2X7X_PRX_MINTHRESHHI 0X09
-#define TSL2X7X_PRX_MAXTHRESHLO 0X0A
-#define TSL2X7X_PRX_MAXTHRESHHI 0X0B
-#define TSL2X7X_PERSISTENCE 0x0C
-#define TSL2X7X_PRX_CONFIG 0x0D
-#define TSL2X7X_PRX_COUNT 0x0E
-#define TSL2X7X_GAIN 0x0F
-#define TSL2X7X_NOTUSED 0x10
-#define TSL2X7X_REVID 0x11
-#define TSL2X7X_CHIPID 0x12
-#define TSL2X7X_STATUS 0x13
-#define TSL2X7X_ALS_CHAN0LO 0x14
-#define TSL2X7X_ALS_CHAN0HI 0x15
-#define TSL2X7X_ALS_CHAN1LO 0x16
-#define TSL2X7X_ALS_CHAN1HI 0x17
-#define TSL2X7X_PRX_LO 0x18
-#define TSL2X7X_PRX_HI 0x19
+#define TSL2X7X_CNTRL 0x00
+#define TSL2X7X_ALS_TIME 0X01
+#define TSL2X7X_PRX_TIME 0x02
+#define TSL2X7X_WAIT_TIME 0x03
+#define TSL2X7X_ALS_MINTHRESHLO 0X04
+#define TSL2X7X_ALS_MINTHRESHHI 0X05
+#define TSL2X7X_ALS_MAXTHRESHLO 0X06
+#define TSL2X7X_ALS_MAXTHRESHHI 0X07
+#define TSL2X7X_PRX_MINTHRESHLO 0X08
+#define TSL2X7X_PRX_MINTHRESHHI 0X09
+#define TSL2X7X_PRX_MAXTHRESHLO 0X0A
+#define TSL2X7X_PRX_MAXTHRESHHI 0X0B
+#define TSL2X7X_PERSISTENCE 0x0C
+#define TSL2X7X_PRX_CONFIG 0x0D
+#define TSL2X7X_PRX_COUNT 0x0E
+#define TSL2X7X_GAIN 0x0F
+#define TSL2X7X_NOTUSED 0x10
+#define TSL2X7X_REVID 0x11
+#define TSL2X7X_CHIPID 0x12
+#define TSL2X7X_STATUS 0x13
+#define TSL2X7X_ALS_CHAN0LO 0x14
+#define TSL2X7X_ALS_CHAN0HI 0x15
+#define TSL2X7X_ALS_CHAN1LO 0x16
+#define TSL2X7X_ALS_CHAN1HI 0x17
+#define TSL2X7X_PRX_LO 0x18
+#define TSL2X7X_PRX_HI 0x19
/* tsl2X7X cmd reg masks */
-#define TSL2X7X_CMD_REG 0x80
-#define TSL2X7X_CMD_SPL_FN 0x60
+#define TSL2X7X_CMD_REG 0x80
+#define TSL2X7X_CMD_SPL_FN 0x60
-#define TSL2X7X_CMD_PROX_INT_CLR 0X05
-#define TSL2X7X_CMD_ALS_INT_CLR 0x06
-#define TSL2X7X_CMD_PROXALS_INT_CLR 0X07
+#define TSL2X7X_CMD_PROX_INT_CLR 0X05
+#define TSL2X7X_CMD_ALS_INT_CLR 0x06
+#define TSL2X7X_CMD_PROXALS_INT_CLR 0X07
/* tsl2X7X cntrl reg masks */
-#define TSL2X7X_CNTL_ADC_ENBL 0x02
-#define TSL2X7X_CNTL_PWR_ON 0x01
+#define TSL2X7X_CNTL_ADC_ENBL 0x02
+#define TSL2X7X_CNTL_PWR_ON 0x01
/* tsl2X7X status reg masks */
-#define TSL2X7X_STA_ADC_VALID 0x01
-#define TSL2X7X_STA_PRX_VALID 0x02
-#define TSL2X7X_STA_ADC_PRX_VALID (TSL2X7X_STA_ADC_VALID |\
- TSL2X7X_STA_PRX_VALID)
-#define TSL2X7X_STA_ALS_INTR 0x10
-#define TSL2X7X_STA_PRX_INTR 0x20
+#define TSL2X7X_STA_ADC_VALID 0x01
+#define TSL2X7X_STA_PRX_VALID 0x02
+#define TSL2X7X_STA_ADC_PRX_VALID (TSL2X7X_STA_ADC_VALID | \
+ TSL2X7X_STA_PRX_VALID)
+#define TSL2X7X_STA_ALS_INTR 0x10
+#define TSL2X7X_STA_PRX_INTR 0x20
/* tsl2X7X cntrl reg masks */
-#define TSL2X7X_CNTL_REG_CLEAR 0x00
-#define TSL2X7X_CNTL_PROX_INT_ENBL 0X20
-#define TSL2X7X_CNTL_ALS_INT_ENBL 0X10
-#define TSL2X7X_CNTL_WAIT_TMR_ENBL 0X08
-#define TSL2X7X_CNTL_PROX_DET_ENBL 0X04
-#define TSL2X7X_CNTL_PWRON 0x01
-#define TSL2X7X_CNTL_ALSPON_ENBL 0x03
-#define TSL2X7X_CNTL_INTALSPON_ENBL 0x13
-#define TSL2X7X_CNTL_PROXPON_ENBL 0x0F
-#define TSL2X7X_CNTL_INTPROXPON_ENBL 0x2F
+#define TSL2X7X_CNTL_REG_CLEAR 0x00
+#define TSL2X7X_CNTL_PROX_INT_ENBL 0X20
+#define TSL2X7X_CNTL_ALS_INT_ENBL 0X10
+#define TSL2X7X_CNTL_WAIT_TMR_ENBL 0X08
+#define TSL2X7X_CNTL_PROX_DET_ENBL 0X04
+#define TSL2X7X_CNTL_PWRON 0x01
+#define TSL2X7X_CNTL_ALSPON_ENBL 0x03
+#define TSL2X7X_CNTL_INTALSPON_ENBL 0x13
+#define TSL2X7X_CNTL_PROXPON_ENBL 0x0F
+#define TSL2X7X_CNTL_INTPROXPON_ENBL 0x2F
/*Prox diode to use */
-#define TSL2X7X_DIODE0 0x10
-#define TSL2X7X_DIODE1 0x20
-#define TSL2X7X_DIODE_BOTH 0x30
+#define TSL2X7X_DIODE0 0x10
+#define TSL2X7X_DIODE1 0x20
+#define TSL2X7X_DIODE_BOTH 0x30
/* LED Power */
-#define TSL2X7X_mA100 0x00
-#define TSL2X7X_mA50 0x40
-#define TSL2X7X_mA25 0x80
-#define TSL2X7X_mA13 0xD0
-#define TSL2X7X_MAX_TIMER_CNT (0xFF)
+#define TSL2X7X_100_mA 0x00
+#define TSL2X7X_50_mA 0x40
+#define TSL2X7X_25_mA 0x80
+#define TSL2X7X_13_mA 0xD0
+#define TSL2X7X_MAX_TIMER_CNT 0xFF
-#define TSL2X7X_MIN_ITIME 3
+#define TSL2X7X_MIN_ITIME 3
/* TAOS txx2x7x Device family members */
enum {
@@ -142,11 +142,6 @@ enum {
TSL2X7X_CHIP_SUSPENDED = 2
};
-struct tsl2x7x_parse_result {
- int integer;
- int fract;
-};
-
/* Per-device data */
struct tsl2x7x_als_info {
u16 als_ch0;
@@ -174,7 +169,7 @@ struct tsl2X7X_chip {
struct i2c_client *client;
u16 prox_data;
struct tsl2x7x_als_info als_cur_info;
- struct tsl2x7x_settings tsl2x7x_settings;
+ struct tsl2x7x_settings settings;
struct tsl2X7X_platform_data *pdata;
int als_time_scale;
int als_saturation;
@@ -192,25 +187,25 @@ struct tsl2X7X_chip {
};
/* Different devices require different coefficents */
-static const struct tsl2x7x_lux tsl2x71_lux_table[] = {
+static const struct tsl2x7x_lux tsl2x71_lux_table[TSL2X7X_DEF_LUX_TABLE_SZ] = {
{ 14461, 611, 1211 },
{ 18540, 352, 623 },
{ 0, 0, 0 },
};
-static const struct tsl2x7x_lux tmd2x71_lux_table[] = {
+static const struct tsl2x7x_lux tmd2x71_lux_table[TSL2X7X_DEF_LUX_TABLE_SZ] = {
{ 11635, 115, 256 },
{ 15536, 87, 179 },
{ 0, 0, 0 },
};
-static const struct tsl2x7x_lux tsl2x72_lux_table[] = {
+static const struct tsl2x7x_lux tsl2x72_lux_table[TSL2X7X_DEF_LUX_TABLE_SZ] = {
{ 14013, 466, 917 },
{ 18222, 310, 552 },
{ 0, 0, 0 },
};
-static const struct tsl2x7x_lux tmd2x72_lux_table[] = {
+static const struct tsl2x7x_lux tmd2x72_lux_table[TSL2X7X_DEF_LUX_TABLE_SZ] = {
{ 13218, 130, 262 },
{ 17592, 92, 169 },
{ 0, 0, 0 },
@@ -248,14 +243,14 @@ static const struct tsl2x7x_settings tsl2x7x_default_settings = {
.prox_pulse_count = 8
};
-static const s16 tsl2X7X_als_gainadj[] = {
+static const s16 tsl2x7x_als_gain[] = {
1,
8,
16,
120
};
-static const s16 tsl2X7X_prx_gainadj[] = {
+static const s16 tsl2x7x_prx_gain[] = {
1,
2,
4,
@@ -353,9 +348,9 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
/* clear any existing interrupt status */
ret = i2c_smbus_write_byte(chip->client,
- (TSL2X7X_CMD_REG |
+ TSL2X7X_CMD_REG |
TSL2X7X_CMD_SPL_FN |
- TSL2X7X_CMD_ALS_INT_CLR));
+ TSL2X7X_CMD_ALS_INT_CLR);
if (ret < 0) {
dev_err(&chip->client->dev,
"i2c_write_command failed - err = %d\n", ret);
@@ -369,7 +364,7 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
chip->als_cur_info.als_ch0 = ch0;
chip->als_cur_info.als_ch1 = ch1;
- if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation)) {
+ if (ch0 >= chip->als_saturation || ch1 >= chip->als_saturation) {
lux = TSL2X7X_LUX_CALC_OVER_FLOW;
goto return_max;
}
@@ -389,11 +384,10 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
if (p->ratio == 0) {
lux = 0;
} else {
- ch0lux = DIV_ROUND_UP(ch0 * p->ch0,
- tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain]);
- ch1lux = DIV_ROUND_UP(ch1 * p->ch1,
- tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain]);
- lux = ch0lux - ch1lux;
+ lux = DIV_ROUND_UP(ch0 * p->ch0,
+ tsl2x7x_als_gain[chip->settings.als_gain]) -
+ DIV_ROUND_UP(ch1 * p->ch1,
+ tsl2x7x_als_gain[chip->settings.als_gain]);
}
/* note: lux is 31 bit max at this point */
@@ -419,7 +413,7 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
*/
lux64 = lux;
- lux64 = lux64 * chip->tsl2x7x_settings.als_gain_trim;
+ lux64 = lux64 * chip->settings.als_gain_trim;
lux64 >>= 8;
lux = lux64;
lux = (lux + 500) / 1000;
@@ -472,7 +466,7 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
case tmd2771:
if (!(ret & TSL2X7X_STA_ADC_VALID))
goto prox_poll_err;
- break;
+ break;
case tsl2572:
case tsl2672:
case tmd2672:
@@ -480,7 +474,7 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
case tmd2772:
if (!(ret & TSL2X7X_STA_PRX_VALID))
goto prox_poll_err;
- break;
+ break;
}
for (i = 0; i < 2; i++) {
@@ -514,12 +508,10 @@ static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
{
/* If Operational settings defined elsewhere.. */
if (chip->pdata && chip->pdata->platform_default_settings)
- memcpy(&chip->tsl2x7x_settings,
- chip->pdata->platform_default_settings,
+ memcpy(&chip->settings, chip->pdata->platform_default_settings,
sizeof(tsl2x7x_default_settings));
else
- memcpy(&chip->tsl2x7x_settings,
- &tsl2x7x_default_settings,
+ memcpy(&chip->settings, &tsl2x7x_default_settings,
sizeof(tsl2x7x_default_settings));
/* Load up the proper lux table. */
@@ -529,8 +521,8 @@ static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
sizeof(chip->pdata->platform_lux_table));
else
memcpy(chip->tsl2x7x_device_lux,
- (struct tsl2x7x_lux *)tsl2x7x_default_lux_table_group[chip->id],
- MAX_DEFAULT_TABLE_BYTES);
+ tsl2x7x_default_lux_table_group[chip->id],
+ TSL2X7X_DEFAULT_TABLE_BYTES);
}
/**
@@ -542,9 +534,7 @@ static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int gain_trim_val;
- int ret;
- int lux_val;
+ int ret, lux_val;
ret = i2c_smbus_read_byte_data(chip->client,
TSL2X7X_CMD_REG | TSL2X7X_CNTRL);
@@ -575,16 +565,16 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
return lux_val;
}
- gain_trim_val = ((chip->tsl2x7x_settings.als_cal_target)
- * chip->tsl2x7x_settings.als_gain_trim) / lux_val;
- if ((gain_trim_val < 250) || (gain_trim_val > 4000))
+ ret = (chip->settings.als_cal_target * chip->settings.als_gain_trim) /
+ lux_val;
+ if (ret < 250 || ret > 4000)
return -ERANGE;
- chip->tsl2x7x_settings.als_gain_trim = gain_trim_val;
+ chip->settings.als_gain_trim = ret;
dev_info(&chip->client->dev,
"%s als_calibrate completed\n", chip->client->name);
- return (int)gain_trim_val;
+ return ret;
}
static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
@@ -602,34 +592,30 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
chip->pdata->power_on(indio_dev);
/* Non calculated parameters */
- chip->tsl2x7x_config[TSL2X7X_PRX_TIME] =
- chip->tsl2x7x_settings.prx_time;
- chip->tsl2x7x_config[TSL2X7X_WAIT_TIME] =
- chip->tsl2x7x_settings.wait_time;
- chip->tsl2x7x_config[TSL2X7X_PRX_CONFIG] =
- chip->tsl2x7x_settings.prox_config;
+ chip->tsl2x7x_config[TSL2X7X_PRX_TIME] = chip->settings.prx_time;
+ chip->tsl2x7x_config[TSL2X7X_WAIT_TIME] = chip->settings.wait_time;
+ chip->tsl2x7x_config[TSL2X7X_PRX_CONFIG] = chip->settings.prox_config;
chip->tsl2x7x_config[TSL2X7X_ALS_MINTHRESHLO] =
- (chip->tsl2x7x_settings.als_thresh_low) & 0xFF;
+ (chip->settings.als_thresh_low) & 0xFF;
chip->tsl2x7x_config[TSL2X7X_ALS_MINTHRESHHI] =
- (chip->tsl2x7x_settings.als_thresh_low >> 8) & 0xFF;
+ (chip->settings.als_thresh_low >> 8) & 0xFF;
chip->tsl2x7x_config[TSL2X7X_ALS_MAXTHRESHLO] =
- (chip->tsl2x7x_settings.als_thresh_high) & 0xFF;
+ (chip->settings.als_thresh_high) & 0xFF;
chip->tsl2x7x_config[TSL2X7X_ALS_MAXTHRESHHI] =
- (chip->tsl2x7x_settings.als_thresh_high >> 8) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_PERSISTENCE] =
- chip->tsl2x7x_settings.persistence;
+ (chip->settings.als_thresh_high >> 8) & 0xFF;
+ chip->tsl2x7x_config[TSL2X7X_PERSISTENCE] = chip->settings.persistence;
chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
- chip->tsl2x7x_settings.prox_pulse_count;
+ chip->settings.prox_pulse_count;
chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
- (chip->tsl2x7x_settings.prox_thres_low) & 0xFF;
+ (chip->settings.prox_thres_low) & 0xFF;
chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
- (chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF;
+ (chip->settings.prox_thres_low >> 8) & 0xFF;
chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
- (chip->tsl2x7x_settings.prox_thres_high) & 0xFF;
+ (chip->settings.prox_thres_high) & 0xFF;
chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
- (chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF;
+ (chip->settings.prox_thres_high >> 8) & 0xFF;
/* and make sure we're not already on */
if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
@@ -639,7 +625,7 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
}
/* determine als integration register */
- als_count = (chip->tsl2x7x_settings.als_time * 100 + 135) / 270;
+ als_count = (chip->settings.als_time * 100 + 135) / 270;
if (!als_count)
als_count = 1; /* ensure at least one cycle */
@@ -649,9 +635,9 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
/* Set the gain based on tsl2x7x_settings struct */
chip->tsl2x7x_config[TSL2X7X_GAIN] =
- chip->tsl2x7x_settings.als_gain |
- (TSL2X7X_mA100 | TSL2X7X_DIODE1)
- | ((chip->tsl2x7x_settings.prox_gain) << 2);
+ chip->settings.als_gain |
+ (TSL2X7X_100_mA | TSL2X7X_DIODE1) |
+ (chip->settings.prox_gain << 2);
/* set chip struct re scaling and saturation */
chip->als_saturation = als_count * 922; /* 90% of full scale */
@@ -706,18 +692,18 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
chip->tsl2x7x_chip_status = TSL2X7X_CHIP_WORKING;
- if (chip->tsl2x7x_settings.interrupts_en != 0) {
+ if (chip->settings.interrupts_en != 0) {
dev_info(&chip->client->dev, "Setting Up Interrupt(s)\n");
reg_val = TSL2X7X_CNTL_PWR_ON | TSL2X7X_CNTL_ADC_ENBL;
- if ((chip->tsl2x7x_settings.interrupts_en == 0x20) ||
- (chip->tsl2x7x_settings.interrupts_en == 0x30))
+ if (chip->settings.interrupts_en == 0x20 ||
+ chip->settings.interrupts_en == 0x30)
reg_val |= TSL2X7X_CNTL_PROX_DET_ENBL;
- reg_val |= chip->tsl2x7x_settings.interrupts_en;
+ reg_val |= chip->settings.interrupts_en;
ret = i2c_smbus_write_byte_data(chip->client,
- (TSL2X7X_CMD_REG |
- TSL2X7X_CNTRL), reg_val);
+ TSL2X7X_CMD_REG | TSL2X7X_CNTRL,
+ reg_val);
if (ret < 0)
dev_err(&chip->client->dev,
"%s: failed in tsl2x7x_IOCTL_INT_SET.\n",
@@ -766,8 +752,7 @@ static int tsl2x7x_chip_off(struct iio_dev *indio_dev)
* put device back into proper state, and unlock
* resource.
*/
-static
-int tsl2x7x_invoke_change(struct iio_dev *indio_dev)
+static int tsl2x7x_invoke_change(struct iio_dev *indio_dev)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
int device_status = chip->tsl2x7x_chip_status;
@@ -791,9 +776,8 @@ unlock:
return ret;
}
-static
-void tsl2x7x_prox_calculate(int *data, int length,
- struct tsl2x7x_prox_stat *statP)
+static void tsl2x7x_prox_calculate(int *data, int length,
+ struct tsl2x7x_prox_stat *statP)
{
int i;
int sample_sum;
@@ -837,25 +821,25 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
u8 tmp_irq_settings;
u8 current_state = chip->tsl2x7x_chip_status;
- if (chip->tsl2x7x_settings.prox_max_samples_cal > MAX_SAMPLES_CAL) {
+ if (chip->settings.prox_max_samples_cal > MAX_SAMPLES_CAL) {
dev_err(&chip->client->dev,
"max prox samples cal is too big: %d\n",
- chip->tsl2x7x_settings.prox_max_samples_cal);
- chip->tsl2x7x_settings.prox_max_samples_cal = MAX_SAMPLES_CAL;
+ chip->settings.prox_max_samples_cal);
+ chip->settings.prox_max_samples_cal = MAX_SAMPLES_CAL;
}
/* have to stop to change settings */
tsl2x7x_chip_off(indio_dev);
/* Enable proximity detection save just in case prox not wanted yet*/
- tmp_irq_settings = chip->tsl2x7x_settings.interrupts_en;
- chip->tsl2x7x_settings.interrupts_en |= TSL2X7X_CNTL_PROX_INT_ENBL;
+ tmp_irq_settings = chip->settings.interrupts_en;
+ chip->settings.interrupts_en |= TSL2X7X_CNTL_PROX_INT_ENBL;
/*turn on device if not already on*/
tsl2x7x_chip_on(indio_dev);
/*gather the samples*/
- for (i = 0; i < chip->tsl2x7x_settings.prox_max_samples_cal; i++) {
+ for (i = 0; i < chip->settings.prox_max_samples_cal; i++) {
usleep_range(15000, 17500);
tsl2x7x_get_prox(indio_dev);
prox_history[i] = chip->prox_data;
@@ -866,25 +850,25 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
tsl2x7x_chip_off(indio_dev);
calP = &prox_stat_data[PROX_STAT_CAL];
tsl2x7x_prox_calculate(prox_history,
- chip->tsl2x7x_settings.prox_max_samples_cal,
- calP);
- chip->tsl2x7x_settings.prox_thres_high = (calP->max << 1) - calP->mean;
+ chip->settings.prox_max_samples_cal, calP);
+ chip->settings.prox_thres_high = (calP->max << 1) - calP->mean;
dev_info(&chip->client->dev, " cal min=%d mean=%d max=%d\n",
calP->min, calP->mean, calP->max);
dev_info(&chip->client->dev,
"%s proximity threshold set to %d\n",
- chip->client->name, chip->tsl2x7x_settings.prox_thres_high);
+ chip->client->name, chip->settings.prox_thres_high);
/* back to the way they were */
- chip->tsl2x7x_settings.interrupts_en = tmp_irq_settings;
+ chip->settings.interrupts_en = tmp_irq_settings;
if (current_state == TSL2X7X_CHIP_WORKING)
tsl2x7x_chip_on(indio_dev);
}
-static ssize_t in_illuminance0_calibscale_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+in_illuminance0_calibscale_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
@@ -900,69 +884,23 @@ static ssize_t in_illuminance0_calibscale_available_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n", "1 8 16 120");
}
-static ssize_t in_proximity0_calibscale_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", "1 2 4 8");
-}
-
-static ssize_t in_illuminance0_integration_time_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
- int y, z;
-
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
- y /= 1000;
- z %= 1000;
-
- return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
-}
-
-static ssize_t in_illuminance0_integration_time_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- struct tsl2x7x_parse_result result;
- int ret;
-
- ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
- if (ret)
- return ret;
-
- result.fract /= 3;
- chip->tsl2x7x_settings.als_time =
- TSL2X7X_MAX_TIMER_CNT - (u8)result.fract;
-
- dev_info(&chip->client->dev, "%s: als time = %d",
- __func__, chip->tsl2x7x_settings.als_time);
-
- tsl2x7x_invoke_change(indio_dev);
-
- return IIO_VAL_INT_PLUS_MICRO;
-}
+static IIO_CONST_ATTR(in_proximity0_calibscale_available, "1 2 4 8");
static IIO_CONST_ATTR(in_illuminance0_integration_time_available,
".00272 - .696");
static ssize_t in_illuminance0_target_input_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n",
- chip->tsl2x7x_settings.als_cal_target);
+ return snprintf(buf, PAGE_SIZE, "%d\n", chip->settings.als_cal_target);
}
static ssize_t in_illuminance0_target_input_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -973,7 +911,7 @@ static ssize_t in_illuminance0_target_input_store(struct device *dev,
return -EINVAL;
if (value)
- chip->tsl2x7x_settings.als_cal_target = value;
+ chip->settings.als_cal_target = value;
ret = tsl2x7x_invoke_change(indio_dev);
if (ret < 0)
@@ -982,111 +920,9 @@ static ssize_t in_illuminance0_target_input_store(struct device *dev,
return len;
}
-/* persistence settings */
-static ssize_t in_intensity0_thresh_period_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
- int y, z, filter_delay;
-
- /* Determine integration time */
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
- filter_delay = z * (chip->tsl2x7x_settings.persistence & 0x0F);
- y = filter_delay / 1000;
- z = filter_delay % 1000;
-
- return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
-}
-
-static ssize_t in_intensity0_thresh_period_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- struct tsl2x7x_parse_result result;
- int y, z, filter_delay;
- int ret;
-
- ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
- if (ret)
- return ret;
-
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
-
- filter_delay =
- DIV_ROUND_UP((result.integer * 1000) + result.fract, z);
-
- chip->tsl2x7x_settings.persistence &= 0xF0;
- chip->tsl2x7x_settings.persistence |= (filter_delay & 0x0F);
-
- dev_info(&chip->client->dev, "%s: als persistence = %d",
- __func__, filter_delay);
-
- ret = tsl2x7x_invoke_change(indio_dev);
- if (ret < 0)
- return ret;
-
- return IIO_VAL_INT_PLUS_MICRO;
-}
-
-static ssize_t in_proximity0_thresh_period_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
- int y, z, filter_delay;
-
- /* Determine integration time */
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.prx_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
- filter_delay = z * ((chip->tsl2x7x_settings.persistence & 0xF0) >> 4);
- y = filter_delay / 1000;
- z = filter_delay % 1000;
-
- return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
-}
-
-static ssize_t in_proximity0_thresh_period_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- struct tsl2x7x_parse_result result;
- int y, z, filter_delay;
- int ret;
-
- ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
- if (ret)
- return ret;
-
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.prx_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
-
- filter_delay =
- DIV_ROUND_UP((result.integer * 1000) + result.fract, z);
-
- chip->tsl2x7x_settings.persistence &= 0x0F;
- chip->tsl2x7x_settings.persistence |= ((filter_delay << 4) & 0xF0);
-
- dev_info(&chip->client->dev, "%s: prox persistence = %d",
- __func__, filter_delay);
-
- ret = tsl2x7x_invoke_change(indio_dev);
- if (ret < 0)
- return ret;
-
-
- return IIO_VAL_INT_PLUS_MICRO;
-}
-
static ssize_t in_illuminance0_calibrate_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool value;
@@ -1106,14 +942,14 @@ static ssize_t in_illuminance0_calibrate_store(struct device *dev,
}
static ssize_t in_illuminance0_lux_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
int i = 0;
int offset = 0;
- while (i < (TSL2X7X_MAX_LUX_TABLE_SIZE * 3)) {
+ while (i < TSL2X7X_MAX_LUX_TABLE_SIZE) {
offset += snprintf(buf + offset, PAGE_SIZE, "%u,%u,%u,",
chip->tsl2x7x_device_lux[i].ratio,
chip->tsl2x7x_device_lux[i].ch0,
@@ -1134,8 +970,8 @@ static ssize_t in_illuminance0_lux_table_show(struct device *dev,
}
static ssize_t in_illuminance0_lux_table_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1176,8 +1012,8 @@ static ssize_t in_illuminance0_lux_table_store(struct device *dev,
}
static ssize_t in_proximity0_calibrate_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool value;
@@ -1205,9 +1041,9 @@ static int tsl2x7x_read_interrupt_config(struct iio_dev *indio_dev,
int ret;
if (chan->type == IIO_INTENSITY)
- ret = !!(chip->tsl2x7x_settings.interrupts_en & 0x10);
+ ret = !!(chip->settings.interrupts_en & 0x10);
else
- ret = !!(chip->tsl2x7x_settings.interrupts_en & 0x20);
+ ret = !!(chip->settings.interrupts_en & 0x20);
return ret;
}
@@ -1223,14 +1059,14 @@ static int tsl2x7x_write_interrupt_config(struct iio_dev *indio_dev,
if (chan->type == IIO_INTENSITY) {
if (val)
- chip->tsl2x7x_settings.interrupts_en |= 0x10;
+ chip->settings.interrupts_en |= 0x10;
else
- chip->tsl2x7x_settings.interrupts_en &= 0x20;
+ chip->settings.interrupts_en &= 0x20;
} else {
if (val)
- chip->tsl2x7x_settings.interrupts_en |= 0x20;
+ chip->settings.interrupts_en |= 0x20;
else
- chip->tsl2x7x_settings.interrupts_en &= 0x10;
+ chip->settings.interrupts_en &= 0x10;
}
ret = tsl2x7x_invoke_change(indio_dev);
@@ -1248,18 +1084,19 @@ static int tsl2x7x_write_event_value(struct iio_dev *indio_dev,
int val, int val2)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int ret = -EINVAL;
+ int ret = -EINVAL, y, z, filter_delay;
+ u8 time;
switch (info) {
case IIO_EV_INFO_VALUE:
if (chan->type == IIO_INTENSITY) {
switch (dir) {
case IIO_EV_DIR_RISING:
- chip->tsl2x7x_settings.als_thresh_high = val;
+ chip->settings.als_thresh_high = val;
ret = 0;
break;
case IIO_EV_DIR_FALLING:
- chip->tsl2x7x_settings.als_thresh_low = val;
+ chip->settings.als_thresh_low = val;
ret = 0;
break;
default:
@@ -1268,11 +1105,11 @@ static int tsl2x7x_write_event_value(struct iio_dev *indio_dev,
} else {
switch (dir) {
case IIO_EV_DIR_RISING:
- chip->tsl2x7x_settings.prox_thres_high = val;
+ chip->settings.prox_thres_high = val;
ret = 0;
break;
case IIO_EV_DIR_FALLING:
- chip->tsl2x7x_settings.prox_thres_low = val;
+ chip->settings.prox_thres_low = val;
ret = 0;
break;
default:
@@ -1280,6 +1117,33 @@ static int tsl2x7x_write_event_value(struct iio_dev *indio_dev,
}
}
break;
+ case IIO_EV_INFO_PERIOD:
+ if (chan->type == IIO_INTENSITY)
+ time = chip->settings.als_time;
+ else
+ time = chip->settings.prx_time;
+
+ y = (TSL2X7X_MAX_TIMER_CNT - time) + 1;
+ z = y * TSL2X7X_MIN_ITIME;
+
+ filter_delay = DIV_ROUND_UP((val * 1000) + val2, z);
+
+ if (chan->type == IIO_INTENSITY) {
+ chip->settings.persistence &= 0xF0;
+ chip->settings.persistence |=
+ (filter_delay & 0x0F);
+ dev_info(&chip->client->dev, "%s: ALS persistence = %d",
+ __func__, filter_delay);
+ } else {
+ chip->settings.persistence &= 0x0F;
+ chip->settings.persistence |=
+ ((filter_delay << 4) & 0xF0);
+ dev_info(&chip->client->dev,
+ "%s: Proximity persistence = %d",
+ __func__, filter_delay);
+ }
+ ret = 0;
+ break;
default:
break;
}
@@ -1298,18 +1162,19 @@ static int tsl2x7x_read_event_value(struct iio_dev *indio_dev,
int *val, int *val2)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int ret = -EINVAL;
+ int ret = -EINVAL, filter_delay, mult;
+ u8 time;
switch (info) {
case IIO_EV_INFO_VALUE:
if (chan->type == IIO_INTENSITY) {
switch (dir) {
case IIO_EV_DIR_RISING:
- *val = chip->tsl2x7x_settings.als_thresh_high;
+ *val = chip->settings.als_thresh_high;
ret = IIO_VAL_INT;
break;
case IIO_EV_DIR_FALLING:
- *val = chip->tsl2x7x_settings.als_thresh_low;
+ *val = chip->settings.als_thresh_low;
ret = IIO_VAL_INT;
break;
default:
@@ -1318,11 +1183,11 @@ static int tsl2x7x_read_event_value(struct iio_dev *indio_dev,
} else {
switch (dir) {
case IIO_EV_DIR_RISING:
- *val = chip->tsl2x7x_settings.prox_thres_high;
+ *val = chip->settings.prox_thres_high;
ret = IIO_VAL_INT;
break;
case IIO_EV_DIR_FALLING:
- *val = chip->tsl2x7x_settings.prox_thres_low;
+ *val = chip->settings.prox_thres_low;
ret = IIO_VAL_INT;
break;
default:
@@ -1330,6 +1195,23 @@ static int tsl2x7x_read_event_value(struct iio_dev *indio_dev,
}
}
break;
+ case IIO_EV_INFO_PERIOD:
+ if (chan->type == IIO_INTENSITY) {
+ time = chip->settings.als_time;
+ mult = chip->settings.persistence & 0x0F;
+ } else {
+ time = chip->settings.prx_time;
+ mult = (chip->settings.persistence & 0xF0) >> 4;
+ }
+
+ /* Determine integration time */
+ *val = (TSL2X7X_MAX_TIMER_CNT - time) + 1;
+ *val2 = *val * TSL2X7X_MIN_ITIME;
+ filter_delay = *val2 * mult;
+ *val = filter_delay / 1000;
+ *val2 = filter_delay % 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
default:
break;
}
@@ -1379,18 +1261,20 @@ static int tsl2x7x_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_CALIBSCALE:
if (chan->type == IIO_LIGHT)
- *val =
- tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain];
+ *val = tsl2x7x_als_gain[chip->settings.als_gain];
else
- *val =
- tsl2X7X_prx_gainadj[chip->tsl2x7x_settings.prox_gain];
+ *val = tsl2x7x_prx_gain[chip->settings.prox_gain];
ret = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_CALIBBIAS:
- *val = chip->tsl2x7x_settings.als_gain_trim;
+ *val = chip->settings.als_gain_trim;
ret = IIO_VAL_INT;
break;
-
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = (TSL2X7X_MAX_TIMER_CNT - chip->settings.als_time) + 1;
+ *val2 = ((*val * TSL2X7X_MIN_ITIME) % 1000) / 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
default:
ret = -EINVAL;
}
@@ -1411,13 +1295,13 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
if (chan->type == IIO_INTENSITY) {
switch (val) {
case 1:
- chip->tsl2x7x_settings.als_gain = 0;
+ chip->settings.als_gain = 0;
break;
case 8:
- chip->tsl2x7x_settings.als_gain = 1;
+ chip->settings.als_gain = 1;
break;
case 16:
- chip->tsl2x7x_settings.als_gain = 2;
+ chip->settings.als_gain = 2;
break;
case 120:
switch (chip->id) {
@@ -1428,7 +1312,7 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
case tmd2772:
return -EINVAL;
}
- chip->tsl2x7x_settings.als_gain = 3;
+ chip->settings.als_gain = 3;
break;
case 128:
switch (chip->id) {
@@ -1439,7 +1323,7 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
case tmd2771:
return -EINVAL;
}
- chip->tsl2x7x_settings.als_gain = 3;
+ chip->settings.als_gain = 3;
break;
default:
return -EINVAL;
@@ -1447,16 +1331,16 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
} else {
switch (val) {
case 1:
- chip->tsl2x7x_settings.prox_gain = 0;
+ chip->settings.prox_gain = 0;
break;
case 2:
- chip->tsl2x7x_settings.prox_gain = 1;
+ chip->settings.prox_gain = 1;
break;
case 4:
- chip->tsl2x7x_settings.prox_gain = 2;
+ chip->settings.prox_gain = 2;
break;
case 8:
- chip->tsl2x7x_settings.prox_gain = 3;
+ chip->settings.prox_gain = 3;
break;
default:
return -EINVAL;
@@ -1464,9 +1348,15 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
}
break;
case IIO_CHAN_INFO_CALIBBIAS:
- chip->tsl2x7x_settings.als_gain_trim = val;
+ chip->settings.als_gain_trim = val;
break;
+ case IIO_CHAN_INFO_INT_TIME:
+ chip->settings.als_time =
+ TSL2X7X_MAX_TIMER_CNT - (val2 / TSL2X7X_MIN_ITIME);
+ dev_info(&chip->client->dev, "%s: als time = %d",
+ __func__, chip->settings.als_time);
+ break;
default:
return -EINVAL;
}
@@ -1474,12 +1364,8 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
return tsl2x7x_invoke_change(indio_dev);
}
-static DEVICE_ATTR_RO(in_proximity0_calibscale_available);
-
static DEVICE_ATTR_RO(in_illuminance0_calibscale_available);
-static DEVICE_ATTR_RW(in_illuminance0_integration_time);
-
static DEVICE_ATTR_RW(in_illuminance0_target_input);
static DEVICE_ATTR_WO(in_illuminance0_calibrate);
@@ -1488,10 +1374,6 @@ static DEVICE_ATTR_WO(in_proximity0_calibrate);
static DEVICE_ATTR_RW(in_illuminance0_lux_table);
-static DEVICE_ATTR_RW(in_intensity0_thresh_period);
-
-static DEVICE_ATTR_RW(in_proximity0_thresh_period);
-
/* Use the default register values to identify the Taos device */
static int tsl2x7x_device_id(int *id, int target)
{
@@ -1559,7 +1441,6 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
static struct attribute *tsl2x7x_ALS_device_attrs[] = {
&dev_attr_in_illuminance0_calibscale_available.attr,
- &dev_attr_in_illuminance0_integration_time.attr,
&iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
@@ -1574,49 +1455,31 @@ static struct attribute *tsl2x7x_PRX_device_attrs[] = {
static struct attribute *tsl2x7x_ALSPRX_device_attrs[] = {
&dev_attr_in_illuminance0_calibscale_available.attr,
- &dev_attr_in_illuminance0_integration_time.attr,
&iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
&dev_attr_in_illuminance0_lux_table.attr,
- &dev_attr_in_proximity0_calibrate.attr,
+ &iio_const_attr_in_proximity0_calibscale_available.dev_attr.attr,
NULL
};
static struct attribute *tsl2x7x_PRX2_device_attrs[] = {
&dev_attr_in_proximity0_calibrate.attr,
- &dev_attr_in_proximity0_calibscale_available.attr,
+ &iio_const_attr_in_proximity0_calibscale_available.dev_attr.attr,
NULL
};
static struct attribute *tsl2x7x_ALSPRX2_device_attrs[] = {
&dev_attr_in_illuminance0_calibscale_available.attr,
- &dev_attr_in_illuminance0_integration_time.attr,
&iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
&dev_attr_in_illuminance0_lux_table.attr,
&dev_attr_in_proximity0_calibrate.attr,
- &dev_attr_in_proximity0_calibscale_available.attr,
+ &iio_const_attr_in_proximity0_calibscale_available.dev_attr.attr,
NULL
};
-static struct attribute *tsl2X7X_ALS_event_attrs[] = {
- &dev_attr_in_intensity0_thresh_period.attr,
- NULL,
-};
-
-static struct attribute *tsl2X7X_PRX_event_attrs[] = {
- &dev_attr_in_proximity0_thresh_period.attr,
- NULL,
-};
-
-static struct attribute *tsl2X7X_ALSPRX_event_attrs[] = {
- &dev_attr_in_intensity0_thresh_period.attr,
- &dev_attr_in_proximity0_thresh_period.attr,
- NULL,
-};
-
static const struct attribute_group tsl2X7X_device_attr_group_tbl[] = {
[ALS] = {
.attrs = tsl2x7x_ALS_device_attrs,
@@ -1635,26 +1498,9 @@ static const struct attribute_group tsl2X7X_device_attr_group_tbl[] = {
},
};
-static const struct attribute_group tsl2X7X_event_attr_group_tbl[] = {
- [ALS] = {
- .attrs = tsl2X7X_ALS_event_attrs,
- .name = "events",
- },
- [PRX] = {
- .attrs = tsl2X7X_PRX_event_attrs,
- .name = "events",
- },
- [ALSPRX] = {
- .attrs = tsl2X7X_ALSPRX_event_attrs,
- .name = "events",
- },
-};
-
static const struct iio_info tsl2X7X_device_info[] = {
[ALS] = {
.attrs = &tsl2X7X_device_attr_group_tbl[ALS],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[ALS],
- .driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
.read_event_value = &tsl2x7x_read_event_value,
@@ -1664,8 +1510,6 @@ static const struct iio_info tsl2X7X_device_info[] = {
},
[PRX] = {
.attrs = &tsl2X7X_device_attr_group_tbl[PRX],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[PRX],
- .driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
.read_event_value = &tsl2x7x_read_event_value,
@@ -1675,8 +1519,6 @@ static const struct iio_info tsl2X7X_device_info[] = {
},
[ALSPRX] = {
.attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[ALSPRX],
- .driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
.read_event_value = &tsl2x7x_read_event_value,
@@ -1686,8 +1528,6 @@ static const struct iio_info tsl2X7X_device_info[] = {
},
[PRX2] = {
.attrs = &tsl2X7X_device_attr_group_tbl[PRX2],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[PRX],
- .driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
.read_event_value = &tsl2x7x_read_event_value,
@@ -1697,8 +1537,6 @@ static const struct iio_info tsl2X7X_device_info[] = {
},
[ALSPRX2] = {
.attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX2],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[ALSPRX],
- .driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
.read_event_value = &tsl2x7x_read_event_value,
@@ -1719,6 +1557,10 @@ static const struct iio_event_spec tsl2x7x_events[] = {
.dir = IIO_EV_DIR_FALLING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_PERIOD),
},
};
@@ -1729,7 +1571,8 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
.type = IIO_LIGHT,
.indexed = 1,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
}, {
.type = IIO_INTENSITY,
.indexed = 1,
@@ -1768,7 +1611,8 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
.type = IIO_LIGHT,
.indexed = 1,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED)
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
}, {
.type = IIO_INTENSITY,
.indexed = 1,
@@ -1816,7 +1660,8 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
.type = IIO_LIGHT,
.indexed = 1,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
}, {
.type = IIO_INTENSITY,
.indexed = 1,
@@ -1874,7 +1719,7 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
return -EINVAL;
}
- ret = i2c_smbus_write_byte(clientp, (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
+ ret = i2c_smbus_write_byte(clientp, TSL2X7X_CMD_REG | TSL2X7X_CNTRL);
if (ret < 0) {
dev_err(&clientp->dev, "write to cmd reg failed. err = %d\n",
ret);
@@ -1982,7 +1827,7 @@ static int tsl2x7x_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id tsl2x7x_idtable[] = {
+static const struct i2c_device_id tsl2x7x_idtable[] = {
{ "tsl2571", tsl2571 },
{ "tsl2671", tsl2671 },
{ "tmd2671", tmd2671 },
diff --git a/drivers/staging/iio/light/tsl2x7x.h b/drivers/staging/iio/light/tsl2x7x.h
index ecae92211216..df00f2ec1719 100644
--- a/drivers/staging/iio/light/tsl2x7x.h
+++ b/drivers/staging/iio/light/tsl2x7x.h
@@ -23,18 +23,19 @@
#define __TSL2X7X_H
#include <linux/pm.h>
-/* Max number of segments allowable in LUX table */
-#define TSL2X7X_MAX_LUX_TABLE_SIZE 9
-#define MAX_DEFAULT_TABLE_BYTES (sizeof(int) * TSL2X7X_MAX_LUX_TABLE_SIZE)
-
-struct iio_dev;
-
struct tsl2x7x_lux {
unsigned int ratio;
unsigned int ch0;
unsigned int ch1;
};
+/* Max number of segments allowable in LUX table */
+#define TSL2X7X_MAX_LUX_TABLE_SIZE 9
+/* The default LUX tables all have 3 elements. */
+#define TSL2X7X_DEF_LUX_TABLE_SZ 3
+#define TSL2X7X_DEFAULT_TABLE_BYTES (sizeof(struct tsl2x7x_lux) * \
+ TSL2X7X_DEF_LUX_TABLE_SZ)
+
/**
* struct tsl2x7x_default_settings - power on defaults unless
* overridden by platform data.
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index ce26abdeab92..c44eb577dc35 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -80,7 +80,7 @@
* @us: actual spi_device
* @tx: transmit buffer
* @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
+ * @buf_lock: mutex to protect tx, rx and write frequency
**/
struct ade7753_state {
struct spi_device *us;
@@ -107,6 +107,19 @@ static int ade7753_spi_write_reg_8(struct device *dev,
return ret;
}
+static int __ade7753_spi_write_reg_16(struct device *dev, u8 reg_address,
+ u16 value)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ade7753_state *st = iio_priv(indio_dev);
+
+ st->tx[0] = ADE7753_WRITE_REG(reg_address);
+ st->tx[1] = (value >> 8) & 0xFF;
+ st->tx[2] = value & 0xFF;
+
+ return spi_write(st->us, st->tx, 3);
+}
+
static int ade7753_spi_write_reg_16(struct device *dev, u8 reg_address,
u16 value)
{
@@ -115,10 +128,7 @@ static int ade7753_spi_write_reg_16(struct device *dev, u8 reg_address,
struct ade7753_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7753_WRITE_REG(reg_address);
- st->tx[1] = (value >> 8) & 0xFF;
- st->tx[2] = value & 0xFF;
- ret = spi_write(st->us, st->tx, 3);
+ ret = __ade7753_spi_write_reg_16(dev, reg_address, value);
mutex_unlock(&st->buf_lock);
return ret;
@@ -483,7 +493,7 @@ static ssize_t ade7753_write_frequency(struct device *dev,
if (!val)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->buf_lock);
t = 27900 / val;
if (t > 0)
@@ -501,10 +511,10 @@ static ssize_t ade7753_write_frequency(struct device *dev,
reg &= ~(3 << 11);
reg |= t << 11;
- ret = ade7753_spi_write_reg_16(dev, ADE7753_MODE, reg);
+ ret = __ade7753_spi_write_reg_16(dev, ADE7753_MODE, reg);
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->buf_lock);
return ret ? ret : len;
}
@@ -561,7 +571,6 @@ static const struct attribute_group ade7753_attribute_group = {
static const struct iio_info ade7753_info = {
.attrs = &ade7753_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ade7753_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index be0df3fe4230..3a1e342d75fb 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -601,7 +601,6 @@ static const struct attribute_group ade7754_attribute_group = {
static const struct iio_info ade7754_info = {
.attrs = &ade7754_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ade7754_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 40498af4dc46..7b7ffe5ed186 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -827,7 +827,6 @@ static const struct iio_info ade7758_info = {
.attrs = &ade7758_attribute_group,
.read_raw = &ade7758_read_raw,
.write_raw = &ade7758_write_raw,
- .driver_module = THIS_MODULE,
};
static int ade7758_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/meter/ade7758_trigger.c b/drivers/staging/iio/meter/ade7758_trigger.c
index 5b35a7f08f4f..1f0d1a0cf889 100644
--- a/drivers/staging/iio/meter/ade7758_trigger.c
+++ b/drivers/staging/iio/meter/ade7758_trigger.c
@@ -53,7 +53,6 @@ static int ade7758_trig_try_reen(struct iio_trigger *trig)
}
static const struct iio_trigger_ops ade7758_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &ade7758_data_rdy_trigger_set_state,
.try_reenable = &ade7758_trig_try_reen,
};
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 02573c517d9d..d99cf508d8d0 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -60,7 +60,7 @@
/**
* struct ade7759_state - device instance specific data
* @us: actual spi_device
- * @buf_lock: mutex to protect tx and rx
+ * @buf_lock: mutex to protect tx, rx and write frequency
* @tx: transmit buffer
* @rx: receive buffer
**/
@@ -89,19 +89,30 @@ static int ade7759_spi_write_reg_8(struct device *dev,
return ret;
}
-static int ade7759_spi_write_reg_16(struct device *dev,
+/* Unlocked version of ade7759_spi_write_reg_16 function */
+static int __ade7759_spi_write_reg_16(struct device *dev,
u8 reg_address,
u16 value)
{
- int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7759_state *st = iio_priv(indio_dev);
- mutex_lock(&st->buf_lock);
st->tx[0] = ADE7759_WRITE_REG(reg_address);
st->tx[1] = (value >> 8) & 0xFF;
st->tx[2] = value & 0xFF;
- ret = spi_write(st->us, st->tx, 3);
+ return spi_write(st->us, st->tx, 3);
+}
+
+static int ade7759_spi_write_reg_16(struct device *dev,
+ u8 reg_address,
+ u16 value)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ade7759_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ ret = __ade7759_spi_write_reg_16(dev, reg_address, value);
mutex_unlock(&st->buf_lock);
return ret;
@@ -429,7 +440,7 @@ static ssize_t ade7759_write_frequency(struct device *dev,
if (!val)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->buf_lock);
t = 27900 / val;
if (t > 0)
@@ -447,10 +458,10 @@ static ssize_t ade7759_write_frequency(struct device *dev,
reg &= ~(3 << 13);
reg |= t << 13;
- ret = ade7759_spi_write_reg_16(dev, ADE7759_MODE, reg);
+ ret = __ade7759_spi_write_reg_16(dev, ADE7759_MODE, reg);
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->buf_lock);
return ret ? ret : len;
}
@@ -493,7 +504,6 @@ static const struct attribute_group ade7759_attribute_group = {
static const struct iio_info ade7759_info = {
.attrs = &ade7759_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ade7759_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index 70612da64a8b..90d07cdca4b8 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -530,7 +530,6 @@ static const struct attribute_group ade7854_attribute_group = {
static const struct iio_info ade7854_info = {
.attrs = &ade7854_attribute_group,
- .driver_module = THIS_MODULE,
};
int ade7854_probe(struct iio_dev *indio_dev, struct device *dev)
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index a37e199225f4..aa62c64e9bc4 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -98,7 +98,6 @@ static const struct iio_chan_spec ad2s1200_channels[] = {
static const struct iio_info ad2s1200_info = {
.read_raw = ad2s1200_read_raw,
- .driver_module = THIS_MODULE,
};
static int ad2s1200_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 3e00df74b18c..f8baab061eba 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -638,7 +638,6 @@ error_ret:
static const struct iio_info ad2s1210_info = {
.read_raw = ad2s1210_read_raw,
.attrs = &ad2s1210_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
index b2270908f26f..59586947a936 100644
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -48,7 +48,6 @@ error_ret:
static const struct iio_info ad2s90_info = {
.read_raw = ad2s90_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec ad2s90_chan = {
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 4e0b4eedb53d..d80dcf82eba9 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -172,7 +172,6 @@ static int iio_bfin_tmr_get_number(int irq)
}
static const struct iio_trigger_ops iio_bfin_tmr_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = iio_bfin_tmr_set_state,
};
diff --git a/drivers/staging/irda/drivers/ali-ircc.c b/drivers/staging/irda/drivers/ali-ircc.c
index 35f198d83701..589cd01797f4 100644
--- a/drivers/staging/irda/drivers/ali-ircc.c
+++ b/drivers/staging/irda/drivers/ali-ircc.c
@@ -1876,8 +1876,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
self->stamp = ktime_get();
skb = dev_alloc_skb(len+1);
- if (skb == NULL)
- {
+ if (!skb) {
self->netdev->stats.rx_dropped++;
return FALSE;
diff --git a/drivers/staging/irda/drivers/au1k_ir.c b/drivers/staging/irda/drivers/au1k_ir.c
index be4ea6aa57a9..73e3e4b041bf 100644
--- a/drivers/staging/irda/drivers/au1k_ir.c
+++ b/drivers/staging/irda/drivers/au1k_ir.c
@@ -290,8 +290,7 @@ static int au1k_irda_set_speed(struct net_device *dev, int speed)
while (irda_read(aup, IR_STATUS) & (IR_RX_STATUS | IR_TX_STATUS)) {
msleep(20);
if (!timeout--) {
- printk(KERN_ERR "%s: rx/tx disable timeout\n",
- dev->name);
+ netdev_err(dev, "rx/tx disable timeout\n");
break;
}
}
@@ -349,7 +348,7 @@ static int au1k_irda_set_speed(struct net_device *dev, int speed)
IR_RX_ENABLE);
break;
default:
- printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
+ netdev_err(dev, "unsupported speed %x\n", speed);
ret = -EINVAL;
break;
}
@@ -361,18 +360,18 @@ static int au1k_irda_set_speed(struct net_device *dev, int speed)
irda_write(aup, IR_RING_PROMPT, 0);
if (control & (1 << 14)) {
- printk(KERN_ERR "%s: configuration error\n", dev->name);
+ netdev_err(dev, "configuration error\n");
} else {
if (control & (1 << 11))
- printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
+ netdev_debug(dev, "Valid SIR config\n");
if (control & (1 << 12))
- printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
+ netdev_debug(dev, "Valid MIR config\n");
if (control & (1 << 13))
- printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
+ netdev_debug(dev, "Valid FIR config\n");
if (control & (1 << 10))
- printk(KERN_DEBUG "%s TX enabled\n", dev->name);
+ netdev_debug(dev, "TX enabled\n");
if (control & (1 << 9))
- printk(KERN_DEBUG "%s RX enabled\n", dev->name);
+ netdev_debug(dev, "RX enabled\n");
}
return ret;
@@ -584,23 +583,21 @@ static int au1k_irda_start(struct net_device *dev)
retval = au1k_init(dev);
if (retval) {
- printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
+ netdev_err(dev, "error in au1k_init\n");
return retval;
}
retval = request_irq(aup->irq_tx, &au1k_irda_interrupt, 0,
dev->name, dev);
if (retval) {
- printk(KERN_ERR "%s: unable to get IRQ %d\n",
- dev->name, dev->irq);
+ netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
return retval;
}
retval = request_irq(aup->irq_rx, &au1k_irda_interrupt, 0,
dev->name, dev);
if (retval) {
free_irq(aup->irq_tx, dev);
- printk(KERN_ERR "%s: unable to get IRQ %d\n",
- dev->name, dev->irq);
+ netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
return retval;
}
@@ -673,12 +670,12 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
flags = ptxd->flags;
if (flags & AU_OWN) {
- printk(KERN_DEBUG "%s: tx_full\n", dev->name);
+ netdev_debug(dev, "tx_full\n");
netif_stop_queue(dev);
aup->tx_full = 1;
return 1;
} else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
- printk(KERN_DEBUG "%s: tx_full\n", dev->name);
+ netdev_debug(dev, "tx_full\n");
netif_stop_queue(dev);
aup->tx_full = 1;
return 1;
@@ -688,7 +685,7 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
#if 0
if (irda_read(aup, IR_RX_BYTE_CNT) != 0) {
- printk(KERN_DEBUG "tx warning: rx byte cnt %x\n",
+ netdev_debug(dev, "tx warning: rx byte cnt %x\n",
irda_read(aup, IR_RX_BYTE_CNT));
}
#endif
@@ -726,7 +723,7 @@ static void au1k_tx_timeout(struct net_device *dev)
u32 speed;
struct au1k_private *aup = netdev_priv(dev);
- printk(KERN_ERR "%s: tx timeout\n", dev->name);
+ netdev_err(dev, "tx timeout\n");
speed = aup->speed;
aup->speed = 0;
au1k_irda_set_speed(dev, speed);
@@ -751,8 +748,7 @@ static int au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
ret = au1k_irda_set_speed(dev,
rq->ifr_baudrate);
else {
- printk(KERN_ERR "%s ioctl: !netif_running\n",
- dev->name);
+ netdev_err(dev, "ioctl: !netif_running\n");
ret = 0;
}
}
@@ -868,7 +864,7 @@ out3:
out2:
kfree(aup->rx_buff.head);
out1:
- printk(KERN_ERR "au1k_irda_net_init() failed. Returns %d\n", retval);
+ netdev_err(dev, "au1k_irda_net_init() failed. Returns %d\n");
return retval;
}
@@ -934,7 +930,7 @@ static int au1k_irda_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
+ netdev_info(dev, "IrDA: Registered device\n");
return 0;
out4:
diff --git a/drivers/staging/irda/drivers/bfin_sir.c b/drivers/staging/irda/drivers/bfin_sir.c
index 3151b580dbd6..59e409b68349 100644
--- a/drivers/staging/irda/drivers/bfin_sir.c
+++ b/drivers/staging/irda/drivers/bfin_sir.c
@@ -22,6 +22,8 @@ static int max_rate = 57600;
static int max_rate = 115200;
#endif
+static void bfin_sir_rx_dma_timeout(struct timer_list *t);
+
static void turnaround_delay(int mtt)
{
long ticks;
@@ -57,7 +59,7 @@ static void bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device
sp->clk = get_sclk();
#ifdef CONFIG_SIR_BFIN_DMA
sp->tx_done = 1;
- init_timer(&(sp->rx_dma_timer));
+ timer_setup(&sp->rx_dma_timer, bfin_sir_rx_dma_timeout, 0);
#endif
}
@@ -317,10 +319,12 @@ static void bfin_sir_dma_rx_chars(struct net_device *dev)
async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]);
}
-void bfin_sir_rx_dma_timeout(struct net_device *dev)
+static void bfin_sir_rx_dma_timeout(struct timer_list *t)
{
+ struct bfin_sir_port *port = from_timer(port, t, rx_dma_timer);
+ struct net_device *dev = port->dev;
struct bfin_sir_self *self = netdev_priv(dev);
- struct bfin_sir_port *port = self->sir_port;
+
int x_pos, pos;
unsigned long flags;
@@ -405,8 +409,6 @@ static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf);
enable_dma(port->rx_dma_channel);
- port->rx_dma_timer.data = (unsigned long)(dev);
- port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout;
#else
diff --git a/drivers/staging/irda/drivers/esi-sir.c b/drivers/staging/irda/drivers/esi-sir.c
index 019a3e848bcb..eb7aa6430bea 100644
--- a/drivers/staging/irda/drivers/esi-sir.c
+++ b/drivers/staging/irda/drivers/esi-sir.c
@@ -1,5 +1,5 @@
/*********************************************************************
- *
+ *
* Filename: esi.c
* Version: 1.6
* Description: Driver for the Extended Systems JetEye PC dongle
@@ -8,25 +8,25 @@
* Created at: Sat Feb 21 18:54:38 1998
* Modified at: Sun Oct 27 22:01:04 2002
* Modified by: Martin Diehl <mad@mdiehl.de>
- *
+ *
* Copyright (c) 1999 Dag Brattli, <dagb@cs.uit.no>,
* Copyright (c) 1998 Thomas Davis, <ratbert@radiks.net>,
* Copyright (c) 2002 Martin Diehl, <mad@mdiehl.de>,
* All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
+ *
+ * You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
+ *
********************************************************************/
#include <linux/module.h>
@@ -97,7 +97,7 @@ static int esi_change_speed(struct sir_dev *dev, unsigned speed)
{
int ret = 0;
int dtr, rts;
-
+
switch (speed) {
case 19200:
dtr = TRUE;
diff --git a/drivers/staging/irda/drivers/irda-usb.c b/drivers/staging/irda/drivers/irda-usb.c
index 723e49bc4baa..bda6bdc6c70b 100644
--- a/drivers/staging/irda/drivers/irda-usb.c
+++ b/drivers/staging/irda/drivers/irda-usb.c
@@ -117,7 +117,7 @@ static void irda_usb_close(struct irda_usb_cb *self);
static void speed_bulk_callback(struct urb *urb);
static void write_bulk_callback(struct urb *urb);
static void irda_usb_receive(struct urb *urb);
-static void irda_usb_rx_defer_expired(unsigned long data);
+static void irda_usb_rx_defer_expired(struct timer_list *t);
static int irda_usb_net_open(struct net_device *dev);
static int irda_usb_net_close(struct net_device *dev);
static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -334,9 +334,9 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
urb->transfer_flags = 0;
/* Irq disabled -> GFP_ATOMIC */
- if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) {
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
net_warn_ratelimited("%s(), failed Speed URB\n", __func__);
- }
}
/*------------------------------------------------------------------*/
@@ -846,8 +846,7 @@ static void irda_usb_receive(struct urb *urb)
* hot unplug of the dongle...
* Lowest effective timer is 10ms...
* Jean II */
- self->rx_defer_timer.function = irda_usb_rx_defer_expired;
- self->rx_defer_timer.data = (unsigned long) urb;
+ self->rx_defer_timer_urb = urb;
mod_timer(&self->rx_defer_timer,
jiffies + msecs_to_jiffies(10));
@@ -953,20 +952,13 @@ done:
* In case of errors, we want the USB layer to have time to recover.
* Now, it is time to resubmit ouur Rx URB...
*/
-static void irda_usb_rx_defer_expired(unsigned long data)
+static void irda_usb_rx_defer_expired(struct timer_list *t)
{
- struct urb *urb = (struct urb *) data;
+ struct irda_usb_cb *self = from_timer(self, t, rx_defer_timer);
+ struct urb *urb = self->rx_defer_timer_urb;
struct sk_buff *skb = (struct sk_buff *) urb->context;
- struct irda_usb_cb *self;
- struct irda_skb_cb *cb;
struct urb *next_urb;
- /* Find ourselves */
- cb = (struct irda_skb_cb *) skb->cb;
- IRDA_ASSERT(cb != NULL, return;);
- self = (struct irda_usb_cb *) cb->context;
- IRDA_ASSERT(self != NULL, return;);
-
/* Same stuff as when Rx is done, see above... */
next_urb = self->idle_rx_urb;
urb->context = NULL;
@@ -1622,7 +1614,7 @@ static int irda_usb_probe(struct usb_interface *intf,
self = netdev_priv(net);
self->netdev = net;
spin_lock_init(&self->lock);
- init_timer(&self->rx_defer_timer);
+ timer_setup(&self->rx_defer_timer, irda_usb_rx_defer_expired, 0);
self->capability = id->driver_info;
self->needspatch = ((self->capability & IUC_STIR421X) != 0);
diff --git a/drivers/staging/irda/drivers/irda-usb.h b/drivers/staging/irda/drivers/irda-usb.h
index 8ac389fa9348..56ee8c16c5e2 100644
--- a/drivers/staging/irda/drivers/irda-usb.h
+++ b/drivers/staging/irda/drivers/irda-usb.h
@@ -170,5 +170,6 @@ struct irda_usb_cb {
int needspatch; /* device needs firmware patch */
struct timer_list rx_defer_timer; /* Wait for Rx error to clear */
+ struct urb *rx_defer_timer_urb; /* URB attached to rx_defer_timer */
};
diff --git a/drivers/staging/irda/drivers/mcs7780.c b/drivers/staging/irda/drivers/mcs7780.c
index c3f0b254b344..d52e9f4b9770 100644
--- a/drivers/staging/irda/drivers/mcs7780.c
+++ b/drivers/staging/irda/drivers/mcs7780.c
@@ -605,19 +605,22 @@ static int mcs_speed_change(struct mcs_cb *mcs)
if (mcs->new_speed <= 115200) {
rval &= ~MCS_FIR;
- if ((rst = (mcs->speed > 115200)))
+ rst = mcs->speed > 115200;
+ if (rst)
mcs_set_reg(mcs, MCS_MINRXPW_REG, 0);
} else if (mcs->new_speed <= 1152000) {
rval &= ~MCS_FIR;
- if ((rst = !(mcs->speed == 576000 || mcs->speed == 1152000)))
+ rst = !(mcs->speed == 576000 || mcs->speed == 1152000);
+ if (rst)
mcs_set_reg(mcs, MCS_MINRXPW_REG, 5);
} else {
rval |= MCS_FIR;
- if ((rst = (mcs->speed != 4000000)))
+ rst = mcs->speed != 4000000;
+ if (rst)
mcs_set_reg(mcs, MCS_MINRXPW_REG, 5);
}
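The mcs7780 change is a checkpatch cleanup: assignments are hoisted out of if conditions (the same treatment is applied to hash() in irqueue.c later in this series). A self-contained sketch of the before/after shape, with hypothetical names (demo_speed_change, demo_min_rx_pw):

static int demo_min_rx_pw;	/* stand-in for the MCS_MINRXPW_REG write */

static int demo_speed_change(unsigned int old_speed)
{
	int rst;

	/* before:  if ((rst = (old_speed > 115200))) ... */
	rst = old_speed > 115200;	/* after: assignment on its own line */
	if (rst)
		demo_min_rx_pw = 0;

	return rst;
}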
diff --git a/drivers/staging/irda/drivers/vlsi_ir.c b/drivers/staging/irda/drivers/vlsi_ir.c
index 6638784c082e..3dff3c55ddf5 100644
--- a/drivers/staging/irda/drivers/vlsi_ir.c
+++ b/drivers/staging/irda/drivers/vlsi_ir.c
@@ -170,10 +170,10 @@ static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev)
seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state);
seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask);
- seq_printf(seq, "hw registers: ");
+ seq_puts(seq, "hw registers: ");
for (i = 0; i < 0x20; i++)
seq_printf(seq, "%02x", (unsigned)inb((iobase+i)));
- seq_printf(seq, "\n");
+ seq_putc(seq, '\n');
}
static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
@@ -193,7 +193,7 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
if (!netif_running(ndev))
return;
- seq_printf(seq, "\nhw-state:\n");
+ seq_puts(seq, "\nhw-state:\n");
pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte);
seq_printf(seq, "IRMISC:%s%s%s uart%s",
(byte&IRMISC_IRRAIL) ? " irrail" : "",
@@ -274,7 +274,7 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word);
- seq_printf(seq, "\nsw-state:\n");
+ seq_puts(seq, "\nsw-state:\n");
seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud,
(idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
sec = div_s64_rem(ktime_us_delta(ktime_get(), idev->last_rx),
@@ -305,10 +305,10 @@ static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r)
t = atomic_read(&r->tail) & r->mask;
seq_printf(seq, "head = %d / tail = %d ", h, t);
if (h == t)
- seq_printf(seq, "(empty)\n");
+ seq_puts(seq, "(empty)\n");
else {
if (((t+1)&r->mask) == h)
- seq_printf(seq, "(full)\n");
+ seq_puts(seq, "(full)\n");
else
seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask));
rd = &r->rd[h];
@@ -355,13 +355,13 @@ static int vlsi_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "\nPCI controller down - resume_ok = %d\n",
idev->resume_ok);
if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) {
- seq_printf(seq, "\n--------- RX ring -----------\n\n");
+ seq_puts(seq, "\n--------- RX ring -----------\n\n");
vlsi_proc_ring(seq, idev->rx_ring);
- seq_printf(seq, "\n--------- TX ring -----------\n\n");
+ seq_puts(seq, "\n--------- TX ring -----------\n\n");
vlsi_proc_ring(seq, idev->tx_ring);
}
}
- seq_printf(seq, "\n");
+ seq_putc(seq, '\n');
spin_unlock_irqrestore(&idev->lock, flags);
return 0;
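The vlsi_ir hunks only swap output helpers: where the format string contains no conversion specifiers, seq_puts()/seq_putc() skip the printf-style parsing that seq_printf() performs. A small hypothetical seq_file show routine (demo_show) illustrating when each call applies:

#include <linux/seq_file.h>

static int demo_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "hw registers: ");	/* constant string: no format parsing */
	seq_printf(seq, "%02x", 0xab);		/* formatted output still needs seq_printf */
	seq_putc(seq, '\n');			/* single character */
	return 0;
}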
diff --git a/drivers/staging/irda/include/net/irda/irlmp_event.h b/drivers/staging/irda/include/net/irda/irlmp_event.h
index 9e4ec17a7449..a1a082fe384e 100644
--- a/drivers/staging/irda/include/net/irda/irlmp_event.h
+++ b/drivers/staging/irda/include/net/irda/irlmp_event.h
@@ -82,9 +82,9 @@ typedef enum {
extern const char *const irlmp_state[];
extern const char *const irlsap_state[];
-void irlmp_watchdog_timer_expired(void *data);
-void irlmp_discovery_timer_expired(void *data);
-void irlmp_idle_timer_expired(void *data);
+void irlmp_watchdog_timer_expired(struct timer_list *t);
+void irlmp_discovery_timer_expired(struct timer_list *t);
+void irlmp_idle_timer_expired(struct timer_list *t);
void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event,
struct sk_buff *skb);
diff --git a/drivers/staging/irda/include/net/irda/qos.h b/drivers/staging/irda/include/net/irda/qos.h
index 05a5a249956f..a0315b50ac27 100644
--- a/drivers/staging/irda/include/net/irda/qos.h
+++ b/drivers/staging/irda/include/net/irda/qos.h
@@ -58,23 +58,23 @@
#define IR_16000000 0x02
/* Quality of Service information */
-typedef struct {
+struct qos_value {
__u32 value;
__u16 bits; /* LSB is first byte, MSB is second byte */
-} qos_value_t;
+};
struct qos_info {
magic_t magic;
- qos_value_t baud_rate; /* IR_11520O | ... */
- qos_value_t max_turn_time;
- qos_value_t data_size;
- qos_value_t window_size;
- qos_value_t additional_bofs;
- qos_value_t min_turn_time;
- qos_value_t link_disc_time;
+ struct qos_value baud_rate; /* IR_11520O | ... */
+ struct qos_value max_turn_time;
+ struct qos_value data_size;
+ struct qos_value window_size;
+ struct qos_value additional_bofs;
+ struct qos_value min_turn_time;
+ struct qos_value link_disc_time;
- qos_value_t power;
+ struct qos_value power;
};
extern int sysctl_max_baud_rate;
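Dropping qos_value_t follows the kernel coding-style rule against typedefs for plain structures: users now spell the struct tag out, which keeps the aggregate nature of the type visible at every call site. A sketch of the shape of the change, with the real field names but hypothetical type names (qos_value_demo, qos_info_demo):

#include <linux/types.h>

/* old style, now removed:
 *
 *	typedef struct {
 *		__u32 value;
 *		__u16 bits;
 *	} qos_value_t;
 */

struct qos_value_demo {
	__u32 value;
	__u16 bits;		/* LSB is first byte, MSB is second byte */
};

struct qos_info_demo {
	struct qos_value_demo baud_rate;
	struct qos_value_demo max_turn_time;
};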
diff --git a/drivers/staging/irda/include/net/irda/timer.h b/drivers/staging/irda/include/net/irda/timer.h
index d784f242cf7b..a6635f0afae9 100644
--- a/drivers/staging/irda/include/net/irda/timer.h
+++ b/drivers/staging/irda/include/net/irda/timer.h
@@ -72,14 +72,11 @@ struct lap_cb;
#define WATCHDOG_TIMEOUT (20*HZ) /* 20 sec */
-typedef void (*TIMER_CALLBACK)(void *);
-
-static inline void irda_start_timer(struct timer_list *ptimer, int timeout,
- void* data, TIMER_CALLBACK callback)
+static inline void irda_start_timer(struct timer_list *ptimer, int timeout,
+ void (*callback)(struct timer_list *))
{
- ptimer->function = (void (*)(unsigned long)) callback;
- ptimer->data = (unsigned long) data;
-
+ ptimer->function = (TIMER_FUNC_TYPE) callback;
+
/* Set new value for timer (update or add timer).
* We use mod_timer() because it's more efficient and also
* safer with respect to race conditions - Jean II */
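irda_start_timer() loses its void *data argument: handlers locate their owner via from_timer(), and the cast through TIMER_FUNC_TYPE appears to be the transitional shim used while timer_list::function still carried the old unsigned-long prototype. Control blocks further down that call timer_setup(..., NULL, 0) at open time rely on exactly this helper to install the handler when the timer is armed. A caller/handler pair under the new signature might look like this (hypothetical demo_cb and functions, reusing the patched helper above):

struct demo_cb {
	struct timer_list watchdog_timer;
};

static void demo_watchdog_expired(struct timer_list *t)
{
	struct demo_cb *self = from_timer(self, t, watchdog_timer);

	/* timeout handling; no (void *)data cast needed */
}

static void demo_start_watchdog(struct demo_cb *self, int timeout)
{
	irda_start_timer(&self->watchdog_timer, timeout,
			 demo_watchdog_expired);
}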
diff --git a/drivers/staging/irda/net/af_irda.c b/drivers/staging/irda/net/af_irda.c
index 23fa7c8b09a5..b82a47b9ef0b 100644
--- a/drivers/staging/irda/net/af_irda.c
+++ b/drivers/staging/irda/net/af_irda.c
@@ -429,11 +429,11 @@ static void irda_selective_discovery_indication(discinfo_t *discovery,
* We were waiting for a node to be discovered, but nothing has come up
* so far. Wake up the user and tell him that we failed...
*/
-static void irda_discovery_timeout(u_long priv)
+static void irda_discovery_timeout(struct timer_list *t)
{
struct irda_sock *self;
- self = (struct irda_sock *) priv;
+ self = from_timer(self, t, watchdog);
BUG_ON(self == NULL);
/* Nothing for the caller */
@@ -2505,8 +2505,7 @@ bed:
/* Set watchdog timer to expire in <val> ms. */
self->errno = 0;
- setup_timer(&self->watchdog, irda_discovery_timeout,
- (unsigned long)self);
+ timer_setup(&self->watchdog, irda_discovery_timeout, 0);
mod_timer(&self->watchdog,
jiffies + msecs_to_jiffies(val));
diff --git a/drivers/staging/irda/net/discovery.c b/drivers/staging/irda/net/discovery.c
index 364d70aed068..1e54954a4081 100644
--- a/drivers/staging/irda/net/discovery.c
+++ b/drivers/staging/irda/net/discovery.c
@@ -179,7 +179,7 @@ void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force)
/* Create the client specific buffer */
n = HASHBIN_GET_SIZE(log);
buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC);
- if (buffer == NULL) {
+ if (!buffer) {
spin_unlock_irqrestore(&log->hb_spinlock, flags);
return;
}
@@ -291,7 +291,7 @@ struct irda_device_info *irlmp_copy_discoveries(hashbin_t *log, int *pn,
/* Create the client specific buffer */
n = HASHBIN_GET_SIZE(log);
buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC);
- if (buffer == NULL) {
+ if (!buffer) {
spin_unlock_irqrestore(&log->hb_spinlock, flags);
return NULL;
}
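The discovery.c hunks apply another checkpatch idiom: allocation failures are tested with !ptr rather than ptr == NULL. A tiny self-contained sketch (demo_alloc is hypothetical):

#include <linux/errno.h>
#include <linux/slab.h>

static int demo_alloc(void **out)
{
	void *buf = kmalloc(64, GFP_ATOMIC);

	if (!buf)		/* preferred over "buf == NULL" */
		return -ENOMEM;
	*out = buf;
	return 0;
}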
diff --git a/drivers/staging/irda/net/ircomm/ircomm_tty.c b/drivers/staging/irda/net/ircomm/ircomm_tty.c
index ec157c3419b5..473abfaffe7b 100644
--- a/drivers/staging/irda/net/ircomm/ircomm_tty.c
+++ b/drivers/staging/irda/net/ircomm/ircomm_tty.c
@@ -395,7 +395,7 @@ static int ircomm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
/* Init some important stuff */
- init_timer(&self->watchdog_timer);
+ timer_setup(&self->watchdog_timer, NULL, 0);
spin_lock_init(&self->spinlock);
/*
diff --git a/drivers/staging/irda/net/ircomm/ircomm_tty_attach.c b/drivers/staging/irda/net/ircomm/ircomm_tty_attach.c
index 0a411019c098..e2d5ce8ba0db 100644
--- a/drivers/staging/irda/net/ircomm/ircomm_tty_attach.c
+++ b/drivers/staging/irda/net/ircomm/ircomm_tty_attach.c
@@ -52,7 +52,7 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
struct ias_value *value, void *priv);
static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
int timeout);
-static void ircomm_tty_watchdog_timer_expired(void *data);
+static void ircomm_tty_watchdog_timer_expired(struct timer_list *timer);
static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
@@ -587,7 +587,7 @@ static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
- irda_start_timer(&self->watchdog_timer, timeout, (void *) self,
+ irda_start_timer(&self->watchdog_timer, timeout,
ircomm_tty_watchdog_timer_expired);
}
@@ -597,9 +597,9 @@ static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
* Called when the connect procedure has taken too much time.
*
*/
-static void ircomm_tty_watchdog_timer_expired(void *data)
+static void ircomm_tty_watchdog_timer_expired(struct timer_list *t)
{
- struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data;
+ struct ircomm_tty_cb *self = from_timer(self, t, watchdog_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
diff --git a/drivers/staging/irda/net/irda_device.c b/drivers/staging/irda/net/irda_device.c
index 890b90d055d5..682b4eea15e0 100644
--- a/drivers/staging/irda/net/irda_device.c
+++ b/drivers/staging/irda/net/irda_device.c
@@ -54,29 +54,30 @@
static void __irda_task_delete(struct irda_task *task);
-static hashbin_t *dongles = NULL;
-static hashbin_t *tasks = NULL;
+static hashbin_t *dongles;
+static hashbin_t *tasks;
-static void irda_task_timer_expired(void *data);
+static void irda_task_timer_expired(struct timer_list *timer);
-int __init irda_device_init( void)
+int __init irda_device_init(void)
{
dongles = hashbin_new(HB_NOLOCK);
- if (dongles == NULL) {
+ if (!dongles) {
net_warn_ratelimited("IrDA: Can't allocate dongles hashbin!\n");
return -ENOMEM;
}
spin_lock_init(&dongles->hb_spinlock);
tasks = hashbin_new(HB_LOCK);
- if (tasks == NULL) {
+ if (!tasks) {
net_warn_ratelimited("IrDA: Can't allocate tasks hashbin!\n");
hashbin_delete(dongles, NULL);
return -ENOMEM;
}
/* We no longer initialise the driver ourselves here, we let
- * the system do it for us... - Jean II */
+ * the system do it for us... - Jean II
+ */
return 0;
}
@@ -84,6 +85,7 @@ int __init irda_device_init( void)
static void leftover_dongle(void *arg)
{
struct dongle_reg *reg = arg;
+
net_warn_ratelimited("IrDA: Dongle type %x not unregistered\n",
reg->type);
}
@@ -107,7 +109,7 @@ void irda_device_set_media_busy(struct net_device *dev, int status)
pr_debug("%s(%s)\n", __func__, status ? "TRUE" : "FALSE");
- self = (struct irlap_cb *) dev->atalk_ptr;
+ self = (struct irlap_cb *)dev->atalk_ptr;
/* Some drivers may enable the receive interrupt before calling
* irlap_open(), or they may disable the receive interrupt
@@ -115,7 +117,8 @@ void irda_device_set_media_busy(struct net_device *dev, int status)
* The IrDA stack is protected from this in irlap_driver_rcv().
* However, the driver calls directly the wrapper, that calls
* us directly. Make sure we protect ourselves.
- * Jean II */
+ * Jean II
+ */
if (!self || self->magic != LAP_MAGIC)
return;
@@ -133,7 +136,6 @@ void irda_device_set_media_busy(struct net_device *dev, int status)
}
EXPORT_SYMBOL(irda_device_set_media_busy);
-
/*
* Function irda_device_is_receiving (dev)
*
@@ -169,7 +171,7 @@ static void __irda_task_delete(struct irda_task *task)
static void irda_task_delete(struct irda_task *task)
{
/* Unregister task */
- hashbin_remove(tasks, (long) task, NULL);
+ hashbin_remove(tasks, (long)task, NULL);
__irda_task_delete(task);
}
@@ -231,7 +233,7 @@ static int irda_task_kick(struct irda_task *task)
}
irda_task_delete(task);
} else if (timeout > 0) {
- irda_start_timer(&task->timer, timeout, (void *) task,
+ irda_start_timer(&task->timer, timeout,
irda_task_timer_expired);
finished = FALSE;
} else {
@@ -249,11 +251,9 @@ static int irda_task_kick(struct irda_task *task)
* Task time has expired. We now try to execute task (again), and restart
* the timer if the task has not finished yet
*/
-static void irda_task_timer_expired(void *data)
+static void irda_task_timer_expired(struct timer_list *t)
{
- struct irda_task *task;
-
- task = data;
+ struct irda_task *task = from_timer(task, t, timer);
irda_task_kick(task);
}
@@ -280,8 +280,8 @@ static void irda_device_setup(struct net_device *dev)
/*
* Function alloc_irdadev
- * Allocates and sets up an IRDA device in a manner similar to
- * alloc_etherdev.
+ * Allocates and sets up an IRDA device in a manner similar to
+ * alloc_etherdev.
*/
struct net_device *alloc_irdadev(int sizeof_priv)
{
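Alongside the timer conversion, irda_device.c collects several style fixes: file-scope statics drop their redundant "= NULL" initialisers, a blank line follows local declarations, and multi-line comments close with "*/" on its own line. The initialiser rule in isolation (demo_cache is hypothetical):

/* before:  static void *demo_cache = NULL;   (checkpatch: do not initialise statics to NULL) */
static void *demo_cache;	/* static storage is zero-initialised already */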
diff --git a/drivers/staging/irda/net/iriap.c b/drivers/staging/irda/net/iriap.c
index 1138eaf5c682..d64192e9db8b 100644
--- a/drivers/staging/irda/net/iriap.c
+++ b/drivers/staging/irda/net/iriap.c
@@ -76,12 +76,12 @@ static void iriap_connect_confirm(void *instance, void *sap,
static int iriap_data_indication(void *instance, void *sap,
struct sk_buff *skb);
-static void iriap_watchdog_timer_expired(void *data);
+static void iriap_watchdog_timer_expired(struct timer_list *t);
static inline void iriap_start_watchdog_timer(struct iriap_cb *self,
int timeout)
{
- irda_start_timer(&self->watchdog_timer, timeout, self,
+ irda_start_timer(&self->watchdog_timer, timeout,
iriap_watchdog_timer_expired);
}
@@ -199,7 +199,7 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
* we connect, so this must have a sane value... Jean II */
self->max_header_size = LMP_MAX_HEADER;
- init_timer(&self->watchdog_timer);
+ timer_setup(&self->watchdog_timer, NULL, 0);
hashbin_insert(iriap, (irda_queue_t *) self, (long) self, NULL);
@@ -946,9 +946,9 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb)
* Query has taken too long time, so abort
*
*/
-static void iriap_watchdog_timer_expired(void *data)
+static void iriap_watchdog_timer_expired(struct timer_list *t)
{
- struct iriap_cb *self = (struct iriap_cb *) data;
+ struct iriap_cb *self = from_timer(self, t, watchdog_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
diff --git a/drivers/staging/irda/net/irlan/irlan_client.c b/drivers/staging/irda/net/irlan/irlan_client.c
index c5837a40c78e..0b65e80849ae 100644
--- a/drivers/staging/irda/net/irlan/irlan_client.c
+++ b/drivers/staging/irda/net/irlan/irlan_client.c
@@ -68,9 +68,9 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param,
char *value, int val_len);
static void irlan_client_open_ctrl_tsap(struct irlan_cb *self);
-static void irlan_client_kick_timer_expired(void *data)
+static void irlan_client_kick_timer_expired(struct timer_list *t)
{
- struct irlan_cb *self = (struct irlan_cb *) data;
+ struct irlan_cb *self = from_timer(self, t, client.kick_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -89,7 +89,7 @@ static void irlan_client_kick_timer_expired(void *data)
static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout)
{
- irda_start_timer(&self->client.kick_timer, timeout, (void *) self,
+ irda_start_timer(&self->client.kick_timer, timeout,
irlan_client_kick_timer_expired);
}
diff --git a/drivers/staging/irda/net/irlan/irlan_common.c b/drivers/staging/irda/net/irlan/irlan_common.c
index 481bbc2a4349..fdcd7147007d 100644
--- a/drivers/staging/irda/net/irlan/irlan_common.c
+++ b/drivers/staging/irda/net/irlan/irlan_common.c
@@ -228,8 +228,8 @@ static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr)
self->media = MEDIA_802_3;
self->disconnect_reason = LM_USER_REQUEST;
- init_timer(&self->watchdog_timer);
- init_timer(&self->client.kick_timer);
+ timer_setup(&self->watchdog_timer, NULL, 0);
+ timer_setup(&self->client.kick_timer, NULL, 0);
init_waitqueue_head(&self->open_wait);
skb_queue_head_init(&self->client.txq);
diff --git a/drivers/staging/irda/net/irlap.c b/drivers/staging/irda/net/irlap.c
index 1cde711bcab5..d7d894423b4f 100644
--- a/drivers/staging/irda/net/irlap.c
+++ b/drivers/staging/irda/net/irlap.c
@@ -148,14 +148,14 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
/* Copy to the driver */
memcpy(dev->dev_addr, &self->saddr, 4);
- init_timer(&self->slot_timer);
- init_timer(&self->query_timer);
- init_timer(&self->discovery_timer);
- init_timer(&self->final_timer);
- init_timer(&self->poll_timer);
- init_timer(&self->wd_timer);
- init_timer(&self->backoff_timer);
- init_timer(&self->media_busy_timer);
+ timer_setup(&self->slot_timer, NULL, 0);
+ timer_setup(&self->query_timer, NULL, 0);
+ timer_setup(&self->discovery_timer, NULL, 0);
+ timer_setup(&self->final_timer, NULL, 0);
+ timer_setup(&self->poll_timer, NULL, 0);
+ timer_setup(&self->wd_timer, NULL, 0);
+ timer_setup(&self->backoff_timer, NULL, 0);
+ timer_setup(&self->media_busy_timer, NULL, 0);
irlap_apply_default_connection_parameters(self);
diff --git a/drivers/staging/irda/net/irlap_event.c b/drivers/staging/irda/net/irlap_event.c
index 0e1b4d79f745..634188b07e0a 100644
--- a/drivers/staging/irda/net/irlap_event.c
+++ b/drivers/staging/irda/net/irlap_event.c
@@ -163,9 +163,9 @@ static int (*state[])(struct irlap_cb *self, IRLAP_EVENT event,
* Poll timer has expired. Normally we must now send a RR frame to the
* remote device
*/
-static void irlap_poll_timer_expired(void *data)
+static void irlap_poll_timer_expired(struct timer_list *t)
{
- struct irlap_cb *self = (struct irlap_cb *) data;
+ struct irlap_cb *self = from_timer(self, t, poll_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -222,7 +222,7 @@ static void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
if (timeout == 0)
irlap_do_event(self, POLL_TIMER_EXPIRED, NULL, NULL);
else
- irda_start_timer(&self->poll_timer, timeout, self,
+ irda_start_timer(&self->poll_timer, timeout,
irlap_poll_timer_expired);
}
diff --git a/drivers/staging/irda/net/irlmp.c b/drivers/staging/irda/net/irlmp.c
index 43964594aa12..34355061ab0b 100644
--- a/drivers/staging/irda/net/irlmp.c
+++ b/drivers/staging/irda/net/irlmp.c
@@ -109,7 +109,7 @@ int __init irlmp_init(void)
irlmp->last_lsap_sel = 0x0f; /* Reserved 0x00-0x0f */
strcpy(sysctl_devname, "Linux");
- init_timer(&irlmp->discovery_timer);
+ timer_setup(&irlmp->discovery_timer, NULL, 0);
/* Do discovery every 3 seconds, conditionally */
if (sysctl_discovery)
@@ -185,7 +185,7 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid)
self->dlsap_sel = LSAP_ANY;
/* self->connected = FALSE; -> already NULL via memset() */
- init_timer(&self->watchdog_timer);
+ timer_setup(&self->watchdog_timer, NULL, 0);
self->notify = *notify;
@@ -311,7 +311,7 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify)
lap->lap_state = LAP_STANDBY;
- init_timer(&lap->idle_timer);
+ timer_setup(&lap->idle_timer, NULL, 0);
/*
* Insert into queue of LMP links
@@ -655,7 +655,7 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance)
/* Not everything is the same */
new->notify.instance = instance;
- init_timer(&new->watchdog_timer);
+ timer_setup(&new->watchdog_timer, NULL, 0);
hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) new,
(long) new, NULL);
diff --git a/drivers/staging/irda/net/irlmp_event.c b/drivers/staging/irda/net/irlmp_event.c
index e306cf2c1e04..ddad0994b6dc 100644
--- a/drivers/staging/irda/net/irlmp_event.c
+++ b/drivers/staging/irda/net/irlmp_event.c
@@ -165,7 +165,7 @@ void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event,
(*lap_state[self->lap_state]) (self, event, skb);
}
-void irlmp_discovery_timer_expired(void *data)
+void irlmp_discovery_timer_expired(struct timer_list *t)
{
/* We always cleanup the log (active & passive discovery) */
irlmp_do_expiry();
@@ -176,9 +176,9 @@ void irlmp_discovery_timer_expired(void *data)
irlmp_start_discovery_timer(irlmp, sysctl_discovery_timeout * HZ);
}
-void irlmp_watchdog_timer_expired(void *data)
+void irlmp_watchdog_timer_expired(struct timer_list *t)
{
- struct lsap_cb *self = (struct lsap_cb *) data;
+ struct lsap_cb *self = from_timer(self, t, watchdog_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
@@ -186,9 +186,9 @@ void irlmp_watchdog_timer_expired(void *data)
irlmp_do_lsap_event(self, LM_WATCHDOG_TIMEOUT, NULL);
}
-void irlmp_idle_timer_expired(void *data)
+void irlmp_idle_timer_expired(struct timer_list *t)
{
- struct lap_cb *self = (struct lap_cb *) data;
+ struct lap_cb *self = from_timer(self, t, idle_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
diff --git a/drivers/staging/irda/net/irqueue.c b/drivers/staging/irda/net/irqueue.c
index 160dc89335e2..14291cbc4097 100644
--- a/drivers/staging/irda/net/irqueue.c
+++ b/drivers/staging/irda/net/irqueue.c
@@ -217,7 +217,8 @@ static __u32 hash( const char* name)
while(*name) {
h = (h<<4) + *name++;
- if ((g = (h & 0xf0000000)))
+ g = h & 0xf0000000;
+ if (g)
h ^=g>>24;
h &=~g;
}
diff --git a/drivers/staging/irda/net/irttp.c b/drivers/staging/irda/net/irttp.c
index b6ab41d5b3a3..741a94f39b4e 100644
--- a/drivers/staging/irda/net/irttp.c
+++ b/drivers/staging/irda/net/irttp.c
@@ -62,7 +62,6 @@ static void irttp_run_rx_queue(struct tsap_cb *self);
static void irttp_flush_queues(struct tsap_cb *self);
static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb);
static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
-static void irttp_todo_expired(unsigned long data);
static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
int get);
@@ -160,9 +159,9 @@ static inline void irttp_start_todo_timer(struct tsap_cb *self, int timeout)
* killed (need user context), and we can't guarantee that here...
* Jean II
*/
-static void irttp_todo_expired(unsigned long data)
+static void irttp_todo_expired(struct timer_list *t)
{
- struct tsap_cb *self = (struct tsap_cb *) data;
+ struct tsap_cb *self = from_timer(self, t, todo_timer);
/* Check that we still exist */
if (!self || self->magic != TTP_TSAP_MAGIC)
@@ -374,7 +373,7 @@ static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
static void irttp_init_tsap(struct tsap_cb *tsap)
{
spin_lock_init(&tsap->lock);
- init_timer(&tsap->todo_timer);
+ timer_setup(&tsap->todo_timer, irttp_todo_expired, 0);
skb_queue_head_init(&tsap->rx_queue);
skb_queue_head_init(&tsap->tx_queue);
@@ -410,10 +409,6 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
/* Initialize internal objects */
irttp_init_tsap(self);
- /* Initialise todo timer */
- self->todo_timer.data = (unsigned long) self;
- self->todo_timer.function = &irttp_todo_expired;
-
/* Initialize callbacks for IrLMP to use */
irda_notify_init(&ttp_notify);
ttp_notify.connect_confirm = irttp_connect_confirm;
diff --git a/drivers/staging/irda/net/timer.c b/drivers/staging/irda/net/timer.c
index f2280f73b057..cf00c0d848aa 100644
--- a/drivers/staging/irda/net/timer.c
+++ b/drivers/staging/irda/net/timer.c
@@ -34,16 +34,16 @@
extern int sysctl_slot_timeout;
-static void irlap_slot_timer_expired(void* data);
-static void irlap_query_timer_expired(void* data);
-static void irlap_final_timer_expired(void* data);
-static void irlap_wd_timer_expired(void* data);
-static void irlap_backoff_timer_expired(void* data);
-static void irlap_media_busy_expired(void* data);
+static void irlap_slot_timer_expired(struct timer_list *t);
+static void irlap_query_timer_expired(struct timer_list *t);
+static void irlap_final_timer_expired(struct timer_list *t);
+static void irlap_wd_timer_expired(struct timer_list *t);
+static void irlap_backoff_timer_expired(struct timer_list *t);
+static void irlap_media_busy_expired(struct timer_list *t);
void irlap_start_slot_timer(struct irlap_cb *self, int timeout)
{
- irda_start_timer(&self->slot_timer, timeout, (void *) self,
+ irda_start_timer(&self->slot_timer, timeout,
irlap_slot_timer_expired);
}
@@ -66,32 +66,32 @@ void irlap_start_query_timer(struct irlap_cb *self, int S, int s)
/* Set or re-set the timer. We reset the timer for each received
* discovery query, which allows us to automatically adjust to
* the speed of the peer discovery (faster or slower). Jean II */
- irda_start_timer( &self->query_timer, timeout, (void *) self,
+ irda_start_timer(&self->query_timer, timeout,
irlap_query_timer_expired);
}
void irlap_start_final_timer(struct irlap_cb *self, int timeout)
{
- irda_start_timer(&self->final_timer, timeout, (void *) self,
+ irda_start_timer(&self->final_timer, timeout,
irlap_final_timer_expired);
}
void irlap_start_wd_timer(struct irlap_cb *self, int timeout)
{
- irda_start_timer(&self->wd_timer, timeout, (void *) self,
+ irda_start_timer(&self->wd_timer, timeout,
irlap_wd_timer_expired);
}
void irlap_start_backoff_timer(struct irlap_cb *self, int timeout)
{
- irda_start_timer(&self->backoff_timer, timeout, (void *) self,
+ irda_start_timer(&self->backoff_timer, timeout,
irlap_backoff_timer_expired);
}
void irlap_start_mbusy_timer(struct irlap_cb *self, int timeout)
{
irda_start_timer(&self->media_busy_timer, timeout,
- (void *) self, irlap_media_busy_expired);
+ irlap_media_busy_expired);
}
void irlap_stop_mbusy_timer(struct irlap_cb *self)
@@ -110,19 +110,19 @@ void irlap_stop_mbusy_timer(struct irlap_cb *self)
void irlmp_start_watchdog_timer(struct lsap_cb *self, int timeout)
{
- irda_start_timer(&self->watchdog_timer, timeout, (void *) self,
+ irda_start_timer(&self->watchdog_timer, timeout,
irlmp_watchdog_timer_expired);
}
void irlmp_start_discovery_timer(struct irlmp_cb *self, int timeout)
{
- irda_start_timer(&self->discovery_timer, timeout, (void *) self,
+ irda_start_timer(&self->discovery_timer, timeout,
irlmp_discovery_timer_expired);
}
void irlmp_start_idle_timer(struct lap_cb *self, int timeout)
{
- irda_start_timer(&self->idle_timer, timeout, (void *) self,
+ irda_start_timer(&self->idle_timer, timeout,
irlmp_idle_timer_expired);
}
@@ -138,9 +138,9 @@ void irlmp_stop_idle_timer(struct lap_cb *self)
* IrLAP slot timer has expired
*
*/
-static void irlap_slot_timer_expired(void *data)
+static void irlap_slot_timer_expired(struct timer_list *t)
{
- struct irlap_cb *self = (struct irlap_cb *) data;
+ struct irlap_cb *self = from_timer(self, t, slot_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -154,9 +154,9 @@ static void irlap_slot_timer_expired(void *data)
* IrLAP query timer has expired
*
*/
-static void irlap_query_timer_expired(void *data)
+static void irlap_query_timer_expired(struct timer_list *t)
{
- struct irlap_cb *self = (struct irlap_cb *) data;
+ struct irlap_cb *self = from_timer(self, t, query_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -170,9 +170,9 @@ static void irlap_query_timer_expired(void *data)
*
*
*/
-static void irlap_final_timer_expired(void *data)
+static void irlap_final_timer_expired(struct timer_list *t)
{
- struct irlap_cb *self = (struct irlap_cb *) data;
+ struct irlap_cb *self = from_timer(self, t, final_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -186,9 +186,9 @@ static void irlap_final_timer_expired(void *data)
*
*
*/
-static void irlap_wd_timer_expired(void *data)
+static void irlap_wd_timer_expired(struct timer_list *t)
{
- struct irlap_cb *self = (struct irlap_cb *) data;
+ struct irlap_cb *self = from_timer(self, t, wd_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -202,9 +202,9 @@ static void irlap_wd_timer_expired(void *data)
*
*
*/
-static void irlap_backoff_timer_expired(void *data)
+static void irlap_backoff_timer_expired(struct timer_list *t)
{
- struct irlap_cb *self = (struct irlap_cb *) data;
+ struct irlap_cb *self = from_timer(self, t, backoff_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -218,9 +218,9 @@ static void irlap_backoff_timer_expired(void *data)
*
*
*/
-static void irlap_media_busy_expired(void *data)
+static void irlap_media_busy_expired(struct timer_list *t)
{
- struct irlap_cb *self = (struct irlap_cb *) data;
+ struct irlap_cb *self = from_timer(self, t, media_busy_timer);
IRDA_ASSERT(self != NULL, return;);
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index 0f9348ba5d84..880085e2f24a 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -114,7 +114,7 @@ int ks_wlan_update_phy_information(struct ks_wlan_private *priv)
}
static
-void ks_wlan_update_phyinfo_timeout(unsigned long ptr)
+void ks_wlan_update_phyinfo_timeout(struct timer_list *unused)
{
DPRINTK(4, "in_interrupt = %ld\n", in_interrupt());
atomic_set(&update_phyinfo, 0);
@@ -473,13 +473,16 @@ static int ks_wlan_set_rate(struct net_device *dev,
priv->reg.rate_set.body[3] =
TX_RATE_11M;
i++;
+ /* fall through */
case 5500000:
priv->reg.rate_set.body[2] = TX_RATE_5M;
i++;
+ /* fall through */
case 2000000:
priv->reg.rate_set.body[1] =
TX_RATE_2M | BASIC_RATE;
i++;
+ /* fall through */
case 1000000:
priv->reg.rate_set.body[0] =
TX_RATE_1M | BASIC_RATE;
@@ -535,14 +538,17 @@ static int ks_wlan_set_rate(struct net_device *dev,
priv->reg.rate_set.body[11] =
TX_RATE_54M;
i++;
+ /* fall through */
case 48000000:
priv->reg.rate_set.body[10] =
TX_RATE_48M;
i++;
+ /* fall through */
case 36000000:
priv->reg.rate_set.body[9] =
TX_RATE_36M;
i++;
+ /* fall through */
case 24000000:
case 18000000:
case 12000000:
@@ -619,14 +625,17 @@ static int ks_wlan_set_rate(struct net_device *dev,
TX_RATE_6M | BASIC_RATE;
i++;
}
+ /* fall through */
case 5500000:
priv->reg.rate_set.body[2] =
TX_RATE_5M | BASIC_RATE;
i++;
+ /* fall through */
case 2000000:
priv->reg.rate_set.body[1] =
TX_RATE_2M | BASIC_RATE;
i++;
+ /* fall through */
case 1000000:
priv->reg.rate_set.body[0] =
TX_RATE_1M | BASIC_RATE;
@@ -2010,6 +2019,7 @@ static int ks_wlan_set_mlme(struct net_device *dev,
case IW_MLME_DEAUTH:
if (mlme->reason_code == WLAN_REASON_MIC_FAILURE)
return 0;
+ /* fall through */
case IW_MLME_DISASSOC:
mode = 1;
return ks_wlan_set_stop_request(dev, NULL, &mode, NULL);
@@ -2941,8 +2951,7 @@ int ks_wlan_net_start(struct net_device *dev)
/* phy information update timer */
atomic_set(&update_phyinfo, 0);
- setup_timer(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout,
- (unsigned long)priv);
+ timer_setup(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout, 0);
/* dummy address set */
memcpy(priv->eth_addr, dummy_addr, ETH_ALEN);
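The ks7010 switch statements intentionally cascade through the rate cases, and the added /* fall through */ comments document that so -Wimplicit-fallthrough and static checkers stay quiet. A self-contained sketch of the same shape (demo_fill_rates and the rate codes are hypothetical):

static int demo_fill_rates(unsigned int rate_bps, unsigned char *body)
{
	int i = 0;

	switch (rate_bps) {
	case 11000000:
		body[3] = 0x16;		/* 11 Mb/s */
		i++;
		/* fall through */
	case 5500000:
		body[2] = 0x0b;		/* 5.5 Mb/s */
		i++;
		/* fall through */
	case 2000000:
		body[1] = 0x04;		/* 2 Mb/s */
		i++;
		/* fall through */
	case 1000000:
		body[0] = 0x02;		/* 1 Mb/s */
		i++;
		break;
	default:
		return -1;
	}
	return i;
}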
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
index 1ea27c9e3708..3cb3f086148e 100644
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
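The long run of lustre/lnet one-line hunks that follows adds an SPDX identifier as the very first line of each file, ahead of the existing GPL boilerplate; this series uses the C++-style marker for both .c and .h files, as the hunks show. The resulting file head looks like:

// SPDX-License-Identifier: GPL-2.0
/*
 * GPL HEADER START
 *
 * (existing license text continues unchanged)
 */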
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index b48e2f093bcc..6ad8867e5451 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 6d8752a368fa..6d132f941281 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
index 3f773a4a344b..e5c156e9d907 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index e7c37415a0c7..1b98f0953afb 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index fedb46dff696..d6fc3164e7e7 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index 0cc2fc465c1a..5a27220cc608 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
index 8c75d5075590..d4c5965c43b1 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index 709771d27f89..2f4ff595fac9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index 41795d9b3b9b..1191764c431a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
index 008da4497bda..9699646decb9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
index 2accd9a85472..fc780f608e57 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
index fa0808d2953b..30e333af8d0d 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index dd0cd0442b86..854c84358ab4 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
index 709e1ce98d8d..aece13698eb4 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/lnet/api.h b/drivers/staging/lustre/include/linux/lnet/api.h
index 9c37f3e4b134..31fcd33171b4 100644
--- a/drivers/staging/lustre/include/linux/lnet/api.h
+++ b/drivers/staging/lustre/include/linux/lnet/api.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index e0968ab8d95e..c1626726fa05 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index eea3b8e5e406..cfe8ee424e94 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -160,9 +161,9 @@ struct lnet_libmd {
} md_iov;
};
-#define LNET_MD_FLAG_ZOMBIE (1 << 0)
-#define LNET_MD_FLAG_AUTO_UNLINK (1 << 1)
-#define LNET_MD_FLAG_ABORTED (1 << 2)
+#define LNET_MD_FLAG_ZOMBIE BIT(0)
+#define LNET_MD_FLAG_AUTO_UNLINK BIT(1)
+#define LNET_MD_FLAG_ABORTED BIT(2)
struct lnet_test_peer {
/* info about peers we are trying to fail */
@@ -287,9 +288,9 @@ struct lnet_ni {
* of old LNet, so there shouldn't be any compatibility issue
*/
#define LNET_PING_FEAT_INVAL (0) /* no feature */
-#define LNET_PING_FEAT_BASE (1 << 0) /* just a ping */
-#define LNET_PING_FEAT_NI_STATUS (1 << 1) /* return NI status */
-#define LNET_PING_FEAT_RTE_DISABLED (1 << 2) /* Routing enabled */
+#define LNET_PING_FEAT_BASE BIT(0) /* just a ping */
+#define LNET_PING_FEAT_NI_STATUS BIT(1) /* return NI status */
+#define LNET_PING_FEAT_RTE_DISABLED BIT(2) /* Routing enabled */
#define LNET_PING_FEAT_MASK (LNET_PING_FEAT_BASE | \
LNET_PING_FEAT_NI_STATUS)
@@ -440,23 +441,21 @@ struct lnet_rtrbuf {
enum lnet_match_flags {
/* Didn't match anything */
- LNET_MATCHMD_NONE = (1 << 0),
+ LNET_MATCHMD_NONE = BIT(0),
/* Matched OK */
- LNET_MATCHMD_OK = (1 << 1),
+ LNET_MATCHMD_OK = BIT(1),
/* Must be discarded */
- LNET_MATCHMD_DROP = (1 << 2),
+ LNET_MATCHMD_DROP = BIT(2),
/* match and buffer is exhausted */
- LNET_MATCHMD_EXHAUSTED = (1 << 3),
+ LNET_MATCHMD_EXHAUSTED = BIT(3),
/* match or drop */
LNET_MATCHMD_FINISH = (LNET_MATCHMD_OK | LNET_MATCHMD_DROP),
};
/* Options for lnet_portal::ptl_options */
-#define LNET_PTL_LAZY (1 << 0)
-#define LNET_PTL_MATCH_UNIQUE (1 << 1) /* unique match, for RDMA */
-#define LNET_PTL_MATCH_WILDCARD (1 << 2) /* wildcard match,
- * request portal
- */
+#define LNET_PTL_LAZY BIT(0)
+#define LNET_PTL_MATCH_UNIQUE BIT(1) /* unique match, for RDMA */
+#define LNET_PTL_MATCH_WILDCARD BIT(2) /* wildcard match, request portal */
/* parameter for matching operations (GET, PUT) */
struct lnet_match_info {
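The lib-types.h flag definitions move from open-coded shifts to the BIT() macro, which states the bit position directly and avoids shift typos. A short sketch with hypothetical flag names:

#include <linux/bitops.h>

#define DEMO_FLAG_ZOMBIE	BIT(0)	/* was: (1 << 0) */
#define DEMO_FLAG_AUTO_UNLINK	BIT(1)	/* was: (1 << 1) */
#define DEMO_FLAG_ABORTED	BIT(2)	/* was: (1 << 2) */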
diff --git a/drivers/staging/lustre/include/linux/lnet/socklnd.h b/drivers/staging/lustre/include/linux/lnet/socklnd.h
index 553fb64b3e80..6bd1bca190a3 100644
--- a/drivers/staging/lustre/include/linux/lnet/socklnd.h
+++ b/drivers/staging/lustre/include/linux/lnet/socklnd.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig
index 2b5930150cda..6bcb53d0c6f4 100644
--- a/drivers/staging/lustre/lnet/Kconfig
+++ b/drivers/staging/lustre/lnet/Kconfig
@@ -34,7 +34,7 @@ config LNET_SELFTEST
config LNET_XPRT_IB
tristate "LNET infiniband support"
- depends on LNET && INFINIBAND && INFINIBAND_ADDR_TRANS
+ depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
default LNET && INFINIBAND
help
This option allows the LNET users to use infiniband as an
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 64763aacda57..8024843521ab 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index a1e994a1cc84..171eced213f8 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 8fc191d99927..40e3af5d8b04 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index 3fe4d4858eba..a71b765215ad 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index fbbd8a5489e9..f8ea523863ba 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -176,12 +177,9 @@ struct ksock_peer *
ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
{
struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
- struct list_head *tmp;
struct ksock_peer *peer;
- list_for_each(tmp, peer_list) {
- peer = list_entry(tmp, struct ksock_peer, ksnp_list);
-
+ list_for_each_entry(peer, peer_list, ksnp_list) {
LASSERT(!peer->ksnp_closing);
if (peer->ksnp_ni != ni)
@@ -453,7 +451,6 @@ int
ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
int port)
{
- struct list_head *tmp;
struct ksock_peer *peer;
struct ksock_peer *peer2;
struct ksock_route *route;
@@ -491,9 +488,7 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
}
route2 = NULL;
- list_for_each(tmp, &peer->ksnp_routes) {
- route2 = list_entry(tmp, struct ksock_route, ksnr_list);
-
+ list_for_each_entry(route2, &peer->ksnp_routes, ksnr_list) {
if (route2->ksnr_ipaddr == ipaddr)
break;
@@ -1854,12 +1849,10 @@ ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when)
peer = ksocknal_find_peer_locked(ni, id);
if (peer) {
- struct list_head *tmp;
struct ksock_conn *conn;
int bufnob;
- list_for_each(tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, struct ksock_conn, ksnc_list);
+ list_for_each_entry(conn, &peer->ksnp_conns, ksnc_list) {
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
if (bufnob < conn->ksnc_tx_bufnob) {
@@ -2316,7 +2309,7 @@ ksocknal_base_shutdown(void)
switch (ksocknal_data.ksnd_init) {
default:
LASSERT(0);
-
+ /* fall through */
case SOCKNAL_INIT_ALL:
case SOCKNAL_INIT_DATA:
LASSERT(ksocknal_data.ksnd_peers);
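The socklnd.c hunks (and the router.c ones near the end of this section) replace open-coded list_for_each() plus list_entry() with list_for_each_entry(), which iterates directly over the containing structures and drops the temporary cursor variable. A self-contained before/after sketch with hypothetical types (demo_peer, find_old, find_new):

#include <linux/list.h>

struct demo_peer {
	struct list_head	ksnp_list;
	int			ksnp_nid;
};

/* before: manual cursor plus list_entry() */
static struct demo_peer *find_old(struct list_head *head, int nid)
{
	struct list_head *tmp;
	struct demo_peer *peer;

	list_for_each(tmp, head) {
		peer = list_entry(tmp, struct demo_peer, ksnp_list);
		if (peer->ksnp_nid == nid)
			return peer;
	}
	return NULL;
}

/* after: iterate over the entries themselves */
static struct demo_peer *find_new(struct list_head *head, int nid)
{
	struct demo_peer *peer;

	list_for_each_entry(peer, head, ksnp_list) {
		if (peer->ksnp_nid == nid)
			return peer;
	}
	return NULL;
}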
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index e6428c4b7aec..35a7b396def4 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 6b38d5a8fe92..a5f2ecb966fa 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 9c328dc6537b..970140f09258 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
index fc7eec83ac07..5663a4ca94d4 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 84be9a518190..d827f770e831 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
*
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index 1ab394c1fabc..551c45bf4108 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index 24f4701a7a1e..5d501beeb622 100644
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
index 49a04a2b4ec4..f4f67d2b301e 100644
--- a/drivers/staging/lustre/lnet/libcfs/hash.c
+++ b/drivers/staging/lustre/lnet/libcfs/hash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
index 2ddd09a83cd0..e3a4c67a66b5 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
index 77fd3d06cde9..f6a0040f4ab1 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
index 1a0c7cad5983..df93d8f77ea2 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
index 333e47febf87..bcac5074bf80 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index 2da051c0d251..51823ce71773 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
index db0572733712..2e5d311d2438 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
index 55663390b608..80072b2a443c 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
index d0b3aa80cfa6..5616e9ea1450 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
index 528d49794881..1d8949f1a4fa 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
index 972677bdf6bc..0092166af258 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
index 3f5dec153571..963df0ef4afb 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
index 435722175cce..b5746230ab31 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
index 4e331e71083d..6f92ea272186 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
index 16a3ae791bb6..7928d7182634 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
index 6aed98fc9688..4ead55920e79 100644
--- a/drivers/staging/lustre/lnet/libcfs/module.c
+++ b/drivers/staging/lustre/lnet/libcfs/module.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/prng.c b/drivers/staging/lustre/lnet/libcfs/prng.c
index 963ef4ae93b1..f47cf67a92e3 100644
--- a/drivers/staging/lustre/lnet/libcfs/prng.c
+++ b/drivers/staging/lustre/lnet/libcfs/prng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index f916b475e767..da2844f37edf 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index c3547cd4c72c..a29d6eb3a785 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
index 038ed8c52107..6a05d9bab8dc 100644
--- a/drivers/staging/lustre/lnet/libcfs/workitem.c
+++ b/drivers/staging/lustre/lnet/libcfs/workitem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index be2823f8eb02..ee85cab6f437 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index ad835035fffa..7caff290c146 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 26841a7b6213..0cf0f4f99435 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index 6b446a51eeac..daf744277003 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index a0aef4b9bce3..ac5b9593d597 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
index f52a5e8ed386..dd5d3cf6d3e2 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index bc0779c02d97..27848cd69564 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index d04875e3956f..c72ef05b2420 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 5946848a7846..8ae93bf6fd1b 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index 7d0add0c0de3..539a26444f31 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
index 80c06f4b0c8d..7456b989e451 100644
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index 7d12a7fb36a4..c0c4723f72fd 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/net_fault.c b/drivers/staging/lustre/lnet/lnet/net_fault.c
index 03f3d18a1a29..3c83aa31e2c2 100644
--- a/drivers/staging/lustre/lnet/lnet/net_fault.c
+++ b/drivers/staging/lustre/lnet/lnet/net_fault.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -629,6 +630,7 @@ delayed_msg_process(struct list_head *msg_list, bool drop)
case LNET_CREDIT_OK:
lnet_ni_recv(ni, msg->msg_private, msg, 0,
0, msg->msg_len, msg->msg_len);
+ /* fall through */
case LNET_CREDIT_WAIT:
continue;
default: /* failures */
diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c
index 7bd1e6f389aa..05b120c2d45a 100644
--- a/drivers/staging/lustre/lnet/lnet/nidstrings.c
+++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
index 4d55df8ff74e..5e94ad349454 100644
--- a/drivers/staging/lustre/lnet/lnet/peer.c
+++ b/drivers/staging/lustre/lnet/lnet/peer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 3df101bafd9f..88283ca3f860 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
@@ -222,15 +223,12 @@ struct lnet_remotenet *
lnet_find_net_locked(__u32 net)
{
struct lnet_remotenet *rnet;
- struct list_head *tmp;
struct list_head *rn_list;
LASSERT(!the_lnet.ln_shutdown);
rn_list = lnet_net2rnethash(net);
- list_for_each(tmp, rn_list) {
- rnet = list_entry(tmp, struct lnet_remotenet, lrn_list);
-
+ list_for_each_entry(rnet, rn_list, lrn_list) {
if (rnet->lrn_net == net)
return rnet;
}
@@ -243,7 +241,6 @@ static void lnet_shuffle_seed(void)
__u32 lnd_type, seed[2];
struct timespec64 ts;
struct lnet_ni *ni;
- struct list_head *tmp;
if (seeded)
return;
@@ -254,8 +251,7 @@ static void lnet_shuffle_seed(void)
* Nodes with small feet have little entropy
* the NID for this node gives the most entropy in the low bits
*/
- list_for_each(tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, struct lnet_ni, ni_list);
+ list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
if (lnd_type != LOLND)
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index 4a994d113c7d..d32d653edcb0 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index f8b9175f08d4..f1ee219bc8f3 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 9619ecbf8bdf..082c0afacf23 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -151,6 +152,7 @@ lst_debug_ioctl(struct lstio_debug_args *args)
case LST_OPC_BATCHSRV:
client = 0;
+ /* fall through */
case LST_OPC_BATCHCLI:
if (!name)
goto out;
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 196d23c10921..6a0f770e0e24 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 239323679baa..374a5f31ef6f 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 289b202c3b36..a2662638d599 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 143eae9b8d71..3933ed4cca93 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index ef27bfffc230..fe889607ff3f 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
index b5d556fa48ab..1d44d912f014 100644
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ b/drivers/staging/lustre/lnet/selftest/module.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -57,10 +58,13 @@ lnet_selftest_exit(void)
switch (lst_init_step) {
case LST_INIT_CONSOLE:
lstcon_console_fini();
+ /* fall through */
case LST_INIT_FW:
sfw_shutdown();
+ /* fall through */
case LST_INIT_RPC:
srpc_shutdown();
+ /* fall through */
case LST_INIT_WI_TEST:
for (i = 0;
i < cfs_cpt_number(lnet_cpt_table()); i++) {
@@ -72,7 +76,7 @@ lnet_selftest_exit(void)
sizeof(lst_sched_test[0]) *
cfs_cpt_number(lnet_cpt_table()));
lst_sched_test = NULL;
-
+ /* fall through */
case LST_INIT_WI_SERIAL:
cfs_wi_sched_destroy(lst_sched_serial);
lst_sched_serial = NULL;
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index 9653ac6fd619..f54bd630dbf8 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 77c222cca230..ab7e8a8e58b8 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -1037,6 +1038,7 @@ srpc_handle_rpc(struct swi_workitem *wi)
ev->ev_status = rc;
}
}
+ /* fall through */
case SWI_STATE_BULK_STARTED:
LASSERT(!rpc->srpc_bulk || ev->ev_fired);
@@ -1237,7 +1239,8 @@ srpc_send_rpc(struct swi_workitem *wi)
break;
wi->swi_state = SWI_STATE_REQUEST_SENT;
- /* perhaps more events, fall thru */
+ /* perhaps more events */
+ /* fall through */
case SWI_STATE_REQUEST_SENT: {
enum srpc_msg_type type = srpc_service2reply(rpc->crpc_service);
@@ -1269,6 +1272,7 @@ srpc_send_rpc(struct swi_workitem *wi)
wi->swi_state = SWI_STATE_REPLY_RECEIVED;
}
+ /* fall through */
case SWI_STATE_REPLY_RECEIVED:
if (do_bulk && !rpc->crpc_bulkev.ev_fired)
break;
@@ -1448,6 +1452,7 @@ srpc_lnet_ev_handler(struct lnet_event *ev)
srpc_data.rpc_counters.rpcs_sent++;
spin_unlock(&srpc_data.rpc_glock);
}
+ /* fall through */
case SRPC_REPLY_RCVD:
case SRPC_BULK_REQ_RCVD:
crpc = rpcev->ev_data;
@@ -1570,7 +1575,7 @@ srpc_lnet_ev_handler(struct lnet_event *ev)
if (!ev->unlinked)
break; /* wait for final event */
-
+ /* fall through */
case SRPC_BULK_PUT_SENT:
if (!ev->status && ev->type != LNET_EVENT_UNLINK) {
spin_lock(&srpc_data.rpc_glock);
@@ -1582,6 +1587,7 @@ srpc_lnet_ev_handler(struct lnet_event *ev)
spin_unlock(&srpc_data.rpc_glock);
}
+ /* fall through */
case SRPC_REPLY_SENT:
srpc = rpcev->ev_data;
scd = srpc->srpc_scd;
@@ -1674,14 +1680,14 @@ srpc_shutdown(void)
spin_unlock(&srpc_data.rpc_glock);
stt_shutdown();
-
+ /* fall through */
case SRPC_STATE_EQ_INIT:
rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
LASSERT(!rc);
rc = LNetEQFree(srpc_data.rpc_lnet_eq);
LASSERT(!rc); /* the EQ should have no user by now */
-
+ /* fall through */
case SRPC_STATE_NI_INIT:
LNetNIFini();
}
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index 7bb442a8e698..465b5b534423 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 7adad4302dcf..8c10f0f149d5 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index 2fe692df19d0..ab125a8524c5 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h
index 441d6d6b4f8e..7f0ef9bd0cda 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.h
+++ b/drivers/staging/lustre/lnet/selftest/timer.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/fid/fid_internal.h b/drivers/staging/lustre/lustre/fid/fid_internal.h
index f48ab9d21428..b7b8f900df8e 100644
--- a/drivers/staging/lustre/lustre/fid/fid_internal.h
+++ b/drivers/staging/lustre/lustre/fid/fid_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/fid/fid_lib.c b/drivers/staging/lustre/lustre/fid/fid_lib.c
index c21a5f5b7621..9577da33e666 100644
--- a/drivers/staging/lustre/lustre/fid/fid_lib.c
+++ b/drivers/staging/lustre/lustre/fid/fid_lib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index ba736239243c..009c2367f74e 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -279,7 +280,8 @@ int seq_client_alloc_fid(const struct lu_env *env,
*fid = seq->lcs_fid;
mutex_unlock(&seq->lcs_mutex);
- CDEBUG(D_INFO, "%s: Allocated FID " DFID "\n", seq->lcs_name, PFID(fid));
+ CDEBUG(D_INFO,
+ "%s: Allocated FID " DFID "\n", seq->lcs_name, PFID(fid));
return rc;
}
EXPORT_SYMBOL(seq_client_alloc_fid);
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index 1a269fbc4b47..083419f77697 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index b723ece02eff..7d6a7106c0a5 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index fe6f278a7d9f..b5e3abaa508a 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index 5b180830eec0..068c364adda8 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -425,7 +426,8 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
target = fld_client_get_target(fld, seq);
LASSERT(target);
- CDEBUG(D_INFO, "%s: Lookup fld entry (seq: %#llx) on target %s (idx %llu)\n",
+ CDEBUG(D_INFO,
+ "%s: Lookup fld entry (seq: %#llx) on target %s (idx %llu)\n",
fld->lcf_name, seq, fld_target_name(target), target->ft_idx);
res.lsr_start = seq;
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
index 6cae803fc8d2..1a6a76110c3e 100644
--- a/drivers/staging/lustre/lustre/fld/lproc_fld.c
+++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index 9ba184b6017f..90419dca2e1e 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/interval_tree.h b/drivers/staging/lustre/lustre/include/interval_tree.h
index a4d7280e1fa4..7d119c1a0469 100644
--- a/drivers/staging/lustre/lustre/include/interval_tree.h
+++ b/drivers/staging/lustre/lustre/include/interval_tree.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/llog_swab.h b/drivers/staging/lustre/lustre/include/llog_swab.h
index 925271db4554..0433b79efdcb 100644
--- a/drivers/staging/lustre/lustre/include/llog_swab.h
+++ b/drivers/staging/lustre/lustre/include/llog_swab.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index 98d6b1364c21..835a729dd8d0 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 4f213c408cfa..34e35fbff978 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -1130,7 +1131,7 @@ struct lu_context_key {
{ \
type *value; \
\
- BUILD_BUG_ON(PAGE_SIZE < sizeof(*value)); \
+ BUILD_BUG_ON(sizeof(*value) > PAGE_SIZE); \
\
value = kzalloc(sizeof(*value), GFP_NOFS); \
if (!value) \
@@ -1303,8 +1304,6 @@ struct lu_buf {
size_t lb_len;
};
-#define DLUBUF "(%p %zu)"
-#define PLUBUF(buf) (buf)->lb_buf, (buf)->lb_len
/**
* One-time initializers, called at obdclass module initialization, not
* exported.
diff --git a/drivers/staging/lustre/lustre/include/lu_ref.h b/drivers/staging/lustre/lustre/include/lu_ref.h
index f7dfd83951ee..ad0c24d29ffa 100644
--- a/drivers/staging/lustre/lustre/include/lu_ref.h
+++ b/drivers/staging/lustre/lustre/include/lu_ref.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
diff --git a/drivers/staging/lustre/lustre/include/lustre_acl.h b/drivers/staging/lustre/lustre/include/lustre_acl.h
index 9786f6caaade..35ff61ce4e9d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_acl.h
+++ b/drivers/staging/lustre/lustre/include/lustre_acl.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_compat.h b/drivers/staging/lustre/lustre/include/lustre_compat.h
index 69bfd6a6e0f9..9f488e605083 100644
--- a/drivers/staging/lustre/lustre/include/lustre_compat.h
+++ b/drivers/staging/lustre/lustre/include/lustre_compat.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_debug.h b/drivers/staging/lustre/lustre/include/lustre_debug.h
index 0be6a534f712..721a81f923e3 100644
--- a/drivers/staging/lustre/lustre/include/lustre_debug.h
+++ b/drivers/staging/lustre/lustre/include/lustre_debug.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 2d862b32265b..8f1a22527006 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 13c3d2fd31a8..e0b17052b2ea 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
index 11331ae81d58..53db031c4c8c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* -*- buffer-read-only: t -*- vi: set ro:
*
* This program is free software; you can redistribute it and/or modify
@@ -136,7 +137,8 @@
#define ldlm_clear_cancel_on_block(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 23)
/**
- * measure lock contention and return -EUSERS if locking contention is high */
+ * measure lock contention and return -EUSERS if locking contention is high
+ */
#define LDLM_FL_DENY_ON_CONTENTION 0x0000000040000000ULL /* bit 30 */
#define ldlm_is_deny_on_contention(_l) LDLM_TEST_FLAG((_l), 1ULL << 30)
#define ldlm_set_deny_on_contention(_l) LDLM_SET_FLAG((_l), 1ULL << 30)
@@ -144,7 +146,8 @@
/**
* These are flags that are mapped into the flags and ASTs of blocking
- * locks Add FL_DISCARD to blocking ASTs */
+ * locks. Add FL_DISCARD to blocking ASTs
+ */
#define LDLM_FL_AST_DISCARD_DATA 0x0000000080000000ULL /* bit 31 */
#define ldlm_is_ast_discard_data(_l) LDLM_TEST_FLAG((_l), 1ULL << 31)
#define ldlm_set_ast_discard_data(_l) LDLM_SET_FLAG((_l), 1ULL << 31)
diff --git a/drivers/staging/lustre/lustre/include/lustre_errno.h b/drivers/staging/lustre/lustre/include/lustre_errno.h
index 35aefa2cdad1..59fbb9f47ff1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_errno.h
+++ b/drivers/staging/lustre/lustre/include/lustre_errno.h
@@ -70,16 +70,14 @@
#define LUSTRE_EROFS 30 /* Read-only file system */
#define LUSTRE_EMLINK 31 /* Too many links */
#define LUSTRE_EPIPE 32 /* Broken pipe */
-#define LUSTRE_EDOM 33 /* Math argument out of domain of
- func */
+#define LUSTRE_EDOM 33 /* Math argument out of func domain */
#define LUSTRE_ERANGE 34 /* Math result not representable */
#define LUSTRE_EDEADLK 35 /* Resource deadlock would occur */
#define LUSTRE_ENAMETOOLONG 36 /* File name too long */
#define LUSTRE_ENOLCK 37 /* No record locks available */
#define LUSTRE_ENOSYS 38 /* Function not implemented */
#define LUSTRE_ENOTEMPTY 39 /* Directory not empty */
-#define LUSTRE_ELOOP 40 /* Too many symbolic links
- encountered */
+#define LUSTRE_ELOOP 40 /* Too many symbolic links found */
#define LUSTRE_ENOMSG 42 /* No message of desired type */
#define LUSTRE_EIDRM 43 /* Identifier removed */
#define LUSTRE_ECHRNG 44 /* Channel number out of range */
@@ -112,23 +110,17 @@
#define LUSTRE_EMULTIHOP 72 /* Multihop attempted */
#define LUSTRE_EDOTDOT 73 /* RFS specific error */
#define LUSTRE_EBADMSG 74 /* Not a data message */
-#define LUSTRE_EOVERFLOW 75 /* Value too large for defined data
- type */
+#define LUSTRE_EOVERFLOW 75 /* Value too large for data type */
#define LUSTRE_ENOTUNIQ 76 /* Name not unique on network */
#define LUSTRE_EBADFD 77 /* File descriptor in bad state */
#define LUSTRE_EREMCHG 78 /* Remote address changed */
-#define LUSTRE_ELIBACC 79 /* Can not access a needed shared
- library */
-#define LUSTRE_ELIBBAD 80 /* Accessing a corrupted shared
- library */
+#define LUSTRE_ELIBACC 79 /* Can't access needed shared library */
+#define LUSTRE_ELIBBAD 80 /* Access corrupted shared library */
#define LUSTRE_ELIBSCN 81 /* .lib section in a.out corrupted */
-#define LUSTRE_ELIBMAX 82 /* Attempting to link in too many shared
- libraries */
-#define LUSTRE_ELIBEXEC 83 /* Cannot exec a shared library
- directly */
+#define LUSTRE_ELIBMAX 82 /* Trying to link too many libraries */
+#define LUSTRE_ELIBEXEC 83 /* Cannot exec a shared lib directly */
#define LUSTRE_EILSEQ 84 /* Illegal byte sequence */
-#define LUSTRE_ERESTART 85 /* Interrupted system call should be
- restarted */
+#define LUSTRE_ERESTART 85 /* Restart interrupted system call */
#define LUSTRE_ESTRPIPE 86 /* Streams pipe error */
#define LUSTRE_EUSERS 87 /* Too many users */
#define LUSTRE_ENOTSOCK 88 /* Socket operation on non-socket */
@@ -138,26 +130,20 @@
#define LUSTRE_ENOPROTOOPT 92 /* Protocol not available */
#define LUSTRE_EPROTONOSUPPORT 93 /* Protocol not supported */
#define LUSTRE_ESOCKTNOSUPPORT 94 /* Socket type not supported */
-#define LUSTRE_EOPNOTSUPP 95 /* Operation not supported on transport
- endpoint */
+#define LUSTRE_EOPNOTSUPP 95 /* Operation not supported */
#define LUSTRE_EPFNOSUPPORT 96 /* Protocol family not supported */
-#define LUSTRE_EAFNOSUPPORT 97 /* Address family not supported by
- protocol */
+#define LUSTRE_EAFNOSUPPORT 97 /* Address family not supported */
#define LUSTRE_EADDRINUSE 98 /* Address already in use */
#define LUSTRE_EADDRNOTAVAIL 99 /* Cannot assign requested address */
#define LUSTRE_ENETDOWN 100 /* Network is down */
#define LUSTRE_ENETUNREACH 101 /* Network is unreachable */
-#define LUSTRE_ENETRESET 102 /* Network dropped connection because of
- reset */
+#define LUSTRE_ENETRESET 102 /* Network connection drop for reset */
#define LUSTRE_ECONNABORTED 103 /* Software caused connection abort */
#define LUSTRE_ECONNRESET 104 /* Connection reset by peer */
#define LUSTRE_ENOBUFS 105 /* No buffer space available */
-#define LUSTRE_EISCONN 106 /* Transport endpoint is already
- connected */
-#define LUSTRE_ENOTCONN 107 /* Transport endpoint is not
- connected */
-#define LUSTRE_ESHUTDOWN 108 /* Cannot send after transport endpoint
- shutdown */
+#define LUSTRE_EISCONN 106 /* Transport endpoint is connected */
+#define LUSTRE_ENOTCONN 107 /* Transport endpoint not connected */
+#define LUSTRE_ESHUTDOWN 108 /* Cannot send after shutdown */
#define LUSTRE_ETOOMANYREFS 109 /* Too many references: cannot splice */
#define LUSTRE_ETIMEDOUT 110 /* Connection timed out */
#define LUSTRE_ECONNREFUSED 111 /* Connection refused */
@@ -185,8 +171,7 @@
#define LUSTRE_ERESTARTNOINTR 513
#define LUSTRE_ERESTARTNOHAND 514 /* restart if no handler.. */
#define LUSTRE_ENOIOCTLCMD 515 /* No ioctl command */
-#define LUSTRE_ERESTART_RESTARTBLOCK 516 /* restart by calling
- sys_restart_syscall */
+#define LUSTRE_ERESTART_RESTARTBLOCK 516 /* restart via sys_restart_syscall */
#define LUSTRE_EBADHANDLE 521 /* Illegal NFS file handle */
#define LUSTRE_ENOTSYNC 522 /* Update synchronization mismatch */
#define LUSTRE_EBADCOOKIE 523 /* Cookie is stale */
@@ -194,10 +179,8 @@
#define LUSTRE_ETOOSMALL 525 /* Buffer or request is too small */
#define LUSTRE_ESERVERFAULT 526 /* An untranslatable error occurred */
#define LUSTRE_EBADTYPE 527 /* Type not supported by server */
-#define LUSTRE_EJUKEBOX 528 /* Request initiated, but will not
- complete before timeout */
-#define LUSTRE_EIOCBQUEUED 529 /* iocb queued, will get completion
- event */
+#define LUSTRE_EJUKEBOX 528 /* Request won't finish until timeout */
+#define LUSTRE_EIOCBQUEUED 529 /* iocb queued await completion event */
#define LUSTRE_EIOCBRETRY 530 /* iocb queued, will trigger a retry */
/*
diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h
index 3631a69a5c6f..66ac9dc7302a 100644
--- a/drivers/staging/lustre/lustre/include/lustre_export.h
+++ b/drivers/staging/lustre/lustre/include/lustre_export.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index e0f2b8295775..d19c7a27ee48 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
index 6125eb0d3395..4055bbd24c55 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fld.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fld.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h
index dec1e99d594d..cbd68985ada9 100644
--- a/drivers/staging/lustre/lustre/include/lustre_ha.h
+++ b/drivers/staging/lustre/lustre/include/lustre_ha.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h
index d49932628f32..c48c97362cf6 100644
--- a/drivers/staging/lustre/lustre/include/lustre_handles.h
+++ b/drivers/staging/lustre/lustre/include/lustre_handles.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index d71d0473a4eb..ea158e0630e2 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_intent.h b/drivers/staging/lustre/lustre/include/lustre_intent.h
index ed2b6c674109..519e94fc089d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_intent.h
+++ b/drivers/staging/lustre/lustre/include/lustre_intent.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h
index f1899a3d7a40..2b3fa8430185 100644
--- a/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 81b9cbffc050..ca1dce15337e 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_linkea.h b/drivers/staging/lustre/lustre/include/lustre_linkea.h
index 3ff008fee13d..03db1511bfd3 100644
--- a/drivers/staging/lustre/lustre/include/lustre_linkea.h
+++ b/drivers/staging/lustre/lustre/include/lustre_linkea.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_lmv.h b/drivers/staging/lustre/lustre/include/lustre_lmv.h
index 98a82be2037f..f4298e5f7543 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lmv.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lmv.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
index 24a7777424f6..07f4e600386b 100644
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index c0c44974cb1c..007e1ec3f0f4 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_mds.h b/drivers/staging/lustre/lustre/include/lustre_mds.h
index c424e1239fd5..6937546f1d46 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mds.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mds.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index c6d1646f102a..3ff5de4770e8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs.h b/drivers/staging/lustre/lustre/include/lustre_nrs.h
index 51f45f7776df..ffa7317da35b 100644
--- a/drivers/staging/lustre/lustre/include/lustre_nrs.h
+++ b/drivers/staging/lustre/lustre/include/lustre_nrs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
index 3b5418eac6c4..b70d97d4acbb 100644
--- a/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
+++ b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_obdo.h b/drivers/staging/lustre/lustre/include/lustre_obdo.h
index 53379f861161..d67dcbb84f18 100644
--- a/drivers/staging/lustre/lustre/include/lustre_obdo.h
+++ b/drivers/staging/lustre/lustre/include/lustre_obdo.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h
index 5842cb18b49e..ce28ed5c1ef8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index cd62ccd53e2c..213d0a01adcf 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index 03a970bcac55..a40f706a53a1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_swab.h b/drivers/staging/lustre/lustre/include/lustre_swab.h
index 765e923c2fc9..9d786bbe7f3f 100644
--- a/drivers/staging/lustre/lustre/include/lustre_swab.h
+++ b/drivers/staging/lustre/lustre/include/lustre_swab.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index a986737ec010..4368f4e9f208 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h
index cda3d2808d2f..e5f7bb20415d 100644
--- a/drivers/staging/lustre/lustre/include/obd_cksum.h
+++ b/drivers/staging/lustre/lustre/include/obd_cksum.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 976005a1e0b2..67c535c5aa98 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index aea193a882a2..3f4fe290f6ea 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/seq_range.h b/drivers/staging/lustre/lustre/include/seq_range.h
index d7175485944d..9450da728160 100644
--- a/drivers/staging/lustre/lustre/include/seq_range.h
+++ b/drivers/staging/lustre/lustre/include/seq_range.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
index 19e285dd2ee1..8df7a4463c21 100644
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c
index 57fd84effdfa..0662cec14b81 100644
--- a/drivers/staging/lustre/lustre/ldlm/l_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/l_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index 2cc6dc2b281f..fac9d19d50b6 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -207,7 +208,8 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
continue;
if (ldlm_extent_overlap(&lck->l_req_extent,
&lock->l_req_extent)) {
- CDEBUG(D_ERROR, "granting conflicting lock %p %p\n",
+ CDEBUG(D_ERROR,
+ "granting conflicting lock %p %p\n",
lck, lock);
ldlm_resource_dump(D_ERROR, res);
LBUG();
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index cb826e9e840e..657ab95091a0 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -59,17 +60,6 @@
#include <linux/list.h>
#include "ldlm_internal.h"
-/**
- * list_for_remaining_safe - iterate over the remaining entries in a list
- * and safeguard against removal of a list entry.
- * \param pos the &struct list_head to use as a loop counter. pos MUST
- * have been initialized prior to using it in this macro.
- * \param n another &struct list_head to use as temporary storage
- * \param head the head for your list.
- */
-#define list_for_remaining_safe(pos, n, head) \
- for (n = pos->next; pos != (head); pos = n, n = pos->next)
-
static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
@@ -88,24 +78,23 @@ ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
}
static inline void
-ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
+ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode)
{
- LDLM_DEBUG(lock, "%s(mode: %d, flags: 0x%llx)",
- __func__, mode, flags);
+ LDLM_DEBUG(lock, "%s(mode: %d)",
+ __func__, mode);
/* Safe to not lock here, since it should be empty anyway */
LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
list_del_init(&lock->l_res_link);
- if (flags == LDLM_FL_WAIT_NOREPROC) {
- /* client side - set a flag to prevent sending a CANCEL */
- lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
- /* when reaching here, it is under lock_res_and_lock(). Thus,
- * need call the nolock version of ldlm_lock_decref_internal
- */
- ldlm_lock_decref_internal_nolock(lock, mode);
- }
+ /* client side - set a flag to prevent sending a CANCEL */
+ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
+
+ /* when reaching here, it is under lock_res_and_lock(). Thus,
+ * need call the nolock version of ldlm_lock_decref_internal
+ */
+ ldlm_lock_decref_internal_nolock(lock, mode);
ldlm_lock_destroy_nolock(lock);
}
@@ -121,129 +110,45 @@ ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
* It is also responsible for splitting a lock if a portion of the lock
* is released.
*
- * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
- * - blocking ASTs have already been sent
- *
- * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
- * - blocking ASTs have not been sent yet, so list of conflicting locks
- * would be collected and ASTs sent.
*/
-static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
- int first_enq, enum ldlm_error *err,
- struct list_head *work_list)
+static int ldlm_process_flock_lock(struct ldlm_lock *req)
{
struct ldlm_resource *res = req->l_resource;
struct ldlm_namespace *ns = ldlm_res_to_ns(res);
- struct list_head *tmp;
- struct list_head *ownlocks = NULL;
- struct ldlm_lock *lock = NULL;
+ struct ldlm_lock *tmp;
+ struct ldlm_lock *lock;
struct ldlm_lock *new = req;
struct ldlm_lock *new2 = NULL;
enum ldlm_mode mode = req->l_req_mode;
int added = (mode == LCK_NL);
- int overlaps = 0;
int splitted = 0;
const struct ldlm_callback_suite null_cbs = { };
CDEBUG(D_DLMTRACE,
- "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
- *flags, new->l_policy_data.l_flock.owner,
+ "owner %llu pid %u mode %u start %llu end %llu\n",
+ new->l_policy_data.l_flock.owner,
new->l_policy_data.l_flock.pid, mode,
req->l_policy_data.l_flock.start,
req->l_policy_data.l_flock.end);
- *err = ELDLM_OK;
-
/* No blocking ASTs are sent to the clients for
* Posix file & record locks
*/
req->l_blocking_ast = NULL;
reprocess:
- if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
- /* This loop determines where this processes locks start
- * in the resource lr_granted list.
- */
- list_for_each(tmp, &res->lr_granted) {
- lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
- if (ldlm_same_flock_owner(lock, req)) {
- ownlocks = tmp;
- break;
- }
- }
- } else {
- int reprocess_failed = 0;
-
- lockmode_verify(mode);
-
- /* This loop determines if there are existing locks
- * that conflict with the new lock request.
- */
- list_for_each(tmp, &res->lr_granted) {
- lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
-
- if (ldlm_same_flock_owner(lock, req)) {
- if (!ownlocks)
- ownlocks = tmp;
- continue;
- }
-
- /* locks are compatible, overlap doesn't matter */
- if (lockmode_compat(lock->l_granted_mode, mode))
- continue;
-
- if (!ldlm_flocks_overlap(lock, req))
- continue;
-
- if (!first_enq) {
- reprocess_failed = 1;
- continue;
- }
-
- if (*flags & LDLM_FL_BLOCK_NOWAIT) {
- ldlm_flock_destroy(req, mode, *flags);
- *err = -EAGAIN;
- return LDLM_ITER_STOP;
- }
-
- if (*flags & LDLM_FL_TEST_LOCK) {
- ldlm_flock_destroy(req, mode, *flags);
- req->l_req_mode = lock->l_granted_mode;
- req->l_policy_data.l_flock.pid =
- lock->l_policy_data.l_flock.pid;
- req->l_policy_data.l_flock.start =
- lock->l_policy_data.l_flock.start;
- req->l_policy_data.l_flock.end =
- lock->l_policy_data.l_flock.end;
- *flags |= LDLM_FL_LOCK_CHANGED;
- return LDLM_ITER_STOP;
- }
-
- ldlm_resource_add_lock(res, &res->lr_waiting, req);
- *flags |= LDLM_FL_BLOCK_GRANTED;
- return LDLM_ITER_STOP;
- }
- if (reprocess_failed)
- return LDLM_ITER_CONTINUE;
- }
-
- if (*flags & LDLM_FL_TEST_LOCK) {
- ldlm_flock_destroy(req, mode, *flags);
- req->l_req_mode = LCK_NL;
- *flags |= LDLM_FL_LOCK_CHANGED;
- return LDLM_ITER_STOP;
- }
+ /* This loop determines where this process's locks start
+ * in the resource lr_granted list.
+ */
+ list_for_each_entry(lock, &res->lr_granted, l_res_link)
+ if (ldlm_same_flock_owner(lock, req))
+ break;
- /* Scan the locks owned by this process that overlap this request.
+ /* Scan the locks owned by this process to find the insertion point
+ * (as locks are ordered), and to handle overlaps.
* We may have to merge or split existing locks.
*/
- if (!ownlocks)
- ownlocks = &res->lr_granted;
-
- list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
- lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);
+ list_for_each_entry_safe_from(lock, tmp, &res->lr_granted, l_res_link) {
if (!ldlm_same_flock_owner(lock, new))
break;
@@ -283,7 +188,7 @@ reprocess:
}
if (added) {
- ldlm_flock_destroy(lock, mode, *flags);
+ ldlm_flock_destroy(lock, mode);
} else {
new = lock;
added = 1;
@@ -299,8 +204,6 @@ reprocess:
lock->l_policy_data.l_flock.start)
break;
- ++overlaps;
-
if (new->l_policy_data.l_flock.start <=
lock->l_policy_data.l_flock.start) {
if (new->l_policy_data.l_flock.end <
@@ -309,7 +212,7 @@ reprocess:
new->l_policy_data.l_flock.end + 1;
break;
}
- ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
+ ldlm_flock_destroy(lock, lock->l_req_mode);
continue;
}
if (new->l_policy_data.l_flock.end >=
@@ -340,9 +243,7 @@ reprocess:
NULL, 0, LVB_T_NONE);
lock_res_and_lock(req);
if (IS_ERR(new2)) {
- ldlm_flock_destroy(req, lock->l_granted_mode,
- *flags);
- *err = PTR_ERR(new2);
+ ldlm_flock_destroy(req, lock->l_granted_mode);
return LDLM_ITER_STOP;
}
goto reprocess;
@@ -371,12 +272,11 @@ reprocess:
&new2->l_remote_handle,
&new2->l_exp_hash);
}
- if (*flags == LDLM_FL_WAIT_NOREPROC)
- ldlm_lock_addref_internal_nolock(new2,
- lock->l_granted_mode);
+ ldlm_lock_addref_internal_nolock(new2,
+ lock->l_granted_mode);
/* insert new2 at lock */
- ldlm_resource_add_lock(res, ownlocks, new2);
+ ldlm_resource_add_lock(res, &lock->l_res_link, new2);
LDLM_LOCK_RELEASE(new2);
break;
}
@@ -390,17 +290,12 @@ reprocess:
if (!added) {
list_del_init(&req->l_res_link);
- /* insert new lock before ownlocks in list. */
- ldlm_resource_add_lock(res, ownlocks, req);
- }
-
- if (*flags != LDLM_FL_WAIT_NOREPROC) {
- /* The only one possible case for client-side calls flock
- * policy function is ldlm_flock_completion_ast inside which
- * carries LDLM_FL_WAIT_NOREPROC flag.
+ /* insert new lock before "lock", which might be the
+ * next lock for this owner, or might be the first
+ * lock for the next owner, or might not be a lock at
+ * all, but instead points at the head of the list
*/
- CERROR("Illegal parameter for client-side-only module.\n");
- LBUG();
+ ldlm_resource_add_lock(res, &lock->l_res_link, req);
}
/* In case we're reprocessing the requested lock we can't destroy
@@ -409,7 +304,7 @@ reprocess:
* could be freed before the completion AST can be sent.
*/
if (added)
- ldlm_flock_destroy(req, mode, *flags);
+ ldlm_flock_destroy(req, mode);
ldlm_resource_dump(D_INFO, res);
return LDLM_ITER_CONTINUE;
@@ -417,7 +312,6 @@ reprocess:
struct ldlm_flock_wait_data {
struct ldlm_lock *fwd_lock;
- int fwd_generation;
};
static void
@@ -448,12 +342,9 @@ int
ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
struct file_lock *getlk = lock->l_ast_data;
- struct obd_device *obd;
- struct obd_import *imp = NULL;
- struct ldlm_flock_wait_data fwd;
- struct l_wait_info lwi;
- enum ldlm_error err;
- int rc = 0;
+ struct ldlm_flock_wait_data fwd;
+ struct l_wait_info lwi;
+ int rc = 0;
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT2, 4);
if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT3)) {
@@ -479,20 +370,9 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
return 0;
}
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");
+ LDLM_DEBUG(lock,
+ "client-side enqueue returned a blocked lock, sleeping");
fwd.fwd_lock = lock;
- obd = class_exp2obd(lock->l_conn_export);
-
- /* if this is a local lock, there is no import */
- if (obd)
- imp = obd->u.cli.cl_import;
-
- if (imp) {
- spin_lock(&imp->imp_lock);
- fwd.fwd_generation = imp->imp_generation;
- spin_unlock(&imp->imp_lock);
- }
-
lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);
/* Go to sleep until the lock is granted. */
@@ -561,10 +441,11 @@ granted:
mode = lock->l_granted_mode;
if (ldlm_is_flock_deadlock(lock)) {
- LDLM_DEBUG(lock, "client-side enqueue deadlock received");
+ LDLM_DEBUG(lock,
+ "client-side enqueue deadlock received");
rc = -EDEADLK;
}
- ldlm_flock_destroy(lock, mode, LDLM_FL_WAIT_NOREPROC);
+ ldlm_flock_destroy(lock, mode);
unlock_res_and_lock(lock);
/* Need to wake up the waiter if we were evicted */
@@ -585,7 +466,7 @@ granted:
* in the lock changes we can decref the appropriate refcount.
*/
LASSERT(ldlm_is_test_lock(lock));
- ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
+ ldlm_flock_destroy(lock, getlk->fl_type);
switch (lock->l_granted_mode) {
case LCK_PR:
getlk->fl_type = F_RDLCK;
@@ -600,12 +481,10 @@ granted:
getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start;
getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end;
} else {
- __u64 noreproc = LDLM_FL_WAIT_NOREPROC;
-
/* We need to reprocess the lock to do merges or splits
* with existing locks owned by this process.
*/
- ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
+ ldlm_process_flock_lock(lock);
}
unlock_res_and_lock(lock);
return rc;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
index fcb6e44bd319..2926208cdfa1 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 36808dbe8790..bc33ca100620 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -88,7 +89,7 @@ struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client);
/* ldlm_request.c */
/* Cancel lru flag, it indicates we cancel aged locks. */
enum {
- LDLM_LRU_FLAG_AGED = BIT(0), /* Cancel aged locks (non lru resize). */
+ LDLM_LRU_FLAG_AGED = BIT(0), /* Cancel old non-LRU resize locks */
LDLM_LRU_FLAG_PASSED = BIT(1), /* Cancel passed number of locks. */
LDLM_LRU_FLAG_SHRINK = BIT(2), /* Cancel locks from shrinker. */
LDLM_LRU_FLAG_LRUR = BIT(3), /* Cancel locks from lru resize. */
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 22600c2a73ea..9efd26ec59dd 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index b5d84f3f6071..7cb61e2e7d3b 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -1035,7 +1036,8 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
ldlm_extent_add_lock(res, lock);
} else if (res->lr_type == LDLM_FLOCK) {
/*
- * We should not add locks to granted list in the following cases:
+ * We should not add locks to granted list in
+ * the following cases:
* - this is an UNLOCK but not a real lock;
* - this is a TEST lock;
* - this is a F_CANCELLK lock (async flock has req_mode == 0)
@@ -2051,13 +2053,16 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
libcfs_debug_vmsg2(msgdata, fmt, args,
" ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_handle.h_cookie,
+ atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
+ lock->l_flags, nid,
+ lock->l_remote_handle.cookie,
exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
+ lock->l_pid, lock->l_callback_timeout,
+ lock->l_lvb_type);
va_end(args);
return;
}
@@ -2067,7 +2072,8 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
libcfs_debug_vmsg2(msgdata, fmt, args,
" ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s [%llu->%llu] (req %llu->%llu) flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
ldlm_lock_to_ns_name(lock), lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_handle.h_cookie,
+ atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
@@ -2076,8 +2082,10 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
ldlm_typename[resource->lr_type],
lock->l_policy_data.l_extent.start,
lock->l_policy_data.l_extent.end,
- lock->l_req_extent.start, lock->l_req_extent.end,
- lock->l_flags, nid, lock->l_remote_handle.cookie,
+ lock->l_req_extent.start,
+ lock->l_req_extent.end,
+ lock->l_flags, nid,
+ lock->l_remote_handle.cookie,
exp ? atomic_read(&exp->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout,
lock->l_lvb_type);
@@ -2087,7 +2095,8 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
libcfs_debug_vmsg2(msgdata, fmt, args,
" ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s pid: %d [%llu->%llu] flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu\n",
ldlm_lock_to_ns_name(lock), lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_handle.h_cookie,
+ atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
@@ -2097,7 +2106,8 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
lock->l_policy_data.l_flock.pid,
lock->l_policy_data.l_flock.start,
lock->l_policy_data.l_flock.end,
- lock->l_flags, nid, lock->l_remote_handle.cookie,
+ lock->l_flags, nid,
+ lock->l_remote_handle.cookie,
exp ? atomic_read(&exp->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout);
break;
@@ -2115,7 +2125,8 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
lock->l_policy_data.l_inodebits.bits,
atomic_read(&resource->lr_refcount),
ldlm_typename[resource->lr_type],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
+ lock->l_flags, nid,
+ lock->l_remote_handle.cookie,
exp ? atomic_read(&exp->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout,
lock->l_lvb_type);
@@ -2133,7 +2144,8 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
PLDLMRES(resource),
atomic_read(&resource->lr_refcount),
ldlm_typename[resource->lr_type],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
+ lock->l_flags, nid,
+ lock->l_remote_handle.cookie,
exp ? atomic_read(&exp->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout,
lock->l_lvb_type);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index e2707336586c..2d5a2c932ddc 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -184,7 +185,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
LASSERT(lock->l_lvb_data);
if (unlikely(lock->l_lvb_len < lvb_len)) {
- LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
+ LDLM_ERROR(lock,
+ "Replied LVB is larger than expectation, expected = %d, replied = %d",
lock->l_lvb_len, lvb_len);
rc = -EINVAL;
goto out;
@@ -598,7 +600,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
if (!lock) {
- CDEBUG(D_DLMTRACE, "callback on lock %#llx - lock disappeared\n",
+ CDEBUG(D_DLMTRACE,
+ "callback on lock %#llx - lock disappeared\n",
dlm_req->lock_handle[0].cookie);
rc = ldlm_callback_reply(req, -EINVAL);
ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
index 1ca605fe25ff..33b5a3f96fcb 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index d77bf0baa84f..da65d00a7811 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -386,7 +387,8 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl)
pl->pl_recalc_period;
if (recalc_interval_sec <= 0) {
/* DEBUG: should be re-removed after LU-4536 is fixed */
- CDEBUG(D_DLMTRACE, "%s: Negative interval(%ld), too short period(%ld)\n",
+ CDEBUG(D_DLMTRACE,
+ "%s: Negative interval(%ld), too short period(%ld)\n",
pl->pl_name, (long)recalc_interval_sec,
(long)pl->pl_recalc_period);
@@ -415,7 +417,8 @@ static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
lprocfs_counter_add(pl->pl_stats,
LDLM_POOL_SHRINK_FREED_STAT,
cancel);
- CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, shrunk %d\n",
+ CDEBUG(D_DLMTRACE,
+ "%s: request to shrink %d locks, shrunk %d\n",
pl->pl_name, nr, cancel);
}
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index f3bf238d0748..02ea14c9b089 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -121,7 +122,8 @@ static int ldlm_expired_completion_wait(void *data)
if (!lock->l_conn_export) {
static unsigned long next_dump, last_dump;
- LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
+ LDLM_ERROR(lock,
+ "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
(s64)lock->l_last_activity,
(s64)(ktime_get_real_seconds() -
lock->l_last_activity));
@@ -139,7 +141,8 @@ static int ldlm_expired_completion_wait(void *data)
obd = lock->l_conn_export->exp_obd;
imp = obd->u.cli.cl_import;
ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
- LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago), entering recovery for %s@%s",
+ LDLM_ERROR(lock,
+ "lock timed out (enqueued at %lld, %llds ago), entering recovery for %s@%s",
(s64)lock->l_last_activity,
(s64)(ktime_get_real_seconds() - lock->l_last_activity),
obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);
@@ -218,7 +221,8 @@ int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
return ldlm_completion_tail(lock, data);
}
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, going forward");
+ LDLM_DEBUG(lock,
+ "client-side enqueue returned a blocked lock, going forward");
return 0;
}
EXPORT_SYMBOL(ldlm_completion_ast_async);
@@ -264,7 +268,8 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
return 0;
}
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");
+ LDLM_DEBUG(lock,
+ "client-side enqueue returned a blocked lock, sleeping");
noreproc:
@@ -414,7 +419,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
rc = size;
goto cleanup;
} else if (unlikely(size > lvb_len)) {
- LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
+ LDLM_ERROR(lock,
+ "Replied LVB is larger than expectation, expected = %d, replied = %d",
lvb_len, size);
rc = -EINVAL;
goto cleanup;
@@ -473,8 +479,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
if (!ldlm_res_eq(&reply->lock_desc.l_resource.lr_name,
&lock->l_resource->lr_name)) {
- CDEBUG(D_INFO, "remote intent success, locking " DLDLMRES
- " instead of " DLDLMRES "\n",
+ CDEBUG(D_INFO,
+ "remote intent success, locking " DLDLMRES " instead of " DLDLMRES "\n",
PLDLMRES(&reply->lock_desc.l_resource),
PLDLMRES(lock->l_resource));
@@ -850,7 +856,8 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
unlock_res_and_lock(lock);
if (local_only) {
- CDEBUG(D_DLMTRACE, "not sending request (at caller's instruction)\n");
+ CDEBUG(D_DLMTRACE,
+ "not sending request (at caller's instruction)\n");
rc = LDLM_FL_LOCAL_ONLY;
}
ldlm_lock_cancel(lock);
@@ -963,7 +970,8 @@ static int ldlm_cli_cancel_req(struct obd_export *exp,
rc = ptlrpc_queue_wait(req);
if (rc == LUSTRE_ESTALE) {
- CDEBUG(D_DLMTRACE, "client/server (nid %s) out of sync -- not fatal\n",
+ CDEBUG(D_DLMTRACE,
+ "client/server (nid %s) out of sync -- not fatal\n",
libcfs_nid2str(req->rq_import->
imp_connection->c_peer.nid));
rc = 0;
@@ -1175,6 +1183,7 @@ ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
case LDLM_IBITS:
if (ns->ns_cancel && ns->ns_cancel(lock) != 0)
break;
+ /* fall through */
default:
result = LDLM_POLICY_SKIP_LOCK;
lock_res_and_lock(lock);
@@ -1363,13 +1372,14 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
* flags & LDLM_LRU_FLAG_LRUR - use LRU resize policy (SLV from server) to
* cancel not more than \a count locks;
*
- * flags & LDLM_LRU_FLAG_PASSED - cancel \a count number of old locks (located at
- * the beginning of LRU list);
+ * flags & LDLM_LRU_FLAG_PASSED - cancel \a count number of old locks (located
+ * at the beginning of LRU list);
*
- * flags & LDLM_LRU_FLAG_SHRINK - cancel not more than \a count locks according to
- * memory pressure policy function;
+ * flags & LDLM_LRU_FLAG_SHRINK - cancel not more than \a count locks according
+ * to memory pressure policy function;
*
- * flags & LDLM_LRU_FLAG_AGED - cancel \a count locks according to "aged policy".
+ * flags & LDLM_LRU_FLAG_AGED - cancel \a count locks according to
+ * "aged policy".
*
* flags & LDLM_LRU_FLAG_NO_WAIT - cancel as many unused locks as possible
* (typically before replaying locks) w/o
@@ -1383,7 +1393,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
int added = 0, unused, remained;
- int no_wait = flags & (LDLM_LRU_FLAG_NO_WAIT | LDLM_LRU_FLAG_LRUR_NO_WAIT);
+ int no_wait = flags &
+ (LDLM_LRU_FLAG_NO_WAIT | LDLM_LRU_FLAG_LRUR_NO_WAIT);
spin_lock(&ns->ns_lock);
unused = ns->ns_nr_unused;
@@ -2034,7 +2045,8 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
int canceled;
LIST_HEAD(cancels);
- CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before replay for namespace %s (%d)\n",
+ CDEBUG(D_DLMTRACE,
+ "Dropping as many unused locks as possible before replay for namespace %s (%d)\n",
ldlm_ns_name(ns), ns->ns_nr_unused);
/* We don't need to care whether or not LRU resize is enabled
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index c2ddf7312571..2689ffdf10e3 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -1358,7 +1359,8 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
LDLM_DEBUG_LIMIT(level, lock, "###");
if (!(level & D_CANTMASK) &&
++granted > ldlm_dump_granted_max) {
- CDEBUG(level, "only dump %d granted locks to avoid DDOS.\n",
+ CDEBUG(level,
+ "only dump %d granted locks to avoid DDOS.\n",
granted);
break;
}
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 3670fcaf373f..549369739d80 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -141,7 +142,8 @@ void ll_intent_drop_lock(struct lookup_intent *it)
handle.cookie = it->it_lock_handle;
- CDEBUG(D_DLMTRACE, "releasing lock with cookie %#llx from it %p\n",
+ CDEBUG(D_DLMTRACE,
+ "releasing lock with cookie %#llx from it %p\n",
handle.cookie, it);
ldlm_lock_decref(&handle, it->it_lock_mode);
@@ -152,7 +154,8 @@ void ll_intent_drop_lock(struct lookup_intent *it)
if (it->it_remote_lock_mode != 0) {
handle.cookie = it->it_remote_lock_handle;
- CDEBUG(D_DLMTRACE, "releasing remote lock with cookie%#llx from it %p\n",
+ CDEBUG(D_DLMTRACE,
+ "releasing remote lock with cookie%#llx from it %p\n",
handle.cookie, it);
ldlm_lock_decref(&handle,
it->it_remote_lock_mode);
@@ -185,7 +188,8 @@ void ll_invalidate_aliases(struct inode *inode)
spin_lock(&inode->i_lock);
hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
- CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
+ CDEBUG(D_DENTRY,
+ "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
dentry, dentry, dentry->d_parent,
d_inode(dentry), dentry->d_flags);
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 1db3e7f345c5..5b2e47c246f3 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -303,7 +304,8 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
struct md_op_data *op_data;
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p) pos/size %lu/%llu 32bit_api %d\n",
+ CDEBUG(D_VFSTRACE,
+ "VFS Op:inode=" DFID "(%p) pos/size %lu/%llu 32bit_api %d\n",
PFID(ll_inode2fid(inode)), inode, (unsigned long)pos,
i_size_read(inode), api32);
@@ -502,7 +504,8 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
break;
}
default: {
- CDEBUG(D_IOCTL, "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
+ CDEBUG(D_IOCTL,
+ "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
lump->lmm_magic, LOV_USER_MAGIC_V1,
LOV_USER_MAGIC_V3);
return -EINVAL;
@@ -732,10 +735,10 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
iput(inode);
if (rc != 0) {
- CDEBUG(D_HSM, "Could not read file data version of "
- DFID " (rc = %d). Archive request (%#llx) could not be done.\n",
- PFID(&copy->hc_hai.hai_fid), rc,
- copy->hc_hai.hai_cookie);
+ CDEBUG(D_HSM,
+ "Could not read file data version of " DFID " (rc = %d). Archive request (%#llx) could not be done.\n",
+ PFID(&copy->hc_hai.hai_fid), rc,
+ copy->hc_hai.hai_cookie);
hpk.hpk_flags |= HP_FLAG_RETRY;
/* hpk_errval must be >= 0 */
hpk.hpk_errval = -rc;
@@ -816,7 +819,8 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
iput(inode);
if (rc) {
- CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n");
+ CDEBUG(D_HSM,
+ "Could not read file data version. Request could not be confirmed.\n");
if (hpk.hpk_errval == 0)
hpk.hpk_errval = -rc;
goto progress;
@@ -832,8 +836,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
*/
if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
(copy->hc_data_version != data_version)) {
- CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. "
- DFID ", start:%#llx current:%#llx\n",
+ CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. " DFID ", start:%#llx current:%#llx\n",
PFID(&copy->hc_hai.hai_fid),
copy->hc_data_version, data_version);
/* File was changed, send error to cdt. Do not ask for
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index be665454f407..2d6e64dea266 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -386,8 +387,8 @@ static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
ll_finish_md_op_data(op_data);
if (rc == -ESTALE) {
/* reason for keep own exit path - don`t flood log
- * with messages with -ESTALE errors.
- */
+ * with messages with -ESTALE errors.
+ */
if (!it_disposition(itp, DISP_OPEN_OPEN) ||
it_open_error(DISP_OPEN_OPEN, itp))
goto out;
@@ -605,7 +606,8 @@ restart:
* to get file with different fid.
*/
it->it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID;
- rc = ll_intent_file_open(file->f_path.dentry, NULL, 0, it);
+ rc = ll_intent_file_open(file->f_path.dentry,
+ NULL, 0, it);
if (rc)
goto out_openerr;
@@ -1119,7 +1121,8 @@ out:
cl_io_fini(env, io);
if ((!rc || rc == -ENODATA) && count > 0 && io->ci_need_restart) {
- CDEBUG(D_VFSTRACE, "%s: restart %s from %lld, count:%zu, result: %zd\n",
+ CDEBUG(D_VFSTRACE,
+ "%s: restart %s from %lld, count:%zu, result: %zd\n",
file_dentry(file)->d_name.name,
iot == CIT_READ ? "read" : "write",
*ppos, count, result);
@@ -3455,7 +3458,8 @@ out:
if (rc == 0)
rc = -EAGAIN;
- CDEBUG(D_INODE, "%s: file=" DFID " waiting layout return: %d.\n",
+ CDEBUG(D_INODE,
+ "%s: file=" DFID " waiting layout return: %d.\n",
ll_get_fsname(inode->i_sb, NULL, 0),
PFID(&lli->lli_fid), rc);
}
diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index 34c2cfecf4b8..c43ac574274c 100644
--- a/drivers/staging/lustre/lustre/llite/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
index d2392e4c6872..df5c0c0ae703 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
index 422f410d95c1..a246b955306e 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 0287c751e1cd..b133fd00c08c 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -84,7 +85,7 @@ struct ll_dentry_data {
struct ll_getname_data {
struct dir_context ctx;
- char *lgd_name; /* points to a buffer with NAME_MAX+1 size */
+ char *lgd_name; /* points to buffer with NAME_MAX+1 size */
struct lu_fid lgd_fid; /* target fid we are looking for */
int lgd_found; /* inode matched? */
};
@@ -392,7 +393,8 @@ enum stats_track_type {
#define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */
#define LL_SBI_NOROOTSQUASH 0x100000 /* do not apply root squash */
#define LL_SBI_ALWAYS_PING 0x200000 /* always ping even if server
- * suppress_pings */
+ * suppress_pings
+ */
#define LL_SBI_FLAGS { \
"nolck", \
@@ -637,7 +639,8 @@ static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
#if BITS_PER_LONG == 32
return 1;
#elif defined(CONFIG_COMPAT)
- return unlikely(in_compat_syscall() || (sbi->ll_flags & LL_SBI_32BIT_API));
+ return unlikely(in_compat_syscall() ||
+ (sbi->ll_flags & LL_SBI_32BIT_API));
#else
return unlikely(sbi->ll_flags & LL_SBI_32BIT_API);
#endif
@@ -1065,7 +1068,7 @@ struct ll_statahead_info {
* hidden entries
*/
sai_agl_valid:1,/* AGL is valid for the dir */
- sai_in_readpage:1;/* statahead is in readdir() */
+ sai_in_readpage:1;/* statahead in readdir() */
wait_queue_head_t sai_waitq; /* stat-ahead wait queue */
struct ptlrpc_thread sai_thread; /* stat-ahead thread */
struct ptlrpc_thread sai_agl_thread; /* AGL thread */
@@ -1198,7 +1201,7 @@ typedef enum llioc_iter (*llioc_callback_t)(struct inode *inode,
* Return value:
* A magic pointer will be returned if success;
* otherwise, NULL will be returned.
- * */
+ */
void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd);
void ll_iocontrol_unregister(void *magic);
@@ -1261,7 +1264,8 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
handle.cookie = it->it_lock_handle;
- CDEBUG(D_DLMTRACE, "setting l_data to inode " DFID "%p for lock %#llx\n",
+ CDEBUG(D_DLMTRACE,
+ "setting l_data to inode " DFID "%p for lock %#llx\n",
PFID(ll_inode2fid(inode)), inode, handle.cookie);
md_set_lock_data(exp, &handle, inode, &it->it_lock_bits);
@@ -1284,7 +1288,8 @@ static inline int d_lustre_invalid(const struct dentry *dentry)
*/
static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
{
- CDEBUG(D_DENTRY, "invalidate dentry %pd (%p) parent %p inode %p refc %d\n",
+ CDEBUG(D_DENTRY,
+ "invalidate dentry %pd (%p) parent %p inode %p refc %d\n",
dentry, dentry,
dentry->d_parent, d_inode(dentry), d_count(dentry));
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 25393e3a0fe8..65ac5128f005 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -231,7 +232,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
data, NULL);
if (err == -EBUSY) {
- LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
+ LCONSOLE_ERROR_MSG(0x14f,
+ "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
md);
goto out;
} else if (err) {
@@ -279,7 +281,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
}
obd_connect_flags2str(buf, PAGE_SIZE,
valid ^ CLIENT_CONNECT_MDT_REQD, ",");
- LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
+ LCONSOLE_ERROR_MSG(0x170,
+ "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
sbi->ll_md_exp->exp_obd->obd_name, buf);
kfree(buf);
err = -EPROTO;
@@ -380,7 +383,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
- CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
+ CDEBUG(D_RPCTRACE,
+ "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
data->ocd_connect_flags,
data->ocd_version, data->ocd_grant);
@@ -392,7 +396,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
NULL);
if (err == -EBUSY) {
- LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
+ LCONSOLE_ERROR_MSG(0x150,
+ "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
dt);
goto out_md;
} else if (err) {
@@ -915,7 +920,8 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
lprof = class_get_profile(profilenm);
if (!lprof) {
- LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
+ LCONSOLE_ERROR_MSG(0x156,
+ "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
profilenm);
err = -EINVAL;
goto out_free;
@@ -1042,7 +1048,8 @@ struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
} else {
inode = lock->l_resource->lr_lvb_inode;
LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
- D_WARNING, lock, "lr_lvb_inode %p is bogus: magic %08x",
+ D_WARNING, lock,
+ "lr_lvb_inode %p is bogus: magic %08x",
lock->l_resource->lr_lvb_inode,
lli->lli_inode_magic);
inode = NULL;
@@ -1744,7 +1751,8 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
}
if (body->mbo_valid & OBD_MD_FLMTIME) {
if (body->mbo_mtime > LTIME_S(inode->i_mtime)) {
- CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
+ CDEBUG(D_INODE,
+ "setting ino %lu mtime from %lu to %llu\n",
inode->i_ino, LTIME_S(inode->i_mtime),
body->mbo_mtime);
LTIME_S(inode->i_mtime) = body->mbo_mtime;
@@ -2254,7 +2262,8 @@ int ll_process_config(struct lustre_cfg *lcfg)
return -EINVAL;
sb = (void *)x;
/* This better be a real Lustre superblock! */
- LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
+ LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic ==
+ LMD_MAGIC);
/* Note we have not called client_common_fill_super yet, so
* proc fns must be able to handle that!
@@ -2571,8 +2580,9 @@ static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
*
* \param[in] file - File descriptor against which to perform the operation
* \param[in,out] arg - User-filled structure containing the linkno to operate
- * on and the available size. It is eventually filled with
- * the requested information or left untouched on error
+ * on and the available size. It is eventually filled
+ * with the requested information or left untouched on
+ * error
*
* \retval - 0 on success
* \retval - Appropriate negative error code on failure
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index ccc7ae15a943..c0533bd6f352 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -208,7 +209,8 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
*/
unlock_page(vmpage);
- CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
+ CDEBUG(D_MMAP,
+ "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
vmpage, vmpage->index);
*retry = true;
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index e50c637fab54..a6a1d80c711a 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -127,7 +128,8 @@ struct lustre_nfs_fid {
};
static struct dentry *
-ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *parent)
+ll_iget_for_nfs(struct super_block *sb,
+ struct lu_fid *fid, struct lu_fid *parent)
{
struct inode *inode;
struct dentry *result;
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index e3bd2d18eac5..644bea2f9d37 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 5cc2b3255207..a2687f46a16d 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -204,7 +205,8 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
if (!fid_res_name_eq(ll_inode2fid(inode),
&lock->l_resource->lr_name)) {
- LDLM_ERROR(lock, "data mismatch with object " DFID "(%p)",
+ LDLM_ERROR(lock,
+ "data mismatch with object " DFID "(%p)",
PFID(ll_inode2fid(inode)), inode);
LBUG();
}
@@ -289,7 +291,8 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
* we have to invalidate the negative children
* on master inode
*/
- CDEBUG(D_INODE, "Invalidate s" DFID " m" DFID "\n",
+ CDEBUG(D_INODE,
+ "Invalidate s" DFID " m" DFID "\n",
PFID(ll_inode2fid(inode)),
PFID(&lli->lli_pfid));
@@ -736,7 +739,8 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
*opened |= FILE_CREATED;
}
- if (d_really_is_positive(dentry) && it_disposition(it, DISP_OPEN_OPEN)) {
+ if (d_really_is_positive(dentry) &&
+ it_disposition(it, DISP_OPEN_OPEN)) {
/* Open dentry. */
if (S_ISFIFO(d_inode(dentry)->i_mode)) {
/* We cannot call open here as it might
@@ -949,7 +953,9 @@ static int ll_mknod(struct inode *dir, struct dentry *dchild,
switch (mode & S_IFMT) {
case 0:
- mode |= S_IFREG; /* for mode = 0 case, fallthrough */
+ mode |= S_IFREG;
+ /* for mode = 0 case */
+ /* fall through */
case S_IFREG:
case S_IFCHR:
case S_IFBLK:
@@ -980,7 +986,8 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
{
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p), flags=%u, excl=%d\n",
+ CDEBUG(D_VFSTRACE,
+ "VFS Op:name=%pd, dir=" DFID "(%p), flags=%u, excl=%d\n",
dentry, PFID(ll_inode2fid(dir)), dir, mode, want_excl);
rc = ll_mknod(dir, dentry, mode, 0);
@@ -1101,7 +1108,8 @@ static int ll_link(struct dentry *old_dentry, struct inode *dir,
struct md_op_data *op_data;
int err;
- CDEBUG(D_VFSTRACE, "VFS Op: inode=" DFID "(%p), dir=" DFID "(%p), target=%pd\n",
+ CDEBUG(D_VFSTRACE,
+ "VFS Op: inode=" DFID "(%p), dir=" DFID "(%p), target=%pd\n",
PFID(ll_inode2fid(src)), src, PFID(ll_inode2fid(dir)), dir,
new_dentry);
diff --git a/drivers/staging/lustre/lustre/llite/range_lock.c b/drivers/staging/lustre/lustre/llite/range_lock.c
index a32598bacdfb..cc9565f6bfe2 100644
--- a/drivers/staging/lustre/lustre/llite/range_lock.c
+++ b/drivers/staging/lustre/lustre/llite/range_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/range_lock.h b/drivers/staging/lustre/lustre/llite/range_lock.h
index 1e1519b1e006..38b2be4e378f 100644
--- a/drivers/staging/lustre/lustre/llite/range_lock.h
+++ b/drivers/staging/lustre/lustre/llite/range_lock.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index e72090572bcc..3e008ce7275d 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -297,7 +298,8 @@ stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
else
pg_count = start_left + st_pgs * (end - start - 1) + end_left;
- CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu pgcount %lu\n",
+ CDEBUG(D_READA,
+ "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu pgcount %lu\n",
st_off, st_len, st_pgs, off, length, pg_count);
return pg_count;
@@ -404,7 +406,8 @@ ll_read_ahead_pages(const struct lu_env *env, struct cl_io *io,
* forward read-ahead, it will be fixed when backward
* read-ahead is implemented
*/
- LASSERTF(page_idx >= ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
+ LASSERTF(page_idx >= ria->ria_stoff,
+ "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
page_idx,
ria->ria_start, ria->ria_end, ria->ria_stoff,
ria->ria_length, ria->ria_pages);
@@ -669,8 +672,9 @@ static void ras_stride_increase_window(struct ll_readahead_state *ras,
unsigned long stride_len;
LASSERT(ras->ras_stride_length > 0);
- LASSERTF(ras->ras_window_start + ras->ras_window_len
- >= ras->ras_stride_offset, "window_start %lu, window_len %lu stride_offset %lu\n",
+ LASSERTF(ras->ras_window_start + ras->ras_window_len >=
+ ras->ras_stride_offset,
+ "window_start %lu, window_len %lu stride_offset %lu\n",
ras->ras_window_start,
ras->ras_window_len, ras->ras_stride_offset);
@@ -766,7 +770,8 @@ static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
PAGE_SHIFT;
CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
- ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
+ ra->ra_max_read_ahead_whole_pages,
+ ra->ra_max_pages_per_file);
if (kms_pages &&
kms_pages <= ra->ra_max_read_ahead_whole_pages) {
@@ -884,7 +889,8 @@ static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
/* The initial ras_window_len is set to the request size. To avoid
* uselessly reading and discarding pages for random IO the window is
- * only increased once per consecutive request received. */
+ * only increased once per consecutive request received.
+ */
if ((ras->ras_consecutive_requests > 1 || stride_detect) &&
!ras->ras_request_index)
ras_increase_window(inode, ras, ra);
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 3619cd8bb5f3..722e5ea1af5f 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index ea9d59f07b78..90c7324575e4 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -1082,7 +1083,8 @@ static int ll_statahead_thread(void *arg)
struct ll_inode_info *clli;
clli = list_entry(sai->sai_agls.next,
- struct ll_inode_info, lli_agl_list);
+ struct ll_inode_info,
+ lli_agl_list);
list_del_init(&clli->lli_agl_list);
spin_unlock(&lli->lli_agl_lock);
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 0da4af81b830..0bda111a096e 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -88,7 +89,8 @@ static int __init lustre_init(void)
struct timespec64 ts;
int i, rc, seed[2];
- BUILD_BUG_ON(sizeof(LUSTRE_VOLATILE_HDR) != LUSTRE_VOLATILE_HDR_LEN + 1);
+ BUILD_BUG_ON(sizeof(LUSTRE_VOLATILE_HDR) !=
+ LUSTRE_VOLATILE_HDR_LEN + 1);
/* print an address of _any_ initialized kernel symbol from this
* module, to allow debugging with gdb that doesn't support data
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 3cd33483afaf..0690fdbf49f5 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index f9d9a161bd4e..8ccc8b799c02 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index adce0ff4ae44..02ea5161d635 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index c83853fa1bb4..bfae98e82d6f 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c
index e522f7c00617..4b6c7143bd2c 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_lock.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index 3953750b334e..05ad3b322a29 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 687c0c79d621..6eb0565ddc22 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 0be55623bac4..532384c91447 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index f9cf79761d51..4dc799d60a9f 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -365,7 +365,8 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
}
if (oit->it_status < 0) {
- CDEBUG(D_CACHE, "getxattr intent returned %d for fid " DFID "\n",
+ CDEBUG(D_CACHE,
+ "getxattr intent returned %d for fid " DFID "\n",
oit->it_status, PFID(ll_inode2fid(inode)));
rc = oit->it_status;
/* xattr data is so large that we don't want to cache it */
diff --git a/drivers/staging/lustre/lustre/llite/xattr_security.c b/drivers/staging/lustre/lustre/llite/xattr_security.c
index 391fb25ac31d..93ec07531ac7 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_security.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_security.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
index 5937468080b8..00dc858c10c9 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 22c247a7d8ca..1793c9f79b24 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index a0475231dd90..c27c3c32188d 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 6e16c930a021..c2c57f65431e 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
index f16cfa435f77..30727b7acccc 100644
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 89d92b05b48c..1185eceaf497 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index cea5f9dcd04e..c7db23472346 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index 1124fd5ab32f..d563dd73343a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index a21f074008af..ae28ddf80d9b 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 9e3b150967b4..c5f5d1b106dc 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
index e12dc5afc14f..2fcdeb707ff9 100644
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 916336115989..3796bbb25305 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index fefd3c588681..7ce01026a409 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 334ecb1bc049..105b707eed14 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
index 899d12c41aab..3e16e647b334 100644
--- a/drivers/staging/lustre/lustre/lov/lov_offset.c
+++ b/drivers/staging/lustre/lustre/lov/lov_offset.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 24fb2a97532b..e5b11c4085a9 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index de43c609cf3d..cfae1294d77a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index d774ee2a3675..ecd9329cd073 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 9d3b3f3e9f10..3bdf48e4edb4 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index d4646a0949d2..7e89a2e485fc 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
index d29f0bb33980..ea492be2eef3 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index 011296ee16e6..13d452086b61 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c
index b2e68c3e820d..915520bcdd60 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_page.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_page.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c
index 9bb7e9ea0a6a..721440feef72 100644
--- a/drivers/staging/lustre/lustre/lov/lproc_lov.c
+++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index f68513771527..6cce32491eb5 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index cbf011501005..e0300c34ca3a 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index ba13f0894e0d..46eefdc09e3a 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index cbfea3dd0319..3114907ac5ff 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index f45c91d1b4ae..488b98007558 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 6ef8ddec4ab6..03e55bca4ada 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -1151,7 +1152,7 @@ static int mdc_read_page_remote(void *data, struct page *page0)
}
for (npages = 1; npages < max_pages; npages++) {
- page = page_cache_alloc_cold(inode->i_mapping);
+ page = page_cache_alloc(inode->i_mapping);
if (!page)
break;
page_pool[npages] = page;
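Besides the SPDX tag, this file also picks up an API migration: page_cache_alloc_cold() is replaced by page_cache_alloc(). The _cold variant was removed along with cold-page allocation hints, and plain page_cache_alloc(mapping) is the drop-in replacement with the same signature. A small sketch of the substitution (variable names here are illustrative, not taken from the surrounding function):

	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	/* old, removed API: page = page_cache_alloc_cold(mapping); */
	page = page_cache_alloc(mapping);	/* same allocation, no cold hint */
	if (!page)
		return -ENOMEM;			/* caller-specific error handling */
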
diff --git a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
index 2ec2d7f731d3..636770624e8f 100644
--- a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
+++ b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_internal.h b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
index 7a2f2b7bc6b1..2c571c180578 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_internal.h
+++ b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 3d2b969c90a7..77fa8fea0249 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_internal.h b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
index 7b403fbd5f94..a0db830ca841 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_internal.h
+++ b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index 2a70e21ae07f..6ec5218a18c1 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index 20e64051d2d6..d415f8396038 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 95c7fa3b532c..fdd27ce46fda 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 3dc084cb93bc..7f65439f9b95 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 2df218b010e1..2985bca4dc4c 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 7964cad7e780..2156a82a613a 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index 739bfb9421ca..b1d6ba4a3190 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
index 4f0a42633d5a..b9bf81607bbf 100644
--- a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
+++ b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/linkea.c b/drivers/staging/lustre/lustre/obdclass/linkea.c
index 9af86d3d56e4..fe1638b0916e 100644
--- a/drivers/staging/lustre/lustre/obdclass/linkea.c
+++ b/drivers/staging/lustre/lustre/obdclass/linkea.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 6df911112731..fc59f29a4290 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index e92cccceefa1..e5e8687784ee 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 98021a2d7238..cd051e31233e 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
index 8fa969101650..d9c63adff206 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_internal.h b/drivers/staging/lustre/lustre/obdclass/llog_internal.h
index 8de90bc638b4..4991d4e589dc 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_internal.h
+++ b/drivers/staging/lustre/lustre/obdclass/llog_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
index 3c42de966077..28bbaa2136ac 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index d2d3114ce008..b431c3408fe4 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
index e4829880dc10..c83b7d7f8e72 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index e79485b4bf7f..05d71f568837 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -1507,12 +1508,16 @@ int lprocfs_write_frac_u64_helper(const char __user *buffer,
switch (tolower(*end)) {
case 'p':
units <<= 10;
+ /* fall through */
case 't':
units <<= 10;
+ /* fall through */
case 'g':
units <<= 10;
+ /* fall through */
case 'm':
units <<= 10;
+ /* fall through */
case 'k':
units <<= 10;
}
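The /* fall through */ comments added here (and in the ldlm, namei and osc_cache hunks above) mark deliberate case fall-through so that compilers and static checkers do not report it as a missing break. The unit-suffix switch is the clearest instance: each recognised suffix intentionally drops into the next case, picking up another factor of 1024 per level. A standalone sketch of the same pattern (a hypothetical helper for illustration, not the kernel function itself):

#include <ctype.h>

/* Scale factor for a size suffix: k=2^10, m=2^20, g=2^30, t=2^40, p=2^50.
 * Each case intentionally falls through, accumulating one factor of 1024
 * per level until 'k' is reached.
 */
static unsigned long long suffix_units(char suffix)
{
	unsigned long long units = 1;

	switch (tolower(suffix)) {
	case 'p':
		units <<= 10;
		/* fall through */
	case 't':
		units <<= 10;
		/* fall through */
	case 'g':
		units <<= 10;
		/* fall through */
	case 'm':
		units <<= 10;
		/* fall through */
	case 'k':
		units <<= 10;
		break;
	default:
		break;	/* no suffix: plain bytes */
	}
	return units;
}
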
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 09c98184a291..b938a3f9d50a 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_ref.c b/drivers/staging/lustre/lustre/obdclass/lu_ref.c
index fa690b2bd643..54fc88206534 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_ref.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_ref.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
index e1273c997b5f..71329adc0318 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index 2798d35ad318..e286a2665423 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 94a940faca5d..c0e192ae22a9 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index 1256034b60c1..2a79a223b98a 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index 7083f8786e9a..c4503bc36591 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c b/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
index 89abea26a1f8..355e888885f4 100644
--- a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
+++ b/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/uuid.c b/drivers/staging/lustre/lustre/obdclass/uuid.c
index 9b1872b99f2a..6cf7a03f048f 100644
--- a/drivers/staging/lustre/lustre/obdclass/uuid.c
+++ b/drivers/staging/lustre/lustre/obdclass/uuid.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index f9808d1cc352..b9c1dc7e61b0 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_internal.h b/drivers/staging/lustre/lustre/obdecho/echo_internal.h
index 966414fd5424..42faa164fabb 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_internal.h
+++ b/drivers/staging/lustre/lustre/obdecho/echo_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index ae13eb055229..dc76c35ae801 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index e1207c227b79..5767ac2a7d16 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -227,6 +228,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
rc = 65;
goto out;
}
+ /* fall through */
default:
if (atomic_read(&ext->oe_users) > 0) {
rc = 70;
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index 35bdbfb8660d..1449013722f6 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
index cf7b8879d7f0..2b5f324743e2 100644
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ b/drivers/staging/lustre/lustre/osc/osc_dev.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index a536908fb26a..feda61bcdb9b 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index f7969e33f28a..76743faf3e6d 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index b4f1f74dead8..fe8ed0d0497a 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index 945ae6e5a8b1..f82c87a77550 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index ed8a0dc18ee5..20094b6309f9 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
index a6118f8ba446..ce1731dc604f 100644
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ b/drivers/staging/lustre/lustre/osc/osc_quota.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 4c68c42b2281..53eda4c99142 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index b1d379a6a70f..2a9f2f2ebaa8 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index cfdcbcec2779..dfdb4587d49d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/errno.c b/drivers/staging/lustre/lustre/ptlrpc/errno.c
index cb788364a553..54f0c36dc2bd 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/errno.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/errno.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 62951f19b2ce..811b7ab3a582 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index 21f528957b73..5b0f65536c29 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 85854d9a376d..18769d335751 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index 480c20a6a792..254488be7093 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
index bc5aa7bcdba8..bc4398b9bd1d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 1392ae9747bd..36eea50a77e7 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index 12149fb64719..047d712e850c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 2969d8da270e..4847f9a90cc9 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
index df330e43bfe5..8251cbf2ad68 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index aad4ff191d95..a64e125df95f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -786,7 +787,7 @@ __u32 lustre_msg_get_flags(struct lustre_msg *msg)
CERROR("invalid msg %p: no ptlrpc body!\n", msg);
}
- /* no break */
+ /* fall through */
default:
/* flags might be printed in debug code while message
* uninitialized
@@ -854,7 +855,7 @@ __u32 lustre_msg_get_op_flags(struct lustre_msg *msg)
CERROR("invalid msg %p: no ptlrpc body!\n", msg);
}
- /* no break */
+ /* fall through */
default:
return 0;
}
@@ -1035,7 +1036,7 @@ int lustre_msg_get_status(struct lustre_msg *msg)
CERROR("invalid msg %p: no ptlrpc body!\n", msg);
}
- /* no break */
+ /* fall through */
default:
/* status might be printed in debug code while message
* uninitialized
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c
index 643388b03af7..2466868afb9c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pers.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index e4de50e18d08..fe6b47bfe8be 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index c38e166f1502..f9decbd1459d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
index 38e488dd5409..131fc6d9646e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index 0e476828cf75..8b865294d933 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 72a19a379e2f..e4d3f23e9f3a 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index cd7a5391a574..617e004d00f8 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -847,7 +848,7 @@ void sptlrpc_request_out_callback(struct ptlrpc_request *req)
if (req->rq_pool || !req->rq_reqbuf)
return;
- kfree(req->rq_reqbuf);
+ kvfree(req->rq_reqbuf);
req->rq_reqbuf = NULL;
req->rq_reqbuf_len = 0;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 059294aad172..77a3721beaee 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
index 0f4af66688a3..2389f9a8f534 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
index d10a8053d04f..8d1e0edfcede 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
index 7792132eb145..fd609b63d2de 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
index dc39a54c5e1a..80cea0b24693 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 6aa9b65b1926..44e34056515b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 155f6a45cc8b..23cdb7c4476c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index 07b86a1b6550..2f64eb417e77 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig
index 8eb13c3ba29c..27f078749148 100644
--- a/drivers/staging/media/atomisp/Kconfig
+++ b/drivers/staging/media/atomisp/Kconfig
@@ -1,9 +1,10 @@
menuconfig INTEL_ATOMISP
- bool "Enable support to Intel MIPI camera drivers"
- depends on X86 && EFI && MEDIA_CONTROLLER && PCI && ACPI
- help
- Enable support for the Intel ISP2 camera interfaces and MIPI
- sensor drivers.
+ bool "Enable support to Intel MIPI camera drivers"
+ depends on X86 && EFI && MEDIA_CONTROLLER && PCI && ACPI
+ select COMMON_CLK
+ help
+ Enable support for the Intel ISP2 camera interfaces and MIPI
+ sensor drivers.
if INTEL_ATOMISP
source "drivers/staging/media/atomisp/pci/Kconfig"
diff --git a/drivers/staging/media/atomisp/TODO b/drivers/staging/media/atomisp/TODO
index 737452cbf8a0..255ce3630c2a 100644
--- a/drivers/staging/media/atomisp/TODO
+++ b/drivers/staging/media/atomisp/TODO
@@ -36,13 +36,23 @@
there are any specific things that can be done to fold in support for
multiple firmware versions.
+8. Switch to V4L2 async API to set up sensor, lens and flash devices.
+ Control those devices using V4L2 sub-device API without custom
+ extensions.
-Limitations:
+9. Switch to standard V4L2 sub-device API for sensor and lens. In
+ particular, the user space API needs to support V4L2 controls as
+ defined in the V4L2 spec and references to atomisp must be removed from
+ these drivers.
+
+10. Use LED flash API for flash LED drivers such as LM3554 (which already
+ has a LED class driver).
-1. Currently the patch only support some camera sensors
- gc2235/gc0310/0v2680/ov2722/ov5693/mt9m114...
+11. Switch from videobuf1 to videobuf2. Videobuf1 is being removed!
+
+Limitations:
-2. To test the patches, you also need the ISP firmware
+1. To test the patches, you also need the ISP firmware
for BYT:/lib/firmware/shisp_2400b0_v21.bin
for CHT:/lib/firmware/shisp_2401a0_v21.bin
@@ -51,14 +61,14 @@ Limitations:
device but can also be extracted from the upgrade kit if you've managed
to lose them somehow.
-3. Without a 3A libary the capture behaviour is not very good. To take a good
+2. Without a 3A libary the capture behaviour is not very good. To take a good
picture, you need tune ISP parameters by IOCTL functions or use a 3A libary
such as libxcam.
-4. The driver is intended to drive the PCI exposed versions of the device.
+3. The driver is intended to drive the PCI exposed versions of the device.
It will not detect those devices enumerated via ACPI as a field of the
i915 GPU driver.
-5. The driver supports only v2 of the IPU/Camera. It will not work with the
+4. The driver supports only v2 of the IPU/Camera. It will not work with the
versions of the hardware in other SoCs.
diff --git a/drivers/staging/media/atomisp/i2c/Kconfig b/drivers/staging/media/atomisp/i2c/Kconfig
index b80d29d53e65..db054d3c7ed6 100644
--- a/drivers/staging/media/atomisp/i2c/Kconfig
+++ b/drivers/staging/media/atomisp/i2c/Kconfig
@@ -3,104 +3,96 @@
#
source "drivers/staging/media/atomisp/i2c/ov5693/Kconfig"
-source "drivers/staging/media/atomisp/i2c/imx/Kconfig"
-config VIDEO_OV2722
+config VIDEO_ATOMISP_OV2722
tristate "OVT ov2722 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the OVT
- OV2722 raw camera.
+ This is a Video4Linux2 sensor-level driver for the OVT
+ OV2722 raw camera.
- OVT is a 2M raw sensor.
+ OVT is a 2M raw sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
-config VIDEO_GC2235
+config VIDEO_ATOMISP_GC2235
tristate "Galaxy gc2235 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the OVT
- GC2235 raw camera.
+ This is a Video4Linux2 sensor-level driver for the OVT
+ GC2235 raw camera.
- GC2235 is a 2M raw sensor.
+ GC2235 is a 2M raw sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
-config VIDEO_OV8858
+config VIDEO_ATOMISP_OV8858
tristate "Omnivision ov8858 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2 && VIDEO_ATOMISP
---help---
- This is a Video4Linux2 sensor-level driver for the Omnivision
- ov8858 RAW sensor.
+ This is a Video4Linux2 sensor-level driver for the Omnivision
+ ov8858 RAW sensor.
OV8858 is a 8M raw sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
-config VIDEO_MSRLIST_HELPER
+config VIDEO_ATOMISP_MSRLIST_HELPER
tristate "Helper library to load, parse and apply large register lists."
depends on I2C
---help---
- This is a helper library to be used from a sensor driver to load, parse
- and apply large register lists.
+ This is a helper library to be used from a sensor driver to load, parse
+ and apply large register lists.
- To compile this driver as a module, choose M here: the
- module will be called libmsrlisthelper.
+ To compile this driver as a module, choose M here: the
+ module will be called libmsrlisthelper.
-config VIDEO_MT9M114
+config VIDEO_ATOMISP_MT9M114
tristate "Aptina mt9m114 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the Micron
- mt9m114 1.3 Mpixel camera.
+ This is a Video4Linux2 sensor-level driver for the Micron
+ mt9m114 1.3 Mpixel camera.
- mt9m114 is video camera sensor.
+ mt9m114 is video camera sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
-config VIDEO_AP1302
- tristate "AP1302 external ISP support"
- depends on I2C && VIDEO_V4L2
- select REGMAP_I2C
- ---help---
- This is a Video4Linux2 sensor-level driver for the external
- ISP AP1302.
-
- AP1302 is an exteral ISP.
-
- It currently only works with the atomisp driver.
-
-config VIDEO_GC0310
+config VIDEO_ATOMISP_GC0310
tristate "GC0310 sensor support"
- depends on I2C && VIDEO_V4L2
- ---help---
- This is a Video4Linux2 sensor-level driver for the Galaxycore
- GC0310 0.3MP sensor.
+ depends on ACPI
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Galaxycore
+ GC0310 0.3MP sensor.
-config VIDEO_OV2680
+config VIDEO_ATOMISP_OV2680
tristate "Omnivision OV2680 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the Omnivision
- OV2680 raw camera.
+ This is a Video4Linux2 sensor-level driver for the Omnivision
+ OV2680 raw camera.
- ov2680 is a 2M raw sensor.
+ ov2680 is a 2M raw sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
#
# Kconfig for flash drivers
#
-config VIDEO_LM3554
+config VIDEO_ATOMISP_LM3554
tristate "LM3554 flash light driver"
+ depends on ACPI
depends on VIDEO_V4L2 && I2C
---help---
- This is a Video4Linux2 sub-dev driver for the LM3554
- flash light driver.
-
- To compile this driver as a module, choose M here: the
- module will be called lm3554
-
+ This is a Video4Linux2 sub-dev driver for the LM3554
+ flash light driver.
+ To compile this driver as a module, choose M here: the
+ module will be called lm3554
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
index 041a041718d2..99ea35c043fd 100644
--- a/drivers/staging/media/atomisp/i2c/Makefile
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -3,22 +3,19 @@
# Makefile for sensor drivers
#
-obj-$(CONFIG_VIDEO_IMX) += imx/
-obj-$(CONFIG_VIDEO_OV5693) += ov5693/
-obj-$(CONFIG_VIDEO_MT9M114) += mt9m114.o
-obj-$(CONFIG_VIDEO_GC2235) += gc2235.o
-obj-$(CONFIG_VIDEO_OV2722) += ov2722.o
-obj-$(CONFIG_VIDEO_OV2680) += ov2680.o
-obj-$(CONFIG_VIDEO_GC0310) += gc0310.o
+obj-$(CONFIG_VIDEO_ATOMISP_OV5693) += ov5693/
+obj-$(CONFIG_VIDEO_ATOMISP_MT9M114) += atomisp-mt9m114.o
+obj-$(CONFIG_VIDEO_ATOMISP_GC2235) += atomisp-gc2235.o
+obj-$(CONFIG_VIDEO_ATOMISP_OV2722) += atomisp-ov2722.o
+obj-$(CONFIG_VIDEO_ATOMISP_OV2680) += atomisp-ov2680.o
+obj-$(CONFIG_VIDEO_ATOMISP_GC0310) += atomisp-gc0310.o
-obj-$(CONFIG_VIDEO_MSRLIST_HELPER) += libmsrlisthelper.o
-
-obj-$(CONFIG_VIDEO_AP1302) += ap1302.o
+obj-$(CONFIG_VIDEO_ATOMISP_MSRLIST_HELPER) += atomisp-libmsrlisthelper.o
# Makefile for flash drivers
#
-obj-$(CONFIG_VIDEO_LM3554) += lm3554.o
+obj-$(CONFIG_VIDEO_ATOMISP_LM3554) += atomisp-lm3554.o
# HACK! While this driver is in bad shape, don't enable several warnings
# that would be otherwise enabled with W=1
diff --git a/drivers/staging/media/atomisp/i2c/ap1302.c b/drivers/staging/media/atomisp/i2c/ap1302.c
deleted file mode 100644
index 2f772a020c8b..000000000000
--- a/drivers/staging/media/atomisp/i2c/ap1302.c
+++ /dev/null
@@ -1,1255 +0,0 @@
-/*
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include "../include/linux/atomisp.h"
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include "ap1302.h"
-
-#define to_ap1302_device(sub_dev) \
- container_of(sub_dev, struct ap1302_device, sd)
-
-/* Static definitions */
-static struct regmap_config ap1302_reg16_config = {
- .reg_bits = 16,
- .val_bits = 16,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .val_format_endian = REGMAP_ENDIAN_BIG,
-};
-
-static struct regmap_config ap1302_reg32_config = {
- .reg_bits = 16,
- .val_bits = 32,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .val_format_endian = REGMAP_ENDIAN_BIG,
-};
-
-static enum ap1302_contexts ap1302_cntx_mapping[] = {
- CONTEXT_PREVIEW, /* Invalid atomisp run mode */
- CONTEXT_VIDEO, /* ATOMISP_RUN_MODE_VIDEO */
- CONTEXT_SNAPSHOT, /* ATOMISP_RUN_MODE_STILL_CAPTURE */
- CONTEXT_SNAPSHOT, /* ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE */
- CONTEXT_PREVIEW, /* ATOMISP_RUN_MODE_PREVIEW */
-};
-
-static struct ap1302_res_struct ap1302_preview_res[] = {
- {
- .width = 640,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 720,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 1280,
- .height = 720,
- .fps = 30,
- },
- {
- .width = 1920,
- .height = 1080,
- .fps = 30,
- }
-};
-
-static struct ap1302_res_struct ap1302_snapshot_res[] = {
- {
- .width = 640,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 720,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 1280,
- .height = 720,
- .fps = 30,
- },
- {
- .width = 1920,
- .height = 1080,
- .fps = 30,
- }
-};
-
-static struct ap1302_res_struct ap1302_video_res[] = {
- {
- .width = 640,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 720,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 1280,
- .height = 720,
- .fps = 30,
- },
- {
- .width = 1920,
- .height = 1080,
- .fps = 30,
- }
-};
-
-static enum ap1302_contexts stream_to_context[] = {
- CONTEXT_SNAPSHOT,
- CONTEXT_PREVIEW,
- CONTEXT_PREVIEW,
- CONTEXT_VIDEO
-};
-
-static u16 aux_stream_config[CONTEXT_NUM][CONTEXT_NUM] = {
- {0, 0, 0}, /* Preview: No aux streams. */
- {1, 0, 2}, /* Snapshot: 1 for postview. 2 for video */
- {1, 0, 0}, /* Video: 1 for preview. */
-};
-
-static struct ap1302_context_info context_info[] = {
- {CNTX_WIDTH, AP1302_REG16, "width"},
- {CNTX_HEIGHT, AP1302_REG16, "height"},
- {CNTX_ROI_X0, AP1302_REG16, "roi_x0"},
- {CNTX_ROI_X1, AP1302_REG16, "roi_x1"},
- {CNTX_ROI_Y0, AP1302_REG16, "roi_y0"},
- {CNTX_ROI_Y1, AP1302_REG16, "roi_y1"},
- {CNTX_ASPECT, AP1302_REG16, "aspect"},
- {CNTX_LOCK, AP1302_REG16, "lock"},
- {CNTX_ENABLE, AP1302_REG16, "enable"},
- {CNTX_OUT_FMT, AP1302_REG16, "out_fmt"},
- {CNTX_SENSOR_MODE, AP1302_REG16, "sensor_mode"},
- {CNTX_MIPI_CTRL, AP1302_REG16, "mipi_ctrl"},
- {CNTX_MIPI_II_CTRL, AP1302_REG16, "mipi_ii_ctrl"},
- {CNTX_LINE_TIME, AP1302_REG32, "line_time"},
- {CNTX_MAX_FPS, AP1302_REG16, "max_fps"},
- {CNTX_AE_USG, AP1302_REG16, "ae_usg"},
- {CNTX_AE_UPPER_ET, AP1302_REG32, "ae_upper_et"},
- {CNTX_AE_MAX_ET, AP1302_REG32, "ae_max_et"},
- {CNTX_SS, AP1302_REG16, "ss"},
- {CNTX_S1_SENSOR_MODE, AP1302_REG16, "s1_sensor_mode"},
- {CNTX_HINF_CTRL, AP1302_REG16, "hinf_ctrl"},
-};
-
-/* This array stores the description list for metadata.
- The metadata contains exposure settings and face
- detection results. */
-static u16 ap1302_ss_list[] = {
- 0xb01c, /* From 0x0186 with size 0x1C are exposure settings. */
- 0x0186,
- 0xb002, /* 0x71c0 is for F-number */
- 0x71c0,
- 0xb010, /* From 0x03dc with size 0x10 are face general infos. */
- 0x03dc,
- 0xb0a0, /* From 0x03e4 with size 0xa0 are face detail infos. */
- 0x03e4,
- 0xb020, /* From 0x0604 with size 0x20 are smile rate infos. */
- 0x0604,
- 0x0000
-};
-
-/* End of static definitions */
-
-static int ap1302_i2c_read_reg(struct v4l2_subdev *sd,
- u16 reg, u16 len, void *val)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- if (len == AP1302_REG16)
- ret = regmap_read(dev->regmap16, reg, val);
- else if (len == AP1302_REG32)
- ret = regmap_read(dev->regmap32, reg, val);
- else
- ret = -EINVAL;
- if (ret) {
- dev_dbg(&client->dev, "Read reg failed. reg=0x%04X\n", reg);
- return ret;
- }
- if (len == AP1302_REG16)
- dev_dbg(&client->dev, "read_reg[0x%04X] = 0x%04X\n",
- reg, *(u16 *)val);
- else
- dev_dbg(&client->dev, "read_reg[0x%04X] = 0x%08X\n",
- reg, *(u32 *)val);
- return ret;
-}
-
-static int ap1302_i2c_write_reg(struct v4l2_subdev *sd,
- u16 reg, u16 len, u32 val)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- if (len == AP1302_REG16)
- ret = regmap_write(dev->regmap16, reg, val);
- else if (len == AP1302_REG32)
- ret = regmap_write(dev->regmap32, reg, val);
- else
- ret = -EINVAL;
- if (ret) {
- dev_dbg(&client->dev, "Write reg failed. reg=0x%04X\n", reg);
- return ret;
- }
- if (len == AP1302_REG16)
- dev_dbg(&client->dev, "write_reg[0x%04X] = 0x%04X\n",
- reg, (u16)val);
- else
- dev_dbg(&client->dev, "write_reg[0x%04X] = 0x%08X\n",
- reg, (u32)val);
- return ret;
-}
-
-static u16
-ap1302_calculate_context_reg_addr(enum ap1302_contexts context, u16 offset)
-{
- u16 reg_addr;
- /* The register offset is defined according to preview/video registers.
- Preview and video context have the same register definition.
- But snapshot context does not have register S1_SENSOR_MODE.
- When setting snapshot registers, if the offset exceeds
- S1_SENSOR_MODE, the actual offset needs to minus 2. */
- if (context == CONTEXT_SNAPSHOT) {
- if (offset == CNTX_S1_SENSOR_MODE)
- return 0;
- if (offset > CNTX_S1_SENSOR_MODE)
- offset -= 2;
- }
- if (context == CONTEXT_PREVIEW)
- reg_addr = REG_PREVIEW_BASE + offset;
- else if (context == CONTEXT_VIDEO)
- reg_addr = REG_VIDEO_BASE + offset;
- else
- reg_addr = REG_SNAPSHOT_BASE + offset;
- return reg_addr;
-}
-
-static int ap1302_read_context_reg(struct v4l2_subdev *sd,
- enum ap1302_contexts context, u16 offset, u16 len)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- u16 reg_addr = ap1302_calculate_context_reg_addr(context, offset);
- if (reg_addr == 0)
- return -EINVAL;
- return ap1302_i2c_read_reg(sd, reg_addr, len,
- ((u8 *)&dev->cntx_config[context]) + offset);
-}
-
-static int ap1302_write_context_reg(struct v4l2_subdev *sd,
- enum ap1302_contexts context, u16 offset, u16 len)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- u16 reg_addr = ap1302_calculate_context_reg_addr(context, offset);
- if (reg_addr == 0)
- return -EINVAL;
- return ap1302_i2c_write_reg(sd, reg_addr, len,
- *(u32 *)(((u8 *)&dev->cntx_config[context]) + offset));
-}
-
-static int ap1302_dump_context_reg(struct v4l2_subdev *sd,
- enum ap1302_contexts context)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ap1302_device *dev = to_ap1302_device(sd);
- int i;
- dev_dbg(&client->dev, "Dump registers for context[%d]:\n", context);
- for (i = 0; i < ARRAY_SIZE(context_info); i++) {
- struct ap1302_context_info *info = &context_info[i];
- u8 *var = (u8 *)&dev->cntx_config[context] + info->offset;
- /* Snapshot context does not have s1_sensor_mode register. */
- if (context == CONTEXT_SNAPSHOT &&
- info->offset == CNTX_S1_SENSOR_MODE)
- continue;
- ap1302_read_context_reg(sd, context, info->offset, info->len);
- if (info->len == AP1302_REG16)
- dev_dbg(&client->dev, "context.%s = 0x%04X (%d)\n",
- info->name, *(u16 *)var, *(u16 *)var);
- else
- dev_dbg(&client->dev, "context.%s = 0x%08X (%d)\n",
- info->name, *(u32 *)var, *(u32 *)var);
- }
- return 0;
-}
-
-static int ap1302_request_firmware(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
- ret = request_firmware(&dev->fw, "ap1302_fw.bin", &client->dev);
- if (ret)
- dev_err(&client->dev,
- "ap1302_request_firmware failed. ret=%d\n", ret);
- return ret;
-}
-
-/* When loading firmware, host writes firmware data from address 0x8000.
- When the address reaches 0x9FFF, the next address should return to 0x8000.
- This function handles this address window and load firmware data to AP1302.
- win_pos indicates the offset within this window. Firmware loading procedure
- may call this function several times. win_pos records the current position
- that has been written to.*/
-static int ap1302_write_fw_window(struct v4l2_subdev *sd,
- u16 *win_pos, const u8 *buf, u32 len)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
- u32 pos;
- u32 sub_len;
- for (pos = 0; pos < len; pos += sub_len) {
- if (len - pos < AP1302_FW_WINDOW_SIZE - *win_pos)
- sub_len = len - pos;
- else
- sub_len = AP1302_FW_WINDOW_SIZE - *win_pos;
- ret = regmap_raw_write(dev->regmap16,
- *win_pos + AP1302_FW_WINDOW_OFFSET,
- buf + pos, sub_len);
- if (ret)
- return ret;
- *win_pos += sub_len;
- if (*win_pos >= AP1302_FW_WINDOW_SIZE)
- *win_pos = 0;
- }
- return 0;
-}
-
-static int ap1302_load_firmware(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ap1302_device *dev = to_ap1302_device(sd);
- const struct ap1302_firmware *fw;
- const u8 *fw_data;
- u16 reg_val = 0;
- u16 win_pos = 0;
- int ret;
-
- dev_info(&client->dev, "Start to load firmware.\n");
- if (!dev->fw) {
- dev_err(&client->dev, "firmware not requested.\n");
- return -EINVAL;
- }
- fw = (const struct ap1302_firmware *) dev->fw->data;
- if (dev->fw->size != (sizeof(*fw) + fw->total_size)) {
- dev_err(&client->dev, "firmware size does not match.\n");
- return -EINVAL;
- }
- /* The fw binary contains a header of struct ap1302_firmware.
- Following the header is the bootdata of AP1302.
- The bootdata pointer can be referenced as &fw[1]. */
- fw_data = (u8 *)&fw[1];
-
- /* Clear crc register. */
- ret = ap1302_i2c_write_reg(sd, REG_SIP_CRC, AP1302_REG16, 0xFFFF);
- if (ret)
- return ret;
-
- /* Load FW data for PLL init stage. */
- ret = ap1302_write_fw_window(sd, &win_pos, fw_data, fw->pll_init_size);
- if (ret)
- return ret;
-
- /* Write 2 to bootdata_stage register to apply basic_init_hp
- settings and enable PLL. */
- ret = ap1302_i2c_write_reg(sd, REG_BOOTDATA_STAGE,
- AP1302_REG16, 0x0002);
- if (ret)
- return ret;
-
- /* Wait 1ms for PLL to lock. */
- msleep(20);
-
- /* Load the rest of bootdata content. */
- ret = ap1302_write_fw_window(sd, &win_pos, fw_data + fw->pll_init_size,
- fw->total_size - fw->pll_init_size);
- if (ret)
- return ret;
-
- /* Check crc. */
- ret = ap1302_i2c_read_reg(sd, REG_SIP_CRC, AP1302_REG16, &reg_val);
- if (ret)
- return ret;
- if (reg_val != fw->crc) {
- dev_err(&client->dev,
- "crc does not match. T:0x%04X F:0x%04X\n",
- fw->crc, reg_val);
- return -EAGAIN;
- }
-
- /* Write 0xFFFF to bootdata_stage register to indicate AP1302 that
- the whole bootdata content has been loaded. */
- ret = ap1302_i2c_write_reg(sd, REG_BOOTDATA_STAGE,
- AP1302_REG16, 0xFFFF);
- if (ret)
- return ret;
- dev_info(&client->dev, "Load firmware successfully.\n");
-
- return 0;
-}
-
-static int __ap1302_s_power(struct v4l2_subdev *sd, int on, int load_fw)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret, i;
- u16 ss_ptr;
-
- dev_info(&client->dev, "ap1302_s_power is called.\n");
- ret = dev->platform_data->power_ctrl(sd, on);
- if (ret) {
- dev_err(&client->dev,
- "ap1302_s_power error. on=%d ret=%d\n", on, ret);
- return ret;
- }
- dev->power_on = on;
- if (!on || !load_fw)
- return 0;
- /* Load firmware after power on. */
- ret = ap1302_load_firmware(sd);
- if (ret) {
- dev_err(&client->dev,
- "ap1302_load_firmware failed. ret=%d\n", ret);
- return ret;
- }
- ret = ap1302_i2c_read_reg(sd, REG_SS_HEAD_PT0, AP1302_REG16, &ss_ptr);
- if (ret)
- return ret;
- for (i = 0; i < ARRAY_SIZE(ap1302_ss_list); i++) {
- ret = ap1302_i2c_write_reg(sd, ss_ptr + i * 2,
- AP1302_REG16, ap1302_ss_list[i]);
- if (ret)
- return ret;
- }
- return ret;
-}
-
-static int ap1302_s_power(struct v4l2_subdev *sd, int on)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
-
- mutex_lock(&dev->input_lock);
- ret = __ap1302_s_power(sd, on, 1);
- dev->sys_activated = 0;
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-
-static int ap1302_s_config(struct v4l2_subdev *sd, void *pdata)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct camera_mipi_info *mipi_info;
- u16 reg_val = 0;
- int ret;
-
- dev_info(&client->dev, "ap1302_s_config is called.\n");
- if (pdata == NULL)
- return -ENODEV;
-
- dev->platform_data = pdata;
-
- mutex_lock(&dev->input_lock);
-
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret)
- goto fail_power;
- }
-
- ret = __ap1302_s_power(sd, 1, 0);
- if (ret)
- goto fail_power;
-
- /* Detect for AP1302 */
- ret = ap1302_i2c_read_reg(sd, REG_CHIP_VERSION, AP1302_REG16, &reg_val);
- if (ret || (reg_val != AP1302_CHIP_ID)) {
- dev_err(&client->dev,
- "Chip version does no match. ret=%d ver=0x%04x\n",
- ret, reg_val);
- goto fail_config;
- }
- dev_info(&client->dev, "AP1302 Chip ID is 0x%X\n", reg_val);
-
- /* Detect revision for AP1302 */
- ret = ap1302_i2c_read_reg(sd, REG_CHIP_REV, AP1302_REG16, &reg_val);
- if (ret)
- goto fail_config;
- dev_info(&client->dev, "AP1302 Chip Rev is 0x%X\n", reg_val);
- ret = dev->platform_data->csi_cfg(sd, 1);
- if (ret)
- goto fail_config;
-
- mipi_info = v4l2_get_subdev_hostdata(sd);
- if (!mipi_info)
- goto fail_config;
- dev->num_lanes = mipi_info->num_lanes;
-
- ret = __ap1302_s_power(sd, 0, 0);
- if (ret)
- goto fail_power;
-
- mutex_unlock(&dev->input_lock);
-
- return ret;
-
-fail_config:
- __ap1302_s_power(sd, 0, 0);
-fail_power:
- mutex_unlock(&dev->input_lock);
- dev_err(&client->dev, "ap1302_s_config failed\n");
- return ret;
-}
-
-static enum ap1302_contexts ap1302_get_context(struct v4l2_subdev *sd)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- return dev->cur_context;
-}
-
-static int ap1302_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->index)
- return -EINVAL;
-
- code->code = MEDIA_BUS_FMT_UYVY8_1X16;
-
- return 0;
-}
-
-static int ap1302_match_resolution(struct ap1302_context_res *res,
- struct v4l2_mbus_framefmt *fmt)
-{
- s32 w0, h0, mismatch, distance;
- s32 w1 = fmt->width;
- s32 h1 = fmt->height;
- s32 min_distance = INT_MAX;
- s32 i, idx = -1;
-
- if (w1 == 0 || h1 == 0)
- return -1;
-
- for (i = 0; i < res->res_num; i++) {
- w0 = res->res_table[i].width;
- h0 = res->res_table[i].height;
- if (w0 < w1 || h0 < h1)
- continue;
- mismatch = abs(w0 * h1 - w1 * h0) * 8192 / w1 / h0;
- if (mismatch > 8192 * AP1302_MAX_RATIO_MISMATCH / 100)
- continue;
- distance = (w0 * h1 + w1 * h0) * 8192 / w1 / h1;
- if (distance < min_distance) {
- min_distance = distance;
- idx = i;
- }
- }
-
- return idx;
-}
-
-static s32 ap1302_try_mbus_fmt_locked(struct v4l2_subdev *sd,
- enum ap1302_contexts context,
- struct v4l2_mbus_framefmt *fmt)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct ap1302_res_struct *res_table;
- s32 res_num, idx = -1;
-
- res_table = dev->cntx_res[context].res_table;
- res_num = dev->cntx_res[context].res_num;
-
- if ((fmt->width <= res_table[res_num - 1].width) &&
- (fmt->height <= res_table[res_num - 1].height))
- idx = ap1302_match_resolution(&dev->cntx_res[context], fmt);
- if (idx == -1)
- idx = res_num - 1;
-
- fmt->width = res_table[idx].width;
- fmt->height = res_table[idx].height;
- fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
- return idx;
-}
-
-
-static int ap1302_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct ap1302_device *dev = to_ap1302_device(sd);
- enum ap1302_contexts context;
- struct ap1302_res_struct *res_table;
- s32 cur_res;
- if (format->pad)
- return -EINVAL;
- mutex_lock(&dev->input_lock);
- context = ap1302_get_context(sd);
- res_table = dev->cntx_res[context].res_table;
- cur_res = dev->cntx_res[context].cur_res;
- fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
- fmt->width = res_table[cur_res].width;
- fmt->height = res_table[cur_res].height;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int ap1302_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct atomisp_input_stream_info *stream_info =
- (struct atomisp_input_stream_info *)fmt->reserved;
- enum ap1302_contexts context, main_context;
- if (format->pad)
- return -EINVAL;
- if (!fmt)
- return -EINVAL;
- mutex_lock(&dev->input_lock);
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- context = ap1302_get_context(sd);
- ap1302_try_mbus_fmt_locked(sd, context, fmt);
- cfg->try_fmt = *fmt;
- mutex_unlock(&dev->input_lock);
- return 0;
- }
- context = stream_to_context[stream_info->stream];
- dev_dbg(&client->dev, "ap1302_set_mbus_fmt. stream=%d context=%d\n",
- stream_info->stream, context);
- dev->cntx_res[context].cur_res =
- ap1302_try_mbus_fmt_locked(sd, context, fmt);
- dev->cntx_config[context].width = fmt->width;
- dev->cntx_config[context].height = fmt->height;
- ap1302_write_context_reg(sd, context, CNTX_WIDTH, AP1302_REG16);
- ap1302_write_context_reg(sd, context, CNTX_HEIGHT, AP1302_REG16);
- ap1302_read_context_reg(sd, context, CNTX_OUT_FMT, AP1302_REG16);
- dev->cntx_config[context].out_fmt &= ~OUT_FMT_TYPE_MASK;
- dev->cntx_config[context].out_fmt |= AP1302_FMT_UYVY422;
- ap1302_write_context_reg(sd, context, CNTX_OUT_FMT, AP1302_REG16);
-
- main_context = ap1302_get_context(sd);
- if (context == main_context) {
- ap1302_read_context_reg(sd, context,
- CNTX_MIPI_CTRL, AP1302_REG16);
- dev->cntx_config[context].mipi_ctrl &= ~MIPI_CTRL_IMGVC_MASK;
- dev->cntx_config[context].mipi_ctrl |=
- (context << MIPI_CTRL_IMGVC_OFFSET);
- dev->cntx_config[context].mipi_ctrl &= ~MIPI_CTRL_SSVC_MASK;
- dev->cntx_config[context].mipi_ctrl |=
- (context << MIPI_CTRL_SSVC_OFFSET);
- dev->cntx_config[context].mipi_ctrl &= ~MIPI_CTRL_SSTYPE_MASK;
- dev->cntx_config[context].mipi_ctrl |=
- (0x12 << MIPI_CTRL_SSTYPE_OFFSET);
- ap1302_write_context_reg(sd, context,
- CNTX_MIPI_CTRL, AP1302_REG16);
- ap1302_read_context_reg(sd, context,
- CNTX_SS, AP1302_REG16);
- dev->cntx_config[context].ss = AP1302_SS_CTRL;
- ap1302_write_context_reg(sd, context,
- CNTX_SS, AP1302_REG16);
- } else {
- /* Configure aux stream */
- ap1302_read_context_reg(sd, context,
- CNTX_MIPI_II_CTRL, AP1302_REG16);
- dev->cntx_config[context].mipi_ii_ctrl &= ~MIPI_CTRL_IMGVC_MASK;
- dev->cntx_config[context].mipi_ii_ctrl |=
- (context << MIPI_CTRL_IMGVC_OFFSET);
- ap1302_write_context_reg(sd, context,
- CNTX_MIPI_II_CTRL, AP1302_REG16);
- if (stream_info->enable) {
- ap1302_read_context_reg(sd, main_context,
- CNTX_OUT_FMT, AP1302_REG16);
- dev->cntx_config[context].out_fmt |=
- (aux_stream_config[main_context][context]
- << OUT_FMT_IIS_OFFSET);
- ap1302_write_context_reg(sd, main_context,
- CNTX_OUT_FMT, AP1302_REG16);
- }
- }
- stream_info->ch_id = context;
- mutex_unlock(&dev->input_lock);
-
- return 0;
-}
-
-
-static int ap1302_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- enum ap1302_contexts context;
- struct ap1302_res_struct *res_table;
- u32 cur_res;
-
- mutex_lock(&dev->input_lock);
- context = ap1302_get_context(sd);
- res_table = dev->cntx_res[context].res_table;
- cur_res = dev->cntx_res[context].cur_res;
- interval->interval.denominator = res_table[cur_res].fps;
- interval->interval.numerator = 1;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int ap1302_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- enum ap1302_contexts context;
- struct ap1302_res_struct *res_table;
- int index = fse->index;
-
- mutex_lock(&dev->input_lock);
- context = ap1302_get_context(sd);
- if (index >= dev->cntx_res[context].res_num) {
- mutex_unlock(&dev->input_lock);
- return -EINVAL;
- }
-
- res_table = dev->cntx_res[context].res_table;
- fse->min_width = res_table[index].width;
- fse->min_height = res_table[index].height;
- fse->max_width = res_table[index].width;
- fse->max_height = res_table[index].height;
- mutex_unlock(&dev->input_lock);
-
- return 0;
-}
-
-
-static int ap1302_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
-{
- *frames = 0;
- return 0;
-}
-
-static int ap1302_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- enum ap1302_contexts context;
- u32 reg_val;
- int ret;
-
- mutex_lock(&dev->input_lock);
- context = ap1302_get_context(sd);
- dev_dbg(&client->dev, "ap1302_s_stream. context=%d enable=%d\n",
- context, enable);
- /* Switch context */
- ap1302_i2c_read_reg(sd, REG_CTRL,
- AP1302_REG16, &reg_val);
- reg_val &= ~CTRL_CNTX_MASK;
- reg_val |= (context<<CTRL_CNTX_OFFSET);
- ap1302_i2c_write_reg(sd, REG_CTRL,
- AP1302_REG16, reg_val);
- /* Select sensor */
- ap1302_i2c_read_reg(sd, REG_SENSOR_SELECT,
- AP1302_REG16, &reg_val);
- reg_val &= ~SENSOR_SELECT_MASK;
- reg_val |= (AP1302_SENSOR_PRI<<SENSOR_SELECT_OFFSET);
- ap1302_i2c_write_reg(sd, REG_SENSOR_SELECT,
- AP1302_REG16, reg_val);
- if (enable) {
- dev_info(&client->dev, "Start stream. context=%d\n", context);
- ap1302_dump_context_reg(sd, context);
- if (!dev->sys_activated) {
- reg_val = AP1302_SYS_ACTIVATE;
- dev->sys_activated = 1;
- } else {
- reg_val = AP1302_SYS_SWITCH;
- }
- } else {
- dev_info(&client->dev, "Stop stream. context=%d\n", context);
- reg_val = AP1302_SYS_SWITCH;
- }
- ret = ap1302_i2c_write_reg(sd, REG_SYS_START, AP1302_REG16, reg_val);
- if (ret)
- dev_err(&client->dev,
- "AP1302 set stream failed. enable=%d\n", enable);
- mutex_unlock(&dev->input_lock);
- return ret;
-}
-
-static u16 ap1302_ev_values[] = {0xfd00, 0xfe80, 0x0, 0x180, 0x300};
-
-static int ap1302_set_exposure_off(struct v4l2_subdev *sd, s32 val)
-{
- val -= AP1302_MIN_EV;
- return ap1302_i2c_write_reg(sd, REG_AE_BV_OFF, AP1302_REG16,
- ap1302_ev_values[val]);
-}
-
-static u16 ap1302_wb_values[] = {
- 0, /* V4L2_WHITE_BALANCE_MANUAL */
- 0xf, /* V4L2_WHITE_BALANCE_AUTO */
- 0x2, /* V4L2_WHITE_BALANCE_INCANDESCENT */
- 0x4, /* V4L2_WHITE_BALANCE_FLUORESCENT */
- 0x5, /* V4L2_WHITE_BALANCE_FLUORESCENT_H */
- 0x1, /* V4L2_WHITE_BALANCE_HORIZON */
- 0x5, /* V4L2_WHITE_BALANCE_DAYLIGHT */
- 0xf, /* V4L2_WHITE_BALANCE_FLASH */
- 0x6, /* V4L2_WHITE_BALANCE_CLOUDY */
- 0x6, /* V4L2_WHITE_BALANCE_SHADE */
-};
-
-static int ap1302_set_wb_mode(struct v4l2_subdev *sd, s32 val)
-{
- int ret = 0;
- u16 reg_val;
-
- ret = ap1302_i2c_read_reg(sd, REG_AWB_CTRL, AP1302_REG16, &reg_val);
- if (ret)
- return ret;
- reg_val &= ~AWB_CTRL_MODE_MASK;
- reg_val |= ap1302_wb_values[val] << AWB_CTRL_MODE_OFFSET;
- if (val == V4L2_WHITE_BALANCE_FLASH)
- reg_val |= AWB_CTRL_FLASH_MASK;
- else
- reg_val &= ~AWB_CTRL_FLASH_MASK;
- ret = ap1302_i2c_write_reg(sd, REG_AWB_CTRL, AP1302_REG16, reg_val);
- return ret;
-}
-
-static int ap1302_set_zoom(struct v4l2_subdev *sd, s32 val)
-{
- ap1302_i2c_write_reg(sd, REG_DZ_TGT_FCT, AP1302_REG16,
- val * 4 + 0x100);
- return 0;
-}
-
-static u16 ap1302_sfx_values[] = {
- 0x00, /* V4L2_COLORFX_NONE */
- 0x03, /* V4L2_COLORFX_BW */
- 0x0d, /* V4L2_COLORFX_SEPIA */
- 0x07, /* V4L2_COLORFX_NEGATIVE */
- 0x04, /* V4L2_COLORFX_EMBOSS */
- 0x0f, /* V4L2_COLORFX_SKETCH */
- 0x08, /* V4L2_COLORFX_SKY_BLUE */
- 0x09, /* V4L2_COLORFX_GRASS_GREEN */
- 0x0a, /* V4L2_COLORFX_SKIN_WHITEN */
- 0x00, /* V4L2_COLORFX_VIVID */
- 0x00, /* V4L2_COLORFX_AQUA */
- 0x00, /* V4L2_COLORFX_ART_FREEZE */
- 0x00, /* V4L2_COLORFX_SILHOUETTE */
- 0x10, /* V4L2_COLORFX_SOLARIZATION */
- 0x02, /* V4L2_COLORFX_ANTIQUE */
- 0x00, /* V4L2_COLORFX_SET_CBCR */
-};
-
-static int ap1302_set_special_effect(struct v4l2_subdev *sd, s32 val)
-{
- ap1302_i2c_write_reg(sd, REG_SFX_MODE, AP1302_REG16,
- ap1302_sfx_values[val]);
- return 0;
-}
-
-static u16 ap1302_scene_mode_values[] = {
- 0x00, /* V4L2_SCENE_MODE_NONE */
- 0x07, /* V4L2_SCENE_MODE_BACKLIGHT */
- 0x0a, /* V4L2_SCENE_MODE_BEACH_SNOW */
- 0x06, /* V4L2_SCENE_MODE_CANDLE_LIGHT */
- 0x00, /* V4L2_SCENE_MODE_DAWN_DUSK */
- 0x00, /* V4L2_SCENE_MODE_FALL_COLORS */
- 0x0d, /* V4L2_SCENE_MODE_FIREWORKS */
- 0x02, /* V4L2_SCENE_MODE_LANDSCAPE */
- 0x05, /* V4L2_SCENE_MODE_NIGHT */
- 0x0c, /* V4L2_SCENE_MODE_PARTY_INDOOR */
- 0x01, /* V4L2_SCENE_MODE_PORTRAIT */
- 0x03, /* V4L2_SCENE_MODE_SPORTS */
- 0x0e, /* V4L2_SCENE_MODE_SUNSET */
- 0x0b, /* V4L2_SCENE_MODE_TEXT */
-};
-
-static int ap1302_set_scene_mode(struct v4l2_subdev *sd, s32 val)
-{
- ap1302_i2c_write_reg(sd, REG_SCENE_CTRL, AP1302_REG16,
- ap1302_scene_mode_values[val]);
- return 0;
-}
-
-static u16 ap1302_flicker_values[] = {
- 0x0, /* OFF */
- 0x3201, /* 50HZ */
- 0x3c01, /* 60HZ */
- 0x2 /* AUTO */
-};
-
-static int ap1302_set_flicker_freq(struct v4l2_subdev *sd, s32 val)
-{
- ap1302_i2c_write_reg(sd, REG_FLICK_CTRL, AP1302_REG16,
- ap1302_flicker_values[val]);
- return 0;
-}
-
-static int ap1302_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct ap1302_device *dev = container_of(
- ctrl->handler, struct ap1302_device, ctrl_handler);
-
- switch (ctrl->id) {
- case V4L2_CID_RUN_MODE:
- dev->cur_context = ap1302_cntx_mapping[ctrl->val];
- break;
- case V4L2_CID_EXPOSURE:
- ap1302_set_exposure_off(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
- ap1302_set_wb_mode(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_ZOOM_ABSOLUTE:
- ap1302_set_zoom(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_COLORFX:
- ap1302_set_special_effect(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_SCENE_MODE:
- ap1302_set_scene_mode(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_POWER_LINE_FREQUENCY:
- ap1302_set_flicker_freq(&dev->sd, ctrl->val);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ap1302_g_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
- u32 reg_val;
-
- if (reg->size != AP1302_REG16 &&
- reg->size != AP1302_REG32)
- return -EINVAL;
-
- mutex_lock(&dev->input_lock);
- if (dev->power_on)
- ret = ap1302_i2c_read_reg(sd, reg->reg, reg->size, &reg_val);
- else
- ret = -EIO;
- mutex_unlock(&dev->input_lock);
- if (ret)
- return ret;
-
- reg->val = reg_val;
-
- return 0;
-}
-
-static int ap1302_s_register(struct v4l2_subdev *sd,
- const struct v4l2_dbg_register *reg)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
-
- if (reg->size != AP1302_REG16 &&
- reg->size != AP1302_REG32)
- return -EINVAL;
-
- mutex_lock(&dev->input_lock);
- if (dev->power_on)
- ret = ap1302_i2c_write_reg(sd, reg->reg, reg->size, reg->val);
- else
- ret = -EIO;
- mutex_unlock(&dev->input_lock);
- return ret;
-}
-
-static long ap1302_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
- long ret = 0;
- switch (cmd) {
- case VIDIOC_DBG_G_REGISTER:
- ret = ap1302_g_register(sd, arg);
- break;
- case VIDIOC_DBG_S_REGISTER:
- ret = ap1302_s_register(sd, arg);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-
-static const struct v4l2_ctrl_ops ctrl_ops = {
- .s_ctrl = ap1302_s_ctrl,
-};
-
-static const char * const ctrl_run_mode_menu[] = {
- NULL,
- "Video",
- "Still capture",
- "Continuous capture",
- "Preview",
-};
-
-static const struct v4l2_ctrl_config ctrls[] = {
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_RUN_MODE,
- .name = "Run Mode",
- .type = V4L2_CTRL_TYPE_MENU,
- .min = 1,
- .def = 4,
- .max = 4,
- .qmenu = ctrl_run_mode_menu,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_EXPOSURE,
- .name = "Exposure",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = AP1302_MIN_EV,
- .def = 0,
- .max = AP1302_MAX_EV,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
- .name = "White Balance",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 0,
- .max = 9,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_ZOOM_ABSOLUTE,
- .name = "Zoom Absolute",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 0,
- .max = 1024,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_COLORFX,
- .name = "Color Special Effect",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 0,
- .max = 15,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_SCENE_MODE,
- .name = "Scene Mode",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 0,
- .max = 13,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .name = "Light frequency filter",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 3,
- .max = 3,
- .step = 1,
- },
-};
-
-static const struct v4l2_subdev_sensor_ops ap1302_sensor_ops = {
- .g_skip_frames = ap1302_g_skip_frames,
-};
-
-static const struct v4l2_subdev_video_ops ap1302_video_ops = {
- .s_stream = ap1302_s_stream,
- .g_frame_interval = ap1302_g_frame_interval,
-};
-
-static const struct v4l2_subdev_core_ops ap1302_core_ops = {
- .s_power = ap1302_s_power,
- .ioctl = ap1302_ioctl,
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .g_register = ap1302_g_register,
- .s_register = ap1302_s_register,
-#endif
-};
-
-static const struct v4l2_subdev_pad_ops ap1302_pad_ops = {
- .enum_mbus_code = ap1302_enum_mbus_code,
- .enum_frame_size = ap1302_enum_frame_size,
- .get_fmt = ap1302_get_fmt,
- .set_fmt = ap1302_set_fmt,
-};
-
-static const struct v4l2_subdev_ops ap1302_ops = {
- .core = &ap1302_core_ops,
- .pad = &ap1302_pad_ops,
- .video = &ap1302_video_ops,
- .sensor = &ap1302_sensor_ops
-};
-
-static int ap1302_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ap1302_device *dev = to_ap1302_device(sd);
-
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
- release_firmware(dev->fw);
-
- media_entity_cleanup(&dev->sd.entity);
- dev->platform_data->csi_cfg(sd, 0);
- v4l2_device_unregister_subdev(sd);
-
- return 0;
-}
-
-static int ap1302_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct ap1302_device *dev;
- int ret;
- unsigned int i;
-
- dev_info(&client->dev, "ap1302 probe called.\n");
-
- /* allocate device & init sub device */
- dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "%s: out of memory\n", __func__);
- return -ENOMEM;
- }
-
- mutex_init(&dev->input_lock);
-
- v4l2_i2c_subdev_init(&(dev->sd), client, &ap1302_ops);
-
- ret = ap1302_request_firmware(&(dev->sd));
- if (ret) {
- dev_err(&client->dev, "Cannot request ap1302 firmware.\n");
- goto out_free;
- }
-
- dev->regmap16 = devm_regmap_init_i2c(client, &ap1302_reg16_config);
- if (IS_ERR(dev->regmap16)) {
- ret = PTR_ERR(dev->regmap16);
- dev_err(&client->dev,
- "Failed to allocate 16bit register map: %d\n", ret);
- return ret;
- }
-
- dev->regmap32 = devm_regmap_init_i2c(client, &ap1302_reg32_config);
- if (IS_ERR(dev->regmap32)) {
- ret = PTR_ERR(dev->regmap32);
- dev_err(&client->dev,
- "Failed to allocate 32bit register map: %d\n", ret);
- return ret;
- }
-
- if (client->dev.platform_data) {
- ret = ap1302_s_config(&dev->sd, client->dev.platform_data);
- if (ret)
- goto out_free;
- }
-
- dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- dev->pad.flags = MEDIA_PAD_FL_SOURCE;
- dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-
- dev->cntx_res[CONTEXT_PREVIEW].res_num = ARRAY_SIZE(ap1302_preview_res);
- dev->cntx_res[CONTEXT_PREVIEW].res_table = ap1302_preview_res;
- dev->cntx_res[CONTEXT_SNAPSHOT].res_num =
- ARRAY_SIZE(ap1302_snapshot_res);
- dev->cntx_res[CONTEXT_SNAPSHOT].res_table = ap1302_snapshot_res;
- dev->cntx_res[CONTEXT_VIDEO].res_num = ARRAY_SIZE(ap1302_video_res);
- dev->cntx_res[CONTEXT_VIDEO].res_table = ap1302_video_res;
-
- ret = v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(ctrls));
- if (ret) {
- ap1302_remove(client);
- return ret;
- }
-
- for (i = 0; i < ARRAY_SIZE(ctrls); i++)
- v4l2_ctrl_new_custom(&dev->ctrl_handler, &ctrls[i], NULL);
-
- if (dev->ctrl_handler.error) {
- ap1302_remove(client);
- return dev->ctrl_handler.error;
- }
-
- /* Use same lock for controls as for everything else. */
- dev->ctrl_handler.lock = &dev->input_lock;
- dev->sd.ctrl_handler = &dev->ctrl_handler;
- v4l2_ctrl_handler_setup(&dev->ctrl_handler);
-
- dev->run_mode = v4l2_ctrl_find(&dev->ctrl_handler, V4L2_CID_RUN_MODE);
- v4l2_ctrl_s_ctrl(dev->run_mode, ATOMISP_RUN_MODE_PREVIEW);
-
- ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
- if (ret)
- ap1302_remove(client);
- return ret;
-out_free:
- v4l2_device_unregister_subdev(&dev->sd);
- return ret;
-}
-
-static const struct i2c_device_id ap1302_id[] = {
- {AP1302_NAME, 0},
- {}
-};
-MODULE_DEVICE_TABLE(i2c, ap1302_id);
-
-static struct i2c_driver ap1302_driver = {
- .driver = {
- .name = AP1302_NAME,
- },
- .probe = ap1302_probe,
- .remove = ap1302_remove,
- .id_table = ap1302_id,
-};
-
-module_i2c_driver(ap1302_driver);
-
-MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
-MODULE_DESCRIPTION("AP1302 Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ap1302.h b/drivers/staging/media/atomisp/i2c/ap1302.h
deleted file mode 100644
index 4d0b181a9671..000000000000
--- a/drivers/staging/media/atomisp/i2c/ap1302.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __AP1302_H__
-#define __AP1302_H__
-
-#include "../include/linux/atomisp_platform.h"
-#include <linux/regmap.h>
-#include <linux/types.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-subdev.h>
-
-#define AP1302_NAME "ap1302"
-#define AP1302_CHIP_ID 0x265
-#define AP1302_I2C_MAX_LEN 65534
-#define AP1302_FW_WINDOW_OFFSET 0x8000
-#define AP1302_FW_WINDOW_SIZE 0x2000
-
-#define AP1302_REG16 2
-#define AP1302_REG32 4
-
-#define REG_CHIP_VERSION 0x0000
-#define REG_CHIP_REV 0x0050
-#define REG_MF_ID 0x0004
-#define REG_ERROR 0x0006
-#define REG_CTRL 0x1000
-#define REG_DZ_TGT_FCT 0x1010
-#define REG_SFX_MODE 0x1016
-#define REG_SS_HEAD_PT0 0x1174
-#define REG_AE_BV_OFF 0x5014
-#define REG_AE_BV_BIAS 0x5016
-#define REG_AWB_CTRL 0x5100
-#define REG_FLICK_CTRL 0x5440
-#define REG_SCENE_CTRL 0x5454
-#define REG_BOOTDATA_STAGE 0x6002
-#define REG_SENSOR_SELECT 0x600C
-#define REG_SYS_START 0x601A
-#define REG_SIP_CRC 0xF052
-
-#define REG_PREVIEW_BASE 0x2000
-#define REG_SNAPSHOT_BASE 0x3000
-#define REG_VIDEO_BASE 0x4000
-#define CNTX_WIDTH 0x00
-#define CNTX_HEIGHT 0x02
-#define CNTX_ROI_X0 0x04
-#define CNTX_ROI_Y0 0x06
-#define CNTX_ROI_X1 0x08
-#define CNTX_ROI_Y1 0x0A
-#define CNTX_ASPECT 0x0C
-#define CNTX_LOCK 0x0E
-#define CNTX_ENABLE 0x10
-#define CNTX_OUT_FMT 0x12
-#define CNTX_SENSOR_MODE 0x14
-#define CNTX_MIPI_CTRL 0x16
-#define CNTX_MIPI_II_CTRL 0x18
-#define CNTX_LINE_TIME 0x1C
-#define CNTX_MAX_FPS 0x20
-#define CNTX_AE_USG 0x22
-#define CNTX_AE_UPPER_ET 0x24
-#define CNTX_AE_MAX_ET 0x28
-#define CNTX_SS 0x2C
-#define CNTX_S1_SENSOR_MODE 0x2E
-#define CNTX_HINF_CTRL 0x30
-
-#define CTRL_CNTX_MASK 0x03
-#define CTRL_CNTX_OFFSET 0x00
-#define HINF_CTRL_LANE_MASK 0x07
-#define HINF_CTRL_LANE_OFFSET 0x00
-#define MIPI_CTRL_IMGVC_MASK 0xC0
-#define MIPI_CTRL_IMGVC_OFFSET 0x06
-#define MIPI_CTRL_IMGTYPE_AUTO 0x3F
-#define MIPI_CTRL_SSVC_MASK 0xC000
-#define MIPI_CTRL_SSVC_OFFSET 0x0E
-#define MIPI_CTRL_SSTYPE_MASK 0x3F00
-#define MIPI_CTRL_SSTYPE_OFFSET 0x08
-#define OUT_FMT_IIS_MASK 0x30
-#define OUT_FMT_IIS_OFFSET 0x08
-#define OUT_FMT_SS_MASK 0x1000
-#define OUT_FMT_SS_OFFSET 0x12
-#define OUT_FMT_TYPE_MASK 0xFF
-#define SENSOR_SELECT_MASK 0x03
-#define SENSOR_SELECT_OFFSET 0x00
-#define AWB_CTRL_MODE_MASK 0x0F
-#define AWB_CTRL_MODE_OFFSET 0x00
-#define AWB_CTRL_FLASH_MASK 0x100
-
-#define AP1302_FMT_UYVY422 0x50
-
-#define AP1302_SYS_ACTIVATE 0x8010
-#define AP1302_SYS_SWITCH 0x8140
-#define AP1302_SENSOR_PRI 0x01
-#define AP1302_SENSOR_SEC 0x02
-#define AP1302_SS_CTRL 0x31
-
-#define AP1302_MAX_RATIO_MISMATCH 10 /* Unit in percentage */
-#define AP1302_MAX_EV 2
-#define AP1302_MIN_EV -2
-
-enum ap1302_contexts {
- CONTEXT_PREVIEW = 0,
- CONTEXT_SNAPSHOT,
- CONTEXT_VIDEO,
- CONTEXT_NUM
-};
-
-/* The context registers are defined according to preview/video registers.
- Preview and video context have the same register definition.
- But snapshot context does not have register S1_SENSOR_MODE.
- When setting snapshot registers, if the offset exceeds
- S1_SENSOR_MODE, the actual offset needs to minus 2. */
-struct ap1302_context_config {
- u16 width;
- u16 height;
- u16 roi_x0;
- u16 roi_y0;
- u16 roi_x1;
- u16 roi_y1;
- u16 aspect_factor;
- u16 lock;
- u16 enable;
- u16 out_fmt;
- u16 sensor_mode;
- u16 mipi_ctrl;
- u16 mipi_ii_ctrl;
- u16 padding;
- u32 line_time;
- u16 max_fps;
- u16 ae_usg;
- u32 ae_upper_et;
- u32 ae_max_et;
- u16 ss;
- u16 s1_sensor_mode;
- u16 hinf_ctrl;
- u32 reserved;
-};
-
-struct ap1302_res_struct {
- u16 width;
- u16 height;
- u16 fps;
-};
-
-struct ap1302_context_res {
- u32 res_num;
- u32 cur_res;
- struct ap1302_res_struct *res_table;
-};
-
-struct ap1302_device {
- struct v4l2_subdev sd;
- struct media_pad pad;
- struct camera_sensor_platform_data *platform_data;
- const struct firmware *fw;
- struct mutex input_lock; /* serialize sensor's ioctl */
- struct v4l2_mbus_framefmt format;
- struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_ctrl *run_mode;
- struct ap1302_context_config cntx_config[CONTEXT_NUM];
- struct ap1302_context_res cntx_res[CONTEXT_NUM];
- enum ap1302_contexts cur_context;
- unsigned int num_lanes;
- struct regmap *regmap16;
- struct regmap *regmap32;
- bool sys_activated;
- bool power_on;
-};
-
-struct ap1302_firmware {
- u32 crc;
- u32 pll_init_size;
- u32 total_size;
- u32 reserved;
-};
-
-struct ap1302_context_info {
- u16 offset;
- u16 len;
- char *name;
-};
-
-#endif
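
The comment in the deleted ap1302.h above describes how per-context register addresses are formed: preview and video share one layout, while the snapshot context omits S1_SENSOR_MODE, so any offset past it must be reduced by 2. A minimal sketch of that address calculation, assuming the constants and enum from the deleted header (the helper itself is hypothetical, not part of the original driver):

/* Illustrative only: map a context field offset to an absolute register
 * address, applying the snapshot-context adjustment described above.
 */
static u16 ap1302_context_reg(enum ap1302_contexts context, u16 offset)
{
	u16 base;

	switch (context) {
	case CONTEXT_PREVIEW:
		base = REG_PREVIEW_BASE;
		break;
	case CONTEXT_SNAPSHOT:
		base = REG_SNAPSHOT_BASE;
		/* snapshot has no S1_SENSOR_MODE; later fields shift down */
		if (offset > CNTX_S1_SENSOR_MODE)
			offset -= 2;
		break;
	default:
		base = REG_VIDEO_BASE;
		break;
	}

	return base + offset;
}
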
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.c b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
index 35ed51ffe944..e70d8afcc229 100644
--- a/drivers/staging/media/atomisp/i2c/gc0310.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include <linux/io.h>
@@ -738,10 +737,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
/* The upstream module driver (written to Crystal
* Cove) had this logic to pulse the rails low first.
@@ -772,10 +767,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
/* GPIO0 == "reset" (active low), GPIO1 == "power down" */
if (flag) {
/* Pulse reset, then release power down */
@@ -1165,13 +1156,6 @@ static int gc0310_s_config(struct v4l2_subdev *sd,
(struct camera_sensor_platform_data *)platform_data;
mutex_lock(&dev->input_lock);
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- dev_err(&client->dev, "platform init err\n");
- goto platform_init_failed;
- }
- }
/* power off the module, then power on it in future
* as first power on by board may not fulfill the
 * power on sequence needed by the module
@@ -1216,9 +1200,6 @@ fail_power_on:
power_down(sd);
dev_err(&client->dev, "sensor power-gating failed\n");
fail_power_off:
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-platform_init_failed:
mutex_unlock(&dev->input_lock);
return ret;
}
@@ -1362,9 +1343,6 @@ static int gc0310_remove(struct i2c_client *client)
struct gc0310_device *dev = to_gc0310_sensor(sd);
dev_dbg(&client->dev, "gc0310_remove...\n");
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
dev->platform_data->csi_cfg(sd, 0);
v4l2_device_unregister_subdev(sd);
@@ -1375,8 +1353,7 @@ static int gc0310_remove(struct i2c_client *client)
return 0;
}
-static int gc0310_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int gc0310_probe(struct i2c_client *client)
{
struct gc0310_device *dev;
int ret;
@@ -1385,10 +1362,8 @@ static int gc0310_probe(struct i2c_client *client,
pr_info("%s S\n", __func__);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -1457,33 +1432,17 @@ static const struct acpi_device_id gc0310_acpi_match[] = {
{"INT0310"},
{},
};
-
MODULE_DEVICE_TABLE(acpi, gc0310_acpi_match);
-MODULE_DEVICE_TABLE(i2c, gc0310_id);
static struct i2c_driver gc0310_driver = {
.driver = {
- .name = GC0310_NAME,
- .acpi_match_table = ACPI_PTR(gc0310_acpi_match),
+ .name = "gc0310",
+ .acpi_match_table = gc0310_acpi_match,
},
- .probe = gc0310_probe,
+ .probe_new = gc0310_probe,
.remove = gc0310_remove,
- .id_table = gc0310_id,
};
-
-static int init_gc0310(void)
-{
- return i2c_add_driver(&gc0310_driver);
-}
-
-static void exit_gc0310(void)
-{
-
- i2c_del_driver(&gc0310_driver);
-}
-
-module_init(init_gc0310);
-module_exit(exit_gc0310);
+module_i2c_driver(gc0310_driver);
MODULE_AUTHOR("Lai, Angie <angie.lai@intel.com>");
MODULE_DESCRIPTION("A low-level driver for GalaxyCore GC0310 sensors");
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.c b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
index e43d31ea9676..85da5fe24033 100644
--- a/drivers/staging/media/atomisp/i2c/gc2235.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include "../include/linux/atomisp_gmin_platform.h"
@@ -548,10 +547,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
ret = dev->platform_data->v1p8_ctrl(sd, 1);
usleep_range(60, 90);
@@ -572,10 +567,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
ret |= dev->platform_data->gpio1_ctrl(sd, !flag);
usleep_range(60, 90);
return dev->platform_data->gpio0_ctrl(sd, flag);
@@ -906,13 +897,6 @@ static int gc2235_s_config(struct v4l2_subdev *sd,
(struct camera_sensor_platform_data *)platform_data;
mutex_lock(&dev->input_lock);
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- dev_err(&client->dev, "platform init err\n");
- goto platform_init_failed;
- }
- }
/* power off the module, then power on it in future
* as first power on by board may not fulfill the
 * power on sequence needed by the module
@@ -956,9 +940,6 @@ fail_power_on:
power_down(sd);
dev_err(&client->dev, "sensor power-gating failed\n");
fail_power_off:
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-platform_init_failed:
mutex_unlock(&dev->input_lock);
return ret;
}
@@ -1101,9 +1082,6 @@ static int gc2235_remove(struct i2c_client *client)
struct gc2235_device *dev = to_gc2235_sensor(sd);
dev_dbg(&client->dev, "gc2235_remove...\n");
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
dev->platform_data->csi_cfg(sd, 0);
v4l2_device_unregister_subdev(sd);
@@ -1114,8 +1092,7 @@ static int gc2235_remove(struct i2c_client *client)
return 0;
}
-static int gc2235_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int gc2235_probe(struct i2c_client *client)
{
struct gc2235_device *dev;
void *gcpdev;
@@ -1123,10 +1100,8 @@ static int gc2235_probe(struct i2c_client *client,
unsigned int i;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -1187,32 +1162,17 @@ static const struct acpi_device_id gc2235_acpi_match[] = {
{ "INT33F8" },
{},
};
-
MODULE_DEVICE_TABLE(acpi, gc2235_acpi_match);
-MODULE_DEVICE_TABLE(i2c, gc2235_id);
+
static struct i2c_driver gc2235_driver = {
.driver = {
- .name = GC2235_NAME,
- .acpi_match_table = ACPI_PTR(gc2235_acpi_match),
+ .name = "gc2235",
+ .acpi_match_table = gc2235_acpi_match,
},
- .probe = gc2235_probe,
+ .probe_new = gc2235_probe,
.remove = gc2235_remove,
- .id_table = gc2235_id,
};
-
-static int init_gc2235(void)
-{
- return i2c_add_driver(&gc2235_driver);
-}
-
-static void exit_gc2235(void)
-{
-
- i2c_del_driver(&gc2235_driver);
-}
-
-module_init(init_gc2235);
-module_exit(exit_gc2235);
+module_i2c_driver(gc2235_driver);
MODULE_AUTHOR("Shuguang Gong <Shuguang.Gong@intel.com>");
MODULE_DESCRIPTION("A low-level driver for GC2235 sensors");
diff --git a/drivers/staging/media/atomisp/i2c/libmsrlisthelper.c b/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c
index decb65cfd7c9..81e5ec0c2b64 100644
--- a/drivers/staging/media/atomisp/i2c/libmsrlisthelper.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c
@@ -10,10 +10,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/i2c.h>
diff --git a/drivers/staging/media/atomisp/i2c/lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
index 679176f7c542..4fd9f538ac95 100644
--- a/drivers/staging/media/atomisp/i2c/lm3554.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/module.h>
@@ -171,10 +167,9 @@ static int lm3554_set_config1(struct lm3554 *flash)
/* -----------------------------------------------------------------------------
* Hardware trigger
*/
-static void lm3554_flash_off_delay(long unsigned int arg)
+static void lm3554_flash_off_delay(struct timer_list *t)
{
- struct v4l2_subdev *sd = i2c_get_clientdata((struct i2c_client *)arg);
- struct lm3554 *flash = to_lm3554(sd);
+ struct lm3554 *flash = from_timer(flash, t, flash_off_delay);
struct lm3554_platform_data *pdata = flash->pdata;
gpio_set_value(pdata->gpio_strobe, 0);
@@ -862,8 +857,7 @@ static void *lm3554_platform_data_func(struct i2c_client *client)
return &platform_data;
}
-static int lm3554_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm3554_probe(struct i2c_client *client)
{
int err = 0;
struct lm3554 *flash;
@@ -871,10 +865,8 @@ static int lm3554_probe(struct i2c_client *client,
int ret;
flash = kzalloc(sizeof(*flash), GFP_KERNEL);
- if (!flash) {
- dev_err(&client->dev, "out of memory\n");
+ if (!flash)
return -ENOMEM;
- }
flash->pdata = client->dev.platform_data;
@@ -915,8 +907,7 @@ static int lm3554_probe(struct i2c_client *client,
mutex_init(&flash->power_lock);
- setup_timer(&flash->flash_off_delay, lm3554_flash_off_delay,
- (unsigned long)client);
+ timer_setup(&flash->flash_off_delay, lm3554_flash_off_delay, 0);
err = lm3554_gpio_init(client);
if (err) {
@@ -962,13 +953,6 @@ fail:
return ret;
}
-static const struct i2c_device_id lm3554_id[] = {
- {LM3554_NAME, 0},
- {},
-};
-
-MODULE_DEVICE_TABLE(i2c, lm3554_id);
-
static const struct dev_pm_ops lm3554_pm_ops = {
.suspend = lm3554_suspend,
.resume = lm3554_resume,
@@ -978,32 +962,19 @@ static const struct acpi_device_id lm3554_acpi_match[] = {
{ "INTCF1C" },
{},
};
-
MODULE_DEVICE_TABLE(acpi, lm3554_acpi_match);
static struct i2c_driver lm3554_driver = {
.driver = {
- .name = LM3554_NAME,
+ .name = "lm3554",
.pm = &lm3554_pm_ops,
- .acpi_match_table = ACPI_PTR(lm3554_acpi_match),
+ .acpi_match_table = lm3554_acpi_match,
},
- .probe = lm3554_probe,
+ .probe_new = lm3554_probe,
.remove = lm3554_remove,
- .id_table = lm3554_id,
};
+module_i2c_driver(lm3554_driver);
-static __init int init_lm3554(void)
-{
- return i2c_add_driver(&lm3554_driver);
-}
-
-static __exit void exit_lm3554(void)
-{
- i2c_del_driver(&lm3554_driver);
-}
-
-module_init(init_lm3554);
-module_exit(exit_lm3554);
MODULE_AUTHOR("Jing Tao <jing.tao@intel.com>");
MODULE_DESCRIPTION("LED flash driver for LM3554");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
index 3c837cb8859c..55882bea2049 100644
--- a/drivers/staging/media/atomisp/i2c/mt9m114.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -32,7 +28,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/acpi.h>
#include "../include/linux/atomisp_gmin_platform.h"
#include <media/v4l2-device.h>
@@ -455,10 +450,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
ret = dev->platform_data->v2p8_ctrl(sd, 1);
if (ret == 0) {
@@ -481,10 +472,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
/* Note: current modules wire only one GPIO signal (RESET#),
* but the schematic wires up two to the connector. BIOS
* versions have been unfortunately inconsistent with which
@@ -1584,13 +1571,6 @@ mt9m114_s_config(struct v4l2_subdev *sd, int irq, void *platform_data)
dev->platform_data =
(struct camera_sensor_platform_data *)platform_data;
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- v4l2_err(client, "mt9m114 platform init err\n");
- return ret;
- }
- }
ret = power_up(sd);
if (ret) {
v4l2_err(client, "mt9m114 power-up err");
@@ -1844,8 +1824,6 @@ static int mt9m114_remove(struct i2c_client *client)
dev = container_of(sd, struct mt9m114_device, sd);
dev->platform_data->csi_cfg(sd, 0);
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
@@ -1853,8 +1831,7 @@ static int mt9m114_remove(struct i2c_client *client)
return 0;
}
-static int mt9m114_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mt9m114_probe(struct i2c_client *client)
{
struct mt9m114_device *dev;
int ret = 0;
@@ -1863,10 +1840,8 @@ static int mt9m114_probe(struct i2c_client *client,
/* Setup sensor configuration structure */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
v4l2_i2c_subdev_init(&dev->sd, client, &mt9m114_ops);
pdata = client->dev.platform_data;
@@ -1926,38 +1901,22 @@ static int mt9m114_probe(struct i2c_client *client,
return 0;
}
-MODULE_DEVICE_TABLE(i2c, mt9m114_id);
-
static const struct acpi_device_id mt9m114_acpi_match[] = {
{ "INT33F0" },
{ "CRMT1040" },
{},
};
-
MODULE_DEVICE_TABLE(acpi, mt9m114_acpi_match);
static struct i2c_driver mt9m114_driver = {
.driver = {
.name = "mt9m114",
- .acpi_match_table = ACPI_PTR(mt9m114_acpi_match),
+ .acpi_match_table = mt9m114_acpi_match,
},
- .probe = mt9m114_probe,
+ .probe_new = mt9m114_probe,
.remove = mt9m114_remove,
- .id_table = mt9m114_id,
};
-
-static __init int init_mt9m114(void)
-{
- return i2c_add_driver(&mt9m114_driver);
-}
-
-static __exit void exit_mt9m114(void)
-{
- i2c_del_driver(&mt9m114_driver);
-}
-
-module_init(init_mt9m114);
-module_exit(exit_mt9m114);
+module_i2c_driver(mt9m114_driver);
MODULE_AUTHOR("Shuguang Gong <Shuguang.gong@intel.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
index 51b7d61df0f5..cd67d38f183a 100644
--- a/drivers/staging/media/atomisp/i2c/ov2680.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include <linux/io.h>
@@ -253,8 +252,8 @@ static int ov2680_write_reg_array(struct i2c_client *client,
if (!__ov2680_write_reg_is_consecutive(client, &ctrl,
next)) {
err = __ov2680_flush_reg_array(client, &ctrl);
- if (err)
- return err;
+ if (err)
+ return err;
}
err = __ov2680_buf_reg_array(client, &ctrl, next);
if (err) {
@@ -399,7 +398,9 @@ static long __ov2680_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
u16 vts,hts;
int ret,exp_val;
- dev_dbg(&client->dev, "+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n",coarse_itg, gain, digitgain);
+ dev_dbg(&client->dev,
+ "+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n",
+ coarse_itg, gain, digitgain);
hts = ov2680_res[dev->fmt_idx].pixels_per_line;
vts = ov2680_res[dev->fmt_idx].lines_per_frame;
@@ -847,10 +848,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
ret |= dev->platform_data->v1p8_ctrl(sd, 1);
ret |= dev->platform_data->v2p8_ctrl(sd, 1);
@@ -872,10 +869,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
/* The OV2680 documents only one GPIO input (#XSHUTDN), but
* existing integrations often wire two (reset/power_down)
* because that is the way other sensors work. There is no
@@ -1438,8 +1431,7 @@ static int ov2680_remove(struct i2c_client *client)
return 0;
}
-static int ov2680_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov2680_probe(struct i2c_client *client)
{
struct ov2680_device *dev;
int ret;
@@ -1447,10 +1439,8 @@ static int ov2680_probe(struct i2c_client *client,
unsigned int i;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -1523,35 +1513,16 @@ static const struct acpi_device_id ov2680_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, ov2680_acpi_match);
-
-MODULE_DEVICE_TABLE(i2c, ov2680_id);
static struct i2c_driver ov2680_driver = {
.driver = {
- .owner = THIS_MODULE,
- .name = OV2680_NAME,
- .acpi_match_table = ACPI_PTR(ov2680_acpi_match),
-
+ .name = "ov2680",
+ .acpi_match_table = ov2680_acpi_match,
},
- .probe = ov2680_probe,
+ .probe_new = ov2680_probe,
.remove = ov2680_remove,
- .id_table = ov2680_id,
};
-
-static int init_ov2680(void)
-{
- return i2c_add_driver(&ov2680_driver);
-}
-
-static void exit_ov2680(void)
-{
-
- i2c_del_driver(&ov2680_driver);
-}
-
-module_init(init_ov2680);
-module_exit(exit_ov2680);
+module_i2c_driver(ov2680_driver);
MODULE_AUTHOR("Jacky Wang <Jacky_wang@ovt.com>");
MODULE_DESCRIPTION("A low-level driver for OmniVision 2680 sensors");
MODULE_LICENSE("GPL");
-
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
index 10094ac56561..4df7eba8d375 100644
--- a/drivers/staging/media/atomisp/i2c/ov2722.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include "../include/linux/atomisp_gmin_platform.h"
@@ -651,10 +650,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
ret = dev->platform_data->v1p8_ctrl(sd, 1);
if (ret == 0) {
@@ -678,10 +673,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
/* Note: the GPIO order is asymmetric: always RESET#
* before PWDN# when turning it on or off.
*/
@@ -1044,13 +1035,6 @@ static int ov2722_s_config(struct v4l2_subdev *sd,
(struct camera_sensor_platform_data *)platform_data;
mutex_lock(&dev->input_lock);
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- dev_err(&client->dev, "platform init err\n");
- goto platform_init_failed;
- }
- }
/* power off the module, then power on it in future
* as first power on by board may not fulfill the
@@ -1095,9 +1079,6 @@ fail_power_on:
power_down(sd);
dev_err(&client->dev, "sensor power-gating failed\n");
fail_power_off:
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-platform_init_failed:
mutex_unlock(&dev->input_lock);
return ret;
}
@@ -1241,9 +1222,6 @@ static int ov2722_remove(struct i2c_client *client)
struct ov2722_device *dev = to_ov2722_sensor(sd);
dev_dbg(&client->dev, "ov2722_remove...\n");
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
dev->platform_data->csi_cfg(sd, 0);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
v4l2_device_unregister_subdev(sd);
@@ -1276,8 +1254,7 @@ static int __ov2722_init_ctrl_handler(struct ov2722_device *dev)
return 0;
}
-static int ov2722_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov2722_probe(struct i2c_client *client)
{
struct ov2722_device *dev;
void *ovpdev;
@@ -1285,10 +1262,8 @@ static int ov2722_probe(struct i2c_client *client,
struct acpi_device *adev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -1335,38 +1310,21 @@ out_free:
return ret;
}
-MODULE_DEVICE_TABLE(i2c, ov2722_id);
-
static const struct acpi_device_id ov2722_acpi_match[] = {
{ "INT33FB" },
{},
};
-
MODULE_DEVICE_TABLE(acpi, ov2722_acpi_match);
static struct i2c_driver ov2722_driver = {
.driver = {
- .name = OV2722_NAME,
- .acpi_match_table = ACPI_PTR(ov2722_acpi_match),
+ .name = "ov2722",
+ .acpi_match_table = ov2722_acpi_match,
},
- .probe = ov2722_probe,
+ .probe_new = ov2722_probe,
.remove = ov2722_remove,
- .id_table = ov2722_id,
};
-
-static int init_ov2722(void)
-{
- return i2c_add_driver(&ov2722_driver);
-}
-
-static void exit_ov2722(void)
-{
-
- i2c_del_driver(&ov2722_driver);
-}
-
-module_init(init_ov2722);
-module_exit(exit_ov2722);
+module_i2c_driver(ov2722_driver);
MODULE_AUTHOR("Wei Liu <wei.liu@intel.com>");
MODULE_DESCRIPTION("A low-level driver for OmniVision 2722 sensors");
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.h b/drivers/staging/media/atomisp/i2c/gc0310.h
index 7d8a0aeecb6c..c422d0398fc7 100644
--- a/drivers/staging/media/atomisp/i2c/gc0310.h
+++ b/drivers/staging/media/atomisp/i2c/gc0310.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -36,8 +32,6 @@
#include "../include/linux/atomisp_platform.h"
-#define GC0310_NAME "gc0310"
-
/* Defines for register writes and register array processing */
#define I2C_MSG_LENGTH 1
#define I2C_RETRY_COUNT 5
@@ -196,11 +190,6 @@ struct gc0310_write_ctrl {
struct gc0310_write_buffer buffer;
};
-static const struct i2c_device_id gc0310_id[] = {
- {GC0310_NAME, 0},
- {}
-};
-
/*
* Register settings for various resolution
*/
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.h b/drivers/staging/media/atomisp/i2c/gc2235.h
index a8d6aa9c9a5d..3c30a05c3991 100644
--- a/drivers/staging/media/atomisp/i2c/gc2235.h
+++ b/drivers/staging/media/atomisp/i2c/gc2235.h
@@ -33,8 +33,6 @@
#include "../include/linux/atomisp_platform.h"
-#define GC2235_NAME "gc2235"
-
/* Defines for register writes and register array processing */
#define I2C_MSG_LENGTH 0x2
#define I2C_RETRY_COUNT 5
@@ -200,11 +198,6 @@ struct gc2235_write_ctrl {
struct gc2235_write_buffer buffer;
};
-static const struct i2c_device_id gc2235_id[] = {
- {GC2235_NAME, 0},
- {}
-};
-
static struct gc2235_reg const gc2235_stream_on[] = {
{ GC2235_8BIT, 0xfe, 0x03}, /* switch to P3 */
{ GC2235_8BIT, 0x10, 0x91}, /* start mipi */
diff --git a/drivers/staging/media/atomisp/i2c/imx/Kconfig b/drivers/staging/media/atomisp/i2c/imx/Kconfig
deleted file mode 100644
index a39eeb3b6ad4..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-config VIDEO_IMX
- tristate "sony imx sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_MSRLIST_HELPER && m
- ---help---
- This is a Video4Linux2 sensor-level driver for the Sony
- IMX RAW sensor.
-
- It currently depends on internal V4L2 extensions defined in
- atomisp driver.
diff --git a/drivers/staging/media/atomisp/i2c/imx/Makefile b/drivers/staging/media/atomisp/i2c/imx/Makefile
deleted file mode 100644
index c1a85e6e27a9..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_VIDEO_IMX) += imx1x5.o
-
-imx1x5-objs := imx.o drv201.o ad5816g.o dw9714.o dw9719.o dw9718.o vcm.o otp.o otp_imx.o otp_brcc064_e2prom.o otp_e2prom.o
-
-ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o
-obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o
-
-# HACK! While this driver is in bad shape, don't enable several warnings
-# that would be otherwise enabled with W=1
-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
-ccflags-y += $(call cc-disable-warning, unused-const-variable)
-ccflags-y += $(call cc-disable-warning, missing-prototypes)
-ccflags-y += $(call cc-disable-warning, missing-declarations)
diff --git a/drivers/staging/media/atomisp/i2c/imx/ad5816g.c b/drivers/staging/media/atomisp/i2c/imx/ad5816g.c
deleted file mode 100644
index fb74f14cbe5a..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/ad5816g.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/kmod.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-
-#include "ad5816g.h"
-
-struct ad5816g_device ad5816g_dev;
-
-static int ad5816g_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
-{
- struct i2c_msg msg[2];
- u8 buf[2];
- buf[0] = reg;
- buf[1] = 0;
-
- msg[0].addr = AD5816G_VCM_ADDR;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = &buf[0];
-
- msg[1].addr = AD5816G_VCM_ADDR;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 1;
- msg[1].buf = &buf[1];
- *val = 0;
- if (i2c_transfer(client->adapter, msg, 2) != 2)
- return -EIO;
- *val = buf[1];
- return 0;
-}
-
-static int ad5816g_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
-{
- struct i2c_msg msg;
- u8 buf[2];
- buf[0] = reg;
- buf[1] = val;
- msg.addr = AD5816G_VCM_ADDR;
- msg.flags = 0;
- msg.len = 2;
- msg.buf = &buf[0];
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
- return 0;
-}
-
-static int ad5816g_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
-{
- struct i2c_msg msg;
- u8 buf[3];
- buf[0] = reg;
- buf[1] = (u8)(val >> 8);
- buf[2] = (u8)(val & 0xff);
- msg.addr = AD5816G_VCM_ADDR;
- msg.flags = 0;
- msg.len = 3;
- msg.buf = &buf[0];
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
- return 0;
-}
-
-static int ad5816g_set_arc_mode(struct i2c_client *client)
-{
- int ret;
-
- ret = ad5816g_i2c_wr8(client, AD5816G_CONTROL, AD5816G_ARC_EN);
- if (ret)
- return ret;
-
- ret = ad5816g_i2c_wr8(client, AD5816G_MODE,
- AD5816G_MODE_2_5M_SWITCH_CLOCK);
- if (ret)
- return ret;
-
- ret = ad5816g_i2c_wr8(client, AD5816G_VCM_FREQ, AD5816G_DEF_FREQ);
- return ret;
-}
-
-int ad5816g_vcm_power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 ad5816g_id;
-
- /* Enable power */
- ret = ad5816g_dev.platform_data->power_ctrl(sd, 1);
- if (ret)
- return ret;
- /* waiting time AD5816G(vcm) - t1 + t2
- * t1(1ms) -Time from VDD high to first i2c cmd
- * t2(100us) - exit power-down mode time
- */
- usleep_range(1100, 2200);
- /* Detect device */
- ret = ad5816g_i2c_rd8(client, AD5816G_IC_INFO, &ad5816g_id);
- if (ret < 0)
- goto fail_powerdown;
- if (ad5816g_id != AD5816G_ID) {
- ret = -ENXIO;
- goto fail_powerdown;
- }
- ret = ad5816g_set_arc_mode(client);
- if (ret)
- return ret;
-
- /* set the VCM_THRESHOLD */
- ret = ad5816g_i2c_wr8(client, AD5816G_VCM_THRESHOLD,
- AD5816G_DEF_THRESHOLD);
-
- return ret;
-
-fail_powerdown:
- ad5816g_dev.platform_data->power_ctrl(sd, 0);
- return ret;
-}
-
-int ad5816g_vcm_power_down(struct v4l2_subdev *sd)
-{
- return ad5816g_dev.platform_data->power_ctrl(sd, 0);
-}
-
-
-static int ad5816g_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u16 data = val & VCM_CODE_MASK;
-
- return ad5816g_i2c_wr16(client, AD5816G_VCM_CODE_MSB, data);
-}
-
-int ad5816g_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- int ret;
-
- value = clamp(value, 0, AD5816G_MAX_FOCUS_POS);
- ret = ad5816g_t_focus_vcm(sd, value);
- if (ret == 0) {
- ad5816g_dev.number_of_steps = value - ad5816g_dev.focus;
- ad5816g_dev.focus = value;
- getnstimeofday(&(ad5816g_dev.timestamp_t_focus_abs));
- }
-
- return ret;
-}
-
-int ad5816g_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
-
- return ad5816g_t_focus_abs(sd, ad5816g_dev.focus + value);
-}
-
-int ad5816g_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- u32 status = 0;
- struct timespec temptime;
- const struct timespec timedelay = {
- 0,
- min_t(u32, abs(ad5816g_dev.number_of_steps) * DELAY_PER_STEP_NS,
- DELAY_MAX_PER_STEP_NS),
- };
-
- ktime_get_ts(&temptime);
-
- temptime = timespec_sub(temptime, (ad5816g_dev.timestamp_t_focus_abs));
-
- if (timespec_compare(&temptime, &timedelay) <= 0) {
- status |= ATOMISP_FOCUS_STATUS_MOVING;
- status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
- } else {
- status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- status |= ATOMISP_FOCUS_HP_COMPLETE;
- }
- *value = status;
-
- return 0;
-}
-
-int ad5816g_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- s32 val;
-
- ad5816g_q_focus_status(sd, &val);
-
- if (val & ATOMISP_FOCUS_STATUS_MOVING)
- *value = ad5816g_dev.focus - ad5816g_dev.number_of_steps;
- else
- *value = ad5816g_dev.focus;
-
- return 0;
-}
-
-int ad5816g_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int ad5816g_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
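
The deleted ad5816g helper above (like drv201 and dw9714 further down) estimates whether the lens is still moving by timestamping the last move and allowing DELAY_PER_STEP_NS per step, capped at DELAY_MAX_PER_STEP_NS. A minimal sketch of the same estimate using the ktime API instead of getnstimeofday()/timespec (hypothetical helper; the constants are taken from the deleted headers):

#include <linux/kernel.h>
#include <linux/ktime.h>

/* Returns true while the previously requested move is presumed to still
 * be in flight: elapsed time since last_move is under the step budget.
 */
static bool foo_vcm_still_moving(ktime_t last_move, s16 steps)
{
	u64 budget_ns = min_t(u64, (u64)abs(steps) * DELAY_PER_STEP_NS,
			      DELAY_MAX_PER_STEP_NS);

	return ktime_before(ktime_get(), ktime_add_ns(last_move, budget_ns));
}
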
diff --git a/drivers/staging/media/atomisp/i2c/imx/ad5816g.h b/drivers/staging/media/atomisp/i2c/imx/ad5816g.h
deleted file mode 100644
index e1396b00a0e1..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/ad5816g.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __AD5816G_H__
-#define __AD5816G_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-#include <linux/time.h>
-
-#define AD5816G_VCM_ADDR 0x0e
-
-/* ad5816g device structure */
-struct ad5816g_device {
- const struct camera_af_platform_data *platform_data;
- struct timespec timestamp_t_focus_abs;
- struct timespec focus_time; /* Time when focus was last time set */
- s32 focus; /* Current focus value */
- s16 number_of_steps;
-};
-
-#define AD5816G_INVALID_CONFIG 0xffffffff
-#define AD5816G_MAX_FOCUS_POS 1023
-#define DELAY_PER_STEP_NS 1000000
-#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
-
-/* Register Definitions */
-#define AD5816G_IC_INFO 0x00
-#define AD5816G_IC_VERSION 0x01
-#define AD5816G_CONTROL 0x02
-#define AD5816G_VCM_CODE_MSB 0x03
-#define AD5816G_VCM_CODE_LSB 0x04
-#define AD5816G_STATUS 0x05
-#define AD5816G_MODE 0x06
-#define AD5816G_VCM_FREQ 0x07
-#define AD5816G_VCM_THRESHOLD 0x08
-
-/* ARC MODE ENABLE */
-#define AD5816G_ARC_EN 0x02
-/* ARC RES2 MODE */
-#define AD5816G_ARC_RES2 0x01
-/* ARC VCM FREQ - 78.1Hz */
-#define AD5816G_DEF_FREQ 0x7a
-/* ARC VCM THRESHOLD - 0x08 << 1 */
-#define AD5816G_DEF_THRESHOLD 0x64
-#define AD5816G_ID 0x24
-#define VCM_CODE_MASK 0x03ff
-
-#define AD5816G_MODE_2_5M_SWITCH_CLOCK 0x14
-
-#endif
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/common.h b/drivers/staging/media/atomisp/i2c/imx/common.h
deleted file mode 100644
index af2e3160df95..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/common.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __COMMON_H__
-#define __COMMON_H__
-
-#define MAX_FPS_OPTIONS_SUPPORTED 3
-#define I2C_MSG_LENGTH 0x2
-#define E2PROM_2ADDR 0x80000000
-#define E2PROM_ADDR_MASK 0x7fffffff
-
-/* Defines for register writes and register array processing */
-#define IMX_BYTE_MAX 32
-#define IMX_SHORT_MAX 16
-#define I2C_RETRY_COUNT 5
-#define IMX_TOK_MASK 0xfff0
-
-enum imx_tok_type {
- IMX_8BIT = 0x0001,
- IMX_16BIT = 0x0002,
- IMX_TOK_TERM = 0xf000, /* terminating token for reg list */
- IMX_TOK_DELAY = 0xfe00 /* delay token for reg list */
-};
-
-/**
- * struct imx_reg - MI sensor register format
- * @type: type of the register
- * @reg: 16-bit offset to register
- * @val: 8/16/32-bit register value
- *
- * Define a structure for sensor register initialization values
- */
-struct imx_reg {
- enum imx_tok_type type;
- u16 sreg;
- u32 val; /* @set value for read/mod/write, @mask */
-};
-
-struct imx_fps_setting {
- int fps;
- unsigned short pixels_per_line;
- unsigned short lines_per_frame;
- int mipi_freq; /* MIPI lane frequency in kHz */
- const struct imx_reg *regs; /* regs that the fps setting needs */
-};
-
-struct imx_resolution {
- const struct imx_fps_setting fps_options[MAX_FPS_OPTIONS_SUPPORTED];
- u8 *desc;
- const struct imx_reg *regs;
- int res;
- int width;
- int height;
- int fps;
- unsigned short pixels_per_line;
- unsigned short lines_per_frame;
- int mipi_freq; /* MIPI lane frequency in kHz */
- unsigned short skip_frames;
- u8 bin_factor_x;
- u8 bin_factor_y;
- bool used;
-};
-
-#define GROUPED_PARAMETER_HOLD_ENABLE {IMX_8BIT, 0x0104, 0x1}
-#define GROUPED_PARAMETER_HOLD_DISABLE {IMX_8BIT, 0x0104, 0x0}
-
-int imx_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u16 val);
-#endif
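
The deleted common.h above documents the imx_reg token format: each entry carries a width token (IMX_8BIT/IMX_16BIT), a register offset and a value, lists are terminated by IMX_TOK_TERM, and IMX_TOK_DELAY entries request a pause instead of a write. A minimal sketch of how such a list would be walked, assuming the declarations from that header and the imx_write_reg() prototype it exports (the walker itself is hypothetical):

#include <linux/delay.h>
#include <linux/i2c.h>

static int foo_write_reg_list(struct i2c_client *client,
			      const struct imx_reg *regs)
{
	int ret;

	for (; regs->type != IMX_TOK_TERM; regs++) {
		/* a delay token encodes milliseconds in .val, not a write */
		if (regs->type == IMX_TOK_DELAY) {
			msleep(regs->val);
			continue;
		}

		ret = imx_write_reg(client, regs->type, regs->sreg, regs->val);
		if (ret)
			return ret;
	}

	return 0;
}
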
diff --git a/drivers/staging/media/atomisp/i2c/imx/drv201.c b/drivers/staging/media/atomisp/i2c/imx/drv201.c
deleted file mode 100644
index 221e4875ac49..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/drv201.c
+++ /dev/null
@@ -1,210 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/kmod.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include <asm/intel-mid.h>
-
-#include "drv201.h"
-
-static struct drv201_device drv201_dev;
-
-static int drv201_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
-{
- struct i2c_msg msg[2];
- u8 buf[2];
- buf[0] = reg;
- buf[1] = 0;
-
- msg[0].addr = DRV201_VCM_ADDR;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = &buf[0];
-
- msg[1].addr = DRV201_VCM_ADDR;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 1;
- msg[1].buf = &buf[1];
- *val = 0;
- if (i2c_transfer(client->adapter, msg, 2) != 2)
- return -EIO;
- *val = buf[1];
- return 0;
-}
-
-static int drv201_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
-{
- struct i2c_msg msg;
- u8 buf[2];
- buf[0] = reg;
- buf[1] = val;
- msg.addr = DRV201_VCM_ADDR;
- msg.flags = 0;
- msg.len = 2;
- msg.buf = &buf[0];
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
- return 0;
-}
-
-static int drv201_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
-{
- struct i2c_msg msg;
- u8 buf[3];
- buf[0] = reg;
- buf[1] = (u8)(val >> 8);
- buf[2] = (u8)(val & 0xff);
- msg.addr = DRV201_VCM_ADDR;
- msg.flags = 0;
- msg.len = 3;
- msg.buf = &buf[0];
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
- return 0;
-}
-
-int drv201_vcm_power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 value;
-
- /* Enable power */
- ret = drv201_dev.platform_data->power_ctrl(sd, 1);
- if (ret)
- return ret;
- /* Wait for VBAT to stabilize */
- udelay(1);
- /*
- * Jiggle SCL pin to wake up device.
- * Drv201 expect SCL from low to high to wake device up.
- * So the 1st access to i2c would fail.
- * Using following function to wake device up.
- */
- drv201_i2c_wr8(client, DRV201_CONTROL, DRV201_RESET);
-
- /* Need 100us to transit from SHUTDOWN to STANDBY*/
- usleep_range(WAKEUP_DELAY_US, WAKEUP_DELAY_US * 10);
-
- /* Reset device */
- ret = drv201_i2c_wr8(client, DRV201_CONTROL, DRV201_RESET);
- if (ret < 0)
- goto fail_powerdown;
-
- /* Detect device */
- ret = drv201_i2c_rd8(client, DRV201_CONTROL, &value);
- if (ret < 0)
- goto fail_powerdown;
- if (value != DEFAULT_CONTROL_VAL) {
- ret = -ENXIO;
- goto fail_powerdown;
- }
-
- drv201_dev.focus = DRV201_MAX_FOCUS_POS;
- drv201_dev.initialized = true;
-
- return 0;
-fail_powerdown:
- drv201_dev.platform_data->power_ctrl(sd, 0);
- return ret;
-}
-
-int drv201_vcm_power_down(struct v4l2_subdev *sd)
-{
- return drv201_dev.platform_data->power_ctrl(sd, 0);
-}
-
-
-static int drv201_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u16 data = val & VCM_CODE_MASK;
-
- if (!drv201_dev.initialized)
- return -ENODEV;
- return drv201_i2c_wr16(client, DRV201_VCM_CURRENT, data);
-}
-
-int drv201_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- int ret;
-
- value = clamp(value, 0, DRV201_MAX_FOCUS_POS);
- ret = drv201_t_focus_vcm(sd, value);
- if (ret == 0) {
- drv201_dev.number_of_steps = value - drv201_dev.focus;
- drv201_dev.focus = value;
- getnstimeofday(&(drv201_dev.timestamp_t_focus_abs));
- }
-
- return ret;
-}
-
-int drv201_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
- return drv201_t_focus_abs(sd, drv201_dev.focus + value);
-}
-
-int drv201_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- u32 status = 0;
- struct timespec temptime;
- const struct timespec timedelay = {
- 0,
- min_t(u32, abs(drv201_dev.number_of_steps)*DELAY_PER_STEP_NS,
- DELAY_MAX_PER_STEP_NS),
- };
-
- ktime_get_ts(&temptime);
-
- temptime = timespec_sub(temptime, (drv201_dev.timestamp_t_focus_abs));
-
- if (timespec_compare(&temptime, &timedelay) <= 0) {
- status |= ATOMISP_FOCUS_STATUS_MOVING;
- status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
- } else {
- status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- status |= ATOMISP_FOCUS_HP_COMPLETE;
- }
- *value = status;
-
- return 0;
-}
-
-int drv201_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- s32 val;
-
- drv201_q_focus_status(sd, &val);
-
- if (val & ATOMISP_FOCUS_STATUS_MOVING)
- *value = drv201_dev.focus - drv201_dev.number_of_steps;
- else
- *value = drv201_dev.focus;
-
- return 0;
-}
-
-int drv201_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int drv201_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/drv201.h b/drivers/staging/media/atomisp/i2c/imx/drv201.h
deleted file mode 100644
index 2ef8aafdf675..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/drv201.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DRV201_H__
-#define __DRV201_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-#include <linux/time.h>
-
-#define DRV201_VCM_ADDR 0x0e
-
-/* drv201 device structure */
-struct drv201_device {
- const struct camera_af_platform_data *platform_data;
- struct timespec timestamp_t_focus_abs;
- struct timespec focus_time; /* Time when focus was last time set */
- s32 focus; /* Current focus value */
- s16 number_of_steps;
- bool initialized; /* true if drv201 is detected */
-};
-
-#define DRV201_INVALID_CONFIG 0xffffffff
-#define DRV201_MAX_FOCUS_POS 1023
-#define DELAY_PER_STEP_NS 1000000
-#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
-
-#define DRV201_CONTROL 2
-#define DRV201_VCM_CURRENT 3
-#define DRV201_STATUS 5
-#define DRV201_MODE 6
-#define DRV201_VCM_FREQ 7
-
-#define DEFAULT_CONTROL_VAL 2
-#define DRV201_RESET 1
-#define WAKEUP_DELAY_US 100
-#define VCM_CODE_MASK 0x03ff
-
-#endif
-
-
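
The ad5816g and drv201 files above open-code their 8- and 16-bit register accessors with i2c_msg pairs; the ap1302 driver removed earlier in this patch did the same job through regmap. A minimal sketch of that alternative for a VCM whose registers are 8-bit addressed with 16-bit big-endian values (driver name, register number and focus code are illustrative only):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config foo_vcm_regmap_cfg = {
	.reg_bits = 8,
	.val_bits = 16,
	.val_format_endian = REGMAP_ENDIAN_BIG,
};

static int foo_vcm_probe(struct i2c_client *client)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(client, &foo_vcm_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* write a 10-bit focus code to the MSB/LSB pair at register 0x03 */
	return regmap_write(map, 0x03, 0x0200 & 0x03ff);
}
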
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9714.c b/drivers/staging/media/atomisp/i2c/imx/dw9714.c
deleted file mode 100644
index f96855454313..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9714.c
+++ /dev/null
@@ -1,224 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/kmod.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include <asm/intel-mid.h>
-
-#include "dw9714.h"
-
-static struct dw9714_device dw9714_dev;
-static int dw9714_i2c_write(struct i2c_client *client, u16 data)
-{
- struct i2c_msg msg;
- const int num_msg = 1;
- int ret;
- u16 val;
-
- val = cpu_to_be16(data);
- msg.addr = DW9714_VCM_ADDR;
- msg.flags = 0;
- msg.len = DW9714_16BIT;
- msg.buf = (u8 *)&val;
-
- ret = i2c_transfer(client->adapter, &msg, 1);
-
- return ret == num_msg ? 0 : -EIO;
-}
-
-int dw9714_vcm_power_up(struct v4l2_subdev *sd)
-{
- int ret;
-
- /* Enable power */
- ret = dw9714_dev.platform_data->power_ctrl(sd, 1);
- /* waiting time requested by DW9714A(vcm) */
- usleep_range(12000, 12500);
- return ret;
-}
-
-int dw9714_vcm_power_down(struct v4l2_subdev *sd)
-{
- return dw9714_dev.platform_data->power_ctrl(sd, 0);
-}
-
-
-static int dw9714_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret = -EINVAL;
- u8 mclk = vcm_step_mclk(dw9714_dev.vcm_settings.step_setting);
- u8 s = vcm_step_s(dw9714_dev.vcm_settings.step_setting);
-
- /*
- * For different mode, VCM_PROTECTION_OFF/ON required by the
- * control procedure. For DW9714_DIRECT/DLC mode, slew value is
- * VCM_DEFAULT_S(0).
- */
- switch (dw9714_dev.vcm_mode) {
- case DW9714_DIRECT:
- if (dw9714_dev.vcm_settings.update) {
- ret = dw9714_i2c_write(client, VCM_PROTECTION_OFF);
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client, DIRECT_VCM);
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client, VCM_PROTECTION_ON);
- if (ret)
- return ret;
- dw9714_dev.vcm_settings.update = false;
- }
- ret = dw9714_i2c_write(client,
- vcm_val(val, VCM_DEFAULT_S));
- break;
- case DW9714_LSC:
- if (dw9714_dev.vcm_settings.update) {
- ret = dw9714_i2c_write(client, VCM_PROTECTION_OFF);
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client,
- vcm_dlc_mclk(DLC_DISABLE, mclk));
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client,
- vcm_tsrc(dw9714_dev.vcm_settings.t_src));
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client, VCM_PROTECTION_ON);
- if (ret)
- return ret;
- dw9714_dev.vcm_settings.update = false;
- }
- ret = dw9714_i2c_write(client, vcm_val(val, s));
- break;
- case DW9714_DLC:
- if (dw9714_dev.vcm_settings.update) {
- ret = dw9714_i2c_write(client, VCM_PROTECTION_OFF);
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client,
- vcm_dlc_mclk(DLC_ENABLE, mclk));
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client,
- vcm_tsrc(dw9714_dev.vcm_settings.t_src));
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client, VCM_PROTECTION_ON);
- if (ret)
- return ret;
- dw9714_dev.vcm_settings.update = false;
- }
- ret = dw9714_i2c_write(client,
- vcm_val(val, VCM_DEFAULT_S));
- break;
- }
- return ret;
-}
-
-int dw9714_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- int ret;
-
- value = clamp(value, 0, DW9714_MAX_FOCUS_POS);
- ret = dw9714_t_focus_vcm(sd, value);
- if (ret == 0) {
- dw9714_dev.number_of_steps = value - dw9714_dev.focus;
- dw9714_dev.focus = value;
- getnstimeofday(&(dw9714_dev.timestamp_t_focus_abs));
- }
-
- return ret;
-}
-
-int dw9714_t_focus_abs_init(struct v4l2_subdev *sd)
-{
- int ret;
-
- ret = dw9714_t_focus_vcm(sd, DW9714_DEFAULT_FOCUS_POS);
- if (ret == 0) {
- dw9714_dev.number_of_steps =
- DW9714_DEFAULT_FOCUS_POS - dw9714_dev.focus;
- dw9714_dev.focus = DW9714_DEFAULT_FOCUS_POS;
- getnstimeofday(&(dw9714_dev.timestamp_t_focus_abs));
- }
-
- return ret;
-}
-
-int dw9714_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
-
- return dw9714_t_focus_abs(sd, dw9714_dev.focus + value);
-}
-
-int dw9714_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- u32 status = 0;
- struct timespec temptime;
- const struct timespec timedelay = {
- 0,
- min_t(u32, abs(dw9714_dev.number_of_steps)*DELAY_PER_STEP_NS,
- DELAY_MAX_PER_STEP_NS),
- };
-
- ktime_get_ts(&temptime);
-
- temptime = timespec_sub(temptime, (dw9714_dev.timestamp_t_focus_abs));
-
- if (timespec_compare(&temptime, &timedelay) <= 0) {
- status |= ATOMISP_FOCUS_STATUS_MOVING;
- status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
- } else {
- status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- status |= ATOMISP_FOCUS_HP_COMPLETE;
- }
- *value = status;
-
- return 0;
-}
-
-int dw9714_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- s32 val;
-
- dw9714_q_focus_status(sd, &val);
-
- if (val & ATOMISP_FOCUS_STATUS_MOVING)
- *value = dw9714_dev.focus - dw9714_dev.number_of_steps;
- else
- *value = dw9714_dev.focus;
-
- return 0;
-}
-
-int dw9714_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- dw9714_dev.vcm_settings.step_setting = value;
- dw9714_dev.vcm_settings.update = true;
-
- return 0;
-}
-
-int dw9714_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- dw9714_dev.vcm_settings.t_src = value;
- dw9714_dev.vcm_settings.update = true;
-
- return 0;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9714.h b/drivers/staging/media/atomisp/i2c/imx/dw9714.h
deleted file mode 100644
index aee560026b56..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9714.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DW9714_H__
-#define __DW9714_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-
-
-#define DW9714_VCM_ADDR 0x0c
-
-enum dw9714_tok_type {
- DW9714_8BIT = 0x0001,
- DW9714_16BIT = 0x0002,
-};
-
-struct dw9714_vcm_settings {
- u16 code; /* bit[9:0]: Data[9:0] */
- u8 t_src; /* bit[4:0]: T_SRC[4:0] */
- u8 step_setting; /* bit[3:0]: S[3:0]/bit[5:4]: MCLK[1:0] */
- bool update;
-};
-
-enum dw9714_vcm_mode {
- DW9714_DIRECT = 0x1, /* direct control */
- DW9714_LSC = 0x2, /* linear slope control */
- DW9714_DLC = 0x3, /* dual level control */
-};
-
-/* dw9714 device structure */
-struct dw9714_device {
- struct dw9714_vcm_settings vcm_settings;
- struct timespec timestamp_t_focus_abs;
- enum dw9714_vcm_mode vcm_mode;
- s16 number_of_steps;
- bool initialized; /* true if dw9714 is detected */
- s32 focus; /* Current focus value */
- struct timespec focus_time; /* Time when focus was last time set */
- __u8 buffer[4]; /* Used for i2c transactions */
- const struct camera_af_platform_data *platform_data;
-};
-
-#define DW9714_INVALID_CONFIG 0xffffffff
-#define DW9714_MAX_FOCUS_POS 1023
-#define DW9714_DEFAULT_FOCUS_POS 290
-
-
-/* MCLK[1:0] = 01 T_SRC[4:0] = 00001 S[3:0] = 0111 */
-#define DELAY_PER_STEP_NS 1000000
-#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
-
-#define DLC_ENABLE 1
-#define DLC_DISABLE 0
-#define VCM_PROTECTION_OFF 0xeca3
-#define VCM_PROTECTION_ON 0xdc51
-#define VCM_DEFAULT_S 0x0
-
-#define vcm_step_s(a) (u8)(a & 0xf)
-#define vcm_step_mclk(a) (u8)((a >> 4) & 0x3)
-#define vcm_dlc_mclk(dlc, mclk) (u16)((dlc << 3) | mclk | 0xa104)
-#define vcm_tsrc(tsrc) (u16)(tsrc << 3 | 0xf200)
-#define vcm_val(data, s) (u16)(data << 4 | s)
-#define DIRECT_VCM vcm_dlc_mclk(0, 0)
-
-#endif
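
The dw9714 macros above pack a 10-bit focus code and a 4-bit slew value into the single 16-bit word that dw9714_i2c_write() sends big-endian on the wire. A short worked example with illustrative numbers: vcm_val(512, 7) evaluates to 512 << 4 | 7 = 0x2007, so the controller receives 0x20 followed by 0x07; in the DIRECT and DLC modes the slew nibble is left at VCM_DEFAULT_S (0).
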
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9718.c b/drivers/staging/media/atomisp/i2c/imx/dw9718.c
deleted file mode 100644
index c02b9f0a2440..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9718.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Support for dw9718 vcm driver.
- *
- * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#include <linux/delay.h>
-#include "dw9718.h"
-
-static struct dw9718_device dw9718_dev;
-
-static int dw9718_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
-{
- struct i2c_msg msg[2];
- u8 buf[2] = { reg };
-
- msg[0].addr = DW9718_VCM_ADDR;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = buf;
-
- msg[1].addr = DW9718_VCM_ADDR;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 1;
- msg[1].buf = &buf[1];
- *val = 0;
-
- if (i2c_transfer(client->adapter, msg, 2) != 2)
- return -EIO;
- *val = buf[1];
-
- return 0;
-}
-
-static int dw9718_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
-{
- struct i2c_msg msg;
- u8 buf[2] = { reg, val};
-
- msg.addr = DW9718_VCM_ADDR;
- msg.flags = 0;
- msg.len = sizeof(buf);
- msg.buf = buf;
-
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
-
- return 0;
-}
-
-static int dw9718_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
-{
- struct i2c_msg msg;
- u8 buf[3] = { reg, (u8)(val >> 8), (u8)(val & 0xff)};
-
- msg.addr = DW9718_VCM_ADDR;
- msg.flags = 0;
- msg.len = sizeof(buf);
- msg.buf = buf;
-
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
-
- return 0;
-}
-
-int dw9718_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- value = clamp(value, 0, DW9718_MAX_FOCUS_POS);
- ret = dw9718_i2c_wr16(client, DW9718_DATA_M, value);
- /*pr_info("%s: value = %d\n", __func__, value);*/
- if (ret < 0)
- return ret;
-
- getnstimeofday(&dw9718_dev.focus_time);
- dw9718_dev.focus = value;
-
- return 0;
-}
-
-int dw9718_vcm_power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 value;
-
- if (dw9718_dev.power_on)
- return 0;
-
- /* Enable power */
- ret = dw9718_dev.platform_data->power_ctrl(sd, 1);
- if (ret) {
- dev_err(&client->dev, "DW9718_PD power_ctrl failed %d\n", ret);
- return ret;
- }
- /* Wait for VBAT to stabilize */
- udelay(100);
-
- /* Detect device */
- ret = dw9718_i2c_rd8(client, DW9718_SACT, &value);
- if (ret < 0) {
- dev_err(&client->dev, "read DW9718_SACT failed %d\n", ret);
- goto fail_powerdown;
- }
- /*
- * WORKAROUND: for module P8V12F-203 which are used on
- * Cherrytrail Refresh Davis Reef AoB, register SACT is not
- * returning default value as spec. But VCM works as expected and
- * root cause is still under discussion with vendor.
- * workaround here to avoid aborting the power up sequence and just
- * give a warning about this error.
- */
- if (value != DW9718_SACT_DEFAULT_VAL)
- dev_warn(&client->dev, "%s error, incorrect ID\n", __func__);
-
- /* Initialize according to recommended settings */
- ret = dw9718_i2c_wr8(client, DW9718_CONTROL,
- DW9718_CONTROL_SW_LINEAR |
- DW9718_CONTROL_S_SAC4 |
- DW9718_CONTROL_OCP_DISABLE |
- DW9718_CONTROL_UVLO_DISABLE);
- if (ret < 0) {
- dev_err(&client->dev, "write DW9718_CONTROL failed %d\n", ret);
- goto fail_powerdown;
- }
- ret = dw9718_i2c_wr8(client, DW9718_SACT,
- DW9718_SACT_MULT_TWO |
- DW9718_SACT_PERIOD_8_8MS);
- if (ret < 0) {
- dev_err(&client->dev, "write DW9718_SACT failed %d\n", ret);
- goto fail_powerdown;
- }
-
- ret = dw9718_t_focus_abs(sd, dw9718_dev.focus);
- if (ret)
- return ret;
- dw9718_dev.initialized = true;
- dw9718_dev.power_on = 1;
-
- return 0;
-
-fail_powerdown:
- dev_err(&client->dev, "%s error, powerup failed\n", __func__);
- dw9718_dev.platform_data->power_ctrl(sd, 0);
- return ret;
-}
-
-int dw9718_vcm_power_down(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- if (!dw9718_dev.power_on)
- return 0;
-
- ret = dw9718_dev.platform_data->power_ctrl(sd, 0);
- if (ret) {
- dev_err(&client->dev, "%s power_ctrl failed\n",
- __func__);
- return ret;
- }
- dw9718_dev.power_on = 0;
-
- return 0;
-}
-
-int dw9718_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- static const struct timespec move_time = {
- .tv_sec = 0,
- .tv_nsec = 60000000
- };
- struct timespec current_time, finish_time, delta_time;
-
- getnstimeofday(&current_time);
- finish_time = timespec_add(dw9718_dev.focus_time, move_time);
- delta_time = timespec_sub(current_time, finish_time);
- if (delta_time.tv_sec >= 0 && delta_time.tv_nsec >= 0) {
- *value = ATOMISP_FOCUS_HP_COMPLETE |
- ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- } else {
- *value = ATOMISP_FOCUS_STATUS_MOVING |
- ATOMISP_FOCUS_HP_IN_PROGRESS;
- }
-
- return 0;
-}
-
-int dw9718_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
- return dw9718_t_focus_abs(sd, dw9718_dev.focus + value);
-}
-
-int dw9718_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- *value = dw9718_dev.focus;
- return 0;
-}
-int dw9718_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int dw9718_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int dw9718_vcm_init(struct v4l2_subdev *sd)
-{
- dw9718_dev.platform_data = camera_get_af_platform_data();
- dw9718_dev.focus = DW9718_DEFAULT_FOCUS_POSITION;
- dw9718_dev.power_on = 0;
- return (NULL == dw9718_dev.platform_data) ? -ENODEV : 0;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9718.h b/drivers/staging/media/atomisp/i2c/imx/dw9718.h
deleted file mode 100644
index 4a1040c3149f..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9718.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Support for dw9718 vcm driver.
- *
- * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __DW9718_H__
-#define __DW9718_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-
-#define DW9718_VCM_ADDR (0x18 >> 1)
-
-/* dw9718 device structure */
-struct dw9718_device {
- struct timespec timestamp_t_focus_abs;
- s16 number_of_steps;
- bool initialized; /* true if dw9718 is detected */
- s32 focus; /* Current focus value */
- struct timespec focus_time; /* Time when focus was last time set */
- __u8 buffer[4]; /* Used for i2c transactions */
- const struct camera_af_platform_data *platform_data;
- __u8 power_on;
-};
-
-#define DW9718_MAX_FOCUS_POS 1023
-
-/* Register addresses */
-#define DW9718_PD 0x00
-#define DW9718_CONTROL 0x01
-#define DW9718_DATA_M 0x02
-#define DW9718_DATA_L 0x03
-#define DW9718_SW 0x04
-#define DW9718_SACT 0x05
-#define DW9718_FLAG 0x10
-
-#define DW9718_CONTROL_SW_LINEAR BIT(0)
-#define DW9718_CONTROL_S_SAC4 (BIT(1) | BIT(3))
-#define DW9718_CONTROL_OCP_DISABLE BIT(4)
-#define DW9718_CONTROL_UVLO_DISABLE BIT(5)
-
-#define DW9718_SACT_MULT_TWO 0x00
-#define DW9718_SACT_PERIOD_8_8MS 0x19
-#define DW9718_SACT_DEFAULT_VAL 0x60
-
-#define DW9718_DEFAULT_FOCUS_POSITION 300
-
-#endif /* __DW9718_H__ */
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9719.c b/drivers/staging/media/atomisp/i2c/imx/dw9719.c
deleted file mode 100644
index 565237796bb4..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9719.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Support for dw9719 vcm driver.
- *
- * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#include <linux/delay.h>
-#include "dw9719.h"
-
-static struct dw9719_device dw9719_dev;
-
-static int dw9719_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
-{
- struct i2c_msg msg[2];
- u8 buf[2] = { reg };
-
- msg[0].addr = DW9719_VCM_ADDR;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = buf;
-
- msg[1].addr = DW9719_VCM_ADDR;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 1;
- msg[1].buf = &buf[1];
- *val = 0;
-
- if (i2c_transfer(client->adapter, msg, 2) != 2)
- return -EIO;
- *val = buf[1];
-
- return 0;
-}
-
-static int dw9719_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
-{
- struct i2c_msg msg;
- u8 buf[2] = { reg, val };
-
- msg.addr = DW9719_VCM_ADDR;
- msg.flags = 0;
- msg.len = sizeof(buf);
- msg.buf = buf;
-
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
-
- return 0;
-}
-
-static int dw9719_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
-{
- struct i2c_msg msg;
- u8 buf[3] = { reg, (u8)(val >> 8), (u8)(val & 0xff)};
-
- msg.addr = DW9719_VCM_ADDR;
- msg.flags = 0;
- msg.len = sizeof(buf);
- msg.buf = buf;
-
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
-
- return 0;
-}
-
-int dw9719_vcm_power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 value;
-
- /* Enable power */
- ret = dw9719_dev.platform_data->power_ctrl(sd, 1);
- /* waiting time requested by DW9714A(vcm) */
- if (ret)
- return ret;
- /* Wait for VBAT to stabilize */
- udelay(1);
-
- /*
- * Jiggle SCL pin to wake up device.
- */
- ret = dw9719_i2c_wr8(client, DW9719_CONTROL, 1);
- /* Need 100us to transit from SHUTDOWN to STANDBY*/
- usleep_range(100, 1000);
-
- /* Enable the ringing compensation */
- ret = dw9719_i2c_wr8(client, DW9719_CONTROL, DW9719_ENABLE_RINGING);
- if (ret < 0)
- goto fail_powerdown;
-
- /* Use SAC3 mode */
- ret = dw9719_i2c_wr8(client, DW9719_MODE, DW9719_MODE_SAC3);
- if (ret < 0)
- goto fail_powerdown;
-
- /* Set the resonance frequency */
- ret = dw9719_i2c_wr8(client, DW9719_VCM_FREQ, DW9719_DEFAULT_VCM_FREQ);
- if (ret < 0)
- goto fail_powerdown;
-
- /* Detect device */
- ret = dw9719_i2c_rd8(client, DW9719_INFO, &value);
- if (ret < 0)
- goto fail_powerdown;
- if (value != DW9719_ID) {
- ret = -ENXIO;
- goto fail_powerdown;
- }
- dw9719_dev.focus = 0;
- dw9719_dev.initialized = true;
-
- return 0;
-
-fail_powerdown:
- dw9719_dev.platform_data->power_ctrl(sd, 0);
- return ret;
-}
-
-int dw9719_vcm_power_down(struct v4l2_subdev *sd)
-{
- return dw9719_dev.platform_data->power_ctrl(sd, 0);
-}
-
-int dw9719_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- static const struct timespec move_time = {
-
- .tv_sec = 0,
- .tv_nsec = 60000000
- };
- struct timespec current_time, finish_time, delta_time;
-
- getnstimeofday(&current_time);
- finish_time = timespec_add(dw9719_dev.focus_time, move_time);
- delta_time = timespec_sub(current_time, finish_time);
- if (delta_time.tv_sec >= 0 && delta_time.tv_nsec >= 0) {
- *value = ATOMISP_FOCUS_HP_COMPLETE |
- ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- } else {
- *value = ATOMISP_FOCUS_STATUS_MOVING |
- ATOMISP_FOCUS_HP_IN_PROGRESS;
- }
-
- return 0;
-}
-
-int dw9719_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- value = clamp(value, 0, DW9719_MAX_FOCUS_POS);
- ret = dw9719_i2c_wr16(client, DW9719_VCM_CURRENT, value);
- if (ret < 0)
- return ret;
-
- getnstimeofday(&dw9719_dev.focus_time);
- dw9719_dev.focus = value;
-
- return 0;
-}
-
-int dw9719_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
- return dw9719_t_focus_abs(sd, dw9719_dev.focus + value);
-}
-
-int dw9719_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- *value = dw9719_dev.focus;
- return 0;
-}
-int dw9719_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int dw9719_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9719.h b/drivers/staging/media/atomisp/i2c/imx/dw9719.h
deleted file mode 100644
index 711f412aef2a..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9719.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Support for dw9719 vcm driver.
- *
- * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __DW9719_H__
-#define __DW9719_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-
-#define DW9719_VCM_ADDR (0x18 >> 1)
-
-/* dw9719 device structure */
-struct dw9719_device {
- struct timespec timestamp_t_focus_abs;
- s16 number_of_steps;
- bool initialized; /* true if dw9719 is detected */
- s32 focus; /* Current focus value */
- struct timespec focus_time; /* Time when focus was last time set */
- __u8 buffer[4]; /* Used for i2c transactions */
- const struct camera_af_platform_data *platform_data;
-};
-
-#define DW9719_INVALID_CONFIG 0xffffffff
-#define DW9719_MAX_FOCUS_POS 1023
-#define DELAY_PER_STEP_NS 1000000
-#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
-
-#define DW9719_INFO 0
-#define DW9719_ID 0xF1
-#define DW9719_CONTROL 2
-#define DW9719_VCM_CURRENT 3
-
-#define DW9719_MODE 6
-#define DW9719_VCM_FREQ 7
-
-#define DW9719_MODE_SAC3 0x40
-#define DW9719_DEFAULT_VCM_FREQ 0x04
-#define DW9719_ENABLE_RINGING 0x02
-
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.c b/drivers/staging/media/atomisp/i2c/imx/imx.c
deleted file mode 100644
index 49ab0af87096..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx.c
+++ /dev/null
@@ -1,2480 +0,0 @@
-/*
- * Support for Sony imx 8MP camera sensor.
- *
- * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <asm/intel-mid.h>
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include "../../include/linux/libmsrlisthelper.h"
-#include <linux/mm.h>
-#include <linux/kmod.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include "imx.h"
-
-/*
- * The imx135 embedded data info:
- * embedded data line num: 2
- * line 0 effective data size(byte): 76
- * line 1 effective data size(byte): 113
- */
-static const uint32_t
- imx135_embedded_effective_size[IMX135_EMBEDDED_DATA_LINE_NUM]
- = {76, 113};
-
-static enum atomisp_bayer_order imx_bayer_order_mapping[] = {
- atomisp_bayer_order_rggb,
- atomisp_bayer_order_grbg,
- atomisp_bayer_order_gbrg,
- atomisp_bayer_order_bggr
-};
-
-static const unsigned int
-IMX227_BRACKETING_LUT_FRAME_ENTRY[IMX_MAX_AE_LUT_LENGTH] = {
- 0x0E10, 0x0E1E, 0x0E2C, 0x0E3A, 0x0E48};
-
-static int
-imx_read_reg(struct i2c_client *client, u16 len, u16 reg, u16 *val)
-{
- struct i2c_msg msg[2];
- u16 data[IMX_SHORT_MAX];
- int ret, i;
- int retry = 0;
-
- if (len > IMX_BYTE_MAX) {
- dev_err(&client->dev, "%s error, invalid data length\n",
- __func__);
- return -EINVAL;
- }
-
- do {
- memset(msg, 0 , sizeof(msg));
- memset(data, 0 , sizeof(data));
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = I2C_MSG_LENGTH;
- msg[0].buf = (u8 *)data;
- /* high byte goes first */
- data[0] = cpu_to_be16(reg);
-
- msg[1].addr = client->addr;
- msg[1].len = len;
- msg[1].flags = I2C_M_RD;
- msg[1].buf = (u8 *)data;
-
- ret = i2c_transfer(client->adapter, msg, 2);
- if (ret != 2) {
- dev_err(&client->dev,
- "retrying i2c read from offset 0x%x error %d... %d\n",
- reg, ret, retry);
- msleep(20);
- }
- } while (ret != 2 && retry++ < I2C_RETRY_COUNT);
-
- if (ret != 2)
- return -EIO;
-
- /* high byte comes first */
- if (len == IMX_8BIT) {
- *val = (u8)data[0];
- } else {
- /* 16-bit access is default when len > 1 */
- for (i = 0; i < (len >> 1); i++)
- val[i] = be16_to_cpu(data[i]);
- }
-
- return 0;
-}
-
-static int imx_i2c_write(struct i2c_client *client, u16 len, u8 *data)
-{
- struct i2c_msg msg;
- int ret;
- int retry = 0;
-
- do {
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = len;
- msg.buf = data;
-
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret != 1) {
- dev_err(&client->dev,
- "retrying i2c write transfer... %d\n", retry);
- msleep(20);
- }
- } while (ret != 1 && retry++ < I2C_RETRY_COUNT);
-
- return ret == 1 ? 0 : -EIO;
-}
-
-int
-imx_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u16 val)
-{
- int ret;
- unsigned char data[4] = {0};
- u16 *wreg = (u16 *)data;
- const u16 len = data_length + sizeof(u16); /* 16-bit address + data */
-
- if (data_length != IMX_8BIT && data_length != IMX_16BIT) {
- v4l2_err(client, "%s error, invalid data_length\n", __func__);
- return -EINVAL;
- }
-
- /* high byte goes out first */
- *wreg = cpu_to_be16(reg);
-
- if (data_length == IMX_8BIT)
- data[2] = (u8)(val);
- else {
- /* IMX_16BIT */
- u16 *wdata = (u16 *)&data[2];
- *wdata = cpu_to_be16(val);
- }
-
- ret = imx_i2c_write(client, len, data);
- if (ret)
- dev_err(&client->dev,
- "write error: wrote 0x%x to offset 0x%x error %d",
- val, reg, ret);
-
- return ret;
-}
-
-/*
- * imx_write_reg_array - Initializes a list of imx registers
- * @client: i2c driver client structure
- * @reglist: list of registers to be written
- *
- * This function initializes a list of registers. When consecutive addresses
- * are found in a row on the list, this function creates a buffer and sends
- * consecutive data in a single i2c_transfer().
- *
- * __imx_flush_reg_array(), __imx_buf_reg_array() and
- * __imx_write_reg_is_consecutive() are internal helpers for
- * imx_write_reg_array() and should not be used anywhere else.
- *
- */
-
-static int __imx_flush_reg_array(struct i2c_client *client,
- struct imx_write_ctrl *ctrl)
-{
- u16 size;
-
- if (ctrl->index == 0)
- return 0;
-
- size = sizeof(u16) + ctrl->index; /* 16-bit address + data */
- ctrl->buffer.addr = cpu_to_be16(ctrl->buffer.addr);
- ctrl->index = 0;
-
- return imx_i2c_write(client, size, (u8 *)&ctrl->buffer);
-}
-
-static int __imx_buf_reg_array(struct i2c_client *client,
- struct imx_write_ctrl *ctrl,
- const struct imx_reg *next)
-{
- int size;
- u16 *data16;
-
- switch (next->type) {
- case IMX_8BIT:
- size = 1;
- ctrl->buffer.data[ctrl->index] = (u8)next->val;
- break;
- case IMX_16BIT:
- size = 2;
- data16 = (u16 *)&ctrl->buffer.data[ctrl->index];
- *data16 = cpu_to_be16((u16)next->val);
- break;
- default:
- return -EINVAL;
- }
-
- /* When first item is added, we need to store its starting address */
- if (ctrl->index == 0)
- ctrl->buffer.addr = next->sreg;
-
- ctrl->index += size;
-
- /*
-	 * If the buffer cannot guarantee free space for the next item,
-	 * flush it now to avoid running out of room.
- */
- if (ctrl->index + sizeof(u16) >= IMX_MAX_WRITE_BUF_SIZE)
- return __imx_flush_reg_array(client, ctrl);
-
- return 0;
-}
-
-static int
-__imx_write_reg_is_consecutive(struct i2c_client *client,
- struct imx_write_ctrl *ctrl,
- const struct imx_reg *next)
-{
- if (ctrl->index == 0)
- return 1;
-
- return ctrl->buffer.addr + ctrl->index == next->sreg;
-}
-
-static int imx_write_reg_array(struct i2c_client *client,
- const struct imx_reg *reglist)
-{
- const struct imx_reg *next = reglist;
- struct imx_write_ctrl ctrl;
- int err;
-
- ctrl.index = 0;
- for (; next->type != IMX_TOK_TERM; next++) {
- switch (next->type & IMX_TOK_MASK) {
- case IMX_TOK_DELAY:
- err = __imx_flush_reg_array(client, &ctrl);
- if (err)
- return err;
- msleep(next->val);
- break;
-
- default:
- /*
-			 * If the next address is not consecutive, data needs
-			 * to be flushed before proceeding.
- */
- if (!__imx_write_reg_is_consecutive(client, &ctrl,
- next)) {
- err = __imx_flush_reg_array(client, &ctrl);
- if (err)
- return err;
- }
- err = __imx_buf_reg_array(client, &ctrl, next);
- if (err) {
- v4l2_err(client, "%s: write error, aborted\n",
- __func__);
- return err;
- }
- break;
- }
- }
-
- return __imx_flush_reg_array(client, &ctrl);
-}
-
-static int __imx_min_fps_diff(int fps, const struct imx_fps_setting *fps_list)
-{
- int diff = INT_MAX;
- int i;
-
- if (fps == 0)
- return 0;
-
- for (i = 0; i < MAX_FPS_OPTIONS_SUPPORTED; i++) {
- if (!fps_list[i].fps)
- break;
- if (abs(fps_list[i].fps - fps) < diff)
- diff = abs(fps_list[i].fps - fps);
- }
-
- return diff;
-}
-
-static int __imx_nearest_fps_index(int fps,
- const struct imx_fps_setting *fps_list)
-{
- int fps_index = 0;
- int i;
-
- for (i = 0; i < MAX_FPS_OPTIONS_SUPPORTED; i++) {
- if (!fps_list[i].fps)
- break;
- if (abs(fps_list[i].fps - fps)
- < abs(fps_list[fps_index].fps - fps))
- fps_index = i;
- }
- return fps_index;
-}
-
-/*
- * Choose the nearest fps setting at or above the requested fps.
- * fps_list should be in ascending order.
- */
-static int __imx_above_nearest_fps_index(int fps,
- const struct imx_fps_setting *fps_list)
-{
- int fps_index = 0;
- int i;
-
- for (i = 0; i < MAX_FPS_OPTIONS_SUPPORTED; i++) {
- if (!fps_list[i].fps)
- break;
- if (fps <= fps_list[i].fps) {
- fps_index = i;
- break;
- }
- }
-
- return fps_index;
-}
-
-static int imx_get_lanes(struct v4l2_subdev *sd)
-{
- struct camera_mipi_info *imx_info = v4l2_get_subdev_hostdata(sd);
-
- if (!imx_info)
- return -ENOSYS;
- if (imx_info->num_lanes < 1 || imx_info->num_lanes > 4 ||
- imx_info->num_lanes == 3)
- return -EINVAL;
-
- return imx_info->num_lanes;
-}
-
-static int __imx_update_exposure_timing(struct i2c_client *client, u16 exposure,
- u16 llp, u16 fll)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx_device *dev = to_imx_sensor(sd);
- int ret = 0;
-
- if (dev->sensor_id != IMX227_ID) {
- /* Increase the VTS to match exposure + margin */
- if (exposure > fll - IMX_INTEGRATION_TIME_MARGIN)
- fll = exposure + IMX_INTEGRATION_TIME_MARGIN;
- }
-
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->line_length_pixels, llp);
- if (ret)
- return ret;
-
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->frame_length_lines, fll);
- if (ret)
- return ret;
-
- if (exposure)
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->coarse_integration_time, exposure);
-
- return ret;
-}
-
-static int __imx_update_gain(struct v4l2_subdev *sd, u16 gain)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- /* set global gain */
- ret = imx_write_reg(client, IMX_8BIT, dev->reg_addr->global_gain, gain);
- if (ret)
- return ret;
-
- /* set short analog gain */
- if (dev->sensor_id == IMX135_ID)
- ret = imx_write_reg(client, IMX_8BIT, IMX_SHORT_AGC_GAIN, gain);
-
- return ret;
-}
-
-static int __imx_update_digital_gain(struct i2c_client *client, u16 digitgain)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx_device *dev = to_imx_sensor(sd);
- struct imx_write_buffer digit_gain;
-
- digit_gain.addr = cpu_to_be16(dev->reg_addr->dgc_adj);
- digit_gain.data[0] = (digitgain >> 8) & 0xFF;
- digit_gain.data[1] = digitgain & 0xFF;
-
- if (dev->sensor_id == IMX219_ID) {
- return imx_i2c_write(client, IMX219_DGC_LEN, (u8 *)&digit_gain);
- } else if (dev->sensor_id == IMX227_ID) {
- return imx_i2c_write(client, IMX227_DGC_LEN, (u8 *)&digit_gain);
- } else {
- digit_gain.data[2] = (digitgain >> 8) & 0xFF;
- digit_gain.data[3] = digitgain & 0xFF;
- digit_gain.data[4] = (digitgain >> 8) & 0xFF;
- digit_gain.data[5] = digitgain & 0xFF;
- digit_gain.data[6] = (digitgain >> 8) & 0xFF;
- digit_gain.data[7] = digitgain & 0xFF;
- return imx_i2c_write(client, IMX_DGC_LEN, (u8 *)&digit_gain);
- }
- return 0;
-}
-
-static int imx_set_exposure_gain(struct v4l2_subdev *sd, u16 coarse_itg,
- u16 gain, u16 digitgain)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int lanes = imx_get_lanes(sd);
- unsigned int digitgain_scaled;
- int ret = 0;
-
- /* Validate exposure: cannot exceed VTS-4 where VTS is 16bit */
- coarse_itg = clamp_t(u16, coarse_itg, 0, IMX_MAX_EXPOSURE_SUPPORTED);
-
- /* Validate gain: must not exceed maximum 8bit value */
- gain = clamp_t(u16, gain, 0, IMX_MAX_GLOBAL_GAIN_SUPPORTED);
-
- mutex_lock(&dev->input_lock);
-
- if (dev->sensor_id == IMX227_ID) {
- ret = imx_write_reg_array(client, imx_param_hold);
- if (ret) {
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- }
-
- /* For imx175, setting gain must be delayed by one */
- if ((dev->sensor_id == IMX175_ID) && dev->digital_gain)
- digitgain_scaled = dev->digital_gain;
- else
- digitgain_scaled = digitgain;
- /* imx132 with two lanes needs more gain to saturate at max */
- if (dev->sensor_id == IMX132_ID && lanes > 1) {
- digitgain_scaled *= IMX132_2LANES_GAINFACT;
- digitgain_scaled >>= IMX132_2LANES_GAINFACT_SHIFT;
- }
- /* Validate digital gain: must not exceed 12 bit value*/
- digitgain_scaled = clamp_t(unsigned int, digitgain_scaled,
- 0, IMX_MAX_DIGITAL_GAIN_SUPPORTED);
-
- ret = __imx_update_exposure_timing(client, coarse_itg,
- dev->pixels_per_line, dev->lines_per_frame);
- if (ret)
- goto out;
- dev->coarse_itg = coarse_itg;
-
- if (dev->sensor_id == IMX175_ID)
- ret = __imx_update_gain(sd, dev->gain);
- else
- ret = __imx_update_gain(sd, gain);
- if (ret)
- goto out;
- dev->gain = gain;
-
- ret = __imx_update_digital_gain(client, digitgain_scaled);
- if (ret)
- goto out;
- dev->digital_gain = digitgain;
-
-out:
- if (dev->sensor_id == IMX227_ID)
- ret = imx_write_reg_array(client, imx_param_update);
- mutex_unlock(&dev->input_lock);
- return ret;
-}
-
-static long imx_s_exposure(struct v4l2_subdev *sd,
- struct atomisp_exposure *exposure)
-{
- return imx_set_exposure_gain(sd, exposure->integration_time[0],
- exposure->gain[0], exposure->gain[1]);
-}
-
-/* FIXME - to be updated with real OTP reading */
-static int imx_g_priv_int_data(struct v4l2_subdev *sd,
- struct v4l2_private_int_data *priv)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- u8 __user *to = priv->data;
- u32 read_size = priv->size;
- int ret;
-
- /* No need to copy data if size is 0 */
- if (!read_size)
- goto out;
-
- if (IS_ERR(dev->otp_data)) {
- dev_err(&client->dev, "OTP data not available");
- return PTR_ERR(dev->otp_data);
- }
- /* Correct read_size value only if bigger than maximum */
- if (read_size > dev->otp_driver->size)
- read_size = dev->otp_driver->size;
-
- ret = copy_to_user(to, dev->otp_data, read_size);
- if (ret) {
- dev_err(&client->dev, "%s: failed to copy OTP data to user\n",
- __func__);
- return -EFAULT;
- }
-out:
- /* Return correct size */
- priv->size = dev->otp_driver->size;
-
- return 0;
-}
-
-static int __imx_init(struct v4l2_subdev *sd, u32 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- int lanes = imx_get_lanes(sd);
- int ret;
-
- if (dev->sensor_id == IMX_ID_DEFAULT)
- return 0;
-
- /* The default is no flip at sensor initialization */
- dev->h_flip->cur.val = 0;
- dev->v_flip->cur.val = 0;
- /* Sets the default FPS */
- dev->fps_index = 0;
- dev->curr_res_table = dev->mode_tables->res_preview;
- dev->entries_curr_table = dev->mode_tables->n_res_preview;
-
- ret = imx_write_reg_array(client, dev->mode_tables->init_settings);
- if (ret)
- return ret;
-
- if (dev->sensor_id == IMX132_ID && lanes > 0) {
- static const u8 imx132_rglanesel[] = {
- IMX132_RGLANESEL_1LANE, /* 1 lane */
- IMX132_RGLANESEL_2LANES, /* 2 lanes */
- IMX132_RGLANESEL_1LANE, /* undefined */
- IMX132_RGLANESEL_4LANES, /* 4 lanes */
- };
- ret = imx_write_reg(client, IMX_8BIT,
- IMX132_RGLANESEL, imx132_rglanesel[lanes - 1]);
- }
-
- return ret;
-}
-
-static int imx_init(struct v4l2_subdev *sd, u32 val)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- int ret = 0;
-
- mutex_lock(&dev->input_lock);
- ret = __imx_init(sd, val);
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-
-static long imx_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
-
- switch (cmd) {
- case ATOMISP_IOC_S_EXPOSURE:
- return imx_s_exposure(sd, arg);
- case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
- return imx_g_priv_int_data(sd, arg);
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- int ret;
-
- /* power control */
- ret = dev->platform_data->power_ctrl(sd, 1);
- if (ret)
- goto fail_power;
-
- /* flis clock control */
- ret = dev->platform_data->flisclk_ctrl(sd, 1);
- if (ret)
- goto fail_clk;
-
- /* gpio ctrl */
- ret = dev->platform_data->gpio_ctrl(sd, 1);
- if (ret) {
- dev_err(&client->dev, "gpio failed\n");
- goto fail_gpio;
- }
-
- return 0;
-fail_gpio:
- dev->platform_data->gpio_ctrl(sd, 0);
-fail_clk:
- dev->platform_data->flisclk_ctrl(sd, 0);
-fail_power:
- dev->platform_data->power_ctrl(sd, 0);
- dev_err(&client->dev, "sensor power-up failed\n");
-
- return ret;
-}
-
-static int power_down(struct v4l2_subdev *sd)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- ret = dev->platform_data->flisclk_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "flisclk failed\n");
-
- /* gpio ctrl */
- ret = dev->platform_data->gpio_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "gpio failed\n");
-
- /* power control */
- ret = dev->platform_data->power_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "vprog failed.\n");
-
- return ret;
-}
-
-static int __imx_s_power(struct v4l2_subdev *sd, int on)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- int ret = 0;
- int r = 0;
-
- if (on == 0) {
- ret = power_down(sd);
- if (dev->vcm_driver && dev->vcm_driver->power_down)
- r = dev->vcm_driver->power_down(sd);
- if (ret == 0)
- ret = r;
- dev->power = 0;
- } else {
- if (dev->vcm_driver && dev->vcm_driver->power_up)
- ret = dev->vcm_driver->power_up(sd);
- if (ret)
- return ret;
- ret = power_up(sd);
- if (!ret) {
- dev->power = 1;
- return __imx_init(sd, 0);
- }
- }
-
- return ret;
-}
-
-static int imx_s_power(struct v4l2_subdev *sd, int on)
-{
- int ret;
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- ret = __imx_s_power(sd, on);
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-
-static int imx_get_intg_factor(struct i2c_client *client,
- struct camera_mipi_info *info,
- const struct imx_reg *reglist)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx_device *dev = to_imx_sensor(sd);
- int lanes = imx_get_lanes(sd);
- u32 vt_pix_clk_div;
- u32 vt_sys_clk_div;
- u32 pre_pll_clk_div;
- u32 pll_multiplier;
-
- const int ext_clk_freq_hz = 19200000;
- struct atomisp_sensor_mode_data *buf = &info->data;
- int ret;
- u16 data[IMX_INTG_BUF_COUNT];
-
- u32 vt_pix_clk_freq_mhz;
- u32 coarse_integration_time_min;
- u32 coarse_integration_time_max_margin;
- u32 read_mode;
- u32 div;
-
- if (info == NULL)
- return -EINVAL;
-
- memset(data, 0, IMX_INTG_BUF_COUNT * sizeof(u16));
- ret = imx_read_reg(client, 1, IMX_VT_PIX_CLK_DIV, data);
- if (ret)
- return ret;
- vt_pix_clk_div = data[0] & IMX_MASK_5BIT;
-
- if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID) {
- static const int rgpltd[] = { 2, 4, 1, 1 };
- ret = imx_read_reg(client, 1, IMX132_208_VT_RGPLTD, data);
- if (ret)
- return ret;
- vt_sys_clk_div = rgpltd[data[0] & IMX_MASK_2BIT];
- } else {
- ret = imx_read_reg(client, 1, IMX_VT_SYS_CLK_DIV, data);
- if (ret)
- return ret;
- vt_sys_clk_div = data[0] & IMX_MASK_2BIT;
- }
- ret = imx_read_reg(client, 1, IMX_PRE_PLL_CLK_DIV, data);
- if (ret)
- return ret;
- pre_pll_clk_div = data[0] & IMX_MASK_4BIT;
-
- ret = imx_read_reg(client, 2,
- (dev->sensor_id == IMX132_ID ||
- dev->sensor_id == IMX219_ID ||
- dev->sensor_id == IMX208_ID) ?
- IMX132_208_219_PLL_MULTIPLIER : IMX_PLL_MULTIPLIER, data);
- if (ret)
- return ret;
- pll_multiplier = data[0] & IMX_MASK_11BIT;
-
- memset(data, 0, IMX_INTG_BUF_COUNT * sizeof(u16));
- ret = imx_read_reg(client, 4, IMX_COARSE_INTG_TIME_MIN, data);
- if (ret)
- return ret;
- coarse_integration_time_min = data[0];
- coarse_integration_time_max_margin = data[1];
-
- /* Get the cropping and output resolution to ISP for this mode. */
- ret = imx_read_reg(client, 2, dev->reg_addr->horizontal_start_h, data);
- if (ret)
- return ret;
- buf->crop_horizontal_start = data[0];
-
- ret = imx_read_reg(client, 2, dev->reg_addr->vertical_start_h, data);
- if (ret)
- return ret;
- buf->crop_vertical_start = data[0];
-
- ret = imx_read_reg(client, 2, dev->reg_addr->horizontal_end_h, data);
- if (ret)
- return ret;
- buf->crop_horizontal_end = data[0];
-
- ret = imx_read_reg(client, 2, dev->reg_addr->vertical_end_h, data);
- if (ret)
- return ret;
- buf->crop_vertical_end = data[0];
-
- ret = imx_read_reg(client, 2,
- dev->reg_addr->horizontal_output_size_h, data);
- if (ret)
- return ret;
- buf->output_width = data[0];
-
- ret = imx_read_reg(client, 2,
- dev->reg_addr->vertical_output_size_h, data);
- if (ret)
- return ret;
- buf->output_height = data[0];
-
- memset(data, 0, IMX_INTG_BUF_COUNT * sizeof(u16));
- if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID ||
- dev->sensor_id == IMX219_ID)
- read_mode = 0;
- else {
- if (dev->sensor_id == IMX227_ID)
- ret = imx_read_reg(client, 1, IMX227_READ_MODE, data);
- else
- ret = imx_read_reg(client, 1, IMX_READ_MODE, data);
-
- if (ret)
- return ret;
- read_mode = data[0] & IMX_MASK_2BIT;
- }
-
- div = pre_pll_clk_div*vt_sys_clk_div*vt_pix_clk_div;
- if (div == 0)
- return -EINVAL;
-
- if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID)
- vt_pix_clk_freq_mhz = ext_clk_freq_hz / div;
- else if (dev->sensor_id == IMX227_ID) {
-		/* According to the IMX227 datasheet:
-		 * vt_pix_clk_freq_mhz = num_of_vt_lanes(4) * ivt_pix_clk_freq_mhz
- */
- vt_pix_clk_freq_mhz =
- (u64)4 * ext_clk_freq_hz * pll_multiplier;
- do_div(vt_pix_clk_freq_mhz, div);
- } else
- vt_pix_clk_freq_mhz = 2 * ext_clk_freq_hz / div;
-
- vt_pix_clk_freq_mhz *= pll_multiplier;
- if (dev->sensor_id == IMX132_ID && lanes > 0)
- vt_pix_clk_freq_mhz *= lanes;
-
- dev->vt_pix_clk_freq_mhz = vt_pix_clk_freq_mhz;
-
- buf->vt_pix_clk_freq_mhz = vt_pix_clk_freq_mhz;
- buf->coarse_integration_time_min = coarse_integration_time_min;
- buf->coarse_integration_time_max_margin =
- coarse_integration_time_max_margin;
-
- buf->fine_integration_time_min = IMX_FINE_INTG_TIME;
- buf->fine_integration_time_max_margin = IMX_FINE_INTG_TIME;
- buf->fine_integration_time_def = IMX_FINE_INTG_TIME;
- buf->frame_length_lines = dev->lines_per_frame;
- buf->line_length_pck = dev->pixels_per_line;
- buf->read_mode = read_mode;
-
- if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID ||
- dev->sensor_id == IMX219_ID) {
- buf->binning_factor_x = 1;
- buf->binning_factor_y = 1;
- } else {
- if (dev->sensor_id == IMX227_ID)
- ret = imx_read_reg(client, 1, IMX227_BINNING_ENABLE,
- data);
- else
- ret = imx_read_reg(client, 1, IMX_BINNING_ENABLE, data);
-
- if (ret)
- return ret;
- /* 1:binning enabled, 0:disabled */
- if (data[0] == 1) {
- if (dev->sensor_id == IMX227_ID)
- ret = imx_read_reg(client, 1,
- IMX227_BINNING_TYPE, data);
- else
- ret = imx_read_reg(client, 1,
- IMX_BINNING_TYPE, data);
-
- if (ret)
- return ret;
- buf->binning_factor_x = data[0] >> 4 & 0x0f;
- if (!buf->binning_factor_x)
- buf->binning_factor_x = 1;
- buf->binning_factor_y = data[0] & 0xf;
- if (!buf->binning_factor_y)
- buf->binning_factor_y = 1;
-			/* WORKAROUND: the NHD setting for IMX227 should use
-			 * 4x4 binning, but the register setting does not
-			 * reflect this; the vendor has been asked why. This is
-			 * a workaround for INTEL BZ 216560.
- */
- if (dev->sensor_id == IMX227_ID) {
- if (dev->curr_res_table[dev->fmt_idx].width ==
- 376 &&
- dev->curr_res_table[dev->fmt_idx].height ==
- 656) {
- buf->binning_factor_x = 4;
- buf->binning_factor_y = 4;
- }
- }
- } else {
- buf->binning_factor_x = 1;
- buf->binning_factor_y = 1;
- }
- }
-
- return 0;
-}
-
-/* This returns the exposure time being used. This should only be used
- for filling in EXIF data, not for actual image processing. */
-static int imx_q_exposure(struct v4l2_subdev *sd, s32 *value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- u16 coarse;
- int ret;
-
- /* the fine integration time is currently not calculated */
- ret = imx_read_reg(client, IMX_16BIT,
- dev->reg_addr->coarse_integration_time, &coarse);
- *value = coarse;
-
- return ret;
-}
-
-static int imx_test_pattern(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- int ret;
-
- if (dev->power == 0)
- return 0;
-
- ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_R,
- (u16)(dev->tp_r->val >> 22));
- if (ret)
- return ret;
-
- ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_GR,
- (u16)(dev->tp_gr->val >> 22));
- if (ret)
- return ret;
-
- ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_GB,
- (u16)(dev->tp_gb->val >> 22));
- if (ret)
- return ret;
-
- ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_B,
- (u16)(dev->tp_b->val >> 22));
- if (ret)
- return ret;
-
- return imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_MODE,
- (u16)(dev->tp_mode->val));
-}
-
-static u32 imx_translate_bayer_order(enum atomisp_bayer_order code)
-{
- switch (code) {
- case atomisp_bayer_order_rggb:
- return MEDIA_BUS_FMT_SRGGB10_1X10;
- case atomisp_bayer_order_grbg:
- return MEDIA_BUS_FMT_SGRBG10_1X10;
- case atomisp_bayer_order_bggr:
- return MEDIA_BUS_FMT_SBGGR10_1X10;
- case atomisp_bayer_order_gbrg:
- return MEDIA_BUS_FMT_SGBRG10_1X10;
- }
- return 0;
-}
-
-static int imx_v_flip(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct camera_mipi_info *imx_info = NULL;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u16 val;
-
- if (dev->power == 0)
- return -EIO;
-
- ret = imx_write_reg_array(client, dev->param_hold);
- if (ret)
- return ret;
-
- ret = imx_read_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, &val);
- if (ret)
- return ret;
- if (value)
- val |= IMX_VFLIP_BIT;
- else
- val &= ~IMX_VFLIP_BIT;
-
- ret = imx_write_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, val);
- if (ret)
- return ret;
-
- imx_info = v4l2_get_subdev_hostdata(sd);
- if (imx_info) {
- val &= (IMX_VFLIP_BIT|IMX_HFLIP_BIT);
- imx_info->raw_bayer_order = imx_bayer_order_mapping[val];
- dev->format.code = imx_translate_bayer_order(
- imx_info->raw_bayer_order);
- }
-
- return imx_write_reg_array(client, dev->param_update);
-}
-
-static int imx_h_flip(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct camera_mipi_info *imx_info = NULL;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u16 val;
-
- if (dev->power == 0)
- return -EIO;
-
- ret = imx_write_reg_array(client, dev->param_hold);
- if (ret)
- return ret;
- ret = imx_read_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, &val);
- if (ret)
- return ret;
- if (value)
- val |= IMX_HFLIP_BIT;
- else
- val &= ~IMX_HFLIP_BIT;
- ret = imx_write_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, val);
- if (ret)
- return ret;
-
- imx_info = v4l2_get_subdev_hostdata(sd);
- if (imx_info) {
- val &= (IMX_VFLIP_BIT|IMX_HFLIP_BIT);
- imx_info->raw_bayer_order = imx_bayer_order_mapping[val];
- dev->format.code = imx_translate_bayer_order(
- imx_info->raw_bayer_order);
- }
-
- return imx_write_reg_array(client, dev->param_update);
-}
-
-static int imx_g_focal(struct v4l2_subdev *sd, s32 *val)
-{
- *val = (IMX_FOCAL_LENGTH_NUM << 16) | IMX_FOCAL_LENGTH_DEM;
- return 0;
-}
-
-static int imx_g_fnumber(struct v4l2_subdev *sd, s32 *val)
-{
- /*const f number for imx*/
- *val = (IMX_F_NUMBER_DEFAULT_NUM << 16) | IMX_F_NUMBER_DEM;
- return 0;
-}
-
-static int imx_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
-{
- *val = (IMX_F_NUMBER_DEFAULT_NUM << 24) |
- (IMX_F_NUMBER_DEM << 16) |
- (IMX_F_NUMBER_DEFAULT_NUM << 8) | IMX_F_NUMBER_DEM;
- return 0;
-}
-
-static int imx_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- *val = dev->curr_res_table[dev->fmt_idx].bin_factor_x;
-
- return 0;
-}
-
-static int imx_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- *val = dev->curr_res_table[dev->fmt_idx].bin_factor_y;
-
- return 0;
-}
-
-static int imx_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->t_focus_abs)
- return dev->vcm_driver->t_focus_abs(sd, value);
- return 0;
-}
-
-static int imx_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->t_focus_rel)
- return dev->vcm_driver->t_focus_rel(sd, value);
- return 0;
-}
-
-static int imx_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->q_focus_status)
- return dev->vcm_driver->q_focus_status(sd, value);
- return 0;
-}
-
-static int imx_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->q_focus_abs)
- return dev->vcm_driver->q_focus_abs(sd, value);
- return 0;
-}
-
-static int imx_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->t_vcm_slew)
- return dev->vcm_driver->t_vcm_slew(sd, value);
- return 0;
-}
-
-static int imx_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->t_vcm_timing)
- return dev->vcm_driver->t_vcm_timing(sd, value);
- return 0;
-}
-
-static int imx_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct imx_device *dev = container_of(
- ctrl->handler, struct imx_device, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_TEST_PATTERN:
- ret = imx_test_pattern(&dev->sd);
- break;
- case V4L2_CID_VFLIP:
- dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n",
- __func__, ctrl->val);
- ret = imx_v_flip(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_HFLIP:
- dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n",
- __func__, ctrl->val);
- ret = imx_h_flip(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_FOCUS_ABSOLUTE:
- ret = imx_t_focus_abs(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_FOCUS_RELATIVE:
- ret = imx_t_focus_rel(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_VCM_SLEW:
- ret = imx_t_vcm_slew(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_VCM_TIMEING:
- ret = imx_t_vcm_timing(&dev->sd, ctrl->val);
- break;
- }
-
- return ret;
-}
-
-static int imx_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct imx_device *dev = container_of(
- ctrl->handler, struct imx_device, ctrl_handler);
- int ret = 0;
- unsigned int val;
-
- switch (ctrl->id) {
- case V4L2_CID_EXPOSURE_ABSOLUTE:
- ret = imx_q_exposure(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FOCUS_ABSOLUTE:
- ret = imx_q_focus_abs(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FOCUS_STATUS:
- ret = imx_q_focus_status(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FOCAL_ABSOLUTE:
- ret = imx_g_focal(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FNUMBER_ABSOLUTE:
- ret = imx_g_fnumber(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FNUMBER_RANGE:
- ret = imx_g_fnumber_range(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_BIN_FACTOR_HORZ:
- ret = imx_g_bin_factor_x(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_BIN_FACTOR_VERT:
- ret = imx_g_bin_factor_y(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_VBLANK:
- ctrl->val = dev->lines_per_frame -
- dev->curr_res_table[dev->fmt_idx].height;
- break;
- case V4L2_CID_HBLANK:
- ctrl->val = dev->pixels_per_line -
- dev->curr_res_table[dev->fmt_idx].width;
- break;
- case V4L2_CID_PIXEL_RATE:
- ctrl->val = dev->vt_pix_clk_freq_mhz;
- break;
- case V4L2_CID_LINK_FREQ:
- val = dev->curr_res_table[dev->fmt_idx].
- fps_options[dev->fps_index].mipi_freq;
- if (val == 0)
- val = dev->curr_res_table[dev->fmt_idx].mipi_freq;
- if (val == 0)
- return -EINVAL;
- ctrl->val = val * 1000; /* To Hz */
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static const struct v4l2_ctrl_ops ctrl_ops = {
- .s_ctrl = imx_s_ctrl,
- .g_volatile_ctrl = imx_g_volatile_ctrl
-};
-
-static const struct v4l2_ctrl_config imx_controls[] = {
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_EXPOSURE_ABSOLUTE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
- .min = 0x0,
- .max = 0xffff,
- .step = 0x01,
- .def = 0x00,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern",
- .min = 0,
- .max = 0xffff,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN_COLOR_R,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern solid color R",
- .min = INT_MIN,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN_COLOR_GR,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern solid color GR",
- .min = INT_MIN,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN_COLOR_GB,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern solid color GB",
- .min = INT_MIN,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN_COLOR_B,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern solid color B",
- .min = INT_MIN,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip",
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror",
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCUS_ABSOLUTE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focus move absolute",
- .min = 0,
- .max = IMX_MAX_FOCUS_POS,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCUS_RELATIVE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focus move relative",
- .min = IMX_MAX_FOCUS_NEG,
- .max = IMX_MAX_FOCUS_POS,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCUS_STATUS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focus status",
- .min = 0,
- .max = 100, /* allow enum to grow in the future */
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VCM_SLEW,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "vcm slew",
- .min = 0,
- .max = IMX_VCM_SLEW_STEP_MAX,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VCM_TIMEING,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "vcm step time",
- .min = 0,
- .max = IMX_VCM_SLEW_TIME_MAX,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCAL_ABSOLUTE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focal length",
- .min = IMX_FOCAL_LENGTH_DEFAULT,
- .max = IMX_FOCAL_LENGTH_DEFAULT,
- .step = 0x01,
- .def = IMX_FOCAL_LENGTH_DEFAULT,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FNUMBER_ABSOLUTE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "f-number",
- .min = IMX_F_NUMBER_DEFAULT,
- .max = IMX_F_NUMBER_DEFAULT,
- .step = 0x01,
- .def = IMX_F_NUMBER_DEFAULT,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FNUMBER_RANGE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "f-number range",
- .min = IMX_F_NUMBER_RANGE,
- .max = IMX_F_NUMBER_RANGE,
- .step = 0x01,
- .def = IMX_F_NUMBER_RANGE,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_BIN_FACTOR_HORZ,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "horizontal binning factor",
- .min = 0,
- .max = IMX_BIN_FACTOR_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_BIN_FACTOR_VERT,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "vertical binning factor",
- .min = 0,
- .max = IMX_BIN_FACTOR_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_LINK_FREQ,
- .name = "Link Frequency",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 1,
- .max = 1500000 * 1000,
- .step = 1,
- .def = 1,
- .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_PIXEL_RATE,
- .name = "Pixel Rate",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_HBLANK,
- .name = "Horizontal Blanking",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = SHRT_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VBLANK,
- .name = "Vertical Blanking",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = SHRT_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_HFLIP,
- .name = "Horizontal Flip",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VFLIP,
- .name = "Vertical Flip",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
-};
-
-/*
- * distance - calculate the distance
- * @res: resolution
- * @w: width
- * @h: height
- *
- * Get the gap between the resolution and w/h.
- * Resolutions with width/height smaller than w/h are not considered.
- * Returns the gap value, or -1 on failure.
- */
-#define LARGEST_ALLOWED_RATIO_MISMATCH 600
-static int distance(struct imx_resolution const *res, u32 w, u32 h,
- bool keep_ratio)
-{
- unsigned int w_ratio;
- unsigned int h_ratio;
- int match;
- unsigned int allowed_ratio_mismatch = LARGEST_ALLOWED_RATIO_MISMATCH;
-
- if (!keep_ratio)
- allowed_ratio_mismatch = ~0;
-
- if (w == 0)
- return -1;
- w_ratio = (res->width << 13) / w;
- if (h == 0)
- return -1;
- h_ratio = (res->height << 13) / h;
- if (h_ratio == 0)
- return -1;
- match = abs(((w_ratio << 13) / h_ratio) - ((int)8192));
-
- if ((w_ratio < (int)8192) || (h_ratio < (int)8192) ||
- (match > allowed_ratio_mismatch))
- return -1;
-
- return w_ratio + h_ratio;
-}
-
-/* Return the nearest higher resolution index */
-static int nearest_resolution_index(struct v4l2_subdev *sd, int w, int h)
-{
- int i;
- int idx = -1;
- int dist;
- int fps_diff;
- int min_fps_diff = INT_MAX;
- int min_dist = INT_MAX;
- const struct imx_resolution *tmp_res = NULL;
- struct imx_device *dev = to_imx_sensor(sd);
- bool again = 1;
-retry:
- for (i = 0; i < dev->entries_curr_table; i++) {
- tmp_res = &dev->curr_res_table[i];
- dist = distance(tmp_res, w, h, again);
- if (dist == -1)
- continue;
- if (dist < min_dist) {
- min_dist = dist;
- idx = i;
- }
- if (dist == min_dist) {
- fps_diff = __imx_min_fps_diff(dev->targetfps,
- tmp_res->fps_options);
- if (fps_diff < min_fps_diff) {
- min_fps_diff = fps_diff;
- idx = i;
- }
- }
- }
-
- /*
- * FIXME!
- * only IMX135 for Saltbay and IMX227 use this algorithm
- */
- if (idx == -1 && again == true && dev->new_res_sel_method) {
- again = false;
- goto retry;
- }
- return idx;
-}
-
-/* Call with ctrl_handler.lock hold */
-static int __adjust_hvblank(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- u16 new_frame_length_lines, new_line_length_pck;
- int ret;
-
- /*
-	 * No need to adjust h/v blank if no debug value has been set.
-	 * Note that there is no other checking on the h/v blank values,
-	 * as h/v blank can be set to any value above zero for debug purposes.
- */
- if (!dev->v_blank->val || !dev->h_blank->val)
- return 0;
-
- new_frame_length_lines = dev->curr_res_table[dev->fmt_idx].height +
- dev->v_blank->val;
- new_line_length_pck = dev->curr_res_table[dev->fmt_idx].width +
- dev->h_blank->val;
-
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->line_length_pixels, new_line_length_pck);
- if (ret)
- return ret;
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->frame_length_lines, new_frame_length_lines);
- if (ret)
- return ret;
-
- dev->lines_per_frame = new_frame_length_lines;
- dev->pixels_per_line = new_line_length_pck;
-
- return 0;
-}
-
-static int imx_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct imx_device *dev = to_imx_sensor(sd);
- struct camera_mipi_info *imx_info = NULL;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- const struct imx_resolution *res;
- int lanes = imx_get_lanes(sd);
- int ret;
- u16 data, val;
- int idx;
- if (format->pad)
- return -EINVAL;
- if (!fmt)
- return -EINVAL;
-
- imx_info = v4l2_get_subdev_hostdata(sd);
- if (imx_info == NULL)
- return -EINVAL;
- if ((fmt->width > imx_max_res[dev->sensor_id].res_max_width)
- || (fmt->height > imx_max_res[dev->sensor_id].res_max_height)) {
- fmt->width = imx_max_res[dev->sensor_id].res_max_width;
- fmt->height = imx_max_res[dev->sensor_id].res_max_height;
- } else {
- idx = nearest_resolution_index(sd, fmt->width, fmt->height);
-
- /*
- * nearest_resolution_index() doesn't return smaller
-		 * nearest_resolution_index() doesn't return smaller
-		 * resolutions. If it fails, it means the requested
-		 * resolution is higher than we can support. Fall back
- */
- if (idx == -1)
- idx = dev->entries_curr_table - 1;
-
- fmt->width = dev->curr_res_table[idx].width;
- fmt->height = dev->curr_res_table[idx].height;
- }
-
- fmt->code = dev->format.code;
- if(format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
- return 0;
- }
- mutex_lock(&dev->input_lock);
-
- dev->fmt_idx = nearest_resolution_index(sd, fmt->width, fmt->height);
- if (dev->fmt_idx == -1) {
- ret = -EINVAL;
- goto out;
- }
- res = &dev->curr_res_table[dev->fmt_idx];
-
- /* Adjust the FPS selection based on the resolution selected */
- dev->fps_index = __imx_nearest_fps_index(dev->targetfps,
- res->fps_options);
- dev->fps = res->fps_options[dev->fps_index].fps;
- dev->regs = res->fps_options[dev->fps_index].regs;
- if (!dev->regs)
- dev->regs = res->regs;
-
- ret = imx_write_reg_array(client, dev->regs);
- if (ret)
- goto out;
-
- if (dev->sensor_id == IMX132_ID && lanes > 0) {
- static const u8 imx132_rgpltd[] = {
- 2, /* 1 lane: /1 */
- 0, /* 2 lanes: /2 */
- 0, /* undefined */
- 1, /* 4 lanes: /4 */
- };
- ret = imx_write_reg(client, IMX_8BIT, IMX132_208_VT_RGPLTD,
- imx132_rgpltd[lanes - 1]);
- if (ret)
- goto out;
- }
-
- dev->pixels_per_line = res->fps_options[dev->fps_index].pixels_per_line;
- dev->lines_per_frame = res->fps_options[dev->fps_index].lines_per_frame;
-
- /* dbg h/v blank time */
- __adjust_hvblank(sd);
-
- ret = __imx_update_exposure_timing(client, dev->coarse_itg,
- dev->pixels_per_line, dev->lines_per_frame);
- if (ret)
- goto out;
-
- ret = __imx_update_gain(sd, dev->gain);
- if (ret)
- goto out;
-
- ret = __imx_update_digital_gain(client, dev->digital_gain);
- if (ret)
- goto out;
-
- ret = imx_write_reg_array(client, dev->param_update);
- if (ret)
- goto out;
-
- ret = imx_get_intg_factor(client, imx_info, dev->regs);
- if (ret)
- goto out;
-
- ret = imx_read_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, &val);
- if (ret)
- goto out;
- val &= (IMX_VFLIP_BIT|IMX_HFLIP_BIT);
- imx_info->raw_bayer_order = imx_bayer_order_mapping[val];
- dev->format.code = imx_translate_bayer_order(
- imx_info->raw_bayer_order);
-
- /*
- * Fill meta data info. add imx135 metadata setting for RAW10 format
- */
- switch (dev->sensor_id) {
- case IMX135_ID:
- ret = imx_read_reg(client, 2,
- IMX135_OUTPUT_DATA_FORMAT_REG, &data);
- if (ret)
- goto out;
- /*
-		 * The IMX135 can support various data formats such as
-		 * RAW6/8/10/12/14.
-		 * 1. The data format is RAW10:
-		 *    metadata width = current resolution width (pixels) * 10 / 8
-		 * 2. The data format is RAW6 or RAW8:
-		 *    metadata width = current resolution width (pixels);
-		 * 3. Other data formats (RAW12/14 etc.):
-		 *    TBD.
- */
- if (data == IMX135_OUTPUT_FORMAT_RAW10)
- /* the data format is RAW10. */
- imx_info->metadata_width = res->width * 10 / 8;
- else
- /* The data format is RAW6/8/12/14/ etc. */
- imx_info->metadata_width = res->width;
-
- imx_info->metadata_height = IMX135_EMBEDDED_DATA_LINE_NUM;
-
- if (imx_info->metadata_effective_width == NULL)
- imx_info->metadata_effective_width =
- imx135_embedded_effective_size;
-
- break;
- case IMX227_ID:
- ret = imx_read_reg(client, 2, IMX227_OUTPUT_DATA_FORMAT_REG,
- &data);
- if (ret)
- goto out;
- if (data == IMX227_OUTPUT_FORMAT_RAW10)
- /* the data format is RAW10. */
- imx_info->metadata_width = res->width * 10 / 8;
- else
- /* The data format is RAW6/8/12/14/ etc. */
- imx_info->metadata_width = res->width;
-
- imx_info->metadata_height = IMX227_EMBEDDED_DATA_LINE_NUM;
-
- if (imx_info->metadata_effective_width == NULL)
- imx_info->metadata_effective_width =
- imx227_embedded_effective_size;
-
- break;
- default:
- imx_info->metadata_width = 0;
- imx_info->metadata_height = 0;
- imx_info->metadata_effective_width = NULL;
- break;
- }
-
-out:
- mutex_unlock(&dev->input_lock);
- return ret;
-}
-
-
-static int imx_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct imx_device *dev = to_imx_sensor(sd);
-
- if (format->pad)
- return -EINVAL;
- if (!fmt)
- return -EINVAL;
-
- mutex_lock(&dev->input_lock);
- fmt->width = dev->curr_res_table[dev->fmt_idx].width;
- fmt->height = dev->curr_res_table[dev->fmt_idx].height;
- fmt->code = dev->format.code;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int imx_detect(struct i2c_client *client, u16 *id, u8 *revision)
-{
- struct i2c_adapter *adapter = client->adapter;
-
- /* i2c check */
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
- return -ENODEV;
-
- /* check sensor chip ID */
- if (imx_read_reg(client, IMX_16BIT, IMX132_175_208_219_CHIP_ID, id)) {
- v4l2_err(client, "sensor_id = 0x%x\n", *id);
- return -ENODEV;
- }
-
- if (*id == IMX132_ID || *id == IMX175_ID ||
- *id == IMX208_ID || *id == IMX219_ID)
- goto found;
-
- if (imx_read_reg(client, IMX_16BIT, IMX134_135_227_CHIP_ID, id)) {
- v4l2_err(client, "sensor_id = 0x%x\n", *id);
- return -ENODEV;
- }
- if (*id != IMX134_ID && *id != IMX135_ID && *id != IMX227_ID) {
- v4l2_err(client, "no imx sensor found\n");
- return -ENODEV;
- }
-found:
- v4l2_info(client, "sensor_id = 0x%x\n", *id);
-
- /* TODO - need to be updated */
- *revision = 0;
-
- return 0;
-}
-
-static void __imx_print_timing(struct v4l2_subdev *sd)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u16 width = dev->curr_res_table[dev->fmt_idx].width;
- u16 height = dev->curr_res_table[dev->fmt_idx].height;
-
- dev_dbg(&client->dev, "Dump imx timing in stream on:\n");
- dev_dbg(&client->dev, "width: %d:\n", width);
- dev_dbg(&client->dev, "height: %d:\n", height);
- dev_dbg(&client->dev, "pixels_per_line: %d:\n", dev->pixels_per_line);
- dev_dbg(&client->dev, "line per frame: %d:\n", dev->lines_per_frame);
- dev_dbg(&client->dev, "pix freq: %d:\n", dev->vt_pix_clk_freq_mhz);
- dev_dbg(&client->dev, "init fps: %d:\n", dev->vt_pix_clk_freq_mhz /
- dev->pixels_per_line / dev->lines_per_frame);
- dev_dbg(&client->dev, "HBlank: %d nS:\n",
- 1000 * (dev->pixels_per_line - width) /
- (dev->vt_pix_clk_freq_mhz / 1000000));
- dev_dbg(&client->dev, "VBlank: %d uS:\n",
- (dev->lines_per_frame - height) * dev->pixels_per_line /
- (dev->vt_pix_clk_freq_mhz / 1000000));
-}
-
-/*
- * imx stream on/off
- */
-static int imx_s_stream(struct v4l2_subdev *sd, int enable)
-{
- int ret;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- if (enable) {
- /* Noise reduction & dead pixel applied before streaming */
- if (dev->fw == NULL) {
- dev_warn(&client->dev, "No MSR loaded from library");
- } else {
- ret = apply_msr_data(client, dev->fw);
- if (ret) {
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- }
- ret = imx_test_pattern(sd);
- if (ret) {
- v4l2_err(client, "Configure test pattern failed.\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- __imx_print_timing(sd);
- ret = imx_write_reg_array(client, imx_streaming);
- if (ret != 0) {
- v4l2_err(client, "write_reg_array err\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- dev->streaming = 1;
- if (dev->vcm_driver && dev->vcm_driver->t_focus_abs_init)
- dev->vcm_driver->t_focus_abs_init(sd);
- } else {
- ret = imx_write_reg_array(client, imx_soft_standby);
- if (ret != 0) {
- v4l2_err(client, "write_reg_array err\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- dev->streaming = 0;
- dev->targetfps = 0;
- }
- mutex_unlock(&dev->input_lock);
-
- return 0;
-}
-
-static int __update_imx_device_settings(struct imx_device *dev, u16 sensor_id)
-{
- /* IMX on other platforms is not supported yet */
- return -EINVAL;
-}
-
-static int imx_s_config(struct v4l2_subdev *sd,
- int irq, void *pdata)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 sensor_revision;
- u16 sensor_id;
- int ret;
- if (pdata == NULL)
- return -ENODEV;
-
- dev->platform_data = pdata;
-
- mutex_lock(&dev->input_lock);
-
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- mutex_unlock(&dev->input_lock);
- dev_err(&client->dev, "imx platform init err\n");
- return ret;
- }
- }
- /*
- * Power off the module first.
- *
- * The first power-on by the board leaves the power/GPIO pins in an undefined state.
- */
- ret = __imx_s_power(sd, 0);
- if (ret) {
- v4l2_err(client, "imx power-down err.\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
-
- ret = __imx_s_power(sd, 1);
- if (ret) {
- v4l2_err(client, "imx power-up err.\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
-
- ret = dev->platform_data->csi_cfg(sd, 1);
- if (ret)
- goto fail_csi_cfg;
-
- /* config & detect sensor */
- ret = imx_detect(client, &sensor_id, &sensor_revision);
- if (ret) {
- v4l2_err(client, "imx_detect err s_config.\n");
- goto fail_detect;
- }
-
- dev->sensor_id = sensor_id;
- dev->sensor_revision = sensor_revision;
-
- /* Resolution settings depend on sensor type and platform */
- ret = __update_imx_device_settings(dev, dev->sensor_id);
- if (ret)
- goto fail_detect;
- /* Read sensor's OTP data */
- dev->otp_data = dev->otp_driver->otp_read(sd,
- dev->otp_driver->dev_addr, dev->otp_driver->start_addr,
- dev->otp_driver->size);
-
- /* power off sensor */
- ret = __imx_s_power(sd, 0);
-
- mutex_unlock(&dev->input_lock);
- if (ret)
- v4l2_err(client, "imx power-down err.\n");
-
- return ret;
-
-fail_detect:
- dev->platform_data->csi_cfg(sd, 0);
-fail_csi_cfg:
- __imx_s_power(sd, 0);
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
- mutex_unlock(&dev->input_lock);
- dev_err(&client->dev, "sensor power-gating failed\n");
- return ret;
-}
-
-static int
-imx_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (code->index >= MAX_FMTS)
- return -EINVAL;
-
- mutex_lock(&dev->input_lock);
- code->code = dev->format.code;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int
-imx_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- int index = fse->index;
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- if (index >= dev->entries_curr_table) {
- mutex_unlock(&dev->input_lock);
- return -EINVAL;
- }
-
- fse->min_width = dev->curr_res_table[index].width;
- fse->min_height = dev->curr_res_table[index].height;
- fse->max_width = dev->curr_res_table[index].width;
- fse->max_height = dev->curr_res_table[index].height;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int
-imx_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *param)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- dev->run_mode = param->parm.capture.capturemode;
-
- switch (dev->run_mode) {
- case CI_MODE_VIDEO:
- dev->curr_res_table = dev->mode_tables->res_video;
- dev->entries_curr_table = dev->mode_tables->n_res_video;
- break;
- case CI_MODE_STILL_CAPTURE:
- dev->curr_res_table = dev->mode_tables->res_still;
- dev->entries_curr_table = dev->mode_tables->n_res_still;
- break;
- default:
- dev->curr_res_table = dev->mode_tables->res_preview;
- dev->entries_curr_table = dev->mode_tables->n_res_preview;
- }
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int imx_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- interval->interval.denominator = dev->fps;
- interval->interval.numerator = 1;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int __imx_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- const struct imx_resolution *res =
- &dev->curr_res_table[dev->fmt_idx];
- struct camera_mipi_info *imx_info = NULL;
- unsigned short pixels_per_line;
- unsigned short lines_per_frame;
- unsigned int fps_index;
- int fps;
- int ret = 0;
-
-
- imx_info = v4l2_get_subdev_hostdata(sd);
- if (imx_info == NULL)
- return -EINVAL;
-
- if (!interval->interval.numerator)
- interval->interval.numerator = 1;
-
- fps = interval->interval.denominator / interval->interval.numerator;
-
- if (!fps)
- return -EINVAL;
-
- dev->targetfps = fps;
- /* No need to proceed further if we are not streaming */
- if (!dev->streaming)
- return 0;
-
- /* Ignore if we are already using the required FPS. */
- if (fps == dev->fps)
- return 0;
-
- /*
- * From here on the sensor is already streaming, so adjust the fps dynamically.
- */
- fps_index = __imx_above_nearest_fps_index(fps, res->fps_options);
- if (fps > res->fps_options[fps_index].fps) {
- /*
- * Without a higher-fps setting, increasing the fps by adjusting
- * lines per frame is not supported.
- */
- dev_err(&client->dev, "Could not support fps: %d.\n", fps);
- return -EINVAL;
- }
-
- if (res->fps_options[fps_index].regs &&
- res->fps_options[fps_index].regs != dev->regs) {
- /*
- * The requested fps needs a register setting that differs from
- * the current one; do not apply it here, as that may have
- * unexpected results, e.g. on PLL or IQ.
- */
- dev_dbg(&client->dev,
- "Sensor is streaming, not applying new sensor setting\n");
- if (fps > res->fps_options[dev->fps_index].fps) {
- /*
- * Increasing the fps from the low-fps setting is not
- * supported, as the high-fps setting cannot be used and
- * the requested fps is above the current setting's fps.
- */
- dev_warn(&client->dev,
- "Could not support fps: %d, keep current: %d.\n",
- fps, dev->fps);
- return 0;
- }
- } else {
- dev->fps_index = fps_index;
- dev->fps = res->fps_options[dev->fps_index].fps;
- }
-
- /* Update the new frametimings based on FPS */
- pixels_per_line = res->fps_options[dev->fps_index].pixels_per_line;
- lines_per_frame = res->fps_options[dev->fps_index].lines_per_frame;
-
- if (fps > res->fps_options[fps_index].fps) {
- /*
- * Without a higher-fps setting, increasing the fps by adjusting
- * lines per frame is not supported.
- */
- dev_warn(&client->dev, "Could not support fps: %d. Use:%d.\n",
- fps, res->fps_options[fps_index].fps);
- goto done;
- }
-
- /* if the new setting does not match exactly */
- if (dev->fps != fps) {
-#define MAX_LINES_PER_FRAME 0xffff
- dev_dbg(&client->dev, "adjusting fps using lines_per_frame\n");
- /*
- * FIXME!
- * 1: check DS on max value of lines_per_frame
- * 2: consider using pixels per line for more range?
- */
- if (dev->lines_per_frame * dev->fps / fps >
- MAX_LINES_PER_FRAME) {
- dev_warn(&client->dev,
- "adjusted lines_per_frame out of range, using max value.\n");
- lines_per_frame = MAX_LINES_PER_FRAME;
- } else {
- lines_per_frame = lines_per_frame * dev->fps / fps;
- }
- }
-done:
- /* Update the new frametimings based on FPS */
- dev->pixels_per_line = pixels_per_line;
- dev->lines_per_frame = lines_per_frame;
-
- /* Update the new values so that user side knows the current settings */
- ret = __imx_update_exposure_timing(client,
- dev->coarse_itg, dev->pixels_per_line, dev->lines_per_frame);
- if (ret)
- return ret;
-
- dev->fps = fps;
-
- ret = imx_get_intg_factor(client, imx_info, dev->regs);
- if (ret)
- return ret;
-
- interval->interval.denominator = res->fps_options[dev->fps_index].fps;
- interval->interval.numerator = 1;
- __imx_print_timing(sd);
-
- return ret;
-}
-
-static int imx_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- int ret;
-
- mutex_lock(&dev->input_lock);
- ret = __imx_s_frame_interval(sd, interval);
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-static int imx_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- *frames = dev->curr_res_table[dev->fmt_idx].skip_frames;
- mutex_unlock(&dev->input_lock);
-
- return 0;
-}
-
-static const struct v4l2_subdev_sensor_ops imx_sensor_ops = {
- .g_skip_frames = imx_g_skip_frames,
-};
-
-static const struct v4l2_subdev_video_ops imx_video_ops = {
- .s_stream = imx_s_stream,
- .s_parm = imx_s_parm,
- .g_frame_interval = imx_g_frame_interval,
- .s_frame_interval = imx_s_frame_interval,
-};
-
-static const struct v4l2_subdev_core_ops imx_core_ops = {
- .s_power = imx_s_power,
- .ioctl = imx_ioctl,
- .init = imx_init,
-};
-
-static const struct v4l2_subdev_pad_ops imx_pad_ops = {
- .enum_mbus_code = imx_enum_mbus_code,
- .enum_frame_size = imx_enum_frame_size,
- .get_fmt = imx_get_fmt,
- .set_fmt = imx_set_fmt,
-};
-
-static const struct v4l2_subdev_ops imx_ops = {
- .core = &imx_core_ops,
- .video = &imx_video_ops,
- .pad = &imx_pad_ops,
- .sensor = &imx_sensor_ops,
-};
-
-static const struct media_entity_operations imx_entity_ops = {
- .link_setup = NULL,
-};
-
-static int imx_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx_device *dev = to_imx_sensor(sd);
-
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
- media_entity_cleanup(&dev->sd.entity);
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
- dev->platform_data->csi_cfg(sd, 0);
- v4l2_device_unregister_subdev(sd);
- release_msr_list(client, dev->fw);
- kfree(dev);
-
- return 0;
-}
-
-static int __imx_init_ctrl_handler(struct imx_device *dev)
-{
- struct v4l2_ctrl_handler *hdl;
- int i;
-
- hdl = &dev->ctrl_handler;
-
- v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(imx_controls));
-
- for (i = 0; i < ARRAY_SIZE(imx_controls); i++)
- v4l2_ctrl_new_custom(&dev->ctrl_handler,
- &imx_controls[i], NULL);
-
- dev->pixel_rate = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_PIXEL_RATE);
- dev->h_blank = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_HBLANK);
- dev->v_blank = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_VBLANK);
- dev->link_freq = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_LINK_FREQ);
- dev->h_flip = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_HFLIP);
- dev->v_flip = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_VFLIP);
- dev->tp_mode = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN);
- dev->tp_r = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN_COLOR_R);
- dev->tp_gr = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN_COLOR_GR);
- dev->tp_gb = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN_COLOR_GB);
- dev->tp_b = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN_COLOR_B);
-
- if (dev->ctrl_handler.error || dev->pixel_rate == NULL
- || dev->h_blank == NULL || dev->v_blank == NULL
- || dev->h_flip == NULL || dev->v_flip == NULL
- || dev->link_freq == NULL) {
- return dev->ctrl_handler.error;
- }
-
- dev->ctrl_handler.lock = &dev->input_lock;
- dev->sd.ctrl_handler = hdl;
- v4l2_ctrl_handler_setup(&dev->ctrl_handler);
-
- return 0;
-}
-
-static void imx_update_reg_info(struct imx_device *dev)
-{
- if (dev->sensor_id == IMX219_ID) {
- dev->reg_addr = &imx219_addr;
- dev->param_hold = imx219_param_hold;
- dev->param_update = imx219_param_update;
- } else {
- dev->reg_addr = &imx_addr;
- dev->param_hold = imx_param_hold;
- dev->param_update = imx_param_update;
- }
-}
-
-static int imx_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct imx_device *dev;
- struct camera_mipi_info *imx_info = NULL;
- int ret;
- char *msr_file_name = NULL;
-
- /* allocate sensor device & init sub device */
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- v4l2_err(client, "%s: out of memory\n", __func__);
- return -ENOMEM;
- }
-
- mutex_init(&dev->input_lock);
-
- dev->i2c_id = id->driver_data;
- dev->fmt_idx = 0;
- dev->sensor_id = IMX_ID_DEFAULT;
- dev->vcm_driver = &imx_vcms[IMX_ID_DEFAULT];
- dev->digital_gain = 256;
-
- v4l2_i2c_subdev_init(&(dev->sd), client, &imx_ops);
-
- if (client->dev.platform_data) {
- ret = imx_s_config(&dev->sd, client->irq,
- client->dev.platform_data);
- if (ret)
- goto out_free;
- }
- imx_info = v4l2_get_subdev_hostdata(&dev->sd);
-
- /*
- * sd->name is set to the sensor driver name by the v4l2 core;
- * change it to the sensor name here.
- */
- imx_update_reg_info(dev);
- snprintf(dev->sd.name, sizeof(dev->sd.name), "%s%x %d-%04x",
- IMX_SUBDEV_PREFIX, dev->sensor_id,
- i2c_adapter_id(client->adapter), client->addr);
-
- ret = __imx_init_ctrl_handler(dev);
- if (ret)
- goto out_ctrl_handler_free;
-
- dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- dev->pad.flags = MEDIA_PAD_FL_SOURCE;
- dev->format.code = imx_translate_bayer_order(
- imx_info->raw_bayer_order);
- dev->sd.entity.ops = &imx_entity_ops;
- dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-
- ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
- if (ret) {
- imx_remove(client);
- return ret;
- }
-
- /* Load the noise reduction and dead pixel registers from the cpf file */
- if (dev->platform_data->msr_file_name != NULL)
- msr_file_name = dev->platform_data->msr_file_name();
- if (msr_file_name) {
- ret = load_msr_list(client, msr_file_name, &dev->fw);
- if (ret) {
- imx_remove(client);
- return ret;
- }
- } else {
- dev_warn(&client->dev, "Drvb file not present");
- }
-
- return ret;
-
-out_ctrl_handler_free:
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
-
-out_free:
- v4l2_device_unregister_subdev(&dev->sd);
- kfree(dev);
- return ret;
-}
-
-static const struct i2c_device_id imx_ids[] = {
- {IMX_NAME_175, IMX175_ID},
- {IMX_NAME_135, IMX135_ID},
- {IMX_NAME_135_FUJI, IMX135_FUJI_ID},
- {IMX_NAME_134, IMX134_ID},
- {IMX_NAME_132, IMX132_ID},
- {IMX_NAME_208, IMX208_ID},
- {IMX_NAME_219, IMX219_ID},
- {IMX_NAME_227, IMX227_ID},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, imx_ids);
-
-static struct i2c_driver imx_driver = {
- .driver = {
- .name = IMX_DRIVER,
- },
- .probe = imx_probe,
- .remove = imx_remove,
- .id_table = imx_ids,
-};
-
-static __init int init_imx(void)
-{
- return i2c_add_driver(&imx_driver);
-}
-
-static __exit void exit_imx(void)
-{
- i2c_del_driver(&imx_driver);
-}
-
-module_init(init_imx);
-module_exit(exit_imx);
-
-MODULE_DESCRIPTION("A low-level driver for Sony IMX sensors");
-MODULE_AUTHOR("Shenbo Huang <shenbo.huang@intel.com>");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.h b/drivers/staging/media/atomisp/i2c/imx/imx.h
deleted file mode 100644
index 30beb2a0ed93..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx.h
+++ /dev/null
@@ -1,737 +0,0 @@
-/*
- * Support for Sony IMX camera sensor.
- *
- * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IMX_H__
-#define __IMX_H__
-#include "../../include/linux/atomisp_platform.h"
-#include "../../include/linux/atomisp.h"
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/videodev2.h>
-#include <linux/v4l2-mediabus.h>
-#include <media/media-entity.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-subdev.h>
-#include "imx175.h"
-#include "imx135.h"
-#include "imx134.h"
-#include "imx132.h"
-#include "imx208.h"
-#include "imx219.h"
-#include "imx227.h"
-
-#define IMX_MCLK 192
-
-/* TODO - This should be added into include/linux/videodev2.h */
-#ifndef V4L2_IDENT_IMX
-#define V4L2_IDENT_IMX 8245
-#endif
-
-#define IMX_MAX_AE_LUT_LENGTH 5
-/*
- * imx System control registers
- */
-#define IMX_MASK_5BIT 0x1F
-#define IMX_MASK_4BIT 0xF
-#define IMX_MASK_3BIT 0x7
-#define IMX_MASK_2BIT 0x3
-#define IMX_MASK_8BIT 0xFF
-#define IMX_MASK_11BIT 0x7FF
-#define IMX_INTG_BUF_COUNT 2
-
-#define IMX_FINE_INTG_TIME 0x1E8
-
-#define IMX_VT_PIX_CLK_DIV 0x0301
-#define IMX_VT_SYS_CLK_DIV 0x0303
-#define IMX_PRE_PLL_CLK_DIV 0x0305
-#define IMX227_IOP_PRE_PLL_CLK_DIV 0x030D
-#define IMX227_PLL_MULTIPLIER 0x0306
-#define IMX227_IOP_PLL_MULTIPLIER 0x030E
-#define IMX227_PLL_MULTI_DRIVE 0x0310
-#define IMX227_OP_PIX_CLK_DIV 0x0309
-#define IMX227_OP_SYS_CLK_DIV 0x030B
-#define IMX_PLL_MULTIPLIER 0x030C
-#define IMX_OP_PIX_DIV 0x0309
-#define IMX_OP_SYS_DIV 0x030B
-#define IMX_FRAME_LENGTH_LINES 0x0340
-#define IMX_LINE_LENGTH_PIXELS 0x0342
-#define IMX_COARSE_INTG_TIME_MIN 0x1004
-#define IMX_COARSE_INTG_TIME_MAX 0x1006
-#define IMX_BINNING_ENABLE 0x0390
-#define IMX227_BINNING_ENABLE 0x0900
-#define IMX_BINNING_TYPE 0x0391
-#define IMX227_BINNING_TYPE 0x0901
-#define IMX_READ_MODE 0x0390
-#define IMX227_READ_MODE 0x0900
-
-#define IMX_HORIZONTAL_START_H 0x0344
-#define IMX_VERTICAL_START_H 0x0346
-#define IMX_HORIZONTAL_END_H 0x0348
-#define IMX_VERTICAL_END_H 0x034a
-#define IMX_HORIZONTAL_OUTPUT_SIZE_H 0x034c
-#define IMX_VERTICAL_OUTPUT_SIZE_H 0x034e
-
-/* Post Divider setting register for imx132 and imx208 */
-#define IMX132_208_VT_RGPLTD 0x30A4
-
-/* Multiplier setting register for imx132, imx208, and imx219 */
-#define IMX132_208_219_PLL_MULTIPLIER 0x0306
-
-#define IMX_COARSE_INTEGRATION_TIME 0x0202
-#define IMX_TEST_PATTERN_MODE 0x0600
-#define IMX_TEST_PATTERN_COLOR_R 0x0602
-#define IMX_TEST_PATTERN_COLOR_GR 0x0604
-#define IMX_TEST_PATTERN_COLOR_B 0x0606
-#define IMX_TEST_PATTERN_COLOR_GB 0x0608
-#define IMX_IMG_ORIENTATION 0x0101
-#define IMX_VFLIP_BIT 2
-#define IMX_HFLIP_BIT 1
-#define IMX_GLOBAL_GAIN 0x0205
-#define IMX_SHORT_AGC_GAIN 0x0233
-#define IMX_DGC_ADJ 0x020E
-#define IMX_DGC_LEN 10
-#define IMX227_DGC_LEN 4
-#define IMX_MAX_EXPOSURE_SUPPORTED 0xfffb
-#define IMX_MAX_GLOBAL_GAIN_SUPPORTED 0x00ff
-#define IMX_MAX_DIGITAL_GAIN_SUPPORTED 0x0fff
-
-#define MAX_FMTS 1
-#define IMX_OTP_DATA_SIZE 1280
-
-#define IMX_SUBDEV_PREFIX "imx"
-#define IMX_DRIVER "imx1x5"
-
-/* Sensor ids from identification register */
-#define IMX_NAME_134 "imx134"
-#define IMX_NAME_135 "imx135"
-#define IMX_NAME_175 "imx175"
-#define IMX_NAME_132 "imx132"
-#define IMX_NAME_208 "imx208"
-#define IMX_NAME_219 "imx219"
-#define IMX_NAME_227 "imx227"
-#define IMX175_ID 0x0175
-#define IMX135_ID 0x0135
-#define IMX134_ID 0x0134
-#define IMX132_ID 0x0132
-#define IMX208_ID 0x0208
-#define IMX219_ID 0x0219
-#define IMX227_ID 0x0227
-
-/* Sensor id based on i2c_device_id table
- * (Fuji module cannot be detected based on sensor registers) */
-#define IMX135_FUJI_ID 0x0136
-#define IMX_NAME_135_FUJI "imx135fuji"
-
-/* imx175 - use dw9714 vcm */
-#define IMX175_MERRFLD 0x175
-#define IMX175_VALLEYVIEW 0x176
-#define IMX135_SALTBAY 0x135
-#define IMX135_VICTORIABAY 0x136
-#define IMX132_SALTBAY 0x132
-#define IMX134_VALLEYVIEW 0x134
-#define IMX208_MOFD_PD2 0x208
-#define IMX219_MFV0_PRH 0x219
-#define IMX227_SAND 0x227
-
-/* OTP-specific settings */
-#define E2PROM_ADDR 0xa0
-#define E2PROM_LITEON_12P1BA869D_ADDR 0xa0
-#define E2PROM_ABICO_SS89A839_ADDR 0xa8
-#define DEFAULT_OTP_SIZE 1280
-#define IMX135_OTP_SIZE 1280
-#define IMX219_OTP_SIZE 2048
-#define IMX227_OTP_SIZE 2560
-#define E2PROM_LITEON_12P1BA869D_SIZE 544
-
-#define IMX_ID_DEFAULT 0x0000
-#define IMX132_175_208_219_CHIP_ID 0x0000
-#define IMX134_135_CHIP_ID 0x0016
-#define IMX134_135_227_CHIP_ID 0x0016
-
-#define IMX175_RES_WIDTH_MAX 3280
-#define IMX175_RES_HEIGHT_MAX 2464
-#define IMX135_RES_WIDTH_MAX 4208
-#define IMX135_RES_HEIGHT_MAX 3120
-#define IMX132_RES_WIDTH_MAX 1936
-#define IMX132_RES_HEIGHT_MAX 1096
-#define IMX134_RES_WIDTH_MAX 3280
-#define IMX134_RES_HEIGHT_MAX 2464
-#define IMX208_RES_WIDTH_MAX 1936
-#define IMX208_RES_HEIGHT_MAX 1096
-#define IMX219_RES_WIDTH_MAX 3280
-#define IMX219_RES_HEIGHT_MAX 2464
-#define IMX227_RES_WIDTH_MAX 2400
-#define IMX227_RES_HEIGHT_MAX 2720
-
-/* Defines for lens/VCM */
-#define IMX_FOCAL_LENGTH_NUM 369 /*3.69mm*/
-#define IMX_FOCAL_LENGTH_DEM 100
-#define IMX_F_NUMBER_DEFAULT_NUM 22
-#define IMX_F_NUMBER_DEM 10
-#define IMX_INVALID_CONFIG 0xffffffff
-#define IMX_MAX_FOCUS_POS 1023
-#define IMX_MAX_FOCUS_NEG (-1023)
-#define IMX_VCM_SLEW_STEP_MAX 0x3f
-#define IMX_VCM_SLEW_TIME_MAX 0x1f
-
-#define IMX_BIN_FACTOR_MAX 4
-#define IMX_INTEGRATION_TIME_MARGIN 4
-/*
- * focal length bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define IMX_FOCAL_LENGTH_DEFAULT 0x1710064
-
-/*
- * current f-number bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define IMX_F_NUMBER_DEFAULT 0x16000a
-
-/*
- * f-number range bits definition:
- * bits 31-24: max f-number numerator
- * bits 23-16: max f-number denominator
- * bits 15-8: min f-number numerator
- * bits 7-0: min f-number denominator
- */
-#define IMX_F_NUMBER_RANGE 0x160a160a
-
-struct imx_vcm {
- int (*power_up)(struct v4l2_subdev *sd);
- int (*power_down)(struct v4l2_subdev *sd);
- int (*t_focus_abs)(struct v4l2_subdev *sd, s32 value);
- int (*t_focus_abs_init)(struct v4l2_subdev *sd);
- int (*t_focus_rel)(struct v4l2_subdev *sd, s32 value);
- int (*q_focus_status)(struct v4l2_subdev *sd, s32 *value);
- int (*q_focus_abs)(struct v4l2_subdev *sd, s32 *value);
- int (*t_vcm_slew)(struct v4l2_subdev *sd, s32 value);
- int (*t_vcm_timing)(struct v4l2_subdev *sd, s32 value);
-};
-
-struct imx_otp {
- void * (*otp_read)(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
- u32 start_addr;
- u32 size;
- u8 dev_addr;
-};
-
-struct max_res {
- int res_max_width;
- int res_max_height;
-};
-
-struct max_res imx_max_res[] = {
- [IMX175_ID] = {
- .res_max_width = IMX175_RES_WIDTH_MAX,
- .res_max_height = IMX175_RES_HEIGHT_MAX,
- },
- [IMX135_ID] = {
- .res_max_width = IMX135_RES_WIDTH_MAX,
- .res_max_height = IMX135_RES_HEIGHT_MAX,
- },
- [IMX132_ID] = {
- .res_max_width = IMX132_RES_WIDTH_MAX,
- .res_max_height = IMX132_RES_HEIGHT_MAX,
- },
- [IMX134_ID] = {
- .res_max_width = IMX134_RES_WIDTH_MAX,
- .res_max_height = IMX134_RES_HEIGHT_MAX,
- },
- [IMX208_ID] = {
- .res_max_width = IMX208_RES_WIDTH_MAX,
- .res_max_height = IMX208_RES_HEIGHT_MAX,
- },
- [IMX219_ID] = {
- .res_max_width = IMX219_RES_WIDTH_MAX,
- .res_max_height = IMX219_RES_HEIGHT_MAX,
- },
- [IMX227_ID] = {
- .res_max_width = IMX227_RES_WIDTH_MAX,
- .res_max_height = IMX227_RES_HEIGHT_MAX,
- },
-};
-
-struct imx_settings {
- struct imx_reg const *init_settings;
- struct imx_resolution *res_preview;
- struct imx_resolution *res_still;
- struct imx_resolution *res_video;
- int n_res_preview;
- int n_res_still;
- int n_res_video;
-};
-
-struct imx_settings imx_sets[] = {
- [IMX175_MERRFLD] = {
- .init_settings = imx175_init_settings,
- .res_preview = imx175_res_preview,
- .res_still = imx175_res_still,
- .res_video = imx175_res_video,
- .n_res_preview = ARRAY_SIZE(imx175_res_preview),
- .n_res_still = ARRAY_SIZE(imx175_res_still),
- .n_res_video = ARRAY_SIZE(imx175_res_video),
- },
- [IMX175_VALLEYVIEW] = {
- .init_settings = imx175_init_settings,
- .res_preview = imx175_res_preview,
- .res_still = imx175_res_still,
- .res_video = imx175_res_video,
- .n_res_preview = ARRAY_SIZE(imx175_res_preview),
- .n_res_still = ARRAY_SIZE(imx175_res_still),
- .n_res_video = ARRAY_SIZE(imx175_res_video),
- },
- [IMX135_SALTBAY] = {
- .init_settings = imx135_init_settings,
- .res_preview = imx135_res_preview,
- .res_still = imx135_res_still,
- .res_video = imx135_res_video,
- .n_res_preview = ARRAY_SIZE(imx135_res_preview),
- .n_res_still = ARRAY_SIZE(imx135_res_still),
- .n_res_video = ARRAY_SIZE(imx135_res_video),
- },
- [IMX135_VICTORIABAY] = {
- .init_settings = imx135_init_settings,
- .res_preview = imx135_res_preview_mofd,
- .res_still = imx135_res_still_mofd,
- .res_video = imx135_res_video,
- .n_res_preview = ARRAY_SIZE(imx135_res_preview_mofd),
- .n_res_still = ARRAY_SIZE(imx135_res_still_mofd),
- .n_res_video = ARRAY_SIZE(imx135_res_video),
- },
- [IMX132_SALTBAY] = {
- .init_settings = imx132_init_settings,
- .res_preview = imx132_res_preview,
- .res_still = imx132_res_still,
- .res_video = imx132_res_video,
- .n_res_preview = ARRAY_SIZE(imx132_res_preview),
- .n_res_still = ARRAY_SIZE(imx132_res_still),
- .n_res_video = ARRAY_SIZE(imx132_res_video),
- },
- [IMX134_VALLEYVIEW] = {
- .init_settings = imx134_init_settings,
- .res_preview = imx134_res_preview,
- .res_still = imx134_res_still,
- .res_video = imx134_res_video,
- .n_res_preview = ARRAY_SIZE(imx134_res_preview),
- .n_res_still = ARRAY_SIZE(imx134_res_still),
- .n_res_video = ARRAY_SIZE(imx134_res_video),
- },
- [IMX208_MOFD_PD2] = {
- .init_settings = imx208_init_settings,
- .res_preview = imx208_res_preview,
- .res_still = imx208_res_still,
- .res_video = imx208_res_video,
- .n_res_preview = ARRAY_SIZE(imx208_res_preview),
- .n_res_still = ARRAY_SIZE(imx208_res_still),
- .n_res_video = ARRAY_SIZE(imx208_res_video),
- },
- [IMX219_MFV0_PRH] = {
- .init_settings = imx219_init_settings,
- .res_preview = imx219_res_preview,
- .res_still = imx219_res_still,
- .res_video = imx219_res_video,
- .n_res_preview = ARRAY_SIZE(imx219_res_preview),
- .n_res_still = ARRAY_SIZE(imx219_res_still),
- .n_res_video = ARRAY_SIZE(imx219_res_video),
- },
- [IMX227_SAND] = {
- .init_settings = imx227_init_settings,
- .res_preview = imx227_res_preview,
- .res_still = imx227_res_still,
- .res_video = imx227_res_video,
- .n_res_preview = ARRAY_SIZE(imx227_res_preview),
- .n_res_still = ARRAY_SIZE(imx227_res_still),
- .n_res_video = ARRAY_SIZE(imx227_res_video),
- },
-};
-
-struct imx_reg_addr {
- u16 frame_length_lines;
- u16 line_length_pixels;
- u16 horizontal_start_h;
- u16 vertical_start_h;
- u16 horizontal_end_h;
- u16 vertical_end_h;
- u16 horizontal_output_size_h;
- u16 vertical_output_size_h;
- u16 coarse_integration_time;
- u16 img_orientation;
- u16 global_gain;
- u16 dgc_adj;
-};
-
-struct imx_reg_addr imx_addr = {
- IMX_FRAME_LENGTH_LINES,
- IMX_LINE_LENGTH_PIXELS,
- IMX_HORIZONTAL_START_H,
- IMX_VERTICAL_START_H,
- IMX_HORIZONTAL_END_H,
- IMX_VERTICAL_END_H,
- IMX_HORIZONTAL_OUTPUT_SIZE_H,
- IMX_VERTICAL_OUTPUT_SIZE_H,
- IMX_COARSE_INTEGRATION_TIME,
- IMX_IMG_ORIENTATION,
- IMX_GLOBAL_GAIN,
- IMX_DGC_ADJ,
-};
-
-struct imx_reg_addr imx219_addr = {
- IMX219_FRAME_LENGTH_LINES,
- IMX219_LINE_LENGTH_PIXELS,
- IMX219_HORIZONTAL_START_H,
- IMX219_VERTICAL_START_H,
- IMX219_HORIZONTAL_END_H,
- IMX219_VERTICAL_END_H,
- IMX219_HORIZONTAL_OUTPUT_SIZE_H,
- IMX219_VERTICAL_OUTPUT_SIZE_H,
- IMX219_COARSE_INTEGRATION_TIME,
- IMX219_IMG_ORIENTATION,
- IMX219_GLOBAL_GAIN,
- IMX219_DGC_ADJ,
-};
-
-#define v4l2_format_capture_type_entry(_width, _height, \
- _pixelformat, _bytesperline, _colorspace) \
- {\
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,\
- .fmt.pix.width = (_width),\
- .fmt.pix.height = (_height),\
- .fmt.pix.pixelformat = (_pixelformat),\
- .fmt.pix.bytesperline = (_bytesperline),\
- .fmt.pix.colorspace = (_colorspace),\
- .fmt.pix.sizeimage = (_height)*(_bytesperline),\
- }
-
-#define s_output_format_entry(_width, _height, _pixelformat, \
- _bytesperline, _colorspace, _fps) \
- {\
- .v4l2_fmt = v4l2_format_capture_type_entry(_width, \
- _height, _pixelformat, _bytesperline, \
- _colorspace),\
- .fps = (_fps),\
- }
-
-#define s_output_format_reg_entry(_width, _height, _pixelformat, \
- _bytesperline, _colorspace, _fps, _reg_setting) \
- {\
- .s_fmt = s_output_format_entry(_width, _height,\
- _pixelformat, _bytesperline, \
- _colorspace, _fps),\
- .reg_setting = (_reg_setting),\
- }
-
-/* imx device structure */
-struct imx_device {
- struct v4l2_subdev sd;
- struct media_pad pad;
- struct v4l2_mbus_framefmt format;
- struct camera_sensor_platform_data *platform_data;
- struct mutex input_lock; /* serialize sensor's ioctl */
- int fmt_idx;
- int status;
- int streaming;
- int power;
- int run_mode;
- int vt_pix_clk_freq_mhz;
- int fps_index;
- u32 focus;
- u16 sensor_id; /* Sensor id from registers */
- u16 i2c_id; /* Sensor id from i2c_device_id */
- u16 coarse_itg;
- u16 fine_itg;
- u16 digital_gain;
- u16 gain;
- u16 pixels_per_line;
- u16 lines_per_frame;
- u8 targetfps;
- u8 fps;
- const struct imx_reg *regs;
- u8 res;
- u8 type;
- u8 sensor_revision;
- u8 *otp_data;
- struct imx_settings *mode_tables;
- struct imx_vcm *vcm_driver;
- struct imx_otp *otp_driver;
- const struct imx_resolution *curr_res_table;
- unsigned long entries_curr_table;
- const struct firmware *fw;
- struct imx_reg_addr *reg_addr;
- const struct imx_reg *param_hold;
- const struct imx_reg *param_update;
-
- /* used for h/v blank tuning */
- struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_ctrl *pixel_rate;
- struct v4l2_ctrl *h_blank;
- struct v4l2_ctrl *v_blank;
- struct v4l2_ctrl *link_freq;
- struct v4l2_ctrl *h_flip;
- struct v4l2_ctrl *v_flip;
-
- /* Test pattern control */
- struct v4l2_ctrl *tp_mode;
- struct v4l2_ctrl *tp_r;
- struct v4l2_ctrl *tp_gr;
- struct v4l2_ctrl *tp_gb;
- struct v4l2_ctrl *tp_b;
-
- /* FIXME! */
- bool new_res_sel_method;
-};
-
-#define to_imx_sensor(x) container_of(x, struct imx_device, sd)
-
-#define IMX_MAX_WRITE_BUF_SIZE 32
-struct imx_write_buffer {
- u16 addr;
- u8 data[IMX_MAX_WRITE_BUF_SIZE];
-};
-
-struct imx_write_ctrl {
- int index;
- struct imx_write_buffer buffer;
-};
-
-static const struct imx_reg imx_soft_standby[] = {
- {IMX_8BIT, 0x0100, 0x00},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx_streaming[] = {
- {IMX_8BIT, 0x0100, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx_param_hold[] = {
- {IMX_8BIT, 0x0104, 0x01}, /* GROUPED_PARAMETER_HOLD */
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx_param_update[] = {
- {IMX_8BIT, 0x0104, 0x00}, /* GROUPED_PARAMETER_HOLD */
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx219_param_hold[] = {
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx219_param_update[] = {
- {IMX_TOK_TERM, 0, 0}
-};
-
-extern int ad5816g_vcm_power_up(struct v4l2_subdev *sd);
-extern int ad5816g_vcm_power_down(struct v4l2_subdev *sd);
-extern int ad5816g_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int ad5816g_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int ad5816g_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int ad5816g_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int ad5816g_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int ad5816g_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int drv201_vcm_power_up(struct v4l2_subdev *sd);
-extern int drv201_vcm_power_down(struct v4l2_subdev *sd);
-extern int drv201_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int drv201_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int drv201_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int drv201_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int drv201_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int drv201_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int dw9714_vcm_power_up(struct v4l2_subdev *sd);
-extern int dw9714_vcm_power_down(struct v4l2_subdev *sd);
-extern int dw9714_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int dw9714_t_focus_abs_init(struct v4l2_subdev *sd);
-extern int dw9714_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int dw9714_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int dw9714_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int dw9714_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int dw9714_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int dw9719_vcm_power_up(struct v4l2_subdev *sd);
-extern int dw9719_vcm_power_down(struct v4l2_subdev *sd);
-extern int dw9719_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int dw9719_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int dw9719_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int dw9719_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int dw9719_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int dw9719_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int dw9718_vcm_power_up(struct v4l2_subdev *sd);
-extern int dw9718_vcm_power_down(struct v4l2_subdev *sd);
-extern int dw9718_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int dw9718_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int dw9718_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int dw9718_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int dw9718_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int dw9718_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int vcm_power_up(struct v4l2_subdev *sd);
-extern int vcm_power_down(struct v4l2_subdev *sd);
-
-struct imx_vcm imx_vcms[] = {
- [IMX175_MERRFLD] = {
- .power_up = drv201_vcm_power_up,
- .power_down = drv201_vcm_power_down,
- .t_focus_abs = drv201_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = drv201_t_focus_rel,
- .q_focus_status = drv201_q_focus_status,
- .q_focus_abs = drv201_q_focus_abs,
- .t_vcm_slew = drv201_t_vcm_slew,
- .t_vcm_timing = drv201_t_vcm_timing,
- },
- [IMX175_VALLEYVIEW] = {
- .power_up = dw9714_vcm_power_up,
- .power_down = dw9714_vcm_power_down,
- .t_focus_abs = dw9714_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = dw9714_t_focus_rel,
- .q_focus_status = dw9714_q_focus_status,
- .q_focus_abs = dw9714_q_focus_abs,
- .t_vcm_slew = dw9714_t_vcm_slew,
- .t_vcm_timing = dw9714_t_vcm_timing,
- },
- [IMX135_SALTBAY] = {
- .power_up = ad5816g_vcm_power_up,
- .power_down = ad5816g_vcm_power_down,
- .t_focus_abs = ad5816g_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = ad5816g_t_focus_rel,
- .q_focus_status = ad5816g_q_focus_status,
- .q_focus_abs = ad5816g_q_focus_abs,
- .t_vcm_slew = ad5816g_t_vcm_slew,
- .t_vcm_timing = ad5816g_t_vcm_timing,
- },
- [IMX135_VICTORIABAY] = {
- .power_up = dw9719_vcm_power_up,
- .power_down = dw9719_vcm_power_down,
- .t_focus_abs = dw9719_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = dw9719_t_focus_rel,
- .q_focus_status = dw9719_q_focus_status,
- .q_focus_abs = dw9719_q_focus_abs,
- .t_vcm_slew = dw9719_t_vcm_slew,
- .t_vcm_timing = dw9719_t_vcm_timing,
- },
- [IMX134_VALLEYVIEW] = {
- .power_up = dw9714_vcm_power_up,
- .power_down = dw9714_vcm_power_down,
- .t_focus_abs = dw9714_t_focus_abs,
- .t_focus_abs_init = dw9714_t_focus_abs_init,
- .t_focus_rel = dw9714_t_focus_rel,
- .q_focus_status = dw9714_q_focus_status,
- .q_focus_abs = dw9714_q_focus_abs,
- .t_vcm_slew = dw9714_t_vcm_slew,
- .t_vcm_timing = dw9714_t_vcm_timing,
- },
- [IMX219_MFV0_PRH] = {
- .power_up = dw9718_vcm_power_up,
- .power_down = dw9718_vcm_power_down,
- .t_focus_abs = dw9718_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = dw9718_t_focus_rel,
- .q_focus_status = dw9718_q_focus_status,
- .q_focus_abs = dw9718_q_focus_abs,
- .t_vcm_slew = dw9718_t_vcm_slew,
- .t_vcm_timing = dw9718_t_vcm_timing,
- },
- [IMX_ID_DEFAULT] = {
- .power_up = NULL,
- .power_down = NULL,
- .t_focus_abs_init = NULL,
- },
-};
-
-extern void *dummy_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-extern void *imx_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-extern void *e2prom_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-extern void *brcc064_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-extern void *imx227_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-extern void *e2prom_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-struct imx_otp imx_otps[] = {
- [IMX175_MERRFLD] = {
- .otp_read = imx_otp_read,
- .dev_addr = E2PROM_ADDR,
- .start_addr = 0,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX175_VALLEYVIEW] = {
- .otp_read = e2prom_otp_read,
- .dev_addr = E2PROM_ABICO_SS89A839_ADDR,
- .start_addr = E2PROM_2ADDR,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX135_SALTBAY] = {
- .otp_read = e2prom_otp_read,
- .dev_addr = E2PROM_ADDR,
- .start_addr = 0,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX135_VICTORIABAY] = {
- .otp_read = imx_otp_read,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX134_VALLEYVIEW] = {
- .otp_read = e2prom_otp_read,
- .dev_addr = E2PROM_LITEON_12P1BA869D_ADDR,
- .start_addr = 0,
- .size = E2PROM_LITEON_12P1BA869D_SIZE,
- },
- [IMX132_SALTBAY] = {
- .otp_read = dummy_otp_read,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX208_MOFD_PD2] = {
- .otp_read = dummy_otp_read,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX219_MFV0_PRH] = {
- .otp_read = brcc064_otp_read,
- .dev_addr = E2PROM_ADDR,
- .start_addr = 0,
- .size = IMX219_OTP_SIZE,
- },
- [IMX227_SAND] = {
- .otp_read = imx227_otp_read,
- .size = IMX227_OTP_SIZE,
- },
- [IMX_ID_DEFAULT] = {
- .otp_read = dummy_otp_read,
- .size = DEFAULT_OTP_SIZE,
- },
-};
-
-#endif
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx132.h b/drivers/staging/media/atomisp/i2c/imx/imx132.h
deleted file mode 100644
index 98f047b8a1ba..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx132.h
+++ /dev/null
@@ -1,566 +0,0 @@
-/*
- * Support for Sony IMX camera sensor.
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IMX132_H__
-#define __IMX132_H__
-#include "common.h"
-
-/********************** register definitions ****************************/
-#define IMX132_RGLANESEL 0x3301 /* Number of lanes */
-#define IMX132_RGLANESEL_1LANE 0x01
-#define IMX132_RGLANESEL_2LANES 0x00
-#define IMX132_RGLANESEL_4LANES 0x03
-
-#define IMX132_2LANES_GAINFACT 2096 /* 524/256 * 2^10 */
-#define IMX132_2LANES_GAINFACT_SHIFT 10
-
-/********************** settings for imx from vendor ********************/
-static struct imx_reg imx132_1080p_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- /* PLL setting */
- {IMX_8BIT, 0x0305, 0x02},
- {IMX_8BIT, 0x0307, 0x50},
- {IMX_8BIT, 0x30A4, 0x02},
- {IMX_8BIT, 0x303C, 0x3C},
- /* Mode setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x14},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x32},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0xA3},
- {IMX_8BIT, 0x034A, 0x04},
- {IMX_8BIT, 0x034B, 0x79},
- {IMX_8BIT, 0x034C, 0x07},
- {IMX_8BIT, 0x034D, 0x90},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x48},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x303D, 0x10},
- {IMX_8BIT, 0x303E, 0x5A},
- {IMX_8BIT, 0x3040, 0x00},
- {IMX_8BIT, 0x3041, 0x00},
- {IMX_8BIT, 0x3048, 0x00},
- {IMX_8BIT, 0x304C, 0x2F},
- {IMX_8BIT, 0x304D, 0x02},
- {IMX_8BIT, 0x3064, 0x92},
- {IMX_8BIT, 0x306A, 0x10},
- {IMX_8BIT, 0x309B, 0x00},
- {IMX_8BIT, 0x309E, 0x41},
- {IMX_8BIT, 0x30A0, 0x10},
- {IMX_8BIT, 0x30A1, 0x0B},
- {IMX_8BIT, 0x30B2, 0x00},
- {IMX_8BIT, 0x30D5, 0x00},
- {IMX_8BIT, 0x30D6, 0x00},
- {IMX_8BIT, 0x30D7, 0x00},
- {IMX_8BIT, 0x30D8, 0x00},
- {IMX_8BIT, 0x30D9, 0x00},
- {IMX_8BIT, 0x30DA, 0x00},
- {IMX_8BIT, 0x30DB, 0x00},
- {IMX_8BIT, 0x30DC, 0x00},
- {IMX_8BIT, 0x30DD, 0x00},
- {IMX_8BIT, 0x30DE, 0x00},
- {IMX_8BIT, 0x3102, 0x0C},
- {IMX_8BIT, 0x3103, 0x33},
- {IMX_8BIT, 0x3104, 0x18},
- {IMX_8BIT, 0x3105, 0x00},
- {IMX_8BIT, 0x3106, 0x65},
- {IMX_8BIT, 0x3107, 0x00},
- {IMX_8BIT, 0x3108, 0x06},
- {IMX_8BIT, 0x3109, 0x04},
- {IMX_8BIT, 0x310A, 0x04},
- {IMX_8BIT, 0x315C, 0x3D},
- {IMX_8BIT, 0x315D, 0x3C},
- {IMX_8BIT, 0x316E, 0x3E},
- {IMX_8BIT, 0x316F, 0x3D},
- /* Global timing */
- {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
- {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
- {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
- {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
- {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
- {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
- {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
- {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
- {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
- {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
- {IMX_8BIT, 0x330E, 0x03},
- {IMX_8BIT, 0x3318, 0x62},
- {IMX_8BIT, 0x3322, 0x09},
- {IMX_8BIT, 0x3342, 0x00},
- {IMX_8BIT, 0x3348, 0xE0},
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx132_1456x1096_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- /* PLL setting */
- {IMX_8BIT, 0x0305, 0x02},
- {IMX_8BIT, 0x0307, 0x50},
- {IMX_8BIT, 0x30A4, 0x02},
- {IMX_8BIT, 0x303C, 0x3C},
- /* Mode setting */
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0x04},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x32},
- {IMX_8BIT, 0x0348, 0x06},
- {IMX_8BIT, 0x0349, 0xB3},
- {IMX_8BIT, 0x034A, 0x04},
- {IMX_8BIT, 0x034B, 0x79},
- {IMX_8BIT, 0x034C, 0x05},
- {IMX_8BIT, 0x034D, 0xB0},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x48},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x303D, 0x10},
- {IMX_8BIT, 0x303E, 0x5A},
- {IMX_8BIT, 0x3040, 0x00},
- {IMX_8BIT, 0x3041, 0x00},
- {IMX_8BIT, 0x3048, 0x00},
- {IMX_8BIT, 0x304C, 0x2F},
- {IMX_8BIT, 0x304D, 0x02},
- {IMX_8BIT, 0x3064, 0x92},
- {IMX_8BIT, 0x306A, 0x10},
- {IMX_8BIT, 0x309B, 0x00},
- {IMX_8BIT, 0x309E, 0x41},
- {IMX_8BIT, 0x30A0, 0x10},
- {IMX_8BIT, 0x30A1, 0x0B},
- {IMX_8BIT, 0x30B2, 0x00},
- {IMX_8BIT, 0x30D5, 0x00},
- {IMX_8BIT, 0x30D6, 0x00},
- {IMX_8BIT, 0x30D7, 0x00},
- {IMX_8BIT, 0x30D8, 0x00},
- {IMX_8BIT, 0x30D9, 0x00},
- {IMX_8BIT, 0x30DA, 0x00},
- {IMX_8BIT, 0x30DB, 0x00},
- {IMX_8BIT, 0x30DC, 0x00},
- {IMX_8BIT, 0x30DD, 0x00},
- {IMX_8BIT, 0x30DE, 0x00},
- {IMX_8BIT, 0x3102, 0x0C},
- {IMX_8BIT, 0x3103, 0x33},
- {IMX_8BIT, 0x3104, 0x18},
- {IMX_8BIT, 0x3105, 0x00},
- {IMX_8BIT, 0x3106, 0x65},
- {IMX_8BIT, 0x3107, 0x00},
- {IMX_8BIT, 0x3108, 0x06},
- {IMX_8BIT, 0x3109, 0x04},
- {IMX_8BIT, 0x310A, 0x04},
- {IMX_8BIT, 0x315C, 0x3D},
- {IMX_8BIT, 0x315D, 0x3C},
- {IMX_8BIT, 0x316E, 0x3E},
- {IMX_8BIT, 0x316F, 0x3D},
- /* Global timing */
- {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
- {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
- {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
- {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
- {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
- {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
- {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
- {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
- {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
- {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
- {IMX_8BIT, 0x330E, 0x03},
- {IMX_8BIT, 0x3318, 0x62},
- {IMX_8BIT, 0x3322, 0x09},
- {IMX_8BIT, 0x3342, 0x00},
- {IMX_8BIT, 0x3348, 0xE0},
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx132_1636x1096_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- /* PLL setting */
- {IMX_8BIT, 0x0305, 0x02},
- {IMX_8BIT, 0x0307, 0x50},
- {IMX_8BIT, 0x30A4, 0x02},
- {IMX_8BIT, 0x303C, 0x3C},
- /* Mode setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0xAA},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x32},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0x0D},
- {IMX_8BIT, 0x034A, 0x04},
- {IMX_8BIT, 0x034B, 0x79},
- {IMX_8BIT, 0x034C, 0x06},
- {IMX_8BIT, 0x034D, 0x64},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x48},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x303D, 0x10},
- {IMX_8BIT, 0x303E, 0x5A},
- {IMX_8BIT, 0x3040, 0x00},
- {IMX_8BIT, 0x3041, 0x00},
- {IMX_8BIT, 0x3048, 0x00},
- {IMX_8BIT, 0x304C, 0x2F},
- {IMX_8BIT, 0x304D, 0x02},
- {IMX_8BIT, 0x3064, 0x92},
- {IMX_8BIT, 0x306A, 0x10},
- {IMX_8BIT, 0x309B, 0x00},
- {IMX_8BIT, 0x309E, 0x41},
- {IMX_8BIT, 0x30A0, 0x10},
- {IMX_8BIT, 0x30A1, 0x0B},
- {IMX_8BIT, 0x30B2, 0x00},
- {IMX_8BIT, 0x30D5, 0x00},
- {IMX_8BIT, 0x30D6, 0x00},
- {IMX_8BIT, 0x30D7, 0x00},
- {IMX_8BIT, 0x30D8, 0x00},
- {IMX_8BIT, 0x30D9, 0x00},
- {IMX_8BIT, 0x30DA, 0x00},
- {IMX_8BIT, 0x30DB, 0x00},
- {IMX_8BIT, 0x30DC, 0x00},
- {IMX_8BIT, 0x30DD, 0x00},
- {IMX_8BIT, 0x30DE, 0x00},
- {IMX_8BIT, 0x3102, 0x0C},
- {IMX_8BIT, 0x3103, 0x33},
- {IMX_8BIT, 0x3104, 0x18},
- {IMX_8BIT, 0x3105, 0x00},
- {IMX_8BIT, 0x3106, 0x65},
- {IMX_8BIT, 0x3107, 0x00},
- {IMX_8BIT, 0x3108, 0x06},
- {IMX_8BIT, 0x3109, 0x04},
- {IMX_8BIT, 0x310A, 0x04},
- {IMX_8BIT, 0x315C, 0x3D},
- {IMX_8BIT, 0x315D, 0x3C},
- {IMX_8BIT, 0x316E, 0x3E},
- {IMX_8BIT, 0x316F, 0x3D},
- /* Global timing */
- {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
- {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
- {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
- {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
- {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
- {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
- {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
- {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
- {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
- {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
- {IMX_8BIT, 0x330E, 0x03},
- {IMX_8BIT, 0x3318, 0x62},
- {IMX_8BIT, 0x3322, 0x09},
- {IMX_8BIT, 0x3342, 0x00},
- {IMX_8BIT, 0x3348, 0xE0},
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx132_1336x1096_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- /* PLL setting */
- {IMX_8BIT, 0x0305, 0x02},
- {IMX_8BIT, 0x0307, 0x50},
- {IMX_8BIT, 0x30A4, 0x02},
- {IMX_8BIT, 0x303C, 0x3C},
- /* Mode setting */
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0x2C},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x32},
- {IMX_8BIT, 0x0348, 0x06},
- {IMX_8BIT, 0x0349, 0x77},
- {IMX_8BIT, 0x034A, 0x04},
- {IMX_8BIT, 0x034B, 0x79},
- {IMX_8BIT, 0x034C, 0x05},
- {IMX_8BIT, 0x034D, 0x38},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x48},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x303D, 0x10},
- {IMX_8BIT, 0x303E, 0x5A},
- {IMX_8BIT, 0x3040, 0x00},
- {IMX_8BIT, 0x3041, 0x00},
- {IMX_8BIT, 0x3048, 0x00},
- {IMX_8BIT, 0x304C, 0x2F},
- {IMX_8BIT, 0x304D, 0x02},
- {IMX_8BIT, 0x3064, 0x92},
- {IMX_8BIT, 0x306A, 0x10},
- {IMX_8BIT, 0x309B, 0x00},
- {IMX_8BIT, 0x309E, 0x41},
- {IMX_8BIT, 0x30A0, 0x10},
- {IMX_8BIT, 0x30A1, 0x0B},
- {IMX_8BIT, 0x30B2, 0x00},
- {IMX_8BIT, 0x30D5, 0x00},
- {IMX_8BIT, 0x30D6, 0x00},
- {IMX_8BIT, 0x30D7, 0x00},
- {IMX_8BIT, 0x30D8, 0x00},
- {IMX_8BIT, 0x30D9, 0x00},
- {IMX_8BIT, 0x30DA, 0x00},
- {IMX_8BIT, 0x30DB, 0x00},
- {IMX_8BIT, 0x30DC, 0x00},
- {IMX_8BIT, 0x30DD, 0x00},
- {IMX_8BIT, 0x30DE, 0x00},
- {IMX_8BIT, 0x3102, 0x0C},
- {IMX_8BIT, 0x3103, 0x33},
- {IMX_8BIT, 0x3104, 0x18},
- {IMX_8BIT, 0x3105, 0x00},
- {IMX_8BIT, 0x3106, 0x65},
- {IMX_8BIT, 0x3107, 0x00},
- {IMX_8BIT, 0x3108, 0x06},
- {IMX_8BIT, 0x3109, 0x04},
- {IMX_8BIT, 0x310A, 0x04},
- {IMX_8BIT, 0x315C, 0x3D},
- {IMX_8BIT, 0x315D, 0x3C},
- {IMX_8BIT, 0x316E, 0x3E},
- {IMX_8BIT, 0x316F, 0x3D},
- /* Global timing */
- {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
- {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
- {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
- {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
- {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
- {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
- {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
- {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
- {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
- {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
- {IMX_8BIT, 0x330E, 0x03},
- {IMX_8BIT, 0x3318, 0x62},
- {IMX_8BIT, 0x3322, 0x09},
- {IMX_8BIT, 0x3342, 0x00},
- {IMX_8BIT, 0x3348, 0xE0},
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-/********************** settings for imx - reference *********************/
-static struct imx_reg const imx132_init_settings[] = {
- /* sw reset */
- { IMX_8BIT, 0x0100, 0x00 },
- { IMX_8BIT, 0x0103, 0x01 },
- { IMX_TOK_DELAY, 0, 5},
- { IMX_8BIT, 0x0103, 0x00 },
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- GROUPED_PARAMETER_HOLD_DISABLE,
- { IMX_TOK_TERM, 0, 0}
-};
-
-struct imx_resolution imx132_res_preview[] = {
- {
- .desc = "imx132_1080p_30fps",
- .regs = imx132_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
-};
-
-struct imx_resolution imx132_res_still[] = {
- {
- .desc = "imx132_1080p_30fps",
- .regs = imx132_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
-};
-
-struct imx_resolution imx132_res_video[] = {
- {
- .desc = "imx132_1336x1096_30fps",
- .regs = imx132_1336x1096_30fps,
- .width = 1336,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
- {
- .desc = "imx132_1456x1096_30fps",
- .regs = imx132_1456x1096_30fps,
- .width = 1456,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
- {
- .desc = "imx132_1636x1096_30fps",
- .regs = imx132_1636x1096_30fps,
- .width = 1636,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
- {
- .desc = "imx132_1080p_30fps",
- .regs = imx132_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
-};
-#endif
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx134.h b/drivers/staging/media/atomisp/i2c/imx/imx134.h
deleted file mode 100644
index 9026e8ba5679..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx134.h
+++ /dev/null
@@ -1,2465 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IMX134_H__
-#define __IMX134_H__
-
-/********************** imx134 setting - version 1 *********************/
-static struct imx_reg const imx134_init_settings[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Basic settings */
- { IMX_8BIT, 0x0105, 0x01 },
- { IMX_8BIT, 0x0220, 0x01 },
- { IMX_8BIT, 0x3302, 0x11 },
- { IMX_8BIT, 0x3833, 0x20 },
- { IMX_8BIT, 0x3893, 0x00 },
- { IMX_8BIT, 0x3906, 0x08 },
- { IMX_8BIT, 0x3907, 0x01 },
- { IMX_8BIT, 0x391B, 0x01 },
- { IMX_8BIT, 0x3C09, 0x01 },
- { IMX_8BIT, 0x600A, 0x00 },
-
- /* Analog settings */
- { IMX_8BIT, 0x3008, 0xB0 },
- { IMX_8BIT, 0x320A, 0x01 },
- { IMX_8BIT, 0x320D, 0x10 },
- { IMX_8BIT, 0x3216, 0x2E },
- { IMX_8BIT, 0x322C, 0x02 },
- { IMX_8BIT, 0x3409, 0x0C },
- { IMX_8BIT, 0x340C, 0x2D },
- { IMX_8BIT, 0x3411, 0x39 },
- { IMX_8BIT, 0x3414, 0x1E },
- { IMX_8BIT, 0x3427, 0x04 },
- { IMX_8BIT, 0x3480, 0x1E },
- { IMX_8BIT, 0x3484, 0x1E },
- { IMX_8BIT, 0x3488, 0x1E },
- { IMX_8BIT, 0x348C, 0x1E },
- { IMX_8BIT, 0x3490, 0x1E },
- { IMX_8BIT, 0x3494, 0x1E },
- { IMX_8BIT, 0x3511, 0x8F },
- { IMX_8BIT, 0x3617, 0x2D },
-
- GROUPED_PARAMETER_HOLD_DISABLE,
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 3280x2464 8M 30fps, vendor provided */
-static struct imx_reg const imx134_8M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* clock setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 },
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3279 */
- { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
- { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x0C }, /* x_output_size[15:8]: 3280*/
- { IMX_8BIT, 0x034D, 0xD0 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x09 }, /* y_output_size[15:8]:2464 */
- { IMX_8BIT, 0x034F, 0xA0 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x0C },
- { IMX_8BIT, 0x0355, 0xD0 },
- { IMX_8BIT, 0x0356, 0x09 },
- { IMX_8BIT, 0x0357, 0xA0 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x0C },
- { IMX_8BIT, 0x3311, 0xD0 },
- { IMX_8BIT, 0x3312, 0x09 },
- { IMX_8BIT, 0x3313, 0xA0 },
- { IMX_8BIT, 0x331C, 0x01 },
- { IMX_8BIT, 0x331D, 0xAE },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global timing setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration time setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/2 binning 30fps 1640x1232, vendor provide */
-static struct imx_reg const imx134_1640_1232_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3279 */
- { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
- { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1640 */
- { IMX_8BIT, 0x034D, 0x68 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1232 */
- { IMX_8BIT, 0x034F, 0xD0 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x68 },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0xD0 },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x68 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0xD0 },
-
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x06 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/4 binning 30fps 820x616, vendor provide */
-static struct imx_reg const imx134_820_616_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3279 */
- { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
- { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x03 }, /* x_output_size[15:8]:820 */
- { IMX_8BIT, 0x034D, 0x34 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:616 */
- { IMX_8BIT, 0x034F, 0x68 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x03 },
- { IMX_8BIT, 0x0355, 0x34 },
- { IMX_8BIT, 0x0356, 0x02 },
- { IMX_8BIT, 0x0357, 0x68 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x03 },
- { IMX_8BIT, 0x3311, 0x34 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x68 },
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/4 binning 30fps 820x552 */
-static struct imx_reg const imx134_820_552_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:128 */
- { IMX_8BIT, 0x0347, 0x80 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3280-1 */
- { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2208+128-1 */
- { IMX_8BIT, 0x034B, 0x1F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x03 }, /* x_output_size[15:8]: */
- { IMX_8BIT, 0x034D, 0x34 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:552 */
- { IMX_8BIT, 0x034F, 0x28 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x03 },
- { IMX_8BIT, 0x0355, 0x34 },
- { IMX_8BIT, 0x0356, 0x02 },
- { IMX_8BIT, 0x0357, 0x28 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x03 },
- { IMX_8BIT, 0x3311, 0x34 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x28 },
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/4 binning 30fps 720x592 */
-static struct imx_reg const imx134_720_592_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:200 */
- { IMX_8BIT, 0x0345, 0xC8 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:40 */
- { IMX_8BIT, 0x0347, 0x28 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:2880+200-1 */
- { IMX_8BIT, 0x0349, 0x07 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2368+40-1 */
- { IMX_8BIT, 0x034B, 0x67 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x02 }, /* x_output_size[15:8]: */
- { IMX_8BIT, 0x034D, 0xD0 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:592 */
- { IMX_8BIT, 0x034F, 0x50 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x02 },
- { IMX_8BIT, 0x0355, 0xD0 },
- { IMX_8BIT, 0x0356, 0x02 },
- { IMX_8BIT, 0x0357, 0x50 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x02 },
- { IMX_8BIT, 0x3311, 0xD0 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x50 },
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-static struct imx_reg const imx134_752_616_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:136 */
- { IMX_8BIT, 0x0345, 0x88 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3145+134-1 */
- { IMX_8BIT, 0x0349, 0x47 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
- { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x02 }, /* x_output_size[15:8]: 752*/
- { IMX_8BIT, 0x034D, 0xF0 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:616 */
- { IMX_8BIT, 0x034F, 0x68 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
-
- { IMX_8BIT, 0x0354, 0x02 },
- { IMX_8BIT, 0x0355, 0xF0 },
- { IMX_8BIT, 0x0356, 0x02 },
- { IMX_8BIT, 0x0357, 0x68 },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x02 },
- { IMX_8BIT, 0x3311, 0xF0 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x68 },
-
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 1424x1168 */
-static struct imx_reg const imx134_1424_1168_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 }, /* binning */
- { IMX_8BIT, 0x0391, 0x11 }, /* no binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x22 }, /* 34/16=2.125 */
- { IMX_8BIT, 0x4082, 0x00 }, /* ?? */
- { IMX_8BIT, 0x4083, 0x00 }, /* ?? */
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:136 */
- { IMX_8BIT, 0x0345, 0x80 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3145+134-1 */
- { IMX_8BIT, 0x0349, 0x51 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
- { IMX_8BIT, 0x034B, 0xB1 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x05 }, /* x_output_size[15:8]: 1424*/
- { IMX_8BIT, 0x034D, 0x90 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1168 */
- { IMX_8BIT, 0x034F, 0x90 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
-
- { IMX_8BIT, 0x0354, 0x0B },
- { IMX_8BIT, 0x0355, 0xD2 },
- { IMX_8BIT, 0x0356, 0x09 },
- { IMX_8BIT, 0x0357, 0xB2 },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x05 },
- { IMX_8BIT, 0x3311, 0x90 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x90 },
-
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x05 },
- { IMX_8BIT, 0x4085, 0x90 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x90 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/4 binning, 16/35 down scaling, 30fps, dvs */
-static struct imx_reg const imx134_240_196_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /*4x4 binning */
- { IMX_8BIT, 0x0391, 0x44 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x23 }, /* down scaling = 16/35 */
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x02 }, /* x_addr_start[15:8]:590 */
- { IMX_8BIT, 0x0345, 0x4E }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:366 */
- { IMX_8BIT, 0x0347, 0x6E }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0A }, /* x_addr_end[15:8]:2104+590-1 */
- { IMX_8BIT, 0x0349, 0x85 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:1720+366-1 */
- { IMX_8BIT, 0x034B, 0x25 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x00 }, /* x_output_size[15:8]: 240*/
- { IMX_8BIT, 0x034D, 0xF0 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x00 }, /* y_output_size[15:8]:196 */
- { IMX_8BIT, 0x034F, 0xC4 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x02 }, /* crop_x: 526 */
- { IMX_8BIT, 0x0355, 0x0E },
- { IMX_8BIT, 0x0356, 0x01 }, /* crop_y: 430 */
- { IMX_8BIT, 0x0357, 0xAE },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x00 },
- { IMX_8BIT, 0x3311, 0xF0 },
- { IMX_8BIT, 0x3312, 0x00 },
- { IMX_8BIT, 0x3313, 0xC4 },
-
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x4C },
-
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0xF0 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0xC4 },
-
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x0A },
- { IMX_8BIT, 0x0203, 0x88 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/2 binning, 16/38 downscaling, 30fps, dvs */
-static struct imx_reg const imx134_448_366_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* 2x2 binning */
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x26 }, /* down scaling = 16/38 */
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x02 }, /* x_addr_start[15:8]:590 */
- { IMX_8BIT, 0x0345, 0x4E }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:366 */
- { IMX_8BIT, 0x0347, 0x6E }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0A }, /* x_addr_end[15:8]:2128+590-1 */
- { IMX_8BIT, 0x0349, 0x9D }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:1740+366-1 */
- { IMX_8BIT, 0x034B, 0x39 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x01 }, /* x_output_size[15:8]: 448*/
- { IMX_8BIT, 0x034D, 0xC0 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x01 }, /* y_output_size[15:8]:366 */
- { IMX_8BIT, 0x034F, 0x6E }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x04 }, /* crop_x: 1064 */
- { IMX_8BIT, 0x0355, 0x28 },
- { IMX_8BIT, 0x0356, 0x03 }, /* crop_y: 870 */
- { IMX_8BIT, 0x0357, 0x66 },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x01 },
- { IMX_8BIT, 0x3311, 0xC0 },
- { IMX_8BIT, 0x3312, 0x01 },
- { IMX_8BIT, 0x3313, 0x6E },
-
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
-
- { IMX_8BIT, 0x4084, 0x01 },
- { IMX_8BIT, 0x4085, 0xC0 },
- { IMX_8BIT, 0x4086, 0x01 },
- { IMX_8BIT, 0x4087, 0x6E },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 2336x1312, 30fps, for 1080p dvs, vendor provide */
-static struct imx_reg const imx134_2336_1312_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 }, /* disable binning */
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x16 }, /* down scaling = 16/22 = 8/11 */
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:34 */
- { IMX_8BIT, 0x0345, 0x22 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:332 */
- { IMX_8BIT, 0x0347, 0x4C }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3245 */
- { IMX_8BIT, 0x0349, 0xAD }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2135 */
- { IMX_8BIT, 0x034B, 0x57 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x09 }, /* x_output_size[15:8]:2336 */
- { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x05 }, /* y_output_size[15:8]:1312 */
- { IMX_8BIT, 0x034F, 0x20 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x0C },
- { IMX_8BIT, 0x0355, 0x8C },
- { IMX_8BIT, 0x0356, 0x07 },
- { IMX_8BIT, 0x0357, 0x0C },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x09 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x05 },
- { IMX_8BIT, 0x3313, 0x20 },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xEB },
- { IMX_8BIT, 0x4084, 0x09 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x05 },
- { IMX_8BIT, 0x4087, 0x20 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 1920x1080, 30fps, for 720p still capture */
-static struct imx_reg const imx134_1936_1096_30fps_v1[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 }, /* disable binning */
- { IMX_8BIT, 0x0391, 0x11 }, /* no binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1A }, /* downscaling 16/26*/
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:64 */
- { IMX_8BIT, 0x0345, 0x40 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:340 */
- { IMX_8BIT, 0x0347, 0x54 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3209 */
- { IMX_8BIT, 0x0349, 0x89 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2121 */
- { IMX_8BIT, 0x034B, 0x49 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x07 }, /* x_output_size[15:8]:1936 */
- { IMX_8BIT, 0x034D, 0x90 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1096 */
- { IMX_8BIT, 0x034F, 0x48 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x0C }, /* crop x:3146 */
- { IMX_8BIT, 0x0355, 0x4A },
- { IMX_8BIT, 0x0356, 0x06 }, /* xrop y:1782 */
- { IMX_8BIT, 0x0357, 0xF6 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x07 },
- { IMX_8BIT, 0x3311, 0x80 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x38 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x1E },
- { IMX_8BIT, 0x4084, 0x07 },
- { IMX_8BIT, 0x4085, 0x80 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x38 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 1920x1080, 30fps, for 720p still capture, vendor provide */
-static struct imx_reg const imx134_1936_1096_30fps_v2[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 }, /* disable binning */
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1B }, /* downscaling 16/27*/
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:64 */
- { IMX_8BIT, 0x0345, 0x06 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:340 */
- { IMX_8BIT, 0x0347, 0x34 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3209 */
- { IMX_8BIT, 0x0349, 0xC9 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2121 */
- { IMX_8BIT, 0x034B, 0x6F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x07 }, /* x_output_size[15:8]:1936 */
- { IMX_8BIT, 0x034D, 0x90 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1096 */
- { IMX_8BIT, 0x034F, 0x48 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x0C }, /* crop x:3146 */
- { IMX_8BIT, 0x0355, 0xC4 },
- { IMX_8BIT, 0x0356, 0x07 }, /* crop y:1782 */
- { IMX_8BIT, 0x0357, 0x3A },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x07 }, /* decided by mode and output size */
- { IMX_8BIT, 0x3311, 0x90 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x48 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x1E },
- { IMX_8BIT, 0x4084, 0x07 },
- { IMX_8BIT, 0x4085, 0x90 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x48 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 1296x736, 30fps, for 720p still capture, vendor provide */
-static struct imx_reg const imx134_1296_736_30fps_v2[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x14 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:40 */
- { IMX_8BIT, 0x0345, 0x14 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:332 */
- { IMX_8BIT, 0x0347, 0x38 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3239 */
- { IMX_8BIT, 0x0349, 0xBB }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2131 */
- { IMX_8BIT, 0x034B, 0x67 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x05 }, /* x_output_size[15:8]:1296 */
- { IMX_8BIT, 0x034D, 0x10 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:736 */
- { IMX_8BIT, 0x034F, 0xE0 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x54 },
- { IMX_8BIT, 0x0356, 0x03 },
- { IMX_8BIT, 0x0357, 0x98 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x05 },
- { IMX_8BIT, 0x3311, 0x10 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0xE0 },
- { IMX_8BIT, 0x331C, 0x01 },
- { IMX_8BIT, 0x331D, 0x10 },
- { IMX_8BIT, 0x4084, 0x05 },
- { IMX_8BIT, 0x4085, 0x10 },
- { IMX_8BIT, 0x4086, 0x02 },
- { IMX_8BIT, 0x4087, 0xE0 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 1568x880, 30fps, for 720p dvs, vendor provide */
-static struct imx_reg const imx134_1568_880_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning*/
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:72 */
- { IMX_8BIT, 0x0345, 0x48 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:356 */
- { IMX_8BIT, 0x0347, 0x64 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3207 */
- { IMX_8BIT, 0x0349, 0x87 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2115 */
- { IMX_8BIT, 0x034B, 0x43 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1568 */
- { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x03 }, /* y_output_size[15:8]:880 */
- { IMX_8BIT, 0x034F, 0x70 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x03 },
- { IMX_8BIT, 0x0357, 0x70 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x70 },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xF2 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-static struct imx_reg const imx134_1568_876_60fps_0625[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0x8F },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning*/
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:72 */
- { IMX_8BIT, 0x0345, 0x48 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:356 */
- { IMX_8BIT, 0x0347, 0x64 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3207 */
- { IMX_8BIT, 0x0349, 0x87 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2115 */
- { IMX_8BIT, 0x034B, 0x3B }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1568 */
- { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x03 }, /* y_output_size[15:8]:876 */
- { IMX_8BIT, 0x034F, 0x6C }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x03 },
- { IMX_8BIT, 0x0357, 0x6C },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x6C },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xF2 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x6F },
- { IMX_8BIT, 0x0831, 0x27 },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x2F },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x2F },
- { IMX_8BIT, 0x0836, 0x9F },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-
-/* 4 lane for 720p dvs, vendor provide */
-static struct imx_reg const imx134_1568_880[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xC8 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning*/
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:72 */
- { IMX_8BIT, 0x0345, 0x48 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:356 */
- { IMX_8BIT, 0x0347, 0x64 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3207 */
- { IMX_8BIT, 0x0349, 0x87 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2115 */
- { IMX_8BIT, 0x034B, 0x43 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1568 */
- { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x03 }, /* y_output_size[15:8]:880 */
- { IMX_8BIT, 0x034F, 0x70 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x03 },
- { IMX_8BIT, 0x0357, 0x70 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x70 },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xF2 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x5F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x37 },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xBF },
- { IMX_8BIT, 0x0837, 0x3F },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-/* 4 lane for 480p dvs, default 60fps, vendor provide */
-static struct imx_reg const imx134_880_592[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xC8 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning*/
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1D }, /* downscaling ratio = 16/29 */
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:44 */
- { IMX_8BIT, 0x0345, 0x2C }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:160 */
- { IMX_8BIT, 0x0347, 0xA0 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3235 */
- { IMX_8BIT, 0x0349, 0xA3 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2307 */
- { IMX_8BIT, 0x034B, 0x03 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x03 }, /* x_output_size[15:8]:880 */
- { IMX_8BIT, 0x034D, 0x70 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:592 */
- { IMX_8BIT, 0x034F, 0x50 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x3C },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x32 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x03 },
- { IMX_8BIT, 0x3311, 0x70 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x50 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x4C },
- { IMX_8BIT, 0x4084, 0x03 },
- { IMX_8BIT, 0x4085, 0x70 },
- { IMX_8BIT, 0x4086, 0x02 },
- { IMX_8BIT, 0x4087, 0x50 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x5F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x37 },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xBF },
- { IMX_8BIT, 0x0837, 0x3F },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
-
-	/* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x05 },
- { IMX_8BIT, 0x0203, 0x42 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-static struct imx_reg const imx134_2336_1308_60fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xC8 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
-	{ IMX_8BIT, 0x0390, 0x00 }, /* binning off */
-	{ IMX_8BIT, 0x0391, 0x11 }, /* no binning (1x1) */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
-	{ IMX_8BIT, 0x0344, 0x01 }, /* x_addr_start[15:8]:472 */
-	{ IMX_8BIT, 0x0345, 0xD8 }, /* x_addr_start[7:0] */
-	{ IMX_8BIT, 0x0346, 0x02 }, /* y_addr_start[15:8]:580 */
-	{ IMX_8BIT, 0x0347, 0x44 }, /* y_addr_start[7:0] */
-	{ IMX_8BIT, 0x0348, 0x0A }, /* x_addr_end[15:8]:2807 */
-	{ IMX_8BIT, 0x0349, 0xF7 }, /* x_addr_end[7:0] */
-	{ IMX_8BIT, 0x034A, 0x07 }, /* y_addr_end[15:8]:1891 */
-	{ IMX_8BIT, 0x034B, 0x5F+4 }, /* y_addr_end[7:0] */
-	{ IMX_8BIT, 0x034C, 0x09 }, /* x_output_size[15:8]:2336 */
-	{ IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
-	{ IMX_8BIT, 0x034E, 0x05 }, /* y_output_size[15:8]:1312 */
-	{ IMX_8BIT, 0x034F, 0x1C+4 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x09 },
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x05 },
- { IMX_8BIT, 0x0357, 0x1C+4 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x09 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x05 },
- { IMX_8BIT, 0x3313, 0x1C+4 },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xE8 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x5F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x37 },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xBF },
- { IMX_8BIT, 0x0837, 0x3F },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
-	/* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x05 },
- { IMX_8BIT, 0x0203, 0x42 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-struct imx_resolution imx134_res_preview[] = {
- {
- .desc = "imx134_CIF_30fps",
- .regs = imx134_720_592_30fps,
- .width = 720,
- .height = 592,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_820_552_30fps_preview",
- .regs = imx134_820_552_30fps,
- .width = 820,
- .height = 552,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_820_616_preview_30fps",
- .regs = imx134_820_616_30fps,
- .width = 820,
- .height = 616,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_1080p_preview_30fps",
- .regs = imx134_1936_1096_30fps_v2,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
- {
- .desc = "imx134_1640_1232_preview_30fps",
- .regs = imx134_1640_1232_30fps,
- .width = 1640,
- .height = 1232,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_8M_preview_30fps",
- .regs = imx134_8M_30fps,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
-};
-
-struct imx_resolution imx134_res_still[] = {
- {
- .desc = "imx134_CIF_30fps",
- .regs = imx134_1424_1168_30fps,
- .width = 1424,
- .height = 1168,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_VGA_still_30fps",
- .regs = imx134_1640_1232_30fps,
- .width = 1640,
- .height = 1232,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_1080p_still_30fps",
- .regs = imx134_1936_1096_30fps_v2,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
- {
- .desc = "imx134_1640_1232_still_30fps",
- .regs = imx134_1640_1232_30fps,
- .width = 1640,
- .height = 1232,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_8M_still_30fps",
- .regs = imx134_8M_30fps,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- {
- /* WORKAROUND for FW performance limitation */
- .fps = 8,
- .pixels_per_line = 6400,
- .lines_per_frame = 5312,
- },
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
-};
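The fps_options entries pair each frame rate with pixels_per_line and lines_per_frame, so the implied sensor pixel rate is fps * pixels_per_line * lines_per_frame. For the 8M still mode above, the 8 fps workaround entry and the 30 fps entry imply nearly the same rate (8 * 6400 * 5312 and 30 * 3600 * 2518 are both roughly 272 MHz), i.e. the FW-performance workaround only stretches the line and frame timing, not the sensor clock. A small sketch of that relation (the function name is illustrative):

/*
 * Implied pixel rate for an fps_options entry:
 *   30 * 3600 * 2518 = 271,944,000
 *    8 * 6400 * 5312 = 271,974,400
 */
static unsigned long long imx_implied_pixel_rate(unsigned int fps,
						 unsigned int pixels_per_line,
						 unsigned int lines_per_frame)
{
	return (unsigned long long)fps * pixels_per_line * lines_per_frame;
}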
-
-struct imx_resolution imx134_res_video[] = {
- {
- .desc = "imx134_QCIF_DVS_30fps",
- .regs = imx134_240_196_30fps,
- .width = 240,
- .height = 196,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_CIF_DVS_30fps",
- .regs = imx134_448_366_30fps,
- .width = 448,
- .height = 366,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_VGA_30fps",
- .regs = imx134_820_616_30fps,
- .width = 820,
- .height = 616,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_480p",
- .regs = imx134_880_592,
- .width = 880,
- .height = 592,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2700,
- },
- {
- .fps = 60,
- .pixels_per_line = 3600,
- .lines_per_frame = 1350,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_1568_880",
- .regs = imx134_1568_880,
- .width = 1568,
- .height = 880,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2700,
- },
- {
- .fps = 60,
- .pixels_per_line = 3600,
- .lines_per_frame = 1350,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_1080p_dvs_30fps",
- .regs = imx134_2336_1312_30fps,
- .width = 2336,
- .height = 1312,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
- {
- .desc = "imx134_1080p_dvs_60fps",
- .regs = imx134_2336_1308_60fps,
- .width = 2336,
- .height = 1312,
- .fps_options = {
- {
- .fps = 60,
- .pixels_per_line = 3600,
- .lines_per_frame = 1350,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
- {
-		/* This setting is only used for SDV mode */
- .desc = "imx134_8M_sdv_30fps",
- .regs = imx134_8M_30fps,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
-};
-
-#endif
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx135.h b/drivers/staging/media/atomisp/i2c/imx/imx135.h
deleted file mode 100644
index 58b43af909f2..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx135.h
+++ /dev/null
@@ -1,3374 +0,0 @@
-/*
- * Support for Sony IMX camera sensor.
- *
- * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IMX135_H__
-#define __IMX135_H__
-
-#include "common.h"
-
-#define IMX_SC_CMMN_CHIP_ID_H 0x0016
-#define IMX_SC_CMMN_CHIP_ID_L 0x0017
-
-/*
- * focal length bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define IMX_FOCAL_LENGTH_DEFAULT 0x1710064
-
-/*
- * current f-number bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define IMX_F_NUMBER_DEFAULT 0x16000a
-
-/*
- * f-number range bits definition:
- * bits 31-24: max f-number numerator
- * bits 23-16: max f-number denominator
- * bits 15-8: min f-number numerator
- * bits 7-0: min f-number denominator
- */
-#define IMX_F_NUMBER_RANGE 0x160a160a
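The packed constants above follow the bit layouts spelled out in their comments. A minimal decoding sketch for the two numerator/denominator constants (helper names are illustrative, not taken from the driver):

/*
 * Decode the 32-bit packed rationals documented above.
 * IMX_FOCAL_LENGTH_DEFAULT = 0x1710064 -> 369/100 = 3.69 mm
 * IMX_F_NUMBER_DEFAULT     = 0x16000a  -> 22/10   = f/2.2
 */
static inline unsigned int imx_rational_num(unsigned int packed)
{
	return packed >> 16;		/* bits 31-16: numerator */
}

static inline unsigned int imx_rational_den(unsigned int packed)
{
	return packed & 0xffff;		/* bits 15-0: denominator */
}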
-
-#define GROUPED_PARAMETER_HOLD_ENABLE {IMX_8BIT, 0x0104, 0x1}
-#define GROUPED_PARAMETER_HOLD_DISABLE {IMX_8BIT, 0x0104, 0x0}
-
-#define IMX135_EMBEDDED_DATA_LINE_NUM 2
-#define IMX135_OUTPUT_DATA_FORMAT_REG 0x0112
-#define IMX135_OUTPUT_FORMAT_RAW10 0x0a0a
-/*
- * We use three different MIPI rates for our modes based on the resolution and
- * FPS requirements. So we have three PLL configurations and these are based
- * on the EMC friendly MIPI values.
- *
- * Maximum clock: Pix clock @ 360.96MHz MIPI @ 451.2MHz 902.4mbps
- * Reduced clock: Pix clock @ 273.00MHz MIPI @ 342.0MHz 684.0mbps
- * Binning modes: Pix clock @ 335.36MHz MIPI @ 209.6MHz 419.2mbps
- * Global Timing registers are based on the data rates and these are part of
- * the below clock definitions.
- */
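The quoted figures are self-consistent if one assumes a 4-lane CSI-2 link carrying RAW10: the per-lane rate is twice the MIPI clock (DDR), and four lanes at 902.4 Mbps carry exactly 360.96 MHz x 10 bits (the 499.2 MHz and 342 MHz pairs check out the same way). A small sketch of that bookkeeping; the lane count is an assumption, not stated in this header:

/*
 * Total link bandwidth in kbps, assuming DDR signalling:
 *   4 lanes * 2 * 451,200 kHz = 3,609,600 kbps = 360.96 MHz * 10-bit RAW10
 */
static inline unsigned long long imx_link_kbps(unsigned int lanes,
					       unsigned int mipi_clk_khz)
{
	return (unsigned long long)lanes * 2 * mipi_clk_khz;
}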
-/* MIPI 499.2MHz 998.4mbps PIXCLK: 399.36MHz */
-#define PLL_SETTINGS_FOR_MIPI_499_2MHZ_SALTBAY \
- {IMX_8BIT, 0x011e, 0x13}, \
- {IMX_8BIT, 0x011f, 0x33}, \
- {IMX_8BIT, 0x0301, 0x05}, \
- {IMX_8BIT, 0x0303, 0x01}, \
- {IMX_8BIT, 0x0305, 0x0c}, \
- {IMX_8BIT, 0x0309, 0x05}, \
- {IMX_8BIT, 0x030b, 0x01}, \
- {IMX_8BIT, 0x030c, 0x02}, \
- {IMX_8BIT, 0x030d, 0x70}, \
- {IMX_8BIT, 0x030e, 0x01}, \
- {IMX_8BIT, 0x3a06, 0x11}, \
- {IMX_8BIT, 0x0830, 0x7f}, \
- {IMX_8BIT, 0x0831, 0x37}, \
- {IMX_8BIT, 0x0832, 0x67}, \
- {IMX_8BIT, 0x0833, 0x3f}, \
- {IMX_8BIT, 0x0834, 0x3f}, \
- {IMX_8BIT, 0x0835, 0x47}, \
- {IMX_8BIT, 0x0836, 0xdf}, \
- {IMX_8BIT, 0x0837, 0x47}, \
- {IMX_8BIT, 0x0839, 0x1f}, \
- {IMX_8BIT, 0x083a, 0x17}, \
- {IMX_8BIT, 0x083b, 0x02}
-
-/* MIPI 451.2MHz 902.4mbps PIXCLK: 360.96MHz */
-#define PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY \
- {IMX_8BIT, 0x011e, 0x13}, \
- {IMX_8BIT, 0x011f, 0x33}, \
- {IMX_8BIT, 0x0301, 0x05}, \
- {IMX_8BIT, 0x0303, 0x01}, \
- {IMX_8BIT, 0x0305, 0x0c}, \
- {IMX_8BIT, 0x0309, 0x05}, \
- {IMX_8BIT, 0x030b, 0x01}, \
- {IMX_8BIT, 0x030c, 0x02}, \
- {IMX_8BIT, 0x030d, 0x34}, \
- {IMX_8BIT, 0x030e, 0x01}, \
- {IMX_8BIT, 0x3a06, 0x11}, \
- {IMX_8BIT, 0x0830, 0x7f}, \
- {IMX_8BIT, 0x0831, 0x37}, \
- {IMX_8BIT, 0x0832, 0x67}, \
- {IMX_8BIT, 0x0833, 0x3f}, \
- {IMX_8BIT, 0x0834, 0x3f}, \
- {IMX_8BIT, 0x0835, 0x47}, \
- {IMX_8BIT, 0x0836, 0xdf}, \
- {IMX_8BIT, 0x0837, 0x47}, \
- {IMX_8BIT, 0x0839, 0x1f}, \
- {IMX_8BIT, 0x083a, 0x17}, \
- {IMX_8BIT, 0x083b, 0x02}
-
-/* MIPI 209.6MHz, 419.2mbps PIXCLK: 335.36 MHz */
-#define PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY \
- {IMX_8BIT, 0x011e, 0x13}, \
- {IMX_8BIT, 0x011f, 0x33}, \
- {IMX_8BIT, 0x0301, 0x05}, \
- {IMX_8BIT, 0x0303, 0x01}, \
- {IMX_8BIT, 0x0305, 0x06}, \
- {IMX_8BIT, 0x0309, 0x05}, \
- {IMX_8BIT, 0x030b, 0x02}, \
- {IMX_8BIT, 0x030c, 0x01}, \
- {IMX_8BIT, 0x030d, 0x06}, \
- {IMX_8BIT, 0x030e, 0x01}, \
- {IMX_8BIT, 0x3a06, 0x12}, \
- {IMX_8BIT, 0x0830, 0x5f}, \
- {IMX_8BIT, 0x0831, 0x1f}, \
- {IMX_8BIT, 0x0832, 0x3f}, \
- {IMX_8BIT, 0x0833, 0x1f}, \
- {IMX_8BIT, 0x0834, 0x1f}, \
- {IMX_8BIT, 0x0835, 0x17}, \
- {IMX_8BIT, 0x0836, 0x67}, \
- {IMX_8BIT, 0x0837, 0x27}, \
- {IMX_8BIT, 0x0839, 0x1f}, \
- {IMX_8BIT, 0x083a, 0x17}, \
- {IMX_8BIT, 0x083b, 0x02}
-
-/* MIPI 342MHz 684mbps PIXCLK: 273.6MHz */
-#define PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY \
- {IMX_8BIT, 0x011e, 0x13}, \
- {IMX_8BIT, 0x011f, 0x33}, \
- {IMX_8BIT, 0x0301, 0x05}, \
- {IMX_8BIT, 0x0303, 0x01}, \
- {IMX_8BIT, 0x0305, 0x08}, \
- {IMX_8BIT, 0x0309, 0x05}, \
- {IMX_8BIT, 0x030b, 0x01}, \
- {IMX_8BIT, 0x030c, 0x01}, \
- {IMX_8BIT, 0x030d, 0x1d}, \
- {IMX_8BIT, 0x030e, 0x01}, \
- {IMX_8BIT, 0x3a06, 0x11}, \
- {IMX_8BIT, 0x0830, 0x77}, \
- {IMX_8BIT, 0x0831, 0x2f}, \
- {IMX_8BIT, 0x0832, 0x4f}, \
- {IMX_8BIT, 0x0833, 0x37}, \
- {IMX_8BIT, 0x0834, 0x2f}, \
- {IMX_8BIT, 0x0835, 0x37}, \
- {IMX_8BIT, 0x0836, 0xa7}, \
- {IMX_8BIT, 0x0837, 0x37}, \
- {IMX_8BIT, 0x0839, 0x1f}, \
- {IMX_8BIT, 0x083a, 0x17}, \
- {IMX_8BIT, 0x083b, 0x02}
-
-/* Basic settings: Applied only once after the sensor power up */
-static struct imx_reg const imx135_init_settings[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- { IMX_8BIT, 0x0220, 0x01},
- { IMX_8BIT, 0x3008, 0xB0},
- { IMX_8BIT, 0x320A, 0x01},
- { IMX_8BIT, 0x320D, 0x10},
- { IMX_8BIT, 0x3216, 0x2E},
- { IMX_8BIT, 0x3230, 0x0A},
- { IMX_8BIT, 0x3228, 0x05},
- { IMX_8BIT, 0x3229, 0x02},
- { IMX_8BIT, 0x322C, 0x02},
- { IMX_8BIT, 0x3302, 0x10},
- { IMX_8BIT, 0x3390, 0x45},
- { IMX_8BIT, 0x3409, 0x0C},
- { IMX_8BIT, 0x340B, 0xF5},
- { IMX_8BIT, 0x340C, 0x2D},
- { IMX_8BIT, 0x3412, 0x41},
- { IMX_8BIT, 0x3413, 0xAD},
- { IMX_8BIT, 0x3414, 0x1E},
- { IMX_8BIT, 0x3427, 0x04},
- { IMX_8BIT, 0x3480, 0x1E},
- { IMX_8BIT, 0x3484, 0x1E},
- { IMX_8BIT, 0x3488, 0x1E},
- { IMX_8BIT, 0x348C, 0x1E},
- { IMX_8BIT, 0x3490, 0x1E},
- { IMX_8BIT, 0x3494, 0x1E},
- { IMX_8BIT, 0x349C, 0x38},
- { IMX_8BIT, 0x34A3, 0x38},
- { IMX_8BIT, 0x3511, 0x8F},
- { IMX_8BIT, 0x3518, 0x00},
- { IMX_8BIT, 0x3519, 0x94},
- { IMX_8BIT, 0x3833, 0x20},
- { IMX_8BIT, 0x3893, 0x01},
- { IMX_8BIT, 0x38C2, 0x08},
- { IMX_8BIT, 0x38C3, 0x08},
- { IMX_8BIT, 0x3C09, 0x01},
- { IMX_8BIT, 0x4000, 0x0E},
- { IMX_8BIT, 0x4300, 0x00},
- { IMX_8BIT, 0x4316, 0x12},
- { IMX_8BIT, 0x4317, 0x22},
- { IMX_8BIT, 0x4318, 0x00},
- { IMX_8BIT, 0x4319, 0x00},
- { IMX_8BIT, 0x431A, 0x00},
- { IMX_8BIT, 0x4324, 0x03},
- { IMX_8BIT, 0x4325, 0x20},
- { IMX_8BIT, 0x4326, 0x03},
- { IMX_8BIT, 0x4327, 0x84},
- { IMX_8BIT, 0x4328, 0x03},
- { IMX_8BIT, 0x4329, 0x20},
- { IMX_8BIT, 0x432A, 0x03},
- { IMX_8BIT, 0x432B, 0x84},
- { IMX_8BIT, 0x432C, 0x01},
- { IMX_8BIT, 0x4401, 0x3F},
- { IMX_8BIT, 0x4402, 0xFF},
- { IMX_8BIT, 0x4412, 0x3F},
- { IMX_8BIT, 0x4413, 0xFF},
- { IMX_8BIT, 0x441D, 0x28},
- { IMX_8BIT, 0x4444, 0x00},
- { IMX_8BIT, 0x4445, 0x00},
- { IMX_8BIT, 0x4446, 0x3F},
- { IMX_8BIT, 0x4447, 0xFF},
- { IMX_8BIT, 0x4452, 0x00},
- { IMX_8BIT, 0x4453, 0xA0},
- { IMX_8BIT, 0x4454, 0x08},
- { IMX_8BIT, 0x4455, 0x00},
- { IMX_8BIT, 0x4458, 0x18},
- { IMX_8BIT, 0x4459, 0x18},
- { IMX_8BIT, 0x445A, 0x3F},
- { IMX_8BIT, 0x445B, 0x3A},
- { IMX_8BIT, 0x4462, 0x00},
- { IMX_8BIT, 0x4463, 0x00},
- { IMX_8BIT, 0x4464, 0x00},
- { IMX_8BIT, 0x4465, 0x00},
- { IMX_8BIT, 0x446E, 0x01},
- { IMX_8BIT, 0x4500, 0x1F},
- { IMX_8BIT, 0x600a, 0x00},
- { IMX_8BIT, 0x380a, 0x00},
- { IMX_8BIT, 0x380b, 0x00},
- { IMX_8BIT, 0x4103, 0x00},
- { IMX_8BIT, 0x4243, 0x9a},
- { IMX_8BIT, 0x4330, 0x01},
- { IMX_8BIT, 0x4331, 0x90},
- { IMX_8BIT, 0x4332, 0x02},
- { IMX_8BIT, 0x4333, 0x58},
- { IMX_8BIT, 0x4334, 0x03},
- { IMX_8BIT, 0x4335, 0x20},
- { IMX_8BIT, 0x4336, 0x03},
- { IMX_8BIT, 0x4337, 0x84},
- { IMX_8BIT, 0x433C, 0x01},
- { IMX_8BIT, 0x4340, 0x02},
- { IMX_8BIT, 0x4341, 0x58},
- { IMX_8BIT, 0x4342, 0x03},
- { IMX_8BIT, 0x4343, 0x52},
- { IMX_8BIT, 0x4364, 0x0b},
- { IMX_8BIT, 0x4368, 0x00},
- { IMX_8BIT, 0x4369, 0x0f},
- { IMX_8BIT, 0x436a, 0x03},
- { IMX_8BIT, 0x436b, 0xa8},
- { IMX_8BIT, 0x436c, 0x00},
- { IMX_8BIT, 0x436d, 0x00},
- { IMX_8BIT, 0x436e, 0x00},
- { IMX_8BIT, 0x436f, 0x06},
- { IMX_8BIT, 0x4281, 0x21},
- { IMX_8BIT, 0x4282, 0x18},
- { IMX_8BIT, 0x4283, 0x04},
- { IMX_8BIT, 0x4284, 0x08},
- { IMX_8BIT, 0x4287, 0x7f},
- { IMX_8BIT, 0x4288, 0x08},
- { IMX_8BIT, 0x428c, 0x08},
- { IMX_8BIT, 0x4297, 0x00},
- { IMX_8BIT, 0x4299, 0x7E},
- { IMX_8BIT, 0x42A4, 0xFB},
- { IMX_8BIT, 0x42A5, 0x7E},
- { IMX_8BIT, 0x42A6, 0xDF},
- { IMX_8BIT, 0x42A7, 0xB7},
- { IMX_8BIT, 0x42AF, 0x03},
- { IMX_8BIT, 0x4207, 0x03},
- { IMX_8BIT, 0x4218, 0x00},
- { IMX_8BIT, 0x421B, 0x20},
- { IMX_8BIT, 0x421F, 0x04},
- { IMX_8BIT, 0x4222, 0x02},
- { IMX_8BIT, 0x4223, 0x22},
- { IMX_8BIT, 0x422E, 0x54},
- { IMX_8BIT, 0x422F, 0xFB},
- { IMX_8BIT, 0x4230, 0xFF},
- { IMX_8BIT, 0x4231, 0xFE},
- { IMX_8BIT, 0x4232, 0xFF},
- { IMX_8BIT, 0x4235, 0x58},
- { IMX_8BIT, 0x4236, 0xF7},
- { IMX_8BIT, 0x4237, 0xFD},
- { IMX_8BIT, 0x4239, 0x4E},
- { IMX_8BIT, 0x423A, 0xFC},
- { IMX_8BIT, 0x423B, 0xFD},
- { IMX_8BIT, 0x4300, 0x00},
- { IMX_8BIT, 0x4316, 0x12},
- { IMX_8BIT, 0x4317, 0x22},
- { IMX_8BIT, 0x4318, 0x00},
- { IMX_8BIT, 0x4319, 0x00},
- { IMX_8BIT, 0x431A, 0x00},
- { IMX_8BIT, 0x4324, 0x03},
- { IMX_8BIT, 0x4325, 0x20},
- { IMX_8BIT, 0x4326, 0x03},
- { IMX_8BIT, 0x4327, 0x84},
- { IMX_8BIT, 0x4328, 0x03},
- { IMX_8BIT, 0x4329, 0x20},
- { IMX_8BIT, 0x432A, 0x03},
- { IMX_8BIT, 0x432B, 0x20},
- { IMX_8BIT, 0x432C, 0x01},
- { IMX_8BIT, 0x432D, 0x01},
- { IMX_8BIT, 0x4338, 0x02},
- { IMX_8BIT, 0x4339, 0x00},
- { IMX_8BIT, 0x433A, 0x00},
- { IMX_8BIT, 0x433B, 0x02},
- { IMX_8BIT, 0x435A, 0x03},
- { IMX_8BIT, 0x435B, 0x84},
- { IMX_8BIT, 0x435E, 0x01},
- { IMX_8BIT, 0x435F, 0xFF},
- { IMX_8BIT, 0x4360, 0x01},
- { IMX_8BIT, 0x4361, 0xF4},
- { IMX_8BIT, 0x4362, 0x03},
- { IMX_8BIT, 0x4363, 0x84},
- { IMX_8BIT, 0x437B, 0x01},
- { IMX_8BIT, 0x4400, 0x00}, /* STATS off ISP do not support STATS*/
- { IMX_8BIT, 0x4401, 0x3F},
- { IMX_8BIT, 0x4402, 0xFF},
- { IMX_8BIT, 0x4404, 0x13},
- { IMX_8BIT, 0x4405, 0x26},
- { IMX_8BIT, 0x4406, 0x07},
- { IMX_8BIT, 0x4408, 0x20},
- { IMX_8BIT, 0x4409, 0xE5},
- { IMX_8BIT, 0x440A, 0xFB},
- { IMX_8BIT, 0x440C, 0xF6},
- { IMX_8BIT, 0x440D, 0xEA},
- { IMX_8BIT, 0x440E, 0x20},
- { IMX_8BIT, 0x4410, 0x00},
- { IMX_8BIT, 0x4411, 0x00},
- { IMX_8BIT, 0x4412, 0x3F},
- { IMX_8BIT, 0x4413, 0xFF},
- { IMX_8BIT, 0x4414, 0x1F},
- { IMX_8BIT, 0x4415, 0xFF},
- { IMX_8BIT, 0x4416, 0x20},
- { IMX_8BIT, 0x4417, 0x00},
- { IMX_8BIT, 0x4418, 0x1F},
- { IMX_8BIT, 0x4419, 0xFF},
- { IMX_8BIT, 0x441A, 0x20},
- { IMX_8BIT, 0x441B, 0x00},
- { IMX_8BIT, 0x441D, 0x40},
- { IMX_8BIT, 0x441E, 0x1E},
- { IMX_8BIT, 0x441F, 0x38},
- { IMX_8BIT, 0x4420, 0x01},
- { IMX_8BIT, 0x4444, 0x00},
- { IMX_8BIT, 0x4445, 0x00},
- { IMX_8BIT, 0x4446, 0x1D},
- { IMX_8BIT, 0x4447, 0xF9},
- { IMX_8BIT, 0x4452, 0x00},
- { IMX_8BIT, 0x4453, 0xA0},
- { IMX_8BIT, 0x4454, 0x08},
- { IMX_8BIT, 0x4455, 0x00},
- { IMX_8BIT, 0x4456, 0x0F},
- { IMX_8BIT, 0x4457, 0xFF},
- { IMX_8BIT, 0x4458, 0x18},
- { IMX_8BIT, 0x4459, 0x18},
- { IMX_8BIT, 0x445A, 0x3F},
- { IMX_8BIT, 0x445B, 0x3A},
- { IMX_8BIT, 0x445C, 0x00},
- { IMX_8BIT, 0x445D, 0x28},
- { IMX_8BIT, 0x445E, 0x01},
- { IMX_8BIT, 0x445F, 0x90},
- { IMX_8BIT, 0x4460, 0x00},
- { IMX_8BIT, 0x4461, 0x60},
- { IMX_8BIT, 0x4462, 0x00},
- { IMX_8BIT, 0x4463, 0x00},
- { IMX_8BIT, 0x4464, 0x00},
- { IMX_8BIT, 0x4465, 0x00},
- { IMX_8BIT, 0x446C, 0x00},
- { IMX_8BIT, 0x446D, 0x00},
- { IMX_8BIT, 0x446E, 0x00},
- { IMX_8BIT, 0x452A, 0x02},
- { IMX_8BIT, 0x0712, 0x01},
- { IMX_8BIT, 0x0713, 0x00},
- { IMX_8BIT, 0x0714, 0x01},
- { IMX_8BIT, 0x0715, 0x00},
- { IMX_8BIT, 0x0716, 0x01},
- { IMX_8BIT, 0x0717, 0x00},
- { IMX_8BIT, 0x0718, 0x01},
- { IMX_8BIT, 0x0719, 0x00},
- { IMX_8BIT, 0x4500, 0x1F },
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- { IMX_8BIT, 0x0205, 0x00},
- { IMX_8BIT, 0x020E, 0x01},
- { IMX_8BIT, 0x020F, 0x00},
- { IMX_8BIT, 0x0210, 0x02},
- { IMX_8BIT, 0x0211, 0x00},
- { IMX_8BIT, 0x0212, 0x02},
- { IMX_8BIT, 0x0213, 0x00},
- { IMX_8BIT, 0x0214, 0x01},
- { IMX_8BIT, 0x0215, 0x00},
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00},
- { IMX_8BIT, 0x0231, 0x00},
- { IMX_8BIT, 0x0233, 0x00},
- { IMX_8BIT, 0x0234, 0x00},
- { IMX_8BIT, 0x0235, 0x40},
- { IMX_8BIT, 0x0238, 0x00},
- { IMX_8BIT, 0x0239, 0x04},
- { IMX_8BIT, 0x023B, 0x00},
- { IMX_8BIT, 0x023C, 0x01},
- { IMX_8BIT, 0x33B0, 0x04},
- { IMX_8BIT, 0x33B1, 0x00},
- { IMX_8BIT, 0x33B3, 0x00},
- { IMX_8BIT, 0x33B4, 0x01},
- { IMX_8BIT, 0x3800, 0x00},
- GROUPED_PARAMETER_HOLD_DISABLE,
- { IMX_TOK_TERM, 0, 0}
-};
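Tables like the one above are applied by walking entries until the IMX_TOK_TERM sentinel. A minimal consumer sketch; the field names follow the positional { type, reg, val } initializers and imx_write_reg() is a hypothetical stand-in for the driver's real I2C write helper, which is not part of this header:

/*
 * Apply a sensor register table, stopping at the IMX_TOK_TERM sentinel.
 * imx_write_reg() is assumed to write one register and return 0 on success.
 */
static int imx_apply_reg_table(const struct imx_reg *table)
{
	int ret;

	for (; table->type != IMX_TOK_TERM; table++) {
		ret = imx_write_reg(table->type, table->reg, table->val);
		if (ret)
			return ret;
	}
	return 0;
}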
-
-/********* Preview, continuous capture and still modes *****************/
-
-static struct imx_reg const imx135_13m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size Setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 0, 0, 4207,3119 4208x3120 */
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x00},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x6F},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x2F},
- {IMX_8BIT, 0x034C, 0x10},
- {IMX_8BIT, 0x034D, 0x70},
- {IMX_8BIT, 0x034E, 0x0C},
- {IMX_8BIT, 0x034F, 0x30},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* 4208x3120 */
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x0C},
- {IMX_8BIT, 0x0357, 0x30},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x10},
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x0C},
- {IMX_8BIT, 0x3313, 0x30},
- {IMX_8BIT, 0x331C, 0x00},
- {IMX_8BIT, 0x331D, 0x10},
- {IMX_8BIT, 0x4084, 0x00}, /* If scaling, Fill this */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-/* 13MP reduced pixel clock MIPI 342MHz is EMC friendly*/
-static struct imx_reg const imx135_13m_for_mipi_342[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size Setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x00},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x6F},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x2F},
- {IMX_8BIT, 0x034C, 0x10},
- {IMX_8BIT, 0x034D, 0x70},
- {IMX_8BIT, 0x034E, 0x0C},
- {IMX_8BIT, 0x034F, 0x30},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10},
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x0C},
- {IMX_8BIT, 0x0357, 0x30},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x10},
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x0C},
- {IMX_8BIT, 0x3313, 0x30},
- {IMX_8BIT, 0x331C, 0x00},
- {IMX_8BIT, 0x331D, 0x10},
- {IMX_8BIT, 0x4084, 0x00}, /* If scaling, Fill this */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_10m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 0, 376, 4207, 2743 */
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x78},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x6f},
- {IMX_8BIT, 0x034A, 0x0a},
- {IMX_8BIT, 0x034B, 0xb7},
- {IMX_8BIT, 0x034C, 0x10}, /* 4208x2368 */
- {IMX_8BIT, 0x034D, 0x70},
- {IMX_8BIT, 0x034E, 0x09},
- {IMX_8BIT, 0x034F, 0x40},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10},
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0x40},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x10},
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0x40},
- {IMX_8BIT, 0x331C, 0x01},
- {IMX_8BIT, 0x331D, 0x68},
- {IMX_8BIT, 0x4084, 0x00},
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_10m_for_mipi_342[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 0, 376, 4207, 2743 */
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x78},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x6f},
- {IMX_8BIT, 0x034A, 0x0a},
- {IMX_8BIT, 0x034B, 0xb7},
- {IMX_8BIT, 0x034C, 0x10}, /* 4208x2368 */
- {IMX_8BIT, 0x034D, 0x70},
- {IMX_8BIT, 0x034E, 0x09},
- {IMX_8BIT, 0x034F, 0x40},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10},
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0x40},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x10},
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0x40},
- {IMX_8BIT, 0x331C, 0x01},
- {IMX_8BIT, 0x331D, 0x68},
- {IMX_8BIT, 0x4084, 0x00},
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * It is 8.5 DS from (3:2)8m cropped setting.
- *
- * The 8m(3:2) cropped setting is 2992x2448 effective res.
- * The ISP effective cropped setting should be 352x288 effective res.
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 368x304
- * cropped region is 3128x2584
- */
-static struct imx_reg const imx135_368x304_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11}, /* no binning */
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* resize */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x88}, /* 136/16=8.5 */
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x02}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0x1C}, /* 540 */
- {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x0C}, /* 268 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0x53}, /* 3667 */
- {IMX_8BIT, 0x034A, 0x0B}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0x23}, /* 2851 */
- {IMX_8BIT, 0x034C, 0x01}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x70}, /* 368 */
- {IMX_8BIT, 0x034E, 0x01}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0x30}, /* 304 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
-	{IMX_8BIT, 0x0354, 0x0C}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x38},
- {IMX_8BIT, 0x0356, 0x0A},
- {IMX_8BIT, 0x0357, 0x18},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x01}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x01},
- {IMX_8BIT, 0x3313, 0x30},
- {IMX_8BIT, 0x331C, 0x02}, /* ?? */
- {IMX_8BIT, 0x331D, 0xD0},
- {IMX_8BIT, 0x4084, 0x01}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x70},
- {IMX_8BIT, 0x4086, 0x01},
- {IMX_8BIT, 0x4087, 0x30},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
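In the resize modes, register 0x0405 holds the down-scaling factor as a /16 fraction (0x88 = 136 here, i.e. 136/16 = 8.5). A quick check against the crop programmed above; the arithmetic is added for clarity and the helper name is illustrative:

/*
 * Output size from the crop size and the 0x0405 scale value:
 *   width:  3128 * 16 / 136 = 368  (X_OUT_SIZE)
 *   height: 2584 * 16 / 136 = 304  (Y_OUT_SIZE)
 */
static inline unsigned int imx_scaled_size(unsigned int crop, unsigned int scale_m)
{
	return crop * 16 / scale_m;
}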
-
-/*
- * It is 1/4 binning from 8m cropped setting.
- *
- * The 8m cropped setting is 3264x2448 effective res.
- * The xga cropped setting should be 816x612 effective res.
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 832x628
- * cropped region is 3328x2512
- */
-static struct imx_reg const imx135_xga_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x44},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
-/* {IMX_8BIT, 0x4203, 0xFF}, */
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0xB8}, /* 440 */
- {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x30}, /* 304 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0xB7}, /* 4207-440=3767 */
- {IMX_8BIT, 0x034A, 0x0A}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0xFF}, /* 3119-304=2815 */
- {IMX_8BIT, 0x034C, 0x03}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x40}, /* 832 */
- {IMX_8BIT, 0x034E, 0x02}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0x74}, /* 628 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x03}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x40},
- {IMX_8BIT, 0x0356, 0x02},
- {IMX_8BIT, 0x0357, 0x74},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x03}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x40},
- {IMX_8BIT, 0x3312, 0x02},
- {IMX_8BIT, 0x3313, 0x74},
- {IMX_8BIT, 0x331C, 0x02}, /* ?? */
- {IMX_8BIT, 0x331D, 0x21},
- {IMX_8BIT, 0x4084, 0x03}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x40},
- {IMX_8BIT, 0x4086, 0x02},
- {IMX_8BIT, 0x4087, 0x74},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
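The padding comment above works out as follows: the ISP wants 16 extra pixels in each dimension (816 + 16 = 832, 612 + 16 = 628), and with the 1/4 (4x4) binning selected by 0x0391 = 0x44 the readout crop is four times the output in each direction (832 * 4 = 3328 = 3767 - 440 + 1, 628 * 4 = 2512 = 2815 - 304 + 1), matching the X/Y_ADD_STA/END values programmed above. This arithmetic is added for clarity and is not part of the original header.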
-
-/*
- * It is 28/16 DS from (16:9)8m cropped setting.
- *
- * The 8m(16:9) cropped setting is 3360x1890 effective res.
- * - this is larger than the expected 3264x1836 FOV
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 1936x1096
- * cropped region is 3388x1918
- */
-static struct imx_reg const imx135_1936x1096_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11}, /* no binning */
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* resize */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x1C}, /* 28/16 */
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0x9A}, /* 410 */
- {IMX_8BIT, 0x0346, 0x02}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x58}, /* 600 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0xD5}, /* 3797 */
- {IMX_8BIT, 0x034A, 0x09}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0xD5}, /* 2517 */
- {IMX_8BIT, 0x034C, 0x07}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x90}, /* 1936 */
- {IMX_8BIT, 0x034E, 0x04}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0x48}, /* 1096 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
-	{IMX_8BIT, 0x0354, 0x0D}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x3C},
- {IMX_8BIT, 0x0356, 0x07},
- {IMX_8BIT, 0x0357, 0x7E},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x07}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x90},
- {IMX_8BIT, 0x3312, 0x04},
- {IMX_8BIT, 0x3313, 0x48},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0xAA},
- {IMX_8BIT, 0x4084, 0x07}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x90},
- {IMX_8BIT, 0x4086, 0x04},
- {IMX_8BIT, 0x4087, 0x48},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * It is 2.125 DS from (3:2)8m cropped setting.
- *
- * The 8m(3:2) cropped setting is 2992x2448 effective res.
- * The ISP effective cropped setting should be 1408x1152 effective res.
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 1424x1168
- * cropped region is 3026x2482
- */
-static struct imx_reg const imx135_1424x1168_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11}, /* no binning */
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* resize */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x22}, /* 34/16=2.125 */
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x02}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0x4E}, /* 590 */
- {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x3E}, /* 318 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0x1F}, /* 3615 */
- {IMX_8BIT, 0x034A, 0x0A}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0xEF}, /* 2799 */
- {IMX_8BIT, 0x034C, 0x05}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x90}, /* 1424 */
- {IMX_8BIT, 0x034E, 0x04}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0x90}, /* 1168 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
-	{IMX_8BIT, 0x0354, 0x0B}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0xD2},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0xB2},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x05}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x90},
- {IMX_8BIT, 0x3312, 0x04},
- {IMX_8BIT, 0x3313, 0x90},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0xAA},
- {IMX_8BIT, 0x4084, 0x05}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x90},
- {IMX_8BIT, 0x4086, 0x04},
- {IMX_8BIT, 0x4087, 0x90},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * It is 1/2 binning from 8m cropped setting.
- *
- * The 8m cropped setting is 3264x2448 effective res.
- * The 2m cropped setting should be 1632x1224 effective res.
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 1648x1240
- * cropped region is 3296x2480
- */
-static struct imx_reg const imx135_2m_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x22},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0xC8}, /* 464(1D0) -> 456(1C8)*/
- {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x40}, /* 320 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0xA7}, /* 4207-456=3751 */
- {IMX_8BIT, 0x034A, 0x0A}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0xEF}, /* 3119-320=2799 */
- {IMX_8BIT, 0x034C, 0x06}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x70}, /* 1648 */
- {IMX_8BIT, 0x034E, 0x04}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0xD8}, /* 1240 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x06}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x04},
- {IMX_8BIT, 0x0357, 0xD8},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x06}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x04},
- {IMX_8BIT, 0x3313, 0xD8},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0xAA},
- {IMX_8BIT, 0x4084, 0x00}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * 8M Cropped 16:9 setting
- *
- * Effective res: 3264x1836
- * Sensor out: 3280x1852
- */
-static struct imx_reg const imx135_6m_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xD0},
- {IMX_8BIT, 0x0346, 0x02}, /* 634 */
- {IMX_8BIT, 0x0347, 0x7A},
- {IMX_8BIT, 0x0348, 0x0E},
- {IMX_8BIT, 0x0349, 0x9F},
- {IMX_8BIT, 0x034A, 0x09}, /* 2485 */
- {IMX_8BIT, 0x034B, 0xB5},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x07}, /* 1852 */
- {IMX_8BIT, 0x034F, 0x3C},
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x0C}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0xD0},
- {IMX_8BIT, 0x0356, 0x07},
- {IMX_8BIT, 0x0357, 0x3C},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x07},
- {IMX_8BIT, 0x3313, 0x3C},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0x10},
- {IMX_8BIT, 0x4084, 0x00}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_8m_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xD0},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x48},
- {IMX_8BIT, 0x0348, 0x0E},
- {IMX_8BIT, 0x0349, 0x9F},
- {IMX_8BIT, 0x034A, 0x0A},
- {IMX_8BIT, 0x034B, 0xE7},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x09}, /* 2464 */
- {IMX_8BIT, 0x034F, 0xA0},
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x0C}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0xD0},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0xA0},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0xA0},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0x10},
- {IMX_8BIT, 0x4084, 0x00}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_8m_scaled_from_12m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* Scaling */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x14},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x36},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x14},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x39},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x1B},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280x2464 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x09},
- {IMX_8BIT, 0x034F, 0xA0},
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x04},
- {IMX_8BIT, 0x0356, 0x0C},
- {IMX_8BIT, 0x0357, 0x08},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0xA0},
- {IMX_8BIT, 0x331C, 0x02}, /* ?? */
- {IMX_8BIT, 0x331D, 0xA0},
- {IMX_8BIT, 0x4084, 0x0C}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0xD0},
- {IMX_8BIT, 0x4086, 0x09},
- {IMX_8BIT, 0x4087, 0xA0},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_8m_scaled_from_12m_for_mipi342[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* Scaling */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x14},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x36},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x14},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x39},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x1B},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280x2464 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x09},
- {IMX_8BIT, 0x034F, 0xA0},
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x04},
- {IMX_8BIT, 0x0356, 0x0C},
- {IMX_8BIT, 0x0357, 0x08},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0xA0},
- {IMX_8BIT, 0x331C, 0x02}, /* ?? */
- {IMX_8BIT, 0x331D, 0xA0},
- {IMX_8BIT, 0x4084, 0x0C}, /* Resize IMG Hand V size-> Scaling related?*/
- {IMX_8BIT, 0x4085, 0xD0},
- {IMX_8BIT, 0x4086, 0x09},
- {IMX_8BIT, 0x4087, 0xA0},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_6m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x14},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 36, 194, 1039, a9f 4100x2316 */
- {IMX_8BIT, 0x0345, 0x36},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x94},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x39},
- {IMX_8BIT, 0x034A, 0x0A},
- {IMX_8BIT, 0x034B, 0x9F},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280x1852 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x07},
- {IMX_8BIT, 0x034F, 0x3C},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* 4100x2316 */
- {IMX_8BIT, 0x0355, 0x04},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0x0C},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x0C},
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x07},
- {IMX_8BIT, 0x3313, 0x3C},
- {IMX_8BIT, 0x331C, 0x02},
- {IMX_8BIT, 0x331D, 0xA0},
- {IMX_8BIT, 0x4084, 0x0C},
- {IMX_8BIT, 0x4085, 0xD0},
- {IMX_8BIT, 0x4086, 0x07},
- {IMX_8BIT, 0x4087, 0x3C},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_6m_for_mipi_342[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x14},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 36, 194, 1039, a9f 4100x2316 */
- {IMX_8BIT, 0x0345, 0x36},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x94},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x39},
- {IMX_8BIT, 0x034A, 0x0A},
- {IMX_8BIT, 0x034B, 0x9F},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280x1852 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x07},
- {IMX_8BIT, 0x034F, 0x3C},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* 4100x2316 */
- {IMX_8BIT, 0x0355, 0x04},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0x0C},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x0C},
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x07},
- {IMX_8BIT, 0x3313, 0x3C},
- {IMX_8BIT, 0x331C, 0x02},
- {IMX_8BIT, 0x331D, 0xA0},
- {IMX_8BIT, 0x4084, 0x0C},
- {IMX_8BIT, 0x4085, 0xD0},
- {IMX_8BIT, 0x4086, 0x07},
- {IMX_8BIT, 0x4087, 0x3C},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * FOV is: 3280x2464, larger than 3264x2448.
- * Sensor output: 336x256
- * Cropping region: 3444x2624
- */
-static struct imx_reg const imx135_336x256[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x22},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* 2x binning */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x52}, /* scaling: 82/16 */
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01}, /* x_start: 374 */
- {IMX_8BIT, 0x0345, 0x76},
- {IMX_8BIT, 0x0346, 0x00}, /* y_start: 248 */
- {IMX_8BIT, 0x0347, 0xF8},
- {IMX_8BIT, 0x0348, 0x0E}, /* x_end: 3817 */
- {IMX_8BIT, 0x0349, 0xE9},
- {IMX_8BIT, 0x034A, 0x0B}, /* y_end: 2871 */
- {IMX_8BIT, 0x034B, 0x37},
- {IMX_8BIT, 0x034C, 0x01}, /* x_out: 336 */
- {IMX_8BIT, 0x034D, 0x50},
- {IMX_8BIT, 0x034E, 0x01}, /* y_out: 256 */
- {IMX_8BIT, 0x034F, 0x00},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x06}, /* dig x_out: 1722 */
- {IMX_8BIT, 0x0355, 0xBA},
- {IMX_8BIT, 0x0356, 0x05}, /* dig y_out: 1312 */
- {IMX_8BIT, 0x0357, 0x20},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x01}, /* ?: x_out */
- {IMX_8BIT, 0x3311, 0x50},
- {IMX_8BIT, 0x3312, 0x01}, /* ?: y_out */
- {IMX_8BIT, 0x3313, 0x00},
- {IMX_8BIT, 0x331C, 0x02},
- {IMX_8BIT, 0x331D, 0x4E},
- {IMX_8BIT, 0x4084, 0x01}, /* ?: x_out */
- {IMX_8BIT, 0x4085, 0x50},
- {IMX_8BIT, 0x4086, 0x01}, /* ?: y_out */
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_1m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x22},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x1F},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x58},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x28},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x17},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x07},
- {IMX_8BIT, 0x034C, 0x04},
- {IMX_8BIT, 0x034D, 0x10},
- {IMX_8BIT, 0x034E, 0x03},
- {IMX_8BIT, 0x034F, 0x10},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x07},
- {IMX_8BIT, 0x0355, 0xE0},
- {IMX_8BIT, 0x0356, 0x05},
- {IMX_8BIT, 0x0357, 0xF0},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x04},
- {IMX_8BIT, 0x3311, 0x10},
- {IMX_8BIT, 0x3312, 0x03},
- {IMX_8BIT, 0x3313, 0x10},
- {IMX_8BIT, 0x331C, 0x02},
- {IMX_8BIT, 0x331D, 0x4E},
- {IMX_8BIT, 0x4084, 0x04},
- {IMX_8BIT, 0x4085, 0x10},
- {IMX_8BIT, 0x4086, 0x03},
- {IMX_8BIT, 0x4087, 0x10},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_3m_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01}, /* Binning */
- {IMX_8BIT, 0x0391, 0x22}, /* 2x2 binning */
- {IMX_8BIT, 0x0392, 0x00}, /* average */
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x28},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x08},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x47},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x27},
- {IMX_8BIT, 0x034C, 0x08},
- {IMX_8BIT, 0x034D, 0x10},
- {IMX_8BIT, 0x034E, 0x06},
- {IMX_8BIT, 0x034F, 0x10},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x08},
- {IMX_8BIT, 0x0355, 0x10},
- {IMX_8BIT, 0x0356, 0x06},
- {IMX_8BIT, 0x0357, 0x10},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x08},
- {IMX_8BIT, 0x3311, 0x10},
- {IMX_8BIT, 0x3312, 0x06},
- {IMX_8BIT, 0x3313, 0x10},
- {IMX_8BIT, 0x331C, 0x00},
- {IMX_8BIT, 0x331D, 0xAA},
- {IMX_8BIT, 0x4084, 0x00},
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-/* 1080P 1936x1104 */
-static struct imx_reg const imx135_1080p_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x22},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x11},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x2E},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x84},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x41},
- {IMX_8BIT, 0x034A, 0x0A},
- {IMX_8BIT, 0x034B, 0xAF},
- {IMX_8BIT, 0x034C, 0x07},
- {IMX_8BIT, 0x034D, 0x90},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x50},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x08},
- {IMX_8BIT, 0x0355, 0x0A},
- {IMX_8BIT, 0x0356, 0x04},
- {IMX_8BIT, 0x0357, 0x96},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x07},
- {IMX_8BIT, 0x3311, 0x90},
- {IMX_8BIT, 0x3312, 0x04},
- {IMX_8BIT, 0x3313, 0x50},
- {IMX_8BIT, 0x331C, 0x01},
- {IMX_8BIT, 0x331D, 0x00},
- {IMX_8BIT, 0x4084, 0x07},
- {IMX_8BIT, 0x4085, 0x90},
- {IMX_8BIT, 0x4086, 0x04},
- {IMX_8BIT, 0x4087, 0x50},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static const struct imx_reg imx135_1080p_nodvs_fullfov_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 168,464,4039,2655: 3872x2192 */
- { IMX_8BIT, 0x0345, 0xA8 },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0xD0 },
- { IMX_8BIT, 0x0348, 0x0F },
- { IMX_8BIT, 0x0349, 0xC7 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0x5F },
- { IMX_8BIT, 0x034C, 0x07 }, /*1936 x 1096 */
- { IMX_8BIT, 0x034D, 0x90 },
- { IMX_8BIT, 0x034E, 0x04 },
- { IMX_8BIT, 0x034F, 0x48 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x07 }, /*1936 x 1096 */
- { IMX_8BIT, 0x0355, 0x90 },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x48 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x07 },
- { IMX_8BIT, 0x3311, 0x90 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x48 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0xB0 },
- { IMX_8BIT, 0x4084, 0x07 },
- { IMX_8BIT, 0x4085, 0x90 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x48 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* 1080P NODVS 1936x1096 */
-static const struct imx_reg imx135_1080p_nodvs_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x11 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 46,396,4161,2727: 4116x2332 */
- { IMX_8BIT, 0x0345, 0x2E },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x8C },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x41 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0xA7 },
- { IMX_8BIT, 0x034C, 0x07 }, /*1936 x 1096 */
- { IMX_8BIT, 0x034D, 0x90 },
- { IMX_8BIT, 0x034E, 0x04 },
- { IMX_8BIT, 0x034F, 0x48 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /* 2058x1166 */
- { IMX_8BIT, 0x0355, 0x0A },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x8E },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x07 },
- { IMX_8BIT, 0x3311, 0x90 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x48 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0xB0 },
- { IMX_8BIT, 0x4084, 0x07 },
- { IMX_8BIT, 0x4085, 0x90 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x48 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* 1080P 10%DVS 2104x1184 */
-static const struct imx_reg imx135_1080p_10_dvs_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 0,376,4207,2743: 4208x2368 */
- { IMX_8BIT, 0x0345, 0x00 },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x78 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x6F },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0xB7 },
- { IMX_8BIT, 0x034C, 0x08 }, /* 2104 x 1184 */
- { IMX_8BIT, 0x034D, 0x38 },
- { IMX_8BIT, 0x034E, 0x04 },
- { IMX_8BIT, 0x034F, 0xA0 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /* 2104 x 1184 */
- { IMX_8BIT, 0x0355, 0x38 },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0xA0 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x08 },
- { IMX_8BIT, 0x3311, 0x38 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0xA0 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0xB0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx135_720pdvs_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x15 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 46,404,4161,2715: 4116x2312 */
- { IMX_8BIT, 0x0345, 0x2E },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x94 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x41 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0x9B },
- { IMX_8BIT, 0x034C, 0x06 }, /*1568 x 880 */
- { IMX_8BIT, 0x034D, 0x20 },
- { IMX_8BIT, 0x034E, 0x03 },
- { IMX_8BIT, 0x034F, 0x70 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /*2058 x 1156 */
- { IMX_8BIT, 0x0355, 0x0A },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x84 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x70 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x4C },
- { IMX_8BIT, 0x4084, 0x06 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x03 },
- { IMX_8BIT, 0x4087, 0x70 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/******************* Video Modes ******************/
-
-/* 1080P DVS 2336x1320 */
-static const struct imx_reg imx135_2336x1320_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 },
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1C },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 60,404,4147,2715: 4088x2312 */
- { IMX_8BIT, 0x0345, 0x3C },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x94 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x33 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0x9B },
- { IMX_8BIT, 0x034C, 0x09 }, /*2336 x 1320 */
- { IMX_8BIT, 0x034D, 0x20 },
- { IMX_8BIT, 0x034E, 0x05 },
- { IMX_8BIT, 0x034F, 0x28 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x0F }, /* 4088x2312 */
- { IMX_8BIT, 0x0355, 0xF8 },
- { IMX_8BIT, 0x0356, 0x09 },
- { IMX_8BIT, 0x0357, 0x08 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x09 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x05 },
- { IMX_8BIT, 0x3313, 0x28 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0xE2 },
- { IMX_8BIT, 0x4084, 0x09 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x05 },
- { IMX_8BIT, 0x4087, 0x28 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* 1080P DVS 2336x1320 Cropped */
-static const struct imx_reg imx135_2336x1320_cropped_mipi499[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_499_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 },
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1C },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x03 }, /* 936,900,3271,2219: 2336x1320 */
- { IMX_8BIT, 0x0345, 0xA8 },
- { IMX_8BIT, 0x0346, 0x03 },
- { IMX_8BIT, 0x0347, 0x84 },
- { IMX_8BIT, 0x0348, 0x0C },
- { IMX_8BIT, 0x0349, 0xC7 },
- { IMX_8BIT, 0x034A, 0x08 },
- { IMX_8BIT, 0x034B, 0xAB },
- { IMX_8BIT, 0x034C, 0x09 }, /* 2336 x 1320 */
- { IMX_8BIT, 0x034D, 0x20 },
- { IMX_8BIT, 0x034E, 0x05 },
- { IMX_8BIT, 0x034F, 0x28 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x09 }, /* 2336 x 1320 */
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x05 },
- { IMX_8BIT, 0x0357, 0x28 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x09 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x05 },
- { IMX_8BIT, 0x3313, 0x28 },
- { IMX_8BIT, 0x331C, 0x00 },
- { IMX_8BIT, 0x331D, 0xB4 },
- { IMX_8BIT, 0x4084, 0x09 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x05 },
- { IMX_8BIT, 0x4087, 0x28 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* 720P DVS 1568 x 880 */
-static const struct imx_reg imx135_720p_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x15 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 46,404,4161,2715: 4116x2312 */
- { IMX_8BIT, 0x0345, 0x2e },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x94 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x41 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0x9B },
- { IMX_8BIT, 0x034C, 0x06 }, /*1568 x 880 */
- { IMX_8BIT, 0x034D, 0x20 },
- { IMX_8BIT, 0x034E, 0x03 },
- { IMX_8BIT, 0x034F, 0x70 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /* 2058x1156 */
- { IMX_8BIT, 0x0355, 0x0a },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x84 },
- { IMX_8BIT, 0x301D, 0x30 }, /* TODO! */
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x70 },
- { IMX_8BIT, 0x331C, 0x01 }, /* TODO! */
- { IMX_8BIT, 0x331D, 0xd6 }, /* TODO! */
- { IMX_8BIT, 0x4084, 0x06 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x03 },
- { IMX_8BIT, 0x4087, 0x70 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* wvga: H : 1640 V : 1024 */
-static const struct imx_reg imx135_wvga_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x22 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x14 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 },
- {IMX_8BIT, 0x0345, 0x36 },
- {IMX_8BIT, 0x0346, 0x01 },
- {IMX_8BIT, 0x0347, 0x18 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x39 },
- {IMX_8BIT, 0x034A, 0x0B },
- {IMX_8BIT, 0x034B, 0x17 },
- {IMX_8BIT, 0x034C, 0x06 },
- {IMX_8BIT, 0x034D, 0x68 },
- {IMX_8BIT, 0x034E, 0x04 },
- {IMX_8BIT, 0x034F, 0x00 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x08 },
- {IMX_8BIT, 0x0355, 0x02 },
- {IMX_8BIT, 0x0356, 0x05 },
- {IMX_8BIT, 0x0357, 0x00 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x06 },
- {IMX_8BIT, 0x3311, 0x68 },
- {IMX_8BIT, 0x3312, 0x04 },
- {IMX_8BIT, 0x3313, 0x00 },
- {IMX_8BIT, 0x331C, 0x01 },
- {IMX_8BIT, 0x331D, 0xBD },
- {IMX_8BIT, 0x4084, 0x06 },
- {IMX_8BIT, 0x4085, 0x68 },
- {IMX_8BIT, 0x4086, 0x04 },
- {IMX_8BIT, 0x4087, 0x00 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* 480P 1036 x 696 */
-static const struct imx_reg imx135_480p_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x00 },
- {IMX_8BIT, 0x0404, 0x00 },
-	{IMX_8BIT, 0x0405, 0x10 }, /* No scaling */
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 52,20,4155, 3099 4144x2784*/
- {IMX_8BIT, 0x0345, 0x20 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0xA8 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x4F },
- {IMX_8BIT, 0x034A, 0x0B },
- {IMX_8BIT, 0x034B, 0x88 },
- {IMX_8BIT, 0x034C, 0x04 }, /* 1036 * 696 */
- {IMX_8BIT, 0x034D, 0x0C },
- {IMX_8BIT, 0x034E, 0x02 },
- {IMX_8BIT, 0x034F, 0xB8 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x04 }, /* 1036x696 */
- {IMX_8BIT, 0x0355, 0x0C },
- {IMX_8BIT, 0x0356, 0x02 },
- {IMX_8BIT, 0x0357, 0xB8 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x04 },
- {IMX_8BIT, 0x3311, 0x0C },
- {IMX_8BIT, 0x3312, 0x02 },
- {IMX_8BIT, 0x3313, 0xB8 },
- {IMX_8BIT, 0x331C, 0x02 },
- {IMX_8BIT, 0x331D, 0x21 },
- {IMX_8BIT, 0x4084, 0x04 },
- {IMX_8BIT, 0x4085, 0x0C },
- {IMX_8BIT, 0x4086, 0x02 },
- {IMX_8BIT, 0x4087, 0xB8 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* 480P DVS 936 x 602 */
-static const struct imx_reg imx135_480p_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x23 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 56,244,4151,2877: 4096x2634 */
- { IMX_8BIT, 0x0345, 0x38 },
- { IMX_8BIT, 0x0346, 0x00 },
- { IMX_8BIT, 0x0347, 0xf4 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x37 },
- { IMX_8BIT, 0x034A, 0x0b },
- { IMX_8BIT, 0x034B, 0x3d },
- { IMX_8BIT, 0x034C, 0x03 }, /* 936 x 602 */
- { IMX_8BIT, 0x034D, 0xa8 },
- { IMX_8BIT, 0x034E, 0x02 },
- { IMX_8BIT, 0x034F, 0x5a },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /* 2058x1156 */
- { IMX_8BIT, 0x0355, 0x00 },
- { IMX_8BIT, 0x0356, 0x05 },
- { IMX_8BIT, 0x0357, 0x25 },
- { IMX_8BIT, 0x301D, 0x30 }, /* TODO! */
- { IMX_8BIT, 0x3310, 0x03 },
- { IMX_8BIT, 0x3311, 0xa8 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x5a },
- { IMX_8BIT, 0x331C, 0x01 }, /* TODO! */
- { IMX_8BIT, 0x331D, 0xd6 },
- { IMX_8BIT, 0x4084, 0x03 },
- { IMX_8BIT, 0x4085, 0xa8 },
- { IMX_8BIT, 0x4086, 0x02 },
- { IMX_8BIT, 0x4087, 0x5a },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* VGA: H : 1036 V : 780 */
-static const struct imx_reg imx135_vga_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x00 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x10 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 52,20,4155, 3099 4144x3120*/
- {IMX_8BIT, 0x0345, 0x20 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x00 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x4F },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x2F },
- {IMX_8BIT, 0x034C, 0x04 }, /* 1036x780 */
- {IMX_8BIT, 0x034D, 0x0C },
- {IMX_8BIT, 0x034E, 0x03 },
- {IMX_8BIT, 0x034F, 0x0C },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x04 }, /* 1036x780 */
- {IMX_8BIT, 0x0355, 0x0C },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x0C },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x04 },
- {IMX_8BIT, 0x3311, 0x0C },
- {IMX_8BIT, 0x3312, 0x03 },
- {IMX_8BIT, 0x3313, 0x0C },
- {IMX_8BIT, 0x331C, 0x02 },
- {IMX_8BIT, 0x331D, 0x21 },
- {IMX_8BIT, 0x4084, 0x04 },
- {IMX_8BIT, 0x4085, 0x0C },
- {IMX_8BIT, 0x4086, 0x03 },
- {IMX_8BIT, 0x4087, 0x0C },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* VGA: H : 820 V : 616 */
-static const struct imx_reg imx135_vga_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x14 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 52,20,4155, 3099 4104x3080*/
- {IMX_8BIT, 0x0345, 0x34 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x14 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x3B },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x1B },
- {IMX_8BIT, 0x034C, 0x03 }, /* 820x616 */
- {IMX_8BIT, 0x034D, 0x34 },
- {IMX_8BIT, 0x034E, 0x02 },
- {IMX_8BIT, 0x034F, 0x68 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x04 }, /* 1026x770 */
- {IMX_8BIT, 0x0355, 0x02 },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x02 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x03 },
- {IMX_8BIT, 0x3311, 0x34 },
- {IMX_8BIT, 0x3312, 0x02 },
- {IMX_8BIT, 0x3313, 0x68 },
- {IMX_8BIT, 0x331C, 0x02 },
- {IMX_8BIT, 0x331D, 0x21 },
- {IMX_8BIT, 0x4084, 0x03 },
- {IMX_8BIT, 0x4085, 0x34 },
- {IMX_8BIT, 0x4086, 0x02 },
- {IMX_8BIT, 0x4087, 0x68 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* VGA: H : 436 V : 360 */
-static const struct imx_reg imx135_436x360_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x22 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 212,0,3995,3119 3784x3120 */
- {IMX_8BIT, 0x0345, 0xD4 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x00 },
- {IMX_8BIT, 0x0348, 0x0F },
- {IMX_8BIT, 0x0349, 0x9B },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x2F },
-
- {IMX_8BIT, 0x034C, 0x01 }, /* 436x360 */
- {IMX_8BIT, 0x034D, 0xB4 },
- {IMX_8BIT, 0x034E, 0x01 },
- {IMX_8BIT, 0x034F, 0x68 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x12 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x0C },
-
- {IMX_8BIT, 0x0354, 0x03 }, /* 928x768 crop from 946x780*/
- {IMX_8BIT, 0x0355, 0xA0 },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x00 },
-
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x01 },
- {IMX_8BIT, 0x3311, 0xB4 },
- {IMX_8BIT, 0x3312, 0x01 },
- {IMX_8BIT, 0x3313, 0x68 },
- {IMX_8BIT, 0x331C, 0x02 },
- {IMX_8BIT, 0x331D, 0x21 },
- {IMX_8BIT, 0x4084, 0x01 },
- {IMX_8BIT, 0x4085, 0xB4 },
- {IMX_8BIT, 0x4086, 0x01 },
- {IMX_8BIT, 0x4087, 0x68 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* QVGA: H : 408 V : 308 */
-static const struct imx_reg imx135_qvga__dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x28 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 64,20,4143,3099 4080x3080 */
- {IMX_8BIT, 0x0345, 0x40 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x14 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x2F },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x1B },
- {IMX_8BIT, 0x034C, 0x01 }, /* 408x308 */
- {IMX_8BIT, 0x034D, 0x98 },
- {IMX_8BIT, 0x034E, 0x01 },
- {IMX_8BIT, 0x034F, 0x34 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x03 }, /* 1020x770 */
- {IMX_8BIT, 0x0355, 0xFC },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x02 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x01 },
- {IMX_8BIT, 0x3311, 0x98 },
- {IMX_8BIT, 0x3312, 0x01 },
- {IMX_8BIT, 0x3313, 0x34 },
- {IMX_8BIT, 0x331C, 0x01 },
- {IMX_8BIT, 0x331D, 0x68 },
- {IMX_8BIT, 0x4084, 0x01 },
- {IMX_8BIT, 0x4085, 0x98 },
- {IMX_8BIT, 0x4086, 0x01 },
- {IMX_8BIT, 0x4087, 0x34 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* CIF H : 368 V : 304 */
-static const struct imx_reg imx135_cif_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x28 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01 }, /* 264,42,3943,3081 3680x3040 */
- {IMX_8BIT, 0x0345, 0x08 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x2a },
- {IMX_8BIT, 0x0348, 0x0F },
- {IMX_8BIT, 0x0349, 0x67 },
- {IMX_8BIT, 0x034A, 0x0c },
- {IMX_8BIT, 0x034B, 0x09 },
- {IMX_8BIT, 0x034C, 0x01 }, /* 368x304 */
- {IMX_8BIT, 0x034D, 0x70 },
- {IMX_8BIT, 0x034E, 0x01 },
- {IMX_8BIT, 0x034F, 0x30 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x03 }, /* 920x760 */
- {IMX_8BIT, 0x0355, 0x98 },
- {IMX_8BIT, 0x0356, 0x02 },
- {IMX_8BIT, 0x0357, 0xf8 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x01 },
- {IMX_8BIT, 0x3311, 0x70 },
- {IMX_8BIT, 0x3312, 0x01 },
- {IMX_8BIT, 0x3313, 0x30 },
- {IMX_8BIT, 0x331C, 0x02 }, /* TODO! binning 4x4 must be 021c? */
- {IMX_8BIT, 0x331D, 0x1C },
- {IMX_8BIT, 0x4084, 0x01 },
- {IMX_8BIT, 0x4085, 0x70 },
- {IMX_8BIT, 0x4086, 0x01 },
- {IMX_8BIT, 0x4087, 0x30 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* CIF H : 1888 V : 1548 */
-static const struct imx_reg imx135_cif_binning_1888x1548[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x22 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x00 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x10 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 264,42, 3776x3096 */
- {IMX_8BIT, 0x0345, 0xD8 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x0C },
- {IMX_8BIT, 0x0348, 0x0F },
- {IMX_8BIT, 0x0349, 0x97 },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x23 },
- {IMX_8BIT, 0x034C, 0x07 }, /* 1888x1548 */
- {IMX_8BIT, 0x034D, 0x60 },
- {IMX_8BIT, 0x034E, 0x06 },
- {IMX_8BIT, 0x034F, 0x0C },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x07 }, /* 1888x1548 */
- {IMX_8BIT, 0x0355, 0x60 },
- {IMX_8BIT, 0x0356, 0x06 },
- {IMX_8BIT, 0x0357, 0x0C },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x07 },
- {IMX_8BIT, 0x3311, 0x60 },
- {IMX_8BIT, 0x3312, 0x06 },
- {IMX_8BIT, 0x3313, 0x0C },
- {IMX_8BIT, 0x331C, 0x02 }, /* TODO! binning 4x4 must be 021c? */
- {IMX_8BIT, 0x331D, 0x1C },
- {IMX_8BIT, 0x4084, 0x07 },
- {IMX_8BIT, 0x4085, 0x60 },
- {IMX_8BIT, 0x4086, 0x06 },
- {IMX_8BIT, 0x4087, 0x0C },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* QCIF H : 216 V : 176 */
-static const struct imx_reg imx135_qcif_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x46 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 212,20,3995,3099 3784x3080 */
- {IMX_8BIT, 0x0345, 0xD4 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x14 },
- {IMX_8BIT, 0x0348, 0x0F },
- {IMX_8BIT, 0x0349, 0x9B },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x1B },
- {IMX_8BIT, 0x034C, 0x00 }, /* 216x176 */
- {IMX_8BIT, 0x034D, 0xD8 },
- {IMX_8BIT, 0x034E, 0x00 },
- {IMX_8BIT, 0x034F, 0xB0 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x03 }, /* 946x770 */
- {IMX_8BIT, 0x0355, 0xB2 },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x02 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x00 },
- {IMX_8BIT, 0x3311, 0xD8 },
- {IMX_8BIT, 0x3312, 0x00 },
- {IMX_8BIT, 0x3313, 0xB0 },
- {IMX_8BIT, 0x331C, 0x02 }, /* TODO! binning 4x4 must be 021c */
- {IMX_8BIT, 0x331D, 0x1C },
- {IMX_8BIT, 0x4084, 0x00 },
- {IMX_8BIT, 0x4085, 0xD8 },
- {IMX_8BIT, 0x4086, 0x00 },
- {IMX_8BIT, 0x4087, 0xB0 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/*
- * ISP scaling is now supported in offline capture use cases. Because of that,
- * we need only a few modes to cover the different aspect ratios from the
- * sensor; the ISP will scale based on the resolution requested from the HAL.
- *
- * There is a performance impact when the continuous view finder option is
- * chosen for resolutions above 8MP, so the 8MP and 6MP resolutions are kept;
- * lower resolutions use 8MP or 6MP respectively for downscaling, based on
- * the aspect ratio.
- */
-struct imx_resolution imx135_res_preview_mofd[] = {
- {
- .desc = "imx135_cif_binning_preview",
- .regs = imx135_cif_binning,
- .width = 368,
- .height = 304,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 9114,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_vga_binning_preview",
- .regs = imx135_vga_binning,
- .width = 1036,
- .height = 780,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_480p_preview",
- .regs = imx135_480p_binning,
- .width = 1036,
- .height = 696,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_1080p_binning_preview",
- .regs = imx135_1080p_binning,
- .width = 1936,
- .height = 1104,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_3m__cont_cap",
- .regs = imx135_3m_binning,
- .width = 2064,
- .height = 1552,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_6m_cont_cap",
- .regs = imx135_6m,
- .width = 3280,
- .height = 1852,
- .fps_options = {
- { /* Binning Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_8m_scaled_from_12m__cont_cap",
- .regs = imx135_8m_scaled_from_12m,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 24,
- .pixels_per_line = 4572,
- .lines_per_frame = 3280,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_10m__cont_cap",
- .regs = imx135_10m,
- .width = 4208,
- .height = 2368,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2632,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_13m__cont_cap",
- .regs = imx135_13m,
- .width = 4208,
- .height = 3120,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 24,
- .pixels_per_line = 4572,
- .lines_per_frame = 3290,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
-};
-
-struct imx_resolution imx135_res_preview[] = {
- {
- .desc = "imx135_xga_cropped_video",
- .regs = imx135_xga_cropped,
- .width = 832,
- .height = 628,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
-
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_2m_cropped_video",
- .regs = imx135_2m_cropped,
- .width = 1648,
- .height = 1240,
- .fps_options = {
- { /* Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
-
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_1936x1096_cropped",
- .regs = imx135_1936x1096_cropped,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- { /* Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
-
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_8m_cropped_video",
- .regs = imx135_8m_cropped,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
-};
-
-/*
- * ISP scaling is now supported in online capture use cases. Because of that,
- * we need only a few modes to cover the different aspect ratios from the
- * sensor; the ISP will scale based on the resolution requested from the HAL.
- *
- * There is a performance impact when the continuous view finder option is
- * chosen for resolutions above 8MP, so the 8MP and 6MP resolutions are kept;
- * lower resolutions use 8MP or 6MP respectively for downscaling, based on
- * the aspect ratio.
- */
-struct imx_resolution imx135_res_still_mofd[] = {
- {
- .desc = "imx135_cif_binning_still",
- .regs = imx135_cif_binning_1888x1548,
- .width = 1888,
- .height = 1548,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_vga_binning_preview",
- .regs = imx135_vga_binning,
- .width = 1036,
- .height = 780,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_480p_preview",
- .regs = imx135_480p_binning,
- .width = 1036,
- .height = 696,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_1080p_binning_still",
- .regs = imx135_1080p_binning,
- .width = 1936,
- .height = 1104,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 15,
- .pixels_per_line = 9114,
- .lines_per_frame = 2453,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_3m__still",
- .regs = imx135_3m_binning,
- .width = 2064,
- .height = 1552,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 15,
- .pixels_per_line = 9114,
- .lines_per_frame = 2453,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_6m_for_mipi_342_still",
- .regs = imx135_6m_for_mipi_342,
- .width = 3280,
- .height = 1852,
- .fps_options = {
- { /* Pixel clock: 273.6MHz */
- .fps = 11,
- .pixels_per_line = 9114,
- .lines_per_frame = 2664,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 342000,
- },
- {
- .desc = "imx135_8m_scaled_from_12m_for_mipi342_still",
- .regs = imx135_8m_scaled_from_12m_for_mipi342,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 273.6MHz */
- .fps = 8,
- .pixels_per_line = 7672,
- .lines_per_frame = 4458,
- },
- { /* Pixel clock: 273.6MHz */
- .fps = 15,
- .pixels_per_line = 5500,
- .lines_per_frame = 3314,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 342000,
- },
- {
- .desc = "imx135_10m_for_mipi_342_still",
- .regs = imx135_10m_for_mipi_342,
- .width = 4208,
- .height = 2368,
- .fps_options = {
- { /* Pixel clock: 273.6MHz */
- .fps = 11,
- .pixels_per_line = 9144,
- .lines_per_frame = 2664,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 342000,
- },
- {
- .desc = "imx135_13m_still",
- .regs = imx135_13m_for_mipi_342,
- .width = 4208,
- .height = 3120,
- .fps_options = {
- { /* Pixel clock: 273.6MHz */
- .fps = 5,
- .pixels_per_line = 9144,
- .lines_per_frame = 5990,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 342000,
- },
-};
-
-struct imx_resolution imx135_res_still[] = {
- {
- .desc = "imx135_qvga",
- .regs = imx135_336x256,
- .width = 336,
- .height = 256,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_cif",
- .regs = imx135_368x304_cropped,
- .width = 368,
- .height = 304,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_xga_cropped_video",
- .regs = imx135_xga_cropped,
- .width = 832,
- .height = 628,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_2M_for_11:9",
- .regs = imx135_1424x1168_cropped,
- .width = 1424,
- .height = 1168,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_2m_cropped_video",
- .regs = imx135_2m_cropped,
- .width = 1648,
- .height = 1240,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 15,
- .pixels_per_line = 6466,
- .lines_per_frame = 3710,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_6m_cropped_video",
- .regs = imx135_6m_cropped,
- .width = 3280,
- .height = 1852,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 8,
- .pixels_per_line = 8850,
- .lines_per_frame = 5080,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_8m_cropped_video",
- .regs = imx135_8m_cropped,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 8,
- .pixels_per_line = 8850,
- .lines_per_frame = 5080,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
-};
-
-/*
- * ISP scaling is not supported for video modes, so we need separate sensor
- * modes for video use cases.
- */
-struct imx_resolution imx135_res_video[] = {
- /* For binning modes pix clock is 335.36 MHz. */
- {
- .desc = "imx135_qcif_dvs_binning_video",
- .regs = imx135_qcif_dvs_binning,
- .width = 216,
- .height = 176,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_cif_binning_video",
- .regs = imx135_cif_binning,
- .width = 368,
- .height = 304,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_qvga__dvs_binning_video",
- .regs = imx135_qvga__dvs_binning,
- .width = 408,
- .height = 308,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_436x360_binning_video",
- .regs = imx135_436x360_binning,
- .width = 436,
- .height = 360,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_vga_dvs_binning_video",
- .regs = imx135_vga_dvs_binning,
- .width = 820,
- .height = 616,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_480p_dvs_binning_video",
- .regs = imx135_480p_dvs_binning,
- .width = 936,
- .height = 602,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_720P_dvs_video",
- .regs = imx135_720pdvs_max_clock,
- .width = 1568,
- .height = 880,
- .fps_options = {
- {/* Pixel Clock : 360.96 MHz */
- .fps = 30,
- .pixels_per_line = 5850,
- .lines_per_frame = 2000,
- },
- {/* Pixel Clock : 360.96 MHz */
- .fps = 60,
- .pixels_per_line = 4572,
- .lines_per_frame = 1310,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_wvga_dvs_binning_video",
- .regs = imx135_wvga_dvs_binning,
- .width = 1640,
- .height = 1024,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_1936_1096_fullfov_max_clock",
- .regs = imx135_1080p_nodvs_max_clock,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {/* Pixel Clock : 360.96 MHz */
- .fps = 30,
- .pixels_per_line = 5850,
- .lines_per_frame = 2000,
- },
- {/* Pixel Clock : 360.96 MHz */
- .fps = 60,
- .pixels_per_line = 4572,
- .lines_per_frame = 1310,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_1080P_dvs_video",
- .regs = imx135_2336x1320_max_clock,
- .width = 2336,
- .height = 1320,
- .fps_options = {
- {/* Pixel Clock : 360.96 MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2632,
- .regs = imx135_2336x1320_max_clock,
- .mipi_freq = 451200,
- },
- {/* Pixel Clock : 399.36MHz */
- .fps = 60,
- .pixels_per_line = 4754,
- .lines_per_frame = 1400,
- .regs = imx135_2336x1320_cropped_mipi499,
- .mipi_freq = 499200,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_6m_cont_cap",
- .regs = imx135_6m,
- .width = 3280,
- .height = 1852,
- .fps_options = {
- { /* Binning Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_8m_cropped_video",
- .regs = imx135_8m_cropped,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
-};
-
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx175.h b/drivers/staging/media/atomisp/i2c/imx/imx175.h
deleted file mode 100644
index 5e082088cb37..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx175.h
+++ /dev/null
@@ -1,1960 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IMX175_H__
-#define __IMX175_H__
-#include "common.h"
-
-/************************** settings for imx *************************/
-static struct imx_reg const imx_STILL_8M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x09}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xA0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_8M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x09}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xA0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_3M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x08}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x06}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x10}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x19}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_3M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x08}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x06}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x10}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x19}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-
-static struct imx_reg const imx_STILL_5M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0A}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x90}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x14}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_5M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0A}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x90}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x14}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_6M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x32}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x6D}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x3C}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_6M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x32}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x6D}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x3C}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_2M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x8C}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xD0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_2M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x8C}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xD0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_PREVIEW_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x70}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x34}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x68}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x03},
- {IMX_8BIT, 0x33D5, 0x34},
- {IMX_8BIT, 0x33D6, 0x02},
- {IMX_8BIT, 0x33D7, 0x68},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_WIDE_PREVIEW_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0D}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x70}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x10}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x00}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x14}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x8C}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xBC}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x68},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0xBC},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/*****************************video************************/
-static struct imx_reg const imx_1080p_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x06}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x4C}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xA4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x11}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0xC6}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0xDB}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x02}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x42}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0A}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xEA}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x61}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x09}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x05}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x20}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_1080p_no_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x08}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xD5}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x07}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xD0}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0F}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x3C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x34}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x6B}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x07}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x94}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x44}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x1B}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_1080p_no_dvs_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x08}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xD5}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xA6}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x18}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x34}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x6B}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x07}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x94}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x44}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x1B}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-/*****************************video************************/
-static struct imx_reg const imx_720p_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x13}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0xD7}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x02}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x3E}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0A}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xEE}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x65}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x70}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x18}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_480p_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x13}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0xD4}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0xC8}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0A}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xF1}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0xDB}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x70}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x50}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x15}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_720p_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x14}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x28}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x48}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x64}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0x87}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x3B}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x20}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x6C}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_720p_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x08}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xCA}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x18}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x38}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x48}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x64}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0x87}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x3B}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x20}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x6C}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_WVGA_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x13}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0xD0}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0xCF}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x00}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-static struct imx_reg const imx_CIF_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x11}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0xDB}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x01}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x70}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x01}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x30}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_VGA_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x11}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x94}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x34}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x68}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_VGA_strong_dvs_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x07}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x9E}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x1C}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0xB6}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x34}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x68}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_QVGA_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x70}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x03}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x38}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x02}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x68}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x09}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0x97}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x37}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x01}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x98}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x01}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x34}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x01},
- {IMX_8BIT, 0x33D5, 0x98},
- {IMX_8BIT, 0x33D6, 0x01},
- {IMX_8BIT, 0x33D7, 0x34},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_QCIF_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x70}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x04}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0xB8}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x03}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x70}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x08}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0x17}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x06}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x2F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x00}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD8}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x00}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xB0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x00},
- {IMX_8BIT, 0x33D5, 0xD8},
- {IMX_8BIT, 0x33D6, 0x00},
- {IMX_8BIT, 0x33D7, 0xB0},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx175_init_settings[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0103, 0x01},
- /* misc control */
- {IMX_8BIT, 0x3020, 0x10},
- {IMX_8BIT, 0x302D, 0x02},
- {IMX_8BIT, 0x302F, 0x80},
- {IMX_8BIT, 0x3032, 0xA3},
- {IMX_8BIT, 0x3033, 0x20},
- {IMX_8BIT, 0x3034, 0x24},
- {IMX_8BIT, 0x3041, 0x15},
- {IMX_8BIT, 0x3042, 0x87},
- {IMX_8BIT, 0x3050, 0x35},
- {IMX_8BIT, 0x3056, 0x57},
- {IMX_8BIT, 0x305D, 0x41},
- {IMX_8BIT, 0x3097, 0x69},
- {IMX_8BIT, 0x3109, 0x41},
- {IMX_8BIT, 0x3148, 0x3F},
- {IMX_8BIT, 0x330F, 0x07},
- /* csi & inck */
- {IMX_8BIT, 0x3364, 0x00},
- {IMX_8BIT, 0x3368, 0x13},
- {IMX_8BIT, 0x3369, 0x33},
- /* znr */
- {IMX_8BIT, 0x4100, 0x0E},
- {IMX_8BIT, 0x4104, 0x32},
- {IMX_8BIT, 0x4105, 0x32},
- {IMX_8BIT, 0x4108, 0x01},
- {IMX_8BIT, 0x4109, 0x7C},
- {IMX_8BIT, 0x410A, 0x00},
- {IMX_8BIT, 0x410B, 0x00},
- GROUPED_PARAMETER_HOLD_DISABLE,
- {IMX_TOK_TERM, 0, 0}
-};
-/* TODO settings of preview/still/video will be updated with new use case */
-struct imx_resolution imx175_res_preview[] = {
- {
- .desc = "CIF_strong_dvs_30fps",
- .regs = imx_CIF_strong_dvs_30fps,
- .width = 368,
- .height = 304,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x11DB,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 261500,
-
- },
- {
- .desc = "VGA_strong_dvs_30fps",
- .regs = imx_VGA_strong_dvs_30fps,
- .width = 820,
- .height = 616,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x11DB,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "WIDE_PREVIEW_30fps",
- .regs = imx_WIDE_PREVIEW_30fps,
- .width = 1640,
- .height = 956,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x1000,
- .lines_per_frame = 0x0D70,
- },
- {
- }
- },
- .mipi_freq = 174500,
- },
- {
- .desc = "STILL_720p_30fps",
- .regs = imx_STILL_720p_30fps,
- .width = 1568,
- .height = 876,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x1428,
- .lines_per_frame = 0x0548,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "STILL_2M_30fps",
- .regs = imx_STILL_2M_30fps,
- .width = 1640,
- .height = 1232,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "1080p_strong_dvs_30fps",
- .regs = imx_1080p_no_dvs_30fps,
- .width = 1940,
- .height = 1092,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0F3C,
- .lines_per_frame = 0x07D0,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "STILL_3M_30fps",
- .regs = imx_STILL_3M_30fps,
- .width = 2064,
- .height = 1552,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_5M_30fps",
- .regs = imx_STILL_5M_30fps,
- .width = 2576,
- .height = 1936,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_6M_30fps",
- .regs = imx_STILL_6M_30fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_8M_30fps",
- .regs = imx_STILL_8M_30fps,
- .width = 3280,
- .height = 2464,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
-};
-
-struct imx_resolution imx175_res_still[] = {
- {
- .desc = "CIF_strong_dvs_30fps",
- .regs = imx_CIF_strong_dvs_30fps,
- .width = 368,
- .height = 304,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x11DB,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 261000,
- },
- {
- .desc = "VGA_strong_dvs_15fps",
- .regs = imx_VGA_strong_dvs_15fps,
- .width = 820,
- .height = 616,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1C86,
- .lines_per_frame = 0x079E,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "imx_STILL_720p_15fps",
- .regs = imx_STILL_720p_15fps,
- .width = 1568,
- .height = 876,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1838,
- .lines_per_frame = 0x08CA,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "STILL_2M_15fps",
- .regs = imx_STILL_2M_15fps,
- .width = 1640,
- .height = 1232,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "1080p_strong_dvs_15fps",
- .regs = imx_1080p_no_dvs_15fps,
- .width = 1940,
- .height = 1092,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x189C,
- .lines_per_frame = 0x09A6,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "STILL_3M_15fps",
- .regs = imx_STILL_3M_15fps,
- .width = 2064,
- .height = 1552,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_5M_15fps",
- .regs = imx_STILL_5M_15fps,
- .width = 2576,
- .height = 1936,
- .fps = 15,
- .pixels_per_line = 0x1646, /* consistent with regs arrays */
- .lines_per_frame = 0x0BB8, /* consistent with regs arrays */
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_6M_15fps",
- .regs = imx_STILL_6M_15fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_8M_15fps",
- .regs = imx_STILL_8M_15fps,
- .width = 3280,
- .height = 2464,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
-};
-
-struct imx_resolution imx175_res_video[] = {
- {
- .desc = "QCIF_strong_dvs_30fps",
- .regs = imx_QCIF_strong_dvs_30fps,
- .width = 216,
- .height = 176,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D70,
- .lines_per_frame = 0x0548,
- },
- {
- }
- },
- .mipi_freq = 174500,
- },
- {
- .desc = "QVGA_strong_dvs_30fps",
- .regs = imx_QVGA_strong_dvs_30fps,
- .width = 408,
- .height = 308,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D70,
- .lines_per_frame = 0x0548,
- },
- {
- }
- },
- .mipi_freq = 174500,
- },
- {
- .desc = "VGA_strong_dvs_30fps",
- .regs = imx_VGA_strong_dvs_30fps,
- .width = 820,
- .height = 616,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x1194,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "720p_strong_dvs_30fps",
- .regs = imx_720p_strong_dvs_30fps,
- .width = 1552,
- .height = 880,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x139C,
- .lines_per_frame = 0x0600,
- },
- {
- .fps = 60,
- .pixels_per_line = 0xD70,
- .lines_per_frame = 0x444,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "480p_strong_dvs_30fps",
- .regs = imx_480p_strong_dvs_30fps,
- .width = 880,
- .height = 592,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x139C,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "WVGA_strong_dvs_30fps",
- .regs = imx_WVGA_strong_dvs_30fps,
- .width = 1640,
- .height = 1024,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x139C,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "1080p_strong_dvs_30fps",
- .regs = imx_1080p_strong_dvs_30fps,
- .width = 2320,
- .height = 1312,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x11C6,
- .lines_per_frame = 0x06A4,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
-};
-
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx208.h b/drivers/staging/media/atomisp/i2c/imx/imx208.h
deleted file mode 100644
index fed387f42f99..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx208.h
+++ /dev/null
@@ -1,550 +0,0 @@
-/*
- * Support for Sony IMX camera sensor.
- *
- * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IMX208_H__
-#define __IMX208_H__
-#include "common.h"
-
-/********************** settings for imx from vendor*********************/
-static struct imx_reg imx208_1080p_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x07}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x8F}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x04}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x47}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x07}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0x90}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0x48}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x01}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x01}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x00}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x00}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x61}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx208_1296x736_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x40}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0xB4}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x06}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x4F}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x03}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x93}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x05}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0xE0}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x01}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x01}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x00}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x00}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x61}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx208_1296x976_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x40}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0x3C}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x06}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x4F}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x04}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x0B}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x05}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0xD0}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x01}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x01}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x00}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x00}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x61}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx208_336x256_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x02}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x78}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0x24}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x05}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x17}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x03}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x23}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x01}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0x50}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x01}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0x00}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x03}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x03}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x01}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x03}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x66}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx208_192x160_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x02}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x48}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0xE4}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x05}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x47}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x03}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x63}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x00}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0xC0}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x00}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0xA0}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x03}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x05}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x03}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x05}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x01}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x03}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x11}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x74}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-/********************** settings for imx - reference *********************/
-static struct imx_reg const imx208_init_settings[] = {
- { IMX_TOK_TERM, 0, 0}
-};
-
-struct imx_resolution imx208_res_preview[] = {
- {
- .desc = "imx208_1080p_30fps",
- .regs = imx208_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x976_30fps",
- .regs = imx208_1296x976_30fps,
- .width = 1296,
- .height = 976,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x736_30fps",
- .regs = imx208_1296x736_30fps,
- .width = 1296,
- .height = 736,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_336x256_30fps",
- .regs = imx208_336x256_30fps,
- .width = 336,
- .height = 256,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 201600,
- },
- {
- .desc = "imx208_192x160_30fps",
- .regs = imx208_192x160_30fps,
- .width = 192,
- .height = 160,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 100800,
- },
-};
-
-struct imx_resolution imx208_res_still[] = {
- {
- .desc = "imx208_1080p_30fps",
- .regs = imx208_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x976_30fps",
- .regs = imx208_1296x976_30fps,
- .width = 1296,
- .height = 976,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x736_30fps",
- .regs = imx208_1296x736_30fps,
- .width = 1296,
- .height = 736,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_336x256_30fps",
- .regs = imx208_336x256_30fps,
- .width = 336,
- .height = 256,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 201600,
- },
- {
- .desc = "imx208_192x160_30fps",
- .regs = imx208_192x160_30fps,
- .width = 192,
- .height = 160,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 100800,
- },
-};
-
-struct imx_resolution imx208_res_video[] = {
- {
- .desc = "imx208_1080p_30fps",
- .regs = imx208_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x976_30fps",
- .regs = imx208_1296x976_30fps,
- .width = 1296,
- .height = 976,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x736_30fps",
- .regs = imx208_1296x736_30fps,
- .width = 1296,
- .height = 736,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_336x256_30fps",
- .regs = imx208_336x256_30fps,
- .width = 336,
- .height = 256,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 201600,
- },
- {
- .desc = "imx208_192x160_30fps",
- .regs = imx208_192x160_30fps,
- .width = 192,
- .height = 160,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 100800,
- },
-};
-#endif
-
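Every mode table deleted above follows the same shape: a list of {width token, register, value} triples closed by an IMX_TOK_TERM entry, which the driver walks and writes over I2C. Below is a minimal stand-alone sketch of that walk; the struct field names, token values and the apply() helper are assumptions for illustration, not the driver's real imx_write_reg_array().

/*
 * Stand-alone sketch (not the driver's real helper): walk a register list
 * that ends with an IMX_TOK_TERM entry, as the mode tables above do.
 * Field names and token values are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

enum imx_tok { IMX_8BIT = 1, IMX_16BIT = 2, IMX_TOK_TERM = 0xf0 };

struct imx_reg {
	enum imx_tok type;
	uint16_t reg;
	uint32_t val;
};

/* stand-in for the real I2C register write */
static void apply(const struct imx_reg *r)
{
	printf("write 0x%04x = 0x%02x\n", (unsigned int)r->reg, (unsigned int)r->val);
}

static void write_reg_array(const struct imx_reg *regs)
{
	for (; regs->type != IMX_TOK_TERM; regs++)
		apply(regs);
}

int main(void)
{
	static const struct imx_reg mode[] = {
		{ IMX_8BIT, 0x0305, 0x02 },	/* PREPLLCK DIV, as in the table above */
		{ IMX_8BIT, 0x0307, 0x54 },	/* PLL MPY */
		{ IMX_TOK_TERM, 0, 0 },
	};

	write_reg_array(mode);
	return 0;
}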
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx219.h b/drivers/staging/media/atomisp/i2c/imx/imx219.h
deleted file mode 100644
index bbd515bf7279..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx219.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IMX219_H__
-#define __IMX219_H__
-#include "common.h"
-
-#define IMX219_FRAME_LENGTH_LINES 0x0160
-#define IMX219_LINE_LENGTH_PIXELS 0x0162
-#define IMX219_HORIZONTAL_START_H 0x0164
-#define IMX219_VERTICAL_START_H 0x0168
-#define IMX219_HORIZONTAL_END_H 0x0166
-#define IMX219_VERTICAL_END_H 0x016A
-#define IMX219_HORIZONTAL_OUTPUT_SIZE_H 0x016c
-#define IMX219_VERTICAL_OUTPUT_SIZE_H 0x016E
-#define IMX219_COARSE_INTEGRATION_TIME 0x015A
-#define IMX219_IMG_ORIENTATION 0x0172
-#define IMX219_GLOBAL_GAIN 0x0157
-#define IMX219_DGC_ADJ 0x0158
-
-#define IMX219_DGC_LEN 4
-
-/************************** settings for imx *************************/
-static struct imx_reg const imx219_STILL_8M_30fps[] = {
- {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x0C}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x300A, 0xFF}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x300B, 0xFF}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x09}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x0114, 0x03}, /*CSI_LANE_MODE[1:0}*/
- {IMX_8BIT, 0x0128, 0x00}, /*DPHY_CNTRL*/
- {IMX_8BIT, 0x012A, 0x13}, /*EXCK_FREQ[15:8]*/
- {IMX_8BIT, 0x012B, 0x34}, /*EXCK_FREQ[7:0]*/
- {IMX_8BIT, 0x0160, 0x0A}, /*FRM_LENGTH_A[15:8]*/
- {IMX_8BIT, 0x0161, 0x94}, /*FRM_LENGTH_A[7:0]*/
- {IMX_8BIT, 0x0162, 0x0D}, /*LINE_LENGTH_A[15:8]*/
- {IMX_8BIT, 0x0163, 0x78}, /*LINE_LENGTH_A[7:0]*/
- {IMX_8BIT, 0x0164, 0x00}, /*X_ADD_STA_A[11:8]*/
- {IMX_8BIT, 0x0165, 0x00}, /*X_ADD_STA_A[7:0]*/
- {IMX_8BIT, 0x0166, 0x0C}, /*X_ADD_END_A[11:8]*/
- {IMX_8BIT, 0x0167, 0xCF}, /*X_ADD_END_A[7:0]*/
- {IMX_8BIT, 0x0168, 0x00}, /*Y_ADD_STA_A[11:8]*/
- {IMX_8BIT, 0x0169, 0x00}, /*Y_ADD_STA_A[7:0]*/
- {IMX_8BIT, 0x016A, 0x09}, /*Y_ADD_END_A[11:8]*/
- {IMX_8BIT, 0x016B, 0x9F}, /*Y_ADD_END_A[7:0]*/
- {IMX_8BIT, 0x016C, 0x0C}, /*X_OUTPUT_SIZE_A[11:8]*/
- {IMX_8BIT, 0x016D, 0xD0}, /*X_OUTPUT_SIZE_A[7:0]*/
- {IMX_8BIT, 0x016E, 0x09}, /*Y_OUTPUT_SIZE_A[11:8]*/
- {IMX_8BIT, 0x016F, 0xA0}, /*Y_OUTPUT_SIZE_A[7:0]*/
- {IMX_8BIT, 0x0170, 0x01}, /*X_ODD_INC_A[2:0]*/
- {IMX_8BIT, 0x0171, 0x01}, /*Y_ODD_INC_A[2:0]*/
- {IMX_8BIT, 0x0174, 0x00}, /*BINNING_MODE_H_A*/
- {IMX_8BIT, 0x0175, 0x00}, /*BINNING_MODE_V_A*/
- {IMX_8BIT, 0x018C, 0x0A}, /*CSI_DATA_FORMAT_A[15:8]*/
- {IMX_8BIT, 0x018D, 0x0A}, /*CSI_DATA_FORMAT_A[7:0]*/
- {IMX_8BIT, 0x0301, 0x05}, /*VTPXCK_DIV*/
- {IMX_8BIT, 0x0303, 0x01}, /*VTSYCK_DIV*/
- {IMX_8BIT, 0x0304, 0x02}, /*PREPLLCK_VT_DIV[3:0]*/
- {IMX_8BIT, 0x0305, 0x02}, /*PREPLLCK_OP_DIV[3:0]*/
- {IMX_8BIT, 0x0306, 0x00}, /*PLL_VT_MPY[10:8]*/
- {IMX_8BIT, 0x0307, 0x49}, /*PLL_VT_MPY[7:0]*/
- {IMX_8BIT, 0x0309, 0x0A}, /*OPPXCK_DIV[4:0]*/
- {IMX_8BIT, 0x030B, 0x01}, /*OPSYCK_DIV*/
- {IMX_8BIT, 0x030C, 0x00}, /*PLL_OP_MPY[10:8]*/
- {IMX_8BIT, 0x030D, 0x4C}, /*PLL_OP_MPY[7:0]*/
- {IMX_8BIT, 0x4767, 0x0F}, /*CIS Tuning*/
- {IMX_8BIT, 0x4750, 0x14}, /*CIS Tuning*/
- {IMX_8BIT, 0x47B4, 0x14}, /*CIS Tuning*/
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx219_STILL_6M_30fps[] = {
- {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x0C}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x300A, 0xFF}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x300B, 0xFF}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x09}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x0114, 0x03}, /*CSI_LANE_MODE[1:0}*/
- {IMX_8BIT, 0x0128, 0x00}, /*DPHY_CNTRL*/
- {IMX_8BIT, 0x012A, 0x13}, /*EXCK_FREQ[15:8]*/
- {IMX_8BIT, 0x012B, 0x34}, /*EXCK_FREQ[7:0]*/
- {IMX_8BIT, 0x0160, 0x07}, /*FRM_LENGTH_A[15:8]*/
- {IMX_8BIT, 0x0161, 0x64}, /*FRM_LENGTH_A[7:0]*/
- {IMX_8BIT, 0x0162, 0x0D}, /*LINE_LENGTH_A[15:8]*/
- {IMX_8BIT, 0x0163, 0x78}, /*LINE_LENGTH_A[7:0]*/
- {IMX_8BIT, 0x0164, 0x00}, /*X_ADD_STA_A[11:8]*/
- {IMX_8BIT, 0x0165, 0x00}, /*X_ADD_STA_A[7:0]*/
- {IMX_8BIT, 0x0166, 0x0C}, /*X_ADD_END_A[11:8]*/
- {IMX_8BIT, 0x0167, 0xCF}, /*X_ADD_END_A[7:0]*/
- {IMX_8BIT, 0x0168, 0x01}, /*Y_ADD_STA_A[11:8]*/
- {IMX_8BIT, 0x0169, 0x32}, /*Y_ADD_STA_A[7:0]*/
- {IMX_8BIT, 0x016A, 0x08}, /*Y_ADD_END_A[11:8]*/
- {IMX_8BIT, 0x016B, 0x6D}, /*Y_ADD_END_A[7:0]*/
- {IMX_8BIT, 0x016C, 0x0C}, /*X_OUTPUT_SIZE_A[11:8]*/
- {IMX_8BIT, 0x016D, 0xD0}, /*X_OUTPUT_SIZE_A[7:0]*/
- {IMX_8BIT, 0x016E, 0x07}, /*Y_OUTPUT_SIZE_A[11:8]*/
- {IMX_8BIT, 0x016F, 0x3C}, /*Y_OUTPUT_SIZE_A[7:0]*/
- {IMX_8BIT, 0x0170, 0x01}, /*X_ODD_INC_A[2:0]*/
- {IMX_8BIT, 0x0171, 0x01}, /*Y_ODD_INC_A[2:0]*/
- {IMX_8BIT, 0x0174, 0x00}, /*BINNING_MODE_H_A*/
- {IMX_8BIT, 0x0175, 0x00}, /*BINNING_MODE_V_A*/
- {IMX_8BIT, 0x018C, 0x0A}, /*CSI_DATA_FORMAT_A[15:8]*/
- {IMX_8BIT, 0x018D, 0x0A}, /*CSI_DATA_FORMAT_A[7:0]*/
- {IMX_8BIT, 0x0301, 0x05}, /*VTPXCK_DIV*/
- {IMX_8BIT, 0x0303, 0x01}, /*VTSYCK_DIV*/
- {IMX_8BIT, 0x0304, 0x02}, /*PREPLLCK_VT_DIV[3:0]*/
- {IMX_8BIT, 0x0305, 0x02}, /*PREPLLCK_OP_DIV[3:0]*/
- {IMX_8BIT, 0x0306, 0x00}, /*PLL_VT_MPY[10:8]*/
- {IMX_8BIT, 0x0307, 0x33}, /*PLL_VT_MPY[7:0]*/
- {IMX_8BIT, 0x0309, 0x0A}, /*OPPXCK_DIV[4:0]*/
- {IMX_8BIT, 0x030B, 0x01}, /*OPSYCK_DIV*/
- {IMX_8BIT, 0x030C, 0x00}, /*PLL_OP_MPY[10:8]*/
- {IMX_8BIT, 0x030D, 0x36}, /*PLL_OP_MPY[7:0]*/
- {IMX_8BIT, 0x4767, 0x0F}, /*CIS Tuning*/
- {IMX_8BIT, 0x4750, 0x14}, /*CIS Tuning*/
- {IMX_8BIT, 0x47B4, 0x14}, /*CIS Tuning*/
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx219_init_settings[] = {
- {IMX_TOK_TERM, 0, 0}
-};
-
-struct imx_resolution imx219_res_preview[] = {
- {
- .desc = "STILL_6M_30fps",
- .regs = imx219_STILL_6M_30fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0764,
- },
- {
- }
- },
- .mipi_freq = 259000,
- },
- {
- .desc = "STILL_8M_30fps",
- .regs = imx219_STILL_8M_30fps,
- .width = 3280,
- .height = 2464,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0A94,
- },
- {
- }
- },
- .mipi_freq = 365000,
- },
-};
-
-struct imx_resolution imx219_res_still[] = {
- {
- .desc = "STILL_6M_30fps",
- .regs = imx219_STILL_6M_30fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0764,
- },
- {
- }
- },
- .mipi_freq = 259000,
- },
- {
- .desc = "STILL_8M_30fps",
- .regs = imx219_STILL_8M_30fps,
- .width = 3280,
- .height = 2464,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0A94,
- },
- {
- }
- },
- .mipi_freq = 365000,
- },
-};
-
-struct imx_resolution imx219_res_video[] = {
- {
- .desc = "STILL_6M_30fps",
- .regs = imx219_STILL_6M_30fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0764,
- },
- {
- }
- },
- .mipi_freq = 259000,
- },
-};
-
-#endif
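The imx219 register map above exposes 16-bit quantities as _H/_L byte pairs (e.g. FRM_LENGTH_A at 0x0160/0x0161), and the deleted mode lists write the two halves as separate 8-bit registers. A hedged sketch of that split follows; write8() is a stand-in for the driver's register write, not an existing function.

#include <stdint.h>
#include <stdio.h>

#define IMX219_FRAME_LENGTH_LINES 0x0160

/* stand-in for the 8-bit register write helper */
static void write8(uint16_t reg, uint8_t val)
{
	printf("reg 0x%04x <- 0x%02x\n", (unsigned int)reg, (unsigned int)val);
}

static void write16(uint16_t reg, uint16_t val)
{
	write8(reg, val >> 8);		/* [15:8] */
	write8(reg + 1, val & 0xff);	/* [7:0]  */
}

int main(void)
{
	/* 0x0A94 frame length lines, the STILL_8M_30fps value above */
	write16(IMX219_FRAME_LENGTH_LINES, 0x0A94);
	return 0;
}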
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx227.h b/drivers/staging/media/atomisp/i2c/imx/imx227.h
deleted file mode 100644
index 795fe017d01b..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx227.h
+++ /dev/null
@@ -1,727 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IMX227_H__
-#define __IMX227_H__
-
-#include "common.h"
-
-#define IMX227_EMBEDDED_DATA_LINE_NUM 2
-#define IMX227_OUTPUT_DATA_FORMAT_REG 0x0112
-#define IMX227_OUTPUT_FORMAT_RAW10 0x0a0a
-
-/* AE Bracketing Registers */
-#define IMX227_BRACKETING_LUT_MODE_BIT_CONTINUE_STREAMING 0x1
-#define IMX227_BRACKETING_LUT_MODE_BIT_LOOP_MODE 0x2
-
-#define IMX227_BRACKETING_LUT_CONTROL 0x0E00
-#define IMX227_BRACKETING_LUT_MODE 0x0E01
-#define IMX227_BRACKETING_LUT_ENTRY_CONTROL 0x0E02
-
-/*
- * The imx227 embedded data info:
- * embedded data line num: 2
- * line 0 effective data size(byte): 160
- * line 1 effective data size(byte): 62
- */
-static const uint32_t
-imx227_embedded_effective_size[IMX227_EMBEDDED_DATA_LINE_NUM] = {160, 62};
-
-/************************** settings for imx *************************/
-/* Full Output Mode */
-static struct imx_reg const imx_STILL_6_5M_25fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x6259, 0x06}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xd0}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* 4:3 Output Mode */
-static struct imx_reg const imx_STILL_5_5M_3X4_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0xb0},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x00},
- {IMX_8BIT, 0x0348, 0x08},
- {IMX_8BIT, 0x0349, 0xaf},
- {IMX_8BIT, 0x034a, 0x0a},
- {IMX_8BIT, 0x034b, 0x9f},
- {IMX_8BIT, 0x034c, 0x08},
- {IMX_8BIT, 0x034d, 0x00},
- {IMX_8BIT, 0x034e, 0x0a},
- {IMX_8BIT, 0x034f, 0xa0},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xd8}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Square Output Mode */
-static struct imx_reg const imx_STILL_5_7M_1X1_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0xa0},
- {IMX_8BIT, 0x0348, 0x09},
- {IMX_8BIT, 0x0349, 0x5f},
- {IMX_8BIT, 0x034a, 0x09},
- {IMX_8BIT, 0x034b, 0xff},
- {IMX_8BIT, 0x034c, 0x09},
- {IMX_8BIT, 0x034d, 0x60},
- {IMX_8BIT, 0x034e, 0x09},
- {IMX_8BIT, 0x034f, 0x60},
-
- {IMX_8BIT, 0x6259, 0x06}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xd4}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Full Frame 1080P Mode (use ISP scaler)*/
-static struct imx_reg const imx_VIDEO_4M_9X16_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xdc}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Cropped 1080P Mode */
-static struct imx_reg const imx_VIDEO_2M_9X16_45fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x02},
- {IMX_8BIT, 0x0345, 0x8a},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x88},
- {IMX_8BIT, 0x0348, 0x06},
- {IMX_8BIT, 0x0349, 0xd1},
- {IMX_8BIT, 0x034a, 0x09},
- {IMX_8BIT, 0x034b, 0x17},
- {IMX_8BIT, 0x034c, 0x04},
- {IMX_8BIT, 0x034d, 0x48},
- {IMX_8BIT, 0x034e, 0x07},
- {IMX_8BIT, 0x034f, 0x90},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x01},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x00},
- {IMX_8BIT, 0x040c, 0x04},
- {IMX_8BIT, 0x040d, 0x48},
- {IMX_8BIT, 0x040e, 0x07},
- {IMX_8BIT, 0x040f, 0x90},
-
- {IMX_8BIT, 0x0900, 0x00},
- {IMX_8BIT, 0x0901, 0x00},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xdc}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
-
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Moment mode */
-static struct imx_reg const imx_VIDEO_1_3M_3X4_60fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xd9}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* High Speed 3:4 mode */
-static struct imx_reg const imx_VIDEO_VGA_3X4_120fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x9004, 0xca}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-
-/* Binned 720P mode */
-static struct imx_reg const imx_VIDEO_1M_9X16_60fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xd0},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x40},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0x8f},
- {IMX_8BIT, 0x034a, 0x0a},
- {IMX_8BIT, 0x034b, 0x5f},
- {IMX_8BIT, 0x034c, 0x02},
- {IMX_8BIT, 0x034d, 0xe0},
- {IMX_8BIT, 0x034e, 0x05},
- {IMX_8BIT, 0x034f, 0x10},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x01},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x00},
- {IMX_8BIT, 0x040c, 0x02},
- {IMX_8BIT, 0x040d, 0xe0},
- {IMX_8BIT, 0x040e, 0x05},
- {IMX_8BIT, 0x040f, 0x10},
-
- {IMX_8BIT, 0x0900, 0x01},
- {IMX_8BIT, 0x0901, 0x22},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xdd}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Binned 496x868 mode */
-static struct imx_reg const imx_VIDEO_496x868_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x02},
- {IMX_8BIT, 0x0345, 0xc0},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0xec},
- {IMX_8BIT, 0x0348, 0x06},
- {IMX_8BIT, 0x0349, 0x9f},
- {IMX_8BIT, 0x034a, 0x08},
- {IMX_8BIT, 0x034b, 0xb3},
- {IMX_8BIT, 0x034c, 0x01},
- {IMX_8BIT, 0x034d, 0xf0},
- {IMX_8BIT, 0x034e, 0x03},
- {IMX_8BIT, 0x034f, 0x64},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x01},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x00},
- {IMX_8BIT, 0x040c, 0x01},
- {IMX_8BIT, 0x040d, 0xf0},
- {IMX_8BIT, 0x040e, 0x03},
- {IMX_8BIT, 0x040f, 0x64},
-
- {IMX_8BIT, 0x0900, 0x01},
- {IMX_8BIT, 0x0901, 0x22},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xdd}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-
-/* Hangout mode */
-static struct imx_reg const imx_PREVIEW_374X652_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xc0},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x30},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0x9f},
- {IMX_8BIT, 0x034a, 0x0a},
- {IMX_8BIT, 0x034b, 0x6f},
- {IMX_8BIT, 0x034c, 0x01},
- {IMX_8BIT, 0x034d, 0x78},
- {IMX_8BIT, 0x034e, 0x02},
- {IMX_8BIT, 0x034f, 0x90},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x03},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x03},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x02},
- {IMX_8BIT, 0x040c, 0x01},
- {IMX_8BIT, 0x040d, 0x76},
- {IMX_8BIT, 0x040e, 0x02},
- {IMX_8BIT, 0x040f, 0x8c},
-
- {IMX_8BIT, 0x0900, 0x01},
- {IMX_8BIT, 0x0901, 0x22},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xde}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_VIDEO_NHD_9X16_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xc0},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x30},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0x9f},
- {IMX_8BIT, 0x034a, 0x0a},
- {IMX_8BIT, 0x034b, 0x6f},
- {IMX_8BIT, 0x034c, 0x01},
- {IMX_8BIT, 0x034d, 0x78},
- {IMX_8BIT, 0x034e, 0x02},
- {IMX_8BIT, 0x034f, 0x90},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x03},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x03},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x00},
- {IMX_8BIT, 0x040c, 0x01},
- {IMX_8BIT, 0x040d, 0x78},
- {IMX_8BIT, 0x040e, 0x02},
- {IMX_8BIT, 0x040f, 0x90},
-
- {IMX_8BIT, 0x0900, 0x01},
- {IMX_8BIT, 0x0901, 0x22},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xde}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-
-static struct imx_reg const imx227_init_settings[] = {
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0306, 0x00},
- {IMX_8BIT, 0x0307, 0xBB},
- {IMX_8BIT, 0x030E, 0x03},
- {IMX_8BIT, 0x030F, 0x0D},
- {IMX_8BIT, 0x463b, 0x30},
- {IMX_8BIT, 0x463e, 0x05},
- {IMX_8BIT, 0x4612, 0x66},
- {IMX_8BIT, 0x4815, 0x65},
- {IMX_8BIT, 0x4991, 0x00},
- {IMX_8BIT, 0x4992, 0x01},
- {IMX_8BIT, 0x4993, 0xff},
- {IMX_8BIT, 0x458b, 0x00},
- {IMX_8BIT, 0x452a, 0x02},
- {IMX_8BIT, 0x4a7c, 0x00},
- {IMX_8BIT, 0x4a7d, 0x1c},
- {IMX_8BIT, 0x4a7e, 0x00},
- {IMX_8BIT, 0x4a7f, 0x17},
- {IMX_8BIT, 0x462C, 0x2E},
- {IMX_8BIT, 0x461B, 0x28},
- {IMX_8BIT, 0x4663, 0x29},
- {IMX_8BIT, 0x461A, 0x7C},
- {IMX_8BIT, 0x4619, 0x28},
- {IMX_8BIT, 0x4667, 0x22},
- {IMX_8BIT, 0x466B, 0x23},
- {IMX_8BIT, 0x40AD, 0xFF},
- {IMX_8BIT, 0x40BE, 0x00},
- {IMX_8BIT, 0x40BF, 0x6E},
- {IMX_8BIT, 0x40CE, 0x00},
- {IMX_8BIT, 0x40CF, 0x0A},
- {IMX_8BIT, 0x40CA, 0x00},
- {IMX_8BIT, 0x40CB, 0x1F},
- {IMX_8BIT, 0x4D16, 0x00},
- {IMX_8BIT, 0x6204, 0x01},
- {IMX_8BIT, 0x6209, 0x00},
- {IMX_8BIT, 0x621F, 0x01},
- {IMX_8BIT, 0x621E, 0x10},
- GROUPED_PARAMETER_HOLD_DISABLE,
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* TODO settings of preview/still/video will be updated with new use case */
-struct imx_resolution imx227_res_preview[] = {
- {
- .desc = "imx_PREVIEW_374X652_30fps",
- .regs = imx_PREVIEW_374X652_30fps,
- .width = 374,
- .height = 652,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C0A,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_496x868_30fps",
- .regs = imx_VIDEO_496x868_30fps,
- .width = 496,
- .height = 868,
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- }
- },
- },
- {
- .desc = "imx_STILL_5_5M_3X4_30fps",
- .regs = imx_STILL_5_5M_3X4_30fps,
- .width = 2048,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0ED8,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_STILL_5_7M_1X1_30fps",
- .regs = imx_STILL_5_7M_1X1_30fps,
- .width = 2400,
- .height = 2400,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x1130,
- .lines_per_frame = 0x0A1E,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_STILL_6_5M_25fps",
- .regs = imx_STILL_6_5M_25fps,
- .width = 2400,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 25,
- .pixels_per_line = 0x1130,
- .lines_per_frame = 0x0C24,
- },
- {
- }
- },
- }
-};
-
-struct imx_resolution imx227_res_still[] = {
- {
- .desc = "imx_STILL_5_5M_3X4_30fps",
- .regs = imx_STILL_5_5M_3X4_30fps,
- .width = 2048,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 6,
- .pixels_per_line = 0x2130,
- .lines_per_frame = 0x1A22,
- },
- {
- .fps = 30,
- .pixels_per_line = 0x0ED8,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_STILL_5_7M_1X1_30fps",
- .regs = imx_STILL_5_7M_1X1_30fps,
- .width = 2400,
- .height = 2400,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 6,
- .pixels_per_line = 0x266E,
- .lines_per_frame = 0x1704,
- },
- {
- .fps = 30,
- .pixels_per_line = 0x1130,
- .lines_per_frame = 0x0A1E,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_STILL_6_5M_25fps",
- .regs = imx_STILL_6_5M_25fps,
- .width = 2400,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 25,
- .pixels_per_line = 0x1130,
- .lines_per_frame = 0x0C24,
- },
- {
- }
- },
- },
-};
-
-struct imx_resolution imx227_res_video[] = {
- {
- .desc = "imx_VIDEO_4M_9X16_30fps",
- .regs = imx_VIDEO_4M_9X16_30fps,
- .width = 1536,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_VIDEO_2M_9X16_45fps",
- .regs = imx_VIDEO_2M_9X16_45fps,
- .width = 1096,
- .height = 1936,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- .fps = 45,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0800,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_VIDEO_1_3M_3X4_60fps",
- .regs = imx_VIDEO_1_3M_3X4_60fps,
- .width = 1024,
- .height = 1360,
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 60,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0604,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_496x868_30fps",
- .regs = imx_VIDEO_496x868_30fps,
- .width = 496,
- .height = 868,
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_1M_9X16_60fps",
- .regs = imx_VIDEO_1M_9X16_60fps,
- .width = 736,
- .height = 1296,
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 60,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0604,
- },
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C10,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_VGA_3X4_120fps",
- .regs = imx_VIDEO_VGA_3X4_120fps,
- .width = 512,
- .height = 680,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 120,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0302,
- },
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_NHD_9X16_30fps",
- .regs = imx_VIDEO_NHD_9X16_30fps,
- .width = 376,
- .height = 656,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C0A,
- },
- {
- }
- },
- },
-};
-
-#endif /* __IMX227_H__ */
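Each resolution entry in the tables above carries an fps_options array whose last element is left zeroed as a terminator, so one mode can expose several (fps, pixels_per_line, lines_per_frame) combinations; the 2M video mode, for example, lists both 30 and 45 fps. A small sketch of scanning such a list is below; the struct name and the MAX_FPS_OPTIONS bound are assumptions.

#include <stdint.h>
#include <stdio.h>

#define MAX_FPS_OPTIONS 3

struct imx_fps_setting {
	int fps;
	uint16_t pixels_per_line;
	uint16_t lines_per_frame;
};

static const struct imx_fps_setting *
find_fps(const struct imx_fps_setting *opts, int want)
{
	int i;

	for (i = 0; i < MAX_FPS_OPTIONS && opts[i].fps; i++)
		if (opts[i].fps == want)
			return &opts[i];
	return &opts[0];	/* fall back to the mode's first entry */
}

int main(void)
{
	/* values taken from imx_VIDEO_2M_9X16_45fps above */
	static const struct imx_fps_setting video_2m[] = {
		{ 30, 0x0E70, 0x0C08 },
		{ 45, 0x0E70, 0x0800 },
		{ 0 },
	};
	const struct imx_fps_setting *s = find_fps(video_2m, 45);

	printf("fps=%d ppl=0x%04x lpf=0x%04x\n",
	       s->fps, s->pixels_per_line, s->lines_per_frame);
	return 0;
}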
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp.c b/drivers/staging/media/atomisp/i2c/imx/otp.c
deleted file mode 100644
index 462275038046..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/otp.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-
-void *dummy_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 *buf;
-
- buf = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- return buf;
-}
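dummy_otp_read() above hands back either a zeroed devm-allocated buffer or ERR_PTR(-ENOMEM), so a caller is expected to test the result with IS_ERR() rather than against NULL. A hypothetical caller-side sketch (example_load_otp() and the 0xa0/512 arguments are illustrative only):

#include <linux/err.h>
#include <linux/types.h>
#include <media/v4l2-subdev.h>

void *dummy_otp_read(struct v4l2_subdev *sd, u8 dev_addr, u32 start_addr, u32 size);

static int example_load_otp(struct v4l2_subdev *sd)
{
	void *otp = dummy_otp_read(sd, 0xa0, 0, 512);

	if (IS_ERR(otp))
		return PTR_ERR(otp);
	/* parse calibration data from otp here */
	return 0;
}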
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c b/drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c
deleted file mode 100644
index b11f90c5960c..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include "common.h"
-
-/*
- * Read EEPROM data from brcc064 and store
- * it into a kmalloced buffer. On error return NULL.
- * @size: set to the size of the returned EEPROM data.
- */
-void *brcc064_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- unsigned int e2prom_i2c_addr = dev_addr >> 1;
- static const unsigned int max_read_size = 30;
- int addr;
- u32 s_addr = start_addr & E2PROM_ADDR_MASK;
- unsigned char *buffer;
-
- buffer = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buffer)
- return NULL;
-
- for (addr = s_addr; addr < size; addr += max_read_size) {
- struct i2c_msg msg[2];
- unsigned int i2c_addr = e2prom_i2c_addr;
- u16 addr_buf;
- int r;
-
- msg[0].flags = 0;
- msg[0].addr = i2c_addr;
- addr_buf = cpu_to_be16(addr & 0xFFFF);
- msg[0].len = 2;
- msg[0].buf = (u8 *)&addr_buf;
-
- msg[1].addr = i2c_addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = min(max_read_size, size - addr);
- msg[1].buf = &buffer[addr];
-
- r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
- if (r != ARRAY_SIZE(msg)) {
- dev_err(&client->dev, "read failed at 0x%03x\n", addr);
- return NULL;
- }
- }
- return buffer;
-
-}
-
-
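The read loop deleted above fetches the EEPROM in max_read_size (30-byte) pieces, each transfer being an address-write message followed by a read message. The self-contained sketch below reproduces only the chunking arithmetic; xfer() is a stand-in for the real two-message i2c_transfer() call.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_READ_SIZE 30

static uint8_t eeprom[256];	/* pretend device contents */

/* stand-in for the address-write + read i2c_transfer() pair */
static int xfer(uint16_t addr, uint8_t *dst, unsigned int len)
{
	memcpy(dst, &eeprom[addr], len);
	return 0;
}

static int read_all(uint8_t *buf, unsigned int size)
{
	unsigned int addr;

	for (addr = 0; addr < size; addr += MAX_READ_SIZE) {
		unsigned int len = size - addr;

		if (len > MAX_READ_SIZE)
			len = MAX_READ_SIZE;
		if (xfer(addr, &buf[addr], len))
			return -1;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[128] = { 0 };

	eeprom[0] = 0x55;
	if (read_all(buf, sizeof(buf)) == 0)
		printf("first byte: 0x%02x\n", buf[0]);
	return 0;
}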
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c b/drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c
deleted file mode 100644
index 73d041f97811..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include "common.h"
-
-/*
- * Read EEPROM data from the general e2prom chip (e.g.
- * CAT24C08, CAT24C128, le24l042cs) and store
- * it into a kmalloced buffer. On error return NULL.
- * @size: set to the size of the returned EEPROM data.
- */
-void *e2prom_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- unsigned int e2prom_i2c_addr = dev_addr >> 1;
- static const unsigned int max_read_size = 30;
- int addr;
- u32 s_addr = start_addr & E2PROM_ADDR_MASK;
- bool two_addr = (start_addr & E2PROM_2ADDR) >> 31;
- char *buffer;
-
- buffer = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buffer)
- return NULL;
-
- for (addr = s_addr; addr < size; addr += max_read_size) {
- struct i2c_msg msg[2];
- unsigned int i2c_addr = e2prom_i2c_addr;
- u16 addr_buf;
- int r;
-
- msg[0].flags = 0;
- if (two_addr) {
- msg[0].addr = i2c_addr;
- addr_buf = cpu_to_be16(addr & 0xFFFF);
- msg[0].len = 2;
- msg[0].buf = (u8 *)&addr_buf;
- } else {
- i2c_addr |= (addr >> 8) & 0x7;
- msg[0].addr = i2c_addr;
- addr_buf = addr & 0xFF;
- msg[0].len = 1;
- msg[0].buf = (u8 *)&addr_buf;
- }
-
- msg[1].addr = i2c_addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = min(max_read_size, size - addr);
- msg[1].buf = &buffer[addr];
-
- r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
- if (r != ARRAY_SIZE(msg)) {
- dev_err(&client->dev, "read failed at 0x%03x\n", addr);
- return NULL;
- }
- }
- return buffer;
-}
-
-
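e2prom_otp_read() above differs from the brcc064 variant only in its addressing: when the E2PROM_2ADDR flag (bit 31 of start_addr) is set it sends a big-endian 16-bit offset, otherwise it folds the high offset bits into the chip address and sends a single address byte. A simplified sketch of building that first message; struct msg here is a stand-in for struct i2c_msg, not the real type.

#include <stdint.h>
#include <stdio.h>

struct msg {
	uint8_t addr;		/* 7-bit chip address */
	uint8_t len;
	uint8_t buf[2];
};

static struct msg addr_msg(uint8_t chip, uint32_t offset, int two_addr)
{
	struct msg m = { .addr = chip };

	if (two_addr) {
		m.len = 2;
		m.buf[0] = (offset >> 8) & 0xff;	/* big-endian 16-bit offset */
		m.buf[1] = offset & 0xff;
	} else {
		m.addr |= (offset >> 8) & 0x7;		/* high bits go into the chip address */
		m.len = 1;
		m.buf[0] = offset & 0xff;
	}
	return m;
}

int main(void)
{
	struct msg m = addr_msg(0x50, 0x123, 0);

	printf("chip=0x%02x len=%u first=0x%02x\n", m.addr, m.len, m.buf[0]);
	return 0;
}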
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp_imx.c b/drivers/staging/media/atomisp/i2c/imx/otp_imx.c
deleted file mode 100644
index 1ca27c26ef75..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/otp_imx.c
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include <asm/intel-mid.h>
-#include "common.h"
-
-/* Defines for OTP Data Registers */
-#define IMX_OTP_START_ADDR 0x3B04
-#define IMX_OTP_PAGE_SIZE 64
-#define IMX_OTP_READY_REG 0x3B01
-#define IMX_OTP_PAGE_REG 0x3B02
-#define IMX_OTP_MODE_REG 0x3B00
-#define IMX_OTP_PAGE_MAX 20
-#define IMX_OTP_READY_REG_DONE 1
-#define IMX_OTP_READ_ONETIME 32
-#define IMX_OTP_MODE_READ 1
-#define IMX227_OTP_START_ADDR 0x0A04
-#define IMX227_OTP_ENABLE_REG 0x0A00
-#define IMX227_OTP_READY_REG 0x0A01
-#define IMX227_OTP_PAGE_REG 0x0A02
-#define IMX227_OTP_READY_REG_DONE 1
-#define IMX227_OTP_MODE_READ 1
-
-static int
-imx_read_otp_data(struct i2c_client *client, u16 len, u16 reg, void *val)
-{
- struct i2c_msg msg[2];
- u16 data[IMX_SHORT_MAX] = { 0 };
- int err;
-
- if (len > IMX_BYTE_MAX) {
- dev_err(&client->dev, "%s error, invalid data length\n",
- __func__);
- return -EINVAL;
- }
-
- memset(msg, 0 , sizeof(msg));
- memset(data, 0 , sizeof(data));
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = I2C_MSG_LENGTH;
- msg[0].buf = (u8 *)data;
- /* high byte goes first */
- data[0] = cpu_to_be16(reg);
-
- msg[1].addr = client->addr;
- msg[1].len = len;
- msg[1].flags = I2C_M_RD;
- msg[1].buf = (u8 *)data;
-
- err = i2c_transfer(client->adapter, msg, 2);
- if (err != 2) {
- if (err >= 0)
- err = -EIO;
- goto error;
- }
-
- memcpy(val, data, len);
- return 0;
-
-error:
- dev_err(&client->dev, "read from offset 0x%x error %d", reg, err);
- return err;
-}
-
-static int imx_read_otp_reg_array(struct i2c_client *client, u16 size, u16 addr,
- u8 *buf)
-{
- u16 index;
- int ret;
-
- for (index = 0; index + IMX_OTP_READ_ONETIME <= size;
- index += IMX_OTP_READ_ONETIME) {
- ret = imx_read_otp_data(client, IMX_OTP_READ_ONETIME,
- addr + index, &buf[index]);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-void *imx_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 *buf;
- int ret;
- int i;
-
- buf = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < IMX_OTP_PAGE_MAX; i++) {
-
- /*set page NO.*/
- ret = imx_write_reg(client, IMX_8BIT,
- IMX_OTP_PAGE_REG, i & 0xff);
- if (ret)
- goto fail;
-
- /*set read mode*/
- ret = imx_write_reg(client, IMX_8BIT,
- IMX_OTP_MODE_REG, IMX_OTP_MODE_READ);
- if (ret)
- goto fail;
-
- /* Reading the OTP data array */
- ret = imx_read_otp_reg_array(client, IMX_OTP_PAGE_SIZE,
- IMX_OTP_START_ADDR, buf + i * IMX_OTP_PAGE_SIZE);
- if (ret)
- goto fail;
- }
-
- return buf;
-fail:
- /* Driver has failed to find valid data */
- dev_err(&client->dev, "sensor found no valid OTP data\n");
- return ERR_PTR(ret);
-}
-
-void *imx227_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 *buf;
- int ret;
- int i;
-
- buf = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < IMX_OTP_PAGE_MAX; i++) {
-
- /*set page NO.*/
- ret = imx_write_reg(client, IMX_8BIT,
- IMX227_OTP_PAGE_REG, i & 0xff);
- if (ret)
- goto fail;
-
- /*set read mode*/
- ret = imx_write_reg(client, IMX_8BIT,
- IMX227_OTP_ENABLE_REG, IMX227_OTP_MODE_READ);
- if (ret)
- goto fail;
-
- /* Reading the OTP data array */
- ret = imx_read_otp_reg_array(client, IMX_OTP_PAGE_SIZE,
- IMX227_OTP_START_ADDR, buf + i * IMX_OTP_PAGE_SIZE);
- if (ret)
- goto fail;
- }
-
- return buf;
-fail:
- /* Driver has failed to find valid data */
- dev_err(&client->dev, "sensor found no valid OTP data\n");
- return ERR_PTR(ret);
-}
-
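The page loop above reads IMX_OTP_PAGE_MAX (20) pages of IMX_OTP_PAGE_SIZE (64) bytes, each page fetched in IMX_OTP_READ_ONETIME (32-byte) chunks, so the whole OTP image is 20 * 64 = 1280 bytes laid out back to back in one buffer. The sketch below only prints that buffer layout.

#include <stdio.h>

#define PAGE_MAX	20
#define PAGE_SIZE	64
#define READ_ONETIME	32

int main(void)
{
	int page, chunk;

	for (page = 0; page < PAGE_MAX; page++)
		for (chunk = 0; chunk + READ_ONETIME <= PAGE_SIZE; chunk += READ_ONETIME)
			printf("page %2d chunk %2d -> buf[%4d..%4d]\n",
			       page, chunk,
			       page * PAGE_SIZE + chunk,
			       page * PAGE_SIZE + chunk + READ_ONETIME - 1);

	printf("total: %d bytes\n", PAGE_MAX * PAGE_SIZE);
	return 0;
}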
diff --git a/drivers/staging/media/atomisp/i2c/imx/vcm.c b/drivers/staging/media/atomisp/i2c/imx/vcm.c
deleted file mode 100644
index 2d2df04c800a..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/vcm.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include "../../include/linux/atomisp_platform.h"
-
-int vcm_power_up(struct v4l2_subdev *sd)
-{
- const struct camera_af_platform_data *vcm_platform_data;
-
- vcm_platform_data = camera_get_af_platform_data();
- if (NULL == vcm_platform_data)
- return -ENODEV;
- /* Enable power */
- return vcm_platform_data->power_ctrl(sd, 1);
-}
-
-int vcm_power_down(struct v4l2_subdev *sd)
-{
- const struct camera_af_platform_data *vcm_platform_data;
-
- vcm_platform_data = camera_get_af_platform_data();
- if (NULL == vcm_platform_data)
- return -ENODEV;
- return vcm_platform_data->power_ctrl(sd, 0);
-}
-
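The deleted vcm helpers do nothing but forward to the platform power_ctrl callback, differing only in the on/off argument, and the "NULL == x" comparisons are the Yoda style checkpatch complains about. A minimal rewrite sketch with the same behaviour; vcm_set_power() is a hypothetical name, not a function the driver defines.

#include <linux/errno.h>
#include <media/v4l2-device.h>
#include "../../include/linux/atomisp_platform.h"

static int vcm_set_power(struct v4l2_subdev *sd, int on)
{
	const struct camera_af_platform_data *pdata = camera_get_af_platform_data();

	if (!pdata)
		return -ENODEV;
	return pdata->power_ctrl(sd, on);
}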
diff --git a/drivers/staging/media/atomisp/i2c/mt9m114.h b/drivers/staging/media/atomisp/i2c/mt9m114.h
index 5e7d79d2e01b..0af79d77a404 100644
--- a/drivers/staging/media/atomisp/i2c/mt9m114.h
+++ b/drivers/staging/media/atomisp/i2c/mt9m114.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -394,11 +390,6 @@ static struct mt9m114_res_struct mt9m114_res[] = {
};
#define N_RES (ARRAY_SIZE(mt9m114_res))
-static const struct i2c_device_id mt9m114_id[] = {
- {"mt9m114", 0},
- {}
-};
-
static struct misensor_reg const mt9m114_exitstandby[] = {
{MISENSOR_16BIT, 0x098E, 0xDC00},
/* exit-standby */
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h
index ab8907e6c9ef..bf4897347df7 100644
--- a/drivers/staging/media/atomisp/i2c/ov2680.h
+++ b/drivers/staging/media/atomisp/i2c/ov2680.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -35,10 +31,6 @@
#include "../include/linux/atomisp_platform.h"
-#define OV2680_NAME "ov2680"
-#define OV2680B_NAME "ov2680b"
-#define OV2680F_NAME "ov2680f"
-
/* Defines for register writes and register array processing */
#define I2C_MSG_LENGTH 0x2
#define I2C_RETRY_COUNT 5
@@ -227,12 +219,6 @@ struct ov2680_format {
struct ov2680_write_buffer buffer;
};
- static const struct i2c_device_id ov2680_id[] = {
- {OV2680B_NAME, 0},
- {OV2680F_NAME, 0},
- {}
- };
-
static struct ov2680_reg const ov2680_global_setting[] = {
{OV2680_8BIT, 0x0103, 0x01},
{OV2680_8BIT, 0x3002, 0x00},
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.h b/drivers/staging/media/atomisp/i2c/ov2722.h
index 73ecb1679718..d8a973d71699 100644
--- a/drivers/staging/media/atomisp/i2c/ov2722.h
+++ b/drivers/staging/media/atomisp/i2c/ov2722.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -35,8 +31,6 @@
#include "../include/linux/atomisp_platform.h"
-#define OV2722_NAME "ov2722"
-
#define OV2722_POWER_UP_RETRY_NUM 5
/* Defines for register writes and register array processing */
@@ -257,11 +251,6 @@ struct ov2722_write_ctrl {
struct ov2722_write_buffer buffer;
};
-static const struct i2c_device_id ov2722_id[] = {
- {OV2722_NAME, 0},
- {}
-};
-
/*
* Register settings for various resolution
*/
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Kconfig b/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
index 9fb1bffbe9b3..3f527f2047a7 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
@@ -1,11 +1,11 @@
-config VIDEO_OV5693
+config VIDEO_ATOMISP_OV5693
tristate "Omnivision ov5693 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the Micron
- ov5693 5 Mpixel camera.
+ This is a Video4Linux2 sensor-level driver for the Micron
+ ov5693 5 Mpixel camera.
- ov5693 is video camera sensor.
-
- It currently only works with the atomisp driver.
+ ov5693 is video camera sensor.
+ It currently only works with the atomisp driver.
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
index facb70e6a93e..aa6be85c5a60 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/Makefile
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
+obj-$(CONFIG_VIDEO_ATOMISP_OV5693) += atomisp-ov5693.o
# HACK! While this driver is in bad shape, don't enable several warnings
# that would be otherwise enabled with W=1
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h b/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
index 2dd894989cd9..4de44569fe54 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
index 123642557aa8..3e7c3851280f 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c
+++ b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -31,7 +27,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include <linux/io.h>
@@ -391,8 +386,8 @@ static int ov5693_write_reg_array(struct i2c_client *client,
if (!__ov5693_write_reg_is_consecutive(client, &ctrl,
next)) {
err = __ov5693_flush_reg_array(client, &ctrl);
- if (err)
- return err;
+ if (err)
+ return err;
}
err = __ov5693_buf_reg_array(client, &ctrl, next);
if (err) {
@@ -945,12 +940,8 @@ static int ad5823_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
int ad5823_t_focus_abs(struct v4l2_subdev *sd, s32 value)
{
- int ret;
-
value = min(value, AD5823_MAX_FOCUS_POS);
- ret = ad5823_t_focus_vcm(sd, value);
-
- return ret;
+ return ad5823_t_focus_vcm(sd, value);
}
static int ov5693_t_focus_abs(struct v4l2_subdev *sd, s32 value)
@@ -1302,10 +1293,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
/* This driver assumes "internal DVDD, PWDNB tied to DOVDD".
* In this set up only gpio0 (XSHUTDN) should be available
* but in some products (for example ECS) gpio1 (PWDNB) is
@@ -1332,19 +1319,12 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
{
- int ret;
struct ov5693_device *dev = to_ov5693_sensor(sd);
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
- ret = dev->platform_data->gpio0_ctrl(sd, flag);
-
- return ret;
+ return dev->platform_data->gpio0_ctrl(sd, flag);
}
static int __power_up(struct v4l2_subdev *sd)
@@ -1942,8 +1922,7 @@ static int ov5693_remove(struct i2c_client *client)
return 0;
}
-static int ov5693_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov5693_probe(struct i2c_client *client)
{
struct ov5693_device *dev;
int i2c;
@@ -1965,10 +1944,8 @@ static int ov5693_probe(struct i2c_client *client,
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -2030,8 +2007,6 @@ out_free:
return ret;
}
-MODULE_DEVICE_TABLE(i2c, ov5693_id);
-
static const struct acpi_device_id ov5693_acpi_match[] = {
{"INT33BE"},
{},
@@ -2040,27 +2015,13 @@ MODULE_DEVICE_TABLE(acpi, ov5693_acpi_match);
static struct i2c_driver ov5693_driver = {
.driver = {
- .name = OV5693_NAME,
- .acpi_match_table = ACPI_PTR(ov5693_acpi_match),
+ .name = "ov5693",
+ .acpi_match_table = ov5693_acpi_match,
},
- .probe = ov5693_probe,
+ .probe_new = ov5693_probe,
.remove = ov5693_remove,
- .id_table = ov5693_id,
};
-
-static int init_ov5693(void)
-{
- return i2c_add_driver(&ov5693_driver);
-}
-
-static void exit_ov5693(void)
-{
-
- i2c_del_driver(&ov5693_driver);
-}
-
-module_init(init_ov5693);
-module_exit(exit_ov5693);
+module_i2c_driver(ov5693_driver);
MODULE_DESCRIPTION("A low-level driver for OmniVision 5693 sensors");
MODULE_LICENSE("GPL");
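The hunk above drops the open-coded init_ov5693()/exit_ov5693() pair in favour of module_i2c_driver(). As a rough sketch (not part of the patch), the macro expands to the same registration boilerplate, so the module's behaviour is unchanged:

/* Approximate expansion of module_i2c_driver(ov5693_driver),
 * per the module_driver() wrapper used by include/linux/i2c.h: */
static int __init ov5693_driver_init(void)
{
        return i2c_add_driver(&ov5693_driver);
}
module_init(ov5693_driver_init);

static void __exit ov5693_driver_exit(void)
{
        i2c_del_driver(&ov5693_driver);
}
module_exit(ov5693_driver_exit);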
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
index 8c2e6794463b..2ea63807c56d 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -35,8 +31,6 @@
#include "../../include/linux/atomisp_platform.h"
-#define OV5693_NAME "ov5693"
-
#define OV5693_POWER_UP_RETRY_NUM 5
/* Defines for register writes and register array processing */
@@ -278,11 +272,6 @@ struct ov5693_write_ctrl {
struct ov5693_write_buffer buffer;
};
-static const struct i2c_device_id ov5693_id[] = {
- {OV5693_NAME, 0},
- {}
-};
-
static struct ov5693_reg const ov5693_global_setting[] = {
{OV5693_8BIT, 0x0103, 0x01},
{OV5693_8BIT, 0x3001, 0x0a},
diff --git a/drivers/staging/media/atomisp/i2c/ov8858.c b/drivers/staging/media/atomisp/i2c/ov8858.c
index 43e1638fd674..ba147ac2e36f 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858.c
+++ b/drivers/staging/media/atomisp/i2c/ov8858.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -480,8 +476,6 @@ static int ov8858_priv_int_data_init(struct v4l2_subdev *sd)
if (!dev->otp_data) {
dev->otp_data = devm_kzalloc(&client->dev, size, GFP_KERNEL);
if (!dev->otp_data) {
- dev_err(&client->dev, "%s: can't allocate memory",
- __func__);
r = -ENOMEM;
goto error3;
}
@@ -714,10 +708,6 @@ static int __power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (dev->platform_data->v1p2_ctrl) {
ret = dev->platform_data->v1p2_ctrl(sd, flag);
if (ret) {
@@ -769,10 +759,6 @@ static int __gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!client || !dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
if (dev->platform_data->gpio0_ctrl)
return dev->platform_data->gpio0_ctrl(sd, flag);
@@ -1575,15 +1561,6 @@ static int ov8858_s_config(struct v4l2_subdev *sd,
mutex_lock(&dev->input_lock);
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- mutex_unlock(&dev->input_lock);
- dev_err(&client->dev, "platform init error %d!\n", ret);
- return ret;
- }
- }
-
ret = __ov8858_s_power(sd, 1);
if (ret) {
dev_err(&client->dev, "power-up error %d!\n", ret);
@@ -1628,8 +1605,6 @@ fail_detect:
fail_csi_cfg:
__ov8858_s_power(sd, 0);
fail_update:
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
mutex_unlock(&dev->input_lock);
dev_err(&client->dev, "sensor power-gating failed\n");
return ret;
@@ -1930,8 +1905,6 @@ static int ov8858_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov8858_device *dev = to_ov8858_sensor(sd);
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
@@ -2082,8 +2055,7 @@ static const struct v4l2_ctrl_config ctrls[] = {
}
};
-static int ov8858_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov8858_probe(struct i2c_client *client)
{
struct ov8858_device *dev;
unsigned int i;
@@ -2094,15 +2066,11 @@ static int ov8858_probe(struct i2c_client *client,
/* allocate sensor device & init sub device */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "%s: out of memory\n", __func__);
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
- if (id)
- dev->i2c_id = id->driver_data;
dev->fmt_idx = 0;
dev->sensor_id = OV_ID_DEFAULT;
dev->vcm_driver = &ov8858_vcms[OV8858_ID_DEFAULT];
@@ -2182,40 +2150,21 @@ out_free:
return ret;
}
-static const struct i2c_device_id ov8858_id[] = {
- {OV8858_NAME, 0},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, ov8858_id);
-
static const struct acpi_device_id ov8858_acpi_match[] = {
{"INT3477"},
{},
};
+MODULE_DEVICE_TABLE(acpi, ov8858_acpi_match);
static struct i2c_driver ov8858_driver = {
.driver = {
- .name = OV8858_NAME,
- .acpi_match_table = ACPI_PTR(ov8858_acpi_match),
+ .name = "ov8858",
+ .acpi_match_table = ov8858_acpi_match,
},
- .probe = ov8858_probe,
+ .probe_new = ov8858_probe,
.remove = ov8858_remove,
- .id_table = ov8858_id,
};
-
-static __init int ov8858_init_mod(void)
-{
- return i2c_add_driver(&ov8858_driver);
-}
-
-static __exit void ov8858_exit_mod(void)
-{
- i2c_del_driver(&ov8858_driver);
-}
-
-module_init(ov8858_init_mod);
-module_exit(ov8858_exit_mod);
+module_i2c_driver(ov8858_driver);
MODULE_DESCRIPTION("A low-level driver for Omnivision OV8858 sensors");
MODULE_LICENSE("GPL");
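The ov8858 conversion above likewise switches from .probe to .probe_new, the variant that takes no struct i2c_device_id argument; with the I2C ID table removed, matching happens purely through the ACPI table. A minimal stand-alone sketch of that driver shape, using a hypothetical "foo" sensor and a made-up ACPI _HID:

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>

static int foo_probe(struct i2c_client *client)         /* no id argument */
{
        dev_info(&client->dev, "foo sensor bound\n");
        return 0;
}

static int foo_remove(struct i2c_client *client)
{
        return 0;
}

static const struct acpi_device_id foo_acpi_match[] = {
        { "HYPO0001" },         /* hypothetical ACPI _HID */
        { }
};
MODULE_DEVICE_TABLE(acpi, foo_acpi_match);

static struct i2c_driver foo_driver = {
        .driver = {
                .name             = "foo",
                .acpi_match_table = foo_acpi_match,
        },
        .probe_new = foo_probe,
        .remove    = foo_remove,
};
module_i2c_driver(foo_driver);

MODULE_DESCRIPTION("Sketch of a probe_new()-style I2C sensor driver");
MODULE_LICENSE("GPL");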
diff --git a/drivers/staging/media/atomisp/i2c/ov8858.h b/drivers/staging/media/atomisp/i2c/ov8858.h
index 638d1a803a2b..6c89568bb44e 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858.h
+++ b/drivers/staging/media/atomisp/i2c/ov8858.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -113,7 +109,6 @@
#define OV_SUBDEV_PREFIX "ov"
#define OV_ID_DEFAULT 0x0000
-#define OV8858_NAME "ov8858"
#define OV8858_CHIP_ID 0x8858
#define OV8858_LONG_EXPO 0x3500
diff --git a/drivers/staging/media/atomisp/i2c/ov8858_btns.h b/drivers/staging/media/atomisp/i2c/ov8858_btns.h
index 7d74a8899fae..f81851306832 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858_btns.h
+++ b/drivers/staging/media/atomisp/i2c/ov8858_btns.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -113,7 +109,6 @@
#define OV_SUBDEV_PREFIX "ov"
#define OV_ID_DEFAULT 0x0000
-#define OV8858_NAME "ov8858"
#define OV8858_CHIP_ID 0x8858
#define OV8858_LONG_EXPO 0x3500
diff --git a/drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h b/drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h
deleted file mode 100644
index dc7104470f5c..000000000000
--- a/drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Access to message bus through three registers
- * in CUNIT(0:0:0) PCI configuration space.
- * MSGBUS_CTRL_REG(0xD0):
- * 31:24 = message bus opcode
- * 23:16 = message bus port
- * 15:8 = message bus address, low 8 bits.
- * 7:4 = message bus byte enables
- * MSGBUS_CTRL_EXT_REG(0xD8):
- * 31:8 = message bus address, high 24 bits.
- * MSGBUS_DATA_REG(0xD4):
- * hold the data for write or read
- */
-#define PCI_ROOT_MSGBUS_CTRL_REG 0xD0
-#define PCI_ROOT_MSGBUS_DATA_REG 0xD4
-#define PCI_ROOT_MSGBUS_CTRL_EXT_REG 0xD8
-#define PCI_ROOT_MSGBUS_READ 0x10
-#define PCI_ROOT_MSGBUS_WRITE 0x11
-#define PCI_ROOT_MSGBUS_DWORD_ENABLE 0xf0
-
-/* In BYT platform for all internal PCI devices d3 delay
- * of 3 ms is sufficient. Default value of 10 ms is overkill.
- */
-#define INTERNAL_PCI_PM_D3_WAIT 3
-
-#define ISP_SUB_CLASS 0x80
-#define SUB_CLASS_MASK 0xFF00
-
-u32 intel_mid_msgbus_read32_raw(u32 cmd);
-u32 intel_mid_msgbus_read32(u8 port, u32 addr);
-void intel_mid_msgbus_write32_raw(u32 cmd, u32 data);
-void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data);
-u32 intel_mid_msgbus_read32_raw_ext(u32 cmd, u32 cmd_ext);
-void intel_mid_msgbus_write32_raw_ext(u32 cmd, u32 cmd_ext, u32 data);
-u32 intel_mid_soc_stepping(void);
-int intel_mid_dw_i2c_acquire_ownership(void);
-int intel_mid_dw_i2c_release_ownership(void);
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h
index d67dd658cff9..b5533197226d 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifdef CSS15
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
index 5390b97ac6e7..7e3ca12dd4e9 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
@@ -17,9 +17,6 @@
#include "atomisp_platform.h"
-const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void);
-const struct atomisp_platform_data *atomisp_get_platform_data(void);
-const struct camera_af_platform_data *camera_get_af_platform_data(void);
int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
struct camera_sensor_platform_data *plat_data,
enum intel_v4l2_subdev_type type);
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
index dbac2b777dad..e0f0c379e7ce 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef ATOMISP_PLATFORM_H_
@@ -205,20 +201,13 @@ struct camera_vcm_control {
};
struct camera_sensor_platform_data {
- int (*gpio_ctrl)(struct v4l2_subdev *subdev, int flag);
int (*flisclk_ctrl)(struct v4l2_subdev *subdev, int flag);
- int (*power_ctrl)(struct v4l2_subdev *subdev, int flag);
int (*csi_cfg)(struct v4l2_subdev *subdev, int flag);
- bool (*low_fps)(void);
- int (*platform_init)(struct i2c_client *);
- int (*platform_deinit)(void);
- char *(*msr_file_name)(void);
- struct atomisp_camera_caps *(*get_camera_caps)(void);
- int (*gpio_intr_ctrl)(struct v4l2_subdev *subdev);
- /* New G-Min power and GPIO interface, replaces
- * power/gpio_ctrl with methods to control individual
- * lines as implemented on all known camera modules. */
+ /*
+ * New G-Min power and GPIO interface to control individual
+ * lines as implemented on all known camera modules.
+ */
int (*gpio0_ctrl)(struct v4l2_subdev *subdev, int on);
int (*gpio1_ctrl)(struct v4l2_subdev *subdev, int on);
int (*v1p8_ctrl)(struct v4l2_subdev *subdev, int on);
@@ -228,12 +217,6 @@ struct camera_sensor_platform_data {
char *module_id);
};
-struct camera_af_platform_data {
- int (*power_ctrl)(struct v4l2_subdev *subdev, int flag);
-};
-
-const struct camera_af_platform_data *camera_get_af_platform_data(void);
-
struct camera_mipi_info {
enum atomisp_camera_port port;
unsigned int num_lanes;
diff --git a/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h b/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h
index 589f4eae38ca..8988b37943b3 100644
--- a/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h
+++ b/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h
@@ -10,10 +10,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __LIBMSRLISTHELPER_H__
diff --git a/drivers/staging/media/atomisp/include/media/lm3554.h b/drivers/staging/media/atomisp/include/media/lm3554.h
index 7d6a8c05dd52..9276ce44d907 100644
--- a/drivers/staging/media/atomisp/include/media/lm3554.h
+++ b/drivers/staging/media/atomisp/include/media/lm3554.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef _LM3554_H_
@@ -24,7 +20,6 @@
#include <linux/videodev2.h>
#include <media/v4l2-subdev.h>
-#define LM3554_NAME "lm3554"
#define LM3554_ID 3554
#define v4l2_queryctrl_entry_integer(_id, _name,\
diff --git a/drivers/staging/media/atomisp/include/media/lm3642.h b/drivers/staging/media/atomisp/include/media/lm3642.h
deleted file mode 100644
index 545d95763335..000000000000
--- a/drivers/staging/media/atomisp/include/media/lm3642.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * include/media/lm3642.h
- *
- * Copyright (c) 2010-2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- */
-
-#ifndef _LM3642_H_
-#define _LM3642_H_
-
-#include <linux/videodev2.h>
-#include <media/v4l2-subdev.h>
-
-#define LM3642_NAME "lm3642"
-#define LM3642_ID 3642
-
-#define v4l2_queryctrl_entry_integer(_id, _name,\
- _minimum, _maximum, _step, \
- _default_value, _flags) \
- {\
- .id = (_id), \
- .type = V4L2_CTRL_TYPE_INTEGER, \
- .name = _name, \
- .minimum = (_minimum), \
- .maximum = (_maximum), \
- .step = (_step), \
- .default_value = (_default_value),\
- .flags = (_flags),\
- }
-#define v4l2_queryctrl_entry_boolean(_id, _name,\
- _default_value, _flags) \
- {\
- .id = (_id), \
- .type = V4L2_CTRL_TYPE_BOOLEAN, \
- .name = _name, \
- .minimum = 0, \
- .maximum = 1, \
- .step = 1, \
- .default_value = (_default_value),\
- .flags = (_flags),\
- }
-
-#define s_ctrl_id_entry_integer(_id, _name, \
- _minimum, _maximum, _step, \
- _default_value, _flags, \
- _s_ctrl, _g_ctrl) \
- {\
- .qc = v4l2_queryctrl_entry_integer(_id, _name,\
- _minimum, _maximum, _step,\
- _default_value, _flags), \
- .s_ctrl = _s_ctrl, \
- .g_ctrl = _g_ctrl, \
- }
-
-#define s_ctrl_id_entry_boolean(_id, _name, \
- _default_value, _flags, \
- _s_ctrl, _g_ctrl) \
- {\
- .qc = v4l2_queryctrl_entry_boolean(_id, _name,\
- _default_value, _flags), \
- .s_ctrl = _s_ctrl, \
- .g_ctrl = _g_ctrl, \
- }
-
-
-/* Default Values */
-#define LM3642_DEFAULT_TIMEOUT 300U
-#define LM3642_DEFAULT_RAMP_TIME 0x10 /* 1.024ms */
-#define LM3642_DEFAULT_INDICATOR_CURRENT 0x01 /* 1.88A */
-#define LM3642_DEFAULT_FLASH_CURRENT 0x0f /* 1500mA */
-
-/* Value settings for Flash Time-out Duration*/
-#define LM3642_MIN_TIMEOUT 100U
-#define LM3642_MAX_TIMEOUT 800U
-#define LM3642_TIMEOUT_STEPSIZE 100U
-
-/* Flash modes */
-#define LM3642_MODE_SHUTDOWN 0
-#define LM3642_MODE_INDICATOR 1
-#define LM3642_MODE_TORCH 2
-#define LM3642_MODE_FLASH 3
-
-/* timer delay time */
-#define LM3642_TIMER_DELAY 5
-
-/* Percentage <-> value macros */
-#define LM3642_MIN_PERCENT 0U
-#define LM3642_MAX_PERCENT 100U
-#define LM3642_CLAMP_PERCENTAGE(val) \
- clamp(val, LM3642_MIN_PERCENT, LM3642_MAX_PERCENT)
-
-#define LM3642_VALUE_TO_PERCENT(v, step) \
- (((((unsigned long)((v)+1))*(step))+50)/100)
-#define LM3642_PERCENT_TO_VALUE(p, step) \
- (((((unsigned long)(p))*100)+((step)>>1))/(step)-1)
-
-/* Product specific limits
- * TODO: get these from platform data */
-#define LM3642_FLASH_MAX_LVL 0x0F /* 1500mA */
-#define LM3642_TORCH_MAX_LVL 0x07 /* 187mA */
-#define LM3642_INDICATOR_MAX_LVL 0x01 /* 1.88A */
-
-/* Flash brightness, input is percentage, output is [0..15] */
-#define LM3642_FLASH_STEP \
- ((100ul*(LM3642_MAX_PERCENT) \
- +((LM3642_FLASH_MAX_LVL+1)>>1)) \
- /((LM3642_FLASH_MAX_LVL+1)))
-#define LM3642_FLASH_DEFAULT_BRIGHTNESS \
- LM3642_VALUE_TO_PERCENT(15, LM3642_FLASH_STEP)
-
-/* Torch brightness, input is percentage, output is [0..7] */
-#define LM3642_TORCH_STEP \
- ((100ul*(LM3642_MAX_PERCENT) \
- +((LM3642_TORCH_MAX_LVL+1)>>1)) \
- /((LM3642_TORCH_MAX_LVL+1)))
-#define LM3642_TORCH_DEFAULT_BRIGHTNESS \
- LM3642_VALUE_TO_PERCENT(0, LM3642_TORCH_STEP)
-
-/* Indicator brightness, input is percentage, output is [0..1] */
-#define LM3642_INDICATOR_STEP \
- ((100ul*(LM3642_MAX_PERCENT) \
- +((LM3642_INDICATOR_MAX_LVL+1)>>1)) \
- /((LM3642_INDICATOR_MAX_LVL+1)))
-#define LM3642_INDICATOR_DEFAULT_BRIGHTNESS \
- LM3642_VALUE_TO_PERCENT(1, LM3642_INDICATOR_STEP)
-
-/*
- * lm3642_platform_data - Flash controller platform data
- */
-struct lm3642_platform_data {
- int gpio_torch;
- int gpio_strobe;
- int (*power_ctrl)(struct v4l2_subdev *subdev, int on);
-
- unsigned int torch_en;
- unsigned int flash_en;
- unsigned int tx_en;
- unsigned int ivfm_en;
-};
-
-#endif /* _LM3642_H_ */
-
diff --git a/drivers/staging/media/atomisp/pci/Kconfig b/drivers/staging/media/atomisp/pci/Kconfig
index a72421431c7a..41f116d52060 100644
--- a/drivers/staging/media/atomisp/pci/Kconfig
+++ b/drivers/staging/media/atomisp/pci/Kconfig
@@ -3,11 +3,12 @@
#
config VIDEO_ATOMISP
- tristate "Intel Atom Image Signal Processor Driver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- select VIDEOBUF_VMALLOC
- ---help---
- Say Y here if your platform supports Intel Atom SoC
- camera imaging subsystem.
- To compile this driver as a module, choose M here: the
- module will be called atomisp
+ tristate "Intel Atom Image Signal Processor Driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select IOSF_MBI
+ select VIDEOBUF_VMALLOC
+ ---help---
+ Say Y here if your platform supports Intel Atom SoC
+ camera imaging subsystem.
+ To compile this driver as a module, choose M here: the
+ module will be called atomisp
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h
index 513a430ee01a..5d102a4f8aff 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c
index 1eac329339b7..a6638edee360 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h
index 5b58e7d9ca5b..56386154643b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
index f48bf451c1f5..8a18c528cad4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/firmware.h>
@@ -27,7 +23,8 @@
#include <linux/kfifo.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
-#include <asm/intel-mid.h>
+
+#include <asm/iosf_mbi.h>
#include <media/v4l2-event.h>
#include <media/videobuf-vmalloc.h>
@@ -143,36 +140,36 @@ static int write_target_freq_to_hw(struct atomisp_device *isp,
unsigned int ratio, timeout, guar_ratio;
u32 isp_sspm1 = 0;
int i;
+
if (!isp->hpll_freq) {
dev_err(isp->dev, "failed to get hpll_freq. no change to freq\n");
return -EINVAL;
}
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
if (isp_sspm1 & ISP_FREQ_VALID_MASK) {
dev_dbg(isp->dev, "clearing ISPSSPM1 valid bit.\n");
- intel_mid_msgbus_write32(PUNIT_PORT, ISPSSPM1,
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, ISPSSPM1,
isp_sspm1 & ~(1 << ISP_FREQ_VALID_OFFSET));
}
ratio = (2 * isp->hpll_freq + new_freq / 2) / new_freq - 1;
guar_ratio = (2 * isp->hpll_freq + 200 / 2) / 200 - 1;
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
isp_sspm1 &= ~(0x1F << ISP_REQ_FREQ_OFFSET);
for (i = 0; i < ISP_DFS_TRY_TIMES; i++) {
- intel_mid_msgbus_write32(PUNIT_PORT, ISPSSPM1,
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, ISPSSPM1,
isp_sspm1
| ratio << ISP_REQ_FREQ_OFFSET
| 1 << ISP_FREQ_VALID_OFFSET
| guar_ratio << ISP_REQ_GUAR_FREQ_OFFSET);
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
-
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
timeout = 20;
while ((isp_sspm1 & ISP_FREQ_VALID_MASK) && timeout) {
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
dev_dbg(isp->dev, "waiting for ISPSSPM1 valid bit to be 0.\n");
udelay(100);
timeout--;
@@ -187,10 +184,10 @@ static int write_target_freq_to_hw(struct atomisp_device *isp,
return -EINVAL;
}
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
timeout = 10;
while (((isp_sspm1 >> ISP_FREQ_STAT_OFFSET) != ratio) && timeout) {
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
dev_dbg(isp->dev, "waiting for ISPSSPM1 status bit to be 0x%x.\n",
new_freq);
udelay(100);
@@ -1660,20 +1657,15 @@ void atomisp_css_flush(struct atomisp_device *isp)
dev_dbg(isp->dev, "atomisp css flush done\n");
}
-#ifndef ISP2401
-void atomisp_wdt(unsigned long isp_addr)
-#else
-void atomisp_wdt(unsigned long pipe_addr)
-#endif
+void atomisp_wdt(struct timer_list *t)
{
#ifndef ISP2401
- struct atomisp_device *isp = (struct atomisp_device *)isp_addr;
+ struct atomisp_sub_device *asd = from_timer(asd, t, wdt);
#else
- struct atomisp_video_pipe *pipe =
- (struct atomisp_video_pipe *)pipe_addr;
+ struct atomisp_video_pipe *pipe = from_timer(pipe, t, wdt);
struct atomisp_sub_device *asd = pipe->asd;
- struct atomisp_device *isp = asd->isp;
#endif
+ struct atomisp_device *isp = asd->isp;
#ifdef ISP2401
atomic_inc(&pipe->wdt_count);
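The watchdog changes in atomisp_cmd.c above follow the tree-wide timer API conversion: the callback now receives a struct timer_list * and recovers its enclosing structure with from_timer() (a container_of() wrapper) instead of being handed a cast unsigned long cookie. A minimal sketch of the pattern with a hypothetical foo_device:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo_device {
        struct timer_list wdt;
        unsigned int wdt_count;
};

static void foo_wdt(struct timer_list *t)
{
        /* Map the timer back to the structure that embeds it. */
        struct foo_device *foo = from_timer(foo, t, wdt);

        foo->wdt_count++;
}

static void foo_wdt_init(struct foo_device *foo)
{
        /* Replaces setup_timer(&foo->wdt, foo_wdt, (unsigned long)foo). */
        timer_setup(&foo->wdt, foo_wdt, 0);
        mod_timer(&foo->wdt, jiffies + msecs_to_jiffies(1000));
}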
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h
index 31ba4e613d13..bdc73862fb79 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -85,11 +81,7 @@ static inline void __iomem *atomisp_get_io_virt_addr(unsigned int address)
void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev);
void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev);
void atomisp_wdt_work(struct work_struct *work);
-#ifndef ISP2401
-void atomisp_wdt(unsigned long isp_addr);
-#else
-void atomisp_wdt(unsigned long pipe_addr);
-#endif
+void atomisp_wdt(struct timer_list *t);
void atomisp_setup_flash(struct atomisp_sub_device *asd);
irqreturn_t atomisp_isr(int irq, void *dev);
irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h
index 69d1526da362..2558193045a6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h
index fb8b8fab4e92..3ef850cd25bd 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
index 05897b747349..6e87aa5aab4c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -155,7 +151,7 @@ static void atomisp_css2_hw_store(hrt_address addr,
const void *from, uint32_t n)
{
unsigned long flags;
- unsigned i;
+ unsigned int i;
unsigned int _to = (unsigned int)addr;
const char *_from = (const char *)from;
@@ -168,7 +164,7 @@ static void atomisp_css2_hw_store(hrt_address addr,
static void atomisp_css2_hw_load(hrt_address addr, void *to, uint32_t n)
{
unsigned long flags;
- unsigned i;
+ unsigned int i;
char *_to = (char *)to;
unsigned int _from = (unsigned int)addr;
@@ -232,9 +228,11 @@ static void __dump_pipe_config(struct atomisp_sub_device *asd,
unsigned int pipe_id)
{
struct atomisp_device *isp = asd->isp;
+
if (stream_env->pipes[pipe_id]) {
struct ia_css_pipe_config *p_config;
struct ia_css_pipe_extra_config *pe_config;
+
p_config = &stream_env->pipe_configs[pipe_id];
pe_config = &stream_env->pipe_extra_configs[pipe_id];
dev_dbg(isp->dev, "dumping pipe[%d] config:\n", pipe_id);
@@ -507,6 +505,7 @@ static int __destroy_stream(struct atomisp_sub_device *asd,
static int __destroy_streams(struct atomisp_sub_device *asd, bool force)
{
int ret, i;
+
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
ret = __destroy_stream(asd, &asd->stream_env[i], force);
if (ret)
@@ -573,6 +572,7 @@ static int __destroy_stream_pipes(struct atomisp_sub_device *asd,
struct atomisp_device *isp = asd->isp;
int ret = 0;
int i;
+
for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
if (!stream_env->pipes[i] ||
!(force || stream_env->update_pipe[i]))
@@ -892,12 +892,12 @@ static inline int __set_css_print_env(struct atomisp_device *isp, int opt)
{
int ret = 0;
- if (0 == opt)
+ if (opt == 0)
isp->css_env.isp_css_env.print_env.debug_print = NULL;
- else if (1 == opt)
+ else if (opt == 1)
isp->css_env.isp_css_env.print_env.debug_print =
atomisp_css2_dbg_ftrace_print;
- else if (2 == opt)
+ else if (opt == 2)
isp->css_env.isp_css_env.print_env.debug_print =
atomisp_css2_dbg_print;
else
@@ -1051,6 +1051,7 @@ int atomisp_css_irq_enable(struct atomisp_device *isp,
void atomisp_css_init_struct(struct atomisp_sub_device *asd)
{
int i, j;
+
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
asd->stream_env[i].stream = NULL;
for (j = 0; j < IA_CSS_PIPE_MODE_NUM; j++) {
@@ -1189,6 +1190,7 @@ int atomisp_css_start(struct atomisp_sub_device *asd,
struct atomisp_device *isp = asd->isp;
bool sp_is_started = false;
int ret = 0, i = 0;
+
if (in_reset) {
if (__destroy_streams(asd, true))
dev_warn(isp->dev, "destroy stream failed.\n");
@@ -1976,6 +1978,7 @@ void atomisp_css_enable_raw_binning(struct atomisp_sub_device *asd,
void atomisp_css_enable_dz(struct atomisp_sub_device *asd, bool enable)
{
int i;
+
for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
.pipe_configs[i].enable_dz = enable;
@@ -2002,6 +2005,7 @@ void atomisp_css_input_set_mode(struct atomisp_sub_device *asd,
int i;
struct atomisp_device *isp = asd->isp;
unsigned int size_mem_words;
+
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++)
asd->stream_env[i].stream_config.mode = mode;
@@ -2275,6 +2279,7 @@ int atomisp_css_stop(struct atomisp_sub_device *asd,
if (!in_reset) {
struct atomisp_stream_env *stream_env;
int i, j;
+
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
stream_env = &asd->stream_env[i];
for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
@@ -2801,6 +2806,7 @@ static void __configure_video_vf_output(struct atomisp_sub_device *asd,
struct atomisp_stream_env *stream_env =
&asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
struct ia_css_frame_info *css_output_info;
+
stream_env->pipe_configs[pipe_id].mode =
__pipe_id_to_pipe_mode(asd, pipe_id);
stream_env->update_pipe[pipe_id] = true;
@@ -4464,7 +4470,8 @@ int atomisp_css_load_acc_binary(struct atomisp_sub_device *asd,
static struct atomisp_sub_device *__get_atomisp_subdev(
struct ia_css_pipe *css_pipe,
struct atomisp_device *isp,
- enum atomisp_input_stream_id *stream_id) {
+ enum atomisp_input_stream_id *stream_id)
+{
int i, j, k;
struct atomisp_sub_device *asd;
struct atomisp_stream_env *stream_env;
@@ -4515,7 +4522,7 @@ int atomisp_css_isr_thread(struct atomisp_device *isp,
for (i = 0; i < isp->num_of_streams; i++)
atomisp_wdt_stop(&isp->asd[i], 0);
#ifndef ISP2401
- atomisp_wdt((unsigned long)isp);
+ atomisp_wdt(&isp->asd[0].wdt);
#else
queue_work(isp->wdt_work_queue, &isp->wdt_work);
#endif
@@ -4659,7 +4666,7 @@ int atomisp_css_dump_sp_raw_copy_linecount(bool reduced)
int atomisp_css_dump_blob_infor(void)
{
struct ia_css_blob_descr *bd = sh_css_blob_info;
- unsigned i, nm = sh_css_num_binaries;
+ unsigned int i, nm = sh_css_num_binaries;
if (nm == 0)
return -EPERM;
@@ -4695,7 +4702,7 @@ int atomisp_set_css_dbgfunc(struct atomisp_device *isp, int opt)
int ret;
ret = __set_css_print_env(isp, opt);
- if (0 == ret)
+ if (ret == 0)
dbg_func = opt;
return ret;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h
index b62ad9082018..b03711668eda 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c
index 0592ac1f2832..44c21813a06e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifdef CONFIG_COMPAT
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
index 750478f614d6..685da0f48bab 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_COMPAT_IOCTL32_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c
index 2c5036685447..fa03b78c3580 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h
index faa9cf7e05c0..0191d28a55bc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_CSI2_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h
index 204d941cdb6c..54e28605b5de 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_DFS_TABLES_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c
index 1ae2358de8d4..7129b88456cb 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -162,7 +158,7 @@ static ssize_t iunit_dbgopt_store(struct device_driver *drv, const char *buf,
return size;
}
-static struct driver_attribute iunit_drvfs_attrs[] = {
+static const struct driver_attribute iunit_drvfs_attrs[] = {
__ATTR(dbglvl, 0644, iunit_dbglvl_show, iunit_dbglvl_store),
__ATTR(dbgfun, 0644, iunit_dbgfun_show, iunit_dbgfun_store),
__ATTR(dbgopt, 0644, iunit_dbgopt_show, iunit_dbgopt_store),
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h
index 5cb717b0c1c2..b91bfef21639 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c
index c766119bf798..377ec2a9fa6d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h
index 1b86abd35c38..61fdeb5ee60a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
index d8cfed358d55..dd7596d8763d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -1137,10 +1133,8 @@ static int remove_pad_from_frame(struct atomisp_device *isp,
ia_css_ptr store = load;
buffer = kmalloc(width*sizeof(load), GFP_KERNEL);
- if (!buffer) {
- dev_err(isp->dev, "out of memory.\n");
+ if (!buffer)
return -ENOMEM;
- }
load += ISP_LEFT_PAD;
for (i = 0; i < height; i++) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h
index 8471e391501a..2faab3429d43 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h
index e9650cb75ba5..55ba185b43a0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef _atomisp_helper_h_
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
index 7542a72f1d0f..52a6f8002048 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_INTERNAL_H__
@@ -29,9 +25,6 @@
#include <linux/pm_qos.h>
#include <linux/idr.h>
-#include <asm/intel-mid.h>
-#include "../../include/asm/intel_mid_pcihelpers.h"
-
#include <media/media-device.h>
#include <media/v4l2-subdev.h>
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c
index 717647951fb6..339b5d31e1f1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c
@@ -14,17 +14,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/delay.h>
#include <linux/pci.h>
-#include <asm/intel-mid.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
@@ -943,10 +938,8 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
dev_dbg(isp->dev, "allocating %d 3a buffers\n", count);
while (count--) {
s3a_buf = kzalloc(sizeof(struct atomisp_s3a_buf), GFP_KERNEL);
- if (!s3a_buf) {
- dev_err(isp->dev, "s3a stat buf alloc failed\n");
+ if (!s3a_buf)
goto error;
- }
if (atomisp_css_allocate_stat_buffers(
asd, stream_id, s3a_buf, NULL, NULL)) {
@@ -965,7 +958,6 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
while (count--) {
dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL);
if (!dis_buf) {
- dev_err(isp->dev, "dis stat buf alloc failed\n");
kfree(s3a_buf);
goto error;
}
@@ -990,10 +982,8 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
while (count--) {
md_buf = kzalloc(sizeof(struct atomisp_metadata_buf),
GFP_KERNEL);
- if (!md_buf) {
- dev_err(isp->dev, "metadata buf alloc failed\n");
+ if (!md_buf)
goto error;
- }
if (atomisp_css_allocate_stat_buffers(
asd, stream_id, NULL, NULL, md_buf)) {
@@ -2943,13 +2933,15 @@ static long atomisp_vidioc_default(struct file *file, void *fh,
#else
if (isp->motor)
#endif
- err = v4l2_subdev_call(
#ifndef ISP2401
+ err = v4l2_subdev_call(
isp->inputs[asd->input_curr].motor,
+ core, ioctl, cmd, arg);
#else
+ err = v4l2_subdev_call(
isp->motor,
-#endif
core, ioctl, cmd, arg);
+#endif
else
err = v4l2_subdev_call(
isp->inputs[asd->input_curr].camera,
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h
index fb5fadb5332b..0d2785b9ef99 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c
index 744ab6eb42a0..70b53988553c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/module.h>
@@ -25,7 +21,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <asm/intel-mid.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mediabus.h>
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
index ba5c2ab14253..f3d61827ae8c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_SUBDEV_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h
index af09218d8b71..319ded6a96da 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_TABLES_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c
index 48b96048cab4..b71cc7bcdbab 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h
index 64ab60f02e85..af354c4bfd3e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h
index 5ce282d6c939..462b296554c7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#undef TRACE_SYSTEM
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c
index 663aa916e3ca..3c260f8b52e2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/module.h>
@@ -28,6 +24,8 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <asm/iosf_mbi.h>
+
#include "../../include/linux/atomisp_gmin_platform.h"
#include "atomisp_cmd.h"
@@ -46,7 +44,6 @@
#include "hrt/hive_isp_css_mm_hrt.h"
#include "device_access.h"
-#include <asm/intel-mid.h>
/* G-Min addition: pull this in from intel_mid_pm.h */
#define CSTATE_EXIT_LATENCY_C1 1
@@ -386,28 +383,23 @@ done:
*/
static void punit_ddr_dvfs_enable(bool enable)
{
- int reg = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSDVFS);
int door_bell = 1 << 8;
int max_wait = 30;
+ int reg;
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSDVFS, &reg);
if (enable) {
reg &= ~(MRFLD_BIT0 | MRFLD_BIT1);
} else {
reg |= (MRFLD_BIT1 | door_bell);
reg &= ~(MRFLD_BIT0);
}
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSDVFS, reg);
- intel_mid_msgbus_write32(PUNIT_PORT, MRFLD_ISPSSDVFS, reg);
-
- /*Check Req_ACK to see freq status, wait until door_bell is cleared*/
- if (reg & door_bell) {
- while (max_wait--) {
- if (0 == (intel_mid_msgbus_read32(PUNIT_PORT,
- MRFLD_ISPSSDVFS) & door_bell))
- break;
-
- usleep_range(100, 500);
- }
+ /* Check Req_ACK to see freq status, wait until door_bell is cleared */
+ while ((reg & door_bell) && max_wait--) {
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSDVFS, &reg);
+ usleep_range(100, 500);
}
if (max_wait == -1)
@@ -421,10 +413,10 @@ int atomisp_mrfld_power_down(struct atomisp_device *isp)
u32 reg_value;
/* writing 0x3 to ISPSSPM0 bit[1:0] to power off the IUNIT */
- reg_value = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSPM0);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &reg_value);
reg_value &= ~MRFLD_ISPSSPM0_ISPSSC_MASK;
reg_value |= MRFLD_ISPSSPM0_IUNIT_POWER_OFF;
- intel_mid_msgbus_write32(PUNIT_PORT, MRFLD_ISPSSPM0, reg_value);
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSPM0, reg_value);
/*WA:Enable DVFS*/
if (IS_CHT)
@@ -437,8 +429,7 @@ int atomisp_mrfld_power_down(struct atomisp_device *isp)
*/
timeout = jiffies + msecs_to_jiffies(50);
while (1) {
- reg_value = intel_mid_msgbus_read32(PUNIT_PORT,
- MRFLD_ISPSSPM0);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &reg_value);
dev_dbg(isp->dev, "power-off in progress, ISPSSPM0: 0x%x\n",
reg_value);
/* wait until ISPSSPM0 bit[25:24] shows 0x3 */
@@ -477,14 +468,14 @@ int atomisp_mrfld_power_up(struct atomisp_device *isp)
msleep(10);
/* writing 0x0 to ISPSSPM0 bit[1:0] to power off the IUNIT */
- reg_value = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSPM0);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &reg_value);
reg_value &= ~MRFLD_ISPSSPM0_ISPSSC_MASK;
- intel_mid_msgbus_write32(PUNIT_PORT, MRFLD_ISPSSPM0, reg_value);
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSPM0, reg_value);
/* FIXME: experienced value for delay */
timeout = jiffies + msecs_to_jiffies(50);
while (1) {
- reg_value = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSPM0);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &reg_value);
dev_dbg(isp->dev, "power-on in progress, ISPSSPM0: 0x%x\n",
reg_value);
/* wait until ISPSSPM0 bit[25:24] shows 0x0 */
@@ -755,7 +746,6 @@ static int atomisp_subdev_probe(struct atomisp_device *isp)
&subdevs->v4l2_subdev.board_info;
struct i2c_adapter *adapter =
i2c_get_adapter(subdevs->v4l2_subdev.i2c_adapter_id);
- struct camera_sensor_platform_data *sensor_pdata;
int sensor_num, i;
if (adapter == NULL) {
@@ -807,13 +797,7 @@ static int atomisp_subdev_probe(struct atomisp_device *isp)
* pixel_format.
*/
isp->inputs[isp->input_cnt].frame_size.pixel_format = 0;
- sensor_pdata = (struct camera_sensor_platform_data *)
- board_info->platform_data;
- if (sensor_pdata->get_camera_caps)
- isp->inputs[isp->input_cnt].camera_caps =
- sensor_pdata->get_camera_caps();
- else
- isp->inputs[isp->input_cnt].camera_caps =
+ isp->inputs[isp->input_cnt].camera_caps =
atomisp_get_default_camera_caps();
sensor_num = isp->inputs[isp->input_cnt]
.camera_caps->sensor_num;
@@ -1020,7 +1004,7 @@ csi_and_subdev_probe_failed:
v4l2_device_unregister(&isp->v4l2_dev);
v4l2_device_failed:
media_device_unregister(&isp->media_dev);
- media_device_cleanup(&isp->media_dev);
+ media_device_cleanup(&isp->media_dev);
return ret;
}
@@ -1155,17 +1139,12 @@ static int init_atomisp_wdts(struct atomisp_device *isp)
struct atomisp_sub_device *asd = &isp->asd[i];
asd = &isp->asd[i];
#ifndef ISP2401
- setup_timer(&asd->wdt, atomisp_wdt, (unsigned long)isp);
+ timer_setup(&asd->wdt, atomisp_wdt, 0);
#else
- setup_timer(&asd->video_out_capture.wdt,
- atomisp_wdt, (unsigned long)&asd->video_out_capture);
- setup_timer(&asd->video_out_preview.wdt,
- atomisp_wdt, (unsigned long)&asd->video_out_preview);
- setup_timer(&asd->video_out_vf.wdt,
- atomisp_wdt, (unsigned long)&asd->video_out_vf);
- setup_timer(&asd->video_out_video_capture.wdt,
- atomisp_wdt,
- (unsigned long)&asd->video_out_video_capture);
+ timer_setup(&asd->video_out_capture.wdt, atomisp_wdt, 0);
+ timer_setup(&asd->video_out_preview.wdt, atomisp_wdt, 0);
+ timer_setup(&asd->video_out_vf.wdt, atomisp_wdt, 0);
+ timer_setup(&asd->video_out_video_capture.wdt, atomisp_wdt, 0);
#endif
}
return 0;
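The init_atomisp_wdts() hunk above moves the watchdog timers from setup_timer(), which passed an opaque unsigned long cookie to the callback, to timer_setup(), where the callback receives the struct timer_list pointer itself and derives its container with from_timer(). The callback side is not part of this hunk; the sketch below shows the usual shape of the conversion on a hypothetical structure (wdt_owner, wdt_fn and the 100 ms period are illustrative, not taken from the driver):

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical owner structure embedding the timer. */
struct wdt_owner {
	struct timer_list wdt;
	bool expired;
};

static void wdt_fn(struct timer_list *t)
{
	/* recover the embedding structure from the timer pointer */
	struct wdt_owner *owner = from_timer(owner, t, wdt);

	owner->expired = true;
}

static void wdt_start(struct wdt_owner *owner)
{
	/* the third argument now carries timer flags, not a data cookie */
	timer_setup(&owner->wdt, wdt_fn, 0);
	mod_timer(&owner->wdt, jiffies + msecs_to_jiffies(100));
}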
@@ -1323,7 +1302,7 @@ static int atomisp_pci_probe(struct pci_dev *dev,
isp->dfs = &dfs_config_cht;
isp->pdev->d3cold_delay = 0;
- val = intel_mid_msgbus_read32(CCK_PORT, CCK_FUSE_REG_0);
+ iosf_mbi_read(CCK_PORT, MBI_REG_READ, CCK_FUSE_REG_0, &val);
switch (val & CCK_FUSE_HPLL_FREQ_MASK) {
case 0x00:
isp->hpll_freq = HPLL_FREQ_800MHZ;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h
index 191b2e57a810..944a6cf40a2f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h
index 766218ed3649..914aa7f98700 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h
@@ -18,7 +18,6 @@
#include <sp.h>
#include <type_support.h>
#include <math_support.h>
-#include <storage_class.h>
#include <assert_support.h>
#include <platform_support.h>
#include "ia_css_circbuf_comm.h"
@@ -45,7 +44,7 @@ struct ia_css_circbuf_s {
* @param elems An array of elements.
* @param desc The descriptor set to the size using ia_css_circbuf_desc_init().
*/
-STORAGE_CLASS_EXTERN void ia_css_circbuf_create(
+extern void ia_css_circbuf_create(
ia_css_circbuf_t *cb,
ia_css_circbuf_elem_t *elems,
ia_css_circbuf_desc_t *desc);
@@ -55,7 +54,7 @@ STORAGE_CLASS_EXTERN void ia_css_circbuf_create(
*
* @param cb The pointer to the circular buffer.
*/
-STORAGE_CLASS_EXTERN void ia_css_circbuf_destroy(
+extern void ia_css_circbuf_destroy(
ia_css_circbuf_t *cb);
/**
@@ -68,7 +67,7 @@ STORAGE_CLASS_EXTERN void ia_css_circbuf_destroy(
*
* @return the pop-out value.
*/
-STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_pop(
+extern uint32_t ia_css_circbuf_pop(
ia_css_circbuf_t *cb);
/**
@@ -82,7 +81,7 @@ STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_pop(
*
* @return the extracted value.
*/
-STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_extract(
+extern uint32_t ia_css_circbuf_extract(
ia_css_circbuf_t *cb,
int offset);
@@ -97,7 +96,7 @@ STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_extract(
* @param elem The pointer to the element.
* @param val The value to be set.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_elem_set_val(
+static inline void ia_css_circbuf_elem_set_val(
ia_css_circbuf_elem_t *elem,
uint32_t val)
{
@@ -111,7 +110,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_elem_set_val(
*
* @param elem The pointer to the element.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_elem_init(
+static inline void ia_css_circbuf_elem_init(
ia_css_circbuf_elem_t *elem)
{
OP___assert(elem != NULL);
@@ -124,7 +123,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_elem_init(
* @param src The element as the copy source.
* @param dest The element as the copy destination.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_elem_cpy(
+static inline void ia_css_circbuf_elem_cpy(
ia_css_circbuf_elem_t *src,
ia_css_circbuf_elem_t *dest)
{
@@ -143,7 +142,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_elem_cpy(
*
* @return the position at offset.
*/
-STORAGE_CLASS_INLINE uint8_t ia_css_circbuf_get_pos_at_offset(
+static inline uint8_t ia_css_circbuf_get_pos_at_offset(
ia_css_circbuf_t *cb,
uint32_t base,
int offset)
@@ -176,7 +175,7 @@ STORAGE_CLASS_INLINE uint8_t ia_css_circbuf_get_pos_at_offset(
*
* @return the offset.
*/
-STORAGE_CLASS_INLINE int ia_css_circbuf_get_offset(
+static inline int ia_css_circbuf_get_offset(
ia_css_circbuf_t *cb,
uint32_t src_pos,
uint32_t dest_pos)
@@ -201,7 +200,7 @@ STORAGE_CLASS_INLINE int ia_css_circbuf_get_offset(
*
* TODO: Test this API.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_size(
+static inline uint32_t ia_css_circbuf_get_size(
ia_css_circbuf_t *cb)
{
OP___assert(cb != NULL);
@@ -217,7 +216,7 @@ STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_size(
*
* @return the number of available elements.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_num_elems(
+static inline uint32_t ia_css_circbuf_get_num_elems(
ia_css_circbuf_t *cb)
{
int num;
@@ -239,7 +238,7 @@ STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_num_elems(
* - true when it is empty.
* - false when it is not empty.
*/
-STORAGE_CLASS_INLINE bool ia_css_circbuf_is_empty(
+static inline bool ia_css_circbuf_is_empty(
ia_css_circbuf_t *cb)
{
OP___assert(cb != NULL);
@@ -257,7 +256,7 @@ STORAGE_CLASS_INLINE bool ia_css_circbuf_is_empty(
* - true when it is full.
* - false when it is not full.
*/
-STORAGE_CLASS_INLINE bool ia_css_circbuf_is_full(ia_css_circbuf_t *cb)
+static inline bool ia_css_circbuf_is_full(ia_css_circbuf_t *cb)
{
OP___assert(cb != NULL);
OP___assert(cb->desc != NULL);
@@ -274,7 +273,7 @@ STORAGE_CLASS_INLINE bool ia_css_circbuf_is_full(ia_css_circbuf_t *cb)
* @param cb The pointer to the circular buffer.
* @param elem The new element.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_write(
+static inline void ia_css_circbuf_write(
ia_css_circbuf_t *cb,
ia_css_circbuf_elem_t elem)
{
@@ -298,7 +297,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_write(
* @param cb The pointer to the circular buffer.
* @param val The value to be pushed in.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_push(
+static inline void ia_css_circbuf_push(
ia_css_circbuf_t *cb,
uint32_t val)
{
@@ -321,7 +320,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_push(
*
* @return: The number of free elements.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_free_elems(
+static inline uint32_t ia_css_circbuf_get_free_elems(
ia_css_circbuf_t *cb)
{
OP___assert(cb != NULL);
@@ -338,7 +337,7 @@ STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_free_elems(
*
* @return the elements value.
*/
-STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_peek(
+extern uint32_t ia_css_circbuf_peek(
ia_css_circbuf_t *cb,
int offset);
@@ -350,7 +349,7 @@ STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_peek(
*
* @return the elements value.
*/
-STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_peek_from_start(
+extern uint32_t ia_css_circbuf_peek_from_start(
ia_css_circbuf_t *cb,
int offset);
@@ -369,7 +368,7 @@ STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_peek_from_start(
* @return true on successfully increasing the size
* false on failure
*/
-STORAGE_CLASS_EXTERN bool ia_css_circbuf_increase_size(
+extern bool ia_css_circbuf_increase_size(
ia_css_circbuf_t *cb,
unsigned int sz_delta,
ia_css_circbuf_elem_t *elems);
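Throughout ia_css_circbuf.h the storage_class.h indirection is removed: STORAGE_CLASS_EXTERN becomes a plain extern declaration and STORAGE_CLASS_INLINE a plain static inline definition. A self-contained illustration of the before/after shape on a made-up buffer type (my_buf, my_buf_pop and my_buf_is_empty are not driver names):

#include <linux/types.h>

struct my_buf {
	u32 *elems;
	unsigned int count;
};

/* before: STORAGE_CLASS_EXTERN u32 my_buf_pop(struct my_buf *b); */
extern u32 my_buf_pop(struct my_buf *b);

/* before: STORAGE_CLASS_INLINE bool my_buf_is_empty(const struct my_buf *b) */
static inline bool my_buf_is_empty(const struct my_buf *b)
{
	return b->count == 0;
}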
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h
index a8447d409c31..8dd7cd6cd3d8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h
@@ -17,7 +17,6 @@
#include <type_support.h>
#include <math_support.h>
-#include <storage_class.h>
#include <platform_support.h>
#include <sp.h>
#include "ia_css_circbuf_comm.h"
@@ -35,7 +34,7 @@
* - true when it is empty.
* - false when it is not empty.
*/
-STORAGE_CLASS_INLINE bool ia_css_circbuf_desc_is_empty(
+static inline bool ia_css_circbuf_desc_is_empty(
ia_css_circbuf_desc_t *cb_desc)
{
OP___assert(cb_desc != NULL);
@@ -52,7 +51,7 @@ STORAGE_CLASS_INLINE bool ia_css_circbuf_desc_is_empty(
* - true when it is full.
* - false when it is not full.
*/
-STORAGE_CLASS_INLINE bool ia_css_circbuf_desc_is_full(
+static inline bool ia_css_circbuf_desc_is_full(
ia_css_circbuf_desc_t *cb_desc)
{
OP___assert(cb_desc != NULL);
@@ -65,7 +64,7 @@ STORAGE_CLASS_INLINE bool ia_css_circbuf_desc_is_full(
* @param cb_desc The pointer circular buffer descriptor
* @param size The size of the circular buffer
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_desc_init(
+static inline void ia_css_circbuf_desc_init(
ia_css_circbuf_desc_t *cb_desc,
int8_t size)
{
@@ -82,7 +81,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_desc_init(
*
* @return the position in the circular buffer descriptor.
*/
-STORAGE_CLASS_INLINE uint8_t ia_css_circbuf_desc_get_pos_at_offset(
+static inline uint8_t ia_css_circbuf_desc_get_pos_at_offset(
ia_css_circbuf_desc_t *cb_desc,
uint32_t base,
int offset)
@@ -114,7 +113,7 @@ STORAGE_CLASS_INLINE uint8_t ia_css_circbuf_desc_get_pos_at_offset(
*
* @return the offset.
*/
-STORAGE_CLASS_INLINE int ia_css_circbuf_desc_get_offset(
+static inline int ia_css_circbuf_desc_get_offset(
ia_css_circbuf_desc_t *cb_desc,
uint32_t src_pos,
uint32_t dest_pos)
@@ -135,7 +134,7 @@ STORAGE_CLASS_INLINE int ia_css_circbuf_desc_get_offset(
*
* @return The number of available elements.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_desc_get_num_elems(
+static inline uint32_t ia_css_circbuf_desc_get_num_elems(
ia_css_circbuf_desc_t *cb_desc)
{
int num;
@@ -155,7 +154,7 @@ STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_desc_get_num_elems(
*
* @return: The number of free elements.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_desc_get_free_elems(
+static inline uint32_t ia_css_circbuf_desc_get_free_elems(
ia_css_circbuf_desc_t *cb_desc)
{
uint32_t num;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c
index 17d3b7de93ba..98a2a3e9b3e6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c
@@ -22,6 +22,7 @@
#include <assert_support.h>
/* HRT_GDC_N */
#include "gdc_device.h"
+#include <linux/kernel.h>
/* This module provides binary descriptions used to find a binary. Since
* every stage is associated with a binary, it implicitly helps stage
@@ -147,11 +148,9 @@ enum ia_css_err sh_css_bds_factor_get_numerator_denominator(
unsigned int *bds_factor_denominator)
{
unsigned int i;
- unsigned int bds_list_size = sizeof(bds_factors_list) /
- sizeof(struct sh_css_bds_factor);
/* Loop over all bds factors until a match is found */
- for (i = 0; i < bds_list_size; i++) {
+ for (i = 0; i < ARRAY_SIZE(bds_factors_list); i++) {
if (bds_factors_list[i].bds_factor == bds_factor) {
*bds_factor_numerator = bds_factors_list[i].numerator;
*bds_factor_denominator = bds_factors_list[i].denominator;
@@ -170,8 +169,6 @@ enum ia_css_err binarydesc_calculate_bds_factor(
unsigned int *bds_factor)
{
unsigned int i;
- unsigned int bds_list_size = sizeof(bds_factors_list) /
- sizeof(struct sh_css_bds_factor);
unsigned int in_w = input_res.width,
in_h = input_res.height,
out_w = output_res.width, out_h = output_res.height;
@@ -186,7 +183,7 @@ enum ia_css_err binarydesc_calculate_bds_factor(
assert(out_w != 0 && out_h != 0);
/* Loop over all bds factors until a match is found */
- for (i = 0; i < bds_list_size; i++) {
+ for (i = 0; i < ARRAY_SIZE(bds_factors_list); i++) {
unsigned num = bds_factors_list[i].numerator;
unsigned den = bds_factors_list[i].denominator;
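The two loops above drop the open-coded sizeof(array)/sizeof(element) arithmetic in favour of ARRAY_SIZE() from <linux/kernel.h>, which is shorter and, in the kernel's definition, refuses to compile when its argument is a pointer rather than a true array. A minimal sketch of the pattern (the table contents are illustrative, not the driver's bds_factors_list):

#include <linux/kernel.h>

struct factor_sketch {
	unsigned int numerator;
	unsigned int denominator;
};

static const struct factor_sketch factors[] = {
	{ 1, 1 }, { 5, 4 }, { 3, 2 }, { 2, 1 },
};

static int find_factor(unsigned int num, unsigned int den)
{
	unsigned int i;

	/* ARRAY_SIZE(factors) == sizeof(factors) / sizeof(factors[0]) */
	for (i = 0; i < ARRAY_SIZE(factors); i++)
		if (factors[i].numerator == num &&
		    factors[i].denominator == den)
			return i;

	return -1;
}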
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c
index 08f486e20a65..54193789a809 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c
@@ -111,7 +111,7 @@ unsigned int ia_css_util_input_format_bpp(
break;
}
-return rval;
+ return rval;
}
enum ia_css_err ia_css_util_check_vf_info(
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h
index 16bfe1d80bc9..7766f78cd123 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h
@@ -12,7 +12,7 @@
* more details.
*/
-#ifndef _if_subsystem_defs_h
+#ifndef _if_subsystem_defs_h__
#define _if_subsystem_defs_h__
#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0
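The one-line change above repairs a mismatched include guard: the header tested _if_subsystem_defs_h but defined _if_subsystem_defs_h__, so a second inclusion of the file would re-expand its body instead of being skipped. The same fix is repeated for the 2401 and 2401_csi2p copies of the header below. The canonical shape of a matching guard, using the header's own macro name:

/* Test and define the same macro, and close the guard with a comment. */
#ifndef _if_subsystem_defs_h__
#define _if_subsystem_defs_h__

#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0

#endif /* _if_subsystem_defs_h__ */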
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
index 5819bcff5e55..6720ab55d6f5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
@@ -34,7 +34,7 @@
* @brief Get the csi rx fe state.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_get_state(
+static inline void csi_rx_fe_ctrl_get_state(
const csi_rx_frontend_ID_t ID,
csi_rx_fe_ctrl_state_t *state)
{
@@ -73,7 +73,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_get_state(
* @brief Get the state of the csi rx fe dlane process.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_get_dlane_state(
+static inline void csi_rx_fe_ctrl_get_dlane_state(
const csi_rx_frontend_ID_t ID,
const uint32_t lane,
csi_rx_fe_ctrl_lane_t *dlane_state)
@@ -89,7 +89,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_get_dlane_state(
* @brief dump the csi rx fe state.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_dump_state(
+static inline void csi_rx_fe_ctrl_dump_state(
const csi_rx_frontend_ID_t ID,
csi_rx_fe_ctrl_state_t *state)
{
@@ -118,7 +118,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_dump_state(
* @brief Get the csi rx be state.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_get_state(
+static inline void csi_rx_be_ctrl_get_state(
const csi_rx_backend_ID_t ID,
csi_rx_be_ctrl_state_t *state)
{
@@ -181,7 +181,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_get_state(
* @brief Dump the csi rx be state.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_dump_state(
+static inline void csi_rx_be_ctrl_dump_state(
const csi_rx_backend_ID_t ID,
csi_rx_be_ctrl_state_t *state)
{
@@ -225,7 +225,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_dump_state(
* @brief Load the register value.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C hrt_data csi_rx_fe_ctrl_reg_load(
+static inline hrt_data csi_rx_fe_ctrl_reg_load(
const csi_rx_frontend_ID_t ID,
const hrt_address reg)
{
@@ -239,7 +239,7 @@ STORAGE_CLASS_CSI_RX_C hrt_data csi_rx_fe_ctrl_reg_load(
* @brief Store a value to the register.
* Refer to "ibuf_ctrl_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_reg_store(
+static inline void csi_rx_fe_ctrl_reg_store(
const csi_rx_frontend_ID_t ID,
const hrt_address reg,
const hrt_data value)
@@ -253,7 +253,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_reg_store(
* @brief Load the register value.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C hrt_data csi_rx_be_ctrl_reg_load(
+static inline hrt_data csi_rx_be_ctrl_reg_load(
const csi_rx_backend_ID_t ID,
const hrt_address reg)
{
@@ -267,7 +267,7 @@ STORAGE_CLASS_CSI_RX_C hrt_data csi_rx_be_ctrl_reg_load(
* @brief Store a value to the register.
* Refer to "ibuf_ctrl_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_reg_store(
+static inline void csi_rx_be_ctrl_reg_store(
const csi_rx_backend_ID_t ID,
const hrt_address reg,
const hrt_data value)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h
index 16bfe1d80bc9..7766f78cd123 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h
@@ -12,7 +12,7 @@
* more details.
*/
-#ifndef _if_subsystem_defs_h
+#ifndef _if_subsystem_defs_h__
#define _if_subsystem_defs_h__
#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h
index 16bfe1d80bc9..7766f78cd123 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h
@@ -12,7 +12,7 @@
* more details.
*/
-#ifndef _if_subsystem_defs_h
+#ifndef _if_subsystem_defs_h__
#define _if_subsystem_defs_h__
#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c
index 87a25d4289ec..770db7dff5d3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c
@@ -12,7 +12,7 @@
* more details.
*/
-#include <stddef.h> /* NULL */
+#include <linux/kernel.h>
#include "dma.h"
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h
index 9d3a29696094..bcfb734c2ed3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h
@@ -28,7 +28,7 @@ STORAGE_CLASS_EVENT_C void event_wait_for(const event_ID_t ID)
assert(ID < N_EVENT_ID);
assert(event_source_addr[ID] != ((hrt_address)-1));
(void)ia_css_device_load_uint32(event_source_addr[ID]);
-return;
+ return;
}
STORAGE_CLASS_EVENT_C void cnd_event_wait_for(const event_ID_t ID,
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c
index 1087944d637f..1bf292401adc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c
@@ -38,12 +38,12 @@ STORAGE_CLASS_FIFO_MONITOR_DATA unsigned int FIFO_SWITCH_ADDR[N_FIFO_SWITCH] = {
#include "fifo_monitor_private.h"
#endif /* __INLINE_FIFO_MONITOR__ */
-STORAGE_CLASS_INLINE bool fifo_monitor_status_valid (
+static inline bool fifo_monitor_status_valid (
const fifo_monitor_ID_t ID,
const unsigned int reg,
const unsigned int port_id);
-STORAGE_CLASS_INLINE bool fifo_monitor_status_accept(
+static inline bool fifo_monitor_status_accept(
const fifo_monitor_ID_t ID,
const unsigned int reg,
const unsigned int port_id);
@@ -546,7 +546,7 @@ void fifo_monitor_get_state(
return;
}
-STORAGE_CLASS_INLINE bool fifo_monitor_status_valid (
+static inline bool fifo_monitor_status_valid (
const fifo_monitor_ID_t ID,
const unsigned int reg,
const unsigned int port_id)
@@ -556,7 +556,7 @@ STORAGE_CLASS_INLINE bool fifo_monitor_status_valid (
return (data >> (((port_id * 2) + _hive_str_mon_valid_offset))) & 0x1;
}
-STORAGE_CLASS_INLINE bool fifo_monitor_status_accept(
+static inline bool fifo_monitor_status_accept(
const fifo_monitor_ID_t ID,
const unsigned int reg,
const unsigned int port_id)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h
index 618b2f7e9c75..d58cd7d1828d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h
@@ -33,26 +33,26 @@ STORAGE_CLASS_FIFO_MONITOR_C void fifo_switch_set(
const fifo_switch_t switch_id,
const hrt_data sel)
{
-assert(ID == FIFO_MONITOR0_ID);
-assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
-assert(switch_id < N_FIFO_SWITCH);
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ assert(switch_id < N_FIFO_SWITCH);
(void)ID;
gp_device_reg_store(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id], sel);
-return;
+ return;
}
STORAGE_CLASS_FIFO_MONITOR_C hrt_data fifo_switch_get(
const fifo_monitor_ID_t ID,
const fifo_switch_t switch_id)
{
-assert(ID == FIFO_MONITOR0_ID);
-assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
-assert(switch_id < N_FIFO_SWITCH);
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ assert(switch_id < N_FIFO_SWITCH);
(void)ID;
-return gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]);
+ return gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]);
}
@@ -61,19 +61,19 @@ STORAGE_CLASS_FIFO_MONITOR_C void fifo_monitor_reg_store(
const unsigned int reg,
const hrt_data value)
{
-assert(ID < N_FIFO_MONITOR_ID);
-assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(FIFO_MONITOR_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_FIFO_MONITOR_C hrt_data fifo_monitor_reg_load(
const fifo_monitor_ID_t ID,
const unsigned int reg)
{
-assert(ID < N_FIFO_MONITOR_ID);
-assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(FIFO_MONITOR_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(FIFO_MONITOR_BASE[ID] + reg*sizeof(hrt_data));
}
#endif /* __FIFO_MONITOR_PRIVATE_H_INCLUDED__ */
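The fifo_monitor_private.h hunks above, like the similar ones in the *_private.h headers that follow, are whitespace-only: statements that started in column 0 gain the single tab of indentation the kernel coding style requires. The sketch below restates one accessor from the hunk in its reindented form; the fifo_monitor and hrt identifiers come from the driver's own headers and are shown only to illustrate the layout:

static inline hrt_data fifo_monitor_reg_load_sketch(const fifo_monitor_ID_t ID,
						    const unsigned int reg)
{
	assert(ID < N_FIFO_MONITOR_ID);
	assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
	return ia_css_device_load_uint32(FIFO_MONITOR_BASE[ID] +
					 reg * sizeof(hrt_data));
}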
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c
index 69fa616889b1..1966b147f8ab 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c
@@ -22,12 +22,12 @@
/*
* Local function declarations
*/
-STORAGE_CLASS_INLINE void gdc_reg_store(
+static inline void gdc_reg_store(
const gdc_ID_t ID,
const unsigned int reg,
const hrt_data value);
-STORAGE_CLASS_INLINE hrt_data gdc_reg_load(
+static inline hrt_data gdc_reg_load(
const gdc_ID_t ID,
const unsigned int reg);
@@ -62,7 +62,7 @@ void gdc_lut_store(
gdc_reg_store(ID, lut_offset++, word_0);
gdc_reg_store(ID, lut_offset++, word_1);
}
-return;
+ return;
}
/*
@@ -103,25 +103,25 @@ int gdc_get_unity(
{
assert(ID < N_GDC_ID);
(void)ID;
-return (int)(1UL << HRT_GDC_FRAC_BITS);
+ return (int)(1UL << HRT_GDC_FRAC_BITS);
}
/*
* Local function implementations
*/
-STORAGE_CLASS_INLINE void gdc_reg_store(
+static inline void gdc_reg_store(
const gdc_ID_t ID,
const unsigned int reg,
const hrt_data value)
{
ia_css_device_store_uint32(GDC_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
-STORAGE_CLASS_INLINE hrt_data gdc_reg_load(
+static inline hrt_data gdc_reg_load(
const gdc_ID_t ID,
const unsigned int reg)
{
-return ia_css_device_load_uint32(GDC_BASE[ID] + reg*sizeof(hrt_data));
+ return ia_css_device_load_uint32(GDC_BASE[ID] + reg*sizeof(hrt_data));
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c
index 9a34ac052adf..da88aa3af664 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c
@@ -104,5 +104,5 @@ void gp_device_get_state(
_REG_GP_SYNCGEN_FRAME_CNT_ADDR);
state->soft_reset = gp_device_reg_load(ID,
_REG_GP_SOFT_RESET_ADDR);
-return;
+ return;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h
index bce1fdf79114..7c0362c29411 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h
@@ -26,21 +26,21 @@ STORAGE_CLASS_GP_DEVICE_C void gp_device_reg_store(
const unsigned int reg_addr,
const hrt_data value)
{
-assert(ID < N_GP_DEVICE_ID);
-assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
-assert((reg_addr % sizeof(hrt_data)) == 0);
+ assert(ID < N_GP_DEVICE_ID);
+ assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
ia_css_device_store_uint32(GP_DEVICE_BASE[ID] + reg_addr, value);
-return;
+ return;
}
STORAGE_CLASS_GP_DEVICE_C hrt_data gp_device_reg_load(
const gp_device_ID_t ID,
const hrt_address reg_addr)
{
-assert(ID < N_GP_DEVICE_ID);
-assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
-assert((reg_addr % sizeof(hrt_data)) == 0);
-return ia_css_device_load_uint32(GP_DEVICE_BASE[ID] + reg_addr);
+ assert(ID < N_GP_DEVICE_ID);
+ assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ return ia_css_device_load_uint32(GP_DEVICE_BASE[ID] + reg_addr);
}
#endif /* __GP_DEVICE_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h
index 6ace2184b522..b6ebf34eaa9d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h
@@ -29,7 +29,7 @@ STORAGE_CLASS_GPIO_C void gpio_reg_store(
OP___assert(ID < N_GPIO_ID);
OP___assert(GPIO_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(GPIO_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_GPIO_C hrt_data gpio_reg_load(
@@ -38,7 +38,7 @@ STORAGE_CLASS_GPIO_C hrt_data gpio_reg_load(
{
OP___assert(ID < N_GPIO_ID);
OP___assert(GPIO_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(GPIO_BASE[ID] + reg*sizeof(hrt_data));
+ return ia_css_device_load_uint32(GPIO_BASE[ID] + reg*sizeof(hrt_data));
}
#endif /* __GPIO_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h
index 2b636e0e6482..32a780380e11 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h
@@ -22,9 +22,9 @@
STORAGE_CLASS_HMEM_C size_t sizeof_hmem(
const hmem_ID_t ID)
{
-assert(ID < N_HMEM_ID);
+ assert(ID < N_HMEM_ID);
(void)ID;
-return HMEM_SIZE*sizeof(hmem_data_t);
+ return HMEM_SIZE*sizeof(hmem_data_t);
}
#endif /* __HMEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h
index d34933e44aa9..2f42a9c2771c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h
@@ -26,21 +26,21 @@ STORAGE_CLASS_INPUT_FORMATTER_C void input_formatter_reg_store(
const hrt_address reg_addr,
const hrt_data value)
{
-assert(ID < N_INPUT_FORMATTER_ID);
-assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
-assert((reg_addr % sizeof(hrt_data)) == 0);
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
ia_css_device_store_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr, value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_FORMATTER_C hrt_data input_formatter_reg_load(
const input_formatter_ID_t ID,
const unsigned int reg_addr)
{
-assert(ID < N_INPUT_FORMATTER_ID);
-assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
-assert((reg_addr % sizeof(hrt_data)) == 0);
-return ia_css_device_load_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr);
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ return ia_css_device_load_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr);
}
#endif /* __INPUT_FORMATTER_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c
index f35e18987b67..bd6821e436b2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c
@@ -81,27 +81,27 @@ static input_system_error_t input_system_multiplexer_cfg(
-STORAGE_CLASS_INLINE void capture_unit_get_state(
+static inline void capture_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
capture_unit_state_t *state);
-STORAGE_CLASS_INLINE void acquisition_unit_get_state(
+static inline void acquisition_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
acquisition_unit_state_t *state);
-STORAGE_CLASS_INLINE void ctrl_unit_get_state(
+static inline void ctrl_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
ctrl_unit_state_t *state);
-STORAGE_CLASS_INLINE void mipi_port_get_state(
+static inline void mipi_port_get_state(
const rx_ID_t ID,
const mipi_port_ID_t port_ID,
mipi_port_state_t *state);
-STORAGE_CLASS_INLINE void rx_channel_get_state(
+static inline void rx_channel_get_state(
const rx_ID_t ID,
const unsigned int ch_id,
rx_channel_state_t *state);
@@ -173,7 +173,7 @@ void input_system_get_state(
&(state->ctrl_unit_state[sub_id - CTRL_UNIT0_ID]));
}
-return;
+ return;
}
void receiver_get_state(
@@ -245,7 +245,7 @@ void receiver_get_state(
state->be_irq_clear = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_BE_IRQ_CLEAR_REG_IDX);
-return;
+ return;
}
bool is_mipi_format_yuv420(
@@ -258,7 +258,7 @@ bool is_mipi_format_yuv420(
(mipi_format == MIPI_FORMAT_YUV420_10_SHIFT));
/* MIPI_FORMAT_YUV420_8_LEGACY is not YUV420 */
-return is_yuv420;
+ return is_yuv420;
}
void receiver_set_compression(
@@ -300,7 +300,7 @@ void receiver_set_compression(
reg = ((field_id < 6)?(val << (field_id * 5)):(val << ((field_id - 6) * 5)));
receiver_reg_store(ID, addr, reg);
-return;
+ return;
}
void receiver_port_enable(
@@ -319,7 +319,7 @@ void receiver_port_enable(
receiver_port_reg_store(ID, port_ID,
_HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX, reg);
-return;
+ return;
}
bool is_receiver_port_enabled(
@@ -328,7 +328,7 @@ bool is_receiver_port_enabled(
{
hrt_data reg = receiver_port_reg_load(ID, port_ID,
_HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX);
-return ((reg & 0x01) != 0);
+ return ((reg & 0x01) != 0);
}
void receiver_irq_enable(
@@ -338,14 +338,14 @@ void receiver_irq_enable(
{
receiver_port_reg_store(ID,
port_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, irq_info);
-return;
+ return;
}
rx_irq_info_t receiver_get_irq_info(
const rx_ID_t ID,
const mipi_port_ID_t port_ID)
{
-return receiver_port_reg_load(ID,
+ return receiver_port_reg_load(ID,
port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
}
@@ -356,10 +356,10 @@ void receiver_irq_clear(
{
receiver_port_reg_store(ID,
port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX, irq_info);
-return;
+ return;
}
-STORAGE_CLASS_INLINE void capture_unit_get_state(
+static inline void capture_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
capture_unit_state_t *state)
@@ -418,10 +418,10 @@ STORAGE_CLASS_INLINE void capture_unit_get_state(
sub_id,
CAPT_FSM_STATE_INFO_REG_ID);
-return;
+ return;
}
-STORAGE_CLASS_INLINE void acquisition_unit_get_state(
+static inline void acquisition_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
acquisition_unit_state_t *state)
@@ -468,10 +468,10 @@ STORAGE_CLASS_INLINE void acquisition_unit_get_state(
sub_id,
ACQ_INT_CNTR_INFO_REG_ID);
-return;
+ return;
}
-STORAGE_CLASS_INLINE void ctrl_unit_get_state(
+static inline void ctrl_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
ctrl_unit_state_t *state)
@@ -551,10 +551,10 @@ STORAGE_CLASS_INLINE void ctrl_unit_get_state(
sub_id,
ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID);
-return;
+ return;
}
-STORAGE_CLASS_INLINE void mipi_port_get_state(
+static inline void mipi_port_get_state(
const rx_ID_t ID,
const mipi_port_ID_t port_ID,
mipi_port_state_t *state)
@@ -587,10 +587,10 @@ STORAGE_CLASS_INLINE void mipi_port_get_state(
state->lane_rx_count[i] = (uint8_t)((state->rx_count)>>(i*8));
}
-return;
+ return;
}
-STORAGE_CLASS_INLINE void rx_channel_get_state(
+static inline void rx_channel_get_state(
const rx_ID_t ID,
const unsigned int ch_id,
rx_channel_state_t *state)
@@ -602,30 +602,30 @@ STORAGE_CLASS_INLINE void rx_channel_get_state(
assert(state != NULL);
switch (ch_id) {
- case 0:
- state->comp_scheme0 = receiver_reg_load(ID,
+ case 0:
+ state->comp_scheme0 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
+ state->comp_scheme1 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX);
- break;
- case 1:
- state->comp_scheme0 = receiver_reg_load(ID,
+ break;
+ case 1:
+ state->comp_scheme0 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
+ state->comp_scheme1 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX);
- break;
- case 2:
- state->comp_scheme0 = receiver_reg_load(ID,
+ break;
+ case 2:
+ state->comp_scheme0 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
+ state->comp_scheme1 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX);
- break;
- case 3:
- state->comp_scheme0 = receiver_reg_load(ID,
+ break;
+ case 3:
+ state->comp_scheme0 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
+ state->comp_scheme1 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX);
- break;
+ break;
}
/* See Table 7.1.17,..., 7.1.24 */
@@ -640,7 +640,7 @@ STORAGE_CLASS_INLINE void rx_channel_get_state(
state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3);
}
-return;
+ return;
}
// MW: "2400" in the name is not good, but this is to avoid a naming conflict
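The rx_channel_get_state() hunk above only reflows the switch statement: kernel style puts case labels at the same indentation level as the switch keyword, with the case bodies one tab deeper. A generic, self-contained illustration of that layout (not driver code):

static const char *vc_name_sketch(unsigned int ch_id)
{
	switch (ch_id) {
	case 0:
		return "VC0";
	case 1:
		return "VC1";
	default:
		return "unknown";
	}
}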
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h
index ed1b947b00f9..118185eb86e9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h
@@ -26,19 +26,19 @@ STORAGE_CLASS_INPUT_SYSTEM_C void input_system_reg_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_INPUT_SYSTEM_ID);
-assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(INPUT_SYSTEM_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_reg_load(
const input_system_ID_t ID,
const hrt_address reg)
{
-assert(ID < N_INPUT_SYSTEM_ID);
-assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + reg*sizeof(hrt_data));
}
STORAGE_CLASS_INPUT_SYSTEM_C void receiver_reg_store(
@@ -46,19 +46,19 @@ STORAGE_CLASS_INPUT_SYSTEM_C void receiver_reg_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_RX_ID);
-assert(RX_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_RX_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(RX_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_reg_load(
const rx_ID_t ID,
const hrt_address reg)
{
-assert(ID < N_RX_ID);
-assert(RX_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(RX_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_RX_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(RX_BASE[ID] + reg*sizeof(hrt_data));
}
STORAGE_CLASS_INPUT_SYSTEM_C void receiver_port_reg_store(
@@ -67,12 +67,12 @@ STORAGE_CLASS_INPUT_SYSTEM_C void receiver_port_reg_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_RX_ID);
-assert(port_ID < N_MIPI_PORT_ID);
-assert(RX_BASE[ID] != (hrt_address)-1);
-assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
+ assert(ID < N_RX_ID);
+ assert(port_ID < N_MIPI_PORT_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
ia_css_device_store_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_port_reg_load(
@@ -80,11 +80,11 @@ STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_port_reg_load(
const mipi_port_ID_t port_ID,
const hrt_address reg)
{
-assert(ID < N_RX_ID);
-assert(port_ID < N_MIPI_PORT_ID);
-assert(RX_BASE[ID] != (hrt_address)-1);
-assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg*sizeof(hrt_data));
+ assert(ID < N_RX_ID);
+ assert(port_ID < N_MIPI_PORT_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg*sizeof(hrt_data));
}
STORAGE_CLASS_INPUT_SYSTEM_C void input_system_sub_system_reg_store(
@@ -93,12 +93,12 @@ STORAGE_CLASS_INPUT_SYSTEM_C void input_system_sub_system_reg_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_INPUT_SYSTEM_ID);
-assert(sub_ID < N_SUB_SYSTEM_ID);
-assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
-assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_ID < N_SUB_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
ia_css_device_store_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_sub_system_reg_load(
@@ -106,11 +106,11 @@ STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_sub_system_reg_load(
const sub_system_ID_t sub_ID,
const hrt_address reg)
{
-assert(ID < N_INPUT_SYSTEM_ID);
-assert(sub_ID < N_SUB_SYSTEM_ID);
-assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
-assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] + reg*sizeof(hrt_data));
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_ID < N_SUB_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] + reg*sizeof(hrt_data));
}
#endif /* __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c
index 6b58bc13dc1b..51daf76c2aea 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c
@@ -22,13 +22,13 @@
#include "platform_support.h" /* hrt_sleep() */
-STORAGE_CLASS_INLINE void irq_wait_for_write_complete(
+static inline void irq_wait_for_write_complete(
const irq_ID_t ID);
-STORAGE_CLASS_INLINE bool any_irq_channel_enabled(
+static inline bool any_irq_channel_enabled(
const irq_ID_t ID);
-STORAGE_CLASS_INLINE irq_ID_t virq_get_irq_id(
+static inline irq_ID_t virq_get_irq_id(
const virq_id_t irq_ID,
unsigned int *channel_ID);
@@ -69,7 +69,7 @@ void irq_clear_all(
irq_reg_store(ID,
_HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, mask);
-return;
+ return;
}
/*
@@ -114,7 +114,7 @@ void irq_enable_channel(
irq_wait_for_write_complete(ID);
-return;
+ return;
}
void irq_enable_pulse(
@@ -129,7 +129,7 @@ void irq_enable_pulse(
/* output is given as edge, not pulse */
irq_reg_store(ID,
_HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX, edge_out);
-return;
+ return;
}
void irq_disable_channel(
@@ -160,7 +160,7 @@ void irq_disable_channel(
irq_wait_for_write_complete(ID);
-return;
+ return;
}
enum hrt_isp_css_irq_status irq_get_channel_id(
@@ -195,7 +195,7 @@ enum hrt_isp_css_irq_status irq_get_channel_id(
if (irq_id != NULL)
*irq_id = (unsigned int)idx;
-return status;
+ return status;
}
static const hrt_address IRQ_REQUEST_ADDR[N_IRQ_SW_CHANNEL_ID] = {
@@ -220,7 +220,7 @@ void irq_raise(
(unsigned int)addr, 1);
gp_device_reg_store(GP_DEVICE0_ID,
(unsigned int)addr, 0);
-return;
+ return;
}
void irq_controller_get_state(
@@ -240,7 +240,7 @@ void irq_controller_get_state(
_HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
state->irq_level_not_pulse = irq_reg_load(ID,
_HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX);
-return;
+ return;
}
bool any_virq_signal(void)
@@ -248,7 +248,7 @@ bool any_virq_signal(void)
unsigned int irq_status = irq_reg_load(IRQ0_ID,
_HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
-return (irq_status != 0);
+ return (irq_status != 0);
}
void cnd_virq_enable_channel(
@@ -279,7 +279,7 @@ void cnd_virq_enable_channel(
irq_disable_channel(IRQ0_ID, IRQ_NESTING_ID[ID]);
}
}
-return;
+ return;
}
@@ -290,7 +290,7 @@ void virq_clear_all(void)
for (irq_id = (irq_ID_t)0; irq_id < N_IRQ_ID; irq_id++) {
irq_clear_all(irq_id);
}
-return;
+ return;
}
enum hrt_isp_css_irq_status virq_get_channel_signals(
@@ -320,7 +320,7 @@ enum hrt_isp_css_irq_status virq_get_channel_signals(
}
}
-return irq_status;
+ return irq_status;
}
void virq_clear_info(
@@ -333,7 +333,7 @@ void virq_clear_info(
for (ID = (irq_ID_t)0 ; ID < N_IRQ_ID; ID++) {
irq_info->irq_status_reg[ID] = 0;
}
-return;
+ return;
}
enum hrt_isp_css_irq_status virq_get_channel_id(
@@ -403,10 +403,10 @@ enum hrt_isp_css_irq_status virq_get_channel_id(
if (irq_id != NULL)
*irq_id = (virq_id_t)idx;
-return status;
+ return status;
}
-STORAGE_CLASS_INLINE void irq_wait_for_write_complete(
+static inline void irq_wait_for_write_complete(
const irq_ID_t ID)
{
assert(ID < N_IRQ_ID);
@@ -415,7 +415,7 @@ STORAGE_CLASS_INLINE void irq_wait_for_write_complete(
_HRT_IRQ_CONTROLLER_ENABLE_REG_IDX*sizeof(hrt_data));
}
-STORAGE_CLASS_INLINE bool any_irq_channel_enabled(
+static inline bool any_irq_channel_enabled(
const irq_ID_t ID)
{
hrt_data en_reg;
@@ -425,10 +425,10 @@ STORAGE_CLASS_INLINE bool any_irq_channel_enabled(
en_reg = irq_reg_load(ID,
_HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
-return (en_reg != 0);
+ return (en_reg != 0);
}
-STORAGE_CLASS_INLINE irq_ID_t virq_get_irq_id(
+static inline irq_ID_t virq_get_irq_id(
const virq_id_t irq_ID,
unsigned int *channel_ID)
{
@@ -444,5 +444,5 @@ STORAGE_CLASS_INLINE irq_ID_t virq_get_irq_id(
*channel_ID = (unsigned int)irq_ID - IRQ_N_ID_OFFSET[ID];
-return ID;
+ return ID;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h
index eb325e870e88..23a13ac696c2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h
@@ -26,19 +26,19 @@ STORAGE_CLASS_IRQ_C void irq_reg_store(
const unsigned int reg,
const hrt_data value)
{
-assert(ID < N_IRQ_ID);
-assert(IRQ_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(IRQ_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_IRQ_C hrt_data irq_reg_load(
const irq_ID_t ID,
const unsigned int reg)
{
-assert(ID < N_IRQ_ID);
-assert(IRQ_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(IRQ_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(IRQ_BASE[ID] + reg*sizeof(hrt_data));
}
#endif /* __IRQ_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c
index 47c21e486c25..531c932a48f5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c
@@ -34,7 +34,7 @@ void cnd_isp_irq_enable(
isp_ctrl_clearbit(ID, ISP_IRQ_READY_REG,
ISP_IRQ_READY_BIT);
}
-return;
+ return;
}
void isp_get_state(
@@ -94,7 +94,7 @@ void isp_get_state(
!isp_ctrl_getbit(ID, ISP_ICACHE_MT_SINK_REG,
ISP_ICACHE_MT_SINK_BIT);
*/
-return;
+ return;
}
/* ISP functions to control the ISP state from the host, even in crun. */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c
index b75d0f85d524..a28b67eb66ea 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c
@@ -24,20 +24,20 @@ void mmu_set_page_table_base_index(
const hrt_data base_index)
{
mmu_reg_store(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX, base_index);
-return;
+ return;
}
hrt_data mmu_get_page_table_base_index(
const mmu_ID_t ID)
{
-return mmu_reg_load(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX);
+ return mmu_reg_load(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX);
}
void mmu_invalidate_cache(
const mmu_ID_t ID)
{
mmu_reg_store(ID, _HRT_MMU_INVALIDATE_TLB_REG_IDX, 1);
-return;
+ return;
}
void mmu_invalidate_cache_all(void)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h
index 392b6cc24e8f..7377666f6eb7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h
@@ -26,19 +26,19 @@ STORAGE_CLASS_MMU_H void mmu_reg_store(
const unsigned int reg,
const hrt_data value)
{
-assert(ID < N_MMU_ID);
-assert(MMU_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_MMU_ID);
+ assert(MMU_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(MMU_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_MMU_H hrt_data mmu_reg_load(
const mmu_ID_t ID,
const unsigned int reg)
{
-assert(ID < N_MMU_ID);
-assert(MMU_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(MMU_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_MMU_ID);
+ assert(MMU_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(MMU_BASE[ID] + reg*sizeof(hrt_data));
}
#endif /* __MMU_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h
index e6283bf67ad3..5ea81c0e82d1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h
@@ -26,19 +26,19 @@ STORAGE_CLASS_SP_C void sp_ctrl_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_SP_ID);
-assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(SP_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_SP_C hrt_data sp_ctrl_load(
const sp_ID_t ID,
const hrt_address reg)
{
-assert(ID < N_SP_ID);
-assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(SP_CTRL_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_SP_ID);
+ assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(SP_CTRL_BASE[ID] + reg*sizeof(hrt_data));
}
STORAGE_CLASS_SP_C bool sp_ctrl_getbit(
@@ -47,7 +47,7 @@ STORAGE_CLASS_SP_C bool sp_ctrl_getbit(
const unsigned int bit)
{
hrt_data val = sp_ctrl_load(ID, reg);
-return (val & (1UL << bit)) != 0;
+ return (val & (1UL << bit)) != 0;
}
STORAGE_CLASS_SP_C void sp_ctrl_setbit(
@@ -57,7 +57,7 @@ STORAGE_CLASS_SP_C void sp_ctrl_setbit(
{
hrt_data data = sp_ctrl_load(ID, reg);
sp_ctrl_store(ID, reg, (data | (1UL << bit)));
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_ctrl_clearbit(
@@ -67,7 +67,7 @@ STORAGE_CLASS_SP_C void sp_ctrl_clearbit(
{
hrt_data data = sp_ctrl_load(ID, reg);
sp_ctrl_store(ID, reg, (data & ~(1UL << bit)));
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_store(
@@ -76,10 +76,10 @@ STORAGE_CLASS_SP_C void sp_dmem_store(
const void *data,
const size_t size)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
ia_css_device_store(SP_DMEM_BASE[ID] + addr, data, size);
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_load(
@@ -88,10 +88,10 @@ STORAGE_CLASS_SP_C void sp_dmem_load(
void *data,
const size_t size)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
ia_css_device_load(SP_DMEM_BASE[ID] + addr, data, size);
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_store_uint8(
@@ -99,11 +99,11 @@ STORAGE_CLASS_SP_C void sp_dmem_store_uint8(
hrt_address addr,
const uint8_t data)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
ia_css_device_store_uint8(SP_DMEM_BASE[SP0_ID] + addr, data);
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_store_uint16(
@@ -111,11 +111,11 @@ STORAGE_CLASS_SP_C void sp_dmem_store_uint16(
hrt_address addr,
const uint16_t data)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
ia_css_device_store_uint16(SP_DMEM_BASE[SP0_ID] + addr, data);
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_store_uint32(
@@ -123,19 +123,19 @@ STORAGE_CLASS_SP_C void sp_dmem_store_uint32(
hrt_address addr,
const uint32_t data)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
ia_css_device_store_uint32(SP_DMEM_BASE[SP0_ID] + addr, data);
-return;
+ return;
}
STORAGE_CLASS_SP_C uint8_t sp_dmem_load_uint8(
const sp_ID_t ID,
const hrt_address addr)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
return ia_css_device_load_uint8(SP_DMEM_BASE[SP0_ID] + addr);
}
@@ -144,8 +144,8 @@ STORAGE_CLASS_SP_C uint16_t sp_dmem_load_uint16(
const sp_ID_t ID,
const hrt_address addr)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
return ia_css_device_load_uint16(SP_DMEM_BASE[SP0_ID] + addr);
}
@@ -154,8 +154,8 @@ STORAGE_CLASS_SP_C uint32_t sp_dmem_load_uint32(
const sp_ID_t ID,
const hrt_address addr)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
return ia_css_device_load_uint32(SP_DMEM_BASE[SP0_ID] + addr);
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h
index 92fb15d04703..fd0d92e87c36 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h
@@ -15,7 +15,6 @@
#ifndef __ASSERT_SUPPORT_H_INCLUDED__
#define __ASSERT_SUPPORT_H_INCLUDED__
-#include "storage_class.h"
/**
* The following macro can help to test the size of a struct at compile
@@ -92,7 +91,7 @@
* The implementation for the pipe generation tool is in support.isp.h */
#define OP___assert(cnd) assert(cnd)
-STORAGE_CLASS_INLINE void compile_time_assert (unsigned cond)
+static inline void compile_time_assert (unsigned cond)
{
/* Call undefined function if cond is false */
extern void _compile_time_assert (void);
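assert_support.h keeps its compile-time check but now spells the helper as static inline. The underlying trick, unchanged by this patch, is to declare an extern function that is never defined anywhere: when the compiler can prove the condition true it removes the dead call, otherwise the call survives and the final link fails. A self-contained sketch of the idea (the helper names are illustrative, and it only works for conditions the optimizer can evaluate at compile time):

/* Link-time assert: _ct_assert_failed() intentionally has no definition. */
static inline void compile_time_assert_sketch(unsigned int cond)
{
	extern void _ct_assert_failed(void);

	if (!cond)
		_ct_assert_failed();
}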
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h
index d71e08f27a42..6928965cf513 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "bamem_local.h"
#ifndef __INLINE_BAMEM__
-#define STORAGE_CLASS_BAMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_BAMEM_H extern
#define STORAGE_CLASS_BAMEM_C
#include "bamem_public.h"
#else /* __INLINE_BAMEM__ */
-#define STORAGE_CLASS_BAMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_BAMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_BAMEM_H static inline
+#define STORAGE_CLASS_BAMEM_C static inline
#include "bamem_private.h"
#endif /* __INLINE_BAMEM__ */
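bamem.h shows the two-mode pattern repeated in every header below: without __INLINE_BAMEM__ the *_public.h declarations are plain extern and the bodies live in a separately compiled file, while with __INLINE_BAMEM__ both macros become static inline and the *_private.h bodies are inlined into each includer. A stripped-down sketch of the same structure, using an illustrative module name:

  /* foo.h (sketch) */
  #ifndef __INLINE_FOO__
  #define STORAGE_CLASS_FOO_H extern
  #define STORAGE_CLASS_FOO_C
  #include "foo_public.h"   /* STORAGE_CLASS_FOO_H int foo_get(void); */
  #else
  #define STORAGE_CLASS_FOO_H static inline
  #define STORAGE_CLASS_FOO_C static inline
  #include "foo_private.h"  /* STORAGE_CLASS_FOO_C int foo_get(void) { return 42; } */
  #endif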
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h
index 0398f5802f05..917ee8cdb1d9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h
@@ -30,18 +30,13 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "csi_rx_local.h"
#ifndef __INLINE_CSI_RX__
-#define STORAGE_CLASS_CSI_RX_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_CSI_RX_C
#include "csi_rx_public.h"
#else /* __INLINE_CSI_RX__ */
-#define STORAGE_CLASS_CSI_RX_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_CSI_RX_C STORAGE_CLASS_INLINE
#include "csi_rx_private.h"
#endif /* __INLINE_CSI_RX__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h
index 7d8011735033..0aa22446e27e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "debug_local.h"
#ifndef __INLINE_DEBUG__
-#define STORAGE_CLASS_DEBUG_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_DEBUG_H extern
#define STORAGE_CLASS_DEBUG_C
#include "debug_public.h"
#else /* __INLINE_DEBUG__ */
-#define STORAGE_CLASS_DEBUG_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_DEBUG_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_DEBUG_H static inline
+#define STORAGE_CLASS_DEBUG_C static inline
#include "debug_private.h"
#endif /* __INLINE_DEBUG__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h
index b266191f21ef..d9dee691e3f8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "dma_local.h"
#ifndef __INLINE_DMA__
-#define STORAGE_CLASS_DMA_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_DMA_H extern
#define STORAGE_CLASS_DMA_C
#include "dma_public.h"
#else /* __INLINE_DMA__ */
-#define STORAGE_CLASS_DMA_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_DMA_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_DMA_H static inline
+#define STORAGE_CLASS_DMA_C static inline
#include "dma_private.h"
#endif /* __INLINE_DMA__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h
index 78827c554cc3..df579e902796 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "event_fifo_local.h"
#ifndef __INLINE_EVENT__
-#define STORAGE_CLASS_EVENT_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_EVENT_H extern
#define STORAGE_CLASS_EVENT_C
#include "event_fifo_public.h"
#else /* __INLINE_EVENT__ */
-#define STORAGE_CLASS_EVENT_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_EVENT_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_EVENT_H static inline
+#define STORAGE_CLASS_EVENT_C static inline
#include "event_fifo_private.h"
#endif /* __INLINE_EVENT__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h
index 3bdd260bcaa5..f10c4fa2e32b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "fifo_monitor_local.h"
#ifndef __INLINE_FIFO_MONITOR__
-#define STORAGE_CLASS_FIFO_MONITOR_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_FIFO_MONITOR_H extern
#define STORAGE_CLASS_FIFO_MONITOR_C
#include "fifo_monitor_public.h"
#else /* __INLINE_FIFO_MONITOR__ */
-#define STORAGE_CLASS_FIFO_MONITOR_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_FIFO_MONITOR_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_FIFO_MONITOR_H static inline
+#define STORAGE_CLASS_FIFO_MONITOR_C static inline
#include "fifo_monitor_private.h"
#endif /* __INLINE_FIFO_MONITOR__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h
index 016132ba0b7f..75c6854c8e7b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "gdc_local.h"
#ifndef __INLINE_GDC__
-#define STORAGE_CLASS_GDC_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GDC_H extern
#define STORAGE_CLASS_GDC_C
#include "gdc_public.h"
#else /* __INLINE_GDC__ */
-#define STORAGE_CLASS_GDC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_GDC_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GDC_H static inline
+#define STORAGE_CLASS_GDC_C static inline
#include "gdc_private.h"
#endif /* __INLINE_GDC__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h
index 766d2532d8f9..aba94e623043 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "gp_device_local.h"
#ifndef __INLINE_GP_DEVICE__
-#define STORAGE_CLASS_GP_DEVICE_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GP_DEVICE_H extern
#define STORAGE_CLASS_GP_DEVICE_C
#include "gp_device_public.h"
#else /* __INLINE_GP_DEVICE__ */
-#define STORAGE_CLASS_GP_DEVICE_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_GP_DEVICE_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GP_DEVICE_H static inline
+#define STORAGE_CLASS_GP_DEVICE_C static inline
#include "gp_device_private.h"
#endif /* __INLINE_GP_DEVICE__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h
index ca70f5603bf8..d5d2df24e11a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h" /*GP_TIMER_BASE address */
#include "gp_timer_local.h" /*GP_TIMER register offsets */
#ifndef __INLINE_GP_TIMER__
-#define STORAGE_CLASS_GP_TIMER_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GP_TIMER_H extern
#define STORAGE_CLASS_GP_TIMER_C
#include "gp_timer_public.h" /* functions*/
#else /* __INLINE_GP_TIMER__ */
-#define STORAGE_CLASS_GP_TIMER_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_GP_TIMER_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GP_TIMER_H static inline
+#define STORAGE_CLASS_GP_TIMER_C static inline
#include "gp_timer_private.h" /* inline functions*/
#endif /* __INLINE_GP_TIMER__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h
index dec21bcb6f47..d37f7166aa4a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "gpio_local.h"
#ifndef __INLINE_GPIO__
-#define STORAGE_CLASS_GPIO_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GPIO_H extern
#define STORAGE_CLASS_GPIO_C
#include "gpio_public.h"
#else /* __INLINE_GPIO__ */
-#define STORAGE_CLASS_GPIO_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_GPIO_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GPIO_H static inline
+#define STORAGE_CLASS_GPIO_C static inline
#include "gpio_private.h"
#endif /* __INLINE_GPIO__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h
index 671dd5b5fca6..a82fd3a21e98 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "hmem_local.h"
#ifndef __INLINE_HMEM__
-#define STORAGE_CLASS_HMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_HMEM_H extern
#define STORAGE_CLASS_HMEM_C
#include "hmem_public.h"
#else /* __INLINE_HMEM__ */
-#define STORAGE_CLASS_HMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_HMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_HMEM_H static inline
+#define STORAGE_CLASS_HMEM_C static inline
#include "hmem_private.h"
#endif /* __INLINE_HMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
index 396240954bed..3b5df85fc510 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
@@ -28,7 +28,7 @@
* @param[in] id The global unique ID of the csi rx fe controller.
* @param[out] state Point to the register-state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_get_state(
+extern void csi_rx_fe_ctrl_get_state(
const csi_rx_frontend_ID_t ID,
csi_rx_fe_ctrl_state_t *state);
/**
@@ -38,7 +38,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_get_state(
* @param[in] id The global unique ID of the csi rx fe controller.
* @param[in] state Point to the register-state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_dump_state(
+extern void csi_rx_fe_ctrl_dump_state(
const csi_rx_frontend_ID_t ID,
csi_rx_fe_ctrl_state_t *state);
/**
@@ -49,7 +49,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_dump_state(
* @param[in] lane The lane ID.
* @param[out] state Point to the dlane state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_get_dlane_state(
+extern void csi_rx_fe_ctrl_get_dlane_state(
const csi_rx_frontend_ID_t ID,
const uint32_t lane,
csi_rx_fe_ctrl_lane_t *dlane_state);
@@ -60,7 +60,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_get_dlane_state(
* @param[in] id The global unique ID of the csi rx be controller.
* @param[out] state Point to the register-state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_get_state(
+extern void csi_rx_be_ctrl_get_state(
const csi_rx_backend_ID_t ID,
csi_rx_be_ctrl_state_t *state);
/**
@@ -70,7 +70,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_get_state(
* @param[in] id The global unique ID of the csi rx be controller.
* @param[in] state Point to the register-state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_dump_state(
+extern void csi_rx_be_ctrl_dump_state(
const csi_rx_backend_ID_t ID,
csi_rx_be_ctrl_state_t *state);
/** end of NCI */
@@ -89,7 +89,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_dump_state(
*
* @return the value of the register.
*/
-STORAGE_CLASS_CSI_RX_H hrt_data csi_rx_fe_ctrl_reg_load(
+extern hrt_data csi_rx_fe_ctrl_reg_load(
const csi_rx_frontend_ID_t ID,
const hrt_address reg);
/**
@@ -101,7 +101,7 @@ STORAGE_CLASS_CSI_RX_H hrt_data csi_rx_fe_ctrl_reg_load(
* @param[in] value The value to be stored.
*
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_reg_store(
+extern void csi_rx_fe_ctrl_reg_store(
const csi_rx_frontend_ID_t ID,
const hrt_address reg,
const hrt_data value);
@@ -114,7 +114,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_reg_store(
*
* @return the value of the register.
*/
-STORAGE_CLASS_CSI_RX_H hrt_data csi_rx_be_ctrl_reg_load(
+extern hrt_data csi_rx_be_ctrl_reg_load(
const csi_rx_backend_ID_t ID,
const hrt_address reg);
/**
@@ -126,7 +126,7 @@ STORAGE_CLASS_CSI_RX_H hrt_data csi_rx_be_ctrl_reg_load(
* @param[in] value The value to be stored.
*
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_reg_store(
+extern void csi_rx_be_ctrl_reg_store(
const csi_rx_backend_ID_t ID,
const hrt_address reg,
const hrt_data value);
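The accessors declared above pair one load and one store per controller; a typical read-modify-write through them looks like the following sketch. The frontend ID, register offset and bit mask are assumptions for illustration, not real CSI RX definitions:

  /* Assumes csi_rx_fe_ctrl_reg_load()/csi_rx_fe_ctrl_reg_store() declared
   * above; EXAMPLE_FRONTEND_ID, EXAMPLE_REG and EXAMPLE_EN_BIT are made up. */
  hrt_data v;

  v = csi_rx_fe_ctrl_reg_load(EXAMPLE_FRONTEND_ID, EXAMPLE_REG);
  v |= EXAMPLE_EN_BIT;
  csi_rx_fe_ctrl_reg_store(EXAMPLE_FRONTEND_ID, EXAMPLE_REG, v);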
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h
index d27f87a719db..d09d1e320306 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h
@@ -33,7 +33,7 @@
\return none, GDC[ID].lut[0...3][0...HRT_GDC_N-1] = data
*/
-STORAGE_CLASS_EXTERN void gdc_lut_store(
+extern void gdc_lut_store(
const gdc_ID_t ID,
const int data[4][HRT_GDC_N]);
@@ -43,7 +43,7 @@ STORAGE_CLASS_EXTERN void gdc_lut_store(
\param in_lut[in] The data matrix to be converted
\param out_lut[out] The data matrix as the output of conversion
*/
-STORAGE_CLASS_EXTERN void gdc_lut_convert_to_isp_format(
+extern void gdc_lut_convert_to_isp_format(
const int in_lut[4][HRT_GDC_N],
int out_lut[4][HRT_GDC_N]);
@@ -53,7 +53,7 @@ STORAGE_CLASS_EXTERN void gdc_lut_convert_to_isp_format(
\return unity
*/
-STORAGE_CLASS_EXTERN int gdc_get_unity(
+extern int gdc_get_unity(
const gdc_ID_t ID);
#endif /* __GDC_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h
index 9b8e7c92442d..8538f86ab5e6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h
@@ -15,10 +15,10 @@
#ifndef __HMEM_PUBLIC_H_INCLUDED__
#define __HMEM_PUBLIC_H_INCLUDED__
-#include <stddef.h> /* size_t */
+#include <linux/types.h> /* size_t */
/*! Return the size of HMEM[ID]
-
+
\param ID[in] HMEM identifier
\Note: The size is the byte size of the area it occupies
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
index 2251f372145b..a025ad562bd2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
@@ -27,14 +27,13 @@
* Prerequisites:
*
*/
-#include "storage_class.h"
#ifdef INLINE_ISP_OP1W
-#define STORAGE_CLASS_ISP_OP1W_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISP_OP1W_DATA_H STORAGE_CLASS_INLINE_DATA
+#define STORAGE_CLASS_ISP_OP1W_FUNC_H static inline
+#define STORAGE_CLASS_ISP_OP1W_DATA_H static const
#else /* INLINE_ISP_OP1W */
-#define STORAGE_CLASS_ISP_OP1W_FUNC_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_ISP_OP1W_DATA_H STORAGE_CLASS_EXTERN_DATA
+#define STORAGE_CLASS_ISP_OP1W_FUNC_H extern
+#define STORAGE_CLASS_ISP_OP1W_DATA_H extern const
#endif /* INLINE_ISP_OP1W */
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
index 1cfe6d717283..cf7e7314842d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
@@ -27,14 +27,13 @@
* Prerequisites:
*
*/
-#include "storage_class.h"
#ifdef INLINE_ISP_OP2W
-#define STORAGE_CLASS_ISP_OP2W_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISP_OP2W_DATA_H STORAGE_CLASS_INLINE_DATA
+#define STORAGE_CLASS_ISP_OP2W_FUNC_H static inline
+#define STORAGE_CLASS_ISP_OP2W_DATA_H static const
#else /* INLINE_ISP_OP2W */
-#define STORAGE_CLASS_ISP_OP2W_FUNC_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_ISP_OP2W_DATA_H STORAGE_CLASS_EXTERN_DATA
+#define STORAGE_CLASS_ISP_OP2W_FUNC_H extern
+#define STORAGE_CLASS_ISP_OP2W_DATA_H extern const
#endif /* INLINE_ISP_OP2W */
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h
index 4258fa872087..0a13eda73607 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h
@@ -24,7 +24,7 @@
\return none, MMU[ID].page_table_base_index = base_index
*/
-STORAGE_CLASS_EXTERN void mmu_set_page_table_base_index(
+extern void mmu_set_page_table_base_index(
const mmu_ID_t ID,
const hrt_data base_index);
@@ -35,7 +35,7 @@ STORAGE_CLASS_EXTERN void mmu_set_page_table_base_index(
\return MMU[ID].page_table_base_index
*/
-STORAGE_CLASS_EXTERN hrt_data mmu_get_page_table_base_index(
+extern hrt_data mmu_get_page_table_base_index(
const mmu_ID_t ID);
/*! Invalidate the page table cache of MMU[ID]
@@ -44,7 +44,7 @@ STORAGE_CLASS_EXTERN hrt_data mmu_get_page_table_base_index(
\return none
*/
-STORAGE_CLASS_EXTERN void mmu_invalidate_cache(
+extern void mmu_invalidate_cache(
const mmu_ID_t ID);
@@ -52,7 +52,7 @@ STORAGE_CLASS_EXTERN void mmu_invalidate_cache(
\return none
*/
-STORAGE_CLASS_EXTERN void mmu_invalidate_cache_all(void);
+extern void mmu_invalidate_cache_all(void);
/*! Write to a control register of MMU[ID]
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
index 3e955fca2a94..a202d6dce106 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
@@ -15,14 +15,13 @@
#ifndef _REF_VECTOR_FUNC_H_INCLUDED_
#define _REF_VECTOR_FUNC_H_INCLUDED_
-#include "storage_class.h"
#ifdef INLINE_VECTOR_FUNC
-#define STORAGE_CLASS_REF_VECTOR_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_REF_VECTOR_DATA_H STORAGE_CLASS_INLINE_DATA
+#define STORAGE_CLASS_REF_VECTOR_FUNC_H static inline
+#define STORAGE_CLASS_REF_VECTOR_DATA_H static const
#else /* INLINE_VECTOR_FUNC */
-#define STORAGE_CLASS_REF_VECTOR_FUNC_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_REF_VECTOR_DATA_H STORAGE_CLASS_EXTERN_DATA
+#define STORAGE_CLASS_REF_VECTOR_FUNC_H extern
+#define STORAGE_CLASS_REF_VECTOR_DATA_H extern const
#endif /* INLINE_VECTOR_FUNC */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h
index f5de0df7981e..c7d9095472b1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "ibuf_ctrl_local.h"
#ifndef __INLINE_IBUF_CTRL__
-#define STORAGE_CLASS_IBUF_CTRL_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_IBUF_CTRL_H extern
#define STORAGE_CLASS_IBUF_CTRL_C
#include "ibuf_ctrl_public.h"
#else /* __INLINE_IBUF_CTRL__ */
-#define STORAGE_CLASS_IBUF_CTRL_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_IBUF_CTRL_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_IBUF_CTRL_H static inline
+#define STORAGE_CLASS_IBUF_CTRL_C static inline
#include "ibuf_ctrl_private.h"
#endif /* __INLINE_IBUF_CTRL__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h
index 041c8b660aa4..eeaaecdd57ba 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "input_formatter_local.h"
#ifndef __INLINE_INPUT_FORMATTER__
-#define STORAGE_CLASS_INPUT_FORMATTER_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_INPUT_FORMATTER_H extern
#define STORAGE_CLASS_INPUT_FORMATTER_C
#include "input_formatter_public.h"
#else /* __INLINE_INPUT_FORMATTER__ */
-#define STORAGE_CLASS_INPUT_FORMATTER_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_INPUT_FORMATTER_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_INPUT_FORMATTER_H static inline
+#define STORAGE_CLASS_INPUT_FORMATTER_C static inline
#include "input_formatter_private.h"
#endif /* __INLINE_INPUT_FORMATTER__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h
index 182867367b48..3f02d9ec9588 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "input_system_local.h"
#ifndef __INLINE_INPUT_SYSTEM__
-#define STORAGE_CLASS_INPUT_SYSTEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_INPUT_SYSTEM_H extern
#define STORAGE_CLASS_INPUT_SYSTEM_C
#include "input_system_public.h"
#else /* __INLINE_INPUT_SYSTEM__ */
-#define STORAGE_CLASS_INPUT_SYSTEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_INPUT_SYSTEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_INPUT_SYSTEM_H static inline
+#define STORAGE_CLASS_INPUT_SYSTEM_C static inline
#include "input_system_private.h"
#endif /* __INLINE_INPUT_SYSTEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h
index 1dc443892cc5..e1446388dee5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "irq_local.h"
#ifndef __INLINE_IRQ__
-#define STORAGE_CLASS_IRQ_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_IRQ_H extern
#define STORAGE_CLASS_IRQ_C
#include "irq_public.h"
#else /* __INLINE_IRQ__ */
-#define STORAGE_CLASS_IRQ_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_IRQ_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_IRQ_H static inline
+#define STORAGE_CLASS_IRQ_C static inline
#include "irq_private.h"
#endif /* __INLINE_IRQ__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h
index 49190d0abc30..b916953e7f47 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "isp_local.h"
#ifndef __INLINE_ISP__
-#define STORAGE_CLASS_ISP_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_ISP_H extern
#define STORAGE_CLASS_ISP_C
#include "isp_public.h"
#else /* __INLINE_iSP__ */
-#define STORAGE_CLASS_ISP_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISP_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_ISP_H static inline
+#define STORAGE_CLASS_ISP_C static inline
#include "isp_private.h"
#endif /* __INLINE_ISP__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h
index 9a608f07adcb..76aba114a5c1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "isys_dma_local.h"
#ifndef __INLINE_ISYS2401_DMA__
-#define STORAGE_CLASS_ISYS2401_DMA_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_ISYS2401_DMA_H extern
#define STORAGE_CLASS_ISYS2401_DMA_C
#include "isys_dma_public.h"
#else /* __INLINE_ISYS2401_DMA__ */
-#define STORAGE_CLASS_ISYS2401_DMA_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISYS2401_DMA_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_ISYS2401_DMA_H static inline
+#define STORAGE_CLASS_ISYS2401_DMA_C static inline
#include "isys_dma_private.h"
#endif /* __INLINE_ISYS2401_DMA__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h
index cf858bcc8e45..d3f64cfd0b7d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h
@@ -16,21 +16,20 @@
#define __IA_CSS_ISYS_IRQ_H__
#include <type_support.h>
-#include <storage_class.h>
#include <system_local.h>
#if defined(USE_INPUT_SYSTEM_VERSION_2401)
#ifndef __INLINE_ISYS2401_IRQ__
-#define STORAGE_CLASS_ISYS2401_IRQ_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_ISYS2401_IRQ_C STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_ISYS2401_IRQ_H extern
+#define STORAGE_CLASS_ISYS2401_IRQ_C extern
#include "isys_irq_public.h"
#else /* __INLINE_ISYS2401_IRQ__ */
-#define STORAGE_CLASS_ISYS2401_IRQ_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISYS2401_IRQ_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_ISYS2401_IRQ_H static inline
+#define STORAGE_CLASS_ISYS2401_IRQ_C static inline
#include "isys_irq_private.h"
#endif /* __INLINE_ISYS2401_IRQ__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h
index 3e8cfe555ad5..16fbf9d25eba 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "isys_stream2mmio_local.h"
#ifndef __INLINE_STREAM2MMIO__
-#define STORAGE_CLASS_STREAM2MMIO_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_STREAM2MMIO_H extern
#define STORAGE_CLASS_STREAM2MMIO_C
#include "isys_stream2mmio_public.h"
#else /* __INLINE_STREAM2MMIO__ */
-#define STORAGE_CLASS_STREAM2MMIO_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_STREAM2MMIO_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_STREAM2MMIO_H static inline
+#define STORAGE_CLASS_STREAM2MMIO_C static inline
#include "isys_stream2mmio_private.h"
#endif /* __INLINE_STREAM2MMIO__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
index f74b405b0f39..e85e5c889c15 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
@@ -15,7 +15,6 @@
#ifndef __MATH_SUPPORT_H
#define __MATH_SUPPORT_H
-#include "storage_class.h" /* for STORAGE_CLASS_INLINE */
#if defined(__KERNEL__)
#include <linux/kernel.h> /* Override the definition of max/min from linux kernel*/
#endif /*__KERNEL__*/
@@ -110,60 +109,60 @@ Leaving out the other math utility functions as they are newly added
#else /* !defined(INLINE_MATH_SUPPORT_UTILS) */
-STORAGE_CLASS_INLINE int max(int a, int b)
+static inline int max(int a, int b)
{
return MAX(a, b);
}
-STORAGE_CLASS_INLINE int min(int a, int b)
+static inline int min(int a, int b)
{
return MIN(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_div(unsigned int a, unsigned int b)
+static inline unsigned int ceil_div(unsigned int a, unsigned int b)
{
return CEIL_DIV(a, b);
}
#endif /* !defined(INLINE_MATH_SUPPORT_UTILS) */
-STORAGE_CLASS_INLINE unsigned int umax(unsigned int a, unsigned int b)
+static inline unsigned int umax(unsigned int a, unsigned int b)
{
return MAX(a, b);
}
-STORAGE_CLASS_INLINE unsigned int umin(unsigned int a, unsigned int b)
+static inline unsigned int umin(unsigned int a, unsigned int b)
{
return MIN(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_mul(unsigned int a, unsigned int b)
+static inline unsigned int ceil_mul(unsigned int a, unsigned int b)
{
return CEIL_MUL(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_mul2(unsigned int a, unsigned int b)
+static inline unsigned int ceil_mul2(unsigned int a, unsigned int b)
{
return CEIL_MUL2(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_shift(unsigned int a, unsigned int b)
+static inline unsigned int ceil_shift(unsigned int a, unsigned int b)
{
return CEIL_SHIFT(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_shift_mul(unsigned int a, unsigned int b)
+static inline unsigned int ceil_shift_mul(unsigned int a, unsigned int b)
{
return CEIL_SHIFT_MUL(a, b);
}
#ifdef ISP2401
-STORAGE_CLASS_INLINE unsigned int round_half_down_div(unsigned int a, unsigned int b)
+static inline unsigned int round_half_down_div(unsigned int a, unsigned int b)
{
return ROUND_HALF_DOWN_DIV(a, b);
}
-STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a, unsigned int b)
+static inline unsigned int round_half_down_mul(unsigned int a, unsigned int b)
{
return ROUND_HALF_DOWN_MUL(a, b);
}
@@ -187,7 +186,7 @@ STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a, unsigned i
*
*/
-STORAGE_CLASS_INLINE unsigned int ceil_pow2(unsigned int a)
+static inline unsigned int ceil_pow2(unsigned int a)
{
if (a == 0) {
return 1;
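The hunk above cuts off inside ceil_pow2(); for reference, a self-contained sketch of a round-up-to-power-of-two helper of this shape is shown below. It is a generic implementation, not a claim about the exact body in the driver:

  static inline unsigned int ceil_pow2_sketch(unsigned int a)
  {
      unsigned int p = 1;

      if (a == 0)
          return 1;
      /* Double p until it reaches a: 0x30 -> 0x40, 0x40 -> 0x40.
       * Assumes a <= 1U << 31 so that p cannot overflow to 0. */
      while (p < a)
          p <<= 1;
      return p;
  }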
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h
index 1b2017b029f2..519e850ec390 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "mmu_local.h"
#ifndef __INLINE_MMU__
-#define STORAGE_CLASS_MMU_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_MMU_H extern
#define STORAGE_CLASS_MMU_C
#include "mmu_public.h"
#else /* __INLINE_MMU__ */
-#define STORAGE_CLASS_MMU_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_MMU_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_MMU_H static inline
+#define STORAGE_CLASS_MMU_C static inline
#include "mmu_private.h"
#endif /* __INLINE_MMU__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h
index 565983aafa4d..cd938375e02e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h
@@ -15,14 +15,13 @@
#ifndef __MPMATH_H_INCLUDED__
#define __MPMATH_H_INCLUDED__
-#include "storage_class.h"
#ifdef INLINE_MPMATH
-#define STORAGE_CLASS_MPMATH_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_MPMATH_DATA_H STORAGE_CLASS_INLINE_DATA
+#define STORAGE_CLASS_MPMATH_FUNC_H static inline
+#define STORAGE_CLASS_MPMATH_DATA_H static const
#else /* INLINE_MPMATH */
-#define STORAGE_CLASS_MPMATH_FUNC_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_MPMATH_DATA_H STORAGE_CLASS_EXTERN_DATA
+#define STORAGE_CLASS_MPMATH_FUNC_H extern
+#define STORAGE_CLASS_MPMATH_DATA_H extern const
#endif /* INLINE_MPMATH */
#include <type_support.h>
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h
index 6e48ea9afc29..a607242c5f1a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "osys_local.h"
#ifndef __INLINE_OSYS__
-#define STORAGE_CLASS_OSYS_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_OSYS_H extern
#define STORAGE_CLASS_OSYS_C
#include "osys_public.h"
#else /* __INLINE_OSYS__ */
-#define STORAGE_CLASS_OSYS_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_OSYS_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_OSYS_H static inline
+#define STORAGE_CLASS_OSYS_C static inline
#include "osys_private.h"
#endif /* __INLINE_OSYS__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h
index 67f7f3a14231..418d02382d76 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "pixelgen_local.h"
#ifndef __INLINE_PIXELGEN__
-#define STORAGE_CLASS_PIXELGEN_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_PIXELGEN_H extern
#define STORAGE_CLASS_PIXELGEN_C
#include "pixelgen_public.h"
#else /* __INLINE_PIXELGEN__ */
-#define STORAGE_CLASS_PIXELGEN_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_PIXELGEN_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_PIXELGEN_H static inline
+#define STORAGE_CLASS_PIXELGEN_C static inline
#include "pixelgen_private.h"
#endif /* __INLINE_PIXELGEN__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h
index 02f9eee67ff3..39a125ba563d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h
@@ -20,7 +20,6 @@
* Platform specific includes and functionality.
*/
-#include "storage_class.h"
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h
index cfbc222ea0c1..ca0fbbb57788 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h
@@ -15,7 +15,6 @@
#ifndef __PRINT_SUPPORT_H_INCLUDED__
#define __PRINT_SUPPORT_H_INCLUDED__
-#include "storage_class.h"
#include <stdarg.h>
#if !defined(__KERNEL__)
@@ -24,7 +23,7 @@
extern int (*sh_css_printf) (const char *fmt, va_list args);
/* depends on host supplied print function in ia_css_init() */
-STORAGE_CLASS_INLINE void ia_css_print(const char *fmt, ...)
+static inline void ia_css_print(const char *fmt, ...)
{
va_list ap;
if (sh_css_printf) {
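ia_css_print() forwards its variable arguments to the host-supplied sh_css_printf hook; the body truncated above presumably builds a va_list and passes it on. A self-contained sketch of that forwarding pattern, with illustrative names:

  #include <stdarg.h>

  extern int (*example_vprint_hook)(const char *fmt, va_list args);

  static inline void example_print(const char *fmt, ...)
  {
      va_list ap;

      if (example_vprint_hook) {
          va_start(ap, fmt);
          example_vprint_hook(fmt, ap);
          va_end(ap);
      }
  }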
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h
index a3d874b9516a..aa5fadf5aadb 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h
@@ -29,18 +29,17 @@
*
*/
-#include <storage_class.h>
#include "queue_local.h"
#ifndef __INLINE_QUEUE__
-#define STORAGE_CLASS_QUEUE_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_QUEUE_H extern
#define STORAGE_CLASS_QUEUE_C
/* #include "queue_public.h" */
#include "ia_css_queue.h"
#else /* __INLINE_QUEUE__ */
-#define STORAGE_CLASS_QUEUE_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_QUEUE_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_QUEUE_H static inline
+#define STORAGE_CLASS_QUEUE_C static inline
#include "queue_private.h"
#endif /* __INLINE_QUEUE__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h
index 82c55acd0380..bd9f53e6b680 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "resource_local.h"
#ifndef __INLINE_RESOURCE__
-#define STORAGE_CLASS_RESOURCE_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_RESOURCE_H extern
#define STORAGE_CLASS_RESOURCE_C
#include "resource_public.h"
#else /* __INLINE_RESOURCE__ */
-#define STORAGE_CLASS_RESOURCE_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_RESOURCE_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_RESOURCE_H static inline
+#define STORAGE_CLASS_RESOURCE_C static inline
#include "resource_private.h"
#endif /* __INLINE_RESOURCE__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h
index c34c2e75c51f..43cfb0cb4aa8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "socket_local.h"
#ifndef __INLINE_SOCKET__
-#define STORAGE_CLASS_SOCKET_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_SOCKET_H extern
#define STORAGE_CLASS_SOCKET_C
#include "socket_public.h"
#else /* __INLINE_SOCKET__ */
-#define STORAGE_CLASS_SOCKET_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_SOCKET_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_SOCKET_H static inline
+#define STORAGE_CLASS_SOCKET_C static inline
#include "socket_private.h"
#endif /* __INLINE_SOCKET__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h
index 150fc2f6129b..8f57f2060791 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "sp_local.h"
#ifndef __INLINE_SP__
-#define STORAGE_CLASS_SP_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_SP_H extern
#define STORAGE_CLASS_SP_C
#include "sp_public.h"
#else /* __INLINE_SP__ */
-#define STORAGE_CLASS_SP_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_SP_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_SP_H static inline
+#define STORAGE_CLASS_SP_C static inline
#include "sp_private.h"
#endif /* __INLINE_SP__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h
deleted file mode 100644
index 3908e668dacd..000000000000
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __STORAGE_CLASS_H_INCLUDED__
-#define __STORAGE_CLASS_H_INCLUDED__
-
-/**
-* @file
-* Platform specific includes and functionality.
-*/
-
-#define STORAGE_CLASS_EXTERN extern
-
-#if defined(_MSC_VER)
-#define STORAGE_CLASS_INLINE static __inline
-#else
-#define STORAGE_CLASS_INLINE static inline
-#endif
-
-#define STORAGE_CLASS_EXTERN_DATA extern const
-#define STORAGE_CLASS_INLINE_DATA static const
-
-#endif /* __STORAGE_CLASS_H_INCLUDED__ */
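With storage_class.h deleted, every user of its macros is converted to the expansion the header used to provide. A before-and-after sketch of one declaration pair, with an illustrative function name:

  /* Before: */
  STORAGE_CLASS_EXTERN int foo_get(void);
  STORAGE_CLASS_INLINE int foo_add(int a, int b) { return a + b; }

  /* After, using the expansions the deleted header defined: */
  extern int foo_get(void);
  static inline int foo_add(int a, int b) { return a + b; }

  /* For data objects, STORAGE_CLASS_EXTERN_DATA expands to "extern const"
   * and STORAGE_CLASS_INLINE_DATA to "static const". */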
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h
index 8e41f60b5d39..53d535e4f2ae 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "stream_buffer_local.h"
#ifndef __INLINE_STREAM_BUFFER__
-#define STORAGE_CLASS_STREAM_BUFFER_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_STREAM_BUFFER_H extern
#define STORAGE_CLASS_STREAM_BUFFER_C
#include "stream_buffer_public.h"
#else /* __INLINE_STREAM_BUFFER__ */
-#define STORAGE_CLASS_STREAM_BUFFER_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_STREAM_BUFFER_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_STREAM_BUFFER_H static inline
+#define STORAGE_CLASS_STREAM_BUFFER_C static inline
#include "stream_buffer_private.h"
#endif /* __INLINE_STREAM_BUFFER__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
index c53241a7a281..d80437c58bde 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
@@ -16,7 +16,6 @@
#define __STRING_SUPPORT_H_INCLUDED__
#include <platform_support.h>
#include <type_support.h>
-#include <storage_class.h>
#if !defined(_MSC_VER)
/*
@@ -34,7 +33,7 @@
* @return EINVAL on Invalid arguments
* @return ERANGE on Destination size too small
*/
-STORAGE_CLASS_INLINE int memcpy_s(
+static inline int memcpy_s(
void* dest_buf,
size_t dest_size,
const void* src_buf,
@@ -89,7 +88,7 @@ static size_t strnlen_s(
* @return Returns EINVAL on invalid arguments
* @return Returns ERANGE on destination size too small
*/
-STORAGE_CLASS_INLINE int strncpy_s(
+static inline int strncpy_s(
char* dest_str,
size_t dest_size,
const char* src_str,
@@ -130,7 +129,7 @@ STORAGE_CLASS_INLINE int strncpy_s(
* @return Returns EINVAL on invalid arguments
* @return Returns ERANGE on destination size too small
*/
-STORAGE_CLASS_INLINE int strcpy_s(
+static inline int strcpy_s(
char* dest_str,
size_t dest_size,
const char* src_str)
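The *_s helpers converted in this header follow the bounded-copy contract documented in their comments: reject invalid arguments with EINVAL, reject copies larger than the destination with ERANGE, and only then copy. A self-contained sketch of that contract for a memcpy_s-style helper, mirroring the documented behaviour rather than the exact driver body:

  #include <errno.h>
  #include <string.h>

  static inline int memcpy_s_sketch(void *dest_buf, size_t dest_size,
                                    const void *src_buf, size_t src_size)
  {
      if (dest_buf == NULL || src_buf == NULL)
          return EINVAL;              /* invalid arguments */
      if (dest_size < src_size)
          return ERANGE;              /* destination too small */
      memcpy(dest_buf, src_buf, src_size);
      return 0;
  }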
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h
index 7385fd11c95f..ace695643369 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h
@@ -29,17 +29,16 @@
*
*/
-#include "storage_class.h"
#include "tag_local.h"
#ifndef __INLINE_TAG__
-#define STORAGE_CLASS_TAG_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_TAG_H extern
#define STORAGE_CLASS_TAG_C
#include "tag_public.h"
#else /* __INLINE_TAG__ */
-#define STORAGE_CLASS_TAG_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_TAG_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_TAG_H static inline
+#define STORAGE_CLASS_TAG_C static inline
#include "tag_private.h"
#endif /* __INLINE_TAG__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h
index ed13451c9261..f6bc1c47553f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "timed_ctrl_local.h"
#ifndef __INLINE_TIMED_CTRL__
-#define STORAGE_CLASS_TIMED_CTRL_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_TIMED_CTRL_H extern
#define STORAGE_CLASS_TIMED_CTRL_C
#include "timed_ctrl_public.h"
#else /* __INLINE_TIMED_CTRL__ */
-#define STORAGE_CLASS_TIMED_CTRL_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_TIMED_CTRL_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_TIMED_CTRL_H static inline
+#define STORAGE_CLASS_TIMED_CTRL_C static inline
#include "timed_ctrl_private.h"
#endif /* __INLINE_TIMED_CTRL__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h
index b82fa3eba79f..bc77537fa73a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h
@@ -30,27 +30,6 @@
#define IA_CSS_INT32_T_BITS 32
#define IA_CSS_UINT64_T_BITS 64
-#if defined(_MSC_VER)
-#include <stdint.h>
-/* For ATE compilation define the bool */
-#if defined(_ATE_)
-#define bool int
-#define true 1
-#define false 0
-#else
-#include <stdbool.h>
-#endif
-#include <stddef.h>
-#include <limits.h>
-#include <errno.h>
-#if defined(_M_X64)
-#define HOST_ADDRESS(x) (unsigned long long)(x)
-#else
-#define HOST_ADDRESS(x) (unsigned long)(x)
-#endif
-
-#elif defined(__KERNEL__)
-
#define CHAR_BIT (8)
#include <linux/types.h>
@@ -58,25 +37,4 @@
#include <linux/errno.h>
#define HOST_ADDRESS(x) (unsigned long)(x)
-#elif defined(__GNUC__)
-#ifndef __STDC_LIMIT_MACROS
-#define __STDC_LIMIT_MACROS 1
-#endif
-#include <stdint.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <limits.h>
-#include <errno.h>
-#define HOST_ADDRESS(x) (unsigned long)(x)
-
-#else /* default is for the FIST environment */
-#include <stdint.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <limits.h>
-#include <errno.h>
-#define HOST_ADDRESS(x) (unsigned long)(x)
-
-#endif
-
#endif /* __TYPE_SUPPORT_H_INCLUDED__ */
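After this change type_support.h is kernel-only: fixed-width types come from <linux/types.h> and HOST_ADDRESS() is simply a cast to unsigned long. A one-line usage sketch with an illustrative object:

  int example_obj;
  unsigned long host_addr = HOST_ADDRESS(&example_obj);  /* (unsigned long)&example_obj */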
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h
index acf932e1f563..82d447bf9704 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "vamem_local.h"
#ifndef __INLINE_VAMEM__
-#define STORAGE_CLASS_VAMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VAMEM_H extern
#define STORAGE_CLASS_VAMEM_C
#include "vamem_public.h"
#else /* __INLINE_VAMEM__ */
-#define STORAGE_CLASS_VAMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_VAMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VAMEM_H static inline
+#define STORAGE_CLASS_VAMEM_C static inline
#include "vamem_private.h"
#endif /* __INLINE_VAMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h
index 5d3be31759e4..5368b9062897 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h
@@ -15,7 +15,6 @@
#ifndef __VECTOR_FUNC_H_INCLUDED__
#define __VECTOR_FUNC_H_INCLUDED__
-#include "storage_class.h"
/* TODO: Later filters will be moved to types directory,
* and we should only include matrix_MxN types */
@@ -27,12 +26,12 @@
#include "vector_func_local.h"
#ifndef __INLINE_VECTOR_FUNC__
-#define STORAGE_CLASS_VECTOR_FUNC_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VECTOR_FUNC_H extern
#define STORAGE_CLASS_VECTOR_FUNC_C
#include "vector_func_public.h"
#else /* __INLINE_VECTOR_FUNC__ */
-#define STORAGE_CLASS_VECTOR_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_VECTOR_FUNC_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VECTOR_FUNC_H static inline
+#define STORAGE_CLASS_VECTOR_FUNC_C static inline
#include "vector_func_private.h"
#endif /* __INLINE_VECTOR_FUNC__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h
index 261f87378ce5..4923f2d5518b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h
@@ -15,17 +15,16 @@
#ifndef __VECTOR_OPS_H_INCLUDED__
#define __VECTOR_OPS_H_INCLUDED__
-#include "storage_class.h"
#include "vector_ops_local.h"
#ifndef __INLINE_VECTOR_OPS__
-#define STORAGE_CLASS_VECTOR_OPS_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VECTOR_OPS_H extern
#define STORAGE_CLASS_VECTOR_OPS_C
#include "vector_ops_public.h"
#else /* __INLINE_VECTOR_OPS__ */
-#define STORAGE_CLASS_VECTOR_OPS_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_VECTOR_OPS_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VECTOR_OPS_H static inline
+#define STORAGE_CLASS_VECTOR_OPS_C static inline
#include "vector_ops_private.h"
#endif /* __INLINE_VECTOR_OPS__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h
index 79a36755bfd9..d3375729c441 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "vmem_local.h"
#ifndef __INLINE_VMEM__
-#define STORAGE_CLASS_VMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VMEM_H extern
#define STORAGE_CLASS_VMEM_C
#include "vmem_public.h"
#else /* __INLINE_VMEM__ */
-#define STORAGE_CLASS_VMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_VMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VMEM_H static inline
+#define STORAGE_CLASS_VMEM_C static inline
#include "vmem_private.h"
#endif /* __INLINE_VMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h
index 9169e04f9b4b..13083fe55141 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "xmem_local.h"
#ifndef __INLINE_XMEM__
-#define STORAGE_CLASS_XMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_XMEM_H extern
#define STORAGE_CLASS_XMEM_C
#include "xmem_public.h"
#else /* __INLINE_XMEM__ */
-#define STORAGE_CLASS_XMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_XMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_XMEM_H static inline
+#define STORAGE_CLASS_XMEM_C static inline
#include "xmem_private.h"
#endif /* __INLINE_XMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
index 8ef6c54ee813..aa733674f42b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
@@ -321,7 +321,7 @@ ia_css_s3a_dmem_decode(
}
/* MW: this is an ISP function */
-STORAGE_CLASS_INLINE int
+static inline int
merge_hi_lo_14(unsigned short hi, unsigned short lo)
{
int val = (int) ((((unsigned int) hi << 14) & 0xfffc000) |
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
index 9f8a125f0d74..e028e460ae4c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
@@ -1697,11 +1697,11 @@ ia_css_binary_find(struct ia_css_binary_descr *descr,
}
#endif
if (xcandidate->num_output_pins > 1 && /* in case we have a second output pin, */
- req_vf_info && /* and we need vf output. */
+ req_vf_info && /* and we need vf output. */
/* check if the required vf format
is supported. */
- !binary_supports_output_format(xcandidate, req_vf_info->format)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ !binary_supports_output_format(xcandidate, req_vf_info->format)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_binary_find() [%d] continue: (%d > %d) && (%p != NULL) && !%d\n",
__LINE__, xcandidate->num_output_pins, 1,
req_vf_info,
@@ -1711,8 +1711,8 @@ ia_css_binary_find(struct ia_css_binary_descr *descr,
/* Check if vf_veceven supports the requested vf format */
if (xcandidate->num_output_pins == 1 &&
- req_vf_info && candidate->enable.vf_veceven &&
- !binary_supports_vf_format(xcandidate, req_vf_info->format)) {
+ req_vf_info && candidate->enable.vf_veceven &&
+ !binary_supports_vf_format(xcandidate, req_vf_info->format)) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_binary_find() [%d] continue: (%d == %d) && (%p != NULL) && %d && !%d\n",
__LINE__, xcandidate->num_output_pins, 1,
@@ -1723,7 +1723,7 @@ ia_css_binary_find(struct ia_css_binary_descr *descr,
/* Check if vf_veceven supports the requested vf width */
if (xcandidate->num_output_pins == 1 &&
- req_vf_info && candidate->enable.vf_veceven) { /* and we need vf output. */
+ req_vf_info && candidate->enable.vf_veceven) { /* and we need vf output. */
if (req_vf_info->res.width > candidate->output.max_width) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_binary_find() [%d] continue: (%d < %d)\n",
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
index 5d40afd482f5..42d9a8508858 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
@@ -280,7 +280,7 @@ static ia_css_queue_t *bufq_get_qhandle(
/* Local function to initialize a buffer queue. This reduces
* the chances of copy-paste errors or typos.
*/
-STORAGE_CLASS_INLINE void
+static inline void
init_bufq(unsigned int desc_offset,
unsigned int elems_offset,
ia_css_queue_t *handle)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
index 91c105cc6204..3c8dcfd4bbc6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
@@ -130,7 +130,7 @@ enum ia_css_debug_enable_param_dump {
* @param[in] fmt printf like format string
* @param[in] args arguments for the format string
*/
-STORAGE_CLASS_INLINE void
+static inline void
ia_css_debug_vdtrace(unsigned int level, const char *fmt, va_list args)
{
if (ia_css_debug_trace_level >= level)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
index a7c6bba7e094..11d3995ba0db 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
@@ -29,6 +29,7 @@ more details.
#endif
#include "system_global.h"
+#include <linux/kernel.h>
#ifdef USE_INPUT_SYSTEM_VERSION_2
@@ -487,7 +488,7 @@ static void ifmtr_set_if_blocking_mode(
{
int i;
bool block[] = { false, false, false, false };
- assert(N_INPUT_FORMATTER_ID <= (sizeof(block) / sizeof(block[0])));
+ assert(N_INPUT_FORMATTER_ID <= (ARRAY_SIZE(block)));
#if !defined(IS_ISP_2400_SYSTEM)
#error "ifmtr_set_if_blocking_mode: ISP_SYSTEM must be one of {IS_ISP_2400_SYSTEM}"
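
Editor's note: the ifmtr.c hunk replaces the open-coded sizeof(block)/sizeof(block[0]) with ARRAY_SIZE() from <linux/kernel.h>, which is why the same patch adds that include. A small userspace analogue of the macro shows the intent (the real kernel macro additionally rejects pointers via a compile-time array check):

	#include <assert.h>
	#include <stdio.h>

	/* Same shape as the kernel macro, minus its type-checking magic. */
	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

	int main(void)
	{
		int block[] = { 0, 0, 0, 0 };

		assert(ARRAY_SIZE(block) == 4);   /* element count, not byte size */
		printf("%zu elements\n", ARRAY_SIZE(block));
		return 0;
	}
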
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
index cf02970d4f59..d9a5f3e9283a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
@@ -105,7 +105,7 @@ static struct inputfifo_instance
/* Streaming to MIPI */
static unsigned inputfifo_wrap_marker(
-/* STORAGE_CLASS_INLINE unsigned inputfifo_wrap_marker( */
+/* static inline unsigned inputfifo_wrap_marker( */
unsigned marker)
{
return marker |
@@ -113,7 +113,7 @@ static unsigned inputfifo_wrap_marker(
(inputfifo_curr_fmt_type << _HIVE_STR_TO_MIPI_FMT_TYPE_LSB);
}
-STORAGE_CLASS_INLINE void
+static inline void
_sh_css_fifo_snd(unsigned token)
{
while (!can_event_send_token(STR2MIPI_EVENT_ID))
@@ -123,7 +123,7 @@ _sh_css_fifo_snd(unsigned token)
}
static void inputfifo_send_data_a(
-/* STORAGE_CLASS_INLINE void inputfifo_send_data_a( */
+/* static inline void inputfifo_send_data_a( */
unsigned int data)
{
unsigned int token = (1 << HIVE_STR_TO_MIPI_VALID_A_BIT) |
@@ -135,7 +135,7 @@ unsigned int data)
static void inputfifo_send_data_b(
-/* STORAGE_CLASS_INLINE void inputfifo_send_data_b( */
+/* static inline void inputfifo_send_data_b( */
unsigned int data)
{
unsigned int token = (1 << HIVE_STR_TO_MIPI_VALID_B_BIT) |
@@ -147,7 +147,7 @@ static void inputfifo_send_data_b(
static void inputfifo_send_data(
-/* STORAGE_CLASS_INLINE void inputfifo_send_data( */
+/* static inline void inputfifo_send_data( */
unsigned int a,
unsigned int b)
{
@@ -162,7 +162,7 @@ static void inputfifo_send_data(
static void inputfifo_send_sol(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_sol(void) */
+/* static inline void inputfifo_send_sol(void) */
{
hrt_data token = inputfifo_wrap_marker(
1 << HIVE_STR_TO_MIPI_SOL_BIT);
@@ -174,7 +174,7 @@ static void inputfifo_send_sol(void)
static void inputfifo_send_eol(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_eol(void) */
+/* static inline void inputfifo_send_eol(void) */
{
hrt_data token = inputfifo_wrap_marker(
1 << HIVE_STR_TO_MIPI_EOL_BIT);
@@ -185,7 +185,7 @@ static void inputfifo_send_eol(void)
static void inputfifo_send_sof(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_sof(void) */
+/* static inline void inputfifo_send_sof(void) */
{
hrt_data token = inputfifo_wrap_marker(
1 << HIVE_STR_TO_MIPI_SOF_BIT);
@@ -197,7 +197,7 @@ static void inputfifo_send_sof(void)
static void inputfifo_send_eof(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_eof(void) */
+/* static inline void inputfifo_send_eof(void) */
{
hrt_data token = inputfifo_wrap_marker(
1 << HIVE_STR_TO_MIPI_EOF_BIT);
@@ -209,7 +209,7 @@ static void inputfifo_send_eof(void)
#ifdef __ON__
static void inputfifo_send_ch_id(
-/* STORAGE_CLASS_INLINE void inputfifo_send_ch_id( */
+/* static inline void inputfifo_send_ch_id( */
unsigned int ch_id)
{
hrt_data token;
@@ -223,7 +223,7 @@ static void inputfifo_send_ch_id(
}
static void inputfifo_send_fmt_type(
-/* STORAGE_CLASS_INLINE void inputfifo_send_fmt_type( */
+/* static inline void inputfifo_send_fmt_type( */
unsigned int fmt_type)
{
hrt_data token;
@@ -240,7 +240,7 @@ static void inputfifo_send_fmt_type(
static void inputfifo_send_ch_id_and_fmt_type(
-/* STORAGE_CLASS_INLINE
+/* static inline
void inputfifo_send_ch_id_and_fmt_type( */
unsigned int ch_id,
unsigned int fmt_type)
@@ -259,7 +259,7 @@ void inputfifo_send_ch_id_and_fmt_type( */
static void inputfifo_send_empty_token(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_empty_token(void) */
+/* static inline void inputfifo_send_empty_token(void) */
{
hrt_data token = inputfifo_wrap_marker(0);
_sh_css_fifo_snd(token);
@@ -269,7 +269,7 @@ static void inputfifo_send_empty_token(void)
static void inputfifo_start_frame(
-/* STORAGE_CLASS_INLINE void inputfifo_start_frame( */
+/* static inline void inputfifo_start_frame( */
unsigned int ch_id,
unsigned int fmt_type)
{
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
index 95542fc82217..62d13978475d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
@@ -603,7 +603,7 @@ static enum ia_css_err pipeline_stage_create(
/* Verify input parameters*/
if (!(stage_desc->in_frame) && !(stage_desc->firmware)
&& (stage_desc->binary) && !(stage_desc->binary->online)) {
- err = IA_CSS_ERR_INTERNAL_ERROR;
+ err = IA_CSS_ERR_INTERNAL_ERROR;
goto ERR;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h
index a0bb9f663ce6..9f78e709b3d0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h
@@ -31,15 +31,14 @@ more details.
#ifndef _IA_CSS_RMGR_H
#define _IA_CSS_RMGR_H
-#include "storage_class.h"
#include <ia_css_err.h>
#ifndef __INLINE_RMGR__
-#define STORAGE_CLASS_RMGR_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_RMGR_H extern
#define STORAGE_CLASS_RMGR_C
#else /* __INLINE_RMGR__ */
-#define STORAGE_CLASS_RMGR_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_RMGR_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_RMGR_H static inline
+#define STORAGE_CLASS_RMGR_C static inline
#endif /* __INLINE_RMGR__ */
/**
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
index fa92d8da8f1c..e56006c07ee8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
@@ -174,7 +174,7 @@ void ia_css_rmgr_uninit_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_rmgr_uninit_vbuf()\n");
if (pool == NULL) {
ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, "ia_css_rmgr_uninit_vbuf(): NULL argument\n");
- return;
+ return;
}
if (pool->handles != NULL) {
/* free the hmm buffers */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
index d9178e80dab2..6d9bceb60196 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
@@ -37,7 +37,7 @@ more details.
#include "ia_css_spctrl.h"
#include "ia_css_debug.h"
-typedef struct {
+struct spctrl_context_info {
struct ia_css_sp_init_dmem_cfg dmem_config;
uint32_t spctrl_config_dmem_addr; /** location of dmem_cfg in SP dmem */
uint32_t spctrl_state_dmem_addr;
@@ -45,9 +45,9 @@ typedef struct {
hrt_vaddress code_addr; /* sp firmware location in host mem-DDR*/
uint32_t code_size;
char *program_name; /* used in case of PLATFORM_SIM */
-} spctrl_context_info;
+};
-static spctrl_context_info spctrl_cofig_info[N_SP_ID];
+static struct spctrl_context_info spctrl_cofig_info[N_SP_ID];
static bool spctrl_loaded[N_SP_ID] = {0};
/* Load firmware */
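
Editor's note: the spctrl.c hunk follows the kernel coding-style guidance against typedef'ing structs; the anonymous typedef becomes a named struct and every use spells out the struct keyword. A generic before/after sketch, with a hypothetical stand-in name:

	/* Before (discouraged): the typedef hides that this is an aggregate.
	 *	typedef struct { ... } demo_context_info;
	 */

	/* After: the struct tag is the type name and call sites say so. */
	struct demo_context_info {
		unsigned int code_size;
		char *program_name;
	};

	static struct demo_context_info demo_config_info[2];
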
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
index e882b5596813..f92b6a9f77eb 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
@@ -451,8 +451,6 @@ static enum ia_css_frame_format yuv422_copy_formats[] = {
IA_CSS_FRAME_FORMAT_YUYV
};
-#define array_length(array) (sizeof(array)/sizeof(array[0]))
-
/* Verify whether the selected output format is can be produced
* by the copy binary given the stream format.
* */
@@ -468,7 +466,7 @@ verify_copy_out_frame_format(struct ia_css_pipe *pipe)
switch (pipe->stream->config.input_config.format) {
case IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY:
case IA_CSS_STREAM_FORMAT_YUV420_8:
- for (i=0; i<array_length(yuv420_copy_formats) && !found; i++)
+ for (i=0; i<ARRAY_SIZE(yuv420_copy_formats) && !found; i++)
found = (out_fmt == yuv420_copy_formats[i]);
break;
case IA_CSS_STREAM_FORMAT_YUV420_10:
@@ -476,7 +474,7 @@ verify_copy_out_frame_format(struct ia_css_pipe *pipe)
found = (out_fmt == IA_CSS_FRAME_FORMAT_YUV420_16);
break;
case IA_CSS_STREAM_FORMAT_YUV422_8:
- for (i=0; i<array_length(yuv422_copy_formats) && !found; i++)
+ for (i=0; i<ARRAY_SIZE(yuv422_copy_formats) && !found; i++)
found = (out_fmt == yuv422_copy_formats[i]);
break;
case IA_CSS_STREAM_FORMAT_YUV422_10:
@@ -3781,6 +3779,7 @@ static enum ia_css_err
create_host_acc_pipeline(struct ia_css_pipe *pipe)
{
enum ia_css_err err = IA_CSS_SUCCESS;
+ const struct ia_css_fw_info *fw;
unsigned int i;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
@@ -3794,14 +3793,12 @@ create_host_acc_pipeline(struct ia_css_pipe *pipe)
if (pipe->config.acc_extension)
pipe->pipeline.pipe_qos_config = 0;
-{
- const struct ia_css_fw_info *fw = pipe->vf_stage;
+ fw = pipe->vf_stage;
for (i = 0; fw; fw = fw->next){
err = sh_css_pipeline_add_acc_stage(&pipe->pipeline, fw);
if (err != IA_CSS_SUCCESS)
goto ERR;
}
-}
for (i=0; i<pipe->config.num_acc_stages; i++) {
struct ia_css_fw_info *fw = pipe->config.acc_stages[i];
@@ -4333,12 +4330,13 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
}
}
} else if ((buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_METADATA)) {
- return_err = ia_css_bufq_enqueue_buffer(thread_id,
+ || (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_METADATA)) {
+
+ return_err = ia_css_bufq_enqueue_buffer(thread_id,
queue_id,
(uint32_t)h_vbuf->vptr);
#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
@@ -5607,13 +5605,13 @@ static enum ia_css_err load_video_binaries(struct ia_css_pipe *pipe)
mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
sizeof(struct ia_css_binary), GFP_KERNEL);
- if (mycs->yuv_scaler_binary == NULL) {
+ if (!mycs->yuv_scaler_binary) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
return err;
}
mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage
* sizeof(bool), GFP_KERNEL);
- if (mycs->is_output_stage == NULL) {
+ if (!mycs->is_output_stage) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
return err;
}
@@ -5797,13 +5795,15 @@ static enum ia_css_err load_video_binaries(struct ia_css_pipe *pipe)
#endif
/* Make tnr reference buffers output block height align */
- tnr_info.res.height =
#ifndef ISP2401
+ tnr_info.res.height =
CEIL_MUL(tnr_info.res.height,
+ mycs->video_binary.info->sp.block.output_block_height);
#else
+ tnr_info.res.height =
CEIL_MUL(tnr_height,
+ mycs->video_binary.info->sp.block.output_block_height);
#endif
- mycs->video_binary.info->sp.block.output_block_height);
} else {
tnr_info = mycs->video_binary.internal_frame_info;
}
@@ -6027,7 +6027,7 @@ sh_css_pipe_configure_viewfinder(struct ia_css_pipe *pipe, unsigned int width,
err = ia_css_util_check_res(width, height);
if (err != IA_CSS_SUCCESS) {
- IA_CSS_LEAVE_ERR_PRIVATE(err);
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
if (pipe->vf_output_info[idx].res.width != width ||
@@ -6258,14 +6258,14 @@ static enum ia_css_err load_primary_binaries(
mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
sizeof(struct ia_css_binary), GFP_KERNEL);
- if (mycs->yuv_scaler_binary == NULL) {
+ if (!mycs->yuv_scaler_binary) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage *
sizeof(bool), GFP_KERNEL);
- if (mycs->is_output_stage == NULL) {
+ if (!mycs->is_output_stage) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
@@ -6982,27 +6982,27 @@ static enum ia_css_err ia_css_pipe_create_cas_scaler_desc_single_output(
}
descr->in_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->in_info == NULL) {
+ if (!descr->in_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->internal_out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->internal_out_info == NULL) {
+ if (!descr->internal_out_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->out_info == NULL) {
+ if (!descr->out_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->vf_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->vf_info == NULL) {
+ if (!descr->vf_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool), GFP_KERNEL);
- if (descr->is_output_stage == NULL) {
+ if (!descr->is_output_stage) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
@@ -7118,22 +7118,22 @@ static enum ia_css_err ia_css_pipe_create_cas_scaler_desc(struct ia_css_pipe *pi
descr->num_stage = num_stages;
descr->in_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->in_info == NULL) {
+ if (!descr->in_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->internal_out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->internal_out_info == NULL) {
+ if (!descr->internal_out_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->out_info == NULL) {
+ if (!descr->out_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->vf_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->vf_info == NULL) {
+ if (!descr->vf_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
@@ -7276,13 +7276,13 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe)
mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
sizeof(struct ia_css_binary), GFP_KERNEL);
- if (mycs->yuv_scaler_binary == NULL) {
+ if (!mycs->yuv_scaler_binary) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage *
sizeof(bool), GFP_KERNEL);
- if (mycs->is_output_stage == NULL) {
+ if (!mycs->is_output_stage) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
@@ -7383,7 +7383,7 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe)
}
mycs->vf_pp_binary = kzalloc(mycs->num_vf_pp * sizeof(struct ia_css_binary),
GFP_KERNEL);
- if (mycs->vf_pp_binary == NULL) {
+ if (!mycs->vf_pp_binary) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
@@ -8689,9 +8689,9 @@ enum ia_css_err ia_css_stream_capture(
/* Check if the tag descriptor is valid */
if (num_captures < SH_CSS_MINIMUM_TAG_ID) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_stream_capture() leave: return_err=%d\n",
- IA_CSS_ERR_INVALID_ARGUMENTS);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() leave: return_err=%d\n",
+ IA_CSS_ERR_INVALID_ARGUMENTS);
return IA_CSS_ERR_INVALID_ARGUMENTS;
}
@@ -9445,7 +9445,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* allocate the stream instance */
curr_stream = kmalloc(sizeof(struct ia_css_stream), GFP_KERNEL);
- if (curr_stream == NULL) {
+ if (!curr_stream) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
IA_CSS_LEAVE_ERR(err);
return err;
@@ -9457,7 +9457,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* allocate pipes */
curr_stream->num_pipes = num_pipes;
curr_stream->pipes = kzalloc(num_pipes * sizeof(struct ia_css_pipe *), GFP_KERNEL);
- if (curr_stream->pipes == NULL) {
+ if (!curr_stream->pipes) {
curr_stream->num_pipes = 0;
kfree(curr_stream);
curr_stream = NULL;
@@ -9780,23 +9780,22 @@ ERR:
if (err == IA_CSS_SUCCESS)
{
/* working mode: enter into the seed list */
- if (my_css_save.mode == sh_css_mode_working)
- for(i = 0; i < MAX_ACTIVE_STREAMS; i++)
- if (my_css_save.stream_seeds[i].stream == NULL)
- {
- IA_CSS_LOG("entered stream into loc=%d", i);
- my_css_save.stream_seeds[i].orig_stream = stream;
- my_css_save.stream_seeds[i].stream = curr_stream;
- my_css_save.stream_seeds[i].num_pipes = num_pipes;
- my_css_save.stream_seeds[i].stream_config = *stream_config;
- for(j = 0; j < num_pipes; j++)
- {
- my_css_save.stream_seeds[i].pipe_config[j] = pipes[j]->config;
- my_css_save.stream_seeds[i].pipes[j] = pipes[j];
- my_css_save.stream_seeds[i].orig_pipes[j] = &pipes[j];
- }
- break;
+ if (my_css_save.mode == sh_css_mode_working) {
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++)
+ if (!my_css_save.stream_seeds[i].stream) {
+ IA_CSS_LOG("entered stream into loc=%d", i);
+ my_css_save.stream_seeds[i].orig_stream = stream;
+ my_css_save.stream_seeds[i].stream = curr_stream;
+ my_css_save.stream_seeds[i].num_pipes = num_pipes;
+ my_css_save.stream_seeds[i].stream_config = *stream_config;
+ for (j = 0; j < num_pipes; j++) {
+ my_css_save.stream_seeds[i].pipe_config[j] = pipes[j]->config;
+ my_css_save.stream_seeds[i].pipes[j] = pipes[j];
+ my_css_save.stream_seeds[i].orig_pipes[j] = &pipes[j];
+ }
+ break;
}
+ }
#else
if (err == IA_CSS_SUCCESS) {
err = ia_css_save_stream(curr_stream);
@@ -9970,32 +9969,32 @@ ia_css_stream_load(struct ia_css_stream *stream)
enum ia_css_err err;
assert(stream != NULL);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() enter, \n");
- for(i=0;i<MAX_ACTIVE_STREAMS;i++)
- if (my_css_save.stream_seeds[i].stream == stream)
- {
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++) {
+ if (my_css_save.stream_seeds[i].stream == stream) {
int j;
- for(j=0;j<my_css_save.stream_seeds[i].num_pipes;j++)
- if ((err = ia_css_pipe_create(&(my_css_save.stream_seeds[i].pipe_config[j]), &my_css_save.stream_seeds[i].pipes[j])) != IA_CSS_SUCCESS)
- {
- if (j)
- {
+ for ( j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++) {
+ if ((err = ia_css_pipe_create(&(my_css_save.stream_seeds[i].pipe_config[j]), &my_css_save.stream_seeds[i].pipes[j])) != IA_CSS_SUCCESS) {
+ if (j) {
int k;
for(k=0;k<j;k++)
ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[k]);
}
return err;
}
- err = ia_css_stream_create(&(my_css_save.stream_seeds[i].stream_config), my_css_save.stream_seeds[i].num_pipes,
- my_css_save.stream_seeds[i].pipes, &(my_css_save.stream_seeds[i].stream));
- if (err != IA_CSS_SUCCESS)
- {
+ }
+ err = ia_css_stream_create(&(my_css_save.stream_seeds[i].stream_config),
+ my_css_save.stream_seeds[i].num_pipes,
+ my_css_save.stream_seeds[i].pipes,
+ &(my_css_save.stream_seeds[i].stream));
+ if (err != IA_CSS_SUCCESS) {
ia_css_stream_destroy(stream);
- for(j=0;j<my_css_save.stream_seeds[i].num_pipes;j++)
+ for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++)
ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[j]);
return err;
}
break;
}
+ }
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() exit, \n");
return IA_CSS_SUCCESS;
#else
@@ -10524,7 +10523,7 @@ ia_css_update_continuous_frames(struct ia_css_stream *stream)
ia_css_debug_dtrace(
IA_CSS_DEBUG_TRACE,
"sh_css_update_continuous_frames() leave: invalid stream, return_void\n");
- return IA_CSS_ERR_INVALID_ARGUMENTS;
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
}
pipe = stream->continuous_pipe;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
index 63582161050a..8158ea40d069 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
@@ -145,9 +145,9 @@ sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi, struct ia
size_t configstruct_size = sizeof(struct ia_css_config_memory_offsets);
size_t statestruct_size = sizeof(struct ia_css_state_memory_offsets);
- char *parambuf = (char *)kmalloc(paramstruct_size + configstruct_size + statestruct_size,
- GFP_KERNEL);
- if (parambuf == NULL)
+ char *parambuf = kmalloc(paramstruct_size + configstruct_size + statestruct_size,
+ GFP_KERNEL);
+ if (!parambuf)
return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
bd->mem_offsets.array[IA_CSS_PARAM_CLASS_PARAM].ptr = NULL;
@@ -229,14 +229,15 @@ sh_css_load_firmware(const char *fw_data,
sh_css_blob_info = kmalloc(
(sh_css_num_binaries - NUM_OF_SPS) *
sizeof(*sh_css_blob_info), GFP_KERNEL);
- if (sh_css_blob_info == NULL)
+ if (!sh_css_blob_info)
return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
} else {
sh_css_blob_info = NULL;
}
- fw_minibuffer = kzalloc(sh_css_num_binaries * sizeof(struct fw_param), GFP_KERNEL);
- if (fw_minibuffer == NULL)
+ fw_minibuffer = kcalloc(sh_css_num_binaries, sizeof(struct fw_param),
+ GFP_KERNEL);
+ if (!fw_minibuffer)
return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
for (i = 0; i < sh_css_num_binaries; i++) {
@@ -295,10 +296,8 @@ void sh_css_unload_firmware(void)
}
memset(&sh_css_sp_fw, 0, sizeof(sh_css_sp_fw));
- if (sh_css_blob_info) {
- kfree(sh_css_blob_info);
- sh_css_blob_info = NULL;
- }
+ kfree(sh_css_blob_info);
+ sh_css_blob_info = NULL;
sh_css_num_binaries = 0;
}
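
Editor's note: two idioms recur in the sh_css_firmware.c hunks above. kcalloc() replaces the open-coded n * size kmalloc(), giving a zeroed, multiplication-overflow-checked allocation, and the NULL test before kfree() is dropped because kfree(NULL) is defined to be a no-op. A hedged kernel-style sketch (the struct name is a hypothetical stand-in, not the driver's):

	#include <linux/errno.h>
	#include <linux/slab.h>

	struct fw_param_demo {		/* hypothetical stand-in for struct fw_param */
		void *buf;
		size_t len;
	};

	static struct fw_param_demo *demo_table;

	static int demo_alloc(size_t n)
	{
		/* kcalloc() == zeroed kmalloc_array(): overflow-checked n * size */
		demo_table = kcalloc(n, sizeof(*demo_table), GFP_KERNEL);
		if (!demo_table)
			return -ENOMEM;
		return 0;
	}

	static void demo_free(void)
	{
		kfree(demo_table);	/* safe even when demo_table is NULL */
		demo_table = NULL;
	}
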
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c
index 0bfebced63af..716d808d56db 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c
@@ -80,5 +80,5 @@ enum ia_css_err sh_css_hrt_sp_wait(void)
hrt_sleep();
}
-return IA_CSS_SUCCESS;
+ return IA_CSS_SUCCESS;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
index 5b2b78f96dc5..0910021286a4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
@@ -961,7 +961,7 @@ struct host_sp_queues {
extern int (*sh_css_printf)(const char *fmt, va_list args);
-STORAGE_CLASS_INLINE void
+static inline void
sh_css_print(const char *fmt, ...)
{
va_list ap;
@@ -973,7 +973,7 @@ sh_css_print(const char *fmt, ...)
}
}
-STORAGE_CLASS_INLINE void
+static inline void
sh_css_vprint(const char *fmt, va_list args)
{
if (sh_css_printf)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c
index eaf60e7b2dac..e6ebd1b08f0d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c
@@ -365,10 +365,8 @@ ia_css_shading_table_alloc(
IA_CSS_ENTER("");
me = kmalloc(sizeof(*me), GFP_KERNEL);
- if (me == NULL) {
- IA_CSS_ERROR("out of memory");
+ if (!me)
return me;
- }
me->width = width;
me->height = height;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c
index 48224370b8bf..fbb36112fe3c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c
@@ -4266,33 +4266,33 @@ sh_css_params_write_to_ddr_internal(
size_t *virt_size_tetra_y[
IA_CSS_MORPH_TABLE_NUM_PLANES];
- virt_addr_tetra_x[0] = &ddr_map->tetra_r_x;
- virt_addr_tetra_x[1] = &ddr_map->tetra_gr_x;
- virt_addr_tetra_x[2] = &ddr_map->tetra_gb_x;
- virt_addr_tetra_x[3] = &ddr_map->tetra_b_x;
- virt_addr_tetra_x[4] = &ddr_map->tetra_ratb_x;
- virt_addr_tetra_x[5] = &ddr_map->tetra_batr_x;
-
- virt_size_tetra_x[0] = &ddr_map_size->tetra_r_x;
- virt_size_tetra_x[1] = &ddr_map_size->tetra_gr_x;
- virt_size_tetra_x[2] = &ddr_map_size->tetra_gb_x;
- virt_size_tetra_x[3] = &ddr_map_size->tetra_b_x;
- virt_size_tetra_x[4] = &ddr_map_size->tetra_ratb_x;
- virt_size_tetra_x[5] = &ddr_map_size->tetra_batr_x;
-
- virt_addr_tetra_y[0] = &ddr_map->tetra_r_y;
- virt_addr_tetra_y[1] = &ddr_map->tetra_gr_y;
- virt_addr_tetra_y[2] = &ddr_map->tetra_gb_y;
- virt_addr_tetra_y[3] = &ddr_map->tetra_b_y;
- virt_addr_tetra_y[4] = &ddr_map->tetra_ratb_y;
- virt_addr_tetra_y[5] = &ddr_map->tetra_batr_y;
-
- virt_size_tetra_y[0] = &ddr_map_size->tetra_r_y;
- virt_size_tetra_y[1] = &ddr_map_size->tetra_gr_y;
- virt_size_tetra_y[2] = &ddr_map_size->tetra_gb_y;
- virt_size_tetra_y[3] = &ddr_map_size->tetra_b_y;
- virt_size_tetra_y[4] = &ddr_map_size->tetra_ratb_y;
- virt_size_tetra_y[5] = &ddr_map_size->tetra_batr_y;
+ virt_addr_tetra_x[0] = &ddr_map->tetra_r_x;
+ virt_addr_tetra_x[1] = &ddr_map->tetra_gr_x;
+ virt_addr_tetra_x[2] = &ddr_map->tetra_gb_x;
+ virt_addr_tetra_x[3] = &ddr_map->tetra_b_x;
+ virt_addr_tetra_x[4] = &ddr_map->tetra_ratb_x;
+ virt_addr_tetra_x[5] = &ddr_map->tetra_batr_x;
+
+ virt_size_tetra_x[0] = &ddr_map_size->tetra_r_x;
+ virt_size_tetra_x[1] = &ddr_map_size->tetra_gr_x;
+ virt_size_tetra_x[2] = &ddr_map_size->tetra_gb_x;
+ virt_size_tetra_x[3] = &ddr_map_size->tetra_b_x;
+ virt_size_tetra_x[4] = &ddr_map_size->tetra_ratb_x;
+ virt_size_tetra_x[5] = &ddr_map_size->tetra_batr_x;
+
+ virt_addr_tetra_y[0] = &ddr_map->tetra_r_y;
+ virt_addr_tetra_y[1] = &ddr_map->tetra_gr_y;
+ virt_addr_tetra_y[2] = &ddr_map->tetra_gb_y;
+ virt_addr_tetra_y[3] = &ddr_map->tetra_b_y;
+ virt_addr_tetra_y[4] = &ddr_map->tetra_ratb_y;
+ virt_addr_tetra_y[5] = &ddr_map->tetra_batr_y;
+
+ virt_size_tetra_y[0] = &ddr_map_size->tetra_r_y;
+ virt_size_tetra_y[1] = &ddr_map_size->tetra_gr_y;
+ virt_size_tetra_y[2] = &ddr_map_size->tetra_gb_y;
+ virt_size_tetra_y[3] = &ddr_map_size->tetra_b_y;
+ virt_size_tetra_y[4] = &ddr_map_size->tetra_ratb_y;
+ virt_size_tetra_y[5] = &ddr_map_size->tetra_batr_y;
buff_realloced = false;
for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
index b8aae4ba5a78..a1c81c12718c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
index 11162f595fc7..6e2dce7a5a2d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
@@ -58,7 +54,7 @@ static unsigned int nr_to_order_bottom(unsigned int nr)
return fls(nr) - 1;
}
-struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
+static struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
{
struct hmm_buffer_object *bo;
@@ -99,7 +95,7 @@ static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
return 0;
}
-struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
+static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
struct rb_node *node, unsigned int pgnr)
{
struct hmm_buffer_object *this, *ret_bo, *temp_bo;
@@ -150,7 +146,7 @@ remove_bo_and_return:
return temp_bo;
}
-struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
+static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
ia_css_ptr start)
{
struct rb_node *n = root->rb_node;
@@ -175,8 +171,8 @@ struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
return NULL;
}
-struct hmm_buffer_object *__bo_search_by_addr_in_range(struct rb_root *root,
- unsigned int start)
+static struct hmm_buffer_object *__bo_search_by_addr_in_range(
+ struct rb_root *root, unsigned int start)
{
struct rb_node *n = root->rb_node;
struct hmm_buffer_object *bo;
@@ -258,7 +254,7 @@ static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
rb_insert_color(&bo->node, root);
}
-struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
+static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
struct hmm_buffer_object *bo,
unsigned int pgnr)
{
@@ -331,7 +327,7 @@ static void __bo_take_off_handling(struct hmm_buffer_object *bo)
}
}
-struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
+static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
struct hmm_buffer_object *next_bo)
{
struct hmm_bo_device *bdev;
@@ -725,12 +721,10 @@ static int alloc_private_pages(struct hmm_buffer_object *bo,
pgnr = bo->pgnr;
- bo->page_obj = kmalloc(sizeof(struct hmm_page_object) * pgnr,
+ bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
GFP_KERNEL);
- if (unlikely(!bo->page_obj)) {
- dev_err(atomisp_dev, "out of memory for bo->page_obj\n");
+ if (unlikely(!bo->page_obj))
return -ENOMEM;
- }
i = 0;
alloc_pgnr = 0;
@@ -990,16 +984,13 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
struct vm_area_struct *vma;
struct page **pages;
- pages = kmalloc(sizeof(struct page *) * bo->pgnr, GFP_KERNEL);
- if (unlikely(!pages)) {
- dev_err(atomisp_dev, "out of memory for pages...\n");
+ pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
+ if (unlikely(!pages))
return -ENOMEM;
- }
- bo->page_obj = kmalloc(sizeof(struct hmm_page_object) * bo->pgnr,
+ bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
GFP_KERNEL);
if (unlikely(!bo->page_obj)) {
- dev_err(atomisp_dev, "out of memory for bo->page_obj...\n");
kfree(pages);
return -ENOMEM;
}
@@ -1168,13 +1159,9 @@ status_err2:
int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
{
- int ret;
-
check_bo_null_return(bo, 0);
- ret = bo->status & HMM_BO_PAGE_ALLOCED;
-
- return ret;
+ return bo->status & HMM_BO_PAGE_ALLOCED;
}
/*
@@ -1363,10 +1350,9 @@ void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
}
- pages = kmalloc(sizeof(*pages) * bo->pgnr, GFP_KERNEL);
+ pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
if (unlikely(!pages)) {
mutex_unlock(&bo->mutex);
- dev_err(atomisp_dev, "out of memory for pages...\n");
return NULL;
}
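
Editor's note: the hmm_bo.c hunks apply two common cleanups. kmalloc(sizeof(x) * n, ...) becomes kmalloc_array(n, sizeof(x), ...), which fails cleanly instead of silently overflowing the multiplication, and the dev_err() "out of memory" prints are removed because the page allocator already logs allocation failures. A small hedged example with hypothetical names:

	#include <linux/mm.h>
	#include <linux/slab.h>

	static struct page **demo_alloc_page_array(unsigned int pgnr)
	{
		struct page **pages;

		/* Overflow-safe replacement for kmalloc(sizeof(*pages) * pgnr, ...) */
		pages = kmalloc_array(pgnr, sizeof(*pages), GFP_KERNEL);
		if (unlikely(!pages))
			return NULL;	/* no dev_err(): the allocator already warns */

		return pages;
	}
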
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
index 19e0e9ee37de..f59fd9908257 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
@@ -116,8 +112,6 @@ static void free_pages_to_dynamic_pool(void *pool,
hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
GFP_KERNEL);
if (!hmm_page) {
- dev_err(atomisp_dev, "out of memory for hmm_page.\n");
-
/* free page directly */
ret = set_pages_wb(page_obj->page, 1);
if (ret)
@@ -151,10 +145,8 @@ static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
dypool_info = kmalloc(sizeof(struct hmm_dynamic_pool_info),
GFP_KERNEL);
- if (unlikely(!dypool_info)) {
- dev_err(atomisp_dev, "out of memory for repool_info.\n");
+ if (unlikely(!dypool_info))
return -ENOMEM;
- }
dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
sizeof(struct hmm_page), 0,
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
index bf6586805f7f..f300e7547997 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
@@ -92,15 +88,12 @@ static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info,
pool_info = kmalloc(sizeof(struct hmm_reserved_pool_info),
GFP_KERNEL);
- if (unlikely(!pool_info)) {
- dev_err(atomisp_dev, "out of memory for repool_info.\n");
+ if (unlikely(!pool_info))
return -ENOMEM;
- }
pool_info->pages = kmalloc(sizeof(struct page *) * pool_size,
GFP_KERNEL);
if (unlikely(!pool_info->pages)) {
- dev_err(atomisp_dev, "out of memory for repool_info->pages.\n");
kfree(pool_info);
return -ENOMEM;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c
index 0722a68a49e7..0df96e661983 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
@@ -89,10 +85,8 @@ static struct hmm_vm_node *alloc_hmm_vm_node(unsigned int pgnr,
struct hmm_vm_node *node;
node = kmem_cache_alloc(vm->cache, GFP_KERNEL);
- if (!node) {
- dev_err(atomisp_dev, "out of memory.\n");
+ if (!node)
return NULL;
- }
INIT_LIST_HEAD(&node->list);
node->pgnr = pgnr;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h
index 46a5d29e2d3a..fb38fc540b81 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef _hive_isp_css_custom_host_hrt_h_
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c
index 2e78976bb2ac..a94958bde718 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h
index 1328944a7afd..15c2dfb6794e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h
index 6b9fb1b2caaf..1e135c7c6d9b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h
index dffd6e9cf693..bd44ebbc427c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h
index a9446adb4c70..9e51a657ece4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h
index f1593aa38ce1..00885203fb14 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h
index 1ba360433d88..bf24e44462bc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __HMM_POOL_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h
index 07d40662de32..52098161082d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h
index 6b4eefc929e2..560014add005 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h
index 06041e94cbb2..031c0398bf65 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef SH_MMU_H_
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h
index b9bad9f06235..662e98f41da2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
index 706bd43e8b1b..e36c2a33b41a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c b/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c
index 97546bd124cd..c59bcc982966 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include "type_support.h"
diff --git a/drivers/staging/media/atomisp/platform/Makefile b/drivers/staging/media/atomisp/platform/Makefile
index df157630bda9..0e3b7e1c81c6 100644
--- a/drivers/staging/media/atomisp/platform/Makefile
+++ b/drivers/staging/media/atomisp/platform/Makefile
@@ -2,5 +2,4 @@
# Makefile for camera drivers.
#
-obj-$(CONFIG_INTEL_ATOMISP) += clock/
obj-$(CONFIG_INTEL_ATOMISP) += intel-mid/
diff --git a/drivers/staging/media/atomisp/platform/clock/Makefile b/drivers/staging/media/atomisp/platform/clock/Makefile
deleted file mode 100644
index 82fbe8b6968a..000000000000
--- a/drivers/staging/media/atomisp/platform/clock/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# Makefile for clock devices.
-#
-
-obj-$(CONFIG_INTEL_ATOMISP) += vlv2_plat_clock.o
-obj-$(CONFIG_INTEL_ATOMISP) += platform_vlv2_plat_clk.o
diff --git a/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c b/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c
deleted file mode 100644
index 0aae9b0283bb..000000000000
--- a/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * platform_vlv2_plat_clk.c - VLV2 platform clock driver
- * Copyright (C) 2013 Intel Corporation
- *
- * Author: Asutosh Pathak <asutosh.pathak@intel.com>
- * Author: Chandra Sekhar Anagani <chandra.sekhar.anagani@intel.com>
- * Author: Sergio Aguirre <sergio.a.aguirre.rodriguez@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- */
-
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/printk.h>
-
-static int __init vlv2_plat_clk_init(void)
-{
- struct platform_device *pdev;
-
- pdev = platform_device_register_simple("vlv2_plat_clk", -1, NULL, 0);
- if (IS_ERR(pdev)) {
- pr_err("platform_vlv2_plat_clk:register failed: %ld\n",
- PTR_ERR(pdev));
- return PTR_ERR(pdev);
- }
-
- return 0;
-}
-
-device_initcall(vlv2_plat_clk_init);
diff --git a/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h b/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h
deleted file mode 100644
index b730ab0e8223..000000000000
--- a/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * platform_vlv2_plat_clk.h: platform clock driver library header file
- * Copyright (C) 2013 Intel Corporation
- *
- * Author: Asutosh Pathak <asutosh.pathak@intel.com>
- * Author: Chandra Sekhar Anagani <chandra.sekhar.anagani@intel.com>
- * Author: Sergio Aguirre <sergio.a.aguirre.rodriguez@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- */
-#ifndef _PLATFORM_VLV2_PLAT_CLK_H_
-#define _PLATFORM_VLV2_PLAT_CLK_H_
-
-#include <linux/sfi.h>
-#include <asm/intel-mid.h>
-
-extern void __init *vlv2_plat_clk_device_platform_data(
- void *info) __attribute__((weak));
-#endif
diff --git a/drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c b/drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c
deleted file mode 100644
index f96789a31819..000000000000
--- a/drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * vlv2_plat_clock.c - VLV2 platform clock driver
- * Copyright (C) 2013 Intel Corporation
- *
- * Author: Asutosh Pathak <asutosh.pathak@intel.com>
- * Author: Chandra Sekhar Anagani <chandra.sekhar.anagani@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include "../../include/linux/vlv2_plat_clock.h"
-
-/* NOTE: Most of below constants could come from platform data.
- * To be fixed when appropriate ACPI support comes.
- */
-#define VLV2_PMC_CLK_BASE_ADDRESS 0xfed03060
-#define PLT_CLK_CTL_OFFSET(x) (0x04 * (x))
-
-#define CLK_CONFG_BIT_POS 0
-#define CLK_CONFG_BIT_LEN 2
-#define CLK_CONFG_D3_GATED 0
-#define CLK_CONFG_FORCE_ON 1
-#define CLK_CONFG_FORCE_OFF 2
-
-#define CLK_FREQ_TYPE_BIT_POS 2
-#define CLK_FREQ_TYPE_BIT_LEN 1
-#define CLK_FREQ_TYPE_XTAL 0 /* 25 MHz */
-#define CLK_FREQ_TYPE_PLL 1 /* 19.2 MHz */
-
-#define MAX_CLK_COUNT 5
-
-/* Helper macros to manipulate bitfields */
-#define REG_MASK(n) (((1 << (n##_BIT_LEN)) - 1) << (n##_BIT_POS))
-#define REG_SET_FIELD(r, n, v) (((r) & ~REG_MASK(n)) | \
- (((v) << (n##_BIT_POS)) & REG_MASK(n)))
-#define REG_GET_FIELD(r, n) (((r) & REG_MASK(n)) >> n##_BIT_POS)
-/*
- * vlv2 platform has 6 platform clocks, controlled by 4 byte registers
- * Total size required for mapping is 6*4 = 24 bytes
- */
-#define PMC_MAP_SIZE 24
-
-static DEFINE_MUTEX(clk_mutex);
-static void __iomem *pmc_base;
-
-/*
- * vlv2_plat_set_clock_freq - Set clock frequency to a specified platform clock
- * @clk_num: Platform clock number (i.e. 0, 1, 2, ...,5)
- * @freq_type: Clock frequency (0-25 MHz(XTAL), 1-19.2 MHz(PLL) )
- */
-int vlv2_plat_set_clock_freq(int clk_num, int freq_type)
-{
- void __iomem *addr;
-
- if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
- pr_err("Clock number out of range (%d)\n", clk_num);
- return -EINVAL;
- }
-
- if (freq_type != CLK_FREQ_TYPE_XTAL &&
- freq_type != CLK_FREQ_TYPE_PLL) {
- pr_err("wrong clock type\n");
- return -EINVAL;
- }
-
- if (!pmc_base) {
- pr_err("memio map is not set\n");
- return -EINVAL;
- }
-
- addr = pmc_base + PLT_CLK_CTL_OFFSET(clk_num);
-
- mutex_lock(&clk_mutex);
- writel(REG_SET_FIELD(readl(addr), CLK_FREQ_TYPE, freq_type), addr);
- mutex_unlock(&clk_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vlv2_plat_set_clock_freq);
-
-/*
- * vlv2_plat_get_clock_freq - Get the status of specified platform clock
- * @clk_num: Platform clock number (i.e. 0, 1, 2, ...,5)
- *
- * Returns 0 for 25 MHz(XTAL) and 1 for 19.2 MHz(PLL)
- */
-int vlv2_plat_get_clock_freq(int clk_num)
-{
- u32 ret;
-
- if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
- pr_err("Clock number out of range (%d)\n", clk_num);
- return -EINVAL;
- }
-
- if (!pmc_base) {
- pr_err("memio map is not set\n");
- return -EINVAL;
- }
-
- mutex_lock(&clk_mutex);
- ret = REG_GET_FIELD(readl(pmc_base + PLT_CLK_CTL_OFFSET(clk_num)),
- CLK_FREQ_TYPE);
- mutex_unlock(&clk_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(vlv2_plat_get_clock_freq);
-
-/*
- * vlv2_plat_configure_clock - Configure the specified platform clock
- * @clk_num: Platform clock number (i.e. 0, 1, 2, ...,5)
- * @conf: Clock gating:
- * 0 - Clock gated on D3 state
- * 1 - Force on
- * 2,3 - Force off
- */
-int vlv2_plat_configure_clock(int clk_num, u32 conf)
-{
- void __iomem *addr;
-
- if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
- pr_err("Clock number out of range (%d)\n", clk_num);
- return -EINVAL;
- }
-
- if (conf != CLK_CONFG_D3_GATED &&
- conf != CLK_CONFG_FORCE_ON &&
- conf != CLK_CONFG_FORCE_OFF) {
- pr_err("Invalid clock configuration requested\n");
- return -EINVAL;
- }
-
- if (!pmc_base) {
- pr_err("memio map is not set\n");
- return -EINVAL;
- }
-
- addr = pmc_base + PLT_CLK_CTL_OFFSET(clk_num);
-
- mutex_lock(&clk_mutex);
- writel(REG_SET_FIELD(readl(addr), CLK_CONFG, conf), addr);
- mutex_unlock(&clk_mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(vlv2_plat_configure_clock);
-
-/*
- * vlv2_plat_get_clock_status - Get the status of specified platform clock
- * @clk_num: Platform clock number (i.e. 0, 1, 2, ...,5)
- *
- * Returns 1 - On, 0 - Off
- */
-int vlv2_plat_get_clock_status(int clk_num)
-{
- int ret;
-
- if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
- pr_err("Clock number out of range (%d)\n", clk_num);
- return -EINVAL;
- }
-
- if (!pmc_base) {
- pr_err("memio map is not set\n");
- return -EINVAL;
- }
-
- mutex_lock(&clk_mutex);
- ret = (int)REG_GET_FIELD(readl(pmc_base + PLT_CLK_CTL_OFFSET(clk_num)),
- CLK_CONFG);
- mutex_unlock(&clk_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(vlv2_plat_get_clock_status);
-
-static int vlv2_plat_clk_probe(struct platform_device *pdev)
-{
- int i = 0;
-
- pmc_base = ioremap_nocache(VLV2_PMC_CLK_BASE_ADDRESS, PMC_MAP_SIZE);
- if (!pmc_base) {
- dev_err(&pdev->dev, "I/O memory remapping failed\n");
- return -ENOMEM;
- }
-
- /* Initialize all clocks as disabled */
- for (i = 0; i < MAX_CLK_COUNT; i++)
- vlv2_plat_configure_clock(i, CLK_CONFG_FORCE_OFF);
-
- dev_info(&pdev->dev, "vlv2_plat_clk initialized\n");
- return 0;
-}
-
-static const struct platform_device_id vlv2_plat_clk_id[] = {
- {"vlv2_plat_clk", 0},
- {}
-};
-
-static int vlv2_resume(struct device *device)
-{
- int i;
-
- /* Initialize all clocks as disabled */
- for (i = 0; i < MAX_CLK_COUNT; i++)
- vlv2_plat_configure_clock(i, CLK_CONFG_FORCE_OFF);
-
- return 0;
-}
-
-static int vlv2_suspend(struct device *device)
-{
- return 0;
-}
-
-static const struct dev_pm_ops vlv2_pm_ops = {
- .suspend = vlv2_suspend,
- .resume = vlv2_resume,
-};
-
-static struct platform_driver vlv2_plat_clk_driver = {
- .probe = vlv2_plat_clk_probe,
- .id_table = vlv2_plat_clk_id,
- .driver = {
- .name = "vlv2_plat_clk",
- .pm = &vlv2_pm_ops,
- },
-};
-
-static int __init vlv2_plat_clk_init(void)
-{
- return platform_driver_register(&vlv2_plat_clk_driver);
-}
-arch_initcall(vlv2_plat_clk_init);
diff --git a/drivers/staging/media/atomisp/platform/intel-mid/Makefile b/drivers/staging/media/atomisp/platform/intel-mid/Makefile
index 4621261c35db..c53db1364e21 100644
--- a/drivers/staging/media/atomisp/platform/intel-mid/Makefile
+++ b/drivers/staging/media/atomisp/platform/intel-mid/Makefile
@@ -1,5 +1,4 @@
#
# Makefile for intel-mid devices.
#
-obj-$(CONFIG_INTEL_ATOMISP) += intel_mid_pcihelpers.o
obj-$(CONFIG_INTEL_ATOMISP) += atomisp_gmin_platform.o
diff --git a/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c
index edaae93af8f9..bf9f34b7ad72 100644
--- a/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c
+++ b/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c
@@ -4,10 +4,10 @@
#include <linux/efi.h>
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <media/v4l2-subdev.h>
#include <linux/mfd/intel_soc_pmic.h>
-#include "../../include/linux/vlv2_plat_clock.h"
#include <linux/regulator/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio.h>
@@ -17,11 +17,7 @@
#define MAX_SUBDEVS 8
-/* Should be defined in vlv2_plat_clock API, isn't: */
-#define VLV2_CLK_PLL_19P2MHZ 1
-#define VLV2_CLK_XTAL_19P2MHZ 0
-#define VLV2_CLK_ON 1
-#define VLV2_CLK_OFF 2
+#define VLV2_CLK_PLL_19P2MHZ 1 /* XTAL on CHT */
#define ELDO1_SEL_REG 0x19
#define ELDO1_1P8V 0x16
#define ELDO1_CTRL_SHIFT 0x00
@@ -33,6 +29,8 @@ struct gmin_subdev {
struct v4l2_subdev *subdev;
int clock_num;
int clock_src;
+ bool clock_on;
+ struct clk *pmc_clk;
struct gpio_desc *gpio0;
struct gpio_desc *gpio1;
struct regulator *v1p8_reg;
@@ -108,49 +106,6 @@ const struct atomisp_platform_data *atomisp_get_platform_data(void)
}
EXPORT_SYMBOL_GPL(atomisp_get_platform_data);
-static int af_power_ctrl(struct v4l2_subdev *subdev, int flag)
-{
- struct gmin_subdev *gs = find_gmin_subdev(subdev);
-
- if (gs && gs->v2p8_vcm_on == flag)
- return 0;
- gs->v2p8_vcm_on = flag;
-
- /*
- * The power here is used for dw9817,
- * regulator is from rear sensor
- */
- if (gs->v2p8_vcm_reg) {
- if (flag)
- return regulator_enable(gs->v2p8_vcm_reg);
- else
- return regulator_disable(gs->v2p8_vcm_reg);
- }
- return 0;
-}
-
-/*
- * Used in a handful of modules. Focus motor control, I think. Note
- * that there is no configurability in the API, so this needs to be
- * fixed where it is used.
- *
- * struct camera_af_platform_data {
- * int (*power_ctrl)(struct v4l2_subdev *subdev, int flag);
- * };
- *
- * Note that the implementation in MCG platform_camera.c is stubbed
- * out anyway (i.e. returns zero from the callback) on BYT. So
- * neither needed on gmin platforms or supported upstream.
- */
-const struct camera_af_platform_data *camera_get_af_platform_data(void)
-{
- static struct camera_af_platform_data afpd = {
- .power_ctrl = af_power_ctrl,
- };
- return &afpd;
-}
-EXPORT_SYMBOL_GPL(camera_get_af_platform_data);
-
int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
struct camera_sensor_platform_data *plat_data,
enum intel_v4l2_subdev_type type)
@@ -334,15 +289,8 @@ static const struct {
#define CFG_VAR_NAME_MAX 64
-static int gmin_platform_init(struct i2c_client *client)
-{
- return 0;
-}
-
-static int gmin_platform_deinit(void)
-{
- return 0;
-}
+#define GMIN_PMC_CLK_NAME 14 /* "pmc_plt_clk_[0..5]" */
+static char gmin_pmc_clk_name[GMIN_PMC_CLK_NAME];
static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
{
@@ -377,21 +325,42 @@ static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
gmin_subdevs[i].gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW);
gmin_subdevs[i].gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW);
- if (!IS_ERR(gmin_subdevs[i].gpio0)) {
- ret = gpiod_direction_output(gmin_subdevs[i].gpio0, 0);
- if (ret)
- dev_err(dev, "gpio0 set output failed: %d\n", ret);
- } else {
- gmin_subdevs[i].gpio0 = NULL;
+ /* get PMC clock with clock framework */
+ snprintf(gmin_pmc_clk_name,
+ sizeof(gmin_pmc_clk_name),
+ "%s_%d", "pmc_plt_clk", gmin_subdevs[i].clock_num);
+
+ gmin_subdevs[i].pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name);
+ if (IS_ERR(gmin_subdevs[i].pmc_clk)) {
+ ret = PTR_ERR(gmin_subdevs[i].pmc_clk);
+
+ dev_err(dev,
+ "Failed to get clk from %s : %d\n",
+ gmin_pmc_clk_name,
+ ret);
+
+ return NULL;
}
- if (!IS_ERR(gmin_subdevs[i].gpio1)) {
- ret = gpiod_direction_output(gmin_subdevs[i].gpio1, 0);
- if (ret)
- dev_err(dev, "gpio1 set output failed: %d\n", ret);
- } else {
+ /*
+ * The firmware might enable the clock at
+ * boot (this information may or may not
+ * be reflected in the enable clock register).
+ * To change the rate we must disable the clock
+ * first to cover these cases. Due to common
+ * clock framework restrictions that do not allow
+ * disabling a clock that has not been enabled,
+ * we need to enable the clock first.
+ */
+ ret = clk_prepare_enable(gmin_subdevs[i].pmc_clk);
+ if (!ret)
+ clk_disable_unprepare(gmin_subdevs[i].pmc_clk);
+
+ if (IS_ERR(gmin_subdevs[i].gpio0))
+ gmin_subdevs[i].gpio0 = NULL;
+
+ if (IS_ERR(gmin_subdevs[i].gpio1))
gmin_subdevs[i].gpio1 = NULL;
- }
if (pmic_id == PMIC_REGULATOR) {
gmin_subdevs[i].v1p8_reg = regulator_get(dev, "V1P8SX");
@@ -539,13 +508,27 @@ static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on)
{
int ret = 0;
struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+
+ if (gs->clock_on == !!on)
+ return 0;
+
+ if (on) {
+ ret = clk_set_rate(gs->pmc_clk, gs->clock_src);
+
+ if (ret)
+ dev_err(&client->dev, "unable to set PMC rate %d\n",
+ gs->clock_src);
+
+ ret = clk_prepare_enable(gs->pmc_clk);
+ if (ret == 0)
+ gs->clock_on = true;
+ } else {
+ clk_disable_unprepare(gs->pmc_clk);
+ gs->clock_on = false;
+ }
- if (on)
- ret = vlv2_plat_set_clock_freq(gs->clock_num, gs->clock_src);
- if (ret)
- return ret;
- return vlv2_plat_configure_clock(gs->clock_num,
- on ? VLV2_CLK_ON : VLV2_CLK_OFF);
+ return ret;
}
static int gmin_csi_cfg(struct v4l2_subdev *sd, int flag)
@@ -592,8 +575,6 @@ static struct camera_sensor_platform_data gmin_plat = {
.v2p8_ctrl = gmin_v2p8_ctrl,
.v1p2_ctrl = gmin_v1p2_ctrl,
.flisclk_ctrl = gmin_flisclk_ctrl,
- .platform_init = gmin_platform_init,
- .platform_deinit = gmin_platform_deinit,
.csi_cfg = gmin_csi_cfg,
.get_vcm_ctrl = gmin_get_vcm_ctrl,
};
@@ -739,10 +720,8 @@ int camera_sensor_csi(struct v4l2_subdev *sd, u32 port,
if (flag) {
csi = kzalloc(sizeof(*csi), GFP_KERNEL);
- if (!csi) {
- dev_err(&client->dev, "out of memory\n");
+ if (!csi)
return -ENOMEM;
- }
csi->port = port;
csi->num_lanes = lanes;
csi->input_format = format;
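
A minimal sketch (not part of the patch) of the clock handling the hunks above
introduce: look up the "pmc_plt_clk_%d" clock, bounce it through enable/disable
to reach a known-off state, then program the rate. The helper name and the
caller-supplied rate are illustrative; only clk API calls already used in the
hunks appear here.

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Sketch: force a PMC platform clock off, then set its rate and enable it. */
	static int gmin_pmc_clk_setup(struct device *dev, const char *name,
				      unsigned long rate)
	{
		struct clk *clk = devm_clk_get(dev, name);
		int ret;

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/*
		 * Firmware may have left the clock running.  The common clock
		 * framework only lets us disable a clock it has seen enabled,
		 * so enable it once and disable it again before touching the
		 * rate.
		 */
		ret = clk_prepare_enable(clk);
		if (!ret)
			clk_disable_unprepare(clk);

		ret = clk_set_rate(clk, rate);
		if (ret)
			return ret;

		return clk_prepare_enable(clk);	/* leave the clock running */
	}
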
diff --git a/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c b/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c
deleted file mode 100644
index 4631b1d39bb4..000000000000
--- a/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c
+++ /dev/null
@@ -1,298 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/pm_qos.h>
-#include <linux/delay.h>
-
-/* G-Min addition: "platform_is()" lives in intel_mid_pm.h in the MCG
- * tree, but it's just platform ID info and we don't want to pull in
- * the whole SFI-based PM architecture.
- */
-#define INTEL_ATOM_MRST 0x26
-#define INTEL_ATOM_MFLD 0x27
-#define INTEL_ATOM_CLV 0x35
-#define INTEL_ATOM_MRFLD 0x4a
-#define INTEL_ATOM_BYT 0x37
-#define INTEL_ATOM_MOORFLD 0x5a
-#define INTEL_ATOM_CHT 0x4c
-/* synchronization for sharing the I2C controller */
-#define PUNIT_PORT 0x04
-#define PUNIT_DOORBELL_OPCODE (0xE0)
-#define PUNIT_DOORBELL_REG (0x0)
-#ifndef CSTATE_EXIT_LATENCY
-#define CSTATE_EXIT_LATENCY_C1 1
-#endif
-static inline int platform_is(u8 model)
-{
- return (boot_cpu_data.x86_model == model);
-}
-
-#include "../../include/asm/intel_mid_pcihelpers.h"
-
-/* Unified message bus read/write operation */
-static DEFINE_SPINLOCK(msgbus_lock);
-
-static struct pci_dev *pci_root;
-static struct pm_qos_request pm_qos;
-
-#define DW_I2C_NEED_QOS (platform_is(INTEL_ATOM_BYT))
-
-static int intel_mid_msgbus_init(void)
-{
- pci_root = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
- if (!pci_root) {
- pr_err("%s: Error: msgbus PCI handle NULL\n", __func__);
- return -ENODEV;
- }
-
- if (DW_I2C_NEED_QOS) {
- pm_qos_add_request(&pm_qos,
- PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
- }
- return 0;
-}
-fs_initcall(intel_mid_msgbus_init);
-
-u32 intel_mid_msgbus_read32_raw(u32 cmd)
-{
- unsigned long irq_flags;
- u32 data;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-
- return data;
-}
-EXPORT_SYMBOL(intel_mid_msgbus_read32_raw);
-
-/*
- * GU: this function is only used by the VISA and 'VXD' drivers.
- */
-u32 intel_mid_msgbus_read32_raw_ext(u32 cmd, u32 cmd_ext)
-{
- unsigned long irq_flags;
- u32 data;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG, cmd_ext);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-
- return data;
-}
-EXPORT_SYMBOL(intel_mid_msgbus_read32_raw_ext);
-
-void intel_mid_msgbus_write32_raw(u32 cmd, u32 data)
-{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-}
-EXPORT_SYMBOL(intel_mid_msgbus_write32_raw);
-
-/*
- * GU: this function is only used by the VISA and 'VXD' drivers.
- */
-void intel_mid_msgbus_write32_raw_ext(u32 cmd, u32 cmd_ext, u32 data)
-{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG, cmd_ext);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-}
-EXPORT_SYMBOL(intel_mid_msgbus_write32_raw_ext);
-
-u32 intel_mid_msgbus_read32(u8 port, u32 addr)
-{
- unsigned long irq_flags;
- u32 data;
- u32 cmd;
- u32 cmdext;
-
- cmd = (PCI_ROOT_MSGBUS_READ << 24) | (port << 16) |
- ((addr & 0xff) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
- cmdext = addr & 0xffffff00;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
-
- if (cmdext) {
- /* This resets to 0 automatically, no need to write 0 */
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
- cmdext);
- }
-
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-
- return data;
-}
-EXPORT_SYMBOL(intel_mid_msgbus_read32);
-
-void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data)
-{
- unsigned long irq_flags;
- u32 cmd;
- u32 cmdext;
-
- cmd = (PCI_ROOT_MSGBUS_WRITE << 24) | (port << 16) |
- ((addr & 0xFF) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
- cmdext = addr & 0xffffff00;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
-
- if (cmdext) {
- /* This resets to 0 automatically, no need to write 0 */
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
- cmdext);
- }
-
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-}
-EXPORT_SYMBOL(intel_mid_msgbus_write32);
-
-/* called only from where is later then fs_initcall */
-u32 intel_mid_soc_stepping(void)
-{
- return pci_root->revision;
-}
-EXPORT_SYMBOL(intel_mid_soc_stepping);
-
-static bool is_south_complex_device(struct pci_dev *dev)
-{
- unsigned int base_class = dev->class >> 16;
- unsigned int sub_class = (dev->class & SUB_CLASS_MASK) >> 8;
-
- /* other than camera, pci bridges and display,
- * everything else are south complex devices.
- */
- if (((base_class == PCI_BASE_CLASS_MULTIMEDIA) &&
- (sub_class == ISP_SUB_CLASS)) ||
- (base_class == PCI_BASE_CLASS_BRIDGE) ||
- ((base_class == PCI_BASE_CLASS_DISPLAY) && !sub_class))
- return false;
- else
- return true;
-}
-
-/* In BYT platform, d3_delay for internal south complex devices,
- * they are not subject to 10 ms d3 to d0 delay required by pci spec.
- */
-static void pci_d3_delay_fixup(struct pci_dev *dev)
-{
- if (platform_is(INTEL_ATOM_BYT) ||
- platform_is(INTEL_ATOM_CHT)) {
- /* All internal devices are in bus 0. */
- if (dev->bus->number == 0 && is_south_complex_device(dev)) {
- dev->d3_delay = INTERNAL_PCI_PM_D3_WAIT;
- dev->d3cold_delay = INTERNAL_PCI_PM_D3_WAIT;
- }
- }
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3_delay_fixup);
-
-#define PUNIT_SEMAPHORE (platform_is(INTEL_ATOM_BYT) ? 0x7 : 0x10E)
-#define GET_SEM() (intel_mid_msgbus_read32(PUNIT_PORT, PUNIT_SEMAPHORE) & 0x1)
-
-static void reset_semaphore(void)
-{
- u32 data;
-
- data = intel_mid_msgbus_read32(PUNIT_PORT, PUNIT_SEMAPHORE);
- smp_mb();
- data = data & 0xfffffffc;
- intel_mid_msgbus_write32(PUNIT_PORT, PUNIT_SEMAPHORE, data);
- smp_mb();
-
-}
-
-int intel_mid_dw_i2c_acquire_ownership(void)
-{
- u32 ret = 0;
- u32 data = 0; /* data sent to PUNIT */
- u32 cmd;
- u32 cmdext;
- int timeout = 1000;
-
- if (DW_I2C_NEED_QOS)
- pm_qos_update_request(&pm_qos, CSTATE_EXIT_LATENCY_C1 - 1);
-
- /*
- * We need disable irq. Otherwise, the main thread
- * might be preempted and the other thread jumps to
- * disable irq for a long time. Another case is
- * some irq handlers might trigger power voltage change
- */
- BUG_ON(irqs_disabled());
- local_irq_disable();
-
- /* host driver writes 0x2 to side band register 0x7 */
- intel_mid_msgbus_write32(PUNIT_PORT, PUNIT_SEMAPHORE, 0x2);
- smp_mb();
-
- /* host driver sends 0xE0 opcode to PUNIT and writes 0 register */
- cmd = (PUNIT_DOORBELL_OPCODE << 24) | (PUNIT_PORT << 16) |
- ((PUNIT_DOORBELL_REG & 0xFF) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
- cmdext = PUNIT_DOORBELL_REG & 0xffffff00;
-
- if (cmdext)
- intel_mid_msgbus_write32_raw_ext(cmd, cmdext, data);
- else
- intel_mid_msgbus_write32_raw(cmd, data);
-
- /* host driver waits for bit 0 to be set in side band 0x7 */
- while (GET_SEM() != 0x1) {
- udelay(100);
- timeout--;
- if (timeout <= 0) {
- pr_err("Timeout: semaphore timed out, reset sem\n");
- ret = -ETIMEDOUT;
- reset_semaphore();
- /*Delay 1ms in case race with punit*/
- udelay(1000);
- if (GET_SEM() != 0) {
- /*Reset again as kernel might race with punit*/
- reset_semaphore();
- }
- pr_err("PUNIT SEM: %d\n",
- intel_mid_msgbus_read32(PUNIT_PORT,
- PUNIT_SEMAPHORE));
- local_irq_enable();
-
- if (DW_I2C_NEED_QOS) {
- pm_qos_update_request(&pm_qos,
- PM_QOS_DEFAULT_VALUE);
- }
-
- return ret;
- }
- }
- smp_mb();
-
- return ret;
-}
-EXPORT_SYMBOL(intel_mid_dw_i2c_acquire_ownership);
-
-int intel_mid_dw_i2c_release_ownership(void)
-{
- reset_semaphore();
- local_irq_enable();
-
- if (DW_I2C_NEED_QOS)
- pm_qos_update_request(&pm_qos, PM_QOS_DEFAULT_VALUE);
-
- return 0;
-}
-EXPORT_SYMBOL(intel_mid_dw_i2c_release_ownership);
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 58adaea44eb5..5d3b0e5a1283 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -1964,7 +1964,7 @@ static ssize_t bcm2048_##prop##_write(struct device *dev, \
return err < 0 ? err : count; \
}
-#define property_read(prop, size, mask) \
+#define property_read(prop, mask) \
static ssize_t bcm2048_##prop##_read(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
@@ -1999,9 +1999,9 @@ static ssize_t bcm2048_##prop##_read(struct device *dev, \
return sprintf(buf, mask "\n", value); \
}
-#define DEFINE_SYSFS_PROPERTY(prop, signal, size, mask, check) \
-property_write(prop, signal size, mask, check) \
-property_read(prop, size, mask)
+#define DEFINE_SYSFS_PROPERTY(prop, prop_type, mask, check) \
+property_write(prop, prop_type, mask, check) \
+property_read(prop, mask) \
#define property_str_read(prop, size) \
static ssize_t bcm2048_##prop##_read(struct device *dev, \
@@ -2027,39 +2027,39 @@ static ssize_t bcm2048_##prop##_read(struct device *dev, \
return count; \
}
-DEFINE_SYSFS_PROPERTY(power_state, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(mute, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(audio_route, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(dac_output, unsigned, int, "%u", 0)
-
-DEFINE_SYSFS_PROPERTY(fm_hi_lo_injection, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_frequency, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_af_frequency, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_deemphasis, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_rds_mask, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_best_tune_mode, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_search_rssi_threshold, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_search_mode_direction, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_search_tune_mode, unsigned, int, "%u", value > 3)
-
-DEFINE_SYSFS_PROPERTY(rds, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_b_block_mask, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_b_block_match, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_pi_mask, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_pi_match, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_wline, unsigned, int, "%u", 0)
-property_read(rds_pi, unsigned int, "%x")
+DEFINE_SYSFS_PROPERTY(power_state, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(mute, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(audio_route, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(dac_output, unsigned int, "%u", 0)
+
+DEFINE_SYSFS_PROPERTY(fm_hi_lo_injection, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_frequency, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_af_frequency, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_deemphasis, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_rds_mask, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_best_tune_mode, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_rssi_threshold, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_mode_direction, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_tune_mode, unsigned int, "%u", value > 3)
+
+DEFINE_SYSFS_PROPERTY(rds, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_b_block_mask, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_b_block_match, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_pi_mask, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_pi_match, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_wline, unsigned int, "%u", 0)
+property_read(rds_pi, "%x")
property_str_read(rds_rt, (BCM2048_MAX_RDS_RT + 1))
property_str_read(rds_ps, (BCM2048_MAX_RDS_PS + 1))
-property_read(fm_rds_flags, unsigned int, "%u")
+property_read(fm_rds_flags, "%u")
property_str_read(rds_data, BCM2048_MAX_RDS_RADIO_TEXT * 5)
-property_read(region_bottom_frequency, unsigned int, "%u")
-property_read(region_top_frequency, unsigned int, "%u")
+property_read(region_bottom_frequency, "%u")
+property_read(region_top_frequency, "%u")
property_signed_read(fm_carrier_error, int, "%d")
property_signed_read(fm_rssi, int, "%d")
-DEFINE_SYSFS_PROPERTY(region, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(region, unsigned int, "%u", 0)
static struct device_attribute attrs[] = {
__ATTR(power_state, 0644, bcm2048_power_state_read,
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index c2bb5ef2acb4..9e41987f9884 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -320,9 +320,10 @@ static int prp_link_validate(struct v4l2_subdev *sd,
* the ->PRPENC link cannot be enabled if the source
* is the VDIC
*/
- if (priv->sink_sd_prpenc)
+ if (priv->sink_sd_prpenc) {
ret = -EINVAL;
- goto out;
+ goto out;
+ }
} else {
/* the source is a CSI */
if (!csi) {
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index b55e5ebba8b4..47c4c954fed5 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -440,6 +440,11 @@ unlock:
return media_device_register(&imxmd->md);
}
+static const struct v4l2_async_notifier_operations imx_media_subdev_ops = {
+ .bound = imx_media_subdev_bound,
+ .complete = imx_media_probe_complete,
+};
+
/*
* adds controls to a video device from an entity subdevice.
* Continues upstream from the entity's sink pads.
@@ -608,8 +613,7 @@ static int imx_media_probe(struct platform_device *pdev)
/* prepare the async subdev notifier and register it */
imxmd->subdev_notifier.subdevs = imxmd->async_ptrs;
- imxmd->subdev_notifier.bound = imx_media_subdev_bound;
- imxmd->subdev_notifier.complete = imx_media_probe_complete;
+ imxmd->subdev_notifier.ops = &imx_media_subdev_ops;
ret = v4l2_async_notifier_register(&imxmd->v4l2_dev,
&imxmd->subdev_notifier);
if (ret) {
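
For reference, this hunk is the v4l2-async conversion that moves the
per-notifier .bound/.complete callbacks into a const ops table. A minimal
sketch of the resulting pattern, with driver-local names made up for
illustration:

	static const struct v4l2_async_notifier_operations my_notifier_ops = {
		.bound    = my_subdev_bound,	/* called for each bound subdev */
		.complete = my_probe_complete,	/* called once all subdevs are bound */
	};

	/* in probe: */
	notifier.subdevs = async_ptrs;
	notifier.ops = &my_notifier_ops;
	ret = v4l2_async_notifier_register(&v4l2_dev, &notifier);
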
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 71af13bd0ebd..6bd0717bf76e 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -99,13 +99,14 @@ struct IR {
struct kref ref;
struct list_head list;
- /* FIXME spinlock access to l.features */
- struct lirc_driver l;
+ /* FIXME spinlock access to l->features */
+ struct lirc_dev *l;
struct lirc_buffer rbuf;
struct mutex ir_lock;
atomic_t open_count;
+ struct device *dev;
struct i2c_adapter *adapter;
spinlock_t rx_ref_lock; /* struct IR_rx kref get()/put() */
@@ -183,10 +184,8 @@ static void release_ir_device(struct kref *ref)
* ir->open_count == 0 - happens on final close()
* ir_lock, tx_ref_lock, rx_ref_lock, all released
*/
- if (ir->l.minor >= 0) {
- lirc_unregister_driver(ir->l.minor);
- ir->l.minor = -1;
- }
+ if (ir->l)
+ lirc_unregister_device(ir->l);
if (kfifo_initialized(&ir->rbuf.fifo))
lirc_buffer_free(&ir->rbuf);
@@ -243,7 +242,7 @@ static void release_ir_rx(struct kref *ref)
* and releasing the ir reference can cause a sleep. That work is
* performed by put_ir_rx()
*/
- ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
+ ir->l->features &= ~LIRC_CAN_REC_LIRCCODE;
/* Don't put_ir_device(rx->ir) here; lock can't be freed yet */
ir->rx = NULL;
/* Don't do the kfree(rx) here; we still need to kill the poll thread */
@@ -288,7 +287,7 @@ static void release_ir_tx(struct kref *ref)
struct IR_tx *tx = container_of(ref, struct IR_tx, ref);
struct IR *ir = tx->ir;
- ir->l.features &= ~LIRC_CAN_SEND_LIRCCODE;
+ ir->l->features &= ~LIRC_CAN_SEND_LIRCCODE;
/* Don't put_ir_device(tx->ir) here, so our lock doesn't get freed */
ir->tx = NULL;
kfree(tx);
@@ -317,12 +316,12 @@ static int add_to_buf(struct IR *ir)
int ret;
int failures = 0;
unsigned char sendbuf[1] = { 0 };
- struct lirc_buffer *rbuf = ir->l.rbuf;
+ struct lirc_buffer *rbuf = ir->l->buf;
struct IR_rx *rx;
struct IR_tx *tx;
if (lirc_buffer_full(rbuf)) {
- dev_dbg(ir->l.dev, "buffer overflow\n");
+ dev_dbg(ir->dev, "buffer overflow\n");
return -EOVERFLOW;
}
@@ -368,17 +367,17 @@ static int add_to_buf(struct IR *ir)
*/
ret = i2c_master_send(rx->c, sendbuf, 1);
if (ret != 1) {
- dev_err(ir->l.dev, "i2c_master_send failed with %d\n",
+ dev_err(ir->dev, "i2c_master_send failed with %d\n",
ret);
if (failures >= 3) {
mutex_unlock(&ir->ir_lock);
- dev_err(ir->l.dev,
+ dev_err(ir->dev,
"unable to read from the IR chip after 3 resets, giving up\n");
break;
}
/* Looks like the chip crashed, reset it */
- dev_err(ir->l.dev,
+ dev_err(ir->dev,
"polling the IR receiver chip failed, trying reset\n");
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -405,14 +404,14 @@ static int add_to_buf(struct IR *ir)
ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
mutex_unlock(&ir->ir_lock);
if (ret != sizeof(keybuf)) {
- dev_err(ir->l.dev,
+ dev_err(ir->dev,
"i2c_master_recv failed with %d -- keeping last read buffer\n",
ret);
} else {
rx->b[0] = keybuf[3];
rx->b[1] = keybuf[4];
rx->b[2] = keybuf[5];
- dev_dbg(ir->l.dev,
+ dev_dbg(ir->dev,
"key (0x%02x/0x%02x)\n",
rx->b[0], rx->b[1]);
}
@@ -463,9 +462,9 @@ static int add_to_buf(struct IR *ir)
static int lirc_thread(void *arg)
{
struct IR *ir = arg;
- struct lirc_buffer *rbuf = ir->l.rbuf;
+ struct lirc_buffer *rbuf = ir->l->buf;
- dev_dbg(ir->l.dev, "poll thread started\n");
+ dev_dbg(ir->dev, "poll thread started\n");
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -493,7 +492,7 @@ static int lirc_thread(void *arg)
wake_up_interruptible(&rbuf->wait_poll);
}
- dev_dbg(ir->l.dev, "poll thread ended\n");
+ dev_dbg(ir->dev, "poll thread ended\n");
return 0;
}
@@ -646,10 +645,10 @@ static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
buf[0] = (unsigned char)(i + 1);
for (j = 0; j < tosend; ++j)
buf[1 + j] = data_block[i + j];
- dev_dbg(tx->ir->l.dev, "%*ph", 5, buf);
+ dev_dbg(tx->ir->dev, "%*ph", 5, buf);
ret = i2c_master_send(tx->c, buf, tosend + 1);
if (ret != tosend + 1) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -674,7 +673,7 @@ static int send_boot_data(struct IR_tx *tx)
buf[1] = 0x20;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -691,22 +690,22 @@ static int send_boot_data(struct IR_tx *tx)
}
if (ret != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Here comes the firmware version... (hopefully) */
ret = i2c_master_recv(tx->c, buf, 4);
if (ret != 4) {
- dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_recv failed with %d\n", ret);
return 0;
}
if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
- dev_err(tx->ir->l.dev, "unexpected IR TX init response: %02x\n",
+ dev_err(tx->ir->dev, "unexpected IR TX init response: %02x\n",
buf[0]);
return 0;
}
- dev_notice(tx->ir->l.dev,
+ dev_notice(tx->ir->dev,
"Zilog/Hauppauge IR blaster firmware version %d.%d.%d loaded\n",
buf[1], buf[2], buf[3]);
@@ -751,15 +750,15 @@ static int fw_load(struct IR_tx *tx)
}
/* Request codeset data file */
- ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->l.dev);
+ ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->dev);
if (ret != 0) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"firmware haup-ir-blaster.bin not available (%d)\n",
ret);
ret = ret < 0 ? ret : -EFAULT;
goto out;
}
- dev_dbg(tx->ir->l.dev, "firmware of size %zu loaded\n", fw_entry->size);
+ dev_dbg(tx->ir->dev, "firmware of size %zu loaded\n", fw_entry->size);
/* Parse the file */
tx_data = vmalloc(sizeof(*tx_data));
@@ -787,7 +786,7 @@ static int fw_load(struct IR_tx *tx)
if (!read_uint8(&data, tx_data->endp, &version))
goto corrupt;
if (version != 1) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"unsupported code set file version (%u, expected 1) -- please upgrade to a newer driver\n",
version);
fw_unload_locked();
@@ -804,7 +803,7 @@ static int fw_load(struct IR_tx *tx)
&tx_data->num_code_sets))
goto corrupt;
- dev_dbg(tx->ir->l.dev, "%u IR blaster codesets loaded\n",
+ dev_dbg(tx->ir->dev, "%u IR blaster codesets loaded\n",
tx_data->num_code_sets);
tx_data->code_sets = vmalloc(
@@ -869,7 +868,7 @@ static int fw_load(struct IR_tx *tx)
goto out;
corrupt:
- dev_err(tx->ir->l.dev, "firmware is corrupt\n");
+ dev_err(tx->ir->dev, "firmware is corrupt\n");
fw_unload_locked();
ret = -EFAULT;
@@ -882,16 +881,16 @@ out:
static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
loff_t *ppos)
{
- struct IR *ir = filep->private_data;
+ struct IR *ir = lirc_get_pdata(filep);
struct IR_rx *rx;
- struct lirc_buffer *rbuf = ir->l.rbuf;
+ struct lirc_buffer *rbuf = ir->l->buf;
int ret = 0, written = 0, retries = 0;
unsigned int m;
DECLARE_WAITQUEUE(wait, current);
- dev_dbg(ir->l.dev, "read called\n");
+ dev_dbg(ir->dev, "read called\n");
if (n % rbuf->chunk_size) {
- dev_dbg(ir->l.dev, "read result = -EINVAL\n");
+ dev_dbg(ir->dev, "read result = -EINVAL\n");
return -EINVAL;
}
@@ -935,7 +934,7 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
unsigned char buf[MAX_XFER_SIZE];
if (rbuf->chunk_size > sizeof(buf)) {
- dev_err(ir->l.dev,
+ dev_err(ir->dev,
"chunk_size is too big (%d)!\n",
rbuf->chunk_size);
ret = -EINVAL;
@@ -950,7 +949,7 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
retries++;
}
if (retries >= 5) {
- dev_err(ir->l.dev, "Buffer read failed!\n");
+ dev_err(ir->dev, "Buffer read failed!\n");
ret = -EIO;
}
}
@@ -960,7 +959,7 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
put_ir_rx(rx, false);
set_current_state(TASK_RUNNING);
- dev_dbg(ir->l.dev, "read result = %d (%s)\n", ret,
+ dev_dbg(ir->dev, "read result = %d (%s)\n", ret,
ret ? "Error" : "OK");
return ret ? ret : written;
@@ -977,7 +976,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
ret = get_key_data(data_block, code, key);
if (ret == -EPROTO) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"failed to get data for code %u, key %u -- check lircd.conf entries\n",
code, key);
return ret;
@@ -995,7 +994,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
buf[1] = 0x40;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -1008,18 +1007,18 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
}
if (ret != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Send finished download? */
ret = i2c_master_recv(tx->c, buf, 1);
if (ret != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_recv failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
if (buf[0] != 0xA0) {
- dev_err(tx->ir->l.dev, "unexpected IR TX response #1: %02x\n",
+ dev_err(tx->ir->dev, "unexpected IR TX response #1: %02x\n",
buf[0]);
return -EFAULT;
}
@@ -1029,7 +1028,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
buf[1] = 0x80;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -1039,7 +1038,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
* going to skip this whole mess and say we're done on the HD PVR
*/
if (!tx->post_tx_ready_poll) {
- dev_dbg(tx->ir->l.dev, "sent code %u, key %u\n", code, key);
+ dev_dbg(tx->ir->dev, "sent code %u, key %u\n", code, key);
return 0;
}
@@ -1055,12 +1054,12 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
ret = i2c_master_send(tx->c, buf, 1);
if (ret == 1)
break;
- dev_dbg(tx->ir->l.dev,
+ dev_dbg(tx->ir->dev,
"NAK expected: i2c_master_send failed with %d (try %d)\n",
ret, i + 1);
}
if (ret != 1) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"IR TX chip never got ready: last i2c_master_send failed with %d\n",
ret);
return ret < 0 ? ret : -EFAULT;
@@ -1069,17 +1068,17 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
/* Seems to be an 'ok' response */
i = i2c_master_recv(tx->c, buf, 1);
if (i != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_recv failed with %d\n", ret);
return -EFAULT;
}
if (buf[0] != 0x80) {
- dev_err(tx->ir->l.dev, "unexpected IR TX response #2: %02x\n",
+ dev_err(tx->ir->dev, "unexpected IR TX response #2: %02x\n",
buf[0]);
return -EFAULT;
}
/* Oh good, it worked */
- dev_dbg(tx->ir->l.dev, "sent code %u, key %u\n", code, key);
+ dev_dbg(tx->ir->dev, "sent code %u, key %u\n", code, key);
return 0;
}
@@ -1092,7 +1091,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
static ssize_t write(struct file *filep, const char __user *buf, size_t n,
loff_t *ppos)
{
- struct IR *ir = filep->private_data;
+ struct IR *ir = lirc_get_pdata(filep);
struct IR_tx *tx;
size_t i;
int failures = 0;
@@ -1165,11 +1164,11 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
*/
if (ret != 0) {
/* Looks like the chip crashed, reset it */
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"sending to the IR transmitter chip failed, trying reset\n");
if (failures >= 3) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"unable to send to the IR chip after 3 resets, giving up\n");
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
@@ -1200,12 +1199,12 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
/* copied from lirc_dev */
static unsigned int poll(struct file *filep, poll_table *wait)
{
- struct IR *ir = filep->private_data;
+ struct IR *ir = lirc_get_pdata(filep);
struct IR_rx *rx;
- struct lirc_buffer *rbuf = ir->l.rbuf;
+ struct lirc_buffer *rbuf = ir->l->buf;
unsigned int ret;
- dev_dbg(ir->l.dev, "%s called\n", __func__);
+ dev_dbg(ir->dev, "%s called\n", __func__);
rx = get_ir_rx(ir);
if (!rx) {
@@ -1213,7 +1212,7 @@ static unsigned int poll(struct file *filep, poll_table *wait)
* Revisit this, if our poll function ever reports writeable
* status for Tx
*/
- dev_dbg(ir->l.dev, "%s result = POLLERR\n", __func__);
+ dev_dbg(ir->dev, "%s result = POLLERR\n", __func__);
return POLLERR;
}
@@ -1226,19 +1225,19 @@ static unsigned int poll(struct file *filep, poll_table *wait)
/* Indicate what ops could happen immediately without blocking */
ret = lirc_buffer_empty(rbuf) ? 0 : (POLLIN | POLLRDNORM);
- dev_dbg(ir->l.dev, "%s result = %s\n", __func__,
+ dev_dbg(ir->dev, "%s result = %s\n", __func__,
ret ? "POLLIN|POLLRDNORM" : "none");
return ret;
}
static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
- struct IR *ir = filep->private_data;
+ struct IR *ir = lirc_get_pdata(filep);
unsigned long __user *uptr = (unsigned long __user *)arg;
int result;
unsigned long mode, features;
- features = ir->l.features;
+ features = ir->l->features;
switch (cmd) {
case LIRC_GET_LENGTH:
@@ -1283,46 +1282,18 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
return result;
}
-static struct IR *get_ir_device_by_minor(unsigned int minor)
-{
- struct IR *ir;
- struct IR *ret = NULL;
-
- mutex_lock(&ir_devices_lock);
-
- if (!list_empty(&ir_devices_list)) {
- list_for_each_entry(ir, &ir_devices_list, list) {
- if (ir->l.minor == minor) {
- ret = get_ir_device(ir, true);
- break;
- }
- }
- }
-
- mutex_unlock(&ir_devices_lock);
- return ret;
-}
-
/*
- * Open the IR device. Get hold of our IR structure and
- * stash it in private_data for the file
+ * Open the IR device.
*/
static int open(struct inode *node, struct file *filep)
{
struct IR *ir;
- unsigned int minor = MINOR(node->i_rdev);
- /* find our IR struct */
- ir = get_ir_device_by_minor(minor);
-
- if (!ir)
- return -ENODEV;
+ lirc_init_pdata(node, filep);
+ ir = lirc_get_pdata(filep);
atomic_inc(&ir->open_count);
- /* stash our IR struct */
- filep->private_data = ir;
-
nonseekable_open(node, filep);
return 0;
}
@@ -1330,14 +1301,7 @@ static int open(struct inode *node, struct file *filep)
/* Close the IR device */
static int close(struct inode *node, struct file *filep)
{
- /* find our IR struct */
- struct IR *ir = filep->private_data;
-
- if (!ir) {
- pr_err("ir: %s: no private_data attached to the file!\n",
- __func__);
- return -ENODEV;
- }
+ struct IR *ir = lirc_get_pdata(filep);
atomic_dec(&ir->open_count);
@@ -1383,16 +1347,6 @@ static const struct file_operations lirc_fops = {
.release = close
};
-static struct lirc_driver lirc_template = {
- .name = "lirc_zilog",
- .minor = -1,
- .code_length = 13,
- .buffer_size = BUFLEN / 2,
- .chunk_size = 2,
- .fops = &lirc_fops,
- .owner = THIS_MODULE,
-};
-
static int ir_remove(struct i2c_client *client)
{
if (strncmp("ir_tx_z8", client->name, 8) == 0) {
@@ -1476,27 +1430,42 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
list_add_tail(&ir->list, &ir_devices_list);
ir->adapter = adap;
+ ir->dev = &adap->dev;
mutex_init(&ir->ir_lock);
atomic_set(&ir->open_count, 0);
spin_lock_init(&ir->tx_ref_lock);
spin_lock_init(&ir->rx_ref_lock);
/* set lirc_dev stuff */
- memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
+ ir->l = lirc_allocate_device();
+ if (!ir->l) {
+ ret = -ENOMEM;
+ goto out_put_ir;
+ }
+
+ snprintf(ir->l->name, sizeof(ir->l->name), "lirc_zilog");
+ ir->l->code_length = 13;
+ ir->l->fops = &lirc_fops;
+ ir->l->owner = THIS_MODULE;
+ ir->l->dev.parent = &adap->dev;
+
/*
* FIXME this is a pointer reference to us, but no refcount.
*
* This OK for now, since lirc_dev currently won't touch this
* buffer as we provide our own lirc_fops.
*
- * Currently our own lirc_fops rely on this ir->l.rbuf pointer
+ * Currently our own lirc_fops rely on this ir->l->buf pointer
*/
- ir->l.rbuf = &ir->rbuf;
- ir->l.dev = &adap->dev;
- ret = lirc_buffer_init(ir->l.rbuf,
- ir->l.chunk_size, ir->l.buffer_size);
- if (ret)
+ ir->l->buf = &ir->rbuf;
+ /* This will be returned by lirc_get_pdata() */
+ ir->l->data = ir;
+ ret = lirc_buffer_init(ir->l->buf, 2, BUFLEN / 2);
+ if (ret) {
+ lirc_free_device(ir->l);
+ ir->l = NULL;
goto out_put_ir;
+ }
}
if (tx_probe) {
@@ -1512,7 +1481,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
kref_init(&tx->ref);
ir->tx = tx;
- ir->l.features |= LIRC_CAN_SEND_LIRCCODE;
+ ir->l->features |= LIRC_CAN_SEND_LIRCCODE;
mutex_init(&tx->client_lock);
tx->c = client;
tx->need_boot = 1;
@@ -1538,7 +1507,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Proceed only if the Rx client is also ready or not needed */
if (!rx && !tx_only) {
- dev_info(tx->ir->l.dev,
+ dev_info(tx->ir->dev,
"probe of IR Tx on %s (i2c-%d) done. Waiting on IR Rx.\n",
adap->name, adap->nr);
goto out_ok;
@@ -1556,7 +1525,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
kref_init(&rx->ref);
ir->rx = rx;
- ir->l.features |= LIRC_CAN_REC_LIRCCODE;
+ ir->l->features |= LIRC_CAN_REC_LIRCCODE;
mutex_init(&rx->client_lock);
rx->c = client;
rx->hdpvr_data_fmt =
@@ -1578,7 +1547,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
"zilog-rx-i2c-%d", adap->nr);
if (IS_ERR(rx->task)) {
ret = PTR_ERR(rx->task);
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"%s: could not start IR Rx polling thread\n",
__func__);
/* Failed kthread, so put back the ir ref */
@@ -1586,7 +1555,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Failure exit, so put back rx ref from i2c_client */
i2c_set_clientdata(client, NULL);
put_ir_rx(rx, true);
- ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
+ ir->l->features &= ~LIRC_CAN_REC_LIRCCODE;
goto out_put_tx;
}
@@ -1599,17 +1568,19 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
/* register with lirc */
- ir->l.minor = lirc_register_driver(&ir->l);
- if (ir->l.minor < 0) {
- dev_err(tx->ir->l.dev,
- "%s: lirc_register_driver() failed: %i\n",
- __func__, ir->l.minor);
- ret = -EBADRQC;
+ ret = lirc_register_device(ir->l);
+ if (ret < 0) {
+ dev_err(tx->ir->dev,
+ "%s: lirc_register_device() failed: %i\n",
+ __func__, ret);
+ lirc_free_device(ir->l);
+ ir->l = NULL;
goto out_put_xx;
}
- dev_info(ir->l.dev,
+
+ dev_info(ir->dev,
"IR unit on %s (i2c-%d) registered as lirc%d and ready\n",
- adap->name, adap->nr, ir->l.minor);
+ adap->name, adap->nr, ir->l->minor);
out_ok:
if (rx)
@@ -1617,7 +1588,7 @@ out_ok:
if (tx)
put_ir_tx(tx, true);
put_ir_device(ir, true);
- dev_info(ir->l.dev,
+ dev_info(ir->dev,
"probe of IR %s on %s (i2c-%d) done\n",
tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
mutex_unlock(&ir_devices_lock);
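
For orientation, the lirc_dev sequence this conversion settles on can be
summarized as the sketch below. Field assignments and function names are taken
from the hunks above; error handling is abbreviated, and the surrounding ir,
adap and ret variables are assumed to exist as they do in ir_probe().

	struct lirc_dev *l = lirc_allocate_device();

	if (!l)
		return -ENOMEM;

	snprintf(l->name, sizeof(l->name), "lirc_zilog");
	l->code_length = 13;
	l->fops = &lirc_fops;
	l->owner = THIS_MODULE;
	l->dev.parent = &adap->dev;
	l->buf = &ir->rbuf;		/* driver-owned lirc_buffer */
	l->data = ir;			/* returned later by lirc_get_pdata(filep) */

	ret = lirc_buffer_init(l->buf, 2, BUFLEN / 2);
	if (ret) {
		lirc_free_device(l);
		return ret;
	}

	ret = lirc_register_device(l);
	if (ret < 0) {
		lirc_free_device(l);
		return ret;
	}
	/* teardown path: lirc_unregister_device(l) */
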
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index e05ae4645d91..30532d8c310b 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -364,39 +364,39 @@ static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
stats->tx_bytes = xlr_nae_rdreg(priv->base_addr, TX_BYTE_COUNTER);
stats->tx_errors = xlr_nae_rdreg(priv->base_addr, TX_FCS_ERROR_COUNTER);
stats->rx_dropped = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
+ RX_DROP_PACKET_COUNTER);
stats->tx_dropped = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
+ TX_DROP_FRAME_COUNTER);
stats->multicast = xlr_nae_rdreg(priv->base_addr,
- RX_MULTICAST_PACKET_COUNTER);
+ RX_MULTICAST_PACKET_COUNTER);
stats->collisions = xlr_nae_rdreg(priv->base_addr,
- TX_TOTAL_COLLISION_COUNTER);
+ TX_TOTAL_COLLISION_COUNTER);
stats->rx_length_errors = xlr_nae_rdreg(priv->base_addr,
- RX_FRAME_LENGTH_ERROR_COUNTER);
+ RX_FRAME_LENGTH_ERROR_COUNTER);
stats->rx_over_errors = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
+ RX_DROP_PACKET_COUNTER);
stats->rx_crc_errors = xlr_nae_rdreg(priv->base_addr,
- RX_FCS_ERROR_COUNTER);
+ RX_FCS_ERROR_COUNTER);
stats->rx_frame_errors = xlr_nae_rdreg(priv->base_addr,
- RX_ALIGNMENT_ERROR_COUNTER);
+ RX_ALIGNMENT_ERROR_COUNTER);
stats->rx_fifo_errors = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
+ RX_DROP_PACKET_COUNTER);
stats->rx_missed_errors = xlr_nae_rdreg(priv->base_addr,
- RX_CARRIER_SENSE_ERROR_COUNTER);
+ RX_CARRIER_SENSE_ERROR_COUNTER);
stats->rx_errors = (stats->rx_over_errors + stats->rx_crc_errors +
- stats->rx_frame_errors + stats->rx_fifo_errors +
- stats->rx_missed_errors);
+ stats->rx_frame_errors + stats->rx_fifo_errors +
+ stats->rx_missed_errors);
stats->tx_aborted_errors = xlr_nae_rdreg(priv->base_addr,
TX_EXCESSIVE_COLLISION_PACKET_COUNTER);
stats->tx_carrier_errors = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
+ TX_DROP_FRAME_COUNTER);
stats->tx_fifo_errors = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
+ TX_DROP_FRAME_COUNTER);
}
static const struct net_device_ops xlr_netdev_ops = {
@@ -448,41 +448,35 @@ static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0,
static void xlr_config_fifo_spill_area(struct xlr_net_priv *priv)
{
priv->frin_spill = xlr_config_spill(priv,
- R_REG_FRIN_SPILL_MEM_START_0,
- R_REG_FRIN_SPILL_MEM_START_1,
- R_REG_FRIN_SPILL_MEM_SIZE,
- MAX_FRIN_SPILL *
- sizeof(u64));
+ R_REG_FRIN_SPILL_MEM_START_0,
+ R_REG_FRIN_SPILL_MEM_START_1,
+ R_REG_FRIN_SPILL_MEM_SIZE,
+ MAX_FRIN_SPILL * sizeof(u64));
priv->frout_spill = xlr_config_spill(priv,
- R_FROUT_SPILL_MEM_START_0,
- R_FROUT_SPILL_MEM_START_1,
- R_FROUT_SPILL_MEM_SIZE,
- MAX_FROUT_SPILL *
- sizeof(u64));
+ R_FROUT_SPILL_MEM_START_0,
+ R_FROUT_SPILL_MEM_START_1,
+ R_FROUT_SPILL_MEM_SIZE,
+ MAX_FROUT_SPILL * sizeof(u64));
priv->class_0_spill = xlr_config_spill(priv,
- R_CLASS0_SPILL_MEM_START_0,
- R_CLASS0_SPILL_MEM_START_1,
- R_CLASS0_SPILL_MEM_SIZE,
- MAX_CLASS_0_SPILL *
- sizeof(u64));
+ R_CLASS0_SPILL_MEM_START_0,
+ R_CLASS0_SPILL_MEM_START_1,
+ R_CLASS0_SPILL_MEM_SIZE,
+ MAX_CLASS_0_SPILL * sizeof(u64));
priv->class_1_spill = xlr_config_spill(priv,
- R_CLASS1_SPILL_MEM_START_0,
- R_CLASS1_SPILL_MEM_START_1,
- R_CLASS1_SPILL_MEM_SIZE,
- MAX_CLASS_1_SPILL *
- sizeof(u64));
+ R_CLASS1_SPILL_MEM_START_0,
+ R_CLASS1_SPILL_MEM_START_1,
+ R_CLASS1_SPILL_MEM_SIZE,
+ MAX_CLASS_1_SPILL * sizeof(u64));
priv->class_2_spill = xlr_config_spill(priv,
- R_CLASS2_SPILL_MEM_START_0,
- R_CLASS2_SPILL_MEM_START_1,
- R_CLASS2_SPILL_MEM_SIZE,
- MAX_CLASS_2_SPILL *
- sizeof(u64));
+ R_CLASS2_SPILL_MEM_START_0,
+ R_CLASS2_SPILL_MEM_START_1,
+ R_CLASS2_SPILL_MEM_SIZE,
+ MAX_CLASS_2_SPILL * sizeof(u64));
priv->class_3_spill = xlr_config_spill(priv,
- R_CLASS3_SPILL_MEM_START_0,
- R_CLASS3_SPILL_MEM_START_1,
- R_CLASS3_SPILL_MEM_SIZE,
- MAX_CLASS_3_SPILL *
- sizeof(u64));
+ R_CLASS3_SPILL_MEM_START_0,
+ R_CLASS3_SPILL_MEM_START_1,
+ R_CLASS3_SPILL_MEM_SIZE,
+ MAX_CLASS_3_SPILL * sizeof(u64));
}
/*
diff --git a/drivers/staging/pi433/Documentation/pi433.txt b/drivers/staging/pi433/Documentation/pi433.txt
index 38b83b86c334..245fef33d688 100644
--- a/drivers/staging/pi433/Documentation/pi433.txt
+++ b/drivers/staging/pi433/Documentation/pi433.txt
@@ -20,7 +20,7 @@ Discription of driver operation
a) transmission
Each transmission can take place with a different configuration of the rf
-module. Therfore each application can set its own set of parameters. The driver
+module. Therefore each application can set its own set of parameters. The driver
takes care, that each transmission takes place with the parameterset of the
application, that requests the transmission. To allow the transmission to take
place in the background, a tx thread is introduced.
@@ -33,7 +33,7 @@ there is no receive request or the receiver is still waiting for something in
the air, the rf module is set to standby, the parameters for transmission gets
set, the hardware fifo of the rf chip gets preloaded and the transmission gets
started. Upon hardware fifo threshold interrupt it gets reloaded, thus enabling
-much longer telegrams then hardware fifo size. If the telegram is send and there
+much longer telegrams than the hardware fifo size. If the telegram is sent and there
is more data available in the kfifo, the procedure is repeated. If not the
transmission cycle ends.
@@ -41,7 +41,7 @@ b) reception
Since there is only one application allowed to receive data at a time, for
reception there is only one configuration set.
-As soon as an application sets an request for receiving a telegram, the reception
+As soon as an application sets a request for receiving a telegram, the reception
configuration set is written to the rf module and it gets set into receiving mode.
Now the driver is waiting, that a predefined RSSI level (signal strength at the
receiver) is reached. Until this hasn't happened, the reception can be
@@ -123,7 +123,7 @@ packet format:
optionOff - no preamble will be generated
enable_sync
optionOn - a sync word will be automatically added to
- the telegram after preamble
+ the telegram after the preamble
optionOff - no sync word will be added
Attention: While possible to generate sync without preamble, the
receiver won't be able to detect the sync without preamble.
@@ -136,7 +136,7 @@ packet format:
Attention: should be used in combination with sync, only
enable_address_byte
optionOn - the address byte will be automatically added to the
- telgram. It's part of the payload
+ telegram. It's part of the payload
optionOff - the address byte will not be added to the telegram.
The address byte can be used for address filtering, so the receiver
will only receive telegrams with a given address byte.
@@ -161,7 +161,7 @@ packet format:
one byte, used as address byte on address byte option.
-The rx configuration is transfered via struct pi433_rx_cfg, the parameterset for receiving. It is devided into two sections: rf parameters and packet format.
+The rx configuration is transferred via struct pi433_rx_cfg, the parameterset for receiving. It is divided into two sections: rf parameters and packet format.
rf params:
frequency
@@ -178,7 +178,7 @@ rf params:
OOK - on off key
rssi_threshold
threshold value for the signal strength on the receiver input.
- If this value is exeeded, a reception cycle starts
+ If this value is exceeded, a reception cycle starts
Allowed values: 0...255
thresholdDecrement
in order to adapt to different levels of singnal strength, over
@@ -198,7 +198,7 @@ rf params:
twohundretOhm - for antennas with an impedance of 200Ohm
lnaGain
sets the gain of the low noise amp
- automatic - lna gain is determed by an agc
+ automatic - lna gain is determined by an agc
max - lna gain is set to maximum
maxMinus6 - lna gain is set to 6db below max
maxMinus12 - lna gain is set to 12db below max
@@ -232,7 +232,7 @@ rf params:
amount of bytes that were requested by the read request.
Attention: should be used in combination with sync, only
enable_address_filtering;
- filteringOff - no adress filtering will take place
+ filteringOff - no address filtering will take place
nodeAddress - all telegrams, not matching the node
address will be internally discarded
nodeOrBroadcastAddress - all telegrams, neither matching the
@@ -245,7 +245,7 @@ rf params:
calculated crc doesn't match to two bytes,
that follow the payload, the telegram will be
internally discarded.
- Attention: This option is only operational, if sync on and fixed length
+ Attention: This option is only operational if sync on and fixed length
or length byte is used
sync_length
Gives the length of the payload.
@@ -255,9 +255,9 @@ rf params:
Overrides the telegram length either given by the first byte of
payload or by the read request.
bytes_to_drop
- gives the number of bytes, that will be dropped before transfering
+ gives the number of bytes that will be dropped before transferring
data to the read buffer
- This option is only usefull, if all packet helper are switched
+ This option is only useful if all packet helpers are switched
off and the rf chip is used in raw receiving mode. This may be
needed, if a telegram of a third party device should be received,
using a protocol not compatible with the packet engine of the rf69 chip.
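
A rough user-space sketch of the transmit flow described above. The device node
path and header name are assumptions; PI433_IOC_WR_TX_CFG, struct pi433_tx_cfg
and the plain write() interface are the ones this text and the driver refer to,
and the actual configuration fields are left out.

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "pi433_if.h"		/* assumed: PI433_IOC_*, struct pi433_tx_cfg */

	int main(void)
	{
		struct pi433_tx_cfg tx_cfg;
		int fd = open("/dev/pi433.0", O_RDWR);	/* assumed node name */

		if (fd < 0)
			return 1;

		memset(&tx_cfg, 0, sizeof(tx_cfg));
		/* fill in frequency, bit rate, modulation and packet options here */

		ioctl(fd, PI433_IOC_WR_TX_CFG, &tx_cfg);	/* per-fd tx parameter set */
		write(fd, "hello", 5);				/* queue one telegram */

		close(fd);
		return 0;
	}
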
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 93c01680f016..d946838450d4 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -136,17 +136,17 @@ static irqreturn_t DIO0_irq_handler(int irq, void *dev_id)
if (device->irq_state[DIO0] == DIO_PacketSent)
{
device->free_in_fifo = FIFO_SIZE;
- printk("DIO0 irq: Packet sent\n"); // TODO: printk() should include KERN_ facility level
+ dev_dbg(device->dev, "DIO0 irq: Packet sent\n");
wake_up_interruptible(&device->fifo_wait_queue);
}
else if (device->irq_state[DIO0] == DIO_Rssi_DIO0)
{
- printk("DIO0 irq: RSSI level over threshold\n");
+ dev_dbg(device->dev, "DIO0 irq: RSSI level over threshold\n");
wake_up_interruptible(&device->rx_wait_queue);
}
else if (device->irq_state[DIO0] == DIO_PayloadReady)
{
- printk("DIO0 irq: PayloadReady\n");
+ dev_dbg(device->dev, "DIO0 irq: PayloadReady\n");
device->free_in_fifo = 0;
wake_up_interruptible(&device->fifo_wait_queue);
}
@@ -167,7 +167,8 @@ static irqreturn_t DIO1_irq_handler(int irq, void *dev_id)
if (device->rx_active) device->free_in_fifo = FIFO_THRESHOLD - 1;
else device->free_in_fifo = FIFO_SIZE - FIFO_THRESHOLD - 1;
}
- printk("DIO1 irq: %d bytes free in fifo\n", device->free_in_fifo); // TODO: printk() should include KERN_ facility level
+ dev_dbg(device->dev,
+ "DIO1 irq: %d bytes free in fifo\n", device->free_in_fifo);
wake_up_interruptible(&device->fifo_wait_queue);
return IRQ_HANDLED;
@@ -284,8 +285,7 @@ rf69_set_tx_cfg(struct pi433_device *dev, struct pi433_tx_cfg *tx_cfg)
SET_CHECKED(rf69_set_crc_enable (dev->spi, tx_cfg->enable_crc));
/* configure sync, if enabled */
- if (tx_cfg->enable_sync == optionOn)
- {
+ if (tx_cfg->enable_sync == optionOn) {
SET_CHECKED(rf69_set_sync_size(dev->spi, tx_cfg->sync_length));
SET_CHECKED(rf69_set_sync_values(dev->spi, tx_cfg->sync_pattern));
}
@@ -407,8 +407,7 @@ pi433_receive(void *data)
if (retval) goto abort; /* wait was interrupted */
rf69_read_fifo(spi, (u8 *)&bytes_total, 1);
- if (bytes_total > dev->rx_buffer_size)
- {
+ if (bytes_total > dev->rx_buffer_size) {
retval = -1;
goto abort;
}
@@ -466,7 +465,7 @@ pi433_receive(void *data)
}
- /* rx done, wait was interrupted or error occured */
+ /* rx done, wait was interrupted or error occurred */
abort:
dev->interrupt_rx_allowed = true;
SET_CHECKED(rf69_set_mode(dev->spi, standby));
@@ -508,16 +507,14 @@ pi433_tx_thread(void *data)
mutex_lock(&device->tx_fifo_lock);
retval = kfifo_out(&device->tx_fifo, &tx_cfg, sizeof(tx_cfg));
- if (retval != sizeof(tx_cfg))
- {
+ if (retval != sizeof(tx_cfg)) {
dev_dbg(device->dev, "reading tx_cfg from fifo failed: got %d byte(s), expected %d", retval, (unsigned int)sizeof(tx_cfg) );
mutex_unlock(&device->tx_fifo_lock);
continue;
}
retval = kfifo_out(&device->tx_fifo, &size, sizeof(size_t));
- if (retval != sizeof(size_t))
- {
+ if (retval != sizeof(size_t)) {
dev_dbg(device->dev, "reading msg size from fifo failed: got %d, expected %d", retval, (unsigned int)sizeof(size_t) );
mutex_unlock(&device->tx_fifo_lock);
continue;
@@ -649,8 +646,7 @@ pi433_tx_thread(void *data)
SET_CHECKED(rf69_set_mode(spi, standby));
/* everything sent? */
- if ( kfifo_is_empty(&device->tx_fifo) )
- {
+ if (kfifo_is_empty(&device->tx_fifo)) {
abort:
if (rx_interrupted)
{
@@ -704,8 +700,7 @@ pi433_read(struct file *filp, char __user *buf, size_t size, loff_t *f_pos)
mutex_unlock(&device->rx_lock);
/* if read was successful copy to user space*/
- if (bytes_received > 0)
- {
+ if (bytes_received > 0) {
retval = copy_to_user(buf, device->rx_buffer, bytes_received);
if (retval)
return -EFAULT;
@@ -805,8 +800,7 @@ pi433_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case PI433_IOC_RD_TX_CFG:
tmp = _IOC_SIZE(cmd);
- if ( (tmp == 0) || ((tmp % sizeof(struct pi433_tx_cfg)) != 0) )
- {
+ if ((tmp == 0) || ((tmp % sizeof(struct pi433_tx_cfg)) != 0)) {
retval = -EINVAL;
break;
}
@@ -822,8 +816,7 @@ pi433_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
case PI433_IOC_WR_TX_CFG:
tmp = _IOC_SIZE(cmd);
- if ( (tmp == 0) || ((tmp % sizeof(struct pi433_tx_cfg)) != 0) )
- {
+ if ((tmp == 0) || ((tmp % sizeof(struct pi433_tx_cfg)) != 0)) {
retval = -EINVAL;
break;
}
@@ -916,8 +909,7 @@ static int pi433_open(struct inode *inode, struct file *filp)
if (!device->rx_buffer) {
device->rx_buffer = kmalloc(MAX_MSG_SIZE, GFP_KERNEL);
- if (!device->rx_buffer)
- {
+ if (!device->rx_buffer) {
dev_dbg(device->dev, "open/ENOMEM\n");
return -ENOMEM;
}
@@ -925,8 +917,7 @@ static int pi433_open(struct inode *inode, struct file *filp)
device->users++;
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
- if (!instance)
- {
+ if (!instance) {
kfree(device->rx_buffer);
device->rx_buffer = NULL;
return -ENOMEM;
@@ -986,8 +977,7 @@ static int setup_GPIOs(struct pi433_device *device)
snprintf(name, sizeof(name), "DIO%d", i);
device->gpiod[i] = gpiod_get(&device->spi->dev, name, 0 /*GPIOD_IN*/);
- if (device->gpiod[i] == ERR_PTR(-ENOENT))
- {
+ if (device->gpiod[i] == ERR_PTR(-ENOENT)) {
dev_dbg(&device->spi->dev, "Could not find entry for %s. Ignoring.", name);
continue;
}
@@ -1016,8 +1006,7 @@ static int setup_GPIOs(struct pi433_device *device)
/* configure irq */
device->irq_num[i] = gpiod_to_irq(device->gpiod[i]);
- if (device->irq_num[i] < 0)
- {
+ if (device->irq_num[i] < 0) {
device->gpiod[i] = ERR_PTR(-EINVAL);//(struct gpio_desc *)device->irq_num[i];
return device->irq_num[i];
}
@@ -1030,7 +1019,7 @@ static int setup_GPIOs(struct pi433_device *device)
if (retval)
return retval;
- dev_dbg(&device->spi->dev, "%s succesfully configured", name);
+ dev_dbg(&device->spi->dev, "%s successfully configured", name);
}
return 0;
@@ -1156,8 +1145,7 @@ static int pi433_probe(struct spi_device *spi)
/* setup GPIO (including irq_handler) for the different DIOs */
retval = setup_GPIOs(device);
- if (retval)
- {
+ if (retval) {
dev_dbg(&spi->dev, "setup of GPIOs failed");
goto GPIO_failed;
}
@@ -1175,16 +1163,14 @@ static int pi433_probe(struct spi_device *spi)
device->tx_task_struct = kthread_run(pi433_tx_thread,
device,
"pi433_tx_task");
- if (IS_ERR(device->tx_task_struct))
- {
+ if (IS_ERR(device->tx_task_struct)) {
dev_dbg(device->dev, "start of send thread failed");
goto send_thread_failed;
}
/* determ minor number */
retval = pi433_get_minor(device);
- if (retval)
- {
+ if (retval) {
dev_dbg(device->dev, "get of minor number failed");
goto minor_failed;
}
@@ -1213,8 +1199,7 @@ static int pi433_probe(struct spi_device *spi)
device->cdev->owner = THIS_MODULE;
cdev_init(device->cdev, &pi433_fops);
retval = cdev_add(device->cdev, device->devt, 1);
- if (retval)
- {
+ if (retval) {
dev_dbg(device->dev, "register of cdev failed");
goto cdev_failed;
}
@@ -1306,15 +1291,13 @@ static int __init pi433_init(void)
return status;
pi433_class = class_create(THIS_MODULE, "pi433");
- if (IS_ERR(pi433_class))
- {
+ if (IS_ERR(pi433_class)) {
unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name);
return PTR_ERR(pi433_class);
}
status = spi_register_driver(&pi433_spi_driver);
- if (status < 0)
- {
+ if (status < 0) {
class_destroy(pi433_class);
unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name);
}
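
The pi433.c hunks above are almost entirely coding-style cleanups: the opening brace of an if/else block moves onto the condition line and stray spaces inside parentheses are dropped. A minimal before/after sketch of the target style, taking the rx_buffer check from the hunk as the example:

	/* before: brace on its own line, spaces padding the condition */
	if ( !device->rx_buffer )
	{
		dev_dbg(device->dev, "open/ENOMEM\n");
		return -ENOMEM;
	}

	/* after: kernel style, opening brace cuddled onto the condition line */
	if (!device->rx_buffer) {
		dev_dbg(device->dev, "open/ENOMEM\n");
		return -ENOMEM;
	}
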
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index 290b419aa9dd..e69a2153c999 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -34,7 +34,7 @@
/*-------------------------------------------------------------------------*/
#define READ_REG(x) rf69_read_reg (spi, x)
-#define WRITE_REG(x,y) rf69_write_reg(spi, x, y)
+#define WRITE_REG(x, y) rf69_write_reg(spi, x, y)
/*-------------------------------------------------------------------------*/
@@ -164,9 +164,12 @@ int rf69_set_bit_rate(struct spi_device *spi, u16 bitRate)
// transmit to RF 69
retval = WRITE_REG(REG_BITRATE_MSB, msb);
- if (retval) return retval;
+ if (retval)
+ return retval;
+
retval = WRITE_REG(REG_BITRATE_LSB, lsb);
- if (retval) return retval;
+ if (retval)
+ return retval;
return 0;
}
@@ -196,7 +199,7 @@ int rf69_set_deviation(struct spi_device *spi, u32 deviation)
// calculate register settings
f_reg = deviation * factor;
- do_div(f_reg , f_step);
+ do_div(f_reg, f_step);
msb = (f_reg&0xff00) >> 8;
lsb = (f_reg&0xff);
@@ -209,9 +212,12 @@ int rf69_set_deviation(struct spi_device *spi, u32 deviation)
// write to chip
retval = WRITE_REG(REG_FDEV_MSB, msb);
- if (retval) return retval;
+ if (retval)
+ return retval;
+
retval = WRITE_REG(REG_FDEV_LSB, lsb);
- if (retval) return retval;
+ if (retval)
+ return retval;
return 0;
}
@@ -244,7 +250,7 @@ int rf69_set_frequency(struct spi_device *spi, u32 frequency)
// calculate reg settings
f_reg = frequency * factor;
- do_div(f_reg , f_step);
+ do_div(f_reg, f_step);
msb = (f_reg&0xff0000) >> 16;
mid = (f_reg&0xff00) >> 8;
@@ -252,11 +258,16 @@ int rf69_set_frequency(struct spi_device *spi, u32 frequency)
// write to chip
retval = WRITE_REG(REG_FRF_MSB, msb);
- if (retval) return retval;
+ if (retval)
+ return retval;
+
retval = WRITE_REG(REG_FRF_MID, mid);
- if (retval) return retval;
+ if (retval)
+ return retval;
+
retval = WRITE_REG(REG_FRF_LSB, lsb);
- if (retval) return retval;
+ if (retval)
+ return retval;
return 0;
}
@@ -267,9 +278,9 @@ int rf69_set_amplifier_0(struct spi_device *spi, enum optionOnOff optionOnOff)
dev_dbg(&spi->dev, "set: amp #0");
#endif
- switch(optionOnOff) {
- case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA0) );
- case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA0) );
+ switch (optionOnOff) {
+ case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA0));
+ case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA0));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -282,9 +293,9 @@ int rf69_set_amplifier_1(struct spi_device *spi, enum optionOnOff optionOnOff)
dev_dbg(&spi->dev, "set: amp #1");
#endif
- switch(optionOnOff) {
- case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA1) );
- case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA1) );
+ switch (optionOnOff) {
+ case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA1));
+ case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA1));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -297,9 +308,9 @@ int rf69_set_amplifier_2(struct spi_device *spi, enum optionOnOff optionOnOff)
dev_dbg(&spi->dev, "set: amp #2");
#endif
- switch(optionOnOff) {
- case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA2) );
- case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA2) );
+ switch (optionOnOff) {
+ case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA2));
+ case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA2));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -312,7 +323,7 @@ int rf69_set_output_power_level(struct spi_device *spi, u8 powerLevel)
dev_dbg(&spi->dev, "set: power level");
#endif
- powerLevel +=18; // TODO dependency on the PA0,1,2 setting
+ powerLevel += 18; // TODO dependency on the PA0,1,2 setting
// check input value
if (powerLevel > 0x1f) {
@@ -330,7 +341,7 @@ int rf69_set_pa_ramp(struct spi_device *spi, enum paRamp paRamp)
dev_dbg(&spi->dev, "set: pa ramp");
#endif
- switch(paRamp) {
+ switch (paRamp) {
case ramp3400: return WRITE_REG(REG_PARAMP, PARAMP_3400);
case ramp2000: return WRITE_REG(REG_PARAMP, PARAMP_2000);
case ramp1000: return WRITE_REG(REG_PARAMP, PARAMP_1000);
@@ -359,9 +370,9 @@ int rf69_set_antenna_impedance(struct spi_device *spi, enum antennaImpedance ant
dev_dbg(&spi->dev, "set: antenna impedance");
#endif
- switch(antennaImpedance) {
- case fiftyOhm: return WRITE_REG(REG_LNA, (READ_REG(REG_LNA) & ~MASK_LNA_ZIN) );
- case twohundretOhm: return WRITE_REG(REG_LNA, (READ_REG(REG_LNA) | MASK_LNA_ZIN) );
+ switch (antennaImpedance) {
+ case fiftyOhm: return WRITE_REG(REG_LNA, (READ_REG(REG_LNA) & ~MASK_LNA_ZIN));
+ case twohundretOhm: return WRITE_REG(REG_LNA, (READ_REG(REG_LNA) | MASK_LNA_ZIN));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -374,14 +385,14 @@ int rf69_set_lna_gain(struct spi_device *spi, enum lnaGain lnaGain)
dev_dbg(&spi->dev, "set: lna gain");
#endif
- switch(lnaGain) {
- case automatic: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_AUTO) );
- case max: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX) );
- case maxMinus6: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_6) );
- case maxMinus12: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_12) );
- case maxMinus24: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_24) );
- case maxMinus36: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_36) );
- case maxMinus48: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_48) );
+ switch (lnaGain) {
+ case automatic: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_AUTO));
+ case max: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX));
+ case maxMinus6: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_6));
+ case maxMinus12: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_12));
+ case maxMinus24: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_24));
+ case maxMinus36: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_36));
+ case maxMinus48: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_48));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -410,17 +421,17 @@ enum lnaGain rf69_get_lna_gain(struct spi_device *spi)
}
}
-int rf69_set_dc_cut_off_frequency_intern(struct spi_device *spi ,u8 reg, enum dccPercent dccPercent)
+int rf69_set_dc_cut_off_frequency_intern(struct spi_device *spi, u8 reg, enum dccPercent dccPercent)
{
switch (dccPercent) {
- case dcc16Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_16_PERCENT) );
- case dcc8Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_8_PERCENT) );
- case dcc4Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_4_PERCENT) );
- case dcc2Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_2_PERCENT) );
- case dcc1Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_1_PERCENT) );
- case dcc0_5Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_5_PERCENT) );
- case dcc0_25Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_25_PERCENT) );
- case dcc0_125Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_125_PERCENT) );
+ case dcc16Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_16_PERCENT));
+ case dcc8Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_8_PERCENT));
+ case dcc4Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_4_PERCENT));
+ case dcc2Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_2_PERCENT));
+ case dcc1Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_1_PERCENT));
+ case dcc0_5Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_5_PERCENT));
+ case dcc0_25Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_25_PERCENT));
+ case dcc0_125Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_125_PERCENT));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -470,10 +481,16 @@ static int rf69_set_bandwidth_intern(struct spi_device *spi, u8 reg,
newValue = newValue & MASK_BW_DCC_FREQ;
// add new mantisse
- switch(mantisse) {
- case mantisse16: newValue = newValue | BW_MANT_16; break;
- case mantisse20: newValue = newValue | BW_MANT_20; break;
- case mantisse24: newValue = newValue | BW_MANT_24; break;
+ switch (mantisse) {
+ case mantisse16:
+ newValue = newValue | BW_MANT_16;
+ break;
+ case mantisse20:
+ newValue = newValue | BW_MANT_20;
+ break;
+ case mantisse24:
+ newValue = newValue | BW_MANT_24;
+ break;
}
// add new exponent
@@ -508,9 +525,9 @@ int rf69_set_ook_threshold_type(struct spi_device *spi, enum thresholdType thres
#endif
switch (thresholdType) {
- case fixed: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_FIXED) );
- case peak: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_PEAK) );
- case average: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_AVERAGE) );
+ case fixed: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_FIXED));
+ case peak: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_PEAK));
+ case average: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_AVERAGE));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -524,14 +541,14 @@ int rf69_set_ook_threshold_step(struct spi_device *spi, enum thresholdStep thres
#endif
switch (thresholdStep) {
- case step_0_5db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_0_5_DB) );
- case step_1_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_1_0_DB) );
- case step_1_5db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_1_5_DB) );
- case step_2_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_2_0_DB) );
- case step_3_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_3_0_DB) );
- case step_4_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_4_0_DB) );
- case step_5_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_5_0_DB) );
- case step_6_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_6_0_DB) );
+ case step_0_5db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_0_5_DB));
+ case step_1_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_1_0_DB));
+ case step_1_5db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_1_5_DB));
+ case step_2_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_2_0_DB));
+ case step_3_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_3_0_DB));
+ case step_4_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_4_0_DB));
+ case step_5_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_5_0_DB));
+ case step_6_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_6_0_DB));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -545,14 +562,14 @@ int rf69_set_ook_threshold_dec(struct spi_device *spi, enum thresholdDecrement t
#endif
switch (thresholdDecrement) {
- case dec_every8th: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_8TH) );
- case dec_every4th: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_4TH) );
- case dec_every2nd: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_2ND) );
- case dec_once: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_ONCE) );
- case dec_twice: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_TWICE) );
- case dec_4times: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_4_TIMES) );
- case dec_8times: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_8_TIMES) );
- case dec_16times: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_16_TIMES) );
+ case dec_every8th: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_8TH));
+ case dec_every4th: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_4TH));
+ case dec_every2nd: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_2ND));
+ case dec_once: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_ONCE));
+ case dec_twice: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_TWICE));
+ case dec_4times: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_4_TIMES));
+ case dec_8times: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_8_TIMES));
+ case dec_16times: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_16_TIMES));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -571,25 +588,37 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value)
#endif
switch (DIONumber) {
- case 0: mask=MASK_DIO0; shift=SHIFT_DIO0; regaddr=REG_DIOMAPPING1; break;
- case 1: mask=MASK_DIO1; shift=SHIFT_DIO1; regaddr=REG_DIOMAPPING1; break;
- case 2: mask=MASK_DIO2; shift=SHIFT_DIO2; regaddr=REG_DIOMAPPING1; break;
- case 3: mask=MASK_DIO3; shift=SHIFT_DIO3; regaddr=REG_DIOMAPPING1; break;
- case 4: mask=MASK_DIO4; shift=SHIFT_DIO4; regaddr=REG_DIOMAPPING2; break;
- case 5: mask=MASK_DIO5; shift=SHIFT_DIO5; regaddr=REG_DIOMAPPING2; break;
+ case 0:
+ mask = MASK_DIO0; shift = SHIFT_DIO0; regaddr = REG_DIOMAPPING1;
+ break;
+ case 1:
+ mask = MASK_DIO1; shift = SHIFT_DIO1; regaddr = REG_DIOMAPPING1;
+ break;
+ case 2:
+ mask = MASK_DIO2; shift = SHIFT_DIO2; regaddr = REG_DIOMAPPING1;
+ break;
+ case 3:
+ mask = MASK_DIO3; shift = SHIFT_DIO3; regaddr = REG_DIOMAPPING1;
+ break;
+ case 4:
+ mask = MASK_DIO4; shift = SHIFT_DIO4; regaddr = REG_DIOMAPPING2;
+ break;
+ case 5:
+ mask = MASK_DIO5; shift = SHIFT_DIO5; regaddr = REG_DIOMAPPING2;
+ break;
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
// read reg
- regValue=READ_REG(regaddr);
+ regValue = READ_REG(regaddr);
// delete old value
regValue = regValue & ~mask;
// add new value
regValue = regValue | value << shift;
// write back
- return WRITE_REG(regaddr,regValue);
+ return WRITE_REG(regaddr, regValue);
}
bool rf69_get_flag(struct spi_device *spi, enum flag flag)
@@ -598,7 +627,7 @@ bool rf69_get_flag(struct spi_device *spi, enum flag flag)
dev_dbg(&spi->dev, "get: flag");
#endif
- switch(flag) {
+ switch (flag) {
case modeSwitchCompleted: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_MODE_READY);
case readyToReceive: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_RX_READY);
case readyToSend: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_TX_READY);
@@ -626,7 +655,7 @@ int rf69_reset_flag(struct spi_device *spi, enum flag flag)
dev_dbg(&spi->dev, "reset: flag");
#endif
- switch(flag) {
+ switch (flag) {
case rssiExceededThreshold: return WRITE_REG(REG_IRQFLAGS1, MASK_IRQFLAGS1_RSSI);
case syncAddressMatch: return WRITE_REG(REG_IRQFLAGS1, MASK_IRQFLAGS1_SYNC_ADDRESS_MATCH);
case fifoOverrun: return WRITE_REG(REG_IRQFLAGS2, MASK_IRQFLAGS2_FIFO_OVERRUN);
@@ -686,10 +715,9 @@ int rf69_set_preamble_length(struct spi_device *spi, u16 preambleLength)
/* transmit to chip */
retval = WRITE_REG(REG_PREAMBLE_MSB, msb);
- if (retval) return retval;
- retval = WRITE_REG(REG_PREAMBLE_LSB, lsb);
-
- return retval;
+ if (retval)
+ return retval;
+ return WRITE_REG(REG_PREAMBLE_LSB, lsb);
}
int rf69_set_sync_enable(struct spi_device *spi, enum optionOnOff optionOnOff)
@@ -698,9 +726,9 @@ int rf69_set_sync_enable(struct spi_device *spi, enum optionOnOff optionOnOff)
dev_dbg(&spi->dev, "set: sync enable");
#endif
- switch(optionOnOff) {
- case optionOn: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) | MASK_SYNC_CONFIG_SYNC_ON) );
- case optionOff: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_ON) );
+ switch (optionOnOff) {
+ case optionOn: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) | MASK_SYNC_CONFIG_SYNC_ON));
+ case optionOff: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_ON));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -713,9 +741,9 @@ int rf69_set_fifo_fill_condition(struct spi_device *spi, enum fifoFillCondition
dev_dbg(&spi->dev, "set: fifo fill condition");
#endif
- switch(fifoFillCondition) {
- case always: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) | MASK_SYNC_CONFIG_FIFO_FILL_CONDITION) );
- case afterSyncInterrupt: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_FIFO_FILL_CONDITION) );
+ switch (fifoFillCondition) {
+ case always: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) | MASK_SYNC_CONFIG_FIFO_FILL_CONDITION));
+ case afterSyncInterrupt: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_FIFO_FILL_CONDITION));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -735,7 +763,7 @@ int rf69_set_sync_size(struct spi_device *spi, u8 syncSize)
}
// write value
- return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_SIZE) | (syncSize << 3) );
+ return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_SIZE) | (syncSize << 3));
}
int rf69_set_sync_tolerance(struct spi_device *spi, u8 syncTolerance)
@@ -780,9 +808,9 @@ int rf69_set_packet_format(struct spi_device *spi, enum packetFormat packetForma
dev_dbg(&spi->dev, "set: packet format");
#endif
- switch(packetFormat) {
- case packetLengthVar: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) | MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE) );
- case packetLengthFix: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE) );
+ switch (packetFormat) {
+ case packetLengthVar: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) | MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE));
+ case packetLengthFix: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -795,9 +823,9 @@ int rf69_set_crc_enable(struct spi_device *spi, enum optionOnOff optionOnOff)
dev_dbg(&spi->dev, "set: crc enable");
#endif
- switch(optionOnOff) {
- case optionOn: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) | MASK_PACKETCONFIG1_CRC_ON) );
- case optionOff: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_CRC_ON) );
+ switch (optionOnOff) {
+ case optionOn: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) | MASK_PACKETCONFIG1_CRC_ON));
+ case optionOff: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_CRC_ON));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -811,9 +839,9 @@ int rf69_set_adressFiltering(struct spi_device *spi, enum addressFiltering addre
#endif
switch (addressFiltering) {
- case filteringOff: return WRITE_REG(REG_PACKETCONFIG1, ( (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_OFF) );
- case nodeAddress: return WRITE_REG(REG_PACKETCONFIG1, ( (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_NODE) );
- case nodeOrBroadcastAddress: return WRITE_REG(REG_PACKETCONFIG1, ( (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_NODEBROADCAST) );
+ case filteringOff: return WRITE_REG(REG_PACKETCONFIG1, ((READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_OFF));
+ case nodeAddress: return WRITE_REG(REG_PACKETCONFIG1, ((READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_NODE));
+ case nodeOrBroadcastAddress: return WRITE_REG(REG_PACKETCONFIG1, ((READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_NODEBROADCAST));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -862,9 +890,9 @@ int rf69_set_tx_start_condition(struct spi_device *spi, enum txStartCondition tx
dev_dbg(&spi->dev, "set: start condition");
#endif
- switch(txStartCondition) {
- case fifoLevel: return WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) & ~MASK_FIFO_THRESH_TXSTART) );
- case fifoNotEmpty: return WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) | MASK_FIFO_THRESH_TXSTART) );
+ switch (txStartCondition) {
+ case fifoLevel: return WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) & ~MASK_FIFO_THRESH_TXSTART));
+ case fifoNotEmpty: return WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) | MASK_FIFO_THRESH_TXSTART));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -891,7 +919,7 @@ int rf69_set_fifo_threshold(struct spi_device *spi, u8 threshold)
return retval;
// access the fifo to activate new threshold
- return rf69_read_fifo (spi, (u8*) &retval, 1); // retval used as buffer
+ return rf69_read_fifo(spi, (u8 *)&retval, 1); // retval used as buffer
}
int rf69_set_dagc(struct spi_device *spi, enum dagc dagc)
@@ -900,7 +928,7 @@ int rf69_set_dagc(struct spi_device *spi, enum dagc dagc)
dev_dbg(&spi->dev, "set: dagc");
#endif
- switch(dagc) {
+ switch (dagc) {
case normalMode: return WRITE_REG(REG_TESTDAGC, DAGC_NORMAL);
case improve: return WRITE_REG(REG_TESTDAGC, DAGC_IMPROVED_LOWBETA0);
case improve4LowModulationIndex: return WRITE_REG(REG_TESTDAGC, DAGC_IMPROVED_LOWBETA1);
@@ -931,14 +959,14 @@ int rf69_read_fifo (struct spi_device *spi, u8 *buffer, unsigned int size)
/* prepare a bidirectional transfer */
local_buffer[0] = REG_FIFO;
memset(&transfer, 0, sizeof(transfer));
- transfer.tx_buf = local_buffer;
- transfer.rx_buf = local_buffer;
+ transfer.tx_buf = local_buffer;
+ transfer.rx_buf = local_buffer;
transfer.len = size+1;
retval = spi_sync_transfer(spi, &transfer, 1);
#ifdef DEBUG_FIFO_ACCESS
- for (i=0; i<size; i++)
+ for (i = 0; i < size; i++)
dev_dbg(&spi->dev, "%d - 0x%x\n", i, local_buffer[i+1]);
#endif
@@ -966,8 +994,8 @@ int rf69_write_fifo(struct spi_device *spi, u8 *buffer, unsigned int size)
memcpy(&local_buffer[1], buffer, size); // TODO: would be nicer without the memcpy
#ifdef DEBUG_FIFO_ACCESS
- for (i=0; i<size; i++)
- dev_dbg(&spi->dev, "0x%x\n",buffer[i]);
+ for (i = 0; i < size; i++)
+ dev_dbg(&spi->dev, "0x%x\n", buffer[i]);
#endif
return spi_write (spi, local_buffer, size + 1);
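
The rf69.c hunks are also formatting-only, but the long case labels all follow the same read-modify-write idiom, which is easier to see spelled out. A sketch using the REG_OOKPEAK threshold-type case from above; behaviour is unchanged, the intermediate variable exists only for illustration:

	u8 value;

	value = READ_REG(REG_OOKPEAK);		/* current register contents      */
	value &= ~MASK_OOKPEAK_THRESTYPE;	/* clear the threshold-type field */
	value |= OOKPEAK_THRESHTYPE_PEAK;	/* select the new field value     */
	return WRITE_REG(REG_OOKPEAK, value);	/* write the combined byte back   */
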
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 32a483769975..fa611455109a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -754,7 +754,7 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
}
/* setting only at first time */
- if (!(pmlmepriv->cur_network.join_res)) {
+ if (pmlmepriv->cur_network.join_res != true) {
/* WEP Key will be set before this function, do not
* clear CAM.
*/
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 9461bce883ea..be8542676adf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -333,7 +333,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
else
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
if (!pcmd) {
res = _FAIL;
goto exit;
@@ -508,7 +508,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
if (enqueue) {
/* need enqueue, prepare cmd_obj and enqueue */
- cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
+ cmdobj = kzalloc(sizeof(*cmdobj), GFP_ATOMIC);
if (!cmdobj) {
res = _FAIL;
kfree(param);
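
The two rtw_cmd.c hunks change the allocation flag rather than the style: GFP_KERNEL may sleep while reclaiming memory and is only safe in process context, whereas GFP_ATOMIC never sleeps, which is what these command paths need if they can be reached from atomic context. A minimal sketch of the resulting pattern:

	pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC); /* safe when the caller cannot sleep */
	if (!pcmd) {
		res = _FAIL;	/* GFP_ATOMIC allocations fail more readily, so always check */
		goto exit;
	}
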
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index b9bdff0490ca..2c4c8c43b1ad 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -48,7 +48,7 @@ void Efuse_PowerSwitch(
if (PwrState) {
usb_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
- /* 1.2V Power: From VDDON with Power Cut(0x0000h[15]), defualt valid */
+ /* 1.2V Power: From VDDON with Power Cut(0x0000h[15]), default valid */
tmpV16 = usb_read16(pAdapter, REG_SYS_ISO_CTRL);
if (!(tmpV16 & PWC_EV12V)) {
tmpV16 |= PWC_EV12V;
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
index 1b9bc9817a57..c4335893d8f6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_led.c
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -22,9 +22,9 @@
/* Callback function of LED BlinkTimer, */
/* it just schedules to corresponding BlinkWorkItem/led_blink_hdl */
/* */
-void BlinkTimerCallback(unsigned long data)
+static void BlinkTimerCallback(struct timer_list *t)
{
- struct LED_871x *pLed = (struct LED_871x *)data;
+ struct LED_871x *pLed = from_timer(pLed, t, BlinkTimer);
struct adapter *padapter = pLed->padapter;
if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
@@ -73,8 +73,7 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed)
ResetLedStatus(pLed);
- setup_timer(&pLed->BlinkTimer, BlinkTimerCallback,
- (unsigned long)pLed);
+ timer_setup(&pLed->BlinkTimer, BlinkTimerCallback, 0);
INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback);
}
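
rtw_led.c is converted from the old setup_timer() interface, which passed the private pointer as an unsigned long, to timer_setup(): the callback now receives the struct timer_list itself and recovers its container via from_timer(), a container_of() wrapper. A minimal sketch of the pattern; the names my_led and blink() are hypothetical:

	struct my_led {
		struct timer_list BlinkTimer;
	};

	static void blink(struct timer_list *t)
	{
		struct my_led *led = from_timer(led, t, BlinkTimer); /* container_of() on BlinkTimer */

		/* ... handle the timer expiry ... */
	}

	/* registration: no (unsigned long) cast of the private pointer any more */
	timer_setup(&led->BlinkTimer, blink, 0);
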
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index f663e6c41f8a..1cd49e292804 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -106,10 +106,10 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
- rtw_free_mlme_priv_ie_data(pmlmepriv);
-
- if (pmlmepriv)
+ if (pmlmepriv) {
+ rtw_free_mlme_priv_ie_data(pmlmepriv);
vfree(pmlmepriv->free_bss_buf);
+ }
}
struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)
@@ -1135,7 +1135,7 @@ static u8 search_max_mac_id(struct adapter *padapter)
#if defined(CONFIG_88EU_AP_MODE)
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- for (aid = (pstapriv->max_num_sta); aid > 0; aid--) {
+ for (aid = pstapriv->max_num_sta; aid > 0; aid--) {
if (pstapriv->sta_aid[aid-1])
break;
}
@@ -1143,7 +1143,7 @@ static u8 search_max_mac_id(struct adapter *padapter)
} else
#endif
{/* adhoc id = 31~2 */
- for (mac_id = (NUM_STA-1); mac_id >= IBSS_START_MAC_ID; mac_id--) {
+ for (mac_id = NUM_STA-1; mac_id >= IBSS_START_MAC_ID; mac_id--) {
if (pmlmeinfo->FW_sta_info[mac_id].status == 1)
break;
}
@@ -1329,12 +1329,13 @@ void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf)
}
/*
- * _rtw_join_timeout_handler - Timeout/faliure handler for CMD JoinBss
+ * _rtw_join_timeout_handler - Timeout/failure handler for CMD JoinBss
* @adapter: pointer to struct adapter structure
*/
-void _rtw_join_timeout_handler (unsigned long data)
+void _rtw_join_timeout_handler (struct timer_list *t)
{
- struct adapter *adapter = (struct adapter *)data;
+ struct adapter *adapter =
+ from_timer(adapter, t, mlmepriv.assoc_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
int do_join_r;
@@ -1373,9 +1374,10 @@ void _rtw_join_timeout_handler (unsigned long data)
* rtw_scan_timeout_handler - Timeout/Failure handler for CMD SiteSurvey
* @adapter: pointer to struct adapter structure
*/
-void rtw_scan_timeout_handler (unsigned long data)
+void rtw_scan_timeout_handler (struct timer_list *t)
{
- struct adapter *adapter = (struct adapter *)data;
+ struct adapter *adapter =
+ from_timer(adapter, t, mlmepriv.scan_to_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
DBG_88E(FUNC_ADPT_FMT" fw_state=%x\n", FUNC_ADPT_ARG(adapter), get_fwstate(pmlmepriv));
@@ -1400,9 +1402,10 @@ static void rtw_auto_scan_handler(struct adapter *padapter)
}
}
-void rtw_dynamic_check_timer_handlder(unsigned long data)
+void rtw_dynamic_check_timer_handlder(struct timer_list *t)
{
- struct adapter *adapter = (struct adapter *)data;
+ struct adapter *adapter =
+ from_timer(adapter, t, mlmepriv.dynamic_chk_timer);
struct registry_priv *pregistrypriv = &adapter->registrypriv;
if (!adapter)
@@ -1569,7 +1572,7 @@ int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
psetauthparm->mode = (unsigned char)psecuritypriv->dot11AuthAlgrthm;
pcmd->cmdcode = _SetAuth_CMD_;
pcmd->parmbuf = (unsigned char *)psetauthparm;
- pcmd->cmdsz = (sizeof(struct setauth_parm));
+ pcmd->cmdsz = sizeof(struct setauth_parm);
pcmd->rsp = NULL;
pcmd->rspsz = 0;
INIT_LIST_HEAD(&pcmd->list);
@@ -1648,7 +1651,7 @@ int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, in
}
pcmd->cmdcode = _SetKey_CMD_;
pcmd->parmbuf = (u8 *)psetkeyparm;
- pcmd->cmdsz = (sizeof(struct setkey_parm));
+ pcmd->cmdsz = sizeof(struct setkey_parm);
pcmd->rsp = NULL;
pcmd->rspsz = 0;
INIT_LIST_HEAD(&pcmd->list);
@@ -1814,45 +1817,45 @@ void rtw_update_registrypriv_dev_network(struct adapter *adapter)
struct security_priv *psecuritypriv = &adapter->securitypriv;
struct wlan_network *cur_network = &adapter->mlmepriv.cur_network;
- pdev_network->Privacy = (psecuritypriv->dot11PrivacyAlgrthm > 0 ? 1 : 0); /* adhoc no 802.1x */
+ pdev_network->Privacy = psecuritypriv->dot11PrivacyAlgrthm > 0 ? 1 : 0; /* adhoc no 802.1x */
pdev_network->Rssi = 0;
switch (pregistrypriv->wireless_mode) {
case WIRELESS_11B:
- pdev_network->NetworkTypeInUse = (Ndis802_11DS);
+ pdev_network->NetworkTypeInUse = Ndis802_11DS;
break;
case WIRELESS_11G:
case WIRELESS_11BG:
case WIRELESS_11_24N:
case WIRELESS_11G_24N:
case WIRELESS_11BG_24N:
- pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24);
+ pdev_network->NetworkTypeInUse = Ndis802_11OFDM24;
break;
case WIRELESS_11A:
case WIRELESS_11A_5N:
- pdev_network->NetworkTypeInUse = (Ndis802_11OFDM5);
+ pdev_network->NetworkTypeInUse = Ndis802_11OFDM5;
break;
case WIRELESS_11ABGN:
if (pregistrypriv->channel > 14)
- pdev_network->NetworkTypeInUse = (Ndis802_11OFDM5);
+ pdev_network->NetworkTypeInUse = Ndis802_11OFDM5;
else
- pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24);
+ pdev_network->NetworkTypeInUse = Ndis802_11OFDM24;
break;
default:
/* TODO */
break;
}
- pdev_network->Configuration.DSConfig = (pregistrypriv->channel);
+ pdev_network->Configuration.DSConfig = pregistrypriv->channel;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
("pregistrypriv->channel=%d, pdev_network->Configuration.DSConfig=0x%x\n",
pregistrypriv->channel, pdev_network->Configuration.DSConfig));
if (cur_network->network.InfrastructureMode == Ndis802_11IBSS)
- pdev_network->Configuration.ATIMWindow = (0);
+ pdev_network->Configuration.ATIMWindow = 0;
- pdev_network->InfrastructureMode = (cur_network->network.InfrastructureMode);
+ pdev_network->InfrastructureMode = cur_network->network.InfrastructureMode;
/* 1. Supported rates */
/* 2. IE */
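
The timer handlers in rtw_mlme.c use the same conversion, but their timers live inside an embedded sub-structure; from_timer() accepts the full member path (here mlmepriv.assoc_timer and friends), so container_of() still resolves back to the enclosing adapter. A sketch with hypothetical outer/inner names:

	struct inner {
		struct timer_list timer;
	};

	struct outer {
		struct inner sub;
	};

	static void outer_timeout(struct timer_list *t)
	{
		struct outer *o = from_timer(o, t, sub.timer); /* member path through 'sub' */

		/* ... */
	}
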
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 611c9409bb98..d73e9bdc80cc 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -605,7 +605,9 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
dump_mgntframe(padapter, pmgntframe);
}
-static int issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, bool wait_ack)
+static int issue_probereq(struct adapter *padapter,
+ struct ndis_802_11_ssid *pssid, u8 *da,
+ bool wait_ack)
{
int ret = _FAIL;
struct xmit_frame *pmgntframe;
@@ -707,7 +709,7 @@ static int issue_probereq_ex(struct adapter *padapter,
unsigned long start = jiffies;
do {
- ret = issue_probereq(padapter, pssid, da, wait_ms > 0 ? true : false);
+ ret = issue_probereq(padapter, pssid, da, wait_ms > 0);
i++;
@@ -1196,7 +1198,8 @@ exit:
}
/* when wait_ack is true, this function should be called at process context */
-static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int wait_ack)
+static int _issue_nulldata(struct adapter *padapter, unsigned char *da,
+ unsigned int power_mode, bool wait_ack)
{
int ret = _FAIL;
struct xmit_frame *pmgntframe;
@@ -1269,7 +1272,8 @@ exit:
/* when wait_ms > 0 , this function should be called at process context */
/* da == NULL for station mode */
-int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms)
+int issue_nulldata(struct adapter *padapter, unsigned char *da,
+ unsigned int power_mode, int try_cnt, int wait_ms)
{
int ret;
int i = 0;
@@ -1283,7 +1287,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
da = pnetwork->MacAddress;
do {
- ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0 ? true : false);
+ ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0);
i++;
@@ -1316,7 +1320,8 @@ exit:
}
/* when wait_ack is true, this function should be called at process context */
-static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int wait_ack)
+static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
+ u16 tid, bool wait_ack)
{
int ret = _FAIL;
struct xmit_frame *pmgntframe;
@@ -1396,7 +1401,8 @@ exit:
/* when wait_ms > 0 , this function should be called at process context */
/* da == NULL for station mode */
-int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int try_cnt, int wait_ms)
+int issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
+ u16 tid, int try_cnt, int wait_ms)
{
int ret;
int i = 0;
@@ -1410,7 +1416,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
da = pnetwork->MacAddress;
do {
- ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0 ? true : false);
+ ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0);
i++;
@@ -1442,7 +1448,8 @@ exit:
return ret;
}
-static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason, u8 wait_ack)
+static int _issue_deauth(struct adapter *padapter, unsigned char *da,
+ unsigned short reason, bool wait_ack)
{
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
@@ -1502,7 +1509,8 @@ exit:
return ret;
}
-int issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason)
+int issue_deauth(struct adapter *padapter, unsigned char *da,
+ unsigned short reason)
{
DBG_88E("%s to %pM\n", __func__, da);
return _issue_deauth(padapter, da, reason, false);
@@ -1517,7 +1525,7 @@ static int issue_deauth_ex(struct adapter *padapter, u8 *da,
unsigned long start = jiffies;
do {
- ret = _issue_deauth(padapter, da, reason, wait_ms > 0 ? true : false);
+ ret = _issue_deauth(padapter, da, reason, wait_ms > 0);
i++;
@@ -3413,7 +3421,7 @@ static unsigned int OnAssocRsp(struct adapter *padapter,
/* following are moved to join event callback function */
/* to handle HT, WMM, rate adaptive, update MAC reg */
/* for not to handle the synchronous IO in the tasklet */
- for (i = (6 + WLAN_HDR_A3_LEN); i < pkt_len;) {
+ for (i = 6 + WLAN_HDR_A3_LEN; i < pkt_len;) {
pIE = (struct ndis_802_11_var_ie *)(pframe + i);
switch (pIE->ElementID) {
@@ -3757,7 +3765,8 @@ static unsigned int on_action_public_vendor(struct recv_frame *precv_frame)
return ret;
}
-static unsigned int on_action_public_default(struct recv_frame *precv_frame, u8 action)
+static unsigned int on_action_public_default(struct recv_frame *precv_frame,
+ u8 action)
{
unsigned int ret = _FAIL;
u8 *pframe = precv_frame->pkt->data;
@@ -3972,9 +3981,10 @@ static int has_channel(struct rt_channel_info *channel_set,
return 0;
}
-static void init_channel_list(struct adapter *padapter, struct rt_channel_info *channel_set,
- u8 chanset_size,
- struct p2p_channels *channel_list)
+static void init_channel_list(struct adapter *padapter,
+ struct rt_channel_info *channel_set,
+ u8 chanset_size,
+ struct p2p_channels *channel_list)
{
struct p2p_oper_class_map op_class[] = {
{ IEEE80211G, 81, 1, 13, 1, BW20 },
@@ -3999,7 +4009,7 @@ static void init_channel_list(struct adapter *padapter, struct rt_channel_info *
continue;
if ((0 == (padapter->registrypriv.cbw40_enable & BIT(1))) &&
- ((BW40MINUS == o->bw) || (BW40PLUS == o->bw)))
+ ((o->bw == BW40MINUS) || (o->bw == BW40PLUS)))
continue;
if (reg == NULL) {
@@ -4015,7 +4025,8 @@ static void init_channel_list(struct adapter *padapter, struct rt_channel_info *
channel_list->reg_classes = cla;
}
-static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_channel_info *channel_set)
+static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan,
+ struct rt_channel_info *channel_set)
{
u8 index, chanset_size = 0;
u8 b2_4GBand = false;
@@ -4030,7 +4041,7 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
if (padapter->registrypriv.wireless_mode & WIRELESS_11G) {
b2_4GBand = true;
- if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == ChannelPlan)
+ if (ChannelPlan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
else
Index2G = RTW_ChannelPlanMap[ChannelPlan].Index2G;
@@ -4040,14 +4051,14 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
for (index = 0; index < RTW_ChannelPlan2G[Index2G].Len; index++) {
channel_set[chanset_size].ChannelNum = RTW_ChannelPlan2G[Index2G].Channel[index];
- if ((RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN == ChannelPlan) ||/* Channel 1~11 is active, and 12~14 is passive */
- (RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G == ChannelPlan)) {
+ if ((ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN) ||/* Channel 1~11 is active, and 12~14 is passive */
+ (ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G)) {
if (channel_set[chanset_size].ChannelNum >= 1 && channel_set[chanset_size].ChannelNum <= 11)
channel_set[chanset_size].ScanType = SCAN_ACTIVE;
else if ((channel_set[chanset_size].ChannelNum >= 12 && channel_set[chanset_size].ChannelNum <= 14))
channel_set[chanset_size].ScanType = SCAN_PASSIVE;
- } else if (RT_CHANNEL_DOMAIN_WORLD_WIDE_13 == ChannelPlan ||
- RT_CHANNEL_DOMAIN_2G_WORLD == Index2G) {/* channel 12~13, passive scan */
+ } else if (ChannelPlan == RT_CHANNEL_DOMAIN_WORLD_WIDE_13 ||
+ Index2G == RT_CHANNEL_DOMAIN_2G_WORLD) {/* channel 12~13, passive scan */
if (channel_set[chanset_size].ChannelNum <= 11)
channel_set[chanset_size].ScanType = SCAN_ACTIVE;
else
@@ -4105,7 +4116,9 @@ void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
}
}
-static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, struct recv_frame *precv_frame)
+static void _mgt_dispatcher(struct adapter *padapter,
+ struct mlme_handler *ptable,
+ struct recv_frame *precv_frame)
{
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
u8 *pframe = precv_frame->pkt->data;
@@ -4351,7 +4364,8 @@ void report_join_res(struct adapter *padapter, int res)
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
}
-void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
+void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr,
+ unsigned short reason)
{
struct cmd_obj *pcmd_obj;
u8 *pevtcmd;
@@ -4406,7 +4420,8 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
}
-void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx)
+void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr,
+ int cam_idx)
{
struct cmd_obj *pcmd_obj;
u8 *pevtcmd;
@@ -4767,7 +4782,7 @@ void linked_status_chk(struct adapter *padapter)
if (pmlmeinfo->FW_sta_info[i].status == 1) {
psta = pmlmeinfo->FW_sta_info[i].psta;
- if (NULL == psta)
+ if (psta == NULL)
continue;
if (pmlmeinfo->FW_sta_info[i].rx_pkt == sta_rx_pkts(psta)) {
if (pmlmeinfo->FW_sta_info[i].retry < 3) {
@@ -4788,9 +4803,10 @@ void linked_status_chk(struct adapter *padapter)
}
}
-void survey_timer_hdl(unsigned long data)
+void survey_timer_hdl(struct timer_list *t)
{
- struct adapter *padapter = (struct adapter *)data;
+ struct adapter *padapter = from_timer(padapter, t,
+ mlmeextpriv.survey_timer);
struct cmd_obj *ph2c;
struct sitesurvey_parm *psurveyPara;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
@@ -4828,9 +4844,10 @@ exit_survey_timer_hdl:
return;
}
-void link_timer_hdl(unsigned long data)
+void link_timer_hdl(struct timer_list *t)
{
- struct adapter *padapter = (struct adapter *)data;
+ struct adapter *padapter = from_timer(padapter, t,
+ mlmeextpriv.link_timer);
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -4864,9 +4881,9 @@ void link_timer_hdl(unsigned long data)
}
}
-void addba_timer_hdl(unsigned long data)
+void addba_timer_hdl(struct timer_list *t)
{
- struct sta_info *psta = (struct sta_info *)data;
+ struct sta_info *psta = from_timer(psta, t, addba_retry_timer);
struct ht_priv *phtpriv;
if (!psta)
@@ -5125,8 +5142,10 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
return H2C_SUCCESS;
}
-static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_channel *out,
- u32 out_num, struct rtw_ieee80211_channel *in, u32 in_num)
+static int rtw_scan_ch_decision(struct adapter *padapter,
+ struct rtw_ieee80211_channel *out,
+ u32 out_num,
+ struct rtw_ieee80211_channel *in, u32 in_num)
{
int i, j;
int set_idx;
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index f86c9cebf09a..ac27f9a023bc 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -37,7 +37,7 @@ static int rtw_hw_suspend(struct adapter *padapter)
/* system suspend */
LeaveAllPowerSaveMode(padapter);
- DBG_88E("==> rtw_hw_suspend\n");
+ DBG_88E("==> %s\n", __func__);
mutex_lock(&pwrpriv->mutex_lock);
pwrpriv->bips_processing = true;
/* s1. */
@@ -89,7 +89,7 @@ static int rtw_hw_resume(struct adapter *padapter)
/* system resume */
- DBG_88E("==> rtw_hw_resume\n");
+ DBG_88E("==> %s\n", __func__);
mutex_lock(&pwrpriv->mutex_lock);
pwrpriv->bips_processing = true;
rtw_reset_drv_sw(padapter);
@@ -146,7 +146,7 @@ void ips_enter(struct adapter *padapter)
pwrpriv->ips_mode = pwrpriv->ips_mode_req;
pwrpriv->ips_enter_cnts++;
- DBG_88E("==>ips_enter cnts:%d\n", pwrpriv->ips_enter_cnts);
+ DBG_88E("==>%s:%d\n", __func__, pwrpriv->ips_enter_cnts);
if (rf_off == pwrpriv->change_rfpwrstate) {
pwrpriv->bpower_saving = true;
DBG_88E_LEVEL(_drv_info_, "nolinked power save enter\n");
@@ -177,7 +177,7 @@ int ips_leave(struct adapter *padapter)
pwrpriv->bips_processing = true;
pwrpriv->change_rfpwrstate = rf_on;
pwrpriv->ips_leave_cnts++;
- DBG_88E("==>ips_leave cnts:%d\n", pwrpriv->ips_leave_cnts);
+ DBG_88E("==>%s:%d\n", __func__, pwrpriv->ips_leave_cnts);
result = rtw_ips_pwr_up(padapter);
if (result == _SUCCESS)
@@ -198,7 +198,7 @@ int ips_leave(struct adapter *padapter)
}
}
- DBG_88E("==> ips_leave.....LED(0x%08x)...\n", usb_read32(padapter, 0x4c));
+ DBG_88E("==> %s.....LED(0x%08x)...\n", __func__, usb_read32(padapter, 0x4c));
pwrpriv->bips_processing = false;
pwrpriv->bkeepfwalive = false;
@@ -276,9 +276,11 @@ exit:
pwrpriv->ps_processing = false;
}
-static void pwr_state_check_handler(unsigned long data)
+static void pwr_state_check_handler(struct timer_list *t)
{
- struct adapter *padapter = (struct adapter *)data;
+ struct adapter *padapter =
+ from_timer(padapter, t,
+ pwrctrlpriv.pwr_state_check_timer);
rtw_ps_cmd(padapter);
}
@@ -332,7 +334,7 @@ void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
rpwm = pslv | pwrpriv->tog;
RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
- ("rtw_set_rpwm: rpwm=0x%02x cpwm=0x%02x\n", rpwm, pwrpriv->cpwm));
+ ("%s: rpwm=0x%02x cpwm=0x%02x\n", __func__, rpwm, pwrpriv->cpwm));
pwrpriv->rpwm = pslv;
@@ -525,7 +527,7 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->power_mgnt = PS_MODE_ACTIVE;
else
pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
- pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
+ pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE) ? true : false;
pwrctrlpriv->bFwCurrentInPSMode = false;
@@ -540,9 +542,8 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->btcoex_rfon = false;
- setup_timer(&pwrctrlpriv->pwr_state_check_timer,
- pwr_state_check_handler,
- (unsigned long)padapter);
+ timer_setup(&pwrctrlpriv->pwr_state_check_timer,
+ pwr_state_check_handler, 0);
}
/*
@@ -569,7 +570,7 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
DBG_88E("%s wait ps_processing...\n", __func__);
while (pwrpriv->ps_processing &&
jiffies_to_msecs(jiffies - start) <= 3000)
- usleep_range(1000, 3000);
+ udelay(1500);
if (pwrpriv->ps_processing)
DBG_88E("%s wait ps_processing timeout\n", __func__);
else
@@ -595,7 +596,7 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
}
if (rf_off == pwrpriv->rf_pwrstate) {
DBG_88E("%s call ips_leave....\n", __func__);
- if (_FAIL == ips_leave(padapter)) {
+ if (ips_leave(padapter) == _FAIL) {
DBG_88E("======> ips_leave fail.............\n");
ret = _FAIL;
goto exit;
@@ -628,12 +629,12 @@ int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
if (mode < PS_MODE_NUM) {
if (pwrctrlpriv->power_mgnt != mode) {
- if (PS_MODE_ACTIVE == mode)
+ if (mode == PS_MODE_ACTIVE)
LeaveAllPowerSaveMode(padapter);
else
pwrctrlpriv->LpsIdleCount = 2;
pwrctrlpriv->power_mgnt = mode;
- pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
+ pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE) ? true : false;
}
} else {
ret = -EINVAL;
@@ -653,7 +654,7 @@ int rtw_pm_set_ips(struct adapter *padapter, u8 mode)
} else if (mode == IPS_NONE) {
rtw_ips_mode_req(pwrctrlpriv, mode);
DBG_88E("%s %s\n", __func__, "IPS_NONE");
- if ((padapter->bSurpriseRemoved == 0) && (_FAIL == rtw_pwr_wakeup(padapter)))
+ if ((padapter->bSurpriseRemoved == 0) && (rtw_pwr_wakeup(padapter) == _FAIL))
return -EFAULT;
} else {
return -EINVAL;
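
In rtw_pwrctrl.c the hard-coded function names in the DBG_88E()/RT_TRACE() strings are replaced with a "%s"/__func__ pair, so the messages stay accurate if a function is renamed; the power-state-check timer gets the same timer_setup() treatment as above. The logging pattern in one line:

	DBG_88E("==> %s\n", __func__);	/* prints the current function name, e.g. "rtw_hw_suspend" */
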
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 3fd5f4102b36..6506a1587df0 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -39,7 +39,7 @@ static u8 rtw_rfc1042_header[] = {
0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
};
-static void rtw_signal_stat_timer_hdl(unsigned long data);
+static void rtw_signal_stat_timer_hdl(struct timer_list *t);
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
{
@@ -86,9 +86,8 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
}
res = rtw_hal_init_recv_priv(padapter);
- setup_timer(&precvpriv->signal_stat_timer,
- rtw_signal_stat_timer_hdl,
- (unsigned long)padapter);
+ timer_setup(&precvpriv->signal_stat_timer, rtw_signal_stat_timer_hdl,
+ 0);
precvpriv->signal_stat_sampling_interval = 1000; /* ms */
@@ -193,7 +192,7 @@ void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfre
plist = phead->next;
while (phead != plist) {
- hdr = container_of(plist, struct recv_frame, list);
+ hdr = list_entry(plist, struct recv_frame, list);
plist = plist->next;
@@ -237,32 +236,40 @@ static int recvframe_chkmic(struct adapter *adapter,
stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
if (prxattrib->encrypt == _TKIP_) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic:prxattrib->encrypt==_TKIP_\n"));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic:da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
- prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2], prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n %s: prxattrib->encrypt==_TKIP_\n", __func__));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n %s: da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ __func__, prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+ prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
/* calculate mic code */
if (stainfo != NULL) {
if (IS_MCAST(prxattrib->ra)) {
if (!psecuritypriv) {
res = _FAIL;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n"));
- DBG_88E("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n");
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n %s: didn't install group key!!!!!!!!!!\n", __func__));
+ DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__);
goto exit;
}
mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic: bcmc key\n"));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n %s: bcmc key\n", __func__));
} else {
mickey = &stainfo->dot11tkiprxmickey.skey[0];
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n recvframe_chkmic: unicast key\n"));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n %s: unicast key\n", __func__));
}
/* icv_len included the mic code */
- datalen = precvframe->pkt->len-prxattrib->hdrlen - 8;
+ datalen = precvframe->pkt->len-prxattrib->hdrlen -
+ prxattrib->iv_len-prxattrib->icv_len-8;
pframe = precvframe->pkt->data;
- payload = pframe+prxattrib->hdrlen;
+ payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
(unsigned char)prxattrib->priority); /* care the length of the data */
@@ -273,8 +280,8 @@ static int recvframe_chkmic(struct adapter *adapter,
for (i = 0; i < 8; i++) {
if (miccode[i] != *(pframemic+i)) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- ("recvframe_chkmic:miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
- i, miccode[i], i, *(pframemic+i)));
+ ("%s: miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
+ __func__, i, miccode[i], i, *(pframemic + i)));
bmic_err = true;
}
}
@@ -346,7 +353,8 @@ static int recvframe_chkmic(struct adapter *adapter,
}
}
} else {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chkmic: rtw_get_stainfo==NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("%s: rtw_get_stainfo==NULL!!!\n", __func__));
}
skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
@@ -407,15 +415,9 @@ static struct recv_frame *decryptor(struct adapter *padapter,
default:
break;
}
- if (res != _FAIL) {
- memmove(precv_frame->pkt->data + precv_frame->attrib.iv_len, precv_frame->pkt->data, precv_frame->attrib.hdrlen);
- skb_pull(precv_frame->pkt, precv_frame->attrib.iv_len);
- skb_trim(precv_frame->pkt, precv_frame->pkt->len - precv_frame->attrib.icv_len);
- }
} else if (prxattrib->bdecrypted == 1 && prxattrib->encrypt > 0 &&
- (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_)) {
- psecuritypriv->hw_decrypted = true;
- }
+ (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_))
+ psecuritypriv->hw_decrypted = true;
if (res == _FAIL) {
rtw_free_recvframe(return_packet, &padapter->recvpriv.free_recv_queue);
@@ -456,7 +458,7 @@ static struct recv_frame *portctrl(struct adapter *adapter,
if (auth_alg == 2) {
/* get ether_type */
- ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE;
+ ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE + pfhdr->attrib.iv_len;
memcpy(&be_tmp, ptr, 2);
ether_type = ntohs(be_tmp);
@@ -943,7 +945,7 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
xmitframe_plist = xmitframe_phead->next;
if (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
+ pxmitframe = list_entry(xmitframe_plist, struct xmit_frame, list);
xmitframe_plist = xmitframe_plist->next;
@@ -1011,7 +1013,8 @@ static int validate_recv_mgnt_frame(struct adapter *padapter,
precv_frame = recvframe_chk_defrag(padapter, precv_frame);
if (precv_frame == NULL) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("%s: fragment packet\n", __func__));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("%s: fragment packet\n", __func__));
return _SUCCESS;
}
@@ -1138,8 +1141,6 @@ static int validate_recv_data_frame(struct adapter *adapter,
}
if (pattrib->privacy) {
- struct sk_buff *skb = precv_frame->pkt;
-
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("validate_recv_data_frame:pattrib->privacy=%x\n", pattrib->privacy));
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n ^^^^^^^^^^^IS_MCAST(pattrib->ra(0x%02x))=%d^^^^^^^^^^^^^^^6\n", pattrib->ra[0], IS_MCAST(pattrib->ra)));
@@ -1148,13 +1149,6 @@ static int validate_recv_data_frame(struct adapter *adapter,
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n pattrib->encrypt=%d\n", pattrib->encrypt));
SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
-
- if (pattrib->bdecrypted == 1 && pattrib->encrypt > 0) {
- memmove(skb->data + pattrib->iv_len,
- skb->data, pattrib->hdrlen);
- skb_pull(skb, pattrib->iv_len);
- skb_trim(skb, skb->len - pattrib->icv_len);
- }
} else {
pattrib->encrypt = 0;
pattrib->iv_len = 0;
@@ -1274,7 +1268,6 @@ static int validate_recv_frame(struct adapter *adapter,
* Hence forward the frame to the monitor anyway to preserve the order
* in which frames were received.
*/
-
rtl88eu_mon_recv_hook(adapter->pmondev, precv_frame);
exit:
@@ -1296,8 +1289,11 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
u8 *ptr = precvframe->pkt->data;
struct rx_pkt_attrib *pattrib = &precvframe->attrib;
- psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen);
- psnap_type = ptr+pattrib->hdrlen + SNAP_SIZE;
+ if (pattrib->encrypt)
+ skb_trim(precvframe->pkt, precvframe->pkt->len - pattrib->icv_len);
+
+ psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len);
+ psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
/* convert hdr + possible LLC headers into Ethernet header */
if ((!memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
(!memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) == false) &&
@@ -1310,9 +1306,12 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
bsnaphdr = false;
}
- rmv_len = pattrib->hdrlen + (bsnaphdr ? SNAP_SIZE : 0);
+ rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? SNAP_SIZE : 0);
len = precvframe->pkt->len - rmv_len;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n===pattrib->hdrlen: %x, pattrib->iv_len:%x===\n\n", pattrib->hdrlen, pattrib->iv_len));
+
memcpy(&be_tmp, ptr+rmv_len, 2);
eth_type = ntohs(be_tmp); /* pattrib->ether_type */
pattrib->eth_type = eth_type;
@@ -1337,6 +1336,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
struct __queue *defrag_q)
{
struct list_head *plist, *phead;
+ u8 wlanhdr_offset;
u8 curfragnum;
struct recv_frame *pfhdr, *pnfhdr;
struct recv_frame *prframe, *pnextrframe;
@@ -1347,7 +1347,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
phead = get_list_head(defrag_q);
plist = phead->next;
- pfhdr = container_of(plist, struct recv_frame, list);
+ pfhdr = list_entry(plist, struct recv_frame, list);
prframe = pfhdr;
list_del_init(&(prframe->list));
@@ -1367,7 +1367,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
plist = plist->next;
while (phead != plist) {
- pnfhdr = container_of(plist, struct recv_frame, list);
+ pnfhdr = list_entry(plist, struct recv_frame, list);
pnextrframe = pnfhdr;
/* check the fragment sequence (2nd ~n fragment frame) */
@@ -1385,7 +1385,12 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
/* copy the 2nd~n fragment frame's payload to the first fragment */
/* get the 2nd~last fragment frame's payload */
- skb_pull(pnextrframe->pkt, pnfhdr->attrib.hdrlen);
+ wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
+
+ skb_pull(pnextrframe->pkt, wlanhdr_offset);
+
+ /* append to first fragment frame's tail (if privacy frame, pull the ICV) */
+ skb_trim(prframe->pkt, prframe->pkt->len - pfhdr->attrib.icv_len);
/* memcpy */
memcpy(skb_tail_pointer(pfhdr->pkt), pnfhdr->pkt->data,
@@ -1393,7 +1398,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
skb_put(prframe->pkt, pnfhdr->pkt->len);
- pfhdr->attrib.icv_len = 0;
+ pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
plist = plist->next;
}
@@ -1519,6 +1524,11 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
nr_subframes = 0;
pattrib = &prframe->attrib;
+ skb_pull(prframe->pkt, prframe->attrib.hdrlen);
+
+ if (prframe->attrib.iv_len > 0)
+ skb_pull(prframe->pkt, prframe->attrib.iv_len);
+
a_len = prframe->pkt->len;
pdata = prframe->pkt->data;
@@ -1655,7 +1665,7 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
plist = phead->next;
while (phead != plist) {
- hdr = container_of(plist, struct recv_frame, list);
+ hdr = list_entry(plist, struct recv_frame, list);
pnextattrib = &hdr->attrib;
if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
@@ -1690,7 +1700,7 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
if (list_empty(phead))
return true;
- prhdr = container_of(plist, struct recv_frame, list);
+ prhdr = list_entry(plist, struct recv_frame, list);
pattrib = &prhdr->attrib;
preorder_ctrl->indicate_seq = pattrib->seq_num;
}
@@ -1698,7 +1708,7 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
/* Prepare indication list and indication. */
/* Check if there is any packet need indicate. */
while (!list_empty(phead)) {
- prhdr = container_of(plist, struct recv_frame, list);
+ prhdr = list_entry(plist, struct recv_frame, list);
prframe = prhdr;
pattrib = &prframe->attrib;
@@ -1829,9 +1839,10 @@ _err_exit:
return _FAIL;
}
-void rtw_reordering_ctrl_timeout_handler(unsigned long data)
+void rtw_reordering_ctrl_timeout_handler(struct timer_list *t)
{
- struct recv_reorder_ctrl *preorder_ctrl = (struct recv_reorder_ctrl *)data;
+ struct recv_reorder_ctrl *preorder_ctrl = from_timer(preorder_ctrl, t,
+ reordering_ctrl_timer);
struct adapter *padapter = preorder_ctrl->padapter;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
@@ -1887,6 +1898,24 @@ static int process_recv_indicatepkts(struct adapter *padapter,
return retval;
}
+static int recv_func_prehandle(struct adapter *padapter,
+ struct recv_frame *rframe)
+{
+ int ret = _SUCCESS;
+ struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ /* check the frame crtl field and decache */
+ ret = validate_recv_frame(padapter, rframe);
+ if (ret != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n"));
+ rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
static int recv_func_posthandle(struct adapter *padapter,
struct recv_frame *prframe)
{
@@ -1939,7 +1968,6 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
struct rx_pkt_attrib *prxattrib = &rframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct mlme_priv *mlmepriv = &padapter->mlmepriv;
- struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
/* check if need to handle uc_swdec_pending_queue*/
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) {
@@ -1951,12 +1979,9 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
}
}
- /* check the frame crtl field and decache */
- ret = validate_recv_frame(padapter, rframe);
- if (ret != _SUCCESS) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n"));
- rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
- } else {
+ ret = recv_func_prehandle(padapter, rframe);
+
+ if (ret == _SUCCESS) {
/* check if need to enqueue into uc_swdec_pending_queue*/
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
!IS_MCAST(prxattrib->ra) && prxattrib->encrypt > 0 &&
@@ -1999,9 +2024,10 @@ _recv_entry_drop:
return ret;
}
-static void rtw_signal_stat_timer_hdl(unsigned long data)
+static void rtw_signal_stat_timer_hdl(struct timer_list *t)
{
- struct adapter *adapter = (struct adapter *)data;
+ struct adapter *adapter =
+ from_timer(adapter, t, recvpriv.signal_stat_timer);
struct recv_priv *recvpriv = &adapter->recvpriv;
u32 tmp_s, tmp_q;
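Both timer handlers converted above follow the timer_list API pattern: the callback now receives the struct timer_list pointer and from_timer() (a container_of() wrapper) recovers the embedding object, while the setup side is expected to use timer_setup() instead of passing an unsigned long cookie. A minimal sketch, assuming a stripped-down struct recv_reorder_ctrl and a hypothetical init helper:

#include <linux/timer.h>

struct recv_reorder_ctrl {
	struct timer_list reordering_ctrl_timer;	/* embedded timer */
	/* ... other reorder-control state omitted ... */
};

static void reordering_ctrl_timeout(struct timer_list *t)
{
	/* from_timer(var, timer, field) maps the timer_list back to the
	 * structure that embeds it, replacing the old (unsigned long) cast.
	 */
	struct recv_reorder_ctrl *ctrl =
		from_timer(ctrl, t, reordering_ctrl_timer);

	/* ... flush or indicate pending frames for ctrl ... */
}

static void reordering_ctrl_timer_init(struct recv_reorder_ctrl *ctrl)
{
	/* Replaces setup_timer(&timer, fn, (unsigned long)ctrl). */
	timer_setup(&ctrl->reordering_ctrl_timer, reordering_ctrl_timeout, 0);
}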
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index b283a4903369..5b1ef229df2a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -1690,4 +1690,4 @@ do { \
d##1 = TE0(s##1) ^ TE1(s##2) ^ TE2(s##3) ^ TE3(s##0) ^ rk[4 * i + 1]; \
d##2 = TE0(s##2) ^ TE1(s##3) ^ TE2(s##0) ^ TE3(s##1) ^ rk[4 * i + 2]; \
d##3 = TE0(s##3) ^ TE1(s##0) ^ TE2(s##1) ^ TE3(s##2) ^ rk[4 * i + 3]; \
-} while (0);
+} while (0)
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 22cf362b8528..2fd2a9e2416e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -239,8 +239,8 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
}
/* init for DM */
- psta->rssi_stat.UndecoratedSmoothedPWDB = (-1);
- psta->rssi_stat.UndecoratedSmoothedCCK = (-1);
+ psta->rssi_stat.UndecoratedSmoothedPWDB = -1;
+ psta->rssi_stat.UndecoratedSmoothedCCK = -1;
/* init for the sequence number of received management frame */
psta->RxMgmtFrameSeqNum = 0xffff;
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index be2f46eb9f78..e8d9858f2942 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -93,7 +93,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf;
for (i = 0; i < NR_XMITFRAME; i++) {
- INIT_LIST_HEAD(&(pxframe->list));
+ INIT_LIST_HEAD(&pxframe->list);
pxframe->padapter = padapter;
pxframe->frame_tag = NULL_FRAMETAG;
@@ -103,7 +103,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxframe->buf_addr = NULL;
pxframe->pxmitbuf = NULL;
- list_add_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue));
+ list_add_tail(&pxframe->list, &pxmitpriv->free_xmit_queue.queue);
pxframe++;
}
@@ -148,7 +148,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitbuf->flags = XMIT_VO_QUEUE;
- list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue));
+ list_add_tail(&pxmitbuf->list, &pxmitpriv->free_xmitbuf_queue.queue);
pxmitbuf++;
}
@@ -182,7 +182,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
goto exit;
}
- list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue));
+ list_add_tail(&pxmitbuf->list, &pxmitpriv->free_xmit_extbuf_queue.queue);
pxmitbuf++;
}
@@ -258,8 +258,8 @@ static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *
u32 sz;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct sta_info *psta = pattrib->psta;
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (pattrib->nr_frags != 1)
sz = padapter->xmitpriv.frag_len;
@@ -697,7 +697,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("curfragnum=%d length=%d pattrib->icv_len=%d", curfragnum, length, pattrib->icv_len));
}
}
- rtw_secgetmic(&micdata, &(mic[0]));
+ rtw_secgetmic(&micdata, &mic[0]);
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: before add mic code!!!\n"));
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: pattrib->last_txcmdsz=%d!!!\n", pattrib->last_txcmdsz));
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: mic[0]=0x%.2x , mic[1]=0x%.2x , mic[2]= 0x%.2x, mic[3]=0x%.2x\n\
@@ -705,18 +705,18 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
mic[0], mic[1], mic[2], mic[3], mic[4], mic[5], mic[6], mic[7]));
/* add mic code and add the mic code length in last_txcmdsz */
- memcpy(payload, &(mic[0]), 8);
+ memcpy(payload, &mic[0], 8);
pattrib->last_txcmdsz += 8;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("\n ======== last pkt ========\n"));
payload = payload-pattrib->last_txcmdsz+8;
for (curfragnum = 0; curfragnum < pattrib->last_txcmdsz; curfragnum = curfragnum+8)
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- (" %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x ",
- *(payload+curfragnum), *(payload+curfragnum+1),
- *(payload+curfragnum+2), *(payload+curfragnum+3),
- *(payload+curfragnum+4), *(payload+curfragnum+5),
- *(payload+curfragnum+6), *(payload+curfragnum+7)));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ (" %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x ",
+ *(payload + curfragnum), *(payload + curfragnum + 1),
+ *(payload + curfragnum + 2), *(payload + curfragnum + 3),
+ *(payload + curfragnum + 4), *(payload + curfragnum + 5),
+ *(payload + curfragnum + 6), *(payload + curfragnum + 7)));
} else {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: rtw_get_stainfo==NULL!!!\n"));
}
@@ -786,7 +786,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
SetFrameSubType(fctrl, pattrib->subtype);
if (pattrib->subtype & WIFI_DATA_TYPE) {
- if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
/* to_ds = 1, fr_ds = 0; */
/* Data transfer to AP */
SetToDs(fctrl);
@@ -899,20 +899,20 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, struct pkt_attrib *pat
switch (priority) {
case 1:
case 2:
- ptxservq = &(psta->sta_xmitpriv.bk_q);
+ ptxservq = &psta->sta_xmitpriv.bk_q;
break;
case 4:
case 5:
- ptxservq = &(psta->sta_xmitpriv.vi_q);
+ ptxservq = &psta->sta_xmitpriv.vi_q;
break;
case 6:
case 7:
- ptxservq = &(psta->sta_xmitpriv.vo_q);
+ ptxservq = &psta->sta_xmitpriv.vo_q;
break;
case 0:
case 3:
default:
- ptxservq = &(psta->sta_xmitpriv.be_q);
+ ptxservq = &psta->sta_xmitpriv.be_q;
break;
}
@@ -1229,7 +1229,7 @@ s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
list_del_init(&pxmitbuf->list);
- list_add_tail(&(pxmitbuf->list), get_list_head(pfree_queue));
+ list_add_tail(&pxmitbuf->list, get_list_head(pfree_queue));
pxmitpriv->free_xmit_extbuf_cnt++;
spin_unlock_irqrestore(&pfree_queue->lock, irql);
@@ -1283,7 +1283,7 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
list_del_init(&pxmitbuf->list);
- list_add_tail(&(pxmitbuf->list), get_list_head(pfree_xmitbuf_queue));
+ list_add_tail(&pxmitbuf->list, get_list_head(pfree_xmitbuf_queue));
pxmitpriv->free_xmitbuf_cnt++;
spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql);
@@ -1395,7 +1395,7 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
struct xmit_frame *pxmitframe;
- spin_lock_bh(&(pframequeue->lock));
+ spin_lock_bh(&pframequeue->lock);
phead = get_list_head(pframequeue);
plist = phead->next;
@@ -1407,7 +1407,7 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
rtw_free_xmitframe(pxmitpriv, pxmitframe);
}
- spin_unlock_bh(&(pframequeue->lock));
+ spin_unlock_bh(&pframequeue->lock);
}
@@ -1503,26 +1503,26 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *
switch (up) {
case 1:
case 2:
- ptxservq = &(psta->sta_xmitpriv.bk_q);
+ ptxservq = &psta->sta_xmitpriv.bk_q;
*(ac) = 3;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BK\n"));
break;
case 4:
case 5:
- ptxservq = &(psta->sta_xmitpriv.vi_q);
+ ptxservq = &psta->sta_xmitpriv.vi_q;
*(ac) = 1;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VI\n"));
break;
case 6:
case 7:
- ptxservq = &(psta->sta_xmitpriv.vo_q);
+ ptxservq = &psta->sta_xmitpriv.vo_q;
*(ac) = 0;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VO\n"));
break;
case 0:
case 3:
default:
- ptxservq = &(psta->sta_xmitpriv.be_q);
+ ptxservq = &psta->sta_xmitpriv.be_q;
*(ac) = 2;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BE\n"));
break;
@@ -1845,21 +1845,21 @@ void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
pstapriv->sta_dz_bitmap |= BIT(psta->aid);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending);
- list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
+ list_del_init(&pstaxmitpriv->vo_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending);
- list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
+ list_del_init(&pstaxmitpriv->vi_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&(pstaxmitpriv->be_q.tx_pending));
+ list_del_init(&pstaxmitpriv->be_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->bk_q.sta_pending);
- list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
+ list_del_init(&pstaxmitpriv->bk_q.tx_pending);
/* for BC/MC Frames */
pstaxmitpriv = &psta_bmc->sta_xmitpriv;
dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&(pstaxmitpriv->be_q.tx_pending));
+ list_del_init(&pstaxmitpriv->be_q.tx_pending);
spin_unlock_bh(&pxmitpriv->lock);
}
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index ec8aae76bf40..001d6267b56e 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -382,7 +382,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
}
/* add by Neil Chen to avoid PSD is processing */
- if (pDM_Odm->bDMInitialGainEnable == false) {
+ if (!pDM_Odm->bDMInitialGainEnable) {
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() Return: PSD is Processing\n"));
return;
}
diff --git a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
index 0555e42a3787..5fcbe5639e99 100644
--- a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
@@ -109,7 +109,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
dm_odm->PhyDbgInfo.NumQryPhyStatusCCK++;
/* (1)Hardware does not provide RSSI for CCK */
- /* (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
cck_highpwr = dm_odm->bCckHighPower;
@@ -223,7 +223,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
pPhyInfo->RxSNR[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
}
- /* (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110;
PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index 3039bbe44a25..20253b5b6679 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -820,9 +820,8 @@ static void save_adda_registers(struct adapter *adapt, u32 *addareg,
{
u32 i;
- for (i = 0; i < register_num; i++) {
+ for (i = 0; i < register_num; i++)
backup[i] = phy_query_bb_reg(adapt, addareg[i], bMaskDWord);
- }
}
static void save_mac_registers(struct adapter *adapt, u32 *mac_reg,
@@ -830,9 +829,9 @@ static void save_mac_registers(struct adapter *adapt, u32 *mac_reg,
{
u32 i;
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
backup[i] = usb_read8(adapt, mac_reg[i]);
- }
+
backup[i] = usb_read32(adapt, mac_reg[i]);
}
@@ -850,9 +849,9 @@ static void reload_mac_registers(struct adapter *adapt,
{
u32 i;
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
usb_write8(adapt, mac_reg[i], (u8)backup[i]);
- }
+
usb_write32(adapt, mac_reg[i], backup[i]);
}
@@ -880,9 +879,9 @@ static void mac_setting_calibration(struct adapter *adapt, u32 *mac_reg, u32 *ba
usb_write8(adapt, mac_reg[i], 0x3F);
- for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) {
+ for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
usb_write8(adapt, mac_reg[i], (u8)(backup[i]&(~BIT(3))));
- }
+
usb_write8(adapt, mac_reg[i], (u8)(backup[i]&(~BIT(5))));
}
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 674ac5396d00..17967c944946 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -1745,6 +1745,7 @@ void rtw_hal_get_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
switch (variable) {
case HW_VAR_BASIC_RATE:
*((u16 *)(val)) = Adapter->HalData->BasicRateSet;
+ /* fall through */
case HW_VAR_TXPAUSE:
val[0] = usb_read8(Adapter, REG_TXPAUSE);
break;
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
index 550ad62e7064..4e5d7fc6de07 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -75,7 +75,8 @@ enum rf_radio_path {
#define CHANNEL_MAX_NUMBER 14 /* 14 is the max chnl number */
#define MAX_CHNL_GROUP_24G 6 /* ch1~2, ch3~5, ch6~8,
*ch9~11, ch12~13, CH 14
- * total three groups */
+ * total three groups
+ */
#define CHANNEL_GROUP_MAX_88E 6
enum wireless_mode {
@@ -116,35 +117,45 @@ struct bb_reg_def {
/* 0x80c~0x80f [4 bytes] */
u32 rfHSSIPara1; /* wire parameter control1 : */
/* 0x820~0x823,0x828~0x82b,
- * 0x830~0x833, 0x838~0x83b [16 bytes] */
+ * 0x830~0x833, 0x838~0x83b [16 bytes]
+ */
u32 rfHSSIPara2; /* wire parameter control2 : */
/* 0x824~0x827,0x82c~0x82f, 0x834~0x837,
- * 0x83c~0x83f [16 bytes] */
+ * 0x83c~0x83f [16 bytes]
+ */
u32 rfSwitchControl; /* Tx Rx antenna control : */
/* 0x858~0x85f [16 bytes] */
u32 rfAGCControl1; /* AGC parameter control1 : */
/* 0xc50~0xc53,0xc58~0xc5b, 0xc60~0xc63,
- * 0xc68~0xc6b [16 bytes] */
+ * 0xc68~0xc6b [16 bytes]
+ */
u32 rfAGCControl2; /* AGC parameter control2 : */
/* 0xc54~0xc57,0xc5c~0xc5f, 0xc64~0xc67,
- * 0xc6c~0xc6f [16 bytes] */
+ * 0xc6c~0xc6f [16 bytes]
+ */
u32 rfRxIQImbalance; /* OFDM Rx IQ imbalance matrix : */
/* 0xc14~0xc17,0xc1c~0xc1f, 0xc24~0xc27,
- * 0xc2c~0xc2f [16 bytes] */
+ * 0xc2c~0xc2f [16 bytes]
+ */
u32 rfRxAFE; /* Rx IQ DC ofset and Rx digital filter,
- * Rx DC notch filter : */
+ * Rx DC notch filter :
+ */
/* 0xc10~0xc13,0xc18~0xc1b, 0xc20~0xc23,
- * 0xc28~0xc2b [16 bytes] */
+ * 0xc28~0xc2b [16 bytes]
+ */
u32 rfTxIQImbalance; /* OFDM Tx IQ imbalance matrix */
/* 0xc80~0xc83,0xc88~0xc8b, 0xc90~0xc93,
- * 0xc98~0xc9b [16 bytes] */
+ * 0xc98~0xc9b [16 bytes]
+ */
u32 rfTxAFE; /* Tx IQ DC Offset and Tx DFIR type */
/* 0xc84~0xc87,0xc8c~0xc8f, 0xc94~0xc97,
- * 0xc9c~0xc9f [16 bytes] */
+ * 0xc9c~0xc9f [16 bytes]
+ */
u32 rfLSSIReadBack; /* LSSI RF readback data SI mode */
/* 0x8a0~0x8af [16 bytes] */
u32 rfLSSIReadBackPi; /* LSSI RF readback data PI mode 0x8b8-8bc for
- * Path A and B */
+ * Path A and B
+ */
};
/*------------------------------Define structure----------------------------*/
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
index 04159a9f90d3..8cbba85e1587 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
@@ -136,7 +136,8 @@
#define rCCK0_CCA 0xa08
/* AGC default value, saturation level Antenna Diversity, RX AGC, LNA Threshold,
- * RX LNA Threshold useless now. Not the same as 90 series */
+ * RX LNA Threshold useless now. Not the same as 90 series
+ */
#define rCCK0_RxAGC1 0xa0c
#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
diff --git a/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h b/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h
index 6722010ba1ec..96ebda93b4ee 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h
@@ -1,20 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INC_RA_H
#define __INC_RA_H
-/*++
-Copyright (c) Realtek Semiconductor Corp. All rights reserved.
-
-Module Name:
- RateAdaptive.h
-
-Abstract:
- Prototype of RA and related data structure.
-
-Major Change History:
- When Who What
- ---------- --------------- -------------------------------
- 2011-08-12 Page Create.
---*/
+/*
+ * Copyright (c) Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Module Name:
+ * RateAdaptive.h
+ *
+ * Abstract:
+ * Prototype of RA and related data structure.
+ *
+ * Major Change History:
+ * When Who What
+ * ---------- --------------- -------------------------------
+ * 2011-08-12 Page Create.
+ */
/* Rate adaptive define */
#define PERENTRY 23
diff --git a/drivers/staging/rtl8188eu/include/basic_types.h b/drivers/staging/rtl8188eu/include/basic_types.h
index 69c4d49f43ab..73cc86705cf3 100644
--- a/drivers/staging/rtl8188eu/include/basic_types.h
+++ b/drivers/staging/rtl8188eu/include/basic_types.h
@@ -1,4 +1,4 @@
-/******************************************************************************
+ /******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
@@ -20,14 +20,15 @@
/* port from fw */
/* TODO: Macros Below are Sync from SD7-Driver. It is necessary
- * to check correctness */
+ * to check correctness
+ */
/*
* Call endian free function when
* 1. Read/write packet content.
* 2. Before write integer to IO.
* 3. After read integer from IO.
-*/
+ */
/* Convert little data endian to host ordering */
#define EF1BYTE(_val) \
@@ -74,9 +75,10 @@
#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
(EF1BYTE(*((u8 *)(__pstart))))
-/*Description:
-Translate subfield (continuous bits in little-endian) of 4-byte
-value to host byte ordering.*/
+/* Description:
+ * Translate subfield (continuous bits in little-endian) of 4-byte
+ * value to host byte ordering.
+ */
#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
( \
(LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index c3517c0903ca..2734565ce802 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -128,7 +128,8 @@ struct dvobj_priv {
static inline struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
{
/* todo: get interface type from dvobj and the return
- * the dev accordingly */
+ * the dev accordingly
+ */
return &dvobj->pusbintf->dev;
};
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index dfdbd0254886..da4ee1561c36 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -96,9 +96,11 @@ enum hw_variables {
HW_VAR_FIFO_CLEARN_UP,
HW_VAR_CHECK_TXBUF,
HW_VAR_APFM_ON_MAC, /* Auto FSM to Turn On, include clock, isolation,
- * power control for MAC only */
+ * power control for MAC only
+ */
/* The valid upper nav range for the HW updating, if the true value is
- * larger than the upper range, the HW won't update it. */
+ * larger than the upper range, the HW won't update it.
+ */
/* Unit in microsecond. 0 means disable this function. */
HW_VAR_NAV_UPPER,
HW_VAR_RPT_TIMER_SETTING,
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index 284db7d00f50..9f480ccec531 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -202,9 +202,9 @@ enum NETWORK_TYPE {
#define IsSupportedTxCCK(NetType) \
((NetType) & (WIRELESS_11B) ? true : false)
#define IsSupportedTxOFDM(NetType) \
- ((NetType) & (WIRELESS_11G|WIRELESS_11A) ? true : false)
+ ((NetType) & (WIRELESS_11G | WIRELESS_11A) ? true : false)
#define IsSupportedTxMCS(NetType) \
- ((NetType) & (WIRELESS_11_24N|WIRELESS_11_5N) ? true : false)
+ ((NetType) & (WIRELESS_11_24N | WIRELESS_11_5N) ? true : false)
struct ieee_param {
@@ -276,12 +276,13 @@ struct sta_data {
#define IEEE80211_DATA_LEN 2304
/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
- 6.2.1.1.2.
+ * 6.2.1.1.2.
- The figure in section 7.1.2 suggests a body size of up to 2312
- bytes is allowed, which is a bit confusing, I suspect this
- represents the 2304 bytes of real data, plus a possible 8 bytes of
- WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
+ * The figure in section 7.1.2 suggests a body size of up to 2312
+ * bytes is allowed, which is a bit confusing, I suspect this
+ * represents the 2304 bytes of real data, plus a possible 8 bytes of
+ * WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro)
+ */
#define IEEE80211_HLEN 30
@@ -358,11 +359,11 @@ struct ieee80211_snap_hdr {
#define IEEE80211_DATA_HDR3_LEN 24
#define IEEE80211_DATA_HDR4_LEN 30
-#define IEEE80211_CCK_MODULATION (1<<0)
-#define IEEE80211_OFDM_MODULATION (1<<1)
+#define IEEE80211_CCK_MODULATION BIT(0)
+#define IEEE80211_OFDM_MODULATION BIT(1)
-#define IEEE80211_24GHZ_BAND (1<<0)
-#define IEEE80211_52GHZ_BAND (1<<1)
+#define IEEE80211_24GHZ_BAND BIT(0)
+#define IEEE80211_52GHZ_BAND BIT(1)
#define IEEE80211_CCK_RATE_LEN 4
#define IEEE80211_NUM_OFDM_RATESLEN 8
@@ -383,18 +384,18 @@ struct ieee80211_snap_hdr {
#define IEEE80211_OFDM_RATE_54MB 0x6C
#define IEEE80211_BASIC_RATE_MASK 0x80
-#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
-#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
-#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
-#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
-#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
-#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
-#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
-#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
-#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
-#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
-#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
-#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
+#define IEEE80211_CCK_RATE_1MB_MASK BIT(0)
+#define IEEE80211_CCK_RATE_2MB_MASK BIT(1)
+#define IEEE80211_CCK_RATE_5MB_MASK BIT(2)
+#define IEEE80211_CCK_RATE_11MB_MASK BIT(3)
+#define IEEE80211_OFDM_RATE_6MB_MASK BIT(4)
+#define IEEE80211_OFDM_RATE_9MB_MASK BIT(5)
+#define IEEE80211_OFDM_RATE_12MB_MASK BIT(6)
+#define IEEE80211_OFDM_RATE_18MB_MASK BIT(7)
+#define IEEE80211_OFDM_RATE_24MB_MASK BIT(8)
+#define IEEE80211_OFDM_RATE_36MB_MASK BIT(9)
+#define IEEE80211_OFDM_RATE_48MB_MASK BIT(10)
+#define IEEE80211_OFDM_RATE_54MB_MASK BIT(11)
#define IEEE80211_CCK_RATES_MASK 0x0000000F
#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
@@ -423,18 +424,19 @@ struct ieee80211_snap_hdr {
/* IEEE 802.11 requires that STA supports concurrent reception of at least
* three fragmented frames. This define can be increased to support more
* concurrent frames, but it should be noted that each entry can consume about
- * 2 kB of RAM and increasing cache size will slow down frame reassembly. */
+ * 2 kB of RAM and increasing cache size will slow down frame reassembly.
+ */
#define IEEE80211_FRAG_CACHE_LEN 4
-#define SEC_KEY_1 (1<<0)
-#define SEC_KEY_2 (1<<1)
-#define SEC_KEY_3 (1<<2)
-#define SEC_KEY_4 (1<<3)
-#define SEC_ACTIVE_KEY (1<<4)
-#define SEC_AUTH_MODE (1<<5)
-#define SEC_UNICAST_GROUP (1<<6)
-#define SEC_LEVEL (1<<7)
-#define SEC_ENABLED (1<<8)
+#define SEC_KEY_1 BIT(0)
+#define SEC_KEY_2 BIT(1)
+#define SEC_KEY_3 BIT(2)
+#define SEC_KEY_4 BIT(3)
+#define SEC_ACTIVE_KEY BIT(4)
+#define SEC_AUTH_MODE BIT(5)
+#define SEC_UNICAST_GROUP BIT(6)
+#define SEC_LEVEL BIT(7)
+#define SEC_ENABLED BIT(8)
#define SEC_LEVEL_0 0 /* None */
#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
@@ -451,7 +453,8 @@ struct ieee80211_snap_hdr {
/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs
* only use 8, and then use extended rates for the remaining supported
* rates. Other APs, however, stick all of their supported rates on the
- * main rates information element... */
+ * main rates information element...
+ */
#define MAX_RATES_LENGTH ((u8)12)
#define MAX_RATES_EX_LENGTH ((u8)16)
#define MAX_NETWORK_COUNT 128
@@ -467,17 +470,17 @@ struct ieee80211_snap_hdr {
#define MAX_P2P_IE_LEN (256)
#define MAX_WFD_IE_LEN (128)
-#define NETWORK_EMPTY_ESSID (1<<0)
-#define NETWORK_HAS_OFDM (1<<1)
-#define NETWORK_HAS_CCK (1<<2)
+#define NETWORK_EMPTY_ESSID BIT(0)
+#define NETWORK_HAS_OFDM BIT(1)
+#define NETWORK_HAS_CCK BIT(2)
#define IW_ESSID_MAX_SIZE 32
/*
-join_res:
--1: authentication fail
--2: association fail
-> 0: TID
-*/
+ * join_res:
+ * -1: authentication fail
+ * -2: association fail
+ * > 0: TID
+ */
enum ieee80211_state {
/* the card is not linked at all */
@@ -531,15 +534,15 @@ static inline int is_broadcast_mac_addr(const u8 *addr)
(addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff);
}
-#define CFG_IEEE80211_RESERVE_FCS (1<<0)
-#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
+#define CFG_IEEE80211_RESERVE_FCS BIT(0)
+#define CFG_IEEE80211_COMPUTE_FCS BIT(1)
#define MAXTID 16
-#define IEEE_A (1<<0)
-#define IEEE_B (1<<1)
-#define IEEE_G (1<<2)
-#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
+#define IEEE_A BIT(0)
+#define IEEE_B BIT(1)
+#define IEEE_G BIT(2)
+#define IEEE_MODE_MASK (IEEE_A | IEEE_B | IEEE_G)
/* Action category code */
enum rtw_ieee80211_category {
@@ -615,7 +618,8 @@ enum rtw_ieee80211_back_parties {
};
#define OUI_MICROSOFT 0x0050f2 /* Microsoft (also used in Wi-Fi specs)
- * 00:50:F2 */
+ * 00:50:F2
+ */
#define WME_OUI_TYPE 2
#define WME_OUI_SUBTYPE_INFORMATION_ELEMENT 0
#define WME_OUI_SUBTYPE_PARAMETER_ELEMENT 1
@@ -655,12 +659,12 @@ enum rtw_ieee80211_back_parties {
* is not permitted.
*/
enum rtw_ieee80211_channel_flags {
- RTW_IEEE80211_CHAN_DISABLED = 1<<0,
- RTW_IEEE80211_CHAN_PASSIVE_SCAN = 1<<1,
- RTW_IEEE80211_CHAN_NO_IBSS = 1<<2,
- RTW_IEEE80211_CHAN_RADAR = 1<<3,
- RTW_IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
- RTW_IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
+ RTW_IEEE80211_CHAN_DISABLED = BIT(0),
+ RTW_IEEE80211_CHAN_PASSIVE_SCAN = BIT(1),
+ RTW_IEEE80211_CHAN_NO_IBSS = BIT(2),
+ RTW_IEEE80211_CHAN_RADAR = BIT(3),
+ RTW_IEEE80211_CHAN_NO_HT40PLUS = BIT(4),
+ RTW_IEEE80211_CHAN_NO_HT40MINUS = BIT(5),
};
#define RTW_IEEE80211_CHAN_NO_HT40 \
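BIT(n) comes from <linux/bitops.h> and expands to (1UL << (n)), so the definitions above keep the same bit positions; the only difference from the old (1<<n) spelling is the unsigned long promotion. A tiny sketch with a hypothetical test helper:

#include <linux/bitops.h>	/* BIT(nr), defined as (1UL << (nr)) */

static inline bool rtw_chan_is_disabled(u32 flags)
{
	/* RTW_IEEE80211_CHAN_DISABLED is BIT(0) after this change,
	 * numerically identical to the former (1<<0).
	 */
	return flags & RTW_IEEE80211_CHAN_DISABLED;
}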
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 4fb3bb07ceaa..95426b7c6dbf 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -36,7 +36,8 @@
/* Mainly, it just retains last scan result and scan again. */
/* After that, it compares the scan result to see which one gets better
* RSSI. It selects antenna with better receiving power and returns better
- * scan result. */
+ * scan result.
+ */
#define TP_MODE 0
#define RSSI_MODE 1
@@ -173,10 +174,12 @@ struct rx_hpc {
/* This indicates two different steps. */
/* In SWAW_STEP_PEAK, driver needs to switch antenna and listen to
- * the signal on the air. */
+ * the signal on the air.
+ */
/* In SWAW_STEP_DETERMINE, driver just compares the signal captured in
* SWAW_STEP_PEAK with original RSSI to determine if it is necessary to
- * switch antenna. */
+ * switch antenna.
+ */
#define SWAW_STEP_PEAK 0
#define SWAW_STEP_DETERMINE 1
@@ -265,7 +268,8 @@ struct odm_phy_status_info {
s8 RxPower; /* in dBm Translate from PWdB */
s8 RecvSignalPower;/* Real power in dBm for this packet, no
* beautification and aggregation. Keep this raw
- * info to be used for the other procedures. */
+ * info to be used for the other procedures.
+ */
u8 BTRxRSSIPercentage;
u8 SignalStrength; /* in 0-100 index. */
u8 RxPwr[MAX_PATH_NUM_92CS];/* per-path's pwdb */
@@ -478,7 +482,7 @@ enum odm_operation_mode {
/* ODM_CMNINFO_WM_MODE */
enum odm_wireless_mode {
- ODM_WM_UNKNOW = 0x0,
+ ODM_WM_UNKNOWN = 0x0,
ODM_WM_B = BIT(0),
ODM_WM_G = BIT(1),
ODM_WM_A = BIT(2),
@@ -509,7 +513,7 @@ enum odm_security {
ODM_SEC_RESERVE = 3,
ODM_SEC_AESCCMP = 4,
ODM_SEC_WEP104 = 5,
- ODM_WEP_WPA_MIXED = 6, /* WEP + WPA */
+ ODM_WEP_WPA_MIXED = 6, /* WEP + WPA */
ODM_SEC_SMS4 = 7,
};
@@ -567,7 +571,8 @@ struct odm_ra_info {
u8 PTPreRssi; /* if RSSI change 5% do PT */
u8 PTModeSS; /* decide whitch rate should do PT */
u8 RAstage; /* StageRA, decide how many times RA will be done
- * between PT */
+ * between PT
+ */
u8 PTSmoothFactor;
};
@@ -587,12 +592,14 @@ struct odm_rf_cal {
u8 TXPowercount;
bool bTXPowerTracking;
u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking
- * as default */
+ * as default
+ */
u8 TM_Trigger;
u8 InternalPA5G[2]; /* pathA / pathB */
u8 ThermalMeter[2]; /* ThermalMeter, index 0 for RFIC0,
- * and 1 for RFIC1 */
+ * and 1 for RFIC1
+ */
u8 ThermalValue;
u8 ThermalValue_LCK;
u8 ThermalValue_IQK;
@@ -688,7 +695,7 @@ enum ant_div_type {
/* Copy from SD4 defined structure. We use to support PHY DM integration. */
struct odm_dm_struct {
- /* Add for different team use temporarily */
+ /* Add for different team use temporarily */
struct adapter *Adapter; /* For CE/NIC team */
struct rtl8192cd_priv *priv; /* For AP/ADSL team */
/* WHen you use above pointers, they must be initialized. */
@@ -714,7 +721,8 @@ struct odm_dm_struct {
/* ODM PCIE/USB/SDIO/GSPI = 0/1/2/3 */
u8 SupportInterface;
/* ODM composite or independent. Bit oriented/ 92C+92D+ .... or any
- * other type = 1/2/3/... */
+ * other type = 1/2/3/...
+ */
u32 SupportICType;
/* Cut Version TestChip/A-cut/B-cut... = 0/1/2/3/... */
u8 CutVersion;
@@ -788,19 +796,21 @@ struct odm_dm_struct {
bool bBtHsOperation; /* BT HS mode is under progress */
u8 btHsDigVal; /* use BT rssi to decide the DIG value */
bool bBtDisableEdcaTurbo;/* Under some condition, don't enable the
- * EDCA Turbo */
+ * EDCA Turbo
+ */
bool bBtBusy; /* BT is busy. */
/* CALL BY VALUE------------- */
/* 2 Define STA info. */
/* _ODM_STA_INFO */
- /* For MP, we need to reduce one array pointer for default port.?? */
+ /* For MP, we need to reduce one array pointer for default port.??*/
struct sta_info *pODM_StaInfo[ODM_ASSOCIATE_ENTRY_NUM];
u16 CurrminRptTime;
struct odm_ra_info RAInfo[ODM_ASSOCIATE_ENTRY_NUM]; /* Use MacID as
- * array index. STA MacID=0,
- * VWiFi Client MacID={1, ODM_ASSOCIATE_ENTRY_NUM-1} */
+ * array index. STA MacID=0,
+ * VWiFi Client MacID={1, ODM_ASSOCIATE_ENTRY_NUM-1}
+ */
/* */
/* 2012/02/14 MH Add to share 88E ra with other SW team. */
/* We need to colelct all support abilit to a proper area. */
@@ -1029,9 +1039,11 @@ extern u8 CCKSwingTable_Ch14[CCK_TABLE_SIZE][8];
/* 20100514 Joseph: Add definition for antenna switching test after link. */
/* This indicates two different the steps. */
/* In SWAW_STEP_PEAK, driver needs to switch antenna and listen to the
- * signal on the air. */
+ * signal on the air.
+ */
/* In SWAW_STEP_DETERMINE, driver just compares the signal captured in
- * SWAW_STEP_PEAK */
+ * SWAW_STEP_PEAK
+ */
/* with original RSSI to determine if it is necessary to switch antenna. */
#define SWAW_STEP_PEAK 0
#define SWAW_STEP_DETERMINE 1
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
index 97d3d8504184..f1fb3d511a45 100644
--- a/drivers/staging/rtl8188eu/include/osdep_intf.h
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -26,7 +26,7 @@ u8 rtw_init_drv_sw(struct adapter *padapter);
u8 rtw_free_drv_sw(struct adapter *padapter);
u8 rtw_reset_drv_sw(struct adapter *padapter);
-void rtw_stop_drv_threads (struct adapter *padapter);
+void rtw_stop_drv_threads(struct adapter *padapter);
void rtw_cancel_all_timer(struct adapter *padapter);
int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
diff --git a/drivers/staging/rtl8188eu/include/pwrseq.h b/drivers/staging/rtl8188eu/include/pwrseq.h
index addf90b60ce9..bd77a50c0d41 100644
--- a/drivers/staging/rtl8188eu/include/pwrseq.h
+++ b/drivers/staging/rtl8188eu/include/pwrseq.h
@@ -20,28 +20,28 @@
#include "pwrseqcmd.h"
/*
- Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd
- There are 6 HW Power States:
- 0: POFF--Power Off
- 1: PDN--Power Down
- 2: CARDEMU--Card Emulation
- 3: ACT--Active Mode
- 4: LPS--Low Power State
- 5: SUS--Suspend
-
- The transition from different states are defined below
- TRANS_CARDEMU_TO_ACT
- TRANS_ACT_TO_CARDEMU
- TRANS_CARDEMU_TO_SUS
- TRANS_SUS_TO_CARDEMU
- TRANS_CARDEMU_TO_PDN
- TRANS_ACT_TO_LPS
- TRANS_LPS_TO_ACT
-
- TRANS_END
-
- PWR SEQ Version: rtl8188E_PwrSeq_V09.h
-*/
+ * Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd
+ * There are 6 HW Power States:
+ * 0: POFF--Power Off
+ * 1: PDN--Power Down
+ * 2: CARDEMU--Card Emulation
+ * 3: ACT--Active Mode
+ * 4: LPS--Low Power State
+ * 5: SUS--Suspend
+ *
+ * The transition from different states are defined below
+ * TRANS_CARDEMU_TO_ACT
+ * TRANS_ACT_TO_CARDEMU
+ * TRANS_CARDEMU_TO_SUS
+ * TRANS_SUS_TO_CARDEMU
+ * TRANS_CARDEMU_TO_PDN
+ * TRANS_ACT_TO_LPS
+ * TRANS_LPS_TO_ACT
+ *
+ * TRANS_END
+ *
+ * PWR SEQ Version: rtl8188E_PwrSeq_V09.h
+ */
#define RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS 10
#define RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS 10
#define RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS 10
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 9330361da4ad..b4b5e217105a 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -42,7 +42,7 @@
#define RTL8188E_PHY_REG_PG "rtl8188E\\PHY_REG_PG.txt"
#define RTL8188E_PHY_REG_MP "rtl8188E\\PHY_REG_MP.txt"
-/* RTL8188E Power Configuration CMDs for USB/SDIO interfaces */
+/* RTL8188E Power Configuration CMDs for USB/SDIO interfaces */
#define Rtl8188E_NIC_PWR_ON_FLOW rtl8188E_power_on_flow
#define Rtl8188E_NIC_RF_OFF_FLOW rtl8188E_radio_off_flow
#define Rtl8188E_NIC_DISABLE_FLOW rtl8188E_card_disable_flow
@@ -81,7 +81,8 @@ enum usb_rx_agg_mode {
#define MAX_RX_DMA_BUFFER_SIZE_88E \
0x2400 /* 9k for 88E nornal chip , MaxRxBuff=10k-max(TxReportSize(64*8),
- * WOLPattern(16*24)) */
+ * WOLPattern(16*24))
+ */
#define MAX_TX_REPORT_BUFFER_SIZE 0x0400 /* 1k */
@@ -94,11 +95,13 @@ enum usb_rx_agg_mode {
#define TX_SELE_NQ BIT(2) /* Normal Queue */
/* Note: We will divide number of page equally for each queue other
- * than public queue! */
+ * than public queue!
+ */
/* 22k = 22528 bytes = 176 pages (@page = 128 bytes) */
/* must reserved about 7 pages for LPS => 176-7 = 169 (0xA9) */
/* 2*BCN / 1*ps-poll / 1*null-data /1*prob_rsp /1*QOS null-data /1*BT QOS
- * null-data */
+ * null-data
+ */
#define TX_TOTAL_PAGE_NUMBER_88E 0xA9/* 169 (21632=> 21k) */
@@ -110,7 +113,7 @@ enum usb_rx_agg_mode {
#define WMM_NORMAL_TX_PAGE_BOUNDARY_88E \
(WMM_NORMAL_TX_TOTAL_PAGE_NUMBER + 1) /* 0xA9 */
-/* Chip specific */
+/* Chip specific */
#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
#define CHIP_BONDING_92C_1T2R 0x1
#define CHIP_BONDING_88C_USB_MCARD 0x2
@@ -118,7 +121,7 @@ enum usb_rx_agg_mode {
#include "HalVerDef.h"
#include "hal_com.h"
-/* Channel Plan */
+/* Channel Plan */
enum ChannelPlan {
CHPL_FCC = 0,
CHPL_IC = 1,
@@ -168,7 +171,8 @@ struct txpowerinfo24g {
#define AVAILABLE_EFUSE_ADDR_88E(addr) \
(addr < EFUSE_REAL_CONTENT_LEN_88E)
/* To prevent out of boundary programming case, leave 1byte and program
- * full section */
+ * full section
+ */
/* 9bytes + 1byt + 5bytes and pre 1byte. */
/* For worst case: */
/* | 2byte|----8bytes----|1byte|--7bytes--| 92D */
@@ -176,7 +180,7 @@ struct txpowerinfo24g {
#define EFUSE_OOB_PROTECT_BYTES_88E 18
#define EFUSE_PROTECT_BYTES_BANK_88E 16
-/* EFUSE for BT definition */
+/* EFUSE for BT definition */
#define EFUSE_BT_REAL_CONTENT_LEN 1536 /* 512*3 */
#define EFUSE_BT_MAP_LEN 1024 /* 1k bytes */
#define EFUSE_BT_MAX_SECTION 128 /* 1024/8 */
@@ -293,7 +297,8 @@ struct hal_data_8188e {
u8 bDumpRxPkt;/* for debug */
u8 bDumpTxPkt;/* for debug */
u8 FwRsvdPageStartOffset; /* Reserve page start offset except
- * beacon in TxQ. */
+ * beacon in TxQ.
+ */
/* 2010/08/09 MH Add CU power down mode. */
bool pwrdown;
@@ -307,7 +312,8 @@ struct hal_data_8188e {
u16 EfuseUsedBytes;
/* Auto FSM to Turn On, include clock, isolation, power control
- * for MAC only */
+ * for MAC only
+ */
u8 bMacPwrCtrlOn;
u32 UsbBulkOutSize;
@@ -324,7 +330,8 @@ struct hal_data_8188e {
enum usb_rx_agg_mode UsbRxAggMode;
u8 UsbRxAggBlockCount; /* USB Block count. Block size is
* 512-byte in high speed and 64-byte
- * in full speed */
+ * in full speed
+ */
u8 UsbRxAggBlockTimeout;
u8 UsbRxAggPageCount; /* 8192C DMA page count */
u8 UsbRxAggPageTimeout;
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
index c93e19d1c50f..71e2b817e20a 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -15,7 +15,7 @@
#ifndef __RTL8188E_SPEC_H__
#define __RTL8188E_SPEC_H__
-/* 8192C Regsiter offset definition */
+/* 8192C Register offset definition */
#define HAL_PS_TIMER_INT_DELAY 50 /* 50 microseconds */
#define HAL_92C_NAV_UPPER_UNIT 128 /* micro-second */
@@ -62,12 +62,15 @@
#define REG_HSIMR 0x0058
#define REG_HSISR 0x005c
#define REG_GPIO_PIN_CTRL_2 0x0060 /* RTL8723 WIFI/BT/GPS
- * Multi-Function GPIO Pin Control. */
+ * Multi-Function GPIO Pin Control.
+ */
#define REG_GPIO_IO_SEL_2 0x0062 /* RTL8723 WIFI/BT/GPS
- * Multi-Function GPIO Select. */
+ * Multi-Function GPIO Select.
+ */
#define REG_BB_PAD_CTRL 0x0064
#define REG_MULTI_FUNC_CTRL 0x0068 /* RTL8723 WIFI/BT/GPS
- * Multi-Function control source. */
+ * Multi-Function control source.
+ */
#define REG_GPIO_OUTPUT 0x006c
#define REG_AFE_XTAL_CTRL_EXT 0x0078 /* RTL8188E */
#define REG_XCK_OUT_CTRL 0x007c /* RTL8188E */
@@ -87,7 +90,8 @@
#define REG_HIMRE_88E 0x00B8
#define REG_HISRE_88E 0x00BC
#define REG_EFUSE_ACCESS 0x00CF /* Efuse access protection
- * for RTL8723 */
+ * for RTL8723
+ */
#define REG_BIST_SCAN 0x00D0
#define REG_BIST_RPT 0x00D4
#define REG_BIST_ROM_RPT 0x00D8
@@ -119,9 +123,9 @@
#define REG_FWISR 0x0134
#define REG_PKTBUF_DBG_CTRL 0x0140
#define REG_PKTBUF_DBG_ADDR (REG_PKTBUF_DBG_CTRL)
-#define REG_RXPKTBUF_DBG (REG_PKTBUF_DBG_CTRL+2)
-#define REG_TXPKTBUF_DBG (REG_PKTBUF_DBG_CTRL+3)
-#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2)
+#define REG_RXPKTBUF_DBG (REG_PKTBUF_DBG_CTRL + 2)
+#define REG_TXPKTBUF_DBG (REG_PKTBUF_DBG_CTRL + 3)
+#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL + 2)
#define REG_PKTBUF_DBG_DATA_L 0x0144
#define REG_PKTBUF_DBG_DATA_H 0x0148
@@ -252,21 +256,24 @@
#define REG_TXPAUSE 0x0522
#define REG_DIS_TXREQ_CLR 0x0523
#define REG_RD_CTRL 0x0524
-/* Format for offset 540h-542h: */
-/* [3:0]: TBTT prohibit setup in unit of 32us. The time for HW getting
- * beacon content before TBTT. */
-/* [7:4]: Reserved. */
-/* [19:8]: TBTT prohibit hold in unit of 32us. The time for HW holding
- * to send the beacon packet. */
-/* [23:20]: Reserved */
-/* Description: */
-/* | */
-/* |<--Setup--|--Hold------------>| */
-/* --------------|---------------------- */
-/* | */
-/* TBTT */
-/* Note: We cannot update beacon content to HW or send any AC packets during
- * the time between Setup and Hold. */
+/* Format for offset 540h-542h:
+ * [3:0]: TBTT prohibit setup in unit of 32us. The time for HW getting
+ * beacon content before TBTT.
+ *
+ * [7:4]: Reserved.
+ * [19:8]: TBTT prohibit hold in unit of 32us. The time for HW holding
+ * to send the beacon packet.
+ *
+ * [23:20]: Reserved
+ * Description:
+ * |
+ * |<--Setup--|--Hold------------>|
+ * --------------|----------------------
+ * |
+ * TBTT
+ * Note: We cannot update beacon content to HW or send any AC packets during
+ * the time between Setup and Hold.
+ */
#define REG_TBTT_PROHIBIT 0x0540
#define REG_RD_NAV_NXT 0x0544
#define REG_NAV_PROT_LEN 0x0546
@@ -340,13 +347,14 @@
#define RXERR_RPT_RST BIT(27)
#define _RXERR_RPT_SEL(type) ((type) << 28)
-/* Note: */
-/* The NAV upper value is very important to WiFi 11n 5.2.3 NAV test.
+/* Note:
+ * The NAV upper value is very important to WiFi 11n 5.2.3 NAV test.
* The default value is always too small, but the WiFi TestPlan test
* by 25,000 microseconds of NAV through sending CTS in the air.
* We must update this value greater than 25,000 microseconds to pass
* the item. The offset of NAV_UPPER in 8192C Spec is incorrect, and
- * the offset should be 0x0652. */
+ * the offset should be 0x0652.
+ */
#define REG_NAV_UPPER 0x0652 /* unit of 128 */
/* WMA, BA, CCX */
@@ -453,11 +461,12 @@
/* GPIO pins input value */
#define GPIO_IN REG_GPIO_PIN_CTRL
/* GPIO pins output value */
-#define GPIO_OUT (REG_GPIO_PIN_CTRL+1)
+#define GPIO_OUT (REG_GPIO_PIN_CTRL + 1)
/* GPIO pins output enable when a bit is set to "1"; otherwise,
- * input is configured. */
-#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2)
-#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
+ * input is configured.
+ */
+#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL + 2)
+#define GPIO_MOD (REG_GPIO_PIN_CTRL + 3)
/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */
#define HSIMR_GPIO12_0_INT_EN BIT(0)
@@ -475,13 +484,13 @@
/* 8192C (MSR) Media Status Register (Offset 0x4C, 8 bits) */
/*
-Network Type
-00: No link
-01: Link in ad hoc network
-10: Link in infrastructure network
-11: AP mode
-Default: 00b.
-*/
+ * Network Type
+ * 00: No link
+ * 01: Link in ad hoc network
+ * 10: Link in infrastructure network
+ * 11: AP mode
+ * Default: 00b.
+ */
#define MSR_NOLINK 0x00
#define MSR_ADHOC 0x01
#define MSR_INFRA 0x02
@@ -635,26 +644,27 @@ So the following defines for 92C is not entire!!!!!!
=====================================================================
=====================================================================*/
/*
-Based on Datasheet V33---090401
-Register Summary
-Current IOREG MAP
-0x0000h ~ 0x00FFh System Configuration (256 Bytes)
-0x0100h ~ 0x01FFh MACTOP General Configuration (256 Bytes)
-0x0200h ~ 0x027Fh TXDMA Configuration (128 Bytes)
-0x0280h ~ 0x02FFh RXDMA Configuration (128 Bytes)
-0x0300h ~ 0x03FFh PCIE EMAC Reserved Region (256 Bytes)
-0x0400h ~ 0x04FFh Protocol Configuration (256 Bytes)
-0x0500h ~ 0x05FFh EDCA Configuration (256 Bytes)
-0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
-0x2000h ~ 0x3FFFh 8051 FW Download Region (8196 Bytes)
-*/
+ * Based on Datasheet V33---090401
+ * Register Summary
+ * Current IOREG MAP
+ * 0x0000h ~ 0x00FFh System Configuration (256 Bytes)
+ * 0x0100h ~ 0x01FFh MACTOP General Configuration (256 Bytes)
+ * 0x0200h ~ 0x027Fh TXDMA Configuration (128 Bytes)
+ * 0x0280h ~ 0x02FFh RXDMA Configuration (128 Bytes)
+ * 0x0300h ~ 0x03FFh PCIE EMAC Reserved Region (256 Bytes)
+ * 0x0400h ~ 0x04FFh Protocol Configuration (256 Bytes)
+ * 0x0500h ~ 0x05FFh EDCA Configuration (256 Bytes)
+ * 0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
+ * 0x2000h ~ 0x3FFFh 8051 FW Download Region (8196 Bytes)
+ */
/* 8192C (TXPAUSE) transmission pause (Offset 0x522, 8 bits) */
-/* Note: */
-/* The bits of stopping AC(VO/VI/BE/BK) queue in datasheet
- * RTL8192S/RTL8192C are wrong, */
-/* the correct arragement is VO - Bit0, VI - Bit1, BE - Bit2,
- * and BK - Bit3. */
-/* 8723 and 88E may be not correct either in the earlier version. */
+/* Note:
+ * The bits of stopping AC(VO/VI/BE/BK) queue in datasheet
+ * RTL8192S/RTL8192C are wrong,
+ * the correct arragement is VO - Bit0, VI - Bit1, BE - Bit2,
+ * and BK - Bit3.
+ * 8723 and 88E may be not correct either in the earlier version.
+ */
#define StopBecon BIT(6)
#define StopHigh BIT(5)
#define StopMgt BIT(4)
@@ -680,7 +690,8 @@ Current IOREG MAP
#define RCR_AICV BIT(9) /* Accept ICV error packet */
#define RCR_ACRC32 BIT(8) /* Accept CRC32 error packet */
#define RCR_CBSSID_BCN BIT(7) /* Accept BSSID match packet
- * (Rx beacon, probe rsp) */
+ * (Rx beacon, probe rsp)
+ */
#define RCR_CBSSID_DATA BIT(6) /* Accept BSSID match (Data)*/
#define RCR_CBSSID RCR_CBSSID_DATA /* Accept BSSID match */
#define RCR_APWRMGT BIT(5) /* Accept power management pkt*/
@@ -701,7 +712,7 @@ Current IOREG MAP
#define REG_USB_HRPWM 0xFE58
#define REG_USB_HCPWM 0xFE57
-/* 8192C Regsiter Bit and Content definition */
+/* 8192C Register Bit and Content definition */
/* 0x0000h ~ 0x00FFh System Configuration */
/* 2 SYS_ISO_CTRL */
@@ -798,7 +809,7 @@ Current IOREG MAP
/* 2 EFUSE_TEST (For RTL8723 partially) */
#define EF_TRPT BIT(7)
/* 00: Wifi Efuse, 01: BT Efuse0, 10: BT Efuse1, 11: BT Efuse2 */
-#define EF_CELL_SEL (BIT(8)|BIT(9))
+#define EF_CELL_SEL (BIT(8) | BIT(9))
#define LDOE25_EN BIT(31)
#define EFUSE_SEL(x) (((x) & 0x3) << 8)
#define EFUSE_SEL_MASK 0x300
@@ -835,7 +846,7 @@ Current IOREG MAP
#define BD_MAC2 BIT(9)
#define BD_MAC1 BIT(10)
#define IC_MACPHY_MODE BIT(11)
-#define CHIP_VER (BIT(12)|BIT(13)|BIT(14)|BIT(15))
+#define CHIP_VER (BIT(12) | BIT(13) | BIT(14) | BIT(15))
#define BT_FUNC BIT(16)
#define VENDOR_ID BIT(19)
#define PAD_HWPD_IDN BIT(22)
@@ -849,9 +860,9 @@ Current IOREG MAP
#define CHIP_VER_RTL_SHIFT 12
/* 2REG_GPIO_OUTSTS (For RTL8723 only) */
-#define EFS_HCI_SEL (BIT(0)|BIT(1))
-#define PAD_HCI_SEL (BIT(2)|BIT(3))
-#define HCI_SEL (BIT(4)|BIT(5))
+#define EFS_HCI_SEL (BIT(0) | BIT(1))
+#define PAD_HCI_SEL (BIT(2) | BIT(3))
+#define HCI_SEL (BIT(4) | BIT(5))
#define PKG_SEL_HCI BIT(6)
#define FEN_GPS BIT(7)
#define FEN_BT BIT(8)
@@ -868,7 +879,7 @@ Current IOREG MAP
#define UPHY_SUSB BIT(21)
#define PCI_SUSEN BIT(22)
#define USB_SUSEN BIT(23)
-#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28))
+#define RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28))
/* 2SYS_CFG */
#define RTL_ID BIT(23) /* TestChip ID, 1:Test(RLE); 0:MP(RL) */
@@ -931,12 +942,12 @@ Current IOREG MAP
#define HQSEL_HIQ BIT(5)
/* For normal driver, 0x10C */
-#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
-#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
-#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
-#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8)
-#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6)
-#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4)
+#define _TXDMA_HIQ_MAP(x) (((x) & 0x3) << 14)
+#define _TXDMA_MGQ_MAP(x) (((x) & 0x3) << 12)
+#define _TXDMA_BKQ_MAP(x) (((x) & 0x3) << 10)
+#define _TXDMA_BEQ_MAP(x) (((x) & 0x3) << 8)
+#define _TXDMA_VIQ_MAP(x) (((x) & 0x3) << 6)
+#define _TXDMA_VOQ_MAP(x) (((x) & 0x3) << 4)
#define QUEUE_LOW 1
#define QUEUE_NORMAL 2
@@ -1242,10 +1253,12 @@ Current IOREG MAP
/* 2REG_C2HEVT_CLEAR */
/* Set by driver and notify FW that the driver has read
- * the C2H command message */
+ * the C2H command message
+ */
#define C2H_EVT_HOST_CLOSE 0x00
/* Set by FW indicating that FW had set the C2H command
- * message and it's not yet read by driver. */
+ * message and it's not yet read by driver.
+ */
#define C2H_EVT_FW_CLOSE 0xFF
/* 2REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
index f79feeb4e38f..2c026bf6fecb 100644
--- a/drivers/staging/rtl8188eu/include/rtw_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -97,13 +97,13 @@ enum RFINTFS {
};
/*
-Caller Mode: Infra, Ad-HoC(C)
-
-Notes: To disconnect the current associated BSS
-
-Command Mode
-
-*/
+ * Caller Mode: Infra, Ad-HoC(C)
+ *
+ * Notes: To disconnect the current associated BSS
+ *
+ * Command Mode
+ *
+ */
struct disconnect_parm {
u32 deauth_timeout_ms;
};
@@ -114,13 +114,13 @@ struct setopmode_parm {
};
/*
-Caller Mode: AP, Ad-HoC, Infra
-
-Notes: To ask RTL8711 performing site-survey
-
-Command-Event Mode
-
-*/
+ * Caller Mode: AP, Ad-HoC, Infra
+ *
+ * Notes: To ask RTL8711 performing site-survey
+ *
+ * Command-Event Mode
+ *
+ */
#define RTW_SSID_SCAN_AMOUNT 9 /* for WEXT_CSCAN_AMOUNT 9 */
#define RTW_CHANNEL_SCAN_AMOUNT (14+37)
@@ -133,13 +133,13 @@ struct sitesurvey_parm {
};
/*
-Caller Mode: Any
-
-Notes: To set the auth type of RTL8711. open/shared/802.1x
-
-Command Mode
-
-*/
+ * Caller Mode: Any
+ *
+ * Notes: To set the auth type of RTL8711. open/shared/802.1x
+ *
+ * Command Mode
+ *
+ */
struct setauth_parm {
u8 mode; /* 0: legacy open, 1: legacy shared 2: 802.1x */
u8 _1x; /* 0: PSK, 1: TLS */
@@ -147,40 +147,42 @@ struct setauth_parm {
};
/*
-Caller Mode: Infra
-
-a. algorithm: wep40, wep104, tkip & aes
-b. keytype: grp key/unicast key
-c. key contents
-
-when shared key ==> keyid is the camid
-when 802.1x ==> keyid [0:1] ==> grp key
-when 802.1x ==> keyid > 2 ==> unicast key
-
-*/
+ * Caller Mode: Infra
+ *
+ * a. algorithm: wep40, wep104, tkip & aes
+ * b. keytype: grp key/unicast key
+ * c. key contents
+ *
+ * when shared key ==> keyid is the camid
+ * when 802.1x ==> keyid [0:1] ==> grp key
+ * when 802.1x ==> keyid > 2 ==> unicast key
+ *
+ */
struct setkey_parm {
u8 algorithm; /* could be none, wep40, TKIP, CCMP, wep104 */
u8 keyid;
u8 grpkey; /* 1: this is the grpkey for 802.1x.
- * 0: this is the unicast key for 802.1x */
+ * 0: this is the unicast key for 802.1x
+ */
u8 set_tx; /* 1: main tx key for wep. 0: other key. */
u8 key[16]; /* this could be 40 or 104 */
};
/*
-When in AP or Ad-Hoc mode, this is used to
-allocate an sw/hw entry for a newly associated sta.
-
-Command
-
-when shared key ==> algorithm/keyid
-
-*/
+ * When in AP or Ad-Hoc mode, this is used to
+ * allocate an sw/hw entry for a newly associated sta.
+ *
+ * Command
+ *
+ * when shared key ==> algorithm/keyid
+ *
+ */
struct set_stakey_parm {
u8 addr[ETH_ALEN];
u8 algorithm;
u8 id;/* currently for erasing cam entry if
- * algorithm == _NO_PRIVACY_ */
+ * algorithm == _NO_PRIVACY_
+ */
u8 key[16];
};
@@ -191,15 +193,15 @@ struct set_stakey_rsp {
};
/*
-Caller Ad-Hoc/AP
-
-Command -Rsp(AID == CAMID) mode
-
-This is to force fw to add an sta_data entry per driver's request.
-
-FW will write an cam entry associated with it.
-
-*/
+ * Caller Ad-Hoc/AP
+ *
+ * Command -Rsp(AID == CAMID) mode
+ *
+ * This is to force fw to add an sta_data entry per driver's request.
+ *
+ * FW will write an cam entry associated with it.
+ *
+ */
struct set_assocsta_parm {
u8 addr[ETH_ALEN];
};
@@ -210,55 +212,57 @@ struct set_assocsta_rsp {
};
/*
- Notes: This command is used for H2C/C2H loopback testing
-
- mac[0] == 0
- ==> CMD mode, return H2C_SUCCESS.
- The following condition must be true under CMD mode
- mac[1] == mac[4], mac[2] == mac[3], mac[0]=mac[5]= 0;
- s0 == 0x1234, s1 == 0xabcd, w0 == 0x78563412, w1 == 0x5aa5def7;
- s2 == (b1 << 8 | b0);
-
- mac[0] == 1
- ==> CMD_RSP mode, return H2C_SUCCESS_RSP
-
- The rsp layout shall be:
- rsp: parm:
- mac[0] = mac[5];
- mac[1] = mac[4];
- mac[2] = mac[3];
- mac[3] = mac[2];
- mac[4] = mac[1];
- mac[5] = mac[0];
- s0 = s1;
- s1 = swap16(s0);
- w0 = swap32(w1);
- b0 = b1
- s2 = s0 + s1
- b1 = b0
- w1 = w0
-
- mac[0] == 2
- ==> CMD_EVENT mode, return H2C_SUCCESS
- The event layout shall be:
- event: parm:
- mac[0] = mac[5];
- mac[1] = mac[4];
- mac[2] = event's seq no, starting from 1 to parm's marc[3]
- mac[3] = mac[2];
- mac[4] = mac[1];
- mac[5] = mac[0];
- s0 = swap16(s0) - event.mac[2];
- s1 = s1 + event.mac[2];
- w0 = swap32(w0);
- b0 = b1
- s2 = s0 + event.mac[2]
- b1 = b0
- w1 = swap32(w1) - event.mac[2];
-
- parm->mac[3] is the total event counts that host requested.
- event will be the same with the cmd's param.
-*/
+ * Notes: This command is used for H2C/C2H loopback testing
+ *
+ * mac[0] == 0
+ * ==> CMD mode, return H2C_SUCCESS.
+ * The following condition must be true under CMD mode
+ * mac[1] == mac[4], mac[2] == mac[3], mac[0]=mac[5]= 0;
+ * s0 == 0x1234, s1 == 0xabcd, w0 == 0x78563412, w1 == 0x5aa5def7;
+ * s2 == (b1 << 8 | b0);
+ *
+ * mac[0] == 1
+ * ==> CMD_RSP mode, return H2C_SUCCESS_RSP
+ *
+ * The rsp layout shall be:
+ * rsp: parm:
+ * mac[0] = mac[5];
+ * mac[1] = mac[4];
+ * mac[2] = mac[3];
+ * mac[3] = mac[2];
+ * mac[4] = mac[1];
+ * mac[5] = mac[0];
+ * s0 = s1;
+ * s1 = swap16(s0);
+ * w0 = swap32(w1);
+ * b0 = b1
+ * s2 = s0 + s1
+ * b1 = b0
+ * w1 = w0
+ *
+ * mac[0] == 2
+ * ==> CMD_EVENT mode, return H2C_SUCCESS
+ * The event layout shall be:
+ * event: parm:
+ * mac[0] = mac[5];
+ * mac[1] = mac[4];
+ * mac[2] = event's seq no, starting from 1 to parm's marc[3]
+ * mac[3] = mac[2];
+ * mac[4] = mac[1];
+ * mac[5] = mac[0];
+ * s0 = swap16(s0) - event.mac[2];
+ * s1 = s1 + event.mac[2];
+ * w0 = swap32(w0);
+ * b0 = b1
+ * s2 = s0 + event.mac[2]
+ * b1 = b0
+ * w1 = swap32(w1) - event.mac[2];
+ *
+ * parm->mac[3] is the total event counts that host requested.
+ * event will be the same with the cmd's param.
+ */
/* CMD param Format for driver extra cmd handler */
struct drvextra_cmd_parm {
@@ -285,15 +289,15 @@ struct SetChannelPlan_param {
};
/*
-
-Result:
-0x00: success
-0x01: success, and check Response.
-0x02: cmd ignored due to duplicated sequcne number
-0x03: cmd dropped due to invalid cmd code
-0x04: reserved.
-
-*/
+ *
+ * Result:
+ * 0x00: success
+ * 0x01: success, and check Response.
+ * 0x02: cmd ignored due to duplicated sequcne number
+ * 0x03: cmd dropped due to invalid cmd code
+ * 0x04: reserved.
+ *
+ */
#define H2C_SUCCESS 0x00
#define H2C_SUCCESS_RSP 0x01
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
index 9cc4b8c7c166..4873ba49900c 100644
--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -106,9 +106,9 @@ extern u32 GlobalDebugLevel;
(((__i + 1) % 4) == 0) ? \
" " : " "); \
if (((__i + 1) % 16) == 0) \
- printk("\n"); \
+ pr_cont("\n"); \
} \
- printk("\n"); \
+ pr_cont("\n"); \
} \
} while (0)
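
The two changed lines above swap bare printk() continuations for pr_cont(), so the hex fragments stay attached to the line opened with KERN_DEBUG instead of starting new log records. A minimal sketch of the same pattern, outside this driver (dump_hex() and its arguments are hypothetical):

#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical helper, not part of the driver: dump 'len' bytes of 'buf',
 * 16 per line.  pr_cont() appends to the record started by printk(KERN_DEBUG)
 * rather than opening a new one with the default loglevel.
 */
static void dump_hex(const u8 *buf, size_t len)
{
	size_t i;

	printk(KERN_DEBUG "dump:");
	for (i = 0; i < len; i++) {
		pr_cont(" %02x", buf[i]);
		if ((i + 1) % 16 == 0 && i + 1 < len)
			printk(KERN_DEBUG "dump:");	/* start a fresh line */
	}
	pr_cont("\n");
}
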
diff --git a/drivers/staging/rtl8188eu/include/rtw_eeprom.h b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
index 5dd73841dd9e..11d1cb6de506 100644
--- a/drivers/staging/rtl8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
@@ -55,7 +55,8 @@
/* This variable is initiailzed through EEPROM or registry, */
/* however, its definition may be different with that in EEPROM for */
/* EEPROM size consideration. So, we have to perform proper translation
- * between them. */
+ * between them.
+ */
/* Besides, CustomerID of registry has precedence of that of EEPROM. */
/* defined below. 060703, by rcnjko. */
enum RT_CUSTOMER_ID {
@@ -79,7 +80,8 @@ enum RT_CUSTOMER_ID {
RT_CID_819x_Sitecom = 17,
RT_CID_CCX = 18, /* It's set under CCX logo test and isn't demanded
* for CCX functions, but for test behavior like retry
- * limit and tx report. By Bruce, 2009-02-17. */
+ * limit and tx report. By Bruce, 2009-02-17.
+ */
RT_CID_819x_Lenovo = 19,
RT_CID_819x_QMI = 20,
RT_CID_819x_Edimax_Belkin = 21,
@@ -89,7 +91,8 @@ enum RT_CUSTOMER_ID {
RT_CID_819x_Acer = 25,
RT_CID_819x_AzWave_ASUS = 26,
RT_CID_819x_AzWave = 27, /* For AzWave in PCIe,i
- * The ID is AzWave use and not only Asus */
+ * The ID is AzWave use and not only Asus
+ */
RT_CID_819x_HP = 28,
RT_CID_819x_WNC_COREGA = 29,
RT_CID_819x_Arcadyan_Belkin = 30,
diff --git a/drivers/staging/rtl8188eu/include/rtw_event.h b/drivers/staging/rtl8188eu/include/rtw_event.h
index 1c5ebde97091..e798e794d962 100644
--- a/drivers/staging/rtl8188eu/include/rtw_event.h
+++ b/drivers/staging/rtl8188eu/include/rtw_event.h
@@ -22,42 +22,42 @@
#include <linux/sem.h>
/*
-Used to report a bss has been scanned
-*/
+ * Used to report a bss has been scanned
+ */
struct survey_event {
struct wlan_bssid_ex bss;
};
/*
-Used to report that the requested site survey has been done.
-
-bss_cnt indicates the number of bss that has been reported.
-
-
-*/
+ * Used to report that the requested site survey has been done.
+ *
+ * bss_cnt indicates the number of bss that has been reported.
+ *
+ *
+ */
struct surveydone_event {
unsigned int bss_cnt;
};
/*
-Used to report the link result of joinning the given bss
-
-
-join_res:
--1: authentication fail
--2: association fail
-> 0: TID
-
-*/
+ * Used to report the link result of joinning the given bss
+ *
+ *
+ * join_res:
+ * -1: authentication fail
+ * -2: association fail
+ * > 0: TID
+ *
+ */
struct joinbss_event {
struct wlan_network network;
};
/*
-Used to report a given STA has joinned the created BSS.
-It is used in AP/Ad-HoC(M) mode.
-*/
+ * Used to report a given STA has joinned the created BSS.
+ * It is used in AP/Ad-HoC(M) mode.
+ */
struct stassoc_event {
unsigned char macaddr[6];
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
index 607d1ba56a46..884e1397755a 100644
--- a/drivers/staging/rtl8188eu/include/rtw_led.h
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -58,7 +58,8 @@ struct LED_871x {
enum LED_STATE_871x CurrLedState; /* Current LED state. */
enum LED_STATE_871x BlinkingLedState; /* Next state for blinking,
- * either RTW_LED_ON or RTW_LED_OFF are. */
+ * either RTW_LED_ON or RTW_LED_OFF are.
+ */
u8 bLedOn; /* true if LED is ON, false if LED is OFF. */
@@ -75,7 +76,8 @@ struct LED_871x {
u8 bLedLinkBlinkInProgress;
u8 bLedScanBlinkInProgress;
struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to
- * manipulate H/W to blink LED. */
+ * manipulate H/W to blink LED.
+ */
};
#define IS_LED_WPS_BLINKING(_LED_871x) \
@@ -91,7 +93,6 @@ struct led_priv {
/* add for led control */
};
-void BlinkTimerCallback(unsigned long data);
void BlinkWorkItemCallback(struct work_struct *work);
void ResetLedStatus(struct LED_871x *pLed);
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 5c5d0ae8bdd1..e6d4175af3a2 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -70,25 +70,28 @@ enum rt_scan_type {
enum SCAN_RESULT_TYPE {
SCAN_RESULT_P2P_ONLY = 0, /* Will return all the P2P devices. */
SCAN_RESULT_ALL = 1, /* Will return all the scanned device,
- * include AP. */
+ * include AP.
+ */
SCAN_RESULT_WFD_TYPE = 2 /* Will just return the correct WFD
- * device. */
+ * device.
+ */
/* If this device is Miracast sink
* device, it will just return all the
- * Miracast source devices. */
+ * Miracast source devices.
+ */
};
/*
-there are several "locks" in mlme_priv,
-since mlme_priv is a shared resource between many threads,
-like ISR/Call-Back functions, the OID handlers, and even timer functions.
-
-Each _queue has its own locks, already.
-Other items are protected by mlme_priv.lock.
-
-To avoid possible dead lock, any thread trying to modifiying mlme_priv
-SHALL not lock up more than one lock at a time!
-*/
+ * there are several "locks" in mlme_priv,
+ * since mlme_priv is a shared resource between many threads,
+ * like ISR/Call-Back functions, the OID handlers, and even timer functions.
+ *
+ * Each _queue has its own locks, already.
+ * Other items are protected by mlme_priv.lock.
+ *
+ * To avoid possible dead lock, any thread trying to modifiying mlme_priv
+ * SHALL not lock up more than one lock at a time!
+ */
#define traffic_threshold 10
#define traffic_scan_period 500
@@ -102,9 +105,11 @@ struct rt_link_detect {
bool bRxBusyTraffic;
bool bHigherBusyTraffic; /* For interrupt migration purpose. */
bool bHigherBusyRxTraffic; /* We may disable Tx interrupt according
- * to Rx traffic. */
+ * to Rx traffic.
+ */
bool bHigherBusyTxTraffic; /* We may disable Tx interrupt according
- * to Tx traffic. */
+ * to Tx traffic.
+ */
};
struct mlme_priv {
@@ -164,7 +169,8 @@ struct mlme_priv {
#if defined(CONFIG_88EU_AP_MODE)
/* Number of associated Non-ERP stations (i.e., stations using 802.11b
- * in 802.11g BSS) */
+ * in 802.11g BSS)
+ */
int num_sta_non_erp;
/* Number of associated stations that do not support Short Slot Time */
@@ -325,10 +331,10 @@ void rtw_update_registrypriv_dev_network(struct adapter *adapter);
void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter);
-void _rtw_join_timeout_handler(unsigned long data);
-void rtw_scan_timeout_handler(unsigned long data);
+void _rtw_join_timeout_handler(struct timer_list *t);
+void rtw_scan_timeout_handler(struct timer_list *t);
-void rtw_dynamic_check_timer_handlder(unsigned long data);
+void rtw_dynamic_check_timer_handlder(struct timer_list *t);
#define rtw_is_scan_deny(adapter) false
#define rtw_clear_scan_deny(adapter) do {} while (0)
#define rtw_set_scan_deny_timer_hdl(adapter) do {} while (0)
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 1b1caaf583c9..118bf5509d97 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -101,9 +101,11 @@ extern unsigned char WMM_PARA_OUI[];
/* Channel Plan Type. */
/* Note: */
/* We just add new channel plan when the new channel plan is different
- * from any of the following channel plan. */
+ * from any of the following channel plan.
+ */
/* If you just want to customize the actions(scan period or join actions)
- * about one of the channel plan, */
+ * about one of the channel plan,
+ */
/* customize them in struct rt_channel_info in the RT_CHANNEL_LIST. */
enum RT_CHANNEL_DOMAIN {
/* old channel plan mapping ===== */
@@ -319,7 +321,8 @@ struct mlme_ext_info {
u32 authModeToggle;
u32 enc_algo;/* encrypt algorithm; */
u32 key_index; /* this is only valid for legacy wep,
- * 0~3 for key id. */
+ * 0~3 for key id.
+ */
u32 iv;
u8 chg_txt[128];
u16 aid;
@@ -353,16 +356,19 @@ struct mlme_ext_info {
struct HT_info_element HT_info;
struct wlan_bssid_ex network;/* join network or bss_network,
* if in ap mode, it is the same
- * as cur_network.network */
+ * as cur_network.network
+ */
struct FW_Sta_Info FW_sta_info[NUM_STA];
};
/* The channel information about this channel including joining,
- * scanning, and power constraints. */
+ * scanning, and power constraints.
+ */
struct rt_channel_info {
u8 ChannelNum; /* The channel number. */
enum rt_scan_type ScanType; /* Scan type such as passive
- * or active scan. */
+ * or active scan.
+ */
u32 rx_count;
};
@@ -413,7 +419,8 @@ struct mlme_ext_priv {
unsigned char cur_wireless_mode; /* NETWORK_TYPE */
unsigned char oper_channel; /* saved chan info when call
- * set_channel_bw */
+ * set_channel_bw
+ */
unsigned char oper_bwmode;
unsigned char oper_ch_offset;/* PRIME_CHNL_OFFSET */
@@ -427,7 +434,8 @@ struct mlme_ext_priv {
struct mlme_ext_info mlmext_info;/* for sta/adhoc mode, including
* current scan/connecting/connected
* related info. For ap mode,
- * network includes ap's cap_info*/
+ * network includes ap's cap_info
+ */
struct timer_list survey_timer;
struct timer_list link_timer;
u16 chan_scan_time;
@@ -572,9 +580,9 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter,
void linked_status_chk(struct adapter *padapter);
-void survey_timer_hdl(unsigned long data);
-void link_timer_hdl(unsigned long data);
-void addba_timer_hdl(unsigned long data);
+void survey_timer_hdl(struct timer_list *t);
+void link_timer_hdl(struct timer_list *t);
+void addba_timer_hdl(struct timer_list *t);
#define set_survey_timer(mlmeext, ms) \
mod_timer(&mlmeext->survey_timer, jiffies + \
@@ -690,7 +698,8 @@ enum rtw_c2h_event {
_C2HBCN_EVT_,
_ReportPwrState_EVT_, /* filen: only for PCIE, USB */
_CloseRF_EVT_, /* filen: only for PCIE,
- * work around ASPM */
+ * work around ASPM
+ */
MAX_C2HEVT
};
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
index 4872a21b3103..aa353aefed3d 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
@@ -521,14 +521,16 @@
#define bCCKRxPhase 0x4
#if (RTL92SE_FPGA_VERIFY == 1)
#define bLSSIReadAddress 0x3f000000 /* LSSI "Read" Address
- Reg 0x824 rFPGA0_XA_HSSIParameter2 */
+ * Reg 0x824 rFPGA0_XA_HSSIParameter2
+ */
#else
#define bLSSIReadAddress 0x7f800000 /* T65 RF */
#endif
#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
#if (RTL92SE_FPGA_VERIFY == 1)
#define bLSSIReadBackData 0xfff /* Reg 0x8a0
- rFPGA0_XA_LSSIReadBack */
+ * rFPGA0_XA_LSSIReadBack
+ */
#else
#define bLSSIReadBackData 0xfffff /* T65 RF */
#endif
@@ -548,13 +550,16 @@
#define bDA6Swing 0x380000
#define bADClkPhase 0x4000000 /* Reg 0x880
- rFPGA0_AnalogParameter1 20/40 CCK support switch 40/80 BB MHZ */
+ * rFPGA0_AnalogParameter1 20/40 CCK
+ * support switch 40/80 BB MHZ
+ */
#define b80MClkDelay 0x18000000 /* Useless */
#define bAFEWatchDogEnable 0x20000000
#define bXtalCap01 0xc0000000 /* Reg 0x884
- rFPGA0_AnalogParameter2 Crystal cap */
+ * rFPGA0_AnalogParameter2 Crystal cap
+ */
#define bXtalCap23 0x3
#define bXtalCap92x 0x0f000000
#define bXtalCap 0x0f000000
@@ -598,7 +603,8 @@
#define bCCKTxOn 0x1
#define bOFDMTxOn 0x2
#define bDebugPage 0xfff /* reset debug page and HWord,
- * LWord */
+ * LWord
+ */
#define bDebugItem 0xff /* reset debug page and LWord */
#define bAntL 0x10
#define bAntNonHT 0x100
@@ -1071,7 +1077,8 @@
#define RCR_EnCS1 BIT(29) /* enable carrier sense method 1 */
#define RCR_EnCS2 BIT(30) /* enable carrier sense method 2 */
#define RCR_OnlyErlPkt BIT(31) /* Rx Early mode is performed for
- * packet size greater than 1536 */
+ * packet size greater than 1536
+ */
/*--------------------------Define Parameters-------------------------------*/
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
index 18a9e744fcbe..f39e90cfc031 100644
--- a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -51,11 +51,11 @@ enum power_mgnt {
};
/*
- BIT[2:0] = HW state
- BIT[3] = Protocol PS state, 0: register active state,
- 1: register sleep state
- BIT[4] = sub-state
-*/
+ * BIT[2:0] = HW state
+ * BIT[3] = Protocol PS state, 0: register active state,
+ * 1: register sleep state
+ * BIT[4] = sub-state
+ */
#define PS_DPS BIT(0)
#define PS_LCLK (PS_DPS)
@@ -115,9 +115,11 @@ enum rt_rf_power_state {
#define RT_RF_OFF_LEVL_FREE_FW BIT(4) /* FW free, re-download the FW*/
#define RT_RF_OFF_LEVL_FW_32K BIT(5) /* FW in 32k */
#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6) /* Always enable ASPM and Clock
- * Req in initialization. */
+ * Req in initialization.
+ */
#define RT_RF_LPS_DISALBE_2R BIT(30) /* When LPS is on, disable 2R
- * if no packet is RX or TX. */
+ * if no packet is RX or TX.
+ */
#define RT_RF_LPS_LEVEL_ASPM BIT(31) /* LPS with ASPM */
#define RT_IN_PS_LEVEL(ppsc, _PS_FLAG) \
@@ -145,7 +147,8 @@ struct pwrctrl_priv {
struct mutex mutex_lock;
volatile u8 rpwm; /* requested power state for fw */
volatile u8 cpwm; /* fw current power state. updated when
- * 1. read from HCPWM 2. driver lowers power level */
+ * 1. read from HCPWM 2. driver lowers power level
+ */
volatile u8 tog; /* toggling */
volatile u8 cpwm_tog; /* toggling */
@@ -170,7 +173,8 @@ struct pwrctrl_priv {
u8 ips_mode;
u8 ips_mode_req; /* used to accept the mode setting request,
- * will update to ipsmode later */
+ * will update to ipsmode later
+ */
uint bips_processing;
unsigned long ips_deny_time; /* will deny IPS when system time less than this */
u8 ps_processing; /* temp used to mark whether in rtw_ps_processor */
diff --git a/drivers/staging/rtl8188eu/include/rtw_qos.h b/drivers/staging/rtl8188eu/include/rtw_qos.h
index 45a77f6f8427..576dff68d0dc 100644
--- a/drivers/staging/rtl8188eu/include/rtw_qos.h
+++ b/drivers/staging/rtl8188eu/include/rtw_qos.h
@@ -19,7 +19,8 @@
struct qos_priv {
unsigned int qos_option; /* bit mask option: u-apsd,
- * s-apsd, ts, block ack... */
+ * s-apsd, ts, block ack...
+ */
};
#endif /* _RTL871X_QOS_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index 121150860450..7e85f700acb3 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -46,23 +46,23 @@ struct recv_reorder_ctrl {
struct stainfo_rxcache {
u16 tid_rxseq[16];
/*
- unsigned short tid0_rxseq;
- unsigned short tid1_rxseq;
- unsigned short tid2_rxseq;
- unsigned short tid3_rxseq;
- unsigned short tid4_rxseq;
- unsigned short tid5_rxseq;
- unsigned short tid6_rxseq;
- unsigned short tid7_rxseq;
- unsigned short tid8_rxseq;
- unsigned short tid9_rxseq;
- unsigned short tid10_rxseq;
- unsigned short tid11_rxseq;
- unsigned short tid12_rxseq;
- unsigned short tid13_rxseq;
- unsigned short tid14_rxseq;
- unsigned short tid15_rxseq;
-*/
+ * unsigned short tid0_rxseq;
+ * unsigned short tid1_rxseq;
+ * unsigned short tid2_rxseq;
+ * unsigned short tid3_rxseq;
+ * unsigned short tid4_rxseq;
+ * unsigned short tid5_rxseq;
+ * unsigned short tid6_rxseq;
+ * unsigned short tid7_rxseq;
+ * unsigned short tid8_rxseq;
+ * unsigned short tid9_rxseq;
+ * unsigned short tid10_rxseq;
+ * unsigned short tid11_rxseq;
+ * unsigned short tid12_rxseq;
+ * unsigned short tid13_rxseq;
+ * unsigned short tid14_rxseq;
+ * unsigned short tid15_rxseq;
+ */
};
struct signal_stat {
@@ -79,7 +79,8 @@ struct phy_info {
u8 RxMIMOSignalStrength[MAX_PATH_NUM_92CS];/* in 0~100 index */
s8 RxPower; /* in dBm Translate from PWdB */
/* Real power in dBm for this packet, no beautification and aggregation.
- * Keep this raw info to be used for the other procedures. */
+ * Keep this raw info to be used for the other procedures.
+ */
s8 recvpower;
u8 BTRxRSSIPercentage;
u8 SignalStrength; /* in 0-100 index. */
@@ -106,7 +107,8 @@ struct rx_pkt_attrib {
u8 privacy; /* in frame_ctrl field */
u8 bdecrypted;
u8 encrypt; /* when 0 indicate no encrypt. when non-zero,
- * indicate the encrypt algorithm */
+ * indicate the encrypt algorithm
+ */
u8 iv_len;
u8 icv_len;
u8 crc_err;
@@ -152,12 +154,12 @@ struct recv_stat {
};
/*
-accesser of recv_priv: rtw_recv_entry(dispatch / passive level);
-recv_thread(passive) ; returnpkt(dispatch)
-; halt(passive) ;
-
-using enter_critical section to protect
-*/
+ * accesser of recv_priv: rtw_recv_entry(dispatch / passive level);
+ * recv_thread(passive) ; returnpkt(dispatch)
+ * ; halt(passive) ;
+ *
+ * using enter_critical section to protect
+ */
struct recv_priv {
struct __queue free_recv_queue;
struct __queue recv_pending_queue;
@@ -209,20 +211,20 @@ struct recv_buf {
};
/*
- head ----->
-
- data ----->
-
- payload
-
- tail ----->
-
-
- end ----->
-
- len = (unsigned int )(tail - data);
-
-*/
+ * head ----->
+ *
+ * data ----->
+ *
+ * payload
+ *
+ * tail ----->
+ *
+ *
+ * end ----->
+ *
+ * len = (unsigned int )(tail - data);
+ *
+ */
struct recv_frame {
struct list_head list;
struct sk_buff *pkt;
@@ -247,7 +249,7 @@ void rtw_free_recvframe_queue(struct __queue *pframequeue,
struct __queue *pfree_recv_queue);
u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter);
-void rtw_reordering_ctrl_timeout_handler(unsigned long data);
+void rtw_reordering_ctrl_timeout_handler(struct timer_list *t);
static inline s32 translate_percentage_to_dbm(u32 sig_stren_index)
{
diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h
index 66896af02042..0718a29e7c9d 100644
--- a/drivers/staging/rtl8188eu/include/rtw_rf.h
+++ b/drivers/staging/rtl8188eu/include/rtw_rf.h
@@ -31,7 +31,8 @@
#define RTL8711_RF_DEF_SENS 4
/* We now define the following channels as the max channels in each
- * channel plan. */
+ * channel plan.
+ */
/* 2G, total 14 chnls */
/* {1,2,3,4,5,6,7,8,9,10,11,12,13,14} */
#define MAX_CHANNEL_NUM_2G 14
@@ -65,7 +66,8 @@ enum capability {
cChannelAgility = 0x0080,
cSpectrumMgnt = 0x0100,
cQos = 0x0200, /* For HCCA, use with CF-Pollable
- * and CF-PollReq */
+ * and CF-PollReq
+ */
cShortSlotTime = 0x0400,
cAPSD = 0x0800,
cRM = 0x1000, /* RRM (Radio Request Measurement) */
diff --git a/drivers/staging/rtl8188eu/include/rtw_security.h b/drivers/staging/rtl8188eu/include/rtw_security.h
index 74fe664787e5..a0c6cf706218 100644
--- a/drivers/staging/rtl8188eu/include/rtw_security.h
+++ b/drivers/staging/rtl8188eu/include/rtw_security.h
@@ -100,20 +100,26 @@ struct rt_pmkid_list {
struct security_priv {
u32 dot11AuthAlgrthm; /* 802.11 auth, could be open,
- * shared, 8021x and authswitch */
+ * shared, 8021x and authswitch
+ */
u32 dot11PrivacyAlgrthm; /* This specify the privacy for
- * shared auth. algorithm. */
+ * shared auth. algorithm.
+ */
/* WEP */
u32 dot11PrivacyKeyIndex; /* this is only valid for legendary
- * wep, 0~3 for key id.(tx key index) */
+ * wep, 0~3 for key id.(tx key index)
+ */
union Keytype dot11DefKey[4]; /* this is only valid for def. key */
u32 dot11DefKeylen[4];
u32 dot118021XGrpPrivacy; /* This specify the privacy algthm.
- * used for Grp key */
+ * used for Grp key
+ */
u32 dot118021XGrpKeyid; /* key id used for Grp Key
- * ( tx key index) */
+ * ( tx key index)
+ */
union Keytype dot118021XGrpKey[4]; /* 802.1x Group Key,
- * for inx0 and inx1 */
+ * for inx0 and inx1
+ */
union Keytype dot118021XGrptxmickey[4];
union Keytype dot118021XGrprxmickey[4];
union pn48 dot11Grptxpn; /* PN48 used for Grp Key xmit.*/
@@ -134,10 +140,12 @@ struct security_priv {
u8 bcheck_grpkey;
u8 bgrpkey_handshake;
s32 hw_decrypted;/* if the rx packets is hw_decrypted==false,i
- * it means the hw has not been ready. */
+ * it means the hw has not been ready.
+ */
/* keeps the auth_type & enc_status from upper layer
- * ioctl(wpa_supplicant or wzc) */
+ * ioctl(wpa_supplicant or wzc)
+ */
u32 ndisauthtype; /* NDIS_802_11_AUTHENTICATION_MODE */
u32 ndisencryptstatus; /* NDIS_802_11_ENCRYPTION_STATUS */
struct wlan_bssid_ex sec_bss; /* for joinbss (h2c buffer) usage */
@@ -256,7 +264,8 @@ static inline u32 rotr(u32 val, int bits)
/* ===== start - public domain SHA256 implementation ===== */
/* This is based on SHA256 implementation in LibTomCrypt that was released into
- * public domain by Tom St Denis. */
+ * public domain by Tom St Denis.
+ */
/* the K array */
static const unsigned long K[64] = {
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index dd6b7a9a8d4a..b4b3d13ace9e 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -115,11 +115,13 @@ struct pkt_attrib {
u16 seqnum;
u16 hdrlen; /* the WLAN Header Len */
u32 pktlen; /* the original 802.3 pkt raw_data len (not include
- * ether_hdr data) */
+ * ether_hdr data)
+ */
u32 last_txcmdsz;
u8 nr_frags;
u8 encrypt; /* when 0 indicate no encrypt. when non-zero,
- * indicate the encrypt algorith */
+ * indicate the encrypt algorith
+ */
u8 iv_len;
u8 icv_len;
u8 iv[18];
@@ -234,7 +236,8 @@ struct sta_xmit_priv {
spinlock_t lock;
int option;
int apsd_setting; /* When bit mask is on, the associated edca
- * queue supports APSD. */
+ * queue supports APSD.
+ */
struct tx_servq be_q; /* priority == 0,3 */
struct tx_servq bk_q; /* priority == 1,2 */
struct tx_servq vi_q; /* priority == 4,5 */
@@ -280,7 +283,8 @@ struct xmit_priv {
u8 hwxmit_entry;
u8 wmm_para_seq[4];/* sequence for wmm ac parameter strength
* from large to small. it's value is 0->vo,
- * 1->vi, 2->be, 3->bk. */
+ * 1->vi, 2->be, 3->bk.
+ */
u8 txirp_cnt;/* */
struct tasklet_struct xmit_tasklet;
/* per AC pending irp */
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
index 42a035123365..8f01deed6e4a 100644
--- a/drivers/staging/rtl8188eu/include/sta_info.h
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -126,7 +126,8 @@ struct sta_info {
/* Notes: */
/* STA_Mode: */
/* curr_network(mlme_priv/security_priv/qos/ht) +
- * sta_info: (STA & AP) CAP/INFO */
+ * sta_info: (STA & AP) CAP/INFO
+ */
/* scan_q: AP CAP/INFO */
/* AP_Mode: */
@@ -184,7 +185,8 @@ struct sta_info {
/* ================ODM Relative Info======================= */
/* Please be careful, don't declare too much structure here.
- * It will cost memory * STA support num. */
+ * It will cost memory * STA support num.
+ */
/* 2011/10/20 MH Add for ODM STA info. */
/* Driver Write */
u8 bValid; /* record the sta status link or not? */
@@ -318,9 +320,11 @@ struct sta_priv {
struct sta_info *sta_aid[NUM_STA];
u16 sta_dz_bitmap;/* only support 15 stations, station aid bitmap
- * for sleeping sta. */
+ * for sleeping sta.
+ */
u16 tim_bitmap; /* only support 15 stations, aid=0~15 mapping
- * bit0~bit15 */
+ * bit0~bit15
+ */
u16 max_num_sta;
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index cb46d353327b..084a246eec19 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -35,7 +35,8 @@
/* This value is tested by WiFi 11n Test Plan 5.2.3. */
/* This test verifies the WLAN NIC can update the NAV through sending
- * the CTS with large duration. */
+ * the CTS with large duration.
+ */
#define WiFiNavUpperUs 30000 /* 30 ms */
enum WIFI_FRAME_TYPE {
@@ -459,14 +460,14 @@ static inline int IsFrameTypeCtrl(unsigned char *pframe)
#define _IEEE8021X_PSK_ 2 /* WPA with pre-shared key */
/*
-#define _NO_PRIVACY_ 0
-#define _WEP_40_PRIVACY_ 1
-#define _TKIP_PRIVACY_ 2
-#define _WRAP_PRIVACY_ 3
-#define _CCMP_PRIVACY_ 4
-#define _WEP_104_PRIVACY_ 5
-#define _WEP_WPA_MIXED_PRIVACY_ 6 WEP + WPA
-*/
+ * #define _NO_PRIVACY_ 0
+ * #define _WEP_40_PRIVACY_ 1
+ * #define _TKIP_PRIVACY_ 2
+ * #define _WRAP_PRIVACY_ 3
+ * #define _CCMP_PRIVACY_ 4
+ * #define _WEP_104_PRIVACY_ 5
+ * #define _WEP_WPA_MIXED_PRIVACY_ 6 WEP + WPA
+ */
/*-----------------------------------------------------------------------------
Below is the definition for WMM
@@ -771,10 +772,12 @@ enum ht_cap_ampdu_factor {
#define P2P_PROVISIONING_SCAN_CNT 3
/* default value, used when: (1)p2p disabled or (2)p2p enabled
- * but only do 1 scan phase */
+ * but only do 1 scan phase
+ */
#define P2P_FINDPHASE_EX_NONE 0
/* used when p2p enabled and want to do 1 scan phase and
- * P2P_FINDPHASE_EX_MAX-1 find phase */
+ * P2P_FINDPHASE_EX_MAX-1 find phase
+ */
#define P2P_FINDPHASE_EX_FULL 1
#define P2P_FINDPHASE_EX_SOCIAL_FIRST (P2P_FINDPHASE_EX_FULL+1)
#define P2P_FINDPHASE_EX_MAX 4
diff --git a/drivers/staging/rtl8188eu/include/wlan_bssdef.h b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
index e1931dd04da0..d7b25d2f933a 100644
--- a/drivers/staging/rtl8188eu/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
@@ -86,7 +86,8 @@ struct ndis_802_11_var_ie {
* + NDIS_802_11_LENGTH_RATES_EX + IELength
*
* Except the IELength, all other fields are fixed length.
- * Therefore, we can define a macro to represent the partial sum. */
+ * Therefore, we can define a macro to represent the partial sum.
+ */
enum ndis_802_11_auth_mode {
Ndis802_11AuthModeOpen,
@@ -130,7 +131,8 @@ enum ndis_802_11_reload_def {
struct ndis_802_11_wep {
u32 Length; /* Length of this structure */
u32 KeyIndex; /* 0 is the per-client key,
- * 1-N are the global keys */
+ * 1-N are the global keys
+ */
u32 KeyLength; /* length of key in bytes */
u8 KeyMaterial[16];/* variable len depending on above field */
};
@@ -140,7 +142,8 @@ enum ndis_802_11_status_type {
Ndis802_11StatusType_MediaStreamMode,
Ndis802_11StatusType_PMKID_CandidateList,
Ndis802_11StatusTypeMax /* not a real type, defined as
- * an upper bound */
+ * an upper bound
+ */
};
/* mask for authentication/integrity fields */
@@ -166,7 +169,8 @@ struct wlan_phy_info {
struct wlan_bcn_info {
/* these infor get from rtw_get_encrypt_info when
- * * translate scan to UI */
+ * * translate scan to UI
+ */
u8 encryp_protocol;/* ENCRYP_PROTOCOL_E: OPEN/WEP/WPA/WPA2/WAPI */
int group_cipher; /* WPA/WPA2 group cipher */
int pairwise_cipher;/* WPA/WPA2/WEP pairwise cipher */
@@ -178,8 +182,8 @@ struct wlan_bcn_info {
};
/* temporally add #pragma pack for structure alignment issue of
-* struct wlan_bssid_ex and get_struct wlan_bssid_ex_sz()
-*/
+ * struct wlan_bssid_ex and get_struct wlan_bssid_ex_sz()
+ */
struct wlan_bssid_ex {
u32 Length;
unsigned char MacAddress[ETH_ALEN];
@@ -194,7 +198,8 @@ struct wlan_bssid_ex {
struct wlan_phy_info PhyInfo;
u32 IELength;
u8 IEs[MAX_IE_SZ]; /* timestamp, beacon interval, and
- * capability information) */
+ * capability information)
+ */
} __packed;
static inline uint get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
@@ -206,7 +211,8 @@ struct wlan_network {
struct list_head list;
int network_type; /* refer to ieee80211.h for WIRELESS_11A/B/G */
int fixed; /* set fixed when not to be removed
- * in site-surveying */
+ * in site-surveying
+ */
unsigned long last_scanned; /* timestamp for the network */
int aid; /* will only be valid when a BSS is joinned. */
int join_res;
diff --git a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
index bc756267c7fc..831c1ecc5e28 100644
--- a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
@@ -24,12 +24,10 @@ void rtw_init_mlme_timer(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- setup_timer(&pmlmepriv->assoc_timer, _rtw_join_timeout_handler,
- (unsigned long)padapter);
- setup_timer(&pmlmepriv->scan_to_timer, rtw_scan_timeout_handler,
- (unsigned long)padapter);
- setup_timer(&pmlmepriv->dynamic_chk_timer,
- rtw_dynamic_check_timer_handlder, (unsigned long)padapter);
+ timer_setup(&pmlmepriv->assoc_timer, _rtw_join_timeout_handler, 0);
+ timer_setup(&pmlmepriv->scan_to_timer, rtw_scan_timeout_handler, 0);
+ timer_setup(&pmlmepriv->dynamic_chk_timer,
+ rtw_dynamic_check_timer_handlder, 0);
}
void rtw_os_indicate_connect(struct adapter *adapter)
@@ -125,18 +123,15 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta)
{
- setup_timer(&psta->addba_retry_timer, addba_timer_hdl,
- (unsigned long)psta);
+ timer_setup(&psta->addba_retry_timer, addba_timer_hdl, 0);
}
void init_mlme_ext_timer(struct adapter *padapter)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- setup_timer(&pmlmeext->survey_timer, survey_timer_hdl,
- (unsigned long)padapter);
- setup_timer(&pmlmeext->link_timer, link_timer_hdl,
- (unsigned long)padapter);
+ timer_setup(&pmlmeext->survey_timer, survey_timer_hdl, 0);
+ timer_setup(&pmlmeext->link_timer, link_timer_hdl, 0);
}
#ifdef CONFIG_88EU_AP_MODE
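
The hunks above (together with the prototype changes in rtw_mlme.h and rtw_mlme_ext.h) follow the kernel-wide setup_timer() -> timer_setup() conversion: the callback now takes a struct timer_list pointer and recovers its containing object with from_timer() instead of an (unsigned long) cast. A self-contained sketch of that pattern, with 'struct foo' purely illustrative and not taken from this driver:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo {
	struct timer_list poll_timer;
	int ticks;
};

static void foo_poll(struct timer_list *t)
{
	/* Recover the enclosing object from the timer_list pointer. */
	struct foo *f = from_timer(f, t, poll_timer);

	f->ticks++;
	mod_timer(&f->poll_timer, jiffies + HZ);
}

static void foo_init(struct foo *f)
{
	/* Old style: setup_timer(&f->poll_timer, foo_poll, (unsigned long)f); */
	timer_setup(&f->poll_timer, foo_poll, 0);
	mod_timer(&f->poll_timer, jiffies + HZ);
}
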
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
index 37fd52d7364f..225c23fc69dc 100644
--- a/drivers/staging/rtl8188eu/os_dep/mon.c
+++ b/drivers/staging/rtl8188eu/os_dep/mon.c
@@ -66,34 +66,6 @@ static void mon_recv_decrypted(struct net_device *dev, const u8 *data,
netif_rx(skb);
}
-static void mon_recv_decrypted_recv(struct net_device *dev, const u8 *data,
- int data_len)
-{
- struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
- int hdr_len;
-
- skb = netdev_alloc_skb(dev, data_len);
- if (!skb)
- return;
- memcpy(skb_put(skb, data_len), data, data_len);
-
- /*
- * Frame data is not encrypted. Strip off protection so
- * userspace doesn't think that it is.
- */
-
- hdr = (struct ieee80211_hdr *)skb->data;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
- if (ieee80211_has_protected(hdr->frame_control))
- hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_PROTECTED);
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
-}
-
static void mon_recv_encrypted(struct net_device *dev, const u8 *data,
int data_len)
{
@@ -110,6 +82,7 @@ static void mon_recv_encrypted(struct net_device *dev, const u8 *data,
void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame)
{
struct rx_pkt_attrib *attr;
+ int iv_len, icv_len;
int data_len;
u8 *data;
@@ -122,8 +95,11 @@ void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame)
data = frame->pkt->data;
data_len = frame->pkt->len;
+ /* Broadcast and multicast frames don't have attr->{iv,icv}_len set */
+ SET_ICE_IV_LEN(iv_len, icv_len, attr->encrypt);
+
if (attr->bdecrypted)
- mon_recv_decrypted_recv(dev, data, data_len);
+ mon_recv_decrypted(dev, data, data_len, iv_len, icv_len);
else
mon_recv_encrypted(dev, data, data_len);
}
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index d14bc2b68d98..bda4ab879f58 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -155,7 +155,6 @@ _recv_indicatepkt_drop:
void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
{
- setup_timer(&preorder_ctrl->reordering_ctrl_timer,
- rtw_reordering_ctrl_timeout_handler,
- (unsigned long)preorder_ctrl);
+ timer_setup(&preorder_ctrl->reordering_ctrl_timer,
+ rtw_reordering_ctrl_timeout_handler, 0);
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index 64397b6f1248..7e75030475f7 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -407,6 +407,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
case -ENODEV:
case -ESHUTDOWN:
adapt->bSurpriseRemoved = true;
+ /* fall through */
case -ENOENT:
adapt->bDriverStopped = true;
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete:bDriverStopped=true\n"));
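
The added /* fall through */ comment documents that the -ENODEV/-ESHUTDOWN cases deliberately continue into the -ENOENT case (and keeps -Wimplicit-fallthrough quiet). A hypothetical, stand-alone version of the same control flow, not taken from this driver:

#include <linux/errno.h>
#include <linux/types.h>

static void classify_urb_status(int status, bool *removed, bool *stopped)
{
	switch (status) {
	case -ENODEV:
	case -ESHUTDOWN:
		*removed = true;
		/* fall through */
	case -ENOENT:
		*stopped = true;
		break;
	default:
		break;
	}
}
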
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index 017fe04ebe2d..88f89d77b511 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -128,12 +128,16 @@ void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
pTriple = (struct chnl_txpow_triple *)(pCoutryIe + 3);
for (i = 0; i < NumTriples; i++) {
if (MaxChnlNum >= pTriple->FirstChnl) {
- netdev_info(dev->dev, "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
+ netdev_info(dev->dev,
+ "%s: Invalid country IE, skip it......1\n",
+ __func__);
return;
}
if (MAX_CHANNEL_NUMBER < (pTriple->FirstChnl +
pTriple->NumChnls)) {
- netdev_info(dev->dev, "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
+ netdev_info(dev->dev,
+ "%s: Invalid country IE, skip it......2\n",
+ __func__);
return;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index aca52654825b..d2605158546b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -85,7 +85,7 @@ static struct pci_driver rtl8192_pci_driver = {
static short _rtl92e_is_tx_queue_empty(struct net_device *dev);
static void _rtl92e_watchdog_wq_cb(void *data);
-static void _rtl92e_watchdog_timer_cb(unsigned long data);
+static void _rtl92e_watchdog_timer_cb(struct timer_list *t);
static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
int rate);
static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -766,12 +766,12 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
priv->bfirst_init = false;
if (priv->polling_timer_on == 0)
- rtl92e_check_rfctrl_gpio_timer((unsigned long)dev);
+ rtl92e_check_rfctrl_gpio_timer(&priv->gpio_polling_timer);
if (priv->rtllib->state != RTLLIB_LINKED)
rtllib_softmac_start_protocol(priv->rtllib, 0);
rtllib_reset_queue(priv->rtllib);
- _rtl92e_watchdog_timer_cb((unsigned long)dev);
+ _rtl92e_watchdog_timer_cb(&priv->watch_dog_timer);
if (!netif_queue_stopped(dev))
netif_start_queue(dev);
@@ -1075,13 +1075,10 @@ static short _rtl92e_init(struct net_device *dev)
rtl92e_dm_init(dev);
- setup_timer(&priv->watch_dog_timer,
- _rtl92e_watchdog_timer_cb,
- (unsigned long)dev);
+ timer_setup(&priv->watch_dog_timer, _rtl92e_watchdog_timer_cb, 0);
- setup_timer(&priv->gpio_polling_timer,
- rtl92e_check_rfctrl_gpio_timer,
- (unsigned long)dev);
+ timer_setup(&priv->gpio_polling_timer, rtl92e_check_rfctrl_gpio_timer,
+ 0);
rtl92e_irq_disable(dev);
if (request_irq(dev->irq, _rtl92e_irq, IRQF_SHARED, dev->name, dev)) {
@@ -1531,9 +1528,9 @@ static void _rtl92e_watchdog_wq_cb(void *data)
RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
}
-static void _rtl92e_watchdog_timer_cb(unsigned long data)
+static void _rtl92e_watchdog_timer_cb(struct timer_list *t)
{
- struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
+ struct r8192_priv *priv = from_timer(priv, t, watch_dog_timer);
schedule_delayed_work(&priv->watch_dog_wq, 0);
mod_timer(&priv->watch_dog_timer, jiffies +
@@ -2535,7 +2532,7 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
RT_TRACE(COMP_INIT, "dev name: %s\n", dev->name);
if (priv->polling_timer_on == 0)
- rtl92e_check_rfctrl_gpio_timer((unsigned long)dev);
+ rtl92e_check_rfctrl_gpio_timer(&priv->gpio_polling_timer);
RT_TRACE(COMP_INIT, "Driver probe completed\n");
return 0;
@@ -2648,9 +2645,9 @@ bool rtl92e_disable_nic(struct net_device *dev)
module_pci_driver(rtl8192_pci_driver);
-void rtl92e_check_rfctrl_gpio_timer(unsigned long data)
+void rtl92e_check_rfctrl_gpio_timer(struct timer_list *t)
{
- struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
+ struct r8192_priv *priv = from_timer(priv, t, gpio_polling_timer);
priv->polling_timer_on = 1;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 9d3089cb6a5a..866fe4d4cb28 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -587,7 +587,7 @@ void rtl92e_tx_enable(struct net_device *);
void rtl92e_hw_sleep_wq(void *data);
void rtl92e_commit(struct net_device *dev);
-void rtl92e_check_rfctrl_gpio_timer(unsigned long data);
+void rtl92e_check_rfctrl_gpio_timer(struct timer_list *t);
void rtl92e_hw_wakeup_wq(void *data);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index b8205ebafd72..9bf95bd0ad13 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -196,7 +196,7 @@ static void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev);
static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev);
static void _rtl92e_dm_check_fsync(struct net_device *dev);
static void _rtl92e_dm_check_rf_ctrl_gpio(void *data);
-static void _rtl92e_dm_fsync_timer_callback(unsigned long data);
+static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t);
/*---------------------Define local function prototype-----------------------*/
@@ -2125,8 +2125,7 @@ static void _rtl92e_dm_init_fsync(struct net_device *dev)
priv->rtllib->fsync_state = Default_Fsync;
priv->framesyncMonitor = 1;
- setup_timer(&priv->fsync_timer, _rtl92e_dm_fsync_timer_callback,
- (unsigned long)dev);
+ timer_setup(&priv->fsync_timer, _rtl92e_dm_fsync_timer_callback, 0);
}
@@ -2137,10 +2136,10 @@ static void _rtl92e_dm_deinit_fsync(struct net_device *dev)
del_timer_sync(&priv->fsync_timer);
}
-static void _rtl92e_dm_fsync_timer_callback(unsigned long data)
+static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
+ struct r8192_priv *priv = from_timer(priv, t, fsync_timer);
+ struct net_device *dev = priv->rtllib->dev;
u32 rate_index, rate_count = 0, rate_count_diff = 0;
bool bSwitchFromCountDiff = false;
bool bDoubleTimeInterval = false;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index 3e3273d3e043..81a68b0b4a7f 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -91,7 +91,7 @@ int rtl92e_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D0, 0);
if (priv->polling_timer_on == 0)
- rtl92e_check_rfctrl_gpio_timer((unsigned long)dev);
+ rtl92e_check_rfctrl_gpio_timer(&priv->gpio_polling_timer);
if (!netif_running(dev)) {
netdev_info(dev,
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 1720e1b6ae04..eb6d841f7c45 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -528,18 +528,20 @@ void TsInitDelBA(struct rtllib_device *ieee,
}
}
-void BaSetupTimeOut(unsigned long data)
+void BaSetupTimeOut(struct timer_list *t)
{
- struct tx_ts_record *pTxTs = (struct tx_ts_record *)data;
+ struct tx_ts_record *pTxTs = from_timer(pTxTs, t,
+ TxPendingBARecord.Timer);
pTxTs->bAddBaReqInProgress = false;
pTxTs->bAddBaReqDelayed = true;
pTxTs->TxPendingBARecord.bValid = false;
}
-void TxBaInactTimeout(unsigned long data)
+void TxBaInactTimeout(struct timer_list *t)
{
- struct tx_ts_record *pTxTs = (struct tx_ts_record *)data;
+ struct tx_ts_record *pTxTs = from_timer(pTxTs, t,
+ TxAdmittedBARecord.Timer);
struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
TxTsRecord[pTxTs->num]);
TxTsDeleteBA(ieee, pTxTs);
@@ -548,9 +550,10 @@ void TxBaInactTimeout(unsigned long data)
DELBA_REASON_TIMEOUT);
}
-void RxBaInactTimeout(unsigned long data)
+void RxBaInactTimeout(struct timer_list *t)
{
- struct rx_ts_record *pRxTs = (struct rx_ts_record *)data;
+ struct rx_ts_record *pRxTs = from_timer(pRxTs, t,
+ RxAdmittedBARecord.Timer);
struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device,
RxTsRecord[pRxTs->num]);
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index dcc4eb691889..f839d2447b85 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -16,17 +16,18 @@
#include <linux/etherdevice.h>
#include "rtl819x_TS.h"
-static void TsSetupTimeOut(unsigned long data)
+static void TsSetupTimeOut(struct timer_list *unused)
{
}
-static void TsInactTimeout(unsigned long data)
+static void TsInactTimeout(struct timer_list *unused)
{
}
-static void RxPktPendingTimeout(unsigned long data)
+static void RxPktPendingTimeout(struct timer_list *t)
{
- struct rx_ts_record *pRxTs = (struct rx_ts_record *)data;
+ struct rx_ts_record *pRxTs = from_timer(pRxTs, t,
+ RxPktPendingTimer);
struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device,
RxTsRecord[pRxTs->num]);
@@ -96,9 +97,9 @@ static void RxPktPendingTimeout(unsigned long data)
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
-static void TsAddBaProcess(unsigned long data)
+static void TsAddBaProcess(struct timer_list *t)
{
- struct tx_ts_record *pTxTs = (struct tx_ts_record *)data;
+ struct tx_ts_record *pTxTs = from_timer(pTxTs, t, TsAddBaTimer);
u8 num = pTxTs->num;
struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
TxTsRecord[num]);
@@ -150,24 +151,18 @@ void TSInitialize(struct rtllib_device *ieee)
for (count = 0; count < TOTAL_TS_NUM; count++) {
pTxTS->num = count;
- setup_timer(&pTxTS->TsCommonInfo.SetupTimer,
- TsSetupTimeOut,
- (unsigned long) pTxTS);
+ timer_setup(&pTxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
+ 0);
- setup_timer(&pTxTS->TsCommonInfo.InactTimer,
- TsInactTimeout,
- (unsigned long) pTxTS);
+ timer_setup(&pTxTS->TsCommonInfo.InactTimer, TsInactTimeout,
+ 0);
- setup_timer(&pTxTS->TsAddBaTimer,
- TsAddBaProcess,
- (unsigned long) pTxTS);
+ timer_setup(&pTxTS->TsAddBaTimer, TsAddBaProcess, 0);
- setup_timer(&pTxTS->TxPendingBARecord.Timer,
- BaSetupTimeOut,
- (unsigned long) pTxTS);
- setup_timer(&pTxTS->TxAdmittedBARecord.Timer,
- TxBaInactTimeout,
- (unsigned long) pTxTS);
+ timer_setup(&pTxTS->TxPendingBARecord.Timer, BaSetupTimeOut,
+ 0);
+ timer_setup(&pTxTS->TxAdmittedBARecord.Timer,
+ TxBaInactTimeout, 0);
ResetTxTsEntry(pTxTS);
list_add_tail(&pTxTS->TsCommonInfo.List,
@@ -182,21 +177,16 @@ void TSInitialize(struct rtllib_device *ieee)
pRxTS->num = count;
INIT_LIST_HEAD(&pRxTS->RxPendingPktList);
- setup_timer(&pRxTS->TsCommonInfo.SetupTimer,
- TsSetupTimeOut,
- (unsigned long) pRxTS);
+ timer_setup(&pRxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
+ 0);
- setup_timer(&pRxTS->TsCommonInfo.InactTimer,
- TsInactTimeout,
- (unsigned long) pRxTS);
+ timer_setup(&pRxTS->TsCommonInfo.InactTimer, TsInactTimeout,
+ 0);
- setup_timer(&pRxTS->RxAdmittedBARecord.Timer,
- RxBaInactTimeout,
- (unsigned long) pRxTS);
+ timer_setup(&pRxTS->RxAdmittedBARecord.Timer,
+ RxBaInactTimeout, 0);
- setup_timer(&pRxTS->RxPktPendingTimer,
- RxPktPendingTimeout,
- (unsigned long) pRxTS);
+ timer_setup(&pRxTS->RxPktPendingTimer, RxPktPendingTimeout, 0);
ResetRxTsEntry(pRxTS);
list_add_tail(&pRxTS->TsCommonInfo.List,
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 0042a0f6cf79..c01474a6db1e 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -2113,9 +2113,9 @@ void TsInitAddBA(struct rtllib_device *ieee, struct tx_ts_record *pTS,
void TsInitDelBA(struct rtllib_device *ieee,
struct ts_common_info *pTsCommonInfo,
enum tr_select TxRxSelect);
-void BaSetupTimeOut(unsigned long data);
-void TxBaInactTimeout(unsigned long data);
-void RxBaInactTimeout(unsigned long data);
+void BaSetupTimeOut(struct timer_list *t);
+void TxBaInactTimeout(struct timer_list *t);
+void RxBaInactTimeout(struct timer_list *t);
void ResetBaEntry(struct ba_record *pBA);
bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, u8 *Addr,
u8 TID, enum tr_select TxRxSelect, bool bAddNewTs);
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index e4be85af31e7..c2b9ffba354a 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -393,10 +393,10 @@ static void rtllib_send_beacon(struct rtllib_device *ieee)
}
-static void rtllib_send_beacon_cb(unsigned long _ieee)
+static void rtllib_send_beacon_cb(struct timer_list *t)
{
struct rtllib_device *ieee =
- (struct rtllib_device *) _ieee;
+ from_timer(ieee, t, beacon_timer);
unsigned long flags;
spin_lock_irqsave(&ieee->beacon_lock, flags);
@@ -1427,9 +1427,11 @@ static void rtllib_associate_abort(struct rtllib_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-static void rtllib_associate_abort_cb(unsigned long dev)
+static void rtllib_associate_abort_cb(struct timer_list *t)
{
- rtllib_associate_abort((struct rtllib_device *) dev);
+ struct rtllib_device *dev = from_timer(dev, t, associate_timer);
+
+ rtllib_associate_abort(dev);
}
static void rtllib_associate_step1(struct rtllib_device *ieee, u8 *daddr)
@@ -2811,8 +2813,9 @@ exit:
static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
{
- const u8 broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
+ static const u8 broadcast_addr[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
struct sk_buff *skb;
struct rtllib_probe_response *b;
@@ -3011,13 +3014,9 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
ieee->tx_pending.txb = NULL;
- setup_timer(&ieee->associate_timer,
- rtllib_associate_abort_cb,
- (unsigned long) ieee);
+ timer_setup(&ieee->associate_timer, rtllib_associate_abort_cb, 0);
- setup_timer(&ieee->beacon_timer,
- rtllib_send_beacon_cb,
- (unsigned long) ieee);
+ timer_setup(&ieee->beacon_timer, rtllib_send_beacon_cb, 0);
INIT_DELAYED_WORK_RSL(&ieee->link_change_wq,
(void *)rtllib_link_change_wq, ieee);
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index f7eba01b5d15..03fbff067fa4 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -694,8 +694,7 @@ int rtllib_wx_set_mlme(struct rtllib_device *ieee,
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
deauth = true;
- /* leave break out intentionly */
-
+ /* fall through */
case IW_MLME_DISASSOC:
if (deauth)
netdev_info(ieee->dev, "disauth packet !\n");
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index b062cad052b9..3addaa65085a 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -323,7 +323,7 @@ typedef struct ieee_param {
u8 key[0];
} crypt;
} u;
-}ieee_param;
+} ieee_param;
// linux under 2.6.9 release may not support it, so modify it for common use
@@ -412,15 +412,15 @@ typedef struct ieee_param {
#define IEEE80211_QCTL_TID 0x000F
#define FC_QOS_BIT BIT(7)
-#define IsDataFrame(pdu) ( ((pdu[0] & 0x0C)==0x08) ? true : false )
-#define IsLegacyDataFrame(pdu) (IsDataFrame(pdu) && (!(pdu[0]&FC_QOS_BIT)) )
+#define IsDataFrame(pdu) (((pdu[0] & 0x0C) == 0x08) ? true : false)
+#define IsLegacyDataFrame(pdu) (IsDataFrame(pdu) && (!(pdu[0] & FC_QOS_BIT)))
//added by wb. Is this right?
-#define IsQoSDataFrame(pframe) ((*(u16 *)pframe&(IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA)) == (IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA))
-#define Frame_Order(pframe) (*(u16 *)pframe&IEEE80211_FCTL_ORDER)
-#define SN_LESS(a, b) (((a-b)&0x800)!=0)
+#define IsQoSDataFrame(pframe) ((*(u16 *)pframe & (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA)) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
+#define Frame_Order(pframe) (*(u16 *)pframe & IEEE80211_FCTL_ORDER)
+#define SN_LESS(a, b) (((a - b) & 0x800) != 0)
#define SN_EQUAL(a, b) (a == b)
#define MAX_DEV_ADDR_SIZE 8
-typedef enum _ACT_CATEGORY{
+typedef enum _ACT_CATEGORY {
ACT_CAT_QOS = 1,
ACT_CAT_DLS = 2,
ACT_CAT_BA = 3,
@@ -428,24 +428,24 @@ typedef enum _ACT_CATEGORY{
ACT_CAT_WMM = 17,
} ACT_CATEGORY, *PACT_CATEGORY;
-typedef enum _TS_ACTION{
+typedef enum _TS_ACTION {
ACT_ADDTSREQ = 0,
ACT_ADDTSRSP = 1,
ACT_DELTS = 2,
ACT_SCHEDULE = 3,
} TS_ACTION, *PTS_ACTION;
-typedef enum _BA_ACTION{
+typedef enum _BA_ACTION {
ACT_ADDBAREQ = 0,
ACT_ADDBARSP = 1,
ACT_DELBA = 2,
} BA_ACTION, *PBA_ACTION;
-typedef enum _InitialGainOpType{
- IG_Backup=0,
+typedef enum _InitialGainOpType {
+ IG_Backup = 0,
IG_Restore,
IG_Max
-}InitialGainOpType;
+} InitialGainOpType;
/* debug macros */
#define CONFIG_IEEE80211_DEBUG
@@ -457,22 +457,22 @@ do { if (ieee80211_debug_level & (level)) \
//wb added to debug out data buf
//if you want print DATA buffer related BA, please set ieee80211_debug_level to DATA|BA
#define IEEE80211_DEBUG_DATA(level, data, datalen) \
- do{ if ((ieee80211_debug_level & (level)) == (level)) \
+ do { if ((ieee80211_debug_level & (level)) == (level)) \
{ \
int i; \
u8 *pdata = (u8 *) data; \
printk(KERN_DEBUG "ieee80211: %s()\n", __func__); \
- for(i=0; i<(int)(datalen); i++) \
+ for (i = 0; i < (int)(datalen); i++) \
{ \
printk("%2x ", pdata[i]); \
- if ((i+1)%16 == 0) printk("\n"); \
+ if ((i + 1) % 16 == 0) printk("\n"); \
} \
printk("\n"); \
} \
} while (0)
#else
-#define IEEE80211_DEBUG(level, fmt, args...) do {} while (0)
-#define IEEE80211_DEBUG_DATA(level, data, datalen) do {} while(0)
+#define IEEE80211_DEBUG(level, fmt, args...) do {} while (0)
+#define IEEE80211_DEBUG_DATA(level, data, datalen) do {} while (0)
#endif /* CONFIG_IEEE80211_DEBUG */
/* debug macros not dependent on CONFIG_IEEE80211_DEBUG */
@@ -876,9 +876,9 @@ enum ieee80211_mfie {
MFIE_TYPE_ERP = 42,
MFIE_TYPE_RSN = 48,
MFIE_TYPE_RATES_EX = 50,
- MFIE_TYPE_HT_CAP= 45,
- MFIE_TYPE_HT_INFO= 61,
- MFIE_TYPE_AIRONET=133,
+ MFIE_TYPE_HT_CAP = 45,
+ MFIE_TYPE_HT_INFO = 61,
+ MFIE_TYPE_AIRONET = 133,
MFIE_TYPE_GENERIC = 221,
MFIE_TYPE_QOS_PARAMETER = 222,
};
@@ -1051,7 +1051,7 @@ typedef union _frameqos {
u16 ack_policy:2;
u16 reserved:1;
u16 txop:8;
- }field;
+ } field;
} frameqos, *pframeqos;
/* SWEEP TABLE ENTRIES NUMBER*/
@@ -1196,7 +1196,7 @@ static inline u8 Frame_QoSTID(u8 *buf)
u16 fc;
hdr = (struct rtl_80211_hdr_3addr *)buf;
fc = le16_to_cpu(hdr->frame_ctl);
- return (u8)((frameqos *)(buf + (((fc & IEEE80211_FCTL_TODS)&&(fc & IEEE80211_FCTL_FROMDS))? 30 : 24)))->field.tid;
+ return (u8)((frameqos *)(buf + (((fc & IEEE80211_FCTL_TODS) && (fc & IEEE80211_FCTL_FROMDS)) ? 30 : 24)))->field.tid;
}
//added by amy for reorder
@@ -1209,7 +1209,7 @@ struct eapol {
u16 length;
} __packed;
-struct ieee80211_softmac_stats{
+struct ieee80211_softmac_stats {
unsigned int rx_ass_ok;
unsigned int rx_ass_err;
unsigned int rx_probe_rq;
@@ -1320,7 +1320,7 @@ struct ether_header {
#define ETHERTYPE_IP 0x0800 /* IP protocol */
#endif
-typedef enum _erp_t{
+typedef enum _erp_t {
ERP_NonERPpresent = 0x01,
ERP_UseProtection = 0x02,
ERP_BarkerPreambleMode = 0x04,
@@ -1479,37 +1479,35 @@ typedef struct _RX_REORDER_ENTRY {
struct ieee80211_rxb *prxb;
} RX_REORDER_ENTRY, *PRX_REORDER_ENTRY;
//added by amy for order
-typedef enum _Fsync_State{
+typedef enum _Fsync_State {
Default_Fsync,
HW_Fsync,
SW_Fsync
-}Fsync_State;
+} Fsync_State;
// Power save mode configured.
-typedef enum _RT_PS_MODE
-{
+typedef enum _RT_PS_MODE {
eActive, // Active/Continuous access.
eMaxPs, // Max power save mode.
eFastPs // Fast power save mode.
-}RT_PS_MODE;
+} RT_PS_MODE;
-typedef enum _IPS_CALLBACK_FUNCION
-{
+typedef enum _IPS_CALLBACK_FUNCION {
IPS_CALLBACK_NONE = 0,
IPS_CALLBACK_MGNT_LINK_REQUEST = 1,
IPS_CALLBACK_JOIN_REQUEST = 2,
-}IPS_CALLBACK_FUNCION;
+} IPS_CALLBACK_FUNCION;
-typedef enum _RT_JOIN_ACTION{
+typedef enum _RT_JOIN_ACTION {
RT_JOIN_INFRA = 1,
RT_JOIN_IBSS = 2,
RT_START_IBSS = 3,
RT_NO_ACTION = 4,
-}RT_JOIN_ACTION;
+} RT_JOIN_ACTION;
-typedef struct _IbssParms{
+typedef struct _IbssParms {
u16 atimWin;
-}IbssParms, *PIbssParms;
+} IbssParms, *PIbssParms;
#define MAX_NUM_RATES 264 // Max num of support rates element: 8, Max num of ext. support rate: 255. 061122, by rcnjko.
// RF state.
@@ -1517,7 +1515,7 @@ typedef enum _RT_RF_POWER_STATE {
eRfOn,
eRfSleep,
eRfOff
-}RT_RF_POWER_STATE;
+} RT_RF_POWER_STATE;
typedef struct _RT_POWER_SAVE_CONTROL {
@@ -1572,8 +1570,7 @@ typedef u32 RT_RF_CHANGE_SOURCE;
#define RF_CHANGE_BY_IPS BIT(28)
#define RF_CHANGE_BY_INIT 0 // Do not change the RFOff reason. Defined by Bruce, 2008-01-17.
-typedef enum
-{
+typedef enum {
COUNTRY_CODE_FCC = 0,
COUNTRY_CODE_IC = 1,
COUNTRY_CODE_ETSI = 2,
@@ -1585,10 +1582,10 @@ typedef enum
COUNTRY_CODE_TELEC,
COUNTRY_CODE_MIC,
COUNTRY_CODE_GLOBAL_DOMAIN
-}country_code_type_t;
+} country_code_type_t;
#define RT_MAX_LD_SLOT_NUM 10
-typedef struct _RT_LINK_DETECT_T{
+typedef struct _RT_LINK_DETECT_T {
u32 NumRecvBcnInPeriod;
u32 NumRecvDataInPeriod;
@@ -1601,7 +1598,7 @@ typedef struct _RT_LINK_DETECT_T{
u32 NumTxOkInPeriod;
u32 NumRxOkInPeriod;
bool bBusyTraffic;
-}RT_LINK_DETECT_T, *PRT_LINK_DETECT_T;
+} RT_LINK_DETECT_T, *PRT_LINK_DETECT_T;
struct ieee80211_device {
@@ -1917,11 +1914,11 @@ struct ieee80211_device {
struct net_device *dev);
int (*reset_port)(struct net_device *dev);
- int (*is_queue_full) (struct net_device *dev, int pri);
+ int (*is_queue_full)(struct net_device *dev, int pri);
- int (*handle_management) (struct net_device *dev,
+ int (*handle_management)(struct net_device *dev,
struct ieee80211_network *network, u16 type);
- int (*is_qos_active) (struct net_device *dev, struct sk_buff *skb);
+ int (*is_qos_active)(struct net_device *dev, struct sk_buff *skb);
/* Softmac-generated frames (management) are TXed via this
* callback if the flag IEEE_SOFTMAC_SINGLE_QUEUE is
@@ -1989,16 +1986,16 @@ struct ieee80211_device {
* stop_send_bacons is NOT guaranteed to be called only
* after start_send_beacons.
*/
- void (*start_send_beacons) (struct net_device *dev,u16 tx_rate);
- void (*stop_send_beacons) (struct net_device *dev);
+ void (*start_send_beacons)(struct net_device *dev, u16 tx_rate);
+ void (*stop_send_beacons)(struct net_device *dev);
/* power save mode related */
- void (*sta_wake_up) (struct net_device *dev);
- void (*ps_request_tx_ack) (struct net_device *dev);
- void (*enter_sleep_state) (struct net_device *dev, u32 th, u32 tl);
- short (*ps_is_queue_empty) (struct net_device *dev);
- int (*handle_beacon) (struct net_device *dev, struct ieee80211_beacon *beacon, struct ieee80211_network *network);
- int (*handle_assoc_response) (struct net_device *dev, struct ieee80211_assoc_response_frame *resp, struct ieee80211_network *network);
+ void (*sta_wake_up)(struct net_device *dev);
+ void (*ps_request_tx_ack)(struct net_device *dev);
+ void (*enter_sleep_state)(struct net_device *dev, u32 th, u32 tl);
+ short (*ps_is_queue_empty)(struct net_device *dev);
+ int (*handle_beacon)(struct net_device *dev, struct ieee80211_beacon *beacon, struct ieee80211_network *network);
+ int (*handle_assoc_response)(struct net_device *dev, struct ieee80211_assoc_response_frame *resp, struct ieee80211_network *network);
/* check whether Tx hw resource available */
@@ -2023,7 +2020,7 @@ struct ieee80211_device {
#define IEEE_G (1<<2)
#define IEEE_N_24G (1<<4)
#define IEEE_N_5G (1<<5)
-#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
+#define IEEE_MODE_MASK (IEEE_A | IEEE_B | IEEE_G)
/* Generate a 802.11 header */
@@ -2112,7 +2109,7 @@ static inline int ieee80211_get_hdrlen(u16 fc)
case IEEE80211_FTYPE_DATA:
if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
hdrlen = IEEE80211_4ADDR_LEN; /* Addr4 */
- if(IEEE80211_QOS_HAS_SEQ(fc))
+ if (IEEE80211_QOS_HAS_SEQ(fc))
hdrlen += 2; /* QOS ctrl*/
break;
case IEEE80211_FTYPE_CTL:
@@ -2379,7 +2376,7 @@ void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee,
u8 HTGetHighestMCSRate(struct ieee80211_device *ieee,
u8 *pMCSRateSet, u8 *pMCSFilter);
extern u8 MCS_FILTER_ALL[];
-extern u16 MCS_DATA_RATE[2][2][77] ;
+extern u16 MCS_DATA_RATE[2][2][77];
u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame);
//extern void HTSetConnectBwModeCallback(unsigned long data);
void HTResetIOTSetting(PRT_HIGH_THROUGHPUT pHTInfo);
@@ -2395,9 +2392,9 @@ void TsInitAddBA(struct ieee80211_device *ieee, PTX_TS_RECORD pTS,
u8 Policy, u8 bOverwritePending);
void TsInitDelBA(struct ieee80211_device *ieee,
PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect);
-void BaSetupTimeOut(unsigned long data);
-void TxBaInactTimeout(unsigned long data);
-void RxBaInactTimeout(unsigned long data);
+void BaSetupTimeOut(struct timer_list *t);
+void TxBaInactTimeout(struct timer_list *t);
+void RxBaInactTimeout(struct timer_list *t);
void ResetBaEntry(PBA_RECORD pBA);
//function in TS.c
bool GetTs(
@@ -2426,7 +2423,8 @@ static inline int ieee80211_get_scans(struct ieee80211_device *ieee)
return ieee->scans;
}
-static inline const char *escape_essid(const char *essid, u8 essid_len) {
+static inline const char *escape_essid(const char *essid, u8 essid_len)
+{
static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
if (ieee80211_is_empty_essid(essid, essid_len)) {
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
index 48e80be90ba5..6f457812e5a3 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
@@ -57,9 +57,9 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee,
}
}
-void ieee80211_crypt_deinit_handler(unsigned long data)
+void ieee80211_crypt_deinit_handler(struct timer_list *t)
{
- struct ieee80211_device *ieee = (struct ieee80211_device *)data;
+ struct ieee80211_device *ieee = from_timer(ieee, t, crypt_deinit_timer);
unsigned long flags;
spin_lock_irqsave(&ieee->lock, flags);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
index a0aa0f5be63a..1f2aea7e0e55 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
@@ -83,7 +83,7 @@ int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops);
int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops);
struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name);
void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force);
-void ieee80211_crypt_deinit_handler(unsigned long data);
+void ieee80211_crypt_deinit_handler(struct timer_list *t);
void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
struct ieee80211_crypt_data **crypt);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index 8f236b332a47..90a097f2cd4e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -133,8 +133,8 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
INIT_LIST_HEAD(&ieee->crypt_deinit_list);
- setup_timer(&ieee->crypt_deinit_timer,
- ieee80211_crypt_deinit_handler, (unsigned long)ieee);
+ timer_setup(&ieee->crypt_deinit_timer, ieee80211_crypt_deinit_handler,
+ 0);
spin_lock_init(&ieee->lock);
spin_lock_init(&ieee->wpax_suitlist_lock);
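The hunks above convert the crypt deinit timer from the old setup_timer()/(unsigned long) cookie style to timer_setup(), with the callback recovering its owning object via from_timer(). A minimal sketch of that pattern follows; struct my_dev, my_dev_watchdog() and the one-second period are hypothetical stand-ins, not code from this driver.

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list watchdog;	/* timer embedded in the owning object */
	int missed_ticks;
};

static void my_dev_watchdog(struct timer_list *t)
{
	/* from_timer() is container_of(): recover the structure that
	 * embeds the timer instead of passing an opaque cookie.
	 */
	struct my_dev *dev = from_timer(dev, t, watchdog);

	dev->missed_ticks++;
	mod_timer(&dev->watchdog, jiffies + HZ);	/* re-arm in one second */
}

static void my_dev_start_watchdog(struct my_dev *dev)
{
	/* Third argument is timer flags; 0 here, as in the hunks above. */
	timer_setup(&dev->watchdog, my_dev_watchdog, 0);
	mod_timer(&dev->watchdog, jiffies + HZ);
}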
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index fe6f38b7ec35..4e7908322d77 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -2948,8 +2948,9 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
struct ieee_param *param, int param_len)
{
int ret = 0;
+ const char *module = NULL;
- struct ieee80211_crypto_ops *ops;
+ struct ieee80211_crypto_ops *ops = NULL;
struct ieee80211_crypt_data **crypt;
struct ieee80211_security sec = {
@@ -2995,19 +2996,17 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
strcmp(param->u.crypt.alg, "TKIP"))
goto skip_host_crypt;
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
- request_module("ieee80211_crypt_wep");
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- //set WEP40 first, it will be modified according to WEP104 or WEP40 at other place
- } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
- request_module("ieee80211_crypt_tkip");
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
- request_module("ieee80211_crypt_ccmp");
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- }
- if (ops == NULL) {
+ //set WEP40 first, it will be modified according to WEP104 or WEP40 at other place
+ if (!strcmp(param->u.crypt.alg, "WEP"))
+ module = "ieee80211_crypt_wep";
+ else if (!strcmp(param->u.crypt.alg, "TKIP"))
+ module = "ieee80211_crypt_tkip";
+ else if (!strcmp(param->u.crypt.alg, "CCMP"))
+ module = "ieee80211_crypt_ccmp";
+ if (module)
+ ops = try_then_request_module(ieee80211_get_crypto_ops(param->u.crypt.alg),
+ module);
+ if (!ops) {
printk("unknown crypto alg '%s'\n", param->u.crypt.alg);
param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG;
ret = -EINVAL;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index f58971a4a2e3..9a1a84548bc6 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -302,7 +302,6 @@ ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
}
}
-#define SN_LESS(a, b) (((a-b)&0x800)!=0)
static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
struct sk_buff *skb, struct cb_desc *tcb_desc)
{
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index c925e53bf013..f2fcdec9bd17 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -364,11 +364,8 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
GFP_KERNEL);
if (!new_crypt)
return -ENOMEM;
- new_crypt->ops = ieee80211_get_crypto_ops("WEP");
- if (!new_crypt->ops) {
- request_module("ieee80211_crypt_wep");
- new_crypt->ops = ieee80211_get_crypto_ops("WEP");
- }
+ new_crypt->ops = try_then_request_module(ieee80211_get_crypto_ops("WEP"),
+ "ieee80211_crypt_wep");
if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
new_crypt->priv = new_crypt->ops->init(key);
@@ -591,12 +588,8 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
}
printk("alg name:%s\n",alg);
- ops = ieee80211_get_crypto_ops(alg);
- if (ops == NULL) {
- request_module(module);
- ops = ieee80211_get_crypto_ops(alg);
- }
- if (ops == NULL) {
+ ops = try_then_request_module(ieee80211_get_crypto_ops(alg), module);
+ if (!ops) {
IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
dev->name, ext->alg);
printk("========>unknown crypto alg %d\n", ext->alg);
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 4b92bb51c3e6..21b55fd5b717 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -144,7 +144,7 @@ static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, P
if (ACT_ADDBARSP == type) {
// Status Code
- printk(KERN_INFO "=====>to send ADDBARSP\n");
+ netdev_info(ieee->dev, "=====>to send ADDBARSP\n");
put_unaligned_le16(StatusCode, tag);
tag += 2;
@@ -346,7 +346,7 @@ int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb)
pBaTimeoutVal = (u16 *)(tag + 5);
pBaStartSeqCtrl = (PSEQUENCE_CONTROL)(req + 7);
- printk(KERN_INFO "====================>rx ADDBAREQ from :%pM\n", dst);
+ netdev_info(ieee->dev, "====================>rx ADDBAREQ from :%pM\n", dst);
//some other capability is not ready now.
if ((ieee->current_network.qos_data.active == 0) ||
(!ieee->pHTInfo->bCurrentHTSupport)) //||
@@ -673,18 +673,18 @@ TsInitDelBA( struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, TR_SE
* return: NULL
* notice:
********************************************************************************************************************/
-void BaSetupTimeOut(unsigned long data)
+void BaSetupTimeOut(struct timer_list *t)
{
- PTX_TS_RECORD pTxTs = (PTX_TS_RECORD)data;
+ PTX_TS_RECORD pTxTs = from_timer(pTxTs, t, TxPendingBARecord.Timer);
pTxTs->bAddBaReqInProgress = false;
pTxTs->bAddBaReqDelayed = true;
pTxTs->TxPendingBARecord.bValid = false;
}
-void TxBaInactTimeout(unsigned long data)
+void TxBaInactTimeout(struct timer_list *t)
{
- PTX_TS_RECORD pTxTs = (PTX_TS_RECORD)data;
+ PTX_TS_RECORD pTxTs = from_timer(pTxTs, t, TxAdmittedBARecord.Timer);
struct ieee80211_device *ieee = container_of(pTxTs, struct ieee80211_device, TxTsRecord[pTxTs->num]);
TxTsDeleteBA(ieee, pTxTs);
ieee80211_send_DELBA(
@@ -695,9 +695,9 @@ void TxBaInactTimeout(unsigned long data)
DELBA_REASON_TIMEOUT);
}
-void RxBaInactTimeout(unsigned long data)
+void RxBaInactTimeout(struct timer_list *t)
{
- PRX_TS_RECORD pRxTs = (PRX_TS_RECORD)data;
+ PRX_TS_RECORD pRxTs = from_timer(pRxTs, t, RxAdmittedBARecord.Timer);
struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);
RxTsDeleteBA(ieee, pRxTs);
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index f4921abf7300..e60a26250682 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -4,13 +4,13 @@
#include <linux/slab.h>
#include "rtl819x_TS.h"
-static void TsSetupTimeOut(unsigned long data)
+static void TsSetupTimeOut(struct timer_list *unused)
{
// Not implement yet
// This is used for WMMSA and ACM , that would send ADDTSReq frame.
}
-static void TsInactTimeout(unsigned long data)
+static void TsInactTimeout(struct timer_list *unused)
{
// Not implement yet
// This is used for WMMSA and ACM.
@@ -23,9 +23,9 @@ static void TsInactTimeout(unsigned long data)
* return: NULL
* notice:
********************************************************************************************************************/
-static void RxPktPendingTimeout(unsigned long data)
+static void RxPktPendingTimeout(struct timer_list *t)
{
- PRX_TS_RECORD pRxTs = (PRX_TS_RECORD)data;
+ PRX_TS_RECORD pRxTs = from_timer(pRxTs, t, RxPktPendingTimer);
struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);
PRX_REORDER_ENTRY pReorderEntry = NULL;
@@ -90,9 +90,9 @@ static void RxPktPendingTimeout(unsigned long data)
* return: NULL
* notice:
********************************************************************************************************************/
-static void TsAddBaProcess(unsigned long data)
+static void TsAddBaProcess(struct timer_list *t)
{
- PTX_TS_RECORD pTxTs = (PTX_TS_RECORD)data;
+ PTX_TS_RECORD pTxTs = from_timer(pTxTs, t, TsAddBaTimer);
u8 num = pTxTs->num;
struct ieee80211_device *ieee = container_of(pTxTs, struct ieee80211_device, TxTsRecord[num]);
@@ -146,16 +146,15 @@ void TSInitialize(struct ieee80211_device *ieee)
pTxTS->num = count;
// The timers for the operation of Traffic Stream and Block Ack.
// DLS related timer will be add here in the future!!
- setup_timer(&pTxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
- (unsigned long)pTxTS);
- setup_timer(&pTxTS->TsCommonInfo.InactTimer, TsInactTimeout,
- (unsigned long)pTxTS);
- setup_timer(&pTxTS->TsAddBaTimer, TsAddBaProcess,
- (unsigned long)pTxTS);
- setup_timer(&pTxTS->TxPendingBARecord.Timer, BaSetupTimeOut,
- (unsigned long)pTxTS);
- setup_timer(&pTxTS->TxAdmittedBARecord.Timer,
- TxBaInactTimeout, (unsigned long)pTxTS);
+ timer_setup(&pTxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
+ 0);
+ timer_setup(&pTxTS->TsCommonInfo.InactTimer, TsInactTimeout,
+ 0);
+ timer_setup(&pTxTS->TsAddBaTimer, TsAddBaProcess, 0);
+ timer_setup(&pTxTS->TxPendingBARecord.Timer, BaSetupTimeOut,
+ 0);
+ timer_setup(&pTxTS->TxAdmittedBARecord.Timer,
+ TxBaInactTimeout, 0);
ResetTxTsEntry(pTxTS);
list_add_tail(&pTxTS->TsCommonInfo.List, &ieee->Tx_TS_Unused_List);
pTxTS++;
@@ -168,14 +167,13 @@ void TSInitialize(struct ieee80211_device *ieee)
for(count = 0; count < TOTAL_TS_NUM; count++) {
pRxTS->num = count;
INIT_LIST_HEAD(&pRxTS->RxPendingPktList);
- setup_timer(&pRxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
- (unsigned long)pRxTS);
- setup_timer(&pRxTS->TsCommonInfo.InactTimer, TsInactTimeout,
- (unsigned long)pRxTS);
- setup_timer(&pRxTS->RxAdmittedBARecord.Timer,
- RxBaInactTimeout, (unsigned long)pRxTS);
- setup_timer(&pRxTS->RxPktPendingTimer, RxPktPendingTimeout,
- (unsigned long)pRxTS);
+ timer_setup(&pRxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
+ 0);
+ timer_setup(&pRxTS->TsCommonInfo.InactTimer, TsInactTimeout,
+ 0);
+ timer_setup(&pRxTS->RxAdmittedBARecord.Timer,
+ RxBaInactTimeout, 0);
+ timer_setup(&pRxTS->RxPktPendingTimer, RxPktPendingTimeout, 0);
ResetRxTsEntry(pRxTS);
list_add_tail(&pRxTS->TsCommonInfo.List, &ieee->Rx_TS_Unused_List);
pRxTS++;
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 46b3f19e0878..09f66b386e44 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -497,7 +497,7 @@ inline void force_pci_posting(struct net_device *dev)
static struct net_device_stats *rtl8192_stats(struct net_device *dev);
static void rtl8192_restart(struct work_struct *work);
-static void watch_dog_timer_callback(unsigned long data);
+static void watch_dog_timer_callback(struct timer_list *t);
/****************************************************************************
* -----------------------------PROCFS STUFF-------------------------
@@ -1687,9 +1687,13 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
#ifndef JACKSON_NEW_RX
for (i = 0; i < (MAX_RX_URB + 1); i++) {
priv->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!priv->rx_urb[i])
+ return -ENOMEM;
priv->rx_urb[i]->transfer_buffer =
kmalloc(RX_URB_SIZE, GFP_KERNEL);
+ if (!priv->rx_urb[i]->transfer_buffer)
+ return -ENOMEM;
priv->rx_urb[i]->transfer_buffer_length = RX_URB_SIZE;
}
@@ -2690,15 +2694,11 @@ static short rtl8192_init(struct net_device *dev)
err = rtl8192_read_eeprom_info(dev);
if (err) {
DMESG("Reading EEPROM info failed");
- kfree(priv->pFirmware);
- priv->pFirmware = NULL;
- free_ieee80211(dev);
return err;
}
rtl8192_get_channel_map(dev);
init_hal_dm(dev);
- setup_timer(&priv->watch_dog_timer, watch_dog_timer_callback,
- (unsigned long)dev);
+ timer_setup(&priv->watch_dog_timer, watch_dog_timer_callback, 0);
if (rtl8192_usb_initendpoints(dev) != 0) {
DMESG("Endopoints initialization failed");
return -ENOMEM;
@@ -3499,9 +3499,9 @@ static void rtl819x_watchdog_wqcallback(struct work_struct *work)
RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
}
-static void watch_dog_timer_callback(unsigned long data)
+static void watch_dog_timer_callback(struct timer_list *t)
{
- struct r8192_priv *priv = ieee80211_priv((struct net_device *)data);
+ struct r8192_priv *priv = from_timer(priv, t, watch_dog_timer);
schedule_delayed_work(&priv->watch_dog_wq, 0);
mod_timer(&priv->watch_dog_timer,
@@ -3528,7 +3528,7 @@ static int _rtl8192_up(struct net_device *dev)
if (priv->ieee80211->state != IEEE80211_LINKED)
ieee80211_softmac_start_protocol(priv->ieee80211);
ieee80211_reset_queue(priv->ieee80211);
- watch_dog_timer_callback((unsigned long)dev);
+ watch_dog_timer_callback(&priv->watch_dog_timer);
if (!netif_queue_stopped(dev))
netif_start_queue(dev);
else
@@ -4994,11 +4994,11 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
fail2:
rtl8192_down(dev);
+fail:
kfree(priv->pFirmware);
priv->pFirmware = NULL;
rtl8192_usb_deleteendpoints(dev);
mdelay(10);
-fail:
free_ieee80211(dev);
RT_TRACE(COMP_ERR, "wlan driver load failed\n");
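The rtl8192_init() and probe hunks above stop freeing the firmware and the ieee80211 device inside the callee and let the probe error labels do it instead: moving the fail: label above the shared teardown means both the early and the late failure paths release the firmware and endpoints exactly once. A small sketch of that label ordering; my_probe(), setup_a()/setup_b() and their undo counterparts are hypothetical:

int setup_a(void);
int setup_b(void);
void undo_a(void);
void undo_b(void);

static int my_probe(void)
{
	int err;

	err = setup_a();
	if (err)
		goto fail;
	err = setup_b();
	if (err)
		goto fail2;
	return 0;

fail2:
	undo_b();	/* undo the later step first */
fail:
	undo_a();	/* shared cleanup runs for both failure paths */
	return err;
}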
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index 4c241a07ae75..e1b81d34f1ad 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -2730,8 +2730,7 @@ static void dm_init_fsync(struct net_device *dev)
priv->ieee80211->fsync_seconddiff_ratethreshold = 200;
priv->ieee80211->fsync_state = Default_Fsync;
priv->framesyncMonitor = 1; /* current default 0xc38 monitor on */
- setup_timer(&priv->fsync_timer, dm_fsync_timer_callback,
- (unsigned long)dev);
+ timer_setup(&priv->fsync_timer, dm_fsync_timer_callback, 0);
}
static void dm_deInit_fsync(struct net_device *dev)
@@ -2741,10 +2740,10 @@ static void dm_deInit_fsync(struct net_device *dev)
del_timer_sync(&priv->fsync_timer);
}
-void dm_fsync_timer_callback(unsigned long data)
+void dm_fsync_timer_callback(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct r8192_priv *priv = ieee80211_priv((struct net_device *)data);
+ struct r8192_priv *priv = from_timer(priv, t, fsync_timer);
+ struct net_device *dev = priv->ieee80211->dev;
u32 rate_index, rate_count = 0, rate_count_diff = 0;
bool bSwitchFromCountDiff = false;
bool bDoubleTimeInterval = false;
diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h
index 98137f65d1b2..8f3d618dcfdb 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.h
+++ b/drivers/staging/rtl8192u/r8192U_dm.h
@@ -227,7 +227,7 @@ void dm_force_tx_fw_info(struct net_device *dev,
void dm_init_edca_turbo(struct net_device *dev);
void dm_rf_operation_test_callback(unsigned long data);
void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
-void dm_fsync_timer_callback(unsigned long data);
+void dm_fsync_timer_callback(struct timer_list *t);
void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
void dm_shadow_init(struct net_device *dev);
void dm_initialize_txpower_tracking(struct net_device *dev);
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index a9545386fbc5..e4e6c979bedf 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -964,7 +964,7 @@ struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev)
return wstats;
}
-struct iw_handler_def r8192_wx_handlers_def = {
+const struct iw_handler_def r8192_wx_handlers_def = {
.standard = r8192_wx_handlers,
.num_standard = ARRAY_SIZE(r8192_wx_handlers),
.private = r8192_private_handler,
diff --git a/drivers/staging/rtl8192u/r8192U_wx.h b/drivers/staging/rtl8192u/r8192U_wx.h
index fb5f808433d1..a6c2b95e2e69 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.h
+++ b/drivers/staging/rtl8192u/r8192U_wx.h
@@ -17,7 +17,7 @@
#ifndef R8180_WX_H
#define R8180_WX_H
-extern struct iw_handler_def r8192_wx_handlers_def;
+extern const struct iw_handler_def r8192_wx_handlers_def;
/* Enable the rtl819x_core.c to share this function, david 2008.9.22 */
struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev);
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index c83d7ebb164f..de832b0b5eec 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -216,9 +216,9 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
emem_sz = fwhdr.img_SRAM_size;
do {
memset(ptx_desc, 0, TXDESC_SIZE);
- if (emem_sz > MAX_DUMP_FWSZ) /* max=48k */
+ if (emem_sz > MAX_DUMP_FWSZ) { /* max=48k */
dump_emem_sz = MAX_DUMP_FWSZ;
- else {
+ } else {
dump_emem_sz = emem_sz;
ptx_desc->txdw0 |= cpu_to_le32(BIT(28));
}
diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c
index a077069d6227..3c7c4a4faeb2 100644
--- a/drivers/staging/rtl8712/mlme_linux.c
+++ b/drivers/staging/rtl8712/mlme_linux.c
@@ -32,39 +32,45 @@
#include "drv_types.h"
#include "mlme_osdep.h"
-static void sitesurvey_ctrl_handler(unsigned long data)
+static void sitesurvey_ctrl_handler(struct timer_list *t)
{
- struct _adapter *adapter = (struct _adapter *)data;
+ struct _adapter *adapter =
+ from_timer(adapter, t,
+ mlmepriv.sitesurveyctrl.sitesurvey_ctrl_timer);
_r8712_sitesurvey_ctrl_handler(adapter);
mod_timer(&adapter->mlmepriv.sitesurveyctrl.sitesurvey_ctrl_timer,
jiffies + msecs_to_jiffies(3000));
}
-static void join_timeout_handler (unsigned long data)
+static void join_timeout_handler (struct timer_list *t)
{
- struct _adapter *adapter = (struct _adapter *)data;
+ struct _adapter *adapter =
+ from_timer(adapter, t, mlmepriv.assoc_timer);
_r8712_join_timeout_handler(adapter);
}
-static void _scan_timeout_handler (unsigned long data)
+static void _scan_timeout_handler (struct timer_list *t)
{
- struct _adapter *adapter = (struct _adapter *)data;
+ struct _adapter *adapter =
+ from_timer(adapter, t, mlmepriv.scan_to_timer);
r8712_scan_timeout_handler(adapter);
}
-static void dhcp_timeout_handler (unsigned long data)
+static void dhcp_timeout_handler (struct timer_list *t)
{
- struct _adapter *adapter = (struct _adapter *)data;
+ struct _adapter *adapter =
+ from_timer(adapter, t, mlmepriv.dhcp_timer);
_r8712_dhcp_timeout_handler(adapter);
}
-static void wdg_timeout_handler (unsigned long data)
+static void wdg_timeout_handler (struct timer_list *t)
{
- struct _adapter *adapter = (struct _adapter *)data;
+ struct _adapter *adapter =
+ from_timer(adapter, t, mlmepriv.wdg_timer);
_r8712_wdg_timeout_handler(adapter);
@@ -76,17 +82,12 @@ void r8712_init_mlme_timer(struct _adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- setup_timer(&pmlmepriv->assoc_timer, join_timeout_handler,
- (unsigned long)padapter);
- setup_timer(&pmlmepriv->sitesurveyctrl.sitesurvey_ctrl_timer,
- sitesurvey_ctrl_handler,
- (unsigned long)padapter);
- setup_timer(&pmlmepriv->scan_to_timer, _scan_timeout_handler,
- (unsigned long)padapter);
- setup_timer(&pmlmepriv->dhcp_timer, dhcp_timeout_handler,
- (unsigned long)padapter);
- setup_timer(&pmlmepriv->wdg_timer, wdg_timeout_handler,
- (unsigned long)padapter);
+ timer_setup(&pmlmepriv->assoc_timer, join_timeout_handler, 0);
+ timer_setup(&pmlmepriv->sitesurveyctrl.sitesurvey_ctrl_timer,
+ sitesurvey_ctrl_handler, 0);
+ timer_setup(&pmlmepriv->scan_to_timer, _scan_timeout_handler, 0);
+ timer_setup(&pmlmepriv->dhcp_timer, dhcp_timeout_handler, 0);
+ timer_setup(&pmlmepriv->wdg_timer, wdg_timeout_handler, 0);
}
void r8712_os_indicate_connect(struct _adapter *adapter)
@@ -118,9 +119,8 @@ void r8712_os_indicate_disconnect(struct _adapter *adapter)
adapter->securitypriv.btkip_countermeasure;
memset((unsigned char *)&adapter->securitypriv, 0,
sizeof(struct security_priv));
- setup_timer(&adapter->securitypriv.tkip_timer,
- r8712_use_tkipkey_handler,
- (unsigned long)adapter);
+ timer_setup(&adapter->securitypriv.tkip_timer,
+ r8712_use_tkipkey_handler, 0);
/* Restore the PMK information to securitypriv structure
* for the following connection.
*/
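The rtl8712 handlers above show the same timer_setup()/from_timer() conversion when the timer sits below the top-level structure: from_timer() is container_of() underneath, so its third argument can be a nested member path such as mlmepriv.sitesurveyctrl.sitesurvey_ctrl_timer. A brief sketch with hypothetical my_adapter/my_mlme types:

#include <linux/timer.h>

struct my_mlme {
	struct timer_list scan_timer;
};

struct my_adapter {
	struct my_mlme mlme;
};

static void my_scan_timeout(struct timer_list *t)
{
	/* The member path names the timer's position inside the outer type. */
	struct my_adapter *adapter = from_timer(adapter, t, mlme.scan_timer);

	/* handle the timeout against the owning adapter ... */
	(void)adapter;
}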
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index e698f6ede449..95caf8df9a13 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -313,8 +313,8 @@ u8 r8712_init_drv_sw(struct _adapter *padapter)
_r8712_init_recv_priv(&padapter->recvpriv, padapter);
memset((unsigned char *)&padapter->securitypriv, 0,
sizeof(struct security_priv));
- setup_timer(&padapter->securitypriv.tkip_timer,
- r8712_use_tkipkey_handler, (unsigned long)padapter);
+ timer_setup(&padapter->securitypriv.tkip_timer,
+ r8712_use_tkipkey_handler, 0);
_r8712_init_sta_priv(&padapter->stapriv);
padapter->stapriv.padapter = padapter;
r8712_init_bcmc_stainfo(padapter);
@@ -385,11 +385,11 @@ static int netdev_open(struct net_device *pnetdev)
padapter->bup = true;
if (rtl871x_hal_init(padapter) != _SUCCESS)
goto netdev_open_error;
- if (!r8712_initmac)
+ if (!r8712_initmac) {
/* Use the mac address stored in the Efuse */
memcpy(pnetdev->dev_addr,
padapter->eeprompriv.mac_addr, ETH_ALEN);
- else {
+ } else {
/* We have to inform f/w to use user-supplied MAC
* address.
*/
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 0104aced113e..3c88994fdfcd 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -356,11 +356,11 @@ _next:
if ((wr_sz % 64) == 0)
blnPending = 1;
}
- if (blnPending) /* 32 bytes for TX Desc - 8 offset */
+ if (blnPending) { /* 32 bytes for TX Desc - 8 offset */
pdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE +
OFFSET_SZ + 8) << OFFSET_SHT) &
0x00ff0000);
- else {
+ } else {
pdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE +
OFFSET_SZ) <<
OFFSET_SHT) &
diff --git a/drivers/staging/rtl8712/rtl8712_event.h b/drivers/staging/rtl8712/rtl8712_event.h
index b38374025c93..cad7085c3f8a 100644
--- a/drivers/staging/rtl8712/rtl8712_event.h
+++ b/drivers/staging/rtl8712/rtl8712_event.h
@@ -60,7 +60,6 @@ enum rtl8712_c2h_event {
MAX_C2HEVT
};
-
#ifdef _RTL8712_CMD_C_
static struct fwevent wlanevents[] = {
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index ea3eb94b28b3..8f555e6e1b3f 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -149,7 +149,7 @@ int r8712_free_recvframe(union recv_frame *precvframe,
list_add_tail(&(precvframe->u.hdr.list), &pfree_recv_queue->queue);
if (padapter != NULL) {
if (pfree_recv_queue == &precvpriv->free_recv_queue)
- precvpriv->free_recvframe_cnt++;
+ precvpriv->free_recvframe_cnt++;
}
spin_unlock_irqrestore(&pfree_recv_queue->lock, irqL);
return _SUCCESS;
@@ -883,10 +883,10 @@ static void query_rx_phy_status(struct _adapter *padapter,
* from 0~100. It is assigned to the BSS List in
* GetValueFromBeaconOrProbeRsp().
*/
- if (bcck_rate)
+ if (bcck_rate) {
prframe->u.hdr.attrib.signal_strength =
(u8)r8712_signal_scale_mapping(pwdb_all);
- else {
+ } else {
if (rf_rx_num != 0)
prframe->u.hdr.attrib.signal_strength =
(u8)(r8712_signal_scale_mapping(total_rssi /=
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 04638f1e4e88..a424f447a725 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -899,9 +899,10 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
if (!pwlan)
goto createbss_cmd_fail;
pwlan->last_scanned = jiffies;
- } else
+ } else {
list_add_tail(&(pwlan->list),
&pmlmepriv->scanned_queue.queue);
+ }
pnetwork->Length = r8712_get_wlan_bssid_ex_sz(pnetwork);
memcpy(&(pwlan->network), pnetwork, pnetwork->Length);
pwlan->fixed = true;
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index 01a150446f5a..8a5ced4fa9d3 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -78,10 +78,10 @@ static u8 do_join(struct _adapter *padapter)
int ret;
ret = r8712_select_and_join_from_scan(pmlmepriv);
- if (ret == _SUCCESS)
+ if (ret == _SUCCESS) {
mod_timer(&pmlmepriv->assoc_timer,
jiffies + msecs_to_jiffies(MAX_JOIN_TIMEOUT));
- else {
+ } else {
if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
/* submit r8712_createbss_cmd to change to an
* ADHOC_MASTER pmlmepriv->lock has been
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index bf1ac22bae1c..111c809afc51 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -574,10 +574,10 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf)
set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
if (r8712_select_and_join_from_scan(pmlmepriv)
- == _SUCCESS)
+ == _SUCCESS) {
mod_timer(&pmlmepriv->assoc_timer, jiffies +
msecs_to_jiffies(MAX_JOIN_TIMEOUT));
- else {
+ } else {
struct wlan_bssid_ex *pdev_network =
&(adapter->registrypriv.dev_network);
u8 *pibss =
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index e42fc1404c35..ae4c9567bb55 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -164,9 +164,10 @@ static void rpwm_workitem_callback(struct work_struct *work)
}
}
-static void rpwm_check_handler (unsigned long data)
+static void rpwm_check_handler (struct timer_list *t)
{
- struct _adapter *adapter = (struct _adapter *)data;
+ struct _adapter *adapter =
+ from_timer(adapter, t, pwrctrlpriv.rpwm_check_timer);
_rpwm_check_handler(adapter);
}
@@ -185,8 +186,7 @@ void r8712_init_pwrctrl_priv(struct _adapter *padapter)
r8712_write8(padapter, 0x1025FE58, 0);
INIT_WORK(&pwrctrlpriv->SetPSModeWorkItem, SetPSModeWorkItemCallback);
INIT_WORK(&pwrctrlpriv->rpwm_workitem, rpwm_workitem_callback);
- setup_timer(&pwrctrlpriv->rpwm_check_timer, rpwm_check_handler,
- (unsigned long)padapter);
+ timer_setup(&pwrctrlpriv->rpwm_check_timer, rpwm_check_handler, 0);
}
/*
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index bd83fb492c45..56d36f6f9c46 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -1402,9 +1402,10 @@ u32 r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe)
return _SUCCESS;
}
-void r8712_use_tkipkey_handler(unsigned long data)
+void r8712_use_tkipkey_handler(struct timer_list *t)
{
- struct _adapter *padapter = (struct _adapter *)data;
+ struct _adapter *padapter =
+ from_timer(padapter, t, securitypriv.tkip_timer);
padapter->securitypriv.busetkipkey = true;
}
diff --git a/drivers/staging/rtl8712/rtl871x_security.h b/drivers/staging/rtl8712/rtl871x_security.h
index fa952e17975b..46b88a41d236 100644
--- a/drivers/staging/rtl8712/rtl871x_security.h
+++ b/drivers/staging/rtl8712/rtl871x_security.h
@@ -224,7 +224,7 @@ void r8712_wep_encrypt(struct _adapter *padapter, u8 *pxmitframe);
u32 r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe);
u32 r8712_tkip_decrypt(struct _adapter *padapter, u8 *precvframe);
void r8712_wep_decrypt(struct _adapter *padapter, u8 *precvframe);
-void r8712_use_tkipkey_handler(unsigned long data);
+void r8712_use_tkipkey_handler(struct timer_list *t);
#endif /*__RTL871X_SECURITY_H_ */
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index b3e266bd57ab..85eadddfaf06 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -590,9 +590,10 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
mac[0] &= 0xFE;
dev_info(&udev->dev,
"r8712u: MAC Address from user = %pM\n", mac);
- } else
+ } else {
dev_info(&udev->dev,
"r8712u: MAC Address from efuse = %pM\n", mac);
+ }
ether_addr_copy(pnetdev->dev_addr, mac);
}
/* step 6. Load the firmware asynchronously */
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index d3007c1c45e3..0b530ea7fd81 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -1581,15 +1581,13 @@ u8 rtw_ap_set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(
- sizeof(struct set_stakey_parm)
- );
+ psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
if (psetstakey_para == NULL) {
kfree((u8 *) ph2c);
res = _FAIL;
@@ -1630,12 +1628,12 @@ static int rtw_ap_set_key(
/* DBG_871X("%s\n", __func__); */
- pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
res = _FAIL;
goto exit;
}
- psetkeyparm = (struct setkey_parm *)rtw_zmalloc(sizeof(struct setkey_parm));
+ psetkeyparm = rtw_zmalloc(sizeof(struct setkey_parm));
if (psetkeyparm == NULL) {
kfree((unsigned char *)pcmd);
res = _FAIL;
diff --git a/drivers/staging/rtl8723bs/core/rtw_btcoex.c b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
index 01f78d1671de..79aa02afad01 100644
--- a/drivers/staging/rtl8723bs/core/rtw_btcoex.c
+++ b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
@@ -207,11 +207,11 @@ void rtw_btcoex_RejectApAggregatedPacket(struct adapter *padapter, u8 enable)
psta = rtw_get_stainfo(&padapter->stapriv, get_bssid(&padapter->mlmepriv));
if (true == enable) {
- pmlmeinfo->bAcceptAddbaReq = false;
+ pmlmeinfo->accept_addba_req = false;
if (psta)
send_delba(padapter, 0, psta->hwaddr);
} else{
- pmlmeinfo->bAcceptAddbaReq = true;
+ pmlmeinfo->accept_addba_req = true;
}
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index d381827dba3b..9ac2dea6dff1 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -408,7 +408,7 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd)
}
/* free cmd_obj */
- kfree((unsigned char *)pcmd);
+ kfree(pcmd);
}
@@ -613,13 +613,13 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
}
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL)
return _FAIL;
- psurveyPara = (struct sitesurvey_parm *)rtw_zmalloc(sizeof(struct sitesurvey_parm));
+ psurveyPara = rtw_zmalloc(sizeof(struct sitesurvey_parm));
if (psurveyPara == NULL) {
- kfree((unsigned char *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
@@ -681,15 +681,15 @@ u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pbsetdataratepara = (struct setdatarate_parm *)rtw_zmalloc(sizeof(struct setdatarate_parm));
+ pbsetdataratepara = rtw_zmalloc(sizeof(struct setdatarate_parm));
if (pbsetdataratepara == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -706,8 +706,8 @@ exit:
void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
/* rtw_free_cmd_obj(pcmd); */
- kfree((unsigned char *) pcmd->parmbuf);
- kfree((unsigned char *) pcmd);
+ kfree(pcmd->parmbuf);
+ kfree(pcmd);
}
u8 rtw_createbss_cmd(struct adapter *padapter)
@@ -724,7 +724,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
}
- pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
res = _FAIL;
goto exit;
@@ -757,7 +757,7 @@ u8 rtw_startbss_cmd(struct adapter *padapter, int flags)
start_bss_network(padapter, (u8 *)&(padapter->mlmepriv.cur_network.network));
} else {
/* need enqueue, prepare cmd_obj and enqueue */
- pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
res = _FAIL;
goto exit;
@@ -815,7 +815,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
}
- pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
res = _FAIL;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n"));
@@ -847,7 +847,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss;
if (psecnetwork == NULL) {
if (pcmd != NULL)
- kfree((unsigned char *)pcmd);
+ kfree(pcmd);
res = _FAIL;
@@ -943,7 +943,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_disassoc_cmd\n"));
/* prepare cmd parameter */
- param = (struct disconnect_parm *)rtw_zmalloc(sizeof(*param));
+ param = rtw_zmalloc(sizeof(*param));
if (param == NULL) {
res = _FAIL;
goto exit;
@@ -952,10 +952,10 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
if (enqueue) {
/* need enqueue, prepare cmd_obj and enqueue */
- cmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(*cmdobj));
+ cmdobj = rtw_zmalloc(sizeof(*cmdobj));
if (cmdobj == NULL) {
res = _FAIL;
- kfree((u8 *)param);
+ kfree(param);
goto exit;
}
init_h2fwcmd_w_parm_no_rsp(cmdobj, param, _DisConnect_CMD_);
@@ -964,7 +964,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
/* no need to enqueue, do the cmd hdl directly and free cmd parameter */
if (H2C_SUCCESS != disconnect_hdl(padapter, (u8 *)param))
res = _FAIL;
- kfree((u8 *)param);
+ kfree(param);
}
exit:
@@ -979,7 +979,7 @@ u8 rtw_setopmode_cmd(struct adapter *padapter, enum NDIS_802_11_NETWORK_INFRAST
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- psetop = (struct setopmode_parm *)rtw_zmalloc(sizeof(struct setopmode_parm));
+ psetop = rtw_zmalloc(sizeof(struct setopmode_parm));
if (psetop == NULL) {
res = _FAIL;
@@ -988,9 +988,9 @@ u8 rtw_setopmode_cmd(struct adapter *padapter, enum NDIS_802_11_NETWORK_INFRAST
psetop->mode = (u8)networktype;
if (enqueue) {
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
- kfree((u8 *)psetop);
+ kfree(psetop);
res = _FAIL;
goto exit;
}
@@ -999,7 +999,7 @@ u8 rtw_setopmode_cmd(struct adapter *padapter, enum NDIS_802_11_NETWORK_INFRAST
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
} else{
setopmode_hdl(padapter, (u8 *)psetop);
- kfree((u8 *)psetop);
+ kfree(psetop);
}
exit:
return res;
@@ -1016,7 +1016,7 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_
struct security_priv *psecuritypriv = &padapter->securitypriv;
u8 res = _SUCCESS;
- psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(sizeof(struct set_stakey_parm));
+ psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
if (psetstakey_para == NULL) {
res = _FAIL;
goto exit;
@@ -1040,17 +1040,17 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_
padapter->securitypriv.busetkipkey = true;
if (enqueue) {
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
- kfree((u8 *) psetstakey_para);
+ kfree(psetstakey_para);
res = _FAIL;
goto exit;
}
- psetstakey_rsp = (struct set_stakey_rsp *)rtw_zmalloc(sizeof(struct set_stakey_rsp));
+ psetstakey_rsp = rtw_zmalloc(sizeof(struct set_stakey_rsp));
if (psetstakey_rsp == NULL) {
- kfree((u8 *) ph2c);
- kfree((u8 *) psetstakey_para);
+ kfree(ph2c);
+ kfree(psetstakey_para);
res = _FAIL;
goto exit;
}
@@ -1061,7 +1061,7 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
} else{
set_stakey_hdl(padapter, (u8 *)psetstakey_para);
- kfree((u8 *) psetstakey_para);
+ kfree(psetstakey_para);
}
exit:
return res;
@@ -1083,23 +1083,23 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 enqueu
rtw_camid_free(padapter, cam_id);
}
} else{
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(sizeof(struct set_stakey_parm));
+ psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
if (psetstakey_para == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
- psetstakey_rsp = (struct set_stakey_rsp *)rtw_zmalloc(sizeof(struct set_stakey_rsp));
+ psetstakey_rsp = rtw_zmalloc(sizeof(struct set_stakey_rsp));
if (psetstakey_rsp == NULL) {
- kfree((u8 *) ph2c);
- kfree((u8 *) psetstakey_para);
+ kfree(ph2c);
+ kfree(psetstakey_para);
res = _FAIL;
goto exit;
}
@@ -1128,15 +1128,15 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- paddbareq_parm = (struct addBaReq_parm *)rtw_zmalloc(sizeof(struct addBaReq_parm));
+ paddbareq_parm = rtw_zmalloc(sizeof(struct addBaReq_parm));
if (paddbareq_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1162,15 +1162,15 @@ u8 rtw_reset_securitypriv_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1197,15 +1197,15 @@ u8 rtw_free_assoc_resources_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1233,15 +1233,15 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
/* only primary padapter does this cmd */
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1283,7 +1283,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconf
}
/* prepare cmd parameter */
- setChannelPlan_param = (struct SetChannelPlan_param *)rtw_zmalloc(sizeof(struct SetChannelPlan_param));
+ setChannelPlan_param = rtw_zmalloc(sizeof(struct SetChannelPlan_param));
if (setChannelPlan_param == NULL) {
res = _FAIL;
goto exit;
@@ -1292,9 +1292,9 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconf
if (enqueue) {
/* need enqueue, prepare cmd_obj and enqueue */
- pcmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmdobj = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmdobj == NULL) {
- kfree((u8 *)setChannelPlan_param);
+ kfree(setChannelPlan_param);
res = _FAIL;
goto exit;
}
@@ -1306,7 +1306,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconf
if (H2C_SUCCESS != set_chplan_hdl(padapter, (unsigned char *)setChannelPlan_param))
res = _FAIL;
- kfree((u8 *)setChannelPlan_param);
+ kfree(setChannelPlan_param);
}
/* do something based on res... */
@@ -1553,15 +1553,15 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
/* return res; */
if (enqueue) {
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1595,15 +1595,15 @@ u8 rtw_dm_in_lps_wk_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1668,15 +1668,15 @@ u8 rtw_dm_ra_mask_wk_cmd(struct adapter *padapter, u8 *psta)
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1719,15 +1719,15 @@ u8 rtw_ps_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ppscmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ppscmd = rtw_zmalloc(sizeof(struct cmd_obj));
if (ppscmd == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ppscmd);
+ kfree(ppscmd);
res = _FAIL;
goto exit;
}
@@ -1791,15 +1791,15 @@ u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1890,15 +1890,15 @@ u8 rtw_c2h_packet_wk_cmd(struct adapter *padapter, u8 *pbuf, u16 length)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((u8 *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1925,15 +1925,15 @@ u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((u8 *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1967,7 +1967,7 @@ static void c2h_wk_callback(_workitem *work)
/* This C2H event is read, clear it */
c2h_evt_clear(adapter);
} else{
- c2h_evt = (u8 *)rtw_malloc(16);
+ c2h_evt = rtw_malloc(16);
if (c2h_evt != NULL) {
/* This C2H event is not read, read & clear now */
if (rtw_hal_c2h_evt_read(adapter, c2h_evt) != _SUCCESS) {
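The rtw_cmd.c hunks above drop the casts on rtw_zmalloc()'s return value and on kfree()'s argument: conversions to and from void * are implicit in C, so the casts add nothing. A small illustration of the resulting style, using a hypothetical kzalloc()-based struct my_cmd rather than the driver's rtw_zmalloc()/cmd_obj:

#include <linux/slab.h>

struct my_cmd {
	int id;
	void *parm;
};

static int my_enqueue_cmd(void)
{
	struct my_cmd *cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);	/* no cast needed */

	if (!cmd)
		return -ENOMEM;
	/* ... fill in and hand the command off; on an error path simply: */
	kfree(cmd);	/* kfree() takes const void *, so no cast here either */
	return 0;
}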
diff --git a/drivers/staging/rtl8723bs/core/rtw_debug.c b/drivers/staging/rtl8723bs/core/rtw_debug.c
index 3db02e9f27ab..b5dd244fee8f 100644
--- a/drivers/staging/rtl8723bs/core/rtw_debug.c
+++ b/drivers/staging/rtl8723bs/core/rtw_debug.c
@@ -1122,7 +1122,8 @@ int proc_get_rx_ampdu(struct seq_file *m, void *v)
if (pregpriv)
DBG_871X_SEL_NL(m,
- "bAcceptAddbaReq = %d , 0:Reject AP's Add BA req, 1:Accept AP's Add BA req.\n", pmlmeinfo->bAcceptAddbaReq
+ "accept_addba_req = %d , 0:Reject AP's Add BA req, 1:Accept AP's Add BA req.\n",
+ pmlmeinfo->accept_addba_req
);
return 0;
@@ -1146,8 +1147,9 @@ ssize_t proc_set_rx_ampdu(struct file *file, const char __user *buffer, size_t c
sscanf(tmp, "%d ", &mode);
if (pregpriv && mode < 2) {
- pmlmeinfo->bAcceptAddbaReq = mode;
- DBG_871X("pmlmeinfo->bAcceptAddbaReq =%d\n", pmlmeinfo->bAcceptAddbaReq);
+ pmlmeinfo->accept_addba_req = mode;
+ DBG_871X("pmlmeinfo->accept_addba_req =%d\n",
+ pmlmeinfo->accept_addba_req);
if (mode == 0) {
/*tear down Rx AMPDU*/
send_delba(padapter, 0, get_my_bssid(&(pmlmeinfo->network)));/* recipient*/
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index 7b37e085b793..9167900b5f7d 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -1189,9 +1189,9 @@ void rtw_macaddr_cfg(struct device *dev, u8 *mac_addr)
(mac[3] == 0xff) && (mac[4] == 0xff) && (mac[5] == 0xff)) ||
((mac[0] == 0x00) && (mac[1] == 0x00) && (mac[2] == 0x00) &&
(mac[3] == 0x00) && (mac[4] == 0x00) && (mac[5] == 0x00))) {
- if (np &&
- (addr = of_get_property(np, "local-mac-address", &len)) &&
- len == ETH_ALEN) {
+ if (np &&
+ (addr = of_get_property(np, "local-mac-address", &len)) &&
+ len == ETH_ALEN) {
memcpy(mac_addr, addr, ETH_ALEN);
} else {
mac[0] = 0x00;
diff --git a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
index d815a693fa64..e5354cec8dd5 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
@@ -590,14 +590,10 @@ u8 rtw_set_802_11_authentication_mode(struct adapter *padapter, enum NDIS_802_11
u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep *wep)
{
- u8 bdefaultkey;
- u8 btransmitkey;
sint keyid, res;
struct security_priv *psecuritypriv = &(padapter->securitypriv);
u8 ret = _SUCCESS;
- bdefaultkey = (wep->KeyIndex & 0x40000000) > 0 ? false : true; /* for ??? */
- btransmitkey = (wep->KeyIndex & 0x80000000) > 0 ? true : false; /* for ??? */
keyid = wep->KeyIndex & 0x3fffffff;
if (keyid >= 4) {
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index cb8a95aabd6c..fe739eb2cf7d 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -28,9 +28,6 @@ sint _rtw_init_mlme_priv(struct adapter *padapter)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
sint res = _SUCCESS;
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
- /* memset((u8 *)pmlmepriv, 0, sizeof(struct mlme_priv)); */
-
pmlmepriv->nic_hdl = (u8 *)padapter;
pmlmepriv->pscanned = NULL;
@@ -1817,8 +1814,10 @@ void rtw_wmm_event_callback(struct adapter *padapter, u8 *pbuf)
* _rtw_join_timeout_handler - Timeout/failure handler for CMD JoinBss
* @adapter: pointer to struct adapter structure
*/
-void _rtw_join_timeout_handler (struct adapter *adapter)
+void _rtw_join_timeout_handler(struct timer_list *t)
{
+ struct adapter *adapter = from_timer(adapter, t,
+ mlmepriv.assoc_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
DBG_871X("%s, fw_state =%x\n", __func__, get_fwstate(pmlmepriv));
@@ -1870,8 +1869,10 @@ void _rtw_join_timeout_handler (struct adapter *adapter)
* rtw_scan_timeout_handler - Timeout/Failure handler for CMD SiteSurvey
* @adapter: pointer to struct adapter structure
*/
-void rtw_scan_timeout_handler (struct adapter *adapter)
+void rtw_scan_timeout_handler(struct timer_list *t)
{
+ struct adapter *adapter = from_timer(adapter, t,
+ mlmepriv.scan_to_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
DBG_871X(FUNC_ADPT_FMT" fw_state =%x\n", FUNC_ADPT_ARG(adapter), get_fwstate(pmlmepriv));
@@ -1934,7 +1935,7 @@ exit:
return;
}
-void rtw_dynamic_check_timer_handlder(struct adapter *adapter)
+void rtw_dynamic_check_timer_handler(struct adapter *adapter)
{
if (!adapter)
return;
@@ -2271,13 +2272,13 @@ sint rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
struct cmd_priv *pcmdpriv = &(adapter->cmdpriv);
sint res = _SUCCESS;
- pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
res = _FAIL; /* try again */
goto exit;
}
- psetauthparm = (struct setauth_parm *)rtw_zmalloc(sizeof(struct setauth_parm));
+ psetauthparm = rtw_zmalloc(sizeof(struct setauth_parm));
if (psetauthparm == NULL) {
kfree((unsigned char *)pcmd);
res = _FAIL;
@@ -2312,7 +2313,7 @@ sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, s
struct cmd_priv *pcmdpriv = &(adapter->cmdpriv);
sint res = _SUCCESS;
- psetkeyparm = (struct setkey_parm *)rtw_zmalloc(sizeof(struct setkey_parm));
+ psetkeyparm = rtw_zmalloc(sizeof(struct setkey_parm));
if (psetkeyparm == NULL) {
res = _FAIL;
goto exit;
@@ -2364,7 +2365,7 @@ sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, s
if (enqueue) {
- pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
kfree((unsigned char *)psetkeyparm);
res = _FAIL; /* try again */
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index b6d137f505e1..7d7756e40bcb 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -17,6 +17,7 @@
#include <drv_types.h>
#include <rtw_debug.h>
#include <rtw_wifi_regd.h>
+#include <linux/kernel.h>
static struct mlme_handler mlme_sta_tbl[] = {
@@ -474,15 +475,12 @@ int init_mlme_ext_priv(struct adapter *padapter)
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
- /* memset((u8 *)pmlmeext, 0, sizeof(struct mlme_ext_priv)); */
-
pmlmeext->padapter = padapter;
/* fill_fwpriv(padapter, &(pmlmeext->fwpriv)); */
init_mlme_ext_priv_value(padapter);
- pmlmeinfo->bAcceptAddbaReq = pregistrypriv->bAcceptAddbaReq;
+ pmlmeinfo->accept_addba_req = pregistrypriv->accept_addba_req;
init_mlme_ext_timer(padapter);
@@ -510,7 +508,7 @@ void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
if (!padapter)
return;
- if (padapter->bDriverStopped == true) {
+ if (padapter->bDriverStopped) {
del_timer_sync(&pmlmeext->survey_timer);
del_timer_sync(&pmlmeext->link_timer);
/* del_timer_sync(&pmlmeext->ADDBA_timer); */
@@ -562,7 +560,7 @@ void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
index = GetFrameSubType(pframe) >> 4;
- if (index >= (sizeof(mlme_sta_tbl) / sizeof(struct mlme_handler))) {
+ if (index >= ARRAY_SIZE(mlme_sta_tbl)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Currently we do not support reserved sub-fr-type =%d\n", index));
return;
}
@@ -582,11 +580,11 @@ void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
switch (GetFrameSubType(pframe)) {
case WIFI_AUTH:
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
ptable->func = &OnAuth;
else
ptable->func = &OnAuthClient;
- /* pass through */
+ /* fall through */
case WIFI_ASSOCREQ:
case WIFI_REASSOCREQ:
_mgt_dispatcher(padapter, ptable, precv_frame);
@@ -637,8 +635,8 @@ unsigned int OnProbeReq(struct adapter *padapter, union recv_frame *precv_frame)
/* DBG_871X("+OnProbeReq\n"); */
#ifdef CONFIG_AUTO_AP_MODE
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
- pmlmepriv->cur_network.join_res == true) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) &&
+ pmlmepriv->cur_network.join_res) {
struct sta_info *psta;
u8 *mac_addr, *peer_addr;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -752,7 +750,7 @@ _non_rc_device:
/* check (wildcard) SSID */
if (p != NULL) {
- if (is_valid_p2p_probereq == true)
+ if (is_valid_p2p_probereq)
goto _issue_probersp;
if ((ielen != 0 && false == !memcmp((void *)(p+2), (void *)cur->Ssid.Ssid, cur->Ssid.SsidLength))
@@ -761,8 +759,8 @@ _non_rc_device:
return _SUCCESS;
_issue_probersp:
- if (((check_fwstate(pmlmepriv, _FW_LINKED) == true &&
- pmlmepriv->cur_network.join_res == true)) || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) &&
+ pmlmepriv->cur_network.join_res) || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
/* DBG_871X("+issue_probersp during ap mode\n"); */
issue_probersp(padapter, get_sa(pframe), is_valid_p2p_probereq);
}
@@ -818,7 +816,7 @@ unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
if (!memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) {
if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
/* we should update current network before auth, or some IE is wrong */
- pbss = (struct wlan_bssid_ex *)rtw_malloc(sizeof(struct wlan_bssid_ex));
+ pbss = rtw_malloc(sizeof(struct wlan_bssid_ex));
if (pbss) {
if (collect_bss_info(padapter, precv_frame, pbss) == _SUCCESS) {
update_network(&(pmlmepriv->cur_network.network), pbss, padapter, true);
@@ -1773,7 +1771,7 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
DBG_871X("%s Reason code(%d)\n", __func__, reason);
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1848,7 +1846,7 @@ unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
DBG_871X("%s Reason code(%d)\n", __func__, reason);
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1976,7 +1974,7 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
/* process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), GetAddr3Ptr(pframe)); */
process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), addr);
- if (pmlmeinfo->bAcceptAddbaReq == true) {
+ if (pmlmeinfo->accept_addba_req) {
issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 0);
} else{
issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 37);/* reject ADDBA Req */
@@ -2227,7 +2225,7 @@ unsigned int OnAction(struct adapter *padapter, union recv_frame *precv_frame)
category = frame_body[0];
- for (i = 0; i < sizeof(OnAction_tbl)/sizeof(struct action_handler); i++) {
+ for (i = 0; i < ARRAY_SIZE(OnAction_tbl); i++) {
ptable = &OnAction_tbl[i];
if (category == ptable->num)
@@ -2350,8 +2348,8 @@ void update_mgntframe_attrib_addr(struct adapter *padapter, struct xmit_frame *p
void dump_mgntframe(struct adapter *padapter, struct xmit_frame *pmgntframe)
{
- if (padapter->bSurpriseRemoved == true ||
- padapter->bDriverStopped == true) {
+ if (padapter->bSurpriseRemoved ||
+ padapter->bDriverStopped) {
rtw_free_xmitbuf(&padapter->xmitpriv, pmgntframe->pxmitbuf);
rtw_free_xmitframe(&padapter->xmitpriv, pmgntframe);
return;
@@ -2368,8 +2366,8 @@ s32 dump_mgntframe_and_wait(struct adapter *padapter, struct xmit_frame *pmgntfr
struct xmit_buf *pxmitbuf = pmgntframe->pxmitbuf;
struct submit_ctx sctx;
- if (padapter->bSurpriseRemoved == true ||
- padapter->bDriverStopped == true) {
+ if (padapter->bSurpriseRemoved ||
+ padapter->bDriverStopped) {
rtw_free_xmitbuf(&padapter->xmitpriv, pmgntframe->pxmitbuf);
rtw_free_xmitframe(&padapter->xmitpriv, pmgntframe);
return ret;
@@ -2397,8 +2395,8 @@ s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmg
u32 timeout_ms = 500;/* 500ms */
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- if (padapter->bSurpriseRemoved == true ||
- padapter->bDriverStopped == true) {
+ if (padapter->bSurpriseRemoved ||
+ padapter->bDriverStopped) {
rtw_free_xmitbuf(&padapter->xmitpriv, pmgntframe->pxmitbuf);
rtw_free_xmitframe(&padapter->xmitpriv, pmgntframe);
return -1;
@@ -2833,7 +2831,9 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
}
-static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, u8 ch, bool append_wps, int wait_ack)
+static int _issue_probereq(struct adapter *padapter,
+ struct ndis_802_11_ssid *pssid,
+ u8 *da, u8 ch, bool append_wps, bool wait_ack)
{
int ret = _FAIL;
struct xmit_frame *pmgntframe;
@@ -3394,7 +3394,7 @@ void issue_assocreq(struct adapter *padapter)
pframe = rtw_set_ie(pframe, EID_WPA2, pIE->Length, pIE->data, &(pattrib->pktlen));
break;
case EID_HTCapability:
- if (padapter->mlmepriv.htpriv.ht_option == true) {
+ if (padapter->mlmepriv.htpriv.ht_option) {
if (!(is_ap_in_tkip(padapter))) {
memcpy(&(pmlmeinfo->HT_caps), pIE->data, sizeof(struct HT_caps_element));
pframe = rtw_set_ie(pframe, EID_HTCapability, pIE->Length, (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
@@ -3403,7 +3403,7 @@ void issue_assocreq(struct adapter *padapter)
break;
case EID_EXTCapability:
- if (padapter->mlmepriv.htpriv.ht_option == true)
+ if (padapter->mlmepriv.htpriv.ht_option)
pframe = rtw_set_ie(pframe, EID_EXTCapability, pIE->Length, pIE->data, &(pattrib->pktlen));
break;
default:
@@ -3432,7 +3432,8 @@ exit:
}
/* when wait_ack is true, this function should be called in process context */
-static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int wait_ack)
+static int _issue_nulldata(struct adapter *padapter, unsigned char *da,
+ unsigned int power_mode, bool wait_ack)
{
int ret = _FAIL;
struct xmit_frame *pmgntframe;
@@ -3593,7 +3594,8 @@ s32 issue_nulldata_in_interrupt(struct adapter *padapter, u8 *da)
}
/* when wait_ack is true, this function should be called in process context */
-static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int wait_ack)
+static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
+ u16 tid, bool wait_ack)
{
int ret = _FAIL;
struct xmit_frame *pmgntframe;
@@ -3717,7 +3719,8 @@ exit:
return ret;
}
-static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason, u8 wait_ack)
+static int _issue_deauth(struct adapter *padapter, unsigned char *da,
+ unsigned short reason, bool wait_ack)
{
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
@@ -4219,7 +4222,7 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
if (initiator == 0) {/* recipient */
for (tid = 0; tid < MAXTID; tid++) {
- if (psta->recvreorder_ctrl[tid].enable == true) {
+ if (psta->recvreorder_ctrl[tid].enable) {
DBG_871X("rx agg disable tid(%d)\n", tid);
issue_action_BA(padapter, addr, RTW_WLAN_ACTION_DELBA, (((tid << 1) | initiator)&0x1F));
psta->recvreorder_ctrl[tid].enable = false;
@@ -4408,7 +4411,7 @@ void site_survey(struct adapter *padapter)
Restore_DM_Func_Flag(padapter);
/* Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true); */
- if (is_client_associated_to_ap(padapter) == true)
+ if (is_client_associated_to_ap(padapter))
issue_nulldata(padapter, NULL, 0, 3, 500);
val8 = 0; /* survey done */
@@ -5049,12 +5052,12 @@ void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame
pmlmeext = &padapter->mlmeextpriv;
pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd_obj == NULL)
return;
cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
kfree((u8 *)pcmd_obj);
return;
@@ -5102,12 +5105,12 @@ void report_surveydone_event(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd_obj == NULL)
return;
cmdsz = (sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
kfree((u8 *)pcmd_obj);
return;
@@ -5149,12 +5152,12 @@ void report_join_res(struct adapter *padapter, int res)
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd_obj == NULL)
return;
cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
kfree((u8 *)pcmd_obj);
return;
@@ -5200,12 +5203,12 @@ void report_wmm_edca_update(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd_obj == NULL)
return;
cmdsz = (sizeof(struct wmm_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
kfree((u8 *)pcmd_obj);
return;
@@ -5246,13 +5249,13 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd_obj == NULL) {
return;
}
cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
kfree((u8 *)pcmd_obj);
return;
@@ -5302,12 +5305,12 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd_obj == NULL)
return;
cmdsz = (sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
kfree((u8 *)pcmd_obj);
return;
@@ -5445,7 +5448,7 @@ static void rtw_mlmeext_disconnect(struct adapter *padapter)
pmlmeinfo->state = WIFI_FW_NULL_STATE;
if (state_backup == WIFI_FW_STATION_STATE) {
- if (rtw_port_switch_chk(padapter) == true) {
+ if (rtw_port_switch_chk(padapter)) {
rtw_hal_set_hwreg(padapter, HW_VAR_PORT_SWITCH, NULL);
{
struct adapter *port0_iface = dvobj_get_port0_adapter(adapter_to_dvobj(padapter));
@@ -5534,7 +5537,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
rtw_hal_macid_wakeup(padapter, psta->mac_id);
}
- if (rtw_port_switch_chk(padapter) == true)
+ if (rtw_port_switch_chk(padapter))
rtw_hal_set_hwreg(padapter, HW_VAR_PORT_SWITCH, NULL);
join_type = 2;
@@ -5659,7 +5662,7 @@ void _linked_info_dump(struct adapter *padapter)
}
for (i = 0; i < NUM_STA; i++) {
- if (pdvobj->macid[i] == true) {
+ if (pdvobj->macid[i]) {
if (i != 1) /* skip bc/mc sta */
/* tx info ============ */
rtw_hal_get_def_var(padapter, HW_DEF_RA_INFO_DUMP, &i);
@@ -5827,8 +5830,10 @@ void linked_status_chk(struct adapter *padapter)
}
-void survey_timer_hdl(struct adapter *padapter)
+void survey_timer_hdl(struct timer_list *t)
{
+ struct adapter *padapter =
+ from_timer(padapter, t, mlmeextpriv.survey_timer);
struct cmd_obj *ph2c;
struct sitesurvey_parm *psurveyPara;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
@@ -5842,7 +5847,7 @@ void survey_timer_hdl(struct adapter *padapter)
pmlmeext->sitesurvey_res.channel_idx++;
}
- if (pmlmeext->scan_abort == true) {
+ if (pmlmeext->scan_abort) {
{
pmlmeext->sitesurvey_res.channel_idx = pmlmeext->sitesurvey_res.ch_num;
DBG_871X("%s idx:%d\n", __func__
@@ -5853,12 +5858,12 @@ void survey_timer_hdl(struct adapter *padapter)
pmlmeext->scan_abort = false;/* reset */
}
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
goto exit_survey_timer_hdl;
}
- psurveyPara = (struct sitesurvey_parm *)rtw_zmalloc(sizeof(struct sitesurvey_parm));
+ psurveyPara = rtw_zmalloc(sizeof(struct sitesurvey_parm));
if (psurveyPara == NULL) {
kfree((unsigned char *)ph2c);
goto exit_survey_timer_hdl;
@@ -5874,8 +5879,10 @@ exit_survey_timer_hdl:
return;
}
-void link_timer_hdl(struct adapter *padapter)
+void link_timer_hdl(struct timer_list *t)
{
+ struct adapter *padapter =
+ from_timer(padapter, t, mlmeextpriv.link_timer);
/* static unsigned int rx_pkt = 0; */
/* static u64 tx_cnt = 0; */
/* struct xmit_priv *pxmitpriv = &(padapter->xmitpriv); */
@@ -5924,8 +5931,9 @@ void link_timer_hdl(struct adapter *padapter)
return;
}
-void addba_timer_hdl(struct sta_info *psta)
+void addba_timer_hdl(struct timer_list *t)
{
+ struct sta_info *psta = from_timer(psta, t, addba_retry_timer);
struct ht_priv *phtpriv;
if (!psta)
@@ -5933,20 +5941,22 @@ void addba_timer_hdl(struct sta_info *psta)
phtpriv = &psta->htpriv;
- if ((phtpriv->ht_option == true) && (phtpriv->ampdu_enable == true)) {
+ if (phtpriv->ht_option && phtpriv->ampdu_enable) {
if (phtpriv->candidate_tid_bitmap)
phtpriv->candidate_tid_bitmap = 0x0;
}
}
-void sa_query_timer_hdl(struct adapter *padapter)
+void sa_query_timer_hdl(struct timer_list *t)
{
+ struct adapter *padapter =
+ from_timer(padapter, t, mlmeextpriv.sa_query_timer);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
/* disconnect */
spin_lock_bh(&pmlmepriv->lock);
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
rtw_disassoc_cmd(padapter, 0, true);
rtw_indicate_disconnect(padapter);
rtw_free_assoc_resources(padapter, 1);
@@ -6084,7 +6094,7 @@ u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf)
rtw_auto_ap_start_beacon(padapter);
#endif
- if (rtw_port_switch_chk(padapter) == true) {
+ if (rtw_port_switch_chk(padapter)) {
rtw_hal_set_hwreg(padapter, HW_VAR_PORT_SWITCH, NULL);
if (psetop->mode == Ndis802_11APMode)
@@ -6358,7 +6368,7 @@ static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_c
set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, in[i].hw_value);
if (in[i].hw_value && !(in[i].flags & RTW_IEEE80211_CHAN_DISABLED)
&& set_idx >= 0
- && rtw_mlme_band_check(padapter, in[i].hw_value) == true
+ && rtw_mlme_band_check(padapter, in[i].hw_value)
) {
if (j >= out_num) {
DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" out_num:%u not enough\n",
@@ -6383,7 +6393,7 @@ static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_c
DBG_871X(FUNC_ADPT_FMT" ch:%u\n", FUNC_ADPT_ARG(padapter), pmlmeext->channel_set[i].ChannelNum);
- if (rtw_mlme_band_check(padapter, pmlmeext->channel_set[i].ChannelNum) == true) {
+ if (rtw_mlme_band_check(padapter, pmlmeext->channel_set[i].ChannelNum)) {
if (j >= out_num) {
DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" out_num:%u not enough\n",
@@ -6435,7 +6445,7 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
pmlmeext->sitesurvey_res.scan_mode = pparm->scan_mode;
/* issue null data if associating to the AP */
- if (is_client_associated_to_ap(padapter) == true) {
+ if (is_client_associated_to_ap(padapter)) {
pmlmeext->sitesurvey_res.state = SCAN_TXNULL;
issue_nulldata(padapter, NULL, 1, 3, 500);
@@ -6602,7 +6612,7 @@ u8 chk_bmc_sleepq_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
@@ -6626,13 +6636,13 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
int len_diff = 0;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- ptxBeacon_parm = (struct Tx_Beacon_param *)rtw_zmalloc(sizeof(struct Tx_Beacon_param));
+ ptxBeacon_parm = rtw_zmalloc(sizeof(struct Tx_Beacon_param));
if (ptxBeacon_parm == NULL) {
kfree((unsigned char *)ph2c);
res = _FAIL;
@@ -6767,7 +6777,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
pxmitframe->attrib.triggered = 1;
- if (xmitframe_hiq_filter(pxmitframe) == true)
+ if (xmitframe_hiq_filter(pxmitframe))
pxmitframe->attrib.qsel = 0x11;/* HIQ */
rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
@@ -6809,7 +6819,7 @@ int rtw_chk_start_clnt_join(struct adapter *padapter, u8 *ch, u8 *bw, u8 *offset
connect_allow = false;
}
- if (connect_allow == true) {
+ if (connect_allow) {
DBG_871X("start_join_set_ch_bw: ch =%d, bwmode =%d, ch_offset =%d\n", cur_ch, cur_bw, cur_ch_offset);
*ch = cur_ch;
*bw = cur_bw;
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
index aabdaafcbdd3..4a6af72013fa 100644
--- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -34,7 +34,7 @@ void _ips_enter(struct adapter *padapter)
if (rf_off == pwrpriv->change_rfpwrstate) {
pwrpriv->bpower_saving = true;
- DBG_871X_LEVEL(_drv_always_, "nolinked power save enter\n");
+ DBG_871X("nolinked power save enter\n");
if (pwrpriv->ips_mode == IPS_LEVEL_2)
pwrpriv->bkeepfwalive = true;
@@ -73,7 +73,7 @@ int _ips_leave(struct adapter *padapter)
if (result == _SUCCESS) {
pwrpriv->rf_pwrstate = rf_on;
}
- DBG_871X_LEVEL(_drv_always_, "nolinked power save leave\n");
+ DBG_871X("nolinked power save leave\n");
DBG_871X("==> ips_leave.....LED(0x%08x)...\n", rtw_read32(padapter, 0x4c));
pwrpriv->bips_processing = false;
@@ -201,10 +201,12 @@ exit:
return;
}
-void pwr_state_check_handler(RTW_TIMER_HDL_ARGS);
-void pwr_state_check_handler(RTW_TIMER_HDL_ARGS)
+static void pwr_state_check_handler(struct timer_list *t)
{
- struct adapter *padapter = (struct adapter *)FunctionContext;
+ struct pwrctrl_priv *pwrctrlpriv =
+ from_timer(pwrctrlpriv, t, pwr_state_check_timer);
+ struct adapter *padapter = pwrctrlpriv->adapter;
+
rtw_ps_cmd(padapter);
}
@@ -823,14 +825,10 @@ exit:
/*
* This function is a timer handler, can't do any IO in it.
*/
-static void pwr_rpwm_timeout_handler(void *FunctionContext)
+static void pwr_rpwm_timeout_handler(struct timer_list *t)
{
- struct adapter *padapter;
- struct pwrctrl_priv *pwrpriv;
-
+ struct pwrctrl_priv *pwrpriv = from_timer(pwrpriv, t, pwr_rpwm_timer);
- padapter = FunctionContext;
- pwrpriv = adapter_to_pwrctl(padapter);
DBG_871X("+%s: rpwm = 0x%02X cpwm = 0x%02X\n", __func__, pwrpriv->rpwm, pwrpriv->cpwm);
if ((pwrpriv->rpwm == pwrpriv->cpwm) || (pwrpriv->cpwm >= PS_STATE_S2)) {
@@ -1154,7 +1152,7 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->LpsIdleCount = 0;
pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
- pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt)?true:false;
+ pwrctrlpriv->bLeisurePs = pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE;
pwrctrlpriv->bFwCurrentInPSMode = false;
@@ -1173,10 +1171,11 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
_init_workitem(&pwrctrlpriv->cpwm_event, cpwm_event_callback, NULL);
pwrctrlpriv->brpwmtimeout = false;
+ pwrctrlpriv->adapter = padapter;
_init_workitem(&pwrctrlpriv->rpwmtimeoutwi, rpwmtimeout_workitem_callback, NULL);
- _init_timer(&pwrctrlpriv->pwr_rpwm_timer, padapter->pnetdev, pwr_rpwm_timeout_handler, padapter);
-
- rtw_init_timer(&pwrctrlpriv->pwr_state_check_timer, padapter, pwr_state_check_handler);
+ timer_setup(&pwrctrlpriv->pwr_rpwm_timer, pwr_rpwm_timeout_handler, 0);
+ timer_setup(&pwrctrlpriv->pwr_state_check_timer,
+ pwr_state_check_handler, 0);
pwrctrlpriv->wowlan_mode = false;
pwrctrlpriv->wowlan_ap_mode = false;
@@ -1193,8 +1192,6 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
void rtw_free_pwrctrl_priv(struct adapter *adapter)
{
- /* memset((unsigned char *)pwrctrlpriv, 0, sizeof(struct pwrctrl_priv)); */
-
#ifdef CONFIG_PNO_SUPPORT
if (pwrctrlpriv->pnlo_info != NULL)
printk("****** pnlo_info memory leak********\n");
@@ -1327,7 +1324,8 @@ int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
pwrctrlpriv->LpsIdleCount = 2;
pwrctrlpriv->power_mgnt = mode;
- pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt)?true:false;
+ pwrctrlpriv->bLeisurePs =
+ pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE;
}
} else
ret = -EINVAL;
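The setup side pairs with the handler change: timer_setup() replaces the old _init_timer()/rtw_init_timer() helpers, and because the handler can only recover the structure that embeds the timer, a back-pointer (pwrctrlpriv->adapter in the hunk above) is stored for handlers that need a different structure. A rough sketch of that pattern, with hypothetical names:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct demo_adapter {
		int ps_requests;
	};

	struct demo_pwrctrl {
		struct timer_list check_timer;
		struct demo_adapter *adapter;	/* not derivable from the timer alone */
	};

	static void demo_check_handler(struct timer_list *t)
	{
		struct demo_pwrctrl *pwr = from_timer(pwr, t, check_timer);
		struct demo_adapter *adapter = pwr->adapter;	/* via the back-pointer */

		adapter->ps_requests++;
	}

	static void demo_pwrctrl_init(struct demo_pwrctrl *pwr,
				      struct demo_adapter *adapter)
	{
		pwr->adapter = adapter;
		timer_setup(&pwr->check_timer, demo_check_handler, 0);
		mod_timer(&pwr->check_timer, jiffies + 2 * HZ);	/* first check in ~2s */
	}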
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 68a6303e2754..9c7c3be0553a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -26,7 +26,7 @@ u8 rtw_rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
u8 rtw_bridge_tunnel_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS);
+static void rtw_signal_stat_timer_hdl(struct timer_list *t);
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
{
@@ -46,9 +46,6 @@ sint _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
union recv_frame *precvframe;
sint res = _SUCCESS;
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
- /* memset((unsigned char *)precvpriv, 0, sizeof (struct recv_priv)); */
-
spin_lock_init(&precvpriv->lock);
_rtw_init_queue(&precvpriv->free_recv_queue);
@@ -65,7 +62,6 @@ sint _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
res = _FAIL;
goto exit;
}
- /* memset(precvpriv->pallocated_frame_buf, 0, NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ); */
precvpriv->precv_frame_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
/* precvpriv->precv_frame_buf = precvpriv->pallocated_frame_buf + RXFRAME_ALIGN_SZ - */
@@ -90,7 +86,8 @@ sint _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
res = rtw_hal_init_recv_priv(padapter);
- rtw_init_timer(&precvpriv->signal_stat_timer, padapter, rtw_signal_stat_timer_hdl);
+ timer_setup(&precvpriv->signal_stat_timer, rtw_signal_stat_timer_hdl,
+ 0);
precvpriv->signal_stat_sampling_interval = 2000; /* ms */
@@ -129,7 +126,7 @@ union recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
plist = get_next(phead);
- precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ precvframe = (union recv_frame *)plist;
list_del_init(&precvframe->u.hdr.list);
padapter = precvframe->u.hdr.adapter;
@@ -243,7 +240,7 @@ void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfre
plist = get_next(phead);
while (phead != plist) {
- precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ precvframe = (union recv_frame *)plist;
plist = get_next(plist);
@@ -1732,7 +1729,7 @@ static union recv_frame *recvframe_defrag(struct adapter *adapter,
phead = get_list_head(defrag_q);
plist = get_next(phead);
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = (union recv_frame *)plist;
pfhdr = &prframe->u.hdr;
list_del_init(&(prframe->u.list));
@@ -1754,7 +1751,7 @@ static union recv_frame *recvframe_defrag(struct adapter *adapter,
data = get_recvframe_data(prframe);
while (phead != plist) {
- pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pnextrframe = (union recv_frame *)plist;
pnfhdr = &pnextrframe->u.hdr;
@@ -2071,7 +2068,7 @@ int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union rec
plist = get_next(phead);
while (phead != plist) {
- pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pnextrframe = (union recv_frame *)plist;
pnextattrib = &pnextrframe->u.hdr.attrib;
if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
@@ -2146,7 +2143,7 @@ int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reorder_ctr
return true;
}
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = (union recv_frame *)plist;
pattrib = &prframe->u.hdr.attrib;
#ifdef DBG_RX_SEQ
@@ -2162,7 +2159,7 @@ int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reorder_ctr
/* Check if there is any packet need indicate. */
while (!list_empty(phead)) {
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = (union recv_frame *)plist;
pattrib = &prframe->u.hdr.attrib;
if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
@@ -2358,9 +2355,10 @@ _err_exit:
}
-void rtw_reordering_ctrl_timeout_handler(void *pcontext)
+void rtw_reordering_ctrl_timeout_handler(struct timer_list *t)
{
- struct recv_reorder_ctrl *preorder_ctrl = pcontext;
+ struct recv_reorder_ctrl *preorder_ctrl =
+ from_timer(preorder_ctrl, t, reordering_ctrl_timer);
struct adapter *padapter = preorder_ctrl->padapter;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
@@ -2601,9 +2599,10 @@ _recv_entry_drop:
return ret;
}
-void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS)
+static void rtw_signal_stat_timer_hdl(struct timer_list *t)
{
- struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct adapter *adapter =
+ from_timer(adapter, t, recvpriv.signal_stat_timer);
struct recv_priv *recvpriv = &adapter->recvpriv;
u32 tmp_s, tmp_q;
diff --git a/drivers/staging/rtl8723bs/core/rtw_rf.c b/drivers/staging/rtl8723bs/core/rtw_rf.c
index b87ea4e388c0..07f5577cc073 100644
--- a/drivers/staging/rtl8723bs/core/rtw_rf.c
+++ b/drivers/staging/rtl8723bs/core/rtw_rf.c
@@ -15,6 +15,7 @@
#define _RTW_RF_C_
#include <drv_types.h>
+#include <linux/kernel.h>
struct ch_freq {
@@ -44,20 +45,18 @@ static struct ch_freq ch_freq_map[] = {
{216, 5080},/* Japan, means J16 */
};
-static int ch_freq_map_num = (sizeof(ch_freq_map) / sizeof(struct ch_freq));
-
u32 rtw_ch2freq(u32 channel)
{
u8 i;
u32 freq = 0;
- for (i = 0; i < ch_freq_map_num; i++) {
+ for (i = 0; i < ARRAY_SIZE(ch_freq_map); i++) {
if (channel == ch_freq_map[i].channel) {
freq = ch_freq_map[i].frequency;
break;
}
}
- if (i == ch_freq_map_num)
+ if (i == ARRAY_SIZE(ch_freq_map))
freq = 2412;
return freq;
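ARRAY_SIZE() from <linux/kernel.h> replaces the open-coded sizeof(tbl)/sizeof(tbl[0]) pattern used for ch_freq_map and the register tables below; besides being shorter, it fails to compile if the argument has decayed to a pointer. A small illustration (table values are made up):

	#include <linux/kernel.h>
	#include <linux/types.h>

	static const struct { u32 ch; u32 freq; } demo_map[] = {
		{ 1, 2412 }, { 6, 2437 }, { 11, 2462 },
	};

	static u32 demo_ch2freq(u32 ch)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(demo_map); i++)	/* 3 entries here */
			if (demo_map[i].ch == ch)
				return demo_map[i].freq;

		return 2412;	/* fall back to channel 1, as the driver does */
	}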
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 06a7e4059fbb..aadf67bd0559 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -2272,7 +2272,7 @@ static void *aes_encrypt_init(u8 *key, size_t len)
u32 *rk;
if (len != 16)
return NULL;
- rk = (u32 *)rtw_malloc(AES_PRIV_SIZE);
+ rk = rtw_malloc(AES_PRIV_SIZE);
if (rk == NULL)
return NULL;
rijndaelKeySetupEnc(rk, key);
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index cb43ec90a648..03dd6848daa1 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -429,7 +429,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
plist = get_next(phead);
while (!list_empty(phead)) {
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = (union recv_frame *)plist;
plist = get_next(plist);
@@ -604,10 +604,10 @@ struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
{
- u8 res = true;
+ bool res = true;
struct list_head *plist, *phead;
struct rtw_wlan_acl_node *paclnode;
- u8 match = false;
+ bool match = false;
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
@@ -630,10 +630,10 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
if (pacl_list->mode == 1) /* accept unless in deny list */
- res = (match == true) ? false:true;
+ res = !match;
else if (pacl_list->mode == 2)/* deny unless in accept list */
- res = (match == true) ? true:false;
+ res = match;
else
res = true;
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index f485f541e36d..f6dc26c8bd3d 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -1359,7 +1359,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
return true;
}
- bssid = (struct wlan_bssid_ex *)rtw_zmalloc(sizeof(struct wlan_bssid_ex));
+ bssid = rtw_zmalloc(sizeof(struct wlan_bssid_ex));
if (bssid == NULL) {
DBG_871X("%s rtw_zmalloc fail !!!\n", __func__);
return true;
@@ -1946,7 +1946,7 @@ void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr)
preorder_ctrl->indicate_seq = 0xffff;
#endif
- preorder_ctrl->enable = (pmlmeinfo->bAcceptAddbaReq == true) ? true : false;
+ preorder_ctrl->enable = pmlmeinfo->accept_addba_req;
}
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 022f654419e4..be54186fb223 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -51,9 +51,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
struct xmit_frame *pxframe;
sint res = _SUCCESS;
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
- /* memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv)); */
-
spin_lock_init(&pxmitpriv->lock);
spin_lock_init(&pxmitpriv->lock_sctx);
sema_init(&pxmitpriv->xmit_sema, 0);
@@ -2166,7 +2163,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
pxmitpriv->hwxmits = NULL;
- pxmitpriv->hwxmits = (struct hw_xmit *)rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
+ pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
if (pxmitpriv->hwxmits == NULL) {
DBG_871X("alloc hwxmits fail!...\n");
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c
index 51d4219177d3..951585467ab1 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c
@@ -13,7 +13,7 @@
*
******************************************************************************/
-
+#include <linux/kernel.h>
#include "odm_precomp.h"
static bool CheckPositive(
@@ -268,7 +268,7 @@ static u32 Array_MP_8723B_AGC_TAB[] = {
void ODM_ReadAndConfig_MP_8723B_AGC_TAB(PDM_ODM_T pDM_Odm)
{
u32 i = 0;
- u32 ArrayLen = sizeof(Array_MP_8723B_AGC_TAB)/sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_MP_8723B_AGC_TAB);
u32 *Array = Array_MP_8723B_AGC_TAB;
ODM_RT_TRACE(
@@ -537,7 +537,7 @@ static u32 Array_MP_8723B_PHY_REG[] = {
void ODM_ReadAndConfig_MP_8723B_PHY_REG(PDM_ODM_T pDM_Odm)
{
u32 i = 0;
- u32 ArrayLen = sizeof(Array_MP_8723B_PHY_REG)/sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_MP_8723B_PHY_REG);
u32 *Array = Array_MP_8723B_PHY_REG;
ODM_RT_TRACE(
@@ -617,7 +617,6 @@ static u32 Array_MP_8723B_PHY_REG_PG[] = {
void ODM_ReadAndConfig_MP_8723B_PHY_REG_PG(PDM_ODM_T pDM_Odm)
{
u32 i = 0;
- u32 ArrayLen = sizeof(Array_MP_8723B_PHY_REG_PG)/sizeof(u32);
u32 *Array = Array_MP_8723B_PHY_REG_PG;
ODM_RT_TRACE(
@@ -630,7 +629,7 @@ void ODM_ReadAndConfig_MP_8723B_PHY_REG_PG(PDM_ODM_T pDM_Odm)
pDM_Odm->PhyRegPgVersion = 1;
pDM_Odm->PhyRegPgValueType = PHY_REG_PG_EXACT_VALUE;
- for (i = 0; i < ArrayLen; i += 6) {
+ for (i = 0; i < ARRAY_SIZE(Array_MP_8723B_PHY_REG_PG); i += 6) {
u32 v1 = Array[i];
u32 v2 = Array[i+1];
u32 v3 = Array[i+2];
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c
index b868e26f20ac..7f8afa1be1ca 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c
@@ -13,7 +13,7 @@
*
******************************************************************************/
-
+#include <linux/kernel.h>
#include "odm_precomp.h"
static bool CheckPositive(
@@ -239,7 +239,7 @@ static u32 Array_MP_8723B_MAC_REG[] = {
void ODM_ReadAndConfig_MP_8723B_MAC_REG(PDM_ODM_T pDM_Odm)
{
u32 i = 0;
- u32 ArrayLen = sizeof(Array_MP_8723B_MAC_REG)/sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_MP_8723B_MAC_REG);
u32 *Array = Array_MP_8723B_MAC_REG;
ODM_RT_TRACE(
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c
index 84a0be7ba697..fadfcbd91858 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c
@@ -13,7 +13,7 @@
*
******************************************************************************/
-
+#include <linux/kernel.h>
#include "odm_precomp.h"
static bool CheckPositive(
@@ -270,7 +270,7 @@ static u32 Array_MP_8723B_RadioA[] = {
void ODM_ReadAndConfig_MP_8723B_RadioA(PDM_ODM_T pDM_Odm)
{
u32 i = 0;
- u32 ArrayLen = sizeof(Array_MP_8723B_RadioA)/sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_MP_8723B_RadioA);
u32 *Array = Array_MP_8723B_RadioA;
ODM_RT_TRACE(
@@ -766,7 +766,6 @@ static u8 *Array_MP_8723B_TXPWR_LMT[] = {
void ODM_ReadAndConfig_MP_8723B_TXPWR_LMT(PDM_ODM_T pDM_Odm)
{
u32 i = 0;
- u32 ArrayLen = sizeof(Array_MP_8723B_TXPWR_LMT)/sizeof(u8 *);
u8 **Array = Array_MP_8723B_TXPWR_LMT;
ODM_RT_TRACE(
@@ -776,7 +775,7 @@ void ODM_ReadAndConfig_MP_8723B_TXPWR_LMT(PDM_ODM_T pDM_Odm)
("===> ODM_ReadAndConfig_MP_8723B_TXPWR_LMT\n")
);
- for (i = 0; i < ArrayLen; i += 7) {
+ for (i = 0; i < ARRAY_SIZE(Array_MP_8723B_TXPWR_LMT); i += 7) {
u8 *regulation = Array[i];
u8 *band = Array[i+1];
u8 *bandwidth = Array[i+2];
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index 86fee109e42d..7d4df5a8832e 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -463,7 +463,7 @@ static u8 halbtcoutsrc_Get(void *pBtcContext, u8 getType, void *pOutBuf)
break;
case BTC_GET_BL_WIFI_UNDER_5G:
- *pu8 = (pHalData->CurrentBandType == 1) ? true : false;
+ *pu8 = pHalData->CurrentBandType == 1;
break;
case BTC_GET_BL_WIFI_AP_MODE_ENABLE:
@@ -1411,15 +1411,8 @@ void hal_btcoex_SetSingleAntPath(struct adapter *padapter, u8 singleAntPath)
u8 hal_btcoex_Initialize(struct adapter *padapter)
{
- u8 ret1;
- u8 ret2;
-
-
memset(&GLBtCoexist, 0, sizeof(GLBtCoexist));
- ret1 = EXhalbtcoutsrc_InitlizeVariables((void *)padapter);
- ret2 = (ret1 == true) ? true : false;
-
- return ret2;
+ return EXhalbtcoutsrc_InitlizeVariables((void *)padapter);
}
void hal_btcoex_PowerOnSetting(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index 3e63b6d9c097..dec887a5b338 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -14,6 +14,7 @@
******************************************************************************/
#define _HAL_COM_C_
+#include <linux/kernel.h>
#include <drv_types.h>
#include <rtw_debug.h>
#include "hal_com_h2c.h"
@@ -1622,7 +1623,7 @@ void rtw_get_raw_rssi_info(void *sel, struct adapter *padapter)
psample_pkt_rssi->pwdball, psample_pkt_rssi->pwr_all
);
- isCCKrate = (psample_pkt_rssi->data_rate <= DESC_RATE11M) ? true : false;
+ isCCKrate = psample_pkt_rssi->data_rate <= DESC_RATE11M;
if (isCCKrate)
psample_pkt_rssi->mimo_singal_strength[0] = psample_pkt_rssi->pwdball;
@@ -1655,7 +1656,7 @@ void rtw_dump_raw_rssi_info(struct adapter *padapter)
DBG_871X("RxRate = %s, PWDBALL = %d(%%), rx_pwr_all = %d(dBm)\n",
HDATA_RATE(psample_pkt_rssi->data_rate), psample_pkt_rssi->pwdball, psample_pkt_rssi->pwr_all);
- isCCKrate = (psample_pkt_rssi->data_rate <= DESC_RATE11M) ? true : false;
+ isCCKrate = psample_pkt_rssi->data_rate <= DESC_RATE11M;
if (isCCKrate)
psample_pkt_rssi->mimo_singal_strength[0] = psample_pkt_rssi->pwdball;
@@ -1683,7 +1684,7 @@ void rtw_store_phy_info(struct adapter *padapter, union recv_frame *prframe)
struct rx_raw_rssi *psample_pkt_rssi = &padapter->recvpriv.raw_rssi_info;
psample_pkt_rssi->data_rate = pattrib->data_rate;
- isCCKrate = (pattrib->data_rate <= DESC_RATE11M) ? true : false;
+ isCCKrate = pattrib->data_rate <= DESC_RATE11M;
psample_pkt_rssi->pwdball = pPhyInfo->RxPWDBAll;
psample_pkt_rssi->pwr_all = pPhyInfo->RecvSignalPower;
@@ -1716,7 +1717,6 @@ void rtw_bb_rf_gain_offset(struct adapter *padapter)
{
u8 value = padapter->eeprompriv.EEPROMRFGainOffset;
u32 res, i = 0;
- u32 ArrayLen = sizeof(Array_kfreemap)/sizeof(u32);
u32 *Array = Array_kfreemap;
u32 v1 = 0, v2 = 0, target = 0;
/* DBG_871X("+%s value: 0x%02x+\n", __func__, value); */
@@ -1729,7 +1729,7 @@ void rtw_bb_rf_gain_offset(struct adapter *padapter)
res &= 0xfff87fff;
DBG_871X("Offset RF Gain. before reg 0x7f = 0x%08x\n", res);
/* res &= 0xfff87fff; */
- for (i = 0; i < ArrayLen; i += 2) {
+ for (i = 0; i < ARRAY_SIZE(Array_kfreemap); i += 2) {
v1 = Array[i];
v2 = Array[i+1];
if (v1 == padapter->eeprompriv.EEPROMRFGainVal) {
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index 566b6f0997da..e6787c22e00b 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -17,6 +17,7 @@
#include <drv_types.h>
#include <rtw_debug.h>
#include <hal_data.h>
+#include <linux/kernel.h>
u8 PHY_GetTxPowerByRateBase(struct adapter *Adapter, u8 Band, u8 RfPath,
u8 TxNum, enum RATE_SECTION RateSection)
@@ -860,7 +861,7 @@ struct adapter *padapter
for (txNum = RF_1TX; txNum < RF_MAX_TX_NUM; ++txNum) {
/* CCK */
base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_11M);
- for (i = 0; i < sizeof(cckRates); ++i) {
+ for (i = 0; i < ARRAY_SIZE(cckRates); ++i) {
value = PHY_GetTxPowerByRate(padapter, band, path, txNum, cckRates[i]);
PHY_SetTxPowerByRate(padapter, band, path, txNum, cckRates[i], value - base);
}
@@ -939,58 +940,78 @@ void PHY_SetTxPowerIndexByRateSection(
if (RateSection == CCK) {
u8 cckRates[] = {MGN_1M, MGN_2M, MGN_5_5M, MGN_11M};
if (pHalData->CurrentBandType == BAND_ON_2_4G)
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- cckRates, sizeof(cckRates)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, cckRates,
+ ARRAY_SIZE(cckRates));
} else if (RateSection == OFDM) {
u8 ofdmRates[] = {MGN_6M, MGN_9M, MGN_12M, MGN_18M, MGN_24M, MGN_36M, MGN_48M, MGN_54M};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- ofdmRates, sizeof(ofdmRates)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, ofdmRates,
+ ARRAY_SIZE(ofdmRates));
} else if (RateSection == HT_MCS0_MCS7) {
u8 htRates1T[] = {MGN_MCS0, MGN_MCS1, MGN_MCS2, MGN_MCS3, MGN_MCS4, MGN_MCS5, MGN_MCS6, MGN_MCS7};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- htRates1T, sizeof(htRates1T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, htRates1T,
+ ARRAY_SIZE(htRates1T));
} else if (RateSection == HT_MCS8_MCS15) {
u8 htRates2T[] = {MGN_MCS8, MGN_MCS9, MGN_MCS10, MGN_MCS11, MGN_MCS12, MGN_MCS13, MGN_MCS14, MGN_MCS15};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- htRates2T, sizeof(htRates2T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, htRates2T,
+ ARRAY_SIZE(htRates2T));
} else if (RateSection == HT_MCS16_MCS23) {
u8 htRates3T[] = {MGN_MCS16, MGN_MCS17, MGN_MCS18, MGN_MCS19, MGN_MCS20, MGN_MCS21, MGN_MCS22, MGN_MCS23};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- htRates3T, sizeof(htRates3T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, htRates3T,
+ ARRAY_SIZE(htRates3T));
} else if (RateSection == HT_MCS24_MCS31) {
u8 htRates4T[] = {MGN_MCS24, MGN_MCS25, MGN_MCS26, MGN_MCS27, MGN_MCS28, MGN_MCS29, MGN_MCS30, MGN_MCS31};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- htRates4T, sizeof(htRates4T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, htRates4T,
+ ARRAY_SIZE(htRates4T));
} else if (RateSection == VHT_1SSMCS0_1SSMCS9) {
u8 vhtRates1T[] = {MGN_VHT1SS_MCS0, MGN_VHT1SS_MCS1, MGN_VHT1SS_MCS2, MGN_VHT1SS_MCS3, MGN_VHT1SS_MCS4,
MGN_VHT1SS_MCS5, MGN_VHT1SS_MCS6, MGN_VHT1SS_MCS7, MGN_VHT1SS_MCS8, MGN_VHT1SS_MCS9};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- vhtRates1T, sizeof(vhtRates1T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, vhtRates1T,
+ ARRAY_SIZE(vhtRates1T));
} else if (RateSection == VHT_2SSMCS0_2SSMCS9) {
u8 vhtRates2T[] = {MGN_VHT2SS_MCS0, MGN_VHT2SS_MCS1, MGN_VHT2SS_MCS2, MGN_VHT2SS_MCS3, MGN_VHT2SS_MCS4,
MGN_VHT2SS_MCS5, MGN_VHT2SS_MCS6, MGN_VHT2SS_MCS7, MGN_VHT2SS_MCS8, MGN_VHT2SS_MCS9};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- vhtRates2T, sizeof(vhtRates2T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, vhtRates2T,
+ ARRAY_SIZE(vhtRates2T));
} else if (RateSection == VHT_3SSMCS0_3SSMCS9) {
u8 vhtRates3T[] = {MGN_VHT3SS_MCS0, MGN_VHT3SS_MCS1, MGN_VHT3SS_MCS2, MGN_VHT3SS_MCS3, MGN_VHT3SS_MCS4,
MGN_VHT3SS_MCS5, MGN_VHT3SS_MCS6, MGN_VHT3SS_MCS7, MGN_VHT3SS_MCS8, MGN_VHT3SS_MCS9};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- vhtRates3T, sizeof(vhtRates3T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, vhtRates3T,
+ ARRAY_SIZE(vhtRates3T));
} else if (RateSection == VHT_4SSMCS0_4SSMCS9) {
u8 vhtRates4T[] = {MGN_VHT4SS_MCS0, MGN_VHT4SS_MCS1, MGN_VHT4SS_MCS2, MGN_VHT4SS_MCS3, MGN_VHT4SS_MCS4,
MGN_VHT4SS_MCS5, MGN_VHT4SS_MCS6, MGN_VHT4SS_MCS7, MGN_VHT4SS_MCS8, MGN_VHT4SS_MCS9};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- vhtRates4T, sizeof(vhtRates4T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, vhtRates4T,
+ ARRAY_SIZE(vhtRates4T));
} else
DBG_871X("Invalid RateSection %d in %s", RateSection, __func__);
}
@@ -1012,7 +1033,7 @@ static bool phy_GetChnlIndex(u8 Channel, u8 *ChannelIdx)
} else {
bIn24G = false;
- for (i = 0; i < sizeof(channel5G)/sizeof(u8); ++i) {
+ for (i = 0; i < ARRAY_SIZE(channel5G); ++i) {
if (channel5G[i] == Channel) {
*ChannelIdx = i;
return bIn24G;
@@ -1149,7 +1170,7 @@ u8 PHY_GetTxPowerIndexBase(
} else if (BandWidth == CHANNEL_WIDTH_80) { /* BW80-1S, BW80-2S */
/* <20121220, Kordan> Get the index of array "Index5G_BW80_Base". */
u8 channel5G_80M[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171};
- for (i = 0; i < sizeof(channel5G_80M)/sizeof(u8); ++i)
+ for (i = 0; i < ARRAY_SIZE(channel5G_80M); ++i)
if (channel5G_80M[i] == Channel)
chnlIdx = i;
@@ -1588,7 +1609,7 @@ static s8 phy_GetChannelIndexOfTxPowerLimit(u8 Band, u8 Channel)
if (Band == BAND_ON_2_4G)
channelIndex = Channel - 1;
else if (Band == BAND_ON_5G) {
- for (i = 0; i < sizeof(channel5G)/sizeof(u8); ++i) {
+ for (i = 0; i < ARRAY_SIZE(channel5G); ++i) {
if (channel5G[i] == Channel)
channelIndex = i;
}
diff --git a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
index 9cde6c66235b..71853e6f7106 100644
--- a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
+++ b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
@@ -23,7 +23,7 @@ static void odm_SetCrystalCap(void *pDM_VOID, u8 CrystalCap)
struct adapter *Adapter = pDM_Odm->Adapter;
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- bEEPROMCheck = (pHalData->EEPROMVersion >= 0x01) ? true : false;
+ bEEPROMCheck = pHalData->EEPROMVersion >= 0x01;
if (pCfoTrack->CrystalCap == CrystalCap)
return;
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.c b/drivers/staging/rtl8723bs/hal/odm_DIG.c
index 0bde9444471d..f02eb63a45ce 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.c
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.c
@@ -553,7 +553,7 @@ void odm_DIG(void *pDM_VOID)
dm_dig_min = DM_DIG_MIN_NIC;
DIG_MaxOfMin = DM_DIG_MAX_AP;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Absolutly upper bound = 0x%x, lower bound = 0x%x\n", dm_dig_max, dm_dig_min));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Absolutely upper bound = 0x%x, lower bound = 0x%x\n", dm_dig_max, dm_dig_min));
/* 1 Adjust boundary by RSSI */
if (pDM_Odm->bLinked && bPerformance) {
diff --git a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
index ba2700135b60..8dd6da8a4e26 100644
--- a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
@@ -106,7 +106,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(
u8 LNA_idx, VGA_idx;
PPHY_STATUS_RPT_8192CD_T pPhyStaRpt = (PPHY_STATUS_RPT_8192CD_T)pPhyStatus;
- isCCKrate = (pPktinfo->DataRate <= DESC_RATE11M) ? true : false;
+ isCCKrate = pPktinfo->DataRate <= DESC_RATE11M;
pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_A] = -1;
pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_B] = -1;
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index 1565f2d67ea4..d6cef9e8378d 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -891,7 +891,7 @@ static void hal_ReadEFuse_WiFi(
return;
}
- efuseTbl = (u8 *)rtw_malloc(EFUSE_MAX_MAP_LEN);
+ efuseTbl = rtw_malloc(EFUSE_MAX_MAP_LEN);
if (efuseTbl == NULL) {
DBG_8192C("%s: alloc efuseTbl fail!\n", __func__);
return;
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index d0b317077511..6281dfa1a3ca 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -113,7 +113,7 @@ query_free_page:
RT_TRACE(
_module_hal_xmit_c_,
_drv_notice_,
- ("%s: bSurpriseRemoved(wirte port)\n", __func__)
+ ("%s: bSurpriseRemoved(write port)\n", __func__)
);
goto free_xmitbuf;
}
diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
index 6dfb06a49d41..1af77add6af4 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_halinit.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
@@ -1019,8 +1019,8 @@ static u32 rtl8723bs_hal_init(struct adapter *padapter)
rtw_btcoex_IQKNotify(padapter, true);
- restore_iqk_rst = (pwrpriv->bips_processing == true) ? true : false;
- b2Ant = pHalData->EEPROMBluetoothAntNum == Ant_x2 ? true : false;
+ restore_iqk_rst = pwrpriv->bips_processing;
+ b2Ant = pHalData->EEPROMBluetoothAntNum == Ant_x2;
PHY_IQCalibrate_8723B(padapter, false, restore_iqk_rst, b2Ant, pHalData->ant_path);
pHalData->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
index 1d1b14dedd35..9a4c24861947 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_ops.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -218,7 +218,7 @@ static u32 sdio_read32(struct intf_hdl *pintfhdl, u32 addr)
} else {
u8 *ptmpbuf;
- ptmpbuf = (u8 *)rtw_malloc(8);
+ ptmpbuf = rtw_malloc(8);
if (NULL == ptmpbuf) {
DBG_8192C(KERN_ERR "%s: Allocate memory FAIL!(size =8) addr = 0x%x\n", __func__, addr);
return SDIO_ERR_VAL32;
@@ -594,7 +594,7 @@ static s32 _sdio_local_read(
}
n = RND4(cnt);
- ptmpbuf = (u8 *)rtw_malloc(n);
+ ptmpbuf = rtw_malloc(n);
if (!ptmpbuf)
return (-1);
@@ -637,7 +637,7 @@ s32 sdio_local_read(
}
n = RND4(cnt);
- ptmpbuf = (u8 *)rtw_malloc(n);
+ ptmpbuf = rtw_malloc(n);
if (!ptmpbuf)
return (-1);
@@ -684,7 +684,7 @@ s32 sdio_local_write(
return err;
}
- ptmpbuf = (u8 *)rtw_malloc(cnt);
+ ptmpbuf = rtw_malloc(cnt);
if (!ptmpbuf)
return (-1);
@@ -1108,7 +1108,7 @@ void sd_int_dpc(struct adapter *padapter)
struct c2h_evt_hdr_88xx *c2h_evt;
DBG_8192C("%s: C2H Command\n", __func__);
- c2h_evt = (struct c2h_evt_hdr_88xx *)rtw_zmalloc(16);
+ c2h_evt = rtw_zmalloc(16);
if (c2h_evt != NULL) {
if (rtw_hal_c2h_evt_read(padapter, (u8 *)c2h_evt) == _SUCCESS) {
if (c2h_id_filter_ccx_8723b((u8 *)c2h_evt)) {
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 4d14fbc5a1fe..32129ac8e169 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -177,7 +177,8 @@ struct registry_priv
u8 bt_ampdu;
s8 ant_num;
- bool bAcceptAddbaReq;
+ /* false:Reject AP's Add BA req, true:accept AP's Add BA req */
+ bool accept_addba_req;
u8 antdiv_cfg;
u8 antdiv_type;
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
index ac9ffe0e3b84..e62ed71e1d80 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -96,8 +96,8 @@ typedef enum mstat_status{
#define rtw_mstat_update(flag, status, sz) do {} while (0)
#define rtw_mstat_dump(sel) do {} while (0)
-u8*_rtw_zmalloc(u32 sz);
-u8*_rtw_malloc(u32 sz);
+void *_rtw_zmalloc(u32 sz);
+void *_rtw_malloc(u32 sz);
void _kfree(u8 *pbuf, u32 sz);
struct sk_buff *_rtw_skb_alloc(u32 sz);
@@ -118,8 +118,6 @@ int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb);
extern void _rtw_init_queue(struct __queue *pqueue);
-extern void rtw_init_timer(_timer *ptimer, void *padapter, void *pfunc);
-
static __inline void thread_enter(char *name)
{
allow_signal(SIGTERM);
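Changing the _rtw_malloc()/_rtw_zmalloc() prototypes to return void * is what lets the (struct xxx *) casts be dropped throughout the .c hunks of this series: in C, void * converts implicitly to any object pointer type, so those casts were redundant. A hedged sketch of the calling convention, with invented names:

	#include <linux/slab.h>

	/* a void * return lets callers assign without a cast */
	static inline void *demo_zmalloc(size_t sz)
	{
		return kzalloc(sz, GFP_KERNEL);
	}

	struct demo_cmd {
		int id;
	};

	static struct demo_cmd *demo_alloc_cmd(void)
	{
		struct demo_cmd *cmd = demo_zmalloc(sizeof(*cmd));	/* no cast needed */

		if (!cmd)
			return NULL;
		cmd->id = 1;
		return cmd;
	}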
diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
index 0c9b4f622fee..711863d74a01 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
@@ -86,17 +86,7 @@ __inline static struct list_head *get_list_head(struct __queue *queue)
#define LIST_CONTAINOR(ptr, type, member) \
- ((type *)((char *)(ptr)-(__kernel_size_t)(&((type *)0)->member)))
-
-#define RTW_TIMER_HDL_ARGS void *FunctionContext
-
-__inline static void _init_timer(_timer *ptimer, _nic_hdl nic_hdl, void *pfunc, void* cntx)
-{
- /* setup_timer(ptimer, pfunc, (u32)cntx); */
- ptimer->function = pfunc;
- ptimer->data = (unsigned long)cntx;
- init_timer(ptimer);
-}
+ container_of(ptr, type, member)
__inline static void _set_timer(_timer *ptimer, u32 delay_time)
{
@@ -109,7 +99,6 @@ __inline static void _cancel_timer(_timer *ptimer, u8 *bcancelled)
*bcancelled = true;/* true == 1; false == 0 */
}
-
__inline static void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
{
INIT_WORK(pwork, pfunc);
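Replacing the body of LIST_CONTAINOR with container_of() keeps the driver's old macro name but delegates the pointer arithmetic to the standard, type-checked kernel helper instead of the hand-rolled null-pointer offset trick. The idiom, sketched with invented types:

	#include <linux/kernel.h>
	#include <linux/list.h>

	struct demo_frame {
		int len;
		struct list_head list;	/* linkage used by the rx queues */
	};

	static struct demo_frame *demo_first_frame(struct list_head *queue_head)
	{
		struct list_head *node = queue_head->next;

		/* map the embedded list_head back to the frame that contains it */
		return container_of(node, struct demo_frame, list);
	}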
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h
index d88ef67ce8d6..00b3d92c9f51 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h
@@ -518,8 +518,8 @@ extern void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf);
extern void rtw_cpwm_event_callback(struct adapter *adapter, u8 *pbuf);
extern void rtw_wmm_event_callback(struct adapter *padapter, u8 *pbuf);
-extern void rtw_join_timeout_handler(RTW_TIMER_HDL_ARGS);
-extern void _rtw_scan_timeout_handler(RTW_TIMER_HDL_ARGS);
+extern void rtw_join_timeout_handler(struct timer_list *t);
+extern void _rtw_scan_timeout_handler(struct timer_list *t);
int event_thread(void *context);
@@ -618,10 +618,10 @@ extern void rtw_update_registrypriv_dev_network(struct adapter *adapter);
extern void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter);
-extern void _rtw_join_timeout_handler(struct adapter *adapter);
-extern void rtw_scan_timeout_handler(struct adapter *adapter);
+extern void _rtw_join_timeout_handler(struct timer_list *t);
+extern void rtw_scan_timeout_handler(struct timer_list *t);
-extern void rtw_dynamic_check_timer_handlder(struct adapter *adapter);
+extern void rtw_dynamic_check_timer_handler(struct adapter *adapter);
bool rtw_is_scan_deny(struct adapter *adapter);
void rtw_clear_scan_deny(struct adapter *adapter);
void rtw_set_scan_deny_timer_hdl(struct adapter *adapter);
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
index f3952463697e..6613dea2b283 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -424,7 +424,7 @@ struct mlme_ext_info
u8 candidate_tid_bitmap;
u8 dialogToken;
/* Accept ADDBA Request */
- bool bAcceptAddbaReq;
+ bool accept_addba_req;
u8 bwmode_updated;
u8 hidden_ssid_mode;
u8 VHT_enable;
@@ -719,10 +719,10 @@ void linked_status_chk(struct adapter *padapter);
void _linked_info_dump(struct adapter *padapter);
-void survey_timer_hdl (struct adapter *padapter);
-void link_timer_hdl (struct adapter *padapter);
-void addba_timer_hdl(struct sta_info *psta);
-void sa_query_timer_hdl(struct adapter *padapter);
+void survey_timer_hdl (struct timer_list *t);
+void link_timer_hdl (struct timer_list *t);
+void addba_timer_hdl(struct timer_list *t);
+void sa_query_timer_hdl(struct timer_list *t);
/* void reauth_timer_hdl(struct adapter *padapter); */
/* void reassoc_timer_hdl(struct adapter *padapter); */
diff --git a/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h b/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
index cf8e766a27a8..faf91022f54a 100644
--- a/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
@@ -250,7 +250,7 @@ struct pwrctrl_priv
u8 ips_mode;
u8 ips_org_mode;
u8 ips_mode_req; /* used to accept the mode setting request, will update to ipsmode later */
- uint bips_processing;
+ bool bips_processing;
unsigned long ips_deny_time; /* will deny IPS when system time is smaller than this */
u8 pre_ips_type;/* 0: default flow, 1: carddisbale flow */
@@ -300,6 +300,7 @@ struct pwrctrl_priv
u64 wowlan_fw_iv;
#endif /* CONFIG_WOWLAN */
_timer pwr_state_check_timer;
+ struct adapter *adapter;
int pwr_state_check_interval;
u8 pwr_state_check_cnts;
diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h
index 570a3c333aa0..71039ca79e4b 100644
--- a/drivers/staging/rtl8723bs/include/rtw_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtw_recv.h
@@ -411,7 +411,7 @@ sint rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queu
sint rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue);
struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue);
-void rtw_reordering_ctrl_timeout_handler(void *pcontext);
+void rtw_reordering_ctrl_timeout_handler(struct timer_list *t);
__inline static u8 *get_rxmem(union recv_frame *precvframe)
{
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index bd4352fe2de3..51d48de24a24 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -136,11 +136,9 @@ static struct ieee80211_supported_band *rtw_spt_band_alloc(
goto exit;
}
- spt_band = (struct ieee80211_supported_band *)rtw_zmalloc(
- sizeof(struct ieee80211_supported_band)
- + sizeof(struct ieee80211_channel)*n_channels
- + sizeof(struct ieee80211_rate)*n_bitrates
- );
+ spt_band = rtw_zmalloc(sizeof(struct ieee80211_supported_band) +
+ sizeof(struct ieee80211_channel) * n_channels +
+ sizeof(struct ieee80211_rate) * n_bitrates);
if (!spt_band)
goto exit;
@@ -1094,7 +1092,7 @@ static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
DBG_871X("pairwise =%d\n", pairwise);
param_len = sizeof(struct ieee_param) + params->key_len;
- param = (struct ieee_param *)rtw_malloc(param_len);
+ param = rtw_malloc(param_len);
if (param == NULL)
return -1;
@@ -2183,7 +2181,7 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
{
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
- pwep =(struct ndis_802_11_wep *) rtw_malloc(wep_total_len);
+ pwep = rtw_malloc(wep_total_len);
if (pwep == NULL) {
DBG_871X(" wpa_set_encryption: pwep allocate fail !!!\n");
ret = -ENOMEM;
@@ -2677,7 +2675,7 @@ static int rtw_cfg80211_add_monitor_if (struct adapter *padapter, char *name, st
pnpi->sizeof_priv = sizeof(struct adapter);
/* wdev */
- mon_wdev = (struct wireless_dev *)rtw_zmalloc(sizeof(struct wireless_dev));
+ mon_wdev = rtw_zmalloc(sizeof(struct wireless_dev));
if (!mon_wdev) {
DBG_871X(FUNC_ADPT_FMT" allocate mon_wdev fail\n", FUNC_ADPT_ARG(padapter));
ret = -ENOMEM;
@@ -3497,7 +3495,7 @@ int rtw_wdev_alloc(struct adapter *padapter, struct device *dev)
}
/* wdev */
- wdev = (struct wireless_dev *)rtw_zmalloc(sizeof(struct wireless_dev));
+ wdev = rtw_zmalloc(sizeof(struct wireless_dev));
if (!wdev) {
DBG_8192C("Couldn't allocate wireless device\n");
ret = -ENOMEM;
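The ioctl_cfg80211.c hunks drop the explicit casts on rtw_malloc()/rtw_zmalloc() results. Once the allocators return void * (see the osdep_service.c change later in this diff), the conversion to any object pointer type is implicit in C, so the cast is pure noise and can mask type mistakes. Minimal sketch, assuming a hypothetical demo_zalloc() wrapper:

#include <linux/slab.h>

struct demo_record {                  /* hypothetical payload */
        int id;
        char name[16];
};

static void *demo_zalloc(size_t sz)   /* void * wrapper, like rtw_zmalloc() */
{
        return kzalloc(sz, GFP_KERNEL);
}

static struct demo_record *demo_record_new(void)
{
        /* no cast needed: void * converts implicitly to any object pointer */
        struct demo_record *rec = demo_zalloc(sizeof(*rec));

        return rec;                   /* may be NULL; caller must check */
}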
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index d5e5f830f2a1..3fca0c2d4c8d 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -18,6 +18,7 @@
#include <rtw_debug.h>
#include <rtw_mp.h>
#include <linux/jiffies.h>
+#include <linux/kernel.h>
#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV+30)
@@ -557,7 +558,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
- pwep =(struct ndis_802_11_wep *) rtw_malloc(wep_total_len);
+ pwep = rtw_malloc(wep_total_len);
if (pwep == NULL) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, (" wpa_set_encryption: pwep allocate fail !!!\n"));
goto exit;
@@ -2123,12 +2124,9 @@ static int rtw_wx_set_gen_ie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- int ret;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- ret = rtw_set_wpa_ie(padapter, extra, wrqu->data.length);
-
- return ret;
+ return rtw_set_wpa_ie(padapter, extra, wrqu->data.length);
}
static int rtw_wx_set_auth(struct net_device *dev,
@@ -2238,7 +2236,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev,
int ret = 0;
param_len = sizeof(struct ieee_param) + pext->key_len;
- param = (struct ieee_param *)rtw_malloc(param_len);
+ param = rtw_malloc(param_len);
if (param == NULL)
return -1;
@@ -2347,7 +2345,7 @@ static int rtw_wx_read32(struct net_device *dev,
if (0 == len)
return -EINVAL;
- ptmp = (u8 *)rtw_malloc(len);
+ ptmp = rtw_malloc(len);
if (NULL == ptmp)
return -ENOMEM;
@@ -3500,7 +3498,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
goto out;
}
- param = (struct ieee_param *)rtw_malloc(p->length);
+ param = rtw_malloc(p->length);
if (param == NULL) {
ret = -ENOMEM;
goto out;
@@ -3621,7 +3619,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
- pwep =(struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
+ pwep = rtw_malloc(wep_total_len);
if (pwep == NULL) {
DBG_871X(" r871x_set_encryption: pwep allocate fail !!!\n");
goto exit;
@@ -3857,7 +3855,6 @@ static int rtw_hostapd_sta_flush(struct net_device *dev)
{
/* _irqL irqL; */
/* struct list_head *phead, *plist; */
- int ret = 0;
/* struct sta_info *psta = NULL; */
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
/* struct sta_priv *pstapriv = &padapter->stapriv; */
@@ -3866,9 +3863,7 @@ static int rtw_hostapd_sta_flush(struct net_device *dev)
flush_all_cam_entry(padapter); /* clear CAM */
- ret = rtw_sta_flush(padapter);
-
- return ret;
+ return rtw_sta_flush(padapter);
}
@@ -4266,7 +4261,6 @@ static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param,
static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *param, int len)
{
- int ret = 0;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -4279,15 +4273,12 @@ static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *p
return -EINVAL;
}
- ret = rtw_acl_remove_sta(padapter, param->sta_addr);
-
- return ret;
+ return rtw_acl_remove_sta(padapter, param->sta_addr);
}
static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *param, int len)
{
- int ret = 0;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -4300,9 +4291,7 @@ static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *para
return -EINVAL;
}
- ret = rtw_acl_add_sta(padapter, param->sta_addr);
-
- return ret;
+ return rtw_acl_add_sta(padapter, param->sta_addr);
}
@@ -4345,7 +4334,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
goto out;
}
- param = (struct ieee_param *)rtw_malloc(p->length);
+ param = rtw_malloc(p->length);
if (param == NULL) {
ret = -ENOMEM;
goto out;
@@ -4673,7 +4662,7 @@ static int rtw_test(
DBG_871X("+%s\n", __func__);
len = wrqu->data.length;
- pbuf = (u8 *)rtw_zmalloc(len);
+ pbuf = rtw_zmalloc(len);
if (pbuf == NULL) {
DBG_871X("%s: no memory!\n", __func__);
return -ENOMEM;
@@ -5029,12 +5018,12 @@ static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
struct iw_handler_def rtw_handlers_def = {
.standard = rtw_handlers,
- .num_standard = sizeof(rtw_handlers) / sizeof(iw_handler),
+ .num_standard = ARRAY_SIZE(rtw_handlers),
#if defined(CONFIG_WEXT_PRIV)
.private = rtw_private_handler,
.private_args = (struct iw_priv_args *)rtw_private_args,
- .num_private = sizeof(rtw_private_handler) / sizeof(iw_handler),
- .num_private_args = sizeof(rtw_private_args) / sizeof(struct iw_priv_args),
+ .num_private = ARRAY_SIZE(rtw_private_handler),
+ .num_private_args = ARRAY_SIZE(rtw_private_args),
#endif
.get_wireless_stats = rtw_get_wireless_stats,
};
@@ -5121,8 +5110,8 @@ static int rtw_ioctl_wext_private(struct net_device *dev, union iwreq_data *wrq_
priv = rtw_private_handler;
priv_args = rtw_private_args;
- num_priv = sizeof(rtw_private_handler) / sizeof(iw_handler);
- num_priv_args = sizeof(rtw_private_args) / sizeof(struct iw_priv_args);
+ num_priv = ARRAY_SIZE(rtw_private_handler);
+ num_priv_args = ARRAY_SIZE(rtw_private_args);
if (num_priv_args == 0) {
err = -EOPNOTSUPP;
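The iw_handler_def hunks replace open-coded sizeof(array) / sizeof(element) with ARRAY_SIZE(), which reads better and fails at build time if accidentally applied to a pointer instead of an array. A tiny illustration with hypothetical names:

#include <linux/kernel.h>       /* ARRAY_SIZE() */

static const int demo_rates[] = { 10, 20, 55, 110 };

static unsigned int demo_num_rates(void)
{
        /* expands to sizeof(demo_rates) / sizeof(demo_rates[0]),
         * with a compile-time check that the argument really is an array
         */
        return ARRAY_SIZE(demo_rates);
}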
diff --git a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
index 80ca2d781c5d..a4ef5789d794 100644
--- a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
@@ -19,18 +19,21 @@
#include <drv_types.h>
#include <rtw_debug.h>
-static void _dynamic_check_timer_handlder (void *FunctionContext)
+static void _dynamic_check_timer_handler(struct timer_list *t)
{
- struct adapter *adapter = FunctionContext;
+ struct adapter *adapter =
+ from_timer(adapter, t, mlmepriv.dynamic_chk_timer);
- rtw_dynamic_check_timer_handlder(adapter);
+ rtw_dynamic_check_timer_handler(adapter);
_set_timer(&adapter->mlmepriv.dynamic_chk_timer, 2000);
}
-static void _rtw_set_scan_deny_timer_hdl(void *FunctionContext)
+static void _rtw_set_scan_deny_timer_hdl(struct timer_list *t)
{
- struct adapter *adapter = FunctionContext;
+ struct adapter *adapter =
+ from_timer(adapter, t, mlmepriv.set_scan_deny_timer);
+
rtw_set_scan_deny_timer_hdl(adapter);
}
@@ -38,21 +41,20 @@ void rtw_init_mlme_timer(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- _init_timer(&(pmlmepriv->assoc_timer), padapter->pnetdev, _rtw_join_timeout_handler, padapter);
- /* _init_timer(&(pmlmepriv->sitesurveyctrl.sitesurvey_ctrl_timer), padapter->pnetdev, sitesurvey_ctrl_handler, padapter); */
- _init_timer(&(pmlmepriv->scan_to_timer), padapter->pnetdev, rtw_scan_timeout_handler, padapter);
-
- _init_timer(&(pmlmepriv->dynamic_chk_timer), padapter->pnetdev, _dynamic_check_timer_handlder, padapter);
-
- _init_timer(&(pmlmepriv->set_scan_deny_timer), padapter->pnetdev, _rtw_set_scan_deny_timer_hdl, padapter);
+ timer_setup(&pmlmepriv->assoc_timer, _rtw_join_timeout_handler, 0);
+ timer_setup(&pmlmepriv->scan_to_timer, rtw_scan_timeout_handler, 0);
+ timer_setup(&pmlmepriv->dynamic_chk_timer,
+ _dynamic_check_timer_handler, 0);
+ timer_setup(&pmlmepriv->set_scan_deny_timer,
+ _rtw_set_scan_deny_timer_hdl, 0);
}
void rtw_os_indicate_connect(struct adapter *adapter)
{
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
- if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ==true) ||
- (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ==true))
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true))
{
rtw_cfg80211_ibss_indicate_connect(adapter);
}
@@ -99,7 +101,7 @@ void rtw_reset_securitypriv(struct adapter *adapter)
/* reset RX BIP packet number */
pmlmeext->mgnt_80211w_IPN_rx = 0;
- memset((unsigned char *)&adapter->securitypriv, 0, sizeof (struct security_priv));
+ memset((unsigned char *)&adapter->securitypriv, 0, sizeof(struct security_priv));
/* Added by Albert 2009/02/18 */
/* Restore the PMK information to securitypriv structure for the following connection. */
@@ -116,9 +118,9 @@ void rtw_reset_securitypriv(struct adapter *adapter)
{
/* if (adapter->mlmepriv.fw_state & WIFI_STATION_STATE) */
/* */
- struct security_priv *psec_priv =&adapter->securitypriv;
+ struct security_priv *psec_priv = &adapter->securitypriv;
- psec_priv->dot11AuthAlgrthm =dot11AuthAlgrthm_Open; /* open system */
+ psec_priv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
psec_priv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
psec_priv->dot11PrivacyKeyIndex = 0;
@@ -150,7 +152,7 @@ void rtw_os_indicate_disconnect(struct adapter *adapter)
void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
{
uint len;
- u8 *buff,*p, i;
+ u8 *buff, *p, i;
union iwreq_data wrqu;
RT_TRACE(_module_mlme_osdep_c_, _drv_info_, ("+rtw_report_sec_ie, authmode =%d\n", authmode));
@@ -168,22 +170,22 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
}
p = buff;
- p+=sprintf(p,"ASSOCINFO(ReqIEs =");
+ p += sprintf(p, "ASSOCINFO(ReqIEs =");
- len = sec_ie[1]+2;
- len = (len < IW_CUSTOM_MAX) ? len:IW_CUSTOM_MAX;
+ len = sec_ie[1] + 2;
+ len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
- for (i = 0;i<len;i++) {
- p+=sprintf(p,"%02x", sec_ie[i]);
+ for (i = 0; i < len; i++) {
+ p += sprintf(p, "%02x", sec_ie[i]);
}
- p+=sprintf(p,")");
+ p += sprintf(p, ")");
memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length =p-buff;
+ wrqu.data.length = p - buff;
- wrqu.data.length = (wrqu.data.length<IW_CUSTOM_MAX) ? wrqu.data.length:IW_CUSTOM_MAX;
+ wrqu.data.length = (wrqu.data.length < IW_CUSTOM_MAX) ? wrqu.data.length : IW_CUSTOM_MAX;
kfree(buff);
}
@@ -191,14 +193,14 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta)
{
- _init_timer(&psta->addba_retry_timer, padapter->pnetdev, addba_timer_hdl, psta);
+ timer_setup(&psta->addba_retry_timer, addba_timer_hdl, 0);
}
void init_mlme_ext_timer(struct adapter *padapter)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- _init_timer(&pmlmeext->survey_timer, padapter->pnetdev, survey_timer_hdl, padapter);
- _init_timer(&pmlmeext->link_timer, padapter->pnetdev, link_timer_hdl, padapter);
- _init_timer(&pmlmeext->sa_query_timer, padapter->pnetdev, sa_query_timer_hdl, padapter);
+ timer_setup(&pmlmeext->survey_timer, survey_timer_hdl, 0);
+ timer_setup(&pmlmeext->link_timer, link_timer_hdl, 0);
+ timer_setup(&pmlmeext->sa_query_timer, sa_query_timer_hdl, 0);
}
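mlme_linux.c shows the init side of the timer conversion: _init_timer(timer, netdev, fn, context) becomes timer_setup(timer, fn, flags), and the context argument disappears because the handler recovers it via from_timer(). A condensed sketch of the resulting shape, with hypothetical names:

#include <linux/timer.h>

struct demo_mlme {                          /* hypothetical, like mlme_priv */
        struct timer_list scan_to_timer;
};

static void demo_scan_timeout_hdl(struct timer_list *t)
{
        struct demo_mlme *mlme = from_timer(mlme, t, scan_to_timer);

        (void)mlme;                         /* handle the timeout here */
}

static void demo_init_mlme_timer(struct demo_mlme *mlme)
{
        /* old: _init_timer(&mlme->scan_to_timer, netdev, fn, context);
         * new: no context argument; the handler uses from_timer() instead
         */
        timer_setup(&mlme->scan_to_timer, demo_scan_timeout_hdl, 0);

        /* arming is unchanged, e.g.
         * mod_timer(&mlme->scan_to_timer, jiffies + msecs_to_jiffies(2000));
         */
}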
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 021589913681..fc5e3d4739c0 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -24,9 +24,9 @@ MODULE_AUTHOR("Realtek Semiconductor Corp.");
MODULE_VERSION(DRIVERVERSION);
/* module param defaults */
-static int rtw_chip_version = 0x00;
+static int rtw_chip_version;
static int rtw_rfintfs = HWPI;
-static int rtw_lbkmode = 0;/* RTL8712_AIR_TRX; */
+static int rtw_lbkmode;/* RTL8712_AIR_TRX; */
static int rtw_network_mode = Ndis802_11IBSS;/* Ndis802_11Infrastructure;infra, ad-hoc, auto */
@@ -40,12 +40,12 @@ static int rtw_frag_thresh = 2346;/* */
static int rtw_preamble = PREAMBLE_LONG;/* long, short, auto */
static int rtw_scan_mode = 1;/* active, passive */
static int rtw_adhoc_tx_pwr = 1;
-static int rtw_soft_ap = 0;
+static int rtw_soft_ap;
/* int smart_ps = 1; */
static int rtw_power_mgnt = 1;
static int rtw_ips_mode = IPS_NORMAL;
module_param(rtw_ips_mode, int, 0644);
-MODULE_PARM_DESC(rtw_ips_mode,"The default IPS mode");
+MODULE_PARM_DESC(rtw_ips_mode, "The default IPS mode");
static int rtw_smart_ps = 2;
@@ -61,18 +61,18 @@ static int rtw_busy_thresh = 40;
/* int qos_enable = 0; */
static int rtw_ack_policy = NORMAL_ACK;
-static int rtw_software_encrypt = 0;
-static int rtw_software_decrypt = 0;
+static int rtw_software_encrypt;
+static int rtw_software_decrypt;
-static int rtw_acm_method = 0;/* 0:By SW 1:By HW. */
+static int rtw_acm_method;/* 0:By SW 1:By HW. */
static int rtw_wmm_enable = 1;/* default is set to enable the wmm. */
-static int rtw_uapsd_enable = 0;
+static int rtw_uapsd_enable;
static int rtw_uapsd_max_sp = NO_LIMIT;
-static int rtw_uapsd_acbk_en = 0;
-static int rtw_uapsd_acbe_en = 0;
-static int rtw_uapsd_acvi_en = 0;
-static int rtw_uapsd_acvo_en = 0;
+static int rtw_uapsd_acbk_en;
+static int rtw_uapsd_acbe_en;
+static int rtw_uapsd_acvi_en;
+static int rtw_uapsd_acvo_en;
int rtw_ht_enable = 1;
/* 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160MHz, 4: 80+80MHz */
@@ -81,7 +81,7 @@ int rtw_ht_enable = 1;
static int rtw_bw_mode = 0x21;
static int rtw_ampdu_enable = 1;/* for enable tx_ampdu ,0: disable, 0x1:enable (but wifi_spec should be 0), 0x2: force enable (don't care wifi_spec) */
static int rtw_rx_stbc = 1;/* 0: disable, 1:enable 2.4g */
-static int rtw_ampdu_amsdu = 0;/* 0: disabled, 1:enabled, 2:auto . There is an IOT issu with DLINK DIR-629 when the flag turn on */
+static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto . There is an IOT issu with DLINK DIR-629 when the flag turn on */
/* Short GI support Bit Map */
/* BIT0 - 20MHz, 0: non-support, 1: support */
/* BIT1 - 40MHz, 0: non-support, 1: support */
@@ -99,8 +99,8 @@ static int rtw_lowrate_two_xmit = 1;/* Use 2 path Tx to transmit MCS0~7 and lega
/* int rf_config = RF_1T2R; 1T2R */
static int rtw_rf_config = RF_MAX_TYPE; /* auto */
-static int rtw_low_power = 0;
-static int rtw_wifi_spec = 0;
+static int rtw_low_power;
+static int rtw_wifi_spec;
static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX;
static int rtw_btcoex_enable = 1;
@@ -113,40 +113,38 @@ static int rtw_ant_num = -1; /* <0: undefined, >0: Antenna number */
module_param(rtw_ant_num, int, 0644);
MODULE_PARM_DESC(rtw_ant_num, "Antenna number setting");
-static int rtw_AcceptAddbaReq = true;/* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req. */
-
static int rtw_antdiv_cfg = 1; /* 0:OFF , 1:ON, 2:decide by Efuse config */
-static int rtw_antdiv_type = 0 ; /* 0:decide by efuse 1: for 88EE, 1Tx and 1RxCG are diversity.(2 Ant with SPDT), 2: for 88EE, 1Tx and 2Rx are diversity.(2 Ant, Tx and RxCG are both on aux port, RxCS is on main port), 3: for 88EE, 1Tx and 1RxCG are fixed.(1Ant, Tx and RxCG are both on aux port) */
+static int rtw_antdiv_type; /* 0:decide by efuse 1: for 88EE, 1Tx and 1RxCG are diversity.(2 Ant with SPDT), 2: for 88EE, 1Tx and 2Rx are diversity.(2 Ant, Tx and RxCG are both on aux port, RxCS is on main port), 3: for 88EE, 1Tx and 1RxCG are fixed.(1Ant, Tx and RxCG are both on aux port) */
-static int rtw_enusbss = 0;/* 0:disable, 1:enable */
+static int rtw_enusbss;/* 0:disable, 1:enable */
-static int rtw_hwpdn_mode =2;/* 0:disable, 1:enable, 2: by EFUSE config */
+static int rtw_hwpdn_mode = 2;/* 0:disable, 1:enable, 2: by EFUSE config */
#ifdef CONFIG_HW_PWRP_DETECTION
static int rtw_hwpwrp_detect = 1;
#else
-static int rtw_hwpwrp_detect = 0; /* HW power ping detect 0:disable , 1:enable */
+static int rtw_hwpwrp_detect; /* HW power ping detect 0:disable , 1:enable */
#endif
-static int rtw_hw_wps_pbc = 0;
+static int rtw_hw_wps_pbc;
int rtw_mc2u_disable = 0;
-static int rtw_80211d = 0;
+static int rtw_80211d;
#ifdef CONFIG_QOS_OPTIMIZATION
static int rtw_qos_opt_enable = 1;/* 0: disable, 1:enable */
#else
-static int rtw_qos_opt_enable = 0;/* 0: disable, 1:enable */
+static int rtw_qos_opt_enable;/* 0: disable, 1:enable */
#endif
module_param(rtw_qos_opt_enable, int, 0644);
-static char* ifname = "wlan%d";
+static char *ifname = "wlan%d";
module_param(ifname, charp, 0644);
MODULE_PARM_DESC(ifname, "The default name to allocate for first interface");
-char* rtw_initmac = NULL; /* temp mac address if users want to use instead of the mac address in Efuse */
+char *rtw_initmac = NULL; /* temp mac address if users want to use instead of the mac address in Efuse */
module_param(rtw_initmac, charp, 0644);
module_param(rtw_channel_plan, int, 0644);
@@ -183,16 +181,16 @@ module_param(rtw_hwpwrp_detect, int, 0644);
module_param(rtw_hw_wps_pbc, int, 0644);
-static uint rtw_max_roaming_times =2;
+static uint rtw_max_roaming_times = 2;
module_param(rtw_max_roaming_times, uint, 0644);
-MODULE_PARM_DESC(rtw_max_roaming_times,"The max roaming times to try");
+MODULE_PARM_DESC(rtw_max_roaming_times, "The max roaming times to try");
module_param(rtw_mc2u_disable, int, 0644);
module_param(rtw_80211d, int, 0644);
MODULE_PARM_DESC(rtw_80211d, "Enable 802.11d mechanism");
-static uint rtw_notch_filter = 0;
+static uint rtw_notch_filter;
module_param(rtw_notch_filter, uint, 0644);
MODULE_PARM_DESC(rtw_notch_filter, "0:Disable, 1:Enable, 2:Enable only for P2P");
@@ -202,14 +200,14 @@ static uint rtw_hiq_filter = CONFIG_RTW_HIQ_FILTER;
module_param(rtw_hiq_filter, uint, 0644);
MODULE_PARM_DESC(rtw_hiq_filter, "0:allow all, 1:allow special, 2:deny all");
-static int rtw_tx_pwr_lmt_enable = 0;
-static int rtw_tx_pwr_by_rate = 0;
+static int rtw_tx_pwr_lmt_enable;
+static int rtw_tx_pwr_by_rate;
module_param(rtw_tx_pwr_lmt_enable, int, 0644);
-MODULE_PARM_DESC(rtw_tx_pwr_lmt_enable,"0:Disable, 1:Enable, 2: Depend on efuse");
+MODULE_PARM_DESC(rtw_tx_pwr_lmt_enable, "0:Disable, 1:Enable, 2: Depend on efuse");
module_param(rtw_tx_pwr_by_rate, int, 0644);
-MODULE_PARM_DESC(rtw_tx_pwr_by_rate,"0:Disable, 1:Enable, 2: Depend on efuse");
+MODULE_PARM_DESC(rtw_tx_pwr_by_rate, "0:Disable, 1:Enable, 2: Depend on efuse");
char *rtw_phy_file_path = "";
module_param(rtw_phy_file_path, charp, 0644);
@@ -222,12 +220,12 @@ MODULE_PARM_DESC(rtw_phy_file_path, "The path of phy parameter");
/* BIT4 - RF, 0: non-support, 1: support */
/* BIT5 - RF_TXPWR_TRACK, 0: non-support, 1: support */
/* BIT6 - RF_TXPWR_LMT, 0: non-support, 1: support */
-static int rtw_load_phy_file = (BIT2|BIT6);
+static int rtw_load_phy_file = (BIT2 | BIT6);
module_param(rtw_load_phy_file, int, 0644);
-MODULE_PARM_DESC(rtw_load_phy_file,"PHY File Bit Map");
-static int rtw_decrypt_phy_file = 0;
+MODULE_PARM_DESC(rtw_load_phy_file, "PHY File Bit Map");
+static int rtw_decrypt_phy_file;
module_param(rtw_decrypt_phy_file, int, 0644);
-MODULE_PARM_DESC(rtw_decrypt_phy_file,"Enable Decrypt PHY File");
+MODULE_PARM_DESC(rtw_decrypt_phy_file, "Enable Decrypt PHY File");
int _netdev_open(struct net_device *pnetdev);
int netdev_open (struct net_device *pnetdev);
@@ -255,8 +253,8 @@ static uint loadparam(struct adapter *padapter, _nic_hdl pnetdev)
registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense ;
registry_par->vcs_type = (u8)rtw_vcs_type;
- registry_par->rts_thresh =(u16)rtw_rts_thresh;
- registry_par->frag_thresh =(u16)rtw_frag_thresh;
+ registry_par->rts_thresh = (u16)rtw_rts_thresh;
+ registry_par->frag_thresh = (u16)rtw_frag_thresh;
registry_par->preamble = (u8)rtw_preamble;
registry_par->scan_mode = (u8)rtw_scan_mode;
registry_par->adhoc_tx_pwr = (u8)rtw_adhoc_tx_pwr;
@@ -311,7 +309,7 @@ static uint loadparam(struct adapter *padapter, _nic_hdl pnetdev)
registry_par->bt_ampdu = (u8)rtw_bt_ampdu;
registry_par->ant_num = (s8)rtw_ant_num;
- registry_par->bAcceptAddbaReq = (u8)rtw_AcceptAddbaReq;
+ registry_par->accept_addba_req = true;
registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg;
registry_par->antdiv_type = (u8)rtw_antdiv_type;
@@ -351,8 +349,7 @@ static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
struct sockaddr *addr = p;
- if (padapter->bup == false)
- {
+ if (padapter->bup == false) {
/* DBG_871X("r8711_net_set_mac_address(), MAC =%x:%x:%x:%x:%x:%x\n", addr->sa_data[0], addr->sa_data[1], addr->sa_data[2], addr->sa_data[3], */
/* addr->sa_data[4], addr->sa_data[5]); */
memcpy(padapter->eeprompriv.mac_addr, addr->sa_data, ETH_ALEN);
@@ -425,9 +422,7 @@ static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb
skb->priority = rtw_classify8021d(skb);
if (pmlmepriv->acm_mask != 0)
- {
skb->priority = qos_acm(pmlmepriv->acm_mask, skb->priority);
- }
return rtw_1d_to_queue[skb->priority];
}
@@ -440,12 +435,12 @@ u16 rtw_recv_select_queue(struct sk_buff *skb)
u32 priority;
u8 *pdata = skb->data;
- memcpy(&eth_type, pdata+(ETH_ALEN<<1), 2);
+ memcpy(&eth_type, pdata + (ETH_ALEN << 1), 2);
switch (be16_to_cpu(eth_type)) {
case ETH_P_IP:
- piphdr = (struct iphdr *)(pdata+ETH_HLEN);
+ piphdr = (struct iphdr *)(pdata + ETH_HLEN);
dscp = piphdr->tos & 0xfc;
@@ -457,17 +452,16 @@ u16 rtw_recv_select_queue(struct sk_buff *skb)
}
return rtw_1d_to_queue[priority];
-
}
-static int rtw_ndev_notifier_call(struct notifier_block * nb, unsigned long state, void *ptr)
+static int rtw_ndev_notifier_call(struct notifier_block *nb, unsigned long state, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (dev->netdev_ops->ndo_do_ioctl != rtw_ioctl)
return NOTIFY_DONE;
- DBG_871X_LEVEL(_drv_info_, FUNC_NDEV_FMT" state:%lu\n", FUNC_NDEV_ARG(dev), state);
+ DBG_871X_LEVEL(_drv_info_, FUNC_NDEV_FMT " state:%lu\n", FUNC_NDEV_ARG(dev), state);
switch (state) {
case NETDEV_CHANGENAME:
@@ -497,7 +491,7 @@ static int rtw_ndev_init(struct net_device *dev)
{
struct adapter *adapter = rtw_netdev_priv(dev);
- DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(adapter));
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT "\n", FUNC_ADPT_ARG(adapter));
strncpy(adapter->old_ifname, dev->name, IFNAMSIZ);
rtw_adapter_proc_init(dev);
@@ -508,7 +502,7 @@ static void rtw_ndev_uninit(struct net_device *dev)
{
struct adapter *adapter = rtw_netdev_priv(dev);
- DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(adapter));
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT "\n", FUNC_ADPT_ARG(adapter));
rtw_adapter_proc_deinit(dev);
}
@@ -561,7 +555,7 @@ struct net_device *rtw_init_netdev(struct adapter *old_padapter)
pnetdev->netdev_ops = &rtw_netdev_ops;
/* pnetdev->tx_timeout = NULL; */
- pnetdev->watchdog_timeo = HZ*3; /* 3 second timeout */
+ pnetdev->watchdog_timeo = HZ * 3; /* 3 second timeout */
pnetdev->wireless_handlers = (struct iw_handler_def *)&rtw_handlers_def;
/* step 2. */
@@ -597,7 +591,7 @@ u32 rtw_start_drv_threads(struct adapter *padapter)
_status = _FAIL;
padapter->cmdThread = kthread_run(rtw_cmd_thread, padapter, "RTW_CMD_THREAD");
- if (IS_ERR(padapter->cmdThread))
+ if (IS_ERR(padapter->cmdThread))
_status = _FAIL;
else
down(&padapter->cmdpriv.terminate_cmdthread_sema); /* wait for cmd_thread to run */
@@ -623,7 +617,7 @@ void rtw_stop_drv_threads (struct adapter *padapter)
static u8 rtw_init_default_value(struct adapter *padapter)
{
u8 ret = _SUCCESS;
- struct registry_priv* pregistrypriv = &padapter->registrypriv;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
@@ -652,8 +646,8 @@ static u8 rtw_init_default_value(struct adapter *padapter)
#ifdef CONFIG_GTK_OL
psecuritypriv->binstallKCK_KEK = _FAIL;
#endif /* CONFIG_GTK_OL */
- psecuritypriv->sw_encrypt =pregistrypriv->software_encrypt;
- psecuritypriv->sw_decrypt =pregistrypriv->software_decrypt;
+ psecuritypriv->sw_encrypt = pregistrypriv->software_encrypt;
+ psecuritypriv->sw_decrypt = pregistrypriv->software_decrypt;
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
@@ -691,7 +685,8 @@ struct dvobj_priv *devobj_init(void)
{
struct dvobj_priv *pdvobj = NULL;
- if ((pdvobj = (struct dvobj_priv*)rtw_zmalloc(sizeof(*pdvobj))) == NULL)
+ pdvobj = rtw_zmalloc(sizeof(*pdvobj));
+ if (pdvobj == NULL)
return NULL;
mutex_init(&pdvobj->hw_init_mutex);
@@ -722,7 +717,7 @@ void devobj_deinit(struct dvobj_priv *pdvobj)
mutex_destroy(&pdvobj->setch_mutex);
mutex_destroy(&pdvobj->setbw_mutex);
- kfree((u8 *)pdvobj);
+ kfree(pdvobj);
}
u8 rtw_reset_drv_sw(struct adapter *padapter)
@@ -748,7 +743,7 @@ u8 rtw_reset_drv_sw(struct adapter *padapter)
pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 0;
pmlmepriv->LinkDetectInfo.LowPowerTransitionCount = 0;
- _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY |_FW_UNDER_LINKING);
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING);
pwrctrlpriv->pwr_state_check_cnts = 0;
@@ -777,7 +772,7 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
goto exit;
}
- padapter->cmdpriv.padapter =padapter;
+ padapter->cmdpriv.padapter = padapter;
if ((rtw_init_evt_priv(&padapter->evtpriv)) == _FAIL) {
RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init evt_priv\n"));
@@ -935,7 +930,7 @@ static int _rtw_drv_register_netdev(struct adapter *padapter, char *name)
goto error_register_netdev;
}
- DBG_871X("%s, MAC Address (if%d) = " MAC_FMT "\n", __func__, (padapter->iface_id+1), MAC_ARG(pnetdev->dev_addr));
+ DBG_871X("%s, MAC Address (if%d) = " MAC_FMT "\n", __func__, (padapter->iface_id + 1), MAC_ARG(pnetdev->dev_addr));
return ret;
@@ -984,9 +979,9 @@ int _netdev_open(struct net_device *pnetdev)
goto netdev_open_error;
}
- DBG_871X("MAC Address = "MAC_FMT"\n", MAC_ARG(pnetdev->dev_addr));
+ DBG_871X("MAC Address = " MAC_FMT "\n", MAC_ARG(pnetdev->dev_addr));
- status =rtw_start_drv_threads(padapter);
+ status = rtw_start_drv_threads(padapter);
if (status == _FAIL) {
DBG_871X("Initialize driver software resource Failed!\n");
goto netdev_open_error;
@@ -1027,7 +1022,6 @@ netdev_open_error:
DBG_871X("-871x_drv - drv_open fail, bup =%d\n", padapter->bup);
return (-1);
-
}
int netdev_open(struct net_device *pnetdev)
@@ -1036,8 +1030,7 @@ int netdev_open(struct net_device *pnetdev)
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
struct pwrctrl_priv *pwrctrlpriv = adapter_to_pwrctl(padapter);
- if (pwrctrlpriv->bInSuspend == true)
- {
+ if (pwrctrlpriv->bInSuspend == true) {
DBG_871X("+871x_drv - drv_open, bInSuspend =%d\n", pwrctrlpriv->bInSuspend);
return 0;
}
@@ -1066,16 +1059,13 @@ static int ips_netdrv_open(struct adapter *padapter)
/* padapter->bup = true; */
status = rtw_hal_init(padapter);
- if (status == _FAIL)
- {
+ if (status == _FAIL) {
RT_TRACE(_module_os_intfs_c_, _drv_err_, ("ips_netdrv_open(): Can't init h/w!\n"));
goto netdev_open_error;
}
if (padapter->intf_start)
- {
padapter->intf_start(padapter);
- }
_set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);
@@ -1098,7 +1088,6 @@ int rtw_ips_pwr_up(struct adapter *padapter)
DBG_871X("<=== rtw_ips_pwr_up..............\n");
return result;
-
}
void rtw_ips_pwr_down(struct adapter *padapter)
@@ -1119,10 +1108,7 @@ void rtw_ips_dev_unload(struct adapter *padapter)
if (padapter->bSurpriseRemoved == false)
- {
rtw_hal_deinit(padapter);
- }
-
}
@@ -1132,15 +1118,14 @@ static int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
- if (true == bnormal)
- {
+ if (true == bnormal) {
if (mutex_lock_interruptible(&(adapter_to_dvobj(padapter)->hw_init_mutex)) == 0) {
status = _netdev_open(pnetdev);
mutex_unlock(&(adapter_to_dvobj(padapter)->hw_init_mutex));
}
}
else
- status = (_SUCCESS == ips_netdrv_open(padapter))?(0):(-1);
+ status = (_SUCCESS == ips_netdrv_open(padapter)) ? (0) : (-1);
return status;
}
@@ -1152,8 +1137,7 @@ static int netdev_close(struct net_device *pnetdev)
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+871x_drv - drv_close\n"));
- if (pwrctl->bInternalAutoSuspend == true)
- {
+ if (pwrctl->bInternalAutoSuspend == true) {
/* rtw_pwr_wakeup(padapter); */
if (pwrctl->rf_pwrstate == rf_off)
pwrctl->ps_flag = true;
@@ -1174,8 +1158,7 @@ static int netdev_close(struct net_device *pnetdev)
DBG_871X("(2)871x_drv - drv_close, bup =%d, hw_init_completed =%d\n", padapter->bup, padapter->hw_init_completed);
/* s1. */
- if (pnetdev)
- {
+ if (pnetdev) {
if (!rtw_netif_queue_stopped(pnetdev))
rtw_netif_stop_queue(pnetdev);
}
@@ -1198,12 +1181,11 @@ static int netdev_close(struct net_device *pnetdev)
DBG_871X("-871x_drv - drv_close, bup =%d\n", padapter->bup);
return 0;
-
}
void rtw_ndev_destructor(struct net_device *ndev)
{
- DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+ DBG_871X(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
if (ndev->ieee80211_ptr)
kfree((u8 *)ndev->ieee80211_ptr);
@@ -1219,8 +1201,7 @@ void rtw_dev_unload(struct adapter *padapter)
RT_TRACE(_module_hci_intfs_c_, _drv_notice_, ("+%s\n", __func__));
- if (padapter->bup == true)
- {
+ if (padapter->bup == true) {
DBG_871X("===> %s\n", __func__);
padapter->bDriverStopped = true;
@@ -1257,12 +1238,11 @@ void rtw_dev_unload(struct adapter *padapter)
DBG_871X_LEVEL(_drv_always_, "%s: driver not in IPS\n", __func__);
}
- if (padapter->bSurpriseRemoved == false)
- {
+ if (padapter->bSurpriseRemoved == false) {
rtw_btcoex_IpsNotify(padapter, pwrctl->ips_mode_req);
#ifdef CONFIG_WOWLAN
if (pwrctl->bSupportRemoteWakeup == true &&
- pwrctl->wowlan_mode ==true) {
+ pwrctl->wowlan_mode == true) {
DBG_871X_LEVEL(_drv_always_, "%s bSupportRemoteWakeup ==true do not run rtw_hal_deinit()\n", __func__);
}
else
@@ -1292,12 +1272,11 @@ static int rtw_suspend_free_assoc_resource(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
if (rtw_chk_roam_flags(padapter, RTW_ROAM_ON_RESUME)) {
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
- && check_fwstate(pmlmepriv, _FW_LINKED))
- {
+ && check_fwstate(pmlmepriv, _FW_LINKED)) {
DBG_871X("%s %s(" MAC_FMT "), length:%d assoc_ssid.length:%d\n", __func__,
pmlmepriv->cur_network.network.Ssid.Ssid,
MAC_ARG(pmlmepriv->cur_network.network.MacAddress),
@@ -1307,14 +1286,12 @@ static int rtw_suspend_free_assoc_resource(struct adapter *padapter)
}
}
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) && check_fwstate(pmlmepriv, _FW_LINKED))
- {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) && check_fwstate(pmlmepriv, _FW_LINKED)) {
rtw_disassoc_cmd(padapter, 0, false);
/* s2-2. indicate disconnect to os */
rtw_indicate_disconnect(padapter);
}
- else if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- {
+ else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
rtw_sta_flush(padapter);
}
@@ -1327,13 +1304,12 @@ static int rtw_suspend_free_assoc_resource(struct adapter *padapter)
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
rtw_indicate_scan_done(padapter, 1);
- if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true)
- {
+ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) {
DBG_871X_LEVEL(_drv_always_, "%s: fw_under_linking\n", __func__);
rtw_indicate_disconnect(padapter);
}
- DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
return _SUCCESS;
}
@@ -1347,7 +1323,7 @@ int rtw_suspend_wow(struct adapter *padapter)
struct wowlan_ioctl_param poidparam;
int ret = _SUCCESS;
- DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
DBG_871X("wowlan_mode: %d\n", pwrpriv->wowlan_mode);
@@ -1379,8 +1355,7 @@ int rtw_suspend_wow(struct adapter *padapter)
padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_WOWLAN, (u8 *)&poidparam);
if (rtw_chk_roam_flags(padapter, RTW_ROAM_ON_RESUME)) {
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
- && check_fwstate(pmlmepriv, _FW_LINKED))
- {
+ && check_fwstate(pmlmepriv, _FW_LINKED)) {
DBG_871X("%s %s(" MAC_FMT "), length:%d assoc_ssid.length:%d\n", __func__,
pmlmepriv->cur_network.network.Ssid.Ssid,
MAC_ARG(pmlmepriv->cur_network.network.MacAddress),
@@ -1393,15 +1368,14 @@ int rtw_suspend_wow(struct adapter *padapter)
DBG_871X_LEVEL(_drv_always_, "%s: wowmode suspending\n", __func__);
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true)
- {
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
DBG_871X_LEVEL(_drv_always_, "%s: fw_under_survey\n", __func__);
rtw_indicate_scan_done(padapter, 1);
clr_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
}
if (rtw_get_ch_setting_union(padapter, &ch, &bw, &offset) != 0) {
- DBG_871X(FUNC_ADPT_FMT" back to linked/linking union - ch:%u, bw:%u, offset:%u\n",
+ DBG_871X(FUNC_ADPT_FMT " back to linked/linking union - ch:%u, bw:%u, offset:%u\n",
FUNC_ADPT_ARG(padapter), ch, bw, offset);
set_channel_bwmode(padapter, ch, offset, bw);
}
@@ -1410,13 +1384,11 @@ int rtw_suspend_wow(struct adapter *padapter)
DBG_871X_LEVEL(_drv_always_, "%s: pno: %d\n", __func__, pwrpriv->wowlan_pno_enable);
else
rtw_set_ps_mode(padapter, PS_MODE_DTIM, 0, 0, "WOWLAN");
-
}
- else
- {
+ else {
DBG_871X_LEVEL(_drv_always_, "%s: ### ERROR ### wowlan_mode =%d\n", __func__, pwrpriv->wowlan_mode);
}
- DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
return ret;
}
#endif /* ifdef CONFIG_WOWLAN */
@@ -1430,7 +1402,7 @@ int rtw_suspend_ap_wow(struct adapter *padapter)
struct wowlan_ioctl_param poidparam;
int ret = _SUCCESS;
- DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
pwrpriv->wowlan_ap_mode = true;
@@ -1462,14 +1434,14 @@ int rtw_suspend_ap_wow(struct adapter *padapter)
DBG_871X_LEVEL(_drv_always_, "%s: wowmode suspending\n", __func__);
if (rtw_get_ch_setting_union(padapter, &ch, &bw, &offset) != 0) {
- DBG_871X(FUNC_ADPT_FMT" back to linked/linking union - ch:%u, bw:%u, offset:%u\n",
+ DBG_871X(FUNC_ADPT_FMT " back to linked/linking union - ch:%u, bw:%u, offset:%u\n",
FUNC_ADPT_ARG(padapter), ch, bw, offset);
set_channel_bwmode(padapter, ch, offset, bw);
}
rtw_set_ps_mode(padapter, PS_MODE_MIN, 0, 0, "AP-WOWLAN");
- DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
return ret;
}
#endif /* ifdef CONFIG_AP_WOWLAN */
@@ -1480,7 +1452,7 @@ static int rtw_suspend_normal(struct adapter *padapter)
struct net_device *pnetdev = padapter->pnetdev;
int ret = _SUCCESS;
- DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
if (pnetdev) {
netif_carrier_off(pnetdev);
rtw_netif_stop_queue(pnetdev);
@@ -1489,10 +1461,8 @@ static int rtw_suspend_normal(struct adapter *padapter)
rtw_suspend_free_assoc_resource(padapter);
if ((rtw_hal_check_ips_status(padapter) == true)
- || (adapter_to_pwrctl(padapter)->rf_pwrstate == rf_off))
- {
+ || (adapter_to_pwrctl(padapter)->rf_pwrstate == rf_off)) {
DBG_871X_LEVEL(_drv_always_, "%s: ### ERROR #### driver in IPS ####ERROR###!!!\n", __func__);
-
}
rtw_dev_unload(padapter);
@@ -1501,7 +1471,7 @@ static int rtw_suspend_normal(struct adapter *padapter)
if (padapter->intf_deinit)
padapter->intf_deinit(adapter_to_dvobj(padapter));
- DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
return ret;
}
@@ -1524,8 +1494,7 @@ int rtw_suspend_common(struct adapter *padapter)
while (pwrpriv->bips_processing == true)
msleep(1);
- if ((!padapter->bup) || (padapter->bDriverStopped)||(padapter->bSurpriseRemoved))
- {
+ if ((!padapter->bup) || (padapter->bDriverStopped) || (padapter->bSurpriseRemoved)) {
DBG_871X("%s bup =%d bDriverStopped =%d bSurpriseRemoved = %d\n", __func__
, padapter->bup, padapter->bDriverStopped, padapter->bSurpriseRemoved);
pdbgpriv->dbg_suspend_error_cnt++;
@@ -1599,7 +1568,7 @@ int rtw_resume_process_wow(struct adapter *padapter)
struct sta_info *psta = NULL;
int ret = _SUCCESS;
- DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
if (padapter) {
pnetdev = padapter->pnetdev;
@@ -1641,7 +1610,7 @@ int rtw_resume_process_wow(struct adapter *padapter)
}
/* Disable WOW, set H2C command */
- poidparam.subcode =WOWLAN_DISABLE;
+ poidparam.subcode = WOWLAN_DISABLE;
padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_WOWLAN, (u8 *)&poidparam);
psta = rtw_get_stainfo(&padapter->stapriv, get_bssid(&padapter->mlmepriv));
@@ -1667,11 +1636,10 @@ int rtw_resume_process_wow(struct adapter *padapter)
}
}
else {
-
DBG_871X_LEVEL(_drv_always_, "%s: ### ERROR ### wowlan_mode =%d\n", __func__, pwrpriv->wowlan_mode);
}
- if (padapter->pid[1]!= 0) {
+ if (padapter->pid[1] != 0) {
DBG_871X("pid[1]:%d\n", padapter->pid[1]);
rtw_signal_process(padapter->pid[1], SIGUSR2);
}
@@ -1680,7 +1648,6 @@ int rtw_resume_process_wow(struct adapter *padapter)
if (pwrpriv->wowlan_wake_reason == FWDecisionDisconnect ||
pwrpriv->wowlan_wake_reason == Rx_DisAssoc ||
pwrpriv->wowlan_wake_reason == Rx_DeAuth) {
-
DBG_871X("%s: disconnect reason: %02x\n", __func__,
pwrpriv->wowlan_wake_reason);
rtw_indicate_disconnect(padapter);
@@ -1705,12 +1672,12 @@ int rtw_resume_process_wow(struct adapter *padapter)
DBG_871X_LEVEL(_drv_always_, "do not reset timer\n");
}
- pwrpriv->wowlan_mode =false;
+ pwrpriv->wowlan_mode = false;
/* clean driver side wake up reason. */
pwrpriv->wowlan_wake_reason = 0;
exit:
- DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
return ret;
}
#endif /* ifdef CONFIG_WOWLAN */
@@ -1725,7 +1692,7 @@ int rtw_resume_process_ap_wow(struct adapter *padapter)
struct wowlan_ioctl_param poidparam;
int ret = _SUCCESS;
- DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
if (padapter) {
pnetdev = padapter->pnetdev;
@@ -1774,7 +1741,7 @@ int rtw_resume_process_ap_wow(struct adapter *padapter)
rtw_netif_wake_queue(pnetdev);
}
- if (padapter->pid[1]!= 0) {
+ if (padapter->pid[1] != 0) {
DBG_871X("pid[1]:%d\n", padapter->pid[1]);
rtw_signal_process(padapter->pid[1], SIGUSR2);
}
@@ -1785,7 +1752,7 @@ int rtw_resume_process_ap_wow(struct adapter *padapter)
/* clean driver side wake up reason. */
pwrpriv->wowlan_wake_reason = 0;
exit:
- DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
return ret;
}
#endif /* ifdef CONFIG_APWOWLAN */
@@ -1811,19 +1778,17 @@ static int rtw_resume_process_normal(struct adapter *padapter)
psdpriv = padapter->dvobj;
pdbgpriv = &psdpriv->drv_dbg;
- DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
/* interface init */
/* if (sdio_init(adapter_to_dvobj(padapter)) != _SUCCESS) */
- if ((padapter->intf_init) && (padapter->intf_init(adapter_to_dvobj(padapter)) != _SUCCESS))
- {
+ if ((padapter->intf_init) && (padapter->intf_init(adapter_to_dvobj(padapter)) != _SUCCESS)) {
ret = -1;
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("%s: initialize SDIO Failed!!\n", __func__));
goto exit;
}
rtw_hal_disable_interrupt(padapter);
/* if (sdio_alloc_irq(adapter_to_dvobj(padapter)) != _SUCCESS) */
- if ((padapter->intf_alloc_irq) && (padapter->intf_alloc_irq(adapter_to_dvobj(padapter)) != _SUCCESS))
- {
+ if ((padapter->intf_alloc_irq) && (padapter->intf_alloc_irq(adapter_to_dvobj(padapter)) != _SUCCESS)) {
ret = -1;
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("%s: sdio_alloc_irq Failed!!\n", __func__));
goto exit;
@@ -1842,28 +1807,28 @@ static int rtw_resume_process_normal(struct adapter *padapter)
netif_device_attach(pnetdev);
netif_carrier_on(pnetdev);
- if (padapter->pid[1]!= 0) {
+ if (padapter->pid[1] != 0) {
DBG_871X("pid[1]:%d\n", padapter->pid[1]);
rtw_signal_process(padapter->pid[1], SIGUSR2);
}
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- DBG_871X(FUNC_ADPT_FMT" fwstate:0x%08x - WIFI_STATION_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
+ DBG_871X(FUNC_ADPT_FMT " fwstate:0x%08x - WIFI_STATION_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
if (rtw_chk_roam_flags(padapter, RTW_ROAM_ON_RESUME))
rtw_roaming(padapter, NULL);
} else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- DBG_871X(FUNC_ADPT_FMT" fwstate:0x%08x - WIFI_AP_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
+ DBG_871X(FUNC_ADPT_FMT " fwstate:0x%08x - WIFI_AP_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
rtw_ap_restore_network(padapter);
} else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- DBG_871X(FUNC_ADPT_FMT" fwstate:0x%08x - WIFI_ADHOC_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
+ DBG_871X(FUNC_ADPT_FMT " fwstate:0x%08x - WIFI_ADHOC_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
} else {
- DBG_871X(FUNC_ADPT_FMT" fwstate:0x%08x - ???\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
+ DBG_871X(FUNC_ADPT_FMT " fwstate:0x%08x - ???\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
}
- DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
exit:
return ret;
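Many of the os_intfs.c hunks simply drop "= 0" from static module parameters: statics live in .bss and are zero-initialized by the language, so checkpatch flags the explicit initializer as redundant. Tiny illustration with hypothetical parameters:

#include <linux/module.h>

static int demo_verbose;          /* preferred: statics already start at 0 */
static int demo_retries = 3;      /* non-zero defaults still need an initializer */

module_param(demo_verbose, int, 0644);
MODULE_PARM_DESC(demo_verbose, "0:quiet, 1:verbose");
module_param(demo_retries, int, 0644);
MODULE_PARM_DESC(demo_retries, "Number of retries");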
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index a05daf06a870..f4221952dd1b 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -30,22 +30,17 @@ inline int RTW_STATUS_CODE(int error_code)
return _FAIL;
}
-u8 *_rtw_malloc(u32 sz)
+void *_rtw_malloc(u32 sz)
{
- u8 *pbuf = NULL;
-
- pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
-
- return pbuf;
+ return kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}
-u8 *_rtw_zmalloc(u32 sz)
+void *_rtw_zmalloc(u32 sz)
{
- u8 *pbuf = _rtw_malloc(sz);
+ void *pbuf = _rtw_malloc(sz);
- if (pbuf != NULL) {
+ if (pbuf)
memset(pbuf, 0, sz);
- }
return pbuf;
}
@@ -71,13 +66,6 @@ inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
return netif_rx(skb);
}
-void rtw_init_timer(_timer *ptimer, void *padapter, void *pfunc)
-{
- struct adapter *adapter = padapter;
-
- _init_timer(ptimer, adapter->pnetdev, pfunc, adapter);
-}
-
void _rtw_init_queue(struct __queue *pqueue)
{
INIT_LIST_HEAD(&(pqueue->queue));
@@ -470,7 +458,7 @@ struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
{
struct rtw_cbuf *cbuf;
- cbuf = (struct rtw_cbuf *)rtw_malloc(sizeof(*cbuf) + sizeof(void*)*size);
+ cbuf = rtw_malloc(sizeof(*cbuf) + sizeof(void *) * size);
if (cbuf) {
cbuf->write = cbuf->read = 0;
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
index f42e00081e0e..e804b430931c 100644
--- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -356,8 +356,7 @@ _recv_indicatepkt_drop:
void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
{
- struct adapter *padapter = preorder_ctrl->padapter;
-
- _init_timer(&(preorder_ctrl->reordering_ctrl_timer), padapter->pnetdev, rtw_reordering_ctrl_timeout_handler, preorder_ctrl);
+ timer_setup(&preorder_ctrl->reordering_ctrl_timer,
+ rtw_reordering_ctrl_timeout_handler, 0);
}
diff --git a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
index ce1dd6f9036f..9a885e626d1c 100644
--- a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
+++ b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
@@ -87,7 +87,7 @@ static ssize_t proc_set_log_level(struct file *file, const char __user *buffer,
* rtw_drv_proc:
* init/deinit when register/unregister driver
*/
-static const struct rtw_proc_hdl drv_proc_hdls [] = {
+static const struct rtw_proc_hdl drv_proc_hdls[] = {
{"ver_info", proc_get_drv_version, NULL},
{"log_level", proc_get_log_level, proc_set_log_level},
};
@@ -365,7 +365,7 @@ static int proc_get_cam_cache(struct seq_file *m, void *v)
* rtw_adapter_proc:
* init/deinit when register/unregister net_device
*/
-static const struct rtw_proc_hdl adapter_proc_hdls [] = {
+static const struct rtw_proc_hdl adapter_proc_hdls[] = {
{"write_reg", proc_get_dummy, proc_set_write_reg},
{"read_reg", proc_get_read_reg, proc_set_read_reg},
{"fwstate", proc_get_fwstate, NULL},
@@ -600,7 +600,7 @@ ssize_t proc_set_odm_adaptivity(struct file *file, const char __user *buffer, si
* rtw_odm_proc:
* init/deinit when register/unregister net_device, along with rtw_adapter_proc
*/
-static const struct rtw_proc_hdl odm_proc_hdls [] = {
+static const struct rtw_proc_hdl odm_proc_hdls[] = {
{"dbg_comp", proc_get_odm_dbg_comp, proc_set_odm_dbg_comp},
{"dbg_level", proc_get_odm_dbg_level, proc_set_odm_dbg_level},
{"ability", proc_get_odm_ability, proc_set_odm_ability},
diff --git a/drivers/staging/rtlwifi/base.c b/drivers/staging/rtlwifi/base.c
index b88b0e8edd3d..c947def37d31 100644
--- a/drivers/staging/rtlwifi/base.c
+++ b/drivers/staging/rtlwifi/base.c
@@ -465,10 +465,10 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
/* <1> timer */
- setup_timer(&rtlpriv->works.watchdog_timer,
- rtl_watch_dog_timer_callback, (unsigned long)hw);
- setup_timer(&rtlpriv->works.dualmac_easyconcurrent_retrytimer,
- rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw);
+ timer_setup(&rtlpriv->works.watchdog_timer,
+ rtl_watch_dog_timer_callback, 0);
+ timer_setup(&rtlpriv->works.dualmac_easyconcurrent_retrytimer,
+ rtl_easy_concurrent_retrytimer_callback, 0);
/* <2> work queue */
rtlpriv->works.hw = hw;
rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
@@ -637,7 +637,7 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw,
sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
sgi_80 = sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80;
- if ((!sta->ht_cap.ht_supported) && (!sta->vht_cap.vht_supported))
+ if (!sta->ht_cap.ht_supported && !sta->vht_cap.vht_supported)
return;
if (!sgi_40 && !sgi_20)
@@ -734,10 +734,10 @@ u8 rtl_mrate_idx_to_arfr_id(
ret = RATEID_IDX_B;
break;
case RATR_INX_WIRELESS_MC:
- if ((wirelessmode == WIRELESS_MODE_B) ||
- (wirelessmode == WIRELESS_MODE_G) ||
- (wirelessmode == WIRELESS_MODE_N_24G) ||
- (wirelessmode == WIRELESS_MODE_AC_24G))
+ if (wirelessmode == WIRELESS_MODE_B ||
+ wirelessmode == WIRELESS_MODE_G ||
+ wirelessmode == WIRELESS_MODE_N_24G ||
+ wirelessmode == WIRELESS_MODE_AC_24G)
ret = RATEID_IDX_BG;
else
ret = RATEID_IDX_G;
@@ -920,7 +920,7 @@ static u8 _rtl_get_vht_highest_n_rate(struct ieee80211_hw *hw,
else if ((tx_mcs_map & 0x000c) >> 2 ==
IEEE80211_VHT_MCS_SUPPORT_0_8)
hw_rate =
- rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS9];
+ rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS8];
else
hw_rate =
rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS9];
@@ -932,7 +932,7 @@ static u8 _rtl_get_vht_highest_n_rate(struct ieee80211_hw *hw,
else if ((tx_mcs_map & 0x0003) ==
IEEE80211_VHT_MCS_SUPPORT_0_8)
hw_rate =
- rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS9];
+ rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS8];
else
hw_rate =
rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS9];
@@ -948,8 +948,8 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 hw_rate;
- if ((get_rf_type(rtlphy) == RF_2T2R) &&
- (sta->ht_cap.mcs.rx_mask[1] != 0))
+ if (get_rf_type(rtlphy) == RF_2T2R &&
+ sta->ht_cap.mcs.rx_mask[1] != 0)
hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
else
hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
@@ -1277,7 +1277,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
tcb_desc->hw_rate =
_rtl_get_vht_highest_n_rate(hw, sta);
} else {
- if (sta && (sta->ht_cap.ht_supported)) {
+ if (sta && sta->ht_cap.ht_supported) {
tcb_desc->hw_rate =
_rtl_get_highest_n_rate(hw, sta);
} else {
@@ -2080,9 +2080,9 @@ void rtl_watchdog_wq_callback(void *data)
rtlpriv->btcoexist.btc_ops->btc_is_bt_ctrl_lps(rtlpriv))
goto label_lps_done;
- if (((rtlpriv->link_info.num_rx_inperiod +
- rtlpriv->link_info.num_tx_inperiod) > 8) ||
- (rtlpriv->link_info.num_rx_inperiod > 2))
+ if (rtlpriv->link_info.num_rx_inperiod +
+ rtlpriv->link_info.num_tx_inperiod > 8 ||
+ rtlpriv->link_info.num_rx_inperiod > 2)
rtl_lps_leave(hw);
else
rtl_lps_enter(hw);
@@ -2161,10 +2161,9 @@ label_lps_done:
rtl_scan_list_expire(hw);
}
-void rtl_watch_dog_timer_callback(unsigned long data)
+void rtl_watch_dog_timer_callback(struct timer_list *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv *rtlpriv = from_timer(rtlpriv, t, works.watchdog_timer);
queue_delayed_work(rtlpriv->works.rtl_wq,
&rtlpriv->works.watchdog_wq, 0);
@@ -2270,10 +2269,11 @@ void rtl_c2hcmd_wq_callback(void *data)
rtl_c2hcmd_launcher(hw, 1);
}
-void rtl_easy_concurrent_retrytimer_callback(unsigned long data)
+void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv *rtlpriv =
+ from_timer(rtlpriv, t, works.dualmac_easyconcurrent_retrytimer);
+ struct ieee80211_hw *hw = rtlpriv->hw;
struct rtl_priv *buddy_priv = rtlpriv->buddy_priv;
if (!buddy_priv)
@@ -2334,9 +2334,7 @@ static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw,
case IEEE80211_SMPS_AUTOMATIC:/* 0 */
case IEEE80211_SMPS_NUM_MODES:/* 4 */
WARN_ON(1);
- /* Here will get a 'MISSING_BREAK' in Coverity Test, just ignore it.
- * According to Kernel Code, here is right.
- */
+ /* fall through */
case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/
action_frame->u.action.u.ht_smps.smps_control =
WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */
@@ -2552,8 +2550,8 @@ bool rtl_check_beacon_key(struct ieee80211_hw *hw, void *data, unsigned int len)
bcn_key.valid = true;
/* update cur_beacon_keys or compare beacon key */
- if ((rtlpriv->mac80211.link_state != MAC80211_LINKED) &&
- (rtlpriv->mac80211.link_state != MAC80211_LINKED_SCANNING))
+ if (rtlpriv->mac80211.link_state != MAC80211_LINKED &&
+ rtlpriv->mac80211.link_state != MAC80211_LINKED_SCANNING)
return true;
if (!cur_bcn_key->valid) {
@@ -2576,8 +2574,8 @@ bool rtl_check_beacon_key(struct ieee80211_hw *hw, void *data, unsigned int len)
goto chk_exit;
}
- if ((cur_bcn_key->bcn_channel == bcn_key.bcn_channel) &&
- (cur_bcn_key->ht_cap_info == bcn_key.ht_cap_info)) {
+ if (cur_bcn_key->bcn_channel == bcn_key.bcn_channel &&
+ cur_bcn_key->ht_cap_info == bcn_key.ht_cap_info) {
/* Beacon HT info IE, secondary channel offset check */
/* 40M -> 20M */
if (cur_bcn_key->ht_info_infos_0_sco >
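In rtl_make_smps_action() the multi-line Coverity note is replaced by the standard /* fall through */ marker, which is the comment form recognized by GCC's -Wimplicit-fallthrough (later kernels use the fallthrough pseudo-keyword instead). A sketch of the convention on a hypothetical switch:

static unsigned int demo_flags_for_level(int level)
{
        unsigned int flags = 0;

        switch (level) {
        case 2:
                flags |= 0x2;   /* level 2 also gets everything level 1 gets */
                /* fall through */
        case 1:
                flags |= 0x1;
                break;
        default:
                break;
        }

        return flags;
}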
diff --git a/drivers/staging/rtlwifi/base.h b/drivers/staging/rtlwifi/base.h
index 1829712dc4e2..b7f92b32978e 100644
--- a/drivers/staging/rtlwifi/base.h
+++ b/drivers/staging/rtlwifi/base.h
@@ -120,7 +120,7 @@ void rtl_init_rx_config(struct ieee80211_hw *hw);
void rtl_init_rfkill(struct ieee80211_hw *hw);
void rtl_deinit_rfkill(struct ieee80211_hw *hw);
-void rtl_watch_dog_timer_callback(unsigned long data);
+void rtl_watch_dog_timer_callback(struct timer_list *t);
void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
@@ -176,7 +176,7 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
u8 rtl_tid_to_ac(u8 tid);
-void rtl_easy_concurrent_retrytimer_callback(unsigned long data);
+void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t);
extern struct rtl_global_var rtl_global_var;
void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
bool rtl_check_beacon_key(struct ieee80211_hw *hw, void *data,
diff --git a/drivers/staging/rtlwifi/core.c b/drivers/staging/rtlwifi/core.c
index d33847d0550d..b00e51df984f 100644
--- a/drivers/staging/rtlwifi/core.c
+++ b/drivers/staging/rtlwifi/core.c
@@ -49,43 +49,6 @@ u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {
42, 58, 106, 122, 138, 155, 171
};
-void rtl_addr_delay(u32 addr)
-{
- if (addr == 0xfe)
- mdelay(50);
- else if (addr == 0xfd)
- msleep(5);
- else if (addr == 0xfc)
- msleep(1);
- else if (addr == 0xfb)
- usleep_range(50, 100);
- else if (addr == 0xfa)
- usleep_range(5, 10);
- else if (addr == 0xf9)
- usleep_range(1, 2);
-}
-
-void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
- u32 mask, u32 data)
-{
- if (addr >= 0xf9 && addr <= 0xfe) {
- rtl_addr_delay(addr);
- } else {
- rtl_set_rfreg(hw, rfpath, addr, mask, data);
- udelay(1);
- }
-}
-
-void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
-{
- if (addr >= 0xf9 && addr <= 0xfe) {
- rtl_addr_delay(addr);
- } else {
- rtl_set_bbreg(hw, addr, MASKDWORD, data);
- udelay(1);
- }
-}
-
static void rtl_fw_do_work(const struct firmware *firmware, void *context,
bool is_wow)
{
@@ -153,7 +116,7 @@ static int rtl_op_start(struct ieee80211_hw *hw)
mutex_lock(&rtlpriv->locks.conf_mutex);
err = rtlpriv->intf_ops->adapter_start(hw);
if (!err)
- rtl_watch_dog_timer_callback((unsigned long)hw);
+ rtl_watch_dog_timer_callback(&rtlpriv->works.watchdog_timer);
mutex_unlock(&rtlpriv->locks.conf_mutex);
return err;
}
@@ -339,9 +302,9 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&rtlpriv->locks.conf_mutex);
/* Free beacon resources */
- if ((vif->type == NL80211_IFTYPE_AP) ||
- (vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
if (mac->beacon_enabled == 1) {
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
@@ -449,7 +412,8 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
for (i = 0; i < wow->n_patterns; i++) {
memset(&rtl_pattern, 0, sizeof(struct rtl_wow_pattern));
memset(mask, 0, MAX_WOL_BIT_MASK_SIZE);
- if (patterns[i].pattern_len > MAX_WOL_PATTERN_SIZE) {
+ if (patterns[i].pattern_len < 0 ||
+ patterns[i].pattern_len > MAX_WOL_PATTERN_SIZE) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_WARNING,
"Pattern[%d] is too long\n", i);
continue;
@@ -856,8 +820,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
* here just used for linked scanning, & linked
* and nolink check bssid is set in set network_type
*/
- if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
- (mac->link_state >= MAC80211_LINKED)) {
+ if (changed_flags & FIF_BCN_PRBRESP_PROMISC &&
+ mac->link_state >= MAC80211_LINKED) {
if (mac->opmode != NL80211_IFTYPE_AP &&
mac->opmode != NL80211_IFTYPE_MESH_POINT) {
if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
@@ -1078,10 +1042,10 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
mutex_lock(&rtlpriv->locks.conf_mutex);
- if ((vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_AP) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) {
- if ((changed & BSS_CHANGED_BEACON) ||
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ if (changed & BSS_CHANGED_BEACON ||
(changed & BSS_CHANGED_BEACON_ENABLED &&
bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 0) {
@@ -1160,7 +1124,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (rtlpriv->dm.supp_phymode_switch) {
if (sta->ht_cap.ht_supported)
rtl_send_smps_action(hw, sta,
- IEEE80211_SMPS_STATIC);
+ IEEE80211_SMPS_STATIC);
}
if (rtlhal->current_bandtype == BAND_ON_5G) {
@@ -1224,7 +1188,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
cfg80211_unlink_bss(hw->wiphy, bss);
cfg80211_put_bss(hw->wiphy, bss);
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "cfg80211_unlink !!\n");
+ "cfg80211_unlink !!\n");
}
eth_zero_addr(mac->bssid);
@@ -1621,8 +1585,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -ENOSPC; /*User disabled HW-crypto */
}
/* To support IBSS, use sw-crypto for GTK */
- if (((vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) &&
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -ENOSPC;
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
@@ -1697,7 +1661,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->cfg->ops->enable_hw_sec(hw);
}
} else {
- if ((!group_key) || (vif->type == NL80211_IFTYPE_ADHOC) ||
+ if (!group_key || vif->type == NL80211_IFTYPE_ADHOC ||
rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) {
if (rtlpriv->sec.pairwise_enc_algorithm ==
NO_ENCRYPTION &&
@@ -1885,7 +1849,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
break;
case PWR_CMD_WRITE:
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "%s(): PWR_CMD_WRITE\n", __func__);
+ "%s(): PWR_CMD_WRITE\n", __func__);
offset = GET_PWR_CFG_OFFSET(cfg_cmd);
/*Read the value from system register*/
diff --git a/drivers/staging/rtlwifi/core.h b/drivers/staging/rtlwifi/core.h
index 782ac2fc4b28..4c2b69412621 100644
--- a/drivers/staging/rtlwifi/core.h
+++ b/drivers/staging/rtlwifi/core.h
@@ -75,10 +75,6 @@ enum dm_dig_connect_e {
extern const struct ieee80211_ops rtl_ops;
void rtl_fw_cb(const struct firmware *firmware, void *context);
void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context);
-void rtl_addr_delay(u32 addr);
-void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
- u32 mask, u32 data);
-void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data);
bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
bool rtl_btc_status_false(void);
void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igval);
diff --git a/drivers/staging/rtlwifi/debug.c b/drivers/staging/rtlwifi/debug.c
index 7446d71c41d1..be8d72cb63db 100644
--- a/drivers/staging/rtlwifi/debug.c
+++ b/drivers/staging/rtlwifi/debug.c
@@ -33,7 +33,7 @@ void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *fmt, ...)
{
if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
- (level <= rtlpriv->cfg->mod_params->debug_level))) {
+ level <= rtlpriv->cfg->mod_params->debug_level)) {
struct va_format vaf;
va_list args;
@@ -52,7 +52,7 @@ void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *fmt, ...)
{
if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
- (level <= rtlpriv->cfg->mod_params->debug_level))) {
+ level <= rtlpriv->cfg->mod_params->debug_level)) {
struct va_format vaf;
va_list args;
@@ -127,10 +127,10 @@ static int rtl_debug_get_mac_page(struct seq_file *m, void *v)
return 0;
}
-#define RTL_DEBUG_IMPL_MAC_SERIES(page, addr) \
-struct rtl_debugfs_priv rtl_debug_priv_mac_ ##page = { \
- .cb_read = rtl_debug_get_mac_page, \
- .cb_data = addr, \
+#define RTL_DEBUG_IMPL_MAC_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_mac_ ##page = { \
+ .cb_read = rtl_debug_get_mac_page, \
+ .cb_data = addr, \
}
RTL_DEBUG_IMPL_MAC_SERIES(0, 0x0000);
@@ -169,10 +169,10 @@ static int rtl_debug_get_bb_page(struct seq_file *m, void *v)
return 0;
}
-#define RTL_DEBUG_IMPL_BB_SERIES(page, addr) \
-struct rtl_debugfs_priv rtl_debug_priv_bb_ ##page = { \
- .cb_read = rtl_debug_get_bb_page, \
- .cb_data = addr, \
+#define RTL_DEBUG_IMPL_BB_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_bb_ ##page = { \
+ .cb_read = rtl_debug_get_bb_page, \
+ .cb_data = addr, \
}
RTL_DEBUG_IMPL_BB_SERIES(8, 0x0800);
@@ -216,10 +216,10 @@ static int rtl_debug_get_reg_rf(struct seq_file *m, void *v)
return 0;
}
-#define RTL_DEBUG_IMPL_RF_SERIES(page, addr) \
-struct rtl_debugfs_priv rtl_debug_priv_rf_ ##page = { \
- .cb_read = rtl_debug_get_reg_rf, \
- .cb_data = addr, \
+#define RTL_DEBUG_IMPL_RF_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_rf_ ##page = { \
+ .cb_read = rtl_debug_get_reg_rf, \
+ .cb_data = addr, \
}
RTL_DEBUG_IMPL_RF_SERIES(a, RF90_PATH_A);
@@ -271,10 +271,10 @@ static int rtl_debug_get_cam_register(struct seq_file *m, void *v)
return 0;
}
-#define RTL_DEBUG_IMPL_CAM_SERIES(page, addr) \
-struct rtl_debugfs_priv rtl_debug_priv_cam_ ##page = { \
- .cb_read = rtl_debug_get_cam_register, \
- .cb_data = addr, \
+#define RTL_DEBUG_IMPL_CAM_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_cam_ ##page = { \
+ .cb_read = rtl_debug_get_cam_register, \
+ .cb_data = addr, \
}
RTL_DEBUG_IMPL_CAM_SERIES(1, 0);
diff --git a/drivers/staging/rtlwifi/efuse.c b/drivers/staging/rtlwifi/efuse.c
index 6d5e657017c6..d74c80d512c9 100644
--- a/drivers/staging/rtlwifi/efuse.c
+++ b/drivers/staging/rtlwifi/efuse.c
@@ -252,12 +252,11 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
sizeof(u8), GFP_ATOMIC);
if (!efuse_tbl)
return;
- efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC);
+ efuse_word = kcalloc(EFUSE_MAX_WORD_UNIT, sizeof(u16 *), GFP_ATOMIC);
if (!efuse_word)
goto out;
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
- efuse_word[i] = kzalloc(efuse_max_section * sizeof(u16),
- GFP_ATOMIC);
+ efuse_word[i] = kcalloc(efuse_max_section, sizeof(u16), GFP_ATOMIC);
if (!efuse_word[i])
goto done;
}
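
The efuse allocations above switch from kzalloc(n * size, ...) to kcalloc(n, size, ...), which also returns zeroed memory but additionally checks the multiplication for overflow and returns NULL instead of handing back a short buffer. A small sketch (hypothetical helper name):

#include <linux/slab.h>

static u16 *alloc_word_table(size_t n)
{
	/* kcalloc() == zeroed allocation of n * sizeof(u16) bytes,
	 * failing cleanly if n * sizeof(u16) would overflow. */
	return kcalloc(n, sizeof(u16), GFP_ATOMIC);
}
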
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
index edbf6af1c8b7..448b1379d220 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
@@ -3391,8 +3391,10 @@ halmac_cfg_txbf_88xx(struct halmac_adapter *halmac_adapter, u8 userid,
switch (bw) {
case HALMAC_BW_80:
temp42C |= BIT_R_TXBF0_80M;
+ /* fall through */
case HALMAC_BW_40:
temp42C |= BIT_R_TXBF0_40M;
+ /* fall through */
case HALMAC_BW_20:
temp42C |= BIT_R_TXBF0_20M;
break;
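
The /* fall through */ annotations added above mark the cascade as intentional so -Wimplicit-fallthrough and static checkers stay quiet: each wider bandwidth deliberately accumulates the bits of the narrower ones before the final break. A reduced sketch with hypothetical names:

#include <linux/bitops.h>

static u32 txbf_enable_bits(int bw)	/* hypothetical helper */
{
	u32 val = 0;

	switch (bw) {
	case 80:
		val |= BIT(2);
		/* fall through - 80 MHz also needs the 40 MHz path */
	case 40:
		val |= BIT(1);
		/* fall through - 40 MHz also needs the 20 MHz path */
	case 20:
		val |= BIT(0);
		break;
	}
	return val;
}
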
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
index 544f638ed3ef..c4cb217d3d1f 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
@@ -276,17 +276,13 @@ halmac_dump_efuse_drv_88xx(struct halmac_adapter *halmac_adapter)
if (!halmac_adapter->hal_efuse_map) {
halmac_adapter->hal_efuse_map = kzalloc(efuse_size, GFP_KERNEL);
- if (!halmac_adapter->hal_efuse_map) {
- pr_err("[ERR]halmac allocate efuse map Fail!!\n");
+ if (!halmac_adapter->hal_efuse_map)
return HALMAC_RET_MALLOC_FAIL;
- }
}
efuse_map = kzalloc(efuse_size, GFP_KERNEL);
- if (!efuse_map) {
- /* out of memory */
+ if (!efuse_map)
return HALMAC_RET_MALLOC_FAIL;
- }
if (halmac_read_hw_efuse_88xx(halmac_adapter, 0, efuse_size,
efuse_map) != HALMAC_RET_SUCCESS) {
@@ -325,10 +321,8 @@ halmac_dump_efuse_fw_88xx(struct halmac_adapter *halmac_adapter)
if (!halmac_adapter->hal_efuse_map) {
halmac_adapter->hal_efuse_map = kzalloc(
halmac_adapter->hw_config_info.efuse_size, GFP_KERNEL);
- if (!halmac_adapter->hal_efuse_map) {
- /* out of memory */
+ if (!halmac_adapter->hal_efuse_map)
return HALMAC_RET_MALLOC_FAIL;
- }
}
if (!halmac_adapter->hal_efuse_map_valid) {
@@ -537,10 +531,8 @@ halmac_read_logical_efuse_map_88xx(struct halmac_adapter *halmac_adapter,
if (!halmac_adapter->hal_efuse_map_valid) {
efuse_map = kzalloc(efuse_size, GFP_KERNEL);
- if (!efuse_map) {
- pr_err("[ERR]halmac allocate local efuse map Fail!!\n");
+ if (!efuse_map)
return HALMAC_RET_MALLOC_FAIL;
- }
status = halmac_func_read_efuse_88xx(halmac_adapter, 0,
efuse_size, efuse_map);
@@ -554,7 +546,6 @@ halmac_read_logical_efuse_map_88xx(struct halmac_adapter *halmac_adapter,
halmac_adapter->hal_efuse_map =
kzalloc(efuse_size, GFP_KERNEL);
if (!halmac_adapter->hal_efuse_map) {
- pr_err("[ERR]halmac allocate efuse map Fail!!\n");
kfree(efuse_map);
return HALMAC_RET_MALLOC_FAIL;
}
@@ -592,10 +583,8 @@ halmac_func_write_logical_efuse_88xx(struct halmac_adapter *halmac_adapter,
driver_adapter = halmac_adapter->driver_adapter;
eeprom_map = kzalloc(eeprom_size, GFP_KERNEL);
- if (!eeprom_map) {
- /* out of memory */
+ if (!eeprom_map)
return HALMAC_RET_MALLOC_FAIL;
- }
memset(eeprom_map, 0xFF, eeprom_size);
status = halmac_read_logical_efuse_map_88xx(halmac_adapter, eeprom_map);
@@ -687,10 +676,8 @@ halmac_func_pg_efuse_by_map_88xx(struct halmac_adapter *halmac_adapter,
enum halmac_ret_status status = HALMAC_RET_SUCCESS;
eeprom_mask_updated = kzalloc(eeprom_mask_size, GFP_KERNEL);
- if (!eeprom_mask_updated) {
- /* out of memory */
+ if (!eeprom_mask_updated)
return HALMAC_RET_MALLOC_FAIL;
- }
status = halmac_update_eeprom_mask_88xx(halmac_adapter, pg_efuse_info,
eeprom_mask_updated);
@@ -743,12 +730,10 @@ halmac_update_eeprom_mask_88xx(struct halmac_adapter *halmac_adapter,
driver_adapter = halmac_adapter->driver_adapter;
eeprom_map = kzalloc(eeprom_size, GFP_KERNEL);
- if (!eeprom_map) {
- /* out of memory */
+ if (!eeprom_map)
return HALMAC_RET_MALLOC_FAIL;
- }
- memset(eeprom_map, 0xFF, eeprom_size);
+ memset(eeprom_map, 0xFF, eeprom_size);
memset(eeprom_mask_updated, 0x00, pg_efuse_info->efuse_mask_size);
status = halmac_read_logical_efuse_map_88xx(halmac_adapter, eeprom_map);
@@ -1036,7 +1021,7 @@ halmac_dlfw_to_mem_88xx(struct halmac_adapter *halmac_adapter, u8 *ram_code,
if (halmac_send_fwpkt_88xx(
halmac_adapter, code_ptr + mem_offset,
send_pkt_size) != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_fwpkt_88xx fail!!");
+ pr_err("halmac_send_fwpkt_88xx fail!!\n");
return HALMAC_RET_DLFW_FAIL;
}
@@ -1046,7 +1031,7 @@ halmac_dlfw_to_mem_88xx(struct halmac_adapter *halmac_adapter, u8 *ram_code,
halmac_adapter->hw_config_info.txdesc_size,
dest + mem_offset, send_pkt_size,
first_part) != HALMAC_RET_SUCCESS) {
- pr_err("halmac_iddma_dlfw_88xx fail!!");
+ pr_err("halmac_iddma_dlfw_88xx fail!!\n");
return HALMAC_RET_DLFW_FAIL;
}
@@ -1057,7 +1042,7 @@ halmac_dlfw_to_mem_88xx(struct halmac_adapter *halmac_adapter, u8 *ram_code,
if (halmac_check_fw_chksum_88xx(halmac_adapter, dest) !=
HALMAC_RET_SUCCESS) {
- pr_err("halmac_check_fw_chksum_88xx fail!!");
+ pr_err("halmac_check_fw_chksum_88xx fail!!\n");
return HALMAC_RET_DLFW_FAIL;
}
@@ -2549,10 +2534,8 @@ halmac_parse_efuse_data_88xx(struct halmac_adapter *halmac_adapter, u8 *c2h_buf,
halmac_adapter->efuse_segment_size = segment_size;
eeprom_map = kzalloc(eeprom_size, GFP_KERNEL);
- if (!eeprom_map) {
- /* out of memory */
+ if (!eeprom_map)
return HALMAC_RET_MALLOC_FAIL;
- }
memset(eeprom_map, 0xFF, eeprom_size);
spin_lock(&halmac_adapter->efuse_lock);
@@ -3355,10 +3338,8 @@ enum halmac_ret_status halmac_query_dump_logical_efuse_status_88xx(
*size = eeprom_size;
eeprom_map = kzalloc(eeprom_size, GFP_KERNEL);
- if (!eeprom_map) {
- /* out of memory */
+ if (!eeprom_map)
return HALMAC_RET_MALLOC_FAIL;
- }
memset(eeprom_map, 0xFF, eeprom_size);
if (halmac_eeprom_parser_88xx(
@@ -3579,10 +3560,8 @@ halmac_verify_send_rsvd_page_88xx(struct halmac_adapter *halmac_adapter)
rsvd_buf = kzalloc(h2c_pkt_verify_size, GFP_KERNEL);
- if (!rsvd_buf) {
- /*pr_err("[ERR]rsvd buffer malloc fail!!\n");*/
+ if (!rsvd_buf)
return HALMAC_RET_MALLOC_FAIL;
- }
memset(rsvd_buf, (u8)h2c_pkt_verify_payload, h2c_pkt_verify_size);
@@ -3599,7 +3578,6 @@ halmac_verify_send_rsvd_page_88xx(struct halmac_adapter *halmac_adapter)
GFP_KERNEL);
if (!rsvd_page) {
- pr_err("[ERR]rsvd page malloc fail!!\n");
kfree(rsvd_buf);
return HALMAC_RET_MALLOC_FAIL;
}
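
The pr_err() calls dropped throughout halmac_func_88xx.c (and in rc.c further down) follow the checkpatch rule that out-of-memory messages after k*alloc() are redundant: the slab allocator already logs a warning with a backtrace when a GFP_KERNEL allocation fails, so the caller only needs to propagate the error. A minimal sketch of the resulting pattern (hypothetical wrapper name, real halmac return codes):

#include <linux/slab.h>

static enum halmac_ret_status alloc_scratch(u8 **out, u32 size)
{
	u8 *buf = kzalloc(size, GFP_KERNEL);

	if (!buf)
		return HALMAC_RET_MALLOC_FAIL;	/* allocator already logged the failure */

	*out = buf;
	return HALMAC_RET_SUCCESS;
}
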
diff --git a/drivers/staging/rtlwifi/halmac/rtl_halmac.c b/drivers/staging/rtlwifi/halmac/rtl_halmac.c
index 6448a8bfc14b..66f0a6dfc52c 100644
--- a/drivers/staging/rtlwifi/halmac/rtl_halmac.c
+++ b/drivers/staging/rtlwifi/halmac/rtl_halmac.c
@@ -617,7 +617,7 @@ static int _send_general_info(struct rtl_priv *rtlpriv)
RT_TRACE(rtlpriv, COMP_HALMAC, DBG_WARNING,
"%s: halmac_send_general_info() fail because fw not dl!\n",
__func__);
- /* fallthrough here */
+ /* fall through */
default:
return -1;
}
diff --git a/drivers/staging/rtlwifi/pci.c b/drivers/staging/rtlwifi/pci.c
index 4035b8835bd1..70a64a5f564a 100644
--- a/drivers/staging/rtlwifi/pci.c
+++ b/drivers/staging/rtlwifi/pci.c
@@ -649,7 +649,7 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
prio, ring->idx,
skb_queue_len(&ring->queue));
- ieee80211_wake_queue(hw, skb_get_queue_mapping (skb));
+ ieee80211_wake_queue(hw, skb_get_queue_mapping(skb));
}
tx_status_ok:
skb = NULL;
diff --git a/drivers/staging/rtlwifi/phydm/halphyrf_ce.c b/drivers/staging/rtlwifi/phydm/halphyrf_ce.c
index 684e383201d6..5986892e767e 100644
--- a/drivers/staging/rtlwifi/phydm/halphyrf_ce.c
+++ b/drivers/staging/rtlwifi/phydm/halphyrf_ce.c
@@ -798,7 +798,7 @@ void odm_txpowertracking_callback_thermal_meter(void *dm_void)
if (xtal_offset_eanble != 0 &&
cali_info->txpowertrack_control &&
- (rtlefu->eeprom_thermalmeter != 0xff)) {
+ rtlefu->eeprom_thermalmeter != 0xff) {
ODM_RT_TRACE(
dm, ODM_COMP_TX_PWR_TRACK,
"**********Enter Xtal Tracking**********\n");
diff --git a/drivers/staging/rtlwifi/phydm/phydm.c b/drivers/staging/rtlwifi/phydm/phydm.c
index 37888c3087a4..8b2a180cc13c 100644
--- a/drivers/staging/rtlwifi/phydm/phydm.c
+++ b/drivers/staging/rtlwifi/phydm/phydm.c
@@ -1338,7 +1338,7 @@ static void odm_update_power_training_state(struct phy_dm_struct *dm)
return;
/* First connect */
- if ((dm->is_linked) && !dig_tab->is_media_connect_0) {
+ if (dm->is_linked && !dig_tab->is_media_connect_0) {
dm->PT_score = 0;
dm->is_change_state = true;
dm->phy_dbg_info.num_qry_phy_status_ofdm = 0;
@@ -1360,7 +1360,7 @@ static void odm_update_power_training_state(struct phy_dm_struct *dm)
(u32)(dm->phy_dbg_info.num_qry_phy_status_cck);
if ((false_alm_cnt->cnt_cca_all > 31 && rx_pkt_cnt > 31) &&
- (false_alm_cnt->cnt_cca_all >= rx_pkt_cnt)) {
+ false_alm_cnt->cnt_cca_all >= rx_pkt_cnt) {
if ((rx_pkt_cnt + (rx_pkt_cnt >> 1)) <=
false_alm_cnt->cnt_cca_all)
score = 0;
@@ -1697,7 +1697,7 @@ static u8 phydm_calculate_fc(void *dm_void, u32 channel, u32 bw, u32 second_ch,
fc = 2412 + (channel - 1) * 5;
- if (bw == 40 && (second_ch == PHYDM_ABOVE)) {
+ if (bw == 40 && second_ch == PHYDM_ABOVE) {
if (channel >= 10) {
ODM_RT_TRACE(
dm, ODM_COMP_API,
@@ -1774,7 +1774,7 @@ static u8 phydm_calculate_intf_distance(void *dm_void, u32 bw, u32 fc,
"[f_l, fc, fh] = [ %d, %d, %d ], f_int = ((%d))\n", bw_low,
fc, bw_up, f_interference);
- if ((f_interference >= bw_low) && (f_interference <= bw_up)) {
+ if (f_interference >= bw_low && f_interference <= bw_up) {
int_distance = (fc >= f_interference) ? (fc - f_interference) :
(f_interference - fc);
tone_idx_tmp =
diff --git a/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c b/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c
index 4f9e267409f6..103a774f9c8f 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c
@@ -762,7 +762,7 @@ void phydm_adaptivity(void *dm_void)
dm->rssi_min, adaptivity->adajust_igi_level,
dm->adaptivity_flag, dm->adaptivity_enable);
- if (adaptivity->dynamic_link_adaptivity && (!dm->is_linked) &&
+ if (adaptivity->dynamic_link_adaptivity && !dm->is_linked &&
!dm->adaptivity_enable) {
phydm_set_edcca_threshold(dm, 0x7f, 0x7f);
ODM_RT_TRACE(
@@ -773,7 +773,7 @@ void phydm_adaptivity(void *dm_void)
if (dm->support_ic_type &
(ODM_IC_11AC_GAIN_IDX_EDCCA | ODM_IC_11N_GAIN_IDX_EDCCA)) {
- if ((adaptivity->adajust_igi_level > IGI) &&
+ if (adaptivity->adajust_igi_level > IGI &&
dm->adaptivity_enable)
diff = adaptivity->adajust_igi_level - IGI;
diff --git a/drivers/staging/rtlwifi/phydm/phydm_debug.c b/drivers/staging/rtlwifi/phydm/phydm_debug.c
index a5f90afdae9b..e18ba2cca2bd 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_debug.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_debug.c
@@ -29,6 +29,7 @@
#include "mp_precomp.h"
#include "phydm_precomp.h"
+#include <linux/kernel.h>
bool phydm_api_set_txagc(struct phy_dm_struct *, u32, enum odm_rf_radio_path,
u8, bool);
@@ -1441,9 +1442,9 @@ static void phydm_get_per_path_txagc(void *dm_void, u8 path, u32 *_used,
u32 out_len = *_out_len;
if (((dm->support_ic_type & (ODM_RTL8822B | ODM_RTL8197F)) &&
- (path <= ODM_RF_PATH_B)) ||
+ path <= ODM_RF_PATH_B) ||
((dm->support_ic_type & (ODM_RTL8821C)) &&
- (path <= ODM_RF_PATH_A))) {
+ path <= ODM_RF_PATH_A)) {
for (rate_idx = 0; rate_idx <= 0x53; rate_idx++) {
if (rate_idx == ODM_RATE1M)
PHYDM_SNPRINTF(output + used, out_len - used,
@@ -2107,8 +2108,7 @@ void phydm_cmd_parser(struct phy_dm_struct *dm, char input[][MAX_ARGV],
/* Parsing Cmd ID */
if (input_num) {
- phydm_ary_size =
- sizeof(phy_dm_ary) / sizeof(struct phydm_command);
+ phydm_ary_size = ARRAY_SIZE(phy_dm_ary);
for (i = 0; i < phydm_ary_size; i++) {
if (strcmp(phy_dm_ary[i].name, input[0]) == 0) {
id = phy_dm_ary[i].id;
@@ -2530,7 +2530,7 @@ void phydm_cmd_parser(struct phy_dm_struct *dm, char input[][MAX_ARGV],
}
/* NMH trigger */
- if ((var1[0] <= 2) && (var1[0] != 0)) {
+ if (var1[0] <= 2 && var1[0] != 0) {
ccx_info->echo_NHM_en = true;
ccx_info->echo_IGI =
(u8)odm_get_bb_reg(dm, 0xC50, MASKBYTE0);
@@ -2808,7 +2808,7 @@ void phydm_fw_trace_handler(void *dm_void, u8 *cmd_buf, u8 cmd_len)
freg_num = (buf_0 & 0xf);
c2h_seq = (buf_0 & 0xf0) >> 4;
- if ((c2h_seq != dm->pre_c2h_seq) && !dm->fw_buff_is_enpty) {
+ if (c2h_seq != dm->pre_c2h_seq && !dm->fw_buff_is_enpty) {
dm->fw_debug_trace[dm->c2h_cmd_start] = '\0';
ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE,
"[FW Dbg Queue Overflow] %s\n",
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dig.c b/drivers/staging/rtlwifi/phydm/phydm_dig.c
index 31a4f3fcad19..f851ff12dc35 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_dig.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_dig.c
@@ -198,7 +198,7 @@ static u8 odm_forbidden_igi_check(void *dm_void, u8 dig_dynamic_min,
if ((fa_cnt->cnt_all >
(fa_cnt->cnt_all_pre + (fa_cnt->cnt_all_pre >> 3) +
(fa_cnt->cnt_all_pre >> 4))) &&
- (current_igi < dig_tab->pre_ig_value)) {
+ current_igi < dig_tab->pre_ig_value) {
if (dig_tab->large_fa_hit != 3)
dig_tab->large_fa_hit++;
@@ -319,7 +319,7 @@ void odm_write_dig(void *dm_void, u8 current_igi)
__func__, ODM_REG(IGI_A, dm), ODM_BIT(IGI, dm));
/* 1 Check initial gain by upper bound */
- if ((!dig_tab->is_psd_in_progress) && dm->is_linked) {
+ if (!dig_tab->is_psd_in_progress && dm->is_linked) {
if (current_igi > dig_tab->rx_gain_range_max) {
ODM_RT_TRACE(
dm, ODM_COMP_DIG,
@@ -353,7 +353,7 @@ void odm_write_dig(void *dm_void, u8 current_igi)
/*Add by YuChen for USB IO too slow issue*/
if ((dm->support_ability & ODM_BB_ADAPTIVITY) &&
- (current_igi > dig_tab->cur_ig_value)) {
+ current_igi > dig_tab->cur_ig_value) {
dig_tab->cur_ig_value = current_igi;
phydm_adaptivity(dm);
}
@@ -388,7 +388,7 @@ void odm_pause_dig(void *dm_void, enum phydm_pause_type pause_type,
ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s()=========> level = %d\n", __func__,
pause_level);
- if ((dig_tab->pause_dig_level == 0) &&
+ if (dig_tab->pause_dig_level == 0 &&
(!(dm->support_ability & ODM_BB_DIG) ||
!(dm->support_ability & ODM_BB_FA_CNT))) {
ODM_RT_TRACE(
@@ -490,6 +490,8 @@ void odm_pause_dig(void *dm_void, enum phydm_pause_type pause_type,
break;
}
+ /* pin max_level to be >= 0 */
+ max_level = max_t(s8, 0, max_level);
/* write IGI of lower level */
odm_write_dig(dm, dig_tab->pause_dig_value[max_level]);
ODM_RT_TRACE(dm, ODM_COMP_DIG,
@@ -718,7 +720,7 @@ void odm_DIG(void *dm_void)
/* 4 Modify DIG upper bound for 92E, 8723A\B, 8821 & 8812 BT */
if ((dm->support_ic_type & (ODM_RTL8192E | ODM_RTL8723B |
ODM_RTL8812 | ODM_RTL8821)) &&
- (dm->is_bt_limited_dig == 1)) {
+ dm->is_bt_limited_dig == 1) {
offset = 10;
ODM_RT_TRACE(
dm, ODM_COMP_DIG,
@@ -817,8 +819,8 @@ void odm_DIG(void *dm_void)
if (dm->is_linked && !first_connect) {
ODM_RT_TRACE(dm, ODM_COMP_DIG, "Beacon Num (%d)\n",
dm->phy_dbg_info.num_qry_beacon_pkt);
- if ((dm->phy_dbg_info.num_qry_beacon_pkt < 5) &&
- (dm->bsta_state)) {
+ if (dm->phy_dbg_info.num_qry_beacon_pkt < 5 &&
+ dm->bsta_state) {
dig_tab->rx_gain_range_min = 0x1c;
ODM_RT_TRACE(
dm, ODM_COMP_DIG,
@@ -880,9 +882,9 @@ void odm_DIG(void *dm_void)
current_igi = current_igi - 2;
/* 4 Abnormal # beacon case */
- if ((dm->phy_dbg_info.num_qry_beacon_pkt < 5) &&
- (fa_cnt->cnt_all < DM_DIG_FA_TH1) &&
- (dm->bsta_state)) {
+ if (dm->phy_dbg_info.num_qry_beacon_pkt < 5 &&
+ fa_cnt->cnt_all < DM_DIG_FA_TH1 &&
+ dm->bsta_state) {
current_igi = dig_tab->rx_gain_range_min;
ODM_RT_TRACE(
dm, ODM_COMP_DIG,
@@ -1319,7 +1321,7 @@ void odm_pause_cck_packet_detection(void *dm_void,
ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s()=========> level = %d\n", __func__,
pause_level);
- if ((dig_tab->pause_cckpd_level == 0) &&
+ if (dig_tab->pause_cckpd_level == 0 &&
(!(dm->support_ability & ODM_BB_CCK_PD) ||
!(dm->support_ability & ODM_BB_FA_CNT))) {
ODM_RT_TRACE(
diff --git a/drivers/staging/rtlwifi/phydm/phydm_interface.c b/drivers/staging/rtlwifi/phydm/phydm_interface.c
index 102576a46c04..2f9bf6708c54 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_interface.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_interface.c
@@ -179,29 +179,6 @@ void ODM_sleep_ms(u32 ms) { msleep(ms); }
void ODM_sleep_us(u32 us) { usleep_range(us, us + 1); }
-void odm_set_timer(struct phy_dm_struct *dm, struct timer_list *timer,
- u32 ms_delay)
-{
- mod_timer(timer, jiffies + msecs_to_jiffies(ms_delay));
-}
-
-void odm_initialize_timer(struct phy_dm_struct *dm, struct timer_list *timer,
- void *call_back_func, void *context,
- const char *sz_id)
-{
- init_timer(timer);
- timer->function = call_back_func;
- timer->data = (unsigned long)dm;
- /*mod_timer(timer, jiffies+RTL_MILISECONDS_TO_JIFFIES(10)); */
-}
-
-void odm_cancel_timer(struct phy_dm_struct *dm, struct timer_list *timer)
-{
- del_timer(timer);
-}
-
-void odm_release_timer(struct phy_dm_struct *dm, struct timer_list *timer) {}
-
static u8 phydm_trans_h2c_id(struct phy_dm_struct *dm, u8 phydm_h2c_id)
{
u8 platform_h2c_id = phydm_h2c_id;
diff --git a/drivers/staging/rtlwifi/phydm/phydm_interface.h b/drivers/staging/rtlwifi/phydm/phydm_interface.h
index d315c79c962a..53ba5585bf33 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_interface.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_interface.h
@@ -172,17 +172,6 @@ void ODM_sleep_ms(u32 ms);
void ODM_sleep_us(u32 us);
-void odm_set_timer(struct phy_dm_struct *dm, struct timer_list *timer,
- u32 ms_delay);
-
-void odm_initialize_timer(struct phy_dm_struct *dm, struct timer_list *timer,
- void *call_back_func, void *context,
- const char *sz_id);
-
-void odm_cancel_timer(struct phy_dm_struct *dm, struct timer_list *timer);
-
-void odm_release_timer(struct phy_dm_struct *dm, struct timer_list *timer);
-
/*
* ODM FW relative API.
*/
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c
index 4e7946019fcb..29d19f2b300e 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c
@@ -26,6 +26,7 @@
/*Image2HeaderVersion: 3.2*/
#include "../mp_precomp.h"
#include "../phydm_precomp.h"
+#include <linux/kernel.h>
static bool check_positive(struct phy_dm_struct *dm, const u32 condition1,
const u32 condition2, const u32 condition3,
@@ -1350,7 +1351,6 @@ void odm_read_and_config_mp_8822b_agc_tab(struct phy_dm_struct *dm)
u32 i = 0;
u8 c_cond;
bool is_matched = true, is_skipped = false;
- u32 array_len = sizeof(array_mp_8822b_agc_tab) / sizeof(u32);
u32 *array = array_mp_8822b_agc_tab;
u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
@@ -1358,7 +1358,7 @@ void odm_read_and_config_mp_8822b_agc_tab(struct phy_dm_struct *dm)
ODM_RT_TRACE(dm, ODM_COMP_INIT,
"===> %s\n", __func__);
- for (; (i + 1) < array_len; i = i + 2) {
+ for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_agc_tab); i = i + 2) {
v1 = array[i];
v2 = array[i + 1];
@@ -1843,7 +1843,6 @@ void odm_read_and_config_mp_8822b_phy_reg(struct phy_dm_struct *dm)
u32 i = 0;
u8 c_cond;
bool is_matched = true, is_skipped = false;
- u32 array_len = sizeof(array_mp_8822b_phy_reg) / sizeof(u32);
u32 *array = array_mp_8822b_phy_reg;
u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
@@ -1851,7 +1850,7 @@ void odm_read_and_config_mp_8822b_phy_reg(struct phy_dm_struct *dm)
ODM_RT_TRACE(dm, ODM_COMP_INIT,
"===> %s\n", __func__);
- for (; (i + 1) < array_len; i = i + 2) {
+ for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_phy_reg); i = i + 2) {
v1 = array[i];
v2 = array[i + 1];
@@ -1947,7 +1946,6 @@ static u32 array_mp_8822b_phy_reg_pg[] = {
void odm_read_and_config_mp_8822b_phy_reg_pg(struct phy_dm_struct *dm)
{
u32 i = 0;
- u32 array_len = sizeof(array_mp_8822b_phy_reg_pg) / sizeof(u32);
u32 *array = array_mp_8822b_phy_reg_pg;
ODM_RT_TRACE(dm, ODM_COMP_INIT,
@@ -1956,7 +1954,7 @@ void odm_read_and_config_mp_8822b_phy_reg_pg(struct phy_dm_struct *dm)
dm->phy_reg_pg_version = 1;
dm->phy_reg_pg_value_type = PHY_REG_PG_EXACT_VALUE;
- for (i = 0; i < array_len; i += 6) {
+ for (i = 0; i < ARRAY_SIZE(array_mp_8822b_phy_reg_pg); i += 6) {
u32 v1 = array[i];
u32 v2 = array[i + 1];
u32 v3 = array[i + 2];
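
The loops above replace the open-coded sizeof(array) / sizeof(element) length with ARRAY_SIZE() from <linux/kernel.h> (hence the new include), which computes the same value but refuses to build if it is accidentally applied to a pointer instead of an array. A short sketch with a hypothetical table:

#include <linux/kernel.h>

static const u32 reg_table[] = { 0x800, 0x804, 0x808 };

static void walk_reg_table(void)
{
	size_t i;

	/* ARRAY_SIZE(reg_table) == sizeof(reg_table) / sizeof(reg_table[0]) */
	for (i = 0; i < ARRAY_SIZE(reg_table); i++)
		pr_debug("reg 0x%x\n", reg_table[i]);
}
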
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c
index 1a9daed2e609..70924f002541 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c
@@ -26,6 +26,7 @@
/*Image2HeaderVersion: 3.2*/
#include "../mp_precomp.h"
#include "../phydm_precomp.h"
+#include <linux/kernel.h>
static bool check_positive(struct phy_dm_struct *dm, const u32 condition1,
const u32 condition2, const u32 condition3,
@@ -173,7 +174,6 @@ void odm_read_and_config_mp_8822b_mac_reg(struct phy_dm_struct *dm)
u32 i = 0;
u8 c_cond;
bool is_matched = true, is_skipped = false;
- u32 array_len = sizeof(array_mp_8822b_mac_reg) / sizeof(u32);
u32 *array = array_mp_8822b_mac_reg;
u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
@@ -181,7 +181,7 @@ void odm_read_and_config_mp_8822b_mac_reg(struct phy_dm_struct *dm)
ODM_RT_TRACE(dm, ODM_COMP_INIT,
"===> %s\n", __func__);
- for (; (i + 1) < array_len; i = i + 2) {
+ for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_mac_reg); i = i + 2) {
v1 = array[i];
v2 = array[i + 1];
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c
index 84cdc0644207..0ff3a9a712d6 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c
@@ -26,6 +26,7 @@
/*Image2HeaderVersion: 3.2*/
#include "../mp_precomp.h"
#include "../phydm_precomp.h"
+#include <linux/kernel.h>
static bool check_positive(struct phy_dm_struct *dm, const u32 condition1,
const u32 condition2, const u32 condition3,
@@ -1346,7 +1347,6 @@ void odm_read_and_config_mp_8822b_radioa(struct phy_dm_struct *dm)
u32 i = 0;
u8 c_cond;
bool is_matched = true, is_skipped = false;
- u32 array_len = sizeof(array_mp_8822b_radioa) / sizeof(u32);
u32 *array = array_mp_8822b_radioa;
u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
@@ -1354,7 +1354,7 @@ void odm_read_and_config_mp_8822b_radioa(struct phy_dm_struct *dm)
ODM_RT_TRACE(dm, ODM_COMP_INIT,
"===> %s\n", __func__);
- for (; (i + 1) < array_len; i = i + 2) {
+ for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_radioa); i = i + 2) {
v1 = array[i];
v2 = array[i + 1];
@@ -2506,7 +2506,6 @@ void odm_read_and_config_mp_8822b_radiob(struct phy_dm_struct *dm)
u32 i = 0;
u8 c_cond;
bool is_matched = true, is_skipped = false;
- u32 array_len = sizeof(array_mp_8822b_radiob) / sizeof(u32);
u32 *array = array_mp_8822b_radiob;
u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
@@ -2514,7 +2513,7 @@ void odm_read_and_config_mp_8822b_radiob(struct phy_dm_struct *dm)
ODM_RT_TRACE(dm, ODM_COMP_INIT,
"===> %s\n", __func__);
- for (; (i + 1) < array_len; i = i + 2) {
+ for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_radiob); i = i + 2) {
v1 = array[i];
v2 = array[i + 1];
@@ -4239,13 +4238,12 @@ static const char *const array_mp_8822b_txpwr_lmt[] = {
void odm_read_and_config_mp_8822b_txpwr_lmt(struct phy_dm_struct *dm)
{
u32 i = 0;
- u32 array_len = sizeof(array_mp_8822b_txpwr_lmt) / sizeof(u8 *);
u8 **array = (u8 **)array_mp_8822b_txpwr_lmt;
ODM_RT_TRACE(dm, ODM_COMP_INIT,
"===> %s\n", __func__);
- for (i = 0; i < array_len; i += 7) {
+ for (i = 0; i < ARRAY_SIZE(array_mp_8822b_txpwr_lmt); i += 7) {
u8 *regulation = array[i];
u8 *band = array[i + 1];
u8 *bandwidth = array[i + 2];
@@ -4723,13 +4721,12 @@ static const char *const array_mp_8822b_txpwr_lmt_type5[] = {
void odm_read_and_config_mp_8822b_txpwr_lmt_type5(struct phy_dm_struct *dm)
{
u32 i = 0;
- u32 array_len = sizeof(array_mp_8822b_txpwr_lmt_type5) / sizeof(u8 *);
u8 **array = (u8 **)array_mp_8822b_txpwr_lmt_type5;
ODM_RT_TRACE(dm, ODM_COMP_INIT,
"===> odm_read_and_config_mp_8822b_txpwr_lmt_type5\n");
- for (i = 0; i < array_len; i += 7) {
+ for (i = 0; i < ARRAY_SIZE(array_mp_8822b_txpwr_lmt_type5); i += 7) {
u8 *regulation = array[i];
u8 *band = array[i + 1];
u8 *bandwidth = array[i + 2];
diff --git a/drivers/staging/rtlwifi/ps.c b/drivers/staging/rtlwifi/ps.c
index 9172cee45f74..7856fc5d10bd 100644
--- a/drivers/staging/rtlwifi/ps.c
+++ b/drivers/staging/rtlwifi/ps.c
@@ -61,7 +61,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->enable_interrupt(hw);
/*<enable timer> */
- rtl_watch_dog_timer_callback((unsigned long)hw);
+ rtl_watch_dog_timer_callback(&rtlpriv->works.watchdog_timer);
return true;
}
diff --git a/drivers/staging/rtlwifi/rc.c b/drivers/staging/rtlwifi/rc.c
index 65de0c7b5a67..c835be91f398 100644
--- a/drivers/staging/rtlwifi/rc.c
+++ b/drivers/staging/rtlwifi/rc.c
@@ -125,8 +125,8 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
}
rate->count = tries;
rate->idx = rix >= 0x00 ? rix : 0x00;
- if (((rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE) ||
- (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8822BE)) &&
+ if ((rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE ||
+ rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8822BE) &&
wireless_mode == WIRELESS_MODE_AC_5G)
rate->idx |= 0x10;/*2NSS for 8812AE, 8822BE*/
@@ -138,7 +138,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
if (sta && (sta->ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40))
rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (sta && (sta->vht_cap.vht_supported))
+ if (sta && sta->vht_cap.vht_supported)
rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
} else {
if (mac->bw_80)
@@ -150,8 +150,8 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
if (sgi_20 || sgi_40 || sgi_80)
rate->flags |= IEEE80211_TX_RC_SHORT_GI;
if (sta && sta->ht_cap.ht_supported &&
- ((wireless_mode == WIRELESS_MODE_N_5G) ||
- (wireless_mode == WIRELESS_MODE_N_24G)))
+ (wireless_mode == WIRELESS_MODE_N_5G ||
+ wireless_mode == WIRELESS_MODE_N_24G))
rate->flags |= IEEE80211_TX_RC_MCS;
if (sta && sta->vht_cap.vht_supported &&
(wireless_mode == WIRELESS_MODE_AC_5G ||
@@ -232,7 +232,7 @@ static void rtl_tx_status(void *ppriv,
if (sta) {
/* Check if aggregation has to be enabled for this tid */
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- if ((sta->ht_cap.ht_supported) &&
+ if (sta->ht_cap.ht_supported &&
!(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
if (ieee80211_is_data_qos(fc)) {
u8 tid = rtl_get_tid(skb);
@@ -281,10 +281,8 @@ static void *rtl_rate_alloc_sta(void *ppriv,
struct rtl_rate_priv *rate_priv;
rate_priv = kzalloc(sizeof(*rate_priv), gfp);
- if (!rate_priv) {
- pr_err("Unable to allocate private rc structure\n");
+ if (!rate_priv)
return NULL;
- }
rtlpriv->rate_priv = rate_priv;
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
index 8e24da16752c..f45487122517 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
@@ -330,7 +330,7 @@ void rtl8822be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
byte5 = btc_ops->btc_get_lps_val(rtlpriv);
power_state = btc_ops->btc_get_rpwm_val(rtlpriv);
- if ((rlbm == 2) && (byte5 & BIT(4))) {
+ if (rlbm == 2 && (byte5 & BIT(4))) {
/* Keep awake interval to 1 to prevent from
* decreasing coex performance
*/
@@ -419,7 +419,7 @@ static bool _rtl8822be_send_bcn_or_cmd_packet(struct ieee80211_hw *hw,
dma_addr = rtlpriv->cfg->ops->get_desc(
hw, (u8 *)pbd_desc, true, HW_DESC_TXBUFF_ADDR);
- pci_unmap_single(rtlpci->pdev, dma_addr, skb->len,
+ pci_unmap_single(rtlpci->pdev, dma_addr, pskb->len,
PCI_DMA_TODEVICE);
kfree_skb(pskb);
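
The pci_unmap_single() change above matters because the mapping being torn down belongs to pskb, the reserved-page packet about to be freed, not to the skb variable from the surrounding function; map and unmap must always describe the same buffer and length. A hedged sketch of the pairing, with a simplified, hypothetical wrapper around the real driver structures:

#include <linux/pci.h>
#include <linux/skbuff.h>

static bool send_and_free(struct rtl_pci *rtlpci, struct sk_buff *pskb)
{
	dma_addr_t dma;

	dma = pci_map_single(rtlpci->pdev, pskb->data, pskb->len,
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(rtlpci->pdev, dma)) {
		kfree_skb(pskb);
		return false;
	}

	/* ... queue the descriptor, wait for the DMA to finish ... */

	/* unmap with the same device, handle and length used for the map */
	pci_unmap_single(rtlpci->pdev, dma, pskb->len, PCI_DMA_TODEVICE);
	kfree_skb(pskb);
	return true;
}
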
@@ -766,9 +766,10 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
rtl8822be_fill_h2c_cmd(hw, H2C_8822B_RSVDPAGE,
sizeof(u1_rsvd_page_loc),
u1_rsvd_page_loc);
- } else
+ } else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Set RSVD page location to Fw FAIL!!!!!!.\n");
+ }
}
/* Should check FW support p2p or not. */
@@ -834,7 +835,7 @@ void rtl8822be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
rtl_write_dword(rtlpriv, 0x5EC,
p2pinfo->noa_count_type[i]);
}
- if ((p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0)) {
+ if (p2pinfo->opp_ps == 1 || p2pinfo->noa_num > 0) {
/* rst p2p circuit */
rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST_8822B, BIT(4));
p2p_ps_offload->offload_en = 1;
diff --git a/drivers/staging/rtlwifi/rtl8822be/led.c b/drivers/staging/rtlwifi/rtl8822be/led.c
index f4b5af8ab116..0054c892dce6 100644
--- a/drivers/staging/rtlwifi/rtl8822be/led.c
+++ b/drivers/staging/rtlwifi/rtl8822be/led.c
@@ -114,7 +114,7 @@ void rtl8822be_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+ if (ppsc->rfoff_reason > RF_CHANGE_BY_PS &&
(ledaction == LED_CTL_TX || ledaction == LED_CTL_RX ||
ledaction == LED_CTL_SITE_SURVEY || ledaction == LED_CTL_LINK ||
ledaction == LED_CTL_NO_LINK ||
diff --git a/drivers/staging/rtlwifi/rtl8822be/phy.c b/drivers/staging/rtlwifi/rtl8822be/phy.c
index 4cba2adc3165..ef37ae98c803 100644
--- a/drivers/staging/rtlwifi/rtl8822be/phy.c
+++ b/drivers/staging/rtlwifi/rtl8822be/phy.c
@@ -890,7 +890,7 @@ bool rtl8822be_load_txpower_by_rate(struct ieee80211_hw *hw)
rtstatus = rtlpriv->phydm.ops->phydm_load_txpower_by_rate(rtlpriv);
if (!rtstatus) {
- pr_err("BB_PG Reg Fail!!");
+ pr_err("BB_PG Reg Fail!!\n");
return false;
}
@@ -915,7 +915,7 @@ bool rtl8822be_load_txpower_limit(struct ieee80211_hw *hw)
rtstatus = rtlpriv->phydm.ops->phydm_load_txpower_limit(rtlpriv);
if (!rtstatus) {
- pr_err("RF TxPwr Limit Fail!!");
+ pr_err("RF TxPwr Limit Fail!!\n");
return false;
}
@@ -1562,9 +1562,10 @@ static char _rtl8822be_phy_get_txpower_limit(struct ieee80211_hw *hw, u8 band,
[channel_index]
[rate_section]
[channel_index][rf_path];
- } else
+ } else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"No power limit table of the specified band\n");
+ }
return power_limit;
}
@@ -1609,9 +1610,9 @@ u8 rtl8822be_get_txpower_index(struct ieee80211_hw *hw, u8 path, u8 rate,
char limit;
char powerdiff_byrate = 0;
- if (((rtlhal->current_bandtype == BAND_ON_2_4G) &&
+ if ((rtlhal->current_bandtype == BAND_ON_2_4G &&
(channel > 14 || channel < 1)) ||
- ((rtlhal->current_bandtype == BAND_ON_5G) && (channel <= 14))) {
+ (rtlhal->current_bandtype == BAND_ON_5G && channel <= 14)) {
index = 0;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Illegal channel!!\n");
@@ -1755,9 +1756,9 @@ static void _rtl8822be_phy_set_txpower_index(struct ieee80211_hw *hw,
static u32 index;
/*
- * For 8822B, phydm api use 4 bytes txagc value
- * driver must combine every four 1 byte to one 4 byte and send to phydm
- */
+	 * For 8822B, the phydm API uses a 4-byte txagc value; the driver must
+	 * combine every four 1-byte values into one 4-byte word and send it to phydm

+ */
shift = rate & 0x03;
index |= ((u32)power_index << (shift * 8));
@@ -1912,8 +1913,8 @@ static u8 _rtl8822be_phy_get_pri_ch_id(struct rtl_priv *rtlpriv)
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
/* primary channel is at lower subband of 80MHz & 40MHz */
- if ((mac->cur_40_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER) &&
- (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER)) {
+ if (mac->cur_40_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER &&
+ mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER) {
pri_ch_idx = VHT_DATA_SC_20_LOWEST_OF_80MHZ;
/* primary channel is at
* lower subband of 80MHz & upper subband of 40MHz
@@ -2141,7 +2142,7 @@ static bool _rtl8822be_phy_set_rf_power_state(struct ieee80211_hw *hw,
switch (rfpwr_state) {
case ERFON:
- if ((ppsc->rfpwr_state == ERFOFF) &&
+ if (ppsc->rfpwr_state == ERFOFF &&
RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
bool rtstatus = false;
u32 initialize_count = 0;
diff --git a/drivers/staging/rtlwifi/rtl8822be/trx.c b/drivers/staging/rtlwifi/rtl8822be/trx.c
index 38f80e48a399..87e15e419252 100644
--- a/drivers/staging/rtlwifi/rtl8822be/trx.c
+++ b/drivers/staging/rtlwifi/rtl8822be/trx.c
@@ -165,7 +165,7 @@ static bool rtl8822be_get_rxdesc_is_ht(struct ieee80211_hw *hw, u8 *pdesc)
RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate);
- if ((rx_rate >= DESC_RATEMCS0) && (rx_rate <= DESC_RATEMCS15))
+ if (rx_rate >= DESC_RATEMCS0 && rx_rate <= DESC_RATEMCS15)
return true;
else
return false;
@@ -193,8 +193,8 @@ static u8 rtl8822be_get_rx_vht_nss(struct ieee80211_hw *hw, u8 *pdesc)
rx_rate = GET_RX_DESC_RX_RATE(pdesc);
- if ((rx_rate >= DESC_RATEVHT1SS_MCS0) &&
- (rx_rate <= DESC_RATEVHT1SS_MCS9))
+ if (rx_rate >= DESC_RATEVHT1SS_MCS0 &&
+ rx_rate <= DESC_RATEVHT1SS_MCS9)
vht_nss = 1;
else if ((rx_rate >= DESC_RATEVHT2SS_MCS0) &&
(rx_rate <= DESC_RATEVHT2SS_MCS9))
@@ -510,8 +510,8 @@ static u8 rtl8822be_bw_mapping(struct ieee80211_hw *hw,
else
bw_setting_of_desc = 0;
} else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- if ((ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) ||
- (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80))
+ if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40 ||
+ ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80)
bw_setting_of_desc = 1;
else
bw_setting_of_desc = 0;
@@ -546,10 +546,10 @@ static u8 rtl8822be_sc_mapping(struct ieee80211_hw *hw,
"%s: Not Correct Primary40MHz Setting\n",
__func__);
} else {
- if ((mac->cur_40_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_LOWER) &&
- (mac->cur_80_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_LOWER))
+ if (mac->cur_40_prime_sc ==
+ HAL_PRIME_CHNL_OFFSET_LOWER &&
+ mac->cur_80_prime_sc ==
+ HAL_PRIME_CHNL_OFFSET_LOWER)
sc_setting_of_desc =
VHT_DATA_SC_20_LOWEST_OF_80MHZ;
else if ((mac->cur_40_prime_sc ==
@@ -571,9 +571,9 @@ static u8 rtl8822be_sc_mapping(struct ieee80211_hw *hw,
sc_setting_of_desc =
VHT_DATA_SC_20_UPPERST_OF_80MHZ;
else
- RT_TRACE(
- rtlpriv, COMP_SEND, DBG_LOUD,
- "rtl8822be_sc_mapping: Not Correct Primary40MHz Setting\n");
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
+ "%s: Not Correct Primary40MHz Setting\n",
+ __func__);
}
} else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) {
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index 4033a2cf7ac9..d548bc695f9e 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -4125,12 +4125,6 @@ RTY_SEND_CMD:
rtsx_trace(chip);
return STATUS_FAIL;
}
-
- } else if (rsp_type == SD_RSP_TYPE_R0) {
- if ((ptr[3] & 0x1E) != 0x03) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
}
}
}
diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
index 09c223f815de..aee82fcaf669 100644
--- a/drivers/staging/sm750fb/ddk750_chip.h
+++ b/drivers/staging/sm750fb/ddk750_chip.h
@@ -18,7 +18,7 @@ static inline u32 peek32(u32 addr)
return readl(addr + mmio750);
}
-static inline void poke32(u32 data, u32 addr)
+static inline void poke32(u32 addr, u32 data)
{
writel(data, addr + mmio750);
}
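
Swapping poke32()'s parameters above fixes a prototype/caller mismatch: every user, such as the setDAC() macro in ddk750_power.h further down, passes (register, value), so the first parameter has to be the address. A sketch of the corrected helper together with a hypothetical wrapper around that call site:

static inline void poke32(u32 addr, u32 data)	/* addr first, matching callers */
{
	writel(data, addr + mmio750);		/* mmio750: ioremapped register base */
}

static void dac_power(u32 off)			/* hypothetical wrapper around setDAC() */
{
	poke32(MISC_CTRL,
	       (peek32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | off);
}
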
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index f41bd9181757..b20d16198c17 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -11,7 +11,7 @@
* function API. Please set the function pointer to NULL whenever the function
* is not supported.
*/
-static dvi_ctrl_device_t g_dcftSupportedDviController[] = {
+static struct dvi_ctrl_device g_dcftSupportedDviController[] = {
#ifdef DVI_CTRL_SII164
{
.pfnInit = sii164InitChip,
@@ -41,7 +41,7 @@ int dviInit(unsigned char edgeSelect,
unsigned char pllFilterEnable,
unsigned char pllFilterValue)
{
- dvi_ctrl_device_t *pCurrentDviCtrl;
+ struct dvi_ctrl_device *pCurrentDviCtrl;
pCurrentDviCtrl = g_dcftSupportedDviController;
if (pCurrentDviCtrl->pfnInit) {
diff --git a/drivers/staging/sm750fb/ddk750_dvi.h b/drivers/staging/sm750fb/ddk750_dvi.h
index 9529cb9cfd69..1c7a565b617a 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.h
+++ b/drivers/staging/sm750fb/ddk750_dvi.h
@@ -26,7 +26,7 @@ typedef unsigned char (*PFN_DVICTRL_CHECKINTERRUPT)(void);
typedef void (*PFN_DVICTRL_CLEARINTERRUPT)(void);
/* Structure to hold all the function pointer to the DVI Controller. */
-typedef struct _dvi_ctrl_device_t {
+struct dvi_ctrl_device {
PFN_DVICTRL_INIT pfnInit;
PFN_DVICTRL_RESETCHIP pfnResetChip;
PFN_DVICTRL_GETCHIPSTRING pfnGetChipString;
@@ -37,7 +37,7 @@ typedef struct _dvi_ctrl_device_t {
PFN_DVICTRL_ISCONNECTED pfnIsConnected;
PFN_DVICTRL_CHECKINTERRUPT pfnCheckInterrupt;
PFN_DVICTRL_CLEARINTERRUPT pfnClearInterrupt;
-} dvi_ctrl_device_t;
+};
#define DVI_CTRL_SII164
diff --git a/drivers/staging/sm750fb/ddk750_power.c b/drivers/staging/sm750fb/ddk750_power.c
index 73aeaeb89a64..12834f78eef7 100644
--- a/drivers/staging/sm750fb/ddk750_power.c
+++ b/drivers/staging/sm750fb/ddk750_power.c
@@ -3,7 +3,7 @@
#include "ddk750_reg.h"
#include "ddk750_power.h"
-void ddk750_set_dpms(DPMS_t state)
+void ddk750_set_dpms(enum dpms state)
{
unsigned int value;
diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h
index 651d05247edf..e48c74ecbbd1 100644
--- a/drivers/staging/sm750fb/ddk750_power.h
+++ b/drivers/staging/sm750fb/ddk750_power.h
@@ -2,20 +2,19 @@
#ifndef DDK750_POWER_H__
#define DDK750_POWER_H__
-typedef enum _DPMS_t {
+enum dpms {
crtDPMS_ON = 0x0,
crtDPMS_STANDBY = 0x1,
crtDPMS_SUSPEND = 0x2,
crtDPMS_OFF = 0x3,
-}
-DPMS_t;
+};
#define setDAC(off) { \
poke32(MISC_CTRL, \
(peek32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | (off)); \
}
-void ddk750_set_dpms(DPMS_t state);
+void ddk750_set_dpms(enum dpms state);
void sm750_set_power_mode(unsigned int powerMode);
void sm750_set_current_gate(unsigned int gate);
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index 1eeaf087e891..c787a74c4f9c 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -297,7 +297,8 @@ void sii164SetPower(unsigned char powerUp)
* sii164SelectHotPlugDetectionMode
* This function selects the mode of the hot plug detection.
*/
-static void sii164SelectHotPlugDetectionMode(sii164_hot_plug_mode_t hotPlugMode)
+static
+void sii164SelectHotPlugDetectionMode(enum sii164_hot_plug_mode hotPlugMode)
{
unsigned char detectReg;
diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h
index af52a3121067..2e9a88cd6af3 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.h
+++ b/drivers/staging/sm750fb/ddk750_sii164.h
@@ -5,12 +5,12 @@
#define USE_DVICHIP
/* Hot Plug detection mode structure */
-typedef enum _sii164_hot_plug_mode_t {
+enum sii164_hot_plug_mode {
SII164_HOTPLUG_DISABLE = 0, /* Disable Hot Plug output bit (always high). */
SII164_HOTPLUG_USE_MDI, /* Use Monitor Detect Interrupt bit. */
SII164_HOTPLUG_USE_RSEN, /* Use Receiver Sense detect bit. */
SII164_HOTPLUG_USE_HTPLG /* Use Hot Plug detect bit. */
-} sii164_hot_plug_mode_t;
+};
/* Silicon Image SiI164 chip prototype */
diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c
index 9d24159226d1..bbbef27cb329 100644
--- a/drivers/staging/sm750fb/sm750_cursor.c
+++ b/drivers/staging/sm750fb/sm750_cursor.c
@@ -19,8 +19,6 @@
#include "sm750.h"
#include "sm750_cursor.h"
-
-
#define poke32(addr, data) \
writel((data), cursor->mmio + (addr))
@@ -46,7 +44,6 @@ writel((data), cursor->mmio + (addr))
#define HWC_COLOR_3 0xC
#define HWC_COLOR_3_RGB565_MASK 0xffff
-
/* hw_cursor_xxx works for voyager,718 and 750 */
void sm750_hw_cursor_enable(struct lynx_cursor *cursor)
{
@@ -135,7 +132,6 @@ void sm750_hw_cursor_setData(struct lynx_cursor *cursor, u16 rop,
}
}
-
void sm750_hw_cursor_setData2(struct lynx_cursor *cursor, u16 rop,
const u8 *pcol, const u8 *pmsk)
{
diff --git a/drivers/staging/speakup/buffers.c b/drivers/staging/speakup/buffers.c
index f459e4004bfa..6137fa83c609 100644
--- a/drivers/staging/speakup/buffers.c
+++ b/drivers/staging/speakup/buffers.c
@@ -27,7 +27,7 @@ void speakup_start_ttys(void)
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (speakup_console[i] && speakup_console[i]->tty_stopped)
continue;
- if ((vc_cons[i].d) && (vc_cons[i].d->port.tty))
+ if (vc_cons[i].d && vc_cons[i].d->port.tty)
start_tty(vc_cons[i].d->port.tty);
}
}
@@ -38,7 +38,7 @@ static void speakup_stop_ttys(void)
int i;
for (i = 0; i < MAX_NR_CONSOLES; i++)
- if ((vc_cons[i].d && (vc_cons[i].d->port.tty)))
+ if (vc_cons[i].d && vc_cons[i].d->port.tty)
stop_tty(vc_cons[i].d->port.tty);
}
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 585925bb49a4..16497202473f 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -447,7 +447,7 @@ static void speak_char(u16 ch)
cp = spk_characters[ch];
if (!cp) {
- pr_info("speak_char: cp == NULL!\n");
+ pr_info("%s: cp == NULL!\n", __func__);
return;
}
if (IS_CHAR(ch, B_CAP)) {
@@ -2101,7 +2101,7 @@ speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
u_char shift_info, offset;
int ret = 0;
- if (synth == NULL)
+ if (!synth)
return 0;
spin_lock_irqsave(&speakup_info.spinlock, flags);
diff --git a/drivers/staging/speakup/speakup_acntsa.c b/drivers/staging/speakup/speakup_acntsa.c
index 0e10404e2e8c..43315849b7b6 100644
--- a/drivers/staging/speakup/speakup_acntsa.c
+++ b/drivers/staging/speakup/speakup_acntsa.c
@@ -136,7 +136,7 @@ static int synth_probe(struct spk_synth *synth)
}
module_param_named(ser, synth_acntsa.ser, int, 0444);
-module_param_named(dev, synth_acntsa.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_acntsa.dev_name, charp, 0444);
module_param_named(start, synth_acntsa.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
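
The S_IRUGO to 0444 substitutions in the speakup synthesizer modules follow the checkpatch preference for literal octal permissions over OR-ed S_I* macros; 0444 means readable by owner, group and others, writable by nobody. A minimal sketch with a hypothetical parameter:

#include <linux/module.h>
#include <linux/moduleparam.h>

static char *dev_name = "ttyS0";

/* 0444 is equivalent to S_IRUGO: world-readable, never writable via sysfs */
module_param(dev_name, charp, 0444);
MODULE_PARM_DESC(dev_name, "Serial device used by the synthesizer");
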
diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
index 2edb56c8a559..dcf0c3b59fdd 100644
--- a/drivers/staging/speakup/speakup_apollo.c
+++ b/drivers/staging/speakup/speakup_apollo.c
@@ -200,7 +200,7 @@ static void do_catch_up(struct spk_synth *synth)
}
module_param_named(ser, synth_apollo.ser, int, 0444);
-module_param_named(dev, synth_apollo.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_apollo.dev_name, charp, 0444);
module_param_named(start, synth_apollo.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_audptr.c b/drivers/staging/speakup/speakup_audptr.c
index 8ae826eba71c..45b5721441ba 100644
--- a/drivers/staging/speakup/speakup_audptr.c
+++ b/drivers/staging/speakup/speakup_audptr.c
@@ -163,7 +163,7 @@ static int synth_probe(struct spk_synth *synth)
}
module_param_named(ser, synth_audptr.ser, int, 0444);
-module_param_named(dev, synth_audptr.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_audptr.dev_name, charp, 0444);
module_param_named(start, synth_audptr.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_bns.c b/drivers/staging/speakup/speakup_bns.c
index 60bcf0df8123..402b0fbfb94d 100644
--- a/drivers/staging/speakup/speakup_bns.c
+++ b/drivers/staging/speakup/speakup_bns.c
@@ -120,7 +120,7 @@ static struct spk_synth synth_bns = {
};
module_param_named(ser, synth_bns.ser, int, 0444);
-module_param_named(dev, synth_bns.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_bns.dev_name, charp, 0444);
module_param_named(start, synth_bns.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index 95f4b2116d0c..4310c2c276c4 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -227,7 +227,7 @@ static void synth_flush(struct spk_synth *synth)
}
module_param_named(ser, synth_decext.ser, int, 0444);
-module_param_named(dev, synth_decext.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_decext.dev_name, charp, 0444);
module_param_named(start, synth_decext.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index f06995480022..5d6a861c9b1e 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -299,7 +299,7 @@ static void synth_flush(struct spk_synth *synth)
}
module_param_named(ser, synth_dectlk.ser, int, 0444);
-module_param_named(dev, synth_dectlk.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_dectlk.dev_name, charp, 0444);
module_param_named(start, synth_dectlk.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_dummy.c b/drivers/staging/speakup/speakup_dummy.c
index 851953d5eefb..ea3b2911cab9 100644
--- a/drivers/staging/speakup/speakup_dummy.c
+++ b/drivers/staging/speakup/speakup_dummy.c
@@ -122,7 +122,7 @@ static struct spk_synth synth_dummy = {
};
module_param_named(ser, synth_dummy.ser, int, 0444);
-module_param_named(dev, synth_dummy.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_dummy.dev_name, charp, 0444);
module_param_named(start, synth_dummy.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_ltlk.c b/drivers/staging/speakup/speakup_ltlk.c
index 423795f88f53..95efaab73813 100644
--- a/drivers/staging/speakup/speakup_ltlk.c
+++ b/drivers/staging/speakup/speakup_ltlk.c
@@ -167,7 +167,7 @@ static int synth_probe(struct spk_synth *synth)
}
module_param_named(ser, synth_ltlk.ser, int, 0444);
-module_param_named(dev, synth_ltlk.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_ltlk.dev_name, charp, 0444);
module_param_named(start, synth_ltlk.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c
index 9ca21edc42ce..1037aa0d085a 100644
--- a/drivers/staging/speakup/speakup_spkout.c
+++ b/drivers/staging/speakup/speakup_spkout.c
@@ -131,7 +131,7 @@ static void synth_flush(struct spk_synth *synth)
}
module_param_named(ser, synth_spkout.ser, int, 0444);
-module_param_named(dev, synth_spkout.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_spkout.dev_name, charp, 0444);
module_param_named(start, synth_spkout.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c
index 831ee404e7a1..e160034e4a68 100644
--- a/drivers/staging/speakup/speakup_txprt.c
+++ b/drivers/staging/speakup/speakup_txprt.c
@@ -119,7 +119,7 @@ static struct spk_synth synth_txprt = {
};
module_param_named(ser, synth_txprt.ser, int, 0444);
-module_param_named(dev, synth_txprt.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_txprt.dev_name, charp, 0444);
module_param_named(start, synth_txprt.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index 4d7d8f2f66ea..513cebbd161c 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -51,10 +51,8 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
speakup_tty = tty;
ldisc_data = kmalloc(sizeof(struct spk_ldisc_data), GFP_KERNEL);
- if (!ldisc_data) {
- pr_err("speakup: Failed to allocate ldisc_data.\n");
+ if (!ldisc_data)
return -ENOMEM;
- }
sema_init(&ldisc_data->sem, 0);
ldisc_data->buf_free = true;
@@ -90,7 +88,8 @@ static int spk_ttyio_receive_buf2(struct tty_struct *tty,
return 0;
/* Make sure the consumer has read buf before we have seen
- * buf_free == true and overwrite buf */
+ * buf_free == true and overwrite buf
+ */
mb();
ldisc_data->buf = cp[0];
@@ -276,7 +275,8 @@ static unsigned char ttyio_in(int timeout)
rv = ldisc_data->buf;
/* Make sure we have read buf before we set buf_free to let
- * the producer overwrite it */
+ * the producer overwrite it
+ */
mb();
ldisc_data->buf_free = true;
/* Let TTY push more characters */
diff --git a/drivers/staging/typec/Kconfig b/drivers/staging/typec/Kconfig
index 37a0781b0d0c..5359f556d203 100644
--- a/drivers/staging/typec/Kconfig
+++ b/drivers/staging/typec/Kconfig
@@ -1,13 +1,5 @@
menu "USB Power Delivery and Type-C drivers"
-config TYPEC_TCPM
- tristate "USB Type-C Port Controller Manager"
- depends on USB
- select TYPEC
- help
- The Type-C Port Controller Manager provides a USB PD and USB Type-C
- state machine for use with Type-C Port Controllers.
-
if TYPEC_TCPM
config TYPEC_TCPCI
@@ -17,8 +9,6 @@ config TYPEC_TCPCI
help
Type-C Port Controller driver for TCPCI-compliant controller.
-source "drivers/staging/typec/fusb302/Kconfig"
-
endif
endmenu
diff --git a/drivers/staging/typec/Makefile b/drivers/staging/typec/Makefile
index 30a7e29cbc9e..53d649abcb53 100644
--- a/drivers/staging/typec/Makefile
+++ b/drivers/staging/typec/Makefile
@@ -1,3 +1 @@
-obj-$(CONFIG_TYPEC_TCPM) += tcpm.o
obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
-obj-y += fusb302/
diff --git a/drivers/staging/typec/TODO b/drivers/staging/typec/TODO
index bc1f97a2d1bf..53fe2f726c88 100644
--- a/drivers/staging/typec/TODO
+++ b/drivers/staging/typec/TODO
@@ -1,13 +1,3 @@
-tcpm:
-- Add documentation (at the very least for the API to low level drivers)
-- Split PD code into separate file
-- Check if it makes sense to use tracepoints instead of debugfs for debug logs
-- Implement Alternate Mode handling
-- Address "#if 0" code if not addressed with the above
-- Validate all comments marked with "XXX"; either address or remove comments
-- Add support for USB PD 3.0. While not mandatory, at least fast role swap
- as well as authentication support would be very desirable.
-
tcpci:
- Test with real hardware
diff --git a/drivers/staging/typec/fusb302/TODO b/drivers/staging/typec/fusb302/TODO
deleted file mode 100644
index 19b466eb585d..000000000000
--- a/drivers/staging/typec/fusb302/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-fusb302:
-- Find a better logging scheme, at least not having the same debugging/logging
- code replicated here and in tcpm
-- Find a non-hacky way to coordinate between PM and I2C access
-- Documentation? The FUSB302 datasheet provides information on the chip to help
- understand the code. But it may still be helpful to have a documentation.
-- We may want to replace the "fcs,max-snk-microvolt", "fcs,max-snk-microamp",
- "fcs,max-snk-microwatt" and "fcs,operating-snk-microwatt" device(tree)
- properties with properties which are part of a generic type-c controller
- devicetree binding.
diff --git a/drivers/staging/typec/tcpci.c b/drivers/staging/typec/tcpci.c
index df72d8b01e73..b6abaf79ef0b 100644
--- a/drivers/staging/typec/tcpci.c
+++ b/drivers/staging/typec/tcpci.c
@@ -20,11 +20,11 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
+#include <linux/usb/pd.h>
+#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>
-#include "pd.h"
#include "tcpci.h"
-#include "tcpm.h"
#define PD_RETRY_COUNT 3
@@ -139,6 +139,7 @@ static enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink)
case 0x3:
if (sink)
return TYPEC_CC_RP_3_0;
+ /* fall through */
case 0x0:
default:
return TYPEC_CC_OPEN;
diff --git a/drivers/staging/unisys/MAINTAINERS b/drivers/staging/unisys/MAINTAINERS
index 1f0425bf3583..aaddc619c329 100644
--- a/drivers/staging/unisys/MAINTAINERS
+++ b/drivers/staging/unisys/MAINTAINERS
@@ -1,5 +1,5 @@
Unisys s-Par drivers
M: David Kershner <sparmaintainer@unisys.com>
S: Maintained
-F: Documentation/s-Par/overview.txt
+F: drivers/staging/unisys/Documentation/overview.txt
F: drivers/staging/unisys/
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index a70760f48566..5cd407ca2251 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -44,7 +44,7 @@
#include <linux/uuid.h>
#include <linux/skbuff.h>
-#include "channel.h"
+#include "visorchannel.h"
/*
* Must increment these whenever you insert or delete fields within this channel
@@ -348,10 +348,9 @@ struct sense_data {
* the start of the NETWORK LAYER HEADER.
*
* NOTE:
- * The full packet is described in frags but the ethernet header is
- * separately kept in ethhdr so that uisnic doesn't have "MAP" the
- * guest memory to get to the header. uisnic needs ethhdr to
- * determine how to route the packet.
+ * The full packet is described in frags but the ethernet header is separately
+ * kept in ethhdr so that uisnic doesn't have to "MAP" the guest memory to get
+ * the header. uisnic needs ethhdr to determine how to route the packet.
*/
struct net_pkt_xmt {
int len;
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index e4ee38c3dbe4..1a0986ba3d24 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -28,87 +28,20 @@
#define __VISORBUS_H__
#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "channel.h"
-
-struct visor_device;
-extern struct bus_type visorbus_type;
-
-typedef void (*visorbus_state_complete_func) (struct visor_device *dev,
- int status);
+#include "visorchannel.h"
struct visorchipset_state {
u32 created:1;
u32 attached:1;
u32 configured:1;
u32 running:1;
- /* Add new fields above. */
- /* Remaining bits in this 32-bit word are unused. */
-};
-
-/*
- * This struct describes a specific Supervisor channel, by providing its
- * GUID, name, and sizes.
- */
-struct visor_channeltype_descriptor {
- const guid_t guid;
- const char *name;
+ /* Remaining bits in this 32-bit word are reserved. */
};
/**
- * struct visor_driver - Information provided by each visor driver when it
- * registers with the visorbus driver.
- * @name: Name of the visor driver.
- * @owner: The module owner.
- * @channel_types: Types of channels handled by this driver, ending with
- * a zero GUID. Our specialized BUS.match() method knows
- * about this list, and uses it to determine whether this
- * driver will in fact handle a new device that it has
- * detected.
- * @probe: Called when a new device comes online, by our probe()
- * function specified by driver.probe() (triggered
- * ultimately by some call to driver_register(),
- * bus_add_driver(), or driver_attach()).
- * @remove: Called when a new device is removed, by our remove()
- * function specified by driver.remove() (triggered
- * ultimately by some call to device_release_driver()).
- * @channel_interrupt: Called periodically, whenever there is a possiblity
- * that "something interesting" may have happened to the
- * channel.
- * @pause: Called to initiate a change of the device's state. If
- * the return valu`e is < 0, there was an error and the
- * state transition will NOT occur. If the return value
- * is >= 0, then the state transition was INITIATED
- * successfully, and complete_func() will be called (or
- * was just called) with the final status when either the
- * state transition fails or completes successfully.
- * @resume: Behaves similar to pause.
- * @driver: Private reference to the device driver. For use by bus
- * driver only.
- */
-struct visor_driver {
- const char *name;
- struct module *owner;
- struct visor_channeltype_descriptor *channel_types;
- int (*probe)(struct visor_device *dev);
- void (*remove)(struct visor_device *dev);
- void (*channel_interrupt)(struct visor_device *dev);
- int (*pause)(struct visor_device *dev,
- visorbus_state_complete_func complete_func);
- int (*resume)(struct visor_device *dev,
- visorbus_state_complete_func complete_func);
-
- /* These fields are for private use by the bus driver only. */
- struct device_driver driver;
-};
-
-#define to_visor_driver(x) (container_of(x, struct visor_driver, driver))
-
-/**
* struct visor_device - A device type for things "plugged" into the visorbus
- * bus
+ * bus
* @visorchannel: Points to the channel that the device is
* associated with.
* @channel_type_guid: Identifies the channel type to the bus driver.
@@ -139,7 +72,6 @@ struct visor_driver {
* same across all visor_devices in the current
* guest. Private use by bus driver only.
*/
-
struct visor_device {
struct visorchannel *visorchannel;
guid_t channel_type_guid;
@@ -161,11 +93,74 @@ struct visor_device {
void *vbus_hdr_info;
guid_t partition_guid;
struct dentry *debugfs_dir;
- struct dentry *debugfs_client_bus_info;
+ struct dentry *debugfs_bus_info;
};
#define to_visor_device(x) container_of(x, struct visor_device, device)
+typedef void (*visorbus_state_complete_func) (struct visor_device *dev,
+ int status);
+
+/*
+ * This struct describes a specific visor channel, by providing its GUID, name,
+ * and sizes.
+ */
+struct visor_channeltype_descriptor {
+ const guid_t guid;
+ const char *name;
+ u64 min_bytes;
+ u32 version;
+};
+
+/**
+ * struct visor_driver - Information provided by each visor driver when it
+ * registers with the visorbus driver
+ * @name: Name of the visor driver.
+ * @owner: The module owner.
+ * @channel_types: Types of channels handled by this driver, ending with
+ * a zero GUID. Our specialized BUS.match() method knows
+ * about this list, and uses it to determine whether this
+ * driver will in fact handle a new device that it has
+ * detected.
+ * @probe: Called when a new device comes online, by our probe()
+ * function specified by driver.probe() (triggered
+ * ultimately by some call to driver_register(),
+ * bus_add_driver(), or driver_attach()).
+ * @remove: Called when a new device is removed, by our remove()
+ * function specified by driver.remove() (triggered
+ * ultimately by some call to device_release_driver()).
+ * @channel_interrupt: Called periodically, whenever there is a possibility
+ * that "something interesting" may have happened to the
+ * channel.
+ * @pause: Called to initiate a change of the device's state. If
+ * the return value is < 0, there was an error and the
+ * state transition will NOT occur. If the return value
+ * is >= 0, then the state transition was INITIATED
+ * successfully, and complete_func() will be called (or
+ * was just called) with the final status when either the
+ * state transition fails or completes successfully.
+ * @resume: Behaves similarly to pause.
+ * @driver: Private reference to the device driver. For use by bus
+ * driver only.
+ */
+struct visor_driver {
+ const char *name;
+ struct module *owner;
+ struct visor_channeltype_descriptor *channel_types;
+ int (*probe)(struct visor_device *dev);
+ void (*remove)(struct visor_device *dev);
+ void (*channel_interrupt)(struct visor_device *dev);
+ int (*pause)(struct visor_device *dev,
+ visorbus_state_complete_func complete_func);
+ int (*resume)(struct visor_device *dev,
+ visorbus_state_complete_func complete_func);
+
+ /* These fields are for private use by the bus driver only. */
+ struct device_driver driver;
+};
+
+#define to_visor_driver(x) (container_of(x, struct visor_driver, driver))
+
int visor_check_channel(struct channel_header *ch, struct device *dev,
const guid_t *expected_uuid, char *chname,
u64 expected_min_bytes, u32 expected_version,
@@ -182,26 +177,6 @@ int visorbus_write_channel(struct visor_device *dev,
int visorbus_enable_channel_interrupts(struct visor_device *dev);
void visorbus_disable_channel_interrupts(struct visor_device *dev);
-/*
- * Levels of severity for diagnostic events, in order from lowest severity to
- * highest (i.e. fatal errors are the most severe, and should always be logged,
- * but info events rarely need to be logged except during debugging). The
- * values DIAG_SEVERITY_ENUM_BEGIN and DIAG_SEVERITY_ENUM_END are not valid
- * severity values. They exist merely to dilineate the list, so that future
- * additions won't require changes to the driver (i.e. when checking for
- * out-of-range severities in SetSeverity). The values DIAG_SEVERITY_OVERRIDE
- * and DIAG_SEVERITY_SHUTOFF are not valid severity values for logging events
- * but they are valid for controlling the amount of event data. Changes made
- * to the enum, need to be reflected in s-Par.
- */
-enum diag_severity {
- DIAG_SEVERITY_VERBOSE = 0,
- DIAG_SEVERITY_INFO = 1,
- DIAG_SEVERITY_WARNING = 2,
- DIAG_SEVERITY_ERR = 3,
- DIAG_SEVERITY_PRINT = 4,
-};
-
int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
void *msg);
int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
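
[Editor's note: the visor_driver kernel-doc moved above documents the pause/resume contract: a return value < 0 rejects the transition, a return value >= 0 means it was initiated and the completion callback reports the final status later (or immediately). Below is a hedged userspace sketch of that callback contract with invented names (fake_dev, fake_pause); real drivers go through the visorbus core, not this code.]

#include <stdio.h>

struct fake_dev { const char *name; };
typedef void (*state_complete_func)(struct fake_dev *dev, int status);

/* Initiate a pause; < 0 rejects it, >= 0 means complete() reports the result. */
static int fake_pause(struct fake_dev *dev, state_complete_func complete)
{
	if (!dev)
		return -1;	/* error: the state transition will NOT occur */
	complete(dev, 0);	/* in this sketch we finish synchronously */
	return 0;		/* transition initiated */
}

static void on_complete(struct fake_dev *dev, int status)
{
	printf("%s: pause finished, status %d\n", dev->name, status);
}

int main(void)
{
	struct fake_dev d = { "vbus1:dev0" };

	if (fake_pause(&d, on_complete) < 0)
		printf("pause rejected\n");
	return 0;
}
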
diff --git a/drivers/staging/unisys/include/channel.h b/drivers/staging/unisys/include/visorchannel.h
index 2babe93631f3..33945749c8b6 100644
--- a/drivers/staging/unisys/include/channel.h
+++ b/drivers/staging/unisys/include/visorchannel.h
@@ -14,17 +14,13 @@
* details.
*/
-#ifndef __CHANNEL_H__
-#define __CHANNEL_H__
+#ifndef __VISORCHANNEL_H__
+#define __VISORCHANNEL_H__
#include <linux/types.h>
-#include <linux/io.h>
#include <linux/uuid.h>
-#define SIGNATURE_16(A, B) ((A) | ((B) << 8))
-#define SIGNATURE_32(A, B, C, D) \
- (SIGNATURE_16(A, B) | (SIGNATURE_16(C, D) << 16))
-#define VISOR_CHANNEL_SIGNATURE SIGNATURE_32('E', 'C', 'N', 'L')
+#define VISOR_CHANNEL_SIGNATURE ('L' << 24 | 'N' << 16 | 'C' << 8 | 'E')
/*
* enum channel_serverstate
@@ -183,7 +179,7 @@ struct signal_queue_header {
u8 filler[12];
} __packed;
-/* CHANNEL Guids */
+/* VISORCHANNEL Guids */
/* {414815ed-c58c-11da-95a9-00e08161165f} */
#define VISOR_VHBA_CHANNEL_GUID \
GUID_INIT(0x414815ed, 0xc58c, 0x11da, \
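
[Editor's note: the hunk above drops the SIGNATURE_16/SIGNATURE_32 helpers in favour of spelling the little-endian "ECNL" signature out directly; both forms yield the same 32-bit constant. A small stand-alone check, assuming nothing beyond standard C, follows.]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SIGNATURE_16(A, B) ((A) | ((B) << 8))
#define SIGNATURE_32(A, B, C, D) \
	(SIGNATURE_16(A, B) | (SIGNATURE_16(C, D) << 16))

int main(void)
{
	uint32_t old_sig = SIGNATURE_32('E', 'C', 'N', 'L');
	uint32_t new_sig = 'L' << 24 | 'N' << 16 | 'C' << 8 | 'E';

	assert(old_sig == new_sig);
	printf("0x%08x\n", (unsigned int)new_sig);	/* 0x4c4e4345 */
	return 0;
}
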
diff --git a/drivers/staging/unisys/visorbus/controlvmchannel.h b/drivers/staging/unisys/visorbus/controlvmchannel.h
index 32ff5c1bb6ba..9ee9886a9aed 100644
--- a/drivers/staging/unisys/visorbus/controlvmchannel.h
+++ b/drivers/staging/unisys/visorbus/controlvmchannel.h
@@ -17,7 +17,8 @@
#define __CONTROLVMCHANNEL_H__
#include <linux/uuid.h>
-#include "channel.h"
+
+#include "visorchannel.h"
/* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */
#define VISOR_CONTROLVM_CHANNEL_GUID \
diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h
index 27e04de14818..981b180f3c4b 100644
--- a/drivers/staging/unisys/visorbus/vbuschannel.h
+++ b/drivers/staging/unisys/visorbus/vbuschannel.h
@@ -26,8 +26,7 @@
*/
#include <linux/uuid.h>
-#include <linux/ctype.h>
-#include "channel.h"
+#include "visorchannel.h"
/* {193b331b-c58f-11da-95a9-00e08161165f} */
#define VISOR_VBUS_CHANNEL_GUID \
@@ -50,9 +49,9 @@
* @infostrs: Kernel vversion.
* @reserved: Pad size to 256 bytes.
*
- * An array of this struct is present in the channel area for each vbus.
- * (See vbuschannel.h.). It is filled in by the client side to provide info
- * about the device and driver from the client's perspective.
+ * An array of this struct is present in the channel area for each vbus. It is
+ * filled in by the client side to provide info about the device and driver from
+ * the client's perspective.
*/
struct visor_vbus_deviceinfo {
u8 devtype[16];
@@ -73,7 +72,7 @@ struct visor_vbus_deviceinfo {
* BusInfo struct.
* @dev_info_offset: Byte offset from beginning of this struct to the
* DevInfo array.
- * @reserved: Natural Alignment
+ * @reserved: Natural alignment.
*/
struct visor_vbus_headerinfo {
u32 struct_bytes;
@@ -97,7 +96,6 @@ struct visor_vbus_headerinfo {
struct visor_vbus_channel {
struct channel_header channel_header;
struct visor_vbus_headerinfo hdr_info;
- /* The remainder of this channel is filled in by the client */
struct visor_vbus_deviceinfo chp_info;
struct visor_vbus_deviceinfo bus_info;
struct visor_vbus_deviceinfo dev_info[0];
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 2bc7ff7bb96a..b604d0cccef1 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -13,7 +13,10 @@
* details.
*/
+#include <linux/ctype.h>
#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/uuid.h>
#include "visorbus.h"
@@ -69,12 +72,9 @@ static LIST_HEAD(list_all_device_instances);
* Note that <logCtx> is only needed for callers in the EFI environment, and
* is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
*/
-int visor_check_channel(struct channel_header *ch,
- struct device *dev,
- const guid_t *expected_guid,
- char *chname,
- u64 expected_min_bytes,
- u32 expected_version,
+int visor_check_channel(struct channel_header *ch, struct device *dev,
+ const guid_t *expected_guid, char *chname,
+ u64 expected_min_bytes, u32 expected_version,
u64 expected_signature)
{
if (!guid_is_null(expected_guid)) {
@@ -125,7 +125,6 @@ static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env)
dev = to_visor_device(xdev);
guid = visorchannel_get_guid(dev->visorchannel);
-
return add_uevent_var(env, "MODALIAS=visorbus:%pUl", guid);
}
@@ -144,17 +143,24 @@ static int visorbus_match(struct device *xdev, struct device_driver *xdrv)
int i;
struct visor_device *dev;
struct visor_driver *drv;
+ struct visorchannel *chan;
dev = to_visor_device(xdev);
channel_type = visorchannel_get_guid(dev->visorchannel);
drv = to_visor_driver(xdrv);
+ chan = dev->visorchannel;
if (!drv->channel_types)
return 0;
-
for (i = 0; !guid_is_null(&drv->channel_types[i].guid); i++)
- if (guid_equal(&drv->channel_types[i].guid, channel_type))
+ if (guid_equal(&drv->channel_types[i].guid, channel_type) &&
+ visor_check_channel(visorchannel_get_header(chan),
+ xdev,
+ &drv->channel_types[i].guid,
+ (char *)drv->channel_types[i].name,
+ drv->channel_types[i].min_bytes,
+ drv->channel_types[i].version,
+ VISOR_CHANNEL_SIGNATURE))
return i + 1;
-
return 0;
}
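
[Editor's note: visorbus_match() now only claims a device when the GUID match is also backed by a successful visor_check_channel() on the mapped channel header, and it keeps the driver-core convention of returning a non-zero value (here the 1-based table index) for a match and 0 otherwise. The sketch below illustrates just the zero-terminated table scan and the "i + 1" convention with invented types; it is not the bus code itself.]

#include <stdio.h>
#include <string.h>

struct chan_type { const char *guid; const char *name; };	/* hypothetical */

/* Return the 1-based index of the matching entry, or 0 if the table ends first. */
static int match_channel(const struct chan_type *types, const char *guid)
{
	int i;

	for (i = 0; types[i].guid; i++)
		if (strcmp(types[i].guid, guid) == 0)
			return i + 1;
	return 0;
}

int main(void)
{
	static const struct chan_type types[] = {
		{ "vhba", "visorhba" },
		{ "vnic", "visornic" },
		{ NULL, NULL },	/* terminated like channel_types' zero GUID */
	};

	printf("%d\n", match_channel(types, "vnic"));	/* 2 */
	printf("%d\n", match_channel(types, "vgpu"));	/* 0: no match */
	return 0;
}
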
@@ -162,13 +168,48 @@ static int visorbus_match(struct device *xdev, struct device_driver *xdrv)
* This describes the TYPE of bus.
* (Don't confuse this with an INSTANCE of the bus.)
*/
-struct bus_type visorbus_type = {
+static struct bus_type visorbus_type = {
.name = "visorbus",
.match = visorbus_match,
.uevent = visorbus_uevent,
.dev_groups = visorbus_dev_groups,
};
+struct visor_busdev {
+ u32 bus_no;
+ u32 dev_no;
+};
+
+static int match_visorbus_dev_by_id(struct device *dev, void *data)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+ struct visor_busdev *id = data;
+
+ if (vdev->chipset_bus_no == id->bus_no &&
+ vdev->chipset_dev_no == id->dev_no)
+ return 1;
+ return 0;
+}
+
+struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
+ struct visor_device *from)
+{
+ struct device *dev;
+ struct device *dev_start = NULL;
+ struct visor_busdev id = {
+ .bus_no = bus_no,
+ .dev_no = dev_no
+ };
+
+ if (from)
+ dev_start = &from->device;
+ dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
+ match_visorbus_dev_by_id);
+ if (!dev)
+ return NULL;
+ return to_visor_device(dev);
+}
+
/*
* visorbus_release_busdevice() - called when device_unregister() is called for
* the bus device instance, after all other tasks
@@ -179,8 +220,9 @@ static void visorbus_release_busdevice(struct device *xdev)
{
struct visor_device *dev = dev_get_drvdata(xdev);
- debugfs_remove(dev->debugfs_client_bus_info);
+ debugfs_remove(dev->debugfs_bus_info);
debugfs_remove_recursive(dev->debugfs_dir);
+ visorchannel_destroy(dev->visorchannel);
kfree(dev);
}
@@ -198,7 +240,7 @@ static void visorbus_release_device(struct device *xdev)
}
/*
- * begin implementation of specific channel attributes to appear under
+ * BUS specific channel attributes to appear under
* /sys/bus/visorbus<x>/dev<y>/channel
*/
@@ -218,7 +260,7 @@ static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
struct visor_device *vdev = to_visor_device(dev);
return sprintf(buf, "0x%lx\n",
- visorchannel_get_nbytes(vdev->visorchannel));
+ visorchannel_get_nbytes(vdev->visorchannel));
}
static DEVICE_ATTR_RO(nbytes);
@@ -284,18 +326,14 @@ static struct attribute *channel_attrs[] = {
ATTRIBUTE_GROUPS(channel);
-/* end implementation of specific channel attributes */
-
/*
* BUS instance attributes
*
* define & implement display of bus attributes under
* /sys/bus/visorbus/devices/visorbus<n>.
*/
-
static ssize_t partition_handle_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
u64 handle = visorchannel_get_clientpartition(vdev->visorchannel);
@@ -305,8 +343,7 @@ static ssize_t partition_handle_show(struct device *dev,
static DEVICE_ATTR_RO(partition_handle);
static ssize_t partition_guid_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
@@ -315,8 +352,7 @@ static ssize_t partition_guid_show(struct device *dev,
static DEVICE_ATTR_RO(partition_guid);
static ssize_t partition_name_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
@@ -325,8 +361,7 @@ static ssize_t partition_name_show(struct device *dev,
static DEVICE_ATTR_RO(partition_name);
static ssize_t channel_addr_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
u64 addr = visorchannel_get_physaddr(vdev->visorchannel);
@@ -336,8 +371,7 @@ static ssize_t channel_addr_show(struct device *dev,
static DEVICE_ATTR_RO(channel_addr);
static ssize_t channel_bytes_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
u64 nbytes = visorchannel_get_nbytes(vdev->visorchannel);
@@ -347,8 +381,7 @@ static ssize_t channel_bytes_show(struct device *dev,
static DEVICE_ATTR_RO(channel_bytes);
static ssize_t channel_id_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
int len = 0;
@@ -356,7 +389,6 @@ static ssize_t channel_id_show(struct device *dev,
visorchannel_id(vdev->visorchannel, buf);
len = strlen(buf);
buf[len++] = '\n';
-
return len;
}
static DEVICE_ATTR_RO(channel_id);
@@ -396,13 +428,11 @@ static void vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo,
/* uninitialized vbus device entry */
if (!isprint(devinfo->devtype[0]))
return;
-
if (devix >= 0)
seq_printf(seq, "[%d]", devix);
else
/* vbus device entry is for bus or chipset */
seq_puts(seq, " ");
-
/*
* Note: because the s-Par back-end is free to scribble in this area,
* we never assume '\0'-termination.
@@ -415,7 +445,7 @@ static void vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo,
devinfo->infostrs);
}
-static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
+static int bus_info_debugfs_show(struct seq_file *seq, void *v)
{
int i = 0;
unsigned long off;
@@ -427,10 +457,9 @@ static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
return 0;
seq_printf(seq,
- "Client device / client driver info for %s partition (vbus #%u):\n",
+ "Client device/driver info for %s partition (vbus #%u):\n",
((vdev->name) ? (char *)(vdev->name) : ""),
vdev->chipset_bus_no);
-
if (visorchannel_read(channel,
offsetof(struct visor_vbus_channel, chp_info),
&dev_info, sizeof(dev_info)) >= 0)
@@ -448,19 +477,17 @@ static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
off += sizeof(dev_info);
i++;
}
-
return 0;
}
-static int client_bus_info_debugfs_open(struct inode *inode, struct file *file)
+static int bus_info_debugfs_open(struct inode *inode, struct file *file)
{
- return single_open(file, client_bus_info_debugfs_show,
- inode->i_private);
+ return single_open(file, bus_info_debugfs_show, inode->i_private);
}
-static const struct file_operations client_bus_info_debugfs_fops = {
+static const struct file_operations bus_info_debugfs_fops = {
.owner = THIS_MODULE,
- .open = client_bus_info_debugfs_open,
+ .open = bus_info_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
@@ -479,6 +506,7 @@ static int dev_start_periodic_work(struct visor_device *dev)
{
if (dev->being_removed || dev->timer_active)
return -EINVAL;
+
/* now up by at least 2 */
get_device(&dev->device);
dev->timer.expires = jiffies + POLLJIFFIES_NORMALCHANNEL;
@@ -491,6 +519,7 @@ static void dev_stop_periodic_work(struct visor_device *dev)
{
if (!dev->timer_active)
return;
+
del_timer_sync(&dev->timer);
dev->timer_active = false;
put_device(&dev->device);
@@ -508,20 +537,15 @@ static void dev_stop_periodic_work(struct visor_device *dev)
*/
static int visordriver_remove_device(struct device *xdev)
{
- struct visor_device *dev;
- struct visor_driver *drv;
-
- dev = to_visor_device(xdev);
- drv = to_visor_driver(xdev->driver);
+ struct visor_device *dev = to_visor_device(xdev);
+ struct visor_driver *drv = to_visor_driver(xdev->driver);
mutex_lock(&dev->visordriver_callback_lock);
dev->being_removed = true;
drv->remove(dev);
mutex_unlock(&dev->visordriver_callback_lock);
-
dev_stop_periodic_work(dev);
put_device(&dev->device);
-
return 0;
}
@@ -546,8 +570,7 @@ EXPORT_SYMBOL_GPL(visorbus_unregister_visor_driver);
* @dest: the destination buffer that is written into from the channel
* @nbytes: the number of bytes to read from the channel
*
- * If receiving a message, use the visorchannel_signalremove()
- * function instead.
+ * If receiving a message, use the visorchannel_signalremove() function instead.
*
* Return: integer indicating success (zero) or failure (non-zero)
*/
@@ -566,8 +589,7 @@ EXPORT_SYMBOL_GPL(visorbus_read_channel);
* @src: the source buffer that is written into the channel
* @nbytes: the number of bytes to write into the channel
*
- * If sending a message, use the visorchannel_signalinsert()
- * function instead.
+ * If sending a message, use the visorchannel_signalinsert() function instead.
*
* Return: integer indicating success (zero) or failure (non-zero)
*/
@@ -618,17 +640,16 @@ EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
*
* This is how everything starts from the device end.
* This function is called when a channel first appears via a ControlVM
- * message. In response, this function allocates a visor_device to
- * correspond to the new channel, and attempts to connect it the appropriate
- * driver. If the appropriate driver is found, the visor_driver.probe()
- * function for that driver will be called, and will be passed the new
- * visor_device that we just created.
+ * message. In response, this function allocates a visor_device to correspond
+ * to the new channel, and attempts to connect it to the appropriate driver. If
+ * the appropriate driver is found, the visor_driver.probe() function for that
+ * driver will be called, and will be passed the new visor_device that we
+ * just created.
*
* It's ok if the appropriate driver is not yet loaded, because in that case
* the new device struct will just stick around in the bus' list of devices.
* When the appropriate driver calls visorbus_register_visor_driver(), the
- * visor_driver.probe() for the new driver will be called with the new
- * device.
+ * visor_driver.probe() for the new driver will be called with the new device.
*
* Return: 0 if successful, otherwise the negative value returned by
* device_add() indicating the reason for failure
@@ -647,17 +668,15 @@ int create_visor_device(struct visor_device *dev)
/* keep a reference just for us (now 2) */
get_device(&dev->device);
setup_timer(&dev->timer, dev_periodic_work, (unsigned long)dev);
-
/*
- * bus_id must be a unique name with respect to this bus TYPE
- * (NOT bus instance). That's why we need to include the bus
- * number within the name.
+ * bus_id must be a unique name with respect to this bus TYPE (NOT bus
+ * instance). That's why we need to include the bus number within the
+ * name.
*/
err = dev_set_name(&dev->device, "vbus%u:dev%u",
chipset_bus_no, chipset_dev_no);
if (err)
goto err_put;
-
/*
* device_add does this:
* bus_add_device(dev)
@@ -671,14 +690,13 @@ int create_visor_device(struct visor_device *dev)
* if (!drv.probe(dev)) [visordriver_probe_device]
* dev.drv = NULL
*
- * Note that device_add does NOT fail if no driver failed to
- * claim the device. The device will be linked onto
- * bus_type.klist_devices regardless (use bus_for_each_dev).
+ * Note that device_add does NOT fail if no driver failed to claim the
+ * device. The device will be linked onto bus_type.klist_devices
+ * regardless (use bus_for_each_dev).
*/
err = device_add(&dev->device);
if (err < 0)
goto err_put;
-
list_add_tail(&dev->list_all, &list_all_device_instances);
dev->state.created = 1;
visorbus_response(dev, err, CONTROLVM_DEVICE_CREATE);
@@ -695,8 +713,9 @@ void remove_visor_device(struct visor_device *dev)
{
list_del(&dev->list_all);
put_device(&dev->device);
+ if (dev->pending_msg_hdr)
+ visorbus_response(dev, 0, CONTROLVM_DEVICE_DESTROY);
device_unregister(&dev->device);
- visorbus_response(dev, 0, CONTROLVM_DEVICE_DESTROY);
}
static int get_vbus_header_info(struct visorchannel *chan,
@@ -718,14 +737,11 @@ static int get_vbus_header_info(struct visorchannel *chan,
sizeof(*hdr_info));
if (err < 0)
return err;
-
if (hdr_info->struct_bytes < sizeof(struct visor_vbus_headerinfo))
return -EINVAL;
-
if (hdr_info->device_info_struct_bytes <
sizeof(struct visor_vbus_deviceinfo))
return -EINVAL;
-
return 0;
}
@@ -746,11 +762,12 @@ static void write_vbus_chp_info(struct visorchannel *chan,
struct visor_vbus_headerinfo *hdr_info,
struct visor_vbus_deviceinfo *info)
{
- int off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
+ int off;
if (hdr_info->chp_info_offset == 0)
return;
+ off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
visorchannel_write(chan, off, info, sizeof(*info));
}
@@ -771,11 +788,12 @@ static void write_vbus_bus_info(struct visorchannel *chan,
struct visor_vbus_headerinfo *hdr_info,
struct visor_vbus_deviceinfo *info)
{
- int off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
+ int off;
if (hdr_info->bus_info_offset == 0)
return;
+ off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
visorchannel_write(chan, off, info, sizeof(*info));
}
@@ -798,13 +816,12 @@ static void write_vbus_dev_info(struct visorchannel *chan,
struct visor_vbus_deviceinfo *info,
unsigned int devix)
{
- int off =
- (sizeof(struct channel_header) + hdr_info->dev_info_offset) +
- (hdr_info->device_info_struct_bytes * devix);
+ int off;
if (hdr_info->dev_info_offset == 0)
return;
-
+ off = (sizeof(struct channel_header) + hdr_info->dev_info_offset) +
+ (hdr_info->device_info_struct_bytes * devix);
visorchannel_write(chan, off, info, sizeof(*info));
}
@@ -844,7 +861,6 @@ static void publish_vbus_dev_info(struct visor_device *visordev)
if (!visordev->device.driver)
return;
-
bdev = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (!bdev)
return;
@@ -860,14 +876,12 @@ static void publish_vbus_dev_info(struct visor_device *visordev)
* type name
*/
for (i = 0; visordrv->channel_types[i].name; i++) {
- if (memcmp(&visordrv->channel_types[i].guid,
- &visordev->channel_type_guid,
- sizeof(visordrv->channel_types[i].guid)) == 0) {
+ if (guid_equal(&visordrv->channel_types[i].guid,
+ &visordev->channel_type_guid)) {
chan_type_name = visordrv->channel_types[i].name;
break;
}
}
-
bus_device_info_init(&dev_info, chan_type_name, visordrv->name);
write_vbus_dev_info(bdev->visorchannel, hdr_info, &dev_info, dev_no);
write_vbus_chp_info(bdev->visorchannel, hdr_info, &chipset_driverinfo);
@@ -892,36 +906,32 @@ static void publish_vbus_dev_info(struct visor_device *visordev)
*/
static int visordriver_probe_device(struct device *xdev)
{
- int res;
- struct visor_driver *drv;
- struct visor_device *dev;
-
- dev = to_visor_device(xdev);
- drv = to_visor_driver(xdev->driver);
+ int err;
+ struct visor_driver *drv = to_visor_driver(xdev->driver);
+ struct visor_device *dev = to_visor_device(xdev);
mutex_lock(&dev->visordriver_callback_lock);
dev->being_removed = false;
-
- res = drv->probe(dev);
- if (res >= 0) {
- /* success: reference kept via unmatched get_device() */
- get_device(&dev->device);
- publish_vbus_dev_info(dev);
+ err = drv->probe(dev);
+ if (err) {
+ mutex_unlock(&dev->visordriver_callback_lock);
+ return err;
}
-
+ /* success: reference kept via unmatched get_device() */
+ get_device(&dev->device);
+ publish_vbus_dev_info(dev);
mutex_unlock(&dev->visordriver_callback_lock);
- return res;
+ return 0;
}
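
[Editor's note: the rewritten visordriver_probe_device() above drops the callback lock and returns as soon as drv->probe() fails, so the success-only work (the extra get_device() and publish_vbus_dev_info()) never runs on the error path. A small pthread-based userspace sketch of that "unlock early on error" shape, with hypothetical names, follows.]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;

static int fake_probe(int should_fail)	/* stands in for drv->probe(dev) */
{
	return should_fail ? -1 : 0;
}

static int probe_device(int should_fail)
{
	int err;

	pthread_mutex_lock(&cb_lock);
	err = fake_probe(should_fail);
	if (err) {
		pthread_mutex_unlock(&cb_lock);	/* bail out before success work */
		return err;
	}
	/* success-only work (reference counting, publishing) would go here */
	pthread_mutex_unlock(&cb_lock);
	return 0;
}

int main(void)
{
	printf("%d %d\n", probe_device(0), probe_device(1));	/* 0 -1 */
	return 0;
}
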
/*
- * visorbus_register_visor_driver() - registers the provided visor driver
- * for handling one or more visor device
+ * visorbus_register_visor_driver() - registers the provided visor driver for
+ * handling one or more visor device
* types (channel_types)
* @drv: the driver to register
*
- * A visor function driver calls this function to register
- * the driver. The caller MUST fill in the following fields within the
- * #drv structure:
+ * A visor function driver calls this function to register the driver. The
+ * caller MUST fill in the following fields within the #drv structure:
* name, version, owner, channel_types, probe, remove
*
* Here's how the whole Linux bus / driver / device model works.
@@ -967,16 +977,12 @@ int visorbus_register_visor_driver(struct visor_driver *drv)
/* can't register on a nonexistent bus */
if (!initialized)
return -ENODEV;
-
if (!drv->probe)
return -EINVAL;
-
if (!drv->remove)
return -EINVAL;
-
if (!drv->pause)
return -EINVAL;
-
if (!drv->resume)
return -EINVAL;
@@ -985,7 +991,6 @@ int visorbus_register_visor_driver(struct visor_driver *drv)
drv->driver.probe = visordriver_probe_device;
drv->driver.remove = visordriver_remove_device;
drv->driver.owner = drv->owner;
-
/*
* driver_register does this:
* bus_add_driver(drv)
@@ -998,7 +1003,6 @@ int visorbus_register_visor_driver(struct visor_driver *drv)
* if (!drv.probe(dev)) [visordriver_probe_device]
* dev.drv = NULL
*/
-
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
@@ -1019,39 +1023,28 @@ int visorbus_create_instance(struct visor_device *dev)
hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
if (!hdr_info)
return -ENOMEM;
-
dev_set_name(&dev->device, "visorbus%d", id);
dev->device.bus = &visorbus_type;
dev->device.groups = visorbus_groups;
dev->device.release = visorbus_release_busdevice;
-
dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
visorbus_debugfs_dir);
- dev->debugfs_client_bus_info =
- debugfs_create_file("client_bus_info", 0440,
- dev->debugfs_dir, dev,
- &client_bus_info_debugfs_fops);
-
+ dev->debugfs_bus_info = debugfs_create_file("client_bus_info", 0440,
+ dev->debugfs_dir, dev,
+ &bus_info_debugfs_fops);
dev_set_drvdata(&dev->device, dev);
err = get_vbus_header_info(dev->visorchannel, &dev->device, hdr_info);
if (err < 0)
goto err_debugfs_dir;
-
err = device_register(&dev->device);
if (err < 0)
goto err_debugfs_dir;
-
list_add_tail(&dev->list_all, &list_all_bus_instances);
-
dev->state.created = 1;
dev->vbus_hdr_info = (void *)hdr_info;
- write_vbus_chp_info(dev->visorchannel, hdr_info,
- &chipset_driverinfo);
- write_vbus_bus_info(dev->visorchannel, hdr_info,
- &clientbus_driverinfo);
-
+ write_vbus_chp_info(dev->visorchannel, hdr_info, &chipset_driverinfo);
+ write_vbus_bus_info(dev->visorchannel, hdr_info, &clientbus_driverinfo);
visorbus_response(dev, err, CONTROLVM_BUS_CREATE);
-
return 0;
err_debugfs_dir:
@@ -1075,11 +1068,11 @@ void visorbus_remove_instance(struct visor_device *dev)
* successfully been able to trace thru the code to see where/how
* release() gets called. But I know it does.
*/
- visorchannel_destroy(dev->visorchannel);
kfree(dev->vbus_hdr_info);
list_del(&dev->list_all);
+ if (dev->pending_msg_hdr)
+ visorbus_response(dev, 0, CONTROLVM_BUS_DESTROY);
device_unregister(&dev->device);
- visorbus_response(dev, 0, CONTROLVM_BUS_DESTROY);
}
/*
@@ -1090,9 +1083,9 @@ static void remove_all_visor_devices(void)
struct list_head *listentry, *listtmp;
list_for_each_safe(listentry, listtmp, &list_all_device_instances) {
- struct visor_device *dev = list_entry(listentry,
- struct visor_device,
- list_all);
+ struct visor_device *dev;
+
+ dev = list_entry(listentry, struct visor_device, list_all);
remove_visor_device(dev);
}
}
@@ -1131,7 +1124,6 @@ static void resume_state_change_complete(struct visor_device *dev, int status)
return;
dev->resuming = false;
-
/*
* Notify the chipset driver that the resume is complete,
* which will presumably want to send some sort of response to
@@ -1156,7 +1148,7 @@ static int visorchipset_initiate_device_pause_resume(struct visor_device *dev,
bool is_pause)
{
int err;
- struct visor_driver *drv = NULL;
+ struct visor_driver *drv;
/* If no driver associated with the device nothing to pause/resume */
if (!dev->device.driver)
@@ -1177,7 +1169,6 @@ static int visorchipset_initiate_device_pause_resume(struct visor_device *dev,
dev->resuming = true;
err = drv->resume(dev, resume_state_change_complete);
}
-
return err;
}
@@ -1198,7 +1189,6 @@ int visorchipset_device_pause(struct visor_device *dev_info)
dev_info->pausing = false;
return err;
}
-
return 0;
}
@@ -1219,7 +1209,6 @@ int visorchipset_device_resume(struct visor_device *dev_info)
dev_info->resuming = false;
return err;
}
-
return 0;
}
@@ -1228,18 +1217,12 @@ int visorbus_init(void)
int err;
visorbus_debugfs_dir = debugfs_create_dir("visorbus", NULL);
- if (!visorbus_debugfs_dir)
- return -ENOMEM;
-
bus_device_info_init(&clientbus_driverinfo, "clientbus", "visorbus");
-
err = bus_register(&visorbus_type);
if (err < 0)
return err;
-
initialized = true;
bus_device_info_init(&chipset_driverinfo, "chipset", "visorchipset");
-
return 0;
}
@@ -1248,14 +1231,12 @@ void visorbus_exit(void)
struct list_head *listentry, *listtmp;
remove_all_visor_devices();
-
list_for_each_safe(listentry, listtmp, &list_all_bus_instances) {
- struct visor_device *dev = list_entry(listentry,
- struct visor_device,
- list_all);
+ struct visor_device *dev;
+
+ dev = list_entry(listentry, struct visor_device, list_all);
visorbus_remove_instance(dev);
}
-
bus_unregister(&visorbus_type);
initialized = false;
debugfs_remove_recursive(visorbus_debugfs_dir);
diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h
index e878d65ab668..4a8b12d7cfaa 100644
--- a/drivers/staging/unisys/visorbus/visorbus_private.h
+++ b/drivers/staging/unisys/visorbus/visorbus_private.h
@@ -23,25 +23,23 @@
#include "vbuschannel.h"
#include "visorbus.h"
+struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
+ struct visor_device *from);
int visorbus_create_instance(struct visor_device *dev);
void visorbus_remove_instance(struct visor_device *bus_info);
int create_visor_device(struct visor_device *dev_info);
void remove_visor_device(struct visor_device *dev_info);
int visorchipset_device_pause(struct visor_device *dev_info);
int visorchipset_device_resume(struct visor_device *dev_info);
-
void visorbus_response(struct visor_device *p, int response, int controlvm_id);
void visorbus_device_changestate_response(struct visor_device *p, int response,
struct visor_segment_state state);
-
int visorbus_init(void);
void visorbus_exit(void);
/* visorchannel access functions */
struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
- const guid_t *guid);
-struct visorchannel *visorchannel_create_with_lock(u64 physaddr, gfp_t gfp,
- const guid_t *guid);
+ const guid_t *guid, bool needs_lock);
void visorchannel_destroy(struct visorchannel *channel);
int visorchannel_read(struct visorchannel *channel, ulong offset,
void *dest, ulong nbytes);
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index 2a000fee3119..aae16073ba03 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -20,6 +20,7 @@
#include <linux/uuid.h>
#include <linux/io.h>
+#include <linux/slab.h>
#include "visorbus.h"
#include "visorbus_private.h"
@@ -41,8 +42,8 @@ struct visorchannel {
struct channel_header chan_hdr;
guid_t guid;
/*
- * channel creator knows if more than one
- * thread will be inserting or removing
+ * channel creator knows if more than one thread will be inserting or
+ * removing
*/
bool needs_lock;
/* protect head writes in chan_hdr */
@@ -57,6 +58,7 @@ void visorchannel_destroy(struct visorchannel *channel)
{
if (!channel)
return;
+
if (channel->mapped) {
memunmap(channel->mapped);
if (channel->requested)
@@ -122,7 +124,6 @@ int visorchannel_read(struct visorchannel *channel, ulong offset, void *dest,
return -EIO;
memcpy(dest, channel->mapped + offset, nbytes);
-
return 0;
}
@@ -140,9 +141,7 @@ int visorchannel_write(struct visorchannel *channel, ulong offset, void *dest,
memcpy(((char *)(&channel->chan_hdr)) + offset,
dest, copy_size);
}
-
memcpy(channel->mapped + offset, dest, nbytes);
-
return 0;
}
@@ -173,8 +172,8 @@ static int sig_data_offset(struct channel_header *chan_hdr, int q,
}
/*
- * Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back
- * into host memory
+ * Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back into
+ * host memory
*/
#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
visorchannel_write(channel, \
@@ -226,32 +225,25 @@ static int signalremove_inner(struct visorchannel *channel, u32 queue,
error = sig_read_header(channel, queue, &sig_hdr);
if (error)
return error;
-
/* No signals to remove; have caller try again. */
if (sig_hdr.head == sig_hdr.tail)
return -EAGAIN;
-
sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
-
error = sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg);
if (error)
return error;
-
sig_hdr.num_received++;
-
/*
- * For each data field in SIGNAL_QUEUE_HEADER that was modified,
- * update host memory. Required for channel sync.
+ * For each data field in SIGNAL_QUEUE_HEADER that was modified, update
+ * host memory. Required for channel sync.
*/
mb();
-
error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail);
if (error)
return error;
error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received);
if (error)
return error;
-
return 0;
}
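
[Editor's note: signalremove_inner() above (and signalinsert_inner() below) treat each signal queue as a ring: the tail or head advances modulo max_slots, head == tail means empty, and the modified header fields are written back to host memory only after a full memory barrier. The following plain userspace ring buffer shows only the head/tail arithmetic under those assumptions; it is single-threaded, so no barrier or host write-back is needed here.]

#include <stdio.h>

#define MAX_SLOTS 4

struct sigq { unsigned int head, tail; int slot[MAX_SLOTS]; };

static int q_insert(struct sigq *q, int msg)
{
	unsigned int next = (q->head + 1) % MAX_SLOTS;

	if (next == q->tail)
		return -1;		/* full: this insert would overflow */
	q->slot[next] = msg;
	q->head = next;			/* publish the new head last */
	return 0;
}

static int q_remove(struct sigq *q, int *msg)
{
	if (q->head == q->tail)
		return -1;		/* empty: caller should try again later */
	q->tail = (q->tail + 1) % MAX_SLOTS;
	*msg = q->slot[q->tail];
	return 0;
}

int main(void)
{
	struct sigq q = { 0 };
	int v;

	q_insert(&q, 42);
	if (!q_remove(&q, &v))
		printf("%d\n", v);	/* 42 */
	return 0;
}
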
@@ -288,13 +280,12 @@ static bool queue_empty(struct visorchannel *channel, u32 queue)
if (sig_read_header(channel, queue, &sig_hdr))
return true;
-
return (sig_hdr.head == sig_hdr.tail);
}
/**
- * visorchannel_signalempty() - checks if the designated channel/queue
- * contains any messages
+ * visorchannel_signalempty() - checks if the designated channel/queue contains
+ * any messages
* @channel: the channel to query
* @queue: the queue in the channel to query
*
@@ -308,11 +299,9 @@ bool visorchannel_signalempty(struct visorchannel *channel, u32 queue)
if (!channel->needs_lock)
return queue_empty(channel, queue);
-
spin_lock_irqsave(&channel->remove_lock, flags);
rc = queue_empty(channel, queue);
spin_unlock_irqrestore(&channel->remove_lock, flags);
-
return rc;
}
EXPORT_SYMBOL_GPL(visorchannel_signalempty);
@@ -326,7 +315,6 @@ static int signalinsert_inner(struct visorchannel *channel, u32 queue,
err = sig_read_header(channel, queue, &sig_hdr);
if (err)
return err;
-
sig_hdr.head = (sig_hdr.head + 1) % sig_hdr.max_slots;
if (sig_hdr.head == sig_hdr.tail) {
sig_hdr.num_overflows++;
@@ -335,33 +323,28 @@ static int signalinsert_inner(struct visorchannel *channel, u32 queue,
return err;
return -EIO;
}
-
err = sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg);
if (err)
return err;
-
sig_hdr.num_sent++;
-
/*
- * For each data field in SIGNAL_QUEUE_HEADER that was modified,
- * update host memory. Required for channel sync.
+ * For each data field in SIGNAL_QUEUE_HEADER that was modified, update
+ * host memory. Required for channel sync.
*/
mb();
-
err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head);
if (err)
return err;
err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent);
if (err)
return err;
-
return 0;
}
/*
- * visorchannel_create_guts() - creates the struct visorchannel abstraction
- * for a data area in memory, but does NOT modify
- * this data area
+ * visorchannel_create() - creates the struct visorchannel abstraction for a
+ * data area in memory, but does NOT modify this data
+ * area
* @physaddr: physical address of start of channel
* @gfp: gfp_t to use when allocating memory for the data struct
* @guid: GUID that identifies channel type;
@@ -372,9 +355,8 @@ static int signalinsert_inner(struct visorchannel *channel, u32 queue,
* Return: pointer to visorchannel that was created if successful,
* otherwise NULL
*/
-static struct visorchannel *visorchannel_create_guts(u64 physaddr, gfp_t gfp,
- const guid_t *guid,
- bool needs_lock)
+struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
+ const guid_t *guid, bool needs_lock)
{
struct visorchannel *channel;
int err;
@@ -386,37 +368,30 @@ static struct visorchannel *visorchannel_create_guts(u64 physaddr, gfp_t gfp,
channel = kzalloc(sizeof(*channel), gfp);
if (!channel)
return NULL;
-
channel->needs_lock = needs_lock;
spin_lock_init(&channel->insert_lock);
spin_lock_init(&channel->remove_lock);
-
/*
- * Video driver constains the efi framebuffer so it will get a
- * conflict resource when requesting its full mem region. Since
- * we are only using the efi framebuffer for video we can ignore
- * this. Remember that we haven't requested it so we don't try to
- * release later on.
+ * Video driver contains the efi framebuffer so it will get a conflict
+ * resource when requesting its full mem region. Since we are only
+ * using the efi framebuffer for video we can ignore this. Remember that
+ * we haven't requested it so we don't try to release later on.
*/
channel->requested = request_mem_region(physaddr, size, VISOR_DRV_NAME);
if (!channel->requested && !guid_equal(guid, &visor_video_guid))
/* we only care about errors if this is not the video channel */
goto err_destroy_channel;
-
channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
if (!channel->mapped) {
release_mem_region(physaddr, size);
goto err_destroy_channel;
}
-
channel->physaddr = physaddr;
channel->nbytes = size;
-
err = visorchannel_read(channel, 0, &channel->chan_hdr, size);
if (err)
goto err_destroy_channel;
size = (ulong)channel->chan_hdr.size;
-
memunmap(channel->mapped);
if (channel->requested)
release_mem_region(channel->physaddr, channel->nbytes);
@@ -426,13 +401,11 @@ static struct visorchannel *visorchannel_create_guts(u64 physaddr, gfp_t gfp,
if (!channel->requested && !guid_equal(guid, &visor_video_guid))
/* we only care about errors if this is not the video channel */
goto err_destroy_channel;
-
channel->mapped = memremap(channel->physaddr, size, MEMREMAP_WB);
if (!channel->mapped) {
release_mem_region(channel->physaddr, size);
goto err_destroy_channel;
}
-
channel->nbytes = size;
guid_copy(&channel->guid, guid);
return channel;
@@ -442,18 +415,6 @@ err_destroy_channel:
return NULL;
}
-struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
- const guid_t *guid)
-{
- return visorchannel_create_guts(physaddr, gfp, guid, false);
-}
-
-struct visorchannel *visorchannel_create_with_lock(u64 physaddr, gfp_t gfp,
- const guid_t *guid)
-{
- return visorchannel_create_guts(physaddr, gfp, guid, true);
-}
-
/**
* visorchannel_signalinsert() - inserts a message into the designated
* channel/queue
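
[Editor's note: the change above folds visorchannel_create() and visorchannel_create_with_lock() into a single constructor that takes a needs_lock flag: device channels shared between threads pass true and get the insert/remove locks, the bus channel passes false. Below is a hedged userspace sketch of the same "optional locking chosen at construction time" idea, using a pthread mutex instead of spinlocks and made-up names.]

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct chan {
	bool needs_lock;
	pthread_mutex_t lock;	/* only initialized/used when needs_lock is true */
	int pending;
};

static struct chan *chan_create(bool needs_lock)
{
	struct chan *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->needs_lock = needs_lock;
	if (needs_lock)
		pthread_mutex_init(&c->lock, NULL);
	return c;
}

static void chan_insert(struct chan *c, int msg)
{
	if (c->needs_lock)
		pthread_mutex_lock(&c->lock);
	c->pending = msg;
	if (c->needs_lock)
		pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct chan *bus = chan_create(false);	/* single-threaded user */
	struct chan *dev = chan_create(true);	/* shared between threads */

	chan_insert(bus, 1);
	chan_insert(dev, 2);
	printf("%d %d\n", bus->pending, dev->pending);
	free(bus);
	free(dev);
	return 0;
}
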
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 27ecf6fb49fd..fed554a43151 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -27,8 +27,8 @@ static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;
-#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
-#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
+#define POLLJIFFIES_CONTROLVM_FAST 1
+#define POLLJIFFIES_CONTROLVM_SLOW 100
#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
@@ -128,7 +128,6 @@ static ssize_t toolaction_show(struct device *dev,
&tool_action, sizeof(u8));
if (err)
return err;
-
return sprintf(buf, "%u\n", tool_action);
}
@@ -141,7 +140,6 @@ static ssize_t toolaction_store(struct device *dev,
if (kstrtou8(buf, 10, &tool_action))
return -EINVAL;
-
err = visorchannel_write(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
tool_action),
@@ -178,7 +176,6 @@ static ssize_t boottotool_store(struct device *dev,
if (kstrtoint(buf, 10, &val))
return -EINVAL;
-
efi_visor_indication.boot_to_tool = val;
err = visorchannel_write(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
@@ -214,7 +211,6 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
if (kstrtou32(buf, 10, &error))
return -EINVAL;
-
err = visorchannel_write(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
installation_error),
@@ -237,7 +233,6 @@ static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
&text_id, sizeof(u32));
if (err)
return err;
-
return sprintf(buf, "%u\n", text_id);
}
@@ -249,7 +244,6 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
if (kstrtou32(buf, 10, &text_id))
return -EINVAL;
-
err = visorchannel_write(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
installation_text_id),
@@ -272,7 +266,6 @@ static ssize_t remaining_steps_show(struct device *dev,
&remaining_steps, sizeof(u16));
if (err)
return err;
-
return sprintf(buf, "%hu\n", remaining_steps);
}
@@ -285,7 +278,6 @@ static ssize_t remaining_steps_store(struct device *dev,
if (kstrtou16(buf, 10, &remaining_steps))
return -EINVAL;
-
err = visorchannel_write(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
installation_remaining_steps),
@@ -296,43 +288,6 @@ static ssize_t remaining_steps_store(struct device *dev,
}
static DEVICE_ATTR_RW(remaining_steps);
-struct visor_busdev {
- u32 bus_no;
- u32 dev_no;
-};
-
-static int match_visorbus_dev_by_id(struct device *dev, void *data)
-{
- struct visor_device *vdev = to_visor_device(dev);
- struct visor_busdev *id = data;
-
- if ((vdev->chipset_bus_no == id->bus_no) &&
- (vdev->chipset_dev_no == id->dev_no))
- return 1;
-
- return 0;
-}
-
-struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
- struct visor_device *from)
-{
- struct device *dev;
- struct device *dev_start = NULL;
- struct visor_device *vdev = NULL;
- struct visor_busdev id = {
- .bus_no = bus_no,
- .dev_no = dev_no
- };
-
- if (from)
- dev_start = &from->device;
- dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
- match_visorbus_dev_by_id);
- if (dev)
- vdev = to_visor_device(dev);
- return vdev;
-}
-
static void controlvm_init_response(struct controlvm_message *msg,
struct controlvm_message_header *msg_hdr,
int response)
@@ -374,18 +329,13 @@ static int chipset_init(struct controlvm_message *inmsg)
goto out_respond;
}
chipset_inited = 1;
-
/*
* Set features to indicate we support parahotplug (if Command also
- * supports it).
+ * supports it). Set the "reply" bit so Command knows this is a
+ * features-aware driver.
*/
features = inmsg->cmd.init_chipset.features &
VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
-
- /*
- * Set the "reply" bit so Command knows this is a features-aware
- * driver.
- */
features |= VISOR_CHIPSET_FEATURE_REPLY;
out_respond:
@@ -396,20 +346,17 @@ out_respond:
}
static int controlvm_respond(struct controlvm_message_header *msg_hdr,
- int response,
- struct visor_segment_state *state)
+ int response, struct visor_segment_state *state)
{
struct controlvm_message outmsg;
controlvm_init_response(&outmsg, msg_hdr, response);
if (outmsg.hdr.flags.test_message == 1)
return -EINVAL;
-
if (state) {
outmsg.cmd.device_change_state.state = *state;
outmsg.cmd.device_change_state.flags.phys_device = 1;
}
-
return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
@@ -435,13 +382,11 @@ static int save_crash_message(struct controlvm_message *msg,
"failed to read message count\n");
return err;
}
-
if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
dev_err(&chipset_dev->acpi_device->dev,
"invalid number of messages\n");
return -EIO;
}
-
err = visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
saved_crash_message_offset),
@@ -451,13 +396,11 @@ static int save_crash_message(struct controlvm_message *msg,
"failed to read offset\n");
return err;
}
-
switch (cr_type) {
case CRASH_DEV:
local_crash_msg_offset += sizeof(struct controlvm_message);
err = visorchannel_write(chipset_dev->controlvm_channel,
- local_crash_msg_offset,
- msg,
+ local_crash_msg_offset, msg,
sizeof(struct controlvm_message));
if (err) {
dev_err(&chipset_dev->acpi_device->dev,
@@ -467,8 +410,7 @@ static int save_crash_message(struct controlvm_message *msg,
break;
case CRASH_BUS:
err = visorchannel_write(chipset_dev->controlvm_channel,
- local_crash_msg_offset,
- msg,
+ local_crash_msg_offset, msg,
sizeof(struct controlvm_message));
if (err) {
dev_err(&chipset_dev->acpi_device->dev,
@@ -488,33 +430,25 @@ static int controlvm_responder(enum controlvm_id cmd_id,
struct controlvm_message_header *pending_msg_hdr,
int response)
{
- if (!pending_msg_hdr)
- return -EIO;
-
if (pending_msg_hdr->id != (u32)cmd_id)
return -EINVAL;
return controlvm_respond(pending_msg_hdr, response, NULL);
}
-static int device_changestate_responder(
- enum controlvm_id cmd_id,
- struct visor_device *p, int response,
- struct visor_segment_state response_state)
+static int device_changestate_responder(enum controlvm_id cmd_id,
+ struct visor_device *p, int response,
+ struct visor_segment_state state)
{
struct controlvm_message outmsg;
- if (!p->pending_msg_hdr)
- return -EIO;
if (p->pending_msg_hdr->id != cmd_id)
return -EINVAL;
controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
-
outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
- outmsg.cmd.device_change_state.state = response_state;
-
+ outmsg.cmd.device_change_state.state = state;
return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
@@ -522,64 +456,55 @@ static int device_changestate_responder(
static int visorbus_create(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr = NULL;
+ struct controlvm_message_header *pmsg_hdr;
u32 bus_no = cmd->create_bus.bus_no;
struct visor_device *bus_info;
struct visorchannel *visorchannel;
int err;
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
- if (bus_info && (bus_info->state.created == 1)) {
+ if (bus_info && bus_info->state.created == 1) {
dev_err(&chipset_dev->acpi_device->dev,
"failed %s: already exists\n", __func__);
err = -EEXIST;
goto err_respond;
}
-
bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
if (!bus_info) {
err = -ENOMEM;
goto err_respond;
}
-
INIT_LIST_HEAD(&bus_info->list_all);
bus_info->chipset_bus_no = bus_no;
bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
-
if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
err = save_crash_message(inmsg, CRASH_BUS);
if (err)
goto err_free_bus_info;
}
-
if (inmsg->hdr.flags.response_expected == 1) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
- GFP_KERNEL);
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
err = -ENOMEM;
goto err_free_bus_info;
}
-
memcpy(pmsg_hdr, &inmsg->hdr,
sizeof(struct controlvm_message_header));
bus_info->pending_msg_hdr = pmsg_hdr;
}
-
visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
GFP_KERNEL,
- &cmd->create_bus.bus_data_type_guid);
+ &cmd->create_bus.bus_data_type_guid,
+ false);
if (!visorchannel) {
err = -ENOMEM;
goto err_free_pending_msg;
}
-
bus_info->visorchannel = visorchannel;
-
/* Response will be handled by visorbus_create_instance on success */
err = visorbus_create_instance(bus_info);
if (err)
goto err_destroy_channel;
-
return 0;
err_destroy_channel:
@@ -599,9 +524,8 @@ err_respond:
static int visorbus_destroy(struct controlvm_message *inmsg)
{
- struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr = NULL;
- u32 bus_no = cmd->destroy_bus.bus_no;
+ struct controlvm_message_header *pmsg_hdr;
+ u32 bus_no = inmsg->cmd.destroy_bus.bus_no;
struct visor_device *bus_info;
int err;
@@ -625,12 +549,10 @@ static int visorbus_destroy(struct controlvm_message *inmsg)
err = -ENOMEM;
goto err_respond;
}
-
memcpy(pmsg_hdr, &inmsg->hdr,
sizeof(struct controlvm_message_header));
bus_info->pending_msg_hdr = pmsg_hdr;
}
-
/* Response will be handled by visorbus_remove_instance */
visorbus_remove_instance(bus_info);
return 0;
@@ -646,51 +568,33 @@ static const guid_t *parser_id_get(struct parser_context *ctx)
return &ctx->data.id;
}
-static void *parser_string_get(struct parser_context *ctx)
+static void *parser_string_get(u8 *pscan, int nscan)
{
- u8 *pscan;
- unsigned long nscan;
int value_length;
void *value;
- int i;
- pscan = ctx->curr;
- if (!pscan)
- return NULL;
- nscan = ctx->bytes_remaining;
if (nscan == 0)
return NULL;
- for (i = 0, value_length = -1; i < nscan; i++)
- if (pscan[i] == '\0') {
- value_length = i;
- break;
- }
- /* '\0' was not included in the length */
- if (value_length < 0)
- value_length = nscan;
-
- value = kmalloc(value_length + 1, GFP_KERNEL);
+ value_length = strnlen(pscan, nscan);
+ value = kzalloc(value_length + 1, GFP_KERNEL);
if (!value)
return NULL;
if (value_length > 0)
memcpy(value, pscan, value_length);
- ((u8 *)(value))[value_length] = '\0';
return value;
}
static void *parser_name_get(struct parser_context *ctx)
{
- struct visor_controlvm_parameters_header *phdr = NULL;
+ struct visor_controlvm_parameters_header *phdr;
phdr = &ctx->data;
-
if (phdr->name_offset + phdr->name_length > ctx->param_bytes)
return NULL;
-
ctx->curr = (char *)&phdr + phdr->name_offset;
ctx->bytes_remaining = phdr->name_length;
- return parser_string_get(ctx);
+ return parser_string_get(ctx->curr, phdr->name_length);
}
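
[Editor's note: parser_string_get() now takes the scan buffer and its length directly and relies on strnlen()/kzalloc() instead of an open-coded scan, so a parameter string that is not NUL-terminated within nscan bytes is still copied safely and gets its terminator from the zeroed allocation. A userspace equivalent of that bounded duplication, assuming only POSIX strnlen(), is sketched below.]

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Duplicate at most nscan bytes of a possibly unterminated buffer. */
static char *bounded_strdup(const char *pscan, size_t nscan)
{
	size_t len;
	char *value;

	if (nscan == 0)
		return NULL;
	len = strnlen(pscan, nscan);
	value = calloc(1, len + 1);	/* calloc supplies the '\0', like kzalloc */
	if (!value)
		return NULL;
	memcpy(value, pscan, len);
	return value;
}

int main(void)
{
	char raw[4] = { 'v', 'b', 'u', 's' };	/* no terminator in the buffer */
	char *s = bounded_strdup(raw, sizeof(raw));

	printf("%s\n", s ? s : "(null)");	/* vbus */
	free(s);
	return 0;
}
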
static int visorbus_configure(struct controlvm_message *inmsg,
@@ -715,20 +619,16 @@ static int visorbus_configure(struct controlvm_message *inmsg,
err = -EIO;
goto err_respond;
}
-
- err = visorchannel_set_clientpartition
- (bus_info->visorchannel,
- cmd->configure_bus.guest_handle);
+ err = visorchannel_set_clientpartition(bus_info->visorchannel,
+ cmd->configure_bus.guest_handle);
if (err)
goto err_respond;
-
if (parser_ctx) {
const guid_t *partition_guid = parser_id_get(parser_ctx);
guid_copy(&bus_info->partition_guid, partition_guid);
bus_info->name = parser_name_get(parser_ctx);
}
-
if (inmsg->hdr.flags.response_expected == 1)
controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return 0;
@@ -744,10 +644,10 @@ err_respond:
static int visorbus_device_create(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr = NULL;
+ struct controlvm_message_header *pmsg_hdr;
u32 bus_no = cmd->create_device.bus_no;
u32 dev_no = cmd->create_device.dev_no;
- struct visor_device *dev_info = NULL;
+ struct visor_device *dev_info;
struct visor_device *bus_info;
struct visorchannel *visorchannel;
int err;
@@ -765,9 +665,8 @@ static int visorbus_device_create(struct controlvm_message *inmsg)
err = -EINVAL;
goto err_respond;
}
-
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
- if (dev_info && (dev_info->state.created == 1)) {
+ if (dev_info && dev_info->state.created == 1) {
dev_err(&chipset_dev->acpi_device->dev,
"failed to get bus by id: %d/%d\n", bus_no, dev_no);
err = -EEXIST;
@@ -779,16 +678,14 @@ static int visorbus_device_create(struct controlvm_message *inmsg)
err = -ENOMEM;
goto err_respond;
}
-
dev_info->chipset_bus_no = bus_no;
dev_info->chipset_dev_no = dev_no;
guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
dev_info->device.parent = &bus_info->device;
-
- visorchannel =
- visorchannel_create_with_lock(cmd->create_device.channel_addr,
- GFP_KERNEL,
- &cmd->create_device.data_type_guid);
+ visorchannel = visorchannel_create(cmd->create_device.channel_addr,
+ GFP_KERNEL,
+ &cmd->create_device.data_type_guid,
+ true);
if (!visorchannel) {
dev_err(&chipset_dev->acpi_device->dev,
"failed to create visorchannel: %d/%d\n",
@@ -797,20 +694,20 @@ static int visorbus_device_create(struct controlvm_message *inmsg)
goto err_free_dev_info;
}
dev_info->visorchannel = visorchannel;
- guid_copy(&dev_info->channel_type_guid, &cmd->create_device.data_type_guid);
- if (guid_equal(&cmd->create_device.data_type_guid, &visor_vhba_channel_guid)) {
+ guid_copy(&dev_info->channel_type_guid,
+ &cmd->create_device.data_type_guid);
+ if (guid_equal(&cmd->create_device.data_type_guid,
+ &visor_vhba_channel_guid)) {
err = save_crash_message(inmsg, CRASH_DEV);
if (err)
goto err_destroy_visorchannel;
}
-
if (inmsg->hdr.flags.response_expected == 1) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
err = -ENOMEM;
goto err_destroy_visorchannel;
}
-
memcpy(pmsg_hdr, &inmsg->hdr,
sizeof(struct controlvm_message_header));
dev_info->pending_msg_hdr = pmsg_hdr;
@@ -837,7 +734,7 @@ err_respond:
static int visorbus_device_changestate(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr = NULL;
+ struct controlvm_message_header *pmsg_hdr;
u32 bus_no = cmd->device_change_state.bus_no;
u32 dev_no = cmd->device_change_state.dev_no;
struct visor_segment_state state = cmd->device_change_state.state;
@@ -858,18 +755,17 @@ static int visorbus_device_changestate(struct controlvm_message *inmsg)
err = -EIO;
goto err_respond;
}
+
if (inmsg->hdr.flags.response_expected == 1) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
err = -ENOMEM;
goto err_respond;
}
-
memcpy(pmsg_hdr, &inmsg->hdr,
sizeof(struct controlvm_message_header));
dev_info->pending_msg_hdr = pmsg_hdr;
}
-
if (state.alive == segment_state_running.alive &&
state.operating == segment_state_running.operating)
/* Response will be sent from visorchipset_device_resume */
@@ -884,7 +780,6 @@ static int visorbus_device_changestate(struct controlvm_message *inmsg)
err = visorchipset_device_pause(dev_info);
if (err)
goto err_respond;
-
return 0;
err_respond:
@@ -897,7 +792,7 @@ err_respond:
static int visorbus_device_destroy(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr = NULL;
+ struct controlvm_message_header *pmsg_hdr;
u32 bus_no = cmd->destroy_device.bus_no;
u32 dev_no = cmd->destroy_device.dev_no;
struct visor_device *dev_info;
@@ -928,7 +823,6 @@ static int visorbus_device_destroy(struct controlvm_message *inmsg)
sizeof(struct controlvm_message_header));
dev_info->pending_msg_hdr = pmsg_hdr;
}
-
kfree(dev_info->name);
remove_visor_device(dev_info);
return 0;
@@ -995,11 +889,9 @@ static struct parahotplug_request *parahotplug_request_create(
req = kmalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return NULL;
-
req->id = parahotplug_next_id();
req->expiration = parahotplug_next_expiration();
req->msg = *msg;
-
return req;
}
@@ -1031,13 +923,12 @@ static int parahotplug_request_complete(int id, u16 active)
{
struct list_head *pos;
struct list_head *tmp;
+ struct parahotplug_request *req;
spin_lock(&parahotplug_request_list_lock);
-
/* Look for a request matching "id". */
list_for_each_safe(pos, tmp, &parahotplug_request_list) {
- struct parahotplug_request *req =
- list_entry(pos, struct parahotplug_request, list);
+ req = list_entry(pos, struct parahotplug_request, list);
if (req->id == id) {
/*
* Found a match. Remove it from the list and
@@ -1054,7 +945,6 @@ static int parahotplug_request_complete(int id, u16 active)
return 0;
}
}
-
spin_unlock(&parahotplug_request_list_lock);
return -EINVAL;
}
@@ -1081,7 +971,6 @@ static ssize_t devicedisabled_store(struct device *dev,
if (kstrtouint(buf, 10, &id))
return -EINVAL;
-
err = parahotplug_request_complete(id, 0);
if (err < 0)
return err;
@@ -1110,7 +999,6 @@ static ssize_t deviceenabled_store(struct device *dev,
if (kstrtouint(buf, 10, &id))
return -EINVAL;
-
parahotplug_request_complete(id, 1);
return count;
}
@@ -1158,9 +1046,9 @@ static int parahotplug_request_kickoff(struct parahotplug_request *req)
{
struct controlvm_message_packet *cmd = &req->msg.cmd;
char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
- env_func[40];
- char *envp[] = {
- env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
+ env_func[40];
+ char *envp[] = { env_cmd, env_id, env_state, env_bus, env_dev,
+ env_func, NULL
};
sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
@@ -1173,7 +1061,6 @@ static int parahotplug_request_kickoff(struct parahotplug_request *req)
cmd->device_change_state.dev_no >> 3);
sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
cmd->device_change_state.dev_no & 0x7);
-
return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
KOBJ_CHANGE, envp);
}
@@ -1191,7 +1078,6 @@ static int parahotplug_process_message(struct controlvm_message *inmsg)
req = parahotplug_request_create(inmsg);
if (!req)
return -ENOMEM;
-
/*
* For enable messages, just respond with success right away, we don't
* need to wait to see if the enable was successful.
@@ -1205,7 +1091,6 @@ static int parahotplug_process_message(struct controlvm_message *inmsg)
parahotplug_request_destroy(req);
return 0;
}
-
/*
* For disable messages, add the request to the request list before
* kicking off the udev script. It won't get responded to until the
@@ -1214,7 +1099,6 @@ static int parahotplug_process_message(struct controlvm_message *inmsg)
spin_lock(&parahotplug_request_list_lock);
list_add_tail(&req->list, &parahotplug_request_list);
spin_unlock(&parahotplug_request_list_lock);
-
err = parahotplug_request_kickoff(req);
if (err)
goto err_respond;
@@ -1237,12 +1121,9 @@ static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
{
int res;
- res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
- KOBJ_ONLINE);
-
+ res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, KOBJ_ONLINE);
if (msg_hdr->flags.response_expected)
controlvm_respond(msg_hdr, res, NULL);
-
return res;
}
@@ -1262,10 +1143,8 @@ static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
KOBJ_CHANGE, envp);
-
if (msg_hdr->flags.response_expected)
controlvm_respond(msg_hdr, res, NULL);
-
return res;
}
@@ -1279,11 +1158,10 @@ static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
{
int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
- KOBJ_OFFLINE);
+ KOBJ_OFFLINE);
if (msg_hdr->flags.response_expected)
controlvm_respond(msg_hdr, res, NULL);
-
return res;
}
@@ -1296,17 +1174,15 @@ static int unisys_vmcall(unsigned long tuple, unsigned long param)
reg_ebx = param & 0xFFFFFFFF;
reg_ecx = param >> 32;
-
cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
if (!(cpuid_ecx & 0x80000000))
return -EPERM;
-
__asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
- "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
+ "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
if (result)
goto error;
-
return 0;
+
/* Need to convert from VMCALL error codes to Linux */
error:
switch (result) {
@@ -1330,8 +1206,8 @@ static int controlvm_channel_create(struct visorchipset_device *dev)
if (err)
return err;
addr = dev->controlvm_params.address;
- chan = visorchannel_create_with_lock(addr, GFP_KERNEL,
- &visor_controlvm_channel_guid);
+ chan = visorchannel_create(addr, GFP_KERNEL,
+ &visor_controlvm_channel_guid, true);
if (!chan)
return -ENOMEM;
dev->controlvm_channel = chan;
@@ -1350,9 +1226,7 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
msg.hdr.id = CONTROLVM_CHIPSET_INIT;
msg.cmd.init_chipset.bus_count = 23;
msg.cmd.init_chipset.switch_count = 0;
-
chipset_init(&msg);
-
/* get saved message count */
if (visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
@@ -1362,13 +1236,10 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
"failed to read channel\n");
return;
}
-
if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- dev_err(&chipset_dev->acpi_device->dev,
- "invalid count\n");
+ dev_err(&chipset_dev->acpi_device->dev, "invalid count\n");
return;
}
-
/* get saved crash message offset */
if (visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
@@ -1378,7 +1249,6 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
"failed to read channel\n");
return;
}
-
/* read create device message for storage bus offset */
if (visorchannel_read(chipset_dev->controlvm_channel,
local_crash_msg_offset,
@@ -1388,7 +1258,6 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
"failed to read channel\n");
return;
}
-
/* read create device message for storage device */
if (visorchannel_read(chipset_dev->controlvm_channel,
local_crash_msg_offset +
@@ -1399,7 +1268,6 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
"failed to read channel\n");
return;
}
-
/* reuse IOVM create bus message */
if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
dev_err(&chipset_dev->acpi_device->dev,
@@ -1407,7 +1275,6 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
return;
}
visorbus_create(&local_crash_bus_msg);
-
/* reuse create device message for storage device */
if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
dev_err(&chipset_dev->acpi_device->dev,
@@ -1420,8 +1287,10 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
void visorbus_response(struct visor_device *bus_info, int response,
int controlvm_id)
{
- controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
+ if (!bus_info->pending_msg_hdr)
+ return;
+ controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
kfree(bus_info->pending_msg_hdr);
bus_info->pending_msg_hdr = NULL;
}
@@ -1430,9 +1299,11 @@ void visorbus_device_changestate_response(struct visor_device *dev_info,
int response,
struct visor_segment_state state)
{
- device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
- dev_info, response, state);
+ if (!dev_info->pending_msg_hdr)
+ return;
+ device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, dev_info,
+ response, state);
kfree(dev_info->pending_msg_hdr);
dev_info->pending_msg_hdr = NULL;
}
@@ -1451,12 +1322,11 @@ static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
void *mapping;
*retry = false;
-
/* alloc an extra byte to ensure payload is \0 terminated */
allocbytes = bytes + 1 + (sizeof(struct parser_context) -
sizeof(struct visor_controlvm_parameters_header));
- if ((chipset_dev->controlvm_payload_bytes_buffered + bytes)
- > MAX_CONTROLVM_PAYLOAD_BYTES) {
+ if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) >
+ MAX_CONTROLVM_PAYLOAD_BYTES) {
*retry = true;
return NULL;
}
@@ -1465,7 +1335,6 @@ static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
*retry = true;
return NULL;
}
-
ctx->allocbytes = allocbytes;
ctx->param_bytes = bytes;
mapping = memremap(addr, bytes, MEMREMAP_WB);
@@ -1475,7 +1344,6 @@ static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
memunmap(mapping);
ctx->byte_stream = true;
chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
-
return ctx;
err_finish_ctx:
@@ -1508,14 +1376,13 @@ static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
/* create parsing context if necessary */
parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
parm_bytes = inmsg.hdr.payload_bytes;
-
/*
* Parameter and channel addresses within test messages actually lie
* within our OS-controlled memory. We need to know that, because it
* makes a difference in how we compute the virtual address.
*/
if (parm_bytes) {
- bool retry = false;
+ bool retry;
parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry);
if (!parser_ctx && retry)
@@ -1526,7 +1393,6 @@ static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
CONTROLVM_QUEUE_ACK, &ackmsg);
if (err)
return err;
-
switch (inmsg.hdr.id) {
case CONTROLVM_CHIPSET_INIT:
err = chipset_init(&inmsg);
@@ -1580,7 +1446,6 @@ static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
-CONTROLVM_RESP_ID_UNKNOWN, NULL);
break;
}
-
if (parser_ctx) {
parser_done(parser_ctx);
parser_ctx = NULL;
@@ -1599,14 +1464,13 @@ static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
static int read_controlvm_event(struct controlvm_message *msg)
{
int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_EVENT, msg);
+ CONTROLVM_QUEUE_EVENT, msg);
+
if (err)
return err;
-
/* got a message */
if (msg->hdr.flags.test_message == 1)
return -EINVAL;
-
return 0;
}
@@ -1620,14 +1484,12 @@ static void parahotplug_process_list(void)
struct list_head *tmp;
spin_lock(&parahotplug_request_list_lock);
-
list_for_each_safe(pos, tmp, &parahotplug_request_list) {
struct parahotplug_request *req =
list_entry(pos, struct parahotplug_request, list);
if (!time_after_eq(jiffies, req->expiration))
continue;
-
list_del(pos);
if (req->msg.hdr.flags.response_expected)
controlvm_respond(
@@ -1636,7 +1498,6 @@ static void parahotplug_process_list(void)
&req->msg.cmd.device_change_state.state);
parahotplug_request_destroy(req);
}
-
spin_unlock(&parahotplug_request_list_lock);
}
@@ -1652,10 +1513,8 @@ static void controlvm_periodic_work(struct work_struct *work)
CONTROLVM_QUEUE_RESPONSE,
&inmsg);
} while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));
-
if (err != -EAGAIN)
goto schedule_out;
-
if (chipset_dev->controlvm_pending_msg_valid) {
/*
* we throttled processing of a prior msg, so try to process
@@ -1667,7 +1526,6 @@ static void controlvm_periodic_work(struct work_struct *work)
} else {
err = read_controlvm_event(&inmsg);
}
-
while (!err) {
chipset_dev->most_recent_message_jiffies = jiffies;
err = handle_command(inmsg,
@@ -1681,7 +1539,6 @@ static void controlvm_periodic_work(struct work_struct *work)
err = read_controlvm_event(&inmsg);
}
-
/* parahotplug_worker */
parahotplug_process_list();
@@ -1697,17 +1554,12 @@ schedule_out:
* it's been longer than MIN_IDLE_SECONDS since we processed
* our last controlvm message; slow down the polling
*/
- if (chipset_dev->poll_jiffies !=
- POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
- chipset_dev->poll_jiffies =
- POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+ if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_SLOW)
+ chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_SLOW;
} else {
- if (chipset_dev->poll_jiffies !=
- POLLJIFFIES_CONTROLVMCHANNEL_FAST)
- chipset_dev->poll_jiffies =
- POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_FAST)
+ chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
}
-
schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
chipset_dev->poll_jiffies);
}
@@ -1720,20 +1572,16 @@ static int visorchipset_init(struct acpi_device *acpi_device)
chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
if (!chipset_dev)
goto error;
-
err = controlvm_channel_create(chipset_dev);
if (err)
goto error_free_chipset_dev;
-
acpi_device->driver_data = chipset_dev;
chipset_dev->acpi_device = acpi_device;
- chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
-
+ chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
visorchipset_dev_groups);
if (err < 0)
goto error_destroy_channel;
-
controlvm_channel = chipset_dev->controlvm_channel;
if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
&chipset_dev->acpi_device->dev,
@@ -1743,7 +1591,6 @@ static int visorchipset_init(struct acpi_device *acpi_device)
VISOR_CONTROLVM_CHANNEL_VERSIONID,
VISOR_CHANNEL_SIGNATURE))
goto error_delete_groups;
-
/* if booting in a crash kernel */
if (is_kdump_kernel())
INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
@@ -1751,16 +1598,13 @@ static int visorchipset_init(struct acpi_device *acpi_device)
else
INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
controlvm_periodic_work);
-
chipset_dev->most_recent_message_jiffies = jiffies;
- chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
chipset_dev->poll_jiffies);
-
err = visorbus_init();
if (err < 0)
goto error_cancel_work;
-
return 0;
error_cancel_work:
@@ -1787,10 +1631,8 @@ static int visorchipset_exit(struct acpi_device *acpi_device)
cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
visorchipset_dev_groups);
-
visorchannel_destroy(chipset_dev->controlvm_channel);
kfree(chipset_dev);
-
return 0;
}
@@ -1832,11 +1674,9 @@ static int __init init_unisys(void)
if (!visorutil_spar_detect())
return -ENODEV;
-
result = acpi_bus_register_driver(&unisys_acpi_driver);
if (result)
return -ENODEV;
-
pr_info("Unisys Visorchipset Driver Loaded.\n");
return 0;
};
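
The parser_string_get() rewrite above replaces an open-coded scan for '\0' with strnlen() plus kzalloc(), which bounds the copy and guarantees NUL termination in one step. A minimal userspace sketch of the same bounded-copy technique follows; calloc() stands in for the kernel allocator purely for illustration.

#include <stdlib.h>
#include <string.h>

/*
 * Copy at most nscan bytes from pscan into a freshly allocated,
 * NUL-terminated buffer.  strnlen() stops at the first '\0' or at
 * nscan, whichever comes first, so the source need not be
 * NUL-terminated; calloc() zero-fills, so the terminator is implicit.
 */
static void *string_get(const char *pscan, size_t nscan)
{
	size_t value_length;
	char *value;

	if (nscan == 0)
		return NULL;
	value_length = strnlen(pscan, nscan);
	value = calloc(1, value_length + 1);
	if (!value)
		return NULL;
	memcpy(value, pscan, value_length);
	return value;
}
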
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 419dba89af06..0bcd3acb7b0c 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/idr.h>
+#include <linux/module.h>
#include <linux/seq_file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -39,7 +40,8 @@ static struct visor_channeltype_descriptor visorhba_channel_types[] = {
/* Note that the only channel type we expect to be reported by the
* bus driver is the VISOR_VHBA channel.
*/
- { VISOR_VHBA_CHANNEL_GUID, "sparvhba" },
+ { VISOR_VHBA_CHANNEL_GUID, "sparvhba", sizeof(struct channel_header),
+ VISOR_VHBA_CHANNEL_VERSIONID },
{}
};
@@ -818,9 +820,9 @@ static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
/* Do not log errors for disk-not-present inquiries */
- if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
+ if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
(host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
- (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
+ cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)
return;
/* Okay see what our error_count is here.... */
vdisk = scsidev->hostdata;
@@ -868,8 +870,8 @@ static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
struct visordisk_info *vdisk;
scsidev = scsicmd->device;
- if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
- (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
+ if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
+ cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN) {
if (cmdrsp->scsi.no_disk_result == 0)
return;
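
The channel-type table above now supplies two extra initializers per entry: an expected minimum channel size (sizeof(struct channel_header)) and a channel version id that visorbus can check before binding a driver. Below is a hedged sketch of the descriptor shape those initializers imply; the member names and sizes are assumptions for illustration only, the real definition lives in the visorbus headers.

#include <stdint.h>

typedef struct { uint8_t b[16]; } guid_t;	/* stand-in for the kernel guid_t */

/*
 * Hypothetical layout implied by the initializers above; the real
 * struct visor_channeltype_descriptor may name its members differently.
 */
struct channeltype_desc {
	guid_t guid;		/* channel-type GUID to match on */
	const char *name;	/* device name exposed for the match */
	uint64_t min_bytes;	/* minimum acceptable channel size */
	uint32_t version;	/* expected channel version id */
};

static const struct channeltype_desc example_types[] = {
	{ { { 0 } }, "sparvhba", 48, 2 },	/* size/version illustrative only */
	{ }					/* sentinel terminator */
};
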
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 9d8cbc52de8b..450f003743c0 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -23,6 +23,7 @@
#include <linux/fb.h>
#include <linux/input.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/uuid.h>
#include "visorbus.h"
@@ -711,8 +712,9 @@ out:
/* GUIDS for all channel types supported by this driver. */
static struct visor_channeltype_descriptor visorinput_channel_types[] = {
- { VISOR_KEYBOARD_CHANNEL_GUID, "keyboard"},
- { VISOR_MOUSE_CHANNEL_GUID, "mouse"},
+ { VISOR_KEYBOARD_CHANNEL_GUID, "keyboard",
+ sizeof(struct channel_header), 0 },
+ { VISOR_MOUSE_CHANNEL_GUID, "mouse", sizeof(struct channel_header), 0 },
{}
};
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index dc390eae2960..735d7e5fa86b 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -20,6 +20,7 @@
#include <linux/debugfs.h>
#include <linux/etherdevice.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
@@ -48,7 +49,8 @@ static struct visor_channeltype_descriptor visornic_channel_types[] = {
/* Note that the only channel type we expect to be reported by the
* bus driver is the VISOR_VNIC channel.
*/
- { VISOR_VNIC_CHANNEL_GUID, "ultravnic" },
+ { VISOR_VNIC_CHANNEL_GUID, "ultravnic", sizeof(struct channel_header),
+ VISOR_VNIC_CHANNEL_VERSIONID },
{}
};
MODULE_DEVICE_TABLE(visorbus, visornic_channel_types);
@@ -899,7 +901,7 @@ static int visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- if ((len < ETH_MIN_PACKET_SIZE) &&
+ if (len < ETH_MIN_PACKET_SIZE &&
((skb_end_pointer(skb) - skb->data) >= ETH_MIN_PACKET_SIZE)) {
/* pad the packet out to minimum size */
padlen = ETH_MIN_PACKET_SIZE - len;
@@ -1450,7 +1452,7 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
rcu_read_lock();
for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
/* Only consider netdevs that are visornic, and are open */
- if ((dev->netdev_ops != &visornic_dev_ops) ||
+ if (dev->netdev_ops != &visornic_dev_ops ||
(!netif_queue_stopped(dev)))
continue;
@@ -1680,7 +1682,7 @@ static void service_resp_queue(struct uiscmdrsp *cmdrsp,
/* only call queue wake if we stopped it */
netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
/* ASSERT netdev == vnicinfo->netdev; */
- if ((netdev == devdata->netdev) &&
+ if (netdev == devdata->netdev &&
netif_queue_stopped(netdev)) {
/* check if we have crossed the lower watermark
* for netif_wake_queue()
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
index 4b9302703b36..eeac4f0cb2c6 100644
--- a/drivers/staging/vboxvideo/vbox_drv.h
+++ b/drivers/staging/vboxvideo/vbox_drv.h
@@ -137,8 +137,8 @@ struct vbox_connector {
char name[32];
struct vbox_crtc *vbox_crtc;
struct {
- u16 width;
- u16 height;
+ u32 width;
+ u32 height;
bool disconnected;
} mode_hint;
};
@@ -150,8 +150,8 @@ struct vbox_crtc {
unsigned int crtc_id;
u32 fb_offset;
bool cursor_enabled;
- u16 x_hint;
- u16 y_hint;
+ u32 x_hint;
+ u32 y_hint;
};
struct vbox_encoder {
diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/staging/vboxvideo/vbox_irq.c
index 3ca8bec62ac4..74abdf02d9fd 100644
--- a/drivers/staging/vboxvideo/vbox_irq.c
+++ b/drivers/staging/vboxvideo/vbox_irq.c
@@ -150,8 +150,8 @@ static void vbox_update_mode_hints(struct vbox_private *vbox)
disconnected = !(hints->enabled);
crtc_id = vbox_conn->vbox_crtc->crtc_id;
- vbox_conn->mode_hint.width = hints->cx & 0x8fff;
- vbox_conn->mode_hint.height = hints->cy & 0x8fff;
+ vbox_conn->mode_hint.width = hints->cx;
+ vbox_conn->mode_hint.height = hints->cy;
vbox_conn->vbox_crtc->x_hint = hints->dx;
vbox_conn->vbox_crtc->y_hint = hints->dy;
vbox_conn->mode_hint.disconnected = disconnected;
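
Widening the mode-hint fields from u16 to u32 and dropping the '& 0x8fff' masks matters because the old mask silently cleared bits 12-14 of the host-supplied sizes, and a u16 x/y hint could never match the -1 "no hint" check used later in vbox_mode.c. A standalone sketch of the truncation the change avoids:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cx = 28672;			/* 0x7000: bits 12-14 set */
	uint16_t old_width = cx & 0x8fff;	/* old code: masked store */
	uint32_t new_width = cx;		/* new code: stored as-is */

	/* 0x7000 & 0x8fff == 0, so the old path reported width 0. */
	printf("masked: %u, unmasked: %u\n",
	       (unsigned)old_width, (unsigned)new_width);
	return 0;
}
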
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
index 257a77830410..b265fe924556 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -377,7 +377,7 @@ static struct drm_encoder *vbox_best_single_encoder(struct drm_connector
/* pick the encoder ids */
if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@@ -553,12 +553,22 @@ static int vbox_get_modes(struct drm_connector *connector)
++num_modes;
}
vbox_set_edid(connector, preferred_width, preferred_height);
- drm_object_property_set_value(
- &connector->base, vbox->dev->mode_config.suggested_x_property,
- vbox_connector->vbox_crtc->x_hint);
- drm_object_property_set_value(
- &connector->base, vbox->dev->mode_config.suggested_y_property,
- vbox_connector->vbox_crtc->y_hint);
+
+ if (vbox_connector->vbox_crtc->x_hint != -1)
+ drm_object_property_set_value(&connector->base,
+ vbox->dev->mode_config.suggested_x_property,
+ vbox_connector->vbox_crtc->x_hint);
+ else
+ drm_object_property_set_value(&connector->base,
+ vbox->dev->mode_config.suggested_x_property, 0);
+
+ if (vbox_connector->vbox_crtc->y_hint != -1)
+ drm_object_property_set_value(&connector->base,
+ vbox->dev->mode_config.suggested_y_property,
+ vbox_connector->vbox_crtc->y_hint);
+ else
+ drm_object_property_set_value(&connector->base,
+ vbox->dev->mode_config.suggested_y_property, 0);
return num_modes;
}
@@ -640,9 +650,9 @@ static int vbox_connector_init(struct drm_device *dev,
drm_mode_create_suggested_offset_properties(dev);
drm_object_attach_property(&connector->base,
- dev->mode_config.suggested_x_property, -1);
+ dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
- dev->mode_config.suggested_y_property, -1);
+ dev->mode_config.suggested_y_property, 0);
drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
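
The suggested-offset handling above now attaches the properties with a default of 0 and only forwards the CRTC hint when it is not the -1 "unknown" sentinel. A small illustrative sketch of that guard; set_prop() below is a made-up stand-in for drm_object_property_set_value(), not a DRM API.

#include <stdint.h>
#include <stdio.h>

/* Made-up stand-in for drm_object_property_set_value(). */
static void set_prop(const char *name, uint64_t val)
{
	printf("%s = %llu\n", name, (unsigned long long)val);
}

/* Forward a known hint, otherwise fall back to 0. */
static void report_hint(uint32_t hint)
{
	if (hint != (uint32_t)-1)
		set_prop("suggested-x", hint);
	else
		set_prop("suggested-x", 0);
}

int main(void)
{
	report_hint(1920);		/* known position from the host */
	report_hint((uint32_t)-1);	/* no hint available */
	return 0;
}
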
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
index 9e2763663ab8..f5aaf7d629f0 100644
--- a/drivers/staging/vc04_services/Kconfig
+++ b/drivers/staging/vc04_services/Kconfig
@@ -19,18 +19,6 @@ config BCM2835_VCHIQ
Defaults to Y when the Broadcom Videocore services
are included in the build, N otherwise.
-if BCM2835_VCHIQ
-
-config BCM2835_VCHIQ_SUPPORT_MEMDUMP
- bool "Support dumping memory contents to debug log"
- help
- BCM2835 VCHIQ supports the ability to dump the
- contents of memory to the debug log. This
- is typically only needed by diagnostic tools used
- to debug issues with VideoCore.
-
-endif
-
source "drivers/staging/vc04_services/bcm2835-audio/Kconfig"
source "drivers/staging/vc04_services/bcm2835-camera/Kconfig"
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
index 94654c0c7bba..7e68b3e28246 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -65,7 +65,6 @@ void bcm2835_playback_fifo(struct bcm2835_alsa_stream *alsa_stream)
unsigned int consumed = 0;
int new_period = 0;
-
audio_info("alsa_stream=%p substream=%p\n", alsa_stream,
alsa_stream ? alsa_stream->substream : 0);
@@ -111,7 +110,6 @@ static int snd_bcm2835_playback_open_generic(
int idx;
int err;
-
if (mutex_lock_interruptible(&chip->audio_mutex)) {
audio_error("Interrupted whilst waiting for lock\n");
return -EINTR;
@@ -184,7 +182,6 @@ static int snd_bcm2835_playback_open_generic(
out:
mutex_unlock(&chip->audio_mutex);
-
return err;
}
@@ -207,7 +204,6 @@ static int snd_bcm2835_playback_close(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime;
struct bcm2835_alsa_stream *alsa_stream;
-
chip = snd_pcm_substream_chip(substream);
if (mutex_lock_interruptible(&chip->audio_mutex)) {
audio_error("Interrupted whilst waiting for lock\n");
@@ -259,7 +255,6 @@ static int snd_bcm2835_pcm_hw_params(struct snd_pcm_substream *substream,
struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
int err;
-
err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
if (err < 0) {
audio_error
@@ -289,7 +284,6 @@ static int snd_bcm2835_pcm_prepare(struct snd_pcm_substream *substream)
int channels;
int err;
-
if (mutex_lock_interruptible(&chip->audio_mutex))
return -EINTR;
@@ -307,13 +301,11 @@ static int snd_bcm2835_pcm_prepare(struct snd_pcm_substream *substream)
if (err < 0)
audio_error(" error setting hw params\n");
-
bcm2835_audio_setup(alsa_stream);
/* in preparation of the stream, set the controls (volume level) of the stream */
bcm2835_audio_set_ctls(alsa_stream->chip);
-
memset(&alsa_stream->pcm_indirect, 0, sizeof(alsa_stream->pcm_indirect));
alsa_stream->pcm_indirect.hw_buffer_size =
@@ -364,7 +356,6 @@ static int snd_bcm2835_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
int err = 0;
-
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
audio_debug("bcm2835_AUDIO_TRIGGER_START running=%d\n",
@@ -416,7 +407,6 @@ snd_bcm2835_pcm_pointer(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
-
audio_debug("pcm_pointer... (%d) hwptr=%d appl=%d pos=%d\n", 0,
frames_to_bytes(runtime, runtime->status->hw_ptr),
frames_to_bytes(runtime, runtime->control->appl_ptr),
@@ -493,7 +483,6 @@ int snd_bcm2835_new_pcm(struct bcm2835_chip *chip, u32 numchannels)
snd_bcm2835_playback_hw.buffer_bytes_max,
snd_bcm2835_playback_hw.buffer_bytes_max);
-
out:
mutex_unlock(&chip->audio_mutex);
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index 4be864dbd41c..3c6f1d91d22d 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -337,7 +337,6 @@ static int vc_vchi_audio_deinit(struct bcm2835_audio_instance *instance)
{
unsigned int i;
-
if (!instance) {
LOG_ERR("%s: invalid handle %p\n", __func__, instance);
@@ -369,7 +368,6 @@ static int vc_vchi_audio_deinit(struct bcm2835_audio_instance *instance)
kfree(instance);
-
return 0;
}
@@ -382,7 +380,6 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
(struct bcm2835_audio_instance *)alsa_stream->instance;
int ret;
-
LOG_INFO("%s: start\n", __func__);
BUG_ON(instance);
if (instance) {
@@ -438,7 +435,6 @@ int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream)
int status;
int ret;
-
my_workqueue_init(alsa_stream);
ret = bcm2835_audio_open_connection(alsa_stream);
@@ -486,7 +482,6 @@ static int bcm2835_audio_set_ctls_chan(struct bcm2835_alsa_stream *alsa_stream,
int status;
int ret;
-
LOG_INFO(" Setting ALSA dest(%d), volume(%d)\n",
chip->dest, chip->volume);
@@ -570,7 +565,6 @@ int bcm2835_audio_set_params(struct bcm2835_alsa_stream *alsa_stream,
int status;
int ret;
-
LOG_INFO(" Setting ALSA channels(%d), samplerate(%d), bits-per-sample(%d)\n",
channels, samplerate, bps);
@@ -631,7 +625,6 @@ unlock:
int bcm2835_audio_setup(struct bcm2835_alsa_stream *alsa_stream)
{
-
return 0;
}
@@ -642,7 +635,6 @@ static int bcm2835_audio_start_worker(struct bcm2835_alsa_stream *alsa_stream)
int status;
int ret;
-
if (mutex_lock_interruptible(&instance->vchi_mutex)) {
LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
instance->num_connections);
@@ -679,7 +671,6 @@ static int bcm2835_audio_stop_worker(struct bcm2835_alsa_stream *alsa_stream)
int status;
int ret;
-
if (mutex_lock_interruptible(&instance->vchi_mutex)) {
LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
instance->num_connections);
@@ -717,7 +708,6 @@ int bcm2835_audio_close(struct bcm2835_alsa_stream *alsa_stream)
int status;
int ret;
-
my_workqueue_quit(alsa_stream);
if (mutex_lock_interruptible(&instance->vchi_mutex)) {
@@ -775,7 +765,6 @@ static int bcm2835_audio_write_worker(struct bcm2835_alsa_stream *alsa_stream,
int status;
int ret;
-
LOG_INFO(" Writing %d bytes from %p\n", count, src);
if (mutex_lock_interruptible(&instance->vchi_mutex)) {
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
index 379604d3554e..f1e43e45fd67 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
@@ -123,8 +123,6 @@ struct bcm2835_alsa_stream {
struct snd_pcm_indirect pcm_indirect;
spinlock_t lock;
- volatile unsigned int control;
- volatile unsigned int status;
int open;
int running;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h
index 52cdf4da1b47..5a1b2a7d8eb0 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h
@@ -70,7 +70,7 @@ enum mmal_msg_type {
/* port action request messages differ depending on the action type */
enum mmal_msg_port_action_type {
- MMAL_MSG_PORT_ACTION_TYPE_UNKNOWN = 0, /* Unkown action */
+ MMAL_MSG_PORT_ACTION_TYPE_UNKNOWN = 0, /* Unknown action */
MMAL_MSG_PORT_ACTION_TYPE_ENABLE, /* Enable a port */
MMAL_MSG_PORT_ACTION_TYPE_DISABLE, /* Disable a port */
MMAL_MSG_PORT_ACTION_TYPE_FLUSH, /* Flush a port */
@@ -217,36 +217,36 @@ struct mmal_msg_port_action_reply {
#define MMAL_VC_SHORT_DATA 128
/** Signals that the current payload is the end of the stream of data */
-#define MMAL_BUFFER_HEADER_FLAG_EOS (1<<0)
+#define MMAL_BUFFER_HEADER_FLAG_EOS BIT(0)
/** Signals that the start of the current payload starts a frame */
-#define MMAL_BUFFER_HEADER_FLAG_FRAME_START (1<<1)
+#define MMAL_BUFFER_HEADER_FLAG_FRAME_START BIT(1)
/** Signals that the end of the current payload ends a frame */
-#define MMAL_BUFFER_HEADER_FLAG_FRAME_END (1<<2)
+#define MMAL_BUFFER_HEADER_FLAG_FRAME_END BIT(2)
/** Signals that the current payload contains only complete frames (>1) */
#define MMAL_BUFFER_HEADER_FLAG_FRAME \
(MMAL_BUFFER_HEADER_FLAG_FRAME_START|MMAL_BUFFER_HEADER_FLAG_FRAME_END)
/** Signals that the current payload is a keyframe (i.e. self decodable) */
-#define MMAL_BUFFER_HEADER_FLAG_KEYFRAME (1<<3)
+#define MMAL_BUFFER_HEADER_FLAG_KEYFRAME BIT(3)
/** Signals a discontinuity in the stream of data (e.g. after a seek).
* Can be used for instance by a decoder to reset its state
*/
-#define MMAL_BUFFER_HEADER_FLAG_DISCONTINUITY (1<<4)
+#define MMAL_BUFFER_HEADER_FLAG_DISCONTINUITY BIT(4)
/** Signals a buffer containing some kind of config data for the component
* (e.g. codec config data)
*/
-#define MMAL_BUFFER_HEADER_FLAG_CONFIG (1<<5)
+#define MMAL_BUFFER_HEADER_FLAG_CONFIG BIT(5)
/** Signals an encrypted payload */
-#define MMAL_BUFFER_HEADER_FLAG_ENCRYPTED (1<<6)
+#define MMAL_BUFFER_HEADER_FLAG_ENCRYPTED BIT(6)
/** Signals a buffer containing side information */
-#define MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO (1<<7)
+#define MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO BIT(7)
/** Signals a buffer which is the snapshot/postview image from a stills
* capture
*/
-#define MMAL_BUFFER_HEADER_FLAGS_SNAPSHOT (1<<8)
+#define MMAL_BUFFER_HEADER_FLAGS_SNAPSHOT BIT(8)
/** Signals a buffer which contains data known to be corrupted */
-#define MMAL_BUFFER_HEADER_FLAG_CORRUPTED (1<<9)
+#define MMAL_BUFFER_HEADER_FLAG_CORRUPTED BIT(9)
/** Signals that a buffer failed to be transmitted */
-#define MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED (1<<10)
+#define MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED BIT(10)
struct mmal_driver_buffer {
u32 magic;
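
The flag conversion above swaps open-coded (1<<n) shifts for the kernel's BIT() helper, which expands to an unsigned long shift and reads more clearly. A minimal sketch of the equivalence; the local macro mirrors the definition in <linux/bits.h>.

#include <assert.h>

#define BIT(nr) (1UL << (nr))	/* same shape as the kernel helper */

int main(void)
{
	/* BIT(3) names the same bit as (1 << 3) ... */
	assert(BIT(3) == (1 << 3));
	/* ... but the result is unsigned long, e.g. BIT(10) == 0x400UL. */
	assert(BIT(10) == 0x400UL);
	return 0;
}
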
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
index 4360db6d4392..6ea7fb0ea50e 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -1963,7 +1963,7 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
status = vchi_service_close(instance->handle);
if (status != 0)
- pr_err("mmal-vchiq: VCHIQ close failed");
+ pr_err("mmal-vchiq: VCHIQ close failed\n");
mutex_unlock(&instance->vchiq_mutex);
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
index 63db053532bf..db39900c9d91 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
@@ -130,7 +130,7 @@ int vchiq_mmal_component_disable(
/* enable a mmal port
*
* enables a port and if a buffer callback provided enque buffer
- * headers as apropriate for the port.
+ * headers as appropriate for the port.
*/
int vchiq_mmal_port_enable(
struct vchiq_mmal_instance *instance,
diff --git a/drivers/staging/vc04_services/interface/vchi/connections/connection.h b/drivers/staging/vc04_services/interface/vchi/connections/connection.h
index e793cdf2847c..67c84386c65a 100644
--- a/drivers/staging/vc04_services/interface/vchi/connections/connection.h
+++ b/drivers/staging/vc04_services/interface/vchi/connections/connection.h
@@ -54,7 +54,6 @@ typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
typedef struct vchi_connection_t VCHI_CONNECTION_T;
-
/******************************************************************************
API
*****************************************************************************/
@@ -212,7 +211,6 @@ typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_
// free memory allocated by buffer_allocate
typedef void (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
-
/******************************************************************************
System driver struct
*****************************************************************************/
@@ -321,7 +319,6 @@ struct vchi_connection_t {
#endif
};
-
#endif /* CONNECTION_H_ */
/****************************** End of file **********************************/
diff --git a/drivers/staging/vc04_services/interface/vchi/message_drivers/message.h b/drivers/staging/vc04_services/interface/vchi/message_drivers/message.h
index a7740a425388..834263f278cf 100644
--- a/drivers/staging/vc04_services/interface/vchi/message_drivers/message.h
+++ b/drivers/staging/vc04_services/interface/vchi/message_drivers/message.h
@@ -41,7 +41,6 @@
#include "interface/vchi/vchi_cfg_internal.h"
#include "interface/vchi/vchi_common.h"
-
typedef enum message_event_type {
MESSAGE_EVENT_NONE,
MESSAGE_EVENT_NOP,
@@ -111,7 +110,6 @@ typedef struct rx_bulk_slotinfo_t {
VCHI_FLAGS_T flags;
} RX_BULK_SLOTINFO_T;
-
/* ----------------------------------------------------------------------
* each connection driver will have a pool of the following struct.
*
@@ -155,7 +153,6 @@ typedef struct {
} MESSAGE_EVENT_T;
-
// callbacks
typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
@@ -163,7 +160,6 @@ typedef struct {
VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
} VCHI_MESSAGE_DRIVER_OPEN_T;
-
// handle to this instance of message driver (as returned by ->open)
typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
@@ -195,7 +191,6 @@ struct opaque_vchi_message_driver_t {
void (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
};
-
#endif // _VCHI_MESSAGE_H_
/****************************** End of file ***********************************/
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index addb7b00b688..66a3a060fad2 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -39,7 +39,6 @@
#include "interface/vchi/connections/connection.h"
#include "vchi_mh.h"
-
/******************************************************************************
Global defs
*****************************************************************************/
@@ -92,7 +91,6 @@ typedef struct vchi_msg_vector_ex {
} u;
} VCHI_MSG_VECTOR_EX_T;
-
// Construct an entry in a msg vector for a pointer (p) of length (l)
#define VCHI_VEC_POINTER(p,l) VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
@@ -103,7 +101,6 @@ typedef struct vchi_msg_vector_ex {
#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
-
// Opaque service information
struct opaque_vchi_service_t;
@@ -114,8 +111,6 @@ typedef struct {
void *message;
} VCHI_HELD_MSG_T;
-
-
// structure used to provide the information needed to open a server or a client
typedef struct {
struct vchi_version version;
@@ -162,7 +157,6 @@ extern "C" {
extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
const VCHI_MESSAGE_DRIVER_T * low_level);
-
// Routine used to initialise the vchi on both local + remote connections
extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
@@ -185,7 +179,6 @@ extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *lengt
extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
-
/******************************************************************************
Global service API
*****************************************************************************/
@@ -194,7 +187,7 @@ extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
SERVICE_CREATION_T *setup,
VCHI_SERVICE_HANDLE_T *handle );
-// Routine to destory a service
+// Routine to destroy a service
extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
// Routine to open a named service
@@ -307,7 +300,6 @@ extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
uint32_t *msg_size, // }
VCHI_HELD_MSG_T *message );
-
/******************************************************************************
Global bulk API
*****************************************************************************/
@@ -319,7 +311,6 @@ extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
VCHI_FLAGS_T flags,
void *transfer_handle );
-
// Prepare interface for a transfer from the other side into relocatable memory.
int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
VCHI_MEM_HANDLE_T h_dst,
@@ -335,7 +326,6 @@ extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
VCHI_FLAGS_T flags,
void *transfer_handle );
-
/******************************************************************************
Configuration plumbing
*****************************************************************************/
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_common.h b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
index 45c2070d46b0..76e10fe65d9b 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi_common.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
@@ -34,7 +34,6 @@
#ifndef VCHI_COMMON_H_
#define VCHI_COMMON_H_
-
//flags used when sending messages (must be bitmapped)
typedef enum {
VCHI_FLAGS_NONE = 0x0,
@@ -118,14 +117,11 @@ typedef enum {
VCHI_SERVICE_OPTION_MAX
} VCHI_SERVICE_OPTION_T;
-
//Callback used by all services / bulk transfers
typedef void (*VCHI_CALLBACK_T)(void *callback_param, //my service local param
VCHI_CALLBACK_REASON_T reason,
void *handle); //for transmitting msg's only
-
-
/*
* Define vector struct for scatter-gather (vector) operations
* Vectors can be nested - if a vector element has negative length, then
@@ -154,7 +150,6 @@ typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
// Opaque type for a message driver
typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
-
// Iterator structure for reading ahead through received message queue. Allocated by client,
// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
// Iterates over messages in queue at the instant of the call to vchi_msg_lookahead -
@@ -168,5 +163,4 @@ typedef struct {
void *remove;
} VCHI_MSG_ITER_T;
-
#endif // VCHI_COMMON_H_
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index be08849175ea..eea2d78b0ec6 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -59,10 +59,10 @@
#define BELL0 0x00
#define BELL2 0x08
-typedef struct vchiq_2835_state_struct {
+struct vchiq_2835_state {
int inited;
VCHIQ_ARM_STATE_T arm_state;
-} VCHIQ_2835_ARM_STATE_T;
+};
struct vchiq_pagelist_info {
PAGELIST_T *pagelist;
@@ -84,8 +84,6 @@ static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;
static struct device *g_dev;
-extern int vchiq_arm_log_level;
-
static DEFINE_SEMAPHORE(g_free_fragments_mutex);
static irqreturn_t
@@ -206,25 +204,31 @@ VCHIQ_STATUS_T
vchiq_platform_init_state(VCHIQ_STATE_T *state)
{
VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ struct vchiq_2835_state *platform_state;
+
+ state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
+ platform_state = (struct vchiq_2835_state *)state->platform_state;
+
+ platform_state->inited = 1;
+ status = vchiq_arm_init_state(state, &platform_state->arm_state);
- state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
- ((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 1;
- status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state);
if (status != VCHIQ_SUCCESS)
- {
- ((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 0;
- }
+ platform_state->inited = 0;
+
return status;
}
VCHIQ_ARM_STATE_T*
vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
{
- if (!((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited)
- {
+ struct vchiq_2835_state *platform_state;
+
+ platform_state = (struct vchiq_2835_state *)state->platform_state;
+
+ if (!platform_state->inited)
BUG();
- }
- return &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state;
+
+ return &platform_state->arm_state;
}
void
@@ -383,12 +387,12 @@ cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
}
/* There is a potential problem with partial cache lines (pages?)
-** at the ends of the block when reading. If the CPU accessed anything in
-** the same line (page?) then it may have pulled old data into the cache,
-** obscuring the new data underneath. We can solve this by transferring the
-** partial cache lines separately, and allowing the ARM to copy into the
-** cached area.
-*/
+ * at the ends of the block when reading. If the CPU accessed anything in
+ * the same line (page?) then it may have pulled old data into the cache,
+ * obscuring the new data underneath. We can solve this by transferring the
+ * partial cache lines separately, and allowing the ARM to copy into the
+ * cached area.
+ */
static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type,
@@ -415,15 +419,15 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
sizeof(struct vchiq_pagelist_info);
/* Allocate enough storage to hold the page pointers and the page
- ** list
- */
+ * list
+ */
pagelist = dma_zalloc_coherent(g_dev,
pagelist_size,
&dma_addr,
GFP_KERNEL);
- vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %pK",
- pagelist);
+ vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
+
if (!pagelist)
return NULL;
@@ -483,9 +487,8 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
if (actual_pages != num_pages) {
vchiq_log_info(vchiq_arm_log_level,
- "create_pagelist - only %d/%d pages locked",
- actual_pages,
- num_pages);
+ "%s - only %d/%d pages locked",
+ __func__, actual_pages, num_pages);
/* This is probably due to the process being killed */
while (actual_pages > 0)
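
vchiq_platform_init_state() above now keeps a typed local pointer to the platform-private state instead of casting state->platform_state on every access. A self-contained sketch of that pattern, a framework struct exposing an opaque void * slot that the platform code allocates and casts back once per function; the names are illustrative, not the driver's.

#include <stdlib.h>

struct framework_state {
	void *platform_state;	/* opaque slot owned by platform code */
};

struct platform_priv {
	int inited;
};

static int platform_init(struct framework_state *state)
{
	struct platform_priv *priv;

	priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -1;
	priv->inited = 1;
	/* One typed local, stored back through the opaque pointer. */
	state->platform_state = priv;
	return 0;
}

static struct platform_priv *platform_get(struct framework_state *state)
{
	return state->platform_state;	/* void * converts implicitly in C */
}

int main(void)
{
	struct framework_state state = { 0 };

	if (platform_init(&state))
		return 1;
	return !platform_get(&state)->inited;
}
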
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 314ffac50bb8..411539f8ff8c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -109,9 +109,7 @@ static const char *const resume_state_names[] = {
* requested */
#define FORCE_SUSPEND_TIMEOUT_MS 200
-
-static void suspend_timer_callback(unsigned long context);
-
+static void suspend_timer_callback(struct timer_list *t);
typedef struct user_service_struct {
VCHIQ_SERVICE_T *service;
@@ -195,11 +193,6 @@ static const char *const ioctl_names[] = {
vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
(VCHIQ_IOC_MAX + 1));
-#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
-static void
-dump_phys_mem(void *virt_addr, u32 num_bytes);
-#endif
-
/****************************************************************************
*
* add_completion
@@ -1161,20 +1154,6 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
args.handle, args.option, args.value);
} break;
-#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
- case VCHIQ_IOC_DUMP_PHYS_MEM: {
- VCHIQ_DUMP_MEM_T args;
-
- if (copy_from_user
- (&args, (const void __user *)arg,
- sizeof(args)) != 0) {
- ret = -EFAULT;
- break;
- }
- dump_phys_mem(args.virt_addr, args.num_bytes);
- } break;
-#endif
-
case VCHIQ_IOC_LIB_VERSION: {
unsigned int lib_version = (unsigned int)arg;
@@ -1654,42 +1633,6 @@ vchiq_compat_ioctl_get_config(struct file *file,
return vchiq_ioctl(file, VCHIQ_IOC_GET_CONFIG, (unsigned long)args);
}
-#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
-
-struct vchiq_dump_mem32 {
- compat_uptr_t virt_addr;
- u32 num_bytes;
-};
-
-#define VCHIQ_IOC_DUMP_PHYS_MEM32 \
- _IOW(VCHIQ_IOC_MAGIC, 15, struct vchiq_dump_mem32)
-
-static long
-vchiq_compat_ioctl_dump_phys_mem(struct file *file,
- unsigned int cmd,
- unsigned long arg)
-{
- VCHIQ_DUMP_MEM_T *args;
- struct vchiq_dump_mem32 args32;
-
- args = compat_alloc_user_space(sizeof(*args));
- if (!args)
- return -EFAULT;
-
- if (copy_from_user(&args32,
- (struct vchiq_dump_mem32 *)arg,
- sizeof(args32)))
- return -EFAULT;
-
- if (put_user(compat_ptr(args32.virt_addr), &args->virt_addr) ||
- put_user(args32.num_bytes, &args->num_bytes))
- return -EFAULT;
-
- return vchiq_ioctl(file, VCHIQ_IOC_DUMP_PHYS_MEM, (unsigned long)args);
-}
-
-#endif
-
static long
vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
@@ -1707,10 +1650,6 @@ vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return vchiq_compat_ioctl_dequeue_message(file, cmd, arg);
case VCHIQ_IOC_GET_CONFIG32:
return vchiq_compat_ioctl_get_config(file, cmd, arg);
-#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
- case VCHIQ_IOC_DUMP_PHYS_MEM32:
- return vchiq_compat_ioctl_dump_phys_mem(file, cmd, arg);
-#endif
default:
return vchiq_ioctl(file, cmd, arg);
}
@@ -2050,98 +1989,6 @@ vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
/****************************************************************************
*
-* dump_user_mem
-*
-***************************************************************************/
-
-#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
-
-static void
-dump_phys_mem(void *virt_addr, u32 num_bytes)
-{
- int rc;
- u8 *end_virt_addr = virt_addr + num_bytes;
- int num_pages;
- int offset;
- int end_offset;
- int page_idx;
- int prev_idx;
- struct page *page;
- struct page **pages;
- u8 *kmapped_virt_ptr;
-
- /* Align virt_addr and end_virt_addr to 16 byte boundaries. */
-
- virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
- end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
- ~0x0fuL);
-
- offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
- end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
-
- num_pages = DIV_ROUND_UP(offset + num_bytes, PAGE_SIZE);
-
- pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
- if (!pages) {
- vchiq_log_error(vchiq_arm_log_level,
- "Unable to allocation memory for %d pages\n",
- num_pages);
- return;
- }
-
- down_read(&current->mm->mmap_sem);
- rc = get_user_pages(
- (unsigned long)virt_addr, /* start */
- num_pages, /* len */
- 0, /* gup_flags */
- pages, /* pages (array of page pointers) */
- NULL); /* vmas */
- up_read(&current->mm->mmap_sem);
-
- prev_idx = -1;
- page = NULL;
-
- if (rc < 0) {
- vchiq_log_error(vchiq_arm_log_level,
- "Failed to get user pages: %d\n", rc);
- goto out;
- }
-
- while (offset < end_offset) {
- int page_offset = offset % PAGE_SIZE;
-
- page_idx = offset / PAGE_SIZE;
- if (page_idx != prev_idx) {
- if (page != NULL)
- kunmap(page);
- page = pages[page_idx];
- kmapped_virt_ptr = kmap(page);
- prev_idx = page_idx;
- }
-
- if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
- vchiq_log_dump_mem("ph",
- (u32)(unsigned long)&kmapped_virt_ptr[
- page_offset],
- &kmapped_virt_ptr[page_offset], 16);
-
- offset += 16;
- }
-
-out:
- if (page != NULL)
- kunmap(page);
-
- for (page_idx = 0; page_idx < num_pages; page_idx++)
- put_page(pages[page_idx]);
-
- kfree(pages);
-}
-
-#endif
-
-/****************************************************************************
-*
* vchiq_read
*
***************************************************************************/
@@ -2307,8 +2154,6 @@ exit:
return 0;
}
-
-
VCHIQ_STATUS_T
vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
{
@@ -2339,8 +2184,9 @@ vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
arm_state->suspend_timer_running = 0;
- setup_timer(&arm_state->suspend_timer, suspend_timer_callback,
- (unsigned long)(state));
+ arm_state->state = state;
+ timer_setup(&arm_state->suspend_timer, suspend_timer_callback,
+ 0);
arm_state->first_connect = 0;
@@ -2469,7 +2315,6 @@ set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
}
}
-
/* should be called with the write lock held */
inline void
start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
@@ -2589,7 +2434,6 @@ vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
status = VCHIQ_SUCCESS;
-
switch (arm_state->vc_suspend_state) {
case VC_SUSPEND_REQUESTED:
vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
@@ -2654,7 +2498,6 @@ out:
return;
}
-
static void
output_timeout_error(VCHIQ_STATE_T *state)
{
@@ -2834,7 +2677,6 @@ out:
return;
}
-
int
vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
{
@@ -2996,7 +2838,6 @@ vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
"%s %s count %d, state count %d",
__func__, entity, *entity_uc, local_uc);
-
write_unlock_bh(&arm_state->susp_res_lock);
/* Completion is in a done state when we're not suspended, so this won't
@@ -3177,18 +3018,14 @@ vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
instance->trace = (trace != 0);
}
-static void suspend_timer_callback(unsigned long context)
+static void suspend_timer_callback(struct timer_list *t)
{
- VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
- VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ VCHIQ_ARM_STATE_T *arm_state = from_timer(arm_state, t, suspend_timer);
+ VCHIQ_STATE_T *state = arm_state->state;
- if (!arm_state)
- goto out;
vchiq_log_info(vchiq_susp_log_level,
"%s - suspend timer expired - check suspend", __func__);
vchiq_check_suspend(state);
-out:
- return;
}
VCHIQ_STATUS_T
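
The suspend timer above is migrated from the old setup_timer()/unsigned long context API to timer_setup() plus from_timer(): the callback now receives the struct timer_list pointer and recovers the enclosing state with a container_of()-style lookup, so no opaque context value has to be stashed in the timer. A minimal userspace sketch of that recovery step; the structs are stand-ins, only the container_of arithmetic is the point.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer {				/* stand-in for struct timer_list */
	int pending;
};

struct arm_state {			/* stand-in for VCHIQ_ARM_STATE_T */
	int suspend_timer_running;
	struct timer suspend_timer;
};

/* from_timer(var, t, field) is container_of(t, typeof(*var), field). */
static void suspend_timer_callback(struct timer *t)
{
	struct arm_state *arm_state =
		container_of(t, struct arm_state, suspend_timer);

	printf("suspend_timer_running=%d\n", arm_state->suspend_timer_running);
}

int main(void)
{
	struct arm_state s = { .suspend_timer_running = 1 };

	suspend_timer_callback(&s.suspend_timer);
	return 0;
}
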
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
index bfbd81d9db33..40bb0c63b1a9 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
@@ -42,7 +42,6 @@
#include "vchiq_core.h"
#include "vchiq_debugfs.h"
-
enum vc_suspend_status {
VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */
@@ -61,15 +60,12 @@ enum vc_resume_status {
VC_RESUME_RESUMED /* Videocore resumed successfully (active) */
};
-
enum USE_TYPE_E {
USE_TYPE_SERVICE,
USE_TYPE_SERVICE_NO_RESUME,
USE_TYPE_VCHIQ
};
-
-
typedef struct vchiq_arm_state_struct {
/* Keepalive-related data */
struct task_struct *ka_thread;
@@ -87,6 +83,7 @@ typedef struct vchiq_arm_state_struct {
unsigned int wake_address;
+ VCHIQ_STATE_T *state;
struct timer_list suspend_timer;
int suspend_timer_timeout;
int suspend_timer_running;
@@ -216,5 +213,4 @@ set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
extern void
start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
-
#endif /* VCHIQ_ARM_H */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 486be990d7fc..ecff92bae200 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -130,7 +130,6 @@ static const char *const conn_state_names[] = {
"RESUME_TIMEOUT"
};
-
static void
release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
@@ -2168,7 +2167,6 @@ slot_handler_func(void *v)
break;
}
-
}
DEBUG_TRACE(SLOT_HANDLER_LINE);
@@ -2177,7 +2175,6 @@ slot_handler_func(void *v)
return 0;
}
-
/* Called by the recycle thread */
static int
recycle_func(void *v)
@@ -2193,7 +2190,6 @@ recycle_func(void *v)
return 0;
}
-
/* Called by the sync thread */
static int
sync_func(void *v)
@@ -2301,7 +2297,6 @@ sync_func(void *v)
return 0;
}
-
static void
init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
{
@@ -2312,14 +2307,12 @@ init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
queue->remove = 0;
}
-
inline const char *
get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
{
return conn_state_names[conn_state];
}
-
VCHIQ_SLOT_ZERO_T *
vchiq_init_slots(void *mem_base, int mem_size)
{
@@ -2958,8 +2951,7 @@ vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
case VCHIQ_SRVSTATE_OPENSYNC:
mutex_lock(&state->sync_mutex);
- /* Drop through */
-
+ /* fall through */
case VCHIQ_SRVSTATE_OPEN:
if (state->is_master || close_recvd) {
if (!do_abort_bulks(service))
@@ -3296,7 +3288,6 @@ vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
return status;
}
-
/* This function may be called by kernel threads or user threads.
* User threads may receive VCHIQ_RETRY to indicate that a signal has been
* received and the call should be retried after being returned to user
@@ -3876,7 +3867,6 @@ vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
vchiq_dump_platform_service_state(dump_context, service);
}
-
void
vchiq_loud_error_header(void)
{
@@ -3901,7 +3891,6 @@ vchiq_loud_error_footer(void)
"================");
}
-
VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
{
VCHIQ_STATUS_T status = VCHIQ_RETRY;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 9e164652548a..afc1d8144a84 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -702,7 +702,6 @@ vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
extern void
vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
-
extern void
vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
size_t numBytes);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index 9367a9a5aa3c..766b4fe5f32c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -32,7 +32,6 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
#include <linux/debugfs.h>
#include "vchiq_core.h"
#include "vchiq_arm.h"
@@ -52,7 +51,6 @@
#define VCHIQ_LOG_INFO_STR "info"
#define VCHIQ_LOG_TRACE_STR "trace"
-
/* Top-level debug info */
struct vchiq_debugfs_info {
/* Global 'vchiq' debugfs entry used by all instances */
@@ -316,7 +314,6 @@ void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
debugfs_remove_recursive(node->dentry);
}
-
int vchiq_debugfs_init(void)
{
BUG_ON(debugfs_info.vchiq_cfg_dir != NULL);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index 8af95fc361ed..d465e1cf5db9 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -41,14 +41,14 @@
#define vchiq_status_to_vchi(status) ((int32_t)status)
-typedef struct {
+struct shim_service {
VCHIQ_SERVICE_HANDLE_T handle;
VCHIU_QUEUE_T queue;
VCHI_CALLBACK_T callback;
void *callback_param;
-} SHIM_SERVICE_T;
+};
/* ----------------------------------------------------------------------
* return pointer to the mphi message driver function table
@@ -84,7 +84,6 @@ VCHI_CONNECTION_T *vchi_create_connection(
* void **data,
* uint32_t *msg_size,
-
* VCHI_FLAGS_T flags
*
* Description: Routine to return a pointer to the current message (to allow in
@@ -99,7 +98,7 @@ int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
uint32_t *msg_size,
VCHI_FLAGS_T flags)
{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_HEADER_T *header;
WARN_ON((flags != VCHI_FLAGS_NONE) &&
@@ -131,7 +130,7 @@ EXPORT_SYMBOL(vchi_msg_peek);
***********************************************************/
int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_HEADER_T *header;
header = vchiu_queue_pop(&service->queue);
@@ -163,7 +162,7 @@ int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
void *context,
uint32_t data_size)
{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_STATUS_T status;
while (1) {
@@ -262,7 +261,7 @@ int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
VCHI_FLAGS_T flags,
void *bulk_handle)
{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_BULK_MODE_T mode;
VCHIQ_STATUS_T status;
@@ -322,7 +321,7 @@ int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
VCHI_FLAGS_T flags,
void *bulk_handle)
{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_BULK_MODE_T mode;
VCHIQ_STATUS_T status;
@@ -384,7 +383,7 @@ int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
uint32_t *actual_msg_size,
VCHI_FLAGS_T flags)
{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_HEADER_T *header;
WARN_ON((flags != VCHI_FLAGS_NONE) &&
@@ -458,7 +457,7 @@ int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
VCHI_FLAGS_T flags,
VCHI_HELD_MSG_T *message_handle)
{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_HEADER_T *header;
WARN_ON((flags != VCHI_FLAGS_NONE) &&
@@ -541,7 +540,6 @@ int32_t vchi_connect(VCHI_CONNECTION_T **connections,
}
EXPORT_SYMBOL(vchi_connect);
-
/***********************************************************
* Name: vchi_disconnect
*
@@ -561,7 +559,6 @@ int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
}
EXPORT_SYMBOL(vchi_disconnect);
-
/***********************************************************
* Name: vchi_service_open
* Name: vchi_service_create
@@ -579,8 +576,8 @@ EXPORT_SYMBOL(vchi_disconnect);
static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
{
- SHIM_SERVICE_T *service =
- (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
+ struct shim_service *service =
+ (struct shim_service *)VCHIQ_GET_SERVICE_USERDATA(handle);
if (!service->callback)
goto release;
@@ -637,10 +634,10 @@ done:
return VCHIQ_SUCCESS;
}
-static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
+static struct shim_service *service_alloc(VCHIQ_INSTANCE_T instance,
SERVICE_CREATION_T *setup)
{
- SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
+ struct shim_service *service = kzalloc(sizeof(struct shim_service), GFP_KERNEL);
(void)instance;
@@ -657,7 +654,7 @@ static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
return service;
}
-static void service_free(SHIM_SERVICE_T *service)
+static void service_free(struct shim_service *service)
{
if (service) {
vchiu_queue_delete(&service->queue);
@@ -670,7 +667,7 @@ int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
VCHI_SERVICE_HANDLE_T *handle)
{
VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
- SHIM_SERVICE_T *service = service_alloc(instance, setup);
+ struct shim_service *service = service_alloc(instance, setup);
*handle = (VCHI_SERVICE_HANDLE_T)service;
@@ -703,7 +700,7 @@ int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
VCHI_SERVICE_HANDLE_T *handle)
{
VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
- SHIM_SERVICE_T *service = service_alloc(instance, setup);
+ struct shim_service *service = service_alloc(instance, setup);
*handle = (VCHI_SERVICE_HANDLE_T)service;
@@ -733,7 +730,7 @@ EXPORT_SYMBOL(vchi_service_create);
int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
{
int32_t ret = -1;
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
if (service) {
VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
@@ -751,7 +748,7 @@ EXPORT_SYMBOL(vchi_service_close);
int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
{
int32_t ret = -1;
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
if (service) {
VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
@@ -772,7 +769,7 @@ int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
int value)
{
int32_t ret = -1;
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_SERVICE_OPTION_T vchiq_option;
switch (option) {
@@ -801,7 +798,7 @@ EXPORT_SYMBOL(vchi_service_set_option);
int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_version)
{
int32_t ret = -1;
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
if (service)
{
@@ -828,7 +825,7 @@ int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
{
int32_t ret = -1;
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
if (service)
ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
return ret;
@@ -849,7 +846,7 @@ int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
{
int32_t ret = -1;
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
if (service)
ret = vchiq_status_to_vchi(
vchiq_release_service(service->handle));
diff --git a/drivers/staging/vme/devices/Kconfig b/drivers/staging/vme/devices/Kconfig
index 1d2ff0cc41f1..c548dd8c91e1 100644
--- a/drivers/staging/vme/devices/Kconfig
+++ b/drivers/staging/vme/devices/Kconfig
@@ -10,16 +10,3 @@ config VME_USER
To compile this driver as a module, choose M here. The module will
be called vme_user. If unsure, say N.
-
-config VME_PIO2
- tristate "GE PIO2 VME"
- depends on STAGING && GPIOLIB
- help
- Say Y here to include support for the GE PIO2. The PIO2 is a 6U VME
- slave card, implementing 32 solid-state relay switched IO lines, in
- 4 groups of 8. Each bank of IO lines is built to function as input,
- output or both depending on the variant of the card.
-
- To compile this driver as a module, choose M here. The module will
- be called vme_pio2. If unsure, say N.
-
diff --git a/drivers/staging/vme/devices/Makefile b/drivers/staging/vme/devices/Makefile
index 172512cb5dbf..459742a75283 100644
--- a/drivers/staging/vme/devices/Makefile
+++ b/drivers/staging/vme/devices/Makefile
@@ -3,6 +3,3 @@
#
obj-$(CONFIG_VME_USER) += vme_user.o
-
-vme_pio2-objs := vme_pio2_cntr.o vme_pio2_gpio.o vme_pio2_core.o
-obj-$(CONFIG_VME_PIO2) += vme_pio2.o
diff --git a/drivers/staging/vme/devices/vme_pio2.h b/drivers/staging/vme/devices/vme_pio2.h
deleted file mode 100644
index 65f834cdeab2..000000000000
--- a/drivers/staging/vme/devices/vme_pio2.h
+++ /dev/null
@@ -1,245 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _VME_PIO2_H_
-#define _VME_PIO2_H_
-
-#define PIO2_CARDS_MAX 32
-
-#define PIO2_VARIANT_LENGTH 5
-
-#define PIO2_NUM_CHANNELS 32
-#define PIO2_NUM_IRQS 11
-#define PIO2_NUM_CNTRS 6
-
-#define PIO2_REGS_SIZE 0x40
-
-#define PIO2_REGS_DATA0 0x0
-#define PIO2_REGS_DATA1 0x1
-#define PIO2_REGS_DATA2 0x2
-#define PIO2_REGS_DATA3 0x3
-
-static const int PIO2_REGS_DATA[4] = { PIO2_REGS_DATA0, PIO2_REGS_DATA1,
- PIO2_REGS_DATA2, PIO2_REGS_DATA3 };
-
-#define PIO2_REGS_INT_STAT0 0x8
-#define PIO2_REGS_INT_STAT1 0x9
-#define PIO2_REGS_INT_STAT2 0xa
-#define PIO2_REGS_INT_STAT3 0xb
-
-static const int PIO2_REGS_INT_STAT[4] = { PIO2_REGS_INT_STAT0,
- PIO2_REGS_INT_STAT1,
- PIO2_REGS_INT_STAT2,
- PIO2_REGS_INT_STAT3 };
-
-#define PIO2_REGS_INT_STAT_CNTR 0xc
-#define PIO2_REGS_INT_MASK0 0x10
-#define PIO2_REGS_INT_MASK1 0x11
-#define PIO2_REGS_INT_MASK2 0x12
-#define PIO2_REGS_INT_MASK3 0x13
-#define PIO2_REGS_INT_MASK4 0x14
-#define PIO2_REGS_INT_MASK5 0x15
-#define PIO2_REGS_INT_MASK6 0x16
-#define PIO2_REGS_INT_MASK7 0x17
-
-static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0,
- PIO2_REGS_INT_MASK1,
- PIO2_REGS_INT_MASK2,
- PIO2_REGS_INT_MASK3,
- PIO2_REGS_INT_MASK4,
- PIO2_REGS_INT_MASK5,
- PIO2_REGS_INT_MASK6,
- PIO2_REGS_INT_MASK7 };
-
-#define PIO2_REGS_CTRL 0x18
-#define PIO2_REGS_VME_VECTOR 0x19
-#define PIO2_REGS_CNTR0 0x20
-#define PIO2_REGS_CNTR1 0x22
-#define PIO2_REGS_CNTR2 0x24
-#define PIO2_REGS_CTRL_WRD0 0x26
-#define PIO2_REGS_CNTR3 0x28
-#define PIO2_REGS_CNTR4 0x2a
-#define PIO2_REGS_CNTR5 0x2c
-#define PIO2_REGS_CTRL_WRD1 0x2e
-
-#define PIO2_REGS_ID 0x30
-
-/* PIO2_REGS_DATAx (0x0 - 0x3) */
-
-static const int PIO2_CHANNEL_BANK[32] = { 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 3, 3, 3, 3, 3, 3, 3, 3 };
-
-#define PIO2_CHANNEL0_BIT BIT(0)
-#define PIO2_CHANNEL1_BIT BIT(1)
-#define PIO2_CHANNEL2_BIT BIT(2)
-#define PIO2_CHANNEL3_BIT BIT(3)
-#define PIO2_CHANNEL4_BIT BIT(4)
-#define PIO2_CHANNEL5_BIT BIT(5)
-#define PIO2_CHANNEL6_BIT BIT(6)
-#define PIO2_CHANNEL7_BIT BIT(7)
-#define PIO2_CHANNEL8_BIT BIT(0)
-#define PIO2_CHANNEL9_BIT BIT(1)
-#define PIO2_CHANNEL10_BIT BIT(2)
-#define PIO2_CHANNEL11_BIT BIT(3)
-#define PIO2_CHANNEL12_BIT BIT(4)
-#define PIO2_CHANNEL13_BIT BIT(5)
-#define PIO2_CHANNEL14_BIT BIT(6)
-#define PIO2_CHANNEL15_BIT BIT(7)
-#define PIO2_CHANNEL16_BIT BIT(0)
-#define PIO2_CHANNEL17_BIT BIT(1)
-#define PIO2_CHANNEL18_BIT BIT(2)
-#define PIO2_CHANNEL19_BIT BIT(3)
-#define PIO2_CHANNEL20_BIT BIT(4)
-#define PIO2_CHANNEL21_BIT BIT(5)
-#define PIO2_CHANNEL22_BIT BIT(6)
-#define PIO2_CHANNEL23_BIT BIT(7)
-#define PIO2_CHANNEL24_BIT BIT(0)
-#define PIO2_CHANNEL25_BIT BIT(1)
-#define PIO2_CHANNEL26_BIT BIT(2)
-#define PIO2_CHANNEL27_BIT BIT(3)
-#define PIO2_CHANNEL28_BIT BIT(4)
-#define PIO2_CHANNEL29_BIT BIT(5)
-#define PIO2_CHANNEL30_BIT BIT(6)
-#define PIO2_CHANNEL31_BIT BIT(7)
-
-static const int PIO2_CHANNEL_BIT[32] = { PIO2_CHANNEL0_BIT, PIO2_CHANNEL1_BIT,
- PIO2_CHANNEL2_BIT, PIO2_CHANNEL3_BIT,
- PIO2_CHANNEL4_BIT, PIO2_CHANNEL5_BIT,
- PIO2_CHANNEL6_BIT, PIO2_CHANNEL7_BIT,
- PIO2_CHANNEL8_BIT, PIO2_CHANNEL9_BIT,
- PIO2_CHANNEL10_BIT, PIO2_CHANNEL11_BIT,
- PIO2_CHANNEL12_BIT, PIO2_CHANNEL13_BIT,
- PIO2_CHANNEL14_BIT, PIO2_CHANNEL15_BIT,
- PIO2_CHANNEL16_BIT, PIO2_CHANNEL17_BIT,
- PIO2_CHANNEL18_BIT, PIO2_CHANNEL19_BIT,
- PIO2_CHANNEL20_BIT, PIO2_CHANNEL21_BIT,
- PIO2_CHANNEL22_BIT, PIO2_CHANNEL23_BIT,
- PIO2_CHANNEL24_BIT, PIO2_CHANNEL25_BIT,
- PIO2_CHANNEL26_BIT, PIO2_CHANNEL27_BIT,
- PIO2_CHANNEL28_BIT, PIO2_CHANNEL29_BIT,
- PIO2_CHANNEL30_BIT, PIO2_CHANNEL31_BIT
- };
-
-/* PIO2_REGS_INT_STAT_CNTR (0xc) */
-#define PIO2_COUNTER0 BIT(0)
-#define PIO2_COUNTER1 BIT(1)
-#define PIO2_COUNTER2 BIT(2)
-#define PIO2_COUNTER3 BIT(3)
-#define PIO2_COUNTER4 BIT(4)
-#define PIO2_COUNTER5 BIT(5)
-
-static const int PIO2_COUNTER[6] = { PIO2_COUNTER0, PIO2_COUNTER1,
- PIO2_COUNTER2, PIO2_COUNTER3,
- PIO2_COUNTER4, PIO2_COUNTER5 };
-
-/* PIO2_REGS_CTRL (0x18) */
-#define PIO2_VME_INT_MASK 0x7
-#define PIO2_LED BIT(6)
-#define PIO2_LOOP BIT(7)
-
-/* PIO2_REGS_VME_VECTOR (0x19) */
-#define PIO2_VME_VECTOR_SPUR 0x0
-#define PIO2_VME_VECTOR_BANK0 0x1
-#define PIO2_VME_VECTOR_BANK1 0x2
-#define PIO2_VME_VECTOR_BANK2 0x3
-#define PIO2_VME_VECTOR_BANK3 0x4
-#define PIO2_VME_VECTOR_CNTR0 0x5
-#define PIO2_VME_VECTOR_CNTR1 0x6
-#define PIO2_VME_VECTOR_CNTR2 0x7
-#define PIO2_VME_VECTOR_CNTR3 0x8
-#define PIO2_VME_VECTOR_CNTR4 0x9
-#define PIO2_VME_VECTOR_CNTR5 0xa
-
-#define PIO2_VME_VECTOR_MASK 0xf0
-
-static const int PIO2_VECTOR_BANK[4] = { PIO2_VME_VECTOR_BANK0,
- PIO2_VME_VECTOR_BANK1,
- PIO2_VME_VECTOR_BANK2,
- PIO2_VME_VECTOR_BANK3 };
-
-static const int PIO2_VECTOR_CNTR[6] = { PIO2_VME_VECTOR_CNTR0,
- PIO2_VME_VECTOR_CNTR1,
- PIO2_VME_VECTOR_CNTR2,
- PIO2_VME_VECTOR_CNTR3,
- PIO2_VME_VECTOR_CNTR4,
- PIO2_VME_VECTOR_CNTR5 };
-
-/* PIO2_REGS_CNTRx (0x20 - 0x24 & 0x28 - 0x2c) */
-
-static const int PIO2_CNTR_DATA[6] = { PIO2_REGS_CNTR0, PIO2_REGS_CNTR1,
- PIO2_REGS_CNTR2, PIO2_REGS_CNTR3,
- PIO2_REGS_CNTR4, PIO2_REGS_CNTR5 };
-
-/* PIO2_REGS_CTRL_WRDx (0x26 & 0x2e) */
-
-static const int PIO2_CNTR_CTRL[6] = { PIO2_REGS_CTRL_WRD0,
- PIO2_REGS_CTRL_WRD0,
- PIO2_REGS_CTRL_WRD0,
- PIO2_REGS_CTRL_WRD1,
- PIO2_REGS_CTRL_WRD1,
- PIO2_REGS_CTRL_WRD1 };
-
-#define PIO2_CNTR_SC_DEV0 0
-#define PIO2_CNTR_SC_DEV1 (1 << 6)
-#define PIO2_CNTR_SC_DEV2 (2 << 6)
-#define PIO2_CNTR_SC_RDBACK (3 << 6)
-
-static const int PIO2_CNTR_SC_DEV[6] = { PIO2_CNTR_SC_DEV0, PIO2_CNTR_SC_DEV1,
- PIO2_CNTR_SC_DEV2, PIO2_CNTR_SC_DEV0,
- PIO2_CNTR_SC_DEV1, PIO2_CNTR_SC_DEV2 };
-
-#define PIO2_CNTR_RW_LATCH 0
-#define PIO2_CNTR_RW_LSB (1 << 4)
-#define PIO2_CNTR_RW_MSB (2 << 4)
-#define PIO2_CNTR_RW_BOTH (3 << 4)
-
-#define PIO2_CNTR_MODE0 0
-#define PIO2_CNTR_MODE1 (1 << 1)
-#define PIO2_CNTR_MODE2 (2 << 1)
-#define PIO2_CNTR_MODE3 (3 << 1)
-#define PIO2_CNTR_MODE4 (4 << 1)
-#define PIO2_CNTR_MODE5 (5 << 1)
-
-#define PIO2_CNTR_BCD 1
-
-enum pio2_bank_config { NOFIT, INPUT, OUTPUT, BOTH };
-enum pio2_int_config { NONE = 0, LOW2HIGH = 1, HIGH2LOW = 2, EITHER = 4 };
-
-/* Bank configuration structure */
-struct pio2_io_bank {
- enum pio2_bank_config config;
- u8 value;
- enum pio2_int_config irq[8];
-};
-
-/* Counter configuration structure */
-struct pio2_cntr {
- int mode;
- int count;
-};
-
-struct pio2_card {
- int id;
- int bus;
- long base;
- int irq_vector;
- int irq_level;
- char variant[6];
- int led;
-
- struct vme_dev *vdev;
- struct vme_resource *window;
-
- struct gpio_chip gc;
- struct pio2_io_bank bank[4];
-
- struct pio2_cntr cntr[6];
-};
-
-int pio2_cntr_reset(struct pio2_card *card);
-
-int pio2_gpio_reset(struct pio2_card *card);
-int pio2_gpio_init(struct pio2_card *card);
-void pio2_gpio_exit(struct pio2_card *card);
-
-#endif /* _VME_PIO2_H_ */
diff --git a/drivers/staging/vme/devices/vme_pio2_cntr.c b/drivers/staging/vme/devices/vme_pio2_cntr.c
deleted file mode 100644
index 486c30c4956f..000000000000
--- a/drivers/staging/vme/devices/vme_pio2_cntr.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * GE PIO2 Counter Driver
- *
- * Author: Martyn Welch <martyn.welch@ge.com>
- * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * The PIO-2 has 6 counters, currently this code just disables the interrupts
- * and leaves them alone.
- *
- */
-
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/gpio.h>
-#include <linux/vme.h>
-
-#include "vme_pio2.h"
-
-static int pio2_cntr_irq_set(struct pio2_card *card, int id)
-{
- int retval;
- u8 data;
-
- data = PIO2_CNTR_SC_DEV[id] | PIO2_CNTR_RW_BOTH | card->cntr[id].mode;
- retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_CTRL[id]);
- if (retval < 0)
- return retval;
-
- data = card->cntr[id].count & 0xFF;
- retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_DATA[id]);
- if (retval < 0)
- return retval;
-
- data = (card->cntr[id].count >> 8) & 0xFF;
- retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_DATA[id]);
- if (retval < 0)
- return retval;
-
- return 0;
-}
-
-int pio2_cntr_reset(struct pio2_card *card)
-{
- int i, retval = 0;
- u8 reg;
-
- /* Clear down all timers */
- for (i = 0; i < 6; i++) {
- card->cntr[i].mode = PIO2_CNTR_MODE5;
- card->cntr[i].count = 0;
- retval = pio2_cntr_irq_set(card, i);
- if (retval < 0)
- return retval;
- }
-
- /* Ensure all counter interrupts are cleared */
- do {
- retval = vme_master_read(card->window, &reg, 1,
- PIO2_REGS_INT_STAT_CNTR);
- if (retval < 0)
- return retval;
- } while (reg != 0);
-
- return retval;
-}
-
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
deleted file mode 100644
index 367535b4b77f..000000000000
--- a/drivers/staging/vme/devices/vme_pio2_core.c
+++ /dev/null
@@ -1,493 +0,0 @@
-/*
- * GE PIO2 6U VME I/O Driver
- *
- * Author: Martyn Welch <martyn.welch@ge.com>
- * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/ctype.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
-#include <linux/vme.h>
-
-#include "vme_pio2.h"
-
-static const char driver_name[] = "pio2";
-
-static int bus[PIO2_CARDS_MAX];
-static int bus_num;
-static long base[PIO2_CARDS_MAX];
-static int base_num;
-static int vector[PIO2_CARDS_MAX];
-static int vector_num;
-static int level[PIO2_CARDS_MAX];
-static int level_num;
-static char *variant[PIO2_CARDS_MAX];
-static int variant_num;
-
-static bool loopback;
-
-static int pio2_match(struct vme_dev *);
-static int pio2_probe(struct vme_dev *);
-static int pio2_remove(struct vme_dev *);
-
-static int pio2_get_led(struct pio2_card *card)
-{
- /* Can't read hardware, state saved in structure */
- return card->led;
-}
-
-static int pio2_set_led(struct pio2_card *card, int state)
-{
- u8 reg;
- int retval;
-
- reg = card->irq_level;
-
- /* Register state inverse of led state */
- if (!state)
- reg |= PIO2_LED;
-
- if (loopback)
- reg |= PIO2_LOOP;
-
- retval = vme_master_write(card->window, &reg, 1, PIO2_REGS_CTRL);
- if (retval < 0)
- return retval;
-
- card->led = state ? 1 : 0;
-
- return 0;
-}
-
-static void pio2_int(int level, int vector, void *ptr)
-{
- int vec, i, channel, retval;
- u8 reg;
- struct pio2_card *card = ptr;
-
- vec = vector & ~PIO2_VME_VECTOR_MASK;
-
- switch (vec) {
- case 0:
- dev_warn(&card->vdev->dev, "Spurious Interrupt\n");
- break;
- case 1:
- case 2:
- case 3:
- case 4:
- /* Channels 0 to 7 */
- retval = vme_master_read(card->window, &reg, 1,
- PIO2_REGS_INT_STAT[vec - 1]);
- if (retval < 0) {
- dev_err(&card->vdev->dev,
- "Unable to read IRQ status register\n");
- return;
- }
- for (i = 0; i < 8; i++) {
- channel = ((vec - 1) * 8) + i;
- if (reg & PIO2_CHANNEL_BIT[channel])
- dev_info(&card->vdev->dev,
- "Interrupt on I/O channel %d\n",
- channel);
- }
- break;
- case 5:
- case 6:
- case 7:
- case 8:
- case 9:
- case 10:
- /* Counters are dealt with by their own handler */
- dev_err(&card->vdev->dev,
- "Counter interrupt\n");
- break;
- }
-}
-
-/*
- * We return whether this has been successful - this is used in the probe to
- * ensure we have a valid card.
- */
-static int pio2_reset_card(struct pio2_card *card)
-{
- int retval = 0;
- u8 data = 0;
-
- /* Clear main register*/
- retval = vme_master_write(card->window, &data, 1, PIO2_REGS_CTRL);
- if (retval < 0)
- return retval;
-
- /* Clear VME vector */
- retval = vme_master_write(card->window, &data, 1, PIO2_REGS_VME_VECTOR);
- if (retval < 0)
- return retval;
-
- /* Reset GPIO */
- retval = pio2_gpio_reset(card);
- if (retval < 0)
- return retval;
-
- /* Reset counters */
- retval = pio2_cntr_reset(card);
- if (retval < 0)
- return retval;
-
- return 0;
-}
-
-static struct vme_driver pio2_driver = {
- .name = driver_name,
- .match = pio2_match,
- .probe = pio2_probe,
- .remove = pio2_remove,
-};
-
-static int __init pio2_init(void)
-{
- if (bus_num == 0) {
- pr_err("No cards, skipping registration\n");
- return -ENODEV;
- }
-
- if (bus_num > PIO2_CARDS_MAX) {
- pr_err("Driver only able to handle %d PIO2 Cards\n",
- PIO2_CARDS_MAX);
- bus_num = PIO2_CARDS_MAX;
- }
-
- /* Register the PIO2 driver */
- return vme_register_driver(&pio2_driver, bus_num);
-}
-
-static int pio2_match(struct vme_dev *vdev)
-{
- if (vdev->num >= bus_num) {
- dev_err(&vdev->dev,
- "The enumeration of the VMEbus to which the board is connected must be specified\n");
- return 0;
- }
-
- if (vdev->num >= base_num) {
- dev_err(&vdev->dev,
- "The VME address for the cards registers must be specified\n");
- return 0;
- }
-
- if (vdev->num >= vector_num) {
- dev_err(&vdev->dev,
- "The IRQ vector used by the card must be specified\n");
- return 0;
- }
-
- if (vdev->num >= level_num) {
- dev_err(&vdev->dev,
- "The IRQ level used by the card must be specified\n");
- return 0;
- }
-
- if (vdev->num >= variant_num) {
- dev_err(&vdev->dev, "The variant of the card must be specified\n");
- return 0;
- }
-
- return 1;
-}
-
-static int pio2_probe(struct vme_dev *vdev)
-{
- struct pio2_card *card;
- int retval;
- int i;
- u8 reg;
- int vec;
-
- card = devm_kzalloc(&vdev->dev, sizeof(*card), GFP_KERNEL);
- if (!card)
- return -ENOMEM;
-
- card->id = vdev->num;
- card->bus = bus[card->id];
- card->base = base[card->id];
- card->irq_vector = vector[card->id];
- card->irq_level = level[card->id] & PIO2_VME_INT_MASK;
- strncpy(card->variant, variant[card->id], PIO2_VARIANT_LENGTH);
- card->vdev = vdev;
-
- for (i = 0; i < PIO2_VARIANT_LENGTH; i++) {
- if (!isdigit(card->variant[i])) {
- dev_err(&card->vdev->dev, "Variant invalid\n");
- return -EINVAL;
- }
- }
-
- /*
- * Bottom 4 bits of VME interrupt vector used to determine source,
- * provided vector should only use upper 4 bits.
- */
- if (card->irq_vector & ~PIO2_VME_VECTOR_MASK) {
- dev_err(&card->vdev->dev,
- "Invalid VME IRQ Vector, vector must not use lower 4 bits\n");
- return -EINVAL;
- }
-
- /*
- * There is no way to determine the build variant or whether each bank
- * is input, output or both at run time. The inputs are also inverted
- * if configured as both.
- *
- * We pass in the board variant and use that to determine the
- * configuration of the banks.
- */
- for (i = 1; i < PIO2_VARIANT_LENGTH; i++) {
- switch (card->variant[i]) {
- case '0':
- card->bank[i - 1].config = NOFIT;
- break;
- case '1':
- case '2':
- case '3':
- case '4':
- card->bank[i - 1].config = INPUT;
- break;
- case '5':
- card->bank[i - 1].config = OUTPUT;
- break;
- case '6':
- case '7':
- case '8':
- case '9':
- card->bank[i - 1].config = BOTH;
- break;
- }
- }
-
- /* Get a master window and position over regs */
- card->window = vme_master_request(vdev, VME_A24, VME_SCT, VME_D16);
- if (!card->window) {
- dev_err(&card->vdev->dev,
- "Unable to assign VME master resource\n");
- return -EIO;
- }
-
- retval = vme_master_set(card->window, 1, card->base, 0x10000, VME_A24,
- VME_SCT | VME_USER | VME_DATA, VME_D16);
- if (retval) {
- dev_err(&card->vdev->dev,
- "Unable to configure VME master resource\n");
- goto err_set;
- }
-
- /*
- * There is also no obvious register which we can probe to determine
- * whether the provided base is valid. If we can read the "ID Register"
- * offset and the reset function doesn't error, assume we have a valid
- * location.
- */
- retval = vme_master_read(card->window, &reg, 1, PIO2_REGS_ID);
- if (retval < 0) {
- dev_err(&card->vdev->dev, "Unable to read from device\n");
- goto err_read;
- }
-
- dev_dbg(&card->vdev->dev, "ID Register:%x\n", reg);
-
- /*
- * Ensure all the I/O is cleared. We can't read back the states, so
- * this is the only method we have to ensure that the I/O is in a known
- * state.
- */
- retval = pio2_reset_card(card);
- if (retval) {
- dev_err(&card->vdev->dev,
- "Failed to reset card, is location valid?\n");
- retval = -ENODEV;
- goto err_reset;
- }
-
- /* Configure VME Interrupts */
- reg = card->irq_level;
- if (pio2_get_led(card))
- reg |= PIO2_LED;
- if (loopback)
- reg |= PIO2_LOOP;
- retval = vme_master_write(card->window, &reg, 1, PIO2_REGS_CTRL);
- if (retval < 0)
- return retval;
-
- /* Set VME vector */
- retval = vme_master_write(card->window, &card->irq_vector, 1,
- PIO2_REGS_VME_VECTOR);
- if (retval < 0)
- return retval;
-
- /* Attach spurious interrupt handler. */
- vec = card->irq_vector | PIO2_VME_VECTOR_SPUR;
-
- retval = vme_irq_request(vdev, card->irq_level, vec,
- &pio2_int, card);
- if (retval < 0) {
- dev_err(&card->vdev->dev,
- "Unable to attach VME interrupt vector0x%x, level 0x%x\n",
- vec, card->irq_level);
- goto err_irq;
- }
-
- /* Attach GPIO interrupt handlers. */
- for (i = 0; i < 4; i++) {
- vec = card->irq_vector | PIO2_VECTOR_BANK[i];
-
- retval = vme_irq_request(vdev, card->irq_level, vec,
- &pio2_int, card);
- if (retval < 0) {
- dev_err(&card->vdev->dev,
- "Unable to attach VME interrupt vector0x%x, level 0x%x\n",
- vec, card->irq_level);
- goto err_gpio_irq;
- }
- }
-
- /* Attach counter interrupt handlers. */
- for (i = 0; i < 6; i++) {
- vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
-
- retval = vme_irq_request(vdev, card->irq_level, vec,
- &pio2_int, card);
- if (retval < 0) {
- dev_err(&card->vdev->dev,
- "Unable to attach VME interrupt vector0x%x, level 0x%x\n",
- vec, card->irq_level);
- goto err_cntr_irq;
- }
- }
-
- /* Register IO */
- retval = pio2_gpio_init(card);
- if (retval < 0) {
- dev_err(&card->vdev->dev,
- "Unable to register with GPIO framework\n");
- goto err_gpio;
- }
-
- /* Set LED - This also sets interrupt level */
- retval = pio2_set_led(card, 0);
- if (retval < 0) {
- dev_err(&card->vdev->dev, "Unable to set LED\n");
- goto err_led;
- }
-
- dev_set_drvdata(&card->vdev->dev, card);
-
- dev_info(&card->vdev->dev,
- "PIO2 (variant %s) configured at 0x%lx\n", card->variant,
- card->base);
-
- return 0;
-
-err_led:
- pio2_gpio_exit(card);
-err_gpio:
- i = 6;
-err_cntr_irq:
- while (i > 0) {
- i--;
- vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
- vme_irq_free(vdev, card->irq_level, vec);
- }
-
- i = 4;
-err_gpio_irq:
- while (i > 0) {
- i--;
- vec = card->irq_vector | PIO2_VECTOR_BANK[i];
- vme_irq_free(vdev, card->irq_level, vec);
- }
-
- vec = (card->irq_vector & PIO2_VME_VECTOR_MASK) | PIO2_VME_VECTOR_SPUR;
- vme_irq_free(vdev, card->irq_level, vec);
-err_irq:
- pio2_reset_card(card);
-err_reset:
-err_read:
- vme_master_set(card->window, 0, 0, 0, VME_A16, 0, VME_D16);
-err_set:
- vme_master_free(card->window);
- return retval;
-}
-
-static int pio2_remove(struct vme_dev *vdev)
-{
- int vec;
- int i;
-
- struct pio2_card *card = dev_get_drvdata(&vdev->dev);
-
- pio2_gpio_exit(card);
-
- for (i = 0; i < 6; i++) {
- vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
- vme_irq_free(vdev, card->irq_level, vec);
- }
-
- for (i = 0; i < 4; i++) {
- vec = card->irq_vector | PIO2_VECTOR_BANK[i];
- vme_irq_free(vdev, card->irq_level, vec);
- }
-
- vec = (card->irq_vector & PIO2_VME_VECTOR_MASK) | PIO2_VME_VECTOR_SPUR;
- vme_irq_free(vdev, card->irq_level, vec);
-
- pio2_reset_card(card);
-
- vme_master_set(card->window, 0, 0, 0, VME_A16, 0, VME_D16);
-
- vme_master_free(card->window);
-
- return 0;
-}
-
-static void __exit pio2_exit(void)
-{
- vme_unregister_driver(&pio2_driver);
-}
-
-/* These are required for each board */
-MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the board is connected");
-module_param_hw_array(bus, int, other, &bus_num, 0444);
-
-MODULE_PARM_DESC(base, "Base VME address for PIO2 Registers");
-module_param_hw_array(base, long, other, &base_num, 0444);
-
-MODULE_PARM_DESC(vector, "VME IRQ Vector (Lower 4 bits masked)");
-module_param_hw_array(vector, int, other, &vector_num, 0444);
-
-MODULE_PARM_DESC(level, "VME IRQ Level");
-module_param_hw_array(level, int, other, &level_num, 0444);
-
-MODULE_PARM_DESC(variant, "Last 4 characters of PIO2 board variant");
-module_param_array(variant, charp, &variant_num, 0444);
-
-/* This is for debugging */
-MODULE_PARM_DESC(loopback, "Enable loopback mode on all cards");
-module_param(loopback, bool, 0444);
-
-MODULE_DESCRIPTION("GE PIO2 6U VME I/O Driver");
-MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
-MODULE_LICENSE("GPL");
-
-module_init(pio2_init);
-module_exit(pio2_exit);
-
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
deleted file mode 100644
index ba9fe3bc2642..000000000000
--- a/drivers/staging/vme/devices/vme_pio2_gpio.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * GE PIO2 GPIO Driver
- *
- * Author: Martyn Welch <martyn.welch@ge.com>
- * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/ctype.h>
-#include <linux/gpio/driver.h>
-#include <linux/slab.h>
-#include <linux/vme.h>
-
-#include "vme_pio2.h"
-
-static const char driver_name[] = "pio2_gpio";
-
-static int pio2_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- u8 reg;
- int retval;
- struct pio2_card *card = gpiochip_get_data(chip);
-
- if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) |
- (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
- dev_err(&card->vdev->dev, "Channel not available as input\n");
- return 0;
- }
-
- retval = vme_master_read(card->window, &reg, 1,
- PIO2_REGS_DATA[PIO2_CHANNEL_BANK[offset]]);
- if (retval < 0) {
- dev_err(&card->vdev->dev, "Unable to read from GPIO\n");
- return 0;
- }
-
- /*
- * Remember, input on channels configured as both input and output
- * are inverted!
- */
- if (reg & PIO2_CHANNEL_BIT[offset]) {
- if (card->bank[PIO2_CHANNEL_BANK[offset]].config != BOTH)
- return 0;
-
- return 1;
- }
-
- if (card->bank[PIO2_CHANNEL_BANK[offset]].config != BOTH)
- return 1;
-
- return 0;
-}
-
-static void pio2_gpio_set(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- u8 reg;
- int retval;
- struct pio2_card *card = gpiochip_get_data(chip);
-
- if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) |
- (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
- dev_err(&card->vdev->dev, "Channel not available as output\n");
- return;
- }
-
- if (value)
- reg = card->bank[PIO2_CHANNEL_BANK[offset]].value |
- PIO2_CHANNEL_BIT[offset];
- else
- reg = card->bank[PIO2_CHANNEL_BANK[offset]].value &
- ~PIO2_CHANNEL_BIT[offset];
-
- retval = vme_master_write(card->window, &reg, 1,
- PIO2_REGS_DATA[PIO2_CHANNEL_BANK[offset]]);
- if (retval < 0) {
- dev_err(&card->vdev->dev, "Unable to write to GPIO\n");
- return;
- }
-
- card->bank[PIO2_CHANNEL_BANK[offset]].value = reg;
-}
-
-/* Directionality configured at board build - send appropriate response */
-static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
-{
- int data;
- struct pio2_card *card = gpiochip_get_data(chip);
-
- if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) |
- (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
- dev_err(&card->vdev->dev,
- "Channel directionality not configurable at runtime\n");
-
- data = -EINVAL;
- } else {
- data = 0;
- }
-
- return data;
-}
-
-/* Directionality configured at board build - send appropriate response */
-static int pio2_gpio_dir_out(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- int data;
- struct pio2_card *card = gpiochip_get_data(chip);
-
- if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) |
- (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
- dev_err(&card->vdev->dev,
- "Channel directionality not configurable at runtime\n");
-
- data = -EINVAL;
- } else {
- data = 0;
- }
-
- return data;
-}
-
-/*
- * We return whether this has been successful - this is used in the probe to
- * ensure we have a valid card.
- */
-int pio2_gpio_reset(struct pio2_card *card)
-{
- int retval = 0;
- int i, j;
-
- u8 data = 0;
-
- /* Zero output registers */
- for (i = 0; i < 4; i++) {
- retval = vme_master_write(card->window, &data, 1,
- PIO2_REGS_DATA[i]);
- if (retval < 0)
- return retval;
- card->bank[i].value = 0;
- }
-
- /* Set input interrupt masks */
- for (i = 0; i < 4; i++) {
- retval = vme_master_write(card->window, &data, 1,
- PIO2_REGS_INT_MASK[i * 2]);
- if (retval < 0)
- return retval;
-
- retval = vme_master_write(card->window, &data, 1,
- PIO2_REGS_INT_MASK[(i * 2) + 1]);
- if (retval < 0)
- return retval;
-
- for (j = 0; j < 8; j++)
- card->bank[i].irq[j] = NONE;
- }
-
- /* Ensure all I/O interrupts are cleared */
- for (i = 0; i < 4; i++) {
- do {
- retval = vme_master_read(card->window, &data, 1,
- PIO2_REGS_INT_STAT[i]);
- if (retval < 0)
- return retval;
- } while (data != 0);
- }
-
- return 0;
-}
-
-int pio2_gpio_init(struct pio2_card *card)
-{
- int retval = 0;
- char *label;
-
- label = kasprintf(GFP_KERNEL,
- "%s@%s", driver_name, dev_name(&card->vdev->dev));
- if (!label)
- return -ENOMEM;
-
- card->gc.label = label;
-
- card->gc.ngpio = PIO2_NUM_CHANNELS;
- /* Dynamic allocation of base */
- card->gc.base = -1;
- /* Setup pointers to chip functions */
- card->gc.direction_input = pio2_gpio_dir_in;
- card->gc.direction_output = pio2_gpio_dir_out;
- card->gc.get = pio2_gpio_get;
- card->gc.set = pio2_gpio_set;
-
- /* This function adds a memory mapped GPIO chip */
- retval = gpiochip_add_data(&card->gc, card);
- if (retval) {
- dev_err(&card->vdev->dev, "Unable to register GPIO\n");
- kfree(card->gc.label);
- }
-
- return retval;
-};
-
-void pio2_gpio_exit(struct pio2_card *card)
-{
- const char *label = card->gc.label;
-
- gpiochip_remove(&card->gc);
- kfree(label);
-}
-
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 9fcf2e223f71..1123b4f1e1d6 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1693,10 +1693,11 @@ static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state)
MACbShutdown(priv);
pci_disable_device(pcid);
- pci_set_power_state(pcid, pci_choose_state(pcid, state));
spin_unlock_irqrestore(&priv->lock, flags);
+ pci_set_power_state(pcid, pci_choose_state(pcid, state));
+
return 0;
}
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index d7ede73a1a01..d891993b20cf 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -56,17 +56,19 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
}
switch (key_type) {
- /* fallthrough */
case VNT_KEY_DEFAULTKEY:
/* default key last entry */
entry = MAX_KEY_TABLE - 1;
key->hw_key_idx = entry;
+ /* fall through */
case VNT_KEY_ALLGROUP:
key_mode |= VNT_KEY_ALLGROUP;
if (onfly_latch)
key_mode |= VNT_KEY_ONFLY_ALL;
+ /* fall through */
case VNT_KEY_GROUP_ADDRESS:
key_mode |= mode;
+ /* fall through */
case VNT_KEY_GROUP:
key_mode |= (mode << 4);
key_mode |= VNT_KEY_GROUP;
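
This hunk (and the vt6656 card.c one that follows) annotates deliberate case fall-through. GCC's -Wimplicit-fallthrough treats a /* fall through */ comment placed immediately before the next label as confirmation that the missing break is intentional. A bare sketch with hypothetical names:

	switch (type) {
	case SET_DEFAULT:
		idx = last_entry;	/* work specific to this case ... */
		/* fall through */	/* ... then deliberately continue */
	case SET_ALL:
		mode |= ALL_FLAG;
		break;
	}
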
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index c61422ea8846..4fd9cd64c6e8 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -382,11 +382,13 @@ void vnt_update_ifs(struct vnt_private *priv)
priv->difs -= 1;
break;
}
+ /* fall through */
case RF_AIROHA7230:
case RF_AL2230:
case RF_AL2230S:
if (priv->bb_type != BB_TYPE_11B)
break;
+ /* fall through */
case RF_RFMD2959:
case RF_VT3226:
case RF_VT3342A0:
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 421168b9a9ca..d69248a8c7b5 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -239,6 +239,7 @@ static struct completion hif_driver_comp;
static struct completion hif_wait_response;
static struct mutex hif_deinit_lock;
static struct timer_list periodic_rssi;
+static struct wilc_vif *periodic_rssi_vif;
u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN];
@@ -2273,7 +2274,7 @@ static int Handle_RemainOnChan(struct wilc_vif *vif,
ERRORHANDLER:
{
P2P_LISTEN_STATE = 1;
- hif_drv->remain_on_ch_timer.data = (unsigned long)vif;
+ hif_drv->remain_on_ch_timer_vif = vif;
mod_timer(&hif_drv->remain_on_ch_timer,
jiffies +
msecs_to_jiffies(pstrHostIfRemainOnChan->duration));
@@ -2361,11 +2362,13 @@ _done_:
return result;
}
-static void ListenTimerCB(unsigned long arg)
+static void ListenTimerCB(struct timer_list *t)
{
+ struct host_if_drv *hif_drv = from_timer(hif_drv, t,
+ remain_on_ch_timer);
+ struct wilc_vif *vif = hif_drv->remain_on_ch_timer_vif;
s32 result = 0;
struct host_if_msg msg;
- struct wilc_vif *vif = (struct wilc_vif *)arg;
del_timer(&vif->hif_drv->remain_on_ch_timer);
@@ -2418,9 +2421,9 @@ static void Handle_SetMulticastFilter(struct wilc_vif *vif,
pu8CurrByte = wid.val;
*pu8CurrByte++ = (strHostIfSetMulti->enabled & 0xFF);
- *pu8CurrByte++ = ((strHostIfSetMulti->enabled >> 8) & 0xFF);
- *pu8CurrByte++ = ((strHostIfSetMulti->enabled >> 16) & 0xFF);
- *pu8CurrByte++ = ((strHostIfSetMulti->enabled >> 24) & 0xFF);
+ *pu8CurrByte++ = 0;
+ *pu8CurrByte++ = 0;
+ *pu8CurrByte++ = 0;
*pu8CurrByte++ = (strHostIfSetMulti->cnt & 0xFF);
*pu8CurrByte++ = ((strHostIfSetMulti->cnt >> 8) & 0xFF);
@@ -2644,9 +2647,10 @@ free_msg:
complete(&hif_thread_comp);
}
-static void TimerCB_Scan(unsigned long arg)
+static void TimerCB_Scan(struct timer_list *t)
{
- struct wilc_vif *vif = (struct wilc_vif *)arg;
+ struct host_if_drv *hif_drv = from_timer(hif_drv, t, scan_timer);
+ struct wilc_vif *vif = hif_drv->scan_timer_vif;
struct host_if_msg msg;
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -2656,9 +2660,11 @@ static void TimerCB_Scan(unsigned long arg)
wilc_enqueue_cmd(&msg);
}
-static void TimerCB_Connect(unsigned long arg)
+static void TimerCB_Connect(struct timer_list *t)
{
- struct wilc_vif *vif = (struct wilc_vif *)arg;
+ struct host_if_drv *hif_drv = from_timer(hif_drv, t,
+ connect_timer);
+ struct wilc_vif *vif = hif_drv->connect_timer_vif;
struct host_if_msg msg;
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3041,7 +3047,7 @@ int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid,
return -EFAULT;
}
- hif_drv->connect_timer.data = (unsigned long)vif;
+ hif_drv->connect_timer_vif = vif;
mod_timer(&hif_drv->connect_timer,
jiffies + msecs_to_jiffies(HOST_IF_CONNECT_TIMEOUT));
@@ -3284,7 +3290,7 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
return -EINVAL;
}
- hif_drv->scan_timer.data = (unsigned long)vif;
+ hif_drv->scan_timer_vif = vif;
mod_timer(&hif_drv->scan_timer,
jiffies + msecs_to_jiffies(HOST_IF_SCAN_TIMEOUT));
@@ -3310,9 +3316,9 @@ int wilc_hif_set_cfg(struct wilc_vif *vif,
return wilc_enqueue_cmd(&msg);
}
-static void GetPeriodicRSSI(unsigned long arg)
+static void GetPeriodicRSSI(struct timer_list *unused)
{
- struct wilc_vif *vif = (struct wilc_vif *)arg;
+ struct wilc_vif *vif = periodic_rssi_vif;
if (!vif->hif_drv) {
netdev_err(vif->ndev, "Driver handler is NULL\n");
@@ -3322,7 +3328,6 @@ static void GetPeriodicRSSI(unsigned long arg)
if (vif->hif_drv->hif_state == HOST_IF_CONNECTED)
wilc_get_statistics(vif, &vif->wilc->dummy_statistics);
- periodic_rssi.data = (unsigned long)vif;
mod_timer(&periodic_rssi, jiffies + msecs_to_jiffies(5000));
}
@@ -3375,14 +3380,14 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
goto _fail_;
}
- setup_timer(&periodic_rssi, GetPeriodicRSSI,
- (unsigned long)vif);
+ periodic_rssi_vif = vif;
+ timer_setup(&periodic_rssi, GetPeriodicRSSI, 0);
mod_timer(&periodic_rssi, jiffies + msecs_to_jiffies(5000));
}
- setup_timer(&hif_drv->scan_timer, TimerCB_Scan, 0);
- setup_timer(&hif_drv->connect_timer, TimerCB_Connect, 0);
- setup_timer(&hif_drv->remain_on_ch_timer, ListenTimerCB, 0);
+ timer_setup(&hif_drv->scan_timer, TimerCB_Scan, 0);
+ timer_setup(&hif_drv->connect_timer, TimerCB_Connect, 0);
+ timer_setup(&hif_drv->remain_on_ch_timer, ListenTimerCB, 0);
mutex_init(&hif_drv->cfg_values_lock);
mutex_lock(&hif_drv->cfg_values_lock);
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 44a2f4b595c5..aa914d69ab0d 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -280,8 +280,13 @@ struct host_if_drv {
struct completion comp_inactive_time;
struct timer_list scan_timer;
+ struct wilc_vif *scan_timer_vif;
+
struct timer_list connect_timer;
+ struct wilc_vif *connect_timer_vif;
+
struct timer_list remain_on_ch_timer;
+ struct wilc_vif *remain_on_ch_timer_vif;
bool IFC_UP;
int driver_handler_id;
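
Because converted timer callbacks no longer receive an arbitrary data word, the wilc1000 change stores the vif pointer in the same structure as each timer, so the callback can reach it after from_timer(). An illustrative reduction of that arrangement:

	#include <linux/timer.h>

	struct host_if_drv_like {		/* illustrative, mirrors the change */
		struct timer_list scan_timer;
		struct wilc_vif *scan_timer_vif;
	};

	static void scan_timeout(struct timer_list *t)
	{
		struct host_if_drv_like *drv = from_timer(drv, t, scan_timer);
		struct wilc_vif *vif = drv->scan_timer_vif;

		/* ... act on vif ... */
	}

	/* caller: drv->scan_timer_vif = vif; mod_timer(&drv->scan_timer, ...); */
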
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index de6ff59e3e65..8a275996d4e6 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -1110,7 +1110,6 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
g_key_gtk_params.key = NULL;
kfree(g_key_gtk_params.seq);
g_key_gtk_params.seq = NULL;
-
}
if (key_index >= 0 && key_index <= 3) {
@@ -1616,7 +1615,7 @@ static int mgmt_tx(struct wiphy *wiphy,
*cookie = (unsigned long)buf;
priv->u64tx_cookie = *cookie;
- mgmt = (const struct ieee80211_mgmt *) buf;
+ mgmt = (const struct ieee80211_mgmt *)buf;
if (ieee80211_is_mgmt(mgmt->frame_control)) {
mgmt_tx = kmalloc(sizeof(struct p2p_mgmt_data), GFP_KERNEL);
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 9addef1f1e12..f49dfa82f1b8 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -714,7 +714,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
char *bssid = ((struct tx_complete_data *)(tqe->priv))->bssid;
buffer_offset = ETH_ETHERNET_HDR_OFFSET;
- memcpy(&txb[offset + 4], bssid, 6);
+ memcpy(&txb[offset + 8], bssid, 6);
} else {
buffer_offset = HOST_HDR_OFFSET;
}
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index d1e8218f96fb..197f5a914e8f 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -184,11 +184,11 @@ static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
static void hfa384x_usbctlxq_run(struct hfa384x *hw);
-static void hfa384x_usbctlx_reqtimerfn(unsigned long data);
+static void hfa384x_usbctlx_reqtimerfn(struct timer_list *t);
-static void hfa384x_usbctlx_resptimerfn(unsigned long data);
+static void hfa384x_usbctlx_resptimerfn(struct timer_list *t);
-static void hfa384x_usb_throttlefn(unsigned long data);
+static void hfa384x_usb_throttlefn(struct timer_list *t);
static void hfa384x_usbctlx_completion_task(unsigned long data);
@@ -558,13 +558,11 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
INIT_WORK(&hw->link_bh, prism2sta_processing_defer);
INIT_WORK(&hw->usb_work, hfa384x_usb_defer);
- setup_timer(&hw->throttle, hfa384x_usb_throttlefn, (unsigned long)hw);
+ timer_setup(&hw->throttle, hfa384x_usb_throttlefn, 0);
- setup_timer(&hw->resptimer, hfa384x_usbctlx_resptimerfn,
- (unsigned long)hw);
+ timer_setup(&hw->resptimer, hfa384x_usbctlx_resptimerfn, 0);
- setup_timer(&hw->reqtimer, hfa384x_usbctlx_reqtimerfn,
- (unsigned long)hw);
+ timer_setup(&hw->reqtimer, hfa384x_usbctlx_reqtimerfn, 0);
usb_init_urb(&hw->rx_urb);
usb_init_urb(&hw->tx_urb);
@@ -574,8 +572,7 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
hw->state = HFA384x_STATE_INIT;
INIT_WORK(&hw->commsqual_bh, prism2sta_commsqual_defer);
- setup_timer(&hw->commsqual_timer, prism2sta_commsqual_timer,
- (unsigned long)hw);
+ timer_setup(&hw->commsqual_timer, prism2sta_commsqual_timer, 0);
}
/*----------------------------------------------------------------
@@ -2460,7 +2457,7 @@ int hfa384x_drvr_start(struct hfa384x *hw)
* ok
*/
result =
- usb_get_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_in, &status);
+ usb_get_std_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_in, &status);
if (result < 0) {
netdev_err(hw->wlandev->netdev, "Cannot get bulk in endpoint status.\n");
goto done;
@@ -2469,7 +2466,7 @@ int hfa384x_drvr_start(struct hfa384x *hw)
netdev_err(hw->wlandev->netdev, "Failed to reset bulk in endpoint.\n");
result =
- usb_get_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_out, &status);
+ usb_get_std_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_out, &status);
if (result < 0) {
netdev_err(hw->wlandev->netdev, "Cannot get bulk out endpoint status.\n");
goto done;
@@ -3800,9 +3797,9 @@ delresp:
* interrupt
*----------------------------------------------------------------
*/
-static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
+static void hfa384x_usbctlx_reqtimerfn(struct timer_list *t)
{
- struct hfa384x *hw = (struct hfa384x *)data;
+ struct hfa384x *hw = from_timer(hw, t, reqtimer);
unsigned long flags;
spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -3859,9 +3856,9 @@ static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
* interrupt
*----------------------------------------------------------------
*/
-static void hfa384x_usbctlx_resptimerfn(unsigned long data)
+static void hfa384x_usbctlx_resptimerfn(struct timer_list *t)
{
- struct hfa384x *hw = (struct hfa384x *)data;
+ struct hfa384x *hw = from_timer(hw, t, resptimer);
unsigned long flags;
spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -3899,9 +3896,9 @@ static void hfa384x_usbctlx_resptimerfn(unsigned long data)
* Interrupt
*----------------------------------------------------------------
*/
-static void hfa384x_usb_throttlefn(unsigned long data)
+static void hfa384x_usb_throttlefn(struct timer_list *t)
{
- struct hfa384x *hw = (struct hfa384x *)data;
+ struct hfa384x *hw = from_timer(hw, t, throttle);
unsigned long flags;
spin_lock_irqsave(&hw->ctlxq.lock, flags);
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index c4aa9e7e7003..72070593394a 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -394,8 +394,9 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
count = HFA384x_SCANRESULT_MAX;
if (req->bssindex.data >= count) {
- pr_debug("requested index (%d) out of range (%d)\n",
- req->bssindex.data, count);
+ netdev_dbg(wlandev->netdev,
+ "requested index (%d) out of range (%d)\n",
+ req->bssindex.data, count);
result = 2;
req->resultcode.data = P80211ENUM_resultcode_invalid_parameters;
goto exit;
@@ -684,7 +685,8 @@ int prism2mgmt_start(struct wlandevice *wlandev, void *msgp)
goto done;
failed:
- pr_debug("Failed to set a config option, result=%d\n", result);
+ netdev_dbg(wlandev->netdev,
+ "Failed to set a config option, result=%d\n", result);
msg->resultcode.data = P80211ENUM_resultcode_invalid_parameters;
done:
@@ -1120,15 +1122,17 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
/* Disable monitor mode */
result = hfa384x_cmd_monitor(hw, HFA384x_MONITOR_DISABLE);
if (result) {
- pr_debug("failed to disable monitor mode, result=%d\n",
- result);
+ netdev_dbg(wlandev->netdev,
+ "failed to disable monitor mode, result=%d\n",
+ result);
goto failed;
}
/* Disable port 0 */
result = hfa384x_drvr_disable(hw, 0);
if (result) {
- pr_debug
- ("failed to disable port 0 after sniffing, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to disable port 0 after sniffing, result=%d\n",
result);
goto failed;
}
@@ -1140,8 +1144,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
HFA384x_RID_CNFWEPFLAGS,
hw->presniff_wepflags);
if (result) {
- pr_debug
- ("failed to restore wepflags=0x%04x, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to restore wepflags=0x%04x, result=%d\n",
hw->presniff_wepflags, result);
goto failed;
}
@@ -1153,8 +1158,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
HFA384x_RID_CNFPORTTYPE,
word);
if (result) {
- pr_debug
- ("failed to restore porttype, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to restore porttype, result=%d\n",
result);
goto failed;
}
@@ -1162,8 +1168,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
/* Enable the port */
result = hfa384x_drvr_enable(hw, 0);
if (result) {
- pr_debug("failed to enable port to presniff setting, result=%d\n",
- result);
+ netdev_dbg(wlandev->netdev,
+ "failed to enable port to presniff setting, result=%d\n",
+ result);
goto failed;
}
} else {
@@ -1182,8 +1189,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
HFA384x_RID_CNFPORTTYPE,
&(hw->presniff_port_type));
if (result) {
- pr_debug
- ("failed to read porttype, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to read porttype, result=%d\n",
result);
goto failed;
}
@@ -1192,24 +1200,27 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
HFA384x_RID_CNFWEPFLAGS,
&(hw->presniff_wepflags));
if (result) {
- pr_debug
- ("failed to read wepflags, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to read wepflags, result=%d\n",
result);
goto failed;
}
hfa384x_drvr_stop(hw);
result = hfa384x_drvr_start(hw);
if (result) {
- pr_debug("failed to restart the card for sniffing, result=%d\n",
- result);
+ netdev_dbg(wlandev->netdev,
+ "failed to restart the card for sniffing, result=%d\n",
+ result);
goto failed;
}
} else {
/* Disable the port */
result = hfa384x_drvr_disable(hw, 0);
if (result) {
- pr_debug("failed to enable port for sniffing, result=%d\n",
- result);
+ netdev_dbg(wlandev->netdev,
+ "failed to enable port for sniffing, result=%d\n",
+ result);
goto failed;
}
}
@@ -1225,8 +1236,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
hw->sniff_channel = word;
if (result) {
- pr_debug("failed to set channel %d, result=%d\n",
- word, result);
+ netdev_dbg(wlandev->netdev,
+ "failed to set channel %d, result=%d\n",
+ word, result);
goto failed;
}
@@ -1238,8 +1250,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
HFA384x_RID_CNFPORTTYPE,
word);
if (result) {
- pr_debug
- ("failed to set porttype %d, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to set porttype %d, result=%d\n",
word, result);
goto failed;
}
@@ -1257,8 +1270,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
}
if (result) {
- pr_debug
- ("failed to set wepflags=0x%04x, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to set wepflags=0x%04x, result=%d\n",
word, result);
goto failed;
}
@@ -1283,16 +1297,18 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
/* Enable the port */
result = hfa384x_drvr_enable(hw, 0);
if (result) {
- pr_debug
- ("failed to enable port for sniffing, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to enable port for sniffing, result=%d\n",
result);
goto failed;
}
/* Enable monitor mode */
result = hfa384x_cmd_monitor(hw, HFA384x_MONITOR_ENABLE);
if (result) {
- pr_debug("failed to enable monitor mode, result=%d\n",
- result);
+ netdev_dbg(wlandev->netdev,
+ "failed to enable monitor mode, result=%d\n",
+ result);
goto failed;
}
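
The prism2mgmt.c hunks swap bare pr_debug() calls for netdev_dbg(), which prefixes each message with the owning network interface. A small sketch of the resulting style (function and parameter names are illustrative):

	#include <linux/netdevice.h>

	static void report_channel_failure(struct net_device *dev,
					   int channel, int result)
	{
		/* netdev_dbg() tags the message with the device name, so
		 * logs from multiple interfaces can be told apart */
		netdev_dbg(dev, "failed to set channel %d, result=%d\n",
			   channel, result);
	}
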
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index 88b979ff68b3..c062418f1202 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -109,7 +109,7 @@ int prism2mgmt_get_grpaddr_index(u32 did);
void prism2sta_processing_defer(struct work_struct *data);
void prism2sta_commsqual_defer(struct work_struct *data);
-void prism2sta_commsqual_timer(unsigned long data);
+void prism2sta_commsqual_timer(struct timer_list *t);
/* Interface callback functions, passing data back up to the cfg80211 layer */
void prism2_connect_result(struct wlandevice *wlandev, u8 failed);
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index c9df45063ab3..99316b9a4e49 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -1447,7 +1447,7 @@ static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
{
struct hfa384x *hw = wlandev->priv;
- hw->link_status_new = inf->info.linkstatus.linkstatus;
+ hw->link_status_new = le16_to_cpu(inf->info.linkstatus.linkstatus);
schedule_work(&hw->link_bh);
}
@@ -2004,9 +2004,9 @@ void prism2sta_commsqual_defer(struct work_struct *data)
mod_timer(&hw->commsqual_timer, jiffies + HZ);
}
-void prism2sta_commsqual_timer(unsigned long data)
+void prism2sta_commsqual_timer(struct timer_list *t)
{
- struct hfa384x *hw = (struct hfa384x *)data;
+ struct hfa384x *hw = from_timer(hw, t, commsqual_timer);
schedule_work(&hw->commsqual_bh);
}
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index b450c740f626..b813f1d460ce 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -1697,7 +1697,7 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (XGIfb_get_dram_size(xgifb_info)) {
xgifb_info->video_size = min_t(unsigned long, video_size_max,
- SZ_16M);
+ SZ_16M);
} else if (xgifb_info->video_size > video_size_max) {
xgifb_info->video_size = video_size_max;
}
@@ -1736,7 +1736,7 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_info->pjVideoMemoryAddress =
ioremap_wc(xgifb_info->video_base, xgifb_info->video_size);
xgifb_info->mmio_vbase = ioremap(xgifb_info->mmio_base,
- xgifb_info->mmio_size);
+ xgifb_info->mmio_size);
dev_info(&pdev->dev,
"Framebuffer at 0x%llx, mapped to 0x%p, size %dk\n",
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index da07ca57bb40..e9d930f150cb 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -5047,7 +5047,7 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
- const u8 LCDARefreshIndex[] = {
+ static const u8 LCDARefreshIndex[] = {
0x00, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x00 };
unsigned short RefreshRateTableIndex, i, index, temp;
@@ -5480,8 +5480,9 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
ModeIdIndex))
return 0;
- pVBInfo->ModeType = XGI330_EModeIDTable[ModeIdIndex].
- Ext_ModeFlag & ModeTypeMask;
+ pVBInfo->ModeType =
+ XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag
+ & ModeTypeMask;
pVBInfo->SetFlag = 0;
pVBInfo->VBInfo = DisableCRT2Display;
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 411cb266a47d..df0a39811dc2 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -187,7 +187,7 @@ static struct configfs_attribute *iscsi_stat_instance_attrs[] = {
NULL,
};
-struct config_item_type iscsi_stat_instance_cit = {
+const struct config_item_type iscsi_stat_instance_cit = {
.ct_attrs = iscsi_stat_instance_attrs,
.ct_owner = THIS_MODULE,
};
@@ -249,7 +249,7 @@ static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = {
NULL,
};
-struct config_item_type iscsi_stat_sess_err_cit = {
+const struct config_item_type iscsi_stat_sess_err_cit = {
.ct_attrs = iscsi_stat_sess_err_attrs,
.ct_owner = THIS_MODULE,
};
@@ -390,7 +390,7 @@ static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = {
NULL,
};
-struct config_item_type iscsi_stat_tgt_attr_cit = {
+const struct config_item_type iscsi_stat_tgt_attr_cit = {
.ct_attrs = iscsi_stat_tgt_attr_attrs,
.ct_owner = THIS_MODULE,
};
@@ -522,7 +522,7 @@ static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = {
NULL,
};
-struct config_item_type iscsi_stat_login_cit = {
+const struct config_item_type iscsi_stat_login_cit = {
.ct_attrs = iscsi_stat_login_stats_attrs,
.ct_owner = THIS_MODULE,
};
@@ -579,7 +579,7 @@ static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = {
NULL,
};
-struct config_item_type iscsi_stat_logout_cit = {
+const struct config_item_type iscsi_stat_logout_cit = {
.ct_attrs = iscsi_stat_logout_stats_attrs,
.ct_owner = THIS_MODULE,
};
@@ -801,7 +801,7 @@ static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = {
NULL,
};
-struct config_item_type iscsi_stat_sess_cit = {
+const struct config_item_type iscsi_stat_sess_cit = {
.ct_attrs = iscsi_stat_sess_stats_attrs,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 7e87d952bb7a..bd87cc26c6e5 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -307,7 +307,7 @@ static struct configfs_attribute *target_core_fabric_item_attrs[] = {
/*
* Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
*/
-static struct config_item_type target_core_fabrics_item = {
+static const struct config_item_type target_core_fabrics_item = {
.ct_group_ops = &target_core_fabric_group_ops,
.ct_attrs = target_core_fabric_item_attrs,
.ct_owner = THIS_MODULE,
@@ -2376,7 +2376,7 @@ static struct configfs_item_operations target_core_alua_lu_gp_ops = {
.release = target_core_alua_lu_gp_release,
};
-static struct config_item_type target_core_alua_lu_gp_cit = {
+static const struct config_item_type target_core_alua_lu_gp_cit = {
.ct_item_ops = &target_core_alua_lu_gp_ops,
.ct_attrs = target_core_alua_lu_gp_attrs,
.ct_owner = THIS_MODULE,
@@ -2434,7 +2434,7 @@ static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
.drop_item = &target_core_alua_drop_lu_gp,
};
-static struct config_item_type target_core_alua_lu_gps_cit = {
+static const struct config_item_type target_core_alua_lu_gps_cit = {
.ct_item_ops = NULL,
.ct_group_ops = &target_core_alua_lu_gps_group_ops,
.ct_owner = THIS_MODULE,
@@ -2813,7 +2813,7 @@ static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
.release = target_core_alua_tg_pt_gp_release,
};
-static struct config_item_type target_core_alua_tg_pt_gp_cit = {
+static const struct config_item_type target_core_alua_tg_pt_gp_cit = {
.ct_item_ops = &target_core_alua_tg_pt_gp_ops,
.ct_attrs = target_core_alua_tg_pt_gp_attrs,
.ct_owner = THIS_MODULE,
@@ -2884,7 +2884,7 @@ TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NU
* core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
* target_core_alua_cit in target_core_init_configfs() below.
*/
-static struct config_item_type target_core_alua_cit = {
+static const struct config_item_type target_core_alua_cit = {
.ct_item_ops = NULL,
.ct_attrs = NULL,
.ct_owner = THIS_MODULE,
@@ -3105,7 +3105,7 @@ static struct configfs_item_operations target_core_hba_item_ops = {
.release = target_core_hba_release,
};
-static struct config_item_type target_core_hba_cit = {
+static const struct config_item_type target_core_hba_cit = {
.ct_item_ops = &target_core_hba_item_ops,
.ct_group_ops = &target_core_hba_group_ops,
.ct_attrs = target_core_hba_attrs,
@@ -3188,7 +3188,7 @@ static struct configfs_group_operations target_core_group_ops = {
.drop_item = target_core_call_delhbafromtarget,
};
-static struct config_item_type target_core_cit = {
+static const struct config_item_type target_core_cit = {
.ct_item_ops = NULL,
.ct_group_ops = &target_core_group_ops,
.ct_attrs = NULL,
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 8038255b21e8..f0db91ebd735 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -96,7 +96,7 @@ static struct configfs_attribute *target_stat_scsi_dev_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_dev_cit = {
+static const struct config_item_type target_stat_scsi_dev_cit = {
.ct_attrs = target_stat_scsi_dev_attrs,
.ct_owner = THIS_MODULE,
};
@@ -193,7 +193,7 @@ static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_tgt_dev_cit = {
+static const struct config_item_type target_stat_scsi_tgt_dev_cit = {
.ct_attrs = target_stat_scsi_tgt_dev_attrs,
.ct_owner = THIS_MODULE,
};
@@ -414,7 +414,7 @@ static struct configfs_attribute *target_stat_scsi_lu_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_lu_cit = {
+static const struct config_item_type target_stat_scsi_lu_cit = {
.ct_attrs = target_stat_scsi_lu_attrs,
.ct_owner = THIS_MODULE,
};
@@ -540,7 +540,7 @@ static struct configfs_attribute *target_stat_scsi_port_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_port_cit = {
+static const struct config_item_type target_stat_scsi_port_cit = {
.ct_attrs = target_stat_scsi_port_attrs,
.ct_owner = THIS_MODULE,
};
@@ -724,7 +724,7 @@ static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_tgt_port_cit = {
+static const struct config_item_type target_stat_scsi_tgt_port_cit = {
.ct_attrs = target_stat_scsi_tgt_port_attrs,
.ct_owner = THIS_MODULE,
};
@@ -844,7 +844,7 @@ static struct configfs_attribute *target_stat_scsi_transport_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_transport_cit = {
+static const struct config_item_type target_stat_scsi_transport_cit = {
.ct_attrs = target_stat_scsi_transport_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1206,7 +1206,7 @@ static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_auth_intr_cit = {
+static const struct config_item_type target_stat_scsi_auth_intr_cit = {
.ct_attrs = target_stat_scsi_auth_intr_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1378,7 +1378,7 @@ static struct configfs_attribute *target_stat_scsi_ath_intr_port_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_att_intr_port_cit = {
+static const struct config_item_type target_stat_scsi_att_intr_port_cit = {
.ct_attrs = target_stat_scsi_ath_intr_port_attrs,
.ct_owner = THIS_MODULE,
};
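
The iscsi and target_core hunks constify every config_item_type, possible now that the configfs helpers take const pointers, so these descriptors can live in read-only data. A sketch with hypothetical names:

#include <linux/configfs.h>
#include <linux/module.h>

/* Illustrative only: an empty attribute list and a const type descriptor. */
static struct configfs_attribute *example_attrs[] = {
	NULL,
};

static const struct config_item_type example_cit = {
	.ct_attrs = example_attrs,
	.ct_owner = THIS_MODULE,
};

/* Helpers such as config_group_init_type_name() accept the const pointer:
 *	config_group_init_type_name(&group, "example", &example_cit);
 */
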
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 942d094269fb..9469695f5871 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -985,7 +985,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
mb = udev->mb_addr;
tcmu_flush_dcache_range(mb, sizeof(*mb));
- while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {
+ while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
struct tcmu_cmd *cmd;
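
ACCESS_ONCE() is on its way out in favor of READ_ONCE()/WRITE_ONCE(); the tcmu hunk above is one instance of the mechanical replacement. A small sketch with hypothetical ring-buffer fields:

#include <linux/compiler.h>
#include <linux/types.h>

/* The tail index is updated by another context; READ_ONCE() stops the
 * compiler from tearing or caching the load, the same guarantee the old
 * ACCESS_ONCE() provided. */
static inline bool ring_has_work(const u32 *tail, u32 last_cleaned)
{
	return last_cleaned != READ_ONCE(*tail);
}
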
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index d674e06767a5..1424581fd9af 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -225,6 +225,7 @@ static void tb_activate_pcie_devices(struct tb *tb)
tb_port_info(up_port,
"PCIe tunnel activation failed, aborting\n");
tb_pci_free(tunnel);
+ continue;
}
list_add(&tunnel->list, &tcm->tunnel_list);
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 9820e20993db..32d7ce430b02 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Serial driver for the amiga builtin port.
*
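
From here on, many hunks replace multi-paragraph license boilerplate with a single SPDX-License-Identifier line at the top of the file, leaving the copyright and description comment in place. An illustrative header for a new .c file would look like:

// SPDX-License-Identifier: GPL-2.0
/*
 * example.c - short description of the driver
 *
 * Copyright (C) 2017 Example Author <author@example.com>
 */
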
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
index ce24182f8514..c369bf27a67b 100644
--- a/drivers/tty/bfin_jtag_comm.c
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* TTY over Blackfin JTAG Communication
*
* Copyright 2008-2009 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
#define DRV_NAME "bfin-jtag-comm"
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index dac8a1a8e4ac..5d442469c95e 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#undef BLOCKMOVE
#define Z_WAKE
#undef Z_EXT_CHARS_IN_BUFFER
@@ -286,8 +287,7 @@ static long cyz_polling_cycle = CZ_DEF_POLL;
static DEFINE_TIMER(cyz_timerlist, cyz_poll);
#else /* CONFIG_CYZ_INTR */
-static void cyz_rx_restart(unsigned long);
-static struct timer_list cyz_rx_full_timer[NR_PORTS];
+static void cyz_rx_restart(struct timer_list *);
#endif /* CONFIG_CYZ_INTR */
static void cyy_writeb(struct cyclades_port *port, u32 reg, u8 val)
@@ -992,10 +992,8 @@ static void cyz_handle_rx(struct cyclades_port *info)
else
char_count = rx_put - rx_get + rx_bufsize;
if (char_count >= readl(&buf_ctrl->rx_threshold) &&
- !timer_pending(&cyz_rx_full_timer[
- info->line]))
- mod_timer(&cyz_rx_full_timer[info->line],
- jiffies + 1);
+ !timer_pending(&info->rx_full_timer))
+ mod_timer(&info->rx_full_timer, jiffies + 1);
#endif
info->idle_stats.recv_idle = jiffies;
tty_schedule_flip(&info->port);
@@ -1197,9 +1195,9 @@ static irqreturn_t cyz_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
} /* cyz_interrupt */
-static void cyz_rx_restart(unsigned long arg)
+static void cyz_rx_restart(struct timer_list *t)
{
- struct cyclades_port *info = (struct cyclades_port *)arg;
+ struct cyclades_port *info = from_timer(info, t, rx_full_timer);
struct cyclades_card *card = info->card;
int retval;
__u32 channel = info->line - card->first_line;
@@ -3097,8 +3095,7 @@ static int cy_init_card(struct cyclades_card *cinfo)
else
info->xmit_fifo_size = 4 * CYZ_FIFO_SIZE;
#ifdef CONFIG_CYZ_INTR
- setup_timer(&cyz_rx_full_timer[port],
- cyz_rx_restart, (unsigned long)info);
+ timer_setup(&info->rx_full_timer, cyz_rx_restart, 0);
#endif
} else {
unsigned short chip_number;
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index a1c7125cb968..47ac56817c43 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/* ePAPR hypervisor byte channel device driver
*
* Copyright 2009-2011 Freescale Semiconductor, Inc.
*
* Author: Timur Tabi <timur@freescale.com>
*
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- *
* This driver support three distinct interfaces, all of which are related to
* ePAPR hypervisor byte channels.
*
@@ -328,7 +325,7 @@ console_initcall(ehv_bc_console_init);
/******************************** TTY DRIVER ********************************/
/*
- * byte channel receive interupt handler
+ * byte channel receive interrupt handler
*
* This ISR is called whenever data is available on a byte channel.
*/
@@ -428,7 +425,7 @@ static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc)
}
/*
- * byte channel transmit interupt handler
+ * byte channel transmit interrupt handler
*
* This ISR is called whenever space becomes available for transmitting
* characters on a byte channel.
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index 381e981dee06..7f657bb5113c 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 Google, Inc.
* Copyright (C) 2012 Intel, Inc.
* Copyright (C) 2017 Imagination Technologies Ltd.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/console.h>
diff --git a/drivers/tty/hvc/hvc_bfin_jtag.c b/drivers/tty/hvc/hvc_bfin_jtag.c
index 31d6cc6a77af..dd7cae4c195b 100644
--- a/drivers/tty/hvc/hvc_bfin_jtag.c
+++ b/drivers/tty/hvc/hvc_bfin_jtag.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Console via Blackfin JTAG Communication
*
* Copyright 2008-2011 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/console.h>
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index a8d399188242..7709fcc707f4 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
* Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
@@ -6,20 +7,6 @@
*
* Additional Author(s):
* Ryan S. Arnold <rsa@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/console.h>
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index 798c48d0d32c..ea63090e013f 100644
--- a/drivers/tty/hvc/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* hvc_console.h
* Copyright (C) 2005 IBM Corporation
@@ -8,20 +9,6 @@
* hvc_console header information:
* moved here from arch/powerpc/include/asm/hvconsole.h
* and drivers/char/hvc_console.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef HVC_CONSOLE_H
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 82f240fb98f0..02629a1f193d 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -1,14 +1,5 @@
-/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved. */
#include <linux/init.h>
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 16331a90c1e8..2ed07ca6389e 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* opal driver interface to hvc_console.c
*
* Copyright 2011 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#undef DEBUG
diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
index 08c87920b74a..e8b8c645482b 100644
--- a/drivers/tty/hvc/hvc_rtas.c
+++ b/drivers/tty/hvc/hvc_rtas.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* IBM RTAS driver interface to hvc_console.c
*
@@ -11,20 +12,6 @@
*
* inspired by drivers/char/hvc_console.c
* written by Anton Blanchard and Paul Mackerras
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/console.h>
diff --git a/drivers/tty/hvc/hvc_tile.c b/drivers/tty/hvc/hvc_tile.c
index 9da1e842bbe9..b517c0661abb 100644
--- a/drivers/tty/hvc/hvc_tile.c
+++ b/drivers/tty/hvc/hvc_tile.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
* Tilera TILE Processor hypervisor console
*/
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index 9cf573d06a29..a4c9913f76a0 100644
--- a/drivers/tty/hvc/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* udbg interface to hvc_console.c
*
* (C) Copyright David Gibson, IBM Corporation 2008.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/console.h>
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index a1d272ac82bb..59eaa620bf13 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* vio driver interface to hvc_console.c
*
@@ -14,20 +15,6 @@
* Additional Author(s):
* Ryan S. Arnold <rsa@us.ibm.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* TODO:
*
* - handle error in sending hvsi protocol packets
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 5e87e4866bcb..dc43fa96c3de 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* xen console driver interface to hvc_console.c
*
* (c) 2007 Gerd Hoffmann <kraxel@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/console.h>
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 63c29fe9d21f..1db1d97e72e7 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* IBM eServer Hypervisor Virtual Console Server Device Driver
* Copyright (C) 2003, 2004 IBM Corp.
* Ryan S. Arnold (rsa@us.ibm.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* Author(s) : Ryan S. Arnold <rsa@us.ibm.com>
*
* This is the device driver for the IBM Hypervisor Virtual Console Server,
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index 2e578d6433af..66f95f758be0 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2004 Hollis Blanchard <hollisb@us.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* Host Virtual Serial Interface (HVSI) is a protocol between the hosted OS
diff --git a/drivers/tty/ipwireless/main.c b/drivers/tty/ipwireless/main.c
index 655c7948261c..3475e841ef5c 100644
--- a/drivers/tty/ipwireless/main.c
+++ b/drivers/tty/ipwireless/main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IPWireless 3G PCMCIA Network Driver
*
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index 40af32108ff5..ee7958ab269f 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -1,9 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Original driver code supplied by Multi-Tech
*
* Changes
diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
index 25ccef2fe748..99eaed4b2dbc 100644
--- a/drivers/tty/metag_da.c
+++ b/drivers/tty/metag_da.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dashtty.c - tty driver for Dash channels interface.
*
* Copyright (C) 2007,2008,2012 Imagination Technologies
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
*/
#include <linux/atomic.h>
@@ -309,7 +305,7 @@ static int put_data(void *arg)
/*
* This gets called every DA_TTY_POLL and polls the channels for data
*/
-static void dashtty_timer(unsigned long ignored)
+static void dashtty_timer(struct timer_list *poll_timer)
{
int channel;
@@ -323,12 +319,12 @@ static void dashtty_timer(unsigned long ignored)
if (channel >= 0)
fetch_data(channel);
- mod_timer(&poll_timer, jiffies + DA_TTY_POLL);
+ mod_timer(poll_timer, jiffies + DA_TTY_POLL);
}
static void add_poll_timer(struct timer_list *poll_timer)
{
- setup_pinned_timer(poll_timer, dashtty_timer, 0);
+ timer_setup(poll_timer, dashtty_timer, TIMER_PINNED);
poll_timer->expires = jiffies + DA_TTY_POLL;
/*
@@ -461,7 +457,7 @@ static void dashtty_hangup(struct tty_struct *tty)
* buffers. It is used to delay the expensive writeout until the writer has
* stopped writing.
*/
-static void dashtty_put_timer(unsigned long ignored)
+static void dashtty_put_timer(struct timer_list *unused)
{
if (atomic_read(&dashtty_xmit_cnt))
wake_up_interruptible(&dashtty_waitqueue);
@@ -603,7 +599,7 @@ static int __init dashtty_init(void)
complete(&dport->xmit_empty);
}
- setup_timer(&put_timer, dashtty_put_timer, 0);
+ timer_setup(&put_timer, dashtty_put_timer, 0);
init_waitqueue_head(&dashtty_waitqueue);
dashtty_thread = kthread_create(put_data, NULL, "ttyDA");
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index a2dab3fb8751..4c1cd49ae95b 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* TTY driver for MIPS EJTAG Fast Debug Channels.
*
* Copyright (C) 2007-2015 Imagination Technologies Ltd
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for more
- * details.
*/
#include <linux/atomic.h>
@@ -683,9 +680,9 @@ static irqreturn_t mips_ejtag_fdc_isr(int irq, void *dev_id)
* It simply triggers the common FDC handler code and arranges for further
* polling.
*/
-static void mips_ejtag_fdc_tty_timer(unsigned long opaque)
+static void mips_ejtag_fdc_tty_timer(struct timer_list *t)
{
- struct mips_ejtag_fdc_tty *priv = (void *)opaque;
+ struct mips_ejtag_fdc_tty *priv = from_timer(priv, t, poll_timer);
mips_ejtag_fdc_handle(priv);
if (!priv->removing)
@@ -1002,8 +999,8 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
raw_spin_unlock_irq(&priv->lock);
} else {
/* If we didn't get an usable IRQ, poll instead */
- setup_pinned_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
- (unsigned long)priv);
+ timer_setup(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
+ TIMER_PINNED);
priv->poll_timer.expires = jiffies + FDC_TTY_POLL;
/*
* Always attach the timer to the right CPU. The channels are
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index 93d37655d928..65a70f3c7cde 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*****************************************************************************/
/*
* moxa.c -- MOXA Intellio family multiport serial driver.
@@ -7,11 +8,6 @@
*
* This code is loosely based on the Linux serial driver, written by
* Linus Torvalds, Theodore T'so and others.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 7dd38047ba23..8bc15cb67a58 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* mxser.c -- MOXA Smartio/Industio family multiport serial driver.
*
@@ -8,11 +9,6 @@
* Linux serial driver, written by Linus Torvalds, Theodore T'so and
* others.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Fed through a cleanup, indent and remove of non 2.6 code by Alan Cox
* <alan@lxorguk.ukuu.org.uk>. The original 1.8 code is available on
* www.moxa.com.
@@ -642,8 +638,7 @@ static int mxser_set_baud(struct tty_struct *tty, long newspd)
* This routine is called to set the UART divisor registers to match
* the specified baud rate for a serial port.
*/
-static int mxser_change_speed(struct tty_struct *tty,
- struct ktermios *old_termios)
+static int mxser_change_speed(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
unsigned cflag, cval, fcr;
@@ -945,7 +940,7 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
/*
* and set the speed of the serial port
*/
- mxser_change_speed(tty, NULL);
+ mxser_change_speed(tty);
spin_unlock_irqrestore(&info->slock, flags);
return 0;
@@ -1288,7 +1283,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
if (tty_port_initialized(port)) {
if (flags != (port->flags & ASYNC_SPD_MASK)) {
spin_lock_irqsave(&info->slock, sl_flags);
- mxser_change_speed(tty, NULL);
+ mxser_change_speed(tty);
spin_unlock_irqrestore(&info->slock, sl_flags);
}
} else {
@@ -1946,7 +1941,7 @@ static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termi
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
- mxser_change_speed(tty, old_termios);
+ mxser_change_speed(tty);
spin_unlock_irqrestore(&info->slock, flags);
if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
@@ -2375,8 +2370,7 @@ static void mxser_release_ISA_res(struct mxser_board *brd)
mxser_release_vector(brd);
}
-static int mxser_initbrd(struct mxser_board *brd,
- struct pci_dev *pdev)
+static int mxser_initbrd(struct mxser_board *brd)
{
struct mxser_port *info;
unsigned int i;
@@ -2640,7 +2634,7 @@ static int mxser_probe(struct pci_dev *pdev,
}
/* mxser_initbrd will hook ISR. */
- retval = mxser_initbrd(brd, pdev);
+ retval = mxser_initbrd(brd);
if (retval)
goto err_rel3;
@@ -2746,7 +2740,7 @@ static int __init mxser_module_init(void)
brd->info->name, ioaddr[b]);
/* mxser_initbrd will hook ISR. */
- if (mxser_initbrd(brd, NULL) < 0) {
+ if (mxser_initbrd(brd) < 0) {
mxser_release_ISA_res(brd);
brd->info = NULL;
continue;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 0a3c9665e015..3a39eb685c69 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* n_gsm.c GSM 0710 tty multiplexor
* Copyright (c) 2009/10 Intel Corporation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
*
* TO DO:
@@ -1646,9 +1634,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
}
skb_queue_head_init(&dlci->skb_list);
- init_timer(&dlci->t1);
- dlci->t1.function = gsm_dlci_t1;
- dlci->t1.data = (unsigned long)dlci;
+ setup_timer(&dlci->t1, gsm_dlci_t1, (unsigned long)dlci);
tty_port_init(&dlci->port);
dlci->port.ops = &gsm_port_ops;
dlci->gsm = gsm;
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 7b2a466616d6..eea7b6cb3cc4 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/* generic HDLC line discipline for Linux
*
* Written by Paul Fulghum paulkf@microgate.com
@@ -11,8 +12,6 @@
*
* Original release 01/11/99
*
- * This code is released under the GNU General Public License (GPL)
- *
* This module implements the tty line discipline N_HDLC for use with
* tty device drivers that support bit-synchronous HDLC communications.
*
diff --git a/drivers/tty/n_null.c b/drivers/tty/n_null.c
index d63261c36e42..96feabae4740 100644
--- a/drivers/tty/n_null.c
+++ b/drivers/tty/n_null.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
@@ -7,19 +8,6 @@
* n_null.c - Null line discipline used in the failure path
*
* Copyright (C) Intel 2017
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
static int n_null_open(struct tty_struct *tty)
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 305b6490d405..9f246d4db3ca 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/* r3964 linediscipline for linux
*
* -----------------------------------------------------------
@@ -5,9 +6,6 @@
* Philips Automation Projects
* Kassel (Germany)
* -----------------------------------------------------------
- * This software may be used and distributed according to the terms of
- * the GNU General Public License, incorporated herein by reference.
- *
* Author:
* L. Haag
*
diff --git a/drivers/tty/n_tracerouter.c b/drivers/tty/n_tracerouter.c
index ac5716979bc1..4479af4d2fa5 100644
--- a/drivers/tty/n_tracerouter.c
+++ b/drivers/tty/n_tracerouter.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* n_tracerouter.c - Trace data router through tty space
*
@@ -5,17 +6,6 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* This trace router uses the Linux line discipline framework to route
* trace data coming from a HW Modem to a PTI (Parallel Trace Module) port.
* The solution is not specific to a HW modem and this line disciple can
diff --git a/drivers/tty/n_tracesink.c b/drivers/tty/n_tracesink.c
index 4616870a6b1b..d96ba82cc356 100644
--- a/drivers/tty/n_tracesink.c
+++ b/drivers/tty/n_tracesink.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* n_tracesink.c - Trace data router and sink path through tty space.
*
@@ -5,17 +6,6 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* The trace sink uses the Linux line discipline framework to receive
* trace data coming from the PTI source line discipline driver
* to a user-desired tty port, like USB.
diff --git a/drivers/tty/n_tracesink.h b/drivers/tty/n_tracesink.h
index a68bb44f1ef5..1b846330c855 100644
--- a/drivers/tty/n_tracesink.h
+++ b/drivers/tty/n_tracesink.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* n_tracesink.h - Kernel driver API to route trace data in kernel space.
*
@@ -5,17 +6,6 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* The PTI (Parallel Trace Interface) driver directs trace data routed from
* various parts in the system out through the Intel Penwell PTI port and
* out of the mobile device for analysis with a debugging tool
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index bdf0e6e89991..427e0d5d8f13 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* n_tty.c --- implements the N_TTY line discipline.
*
@@ -15,9 +16,6 @@
* This file also contains code originally written by Linus Torvalds,
* Copyright 1991, 1992, 1993, and by Julian Cowley, Copyright 1994.
*
- * This file may be redistributed under the terms of the GNU General Public
- * License.
- *
* Reduced memory usage for older ARM systems - Russell King.
*
* 2000/01/20 Fixed SMP locking on put_tty_queue using bits of
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index 39b3723a32a6..b57b35066ebe 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* nozomi.c -- HSDPA driver Broadband Wireless Data Card - Globe Trotter
*
@@ -21,20 +22,6 @@
* Copyright (c) 2006 Option Wireless n/v
* All rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
* --------------------------------------------------------------------------
*/
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index aa695fda1084..f7dc9b1ea806 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* RocketPort device driver for Linux
*
* Written by Theodore Ts'o, 1995, 1996, 1997, 1998, 1999, 2000.
*
* Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2003 by Comtrol, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/drivers/tty/serdev/Kconfig b/drivers/tty/serdev/Kconfig
index cdc6b820cf93..1dbc8352e027 100644
--- a/drivers/tty/serdev/Kconfig
+++ b/drivers/tty/serdev/Kconfig
@@ -6,11 +6,19 @@ menuconfig SERIAL_DEV_BUS
help
Core support for devices connected via a serial port.
+ Note that you typically also want to enable TTY port controller support.
+
if SERIAL_DEV_BUS
config SERIAL_DEV_CTRL_TTYPORT
bool "Serial device TTY port controller"
+ help
+ Say Y here if you want to use the Serial device bus with common TTY
+ drivers (e.g. serial drivers).
+
+ If unsure, say Y.
depends on TTY
depends on SERIAL_DEV_BUS != m
+ default y
endif
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index c68fb3a8ea1c..1bef39828ca7 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -1,19 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <robh@kernel.org>
*
* Based on drivers/spmi/spmi.c:
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
+#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/kernel.h>
@@ -49,13 +42,22 @@ static const struct device_type serdev_ctrl_type = {
static int serdev_device_match(struct device *dev, struct device_driver *drv)
{
- /* TODO: ACPI and platform matching */
+ /* TODO: platform matching */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
return of_driver_match_device(dev, drv);
}
static int serdev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- /* TODO: ACPI and platform modalias */
+ int rc;
+
+ /* TODO: platform modalias */
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
return of_device_uevent_modalias(dev, env);
}
@@ -65,21 +67,32 @@ static int serdev_uevent(struct device *dev, struct kobj_uevent_env *env)
*/
int serdev_device_add(struct serdev_device *serdev)
{
+ struct serdev_controller *ctrl = serdev->ctrl;
struct device *parent = serdev->dev.parent;
int err;
dev_set_name(&serdev->dev, "%s-%d", dev_name(parent), serdev->nr);
+ /* Only a single slave device is currently supported. */
+ if (ctrl->serdev) {
+ dev_err(&serdev->dev, "controller busy\n");
+ return -EBUSY;
+ }
+ ctrl->serdev = serdev;
+
err = device_add(&serdev->dev);
if (err < 0) {
dev_err(&serdev->dev, "Can't add %s, status %d\n",
dev_name(&serdev->dev), err);
- goto err_device_add;
+ goto err_clear_serdev;
}
dev_dbg(&serdev->dev, "device %s registered\n", dev_name(&serdev->dev));
-err_device_add:
+ return 0;
+
+err_clear_serdev:
+ ctrl->serdev = NULL;
return err;
}
EXPORT_SYMBOL_GPL(serdev_device_add);
@@ -90,7 +103,10 @@ EXPORT_SYMBOL_GPL(serdev_device_add);
*/
void serdev_device_remove(struct serdev_device *serdev)
{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
device_unregister(&serdev->dev);
+ ctrl->serdev = NULL;
}
EXPORT_SYMBOL_GPL(serdev_device_remove);
@@ -260,6 +276,12 @@ static int serdev_drv_remove(struct device *dev)
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ int len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+
return of_device_modalias(dev, buf, PAGE_SIZE);
}
DEVICE_ATTR_RO(modalias);
@@ -295,7 +317,6 @@ struct serdev_device *serdev_device_alloc(struct serdev_controller *ctrl)
return NULL;
serdev->ctrl = ctrl;
- ctrl->serdev = serdev;
device_initialize(&serdev->dev);
serdev->dev.parent = &ctrl->dev;
serdev->dev.bus = &serdev_bus_type;
@@ -329,26 +350,31 @@ struct serdev_controller *serdev_controller_alloc(struct device *parent,
if (!ctrl)
return NULL;
- device_initialize(&ctrl->dev);
- ctrl->dev.type = &serdev_ctrl_type;
- ctrl->dev.bus = &serdev_bus_type;
- ctrl->dev.parent = parent;
- ctrl->dev.of_node = parent->of_node;
- serdev_controller_set_drvdata(ctrl, &ctrl[1]);
-
id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
dev_err(parent,
"unable to allocate serdev controller identifier.\n");
- serdev_controller_put(ctrl);
- return NULL;
+ goto err_free;
}
ctrl->nr = id;
+
+ device_initialize(&ctrl->dev);
+ ctrl->dev.type = &serdev_ctrl_type;
+ ctrl->dev.bus = &serdev_bus_type;
+ ctrl->dev.parent = parent;
+ ctrl->dev.of_node = parent->of_node;
+ serdev_controller_set_drvdata(ctrl, &ctrl[1]);
+
dev_set_name(&ctrl->dev, "serial%d", id);
dev_dbg(&ctrl->dev, "allocated controller 0x%p id %d\n", ctrl, id);
return ctrl;
+
+err_free:
+ kfree(ctrl);
+
+ return NULL;
}
EXPORT_SYMBOL_GPL(serdev_controller_alloc);
@@ -385,6 +411,75 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
return 0;
}
+#ifdef CONFIG_ACPI
+static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
+ struct acpi_device *adev)
+{
+ struct serdev_device *serdev = NULL;
+ int err;
+
+ if (acpi_bus_get_status(adev) || !adev->status.present ||
+ acpi_device_enumerated(adev))
+ return AE_OK;
+
+ serdev = serdev_device_alloc(ctrl);
+ if (!serdev) {
+ dev_err(&ctrl->dev, "failed to allocate serdev device for %s\n",
+ dev_name(&adev->dev));
+ return AE_NO_MEMORY;
+ }
+
+ ACPI_COMPANION_SET(&serdev->dev, adev);
+ acpi_device_set_enumerated(adev);
+
+ err = serdev_device_add(serdev);
+ if (err) {
+ dev_err(&serdev->dev,
+ "failure adding ACPI serdev device. status %d\n", err);
+ serdev_device_put(serdev);
+ }
+
+ return AE_OK;
+}
+
+static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct serdev_controller *ctrl = data;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ return acpi_serdev_register_device(ctrl, adev);
+}
+
+static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(ctrl->dev.parent);
+ if (!handle)
+ return -ENODEV;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_serdev_add_device, NULL, ctrl, NULL);
+ if (ACPI_FAILURE(status))
+ dev_dbg(&ctrl->dev, "failed to enumerate serdev slaves\n");
+
+ if (!ctrl->serdev)
+ return -ENODEV;
+
+ return 0;
+}
+#else
+static inline int acpi_serdev_register_devices(struct serdev_controller *ctrl)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_ACPI */
+
/**
* serdev_controller_add() - Add an serdev controller
* @ctrl: controller to be registered.
@@ -394,7 +489,7 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
*/
int serdev_controller_add(struct serdev_controller *ctrl)
{
- int ret;
+ int ret_of, ret_acpi, ret;
/* Can't register until after driver model init */
if (WARN_ON(!is_registered))
@@ -404,9 +499,14 @@ int serdev_controller_add(struct serdev_controller *ctrl)
if (ret)
return ret;
- ret = of_serdev_register_devices(ctrl);
- if (ret)
+ ret_of = of_serdev_register_devices(ctrl);
+ ret_acpi = acpi_serdev_register_devices(ctrl);
+ if (ret_of && ret_acpi) {
+ dev_dbg(&ctrl->dev, "no devices registered: of:%d acpi:%d\n",
+ ret_of, ret_acpi);
+ ret = -ENODEV;
goto out_dev_del;
+ }
dev_dbg(&ctrl->dev, "serdev%d registered: dev:%p\n",
ctrl->nr, &ctrl->dev);
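
With ACPI enumeration wired into the serdev core above, a slave device described in the ACPI namespace is now registered automatically, and a client driver can match on an ACPI ID as well as a devicetree compatible. A hypothetical client-side sketch ("EXMP0001" and all other names are made up for illustration):

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/serdev.h>

static int example_probe(struct serdev_device *serdev)
{
	/* open the port, configure the line settings, register upwards... */
	return 0;
}

static const struct acpi_device_id example_acpi_match[] = {
	{ "EXMP0001", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, example_acpi_match);

static struct serdev_device_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example-serdev",
		.acpi_match_table = example_acpi_match,
	},
};
module_serdev_device_driver(example_driver);

MODULE_LICENSE("GPL");
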
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index 302018d67efa..ce7ad0acee7a 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <robh@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/serdev.h>
@@ -96,16 +88,21 @@ static int ttyport_open(struct serdev_controller *ctrl)
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty;
struct ktermios ktermios;
+ int ret;
tty = tty_init_dev(serport->tty_drv, serport->tty_idx);
if (IS_ERR(tty))
return PTR_ERR(tty);
serport->tty = tty;
- if (tty->ops->open)
- tty->ops->open(serport->tty, NULL);
- else
- tty_port_open(serport->port, tty, NULL);
+ if (!tty->ops->open || !tty->ops->close) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = tty->ops->open(serport->tty, NULL);
+ if (ret)
+ goto err_close;
/* Bring the UART into a known 8 bits no parity hw fc state */
ktermios = tty->termios;
@@ -122,6 +119,14 @@ static int ttyport_open(struct serdev_controller *ctrl)
tty_unlock(serport->tty);
return 0;
+
+err_close:
+ tty->ops->close(tty, NULL);
+err_unlock:
+ tty_unlock(tty);
+ tty_release_struct(tty, serport->tty_idx);
+
+ return ret;
}
static void ttyport_close(struct serdev_controller *ctrl)
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index 804632b4a929..32b3acf8150a 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the serial port on the 21285 StrongArm-110 core logic chip.
*
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index b2bdc35f7495..ebfb0bd5bef5 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for 8250/16550-type serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright (C) 2001 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/serial_8250.h>
diff --git a/drivers/tty/serial/8250/8250_accent.c b/drivers/tty/serial/8250/8250_accent.c
index 522aeae05192..1691f1a57f89 100644
--- a/drivers/tty/serial/8250/8250_accent.c
+++ b/drivers/tty/serial/8250/8250_accent.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005 Russell King.
* Data taken from include/asm-i386/serial.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/tty/serial/8250/8250_acorn.c b/drivers/tty/serial/8250/8250_acorn.c
index 402dfdd4940e..758c4aa203ab 100644
--- a/drivers/tty/serial/8250/8250_acorn.c
+++ b/drivers/tty/serial/8250/8250_acorn.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/serial/acorn.c
*
* Copyright (C) 1996-2003 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/types.h>
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 33a801353114..74a408d9db24 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Serial Port driver for Aspeed VUART device
*
* Copyright (C) 2016 Jeremy Kerr <jk@ozlabs.org>, IBM Corp.
* Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/device.h>
#include <linux/module.h>
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index a23c7da42ea8..bd53661103eb 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Serial port driver for BCM2835AUX UART
*
@@ -5,11 +6,6 @@
*
* Based on 8250_lpc18xx.c:
* Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/clk.h>
diff --git a/drivers/tty/serial/8250/8250_boca.c b/drivers/tty/serial/8250/8250_boca.c
index a63b5998e383..a9b97c034653 100644
--- a/drivers/tty/serial/8250/8250_boca.c
+++ b/drivers/tty/serial/8250/8250_boca.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005 Russell King.
* Data taken from include/asm-i386/serial.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index d29b512a7d9f..d64afdd93872 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Universal/legacy driver for 8250/16550-type serial ports
*
@@ -11,11 +12,6 @@
* userspace-configurable "phantom" ports
* "serial8250" platform devices
* serial8250_register_8250_port() ports
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
@@ -262,17 +258,17 @@ static void serial_unlink_irq_chain(struct uart_8250_port *up)
* barely passable results for a 16550A. (Although at the expense
* of much CPU overhead).
*/
-static void serial8250_timeout(unsigned long data)
+static void serial8250_timeout(struct timer_list *t)
{
- struct uart_8250_port *up = (struct uart_8250_port *)data;
+ struct uart_8250_port *up = from_timer(up, t, timer);
up->port.handle_irq(&up->port);
mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port));
}
-static void serial8250_backup_timeout(unsigned long data)
+static void serial8250_backup_timeout(struct timer_list *t)
{
- struct uart_8250_port *up = (struct uart_8250_port *)data;
+ struct uart_8250_port *up = from_timer(up, t, timer);
unsigned int iir, ier = 0, lsr;
unsigned long flags;
@@ -329,8 +325,7 @@ static int univ8250_setup_irq(struct uart_8250_port *up)
if (up->bugs & UART_BUG_THRE) {
pr_debug("ttyS%d - using backup timer\n", serial_index(port));
- up->timer.function = serial8250_backup_timeout;
- up->timer.data = (unsigned long)up;
+ up->timer.function = (TIMER_FUNC_TYPE)serial8250_backup_timeout;
mod_timer(&up->timer, jiffies +
uart_poll_timeout(port) + HZ / 5);
}
@@ -341,7 +336,6 @@ static int univ8250_setup_irq(struct uart_8250_port *up)
* driver used to do this with IRQ0.
*/
if (!port->irq) {
- up->timer.data = (unsigned long)up;
mod_timer(&up->timer, jiffies + uart_poll_timeout(port));
} else
retval = serial_link_irq_chain(up);
@@ -354,7 +348,7 @@ static void univ8250_release_irq(struct uart_8250_port *up)
struct uart_port *port = &up->port;
del_timer_sync(&up->timer);
- up->timer.function = serial8250_timeout;
+ up->timer.function = (TIMER_FUNC_TYPE)serial8250_timeout;
if (port->irq)
serial_unlink_irq_chain(up);
}
@@ -525,8 +519,7 @@ static void __init serial8250_isa_init_ports(void)
base_ops = port->ops;
port->ops = &univ8250_port_ops;
- init_timer(&up->timer);
- up->timer.function = serial8250_timeout;
+ timer_setup(&up->timer, serial8250_timeout, 0);
up->ops = &univ8250_driver_ops;
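Note on the 8250_core.c hunks above: they follow the kernel-wide timer conversion in which the callback receives the struct timer_list pointer and recovers its container with from_timer(), so the untyped .data field goes away; the (TIMER_FUNC_TYPE) casts are only the transitional form used while .function still carries the old prototype. A minimal sketch of the new pattern, with a hypothetical my_port structure standing in for uart_8250_port:

struct my_port {
	struct timer_list timer;
	/* ... driver state ... */
};

static void my_timeout(struct timer_list *t)
{
	/* map the expiring timer back to its enclosing structure */
	struct my_port *up = from_timer(up, t, timer);

	/* ... poll the hardware ..., then re-arm the poll timer */
	mod_timer(&up->timer, jiffies + HZ);
}

static void my_port_init(struct my_port *up)
{
	/* replaces init_timer() plus the .function/.data assignments */
	timer_setup(&up->timer, my_timeout, 0);
	mod_timer(&up->timer, jiffies + HZ);
}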
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 26f17456b0d7..bfa1a857f3ff 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* 8250_dma.c - DMA Engine API support for 8250.c
*
* Copyright (C) 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/tty.h>
#include <linux/tty_flip.h>
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 7e638997bfc2..5bb0c42c88dd 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Synopsys DesignWare 8250 driver.
*
* Copyright 2011 Picochip, Jamie Iles.
* Copyright 2013 Intel Corporation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* The Synopsys DesignWare 8250 has an extra feature whereby it detects if the
* LCR is written whilst busy. If it is, then a busy detect interrupt is
* raised, the LCR needs to be rewritten and the uart status register read.
@@ -256,25 +252,31 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
struct ktermios *old)
{
unsigned int baud = tty_termios_baud_rate(termios);
+ unsigned int target_rate, min_rate, max_rate;
struct dw8250_data *d = p->private_data;
long rate;
- int ret;
+ int i, ret;
if (IS_ERR(d->clk) || !old)
goto out;
- clk_disable_unprepare(d->clk);
- rate = clk_round_rate(d->clk, baud * 16);
- if (rate < 0)
- ret = rate;
- else if (rate == 0)
- ret = -ENOENT;
- else
- ret = clk_set_rate(d->clk, rate);
- clk_prepare_enable(d->clk);
+ /* Find a clk rate within +/-1.6% of an integer multiple of baudx16 */
+ target_rate = baud * 16;
+ min_rate = target_rate - (target_rate >> 6);
+ max_rate = target_rate + (target_rate >> 6);
- if (!ret)
- p->uartclk = rate;
+ for (i = 1; i <= UART_DIV_MAX; i++) {
+ rate = clk_round_rate(d->clk, i * target_rate);
+ if (rate >= i * min_rate && rate <= i * max_rate)
+ break;
+ }
+ if (i <= UART_DIV_MAX) {
+ clk_disable_unprepare(d->clk);
+ ret = clk_set_rate(d->clk, rate);
+ clk_prepare_enable(d->clk);
+ if (!ret)
+ p->uartclk = rate;
+ }
out:
p->status &= ~UPSTAT_AUTOCTS;
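The replacement loop in dw8250_set_termios() above asks the clock framework for a rate close to an integer multiple of baud * 16 instead of taking whatever clk_round_rate() returns for the exact target. The tolerance is target_rate >> 6, i.e. about 1.56% (documented as +/-1.6%). A self-contained sketch of the same search, with a stubbed round_rate() standing in for clk_round_rate() (the step size is an assumption for illustration only):

#include <stdio.h>

#define UART_DIV_MAX 0xffff

/* stand-in for clk_round_rate(): pretend the clock only does multiples of 1.8432 MHz */
static long round_rate(long requested)
{
	const long step = 1843200;

	return ((requested + step / 2) / step) * step;
}

int main(void)
{
	unsigned int baud = 115200;
	unsigned int target = baud * 16;
	unsigned int min = target - (target >> 6);	/* roughly -1.56% */
	unsigned int max = target + (target >> 6);	/* roughly +1.56% */
	int i;

	for (i = 1; i <= UART_DIV_MAX; i++) {
		long rate = round_rate((long)i * target);

		if (rate >= (long)i * min && rate <= (long)i * max) {
			printf("multiple %d -> clk %ld becomes uartclk\n", i, rate);
			break;
		}
	}
	return 0;
}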
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index af72ec32e404..362c25ff188a 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Early serial console for 8250/16550 devices
*
* (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Based on the 8250.c serial driver, Copyright (C) 2001 Russell King,
* and on early_printk.c by Andi Kleen.
*
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index 0b6381214917..f6a86f2bc4e5 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas Emma Mobile 8250 driver
*
* Copyright (C) 2012 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/device.h>
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index c55624703fdf..a402878c9f30 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Probe module for 8250/16550-type Exar chips PCI serial ports.
*
* Based on drivers/tty/serial/8250/8250_pci.c,
*
* Copyright (C) 2017 Sudip Mukherjee, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#include <linux/acpi.h>
#include <linux/dmi.h>
diff --git a/drivers/tty/serial/8250/8250_exar_st16c554.c b/drivers/tty/serial/8250/8250_exar_st16c554.c
index 3a7cb8262bb9..933811ebfaac 100644
--- a/drivers/tty/serial/8250/8250_exar_st16c554.c
+++ b/drivers/tty/serial/8250/8250_exar_st16c554.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Written by Paul B Schroeder < pschroeder "at" uplogix "dot" com >
* Based on 8250_boca.
*
* Copyright (C) 2005 Russell King.
* Data taken from include/asm-i386/serial.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index e500f7dd2470..79a4958b3f5c 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Probe for F81216A LPC to 4 UART
*
* Copyright (C) 2014-2016 Ricardo Ribalda, Qtechnology A/S
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#include <linux/module.h>
#include <linux/pci.h>
@@ -40,6 +36,16 @@
#define IRQ_LEVEL_LOW 0
#define IRQ_EDGE_HIGH BIT(5)
+/*
+ * F81216H clock source register; the value and mask are the same as on the
+ * F81866, but the register lives at F0h.
+ *
+ * Clock speeds for UART (register F0h)
+ * 00: 1.8432MHz.
+ * 01: 18.432MHz.
+ * 10: 24MHz.
+ * 11: 14.769MHz.
+ */
#define RS485 0xF0
#define RTS_INVERT BIT(5)
#define RS485_URA BIT(4)
@@ -118,6 +124,9 @@ static int fintek_8250_enter_key(u16 base_port, u8 key)
if (!request_muxed_region(base_port, 2, "8250_fintek"))
return -EBUSY;
+ /* Force all SuperIO devices on this base_port to deactivate */
+ outb(EXIT_KEY, base_port + ADDR_PORT);
+
outb(key, base_port + ADDR_PORT);
outb(key, base_port + ADDR_PORT);
return 0;
@@ -188,14 +197,27 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if (!pdata)
return -EINVAL;
- if (rs485->flags & SER_RS485_ENABLED)
+ /* The hardware does not support the same RTS level on send and receive */
+ if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
+ return -EINVAL;
+
+ if (rs485->flags & SER_RS485_ENABLED) {
memset(rs485->padding, 0, sizeof(rs485->padding));
- else
+ config |= RS485_URA;
+ } else {
memset(rs485, 0, sizeof(*rs485));
+ }
rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
SER_RS485_RTS_AFTER_SEND;
+ /* Only the first port supports delays */
+ if (pdata->index) {
+ rs485->delay_rts_before_send = 0;
+ rs485->delay_rts_after_send = 0;
+ }
+
if (rs485->delay_rts_before_send) {
rs485->delay_rts_before_send = 1;
config |= TXW4C_IRA;
@@ -206,12 +228,6 @@ static int fintek_8250_rs485_config(struct uart_port *port,
config |= RXW4C_IRA;
}
- if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) ==
- (!!(rs485->flags & SER_RS485_RTS_AFTER_SEND)))
- rs485->flags &= SER_RS485_ENABLED;
- else
- config |= RS485_URA;
-
if (rs485->flags & SER_RS485_RTS_ON_SEND)
config |= RTS_INVERT;
@@ -280,13 +296,91 @@ static void fintek_8250_goto_highspeed(struct uart_8250_port *uart,
F81866_UART_CLK_MASK,
F81866_UART_CLK_14_769MHZ);
- uart->port.uartclk = 921600 * 16;
+ uart->port.uartclk = 921600 * 16;
break;
default: /* leave clock speed untouched */
break;
}
}
+void fintek_8250_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct fintek_8250 *pdata = port->private_data;
+ unsigned int baud = tty_termios_baud_rate(termios);
+ int i;
+ u8 reg;
+ static u32 baudrate_table[] = {115200, 921600, 1152000, 1500000};
+ static u8 clock_table[] = { F81866_UART_CLK_1_8432MHZ,
+ F81866_UART_CLK_14_769MHZ, F81866_UART_CLK_18_432MHZ,
+ F81866_UART_CLK_24MHZ };
+
+ /*
+ * Fall back to serial8250_do_set_termios() for baud = 0; otherwise the
+ * baudrate_table[i] % baud below would divide by zero.
+ */
+ if (!baud)
+ goto exit;
+
+ switch (pdata->pid) {
+ case CHIP_ID_F81216H:
+ reg = RS485;
+ break;
+ case CHIP_ID_F81866:
+ reg = F81866_UART_CLK;
+ break;
+ default:
+ /* Don't change clocksource with unknown PID */
+ dev_warn(port->dev,
+ "%s: pid: %x Not support. use default set_termios.\n",
+ __func__, pdata->pid);
+ goto exit;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(baudrate_table); ++i) {
+ if (baud > baudrate_table[i] || baudrate_table[i] % baud != 0)
+ continue;
+
+ if (port->uartclk == baudrate_table[i] * 16)
+ break;
+
+ if (fintek_8250_enter_key(pdata->base_port, pdata->key))
+ continue;
+
+ port->uartclk = baudrate_table[i] * 16;
+
+ sio_write_reg(pdata, LDN, pdata->index);
+ sio_write_mask_reg(pdata, reg, F81866_UART_CLK_MASK,
+ clock_table[i]);
+
+ fintek_8250_exit_key(pdata->base_port);
+ break;
+ }
+
+ if (i == ARRAY_SIZE(baudrate_table)) {
+ baud = tty_termios_baud_rate(old);
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ }
+
+exit:
+ serial8250_do_set_termios(port, termios, old);
+}
+
+static void fintek_8250_set_termios_handler(struct uart_8250_port *uart)
+{
+ struct fintek_8250 *pdata = uart->port.private_data;
+
+ switch (pdata->pid) {
+ case CHIP_ID_F81216H:
+ case CHIP_ID_F81866:
+ uart->port.set_termios = fintek_8250_set_termios;
+ break;
+
+ default:
+ break;
+ }
+}
+
static int probe_setup_port(struct fintek_8250 *pdata,
struct uart_8250_port *uart)
{
@@ -373,6 +467,7 @@ int fintek_8250_probe(struct uart_8250_port *uart)
memcpy(pdata, &probe_data, sizeof(probe_data));
uart->port.private_data = pdata;
fintek_8250_set_rs485_handler(uart);
+ fintek_8250_set_termios_handler(uart);
return 0;
}
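The new fintek_8250_set_termios() above walks a table of base rates (115200, 921600, 1152000, 1500000), picks the first one that the requested baud divides evenly, and reprograms the SuperIO clock source so uartclk becomes that base rate * 16; if nothing matches it keeps the old rate. A small self-contained sketch of just the selection step (table values taken from the hunk; the SuperIO register writes are omitted):

#include <stdio.h>

int main(void)
{
	static const unsigned int baudrate_table[] =
		{ 115200, 921600, 1152000, 1500000 };
	const unsigned int n = sizeof(baudrate_table) / sizeof(baudrate_table[0]);
	unsigned int baud = 460800;		/* requested rate */
	unsigned int uartclk = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		/* the base rate must be at least baud and an exact multiple of it */
		if (baud > baudrate_table[i] || baudrate_table[i] % baud != 0)
			continue;

		uartclk = baudrate_table[i] * 16;	/* clock source would be reprogrammed here */
		break;
	}

	if (uartclk)
		printf("baud %u served from base %u, uartclk %u\n",
		       baud, baudrate_table[i], uartclk);
	else
		printf("baud %u not reachable, keeping the old clock\n", baud);
	return 0;
}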
diff --git a/drivers/tty/serial/8250/8250_fourport.c b/drivers/tty/serial/8250/8250_fourport.c
index 4045180a8cfc..3215b9b7afde 100644
--- a/drivers/tty/serial/8250/8250_fourport.c
+++ b/drivers/tty/serial/8250/8250_fourport.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005 Russell King.
* Data taken from include/asm-i386/serial.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index 910bfee5a88b..6640a4c7ddd1 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
@@ -6,10 +7,6 @@
/*
* Freescale 16550 UART "driver", Copyright (C) 2011 Paul Gortmaker.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This isn't a full driver; it just provides an alternate IRQ
* handler to deal with an erratum. Everything else is just
* using the bog standard 8250 support.
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index df2931e1e086..0809ae2aa9b1 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Serial Device Initialisation for Lasi/Asp/Wax/Dino
*
* (c) Copyright Matthew Wilcox <willy@debian.org> 2001-2002
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/errno.h>
diff --git a/drivers/tty/serial/8250/8250_hp300.c b/drivers/tty/serial/8250/8250_hp300.c
index 115190b7962a..3012ea03d22c 100644
--- a/drivers/tty/serial/8250/8250_hp300.c
+++ b/drivers/tty/serial/8250/8250_hp300.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the 98626/98644/internal serial interface on hp300/hp400
* (based on the National Semiconductor INS8250/NS16550AF/WD16C552 UARTs)
diff --git a/drivers/tty/serial/8250/8250_hub6.c b/drivers/tty/serial/8250/8250_hub6.c
index 27124e21eb96..273f59b9bca5 100644
--- a/drivers/tty/serial/8250/8250_hub6.c
+++ b/drivers/tty/serial/8250/8250_hub6.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005 Russell King.
* Data taken from include/asm-i386/serial.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index 464389b28900..6af84900870e 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2010 Lars-Peter Clausen <lars@metafoo.de>
* Copyright (C) 2015 Imagination Technologies
*
* Ingenic SoC UART support
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/clk.h>
diff --git a/drivers/tty/serial/8250/8250_lpc18xx.c b/drivers/tty/serial/8250/8250_lpc18xx.c
index 99cd478851ff..eddf119374e1 100644
--- a/drivers/tty/serial/8250/8250_lpc18xx.c
+++ b/drivers/tty/serial/8250/8250_lpc18xx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Serial port driver for NXP LPC18xx/43xx UART
*
@@ -6,11 +7,6 @@
* Based on 8250_mtk.c:
* Copyright (c) 2014 MundoReader S.L.
* Matthias Brugger <matthias.bgg@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/clk.h>
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 7dddd7e6a01c..98dbc796353f 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* 8250_lpss.c - Driver for UART on Intel Braswell and various other Intel SoCs
*
* Copyright (C) 2016 Intel Corporation
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index ec957cce8c9a..efa0515139f8 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* 8250_mid.c - Driver for UART on Intel Penwell and various other Intel SOCs
*
* Copyright (C) 2015 Intel Corporation
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
@@ -23,10 +20,11 @@
#define PCI_DEVICE_ID_INTEL_PNW_UART2 0x081c
#define PCI_DEVICE_ID_INTEL_PNW_UART3 0x081d
#define PCI_DEVICE_ID_INTEL_TNG_UART 0x1191
+#define PCI_DEVICE_ID_INTEL_CDF_UART 0x18d8
#define PCI_DEVICE_ID_INTEL_DNV_UART 0x19d8
/* Intel MID Specific registers */
-#define INTEL_MID_UART_DNV_FISR 0x08
+#define INTEL_MID_UART_FISR 0x08
#define INTEL_MID_UART_PS 0x30
#define INTEL_MID_UART_MUL 0x34
#define INTEL_MID_UART_DIV 0x38
@@ -130,7 +128,7 @@ static int dnv_handle_irq(struct uart_port *p)
{
struct mid8250 *mid = p->private_data;
struct uart_8250_port *up = up_to_u8250p(p);
- unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR);
+ unsigned int fisr = serial_port_in(p, INTEL_MID_UART_FISR);
u32 status;
int ret = 0;
int err;
@@ -377,6 +375,7 @@ static const struct pci_device_id pci_ids[] = {
MID_DEVICE(PCI_DEVICE_ID_INTEL_PNW_UART2, pnw_board),
MID_DEVICE(PCI_DEVICE_ID_INTEL_PNW_UART3, pnw_board),
MID_DEVICE(PCI_DEVICE_ID_INTEL_TNG_UART, tng_board),
+ MID_DEVICE(PCI_DEVICE_ID_INTEL_CDF_UART, dnv_board),
MID_DEVICE(PCI_DEVICE_ID_INTEL_DNV_UART, dnv_board),
{ },
};
diff --git a/drivers/tty/serial/8250/8250_moxa.c b/drivers/tty/serial/8250/8250_moxa.c
index d5069b2d4d79..1ee4cd94d4fa 100644
--- a/drivers/tty/serial/8250/8250_moxa.c
+++ b/drivers/tty/serial/8250/8250_moxa.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* 8250_moxa.c - MOXA Smartio/Industio MUE multiport serial driver.
*
* Author: Mathieu OTHACEHE <m.othacehe@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index fb45770d47aa..dd5e1cede2b5 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Mediatek 8250 driver.
*
* Copyright (c) 2014 MundoReader S.L.
* Author: Matthias Brugger <matthias.bgg@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/io.h>
@@ -61,7 +52,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
* registers to their default values.
*/
baud = uart_get_baud_rate(port, termios, old,
- port->uartclk / 16 / 0xffff,
+ port->uartclk / 16 / UART_DIV_MAX,
port->uartclk);
if (baud <= 115200) {
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 1222c005fb98..1e67a7e4a5fd 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Serial Port driver for Open Firmware platform devices
*
* Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
*/
#include <linux/console.h>
#include <linux/module.h>
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 833771bca0a5..bd40ba402410 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* 8250-core based driver for the OMAP internal UART
*
@@ -199,7 +200,7 @@ static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud,
* Old custom speed handling.
*/
if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) {
- priv->quot = port->custom_divisor & 0xffff;
+ priv->quot = port->custom_divisor & UART_DIV_MAX;
/*
* I assume that nobody is using this. But hey, if somebody
* would like to specify the divisor _and_ the mode then the
@@ -358,7 +359,7 @@ static void omap_8250_set_termios(struct uart_port *port,
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old,
- port->uartclk / 16 / 0xffff,
+ port->uartclk / 16 / UART_DIV_MAX,
port->uartclk / 13);
omap_8250_get_divisor(port, baud, priv);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 0c101a7470b0..b7e0e3416641 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Probe module for 8250/16550-type PCI serial ports.
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright (C) 2001 Russell King, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#undef DEBUG
#include <linux/module.h>
@@ -3368,6 +3365,7 @@ static const struct pci_device_id blacklist[] = {
{ PCI_VDEVICE(INTEL, 0x081c), },
{ PCI_VDEVICE(INTEL, 0x081d), },
{ PCI_VDEVICE(INTEL, 0x1191), },
+ { PCI_VDEVICE(INTEL, 0x18d8), },
{ PCI_VDEVICE(INTEL, 0x19d8), },
/* Intel platforms with DesignWare UART */
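The new Cedar Fork UART id 0x18d8 shows up twice in this series: in the 8250_mid match table (reusing the DNV board setup) and, in the hunk above, in the generic 8250_pci blacklist so the generic probe leaves the device to the dedicated driver. A hedged sketch of how such a pair of entries looks; the id comes from the hunks, while the table names and the &dnv_board reference are illustrative:

#define PCI_DEVICE_ID_INTEL_CDF_UART	0x18d8

/* dedicated driver: claim the device and point driver_data at the DNV-style setup */
static const struct pci_device_id mid_ids[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CDF_UART),
		(kernel_ulong_t)&dnv_board },
	{ }
};

/* generic 8250_pci: blacklist the same id so it is not bound twice */
static const struct pci_device_id blacklist[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CDF_UART), },
	{ }
};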
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index 34f05ed78b68..431e69a5a6a0 100644
--- a/drivers/tty/serial/8250/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Probe for 8250/16550-type ISAPNP serial ports.
*
@@ -6,10 +7,6 @@
* Copyright (C) 2001 Russell King, All Rights Reserved.
*
* Ported to the Linux PnP Layer - (C) Adam Belay.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index f0cc04f62b67..11434551ac0a 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Base port operations for 8250/16550-type serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
* Split from 8250_core.c, Copyright (C) 2001 Russell King.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* A note about mapbase / membase
*
* mapbase is the physical address of the IO port.
@@ -1516,7 +1512,6 @@ static inline void __stop_tx(struct uart_8250_port *p)
return;
em485->active_timer = NULL;
- hrtimer_cancel(&em485->start_tx_timer);
__stop_tx_rs485(p);
}
@@ -1580,8 +1575,6 @@ static inline void start_tx_rs485(struct uart_port *port)
serial8250_stop_rx(&up->port);
em485->active_timer = NULL;
- if (hrtimer_is_queued(&em485->stop_tx_timer))
- hrtimer_cancel(&em485->stop_tx_timer);
mcr = serial8250_in_MCR(up);
if (!!(up->port.rs485.flags & SER_RS485_RTS_ON_SEND) !=
@@ -2586,8 +2579,11 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
serial_dl_write(up, quot);
/* XR17V35x UARTs have an extra fractional divisor register (DLD) */
- if (up->port.type == PORT_XR17V35X)
+ if (up->port.type == PORT_XR17V35X) {
+ /* Preserve bits not related to baudrate; DLD[7:4]. */
+ quot_frac |= serial_port_in(port, 0x2) & 0xf0;
serial_port_out(port, 0x2, quot_frac);
+ }
}
static unsigned int serial8250_get_baud_rate(struct uart_port *port,
@@ -2601,7 +2597,7 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
* causing transmission errors.
*/
return uart_get_baud_rate(port, termios, old,
- port->uartclk / 16 / 0xffff,
+ port->uartclk / 16 / UART_DIV_MAX,
port->uartclk);
}
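Two independent fixes in 8250_port.c above: the minimum baud rate passed to uart_get_baud_rate() is now derived from UART_DIV_MAX (the 16-bit divisor latch limit) rather than the magic 0xffff, and the XR17V35x fractional-divisor write preserves the upper DLD bits with a read-modify-write instead of clobbering them. A standalone sketch of the arithmetic involved; the clock and register values are examples only:

#include <stdio.h>

#define UART_DIV_MAX 0xffff	/* largest value the 16-bit divisor latch can hold */

int main(void)
{
	unsigned int uartclk = 1843200;				/* example input clock */
	unsigned int min_baud = uartclk / 16 / UART_DIV_MAX;	/* slowest reachable rate */
	unsigned int baud = 115200;
	unsigned int quot = uartclk / (16 * baud);		/* DLL/DLM divisor */

	/* XR17V35x DLD: keep bits [7:4], replace only the fractional field [3:0] */
	unsigned int dld_old = 0xa5;	/* pretend register readback */
	unsigned int quot_frac = 0x03;
	unsigned int dld_new = (dld_old & 0xf0) | (quot_frac & 0x0f);

	printf("min baud %u, divisor for %u baud = %u, DLD 0x%02x -> 0x%02x\n",
	       min_baud, baud, quot, dld_old, dld_new);
	return 0;
}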
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index 4d68731af534..b9bcbe20a2be 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/tty/serial/8250/8250_pxa.c -- driver for PXA on-board UARTS
* Copyright: (C) 2013 Sergei Ianovich <ynvich@gmail.com>
@@ -7,12 +8,6 @@
* Copyright: (C) 2003 Monta Vista Software, Inc.
*
* Based on drivers/serial/8250.c by Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#include <linux/device.h>
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index 8a10b10e27aa..45ef506293ae 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index 933c2688dd7e..9963a766dcfb 100644
--- a/drivers/tty/serial/8250/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/*======================================================================
A driver for PCMCIA serial devices
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 0475f5d261ce..c90e503d6b57 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* altera_jtaguart.c -- Altera JTAG UART driver
*
@@ -6,11 +7,6 @@
* (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
* (C) Copyright 2008, Thomas Chou <thomas@wytron.com.tw>
* (C) Copyright 2010, Tobias Klauser <tklauser@distanz.ch>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 3e4b717670d7..b88b05f8e81e 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* altera_uart.c -- Altera UART driver
*
@@ -6,11 +7,6 @@
* (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
* (C) Copyright 2008, Thomas Chou <thomas@wytron.com.tw>
* (C) Copyright 2010, Tobias Klauser <tklauser@distanz.ch>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
@@ -288,10 +284,10 @@ static irqreturn_t altera_uart_interrupt(int irq, void *data)
return IRQ_RETVAL(isr);
}
-static void altera_uart_timer(unsigned long data)
+static void altera_uart_timer(struct timer_list *t)
{
- struct uart_port *port = (void *)data;
- struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ struct altera_uart *pp = from_timer(pp, t, tmr);
+ struct uart_port *port = &pp->port;
altera_uart_interrupt(0, port);
mod_timer(&pp->tmr, jiffies + uart_poll_timeout(port));
@@ -314,7 +310,7 @@ static int altera_uart_startup(struct uart_port *port)
int ret;
if (!port->irq) {
- setup_timer(&pp->tmr, altera_uart_timer, (unsigned long)port);
+ timer_setup(&pp->tmr, altera_uart_timer, 0);
mod_timer(&pp->tmr, jiffies + uart_poll_timeout(port));
return 0;
}
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 9ec4b8d2879f..2c37d11726ab 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for AMBA serial ports
*
@@ -6,20 +7,6 @@
* Copyright 1999 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This is a generic driver for ARM AMBA-type serial ports. They
* have a lot of 16550-like features, but are not register compatible.
* Note that although they do have CTS, DCD and DSR inputs, they do
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 111e6a950779..04af8de8617e 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for AMBA serial ports
*
@@ -7,20 +8,6 @@
* Copyright (C) 2000 Deep Blue Solutions Ltd.
* Copyright (C) 2010 ST-Ericsson SA
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This is a generic driver for ARM AMBA-type serial ports. They
* have a lot of 16550-like features, but are not register compatible.
* Note that although they do have CTS, DCD and DSR inputs, they do
@@ -281,7 +268,6 @@ struct uart_amba_port {
unsigned int old_status;
unsigned int fifosize; /* vendor-specific */
unsigned int old_cr; /* state during shutdown */
- bool autorts;
unsigned int fixed_baud; /* vendor-set fixed baud rate */
char type[12];
#ifdef CONFIG_DMA_ENGINE
@@ -1078,9 +1064,9 @@ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
* On every poll it checks the residue in the DMA buffer and transfers
* data to the tty. Also, last_residue is updated for the next poll.
*/
-static void pl011_dma_rx_poll(unsigned long args)
+static void pl011_dma_rx_poll(struct timer_list *t)
{
- struct uart_amba_port *uap = (struct uart_amba_port *)args;
+ struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
struct tty_port *port = &uap->port.state->port;
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_chan *rxchan = uap->dmarx.chan;
@@ -1192,9 +1178,7 @@ skip_rx:
dev_dbg(uap->port.dev, "could not trigger initial "
"RX DMA job, fall back to interrupt mode\n");
if (uap->dmarx.poll_rate) {
- init_timer(&(uap->dmarx.timer));
- uap->dmarx.timer.function = pl011_dma_rx_poll;
- uap->dmarx.timer.data = (unsigned long)uap;
+ timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
mod_timer(&uap->dmarx.timer,
jiffies +
msecs_to_jiffies(uap->dmarx.poll_rate));
@@ -1588,7 +1572,7 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
- if (uap->autorts) {
+ if (port->status & UPSTAT_AUTORTS) {
/* We need to disable auto-RTS if we want to turn RTS off */
TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
}
@@ -1842,7 +1826,7 @@ static void pl011_disable_uart(struct uart_amba_port *uap)
{
unsigned int cr;
- uap->autorts = false;
+ uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
spin_lock_irq(&uap->port.lock);
cr = pl011_read(uap, REG_CR);
uap->old_cr = cr;
@@ -2028,10 +2012,10 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
old_cr |= UART011_CR_RTSEN;
old_cr |= UART011_CR_CTSEN;
- uap->autorts = true;
+ port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
} else {
old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
- uap->autorts = false;
+ port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
}
if (uap->vendor->oversampling) {
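The pl011 changes above drop the driver-private autorts bool in favour of the generic port->status flags, so auto-RTS/auto-CTS state lives where serial_core expects it. A minimal standalone sketch of the flag handling; the bit values mirror the UPSTAT_* flags in <linux/serial_core.h> but are copied here only for illustration:

#include <stdio.h>

#define UPSTAT_AUTORTS	(1 << 2)	/* illustrative copies of the serial_core flags */
#define UPSTAT_AUTOCTS	(1 << 3)

int main(void)
{
	unsigned int status = 0;
	int crtscts = 1;		/* termios asked for hardware flow control */

	if (crtscts)
		status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
	else
		status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);

	/* set_mctrl path: only force RTSEN off when auto-RTS is active */
	if (status & UPSTAT_AUTORTS)
		printf("auto-RTS active: dropping RTS must also clear RTSEN\n");
	return 0;
}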
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index dd60ed96a0ad..60cd133ffbbc 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for GRLIB serial ports (APBUART)
*
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index decc7f3c1ab2..db5df3d54818 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Atheros AR933X SoC built-in UART driver
*
* Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 77fe306690c4..2599f9ecccfe 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ARC On-Chip(fpga) UART Driver
*
* Copyright (C) 2010-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* vineetg: July 10th 2012
* -Decoupled the driver from arch/arc
* +Using platform_get_resource() for irq/membase (thx to bfin_uart.c)
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 7551cab438ff..efa25611ca0c 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Atmel AT91 Serial ports
* Copyright (C) 2003 Rick Bronson
@@ -6,21 +7,6 @@
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* DMA support added by Chip Coldwell.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/tty.h>
#include <linux/ioport.h>
@@ -171,6 +157,7 @@ struct atmel_uart_port {
bool has_hw_timer;
struct timer_list uart_timer;
+ bool tx_stopped;
bool suspended;
unsigned int pending;
unsigned int pending_status;
@@ -380,6 +367,10 @@ static int atmel_config_rs485(struct uart_port *port,
*/
static u_int atmel_tx_empty(struct uart_port *port)
{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ if (atmel_port->tx_stopped)
+ return TIOCSER_TEMT;
return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
TIOCSER_TEMT :
0;
@@ -485,6 +476,7 @@ static void atmel_stop_tx(struct uart_port *port)
* is fully transmitted.
*/
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
+ atmel_port->tx_stopped = true;
/* Disable interrupts */
atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
@@ -521,6 +513,7 @@ static void atmel_start_tx(struct uart_port *port)
/* re-enable the transmitter */
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
+ atmel_port->tx_stopped = false;
}
/*
@@ -1178,10 +1171,11 @@ chan_err:
return -EINVAL;
}
-static void atmel_uart_timer_callback(unsigned long data)
+static void atmel_uart_timer_callback(struct timer_list *t)
{
- struct uart_port *port = (void *)data;
- struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct atmel_uart_port *atmel_port = from_timer(atmel_port, t,
+ uart_timer);
+ struct uart_port *port = &atmel_port->uart;
if (!atomic_read(&atmel_port->tasklet_shutdown)) {
tasklet_schedule(&atmel_port->tasklet_rx);
@@ -1667,29 +1661,6 @@ static void atmel_init_property(struct atmel_uart_port *atmel_port,
}
}
-static void atmel_init_rs485(struct uart_port *port,
- struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
-
- struct serial_rs485 *rs485conf = &port->rs485;
- u32 rs485_delay[2];
-
- /* rs485 properties */
- if (of_property_read_u32_array(np, "rs485-rts-delay",
- rs485_delay, 2) == 0) {
- rs485conf->delay_rts_before_send = rs485_delay[0];
- rs485conf->delay_rts_after_send = rs485_delay[1];
- rs485conf->flags = 0;
- }
-
- if (of_get_property(np, "rs485-rx-during-tx", NULL))
- rs485conf->flags |= SER_RS485_RX_DURING_TX;
-
- if (of_get_property(np, "linux,rs485-enabled-at-boot-time", NULL))
- rs485conf->flags |= SER_RS485_ENABLED;
-}
-
static void atmel_set_ops(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
@@ -1866,10 +1837,9 @@ static int atmel_startup(struct uart_port *port)
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
/* enable xmit & rcvr */
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ atmel_port->tx_stopped = false;
- setup_timer(&atmel_port->uart_timer,
- atmel_uart_timer_callback,
- (unsigned long)port);
+ timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0);
if (atmel_use_pdc_rx(port)) {
/* set UART timeout */
@@ -2122,6 +2092,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
/* disable receiver and transmitter */
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
+ atmel_port->tx_stopped = true;
/* mode */
if (port->rs485.flags & SER_RS485_ENABLED) {
@@ -2207,6 +2178,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
atmel_uart_writel(port, ATMEL_US_BRGR, quot);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ atmel_port->tx_stopped = false;
/* restore interrupts */
atmel_uart_writel(port, ATMEL_US_IER, imr);
@@ -2373,7 +2345,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
atmel_init_property(atmel_port, pdev);
atmel_set_ops(port);
- atmel_init_rs485(port, pdev);
+ of_get_rs485_mode(pdev->dev.of_node, &port->rs485);
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
@@ -2450,6 +2422,7 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
/* Make sure that tx path is actually able to send characters */
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
+ atmel_port->tx_stopped = false;
uart_console_write(port, s, count, atmel_console_putchar);
@@ -2511,6 +2484,7 @@ static int __init atmel_console_setup(struct console *co, char *options)
{
int ret;
struct uart_port *port = &atmel_ports[co->index].uart;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int baud = 115200;
int bits = 8;
int parity = 'n';
@@ -2528,6 +2502,7 @@ static int __init atmel_console_setup(struct console *co, char *options)
atmel_uart_writel(port, ATMEL_US_IDR, -1);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ atmel_port->tx_stopped = false;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
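atmel_init_rs485() is removed above because the device-tree parsing it duplicated ("rs485-rts-delay", "rs485-rx-during-tx", "linux,rs485-enabled-at-boot-time") now lives in the shared of_get_rs485_mode() helper; the fsl_lpuart change later in this series makes the same switch. A hedged sketch of a probe path using the helper, where the drvdata lookup and the function name are placeholders:

static int my_uart_probe(struct platform_device *pdev)
{
	struct uart_port *port = dev_get_drvdata(&pdev->dev);	/* placeholder lookup */

	/* one call replaces the per-driver parsing of the rs485 DT properties */
	of_get_rs485_mode(pdev->dev.of_node, &port->rs485);

	if (port->rs485.flags & SER_RS485_ENABLED)
		dev_info(&pdev->dev, "rs485 enabled at boot, rts delays %u/%u ms\n",
			 port->rs485.delay_rts_before_send,
			 port->rs485.delay_rts_after_send);

	return 0;
}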
diff --git a/drivers/tty/serial/atmel_serial.h b/drivers/tty/serial/atmel_serial.h
index bd2560502f3c..ba3a2437cde4 100644
--- a/drivers/tty/serial/atmel_serial.h
+++ b/drivers/tty/serial/atmel_serial.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* include/linux/atmel_serial.h
*
@@ -6,11 +7,6 @@
*
* USART registers.
* Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef ATMEL_SERIAL_H
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 8c48c3784831..b7adc6127b3d 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Derived from many drivers using generic_serial interface.
*
* Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
@@ -846,8 +843,10 @@ static int bcm_uart_probe(struct platform_device *pdev)
if (!res_irq)
return -ENODEV;
- clk = pdev->dev.of_node ? of_clk_get(pdev->dev.of_node, 0) :
- clk_get(&pdev->dev, "periph");
+ clk = clk_get(&pdev->dev, "refclk");
+ if (IS_ERR(clk) && pdev->dev.of_node)
+ clk = of_clk_get(pdev->dev.of_node, 0);
+
if (IS_ERR(clk))
return -ENODEV;
diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index 6b03fb12cd19..4ccca5d22f4f 100644
--- a/drivers/tty/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Blackfin On-Chip Sport Emulated UART Driver
*
* Copyright 2006-2009 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
/*
@@ -584,7 +583,7 @@ static void sport_set_termios(struct uart_port *port,
spin_unlock_irqrestore(&up->port.lock, flags);
}
-struct uart_ops sport_uart_ops = {
+static const struct uart_ops sport_uart_ops = {
.tx_empty = sport_tx_empty,
.set_mctrl = sport_set_mctrl,
.get_mctrl = sport_get_mctrl,
diff --git a/drivers/tty/serial/bfin_sport_uart.h b/drivers/tty/serial/bfin_sport_uart.h
index e4510ea135ce..4b12f45d6580 100644
--- a/drivers/tty/serial/bfin_sport_uart.h
+++ b/drivers/tty/serial/bfin_sport_uart.h
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Blackfin On-Chip Sport Emulated UART Driver
*
* Copyright 2006-2008 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
/*
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 293ecbb00684..4755fa696321 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Blackfin On-Chip Serial Driver
*
* Copyright 2006-2011 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -456,8 +455,9 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
tty_flip_buffer_push(&uart->port.state->port);
}
-void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
+void bfin_serial_rx_dma_timeout(struct timer_list *t)
{
+ struct bfin_serial_port *uart = from_timer(uart, t, rx_dma_timer);
int x_pos, pos;
unsigned long flags;
@@ -624,8 +624,6 @@ static int bfin_serial_startup(struct uart_port *port)
set_dma_start_addr(uart->rx_dma_channel, (unsigned long)uart->rx_dma_buf.buf);
enable_dma(uart->rx_dma_channel);
- uart->rx_dma_timer.data = (unsigned long)(uart);
- uart->rx_dma_timer.function = (void *)bfin_serial_rx_dma_timeout;
uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES;
add_timer(&(uart->rx_dma_timer));
#else
@@ -1316,7 +1314,7 @@ static int bfin_serial_probe(struct platform_device *pdev)
}
uart->rx_dma_channel = res->start;
- init_timer(&(uart->rx_dma_timer));
+ timer_setup(&uart->rx_dma_timer, bfin_serial_rx_dma_timeout, 0);
#endif
#if defined(SERIAL_BFIN_CTSRTS) || \
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index ac1328629baa..98f193a83392 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for CLPS711x serial ports
*
@@ -5,11 +6,6 @@
*
* Copyright 1999 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#if defined(CONFIG_SERIAL_CLPS711X_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
index 0ad027b95873..9f175a92fb5d 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for CPM (SCC/SMC) serial ports
*
@@ -5,11 +6,6 @@
*
* 2006 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- *
*/
#ifndef CPM_UART_H
#define CPM_UART_H
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 9ac142cfc1f1..24a5f05e769b 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for CPM (SCC/SMC) serial ports; core driver
*
@@ -12,21 +13,6 @@
* (C) 2004 Intracom, S.A.
* (C) 2005-2006 MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
index 6d3b22e93246..4eba17f3d293 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for CPM (SCC/SMC) serial ports; CPM1 definitions
*
@@ -8,21 +9,6 @@
* (C) 2004 Intracom, S.A.
* (C) 2006 MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
index f46d2ca87209..e3bff068dc3c 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for CPM (SCC/SMC) serial ports; CPM2 definitions
*
@@ -8,21 +9,6 @@
* (C) 2004 Intracom, S.A.
* (C) 2006 MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index 02ad6953b167..f460cca139e2 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Conexant Digicolor serial ports (USART)
*
* Author: Baruch Siach <baruch@tkos.co.il>
*
* Copyright (C) 2014 Paradox Innovation Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index ff465ff43577..7b57e840e255 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dz.c: Serial port driver for DECstations equipped
* with the DZ chipset.
diff --git a/drivers/tty/serial/earlycon-arm-semihost.c b/drivers/tty/serial/earlycon-arm-semihost.c
index 6bbeb699777c..fa096c10b591 100644
--- a/drivers/tty/serial/earlycon-arm-semihost.c
+++ b/drivers/tty/serial/earlycon-arm-semihost.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012 ARM Ltd.
* Author: Marc Zyngier <marc.zyngier@arm.com>
@@ -5,18 +6,6 @@
* Adapted for ARM and earlycon:
* Copyright (C) 2014 Linaro Ltd.
* Author: Rob Herring <robh@kernel.org>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/console.h>
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 98928f082d87..4c8b80f1c688 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Linaro Ltd.
* Author: Rob Herring <robh@kernel.org>
@@ -5,10 +6,6 @@
* Based on 8250 earlycon:
* (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index 9fff25be87f9..d6b5e5463746 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#if defined(CONFIG_SERIAL_EFM32_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index f0252184291e..c84e6f0db54e 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale lpuart serial port driver
*
* Copyright 2012-2014 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#if defined(CONFIG_SERIAL_FSL_LPUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -1632,12 +1628,11 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned long flags;
- unsigned long ctrl, old_ctrl, bd, modem;
+ unsigned long ctrl, old_ctrl, modem;
unsigned int baud;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL);
- bd = lpuart32_read(&sport->port, UARTBAUD);
modem = lpuart32_read(&sport->port, UARTMODIR);
/*
* only support CS8 and CS7, and for CS7 must enable PE.
@@ -2212,6 +2207,24 @@ static int lpuart_probe(struct platform_device *pdev)
if (ret)
goto failed_attach_port;
+ of_get_rs485_mode(np, &sport->port.rs485);
+
+ if (sport->port.rs485.flags & SER_RS485_RX_DURING_TX) {
+ dev_err(&pdev->dev, "driver doesn't support RX during TX\n");
+ return -ENOSYS;
+ }
+
+ if (sport->port.rs485.delay_rts_before_send ||
+ sport->port.rs485.delay_rts_after_send) {
+ dev_err(&pdev->dev, "driver doesn't support RTS delays\n");
+ return -ENOSYS;
+ }
+
+ if (sport->port.rs485.flags & SER_RS485_ENABLED) {
+ sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
+ writeb(UARTMODEM_TXRTSE, sport->port.membase + UARTMODEM);
+ }
+
sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
if (!sport->dma_tx_chan)
dev_info(sport->port.dev, "DMA tx channel request failed, "
@@ -2222,12 +2235,6 @@ static int lpuart_probe(struct platform_device *pdev)
dev_info(sport->port.dev, "DMA rx channel request failed, "
"operating without rx DMA\n");
- if (of_property_read_bool(np, "linux,rs485-enabled-at-boot-time")) {
- sport->port.rs485.flags |= SER_RS485_ENABLED;
- sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
- writeb(UARTMODEM_TXRTSE, sport->port.membase + UARTMODEM);
- }
-
return 0;
failed_attach_port:
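The lpuart probe above stops open-coding the linux,rs485-enabled-at-boot-time check and instead lets of_get_rs485_mode() fill port.rs485 from the standard RS485 device-tree properties, then rejects what the hardware cannot do (RX during TX, RTS delays) before committing the mode. A condensed sketch of that validate-then-apply pattern for a hypothetical driver with the same limitations — names other than of_get_rs485_mode() and the serial_rs485 fields are invented, and the usual serial_core/OF headers are assumed:

static int example_setup_rs485(struct device_node *np, struct uart_port *port)
{
	/* Populate port->rs485 from the generic DT properties. */
	of_get_rs485_mode(np, &port->rs485);

	/* Refuse configurations this hardware cannot honour. */
	if (port->rs485.flags & SER_RS485_RX_DURING_TX)
		return -ENOSYS;
	if (port->rs485.delay_rts_before_send ||
	    port->rs485.delay_rts_after_send)
		return -ENOSYS;

	/* Only a valid configuration reaches the hardware. */
	if (port->rs485.flags & SER_RS485_ENABLED)
		port->rs485.flags |= SER_RS485_RTS_ON_SEND;

	return 0;
}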
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index fe92d74f4ea5..ad374f7c476d 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* icom.c
*
@@ -6,21 +7,6 @@
* Serial device driver.
*
* Based on code from serial.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/tty/serial/icom.h b/drivers/tty/serial/icom.h
index c8029e0025c9..8a77e739b333 100644
--- a/drivers/tty/serial/icom.h
+++ b/drivers/tty/serial/icom.h
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* icom.h
*
* Copyright (C) 2001 Michael Anderson, IBM Corporation
*
* Serial device driver include file.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index f190a84a0246..473f4f81d690 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/****************************************************************************
*
* Driver for the IFX 6x60 spi modem.
@@ -10,20 +11,6 @@
* Copyright (C) 2009, 2010 Intel Corp
* Russ Gorby <russ.gorby@intel.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA
- *
* Driver modified by Intel from Option gtm501l_spi.c
*
* Notes
@@ -1029,9 +1016,8 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
spin_lock_init(&ifx_dev->write_lock);
spin_lock_init(&ifx_dev->power_lock);
ifx_dev->power_status = 0;
- init_timer(&ifx_dev->spi_timer);
- ifx_dev->spi_timer.function = ifx_spi_timeout;
- ifx_dev->spi_timer.data = (unsigned long)ifx_dev;
+ setup_timer(&ifx_dev->spi_timer, ifx_spi_timeout,
+ (unsigned long)ifx_dev);
ifx_dev->modem = pl_data->modem_type;
ifx_dev->use_dma = pl_data->use_dma;
ifx_dev->max_hz = pl_data->max_hz;
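The ifx6x60 hunk above is one of several in this series that fold the three-line init_timer()/.function/.data sequence into a single setup_timer() call; behaviour is unchanged, it is simply the consolidation step that precedes the full timer_setup() conversion. The pattern, for a hypothetical priv structure and timeout handler:

/* Before: open-coded initialisation of a struct timer_list. */
init_timer(&priv->timer);
priv->timer.function = example_timeout;
priv->timer.data = (unsigned long)priv;

/* After: identical result in one call. */
setup_timer(&priv->timer, example_timeout, (unsigned long)priv);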
diff --git a/drivers/tty/serial/ifx6x60.h b/drivers/tty/serial/ifx6x60.h
index 4fbddc297839..c5a2514212ff 100644
--- a/drivers/tty/serial/ifx6x60.h
+++ b/drivers/tty/serial/ifx6x60.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/****************************************************************************
*
* Driver for the IFX spi modem.
@@ -5,23 +6,6 @@
* Copyright (C) 2009, 2010 Intel Corp
* Jim Stanley <jim.stanley@intel.com>
*
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA
- *
- *
- *
*****************************************************************************/
#ifndef _IFX6X60_H
#define _IFX6X60_H
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index dfeff3951f93..a67a606c38eb 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Motorola/Freescale IMX serial ports
*
@@ -5,16 +6,6 @@
*
* Author: Sascha Hauer <sascha@saschahauer.de>
* Copyright (C) 2004 Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -334,7 +325,8 @@ static void imx_port_rts_active(struct imx_port *sport, unsigned long *ucr2)
{
*ucr2 &= ~(UCR2_CTSC | UCR2_CTS);
- mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS);
+ sport->port.mctrl |= TIOCM_RTS;
+ mctrl_gpio_set(sport->gpios, sport->port.mctrl);
}
static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2)
@@ -342,7 +334,8 @@ static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2)
*ucr2 &= ~UCR2_CTSC;
*ucr2 |= UCR2_CTS;
- mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS);
+ sport->port.mctrl &= ~TIOCM_RTS;
+ mctrl_gpio_set(sport->gpios, sport->port.mctrl);
}
static void imx_port_rts_auto(struct imx_port *sport, unsigned long *ucr2)
@@ -714,8 +707,6 @@ static void imx_disable_rx_int(struct imx_port *sport)
{
unsigned long temp;
- sport->dma_is_rxing = 1;
-
/* disable the receiver ready and aging timer interrupts */
temp = readl(sport->port.membase + UCR1);
temp &= ~(UCR1_RRDYEN);
@@ -732,29 +723,6 @@ static void imx_disable_rx_int(struct imx_port *sport)
}
static void clear_rx_errors(struct imx_port *sport);
-static int start_rx_dma(struct imx_port *sport);
-/*
- * If the RXFIFO is filled with some data, and then we
- * arise a DMA operation to receive them.
- */
-static void imx_dma_rxint(struct imx_port *sport)
-{
- unsigned long temp;
- unsigned long flags;
-
- spin_lock_irqsave(&sport->port.lock, flags);
-
- temp = readl(sport->port.membase + USR2);
- if ((temp & USR2_RDR) && !sport->dma_is_rxing) {
-
- imx_disable_rx_int(sport);
-
- /* tell the DMA to receive the data. */
- start_rx_dma(sport);
- }
-
- spin_unlock_irqrestore(&sport->port.lock, flags);
-}
/*
* We have a modem side uart, so the meanings of RTS and CTS are inverted.
@@ -816,11 +784,8 @@ static irqreturn_t imx_int(int irq, void *dev_id)
sts = readl(sport->port.membase + USR1);
sts2 = readl(sport->port.membase + USR2);
- if (sts & (USR1_RRDY | USR1_AGTIM)) {
- if (sport->dma_is_enabled)
- imx_dma_rxint(sport);
- else
- imx_rxint(irq, dev_id);
+ if (!sport->dma_is_enabled && (sts & (USR1_RRDY | USR1_AGTIM))) {
+ imx_rxint(irq, dev_id);
ret = IRQ_HANDLED;
}
@@ -1074,6 +1039,7 @@ static int start_rx_dma(struct imx_port *sport)
desc->callback_param = sport;
dev_dbg(dev, "RX: prepare for the DMA.\n");
+ sport->dma_is_rxing = 1;
sport->rx_cookie = dmaengine_submit(desc);
dma_async_issue_pending(chan);
return 0;
@@ -1165,7 +1131,7 @@ static int imx_uart_dma_init(struct imx_port *sport)
goto err;
}
- sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ sport->rx_buf = kzalloc(RX_BUF_SIZE, GFP_KERNEL);
if (!sport->rx_buf) {
ret = -ENOMEM;
goto err;
@@ -1207,10 +1173,6 @@ static void imx_enable_dma(struct imx_port *sport)
temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN;
writel(temp, sport->port.membase + UCR1);
- temp = readl(sport->port.membase + UCR2);
- temp |= UCR2_ATEN;
- writel(temp, sport->port.membase + UCR2);
-
imx_setup_ufcr(sport, TXTL_DMA, RXTL_DMA);
sport->dma_is_enabled = 1;
@@ -1411,15 +1373,19 @@ static void imx_flush_buffer(struct uart_port *port)
temp = readl(sport->port.membase + UCR1);
temp &= ~UCR1_TDMAEN;
writel(temp, sport->port.membase + UCR1);
- sport->dma_is_txing = false;
+ sport->dma_is_txing = 0;
}
/*
* According to the Reference Manual description of the UART SRST bit:
+ *
* "Reset the transmit and receive state machines,
* all FIFOs and register USR1, USR2, UBIR, UBMR, UBRC, URXD, UTXD
- * and UTS[6-3]". As we don't need to restore the old values from
- * USR1, USR2, URXD, UTXD, only save/restore the other four registers
+ * and UTS[6-3]".
+ *
+ * We don't need to restore the old values from USR1, USR2, URXD and
+ * UTXD. UBRC is read only, so only save/restore the other three
+ * registers.
*/
ubir = readl(sport->port.membase + UBIR);
ubmr = readl(sport->port.membase + UBMR);
@@ -2051,6 +2017,8 @@ static int serial_imx_probe_dt(struct imx_port *sport,
if (of_get_property(np, "rts-gpios", NULL))
sport->have_rtsgpio = 1;
+ of_get_rs485_mode(np, &sport->port.rs485);
+
return 0;
}
#else
@@ -2112,12 +2080,9 @@ static int serial_imx_probe(struct platform_device *pdev)
sport->port.fifosize = 32;
sport->port.ops = &imx_pops;
sport->port.rs485_config = imx_rs485_config;
- sport->port.rs485.flags =
- SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX;
+ sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
sport->port.flags = UPF_BOOT_AUTOCONF;
- init_timer(&sport->timer);
- sport->timer.function = imx_timeout;
- sport->timer.data = (unsigned long)sport;
+ setup_timer(&sport->timer, imx_timeout, (unsigned long)sport);
sport->gpios = mctrl_gpio_init(&sport->port, 0);
if (IS_ERR(sport->gpios))
@@ -2346,11 +2311,39 @@ static int imx_serial_port_resume(struct device *dev)
return 0;
}
+static int imx_serial_port_freeze(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx_port *sport = platform_get_drvdata(pdev);
+
+ uart_suspend_port(&imx_reg, &sport->port);
+
+ /* Needed to enable clock in suspend_noirq */
+ return clk_prepare(sport->clk_ipg);
+}
+
+static int imx_serial_port_thaw(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx_port *sport = platform_get_drvdata(pdev);
+
+ uart_resume_port(&imx_reg, &sport->port);
+
+ clk_unprepare(sport->clk_ipg);
+
+ return 0;
+}
+
static const struct dev_pm_ops imx_serial_port_pm_ops = {
.suspend_noirq = imx_serial_port_suspend_noirq,
.resume_noirq = imx_serial_port_resume_noirq,
+ .freeze_noirq = imx_serial_port_suspend_noirq,
+ .restore_noirq = imx_serial_port_resume_noirq,
.suspend = imx_serial_port_suspend,
.resume = imx_serial_port_resume,
+ .freeze = imx_serial_port_freeze,
+ .thaw = imx_serial_port_thaw,
+ .restore = imx_serial_port_thaw,
};
static struct platform_driver serial_imx_driver = {
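Besides the SPDX and RS485 changes, imx.c gains hibernation support: freeze/thaw/restore callbacks are added to its dev_pm_ops, the existing noirq suspend/resume handlers are reused for the noirq phases, and freeze() additionally prepares the ipg clock so the noirq code can enable it later. A minimal sketch of a dev_pm_ops table wired up the same way for a hypothetical driver (all example_* handlers are placeholders):

static const struct dev_pm_ops example_pm_ops = {
	/* Ordinary system suspend/resume. */
	.suspend	= example_suspend,
	.resume		= example_resume,
	.suspend_noirq	= example_suspend_noirq,
	.resume_noirq	= example_resume_noirq,
	/* Hibernation: image creation (freeze/thaw) and restore from image. */
	.freeze		= example_freeze,
	.thaw		= example_thaw,
	.restore	= example_thaw,
	.freeze_noirq	= example_suspend_noirq,
	.restore_noirq	= example_resume_noirq,
};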
diff --git a/drivers/tty/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c
index 906ee770ff4a..d8a1cdd6a53d 100644
--- a/drivers/tty/serial/ioc3_serial.c
+++ b/drivers/tty/serial/ioc3_serial.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2005 Silicon Graphics, Inc. All Rights Reserved.
*/
diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
index 43d7d32eb150..db5b979e5a0c 100644
--- a/drivers/tty/serial/ioc4_serial.c
+++ b/drivers/tty/serial/ioc4_serial.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2003-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 7ddddb4c3844..8c810733df3d 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Zilog serial chips found on SGI workstations and
* servers. This driver could actually be made more generic.
diff --git a/drivers/tty/serial/jsm/jsm.h b/drivers/tty/serial/jsm/jsm.h
index 0b79b87df47d..7a128aaa3a66 100644
--- a/drivers/tty/serial/jsm/jsm.h
+++ b/drivers/tty/serial/jsm/jsm.h
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
* Copyright 2003 Digi International (www.digi.com)
*
* Copyright (C) 2004 IBM Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
* Wendy Xiong <wendyx@us.ibm.com>
diff --git a/drivers/tty/serial/jsm/jsm_cls.c b/drivers/tty/serial/jsm/jsm_cls.c
index 4eb12a9cae76..c061a7b7bd23 100644
--- a/drivers/tty/serial/jsm/jsm_cls.c
+++ b/drivers/tty/serial/jsm/jsm_cls.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2003 Digi International (www.digi.com)
* Scott H Kilau <Scott_Kilau at digi dot com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
* NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
*
* This is shared code between Digi's CVS archive and the
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 102d499814ac..592e51d8944e 100644
--- a/drivers/tty/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
* Copyright 2003 Digi International (www.digi.com)
*
* Copyright (C) 2004 IBM Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
* Wendy Xiong <wendyx@us.ibm.com>
diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index c6fdd6369534..4718560b8fdc 100644
--- a/drivers/tty/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
* Copyright 2003 Digi International (www.digi.com)
*
* Copyright (C) 2004 IBM Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
* Wendy Xiong <wendyx@us.ibm.com>
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index ec7d8383900f..469927d37b41 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
* Copyright 2003 Digi International (www.digi.com)
*
* Copyright (C) 2004 IBM Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
* Ananda Venkatarman <mansarov@us.ibm.com>
@@ -36,7 +27,7 @@ static void jsm_carrier(struct jsm_channel *ch);
static inline int jsm_get_mstat(struct jsm_channel *ch)
{
unsigned char mstat;
- unsigned result;
+ int result;
jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "start\n");
@@ -124,6 +115,7 @@ static void jsm_tty_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void jsm_tty_write(struct uart_port *port)
{
struct jsm_channel *channel;
+
channel = container_of(port, struct jsm_channel, uart_port);
channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel);
}
@@ -275,14 +267,12 @@ static int jsm_tty_open(struct uart_port *port)
static void jsm_tty_close(struct uart_port *port)
{
struct jsm_board *bd;
- struct ktermios *ts;
struct jsm_channel *channel =
container_of(port, struct jsm_channel, uart_port);
jsm_dbg(CLOSE, &channel->ch_bd->pci_dev, "start\n");
bd = channel->ch_bd;
- ts = &port->state->port.tty->termios;
channel->ch_flags &= ~(CH_STOPI);
@@ -473,12 +463,11 @@ int jsm_uart_port_init(struct jsm_board *brd)
} else
set_bit(line, linemap);
brd->channels[i]->uart_port.line = line;
- rc = uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port);
- if (rc){
+ rc = uart_add_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
+ if (rc) {
printk(KERN_INFO "jsm: Port %d failed. Aborting...\n", i);
return rc;
- }
- else
+ } else
printk(KERN_INFO "jsm: Port %d added\n", i);
}
@@ -541,7 +530,7 @@ void jsm_input(struct jsm_channel *ch)
tp = port->tty;
bd = ch->ch_bd;
- if(!bd)
+ if (!bd)
return;
spin_lock_irqsave(&ch->ch_lock, lock_flags);
@@ -781,7 +770,7 @@ void jsm_check_queue_flow_control(struct jsm_channel *ch)
if (qleft < 256) {
/* HWFLOW */
if (ch->ch_c_cflag & CRTSCTS) {
- if(!(ch->ch_flags & CH_RECEIVER_OFF)) {
+ if (!(ch->ch_flags & CH_RECEIVER_OFF)) {
bd_ops->disable_receiver(ch);
ch->ch_flags |= (CH_RECEIVER_OFF);
jsm_dbg(READ, &ch->ch_bd->pci_dev,
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index 117df151627d..ed2b03058627 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* KGDB NMI serial console
*
@@ -6,10 +7,6 @@
* Colin Cross <ccross@android.com>
* Copyright 2012 Linaro Ltd.
* Anton Vorontsov <anton.vorontsov@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index a260cde743e2..b4ba2b1dab76 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Based on the same principle as kgdboe using the NETPOLL api, this
 * driver uses a console polling api to implement a gdb serial interface
@@ -6,10 +7,6 @@
* Maintainer: Jason Wessel <jason.wessel@windriver.com>
*
* 2007-2008 (c) Jason Wessel - Wind River Systems, Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/ctype.h>
@@ -245,7 +242,8 @@ static void kgdboc_put_char(u8 chr)
kgdb_tty_line, chr);
}
-static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
+static int param_set_kgdboc_var(const char *kmessage,
+ const struct kernel_param *kp)
{
int len = strlen(kmessage);
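The kgdboc change is part of the tree-wide constification of module parameter callbacks: setters now take a const struct kernel_param *. A self-contained sketch of how a hypothetical module declares such a setter through module_param_cb() (the example_* names are invented):

#include <linux/moduleparam.h>

static int example_value;

static int example_set(const char *val, const struct kernel_param *kp)
{
	/* Validate or post-process here before storing the value. */
	return param_set_int(val, kp);
}

static const struct kernel_param_ops example_param_ops = {
	.set = example_set,
	.get = param_get_int,
};

module_param_cb(example_value, &example_param_ops, &example_value, 0644);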
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 22df94f107e5..044128277248 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* Copyright (C) 2004 Infineon IFAP DC COM CPE
* Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2007 John Crispin <john@phrozen.org>
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index cea57ff32c33..d1d73261575b 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* High Speed Serial Ports on NXP LPC32xx SoC
*
@@ -6,16 +7,6 @@
*
* Copyright (C) 2010 NXP Semiconductors
* Copyright (C) 2012 Roland Stigge
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 5b3bd9511993..7b83a8aab495 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* m32r_sio.c
*
@@ -8,11 +9,6 @@
*
* Copyright (C) 2001 Russell King.
* Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
@@ -511,9 +507,9 @@ static void serial_unlink_irq_chain(struct uart_sio_port *up)
/*
* This function is used to handle ports that do not have an interrupt.
*/
-static void m32r_sio_timeout(unsigned long data)
+static void m32r_sio_timeout(struct timer_list *t)
{
- struct uart_sio_port *up = (struct uart_sio_port *)data;
+ struct uart_sio_port *up = from_timer(up, t, timer);
unsigned int timeout;
unsigned int sts;
@@ -576,7 +572,6 @@ static int m32r_sio_startup(struct uart_port *port)
timeout = timeout > 6 ? (timeout / 2 - 2) : 1;
- up->timer.data = (unsigned long)up;
mod_timer(&up->timer, jiffies + timeout);
} else {
retval = serial_link_irq_chain(up);
@@ -907,8 +902,7 @@ static void __init m32r_sio_register_ports(struct uart_driver *drv)
up->port.line = i;
up->port.ops = &m32r_sio_pops;
- init_timer(&up->timer);
- up->timer.function = m32r_sio_timeout;
+ timer_setup(&up->timer, m32r_sio_timeout, 0);
uart_add_one_port(drv, &up->port);
}
diff --git a/drivers/tty/serial/m32r_sio_reg.h b/drivers/tty/serial/m32r_sio_reg.h
index 4671473793e3..6eed48828f94 100644
--- a/drivers/tty/serial/m32r_sio_reg.h
+++ b/drivers/tty/serial/m32r_sio_reg.h
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* m32r_sio_reg.h
*
* Copyright (C) 1992, 1994 by Theodore Ts'o.
* Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
*
- * Redistribution of this file is permitted under the terms of the GNU
- * Public License (GPL)
- *
* These are the UART port assignments, expressed as offsets from the base
* register. These assignments should hold for any serial port based on
* a 8250, 16450, or 16550(A).
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index ace82645b123..27d6049eb6a9 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
*
* Copyright (C) 2008 Christian Pellegrin <chripell@evolware.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- *
* Notes: the MAX3100 doesn't provide an interrupt on CTS so we have
* to use polling for flow control. TX empty IRQ is unusable, since
* writing conf clears FIFO buffer and we cannot have this interrupt
@@ -263,7 +258,7 @@ static void max3100_work(struct work_struct *w)
struct max3100_port *s = container_of(w, struct max3100_port, work);
int rxchars;
u16 tx, rx;
- int conf, cconf, rts, crts;
+ int conf, cconf, crts;
struct circ_buf *xmit = &s->port.state->xmit;
dev_dbg(&s->spi->dev, "%s\n", __func__);
@@ -274,7 +269,6 @@ static void max3100_work(struct work_struct *w)
conf = s->conf;
cconf = s->conf_commit;
s->conf_commit = 0;
- rts = s->rts;
crts = s->rts_commit;
s->rts_commit = 0;
spin_unlock(&s->conf_lock);
@@ -436,7 +430,6 @@ max3100_set_termios(struct uart_port *port, struct ktermios *termios,
dev_dbg(&s->spi->dev, "%s\n", __func__);
cflag = termios->c_cflag;
- param_new = 0;
param_mask = 0;
baud = tty_termios_baud_rate(termios);
@@ -787,9 +780,8 @@ static int max3100_probe(struct spi_device *spi)
max3100s[i]->poll_time = 1;
max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend;
max3100s[i]->minor = i;
- init_timer(&max3100s[i]->timer);
- max3100s[i]->timer.function = max3100_timeout;
- max3100s[i]->timer.data = (unsigned long) max3100s[i];
+ setup_timer(&max3100s[i]->timer, max3100_timeout,
+ (unsigned long)max3100s[i]);
dev_dbg(&spi->dev, "%s: adding port %d\n", __func__, i);
max3100s[i]->port.irq = max3100s[i]->irq;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 9dfedbe6c071..ecb6513a6505 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Maxim (Dallas) MAX3107/8/9, MAX14830 serial driver
*
@@ -6,11 +7,6 @@
* Based on max3100.c, by Christian Pellegrin <chripell@evolware.org>
* Based on max3110.c, by Feng Tang <feng.tang@intel.com>
* Based on max3107.c, by Aavamobile
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/bitops.h>
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 02eb32217685..7dbfb4cde124 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/****************************************************************************/
/*
* mcf.c -- Freescale ColdFire UART driver
*
* (C) Copyright 2003-2007, Greg Ungerer <gerg@uclinux.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/****************************************************************************/
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index e72ea61c70db..ef89534dd760 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MEN 16z135 High Speed UART
*
* Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
* Author: Johannes Thumshirn <johannes.thumshirn@men.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; version 2 of the License.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 07c0f98be3ac..daafe60175da 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -1,19 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Based on meson_uart.c, by AMLOGIC, INC.
*
* Copyright (C) 2014 Carlo Caione <carlo@caione.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
+#if defined(CONFIG_SERIAL_MESON_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -183,12 +178,12 @@ static void meson_receive_chars(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
char flag;
- u32 status, ch, mode;
+ u32 ostatus, status, ch, mode;
do {
flag = TTY_NORMAL;
port->icount.rx++;
- status = readl(port->membase + AML_UART_STATUS);
+ ostatus = status = readl(port->membase + AML_UART_STATUS);
if (status & AML_UART_ERR) {
if (status & AML_UART_TX_FIFO_WERR)
@@ -216,6 +211,16 @@ static void meson_receive_chars(struct uart_port *port)
ch = readl(port->membase + AML_UART_RFIFO);
ch &= 0xff;
+ if ((ostatus & AML_UART_FRAME_ERR) && (ch == 0)) {
+ port->icount.brk++;
+ flag = TTY_BREAK;
+ if (uart_handle_break(port))
+ continue;
+ }
+
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+
if ((status & port->ignore_status_mask) == 0)
tty_insert_flip_char(tport, ch, flag);
@@ -362,7 +367,7 @@ static void meson_uart_set_termios(struct uart_port *port,
writel(val, port->membase + AML_UART_CONTROL);
- baud = uart_get_baud_rate(port, termios, old, 9600, 4000000);
+ baud = uart_get_baud_rate(port, termios, old, 50, 4000000);
meson_uart_change_speed(port, baud);
port->read_status_mask = AML_UART_TX_FIFO_WERR;
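meson_uart now defines SUPPORT_SYSRQ before its includes (so serial_core compiles the sysrq hooks in), treats a framing error on a NUL byte as a line break, and runs every received character through uart_handle_sysrq_char() before queueing it to the tty. That is the standard break/sysrq pattern; a compact sketch of an RX helper using it, with invented example_* names:

static void example_rx_char(struct uart_port *port, struct tty_port *tport,
			    unsigned int ch, bool frame_err)
{
	char flag = TTY_NORMAL;

	if (frame_err && ch == 0) {
		/* A framing error on a NUL byte is how a break is seen. */
		port->icount.brk++;
		flag = TTY_BREAK;
		if (uart_handle_break(port))
			return;		/* consumed, e.g. sysrq break sequence */
	}

	if (uart_handle_sysrq_char(port, ch))
		return;			/* swallowed by the sysrq handler */

	tty_insert_flip_char(tport, ch, flag);
}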
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 791c4c74f6d6..3a75ee08d619 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the PSC of the Freescale MPC52xx PSCs configured as UARTs.
*
@@ -23,10 +24,6 @@
* Grant Likely <grant.likely@secretlab.ca>
* Copyright (C) 2004-2006 Sylvain Munaut <tnt@246tNt.com>
* Copyright (C) 2003 MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#undef DEBUG
diff --git a/drivers/tty/serial/mps2-uart.c b/drivers/tty/serial/mps2-uart.c
index 492ec4b375a0..9f8f63719126 100644
--- a/drivers/tty/serial/mps2-uart.c
+++ b/drivers/tty/serial/mps2-uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MPS2 UART driver
*
@@ -5,10 +6,6 @@
*
* Author: Vladimir Murzin <vladimir.murzin@arm.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* TODO: support for SysRq
*/
diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index 67ffecc50e42..1f60d6fe4ff2 100644
--- a/drivers/tty/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
* GT64260, MV64340, MV64360, GT96100, ... ).
@@ -10,10 +11,7 @@
* taken from PPCBoot (now U-Boot). Also based on drivers/serial/8250.c
* by Russell King.
*
- * 2004 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
+ * 2004 (c) MontaVista, Software, Inc.
*/
/*
* The MPSC interface is much like a typical network controller's interface.
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 1db79ee8a886..ee96cf0d0057 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for msm7k serial device and console
*
* Copyright (C) 2007 Google, Inc.
* Author: Robert Love <rlove@google.com>
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#if defined(CONFIG_SERIAL_MSM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 2bff69e70e4b..3b74369c262f 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
** mux.c:
** serial driver for the Mux console found in some PA-RISC servers.
@@ -5,11 +6,6 @@
** (c) Copyright 2002 Ryan Bradetich
** (c) Copyright 2002 Hewlett-Packard Company
**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
** This Driver currently only supports the console (port 0) on the MUX.
** Additional work will be needed on this driver to enable the full
** functionality of the MUX.
@@ -576,8 +572,7 @@ static int __init mux_init(void)
if(port_cnt > 0) {
/* Start the Mux timer */
- init_timer(&mux_timer);
- mux_timer.function = mux_poll;
+ setup_timer(&mux_timer, mux_poll, 0UL);
mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
#ifdef CONFIG_SERIAL_MUX_CONSOLE
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 45b57c294d13..a100e98259d7 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ***************************************************************************
* Marvell Armada-3700 Serial Driver
* Author: Wilson Ding <dingwei@marvell.com>
* Copyright (C) 2015 Marvell International Ltd.
* ***************************************************************************
-* This program is free software: you can redistribute it and/or modify it
-* under the terms of the GNU General Public License as published by the Free
-* Software Foundation, either version 2 of the License, or any later version.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-* ***************************************************************************
*/
#include <linux/clk.h>
@@ -38,46 +27,40 @@
#include <linux/tty_flip.h>
/* Register Map */
-#define UART_RBR 0x00
-#define RBR_BRK_DET BIT(15)
-#define RBR_FRM_ERR_DET BIT(14)
-#define RBR_PAR_ERR_DET BIT(13)
-#define RBR_OVR_ERR_DET BIT(12)
+#define UART_STD_RBR 0x00
+#define UART_EXT_RBR 0x18
-#define UART_TSH 0x04
+#define UART_STD_TSH 0x04
+#define UART_EXT_TSH 0x1C
-#define UART_CTRL 0x08
+#define UART_STD_CTRL1 0x08
+#define UART_EXT_CTRL1 0x04
#define CTRL_SOFT_RST BIT(31)
#define CTRL_TXFIFO_RST BIT(15)
#define CTRL_RXFIFO_RST BIT(14)
-#define CTRL_ST_MIRR_EN BIT(13)
-#define CTRL_LPBK_EN BIT(12)
#define CTRL_SND_BRK_SEQ BIT(11)
-#define CTRL_PAR_EN BIT(10)
-#define CTRL_TWO_STOP BIT(9)
-#define CTRL_TX_HFL_INT BIT(8)
-#define CTRL_RX_HFL_INT BIT(7)
-#define CTRL_TX_EMP_INT BIT(6)
-#define CTRL_TX_RDY_INT BIT(5)
-#define CTRL_RX_RDY_INT BIT(4)
#define CTRL_BRK_DET_INT BIT(3)
#define CTRL_FRM_ERR_INT BIT(2)
#define CTRL_PAR_ERR_INT BIT(1)
#define CTRL_OVR_ERR_INT BIT(0)
-#define CTRL_RX_INT (CTRL_RX_RDY_INT | CTRL_BRK_DET_INT |\
- CTRL_FRM_ERR_INT | CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT)
+#define CTRL_BRK_INT (CTRL_BRK_DET_INT | CTRL_FRM_ERR_INT | \
+ CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT)
+
+#define UART_STD_CTRL2 UART_STD_CTRL1
+#define UART_EXT_CTRL2 0x20
+#define CTRL_STD_TX_RDY_INT BIT(5)
+#define CTRL_EXT_TX_RDY_INT BIT(6)
+#define CTRL_STD_RX_RDY_INT BIT(4)
+#define CTRL_EXT_RX_RDY_INT BIT(5)
-#define UART_STAT 0x0c
+#define UART_STAT 0x0C
#define STAT_TX_FIFO_EMP BIT(13)
-#define STAT_RX_FIFO_EMP BIT(12)
#define STAT_TX_FIFO_FUL BIT(11)
-#define STAT_TX_FIFO_HFL BIT(10)
-#define STAT_RX_TOGL BIT(9)
-#define STAT_RX_FIFO_FUL BIT(8)
-#define STAT_RX_FIFO_HFL BIT(7)
#define STAT_TX_EMP BIT(6)
-#define STAT_TX_RDY BIT(5)
-#define STAT_RX_RDY BIT(4)
+#define STAT_STD_TX_RDY BIT(5)
+#define STAT_EXT_TX_RDY BIT(15)
+#define STAT_STD_RX_RDY BIT(4)
+#define STAT_EXT_RX_RDY BIT(14)
#define STAT_BRK_DET BIT(3)
#define STAT_FRM_ERR BIT(2)
#define STAT_PAR_ERR BIT(1)
@@ -86,18 +69,73 @@
| STAT_PAR_ERR | STAT_OVR_ERR)
#define UART_BRDV 0x10
+#define BRDV_BAUD_MASK 0x3FF
-#define MVEBU_NR_UARTS 1
+#define MVEBU_NR_UARTS 2
#define MVEBU_UART_TYPE "mvebu-uart"
+#define DRIVER_NAME "mvebu_serial"
+
+enum {
+ /* Either there is only one summed IRQ... */
+ UART_IRQ_SUM = 0,
+ /* ...or there are two separate IRQ for RX and TX */
+ UART_RX_IRQ = 0,
+ UART_TX_IRQ,
+ UART_IRQ_COUNT
+};
-static struct uart_port mvebu_uart_ports[MVEBU_NR_UARTS];
+/* Diverging register offsets */
+struct uart_regs_layout {
+ unsigned int rbr;
+ unsigned int tsh;
+ unsigned int ctrl;
+ unsigned int intr;
+};
+
+/* Diverging flags */
+struct uart_flags {
+ unsigned int ctrl_tx_rdy_int;
+ unsigned int ctrl_rx_rdy_int;
+ unsigned int stat_tx_rdy;
+ unsigned int stat_rx_rdy;
+};
+
+/* Driver data, a structure for each UART port */
+struct mvebu_uart_driver_data {
+ bool is_ext;
+ struct uart_regs_layout regs;
+ struct uart_flags flags;
+};
-struct mvebu_uart_data {
+/* MVEBU UART driver structure */
+struct mvebu_uart {
struct uart_port *port;
- struct clk *clk;
+ struct clk *clk;
+ int irq[UART_IRQ_COUNT];
+ unsigned char __iomem *nb;
+ struct mvebu_uart_driver_data *data;
};
+static struct mvebu_uart *to_mvuart(struct uart_port *port)
+{
+ return (struct mvebu_uart *)port->private_data;
+}
+
+#define IS_EXTENDED(port) (to_mvuart(port)->data->is_ext)
+
+#define UART_RBR(port) (to_mvuart(port)->data->regs.rbr)
+#define UART_TSH(port) (to_mvuart(port)->data->regs.tsh)
+#define UART_CTRL(port) (to_mvuart(port)->data->regs.ctrl)
+#define UART_INTR(port) (to_mvuart(port)->data->regs.intr)
+
+#define CTRL_TX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_tx_rdy_int)
+#define CTRL_RX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_rx_rdy_int)
+#define STAT_TX_RDY(port) (to_mvuart(port)->data->flags.stat_tx_rdy)
+#define STAT_RX_RDY(port) (to_mvuart(port)->data->flags.stat_rx_rdy)
+
+static struct uart_port mvebu_uart_ports[MVEBU_NR_UARTS];
+
/* Core UART Driver Operations */
static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
{
@@ -127,26 +165,39 @@ static void mvebu_uart_set_mctrl(struct uart_port *port,
static void mvebu_uart_stop_tx(struct uart_port *port)
{
- unsigned int ctl = readl(port->membase + UART_CTRL);
+ unsigned int ctl = readl(port->membase + UART_INTR(port));
- ctl &= ~CTRL_TX_RDY_INT;
- writel(ctl, port->membase + UART_CTRL);
+ ctl &= ~CTRL_TX_RDY_INT(port);
+ writel(ctl, port->membase + UART_INTR(port));
}
static void mvebu_uart_start_tx(struct uart_port *port)
{
- unsigned int ctl = readl(port->membase + UART_CTRL);
+ unsigned int ctl;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (IS_EXTENDED(port) && !uart_circ_empty(xmit)) {
+ writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port));
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
- ctl |= CTRL_TX_RDY_INT;
- writel(ctl, port->membase + UART_CTRL);
+ ctl = readl(port->membase + UART_INTR(port));
+ ctl |= CTRL_TX_RDY_INT(port);
+ writel(ctl, port->membase + UART_INTR(port));
}
static void mvebu_uart_stop_rx(struct uart_port *port)
{
- unsigned int ctl = readl(port->membase + UART_CTRL);
+ unsigned int ctl;
+
+ ctl = readl(port->membase + UART_CTRL(port));
+ ctl &= ~CTRL_BRK_INT;
+ writel(ctl, port->membase + UART_CTRL(port));
- ctl &= ~CTRL_RX_INT;
- writel(ctl, port->membase + UART_CTRL);
+ ctl = readl(port->membase + UART_INTR(port));
+ ctl &= ~CTRL_RX_RDY_INT(port);
+ writel(ctl, port->membase + UART_INTR(port));
}
static void mvebu_uart_break_ctl(struct uart_port *port, int brk)
@@ -155,12 +206,12 @@ static void mvebu_uart_break_ctl(struct uart_port *port, int brk)
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- ctl = readl(port->membase + UART_CTRL);
+ ctl = readl(port->membase + UART_CTRL(port));
if (brk == -1)
ctl |= CTRL_SND_BRK_SEQ;
else
ctl &= ~CTRL_SND_BRK_SEQ;
- writel(ctl, port->membase + UART_CTRL);
+ writel(ctl, port->membase + UART_CTRL(port));
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -171,8 +222,8 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
char flag = 0;
do {
- if (status & STAT_RX_RDY) {
- ch = readl(port->membase + UART_RBR);
+ if (status & STAT_RX_RDY(port)) {
+ ch = readl(port->membase + UART_RBR(port));
ch &= 0xff;
flag = TTY_NORMAL;
port->icount.rx++;
@@ -198,7 +249,7 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
goto ignore_char;
if (status & port->ignore_status_mask & STAT_PAR_ERR)
- status &= ~STAT_RX_RDY;
+ status &= ~STAT_RX_RDY(port);
status &= port->read_status_mask;
@@ -207,7 +258,7 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
status &= ~port->ignore_status_mask;
- if (status & STAT_RX_RDY)
+ if (status & STAT_RX_RDY(port))
tty_insert_flip_char(tport, ch, flag);
if (status & STAT_BRK_DET)
@@ -221,7 +272,7 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
ignore_char:
status = readl(port->membase + UART_STAT);
- } while (status & (STAT_RX_RDY | STAT_BRK_DET));
+ } while (status & (STAT_RX_RDY(port) | STAT_BRK_DET));
tty_flip_buffer_push(tport);
}
@@ -233,7 +284,7 @@ static void mvebu_uart_tx_chars(struct uart_port *port, unsigned int status)
unsigned int st;
if (port->x_char) {
- writel(port->x_char, port->membase + UART_TSH);
+ writel(port->x_char, port->membase + UART_TSH(port));
port->icount.tx++;
port->x_char = 0;
return;
@@ -245,7 +296,7 @@ static void mvebu_uart_tx_chars(struct uart_port *port, unsigned int status)
}
for (count = 0; count < port->fifosize; count++) {
- writel(xmit->buf[xmit->tail], port->membase + UART_TSH);
+ writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port));
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
@@ -269,10 +320,34 @@ static irqreturn_t mvebu_uart_isr(int irq, void *dev_id)
struct uart_port *port = (struct uart_port *)dev_id;
unsigned int st = readl(port->membase + UART_STAT);
- if (st & (STAT_RX_RDY | STAT_OVR_ERR | STAT_FRM_ERR | STAT_BRK_DET))
+ if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
+ STAT_BRK_DET))
mvebu_uart_rx_chars(port, st);
- if (st & STAT_TX_RDY)
+ if (st & STAT_TX_RDY(port))
+ mvebu_uart_tx_chars(port, st);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mvebu_uart_rx_isr(int irq, void *dev_id)
+{
+ struct uart_port *port = (struct uart_port *)dev_id;
+ unsigned int st = readl(port->membase + UART_STAT);
+
+ if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
+ STAT_BRK_DET))
+ mvebu_uart_rx_chars(port, st);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mvebu_uart_tx_isr(int irq, void *dev_id)
+{
+ struct uart_port *port = (struct uart_port *)dev_id;
+ unsigned int st = readl(port->membase + UART_STAT);
+
+ if (st & STAT_TX_RDY(port))
mvebu_uart_tx_chars(port, st);
return IRQ_HANDLED;
@@ -280,18 +355,57 @@ static irqreturn_t mvebu_uart_isr(int irq, void *dev_id)
static int mvebu_uart_startup(struct uart_port *port)
{
+ struct mvebu_uart *mvuart = to_mvuart(port);
+ unsigned int ctl;
int ret;
writel(CTRL_TXFIFO_RST | CTRL_RXFIFO_RST,
- port->membase + UART_CTRL);
+ port->membase + UART_CTRL(port));
udelay(1);
- writel(CTRL_RX_INT, port->membase + UART_CTRL);
- ret = request_irq(port->irq, mvebu_uart_isr, port->irqflags, "serial",
- port);
- if (ret) {
- dev_err(port->dev, "failed to request irq\n");
- return ret;
+ /* Clear the error bits of state register before IRQ request */
+ ret = readl(port->membase + UART_STAT);
+ ret |= STAT_BRK_ERR;
+ writel(ret, port->membase + UART_STAT);
+
+ writel(CTRL_BRK_INT, port->membase + UART_CTRL(port));
+
+ ctl = readl(port->membase + UART_INTR(port));
+ ctl |= CTRL_RX_RDY_INT(port);
+ writel(ctl, port->membase + UART_INTR(port));
+
+ if (!mvuart->irq[UART_TX_IRQ]) {
+ /* Old bindings with just one interrupt (UART0 only) */
+ ret = devm_request_irq(port->dev, mvuart->irq[UART_IRQ_SUM],
+ mvebu_uart_isr, port->irqflags,
+ dev_name(port->dev), port);
+ if (ret) {
+ dev_err(port->dev, "unable to request IRQ %d\n",
+ mvuart->irq[UART_IRQ_SUM]);
+ return ret;
+ }
+ } else {
+ /* New bindings with an IRQ for RX and TX (both UART) */
+ ret = devm_request_irq(port->dev, mvuart->irq[UART_RX_IRQ],
+ mvebu_uart_rx_isr, port->irqflags,
+ dev_name(port->dev), port);
+ if (ret) {
+ dev_err(port->dev, "unable to request IRQ %d\n",
+ mvuart->irq[UART_RX_IRQ]);
+ return ret;
+ }
+
+ ret = devm_request_irq(port->dev, mvuart->irq[UART_TX_IRQ],
+ mvebu_uart_tx_isr, port->irqflags,
+ dev_name(port->dev),
+ port);
+ if (ret) {
+ dev_err(port->dev, "unable to request IRQ %d\n",
+ mvuart->irq[UART_TX_IRQ]);
+ devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ],
+ port);
+ return ret;
+ }
}
return 0;
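With the extended UART the controller may expose either one summed interrupt (the original binding, UART0 only) or separate RX and TX interrupts, so startup() requests whichever set the DT described. Note that even with devm_request_irq() the driver still frees the IRQs in shutdown(): startup and shutdown run every time the port is opened and closed, so the lines cannot stay held until driver detach. The two-path request, condensed for a hypothetical driver:

static int example_request_irqs(struct device *dev, struct example_uart *priv,
				struct uart_port *port)
{
	int ret;

	if (!priv->irq[EXAMPLE_TX_IRQ])
		/* Legacy binding: one summed interrupt covers RX and TX. */
		return devm_request_irq(dev, priv->irq[EXAMPLE_SUM_IRQ],
					example_isr, 0, dev_name(dev), port);

	/* New binding: dedicated RX and TX interrupts. */
	ret = devm_request_irq(dev, priv->irq[EXAMPLE_RX_IRQ],
			       example_rx_isr, 0, dev_name(dev), port);
	if (ret)
		return ret;

	return devm_request_irq(dev, priv->irq[EXAMPLE_TX_IRQ],
				example_tx_isr, 0, dev_name(dev), port);
}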
@@ -299,9 +413,41 @@ static int mvebu_uart_startup(struct uart_port *port)
static void mvebu_uart_shutdown(struct uart_port *port)
{
- writel(0, port->membase + UART_CTRL);
+ struct mvebu_uart *mvuart = to_mvuart(port);
+
+ writel(0, port->membase + UART_INTR(port));
+
+ if (!mvuart->irq[UART_TX_IRQ]) {
+ devm_free_irq(port->dev, mvuart->irq[UART_IRQ_SUM], port);
+ } else {
+ devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ], port);
+ devm_free_irq(port->dev, mvuart->irq[UART_TX_IRQ], port);
+ }
+}
+
+static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
+{
+ struct mvebu_uart *mvuart = to_mvuart(port);
+ unsigned int baud_rate_div;
+ u32 brdv;
+
+ if (IS_ERR(mvuart->clk))
+ return -PTR_ERR(mvuart->clk);
+
+ /*
+ * The UART clock is divided by the divisor value to generate the
+ * UCLK_OUT clock, which runs at 16 times the baud rate.
+ * This prescaler covers all standard baud rates up to 230400.
+ * Higher baud rates can be reached on the extended UART by using the
+ * programmable oversampling stack (also called the fractional divisor).
+ */
+ baud_rate_div = DIV_ROUND_UP(port->uartclk, baud * 16);
+ brdv = readl(port->membase + UART_BRDV);
+ brdv &= ~BRDV_BAUD_MASK;
+ brdv |= baud_rate_div;
+ writel(brdv, port->membase + UART_BRDV);
- free_irq(port->irq, port);
+ return 0;
}
static void mvebu_uart_set_termios(struct uart_port *port,
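To make the divisor computation above concrete: baud_rate_div = DIV_ROUND_UP(uartclk, baud * 16), and the resulting rate is uartclk / (16 * divisor). Assuming a 25 MHz reference clock (a figure chosen only for illustration, not taken from this patch), 115200 baud gives a divisor of 14 and an actual rate of about 111607 baud (roughly 3% slow), while 460800 would already be off by more than 15% — which is why set_termios below caps uart_get_baud_rate() at 230400 until the fractional divisor is implemented. A standalone check of the arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	const unsigned long uartclk = 25000000;	/* assumed reference clock */
	const unsigned long bauds[] = { 9600, 115200, 230400, 460800 };

	for (int i = 0; i < 4; i++) {
		unsigned long div = DIV_ROUND_UP(uartclk, bauds[i] * 16);
		double actual = (double)uartclk / (16.0 * div);

		printf("%6lu baud -> divisor %3lu, actual %6.0f (%+.1f%% error)\n",
		       bauds[i], div, actual,
		       100.0 * (actual - bauds[i]) / bauds[i]);
	}
	return 0;
}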
@@ -313,8 +459,8 @@ static void mvebu_uart_set_termios(struct uart_port *port,
spin_lock_irqsave(&port->lock, flags);
- port->read_status_mask = STAT_RX_RDY | STAT_OVR_ERR |
- STAT_TX_RDY | STAT_TX_FIFO_FUL;
+ port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR |
+ STAT_TX_RDY(port) | STAT_TX_FIFO_FUL;
if (termios->c_iflag & INPCK)
port->read_status_mask |= STAT_FRM_ERR | STAT_PAR_ERR;
@@ -325,13 +471,32 @@ static void mvebu_uart_set_termios(struct uart_port *port,
STAT_FRM_ERR | STAT_PAR_ERR | STAT_OVR_ERR;
if ((termios->c_cflag & CREAD) == 0)
- port->ignore_status_mask |= STAT_RX_RDY | STAT_BRK_ERR;
-
- if (old)
- tty_termios_copy_hw(termios, old);
+ port->ignore_status_mask |= STAT_RX_RDY(port) | STAT_BRK_ERR;
+
+ /*
+ * The maximum rate achievable with the simple baud rate divisor is
+ * 230400. Above that the error per bit frame would exceed 15%, so
+ * reaching higher rates would require implementing the fractional
+ * divisor feature.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 0, 230400);
+ if (mvebu_uart_baud_rate_set(port, baud)) {
+ /* No clock available, baudrate cannot be changed */
+ if (old)
+ baud = uart_get_baud_rate(port, old, NULL, 0, 230400);
+ } else {
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ }
- baud = uart_get_baud_rate(port, termios, old, 0, 460800);
- uart_update_timeout(port, termios->c_cflag, baud);
+ /* Only the following flag changes are supported */
+ if (old) {
+ termios->c_iflag &= INPCK | IGNPAR;
+ termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
+ termios->c_cflag &= CREAD | CBAUD;
+ termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
+ termios->c_lflag = old->c_lflag;
+ }
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -356,10 +521,10 @@ static int mvebu_uart_get_poll_char(struct uart_port *port)
{
unsigned int st = readl(port->membase + UART_STAT);
- if (!(st & STAT_RX_RDY))
+ if (!(st & STAT_RX_RDY(port)))
return NO_POLL_CHAR;
- return readl(port->membase + UART_RBR);
+ return readl(port->membase + UART_RBR(port));
}
static void mvebu_uart_put_poll_char(struct uart_port *port, unsigned char c)
@@ -375,7 +540,7 @@ static void mvebu_uart_put_poll_char(struct uart_port *port, unsigned char c)
udelay(1);
}
- writel(c, port->membase + UART_TSH);
+ writel(c, port->membase + UART_TSH(port));
}
#endif
@@ -413,7 +578,8 @@ static void mvebu_uart_putc(struct uart_port *port, int c)
break;
}
- writel(c, port->membase + UART_TSH);
+ /* At this early stage the DT is not parsed yet, so only use UART0 */
+ writel(c, port->membase + UART_STD_TSH);
for (;;) {
st = readl(port->membase + UART_STAT);
@@ -458,7 +624,7 @@ static void wait_for_xmitr(struct uart_port *port)
static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
{
wait_for_xmitr(port);
- writel(ch, port->membase + UART_TSH);
+ writel(ch, port->membase + UART_TSH(port));
}
static void mvebu_uart_console_write(struct console *co, const char *s,
@@ -466,7 +632,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
{
struct uart_port *port = &mvebu_uart_ports[co->index];
unsigned long flags;
- unsigned int ier;
+ unsigned int ier, intr, ctl;
int locked = 1;
if (oops_in_progress)
@@ -474,16 +640,23 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
else
spin_lock_irqsave(&port->lock, flags);
- ier = readl(port->membase + UART_CTRL) &
- (CTRL_RX_INT | CTRL_TX_RDY_INT);
- writel(0, port->membase + UART_CTRL);
+ ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT;
+ intr = readl(port->membase + UART_INTR(port)) &
+ (CTRL_RX_RDY_INT(port) | CTRL_TX_RDY_INT(port));
+ writel(0, port->membase + UART_CTRL(port));
+ writel(0, port->membase + UART_INTR(port));
uart_console_write(port, s, count, mvebu_uart_console_putchar);
wait_for_xmitr(port);
if (ier)
- writel(ier, port->membase + UART_CTRL);
+ writel(ier, port->membase + UART_CTRL(port));
+
+ if (intr) {
+ ctl = intr | readl(port->membase + UART_INTR(port));
+ writel(ctl, port->membase + UART_INTR(port));
+ }
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
@@ -538,7 +711,7 @@ console_initcall(mvebu_uart_console_init);
static struct uart_driver mvebu_uart_driver = {
.owner = THIS_MODULE,
- .driver_name = "mvebu_serial",
+ .driver_name = DRIVER_NAME,
.dev_name = "ttyMV",
.nr = MVEBU_NR_UARTS,
#ifdef CONFIG_SERIAL_MVEBU_CONSOLE
@@ -546,20 +719,39 @@ static struct uart_driver mvebu_uart_driver = {
#endif
};
+static const struct of_device_id mvebu_uart_of_match[];
+
+/* Counter to keep track of each UART port id when not using CONFIG_OF */
+static int uart_num_counter;
+
static int mvebu_uart_probe(struct platform_device *pdev)
{
struct resource *reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ const struct of_device_id *match = of_match_device(mvebu_uart_of_match,
+ &pdev->dev);
struct uart_port *port;
- struct mvebu_uart_data *data;
- int ret;
+ struct mvebu_uart *mvuart;
+ int ret, id, irq;
+
+ if (!reg) {
+ dev_err(&pdev->dev, "no registers defined\n");
+ return -EINVAL;
+ }
+
+ /* Assume that all UART ports have a DT alias or none has */
+ id = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (!pdev->dev.of_node || id < 0)
+ pdev->id = uart_num_counter++;
+ else
+ pdev->id = id;
- if (!reg || !irq) {
- dev_err(&pdev->dev, "no registers/irq defined\n");
+ if (pdev->id >= MVEBU_NR_UARTS) {
+ dev_err(&pdev->dev, "cannot have more than %d UART ports\n",
+ MVEBU_NR_UARTS);
return -EINVAL;
}
- port = &mvebu_uart_ports[0];
+ port = &mvebu_uart_ports[pdev->id];
spin_lock_init(&port->lock);
@@ -571,9 +763,14 @@ static int mvebu_uart_probe(struct platform_device *pdev)
port->fifosize = 32;
port->iotype = UPIO_MEM32;
port->flags = UPF_FIXED_PORT;
- port->line = 0; /* single port: force line number to 0 */
-
- port->irq = irq->start;
+ port->line = pdev->id;
+
+ /*
+ * The IRQ number is not stored in this structure because a port may have
+ * two of them (RX and TX). Use the ->irq[] array in the driver's UART
+ * structure instead.
+ */
+ port->irq = 0;
port->irqflags = 0;
port->mapbase = reg->start;
@@ -581,15 +778,70 @@ static int mvebu_uart_probe(struct platform_device *pdev)
if (IS_ERR(port->membase))
return -PTR_ERR(port->membase);
- data = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart_data),
- GFP_KERNEL);
- if (!data)
+ mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
+ GFP_KERNEL);
+ if (!mvuart)
return -ENOMEM;
- data->port = port;
+ /* Get controller data depending on the compatible string */
+ mvuart->data = (struct mvebu_uart_driver_data *)match->data;
+ mvuart->port = port;
+
+ port->private_data = mvuart;
+ platform_set_drvdata(pdev, mvuart);
+
+ /* Get fixed clock frequency */
+ mvuart->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mvuart->clk)) {
+ if (PTR_ERR(mvuart->clk) == -EPROBE_DEFER)
+ return PTR_ERR(mvuart->clk);
+
+ if (IS_EXTENDED(port)) {
+ dev_err(&pdev->dev, "unable to get UART clock\n");
+ return PTR_ERR(mvuart->clk);
+ }
+ } else {
+ if (!clk_prepare_enable(mvuart->clk))
+ port->uartclk = clk_get_rate(mvuart->clk);
+ }
+
+ /* Manage interrupts */
+ if (platform_irq_count(pdev) == 1) {
+ /* Old bindings: the single UART0 IRQ is unnamed */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "unable to get UART IRQ\n");
+ return irq;
+ }
+
+ mvuart->irq[UART_IRQ_SUM] = irq;
+ } else {
+ /*
+ * New bindings: named interrupts (RX, TX) for both UARTs.
+ * Only use the uart-rx and uart-tx interrupts; do not use
+ * the uart-sum interrupt of the UART0 port.
+ */
+ irq = platform_get_irq_byname(pdev, "uart-rx");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "unable to get 'uart-rx' IRQ\n");
+ return irq;
+ }
+
+ mvuart->irq[UART_RX_IRQ] = irq;
- port->private_data = data;
- platform_set_drvdata(pdev, data);
+ irq = platform_get_irq_byname(pdev, "uart-tx");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "unable to get 'uart-tx' IRQ\n");
+ return irq;
+ }
+
+ mvuart->irq[UART_TX_IRQ] = irq;
+ }
+
+ /* UART Soft Reset */
+ writel(CTRL_SOFT_RST, port->membase + UART_CTRL(port));
+ udelay(1);
+ writel(0, port->membase + UART_CTRL(port));
ret = uart_add_one_port(&mvebu_uart_driver, port);
if (ret)
@@ -597,9 +849,40 @@ static int mvebu_uart_probe(struct platform_device *pdev)
return 0;
}
+static struct mvebu_uart_driver_data uart_std_driver_data = {
+ .is_ext = false,
+ .regs.rbr = UART_STD_RBR,
+ .regs.tsh = UART_STD_TSH,
+ .regs.ctrl = UART_STD_CTRL1,
+ .regs.intr = UART_STD_CTRL2,
+ .flags.ctrl_tx_rdy_int = CTRL_STD_TX_RDY_INT,
+ .flags.ctrl_rx_rdy_int = CTRL_STD_RX_RDY_INT,
+ .flags.stat_tx_rdy = STAT_STD_TX_RDY,
+ .flags.stat_rx_rdy = STAT_STD_RX_RDY,
+};
+
+static struct mvebu_uart_driver_data uart_ext_driver_data = {
+ .is_ext = true,
+ .regs.rbr = UART_EXT_RBR,
+ .regs.tsh = UART_EXT_TSH,
+ .regs.ctrl = UART_EXT_CTRL1,
+ .regs.intr = UART_EXT_CTRL2,
+ .flags.ctrl_tx_rdy_int = CTRL_EXT_TX_RDY_INT,
+ .flags.ctrl_rx_rdy_int = CTRL_EXT_RX_RDY_INT,
+ .flags.stat_tx_rdy = STAT_EXT_TX_RDY,
+ .flags.stat_rx_rdy = STAT_EXT_RX_RDY,
+};
+
/* Match table for of_platform binding */
static const struct of_device_id mvebu_uart_of_match[] = {
- { .compatible = "marvell,armada-3700-uart", },
+ {
+ .compatible = "marvell,armada-3700-uart",
+ .data = (void *)&uart_std_driver_data,
+ },
+ {
+ .compatible = "marvell,armada-3700-uart-ext",
+ .data = (void *)&uart_ext_driver_data,
+ },
{}
};
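
The UART_BRDV value programmed above is the simple integer divisor DIV_ROUND_UP(uartclk, baud * 16), and the 230400 ceiling in set_termios follows directly from the rounding error that divisor introduces at high rates. A minimal user-space sketch of the arithmetic, assuming a hypothetical 25 MHz UART clock (the driver obtains the real rate from the clock framework):

#include <stdio.h>

/* Same rounding-up division the driver uses for the baud divisor. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical fixed UART clock; the driver reads it via clk_get_rate(). */
	const unsigned long uartclk = 25000000UL;
	const unsigned long rates[] = { 9600, 115200, 230400, 460800 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		unsigned long baud = rates[i];
		unsigned long div = DIV_ROUND_UP(uartclk, baud * 16);
		double actual = (double)uartclk / (16.0 * div);
		double err = 100.0 * (actual - (double)baud) / (double)baud;

		printf("baud %7lu -> divisor %3lu, actual %9.1f, error %+.1f%%\n",
		       baud, div, actual, err);
	}
	return 0;
}

With a rounded-up divisor the effective rate never exceeds the request, and at 460800 the error already passes 15%, which is why requests are capped at 230400 until the fractional-divisor feature is implemented.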
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index be94246b6fcc..efb4fd3784ed 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Application UART driver for:
* Freescale STMP37XX/STMP378X
@@ -9,10 +10,6 @@
* Provide Alphascale ASM9260 support.
* Copyright 2008-2010 Freescale Semiconductor, Inc.
* Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
*/
#if defined(CONFIG_SERIAL_MXS_AUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
index 207a0a032ed1..b3556863491f 100644
--- a/drivers/tty/serial/netx-serial.c
+++ b/drivers/tty/serial/netx-serial.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#if defined(CONFIG_SERIAL_NETX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 7754053deeda..53d59e9b944a 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for OMAP-UART controller.
* Based on drivers/serial/8250.c
@@ -8,11 +9,6 @@
* Govindraj R <govindraj.raja@ti.com>
* Thara Gopinath <thara@ti.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Note: This driver is made separate from 8250 driver as we cannot
* overload the 8250 driver with omap platform-specific configuration for
* features like DMA; it makes it easier to implement features like DMA and
@@ -610,7 +606,7 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id)
default:
break;
}
- } while (!(iir & UART_IIR_NO_INT) && max_count--);
+ } while (max_count--);
spin_unlock(&up->port.lock);
@@ -693,7 +689,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
up->efr |= UART_EFR_RTS;
else
- up->efr &= UART_EFR_RTS;
+ up->efr &= ~UART_EFR_RTS;
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, lcr);
@@ -1606,7 +1602,6 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
struct device_node *np)
{
struct serial_rs485 *rs485conf = &up->port.rs485;
- u32 rs485_delay[2];
enum of_gpio_flags flags;
int ret;
@@ -1637,17 +1632,7 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
up->rts_gpio = -EINVAL;
}
- if (of_property_read_u32_array(np, "rs485-rts-delay",
- rs485_delay, 2) == 0) {
- rs485conf->delay_rts_before_send = rs485_delay[0];
- rs485conf->delay_rts_after_send = rs485_delay[1];
- }
-
- if (of_property_read_bool(np, "rs485-rx-during-tx"))
- rs485conf->flags |= SER_RS485_RX_DURING_TX;
-
- if (of_property_read_bool(np, "linux,rs485-enabled-at-boot-time"))
- rs485conf->flags |= SER_RS485_ENABLED;
+ of_get_rs485_mode(np, rs485conf);
return 0;
}
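
The set_mctrl hunk above fixes a classic bit-clearing bug: up->efr &= UART_EFR_RTS keeps only the RTS bit instead of clearing it. A stand-alone sketch with an arbitrary example register value (not an actual OMAP EFR reading) shows the difference:

#include <stdio.h>

#define UART_EFR_RTS 0x40	/* auto-RTS enable bit in the 8250-style EFR */

int main(void)
{
	unsigned char efr = 0xD3;			/* arbitrary example value */
	unsigned char wrong = efr & UART_EFR_RTS;	/* keeps only RTS: 0x40 */
	unsigned char right = efr & ~UART_EFR_RTS;	/* clears RTS, keeps the rest: 0x93 */

	printf("efr=0x%02X  wrong=0x%02X  right=0x%02X\n", efr, wrong, right);
	return 0;
}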
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index b9c859365334..29a6dc6a8d23 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Actions Semi Owl family serial console
*
@@ -5,19 +6,6 @@
* Author: Actions Semi, Inc.
*
* Copyright (c) 2016-2017 Andreas Färber
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index d9123f995705..760d5dd0aada 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
*Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
- *
- *This program is free software; you can redistribute it and/or modify
- *it under the terms of the GNU General Public License as published by
- *the Free Software Foundation; version 2 of the License.
- *
- *This program is distributed in the hope that it will be useful,
- *but WITHOUT ANY WARRANTY; without even the implied warranty of
- *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *GNU General Public License for more details.
- *
- *You should have received a copy of the GNU General Public License
- *along with this program; if not, write to the Free Software
- *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
#if defined(CONFIG_SERIAL_PCH_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index 00a33eb859d3..fd80d999308d 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* PIC32 Integrated Serial Driver.
*
@@ -5,8 +6,6 @@
*
* Authors:
* Sorin-Andrei Pistirica <andrei.pistirica@microchip.com>
- *
- * Licensed under GPLv2 or later.
*/
#include <linux/kernel.h>
diff --git a/drivers/tty/serial/pic32_uart.h b/drivers/tty/serial/pic32_uart.h
index ec379da55ebb..2f2b56927dc6 100644
--- a/drivers/tty/serial/pic32_uart.h
+++ b/drivers/tty/serial/pic32_uart.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* PIC32 Integrated Serial Driver.
*
@@ -5,8 +6,6 @@
*
* Authors:
* Sorin-Andrei Pistirica <andrei.pistirica@microchip.com>
- *
- * Licensed under GPLv2 or later.
*/
#ifndef __DT_PIC32_UART_H__
#define __DT_PIC32_UART_H__
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 6ccdd018fb45..3d21790d961e 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for PowerMac Z85c30 based ESCC cell found in the
* "macio" ASICs of various PowerMac models
@@ -13,20 +14,6 @@
* and once done, I expect that driver to remain fairly stable in
* the long term, unless we change the driver model again...
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* 2004-08-06 Harald Welte <laforge@gnumonks.org>
* - Enable BREAK interrupt
* - Add support for sysreq
diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index dab2668d3879..f8812389b8a8 100644
--- a/drivers/tty/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UART driver for PNX8XXX SoCs
*
@@ -7,11 +8,6 @@
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
* Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of
- * any kind, whether express or implied.
- *
*/
#if defined(CONFIG_SERIAL_PNX8XXX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -666,9 +662,8 @@ static void __init pnx8xxx_init_ports(void)
first = 0;
for (i = 0; i < NR_PORTS; i++) {
- init_timer(&pnx8xxx_ports[i].timer);
- pnx8xxx_ports[i].timer.function = pnx8xxx_timeout;
- pnx8xxx_ports[i].timer.data = (unsigned long)&pnx8xxx_ports[i];
+ setup_timer(&pnx8xxx_ports[i].timer, pnx8xxx_timeout,
+ (unsigned long)&pnx8xxx_ports[i]);
pnx8xxx_ports[i].port.ops = &pnx8xxx_pops;
}
}
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 905631df1f8b..baf552944d56 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Based on drivers/serial/8250.c by Russell King.
*
@@ -5,11 +6,6 @@
* Created: Feb 20, 2003
* Copyright: (C) 2003 Monta Vista Software, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Note 1: This driver is made separate from the already too overloaded
* 8250.c because it needs some quirks of its own and that'll make it
* easier to add DMA support.
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index 056f91b3a4ca..520b43b23543 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Comtrol RocketPort EXPRESS/INFINITY cards
*
@@ -10,10 +11,6 @@
*
* rocketport_infinity_express-linux-1.20.tar.gz
* Copyright (C) 2004-2011 Comtrol, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include <linux/bitops.h>
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index fd3d1329d48c..4e3f169b30cf 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SA11x0 serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#if defined(CONFIG_SERIAL_SA1100_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -640,9 +627,8 @@ static void __init sa1100_init_ports(void)
sa1100_ports[i].port.fifosize = 8;
sa1100_ports[i].port.line = i;
sa1100_ports[i].port.iotype = UPIO_MEM;
- init_timer(&sa1100_ports[i].timer);
- sa1100_ports[i].timer.function = sa1100_timeout;
- sa1100_ports[i].timer.data = (unsigned long)&sa1100_ports[i];
+ setup_timer(&sa1100_ports[i].timer, sa1100_timeout,
+ (unsigned long)&sa1100_ports[i]);
}
/*
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 8aca18c4cdea..f9fecc5ed0ce 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver core for Samsung SoC onboard UARTs.
*
* Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
* http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/* Note on 2410 error handling
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index 965199b6c16f..f93022113f59 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#ifndef __SAMSUNG_H
#define __SAMSUNG_H
@@ -6,10 +7,6 @@
*
* Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
* http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/dmaengine.h>
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 041625cc24bb..329aced26bd8 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Support for the asynchronous serial interface (DUART) included
* in the BCM1250 and derived System-On-a-Chip (SOC) devices.
@@ -9,11 +10,6 @@
*
* Copyright (c) 2000, 2001, 2002, 2003, 2004 Broadcom Corporation
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* References:
*
* "BCM1250/BCM1125/BCM1125H User Manual", Broadcom Corporation
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index ca54ce074a5f..65792a3539d0 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* SC16IS7xx tty serial driver - Copyright (C) 2014 GridPoint
* Author: Jon Ringle <jringle@gridpoint.com>
*
* Based on max310x.c, by Alexander Shiyan <shc_work@mail.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index b9c7a904c1ea..d6ae3086c2a2 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* NXP (Philips) SCC+++(SCN+++) serial driver
*
* Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
*
* Based on sc26xx.c, by Thomas Bogendörfer (tsbogend@alpha.franken.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#if defined(CONFIG_SERIAL_SCCNXP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -465,9 +461,9 @@ static void sccnxp_handle_events(struct sccnxp_port *s)
} while (1);
}
-static void sccnxp_timer(unsigned long data)
+static void sccnxp_timer(struct timer_list *t)
{
- struct sccnxp_port *s = (struct sccnxp_port *)data;
+ struct sccnxp_port *s = from_timer(s, t, timer);
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
@@ -987,8 +983,7 @@ static int sccnxp_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Unable to request IRQ %i\n", s->irq);
} else {
- init_timer(&s->timer);
- setup_timer(&s->timer, sccnxp_timer, (unsigned long)s);
+ timer_setup(&s->timer, sccnxp_timer, 0);
mod_timer(&s->timer, jiffies +
usecs_to_jiffies(s->pdata.poll_time_us));
return 0;
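
The sccnxp change above is one instance of the timer API conversion appearing throughout this series: the callback now receives the struct timer_list pointer and recovers its container with from_timer() instead of casting an unsigned long back to the port structure. A kernel-style sketch of the pattern, using a hypothetical my_port structure rather than any real driver state:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_port {
	int poll_count;
	struct timer_list timer;	/* embedded timer, as in sccnxp_port */
};

/* New-style callback: gets the timer pointer, derives the owning structure. */
static void my_port_poll(struct timer_list *t)
{
	struct my_port *p = from_timer(p, t, timer);

	p->poll_count++;
	mod_timer(&p->timer, jiffies + msecs_to_jiffies(10));
}

static void my_port_start_polling(struct my_port *p)
{
	/* Replaces setup_timer(&p->timer, fn, (unsigned long)p). */
	timer_setup(&p->timer, my_port_poll, 0);
	mod_timer(&p->timer, jiffies + msecs_to_jiffies(10));
}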
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index cf9b736f26f8..af2a29cfbbe9 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* serial_tegra.c
*
@@ -6,18 +7,6 @@
* Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 3a14cccbd7ff..854995e1cae7 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver core for serial ports
*
@@ -5,20 +6,6 @@
*
* Copyright 1999 ARM Limited
* Copyright (C) 2000-2001 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/tty.h>
@@ -1482,10 +1469,10 @@ out:
static void uart_close(struct tty_struct *tty, struct file *filp)
{
struct uart_state *state = tty->driver_data;
- struct tty_port *port;
if (!state) {
struct uart_driver *drv = tty->driver->driver_state;
+ struct tty_port *port;
state = drv->state + tty->index;
port = &state->port;
@@ -1495,7 +1482,6 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
return;
}
- port = &state->port;
pr_debug("uart_close(%d) called\n", tty->index);
tty_port_close(tty->port, tty, filp);
@@ -3026,5 +3012,41 @@ EXPORT_SYMBOL(uart_resume_port);
EXPORT_SYMBOL(uart_add_one_port);
EXPORT_SYMBOL(uart_remove_one_port);
+/**
+ * of_get_rs485_mode() - Implement parsing rs485 properties
+ * @np: uart node
+ * @rs485conf: output parameter
+ *
+ * This function implements the device tree binding described in
+ * Documentation/devicetree/bindings/serial/rs485.txt.
+ */
+void of_get_rs485_mode(struct device_node *np, struct serial_rs485 *rs485conf)
+{
+ u32 rs485_delay[2];
+ int ret;
+
+ ret = of_property_read_u32_array(np, "rs485-rts-delay", rs485_delay, 2);
+ if (!ret) {
+ rs485conf->delay_rts_before_send = rs485_delay[0];
+ rs485conf->delay_rts_after_send = rs485_delay[1];
+ } else {
+ rs485conf->delay_rts_before_send = 0;
+ rs485conf->delay_rts_after_send = 0;
+ }
+
+ /*
+ * Clear the full-duplex and enabled flags to get to a defined state
+ * before evaluating the two following properties.
+ */
+ rs485conf->flags &= ~(SER_RS485_RX_DURING_TX | SER_RS485_ENABLED);
+
+ if (of_property_read_bool(np, "rs485-rx-during-tx"))
+ rs485conf->flags |= SER_RS485_RX_DURING_TX;
+
+ if (of_property_read_bool(np, "linux,rs485-enabled-at-boot-time"))
+ rs485conf->flags |= SER_RS485_ENABLED;
+}
+EXPORT_SYMBOL_GPL(of_get_rs485_mode);
+
MODULE_DESCRIPTION("Serial driver core");
MODULE_LICENSE("GPL");
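
The new of_get_rs485_mode() helper centralizes the device-tree parsing that omap-serial previously open-coded (see the hunk above that replaces the individual property reads with a single call). A hedged sketch of how a driver's probe path might use it; the function and structure names here are hypothetical, not taken from any in-tree driver:

#include <linux/of.h>
#include <linux/device.h>
#include <linux/serial_core.h>

static void my_uart_setup_rs485(struct uart_port *port, struct device_node *np)
{
	/*
	 * Fills delay_rts_before_send/after_send and sets or clears the
	 * SER_RS485_RX_DURING_TX and SER_RS485_ENABLED flags from the
	 * standard rs485 device-tree properties.
	 */
	of_get_rs485_mode(np, &port->rs485);

	if (port->rs485.flags & SER_RS485_ENABLED)
		dev_info(port->dev, "RS485 enabled at boot, RTS delays %u/%u ms\n",
			 port->rs485.delay_rts_before_send,
			 port->rs485.delay_rts_after_send);
}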
diff --git a/drivers/tty/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
index 57f152394af5..b461d791188c 100644
--- a/drivers/tty/serial/serial_ks8695.c
+++ b/drivers/tty/serial/serial_ks8695.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for KS8695 serial ports
*
* Based on drivers/serial/serial_amba.c, by Kam Lee.
*
* Copyright 2002-2005 Micrel Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#include <linux/module.h>
#include <linux/tty.h>
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index d2da6aa7f27d..1c06325beaca 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Helpers for controlling modem lines via GPIO
*
* Copyright (C) 2014 Paratronic S.A.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/err.h>
diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h
index fa000bcff217..b7d3cca48ede 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.h
+++ b/drivers/tty/serial/serial_mctrl_gpio.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Helpers for controlling modem lines via GPIO
*
* Copyright (C) 2014 Paratronic S.A.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __SERIAL_MCTRL_GPIO__
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index f80fead6c5fc..1b4008d022bf 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Derived from many drivers using generic_serial interface,
* especially serial_tx3912.c by Steven J. Hill and r39xx_serial.c
@@ -8,10 +9,6 @@
* Copyright (C) 2001 Steven J. Hill (sjhill@realitydiluted.com)
* Copyright (C) 2000-2002 Toshiba Corporation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Serial driver for TX3927/TX4927/TX4925/TX4938 internal SIO controller
*/
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 784dd42002ea..31fcc7072a90 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH on-chip serial module support. (SCI with no FIFO / with FIFO)
*
@@ -13,10 +14,6 @@
* Modified to support SecureEdge. David McCullough (2002)
* Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
* Removed SH7300 support (Jul 2007).
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
@@ -40,6 +37,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
@@ -152,6 +150,7 @@ struct sci_port {
int rx_trigger;
struct timer_list rx_fifo_timer;
int rx_fifo_timeout;
+ u16 hscif_tot;
bool has_rtscts;
bool autorts;
@@ -1107,8 +1106,14 @@ static ssize_t rx_fifo_timeout_show(struct device *dev,
{
struct uart_port *port = dev_get_drvdata(dev);
struct sci_port *sci = to_sci_port(port);
+ int v;
+
+ if (port->type == PORT_HSCIF)
+ v = sci->hscif_tot >> HSSCR_TOT_SHIFT;
+ else
+ v = sci->rx_fifo_timeout;
- return sprintf(buf, "%d\n", sci->rx_fifo_timeout);
+ return sprintf(buf, "%d\n", v);
}
static ssize_t rx_fifo_timeout_store(struct device *dev,
@@ -1124,11 +1129,19 @@ static ssize_t rx_fifo_timeout_store(struct device *dev,
ret = kstrtol(buf, 0, &r);
if (ret)
return ret;
- sci->rx_fifo_timeout = r;
- scif_set_rtrg(port, 1);
- if (r > 0)
- setup_timer(&sci->rx_fifo_timer, rx_fifo_timer_fn,
- (unsigned long)sci);
+
+ if (port->type == PORT_HSCIF) {
+ if (r < 0 || r > 3)
+ return -EINVAL;
+ sci->hscif_tot = r << HSSCR_TOT_SHIFT;
+ } else {
+ sci->rx_fifo_timeout = r;
+ scif_set_rtrg(port, 1);
+ if (r > 0)
+ setup_timer(&sci->rx_fifo_timer, rx_fifo_timer_fn,
+ (unsigned long)sci);
+ }
+
return count;
}
@@ -1210,8 +1223,11 @@ static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
sg_dma_address(&s->sg_rx[0]));
dma_release_channel(chan);
- if (enable_pio)
+ if (enable_pio) {
+ spin_lock_irqsave(&port->lock, flags);
sci_start_rx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
}
static void sci_dma_rx_complete(void *arg)
@@ -1278,8 +1294,11 @@ static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
dma_unmap_single(chan->device->dev, s->tx_dma_addr, UART_XMIT_SIZE,
DMA_TO_DEVICE);
dma_release_channel(chan);
- if (enable_pio)
+ if (enable_pio) {
+ spin_lock_irqsave(&port->lock, flags);
sci_start_tx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
}
static void sci_submit_rx(struct sci_port *s)
@@ -1491,6 +1510,14 @@ static void sci_request_dma(struct uart_port *port)
return;
s->cookie_tx = -EINVAL;
+
+ /*
+ * Don't request a dma channel if no channel was specified
+ * in the device tree.
+ */
+ if (!of_find_property(port->dev->of_node, "dmas", NULL))
+ return;
+
chan = sci_request_dma_chan(port, DMA_MEM_TO_DEV);
dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
if (chan) {
@@ -1980,6 +2007,7 @@ static void sci_enable_ms(struct uart_port *port)
static void sci_break_ctl(struct uart_port *port, int break_state)
{
unsigned short scscr, scsptr;
+ unsigned long flags;
/* check whether the port has SCSPTR */
if (!sci_getreg(port, SCSPTR)->size) {
@@ -1990,6 +2018,7 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
return;
}
+ spin_lock_irqsave(&port->lock, flags);
scsptr = serial_port_in(port, SCSPTR);
scscr = serial_port_in(port, SCSCR);
@@ -2003,6 +2032,7 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
serial_port_out(port, SCSPTR, scsptr);
serial_port_out(port, SCSCR, scscr);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static int sci_startup(struct uart_port *port)
@@ -2037,9 +2067,13 @@ static void sci_shutdown(struct uart_port *port)
spin_lock_irqsave(&port->lock, flags);
sci_stop_rx(port);
sci_stop_tx(port);
- /* Stop RX and TX, disable related interrupts, keep clock source */
+ /*
+ * Stop RX and TX, disable related interrupts, keep clock source
+ * and HSCIF TOT bits
+ */
scr = serial_port_in(port, SCSCR);
- serial_port_out(port, SCSCR, scr & (SCSCR_CKE1 | SCSCR_CKE0));
+ serial_port_out(port, SCSCR, scr &
+ (SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
spin_unlock_irqrestore(&port->lock, flags);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -2186,7 +2220,7 @@ static void sci_reset(struct uart_port *port)
unsigned int status;
struct sci_port *s = to_sci_port(port);
- serial_port_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
+ serial_port_out(port, SCSCR, s->hscif_tot); /* TE=0, RE=0, CKE1=0 */
reg = sci_getreg(port, SCFCR);
if (reg->size)
@@ -2227,6 +2261,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
int min_err = INT_MAX, err;
unsigned long max_freq = 0;
int best_clk = -1;
+ unsigned long flags;
if ((termios->c_cflag & CSIZE) == CS7)
smr_val |= SCSMR_CHR;
@@ -2336,6 +2371,8 @@ done:
serial_port_out(port, SCCKS, sccks);
}
+ spin_lock_irqsave(&port->lock, flags);
+
sci_reset(port);
uart_update_timeout(port, termios->c_cflag, baud);
@@ -2353,10 +2390,7 @@ done:
case 27: smr_val |= SCSMR_SRC_27; break;
}
smr_val |= cks;
- dev_dbg(port->dev,
- "SCR 0x%x SMR 0x%x BRR %u CKS 0x%x DL %u SRR %u\n",
- scr_val, smr_val, brr, sccks, dl, srr);
- serial_port_out(port, SCSCR, scr_val);
+ serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
serial_port_out(port, SCSMR, smr_val);
serial_port_out(port, SCBRR, brr);
if (sci_getreg(port, HSSRR)->size)
@@ -2369,8 +2403,7 @@ done:
scr_val = s->cfg->scscr & (SCSCR_CKE1 | SCSCR_CKE0);
smr_val |= serial_port_in(port, SCSMR) &
(SCSMR_CKEDG | SCSMR_SRC_MASK | SCSMR_CKS);
- dev_dbg(port->dev, "SCR 0x%x SMR 0x%x\n", scr_val, smr_val);
- serial_port_out(port, SCSCR, scr_val);
+ serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
serial_port_out(port, SCSMR, smr_val);
}
@@ -2406,8 +2439,7 @@ done:
scr_val |= SCSCR_RE | SCSCR_TE |
(s->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0));
- dev_dbg(port->dev, "SCSCR 0x%x\n", scr_val);
- serial_port_out(port, SCSCR, scr_val);
+ serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
if ((srr + 1 == 5) &&
(port->type == PORT_SCIFA || port->type == PORT_SCIFB)) {
/*
@@ -2453,8 +2485,6 @@ done:
s->rx_frame = (100 * bits * HZ) / (baud / 10);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
s->rx_timeout = DIV_ROUND_UP(s->buf_len_rx * 2 * s->rx_frame, 1000);
- dev_dbg(port->dev, "DMA Rx t-out %ums, tty t-out %u jiffies\n",
- s->rx_timeout * 1000 / HZ, port->timeout);
if (s->rx_timeout < msecs_to_jiffies(20))
s->rx_timeout = msecs_to_jiffies(20);
#endif
@@ -2462,6 +2492,8 @@ done:
if ((termios->c_cflag & CREAD) != 0)
sci_start_rx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+
sci_port_disable(s);
if (UART_ENABLE_MS(port, termios->c_cflag))
@@ -2773,6 +2805,7 @@ static int sci_init_single(struct platform_device *dev,
}
sci_port->rx_fifo_timeout = 0;
+ sci_port->hscif_tot = 0;
/* SCIFA on sh7723 and sh7724 need a custom sampling rate that doesn't
* match the SoC datasheet, this should be investigated. Let platform
@@ -2860,7 +2893,7 @@ static void serial_console_write(struct console *co, const char *s,
ctrl_temp = SCSCR_RE | SCSCR_TE |
(sci_port->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0)) |
(ctrl & (SCSCR_CKE1 | SCSCR_CKE0));
- serial_port_out(port, SCSCR, ctrl_temp);
+ serial_port_out(port, SCSCR, ctrl_temp | sci_port->hscif_tot);
uart_console_write(port, s, count, serial_console_putchar);
@@ -2988,7 +3021,8 @@ static int sci_remove(struct platform_device *dev)
sysfs_remove_file(&dev->dev.kobj,
&dev_attr_rx_fifo_trigger.attr);
}
- if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB) {
+ if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB ||
+ port->port.type == PORT_HSCIF) {
sysfs_remove_file(&dev->dev.kobj,
&dev_attr_rx_fifo_timeout.attr);
}
@@ -3044,17 +3078,15 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
unsigned int *dev_id)
{
struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *match;
struct plat_sci_port *p;
struct sci_port *sp;
+ const void *data;
int id;
if (!IS_ENABLED(CONFIG_OF) || !np)
return NULL;
- match = of_match_node(of_sci_match, np);
- if (!match)
- return NULL;
+ data = of_device_get_match_data(&pdev->dev);
p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
if (!p)
@@ -3070,8 +3102,8 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
sp = &sci_ports[id];
*dev_id = id;
- p->type = SCI_OF_TYPE(match->data);
- p->regtype = SCI_OF_REGTYPE(match->data);
+ p->type = SCI_OF_TYPE(data);
+ p->regtype = SCI_OF_REGTYPE(data);
sp->has_rtscts = of_property_read_bool(np, "uart-has-rtscts");
@@ -3173,7 +3205,8 @@ static int sci_probe(struct platform_device *dev)
if (ret)
return ret;
}
- if (sp->port.type == PORT_SCIFA || sp->port.type == PORT_SCIFB) {
+ if (sp->port.type == PORT_SCIFA || sp->port.type == PORT_SCIFB ||
+ sp->port.type == PORT_HSCIF) {
ret = sysfs_create_file(&dev->dev.kobj,
&dev_attr_rx_fifo_timeout.attr);
if (ret) {
@@ -3244,7 +3277,7 @@ early_platform_init_buffer("earlyprintk", &sci_driver,
early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
-static struct __init plat_sci_port port_cfg;
+static struct plat_sci_port port_cfg __initdata;
static int __init early_console_setup(struct earlycon_device *device,
int type)
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index 938e23a2d166..a5f792fd48d9 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -63,6 +63,9 @@ enum {
#define SCSCR_TDRQE BIT(15) /* Tx Data Transfer Request Enable */
#define SCSCR_RDRQE BIT(14) /* Rx Data Transfer Request Enable */
+/* Serial Control Register, HSCIF-only bits */
+#define HSSCR_TOT_SHIFT 14
+
/* SCxSR (Serial Status Register) on SCI */
#define SCI_TDRE BIT(7) /* Transmit Data Register Empty */
#define SCI_RDRF BIT(6) /* Receive Data Register Full */
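
In the sh-sci hunks above, the existing rx_fifo_timeout sysfs attribute is reused for HSCIF ports, but the value kept in hscif_tot is the 2-bit TOT field already shifted up by HSSCR_TOT_SHIFT so it can be ORed straight into SCSCR writes. A small stand-alone sketch of that encode/decode arithmetic:

#include <stdio.h>

#define HSSCR_TOT_SHIFT 14	/* HSCIF timeout field position within SCSCR */

int main(void)
{
	unsigned long r;

	for (r = 0; r <= 3; r++) {	/* _store rejects anything outside 0..3 */
		unsigned int hscif_tot = r << HSSCR_TOT_SHIFT;		/* what gets stored */
		unsigned int shown = hscif_tot >> HSSCR_TOT_SHIFT;	/* what _show prints */

		printf("user value %lu -> SCSCR bits 0x%04x -> shown %u\n",
		       r, hscif_tot, shown);
	}
	return 0;
}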
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 684cb8dd8050..9925b00a9777 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for CSR SiRFprimaII onboard UARTs.
*
* Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- *
- * Licensed under GPLv2 or later.
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index 43756bd9111c..004ca684d3ae 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Drivers for CSR SiRFprimaII onboard UARTs.
*
* Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- *
- * Licensed under GPLv2 or later.
*/
#include <linux/bitops.h>
#include <linux/log2.h>
diff --git a/drivers/tty/serial/sn_console.c b/drivers/tty/serial/sn_console.c
index 9e0e6586c698..ed78542c4c37 100644
--- a/drivers/tty/serial/sn_console.c
+++ b/drivers/tty/serial/sn_console.c
@@ -8,25 +8,6 @@
*
* Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like. Any license provided herein, whether implied or
- * otherwise, applies only to this software file. Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
* Contact information: Silicon Graphics, Inc., 1500 Crittenden Lane,
* Mountain View, CA 94043, or:
*
@@ -687,9 +668,7 @@ static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
* timer to poll for input and push data from the console
* buffer.
*/
- init_timer(&port->sc_timer);
- port->sc_timer.function = sn_sal_timer_poll;
- port->sc_timer.data = (unsigned long)port;
+ setup_timer(&port->sc_timer, sn_sal_timer_poll, (unsigned long)port);
if (IS_RUNNING_ON_SIMULATOR())
port->sc_interrupt_timeout = 6;
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index e902494ebbd5..828f1143859c 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012-2015 Spreadtrum Communications Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#if defined(CONFIG_SERIAL_SPRD_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index b313a792b149..c763253514e9 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* st-asc.c: ST Asynchronous serial controller (ASC) driver
*
* Copyright (C) 2003-2013 STMicroelectronics (R&D) Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#if defined(CONFIG_SERIAL_ST_ASC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 03a583264d9e..0fa735b60f2d 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Maxime Coquelin 2015
* Copyright (C) STMicroelectronics SA 2017
* Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com>
* Gerald Baeza <gerald.baeza@st.com>
- * License terms: GNU General Public License (GPL), version 2
*
* Inspired by st-asc.c from STMicroelectronics (c)
*/
@@ -736,11 +736,8 @@ static struct stm32_port *stm32_of_get_stm32_port(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id stm32_match[] = {
- { .compatible = "st,stm32-usart", .data = &stm32f4_info},
{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
- { .compatible = "st,stm32f7-usart", .data = &stm32f7_info},
{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
- { .compatible = "st,stm32h7-usart", .data = &stm32h7_info},
{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
{},
};
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index ffc0c5285e51..8a5ff54d0f42 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Maxime Coquelin 2015
* Copyright (C) STMicroelectronics SA 2017
* Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com>
* Gerald Baeza <gerald_baeza@yahoo.fr>
- * License terms: GNU General Public License (GPL), version 2
*/
#define DRIVER_NAME "stm32-usart"
diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c
index 127472bd6a7c..70a4ea4eaa6e 100644
--- a/drivers/tty/serial/suncore.c
+++ b/drivers/tty/serial/suncore.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* suncore.c
*
* Common SUN serial routines. Based entirely
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 46e46894e918..63e34d868de8 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sunhv.c: Serial driver for SUN4V hypervisor console.
*
* Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 653a076d89d3..b93d0225f8c9 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sunsab.c: ASYNC Driver for the SIEMENS SAB82532 DUSCC.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 95d34d7565c9..6cf3e9b0728f 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* su.c: Small serial driver for keyboard/mouse interface on sparc32/PCI
*
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 252cea49c068..bc7af8b08a72 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sunzilog.c: Zilog serial driver for Sparc systems.
*
* Driver for Zilog serial chips found on Sun workstations and
diff --git a/drivers/tty/serial/tilegx.c b/drivers/tty/serial/tilegx.c
index 453215f5420d..f0a3ae57f881 100644
--- a/drivers/tty/serial/tilegx.c
+++ b/drivers/tty/serial/tilegx.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2013 Tilera Corporation. All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
* TILEGx UART driver.
*/
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 5da7fe40e391..19d38b504e27 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* timbuart.c timberdale FPGA UART driver
* Copyright (c) 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
diff --git a/drivers/tty/serial/timbuart.h b/drivers/tty/serial/timbuart.h
index 7e566766bc43..fb00b172117d 100644
--- a/drivers/tty/serial/timbuart.h
+++ b/drivers/tty/serial/timbuart.h
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* timbuart.c timberdale FPGA GPIO driver
* Copyright (c) 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index c9b8d702dadc..c47db7826189 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* uartlite.c: Serial driver for Xilinx uartlite serial controller
*
* Copyright (C) 2006 Peter Korsgaard <jacmet@sunsite.dk>
* Copyright (C) 2007 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/platform_device.h>
@@ -739,7 +736,7 @@ static int __init ulite_init(void)
err_plat:
uart_unregister_driver(&ulite_uart_driver);
err_uart:
- pr_err("registering uartlite driver failed: err=%i", ret);
+ pr_err("registering uartlite driver failed: err=%i\n", ret);
return ret;
}
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 55b702775786..2b6376e6e5ad 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Freescale QUICC Engine UART device driver
*
* Author: Timur Tabi <timur@freescale.com>
*
- * Copyright 2007 Freescale Semiconductor, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
+ * Copyright 2007 Freescale Semiconductor, Inc.
*
* This driver adds support for UART devices via Freescale's QUICC Engine
* found on some Freescale SOCs.
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index 439057e8107a..6d106e33f842 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for NEC VR4100 series Serial Interface Unit.
*
* Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* Based on drivers/serial/8250.c, by Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#if defined(CONFIG_SERIAL_VR41XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 435a6f3260be..3d58e9b34553 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
*
* Based on msm_serial.c, which is:
* Copyright (C) 2007 Google, Inc.
* Author: Robert Love <rlove@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#if defined(CONFIG_SERIAL_VT8500_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 31a630ae0870..b9b2bc76bcac 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cadence UART driver (found in Xilinx Zynq)
*
* 2011 - 2014 (C) Xilinx Inc.
*
- * This program is free software; you can redistribute it
- * and/or modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation;
- * either version 2 of the License, or (at your option) any
- * later version.
- *
* This driver has originally been pushed by Xilinx using a Zynq-branding. This
* still shows in the naming of this file, the kconfig symbols and some symbols
* in the code.
@@ -1673,7 +1668,7 @@ static void __exit cdns_uart_exit(void)
uart_unregister_driver(&cdns_uart_uart_driver);
}
-module_init(cdns_uart_init);
+arch_initcall(cdns_uart_init);
module_exit(cdns_uart_exit);
MODULE_DESCRIPTION("Driver for Cadence UART");
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index d32bd499d684..b03d3e458ea2 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* zs.c: Serial port driver for IOASIC DECstations.
*
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 3be981101297..f2c34d656144 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
*
@@ -13,8 +14,6 @@
*
* Original release 01/11/99
*
- * This code is released under the GNU General Public License (GPL)
- *
* This driver is primarily intended for use in synchronous
* HDLC mode. Asynchronous mode is also provided.
*
@@ -4098,8 +4097,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
if (request_dma(info->dma_level,info->device_name) < 0){
printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
__FILE__,__LINE__,info->device_name, info->dma_level );
- mgsl_release_resources( info );
- return -ENODEV;
+ goto errout;
}
info->dma_requested = true;
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 636b8ae29b46..06a03731bba7 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Device driver for Microgate SyncLink GT serial adapters.
*
@@ -6,8 +7,6 @@
*
* Microgate and SyncLink are trademarks of Microgate Corporation
*
- * This code is released under the GNU General Public License (GPL)
- *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 4fed9e7b281f..d45f234e1914 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* $Id: synclinkmp.c,v 4.38 2005/07/15 13:29:44 paulkf Exp $
*
@@ -10,7 +11,6 @@
* Microgate and SyncLink are trademarks of Microgate Corporation
*
* Derived from serial.c written by Theodore Ts'o and Linus Torvalds
- * This code is released under the GNU General Public License (GPL)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index d008f5a75197..b674793be478 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -246,8 +246,10 @@ static void sysrq_handle_showallcpus(int key)
* architecture has no support for it:
*/
if (!trigger_all_cpu_backtrace()) {
- struct pt_regs *regs = get_irq_regs();
+ struct pt_regs *regs = NULL;
+ if (in_irq())
+ regs = get_irq_regs();
if (regs) {
pr_info("CPU%d:\n", smp_processor_id());
show_regs(regs);
@@ -266,7 +268,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
static void sysrq_handle_showregs(int key)
{
- struct pt_regs *regs = get_irq_regs();
+ struct pt_regs *regs = NULL;
+
+ if (in_irq())
+ regs = get_irq_regs();
if (regs)
show_regs(regs);
perf_event_print_debug();
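Both sysrq hunks above apply the same guard: the rationale is that get_irq_regs() only yields a trustworthy pt_regs pointer from hard-interrupt context, so the handlers now check in_irq() before using it. A minimal sketch of the pattern, independent of this patch:

    struct pt_regs *regs = NULL;

    if (in_irq())                  /* only trust saved registers in hard-IRQ context */
            regs = get_irq_regs();
    if (regs)
            show_regs(regs);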
@@ -649,9 +654,9 @@ static void sysrq_parse_reset_sequence(struct sysrq_state *state)
state->reset_seq_version = sysrq_reset_seq_version;
}
-static void sysrq_do_reset(unsigned long _state)
+static void sysrq_do_reset(struct timer_list *t)
{
- struct sysrq_state *state = (struct sysrq_state *) _state;
+ struct sysrq_state *state = from_timer(state, t, keyreset_timer);
state->reset_requested = true;
@@ -668,7 +673,7 @@ static void sysrq_handle_reset_request(struct sysrq_state *state)
mod_timer(&state->keyreset_timer,
jiffies + msecs_to_jiffies(sysrq_reset_downtime_ms));
else
- sysrq_do_reset((unsigned long)state);
+ sysrq_do_reset(&state->keyreset_timer);
}
static void sysrq_detect_reset_sequence(struct sysrq_state *state,
@@ -904,8 +909,7 @@ static int sysrq_connect(struct input_handler *handler,
sysrq->handle.handler = handler;
sysrq->handle.name = "sysrq";
sysrq->handle.private = sysrq;
- setup_timer(&sysrq->keyreset_timer,
- sysrq_do_reset, (unsigned long)sysrq);
+ timer_setup(&sysrq->keyreset_timer, sysrq_do_reset, 0);
error = input_register_handle(&sysrq->handle);
if (error) {
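The keyreset_timer changes are part of the tree-wide timer API conversion: the callback now receives a struct timer_list * instead of an unsigned long cookie and recovers its containing object with from_timer(), so timer_setup() no longer takes a data argument. A hedged sketch of the shape, using a hypothetical struct foo:

    #include <linux/timer.h>

    struct foo {
            struct timer_list timer;
    };

    static void foo_timeout(struct timer_list *t)
    {
            struct foo *foo = from_timer(foo, t, timer);  /* container_of(t, struct foo, timer) */
            /* handle expiry using foo */
    }

    static void foo_init(struct foo *foo)
    {
            timer_setup(&foo->timer, foo_timeout, 0);  /* replaces setup_timer(&foo->timer, fn, (unsigned long)foo) */
    }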
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index df2d735338e2..e30aa6bf9ff9 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Creating audit events from TTY input.
*
- * Copyright (C) 2007 Red Hat, Inc. All rights reserved. This copyrighted
- * material is made available to anyone wishing to use, modify, copy, or
- * redistribute it subject to the terms and conditions of the GNU General
- * Public License v.2.
+ * Copyright (C) 2007 Red Hat, Inc. All rights reserved.
*
* Authors: Miloslav Trmac <mitr@redhat.com>
*/
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index 5c33fd25676d..6ff8cdfc9d2a 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*/
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index f8eba1c5412f..c996b6859c5e 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Tty buffer allocation management
*/
@@ -446,7 +447,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
* Callers other than flush_to_ldisc() need to exclude the kworker
* from concurrent use of the line discipline, see paste_selection().
*
- * Returns the number of bytes not processed
+ * Returns the number of bytes processed
*/
int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
char *f, int count)
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 94cccb6efa32..dc60aeea87d8 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index efa96e6c4c1b..d9b561d89432 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
index e7032309ee87..c4ecd66fafef 100644
--- a/drivers/tty/tty_jobctrl.c
+++ b/drivers/tty/tty_jobctrl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 84a8ac2a779f..24ec5c7e6b20 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kmod.h>
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 52b7baef4f7a..37a91b3df980 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Ldisc rw semaphore
*
@@ -22,9 +23,6 @@
* Michel Lespinasse <walken@google.com>.
*
* Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
- *
- * This file may be redistributed under the terms of the GNU General Public
- * License v2.
*/
#include <linux/list.h>
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 6b137194069f..25d736880013 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Tty port functions
*/
@@ -78,7 +79,7 @@ EXPORT_SYMBOL(tty_port_init);
* @driver: tty_driver for this device
* @index: index of the tty
*
- * Provide the tty layer wit ha link from a tty (specified by @index) to a
+ * Provide the tty layer with a link from a tty (specified by @index) to a
* tty_port (@port). Use this only if neither tty_port_register_device nor
* tty_port_install is used in the driver. If used, this has to be called before
* tty_register_driver.
@@ -235,7 +236,7 @@ EXPORT_SYMBOL(tty_port_free_xmit_buf);
/**
* tty_port_destroy -- destroy inited port
- * @port: tty port to be doestroyed
+ * @port: tty port to be destroyed
*
* When a port was initialized using tty_port_init, one has to destroy the
* port by this function. Either indirectly by using tty_port refcounting
diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
index ef01d24858cd..58b454c34560 100644
--- a/drivers/tty/vcc.c
+++ b/drivers/tty/vcc.c
@@ -361,17 +361,13 @@ done:
return rv;
}
-static void vcc_rx_timer(unsigned long index)
+static void vcc_rx_timer(struct timer_list *t)
{
+ struct vcc_port *port = from_timer(port, t, rx_timer);
struct vio_driver_state *vio;
- struct vcc_port *port;
unsigned long flags;
int rv;
- port = vcc_get_ne(index);
- if (!port)
- return;
-
spin_lock_irqsave(&port->lock, flags);
port->rx_timer.expires = 0;
@@ -391,18 +387,14 @@ done:
vcc_put(port, false);
}
-static void vcc_tx_timer(unsigned long index)
+static void vcc_tx_timer(struct timer_list *t)
{
- struct vcc_port *port;
+ struct vcc_port *port = from_timer(port, t, tx_timer);
struct vio_vcc *pkt;
unsigned long flags;
int tosend = 0;
int rv;
- port = vcc_get_ne(index);
- if (!port)
- return;
-
spin_lock_irqsave(&port->lock, flags);
port->tx_timer.expires = 0;
@@ -645,13 +637,8 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (rv)
goto free_domain;
- init_timer(&port->rx_timer);
- port->rx_timer.function = vcc_rx_timer;
- port->rx_timer.data = port->index;
-
- init_timer(&port->tx_timer);
- port->tx_timer.function = vcc_tx_timer;
- port->tx_timer.data = port->index;
+ timer_setup(&port->rx_timer, vcc_rx_timer, 0);
+ timer_setup(&port->tx_timer, vcc_tx_timer, 0);
dev_set_drvdata(&vdev->dev, port);
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index a5f88cf0f61d..722a6690c70d 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* consolemap.c
*
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index f974d6340d04..c8d90d7e7e37 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Written for linux by Johan Myreen as a translation from
* the assembly version by Linus (with diacriticals added)
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 602d71630952..bce4c71cb338 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
@@ -102,6 +103,7 @@
#include <linux/uaccess.h>
#include <linux/kdb.h>
#include <linux/ctype.h>
+#include <linux/bsearch.h>
#define MAX_NR_CON_DRIVER 16
@@ -2142,22 +2144,15 @@ struct interval {
uint32_t last;
};
-static int bisearch(uint32_t ucs, const struct interval *table, int max)
+static int ucs_cmp(const void *key, const void *elt)
{
- int min = 0;
- int mid;
+ uint32_t ucs = *(uint32_t *)key;
+ struct interval e = *(struct interval *) elt;
- if (ucs < table[0].first || ucs > table[max].last)
- return 0;
- while (max >= min) {
- mid = (min + max) / 2;
- if (ucs > table[mid].last)
- min = mid + 1;
- else if (ucs < table[mid].first)
- max = mid - 1;
- else
- return 1;
- }
+ if (ucs > e.last)
+ return 1;
+ else if (ucs < e.first)
+ return -1;
return 0;
}
@@ -2169,7 +2164,12 @@ static int is_double_width(uint32_t ucs)
{ 0xFE10, 0xFE19 }, { 0xFE30, 0xFE6F }, { 0xFF00, 0xFF60 },
{ 0xFFE0, 0xFFE6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD }
};
- return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
+ if (ucs < double_width[0].first ||
+ ucs > double_width[ARRAY_SIZE(double_width) - 1].last)
+ return 0;
+
+ return bsearch(&ucs, double_width, ARRAY_SIZE(double_width),
+ sizeof(struct interval), ucs_cmp) != NULL;
}
static void con_flush(struct vc_data *vc, unsigned long draw_from,
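The open-coded bisearch() above is dropped in favour of the generic bsearch() from <linux/bsearch.h>: the caller passes a key, a sorted array, the element count and size, and a comparison callback returning <0, 0 or >0. A small sketch of the same range lookup, with hypothetical names:

    #include <linux/bsearch.h>
    #include <linux/types.h>

    struct range { u32 first, last; };

    static int range_cmp(const void *key, const void *elt)
    {
            u32 ucs = *(const u32 *)key;
            const struct range *r = elt;

            if (ucs > r->last)
                    return 1;
            if (ucs < r->first)
                    return -1;
            return 0;                        /* ucs falls inside [first, last] */
    }

    /* true when ucs lies in one of the sorted, non-overlapping ranges */
    static bool in_ranges(u32 ucs, const struct range *tbl, size_t n)
    {
            return bsearch(&ucs, tbl, n, sizeof(*tbl), range_cmp) != NULL;
    }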
@@ -2205,7 +2205,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
console_lock();
vc = tty->driver_data;
if (vc == NULL) {
- printk(KERN_ERR "vt: argh, driver_data is NULL !\n");
+ pr_err("vt: argh, driver_data is NULL !\n");
console_unlock();
return 0;
}
@@ -3190,20 +3190,21 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last,
pr_info("Console: switching ");
if (!deflt)
- printk(KERN_CONT "consoles %d-%d ", first+1, last+1);
+ pr_cont("consoles %d-%d ", first + 1, last + 1);
if (j >= 0) {
struct vc_data *vc = vc_cons[j].d;
- printk(KERN_CONT "to %s %s %dx%d\n",
- vc->vc_can_do_color ? "colour" : "mono",
- desc, vc->vc_cols, vc->vc_rows);
+ pr_cont("to %s %s %dx%d\n",
+ vc->vc_can_do_color ? "colour" : "mono",
+ desc, vc->vc_cols, vc->vc_rows);
if (k >= 0) {
vc = vc_cons[k].d;
update_screen(vc);
}
- } else
- printk(KERN_CONT "to %s\n", desc);
+ } else {
+ pr_cont("to %s\n", desc);
+ }
retval = 0;
err:
@@ -3622,9 +3623,8 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
con_driver, con_dev_groups,
"vtcon%i", con_driver->node);
if (IS_ERR(con_driver->dev)) {
- printk(KERN_WARNING "Unable to create device for %s; "
- "errno = %ld\n", con_driver->desc,
- PTR_ERR(con_driver->dev));
+ pr_warn("Unable to create device for %s; errno = %ld\n",
+ con_driver->desc, PTR_ERR(con_driver->dev));
con_driver->dev = NULL;
} else {
vtconsole_init_device(con_driver);
@@ -3761,8 +3761,8 @@ static int __init vtconsole_class_init(void)
vtconsole_class = class_create(THIS_MODULE, "vtconsole");
if (IS_ERR(vtconsole_class)) {
- printk(KERN_WARNING "Unable to create vt console class; "
- "errno = %ld\n", PTR_ERR(vtconsole_class));
+ pr_warn("Unable to create vt console class; errno = %ld\n",
+ PTR_ERR(vtconsole_class));
vtconsole_class = NULL;
}
@@ -3778,9 +3778,8 @@ static int __init vtconsole_class_init(void)
"vtcon%i", con->node);
if (IS_ERR(con->dev)) {
- printk(KERN_WARNING "Unable to create "
- "device for %s; errno = %ld\n",
- con->desc, PTR_ERR(con->dev));
+ pr_warn("Unable to create device for %s; errno = %ld\n",
+ con->desc, PTR_ERR(con->dev));
con->dev = NULL;
} else {
vtconsole_init_device(con);
@@ -4121,37 +4120,45 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
return -EINVAL;
if (op->charcount > 512)
return -EINVAL;
+ if (op->width <= 0 || op->width > 32 || op->height > 32)
+ return -EINVAL;
+ size = (op->width+7)/8 * 32 * op->charcount;
+ if (size > max_font_size)
+ return -ENOSPC;
+
+ font.data = memdup_user(op->data, size);
+ if (IS_ERR(font.data))
+ return PTR_ERR(font.data);
+
if (!op->height) { /* Need to guess font height [compat] */
int h, i;
- u8 __user *charmap = op->data;
- u8 tmp;
-
- /* If from KDFONTOP ioctl, don't allow things which can be done in userland,
- so that we can get rid of this soon */
- if (!(op->flags & KD_FONT_FLAG_OLD))
+ u8 *charmap = font.data;
+
+ /*
+ * If from KDFONTOP ioctl, don't allow things which can be done
+ * in userland, so that we can get rid of this soon
+ */
+ if (!(op->flags & KD_FONT_FLAG_OLD)) {
+ kfree(font.data);
return -EINVAL;
+ }
+
for (h = 32; h > 0; h--)
- for (i = 0; i < op->charcount; i++) {
- if (get_user(tmp, &charmap[32*i+h-1]))
- return -EFAULT;
- if (tmp)
+ for (i = 0; i < op->charcount; i++)
+ if (charmap[32*i+h-1])
goto nonzero;
- }
+
+ kfree(font.data);
return -EINVAL;
+
nonzero:
op->height = h;
}
- if (op->width <= 0 || op->width > 32 || op->height > 32)
- return -EINVAL;
- size = (op->width+7)/8 * 32 * op->charcount;
- if (size > max_font_size)
- return -ENOSPC;
+
font.charcount = op->charcount;
- font.height = op->height;
font.width = op->width;
- font.data = memdup_user(op->data, size);
- if (IS_ERR(font.data))
- return PTR_ERR(font.data);
+ font.height = op->height;
+
console_lock();
if (vc->vc_mode != KD_TEXT)
rc = -EINVAL;
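The reordered con_font_set() now validates width/height and computes size before touching user memory, then pulls the whole font buffer in with memdup_user(), so the height-guessing loop scans kernel memory instead of issuing a get_user() per byte. A hedged sketch of the memdup_user() idiom (names hypothetical):

    #include <linux/types.h>
    #include <linux/string.h>        /* memdup_user() */
    #include <linux/slab.h>          /* kfree() */
    #include <linux/err.h>

    static int copy_font_data(const void __user *uptr, size_t size, u8 **out)
    {
            u8 *buf;

            buf = memdup_user(uptr, size);  /* kmalloc() + copy_from_user() in one step */
            if (IS_ERR(buf))
                    return PTR_ERR(buf);    /* typically -EFAULT or -ENOMEM */

            *out = buf;                     /* caller must kfree(*out) on every exit path */
            return 0;
    }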
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 939a63bca82f..f699abab1787 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -77,11 +77,12 @@ config USB_PCI
depends on PCI
default y
---help---
- A lot of embeded system SOC (e.g. freescale T2080) have both
- PCI and USB modules. But USB module is controlled by registers
- directly, it have no relationship with PCI module.
+ Many embedded system SOCs (e.g. freescale T2080) have both
+ PCI and USB modules with the USB module directly controlled by
+ registers and having no relationship to the PCI module.
- When say N here it will not build PCI related code in USB driver.
+ If you have such a device you may say N here and PCI related code
+ will not be built in the USB driver.
if USB
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 5160a4a966b3..6470d259b7d8 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
* cxacru.c - driver for USB ADSL modems based on
* Conexant AccessRunner chipset
@@ -6,21 +7,6 @@
* Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru)
* Copyright (C) 2007 Simon Arlott
* Copyright (C) 2009 Simon Arlott
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
/*
@@ -424,6 +410,7 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
case CXPOLL_STOPPING:
/* abort stop request */
instance->poll_state = CXPOLL_POLLING;
+ /* fall through */
case CXPOLL_POLLING:
case CXPOLL_SHUTDOWN:
/* don't start polling */
@@ -570,10 +557,8 @@ static int cxacru_start_wait_urb(struct urb *urb, struct completion *done,
{
struct timer_list timer;
- init_timer(&timer);
+ setup_timer(&timer, cxacru_timeout_kill, (unsigned long)urb);
timer.expires = jiffies + msecs_to_jiffies(CMD_TIMEOUT);
- timer.data = (unsigned long) urb;
- timer.function = cxacru_timeout_kill;
add_timer(&timer);
wait_for_completion(done);
del_timer_sync(&timer);
@@ -797,6 +782,7 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
case CXPOLL_STOPPING:
/* abort stop request */
instance->poll_state = CXPOLL_POLLING;
+ /* fall through */
case CXPOLL_POLLING:
case CXPOLL_SHUTDOWN:
/* don't start polling */
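The two new /* fall through */ comments are functional rather than cosmetic: they mark the missing break after the CXPOLL_STOPPING case as intentional, which is what -Wimplicit-fallthrough (at its default comment-matching level) and static checkers look for. A minimal, self-contained sketch of the convention:

    enum poll_state { STOPPING, POLLING, SHUTDOWN };

    static enum poll_state next_state(enum poll_state s)
    {
            switch (s) {
            case STOPPING:
                    s = POLLING;     /* abort the pending stop request */
                    /* fall through */
            case POLLING:
            case SHUTDOWN:
                    break;           /* don't (re)start polling */
            }
            return s;
    }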
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 3676adb40d89..5a5e8c0aaa39 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
* speedtch.c - Alcatel SpeedTouch USB xDSL modem driver
*
@@ -6,21 +7,6 @@
* Copyright (C) 2004, David Woodhouse
*
* Based on "modem_run.c", copyright (C) 2001, Benoit Papillault
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <asm/page.h>
@@ -874,16 +860,13 @@ static int speedtch_bind(struct usbatm_data *usbatm,
usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
INIT_WORK(&instance->status_check_work, speedtch_check_status);
- init_timer(&instance->status_check_timer);
-
- instance->status_check_timer.function = speedtch_status_poll;
- instance->status_check_timer.data = (unsigned long)instance;
+ setup_timer(&instance->status_check_timer, speedtch_status_poll,
+ (unsigned long)instance);
instance->last_status = 0xff;
instance->poll_delay = MIN_POLL_DELAY;
- init_timer(&instance->resubmit_timer);
- instance->resubmit_timer.function = speedtch_resubmit_int;
- instance->resubmit_timer.data = (unsigned long)instance;
+ setup_timer(&instance->resubmit_timer, speedtch_resubmit_int,
+ (unsigned long)instance);
instance->int_urb = usb_alloc_urb(0, GFP_KERNEL);
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index ba7616395db2..ab75690044bb 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*-
* Copyright (c) 2003, 2004
* Damien Bergamini <damien.bergamini@free.fr>. All rights reserved.
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 8607af758bbd..044264aa1f96 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
* usbatm.c - Generic USB xDSL driver core
*
* Copyright (C) 2001, Alcatel
* Copyright (C) 2003, Duncan Sands, SolNegro, Josep Comas
* Copyright (C) 2004, David Woodhouse, Roman Kagan
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
/*
diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h
index f3eecd967a8a..d3bdc4cc47aa 100644
--- a/drivers/usb/atm/usbatm.h
+++ b/drivers/usb/atm/usbatm.h
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
* usbatm.h - Generic USB xDSL driver core
*
* Copyright (C) 2001, Alcatel
* Copyright (C) 2003, Duncan Sands, SolNegro, Josep Comas
* Copyright (C) 2004, David Woodhouse
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#ifndef _USBATM_H_
diff --git a/drivers/usb/atm/xusbatm.c b/drivers/usb/atm/xusbatm.c
index c73c1ec3005e..ffc9810070a3 100644
--- a/drivers/usb/atm/xusbatm.c
+++ b/drivers/usb/atm/xusbatm.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
* xusbatm.c - dumb usbatm-based driver for modems initialized in userspace
*
* Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/module.h>
diff --git a/drivers/usb/c67x00/Makefile b/drivers/usb/c67x00/Makefile
index da5f314a5de0..0cde62d06e5d 100644
--- a/drivers/usb/c67x00/Makefile
+++ b/drivers/usb/c67x00/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Cypress C67X00 USB Controller
#
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 5796c8820514..53838e7d4eef 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00-drv.c: Cypress C67X00 USB Common infrastructure
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
/*
diff --git a/drivers/usb/c67x00/c67x00-hcd.c b/drivers/usb/c67x00/c67x00-hcd.c
index 30d3f346686e..c39eee17c0e4 100644
--- a/drivers/usb/c67x00/c67x00-hcd.c
+++ b/drivers/usb/c67x00/c67x00-hcd.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00-hcd.c: Cypress C67X00 USB Host Controller Driver
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
#include <linux/device.h>
diff --git a/drivers/usb/c67x00/c67x00-hcd.h b/drivers/usb/c67x00/c67x00-hcd.h
index cf8a455a6403..3b181d4c7a03 100644
--- a/drivers/usb/c67x00/c67x00-hcd.h
+++ b/drivers/usb/c67x00/c67x00-hcd.h
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00-hcd.h: Cypress C67X00 USB HCD
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
#ifndef _USB_C67X00_HCD_H
diff --git a/drivers/usb/c67x00/c67x00-ll-hpi.c b/drivers/usb/c67x00/c67x00-ll-hpi.c
index b58151841e10..e1fe3603140a 100644
--- a/drivers/usb/c67x00/c67x00-ll-hpi.c
+++ b/drivers/usb/c67x00/c67x00-ll-hpi.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00-ll-hpi.c: Cypress C67X00 USB Low level interface using HPI
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
#include <asm/byteorder.h>
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index 7311ed61e99a..633c52de3bb3 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00-sched.c: Cypress C67X00 USB Host Controller Driver - TD scheduling
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
#include <linux/kthread.h>
@@ -966,13 +952,11 @@ static void c67x00_handle_successful_td(struct c67x00_hcd *c67x00,
static void c67x00_handle_isoc(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
struct urb *urb = td->urb;
- struct c67x00_urb_priv *urbp;
int cnt;
if (!urb)
return;
- urbp = urb->hcpriv;
cnt = td->privdata;
if (td->status & TD_ERROR_MASK)
diff --git a/drivers/usb/c67x00/c67x00.h b/drivers/usb/c67x00/c67x00.h
index a26e9ded0f32..7ce10928b037 100644
--- a/drivers/usb/c67x00/c67x00.h
+++ b/drivers/usb/c67x00/c67x00.h
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00.h: Cypress C67X00 USB register and field definitions
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
#ifndef _USB_C67X00_H
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 51f4157bbecf..785f0ed037f7 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -9,7 +9,7 @@ config USB_CHIPIDEA
Dual-role switch (ID, OTG FSM, sysfs), Host-only, and
Peripheral-only.
- When compiled dynamically, the module will be called ci-hdrc.ko.
+ When compiled dynamically, the module will be called ci_hdrc.ko.
if USB_CHIPIDEA
diff --git a/drivers/usb/chipidea/bits.h b/drivers/usb/chipidea/bits.h
index e462f55c8b99..98da99510be7 100644
--- a/drivers/usb/chipidea/bits.h
+++ b/drivers/usb/chipidea/bits.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bits.h - register bits of the ChipIdea USB IP core
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DRIVERS_USB_CHIPIDEA_BITS_H
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 6743f85b1b7a..98b7cb3d0064 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ci.h - common structures, functions, and macros of the ChipIdea driver
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DRIVERS_USB_CHIPIDEA_CI_H
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 5f4a8157fad8..3b45c25f296e 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright (C) 2012 Marek Vasut <marex@denx.de>
* on behalf of DENX Software Engineering GmbH
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/module.h>
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index d666c9f036ba..204275f47573 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -1,12 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#ifndef __DRIVER_USB_CHIPIDEA_CI_HDRC_IMX_H
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index bb626120296f..3593ce0ec641 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -1,9 +1,5 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. */
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index 39414e4b2d81..49a61549cee6 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ci_hdrc_pci.c - MIPS USB IP core family device controller
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index bfcee2702d50..7b65a1040d2c 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016, NVIDIA Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index 99425db9ba62..c044fba463e4 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/chipidea/ci_hdrc_zevio.c b/drivers/usb/chipidea/ci_hdrc_zevio.c
index 1264de505527..e1634da4a4b1 100644
--- a/drivers/usb/chipidea/ci_hdrc_zevio.c
+++ b/drivers/usb/chipidea/ci_hdrc_zevio.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2, as
- * published by the Free Software Foundation.
- *
* Based off drivers/usb/chipidea/ci_hdrc_msm.c
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 43ea5fb87b9a..dd2dd9391bb7 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* core.c - ChipIdea USB IP core family device controller
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/*
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 18cb8e46262d..19d60ed7e41f 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* host.c - ChipIdea USB host controller driver
*
* Copyright (c) 2012 Intel Corporation
*
* Author: Alexander Shishkin
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 10236fe71522..db4ceffcf2a6 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* otg.c - ChipIdea USB IP core OTG driver
*
* Copyright (C) 2013 Freescale Semiconductor, Inc.
*
* Author: Peter Chen
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/*
diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h
index 9ecb598e48f0..7e7428e48bfa 100644
--- a/drivers/usb/chipidea/otg.h
+++ b/drivers/usb/chipidea/otg.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013-2014 Freescale Semiconductor, Inc.
*
* Author: Peter Chen
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DRIVERS_USB_CHIPIDEA_OTG_H
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 5ea0246f650d..9e2d300060bc 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* otg_fsm.c - ChipIdea USB IP core OTG FSM driver
*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
*
* Author: Jun Li
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/*
diff --git a/drivers/usb/chipidea/otg_fsm.h b/drivers/usb/chipidea/otg_fsm.h
index 6366fe398ba6..2b49d29bf2fb 100644
--- a/drivers/usb/chipidea/otg_fsm.h
+++ b/drivers/usb/chipidea/otg_fsm.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
*
* Author: Jun Li
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DRIVERS_USB_CHIPIDEA_OTG_FSM_H
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index fe8a90543ea3..9852ec5e6e01 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* udc.c - ChipIdea UDC driver
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/delay.h>
@@ -1526,6 +1523,10 @@ static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
gadget_ready = 1;
spin_unlock_irqrestore(&ci->lock, flags);
+ if (ci->usb_phy)
+ usb_phy_set_charger_state(ci->usb_phy, is_active ?
+ USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);
+
if (gadget_ready) {
if (is_active) {
pm_runtime_get_sync(&_gadget->dev);
diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
index 2ecd1174d66c..e023735d94b7 100644
--- a/drivers/usb/chipidea/udc.h
+++ b/drivers/usb/chipidea/udc.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* udc.h - ChipIdea UDC structures
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DRIVERS_USB_CHIPIDEA_UDC_H
diff --git a/drivers/usb/chipidea/ulpi.c b/drivers/usb/chipidea/ulpi.c
index 1219583dc1b2..6da42dcd2888 100644
--- a/drivers/usb/chipidea/ulpi.c
+++ b/drivers/usb/chipidea/ulpi.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 Linaro Ltd.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/device.h>
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 9f4a0185dd60..8cdf0af156c6 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -1,12 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/module.h>
diff --git a/drivers/usb/class/Makefile b/drivers/usb/class/Makefile
index 32e85277b5cf..5d393a28f7f2 100644
--- a/drivers/usb/class/Makefile
+++ b/drivers/usb/class/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for USB Class drivers
# (one step up from the misc category)
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 18c923a4c16e..8e0636c963a7 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* cdc-acm.c
*
@@ -12,20 +13,6 @@
* USB Abstract Control Model driver for USB modems and ISDN adapters
*
* Sponsored by SuSE
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#undef DEBUG
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 3e865dbf878c..6c181a625daf 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* cdc-wdm.c
*
@@ -483,7 +484,7 @@ static ssize_t wdm_read
if (rv < 0)
return -ERESTARTSYS;
- cntr = ACCESS_ONCE(desc->length);
+ cntr = READ_ONCE(desc->length);
if (cntr == 0) {
desc->read = 0;
retry:
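The ACCESS_ONCE() to READ_ONCE() switch above is mechanical: for a scalar like desc->length both force a single load the compiler may neither re-read nor tear, but ACCESS_ONCE() was being retired tree-wide because it is unreliable for non-scalar types. A minimal sketch (names hypothetical):

    #include <linux/compiler.h>

    static unsigned int snapshot(unsigned int *p)
    {
            /* exactly one load of *p, even under aggressive optimisation */
            return READ_ONCE(*p);
    }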
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index fb87c17ed6fa..c454885ef4a0 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* usblp.c
*
@@ -31,22 +32,6 @@
* none - Maintained in Linux kernel after v0.13
*/
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 6ebfabfa0dc7..0b8b0f3bdd2f 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/**
* drivers/usb/class/usbtmc.c - USB Test & Measurement class driver
*
* Copyright (C) 2007 Stefan Kopp, Gechingen, Germany
* Copyright (C) 2008 Novell, Inc.
* Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * The GNU General Public License is available at
- * http://www.gnu.org/copyleft/gpl.html.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -1343,6 +1331,7 @@ static void usbtmc_interrupt(struct urb *urb)
case -EOVERFLOW:
dev_err(dev, "overflow with length %d, actual length is %d\n",
data->iin_wMaxPacketSize, urb->actual_length);
+ /* fall through */
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 552ff7ac5a6b..50a2362ed3ea 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Provides code common for host and device side USB.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
* If either host side (ie. CONFIG_USB=y) or device side USB stack
* (ie. CONFIG_USB_GADGET=y) is compiled in the kernel, this module is
* compiled-in as well. Otherwise, if either of the two stacks is
diff --git a/drivers/usb/common/led.c b/drivers/usb/common/led.c
index df23da00a901..7bd81166b77d 100644
--- a/drivers/usb/common/led.c
+++ b/drivers/usb/common/led.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* LED Triggers for USB Activity
*
* Copyright 2014 Michal Sojka <sojka@merica.cz>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 4aa5195db8ea..8b351444cc40 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* ulpi.c - USB ULPI PHY bus
*
* Copyright (C) 2015 Intel Corporation
*
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/ulpi/interface.h>
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index b8fe31e409a5..3740cf95560e 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* OTG Finite State Machine from OTG spec
*
@@ -5,20 +6,6 @@
*
* Author: Li Yang <LeoLi@freescale.com>
* Jerry Huang <Chang-Ming.Huang@freescale.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index b64568cf572c..77eef8acff94 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* DMA memory management for framework level HCD code (hc_driver)
*
@@ -5,7 +6,6 @@
* and should work with all USB controllers, regardless of bus type.
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/module.h>
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 883549ee946c..da8acd980fc6 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/usb.h>
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 55dea2e7828f..c2cf62b7043a 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* devices.c
* (C) Copyright 1999 Randy Dunlap.
@@ -5,20 +6,6 @@
* (proc file per device)
* (C) Copyright 1999 Deti Fliegl (new USB architecture)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*************************************************************
*
* <mountpoint>/devices contains USB topology, device, config, class,
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e9326f31db8d..705c573d0257 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*****************************************************************************/
/*
@@ -5,20 +6,6 @@
*
* Copyright (C) 1999-2000 Thomas Sailer (sailer@ife.ee.ethz.ch)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* This file implements the usbfs/x/y files, where
* x is the bus number and y the device number.
*
@@ -150,7 +137,7 @@ static int usbfs_increase_memory_usage(u64 amount)
{
u64 lim;
- lim = ACCESS_ONCE(usbfs_memory_mb);
+ lim = READ_ONCE(usbfs_memory_mb);
lim <<= 20;
atomic64_add(amount, &usbfs_memory_usage);
@@ -1833,6 +1820,18 @@ static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg)
return 0;
}
+static void compute_isochronous_actual_length(struct urb *urb)
+{
+ unsigned int i;
+
+ if (urb->number_of_packets > 0) {
+ urb->actual_length = 0;
+ for (i = 0; i < urb->number_of_packets; i++)
+ urb->actual_length +=
+ urb->iso_frame_desc[i].actual_length;
+ }
+}
+
static int processcompl(struct async *as, void __user * __user *arg)
{
struct urb *urb = as->urb;
@@ -1840,6 +1839,7 @@ static int processcompl(struct async *as, void __user * __user *arg)
void __user *addr = as->userurb;
unsigned int i;
+ compute_isochronous_actual_length(urb);
if (as->userbuffer && urb->actual_length) {
if (copy_urb_data_to_user(as->userbuffer, urb))
goto err_out;
@@ -2008,6 +2008,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
void __user *addr = as->userurb;
unsigned int i;
+ compute_isochronous_actual_length(urb);
if (as->userbuffer && urb->actual_length) {
if (copy_urb_data_to_user(as->userbuffer, urb))
return -EFAULT;
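
The helper added above makes usbfs report an aggregate actual_length for isochronous URBs instead of leaving the field untouched. A minimal sketch of the invariant this establishes for a completed isochronous URB (illustrative only, not part of the patch; assumes struct urb from <linux/usb.h>):

/* Sketch: after compute_isochronous_actual_length() has run, the URB's
 * total equals the sum of its per-packet actual lengths. */
static bool isoc_actual_length_is_consistent(const struct urb *urb)
{
	unsigned int i, sum = 0;

	for (i = 0; i < urb->number_of_packets; i++)
		sum += urb->iso_frame_desc[i].actual_length;

	return urb->actual_length == sum;
}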
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index eb87a259d55c..64262a9a8829 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/driver.c - most of the driver model stuff for usb
*
@@ -16,7 +17,6 @@
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*
* NOTE! This is not actually a driver at all, rather this is
* just a collection of helper routines that implement the
@@ -1340,8 +1340,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
int err;
u16 devstat;
- err = usb_get_status(udev, USB_RECIP_DEVICE, 0,
- &devstat);
+ err = usb_get_std_status(udev, USB_RECIP_DEVICE, 0,
+ &devstat);
if (err) {
dev_err(&udev->dev,
"Failed to suspend device, error %d\n",
@@ -1461,6 +1461,7 @@ static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
int usb_suspend(struct device *dev, pm_message_t msg)
{
struct usb_device *udev = to_usb_device(dev);
+ int r;
unbind_no_pm_drivers_interfaces(udev);
@@ -1469,7 +1470,14 @@ int usb_suspend(struct device *dev, pm_message_t msg)
* so we may still need to unbind and rebind upon resume
*/
choose_wakeup(udev, msg);
- return usb_suspend_both(udev, msg);
+ r = usb_suspend_both(udev, msg);
+ if (r)
+ return r;
+
+ if (udev->quirks & USB_QUIRK_DISCONNECT_SUSPEND)
+ usb_port_disable(udev);
+
+ return 0;
}
/* The device lock is held by the PM core */
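
The reworked usb_suspend() above only touches the port when suspend itself succeeded, and only for devices flagged with USB_QUIRK_DISCONNECT_SUSPEND (the Huawei LTE entries added to quirks.c later in this series). A condensed sketch of that control flow, assuming <linux/usb/quirks.h> for the flag (illustrative, not the patch itself):

/* Sketch of the quirk-gated disconnect-on-suspend path. */
static int suspend_and_maybe_disconnect(struct usb_device *udev,
					pm_message_t msg)
{
	int r = usb_suspend_both(udev, msg);

	if (r)
		return r;		/* suspend failed: leave the port up */

	if (udev->quirks & USB_QUIRK_DISCONNECT_SUSPEND)
		usb_port_disable(udev);	/* quirky device: drop the port */

	return 0;
}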
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index a60bc830a056..1c2c04079676 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/core/endpoint.c
*
@@ -6,7 +7,6 @@
* (C) Copyright 2006 Novell Inc.
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*
* Endpoint sysfs stuff
*/
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 87ad6b6bfee8..65de6f73b672 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/core/file.c
*
@@ -14,7 +15,6 @@
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/module.h>
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index bd3e0c5a6db2..83c14dda6300 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/generic.c - generic driver for USB devices (not interfaces)
*
@@ -16,7 +17,6 @@
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/usb.h>
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index ea829ad798c0..66fe1b78d952 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* (C) Copyright David Brownell 2000-2002
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 75ad6718858c..19b5c4afeef2 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* (C) Copyright Linus Torvalds 1999
* (C) Copyright Johannes Erdfelt 1999-2001
@@ -6,20 +7,6 @@
* (C) Copyright Deti Fliegl 1999
* (C) Copyright Randy Dunlap 2000
* (C) Copyright David Brownell 2000-2002
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/bcd.h>
@@ -2558,9 +2545,7 @@ struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
hcd->self.bus_name = bus_name;
hcd->self.uses_dma = (sysdev->dma_mask != NULL);
- init_timer(&hcd->rh_timer);
- hcd->rh_timer.function = rh_timer_func;
- hcd->rh_timer.data = (unsigned long) hcd;
+ setup_timer(&hcd->rh_timer, rh_timer_func, (unsigned long)hcd);
#ifdef CONFIG_PM
INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
#endif
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index e9ce6bb0b22d..7ccdd3d4db84 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB hub driver.
*
@@ -7,7 +8,6 @@
* (C) Copyright 2001 Brad Hards (bhards@bigpond.net.au)
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/kernel.h>
@@ -1482,7 +1482,7 @@ static int hub_configure(struct usb_hub *hub,
/* power budgeting mostly matters with bus-powered hubs,
* and battery-powered root hubs (may provide just 8 mA).
*/
- ret = usb_get_status(hdev, USB_RECIP_DEVICE, 0, &hubstatus);
+ ret = usb_get_std_status(hdev, USB_RECIP_DEVICE, 0, &hubstatus);
if (ret) {
message = "can't get hub status";
goto fail;
@@ -3279,7 +3279,7 @@ static int finish_port_resume(struct usb_device *udev)
*/
if (status == 0) {
devstatus = 0;
- status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
+ status = usb_get_std_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
/* If a normal resume failed, try doing a reset-resume */
if (status && !udev->reset_resume && udev->persist_enabled) {
@@ -3303,7 +3303,7 @@ static int finish_port_resume(struct usb_device *udev)
if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP))
status = usb_disable_remote_wakeup(udev);
} else {
- status = usb_get_status(udev, USB_RECIP_INTERFACE, 0,
+ status = usb_get_std_status(udev, USB_RECIP_INTERFACE, 0,
&devstatus);
if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP
| USB_INTRF_STAT_FUNC_RW))
@@ -4183,6 +4183,19 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
return ret;
}
+/*
+ * usb_port_disable - disable a usb device's upstream port
+ * @udev: device to disable
+ * Context: @udev locked, must be able to sleep.
+ *
+ * Disables a USB device that isn't in active use.
+ */
+int usb_port_disable(struct usb_device *udev)
+{
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
+
+ return hub_port_disable(hub, udev->portnum, 0);
+}
/* USB 2.0 spec, 7.1.7.3 / fig 7-29:
*
@@ -4853,7 +4866,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
&& udev->bus_mA <= unit_load) {
u16 devstat;
- status = usb_get_status(udev, USB_RECIP_DEVICE, 0,
+ status = usb_get_std_status(udev, USB_RECIP_DEVICE, 0,
&devstat);
if (status) {
dev_dbg(&udev->dev, "get status %d ?\n", status);
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 34c1a7e22aae..2a700ccc868c 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* usb hub driver head file
*
@@ -8,15 +9,6 @@
* Copyright (C) 2012 Intel Corp (tianyu.lan@intel.com)
*
* move struct usb_hub to this file.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
*/
#include <linux/usb.h>
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index 1af877942110..9dbb429cd471 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB port LED trigger
*
* Copyright (C) 2016 Rafał Miłecki <rafal@milecki.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/device.h>
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 371a07d874a3..77001bcfc504 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1,8 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* message.c - synchronous message handling
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/pci.h> /* for scatterlist macros */
@@ -918,7 +918,8 @@ int usb_get_device_descriptor(struct usb_device *dev, unsigned int size)
/**
* usb_get_status - issues a GET_STATUS call
* @dev: the device whose status is being checked
- * @type: USB_RECIP_*; for device, interface, or endpoint
+ * @recip: USB_RECIP_*; for device, interface, or endpoint
+ * @type: USB_STATUS_TYPE_*; for standard or PTM status types
* @target: zero (for device), else interface or endpoint number
* @data: pointer to two bytes of bitmap data
* Context: !in_interrupt ()
@@ -937,24 +938,58 @@ int usb_get_device_descriptor(struct usb_device *dev, unsigned int size)
* Returns 0 and the status value in *@data (in host byte order) on success,
* or else the status code from the underlying usb_control_msg() call.
*/
-int usb_get_status(struct usb_device *dev, int type, int target, void *data)
+int usb_get_status(struct usb_device *dev, int recip, int type, int target,
+ void *data)
{
int ret;
- __le16 *status = kmalloc(sizeof(*status), GFP_KERNEL);
+ void *status;
+ int length;
+
+ switch (type) {
+ case USB_STATUS_TYPE_STANDARD:
+ length = 2;
+ break;
+ case USB_STATUS_TYPE_PTM:
+ if (recip != USB_RECIP_DEVICE)
+ return -EINVAL;
+ length = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status = kmalloc(length, GFP_KERNEL);
if (!status)
return -ENOMEM;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- USB_REQ_GET_STATUS, USB_DIR_IN | type, 0, target, status,
- sizeof(*status), USB_CTRL_GET_TIMEOUT);
+ USB_REQ_GET_STATUS, USB_DIR_IN | recip, USB_STATUS_TYPE_STANDARD,
+ target, status, length, USB_CTRL_GET_TIMEOUT);
- if (ret == 2) {
- *(u16 *) data = le16_to_cpu(*status);
+ switch (ret) {
+ case 4:
+ if (type != USB_STATUS_TYPE_PTM) {
+ ret = -EIO;
+ break;
+ }
+
+ *(u32 *) data = le32_to_cpu(*(__le32 *) status);
+ ret = 0;
+ break;
+ case 2:
+ if (type != USB_STATUS_TYPE_STANDARD) {
+ ret = -EIO;
+ break;
+ }
+
+ *(u16 *) data = le16_to_cpu(*(__le16 *) status);
ret = 0;
- } else if (ret >= 0) {
+ break;
+ default:
ret = -EIO;
}
+
kfree(status);
return ret;
}
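
With the change above, usb_get_status() takes both a recipient and a status type, allocates 2 or 4 bytes accordingly, and rejects replies whose length does not match the requested type. A hedged caller sketch; usb_get_std_status() used elsewhere in this series is assumed to be a thin wrapper in include/linux/usb.h, which is not part of this hunk:

static int read_device_status(struct usb_device *udev)
{
	u16 std;	/* standard status: 2 bytes, host byte order */
	u32 ptm;	/* PTM status: 4 bytes, device recipient only */
	int ret;

	ret = usb_get_status(udev, USB_RECIP_DEVICE, USB_STATUS_TYPE_STANDARD,
			     0, &std);
	if (ret)
		return ret;

	/* A PTM request with a non-device recipient would return -EINVAL. */
	return usb_get_status(udev, USB_RECIP_DEVICE, USB_STATUS_TYPE_PTM,
			      0, &ptm);
}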
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index b12a463a3e22..ab474b11523e 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* All the USB notify logic
*
@@ -7,7 +8,6 @@
* but fixed up to not be so broken.
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index 3863bb1ce8c5..2be968353257 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* of.c The helpers for hcd device tree support
*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Author: Peter Chen <peter.chen@freescale.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/of.h>
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index 085049d37d7a..2ae90158ded7 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/usb/core/otg_whitelist.h
*
* Copyright (C) 2004 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 460c855be0d0..1a01e9ad3804 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* usb port device code
*
* Copyright (C) 2012 Intel Corp
*
* Author: Lan Tianyu <tianyu.lan@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
*/
#include <linux/slab.h>
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index a6aaf2f193a4..f1dbab6f798f 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB device quirk handling logic and table
*
* Copyright (c) 2007 Oliver Neukum
* Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation, version 2.
- *
- *
*/
#include <linux/usb.h>
@@ -203,6 +198,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* Huawei 4G LTE module */
+ { USB_DEVICE(0x12d1, 0x15bb), .driver_info =
+ USB_QUIRK_DISCONNECT_SUSPEND },
+ { USB_DEVICE(0x12d1, 0x15c3), .driver_info =
+ USB_QUIRK_DISCONNECT_SUSPEND },
+
/* SKYMEDI USB_DRIVE */
{ USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -221,6 +222,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Corsair Strafe RGB */
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* Corsair K70 LUX */
+ { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* MIDI keyboard WORLDE MINI */
{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index d930bfda4010..27bb34043053 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/core/sysfs.c
*
@@ -8,7 +9,6 @@
* All of the sysfs file attributes for usb devices and interfaces.
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
@@ -654,7 +654,8 @@ static int add_power_attributes(struct device *dev)
if (udev->usb2_hw_lpm_capable == 1)
rc = sysfs_merge_group(&dev->kobj,
&usb2_hardware_lpm_attr_group);
- if (udev->speed == USB_SPEED_SUPER &&
+ if ((udev->speed == USB_SPEED_SUPER ||
+ udev->speed == USB_SPEED_SUPER_PLUS) &&
udev->lpm_capable == 1)
rc = sysfs_merge_group(&dev->kobj,
&usb3_hardware_lpm_attr_group);
@@ -973,7 +974,7 @@ static ssize_t interface_show(struct device *dev, struct device_attribute *attr,
char *string;
intf = to_usb_interface(dev);
- string = ACCESS_ONCE(intf->cur_altsetting->string);
+ string = READ_ONCE(intf->cur_altsetting->string);
if (!string)
return 0;
return sprintf(buf, "%s\n", string);
@@ -989,7 +990,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
intf = to_usb_interface(dev);
udev = interface_to_usbdev(intf);
- alt = ACCESS_ONCE(intf->cur_altsetting);
+ alt = READ_ONCE(intf->cur_altsetting);
return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X"
"ic%02Xisc%02Xip%02Xin%02X\n",
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 47903d510955..9fdf137c4865 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/module.h>
@@ -187,6 +187,31 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
/*-------------------------------------------------------------------*/
+static const int pipetypes[4] = {
+ PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
+};
+
+/**
+ * usb_urb_ep_type_check - sanity check of endpoint in the given urb
+ * @urb: urb to be checked
+ *
+ * This performs a light-weight sanity check for the endpoint in the
+ * given urb. It returns 0 if the urb contains a valid endpoint, otherwise
+ * a negative error code.
+ */
+int usb_urb_ep_type_check(const struct urb *urb)
+{
+ const struct usb_host_endpoint *ep;
+
+ ep = usb_pipe_endpoint(urb->dev, urb->pipe);
+ if (!ep)
+ return -EINVAL;
+ if (usb_pipetype(urb->pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_urb_ep_type_check);
+
/**
* usb_submit_urb - issue an asynchronous transfer request for an endpoint
* @urb: pointer to the urb describing the request
@@ -326,9 +351,6 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
*/
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
- static int pipetypes[4] = {
- PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
- };
int xfertype, max;
struct usb_device *dev;
struct usb_host_endpoint *ep;
@@ -444,7 +466,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
*/
/* Check that the pipe's type matches the endpoint's type */
- if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
+ if (usb_urb_ep_type_check(urb))
dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
usb_pipetype(urb->pipe), pipetypes[xfertype]);
@@ -492,6 +514,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
if ((urb->interval < 6)
&& (xfertype == USB_ENDPOINT_XFER_INT))
return -EINVAL;
+ /* fall through */
default:
if (urb->interval <= 0)
return -EINVAL;
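
usb_urb_ep_type_check() is now exported so interface drivers can run the same pipe-versus-endpoint sanity check that usb_submit_urb() only warns about. A sketch of using it from a hypothetical driver before submission (illustrative only):

static int checked_submit(struct urb *urb, gfp_t mem_flags)
{
	/* Refuse to submit if the pipe type disagrees with the endpoint
	 * descriptor's transfer type (control/iso/bulk/interrupt). */
	if (usb_urb_ep_type_check(urb))
		return -EINVAL;

	return usb_submit_urb(urb, mem_flags);
}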
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index ef9cf4a21afe..84da17460568 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB-ACPI glue code
*
* Copyright 2012 Red Hat <mjg@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation, version 2.
- *
*/
#include <linux/module.h>
#include <linux/usb.h>
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 17681d5638ac..845286f08ab0 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/core/usb.c
*
@@ -13,7 +14,6 @@
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*
* NOTE! This is not actually a driver at all, rather this is
* just a collection of helper routines that implement the
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index dc6949248823..2bee08d084ae 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/pm.h>
@@ -73,6 +73,7 @@ extern void usb_hub_cleanup(void);
extern int usb_major_init(void);
extern void usb_major_cleanup(void);
extern int usb_device_supports_lpm(struct usb_device *udev);
+extern int usb_port_disable(struct usb_device *udev);
#ifdef CONFIG_PM
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 1b6612c2cdda..82a7d98c3436 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* core.c - DesignWare HS OTG Controller common routines
*
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 8367d4f985c1..f66c94130cac 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* core.h - DesignWare HS OTG Controller common declarations
*
@@ -395,6 +396,9 @@ enum dwc2_ep0_state {
* (default when phy_type is UTMI+ or ULPI)
* 1 - 6 MHz
* (default when phy_type is Full Speed)
+ * @oc_disable: Flag to disable overcurrent condition.
* 0 - Allow overcurrent condition to be detected
* 1 - Disable overcurrent condition detection
* @ts_dline: Enable Term Select Dline pulsing
* 0 - No (default)
* 1 - Yes
@@ -492,6 +496,7 @@ struct dwc2_core_params {
bool dma_desc_fs_enable;
bool host_support_fs_ls_low_power;
bool host_ls_low_power_phy_clk;
+ bool oc_disable;
u8 host_channels;
u16 host_rx_fifo_size;
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index b8bcb007c92a..ab3fa1630853 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* core_intr.c - DesignWare HS OTG Controller common interrupt handling
*
diff --git a/drivers/usb/dwc2/debug.h b/drivers/usb/dwc2/debug.h
index 8222783e6822..6f23219c13cb 100644
--- a/drivers/usb/dwc2/debug.h
+++ b/drivers/usb/dwc2/debug.h
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* debug.h - Designware USB2 DRD controller debug header
*
* Copyright (C) 2015 Intel Corporation
* Mian Yousaf Kaukab <yousaf.kaukab@intel.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "core.h"
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 794b959a7c8c..f4650a88be78 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* debugfs.c - Designware USB2 DRD controller debugfs
*
* Copyright (C) 2015 Intel Corporation
* Mian Yousaf Kaukab <yousaf.kaukab@intel.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/spinlock.h>
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 0d8e09ccb59c..88529d092503 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
@@ -8,10 +9,6 @@
* http://armlinux.simtec.co.uk/
*
* S3C USB2.0 High-speed / OtG driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -3202,6 +3199,8 @@ void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
call_gadget(hsotg, disconnect);
hsotg->lx_state = DWC2_L3;
+
+ usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED);
}
/**
@@ -4004,6 +4003,11 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
return -EINVAL;
}
+ if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
+ dev_err(hsotg->dev, "%s: called in host mode?\n", __func__);
+ return -EINVAL;
+ }
+
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
spin_lock_irqsave(&hsotg->lock, flags);
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index c2631145f404..69eb40cd1b47 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hcd.c - DesignWare HS OTG Controller host-mode routines
*
@@ -213,6 +214,11 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
if (hsotg->params.phy_ulpi_ddr)
usbcfg |= GUSBCFG_DDRSEL;
+
+ /* Set external VBUS indicator as needed. */
+ if (hsotg->params.oc_disable)
+ usbcfg |= (GUSBCFG_ULPI_INT_VBUS_IND |
+ GUSBCFG_INDICATORPASSTHROUGH);
break;
case DWC2_PHY_TYPE_PARAM_UTMI:
/* UTMI+ interface */
@@ -3277,7 +3283,6 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
dwc2_core_init(hsotg, false);
dwc2_enable_global_interrupts(hsotg);
spin_lock_irqsave(&hsotg->lock, flags);
- dwc2_hsotg_disconnect(hsotg);
dwc2_hsotg_core_init_disconnected(hsotg, false);
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_hsotg_core_connect(hsotg);
@@ -3296,8 +3301,12 @@ host:
if (count > 250)
dev_err(hsotg->dev,
"Connection id status change timed out\n");
- hsotg->op_state = OTG_STATE_A_HOST;
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_hsotg_disconnect(hsotg);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ hsotg->op_state = OTG_STATE_A_HOST;
/* Initialize the Core for Host mode */
dwc2_core_init(hsotg, false);
dwc2_enable_global_interrupts(hsotg);
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 11c3c145b793..78e9e01051b5 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hcd.h - DesignWare HS OTG Controller host-mode declarations
*
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index b8bdf545c3a7..28c8898b3b66 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
*
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index 28a8210710b1..916d991b96b8 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
*
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 3ae8b1bbaa55..f472de238ac2 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hcd_queue.c - DesignWare HS OTG Controller host queuing routines
*
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 4592012c4743..2c906d8ee465 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hw.h - DesignWare HS OTG Controller hardware definitions
*
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index a3ffe97170ff..ef73af6e03a9 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright (C) 2004-2016 Synopsys, Inc.
*
@@ -136,6 +137,15 @@ static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg)
p->activate_stm_fs_transceiver = true;
}
+static void dwc2_set_stm32f7xx_hsotg_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->host_rx_fifo_size = 622;
+ p->host_nperio_tx_fifo_size = 128;
+ p->host_perio_tx_fifo_size = 256;
+}
+
const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "brcm,bcm2835-usb", .data = dwc2_set_bcm_params },
{ .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params },
@@ -154,6 +164,8 @@ const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "st,stm32f4x9-fsotg",
.data = dwc2_set_stm32f4x9_fsotg_params },
{ .compatible = "st,stm32f4x9-hsotg" },
+ { .compatible = "st,stm32f7xx-hsotg",
+ .data = dwc2_set_stm32f7xx_hsotg_params },
{},
};
MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
@@ -335,6 +347,9 @@ static void dwc2_get_device_properties(struct dwc2_hsotg *hsotg)
num);
}
}
+
+ if (of_find_property(hsotg->dev->of_node, "disable-over-current", NULL))
+ p->oc_disable = true;
}
static void dwc2_check_param_otg_cap(struct dwc2_hsotg *hsotg)
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index fdeb8c7bf30a..3ecc951a1aea 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* pci.c - DesignWare HS OTG Controller PCI driver
*
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index daf0d37acb37..3e26550d13dd 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* platform.c - DesignWare HS OTG Controller platform driver
*
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 03474d3575ab..07832509584f 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* core.c - DesignWare USB3 DRD Controller Core file
*
@@ -5,18 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/version.h>
@@ -156,9 +145,8 @@ static void __dwc3_set_mode(struct work_struct *work)
} else {
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, true);
- if (dwc->usb2_generic_phy)
- phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
-
+ phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
+ phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
}
break;
case DWC3_GCTL_PRTCAP_DEVICE:
@@ -166,8 +154,8 @@ static void __dwc3_set_mode(struct work_struct *work)
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, false);
- if (dwc->usb2_generic_phy)
- phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
+ phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
+ phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret)
@@ -927,12 +915,13 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
+ dwc->current_dr_role = DWC3_GCTL_PRTCAP_DEVICE;
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, false);
- if (dwc->usb2_generic_phy)
- phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
+ phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
+ phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret) {
@@ -942,12 +931,13 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
}
break;
case USB_DR_MODE_HOST:
+ dwc->current_dr_role = DWC3_GCTL_PRTCAP_HOST;
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, true);
- if (dwc->usb2_generic_phy)
- phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
+ phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
+ phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
ret = dwc3_host_init(dwc);
if (ret) {
@@ -1293,21 +1283,19 @@ static int dwc3_suspend_common(struct dwc3 *dwc)
{
unsigned long flags;
- switch (dwc->dr_mode) {
- case USB_DR_MODE_PERIPHERAL:
- case USB_DR_MODE_OTG:
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_suspend(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
+ dwc3_core_exit(dwc);
break;
- case USB_DR_MODE_HOST:
+ case DWC3_GCTL_PRTCAP_HOST:
default:
/* do nothing */
break;
}
- dwc3_core_exit(dwc);
-
return 0;
}
@@ -1316,18 +1304,17 @@ static int dwc3_resume_common(struct dwc3 *dwc)
unsigned long flags;
int ret;
- ret = dwc3_core_init(dwc);
- if (ret)
- return ret;
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ ret = dwc3_core_init(dwc);
+ if (ret)
+ return ret;
- switch (dwc->dr_mode) {
- case USB_DR_MODE_PERIPHERAL:
- case USB_DR_MODE_OTG:
spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_resume(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
- /* FALLTHROUGH */
- case USB_DR_MODE_HOST:
+ break;
+ case DWC3_GCTL_PRTCAP_HOST:
default:
/* do nothing */
break;
@@ -1338,7 +1325,7 @@ static int dwc3_resume_common(struct dwc3 *dwc)
static int dwc3_runtime_checks(struct dwc3 *dwc)
{
- switch (dwc->dr_mode) {
+ switch (dwc->current_dr_role) {
case USB_DR_MODE_PERIPHERAL:
case USB_DR_MODE_OTG:
if (dwc->connected)
@@ -1381,19 +1368,17 @@ static int dwc3_runtime_resume(struct device *dev)
if (ret)
return ret;
- switch (dwc->dr_mode) {
- case USB_DR_MODE_PERIPHERAL:
- case USB_DR_MODE_OTG:
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
dwc3_gadget_process_pending_events(dwc);
break;
- case USB_DR_MODE_HOST:
+ case DWC3_GCTL_PRTCAP_HOST:
default:
/* do nothing */
break;
}
pm_runtime_mark_last_busy(dev);
- pm_runtime_put(dev);
return 0;
}
@@ -1402,13 +1387,12 @@ static int dwc3_runtime_idle(struct device *dev)
{
struct dwc3 *dwc = dev_get_drvdata(dev);
- switch (dwc->dr_mode) {
- case USB_DR_MODE_PERIPHERAL:
- case USB_DR_MODE_OTG:
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
if (dwc3_runtime_checks(dwc))
return -EBUSY;
break;
- case USB_DR_MODE_HOST:
+ case DWC3_GCTL_PRTCAP_HOST:
default:
/* do nothing */
break;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index ea910acb4bb0..4a4a4c98508c 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* core.h - DesignWare USB3 DRD Core Header
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DRIVERS_USB_DWC3_CORE_H
@@ -529,6 +521,7 @@ struct dwc3_event_buffer {
* @number: endpoint number (1 - 15)
* @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
* @resource_index: Resource transfer index
+ * @frame_number: set to the frame number we want this transfer to start (ISOC)
* @interval: the interval on which the ISOC transfer is started
* @allocated_requests: number of requests allocated
* @queued_requests: number of requests queued for transfer
@@ -581,6 +574,7 @@ struct dwc3_ep {
u8 resource_index;
u32 allocated_requests;
u32 queued_requests;
+ u32 frame_number;
u32 interval;
char name[20];
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 5e9c070ec874..368f8e59219a 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* debug.h - DesignWare USB3 DRD Controller Debug Header
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DWC3_DEBUG_H
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 4e09be80e59f..00e65530c81e 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* debugfs.c - DesignWare USB3 DRD Controller DebugFS file
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 2765c51c7ef5..cc8ab9a8e9d2 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* drd.c - DesignWare USB3 DRD Controller Dual-role support
*
* Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com
*
* Authors: Roger Quadros <rogerq@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/extcon.h>
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index e089df72f766..a94fb1ba8f2c 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* dwc3-exynos.c - Samsung EXYNOS DWC3 Specific Glue layer
*
@@ -5,15 +6,6 @@
* http://www.samsung.com
*
* Author: Anton Tikhomirov <av.tikhomirov@samsung.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index d2ed9523e77c..193a9a88222a 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* dwc3-keystone.c - Keystone Specific Glue layer
*
* Copyright (C) 2010-2013 Texas Instruments Incorporated - http://www.ti.com
*
* Author: WingMan Kwok <w-kwok2@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index a26d1fde0f5e..c4a4d7bd2766 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* dwc3-of-simple.c - OF glue layer for simple integrations
*
@@ -5,15 +6,6 @@
*
* Author: Felipe Balbi <balbi@ti.com>
*
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* This is a combination of the old dwc3-qcom.c by Ivan T. Ivanov
* <iivanov@mm-sol.com> and the original patch adding support for Xilinx' SoC
* by Subbaraya Sundeep Bhatta <subbaraya.sundeep.bhatta@xilinx.com>
@@ -28,11 +20,13 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
struct dwc3_of_simple {
struct device *dev;
struct clk **clks;
int num_clocks;
+ struct reset_control *resets;
};
static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
@@ -95,10 +89,21 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, simple);
simple->dev = dev;
+ simple->resets = of_reset_control_array_get_optional_exclusive(np);
+ if (IS_ERR(simple->resets)) {
+ ret = PTR_ERR(simple->resets);
+ dev_err(dev, "failed to get device resets, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(simple->resets);
+ if (ret)
+ goto err_resetc_put;
+
ret = dwc3_of_simple_clk_init(simple, of_count_phandle_with_args(np,
"clocks", "#clock-cells"));
if (ret)
- return ret;
+ goto err_resetc_assert;
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret) {
@@ -107,7 +112,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
clk_put(simple->clks[i]);
}
- return ret;
+ goto err_resetc_assert;
}
pm_runtime_set_active(dev);
@@ -115,6 +120,13 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
pm_runtime_get_sync(dev);
return 0;
+
+err_resetc_assert:
+ reset_control_assert(simple->resets);
+
+err_resetc_put:
+ reset_control_put(simple->resets);
+ return ret;
}
static int dwc3_of_simple_remove(struct platform_device *pdev)
@@ -123,12 +135,15 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int i;
+ of_platform_depopulate(dev);
+
for (i = 0; i < simple->num_clocks; i++) {
clk_disable_unprepare(simple->clks[i]);
clk_put(simple->clks[i]);
}
- of_platform_depopulate(dev);
+ reset_control_assert(simple->resets);
+ reset_control_put(simple->resets);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 3530795bbb8f..a4719e853b85 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* dwc3-omap.c - OMAP Specific Glue layer
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 54343fbd85ee..3ba11136ebf0 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* dwc3-pci.c - PCI Specific glue layer
*
@@ -5,21 +6,13 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
+#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
@@ -61,6 +54,7 @@ struct dwc3_pci {
guid_t guid;
unsigned int has_dsm_for_pm:1;
+ struct work_struct wakeup_work;
};
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
@@ -174,6 +168,22 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
return 0;
}
+#ifdef CONFIG_PM
+static void dwc3_pci_resume_work(struct work_struct *work)
+{
+ struct dwc3_pci *dwc = container_of(work, struct dwc3_pci, wakeup_work);
+ struct platform_device *dwc3 = dwc->dwc3;
+ int ret;
+
+ ret = pm_runtime_get_sync(&dwc3->dev);
+ if (ret)
+ return;
+
+ pm_runtime_mark_last_busy(&dwc3->dev);
+ pm_runtime_put_sync_autosuspend(&dwc3->dev);
+}
+#endif
+
static int dwc3_pci_probe(struct pci_dev *pci,
const struct pci_device_id *id)
{
@@ -232,6 +242,9 @@ static int dwc3_pci_probe(struct pci_dev *pci,
device_init_wakeup(dev, true);
pci_set_drvdata(pci, dwc);
pm_runtime_put(dev);
+#ifdef CONFIG_PM
+ INIT_WORK(&dwc->wakeup_work, dwc3_pci_resume_work);
+#endif
return 0;
err:
@@ -243,6 +256,9 @@ static void dwc3_pci_remove(struct pci_dev *pci)
{
struct dwc3_pci *dwc = pci_get_drvdata(pci);
+#ifdef CONFIG_PM
+ cancel_work_sync(&dwc->wakeup_work);
+#endif
device_init_wakeup(&pci->dev, false);
pm_runtime_get(&pci->dev);
platform_device_unregister(dwc->dwc3);
@@ -318,14 +334,15 @@ static int dwc3_pci_runtime_suspend(struct device *dev)
static int dwc3_pci_runtime_resume(struct device *dev)
{
struct dwc3_pci *dwc = dev_get_drvdata(dev);
- struct platform_device *dwc3 = dwc->dwc3;
int ret;
ret = dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
if (ret)
return ret;
- return pm_runtime_get(&dwc3->dev);
+ queue_work(pm_wq, &dwc->wakeup_work);
+
+ return 0;
}
#endif /* CONFIG_PM */
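
The runtime-resume path above stops resuming the child platform device directly and instead queues wakeup_work on pm_wq, so the dwc3 core is brought up from process context and then allowed to autosuspend again. A generic sketch of that deferred-resume pattern for a hypothetical parent/child pair (illustrative, not dwc3-specific; assumes <linux/pm_runtime.h> and <linux/workqueue.h>):

struct sketch_glue {
	struct platform_device *child;
	struct work_struct wakeup_work;
};

static void sketch_resume_work(struct work_struct *work)
{
	struct sketch_glue *glue =
		container_of(work, struct sketch_glue, wakeup_work);

	/* Resume the child outside the parent's runtime-PM callback. */
	if (pm_runtime_get_sync(&glue->child->dev) < 0)
		return;

	pm_runtime_mark_last_busy(&glue->child->dev);
	pm_runtime_put_sync_autosuspend(&glue->child->dev);
}

static int sketch_runtime_resume(struct device *dev)
{
	struct sketch_glue *glue = dev_get_drvdata(dev);

	queue_work(pm_wq, &glue->wakeup_work);
	return 0;
}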
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 505676fd3ba4..16081383c401 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/**
* dwc3-st.c Support for dwc3 platform devices on ST Microelectronics platforms
*
@@ -10,11 +11,6 @@
* Contributors: Aymen Bouattay <aymen.bouattay@st.com>
* Peter Griffin <peter.griffin@linaro.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Inspired by dwc3-omap.c and dwc3-exynos.c.
*/
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 75e6cb044eb2..fd3e7ad2eb0e 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
@@ -487,14 +479,10 @@ static int dwc3_ep0_handle_device(struct dwc3 *dwc,
static int dwc3_ep0_handle_intf(struct dwc3 *dwc,
struct usb_ctrlrequest *ctrl, int set)
{
- enum usb_device_state state;
u32 wValue;
- u32 wIndex;
int ret = 0;
wValue = le16_to_cpu(ctrl->wValue);
- wIndex = le16_to_cpu(ctrl->wIndex);
- state = dwc->gadget.state;
switch (wValue) {
case USB_INTRF_FUNC_SUSPEND:
@@ -517,14 +505,10 @@ static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc,
struct usb_ctrlrequest *ctrl, int set)
{
struct dwc3_ep *dep;
- enum usb_device_state state;
u32 wValue;
- u32 wIndex;
int ret;
wValue = le16_to_cpu(ctrl->wValue);
- wIndex = le16_to_cpu(ctrl->wIndex);
- state = dwc->gadget.state;
switch (wValue) {
case USB_ENDPOINT_HALT:
@@ -551,10 +535,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
{
u32 recip;
int ret;
- enum usb_device_state state;
recip = ctrl->bRequestType & USB_RECIP_MASK;
- state = dwc->gadget.state;
switch (recip) {
case USB_RECIP_DEVICE:
@@ -712,12 +694,10 @@ static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
struct dwc3_ep *dep;
enum usb_device_state state = dwc->gadget.state;
u16 wLength;
- u16 wValue;
if (state == USB_STATE_DEFAULT)
return -EINVAL;
- wValue = le16_to_cpu(ctrl->wValue);
wLength = le16_to_cpu(ctrl->wLength);
if (wLength != 6) {
@@ -842,9 +822,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
struct usb_request *ur;
struct dwc3_trb *trb;
struct dwc3_ep *ep0;
- unsigned maxp;
- unsigned remaining_ur_length;
- void *buf;
u32 transferred = 0;
u32 status;
u32 length;
@@ -871,11 +848,8 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
}
ur = &r->request;
- buf = ur->buf;
- remaining_ur_length = ur->length;
length = trb->size & DWC3_TRB_SIZE_MASK;
- maxp = ep0->endpoint.maxpacket;
transferred = ur->length - length;
ur->actual += transferred;
@@ -1001,7 +975,6 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
} else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
req->request.length && req->request.zero) {
u32 maxpacket;
- u32 rem;
ret = usb_gadget_map_request_by_dev(dwc->sysdev,
&req->request, dep->number);
@@ -1009,7 +982,6 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
return;
maxpacket = dep->endpoint.maxpacket;
- rem = req->request.length % maxpacket;
/* prepare normal TRB */
dwc3_ep0_prepare_one_trb(dep, req->request.dma,
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f064f1549333..981fd986cf82 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
@@ -1151,9 +1143,6 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
- if (!dwc3_calc_trbs_left(dep))
- return;
-
/*
* We can get in a situation where there's a request in the started list
* but there weren't enough TRBs to fully kick it in the first time
@@ -1194,7 +1183,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
}
}
-static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
+static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_request *req;
@@ -1202,6 +1191,9 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
int ret;
u32 cmd;
+ if (!dwc3_calc_trbs_left(dep))
+ return 0;
+
starting = !(dep->flags & DWC3_EP_BUSY);
dwc3_prepare_trbs(dep);
@@ -1216,8 +1208,10 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
if (starting) {
params.param0 = upper_32_bits(req->trb_dma);
params.param1 = lower_32_bits(req->trb_dma);
- cmd = DWC3_DEPCMD_STARTTRANSFER |
- DWC3_DEPCMD_PARAM(cmd_param);
+ cmd = DWC3_DEPCMD_STARTTRANSFER;
+
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
} else {
cmd = DWC3_DEPCMD_UPDATETRANSFER |
DWC3_DEPCMD_PARAM(dep->resource_index);
@@ -1258,8 +1252,6 @@ static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
struct dwc3_ep *dep, u32 cur_uf)
{
- u32 uf;
-
if (list_empty(&dep->pending_list)) {
dev_info(dwc->dev, "%s: ran out of requests\n",
dep->name);
@@ -1271,9 +1263,8 @@ static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
* Schedule the first trb for one interval in the future or at
* least 4 microframes.
*/
- uf = cur_uf + max_t(u32, 4, dep->interval);
-
- __dwc3_gadget_kick_transfer(dep, uf);
+ dep->frame_number = cur_uf + max_t(u32, 4, dep->interval);
+ __dwc3_gadget_kick_transfer(dep);
}
static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
@@ -1290,7 +1281,6 @@ static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
struct dwc3 *dwc = dep->dwc;
- int ret = 0;
if (!dep->endpoint.desc) {
dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
@@ -1337,24 +1327,14 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
}
if ((dep->flags & DWC3_EP_BUSY) &&
- !(dep->flags & DWC3_EP_MISSED_ISOC)) {
- WARN_ON_ONCE(!dep->resource_index);
- ret = __dwc3_gadget_kick_transfer(dep,
- dep->resource_index);
- }
-
- goto out;
- }
+ !(dep->flags & DWC3_EP_MISSED_ISOC))
+ goto out;
- if (!dwc3_calc_trbs_left(dep))
return 0;
+ }
- ret = __dwc3_gadget_kick_transfer(dep, 0);
out:
- if (ret == -EBUSY)
- ret = 0;
-
- return ret;
+ return __dwc3_gadget_kick_transfer(dep);
}
static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
@@ -2347,7 +2327,7 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
req->request.actual = length - req->remaining;
if ((req->request.actual < length) && req->num_pending_sgs)
- return __dwc3_gadget_kick_transfer(dep, 0);
+ return __dwc3_gadget_kick_transfer(dep);
dwc3_gadget_giveback(dep, req, status);
@@ -2440,13 +2420,8 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
if (!dep->endpoint.desc)
return;
- if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- int ret;
-
- ret = __dwc3_gadget_kick_transfer(dep, 0);
- if (!ret || ret == -EBUSY)
- return;
- }
+ if (!usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ __dwc3_gadget_kick_transfer(dep);
}
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
@@ -2487,15 +2462,10 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dwc3_endpoint_transfer_complete(dwc, dep, event);
break;
case DWC3_DEPEVT_XFERNOTREADY:
- if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
dwc3_gadget_start_isoc(dwc, dep, event);
- } else {
- int ret;
-
- ret = __dwc3_gadget_kick_transfer(dep, 0);
- if (!ret || ret == -EBUSY)
- return;
- }
+ else
+ __dwc3_gadget_kick_transfer(dep);
break;
case DWC3_DEPEVT_STREAMEVT:
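For orientation, the net effect of the gadget.c hunks above is that __dwc3_gadget_kick_transfer() now derives everything from the endpoint itself: it returns 0 early when dwc3_calc_trbs_left() reports no room, and an isochronous start picks up dep->frame_number rather than a caller-supplied cmd_param, so every call site collapses to __dwc3_gadget_kick_transfer(dep). A minimal, self-contained sketch of that call shape (illustrative names and fields only, not driver code):

/*
 * Standalone sketch of the refactored call shape. The struct fields mirror
 * the driver state the hunks rely on (DWC3_EP_BUSY, dwc3_calc_trbs_left(),
 * dep->frame_number) but are otherwise made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct sketch_ep {
	bool		busy;		/* mirrors DWC3_EP_BUSY */
	bool		isoc;		/* isochronous endpoint? */
	unsigned int	trbs_left;	/* mirrors dwc3_calc_trbs_left() */
	unsigned int	frame_number;	/* set by the isoc-start path */
};

static int sketch_kick_transfer(struct sketch_ep *ep)
{
	if (!ep->trbs_left)
		return 0;		/* nothing to map; not an error */

	if (!ep->busy && ep->isoc)
		printf("START_TRANSFER, frame %u\n", ep->frame_number);
	else if (!ep->busy)
		printf("START_TRANSFER\n");
	else
		printf("UPDATE_TRANSFER\n");
	return 0;
}

int main(void)
{
	struct sketch_ep ep = { .isoc = true, .trbs_left = 4, .frame_number = 42 };

	return sketch_kick_transfer(&ep);
}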
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 4a3227543255..578aa856f986 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* gadget.h - DesignWare USB3 DRD Gadget Header
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DRIVERS_USB_DWC3_GADGET_H
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 76f0b0df37c1..1a3878a3be78 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* host.c - DesignWare USB3 DRD Controller Host Glue
*
* Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com
*
* Authors: Felipe Balbi <balbi@ti.com>,
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/platform_device.h>
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index c69b06696824..70acdf94a0bf 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* io.h - DesignWare USB3 DRD IO Header
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DRIVERS_USB_DWC3_IO_H
diff --git a/drivers/usb/dwc3/trace.c b/drivers/usb/dwc3/trace.c
index 6cd166412ad0..f8886f3f3c9e 100644
--- a/drivers/usb/dwc3/trace.c
+++ b/drivers/usb/dwc3/trace.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* trace.c - DesignWare USB3 DRD Controller Trace Support
*
* Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
*
* Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define CREATE_TRACE_POINTS
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index 6504b116da04..a9dd5c64e6c7 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* trace.h - DesignWare USB3 DRD Controller Trace Support
*
* Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
*
* Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#undef TRACE_SYSTEM
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
index e87ce8e9edee..f62b5f3c2d67 100644
--- a/drivers/usb/dwc3/ulpi.c
+++ b/drivers/usb/dwc3/ulpi.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* ulpi.c - DesignWare USB3 Controller's ULPI PHY interface
*
* Copyright (C) 2015 Intel Corporation
*
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/ulpi/regs.h>
diff --git a/drivers/usb/early/Makefile b/drivers/usb/early/Makefile
index fcde2286da1c..7b77b49d3b8c 100644
--- a/drivers/usb/early/Makefile
+++ b/drivers/usb/early/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for early USB devices
#
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index e2654443e8eb..d633c2abe5a4 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Standalone EHCI usb debug driver
*
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index 12fe70beae69..8a700b45b9a9 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* xhci-dbc.c - xHCI debug capability early driver
*
* Copyright (C) 2016 Intel Corporation
*
* Author: Lu Baolu <baolu.lu@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h
index 2df0f6e613fe..673686eeddd7 100644
--- a/drivers/usb/early/xhci-dbc.h
+++ b/drivers/usb/early/xhci-dbc.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xhci-dbc.h - xHCI debug capability early driver
*
* Copyright (C) 2016 Intel Corporation
*
* Author: Lu Baolu <baolu.lu@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_XHCI_DBC_H
@@ -90,8 +87,8 @@ struct xdbc_context {
#define XDBC_INFO_CONTEXT_SIZE 48
#define XDBC_MAX_STRING_LENGTH 64
-#define XDBC_STRING_MANUFACTURER "Linux"
-#define XDBC_STRING_PRODUCT "Remote GDB"
+#define XDBC_STRING_MANUFACTURER "Linux Foundation"
+#define XDBC_STRING_PRODUCT "Linux USB GDB Target"
#define XDBC_STRING_SERIAL "0001"
struct xdbc_strings {
@@ -103,7 +100,7 @@ struct xdbc_strings {
#define XDBC_PROTOCOL 1 /* GNU Remote Debug Command Set */
#define XDBC_VENDOR_ID 0x1d6b /* Linux Foundation 0x1d6b */
-#define XDBC_PRODUCT_ID 0x0004 /* __le16 idProduct; device 0004 */
+#define XDBC_PRODUCT_ID 0x0011 /* __le16 idProduct; device 0011 */
#define XDBC_DEVICE_REV 0x0010 /* 0.10 */
/*
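The xhci-dbc.h hunk above gives the early debug-capability gadget a distinct product ID (0x0011, previously 0x0004) and more descriptive manufacturer/product strings under the Linux Foundation vendor ID 0x1d6b. A throwaway sketch that just prints the identity a debug host would enumerate; the constants are copied from the hunk, everything else is illustrative:

#include <stdio.h>

#define SKETCH_VENDOR_ID	0x1d6b	/* Linux Foundation */
#define SKETCH_PRODUCT_ID	0x0011	/* updated from 0x0004 */
#define SKETCH_MANUFACTURER	"Linux Foundation"
#define SKETCH_PRODUCT		"Linux USB GDB Target"
#define SKETCH_SERIAL		"0001"

int main(void)
{
	/* Roughly what a host-side listing of this device would show. */
	printf("%04x:%04x %s %s (serial %s)\n",
	       SKETCH_VENDOR_ID, SKETCH_PRODUCT_ID,
	       SKETCH_MANUFACTURER, SKETCH_PRODUCT, SKETCH_SERIAL);
	return 0;
}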
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 5d061b3d8224..eec14e6ed20b 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* composite.c - infrastructure for Composite USB Gadgets
*
* Copyright (C) 2006-2008 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -170,20 +166,20 @@ int config_ep_by_speed(struct usb_gadget *g,
want_comp_desc = 1;
break;
}
- /* else: Fall trough */
+ /* fall through */
case USB_SPEED_SUPER:
if (gadget_is_superspeed(g)) {
speed_desc = f->ss_descriptors;
want_comp_desc = 1;
break;
}
- /* else: Fall trough */
+ /* fall through */
case USB_SPEED_HIGH:
if (gadget_is_dualspeed(g)) {
speed_desc = f->hs_descriptors;
break;
}
- /* else: fall through */
+ /* fall through */
default:
speed_desc = f->fs_descriptors;
}
@@ -224,6 +220,7 @@ ep_found:
case USB_ENDPOINT_XFER_ISOC:
/* mult: bits 1:0 of bmAttributes */
_ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
+ /* fall through */
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
_ep->maxburst = comp_desc->bMaxBurst + 1;
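The composite.c hunks above normalise the fall-through annotations: the misspelled "else: Fall trough" comments become the canonical "/* fall through */", and the isochronous case gains a previously missing annotation before falling into the bulk/interrupt handling. The point of the canonical wording is that compilers warning on implicit fall-through treat such a comment as a statement of intent. A self-contained illustration of the style (not driver code; case values are arbitrary):

#include <stdio.h>

static const char *speed_name(int speed)
{
	switch (speed) {
	case 3:			/* e.g. super-speed-plus */
		/* fall through */
	case 2:			/* e.g. super-speed */
		return "super";
	case 1:
		return "high";
	default:
		return "full/low";
	}
}

int main(void)
{
	printf("%s\n", speed_name(3));
	return 0;
}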
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 17a6077b89a4..2d115353424c 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* usb/gadget/config.c -- simplify building config descriptors
*
* Copyright (C) 2003 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/errno.h>
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index aeb9f3c40521..efba66ca0719 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/configfs.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -505,13 +506,13 @@ static struct configfs_attribute *gadget_config_attrs[] = {
NULL,
};
-static struct config_item_type gadget_config_type = {
+static const struct config_item_type gadget_config_type = {
.ct_item_ops = &gadget_config_item_ops,
.ct_attrs = gadget_config_attrs,
.ct_owner = THIS_MODULE,
};
-static struct config_item_type gadget_root_type = {
+static const struct config_item_type gadget_root_type = {
.ct_item_ops = &gadget_root_item_ops,
.ct_attrs = gadget_root_attrs,
.ct_owner = THIS_MODULE,
@@ -593,7 +594,7 @@ static struct configfs_group_operations functions_ops = {
.drop_item = &function_drop,
};
-static struct config_item_type functions_type = {
+static const struct config_item_type functions_type = {
.ct_group_ops = &functions_ops,
.ct_owner = THIS_MODULE,
};
@@ -694,7 +695,7 @@ static struct configfs_group_operations config_desc_ops = {
.drop_item = &config_desc_drop,
};
-static struct config_item_type config_desc_type = {
+static const struct config_item_type config_desc_type = {
.ct_group_ops = &config_desc_ops,
.ct_owner = THIS_MODULE,
};
@@ -1476,7 +1477,7 @@ static struct configfs_group_operations gadgets_ops = {
.drop_item = &gadgets_drop,
};
-static struct config_item_type gadgets_type = {
+static const struct config_item_type gadgets_type = {
.ct_group_ops = &gadgets_ops,
.ct_owner = THIS_MODULE,
};
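The configfs.c hunks above only add const to the config_item_type definitions; that is possible because these ops/type tables are never written after initialisation, so they can live in read-only data once the configfs core accepts const pointers (which the constification in this series assumes). A standalone sketch of the same pattern with made-up names, not the configfs API itself:

#include <stdio.h>

struct sketch_ops {
	void (*show)(const char *name);
};

struct sketch_type {
	const struct sketch_ops	*ops;
	const char		*owner;
};

static void sketch_show(const char *name)
{
	printf("item: %s\n", name);
}

static const struct sketch_ops demo_ops = {
	.show	= sketch_show,
};

/* Analogous to: static const struct config_item_type gadget_config_type = { ... }; */
static const struct sketch_type demo_type = {
	.ops	= &demo_ops,
	.owner	= "demo",
};

int main(void)
{
	demo_type.ops->show("cfg.1");
	return 0;
}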
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 30fdab0ae383..71b15c65b90f 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* epautoconf.c -- endpoint autoconfiguration for usb gadget drivers
*
* Copyright (C) 2004 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 5e3828d9dac7..9fc98de83624 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_acm.c -- USB CDC serial (ACM) function driver
*
@@ -6,10 +7,6 @@
* Copyright (C) 2008 by Nokia Corporation
* Copyright (C) 2009 by Samsung Electronics
* Author: Michal Nazarewicz (mina86@mina86.com)
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * either version 2 of that License or (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -786,7 +783,7 @@ static struct configfs_attribute *acm_attrs[] = {
NULL,
};
-static struct config_item_type acm_func_type = {
+static const struct config_item_type acm_func_type = {
.ct_item_ops = &acm_item_ops,
.ct_attrs = acm_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 4c488d15b6f6..b104ed0c1ab5 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_ecm.c -- USB CDC Ethernet (ECM) link function driver
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -845,7 +841,7 @@ static struct configfs_attribute *ecm_attrs[] = {
NULL,
};
-static struct config_item_type ecm_func_type = {
+static const struct config_item_type ecm_func_type = {
.ct_item_ops = &ecm_item_ops,
.ct_attrs = ecm_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index 007ec6e4a5d4..37557651b600 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_eem.c -- USB CDC Ethernet (EEM) link function driver
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2009 EF Johnson Technologies
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
@@ -556,7 +552,7 @@ static struct configfs_attribute *eem_attrs[] = {
NULL,
};
-static struct config_item_type eem_func_type = {
+static const struct config_item_type eem_func_type = {
.ct_item_ops = &eem_item_ops,
.ct_attrs = eem_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8b342587f8ad..97ea059a7aa4 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_fs.c -- user mode file system API for USB composite function controllers
*
@@ -7,11 +8,6 @@
* Based on inode.c (GadgetFS) which was:
* Copyright (C) 2003-2004 David Brownell
* Copyright (C) 2003 Agilent Technologies
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
@@ -3385,7 +3381,7 @@ static struct configfs_item_operations ffs_item_ops = {
.release = ffs_attr_release,
};
-static struct config_item_type ffs_func_type = {
+static const struct config_item_type ffs_func_type = {
.ct_item_ops = &ffs_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -3677,6 +3673,7 @@ static void ffs_closed(struct ffs_data *ffs)
goto done;
ffs_obj->desc_ready = false;
+ ffs_obj->ffs_data = NULL;
if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
ffs_obj->ffs_closed_callback)
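The one-line addition to ffs_closed() above clears ffs_obj->ffs_data alongside desc_ready, so code that later looks up the ffs_dev no longer sees a pointer to instance data that has already been torn down. A self-contained sketch of that back-pointer hygiene pattern (illustrative types, not the f_fs implementation):

#include <stddef.h>
#include <stdio.h>

struct data {
	int value;
};

struct dev_entry {
	int		desc_ready;
	struct data	*data;		/* back-pointer that may go stale */
};

static void entry_closed(struct dev_entry *entry)
{
	entry->desc_ready = 0;
	entry->data = NULL;		/* analogous to ffs_obj->ffs_data = NULL */
}

static void entry_use(struct dev_entry *entry)
{
	if (!entry->data) {		/* safe test instead of a stale dereference */
		printf("no data bound\n");
		return;
	}
	printf("value=%d\n", entry->data->value);
}

int main(void)
{
	struct data d = { .value = 7 };
	struct dev_entry e = { .desc_ready = 1, .data = &d };

	entry_closed(&e);
	entry_use(&e);
	return 0;
}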
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index d8e359ef6eb1..daae35318a3a 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_hid.c -- USB HID function driver
*
* Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
@@ -992,7 +988,7 @@ static struct configfs_attribute *hid_attrs[] = {
NULL,
};
-static struct config_item_type hid_func_type = {
+static const struct config_item_type hid_func_type = {
.ct_item_ops = &hidg_item_ops,
.ct_attrs = hid_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index e70093835e14..1803646b3678 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_loopback.c - USB peripheral loopback configuration driver
*
* Copyright (C) 2003-2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -556,7 +552,7 @@ static struct configfs_attribute *lb_attrs[] = {
NULL,
};
-static struct config_item_type lb_func_type = {
+static const struct config_item_type lb_func_type = {
.ct_item_ops = &lb_item_ops,
.ct_attrs = lb_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 5153e29870c3..acecd13dcbd9 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* f_mass_storage.c -- Mass Storage USB Composite Function
*
@@ -3140,7 +3141,7 @@ static struct configfs_attribute *fsg_lun_attrs[] = {
NULL,
};
-static struct config_item_type fsg_lun_type = {
+static const struct config_item_type fsg_lun_type = {
.ct_item_ops = &fsg_lun_item_ops,
.ct_attrs = fsg_lun_attrs,
.ct_owner = THIS_MODULE,
@@ -3331,7 +3332,7 @@ static struct configfs_group_operations fsg_group_ops = {
.drop_item = fsg_lun_drop,
};
-static struct config_item_type fsg_func_type = {
+static const struct config_item_type fsg_func_type = {
.ct_item_ops = &fsg_item_ops,
.ct_group_ops = &fsg_group_ops,
.ct_attrs = fsg_attrs,
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 5d3d7941d2c2..4eb96b91cc40 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_midi.c -- USB MIDI class function driver
*
@@ -15,8 +16,6 @@
* and drivers/usb/gadget/midi.c,
* Copyright (C) 2006 Thumtronics Pty Ltd.
* Ben Williamson <ben.williamson@greyinnovation.com>
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/kernel.h>
@@ -1189,7 +1188,7 @@ static struct configfs_attribute *midi_attrs[] = {
NULL,
};
-static struct config_item_type midi_func_type = {
+static const struct config_item_type midi_func_type = {
.ct_item_ops = &midi_item_ops,
.ct_attrs = midi_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 45b334ceaf2e..c5bce8e22983 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_ncm.c -- USB CDC Network (NCM) link function driver
*
@@ -8,11 +9,6 @@
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
@@ -1568,7 +1564,7 @@ static struct configfs_attribute *ncm_attrs[] = {
NULL,
};
-static struct config_item_type ncm_func_type = {
+static const struct config_item_type ncm_func_type = {
.ct_item_ops = &ncm_item_ops,
.ct_attrs = ncm_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index d43e86cea74f..55b7f57d2dc7 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_obex.c -- USB CDC OBEX function driver
*
@@ -5,11 +6,6 @@
* Contact: Felipe Balbi <felipe.balbi@nokia.com>
*
* Based on f_acm.c by Al Borchers and David Brownell.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -411,7 +407,7 @@ static struct configfs_attribute *acm_attrs[] = {
NULL,
};
-static struct config_item_type obex_func_type = {
+static const struct config_item_type obex_func_type = {
.ct_item_ops = &obex_item_ops,
.ct_attrs = acm_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index 9c4c58e4a1a2..7889bcc0509a 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* f_phonet.c -- USB CDC Phonet function
*
* Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
*
* Author: Rémi Denis-Courmont
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#include <linux/mm.h>
@@ -215,6 +212,7 @@ static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req)
case -ESHUTDOWN: /* disconnected */
case -ECONNRESET: /* disabled */
dev->stats.tx_aborted_errors++;
+ /* fall through */
default:
dev->stats.tx_errors++;
}
@@ -362,6 +360,7 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
/* Do resubmit in these cases: */
case -EOVERFLOW: /* request buffer overflow */
dev->stats.rx_over_errors++;
+ /* fall through */
default:
dev->stats.rx_errors++;
break;
@@ -599,7 +598,7 @@ static struct configfs_attribute *phonet_attrs[] = {
NULL,
};
-static struct config_item_type phonet_func_type = {
+static const struct config_item_type phonet_func_type = {
.ct_item_ops = &phonet_item_ops,
.ct_attrs = phonet_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index ea0da35a44e2..dd607b99eb1d 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_printer.c - USB printer function driver
*
@@ -8,11 +9,6 @@
*
* Copyright (C) 2003-2005 David Brownell
* Copyright (C) 2006 Craig W. Nadler
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
@@ -1261,7 +1257,7 @@ static struct configfs_attribute *printer_attrs[] = {
NULL,
};
-static struct config_item_type printer_func_type = {
+static const struct config_item_type printer_func_type = {
.ct_item_ops = &printer_item_ops,
.ct_attrs = printer_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index c7c5b3ce1d98..d48df36622b7 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_rndis.c -- RNDIS link function driver
*
@@ -6,11 +7,6 @@
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz (mina86@mina86.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -890,7 +886,7 @@ static struct configfs_attribute *rndis_attrs[] = {
NULL,
};
-static struct config_item_type rndis_func_type = {
+static const struct config_item_type rndis_func_type = {
.ct_item_ops = &rndis_item_ops,
.ct_attrs = rndis_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index cb00ada21d9c..c860f30a0ea2 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_serial.c - generic USB serial function driver
*
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * either version 2 of that License or (at your option) any later version.
*/
#include <linux/slab.h>
@@ -281,7 +278,7 @@ static struct configfs_attribute *acm_attrs[] = {
NULL,
};
-static struct config_item_type serial_func_type = {
+static const struct config_item_type serial_func_type = {
.ct_item_ops = &serial_item_ops,
.ct_attrs = acm_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 8784fa12ea2c..9cdef108fb1b 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_sourcesink.c - USB peripheral source/sink configuration driver
*
* Copyright (C) 2003-2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -1230,7 +1226,7 @@ static struct configfs_attribute *ss_attrs[] = {
NULL,
};
-static struct config_item_type ss_func_type = {
+static const struct config_item_type ss_func_type = {
.ct_item_ops = &ss_item_ops,
.ct_attrs = ss_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c
index 434b983f3b4c..4d945254905d 100644
--- a/drivers/usb/gadget/function/f_subset.c
+++ b/drivers/usb/gadget/function/f_subset.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_subset.c -- "CDC Subset" Ethernet link function driver
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/slab.h>
@@ -412,7 +408,7 @@ static struct configfs_attribute *gether_attrs[] = {
NULL,
};
-static struct config_item_type gether_func_type = {
+static const struct config_item_type gether_func_type = {
.ct_item_ops = &gether_item_ops,
.ct_attrs = gether_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index a82e2bd5ea34..da81cf16b850 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1,10 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/* Target based USB-Gadget
*
* UAS protocol handling, target callbacks, configfs handling,
* BBB (USB Mass Storage Class Bulk-Only (BBB)) and Transport protocol handling.
*
* Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
- * License: GPLv2 as published by FSF.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -1145,6 +1145,7 @@ static int usbg_submit_command(struct f_uas *fu,
default:
pr_debug_once("Unsupported prio_attr: %02x.\n",
cmd_iu->prio_attr);
+ /* fall through */
case UAS_SIMPLE_TAG:
cmd->prio_attr = TCM_SIMPLE_TAG;
break;
@@ -2166,7 +2167,7 @@ static struct configfs_item_operations tcm_item_ops = {
.release = tcm_attr_release,
};
-static struct config_item_type tcm_func_type = {
+static const struct config_item_type tcm_func_type = {
.ct_item_ops = &tcm_item_ops,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 29efbedc91f9..2746a926a8d9 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_uac1.c -- USB Audio Class 1.0 Function (using u_audio API)
*
@@ -10,11 +11,6 @@
* This file is based on f_uac1.c which is
* Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
* Copyright (C) 2008 Analog Devices, Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/usb/audio.h>
@@ -709,7 +705,7 @@ static struct configfs_attribute *f_uac1_attrs[] = {
NULL,
};
-static struct config_item_type f_uac1_func_type = {
+static const struct config_item_type f_uac1_func_type = {
.ct_item_ops = &f_uac1_item_ops,
.ct_attrs = f_uac1_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_uac1_legacy.c b/drivers/usb/gadget/function/f_uac1_legacy.c
index 5d229e72912e..04f4b2862256 100644
--- a/drivers/usb/gadget/function/f_uac1_legacy.c
+++ b/drivers/usb/gadget/function/f_uac1_legacy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_audio.c -- USB Audio class function driver
*
@@ -5,8 +6,6 @@
* Copyright (C) 2008 Analog Devices, Inc
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/slab.h>
@@ -921,7 +920,7 @@ static struct configfs_attribute *f_uac1_attrs[] = {
NULL,
};
-static struct config_item_type f_uac1_func_type = {
+static const struct config_item_type f_uac1_func_type = {
.ct_item_ops = &f_uac1_item_ops,
.ct_attrs = f_uac1_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index f05c3f3e6103..11fe788b4308 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_uac2.c -- USB Audio Class 2.0 Function
*
* Copyright (C) 2011
* Yadwinder Singh (yadi.brar01@gmail.com)
* Jaswinder Singh (jaswinder.singh@linaro.org)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/usb/audio.h>
@@ -921,7 +917,7 @@ static struct configfs_attribute *f_uac2_attrs[] = {
NULL,
};
-static struct config_item_type f_uac2_func_type = {
+static const struct config_item_type f_uac2_func_type = {
.ct_item_ops = &f_uac2_item_ops,
.ct_attrs = f_uac2_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index f8a1881609a2..439eba660e95 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* uvc_gadget.c -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/function/f_uvc.h b/drivers/usb/gadget/function/f_uvc.h
index d0a73bdcbba1..81defe4557fe 100644
--- a/drivers/usb/gadget/function/f_uvc.h
+++ b/drivers/usb/gadget/function/f_uvc.h
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_uvc.h -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _F_UVC_H_
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index d6341045c631..51dd3e90b06c 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RNDIS MSG parser
*
* Authors: Benedikt Spranger, Pengutronix
* Robert Schwebel, Pengutronix
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
* This software was originally developed in conformance with
* Microsoft's Remote NDIS Specification License Agreement.
*
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index 21e0430ffb98..c7e3a70ce6c1 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RNDIS Definitions for Remote NDIS
*
* Authors: Benedikt Spranger, Pengutronix
* Robert Schwebel, Pengutronix
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
* This software was originally developed in conformance with
* Microsoft's Remote NDIS Specification License Agreement.
*/
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index 8fbf6861690d..f7e6c42558eb 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* storage_common.c -- Common definitions for mass storage functionality
*
* Copyright (C) 2003-2008 Alan Stern
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz (mina86@mina86.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 3971bbab88bd..a72295c953bb 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_audio.c -- interface to USB gadget "ALSA sound card" utilities
*
@@ -9,16 +10,6 @@
* Copyright (C) 2011
* Yadwinder Singh (yadi.brar01@gmail.com)
* Jaswinder Singh (jaswinder.singh@linaro.org)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/usb/gadget/function/u_audio.h b/drivers/usb/gadget/function/u_audio.h
index 07e13784cbb8..81d3d4ed6dfb 100644
--- a/drivers/usb/gadget/function/u_audio.h
+++ b/drivers/usb/gadget/function/u_audio.h
@@ -1,19 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_audio.h -- interface to USB gadget "ALSA sound card" utilities
*
* Copyright (C) 2016
* Author: Ruslan Bilovol <ruslan.bilovol@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __U_AUDIO_H
diff --git a/drivers/usb/gadget/function/u_ecm.h b/drivers/usb/gadget/function/u_ecm.h
index 262cc03cc2c0..050aa672ee7f 100644
--- a/drivers/usb/gadget/function/u_ecm.h
+++ b/drivers/usb/gadget/function/u_ecm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_ecm.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_ECM_H
diff --git a/drivers/usb/gadget/function/u_eem.h b/drivers/usb/gadget/function/u_eem.h
index e3ae97874c4f..de3828d3e8f0 100644
--- a/drivers/usb/gadget/function/u_eem.h
+++ b/drivers/usb/gadget/function/u_eem.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_eem.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_EEM_H
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index bdbc3fdc7c4f..6fcda62f55ea 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index c77145bd6b5b..332307d54292 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_ether.h -- interface to USB gadget "ethernet link" utilities
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __U_ETHER_H
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index e4c3f84af4c3..cd33cee4d78b 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_ether_configfs.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __U_ETHER_CONFIGFS_H
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index 79f70ebf85dc..c3aba4dfa958 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_fs.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_FFS_H
diff --git a/drivers/usb/gadget/function/u_gether.h b/drivers/usb/gadget/function/u_gether.h
index d4078426ba5d..5b7e2eb90336 100644
--- a/drivers/usb/gadget/function/u_gether.h
+++ b/drivers/usb/gadget/function/u_gether.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_gether.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_GETHER_H
diff --git a/drivers/usb/gadget/function/u_hid.h b/drivers/usb/gadget/function/u_hid.h
index aaa0e368a159..2f5ca4bfa7ff 100644
--- a/drivers/usb/gadget/function/u_hid.h
+++ b/drivers/usb/gadget/function/u_hid.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_hid.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_HID_H
diff --git a/drivers/usb/gadget/function/u_midi.h b/drivers/usb/gadget/function/u_midi.h
index 22510189758e..5599aa5fc977 100644
--- a/drivers/usb/gadget/function/u_midi.h
+++ b/drivers/usb/gadget/function/u_midi.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_midi.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_MIDI_H
diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
index ce0f3a78ca13..67324f983343 100644
--- a/drivers/usb/gadget/function/u_ncm.h
+++ b/drivers/usb/gadget/function/u_ncm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_ncm.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_NCM_H
diff --git a/drivers/usb/gadget/function/u_phonet.h b/drivers/usb/gadget/function/u_phonet.h
index 98ced18779ea..12fb613f85d1 100644
--- a/drivers/usb/gadget/function/u_phonet.h
+++ b/drivers/usb/gadget/function/u_phonet.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_phonet.h - interface to Phonet
*
* Copyright (C) 2007-2008 by Nokia Corporation
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * either version 2 of that License or (at your option) any later version.
*/
#ifndef __U_PHONET_H
diff --git a/drivers/usb/gadget/function/u_printer.h b/drivers/usb/gadget/function/u_printer.h
index 8d30b7577f87..6088ff744194 100644
--- a/drivers/usb/gadget/function/u_printer.h
+++ b/drivers/usb/gadget/function/u_printer.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_printer.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_PRINTER_H
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
index efdb7ac381d9..d65fb4ebac3c 100644
--- a/drivers/usb/gadget/function/u_rndis.h
+++ b/drivers/usb/gadget/function/u_rndis.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_rndis.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_RNDIS_H
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 4176216d54be..4d653d2960d4 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_serial.c - utilities for USB gadget "serial port"/TTY support
*
@@ -9,10 +10,6 @@
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2000 Peter Berger (pberger@brimson.com)
* Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * either version 2 of that License or (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -1078,6 +1075,7 @@ static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
default:
pr_warn("%s: unexpected %s status %d\n",
__func__, ep->name, req->status);
+ /* fall through */
case 0:
/* normal completion */
spin_lock(&info->con_lock);
diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h
index c20210c0babd..9acaac1cbb75 100644
--- a/drivers/usb/gadget/function/u_serial.h
+++ b/drivers/usb/gadget/function/u_serial.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_serial.h - interface to USB gadget "serial port"/TTY utilities
*
* Copyright (C) 2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * either version 2 of that License or (at your option) any later version.
*/
#ifndef __U_SERIAL_H
diff --git a/drivers/usb/gadget/function/u_tcm.h b/drivers/usb/gadget/function/u_tcm.h
index 0bd751e0483f..3f7ccecb0f9b 100644
--- a/drivers/usb/gadget/function/u_tcm.h
+++ b/drivers/usb/gadget/function/u_tcm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_tcm.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@xxxxxxxxxxx>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_TCM_H
diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h
index 6f188fd8633f..6f1a9d73defe 100644
--- a/drivers/usb/gadget/function/u_uac1.h
+++ b/drivers/usb/gadget/function/u_uac1.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_uac1.h - Utility definitions for UAC1 function
*
* Copyright (C) 2016 Ruslan Bilovol <ruslan.bilovol@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __U_UAC1_H
diff --git a/drivers/usb/gadget/function/u_uac1_legacy.c b/drivers/usb/gadget/function/u_uac1_legacy.c
index fa4684a1c54c..cbc868d117af 100644
--- a/drivers/usb/gadget/function/u_uac1_legacy.c
+++ b/drivers/usb/gadget/function/u_uac1_legacy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_uac1.c -- ALSA audio utilities for Gadget stack
*
@@ -5,8 +6,6 @@
* Copyright (C) 2008 Analog Devices, Inc
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/function/u_uac1_legacy.h b/drivers/usb/gadget/function/u_uac1_legacy.h
index d715b1af56a4..dd69e408a3d9 100644
--- a/drivers/usb/gadget/function/u_uac1_legacy.h
+++ b/drivers/usb/gadget/function/u_uac1_legacy.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_uac1.h -- interface to USB gadget "ALSA AUDIO" utilities
*
@@ -5,8 +6,6 @@
* Copyright (C) 2008 Analog Devices, Inc
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef __U_UAC1_LEGACY_H
diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h
index 19eeb83538a5..8362ee572e1e 100644
--- a/drivers/usb/gadget/function/u_uac2.h
+++ b/drivers/usb/gadget/function/u_uac2.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_uac2.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_UAC2_H
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index 4676b60a5063..d00d3ded71c0 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_uvc.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_UVC_H
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 11d70dead32b..a64e07e61f8c 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* uvc_gadget.h -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _UVC_GADGET_H_
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 844cb738bafd..c9b8cc4aae5a 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* uvc_configfs.c
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include "u_uvc.h"
#include "uvc_configfs.h"
@@ -127,7 +124,7 @@ static struct configfs_attribute *uvcg_control_header_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_control_header_type = {
+static const struct config_item_type uvcg_control_header_type = {
.ct_attrs = uvcg_control_header_attrs,
.ct_owner = THIS_MODULE,
};
@@ -170,7 +167,7 @@ static struct configfs_group_operations uvcg_control_header_grp_ops = {
.drop_item = uvcg_control_header_drop,
};
-static struct config_item_type uvcg_control_header_grp_type = {
+static const struct config_item_type uvcg_control_header_grp_type = {
.ct_group_ops = &uvcg_control_header_grp_ops,
.ct_owner = THIS_MODULE,
};
@@ -265,7 +262,7 @@ static struct configfs_attribute *uvcg_default_processing_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_default_processing_type = {
+static const struct config_item_type uvcg_default_processing_type = {
.ct_attrs = uvcg_default_processing_attrs,
.ct_owner = THIS_MODULE,
};
@@ -277,7 +274,7 @@ static struct uvcg_processing_grp {
struct config_group group;
} uvcg_processing_grp;
-static struct config_item_type uvcg_processing_grp_type = {
+static const struct config_item_type uvcg_processing_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -382,7 +379,7 @@ static struct configfs_attribute *uvcg_default_camera_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_default_camera_type = {
+static const struct config_item_type uvcg_default_camera_type = {
.ct_attrs = uvcg_default_camera_attrs,
.ct_owner = THIS_MODULE,
};
@@ -394,7 +391,7 @@ static struct uvcg_camera_grp {
struct config_group group;
} uvcg_camera_grp;
-static struct config_item_type uvcg_camera_grp_type = {
+static const struct config_item_type uvcg_camera_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -460,7 +457,7 @@ static struct configfs_attribute *uvcg_default_output_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_default_output_type = {
+static const struct config_item_type uvcg_default_output_type = {
.ct_attrs = uvcg_default_output_attrs,
.ct_owner = THIS_MODULE,
};
@@ -472,7 +469,7 @@ static struct uvcg_output_grp {
struct config_group group;
} uvcg_output_grp;
-static struct config_item_type uvcg_output_grp_type = {
+static const struct config_item_type uvcg_output_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -481,7 +478,7 @@ static struct uvcg_terminal_grp {
struct config_group group;
} uvcg_terminal_grp;
-static struct config_item_type uvcg_terminal_grp_type = {
+static const struct config_item_type uvcg_terminal_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -586,7 +583,7 @@ static struct configfs_item_operations uvcg_control_class_item_ops = {
.drop_link = uvcg_control_class_drop_link,
};
-static struct config_item_type uvcg_control_class_type = {
+static const struct config_item_type uvcg_control_class_type = {
.ct_item_ops = &uvcg_control_class_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -596,7 +593,7 @@ static struct uvcg_control_class_grp {
struct config_group group;
} uvcg_control_class_grp;
-static struct config_item_type uvcg_control_class_grp_type = {
+static const struct config_item_type uvcg_control_class_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -605,7 +602,7 @@ static struct uvcg_control_grp {
struct config_group group;
} uvcg_control_grp;
-static struct config_item_type uvcg_control_grp_type = {
+static const struct config_item_type uvcg_control_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -857,7 +854,7 @@ static struct configfs_attribute *uvcg_streaming_header_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_streaming_header_type = {
+static const struct config_item_type uvcg_streaming_header_type = {
.ct_item_ops = &uvcg_streaming_header_item_ops,
.ct_attrs = uvcg_streaming_header_attrs,
.ct_owner = THIS_MODULE,
@@ -901,7 +898,7 @@ static struct configfs_group_operations uvcg_streaming_header_grp_ops = {
.drop_item = uvcg_streaming_header_drop,
};
-static struct config_item_type uvcg_streaming_header_grp_type = {
+static const struct config_item_type uvcg_streaming_header_grp_type = {
.ct_group_ops = &uvcg_streaming_header_grp_ops,
.ct_owner = THIS_MODULE,
};
@@ -1150,7 +1147,7 @@ static struct configfs_attribute *uvcg_frame_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_frame_type = {
+static const struct config_item_type uvcg_frame_type = {
.ct_attrs = uvcg_frame_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1419,7 +1416,7 @@ static struct configfs_attribute *uvcg_uncompressed_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_uncompressed_type = {
+static const struct config_item_type uvcg_uncompressed_type = {
.ct_group_ops = &uvcg_uncompressed_group_ops,
.ct_attrs = uvcg_uncompressed_attrs,
.ct_owner = THIS_MODULE,
@@ -1469,7 +1466,7 @@ static struct configfs_group_operations uvcg_uncompressed_grp_ops = {
.drop_item = uvcg_uncompressed_drop,
};
-static struct config_item_type uvcg_uncompressed_grp_type = {
+static const struct config_item_type uvcg_uncompressed_grp_type = {
.ct_group_ops = &uvcg_uncompressed_grp_ops,
.ct_owner = THIS_MODULE,
};
@@ -1619,7 +1616,7 @@ static struct configfs_attribute *uvcg_mjpeg_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_mjpeg_type = {
+static const struct config_item_type uvcg_mjpeg_type = {
.ct_group_ops = &uvcg_mjpeg_group_ops,
.ct_attrs = uvcg_mjpeg_attrs,
.ct_owner = THIS_MODULE,
@@ -1663,7 +1660,7 @@ static struct configfs_group_operations uvcg_mjpeg_grp_ops = {
.drop_item = uvcg_mjpeg_drop,
};
-static struct config_item_type uvcg_mjpeg_grp_type = {
+static const struct config_item_type uvcg_mjpeg_grp_type = {
.ct_group_ops = &uvcg_mjpeg_grp_ops,
.ct_owner = THIS_MODULE,
};
@@ -1728,7 +1725,7 @@ static struct configfs_attribute *uvcg_default_color_matching_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_default_color_matching_type = {
+static const struct config_item_type uvcg_default_color_matching_type = {
.ct_attrs = uvcg_default_color_matching_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1740,7 +1737,7 @@ static struct uvcg_color_matching_grp {
struct config_group group;
} uvcg_color_matching_grp;
-static struct config_item_type uvcg_color_matching_grp_type = {
+static const struct config_item_type uvcg_color_matching_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -2085,7 +2082,7 @@ static struct configfs_item_operations uvcg_streaming_class_item_ops = {
.drop_link = uvcg_streaming_class_drop_link,
};
-static struct config_item_type uvcg_streaming_class_type = {
+static const struct config_item_type uvcg_streaming_class_type = {
.ct_item_ops = &uvcg_streaming_class_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -2095,7 +2092,7 @@ static struct uvcg_streaming_class_grp {
struct config_group group;
} uvcg_streaming_class_grp;
-static struct config_item_type uvcg_streaming_class_grp_type = {
+static const struct config_item_type uvcg_streaming_class_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -2104,7 +2101,7 @@ static struct uvcg_streaming_grp {
struct config_group group;
} uvcg_streaming_grp;
-static struct config_item_type uvcg_streaming_grp_type = {
+static const struct config_item_type uvcg_streaming_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -2190,7 +2187,7 @@ static struct configfs_attribute *uvc_attrs[] = {
NULL,
};
-static struct config_item_type uvc_func_type = {
+static const struct config_item_type uvc_func_type = {
.ct_item_ops = &uvc_item_ops,
.ct_attrs = uvc_attrs,
.ct_owner = THIS_MODULE,
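
Note: all of the uvc_configfs.c hunks above apply one mechanical change — every struct config_item_type the driver registers is marked const, so the descriptors can live in read-only memory. This assumes the configfs registration helpers accept a const type pointer, as they do in current kernels. A minimal sketch of the resulting shape (the names are illustrative, not taken from this file):

#include <linux/configfs.h>
#include <linux/module.h>

/* Sketch only: an item type with no attributes or operations; configfs
 * never writes to this structure, so it can safely be const. */
static const struct config_item_type example_grp_type = {
	.ct_owner	= THIS_MODULE,
};

/* Typical registration keeps only a pointer to the const type. */
static struct config_group example_grp;

static void example_register(void)
{
	config_group_init_type_name(&example_grp, "example",
				    &example_grp_type);
}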
diff --git a/drivers/usb/gadget/function/uvc_configfs.h b/drivers/usb/gadget/function/uvc_configfs.h
index 085e67be7c71..8549c0b27b9d 100644
--- a/drivers/usb/gadget/function/uvc_configfs.h
+++ b/drivers/usb/gadget/function/uvc_configfs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* uvc_configfs.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef UVC_CONFIGFS_H
#define UVC_CONFIGFS_H
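
Note: every file touched from here on gets the same license cleanup — the multi-line GPL notice is deleted and a single machine-readable SPDX tag becomes the first line. The identifier encodes what the removed text said: "version 2 only" boilerplate becomes GPL-2.0, and "version 2 ... or (at your option) any later version" becomes GPL-2.0+. The resulting file heads look roughly like this (file name and copyright holder are placeholders):

// SPDX-License-Identifier: GPL-2.0+
/*
 * example.c -- one-line description of the file
 *
 * Copyright (C) 20xx Example Copyright Holder
 */

#include <linux/kernel.h>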
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 6377e9fee6e5..278d50ff1eea 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* uvc_queue.c -- USB Video Class driver - Buffers management
*
* Copyright (C) 2005-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/atomic.h>
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 3e22b45687d3..f3069db6f08e 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* uvc_v4l2.c -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
@@ -354,7 +350,7 @@ static unsigned long uvcg_v4l2_get_unmapped_area(struct file *file,
}
#endif
-struct v4l2_file_operations uvc_v4l2_fops = {
+const struct v4l2_file_operations uvc_v4l2_fops = {
.owner = THIS_MODULE,
.open = uvc_v4l2_open,
.release = uvc_v4l2_release,
diff --git a/drivers/usb/gadget/function/uvc_v4l2.h b/drivers/usb/gadget/function/uvc_v4l2.h
index 2683b92fda65..a75e9c397446 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.h
+++ b/drivers/usb/gadget/function/uvc_v4l2.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* uvc_v4l2.h -- USB Video Class Gadget driver
*
@@ -7,16 +8,12 @@
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __UVC_V4L2_H__
#define __UVC_V4L2_H__
extern const struct v4l2_ioctl_ops uvc_v4l2_ioctl_ops;
-extern struct v4l2_file_operations uvc_v4l2_fops;
+extern const struct v4l2_file_operations uvc_v4l2_fops;
#endif /* __UVC_V4L2_H__ */
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 0f01c04d7cbd..d3567b90343a 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* uvc_video.c -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/function/uvc_video.h b/drivers/usb/gadget/function/uvc_video.h
index ef00f06fa00b..6c20aa75f966 100644
--- a/drivers/usb/gadget/function/uvc_video.h
+++ b/drivers/usb/gadget/function/uvc_video.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* uvc_video.h -- USB Video Class Gadget driver
*
@@ -7,10 +8,6 @@
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __UVC_VIDEO_H__
#define __UVC_VIDEO_H__
diff --git a/drivers/usb/gadget/functions.c b/drivers/usb/gadget/functions.c
index b13f839e7368..203361a64212 100644
--- a/drivers/usb/gadget/functions.c
+++ b/drivers/usb/gadget/functions.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index c39de65a448b..af16672d5118 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* acm_ms.c -- Composite driver, with ACM and mass storage support
*
@@ -7,11 +8,6 @@
* Modified: Klaus Schwarzkopf <schwarzkopf@sensortherm.de>
*
* Heavily based on multi.c and cdc2.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c
index 1f5cdbe162df..7b11dce98b94 100644
--- a/drivers/usb/gadget/legacy/audio.c
+++ b/drivers/usb/gadget/legacy/audio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* audio.c -- Audio gadget driver
*
@@ -5,8 +6,6 @@
* Copyright (C) 2008 Analog Devices, Inc
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
/* #define VERBOSE_DEBUG */
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
index 51c08682de84..da1c37933ca1 100644
--- a/drivers/usb/gadget/legacy/cdc2.c
+++ b/drivers/usb/gadget/legacy/cdc2.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* cdc2.c -- CDC Composite driver, with ECM and ACM support
*
* Copyright (C) 2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
index 99ca3dabc4f3..e1d566c9918a 100644
--- a/drivers/usb/gadget/legacy/dbgp.c
+++ b/drivers/usb/gadget/legacy/dbgp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dbgp.c -- EHCI Debug Port device gadget
*
diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
index 25a2c2e48592..30313b233680 100644
--- a/drivers/usb/gadget/legacy/ether.c
+++ b/drivers/usb/gadget/legacy/ether.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ether.c -- Ethernet gadget driver, with CDC and non-CDC options
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index 6da7316f8e87..b640ed3fcf70 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* g_ffs.c -- user mode file system API for USB composite function controllers
*
* Copyright (C) 2010 Samsung Electronics
* Author: Michal Nazarewicz <mina86@mina86.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#define pr_fmt(fmt) "g_ffs: " fmt
diff --git a/drivers/usb/gadget/legacy/gmidi.c b/drivers/usb/gadget/legacy/gmidi.c
index 0bf39c3ccdb1..9eea2d18f2bf 100644
--- a/drivers/usb/gadget/legacy/gmidi.c
+++ b/drivers/usb/gadget/legacy/gmidi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* gmidi.c -- USB MIDI Gadget Driver
*
@@ -5,9 +6,6 @@
* Developed for Thumtronics by Grey Innovation
* Ben Williamson <ben.williamson@greyinnovation.com>
*
- * This software is distributed under the terms of the GNU General Public
- * License ("GPL") version 2, as published by the Free Software Foundation.
- *
* This code is based in part on:
*
* Gadget Zero driver, Copyright (C) 2003-2004 David Brownell.
diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c
index a71a884f79fc..c4eda7fe7ab4 100644
--- a/drivers/usb/gadget/legacy/hid.c
+++ b/drivers/usb/gadget/legacy/hid.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* hid.c -- HID Composite driver
*
* Based on multi.c
*
* Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 5c28bee327e1..9343ec436485 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* inode.c -- user mode filesystem api for usb gadget controllers
*
* Copyright (C) 2003-2004 David Brownell
* Copyright (C) 2003 Agilent Technologies
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index fcba59782f26..ef3d25259b0e 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* mass_storage.c -- Mass Storage USB Gadget
*
@@ -5,11 +6,6 @@
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz <mina86@mina86.com>
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
index a70a406580ea..50515f9e1022 100644
--- a/drivers/usb/gadget/legacy/multi.c
+++ b/drivers/usb/gadget/legacy/multi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* multi.c -- Multifunction Composite driver
*
@@ -5,11 +6,6 @@
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz (mina86@mina86.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index 0aba68253e3d..fcee1ee0bf66 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ncm.c -- NCM gadget driver
*
@@ -9,11 +10,6 @@
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define DEBUG */
diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c
index b1e535f4022e..978c1a34a932 100644
--- a/drivers/usb/gadget/legacy/nokia.c
+++ b/drivers/usb/gadget/legacy/nokia.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* nokia.c -- Nokia Composite Gadget Driver
*
@@ -9,10 +10,6 @@
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * version 2 of that License.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c
index 4c9cfff34a03..57858f0c2b6c 100644
--- a/drivers/usb/gadget/legacy/printer.c
+++ b/drivers/usb/gadget/legacy/printer.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* printer.c -- Printer gadget driver
*
* Copyright (C) 2003-2005 David Brownell
* Copyright (C) 2006 Craig W. Nadler
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
diff --git a/drivers/usb/gadget/legacy/serial.c b/drivers/usb/gadget/legacy/serial.c
index 9d89adce756d..de30d7628eef 100644
--- a/drivers/usb/gadget/legacy/serial.c
+++ b/drivers/usb/gadget/legacy/serial.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* serial.c -- USB gadget serial driver
*
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * either version 2 of that License or (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 0b0bb98319cd..682bf99dcf76 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -1,10 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/* Target based USB-Gadget
*
* UAS protocol handling, target callbacks, configfs handling,
* BBB (USB Mass Storage Class Bulk-Only (BBB)) and Transport protocol handling.

*
* Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
- * License: GPLv2 as published by FSF.
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
index 82c13fce9232..6b86568c9157 100644
--- a/drivers/usb/gadget/legacy/webcam.c
+++ b/drivers/usb/gadget/legacy/webcam.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* webcam.c -- USB webcam gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c
index d02e2ce73ea5..6e84b44c8a3b 100644
--- a/drivers/usb/gadget/legacy/zero.c
+++ b/drivers/usb/gadget/legacy/zero.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zero.c -- Gadget Zero, for USB development
*
* Copyright (C) 2003-2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
@@ -154,10 +150,11 @@ static struct usb_gadget_strings *dev_strings[] = {
/*-------------------------------------------------------------------------*/
static struct timer_list autoresume_timer;
+static struct usb_composite_dev *autoresume_cdev;
-static void zero_autoresume(unsigned long _c)
+static void zero_autoresume(struct timer_list *unused)
{
- struct usb_composite_dev *cdev = (void *)_c;
+ struct usb_composite_dev *cdev = autoresume_cdev;
struct usb_gadget *g = cdev->gadget;
/* unconfigured devices can't issue wakeups */
@@ -282,7 +279,8 @@ static int zero_bind(struct usb_composite_dev *cdev)
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
device_desc.iSerialNumber = strings_dev[USB_GADGET_SERIAL_IDX].id;
- setup_timer(&autoresume_timer, zero_autoresume, (unsigned long) cdev);
+ autoresume_cdev = cdev;
+ timer_setup(&autoresume_timer, zero_autoresume, 0);
func_inst_ss = usb_get_function_instance("SourceSink");
if (IS_ERR(func_inst_ss))
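
Note: the zero.c hunks above convert the legacy timer API — timer callbacks now receive the struct timer_list pointer itself rather than an opaque unsigned long, so the composite device is parked in the static autoresume_cdev instead of being cast through the timer. When the timer is embedded in a driver structure, the usual idiom is from_timer(); a sketch under that assumption (the type and field names are illustrative):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_dev {
	struct timer_list timer;
	/* ... other driver state ... */
};

static void my_timeout(struct timer_list *t)
{
	/* Recover the containing structure from the timer pointer. */
	struct my_dev *dev = from_timer(dev, t, timer);

	/* handle the timeout using dev ... */
}

static void my_start(struct my_dev *dev)
{
	timer_setup(&dev->timer, my_timeout, 0);
	mod_timer(&dev->timer, jiffies + HZ);
}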
diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c
index 18839732c840..dbaa46eee853 100644
--- a/drivers/usb/gadget/u_f.c
+++ b/drivers/usb/gadget/u_f.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_f.c -- USB function utilities for Gadget stack
*
@@ -5,10 +6,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include "u_f.h"
diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
index 7d53a4773d1a..c3fbef2bb5db 100644
--- a/drivers/usb/gadget/u_f.h
+++ b/drivers/usb/gadget/u_f.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_f.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __U_F_H__
diff --git a/drivers/usb/gadget/u_os_desc.h b/drivers/usb/gadget/u_os_desc.h
index 947b7ddff691..8acd21779ac8 100644
--- a/drivers/usb/gadget/u_os_desc.h
+++ b/drivers/usb/gadget/u_os_desc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_os_desc.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __U_OS_DESC_H__
diff --git a/drivers/usb/gadget/udc/amd5536udc.h b/drivers/usb/gadget/udc/amd5536udc.h
index 4fe22d432af2..dfdef6a28904 100644
--- a/drivers/usb/gadget/udc/amd5536udc.h
+++ b/drivers/usb/gadget/udc/amd5536udc.h
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* amd5536.h -- header for AMD 5536 UDC high/full speed USB device controller
*
* Copyright (C) 2007 AMD (http://www.amd.com)
* Author: Thomas Dahlmann
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef AMD5536UDC_H
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
index 57a13f080a79..57b6f66331cf 100644
--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
+++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* amd5536udc_pci.c -- AMD 5536 UDC high/full speed USB device controller
*
* Copyright (C) 2005-2007 AMD (http://www.amd.com)
* Author: Thomas Dahlmann
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index 8bc78418d40e..bfe278294e88 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* at91_udc -- driver for at91-series USB peripheral controller
*
* Copyright (C) 2004 by Thomas Rathbone
* Copyright (C) 2005 by HP Labs
* Copyright (C) 2005 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#undef VERBOSE_DEBUG
diff --git a/drivers/usb/gadget/udc/at91_udc.h b/drivers/usb/gadget/udc/at91_udc.h
index 9bbe72764f31..fd58c5b81826 100644
--- a/drivers/usb/gadget/udc/at91_udc.h
+++ b/drivers/usb/gadget/udc/at91_udc.h
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2004 by Thomas Rathbone, HP Labs
* Copyright (C) 2005 by Ivan Kokshaysky
* Copyright (C) 2006 by SAN People
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef AT91_UDC_H
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index a884c022df7a..075eaaa8a408 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Atmel USBA high speed USB device controller
*
* Copyright (C) 2005-2007 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/clk/at91_pmc.h>
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index f8ebe0389bd4..860a00a6fdd0 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Atmel USBA high speed USB device controller
*
* Copyright (C) 2005-2007 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_USB_GADGET_USBA_UDC_H__
#define __LINUX_USB_GADGET_USBA_UDC_H__
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index f78503203f42..29f254793592 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
*
* Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
* Copyright (C) 2012 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/bitops.h>
diff --git a/drivers/usb/gadget/udc/bdc/bdc.h b/drivers/usb/gadget/udc/bdc/bdc.h
index 6df0352cdc50..6e1e881dc51e 100644
--- a/drivers/usb/gadget/udc/bdc/bdc.h
+++ b/drivers/usb/gadget/udc/bdc/bdc.h
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc.h - header for the BRCM BDC USB3.0 device controller
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_BDC_H__
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
index 6e920f1dce02..6305bf2c8b59 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_cmd.c - BRCM BDC USB3.0 device controller
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/scatterlist.h>
#include <linux/slab.h>
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.h b/drivers/usb/gadget/udc/bdc/bdc_cmd.h
index 61d0e3bf9853..29cc988a671a 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.h
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.h
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_cmd.h - header for the BDC debug functions
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_BDC_CMD_H__
#define __LINUX_BDC_CMD_H__
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 7a8af4b916cf..d39f070acbd7 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_core.c - BRCM BDC USB3.0 device controller core operations
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.c b/drivers/usb/gadget/udc/bdc/bdc_dbg.c
index ac98f6f681b7..7ba7448ad743 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_dbg.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_dbg.c - BRCM BDC USB3.0 device controller debug functions
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include "bdc.h"
diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.h b/drivers/usb/gadget/udc/bdc/bdc_dbg.h
index 338a6c701315..373d5abffbb8 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_dbg.h
+++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.h
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_dbg.h - header for the BDC debug functions
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_BDC_DBG_H__
#define __LINUX_BDC_DBG_H__
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index bfd8f7ade935..f40d4c13cfa4 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_ep.c - BRCM BDC USB3.0 device controller endpoint related functions
*
@@ -6,12 +7,6 @@
* Author: Ashwini Pahuja
*
* Based on drivers under drivers/usb/
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.h b/drivers/usb/gadget/udc/bdc/bdc_ep.h
index 8a6b36cbf2ea..a37ff8033b4f 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.h
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.h
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_ep.h - header for the BDC debug functions
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_BDC_EP_H__
#define __LINUX_BDC_EP_H__
diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c
index 02968842b359..1e940f054cb8 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_pci.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_pci.c - BRCM BDC USB3.0 device controller PCI interface file.
*
@@ -6,12 +7,6 @@
* Author: Ashwini Pahuja
*
* Based on drivers under drivers/usb/
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
index c84346146456..7bfd58c846f7 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_udc.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_udc.c - BRCM BDC USB3.0 device controller gadget ops

*
@@ -6,12 +7,6 @@
* Author: Ashwini Pahuja
*
* Based on drivers under drivers/usb/gadget/udc/
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index d41d07aae0ce..61422d624ad0 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* udc.c - Core UDC Framework
*
* Copyright (C) 2010 Texas Instruments
* Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
@@ -912,7 +901,7 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
return 0;
type = usb_endpoint_type(desc);
- max = 0x7ff & usb_endpoint_maxp(desc);
+ max = usb_endpoint_maxp(desc);
if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
return 0;
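
Note: the "0x7ff &" mask disappears because usb_endpoint_maxp() was changed to return only the packet-size bits of wMaxPacketSize; the high-bandwidth multiplier is reported separately by usb_endpoint_maxp_mult(). The same change appears again in gr_udc.c further down. A sketch of how the two helpers split the descriptor field after that change:

#include <linux/printk.h>
#include <linux/usb/ch9.h>

static void example_show_maxp(const struct usb_endpoint_descriptor *desc)
{
	/* payload size only; the helper already masks bits 10:0 */
	unsigned int maxp = usb_endpoint_maxp(desc);

	/* 1..3 transactions per microframe for high-bandwidth endpoints */
	unsigned int mult = usb_endpoint_maxp_mult(desc);

	pr_debug("maxp=%u mult=%u\n", maxp, mult);
}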
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index f04e91ef9e7c..4f1b1809472c 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* dummy_hcd.c -- Dummy/Loopback USB host and device emulator driver.
*
@@ -5,11 +6,6 @@
*
* Copyright (C) 2003 David Brownell
* Copyright (C) 2003-2005 Alan Stern
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
@@ -23,6 +19,8 @@
*
* Having this all in one kernel can help some stages of development,
* bypassing some hardware (and driver) issues. UML could help too.
+ *
+ * Note: The emulation does not include isochronous transfers!
*/
#include <linux/module.h>
@@ -137,6 +135,9 @@ static const struct {
.caps = _caps, \
}
+/* we don't provide isochronous endpoints since we don't support them */
+#define TYPE_BULK_OR_INT (USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT)
+
/* everyone has ep0 */
EP_INFO(ep0name,
USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
@@ -145,64 +146,72 @@ static const struct {
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep2out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+/*
EP_INFO("ep3in-iso",
USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep4out-iso",
USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+*/
EP_INFO("ep5in-int",
USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep6in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep7out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+/*
EP_INFO("ep8in-iso",
USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep9out-iso",
USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+*/
EP_INFO("ep10in-int",
USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep11in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep12out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+/*
EP_INFO("ep13in-iso",
USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep14out-iso",
USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+*/
EP_INFO("ep15in-int",
USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+
/* or like sa1100: two fixed function endpoints */
EP_INFO("ep1out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep2in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+
/* and now some generic EPs so we have enough in multi config */
EP_INFO("ep3out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep4in",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep5out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep6out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep7in",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep8out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep9in",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep10out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep11out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep12in",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep13out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep14in",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep15out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
#undef EP_INFO
};
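
Note: with isochronous support dropped from the emulator, the generic endpoints above can no longer advertise USB_EP_CAPS_TYPE_ALL (which includes ISO), hence the local TYPE_BULK_OR_INT mask. The capability fields are plain bitmask-and-direction pairs, so a restricted capability is just an OR of the type flags; a sketch of the declaration the macro expands to, standalone outside the EP_INFO table (the variable name is illustrative):

#include <linux/usb/gadget.h>

#define TYPE_BULK_OR_INT (USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT)

/* an OUT endpoint that accepts bulk or interrupt transfers, never ISO */
static const struct usb_ep_caps example_out_caps =
	USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT);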
@@ -556,10 +565,12 @@ static int dummy_enable(struct usb_ep *_ep,
if (max <= 1024)
break;
/* save a return statement */
+ /* fall through */
case USB_SPEED_FULL:
if (max <= 64)
break;
/* save a return statement */
+ /* fall through */
default:
if (max <= 8)
break;
@@ -577,6 +588,7 @@ static int dummy_enable(struct usb_ep *_ep,
if (max <= 1024)
break;
/* save a return statement */
+ /* fall through */
case USB_SPEED_FULL:
if (max <= 1023)
break;
@@ -1769,6 +1781,7 @@ static void dummy_timer(unsigned long _dum_hcd)
int i;
/* simplistic model for one frame's bandwidth */
+ /* FIXME: account for transaction and packet overhead */
switch (dum->gadget.speed) {
case USB_SPEED_LOW:
total = 8/*bytes*/ * 12/*packets*/;
@@ -1813,7 +1826,6 @@ restart:
struct dummy_request *req;
u8 address;
struct dummy_ep *ep = NULL;
- int type;
int status = -EINPROGRESS;
/* stop when we reach URBs queued after the timer interrupt */
@@ -1825,14 +1837,10 @@ restart:
goto return_urb;
else if (dum_hcd->rh_state != DUMMY_RH_RUNNING)
continue;
- type = usb_pipetype(urb->pipe);
- /* used up this frame's non-periodic bandwidth?
- * FIXME there's infinite bandwidth for control and
- * periodic transfers ... unrealistic.
- */
- if (total <= 0 && type == PIPE_BULK)
- continue;
+ /* Used up this frame's bandwidth? */
+ if (total <= 0)
+ break;
/* find the gadget's ep for this request (if configured) */
address = usb_pipeendpoint (urb->pipe);
@@ -1930,13 +1938,17 @@ restart:
limit = total;
switch (usb_pipetype(urb->pipe)) {
case PIPE_ISOCHRONOUS:
- /* FIXME is it urb->interval since the last xfer?
- * use urb->iso_frame_desc[i].
- * complete whether or not ep has requests queued.
- * report random errors, to debug drivers.
+ /*
+ * We don't support isochronous. But if we did,
+ * here are some of the issues we'd have to face:
+ *
+ * Is it urb->interval since the last xfer?
+ * Use urb->iso_frame_desc[i].
+ * Complete whether or not ep has requests queued.
+ * Report random errors, to debug drivers.
*/
limit = max(limit, periodic_bytes(dum, ep));
- status = -ENOSYS;
+ status = -EINVAL; /* fail all xfers */
break;
case PIPE_INTERRUPT:
@@ -2433,9 +2445,7 @@ static DEVICE_ATTR_RO(urbs);
static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
- init_timer(&dum_hcd->timer);
- dum_hcd->timer.function = dummy_timer;
- dum_hcd->timer.data = (unsigned long)dum_hcd;
+ setup_timer(&dum_hcd->timer, dummy_timer, (unsigned long)dum_hcd);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
dum_hcd->stream_en_ep = 0;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
@@ -2464,9 +2474,7 @@ static int dummy_start(struct usb_hcd *hcd)
return dummy_start_ss(dum_hcd);
spin_lock_init(&dum_hcd->dum->lock);
- init_timer(&dum_hcd->timer);
- dum_hcd->timer.function = dummy_timer;
- dum_hcd->timer.data = (unsigned long)dum_hcd;
+ setup_timer(&dum_hcd->timer, dummy_timer, (unsigned long)dum_hcd);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 78d0204e3e20..53a48f561458 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* FOTG210 UDC Driver supports Bulk transfer so far
*
* Copyright (C) 2013 Faraday Technology Corporation
*
* Author : Yuan-Hsin Chen <yhchen@faraday-tech.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/gadget/udc/fotg210.h b/drivers/usb/gadget/udc/fotg210.h
index bbf991bcbe7c..08c32957503b 100644
--- a/drivers/usb/gadget/udc/fotg210.h
+++ b/drivers/usb/gadget/udc/fotg210.h
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Faraday FOTG210 USB OTG controller
*
* Copyright (C) 2013 Faraday Technology Corporation
* Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/udc/fsl_mxc_udc.c b/drivers/usb/gadget/udc/fsl_mxc_udc.c
index f16e149c5b3e..f29cf5c6160c 100644
--- a/drivers/usb/gadget/udc/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/udc/fsl_mxc_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2009
* Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
@@ -5,11 +6,6 @@
* Description:
* Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
* driver to function correctly on these systems.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index a3e72d690eef..2707be628298 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* driver/usb/gadget/fsl_qe_udc.c
*
@@ -11,11 +12,6 @@
* Freescale QE/CPM USB Peripheral Controller Driver
* The controller can be found on MPC8360, MPC8272, etc.
* MPC8360 Rev 1.1 may need QE microcode update
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#undef USB_TRACE
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.h b/drivers/usb/gadget/udc/fsl_qe_udc.h
index 7026919fc901..2c537a904ee7 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.h
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/usb/gadget/qe_udc.h
*
@@ -8,11 +9,6 @@
*
* Description:
* Freescale USB device/endpoint management registers
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
*/
#ifndef __FSL_QE_UDC_H
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 6f2f71c054be..d606d4f13098 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2004-2007,2011-2012 Freescale Semiconductor, Inc.
* All rights reserved.
@@ -10,11 +11,6 @@
* This can be found on MPC8349E/MPC8313E/MPC5121E cpus.
* The driver is previously named as mpc_udc. Based on bare board
* code from Dave Liu and Shlomi Gridish.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#undef VERBOSE
diff --git a/drivers/usb/gadget/udc/fsl_usb2_udc.h b/drivers/usb/gadget/udc/fsl_usb2_udc.h
index e92b8408b6f6..4ba651ae9048 100644
--- a/drivers/usb/gadget/udc/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2004,2012 Freescale Semiconductor, Inc
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
* Freescale USB device/endpoint management registers
*/
#ifndef __FSL_USB2_UDC_H
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index e0c1b0099265..263804d154a7 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Fusb300 UDC (USB gadget)
*
* Copyright (C) 2010 Faraday Technology Corp.
*
* Author : Yuan-hsin Chen <yhchen@faraday-tech.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/dma-mapping.h>
#include <linux/err.h>
diff --git a/drivers/usb/gadget/udc/fusb300_udc.h b/drivers/usb/gadget/udc/fusb300_udc.h
index ad39f892d200..eb3d6d379ba7 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.h
+++ b/drivers/usb/gadget/udc/fusb300_udc.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Fusb300 UDC (USB gadget)
*
* Copyright (C) 2010 Faraday Technology Corp.
*
* Author : Yuan-hsin Chen <yhchen@faraday-tech.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index 8433c22900dc..4504d0b202db 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Toshiba TC86C001 ("Goku-S") USB Device Controller driver
*
@@ -5,10 +6,6 @@
* by Stuart Lynne, Tom Rushworth, and Bruce Balden
* Copyright (C) 2002 Toshiba Corporation
* Copyright (C) 2003 MontaVista Software (source@mvista.com)
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
/*
@@ -127,11 +124,15 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
mode = 0;
max = get_unaligned_le16(&desc->wMaxPacketSize);
switch (max) {
- case 64: mode++;
- case 32: mode++;
- case 16: mode++;
- case 8: mode <<= 3;
- break;
+ case 64:
+ mode++; /* fall through */
+ case 32:
+ mode++; /* fall through */
+ case 16:
+ mode++; /* fall through */
+ case 8:
+ mode <<= 3;
+ break;
default:
return -EINVAL;
}
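
Note: the goku_ep_enable switch is rewritten one label per line with explicit annotations, so -Wimplicit-fallthrough and static checkers can tell the deliberate fall-throughs from forgotten breaks. Restated as a freestanding helper (a sketch, not the driver's actual function), the cascade just encodes the packet size into the controller's buffer-mode field:

#include <linux/errno.h>

static int example_mode_from_maxpacket(unsigned int max)
{
	int mode = 0;

	switch (max) {
	case 64:
		mode++;		/* fall through */
	case 32:
		mode++;		/* fall through */
	case 16:
		mode++;		/* fall through */
	case 8:
		mode <<= 3;	/* mode is now (log2(max) - 3) << 3 */
		break;
	default:
		return -EINVAL;
	}

	return mode;
}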
diff --git a/drivers/usb/gadget/udc/goku_udc.h b/drivers/usb/gadget/udc/goku_udc.h
index 86d2adafe149..26601bf4e7a9 100644
--- a/drivers/usb/gadget/udc/goku_udc.h
+++ b/drivers/usb/gadget/udc/goku_udc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Toshiba TC86C001 ("Goku-S") USB Device Controller driver
*
@@ -5,10 +6,6 @@
* by Stuart Lynne, Tom Rushworth, and Bruce Balden
* Copyright (C) 2002 Toshiba Corporation
* Copyright (C) 2003 MontaVista Software (source@mvista.com)
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
/*
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 1f9941145746..b3fb1bbdb854 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
*
@@ -9,11 +10,6 @@
* Full documentation of the GRUSBDC core can be found here:
* http://www.gaisler.com/products/grlib/grip.pdf
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
* Contributors:
* - Andreas Larsson <andreas@gaisler.com>
* - Marko Isomaki
@@ -1261,7 +1257,7 @@ static int gr_handle_in_ep(struct gr_ep *ep)
if (!req->last_desc)
return 0;
- if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
+ if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
return 0; /* Not put in hardware buffers yet */
if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
@@ -1290,7 +1286,7 @@ static int gr_handle_out_ep(struct gr_ep *ep)
if (!req->curr_desc)
return 0;
- ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
+ ctrl = READ_ONCE(req->curr_desc->ctrl);
if (ctrl & GR_DESC_OUT_CTRL_EN)
return 0; /* Not received yet */
@@ -1538,7 +1534,7 @@ static int gr_ep_enable(struct usb_ep *_ep,
* Bits 10-0 set the max payload. 12-11 set the number of
* additional transactions.
*/
- max = 0x7ff & usb_endpoint_maxp(desc);
+ max = usb_endpoint_maxp(desc);
nt = usb_endpoint_maxp_mult(desc) - 1;
buffer_size = GR_BUFFER_SIZE(epctrl);
if (nt && (mode == 0 || mode == 2)) {
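
Note: the gr_udc.c hunks swap ACCESS_ONCE() for READ_ONCE(). Both force the compiler to perform exactly one load of a value that may change underneath it — here a DMA descriptor control word written by the controller — but READ_ONCE also handles non-scalar types and is the interface the kernel has standardised on. A minimal sketch (the descriptor layout and enable bit are illustrative, not the driver's real GR_DESC_*_CTRL_EN definitions):

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct example_desc {
	u32 ctrl;		/* updated by the controller via DMA */
};

static bool example_desc_still_enabled(const struct example_desc *desc)
{
	/* a single, non-cached load of the hardware-owned word */
	return READ_ONCE(desc->ctrl) & BIT(0);
}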
diff --git a/drivers/usb/gadget/udc/gr_udc.h b/drivers/usb/gadget/udc/gr_udc.h
index 4297c4e8021f..3e913268c8c5 100644
--- a/drivers/usb/gadget/udc/gr_udc.h
+++ b/drivers/usb/gadget/udc/gr_udc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
*
@@ -9,11 +10,6 @@
* Full documentation of the GRUSBDC core can be found here:
* http://www.gaisler.com/products/grlib/grip.pdf
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
* Contributors:
* - Andreas Larsson <andreas@gaisler.com>
* - Marko Isomaki
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 8f32b5ee7734..b0781771704e 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Gadget driver for LPC32xx
*
@@ -12,20 +13,6 @@
*
* Note: This driver is based on original work done by Mike James for
* the LPC3180.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/clk.h>
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index 46ce7bc15f2b..f19e6282a688 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* M66592 UDC (USB gadget)
*
* Copyright (C) 2006-2007 Renesas Solutions Corp.
*
* Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/module.h>
@@ -1592,9 +1589,7 @@ static int m66592_probe(struct platform_device *pdev)
m66592->gadget.max_speed = USB_SPEED_HIGH;
m66592->gadget.name = udc_name;
- init_timer(&m66592->timer);
- m66592->timer.function = m66592_timer;
- m66592->timer.data = (unsigned long)m66592;
+ setup_timer(&m66592->timer, m66592_timer, (unsigned long)m66592);
m66592->reg = reg;
ret = request_irq(ires->start, m66592_irq, IRQF_SHARED,
diff --git a/drivers/usb/gadget/udc/m66592-udc.h b/drivers/usb/gadget/udc/m66592-udc.h
index 96d49d7bfb6b..01a64685b8a3 100644
--- a/drivers/usb/gadget/udc/m66592-udc.h
+++ b/drivers/usb/gadget/udc/m66592-udc.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* M66592 UDC (USB gadget)
*
* Copyright (C) 2006-2007 Renesas Solutions Corp.
*
* Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#ifndef __M66592_UDC_H__
diff --git a/drivers/usb/gadget/udc/mv_u3d.h b/drivers/usb/gadget/udc/mv_u3d.h
index e32a787ac373..982625b7197a 100644
--- a/drivers/usb/gadget/udc/mv_u3d.h
+++ b/drivers/usb/gadget/udc/mv_u3d.h
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
*/
#ifndef __MV_U3D_H
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 772049afe166..35e02a8d0091 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
*/
#include <linux/module.h>
diff --git a/drivers/usb/gadget/udc/mv_udc.h b/drivers/usb/gadget/udc/mv_udc.h
index be77f207dbaf..b3f759c0962c 100644
--- a/drivers/usb/gadget/udc/mv_udc.h
+++ b/drivers/usb/gadget/udc/mv_udc.h
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __MV_UDC_H
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 4103bf7cf52a..95f52232493b 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
* Author: Chao Xie <chao.xie@marvell.com>
* Neil Zhang <zhangwm@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/module.h>
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 8f85a51bd2b3..660878a19505 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for PLX NET2272 USB device controller
*
* Copyright (C) 2005-2006 PLX Technology, Inc.
* Copyright (C) 2006-2011 Analog Devices, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/delay.h>
diff --git a/drivers/usb/gadget/udc/net2272.h b/drivers/usb/gadget/udc/net2272.h
index 69bc9c3c6ce4..8e644627992d 100644
--- a/drivers/usb/gadget/udc/net2272.h
+++ b/drivers/usb/gadget/udc/net2272.h
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* PLX NET2272 high/full speed USB device controller
*
* Copyright (C) 2005-2006 PLX Technology, Inc.
* Copyright (C) 2006-2011 Analog Devices, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __NET2272_H__
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index f608c1f85e61..318246d8b2e2 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for the PLX NET2280 USB device controller.
* Specs and errata are available from <http://www.plxtech.com>.
@@ -31,11 +32,6 @@
*
* Modified Ricardo Ribalda Qtechnology AS to provide compatibility
* with usb 338x chip. Based on PLX driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 1088c3745999..b65a797544d7 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* NetChip 2280 high/full speed USB device controller.
* Unlike many such controllers, this one talks PCI.
@@ -7,11 +8,6 @@
* Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
* Copyright (C) 2003 David Brownell
* Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/usb/net2280.h>
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index f05ba6825bfe..fc7f810baef7 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* omap_udc.c -- for OMAP full speed udc; most chips support OTG.
*
@@ -5,11 +6,6 @@
* Copyright (C) 2004-2005 David Brownell
*
* OMAP2 & DMA support by Kyungmin Park <kyungmin.park@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#undef DEBUG
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 84dcbcd756f0..afaea11ec771 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index a238da906115..8f135d9fa245 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Intel PXA25x and IXP4xx on-chip full speed USB device controllers
*
@@ -6,11 +7,6 @@
* Copyright (C) 2003 Benedikt Spranger, Pengutronix
* Copyright (C) 2003 David Brownell
* Copyright (C) 2003 Joshua Wise
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -2417,9 +2413,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
gpio_direction_output(dev->mach->gpio_pullup, 0);
}
- init_timer(&dev->timer);
- dev->timer.function = udc_watchdog;
- dev->timer.data = (unsigned long) dev;
+ setup_timer(&dev->timer, udc_watchdog, (unsigned long)dev);
the_controller = dev;
platform_set_drvdata(pdev, dev);
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.h b/drivers/usb/gadget/udc/pxa25x_udc.h
index a458bec2536d..ccc6b921f067 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.h
+++ b/drivers/usb/gadget/udc/pxa25x_udc.h
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Intel PXA25x on-chip full speed USB device controller
*
* Copyright (C) 2003 Robert Schwebel <r.schwebel@pengutronix.de>, Pengutronix
* Copyright (C) 2003 David Brownell
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_USB_GADGET_PXA25X_H
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index d48e239660c3..be2761f1b3f5 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Handles the Intel 27x USB Device Controller (UDC)
*
* Inspired by original driver by Frank Becker, David Brownell, and others.
* Copyright (C) 2008 Robert Jarzmik
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.h b/drivers/usb/gadget/udc/pxa27x_udc.h
index cea2cb79b30c..1128d39a4255 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.h
+++ b/drivers/usb/gadget/udc/pxa27x_udc.h
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* linux/drivers/usb/gadget/pxa27x_udc.h
* Intel PXA27x on-chip full speed USB device controller
*
* Inspired by original driver by Frank Becker, David Brownell, and others.
* Copyright (C) 2008 Robert Jarzmik
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_USB_GADGET_PXA27X_H
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 118ad70f1af0..143122ed3c66 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A66597 UDC (USB gadget)
*
* Copyright (C) 2006-2009 Renesas Solutions Corp.
*
* Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/module.h>
@@ -1877,9 +1874,7 @@ static int r8a66597_probe(struct platform_device *pdev)
r8a66597->gadget.max_speed = USB_SPEED_HIGH;
r8a66597->gadget.name = udc_name;
- init_timer(&r8a66597->timer);
- r8a66597->timer.function = r8a66597_timer;
- r8a66597->timer.data = (unsigned long)r8a66597;
+ setup_timer(&r8a66597->timer, r8a66597_timer, (unsigned long)r8a66597);
r8a66597->reg = reg;
if (r8a66597->pdata->on_chip) {
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.h b/drivers/usb/gadget/udc/r8a66597-udc.h
index 45c4b2df1785..9a115caba661 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.h
+++ b/drivers/usb/gadget/udc/r8a66597-udc.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A66597 UDC
*
* Copyright (C) 2007-2009 Renesas Solutions Corp.
*
* Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#ifndef __R8A66597_H__
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 63a206122058..bc37f40baacf 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -1,22 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas USB3.0 Peripheral driver (USB gadget)
*
- * Copyright (C) 2015 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * Copyright (C) 2015-2017 Renesas Electronics Corporation
*/
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
@@ -334,6 +332,7 @@ struct renesas_usb3 {
struct usb_gadget_driver *driver;
struct extcon_dev *extcon;
struct work_struct extcon_work;
+ struct phy *phy;
struct renesas_usb3_ep *usb3_ep;
int num_usb3_eps;
@@ -2056,7 +2055,7 @@ static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep,
const struct usb_endpoint_descriptor *desc)
{
int i;
- const u32 max_packet_array[] = {8, 16, 32, 64, 512};
+ static const u32 max_packet_array[] = {8, 16, 32, 64, 512};
u32 mpkt = PN_RAMMAP_MPKT(1024);
for (i = 0; i < ARRAY_SIZE(max_packet_array); i++) {
@@ -2239,7 +2238,9 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
/* hook up the driver */
usb3->driver = driver;
- pm_runtime_enable(usb3_to_dev(usb3));
+ if (usb3->phy)
+ phy_init(usb3->phy);
+
pm_runtime_get_sync(usb3_to_dev(usb3));
renesas_usb3_init_controller(usb3);
@@ -2256,8 +2257,10 @@ static int renesas_usb3_stop(struct usb_gadget *gadget)
usb3->driver = NULL;
renesas_usb3_stop_controller(usb3);
+ if (usb3->phy)
+ phy_exit(usb3->phy);
+
pm_runtime_put(usb3_to_dev(usb3));
- pm_runtime_disable(usb3_to_dev(usb3));
return 0;
}
@@ -2405,6 +2408,9 @@ static int renesas_usb3_remove(struct platform_device *pdev)
renesas_usb3_dma_free_prd(usb3, &pdev->dev);
__renesas_usb3_ep_free_request(usb3->ep0_req);
+ if (usb3->phy)
+ phy_put(usb3->phy);
+ pm_runtime_disable(usb3_to_dev(usb3));
return 0;
}
@@ -2560,20 +2566,15 @@ static int renesas_usb3_probe(struct platform_device *pdev)
{
struct renesas_usb3 *usb3;
struct resource *res;
- const struct of_device_id *match;
int irq, ret;
const struct renesas_usb3_priv *priv;
const struct soc_device_attribute *attr;
- match = of_match_node(usb3_of_match, pdev->dev.of_node);
- if (!match)
- return -ENODEV;
-
attr = soc_device_match(renesas_usb3_quirks_match);
if (attr)
priv = attr->data;
else
- priv = match->data;
+ priv = of_device_get_match_data(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -2635,11 +2636,20 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (ret < 0)
goto err_dev_create;
+ /*
+ * The phy is optional, so if this driver cannot get one,
+ * it simply continues without handling a phy.
+ */
+ usb3->phy = devm_phy_get(&pdev->dev, "usb");
+ if (IS_ERR(usb3->phy))
+ usb3->phy = NULL;
+
usb3->workaround_for_vbus = priv->workaround_for_vbus;
renesas_usb3_debugfs_init(usb3, &pdev->dev);
- dev_info(&pdev->dev, "probed\n");
+ dev_info(&pdev->dev, "probed%s\n", usb3->phy ? " with phy" : "");
+ pm_runtime_enable(usb3_to_dev(usb3));
return 0;
@@ -2655,11 +2665,49 @@ err_alloc_prd:
return ret;
}
+#ifdef CONFIG_PM_SLEEP
+static int renesas_usb3_suspend(struct device *dev)
+{
+ struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+
+ /* Not started */
+ if (!usb3->driver)
+ return 0;
+
+ renesas_usb3_stop_controller(usb3);
+ if (usb3->phy)
+ phy_exit(usb3->phy);
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int renesas_usb3_resume(struct device *dev)
+{
+ struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+
+ /* Not started */
+ if (!usb3->driver)
+ return 0;
+
+ if (usb3->phy)
+ phy_init(usb3->phy);
+ pm_runtime_get_sync(dev);
+ renesas_usb3_init_controller(usb3);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(renesas_usb3_pm_ops, renesas_usb3_suspend,
+ renesas_usb3_resume);
+
static struct platform_driver renesas_usb3_driver = {
.probe = renesas_usb3_probe,
.remove = renesas_usb3_remove,
.driver = {
.name = (char *)udc_name,
+ .pm = &renesas_usb3_pm_ops,
.of_match_table = of_match_ptr(usb3_of_match),
},
};
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index 42587b738a1f..31c7c5587cf9 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* linux/drivers/usb/gadget/s3c-hsudc.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
@@ -8,11 +9,7 @@
* The S3C24XX USB 2.0 high-speed USB controller supports up to 9 endpoints.
* Each endpoint can be configured as either in or out endpoint. Endpoints
* can be configured for Bulk or Interrupt transfer mode.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index 394abd5d65c0..f154f49e98c8 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* linux/drivers/usb/gadget/s3c2410_udc.c
*
@@ -5,11 +6,6 @@
*
* Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
* Additional cleanups by Ben Dooks <ben-linux@fluff.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#define pr_fmt(fmt) "s3c2410_udc: " fmt
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.h b/drivers/usb/gadget/udc/s3c2410_udc.h
index 93bf225f1969..bdcaa8dd300f 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.h
+++ b/drivers/usb/gadget/udc/s3c2410_udc.h
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* linux/drivers/usb/gadget/s3c2410_udc.h
* Samsung on-chip full speed USB device controllers
*
* Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
* Additional cleanups by Ben Dooks <ben-linux@fluff.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _S3C2410_UDC_H
diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c
index 38a165dbf924..d4da47f4f6f4 100644
--- a/drivers/usb/gadget/udc/snps_udc_core.c
+++ b/drivers/usb/gadget/udc/snps_udc_core.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* amd5536.c -- AMD 5536 UDC high/full speed USB device controller
*
* Copyright (C) 2005-2007 AMD (http://www.amd.com)
* Author: Thomas Dahlmann
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
@@ -1733,7 +1729,7 @@ static void udc_soft_reset(struct udc *dev)
}
/* RDE timer callback to set RDE bit */
-static void udc_timer_function(unsigned long v)
+static void udc_timer_function(struct timer_list *unused)
{
u32 tmp;
@@ -1813,7 +1809,7 @@ static void udc_handle_halt_state(struct udc_ep *ep)
}
/* Stall timer callback to poll S bit and set it again after */
-static void udc_pollstall_timer_function(unsigned long v)
+static void udc_pollstall_timer_function(struct timer_list *unused)
{
struct udc_ep *ep;
int halted = 0;
@@ -3067,14 +3063,12 @@ void udc_remove(struct udc *dev)
stop_timer++;
if (timer_pending(&udc_timer))
wait_for_completion(&on_exit);
- if (udc_timer.data)
- del_timer_sync(&udc_timer);
+ del_timer_sync(&udc_timer);
/* remove pollstall timer */
stop_pollstall_timer++;
if (timer_pending(&udc_pollstall_timer))
wait_for_completion(&on_pollstall_exit);
- if (udc_pollstall_timer.data)
- del_timer_sync(&udc_pollstall_timer);
+ del_timer_sync(&udc_pollstall_timer);
udc = NULL;
}
EXPORT_SYMBOL_GPL(udc_remove);
@@ -3164,10 +3158,6 @@ int udc_probe(struct udc *dev)
u32 reg;
int retval;
- /* mark timer as not initialized */
- udc_timer.data = 0;
- udc_pollstall_timer.data = 0;
-
/* device struct setup */
dev->gadget.ops = &udc_ops;
@@ -3207,13 +3197,8 @@ int udc_probe(struct udc *dev)
goto finished;
/* timer init */
- init_timer(&udc_timer);
- udc_timer.function = udc_timer_function;
- udc_timer.data = 1;
- /* timer pollstall init */
- init_timer(&udc_pollstall_timer);
- udc_pollstall_timer.function = udc_pollstall_timer_function;
- udc_pollstall_timer.data = 1;
+ timer_setup(&udc_timer, udc_timer_function, 0);
+ timer_setup(&udc_pollstall_timer, udc_pollstall_timer_function, 0);
/* set SD */
reg = readl(&dev->regs->ctl);
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
index e8a5fdaee37d..32f1d3e90c26 100644
--- a/drivers/usb/gadget/udc/snps_udc_plat.c
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* snps_udc_plat.c - Synopsys UDC Platform Driver
*
* Copyright (C) 2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/extcon.h>
diff --git a/drivers/usb/gadget/udc/trace.c b/drivers/usb/gadget/udc/trace.c
index 8c551ab91ad8..7430624c0bd7 100644
--- a/drivers/usb/gadget/udc/trace.c
+++ b/drivers/usb/gadget/udc/trace.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* trace.c - USB Gadget Framework Trace Support
*
* Copyright (C) 2016 Intel Corporation
* Author: Felipe Balbi <felipe.balbi@linux.intel.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define CREATE_TRACE_POINTS
diff --git a/drivers/usb/gadget/udc/trace.h b/drivers/usb/gadget/udc/trace.h
index da29874b5366..f07ddb3f4bb9 100644
--- a/drivers/usb/gadget/udc/trace.h
+++ b/drivers/usb/gadget/udc/trace.h
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* udc.c - Core UDC Framework
*
* Copyright (C) 2016 Intel Corporation
* Author: Felipe Balbi <felipe.balbi@linux.intel.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#undef TRACE_SYSTEM
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index de207a90571e..7da2b9ce8cb3 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Xilinx USB peripheral controller driver
*
@@ -8,12 +9,6 @@
*
* Some parts of this driver code is based on the driver for at91-series
* USB peripheral controller (at91_udc.c).
- *
- * This program is free software; you can redistribute it
- * and/or modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation;
- * either version 2 of the License, or (at your option) any
- * later version.
*/
#include <linux/delay.h>
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index 73a4dfba0edb..566ab261e8b7 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: LGPL-2.1+
/*
* Copyright (C) 2003 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
*/
#include <linux/errno.h>
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index fa5692dec832..b80a94e632af 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -45,12 +45,12 @@ config USB_XHCI_PLATFORM
If unsure, say N.
config USB_XHCI_MTK
- tristate "xHCI support for Mediatek MT65xx/MT7621"
+ tristate "xHCI support for MediaTek SoCs"
select MFD_SYSCON
depends on (MIPS && SOC_MT7621) || ARCH_MEDIATEK || COMPILE_TEST
---help---
Say 'Y' to enable the support for the xHCI host controller
- found in Mediatek MT65xx SoCs.
+ found in MediaTek SoCs.
If unsure, say N.
config USB_XHCI_MVEBU
@@ -222,18 +222,6 @@ config USB_EHCI_HCD_AT91
Enables support for the on-chip EHCI controller on
Atmel chips.
-config USB_EHCI_MSM
- tristate "Support for Qualcomm QSD/MSM on-chip EHCI USB controller"
- depends on ARCH_QCOM
- select USB_EHCI_ROOT_HUB_TT
- ---help---
- Enables support for the USB Host controller present on the
- Qualcomm chipsets. Root Hub has inbuilt TT.
- This driver depends on OTG driver for PHY initialization,
- clock management, powering up VBUS, and power management.
- This driver is not supported on boards like trout which
- has an external PHY.
-
config USB_EHCI_TEGRA
tristate "NVIDIA Tegra HCD support"
depends on ARCH_TEGRA
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 4ab2689c8952..32b036e2ffef 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -26,6 +26,10 @@ ifneq ($(CONFIG_USB_XHCI_RCAR), )
xhci-plat-hcd-y += xhci-rcar.o
endif
+ifneq ($(CONFIG_DEBUG_FS),)
+ xhci-hcd-y += xhci-debugfs.o
+endif
+
obj-$(CONFIG_USB_WHCI_HCD) += whci/
obj-$(CONFIG_USB_PCI) += pci-quirks.o
@@ -40,7 +44,6 @@ obj-$(CONFIG_USB_EHCI_HCD_SPEAR) += ehci-spear.o
obj-$(CONFIG_USB_EHCI_HCD_STI) += ehci-st.o
obj-$(CONFIG_USB_EHCI_EXYNOS) += ehci-exynos.o
obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o
-obj-$(CONFIG_USB_EHCI_MSM) += ehci-msm.o
obj-$(CONFIG_USB_EHCI_TEGRA) += ehci-tegra.o
obj-$(CONFIG_USB_W90X900_EHCI) += ehci-w90x900.o
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 5f425c89faf1..2400a826397a 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Broadcom specific Advanced Microcontroller Bus
* Broadcom USB-core driver (BCMA bus glue)
@@ -16,8 +17,6 @@
*
* Derived from the USBcore related parts of Broadcom-SB
* Copyright 2005-2011 Broadcom Corporation
- *
- * Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/bcma/bcma.h>
#include <linux/delay.h>
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 7440722bfbf0..3ba140ceaf52 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for EHCI UHP on Atmel chips
*
@@ -5,10 +6,6 @@
* Nicolas Ferre <nicolas.ferre@atmel.com>
*
* Based on various ehci-*.c drivers
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
*/
#include <linux/clk.h>
@@ -205,7 +202,8 @@ static int __maybe_unused ehci_atmel_drv_resume(struct device *dev)
struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
atmel_start_clock(atmel_ehci);
- return ehci_resume(hcd, false);
+ ehci_resume(hcd, false);
+ return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index cbb9b8e12c3c..19f00424f53e 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -1,16 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001-2002 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
*/
/* this file is part of ehci-hcd.c */
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 26b641100639..d9145a8f35d2 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* SAMSUNG EXYNOS USB HOST EHCI Controller
*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Author: Jingoo Han <jg1.han@samsung.com>
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index d025cc06dda7..c5094cb88cd5 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2005-2009 MontaVista Software, Inc.
* Copyright 2008,2012,2015 Freescale Semiconductor, Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided
* by Hunter Wu.
* Power Management support by Dave Liu <daveliu@freescale.com>,
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index 1a8a60a57cf2..cbc422032e50 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2005-2010,2012 Freescale Semiconductor, Inc.
* Copyright (c) 2005 MontaVista Software
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _EHCI_FSL_H
#define _EHCI_FSL_H
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
index 21650044b09e..656b8c08efc8 100644
--- a/drivers/usb/host/ehci-grlib.c
+++ b/drivers/usb/host/ehci-grlib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Aeroflex Gaisler GRLIB GRUSBHC EHCI host controller
*
@@ -9,20 +10,6 @@
* (c) Valentine Barshak <vbarshak@ru.mvista.com>
* and in turn based on "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
* and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/err.h>
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6e834b83a104..7f0737449df7 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Enhanced Host Controller Interface (EHCI) driver for USB.
*
* Maintainer: Alan Stern <stern@rowland.harvard.edu>
*
* Copyright (c) 2000-2004 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -1012,7 +999,7 @@ idle_timeout:
qh_destroy(ehci, qh);
break;
}
- /* else FALL THROUGH */
+ /* fall through */
default:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index df169c8e7225..facafdf8fb95 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2001-2004 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 9b7e63977215..21307d862af6 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
deleted file mode 100644
index 2f8d3af811ce..000000000000
--- a/drivers/usb/host/ehci-msm.c
+++ /dev/null
@@ -1,265 +0,0 @@
-/* ehci-msm.c - HSUSB Host Controller Driver Implementation
- *
- * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
- *
- * Partly derived from ehci-fsl.c and ehci-hcd.c
- * Copyright (c) 2000-2004 by David Brownell
- * Copyright (c) 2005 MontaVista Software
- *
- * All source code in this file is licensed under the following license except
- * where indicated.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * See the GNU General Public License for more details.
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can find it at http://www.fsf.org
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/msm_hsusb_hw.h>
-#include <linux/usb.h>
-#include <linux/usb/hcd.h>
-#include <linux/acpi.h>
-
-#include "ehci.h"
-
-#define MSM_USB_BASE (hcd->regs)
-
-#define DRIVER_DESC "Qualcomm On-Chip EHCI Host Controller"
-
-static const char hcd_name[] = "ehci-msm";
-static struct hc_driver __read_mostly msm_hc_driver;
-
-static int ehci_msm_reset(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int retval;
-
- ehci->caps = USB_CAPLENGTH;
- hcd->has_tt = 1;
-
- retval = ehci_setup(hcd);
- if (retval)
- return retval;
-
- /* select ULPI phy and clear other status/control bits in PORTSC */
- writel(PORTSC_PTS_ULPI, USB_PORTSC);
- /* bursts of unspecified length. */
- writel(0, USB_AHBBURST);
- /* Use the AHB transactor, allow posted data writes */
- writel(0x8, USB_AHBMODE);
- /* Disable streaming mode and select host mode */
- writel(0x13, USB_USBMODE);
- /* Disable ULPI_TX_PKT_EN_CLR_FIX which is valid only for HSIC */
- writel(readl(USB_GENCONFIG_2) & ~ULPI_TX_PKT_EN_CLR_FIX, USB_GENCONFIG_2);
-
- return 0;
-}
-
-static int ehci_msm_probe(struct platform_device *pdev)
-{
- struct usb_hcd *hcd;
- struct resource *res;
- struct usb_phy *phy;
- int ret;
-
- dev_dbg(&pdev->dev, "ehci_msm proble\n");
-
- hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev));
- if (!hcd) {
- dev_err(&pdev->dev, "Unable to create HCD\n");
- return -ENOMEM;
- }
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "Unable to get IRQ resource\n");
- goto put_hcd;
- }
- hcd->irq = ret;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Unable to get memory resource\n");
- ret = -ENODEV;
- goto put_hcd;
- }
-
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
- hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto put_hcd;
- }
-
- /*
- * If there is an OTG driver, let it take care of PHY initialization,
- * clock management, powering up VBUS, mapping of registers address
- * space and power management.
- */
- if (pdev->dev.of_node)
- phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
- else
- phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
-
- if (IS_ERR(phy)) {
- if (PTR_ERR(phy) == -EPROBE_DEFER) {
- dev_err(&pdev->dev, "unable to find transceiver\n");
- ret = -EPROBE_DEFER;
- goto put_hcd;
- }
- phy = NULL;
- }
-
- hcd->usb_phy = phy;
- device_init_wakeup(&pdev->dev, 1);
-
- if (phy && phy->otg) {
- /*
- * MSM OTG driver takes care of adding the HCD and
- * placing hardware into low power mode via runtime PM.
- */
- ret = otg_set_host(phy->otg, &hcd->self);
- if (ret < 0) {
- dev_err(&pdev->dev, "unable to register with transceiver\n");
- goto put_hcd;
- }
-
- pm_runtime_no_callbacks(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- } else {
- ret = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
- if (ret)
- goto put_hcd;
- }
-
- return 0;
-
-put_hcd:
- usb_put_hcd(hcd);
-
- return ret;
-}
-
-static int ehci_msm_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- device_init_wakeup(&pdev->dev, 0);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
-
- if (hcd->usb_phy && hcd->usb_phy->otg)
- otg_set_host(hcd->usb_phy->otg, NULL);
- else
- usb_remove_hcd(hcd);
-
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ehci_msm_pm_suspend(struct device *dev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- bool do_wakeup = device_may_wakeup(dev);
-
- dev_dbg(dev, "ehci-msm PM suspend\n");
-
- /* Only call ehci_suspend if ehci_setup has been done */
- if (ehci->sbrn)
- return ehci_suspend(hcd, do_wakeup);
-
- return 0;
-}
-
-static int ehci_msm_pm_resume(struct device *dev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-
- dev_dbg(dev, "ehci-msm PM resume\n");
-
- /* Only call ehci_resume if ehci_setup has been done */
- if (ehci->sbrn)
- ehci_resume(hcd, false);
-
- return 0;
-}
-
-#else
-#define ehci_msm_pm_suspend NULL
-#define ehci_msm_pm_resume NULL
-#endif
-
-static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
- .suspend = ehci_msm_pm_suspend,
- .resume = ehci_msm_pm_resume,
-};
-
-static const struct acpi_device_id msm_ehci_acpi_ids[] = {
- { "QCOM8040", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, msm_ehci_acpi_ids);
-
-static const struct of_device_id msm_ehci_dt_match[] = {
- { .compatible = "qcom,ehci-host", },
- {}
-};
-MODULE_DEVICE_TABLE(of, msm_ehci_dt_match);
-
-static struct platform_driver ehci_msm_driver = {
- .probe = ehci_msm_probe,
- .remove = ehci_msm_remove,
- .shutdown = usb_hcd_platform_shutdown,
- .driver = {
- .name = "msm_hsusb_host",
- .pm = &ehci_msm_dev_pm_ops,
- .of_match_table = msm_ehci_dt_match,
- .acpi_match_table = ACPI_PTR(msm_ehci_acpi_ids),
- },
-};
-
-static const struct ehci_driver_overrides msm_overrides __initconst = {
- .reset = ehci_msm_reset,
-};
-
-static int __init ehci_msm_init(void)
-{
- if (usb_disabled())
- return -ENODEV;
-
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
- ehci_init_driver(&msm_hc_driver, &msm_overrides);
- return platform_driver_register(&ehci_msm_driver);
-}
-module_init(ehci_msm_init);
-
-static void __exit ehci_msm_cleanup(void)
-{
- platform_driver_unregister(&ehci_msm_driver);
-}
-module_exit(ehci_msm_cleanup);
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_ALIAS("platform:msm-ehci");
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 849806a75f1c..de764459e05a 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
* Author: Chao Xie <chao.xie@marvell.com>
* Neil Zhang <zhangwm@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index c7a9b31eeaef..c9f91e6c72b6 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
* Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 4d308533bc83..854b146a457d 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ehci-omap.c - driver for USBHOST on OMAP3/4 processors
*
@@ -14,21 +15,6 @@
* Contact: Felipe Balbi <felipe.balbi@nokia.com>
*
* Based on "ehci-fsl.c" and "ehci-au1xxx.c" ehci glue layers
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 1aec87ec68df..1ad72647a069 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/host/ehci-orion.c
*
* Tzachi Perelstein <tzachi@marvell.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 93326974ff4b..fe9422d3bcdc 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* EHCI HCD (Host Controller Driver) PCI Bus Glue.
*
* Copyright (c) 2000-2004 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index f1908ea9fbd8..b065a960adc2 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic platform ehci driver
*
@@ -16,8 +17,6 @@
* Copyright 2000-2002 David Brownell
* Copyright 1999 Linus Torvalds
* Copyright 1999 Gregory P. Smith
- *
- * Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/acpi.h>
#include <linux/clk.h>
@@ -40,12 +39,11 @@
#define DRIVER_DESC "EHCI generic platform driver"
#define EHCI_MAX_CLKS 4
-#define EHCI_MAX_RSTS 4
#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
struct ehci_platform_priv {
struct clk *clks[EHCI_MAX_CLKS];
- struct reset_control *rsts[EHCI_MAX_RSTS];
+ struct reset_control *rsts;
struct phy **phys;
int num_phys;
bool reset_on_resume;
@@ -151,7 +149,7 @@ static int ehci_platform_probe(struct platform_device *dev)
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ehci_platform_priv *priv;
struct ehci_hcd *ehci;
- int err, irq, phy_num, clk = 0, rst;
+ int err, irq, phy_num, clk = 0;
if (usb_disabled())
return -ENODEV;
@@ -239,22 +237,16 @@ static int ehci_platform_probe(struct platform_device *dev)
}
}
- for (rst = 0; rst < EHCI_MAX_RSTS; rst++) {
- priv->rsts[rst] = devm_reset_control_get_shared_by_index(
- &dev->dev, rst);
- if (IS_ERR(priv->rsts[rst])) {
- err = PTR_ERR(priv->rsts[rst]);
- if (err == -EPROBE_DEFER)
- goto err_reset;
- priv->rsts[rst] = NULL;
- break;
- }
-
- err = reset_control_deassert(priv->rsts[rst]);
- if (err)
- goto err_reset;
+ priv->rsts = devm_reset_control_array_get_optional_shared(&dev->dev);
+ if (IS_ERR(priv->rsts)) {
+ err = PTR_ERR(priv->rsts);
+ goto err_put_clks;
}
+ err = reset_control_deassert(priv->rsts);
+ if (err)
+ goto err_put_clks;
+
if (pdata->big_endian_desc)
ehci->big_endian_desc = 1;
if (pdata->big_endian_mmio)
@@ -310,8 +302,7 @@ err_power:
if (pdata->power_off)
pdata->power_off(dev);
err_reset:
- while (--rst >= 0)
- reset_control_assert(priv->rsts[rst]);
+ reset_control_assert(priv->rsts);
err_put_clks:
while (--clk >= 0)
clk_put(priv->clks[clk]);
@@ -329,15 +320,14 @@ static int ehci_platform_remove(struct platform_device *dev)
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
- int clk, rst;
+ int clk;
usb_remove_hcd(hcd);
if (pdata->power_off)
pdata->power_off(dev);
- for (rst = 0; rst < EHCI_MAX_RSTS && priv->rsts[rst]; rst++)
- reset_control_assert(priv->rsts[rst]);
+ reset_control_assert(priv->rsts);
for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++)
clk_put(priv->clks[clk]);
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index 342816a7f8b1..46e160370d6e 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PMC MSP EHCI (Host Controller Driver) for USB.
*
* (C) Copyright 2006-2010 PMC-Sierra Inc
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
*/
/* includes */
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 1a10c8d542ca..576f7d79ad4e 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* EHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 7934ff9b35e1..8c733492d8fe 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PS3 EHCI Host Controller driver
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/firmware.h>
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 8f3f055c05fa..88158324dcae 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2001-2004 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 6bc6304672bc..e56db44708bc 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001-2004 by David Brownell
* Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index 5caf88d679e4..a9ee767952c1 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH EHCI host controller driver
*
* Copyright (C) 2010 Paul Mundt
*
* Based on ohci-sh.c and ehci-atmel.c.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 1f25c7985f5b..add796c78561 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for EHCI HCD on SPEAr SOC
*
@@ -5,10 +6,6 @@
* Deepak Sikri <deepak.sikri@st.com>
*
* Based on various ehci-*.c drivers
-*
-* This file is subject to the terms and conditions of the GNU General Public
-* License. See the file COPYING in the main directory of this archive for
-* more details.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index be4a2788fc58..3c1362ab70be 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ST EHCI driver
*
@@ -6,10 +7,6 @@
* Author: Peter Griffin <peter.griffin@linaro.org>
*
* Derived from ehci-platform.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
index 5216f2b09d63..71fb61dd4a87 100644
--- a/drivers/usb/host/ehci-sysfs.c
+++ b/drivers/usb/host/ehci-sysfs.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2007 by Alan Stern
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 9a3d7db5be57..c809f7d2f08f 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -1,19 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs
*
* Copyright (C) 2010 Google, Inc.
* Copyright (C) 2009 - 2013 NVIDIA Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
index bdb93b6a356f..610ed437ed2c 100644
--- a/drivers/usb/host/ehci-tilegx.c
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
*/
/*
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 0b6cdb723192..4fcebda4b79d 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2012 by Alan Stern
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
*/
/* This file is part of ehci-hcd.c */
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index 63b9d0c67963..6d77ace1697b 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/driver/usb/host/ehci-w90x900.c
*
* Copyright (c) 2008 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation;version 2 of the License.
- *
*/
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index f54480850bb8..d2a27578e440 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* EHCI HCD (Host Controller Driver) for USB.
*
@@ -8,21 +9,6 @@
* Based on "ehci-ppc-of.c" by Valentine Barshak <vbarshak@ru.mvista.com>
* and "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
* and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/err.h>
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index a8e36170d8b8..c8e9a48e1d51 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001-2002 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_EHCI_HCD_H
diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c
index b58e7a60913a..fafa91189e45 100644
--- a/drivers/usb/host/fhci-dbg.c
+++ b/drivers/usb/host/fhci-dbg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 55a0ae6f2d74..48fe9e6c2465 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/module.h>
diff --git a/drivers/usb/host/fhci-hub.c b/drivers/usb/host/fhci-hub.c
index 60d55eb3de0d..c359dcdb9b13 100644
--- a/drivers/usb/host/fhci-hub.c
+++ b/drivers/usb/host/fhci-hub.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/fhci-mem.c b/drivers/usb/host/fhci-mem.c
index b0b88f57a5ac..658aedc6adc1 100644
--- a/drivers/usb/host/fhci-mem.c
+++ b/drivers/usb/host/fhci-mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/fhci-q.c b/drivers/usb/host/fhci-q.c
index 03be7494a476..669c240523fe 100644
--- a/drivers/usb/host/fhci-q.c
+++ b/drivers/usb/host/fhci-q.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index 2f162faabbca..3d12cdd5f999 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index f82ad5df1b0d..3a4e8f616751 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 3fc82c1c3c73..e7ec41d62410 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __FHCI_H
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 457cc6525abd..62fc955085a1 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Faraday FOTG210 EHCI-like driver
*
* Copyright (c) 2013 Faraday Technology Corporation
@@ -7,20 +8,6 @@
* Po-Yu Chuang <ratbert.chuang@gmail.com>
*
* Most of code borrowed from the Linux-3.7 EHCI driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/device.h>
@@ -5449,7 +5436,7 @@ idle_timeout:
qh_destroy(fotg210, qh);
break;
}
- /* else FALL THROUGH */
+ /* fall through */
default:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index ba557cdba8ef..677f9d592109 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Setup platform devices needed by the Freescale multi-port host
* and/or dual-role USB controller modules based on the description
* in flat device tree.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index da3b18038d23..684d6f074c3a 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Host Wire Adapter:
* Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC
@@ -5,21 +6,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* The HWA driver is a simple layer that forwards requests to the WAHC
 * (Wire Adapter Host Controller) or WUSBHC (Wireless USB Host
* Controller) layers.
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
index 4f320d050da7..b964f9a51d87 100644
--- a/drivers/usb/host/imx21-dbg.c
+++ b/drivers/usb/host/imx21-dbg.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2009 by Martin Fuzzey
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of imx21-hcd.c */
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 39ae7fb64b6f..3a8bbfe43a8e 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Host Controller Driver for IMX21
*
@@ -5,20 +6,6 @@
* Copyright (C) 2009 Martin Fuzzey
* Originally written by Jay Monkman <jtm@lopingdog.com>
* Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
index 05122f8a6983..7b9cf0a38d6e 100644
--- a/drivers/usb/host/imx21-hcd.h
+++ b/drivers/usb/host/imx21-hcd.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Macros and prototypes for i.MX21
*
@@ -5,20 +6,6 @@
* Copyright (C) 2009 Martin Fuzzey
* Originally written by Jay Monkman <jtm@lopingdog.com>
* Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_IMX21_HCD_H__
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 73fec38754f9..5f9234b9cf7b 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ISP116x HCD (Host Controller Driver) for USB.
*
@@ -1018,6 +1019,7 @@ static int isp116x_hub_control(struct usb_hcd *hcd,
spin_lock_irqsave(&isp116x->lock, flags);
isp116x_write_reg32(isp116x, HCRHSTATUS, RH_HS_OCIC);
spin_unlock_irqrestore(&isp116x->lock, flags);
+ /* fall through */
case C_HUB_LOCAL_POWER:
DBG("C_HUB_LOCAL_POWER\n");
break;
@@ -1433,8 +1435,10 @@ static int isp116x_bus_suspend(struct usb_hcd *hcd)
isp116x_write_reg32(isp116x, HCCONTROL,
(val & ~HCCONTROL_HCFS) |
HCCONTROL_USB_RESET);
+ /* fall through */
case HCCONTROL_USB_RESET:
ret = -EBUSY;
+ /* fall through */
default: /* HCCONTROL_USB_SUSPEND */
spin_unlock_irqrestore(&isp116x->lock, flags);
break;
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 9b7e307e2d54..b21c386e6a46 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ISP1362 HCD (Host Controller Driver) for USB.
*
@@ -712,7 +713,7 @@ static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int fl
static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
{
- int index = epq->free_ptd;
+ int index;
prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
index = claim_ptd_buffers(epq, ep, ep->length);
@@ -1578,6 +1579,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ break;
case C_HUB_LOCAL_POWER:
DBG(0, "C_HUB_LOCAL_POWER\n");
break;
@@ -2251,7 +2253,6 @@ static int isp1362_mem_config(struct usb_hcd *hcd)
return -ENOMEM;
}
- total = istl_size + intl_size + atl_size;
spin_lock_irqsave(&isp1362_hcd->lock, flags);
for (i = 0; i < 2; i++) {
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 0ece9a9341e5..afa321ab55fc 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MAX3421 Host Controller driver for USB.
*
@@ -60,6 +61,7 @@
#include <linux/spi/spi.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/of.h>
#include <linux/platform_data/max3421-hcd.h>
@@ -85,6 +87,8 @@
USB_PORT_STAT_C_OVERCURRENT | \
USB_PORT_STAT_C_RESET) << 16)
+#define MAX3421_GPOUT_COUNT 8
+
enum max3421_rh_state {
MAX3421_RH_RESET,
MAX3421_RH_SUSPENDED,
@@ -1672,7 +1676,7 @@ max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
u8 mask, idx;
--pin_number;
- if (pin_number > 7)
+ if (pin_number >= MAX3421_GPOUT_COUNT)
return;
mask = 1u << (pin_number % 4);
@@ -1696,10 +1700,10 @@ max3421_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index,
unsigned long flags;
int retval = 0;
- spin_lock_irqsave(&max3421_hcd->lock, flags);
-
pdata = spi->dev.platform_data;
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
switch (type_req) {
case ClearHubFeature:
break;
@@ -1832,10 +1836,34 @@ static const struct hc_driver max3421_hcd_desc = {
};
static int
+max3421_of_vbus_en_pin(struct device *dev, struct max3421_hcd_platform_data *pdata)
+{
+ int retval;
+ uint32_t value[2];
+
+ if (!pdata)
+ return -EINVAL;
+
+ retval = of_property_read_u32_array(dev->of_node, "maxim,vbus-en-pin", value, 2);
+ if (retval) {
+ dev_err(dev, "device tree node property 'maxim,vbus-en-pin' is missing\n");
+ return retval;
+ }
+ dev_info(dev, "property 'maxim,vbus-en-pin' value is <%d %d>\n", value[0], value[1]);
+
+ pdata->vbus_gpout = value[0];
+ pdata->vbus_active_level = value[1];
+
+ return 0;
+}
+
+static int
max3421_probe(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
struct max3421_hcd *max3421_hcd;
struct usb_hcd *hcd = NULL;
+ struct max3421_hcd_platform_data *pdata = NULL;
int retval = -ENOMEM;
if (spi_setup(spi) < 0) {
@@ -1843,6 +1871,41 @@ max3421_probe(struct spi_device *spi)
return -EFAULT;
}
+ if (!spi->irq) {
+ dev_err(dev, "Failed to get SPI IRQ");
+ return -EFAULT;
+ }
+
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ retval = -ENOMEM;
+ goto error;
+ }
+ retval = max3421_of_vbus_en_pin(dev, pdata);
+ if (retval)
+ goto error;
+
+ spi->dev.platform_data = pdata;
+ }
+
+ pdata = spi->dev.platform_data;
+ if (!pdata) {
+ dev_err(&spi->dev, "driver configuration data is not provided\n");
+ retval = -EFAULT;
+ goto error;
+ }
+ if (pdata->vbus_active_level > 1) {
+ dev_err(&spi->dev, "vbus active level value %d is out of range (0/1)\n", pdata->vbus_active_level);
+ retval = -EINVAL;
+ goto error;
+ }
+ if (pdata->vbus_gpout < 1 || pdata->vbus_gpout > MAX3421_GPOUT_COUNT) {
+ dev_err(&spi->dev, "vbus gpout value %d is out of range (1..8)\n", pdata->vbus_gpout);
+ retval = -EINVAL;
+ goto error;
+ }
+
hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev,
dev_name(&spi->dev));
if (!hcd) {
@@ -1885,6 +1948,11 @@ max3421_probe(struct spi_device *spi)
return 0;
error:
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node && pdata) {
+ devm_kfree(&spi->dev, pdata);
+ spi->dev.platform_data = NULL;
+ }
+
if (hcd) {
kfree(max3421_hcd->tx);
kfree(max3421_hcd->rx);
@@ -1929,11 +1997,18 @@ max3421_remove(struct spi_device *spi)
return 0;
}
+static const struct of_device_id max3421_of_match_table[] = {
+ { .compatible = "maxim,max3421", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, max3421_of_match_table);
+
static struct spi_driver max3421_driver = {
.probe = max3421_probe,
.remove = max3421_remove,
.driver = {
.name = "max3421-hcd",
+ .of_match_table = of_match_ptr(max3421_of_match_table),
},
};
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 5302f988e7e6..5ad9e9bdc8ee 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 05da2cb59612..0c507a0cfe1f 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OHCI HCD (Host Controller Driver) for USB.
*
@@ -5,10 +6,6 @@
*
* Derived from: ohci-omap.c and ohci-s3c2410.c
* Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index c3eded317495..ac7d4ac34b02 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 6865b919403f..a39fae41bc70 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* SAMSUNG EXYNOS USB HOST OHCI Controller
*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Author: Jingoo Han <jg1.han@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 44924824fa41..10887e09e9bc 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Open Host Controller Interface (OHCI) driver for USB.
*
@@ -382,7 +383,7 @@ sanitize:
ed_free (ohci, ed);
break;
}
- /* else FALL THROUGH */
+ /* fall through */
default:
/* caller was supposed to have unlinked any requests;
* that's not our job. can't recover; must leak ed.
@@ -785,7 +786,7 @@ static void io_watchdog_func(unsigned long _ohci)
}
/* find the last TD processed by the controller. */
- head = hc32_to_cpu(ohci, ACCESS_ONCE(ed->hwHeadP)) & TD_MASK;
+ head = hc32_to_cpu(ohci, READ_ONCE(ed->hwHeadP)) & TD_MASK;
td_start = td;
td_next = list_prepare_entry(td, &ed->td_list, td_list);
list_for_each_entry_continue(td_next, &ed->td_list, td_list) {
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 248eb7702463..fb7aaa3b9d06 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c
index ed8a762b8670..b3da3f12e5b1 100644
--- a/drivers/usb/host/ohci-mem.c
+++ b/drivers/usb/host/ohci-mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 6df8e2ed40fd..f5f532601092 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* driver for NXP USB Host devices
*
@@ -13,10 +14,7 @@
* NOTE: This driver does not have suspend/resume functionality
* This driver is intended for engineering development purposes only
*
- * 2005-2006 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
+ * 2005-2006 (c) MontaVista Software, Inc.
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 91393ec7d850..0201c49bc4fc 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index a84aebe9b0a9..fbcd34911025 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 61fe2b985070..1e6c954f4b3f 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic platform ohci driver
*
@@ -11,8 +12,6 @@
* Copyright 2000-2002 David Brownell
* Copyright 1999 Linus Torvalds
* Copyright 1999 Gregory P. Smith
- *
- * Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/clk.h>
@@ -34,12 +33,11 @@
#define DRIVER_DESC "OHCI generic platform driver"
#define OHCI_MAX_CLKS 3
-#define OHCI_MAX_RESETS 2
#define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv)
struct ohci_platform_priv {
struct clk *clks[OHCI_MAX_CLKS];
- struct reset_control *resets[OHCI_MAX_RESETS];
+ struct reset_control *resets;
struct phy **phys;
int num_phys;
};
@@ -119,7 +117,7 @@ static int ohci_platform_probe(struct platform_device *dev)
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ohci_platform_priv *priv;
struct ohci_hcd *ohci;
- int err, irq, phy_num, clk = 0, rst = 0;
+ int err, irq, phy_num, clk = 0;
if (usb_disabled())
return -ENODEV;
@@ -204,21 +202,17 @@ static int ohci_platform_probe(struct platform_device *dev)
break;
}
}
- for (rst = 0; rst < OHCI_MAX_RESETS; rst++) {
- priv->resets[rst] =
- devm_reset_control_get_shared_by_index(
- &dev->dev, rst);
- if (IS_ERR(priv->resets[rst])) {
- err = PTR_ERR(priv->resets[rst]);
- if (err == -EPROBE_DEFER)
- goto err_reset;
- priv->resets[rst] = NULL;
- break;
- }
- err = reset_control_deassert(priv->resets[rst]);
- if (err)
- goto err_reset;
+
+ priv->resets = devm_reset_control_array_get_optional_shared(
+ &dev->dev);
+ if (IS_ERR(priv->resets)) {
+ err = PTR_ERR(priv->resets);
+ goto err_put_clks;
}
+
+ err = reset_control_deassert(priv->resets);
+ if (err)
+ goto err_put_clks;
}
if (pdata->big_endian_desc)
@@ -279,8 +273,7 @@ err_power:
pdata->power_off(dev);
err_reset:
pm_runtime_disable(&dev->dev);
- while (--rst >= 0)
- reset_control_assert(priv->resets[rst]);
+ reset_control_assert(priv->resets);
err_put_clks:
while (--clk >= 0)
clk_put(priv->clks[clk]);
@@ -298,7 +291,7 @@ static int ohci_platform_remove(struct platform_device *dev)
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
- int clk, rst;
+ int clk;
pm_runtime_get_sync(&dev->dev);
usb_remove_hcd(hcd);
@@ -306,8 +299,7 @@ static int ohci_platform_remove(struct platform_device *dev)
if (pdata->power_off)
pdata->power_off(dev);
- for (rst = 0; rst < OHCI_MAX_RESETS && priv->resets[rst]; rst++)
- reset_control_assert(priv->resets[rst]);
+ reset_control_assert(priv->resets);
for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++)
clk_put(priv->clks[clk]);
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 4f87a5c61b08..76a9b40b08f1 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index 71d8bc4c27f6..20a23d795adf 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PS3 OHCI Host Controller driver
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/firmware.h>
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 21c010ffb03c..3e2474959735 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 641fed609911..b2ec8c399363 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index b006b93126f7..4511e27e9da8 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 3a9ea32508df..ebec9a7699e3 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
@@ -42,7 +43,7 @@
#if 0
static void dump_hci_status(struct usb_hcd *hcd, const char *label)
{
- unsigned long status = sa1111_readl(hcd->regs + USB_STATUS);
+ unsigned long status = readl_relaxed(hcd->regs + USB_STATUS);
printk(KERN_DEBUG "%s USB_STATUS = { %s%s%s%s%s}\n", label,
((status & USB_STATUS_IRQHCIRMTWKUP) ? "IRQHCIRMTWKUP " : ""),
@@ -134,7 +135,7 @@ static int sa1111_start_hc(struct sa1111_dev *dev)
* Configure the power sense and control lines. Place the USB
* host controller in reset.
*/
- sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
+ writel_relaxed(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
dev->mapbase + USB_RESET);
/*
@@ -144,7 +145,7 @@ static int sa1111_start_hc(struct sa1111_dev *dev)
ret = sa1111_enable_device(dev);
if (ret == 0) {
udelay(11);
- sa1111_writel(usb_rst, dev->mapbase + USB_RESET);
+ writel_relaxed(usb_rst, dev->mapbase + USB_RESET);
}
return ret;
@@ -159,8 +160,8 @@ static void sa1111_stop_hc(struct sa1111_dev *dev)
/*
* Put the USB host controller into reset.
*/
- usb_rst = sa1111_readl(dev->mapbase + USB_RESET);
- sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
+ usb_rst = readl_relaxed(dev->mapbase + USB_RESET);
+ writel_relaxed(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
dev->mapbase + USB_RESET);
/*
@@ -178,7 +179,7 @@ static void sa1111_stop_hc(struct sa1111_dev *dev)
static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
{
struct usb_hcd *hcd;
- int ret;
+ int ret, irq;
if (usb_disabled())
return -ENODEV;
@@ -196,6 +197,12 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
hcd->rsrc_start = dev->res.start;
hcd->rsrc_len = resource_size(&dev->res);
+ irq = sa1111_get_irq(dev, 1);
+ if (irq <= 0) {
+ ret = irq ? : -ENXIO;
+ goto err1;
+ }
+
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_dbg(&dev->dev, "request_mem_region failed\n");
ret = -EBUSY;
@@ -208,7 +215,7 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
if (ret)
goto err2;
- ret = usb_add_hcd(hcd, dev->irq[1], 0);
+ ret = usb_add_hcd(hcd, irq, 0);
if (ret == 0) {
device_wakeup_enable(hcd->self.controller);
return ret;
@@ -241,8 +248,9 @@ static int ohci_hcd_sa1111_remove(struct sa1111_dev *dev)
return 0;
}
-static void ohci_hcd_sa1111_shutdown(struct sa1111_dev *dev)
+static void ohci_hcd_sa1111_shutdown(struct device *_dev)
{
+ struct sa1111_dev *dev = to_sa1111_device(_dev);
struct usb_hcd *hcd = sa1111_get_drvdata(dev);
if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
@@ -255,9 +263,9 @@ static struct sa1111_driver ohci_hcd_sa1111_driver = {
.drv = {
.name = "sa1111-ohci",
.owner = THIS_MODULE,
+ .shutdown = ohci_hcd_sa1111_shutdown,
},
.devid = SA1111_DEVID_USB,
.probe = ohci_hcd_sa1111_probe,
.remove = ohci_hcd_sa1111_remove,
- .shutdown = ohci_hcd_sa1111_shutdown,
};
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index d4e0f7cd96fa..c9233cddf9a2 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 56478ed2f932..69fa04697793 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OHCI HCD (Host Controller Driver) for USB.
*
@@ -5,10 +6,6 @@
* Deepak Sikri<deepak.sikri@st.com>
*
* Based on various ohci-*.c drivers
-*
-* This file is licensed under the terms of the GNU General Public
-* License version 2. This program is licensed "as is" without any
-* warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c
index 02816a1515a1..992807c9850a 100644
--- a/drivers/usb/host/ohci-st.c
+++ b/drivers/usb/host/ohci-st.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ST OHCI driver
*
@@ -6,10 +7,6 @@
* Author: Peter Griffin <peter.griffin@linaro.org>
*
* Derived from ohci-platform.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ohci-tilegx.c b/drivers/usb/host/ohci-tilegx.c
index e1b208da460a..d21ca3ce9a30 100644
--- a/drivers/usb/host/ohci-tilegx.c
+++ b/drivers/usb/host/ohci-tilegx.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
*/
/*
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index 16d081a093bb..a631dbb369d7 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OHCI HCD(Host Controller Driver) for USB.
*
@@ -18,10 +19,6 @@
* Written from sparse documentation from Toshiba and Sharp's driver
* for the 2.4 kernel,
* usb-ohci-tc6393.c(C) Copyright 2004 Lineo Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/*#include <linux/fs.h>
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 12742d002d2d..508a803139dd 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index ed20fb34c897..0bf7759aae78 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it>
* Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it>
*
* This code is *strongly* based on EHCI-HCD code by David Brownell since
* the chip is a quasi-EHCI compatible.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -3040,7 +3027,7 @@ idle_timeout:
qh_put(qh);
break;
}
- /* else FALL THROUGH */
+ /* fall through */
default:
nogood:
/* caller was supposed to have unlinked any requests;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 6dda3623a276..161536717025 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file contains code to reset and initialize USB host controllers.
* Some of it includes work-arounds for PCI hardware and BIOS quirks.
@@ -841,7 +842,7 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev)
ehci_bios_handoff(pdev, op_reg_base, cap, offset);
break;
case 0: /* Illegal reserved cap, set cap=0 so we exit */
- cap = 0; /* then fallthrough... */
+ cap = 0; /* fall through */
default:
dev_warn(&pdev->dev,
"EHCI: unrecognized capability %02x\n",
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 5e5fc9d7d533..f3d9ba420a97 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A66597 HCD (Host Controller Driver)
*
@@ -7,20 +8,6 @@
* Portions Copyright (C) 1999 Roman Weissgaerber
*
* Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/module.h>
@@ -1273,7 +1260,7 @@ static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
break;
}
- mod_timer(&r8a66597->td_timer[td->pipenum],
+ mod_timer(&r8a66597->timers[td->pipenum].td,
jiffies + msecs_to_jiffies(time));
}
}
@@ -1733,9 +1720,10 @@ static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port)
}
}
-static void r8a66597_interval_timer(unsigned long _r8a66597)
+static void r8a66597_interval_timer(struct timer_list *t)
{
- struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+ struct r8a66597_timers *timers = from_timer(timers, t, interval);
+ struct r8a66597 *r8a66597 = timers->r8a66597;
unsigned long flags;
u16 pipenum;
struct r8a66597_td *td;
@@ -1745,7 +1733,7 @@ static void r8a66597_interval_timer(unsigned long _r8a66597)
for (pipenum = 0; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
if (!(r8a66597->interval_map & (1 << pipenum)))
continue;
- if (timer_pending(&r8a66597->interval_timer[pipenum]))
+ if (timer_pending(&r8a66597->timers[pipenum].interval))
continue;
td = r8a66597_get_td(r8a66597, pipenum);
@@ -1756,9 +1744,10 @@ static void r8a66597_interval_timer(unsigned long _r8a66597)
spin_unlock_irqrestore(&r8a66597->lock, flags);
}
-static void r8a66597_td_timer(unsigned long _r8a66597)
+static void r8a66597_td_timer(struct timer_list *t)
{
- struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+ struct r8a66597_timers *timers = from_timer(timers, t, td);
+ struct r8a66597 *r8a66597 = timers->r8a66597;
unsigned long flags;
u16 pipenum;
struct r8a66597_td *td, *new_td = NULL;
@@ -1768,7 +1757,7 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
for (pipenum = 0; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
if (!(r8a66597->timeout_map & (1 << pipenum)))
continue;
- if (timer_pending(&r8a66597->td_timer[pipenum]))
+ if (timer_pending(&r8a66597->timers[pipenum].td))
continue;
td = r8a66597_get_td(r8a66597, pipenum);
@@ -1942,7 +1931,7 @@ static int r8a66597_urb_enqueue(struct usb_hcd *hcd,
if (request) {
if (td->pipe->info.timer_interval) {
r8a66597->interval_map |= 1 << td->pipenum;
- mod_timer(&r8a66597->interval_timer[td->pipenum],
+ mod_timer(&r8a66597->timers[td->pipenum].interval,
jiffies + msecs_to_jiffies(
td->pipe->info.timer_interval));
} else {
@@ -2495,11 +2484,10 @@ static int r8a66597_probe(struct platform_device *pdev)
for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
INIT_LIST_HEAD(&r8a66597->pipe_queue[i]);
- setup_timer(&r8a66597->td_timer[i], r8a66597_td_timer,
- (unsigned long)r8a66597);
- setup_timer(&r8a66597->interval_timer[i],
- r8a66597_interval_timer,
- (unsigned long)r8a66597);
+ r8a66597->timers[i].r8a66597 = r8a66597;
+ timer_setup(&r8a66597->timers[i].td, r8a66597_td_timer, 0);
+ timer_setup(&r8a66597->timers[i].interval,
+ r8a66597_interval_timer, 0);
}
INIT_LIST_HEAD(&r8a66597->child_device);
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
index 672cea307abb..51973a923526 100644
--- a/drivers/usb/host/r8a66597.h
+++ b/drivers/usb/host/r8a66597.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A66597 HCD (Host Controller Driver)
*
@@ -7,20 +8,6 @@
* Portions Copyright (C) 1999 Roman Weissgaerber
*
* Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __R8A66597_H__
@@ -107,6 +94,14 @@ struct r8a66597_root_hub {
struct r8a66597_device *dev;
};
+struct r8a66597;
+
+struct r8a66597_timers {
+ struct timer_list td;
+ struct timer_list interval;
+ struct r8a66597 *r8a66597;
+};
+
struct r8a66597 {
spinlock_t lock;
void __iomem *reg;
@@ -117,8 +112,7 @@ struct r8a66597 {
struct list_head pipe_queue[R8A66597_MAX_NUM_PIPE];
struct timer_list rh_timer;
- struct timer_list td_timer[R8A66597_MAX_NUM_PIPE];
- struct timer_list interval_timer[R8A66597_MAX_NUM_PIPE];
+ struct r8a66597_timers timers[R8A66597_MAX_NUM_PIPE];
unsigned short address_map;
unsigned short timeout_map;
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 24ad1d6cec25..601fb00603cc 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SL811HS HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 88a9bffe93df..72136373ffab 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PCMCIA driver for SL811HS (as found in REX-CFU1U)
* Filename: sl811_cs.c
diff --git a/drivers/usb/host/ssb-hcd.c b/drivers/usb/host/ssb-hcd.c
index 62b6b7804c66..016987764afb 100644
--- a/drivers/usb/host/ssb-hcd.c
+++ b/drivers/usb/host/ssb-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Sonics Silicon Backplane
* Broadcom USB-core driver (SSB bus glue)
@@ -15,8 +16,6 @@
*
* Derived from the USBcore related parts of Broadcom-SB
* Copyright 2005-2011 Broadcom Corporation
- *
- * Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/ssb/ssb.h>
#include <linux/delay.h>
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index c38855aed62c..032b8652910a 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Host Controller Driver for the Elan Digital Systems U132 adapter
*
@@ -7,11 +8,6 @@
* Author and Maintainer - Tony Olech - Elan Digital Systems
* tony.olech@elandigitalsystems.com
*
-* This program is free software;you can redistribute it and/or
-* modify it under the terms of the GNU General Public License as
-* published by the Free Software Foundation, version 2.
-*
-*
* This driver was written by Tony Olech(tony.olech@elandigitalsystems.com)
* based on various USB host drivers in the 2.6.15 linux kernel
* with constant reference to the 3rd Edition of Linux Device Drivers
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index c3267a78c94e..babeefd84ffd 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Universal Host Controller Interface driver for USB.
*
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index d97f0d9b3ce6..f1cc47292a59 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -187,7 +187,7 @@ struct uhci_qh {
* We need a special accessor for the element pointer because it is
* subject to asynchronous updates by the controller.
*/
-#define qh_element(qh) ACCESS_ONCE((qh)->element)
+#define qh_element(qh) READ_ONCE((qh)->element)
#define LINK_TO_QH(uhci, qh) (UHCI_PTR_QH((uhci)) | \
cpu_to_hc32((uhci), (qh)->dma_handle))
@@ -275,7 +275,7 @@ struct uhci_td {
* subject to asynchronous updates by the controller.
*/
#define td_status(uhci, td) hc32_to_cpu((uhci), \
- ACCESS_ONCE((td)->status))
+ READ_ONCE((td)->status))
#define LINK_TO_TD(uhci, td) (cpu_to_hc32((uhci), (td)->dma_handle))
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c
index 773249306031..c5ac9efb076a 100644
--- a/drivers/usb/host/whci/asl.c
+++ b/drivers/usb/host/whci/asl.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) asynchronous schedule management.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index 774b89d28fae..f154e5791bfd 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) debug.
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/slab.h>
#include <linux/kernel.h>
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index cf84269c3e6d..8af9dcfea127 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) driver.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/usb/host/whci/hw.c b/drivers/usb/host/whci/hw.c
index 6afa2e379160..22b3b7f7419d 100644
--- a/drivers/usb/host/whci/hw.c
+++ b/drivers/usb/host/whci/hw.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) hardware access helpers.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c
index ad8eb575c30a..82416973f773 100644
--- a/drivers/usb/host/whci/init.c
+++ b/drivers/usb/host/whci/init.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) initialization.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
diff --git a/drivers/usb/host/whci/int.c b/drivers/usb/host/whci/int.c
index 0c086b2790d1..7e4ad1b8f3e3 100644
--- a/drivers/usb/host/whci/int.c
+++ b/drivers/usb/host/whci/int.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) interrupt handling.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/uwb/umc.h>
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c
index 33c5580b4d25..bb84366f7bd3 100644
--- a/drivers/usb/host/whci/pzl.c
+++ b/drivers/usb/host/whci/pzl.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) periodic schedule management.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index c0e6812426b3..925166a207aa 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) qset management.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h
index c80c7d93bc4a..139476997e7c 100644
--- a/drivers/usb/host/whci/whcd.h
+++ b/drivers/usb/host/whci/whcd.h
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) private header.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#ifndef __WHCD_H
#define __WHCD_H
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h
index 4d4cbc0730bf..5a86a57a80cc 100644
--- a/drivers/usb/host/whci/whci-hc.h
+++ b/drivers/usb/host/whci/whci-hc.h
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) data structures.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#ifndef _WHCI_WHCI_HC_H
#define _WHCI_WHCI_HC_H
diff --git a/drivers/usb/host/whci/wusb.c b/drivers/usb/host/whci/wusb.c
index 8d2762682869..8a4d805ff63a 100644
--- a/drivers/usb/host/whci/wusb.c
+++ b/drivers/usb/host/whci/wusb.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) WUSB operations.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/uwb/umc.h>
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 2c83b37ae8f2..584d7b9a3683 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,19 +6,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "xhci.h"
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
new file mode 100644
index 000000000000..4f7895dbcf88
--- /dev/null
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xhci-debugfs.c - xHCI debugfs interface
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#include <linux/slab.h>
+
+#include "xhci.h"
+#include "xhci-debugfs.h"
+
+static const struct debugfs_reg32 xhci_cap_regs[] = {
+ dump_register(CAPLENGTH),
+ dump_register(HCSPARAMS1),
+ dump_register(HCSPARAMS2),
+ dump_register(HCSPARAMS3),
+ dump_register(HCCPARAMS1),
+ dump_register(DOORBELLOFF),
+ dump_register(RUNTIMEOFF),
+ dump_register(HCCPARAMS2),
+};
+
+static const struct debugfs_reg32 xhci_op_regs[] = {
+ dump_register(USBCMD),
+ dump_register(USBSTS),
+ dump_register(PAGESIZE),
+ dump_register(DNCTRL),
+ dump_register(CRCR),
+ dump_register(DCBAAP_LOW),
+ dump_register(DCBAAP_HIGH),
+ dump_register(CONFIG),
+};
+
+static const struct debugfs_reg32 xhci_runtime_regs[] = {
+ dump_register(MFINDEX),
+ dump_register(IR0_IMAN),
+ dump_register(IR0_IMOD),
+ dump_register(IR0_ERSTSZ),
+ dump_register(IR0_ERSTBA_LOW),
+ dump_register(IR0_ERSTBA_HIGH),
+ dump_register(IR0_ERDP_LOW),
+ dump_register(IR0_ERDP_HIGH),
+};
+
+static const struct debugfs_reg32 xhci_extcap_legsup[] = {
+ dump_register(EXTCAP_USBLEGSUP),
+ dump_register(EXTCAP_USBLEGCTLSTS),
+};
+
+static const struct debugfs_reg32 xhci_extcap_protocol[] = {
+ dump_register(EXTCAP_REVISION),
+ dump_register(EXTCAP_NAME),
+ dump_register(EXTCAP_PORTINFO),
+ dump_register(EXTCAP_PORTTYPE),
+ dump_register(EXTCAP_MANTISSA1),
+ dump_register(EXTCAP_MANTISSA2),
+ dump_register(EXTCAP_MANTISSA3),
+ dump_register(EXTCAP_MANTISSA4),
+ dump_register(EXTCAP_MANTISSA5),
+ dump_register(EXTCAP_MANTISSA6),
+};
+
+static const struct debugfs_reg32 xhci_extcap_dbc[] = {
+ dump_register(EXTCAP_DBC_CAPABILITY),
+ dump_register(EXTCAP_DBC_DOORBELL),
+ dump_register(EXTCAP_DBC_ERSTSIZE),
+ dump_register(EXTCAP_DBC_ERST_LOW),
+ dump_register(EXTCAP_DBC_ERST_HIGH),
+ dump_register(EXTCAP_DBC_ERDP_LOW),
+ dump_register(EXTCAP_DBC_ERDP_HIGH),
+ dump_register(EXTCAP_DBC_CONTROL),
+ dump_register(EXTCAP_DBC_STATUS),
+ dump_register(EXTCAP_DBC_PORTSC),
+ dump_register(EXTCAP_DBC_CONT_LOW),
+ dump_register(EXTCAP_DBC_CONT_HIGH),
+ dump_register(EXTCAP_DBC_DEVINFO1),
+ dump_register(EXTCAP_DBC_DEVINFO2),
+};
+
+static struct dentry *xhci_debugfs_root;
+
+static struct xhci_regset *xhci_debugfs_alloc_regset(struct xhci_hcd *xhci)
+{
+ struct xhci_regset *regset;
+
+ regset = kzalloc(sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return NULL;
+
+ /*
+ * Regsets are allocated and freed in order,
+ * so no lock is needed here.
+ */
+ INIT_LIST_HEAD(&regset->list);
+ list_add_tail(&regset->list, &xhci->regset_list);
+
+ return regset;
+}
+
+static void xhci_debugfs_free_regset(struct xhci_regset *regset)
+{
+ if (!regset)
+ return;
+
+ list_del(&regset->list);
+ kfree(regset);
+}
+
+static void xhci_debugfs_regset(struct xhci_hcd *xhci, u32 base,
+ const struct debugfs_reg32 *regs,
+ size_t nregs, struct dentry *parent,
+ const char *fmt, ...)
+{
+ struct xhci_regset *rgs;
+ va_list args;
+ struct debugfs_regset32 *regset;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
+ rgs = xhci_debugfs_alloc_regset(xhci);
+ if (!rgs)
+ return;
+
+ va_start(args, fmt);
+ vsnprintf(rgs->name, sizeof(rgs->name), fmt, args);
+ va_end(args);
+
+ regset = &rgs->regset;
+ regset->regs = regs;
+ regset->nregs = nregs;
+ regset->base = hcd->regs + base;
+
+ debugfs_create_regset32((const char *)rgs->name, 0444, parent, regset);
+}
+
+static void xhci_debugfs_extcap_regset(struct xhci_hcd *xhci, int cap_id,
+ const struct debugfs_reg32 *regs,
+ size_t n, const char *cap_name)
+{
+ u32 offset;
+ int index = 0;
+ size_t psic, nregs = n;
+ void __iomem *base = &xhci->cap_regs->hc_capbase;
+
+ offset = xhci_find_next_ext_cap(base, 0, cap_id);
+ while (offset) {
+ if (cap_id == XHCI_EXT_CAPS_PROTOCOL) {
+ psic = XHCI_EXT_PORT_PSIC(readl(base + offset + 8));
+ nregs = min(4 + psic, n);
+ }
+
+ xhci_debugfs_regset(xhci, offset, regs, nregs,
+ xhci->debugfs_root, "%s:%02d",
+ cap_name, index);
+ offset = xhci_find_next_ext_cap(base, offset, cap_id);
+ index++;
+ }
+}
+
+static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
+{
+ dma_addr_t dma;
+ struct xhci_ring *ring = s->private;
+
+ dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
+ seq_printf(s, "%pad\n", &dma);
+
+ return 0;
+}
+
+static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
+{
+ dma_addr_t dma;
+ struct xhci_ring *ring = s->private;
+
+ dma = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
+ seq_printf(s, "%pad\n", &dma);
+
+ return 0;
+}
+
+static int xhci_ring_cycle_show(struct seq_file *s, void *unused)
+{
+ struct xhci_ring *ring = s->private;
+
+ seq_printf(s, "%d\n", ring->cycle_state);
+
+ return 0;
+}
+
+static void xhci_ring_dump_segment(struct seq_file *s,
+ struct xhci_segment *seg)
+{
+ int i;
+ dma_addr_t dma;
+ union xhci_trb *trb;
+
+ for (i = 0; i < TRBS_PER_SEGMENT; i++) {
+ trb = &seg->trbs[i];
+ dma = seg->dma + i * sizeof(*trb);
+ seq_printf(s, "%pad: %s\n", &dma,
+ xhci_decode_trb(trb->generic.field[0],
+ trb->generic.field[1],
+ trb->generic.field[2],
+ trb->generic.field[3]));
+ }
+}
+
+static int xhci_ring_trb_show(struct seq_file *s, void *unused)
+{
+ int i;
+ struct xhci_ring *ring = s->private;
+ struct xhci_segment *seg = ring->first_seg;
+
+ for (i = 0; i < ring->num_segs; i++) {
+ xhci_ring_dump_segment(s, seg);
+ seg = seg->next;
+ }
+
+ return 0;
+}
+
+static struct xhci_file_map ring_files[] = {
+ {"enqueue", xhci_ring_enqueue_show, },
+ {"dequeue", xhci_ring_dequeue_show, },
+ {"cycle", xhci_ring_cycle_show, },
+ {"trbs", xhci_ring_trb_show, },
+};
+
+static int xhci_ring_open(struct inode *inode, struct file *file)
+{
+ int i;
+ struct xhci_file_map *f_map;
+ const char *file_name = file_dentry(file)->d_iname;
+
+ for (i = 0; i < ARRAY_SIZE(ring_files); i++) {
+ f_map = &ring_files[i];
+
+ if (strcmp(f_map->name, file_name) == 0)
+ break;
+ }
+
+ return single_open(file, f_map->show, inode->i_private);
+}
+
+static const struct file_operations xhci_ring_fops = {
+ .open = xhci_ring_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int xhci_slot_context_show(struct seq_file *s, void *unused)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_slot_ctx *slot_ctx;
+ struct xhci_slot_priv *priv = s->private;
+ struct xhci_virt_device *dev = priv->dev;
+
+ xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
+ slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
+ seq_printf(s, "%pad: %s\n", &dev->out_ctx->dma,
+ xhci_decode_slot_context(slot_ctx->dev_info,
+ slot_ctx->dev_info2,
+ slot_ctx->tt_info,
+ slot_ctx->dev_state));
+
+ return 0;
+}
+
+static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
+{
+ int dci;
+ dma_addr_t dma;
+ struct xhci_hcd *xhci;
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_slot_priv *priv = s->private;
+ struct xhci_virt_device *dev = priv->dev;
+
+ xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
+
+ for (dci = 1; dci < 32; dci++) {
+ ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci);
+ dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params);
+ seq_printf(s, "%pad: %s\n", &dma,
+ xhci_decode_ep_context(ep_ctx->ep_info,
+ ep_ctx->ep_info2,
+ ep_ctx->deq,
+ ep_ctx->tx_info));
+ }
+
+ return 0;
+}
+
+static int xhci_device_name_show(struct seq_file *s, void *unused)
+{
+ struct xhci_slot_priv *priv = s->private;
+ struct xhci_virt_device *dev = priv->dev;
+
+ seq_printf(s, "%s\n", dev_name(&dev->udev->dev));
+
+ return 0;
+}
+
+static struct xhci_file_map context_files[] = {
+ {"name", xhci_device_name_show, },
+ {"slot-context", xhci_slot_context_show, },
+ {"ep-context", xhci_endpoint_context_show, },
+};
+
+static int xhci_context_open(struct inode *inode, struct file *file)
+{
+ int i;
+ struct xhci_file_map *f_map;
+ const char *file_name = file_dentry(file)->d_iname;
+
+ for (i = 0; i < ARRAY_SIZE(context_files); i++) {
+ f_map = &context_files[i];
+
+ if (strcmp(f_map->name, file_name) == 0)
+ break;
+ }
+
+ return single_open(file, f_map->show, inode->i_private);
+}
+
+static const struct file_operations xhci_context_fops = {
+ .open = xhci_context_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
+ struct xhci_file_map *files,
+ size_t nentries, void *data,
+ struct dentry *parent,
+ const struct file_operations *fops)
+{
+ int i;
+
+ for (i = 0; i < nentries; i++)
+ debugfs_create_file(files[i].name, 0444, parent, data, fops);
+}
+
+static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci,
+ struct xhci_ring *ring,
+ const char *name,
+ struct dentry *parent)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir(name, parent);
+ xhci_debugfs_create_files(xhci, ring_files, ARRAY_SIZE(ring_files),
+ ring, dir, &xhci_ring_fops);
+
+ return dir;
+}
+
+static void xhci_debugfs_create_context_files(struct xhci_hcd *xhci,
+ struct dentry *parent,
+ int slot_id)
+{
+ struct xhci_virt_device *dev = xhci->devs[slot_id];
+
+ xhci_debugfs_create_files(xhci, context_files,
+ ARRAY_SIZE(context_files),
+ dev->debugfs_private,
+ parent, &xhci_context_fops);
+}
+
+void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_device *dev,
+ int ep_index)
+{
+ struct xhci_ep_priv *epriv;
+ struct xhci_slot_priv *spriv = dev->debugfs_private;
+
+ if (spriv->eps[ep_index])
+ return;
+
+ epriv = kzalloc(sizeof(*epriv), GFP_KERNEL);
+ if (!epriv)
+ return;
+
+ snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
+ epriv->root = xhci_debugfs_create_ring_dir(xhci,
+ dev->eps[ep_index].new_ring,
+ epriv->name,
+ spriv->root);
+ spriv->eps[ep_index] = epriv;
+}
+
+void xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_device *dev,
+ int ep_index)
+{
+ struct xhci_ep_priv *epriv;
+ struct xhci_slot_priv *spriv = dev->debugfs_private;
+
+ if (!spriv || !spriv->eps[ep_index])
+ return;
+
+ epriv = spriv->eps[ep_index];
+ debugfs_remove_recursive(epriv->root);
+ spriv->eps[ep_index] = NULL;
+ kfree(epriv);
+}
+
+void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id)
+{
+ struct xhci_slot_priv *priv;
+ struct xhci_virt_device *dev = xhci->devs[slot_id];
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return;
+
+ snprintf(priv->name, sizeof(priv->name), "%02d", slot_id);
+ priv->root = debugfs_create_dir(priv->name, xhci->debugfs_slots);
+ priv->dev = dev;
+ dev->debugfs_private = priv;
+
+ xhci_debugfs_create_ring_dir(xhci, dev->eps[0].ring,
+ "ep00", priv->root);
+
+ xhci_debugfs_create_context_files(xhci, priv->root, slot_id);
+}
+
+void xhci_debugfs_remove_slot(struct xhci_hcd *xhci, int slot_id)
+{
+ int i;
+ struct xhci_slot_priv *priv;
+ struct xhci_virt_device *dev = xhci->devs[slot_id];
+
+ if (!dev || !dev->debugfs_private)
+ return;
+
+ priv = dev->debugfs_private;
+
+ debugfs_remove_recursive(priv->root);
+
+ for (i = 0; i < 31; i++)
+ kfree(priv->eps[i]);
+
+ kfree(priv);
+ dev->debugfs_private = NULL;
+}
+
+void xhci_debugfs_init(struct xhci_hcd *xhci)
+{
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+
+ xhci->debugfs_root = debugfs_create_dir(dev_name(dev),
+ xhci_debugfs_root);
+
+ INIT_LIST_HEAD(&xhci->regset_list);
+
+ xhci_debugfs_regset(xhci,
+ 0,
+ xhci_cap_regs, ARRAY_SIZE(xhci_cap_regs),
+ xhci->debugfs_root, "reg-cap");
+
+ xhci_debugfs_regset(xhci,
+ HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)),
+ xhci_op_regs, ARRAY_SIZE(xhci_op_regs),
+ xhci->debugfs_root, "reg-op");
+
+ xhci_debugfs_regset(xhci,
+ readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK,
+ xhci_runtime_regs, ARRAY_SIZE(xhci_runtime_regs),
+ xhci->debugfs_root, "reg-runtime");
+
+ xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_LEGACY,
+ xhci_extcap_legsup,
+ ARRAY_SIZE(xhci_extcap_legsup),
+ "reg-ext-legsup");
+
+ xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_PROTOCOL,
+ xhci_extcap_protocol,
+ ARRAY_SIZE(xhci_extcap_protocol),
+ "reg-ext-protocol");
+
+ xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_DEBUG,
+ xhci_extcap_dbc,
+ ARRAY_SIZE(xhci_extcap_dbc),
+ "reg-ext-dbc");
+
+ xhci_debugfs_create_ring_dir(xhci, xhci->cmd_ring,
+ "command-ring",
+ xhci->debugfs_root);
+
+ xhci_debugfs_create_ring_dir(xhci, xhci->event_ring,
+ "event-ring",
+ xhci->debugfs_root);
+
+ xhci->debugfs_slots = debugfs_create_dir("devices", xhci->debugfs_root);
+}
+
+void xhci_debugfs_exit(struct xhci_hcd *xhci)
+{
+ struct xhci_regset *rgs, *tmp;
+
+ debugfs_remove_recursive(xhci->debugfs_root);
+ xhci->debugfs_root = NULL;
+ xhci->debugfs_slots = NULL;
+
+ list_for_each_entry_safe(rgs, tmp, &xhci->regset_list, list)
+ xhci_debugfs_free_regset(rgs);
+}
+
+void __init xhci_debugfs_create_root(void)
+{
+ xhci_debugfs_root = debugfs_create_dir("xhci", usb_debug_root);
+}
+
+void __exit xhci_debugfs_remove_root(void)
+{
+ debugfs_remove_recursive(xhci_debugfs_root);
+ xhci_debugfs_root = NULL;
+}
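
xhci_debugfs_regset() above is a thin wrapper around the generic debugfs regset helper: fill a struct debugfs_regset32 with a table of register names/offsets and an ioremapped base, and debugfs_create_regset32() exposes a read-only file that dumps each register. A minimal sketch of that core pattern (hypothetical register names, assuming an already-mapped MMIO base):

#include <linux/kernel.h>
#include <linux/debugfs.h>

/* hypothetical register table, for illustration only */
static const struct debugfs_reg32 my_regs[] = {
	{ .name = "STATUS",  .offset = 0x00 },
	{ .name = "CONTROL", .offset = 0x04 },
};

static struct debugfs_regset32 my_regset;

static void my_debugfs_init(struct dentry *parent, void __iomem *mmio)
{
	my_regset.regs  = my_regs;
	my_regset.nregs = ARRAY_SIZE(my_regs);
	my_regset.base  = mmio;

	/* creates a read-only file that prints one "NAME = 0x..." line per register */
	debugfs_create_regset32("registers", 0444, parent, &my_regset);
}
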
diff --git a/drivers/usb/host/xhci-debugfs.h b/drivers/usb/host/xhci-debugfs.h
new file mode 100644
index 000000000000..ac5bc40f5c3a
--- /dev/null
+++ b/drivers/usb/host/xhci-debugfs.h
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xhci-debugfs.h - xHCI debugfs interface
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#ifndef __LINUX_XHCI_DEBUGFS_H
+#define __LINUX_XHCI_DEBUGFS_H
+
+#include <linux/debugfs.h>
+
+#define DEBUGFS_NAMELEN 32
+
+#define REG_CAPLENGTH 0x00
+#define REG_HCSPARAMS1 0x04
+#define REG_HCSPARAMS2 0x08
+#define REG_HCSPARAMS3 0x0c
+#define REG_HCCPARAMS1 0x10
+#define REG_DOORBELLOFF 0x14
+#define REG_RUNTIMEOFF 0x18
+#define REG_HCCPARAMS2 0x1c
+
+#define REG_USBCMD 0x00
+#define REG_USBSTS 0x04
+#define REG_PAGESIZE 0x08
+#define REG_DNCTRL 0x14
+#define REG_CRCR 0x18
+#define REG_DCBAAP_LOW 0x30
+#define REG_DCBAAP_HIGH 0x34
+#define REG_CONFIG 0x38
+
+#define REG_MFINDEX 0x00
+#define REG_IR0_IMAN 0x20
+#define REG_IR0_IMOD 0x24
+#define REG_IR0_ERSTSZ 0x28
+#define REG_IR0_ERSTBA_LOW 0x30
+#define REG_IR0_ERSTBA_HIGH 0x34
+#define REG_IR0_ERDP_LOW 0x38
+#define REG_IR0_ERDP_HIGH 0x3c
+
+#define REG_EXTCAP_USBLEGSUP 0x00
+#define REG_EXTCAP_USBLEGCTLSTS 0x04
+
+#define REG_EXTCAP_REVISION 0x00
+#define REG_EXTCAP_NAME 0x04
+#define REG_EXTCAP_PORTINFO 0x08
+#define REG_EXTCAP_PORTTYPE 0x0c
+#define REG_EXTCAP_MANTISSA1 0x10
+#define REG_EXTCAP_MANTISSA2 0x14
+#define REG_EXTCAP_MANTISSA3 0x18
+#define REG_EXTCAP_MANTISSA4 0x1c
+#define REG_EXTCAP_MANTISSA5 0x20
+#define REG_EXTCAP_MANTISSA6 0x24
+
+#define REG_EXTCAP_DBC_CAPABILITY 0x00
+#define REG_EXTCAP_DBC_DOORBELL 0x04
+#define REG_EXTCAP_DBC_ERSTSIZE 0x08
+#define REG_EXTCAP_DBC_ERST_LOW 0x10
+#define REG_EXTCAP_DBC_ERST_HIGH 0x14
+#define REG_EXTCAP_DBC_ERDP_LOW 0x18
+#define REG_EXTCAP_DBC_ERDP_HIGH 0x1c
+#define REG_EXTCAP_DBC_CONTROL 0x20
+#define REG_EXTCAP_DBC_STATUS 0x24
+#define REG_EXTCAP_DBC_PORTSC 0x28
+#define REG_EXTCAP_DBC_CONT_LOW 0x30
+#define REG_EXTCAP_DBC_CONT_HIGH 0x34
+#define REG_EXTCAP_DBC_DEVINFO1 0x38
+#define REG_EXTCAP_DBC_DEVINFO2 0x3c
+
+#define dump_register(nm) \
+{ \
+ .name = __stringify(nm), \
+ .offset = REG_ ##nm, \
+}
+
+struct xhci_regset {
+ char name[DEBUGFS_NAMELEN];
+ struct debugfs_regset32 regset;
+ size_t nregs;
+ struct dentry *parent;
+ struct list_head list;
+};
+
+struct xhci_file_map {
+ const char *name;
+ int (*show)(struct seq_file *s, void *unused);
+};
+
+struct xhci_ep_priv {
+ char name[DEBUGFS_NAMELEN];
+ struct dentry *root;
+};
+
+struct xhci_slot_priv {
+ char name[DEBUGFS_NAMELEN];
+ struct dentry *root;
+ struct xhci_ep_priv *eps[31];
+ struct xhci_virt_device *dev;
+};
+
+#ifdef CONFIG_DEBUG_FS
+void xhci_debugfs_init(struct xhci_hcd *xhci);
+void xhci_debugfs_exit(struct xhci_hcd *xhci);
+void __init xhci_debugfs_create_root(void);
+void __exit xhci_debugfs_remove_root(void);
+void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id);
+void xhci_debugfs_remove_slot(struct xhci_hcd *xhci, int slot_id);
+void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int ep_index);
+void xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int ep_index);
+#else
+static inline void xhci_debugfs_init(struct xhci_hcd *xhci) { }
+static inline void xhci_debugfs_exit(struct xhci_hcd *xhci) { }
+static inline void __init xhci_debugfs_create_root(void) { }
+static inline void __exit xhci_debugfs_remove_root(void) { }
+static inline void xhci_debugfs_create_slot(struct xhci_hcd *x, int s) { }
+static inline void xhci_debugfs_remove_slot(struct xhci_hcd *x, int s) { }
+static inline void
+xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int ep_index) { }
+static inline void
+xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int ep_index) { }
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __LINUX_XHCI_DEBUGFS_H */
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 28deea584884..bf7316e130d3 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,19 +6,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Up to 16 ms to halt an HC */
#define XHCI_MAX_HALT_USEC (16*1000)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a2336deb5e36..2a90229be7a6 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,19 +6,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
@@ -634,7 +622,10 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Disable all slots\n");
spin_unlock_irqrestore(&xhci->lock, *flags);
for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
- retval = xhci_disable_slot(xhci, NULL, i);
+ if (!xhci->devs[i])
+ continue;
+
+ retval = xhci_disable_slot(xhci, i);
if (retval)
xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
i, retval);
@@ -1374,6 +1365,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case USB_PORT_FEAT_C_SUSPEND:
bus_state->port_c_suspend &= ~(1 << wIndex);
+ /* fall through */
case USB_PORT_FEAT_C_RESET:
case USB_PORT_FEAT_C_BH_PORT_RESET:
case USB_PORT_FEAT_C_CONNECTION:
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 2a82c927ded2..e1fba4688509 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,19 +6,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/usb.h>
@@ -28,6 +16,7 @@
#include "xhci.h"
#include "xhci-trace.h"
+#include "xhci-debugfs.h"
/*
* Allocates a generic ring segment from the ring pool, sets the dma address,
@@ -465,8 +454,6 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
return 0;
}
-#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
-
static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags)
{
@@ -801,8 +788,8 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep)
{
- setup_timer(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
- (unsigned long)ep);
+ timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
+ 0);
ep->xhci = xhci;
}
@@ -961,6 +948,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
}
}
/* we are now at a leaf device */
+ xhci_debugfs_remove_slot(xhci, slot_id);
xhci_free_virt_device(xhci, slot_id);
}
@@ -1311,6 +1299,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
* since it uses the same rules as low speed interrupt
* endpoints.
*/
+ /* fall through */
case USB_SPEED_LOW:
if (usb_endpoint_xfer_int(&ep->desc) ||
@@ -1496,7 +1485,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
EP_AVG_TRB_LENGTH(avg_trb_len));
- /* FIXME Debug endpoint context */
return 0;
}
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 6e7ddf6cafae..eea7360a18fc 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015 MediaTek Inc.
* Author:
* Zhigang.Wei <zhigang.wei@mediatek.com>
* Chunfeng.Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/kernel.h>
@@ -287,12 +278,13 @@ static bool need_bw_sch(struct usb_host_endpoint *ep,
int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
{
+ struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
struct mu3h_sch_bw_info *sch_array;
int num_usb_bus;
int i;
/* ss IN and OUT are separated */
- num_usb_bus = mtk->num_u3_ports * 2 + mtk->num_u2_ports;
+ num_usb_bus = xhci->num_usb3_ports * 2 + xhci->num_usb2_ports;
sch_array = kcalloc(num_usb_bus, sizeof(*sch_array), GFP_KERNEL);
if (sch_array == NULL)
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 8fb60657ed4f..b62a1d23244b 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MediaTek xHCI Host Controller Driver
*
* Copyright (c) 2015 MediaTek Inc.
* Author:
* Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/clk.h>
@@ -43,6 +34,7 @@
/* ip_pw_sts1 register */
#define STS1_IP_SLEEP_STS BIT(30)
+#define STS1_U3_MAC_RST BIT(16)
#define STS1_XHCI_RST BIT(11)
#define STS1_SYS125_RST BIT(10)
#define STS1_REF_RST BIT(8)
@@ -91,6 +83,7 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
{
struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
u32 value, check_val;
+ int u3_ports_disabled = 0;
int ret;
int i;
@@ -102,8 +95,13 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
value &= ~CTRL1_IP_HOST_PDN;
writel(value, &ippc->ip_pw_ctr1);
- /* power on and enable all u3 ports */
+ /* power on and enable u3 ports except skipped ones */
for (i = 0; i < mtk->num_u3_ports; i++) {
+ if ((0x1 << i) & mtk->u3p_dis_msk) {
+ u3_ports_disabled++;
+ continue;
+ }
+
value = readl(&ippc->u3_ctrl_p[i]);
value &= ~(CTRL_U3_PORT_PDN | CTRL_U3_PORT_DIS);
value |= CTRL_U3_PORT_HOST_SEL;
@@ -125,6 +123,9 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
check_val = STS1_SYSPLL_STABLE | STS1_REF_RST |
STS1_SYS125_RST | STS1_XHCI_RST;
+ if (mtk->num_u3_ports > u3_ports_disabled)
+ check_val |= STS1_U3_MAC_RST;
+
ret = readl_poll_timeout(&ippc->ip_pw_sts1, value,
(check_val == (value & check_val)), 100, 20000);
if (ret) {
@@ -145,8 +146,11 @@ static int xhci_mtk_host_disable(struct xhci_hcd_mtk *mtk)
if (!mtk->has_ippc)
return 0;
- /* power down all u3 ports */
+ /* power down u3 ports except skipped ones */
for (i = 0; i < mtk->num_u3_ports; i++) {
+ if ((0x1 << i) & mtk->u3p_dis_msk)
+ continue;
+
value = readl(&ippc->u3_ctrl_p[i]);
value |= CTRL_U3_PORT_PDN;
writel(value, &ippc->u3_ctrl_p[i]);
@@ -208,6 +212,41 @@ static int xhci_mtk_ssusb_config(struct xhci_hcd_mtk *mtk)
return xhci_mtk_host_enable(mtk);
}
+/* ignore the error if the clock does not exist */
+static struct clk *optional_clk_get(struct device *dev, const char *id)
+{
+ struct clk *opt_clk;
+
+ opt_clk = devm_clk_get(dev, id);
+ /* ignore any error except -EPROBE_DEFER */
+ if (IS_ERR(opt_clk) && (PTR_ERR(opt_clk) != -EPROBE_DEFER))
+ opt_clk = NULL;
+
+ return opt_clk;
+}
+
+static int xhci_mtk_clks_get(struct xhci_hcd_mtk *mtk)
+{
+ struct device *dev = mtk->dev;
+
+ mtk->sys_clk = devm_clk_get(dev, "sys_ck");
+ if (IS_ERR(mtk->sys_clk)) {
+ dev_err(dev, "fail to get sys_ck\n");
+ return PTR_ERR(mtk->sys_clk);
+ }
+
+ mtk->ref_clk = optional_clk_get(dev, "ref_ck");
+ if (IS_ERR(mtk->ref_clk))
+ return PTR_ERR(mtk->ref_clk);
+
+ mtk->mcu_clk = optional_clk_get(dev, "mcu_ck");
+ if (IS_ERR(mtk->mcu_clk))
+ return PTR_ERR(mtk->mcu_clk);
+
+ mtk->dma_clk = optional_clk_get(dev, "dma_ck");
+ return PTR_ERR_OR_ZERO(mtk->dma_clk);
+}
+
static int xhci_mtk_clks_enable(struct xhci_hcd_mtk *mtk)
{
int ret;
@@ -224,37 +263,34 @@ static int xhci_mtk_clks_enable(struct xhci_hcd_mtk *mtk)
goto sys_clk_err;
}
- if (mtk->wakeup_src) {
- ret = clk_prepare_enable(mtk->wk_deb_p0);
- if (ret) {
- dev_err(mtk->dev, "failed to enable wk_deb_p0\n");
- goto usb_p0_err;
- }
+ ret = clk_prepare_enable(mtk->mcu_clk);
+ if (ret) {
+ dev_err(mtk->dev, "failed to enable mcu_clk\n");
+ goto mcu_clk_err;
+ }
- ret = clk_prepare_enable(mtk->wk_deb_p1);
- if (ret) {
- dev_err(mtk->dev, "failed to enable wk_deb_p1\n");
- goto usb_p1_err;
- }
+ ret = clk_prepare_enable(mtk->dma_clk);
+ if (ret) {
+ dev_err(mtk->dev, "failed to enable dma_clk\n");
+ goto dma_clk_err;
}
+
return 0;
-usb_p1_err:
- clk_disable_unprepare(mtk->wk_deb_p0);
-usb_p0_err:
+dma_clk_err:
+ clk_disable_unprepare(mtk->mcu_clk);
+mcu_clk_err:
clk_disable_unprepare(mtk->sys_clk);
sys_clk_err:
clk_disable_unprepare(mtk->ref_clk);
ref_clk_err:
- return -EINVAL;
+ return ret;
}
static void xhci_mtk_clks_disable(struct xhci_hcd_mtk *mtk)
{
- if (mtk->wakeup_src) {
- clk_disable_unprepare(mtk->wk_deb_p1);
- clk_disable_unprepare(mtk->wk_deb_p0);
- }
+ clk_disable_unprepare(mtk->dma_clk);
+ clk_disable_unprepare(mtk->mcu_clk);
clk_disable_unprepare(mtk->sys_clk);
clk_disable_unprepare(mtk->ref_clk);
}
@@ -358,18 +394,6 @@ static int usb_wakeup_of_property_parse(struct xhci_hcd_mtk *mtk,
if (!mtk->wakeup_src)
return 0;
- mtk->wk_deb_p0 = devm_clk_get(dev, "wakeup_deb_p0");
- if (IS_ERR(mtk->wk_deb_p0)) {
- dev_err(dev, "fail to get wakeup_deb_p0\n");
- return PTR_ERR(mtk->wk_deb_p0);
- }
-
- mtk->wk_deb_p1 = devm_clk_get(dev, "wakeup_deb_p1");
- if (IS_ERR(mtk->wk_deb_p1)) {
- dev_err(dev, "fail to get wakeup_deb_p1\n");
- return PTR_ERR(mtk->wk_deb_p1);
- }
-
mtk->pericfg = syscon_regmap_lookup_by_phandle(dn,
"mediatek,syscon-wakeup");
if (IS_ERR(mtk->pericfg)) {
@@ -492,7 +516,6 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
/* called during probe() after chip reset completes */
static int xhci_mtk_setup(struct usb_hcd *hcd)
{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
int ret;
@@ -507,8 +530,6 @@ static int xhci_mtk_setup(struct usb_hcd *hcd)
return ret;
if (usb_hcd_is_primary_hcd(hcd)) {
- mtk->num_u3_ports = xhci->num_usb3_ports;
- mtk->num_u2_ports = xhci->num_usb2_ports;
ret = xhci_mtk_sch_init(mtk);
if (ret)
return ret;
@@ -552,26 +573,14 @@ static int xhci_mtk_probe(struct platform_device *pdev)
return PTR_ERR(mtk->vusb33);
}
- mtk->sys_clk = devm_clk_get(dev, "sys_ck");
- if (IS_ERR(mtk->sys_clk)) {
- dev_err(dev, "fail to get sys_ck\n");
- return PTR_ERR(mtk->sys_clk);
- }
-
- /*
- * reference clock is usually a "fixed-clock", make it optional
- * for backward compatibility and ignore the error if it does
- * not exist.
- */
- mtk->ref_clk = devm_clk_get(dev, "ref_ck");
- if (IS_ERR(mtk->ref_clk)) {
- if (PTR_ERR(mtk->ref_clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- mtk->ref_clk = NULL;
- }
+ ret = xhci_mtk_clks_get(mtk);
+ if (ret)
+ return ret;
mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable");
+ /* optional property, ignore the error if it does not exist */
+ of_property_read_u32(node, "mediatek,u3p-dis-msk",
+ &mtk->u3p_dis_msk);
ret = usb_wakeup_of_property_parse(mtk, node);
if (ret)
@@ -606,15 +615,10 @@ static int xhci_mtk_probe(struct platform_device *pdev)
}
/* Initialize dma_mask and coherent_dma_mask to 32-bits */
- ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
goto disable_clk;
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- else
- dma_set_mask(dev, DMA_BIT_MASK(32));
-
hcd = usb_create_hcd(driver, dev, dev_name(dev));
if (!hcd) {
ret = -ENOMEM;
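
The last hunk above folds the separate coherent-mask call and the open-coded dma_mask fixup into a single dma_set_mask_and_coherent(), which configures the streaming and coherent DMA masks together. A minimal sketch of the new form (hypothetical helper name):

#include <linux/dma-mapping.h>

static int my_set_dma_masks(struct device *dev)
{
	/* set both the streaming and the coherent DMA mask to 32 bits;
	 * returns a negative errno if 32-bit addressing cannot be used
	 */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
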
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index 3aa5e1d25064..6b74ce5b7564 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015 MediaTek Inc.
* Author:
* Zhigang.Wei <zhigang.wei@mediatek.com>
* Chunfeng.Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _XHCI_MTK_H_
@@ -121,12 +112,13 @@ struct xhci_hcd_mtk {
bool has_ippc;
int num_u2_ports;
int num_u3_ports;
+ int u3p_dis_msk;
struct regulator *vusb33;
struct regulator *vbus;
struct clk *sys_clk; /* sys and mac clock */
struct clk *ref_clk;
- struct clk *wk_deb_p0; /* port0's wakeup debounce clock */
- struct clk *wk_deb_p1;
+ struct clk *mcu_clk;
+ struct clk *dma_clk;
struct regmap *pericfg;
struct phy **phys;
int num_phys;
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
index 85908a3ecb8f..32e158568788 100644
--- a/drivers/usb/host/xhci-mvebu.c
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Marvell
* Author: Gregory CLEMENT <gregory.clement@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#include <linux/io.h>
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
index 301fc984cae6..09791df2cec0 100644
--- a/drivers/usb/host/xhci-mvebu.h
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Marvell
*
* Gregory Clement <gregory.clement@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef __LINUX_XHCI_MVEBU_H
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 76f392954733..7ef1274ef7f7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver PCI Bus Glue.
*
@@ -5,19 +6,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/pci.h>
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 1cb6eaef4ae1..09f164f8cf8c 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xhci-plat.c - xHCI host controller driver platform Bus Glue.
*
@@ -5,10 +6,6 @@
* Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* A lot of code borrowed from the Linux xHCI driver.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#include <linux/clk.h>
@@ -16,6 +13,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>
#include <linux/slab.h>
@@ -152,7 +150,7 @@ MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
static int xhci_plat_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
+ const struct xhci_plat_priv *priv_match;
const struct hc_driver *driver;
struct device *sysdev;
struct xhci_hcd *xhci;
@@ -242,9 +240,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
}
xhci = hcd_to_xhci(hcd);
- match = of_match_node(usb_xhci_of_match, pdev->dev.of_node);
- if (match) {
- const struct xhci_plat_priv *priv_match = match->data;
+ priv_match = of_device_get_match_data(&pdev->dev);
+ if (priv_match) {
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
/* Just copy data for now */
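
The hunk above drops the open-coded of_match_node() lookup in favour of of_device_get_match_data(), which hands back the matched OF table entry's .data pointer (or NULL when there is no match data). A minimal sketch of the same pattern (hypothetical driver and structure names):

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_plat_priv {			/* hypothetical per-compatible data */
	unsigned int quirks;
};

static int my_plat_probe(struct platform_device *pdev)
{
	const struct my_plat_priv *data;

	/* NULL if the device matched an entry without .data, or not via OF */
	data = of_device_get_match_data(&pdev->dev);
	if (data)
		dev_info(&pdev->dev, "quirks: %#x\n", data->quirks);

	return 0;
}
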
@@ -263,6 +260,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto disable_clk;
}
+ if (device_property_read_bool(sysdev, "usb2-lpm-disable"))
+ xhci->quirks |= XHCI_HW_LPM_DISABLE;
+
if (device_property_read_bool(sysdev, "usb3-lpm-capable"))
xhci->quirks |= XHCI_LPM_SUPPORT;
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 29b227895b07..ae29f22ff5bd 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xhci-plat.h - xHCI host controller driver platform Bus Glue.
*
* Copyright (C) 2015 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef _XHCI_PLAT_H
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 198bc188ab25..f0b559660007 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver for R-Car SoCs
*
* Copyright (C) 2014 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#include <linux/firmware.h>
diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h
index d247951147a1..804b6ab4246f 100644
--- a/drivers/usb/host/xhci-rcar.h
+++ b/drivers/usb/host/xhci-rcar.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/host/xhci-rcar.h
*
* Copyright (C) 2014 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef _XHCI_RCAR_H
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 82c746e2d85c..c239c688076c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,19 +6,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -171,13 +159,13 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
if (ring->type == TYPE_EVENT) {
if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
ring->dequeue++;
- return;
+ goto out;
}
if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
ring->cycle_state ^= 1;
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
- return;
+ goto out;
}
/* All other rings have link trbs */
@@ -190,6 +178,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
ring->dequeue = ring->deq_seg->trbs;
}
+out:
trace_xhci_inc_deq(ring);
return;
@@ -946,15 +935,12 @@ void xhci_hc_died(struct xhci_hcd *xhci)
* Instead we use a combination of that flag and checking if a new timer is
* pending.
*/
-void xhci_stop_endpoint_command_watchdog(unsigned long arg)
+void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
{
- struct xhci_hcd *xhci;
- struct xhci_virt_ep *ep;
+ struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer);
+ struct xhci_hcd *xhci = ep->xhci;
unsigned long flags;
- ep = (struct xhci_virt_ep *) arg;
- xhci = ep->xhci;
-
spin_lock_irqsave(&xhci->lock, flags);
/* bail out if cmd completed but raced with stop ep watchdog timer.*/
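
The xhci-mem.c and xhci-ring.c hunks above convert the stop-endpoint watchdog from the old setup_timer()/'unsigned long' callback API to timer_setup(): the callback now receives the timer_list pointer and recovers its containing structure with from_timer(). A minimal sketch of that conversion pattern (hypothetical structure and function names):

#include <linux/timer.h>

struct my_ep {				/* hypothetical container */
	struct timer_list stop_timer;
};

static void my_ep_timeout(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the timer_list member */
	struct my_ep *ep = from_timer(ep, t, stop_timer);

	/* ... handle the watchdog expiry using ep ... */
}

static void my_ep_init(struct my_ep *ep)
{
	/* no cast context argument any more; the last argument is timer flags */
	timer_setup(&ep->stop_timer, my_ep_timeout, 0);
}
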
@@ -1680,9 +1666,14 @@ static void handle_port_status(struct xhci_hcd *xhci,
bus_state->resume_done[faked_port_index] = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(faked_port_index, &bus_state->resuming_ports);
+ /* Do the rest in GetPortStatus after resume time delay.
+ * Avoid polling roothub status before that so that the
+ * usb device auto-resume latency stays around ~40ms.
+ */
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
mod_timer(&hcd->rh_timer,
bus_state->resume_done[faked_port_index]);
- /* Do the rest in GetPortStatus */
+ bogus_port_status = true;
}
}
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 74436f8ca538..2c076ea80522 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* NVIDIA Tegra xHCI host controller driver
*
* Copyright (C) 2014 NVIDIA Corporation
* Copyright (C) 2014 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
*/
#include <linux/clk.h>
@@ -771,7 +768,7 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
struct device *dev = tegra->dev;
const struct firmware *fw;
unsigned long timeout;
- time_t timestamp;
+ time64_t timestamp;
struct tm time;
u64 address;
u32 value;
@@ -877,7 +874,7 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
}
timestamp = le32_to_cpu(header->fwimg_created_time);
- time_to_tm(timestamp, 0, &time);
+ time64_to_tm(timestamp, 0, &time);
dev_info(dev, "Firmware timestamp: %ld-%02d-%02d %02d:%02d:%02d UTC\n",
time.tm_year + 1900, time.tm_mon + 1, time.tm_mday,
diff --git a/drivers/usb/host/xhci-trace.c b/drivers/usb/host/xhci-trace.c
index 367b630bdb3c..d0070814d1ea 100644
--- a/drivers/usb/host/xhci-trace.c
+++ b/drivers/usb/host/xhci-trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,10 +6,6 @@
*
* Author: Xenia Ragiadakou
* Email : burzalodowa@gmail.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define CREATE_TRACE_POINTS
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index f20753b99624..a3b57c781db1 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,10 +6,6 @@
*
* Author: Xenia Ragiadakou
* Email : burzalodowa@gmail.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#undef TRACE_SYSTEM
@@ -388,6 +385,11 @@ DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_set_deq,
TP_ARGS(ctx)
);
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_configure_endpoint,
+ TP_PROTO(struct xhci_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
DECLARE_EVENT_CLASS(xhci_log_ring,
TP_PROTO(struct xhci_ring *ring),
TP_ARGS(ring),
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 51535ba2bcd4..327ba8b8a98b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,19 +6,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/pci.h>
@@ -32,6 +20,7 @@
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"
+#include "xhci-debugfs.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
@@ -632,6 +621,9 @@ int xhci_run(struct usb_hcd *hcd)
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Finished xhci_run for USB2 roothub");
+
+ xhci_debugfs_init(xhci);
+
return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);
@@ -660,6 +652,8 @@ static void xhci_stop(struct usb_hcd *hcd)
return;
}
+ xhci_debugfs_exit(xhci);
+
spin_lock_irq(&xhci->lock);
xhci->xhc_state |= XHCI_STATE_HALTED;
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
@@ -1600,6 +1594,8 @@ static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
+ xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
+
xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
if (xhci->quirks & XHCI_MTK_HOST)
@@ -1723,6 +1719,8 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
/* Store the usb_device pointer for later use */
ep->hcpriv = udev;
+ xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
+
xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
(unsigned int) ep->desc.bEndpointAddress,
udev->slot_id,
@@ -2555,6 +2553,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
unsigned long flags;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_virt_device *virt_dev;
+ struct xhci_slot_ctx *slot_ctx;
if (!command)
return -EINVAL;
@@ -2593,6 +2592,9 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
return -ENOMEM;
}
+ slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
+ trace_xhci_configure_endpoint(slot_ctx);
+
if (!ctx_change)
ret = xhci_queue_configure_endpoint(xhci, command,
command->in_ctx->dma,
@@ -2773,6 +2775,7 @@ static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
/* Free any rings allocated for added endpoints */
for (i = 0; i < 31; i++) {
if (virt_dev->eps[i].new_ring) {
+ xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
virt_dev->eps[i].new_ring = NULL;
}
@@ -3488,6 +3491,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
}
if (ep->ring) {
+ xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
xhci_free_endpoint_ring(xhci, virt_dev, i);
last_freed_endpoint = i;
}
@@ -3520,11 +3524,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_virt_device *virt_dev;
struct xhci_slot_ctx *slot_ctx;
int i, ret;
- struct xhci_command *command;
- command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
- if (!command)
- return;
+ xhci_debugfs_remove_slot(xhci, udev->slot_id);
#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
@@ -3540,10 +3541,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
/* If the host is halted due to driver unload, we still need to free the
* device.
*/
- if (ret <= 0 && ret != -ENODEV) {
- kfree(command);
+ if (ret <= 0 && ret != -ENODEV)
return;
- }
virt_dev = xhci->devs[udev->slot_id];
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
@@ -3555,26 +3554,19 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
}
- xhci_disable_slot(xhci, command, udev->slot_id);
- /*
- * Event command completion handler will free any data structures
- * associated with the slot. XXX Can free sleep?
- */
+ ret = xhci_disable_slot(xhci, udev->slot_id);
+ if (ret)
+ xhci_free_virt_device(xhci, udev->slot_id);
}
-int xhci_disable_slot(struct xhci_hcd *xhci, struct xhci_command *command,
- u32 slot_id)
+int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
{
+ struct xhci_command *command;
unsigned long flags;
u32 state;
int ret = 0;
- struct xhci_virt_device *virt_dev;
- virt_dev = xhci->devs[slot_id];
- if (!virt_dev)
- return -EINVAL;
- if (!command)
- command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
if (!command)
return -ENOMEM;
@@ -3583,17 +3575,16 @@ int xhci_disable_slot(struct xhci_hcd *xhci, struct xhci_command *command,
state = readl(&xhci->op_regs->status);
if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
(xhci->xhc_state & XHCI_STATE_HALTED)) {
- xhci_free_virt_device(xhci, slot_id);
spin_unlock_irqrestore(&xhci->lock, flags);
kfree(command);
- return ret;
+ return -ENODEV;
}
ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
slot_id);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ kfree(command);
return ret;
}
xhci_ring_cmd_db(xhci);
@@ -3641,13 +3632,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
if (!command)
return 0;
- /* xhci->slot_id and xhci->addr_dev are not thread-safe */
- mutex_lock(&xhci->mutex);
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
- mutex_unlock(&xhci->mutex);
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
xhci_free_command(xhci, command);
return 0;
@@ -3657,7 +3645,6 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
wait_for_completion(command->completion);
slot_id = command->slot_id;
- mutex_unlock(&xhci->mutex);
if (!slot_id || command->status != COMP_SUCCESS) {
xhci_err(xhci, "Error while assigning device slot ID\n");
@@ -3668,6 +3655,8 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
return 0;
}
+ xhci_free_command(xhci, command);
+
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_reserve_host_control_ep_resources(xhci);
@@ -3694,6 +3683,8 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
udev->slot_id = slot_id;
+ xhci_debugfs_create_slot(xhci, slot_id);
+
#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
* If resetting upon resume, we can't put the controller into runtime
@@ -3703,18 +3694,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
pm_runtime_get_noresume(hcd->self.controller);
#endif
-
- xhci_free_command(xhci, command);
/* Is this a LS or FS device under a HS hub? */
/* Hub or peripheral? */
return 1;
disable_slot:
- /* Disable slot, if we can do it without mem alloc */
- kfree(command->completion);
- command->completion = NULL;
- command->status = 0;
- return xhci_disable_slot(xhci, command, udev->slot_id);
+ ret = xhci_disable_slot(xhci, udev->slot_id);
+ if (ret)
+ xhci_free_virt_device(xhci, udev->slot_id);
+
+ return 0;
}
/*
@@ -3838,8 +3827,14 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
break;
case COMP_USB_TRANSACTION_ERROR:
dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
- ret = -EPROTO;
- break;
+
+ mutex_unlock(&xhci->mutex);
+ ret = xhci_disable_slot(xhci, udev->slot_id);
+ if (!ret)
+ xhci_alloc_dev(hcd, udev);
+ kfree(command->completion);
+ kfree(command);
+ return -EPROTO;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
dev_warn(&udev->dev,
"ERROR: Incompatible device for setup %s command\n", act);
@@ -4088,7 +4083,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
enable ? "enable" : "disable", port_num + 1);
- if (enable) {
+ if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
/* Host supports BESL timeout instead of HIRD */
if (udev->usb2_hw_lpm_besl_capable) {
/* if device doesn't have a preferred BESL value use a
@@ -4287,6 +4282,7 @@ static unsigned long long xhci_calculate_intel_u1_timeout(
break;
}
/* Otherwise the calculation is the same as isoc eps */
+ /* fall through */
case USB_ENDPOINT_XFER_ISOC:
timeout_ns = xhci_service_interval_to_ns(desc);
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
@@ -5005,6 +5001,8 @@ static int __init xhci_hcd_init(void)
if (usb_disabled())
return -ENODEV;
+ xhci_debugfs_create_root();
+
return 0;
}
@@ -5012,7 +5010,10 @@ static int __init xhci_hcd_init(void)
* If an init function is provided, an exit function must also be provided
* to allow module unload.
*/
-static void __exit xhci_hcd_fini(void) { }
+static void __exit xhci_hcd_fini(void)
+{
+ xhci_debugfs_remove_root();
+}
module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);
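The xhci_debugfs_create_root()/xhci_debugfs_remove_root() pair added above brackets the module's lifetime. A minimal sketch of that create-at-init, remove-recursively-at-exit pattern using only the generic debugfs API (the names below are illustrative, not the xhci helpers themselves):

#include <linux/debugfs.h>
#include <linux/module.h>

static struct dentry *example_debugfs_root;

static int __init example_init(void)
{
	/* One directory per module under the debugfs mount point;
	 * per-device subdirectories would hang off this dentry. */
	example_debugfs_root = debugfs_create_dir("example", NULL);
	return 0;
}

static void __exit example_exit(void)
{
	/* Removes the directory and everything created beneath it. */
	debugfs_remove_recursive(example_debugfs_root);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");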
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 2b48aa4f6b76..99a014a920d3 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
@@ -6,19 +7,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_XHCI_HCD_H
@@ -131,6 +119,8 @@ struct xhci_cap_regs {
/* Extended Capabilities pointer from PCI base - section 5.3.6 */
#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
/* db_off bitmask - bits 0:1 reserved */
#define DBOFF_MASK (~0x3)
@@ -1007,6 +997,8 @@ struct xhci_virt_device {
struct xhci_tt_bw_info *tt_info;
/* The current max exit latency for the enabled USB3 link states. */
u16 current_mel;
+ /* Used for the debugfs interfaces. */
+ void *debugfs_private;
};
/*
@@ -1830,6 +1822,7 @@ struct xhci_hcd {
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
/* Reserved. It was XHCI_U2_DISABLE_WAKE */
#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
+#define XHCI_HW_LPM_DISABLE (1 << 29)
unsigned int num_active_eps;
unsigned int limit_active_eps;
@@ -1859,6 +1852,10 @@ struct xhci_hcd {
/* Compliance Mode Timer Triggered every 2 seconds */
#define COMP_MODE_RCVRY_MSECS 2000
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_slots;
+ struct list_head regset_list;
+
/* platform-specific data -- must come last */
unsigned long priv[0] __aligned(sizeof(s64));
};
@@ -2012,8 +2009,7 @@ int xhci_run(struct usb_hcd *hcd);
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
void xhci_init_driver(struct hc_driver *drv,
const struct xhci_driver_overrides *over);
-int xhci_disable_slot(struct xhci_hcd *xhci,
- struct xhci_command *command, u32 slot_id);
+int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
@@ -2068,7 +2064,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_dequeue_state *deq_state);
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
unsigned int stream_id, struct xhci_td *td);
-void xhci_stop_endpoint_command_watchdog(unsigned long arg);
+void xhci_stop_endpoint_command_watchdog(struct timer_list *t);
void xhci_handle_command_timeout(struct work_struct *work);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
@@ -2107,6 +2103,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id);
+
static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
struct urb *urb)
{
@@ -2442,11 +2439,12 @@ static inline const char *xhci_decode_portsc(u32 portsc)
static char str[256];
int ret;
- ret = sprintf(str, "%s %s %s Link:%s ",
+ ret = sprintf(str, "%s %s %s Link:%s PortSpeed:%d ",
portsc & PORT_POWER ? "Powered" : "Powered-off",
portsc & PORT_CONNECT ? "Connected" : "Not-connected",
portsc & PORT_PE ? "Enabled" : "Disabled",
- xhci_portsc_link_state_string(portsc));
+ xhci_portsc_link_state_string(portsc),
+ DEV_PORT_SPEED(portsc));
if (portsc & PORT_OC)
ret += sprintf(str + ret, "OverCurrent ");
diff --git a/drivers/usb/image/Makefile b/drivers/usb/image/Makefile
index 4148ae306352..8997c81ba86b 100644
--- a/drivers/usb/image/Makefile
+++ b/drivers/usb/image/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for USB Image drivers
#
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index e92540a21b6b..2388674042a9 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* copyright (C) 1999/2000 by Henning Zabel <henning@uni-paderborn.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
@@ -893,6 +880,7 @@ static ssize_t mdc800_device_write (struct file *file, const char __user *buf, s
return -EIO;
}
mdc800->pic_len=-1;
+ /* fall through */
case 0x09: /* Download Thumbnail */
mdc800->download_left=answersize+64;
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 0b21ba757bba..9f2f563c82ed 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Driver for Microtek Scanmaker X6 USB scanner, and possibly others.
*
* (C) Copyright 2000 John Fremlin <vii@penguinpowered.com>
diff --git a/drivers/usb/isp1760/Makefile b/drivers/usb/isp1760/Makefile
index 2b741074ad2b..9bb09e8290c4 100644
--- a/drivers/usb/isp1760/Makefile
+++ b/drivers/usb/isp1760/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
isp1760-y := isp1760-core.o isp1760-if.o
isp1760-$(CONFIG_USB_ISP1760_HCD) += isp1760-hcd.o
isp1760-$(CONFIG_USB_ISP1761_UDC) += isp1760-udc.o
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c
index bfa402cf3a27..05d22589b5cc 100644
--- a/drivers/usb/isp1760/isp1760-core.c
+++ b/drivers/usb/isp1760/isp1760-core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the NXP ISP1760 chip
*
@@ -7,10 +8,6 @@
* Contacts:
* Sebastian Siewior <bigeasy@linutronix.de>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#include <linux/delay.h>
diff --git a/drivers/usb/isp1760/isp1760-core.h b/drivers/usb/isp1760/isp1760-core.h
index c70f8368a794..97cb4d7a3e1c 100644
--- a/drivers/usb/isp1760/isp1760-core.h
+++ b/drivers/usb/isp1760/isp1760-core.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the NXP ISP1760 chip
*
@@ -7,10 +8,6 @@
* Contacts:
* Sebastian Siewior <bigeasy@linutronix.de>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef _ISP1760_CORE_H_
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index bb4d6d959871..42672d6ec525 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -1259,10 +1259,11 @@ leave:
#define SLOT_TIMEOUT 300
#define SLOT_CHECK_PERIOD 200
static struct timer_list errata2_timer;
+static struct usb_hcd *errata2_timer_hcd;
-static void errata2_function(unsigned long data)
+static void errata2_function(struct timer_list *unused)
{
- struct usb_hcd *hcd = (struct usb_hcd *) data;
+ struct usb_hcd *hcd = errata2_timer_hcd;
struct isp1760_hcd *priv = hcd_to_priv(hcd);
int slot;
struct ptd ptd;
@@ -1334,7 +1335,8 @@ static int isp1760_run(struct usb_hcd *hcd)
if (retval)
return retval;
- setup_timer(&errata2_timer, errata2_function, (unsigned long)hcd);
+ errata2_timer_hcd = hcd;
+ timer_setup(&errata2_timer, errata2_function, 0);
errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
add_timer(&errata2_timer);
diff --git a/drivers/usb/isp1760/isp1760-regs.h b/drivers/usb/isp1760/isp1760-regs.h
index b67095c9a9d4..1f00c3850cf7 100644
--- a/drivers/usb/isp1760/isp1760-regs.h
+++ b/drivers/usb/isp1760/isp1760-regs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the NXP ISP1760 chip
*
@@ -7,10 +8,6 @@
* Contacts:
* Sebastian Siewior <bigeasy@linutronix.de>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef _ISP1760_REGS_H_
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c
index 69400f3da886..bac4ef5d9512 100644
--- a/drivers/usb/isp1760/isp1760-udc.c
+++ b/drivers/usb/isp1760/isp1760-udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the NXP ISP1761 device controller
*
@@ -5,10 +6,6 @@
*
* Contacts:
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
@@ -1331,9 +1328,9 @@ static irqreturn_t isp1760_udc_irq(int irq, void *dev)
return status ? IRQ_HANDLED : IRQ_NONE;
}
-static void isp1760_udc_vbus_poll(unsigned long data)
+static void isp1760_udc_vbus_poll(struct timer_list *t)
{
- struct isp1760_udc *udc = (struct isp1760_udc *)data;
+ struct isp1760_udc *udc = from_timer(udc, t, vbus_timer);
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
@@ -1452,8 +1449,7 @@ int isp1760_udc_register(struct isp1760_device *isp, int irq,
udc->regs = isp->regs;
spin_lock_init(&udc->lock);
- setup_timer(&udc->vbus_timer, isp1760_udc_vbus_poll,
- (unsigned long)udc);
+ timer_setup(&udc->vbus_timer, isp1760_udc_vbus_poll, 0);
ret = isp1760_udc_init(udc);
if (ret < 0)
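Both timer conversions above move to the struct timer_list callback signature; when the timer is embedded in a state structure, from_timer() replaces the old unsigned-long data cast (the static errata2_timer instead stashes its context in a file-scope pointer, as shown earlier). A reduced sketch of the embedded-timer idiom, with an illustrative structure:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_dev {
	struct timer_list poll_timer;
	unsigned int polls;
};

static void example_poll(struct timer_list *t)
{
	/* Recover the enclosing structure from the timer pointer. */
	struct example_dev *dev = from_timer(dev, t, poll_timer);

	dev->polls++;
	mod_timer(&dev->poll_timer, jiffies + msecs_to_jiffies(200));
}

static void example_start(struct example_dev *dev)
{
	timer_setup(&dev->poll_timer, example_poll, 0);
	mod_timer(&dev->poll_timer, jiffies + msecs_to_jiffies(200));
}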
diff --git a/drivers/usb/isp1760/isp1760-udc.h b/drivers/usb/isp1760/isp1760-udc.h
index 26899ed81145..2d0b88747701 100644
--- a/drivers/usb/isp1760/isp1760-udc.h
+++ b/drivers/usb/isp1760/isp1760-udc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the NXP ISP1761 device controller
*
@@ -5,10 +6,6 @@
*
* Contacts:
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef _ISP1760_UDC_H_
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 0f9f25db9163..68d2f2cd17dd 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -238,8 +238,8 @@ config USB_HUB_USB251XB
depends on I2C
help
This option enables support for configuration via SMBus of the
- Microchip USB251xB/xBi USB 2.0 Hub Controller series.
- Configuration parameters may be set in devicetree or platform data.
+ Microchip USB251x/xBi USB 2.0 Hub Controller series. Configuration
+ parameters may be set in devicetree or platform data.
Say Y or M here if you need to configure such a device via SMBus.
config USB_HSIC_USB3503
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 1c0ada75c35d..4b8712733fc7 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* adutux - driver for ADU devices from Ontrak Control Systems
* This is an experimental driver. Use at your own risk.
@@ -5,11 +6,6 @@
*
* Copyright (c) 2003 John Homppi (SCO, leave this notice here)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
* derived from the Lego USB Tower driver 0.56:
* Copyright (c) 2003 David Glance <davidgsf@sourceforge.net>
* 2001 Juergen Stuber <stuber@loria.fr>
@@ -761,13 +757,11 @@ error:
static void adu_disconnect(struct usb_interface *interface)
{
struct adu_device *dev;
- int minor;
dev = usb_get_intfdata(interface);
mutex_lock(&dev->mtx); /* not interruptible */
dev->udev = NULL; /* poison */
- minor = dev->minor;
usb_deregister_dev(interface, &adu_class);
mutex_unlock(&dev->mtx);
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 8efdc500e790..b3eb8b989bd4 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Apple Cinema Display driver
*
* Copyright (C) 2006 Michael Hanselmann (linux-kernel@hansmi.ch)
*
* Thanks to Caskey L. Dickson for his work with acdctl.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index abec6e604a62..b6a1b9331aa0 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* chaoskey - driver for ChaosKey device from Altus Metrum.
*
@@ -11,15 +12,6 @@
* bit stream.
*
* Copyright © 2015 Keith Packard <keithp@keithp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 5c93a888c40e..bf4d2778907e 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* cypress_cy7c63.c
*
@@ -23,10 +24,6 @@
*
* For up-to-date information please visit:
* http://www.obock.de/kernel/cypress
-*
-* This program is free software; you can redistribute it and/or
-* modify it under the terms of the GNU General Public License as
-* published by the Free Software Foundation, version 2.
*/
#include <linux/module.h>
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 63207c42acf6..66be86077292 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* -*- linux-c -*-
* Cypress USB Thermometer driver
*
@@ -6,11 +7,6 @@
* This driver works with Elektor magazine USB Interface as published in
* issue #291. It should also work with the original starter kit/demo board
* from Cypress.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
*/
diff --git a/drivers/usb/misc/ehset.c b/drivers/usb/misc/ehset.c
index c31b4a33e6bb..7895d61e733b 100644
--- a/drivers/usb/misc/ehset.c
+++ b/drivers/usb/misc/ehset.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index 8950fa5e973d..24d841850e05 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Emagic EMI 2|6 usb audio interface firmware loader.
* Copyright (C) 2002
* Tapio Laxström (tapio.laxstrom@iptime.fi)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, as published by
- * the Free Software Foundation, version 2.
- *
* emi26.c,v 1.13 2002/03/08 13:10:26 tapio Exp
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index 1d9be4431b72..3eea60437f56 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Emagic EMI 2|6 usb audio interface firmware loader.
* Copyright (C) 2002
* Tapio Laxström (tapio.laxstrom@iptime.fi)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, as published by
- * the Free Software Foundation, version 2.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/usb/misc/ezusb.c b/drivers/usb/misc/ezusb.c
index 837208f14f86..f058d8029761 100644
--- a/drivers/usb/misc/ezusb.c
+++ b/drivers/usb/misc/ezusb.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* EZ-USB specific functions used by some of the USB to Serial drivers.
*
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 424ff12f3b51..76c718ac8c78 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB FTDI client driver for Elan Digital Systems's Uxxx adapters
*
@@ -7,11 +8,6 @@
* Author and Maintainer - Tony Olech - Elan Digital Systems
* tony.olech@elandigitalsystems.com
*
- * This program is free software;you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- *
* This driver was written by Tony Olech(tony.olech@elandigitalsystems.com)
* based on various USB client drivers in the 2.6.15 linux kernel
* with constant reference to the 3rd Edition of Linux Device Drivers
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 39d8fedfaf3b..20b0f91a5d9b 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Siemens ID Mouse driver v0.6
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2 of
- the License, or (at your option) any later version.
-
Copyright (C) 2004-5 by Florian 'Floe' Echtler <echtler@fs.tum.de>
and Andreas 'ad' Deresch <aderesch@fs.tum.de>
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index be5881303681..ad3109490c0f 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Native support for the I/O-Warrior USB devices
*
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index 1c61830e96f9..4d30095d6ad2 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for loading USB isight firmware
*
* Copyright (C) 2008 Matthew Garrett <mjg@redhat.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation, version 2.
- *
* The USB isight cameras in recent Apples are roughly compatible with the USB
* video class specification, and can be driven by uvcvideo. However, they
* need firmware to be loaded beforehand. After firmware loading, the device
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 680bddb3ce05..5c1a3b852453 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/**
* Generic USB driver for report based interrupt in/out devices
* like LD Didactic's USB devices. LD Didactic's USB devices are
@@ -12,11 +13,6 @@
*
* Copyright (C) 2005 Michael Hund <mhund@ld-didactic.de>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
* Derived from Lego USB Tower driver
* Copyright (C) 2003 David Glance <advidgsf@sourceforge.net>
* 2001-2004 Juergen Stuber <starblue@users.sourceforge.net>
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 5628f678ab59..c5be6e9e24a5 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* LEGO USB Tower driver
*
* Copyright (C) 2003 David Glance <davidgsf@sourceforge.net>
* 2001-2004 Juergen Stuber <starblue@users.sourceforge.net>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
* derived from USB Skeleton driver - 0.5
* Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
*
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index ddddd6387f66..e5c03c6d16e9 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/misc/lvstest.c
*
@@ -5,10 +6,6 @@
*
* Copyright (C) 2014 ST Microelectronics
* Pratyush Anand <pratyush.anand@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/init.h>
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index ddfebb144aaa..7b9adeb3e7aa 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/* -*- linux-c -*- */
/*
@@ -6,20 +7,6 @@
* Cesar Miquel (miquel@df.uba.ar)
*
* based on hp_scanner.c by David E. Nelson (dnelson@jump.net)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Based upon mouse.c (Brad Keryan) and printer.c (Michael Gee).
*
diff --git a/drivers/usb/misc/rio500_usb.h b/drivers/usb/misc/rio500_usb.h
index 359abc98e706..6db7a5863496 100644
--- a/drivers/usb/misc/rio500_usb.h
+++ b/drivers/usb/misc/rio500_usb.h
@@ -1,25 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/* ----------------------------------------------------------------------
-
Copyright (C) 2000 Cesar Miquel (miquel@df.uba.ar)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
---------------------------------------------------------------------- */
-
-
#define RIO_SEND_COMMAND 0x1
#define RIO_RECV_COMMAND 0x2
diff --git a/drivers/usb/misc/sisusbvga/Makefile b/drivers/usb/misc/sisusbvga/Makefile
index 3142476ccc8e..6ed3a638261a 100644
--- a/drivers/usb/misc/sisusbvga/Makefile
+++ b/drivers/usb/misc/sisusbvga/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the sisusb driver (if driver is inside kernel tree).
#
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 30774e0aeadd..3e65bdc2615c 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* sisusb - usb kernel driver for SiS315(E) based USB2VGA dongles
*
diff --git a/drivers/usb/misc/sisusbvga/sisusb.h b/drivers/usb/misc/sisusbvga/sisusb.h
index 55492a5930bd..20f03ad0ea16 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.h
+++ b/drivers/usb/misc/sisusbvga/sisusb.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* sisusb - usb kernel driver for Net2280/SiS315 based USB2VGA dongles
*
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index f019d80ca9e4..73f7bde78e11 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* sisusb - usb kernel driver for SiS315(E) based USB2VGA dongles
*
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.c b/drivers/usb/misc/sisusbvga/sisusb_init.c
index bf0032ca35ed..6a30e8bd9221 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* sisusb - usb kernel driver for SiS315(E) based USB2VGA dongles
*
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h
index e79a616f0d26..1782c759c4ad 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* $XFree86$ */
/* $XdotOrg$ */
/*
diff --git a/drivers/usb/misc/sisusbvga/sisusb_struct.h b/drivers/usb/misc/sisusbvga/sisusb_struct.h
index 1c4240e802c1..706d77090e00 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_struct.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_struct.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* General structure definitions for universal mode switching modules
*
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index 1862ed15ce28..d83af2a332e4 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* PlayStation 2 Trance Vibrator driver
*
* Copyright (C) 2006 Sam Hocevar <sam@zoy.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Standard include files */
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 135c91c434bf..a6efb9a72939 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Microchip USB251xB USB 2.0 Hi-Speed Hub Controller
* Configuration via SMBus.
@@ -7,25 +8,14 @@
* This work is based on the USB3503 driver by Dongjin Kim and
* a not-accepted patch by Fabien Lahoudere, see:
* https://patchwork.kernel.org/patch/9257715/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/slab.h>
/* Internal Register Set Addresses & Default Values acc. to DS00001692C */
@@ -38,6 +28,7 @@
#define USB251XB_DEF_PRODUCT_ID_12 0x2512 /* USB2512B/12Bi */
#define USB251XB_DEF_PRODUCT_ID_13 0x2513 /* USB2513B/13Bi */
#define USB251XB_DEF_PRODUCT_ID_14 0x2514 /* USB2514B/14Bi */
+#define USB251XB_DEF_PRODUCT_ID_17 0x2517 /* USB2517/17i */
#define USB251XB_ADDR_DEVICE_ID_LSB 0x04
#define USB251XB_ADDR_DEVICE_ID_MSB 0x05
@@ -82,7 +73,7 @@
#define USB251XB_ADDR_PRODUCT_STRING_LEN 0x14
#define USB251XB_ADDR_PRODUCT_STRING 0x54
-#define USB251XB_DEF_PRODUCT_STRING "USB251xB/xBi"
+#define USB251XB_DEF_PRODUCT_STRING "USB251xB/xBi/7i"
#define USB251XB_ADDR_SERIAL_STRING_LEN 0x15
#define USB251XB_ADDR_SERIAL_STRING 0x92
@@ -93,8 +84,10 @@
#define USB251XB_ADDR_BOOST_UP 0xF6
#define USB251XB_DEF_BOOST_UP 0x00
-#define USB251XB_ADDR_BOOST_X 0xF8
-#define USB251XB_DEF_BOOST_X 0x00
+#define USB251XB_ADDR_BOOST_57 0xF7
+#define USB251XB_DEF_BOOST_57 0x00
+#define USB251XB_ADDR_BOOST_14 0xF8
+#define USB251XB_DEF_BOOST_14 0x00
#define USB251XB_ADDR_PORT_SWAP 0xFA
#define USB251XB_DEF_PORT_SWAP 0x00
@@ -102,7 +95,11 @@
#define USB251XB_ADDR_PORT_MAP_12 0xFB
#define USB251XB_DEF_PORT_MAP_12 0x00
#define USB251XB_ADDR_PORT_MAP_34 0xFC
-#define USB251XB_DEF_PORT_MAP_34 0x00 /* USB2513B/i & USB2514B/i only */
+#define USB251XB_DEF_PORT_MAP_34 0x00 /* USB251{3B/i,4B/i,7/i} only */
+#define USB251XB_ADDR_PORT_MAP_56 0xFD
+#define USB251XB_DEF_PORT_MAP_56 0x00 /* USB2517/i only */
+#define USB251XB_ADDR_PORT_MAP_7 0xFE
+#define USB251XB_DEF_PORT_MAP_7 0x00 /* USB2517/i only */
#define USB251XB_ADDR_STATUS_COMMAND 0xFF
#define USB251XB_STATUS_COMMAND_SMBUS_DOWN 0x04
@@ -119,7 +116,7 @@ struct usb251xb {
struct device *dev;
struct i2c_client *i2c;
u8 skip_config;
- int gpio_reset;
+ struct gpio_desc *gpio_reset;
u16 vendor_id;
u16 product_id;
u16 device_id;
@@ -143,57 +140,97 @@ struct usb251xb {
char serial[USB251XB_STRING_BUFSIZE];
u8 bat_charge_en;
u8 boost_up;
- u8 boost_x;
+ u8 boost_57;
+ u8 boost_14;
u8 port_swap;
u8 port_map12;
u8 port_map34;
+ u8 port_map56;
+ u8 port_map7;
u8 status;
};
struct usb251xb_data {
u16 product_id;
+ u8 port_cnt;
+ bool led_support;
+ bool bat_support;
char product_str[USB251XB_STRING_BUFSIZE / 2]; /* ASCII string */
};
static const struct usb251xb_data usb2512b_data = {
.product_id = 0x2512,
+ .port_cnt = 2,
+ .led_support = false,
+ .bat_support = true,
.product_str = "USB2512B",
};
static const struct usb251xb_data usb2512bi_data = {
.product_id = 0x2512,
+ .port_cnt = 2,
+ .led_support = false,
+ .bat_support = true,
.product_str = "USB2512Bi",
};
static const struct usb251xb_data usb2513b_data = {
.product_id = 0x2513,
+ .port_cnt = 3,
+ .led_support = false,
+ .bat_support = true,
.product_str = "USB2513B",
};
static const struct usb251xb_data usb2513bi_data = {
.product_id = 0x2513,
+ .port_cnt = 3,
+ .led_support = false,
+ .bat_support = true,
.product_str = "USB2513Bi",
};
static const struct usb251xb_data usb2514b_data = {
.product_id = 0x2514,
+ .port_cnt = 4,
+ .led_support = false,
+ .bat_support = true,
.product_str = "USB2514B",
};
static const struct usb251xb_data usb2514bi_data = {
.product_id = 0x2514,
+ .port_cnt = 4,
+ .led_support = false,
+ .bat_support = true,
.product_str = "USB2514Bi",
};
+static const struct usb251xb_data usb2517_data = {
+ .product_id = 0x2517,
+ .port_cnt = 7,
+ .led_support = true,
+ .bat_support = false,
+ .product_str = "USB2517",
+};
+
+static const struct usb251xb_data usb2517i_data = {
+ .product_id = 0x2517,
+ .port_cnt = 7,
+ .led_support = true,
+ .bat_support = false,
+ .product_str = "USB2517i",
+};
+
static void usb251xb_reset(struct usb251xb *hub, int state)
{
- if (!gpio_is_valid(hub->gpio_reset))
+ if (!hub->gpio_reset)
return;
- gpio_set_value_cansleep(hub->gpio_reset, state);
+ gpiod_set_value_cansleep(hub->gpio_reset, state);
/* wait for hub recovery/stabilization */
- if (state)
+ if (!state)
usleep_range(500, 750); /* >=500us at power on */
else
usleep_range(1, 10); /* >=1us at power down */
@@ -212,7 +249,7 @@ static int usb251xb_connect(struct usb251xb *hub)
i2c_wb[0] = 0x01;
i2c_wb[1] = USB251XB_STATUS_COMMAND_ATTACH;
- usb251xb_reset(hub, 1);
+ usb251xb_reset(hub, 0);
err = i2c_smbus_write_i2c_block_data(hub->i2c,
USB251XB_ADDR_STATUS_COMMAND, 2, i2c_wb);
@@ -253,13 +290,16 @@ static int usb251xb_connect(struct usb251xb *hub)
USB251XB_STRING_BUFSIZE);
i2c_wb[USB251XB_ADDR_BATTERY_CHARGING_ENABLE] = hub->bat_charge_en;
i2c_wb[USB251XB_ADDR_BOOST_UP] = hub->boost_up;
- i2c_wb[USB251XB_ADDR_BOOST_X] = hub->boost_x;
+ i2c_wb[USB251XB_ADDR_BOOST_57] = hub->boost_57;
+ i2c_wb[USB251XB_ADDR_BOOST_14] = hub->boost_14;
i2c_wb[USB251XB_ADDR_PORT_SWAP] = hub->port_swap;
i2c_wb[USB251XB_ADDR_PORT_MAP_12] = hub->port_map12;
i2c_wb[USB251XB_ADDR_PORT_MAP_34] = hub->port_map34;
+ i2c_wb[USB251XB_ADDR_PORT_MAP_56] = hub->port_map56;
+ i2c_wb[USB251XB_ADDR_PORT_MAP_7] = hub->port_map7;
i2c_wb[USB251XB_ADDR_STATUS_COMMAND] = USB251XB_STATUS_COMMAND_ATTACH;
- usb251xb_reset(hub, 1);
+ usb251xb_reset(hub, 0);
/* write registers */
for (i = 0; i < (USB251XB_I2C_REG_SZ / USB251XB_I2C_WRITE_SZ); i++) {
@@ -297,7 +337,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
struct device *dev = hub->dev;
struct device_node *np = dev->of_node;
int len, err, i;
- u32 *property_u32 = NULL;
+ u32 property_u32 = 0;
const u32 *cproperty_u32;
const char *cproperty_char;
char str[USB251XB_STRING_BUFSIZE / 2];
@@ -312,19 +352,13 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
else
hub->skip_config = 0;
- hub->gpio_reset = of_get_named_gpio(np, "reset-gpios", 0);
- if (hub->gpio_reset == -EPROBE_DEFER)
+ hub->gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (PTR_ERR(hub->gpio_reset) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
- if (gpio_is_valid(hub->gpio_reset)) {
- err = devm_gpio_request_one(dev, hub->gpio_reset,
- GPIOF_OUT_INIT_LOW,
- "usb251xb reset");
- if (err) {
- dev_err(dev,
- "unable to request GPIO %d as reset pin (%d)\n",
- hub->gpio_reset, err);
- return err;
- }
+ } else if (IS_ERR(hub->gpio_reset)) {
+ err = PTR_ERR(hub->gpio_reset);
+ dev_err(dev, "unable to request GPIO reset pin (%d)\n", err);
+ return err;
}
if (of_property_read_u16_array(np, "vendor-id", &hub->vendor_id, 1))
@@ -374,16 +408,16 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
if (of_get_property(np, "dynamic-power-switching", NULL))
hub->conf_data2 |= BIT(7);
- if (!of_property_read_u32(np, "oc-delay-us", property_u32)) {
- if (*property_u32 == 100) {
+ if (!of_property_read_u32(np, "oc-delay-us", &property_u32)) {
+ if (property_u32 == 100) {
/* 100 us*/
hub->conf_data2 &= ~BIT(5);
hub->conf_data2 &= ~BIT(4);
- } else if (*property_u32 == 4000) {
+ } else if (property_u32 == 4000) {
/* 4 ms */
hub->conf_data2 &= ~BIT(5);
hub->conf_data2 |= BIT(4);
- } else if (*property_u32 == 16000) {
+ } else if (property_u32 == 16000) {
/* 16 ms */
hub->conf_data2 |= BIT(5);
hub->conf_data2 |= BIT(4);
@@ -401,6 +435,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
if (of_get_property(np, "port-mapping-mode", NULL))
hub->conf_data3 |= BIT(3);
+ if (data->led_support && of_get_property(np, "led-usb-mode", NULL))
+ hub->conf_data3 &= ~BIT(1);
+
if (of_get_property(np, "string-support", NULL))
hub->conf_data3 |= BIT(0);
@@ -410,8 +447,11 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
for (i = 0; i < len / sizeof(u32); i++) {
u32 port = be32_to_cpu(cproperty_u32[i]);
- if ((port >= 1) && (port <= 4))
+ if ((port >= 1) && (port <= data->port_cnt))
hub->non_rem_dev |= BIT(port);
+ else
+ dev_warn(dev, "NRD port %u doesn't exist\n",
+ port);
}
}
@@ -421,8 +461,11 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
for (i = 0; i < len / sizeof(u32); i++) {
u32 port = be32_to_cpu(cproperty_u32[i]);
- if ((port >= 1) && (port <= 4))
+ if ((port >= 1) && (port <= data->port_cnt))
hub->port_disable_sp |= BIT(port);
+ else
+ dev_warn(dev, "PDS port %u doesn't exist\n",
+ port);
}
}
@@ -432,14 +475,37 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
for (i = 0; i < len / sizeof(u32); i++) {
u32 port = be32_to_cpu(cproperty_u32[i]);
- if ((port >= 1) && (port <= 4))
+ if ((port >= 1) && (port <= data->port_cnt))
hub->port_disable_bp |= BIT(port);
+ else
+ dev_warn(dev, "PDB port %u doesn't exist\n",
+ port);
}
}
+ hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
+ if (!of_property_read_u32(np, "sp-max-total-current-microamp",
+ &property_u32))
+ hub->max_power_sp = min_t(u8, property_u32 / 2000, 50);
+
+ hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS;
+ if (!of_property_read_u32(np, "bp-max-total-current-microamp",
+ &property_u32))
+ hub->max_power_bp = min_t(u8, property_u32 / 2000, 255);
+
+ hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF;
+ if (!of_property_read_u32(np, "sp-max-removable-current-microamp",
+ &property_u32))
+ hub->max_current_sp = min_t(u8, property_u32 / 2000, 50);
+
+ hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS;
+ if (!of_property_read_u32(np, "bp-max-removable-current-microamp",
+ &property_u32))
+ hub->max_current_bp = min_t(u8, property_u32 / 2000, 255);
+
hub->power_on_time = USB251XB_DEF_POWER_ON_TIME;
- if (!of_property_read_u32(np, "power-on-time-ms", property_u32))
- hub->power_on_time = min_t(u8, *property_u32 / 2, 255);
+ if (!of_property_read_u32(np, "power-on-time-ms", &property_u32))
+ hub->power_on_time = min_t(u8, property_u32 / 2, 255);
if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1))
hub->lang_id = USB251XB_DEF_LANGUAGE_ID;
@@ -476,16 +542,15 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
/* The following parameters are currently not exposed to devicetree, but
* may be as soon as needed.
*/
- hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
- hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS;
- hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF;
- hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS;
hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE;
hub->boost_up = USB251XB_DEF_BOOST_UP;
- hub->boost_x = USB251XB_DEF_BOOST_X;
+ hub->boost_57 = USB251XB_DEF_BOOST_57;
+ hub->boost_14 = USB251XB_DEF_BOOST_14;
hub->port_swap = USB251XB_DEF_PORT_SWAP;
hub->port_map12 = USB251XB_DEF_PORT_MAP_12;
hub->port_map34 = USB251XB_DEF_PORT_MAP_34;
+ hub->port_map56 = USB251XB_DEF_PORT_MAP_56;
+ hub->port_map7 = USB251XB_DEF_PORT_MAP_7;
return 0;
}
@@ -510,6 +575,12 @@ static const struct of_device_id usb251xb_of_match[] = {
.compatible = "microchip,usb2514bi",
.data = &usb2514bi_data,
}, {
+ .compatible = "microchip,usb2517",
+ .data = &usb2517_data,
+ }, {
+ .compatible = "microchip,usb2517i",
+ .data = &usb2517i_data,
+ }, {
/* sentinel */
}
};
@@ -573,6 +644,8 @@ static const struct i2c_device_id usb251xb_id[] = {
{ "usb2513bi", 0 },
{ "usb2514b", 0 },
{ "usb2514bi", 0 },
+ { "usb2517", 0 },
+ { "usb2517i", 0 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, usb251xb_id);
@@ -589,5 +662,5 @@ static struct i2c_driver usb251xb_i2c_driver = {
module_i2c_driver(usb251xb_i2c_driver);
MODULE_AUTHOR("Richard Leitner <richard.leitner@skidata.com>");
-MODULE_DESCRIPTION("USB251xB/xBi USB 2.0 Hub Controller Driver");
+MODULE_DESCRIPTION("USB251x/xBi USB 2.0 Hub Controller Driver");
MODULE_LICENSE("GPL");
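The reset handling above switches from the legacy of_get_named_gpio()/devm_gpio_request_one() pair to the gpiod consumer API; a hedged sketch of the lookup half of that pattern (device and label names are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static struct gpio_desc *example_get_reset(struct device *dev)
{
	struct gpio_desc *reset;

	/* Matches an optional "reset-gpios" DT property; GPIOD_OUT_HIGH
	 * leaves the line driven to its active (asserted) level. */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset)) {
		if (PTR_ERR(reset) != -EPROBE_DEFER)
			dev_err(dev, "failed to request reset GPIO (%ld)\n",
				PTR_ERR(reset));
		return reset;
	}

	/* A NULL descriptor means the property was absent, which is why
	 * the driver above checks the pointer before toggling it. */
	return reset;
}

The same hunk also repairs the oc-delay-us and power-on-time-ms parsing: of_property_read_u32() now receives the address of a local u32 rather than a NULL pointer that would have been dereferenced whenever the property was present.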
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 8e7737d7ac0a..465dbf68b463 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SMSC USB3503 USB 2.0 hub controller driver
*
* Copyright (c) 2012-2013 Dongjin Kim (tobetter@gmail.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/clk.h>
diff --git a/drivers/usb/misc/usb4604.c b/drivers/usb/misc/usb4604.c
index e9f37fb746ac..1b4de651e697 100644
--- a/drivers/usb/misc/usb4604.c
+++ b/drivers/usb/misc/usb4604.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SMSC USB4604 USB HSIC 4-port 2.0 hub controller driver
* Based on usb3503 driver
*
* Copyright (c) 2012-2013 Dongjin Kim (tobetter@gmail.com)
* Copyright (c) 2016 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/i2c.h>
diff --git a/drivers/usb/misc/usb_u132.h b/drivers/usb/misc/usb_u132.h
index dc2e5a31caec..4bf77736914f 100644
--- a/drivers/usb/misc/usb_u132.h
+++ b/drivers/usb/misc/usb_u132.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Common Header File for the Elan Digital Systems U132 adapter
* this file should be included by both the "ftdi-u132" and
@@ -9,11 +10,6 @@
* Author and Maintainer - Tony Olech - Elan Digital Systems
*(tony.olech@elandigitalsystems.com)
*
-* This program is free software;you can redistribute it and/or
-* modify it under the terms of the GNU General Public License as
-* published by the Free Software Foundation, version 2.
-*
-*
* The driver was written by Tony Olech(tony.olech@elandigitalsystems.com)
* based on various USB client drivers in the 2.6.15 linux kernel
* with constant reference to the 3rd Edition of Linux Device Drivers
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 0f5ad896c7e3..9ba4a4e68d91 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*****************************************************************************
* USBLCD Kernel Driver *
* Version 1.05 *
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 3f6a28045b53..12f7e94695a2 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB 7 Segment Driver
*
* Copyright (C) 2008 Harrison Metzger <harrisonmetz@gmail.com>
* Based on usbled.c by Greg Kroah-Hartman (greg@kroah.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 5ad74750e8a4..aedc9a7f149e 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -1023,7 +1024,7 @@ static int ch9_postconfig(struct usbtest_dev *dev)
/* FIXME fetch strings from at least the device descriptor */
/* [9.4.5] get_status always works */
- retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
+ retval = usb_get_std_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
if (retval) {
dev_err(&iface->dev, "get dev status --> %d\n", retval);
return retval;
@@ -1033,7 +1034,7 @@ static int ch9_postconfig(struct usbtest_dev *dev)
* the device's remote wakeup feature ... if we can, test that here
*/
- retval = usb_get_status(udev, USB_RECIP_INTERFACE,
+ retval = usb_get_std_status(udev, USB_RECIP_INTERFACE,
iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
if (retval) {
dev_err(&iface->dev, "get interface status --> %d\n", retval);
@@ -1622,7 +1623,7 @@ static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
u16 status;
/* shouldn't look or act halted */
- retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
+ retval = usb_get_std_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
ep, retval);
@@ -1644,7 +1645,7 @@ static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
u16 status;
/* should look and act halted */
- retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
+ retval = usb_get_std_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
ep, retval);
@@ -1917,7 +1918,7 @@ static struct urb *iso_alloc_urb(
if (bytes < 0 || !desc)
return NULL;
- maxp = 0x7ff & usb_endpoint_maxp(desc);
+ maxp = usb_endpoint_maxp(desc);
maxp *= usb_endpoint_maxp_mult(desc);
packets = DIV_ROUND_UP(bytes, maxp);
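The usbtest hunk above drops the open-coded 0x7ff mask because usb_endpoint_maxp() now returns just the packet-size field; the per-microframe multiplier comes from usb_endpoint_maxp_mult(). A small sketch of the resulting sizing calculation (the helper name is illustrative):

#include <linux/kernel.h>
#include <linux/usb/ch9.h>

static unsigned int example_packet_count(const struct usb_endpoint_descriptor *desc,
					 unsigned int bytes)
{
	/* Packet size without the high-bandwidth bits ... */
	unsigned int maxp = usb_endpoint_maxp(desc);

	/* ... times 1-3 transactions per microframe for high-speed
	 * high-bandwidth endpoints. */
	maxp *= usb_endpoint_maxp_mult(desc);

	return DIV_ROUND_UP(bytes, maxp);
}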
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 8a13b2fcf3e1..263c97fec708 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*****************************************************************************/
/*
@@ -6,20 +7,6 @@
* Copyright (C) 1999, 2005, 2010
* Thomas Sailer (t.sailer@alumni.ethz.ch)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Based on parport_pc.c
*
* History:
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 58abdf28620a..8abb6cbbd98a 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Meywa-Denki & KAYAC YUREX
*
* Copyright (C) 2010 Tomoki Sekiyama (tomoki.sekiyama@gmail.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/mon/Makefile b/drivers/usb/mon/Makefile
index 8ed24ab08698..09f43e89633c 100644
--- a/drivers/usb/mon/Makefile
+++ b/drivers/usb/mon/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for USB monitor
#
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index 46847340b819..9812d102a005 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* The USB Monitor, inspired by Dave Harding's USBMon.
*
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
index b26fffc58446..3c888d942a9f 100644
--- a/drivers/usb/mtu3/mtu3.h
+++ b/drivers/usb/mtu3/mtu3.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3.h - MediaTek USB3 DRD header
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __MTU3_H__
@@ -46,6 +37,9 @@ struct mtu3_request;
#define MU3D_EP_RXCR1(epnum) (U3D_RX1CSR1 + (((epnum) - 1) * 0x10))
#define MU3D_EP_RXCR2(epnum) (U3D_RX1CSR2 + (((epnum) - 1) * 0x10))
+#define USB_QMU_TQHIAR(epnum) (U3D_TXQHIAR1 + (((epnum) - 1) * 0x4))
+#define USB_QMU_RQHIAR(epnum) (U3D_RXQHIAR1 + (((epnum) - 1) * 0x4))
+
#define USB_QMU_RQCSR(epnum) (U3D_RXQCSR1 + (((epnum) - 1) * 0x10))
#define USB_QMU_RQSAR(epnum) (U3D_RXQSAR1 + (((epnum) - 1) * 0x10))
#define USB_QMU_RQCPR(epnum) (U3D_RXQCPR1 + (((epnum) - 1) * 0x10))
@@ -91,6 +85,7 @@ enum mtu3_speed {
MTU3_SPEED_FULL = 1,
MTU3_SPEED_HIGH = 3,
MTU3_SPEED_SUPER = 4,
+ MTU3_SPEED_SUPER_PLUS = 5,
};
/**
@@ -112,6 +107,19 @@ enum mtu3_g_ep0_state {
};
/**
+ * MTU3_DR_FORCE_NONE: automatically switch host and peripheral mode
+ * by IDPIN signal.
+ * MTU3_DR_FORCE_HOST: force to enter host mode and override OTG
+ * IDPIN signal.
+ * MTU3_DR_FORCE_DEVICE: force to enter peripheral mode.
+ */
+enum mtu3_dr_force_mode {
+ MTU3_DR_FORCE_NONE = 0,
+ MTU3_DR_FORCE_HOST,
+ MTU3_DR_FORCE_DEVICE,
+};
+
+/**
* @base: the base address of fifo
* @limit: the bitmap size in bits
* @bitmap: fifo bitmap in unit of @MTU3_EP_FIFO_UNIT
@@ -138,23 +146,33 @@ struct mtu3_fifo_info {
* Checksum value is calculated over the 16 bytes of the GPD by default;
* @data_buf_len (RX ONLY): This value indicates the length of
* the assigned data buffer
+ * @tx_ext_addr (TX ONLY): [3:0] are 4 extension bits of @buffer,
+ * [7:4] are 4 extension bits of @next_gpd
* @next_gpd: Physical address of the next GPD
* @buffer: Physical address of the data buffer
* @buf_len:
* (TX): This value indicates the length of the assigned data buffer
* (RX): The total length of data received
* @ext_len: reserved
+ * @rx_ext_addr(RX ONLY): [3:0] are 4 extension bits of @buffer,
+ * [7:4] are 4 extension bits of @next_gpd
* @ext_flag:
* bit5 (TX ONLY): Zero Length Packet (ZLP),
*/
struct qmu_gpd {
__u8 flag;
__u8 chksum;
- __le16 data_buf_len;
+ union {
+ __le16 data_buf_len;
+ __le16 tx_ext_addr;
+ };
__le32 next_gpd;
__le32 buffer;
__le16 buf_len;
- __u8 ext_len;
+ union {
+ __u8 ext_len;
+ __u8 rx_ext_addr;
+ };
__u8 ext_flag;
} __packed;
@@ -183,7 +201,6 @@ struct mtu3_gpd_ring {
* xHCI driver initialization, it's necessary for system bootup
* as device.
* @is_u3_drd: whether port0 supports usb3.0 dual-role device or not
-* @id_*: used to maually switch between host and device modes by idpin
* @manual_drd_enabled: it's true when supports dual-role device by debugfs
* to switch host/device modes depending on user input.
*/
@@ -194,10 +211,6 @@ struct otg_switch_mtk {
struct notifier_block id_nb;
struct delayed_work extcon_reg_dwork;
bool is_u3_drd;
- /* dual-role switch by debugfs */
- struct pinctrl *id_pinctrl;
- struct pinctrl_state *id_float;
- struct pinctrl_state *id_ground;
bool manual_drd_enabled;
};
@@ -206,14 +219,17 @@ struct otg_switch_mtk {
* @ippc_base: register base address of IP Power and Clock interface (IPPC)
* @vusb33: usb3.3V shared by device/host IP
* @sys_clk: system clock of mtu3, shared by device/host IP
+ * @ref_clk: reference clock
+ * @mcu_clk: mcu_bus_ck clock for AHB bus etc
+ * @dma_clk: dma_bus_ck clock for AXI bus etc
* @dr_mode: works in which mode:
* host only, device only or dual-role mode
* @u2_ports: number of usb2.0 host ports
* @u3_ports: number of usb3.0 host ports
+ * @u3p_dis_msk: mask of disabled usb3 ports, for example, bit0==1 to
+ *		disable u3port0, bit1==1 to disable u3port1, ... etc
* @dbgfs_root: only used when supports manual dual-role switch via debugfs
* @wakeup_en: it's true when supports remote wakeup in host mode
- * @wk_deb_p0: port0's wakeup debounce clock
- * @wk_deb_p1: it's optional, and depends on port1 is supported or not
*/
struct ssusb_mtk {
struct device *dev;
@@ -226,17 +242,18 @@ struct ssusb_mtk {
struct regulator *vusb33;
struct clk *sys_clk;
struct clk *ref_clk;
+ struct clk *mcu_clk;
+ struct clk *dma_clk;
/* otg */
struct otg_switch_mtk otg_switch;
enum usb_dr_mode dr_mode;
bool is_host;
int u2_ports;
int u3_ports;
+ int u3p_dis_msk;
struct dentry *dbgfs_root;
/* usb wakeup for host mode */
bool wakeup_en;
- struct clk *wk_deb_p0;
- struct clk *wk_deb_p1;
struct regmap *pericfg;
};
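
The @tx_ext_addr/@rx_ext_addr unions documented above carry bits [35:32] of the buffer and next-GPD addresses, so a GPD keeps 32-bit fields yet can point at memory above 4 GB. A stand-alone sketch of the packing follows; the GPD_EXT_* macros mirror the ones added by the patch, while lower_32_bits()/upper_32_bits() are re-implemented here and the example addresses are made up:

#include <stdint.h>
#include <stdio.h>

#define GPD_EXT_NGP(x)	(((x) & 0xf) << 4)	/* next_gpd bits [35:32] */
#define GPD_EXT_BUF(x)	(((x) & 0xf) << 0)	/* buffer bits [35:32] */

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t buf_dma = 0x987654320ULL;	/* 36-bit buffer address */
	uint64_t ngp_dma = 0x9abcdef00ULL;	/* 36-bit next-GPD address */
	uint32_t buffer   = lower_32_bits(buf_dma);
	uint32_t next_gpd = lower_32_bits(ngp_dma);
	uint16_t ext_addr = GPD_EXT_BUF(upper_32_bits(buf_dma)) |
			    GPD_EXT_NGP(upper_32_bits(ngp_dma));

	/* low 32 bits go in the GPD words, the two top nibbles in ext_addr */
	printf("buffer=%#x next_gpd=%#x ext_addr=%#x\n",
	       buffer, next_gpd, ext_addr);
	return 0;
}
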
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index 99c65b0788ff..b1b99a8f6a7a 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_core.c - hardware access layer and gadget init/exit of
* MediaTek usb3 Dual-Role Controller Driver
@@ -5,18 +6,9 @@
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -114,7 +106,9 @@ static int mtu3_device_enable(struct mtu3 *mtu)
mtu3_clrbits(ibase, SSUSB_U2_CTRL(0),
(SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
SSUSB_U2_PORT_HOST_SEL));
- mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
+ mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
return ssusb_check_clocks(mtu->ssusb, check_clk);
}
@@ -129,7 +123,10 @@ static void mtu3_device_disable(struct mtu3 *mtu)
mtu3_setbits(ibase, SSUSB_U2_CTRL(0),
SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN);
- mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
+ mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+
mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
}
@@ -236,7 +233,7 @@ void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set)
void mtu3_dev_on_off(struct mtu3 *mtu, int is_on)
{
- if (mtu->is_u3_ip && (mtu->max_speed == USB_SPEED_SUPER))
+ if (mtu->is_u3_ip && mtu->max_speed >= USB_SPEED_SUPER)
mtu3_ss_func_set(mtu, is_on);
else
mtu3_hs_softconn_set(mtu, is_on);
@@ -546,6 +543,9 @@ static void mtu3_set_speed(struct mtu3 *mtu)
mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN);
/* HS/FS detected by HW */
mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
+ } else if (mtu->max_speed == USB_SPEED_SUPER) {
+ mtu3_clrbits(mtu->ippc_base, SSUSB_U3_CTRL(0),
+ SSUSB_U3_PORT_SSP_SPEED);
}
dev_info(mtu->dev, "max_speed: %s\n",
@@ -623,6 +623,10 @@ static irqreturn_t mtu3_link_isr(struct mtu3 *mtu)
udev_speed = USB_SPEED_SUPER;
maxpkt = 512;
break;
+ case MTU3_SPEED_SUPER_PLUS:
+ udev_speed = USB_SPEED_SUPER_PLUS;
+ maxpkt = 512;
+ break;
default:
udev_speed = USB_SPEED_UNKNOWN;
break;
@@ -759,7 +763,31 @@ static void mtu3_hw_exit(struct mtu3 *mtu)
mtu3_mem_free(mtu);
}
-/*-------------------------------------------------------------------------*/
+/*
+ * A 32-bit DMA mask is set by default; check whether the controller
+ * supports 36-bit DMA and, if so, switch to a 36-bit DMA mask.
+ */
+static int mtu3_set_dma_mask(struct mtu3 *mtu)
+{
+ struct device *dev = mtu->dev;
+ bool is_36bit = false;
+ int ret = 0;
+ u32 value;
+
+ value = mtu3_readl(mtu->mac_base, U3D_MISC_CTRL);
+ if (value & DMA_ADDR_36BIT) {
+ is_36bit = true;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
+ /* If set 36-bit DMA mask fails, fall back to 32-bit DMA mask */
+ if (ret) {
+ is_36bit = false;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ }
+ }
+ dev_info(dev, "dma mask: %s bits\n", is_36bit ? "36" : "32");
+
+ return ret;
+}
int ssusb_gadget_init(struct ssusb_mtk *ssusb)
{
@@ -774,9 +802,9 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
return -ENOMEM;
mtu->irq = platform_get_irq(pdev, 0);
- if (mtu->irq <= 0) {
+ if (mtu->irq < 0) {
dev_err(dev, "fail to get irq number\n");
- return -ENODEV;
+ return mtu->irq;
}
dev_info(dev, "irq %d\n", mtu->irq);
@@ -800,14 +828,15 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
case USB_SPEED_FULL:
case USB_SPEED_HIGH:
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
break;
default:
dev_err(dev, "invalid max_speed: %s\n",
usb_speed_string(mtu->max_speed));
/* fall through */
case USB_SPEED_UNKNOWN:
- /* default as SS */
- mtu->max_speed = USB_SPEED_SUPER;
+ /* default as SSP */
+ mtu->max_speed = USB_SPEED_SUPER_PLUS;
break;
}
@@ -820,6 +849,12 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
return ret;
}
+ ret = mtu3_set_dma_mask(mtu);
+ if (ret) {
+ dev_err(dev, "mtu3 set dma_mask failed:%d\n", ret);
+ goto dma_mask_err;
+ }
+
ret = devm_request_irq(dev, mtu->irq, mtu3_irq, 0, dev_name(dev), mtu);
if (ret) {
dev_err(dev, "request irq %d failed!\n", mtu->irq);
@@ -845,6 +880,7 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
gadget_err:
device_init_wakeup(dev, false);
+dma_mask_err:
irq_err:
mtu3_hw_exit(mtu);
ssusb->u3d = NULL;
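
The mtu3_set_dma_mask() helper added above follows the usual try-wide-then-fall-back approach for DMA masks. A generic sketch of that pattern, assuming a hypothetical wide_capable flag in place of the U3D_MISC_CTRL register read:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Try a 36-bit DMA mask first; fall back to 32-bit if the platform refuses. */
static int example_set_dma_mask(struct device *dev, bool wide_capable)
{
	if (wide_capable &&
	    !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)))
		return 0;

	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
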
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
index 560256115b23..db7562d99b95 100644
--- a/drivers/usb/mtu3/mtu3_dr.c
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_dr.c - dual role switch and host glue layer
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/debugfs.h>
@@ -261,21 +252,22 @@ static void extcon_register_dwork(struct work_struct *work)
* depending on user input.
 * This is useful in special cases, such as a board that uses a TYPE-A
 * receptacle but also wants to support dual-role mode.
- * It generates cable state changes by pulling up/down IDPIN and
- * notifies driver to switch mode by "extcon-usb-gpio".
- * NOTE: when use MICRO receptacle, should not enable this interface.
*/
static void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
- if (to_host)
- pinctrl_select_state(otg_sx->id_pinctrl, otg_sx->id_ground);
- else
- pinctrl_select_state(otg_sx->id_pinctrl, otg_sx->id_float);
+ if (to_host) {
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_OFF);
+ ssusb_set_mailbox(otg_sx, MTU3_ID_GROUND);
+ } else {
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_DEVICE);
+ ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
+ }
}
-
static int ssusb_mode_show(struct seq_file *sf, void *unused)
{
struct ssusb_mtk *ssusb = sf->private;
@@ -388,17 +380,45 @@ static void ssusb_debugfs_exit(struct ssusb_mtk *ssusb)
debugfs_remove_recursive(ssusb->dbgfs_root);
}
+void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
+ enum mtu3_dr_force_mode mode)
+{
+ u32 value;
+
+ value = mtu3_readl(ssusb->ippc_base, SSUSB_U2_CTRL(0));
+ switch (mode) {
+ case MTU3_DR_FORCE_DEVICE:
+ value |= SSUSB_U2_PORT_FORCE_IDDIG | SSUSB_U2_PORT_RG_IDDIG;
+ break;
+ case MTU3_DR_FORCE_HOST:
+ value |= SSUSB_U2_PORT_FORCE_IDDIG;
+ value &= ~SSUSB_U2_PORT_RG_IDDIG;
+ break;
+ case MTU3_DR_FORCE_NONE:
+ value &= ~(SSUSB_U2_PORT_FORCE_IDDIG | SSUSB_U2_PORT_RG_IDDIG);
+ break;
+ default:
+ return;
+ }
+ mtu3_writel(ssusb->ippc_base, SSUSB_U2_CTRL(0), value);
+}
+
int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
- INIT_DELAYED_WORK(&otg_sx->extcon_reg_dwork, extcon_register_dwork);
-
- if (otg_sx->manual_drd_enabled)
+ if (otg_sx->manual_drd_enabled) {
ssusb_debugfs_init(ssusb);
-
- /* It is enough to delay 1s for waiting for host initialization */
- schedule_delayed_work(&otg_sx->extcon_reg_dwork, HZ);
+ } else {
+ INIT_DELAYED_WORK(&otg_sx->extcon_reg_dwork,
+ extcon_register_dwork);
+
+ /*
+ * It is enough to delay 1s for waiting for
+ * host initialization
+ */
+ schedule_delayed_work(&otg_sx->extcon_reg_dwork, HZ);
+ }
return 0;
}
@@ -407,8 +427,8 @@ void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
- cancel_delayed_work(&otg_sx->extcon_reg_dwork);
-
if (otg_sx->manual_drd_enabled)
ssusb_debugfs_exit(ssusb);
+ else
+ cancel_delayed_work(&otg_sx->extcon_reg_dwork);
}
diff --git a/drivers/usb/mtu3/mtu3_dr.h b/drivers/usb/mtu3/mtu3_dr.h
index 9b228b5811b0..c179192408ba 100644
--- a/drivers/usb/mtu3/mtu3_dr.h
+++ b/drivers/usb/mtu3/mtu3_dr.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_dr.h - dual role switch and host glue layer header
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _MTU3_DR_H_
@@ -87,6 +78,8 @@ static inline void ssusb_gadget_exit(struct ssusb_mtk *ssusb)
int ssusb_otg_switch_init(struct ssusb_mtk *ssusb);
void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb);
int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on);
+void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
+ enum mtu3_dr_force_mode mode);
#else
@@ -103,6 +96,10 @@ static inline int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
return 0;
}
+static inline void
+ssusb_set_force_mode(struct ssusb_mtk *ssusb, enum mtu3_dr_force_mode mode)
+{}
+
#endif
#endif /* _MTU3_DR_H_ */
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index 434fca58143c..f05f10f5c171 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_gadget.c - MediaTek usb3 DRD peripheral support
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include "mtu3.h"
@@ -89,6 +80,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
switch (mtu->g.speed) {
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
if (usb_endpoint_xfer_int(desc) ||
usb_endpoint_xfer_isoc(desc)) {
interval = desc->bInterval;
@@ -456,7 +448,7 @@ static int mtu3_gadget_wakeup(struct usb_gadget *gadget)
return -EOPNOTSUPP;
spin_lock_irqsave(&mtu->lock, flags);
- if (mtu->g.speed == USB_SPEED_SUPER) {
+ if (mtu->g.speed >= USB_SPEED_SUPER) {
mtu3_setbits(mtu->mac_base, U3D_LINK_POWER_CONTROL, UX_EXIT);
} else {
mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
index 958d74dd2b78..ebdcf7a38c29 100644
--- a/drivers/usb/mtu3/mtu3_gadget_ep0.c
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_gadget_ep0.c - MediaTek USB3 DRD peripheral driver ep0 handling
*
* Copyright (c) 2016 MediaTek Inc.
*
* Author: Chunfeng.Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/usb/composite.h>
@@ -212,8 +203,8 @@ ep0_get_status(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)
case USB_RECIP_DEVICE:
result[0] = mtu->is_self_powered << USB_DEVICE_SELF_POWERED;
result[0] |= mtu->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
- /* superspeed only */
- if (mtu->g.speed == USB_SPEED_SUPER) {
+
+ if (mtu->g.speed >= USB_SPEED_SUPER) {
result[0] |= mtu->u1_enable << USB_DEV_STAT_U1_ENABLED;
result[0] |= mtu->u2_enable << USB_DEV_STAT_U2_ENABLED;
}
@@ -329,8 +320,8 @@ static int ep0_handle_feature_dev(struct mtu3 *mtu,
handled = handle_test_mode(mtu, setup);
break;
case USB_DEVICE_U1_ENABLE:
- if (mtu->g.speed != USB_SPEED_SUPER ||
- mtu->g.state != USB_STATE_CONFIGURED)
+ if (mtu->g.speed < USB_SPEED_SUPER ||
+ mtu->g.state != USB_STATE_CONFIGURED)
break;
lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
@@ -344,8 +335,8 @@ static int ep0_handle_feature_dev(struct mtu3 *mtu,
handled = 1;
break;
case USB_DEVICE_U2_ENABLE:
- if (mtu->g.speed != USB_SPEED_SUPER ||
- mtu->g.state != USB_STATE_CONFIGURED)
+ if (mtu->g.speed < USB_SPEED_SUPER ||
+ mtu->g.state != USB_STATE_CONFIGURED)
break;
lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
@@ -384,8 +375,8 @@ static int ep0_handle_feature(struct mtu3 *mtu,
break;
case USB_RECIP_INTERFACE:
/* superspeed only */
- if ((value == USB_INTRF_FUNC_SUSPEND)
- && (mtu->g.speed == USB_SPEED_SUPER)) {
+ if (value == USB_INTRF_FUNC_SUSPEND &&
+ mtu->g.speed >= USB_SPEED_SUPER) {
/*
* forward the request because function drivers
* should handle it
diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c
index e42d308b8dc2..d237d7e65c44 100644
--- a/drivers/usb/mtu3/mtu3_host.c
+++ b/drivers/usb/mtu3/mtu3_host.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_dr.c - dual role switch and host glue layer
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/clk.h>
@@ -79,20 +70,6 @@ int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb,
if (!ssusb->wakeup_en)
return 0;
- ssusb->wk_deb_p0 = devm_clk_get(dev, "wakeup_deb_p0");
- if (IS_ERR(ssusb->wk_deb_p0)) {
- dev_err(dev, "fail to get wakeup_deb_p0\n");
- return PTR_ERR(ssusb->wk_deb_p0);
- }
-
- if (of_property_read_bool(dn, "wakeup_deb_p1")) {
- ssusb->wk_deb_p1 = devm_clk_get(dev, "wakeup_deb_p1");
- if (IS_ERR(ssusb->wk_deb_p1)) {
- dev_err(dev, "fail to get wakeup_deb_p1\n");
- return PTR_ERR(ssusb->wk_deb_p1);
- }
- }
-
ssusb->pericfg = syscon_regmap_lookup_by_phandle(dn,
"mediatek,syscon-wakeup");
if (IS_ERR(ssusb->pericfg)) {
@@ -103,36 +80,6 @@ int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb,
return 0;
}
-static int ssusb_wakeup_clks_enable(struct ssusb_mtk *ssusb)
-{
- int ret;
-
- ret = clk_prepare_enable(ssusb->wk_deb_p0);
- if (ret) {
- dev_err(ssusb->dev, "failed to enable wk_deb_p0\n");
- goto usb_p0_err;
- }
-
- ret = clk_prepare_enable(ssusb->wk_deb_p1);
- if (ret) {
- dev_err(ssusb->dev, "failed to enable wk_deb_p1\n");
- goto usb_p1_err;
- }
-
- return 0;
-
-usb_p1_err:
- clk_disable_unprepare(ssusb->wk_deb_p0);
-usb_p0_err:
- return -EINVAL;
-}
-
-static void ssusb_wakeup_clks_disable(struct ssusb_mtk *ssusb)
-{
- clk_disable_unprepare(ssusb->wk_deb_p1);
- clk_disable_unprepare(ssusb->wk_deb_p0);
-}
-
static void host_ports_num_get(struct ssusb_mtk *ssusb)
{
u32 xhci_cap;
@@ -151,6 +98,7 @@ int ssusb_host_enable(struct ssusb_mtk *ssusb)
void __iomem *ibase = ssusb->ippc_base;
int num_u3p = ssusb->u3_ports;
int num_u2p = ssusb->u2_ports;
+	int u3_ports_disabled;
u32 check_clk;
u32 value;
int i;
@@ -158,8 +106,14 @@ int ssusb_host_enable(struct ssusb_mtk *ssusb)
/* power on host ip */
mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
- /* power on and enable all u3 ports */
+ /* power on and enable u3 ports except skipped ones */
+	u3_ports_disabled = 0;
for (i = 0; i < num_u3p; i++) {
+ if ((0x1 << i) & ssusb->u3p_dis_msk) {
+			u3_ports_disabled++;
+ continue;
+ }
+
value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
value &= ~(SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS);
value |= SSUSB_U3_PORT_HOST_SEL;
@@ -175,7 +129,7 @@ int ssusb_host_enable(struct ssusb_mtk *ssusb)
}
check_clk = SSUSB_XHCI_RST_B_STS;
- if (num_u3p)
+	if (num_u3p > u3_ports_disabled)
check_clk = SSUSB_U3_MAC_RST_B_STS;
return ssusb_check_clocks(ssusb, check_clk);
@@ -190,8 +144,11 @@ int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend)
int ret;
int i;
- /* power down and disable all u3 ports */
+ /* power down and disable u3 ports except skipped ones */
for (i = 0; i < num_u3p; i++) {
+ if ((0x1 << i) & ssusb->u3p_dis_msk)
+ continue;
+
value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
value |= SSUSB_U3_PORT_PDN;
value |= suspend ? 0 : SSUSB_U3_PORT_DIS;
@@ -223,6 +180,8 @@ int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend)
static void ssusb_host_setup(struct ssusb_mtk *ssusb)
{
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
host_ports_num_get(ssusb);
/*
@@ -231,6 +190,9 @@ static void ssusb_host_setup(struct ssusb_mtk *ssusb)
*/
ssusb_host_enable(ssusb);
+ if (otg_sx->manual_drd_enabled)
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
+
/* if port0 supports dual-role, works as host mode by default */
ssusb_set_vbus(&ssusb->otg_switch, 1);
}
@@ -276,19 +238,14 @@ void ssusb_host_exit(struct ssusb_mtk *ssusb)
int ssusb_wakeup_enable(struct ssusb_mtk *ssusb)
{
- int ret = 0;
-
- if (ssusb->wakeup_en) {
- ret = ssusb_wakeup_clks_enable(ssusb);
+ if (ssusb->wakeup_en)
ssusb_wakeup_ip_sleep_en(ssusb);
- }
- return ret;
+
+ return 0;
}
void ssusb_wakeup_disable(struct ssusb_mtk *ssusb)
{
- if (ssusb->wakeup_en) {
+ if (ssusb->wakeup_en)
ssusb_wakeup_ip_sleep_dis(ssusb);
- ssusb_wakeup_clks_disable(ssusb);
- }
}
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
index 06b29664470f..6ee371478d89 100644
--- a/drivers/usb/mtu3/mtu3_hw_regs.h
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_hw_regs.h - MediaTek USB3 DRD register and field definitions
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _SSUSB_HW_REGS_H_
@@ -58,6 +49,8 @@
#define U3D_QCR1 (SSUSB_DEV_BASE + 0x0404)
#define U3D_QCR2 (SSUSB_DEV_BASE + 0x0408)
#define U3D_QCR3 (SSUSB_DEV_BASE + 0x040C)
+#define U3D_TXQHIAR1 (SSUSB_DEV_BASE + 0x0484)
+#define U3D_RXQHIAR1 (SSUSB_DEV_BASE + 0x04C4)
#define U3D_TXQCSR1 (SSUSB_DEV_BASE + 0x0510)
#define U3D_TXQSAR1 (SSUSB_DEV_BASE + 0x0514)
@@ -189,6 +182,13 @@
#define QMU_RX_COZ(x) (BIT(16) << (x))
#define QMU_RX_ZLP(x) (BIT(0) << (x))
+/* U3D_TXQHIAR1 */
+/* U3D_RXQHIAR1 */
+#define QMU_LAST_DONE_PTR_HI(x) (((x) >> 16) & 0xf)
+#define QMU_CUR_GPD_ADDR_HI(x) (((x) >> 8) & 0xf)
+#define QMU_START_ADDR_HI_MSK GENMASK(3, 0)
+#define QMU_START_ADDR_HI(x) (((x) & 0xf) << 0)
+
/* U3D_TXQCSR1 */
/* U3D_RXQCSR1 */
#define QMU_Q_ACTIVE BIT(15)
@@ -225,6 +225,7 @@
#define CAP_TX_EP_NUM(x) ((x) & 0x1f)
/* U3D_MISC_CTRL */
+#define DMA_ADDR_36BIT BIT(31)
#define VBUS_ON BIT(1)
#define VBUS_FRC_EN BIT(0)
@@ -457,11 +458,14 @@
#define SSUSB_VBUS_CHG_INT_B_EN BIT(6)
/* U3D_SSUSB_U3_CTRL_0P */
+#define SSUSB_U3_PORT_SSP_SPEED BIT(9)
#define SSUSB_U3_PORT_HOST_SEL BIT(2)
#define SSUSB_U3_PORT_PDN BIT(1)
#define SSUSB_U3_PORT_DIS BIT(0)
/* U3D_SSUSB_U2_CTRL_0P */
+#define SSUSB_U2_PORT_RG_IDDIG BIT(12)
+#define SSUSB_U2_PORT_FORCE_IDDIG BIT(11)
#define SSUSB_U2_PORT_VBUSVALID BIT(9)
#define SSUSB_U2_PORT_OTG_SEL BIT(7)
#define SSUSB_U2_PORT_HOST BIT(2)
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
index 088e3e685c4f..3650fd11fc49 100644
--- a/drivers/usb/mtu3/mtu3_plat.c
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/clk.h>
@@ -21,7 +12,6 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include "mtu3.h"
@@ -110,15 +100,9 @@ static void ssusb_phy_power_off(struct ssusb_mtk *ssusb)
phy_power_off(ssusb->phys[i]);
}
-static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
+static int ssusb_clks_enable(struct ssusb_mtk *ssusb)
{
- int ret = 0;
-
- ret = regulator_enable(ssusb->vusb33);
- if (ret) {
- dev_err(ssusb->dev, "failed to enable vusb33\n");
- goto vusb33_err;
- }
+ int ret;
ret = clk_prepare_enable(ssusb->sys_clk);
if (ret) {
@@ -132,6 +116,52 @@ static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
goto ref_clk_err;
}
+ ret = clk_prepare_enable(ssusb->mcu_clk);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to enable mcu_clk\n");
+ goto mcu_clk_err;
+ }
+
+ ret = clk_prepare_enable(ssusb->dma_clk);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to enable dma_clk\n");
+ goto dma_clk_err;
+ }
+
+ return 0;
+
+dma_clk_err:
+ clk_disable_unprepare(ssusb->mcu_clk);
+mcu_clk_err:
+ clk_disable_unprepare(ssusb->ref_clk);
+ref_clk_err:
+ clk_disable_unprepare(ssusb->sys_clk);
+sys_clk_err:
+ return ret;
+}
+
+static void ssusb_clks_disable(struct ssusb_mtk *ssusb)
+{
+ clk_disable_unprepare(ssusb->dma_clk);
+ clk_disable_unprepare(ssusb->mcu_clk);
+ clk_disable_unprepare(ssusb->ref_clk);
+ clk_disable_unprepare(ssusb->sys_clk);
+}
+
+static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
+{
+ int ret = 0;
+
+ ret = regulator_enable(ssusb->vusb33);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to enable vusb33\n");
+ goto vusb33_err;
+ }
+
+ ret = ssusb_clks_enable(ssusb);
+ if (ret)
+ goto clks_err;
+
ret = ssusb_phy_init(ssusb);
if (ret) {
dev_err(ssusb->dev, "failed to init phy\n");
@@ -149,20 +179,16 @@ static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
phy_err:
ssusb_phy_exit(ssusb);
phy_init_err:
- clk_disable_unprepare(ssusb->ref_clk);
-ref_clk_err:
- clk_disable_unprepare(ssusb->sys_clk);
-sys_clk_err:
+ ssusb_clks_disable(ssusb);
+clks_err:
regulator_disable(ssusb->vusb33);
vusb33_err:
-
return ret;
}
static void ssusb_rscs_exit(struct ssusb_mtk *ssusb)
{
- clk_disable_unprepare(ssusb->sys_clk);
- clk_disable_unprepare(ssusb->ref_clk);
+ ssusb_clks_disable(ssusb);
regulator_disable(ssusb->vusb33);
ssusb_phy_power_off(ssusb);
ssusb_phy_exit(ssusb);
@@ -176,31 +202,17 @@ static void ssusb_ip_sw_reset(struct ssusb_mtk *ssusb)
mtu3_clrbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST);
}
-static int get_iddig_pinctrl(struct ssusb_mtk *ssusb)
+/* ignore the error if the clock does not exist */
+static struct clk *get_optional_clk(struct device *dev, const char *id)
{
- struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
-
- otg_sx->id_pinctrl = devm_pinctrl_get(ssusb->dev);
- if (IS_ERR(otg_sx->id_pinctrl)) {
- dev_err(ssusb->dev, "Cannot find id pinctrl!\n");
- return PTR_ERR(otg_sx->id_pinctrl);
- }
-
- otg_sx->id_float =
- pinctrl_lookup_state(otg_sx->id_pinctrl, "id_float");
- if (IS_ERR(otg_sx->id_float)) {
- dev_err(ssusb->dev, "Cannot find pinctrl id_float!\n");
- return PTR_ERR(otg_sx->id_float);
- }
+ struct clk *opt_clk;
- otg_sx->id_ground =
- pinctrl_lookup_state(otg_sx->id_pinctrl, "id_ground");
- if (IS_ERR(otg_sx->id_ground)) {
- dev_err(ssusb->dev, "Cannot find pinctrl id_ground!\n");
- return PTR_ERR(otg_sx->id_ground);
- }
+ opt_clk = devm_clk_get(dev, id);
+ /* ignore error number except EPROBE_DEFER */
+ if (IS_ERR(opt_clk) && (PTR_ERR(opt_clk) != -EPROBE_DEFER))
+ opt_clk = NULL;
- return 0;
+ return opt_clk;
}
static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
@@ -225,18 +237,17 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
return PTR_ERR(ssusb->sys_clk);
}
- /*
- * reference clock is usually a "fixed-clock", make it optional
- * for backward compatibility and ignore the error if it does
- * not exist.
- */
- ssusb->ref_clk = devm_clk_get(dev, "ref_ck");
- if (IS_ERR(ssusb->ref_clk)) {
- if (PTR_ERR(ssusb->ref_clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ ssusb->ref_clk = get_optional_clk(dev, "ref_ck");
+ if (IS_ERR(ssusb->ref_clk))
+ return PTR_ERR(ssusb->ref_clk);
- ssusb->ref_clk = NULL;
- }
+ ssusb->mcu_clk = get_optional_clk(dev, "mcu_ck");
+ if (IS_ERR(ssusb->mcu_clk))
+ return PTR_ERR(ssusb->mcu_clk);
+
+ ssusb->dma_clk = get_optional_clk(dev, "dma_ck");
+ if (IS_ERR(ssusb->dma_clk))
+ return PTR_ERR(ssusb->dma_clk);
ssusb->num_phys = of_count_phandle_with_args(node,
"phys", "#phy-cells");
@@ -263,10 +274,8 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
return PTR_ERR(ssusb->ippc_base);
ssusb->dr_mode = usb_get_dr_mode(dev);
- if (ssusb->dr_mode == USB_DR_MODE_UNKNOWN) {
- dev_err(dev, "dr_mode is error\n");
- return -EINVAL;
- }
+ if (ssusb->dr_mode == USB_DR_MODE_UNKNOWN)
+ ssusb->dr_mode = USB_DR_MODE_OTG;
if (ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
return 0;
@@ -276,10 +285,10 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
if (ret)
return ret;
- if (ssusb->dr_mode != USB_DR_MODE_OTG)
- return 0;
+ /* optional property, ignore the error if it does not exist */
+ of_property_read_u32(node, "mediatek,u3p-dis-msk",
+ &ssusb->u3p_dis_msk);
- /* if dual-role mode is supported */
vbus = devm_regulator_get(&pdev->dev, "vbus");
if (IS_ERR(vbus)) {
dev_err(dev, "failed to get vbus\n");
@@ -287,6 +296,10 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
}
otg_sx->vbus = vbus;
+ if (ssusb->dr_mode == USB_DR_MODE_HOST)
+ return 0;
+
+ /* if dual-role mode is supported */
otg_sx->is_u3_drd = of_property_read_bool(node, "mediatek,usb3-drd");
otg_sx->manual_drd_enabled =
of_property_read_bool(node, "enable-manual-drd");
@@ -297,15 +310,11 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
dev_err(ssusb->dev, "couldn't get extcon device\n");
return -EPROBE_DEFER;
}
- if (otg_sx->manual_drd_enabled) {
- ret = get_iddig_pinctrl(ssusb);
- if (ret)
- return ret;
- }
}
- dev_info(dev, "dr_mode: %d, is_u3_dr: %d\n",
- ssusb->dr_mode, otg_sx->is_u3_drd);
+ dev_info(dev, "dr_mode: %d, is_u3_dr: %d, u3p_dis_msk: %x, drd: %s\n",
+ ssusb->dr_mode, otg_sx->is_u3_drd, ssusb->u3p_dis_msk,
+ otg_sx->manual_drd_enabled ? "manual" : "auto");
return 0;
}
@@ -447,8 +456,7 @@ static int __maybe_unused mtu3_suspend(struct device *dev)
ssusb_host_disable(ssusb, true);
ssusb_phy_power_off(ssusb);
- clk_disable_unprepare(ssusb->sys_clk);
- clk_disable_unprepare(ssusb->ref_clk);
+ ssusb_clks_disable(ssusb);
ssusb_wakeup_enable(ssusb);
return 0;
@@ -466,27 +474,21 @@ static int __maybe_unused mtu3_resume(struct device *dev)
return 0;
ssusb_wakeup_disable(ssusb);
- ret = clk_prepare_enable(ssusb->sys_clk);
- if (ret)
- goto err_sys_clk;
-
- ret = clk_prepare_enable(ssusb->ref_clk);
+ ret = ssusb_clks_enable(ssusb);
if (ret)
- goto err_ref_clk;
+ goto clks_err;
ret = ssusb_phy_power_on(ssusb);
if (ret)
- goto err_power_on;
+ goto phy_err;
ssusb_host_enable(ssusb);
return 0;
-err_power_on:
- clk_disable_unprepare(ssusb->ref_clk);
-err_ref_clk:
- clk_disable_unprepare(ssusb->sys_clk);
-err_sys_clk:
+phy_err:
+ ssusb_clks_disable(ssusb);
+clks_err:
return ret;
}
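
get_optional_clk() above turns a missing clock into a NULL pointer, keeping only -EPROBE_DEFER as a real error, because the common clock framework treats a NULL clk as a dummy: clk_prepare_enable() and clk_disable_unprepare() become no-ops. A minimal consumer-side sketch of that behaviour (example_enable_optional() and the reuse of the "ref_ck" id are purely illustrative); later kernels added devm_clk_get_optional() for exactly this case:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_enable_optional(struct device *dev)
{
	/* "ref_ck" is reused from the patch purely as an example id */
	struct clk *ck = devm_clk_get(dev, "ref_ck");

	if (IS_ERR(ck)) {
		if (PTR_ERR(ck) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		ck = NULL;	/* clock not provided: treat it as optional */
	}

	return clk_prepare_enable(ck);	/* a NULL clk is a no-op, returns 0 */
}
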
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index 7d9ba8a52368..ff62ba232177 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_qmu.c - Queue Management Unit driver for device controller
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
/*
@@ -40,7 +31,58 @@
#define GPD_FLAGS_IOC BIT(7)
#define GPD_EXT_FLAG_ZLP BIT(5)
+#define GPD_EXT_NGP(x) (((x) & 0xf) << 4)
+#define GPD_EXT_BUF(x) (((x) & 0xf) << 0)
+
+#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
+#define HILO_DMA(hi, lo) \
+ ((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))
+
+static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
+{
+ u32 txcpr;
+ u32 txhiar;
+
+ txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
+ txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
+ return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
+}
+
+static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
+{
+ u32 rxcpr;
+ u32 rxhiar;
+
+ rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
+ rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
+
+ return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
+}
+
+static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
+{
+ u32 tqhiar;
+
+ mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
+ cpu_to_le32(lower_32_bits(dma)));
+ tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
+ tqhiar &= ~QMU_START_ADDR_HI_MSK;
+ tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
+ mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
+}
+
+static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
+{
+ u32 rqhiar;
+
+ mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
+ cpu_to_le32(lower_32_bits(dma)));
+ rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
+ rqhiar &= ~QMU_START_ADDR_HI_MSK;
+ rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
+ mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
+}
static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
dma_addr_t dma_addr)
@@ -193,21 +235,27 @@ static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
struct qmu_gpd *gpd = ring->enqueue;
struct usb_request *req = &mreq->request;
+ dma_addr_t enq_dma;
+ u16 ext_addr;
/* set all fields to zero as default value */
memset(gpd, 0, sizeof(*gpd));
- gpd->buffer = cpu_to_le32((u32)req->dma);
+ gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
+ ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
gpd->buf_len = cpu_to_le16(req->length);
gpd->flag |= GPD_FLAGS_IOC;
/* get the next GPD */
enq = advance_enq_gpd(ring);
- dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p\n",
- mep->epnum, gpd, enq);
+ enq_dma = gpd_virt_to_dma(ring, enq);
+ dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
+ mep->epnum, gpd, enq, &enq_dma);
enq->flag &= ~GPD_FLAGS_HWO;
- gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
+ gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
+ ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
+ gpd->tx_ext_addr = cpu_to_le16(ext_addr);
if (req->zero)
gpd->ext_flag |= GPD_EXT_FLAG_ZLP;
@@ -226,21 +274,27 @@ static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
struct qmu_gpd *gpd = ring->enqueue;
struct usb_request *req = &mreq->request;
+ dma_addr_t enq_dma;
+ u16 ext_addr;
/* set all fields to zero as default value */
memset(gpd, 0, sizeof(*gpd));
- gpd->buffer = cpu_to_le32((u32)req->dma);
+ gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
+ ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
gpd->data_buf_len = cpu_to_le16(req->length);
gpd->flag |= GPD_FLAGS_IOC;
/* get the next GPD */
enq = advance_enq_gpd(ring);
- dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p\n",
- mep->epnum, gpd, enq);
+ enq_dma = gpd_virt_to_dma(ring, enq);
+ dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
+ mep->epnum, gpd, enq, &enq_dma);
enq->flag &= ~GPD_FLAGS_HWO;
- gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
+ gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
+ ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
+ gpd->rx_ext_addr = cpu_to_le16(ext_addr);
gpd->chksum = qmu_calc_checksum((u8 *)gpd);
gpd->flag |= GPD_FLAGS_HWO;
@@ -267,8 +321,8 @@ int mtu3_qmu_start(struct mtu3_ep *mep)
if (mep->is_in) {
/* set QMU start address */
- mtu3_writel(mbase, USB_QMU_TQSAR(mep->epnum), ring->dma);
- mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
+ write_txq_start_addr(mbase, epnum, ring->dma);
+ mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
mtu3_setbits(mbase, U3D_QCR0, QMU_TX_CS_EN(epnum));
/* send zero length packet according to ZLP flag in GPD */
mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
@@ -282,8 +336,8 @@ int mtu3_qmu_start(struct mtu3_ep *mep)
mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);
} else {
- mtu3_writel(mbase, USB_QMU_RQSAR(mep->epnum), ring->dma);
- mtu3_setbits(mbase, MU3D_EP_RXCR0(mep->epnum), RX_DMAREQEN);
+ write_rxq_start_addr(mbase, epnum, ring->dma);
+ mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
mtu3_setbits(mbase, U3D_QCR0, QMU_RX_CS_EN(epnum));
/* don't expect ZLP */
mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
@@ -353,9 +407,9 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
void __iomem *mbase = mtu->mac_base;
struct qmu_gpd *gpd_current = NULL;
- dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
struct usb_request *req = NULL;
struct mtu3_request *mreq;
+ dma_addr_t cur_gpd_dma;
u32 txcsr = 0;
int ret;
@@ -365,7 +419,8 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
else
return;
- gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+ cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
+ gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
if (le16_to_cpu(gpd_current->buf_len) != 0) {
dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
@@ -408,12 +463,13 @@ static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
void __iomem *mbase = mtu->mac_base;
struct qmu_gpd *gpd = ring->dequeue;
struct qmu_gpd *gpd_current = NULL;
- dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
struct usb_request *request = NULL;
struct mtu3_request *mreq;
+ dma_addr_t cur_gpd_dma;
	/* translate the physical address read from the QMU register to a virtual address */
- gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+ cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
+ gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
__func__, epnum, gpd, gpd_current, ring->enqueue);
@@ -446,11 +502,12 @@ static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
void __iomem *mbase = mtu->mac_base;
struct qmu_gpd *gpd = ring->dequeue;
struct qmu_gpd *gpd_current = NULL;
- dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
struct usb_request *req = NULL;
struct mtu3_request *mreq;
+ dma_addr_t cur_gpd_dma;
- gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+ cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
+ gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
__func__, epnum, gpd, gpd_current, ring->enqueue);
diff --git a/drivers/usb/mtu3/mtu3_qmu.h b/drivers/usb/mtu3/mtu3_qmu.h
index 4dafa16bf120..81f5151a55ed 100644
--- a/drivers/usb/mtu3/mtu3_qmu.h
+++ b/drivers/usb/mtu3/mtu3_qmu.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_qmu.h - Queue Management Unit driver header
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __MTK_QMU_H__
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index 02fbb4fe3745..0ad664efda6b 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments AM35x "glue layer"
@@ -8,23 +9,6 @@
* Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
*
* This file is part of the Inventra Controller Driver for Linux.
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
- *
- * The Inventra Controller Driver for Linux is distributed in
- * the hope that it will be useful, but WITHOUT ANY WARRANTY;
- * without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- * License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with The Inventra Controller Driver for Linux ; if not,
- * write to the Free Software Foundation, Inc., 59 Temple Place,
- * Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
@@ -133,11 +117,9 @@ static void am35x_musb_set_vbus(struct musb *musb, int is_on)
#define POLL_SECONDS 2
-static struct timer_list otg_workaround;
-
-static void otg_timer(unsigned long _musb)
+static void otg_timer(struct timer_list *t)
{
- struct musb *musb = (void *)_musb;
+ struct musb *musb = from_timer(musb, t, dev_timer);
void __iomem *mregs = musb->mregs;
u8 devctl;
unsigned long flags;
@@ -173,7 +155,7 @@ static void otg_timer(unsigned long _musb)
case OTG_STATE_B_IDLE:
devctl = musb_readb(mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE)
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
else
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
break;
@@ -195,12 +177,12 @@ static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) {
dev_dbg(musb->controller, "%s active, deleting timer\n",
usb_otg_state_string(musb->xceiv->otg->state));
- del_timer(&otg_workaround);
+ del_timer(&musb->dev_timer);
last_timer = jiffies;
return;
}
- if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
+ if (time_after(last_timer, timeout) && timer_pending(&musb->dev_timer)) {
dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
return;
}
@@ -209,7 +191,7 @@ static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
usb_otg_state_string(musb->xceiv->otg->state),
jiffies_to_msecs(timeout - jiffies));
- mod_timer(&otg_workaround, timeout);
+ mod_timer(&musb->dev_timer, timeout);
}
static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
@@ -278,14 +260,14 @@ static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
WARNING("VBUS error workaround (delay coming)\n");
} else if (drvvbus) {
MUSB_HST_MODE(musb);
otg->default_a = 1;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
- del_timer(&otg_workaround);
+ del_timer(&musb->dev_timer);
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
@@ -324,7 +306,7 @@ eoi:
/* Poll for ID change */
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
spin_unlock_irqrestore(&musb->lock, flags);
@@ -365,7 +347,7 @@ static int am35x_musb_init(struct musb *musb)
if (IS_ERR_OR_NULL(musb->xceiv))
return -EPROBE_DEFER;
- setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
+ timer_setup(&musb->dev_timer, otg_timer, 0);
/* Reset the musb */
if (data->reset)
@@ -395,7 +377,7 @@ static int am35x_musb_exit(struct musb *musb)
struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
struct omap_musb_board_data *data = plat->board_data;
- del_timer_sync(&otg_workaround);
+ del_timer_sync(&musb->dev_timer);
/* Shutdown the on-chip PHY and its PLL. */
if (data->set_phy_power)
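
The am35x change above, and the blackfin/da8xx/davinci changes that follow, are all the same timer-API conversion: the file-scope timer initialised with setup_timer() and an (unsigned long) cast is replaced by a per-device struct timer_list set up with timer_setup(), and the callback recovers its context with from_timer(). A generic sketch of the new-style pattern (struct my_glue, my_timer_fn and my_glue_init are placeholders, not names from the patch):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_glue {
	struct timer_list dev_timer;
	/* ... driver state ... */
};

/* New-style callback: receives the timer itself, not an unsigned long. */
static void my_timer_fn(struct timer_list *t)
{
	struct my_glue *glue = from_timer(glue, t, dev_timer);

	/* ... poll hardware, then optionally re-arm ... */
	mod_timer(&glue->dev_timer, jiffies + 2 * HZ);
}

static void my_glue_init(struct my_glue *glue)
{
	timer_setup(&glue->dev_timer, my_timer_fn, 0);	/* was setup_timer() */
	mod_timer(&glue->dev_timer, jiffies + HZ);
}
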
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 4418574a36a1..0a98dcd66d19 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* MUSB OTG controller driver for Blackfin Processors
*
* Copyright 2006-2008 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/module.h>
@@ -223,7 +222,7 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
if ((musb->xceiv->otg->state == OTG_STATE_B_IDLE
|| musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON) ||
(musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) {
- mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+ mod_timer(&musb->dev_timer, jiffies + TIMER_DELAY);
musb->a_wait_bcon = TIMER_DELAY;
}
@@ -232,9 +231,9 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
return retval;
}
-static void musb_conn_timer_handler(unsigned long _musb)
+static void musb_conn_timer_handler(struct timer_list *t)
{
- struct musb *musb = (void *)_musb;
+ struct musb *musb = from_timer(musb, t, dev_timer);
unsigned long flags;
u16 val;
static u8 toggle;
@@ -266,7 +265,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
musb_writeb(musb->mregs, MUSB_INTRUSB, val);
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
}
- mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+ mod_timer(&musb->dev_timer, jiffies + TIMER_DELAY);
break;
case OTG_STATE_B_IDLE:
/*
@@ -310,7 +309,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
* shortening it, if accelerating A-plug detection
* is needed in OTG mode.
*/
- mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY / 4);
+ mod_timer(&musb->dev_timer, jiffies + TIMER_DELAY / 4);
}
break;
default:
@@ -445,8 +444,7 @@ static int bfin_musb_init(struct musb *musb)
bfin_musb_reg_init(musb);
- setup_timer(&musb_conn_timer, musb_conn_timer_handler,
- (unsigned long) musb);
+ timer_setup(&musb->dev_timer, musb_conn_timer_handler, 0);
musb->xceiv->set_power = bfin_musb_set_power;
diff --git a/drivers/usb/musb/blackfin.h b/drivers/usb/musb/blackfin.h
index c84dae546dc6..5b149915b0f8 100644
--- a/drivers/usb/musb/blackfin.h
+++ b/drivers/usb/musb/blackfin.h
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 by Analog Devices, Inc.
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
*/
#ifndef __MUSB_BLACKFIN_H__
@@ -82,6 +78,4 @@ static void dump_fifo_data(u8 *buf, u16 len)
/* Almost 1 second */
#define TIMER_DELAY (1 * HZ)
-static struct timer_list musb_conn_timer;
-
#endif /* __MUSB_BLACKFIN_H__ */
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index a13bd3625043..b4d6d9bb3239 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005-2006 by Texas Instruments
*
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index df88123274ca..0397606a211b 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments DA8xx/OMAP-L1x "glue layer"
*
@@ -10,23 +11,6 @@
* Copyright (c) 2016 Petr Kulhavy <petr@barix.com>
*
* This file is part of the Inventra Controller Driver for Linux.
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
- *
- * The Inventra Controller Driver for Linux is distributed in
- * the hope that it will be useful, but WITHOUT ANY WARRANTY;
- * without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- * License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with The Inventra Controller Driver for Linux ; if not,
- * write to the Free Software Foundation, Inc., 59 Temple Place,
- * Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
@@ -50,10 +34,7 @@
#define DA8XX_USB_CTRL_REG 0x04
#define DA8XX_USB_STAT_REG 0x08
#define DA8XX_USB_EMULATION_REG 0x0c
-#define DA8XX_USB_MODE_REG 0x10 /* Transparent, CDC, [Generic] RNDIS */
-#define DA8XX_USB_AUTOREQ_REG 0x14
#define DA8XX_USB_SRP_FIX_TIME_REG 0x18
-#define DA8XX_USB_TEARDOWN_REG 0x1c
#define DA8XX_USB_INTR_SRC_REG 0x20
#define DA8XX_USB_INTR_SRC_SET_REG 0x24
#define DA8XX_USB_INTR_SRC_CLEAR_REG 0x28
@@ -138,11 +119,9 @@ static void da8xx_musb_set_vbus(struct musb *musb, int is_on)
#define POLL_SECONDS 2
-static struct timer_list otg_workaround;
-
-static void otg_timer(unsigned long _musb)
+static void otg_timer(struct timer_list *t)
{
- struct musb *musb = (void *)_musb;
+ struct musb *musb = from_timer(musb, t, dev_timer);
void __iomem *mregs = musb->mregs;
u8 devctl;
unsigned long flags;
@@ -178,7 +157,7 @@ static void otg_timer(unsigned long _musb)
* VBUSERR got reported during enumeration" cases.
*/
if (devctl & MUSB_DEVCTL_VBUS) {
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
break;
}
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
@@ -201,7 +180,7 @@ static void otg_timer(unsigned long _musb)
musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION);
devctl = musb_readb(mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE)
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
else
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
break;
@@ -223,12 +202,12 @@ static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout)
musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) {
dev_dbg(musb->controller, "%s active, deleting timer\n",
usb_otg_state_string(musb->xceiv->otg->state));
- del_timer(&otg_workaround);
+ del_timer(&musb->dev_timer);
last_timer = jiffies;
return;
}
- if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
+ if (time_after(last_timer, timeout) && timer_pending(&musb->dev_timer)) {
dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
return;
}
@@ -237,7 +216,7 @@ static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout)
dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
usb_otg_state_string(musb->xceiv->otg->state),
jiffies_to_msecs(timeout - jiffies));
- mod_timer(&otg_workaround, timeout);
+ mod_timer(&musb->dev_timer, timeout);
}
static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
@@ -297,14 +276,14 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
WARNING("VBUS error workaround (delay coming)\n");
} else if (drvvbus) {
MUSB_HST_MODE(musb);
otg->default_a = 1;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
- del_timer(&otg_workaround);
+ del_timer(&musb->dev_timer);
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
@@ -331,7 +310,7 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
/* Poll for ID change */
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
spin_unlock_irqrestore(&musb->lock, flags);
@@ -393,7 +372,7 @@ static int da8xx_musb_init(struct musb *musb)
goto fail;
}
- setup_timer(&otg_workaround, otg_timer, (unsigned long)musb);
+ timer_setup(&musb->dev_timer, otg_timer, 0);
/* Reset the controller */
musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK);
@@ -431,7 +410,7 @@ static int da8xx_musb_exit(struct musb *musb)
{
struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent);
- del_timer_sync(&otg_workaround);
+ del_timer_sync(&musb->dev_timer);
phy_power_off(glue->phy);
phy_exit(glue->phy);
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 52b491d3d5d8..2ad39dcd2f4c 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -1,24 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005-2006 by Texas Instruments
*
* This file is part of the Inventra Controller Driver for Linux.
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
- *
- * The Inventra Controller Driver for Linux is distributed in
- * the hope that it will be useful, but WITHOUT ANY WARRANTY;
- * without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- * License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with The Inventra Controller Driver for Linux ; if not,
- * write to the Free Software Foundation, Inc., 59 Temple Place,
- * Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
@@ -199,11 +183,9 @@ static void davinci_musb_set_vbus(struct musb *musb, int is_on)
#define POLL_SECONDS 2
-static struct timer_list otg_workaround;
-
-static void otg_timer(unsigned long _musb)
+static void otg_timer(struct timer_list *t)
{
- struct musb *musb = (void *)_musb;
+ struct musb *musb = from_timer(musb, t, dev_timer);
void __iomem *mregs = musb->mregs;
u8 devctl;
unsigned long flags;
@@ -224,7 +206,7 @@ static void otg_timer(unsigned long _musb)
* VBUSERR got reported during enumeration" cases.
*/
if (devctl & MUSB_DEVCTL_VBUS) {
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
break;
}
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
@@ -248,7 +230,7 @@ static void otg_timer(unsigned long _musb)
devctl | MUSB_DEVCTL_SESSION);
devctl = musb_readb(mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE)
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
else
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
break;
@@ -325,14 +307,14 @@ static irqreturn_t davinci_musb_interrupt(int irq, void *__hci)
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
WARNING("VBUS error workaround (delay coming)\n");
} else if (drvvbus) {
MUSB_HST_MODE(musb);
otg->default_a = 1;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
- del_timer(&otg_workaround);
+ del_timer(&musb->dev_timer);
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
@@ -361,7 +343,7 @@ static irqreturn_t davinci_musb_interrupt(int irq, void *__hci)
/* poll for ID change */
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
spin_unlock_irqrestore(&musb->lock, flags);
@@ -393,7 +375,7 @@ static int davinci_musb_init(struct musb *musb)
if (revision == 0)
goto fail;
- setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
+ timer_setup(&musb->dev_timer, otg_timer, 0);
davinci_musb_source_power(musb, 0, 1);
@@ -443,7 +425,7 @@ unregister:
static int davinci_musb_exit(struct musb *musb)
{
- del_timer_sync(&otg_workaround);
+ del_timer_sync(&musb->dev_timer);
/* force VBUS off */
if (cpu_is_davinci_dm355()) {
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
index 371baa0ee509..e021485c83ae 100644
--- a/drivers/usb/musb/davinci.h
+++ b/drivers/usb/musb/davinci.h
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005-2006 by Texas Instruments
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
*/
#ifndef __MUSB_HDRDF_H__
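
The davinci hunks above illustrate the kernel-wide timer API conversion that runs through this series: the callback now receives a struct timer_list * and recovers its containing structure with from_timer(), and timer_setup() replaces setup_timer() with its unsigned long cookie. A minimal standalone sketch of the pattern, using hypothetical names rather than this driver's actual code:

/*
 * Illustrative sketch only -- "my_glue" and friends are made up; the API
 * calls (timer_setup, from_timer, mod_timer, del_timer_sync) are the real
 * kernel interfaces the hunks above switch to.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_glue {
	struct timer_list poll_timer;	/* timer embedded in the device struct */
	int state;
};

/* New-style callback: takes the timer pointer, not an unsigned long cookie. */
static void my_poll(struct timer_list *t)
{
	struct my_glue *glue = from_timer(glue, t, poll_timer);

	glue->state++;
	mod_timer(&glue->poll_timer, jiffies + 2 * HZ);
}

static void my_init(struct my_glue *glue)
{
	/* Replaces setup_timer(&glue->poll_timer, fn, (unsigned long)glue). */
	timer_setup(&glue->poll_timer, my_poll, 0);
	mod_timer(&glue->poll_timer, jiffies + 2 * HZ);
}

static void my_exit(struct my_glue *glue)
{
	del_timer_sync(&glue->poll_timer);
}
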
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index 40c68c23d553..04d8b2bc205a 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Ingenic JZ4740 "glue layer"
*
* Copyright (C) 2013, Apelete Seketeli <apelete@seketeli.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
index 1e58ed2361cc..5f04f8e3a640 100644
--- a/drivers/usb/musb/musb_am335x.c
+++ b/drivers/usb/musb/musb_am335x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index ff5a1a8989d5..ea5013aa69e2 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver core code
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
/*
@@ -485,9 +460,9 @@ void musb_load_testpacket(struct musb *musb)
/*
* Handles OTG hnp timeouts, such as b_ase0_brst
*/
-static void musb_otg_timer_func(unsigned long data)
+static void musb_otg_timer_func(struct timer_list *t)
{
- struct musb *musb = (struct musb *)data;
+ struct musb *musb = from_timer(musb, t, otg_timer);
unsigned long flags;
spin_lock_irqsave(&musb->lock, flags);
@@ -767,6 +742,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
case OTG_STATE_B_IDLE:
if (!musb->is_active)
break;
+ /* fall through */
case OTG_STATE_B_PERIPHERAL:
musb_g_suspend(musb);
musb->is_active = musb->g.b_hnp_enable;
@@ -2330,7 +2306,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
if (status < 0)
goto fail3;
- setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
+ timer_setup(&musb->otg_timer, musb_otg_timer_func, 0);
/* attach to the IRQ */
if (request_irq(nIrq, musb->isr, IRQF_SHARED, dev_name(dev), musb)) {
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 20f4614178d9..385841ee6f46 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver defines
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef __MUSB_CORE_H__
@@ -345,6 +320,7 @@ struct musb {
struct list_head pending_list; /* pending work list */
struct timer_list otg_timer;
+ struct timer_list dev_timer;
struct notifier_block nb;
struct dma_controller *dma_controller;
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 1ec0a4947b6b..d0dd4f470bbe 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
index 9a78877a8afe..5e0f079dde21 100644
--- a/drivers/usb/musb/musb_debug.h
+++ b/drivers/usb/musb/musb_debug.h
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver debug defines
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef __MUSB_LINUX_DEBUG_H__
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 952733ceaac8..7cf5a1bbdaff 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver debugfs support
*
* Copyright 2010 Nokia Corporation
* Contact: Felipe Balbi <felipe.balbi@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 04c3bd86bd62..a4241f4d430e 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver DMA controller abstraction
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef __MUSB_DMA_H__
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index f6b526606ad1..05a679d5e3a2 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments DSPS platforms "glue layer"
*
@@ -7,22 +8,6 @@
*
* This file is part of the Inventra Controller Driver for Linux.
*
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
- *
- * The Inventra Controller Driver for Linux is distributed in
- * the hope that it will be useful, but WITHOUT ANY WARRANTY;
- * without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- * License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with The Inventra Controller Driver for Linux ; if not,
- * write to the Free Software Foundation, Inc., 59 Temple Place,
- * Suite 330, Boston, MA 02111-1307 USA
- *
* musb_dsps.c will be a common file for all the TI DSPS platforms
* such as dm64x, dm36x, dm35x, da8x, am35x and ti81x.
* For now only ti81x is using this and in future davinci.c, am35x.c
@@ -119,7 +104,6 @@ struct dsps_glue {
struct platform_device *musb; /* child musb pdev */
const struct dsps_musb_wrapper *wrp; /* wrapper register offsets */
int vbus_irq; /* optional vbus irq */
- struct timer_list timer; /* otg_workaround timer */
unsigned long last_timer; /* last timer data for each instance */
bool sw_babble_enabled;
void __iomem *usbss_base;
@@ -149,6 +133,7 @@ static const struct debugfs_reg32 dsps_musb_regs[] = {
static void dsps_mod_timer(struct dsps_glue *glue, int wait_ms)
{
+ struct musb *musb = platform_get_drvdata(glue->musb);
int wait;
if (wait_ms < 0)
@@ -156,7 +141,7 @@ static void dsps_mod_timer(struct dsps_glue *glue, int wait_ms)
else
wait = msecs_to_jiffies(wait_ms);
- mod_timer(&glue->timer, jiffies + wait);
+ mod_timer(&musb->dev_timer, jiffies + wait);
}
/*
@@ -216,7 +201,7 @@ static void dsps_musb_disable(struct musb *musb)
musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
musb_writel(reg_base, wrp->epintr_clear,
wrp->txep_bitmap | wrp->rxep_bitmap);
- del_timer_sync(&glue->timer);
+ del_timer_sync(&musb->dev_timer);
}
/* Caller must take musb->lock */
@@ -230,7 +215,7 @@ static int dsps_check_status(struct musb *musb, void *unused)
int skip_session = 0;
if (glue->vbus_irq)
- del_timer(&glue->timer);
+ del_timer(&musb->dev_timer);
/*
* We poll because DSPS IP's won't expose several OTG-critical
@@ -282,9 +267,9 @@ static int dsps_check_status(struct musb *musb, void *unused)
return 0;
}
-static void otg_timer(unsigned long _musb)
+static void otg_timer(struct timer_list *t)
{
- struct musb *musb = (void *)_musb;
+ struct musb *musb = from_timer(musb, t, dev_timer);
struct device *dev = musb->controller;
unsigned long flags;
int err;
@@ -480,7 +465,7 @@ static int dsps_musb_init(struct musb *musb)
}
}
- setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
+ timer_setup(&musb->dev_timer, otg_timer, 0);
/* Reset the musb */
musb_writel(reg_base, wrp->control, (1 << wrp->reset));
@@ -515,7 +500,7 @@ static int dsps_musb_exit(struct musb *musb)
struct device *dev = musb->controller;
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
- del_timer_sync(&glue->timer);
+ del_timer_sync(&musb->dev_timer);
usb_phy_shutdown(musb->xceiv);
phy_power_off(musb->phy);
phy_exit(musb->phy);
@@ -1027,7 +1012,7 @@ static int dsps_suspend(struct device *dev)
return ret;
}
- del_timer_sync(&glue->timer);
+ del_timer_sync(&musb->dev_timer);
mbase = musb->ctrl_base;
glue->context.control = musb_readl(mbase, wrp->control);
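
Because the polling timer now lives in struct musb rather than in the dsps glue, dsps_mod_timer() above first looks up the musb instance stored as drvdata of the child platform device. A rough, hypothetical sketch of that lookup pattern (names invented for illustration; the drvdata is assumed to have been set by the core driver with platform_set_drvdata()):

#include <linux/platform_device.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_core {
	struct timer_list dev_timer;	/* shared timer, as in struct musb */
};

struct my_glue {
	struct platform_device *core_pdev;	/* child device owning my_core */
};

static void my_glue_mod_timer(struct my_glue *glue, unsigned int wait_ms)
{
	/* Recover the core driver state behind the child platform device. */
	struct my_core *core = platform_get_drvdata(glue->core_pdev);

	mod_timer(&core->dev_timer, jiffies + msecs_to_jiffies(wait_ms));
}
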
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index bc6d1717c9ec..293e5b8da565 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver peripheral support
*
@@ -5,32 +6,6 @@
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index 0314dfc770c7..9c34aca06db6 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver peripheral defines
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef __MUSB_GADGET_H
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 844a309fe895..18da4873e52e 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG peripheral driver ep0 handling
*
@@ -5,32 +6,6 @@
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index b17450a59882..2627363fb4fe 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver host support
*
@@ -5,32 +6,6 @@
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 7bbf01bf4bb0..72392bbcd0a4 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver host defines
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _MUSB_HOST_H
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 17a80ae20674..b7025b2e6e00 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver register I/O
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef __MUSB_LINUX_PLATFORM_ARCH_H__
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index cff5bcf0d00f..a4beba184798 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver register defines
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef __MUSB_REGS_H__
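
A side note on the SPDX lines being added throughout: the kernel's license-rules documentation distinguishes comment styles by file type, C++-style comments for .c sources and classic block comments for headers and assembly, and the mass conversions of this era did not always make that distinction yet. A two-line illustration of the documented forms:

// SPDX-License-Identifier: GPL-2.0        (first line of a .c file)
/* SPDX-License-Identifier: GPL-2.0 */     (first line of a .h or .S file)
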
diff --git a/drivers/usb/musb/musb_trace.c b/drivers/usb/musb/musb_trace.c
index 70973d901a21..476872adce80 100644
--- a/drivers/usb/musb/musb_trace.c
+++ b/drivers/usb/musb/musb_trace.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* musb_trace.c - MUSB Controller Trace Support
*
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
*
* Author: Bin Liu <b-liu@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define CREATE_TRACE_POINTS
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
index f031c9e74322..a97d618fe8ff 100644
--- a/drivers/usb/musb/musb_trace.h
+++ b/drivers/usb/musb/musb_trace.h
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* musb_trace.h - MUSB Controller Trace Support
*
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
*
* Author: Bin Liu <b-liu@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#undef TRACE_SYSTEM
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 0b4595439d51..5165d2b07ade 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver virtual root hub support
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 3620073da58c..21fb9e6622f3 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver - support for Mentor's DMA controller
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2007 by Texas Instruments
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/device.h>
#include <linux/interrupt.h>
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index a3dcbd55e436..44f7983df0a1 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver - support for Mentor's DMA controller
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2007 by Texas Instruments
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef CONFIG_BLACKFIN
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 456f3e6ecf03..5d705930ef47 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005-2007 by Texas Instruments
* Some code has been taken from tusb6010.c
@@ -6,23 +7,6 @@
* Tony Lindgren <tony@atomide.com>
*
* This file is part of the Inventra Controller Driver for Linux.
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
- *
- * The Inventra Controller Driver for Linux is distributed in
- * the hope that it will be useful, but WITHOUT ANY WARRANTY;
- * without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- * License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with The Inventra Controller Driver for Linux ; if not,
- * write to the Free Software Foundation, Inc., 59 Temple Place,
- * Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
index 1b5e83a9840e..859008fa0e3c 100644
--- a/drivers/usb/musb/omap2430.h
+++ b/drivers/usb/musb/omap2430.h
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005-2006 by Texas Instruments
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
*/
#ifndef __MUSB_OMAP243X_H__
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index dc353e24d53c..2d201219ecff 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Allwinner sun4i MUSB Glue Layer
*
@@ -5,16 +6,6 @@
*
* Based on code from
* Allwinner Technology Co., Ltd. <www.allwinnertech.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 4eb640c54f2c..39453287b5c3 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* TUSB6010 USB 2.0 OTG Dual Role controller
*
* Copyright (C) 2006 Nokia Corporation
* Tony Lindgren <tony@atomide.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Notes:
* - Driver assumes that interface to external host (main CPU) is
* configured for NOR FLASH interface instead of VLYNQ serial
@@ -452,11 +449,9 @@ static int tusb_musb_vbus_status(struct musb *musb)
return ret;
}
-static struct timer_list musb_idle_timer;
-
-static void musb_do_idle(unsigned long _musb)
+static void musb_do_idle(struct timer_list *t)
{
- struct musb *musb = (void *)_musb;
+ struct musb *musb = from_timer(musb, t, dev_timer);
unsigned long flags;
spin_lock_irqsave(&musb->lock, flags);
@@ -523,13 +518,13 @@ static void tusb_musb_try_idle(struct musb *musb, unsigned long timeout)
&& (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) {
dev_dbg(musb->controller, "%s active, deleting timer\n",
usb_otg_state_string(musb->xceiv->otg->state));
- del_timer(&musb_idle_timer);
+ del_timer(&musb->dev_timer);
last_timer = jiffies;
return;
}
if (time_after(last_timer, timeout)) {
- if (!timer_pending(&musb_idle_timer))
+ if (!timer_pending(&musb->dev_timer))
last_timer = timeout;
else {
dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
@@ -541,7 +536,7 @@ static void tusb_musb_try_idle(struct musb *musb, unsigned long timeout)
dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
usb_otg_state_string(musb->xceiv->otg->state),
(unsigned long)jiffies_to_msecs(timeout - jiffies));
- mod_timer(&musb_idle_timer, timeout);
+ mod_timer(&musb->dev_timer, timeout);
}
/* ticks of 60 MHz clock */
@@ -873,7 +868,7 @@ static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
}
if (int_src & TUSB_INT_SRC_USB_IP_CONN)
- del_timer(&musb_idle_timer);
+ del_timer(&musb->dev_timer);
/* OTG state change reports (annoyingly) not issued by Mentor core */
if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG
@@ -982,7 +977,7 @@ static void tusb_musb_disable(struct musb *musb)
musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
- del_timer(&musb_idle_timer);
+ del_timer(&musb->dev_timer);
if (is_dma_capable() && !dma_off) {
printk(KERN_WARNING "%s %s: dma still active\n",
@@ -1142,7 +1137,7 @@ static int tusb_musb_init(struct musb *musb)
musb->xceiv->set_power = tusb_draw_power;
the_musb = musb;
- setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+ timer_setup(&musb->dev_timer, musb_do_idle, 0);
done:
if (ret < 0) {
@@ -1156,7 +1151,7 @@ done:
static int tusb_musb_exit(struct musb *musb)
{
- del_timer_sync(&musb_idle_timer);
+ del_timer_sync(&musb->dev_timer);
the_musb = NULL;
if (musb->board_set_power)
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h
index 72cdad23ced9..fd8025bbece7 100644
--- a/drivers/usb/musb/tusb6010.h
+++ b/drivers/usb/musb/tusb6010.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Definitions for TUSB6010 USB 2.0 OTG Dual Role controller
*
* Copyright (C) 2006 Nokia Corporation
* Tony Lindgren <tony@atomide.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __TUSB6010_H__
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index e8060e49b0f4..60a93b8bbe3c 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
*
* Copyright (C) 2006 Nokia Corporation
* Tony Lindgren <tony@atomide.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index 5a572500c418..27b4a77a9e23 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2010 ST-Ericsson AB
* Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
*
* Based on omap2430.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index c92a295049ad..d19bb3e89da6 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/usb/musb/ux500_dma.c
*
@@ -9,19 +10,6 @@
* Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
* Praveena Nadahally <praveen.nadahally@stericsson.com>
* Rajaram Regupathy <ragupathy.rajaram@stericsson.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/device.h>
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index aff702c0eb9f..0f8ab981d572 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -137,35 +137,6 @@ config USB_ISP1301
To compile this driver as a module, choose M here: the
module will be called phy-isp1301.
-config USB_MSM_OTG
- tristate "Qualcomm on-chip USB OTG controller support"
- depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST)
- depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
- depends on RESET_CONTROLLER
- select USB_PHY
- help
- Enable this to support the USB OTG transceiver on Qualcomm chips. It
- handles PHY initialization, clock management, and workarounds
- required after resetting the hardware and power management.
- This driver is required even for peripheral only or host only
- mode configurations.
- This driver is not supported on boards like trout which
- has an external PHY.
-
-config USB_QCOM_8X16_PHY
- tristate "Qualcomm APQ8016/MSM8916 on-chip USB PHY controller support"
- depends on ARCH_QCOM || COMPILE_TEST
- depends on RESET_CONTROLLER
- select USB_PHY
- select USB_ULPI_VIEWPORT
- help
- Enable this to support the USB transceiver on Qualcomm 8x16 chipsets.
- It handles PHY initialization, clock management, power management,
- and workarounds required after resetting the hardware.
-
- To compile this driver as a module, choose M here: the
- module will be called phy-qcom-8x16-usb.
-
config USB_MV_OTG
tristate "Marvell USB OTG support"
depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index 0c40ccc90631..25e579fb92b8 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -19,8 +19,6 @@ obj-$(CONFIG_TWL6030_USB) += phy-twl6030-usb.o
obj-$(CONFIG_USB_EHCI_TEGRA) += phy-tegra-usb.o
obj-$(CONFIG_USB_GPIO_VBUS) += phy-gpio-vbus-usb.o
obj-$(CONFIG_USB_ISP1301) += phy-isp1301.o
-obj-$(CONFIG_USB_MSM_OTG) += phy-msm-usb.o
-obj-$(CONFIG_USB_QCOM_8X16_PHY) += phy-qcom-8x16-usb.o
obj-$(CONFIG_USB_MV_OTG) += phy-mv-usb.o
obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o
obj-$(CONFIG_USB_ULPI) += phy-ulpi.o
diff --git a/drivers/usb/phy/of.c b/drivers/usb/phy/of.c
index 66ffa82457a8..1ab134f45d67 100644
--- a/drivers/usb/phy/of.c
+++ b/drivers/usb/phy/of.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB of helper code
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 61bf2285d5b1..87295313a10c 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB transceiver driver for AB8500 family chips
*
@@ -5,21 +6,6 @@
* Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
* Avinash Kumar <avinash.kumar@stericsson.com>
* Thirupathi Chippakurthy <thirupathi.chippakurthy@stericsson.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index 5f5f19813fde..a3cb25cb74f8 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 7e5aece769da..b36fa8b953d0 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index cf8f40ae6e01..900875f326d7 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2007,2008 Freescale semiconductor, Inc.
*
@@ -5,20 +6,6 @@
* Jerry Huang <Chang-Ming.Huang@freescale.com>
*
* Initialization based on code from Shlomi Gridish.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index 23149954a09c..43d410f6641b 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -1,19 +1,5 @@
-/* Copyright (C) 2007,2008 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2007,2008 Freescale Semiconductor, Inc. */
#include <linux/usb/otg-fsm.h>
#include <linux/usb/otg.h>
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 89d6e7a5fdb7..74ba88297991 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* NOP USB transceiver for all USB transceiver which are either built-in
* into USB IP or which are mostly autonomous.
@@ -5,20 +6,6 @@
* Copyright (C) 2009 Texas Instruments Inc
* Author: Ajay Kumar Gupta <ajay.gupta@ti.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Current status:
* This provides a "nop" transceiver for PHYs which are
* autonomous such as isp1504, isp1707, etc.
@@ -224,7 +211,7 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
int err = 0;
u32 clk_rate = 0;
- bool needs_vcc = false;
+ bool needs_vcc = false, needs_clk = false;
if (dev->of_node) {
struct device_node *node = dev->of_node;
@@ -233,6 +220,7 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
clk_rate = 0;
needs_vcc = of_property_read_bool(node, "vcc-supply");
+ needs_clk = of_property_read_bool(node, "clocks");
nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
GPIOD_ASIS);
err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
@@ -275,6 +263,8 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
if (IS_ERR(nop->clk)) {
dev_dbg(dev, "Can't get phy clock: %ld\n",
PTR_ERR(nop->clk));
+ if (needs_clk)
+ return PTR_ERR(nop->clk);
}
if (!IS_ERR(nop->clk) && clk_rate) {
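
The phy-generic hunk above makes the PHY clock mandatory only when the device tree actually declares one, by probing for a "clocks" property before deciding whether a failed devm_clk_get() is fatal. A hedged sketch of that pattern with hypothetical names (the clock id and function are made up; the API calls are standard):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>

static int my_phy_get_clk(struct device *dev, struct clk **out)
{
	bool needs_clk = false;

	if (dev->of_node)
		needs_clk = of_property_read_bool(dev->of_node, "clocks");

	*out = devm_clk_get(dev, "main_clk");
	if (IS_ERR(*out)) {
		dev_dbg(dev, "can't get phy clock: %ld\n", PTR_ERR(*out));
		/* Fail hard only when the device tree promised a clock. */
		if (needs_clk)
			return PTR_ERR(*out);
		*out = NULL;	/* otherwise treat the clock as optional */
	}

	return 0;
}
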
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index f66120db8a41..553e2573c74f 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* gpio-vbus.c - simple GPIO VBUS sensing driver for B peripheral devices
*
* Copyright (c) 2008 Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index c6052c814bcc..7041ba030052 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* isp1301_omap - ISP 1301 USB transceiver, talking to OMAP OTG controller
*
* Copyright (C) 2004 Texas Instruments
* Copyright (C) 2004 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
@@ -1183,9 +1170,11 @@ static irqreturn_t isp1301_irq(int irq, void *isp)
return IRQ_HANDLED;
}
-static void isp1301_timer(unsigned long _isp)
+static void isp1301_timer(struct timer_list *t)
{
- isp1301_defer_work((void *)_isp, WORK_TIMER);
+ struct isp1301 *isp = from_timer(isp, t, timer);
+
+ isp1301_defer_work(isp, WORK_TIMER);
}
/*-------------------------------------------------------------------------*/
@@ -1222,7 +1211,6 @@ static int isp1301_remove(struct i2c_client *i2c)
if (machine_is_omap_h2())
gpio_free(2);
- isp->timer.data = 0;
set_bit(WORK_STOP, &isp->todo);
del_timer_sync(&isp->timer);
flush_work(&isp->work);
@@ -1507,9 +1495,7 @@ isp1301_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
}
INIT_WORK(&isp->work, isp1301_work);
- init_timer(&isp->timer);
- isp->timer.function = isp1301_timer;
- isp->timer.data = (unsigned long) isp;
+ timer_setup(&isp->timer, isp1301_timer, 0);
i2c_set_clientdata(i2c, isp);
isp->client = i2c;
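The isp1301-omap timer hunks above follow the tree-wide timer API conversion: init_timer() plus a cast through timer.data is replaced by timer_setup(), and the callback recovers its containing structure with from_timer(), which is container_of() on the embedded timer_list. A minimal sketch of the idiom with a hypothetical structure, not this driver's code:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_dev {
	struct timer_list timer;	/* embedded; no more .data pointer */
	bool expired;
};

static void demo_timer_fn(struct timer_list *t)
{
	/* from_timer() == container_of(t, struct demo_dev, timer) */
	struct demo_dev *dd = from_timer(dd, t, timer);

	dd->expired = true;
}

static void demo_arm(struct demo_dev *dd)
{
	timer_setup(&dd->timer, demo_timer_fn, 0);
	dd->expired = false;
	mod_timer(&dd->timer, jiffies + msecs_to_jiffies(100));
}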
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index f333024660b4..93b7d6a30aad 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* NXP ISP1301 USB transceiver driver
*
* Copyright (C) 2012 Roland Stigge
*
* Author: Roland Stigge <stigge@antcom.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
index 01d4e4cdbc79..19871266312d 100644
--- a/drivers/usb/phy/phy-keystone.c
+++ b/drivers/usb/phy/phy-keystone.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* phy-keystone - USB PHY, talking to dwc3 controller in Keystone.
*
* Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*
* Author: WingMan Kwok <w-kwok2@ti.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
deleted file mode 100644
index 3d0dd2f97415..000000000000
--- a/drivers/usb/phy/phy-msm-usb.c
+++ /dev/null
@@ -1,2085 +0,0 @@
-/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/extcon.h>
-#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/pm_runtime.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/reboot.h>
-#include <linux/reset.h>
-#include <linux/types.h>
-#include <linux/usb/otg.h>
-
-#include <linux/usb.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/of.h>
-#include <linux/usb/ulpi.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb/hcd.h>
-#include <linux/usb/msm_hsusb_hw.h>
-#include <linux/regulator/consumer.h>
-
-/**
- * OTG control
- *
- * OTG_NO_CONTROL Id/VBUS notifications not required. Useful in a host-only
- * configuration.
- * OTG_PHY_CONTROL Id/VBUS notifications come from the USB PHY.
- * OTG_PMIC_CONTROL Id/VBUS notifications come from the PMIC hardware.
- * OTG_USER_CONTROL Id/VBUS notifications come from the user via sysfs.
- *
- */
-enum otg_control_type {
- OTG_NO_CONTROL = 0,
- OTG_PHY_CONTROL,
- OTG_PMIC_CONTROL,
- OTG_USER_CONTROL,
-};
-
-/**
- * PHY used in
- *
- * INVALID_PHY Unsupported PHY
- * CI_45NM_INTEGRATED_PHY Chipidea 45nm integrated PHY
- * SNPS_28NM_INTEGRATED_PHY Synopsys 28nm integrated PHY
- *
- */
-enum msm_usb_phy_type {
- INVALID_PHY = 0,
- CI_45NM_INTEGRATED_PHY,
- SNPS_28NM_INTEGRATED_PHY,
-};
-
-#define IDEV_CHG_MAX 1500
-#define IUNIT 100
-
-/**
- * Different states involved in USB charger detection.
- *
- * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
- * process is not yet started.
- * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact.
- * USB_CHG_STATE_DCD_DONE Data pin contact is detected.
- * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
- * between SDP and DCP/CDP).
- * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
- * between DCP and CDP).
- * USB_CHG_STATE_DETECTED USB charger type is determined.
- *
- */
-enum usb_chg_state {
- USB_CHG_STATE_UNDEFINED = 0,
- USB_CHG_STATE_WAIT_FOR_DCD,
- USB_CHG_STATE_DCD_DONE,
- USB_CHG_STATE_PRIMARY_DONE,
- USB_CHG_STATE_SECONDARY_DONE,
- USB_CHG_STATE_DETECTED,
-};
-
-/**
- * USB charger types
- *
- * USB_INVALID_CHARGER Invalid USB charger.
- * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port
- * on USB2.0 compliant host/hub.
- * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger).
- * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and
- * IDEV_CHG_MAX can be drawn irrespective of USB state.
- *
- */
-enum usb_chg_type {
- USB_INVALID_CHARGER = 0,
- USB_SDP_CHARGER,
- USB_DCP_CHARGER,
- USB_CDP_CHARGER,
-};
-
-/**
- * struct msm_otg_platform_data - platform device data
- * for msm_otg driver.
- * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as
- * "do not overwrite default vaule at this address".
- * @phy_init_sz: PHY configuration sequence size.
- * @vbus_power: VBUS power on/off routine.
- * @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
- * @mode: Supported mode (OTG/peripheral/host).
- * @otg_control: OTG switch controlled by user/Id pin
- */
-struct msm_otg_platform_data {
- int *phy_init_seq;
- int phy_init_sz;
- void (*vbus_power)(bool on);
- unsigned power_budget;
- enum usb_dr_mode mode;
- enum otg_control_type otg_control;
- enum msm_usb_phy_type phy_type;
- void (*setup_gpio)(enum usb_otg_state state);
-};
-
-/**
- * struct msm_otg: OTG driver data. Shared by HCD and DCD.
- * @otg: USB OTG Transceiver structure.
- * @pdata: otg device platform data.
- * @irq: IRQ number assigned for HSUSB controller.
- * @clk: clock struct of usb_hs_clk.
- * @pclk: clock struct of usb_hs_pclk.
- * @core_clk: clock struct of usb_hs_core_clk.
- * @regs: ioremapped register base address.
- * @inputs: OTG state machine inputs(Id, SessValid etc).
- * @sm_work: OTG state machine work.
- * @in_lpm: indicates low power mode (LPM) state.
- * @async_int: Async interrupt arrived.
- * @cur_power: The amount of mA available from downstream port.
- * @chg_work: Charger detection work.
- * @chg_state: The state of charger detection process.
- * @chg_type: The type of charger attached.
- * @dcd_retries: The retry count used to track the Data contact
- * detection process.
- * @manual_pullup: true if VBUS is not routed to USB controller/phy
- * and the controller driver therefore enables the pull-up explicitly before
- * starting the controller using the usbcmd run/stop bit.
- * @vbus: VBUS signal state tracking, using the extcon framework
- * @id: ID signal state tracking, using the extcon framework
- * @switch_gpio: Descriptor for GPIO used to control external Dual
- * SPDT USB Switch.
- * @reboot: Used to inform the driver to route USB D+/D- line to Device
- * connector
- */
-struct msm_otg {
- struct usb_phy phy;
- struct msm_otg_platform_data *pdata;
- int irq;
- struct clk *clk;
- struct clk *pclk;
- struct clk *core_clk;
- void __iomem *regs;
-#define ID 0
-#define B_SESS_VLD 1
- unsigned long inputs;
- struct work_struct sm_work;
- atomic_t in_lpm;
- int async_int;
- unsigned cur_power;
- int phy_number;
- struct delayed_work chg_work;
- enum usb_chg_state chg_state;
- enum usb_chg_type chg_type;
- u8 dcd_retries;
- struct regulator *v3p3;
- struct regulator *v1p8;
- struct regulator *vddcx;
- struct regulator_bulk_data supplies[3];
-
- struct reset_control *phy_rst;
- struct reset_control *link_rst;
- int vdd_levels[3];
-
- bool manual_pullup;
-
- struct gpio_desc *switch_gpio;
- struct notifier_block reboot;
-};
-
-#define MSM_USB_BASE (motg->regs)
-#define DRIVER_NAME "msm_otg"
-
-#define ULPI_IO_TIMEOUT_USEC (10 * 1000)
-#define LINK_RESET_TIMEOUT_USEC (250 * 1000)
-
-#define USB_PHY_3P3_VOL_MIN 3050000 /* uV */
-#define USB_PHY_3P3_VOL_MAX 3300000 /* uV */
-#define USB_PHY_3P3_HPM_LOAD 50000 /* uA */
-#define USB_PHY_3P3_LPM_LOAD 4000 /* uA */
-
-#define USB_PHY_1P8_VOL_MIN 1800000 /* uV */
-#define USB_PHY_1P8_VOL_MAX 1800000 /* uV */
-#define USB_PHY_1P8_HPM_LOAD 50000 /* uA */
-#define USB_PHY_1P8_LPM_LOAD 4000 /* uA */
-
-#define USB_PHY_VDD_DIG_VOL_MIN 1000000 /* uV */
-#define USB_PHY_VDD_DIG_VOL_MAX 1320000 /* uV */
-#define USB_PHY_SUSP_DIG_VOL 500000 /* uV */
-
-enum vdd_levels {
- VDD_LEVEL_NONE = 0,
- VDD_LEVEL_MIN,
- VDD_LEVEL_MAX,
-};
-
-static int msm_hsusb_init_vddcx(struct msm_otg *motg, int init)
-{
- int ret = 0;
-
- if (init) {
- ret = regulator_set_voltage(motg->vddcx,
- motg->vdd_levels[VDD_LEVEL_MIN],
- motg->vdd_levels[VDD_LEVEL_MAX]);
- if (ret) {
- dev_err(motg->phy.dev, "Cannot set vddcx voltage\n");
- return ret;
- }
-
- ret = regulator_enable(motg->vddcx);
- if (ret)
- dev_err(motg->phy.dev, "unable to enable hsusb vddcx\n");
- } else {
- ret = regulator_set_voltage(motg->vddcx, 0,
- motg->vdd_levels[VDD_LEVEL_MAX]);
- if (ret)
- dev_err(motg->phy.dev, "Cannot set vddcx voltage\n");
- ret = regulator_disable(motg->vddcx);
- if (ret)
- dev_err(motg->phy.dev, "unable to disable hsusb vddcx\n");
- }
-
- return ret;
-}
-
-static int msm_hsusb_ldo_init(struct msm_otg *motg, int init)
-{
- int rc = 0;
-
- if (init) {
- rc = regulator_set_voltage(motg->v3p3, USB_PHY_3P3_VOL_MIN,
- USB_PHY_3P3_VOL_MAX);
- if (rc) {
- dev_err(motg->phy.dev, "Cannot set v3p3 voltage\n");
- goto exit;
- }
- rc = regulator_enable(motg->v3p3);
- if (rc) {
- dev_err(motg->phy.dev, "unable to enable the hsusb 3p3\n");
- goto exit;
- }
- rc = regulator_set_voltage(motg->v1p8, USB_PHY_1P8_VOL_MIN,
- USB_PHY_1P8_VOL_MAX);
- if (rc) {
- dev_err(motg->phy.dev, "Cannot set v1p8 voltage\n");
- goto disable_3p3;
- }
- rc = regulator_enable(motg->v1p8);
- if (rc) {
- dev_err(motg->phy.dev, "unable to enable the hsusb 1p8\n");
- goto disable_3p3;
- }
-
- return 0;
- }
-
- regulator_disable(motg->v1p8);
-disable_3p3:
- regulator_disable(motg->v3p3);
-exit:
- return rc;
-}
-
-static int msm_hsusb_ldo_set_mode(struct msm_otg *motg, int on)
-{
- int ret = 0;
-
- if (on) {
- ret = regulator_set_load(motg->v1p8, USB_PHY_1P8_HPM_LOAD);
- if (ret < 0) {
- pr_err("Could not set HPM for v1p8\n");
- return ret;
- }
- ret = regulator_set_load(motg->v3p3, USB_PHY_3P3_HPM_LOAD);
- if (ret < 0) {
- pr_err("Could not set HPM for v3p3\n");
- regulator_set_load(motg->v1p8, USB_PHY_1P8_LPM_LOAD);
- return ret;
- }
- } else {
- ret = regulator_set_load(motg->v1p8, USB_PHY_1P8_LPM_LOAD);
- if (ret < 0)
- pr_err("Could not set LPM for v1p8\n");
- ret = regulator_set_load(motg->v3p3, USB_PHY_3P3_LPM_LOAD);
- if (ret < 0)
- pr_err("Could not set LPM for v3p3\n");
- }
-
- pr_debug("reg (%s)\n", on ? "HPM" : "LPM");
- return ret < 0 ? ret : 0;
-}
-
-static int ulpi_read(struct usb_phy *phy, u32 reg)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- int cnt = 0;
-
- /* initiate read operation */
- writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
- USB_ULPI_VIEWPORT);
-
- /* wait for completion */
- while (cnt < ULPI_IO_TIMEOUT_USEC) {
- if (!(readl(USB_ULPI_VIEWPORT) & ULPI_RUN))
- break;
- udelay(1);
- cnt++;
- }
-
- if (cnt >= ULPI_IO_TIMEOUT_USEC) {
- dev_err(phy->dev, "ulpi_read: timeout %08x\n",
- readl(USB_ULPI_VIEWPORT));
- return -ETIMEDOUT;
- }
- return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT));
-}
-
-static int ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- int cnt = 0;
-
- /* initiate write operation */
- writel(ULPI_RUN | ULPI_WRITE |
- ULPI_ADDR(reg) | ULPI_DATA(val),
- USB_ULPI_VIEWPORT);
-
- /* wait for completion */
- while (cnt < ULPI_IO_TIMEOUT_USEC) {
- if (!(readl(USB_ULPI_VIEWPORT) & ULPI_RUN))
- break;
- udelay(1);
- cnt++;
- }
-
- if (cnt >= ULPI_IO_TIMEOUT_USEC) {
- dev_err(phy->dev, "ulpi_write: timeout\n");
- return -ETIMEDOUT;
- }
- return 0;
-}
-
-static struct usb_phy_io_ops msm_otg_io_ops = {
- .read = ulpi_read,
- .write = ulpi_write,
-};
-
-static void ulpi_init(struct msm_otg *motg)
-{
- struct msm_otg_platform_data *pdata = motg->pdata;
- int *seq = pdata->phy_init_seq, idx;
- u32 addr = ULPI_EXT_VENDOR_SPECIFIC;
-
- for (idx = 0; idx < pdata->phy_init_sz; idx++) {
- if (seq[idx] == -1)
- continue;
-
- dev_vdbg(motg->phy.dev, "ulpi: write 0x%02x to 0x%02x\n",
- seq[idx], addr + idx);
- ulpi_write(&motg->phy, seq[idx], addr + idx);
- }
-}
-
-static int msm_phy_notify_disconnect(struct usb_phy *phy,
- enum usb_device_speed speed)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- int val;
-
- if (motg->manual_pullup) {
- val = ULPI_MISC_A_VBUSVLDEXT | ULPI_MISC_A_VBUSVLDEXTSEL;
- usb_phy_io_write(phy, val, ULPI_CLR(ULPI_MISC_A));
- }
-
- /*
- * Put the transceiver in non-driving mode. Otherwise host
- * may not detect soft-disconnection.
- */
- val = ulpi_read(phy, ULPI_FUNC_CTRL);
- val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
- ulpi_write(phy, val, ULPI_FUNC_CTRL);
-
- return 0;
-}
-
-static int msm_otg_link_clk_reset(struct msm_otg *motg, bool assert)
-{
- int ret;
-
- if (assert)
- ret = reset_control_assert(motg->link_rst);
- else
- ret = reset_control_deassert(motg->link_rst);
-
- if (ret)
- dev_err(motg->phy.dev, "usb link clk reset %s failed\n",
- assert ? "assert" : "deassert");
-
- return ret;
-}
-
-static int msm_otg_phy_clk_reset(struct msm_otg *motg)
-{
- int ret = 0;
-
- if (motg->phy_rst)
- ret = reset_control_reset(motg->phy_rst);
-
- if (ret)
- dev_err(motg->phy.dev, "usb phy clk reset failed\n");
-
- return ret;
-}
-
-static int msm_link_reset(struct msm_otg *motg)
-{
- u32 val;
- int ret;
-
- ret = msm_otg_link_clk_reset(motg, 1);
- if (ret)
- return ret;
-
- /* wait for 1ms delay as suggested in HPG. */
- usleep_range(1000, 1200);
-
- ret = msm_otg_link_clk_reset(motg, 0);
- if (ret)
- return ret;
-
- if (motg->phy_number)
- writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2);
-
- /* put transceiver in serial mode as part of reset */
- val = readl(USB_PORTSC) & ~PORTSC_PTS_MASK;
- writel(val | PORTSC_PTS_SERIAL, USB_PORTSC);
-
- return 0;
-}
-
-static int msm_otg_reset(struct usb_phy *phy)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- int cnt = 0;
-
- writel(USBCMD_RESET, USB_USBCMD);
- while (cnt < LINK_RESET_TIMEOUT_USEC) {
- if (!(readl(USB_USBCMD) & USBCMD_RESET))
- break;
- udelay(1);
- cnt++;
- }
- if (cnt >= LINK_RESET_TIMEOUT_USEC)
- return -ETIMEDOUT;
-
- /* select ULPI phy and clear other status/control bits in PORTSC */
- writel(PORTSC_PTS_ULPI, USB_PORTSC);
-
- writel(0x0, USB_AHBBURST);
- writel(0x08, USB_AHBMODE);
-
- if (motg->phy_number)
- writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2);
- return 0;
-}
-
-static void msm_phy_reset(struct msm_otg *motg)
-{
- void __iomem *addr;
-
- if (motg->pdata->phy_type != SNPS_28NM_INTEGRATED_PHY) {
- msm_otg_phy_clk_reset(motg);
- return;
- }
-
- addr = USB_PHY_CTRL;
- if (motg->phy_number)
- addr = USB_PHY_CTRL2;
-
- /* Assert USB PHY_POR */
- writel(readl(addr) | PHY_POR_ASSERT, addr);
-
- /*
- * wait for minimum 10 microseconds as suggested in HPG.
- * Use a slightly larger value since the exact value didn't
- * work 100% of the time.
- */
- udelay(12);
-
- /* Deassert USB PHY_POR */
- writel(readl(addr) & ~PHY_POR_ASSERT, addr);
-}
-
-static int msm_usb_reset(struct usb_phy *phy)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- int ret;
-
- if (!IS_ERR(motg->core_clk))
- clk_prepare_enable(motg->core_clk);
-
- ret = msm_link_reset(motg);
- if (ret) {
- dev_err(phy->dev, "phy_reset failed\n");
- return ret;
- }
-
- ret = msm_otg_reset(&motg->phy);
- if (ret) {
- dev_err(phy->dev, "link reset failed\n");
- return ret;
- }
-
- msleep(100);
-
- /* Reset USB PHY after performing USB Link RESET */
- msm_phy_reset(motg);
-
- if (!IS_ERR(motg->core_clk))
- clk_disable_unprepare(motg->core_clk);
-
- return 0;
-}
-
-static int msm_phy_init(struct usb_phy *phy)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- struct msm_otg_platform_data *pdata = motg->pdata;
- u32 val, ulpi_val = 0;
-
- /* Program USB PHY Override registers. */
- ulpi_init(motg);
-
- /*
- * It is recommended in HPG to reset USB PHY after programming
- * USB PHY Override registers.
- */
- msm_phy_reset(motg);
-
- if (pdata->otg_control == OTG_PHY_CONTROL) {
- val = readl(USB_OTGSC);
- if (pdata->mode == USB_DR_MODE_OTG) {
- ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID;
- val |= OTGSC_IDIE | OTGSC_BSVIE;
- } else if (pdata->mode == USB_DR_MODE_PERIPHERAL) {
- ulpi_val = ULPI_INT_SESS_VALID;
- val |= OTGSC_BSVIE;
- }
- writel(val, USB_OTGSC);
- ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_RISE);
- ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_FALL);
- }
-
- if (motg->manual_pullup) {
- val = ULPI_MISC_A_VBUSVLDEXTSEL | ULPI_MISC_A_VBUSVLDEXT;
- ulpi_write(phy, val, ULPI_SET(ULPI_MISC_A));
-
- val = readl(USB_GENCONFIG_2);
- val |= GENCONFIG_2_SESS_VLD_CTRL_EN;
- writel(val, USB_GENCONFIG_2);
-
- val = readl(USB_USBCMD);
- val |= USBCMD_SESS_VLD_CTRL;
- writel(val, USB_USBCMD);
-
- val = ulpi_read(phy, ULPI_FUNC_CTRL);
- val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- val |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
- ulpi_write(phy, val, ULPI_FUNC_CTRL);
- }
-
- if (motg->phy_number)
- writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2);
-
- return 0;
-}
-
-#define PHY_SUSPEND_TIMEOUT_USEC (500 * 1000)
-#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
-
-#ifdef CONFIG_PM
-
-static int msm_hsusb_config_vddcx(struct msm_otg *motg, int high)
-{
- int max_vol = motg->vdd_levels[VDD_LEVEL_MAX];
- int min_vol;
- int ret;
-
- if (high)
- min_vol = motg->vdd_levels[VDD_LEVEL_MIN];
- else
- min_vol = motg->vdd_levels[VDD_LEVEL_NONE];
-
- ret = regulator_set_voltage(motg->vddcx, min_vol, max_vol);
- if (ret) {
- pr_err("Cannot set vddcx voltage\n");
- return ret;
- }
-
- pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
-
- return ret;
-}
-
-static int msm_otg_suspend(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- struct usb_bus *bus = phy->otg->host;
- struct msm_otg_platform_data *pdata = motg->pdata;
- void __iomem *addr;
- int cnt = 0;
-
- if (atomic_read(&motg->in_lpm))
- return 0;
-
- disable_irq(motg->irq);
- /*
- * Chipidea 45-nm PHY suspend sequence:
- *
- * in all PHY versions. The latch register is of the clear-on-read type.
- * Clear the latch register to avoid a spurious wakeup from
- * Clear latch register to avoid spurious wakeup from
- * low power mode (LPM).
- *
- * PHY comparators are disabled when PHY enters into low power
- * mode (LPM). Keep PHY comparators ON in LPM only when we expect
- * VBUS/Id notifications from USB PHY. Otherwise turn off USB
- * PHY comparators. This saves a significant amount of power.
- *
- * PLL is not turned off when PHY enters into low power mode (LPM).
- * Disable PLL for maximum power savings.
- */
-
- if (motg->pdata->phy_type == CI_45NM_INTEGRATED_PHY) {
- ulpi_read(phy, 0x14);
- if (pdata->otg_control == OTG_PHY_CONTROL)
- ulpi_write(phy, 0x01, 0x30);
- ulpi_write(phy, 0x08, 0x09);
- }
-
- /*
- * PHY may take some time or even fail to enter into low power
- * mode (LPM). Hence poll for 500 msec and reset the PHY and link
- * in the failure case.
- */
- writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
- while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
- if (readl(USB_PORTSC) & PORTSC_PHCD)
- break;
- udelay(1);
- cnt++;
- }
-
- if (cnt >= PHY_SUSPEND_TIMEOUT_USEC) {
- dev_err(phy->dev, "Unable to suspend PHY\n");
- msm_otg_reset(phy);
- enable_irq(motg->irq);
- return -ETIMEDOUT;
- }
-
- /*
- * PHY has capability to generate interrupt asynchronously in low
- * power mode (LPM). This interrupt is level triggered. So USB IRQ
- * line must be disabled till async interrupt enable bit is cleared
- * in USBCMD register. Assert STP (ULPI interface STOP signal) to
- * block data communication from PHY.
- */
- writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL | ULPI_STP_CTRL, USB_USBCMD);
-
- addr = USB_PHY_CTRL;
- if (motg->phy_number)
- addr = USB_PHY_CTRL2;
-
- if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
- motg->pdata->otg_control == OTG_PMIC_CONTROL)
- writel(readl(addr) | PHY_RETEN, addr);
-
- clk_disable_unprepare(motg->pclk);
- clk_disable_unprepare(motg->clk);
- if (!IS_ERR(motg->core_clk))
- clk_disable_unprepare(motg->core_clk);
-
- if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
- motg->pdata->otg_control == OTG_PMIC_CONTROL) {
- msm_hsusb_ldo_set_mode(motg, 0);
- msm_hsusb_config_vddcx(motg, 0);
- }
-
- if (device_may_wakeup(phy->dev))
- enable_irq_wake(motg->irq);
- if (bus)
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
-
- atomic_set(&motg->in_lpm, 1);
- enable_irq(motg->irq);
-
- dev_info(phy->dev, "USB in low power mode\n");
-
- return 0;
-}
-
-static int msm_otg_resume(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- struct usb_bus *bus = phy->otg->host;
- void __iomem *addr;
- int cnt = 0;
- unsigned temp;
-
- if (!atomic_read(&motg->in_lpm))
- return 0;
-
- clk_prepare_enable(motg->pclk);
- clk_prepare_enable(motg->clk);
- if (!IS_ERR(motg->core_clk))
- clk_prepare_enable(motg->core_clk);
-
- if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
- motg->pdata->otg_control == OTG_PMIC_CONTROL) {
-
- addr = USB_PHY_CTRL;
- if (motg->phy_number)
- addr = USB_PHY_CTRL2;
-
- msm_hsusb_ldo_set_mode(motg, 1);
- msm_hsusb_config_vddcx(motg, 1);
- writel(readl(addr) & ~PHY_RETEN, addr);
- }
-
- temp = readl(USB_USBCMD);
- temp &= ~ASYNC_INTR_CTRL;
- temp &= ~ULPI_STP_CTRL;
- writel(temp, USB_USBCMD);
-
- /*
- * PHY comes out of low power mode (LPM) in case of wakeup
- * from asynchronous interrupt.
- */
- if (!(readl(USB_PORTSC) & PORTSC_PHCD))
- goto skip_phy_resume;
-
- writel(readl(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);
- while (cnt < PHY_RESUME_TIMEOUT_USEC) {
- if (!(readl(USB_PORTSC) & PORTSC_PHCD))
- break;
- udelay(1);
- cnt++;
- }
-
- if (cnt >= PHY_RESUME_TIMEOUT_USEC) {
- /*
- * This is a fatal error. Reset the link and
- * PHY. The USB state cannot be restored. Re-insertion
- * of the USB cable is the only way to get USB working again.
- */
- dev_err(phy->dev, "Unable to resume USB. Re-plug in the cable\n");
- msm_otg_reset(phy);
- }
-
-skip_phy_resume:
- if (device_may_wakeup(phy->dev))
- disable_irq_wake(motg->irq);
- if (bus)
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
-
- atomic_set(&motg->in_lpm, 0);
-
- if (motg->async_int) {
- motg->async_int = 0;
- pm_runtime_put(phy->dev);
- enable_irq(motg->irq);
- }
-
- dev_info(phy->dev, "USB exited from low power mode\n");
-
- return 0;
-}
-#endif
-
-static void msm_otg_notify_charger(struct msm_otg *motg, unsigned mA)
-{
- if (motg->cur_power == mA)
- return;
-
- /* TODO: Notify PMIC about available current */
- dev_info(motg->phy.dev, "Avail curr from USB = %u\n", mA);
- motg->cur_power = mA;
-}
-
-static void msm_otg_start_host(struct usb_phy *phy, int on)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- struct msm_otg_platform_data *pdata = motg->pdata;
- struct usb_hcd *hcd;
-
- if (!phy->otg->host)
- return;
-
- hcd = bus_to_hcd(phy->otg->host);
-
- if (on) {
- dev_dbg(phy->dev, "host on\n");
-
- if (pdata->vbus_power)
- pdata->vbus_power(1);
- /*
- * Some boards have a switch controlled by a GPIO
- * to enable/disable the internal HUB. Enable the internal
- * HUB before kicking the host.
- */
- if (pdata->setup_gpio)
- pdata->setup_gpio(OTG_STATE_A_HOST);
-#ifdef CONFIG_USB
- usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
- device_wakeup_enable(hcd->self.controller);
-#endif
- } else {
- dev_dbg(phy->dev, "host off\n");
-
-#ifdef CONFIG_USB
- usb_remove_hcd(hcd);
-#endif
- if (pdata->setup_gpio)
- pdata->setup_gpio(OTG_STATE_UNDEFINED);
- if (pdata->vbus_power)
- pdata->vbus_power(0);
- }
-}
-
-static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
-{
- struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
- struct usb_hcd *hcd;
-
- /*
- * Fail host registration if this board can support
- * only peripheral configuration.
- */
- if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL) {
- dev_info(otg->usb_phy->dev, "Host mode is not supported\n");
- return -ENODEV;
- }
-
- if (!host) {
- if (otg->state == OTG_STATE_A_HOST) {
- pm_runtime_get_sync(otg->usb_phy->dev);
- msm_otg_start_host(otg->usb_phy, 0);
- otg->host = NULL;
- otg->state = OTG_STATE_UNDEFINED;
- schedule_work(&motg->sm_work);
- } else {
- otg->host = NULL;
- }
-
- return 0;
- }
-
- hcd = bus_to_hcd(host);
- hcd->power_budget = motg->pdata->power_budget;
-
- otg->host = host;
- dev_dbg(otg->usb_phy->dev, "host driver registered w/ transceiver\n");
-
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
-
- return 0;
-}
-
-static void msm_otg_start_peripheral(struct usb_phy *phy, int on)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- struct msm_otg_platform_data *pdata = motg->pdata;
-
- if (!phy->otg->gadget)
- return;
-
- if (on) {
- dev_dbg(phy->dev, "gadget on\n");
- /*
- * Some boards have a switch controlled by a GPIO
- * to enable/disable the internal HUB. Disable the internal
- * HUB before kicking the gadget.
- */
- if (pdata->setup_gpio)
- pdata->setup_gpio(OTG_STATE_B_PERIPHERAL);
- usb_gadget_vbus_connect(phy->otg->gadget);
- } else {
- dev_dbg(phy->dev, "gadget off\n");
- usb_gadget_vbus_disconnect(phy->otg->gadget);
- if (pdata->setup_gpio)
- pdata->setup_gpio(OTG_STATE_UNDEFINED);
- }
-
-}
-
-static int msm_otg_set_peripheral(struct usb_otg *otg,
- struct usb_gadget *gadget)
-{
- struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
-
- /*
- * Fail peripheral registration if this board can support
- * only host configuration.
- */
- if (motg->pdata->mode == USB_DR_MODE_HOST) {
- dev_info(otg->usb_phy->dev, "Peripheral mode is not supported\n");
- return -ENODEV;
- }
-
- if (!gadget) {
- if (otg->state == OTG_STATE_B_PERIPHERAL) {
- pm_runtime_get_sync(otg->usb_phy->dev);
- msm_otg_start_peripheral(otg->usb_phy, 0);
- otg->gadget = NULL;
- otg->state = OTG_STATE_UNDEFINED;
- schedule_work(&motg->sm_work);
- } else {
- otg->gadget = NULL;
- }
-
- return 0;
- }
- otg->gadget = gadget;
- dev_dbg(otg->usb_phy->dev,
- "peripheral driver registered w/ transceiver\n");
-
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
-
- return 0;
-}
-
-static bool msm_chg_check_secondary_det(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 chg_det;
- bool ret = false;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- ret = chg_det & (1 << 4);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x87);
- ret = chg_det & 1;
- break;
- default:
- break;
- }
- return ret;
-}
-
-static void msm_chg_enable_secondary_det(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 chg_det;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* Turn off charger block */
- chg_det |= ~(1 << 1);
- ulpi_write(phy, chg_det, 0x34);
- udelay(20);
- /* control chg block via ULPI */
- chg_det &= ~(1 << 3);
- ulpi_write(phy, chg_det, 0x34);
- /* put it in host mode for enabling D- source */
- chg_det &= ~(1 << 2);
- ulpi_write(phy, chg_det, 0x34);
- /* Turn on chg detect block */
- chg_det &= ~(1 << 1);
- ulpi_write(phy, chg_det, 0x34);
- udelay(20);
- /* enable chg detection */
- chg_det &= ~(1 << 0);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- /*
- * Configure DM as current source, DP as current sink
- * and enable battery charging comparators.
- */
- ulpi_write(phy, 0x8, 0x85);
- ulpi_write(phy, 0x2, 0x85);
- ulpi_write(phy, 0x1, 0x85);
- break;
- default:
- break;
- }
-}
-
-static bool msm_chg_check_primary_det(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 chg_det;
- bool ret = false;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- ret = chg_det & (1 << 4);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x87);
- ret = chg_det & 1;
- break;
- default:
- break;
- }
- return ret;
-}
-
-static void msm_chg_enable_primary_det(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 chg_det;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* enable chg detection */
- chg_det &= ~(1 << 0);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- /*
- * Configure DP as current source, DM as current sink
- * and enable battery charging comparators.
- */
- ulpi_write(phy, 0x2, 0x85);
- ulpi_write(phy, 0x1, 0x85);
- break;
- default:
- break;
- }
-}
-
-static bool msm_chg_check_dcd(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 line_state;
- bool ret = false;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- line_state = ulpi_read(phy, 0x15);
- ret = !(line_state & 1);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- line_state = ulpi_read(phy, 0x87);
- ret = line_state & 2;
- break;
- default:
- break;
- }
- return ret;
-}
-
-static void msm_chg_disable_dcd(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 chg_det;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- chg_det &= ~(1 << 5);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- ulpi_write(phy, 0x10, 0x86);
- break;
- default:
- break;
- }
-}
-
-static void msm_chg_enable_dcd(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 chg_det;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* Turn on D+ current source */
- chg_det |= (1 << 5);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- /* Data contact detection enable */
- ulpi_write(phy, 0x10, 0x85);
- break;
- default:
- break;
- }
-}
-
-static void msm_chg_block_on(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 func_ctrl, chg_det;
-
- /* put the controller in non-driving mode */
- func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
- func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
- ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* control chg block via ULPI */
- chg_det &= ~(1 << 3);
- ulpi_write(phy, chg_det, 0x34);
- /* Turn on chg detect block */
- chg_det &= ~(1 << 1);
- ulpi_write(phy, chg_det, 0x34);
- udelay(20);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- /* Clear charger detecting control bits */
- ulpi_write(phy, 0x3F, 0x86);
- /* Clear alt interrupt latch and enable bits */
- ulpi_write(phy, 0x1F, 0x92);
- ulpi_write(phy, 0x1F, 0x95);
- udelay(100);
- break;
- default:
- break;
- }
-}
-
-static void msm_chg_block_off(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 func_ctrl, chg_det;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* Turn off charger block */
- chg_det |= ~(1 << 1);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- /* Clear charger detecting control bits */
- ulpi_write(phy, 0x3F, 0x86);
- /* Clear alt interrupt latch and enable bits */
- ulpi_write(phy, 0x1F, 0x92);
- ulpi_write(phy, 0x1F, 0x95);
- break;
- default:
- break;
- }
-
- /* put the controller in normal mode */
- func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
- func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
- ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
-}
-
-#define MSM_CHG_DCD_POLL_TIME (100 * HZ/1000) /* 100 msec */
-#define MSM_CHG_DCD_MAX_RETRIES 6 /* Tdcd_tmout = 6 * 100 msec */
-#define MSM_CHG_PRIMARY_DET_TIME (40 * HZ/1000) /* TVDPSRC_ON */
-#define MSM_CHG_SECONDARY_DET_TIME (40 * HZ/1000) /* TVDMSRC_ON */
-static void msm_chg_detect_work(struct work_struct *w)
-{
- struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work);
- struct usb_phy *phy = &motg->phy;
- bool is_dcd, tmout, vout;
- unsigned long delay;
-
- dev_dbg(phy->dev, "chg detection work\n");
- switch (motg->chg_state) {
- case USB_CHG_STATE_UNDEFINED:
- pm_runtime_get_sync(phy->dev);
- msm_chg_block_on(motg);
- msm_chg_enable_dcd(motg);
- motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
- motg->dcd_retries = 0;
- delay = MSM_CHG_DCD_POLL_TIME;
- break;
- case USB_CHG_STATE_WAIT_FOR_DCD:
- is_dcd = msm_chg_check_dcd(motg);
- tmout = ++motg->dcd_retries == MSM_CHG_DCD_MAX_RETRIES;
- if (is_dcd || tmout) {
- msm_chg_disable_dcd(motg);
- msm_chg_enable_primary_det(motg);
- delay = MSM_CHG_PRIMARY_DET_TIME;
- motg->chg_state = USB_CHG_STATE_DCD_DONE;
- } else {
- delay = MSM_CHG_DCD_POLL_TIME;
- }
- break;
- case USB_CHG_STATE_DCD_DONE:
- vout = msm_chg_check_primary_det(motg);
- if (vout) {
- msm_chg_enable_secondary_det(motg);
- delay = MSM_CHG_SECONDARY_DET_TIME;
- motg->chg_state = USB_CHG_STATE_PRIMARY_DONE;
- } else {
- motg->chg_type = USB_SDP_CHARGER;
- motg->chg_state = USB_CHG_STATE_DETECTED;
- delay = 0;
- }
- break;
- case USB_CHG_STATE_PRIMARY_DONE:
- vout = msm_chg_check_secondary_det(motg);
- if (vout)
- motg->chg_type = USB_DCP_CHARGER;
- else
- motg->chg_type = USB_CDP_CHARGER;
- motg->chg_state = USB_CHG_STATE_SECONDARY_DONE;
- /* fall through */
- case USB_CHG_STATE_SECONDARY_DONE:
- motg->chg_state = USB_CHG_STATE_DETECTED;
- case USB_CHG_STATE_DETECTED:
- msm_chg_block_off(motg);
- dev_dbg(phy->dev, "charger = %d\n", motg->chg_type);
- schedule_work(&motg->sm_work);
- return;
- default:
- return;
- }
-
- schedule_delayed_work(&motg->chg_work, delay);
-}
-
-/*
- * We support OTG, Peripheral only and Host only configurations. In case
- * of OTG, mode switch (host-->peripheral/peripheral-->host) can happen
- * via Id pin status or user request (debugfs). Id/BSV interrupts are not
- * enabled when switch is controlled by user and default mode is supplied
- * by board file, which can be changed by userspace later.
- */
-static void msm_otg_init_sm(struct msm_otg *motg)
-{
- struct msm_otg_platform_data *pdata = motg->pdata;
- u32 otgsc = readl(USB_OTGSC);
-
- switch (pdata->mode) {
- case USB_DR_MODE_OTG:
- if (pdata->otg_control == OTG_PHY_CONTROL) {
- if (otgsc & OTGSC_ID)
- set_bit(ID, &motg->inputs);
- else
- clear_bit(ID, &motg->inputs);
-
- if (otgsc & OTGSC_BSV)
- set_bit(B_SESS_VLD, &motg->inputs);
- else
- clear_bit(B_SESS_VLD, &motg->inputs);
- } else if (pdata->otg_control == OTG_USER_CONTROL) {
- set_bit(ID, &motg->inputs);
- clear_bit(B_SESS_VLD, &motg->inputs);
- }
- break;
- case USB_DR_MODE_HOST:
- clear_bit(ID, &motg->inputs);
- break;
- case USB_DR_MODE_PERIPHERAL:
- set_bit(ID, &motg->inputs);
- if (otgsc & OTGSC_BSV)
- set_bit(B_SESS_VLD, &motg->inputs);
- else
- clear_bit(B_SESS_VLD, &motg->inputs);
- break;
- default:
- break;
- }
-}
-
-static void msm_otg_sm_work(struct work_struct *w)
-{
- struct msm_otg *motg = container_of(w, struct msm_otg, sm_work);
- struct usb_otg *otg = motg->phy.otg;
-
- switch (otg->state) {
- case OTG_STATE_UNDEFINED:
- dev_dbg(otg->usb_phy->dev, "OTG_STATE_UNDEFINED state\n");
- msm_otg_reset(otg->usb_phy);
- msm_otg_init_sm(motg);
- otg->state = OTG_STATE_B_IDLE;
- /* FALL THROUGH */
- case OTG_STATE_B_IDLE:
- dev_dbg(otg->usb_phy->dev, "OTG_STATE_B_IDLE state\n");
- if (!test_bit(ID, &motg->inputs) && otg->host) {
- /* disable BSV bit */
- writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
- msm_otg_start_host(otg->usb_phy, 1);
- otg->state = OTG_STATE_A_HOST;
- } else if (test_bit(B_SESS_VLD, &motg->inputs)) {
- switch (motg->chg_state) {
- case USB_CHG_STATE_UNDEFINED:
- msm_chg_detect_work(&motg->chg_work.work);
- break;
- case USB_CHG_STATE_DETECTED:
- switch (motg->chg_type) {
- case USB_DCP_CHARGER:
- msm_otg_notify_charger(motg,
- IDEV_CHG_MAX);
- break;
- case USB_CDP_CHARGER:
- msm_otg_notify_charger(motg,
- IDEV_CHG_MAX);
- msm_otg_start_peripheral(otg->usb_phy,
- 1);
- otg->state
- = OTG_STATE_B_PERIPHERAL;
- break;
- case USB_SDP_CHARGER:
- msm_otg_notify_charger(motg, IUNIT);
- msm_otg_start_peripheral(otg->usb_phy,
- 1);
- otg->state
- = OTG_STATE_B_PERIPHERAL;
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- } else {
- /*
- * If charger detection work is pending, decrement
- * the pm usage counter to balance with the one that
- * is incremented in charger detection work.
- */
- if (cancel_delayed_work_sync(&motg->chg_work)) {
- pm_runtime_put_sync(otg->usb_phy->dev);
- msm_otg_reset(otg->usb_phy);
- }
- msm_otg_notify_charger(motg, 0);
- motg->chg_state = USB_CHG_STATE_UNDEFINED;
- motg->chg_type = USB_INVALID_CHARGER;
- }
-
- if (otg->state == OTG_STATE_B_IDLE)
- pm_runtime_put_sync(otg->usb_phy->dev);
- break;
- case OTG_STATE_B_PERIPHERAL:
- dev_dbg(otg->usb_phy->dev, "OTG_STATE_B_PERIPHERAL state\n");
- if (!test_bit(B_SESS_VLD, &motg->inputs) ||
- !test_bit(ID, &motg->inputs)) {
- msm_otg_notify_charger(motg, 0);
- msm_otg_start_peripheral(otg->usb_phy, 0);
- motg->chg_state = USB_CHG_STATE_UNDEFINED;
- motg->chg_type = USB_INVALID_CHARGER;
- otg->state = OTG_STATE_B_IDLE;
- msm_otg_reset(otg->usb_phy);
- schedule_work(w);
- }
- break;
- case OTG_STATE_A_HOST:
- dev_dbg(otg->usb_phy->dev, "OTG_STATE_A_HOST state\n");
- if (test_bit(ID, &motg->inputs)) {
- msm_otg_start_host(otg->usb_phy, 0);
- otg->state = OTG_STATE_B_IDLE;
- msm_otg_reset(otg->usb_phy);
- schedule_work(w);
- }
- break;
- default:
- break;
- }
-}
-
-static irqreturn_t msm_otg_irq(int irq, void *data)
-{
- struct msm_otg *motg = data;
- struct usb_phy *phy = &motg->phy;
- u32 otgsc = 0;
-
- if (atomic_read(&motg->in_lpm)) {
- disable_irq_nosync(irq);
- motg->async_int = 1;
- pm_runtime_get(phy->dev);
- return IRQ_HANDLED;
- }
-
- otgsc = readl(USB_OTGSC);
- if (!(otgsc & (OTGSC_IDIS | OTGSC_BSVIS)))
- return IRQ_NONE;
-
- if ((otgsc & OTGSC_IDIS) && (otgsc & OTGSC_IDIE)) {
- if (otgsc & OTGSC_ID)
- set_bit(ID, &motg->inputs);
- else
- clear_bit(ID, &motg->inputs);
- dev_dbg(phy->dev, "ID set/clear\n");
- pm_runtime_get_noresume(phy->dev);
- } else if ((otgsc & OTGSC_BSVIS) && (otgsc & OTGSC_BSVIE)) {
- if (otgsc & OTGSC_BSV)
- set_bit(B_SESS_VLD, &motg->inputs);
- else
- clear_bit(B_SESS_VLD, &motg->inputs);
- dev_dbg(phy->dev, "BSV set/clear\n");
- pm_runtime_get_noresume(phy->dev);
- }
-
- writel(otgsc, USB_OTGSC);
- schedule_work(&motg->sm_work);
- return IRQ_HANDLED;
-}
-
-static int msm_otg_mode_show(struct seq_file *s, void *unused)
-{
- struct msm_otg *motg = s->private;
- struct usb_otg *otg = motg->phy.otg;
-
- switch (otg->state) {
- case OTG_STATE_A_HOST:
- seq_puts(s, "host\n");
- break;
- case OTG_STATE_B_PERIPHERAL:
- seq_puts(s, "peripheral\n");
- break;
- default:
- seq_puts(s, "none\n");
- break;
- }
-
- return 0;
-}
-
-static int msm_otg_mode_open(struct inode *inode, struct file *file)
-{
- return single_open(file, msm_otg_mode_show, inode->i_private);
-}
-
-static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *s = file->private_data;
- struct msm_otg *motg = s->private;
- char buf[16];
- struct usb_otg *otg = motg->phy.otg;
- int status = count;
- enum usb_dr_mode req_mode;
-
- memset(buf, 0x00, sizeof(buf));
-
- if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) {
- status = -EFAULT;
- goto out;
- }
-
- if (!strncmp(buf, "host", 4)) {
- req_mode = USB_DR_MODE_HOST;
- } else if (!strncmp(buf, "peripheral", 10)) {
- req_mode = USB_DR_MODE_PERIPHERAL;
- } else if (!strncmp(buf, "none", 4)) {
- req_mode = USB_DR_MODE_UNKNOWN;
- } else {
- status = -EINVAL;
- goto out;
- }
-
- switch (req_mode) {
- case USB_DR_MODE_UNKNOWN:
- switch (otg->state) {
- case OTG_STATE_A_HOST:
- case OTG_STATE_B_PERIPHERAL:
- set_bit(ID, &motg->inputs);
- clear_bit(B_SESS_VLD, &motg->inputs);
- break;
- default:
- goto out;
- }
- break;
- case USB_DR_MODE_PERIPHERAL:
- switch (otg->state) {
- case OTG_STATE_B_IDLE:
- case OTG_STATE_A_HOST:
- set_bit(ID, &motg->inputs);
- set_bit(B_SESS_VLD, &motg->inputs);
- break;
- default:
- goto out;
- }
- break;
- case USB_DR_MODE_HOST:
- switch (otg->state) {
- case OTG_STATE_B_IDLE:
- case OTG_STATE_B_PERIPHERAL:
- clear_bit(ID, &motg->inputs);
- break;
- default:
- goto out;
- }
- break;
- default:
- goto out;
- }
-
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
-out:
- return status;
-}
-
-static const struct file_operations msm_otg_mode_fops = {
- .open = msm_otg_mode_open,
- .read = seq_read,
- .write = msm_otg_mode_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static struct dentry *msm_otg_dbg_root;
-static struct dentry *msm_otg_dbg_mode;
-
-static int msm_otg_debugfs_init(struct msm_otg *motg)
-{
- msm_otg_dbg_root = debugfs_create_dir("msm_otg", NULL);
-
- if (!msm_otg_dbg_root || IS_ERR(msm_otg_dbg_root))
- return -ENODEV;
-
- msm_otg_dbg_mode = debugfs_create_file("mode", S_IRUGO | S_IWUSR,
- msm_otg_dbg_root, motg, &msm_otg_mode_fops);
- if (!msm_otg_dbg_mode) {
- debugfs_remove(msm_otg_dbg_root);
- msm_otg_dbg_root = NULL;
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void msm_otg_debugfs_cleanup(void)
-{
- debugfs_remove(msm_otg_dbg_mode);
- debugfs_remove(msm_otg_dbg_root);
-}
-
-static const struct of_device_id msm_otg_dt_match[] = {
- {
- .compatible = "qcom,usb-otg-ci",
- .data = (void *) CI_45NM_INTEGRATED_PHY
- },
- {
- .compatible = "qcom,usb-otg-snps",
- .data = (void *) SNPS_28NM_INTEGRATED_PHY
- },
- { }
-};
-MODULE_DEVICE_TABLE(of, msm_otg_dt_match);
-
-static int msm_otg_vbus_notifier(struct notifier_block *nb, unsigned long event,
- void *ptr)
-{
- struct usb_phy *usb_phy = container_of(nb, struct usb_phy, vbus_nb);
- struct msm_otg *motg = container_of(usb_phy, struct msm_otg, phy);
-
- if (event)
- set_bit(B_SESS_VLD, &motg->inputs);
- else
- clear_bit(B_SESS_VLD, &motg->inputs);
-
- if (test_bit(B_SESS_VLD, &motg->inputs)) {
- /* Switch D+/D- lines to Device connector */
- gpiod_set_value_cansleep(motg->switch_gpio, 0);
- } else {
- /* Switch D+/D- lines to Hub */
- gpiod_set_value_cansleep(motg->switch_gpio, 1);
- }
-
- schedule_work(&motg->sm_work);
-
- return NOTIFY_DONE;
-}
-
-static int msm_otg_id_notifier(struct notifier_block *nb, unsigned long event,
- void *ptr)
-{
- struct usb_phy *usb_phy = container_of(nb, struct usb_phy, id_nb);
- struct msm_otg *motg = container_of(usb_phy, struct msm_otg, phy);
-
- if (event)
- clear_bit(ID, &motg->inputs);
- else
- set_bit(ID, &motg->inputs);
-
- schedule_work(&motg->sm_work);
-
- return NOTIFY_DONE;
-}
-
-static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
-{
- struct msm_otg_platform_data *pdata;
- struct device_node *node = pdev->dev.of_node;
- struct property *prop;
- int len, ret, words;
- u32 val, tmp[3];
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- motg->pdata = pdata;
-
- pdata->phy_type = (enum msm_usb_phy_type)of_device_get_match_data(&pdev->dev);
- if (!pdata->phy_type)
- return 1;
-
- motg->link_rst = devm_reset_control_get(&pdev->dev, "link");
- if (IS_ERR(motg->link_rst))
- return PTR_ERR(motg->link_rst);
-
- motg->phy_rst = devm_reset_control_get(&pdev->dev, "phy");
- if (IS_ERR(motg->phy_rst))
- motg->phy_rst = NULL;
-
- pdata->mode = usb_get_dr_mode(&pdev->dev);
- if (pdata->mode == USB_DR_MODE_UNKNOWN)
- pdata->mode = USB_DR_MODE_OTG;
-
- pdata->otg_control = OTG_PHY_CONTROL;
- if (!of_property_read_u32(node, "qcom,otg-control", &val))
- if (val == OTG_PMIC_CONTROL)
- pdata->otg_control = val;
-
- if (!of_property_read_u32(node, "qcom,phy-num", &val) && val < 2)
- motg->phy_number = val;
-
- motg->vdd_levels[VDD_LEVEL_NONE] = USB_PHY_SUSP_DIG_VOL;
- motg->vdd_levels[VDD_LEVEL_MIN] = USB_PHY_VDD_DIG_VOL_MIN;
- motg->vdd_levels[VDD_LEVEL_MAX] = USB_PHY_VDD_DIG_VOL_MAX;
-
- if (of_get_property(node, "qcom,vdd-levels", &len) &&
- len == sizeof(tmp)) {
- of_property_read_u32_array(node, "qcom,vdd-levels",
- tmp, len / sizeof(*tmp));
- motg->vdd_levels[VDD_LEVEL_NONE] = tmp[VDD_LEVEL_NONE];
- motg->vdd_levels[VDD_LEVEL_MIN] = tmp[VDD_LEVEL_MIN];
- motg->vdd_levels[VDD_LEVEL_MAX] = tmp[VDD_LEVEL_MAX];
- }
-
- motg->manual_pullup = of_property_read_bool(node, "qcom,manual-pullup");
-
- motg->switch_gpio = devm_gpiod_get_optional(&pdev->dev, "switch",
- GPIOD_OUT_LOW);
- if (IS_ERR(motg->switch_gpio))
- return PTR_ERR(motg->switch_gpio);
-
- prop = of_find_property(node, "qcom,phy-init-sequence", &len);
- if (!prop || !len)
- return 0;
-
- words = len / sizeof(u32);
-
- if (words >= ULPI_EXT_VENDOR_SPECIFIC) {
- dev_warn(&pdev->dev, "Too big PHY init sequence %d\n", words);
- return 0;
- }
-
- pdata->phy_init_seq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
- if (!pdata->phy_init_seq)
- return 0;
-
- ret = of_property_read_u32_array(node, "qcom,phy-init-sequence",
- pdata->phy_init_seq, words);
- if (!ret)
- pdata->phy_init_sz = words;
-
- return 0;
-}
-
-static int msm_otg_reboot_notify(struct notifier_block *this,
- unsigned long code, void *unused)
-{
- struct msm_otg *motg = container_of(this, struct msm_otg, reboot);
-
- /*
- * Ensure that the D+/D- lines are routed to the uB (micro-B) connector, so
- * we can load the bootloader/kernel at the next reboot
- */
- gpiod_set_value_cansleep(motg->switch_gpio, 0);
- return NOTIFY_DONE;
-}
-
-static int msm_otg_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct device_node *np = pdev->dev.of_node;
- struct msm_otg_platform_data *pdata;
- struct resource *res;
- struct msm_otg *motg;
- struct usb_phy *phy;
- void __iomem *phy_select;
-
- motg = devm_kzalloc(&pdev->dev, sizeof(struct msm_otg), GFP_KERNEL);
- if (!motg)
- return -ENOMEM;
-
- motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
- GFP_KERNEL);
- if (!motg->phy.otg)
- return -ENOMEM;
-
- phy = &motg->phy;
- phy->dev = &pdev->dev;
-
- motg->clk = devm_clk_get(&pdev->dev, np ? "core" : "usb_hs_clk");
- if (IS_ERR(motg->clk)) {
- dev_err(&pdev->dev, "failed to get usb_hs_clk\n");
- return PTR_ERR(motg->clk);
- }
-
- /*
- * If USB Core is running its protocol engine based on CORE CLK,
- * CORE CLK must be running at >55 MHz for correct HSUSB
- * operation and USB core cannot tolerate frequency changes on
- * CORE CLK.
- */
- motg->pclk = devm_clk_get(&pdev->dev, np ? "iface" : "usb_hs_pclk");
- if (IS_ERR(motg->pclk)) {
- dev_err(&pdev->dev, "failed to get usb_hs_pclk\n");
- return PTR_ERR(motg->pclk);
- }
-
- /*
- * USB core clock is not present on all MSM chips. This
- * clock is introduced to remove the dependency on AXI
- * bus frequency.
- */
- motg->core_clk = devm_clk_get(&pdev->dev,
- np ? "alt_core" : "usb_hs_core_clk");
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
- motg->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!motg->regs)
- return -ENOMEM;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- if (!np)
- return -ENXIO;
- ret = msm_otg_read_dt(pdev, motg);
- if (ret)
- return ret;
- }
-
- /*
- * NOTE: The PHYs can be multiplexed between the chipidea controller
- * and the dwc3 controller, using a single bit. It is important that
- * the dwc3 driver does not set this bit in an incompatible way.
- */
- if (motg->phy_number) {
- phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4);
- if (!phy_select)
- return -ENOMEM;
-
- /* Enable second PHY with the OTG port */
- writel(0x1, phy_select);
- }
-
- dev_info(&pdev->dev, "OTG regs = %p\n", motg->regs);
-
- motg->irq = platform_get_irq(pdev, 0);
- if (motg->irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq failed\n");
- ret = motg->irq;
- return motg->irq;
- }
-
- motg->supplies[0].supply = "vddcx";
- motg->supplies[1].supply = "v3p3";
- motg->supplies[2].supply = "v1p8";
-
- ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(motg->supplies),
- motg->supplies);
- if (ret)
- return ret;
-
- motg->vddcx = motg->supplies[0].consumer;
- motg->v3p3 = motg->supplies[1].consumer;
- motg->v1p8 = motg->supplies[2].consumer;
-
- clk_set_rate(motg->clk, 60000000);
-
- clk_prepare_enable(motg->clk);
- clk_prepare_enable(motg->pclk);
-
- if (!IS_ERR(motg->core_clk))
- clk_prepare_enable(motg->core_clk);
-
- ret = msm_hsusb_init_vddcx(motg, 1);
- if (ret) {
- dev_err(&pdev->dev, "hsusb vddcx configuration failed\n");
- goto disable_clks;
- }
-
- ret = msm_hsusb_ldo_init(motg, 1);
- if (ret) {
- dev_err(&pdev->dev, "hsusb vreg configuration failed\n");
- goto disable_vddcx;
- }
- ret = msm_hsusb_ldo_set_mode(motg, 1);
- if (ret) {
- dev_err(&pdev->dev, "hsusb vreg enable failed\n");
- goto disable_ldo;
- }
-
- writel(0, USB_USBINTR);
- writel(0, USB_OTGSC);
-
- INIT_WORK(&motg->sm_work, msm_otg_sm_work);
- INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
- ret = devm_request_irq(&pdev->dev, motg->irq, msm_otg_irq, IRQF_SHARED,
- "msm_otg", motg);
- if (ret) {
- dev_err(&pdev->dev, "request irq failed\n");
- goto disable_ldo;
- }
-
- phy->init = msm_phy_init;
- phy->notify_disconnect = msm_phy_notify_disconnect;
- phy->type = USB_PHY_TYPE_USB2;
- phy->vbus_nb.notifier_call = msm_otg_vbus_notifier;
- phy->id_nb.notifier_call = msm_otg_id_notifier;
-
- phy->io_ops = &msm_otg_io_ops;
-
- phy->otg->usb_phy = &motg->phy;
- phy->otg->set_host = msm_otg_set_host;
- phy->otg->set_peripheral = msm_otg_set_peripheral;
-
- msm_usb_reset(phy);
-
- ret = usb_add_phy_dev(&motg->phy);
- if (ret) {
- dev_err(&pdev->dev, "usb_add_phy failed\n");
- goto disable_ldo;
- }
-
- ret = extcon_get_state(phy->edev, EXTCON_USB);
- if (ret)
- set_bit(B_SESS_VLD, &motg->inputs);
- else
- clear_bit(B_SESS_VLD, &motg->inputs);
-
- ret = extcon_get_state(phy->id_edev, EXTCON_USB_HOST);
- if (ret)
- clear_bit(ID, &motg->inputs);
- else
- set_bit(ID, &motg->inputs);
-
- platform_set_drvdata(pdev, motg);
- device_init_wakeup(&pdev->dev, 1);
-
- if (motg->pdata->mode == USB_DR_MODE_OTG &&
- motg->pdata->otg_control == OTG_USER_CONTROL) {
- ret = msm_otg_debugfs_init(motg);
- if (ret)
- dev_dbg(&pdev->dev, "Can not create mode change file\n");
- }
-
- if (test_bit(B_SESS_VLD, &motg->inputs)) {
- /* Switch D+/D- lines to Device connector */
- gpiod_set_value_cansleep(motg->switch_gpio, 0);
- } else {
- /* Switch D+/D- lines to Hub */
- gpiod_set_value_cansleep(motg->switch_gpio, 1);
- }
-
- motg->reboot.notifier_call = msm_otg_reboot_notify;
- register_reboot_notifier(&motg->reboot);
-
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
- return 0;
-
-disable_ldo:
- msm_hsusb_ldo_init(motg, 0);
-disable_vddcx:
- msm_hsusb_init_vddcx(motg, 0);
-disable_clks:
- clk_disable_unprepare(motg->pclk);
- clk_disable_unprepare(motg->clk);
- if (!IS_ERR(motg->core_clk))
- clk_disable_unprepare(motg->core_clk);
-
- return ret;
-}
-
-static int msm_otg_remove(struct platform_device *pdev)
-{
- struct msm_otg *motg = platform_get_drvdata(pdev);
- struct usb_phy *phy = &motg->phy;
- int cnt = 0;
-
- if (phy->otg->host || phy->otg->gadget)
- return -EBUSY;
-
- unregister_reboot_notifier(&motg->reboot);
-
- /*
- * Ensure that the D+/D- lines are routed to the uB (micro-B) connector, so
- * we can load the bootloader/kernel at the next reboot
- */
- gpiod_set_value_cansleep(motg->switch_gpio, 0);
-
- msm_otg_debugfs_cleanup();
- cancel_delayed_work_sync(&motg->chg_work);
- cancel_work_sync(&motg->sm_work);
-
- pm_runtime_resume(&pdev->dev);
-
- device_init_wakeup(&pdev->dev, 0);
- pm_runtime_disable(&pdev->dev);
-
- usb_remove_phy(phy);
- disable_irq(motg->irq);
-
- /*
- * Put PHY in low power mode.
- */
- ulpi_read(phy, 0x14);
- ulpi_write(phy, 0x08, 0x09);
-
- writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
- while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
- if (readl(USB_PORTSC) & PORTSC_PHCD)
- break;
- udelay(1);
- cnt++;
- }
- if (cnt >= PHY_SUSPEND_TIMEOUT_USEC)
- dev_err(phy->dev, "Unable to suspend PHY\n");
-
- clk_disable_unprepare(motg->pclk);
- clk_disable_unprepare(motg->clk);
- if (!IS_ERR(motg->core_clk))
- clk_disable_unprepare(motg->core_clk);
- msm_hsusb_ldo_init(motg, 0);
-
- pm_runtime_set_suspended(&pdev->dev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int msm_otg_runtime_idle(struct device *dev)
-{
- struct msm_otg *motg = dev_get_drvdata(dev);
- struct usb_otg *otg = motg->phy.otg;
-
- dev_dbg(dev, "OTG runtime idle\n");
-
- /*
- * It is sometimes observed that a spurious interrupt
- * comes when the PHY is put into LPM immediately after a PHY reset.
- * This 1 sec delay also prevents entering LPM immediately
- * after an asynchronous interrupt.
- */
- if (otg->state != OTG_STATE_UNDEFINED)
- pm_schedule_suspend(dev, 1000);
-
- return -EAGAIN;
-}
-
-static int msm_otg_runtime_suspend(struct device *dev)
-{
- struct msm_otg *motg = dev_get_drvdata(dev);
-
- dev_dbg(dev, "OTG runtime suspend\n");
- return msm_otg_suspend(motg);
-}
-
-static int msm_otg_runtime_resume(struct device *dev)
-{
- struct msm_otg *motg = dev_get_drvdata(dev);
-
- dev_dbg(dev, "OTG runtime resume\n");
- return msm_otg_resume(motg);
-}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-static int msm_otg_pm_suspend(struct device *dev)
-{
- struct msm_otg *motg = dev_get_drvdata(dev);
-
- dev_dbg(dev, "OTG PM suspend\n");
- return msm_otg_suspend(motg);
-}
-
-static int msm_otg_pm_resume(struct device *dev)
-{
- struct msm_otg *motg = dev_get_drvdata(dev);
- int ret;
-
- dev_dbg(dev, "OTG PM resume\n");
-
- ret = msm_otg_resume(motg);
- if (ret)
- return ret;
-
- /*
- * Runtime PM Documentation recommends bringing the
- * device to full powered state upon resume.
- * device to a fully powered state upon resume.
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops msm_otg_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume)
- SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume,
- msm_otg_runtime_idle)
-};
-
-static struct platform_driver msm_otg_driver = {
- .probe = msm_otg_probe,
- .remove = msm_otg_remove,
- .driver = {
- .name = DRIVER_NAME,
- .pm = &msm_otg_dev_pm_ops,
- .of_match_table = msm_otg_dt_match,
- },
-};
-
-module_platform_driver(msm_otg_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MSM USB transceiver driver");
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index 0e315694adc9..554b72282276 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
* Author: Chao Xie <chao.xie@marvell.com>
* Neil Zhang <zhangwm@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/module.h>
@@ -87,9 +83,10 @@ static void mv_otg_run_state_machine(struct mv_otg *mvotg,
queue_delayed_work(mvotg->qwork, &mvotg->work, delay);
}
-static void mv_otg_timer_await_bcon(unsigned long data)
+static void mv_otg_timer_await_bcon(struct timer_list *t)
{
- struct mv_otg *mvotg = (struct mv_otg *) data;
+ struct mv_otg *mvotg = from_timer(mvotg, t,
+ otg_ctrl.timer[A_WAIT_BCON_TIMER]);
mvotg->otg_ctrl.a_wait_bcon_timeout = 1;
@@ -117,8 +114,7 @@ static int mv_otg_cancel_timer(struct mv_otg *mvotg, unsigned int id)
}
static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id,
- unsigned long interval,
- void (*callback) (unsigned long))
+ unsigned long interval)
{
struct timer_list *timer;
@@ -131,9 +127,6 @@ static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id,
return -EBUSY;
}
- init_timer(timer);
- timer->data = (unsigned long) mvotg;
- timer->function = callback;
timer->expires = jiffies + interval;
add_timer(timer);
@@ -459,8 +452,7 @@ run:
if (old_state != OTG_STATE_A_HOST)
mv_otg_start_host(mvotg, 1);
mv_otg_set_timer(mvotg, A_WAIT_BCON_TIMER,
- T_A_WAIT_BCON,
- mv_otg_timer_await_bcon);
+ T_A_WAIT_BCON);
/*
* Now, we directly enter A_HOST. So set b_conn = 1
* here. In fact, it need host driver to notify us.
@@ -722,7 +714,8 @@ static int mv_otg_probe(struct platform_device *pdev)
otg->set_vbus = mv_otg_set_vbus;
for (i = 0; i < OTG_TIMER_NUM; i++)
- init_timer(&mvotg->otg_ctrl.timer[i]);
+ timer_setup(&mvotg->otg_ctrl.timer[i],
+ mv_otg_timer_await_bcon, 0);
r = platform_get_resource_byname(mvotg->pdev,
IORESOURCE_MEM, "phyregs");
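The phy-mv-usb.c hunks above convert the driver from the old init_timer()/data-cookie interface to timer_setup(), with the callback recovering its container through from_timer(). A minimal sketch of the conversion pattern, using an illustrative struct foo instead of the driver's mv_otg:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct foo {
        struct timer_list timer;
        int timed_out;
};

/* New-style callback: it receives the timer itself, not a cast cookie. */
static void foo_timeout(struct timer_list *t)
{
        struct foo *foo = from_timer(foo, t, timer);

        foo->timed_out = 1;
}

static void foo_start(struct foo *foo, unsigned long interval)
{
        timer_setup(&foo->timer, foo_timeout, 0);
        foo->timer.expires = jiffies + interval;
        add_timer(&foo->timer);
}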
diff --git a/drivers/usb/phy/phy-mv-usb.h b/drivers/usb/phy/phy-mv-usb.h
index 551da6eb0ba8..96701a1229ad 100644
--- a/drivers/usb/phy/phy-mv-usb.h
+++ b/drivers/usb/phy/phy-mv-usb.h
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __MV_USB_OTG_CONTROLLER__
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 0e2f1a36d315..da031c45395a 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2012-2014 Freescale Semiconductor, Inc.
* Copyright (C) 2012 Marek Vasut <marex@denx.de>
* on behalf of DENX Software Engineering GmbH
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/module.h>
@@ -67,11 +61,26 @@
#define ANADIG_ANA_MISC0_SET 0x154
#define ANADIG_ANA_MISC0_CLR 0x158
+#define ANADIG_USB1_CHRG_DETECT_SET 0x1b4
+#define ANADIG_USB1_CHRG_DETECT_CLR 0x1b8
+#define ANADIG_USB1_CHRG_DETECT_EN_B BIT(20)
+#define ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B BIT(19)
+#define ANADIG_USB1_CHRG_DETECT_CHK_CONTACT BIT(18)
+
#define ANADIG_USB1_VBUS_DET_STAT 0x1c0
+#define ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID BIT(3)
+
+#define ANADIG_USB1_CHRG_DET_STAT 0x1d0
+#define ANADIG_USB1_CHRG_DET_STAT_DM_STATE BIT(2)
+#define ANADIG_USB1_CHRG_DET_STAT_CHRG_DETECTED BIT(1)
+#define ANADIG_USB1_CHRG_DET_STAT_PLUG_CONTACT BIT(0)
+
#define ANADIG_USB2_VBUS_DET_STAT 0x220
#define ANADIG_USB1_LOOPBACK_SET 0x1e4
#define ANADIG_USB1_LOOPBACK_CLR 0x1e8
+#define ANADIG_USB1_LOOPBACK_UTMI_TESTSTART BIT(0)
+
#define ANADIG_USB2_LOOPBACK_SET 0x244
#define ANADIG_USB2_LOOPBACK_CLR 0x248
@@ -479,6 +488,144 @@ static int mxs_phy_on_disconnect(struct usb_phy *phy,
return 0;
}
+#define MXS_USB_CHARGER_DATA_CONTACT_TIMEOUT 100
+static int mxs_charger_data_contact_detect(struct mxs_phy *x)
+{
+ struct regmap *regmap = x->regmap_anatop;
+ int i, stable_contact_count = 0;
+ u32 val;
+
+ /* Check if vbus is valid */
+ regmap_read(regmap, ANADIG_USB1_VBUS_DET_STAT, &val);
+ if (!(val & ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID)) {
+ dev_err(x->phy.dev, "vbus is not valid\n");
+ return -EINVAL;
+ }
+
+ /* Enable charger detector */
+ regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_CLR,
+ ANADIG_USB1_CHRG_DETECT_EN_B);
+ /*
+ * - Do not check whether a charger is connected to the USB port
+ * - Check whether the USB plug's data pins have made contact
+ */
+ regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_SET,
+ ANADIG_USB1_CHRG_DETECT_CHK_CONTACT |
+ ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
+
+ /* Check if plug is connected */
+ for (i = 0; i < MXS_USB_CHARGER_DATA_CONTACT_TIMEOUT; i++) {
+ regmap_read(regmap, ANADIG_USB1_CHRG_DET_STAT, &val);
+ if (val & ANADIG_USB1_CHRG_DET_STAT_PLUG_CONTACT) {
+ stable_contact_count++;
+ if (stable_contact_count > 5)
+ /* Data pin makes contact */
+ break;
+ else
+ usleep_range(5000, 10000);
+ } else {
+ stable_contact_count = 0;
+ usleep_range(5000, 6000);
+ }
+ }
+
+ if (i == MXS_USB_CHARGER_DATA_CONTACT_TIMEOUT) {
+ dev_err(x->phy.dev,
+ "Data pin can't make good contact.\n");
+ /* Disable charger detector */
+ regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_SET,
+ ANADIG_USB1_CHRG_DETECT_EN_B |
+ ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static enum usb_charger_type mxs_charger_primary_detection(struct mxs_phy *x)
+{
+ struct regmap *regmap = x->regmap_anatop;
+ enum usb_charger_type chgr_type = UNKNOWN_TYPE;
+ u32 val;
+
+ /*
+ * - Do check whether a charger is connected to the USB port
+ * - Do not check whether the USB plug's data pins have made
+ * contact
+ */
+ regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_CLR,
+ ANADIG_USB1_CHRG_DETECT_CHK_CONTACT |
+ ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
+
+ msleep(100);
+
+ /* Check if it is a charger */
+ regmap_read(regmap, ANADIG_USB1_CHRG_DET_STAT, &val);
+ if (!(val & ANADIG_USB1_CHRG_DET_STAT_CHRG_DETECTED)) {
+ chgr_type = SDP_TYPE;
+ dev_dbg(x->phy.dev, "It is a standard downstream port\n");
+ }
+
+ /* Disable charger detector */
+ regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_SET,
+ ANADIG_USB1_CHRG_DETECT_EN_B |
+ ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
+
+ return chgr_type;
+}
+
+/*
+ * This must be called after DP is pulled up; the DM state is then
+ * used to differentiate DCP from CDP.
+ */
+enum usb_charger_type mxs_charger_secondary_detection(struct mxs_phy *x)
+{
+ struct regmap *regmap = x->regmap_anatop;
+ int val;
+
+ msleep(80);
+
+ regmap_read(regmap, ANADIG_USB1_CHRG_DET_STAT, &val);
+ if (val & ANADIG_USB1_CHRG_DET_STAT_DM_STATE) {
+ dev_dbg(x->phy.dev, "It is a dedicated charging port\n");
+ return DCP_TYPE;
+ } else {
+ dev_dbg(x->phy.dev, "It is a charging downstream port\n");
+ return CDP_TYPE;
+ }
+}
+
+static enum usb_charger_type mxs_phy_charger_detect(struct usb_phy *phy)
+{
+ struct mxs_phy *mxs_phy = to_mxs_phy(phy);
+ struct regmap *regmap = mxs_phy->regmap_anatop;
+ void __iomem *base = phy->io_priv;
+ enum usb_charger_type chgr_type = UNKNOWN_TYPE;
+
+ if (mxs_charger_data_contact_detect(mxs_phy))
+ return chgr_type;
+
+ chgr_type = mxs_charger_primary_detection(mxs_phy);
+
+ if (chgr_type != SDP_TYPE) {
+ /* Pull up DP via test */
+ writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
+ base + HW_USBPHY_DEBUG_CLR);
+ regmap_write(regmap, ANADIG_USB1_LOOPBACK_SET,
+ ANADIG_USB1_LOOPBACK_UTMI_TESTSTART);
+
+ chgr_type = mxs_charger_secondary_detection(mxs_phy);
+
+ /* Stop the test */
+ regmap_write(regmap, ANADIG_USB1_LOOPBACK_CLR,
+ ANADIG_USB1_LOOPBACK_UTMI_TESTSTART);
+ writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
+ base + HW_USBPHY_DEBUG_SET);
+ }
+
+ return chgr_type;
+}
+
static int mxs_phy_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -567,6 +714,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect;
mxs_phy->phy.type = USB_PHY_TYPE_USB2;
mxs_phy->phy.set_wakeup = mxs_phy_set_wakeup;
+ mxs_phy->phy.charger_detect = mxs_phy_charger_detect;
mxs_phy->clk = clk;
mxs_phy->data = of_id->data;
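The charger-detection support added to phy-mxs-usb.c follows the usual BC 1.2 staging: wait for stable data-pin contact, run primary detection to separate an SDP from a charger, then pull DP up and run secondary detection to tell DCP and CDP apart. A simplified sketch of that decision flow (assumed glue code; it omits the DP pull-up through the UTMI loopback test register that mxs_phy_charger_detect() performs between the two stages):

static enum usb_charger_type example_detect(struct mxs_phy *x)
{
        enum usb_charger_type type;

        /* D+/D- never made stable contact: give up. */
        if (mxs_charger_data_contact_detect(x))
                return UNKNOWN_TYPE;

        /* No charger signature on the data lines means SDP. */
        type = mxs_charger_primary_detection(x);
        if (type != SDP_TYPE)
                /* DM driven high means DCP, otherwise CDP. */
                type = mxs_charger_secondary_detection(x);

        return type;
}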
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index 800d1d90753d..ee0863c6553e 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OMAP OTG controller driver
*
@@ -6,15 +7,6 @@
* Copyright (C) 2005-2006 Nokia Corporation
* Copyright (C) 2004 Texas Instruments
* Copyright (C) 2004 David Brownell
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of this
- * archive for more details.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/io.h>
diff --git a/drivers/usb/phy/phy-qcom-8x16-usb.c b/drivers/usb/phy/phy-qcom-8x16-usb.c
deleted file mode 100644
index 679afeaaa9a8..000000000000
--- a/drivers/usb/phy/phy-qcom-8x16-usb.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * Copyright (c) 2015, Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/extcon.h>
-#include <linux/gpio/consumer.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/reboot.h>
-#include <linux/regulator/consumer.h>
-#include <linux/reset.h>
-#include <linux/slab.h>
-#include <linux/usb.h>
-#include <linux/usb/ulpi.h>
-
-#define HSPHY_AHBBURST 0x0090
-#define HSPHY_AHBMODE 0x0098
-#define HSPHY_GENCONFIG 0x009c
-#define HSPHY_GENCONFIG_2 0x00a0
-
-#define HSPHY_USBCMD 0x0140
-#define HSPHY_ULPI_VIEWPORT 0x0170
-#define HSPHY_CTRL 0x0240
-
-#define HSPHY_TXFIFO_IDLE_FORCE_DIS BIT(4)
-#define HSPHY_SESS_VLD_CTRL_EN BIT(7)
-#define HSPHY_POR_ASSERT BIT(0)
-#define HSPHY_RETEN BIT(1)
-
-#define HSPHY_SESS_VLD_CTRL BIT(25)
-
-#define ULPI_PWR_CLK_MNG_REG 0x88
-#define ULPI_PWR_OTG_COMP_DISABLE BIT(0)
-
-#define ULPI_MISC_A 0x96
-#define ULPI_MISC_A_VBUSVLDEXTSEL BIT(1)
-#define ULPI_MISC_A_VBUSVLDEXT BIT(0)
-
-#define HSPHY_3P3_MIN 3050000 /* uV */
-#define HSPHY_3P3_MAX 3300000 /* uV */
-
-#define HSPHY_1P8_MIN 1800000 /* uV */
-#define HSPHY_1P8_MAX 1800000 /* uV */
-
-#define HSPHY_VDD_MIN 5
-#define HSPHY_VDD_MAX 7
-
-struct phy_8x16 {
- struct usb_phy phy;
- void __iomem *regs;
- struct clk *core_clk;
- struct clk *iface_clk;
- struct regulator_bulk_data regulator[3];
-
- struct reset_control *phy_reset;
-
- struct gpio_desc *switch_gpio;
- struct notifier_block reboot_notify;
-};
-
-static int phy_8x16_notify_connect(struct usb_phy *phy,
- enum usb_device_speed speed)
-{
- struct phy_8x16 *qphy = container_of(phy, struct phy_8x16, phy);
- u32 val;
-
- val = ULPI_MISC_A_VBUSVLDEXTSEL | ULPI_MISC_A_VBUSVLDEXT;
- usb_phy_io_write(&qphy->phy, val, ULPI_SET(ULPI_MISC_A));
-
- val = readl(qphy->regs + HSPHY_USBCMD);
- val |= HSPHY_SESS_VLD_CTRL;
- writel(val, qphy->regs + HSPHY_USBCMD);
-
- return 0;
-}
-
-static int phy_8x16_notify_disconnect(struct usb_phy *phy,
- enum usb_device_speed speed)
-{
- struct phy_8x16 *qphy = container_of(phy, struct phy_8x16, phy);
- u32 val;
-
- val = ULPI_MISC_A_VBUSVLDEXT | ULPI_MISC_A_VBUSVLDEXTSEL;
- usb_phy_io_write(&qphy->phy, val, ULPI_CLR(ULPI_MISC_A));
-
- val = readl(qphy->regs + HSPHY_USBCMD);
- val &= ~HSPHY_SESS_VLD_CTRL;
- writel(val, qphy->regs + HSPHY_USBCMD);
-
- return 0;
-}
-
-static int phy_8x16_vbus_on(struct phy_8x16 *qphy)
-{
- phy_8x16_notify_connect(&qphy->phy, USB_SPEED_UNKNOWN);
-
- /* Switch D+/D- lines to Device connector */
- gpiod_set_value_cansleep(qphy->switch_gpio, 0);
-
- return 0;
-}
-
-static int phy_8x16_vbus_off(struct phy_8x16 *qphy)
-{
- phy_8x16_notify_disconnect(&qphy->phy, USB_SPEED_UNKNOWN);
-
- /* Switch D+/D- lines to USB HUB */
- gpiod_set_value_cansleep(qphy->switch_gpio, 1);
-
- return 0;
-}
-
-static int phy_8x16_vbus_notify(struct notifier_block *nb, unsigned long event,
- void *ptr)
-{
- struct usb_phy *usb_phy = container_of(nb, struct usb_phy, vbus_nb);
- struct phy_8x16 *qphy = container_of(usb_phy, struct phy_8x16, phy);
-
- if (event)
- phy_8x16_vbus_on(qphy);
- else
- phy_8x16_vbus_off(qphy);
-
- return NOTIFY_DONE;
-}
-
-static int phy_8x16_init(struct usb_phy *phy)
-{
- struct phy_8x16 *qphy = container_of(phy, struct phy_8x16, phy);
- u32 val, init[] = {0x44, 0x6B, 0x24, 0x13};
- u32 addr = ULPI_EXT_VENDOR_SPECIFIC;
- int idx, state;
-
- for (idx = 0; idx < ARRAY_SIZE(init); idx++)
- usb_phy_io_write(phy, init[idx], addr + idx);
-
- reset_control_reset(qphy->phy_reset);
-
- /* Assert USB HSPHY_POR */
- val = readl(qphy->regs + HSPHY_CTRL);
- val |= HSPHY_POR_ASSERT;
- writel(val, qphy->regs + HSPHY_CTRL);
-
- /*
- * wait for minimum 10 microseconds as suggested in HPG.
- * Use a slightly larger value since the exact value didn't
- * work 100% of the time.
- */
- usleep_range(12, 15);
-
- /* Deassert USB HSPHY_POR */
- val = readl(qphy->regs + HSPHY_CTRL);
- val &= ~HSPHY_POR_ASSERT;
- writel(val, qphy->regs + HSPHY_CTRL);
-
- usleep_range(10, 15);
-
- writel(0x00, qphy->regs + HSPHY_AHBBURST);
- writel(0x08, qphy->regs + HSPHY_AHBMODE);
-
- /* workaround for rx buffer collision issue */
- val = readl(qphy->regs + HSPHY_GENCONFIG);
- val &= ~HSPHY_TXFIFO_IDLE_FORCE_DIS;
- writel(val, qphy->regs + HSPHY_GENCONFIG);
-
- val = readl(qphy->regs + HSPHY_GENCONFIG_2);
- val |= HSPHY_SESS_VLD_CTRL_EN;
- writel(val, qphy->regs + HSPHY_GENCONFIG_2);
-
- val = ULPI_PWR_OTG_COMP_DISABLE;
- usb_phy_io_write(phy, val, ULPI_SET(ULPI_PWR_CLK_MNG_REG));
-
- state = extcon_get_state(qphy->phy.edev, EXTCON_USB);
- if (state)
- phy_8x16_vbus_on(qphy);
- else
- phy_8x16_vbus_off(qphy);
-
- val = usb_phy_io_read(&qphy->phy, ULPI_FUNC_CTRL);
- val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- val |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
- usb_phy_io_write(&qphy->phy, val, ULPI_FUNC_CTRL);
-
- return 0;
-}
-
-static void phy_8x16_shutdown(struct usb_phy *phy)
-{
- u32 val;
-
- /* Put the controller in non-driving mode */
- val = usb_phy_io_read(phy, ULPI_FUNC_CTRL);
- val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
- usb_phy_io_write(phy, val, ULPI_FUNC_CTRL);
-}
-
-static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
-{
- struct device *dev = qphy->phy.dev;
- int ret;
-
- qphy->core_clk = devm_clk_get(dev, "core");
- if (IS_ERR(qphy->core_clk))
- return PTR_ERR(qphy->core_clk);
-
- qphy->iface_clk = devm_clk_get(dev, "iface");
- if (IS_ERR(qphy->iface_clk))
- return PTR_ERR(qphy->iface_clk);
-
- qphy->regulator[0].supply = "v3p3";
- qphy->regulator[1].supply = "v1p8";
- qphy->regulator[2].supply = "vddcx";
-
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(qphy->regulator),
- qphy->regulator);
- if (ret)
- return ret;
-
- qphy->phy_reset = devm_reset_control_get(dev, "phy");
- if (IS_ERR(qphy->phy_reset))
- return PTR_ERR(qphy->phy_reset);
-
- qphy->switch_gpio = devm_gpiod_get_optional(dev, "switch",
- GPIOD_OUT_LOW);
- return PTR_ERR_OR_ZERO(qphy->switch_gpio);
-}
-
-static int phy_8x16_reboot_notify(struct notifier_block *this,
- unsigned long code, void *unused)
-{
- struct phy_8x16 *qphy;
-
- qphy = container_of(this, struct phy_8x16, reboot_notify);
-
- /*
- * Ensure that D+/D- lines are routed to uB connector, so
- * we could load bootloader/kernel at next reboot_notify
- */
- gpiod_set_value_cansleep(qphy->switch_gpio, 0);
- return NOTIFY_DONE;
-}
-
-static int phy_8x16_probe(struct platform_device *pdev)
-{
- struct phy_8x16 *qphy;
- struct resource *res;
- struct usb_phy *phy;
- int ret;
-
- qphy = devm_kzalloc(&pdev->dev, sizeof(*qphy), GFP_KERNEL);
- if (!qphy)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, qphy);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- qphy->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(qphy->regs))
- return PTR_ERR(qphy->regs);
-
- phy = &qphy->phy;
- phy->dev = &pdev->dev;
- phy->label = dev_name(&pdev->dev);
- phy->init = phy_8x16_init;
- phy->shutdown = phy_8x16_shutdown;
- phy->notify_connect = phy_8x16_notify_connect;
- phy->notify_disconnect = phy_8x16_notify_disconnect;
- phy->io_priv = qphy->regs + HSPHY_ULPI_VIEWPORT;
- phy->io_ops = &ulpi_viewport_access_ops;
- phy->type = USB_PHY_TYPE_USB2;
- phy->vbus_nb.notifier_call = phy_8x16_vbus_notify;
- phy->id_nb.notifier_call = NULL;
-
- ret = phy_8x16_read_devicetree(qphy);
- if (ret < 0)
- return ret;
-
- ret = clk_set_rate(qphy->core_clk, INT_MAX);
- if (ret < 0)
- dev_dbg(phy->dev, "Can't boost core clock\n");
-
- ret = clk_prepare_enable(qphy->core_clk);
- if (ret < 0)
- return ret;
-
- ret = clk_prepare_enable(qphy->iface_clk);
- if (ret < 0)
- goto off_core;
-
- ret = regulator_bulk_enable(ARRAY_SIZE(qphy->regulator),
- qphy->regulator);
- if (WARN_ON(ret))
- goto off_clks;
-
- ret = usb_add_phy_dev(&qphy->phy);
- if (ret)
- goto off_power;
-
- qphy->reboot_notify.notifier_call = phy_8x16_reboot_notify;
- register_reboot_notifier(&qphy->reboot_notify);
-
- return 0;
-
-off_power:
- regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
-off_clks:
- clk_disable_unprepare(qphy->iface_clk);
-off_core:
- clk_disable_unprepare(qphy->core_clk);
- return ret;
-}
-
-static int phy_8x16_remove(struct platform_device *pdev)
-{
- struct phy_8x16 *qphy = platform_get_drvdata(pdev);
-
- unregister_reboot_notifier(&qphy->reboot_notify);
-
- /*
- * Ensure that D+/D- lines are routed to uB connector, so
- * we could load bootloader/kernel at next reboot_notify
- */
- gpiod_set_value_cansleep(qphy->switch_gpio, 0);
-
- usb_remove_phy(&qphy->phy);
-
- clk_disable_unprepare(qphy->iface_clk);
- clk_disable_unprepare(qphy->core_clk);
- regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
- return 0;
-}
-
-static const struct of_device_id phy_8x16_dt_match[] = {
- { .compatible = "qcom,usb-8x16-phy" },
- { }
-};
-MODULE_DEVICE_TABLE(of, phy_8x16_dt_match);
-
-static struct platform_driver phy_8x16_driver = {
- .probe = phy_8x16_probe,
- .remove = phy_8x16_remove,
- .driver = {
- .name = "phy-qcom-8x16-usb",
- .of_match_table = phy_8x16_dt_match,
- },
-};
-module_platform_driver(phy_8x16_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Qualcomm APQ8016/MSM8916 chipsets USB transceiver driver");
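The removed phy-qcom-8x16-usb.c driver reached its ULPI registers through the viewport helpers (phy->io_ops = &ulpi_viewport_access_ops), so most register updates were read-modify-write cycles over usb_phy_io_read()/usb_phy_io_write(). A hedged sketch of that idiom, modeled on the driver's shutdown path that parks the PHY in non-driving mode:

#include <linux/usb/phy.h>
#include <linux/usb/ulpi.h>

static int example_set_nondriving(struct usb_phy *phy)
{
        int val;

        val = usb_phy_io_read(phy, ULPI_FUNC_CTRL);
        if (val < 0)
                return val;

        val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
        val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;

        return usb_phy_io_write(phy, val, ULPI_FUNC_CTRL);
}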
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index 8babd318c0ed..b3ce42edb373 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Tahvo USB transceiver driver
*
@@ -9,21 +10,12 @@
*
* Original driver written by Juha Yrjölä, Tony Lindgren and Timo Teräs.
* Modified for Retu/Tahvo MFD by Aaro Koskinen.
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of this
- * archive for more details.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/usb.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb/otg.h>
@@ -368,7 +360,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable);
if (IS_ERR(tu->extcon)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
- return -ENOMEM;
+ ret = PTR_ERR(tu->extcon);
+ goto err_disable_clk;
}
ret = devm_extcon_dev_register(&pdev->dev, tu->extcon);
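The tahvo hunk above fixes the probe error path: the encoded error from devm_extcon_dev_allocate() is propagated instead of a hard-coded -ENOMEM, and the failure now unwinds the clock enabled earlier in probe. A generic sketch of the same idiom, with illustrative names (edev, example_cables, example_clk) rather than the tahvo driver's own:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/extcon-provider.h>
#include <linux/platform_device.h>

static const unsigned int example_cables[] = { EXTCON_USB, EXTCON_NONE };

static int example_probe_extcon(struct platform_device *pdev,
                                struct clk *example_clk)
{
        struct extcon_dev *edev;
        int ret;

        ret = clk_prepare_enable(example_clk);
        if (ret)
                return ret;

        edev = devm_extcon_dev_allocate(&pdev->dev, example_cables);
        if (IS_ERR(edev)) {
                /* Propagate the encoded error, don't assume -ENOMEM. */
                ret = PTR_ERR(edev);
                goto err_disable_clk;
        }

        ret = devm_extcon_dev_register(&pdev->dev, edev);
        if (ret)
                goto err_disable_clk;

        return 0;

err_disable_clk:
        clk_disable_unprepare(example_clk);
        return ret;
}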
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index ccc2bf5274b4..f668bfb708d3 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Google, Inc.
* Copyright (C) 2013 NVIDIA Corporation
@@ -6,16 +7,6 @@
* Erik Gilling <konkers@google.com>
* Benoit Goby <benoit@android.com>
* Venu Byravarasu <vbyravarasu@nvidia.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/resource.h>
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index b5dc077ed7d3..e78ed52339e6 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* twl6030_usb - TWL6030 USB transceiver, talking to OMAP OTG driver.
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*
* Author: Hema HK <hemahk@ti.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/phy/phy-ulpi-viewport.c b/drivers/usb/phy/phy-ulpi-viewport.c
index 18bb8264b5a0..7a14e0e3b635 100644
--- a/drivers/usb/phy/phy-ulpi-viewport.c
+++ b/drivers/usb/phy/phy-ulpi-viewport.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/export.h>
diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
index f48a7a21e3c2..a43c49369a60 100644
--- a/drivers/usb/phy/phy-ulpi.c
+++ b/drivers/usb/phy/phy-ulpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Generic ULPI USB transceiver support
*
@@ -7,20 +8,6 @@
*
* Sascha Hauer <s.hauer@pengutronix.de>
* Freescale Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 89f4ac4cd93e..f97cb47577fc 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* phy.c -- USB phy handling
*
* Copyright (C) 2004-2013 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/export.h>
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index f0ce304c5aaf..56079bb6759a 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/err.h>
#include <linux/gpio.h>
@@ -486,6 +477,10 @@ static const struct of_device_id usbhs_of_match[] = {
.data = (void *)USBHS_TYPE_RCAR_GEN3,
},
{
+ .compatible = "renesas,usbhs-r8a77995",
+ .data = (void *)USBHS_TYPE_RCAR_GEN3_WITH_PLL,
+ },
+ {
.compatible = "renesas,rcar-gen2-usbhs",
.data = (void *)USBHS_TYPE_RCAR_GEN2,
},
@@ -501,7 +496,6 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
{
struct renesas_usbhs_platform_info *info;
struct renesas_usbhs_driver_param *dparam;
- const struct of_device_id *of_id = of_match_device(usbhs_of_match, dev);
u32 tmp;
int gpio;
@@ -510,7 +504,7 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
return NULL;
dparam = &info->driver_param;
- dparam->type = of_id ? (uintptr_t)of_id->data : 0;
+ dparam->type = (uintptr_t)of_device_get_match_data(dev);
if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp))
dparam->buswait_bwait = tmp;
gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0,
@@ -519,15 +513,19 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
dparam->enable_gpio = gpio;
if (dparam->type == USBHS_TYPE_RCAR_GEN2 ||
- dparam->type == USBHS_TYPE_RCAR_GEN3)
+ dparam->type == USBHS_TYPE_RCAR_GEN3 ||
+ dparam->type == USBHS_TYPE_RCAR_GEN3_WITH_PLL) {
dparam->has_usb_dmac = 1;
+ dparam->pipe_configs = usbhsc_new_pipe;
+ dparam->pipe_size = ARRAY_SIZE(usbhsc_new_pipe);
+ }
return info;
}
static int usbhs_probe(struct platform_device *pdev)
{
- struct renesas_usbhs_platform_info *info = dev_get_platdata(&pdev->dev);
+ struct renesas_usbhs_platform_info *info = renesas_usbhs_get_info(pdev);
struct renesas_usbhs_driver_callback *dfunc;
struct usbhs_priv *priv;
struct resource *res, *irq_res;
@@ -577,17 +575,12 @@ static int usbhs_probe(struct platform_device *pdev)
switch (priv->dparam.type) {
case USBHS_TYPE_RCAR_GEN2:
priv->pfunc = usbhs_rcar2_ops;
- if (!priv->dparam.pipe_configs) {
- priv->dparam.pipe_configs = usbhsc_new_pipe;
- priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_new_pipe);
- }
break;
case USBHS_TYPE_RCAR_GEN3:
priv->pfunc = usbhs_rcar3_ops;
- if (!priv->dparam.pipe_configs) {
- priv->dparam.pipe_configs = usbhsc_new_pipe;
- priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_new_pipe);
- }
+ break;
+ case USBHS_TYPE_RCAR_GEN3_WITH_PLL:
+ priv->pfunc = usbhs_rcar3_with_pll_ops;
break;
default:
if (!info->platform_callback.get_id) {
@@ -710,7 +703,7 @@ probe_end_pipe_exit:
static int usbhs_remove(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
- struct renesas_usbhs_platform_info *info = dev_get_platdata(&pdev->dev);
+ struct renesas_usbhs_platform_info *info = renesas_usbhs_get_info(pdev);
struct renesas_usbhs_driver_callback *dfunc = &info->driver_callback;
dev_dbg(&pdev->dev, "usb remove\n");
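usbhs_parse_dt() now derives the controller type with of_device_get_match_data() instead of looking up the of_device_id by hand and dereferencing of_id->data; the helper returns the matching entry's .data pointer, or NULL when nothing matched, which the cast turns into the same 0 default as before. A minimal sketch of the idiom with an illustrative match table:

#include <linux/of_device.h>

enum example_type { EXAMPLE_GEN2 = 1, EXAMPLE_GEN3 = 2 };

static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,example-gen2", .data = (void *)EXAMPLE_GEN2 },
        { .compatible = "vendor,example-gen3", .data = (void *)EXAMPLE_GEN3 },
        { /* sentinel */ }
};

static unsigned int example_get_type(struct device *dev)
{
        /* NULL .data or no match yields 0, i.e. the "unknown" default. */
        return (uintptr_t)of_device_get_match_data(dev);
}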
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 8c5fc12ad778..64797784a6df 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef RENESAS_USB_DRIVER_H
#define RENESAS_USB_DRIVER_H
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 50285b01da92..2d24ef3076ef 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/delay.h>
#include <linux/io.h>
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index 8b98507d7abc..88d1816bcda2 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef RENESAS_USB_FIFO_H
#define RENESAS_USB_FIFO_H
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 28965ef4f824..7475c4f64724 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/interrupt.h>
diff --git a/drivers/usb/renesas_usbhs/mod.h b/drivers/usb/renesas_usbhs/mod.h
index 1ef5bf604070..a4a61d6b82a1 100644
--- a/drivers/usb/renesas_usbhs/mod.h
+++ b/drivers/usb/renesas_usbhs/mod.h
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef RENESAS_USB_MOD_H
#define RENESAS_USB_MOD_H
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index c068b673420b..34ee9ebe12a3 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index e256351cb72d..4e59c649db81 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/io.h>
#include <linux/list.h>
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index d811f0550c04..093cd8e87335 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 95185fdb29b1..d3d002244891 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef RENESAS_USB_PIPE_H
#define RENESAS_USB_PIPE_H
diff --git a/drivers/usb/renesas_usbhs/rcar2.c b/drivers/usb/renesas_usbhs/rcar2.c
index 277160bc6f25..85a0e0933917 100644
--- a/drivers/usb/renesas_usbhs/rcar2.c
+++ b/drivers/usb/renesas_usbhs/rcar2.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver R-Car Gen. 2 initialization and power control
*
* Copyright (C) 2014 Ulrich Hecht
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/gpio.h>
diff --git a/drivers/usb/renesas_usbhs/rcar2.h b/drivers/usb/renesas_usbhs/rcar2.h
index f07f10d9b3b2..45e3526cedeb 100644
--- a/drivers/usb/renesas_usbhs/rcar2.h
+++ b/drivers/usb/renesas_usbhs/rcar2.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "common.h"
extern const struct renesas_usbhs_platform_callback
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index 02b67abfc2a1..c929d296c77b 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas USB driver R-Car Gen. 3 initialization and power control
*
* Copyright (C) 2016 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/delay.h>
@@ -15,24 +11,39 @@
#include "rcar3.h"
#define LPSTS 0x102
+#define UGCTRL 0x180 /* 32-bit register */
#define UGCTRL2 0x184 /* 32-bit register */
+#define UGSTS 0x188 /* 32-bit register */
/* Low Power Status register (LPSTS) */
#define LPSTS_SUSPM 0x4000
+/* R-Car D3 only: USB General control register (UGCTRL) */
+#define UGCTRL_PLLRESET 0x00000001
+#define UGCTRL_CONNECT 0x00000004
+
/*
* USB General control register 2 (UGCTRL2)
* Remarks: bit[31:11] and bit[9:6] should be 0
*/
#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
+#define UGCTRL2_USB0SEL_HSUSB 0x00000020
#define UGCTRL2_USB0SEL_OTG 0x00000030
#define UGCTRL2_VBUSSEL 0x00000400
+/* R-Car D3 only: USB General status register (UGSTS) */
+#define UGSTS_LOCK 0x00000100
+
static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
{
iowrite32(data, priv->base + reg);
}
+static u32 usbhs_read32(struct usbhs_priv *priv, u32 reg)
+{
+ return ioread32(priv->base + reg);
+}
+
static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
void __iomem *base, int enable)
{
@@ -52,6 +63,34 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
return 0;
}
+/* R-Car D3 needs to release UGCTRL.PLLRESET */
+static int usbhs_rcar3_power_and_pll_ctrl(struct platform_device *pdev,
+ void __iomem *base, int enable)
+{
+ struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
+ u32 val;
+ int timeout = 1000;
+
+ if (enable) {
+ usbhs_write32(priv, UGCTRL, 0); /* release PLLRESET */
+ usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 |
+ UGCTRL2_USB0SEL_HSUSB);
+
+ usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
+ do {
+ val = usbhs_read32(priv, UGSTS);
+ udelay(1);
+ } while (!(val & UGSTS_LOCK) && timeout--);
+ usbhs_write32(priv, UGCTRL, UGCTRL_CONNECT);
+ } else {
+ usbhs_write32(priv, UGCTRL, 0);
+ usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
+ usbhs_write32(priv, UGCTRL, UGCTRL_PLLRESET);
+ }
+
+ return 0;
+}
+
static int usbhs_rcar3_get_id(struct platform_device *pdev)
{
return USBHS_GADGET;
@@ -61,3 +100,8 @@ const struct renesas_usbhs_platform_callback usbhs_rcar3_ops = {
.power_ctrl = usbhs_rcar3_power_ctrl,
.get_id = usbhs_rcar3_get_id,
};
+
+const struct renesas_usbhs_platform_callback usbhs_rcar3_with_pll_ops = {
+ .power_ctrl = usbhs_rcar3_power_and_pll_ctrl,
+ .get_id = usbhs_rcar3_get_id,
+};
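usbhs_rcar3_power_and_pll_ctrl() releases UGCTRL.PLLRESET and then busy-waits, udelay(1) per iteration for up to roughly a millisecond, until UGSTS.LOCK is set before asserting CONNECT. Where the caller is allowed to sleep, the same wait could be phrased with the iopoll helper; a hedged alternative sketch (not the in-tree code), reusing the UGSTS/UGSTS_LOCK definitions added above:

#include <linux/io.h>
#include <linux/iopoll.h>

static int example_wait_pll_lock(void __iomem *base)
{
        u32 val;

        /* Poll UGSTS every ~10 us, give up after 1 ms. */
        return readl_poll_timeout(base + UGSTS, val, val & UGSTS_LOCK,
                                  10, 1000);
}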
diff --git a/drivers/usb/renesas_usbhs/rcar3.h b/drivers/usb/renesas_usbhs/rcar3.h
index 5f850b23ff18..49e535a31771 100644
--- a/drivers/usb/renesas_usbhs/rcar3.h
+++ b/drivers/usb/renesas_usbhs/rcar3.h
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
#include "common.h"
extern const struct renesas_usbhs_platform_callback usbhs_rcar3_ops;
+extern const struct renesas_usbhs_platform_callback usbhs_rcar3_with_pll_ops;
diff --git a/drivers/usb/serial/Makefile-keyspan_pda_fw b/drivers/usb/serial/Makefile-keyspan_pda_fw
index c20baf728011..503b472d85f2 100644
--- a/drivers/usb/serial/Makefile-keyspan_pda_fw
+++ b/drivers/usb/serial/Makefile-keyspan_pda_fw
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# some rules to handle the quirks of the 'as31' assembler, like
# insisting upon fixed suffixes for the input and output files,
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 569c2200ba42..84d52953dd0a 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AIRcable USB Bluetooth Dongle Driver.
*
* Copyright (C) 2010 Johan Hovold <jhovold@gmail.com>
* Copyright (C) 2006 Manuel Francisco Naranjo (naranjo.manuel@gmail.com)
*
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- *
* The device works as a standard CDC device; it has 2 interfaces, the first
* one is for firmware access and the second is the serial one.
* The protocol is very simple; there are two possibilities: reading or writing.
@@ -161,4 +158,4 @@ module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 0adbd38b4eea..3c544782f60b 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2009 by Bart Hartgers (bart.hartgers+ark3116@gmail.com)
* Original version:
@@ -15,11 +16,6 @@
* into the old ark3116.c driver and suddenly realized the ark3116 is
* a 16450 with a USB interface glued to it. See comments at the
* bottom of this file.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 15bc71853db5..c1235d5b9fba 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Belkin USB Serial Adapter Driver
*
@@ -9,11 +10,6 @@
* and associated source files. Please see the usb/serial files for
* individual credits and copyrights.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
diff --git a/drivers/usb/serial/belkin_sa.h b/drivers/usb/serial/belkin_sa.h
index c74b58ab56f9..51bc06287603 100644
--- a/drivers/usb/serial/belkin_sa.h
+++ b/drivers/usb/serial/belkin_sa.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Definitions for Belkin USB Serial Adapter Driver
*
@@ -8,11 +9,6 @@
* and associated source files. Please see the usb/serial files for
* individual credits and copyrights.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 8936a83c96cd..9e265eb92611 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial Converter Bus specific functions
*
* Copyright (C) 2002 Greg Kroah-Hartman (greg@kroah.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 351745aec0e1..bdd7a5ad3bf1 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2007, Frank A Kingswood <frank@kingswood-consulting.co.uk>
* Copyright 2007, Werner Cornelius <werner@cornelius-consult.de>
@@ -9,10 +10,6 @@
* serial port, an IEEE-1284 parallel printer port or a memory-like
* interface. In all cases the CH341 supports an I2C interface as well.
* This driver only supports the asynchronous serial interface.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -644,4 +641,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 43a862a90a77..17940589c647 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial Console driver
*
* Copyright (C) 2001 - 2002 Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* Thanks to Randy Dunlap for the original version of this code.
*
*/
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 412f812522ee..7c6273bf5beb 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Silicon Laboratories CP210x USB to RS232 serial adaptor driver
*
* Copyright (C) 2005 Craig Shelley (craig@microtron.org.uk)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* Support to set flow control line levels using TIOCMGET and TIOCMSET
* thanks to Karl Hiramoto karl@hiramoto.org. RTSCTS hardware flow
* control thanks to Munir Nassar nassarmu@real-time.com
@@ -1529,4 +1526,4 @@ static void cp210x_release(struct usb_serial *serial)
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 47fbd9f0c0c7..dc67a2eb98d7 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* REINER SCT cyberJack pinpad/e-com USB Chipcard Reader Driver
*
@@ -10,11 +11,6 @@
* and associated source files. Please see the usb/serial files for
* individual credits and copyrights.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Thanks to Greg Kroah-Hartman (greg@kroah.com) for his help and
* patience.
*
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 90110de715e0..e0035c023120 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Cypress M8 driver
*
@@ -6,11 +7,6 @@
* Copyright (C) 2003,2004
* Neil Whelchel (koyama@firstlight.net)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 2ce39af32cfa..b0526786fb02 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Digi AccelePort USB-4 and USB-2 Serial Converters
*
* Copyright 2000 by Digi International
*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
-*
* Shamelessly based on Brian Warner's keyspan_pda.c and Greg Kroah-Hartman's
* usb-serial driver.
*
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 90e603d5f660..d680bec62547 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Empeg empeg-car player driver
*
@@ -7,10 +8,6 @@
* Copyright (C) 1999 - 2001
* Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, as published by
- * the Free Software Foundation, version 2.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*/
@@ -126,4 +123,4 @@ module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 972f5a5fe577..96036f87b1de 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Fintek F81232 USB to serial adaptor driver
*
* Copyright (C) 2012 Greg Kroah-Hartman (gregkh@linuxfoundation.org)
* Copyright (C) 2012 Linux Foundation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index 3d616a2a9f96..e4573b4c8935 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* F81532/F81534 USB to Serial Ports Bridge
*
* F81532 => 2 Serial Ports
* F81534 => 4 Serial Ports
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Copyright (C) 2016 Feature Integration Technology Inc., (Fintek)
* Copyright (C) 2016 Tom Tsai (Tom_Tsai@fintek.com.tw)
* Copyright (C) 2016 Peter Hong (Peter_Hong@fintek.com.tw)
@@ -39,9 +35,11 @@
#define F81534_UART_OFFSET 0x10
#define F81534_DIVISOR_LSB_REG (0x00 + F81534_UART_BASE_ADDRESS)
#define F81534_DIVISOR_MSB_REG (0x01 + F81534_UART_BASE_ADDRESS)
+#define F81534_INTERRUPT_ENABLE_REG (0x01 + F81534_UART_BASE_ADDRESS)
#define F81534_FIFO_CONTROL_REG (0x02 + F81534_UART_BASE_ADDRESS)
#define F81534_LINE_CONTROL_REG (0x03 + F81534_UART_BASE_ADDRESS)
#define F81534_MODEM_CONTROL_REG (0x04 + F81534_UART_BASE_ADDRESS)
+#define F81534_LINE_STATUS_REG (0x05 + F81534_UART_BASE_ADDRESS)
#define F81534_MODEM_STATUS_REG (0x06 + F81534_UART_BASE_ADDRESS)
#define F81534_CONFIG1_REG (0x09 + F81534_UART_BASE_ADDRESS)
@@ -126,9 +124,13 @@ struct f81534_serial_private {
struct f81534_port_private {
struct mutex mcr_mutex;
+ struct mutex lcr_mutex;
+ struct work_struct lsr_work;
+ struct usb_serial_port *port;
unsigned long tx_empty;
spinlock_t msr_lock;
u8 shadow_mcr;
+ u8 shadow_lcr;
u8 shadow_msr;
u8 phy_num;
};
@@ -461,6 +463,7 @@ static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate)
static int f81534_set_port_config(struct usb_serial_port *port, u32 baudrate,
u8 lcr)
{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
u32 divisor;
int status;
u8 value;
@@ -489,35 +492,65 @@ static int f81534_set_port_config(struct usb_serial_port *port, u32 baudrate,
}
divisor = f81534_calc_baud_divisor(baudrate, F81534_MAX_BAUDRATE);
+
+ mutex_lock(&port_priv->lcr_mutex);
+
value = UART_LCR_DLAB;
status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG,
value);
if (status) {
dev_err(&port->dev, "%s: set LCR failed\n", __func__);
- return status;
+ goto out_unlock;
}
value = divisor & 0xff;
status = f81534_set_port_register(port, F81534_DIVISOR_LSB_REG, value);
if (status) {
dev_err(&port->dev, "%s: set DLAB LSB failed\n", __func__);
- return status;
+ goto out_unlock;
}
value = (divisor >> 8) & 0xff;
status = f81534_set_port_register(port, F81534_DIVISOR_MSB_REG, value);
if (status) {
dev_err(&port->dev, "%s: set DLAB MSB failed\n", __func__);
- return status;
+ goto out_unlock;
}
- status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG, lcr);
+ value = lcr | (port_priv->shadow_lcr & UART_LCR_SBC);
+ status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG,
+ value);
if (status) {
dev_err(&port->dev, "%s: set LCR failed\n", __func__);
- return status;
+ goto out_unlock;
}
- return 0;
+ port_priv->shadow_lcr = value;
+out_unlock:
+ mutex_unlock(&port_priv->lcr_mutex);
+
+ return status;
+}
+
+static void f81534_break_ctl(struct tty_struct *tty, int break_state)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ int status;
+
+ mutex_lock(&port_priv->lcr_mutex);
+
+ if (break_state)
+ port_priv->shadow_lcr |= UART_LCR_SBC;
+ else
+ port_priv->shadow_lcr &= ~UART_LCR_SBC;
+
+ status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG,
+ port_priv->shadow_lcr);
+ if (status)
+ dev_err(&port->dev, "set break failed: %d\n", status);
+
+ mutex_unlock(&port_priv->lcr_mutex);
}
static int f81534_update_mctrl(struct usb_serial_port *port, unsigned int set,
@@ -1015,6 +1048,8 @@ static void f81534_process_per_serial_block(struct usb_serial_port *port,
tty_insert_flip_char(&port->port, 0,
TTY_OVERRUN);
}
+
+ schedule_work(&port_priv->lsr_work);
}
if (port->port.console && port->sysrq) {
@@ -1162,6 +1197,21 @@ static int f81534_attach(struct usb_serial *serial)
return 0;
}
+static void f81534_lsr_worker(struct work_struct *work)
+{
+ struct f81534_port_private *port_priv;
+ struct usb_serial_port *port;
+ int status;
+ u8 tmp;
+
+ port_priv = container_of(work, struct f81534_port_private, lsr_work);
+ port = port_priv->port;
+
+ status = f81534_get_port_register(port, F81534_LINE_STATUS_REG, &tmp);
+ if (status)
+ dev_warn(&port->dev, "read LSR failed: %d\n", status);
+}
+
static int f81534_port_probe(struct usb_serial_port *port)
{
struct f81534_port_private *port_priv;
@@ -1173,6 +1223,8 @@ static int f81534_port_probe(struct usb_serial_port *port)
spin_lock_init(&port_priv->msr_lock);
mutex_init(&port_priv->mcr_mutex);
+ mutex_init(&port_priv->lcr_mutex);
+ INIT_WORK(&port_priv->lsr_work, f81534_lsr_worker);
/* Assign logic-to-phy mapping */
ret = f81534_logic_to_phy_port(port->serial, port);
@@ -1180,10 +1232,30 @@ static int f81534_port_probe(struct usb_serial_port *port)
return ret;
port_priv->phy_num = ret;
+ port_priv->port = port;
usb_set_serial_port_data(port, port_priv);
dev_dbg(&port->dev, "%s: port_number: %d, phy_num: %d\n", __func__,
port->port_number, port_priv->phy_num);
+ /*
+ * The F81532/534 hangs up when the LSR interrupt is enabled in the
+ * IER and a data overrun occurs. So we disable the LSR interrupt in
+ * probe() and schedule the LSR worker to clear the LSR state whenever
+ * an LSR error bit is reported with bulk-in data in
+ * f81534_process_per_serial_block().
+ */
+ ret = f81534_set_port_register(port, F81534_INTERRUPT_ENABLE_REG,
+ UART_IER_RDI | UART_IER_THRI | UART_IER_MSI);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int f81534_port_remove(struct usb_serial_port *port)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+
+ flush_work(&port_priv->lsr_work);
return 0;
}
@@ -1317,6 +1389,8 @@ static struct usb_serial_driver f81534_device = {
.calc_num_ports = f81534_calc_num_ports,
.attach = f81534_attach,
.port_probe = f81534_port_probe,
+ .port_remove = f81534_port_remove,
+ .break_ctl = f81534_break_ctl,
.dtr_rts = f81534_dtr_rts,
.process_read_urb = f81534_process_read_urb,
.ioctl = f81534_ioctl,
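
Aside: the f81534 hunks above wire the LSR workaround through three places - INIT_WORK() in port_probe(), schedule_work() from the bulk-in completion path, and flush_work() in the new port_remove(). A condensed sketch of that deferred-work idiom follows; the struct layout is abbreviated and the register helper is assumed to behave exactly as in the driver, so this is an illustration rather than the patch itself.

/* Abbreviated illustration of the deferred LSR clear (not the patch itself). */
struct f81534_port_private {
	struct usb_serial_port *port;
	struct work_struct lsr_work;
	/* ... remaining fields elided ... */
};

static void f81534_lsr_worker(struct work_struct *work)
{
	struct f81534_port_private *priv =
		container_of(work, struct f81534_port_private, lsr_work);
	u8 lsr;

	/* Runs from the workqueue, so a blocking register read is fine here. */
	f81534_get_port_register(priv->port, F81534_LINE_STATUS_REG, &lsr);
}

/* port_probe():        INIT_WORK(&priv->lsr_work, f81534_lsr_worker); */
/* bulk-in completion:  schedule_work(&priv->lsr_work);                */
/* port_remove():       flush_work(&priv->lsr_work);                   */

The indirection exists because reading the register involves a blocking USB transfer, which cannot be issued from the URB completion path; the completion handler therefore only queues the work.
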
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 49d1b2d4606d..1aba9105b369 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB FTDI SIO driver
*
@@ -9,11 +10,6 @@
* Copyright (C) 2002
* Kuba Ober (kuba@mareimbrium.org)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index b2f2e87aed94..633550ec3025 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Garmin GPS driver
*
@@ -7,20 +8,6 @@
* http://sourceforge.net/projects/garmin-gps/
*
* This driver has been derived from v2.1 of the visor driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111 USA
*/
#include <linux/kernel.h>
@@ -138,6 +125,7 @@ struct garmin_data {
__u8 privpkt[4*6];
spinlock_t lock;
struct list_head pktlist;
+ struct usb_anchor write_urbs;
};
@@ -875,42 +863,38 @@ static int garmin_clear(struct garmin_data *garmin_data_p)
static int garmin_init_session(struct usb_serial_port *port)
{
- struct usb_serial *serial = port->serial;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
- int status = 0;
- int i = 0;
+ int status;
+ int i;
- if (status == 0) {
- usb_kill_urb(port->interrupt_in_urb);
+ usb_kill_urb(port->interrupt_in_urb);
- dev_dbg(&serial->dev->dev, "%s - adding interrupt input\n", __func__);
- status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
- if (status)
- dev_err(&serial->dev->dev,
- "%s - failed submitting interrupt urb, error %d\n",
- __func__, status);
+ status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
+ if (status) {
+ dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
+ status);
+ return status;
}
/*
* using the initialization method from gpsbabel. See comments in
* gpsbabel/jeeps/gpslibusb.c gusb_reset_toggles()
*/
- if (status == 0) {
- dev_dbg(&serial->dev->dev, "%s - starting session ...\n", __func__);
- garmin_data_p->state = STATE_ACTIVE;
+ dev_dbg(&port->dev, "%s - starting session ...\n", __func__);
+ garmin_data_p->state = STATE_ACTIVE;
- for (i = 0; i < 3; i++) {
- status = garmin_write_bulk(port,
- GARMIN_START_SESSION_REQ,
- sizeof(GARMIN_START_SESSION_REQ), 0);
+ for (i = 0; i < 3; i++) {
+ status = garmin_write_bulk(port, GARMIN_START_SESSION_REQ,
+ sizeof(GARMIN_START_SESSION_REQ), 0);
+ if (status < 0)
+ goto err_kill_urbs;
+ }
- if (status < 0)
- break;
- }
+ return 0;
- if (status > 0)
- status = 0;
- }
+err_kill_urbs:
+ usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
+ usb_kill_urb(port->interrupt_in_urb);
return status;
}
@@ -930,7 +914,6 @@ static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port)
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
/* shutdown any bulk reads that might be going on */
- usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
if (garmin_data_p->state == STATE_RESET)
@@ -953,7 +936,7 @@ static void garmin_close(struct usb_serial_port *port)
/* shutdown our urbs */
usb_kill_urb(port->read_urb);
- usb_kill_urb(port->write_urb);
+ usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
/* keep reset state so we know that we must start a new session */
if (garmin_data_p->state != STATE_RESET)
@@ -1037,12 +1020,14 @@ static int garmin_write_bulk(struct usb_serial_port *port,
}
/* send it down the pipe */
+ usb_anchor_urb(urb, &garmin_data_p->write_urbs);
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
dev_err(&port->dev,
"%s - usb_submit_urb(write bulk) failed with status = %d\n",
__func__, status);
count = status;
+ usb_unanchor_urb(urb);
kfree(buffer);
}
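
Aside: garmin_write_bulk() now anchors each write URB before submission and unanchors it only if the submit fails, which lets open/close/remove cancel every in-flight write with usb_kill_anchored_urbs() instead of tracking a single port->write_urb. A minimal sketch of that anchor lifecycle; the example_* names are hypothetical and the real function's buffer management is elided.

#include <linux/usb.h>

struct example_priv {
	struct usb_anchor write_urbs;		/* init_usb_anchor() at probe */
};

static int example_write(struct example_priv *priv, struct urb *urb)
{
	int ret;

	usb_anchor_urb(urb, &priv->write_urbs);	/* track before submitting */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret)
		usb_unanchor_urb(urb);		/* submit failed: stop tracking */

	return ret;
}

static void example_close(struct example_priv *priv)
{
	/* Cancel every URB still on the anchor and wait for each to finish. */
	usb_kill_anchored_urbs(&priv->write_urbs);
}
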
@@ -1370,9 +1355,9 @@ static void garmin_unthrottle(struct tty_struct *tty)
* the tty in cases where the protocol provides no own handshaking
* to initiate the transfer.
*/
-static void timeout_handler(unsigned long data)
+static void timeout_handler(struct timer_list *t)
{
- struct garmin_data *garmin_data_p = (struct garmin_data *) data;
+ struct garmin_data *garmin_data_p = from_timer(garmin_data_p, t, timer);
/* send the next queued packet to the tty port */
if (garmin_data_p->mode == MODE_NATIVE)
@@ -1391,19 +1376,23 @@ static int garmin_port_probe(struct usb_serial_port *port)
if (!garmin_data_p)
return -ENOMEM;
- init_timer(&garmin_data_p->timer);
+ timer_setup(&garmin_data_p->timer, timeout_handler, 0);
spin_lock_init(&garmin_data_p->lock);
INIT_LIST_HEAD(&garmin_data_p->pktlist);
- /* garmin_data_p->timer.expires = jiffies + session_timeout; */
- garmin_data_p->timer.data = (unsigned long)garmin_data_p;
- garmin_data_p->timer.function = timeout_handler;
garmin_data_p->port = port;
garmin_data_p->state = 0;
garmin_data_p->flags = 0;
garmin_data_p->count = 0;
+ init_usb_anchor(&garmin_data_p->write_urbs);
usb_set_serial_port_data(port, garmin_data_p);
status = garmin_init_session(port);
+ if (status)
+ goto err_free;
+
+ return 0;
+err_free:
+ kfree(garmin_data_p);
return status;
}
@@ -1413,6 +1402,7 @@ static int garmin_port_remove(struct usb_serial_port *port)
{
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
+ usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
usb_kill_urb(port->interrupt_in_urb);
del_timer_sync(&garmin_data_p->timer);
kfree(garmin_data_p);
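
Aside: the last garmin hunks are part of the tree-wide timer API conversion - timer_setup() replaces init_timer() plus the manual .data/.function assignments, and the callback recovers its container with from_timer(). A small self-contained sketch of the pattern, using a hypothetical my_data structure rather than struct garmin_data:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_data {
	struct timer_list timer;
	int pending;
};

static void my_timeout(struct timer_list *t)
{
	/* from_timer() maps the timer_list back to its containing struct. */
	struct my_data *data = from_timer(data, t, timer);

	data->pending = 0;
}

static void my_start(struct my_data *data)
{
	/* Replaces init_timer() plus the .data/.function assignments. */
	timer_setup(&data->timer, my_timeout, 0);
	mod_timer(&data->timer, jiffies + msecs_to_jiffies(100));
}
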
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 35cb8c0e584f..2274d9625f63 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial Converter Generic functions
*
* Copyright (C) 2010 - 2013 Johan Hovold (jhovold@gmail.com)
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/io_16654.h b/drivers/usb/serial/io_16654.h
index a53abc9530ff..4980f72dc56f 100644
--- a/drivers/usb/serial/io_16654.h
+++ b/drivers/usb/serial/io_16654.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
*
* 16654.H Definitions for 16C654 UART used on EdgePorts
*
* Copyright (C) 1998 Inside Out Networks, Inc.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*
************************************************************************/
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index bdf8bd814a9a..219265ce3711 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Edgeport USB Serial Converter driver
*
* Copyright (C) 2000 Inside Out Networks, All rights reserved.
* Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Supports the following devices:
* Edgeport/4
* Edgeport/4t
diff --git a/drivers/usb/serial/io_edgeport.h b/drivers/usb/serial/io_edgeport.h
index ad9c1d47a619..2e7fedbaf2ff 100644
--- a/drivers/usb/serial/io_edgeport.h
+++ b/drivers/usb/serial/io_edgeport.h
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
*
* io_edgeport.h Edgeport Linux Interface definitions
*
* Copyright (C) 2000 Inside Out Networks, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- *
************************************************************************/
#if !defined(_IO_EDGEPORT_H_)
diff --git a/drivers/usb/serial/io_ionsp.h b/drivers/usb/serial/io_ionsp.h
index 5cc591bae54d..4b8e4823bd45 100644
--- a/drivers/usb/serial/io_ionsp.h
+++ b/drivers/usb/serial/io_ionsp.h
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
*
* IONSP.H Definitions for I/O Networks Serial Protocol
*
* Copyright (C) 1997-1998 Inside Out Networks, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* These definitions are used by both kernel-mode driver and the
* peripheral firmware and MUST be kept in sync.
*
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 6cefb9cb133d..0fbadb37c104 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Edgeport USB Serial Converter driver
*
* Copyright (C) 2000-2002 Inside Out Networks, All rights reserved.
* Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Supports the following devices:
* EP/1 EP/2 EP/4 EP/21 EP/22 EP/221 EP/42 EP/421 WATCHPORT
*
diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h
index 1bd67b24f916..e53c68261017 100644
--- a/drivers/usb/serial/io_ti.h
+++ b/drivers/usb/serial/io_ti.h
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*****************************************************************************
*
* Copyright (C) 1997-2002 Inside Out Networks, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- *
* Feb-16-2001 DMI Added I2C structure definitions
* May-29-2002 gkh Ported to Linux
*
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index 6f6a856bc37c..c38e87ac5ea9 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
*
* USBVEND.H Vendor-specific USB definitions
@@ -8,10 +9,6 @@
************************************************************************
*
* Copyright (C) 1998 Inside Out Networks, Inc.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*
************************************************************************/
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index cde0dcdce9c4..f81746c3c26c 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Compaq iPAQ driver
*
* Copyright (C) 2001 - 2002
* Ganesh Varadarajan <ganesh@veritas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 8b1cf18a668b..d04c7cc5c1c2 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* IPWireless 3G UMTS TDD Modem driver (USB connected)
*
* Copyright (C) 2004 Roelf Diedericks <roelfd@inet.co.za>
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* All information about the device was acquired using SnoopyPro
 * on MSFT's O/S, and examining the MSFT drivers' debug output
* (insanely left _on_ in the enduser version)
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index f9734a96d516..24b06c7e5e2d 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB IR Dongle driver
*
@@ -5,11 +6,6 @@
* Copyright (C) 2002 Gary Brubaker (xavyer@ix.netcom.com)
* Copyright (C) 2010 Johan Hovold (jhovold@gmail.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* This driver allows a USB IrDA device to be used as a "dumb" serial device.
* This can be useful if you do not have access to a full IrDA stack on the
* other side of the connection. If you do have an IrDA stack on both devices,
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 18fc992a245f..397a8012ffa3 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Infinity Unlimited USB Phoenix driver
*
@@ -7,13 +8,7 @@
*
* Original code taken from iuutool (Copyright (C) 2006 Juan Carlos Borrás)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* And tested with help of WB Electronics
- *
*/
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/usb/serial/iuu_phoenix.h b/drivers/usb/serial/iuu_phoenix.h
index b82630a3b8fd..b400b262f72e 100644
--- a/drivers/usb/serial/iuu_phoenix.h
+++ b/drivers/usb/serial/iuu_phoenix.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Infinity Unlimited USB Phoenix driver
*
@@ -6,13 +7,7 @@
*
* Original code taken from iuutool ( Copyright (C) 2006 Juan Carlos Borrás )
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* And tested with help of WB Electronics
- *
*/
#define IUU_USB_VENDOR_ID 0x104f
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 5662d324edd2..d34779fe4a8d 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
Keyspan USB to Serial Converter driver
(C) Copyright (C) 2000-2001 Hugh Blemings <hugh@blemings.org>
(C) Copyright (C) 2002 Greg Kroah-Hartman <greg@kroah.com>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
See http://blemings.org/hugh/keyspan.html for more information.
Code in this driver inspired by and in a number of places taken
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 196908dd25a1..5169624d8b11 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Keyspan PDA / Xircom / Entrega Converter driver
*
@@ -5,11 +6,6 @@
* Copyright (C) 1999, 2000 Brian Warner <warner@lothar.com>
* Copyright (C) 2000 Al Borchers <borchers@steinerpoint.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*/
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 595415e59d5d..5046ffd53cde 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* KLSI KL5KUSB105 chip RS232 converter driver
*
* Copyright (C) 2010 Johan Hovold <jhovold@gmail.com>
* Copyright (C) 2001 Utz-Uwe Haus <haus@uuhaus.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
 * All information about the device was acquired using SniffUSB and snoopUSB
* on Windows98.
* It was written out of frustration with the PalmConnect USB Serial adapter
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 3024b9b25360..a31ea7e194dd 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* KOBIL USB Smart Card Terminal Driver
*
@@ -10,11 +11,6 @@
* and associated source files. Please see the usb/serial files for
* individual credits and copyrights.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Thanks to Greg Kroah-Hartman (greg@kroah.com) for his help and
* patience.
*
@@ -491,6 +487,7 @@ static void kobil_set_termios(struct tty_struct *tty,
break;
default:
speed = 9600;
+ /* fall through */
case 9600:
urb_val = SUSBCR_SBR_9600;
break;
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 70f346f1aa86..7887c312d9a9 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* MCT (Magic Control Technology Corp.) USB RS232 Converter Driver
*
* Copyright (C) 2000 Wolfgang Grandegger (wolfgang@ces.ch)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* This program is largely derived from the Belkin USB Serial Adapter Driver
* (see belkin_sa.[ch]). All of the information about the device was acquired
* by using SniffUSB on Windows98. For technical details see mct_u232.h.
diff --git a/drivers/usb/serial/mct_u232.h b/drivers/usb/serial/mct_u232.h
index d325bb8cb583..0084edf518e8 100644
--- a/drivers/usb/serial/mct_u232.h
+++ b/drivers/usb/serial/mct_u232.h
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Definitions for MCT (Magic Control Technology) USB-RS232 Converter Driver
*
* Copyright (C) 2000 Wolfgang Grandegger (wolfgang@ces.ch)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* This driver is for the device MCT USB-RS232 Converter (25 pin, Model No.
* U232-P25) from Magic Control Technology Corp. (there is also a 9 pin
* Model No. U232-P9). See http://www.mct.com.tw/products/product_us232.html
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 14511d6a7d44..e63cea02cfd8 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
Some of this code is credited to Linux USB open source files that are
distributed with Linux.
@@ -54,21 +55,33 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define UNI_CMD_OPEN 0x80
#define UNI_CMD_CLOSE 0xFF
-static inline int metrousb_is_unidirectional_mode(struct usb_serial_port *port)
+static int metrousb_is_unidirectional_mode(struct usb_serial *serial)
{
- __u16 product_id = le16_to_cpu(
- port->serial->dev->descriptor.idProduct);
+ u16 product_id = le16_to_cpu(serial->dev->descriptor.idProduct);
return product_id == FOCUS_PRODUCT_ID_UNI;
}
+static int metrousb_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
+{
+ if (metrousb_is_unidirectional_mode(serial)) {
+ if (epds->num_interrupt_out == 0) {
+ dev_err(&serial->interface->dev, "interrupt-out endpoint missing\n");
+ return -ENODEV;
+ }
+ }
+
+ return 1;
+}
+
static int metrousb_send_unidirectional_cmd(u8 cmd, struct usb_serial_port *port)
{
int ret;
int actual_len;
u8 *buffer_cmd = NULL;
- if (!metrousb_is_unidirectional_mode(port))
+ if (!metrousb_is_unidirectional_mode(port->serial))
return 0;
buffer_cmd = kzalloc(sizeof(cmd), GFP_KERNEL);
@@ -161,13 +174,6 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
unsigned long flags = 0;
int result = 0;
- /* Make sure the urb is initialized. */
- if (!port->interrupt_in_urb) {
- dev_err(&port->dev, "%s - interrupt urb not initialized\n",
- __func__);
- return -ENODEV;
- }
-
/* Set the private data information for the port. */
spin_lock_irqsave(&metro_priv->lock, flags);
metro_priv->control_state = 0;
@@ -189,7 +195,7 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
dev_err(&port->dev,
"%s - failed submitting interrupt in urb, error code=%d\n",
__func__, result);
- goto exit;
+ return result;
}
/* Send activate cmd to device */
@@ -198,9 +204,14 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
dev_err(&port->dev,
"%s - failed to configure device, error code=%d\n",
__func__, result);
- goto exit;
+ goto err_kill_urb;
}
-exit:
+
+ return 0;
+
+err_kill_urb:
+ usb_kill_urb(port->interrupt_in_urb);
+
return result;
}
@@ -337,7 +348,8 @@ static struct usb_serial_driver metrousb_device = {
},
.description = "Metrologic USB to Serial",
.id_table = id_table,
- .num_ports = 1,
+ .num_interrupt_in = 1,
+ .calc_num_ports = metrousb_calc_num_ports,
.open = metrousb_open,
.close = metrousb_cleanup,
.read_int_callback = metrousb_read_int_callback,
@@ -356,7 +368,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Philip Nicastro");
MODULE_AUTHOR("Aleksey Babahin <tamerlan311@gmail.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
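
Aside: the metro-usb change above drops the fixed .num_ports = 1 in favour of a calc_num_ports() callback plus .num_interrupt_in = 1, so the core enforces the interrupt-in endpoint and the driver itself only has to check for the interrupt-out endpoint in unidirectional mode. A stripped-down sketch of that callback shape, with a hypothetical driver prefix; the usb_serial_endpoints field is the one used in the hunk:

static int example_calc_num_ports(struct usb_serial *serial,
				  struct usb_serial_endpoints *epds)
{
	/* Refuse to bind when a required endpoint is absent. */
	if (epds->num_interrupt_out == 0) {
		dev_err(&serial->interface->dev,
			"interrupt-out endpoint missing\n");
		return -ENODEV;		/* negative errno aborts the bind */
	}

	return 1;			/* number of ports to register */
}

The endpoint minimums declared in the usb_serial_driver itself (here .num_interrupt_in = 1) are enforced by the core independently of this callback.
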
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index a453965f9e9a..f4df3d5bf69c 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mos7720.c
* Controls the Moschip 7720 usb to dual port serial converter
*
* Copyright 2006 Moschip Semiconductor Tech. Ltd.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
- *
* Developed by:
* Vijaya Kumar <vijaykumar.gn@gmail.com>
* Ajay Kumar <naanuajay@yahoo.com>
@@ -2043,4 +2040,4 @@ module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index e8669aae14b3..a859c2d33c29 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1,18 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* Clean ups from Moschip version and a few ioctl implementations by:
* Paul B Schroeder <pschroeder "at" uplogix "dot" com>
*
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index 3aef091fe88b..2513ee902779 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* mxuport.c - MOXA UPort series driver
*
* Copyright (c) 2006 Moxa Technologies Co., Ltd.
* Copyright (c) 2013 Andrew Lunn <andrew@lunn.ch>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Supports the following Moxa USB to serial converters:
* 2 ports : UPort 1250, UPort 1250I
* 4 ports : UPort 1410, UPort 1450, UPort 1450I
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 2a97cdc078d5..20277c52dded 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Navman Serial USB driver
*
* Copyright (C) 2006 Greg Kroah-Hartman <gregkh@suse.de>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
* TODO:
* Add termios method that uses copy_hw but also kills all echo
* flags as the navman is rx only so cannot echo.
@@ -115,4 +112,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index efcd7feed6f4..e51c9464ea42 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB ZyXEL omni.net LCD PLUS driver
*
* Copyright (C) 2013,2017 Johan Hovold <johan@kernel.org>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
@@ -179,4 +176,4 @@ module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 58657d64678b..caa0746326fd 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Opticon USB barcode to serial driver
*
@@ -5,10 +6,6 @@
* Copyright (C) 2011 Martin Jansen <martin.jansen@opticon.com>
* Copyright (C) 2008 - 2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (C) 2008 - 2009 Novell Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -420,4 +417,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ba672cf4e888..aaa7d901a06d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
USB Driver for GSM modems
Copyright (C) 2005 Matthias Urlichs <smurf@smurf.noris.de>
- This driver is free software; you can redistribute it and/or modify
- it under the terms of Version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org>
History: see the git log.
@@ -2206,4 +2203,4 @@ static void option_instat_callback(struct urb *urb)
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index b11eead469ee..ae9cb15ee02d 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Ours Technology Inc. OTi-6858 USB to serial adapter driver.
*
@@ -21,10 +22,6 @@
* So, THIS CODE CAN DESTROY OTi-6858 AND ANY OTHER DEVICES, THAT ARE
* CONNECTED TO IT!
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
@@ -847,4 +844,4 @@ module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(OTI6858_DESCRIPTION);
MODULE_AUTHOR(OTI6858_AUTHOR);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/oti6858.h b/drivers/usb/serial/oti6858.h
index 704ac3a532b3..1226bf2347eb 100644
--- a/drivers/usb/serial/oti6858.h
+++ b/drivers/usb/serial/oti6858.h
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Ours Technology Inc. OTi-6858 USB to serial adapter driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_USB_SERIAL_OTI6858_H
#define __LINUX_USB_SERIAL_OTI6858_H
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index a585b477415d..57ae832a49ff 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Prolific PL2303 USB to serial adaptor driver
*
@@ -6,10 +7,6 @@
*
* Original driver for 2.2.x by anonymous
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*/
@@ -1025,4 +1022,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION("Prolific PL2303 USB to serial adaptor driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 3b5a15d1dc0d..f98fd84890de 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -1,11 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Prolific PL2303 USB to serial adaptor driver header file
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#define BENQ_VENDOR_ID 0x04a5
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 6e9f8af96959..929ffba663f2 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm USB Auxiliary Serial Port driver
*
* Copyright (C) 2008 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2010 Dan Williams <dcbw@redhat.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Devices listed here usually provide a CDC ACM port on which normal modem
* AT commands and PPP can be used. But when that port is in-use by PPP it
* cannot be used simultaneously for status or signal strength. Instead, the
@@ -87,4 +84,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
};
module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index eb9928963a53..e3892541a489 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm Serial USB driver
*
* Copyright (c) 2008 QUALCOMM Incorporated.
* Copyright (c) 2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2009 Novell Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
*/
#include <linux/tty.h>
@@ -148,6 +144,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */
{DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC7304/MC7354 */
{DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
+ {DEVICE_SWI(0x1199, 0x901e)}, /* Sierra Wireless EM7355 QDL */
{DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
{DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 60e17d1444c3..958e12e1e7c7 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* usb-serial driver for Quatech USB 2 devices
*
* Copyright (C) 2012 Bill Pemberton (wfp5p@virginia.edu)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- *
* These devices all have only 1 bulk in and 1 bulk out that is shared
* for all serial ports.
*
@@ -1027,4 +1023,4 @@ static struct usb_serial_driver *const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 27d7a7016298..6accbecb6318 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Safe Encapsulated USB Serial Driver
*
@@ -5,11 +6,6 @@
* Copyright (C) 2001 Lineo
* Copyright (C) 2001 Hewlett-Packard
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* By:
* Stuart Lynne <sl@lineo.com>, Tom Rushworth <tbr@lineo.com>
*/
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 4c4ac4705ac0..d189f953c891 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
USB Driver for Sierra Wireless
@@ -9,10 +10,6 @@
IMPORTANT DISCLAIMER: This driver is not commercially supported by
Sierra Wireless. Use at your own risk.
- This driver is free software; you can redistribute it and/or modify
- it under the terms of Version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de>
  who based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
*/
@@ -1078,7 +1075,7 @@ module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
module_param(nmea, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nmea, "NMEA streaming");
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 5167b6564c8b..b42714855364 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* spcp8x5 USB to serial adaptor driver
*
@@ -8,11 +9,6 @@
* Original driver for 2.6.10 pl2303 driver by
* Greg Kroah-Hartman (greg@kroah.com)
* Changes for 2.6.20 by Harald Klein <hari@vt100.at>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 5aa7bbbeba3d..2083c267787b 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* usb-serial driver for Quatech SSU-100
*
@@ -576,4 +577,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 0d1727232d0c..cd2f8dc8b58c 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Symbol USB barcode to serial driver
*
* Copyright (C) 2013 Johan Hovold <jhovold@gmail.com>
* Copyright (C) 2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (C) 2009 Novell Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -194,4 +191,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 8fc3854e5e69..6b22857f6e52 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* TI 3410/5052 USB Serial Driver
*
@@ -7,11 +8,6 @@
* Copyright (C) 2000-2002 Inside Out Networks
* Copyright (C) 2001-2002 Greg Kroah-Hartman
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* For questions or problems with this driver, contact Texas Instruments
* technical support, or Al Borchers <alborchers@steinerpoint.com>, or
* Peter Berger <pberger@brimson.com>.
diff --git a/drivers/usb/serial/upd78f0730.c b/drivers/usb/serial/upd78f0730.c
index 6819a3486e5d..1ba1401d27d7 100644
--- a/drivers/usb/serial/upd78f0730.c
+++ b/drivers/usb/serial/upd78f0730.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas Electronics uPD78F0730 USB to serial converter driver
*
* Copyright (C) 2014,2016 Maksim Salau <maksim.salau@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
* Protocol of the adaptor is described in the application note U19660EJ1V0AN00
* μPD78F0730 8-bit Single-Chip Microcontroller
* USB-to-Serial Conversion Software
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index e98b6e57b703..74172fe158df 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial "Simple" driver
*
@@ -8,10 +9,6 @@
* Copyright (C) 2010 Zilogic Systems <code@zilogic.com>
* Copyright (C) 2013 Wei Shuai <cpuwolf@gmail.com>
* Copyright (C) 2013 Linux Foundation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -134,4 +131,4 @@ static const struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index bb34f9f7eaf4..790e0cbe3da9 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial Converter driver
*
@@ -6,10 +7,6 @@
* Copyright (C) 2000 Peter Berger (pberger@brimson.com)
* Copyright (C) 2000 Al Borchers (borchers@steinerpoint.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* This driver was originally based on the ACM driver by Armin Fuerst (which was
* based on a driver by Brad Keryan)
*
@@ -1201,17 +1198,6 @@ static const struct tty_operations serial_ops = {
struct tty_driver *usb_serial_tty_driver;
-/* Driver structure we register with the USB core */
-static struct usb_driver usb_serial_driver = {
- .name = "usbserial",
- .probe = usb_serial_probe,
- .disconnect = usb_serial_disconnect,
- .suspend = usb_serial_suspend,
- .resume = usb_serial_resume,
- .no_dynamic_id = 1,
- .supports_autosuspend = 1,
-};
-
static int __init usb_serial_init(void)
{
int result;
@@ -1247,13 +1233,6 @@ static int __init usb_serial_init(void)
goto exit_reg_driver;
}
- /* register the USB driver */
- result = usb_register(&usb_serial_driver);
- if (result < 0) {
- pr_err("%s - usb_register failed\n", __func__);
- goto exit_tty;
- }
-
/* register the generic driver, if we should */
result = usb_serial_generic_register();
if (result < 0) {
@@ -1264,9 +1243,6 @@ static int __init usb_serial_init(void)
return result;
exit_generic:
- usb_deregister(&usb_serial_driver);
-
-exit_tty:
tty_unregister_driver(usb_serial_tty_driver);
exit_reg_driver:
@@ -1285,7 +1261,6 @@ static void __exit usb_serial_exit(void)
usb_serial_generic_deregister();
- usb_deregister(&usb_serial_driver);
tty_unregister_driver(usb_serial_tty_driver);
put_tty_driver(usb_serial_tty_driver);
bus_unregister(&usb_serial_bus_type);
@@ -1460,4 +1435,4 @@ EXPORT_SYMBOL_GPL(usb_serial_deregister_drivers);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 12f4c5a91e62..ab5a2ac4993a 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Debug cable driver
*
* Copyright (C) 2006 Greg Kroah-Hartman <greg@kroah.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/gfp.h>
@@ -34,13 +31,13 @@ static const struct usb_device_id id_table[] = {
};
static const struct usb_device_id dbc_id_table[] = {
- { USB_DEVICE(0x1d6b, 0x0004) },
+ { USB_DEVICE(0x1d6b, 0x0011) },
{ },
};
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(0x0525, 0x127a) },
- { USB_DEVICE(0x1d6b, 0x0004) },
+ { USB_DEVICE(0x1d6b, 0x0011) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
@@ -98,4 +95,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
};
module_usb_serial_driver(serial_drivers, id_table_combined);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 59bfcb3da116..107e64c42e94 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
USB Driver layer for GSM modems
Copyright (C) 2005 Matthias Urlichs <smurf@smurf.noris.de>
- This driver is free software; you can redistribute it and/or modify
- it under the terms of Version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org>
History: see the git log.
@@ -723,4 +720,4 @@ EXPORT_SYMBOL(usb_wwan_resume);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 9f3317a940ef..f5373ed2cd45 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB HandSpring Visor, Palm m50x, and Sony Clie driver
* (supports all of the Palm OS USB devices)
@@ -5,10 +6,6 @@
* Copyright (C) 1999 - 2004
* Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
@@ -579,4 +576,4 @@ module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/visor.h b/drivers/usb/serial/visor.h
index 4c456dd69ce5..fe290243f1ce 100644
--- a/drivers/usb/serial/visor.h
+++ b/drivers/usb/serial/visor.h
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB HandSpring Visor driver
*
* Copyright (C) 1999 - 2003
* Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver.
*
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 55cebc1e6fec..1c7b46a8620c 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB ConnectTech WhiteHEAT driver
*
@@ -7,11 +8,6 @@
* Copyright (C) 1999 - 2001
* Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*/
diff --git a/drivers/usb/serial/whiteheat.h b/drivers/usb/serial/whiteheat.h
index 38065df4d2d8..72c1b0cf4063 100644
--- a/drivers/usb/serial/whiteheat.h
+++ b/drivers/usb/serial/whiteheat.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB ConnectTech WhiteHEAT driver
*
@@ -7,11 +8,6 @@
* Copyright (C) 1999, 2000
* Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
diff --git a/drivers/usb/serial/wishbone-serial.c b/drivers/usb/serial/wishbone-serial.c
index 4fed4a0bd702..ff4092f9b33c 100644
--- a/drivers/usb/serial/wishbone-serial.c
+++ b/drivers/usb/serial/wishbone-serial.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Wishbone-Serial adapter driver
*
* Copyright (C) 2013 Wesley W. Terpstra <w.terpstra@gsi.de>
* Copyright (C) 2013 GSI Helmholtz Centre for Heavy Ion Research GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/xsens_mt.c b/drivers/usb/serial/xsens_mt.c
index 3837d5113bb2..cf262c9a9638 100644
--- a/drivers/usb/serial/xsens_mt.c
+++ b/drivers/usb/serial/xsens_mt.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xsens MT USB driver
*
* Copyright (C) 2013 Xsens <info@xsens.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -69,4 +66,4 @@ module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR("Frans Klaver <frans.klaver@xsens.com>");
MODULE_DESCRIPTION("USB-serial driver for Xsens motion trackers");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 878b4b8761f5..900591df8bb2 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Alauda-based card readers
*
@@ -15,20 +16,6 @@
* (very old) vendor-supplied GPL sma03 driver.
*
* For protocol info, see http://alauda.sourceforge.net
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index 5e4af44d7d9f..4825902377eb 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Support for emulating SAT (ata pass through) on devices based
* on the Cypress USB/ATA bridge supporting ATACB.
*
* Copyright (c) 2008 Matthieu Castet (castet.matthieu@free.fr)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/usb/storage/datafab.c b/drivers/usb/storage/datafab.c
index 723197af6ec5..09353be199be 100644
--- a/drivers/usb/storage/datafab.c
+++ b/drivers/usb/storage/datafab.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Datafab USB Compact Flash reader
*
@@ -18,20 +19,6 @@
*
* Other contributors:
* (c) 2002 Alan Stern <stern@rowland.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/drivers/usb/storage/debug.c b/drivers/usb/storage/debug.c
index 8d20804a59e6..e5a4969d15ae 100644
--- a/drivers/usb/storage/debug.c
+++ b/drivers/usb/storage/debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* Debugging Functions Source Code File
@@ -27,20 +28,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/device.h>
diff --git a/drivers/usb/storage/debug.h b/drivers/usb/storage/debug.h
index 8ab73299b650..8833cd4f78b6 100644
--- a/drivers/usb/storage/debug.h
+++ b/drivers/usb/storage/debug.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* Debugging Functions Header File
@@ -24,20 +25,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _DEBUG_H_
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 28100374f7bd..93cf57ac47d6 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -1,19 +1,4 @@
-/*
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/module.h>
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index c0a5d954414b..ec4d92c92762 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Freecom USB/IDE adaptor
*
@@ -8,20 +9,6 @@
* Current development and maintenance by:
* (C) 2000 David Brown <usb-storage@davidb.org>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* This driver was developed with information provided in FREECOM's USB
* Programmers Reference Guide. For further information contact Freecom
* (http://www.freecom.de/)
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index d9d8c17e05d1..93a6bcf77806 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Special Initializers for certain USB Mass Storage devices
*
@@ -20,20 +21,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/errno.h>
diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
index 039abf4d1cb7..e4cf28efb4a7 100644
--- a/drivers/usb/storage/initializers.h
+++ b/drivers/usb/storage/initializers.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Header file for Special Initializers for certain USB Mass Storage devices
*
@@ -20,20 +21,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "usb.h"
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 6a7720e66595..f5e4500d9970 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Transport & Protocol Driver for In-System Design, Inc. ISD200 ASIC
*
@@ -14,20 +15,6 @@
* does implement an interface, the ATA Command Block (ATACB) which provides
* a means of passing ATA commands and ATA register accesses to a device.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* History:
*
* 2002-10-19: Removed the specialized transfer routines.
diff --git a/drivers/usb/storage/jumpshot.c b/drivers/usb/storage/jumpshot.c
index 011e5270690a..917f170c4124 100644
--- a/drivers/usb/storage/jumpshot.c
+++ b/drivers/usb/storage/jumpshot.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Lexar "Jumpshot" Compact Flash reader
*
@@ -19,20 +20,6 @@
* Developed with the assistance of:
*
* (C) 2002 Alan Stern <stern@rowland.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index b05ba4929f00..edcf2be0e0eb 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Rio Karma
*
* (c) 2006 Bob Copeland <me@bobcopeland.com>
* (c) 2006 Keith Bennett <keith@mcs.st-and.ac.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index acc3d03d8c1e..39a5009a41a6 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Support for the Maxtor OneTouch USB hard drive's button
*
@@ -10,24 +11,6 @@
* Based on usbmouse.c (Vojtech Pavlik) and xpad.c (Marko Friedemann)
*
*/
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/slab.h>
diff --git a/drivers/usb/storage/option_ms.c b/drivers/usb/storage/option_ms.c
index 57282f12317b..7c0b05a36554 100644
--- a/drivers/usb/storage/option_ms.c
+++ b/drivers/usb/storage/option_ms.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Option High Speed Mobile Devices.
*
* (c) 2008 Dan Williams <dcbw@redhat.com>
*
* Inspiration taken from sierra_ms.c by Kevin Lloyd <klloyd@sierrawireless.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/usb.h>
@@ -41,7 +28,7 @@ MODULE_PARM_DESC(option_zero_cd, "ZeroCD mode (1=Force Modem (default),"
static int option_rezero(struct us_data *us)
{
- const unsigned char rezero_msg[] = {
+ static const unsigned char rezero_msg[] = {
0x55, 0x53, 0x42, 0x43, 0x78, 0x56, 0x34, 0x12,
0x01, 0x00, 0x00, 0x00, 0x80, 0x00, 0x06, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -87,7 +74,7 @@ out:
static int option_inquiry(struct us_data *us)
{
- const unsigned char inquiry_msg[] = {
+ static const unsigned char inquiry_msg[] = {
0x55, 0x53, 0x42, 0x43, 0x12, 0x34, 0x56, 0x78,
0x24, 0x00, 0x00, 0x00, 0x80, 0x00, 0x06, 0x12,
0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00,
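
The two option_ms.c hunks above apply a common cleanup: command templates that are never modified become "static const", so the bytes live in read-only data and are not rebuilt on the stack each time the function runs. A minimal standalone sketch of the pattern (illustrative C, not taken from this patch; do_transfer() is a hypothetical stand-in for the driver's real bulk-transfer helper):

    #include <stddef.h>

    /* Hypothetical helper standing in for the driver's real transfer path. */
    static int do_transfer(const unsigned char *buf, size_t len)
    {
    	(void)buf;
    	return (int)len;	/* a real driver would send the buffer here */
    }

    static int send_fixed_command(void)
    {
    	/*
    	 * "static const" makes this a single read-only object built at
    	 * compile time; a plain local array would be re-initialized on
    	 * the stack on every call.
    	 */
    	static const unsigned char msg[8] = {
    		0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
    	};

    	return do_transfer(msg, sizeof(msg));
    }
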
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 74c38870a17e..f3f2a93f52e1 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
*
@@ -27,20 +28,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/highmem.h>
diff --git a/drivers/usb/storage/protocol.h b/drivers/usb/storage/protocol.h
index a55666880b7b..9198396e8c6e 100644
--- a/drivers/usb/storage/protocol.h
+++ b/drivers/usb/storage/protocol.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* Protocol Functions Header File
@@ -21,20 +22,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _PROTOCOL_H_
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index ec83b3b5efa9..48e2e32c97e8 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek RTS51xx USB card reader
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
* Author:
* wwang (wei_wang@realsil.com.cn)
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 8cd2926fb1fe..585efd120193 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* SCSI layer glue code
@@ -28,20 +29,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/usb/storage/scsiglue.h b/drivers/usb/storage/scsiglue.h
index d0a331dd9bc5..bf99c6201331 100644
--- a/drivers/usb/storage/scsiglue.h
+++ b/drivers/usb/storage/scsiglue.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* SCSI Connecting Glue Header File
@@ -21,20 +22,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _SCSIGLUE_H_
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index 44f8ffccd031..1cf7dbfe277c 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SanDisk SDDR-09 SmartMedia reader
*
@@ -11,20 +12,6 @@
* been programmed to obey a certain limited set of SCSI commands.
* This driver translates the "real" SCSI commands to the SDDR-09 SCSI
* commands.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index 147c50b3e00f..8c814b2ec9b2 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SanDisk SDDR-55 SmartMedia reader
*
@@ -7,20 +8,6 @@
*
* Current development and maintenance by:
* (c) 2002 Simon Munton
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/jiffies.h>
@@ -604,6 +591,7 @@ static unsigned long sddr55_get_capacity(struct us_data *us) {
case 0x64:
info->pageshift = 8;
info->smallpageshift = 1;
+ /* fall through */
case 0x5d: // 5d is a ROM card with pagesize 512.
return 0x00200000;
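
The one-line change above (and the matching one in uas.c further down) annotates an intentional switch fall-through, so builds using -Wimplicit-fallthrough can tell it apart from a forgotten break. A standalone illustration of the annotation, with made-up values that only loosely echo the sddr55 hunk (plain C, not taken from this patch):

    static unsigned long capacity_for(unsigned int id)
    {
    	unsigned long pages = 1;

    	switch (id) {
    	case 0x64:
    		pages <<= 1;		/* double-density variant (illustrative) */
    		/* fall through */	/* intentional: shares the 0x5d sizing */
    	case 0x5d:
    		return pages << 20;
    	default:
    		return 0;
    	}
    }
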
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index 3b0294e4df93..854498e1012c 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable
*
@@ -26,20 +27,6 @@
*
* See the Kconfig help text for a list of devices known to be supported by
* this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/errno.h>
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index a3ccb899df60..d947957f3635 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
*
@@ -28,20 +29,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/sched.h>
diff --git a/drivers/usb/storage/transport.h b/drivers/usb/storage/transport.h
index dae3ecd2e6cf..f559dc575f4f 100644
--- a/drivers/usb/storage/transport.h
+++ b/drivers/usb/storage/transport.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* Transport Functions Header File
@@ -21,20 +22,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _TRANSPORT_H_
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 63cf981ed81c..5d04c40ee40a 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
@@ -5,8 +6,6 @@
* Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
- *
- * Distributed under the terms of the GNU GPL, version two.
*/
#include <linux/blkdev.h>
@@ -668,6 +667,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
break;
case DMA_BIDIRECTIONAL:
cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB;
+ /* fall through */
case DMA_TO_DEVICE:
cmdinfo->state |= ALLOC_DATA_OUT_URB | SUBMIT_DATA_OUT_URB;
case DMA_NONE:
diff --git a/drivers/usb/storage/unusual_alauda.h b/drivers/usb/storage/unusual_alauda.h
index 763bc03032a1..0ec8c99a4976 100644
--- a/drivers/usb/storage/unusual_alauda.h
+++ b/drivers/usb/storage/unusual_alauda.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for the Alauda-based card readers
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_ALAUDA) || \
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index e9a2eb88869a..fb99e526cd48 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for devices based on the Cypress USB/ATA bridge
* with support for ATACB
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || \
diff --git a/drivers/usb/storage/unusual_datafab.h b/drivers/usb/storage/unusual_datafab.h
index 5049b6bbe5d5..fdab5e7d68ca 100644
--- a/drivers/usb/storage/unusual_datafab.h
+++ b/drivers/usb/storage/unusual_datafab.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for the Datafab USB Compact Flash reader
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_DATAFAB) || \
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index eb06d88b41d6..2968046e7c05 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* Unusual Devices File
@@ -10,20 +11,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/drivers/usb/storage/unusual_ene_ub6250.h b/drivers/usb/storage/unusual_ene_ub6250.h
index 5667f5d365c6..9134b91fbd73 100644
--- a/drivers/usb/storage/unusual_ene_ub6250.h
+++ b/drivers/usb/storage/unusual_ene_ub6250.h
@@ -1,20 +1,4 @@
-/*
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
+// SPDX-License-Identifier: GPL-2.0+
#if defined(CONFIG_USB_STORAGE_ENE_UB6250) || \
defined(CONFIG_USB_STORAGE_ENE_UB6250_MODULE)
diff --git a/drivers/usb/storage/unusual_freecom.h b/drivers/usb/storage/unusual_freecom.h
index 1f5aab42ece2..949231c7a36b 100644
--- a/drivers/usb/storage/unusual_freecom.h
+++ b/drivers/usb/storage/unusual_freecom.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for the Freecom USB/IDE adaptor
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_FREECOM) || \
diff --git a/drivers/usb/storage/unusual_isd200.h b/drivers/usb/storage/unusual_isd200.h
index 9b6862ec3d4f..d03a02cc904e 100644
--- a/drivers/usb/storage/unusual_isd200.h
+++ b/drivers/usb/storage/unusual_isd200.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for In-System Design, Inc. ISD200 ASIC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_ISD200) || \
diff --git a/drivers/usb/storage/unusual_jumpshot.h b/drivers/usb/storage/unusual_jumpshot.h
index 413e64fa6b95..c323338881ef 100644
--- a/drivers/usb/storage/unusual_jumpshot.h
+++ b/drivers/usb/storage/unusual_jumpshot.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for the Lexar "Jumpshot" Compact Flash reader
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_JUMPSHOT) || \
diff --git a/drivers/usb/storage/unusual_karma.h b/drivers/usb/storage/unusual_karma.h
index e6fad3aeae20..8f1eebd71d2c 100644
--- a/drivers/usb/storage/unusual_karma.h
+++ b/drivers/usb/storage/unusual_karma.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for the Rio Karma
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_KARMA) || \
diff --git a/drivers/usb/storage/unusual_onetouch.h b/drivers/usb/storage/unusual_onetouch.h
index 425dc22f345a..c76d4e990f7b 100644
--- a/drivers/usb/storage/unusual_onetouch.h
+++ b/drivers/usb/storage/unusual_onetouch.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for the Maxtor OneTouch USB hard drive's button
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_ONETOUCH) || \
diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
index 8fe624ad302a..d17cd95b55bb 100644
--- a/drivers/usb/storage/unusual_realtek.h
+++ b/drivers/usb/storage/unusual_realtek.h
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek RTS51xx USB card reader
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
* Author:
* wwang (wei_wang@realsil.com.cn)
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
diff --git a/drivers/usb/storage/unusual_sddr09.h b/drivers/usb/storage/unusual_sddr09.h
index d9d38ac4abf9..650cf2862754 100644
--- a/drivers/usb/storage/unusual_sddr09.h
+++ b/drivers/usb/storage/unusual_sddr09.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for SanDisk SDDR-09 SmartMedia reader
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_SDDR09) || \
diff --git a/drivers/usb/storage/unusual_sddr55.h b/drivers/usb/storage/unusual_sddr55.h
index ebb1d1c6c467..e89df2cea7bd 100644
--- a/drivers/usb/storage/unusual_sddr55.h
+++ b/drivers/usb/storage/unusual_sddr55.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for SanDisk SDDR-55 SmartMedia reader
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_SDDR55) || \
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index cde115359793..d520374a824e 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Attached SCSI devices - Unusual Devices File
*
@@ -6,20 +7,6 @@
* Based on the same file for the usb-storage driver, which is:
* (c) 2000-2002 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
* (c) 2000 Adam J. Richter (adam@yggdrasil.com), Yggdrasil Computing, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/drivers/usb/storage/unusual_usbat.h b/drivers/usb/storage/unusual_usbat.h
index 2044ad5ef5e4..05abf6870b8f 100644
--- a/drivers/usb/storage/unusual_usbat.h
+++ b/drivers/usb/storage/unusual_usbat.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_USBAT) || \
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 0dceb9fa3a06..a0c07e05a8f1 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
*
@@ -30,20 +31,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifdef CONFIG_USB_STORAGE_DEBUG
@@ -332,7 +319,7 @@ static int usb_stor_control_thread(void * __us)
/* When we are called with no command pending, we're done */
srb = us->srb;
- if (us->srb == NULL) {
+ if (srb == NULL) {
scsi_unlock(host);
mutex_unlock(&us->dev_mutex);
usb_stor_dbg(us, "-- exiting\n");
@@ -341,7 +328,7 @@ static int usb_stor_control_thread(void * __us)
/* has the command timed out *already* ? */
if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
- us->srb->result = DID_ABORT << 16;
+ srb->result = DID_ABORT << 16;
goto SkipForAbort;
}
@@ -351,35 +338,35 @@ static int usb_stor_control_thread(void * __us)
* reject the command if the direction indicator
* is UNKNOWN
*/
- if (us->srb->sc_data_direction == DMA_BIDIRECTIONAL) {
+ if (srb->sc_data_direction == DMA_BIDIRECTIONAL) {
usb_stor_dbg(us, "UNKNOWN data direction\n");
- us->srb->result = DID_ERROR << 16;
+ srb->result = DID_ERROR << 16;
}
/*
* reject if target != 0 or if LUN is higher than
* the maximum known LUN
*/
- else if (us->srb->device->id &&
+ else if (srb->device->id &&
!(us->fflags & US_FL_SCM_MULT_TARG)) {
usb_stor_dbg(us, "Bad target number (%d:%llu)\n",
- us->srb->device->id,
- us->srb->device->lun);
- us->srb->result = DID_BAD_TARGET << 16;
+ srb->device->id,
+ srb->device->lun);
+ srb->result = DID_BAD_TARGET << 16;
}
- else if (us->srb->device->lun > us->max_lun) {
+ else if (srb->device->lun > us->max_lun) {
usb_stor_dbg(us, "Bad LUN (%d:%llu)\n",
- us->srb->device->id,
- us->srb->device->lun);
- us->srb->result = DID_BAD_TARGET << 16;
+ srb->device->id,
+ srb->device->lun);
+ srb->result = DID_BAD_TARGET << 16;
}
/*
* Handle those devices which need us to fake
* their inquiry data
*/
- else if ((us->srb->cmnd[0] == INQUIRY) &&
+ else if ((srb->cmnd[0] == INQUIRY) &&
(us->fflags & US_FL_FIX_INQUIRY)) {
unsigned char data_ptr[36] = {
0x00, 0x80, 0x02, 0x02,
@@ -387,13 +374,13 @@ static int usb_stor_control_thread(void * __us)
usb_stor_dbg(us, "Faking INQUIRY command\n");
fill_inquiry_response(us, data_ptr, 36);
- us->srb->result = SAM_STAT_GOOD;
+ srb->result = SAM_STAT_GOOD;
}
/* we've got a command, let's do it! */
else {
- US_DEBUG(usb_stor_show_command(us, us->srb));
- us->proto_handler(us->srb, us);
+ US_DEBUG(usb_stor_show_command(us, srb));
+ us->proto_handler(srb, us);
usb_mark_last_busy(us->pusb_dev);
}
@@ -401,7 +388,7 @@ static int usb_stor_control_thread(void * __us)
scsi_lock(host);
/* was the command aborted? */
- if (us->srb->result == DID_ABORT << 16) {
+ if (srb->result == DID_ABORT << 16) {
SkipForAbort:
usb_stor_dbg(us, "scsi command aborted\n");
srb = NULL; /* Don't call srb->scsi_done() */
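
The usb.c hunks above are a mechanical cleanup rather than a behaviour change: the command pointer is loaded from us->srb into the local srb once at the top of the loop, and every later reference goes through that local, which shortens the lines and makes it explicit that the same command is handled throughout the iteration. The shape of the pattern as a standalone C sketch (illustrative, not taken from this patch; the structures and helper are hypothetical):

    struct request { int result; };
    struct context { struct request *req; };

    /* Hypothetical stand-in for the real per-command work. */
    static void process(struct request *req)
    {
    	req->result = 0;
    }

    static void handle_one(struct context *ctx)
    {
    	struct request *req = ctx->req;	/* read the pointer once */

    	if (!req)
    		return;			/* nothing pending */

    	process(req);			/* later uses go through the local */
    }
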
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 8fae28b40bb4..90133e16bec5 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* Main Header File
@@ -24,20 +25,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _USB_H_
diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
index 499669bcf700..83ad01747eed 100644
--- a/drivers/usb/storage/usual-tables.c
+++ b/drivers/usb/storage/usual-tables.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage devices
* Usual Tables File for usb-storage and libusual
@@ -6,20 +7,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index bc1b7745f1d4..465d7da849c3 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -4,6 +4,18 @@ menu "USB Power Delivery and Type-C drivers"
config TYPEC
tristate
+config TYPEC_TCPM
+ tristate "USB Type-C Port Controller Manager"
+ depends on USB
+ select TYPEC
+ help
+ The Type-C Port Controller Manager provides a USB PD and USB Type-C
+ state machine for use with Type-C Port Controllers.
+
+if TYPEC_TCPM
+
+source "drivers/usb/typec/fusb302/Kconfig"
+
config TYPEC_WCOVE
tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
depends on ACPI
@@ -19,6 +31,19 @@ config TYPEC_WCOVE
To compile this driver as module, choose M here: the module will be
called typec_wcove
+endif
+
source "drivers/usb/typec/ucsi/Kconfig"
+config TYPEC_TPS6598X
+ tristate "TI TPS6598x USB Power Delivery controller driver"
+ depends on I2C
+ select TYPEC
+ help
+	  Say Y or M here if your system has a TI TPS65982 or TPS65983 USB Power
+ Delivery controller.
+
+ If you choose to build this driver as a dynamically linked module, the
+ module will be called tps6598x.ko.
+
endmenu
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index bc214f15f1b5..bb3138a6eaac 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -1,3 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TYPEC) += typec.o
+obj-$(CONFIG_TYPEC_TCPM) += tcpm.o
+obj-y += fusb302/
obj-$(CONFIG_TYPEC_WCOVE) += typec_wcove.o
obj-$(CONFIG_TYPEC_UCSI) += ucsi/
+obj-$(CONFIG_TYPEC_TPS6598X) += tps6598x.o
diff --git a/drivers/staging/typec/fusb302/Kconfig b/drivers/usb/typec/fusb302/Kconfig
index 48a4f2fcee03..48a4f2fcee03 100644
--- a/drivers/staging/typec/fusb302/Kconfig
+++ b/drivers/usb/typec/fusb302/Kconfig
diff --git a/drivers/staging/typec/fusb302/Makefile b/drivers/usb/typec/fusb302/Makefile
index 207efa5fbab8..3b51b33631a0 100644
--- a/drivers/staging/typec/fusb302/Makefile
+++ b/drivers/usb/typec/fusb302/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TYPEC_FUSB302) += fusb302.o
diff --git a/drivers/staging/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c
index fc6a3cf74eb3..72cb060b3fca 100644
--- a/drivers/staging/typec/fusb302/fusb302.c
+++ b/drivers/usb/typec/fusb302/fusb302.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2016-2017 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Fairchild FUSB302 Type-C Chip Driver
*/
@@ -37,11 +28,11 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/usb/typec.h>
+#include <linux/usb/tcpm.h>
+#include <linux/usb/pd.h>
#include <linux/workqueue.h>
#include "fusb302_reg.h"
-#include "../tcpm.h"
-#include "../pd.h"
/*
* When the device is SNK, BC_LVL interrupt is used to monitor cc pins
diff --git a/drivers/staging/typec/fusb302/fusb302_reg.h b/drivers/usb/typec/fusb302/fusb302_reg.h
index 0682e63de773..00b39d365478 100644
--- a/drivers/staging/typec/fusb302/fusb302_reg.h
+++ b/drivers/usb/typec/fusb302/fusb302_reg.h
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2016-2017 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Fairchild FUSB302 Type-C Chip Driver
*/
diff --git a/drivers/staging/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 8af62e74d54c..c166fc77dfb8 100644
--- a/drivers/staging/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2015-2017 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* USB Power Delivery protocol stack.
*/
@@ -26,14 +17,13 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/usb/pd.h>
+#include <linux/usb/pd_bdo.h>
+#include <linux/usb/pd_vdo.h>
+#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>
#include <linux/workqueue.h>
-#include "pd.h"
-#include "pd_vdo.h"
-#include "pd_bdo.h"
-#include "tcpm.h"
-
#define FOREACH_STATE(S) \
S(INVALID_STATE), \
S(DRP_TOGGLING), \
@@ -908,27 +898,6 @@ static void svdm_consume_identity(struct tcpm_port *port, const __le32 *payload,
memset(&port->mode_data, 0, sizeof(port->mode_data));
-#if 0 /* Not really a match */
- switch (PD_IDH_PTYPE(vdo)) {
- case IDH_PTYPE_UNDEF:
- port->partner.type = TYPEC_PARTNER_NONE; /* no longer exists */
- break;
- case IDH_PTYPE_HUB:
- break;
- case IDH_PTYPE_PERIPH:
- break;
- case IDH_PTYPE_PCABLE:
- break;
- case IDH_PTYPE_ACABLE:
- break;
- case IDH_PTYPE_AMA:
- port->partner.type = TYPEC_PARTNER_ALTMODE;
- break;
- default:
- break;
- }
-#endif
-
port->partner_ident.id_header = vdo;
port->partner_ident.cert_stat = le32_to_cpu(payload[VDO_INDEX_CSTAT]);
port->partner_ident.product = product;
@@ -1008,7 +977,7 @@ static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
}
port->partner_altmode[pmdata->altmodes] =
typec_partner_register_altmode(port->partner, paltmode);
- if (port->partner_altmode[pmdata->altmodes] == NULL) {
+ if (!port->partner_altmode[pmdata->altmodes]) {
tcpm_log(port,
"Failed to register alternate modes for SVID 0x%04x",
paltmode->svid);
@@ -1103,11 +1072,7 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
response[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
rlen = 1;
} else {
-#if 0
- response[0] = pd_dfp_enter_mode(port, 0, 0);
- if (response[0])
- rlen = 1;
-#endif
+ /* enter alternate mode if/when implemented */
}
break;
case CMD_ENTER_MODE:
@@ -1145,10 +1110,6 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
if (PD_VDO_SVDM(p0))
rlen = tcpm_pd_svdm(port, payload, cnt, response);
-#if 0
- else
- rlen = tcpm_pd_custom_vdm(port, cnt, payload, response);
-#endif
if (rlen > 0) {
tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
@@ -2442,7 +2403,6 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, SNK_STARTUP, 0);
break;
case SNK_STARTUP:
- /* XXX: callback into infrastructure */
opmode = tcpm_get_pwr_opmode(port->polarity ?
port->cc2 : port->cc1);
typec_set_pwr_opmode(port->typec_port, opmode);
@@ -3589,11 +3549,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
port->partner_desc.identity = &port->partner_ident;
port->port_type = tcpc->config->type;
- /*
- * TODO:
- * - alt_modes, set_alt_mode
- * - {debug,audio}_accessory
- */
port->typec_port = typec_register_port(port->dev, &port->typec_caps);
if (!port->typec_port) {
@@ -3638,6 +3593,7 @@ void tcpm_unregister_port(struct tcpm_port *port)
{
int i;
+ tcpm_reset_port(port);
for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
typec_unregister_altmode(port->port_altmode[i]);
typec_unregister_port(port->typec_port);
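
With tcpm.c promoted out of staging, its consumers include the public headers (<linux/usb/tcpm.h>, <linux/usb/pd.h>) instead of the old relative paths, and a Type-C port-controller driver hands its struct tcpc_dev to tcpm_register_port(), whose signature is visible in the hunk above. A heavily reduced registration sketch (illustrative C, not from this patch; the tcpc_dev callbacks and configuration are left out because they are not shown here, and the ERR_PTR-style error handling is an assumption):

    #include <linux/err.h>
    #include <linux/device.h>
    #include <linux/usb/tcpm.h>

    /*
     * Illustrative only: a real driver fills in the tcpc_dev callbacks and
     * configuration before registering; none of that is shown in this diff.
     */
    static struct tcpc_dev my_tcpc;
    static struct tcpm_port *my_port;

    static int my_probe(struct device *dev)
    {
    	my_port = tcpm_register_port(dev, &my_tcpc);
    	if (IS_ERR(my_port))		/* assumed ERR_PTR convention */
    		return PTR_ERR(my_port);
    	return 0;
    }

    static void my_remove(void)
    {
    	tcpm_unregister_port(my_port);	/* mirrors the unregister path above */
    }
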
diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
new file mode 100644
index 000000000000..2719f5d382f7
--- /dev/null
+++ b/drivers/usb/typec/tps6598x.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for TI TPS6598x USB Power Delivery controller family
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/interrupt.h>
+#include <linux/usb/typec.h>
+
+/* Register offsets */
+#define TPS_REG_CMD1 0x08
+#define TPS_REG_DATA1 0x09
+#define TPS_REG_INT_EVENT1 0x14
+#define TPS_REG_INT_EVENT2 0x15
+#define TPS_REG_INT_MASK1 0x16
+#define TPS_REG_INT_MASK2 0x17
+#define TPS_REG_INT_CLEAR1 0x18
+#define TPS_REG_INT_CLEAR2 0x19
+#define TPS_REG_STATUS 0x1a
+#define TPS_REG_SYSTEM_CONF 0x28
+#define TPS_REG_CTRL_CONF 0x29
+#define TPS_REG_POWER_STATUS 0x3f
+#define TPS_REG_RX_IDENTITY_SOP 0x48
+
+/* TPS_REG_INT_* bits */
+#define TPS_REG_INT_PLUG_EVENT BIT(3)
+
+/* TPS_REG_STATUS bits */
+#define TPS_STATUS_PLUG_PRESENT BIT(0)
+#define TPS_STATUS_ORIENTATION BIT(4)
+#define TPS_STATUS_PORTROLE(s) (!!((s) & BIT(5)))
+#define TPS_STATUS_DATAROLE(s) (!!((s) & BIT(6)))
+#define TPS_STATUS_VCONN(s) (!!((s) & BIT(7)))
+
+/* TPS_REG_SYSTEM_CONF bits */
+#define TPS_SYSCONF_PORTINFO(c) ((c) & 3)
+
+enum {
+ TPS_PORTINFO_SINK,
+ TPS_PORTINFO_SINK_ACCESSORY,
+ TPS_PORTINFO_DRP_UFP,
+ TPS_PORTINFO_DRP_UFP_DRD,
+ TPS_PORTINFO_DRP_DFP,
+ TPS_PORTINFO_DRP_DFP_DRD,
+ TPS_PORTINFO_SOURCE,
+};
+
+/* TPS_REG_POWER_STATUS bits */
+#define TPS_POWER_STATUS_SOURCESINK BIT(1)
+#define TPS_POWER_STATUS_PWROPMODE(p) (((p) & GENMASK(3, 2)) >> 2)
+
+/* TPS_REG_RX_IDENTITY_SOP */
+struct tps6598x_rx_identity_reg {
+ u8 status;
+ struct usb_pd_identity identity;
+ u32 vdo[3];
+} __packed;
+
+/* Standard Task return codes */
+#define TPS_TASK_TIMEOUT 1
+#define TPS_TASK_REJECTED 3
+
+/* Unrecognized commands will be replaced with "!CMD" */
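+/* ('!' 'C' 'M' 'D' = 0x21 0x43 0x4d 0x44, read back as little-endian u32 0x444d4321) */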
+#define INVALID_CMD(_cmd_) ((_cmd_) == 0x444d4321)
+
+struct tps6598x {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock; /* device lock */
+
+ struct typec_port *port;
+ struct typec_partner *partner;
+ struct usb_pd_identity partner_identity;
+ struct typec_capability typec_cap;
+};
+
+static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val)
+{
+ return regmap_raw_read(tps->regmap, reg, val, sizeof(u16));
+}
+
+static inline int tps6598x_read32(struct tps6598x *tps, u8 reg, u32 *val)
+{
+ return regmap_raw_read(tps->regmap, reg, val, sizeof(u32));
+}
+
+static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val)
+{
+ return regmap_raw_read(tps->regmap, reg, val, sizeof(u64));
+}
+
+static inline int tps6598x_write16(struct tps6598x *tps, u8 reg, u16 val)
+{
+ return regmap_raw_write(tps->regmap, reg, &val, sizeof(u16));
+}
+
+static inline int tps6598x_write32(struct tps6598x *tps, u8 reg, u32 val)
+{
+ return regmap_raw_write(tps->regmap, reg, &val, sizeof(u32));
+}
+
+static inline int tps6598x_write64(struct tps6598x *tps, u8 reg, u64 val)
+{
+ return regmap_raw_write(tps->regmap, reg, &val, sizeof(u64));
+}
+
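+/* Tasks ("4CC" commands) are invoked by writing their four ASCII characters to CMD1 */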
+static inline int
+tps6598x_write_4cc(struct tps6598x *tps, u8 reg, const char *val)
+{
+ return regmap_raw_write(tps->regmap, reg, val, sizeof(u32));
+}
+
+static int tps6598x_read_partner_identity(struct tps6598x *tps)
+{
+ struct tps6598x_rx_identity_reg id;
+ int ret;
+
+ ret = regmap_raw_read(tps->regmap, TPS_REG_RX_IDENTITY_SOP,
+ &id, sizeof(id));
+ if (ret)
+ return ret;
+
+ tps->partner_identity = id.identity;
+
+ return 0;
+}
+
+static int tps6598x_connect(struct tps6598x *tps, u32 status)
+{
+ struct typec_partner_desc desc;
+ enum typec_pwr_opmode mode;
+ u16 pwr_status;
+ int ret;
+
+ if (tps->partner)
+ return 0;
+
+ ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &pwr_status);
+ if (ret < 0)
+ return ret;
+
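+ /* The 2-bit power operation mode field is used directly as enum typec_pwr_opmode */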
+ mode = TPS_POWER_STATUS_PWROPMODE(pwr_status);
+
+ desc.usb_pd = mode == TYPEC_PWR_MODE_PD;
+ desc.accessory = TYPEC_ACCESSORY_NONE; /* XXX: handle accessories */
+ desc.identity = NULL;
+
+ if (desc.usb_pd) {
+ ret = tps6598x_read_partner_identity(tps);
+ if (ret)
+ return ret;
+ desc.identity = &tps->partner_identity;
+ }
+
+ tps->partner = typec_register_partner(tps->port, &desc);
+ if (!tps->partner)
+ return -ENODEV;
+
+ typec_set_pwr_opmode(tps->port, mode);
+ typec_set_pwr_role(tps->port, TPS_STATUS_PORTROLE(status));
+ typec_set_vconn_role(tps->port, TPS_STATUS_VCONN(status));
+ typec_set_data_role(tps->port, TPS_STATUS_DATAROLE(status));
+
+ if (desc.identity)
+ typec_partner_set_identity(tps->partner);
+
+ return 0;
+}
+
+static void tps6598x_disconnect(struct tps6598x *tps, u32 status)
+{
+ typec_unregister_partner(tps->partner);
+ tps->partner = NULL;
+ typec_set_pwr_opmode(tps->port, TYPEC_PWR_MODE_USB);
+ typec_set_pwr_role(tps->port, TPS_STATUS_PORTROLE(status));
+ typec_set_vconn_role(tps->port, TPS_STATUS_VCONN(status));
+ typec_set_data_role(tps->port, TPS_STATUS_DATAROLE(status));
+}
+
+static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
+ size_t in_len, u8 *in_data,
+ size_t out_len, u8 *out_data)
+{
+ unsigned long timeout;
+ u32 val;
+ int ret;
+
+ ret = tps6598x_read32(tps, TPS_REG_CMD1, &val);
+ if (ret)
+ return ret;
+ if (val && !INVALID_CMD(val))
+ return -EBUSY;
+
+ if (in_len) {
+ ret = regmap_raw_write(tps->regmap, TPS_REG_DATA1,
+ in_data, in_len);
+ if (ret)
+ return ret;
+ }
+
+ ret = tps6598x_write_4cc(tps, TPS_REG_CMD1, cmd);
+ if (ret < 0)
+ return ret;
+
+ /* XXX: Using 1s for now, but it may not be enough for every command. */
+ timeout = jiffies + msecs_to_jiffies(1000);
+
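+ /* CMD1 reads back as zero once the controller has completed the task */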
+ do {
+ ret = tps6598x_read32(tps, TPS_REG_CMD1, &val);
+ if (ret)
+ return ret;
+ if (INVALID_CMD(val))
+ return -EINVAL;
+
+ if (time_is_before_jiffies(timeout))
+ return -ETIMEDOUT;
+ } while (val);
+
+ if (out_len) {
+ ret = regmap_raw_read(tps->regmap, TPS_REG_DATA1,
+ out_data, out_len);
+ if (ret)
+ return ret;
+ val = out_data[0];
+ } else {
+ ret = regmap_read(tps->regmap, TPS_REG_DATA1, &val);
+ if (ret)
+ return ret;
+ }
+
+ switch (val) {
+ case TPS_TASK_TIMEOUT:
+ return -ETIMEDOUT;
+ case TPS_TASK_REJECTED:
+ return -EPERM;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+tps6598x_dr_set(const struct typec_capability *cap, enum typec_data_role role)
+{
+ struct tps6598x *tps = container_of(cap, struct tps6598x, typec_cap);
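+ /* "SWUF"/"SWDF": 4CC tasks that swap the data role to UFP/DFP respectively */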
+ const char *cmd = (role == TYPEC_DEVICE) ? "SWUF" : "SWDF";
+ u32 status;
+ int ret;
+
+ mutex_lock(&tps->lock);
+
+ ret = tps6598x_exec_cmd(tps, cmd, 0, NULL, 0, NULL);
+ if (ret)
+ goto out_unlock;
+
+ ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
+ if (ret)
+ goto out_unlock;
+
+ if (role != TPS_STATUS_DATAROLE(status)) {
+ ret = -EPROTO;
+ goto out_unlock;
+ }
+
+ typec_set_data_role(tps->port, role);
+
+out_unlock:
+ mutex_unlock(&tps->lock);
+
+ return ret;
+}
+
+static int
+tps6598x_pr_set(const struct typec_capability *cap, enum typec_role role)
+{
+ struct tps6598x *tps = container_of(cap, struct tps6598x, typec_cap);
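+ /* "SWSk"/"SWSr": 4CC tasks that swap the power role to sink/source respectively */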
+ const char *cmd = (role == TYPEC_SINK) ? "SWSk" : "SWSr";
+ u32 status;
+ int ret;
+
+ mutex_lock(&tps->lock);
+
+ ret = tps6598x_exec_cmd(tps, cmd, 0, NULL, 0, NULL);
+ if (ret)
+ goto out_unlock;
+
+ ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
+ if (ret)
+ goto out_unlock;
+
+ if (role != TPS_STATUS_PORTROLE(status)) {
+ ret = -EPROTO;
+ goto out_unlock;
+ }
+
+ typec_set_pwr_role(tps->port, role);
+
+out_unlock:
+ mutex_unlock(&tps->lock);
+
+ return ret;
+}
+
+static irqreturn_t tps6598x_interrupt(int irq, void *data)
+{
+ struct tps6598x *tps = data;
+ u64 event1;
+ u64 event2;
+ u32 status;
+ int ret;
+
+ mutex_lock(&tps->lock);
+
+ ret = tps6598x_read64(tps, TPS_REG_INT_EVENT1, &event1);
+ ret |= tps6598x_read64(tps, TPS_REG_INT_EVENT2, &event2);
+ if (ret) {
+ dev_err(tps->dev, "%s: failed to read events\n", __func__);
+ goto err_unlock;
+ }
+
+ ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
+ if (ret) {
+ dev_err(tps->dev, "%s: failed to read status\n", __func__);
+ goto err_clear_ints;
+ }
+
+ /* Handle plug insert or removal */
+ if ((event1 | event2) & TPS_REG_INT_PLUG_EVENT) {
+ if (status & TPS_STATUS_PLUG_PRESENT) {
+ ret = tps6598x_connect(tps, status);
+ if (ret)
+ dev_err(tps->dev,
+ "failed to register partner\n");
+ } else {
+ tps6598x_disconnect(tps, status);
+ }
+ }
+
+err_clear_ints:
+ tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event1);
+ tps6598x_write64(tps, TPS_REG_INT_CLEAR2, event2);
+
+err_unlock:
+ mutex_unlock(&tps->lock);
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_config tps6598x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x7F,
+};
+
+static int tps6598x_probe(struct i2c_client *client)
+{
+ struct tps6598x *tps;
+ u32 status;
+ u32 conf;
+ u32 vid;
+ int ret;
+
+ tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
+ if (!tps)
+ return -ENOMEM;
+
+ mutex_init(&tps->lock);
+ tps->dev = &client->dev;
+
+ tps->regmap = devm_regmap_init_i2c(client, &tps6598x_regmap_config);
+ if (IS_ERR(tps->regmap))
+ return PTR_ERR(tps->regmap);
+
+ ret = tps6598x_read32(tps, 0, &vid);
+ if (ret < 0)
+ return ret;
+ if (!vid)
+ return -ENODEV;
+
+ ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf);
+ if (ret < 0)
+ return ret;
+
+ switch (TPS_SYSCONF_PORTINFO(conf)) {
+ case TPS_PORTINFO_SINK_ACCESSORY:
+ case TPS_PORTINFO_SINK:
+ tps->typec_cap.type = TYPEC_PORT_UFP;
+ break;
+ case TPS_PORTINFO_DRP_UFP_DRD:
+ case TPS_PORTINFO_DRP_DFP_DRD:
+ tps->typec_cap.dr_set = tps6598x_dr_set;
+ /* fall through */
+ case TPS_PORTINFO_DRP_UFP:
+ case TPS_PORTINFO_DRP_DFP:
+ tps->typec_cap.pr_set = tps6598x_pr_set;
+ tps->typec_cap.type = TYPEC_PORT_DRP;
+ break;
+ case TPS_PORTINFO_SOURCE:
+ tps->typec_cap.type = TYPEC_PORT_DFP;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ tps->typec_cap.revision = USB_TYPEC_REV_1_2;
+ tps->typec_cap.pd_revision = 0x200;
+ tps->typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+
+ tps->port = typec_register_port(&client->dev, &tps->typec_cap);
+ if (!tps->port)
+ return -ENODEV;
+
+ if (status & TPS_STATUS_PLUG_PRESENT) {
+ ret = tps6598x_connect(tps, status);
+ if (ret)
+ dev_err(&client->dev, "failed to register partner\n");
+ }
+
+ ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ tps6598x_interrupt,
+ IRQF_SHARED | IRQF_ONESHOT,
+ dev_name(&client->dev), tps);
+ if (ret) {
+ tps6598x_disconnect(tps, 0);
+ typec_unregister_port(tps->port);
+ return ret;
+ }
+
+ i2c_set_clientdata(client, tps);
+
+ return 0;
+}
+
+static int tps6598x_remove(struct i2c_client *client)
+{
+ struct tps6598x *tps = i2c_get_clientdata(client);
+
+ tps6598x_disconnect(tps, 0);
+ typec_unregister_port(tps->port);
+
+ return 0;
+}
+
+static const struct acpi_device_id tps6598x_acpi_match[] = {
+ { "INT3515", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, tps6598x_acpi_match);
+
+static struct i2c_driver tps6598x_i2c_driver = {
+ .driver = {
+ .name = "tps6598x",
+ .acpi_match_table = tps6598x_acpi_match,
+ },
+ .probe_new = tps6598x_probe,
+ .remove = tps6598x_remove,
+};
+module_i2c_driver(tps6598x_i2c_driver);
+
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI TPS6598x USB Power Delivery Controller Driver");
diff --git a/drivers/usb/typec/typec.c b/drivers/usb/typec/typec.c
index 24e355ba109d..735726ced602 100644
--- a/drivers/usb/typec/typec.c
+++ b/drivers/usb/typec/typec.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Type-C Connector Class
*
* Copyright (C) 2017, Intel Corporation
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/device.h>
diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c
index e9c4e784a9cb..a8d479eb221a 100644
--- a/drivers/usb/typec/typec_wcove.c
+++ b/drivers/usb/typec/typec_wcove.c
@@ -1,24 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* typec_wcove.c - WhiskeyCove PMIC USB Type-C PHY driver
*
* Copyright (C) 2017 Intel Corporation
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
#include <linux/module.h>
+#include <linux/usb/tcpm.h>
#include <linux/interrupt.h>
-#include <linux/usb/typec.h>
#include <linux/platform_device.h>
#include <linux/mfd/intel_soc_pmic.h>
/* Register offsets */
#define WCOVE_CHGRIRQ0 0x4e09
-#define WCOVE_PHYCTRL 0x5e07
#define USBC_CONTROL1 0x7001
#define USBC_CONTROL2 0x7002
@@ -28,22 +24,57 @@
#define USBC_STATUS1 0x7007
#define USBC_STATUS2 0x7008
#define USBC_STATUS3 0x7009
+#define USBC_CC1 0x700a
+#define USBC_CC2 0x700b
+#define USBC_CC1_STATUS 0x700c
+#define USBC_CC2_STATUS 0x700d
#define USBC_IRQ1 0x7015
#define USBC_IRQ2 0x7016
#define USBC_IRQMASK1 0x7017
#define USBC_IRQMASK2 0x7018
+#define USBC_PDCFG2 0x701a
+#define USBC_PDCFG3 0x701b
+#define USBC_PDSTATUS 0x701c
+#define USBC_RXSTATUS 0x701d
+#define USBC_RXINFO 0x701e
+#define USBC_TXCMD 0x701f
+#define USBC_TXINFO 0x7020
+#define USBC_RX_DATA 0x7028
+#define USBC_TX_DATA 0x7047
/* Register bits */
-#define USBC_CONTROL1_MODE_DRP(r) (((r) & ~0x7) | 4)
+#define USBC_CONTROL1_MODE_MASK 0x3
+#define USBC_CONTROL1_MODE_SNK 0
+#define USBC_CONTROL1_MODE_SNKACC 1
+#define USBC_CONTROL1_MODE_SRC 2
+#define USBC_CONTROL1_MODE_SRCACC 3
+#define USBC_CONTROL1_MODE_DRP 4
+#define USBC_CONTROL1_MODE_DRPACC 5
+#define USBC_CONTROL1_MODE_TEST 7
+#define USBC_CONTROL1_CURSRC_MASK 0xc
+#define USBC_CONTROL1_CURSRC_UA_0 (0 << 3)
+#define USBC_CONTROL1_CURSRC_UA_80 (1 << 3)
+#define USBC_CONTROL1_CURSRC_UA_180 (2 << 3)
+#define USBC_CONTROL1_CURSRC_UA_330 (3 << 3)
+#define USBC_CONTROL1_DRPTOGGLE_RANDOM 0xe0
#define USBC_CONTROL2_UNATT_SNK BIT(0)
#define USBC_CONTROL2_UNATT_SRC BIT(1)
#define USBC_CONTROL2_DIS_ST BIT(2)
+#define USBC_CONTROL3_DET_DIS BIT(0)
#define USBC_CONTROL3_PD_DIS BIT(1)
+#define USBC_CONTROL3_RESETPHY BIT(2)
+#define USBC_CC_CTRL_PU_EN BIT(0)
#define USBC_CC_CTRL_VCONN_EN BIT(1)
+#define USBC_CC_CTRL_TX_EN BIT(2)
+#define USBC_CC_CTRL_PD_EN BIT(3)
+#define USBC_CC_CTRL_CDET_EN BIT(4)
+#define USBC_CC_CTRL_RDET_EN BIT(5)
+#define USBC_CC_CTRL_ADC_EN BIT(6)
+#define USBC_CC_CTRL_VBUSOK BIT(7)
#define USBC_STATUS1_DET_ONGOING BIT(6)
#define USBC_STATUS1_RSLT(r) ((r) & 0xf)
@@ -61,6 +92,15 @@
#define USBC_STATUS2_VBUS_REQ BIT(5)
+#define UCSC_CC_STATUS_SNK_RP BIT(0)
+#define UCSC_CC_STATUS_PWRDEFSNK BIT(1)
+#define UCSC_CC_STATUS_PWR_1P5A_SNK BIT(2)
+#define UCSC_CC_STATUS_PWR_3A_SNK BIT(3)
+#define UCSC_CC_STATUS_SRC_RP BIT(4)
+#define UCSC_CC_STATUS_RX(r) (((r) >> 5) & 0x3)
+#define USBC_CC_STATUS_RD 1
+#define USBC_CC_STATUS_RA 2
+
#define USBC_IRQ1_ADCDONE1 BIT(2)
#define USBC_IRQ1_OVERTEMP BIT(1)
#define USBC_IRQ1_SHORT BIT(0)
@@ -79,15 +119,44 @@
USBC_IRQ2_RX_HR | USBC_IRQ2_RX_CR | \
USBC_IRQ2_TX_SUCCESS | USBC_IRQ2_TX_FAIL)
+#define USBC_PDCFG2_SOP BIT(0)
+#define USBC_PDCFG2_SOP_P BIT(1)
+#define USBC_PDCFG2_SOP_PP BIT(2)
+#define USBC_PDCFG2_SOP_P_DEBUG BIT(3)
+#define USBC_PDCFG2_SOP_PP_DEBUG BIT(4)
+
+#define USBC_PDCFG3_DATAROLE_SHIFT 1
+#define USBC_PDCFG3_SOP_SHIFT 2
+
+#define USBC_RXSTATUS_RXCLEAR BIT(0)
+#define USBC_RXSTATUS_RXDATA BIT(7)
+
+#define USBC_RXINFO_RXBYTES(i) (((i) >> 3) & 0x1f)
+
+#define USBC_TXCMD_BUF_RDY BIT(0)
+#define USBC_TXCMD_START BIT(1)
+#define USBC_TXCMD_NOP (0 << 5)
+#define USBC_TXCMD_MSG (1 << 5)
+#define USBC_TXCMD_CR (2 << 5)
+#define USBC_TXCMD_HR (3 << 5)
+#define USBC_TXCMD_BIST (4 << 5)
+
+#define USBC_TXINFO_RETRIES(d) ((d) << 3)
+
struct wcove_typec {
struct mutex lock; /* device lock */
struct device *dev;
struct regmap *regmap;
- struct typec_port *port;
- struct typec_capability cap;
- struct typec_partner *partner;
+ guid_t guid;
+
+ bool vbus;
+
+ struct tcpc_dev tcpc;
+ struct tcpm_port *tcpm;
};
+#define tcpc_to_wcove(_tcpc_) container_of(_tcpc_, struct wcove_typec, tcpc)
+
enum wcove_typec_func {
WCOVE_FUNC_DRIVE_VBUS = 1,
WCOVE_FUNC_ORIENTATION,
@@ -105,8 +174,7 @@ enum wcove_typec_role {
WCOVE_ROLE_DEVICE,
};
-static guid_t guid = GUID_INIT(0x482383f0, 0x2876, 0x4e49,
- 0x86, 0x85, 0xdb, 0x66, 0x21, 0x1a, 0xf0, 0x37);
+#define WCOVE_DSM_UUID "482383f0-2876-4e49-8685-db66211af037"
static int wcove_typec_func(struct wcove_typec *wcove,
enum wcove_typec_func func, int param)
@@ -118,7 +186,7 @@ static int wcove_typec_func(struct wcove_typec *wcove,
tmp.type = ACPI_TYPE_INTEGER;
tmp.integer.value = param;
- obj = acpi_evaluate_dsm(ACPI_HANDLE(wcove->dev), &guid, 1, func,
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(wcove->dev), &wcove->guid, 1, func,
&argv4);
if (!obj) {
dev_err(wcove->dev, "%s: failed to evaluate _DSM\n", __func__);
@@ -129,158 +197,349 @@ static int wcove_typec_func(struct wcove_typec *wcove,
return 0;
}
-static irqreturn_t wcove_typec_irq(int irq, void *data)
+static int wcove_init(struct tcpc_dev *tcpc)
{
- enum typec_role role = TYPEC_SINK;
- struct typec_partner_desc partner;
- struct wcove_typec *wcove = data;
- unsigned int cc1_ctrl;
- unsigned int cc2_ctrl;
- unsigned int cc_irq1;
- unsigned int cc_irq2;
- unsigned int status1;
- unsigned int status2;
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
int ret;
- mutex_lock(&wcove->lock);
-
- ret = regmap_read(wcove->regmap, USBC_IRQ1, &cc_irq1);
+ /* Unmask everything */
+ ret = regmap_write(wcove->regmap, USBC_IRQMASK1, 0);
if (ret)
- goto err;
+ return ret;
- ret = regmap_read(wcove->regmap, USBC_IRQ2, &cc_irq2);
- if (ret)
- goto err;
+ return regmap_write(wcove->regmap, USBC_IRQMASK2, 0);
+}
- ret = regmap_read(wcove->regmap, USBC_STATUS1, &status1);
- if (ret)
- goto err;
+static int wcove_get_vbus(struct tcpc_dev *tcpc)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+ unsigned int cc1ctrl;
+ int ret;
- ret = regmap_read(wcove->regmap, USBC_STATUS2, &status2);
+ ret = regmap_read(wcove->regmap, USBC_CC1_CTRL, &cc1ctrl);
if (ret)
- goto err;
+ return ret;
- ret = regmap_read(wcove->regmap, USBC_CC1_CTRL, &cc1_ctrl);
- if (ret)
- goto err;
+ wcove->vbus = !!(cc1ctrl & USBC_CC_CTRL_VBUSOK);
- ret = regmap_read(wcove->regmap, USBC_CC2_CTRL, &cc2_ctrl);
- if (ret)
- goto err;
+ return wcove->vbus;
+}
- if (cc_irq1) {
- if (cc_irq1 & USBC_IRQ1_OVERTEMP)
- dev_err(wcove->dev, "VCONN Switch Over Temperature!\n");
- if (cc_irq1 & USBC_IRQ1_SHORT)
- dev_err(wcove->dev, "VCONN Switch Short Circuit!\n");
- ret = regmap_write(wcove->regmap, USBC_IRQ1, cc_irq1);
- if (ret)
- goto err;
- }
+static int wcove_set_vbus(struct tcpc_dev *tcpc, bool on, bool sink)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
- if (cc_irq2) {
- ret = regmap_write(wcove->regmap, USBC_IRQ2, cc_irq2);
- if (ret)
- goto err;
- /*
- * Ignoring any PD communication interrupts until the PD support
- * is available
- */
- if (cc_irq2 & ~USBC_IRQ2_CC_CHANGE) {
- dev_WARN(wcove->dev, "USB PD handling missing\n");
- goto err;
+ return wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VBUS, on);
+}
+
+static int wcove_set_vconn(struct tcpc_dev *tcpc, bool on)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+
+ return wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VCONN, on);
+}
+
+static enum typec_cc_status wcove_to_typec_cc(unsigned int cc)
+{
+ if (cc & UCSC_CC_STATUS_SNK_RP) {
+ if (cc & UCSC_CC_STATUS_PWRDEFSNK)
+ return TYPEC_CC_RP_DEF;
+ else if (cc & UCSC_CC_STATUS_PWR_1P5A_SNK)
+ return TYPEC_CC_RP_1_5;
+ else if (cc & UCSC_CC_STATUS_PWR_3A_SNK)
+ return TYPEC_CC_RP_3_0;
+ } else {
+ switch (UCSC_CC_STATUS_RX(cc)) {
+ case USBC_CC_STATUS_RD:
+ return TYPEC_CC_RD;
+ case USBC_CC_STATUS_RA:
+ return TYPEC_CC_RA;
+ default:
+ break;
}
}
+ return TYPEC_CC_OPEN;
+}
- if (status1 & USBC_STATUS1_DET_ONGOING)
- goto out;
+static int wcove_get_cc(struct tcpc_dev *tcpc, enum typec_cc_status *cc1,
+ enum typec_cc_status *cc2)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+ unsigned int cc1_status;
+ unsigned int cc2_status;
+ int ret;
- if (USBC_STATUS1_RSLT(status1) == USBC_RSLT_NOTHING) {
- if (wcove->partner) {
- typec_unregister_partner(wcove->partner);
- wcove->partner = NULL;
- }
+ ret = regmap_read(wcove->regmap, USBC_CC1_STATUS, &cc1_status);
+ if (ret)
+ return ret;
- wcove_typec_func(wcove, WCOVE_FUNC_ORIENTATION,
- WCOVE_ORIENTATION_NORMAL);
+ ret = regmap_read(wcove->regmap, USBC_CC2_STATUS, &cc2_status);
+ if (ret)
+ return ret;
- /* This makes sure the device controller is disconnected */
- wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_HOST);
+ *cc1 = wcove_to_typec_cc(cc1_status);
+ *cc2 = wcove_to_typec_cc(cc2_status);
- /* Port to default role */
- typec_set_data_role(wcove->port, TYPEC_DEVICE);
- typec_set_pwr_role(wcove->port, TYPEC_SINK);
- typec_set_pwr_opmode(wcove->port, TYPEC_PWR_MODE_USB);
+ return 0;
+}
- goto out;
- }
+static int wcove_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
+{
+ /* XXX: Relying on the HW FSM to configure things correctly for now */
+ return 0;
+}
- if (wcove->partner)
- goto out;
+static int wcove_set_polarity(struct tcpc_dev *tcpc, enum typec_cc_polarity pol)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
- switch (USBC_STATUS1_ORIENT(status1)) {
- case USBC_ORIENT_NORMAL:
- wcove_typec_func(wcove, WCOVE_FUNC_ORIENTATION,
- WCOVE_ORIENTATION_NORMAL);
- break;
- case USBC_ORIENT_REVERSE:
- wcove_typec_func(wcove, WCOVE_FUNC_ORIENTATION,
- WCOVE_ORIENTATION_REVERSE);
- default:
- break;
+ return wcove_typec_func(wcove, WCOVE_FUNC_ORIENTATION, pol);
+}
+
+static int wcove_set_current_limit(struct tcpc_dev *tcpc, u32 max_ma, u32 mv)
+{
+ return 0;
+}
+
+static int wcove_set_roles(struct tcpc_dev *tcpc, bool attached,
+ enum typec_role role, enum typec_data_role data)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+ unsigned int val;
+ int ret;
+
+ ret = wcove_typec_func(wcove, WCOVE_FUNC_ROLE, data == TYPEC_HOST ?
+ WCOVE_ROLE_HOST : WCOVE_ROLE_DEVICE);
+ if (ret)
+ return ret;
+
+ val = role;
+ val |= data << USBC_PDCFG3_DATAROLE_SHIFT;
+ val |= PD_REV20 << USBC_PDCFG3_SOP_SHIFT;
+
+ return regmap_write(wcove->regmap, USBC_PDCFG3, val);
+}
+
+static int wcove_set_pd_rx(struct tcpc_dev *tcpc, bool on)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+
+ return regmap_write(wcove->regmap, USBC_PDCFG2,
+ on ? USBC_PDCFG2_SOP : 0);
+}
+
+static int wcove_pd_transmit(struct tcpc_dev *tcpc,
+ enum tcpm_transmit_type type,
+ const struct pd_message *msg)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+ unsigned int info = 0;
+ unsigned int cmd;
+ int ret;
+
+ ret = regmap_read(wcove->regmap, USBC_TXCMD, &cmd);
+ if (ret)
+ return ret;
+
+ if (!(cmd & USBC_TXCMD_BUF_RDY)) {
+ dev_warn(wcove->dev, "%s: Last transmission still ongoing!\n",
+ __func__);
+ return -EBUSY;
}
- memset(&partner, 0, sizeof(partner));
+ if (msg) {
+ const u8 *data = (void *)msg;
+ int i;
+
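+ /* Message length: 2-byte header plus 4 bytes per data object */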
+ for (i = 0; i < pd_header_cnt(msg->header) * 4 + 2; i++) {
+ ret = regmap_write(wcove->regmap, USBC_TX_DATA + i,
+ data[i]);
+ if (ret)
+ return ret;
+ }
+ }
- switch (USBC_STATUS1_RSLT(status1)) {
- case USBC_RSLT_SRC_DEFAULT:
- typec_set_pwr_opmode(wcove->port, TYPEC_PWR_MODE_USB);
+ switch (type) {
+ case TCPC_TX_SOP:
+ case TCPC_TX_SOP_PRIME:
+ case TCPC_TX_SOP_PRIME_PRIME:
+ case TCPC_TX_SOP_DEBUG_PRIME:
+ case TCPC_TX_SOP_DEBUG_PRIME_PRIME:
+ info = type + 1;
+ cmd = USBC_TXCMD_MSG;
break;
- case USBC_RSLT_SRC_1_5A:
- typec_set_pwr_opmode(wcove->port, TYPEC_PWR_MODE_1_5A);
+ case TCPC_TX_HARD_RESET:
+ cmd = USBC_TXCMD_HR;
break;
- case USBC_RSLT_SRC_3_0A:
- typec_set_pwr_opmode(wcove->port, TYPEC_PWR_MODE_3_0A);
+ case TCPC_TX_CABLE_RESET:
+ cmd = USBC_TXCMD_CR;
break;
- case USBC_RSLT_SNK:
- role = TYPEC_SOURCE;
+ case TCPC_TX_BIST_MODE_2:
+ cmd = USBC_TXCMD_BIST;
break;
- case USBC_RSLT_DEBUG_ACC:
- partner.accessory = TYPEC_ACCESSORY_DEBUG;
+ default:
+ return -EINVAL;
+ }
+
+ /* NOTE Setting maximum number of retries (7) */
+ ret = regmap_write(wcove->regmap, USBC_TXINFO,
+ info | USBC_TXINFO_RETRIES(7));
+ if (ret)
+ return ret;
+
+ return regmap_write(wcove->regmap, USBC_TXCMD, cmd | USBC_TXCMD_START);
+}
+
+static int wcove_start_drp_toggling(struct tcpc_dev *tcpc,
+ enum typec_cc_status cc)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+ unsigned int usbc_ctrl;
+
+ usbc_ctrl = USBC_CONTROL1_MODE_DRP | USBC_CONTROL1_DRPTOGGLE_RANDOM;
+
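+ /* Select the Rp current source advertisement from the requested cc value */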
+ switch (cc) {
+ case TYPEC_CC_RP_1_5:
+ usbc_ctrl |= USBC_CONTROL1_CURSRC_UA_180;
break;
- case USBC_RSLT_AUDIO_ACC:
- partner.accessory = TYPEC_ACCESSORY_AUDIO;
+ case TYPEC_CC_RP_3_0:
+ usbc_ctrl |= USBC_CONTROL1_CURSRC_UA_330;
break;
default:
- dev_WARN(wcove->dev, "%s Undefined result\n", __func__);
- goto err;
+ usbc_ctrl |= USBC_CONTROL1_CURSRC_UA_80;
+ break;
}
- if (role == TYPEC_SINK) {
- wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_DEVICE);
- typec_set_data_role(wcove->port, TYPEC_DEVICE);
- typec_set_pwr_role(wcove->port, TYPEC_SINK);
- } else {
- wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_HOST);
- typec_set_pwr_role(wcove->port, TYPEC_SOURCE);
- typec_set_data_role(wcove->port, TYPEC_HOST);
+ return regmap_write(wcove->regmap, USBC_CONTROL1, usbc_ctrl);
+}
+
+static int wcove_read_rx_buffer(struct wcove_typec *wcove, void *msg)
+{
+ unsigned int info;
+ int ret;
+ int i;
+
+ ret = regmap_read(wcove->regmap, USBC_RXINFO, &info);
+ if (ret)
+ return ret;
+
+ /* FIXME: Check that USBC_RXINFO_RXBYTES(info) matches the header */
+
+ for (i = 0; i < USBC_RXINFO_RXBYTES(info); i++) {
+ ret = regmap_read(wcove->regmap, USBC_RX_DATA + i, msg + i);
+ if (ret)
+ return ret;
}
- wcove->partner = typec_register_partner(wcove->port, &partner);
- if (!wcove->partner)
- dev_err(wcove->dev, "failed register partner\n");
-out:
- /* If either CC pins is requesting VCONN, we turn it on */
- if ((cc1_ctrl & USBC_CC_CTRL_VCONN_EN) ||
- (cc2_ctrl & USBC_CC_CTRL_VCONN_EN))
- wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VCONN, true);
- else
+ return regmap_write(wcove->regmap, USBC_RXSTATUS,
+ USBC_RXSTATUS_RXCLEAR);
+}
+
+static irqreturn_t wcove_typec_irq(int irq, void *data)
+{
+ struct wcove_typec *wcove = data;
+ unsigned int usbc_irq1 = 0;
+ unsigned int usbc_irq2 = 0;
+ unsigned int cc1ctrl;
+ int ret;
+
+ mutex_lock(&wcove->lock);
+
+ /* Read.. */
+ ret = regmap_read(wcove->regmap, USBC_IRQ1, &usbc_irq1);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(wcove->regmap, USBC_IRQ2, &usbc_irq2);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(wcove->regmap, USBC_CC1_CTRL, &cc1ctrl);
+ if (ret)
+ goto err;
+
+ if (!wcove->tcpm)
+ goto err;
+
+ /* ..check.. */
+ if (usbc_irq1 & USBC_IRQ1_OVERTEMP) {
+ dev_err(wcove->dev, "VCONN Switch Over Temperature!\n");
+ wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VCONN, false);
+ /* REVISIT: Report an error? */
+ }
+
+ if (usbc_irq1 & USBC_IRQ1_SHORT) {
+ dev_err(wcove->dev, "VCONN Switch Short Circuit!\n");
wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VCONN, false);
+ /* REVISIT: Report an error? */
+ }
+
+ if (wcove->vbus != !!(cc1ctrl & USBC_CC_CTRL_VBUSOK))
+ tcpm_vbus_change(wcove->tcpm);
+
+ /* REVISIT: See if tcpm code can be made to consider Type-C HW FSMs */
+ if (usbc_irq2 & USBC_IRQ2_CC_CHANGE)
+ tcpm_cc_change(wcove->tcpm);
+
+ if (usbc_irq2 & USBC_IRQ2_RX_PD) {
+ unsigned int status;
+
+ /*
+ * FIXME: Need to check if TX is ongoing and report
+ * TX_DISCARDED if needed?
+ */
+
+ ret = regmap_read(wcove->regmap, USBC_RXSTATUS, &status);
+ if (ret)
+ goto err;
+
+ /* Flush all buffers */
+ while (status & USBC_RXSTATUS_RXDATA) {
+ struct pd_message msg;
+
+ ret = wcove_read_rx_buffer(wcove, &msg);
+ if (ret) {
+ dev_err(wcove->dev, "%s: RX read failed\n",
+ __func__);
+ goto err;
+ }
+
+ tcpm_pd_receive(wcove->tcpm, &msg);
+
+ ret = regmap_read(wcove->regmap, USBC_RXSTATUS,
+ &status);
+ if (ret)
+ goto err;
+ }
+ }
+
+ if (usbc_irq2 & USBC_IRQ2_RX_HR)
+ tcpm_pd_hard_reset(wcove->tcpm);
+
+ /* REVISIT: if (usbc_irq2 & USBC_IRQ2_RX_CR) */
+
+ if (usbc_irq2 & USBC_IRQ2_TX_SUCCESS)
+ tcpm_pd_transmit_complete(wcove->tcpm, TCPC_TX_SUCCESS);
+
+ if (usbc_irq2 & USBC_IRQ2_TX_FAIL)
+ tcpm_pd_transmit_complete(wcove->tcpm, TCPC_TX_FAILED);
- /* Relying on the FSM to know when we need to drive VBUS. */
- wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VBUS,
- !!(status2 & USBC_STATUS2_VBUS_REQ));
err:
+ /* ..and clear. */
+ if (usbc_irq1) {
+ ret = regmap_write(wcove->regmap, USBC_IRQ1, usbc_irq1);
+ if (ret)
+ dev_WARN(wcove->dev, "%s failed to clear IRQ1\n",
+ __func__);
+ }
+
+ if (usbc_irq2) {
+ ret = regmap_write(wcove->regmap, USBC_IRQ2, usbc_irq2);
+ if (ret)
+ dev_WARN(wcove->dev, "%s failed to clear IRQ2\n",
+ __func__);
+ }
+
/* REVISIT: Clear WhiskeyCove CHGR Type-C interrupt */
regmap_write(wcove->regmap, WCOVE_CHGRIRQ0, BIT(5));
@@ -288,11 +547,41 @@ err:
return IRQ_HANDLED;
}
+/*
+ * The following power levels should be safe to use with the Joule board.
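+ * Source: one fixed 5V/1.5A PDO; sink: fixed, battery and variable PDOs up to 12V/3A.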
+ */
+static const u32 src_pdo[] = {
+ PDO_FIXED(5000, 1500, PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP |
+ PDO_FIXED_USB_COMM),
+};
+
+static const u32 snk_pdo[] = {
+ PDO_FIXED(12000, 3000, PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP |
+ PDO_FIXED_USB_COMM),
+ PDO_BATT(4750, 12000, 15000),
+ PDO_VAR(4750, 12000, 3000),
+};
+
+static struct tcpc_config wcove_typec_config = {
+ .src_pdo = src_pdo,
+ .nr_src_pdo = ARRAY_SIZE(src_pdo),
+ .snk_pdo = snk_pdo,
+ .nr_snk_pdo = ARRAY_SIZE(snk_pdo),
+
+ .max_snk_mv = 12000,
+ .max_snk_ma = 3000,
+ .max_snk_mw = 36000,
+ .operating_snk_mw = 15000,
+
+ .type = TYPEC_PORT_DRP,
+ .default_role = TYPEC_SINK,
+};
+
static int wcove_typec_probe(struct platform_device *pdev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
struct wcove_typec *wcove;
- unsigned int val;
+ int irq;
int ret;
wcove = devm_kzalloc(&pdev->dev, sizeof(*wcove), GFP_KERNEL);
@@ -303,43 +592,47 @@ static int wcove_typec_probe(struct platform_device *pdev)
wcove->dev = &pdev->dev;
wcove->regmap = pmic->regmap;
- ret = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
+ irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
platform_get_irq(pdev, 0));
- if (ret < 0)
- return ret;
+ if (irq < 0)
+ return irq;
- ret = devm_request_threaded_irq(&pdev->dev, ret, NULL,
- wcove_typec_irq, IRQF_ONESHOT,
- "wcove_typec", wcove);
+ ret = guid_parse(WCOVE_DSM_UUID, &wcove->guid);
if (ret)
return ret;
- if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &guid, 0, 0x1f)) {
+ if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &wcove->guid, 0, 0x1f)) {
dev_err(&pdev->dev, "Missing _DSM functions\n");
return -ENODEV;
}
- wcove->cap.type = TYPEC_PORT_DRP;
- wcove->cap.revision = USB_TYPEC_REV_1_1;
- wcove->cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ wcove->tcpc.init = wcove_init;
+ wcove->tcpc.get_vbus = wcove_get_vbus;
+ wcove->tcpc.set_vbus = wcove_set_vbus;
+ wcove->tcpc.set_cc = wcove_set_cc;
+ wcove->tcpc.get_cc = wcove_get_cc;
+ wcove->tcpc.set_polarity = wcove_set_polarity;
+ wcove->tcpc.set_vconn = wcove_set_vconn;
+ wcove->tcpc.set_current_limit = wcove_set_current_limit;
+ wcove->tcpc.start_drp_toggling = wcove_start_drp_toggling;
- /* Make sure the PD PHY is disabled until USB PD is available */
- regmap_read(wcove->regmap, USBC_CONTROL3, &val);
- regmap_write(wcove->regmap, USBC_CONTROL3, val | USBC_CONTROL3_PD_DIS);
+ wcove->tcpc.set_pd_rx = wcove_set_pd_rx;
+ wcove->tcpc.set_roles = wcove_set_roles;
+ wcove->tcpc.pd_transmit = wcove_pd_transmit;
- /* DRP mode without accessory support */
- regmap_read(wcove->regmap, USBC_CONTROL1, &val);
- regmap_write(wcove->regmap, USBC_CONTROL1, USBC_CONTROL1_MODE_DRP(val));
+ wcove->tcpc.config = &wcove_typec_config;
- wcove->port = typec_register_port(&pdev->dev, &wcove->cap);
- if (!wcove->port)
- return -ENODEV;
+ wcove->tcpm = tcpm_register_port(wcove->dev, &wcove->tcpc);
+ if (IS_ERR(wcove->tcpm))
+ return PTR_ERR(wcove->tcpm);
- /* Unmask everything */
- regmap_read(wcove->regmap, USBC_IRQMASK1, &val);
- regmap_write(wcove->regmap, USBC_IRQMASK1, val & ~USBC_IRQMASK1_ALL);
- regmap_read(wcove->regmap, USBC_IRQMASK2, &val);
- regmap_write(wcove->regmap, USBC_IRQMASK2, val & ~USBC_IRQMASK2_ALL);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wcove_typec_irq, IRQF_ONESHOT,
+ "wcove_typec", wcove);
+ if (ret) {
+ tcpm_unregister_port(wcove->tcpm);
+ return ret;
+ }
platform_set_drvdata(pdev, wcove);
return 0;
@@ -356,8 +649,8 @@ static int wcove_typec_remove(struct platform_device *pdev)
regmap_read(wcove->regmap, USBC_IRQMASK2, &val);
regmap_write(wcove->regmap, USBC_IRQMASK2, val | USBC_IRQMASK2_ALL);
- typec_unregister_partner(wcove->partner);
- typec_unregister_port(wcove->port);
+ tcpm_unregister_port(wcove->tcpm);
+
return 0;
}
diff --git a/drivers/usb/typec/ucsi/trace.c b/drivers/usb/typec/ucsi/trace.c
index 006f65c72a34..d9a6ff6e673c 100644
--- a/drivers/usb/typec/ucsi/trace.c
+++ b/drivers/usb/typec/ucsi/trace.c
@@ -1,2 +1,3 @@
+// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
#include "trace.h"
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 714c5bcedf2b..79046fe66426 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Type-C Connector System Software Interface driver
*
* Copyright (C) 2017, Intel Corporation
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/completion.h>
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index cabd47612b0a..44eb4e1ea817 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UCSI ACPI driver
*
* Copyright (C) 2017, Intel Corporation
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index bb0bd732e29a..26ca0ec01fd5 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -1,15 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Skeleton driver - 2.2
*
* Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
* This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
* but has been rewritten to be easier to read and use.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
index 910f027773aa..14a72357800a 100644
--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#ifndef __USBIP_STUB_H
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index c653ce533430..a3df8ee82faf 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/device.h>
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 7170404e8979..4f48b306713f 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/string.h>
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 191b176ffedf..536e037f541f 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <asm/byteorder.h>
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index be50cef645d8..b18bce96c212 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/kthread.h>
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index 2281f3562870..f7978933b402 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Samsung Electronics
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <asm/byteorder.h>
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index 3050fc99a417..e5de35c8c505 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Samsung Electronics
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#ifndef __USBIP_COMMON_H
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index f1635662c299..5b4c0864ad92 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -1,21 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015 Nobuo Iwata
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/kthread.h>
diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
index 5cfb59e98e44..5659dce1526e 100644
--- a/drivers/usb/usbip/vhci.h
+++ b/drivers/usb/usbip/vhci.h
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015 Nobuo Iwata
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#ifndef __USBIP_VHCI_H
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 11b9a22799cc..713e94170963 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -1,21 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Nobuo Iwata
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/init.h>
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index ef2f2d5ca6b2..90577e8b2282 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/kthread.h>
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index 1b9f60a22e0b..e78f7472cac4 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -1,21 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Nobuo Iwata
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/kthread.h>
diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
index 3e7878fe2fd4..d625a2ff4b71 100644
--- a/drivers/usb/usbip/vhci_tx.c
+++ b/drivers/usb/usbip/vhci_tx.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/kthread.h>
diff --git a/drivers/usb/usbip/vudc.h b/drivers/usb/usbip/vudc.h
index 25e01b09c4c3..cf968192e59f 100644
--- a/drivers/usb/usbip/vudc.h
+++ b/drivers/usb/usbip/vudc.h
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __USBIP_VUDC_H
@@ -104,7 +92,7 @@ struct vudc {
struct usbip_device ud;
struct transfer_timer tr_timer;
- struct timeval start_time;
+ struct timespec64 start_time;
struct list_head urb_queue;
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
index 968471b62cbc..1b9a4f87db59 100644
--- a/drivers/usb/usbip/vudc_dev.c
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/device.h>
@@ -135,16 +123,15 @@ struct vep *vudc_find_endpoint(struct vudc *udc, u8 address)
/* gadget ops */
-/* FIXME - this will probably misbehave when suspend/resume is added */
static int vgadget_get_frame(struct usb_gadget *_gadget)
{
- struct timeval now;
+ struct timespec64 now;
struct vudc *udc = usb_gadget_to_vudc(_gadget);
- do_gettimeofday(&now);
+ ktime_get_ts64(&now);
return ((now.tv_sec - udc->start_time.tv_sec) * 1000 +
- (now.tv_usec - udc->start_time.tv_usec) / 1000)
- % 0x7FF;
+ (now.tv_nsec - udc->start_time.tv_nsec) / NSEC_PER_MSEC)
+ & 0x7FF;
}
static int vgadget_set_selfpowered(struct usb_gadget *_gadget, int value)
diff --git a/drivers/usb/usbip/vudc_main.c b/drivers/usb/usbip/vudc_main.c
index 9e655714e389..3fc22037a82f 100644
--- a/drivers/usb/usbip/vudc_main.c
+++ b/drivers/usb/usbip/vudc_main.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/device.h>
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
index e429b59f6f8a..df1e30989148 100644
--- a/drivers/usb/usbip/vudc_rx.c
+++ b/drivers/usb/usbip/vudc_rx.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <net/sock.h>
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index 0f98f2c7475f..1adc8af292ec 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/device.h>
@@ -161,7 +149,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
udc->ud.status = SDEV_ST_USED;
spin_unlock_irq(&udc->ud.lock);
- do_gettimeofday(&udc->start_time);
+ ktime_get_ts64(&udc->start_time);
v_start_timer(udc);
udc->connected = 1;
} else {
diff --git a/drivers/usb/usbip/vudc_transfer.c b/drivers/usb/usbip/vudc_transfer.c
index 4cfd475ee865..c9db846ee4f6 100644
--- a/drivers/usb/usbip/vudc_transfer.c
+++ b/drivers/usb/usbip/vudc_transfer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* Copyright (C) 2015-2016 Samsung Electronics
@@ -6,19 +7,6 @@
* Based on dummy_hcd.c, which is:
* Copyright (C) 2003 David Brownell
* Copyright (C) 2003-2005 Alan Stern
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/usb.h>
@@ -311,9 +299,9 @@ top:
return sent;
}
-static void v_timer(unsigned long _vudc)
+static void v_timer(struct timer_list *t)
{
- struct vudc *udc = (struct vudc *) _vudc;
+ struct vudc *udc = from_timer(udc, t, tr_timer.timer);
struct transfer_timer *timer = &udc->tr_timer;
struct urbp *urb_p, *tmp;
unsigned long flags;
@@ -459,7 +447,7 @@ void v_init_timer(struct vudc *udc)
{
struct transfer_timer *t = &udc->tr_timer;
- setup_timer(&t->timer, v_timer, (unsigned long) udc);
+ timer_setup(&t->timer, v_timer, 0);
t->state = VUDC_TR_STOPPED;
}
diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c
index 234661782fa0..1440ae0919ec 100644
--- a/drivers/usb/usbip/vudc_tx.c
+++ b/drivers/usb/usbip/vudc_tx.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <net/sock.h>
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index aa4e440e9975..222228c5c1e1 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB - Cable Based Association
*
@@ -6,21 +7,6 @@
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* WUSB devices have to be paired (associated in WUSB lingo) so
* that they can connect to the system.
*
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 062c205f0046..4c00be2d1993 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Ultra Wide Band
* AES-128 CCM Encryption
@@ -5,21 +6,6 @@
* Copyright (C) 2007 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* We don't do any encryption here; we use the Linux Kernel's AES-128
* crypto modules to construct keys and payload blocks in a way
* defined by WUSB1.0[6]. Check the errata, as typos are patched
diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c
index 78212f8180ce..85a1acf3a729 100644
--- a/drivers/usb/wusbcore/dev-sysfs.c
+++ b/drivers/usb/wusbcore/dev-sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WUSB devices
* sysfs bindings
@@ -5,21 +6,6 @@
* Copyright (C) 2007 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* Get them out of the way...
*/
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index bf9551735938..fcb06aef2675 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
* Device Connect handling
@@ -5,21 +6,6 @@
* Copyright (C) 2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* FIXME: docs
* FIXME: this file needs to be broken up, it's grown too big
*
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
index 3f485df96226..acce0d551eb2 100644
--- a/drivers/usb/wusbcore/mmc.c
+++ b/drivers/usb/wusbcore/mmc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
* MMC (Microscheduled Management Command) handling
@@ -5,21 +6,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* WUIEs and MMC IEs...well, they are almost the same at the end. MMC
* IEs are Wireless USB IEs that go into the MMC period...[what is
* that? look in Design-overview.txt].
diff --git a/drivers/usb/wusbcore/pal.c b/drivers/usb/wusbcore/pal.c
index 090f27371a8f..30f569131471 100644
--- a/drivers/usb/wusbcore/pal.c
+++ b/drivers/usb/wusbcore/pal.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* UWB Protocol Adaptation Layer (PAL) glue.
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "wusbhc.h"
diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/usb/wusbcore/reservation.c
index 7b1b2e2fb673..6dcfc6825f55 100644
--- a/drivers/usb/wusbcore/reservation.c
+++ b/drivers/usb/wusbcore/reservation.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WUSB cluster reservation management
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/uwb.h>
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c
index a082fe62b1f0..20c08cd9dcbf 100644
--- a/drivers/usb/wusbcore/rh.c
+++ b/drivers/usb/wusbcore/rh.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* Root Hub operations
@@ -6,21 +7,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* We fake a root hub that has fake ports (as many as simultaneous
* devices the Wireless USB Host Controller can deal with). For each
* port we keep a state in @wusbhc->port[index] identical to the one
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 170f2c38de9b..33d2f5d7f33b 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* Security support: encryption enablement, etc
@@ -5,21 +6,6 @@
* Copyright (C) 2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* FIXME: docs
*/
#include <linux/types.h>
@@ -28,6 +14,7 @@
#include <linux/random.h>
#include <linux/export.h>
#include "wusbhc.h"
+#include <asm/unaligned.h>
static void wusbhc_gtk_rekey_work(struct work_struct *work);
@@ -367,7 +354,6 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
struct usb_device *usb_dev = wusb_dev->usb_dev;
struct device *dev = &usb_dev->dev;
u32 tkid;
- __le32 tkid_le;
struct usb_handshake *hs;
struct aes_ccm_nonce ccm_n;
u8 mic[8];
@@ -385,11 +371,10 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
goto error_dev_set_encryption;
tkid = wusbhc_next_tkid(wusbhc, wusb_dev);
- tkid_le = cpu_to_le32(tkid);
hs[0].bMessageNumber = 1;
hs[0].bStatus = 0;
- memcpy(hs[0].tTKID, &tkid_le, sizeof(hs[0].tTKID));
+ put_unaligned_le32(tkid, hs[0].tTKID);
hs[0].bReserved = 0;
memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));
get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce));
@@ -441,7 +426,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
/* Setup the CCM nonce */
memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */
- memcpy(ccm_n.tkid, &tkid_le, sizeof(ccm_n.tkid));
+ put_unaligned_le32(tkid, ccm_n.tkid);
ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;
ccm_n.dest_addr.data[0] = wusb_dev->addr;
ccm_n.dest_addr.data[1] = 0;
@@ -472,7 +457,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
/* Send Handshake3 */
hs[2].bMessageNumber = 3;
hs[2].bStatus = 0;
- memcpy(hs[2].tTKID, &tkid_le, sizeof(hs[2].tTKID));
+ put_unaligned_le32(tkid, hs[2].tTKID);
hs[2].bReserved = 0;
memcpy(hs[2].CDID, &wusb_dev->cdid, sizeof(hs[2].CDID));
memcpy(hs[2].nonce, hs[0].nonce, sizeof(hs[2].nonce));
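The security.c hunks drop the __le32 temporary in favour of put_unaligned_le32() (hence the new <asm/unaligned.h> include), which stores a CPU-endian u32 as little-endian bytes at a destination that need not be aligned. A standalone illustration of the two idioms for a 4-byte field; the buffer and function names are made up for the example:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* old idiom: build an LE temporary, then memcpy() it into place */
static void store_le32_old(uint8_t *dst, uint32_t val)
{
	uint32_t le = val;	/* kernel: cpu_to_le32(val); shown as a no-op for LE hosts */

	memcpy(dst, &le, sizeof(le));
}

/* what put_unaligned_le32() guarantees, spelled out byte by byte */
static void store_le32_new(uint8_t *dst, uint32_t val)
{
	dst[0] = val & 0xff;
	dst[1] = (val >> 8) & 0xff;
	dst[2] = (val >> 16) & 0xff;
	dst[3] = (val >> 24) & 0xff;
}

int main(void)
{
	uint8_t a[4], b[4];

	store_le32_old(a, 0x11223344);
	store_le32_new(b, 0x11223344);
	printf("%d\n", memcmp(a, b, 4) == 0);	/* 1 on little-endian hosts */
	return 0;
}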
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index d01496fd27fe..6827075fb8a1 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wire Adapter Host Controller Driver
* Common items to HWA and DWA based HCDs
@@ -5,21 +6,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* FIXME: docs
*/
#include <linux/slab.h>
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index edc7267157f3..ec90fff21deb 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* HWA Host Controller Driver
* Wire Adapter Control/Data Streaming Iface (WUSB1.0[8])
@@ -5,21 +6,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* This driver implements a USB Host Controller (struct usb_hcd) for a
* Wireless USB Host Controller based on the Wireless USB 1.0
* Host-Wire-Adapter specification (in layman terms, a USB-dongle that
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c
index e3819fc182b0..9fdcb6b84abf 100644
--- a/drivers/usb/wusbcore/wa-nep.c
+++ b/drivers/usb/wusbcore/wa-nep.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
* Notification EndPoint support
@@ -5,21 +6,6 @@
* Copyright (C) 2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* This part takes care of getting the notification from the hw
* only and dispatching through wusbwad into
* wa_notif_dispatch. Handling is done there.
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index c7ecdbe19a32..d0f1a6698460 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WUSB Wire Adapter
* rpipe management
@@ -5,21 +6,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* FIXME: docs
*
* RPIPE
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index e70322b1dd02..7fca4e7e556d 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WUSB Wire Adapter
* Data transfer and URB enqueuing
@@ -5,21 +6,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* How transfers work: get a buffer, break it up in segments (segment
* size is a multiple of the maxpacket size). For each segment issue a
* segment request (struct wa_xfer_*), then send the data buffer if
@@ -2156,6 +2142,7 @@ static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
* do not increment RPIPE avail for the WA_SEG_DELAYED case
* since it has not been submitted to the RPIPE.
*/
+ /* fall through */
case WA_SEG_DELAYED:
xfer->segs_done++;
current_seg->status = status;
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
index 5338e42533c8..e5ba6140c1ba 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* sysfs glue, wusbcore module support and life cycle management
@@ -6,21 +7,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* Creation/destruction of wusbhc is split in two parts: the part that
* doesn't require the HCD to be added (wusbhc_{create,destroy}) and
* the one that does (phase B, wusbhc_b_{create,destroy}).
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 8c5bd000739b..7681d796ca5b 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* Common infrastructure for WHCI and HWA WUSB-HC drivers
@@ -6,21 +7,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* This driver implements parts common to all Wireless USB Host
* Controllers (struct wusbhc, embedding a struct usb_hcd) and is used
* by:
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 5628fe114347..115a36f6f403 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -808,6 +808,7 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
{
__le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
offset + PCI_EXP_DEVCTL);
+ int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
if (count < 0)
@@ -833,6 +834,27 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
pci_try_reset_function(vdev->pdev);
}
+ /*
+ * MPS is virtualized to the user, writes do not change the physical
+ * register since determining a proper MPS value requires a system wide
+ * device view. The MRRS is largely independent of MPS, but since the
+ * user does not have that system-wide view, they might set a safe, but
+ * inefficiently low value. Here we allow writes through to hardware,
+ * but we set the floor to the physical device MPS setting, so that
+ * we can at least use full TLPs, as defined by the MPS value.
+ *
+ * NB, if any devices actually depend on an artificially low MRRS
+ * setting, this will need to be revisited, perhaps with a quirk
+ * through pcie_set_readrq().
+ */
+ if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
+ readrq = 128 <<
+ ((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
+ readrq = max(readrq, pcie_get_mps(vdev->pdev));
+
+ pcie_set_readrq(vdev->pdev, readrq);
+ }
+
return count;
}
@@ -849,11 +871,14 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
/*
* Allow writes to device control fields, except devctl_phantom,
- * which could confuse IOMMU, and the ARI bit in devctl2, which
- * is set at probe time. FLR gets virtualized via our writefn.
+ * which could confuse IOMMU, MPS, which can break communication
+ * with other physical devices, and the ARI bit in devctl2, which
+ * is set at probe time. FLR and MRRS get virtualized via our
+ * writefn.
*/
p_setw(perm, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR, ~PCI_EXP_DEVCTL_PHANTOM);
+ PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
+ PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
return 0;
}
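The MRRS handling added above reads the Device Control Max_Read_Request_Size field (bits 14:12, PCI_EXP_DEVCTL_READRQ == 0x7000), where an encoding of n means 128 << n bytes (0 -> 128 B through 5 -> 4096 B), clamps it to no less than the physical MPS, and writes it back via pcie_set_readrq(). A small standalone check of that arithmetic; the devctl and mps values are illustrative:

#include <stdio.h>

#define PCI_EXP_DEVCTL_READRQ	0x7000	/* Max_Read_Request_Size, bits 14:12 */

/* decode the encoded MRRS field into bytes */
static int readrq_bytes(unsigned int devctl)
{
	return 128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}

int main(void)
{
	unsigned int devctl = 0x2000;	/* encoding 2 -> 512 bytes */
	int mps = 256;			/* hypothetical physical MPS */
	int readrq = readrq_bytes(devctl);

	if (readrq < mps)	/* the driver uses max(readrq, pcie_get_mps(pdev)) */
		readrq = mps;

	printf("MRRS floor-clamped to %d bytes\n", readrq);	/* 512 */
	return 0;
}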
diff --git a/drivers/vfio/platform/reset/Kconfig b/drivers/vfio/platform/reset/Kconfig
index 70cccc582bee..392e3c09def0 100644
--- a/drivers/vfio/platform/reset/Kconfig
+++ b/drivers/vfio/platform/reset/Kconfig
@@ -13,3 +13,12 @@ config VFIO_PLATFORM_AMDXGBE_RESET
Enables the VFIO platform driver to handle reset for AMD XGBE
If you don't know what to do here, say N.
+
+config VFIO_PLATFORM_BCMFLEXRM_RESET
+ tristate "VFIO support for Broadcom FlexRM reset"
+ depends on VFIO_PLATFORM && (ARCH_BCM_IPROC || COMPILE_TEST)
+ default ARCH_BCM_IPROC
+ help
+ Enables the VFIO platform driver to handle reset for Broadcom FlexRM
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/platform/reset/Makefile b/drivers/vfio/platform/reset/Makefile
index 90ba20b646da..57abd4f0ac5b 100644
--- a/drivers/vfio/platform/reset/Makefile
+++ b/drivers/vfio/platform/reset/Makefile
@@ -6,3 +6,4 @@ ccflags-y += -Idrivers/vfio/platform
obj-$(CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET) += vfio-platform-calxedaxgmac.o
obj-$(CONFIG_VFIO_PLATFORM_AMDXGBE_RESET) += vfio-platform-amdxgbe.o
+obj-$(CONFIG_VFIO_PLATFORM_BCMFLEXRM_RESET) += vfio_platform_bcmflexrm.o
diff --git a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
new file mode 100644
index 000000000000..d45c3be71198
--- /dev/null
+++ b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This driver provides reset support for Broadcom FlexRM ring manager
+ * to VFIO platform.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "vfio_platform_private.h"
+
+/* FlexRM configuration */
+#define RING_REGS_SIZE 0x10000
+#define RING_VER_MAGIC 0x76303031
+
+/* Per-Ring register offsets */
+#define RING_VER 0x000
+#define RING_CONTROL 0x034
+#define RING_FLUSH_DONE 0x038
+
+/* Register RING_CONTROL fields */
+#define CONTROL_FLUSH_SHIFT 5
+
+/* Register RING_FLUSH_DONE fields */
+#define FLUSH_DONE_MASK 0x1
+
+static int vfio_platform_bcmflexrm_shutdown(void __iomem *ring)
+{
+ unsigned int timeout;
+
+ /* Disable/inactivate ring */
+ writel_relaxed(0x0, ring + RING_CONTROL);
+
+ /* Set ring flush state */
+ timeout = 1000; /* timeout of 1s */
+ writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), ring + RING_CONTROL);
+ do {
+ if (readl_relaxed(ring + RING_FLUSH_DONE) &
+ FLUSH_DONE_MASK)
+ break;
+ mdelay(1);
+ } while (--timeout);
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ /* Clear ring flush state */
+ timeout = 1000; /* timeout of 1s */
+ writel_relaxed(0x0, ring + RING_CONTROL);
+ do {
+ if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
+ FLUSH_DONE_MASK))
+ break;
+ mdelay(1);
+ } while (--timeout);
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int vfio_platform_bcmflexrm_reset(struct vfio_platform_device *vdev)
+{
+ void __iomem *ring;
+ int rc = 0, ret = 0, ring_num = 0;
+ struct vfio_platform_region *reg = &vdev->regions[0];
+
+ /* Map FlexRM ring registers if not mapped */
+ if (!reg->ioaddr) {
+ reg->ioaddr = ioremap_nocache(reg->addr, reg->size);
+ if (!reg->ioaddr)
+ return -ENOMEM;
+ }
+
+ /* Discover and shutdown each FlexRM ring */
+ for (ring = reg->ioaddr;
+ ring < (reg->ioaddr + reg->size); ring += RING_REGS_SIZE) {
+ if (readl_relaxed(ring + RING_VER) == RING_VER_MAGIC) {
+ rc = vfio_platform_bcmflexrm_shutdown(ring);
+ if (rc) {
+ dev_warn(vdev->device,
+ "FlexRM ring%d shutdown error %d\n",
+ ring_num, rc);
+ ret |= rc;
+ }
+ ring_num++;
+ }
+ }
+
+ return ret;
+}
+
+module_vfio_reset_handler("brcm,iproc-flexrm-mbox",
+ vfio_platform_bcmflexrm_reset);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
+MODULE_DESCRIPTION("Reset support for Broadcom FlexRM VFIO platform device");
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index f5a86f651f38..2bc3705a99bd 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -665,7 +665,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
{
struct vfio_group *group = data;
struct vfio_device *device;
- struct device_driver *drv = ACCESS_ONCE(dev->driver);
+ struct device_driver *drv = READ_ONCE(dev->driver);
struct vfio_unbound_dev *unbound;
int ret = -EINVAL;
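ACCESS_ONCE() is being retired tree-wide in favour of READ_ONCE()/WRITE_ONCE(), which handle all object sizes and state the intent (exactly one load, not merged or re-fetched by the compiler) more clearly. A rough userspace model of the scalar case; the real definition lives in the kernel's <linux/compiler.h>:

#include <stdio.h>

/* simplified: force a single load through a volatile-qualified view of x */
#define MY_READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

int main(void)
{
	int flag = 1;
	int v = MY_READ_ONCE(flag);	/* compiler must emit a real load here */

	printf("%d\n", v);
	return 0;
}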
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 63112c36ab2d..759a5bdd40e1 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -507,6 +507,8 @@ static int tce_iommu_clear(struct tce_container *container,
enum dma_data_direction direction;
for ( ; pages; --pages, ++entry) {
+ cond_resched();
+
direction = DMA_NONE;
oldhpa = 0;
ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 92155cce926d..e30e29ae4819 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -767,6 +767,9 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
return -EINVAL;
if (!unmap->size || unmap->size & mask)
return -EINVAL;
+ if (unmap->iova + unmap->size < unmap->iova ||
+ unmap->size > SIZE_MAX)
+ return -EINVAL;
WARN_ON(mask & PAGE_MASK);
again:
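The new vfio_dma_do_unmap() check rejects unmap requests whose iova + size wraps around the top of the address space; with unsigned arithmetic, wraparound is detectable because the sum ends up smaller than the starting iova. A tiny standalone illustration of that test (not driver code):

#include <stdint.h>
#include <stdio.h>

/* 1 if [iova, iova + size) wraps past the end of the 64-bit space */
static int range_wraps(uint64_t iova, uint64_t size)
{
	return iova + size < iova;
}

int main(void)
{
	printf("%d\n", range_wraps(0x1000, 0x2000));			/* 0 */
	printf("%d\n", range_wraps(UINT64_MAX - 0xfff, 0x2000));	/* 1 */
	return 0;
}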
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 68677d930e20..8d626d7c2e7e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -471,6 +471,7 @@ static void handle_tx(struct vhost_net *net)
goto out;
vhost_disable_notify(&net->dev, vq);
+ vhost_net_disable_vq(net, vq);
hdr_size = nvq->vhost_hlen;
zcopy = nvq->ubufs;
@@ -556,6 +557,7 @@ static void handle_tx(struct vhost_net *net)
% UIO_MAXIOV;
}
vhost_discard_vq_desc(vq, 1);
+ vhost_net_enable_vq(net, vq);
break;
}
if (err != len)
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 046f6d280af5..35e929f132e8 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -929,7 +929,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
continue;
}
- tpg = ACCESS_ONCE(vs_tpg[*target]);
+ tpg = READ_ONCE(vs_tpg[*target]);
if (unlikely(!tpg)) {
/* Target does not exist, fail the request */
vhost_scsi_send_bad_target(vs, vq, head, out);
diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c
index a9e9cef20ed6..2b6c6aaf4e14 100644
--- a/drivers/video/backlight/ili922x.c
+++ b/drivers/video/backlight/ili922x.c
@@ -251,7 +251,7 @@ static int ili922x_write(struct spi_device *spi, u8 reg, u16 value)
struct spi_transfer xfer_regindex, xfer_regvalue;
unsigned char tbuf[CMD_BUFSIZE];
unsigned char rbuf[CMD_BUFSIZE];
- int ret, len = 0;
+ int ret;
memset(&xfer_regindex, 0, sizeof(struct spi_transfer));
memset(&xfer_regvalue, 0, sizeof(struct spi_transfer));
@@ -273,7 +273,6 @@ static int ili922x_write(struct spi_device *spi, u8 reg, u16 value)
ret = spi_sync(spi, &msg);
spi_message_init(&msg);
- len = 0;
tbuf[0] = set_tx_byte(START_BYTE(ili922x_id, START_RS_REG,
START_RW_WRITE));
tbuf[1] = set_tx_byte((value & 0xFF00) >> 8);
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 9bd17682655a..1c2289ddd555 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -79,14 +79,17 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
{
unsigned int lth = pb->lth_brightness;
- int duty_cycle;
+ u64 duty_cycle;
if (pb->levels)
duty_cycle = pb->levels[brightness];
else
duty_cycle = brightness;
- return (duty_cycle * (pb->period - lth) / pb->scale) + lth;
+ duty_cycle *= pb->period - lth;
+ do_div(duty_cycle, pb->scale);
+
+ return duty_cycle + lth;
}
static int pwm_backlight_update_status(struct backlight_device *bl)
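compute_duty_cycle() above is changed because the intermediate product levels[brightness] * (period - lth) can exceed INT_MAX for long PWM periods expressed in nanoseconds; the patch widens it to u64 and divides with do_div(). A standalone sketch of the same computation with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

/* duty = level * (period - lth) / scale + lth, carried out in 64 bits */
static uint32_t duty_cycle(uint32_t level, uint32_t period,
			   uint32_t lth, uint32_t scale)
{
	uint64_t duty = level;		/* widen before multiplying */

	duty *= period - lth;
	duty /= scale;			/* the kernel uses do_div() here */

	return (uint32_t)duty + lth;
}

int main(void)
{
	/* 255 * 10,000,000 ns = 2,550,000,000 > INT_MAX, so the widening matters */
	printf("%u\n", duty_cycle(255, 10000000, 0, 255));	/* 10000000 */
	return 0;
}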
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
index fd524ad860a5..380917c86276 100644
--- a/drivers/video/backlight/tps65217_bl.c
+++ b/drivers/video/backlight/tps65217_bl.c
@@ -239,8 +239,7 @@ tps65217_bl_parse_dt(struct platform_device *pdev)
}
if (!of_property_read_u32(node, "default-brightness", &val)) {
- if (val < 0 ||
- val > 100) {
+ if (val > 100) {
dev_err(&pdev->dev,
"invalid 'default-brightness' value in the device tree\n");
err = ERR_PTR(-EINVAL);
@@ -275,17 +274,9 @@ static int tps65217_bl_probe(struct platform_device *pdev)
struct tps65217_bl_pdata *pdata;
struct backlight_properties bl_props;
- if (tps->dev->of_node) {
- pdata = tps65217_bl_parse_dt(pdev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- } else {
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data provided\n");
- return -EINVAL;
- }
- }
+ pdata = tps65217_bl_parse_dt(pdev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
tps65217_bl = devm_kzalloc(&pdev->dev, sizeof(*tps65217_bl),
GFP_KERNEL);
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 7cc51223db1c..5dd284008630 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -511,7 +511,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
ca91cx42_bridge = image->parent;
/* Find pci_dev container of dev */
- if (ca91cx42_bridge->parent == NULL) {
+ if (!ca91cx42_bridge->parent) {
dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
return -EINVAL;
}
@@ -529,14 +529,12 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
image->kern_base = NULL;
kfree(image->bus_resource.name);
release_resource(&image->bus_resource);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
- if (image->bus_resource.name == NULL) {
+ if (!image->bus_resource.name) {
image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
- if (image->bus_resource.name == NULL) {
- dev_err(ca91cx42_bridge->parent, "Unable to allocate "
- "memory for resource name\n");
+ if (!image->bus_resource.name) {
retval = -ENOMEM;
goto err_name;
}
@@ -562,7 +560,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
image->kern_base = ioremap_nocache(
image->bus_resource.start, size);
- if (image->kern_base == NULL) {
+ if (!image->kern_base) {
dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
goto err_remap;
@@ -574,7 +572,7 @@ err_remap:
release_resource(&image->bus_resource);
err_resource:
kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
err_name:
return retval;
}
@@ -588,7 +586,7 @@ static void ca91cx42_free_resource(struct vme_master_resource *image)
image->kern_base = NULL;
release_resource(&image->bus_resource);
kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
@@ -1036,10 +1034,8 @@ static int ca91cx42_dma_list_add(struct vme_dma_list *list,
dev = list->parent->parent->parent;
/* XXX descriptor must be aligned on 64-bit boundaries */
- entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
- if (entry == NULL) {
- dev_err(dev, "Failed to allocate memory for dma resource "
- "structure\n");
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
retval = -ENOMEM;
goto err_mem;
}
@@ -1052,7 +1048,7 @@ static int ca91cx42_dma_list_add(struct vme_dma_list *list,
goto err_align;
}
- memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));
+ memset(&entry->descriptor, 0, sizeof(entry->descriptor));
if (dest->type == VME_DMA_VME) {
entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
@@ -1323,7 +1319,7 @@ static int ca91cx42_lm_set(struct vme_lm_resource *lm,
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i] != NULL) {
+ if (bridge->lm_callback[i]) {
mutex_unlock(&lm->mtx);
dev_err(dev, "Location monitor callback attached, "
"can't reset\n");
@@ -1432,7 +1428,7 @@ static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
}
/* Check that a callback isn't already attached */
- if (bridge->lm_callback[monitor] != NULL) {
+ if (bridge->lm_callback[monitor]) {
mutex_unlock(&lm->mtx);
dev_err(dev, "Existing callback attached\n");
return -EBUSY;
@@ -1567,7 +1563,7 @@ static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
&bridge->crcsr_bus);
- if (bridge->crcsr_kernel == NULL) {
+ if (!bridge->crcsr_kernel) {
dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
"image\n");
return -ENOMEM;
@@ -1618,21 +1614,15 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* We want to support more than one of each bridge so we need to
* dynamically allocate the bridge structure
*/
- ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
-
- if (ca91cx42_bridge == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
+ ca91cx42_bridge = kzalloc(sizeof(*ca91cx42_bridge), GFP_KERNEL);
+ if (!ca91cx42_bridge) {
retval = -ENOMEM;
goto err_struct;
}
vme_init_bridge(ca91cx42_bridge);
- ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
-
- if (ca91cx42_device == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
+ ca91cx42_device = kzalloc(sizeof(*ca91cx42_device), GFP_KERNEL);
+ if (!ca91cx42_device) {
retval = -ENOMEM;
goto err_driver;
}
@@ -1688,11 +1678,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add master windows to list */
for (i = 0; i < CA91C142_MAX_MASTER; i++) {
- master_image = kmalloc(sizeof(struct vme_master_resource),
- GFP_KERNEL);
- if (master_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "master resource structure\n");
+ master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
+ if (!master_image) {
retval = -ENOMEM;
goto err_master;
}
@@ -1706,7 +1693,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
VME_SUPER | VME_USER | VME_PROG | VME_DATA;
master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
memset(&master_image->bus_resource, 0,
- sizeof(struct resource));
+ sizeof(master_image->bus_resource));
master_image->kern_base = NULL;
list_add_tail(&master_image->list,
&ca91cx42_bridge->master_resources);
@@ -1714,11 +1701,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add slave windows to list */
for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
- slave_image = kmalloc(sizeof(struct vme_slave_resource),
- GFP_KERNEL);
- if (slave_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "slave resource structure\n");
+ slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
+ if (!slave_image) {
retval = -ENOMEM;
goto err_slave;
}
@@ -1741,11 +1725,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add dma engines to list */
for (i = 0; i < CA91C142_MAX_DMA; i++) {
- dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
- GFP_KERNEL);
- if (dma_ctrlr == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "dma resource structure\n");
+ dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
+ if (!dma_ctrlr) {
retval = -ENOMEM;
goto err_dma;
}
@@ -1762,10 +1743,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add location monitor to list */
- lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
- if (lm == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "location monitor resource structure\n");
+ lm = kmalloc(sizeof(*lm), GFP_KERNEL);
+ if (!lm) {
retval = -ENOMEM;
goto err_lm;
}
diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c
index 30b3acc93833..7d83691047f4 100644
--- a/drivers/vme/bridges/vme_fake.c
+++ b/drivers/vme/bridges/vme_fake.c
@@ -409,7 +409,7 @@ static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr,
/* Each location monitor covers 8 bytes */
if (((lm_base + (8 * i)) <= addr) &&
((lm_base + (8 * i) + 8) > addr)) {
- if (bridge->lm_callback[i] != NULL)
+ if (bridge->lm_callback[i])
bridge->lm_callback[i](
bridge->lm_data[i]);
}
@@ -866,7 +866,7 @@ static int fake_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i] != NULL) {
+ if (bridge->lm_callback[i]) {
mutex_unlock(&lm->mtx);
pr_err("Location monitor callback attached, can't reset\n");
return -EBUSY;
@@ -940,7 +940,7 @@ static int fake_lm_attach(struct vme_lm_resource *lm, int monitor,
}
/* Check that a callback isn't already attached */
- if (bridge->lm_callback[monitor] != NULL) {
+ if (bridge->lm_callback[monitor]) {
mutex_unlock(&lm->mtx);
pr_err("Existing callback attached\n");
return -EBUSY;
@@ -978,7 +978,7 @@ static int fake_lm_detach(struct vme_lm_resource *lm, int monitor)
/* If all location monitors disabled, disable global Location Monitor */
tmp = 0;
for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i] != NULL)
+ if (bridge->lm_callback[i])
tmp = 1;
}
@@ -1003,7 +1003,7 @@ static void *fake_alloc_consistent(struct device *parent, size_t size,
{
void *alloc = kmalloc(size, GFP_KERNEL);
- if (alloc != NULL)
+ if (alloc)
*dma = fake_ptr_to_pci(alloc);
return alloc;
@@ -1039,7 +1039,7 @@ static int fake_crcsr_init(struct vme_bridge *fake_bridge)
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = kzalloc(VME_CRCSR_BUF_SIZE, GFP_KERNEL);
bridge->crcsr_bus = fake_ptr_to_pci(bridge->crcsr_kernel);
- if (bridge->crcsr_kernel == NULL)
+ if (!bridge->crcsr_kernel)
return -ENOMEM;
vstat = fake_slot_get(fake_bridge);
@@ -1075,14 +1075,14 @@ static int __init fake_init(void)
/* If we want to support more than one bridge at some point, we need to
* dynamically allocate this so we get one per device.
*/
- fake_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
- if (fake_bridge == NULL) {
+ fake_bridge = kzalloc(sizeof(*fake_bridge), GFP_KERNEL);
+ if (!fake_bridge) {
retval = -ENOMEM;
goto err_struct;
}
- fake_device = kzalloc(sizeof(struct fake_driver), GFP_KERNEL);
- if (fake_device == NULL) {
+ fake_device = kzalloc(sizeof(*fake_device), GFP_KERNEL);
+ if (!fake_device) {
retval = -ENOMEM;
goto err_driver;
}
@@ -1104,9 +1104,8 @@ static int __init fake_init(void)
/* Add master windows to list */
INIT_LIST_HEAD(&fake_bridge->master_resources);
for (i = 0; i < FAKE_MAX_MASTER; i++) {
- master_image = kmalloc(sizeof(struct vme_master_resource),
- GFP_KERNEL);
- if (master_image == NULL) {
+ master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
+ if (!master_image) {
retval = -ENOMEM;
goto err_master;
}
@@ -1131,9 +1130,8 @@ static int __init fake_init(void)
/* Add slave windows to list */
INIT_LIST_HEAD(&fake_bridge->slave_resources);
for (i = 0; i < FAKE_MAX_SLAVE; i++) {
- slave_image = kmalloc(sizeof(struct vme_slave_resource),
- GFP_KERNEL);
- if (slave_image == NULL) {
+ slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
+ if (!slave_image) {
retval = -ENOMEM;
goto err_slave;
}
@@ -1154,9 +1152,8 @@ static int __init fake_init(void)
/* Add location monitor to list */
INIT_LIST_HEAD(&fake_bridge->lm_resources);
- lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
- if (lm == NULL) {
- pr_err("Failed to allocate memory for location monitor resource structure\n");
+ lm = kmalloc(sizeof(*lm), GFP_KERNEL);
+ if (!lm) {
retval = -ENOMEM;
goto err_lm;
}
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index fc1b634b969a..647d231d4422 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -741,18 +741,16 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
image->kern_base = NULL;
kfree(image->bus_resource.name);
release_resource(&image->bus_resource);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
/* Exit here if size is zero */
if (size == 0)
return 0;
- if (image->bus_resource.name == NULL) {
+ if (!image->bus_resource.name) {
image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
- if (image->bus_resource.name == NULL) {
- dev_err(tsi148_bridge->parent, "Unable to allocate "
- "memory for resource name\n");
+ if (!image->bus_resource.name) {
retval = -ENOMEM;
goto err_name;
}
@@ -778,7 +776,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
image->kern_base = ioremap_nocache(
image->bus_resource.start, size);
- if (image->kern_base == NULL) {
+ if (!image->kern_base) {
dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
goto err_remap;
@@ -790,7 +788,7 @@ err_remap:
release_resource(&image->bus_resource);
err_resource:
kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
err_name:
return retval;
}
@@ -804,7 +802,7 @@ static void tsi148_free_resource(struct vme_master_resource *image)
image->kern_base = NULL;
release_resource(&image->bus_resource);
kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
/*
@@ -1641,10 +1639,8 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
tsi148_bridge = list->parent->parent;
/* Descriptor must be aligned on 64-bit boundaries */
- entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
- if (entry == NULL) {
- dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
- "dma resource structure\n");
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
retval = -ENOMEM;
goto err_mem;
}
@@ -1661,7 +1657,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
/* Given we are going to fill out the structure, we probably don't
* need to zero it, but better safe than sorry for now.
*/
- memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
+ memset(&entry->descriptor, 0, sizeof(entry->descriptor));
/* Fill out source part */
switch (src->type) {
@@ -1756,8 +1752,9 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
list_add_tail(&entry->list, &list->entries);
entry->dma_handle = dma_map_single(tsi148_bridge->parent,
- &entry->descriptor,
- sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
+ &entry->descriptor,
+ sizeof(entry->descriptor),
+ DMA_TO_DEVICE);
if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
dev_err(tsi148_bridge->parent, "DMA mapping error\n");
retval = -EINVAL;
@@ -1946,7 +1943,7 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i] != NULL) {
+ if (bridge->lm_callback[i]) {
mutex_unlock(&lm->mtx);
dev_err(tsi148_bridge->parent, "Location monitor "
"callback attached, can't reset\n");
@@ -2071,7 +2068,7 @@ static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
}
/* Check that a callback isn't already attached */
- if (bridge->lm_callback[monitor] != NULL) {
+ if (bridge->lm_callback[monitor]) {
mutex_unlock(&lm->mtx);
dev_err(tsi148_bridge->parent, "Existing callback attached\n");
return -EBUSY;
@@ -2208,7 +2205,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
&bridge->crcsr_bus);
- if (bridge->crcsr_kernel == NULL) {
+ if (!bridge->crcsr_kernel) {
dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
"CR/CSR image\n");
return -ENOMEM;
@@ -2294,19 +2291,15 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* If we want to support more than one of each bridge, we need to
* dynamically generate this so we get one per device
*/
- tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
- if (tsi148_bridge == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
+ tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL);
+ if (!tsi148_bridge) {
retval = -ENOMEM;
goto err_struct;
}
vme_init_bridge(tsi148_bridge);
- tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
- if (tsi148_device == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
+ tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL);
+ if (!tsi148_device) {
retval = -ENOMEM;
goto err_driver;
}
@@ -2371,10 +2364,9 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
master_num--;
tsi148_device->flush_image =
- kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
- if (tsi148_device->flush_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "flush resource structure\n");
+ kmalloc(sizeof(*tsi148_device->flush_image),
+ GFP_KERNEL);
+ if (!tsi148_device->flush_image) {
retval = -ENOMEM;
goto err_master;
}
@@ -2383,17 +2375,14 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
tsi148_device->flush_image->locked = 1;
tsi148_device->flush_image->number = master_num;
memset(&tsi148_device->flush_image->bus_resource, 0,
- sizeof(struct resource));
+ sizeof(tsi148_device->flush_image->bus_resource));
tsi148_device->flush_image->kern_base = NULL;
}
/* Add master windows to list */
for (i = 0; i < master_num; i++) {
- master_image = kmalloc(sizeof(struct vme_master_resource),
- GFP_KERNEL);
- if (master_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "master resource structure\n");
+ master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
+ if (!master_image) {
retval = -ENOMEM;
goto err_master;
}
@@ -2410,7 +2399,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
VME_PROG | VME_DATA;
master_image->width_attr = VME_D16 | VME_D32;
memset(&master_image->bus_resource, 0,
- sizeof(struct resource));
+ sizeof(master_image->bus_resource));
master_image->kern_base = NULL;
list_add_tail(&master_image->list,
&tsi148_bridge->master_resources);
@@ -2418,11 +2407,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add slave windows to list */
for (i = 0; i < TSI148_MAX_SLAVE; i++) {
- slave_image = kmalloc(sizeof(struct vme_slave_resource),
- GFP_KERNEL);
- if (slave_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "slave resource structure\n");
+ slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
+ if (!slave_image) {
retval = -ENOMEM;
goto err_slave;
}
@@ -2442,11 +2428,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add dma engines to list */
for (i = 0; i < TSI148_MAX_DMA; i++) {
- dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
- GFP_KERNEL);
- if (dma_ctrlr == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "dma resource structure\n");
+ dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
+ if (!dma_ctrlr) {
retval = -ENOMEM;
goto err_dma;
}
@@ -2465,10 +2448,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add location monitor to list */
- lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
- if (lm == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "location monitor resource structure\n");
+ lm = kmalloc(sizeof(*lm), GFP_KERNEL);
+ if (!lm) {
retval = -ENOMEM;
goto err_lm;
}
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 6a3ead42aba8..81246221a13b 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -92,23 +92,23 @@ void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
{
struct vme_bridge *bridge;
- if (resource == NULL) {
+ if (!resource) {
printk(KERN_ERR "No resource\n");
return NULL;
}
bridge = find_bridge(resource);
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find bridge\n");
return NULL;
}
- if (bridge->parent == NULL) {
+ if (!bridge->parent) {
printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
return NULL;
}
- if (bridge->alloc_consistent == NULL) {
+ if (!bridge->alloc_consistent) {
printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
bridge->name);
return NULL;
@@ -132,23 +132,23 @@ void vme_free_consistent(struct vme_resource *resource, size_t size,
{
struct vme_bridge *bridge;
- if (resource == NULL) {
+ if (!resource) {
printk(KERN_ERR "No resource\n");
return;
}
bridge = find_bridge(resource);
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find bridge\n");
return;
}
- if (bridge->parent == NULL) {
+ if (!bridge->parent) {
printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
return;
}
- if (bridge->free_consistent == NULL) {
+ if (!bridge->free_consistent) {
printk(KERN_ERR "free_consistent not supported by bridge %s\n",
bridge->name);
return;
@@ -208,29 +208,27 @@ int vme_check_window(u32 aspace, unsigned long long vme_base,
{
int retval = 0;
+ if (vme_base + size < size)
+ return -EINVAL;
+
switch (aspace) {
case VME_A16:
- if (((vme_base + size) > VME_A16_MAX) ||
- (vme_base > VME_A16_MAX))
+ if (vme_base + size > VME_A16_MAX)
retval = -EFAULT;
break;
case VME_A24:
- if (((vme_base + size) > VME_A24_MAX) ||
- (vme_base > VME_A24_MAX))
+ if (vme_base + size > VME_A24_MAX)
retval = -EFAULT;
break;
case VME_A32:
- if (((vme_base + size) > VME_A32_MAX) ||
- (vme_base > VME_A32_MAX))
+ if (vme_base + size > VME_A32_MAX)
retval = -EFAULT;
break;
case VME_A64:
- if ((size != 0) && (vme_base > U64_MAX + 1 - size))
- retval = -EFAULT;
+ /* The VME_A64_MAX limit is actually U64_MAX + 1 */
break;
case VME_CRCSR:
- if (((vme_base + size) > VME_CRCSR_MAX) ||
- (vme_base > VME_CRCSR_MAX))
+ if (vme_base + size > VME_CRCSR_MAX)
retval = -EFAULT;
break;
case VME_USER1:
@@ -303,7 +301,7 @@ struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
struct vme_resource *resource = NULL;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
@@ -313,7 +311,7 @@ struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
slave_image = list_entry(slave_pos,
struct vme_slave_resource, list);
- if (slave_image == NULL) {
+ if (!slave_image) {
printk(KERN_ERR "Registered NULL Slave resource\n");
continue;
}
@@ -333,14 +331,13 @@ struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
}
/* No free image */
- if (allocated_image == NULL)
+ if (!allocated_image)
goto err_image;
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_WARNING "Unable to allocate resource structure\n");
+ resource = kmalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
goto err_alloc;
- }
+
resource->type = VME_SLAVE;
resource->entry = &allocated_image->list;
@@ -389,7 +386,7 @@ int vme_slave_set(struct vme_resource *resource, int enabled,
image = list_entry(resource->entry, struct vme_slave_resource, list);
- if (bridge->slave_set == NULL) {
+ if (!bridge->slave_set) {
printk(KERN_ERR "Function not supported\n");
return -ENOSYS;
}
@@ -438,7 +435,7 @@ int vme_slave_get(struct vme_resource *resource, int *enabled,
image = list_entry(resource->entry, struct vme_slave_resource, list);
- if (bridge->slave_get == NULL) {
+ if (!bridge->slave_get) {
printk(KERN_ERR "vme_slave_get not supported\n");
return -EINVAL;
}
@@ -465,7 +462,7 @@ void vme_slave_free(struct vme_resource *resource)
slave_image = list_entry(resource->entry, struct vme_slave_resource,
list);
- if (slave_image == NULL) {
+ if (!slave_image) {
printk(KERN_ERR "Can't find slave resource\n");
return;
}
@@ -505,7 +502,7 @@ struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
struct vme_resource *resource = NULL;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
@@ -515,7 +512,7 @@ struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
master_image = list_entry(master_pos,
struct vme_master_resource, list);
- if (master_image == NULL) {
+ if (!master_image) {
printk(KERN_WARNING "Registered NULL master resource\n");
continue;
}
@@ -536,16 +533,15 @@ struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
}
/* Check to see if we found a resource */
- if (allocated_image == NULL) {
+ if (!allocated_image) {
printk(KERN_ERR "Can't find a suitable resource\n");
goto err_image;
}
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_ERR "Unable to allocate resource structure\n");
+ resource = kmalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
goto err_alloc;
- }
+
resource->type = VME_MASTER;
resource->entry = &allocated_image->list;
@@ -594,7 +590,7 @@ int vme_master_set(struct vme_resource *resource, int enabled,
image = list_entry(resource->entry, struct vme_master_resource, list);
- if (bridge->master_set == NULL) {
+ if (!bridge->master_set) {
printk(KERN_WARNING "vme_master_set not supported\n");
return -EINVAL;
}
@@ -644,7 +640,7 @@ int vme_master_get(struct vme_resource *resource, int *enabled,
image = list_entry(resource->entry, struct vme_master_resource, list);
- if (bridge->master_get == NULL) {
+ if (!bridge->master_get) {
printk(KERN_WARNING "%s not supported\n", __func__);
return -EINVAL;
}
@@ -676,7 +672,7 @@ ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
struct vme_master_resource *image;
size_t length;
- if (bridge->master_read == NULL) {
+ if (!bridge->master_read) {
printk(KERN_WARNING "Reading from resource not supported\n");
return -EINVAL;
}
@@ -725,7 +721,7 @@ ssize_t vme_master_write(struct vme_resource *resource, void *buf,
struct vme_master_resource *image;
size_t length;
- if (bridge->master_write == NULL) {
+ if (!bridge->master_write) {
printk(KERN_WARNING "Writing to resource not supported\n");
return -EINVAL;
}
@@ -776,7 +772,7 @@ unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
- if (bridge->master_rmw == NULL) {
+ if (!bridge->master_rmw) {
printk(KERN_WARNING "Writing to resource not supported\n");
return -EINVAL;
}
@@ -846,7 +842,7 @@ void vme_master_free(struct vme_resource *resource)
master_image = list_entry(resource->entry, struct vme_master_resource,
list);
- if (master_image == NULL) {
+ if (!master_image) {
printk(KERN_ERR "Can't find master resource\n");
return;
}
@@ -886,7 +882,7 @@ struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
printk(KERN_ERR "No VME resource Attribute tests done\n");
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
@@ -895,8 +891,7 @@ struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
list_for_each(dma_pos, &bridge->dma_resources) {
dma_ctrlr = list_entry(dma_pos,
struct vme_dma_resource, list);
-
- if (dma_ctrlr == NULL) {
+ if (!dma_ctrlr) {
printk(KERN_ERR "Registered NULL DMA resource\n");
continue;
}
@@ -915,14 +910,13 @@ struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
}
/* Check to see if we found a resource */
- if (allocated_ctrlr == NULL)
+ if (!allocated_ctrlr)
goto err_ctrlr;
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_WARNING "Unable to allocate resource structure\n");
+ resource = kmalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
goto err_alloc;
- }
+
resource->type = VME_DMA;
resource->entry = &allocated_ctrlr->list;
@@ -951,7 +945,6 @@ EXPORT_SYMBOL(vme_dma_request);
*/
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
- struct vme_dma_resource *ctrlr;
struct vme_dma_list *dma_list;
if (resource->type != VME_DMA) {
@@ -959,15 +952,14 @@ struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
return NULL;
}
- ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
-
- dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
- if (dma_list == NULL) {
- printk(KERN_ERR "Unable to allocate memory for new DMA list\n");
+ dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
+ if (!dma_list)
return NULL;
- }
+
INIT_LIST_HEAD(&dma_list->entries);
- dma_list->parent = ctrlr;
+ dma_list->parent = list_entry(resource->entry,
+ struct vme_dma_resource,
+ list);
mutex_init(&dma_list->mtx);
return dma_list;
@@ -990,17 +982,13 @@ struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
struct vme_dma_attr *attributes;
struct vme_dma_pattern *pattern_attr;
- attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
- if (attributes == NULL) {
- printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
+ attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
+ if (!attributes)
goto err_attr;
- }
- pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
- if (pattern_attr == NULL) {
- printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
+ pattern_attr = kmalloc(sizeof(*pattern_attr), GFP_KERNEL);
+ if (!pattern_attr)
goto err_pat;
- }
attributes->type = VME_DMA_PATTERN;
attributes->private = (void *)pattern_attr;
@@ -1034,19 +1022,13 @@ struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
/* XXX Run some sanity checks here */
- attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
- if (attributes == NULL) {
- printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
+ attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
+ if (!attributes)
goto err_attr;
- }
- pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
- if (pci_attr == NULL) {
- printk(KERN_ERR "Unable to allocate memory for PCI attributes\n");
+ pci_attr = kmalloc(sizeof(*pci_attr), GFP_KERNEL);
+ if (!pci_attr)
goto err_pci;
- }
-
-
attributes->type = VME_DMA_PCI;
attributes->private = (void *)pci_attr;
@@ -1081,18 +1063,13 @@ struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
struct vme_dma_attr *attributes;
struct vme_dma_vme *vme_attr;
- attributes = kmalloc(
- sizeof(struct vme_dma_attr), GFP_KERNEL);
- if (attributes == NULL) {
- printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
+ attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
+ if (!attributes)
goto err_attr;
- }
- vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
- if (vme_attr == NULL) {
- printk(KERN_ERR "Unable to allocate memory for VME attributes\n");
+ vme_attr = kmalloc(sizeof(*vme_attr), GFP_KERNEL);
+ if (!vme_attr)
goto err_vme;
- }
attributes->type = VME_DMA_VME;
attributes->private = (void *)vme_attr;
@@ -1148,7 +1125,7 @@ int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
struct vme_bridge *bridge = list->parent->parent;
int retval;
- if (bridge->dma_list_add == NULL) {
+ if (!bridge->dma_list_add) {
printk(KERN_WARNING "Link List DMA generation not supported\n");
return -EINVAL;
}
@@ -1181,7 +1158,7 @@ int vme_dma_list_exec(struct vme_dma_list *list)
struct vme_bridge *bridge = list->parent->parent;
int retval;
- if (bridge->dma_list_exec == NULL) {
+ if (!bridge->dma_list_exec) {
printk(KERN_ERR "Link List DMA execution not supported\n");
return -EINVAL;
}
@@ -1210,14 +1187,14 @@ int vme_dma_list_free(struct vme_dma_list *list)
struct vme_bridge *bridge = list->parent->parent;
int retval;
- if (bridge->dma_list_empty == NULL) {
+ if (!bridge->dma_list_empty) {
printk(KERN_WARNING "Emptying of Link Lists not supported\n");
return -EINVAL;
}
if (!mutex_trylock(&list->mtx)) {
printk(KERN_ERR "Link List in use\n");
- return -EINVAL;
+ return -EBUSY;
}
/*
@@ -1342,8 +1319,7 @@ void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
call = bridge->irq[level - 1].callback[statid].func;
priv_data = bridge->irq[level - 1].callback[statid].priv_data;
-
- if (call != NULL)
+ if (call)
call(level, statid, priv_data);
else
printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
@@ -1374,7 +1350,7 @@ int vme_irq_request(struct vme_dev *vdev, int level, int statid,
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
}
@@ -1384,7 +1360,7 @@ int vme_irq_request(struct vme_dev *vdev, int level, int statid,
return -EINVAL;
}
- if (bridge->irq_set == NULL) {
+ if (!bridge->irq_set) {
printk(KERN_ERR "Configuring interrupts not supported\n");
return -EINVAL;
}
@@ -1423,7 +1399,7 @@ void vme_irq_free(struct vme_dev *vdev, int level, int statid)
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
return;
}
@@ -1433,7 +1409,7 @@ void vme_irq_free(struct vme_dev *vdev, int level, int statid)
return;
}
- if (bridge->irq_set == NULL) {
+ if (!bridge->irq_set) {
printk(KERN_ERR "Configuring interrupts not supported\n");
return;
}
@@ -1470,7 +1446,7 @@ int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
}
@@ -1480,7 +1456,7 @@ int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
return -EINVAL;
}
- if (bridge->irq_generate == NULL) {
+ if (!bridge->irq_generate) {
printk(KERN_WARNING "Interrupt generation not supported\n");
return -EINVAL;
}
@@ -1508,7 +1484,7 @@ struct vme_resource *vme_lm_request(struct vme_dev *vdev)
struct vme_resource *resource = NULL;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
@@ -1517,8 +1493,7 @@ struct vme_resource *vme_lm_request(struct vme_dev *vdev)
list_for_each(lm_pos, &bridge->lm_resources) {
lm = list_entry(lm_pos,
struct vme_lm_resource, list);
-
- if (lm == NULL) {
+ if (!lm) {
printk(KERN_ERR "Registered NULL Location Monitor resource\n");
continue;
}
@@ -1535,14 +1510,13 @@ struct vme_resource *vme_lm_request(struct vme_dev *vdev)
}
/* Check to see if we found a resource */
- if (allocated_lm == NULL)
+ if (!allocated_lm)
goto err_lm;
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_ERR "Unable to allocate resource structure\n");
+ resource = kmalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
goto err_alloc;
- }
+
resource->type = VME_LM;
resource->entry = &allocated_lm->list;
@@ -1612,7 +1586,7 @@ int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
lm = list_entry(resource->entry, struct vme_lm_resource, list);
- if (bridge->lm_set == NULL) {
+ if (!bridge->lm_set) {
printk(KERN_ERR "vme_lm_set not supported\n");
return -EINVAL;
}
@@ -1648,7 +1622,7 @@ int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
lm = list_entry(resource->entry, struct vme_lm_resource, list);
- if (bridge->lm_get == NULL) {
+ if (!bridge->lm_get) {
printk(KERN_ERR "vme_lm_get not supported\n");
return -EINVAL;
}
@@ -1685,7 +1659,7 @@ int vme_lm_attach(struct vme_resource *resource, int monitor,
lm = list_entry(resource->entry, struct vme_lm_resource, list);
- if (bridge->lm_attach == NULL) {
+ if (!bridge->lm_attach) {
printk(KERN_ERR "vme_lm_attach not supported\n");
return -EINVAL;
}
@@ -1718,7 +1692,7 @@ int vme_lm_detach(struct vme_resource *resource, int monitor)
lm = list_entry(resource->entry, struct vme_lm_resource, list);
- if (bridge->lm_detach == NULL) {
+ if (!bridge->lm_detach) {
printk(KERN_ERR "vme_lm_detach not supported\n");
return -EINVAL;
}
@@ -1780,12 +1754,12 @@ int vme_slot_num(struct vme_dev *vdev)
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
}
- if (bridge->slot_get == NULL) {
+ if (!bridge->slot_get) {
printk(KERN_WARNING "vme_slot_num not supported\n");
return -EINVAL;
}
@@ -1808,7 +1782,7 @@ int vme_bus_num(struct vme_dev *vdev)
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
pr_err("Can't find VME bus\n");
return -EINVAL;
}
@@ -1888,7 +1862,7 @@ static int __vme_register_driver_bus(struct vme_driver *drv,
struct vme_dev *tmp;
for (i = 0; i < ndevs; i++) {
- vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) {
err = -ENOMEM;
goto err_devalloc;
@@ -2020,30 +1994,26 @@ static int vme_bus_match(struct device *dev, struct device_driver *drv)
static int vme_bus_probe(struct device *dev)
{
- int retval = -ENODEV;
struct vme_driver *driver;
struct vme_dev *vdev = dev_to_vme_dev(dev);
driver = dev->platform_data;
+ if (driver->probe)
+ return driver->probe(vdev);
- if (driver->probe != NULL)
- retval = driver->probe(vdev);
-
- return retval;
+ return -ENODEV;
}
static int vme_bus_remove(struct device *dev)
{
- int retval = -ENODEV;
struct vme_driver *driver;
struct vme_dev *vdev = dev_to_vme_dev(dev);
driver = dev->platform_data;
+ if (driver->remove)
+ return driver->remove(vdev);
- if (driver->remove != NULL)
- retval = driver->remove(vdev);
-
- return retval;
+ return -ENODEV;
}
struct bus_type vme_bus_type = {
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 3c945f9f5f0f..7931231d8e80 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -148,4 +148,19 @@ config W1_SLAVE_DS28E04
If you are unsure, say N.
+config W1_SLAVE_DS28E17
+ tristate "1-wire-to-I2C master bridge (DS28E17)"
+ select CRC16
+ depends on I2C
+ help
+ Say Y here if you want to use the DS28E17 1-wire-to-I2C master bridge.
+ For each DS28E17 detected, a new I2C adapter is created within the
+ kernel. I2C devices on that bus can be configured to be used by the
+ kernel and userspace tools as on any other "native" I2C bus.
+
+ This driver is also available as a module. If so, the module
+ will be called w1_ds28e17.
+
+ If you are unsure, say N.
+
endmenu
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 79c611ce5f18..d5f4f4d5b9e5 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o
obj-$(CONFIG_W1_SLAVE_DS2780) += w1_ds2780.o
obj-$(CONFIG_W1_SLAVE_DS2781) += w1_ds2781.o
obj-$(CONFIG_W1_SLAVE_DS28E04) += w1_ds28e04.o
+obj-$(CONFIG_W1_SLAVE_DS28E17) += w1_ds28e17.o
diff --git a/drivers/w1/slaves/w1_ds28e17.c b/drivers/w1/slaves/w1_ds28e17.c
new file mode 100644
index 000000000000..e78b63ea4daf
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds28e17.c
@@ -0,0 +1,771 @@
+/*
+ * w1_ds28e17.c - w1 family 19 (DS28E17) driver
+ *
+ * Copyright (c) 2016 Jan Kandziora <jjj@gmx.de>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/crc16.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#define CRC16_INIT 0
+
+#include <linux/w1.h>
+
+#define W1_FAMILY_DS28E17 0x19
+
+/* Module setup. */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jan Kandziora <jjj@gmx.de>");
+MODULE_DESCRIPTION("w1 family 19 driver for DS28E17, 1-wire to I2C master bridge");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS28E17));
+
+
+/* Default I2C speed to be set when a DS28E17 is detected. */
+static int i2c_speed = 100;
+module_param_named(speed, i2c_speed, int, (S_IRUSR | S_IWUSR));
+MODULE_PARM_DESC(speed, "Default I2C speed to be set when a DS28E17 is detected");
+
+/* Default I2C stretch value to be set when a DS28E17 is detected. */
+static char i2c_stretch = 1;
+module_param_named(stretch, i2c_stretch, byte, (S_IRUSR | S_IWUSR));
+MODULE_PARM_DESC(stretch, "Default I2C stretch value to be set when a DS28E17 is detected");
+
+/* DS28E17 device command codes. */
+#define W1_F19_WRITE_DATA_WITH_STOP 0x4B
+#define W1_F19_WRITE_DATA_NO_STOP 0x5A
+#define W1_F19_WRITE_DATA_ONLY 0x69
+#define W1_F19_WRITE_DATA_ONLY_WITH_STOP 0x78
+#define W1_F19_READ_DATA_WITH_STOP 0x87
+#define W1_F19_WRITE_READ_DATA_WITH_STOP 0x2D
+#define W1_F19_WRITE_CONFIGURATION 0xD2
+#define W1_F19_READ_CONFIGURATION 0xE1
+#define W1_F19_ENABLE_SLEEP_MODE 0x1E
+#define W1_F19_READ_DEVICE_REVISION 0xC4
+
+/* DS28E17 status bits */
+#define W1_F19_STATUS_CRC 0x01
+#define W1_F19_STATUS_ADDRESS 0x02
+#define W1_F19_STATUS_START 0x08
+
+/*
+ * Maximum number of I2C bytes to transfer within one CRC16 protected onewire
+ * command.
+ */
+#define W1_F19_WRITE_DATA_LIMIT 255
+
+/* Maximum number of I2C bytes to read with one onewire command. */
+#define W1_F19_READ_DATA_LIMIT 255
+
+/* Constants for calculating the busy sleep. */
+#define W1_F19_BUSY_TIMEBASES { 90, 23, 10 }
+#define W1_F19_BUSY_GRATUITY 1000
+
+/* Number of checks for the busy flag before timeout. */
+#define W1_F19_BUSY_CHECKS 1000
+
+
+/* Slave specific data. */
+struct w1_f19_data {
+ u8 speed;
+ u8 stretch;
+ struct i2c_adapter adapter;
+};
+
+
+/* Wait a while until the busy flag clears. */
+static int w1_f19_i2c_busy_wait(struct w1_slave *sl, size_t count)
+{
+ const unsigned long timebases[3] = W1_F19_BUSY_TIMEBASES;
+ struct w1_f19_data *data = sl->family_data;
+ unsigned int checks;
+
+ /* Check the busy flag first in any case. */
+ if (w1_touch_bit(sl->master, 1) == 0)
+ return 0;
+
+ /*
+ * Do a generously long sleep in the beginning,
+ * as we have to wait at least this time for all
+ * the I2C bytes at the given speed to be transferred.
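+ * E.g. at the 100kHz setting (timebase 90us per byte), with stretch 1
+ * and count 5 this works out to usleep_range(450, 1450) microseconds.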
+ */
+ usleep_range(timebases[data->speed] * (data->stretch) * count,
+ timebases[data->speed] * (data->stretch) * count
+ + W1_F19_BUSY_GRATUITY);
+
+ /* Now continuously check the busy flag sent by the DS28E17. */
+ checks = W1_F19_BUSY_CHECKS;
+ while ((checks--) > 0) {
+ /* Return success if the busy flag is cleared. */
+ if (w1_touch_bit(sl->master, 1) == 0)
+ return 0;
+
+ /* Wait one non-stretched byte timeslot. */
+ udelay(timebases[data->speed]);
+ }
+
+ /* Timeout. */
+ dev_warn(&sl->dev, "busy timeout\n");
+ return -ETIMEDOUT;
+}
+
+
+/* Utility function: check the DS28E17 status bytes and map them to an errno. */
+static int w1_f19_error(struct w1_slave *sl, u8 w1_buf[])
+{
+ /* Warnings. */
+ if (w1_buf[0] & W1_F19_STATUS_CRC)
+ dev_warn(&sl->dev, "crc16 mismatch\n");
+ if (w1_buf[0] & W1_F19_STATUS_ADDRESS)
+ dev_warn(&sl->dev, "i2c device not responding\n");
+ if ((w1_buf[0] & (W1_F19_STATUS_CRC | W1_F19_STATUS_ADDRESS)) == 0
+ && w1_buf[1] != 0) {
+ dev_warn(&sl->dev, "i2c short write, %d bytes not acknowledged\n",
+ w1_buf[1]);
+ }
+
+ /* Check error conditions. */
+ if (w1_buf[0] & W1_F19_STATUS_ADDRESS)
+ return -ENXIO;
+ if (w1_buf[0] & W1_F19_STATUS_START)
+ return -EAGAIN;
+ if (w1_buf[0] != 0 || w1_buf[1] != 0)
+ return -EIO;
+
+ /* All ok. */
+ return 0;
+}
+
+
+/* Utility function: write data to I2C slave, single chunk. */
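+/*
+ * On-wire frame, as built below: <command byte(s)> <count> <count data bytes>
+ * <inverted crc16 low> <inverted crc16 high>. The DS28E17 then holds the busy
+ * flag while it performs the I2C transfer and finally returns two status
+ * bytes.
+ */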
+static int __w1_f19_i2c_write(struct w1_slave *sl,
+ const u8 *command, size_t command_count,
+ const u8 *buffer, size_t count)
+{
+ u16 crc;
+ int error;
+ u8 w1_buf[2];
+
+ /* Send command and I2C data to DS28E17. */
+ crc = crc16(CRC16_INIT, command, command_count);
+ w1_write_block(sl->master, command, command_count);
+
+ w1_buf[0] = count;
+ crc = crc16(crc, w1_buf, 1);
+ w1_write_8(sl->master, w1_buf[0]);
+
+ crc = crc16(crc, buffer, count);
+ w1_write_block(sl->master, buffer, count);
+
+ w1_buf[0] = ~(crc & 0xFF);
+ w1_buf[1] = ~((crc >> 8) & 0xFF);
+ w1_write_block(sl->master, w1_buf, 2);
+
+ /* Wait until busy flag clears (or timeout). */
+ if (w1_f19_i2c_busy_wait(sl, count + 1) < 0)
+ return -ETIMEDOUT;
+
+ /* Read status from DS28E17. */
+ w1_read_block(sl->master, w1_buf, 2);
+
+ /* Check error conditions. */
+ error = w1_f19_error(sl, w1_buf);
+ if (error < 0)
+ return error;
+
+ /* Return number of bytes written. */
+ return count;
+}
+
+
+/* Write data to I2C slave. */
+static int w1_f19_i2c_write(struct w1_slave *sl, u16 i2c_address,
+ const u8 *buffer, size_t count, bool stop)
+{
+ int result;
+ int remaining = count;
+ const u8 *p;
+ u8 command[2];
+
+ /* Check input. */
+ if (count == 0)
+ return -EOPNOTSUPP;
+
+ /* Check whether we need multiple commands. */
+ if (count <= W1_F19_WRITE_DATA_LIMIT) {
+ /*
+ * Small data amount. Data can be sent with
+ * a single onewire command.
+ */
+
+ /* Send all data to DS28E17. */
+ command[0] = (stop ? W1_F19_WRITE_DATA_WITH_STOP
+ : W1_F19_WRITE_DATA_NO_STOP);
+ command[1] = i2c_address << 1;
+ result = __w1_f19_i2c_write(sl, command, 2, buffer, count);
+ } else {
+ /* Large data amount. Data has to be sent in multiple chunks. */
+
+ /* Send first chunk to DS28E17. */
+ p = buffer;
+ command[0] = W1_F19_WRITE_DATA_NO_STOP;
+ command[1] = i2c_address << 1;
+ result = __w1_f19_i2c_write(sl, command, 2, p,
+ W1_F19_WRITE_DATA_LIMIT);
+ if (result < 0)
+ return result;
+
+ /* Resume to same DS28E17. */
+ if (w1_reset_resume_command(sl->master))
+ return -EIO;
+
+ /* Next data chunk. */
+ p += W1_F19_WRITE_DATA_LIMIT;
+ remaining -= W1_F19_WRITE_DATA_LIMIT;
+
+ while (remaining > W1_F19_WRITE_DATA_LIMIT) {
+ /* Send intermediate chunk to DS28E17. */
+ command[0] = W1_F19_WRITE_DATA_ONLY;
+ result = __w1_f19_i2c_write(sl, command, 1, p,
+ W1_F19_WRITE_DATA_LIMIT);
+ if (result < 0)
+ return result;
+
+ /* Resume to same DS28E17. */
+ if (w1_reset_resume_command(sl->master))
+ return -EIO;
+
+ /* Next data chunk. */
+ p += W1_F19_WRITE_DATA_LIMIT;
+ remaining -= W1_F19_WRITE_DATA_LIMIT;
+ }
+
+ /* Send final chunk to DS28E17. */
+ command[0] = (stop ? W1_F19_WRITE_DATA_ONLY_WITH_STOP
+ : W1_F19_WRITE_DATA_ONLY);
+ result = __w1_f19_i2c_write(sl, command, 1, p, remaining);
+ }
+
+ return result;
+}
+
+
+/* Read data from I2C slave. */
+static int w1_f19_i2c_read(struct w1_slave *sl, u16 i2c_address,
+ u8 *buffer, size_t count)
+{
+ u16 crc;
+ int error;
+ u8 w1_buf[5];
+
+ /* Check input. */
+ if (count == 0)
+ return -EOPNOTSUPP;
+
+ /* Send command to DS28E17. */
+ w1_buf[0] = W1_F19_READ_DATA_WITH_STOP;
+ w1_buf[1] = i2c_address << 1 | 0x01;
+ w1_buf[2] = count;
+ crc = crc16(CRC16_INIT, w1_buf, 3);
+ w1_buf[3] = ~(crc & 0xFF);
+ w1_buf[4] = ~((crc >> 8) & 0xFF);
+ w1_write_block(sl->master, w1_buf, 5);
+
+ /* Wait until busy flag clears (or timeout). */
+ if (w1_f19_i2c_busy_wait(sl, count + 1) < 0)
+ return -ETIMEDOUT;
+
+ /* Read status from DS28E17. */
+ w1_buf[0] = w1_read_8(sl->master);
+ w1_buf[1] = 0;
+
+ /* Check error conditions. */
+ error = w1_f19_error(sl, w1_buf);
+ if (error < 0)
+ return error;
+
+ /* Read received I2C data from DS28E17. */
+ return w1_read_block(sl->master, buffer, count);
+}
+
+
+/* Write to, then read data from I2C slave. */
+static int w1_f19_i2c_write_read(struct w1_slave *sl, u16 i2c_address,
+ const u8 *wbuffer, size_t wcount, u8 *rbuffer, size_t rcount)
+{
+ u16 crc;
+ int error;
+ u8 w1_buf[3];
+
+ /* Check input. */
+ if (wcount == 0 || rcount == 0)
+ return -EOPNOTSUPP;
+
+ /* Send command and I2C data to DS28E17. */
+ w1_buf[0] = W1_F19_WRITE_READ_DATA_WITH_STOP;
+ w1_buf[1] = i2c_address << 1;
+ w1_buf[2] = wcount;
+ crc = crc16(CRC16_INIT, w1_buf, 3);
+ w1_write_block(sl->master, w1_buf, 3);
+
+ crc = crc16(crc, wbuffer, wcount);
+ w1_write_block(sl->master, wbuffer, wcount);
+
+ w1_buf[0] = rcount;
+ crc = crc16(crc, w1_buf, 1);
+ w1_buf[1] = ~(crc & 0xFF);
+ w1_buf[2] = ~((crc >> 8) & 0xFF);
+ w1_write_block(sl->master, w1_buf, 3);
+
+ /* Wait until busy flag clears (or timeout). */
+ if (w1_f19_i2c_busy_wait(sl, wcount + rcount + 2) < 0)
+ return -ETIMEDOUT;
+
+ /* Read status from DS28E17. */
+ w1_read_block(sl->master, w1_buf, 2);
+
+ /* Check error conditions. */
+ error = w1_f19_error(sl, w1_buf);
+ if (error < 0)
+ return error;
+
+ /* Read received I2C data from DS28E17. */
+ return w1_read_block(sl->master, rbuffer, rcount);
+}
+
+
+/* Do an I2C master transfer. */
+static int w1_f19_i2c_master_transfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ struct w1_slave *sl = (struct w1_slave *) adapter->algo_data;
+ int i = 0;
+ int result = 0;
+
+ /* Start onewire transaction. */
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* Select DS28E17. */
+ if (w1_reset_select_slave(sl)) {
+ i = -EIO;
+ goto error;
+ }
+
+ /* Loop while there are still messages to transfer. */
+ while (i < num) {
+ /*
+ * Check for special case: Small write followed
+ * by read to same I2C device.
+ */
+ if (i < (num-1)
+ && msgs[i].addr == msgs[i+1].addr
+ && !(msgs[i].flags & I2C_M_RD)
+ && (msgs[i+1].flags & I2C_M_RD)
+ && (msgs[i].len <= W1_F19_WRITE_DATA_LIMIT)) {
+ /*
+ * The DS28E17 has a combined transfer
+ * for small write+read.
+ */
+ result = w1_f19_i2c_write_read(sl, msgs[i].addr,
+ msgs[i].buf, msgs[i].len,
+ msgs[i+1].buf, msgs[i+1].len);
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+
+ /*
+ * Check if we should interpret the read data
+ * as a length byte. The DS28E17 unfortunately
+ * has no read without stop, so we can just do
+ * another simple read in that case.
+ */
+ if (msgs[i+1].flags & I2C_M_RECV_LEN) {
+ result = w1_f19_i2c_read(sl, msgs[i+1].addr,
+ &(msgs[i+1].buf[1]), msgs[i+1].buf[0]);
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+ }
+
+ /* Eat up read message, too. */
+ i++;
+ } else if (msgs[i].flags & I2C_M_RD) {
+ /* Read transfer. */
+ result = w1_f19_i2c_read(sl, msgs[i].addr,
+ msgs[i].buf, msgs[i].len);
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+
+ /*
+ * Check if we should interpret the read data
+ * as a length byte. The DS28E17 unfortunately
+ * has no read without stop, so we can just do
+ * another simple read in that case.
+ */
+ if (msgs[i].flags & I2C_M_RECV_LEN) {
+ result = w1_f19_i2c_read(sl,
+ msgs[i].addr,
+ &(msgs[i].buf[1]),
+ msgs[i].buf[0]);
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+ }
+ } else {
+ /*
+ * Write transfer.
+ * Stop condition only for last
+ * transfer.
+ */
+ result = w1_f19_i2c_write(sl,
+ msgs[i].addr,
+ msgs[i].buf,
+ msgs[i].len,
+ i == (num-1));
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+ }
+
+ /* Next message. */
+ i++;
+
+ /* Are there still messages to send/receive? */
+ if (i < num) {
+ /* Yes. Resume to same DS28E17. */
+ if (w1_reset_resume_command(sl->master)) {
+ i = -EIO;
+ goto error;
+ }
+ }
+ }
+
+error:
+ /* End onewire transaction. */
+ mutex_unlock(&sl->master->bus_mutex);
+
+ /* Return number of messages processed or error. */
+ return i;
+}
+
+
+/* Get I2C adapter functionality. */
+static u32 w1_f19_i2c_functionality(struct i2c_adapter *adapter)
+{
+ /*
+ * Plain I2C functions only.
+ * SMBus is emulated by the kernel's I2C layer.
+ * No "I2C_FUNC_SMBUS_QUICK"
+ * No "I2C_FUNC_SMBUS_READ_BLOCK_DATA"
+ * No "I2C_FUNC_SMBUS_BLOCK_PROC_CALL"
+ */
+ return I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_PROC_CALL |
+ I2C_FUNC_SMBUS_WRITE_BLOCK_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK |
+ I2C_FUNC_SMBUS_PEC;
+}
+
+
+/* I2C adapter quirks. */
+static const struct i2c_adapter_quirks w1_f19_i2c_adapter_quirks = {
+ .max_read_len = W1_F19_READ_DATA_LIMIT,
+};
+
+/* I2C algorithm. */
+static const struct i2c_algorithm w1_f19_i2c_algorithm = {
+ .master_xfer = w1_f19_i2c_master_transfer,
+ .functionality = w1_f19_i2c_functionality,
+};
+
+
+/* Read I2C speed from DS28E17. */
+static int w1_f19_get_i2c_speed(struct w1_slave *sl)
+{
+ struct w1_f19_data *data = sl->family_data;
+ int result = -EIO;
+
+ /* Start onewire transaction. */
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* Select slave. */
+ if (w1_reset_select_slave(sl))
+ goto error;
+
+ /* Read slave configuration byte. */
+ w1_write_8(sl->master, W1_F19_READ_CONFIGURATION);
+ result = w1_read_8(sl->master);
+ if (result < 0 || result > 2) {
+ result = -EIO;
+ goto error;
+ }
+
+ /* Update speed in slave specific data. */
+ data->speed = result;
+
+error:
+ /* End onewire transaction. */
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return result;
+}
+
+
+/* Set I2C speed on DS28E17. */
+static int __w1_f19_set_i2c_speed(struct w1_slave *sl, u8 speed)
+{
+ struct w1_f19_data *data = sl->family_data;
+ const int i2c_speeds[3] = { 100, 400, 900 };
+ u8 w1_buf[2];
+
+ /* Select slave. */
+ if (w1_reset_select_slave(sl))
+ return -EIO;
+
+ w1_buf[0] = W1_F19_WRITE_CONFIGURATION;
+ w1_buf[1] = speed;
+ w1_write_block(sl->master, w1_buf, 2);
+
+ /* Update speed in slave specific data. */
+ data->speed = speed;
+
+ dev_info(&sl->dev, "i2c speed set to %d kBaud\n", i2c_speeds[speed]);
+
+ return 0;
+}
+
+static int w1_f19_set_i2c_speed(struct w1_slave *sl, u8 speed)
+{
+ int result;
+
+ /* Start onewire transaction. */
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* Set I2C speed on DS28E17. */
+ result = __w1_f19_set_i2c_speed(sl, speed);
+
+ /* End onewire transaction. */
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return result;
+}
+
+
+/* Sysfs attributes. */
+
+/* I2C speed attribute for a single chip. */
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(dev);
+ int result;
+
+ /* Read current speed from slave. Updates data->speed. */
+ result = w1_f19_get_i2c_speed(sl);
+ if (result < 0)
+ return result;
+
+ /* Return current speed value. */
+ return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct w1_slave *sl = dev_to_w1_slave(dev);
+ int error;
+
+ /* Valid values are: "100", "400", "900" */
+ if (count < 3 || count > 4 || !buf)
+ return -EINVAL;
+ if (count == 4 && buf[3] != '\n')
+ return -EINVAL;
+ if (buf[1] != '0' || buf[2] != '0')
+ return -EINVAL;
+
+ /* Set speed on slave. */
+ switch (buf[0]) {
+ case '1':
+ error = w1_f19_set_i2c_speed(sl, 0);
+ break;
+ case '4':
+ error = w1_f19_set_i2c_speed(sl, 1);
+ break;
+ case '9':
+ error = w1_f19_set_i2c_speed(sl, 2);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (error < 0)
+ return error;
+
+ /* Return bytes written. */
+ return count;
+}
+
+static DEVICE_ATTR_RW(speed);
+
+
+/* Busy stretch attribute for a single chip. */
+static ssize_t stretch_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(dev);
+ struct w1_f19_data *data = sl->family_data;
+
+ /* Return current stretch value. */
+ return sprintf(buf, "%d\n", data->stretch);
+}
+
+static ssize_t stretch_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct w1_slave *sl = dev_to_w1_slave(dev);
+ struct w1_f19_data *data = sl->family_data;
+
+ /* Valid values are '1' to '9' */
+ if (count < 1 || count > 2 || !buf)
+ return -EINVAL;
+ if (count == 2 && buf[1] != '\n')
+ return -EINVAL;
+ if (buf[0] < '1' || buf[0] > '9')
+ return -EINVAL;
+
+ /* Set busy stretch value. */
+ data->stretch = buf[0] & 0x0F;
+
+ /* Return bytes written. */
+ return count;
+}
+
+static DEVICE_ATTR_RW(stretch);
+
+
+/* All attributes. */
+static struct attribute *w1_f19_attrs[] = {
+ &dev_attr_speed.attr,
+ &dev_attr_stretch.attr,
+ NULL,
+};
+
+static const struct attribute_group w1_f19_group = {
+ .attrs = w1_f19_attrs,
+};
+
+static const struct attribute_group *w1_f19_groups[] = {
+ &w1_f19_group,
+ NULL,
+};
+
+
+/* Slave add and remove functions. */
+static int w1_f19_add_slave(struct w1_slave *sl)
+{
+ struct w1_f19_data *data = NULL;
+
+ /* Allocate memory for slave specific data. */
+ data = devm_kzalloc(&sl->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ sl->family_data = data;
+
+ /* Setup default I2C speed on slave. */
+ switch (i2c_speed) {
+ case 100:
+ __w1_f19_set_i2c_speed(sl, 0);
+ break;
+ case 400:
+ __w1_f19_set_i2c_speed(sl, 1);
+ break;
+ case 900:
+ __w1_f19_set_i2c_speed(sl, 2);
+ break;
+ default:
+ /*
+ * An i2c_speed module parameter of anything other
+ * than 100, 400, 900 means not to touch the
+ * speed of the DS28E17.
+ * We assume 400kBaud, the power-on value.
+ */
+ data->speed = 1;
+ }
+
+ /*
+ * Setup default busy stretch
+ * configuration for the DS28E17.
+ */
+ data->stretch = i2c_stretch;
+
+ /* Setup I2C adapter. */
+ data->adapter.owner = THIS_MODULE;
+ data->adapter.algo = &w1_f19_i2c_algorithm;
+ data->adapter.algo_data = sl;
+ strcpy(data->adapter.name, "w1-");
+ strcat(data->adapter.name, sl->name);
+ data->adapter.dev.parent = &sl->dev;
+ data->adapter.quirks = &w1_f19_i2c_adapter_quirks;
+
+ return i2c_add_adapter(&data->adapter);
+}
+
+static void w1_f19_remove_slave(struct w1_slave *sl)
+{
+ struct w1_f19_data *family_data = sl->family_data;
+
+ /* Delete I2C adapter. */
+ i2c_del_adapter(&family_data->adapter);
+
+ /* Free slave specific data. */
+ devm_kfree(&sl->dev, family_data);
+ sl->family_data = NULL;
+}
+
+
+/* Declarations within the w1 subsystem. */
+static struct w1_family_ops w1_f19_fops = {
+ .add_slave = w1_f19_add_slave,
+ .remove_slave = w1_f19_remove_slave,
+ .groups = w1_f19_groups,
+};
+
+static struct w1_family w1_family_19 = {
+ .fid = W1_FAMILY_DS28E17,
+ .fops = &w1_f19_fops,
+};
+
+
+/* Module init and remove functions. */
+static int __init w1_f19_init(void)
+{
+ return w1_register_family(&w1_family_19);
+}
+
+static void __exit w1_f19_fini(void)
+{
+ w1_unregister_family(&w1_family_19);
+}
+
+module_init(w1_f19_init);
+module_exit(w1_f19_fini);
+
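As an aside (illustrative only, not part of the patch above): once w1_ds28e17 has registered its "w1-<slave name>" adapter, I2C clients can be attached to it from other kernel code just as on a native bus. The sketch below assumes the caller already holds the adapter pointer and uses a made-up device name and address; it relies on the long-standing i2c_new_device() helper available in this kernel series.

#include <linux/i2c.h>

/* Hypothetical helper: attach one client to a DS28E17-backed adapter. */
static struct i2c_client *example_attach_client(struct i2c_adapter *adap)
{
	struct i2c_board_info info = {
		I2C_BOARD_INFO("example-sensor", 0x76),	/* assumed name/address */
	};

	/* Returns the new client, or NULL if the device could not be created. */
	return i2c_new_device(adap, &info);
}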
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 259525c3382a..3c350dfbcd0b 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -268,17 +268,18 @@ static inline int w1_therm_eeprom(struct device *device)
int ret, max_trying = 10;
u8 *family_data = sl->family_data;
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto post_unlock;
-
if (!sl->family_data) {
ret = -ENODEV;
- goto pre_unlock;
+ goto error;
}
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(family_data));
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret != 0)
+ goto dec_refcnt;
+
memset(rom, 0, sizeof(rom));
while (max_trying--) {
@@ -306,17 +307,17 @@ static inline int w1_therm_eeprom(struct device *device)
sleep_rem = msleep_interruptible(tm);
if (sleep_rem != 0) {
ret = -EINTR;
- goto post_unlock;
+ goto dec_refcnt;
}
ret = mutex_lock_interruptible(&dev->bus_mutex);
if (ret != 0)
- goto post_unlock;
+ goto dec_refcnt;
} else if (!w1_strong_pullup) {
sleep_rem = msleep_interruptible(tm);
if (sleep_rem != 0) {
ret = -EINTR;
- goto pre_unlock;
+ goto mt_unlock;
}
}
@@ -324,11 +325,11 @@ static inline int w1_therm_eeprom(struct device *device)
}
}
-pre_unlock:
+mt_unlock:
mutex_unlock(&dev->bus_mutex);
-
-post_unlock:
+dec_refcnt:
atomic_dec(THERM_REFCNT(family_data));
+error:
return ret;
}
@@ -350,20 +351,22 @@ static inline int w1_DS18B20_precision(struct device *device, int val)
if (val > 12 || val < 9) {
pr_warn("Unsupported precision\n");
- return -1;
+ ret = -EINVAL;
+ goto error;
}
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto post_unlock;
-
if (!sl->family_data) {
ret = -ENODEV;
- goto pre_unlock;
+ goto error;
}
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(family_data));
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret != 0)
+ goto dec_refcnt;
+
memset(rom, 0, sizeof(rom));
/* translate precision to bitmask (see datasheet page 9) */
@@ -411,11 +414,10 @@ static inline int w1_DS18B20_precision(struct device *device, int val)
}
}
-pre_unlock:
mutex_unlock(&dev->bus_mutex);
-
-post_unlock:
+dec_refcnt:
atomic_dec(THERM_REFCNT(family_data));
+error:
return ret;
}
@@ -490,17 +492,18 @@ static ssize_t read_therm(struct device *device,
int ret, max_trying = 10;
u8 *family_data = sl->family_data;
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto error;
-
if (!family_data) {
ret = -ENODEV;
- goto mt_unlock;
+ goto error;
}
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(family_data));
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret != 0)
+ goto dec_refcnt;
+
memset(info->rom, 0, sizeof(info->rom));
while (max_trying--) {
@@ -542,7 +545,7 @@ static ssize_t read_therm(struct device *device,
sleep_rem = msleep_interruptible(tm);
if (sleep_rem != 0) {
ret = -EINTR;
- goto dec_refcnt;
+ goto mt_unlock;
}
}
@@ -567,10 +570,10 @@ static ssize_t read_therm(struct device *device,
break;
}
-dec_refcnt:
- atomic_dec(THERM_REFCNT(family_data));
mt_unlock:
mutex_unlock(&dev->bus_mutex);
+dec_refcnt:
+ atomic_dec(THERM_REFCNT(family_data));
error:
return ret;
}
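The w1_therm changes above all enforce the same ordering: pin the family data with the refcount first, then take the bus mutex, and unwind in reverse order so each error label undoes exactly one step. A minimal sketch of that pattern (generic names, not the driver's own) looks like this:

#include <linux/atomic.h>
#include <linux/mutex.h>

static int example_ordered_cleanup(atomic_t *refcnt, struct mutex *bus_mutex)
{
	int ret;

	atomic_inc(refcnt);				/* 1: pin the shared data */

	ret = mutex_lock_interruptible(bus_mutex);	/* 2: take the bus lock */
	if (ret)
		goto dec_refcnt;

	/* ... bus I/O under the lock ... */

	mutex_unlock(bus_mutex);			/* undo 2 */
dec_refcnt:
	atomic_dec(refcnt);				/* undo 1 */
	return ret;
}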
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index d191e1f80579..075d120e7b88 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -58,7 +58,7 @@ static u8 w1_read_bit(struct w1_master *dev);
* @dev: the master device
* @bit: 0 - write a 0, 1 - write a 1, read the level
*/
-static u8 w1_touch_bit(struct w1_master *dev, int bit)
+u8 w1_touch_bit(struct w1_master *dev, int bit)
{
if (dev->bus_master->touch_bit)
return dev->bus_master->touch_bit(dev->bus_master->data, bit);
@@ -69,6 +69,7 @@ static u8 w1_touch_bit(struct w1_master *dev, int bit)
return 0;
}
}
+EXPORT_SYMBOL_GPL(w1_touch_bit);
/**
* w1_write_bit() - Generates a write-0 or write-1 cycle.
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index c722cbfdc7e6..ca200d1f310a 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1460,7 +1460,7 @@ config INDYDOG
config JZ4740_WDT
tristate "Ingenic jz4740 SoC hardware watchdog"
- depends on MACH_JZ4740
+ depends on MACH_JZ4740 || MACH_JZ4780
select WATCHDOG_CORE
help
Hardware driver for the built-in watchdog timer on Ingenic jz4740 SoCs.
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 4472882f06df..18e46e31523c 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -220,7 +220,7 @@ static void afs_kill_pages(struct address_space *mapping,
_enter("{%x:%u},%lx-%lx",
vnode->fid.vid, vnode->fid.vnode, first, last);
- pagevec_init(&pv, 0);
+ pagevec_init(&pv);
do {
_debug("kill %lx-%lx", first, last);
@@ -262,7 +262,7 @@ static void afs_redirty_pages(struct writeback_control *wbc,
_enter("{%x:%u},%lx-%lx",
vnode->fid.vid, vnode->fid.vnode, first, last);
- pagevec_init(&pv, 0);
+ pagevec_init(&pv);
do {
_debug("redirty %lx-%lx", first, last);
@@ -556,20 +556,13 @@ static int afs_writepages_region(struct address_space *mapping,
_enter(",,%lx,%lx,", index, end);
do {
- n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY,
- 1, &page);
+ n = find_get_pages_range_tag(mapping, &index, end,
+ PAGECACHE_TAG_DIRTY, 1, &page);
if (!n)
break;
_debug("wback %lx", page->index);
- if (page->index > end) {
- *_next = index;
- put_page(page);
- _leave(" = 0 [%lx]", *_next);
- return 0;
- }
-
/* at this point we hold neither mapping->tree_lock nor lock on
* the page itself: the page may be truncated or invalidated
* (changing page->mapping to NULL), or even swizzled back from
@@ -662,7 +655,7 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
_enter("{%x:%u},{%lx-%lx}",
vnode->fid.vid, vnode->fid.vnode, first, last);
- pagevec_init(&pv, 0);
+ pagevec_init(&pv);
do {
_debug("done %lx-%lx", first, last);
diff --git a/fs/aio.c b/fs/aio.c
index 5a2487217072..e6de7715228c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -576,7 +576,7 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
* actually has a cancel function, hence the cmpxchg()
*/
- cancel = ACCESS_ONCE(kiocb->ki_cancel);
+ cancel = READ_ONCE(kiocb->ki_cancel);
do {
if (!cancel || cancel == KIOCB_CANCELLED)
return -EINVAL;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 73b01e474fdc..c697882b3aba 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1699,7 +1699,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
long signr, size_t *total)
{
unsigned int i;
- unsigned int regset_size = view->regsets[0].n * view->regsets[0].size;
+ unsigned int regset0_size = regset_size(t->task, &view->regsets[0]);
/*
* NT_PRSTATUS is the one special case, because the regset data
@@ -1708,11 +1708,11 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
* We assume that regset 0 is NT_PRSTATUS.
*/
fill_prstatus(&t->prstatus, t->task, signr);
- (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset_size,
+ (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size,
&t->prstatus.pr_reg, NULL);
fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
- PRSTATUS_SIZE(t->prstatus, regset_size), &t->prstatus);
+ PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus);
*total += notesize(&t->notes[0]);
do_thread_regset_writeback(t->task, &view->regsets[0]);
@@ -1728,7 +1728,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
- size_t size = regset->n * regset->size;
+ size_t size = regset_size(t->task, regset);
void *data = kmalloc(size, GFP_KERNEL);
if (unlikely(!data))
return 0;
@@ -1743,7 +1743,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
size, data);
else {
SET_PR_FPVALID(&t->prstatus,
- 1, regset_size);
+ 1, regset0_size);
fill_note(&t->notes[i], "CORE",
NT_PRFPREG, size, data);
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 789f55e851ae..4a181fcb5175 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -54,18 +54,6 @@ struct block_device *I_BDEV(struct inode *inode)
}
EXPORT_SYMBOL(I_BDEV);
-void __vfs_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
- vaf.fmt = fmt;
- vaf.va = &args;
- printk_ratelimited("%sVFS (%s): %pV\n", prefix, sb->s_id, &vaf);
- va_end(args);
-}
-
static void bdev_write_inode(struct block_device *bdev)
{
struct inode *inode = bdev->bd_inode;
@@ -249,7 +237,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
if (!READ_ONCE(bio.bi_private))
break;
if (!(iocb->ki_flags & IOCB_HIPRI) ||
- !blk_mq_poll(bdev_get_queue(bdev), qc))
+ !blk_poll(bdev_get_queue(bdev), qc))
io_schedule();
}
__set_current_state(TASK_RUNNING);
@@ -414,7 +402,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
break;
if (!(iocb->ki_flags & IOCB_HIPRI) ||
- !blk_mq_poll(bdev_get_queue(bdev), qc))
+ !blk_poll(bdev_get_queue(bdev), qc))
io_schedule();
}
__set_current_state(TASK_RUNNING);
@@ -674,7 +662,7 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
if (!ops->rw_page || bdev_get_integrity(bdev))
return result;
- result = blk_queue_enter(bdev->bd_queue, false);
+ result = blk_queue_enter(bdev->bd_queue, 0);
if (result)
return result;
result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
@@ -710,7 +698,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
if (!ops->rw_page || bdev_get_integrity(bdev))
return -EOPNOTSUPP;
- result = blk_queue_enter(bdev->bd_queue, false);
+ result = blk_queue_enter(bdev->bd_queue, 0);
if (result)
return result;
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index a26c63b4ad68..2e558227931a 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -91,3 +91,14 @@ config BTRFS_ASSERT
any of the assertions trip. This is meant for btrfs developers only.
If unsure, say N.
+
+config BTRFS_FS_REF_VERIFY
+ bool "Btrfs with the ref verify tool compiled in"
+ depends on BTRFS_FS
+ default n
+ help
+ Enable run-time extent reference verification instrumentation. This
+ is meant to be used by btrfs developers for tracking down extent
+ reference problems or verifying they didn't break something.
+
+ If unsure, say N.
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index f2cd9dedb037..6fe881d5cb38 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -10,10 +10,11 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
export.o tree-log.o free-space-cache.o zlib.o lzo.o zstd.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
- uuid-tree.o props.o hash.o free-space-tree.o
+ uuid-tree.o props.o hash.o free-space-tree.o tree-checker.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
+btrfs-$(CONFIG_BTRFS_FS_REF_VERIFY) += ref-verify.o
btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
tests/extent-buffer-tests.o tests/btrfs-tests.o \
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index e00c8a9fd5bb..d5540749f0e5 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -67,7 +67,7 @@ struct btrfs_workqueue {
static void normal_work_helper(struct btrfs_work *work);
#define BTRFS_WORK_HELPER(name) \
-void btrfs_##name(struct work_struct *arg) \
+noinline_for_stack void btrfs_##name(struct work_struct *arg) \
{ \
struct btrfs_work *work = container_of(arg, struct btrfs_work, \
normal_work); \
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index b517ef1477ea..7d0dc100a09a 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -40,12 +40,14 @@ static int check_extent_in_eb(const struct btrfs_key *key,
const struct extent_buffer *eb,
const struct btrfs_file_extent_item *fi,
u64 extent_item_pos,
- struct extent_inode_elem **eie)
+ struct extent_inode_elem **eie,
+ bool ignore_offset)
{
u64 offset = 0;
struct extent_inode_elem *e;
- if (!btrfs_file_extent_compression(eb, fi) &&
+ if (!ignore_offset &&
+ !btrfs_file_extent_compression(eb, fi) &&
!btrfs_file_extent_encryption(eb, fi) &&
!btrfs_file_extent_other_encoding(eb, fi)) {
u64 data_offset;
@@ -84,7 +86,8 @@ static void free_inode_elem_list(struct extent_inode_elem *eie)
static int find_extent_in_eb(const struct extent_buffer *eb,
u64 wanted_disk_byte, u64 extent_item_pos,
- struct extent_inode_elem **eie)
+ struct extent_inode_elem **eie,
+ bool ignore_offset)
{
u64 disk_byte;
struct btrfs_key key;
@@ -113,7 +116,7 @@ static int find_extent_in_eb(const struct extent_buffer *eb,
if (disk_byte != wanted_disk_byte)
continue;
- ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
+ ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
if (ret < 0)
return ret;
}
@@ -419,7 +422,7 @@ static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
struct ulist *parents, struct prelim_ref *ref,
int level, u64 time_seq, const u64 *extent_item_pos,
- u64 total_refs)
+ u64 total_refs, bool ignore_offset)
{
int ret = 0;
int slot;
@@ -472,7 +475,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
if (extent_item_pos) {
ret = check_extent_in_eb(&key, eb, fi,
*extent_item_pos,
- &eie);
+ &eie, ignore_offset);
if (ret < 0)
break;
}
@@ -510,7 +513,8 @@ next:
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 time_seq,
struct prelim_ref *ref, struct ulist *parents,
- const u64 *extent_item_pos, u64 total_refs)
+ const u64 *extent_item_pos, u64 total_refs,
+ bool ignore_offset)
{
struct btrfs_root *root;
struct btrfs_key root_key;
@@ -581,7 +585,7 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
}
ret = add_all_parents(root, path, parents, ref, level, time_seq,
- extent_item_pos, total_refs);
+ extent_item_pos, total_refs, ignore_offset);
out:
path->lowest_level = 0;
btrfs_release_path(path);
@@ -616,7 +620,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 time_seq,
struct preftrees *preftrees,
const u64 *extent_item_pos, u64 total_refs,
- struct share_check *sc)
+ struct share_check *sc, bool ignore_offset)
{
int err;
int ret = 0;
@@ -661,7 +665,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
}
err = resolve_indirect_ref(fs_info, path, time_seq, ref,
parents, extent_item_pos,
- total_refs);
+ total_refs, ignore_offset);
/*
* we can only tolerate ENOENT,otherwise,we should catch error
* and return directly.
@@ -769,6 +773,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
struct btrfs_key key;
struct btrfs_key tmp_op_key;
struct btrfs_key *op_key = NULL;
+ struct rb_node *n;
int count;
int ret = 0;
@@ -778,7 +783,9 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
}
spin_lock(&head->lock);
- list_for_each_entry(node, &head->ref_list, list) {
+ for (n = rb_first(&head->ref_tree); n; n = rb_next(n)) {
+ node = rb_entry(n, struct btrfs_delayed_ref_node,
+ ref_node);
if (node->seq > seq)
continue;
@@ -1107,13 +1114,17 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info,
*
* Otherwise this returns 0 for success and <0 for an error.
*
+ * If ignore_offset is set to false, only extent refs whose offsets match
+ * extent_item_pos are returned. If true, every extent ref is returned
+ * and extent_item_pos is ignored.
+ *
* FIXME some caching might speed things up
*/
static int find_parent_nodes(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
u64 time_seq, struct ulist *refs,
struct ulist *roots, const u64 *extent_item_pos,
- struct share_check *sc)
+ struct share_check *sc, bool ignore_offset)
{
struct btrfs_key key;
struct btrfs_path *path;
@@ -1178,7 +1189,7 @@ again:
head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
if (head) {
if (!mutex_trylock(&head->mutex)) {
- refcount_inc(&head->node.refs);
+ refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
btrfs_release_path(path);
@@ -1189,7 +1200,7 @@ again:
*/
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
goto again;
}
spin_unlock(&delayed_refs->lock);
@@ -1235,7 +1246,7 @@ again:
WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root));
ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
- extent_item_pos, total_refs, sc);
+ extent_item_pos, total_refs, sc, ignore_offset);
if (ret)
goto out;
@@ -1282,7 +1293,7 @@ again:
btrfs_tree_read_lock(eb);
btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
ret = find_extent_in_eb(eb, bytenr,
- *extent_item_pos, &eie);
+ *extent_item_pos, &eie, ignore_offset);
btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
if (ret < 0)
@@ -1350,7 +1361,7 @@ static void free_leaf_list(struct ulist *blocks)
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
u64 time_seq, struct ulist **leafs,
- const u64 *extent_item_pos)
+ const u64 *extent_item_pos, bool ignore_offset)
{
int ret;
@@ -1359,7 +1370,7 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
return -ENOMEM;
ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
- *leafs, NULL, extent_item_pos, NULL);
+ *leafs, NULL, extent_item_pos, NULL, ignore_offset);
if (ret < 0 && ret != -ENOENT) {
free_leaf_list(*leafs);
return ret;
@@ -1383,7 +1394,8 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
*/
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 time_seq, struct ulist **roots)
+ u64 time_seq, struct ulist **roots,
+ bool ignore_offset)
{
struct ulist *tmp;
struct ulist_node *node = NULL;
@@ -1402,7 +1414,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
ULIST_ITER_INIT(&uiter);
while (1) {
ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
- tmp, *roots, NULL, NULL);
+ tmp, *roots, NULL, NULL, ignore_offset);
if (ret < 0 && ret != -ENOENT) {
ulist_free(tmp);
ulist_free(*roots);
@@ -1421,14 +1433,15 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 time_seq, struct ulist **roots)
+ u64 time_seq, struct ulist **roots,
+ bool ignore_offset)
{
int ret;
if (!trans)
down_read(&fs_info->commit_root_sem);
ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
- time_seq, roots);
+ time_seq, roots, ignore_offset);
if (!trans)
up_read(&fs_info->commit_root_sem);
return ret;
@@ -1483,7 +1496,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
ULIST_ITER_INIT(&uiter);
while (1) {
ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
- roots, NULL, &shared);
+ roots, NULL, &shared, false);
if (ret == BACKREF_FOUND_SHARED) {
/* this is the only condition under which we return 1 */
ret = 1;
@@ -1877,7 +1890,8 @@ static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
u64 extent_item_objectid, u64 extent_item_pos,
int search_commit_root,
- iterate_extent_inodes_t *iterate, void *ctx)
+ iterate_extent_inodes_t *iterate, void *ctx,
+ bool ignore_offset)
{
int ret;
struct btrfs_trans_handle *trans = NULL;
@@ -1903,14 +1917,15 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
tree_mod_seq_elem.seq, &refs,
- &extent_item_pos);
+ &extent_item_pos, ignore_offset);
if (ret)
goto out;
ULIST_ITER_INIT(&ref_uiter);
while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
- tree_mod_seq_elem.seq, &roots);
+ tree_mod_seq_elem.seq, &roots,
+ ignore_offset);
if (ret)
break;
ULIST_ITER_INIT(&root_uiter);
@@ -1943,7 +1958,8 @@ out:
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
- iterate_extent_inodes_t *iterate, void *ctx)
+ iterate_extent_inodes_t *iterate, void *ctx,
+ bool ignore_offset)
{
int ret;
u64 extent_item_pos;
@@ -1961,7 +1977,7 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
extent_item_pos = logical - found_key.objectid;
ret = iterate_extent_inodes(fs_info, found_key.objectid,
extent_item_pos, search_commit_root,
- iterate, ctx);
+ iterate, ctx, ignore_offset);
return ret;
}
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index e410335841aa..0c2fab8514ff 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -43,17 +43,19 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
u64 extent_item_objectid,
u64 extent_offset, int search_commit_root,
- iterate_extent_inodes_t *iterate, void *ctx);
+ iterate_extent_inodes_t *iterate, void *ctx,
+ bool ignore_offset);
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
- iterate_extent_inodes_t *iterate, void *ctx);
+ iterate_extent_inodes_t *iterate, void *ctx,
+ bool ignore_offset);
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 time_seq, struct ulist **roots);
+ u64 time_seq, struct ulist **roots, bool ignore_offset);
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
u32 name_len, unsigned long name_off,
struct extent_buffer *eb_in, u64 parent,
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index eccadb5f62a5..63f0ccc92a71 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -36,14 +36,13 @@
#define BTRFS_INODE_ORPHAN_META_RESERVED 1
#define BTRFS_INODE_DUMMY 2
#define BTRFS_INODE_IN_DEFRAG 3
-#define BTRFS_INODE_DELALLOC_META_RESERVED 4
-#define BTRFS_INODE_HAS_ORPHAN_ITEM 5
-#define BTRFS_INODE_HAS_ASYNC_EXTENT 6
-#define BTRFS_INODE_NEEDS_FULL_SYNC 7
-#define BTRFS_INODE_COPY_EVERYTHING 8
-#define BTRFS_INODE_IN_DELALLOC_LIST 9
-#define BTRFS_INODE_READDIO_NEED_LOCK 10
-#define BTRFS_INODE_HAS_PROPS 11
+#define BTRFS_INODE_HAS_ORPHAN_ITEM 4
+#define BTRFS_INODE_HAS_ASYNC_EXTENT 5
+#define BTRFS_INODE_NEEDS_FULL_SYNC 6
+#define BTRFS_INODE_COPY_EVERYTHING 7
+#define BTRFS_INODE_IN_DELALLOC_LIST 8
+#define BTRFS_INODE_READDIO_NEED_LOCK 9
+#define BTRFS_INODE_HAS_PROPS 10
/* in memory btrfs inode */
struct btrfs_inode {
@@ -176,7 +175,8 @@ struct btrfs_inode {
* of extent items we've reserved metadata for.
*/
unsigned outstanding_extents;
- unsigned reserved_extents;
+
+ struct btrfs_block_rsv block_rsv;
/*
* Cached values of inode properties
@@ -267,6 +267,17 @@ static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
return false;
}
+static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
+ int mod)
+{
+ lockdep_assert_held(&inode->lock);
+ inode->outstanding_extents += mod;
+ if (btrfs_is_free_space_inode(inode))
+ return;
+ trace_btrfs_inode_mod_outstanding_extents(inode->root, btrfs_ino(inode),
+ mod);
+}
+
static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
{
int ret = 0;
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 7d5a9b51f0d7..7d51b5a5b505 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -613,7 +613,7 @@ static void btrfsic_dev_state_hashtable_add(
struct btrfsic_dev_state_hashtable *h)
{
const unsigned int hashval =
- (((unsigned int)((uintptr_t)ds->bdev)) &
+ (((unsigned int)((uintptr_t)ds->bdev->bd_dev)) &
(BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
list_add(&ds->collision_resolving_node, h->table + hashval);
@@ -2803,7 +2803,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
mutex_lock(&btrfsic_mutex);
/* since btrfsic_submit_bio() is also called before
* btrfsic_mount(), this might return NULL */
- dev_state = btrfsic_dev_state_lookup(bio_dev(bio));
+ dev_state = btrfsic_dev_state_lookup(bio_dev(bio) + bio->bi_partno);
if (NULL != dev_state &&
(bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
unsigned int i = 0;
@@ -2913,7 +2913,7 @@ int btrfsic_mount(struct btrfs_fs_info *fs_info,
state = kvzalloc(sizeof(*state), GFP_KERNEL);
if (!state) {
pr_info("btrfs check-integrity: allocation failed!\n");
- return -1;
+ return -ENOMEM;
}
if (!btrfsic_is_initialized) {
@@ -2945,7 +2945,7 @@ int btrfsic_mount(struct btrfs_fs_info *fs_info,
if (NULL == ds) {
pr_info("btrfs check-integrity: kmalloc() failed!\n");
mutex_unlock(&btrfsic_mutex);
- return -1;
+ return -ENOMEM;
}
ds->bdev = device->bdev;
ds->state = state;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 280384bf34f1..b35ce16b3df3 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -33,6 +33,8 @@
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
+#include <linux/sort.h>
+#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -255,7 +257,8 @@ static void end_compressed_bio_write(struct bio *bio)
cb->start,
cb->start + cb->len - 1,
NULL,
- bio->bi_status ? 0 : 1);
+ bio->bi_status ?
+ BLK_STS_OK : BLK_STS_NOTSUPP);
cb->compressed_pages[0]->mapping = NULL;
end_compressed_writeback(inode, cb);
@@ -706,7 +709,86 @@ out:
return ret;
}
-static struct {
+/*
+ * Heuristic uses systematic sampling to collect data from the input data
+ * range, the logic can be tuned by the following constants:
+ *
+ * @SAMPLING_READ_SIZE - how many bytes will be copied from the range for each sample
+ * @SAMPLING_INTERVAL - the distance between the start offsets of consecutive samples
+ */
+#define SAMPLING_READ_SIZE (16)
+#define SAMPLING_INTERVAL (256)
+
+/*
+ * For statistical analysis of the input data we consider bytes that form a
+ * Galois Field of 256 objects. Each object has an attribute count, ie. how
+ * many times the object appeared in the sample.
+ */
+#define BUCKET_SIZE (256)
+
+/*
+ * The size of the sample is based on a statistical sampling rule of thumb.
+ * The common way is to perform sampling tests as long as the number of
+ * elements in each cell is at least 5.
+ *
+ * Instead of 5, we choose 32 to obtain more accurate results.
+ * If the data contain the maximum number of symbols, which is 256, we obtain a
+ * sample size bound by 8192.
+ *
+ * For a sample of at most 8KB of data per data range: 16 consecutive bytes
+ * from up to 512 locations.
+ */
+#define MAX_SAMPLE_SIZE (BTRFS_MAX_UNCOMPRESSED * \
+ SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
+
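Assuming BTRFS_MAX_UNCOMPRESSED is the 128 KiB compression chunk size used elsewhere in btrfs, the bound above works out to 8 KiB per sampled range, i.e. 512 reads of 16 bytes, which also matches the 32-counts-per-bucket rule of thumb (32 * 256 = 8192). A stand-alone check of that arithmetic (the macro values are copied here only for illustration):

#include <assert.h>
#include <stdio.h>

#define SAMPLING_READ_SIZE	16
#define SAMPLING_INTERVAL	256
/* Assumption: BTRFS_MAX_UNCOMPRESSED is one 128 KiB compression chunk. */
#define MAX_UNCOMPRESSED	(128 * 1024)
#define MAX_SAMPLE_SIZE		(MAX_UNCOMPRESSED * SAMPLING_READ_SIZE / SAMPLING_INTERVAL)

int main(void)
{
	/* One 16-byte read per 256-byte interval of the 128 KiB chunk. */
	int locations = MAX_UNCOMPRESSED / SAMPLING_INTERVAL;	/* 512 */
	int sample = locations * SAMPLING_READ_SIZE;		/* 8192 */

	assert(sample == MAX_SAMPLE_SIZE);
	printf("%d locations x %d bytes = %d byte sample (8 KiB)\n",
	       locations, SAMPLING_READ_SIZE, sample);
	return 0;
}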
+struct bucket_item {
+ u32 count;
+};
+
+struct heuristic_ws {
+ /* Partial copy of input data */
+ u8 *sample;
+ u32 sample_size;
+ /* Buckets store counters for each byte value */
+ struct bucket_item *bucket;
+ struct list_head list;
+};
+
+static void free_heuristic_ws(struct list_head *ws)
+{
+ struct heuristic_ws *workspace;
+
+ workspace = list_entry(ws, struct heuristic_ws, list);
+
+ kvfree(workspace->sample);
+ kfree(workspace->bucket);
+ kfree(workspace);
+}
+
+static struct list_head *alloc_heuristic_ws(void)
+{
+ struct heuristic_ws *ws;
+
+ ws = kzalloc(sizeof(*ws), GFP_KERNEL);
+ if (!ws)
+ return ERR_PTR(-ENOMEM);
+
+ ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
+ if (!ws->sample)
+ goto fail;
+
+ ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
+ if (!ws->bucket)
+ goto fail;
+
+ INIT_LIST_HEAD(&ws->list);
+ return &ws->list;
+fail:
+ free_heuristic_ws(&ws->list);
+ return ERR_PTR(-ENOMEM);
+}
+
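free_heuristic_ws() can safely be called on a partially constructed workspace because kvfree() and kfree() accept NULL, which is what lets alloc_heuristic_ws() funnel every error path through a single fail label. A minimal user-space sketch of the same allocate-or-unwind pattern (free(NULL) is likewise a no-op; the ws_alloc/ws_free names are illustrative):

#include <stdlib.h>

struct ws {
	unsigned char *sample;
	unsigned int *bucket;
};

static void ws_free(struct ws *w)
{
	if (!w)
		return;
	free(w->sample);	/* free(NULL) is a no-op, so partial init is fine */
	free(w->bucket);
	free(w);
}

static struct ws *ws_alloc(size_t sample_sz, size_t buckets)
{
	struct ws *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;
	w->sample = malloc(sample_sz);
	if (!w->sample)
		goto fail;
	w->bucket = calloc(buckets, sizeof(*w->bucket));
	if (!w->bucket)
		goto fail;
	return w;
fail:
	ws_free(w);		/* single cleanup path, mirrors free_heuristic_ws() */
	return NULL;
}

int main(void)
{
	struct ws *w = ws_alloc(8192, 256);

	if (!w)
		return 1;
	ws_free(w);
	return 0;
}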
+struct workspaces_list {
struct list_head idle_ws;
spinlock_t ws_lock;
/* Number of free workspaces */
@@ -715,7 +797,11 @@ static struct {
atomic_t total_ws;
/* Waiters for a free workspace */
wait_queue_head_t ws_wait;
-} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
+};
+
+static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
+
+static struct workspaces_list btrfs_heuristic_ws;
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
&btrfs_zlib_compress,
@@ -725,11 +811,25 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
void __init btrfs_init_compress(void)
{
+ struct list_head *workspace;
int i;
- for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
- struct list_head *workspace;
+ INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
+ spin_lock_init(&btrfs_heuristic_ws.ws_lock);
+ atomic_set(&btrfs_heuristic_ws.total_ws, 0);
+ init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);
+
+ workspace = alloc_heuristic_ws();
+ if (IS_ERR(workspace)) {
+ pr_warn(
+ "BTRFS: cannot preallocate heuristic workspace, will try later\n");
+ } else {
+ atomic_set(&btrfs_heuristic_ws.total_ws, 1);
+ btrfs_heuristic_ws.free_ws = 1;
+ list_add(workspace, &btrfs_heuristic_ws.idle_ws);
+ }
+ for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
spin_lock_init(&btrfs_comp_ws[i].ws_lock);
atomic_set(&btrfs_comp_ws[i].total_ws, 0);
@@ -756,18 +856,32 @@ void __init btrfs_init_compress(void)
 * Preallocation makes a forward progress guarantee and we do not return
* errors.
*/
-static struct list_head *find_workspace(int type)
+static struct list_head *__find_workspace(int type, bool heuristic)
{
struct list_head *workspace;
int cpus = num_online_cpus();
int idx = type - 1;
unsigned nofs_flag;
+ struct list_head *idle_ws;
+ spinlock_t *ws_lock;
+ atomic_t *total_ws;
+ wait_queue_head_t *ws_wait;
+ int *free_ws;
+
+ if (heuristic) {
+ idle_ws = &btrfs_heuristic_ws.idle_ws;
+ ws_lock = &btrfs_heuristic_ws.ws_lock;
+ total_ws = &btrfs_heuristic_ws.total_ws;
+ ws_wait = &btrfs_heuristic_ws.ws_wait;
+ free_ws = &btrfs_heuristic_ws.free_ws;
+ } else {
+ idle_ws = &btrfs_comp_ws[idx].idle_ws;
+ ws_lock = &btrfs_comp_ws[idx].ws_lock;
+ total_ws = &btrfs_comp_ws[idx].total_ws;
+ ws_wait = &btrfs_comp_ws[idx].ws_wait;
+ free_ws = &btrfs_comp_ws[idx].free_ws;
+ }
- struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
- spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
- atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws;
- wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
- int *free_ws = &btrfs_comp_ws[idx].free_ws;
again:
spin_lock(ws_lock);
if (!list_empty(idle_ws)) {
@@ -797,7 +911,10 @@ again:
* context of btrfs_compress_bio/btrfs_compress_pages
*/
nofs_flag = memalloc_nofs_save();
- workspace = btrfs_compress_op[idx]->alloc_workspace();
+ if (heuristic)
+ workspace = alloc_heuristic_ws();
+ else
+ workspace = btrfs_compress_op[idx]->alloc_workspace();
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(workspace)) {
@@ -828,18 +945,38 @@ again:
return workspace;
}
+static struct list_head *find_workspace(int type)
+{
+ return __find_workspace(type, false);
+}
+
/*
* put a workspace struct back on the list or free it if we have enough
* idle ones sitting around
*/
-static void free_workspace(int type, struct list_head *workspace)
+static void __free_workspace(int type, struct list_head *workspace,
+ bool heuristic)
{
int idx = type - 1;
- struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
- spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
- atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws;
- wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
- int *free_ws = &btrfs_comp_ws[idx].free_ws;
+ struct list_head *idle_ws;
+ spinlock_t *ws_lock;
+ atomic_t *total_ws;
+ wait_queue_head_t *ws_wait;
+ int *free_ws;
+
+ if (heuristic) {
+ idle_ws = &btrfs_heuristic_ws.idle_ws;
+ ws_lock = &btrfs_heuristic_ws.ws_lock;
+ total_ws = &btrfs_heuristic_ws.total_ws;
+ ws_wait = &btrfs_heuristic_ws.ws_wait;
+ free_ws = &btrfs_heuristic_ws.free_ws;
+ } else {
+ idle_ws = &btrfs_comp_ws[idx].idle_ws;
+ ws_lock = &btrfs_comp_ws[idx].ws_lock;
+ total_ws = &btrfs_comp_ws[idx].total_ws;
+ ws_wait = &btrfs_comp_ws[idx].ws_wait;
+ free_ws = &btrfs_comp_ws[idx].free_ws;
+ }
spin_lock(ws_lock);
if (*free_ws <= num_online_cpus()) {
@@ -850,7 +987,10 @@ static void free_workspace(int type, struct list_head *workspace)
}
spin_unlock(ws_lock);
- btrfs_compress_op[idx]->free_workspace(workspace);
+ if (heuristic)
+ free_heuristic_ws(workspace);
+ else
+ btrfs_compress_op[idx]->free_workspace(workspace);
atomic_dec(total_ws);
wake:
/*
@@ -861,6 +1001,11 @@ wake:
wake_up(ws_wait);
}
+static void free_workspace(int type, struct list_head *ws)
+{
+ return __free_workspace(type, ws, false);
+}
+
/*
* cleanup function for module exit
*/
@@ -869,6 +1014,13 @@ static void free_workspaces(void)
struct list_head *workspace;
int i;
+ while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
+ workspace = btrfs_heuristic_ws.idle_ws.next;
+ list_del(workspace);
+ free_heuristic_ws(workspace);
+ atomic_dec(&btrfs_heuristic_ws.total_ws);
+ }
+
for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
workspace = btrfs_comp_ws[i].idle_ws.next;
@@ -883,6 +1035,11 @@ static void free_workspaces(void)
* Given an address space and start and length, compress the bytes into @pages
* that are allocated on demand.
*
+ * @type_level is the encoded algorithm and level, where level 0 means whatever
+ * default the algorithm chooses and is opaque here;
+ * - the compression algorithm is stored in bits 0-3
+ * - the level is stored in bits 4-7
+ *
* @out_pages is an in/out parameter, holds maximum number of pages to allocate
* and returns number of actually allocated pages
*
@@ -897,7 +1054,7 @@ static void free_workspaces(void)
* @max_out tells us the max number of bytes that we're allowed to
* stuff into pages
*/
-int btrfs_compress_pages(int type, struct address_space *mapping,
+int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
u64 start, struct page **pages,
unsigned long *out_pages,
unsigned long *total_in,
@@ -905,9 +1062,11 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
{
struct list_head *workspace;
int ret;
+ int type = type_level & 0xF;
workspace = find_workspace(type);
+ btrfs_compress_op[type - 1]->set_level(workspace, type_level);
ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
start, pages,
out_pages,
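Given the layout described in the comment above (algorithm in the low four bits, level in bits 4-7, level 0 meaning the algorithm's default), packing and unpacking a type_level value can be sketched as follows; pack_type_level() is a made-up helper, not a btrfs API, and only the BTRFS_COMPRESS_ZLIB value visible later in this patch is assumed:

#include <assert.h>
#include <stdio.h>

/* Mirrors BTRFS_COMPRESS_ZLIB = 1 from the compression.h hunk below. */
enum { COMPRESS_ZLIB = 1 };

/* Hypothetical packing helper: level in bits 4-7, algorithm in bits 0-3. */
static unsigned int pack_type_level(unsigned int type, unsigned int level)
{
	return (level << 4) | (type & 0xF);
}

int main(void)
{
	unsigned int tl = pack_type_level(COMPRESS_ZLIB, 6);	/* e.g. compress=zlib:6 */
	unsigned int type = tl & 0xF;		/* same extraction as btrfs_compress_pages() */
	unsigned int level = (tl >> 4) & 0xF;	/* level 0 = "algorithm default" */

	assert(type == COMPRESS_ZLIB && level == 6);
	printf("type_level=0x%x -> type=%u level=%u\n", tl, type, level);
	return 0;
}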
@@ -1066,6 +1225,211 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
}
/*
+ * Shannon Entropy calculation
+ *
+ * Pure byte distribution analysis fails to determine compressibility of data.
+ * Try calculating entropy to estimate the average minimum number of bits
+ * needed to encode the sampled data.
+ *
+ * For convenience, return the percentage of needed bits, instead of amount of
+ * bits directly.
+ *
+ * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
+ * and can be compressible with high probability
+ *
+ * @ENTROPY_LVL_HIGH - data are not compressible with high probability
+ *
+ * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
+ */
+#define ENTROPY_LVL_ACEPTABLE (65)
+#define ENTROPY_LVL_HIGH (80)
+
+/*
+ * For increased precision in the shannon_entropy calculation,
+ * let's do pow(n, M) to keep more digits after the decimal point:
+ *
+ * - maximum int bit length is 64
+ * - ilog2(MAX_SAMPLE_SIZE) -> 13
+ * - 13 * 4 = 52 < 64 -> M = 4
+ *
+ * So use pow(n, 4).
+ */
+static inline u32 ilog2_w(u64 n)
+{
+ return ilog2(n * n * n * n);
+}
+
+static u32 shannon_entropy(struct heuristic_ws *ws)
+{
+ const u32 entropy_max = 8 * ilog2_w(2);
+ u32 entropy_sum = 0;
+ u32 p, p_base, sz_base;
+ u32 i;
+
+ sz_base = ilog2_w(ws->sample_size);
+ for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
+ p = ws->bucket[i].count;
+ p_base = ilog2_w(p);
+ entropy_sum += p * (sz_base - p_base);
+ }
+
+ entropy_sum /= ws->sample_size;
+ return entropy_sum * 100 / entropy_max;
+}
+
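The trick in ilog2_w() is that ilog2(n^4) is roughly 4 * log2(n), which keeps two extra bits of fractional precision while still fitting in a u64: the largest count is the 8192-byte sample size, ilog2(8192^4) = 52 < 64, and entropy_max becomes 8 * ilog2_w(2) = 32. A user-space sketch of the same integer entropy estimate, using __builtin_clzll() in place of the kernel's ilog2() and two synthetic histograms:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BUCKET_SIZE 256

/* ilog2(n^4) ~= 4 * log2(n); safe for n up to the 8192-byte sample size. */
static uint32_t ilog2_w(uint64_t n)
{
	uint64_t m = n * n * n * n;

	return m ? 63 - __builtin_clzll(m) : 0;
}

/* Percentage of bits needed to encode the sample; ~100 means incompressible. */
static uint32_t shannon_entropy(const uint32_t *bucket, uint32_t sample_size)
{
	const uint32_t entropy_max = 8 * ilog2_w(2);	/* 8 bits * 4 = 32 */
	uint32_t sz_base = ilog2_w(sample_size);
	uint32_t entropy_sum = 0;

	for (uint32_t i = 0; i < BUCKET_SIZE; i++) {
		if (!bucket[i])
			continue;
		entropy_sum += bucket[i] * (sz_base - ilog2_w(bucket[i]));
	}
	entropy_sum /= sample_size;
	return entropy_sum * 100 / entropy_max;
}

int main(void)
{
	uint32_t bucket[BUCKET_SIZE];
	uint32_t i;

	/* Text-like sample: only 4 distinct byte values, low entropy (25%). */
	memset(bucket, 0, sizeof(bucket));
	for (i = 0; i < 4; i++)
		bucket[i] = 2048;
	printf("4 symbols : %u%%\n", shannon_entropy(bucket, 8192));

	/* Uniform sample: all 256 values equally likely, close to 100%. */
	for (i = 0; i < BUCKET_SIZE; i++)
		bucket[i] = 32;
	printf("uniform   : %u%%\n", shannon_entropy(bucket, 8192));
	return 0;
}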
+/* Compare buckets by size, descending (used to sort in reverse order) */
+static int bucket_comp_rev(const void *lv, const void *rv)
+{
+ const struct bucket_item *l = (const struct bucket_item *)lv;
+ const struct bucket_item *r = (const struct bucket_item *)rv;
+
+ return r->count - l->count;
+}
+
+/*
+ * Size of the core byte set - how many bytes cover 90% of the sample
+ *
+ * There are several types of structured binary data that use nearly all byte
+ * values. The distribution can be uniform and counts in all buckets will be
+ * nearly the same (eg. encrypted data). Unlikely to be compressible.
+ *
+ * Another possibility is a normal (Gaussian) distribution, where the data could
+ * be potentially compressible, but we have to take a few more steps to decide
+ * how much.
+ *
+ * @BYTE_CORE_SET_LOW - the main part of the byte values repeats frequently,
+ * a compression algorithm can easily handle that
+ * @BYTE_CORE_SET_HIGH - the data have a uniform distribution and are with
+ * high probability not compressible
+ */
+#define BYTE_CORE_SET_LOW (64)
+#define BYTE_CORE_SET_HIGH (200)
+
+static int byte_core_set_size(struct heuristic_ws *ws)
+{
+ u32 i;
+ u32 coreset_sum = 0;
+ const u32 core_set_threshold = ws->sample_size * 90 / 100;
+ struct bucket_item *bucket = ws->bucket;
+
+ /* Sort in reverse order */
+ sort(bucket, BUCKET_SIZE, sizeof(*bucket), &bucket_comp_rev, NULL);
+
+ for (i = 0; i < BYTE_CORE_SET_LOW; i++)
+ coreset_sum += bucket[i].count;
+
+ if (coreset_sum > core_set_threshold)
+ return i;
+
+ for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
+ coreset_sum += bucket[i].count;
+ if (coreset_sum > core_set_threshold)
+ break;
+ }
+
+ return i;
+}
+
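To make the core-set idea concrete: sort the 256 buckets by count, descending, and count how many of the most frequent byte values are needed to cover 90% of the sample. The sketch below is a simplified user-space variant (qsort() instead of the kernel's sort(), and it returns the count directly); the histograms and names are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUCKET_SIZE 256

static int comp_desc(const void *lv, const void *rv)
{
	const unsigned int *l = lv, *r = rv;

	return (int)*r - (int)*l;	/* larger counts first */
}

/* How many of the most frequent byte values cover 90% of the sample. */
static unsigned int core_set_size(unsigned int *bucket, unsigned int sample_size)
{
	unsigned int threshold = sample_size * 90 / 100;
	unsigned int sum = 0, i;

	qsort(bucket, BUCKET_SIZE, sizeof(*bucket), comp_desc);
	for (i = 0; i < BUCKET_SIZE && bucket[i]; i++) {
		sum += bucket[i];
		if (sum > threshold)
			break;
	}
	return i + 1;
}

int main(void)
{
	unsigned int bucket[BUCKET_SIZE];
	unsigned int i;

	/* ASCII-text-like histogram: ~60 hot values, small core set (55). */
	memset(bucket, 0, sizeof(bucket));
	for (i = 0; i < 60; i++)
		bucket[i] = 136;		/* 60 * 136 = 8160 of 8192 */
	bucket[60] = 32;
	printf("text-like : core set = %u\n", core_set_size(bucket, 8192));

	/* Uniform histogram: most values needed, core set well above 200. */
	for (i = 0; i < BUCKET_SIZE; i++)
		bucket[i] = 32;
	printf("uniform   : core set = %u\n", core_set_size(bucket, 8192));
	return 0;
}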
+/*
+ * Count byte values in buckets.
+ * This heuristic can detect textual data (configs, xml, json, html, etc).
+ * Because in most text-like data the byte set is restricted to a limited
+ * number of possible characters, and that restriction in most cases makes
+ * the data easy to compress.
+ *
+ * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
+ * less - compressible
+ * more - need additional analysis
+ */
+#define BYTE_SET_THRESHOLD (64)
+
+static u32 byte_set_size(const struct heuristic_ws *ws)
+{
+ u32 i;
+ u32 byte_set_size = 0;
+
+ for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
+ if (ws->bucket[i].count > 0)
+ byte_set_size++;
+ }
+
+ /*
+ * Continue collecting count of byte values in buckets. If the byte
+ * set size is bigger than the threshold, it's pointless to continue,
+ * the detection technique would fail for this type of data.
+ */
+ for (; i < BUCKET_SIZE; i++) {
+ if (ws->bucket[i].count > 0) {
+ byte_set_size++;
+ if (byte_set_size > BYTE_SET_THRESHOLD)
+ return byte_set_size;
+ }
+ }
+
+ return byte_set_size;
+}
+
+static bool sample_repeated_patterns(struct heuristic_ws *ws)
+{
+ const u32 half_of_sample = ws->sample_size / 2;
+ const u8 *data = ws->sample;
+
+ return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
+}
+
+static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
+ struct heuristic_ws *ws)
+{
+ struct page *page;
+ u64 index, index_end;
+ u32 i, curr_sample_pos;
+ u8 *in_data;
+
+ /*
+ * Compression handles the input data by chunks of 128KiB
+ * (defined by BTRFS_MAX_UNCOMPRESSED)
+ *
+ * We do the same for the heuristic and loop over the whole range.
+ *
+ * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
+ * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
+ */
+ if (end - start > BTRFS_MAX_UNCOMPRESSED)
+ end = start + BTRFS_MAX_UNCOMPRESSED;
+
+ index = start >> PAGE_SHIFT;
+ index_end = end >> PAGE_SHIFT;
+
+ /* Don't miss unaligned end */
+ if (!IS_ALIGNED(end, PAGE_SIZE))
+ index_end++;
+
+ curr_sample_pos = 0;
+ while (index < index_end) {
+ page = find_get_page(inode->i_mapping, index);
+ in_data = kmap(page);
+ /* Handle case where the start is not aligned to PAGE_SIZE */
+ i = start % PAGE_SIZE;
+ while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
+ /* Don't sample any garbage from the last page */
+ if (start > end - SAMPLING_READ_SIZE)
+ break;
+ memcpy(&ws->sample[curr_sample_pos], &in_data[i],
+ SAMPLING_READ_SIZE);
+ i += SAMPLING_INTERVAL;
+ start += SAMPLING_INTERVAL;
+ curr_sample_pos += SAMPLING_READ_SIZE;
+ }
+ kunmap(page);
+ put_page(page);
+
+ index++;
+ }
+
+ ws->sample_size = curr_sample_pos;
+}
+
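Stripped of the page-by-page kmap handling, the loop above is plain systematic sampling: copy 16 bytes at every 256-byte step of the (at most 128 KiB) range, stopping before the last partial read. A user-space sketch over a flat buffer, under those same constants:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SAMPLING_READ_SIZE	16
#define SAMPLING_INTERVAL	256

/* Copy 16 bytes every 256 bytes of [start, end); returns the sample size. */
static size_t collect_sample(const uint8_t *data, size_t start, size_t end,
			     uint8_t *sample)
{
	size_t pos = 0;

	while (start + SAMPLING_READ_SIZE <= end) {
		memcpy(sample + pos, data + start, SAMPLING_READ_SIZE);
		pos += SAMPLING_READ_SIZE;
		start += SAMPLING_INTERVAL;
	}
	return pos;
}

int main(void)
{
	size_t len = 128 * 1024;	/* one compression chunk */
	uint8_t *data = malloc(len);
	uint8_t *sample = malloc(len * SAMPLING_READ_SIZE / SAMPLING_INTERVAL);

	if (!data || !sample)
		return 1;
	memset(data, 'a', len);
	printf("sample size = %zu bytes\n",
	       collect_sample(data, 0, len, sample));	/* 8192 */
	free(data);
	free(sample);
	return 0;
}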
+/*
* Compression heuristic.
*
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
@@ -1082,18 +1446,87 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
*/
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
- u64 index = start >> PAGE_SHIFT;
- u64 end_index = end >> PAGE_SHIFT;
- struct page *page;
- int ret = 1;
+ struct list_head *ws_list = __find_workspace(0, true);
+ struct heuristic_ws *ws;
+ u32 i;
+ u8 byte;
+ int ret = 0;
- while (index <= end_index) {
- page = find_get_page(inode->i_mapping, index);
- kmap(page);
- kunmap(page);
- put_page(page);
- index++;
+ ws = list_entry(ws_list, struct heuristic_ws, list);
+
+ heuristic_collect_sample(inode, start, end, ws);
+
+ if (sample_repeated_patterns(ws)) {
+ ret = 1;
+ goto out;
+ }
+
+ memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
+
+ for (i = 0; i < ws->sample_size; i++) {
+ byte = ws->sample[i];
+ ws->bucket[byte].count++;
+ }
+
+ i = byte_set_size(ws);
+ if (i < BYTE_SET_THRESHOLD) {
+ ret = 2;
+ goto out;
+ }
+
+ i = byte_core_set_size(ws);
+ if (i <= BYTE_CORE_SET_LOW) {
+ ret = 3;
+ goto out;
}
+ if (i >= BYTE_CORE_SET_HIGH) {
+ ret = 0;
+ goto out;
+ }
+
+ i = shannon_entropy(ws);
+ if (i <= ENTROPY_LVL_ACEPTABLE) {
+ ret = 4;
+ goto out;
+ }
+
+ /*
+ * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
+ * needed to give green light to compression.
+ *
+ * For now just assume that compression at that level is not worth the
+ * resources because:
+ *
+ * 1. it is possible to defrag the data later
+ *
+ * 2. the data would turn out to be hardly compressible, eg. 150 byte
+ * values, every bucket has counter at level ~54. The heuristic would
+ * be confused. This can happen when data have some internal repeated
+ * patterns like "abbacbbc...". This can be detected by analyzing
+ * pairs of bytes, which is too costly.
+ */
+ if (i < ENTROPY_LVL_HIGH) {
+ ret = 5;
+ goto out;
+ } else {
+ ret = 0;
+ goto out;
+ }
+
+out:
+ __free_workspace(0, ws_list, true);
return ret;
}
+
+unsigned int btrfs_compress_str2level(const char *str)
+{
+ if (strncmp(str, "zlib", 4) != 0)
+ return 0;
+
+ /* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
+ if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
+ return str[5] - '0';
+
+ return 0;
+}
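For reference, the parser above accepts exactly "zlib" (level 0, i.e. the default) or "zlib:N" with a single digit 1 to 9; anything else, including "zlib:10" or other algorithms, falls back to 0. A stand-alone copy with a few example mount-option strings (the strings themselves are illustrative):

#include <stdio.h>
#include <string.h>

/* Same logic as btrfs_compress_str2level() above, in user space. */
static unsigned int compress_str2level(const char *str)
{
	if (strncmp(str, "zlib", 4) != 0)
		return 0;

	/* Accepted form: zlib:1 up to zlib:9 and nothing after the digit. */
	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
		return str[5] - '0';

	return 0;
}

int main(void)
{
	const char *opts[] = { "zlib", "zlib:3", "zlib:9", "zlib:10", "lzo", "zstd" };

	for (unsigned int i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
		printf("%-8s -> level %u\n", opts[i], compress_str2level(opts[i]));
	return 0;
}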
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index d2781ff8f994..da20755ebf21 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -76,7 +76,7 @@ struct compressed_bio {
void btrfs_init_compress(void);
void btrfs_exit_compress(void);
-int btrfs_compress_pages(int type, struct address_space *mapping,
+int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
u64 start, struct page **pages,
unsigned long *out_pages,
unsigned long *total_in,
@@ -95,6 +95,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags);
+unsigned btrfs_compress_str2level(const char *str);
+
enum btrfs_compression_type {
BTRFS_COMPRESS_NONE = 0,
BTRFS_COMPRESS_ZLIB = 1,
@@ -124,6 +126,8 @@ struct btrfs_compress_op {
struct page *dest_page,
unsigned long start_byte,
size_t srclen, size_t destlen);
+
+ void (*set_level)(struct list_head *ws, unsigned int type);
};
extern const struct btrfs_compress_op btrfs_zlib_compress;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 6d49db7d86be..531e0a8645b0 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -192,7 +192,7 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
* tree until you end up with a lock on the root. A locked buffer
* is returned, with a reference held.
*/
-static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
+struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
struct extent_buffer *eb;
@@ -5496,8 +5496,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
goto out;
} else if (left_end_reached) {
if (right_level == 0) {
- ret = changed_cb(left_root, right_root,
- left_path, right_path,
+ ret = changed_cb(left_path, right_path,
&right_key,
BTRFS_COMPARE_TREE_DELETED,
ctx);
@@ -5508,8 +5507,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
continue;
} else if (right_end_reached) {
if (left_level == 0) {
- ret = changed_cb(left_root, right_root,
- left_path, right_path,
+ ret = changed_cb(left_path, right_path,
&left_key,
BTRFS_COMPARE_TREE_NEW,
ctx);
@@ -5523,8 +5521,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
if (left_level == 0 && right_level == 0) {
cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
if (cmp < 0) {
- ret = changed_cb(left_root, right_root,
- left_path, right_path,
+ ret = changed_cb(left_path, right_path,
&left_key,
BTRFS_COMPARE_TREE_NEW,
ctx);
@@ -5532,8 +5529,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
goto out;
advance_left = ADVANCE;
} else if (cmp > 0) {
- ret = changed_cb(left_root, right_root,
- left_path, right_path,
+ ret = changed_cb(left_path, right_path,
&right_key,
BTRFS_COMPARE_TREE_DELETED,
ctx);
@@ -5550,8 +5546,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
result = BTRFS_COMPARE_TREE_CHANGED;
else
result = BTRFS_COMPARE_TREE_SAME;
- ret = changed_cb(left_root, right_root,
- left_path, right_path,
+ ret = changed_cb(left_path, right_path,
&left_key, result, ctx);
if (ret < 0)
goto out;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8fc690384c58..f7df5536ab61 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -523,7 +523,7 @@ struct btrfs_caching_control {
};
/* Once caching_thread() finds this much free space, it will wake up waiters. */
-#define CACHING_CTL_WAKE_UP (1024 * 1024 * 2)
+#define CACHING_CTL_WAKE_UP SZ_2M
struct btrfs_io_ctl {
void *cur, *orig;
@@ -763,8 +763,6 @@ struct btrfs_fs_info {
* delayed dir index item
*/
struct btrfs_block_rsv global_block_rsv;
- /* block reservation for delay allocation */
- struct btrfs_block_rsv delalloc_block_rsv;
/* block reservation for metadata operations */
struct btrfs_block_rsv trans_block_rsv;
/* block reservation for chunk tree */
@@ -790,6 +788,7 @@ struct btrfs_fs_info {
*/
unsigned long pending_changes;
unsigned long compress_type:4;
+ unsigned int compress_level;
int commit_interval;
/*
* It is a suggestive number, the read side is safe even it gets a
@@ -878,9 +877,6 @@ struct btrfs_fs_info {
rwlock_t tree_mod_log_lock;
struct rb_root tree_mod_log;
- atomic_t nr_async_submits;
- atomic_t async_submit_draining;
- atomic_t nr_async_bios;
atomic_t async_delalloc_pages;
atomic_t open_ioctl_trans;
@@ -1100,6 +1096,11 @@ struct btrfs_fs_info {
u32 nodesize;
u32 sectorsize;
u32 stripesize;
+
+#ifdef CONFIG_BTRFS_FS_REF_VERIFY
+ spinlock_t ref_verify_lock;
+ struct rb_root block_tree;
+#endif
};
static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
@@ -1338,6 +1339,7 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
#define BTRFS_MOUNT_FRAGMENT_METADATA (1 << 25)
#define BTRFS_MOUNT_FREE_SPACE_TREE (1 << 26)
#define BTRFS_MOUNT_NOLOGREPLAY (1 << 27)
+#define BTRFS_MOUNT_REF_VERIFY (1 << 28)
#define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
#define BTRFS_DEFAULT_MAX_INLINE (2048)
@@ -2639,7 +2641,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
struct extent_buffer *buf,
u64 parent, int last_ref);
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- u64 root_objectid, u64 owner,
+ struct btrfs_root *root, u64 owner,
u64 offset, u64 ram_bytes,
struct btrfs_key *ins);
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
@@ -2658,7 +2660,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 flags,
int level, int is_data);
int btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
+ struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
u64 owner, u64 offset);
@@ -2670,7 +2672,7 @@ void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
+ struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner, u64 offset);
@@ -2744,6 +2746,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
u64 *qgroup_reserved, bool use_global_rsv);
void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
+
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes);
void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_space(struct inode *inode,
@@ -2751,6 +2755,9 @@ int btrfs_delalloc_reserve_space(struct inode *inode,
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
unsigned short type);
+void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *rsv,
+ unsigned short type);
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv);
@@ -2809,6 +2816,7 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
const struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
+struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key *key, int lowest_level,
u64 min_trans);
@@ -2821,9 +2829,7 @@ enum btrfs_compare_tree_result {
BTRFS_COMPARE_TREE_CHANGED,
BTRFS_COMPARE_TREE_SAME,
};
-typedef int (*btrfs_changed_cb_t)(struct btrfs_root *left_root,
- struct btrfs_root *right_root,
- struct btrfs_path *left_path,
+typedef int (*btrfs_changed_cb_t)(struct btrfs_path *left_path,
struct btrfs_path *right_path,
struct btrfs_key *key,
enum btrfs_compare_tree_result result,
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 19e4ad2f3f2e..5d73f79ded8b 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -581,7 +581,6 @@ static int btrfs_delayed_inode_reserve_metadata(
struct btrfs_block_rsv *dst_rsv;
u64 num_bytes;
int ret;
- bool release = false;
src_rsv = trans->block_rsv;
dst_rsv = &fs_info->delayed_block_rsv;
@@ -589,36 +588,13 @@ static int btrfs_delayed_inode_reserve_metadata(
num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
/*
- * If our block_rsv is the delalloc block reserve then check and see if
- * we have our extra reservation for updating the inode. If not fall
- * through and try to reserve space quickly.
- *
- * We used to try and steal from the delalloc block rsv or the global
- * reserve, but we'd steal a full reservation, which isn't kind. We are
- * here through delalloc which means we've likely just cowed down close
- * to the leaf that contains the inode, so we would steal less just
- * doing the fallback inode update, so if we do end up having to steal
- * from the global block rsv we hopefully only steal one or two blocks
- * worth which is less likely to hurt us.
- */
- if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
- spin_lock(&inode->lock);
- if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
- &inode->runtime_flags))
- release = true;
- else
- src_rsv = NULL;
- spin_unlock(&inode->lock);
- }
-
- /*
* btrfs_dirty_inode will update the inode under btrfs_join_transaction
* which doesn't reserve space for speed. This is a problem since we
* still need to reserve space for this update, so try to reserve the
* space.
*
* Now if src_rsv == delalloc_block_rsv we'll let it just steal since
- * we're accounted for.
+ * we always reserve enough to update the inode item.
*/
if (!src_rsv || (!trans->bytes_reserved &&
src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
@@ -643,32 +619,12 @@ static int btrfs_delayed_inode_reserve_metadata(
}
ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
-
- /*
- * Migrate only takes a reservation, it doesn't touch the size of the
- * block_rsv. This is to simplify people who don't normally have things
- * migrated from their block rsv. If they go to release their
- * reservation, that will decrease the size as well, so if migrate
- * reduced size we'd end up with a negative size. But for the
- * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
- * but we could in fact do this reserve/migrate dance several times
- * between the time we did the original reservation and we'd clean it
- * up. So to take care of this, release the space for the meta
- * reservation here. I think it may be time for a documentation page on
- * how block rsvs. work.
- */
if (!ret) {
trace_btrfs_space_reservation(fs_info, "delayed_inode",
btrfs_ino(inode), num_bytes, 1);
node->bytes_reserved = num_bytes;
}
- if (release) {
- trace_btrfs_space_reservation(fs_info, "delalloc",
- btrfs_ino(inode), num_bytes, 0);
- btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
- }
-
return ret;
}
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 93ffa898df6d..83be8f9fd906 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -40,10 +40,10 @@ struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
* compare two delayed tree backrefs with same bytenr and type
*/
-static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
- struct btrfs_delayed_tree_ref *ref1, int type)
+static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
+ struct btrfs_delayed_tree_ref *ref2)
{
- if (type == BTRFS_TREE_BLOCK_REF_KEY) {
+ if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
if (ref1->root < ref2->root)
return -1;
if (ref1->root > ref2->root)
@@ -60,8 +60,8 @@ static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
/*
* compare two delayed data backrefs with same bytenr and type
*/
-static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
- struct btrfs_delayed_data_ref *ref1)
+static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
+ struct btrfs_delayed_data_ref *ref2)
{
if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
if (ref1->root < ref2->root)
@@ -85,6 +85,34 @@ static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
return 0;
}
+static int comp_refs(struct btrfs_delayed_ref_node *ref1,
+ struct btrfs_delayed_ref_node *ref2,
+ bool check_seq)
+{
+ int ret = 0;
+
+ if (ref1->type < ref2->type)
+ return -1;
+ if (ref1->type > ref2->type)
+ return 1;
+ if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
+ ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
+ ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
+ btrfs_delayed_node_to_tree_ref(ref2));
+ else
+ ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
+ btrfs_delayed_node_to_data_ref(ref2));
+ if (ret)
+ return ret;
+ if (check_seq) {
+ if (ref1->seq < ref2->seq)
+ return -1;
+ if (ref1->seq > ref2->seq)
+ return 1;
+ }
+ return 0;
+}
+
/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
struct rb_node *node)
@@ -96,15 +124,43 @@ static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
u64 bytenr;
ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
- bytenr = ins->node.bytenr;
+ bytenr = ins->bytenr;
while (*p) {
parent_node = *p;
entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
href_node);
- if (bytenr < entry->node.bytenr)
+ if (bytenr < entry->bytenr)
+ p = &(*p)->rb_left;
+ else if (bytenr > entry->bytenr)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ rb_link_node(node, parent_node, p);
+ rb_insert_color(node, root);
+ return NULL;
+}
+
+static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
+ struct btrfs_delayed_ref_node *ins)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *node = &ins->ref_node;
+ struct rb_node *parent_node = NULL;
+ struct btrfs_delayed_ref_node *entry;
+
+ while (*p) {
+ int comp;
+
+ parent_node = *p;
+ entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
+ ref_node);
+ comp = comp_refs(ins, entry, true);
+ if (comp < 0)
p = &(*p)->rb_left;
- else if (bytenr > entry->node.bytenr)
+ else if (comp > 0)
p = &(*p)->rb_right;
else
return entry;
@@ -133,15 +189,15 @@ find_ref_head(struct rb_root *root, u64 bytenr,
while (n) {
entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
- if (bytenr < entry->node.bytenr)
+ if (bytenr < entry->bytenr)
n = n->rb_left;
- else if (bytenr > entry->node.bytenr)
+ else if (bytenr > entry->bytenr)
n = n->rb_right;
else
return entry;
}
if (entry && return_bigger) {
- if (bytenr > entry->node.bytenr) {
+ if (bytenr > entry->bytenr) {
n = rb_next(&entry->href_node);
if (!n)
n = rb_first(root);
@@ -164,17 +220,17 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
if (mutex_trylock(&head->mutex))
return 0;
- refcount_inc(&head->node.refs);
+ refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
mutex_lock(&head->mutex);
spin_lock(&delayed_refs->lock);
- if (!head->node.in_tree) {
+ if (RB_EMPTY_NODE(&head->href_node)) {
mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
return -EAGAIN;
}
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
return 0;
}
@@ -183,15 +239,11 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_ref_node *ref)
{
- if (btrfs_delayed_ref_is_head(ref)) {
- head = btrfs_delayed_node_to_head(ref);
- rb_erase(&head->href_node, &delayed_refs->href_root);
- } else {
- assert_spin_locked(&head->lock);
- list_del(&ref->list);
- if (!list_empty(&ref->add_list))
- list_del(&ref->add_list);
- }
+ assert_spin_locked(&head->lock);
+ rb_erase(&ref->ref_node, &head->ref_tree);
+ RB_CLEAR_NODE(&ref->ref_node);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
ref->in_tree = 0;
btrfs_put_delayed_ref(ref);
atomic_dec(&delayed_refs->num_entries);
@@ -206,36 +258,18 @@ static bool merge_ref(struct btrfs_trans_handle *trans,
u64 seq)
{
struct btrfs_delayed_ref_node *next;
+ struct rb_node *node = rb_next(&ref->ref_node);
bool done = false;
- next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
- list);
- while (!done && &next->list != &head->ref_list) {
+ while (!done && node) {
int mod;
- struct btrfs_delayed_ref_node *next2;
-
- next2 = list_next_entry(next, list);
-
- if (next == ref)
- goto next;
+ next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
+ node = rb_next(node);
if (seq && next->seq >= seq)
- goto next;
-
- if (next->type != ref->type)
- goto next;
-
- if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
- ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
- comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
- btrfs_delayed_node_to_tree_ref(next),
- ref->type))
- goto next;
- if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
- ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
- comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
- btrfs_delayed_node_to_data_ref(next)))
- goto next;
+ break;
+ if (comp_refs(ref, next, false))
+ break;
if (ref->action == next->action) {
mod = next->ref_mod;
@@ -259,8 +293,6 @@ static bool merge_ref(struct btrfs_trans_handle *trans,
WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
}
-next:
- next = next2;
}
return done;
@@ -272,11 +304,12 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head)
{
struct btrfs_delayed_ref_node *ref;
+ struct rb_node *node;
u64 seq = 0;
assert_spin_locked(&head->lock);
- if (list_empty(&head->ref_list))
+ if (RB_EMPTY_ROOT(&head->ref_tree))
return;
/* We don't have too many refs to merge for data. */
@@ -293,22 +326,13 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
}
spin_unlock(&fs_info->tree_mod_seq_lock);
- ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
- list);
- while (&ref->list != &head->ref_list) {
+again:
+ for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+ ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
if (seq && ref->seq >= seq)
- goto next;
-
- if (merge_ref(trans, delayed_refs, head, ref, seq)) {
- if (list_empty(&head->ref_list))
- break;
- ref = list_first_entry(&head->ref_list,
- struct btrfs_delayed_ref_node,
- list);
continue;
- }
-next:
- ref = list_next_entry(ref, list);
+ if (merge_ref(trans, delayed_refs, head, ref, seq))
+ goto again;
}
}
@@ -380,8 +404,8 @@ again:
head->processing = 1;
WARN_ON(delayed_refs->num_heads_ready == 0);
delayed_refs->num_heads_ready--;
- delayed_refs->run_delayed_start = head->node.bytenr +
- head->node.num_bytes;
+ delayed_refs->run_delayed_start = head->bytenr +
+ head->num_bytes;
return head;
}
@@ -391,37 +415,19 @@ again:
* Return 0 for insert.
* Return >0 for merge.
*/
-static int
-add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_root *root,
- struct btrfs_delayed_ref_head *href,
- struct btrfs_delayed_ref_node *ref)
+static int insert_delayed_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_root *root,
+ struct btrfs_delayed_ref_head *href,
+ struct btrfs_delayed_ref_node *ref)
{
struct btrfs_delayed_ref_node *exist;
int mod;
int ret = 0;
spin_lock(&href->lock);
- /* Check whether we can merge the tail node with ref */
- if (list_empty(&href->ref_list))
- goto add_tail;
- exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
- list);
- /* No need to compare bytenr nor is_head */
- if (exist->type != ref->type || exist->seq != ref->seq)
- goto add_tail;
-
- if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
- exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
- comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
- btrfs_delayed_node_to_tree_ref(ref),
- ref->type))
- goto add_tail;
- if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
- exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
- comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
- btrfs_delayed_node_to_data_ref(ref)))
- goto add_tail;
+ exist = tree_insert(&href->ref_tree, ref);
+ if (!exist)
+ goto inserted;
/* Now we are sure we can merge */
ret = 1;
@@ -452,9 +458,7 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
drop_delayed_ref(trans, root, href, exist);
spin_unlock(&href->lock);
return ret;
-
-add_tail:
- list_add_tail(&ref->list, &href->ref_list);
+inserted:
if (ref->action == BTRFS_ADD_DELAYED_REF)
list_add_tail(&ref->add_list, &href->ref_add_list);
atomic_inc(&root->num_entries);
@@ -469,20 +473,16 @@ add_tail:
*/
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_delayed_ref_node *existing,
- struct btrfs_delayed_ref_node *update,
+ struct btrfs_delayed_ref_head *existing,
+ struct btrfs_delayed_ref_head *update,
int *old_ref_mod_ret)
{
- struct btrfs_delayed_ref_head *existing_ref;
- struct btrfs_delayed_ref_head *ref;
int old_ref_mod;
- existing_ref = btrfs_delayed_node_to_head(existing);
- ref = btrfs_delayed_node_to_head(update);
- BUG_ON(existing_ref->is_data != ref->is_data);
+ BUG_ON(existing->is_data != update->is_data);
- spin_lock(&existing_ref->lock);
- if (ref->must_insert_reserved) {
+ spin_lock(&existing->lock);
+ if (update->must_insert_reserved) {
/* if the extent was freed and then
* reallocated before the delayed ref
* entries were processed, we can end up
@@ -490,7 +490,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
* the must_insert_reserved flag set.
* Set it again here
*/
- existing_ref->must_insert_reserved = ref->must_insert_reserved;
+ existing->must_insert_reserved = update->must_insert_reserved;
/*
* update the num_bytes so we make sure the accounting
@@ -500,22 +500,22 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
}
- if (ref->extent_op) {
- if (!existing_ref->extent_op) {
- existing_ref->extent_op = ref->extent_op;
+ if (update->extent_op) {
+ if (!existing->extent_op) {
+ existing->extent_op = update->extent_op;
} else {
- if (ref->extent_op->update_key) {
- memcpy(&existing_ref->extent_op->key,
- &ref->extent_op->key,
- sizeof(ref->extent_op->key));
- existing_ref->extent_op->update_key = true;
+ if (update->extent_op->update_key) {
+ memcpy(&existing->extent_op->key,
+ &update->extent_op->key,
+ sizeof(update->extent_op->key));
+ existing->extent_op->update_key = true;
}
- if (ref->extent_op->update_flags) {
- existing_ref->extent_op->flags_to_set |=
- ref->extent_op->flags_to_set;
- existing_ref->extent_op->update_flags = true;
+ if (update->extent_op->update_flags) {
+ existing->extent_op->flags_to_set |=
+ update->extent_op->flags_to_set;
+ existing->extent_op->update_flags = true;
}
- btrfs_free_delayed_extent_op(ref->extent_op);
+ btrfs_free_delayed_extent_op(update->extent_op);
}
}
/*
@@ -523,23 +523,23 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
* only need the lock for this case cause we could be processing it
* currently, for refs we just added we know we're a-ok.
*/
- old_ref_mod = existing_ref->total_ref_mod;
+ old_ref_mod = existing->total_ref_mod;
if (old_ref_mod_ret)
*old_ref_mod_ret = old_ref_mod;
existing->ref_mod += update->ref_mod;
- existing_ref->total_ref_mod += update->ref_mod;
+ existing->total_ref_mod += update->ref_mod;
/*
* If we are going to from a positive ref mod to a negative or vice
* versa we need to make sure to adjust pending_csums accordingly.
*/
- if (existing_ref->is_data) {
- if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
+ if (existing->is_data) {
+ if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
delayed_refs->pending_csums -= existing->num_bytes;
- if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
+ if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
delayed_refs->pending_csums += existing->num_bytes;
}
- spin_unlock(&existing_ref->lock);
+ spin_unlock(&existing->lock);
}
/*
@@ -550,14 +550,13 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *ref,
+ struct btrfs_delayed_ref_head *head_ref,
struct btrfs_qgroup_extent_record *qrecord,
u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
int action, int is_data, int *qrecord_inserted_ret,
int *old_ref_mod, int *new_ref_mod)
{
struct btrfs_delayed_ref_head *existing;
- struct btrfs_delayed_ref_head *head_ref = NULL;
struct btrfs_delayed_ref_root *delayed_refs;
int count_mod = 1;
int must_insert_reserved = 0;
@@ -593,26 +592,21 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
delayed_refs = &trans->transaction->delayed_refs;
- /* first set the basic ref node struct up */
- refcount_set(&ref->refs, 1);
- ref->bytenr = bytenr;
- ref->num_bytes = num_bytes;
- ref->ref_mod = count_mod;
- ref->type = 0;
- ref->action = 0;
- ref->is_head = 1;
- ref->in_tree = 1;
- ref->seq = 0;
-
- head_ref = btrfs_delayed_node_to_head(ref);
+ refcount_set(&head_ref->refs, 1);
+ head_ref->bytenr = bytenr;
+ head_ref->num_bytes = num_bytes;
+ head_ref->ref_mod = count_mod;
head_ref->must_insert_reserved = must_insert_reserved;
head_ref->is_data = is_data;
- INIT_LIST_HEAD(&head_ref->ref_list);
+ head_ref->ref_tree = RB_ROOT;
INIT_LIST_HEAD(&head_ref->ref_add_list);
+ RB_CLEAR_NODE(&head_ref->href_node);
head_ref->processing = 0;
head_ref->total_ref_mod = count_mod;
head_ref->qgroup_reserved = 0;
head_ref->qgroup_ref_root = 0;
+ spin_lock_init(&head_ref->lock);
+ mutex_init(&head_ref->mutex);
/* Record qgroup extent info if provided */
if (qrecord) {
@@ -632,17 +626,14 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
qrecord_inserted = 1;
}
- spin_lock_init(&head_ref->lock);
- mutex_init(&head_ref->mutex);
-
- trace_add_delayed_ref_head(fs_info, ref, head_ref, action);
+ trace_add_delayed_ref_head(fs_info, head_ref, action);
existing = htree_insert(&delayed_refs->href_root,
&head_ref->href_node);
if (existing) {
WARN_ON(ref_root && reserved && existing->qgroup_ref_root
&& existing->qgroup_reserved);
- update_existing_head_ref(delayed_refs, &existing->node, ref,
+ update_existing_head_ref(delayed_refs, existing, head_ref,
old_ref_mod);
/*
* we've updated the existing ref, free the newly
@@ -699,7 +690,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
ref->is_head = 0;
ref->in_tree = 1;
ref->seq = seq;
- INIT_LIST_HEAD(&ref->list);
+ RB_CLEAR_NODE(&ref->ref_node);
INIT_LIST_HEAD(&ref->add_list);
full_ref = btrfs_delayed_node_to_tree_ref(ref);
@@ -713,7 +704,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);
- ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
/*
* XXX: memory should be freed at the same level allocated.
@@ -756,7 +747,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
ref->is_head = 0;
ref->in_tree = 1;
ref->seq = seq;
- INIT_LIST_HEAD(&ref->list);
+ RB_CLEAR_NODE(&ref->ref_node);
INIT_LIST_HEAD(&ref->add_list);
full_ref = btrfs_delayed_node_to_data_ref(ref);
@@ -772,8 +763,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
trace_add_delayed_data_ref(fs_info, ref, full_ref, action);
- ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
-
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
if (ret > 0)
kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}
@@ -821,7 +811,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
* insert both the head node and the new ref without dropping
* the spin lock
*/
- head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
+ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
bytenr, num_bytes, 0, 0, action, 0,
&qrecord_inserted, old_ref_mod,
new_ref_mod);
@@ -888,7 +878,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
* insert both the head node and the new ref without dropping
* the spin lock
*/
- head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
+ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
bytenr, num_bytes, ref_root, reserved,
action, 1, &qrecord_inserted,
old_ref_mod, new_ref_mod);
@@ -920,7 +910,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
- add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
+ add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
extent_op->is_data, NULL, NULL, NULL);
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index ce88e4ac5276..a43af432f859 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -26,18 +26,8 @@
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */
-/*
- * XXX: Qu: I really hate the design that ref_head and tree/data ref shares the
- * same ref_node structure.
- * Ref_head is in a higher logic level than tree/data ref, and duplicated
- * bytenr/num_bytes in ref_node is really a waste or memory, they should be
- * referred from ref_head.
- * This gets more disgusting after we use list to store tree/data ref in
- * ref_head. Must clean this mess up later.
- */
struct btrfs_delayed_ref_node {
- /*data/tree ref use list, stored in ref_head->ref_list. */
- struct list_head list;
+ struct rb_node ref_node;
/*
* If action is BTRFS_ADD_DELAYED_REF, also link this node to
* ref_head->ref_add_list, then we do not need to iterate the
@@ -91,8 +81,9 @@ struct btrfs_delayed_extent_op {
* reference count modifications we've queued up.
*/
struct btrfs_delayed_ref_head {
- struct btrfs_delayed_ref_node node;
-
+ u64 bytenr;
+ u64 num_bytes;
+ refcount_t refs;
/*
* the mutex is held while running the refs, and it is also
* held when checking the sum of reference modifications.
@@ -100,7 +91,7 @@ struct btrfs_delayed_ref_head {
struct mutex mutex;
spinlock_t lock;
- struct list_head ref_list;
+ struct rb_root ref_tree;
/* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
struct list_head ref_add_list;
@@ -116,6 +107,14 @@ struct btrfs_delayed_ref_head {
int total_ref_mod;
/*
+ * This is the current outstanding mod references for this bytenr. This
+ * is used with lookup_extent_info to get an accurate reference count
+ * for a bytenr, so it is adjusted as delayed refs are run so that any
+ * on disk reference count + ref_mod is accurate.
+ */
+ int ref_mod;
+
+ /*
* For qgroup reserved space freeing.
*
* ref_root and reserved will be recorded after
@@ -234,15 +233,18 @@ static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
case BTRFS_SHARED_DATA_REF_KEY:
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
break;
- case 0:
- kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
- break;
default:
BUG();
}
}
}
+static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
+{
+ if (refcount_dec_and_test(&head->refs))
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
+}
+
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 parent,
@@ -283,35 +285,17 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
u64 seq);
/*
- * a node might live in a head or a regular ref, this lets you
- * test for the proper type to use.
- */
-static int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
-{
- return node->is_head;
-}
-
-/*
* helper functions to cast a node into its container
*/
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
- WARN_ON(btrfs_delayed_ref_is_head(node));
return container_of(node, struct btrfs_delayed_tree_ref, node);
}
static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
- WARN_ON(btrfs_delayed_ref_is_head(node));
return container_of(node, struct btrfs_delayed_data_ref, node);
}
-
-static inline struct btrfs_delayed_ref_head *
-btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
-{
- WARN_ON(!btrfs_delayed_ref_is_head(node));
- return container_of(node, struct btrfs_delayed_ref_head, node);
-}
#endif
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index dfdab849037b..efce9a2fa9be 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -50,6 +50,8 @@
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
+#include "tree-checker.h"
+#include "ref-verify.h"
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
@@ -543,146 +545,6 @@ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
return ret;
}
-#define CORRUPT(reason, eb, root, slot) \
- btrfs_crit(root->fs_info, \
- "corrupt %s, %s: block=%llu, root=%llu, slot=%d", \
- btrfs_header_level(eb) == 0 ? "leaf" : "node", \
- reason, btrfs_header_bytenr(eb), root->objectid, slot)
-
-static noinline int check_leaf(struct btrfs_root *root,
- struct extent_buffer *leaf)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_key key;
- struct btrfs_key leaf_key;
- u32 nritems = btrfs_header_nritems(leaf);
- int slot;
-
- /*
- * Extent buffers from a relocation tree have a owner field that
- * corresponds to the subvolume tree they are based on. So just from an
- * extent buffer alone we can not find out what is the id of the
- * corresponding subvolume tree, so we can not figure out if the extent
- * buffer corresponds to the root of the relocation tree or not. So skip
- * this check for relocation trees.
- */
- if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
- struct btrfs_root *check_root;
-
- key.objectid = btrfs_header_owner(leaf);
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
-
- check_root = btrfs_get_fs_root(fs_info, &key, false);
- /*
- * The only reason we also check NULL here is that during
- * open_ctree() some roots has not yet been set up.
- */
- if (!IS_ERR_OR_NULL(check_root)) {
- struct extent_buffer *eb;
-
- eb = btrfs_root_node(check_root);
- /* if leaf is the root, then it's fine */
- if (leaf != eb) {
- CORRUPT("non-root leaf's nritems is 0",
- leaf, check_root, 0);
- free_extent_buffer(eb);
- return -EIO;
- }
- free_extent_buffer(eb);
- }
- return 0;
- }
-
- if (nritems == 0)
- return 0;
-
- /* Check the 0 item */
- if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
- BTRFS_LEAF_DATA_SIZE(fs_info)) {
- CORRUPT("invalid item offset size pair", leaf, root, 0);
- return -EIO;
- }
-
- /*
- * Check to make sure each items keys are in the correct order and their
- * offsets make sense. We only have to loop through nritems-1 because
- * we check the current slot against the next slot, which verifies the
- * next slot's offset+size makes sense and that the current's slot
- * offset is correct.
- */
- for (slot = 0; slot < nritems - 1; slot++) {
- btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
- btrfs_item_key_to_cpu(leaf, &key, slot + 1);
-
- /* Make sure the keys are in the right order */
- if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
- CORRUPT("bad key order", leaf, root, slot);
- return -EIO;
- }
-
- /*
- * Make sure the offset and ends are right, remember that the
- * item data starts at the end of the leaf and grows towards the
- * front.
- */
- if (btrfs_item_offset_nr(leaf, slot) !=
- btrfs_item_end_nr(leaf, slot + 1)) {
- CORRUPT("slot offset bad", leaf, root, slot);
- return -EIO;
- }
-
- /*
- * Check to make sure that we don't point outside of the leaf,
- * just in case all the items are consistent to each other, but
- * all point outside of the leaf.
- */
- if (btrfs_item_end_nr(leaf, slot) >
- BTRFS_LEAF_DATA_SIZE(fs_info)) {
- CORRUPT("slot end outside of leaf", leaf, root, slot);
- return -EIO;
- }
- }
-
- return 0;
-}
-
-static int check_node(struct btrfs_root *root, struct extent_buffer *node)
-{
- unsigned long nr = btrfs_header_nritems(node);
- struct btrfs_key key, next_key;
- int slot;
- u64 bytenr;
- int ret = 0;
-
- if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root->fs_info)) {
- btrfs_crit(root->fs_info,
- "corrupt node: block %llu root %llu nritems %lu",
- node->start, root->objectid, nr);
- return -EIO;
- }
-
- for (slot = 0; slot < nr - 1; slot++) {
- bytenr = btrfs_node_blockptr(node, slot);
- btrfs_node_key_to_cpu(node, &key, slot);
- btrfs_node_key_to_cpu(node, &next_key, slot + 1);
-
- if (!bytenr) {
- CORRUPT("invalid item slot", node, root, slot);
- ret = -EIO;
- goto out;
- }
-
- if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
- CORRUPT("bad key order", node, root, slot);
- ret = -EIO;
- goto out;
- }
- }
-out:
- return ret;
-}
-
static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
u64 phy_offset, struct page *page,
u64 start, u64 end, int mirror)
@@ -748,12 +610,12 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
* that we don't try and read the other copies of this block, just
* return -EIO.
*/
- if (found_level == 0 && check_leaf(root, eb)) {
+ if (found_level == 0 && btrfs_check_leaf(root, eb)) {
set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
ret = -EIO;
}
- if (found_level > 0 && check_node(root, eb))
+ if (found_level > 0 && btrfs_check_node(root, eb))
ret = -EIO;
if (!ret)
@@ -879,22 +741,9 @@ static void run_one_async_start(struct btrfs_work *work)
static void run_one_async_done(struct btrfs_work *work)
{
- struct btrfs_fs_info *fs_info;
struct async_submit_bio *async;
- int limit;
async = container_of(work, struct async_submit_bio, work);
- fs_info = async->fs_info;
-
- limit = btrfs_async_submit_limit(fs_info);
- limit = limit * 2 / 3;
-
- /*
- * atomic_dec_return implies a barrier for waitqueue_active
- */
- if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
- waitqueue_active(&fs_info->async_submit_wait))
- wake_up(&fs_info->async_submit_wait);
/* If an error occurred we just want to clean up the bio and move on */
if (async->status) {
@@ -942,19 +791,10 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
async->status = 0;
- atomic_inc(&fs_info->nr_async_submits);
-
if (op_is_sync(bio->bi_opf))
btrfs_set_work_high_priority(&async->work);
btrfs_queue_work(fs_info->workers, &async->work);
-
- while (atomic_read(&fs_info->async_submit_draining) &&
- atomic_read(&fs_info->nr_async_submits)) {
- wait_event(fs_info->async_submit_wait,
- (atomic_read(&fs_info->nr_async_submits) == 0));
- }
-
return 0;
}
@@ -1005,9 +845,9 @@ static blk_status_t __btree_submit_bio_done(void *private_data, struct bio *bio,
return ret;
}
-static int check_async_write(unsigned long bio_flags)
+static int check_async_write(struct btrfs_inode *bi)
{
- if (bio_flags & EXTENT_BIO_TREE_LOG)
+ if (atomic_read(&bi->sync_writers))
return 0;
#ifdef CONFIG_X86
if (static_cpu_has(X86_FEATURE_XMM4_2))
@@ -1022,7 +862,7 @@ static blk_status_t btree_submit_bio_hook(void *private_data, struct bio *bio,
{
struct inode *inode = private_data;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- int async = check_async_write(bio_flags);
+ int async = check_async_write(BTRFS_I(inode));
blk_status_t ret;
if (bio_op(bio) != REQ_OP_WRITE) {
@@ -2607,14 +2447,6 @@ int open_ctree(struct super_block *sb,
goto fail_delalloc_bytes;
}
- fs_info->btree_inode = new_inode(sb);
- if (!fs_info->btree_inode) {
- err = -ENOMEM;
- goto fail_bio_counter;
- }
-
- mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
-
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
INIT_LIST_HEAD(&fs_info->trans_list);
@@ -2647,17 +2479,12 @@ int open_ctree(struct super_block *sb,
btrfs_mapping_init(&fs_info->mapping_tree);
btrfs_init_block_rsv(&fs_info->global_block_rsv,
BTRFS_BLOCK_RSV_GLOBAL);
- btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
- BTRFS_BLOCK_RSV_DELALLOC);
btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
BTRFS_BLOCK_RSV_DELOPS);
- atomic_set(&fs_info->nr_async_submits, 0);
atomic_set(&fs_info->async_delalloc_pages, 0);
- atomic_set(&fs_info->async_submit_draining, 0);
- atomic_set(&fs_info->nr_async_bios, 0);
atomic_set(&fs_info->defrag_running, 0);
atomic_set(&fs_info->qgroup_op_seq, 0);
atomic_set(&fs_info->reada_works_cnt, 0);
@@ -2673,12 +2500,21 @@ int open_ctree(struct super_block *sb,
/* readahead state */
INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
spin_lock_init(&fs_info->reada_lock);
+ btrfs_init_ref_verify(fs_info);
fs_info->thread_pool_size = min_t(unsigned long,
num_online_cpus() + 2, 8);
INIT_LIST_HEAD(&fs_info->ordered_roots);
spin_lock_init(&fs_info->ordered_root_lock);
+
+ fs_info->btree_inode = new_inode(sb);
+ if (!fs_info->btree_inode) {
+ err = -ENOMEM;
+ goto fail_bio_counter;
+ }
+ mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
+
fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
GFP_KERNEL);
if (!fs_info->delayed_root) {
@@ -2895,12 +2731,13 @@ int open_ctree(struct super_block *sb,
sb->s_bdi->congested_fn = btrfs_congested_fn;
sb->s_bdi->congested_data = fs_info;
sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
- sb->s_bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
+ sb->s_bdi->ra_pages = VM_MAX_READAHEAD * SZ_1K / PAGE_SIZE;
sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
sb->s_blocksize = sectorsize;
sb->s_blocksize_bits = blksize_bits(sectorsize);
+ memcpy(&sb->s_uuid, fs_info->fsid, BTRFS_FSID_SIZE);
mutex_lock(&fs_info->chunk_mutex);
ret = btrfs_read_sys_array(fs_info);
@@ -3083,6 +2920,9 @@ retry_root_backup:
if (ret)
goto fail_trans_kthread;
+ if (btrfs_build_ref_tree(fs_info))
+ btrfs_err(fs_info, "couldn't build ref tree");
+
/* do not make disk changes in broken FS or nologreplay is given */
if (btrfs_super_log_root(disk_super) != 0 &&
!btrfs_test_opt(fs_info, NOLOGREPLAY)) {
@@ -3948,6 +3788,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
cleanup_srcu_struct(&fs_info->subvol_srcu);
btrfs_free_stripe_hash_table(fs_info);
+ btrfs_free_ref_cache(fs_info);
__btrfs_free_block_rsv(root->orphan_block_rsv);
root->orphan_block_rsv = NULL;
@@ -4007,7 +3848,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
buf->len,
fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
+ if (btrfs_header_level(buf) == 0 && btrfs_check_leaf(root, buf)) {
btrfs_print_leaf(buf);
ASSERT(0);
}
@@ -4272,26 +4113,28 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
struct btrfs_delayed_ref_head *head;
- struct btrfs_delayed_ref_node *tmp;
+ struct rb_node *n;
bool pin_bytes = false;
head = rb_entry(node, struct btrfs_delayed_ref_head,
href_node);
if (!mutex_trylock(&head->mutex)) {
- refcount_inc(&head->node.refs);
+ refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
spin_lock(&delayed_refs->lock);
continue;
}
spin_lock(&head->lock);
- list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
- list) {
+ while ((n = rb_first(&head->ref_tree)) != NULL) {
+ ref = rb_entry(n, struct btrfs_delayed_ref_node,
+ ref_node);
ref->in_tree = 0;
- list_del(&ref->list);
+ rb_erase(&ref->ref_node, &head->ref_tree);
+ RB_CLEAR_NODE(&ref->ref_node);
if (!list_empty(&ref->add_list))
list_del(&ref->add_list);
atomic_dec(&delayed_refs->num_entries);
@@ -4304,16 +4147,16 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
if (head->processing == 0)
delayed_refs->num_heads_ready--;
atomic_dec(&delayed_refs->num_entries);
- head->node.in_tree = 0;
rb_erase(&head->href_node, &delayed_refs->href_root);
+ RB_CLEAR_NODE(&head->href_node);
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
mutex_unlock(&head->mutex);
if (pin_bytes)
- btrfs_pin_extent(fs_info, head->node.bytenr,
- head->node.num_bytes, 1);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_pin_extent(fs_info, head->bytenr,
+ head->num_bytes, 1);
+ btrfs_put_delayed_ref_head(head);
cond_resched();
spin_lock(&delayed_refs->lock);
}
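
Aside: the disk-io.c hunks above and the extent-tree.c hunks below move delayed refs from a per-head list onto a per-head rbtree (head->ref_tree) and drop the embedded head->node, so heads are now released with btrfs_put_delayed_ref_head(). Below is a minimal kernel-style sketch of the drain pattern those hunks repeat, assuming the <linux/rbtree.h> helpers and the struct fields shown in this patch; the function name is hypothetical and the code is not part of the patch.

static void drain_ref_tree_sketch(struct btrfs_delayed_ref_root *delayed_refs,
				  struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *n;

	/* Caller holds head->lock, as btrfs_destroy_delayed_refs() does. */
	while ((n = rb_first(&head->ref_tree)) != NULL) {
		ref = rb_entry(n, struct btrfs_delayed_ref_node, ref_node);
		ref->in_tree = 0;
		/* Unlink from the per-head tree and mark the node detached. */
		rb_erase(&ref->ref_node, &head->ref_tree);
		RB_CLEAR_NODE(&ref->ref_node);
		if (!list_empty(&ref->add_list))
			list_del(&ref->add_list);
		atomic_dec(&delayed_refs->num_entries);
		btrfs_put_delayed_ref(ref);
	}
}

RB_CLEAR_NODE() leaves the erased node in a state that later emptiness checks recognize as "not in a tree", which is why the hunks add it next to every rb_erase().
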
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 24cefde30e30..7208ecef7088 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
+#include <linux/lockdep.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
@@ -38,6 +39,7 @@
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"
+#include "ref-verify.h"
#undef SCRAMBLE_DELAYED_REFS
@@ -61,9 +63,6 @@ enum {
CHUNK_ALLOC_FORCE = 2,
};
-static int update_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *node, u64 parent,
@@ -91,17 +90,8 @@ static int find_next_key(struct btrfs_path *path, int level,
static void dump_space_info(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *info, u64 bytes,
int dump_block_groups);
-static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
- u64 ram_bytes, u64 num_bytes, int delalloc);
-static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
- u64 num_bytes, int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
-static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
- u64 orig_bytes,
- enum btrfs_reserve_flush_enum flush,
- bool system_chunk);
static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
u64 num_bytes);
@@ -652,7 +642,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
cache->cached = BTRFS_CACHE_FAST;
spin_unlock(&cache->lock);
- if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
+ if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
mutex_lock(&caching_ctl->mutex);
ret = load_free_space_cache(fs_info, cache);
@@ -923,7 +913,7 @@ search_again:
head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
if (head) {
if (!mutex_trylock(&head->mutex)) {
- refcount_inc(&head->node.refs);
+ refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
btrfs_release_path(path);
@@ -934,7 +924,7 @@ search_again:
*/
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
goto search_again;
}
spin_lock(&head->lock);
@@ -943,7 +933,7 @@ search_again:
else
BUG_ON(num_refs == 0);
- num_refs += head->node.ref_mod;
+ num_refs += head->ref_mod;
spin_unlock(&head->lock);
mutex_unlock(&head->mutex);
}
@@ -2189,16 +2179,20 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
+ struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner, u64 offset)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int old_ref_mod, new_ref_mod;
int ret;
BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
root_objectid == BTRFS_TREE_LOG_OBJECTID);
+ btrfs_ref_tree_mod(root, bytenr, num_bytes, parent, root_objectid,
+ owner, offset, BTRFS_ADD_DELAYED_REF);
+
if (owner < BTRFS_FIRST_FREE_OBJECTID) {
ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
num_bytes, parent,
@@ -2344,7 +2338,7 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_ref_node *node,
+ struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_key key;
@@ -2366,14 +2360,14 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- key.objectid = node->bytenr;
+ key.objectid = head->bytenr;
if (metadata) {
key.type = BTRFS_METADATA_ITEM_KEY;
key.offset = extent_op->level;
} else {
key.type = BTRFS_EXTENT_ITEM_KEY;
- key.offset = node->num_bytes;
+ key.offset = head->num_bytes;
}
again:
@@ -2390,17 +2384,17 @@ again:
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &key,
path->slots[0]);
- if (key.objectid == node->bytenr &&
+ if (key.objectid == head->bytenr &&
key.type == BTRFS_EXTENT_ITEM_KEY &&
- key.offset == node->num_bytes)
+ key.offset == head->num_bytes)
ret = 0;
}
if (ret > 0) {
btrfs_release_path(path);
metadata = 0;
- key.objectid = node->bytenr;
- key.offset = node->num_bytes;
+ key.objectid = head->bytenr;
+ key.offset = head->num_bytes;
key.type = BTRFS_EXTENT_ITEM_KEY;
goto again;
}
@@ -2507,44 +2501,6 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
return 0;
}
- if (btrfs_delayed_ref_is_head(node)) {
- struct btrfs_delayed_ref_head *head;
- /*
- * we've hit the end of the chain and we were supposed
- * to insert this extent into the tree. But, it got
- * deleted before we ever needed to insert it, so all
- * we have to do is clean up the accounting
- */
- BUG_ON(extent_op);
- head = btrfs_delayed_node_to_head(node);
- trace_run_delayed_ref_head(fs_info, node, head, node->action);
-
- if (head->total_ref_mod < 0) {
- struct btrfs_block_group_cache *cache;
-
- cache = btrfs_lookup_block_group(fs_info, node->bytenr);
- ASSERT(cache);
- percpu_counter_add(&cache->space_info->total_bytes_pinned,
- -node->num_bytes);
- btrfs_put_block_group(cache);
- }
-
- if (insert_reserved) {
- btrfs_pin_extent(fs_info, node->bytenr,
- node->num_bytes, 1);
- if (head->is_data) {
- ret = btrfs_del_csums(trans, fs_info,
- node->bytenr,
- node->num_bytes);
- }
- }
-
- /* Also free its reserved qgroup space */
- btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
- head->qgroup_reserved);
- return ret;
- }
-
if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
node->type == BTRFS_SHARED_BLOCK_REF_KEY)
ret = run_delayed_tree_ref(trans, fs_info, node, extent_op,
@@ -2563,7 +2519,7 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
struct btrfs_delayed_ref_node *ref;
- if (list_empty(&head->ref_list))
+ if (RB_EMPTY_ROOT(&head->ref_tree))
return NULL;
/*
@@ -2576,12 +2532,114 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
return list_first_entry(&head->ref_add_list,
struct btrfs_delayed_ref_node, add_list);
- ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
- list);
+ ref = rb_entry(rb_first(&head->ref_tree),
+ struct btrfs_delayed_ref_node, ref_node);
ASSERT(list_empty(&ref->add_list));
return ref;
}
+static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head)
+{
+ spin_lock(&delayed_refs->lock);
+ head->processing = 0;
+ delayed_refs->num_heads_ready++;
+ spin_unlock(&delayed_refs->lock);
+ btrfs_delayed_ref_unlock(head);
+}
+
+static int cleanup_extent_op(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_head *head)
+{
+ struct btrfs_delayed_extent_op *extent_op = head->extent_op;
+ int ret;
+
+ if (!extent_op)
+ return 0;
+ head->extent_op = NULL;
+ if (head->must_insert_reserved) {
+ btrfs_free_delayed_extent_op(extent_op);
+ return 0;
+ }
+ spin_unlock(&head->lock);
+ ret = run_delayed_extent_op(trans, fs_info, head, extent_op);
+ btrfs_free_delayed_extent_op(extent_op);
+ return ret ? ret : 1;
+}
+
+static int cleanup_ref_head(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_head *head)
+{
+ struct btrfs_delayed_ref_root *delayed_refs;
+ int ret;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+
+ ret = cleanup_extent_op(trans, fs_info, head);
+ if (ret < 0) {
+ unselect_delayed_ref_head(delayed_refs, head);
+ btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
+ return ret;
+ } else if (ret) {
+ return ret;
+ }
+
+ /*
+ * Need to drop our head ref lock and re-acquire the delayed ref lock
+ * and then re-check to make sure nobody got added.
+ */
+ spin_unlock(&head->lock);
+ spin_lock(&delayed_refs->lock);
+ spin_lock(&head->lock);
+ if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
+ spin_unlock(&head->lock);
+ spin_unlock(&delayed_refs->lock);
+ return 1;
+ }
+ delayed_refs->num_heads--;
+ rb_erase(&head->href_node, &delayed_refs->href_root);
+ RB_CLEAR_NODE(&head->href_node);
+ spin_unlock(&delayed_refs->lock);
+ spin_unlock(&head->lock);
+ atomic_dec(&delayed_refs->num_entries);
+
+ trace_run_delayed_ref_head(fs_info, head, 0);
+
+ if (head->total_ref_mod < 0) {
+ struct btrfs_block_group_cache *cache;
+
+ cache = btrfs_lookup_block_group(fs_info, head->bytenr);
+ ASSERT(cache);
+ percpu_counter_add(&cache->space_info->total_bytes_pinned,
+ -head->num_bytes);
+ btrfs_put_block_group(cache);
+
+ if (head->is_data) {
+ spin_lock(&delayed_refs->lock);
+ delayed_refs->pending_csums -= head->num_bytes;
+ spin_unlock(&delayed_refs->lock);
+ }
+ }
+
+ if (head->must_insert_reserved) {
+ btrfs_pin_extent(fs_info, head->bytenr,
+ head->num_bytes, 1);
+ if (head->is_data) {
+ ret = btrfs_del_csums(trans, fs_info, head->bytenr,
+ head->num_bytes);
+ }
+ }
+
+ /* Also free its reserved qgroup space */
+ btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
+ head->qgroup_reserved);
+ btrfs_delayed_ref_unlock(head);
+ btrfs_put_delayed_ref_head(head);
+ return 0;
+}
+
/*
* Returns 0 on success or if called with an already aborted transaction.
* Returns -ENOMEM or -EIO on failure and will abort the transaction.
@@ -2655,11 +2713,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (ref && ref->seq &&
btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
spin_unlock(&locked_ref->lock);
- spin_lock(&delayed_refs->lock);
- locked_ref->processing = 0;
- delayed_refs->num_heads_ready++;
- spin_unlock(&delayed_refs->lock);
- btrfs_delayed_ref_unlock(locked_ref);
+ unselect_delayed_ref_head(delayed_refs, locked_ref);
locked_ref = NULL;
cond_resched();
count++;
@@ -2667,102 +2721,55 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
}
/*
- * record the must insert reserved flag before we
- * drop the spin lock.
+ * We're done processing refs in this ref_head, clean everything
+ * up and move on to the next ref_head.
*/
- must_insert_reserved = locked_ref->must_insert_reserved;
- locked_ref->must_insert_reserved = 0;
-
- extent_op = locked_ref->extent_op;
- locked_ref->extent_op = NULL;
-
if (!ref) {
-
-
- /* All delayed refs have been processed, Go ahead
- * and send the head node to run_one_delayed_ref,
- * so that any accounting fixes can happen
- */
- ref = &locked_ref->node;
-
- if (extent_op && must_insert_reserved) {
- btrfs_free_delayed_extent_op(extent_op);
- extent_op = NULL;
- }
-
- if (extent_op) {
- spin_unlock(&locked_ref->lock);
- ret = run_delayed_extent_op(trans, fs_info,
- ref, extent_op);
- btrfs_free_delayed_extent_op(extent_op);
-
- if (ret) {
- /*
- * Need to reset must_insert_reserved if
- * there was an error so the abort stuff
- * can cleanup the reserved space
- * properly.
- */
- if (must_insert_reserved)
- locked_ref->must_insert_reserved = 1;
- spin_lock(&delayed_refs->lock);
- locked_ref->processing = 0;
- delayed_refs->num_heads_ready++;
- spin_unlock(&delayed_refs->lock);
- btrfs_debug(fs_info,
- "run_delayed_extent_op returned %d",
- ret);
- btrfs_delayed_ref_unlock(locked_ref);
- return ret;
- }
+ ret = cleanup_ref_head(trans, fs_info, locked_ref);
+ if (ret > 0) {
+ /* We dropped our lock, we need to loop. */
+ ret = 0;
continue;
+ } else if (ret) {
+ return ret;
}
+ locked_ref = NULL;
+ count++;
+ continue;
+ }
- /*
- * Need to drop our head ref lock and re-acquire the
- * delayed ref lock and then re-check to make sure
- * nobody got added.
- */
- spin_unlock(&locked_ref->lock);
- spin_lock(&delayed_refs->lock);
- spin_lock(&locked_ref->lock);
- if (!list_empty(&locked_ref->ref_list) ||
- locked_ref->extent_op) {
- spin_unlock(&locked_ref->lock);
- spin_unlock(&delayed_refs->lock);
- continue;
- }
- ref->in_tree = 0;
- delayed_refs->num_heads--;
- rb_erase(&locked_ref->href_node,
- &delayed_refs->href_root);
- spin_unlock(&delayed_refs->lock);
- } else {
- actual_count++;
- ref->in_tree = 0;
- list_del(&ref->list);
- if (!list_empty(&ref->add_list))
- list_del(&ref->add_list);
+ actual_count++;
+ ref->in_tree = 0;
+ rb_erase(&ref->ref_node, &locked_ref->ref_tree);
+ RB_CLEAR_NODE(&ref->ref_node);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
+ /*
+ * When we play the delayed ref, also correct the ref_mod on
+ * head
+ */
+ switch (ref->action) {
+ case BTRFS_ADD_DELAYED_REF:
+ case BTRFS_ADD_DELAYED_EXTENT:
+ locked_ref->ref_mod -= ref->ref_mod;
+ break;
+ case BTRFS_DROP_DELAYED_REF:
+ locked_ref->ref_mod += ref->ref_mod;
+ break;
+ default:
+ WARN_ON(1);
}
atomic_dec(&delayed_refs->num_entries);
- if (!btrfs_delayed_ref_is_head(ref)) {
- /*
- * when we play the delayed ref, also correct the
- * ref_mod on head
- */
- switch (ref->action) {
- case BTRFS_ADD_DELAYED_REF:
- case BTRFS_ADD_DELAYED_EXTENT:
- locked_ref->node.ref_mod -= ref->ref_mod;
- break;
- case BTRFS_DROP_DELAYED_REF:
- locked_ref->node.ref_mod += ref->ref_mod;
- break;
- default:
- WARN_ON(1);
- }
- }
+ /*
+ * Record the must-insert_reserved flag before we drop the spin
+ * lock.
+ */
+ must_insert_reserved = locked_ref->must_insert_reserved;
+ locked_ref->must_insert_reserved = 0;
+
+ extent_op = locked_ref->extent_op;
+ locked_ref->extent_op = NULL;
spin_unlock(&locked_ref->lock);
ret = run_one_delayed_ref(trans, fs_info, ref, extent_op,
@@ -2770,33 +2777,13 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
btrfs_free_delayed_extent_op(extent_op);
if (ret) {
- spin_lock(&delayed_refs->lock);
- locked_ref->processing = 0;
- delayed_refs->num_heads_ready++;
- spin_unlock(&delayed_refs->lock);
- btrfs_delayed_ref_unlock(locked_ref);
+ unselect_delayed_ref_head(delayed_refs, locked_ref);
btrfs_put_delayed_ref(ref);
btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
ret);
return ret;
}
- /*
- * If this node is a head, that means all the refs in this head
- * have been dealt with, and we will pick the next head to deal
- * with, so we must unlock the head and drop it from the cluster
- * list before we release it.
- */
- if (btrfs_delayed_ref_is_head(ref)) {
- if (locked_ref->is_data &&
- locked_ref->total_ref_mod < 0) {
- spin_lock(&delayed_refs->lock);
- delayed_refs->pending_csums -= ref->num_bytes;
- spin_unlock(&delayed_refs->lock);
- }
- btrfs_delayed_ref_unlock(locked_ref);
- locked_ref = NULL;
- }
btrfs_put_delayed_ref(ref);
count++;
cond_resched();
@@ -3100,33 +3087,16 @@ again:
spin_unlock(&delayed_refs->lock);
goto out;
}
+ head = rb_entry(node, struct btrfs_delayed_ref_head,
+ href_node);
+ refcount_inc(&head->refs);
+ spin_unlock(&delayed_refs->lock);
- while (node) {
- head = rb_entry(node, struct btrfs_delayed_ref_head,
- href_node);
- if (btrfs_delayed_ref_is_head(&head->node)) {
- struct btrfs_delayed_ref_node *ref;
-
- ref = &head->node;
- refcount_inc(&ref->refs);
-
- spin_unlock(&delayed_refs->lock);
- /*
- * Mutex was contended, block until it's
- * released and try again
- */
- mutex_lock(&head->mutex);
- mutex_unlock(&head->mutex);
+ /* Mutex was contended, block until it's released and retry. */
+ mutex_lock(&head->mutex);
+ mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(ref);
- cond_resched();
- goto again;
- } else {
- WARN_ON(1);
- }
- node = rb_next(node);
- }
- spin_unlock(&delayed_refs->lock);
+ btrfs_put_delayed_ref_head(head);
cond_resched();
goto again;
}
@@ -3169,6 +3139,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
struct btrfs_delayed_data_ref *data_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_transaction *cur_trans;
+ struct rb_node *node;
int ret = 0;
cur_trans = root->fs_info->running_transaction;
@@ -3184,7 +3155,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
}
if (!mutex_trylock(&head->mutex)) {
- refcount_inc(&head->node.refs);
+ refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
btrfs_release_path(path);
@@ -3195,13 +3166,18 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
*/
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
return -EAGAIN;
}
spin_unlock(&delayed_refs->lock);
spin_lock(&head->lock);
- list_for_each_entry(ref, &head->ref_list, list) {
+ /*
+ * XXX: We should replace this with a proper search function in the
+ * future.
+ */
+ for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+ ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
/* If it's a shared ref we know a cross reference exists */
if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
ret = 1;
@@ -3351,7 +3327,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
int level;
int ret = 0;
int (*process_func)(struct btrfs_trans_handle *,
- struct btrfs_fs_info *,
+ struct btrfs_root *,
u64, u64, u64, u64, u64, u64);
@@ -3391,7 +3367,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
key.offset -= btrfs_file_extent_offset(buf, fi);
- ret = process_func(trans, fs_info, bytenr, num_bytes,
+ ret = process_func(trans, root, bytenr, num_bytes,
parent, ref_root, key.objectid,
key.offset);
if (ret)
@@ -3399,7 +3375,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
} else {
bytenr = btrfs_node_blockptr(buf, i);
num_bytes = fs_info->nodesize;
- ret = process_func(trans, fs_info, bytenr, num_bytes,
+ ret = process_func(trans, root, bytenr, num_bytes,
parent, ref_root, level - 1, 0);
if (ret)
goto fail;
@@ -4836,7 +4812,6 @@ static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
u64 orig, bool wait_ordered)
{
- struct btrfs_block_rsv *block_rsv;
struct btrfs_space_info *space_info;
struct btrfs_trans_handle *trans;
u64 delalloc_bytes;
@@ -4852,8 +4827,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
to_reclaim = items * EXTENT_SIZE_PER_ITEM;
trans = (struct btrfs_trans_handle *)current->journal_info;
- block_rsv = &fs_info->delalloc_block_rsv;
- space_info = block_rsv->space_info;
+ space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
delalloc_bytes = percpu_counter_sum_positive(
&fs_info->delalloc_bytes);
@@ -4912,6 +4886,13 @@ skip_async:
}
}
+struct reserve_ticket {
+ u64 bytes;
+ int error;
+ struct list_head list;
+ wait_queue_head_t wait;
+};
+
/**
* maybe_commit_transaction - possibly commit the transaction if its ok to
* @root - the root we're allocating for
@@ -4923,18 +4904,29 @@ skip_async:
* will return -ENOSPC.
*/
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info,
- u64 bytes, int force)
+ struct btrfs_space_info *space_info)
{
+ struct reserve_ticket *ticket = NULL;
struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
struct btrfs_trans_handle *trans;
+ u64 bytes;
trans = (struct btrfs_trans_handle *)current->journal_info;
if (trans)
return -EAGAIN;
- if (force)
- goto commit;
+ spin_lock(&space_info->lock);
+ if (!list_empty(&space_info->priority_tickets))
+ ticket = list_first_entry(&space_info->priority_tickets,
+ struct reserve_ticket, list);
+ else if (!list_empty(&space_info->tickets))
+ ticket = list_first_entry(&space_info->tickets,
+ struct reserve_ticket, list);
+ bytes = (ticket) ? ticket->bytes : 0;
+ spin_unlock(&space_info->lock);
+
+ if (!bytes)
+ return 0;
/* See if there is enough pinned space to make this reservation */
if (percpu_counter_compare(&space_info->total_bytes_pinned,
@@ -4949,8 +4941,12 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
return -ENOSPC;
spin_lock(&delayed_rsv->lock);
+ if (delayed_rsv->size > bytes)
+ bytes = 0;
+ else
+ bytes -= delayed_rsv->size;
if (percpu_counter_compare(&space_info->total_bytes_pinned,
- bytes - delayed_rsv->size) < 0) {
+ bytes) < 0) {
spin_unlock(&delayed_rsv->lock);
return -ENOSPC;
}
@@ -4964,13 +4960,6 @@ commit:
return btrfs_commit_transaction(trans);
}
-struct reserve_ticket {
- u64 bytes;
- int error;
- struct list_head list;
- wait_queue_head_t wait;
-};
-
/*
* Try to flush some data based on policy set by @state. This is only advisory
* and may fail for various reasons. The caller is supposed to examine the
@@ -5020,8 +5009,7 @@ static void flush_space(struct btrfs_fs_info *fs_info,
ret = 0;
break;
case COMMIT_TRANS:
- ret = may_commit_transaction(fs_info, space_info,
- num_bytes, 0);
+ ret = may_commit_transaction(fs_info, space_info);
break;
default:
ret = -ENOSPC;
@@ -5575,11 +5563,12 @@ again:
}
}
-static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
+static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
struct btrfs_block_rsv *dest, u64 num_bytes)
{
struct btrfs_space_info *space_info = block_rsv->space_info;
+ u64 ret;
spin_lock(&block_rsv->lock);
if (num_bytes == (u64)-1)
@@ -5594,6 +5583,7 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
}
spin_unlock(&block_rsv->lock);
+ ret = num_bytes;
if (num_bytes > 0) {
if (dest) {
spin_lock(&dest->lock);
@@ -5613,6 +5603,7 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
space_info_add_old_bytes(fs_info, space_info,
num_bytes);
}
+ return ret;
}
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
@@ -5636,6 +5627,15 @@ void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
rsv->type = type;
}
+void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *rsv,
+ unsigned short type)
+{
+ btrfs_init_block_rsv(rsv, type);
+ rsv->space_info = __find_space_info(fs_info,
+ BTRFS_BLOCK_GROUP_METADATA);
+}
+
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
unsigned short type)
{
@@ -5645,9 +5645,7 @@ struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
if (!block_rsv)
return NULL;
- btrfs_init_block_rsv(block_rsv, type);
- block_rsv->space_info = __find_space_info(fs_info,
- BTRFS_BLOCK_GROUP_METADATA);
+ btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
return block_rsv;
}
@@ -5730,6 +5728,66 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
return ret;
}
+/**
+ * btrfs_inode_rsv_refill - refill the inode block rsv.
+ * @inode - the inode we are refilling.
+ * @flush - the flushing restriction.
+ *
+ * Essentially the same as btrfs_block_rsv_refill, except it uses the
+ * block_rsv->size as the minimum size. We'll either refill the missing amount
+ * or return if we already have enough space. This will also handle the reserve
+ * tracepoint for the reserved amount.
+ */
+int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
+ enum btrfs_reserve_flush_enum flush)
+{
+ struct btrfs_root *root = inode->root;
+ struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
+ u64 num_bytes = 0;
+ int ret = -ENOSPC;
+
+ spin_lock(&block_rsv->lock);
+ if (block_rsv->reserved < block_rsv->size)
+ num_bytes = block_rsv->size - block_rsv->reserved;
+ spin_unlock(&block_rsv->lock);
+
+ if (num_bytes == 0)
+ return 0;
+
+ ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
+ if (!ret) {
+ block_rsv_add_bytes(block_rsv, num_bytes, 0);
+ trace_btrfs_space_reservation(root->fs_info, "delalloc",
+ btrfs_ino(inode), num_bytes, 1);
+ }
+ return ret;
+}
+
+/**
+ * btrfs_inode_rsv_release - release any excessive reservation.
+ * @inode - the inode we need to release from.
+ *
+ * This is the same as btrfs_block_rsv_release, except that it handles the
+ * tracepoint for the reservation.
+ */
+void btrfs_inode_rsv_release(struct btrfs_inode *inode)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+ struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
+ u64 released = 0;
+
+ /*
+ * Since we statically set the block_rsv->size we just want to say we
+ * are releasing 0 bytes, and then we'll just get the reservation over
+ * the size freed.
+ */
+ released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0);
+ if (released > 0)
+ trace_btrfs_space_reservation(fs_info, "delalloc",
+ btrfs_ino(inode), released, 0);
+}
+
void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
u64 num_bytes)
@@ -5801,7 +5859,6 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
fs_info->global_block_rsv.space_info = space_info;
- fs_info->delalloc_block_rsv.space_info = space_info;
fs_info->trans_block_rsv.space_info = space_info;
fs_info->empty_block_rsv.space_info = space_info;
fs_info->delayed_block_rsv.space_info = space_info;
@@ -5821,8 +5878,6 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
(u64)-1);
- WARN_ON(fs_info->delalloc_block_rsv.size > 0);
- WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
WARN_ON(fs_info->trans_block_rsv.size > 0);
WARN_ON(fs_info->trans_block_rsv.reserved > 0);
WARN_ON(fs_info->chunk_block_rsv.size > 0);
@@ -5834,12 +5889,15 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
- if (!trans->block_rsv)
+ if (!trans->block_rsv) {
+ ASSERT(!trans->bytes_reserved);
return;
+ }
if (!trans->bytes_reserved)
return;
+ ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
trace_btrfs_space_reservation(fs_info, "transaction",
trans->transid, trans->bytes_reserved, 0);
btrfs_block_rsv_release(fs_info, trans->block_rsv,
@@ -5961,104 +6019,37 @@ void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
}
-/**
- * drop_outstanding_extent - drop an outstanding extent
- * @inode: the inode we're dropping the extent for
- * @num_bytes: the number of bytes we're releasing.
- *
- * This is called when we are freeing up an outstanding extent, either called
- * after an error or after an extent is written. This will return the number of
- * reserved extents that need to be freed. This must be called with
- * BTRFS_I(inode)->lock held.
- */
-static unsigned drop_outstanding_extent(struct btrfs_inode *inode,
- u64 num_bytes)
-{
- unsigned drop_inode_space = 0;
- unsigned dropped_extents = 0;
- unsigned num_extents;
-
- num_extents = count_max_extents(num_bytes);
- ASSERT(num_extents);
- ASSERT(inode->outstanding_extents >= num_extents);
- inode->outstanding_extents -= num_extents;
-
- if (inode->outstanding_extents == 0 &&
- test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
- &inode->runtime_flags))
- drop_inode_space = 1;
-
- /*
- * If we have more or the same amount of outstanding extents than we have
- * reserved then we need to leave the reserved extents count alone.
- */
- if (inode->outstanding_extents >= inode->reserved_extents)
- return drop_inode_space;
-
- dropped_extents = inode->reserved_extents - inode->outstanding_extents;
- inode->reserved_extents -= dropped_extents;
- return dropped_extents + drop_inode_space;
-}
-
-/**
- * calc_csum_metadata_size - return the amount of metadata space that must be
- * reserved/freed for the given bytes.
- * @inode: the inode we're manipulating
- * @num_bytes: the number of bytes in question
- * @reserve: 1 if we are reserving space, 0 if we are freeing space
- *
- * This adjusts the number of csum_bytes in the inode and then returns the
- * correct amount of metadata that must either be reserved or freed. We
- * calculate how many checksums we can fit into one leaf and then divide the
- * number of bytes that will need to be checksumed by this value to figure out
- * how many checksums will be required. If we are adding bytes then the number
- * may go up and we will return the number of additional bytes that must be
- * reserved. If it is going down we will return the number of bytes that must
- * be freed.
- *
- * This must be called with BTRFS_I(inode)->lock held.
- */
-static u64 calc_csum_metadata_size(struct btrfs_inode *inode, u64 num_bytes,
- int reserve)
+static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
+ struct btrfs_inode *inode)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
- u64 old_csums, num_csums;
-
- if (inode->flags & BTRFS_INODE_NODATASUM && inode->csum_bytes == 0)
- return 0;
-
- old_csums = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes);
- if (reserve)
- inode->csum_bytes += num_bytes;
- else
- inode->csum_bytes -= num_bytes;
- num_csums = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes);
-
- /* No change, no need to reserve more */
- if (old_csums == num_csums)
- return 0;
+ struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
+ u64 reserve_size = 0;
+ u64 csum_leaves;
+ unsigned outstanding_extents;
- if (reserve)
- return btrfs_calc_trans_metadata_size(fs_info,
- num_csums - old_csums);
+ lockdep_assert_held(&inode->lock);
+ outstanding_extents = inode->outstanding_extents;
+ if (outstanding_extents)
+ reserve_size = btrfs_calc_trans_metadata_size(fs_info,
+ outstanding_extents + 1);
+ csum_leaves = btrfs_csum_bytes_to_leaves(fs_info,
+ inode->csum_bytes);
+ reserve_size += btrfs_calc_trans_metadata_size(fs_info,
+ csum_leaves);
- return btrfs_calc_trans_metadata_size(fs_info, old_csums - num_csums);
+ spin_lock(&block_rsv->lock);
+ block_rsv->size = reserve_size;
+ spin_unlock(&block_rsv->lock);
}
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
struct btrfs_root *root = inode->root;
- struct btrfs_block_rsv *block_rsv = &fs_info->delalloc_block_rsv;
- u64 to_reserve = 0;
- u64 csum_bytes;
unsigned nr_extents;
enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
int ret = 0;
bool delalloc_lock = true;
- u64 to_free = 0;
- unsigned dropped;
- bool release_extra = false;
/* If we are a free space inode we need to not flush since we will be in
* the middle of a transaction commit. We also don't need the delalloc
@@ -6084,19 +6075,12 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
+ /* Add our new extents and calculate the new rsv size. */
spin_lock(&inode->lock);
nr_extents = count_max_extents(num_bytes);
- inode->outstanding_extents += nr_extents;
-
- nr_extents = 0;
- if (inode->outstanding_extents > inode->reserved_extents)
- nr_extents += inode->outstanding_extents -
- inode->reserved_extents;
-
- /* We always want to reserve a slot for updating the inode. */
- to_reserve = btrfs_calc_trans_metadata_size(fs_info, nr_extents + 1);
- to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
- csum_bytes = inode->csum_bytes;
+ btrfs_mod_outstanding_extents(inode, nr_extents);
+ inode->csum_bytes += num_bytes;
+ btrfs_calculate_inode_block_rsv_size(fs_info, inode);
spin_unlock(&inode->lock);
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
@@ -6106,92 +6090,26 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
goto out_fail;
}
- ret = btrfs_block_rsv_add(root, block_rsv, to_reserve, flush);
+ ret = btrfs_inode_rsv_refill(inode, flush);
if (unlikely(ret)) {
btrfs_qgroup_free_meta(root,
nr_extents * fs_info->nodesize);
goto out_fail;
}
- spin_lock(&inode->lock);
- if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
- &inode->runtime_flags)) {
- to_reserve -= btrfs_calc_trans_metadata_size(fs_info, 1);
- release_extra = true;
- }
- inode->reserved_extents += nr_extents;
- spin_unlock(&inode->lock);
-
if (delalloc_lock)
mutex_unlock(&inode->delalloc_mutex);
-
- if (to_reserve)
- trace_btrfs_space_reservation(fs_info, "delalloc",
- btrfs_ino(inode), to_reserve, 1);
- if (release_extra)
- btrfs_block_rsv_release(fs_info, block_rsv,
- btrfs_calc_trans_metadata_size(fs_info, 1));
return 0;
out_fail:
spin_lock(&inode->lock);
- dropped = drop_outstanding_extent(inode, num_bytes);
- /*
- * If the inodes csum_bytes is the same as the original
- * csum_bytes then we know we haven't raced with any free()ers
- * so we can just reduce our inodes csum bytes and carry on.
- */
- if (inode->csum_bytes == csum_bytes) {
- calc_csum_metadata_size(inode, num_bytes, 0);
- } else {
- u64 orig_csum_bytes = inode->csum_bytes;
- u64 bytes;
-
- /*
- * This is tricky, but first we need to figure out how much we
- * freed from any free-ers that occurred during this
- * reservation, so we reset ->csum_bytes to the csum_bytes
- * before we dropped our lock, and then call the free for the
- * number of bytes that were freed while we were trying our
- * reservation.
- */
- bytes = csum_bytes - inode->csum_bytes;
- inode->csum_bytes = csum_bytes;
- to_free = calc_csum_metadata_size(inode, bytes, 0);
-
-
- /*
- * Now we need to see how much we would have freed had we not
- * been making this reservation and our ->csum_bytes were not
- * artificially inflated.
- */
- inode->csum_bytes = csum_bytes - num_bytes;
- bytes = csum_bytes - orig_csum_bytes;
- bytes = calc_csum_metadata_size(inode, bytes, 0);
-
- /*
- * Now reset ->csum_bytes to what it should be. If bytes is
- * more than to_free then we would have freed more space had we
- * not had an artificially high ->csum_bytes, so we need to free
- * the remainder. If bytes is the same or less then we don't
- * need to do anything, the other free-ers did the correct
- * thing.
- */
- inode->csum_bytes = orig_csum_bytes - num_bytes;
- if (bytes > to_free)
- to_free = bytes - to_free;
- else
- to_free = 0;
- }
+ nr_extents = count_max_extents(num_bytes);
+ btrfs_mod_outstanding_extents(inode, -nr_extents);
+ inode->csum_bytes -= num_bytes;
+ btrfs_calculate_inode_block_rsv_size(fs_info, inode);
spin_unlock(&inode->lock);
- if (dropped)
- to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
- if (to_free) {
- btrfs_block_rsv_release(fs_info, block_rsv, to_free);
- trace_btrfs_space_reservation(fs_info, "delalloc",
- btrfs_ino(inode), to_free, 0);
- }
+ btrfs_inode_rsv_release(inode);
if (delalloc_lock)
mutex_unlock(&inode->delalloc_mutex);
return ret;
@@ -6199,36 +6117,55 @@ out_fail:
/**
* btrfs_delalloc_release_metadata - release a metadata reservation for an inode
- * @inode: the inode to release the reservation for
- * @num_bytes: the number of bytes we're releasing
+ * @inode: the inode to release the reservation for.
+ * @num_bytes: the number of bytes we are releasing.
*
* This will release the metadata reservation for an inode. This can be called
* once we complete IO for a given set of bytes to release their metadata
- * reservations.
+ * reservations, or on error for the same reason.
*/
void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
- u64 to_free = 0;
- unsigned dropped;
num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
spin_lock(&inode->lock);
- dropped = drop_outstanding_extent(inode, num_bytes);
-
- if (num_bytes)
- to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+ inode->csum_bytes -= num_bytes;
+ btrfs_calculate_inode_block_rsv_size(fs_info, inode);
spin_unlock(&inode->lock);
- if (dropped > 0)
- to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
if (btrfs_is_testing(fs_info))
return;
- trace_btrfs_space_reservation(fs_info, "delalloc", btrfs_ino(inode),
- to_free, 0);
+ btrfs_inode_rsv_release(inode);
+}
+
+/**
+ * btrfs_delalloc_release_extents - release our outstanding_extents
+ * @inode: the inode to balance the reservation for.
+ * @num_bytes: the number of bytes we originally reserved with
+ *
+ * When we reserve space we increase outstanding_extents for the extents we may
+ * add. Once we've set the range as delalloc or created our ordered extents we
+ * have outstanding_extents to track the real usage, so we use this to free our
+ * temporarily tracked outstanding_extents. This _must_ be used in conjunction
+ * with btrfs_delalloc_reserve_metadata.
+ */
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ unsigned num_extents;
+
+ spin_lock(&inode->lock);
+ num_extents = count_max_extents(num_bytes);
+ btrfs_mod_outstanding_extents(inode, -num_extents);
+ btrfs_calculate_inode_block_rsv_size(fs_info, inode);
+ spin_unlock(&inode->lock);
+
+ if (btrfs_is_testing(fs_info))
+ return;
- btrfs_block_rsv_release(fs_info, &fs_info->delalloc_block_rsv, to_free);
+ btrfs_inode_rsv_release(inode);
}
/**
@@ -6275,10 +6212,7 @@ int btrfs_delalloc_reserve_space(struct inode *inode,
* @inode: inode we're releasing space for
* @start: start position of the space already reserved
* @len: the len of the space already reserved
- *
- * This must be matched with a call to btrfs_delalloc_reserve_space. This is
- * called in the case that we don't need the metadata AND data reservations
- * anymore. So if there is an error or we insert an inline extent.
+ * @release_bytes: the len of the space we consumed or didn't use
*
* This function will release the metadata space that was not used and will
* decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
@@ -6286,7 +6220,8 @@ int btrfs_delalloc_reserve_space(struct inode *inode,
* Also it will handle the qgroup reserved space.
*/
void btrfs_delalloc_release_space(struct inode *inode,
- struct extent_changeset *reserved, u64 start, u64 len)
+ struct extent_changeset *reserved,
+ u64 start, u64 len)
{
btrfs_delalloc_release_metadata(BTRFS_I(inode), len);
btrfs_free_reserved_data_space(inode, reserved, start, len);
@@ -6944,7 +6879,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
BUG_ON(!is_data && refs_to_drop != 1);
if (is_data)
- skinny_metadata = 0;
+ skinny_metadata = false;
ret = lookup_extent_backref(trans, info, path, &iref,
bytenr, num_bytes, parent,
@@ -7199,7 +7134,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
goto out_delayed_unlock;
spin_lock(&head->lock);
- if (!list_empty(&head->ref_list))
+ if (!RB_EMPTY_ROOT(&head->ref_tree))
goto out;
if (head->extent_op) {
@@ -7220,9 +7155,8 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
* at this point we have a head with no other entries. Go
* ahead and process it.
*/
- head->node.in_tree = 0;
rb_erase(&head->href_node, &delayed_refs->href_root);
-
+ RB_CLEAR_NODE(&head->href_node);
atomic_dec(&delayed_refs->num_entries);
/*
@@ -7241,7 +7175,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
ret = 1;
mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
return ret;
out:
spin_unlock(&head->lock);
@@ -7263,6 +7197,10 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
int old_ref_mod, new_ref_mod;
+ btrfs_ref_tree_mod(root, buf->start, buf->len, parent,
+ root->root_key.objectid,
+ btrfs_header_level(buf), 0,
+ BTRFS_DROP_DELAYED_REF);
ret = btrfs_add_delayed_tree_ref(fs_info, trans, buf->start,
buf->len, parent,
root->root_key.objectid,
@@ -7315,16 +7253,21 @@ out:
/* Can return -ENOMEM */
int btrfs_free_extent(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
+ struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
u64 owner, u64 offset)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
int old_ref_mod, new_ref_mod;
int ret;
if (btrfs_is_testing(fs_info))
return 0;
+ if (root_objectid != BTRFS_TREE_LOG_OBJECTID)
+ btrfs_ref_tree_mod(root, bytenr, num_bytes, parent,
+ root_objectid, owner, offset,
+ BTRFS_DROP_DELAYED_REF);
/*
* tree log blocks never actually go into the extent allocation
@@ -8292,17 +8235,22 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
}
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
- u64 root_objectid, u64 owner,
+ struct btrfs_root *root, u64 owner,
u64 offset, u64 ram_bytes,
struct btrfs_key *ins)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
- BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
+ BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
+
+ btrfs_ref_tree_mod(root, ins->objectid, ins->offset, 0,
+ root->root_key.objectid, owner, offset,
+ BTRFS_ADD_DELAYED_EXTENT);
ret = btrfs_add_delayed_data_ref(fs_info, trans, ins->objectid,
- ins->offset, 0, root_objectid, owner,
+ ins->offset, 0,
+ root->root_key.objectid, owner,
offset, ram_bytes,
BTRFS_ADD_DELAYED_EXTENT, NULL, NULL);
return ret;
@@ -8524,6 +8472,9 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
extent_op->is_data = false;
extent_op->level = level;
+ btrfs_ref_tree_mod(root, ins.objectid, ins.offset, parent,
+ root_objectid, level, 0,
+ BTRFS_ADD_DELAYED_EXTENT);
ret = btrfs_add_delayed_tree_ref(fs_info, trans, ins.objectid,
ins.offset, parent,
root_objectid, level,
@@ -8880,7 +8831,7 @@ skip:
ret);
}
}
- ret = btrfs_free_extent(trans, fs_info, bytenr, blocksize,
+ ret = btrfs_free_extent(trans, root, bytenr, blocksize,
parent, root->root_key.objectid,
level - 1, 0);
if (ret)
@@ -9297,7 +9248,7 @@ out:
* don't have it in the radix (like when we recover after a power fail
* or unmount) so we don't leak memory.
*/
- if (!for_reloc && root_dropped == false)
+ if (!for_reloc && !root_dropped)
btrfs_add_dead_root(root);
if (err && err != -EAGAIN)
btrfs_handle_fs_error(fs_info, err, NULL);
@@ -9954,9 +9905,9 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
return 0;
}
-static void __link_block_group(struct btrfs_space_info *space_info,
- struct btrfs_block_group_cache *cache)
+static void link_block_group(struct btrfs_block_group_cache *cache)
{
+ struct btrfs_space_info *space_info = cache->space_info;
int index = get_block_group_index(cache);
bool first = false;
@@ -10164,7 +10115,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
cache->space_info = space_info;
- __link_block_group(space_info, cache);
+ link_block_group(cache);
set_avail_alloc_bits(info, cache->flags);
if (btrfs_chunk_readonly(info, cache->key.objectid)) {
@@ -10323,7 +10274,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
cache->bytes_super, &cache->space_info);
update_global_block_rsv(fs_info);
- __link_block_group(cache->space_info, cache);
+ link_block_group(cache);
list_add_tail(&cache->bg_list, &trans->new_bgs);
@@ -10373,6 +10324,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* remove it.
*/
free_excluded_extents(fs_info, block_group);
+ btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
+ block_group->key.offset);
memcpy(&key, &block_group->key, sizeof(key));
index = get_block_group_index(block_group);
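
Aside: btrfs_calculate_inode_block_rsv_size() above sizes the new per-inode block reservation as the metadata cost of (outstanding_extents + 1) items plus the cost of the csum leaves for the dirty bytes. The standalone userspace model below mirrors that arithmetic; the per-item cost is an illustrative assumption in the style of btrfs_calc_trans_metadata_size() (nodesize * max tree level * 2 per item), csum_leaves is taken as an input, and none of the constants come from this patch.

#include <stdio.h>
#include <stdint.h>

#define NODESIZE	16384ULL	/* assumed metadata node size */
#define MAX_LEVEL	8ULL		/* assumed tree height bound */

/* Worst-case metadata bytes to insert num_items items (illustrative). */
static uint64_t calc_trans_metadata_size(uint64_t num_items)
{
	return NODESIZE * MAX_LEVEL * 2 * num_items;
}

/* Mirrors the sizing above: one item per outstanding extent plus one for
 * the inode item, plus the leaves needed for the range's checksums. */
static uint64_t inode_rsv_size(unsigned int outstanding_extents,
			       uint64_t csum_leaves)
{
	uint64_t reserve_size = 0;

	if (outstanding_extents)
		reserve_size = calc_trans_metadata_size(outstanding_extents + 1);
	reserve_size += calc_trans_metadata_size(csum_leaves);
	return reserve_size;
}

int main(void)
{
	/* e.g. a write that added 3 extents whose csums fit in 1 leaf */
	printf("rsv size: %llu bytes\n",
	       (unsigned long long)inode_rsv_size(3, 1));
	return 0;
}
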
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7fa50e12f18e..16045ea86fc1 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -110,7 +110,6 @@ struct extent_page_data {
struct bio *bio;
struct extent_io_tree *tree;
get_extent_t *get_extent;
- unsigned long bio_flags;
/* tells writepage not to lock the state bits for this range
* it still does the unlocking
@@ -2762,8 +2761,8 @@ static int merge_bio(struct extent_io_tree *tree, struct page *page,
*/
static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
struct writeback_control *wbc,
- struct page *page, sector_t sector,
- size_t size, unsigned long offset,
+ struct page *page, u64 offset,
+ size_t size, unsigned long pg_offset,
struct block_device *bdev,
struct bio **bio_ret,
bio_end_io_t end_io_func,
@@ -2777,6 +2776,7 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
int contig = 0;
int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
size_t page_size = min_t(size_t, size, PAGE_SIZE);
+ sector_t sector = offset >> 9;
if (bio_ret && *bio_ret) {
bio = *bio_ret;
@@ -2787,8 +2787,8 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
if (prev_bio_flags != bio_flags || !contig ||
force_bio_submit ||
- merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
- bio_add_page(bio, page, page_size, offset) < page_size) {
+ merge_bio(tree, page, pg_offset, page_size, bio, bio_flags) ||
+ bio_add_page(bio, page, page_size, pg_offset) < page_size) {
ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
if (ret < 0) {
*bio_ret = NULL;
@@ -2802,8 +2802,8 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
}
}
- bio = btrfs_bio_alloc(bdev, (u64)sector << 9);
- bio_add_page(bio, page, page_size, offset);
+ bio = btrfs_bio_alloc(bdev, offset);
+ bio_add_page(bio, page, page_size, pg_offset);
bio->bi_end_io = end_io_func;
bio->bi_private = tree;
bio->bi_write_hint = page->mapping->host->i_write_hint;
@@ -2893,7 +2893,6 @@ static int __do_readpage(struct extent_io_tree *tree,
u64 last_byte = i_size_read(inode);
u64 block_start;
u64 cur_end;
- sector_t sector;
struct extent_map *em;
struct block_device *bdev;
int ret = 0;
@@ -2929,6 +2928,7 @@ static int __do_readpage(struct extent_io_tree *tree,
}
while (cur <= end) {
bool force_bio_submit = false;
+ u64 offset;
if (cur >= last_byte) {
char *userpage;
@@ -2968,9 +2968,9 @@ static int __do_readpage(struct extent_io_tree *tree,
iosize = ALIGN(iosize, blocksize);
if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
disk_io_size = em->block_len;
- sector = em->block_start >> 9;
+ offset = em->block_start;
} else {
- sector = (em->block_start + extent_offset) >> 9;
+ offset = em->block_start + extent_offset;
disk_io_size = iosize;
}
bdev = em->bdev;
@@ -3063,8 +3063,8 @@ static int __do_readpage(struct extent_io_tree *tree,
}
ret = submit_extent_page(REQ_OP_READ | read_flags, tree, NULL,
- page, sector, disk_io_size, pg_offset,
- bdev, bio,
+ page, offset, disk_io_size,
+ pg_offset, bdev, bio,
end_bio_extent_readpage, mirror_num,
*bio_flags,
this_bio_flag,
@@ -3325,7 +3325,6 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
u64 extent_offset;
u64 block_start;
u64 iosize;
- sector_t sector;
struct extent_map *em;
struct block_device *bdev;
size_t pg_offset = 0;
@@ -3368,6 +3367,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
while (cur <= end) {
u64 em_end;
+ u64 offset;
if (cur >= i_size) {
if (tree->ops && tree->ops->writepage_end_io_hook)
@@ -3389,7 +3389,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
BUG_ON(end < cur);
iosize = min(em_end - cur, end - cur + 1);
iosize = ALIGN(iosize, blocksize);
- sector = (em->block_start + extent_offset) >> 9;
+ offset = em->block_start + extent_offset;
bdev = em->bdev;
block_start = em->block_start;
compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
@@ -3432,7 +3432,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
}
ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
- page, sector, iosize, pg_offset,
+ page, offset, iosize, pg_offset,
bdev, &epd->bio,
end_bio_extent_writepage,
0, 0, 0, false);
@@ -3716,7 +3716,6 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
u64 offset = eb->start;
u32 nritems;
unsigned long i, num_pages;
- unsigned long bio_flags = 0;
unsigned long start, end;
unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
int ret = 0;
@@ -3724,8 +3723,6 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
num_pages = num_extent_pages(eb->start, eb->len);
atomic_set(&eb->io_pages, num_pages);
- if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
- bio_flags = EXTENT_BIO_TREE_LOG;
/* set btree blocks beyond nritems with 0 to avoid stale content. */
nritems = btrfs_header_nritems(eb);
@@ -3749,11 +3746,10 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
clear_page_dirty_for_io(p);
set_page_writeback(p);
ret = submit_extent_page(REQ_OP_WRITE | write_flags, tree, wbc,
- p, offset >> 9, PAGE_SIZE, 0, bdev,
+ p, offset, PAGE_SIZE, 0, bdev,
&epd->bio,
end_bio_extent_buffer_writepage,
- 0, epd->bio_flags, bio_flags, false);
- epd->bio_flags = bio_flags;
+ 0, 0, 0, false);
if (ret) {
set_btree_ioerr(p);
if (PageWriteback(p))
@@ -3790,7 +3786,6 @@ int btree_write_cache_pages(struct address_space *mapping,
.tree = tree,
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
- .bio_flags = 0,
};
int ret = 0;
int done = 0;
@@ -3802,7 +3797,7 @@ int btree_write_cache_pages(struct address_space *mapping,
int scanned = 0;
int tag;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
end = -1;
@@ -3819,8 +3814,8 @@ retry:
if (wbc->sync_mode == WB_SYNC_ALL)
tag_pages_for_writeback(mapping, index, end);
while (!done && !nr_to_write_done && (index <= end) &&
- (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+ (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+ tag))) {
unsigned i;
scanned = 1;
@@ -3830,11 +3825,6 @@ retry:
if (!PagePrivate(page))
continue;
- if (!wbc->range_cyclic && page->index > end) {
- done = 1;
- break;
- }
-
spin_lock(&mapping->private_lock);
if (!PagePrivate(page)) {
spin_unlock(&mapping->private_lock);
@@ -3946,7 +3936,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
if (!igrab(inode))
return 0;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
end = -1;
@@ -3966,8 +3956,8 @@ retry:
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && !nr_to_write_done && (index <= end) &&
- (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+ (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
+ &index, end, tag))) {
unsigned i;
scanned = 1;
@@ -3992,12 +3982,6 @@ retry:
continue;
}
- if (!wbc->range_cyclic && page->index > end) {
- done = 1;
- unlock_page(page);
- continue;
- }
-
if (wbc->sync_mode != WB_SYNC_NONE) {
if (PageWriteback(page))
flush_fn(data);
@@ -4063,7 +4047,7 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
if (epd->bio) {
int ret;
- ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
+ ret = submit_one_bio(epd->bio, 0, 0);
BUG_ON(ret < 0); /* -ENOMEM */
epd->bio = NULL;
}
@@ -4086,7 +4070,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
.get_extent = get_extent,
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
- .bio_flags = 0,
};
ret = __extent_writepage(page, wbc, &epd);
@@ -4111,7 +4094,6 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
.get_extent = get_extent,
.extent_locked = 1,
.sync_io = mode == WB_SYNC_ALL,
- .bio_flags = 0,
};
struct writeback_control wbc_writepages = {
.sync_mode = mode,
@@ -4151,7 +4133,6 @@ int extent_writepages(struct extent_io_tree *tree,
.get_extent = get_extent,
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
- .bio_flags = 0,
};
ret = extent_write_cache_pages(mapping, wbc, __extent_writepage, &epd,
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index e5535bbe6953..4a8861379d3e 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -34,7 +34,6 @@
* type for this bio
*/
#define EXTENT_BIO_COMPRESSED 1
-#define EXTENT_BIO_TREE_LOG 2
#define EXTENT_BIO_FLAG_SHIFT 16
/* these are bit numbers for test/set bit */
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index aafcc785f840..f80254d82f40 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -856,7 +856,7 @@ next_slot:
btrfs_mark_buffer_dirty(leaf);
if (update_refs && disk_bytenr > 0) {
- ret = btrfs_inc_extent_ref(trans, fs_info,
+ ret = btrfs_inc_extent_ref(trans, root,
disk_bytenr, num_bytes, 0,
root->root_key.objectid,
new_key.objectid,
@@ -940,7 +940,7 @@ delete_extent_item:
extent_end = ALIGN(extent_end,
fs_info->sectorsize);
} else if (update_refs && disk_bytenr > 0) {
- ret = btrfs_free_extent(trans, fs_info,
+ ret = btrfs_free_extent(trans, root,
disk_bytenr, num_bytes, 0,
root->root_key.objectid,
key.objectid, key.offset -
@@ -1234,7 +1234,7 @@ again:
extent_end - split);
btrfs_mark_buffer_dirty(leaf);
- ret = btrfs_inc_extent_ref(trans, fs_info, bytenr, num_bytes,
+ ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
@@ -1268,7 +1268,7 @@ again:
extent_end = other_end;
del_slot = path->slots[0] + 1;
del_nr++;
- ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
+ ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
@@ -1288,7 +1288,7 @@ again:
key.offset = other_start;
del_slot = path->slots[0];
del_nr++;
- ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
+ ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
@@ -1590,7 +1590,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
int ret = 0;
bool only_release_metadata = false;
bool force_page_uptodate = false;
- bool need_unlock;
nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
PAGE_SIZE / (sizeof(struct page *)));
@@ -1613,6 +1612,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
size_t copied;
size_t dirty_sectors;
size_t num_sectors;
+ int extents_locked;
WARN_ON(num_pages > nrptrs);
@@ -1656,6 +1656,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
}
}
+ WARN_ON(reserve_bytes == 0);
ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
reserve_bytes);
if (ret) {
@@ -1669,7 +1670,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
}
release_bytes = reserve_bytes;
- need_unlock = false;
again:
/*
* This is going to setup the pages array with the number of
@@ -1679,19 +1679,23 @@ again:
ret = prepare_pages(inode, pages, num_pages,
pos, write_bytes,
force_page_uptodate);
- if (ret)
+ if (ret) {
+ btrfs_delalloc_release_extents(BTRFS_I(inode),
+ reserve_bytes);
break;
+ }
- ret = lock_and_cleanup_extent_if_need(BTRFS_I(inode), pages,
+ extents_locked = lock_and_cleanup_extent_if_need(
+ BTRFS_I(inode), pages,
num_pages, pos, write_bytes, &lockstart,
&lockend, &cached_state);
- if (ret < 0) {
- if (ret == -EAGAIN)
+ if (extents_locked < 0) {
+ if (extents_locked == -EAGAIN)
goto again;
+ btrfs_delalloc_release_extents(BTRFS_I(inode),
+ reserve_bytes);
+ ret = extents_locked;
break;
- } else if (ret > 0) {
- need_unlock = true;
- ret = 0;
}
copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
@@ -1718,23 +1722,10 @@ again:
PAGE_SIZE);
}
- /*
- * If we had a short copy we need to release the excess delaloc
- * bytes we reserved. We need to increment outstanding_extents
- * because btrfs_delalloc_release_space and
- * btrfs_delalloc_release_metadata will decrement it, but
- * we still have an outstanding extent for the chunk we actually
- * managed to copy.
- */
if (num_sectors > dirty_sectors) {
/* release everything except the sectors we dirtied */
release_bytes -= dirty_sectors <<
fs_info->sb->s_blocksize_bits;
- if (copied > 0) {
- spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->outstanding_extents++;
- spin_unlock(&BTRFS_I(inode)->lock);
- }
if (only_release_metadata) {
btrfs_delalloc_release_metadata(BTRFS_I(inode),
release_bytes);
@@ -1756,10 +1747,11 @@ again:
if (copied > 0)
ret = btrfs_dirty_pages(inode, pages, dirty_pages,
pos, copied, NULL);
- if (need_unlock)
+ if (extents_locked)
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
lockstart, lockend, &cached_state,
GFP_NOFS);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
if (ret) {
btrfs_drop_pages(pages, num_pages);
break;
@@ -2046,7 +2038,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
struct btrfs_trans_handle *trans;
struct btrfs_log_ctx ctx;
int ret = 0, err;
- bool full_sync = 0;
+ bool full_sync = false;
u64 len;
/*
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index 684f12247db7..fe5e0324dca9 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -1286,12 +1286,8 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path)
{
- u64 start, end;
int ret;
- start = block_group->key.objectid;
- end = block_group->key.objectid + block_group->key.offset;
-
block_group->needs_free_space = 0;
ret = add_new_free_space_info(trans, fs_info, block_group, path);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index d02019747d00..022b19336fee 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -500,11 +500,12 @@ again:
ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
prealloc, prealloc, &alloc_hint);
if (ret) {
- btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
goto out_put;
}
ret = btrfs_write_out_ino_cache(root, trans, path, inode);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
out_put:
iput(inode);
out_release:
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d94e3f68b9b1..b93fe05a39c7 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -42,6 +42,7 @@
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
+#include <linux/magic.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -67,7 +68,6 @@ struct btrfs_iget_args {
};
struct btrfs_dio_data {
- u64 outstanding_extents;
u64 reserve;
u64 unsubmitted_oe_range_start;
u64 unsubmitted_oe_range_end;
@@ -316,7 +316,7 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
btrfs_free_path(path);
return PTR_ERR(trans);
}
- trans->block_rsv = &fs_info->delalloc_block_rsv;
+ trans->block_rsv = &BTRFS_I(inode)->block_rsv;
if (compressed_size && compressed_pages)
extent_item_size = btrfs_file_extent_calc_inline_size(
@@ -348,7 +348,6 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
}
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
- btrfs_delalloc_release_metadata(BTRFS_I(inode), end + 1 - start);
btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0);
out:
/*
@@ -458,7 +457,6 @@ static noinline void compress_file_range(struct inode *inode,
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 num_bytes;
u64 blocksize = fs_info->sectorsize;
u64 actual_end;
u64 isize = i_size_read(inode);
@@ -508,8 +506,6 @@ again:
total_compressed = min_t(unsigned long, total_compressed,
BTRFS_MAX_UNCOMPRESSED);
- num_bytes = ALIGN(end - start + 1, blocksize);
- num_bytes = max(blocksize, num_bytes);
total_in = 0;
ret = 0;
@@ -542,7 +538,10 @@ again:
*/
extent_range_clear_dirty_for_io(inode, start, end);
redirty = 1;
- ret = btrfs_compress_pages(compress_type,
+
+ /* Compression level is applied here and only here */
+ ret = btrfs_compress_pages(
+ compress_type | (fs_info->compress_level << 4),
inode->i_mapping, start,
pages,
&nr_pages,
@@ -570,7 +569,7 @@ again:
cont:
if (start == 0) {
/* lets try to make an inline extent */
- if (ret || total_in < (actual_end - start)) {
+ if (ret || total_in < actual_end) {
/* we didn't compress the entire range, try
* to make an uncompressed inline extent.
*/
@@ -584,16 +583,21 @@ cont:
}
if (ret <= 0) {
unsigned long clear_flags = EXTENT_DELALLOC |
- EXTENT_DELALLOC_NEW | EXTENT_DEFRAG;
+ EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
+ EXTENT_DO_ACCOUNTING;
unsigned long page_error_op;
- clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
/*
* inline extent creation worked or returned error,
* we don't need to create any more async work items.
* Unlock and free up our temp pages.
+ *
+ * We use DO_ACCOUNTING here because we need the
+ * delalloc_release_metadata to be done _after_ we drop
+ * our outstanding extent for clearing delalloc for this
+ * range.
*/
extent_clear_unlock_delalloc(inode, start, end, end,
NULL, clear_flags,
@@ -602,10 +606,6 @@ cont:
PAGE_SET_WRITEBACK |
page_error_op |
PAGE_END_WRITEBACK);
- if (ret == 0)
- btrfs_free_reserved_data_space_noquota(inode,
- start,
- end - start + 1);
goto free_pages_out;
}
}
@@ -625,7 +625,6 @@ cont:
*/
total_in = ALIGN(total_in, PAGE_SIZE);
if (total_compressed + blocksize <= total_in) {
- num_bytes = total_in;
*num_added += 1;
/*
@@ -633,12 +632,12 @@ cont:
* allocation on disk for these compressed pages, and
* will submit them to the elevator.
*/
- add_async_extent(async_cow, start, num_bytes,
+ add_async_extent(async_cow, start, total_in,
total_compressed, pages, nr_pages,
compress_type);
- if (start + num_bytes < end) {
- start += num_bytes;
+ if (start + total_in < end) {
+ start += total_in;
pages = NULL;
cond_resched();
goto again;
@@ -982,15 +981,19 @@ static noinline int cow_file_range(struct inode *inode,
ret = cow_file_range_inline(root, inode, start, end, 0,
BTRFS_COMPRESS_NONE, NULL);
if (ret == 0) {
+ /*
+ * We use DO_ACCOUNTING here because we need the
+ * delalloc_release_metadata to be run _after_ we drop
+ * our outstanding extent for clearing delalloc for this
+ * range.
+ */
extent_clear_unlock_delalloc(inode, start, end,
delalloc_end, NULL,
EXTENT_LOCKED | EXTENT_DELALLOC |
- EXTENT_DELALLOC_NEW |
- EXTENT_DEFRAG, PAGE_UNLOCK |
+ EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
+ EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
PAGE_END_WRITEBACK);
- btrfs_free_reserved_data_space_noquota(inode, start,
- end - start + 1);
*nr_written = *nr_written +
(end - start + PAGE_SIZE) / PAGE_SIZE;
*page_started = 1;
@@ -1226,13 +1229,6 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);
- while (atomic_read(&fs_info->async_submit_draining) &&
- atomic_read(&fs_info->async_delalloc_pages)) {
- wait_event(fs_info->async_submit_wait,
- (atomic_read(&fs_info->async_delalloc_pages) ==
- 0));
- }
-
*nr_written += nr_pages;
start = cur_end + 1;
}
@@ -1635,7 +1631,7 @@ static void btrfs_split_extent_hook(void *private_data,
}
spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->outstanding_extents++;
+ btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
spin_unlock(&BTRFS_I(inode)->lock);
}
@@ -1665,7 +1661,7 @@ static void btrfs_merge_extent_hook(void *private_data,
/* we're not bigger than the max, unreserve the space and go */
if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->outstanding_extents--;
+ btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
spin_unlock(&BTRFS_I(inode)->lock);
return;
}
@@ -1696,7 +1692,7 @@ static void btrfs_merge_extent_hook(void *private_data,
return;
spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->outstanding_extents--;
+ btrfs_mod_outstanding_extents(BTRFS_I(inode), -1);
spin_unlock(&BTRFS_I(inode)->lock);
}
@@ -1766,15 +1762,12 @@ static void btrfs_set_bit_hook(void *private_data,
if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
+ u32 num_extents = count_max_extents(len);
bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));
- if (*bits & EXTENT_FIRST_DELALLOC) {
- *bits &= ~EXTENT_FIRST_DELALLOC;
- } else {
- spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->outstanding_extents++;
- spin_unlock(&BTRFS_I(inode)->lock);
- }
+ spin_lock(&BTRFS_I(inode)->lock);
+ btrfs_mod_outstanding_extents(BTRFS_I(inode), num_extents);
+ spin_unlock(&BTRFS_I(inode)->lock);
/* For sanity tests */
if (btrfs_is_testing(fs_info))
@@ -1828,13 +1821,9 @@ static void btrfs_clear_bit_hook(void *private_data,
struct btrfs_root *root = inode->root;
bool do_list = !btrfs_is_free_space_inode(inode);
- if (*bits & EXTENT_FIRST_DELALLOC) {
- *bits &= ~EXTENT_FIRST_DELALLOC;
- } else if (!(*bits & EXTENT_CLEAR_META_RESV)) {
- spin_lock(&inode->lock);
- inode->outstanding_extents -= num_extents;
- spin_unlock(&inode->lock);
- }
+ spin_lock(&inode->lock);
+ btrfs_mod_outstanding_extents(inode, -num_extents);
+ spin_unlock(&inode->lock);
/*
* We don't reserve metadata space for space cache inodes so we
@@ -2105,6 +2094,7 @@ again:
0);
ClearPageChecked(page);
set_page_dirty(page);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
&cached_state, GFP_NOFS);
@@ -2229,8 +2219,9 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
if (ret < 0)
goto out;
qg_released = ret;
- ret = btrfs_alloc_reserved_file_extent(trans, root->root_key.objectid,
- btrfs_ino(BTRFS_I(inode)), file_pos, qg_released, &ins);
+ ret = btrfs_alloc_reserved_file_extent(trans, root,
+ btrfs_ino(BTRFS_I(inode)),
+ file_pos, qg_released, &ins);
out:
btrfs_free_path(path);
@@ -2464,7 +2455,7 @@ static noinline bool record_extent_backrefs(struct btrfs_path *path,
ret = iterate_inodes_from_logical(old->bytenr +
old->extent_offset, fs_info,
path, record_one_backref,
- old);
+ old, false);
if (ret < 0 && ret != -ENOENT)
return false;
@@ -2682,7 +2673,7 @@ again:
inode_add_bytes(inode, len);
btrfs_release_path(path);
- ret = btrfs_inc_extent_ref(trans, fs_info, new->bytenr,
+ ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
new->disk_len, 0,
backref->root_id, backref->inum,
new->file_pos); /* start - extent_offset */
@@ -2964,7 +2955,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
trans = NULL;
goto out;
}
- trans->block_rsv = &fs_info->delalloc_block_rsv;
+ trans->block_rsv = &BTRFS_I(inode)->block_rsv;
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, ret);
@@ -3000,7 +2991,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
goto out;
}
- trans->block_rsv = &fs_info->delalloc_block_rsv;
+ trans->block_rsv = &BTRFS_I(inode)->block_rsv;
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compress_type = ordered_extent->compress_type;
@@ -3058,9 +3049,6 @@ out:
0, &cached_state, GFP_NOFS);
}
- if (root != fs_info->tree_root)
- btrfs_delalloc_release_metadata(BTRFS_I(inode),
- ordered_extent->len);
if (trans)
btrfs_end_transaction(trans);
@@ -4372,47 +4360,11 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,
}
-static int truncate_inline_extent(struct inode *inode,
- struct btrfs_path *path,
- struct btrfs_key *found_key,
- const u64 item_end,
- const u64 new_size)
-{
- struct extent_buffer *leaf = path->nodes[0];
- int slot = path->slots[0];
- struct btrfs_file_extent_item *fi;
- u32 size = (u32)(new_size - found_key->offset);
- struct btrfs_root *root = BTRFS_I(inode)->root;
-
- fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
-
- if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
- loff_t offset = new_size;
- loff_t page_end = ALIGN(offset, PAGE_SIZE);
-
- /*
- * Zero out the remaining of the last page of our inline extent,
- * instead of directly truncating our inline extent here - that
- * would be much more complex (decompressing all the data, then
- * compressing the truncated data, which might be bigger than
- * the size of the inline extent, resize the extent, etc).
- * We release the path because to get the page we might need to
- * read the extent item from disk (data not in the page cache).
- */
- btrfs_release_path(path);
- return btrfs_truncate_block(inode, offset, page_end - offset,
- 0);
- }
-
- btrfs_set_file_extent_ram_bytes(leaf, fi, size);
- size = btrfs_file_extent_calc_inline_size(size);
- btrfs_truncate_item(root->fs_info, path, size, 1);
-
- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
- inode_sub_bytes(inode, item_end + 1 - new_size);
-
- return 0;
-}
+/*
+ * Return this if we need to call truncate_block for the last bit of the
+ * truncate.
+ */
+#define NEED_TRUNCATE_BLOCK 1
/*
* this can truncate away extent items, csum items and directory items.
@@ -4451,9 +4403,9 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
int err = 0;
u64 ino = btrfs_ino(BTRFS_I(inode));
u64 bytes_deleted = 0;
- bool be_nice = 0;
- bool should_throttle = 0;
- bool should_end = 0;
+ bool be_nice = false;
+ bool should_throttle = false;
+ bool should_end = false;
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
@@ -4463,7 +4415,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
*/
if (!btrfs_is_free_space_inode(BTRFS_I(inode)) &&
test_bit(BTRFS_ROOT_REF_COWS, &root->state))
- be_nice = 1;
+ be_nice = true;
path = btrfs_alloc_path();
if (!path)
@@ -4573,11 +4525,6 @@ search_again:
if (found_type != BTRFS_EXTENT_DATA_KEY)
goto delete;
- if (del_item)
- last_size = found_key.offset;
- else
- last_size = new_size;
-
if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
u64 num_dec;
extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
@@ -4619,40 +4566,30 @@ search_again:
*/
if (!del_item &&
btrfs_file_extent_encryption(leaf, fi) == 0 &&
- btrfs_file_extent_other_encoding(leaf, fi) == 0) {
-
+ btrfs_file_extent_other_encoding(leaf, fi) == 0 &&
+ btrfs_file_extent_compression(leaf, fi) == 0) {
+ u32 size = (u32)(new_size - found_key.offset);
+
+ btrfs_set_file_extent_ram_bytes(leaf, fi, size);
+ size = btrfs_file_extent_calc_inline_size(size);
+ btrfs_truncate_item(root->fs_info, path, size, 1);
+ } else if (!del_item) {
/*
- * Need to release path in order to truncate a
- * compressed extent. So delete any accumulated
- * extent items so far.
+ * We have to bail so the last_size is set to
+ * just before this extent.
*/
- if (btrfs_file_extent_compression(leaf, fi) !=
- BTRFS_COMPRESS_NONE && pending_del_nr) {
- err = btrfs_del_items(trans, root, path,
- pending_del_slot,
- pending_del_nr);
- if (err) {
- btrfs_abort_transaction(trans,
- err);
- goto error;
- }
- pending_del_nr = 0;
- }
+ err = NEED_TRUNCATE_BLOCK;
+ break;
+ }
- err = truncate_inline_extent(inode, path,
- &found_key,
- item_end,
- new_size);
- if (err) {
- btrfs_abort_transaction(trans, err);
- goto error;
- }
- } else if (test_bit(BTRFS_ROOT_REF_COWS,
- &root->state)) {
+ if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
inode_sub_bytes(inode, item_end + 1 - new_size);
- }
}
delete:
+ if (del_item)
+ last_size = found_key.offset;
+ else
+ last_size = new_size;
if (del_item) {
if (!pending_del_nr) {
/* no pending yet, add ourselves */
@@ -4669,14 +4606,14 @@ delete:
} else {
break;
}
- should_throttle = 0;
+ should_throttle = false;
if (found_extent &&
(test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
root == fs_info->tree_root)) {
btrfs_set_path_blocking(path);
bytes_deleted += extent_num_bytes;
- ret = btrfs_free_extent(trans, fs_info, extent_start,
+ ret = btrfs_free_extent(trans, root, extent_start,
extent_num_bytes, 0,
btrfs_header_owner(leaf),
ino, extent_offset);
@@ -4688,11 +4625,11 @@ delete:
if (be_nice) {
if (truncate_space_check(trans, root,
extent_num_bytes)) {
- should_end = 1;
+ should_end = true;
}
if (btrfs_should_throttle_delayed_refs(trans,
fs_info))
- should_throttle = 1;
+ should_throttle = true;
}
}
@@ -4801,8 +4738,11 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
(!len || ((len & (blocksize - 1)) == 0)))
goto out;
+ block_start = round_down(from, blocksize);
+ block_end = block_start + blocksize - 1;
+
ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
- round_down(from, blocksize), blocksize);
+ block_start, blocksize);
if (ret)
goto out;
@@ -4810,15 +4750,12 @@ again:
page = find_or_create_page(mapping, index, mask);
if (!page) {
btrfs_delalloc_release_space(inode, data_reserved,
- round_down(from, blocksize),
- blocksize);
+ block_start, blocksize);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
ret = -ENOMEM;
goto out;
}
- block_start = round_down(from, blocksize);
- block_end = block_start + blocksize - 1;
-
if (!PageUptodate(page)) {
ret = btrfs_readpage(NULL, page);
lock_page(page);
@@ -4883,6 +4820,7 @@ out_unlock:
if (ret)
btrfs_delalloc_release_space(inode, data_reserved, block_start,
blocksize);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
unlock_page(page);
put_page(page);
out:
@@ -7797,33 +7735,6 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
return em;
}
-static void adjust_dio_outstanding_extents(struct inode *inode,
- struct btrfs_dio_data *dio_data,
- const u64 len)
-{
- unsigned num_extents = count_max_extents(len);
-
- /*
- * If we have an outstanding_extents count still set then we're
- * within our reservation, otherwise we need to adjust our inode
- * counter appropriately.
- */
- if (dio_data->outstanding_extents >= num_extents) {
- dio_data->outstanding_extents -= num_extents;
- } else {
- /*
- * If dio write length has been split due to no large enough
- * contiguous space, we need to compensate our inode counter
- * appropriately.
- */
- u64 num_needed = num_extents - dio_data->outstanding_extents;
-
- spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->outstanding_extents += num_needed;
- spin_unlock(&BTRFS_I(inode)->lock);
- }
-}
-
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
@@ -7985,7 +7896,6 @@ unlock:
if (!dio_data->overwrite && start + len > i_size_read(inode))
i_size_write(inode, start + len);
- adjust_dio_outstanding_extents(inode, dio_data, len);
WARN_ON(dio_data->reserve < len);
dio_data->reserve -= len;
dio_data->unsubmitted_oe_range_end = start + len;
@@ -8015,14 +7925,6 @@ unlock_err:
err:
if (dio_data)
current->journal_info = dio_data;
- /*
- * Compensate the delalloc release we do in btrfs_direct_IO() when we
- * write less data then expected, so that we don't underflow our inode's
- * outstanding extents counter.
- */
- if (create && dio_data)
- adjust_dio_outstanding_extents(inode, dio_data, len);
-
return ret;
}
@@ -8495,7 +8397,7 @@ static void btrfs_end_dio_bio(struct bio *bio)
if (dip->errors) {
bio_io_error(dip->orig_bio);
} else {
- dip->dio_bio->bi_status = 0;
+ dip->dio_bio->bi_status = BLK_STS_OK;
bio_endio(dip->orig_bio);
}
out:
@@ -8577,7 +8479,7 @@ __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, u64 file_offset,
goto err;
}
map:
- ret = btrfs_map_bio(fs_info, bio, 0, async_submit);
+ ret = btrfs_map_bio(fs_info, bio, 0, 0);
err:
bio_put(bio);
return ret;
@@ -8786,7 +8688,6 @@ free_ordered:
}
static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
- struct kiocb *iocb,
const struct iov_iter *iter, loff_t offset)
{
int seg;
@@ -8833,7 +8734,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
bool relock = false;
ssize_t ret;
- if (check_direct_IO(fs_info, iocb, iter, offset))
+ if (check_direct_IO(fs_info, iter, offset))
return 0;
inode_dio_begin(inode);
@@ -8868,7 +8769,6 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
offset, count);
if (ret)
goto out;
- dio_data.outstanding_extents = count_max_extents(count);
/*
* We need to know how many extents we reserved so that we can
@@ -8915,6 +8815,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
} else if (ret >= 0 && (size_t)ret < count)
btrfs_delalloc_release_space(inode, data_reserved,
offset, count - (size_t)ret);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), count);
}
out:
if (wakeup)
@@ -9232,9 +9133,6 @@ again:
fs_info->sectorsize);
if (reserved_space < PAGE_SIZE) {
end = page_start + reserved_space - 1;
- spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->outstanding_extents++;
- spin_unlock(&BTRFS_I(inode)->lock);
btrfs_delalloc_release_space(inode, data_reserved,
page_start, PAGE_SIZE - reserved_space);
}
@@ -9286,12 +9184,14 @@ again:
out_unlock:
if (!ret) {
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
sb_end_pagefault(inode->i_sb);
extent_changeset_free(data_reserved);
return VM_FAULT_LOCKED;
}
unlock_page(page);
out:
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
btrfs_delalloc_release_space(inode, data_reserved, page_start,
reserved_space);
out_noreserve:
@@ -9387,12 +9287,12 @@ static int btrfs_truncate(struct inode *inode)
ret = btrfs_truncate_inode_items(trans, root, inode,
inode->i_size,
BTRFS_EXTENT_DATA_KEY);
+ trans->block_rsv = &fs_info->trans_block_rsv;
if (ret != -ENOSPC && ret != -EAGAIN) {
err = ret;
break;
}
- trans->block_rsv = &fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
err = ret;
@@ -9416,6 +9316,27 @@ static int btrfs_truncate(struct inode *inode)
trans->block_rsv = rsv;
}
+ /*
+ * We can't call btrfs_truncate_block inside a trans handle as we could
+ * deadlock with freeze. If we got NEED_TRUNCATE_BLOCK then we know
+ * we've truncated everything except the last little bit, and can do
+ * btrfs_truncate_block and then update the disk_i_size.
+ */
+ if (ret == NEED_TRUNCATE_BLOCK) {
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
+
+ ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
+ if (ret)
+ goto out;
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+ btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
+ }
+
if (ret == 0 && inode->i_nlink > 0) {
trans->block_rsv = root->orphan_block_rsv;
ret = btrfs_orphan_del(trans, BTRFS_I(inode));
@@ -9480,6 +9401,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_inode *ei;
struct inode *inode;
@@ -9506,8 +9428,9 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
spin_lock_init(&ei->lock);
ei->outstanding_extents = 0;
- ei->reserved_extents = 0;
-
+ if (sb->s_magic != BTRFS_TEST_MAGIC)
+ btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
+ BTRFS_BLOCK_RSV_DELALLOC);
ei->runtime_flags = 0;
ei->prop_compress = BTRFS_COMPRESS_NONE;
ei->defrag_compress = BTRFS_COMPRESS_NONE;
@@ -9557,8 +9480,9 @@ void btrfs_destroy_inode(struct inode *inode)
WARN_ON(!hlist_empty(&inode->i_dentry));
WARN_ON(inode->i_data.nrpages);
+ WARN_ON(BTRFS_I(inode)->block_rsv.reserved);
+ WARN_ON(BTRFS_I(inode)->block_rsv.size);
WARN_ON(BTRFS_I(inode)->outstanding_extents);
- WARN_ON(BTRFS_I(inode)->reserved_extents);
WARN_ON(BTRFS_I(inode)->delalloc_bytes);
WARN_ON(BTRFS_I(inode)->new_delalloc_bytes);
WARN_ON(BTRFS_I(inode)->csum_bytes);
@@ -10337,19 +10261,6 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
ret = __start_delalloc_inodes(root, delay_iput, -1);
if (ret > 0)
ret = 0;
- /*
- * the filemap_flush will queue IO into the worker threads, but
- * we have to make sure the IO is actually started and that
- * ordered extents get created before we return
- */
- atomic_inc(&fs_info->async_submit_draining);
- while (atomic_read(&fs_info->nr_async_submits) ||
- atomic_read(&fs_info->async_delalloc_pages)) {
- wait_event(fs_info->async_submit_wait,
- (atomic_read(&fs_info->nr_async_submits) == 0 &&
- atomic_read(&fs_info->async_delalloc_pages) == 0));
- }
- atomic_dec(&fs_info->async_submit_draining);
return ret;
}
@@ -10391,14 +10302,6 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
spin_unlock(&fs_info->delalloc_root_lock);
ret = 0;
- atomic_inc(&fs_info->async_submit_draining);
- while (atomic_read(&fs_info->nr_async_submits) ||
- atomic_read(&fs_info->async_delalloc_pages)) {
- wait_event(fs_info->async_submit_wait,
- (atomic_read(&fs_info->nr_async_submits) == 0 &&
- atomic_read(&fs_info->async_delalloc_pages) == 0));
- }
- atomic_dec(&fs_info->async_submit_draining);
out:
if (!list_empty_careful(&splice)) {
spin_lock(&fs_info->delalloc_root_lock);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 6c7a49faf4e0..fd172a93d11a 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -86,6 +86,19 @@ struct btrfs_ioctl_received_subvol_args_32 {
struct btrfs_ioctl_received_subvol_args_32)
#endif
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
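+/* 32-bit ABI layout of struct btrfs_ioctl_send_args, used by the compat send ioctl */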
+struct btrfs_ioctl_send_args_32 {
+ __s64 send_fd; /* in */
+ __u64 clone_sources_count; /* in */
+ compat_uptr_t clone_sources; /* in */
+ __u64 parent_root; /* in */
+ __u64 flags; /* in */
+ __u64 reserved[4]; /* in */
+} __attribute__ ((__packed__));
+
+#define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \
+ struct btrfs_ioctl_send_args_32)
+#endif
static int btrfs_clone(struct inode *src, struct inode *inode,
u64 off, u64 olen, u64 olen_aligned, u64 destoff,
@@ -609,23 +622,6 @@ fail_free:
return ret;
}
-static void btrfs_wait_for_no_snapshotting_writes(struct btrfs_root *root)
-{
- s64 writers;
- DEFINE_WAIT(wait);
-
- do {
- prepare_to_wait(&root->subv_writers->wait, &wait,
- TASK_UNINTERRUPTIBLE);
-
- writers = percpu_counter_sum(&root->subv_writers->counter);
- if (writers)
- schedule();
-
- finish_wait(&root->subv_writers->wait, &wait);
- } while (writers);
-}
-
static int create_snapshot(struct btrfs_root *root, struct inode *dir,
struct dentry *dentry,
u64 *async_transid, bool readonly,
@@ -654,7 +650,9 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
atomic_inc(&root->will_be_snapshotted);
smp_mb__after_atomic();
- btrfs_wait_for_no_snapshotting_writes(root);
+ /* wait for no snapshot writes */
+ wait_event(root->subv_writers->wait,
+ percpu_counter_sum(&root->subv_writers->counter) == 0);
ret = btrfs_start_delalloc_inodes(root, 0);
if (ret)
@@ -1219,6 +1217,7 @@ again:
unlock_page(pages[i]);
put_page(pages[i]);
}
+ btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
extent_changeset_free(data_reserved);
return i_done;
out:
@@ -1229,6 +1228,7 @@ out:
btrfs_delalloc_release_space(inode, data_reserved,
start_index << PAGE_SHIFT,
page_cnt << PAGE_SHIFT);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
extent_changeset_free(data_reserved);
return ret;
@@ -1420,21 +1420,6 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
filemap_flush(inode->i_mapping);
}
- if (do_compress) {
- /* the filemap_flush will queue IO into the worker threads, but
- * we have to make sure the IO is actually started and that
- * ordered extents get created before we return
- */
- atomic_inc(&fs_info->async_submit_draining);
- while (atomic_read(&fs_info->nr_async_submits) ||
- atomic_read(&fs_info->async_delalloc_pages)) {
- wait_event(fs_info->async_submit_wait,
- (atomic_read(&fs_info->nr_async_submits) == 0 &&
- atomic_read(&fs_info->async_delalloc_pages) == 0));
- }
- atomic_dec(&fs_info->async_submit_draining);
- }
-
if (range->compress_type == BTRFS_COMPRESS_LZO) {
btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
} else if (range->compress_type == BTRFS_COMPRESS_ZSTD) {
@@ -1842,8 +1827,13 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
ret = btrfs_update_root(trans, fs_info->tree_root,
&root->root_key, &root->root_item);
+ if (ret < 0) {
+ btrfs_end_transaction(trans);
+ goto out_reset;
+ }
+
+ ret = btrfs_commit_transaction(trans);
- btrfs_commit_transaction(trans);
out_reset:
if (ret)
btrfs_set_root_flags(&root->root_item, root_flags);
@@ -2179,7 +2169,7 @@ static noinline int btrfs_ioctl_tree_search_v2(struct file *file,
inode = file_inode(file);
ret = search_ioctl(inode, &args.key, &buf_size,
- (char *)(&uarg->buf[0]));
+ (char __user *)(&uarg->buf[0]));
if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key)))
ret = -EFAULT;
else if (ret == -EOVERFLOW &&
@@ -3706,7 +3696,7 @@ process_slot:
if (disko) {
inode_add_bytes(inode, datal);
ret = btrfs_inc_extent_ref(trans,
- fs_info,
+ root,
disko, diskl, 0,
root->root_key.objectid,
btrfs_ino(BTRFS_I(inode)),
@@ -4129,10 +4119,12 @@ static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_space_info *dest_orig;
struct btrfs_ioctl_space_info __user *user_dest;
struct btrfs_space_info *info;
- u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
- BTRFS_BLOCK_GROUP_SYSTEM,
- BTRFS_BLOCK_GROUP_METADATA,
- BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
+ static const u64 types[] = {
+ BTRFS_BLOCK_GROUP_DATA,
+ BTRFS_BLOCK_GROUP_SYSTEM,
+ BTRFS_BLOCK_GROUP_METADATA,
+ BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA
+ };
int num_types = 4;
int alloc_size;
int ret = 0;
@@ -4504,8 +4496,8 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
ipath->fspath->val[i] = rel_ptr;
}
- ret = copy_to_user((void *)(unsigned long)ipa->fspath,
- (void *)(unsigned long)ipath->fspath, size);
+ ret = copy_to_user((void __user *)(unsigned long)ipa->fspath,
+ ipath->fspath, size);
if (ret) {
ret = -EFAULT;
goto out;
@@ -4540,13 +4532,14 @@ static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
}
static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
- void __user *arg)
+ void __user *arg, int version)
{
int ret = 0;
int size;
struct btrfs_ioctl_logical_ino_args *loi;
struct btrfs_data_container *inodes = NULL;
struct btrfs_path *path = NULL;
+ bool ignore_offset;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -4555,13 +4548,30 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
if (IS_ERR(loi))
return PTR_ERR(loi);
+ if (version == 1) {
+ ignore_offset = false;
+ size = min_t(u32, loi->size, SZ_64K);
+ } else {
+ /* All reserved bits must be 0 for now */
+ if (memchr_inv(loi->reserved, 0, sizeof(loi->reserved))) {
+ ret = -EINVAL;
+ goto out_loi;
+ }
+ /* Only accept flags we have defined so far */
+ if (loi->flags & ~(BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET)) {
+ ret = -EINVAL;
+ goto out_loi;
+ }
+ ignore_offset = loi->flags & BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET;
+ size = min_t(u32, loi->size, SZ_16M);
+ }
+
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto out;
}
- size = min_t(u32, loi->size, SZ_64K);
inodes = init_data_container(size);
if (IS_ERR(inodes)) {
ret = PTR_ERR(inodes);
@@ -4570,20 +4580,21 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
}
ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
- build_ino_list, inodes);
+ build_ino_list, inodes, ignore_offset);
if (ret == -EINVAL)
ret = -ENOENT;
if (ret < 0)
goto out;
- ret = copy_to_user((void *)(unsigned long)loi->inodes,
- (void *)(unsigned long)inodes, size);
+ ret = copy_to_user((void __user *)(unsigned long)loi->inodes, inodes,
+ size);
if (ret)
ret = -EFAULT;
out:
btrfs_free_path(path);
kvfree(inodes);
+out_loi:
kfree(loi);
return ret;
@@ -5160,15 +5171,11 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
root->root_key.objectid);
if (ret < 0 && ret != -EEXIST) {
btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
goto out;
}
}
ret = btrfs_commit_transaction(trans);
- if (ret < 0) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
-
out:
up_write(&fs_info->subvol_sem);
mnt_drop_write_file(file);
@@ -5490,6 +5497,41 @@ out_drop_write:
return ret;
}
+static int _btrfs_ioctl_send(struct file *file, void __user *argp, bool compat)
+{
+ struct btrfs_ioctl_send_args *arg;
+ int ret;
+
+ if (compat) {
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ struct btrfs_ioctl_send_args_32 args32;
+
+ ret = copy_from_user(&args32, argp, sizeof(args32));
+ if (ret)
+ return -EFAULT;
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
+ arg->send_fd = args32.send_fd;
+ arg->clone_sources_count = args32.clone_sources_count;
+ arg->clone_sources = compat_ptr(args32.clone_sources);
+ arg->parent_root = args32.parent_root;
+ arg->flags = args32.flags;
+ memcpy(arg->reserved, args32.reserved,
+ sizeof(args32.reserved));
+#else
+ return -ENOTTY;
+#endif
+ } else {
+ arg = memdup_user(argp, sizeof(*arg));
+ if (IS_ERR(arg))
+ return PTR_ERR(arg);
+ }
+ ret = btrfs_ioctl_send(file, arg);
+ kfree(arg);
+ return ret;
+}
+
long btrfs_ioctl(struct file *file, unsigned int
cmd, unsigned long arg)
{
@@ -5554,7 +5596,9 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_INO_PATHS:
return btrfs_ioctl_ino_to_path(root, argp);
case BTRFS_IOC_LOGICAL_INO:
- return btrfs_ioctl_logical_to_ino(fs_info, argp);
+ return btrfs_ioctl_logical_to_ino(fs_info, argp, 1);
+ case BTRFS_IOC_LOGICAL_INO_V2:
+ return btrfs_ioctl_logical_to_ino(fs_info, argp, 2);
case BTRFS_IOC_SPACE_INFO:
return btrfs_ioctl_space_info(fs_info, argp);
case BTRFS_IOC_SYNC: {
@@ -5595,7 +5639,11 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_set_received_subvol_32(file, argp);
#endif
case BTRFS_IOC_SEND:
- return btrfs_ioctl_send(file, argp);
+ return _btrfs_ioctl_send(file, argp, false);
+#if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT)
+ case BTRFS_IOC_SEND_32:
+ return _btrfs_ioctl_send(file, argp, true);
+#endif
case BTRFS_IOC_GET_DEV_STATS:
return btrfs_ioctl_get_dev_stats(fs_info, argp);
case BTRFS_IOC_QUOTA_CTL:
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index d433e75d489a..6c7f18cd3b61 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -430,10 +430,15 @@ out:
return ret;
}
+static void lzo_set_level(struct list_head *ws, unsigned int type)
+{
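+ /* LZO has no tunable compression levels; this callback intentionally does nothing. */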
+}
+
const struct btrfs_compress_op btrfs_lzo_compress = {
.alloc_workspace = lzo_alloc_workspace,
.free_workspace = lzo_free_workspace,
.compress_pages = lzo_compress_pages,
.decompress_bio = lzo_decompress_bio,
.decompress = lzo_decompress,
+ .set_level = lzo_set_level,
};
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index a3aca495e33e..5b311aeddcc8 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -242,6 +242,15 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
}
spin_unlock(&root->ordered_extent_lock);
+ /*
+ * We don't need count_max_extents here; we can assume that all of
+ * that work has been done at higher layers, so this is truly the
+ * smallest the extent is going to get.
+ */
+ spin_lock(&BTRFS_I(inode)->lock);
+ btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
+ spin_unlock(&BTRFS_I(inode)->lock);
+
return 0;
}
@@ -591,11 +600,19 @@ void btrfs_remove_ordered_extent(struct inode *inode,
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_inode_tree *tree;
- struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+ struct btrfs_root *root = btrfs_inode->root;
struct rb_node *node;
bool dec_pending_ordered = false;
- tree = &BTRFS_I(inode)->ordered_tree;
+ /* This is paired with btrfs_add_ordered_extent. */
+ spin_lock(&btrfs_inode->lock);
+ btrfs_mod_outstanding_extents(btrfs_inode, -1);
+ spin_unlock(&btrfs_inode->lock);
+ if (root != fs_info->tree_root)
+ btrfs_delalloc_release_metadata(btrfs_inode, entry->len);
+
+ tree = &btrfs_inode->ordered_tree;
spin_lock_irq(&tree->lock);
node = &entry->rb_node;
rb_erase(node, &tree->tree);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index e172d4843eae..168fd03ca3ac 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1441,7 +1441,7 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
u64 bytenr = qrecord->bytenr;
int ret;
- ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root);
+ ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
if (ret < 0)
return ret;
@@ -2031,7 +2031,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
/* Search commit root to find old_roots */
ret = btrfs_find_all_roots(NULL, fs_info,
record->bytenr, 0,
- &record->old_roots);
+ &record->old_roots, false);
if (ret < 0)
goto cleanup;
}
@@ -2042,7 +2042,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
* root. It's safe inside commit_transaction().
*/
ret = btrfs_find_all_roots(trans, fs_info,
- record->bytenr, SEQ_LAST, &new_roots);
+ record->bytenr, SEQ_LAST, &new_roots, false);
if (ret < 0)
goto cleanup;
if (qgroup_to_skip) {
@@ -2570,7 +2570,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
num_bytes = found.offset;
ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
- &roots);
+ &roots, false);
if (ret < 0)
goto out;
/* For rescan, just pass old_roots as NULL */
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 24a62224b24b..a7f79254ecca 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1326,6 +1326,9 @@ write_data:
cleanup:
rbio_orig_end_io(rbio, BLK_STS_IOERR);
+
+ while ((bio = bio_list_pop(&bio_list)))
+ bio_put(bio);
}
/*
@@ -1582,6 +1585,10 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
cleanup:
rbio_orig_end_io(rbio, BLK_STS_IOERR);
+
+ while ((bio = bio_list_pop(&bio_list)))
+ bio_put(bio);
+
return -EIO;
finish:
@@ -2107,6 +2114,10 @@ cleanup:
if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
rbio_orig_end_io(rbio, BLK_STS_IOERR);
+
+ while ((bio = bio_list_pop(&bio_list)))
+ bio_put(bio);
+
return -EIO;
}
@@ -2231,12 +2242,18 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
ASSERT(!bio->bi_iter.bi_size);
rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
- for (i = 0; i < rbio->real_stripes; i++) {
+ /*
+ * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
+ * to the end position, so this search can start from the first parity
+ * stripe.
+ */
+ for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
if (bbio->stripes[i].dev == scrub_dev) {
rbio->scrubp = i;
break;
}
}
+ ASSERT(i < rbio->real_stripes);
/* Now we just support the sectorsize equals to page size */
ASSERT(fs_info->sectorsize == PAGE_SIZE);
@@ -2454,6 +2471,9 @@ submit_write:
cleanup:
rbio_orig_end_io(rbio, BLK_STS_IOERR);
+
+ while ((bio = bio_list_pop(&bio_list)))
+ bio_put(bio);
}
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
@@ -2563,12 +2583,12 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
int stripe;
struct bio *bio;
+ bio_list_init(&bio_list);
+
ret = alloc_rbio_essential_pages(rbio);
if (ret)
goto cleanup;
- bio_list_init(&bio_list);
-
atomic_set(&rbio->error, 0);
/*
* build a list of bios to read all the missing parts of this
@@ -2636,6 +2656,10 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
cleanup:
rbio_orig_end_io(rbio, BLK_STS_IOERR);
+
+ while ((bio = bio_list_pop(&bio_list)))
+ bio_put(bio);
+
return;
finish:
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
new file mode 100644
index 000000000000..34878699d363
--- /dev/null
+++ b/fs/btrfs/ref-verify.c
@@ -0,0 +1,1031 @@
+/*
+ * Copyright (C) 2014 Facebook. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include "ctree.h"
+#include "disk-io.h"
+#include "locking.h"
+#include "delayed-ref.h"
+#include "ref-verify.h"
+
+/*
+ * Used to keep track of the roots and the number of refs each root has for a
+ * given bytenr. This only tracks the number of direct references, not shared
+ * references.
+ */
+struct root_entry {
+ u64 root_objectid;
+ u64 num_refs;
+ struct rb_node node;
+};
+
+/*
+ * These are meant to represent what should exist in the extent tree. They can
+ * be used to verify that the extent tree is consistent, as they should all
+ * match what the extent tree says.
+ */
+struct ref_entry {
+ u64 root_objectid;
+ u64 parent;
+ u64 owner;
+ u64 offset;
+ u64 num_refs;
+ struct rb_node node;
+};
+
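+/* Maximum number of stack frames recorded per ref action */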
+#define MAX_TRACE 16
+
+/*
+ * Whenever we add/remove a reference we record the action. The action maps
+ * back to the delayed ref action. We hold the ref we are changing in the
+ * action so we can account for the history properly, and we record the root we
+ * were called with since it could be different from ref_root. We also store
+ * stack traces because thats how I roll.
+ */
+struct ref_action {
+ int action;
+ u64 root;
+ struct ref_entry ref;
+ struct list_head list;
+ unsigned long trace[MAX_TRACE];
+ unsigned int trace_len;
+};
+
+/*
+ * There is one of these for every block we reference; it holds the roots and
+ * references to it as well as all of the ref actions that have occurred to it.
+ * We never free it until we unmount the file system in order to make sure
+ * re-allocations are happening properly.
+ */
+struct block_entry {
+ u64 bytenr;
+ u64 len;
+ u64 num_refs;
+ int metadata;
+ int from_disk;
+ struct rb_root roots;
+ struct rb_root refs;
+ struct rb_node node;
+ struct list_head actions;
+};
+
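+/*
+ * Insert @be into the tree of block entries keyed by bytenr. Returns the
+ * existing entry if one is already present, or NULL once @be has been linked
+ * in.
+ */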
+static struct block_entry *insert_block_entry(struct rb_root *root,
+ struct block_entry *be)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent_node = NULL;
+ struct block_entry *entry;
+
+ while (*p) {
+ parent_node = *p;
+ entry = rb_entry(parent_node, struct block_entry, node);
+ if (entry->bytenr > be->bytenr)
+ p = &(*p)->rb_left;
+ else if (entry->bytenr < be->bytenr)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ rb_link_node(&be->node, parent_node, p);
+ rb_insert_color(&be->node, root);
+ return NULL;
+}
+
+static struct block_entry *lookup_block_entry(struct rb_root *root, u64 bytenr)
+{
+ struct rb_node *n;
+ struct block_entry *entry = NULL;
+
+ n = root->rb_node;
+ while (n) {
+ entry = rb_entry(n, struct block_entry, node);
+ if (entry->bytenr < bytenr)
+ n = n->rb_right;
+ else if (entry->bytenr > bytenr)
+ n = n->rb_left;
+ else
+ return entry;
+ }
+ return NULL;
+}
+
+static struct root_entry *insert_root_entry(struct rb_root *root,
+ struct root_entry *re)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent_node = NULL;
+ struct root_entry *entry;
+
+ while (*p) {
+ parent_node = *p;
+ entry = rb_entry(parent_node, struct root_entry, node);
+ if (entry->root_objectid > re->root_objectid)
+ p = &(*p)->rb_left;
+ else if (entry->root_objectid < re->root_objectid)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ rb_link_node(&re->node, parent_node, p);
+ rb_insert_color(&re->node, root);
+ return NULL;
+
+}
+
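+/* Total ordering of refs: compare by root, then parent, then owner, then offset */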
+static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2)
+{
+ if (ref1->root_objectid < ref2->root_objectid)
+ return -1;
+ if (ref1->root_objectid > ref2->root_objectid)
+ return 1;
+ if (ref1->parent < ref2->parent)
+ return -1;
+ if (ref1->parent > ref2->parent)
+ return 1;
+ if (ref1->owner < ref2->owner)
+ return -1;
+ if (ref1->owner > ref2->owner)
+ return 1;
+ if (ref1->offset < ref2->offset)
+ return -1;
+ if (ref1->offset > ref2->offset)
+ return 1;
+ return 0;
+}
+
+static struct ref_entry *insert_ref_entry(struct rb_root *root,
+ struct ref_entry *ref)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent_node = NULL;
+ struct ref_entry *entry;
+ int cmp;
+
+ while (*p) {
+ parent_node = *p;
+ entry = rb_entry(parent_node, struct ref_entry, node);
+ cmp = comp_refs(entry, ref);
+ if (cmp > 0)
+ p = &(*p)->rb_left;
+ else if (cmp < 0)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ rb_link_node(&ref->node, parent_node, p);
+ rb_insert_color(&ref->node, root);
+ return NULL;
+
+}
+
+static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
+{
+ struct rb_node *n;
+ struct root_entry *entry = NULL;
+
+ n = root->rb_node;
+ while (n) {
+ entry = rb_entry(n, struct root_entry, node);
+ if (entry->root_objectid < objectid)
+ n = n->rb_right;
+ else if (entry->root_objectid > objectid)
+ n = n->rb_left;
+ else
+ return entry;
+ }
+ return NULL;
+}
+
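+/*
+ * Stack trace helpers: record the call chain of each ref action so it can be
+ * dumped later. Without CONFIG_STACKTRACE nothing is recorded and only a
+ * "no stacktrace support" message can be printed.
+ */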
+#ifdef CONFIG_STACKTRACE
+static void __save_stack_trace(struct ref_action *ra)
+{
+ struct stack_trace stack_trace;
+
+ stack_trace.max_entries = MAX_TRACE;
+ stack_trace.nr_entries = 0;
+ stack_trace.entries = ra->trace;
+ stack_trace.skip = 2;
+ save_stack_trace(&stack_trace);
+ ra->trace_len = stack_trace.nr_entries;
+}
+
+static void __print_stack_trace(struct btrfs_fs_info *fs_info,
+ struct ref_action *ra)
+{
+ struct stack_trace trace;
+
+ if (ra->trace_len == 0) {
+ btrfs_err(fs_info, " ref-verify: no stacktrace");
+ return;
+ }
+ trace.nr_entries = ra->trace_len;
+ trace.entries = ra->trace;
+ print_stack_trace(&trace, 2);
+}
+#else
+static inline void __save_stack_trace(struct ref_action *ra)
+{
+}
+
+static inline void __print_stack_trace(struct btrfs_fs_info *fs_info,
+ struct ref_action *ra)
+{
+ btrfs_err(fs_info, " ref-verify: no stacktrace support");
+}
+#endif
+
+static void free_block_entry(struct block_entry *be)
+{
+ struct root_entry *re;
+ struct ref_entry *ref;
+ struct ref_action *ra;
+ struct rb_node *n;
+
+ while ((n = rb_first(&be->roots))) {
+ re = rb_entry(n, struct root_entry, node);
+ rb_erase(&re->node, &be->roots);
+ kfree(re);
+ }
+
+ while ((n = rb_first(&be->refs))) {
+ ref = rb_entry(n, struct ref_entry, node);
+ rb_erase(&ref->node, &be->refs);
+ kfree(ref);
+ }
+
+ while (!list_empty(&be->actions)) {
+ ra = list_first_entry(&be->actions, struct ref_action,
+ list);
+ list_del(&ra->list);
+ kfree(ra);
+ }
+ kfree(be);
+}
+
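+/*
+ * Look up or create the block entry for @bytenr. On success this returns with
+ * fs_info->ref_verify_lock held and the caller must drop it; on allocation
+ * failure it returns ERR_PTR(-ENOMEM) without taking the lock.
+ */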
+static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
+ u64 bytenr, u64 len,
+ u64 root_objectid)
+{
+ struct block_entry *be = NULL, *exist;
+ struct root_entry *re = NULL;
+
+ re = kzalloc(sizeof(struct root_entry), GFP_KERNEL);
+ be = kzalloc(sizeof(struct block_entry), GFP_KERNEL);
+ if (!be || !re) {
+ kfree(re);
+ kfree(be);
+ return ERR_PTR(-ENOMEM);
+ }
+ be->bytenr = bytenr;
+ be->len = len;
+
+ re->root_objectid = root_objectid;
+ re->num_refs = 0;
+
+ spin_lock(&fs_info->ref_verify_lock);
+ exist = insert_block_entry(&fs_info->block_tree, be);
+ if (exist) {
+ if (root_objectid) {
+ struct root_entry *exist_re;
+
+ exist_re = insert_root_entry(&exist->roots, re);
+ if (exist_re)
+ kfree(re);
+ }
+ kfree(be);
+ return exist;
+ }
+
+ be->num_refs = 0;
+ be->metadata = 0;
+ be->from_disk = 0;
+ be->roots = RB_ROOT;
+ be->refs = RB_ROOT;
+ INIT_LIST_HEAD(&be->actions);
+ if (root_objectid)
+ insert_root_entry(&be->roots, re);
+ else
+ kfree(re);
+ return be;
+}
+
+static int add_tree_block(struct btrfs_fs_info *fs_info, u64 ref_root,
+ u64 parent, u64 bytenr, int level)
+{
+ struct block_entry *be;
+ struct root_entry *re;
+ struct ref_entry *ref = NULL, *exist;
+
+ ref = kmalloc(sizeof(struct ref_entry), GFP_KERNEL);
+ if (!ref)
+ return -ENOMEM;
+
+ if (parent)
+ ref->root_objectid = 0;
+ else
+ ref->root_objectid = ref_root;
+ ref->parent = parent;
+ ref->owner = level;
+ ref->offset = 0;
+ ref->num_refs = 1;
+
+ be = add_block_entry(fs_info, bytenr, fs_info->nodesize, ref_root);
+ if (IS_ERR(be)) {
+ kfree(ref);
+ return PTR_ERR(be);
+ }
+ be->num_refs++;
+ be->from_disk = 1;
+ be->metadata = 1;
+
+ if (!parent) {
+ ASSERT(ref_root);
+ re = lookup_root_entry(&be->roots, ref_root);
+ ASSERT(re);
+ re->num_refs++;
+ }
+ exist = insert_ref_entry(&be->refs, ref);
+ if (exist) {
+ exist->num_refs++;
+ kfree(ref);
+ }
+ spin_unlock(&fs_info->ref_verify_lock);
+
+ return 0;
+}
+
+static int add_shared_data_ref(struct btrfs_fs_info *fs_info,
+ u64 parent, u32 num_refs, u64 bytenr,
+ u64 num_bytes)
+{
+ struct block_entry *be;
+ struct ref_entry *ref;
+
+ ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
+ if (!ref)
+ return -ENOMEM;
+ be = add_block_entry(fs_info, bytenr, num_bytes, 0);
+ if (IS_ERR(be)) {
+ kfree(ref);
+ return PTR_ERR(be);
+ }
+ be->num_refs += num_refs;
+
+ ref->parent = parent;
+ ref->num_refs = num_refs;
+ if (insert_ref_entry(&be->refs, ref)) {
+ spin_unlock(&fs_info->ref_verify_lock);
+ btrfs_err(fs_info, "existing shared ref when reading from disk?");
+ kfree(ref);
+ return -EINVAL;
+ }
+ spin_unlock(&fs_info->ref_verify_lock);
+ return 0;
+}
+
+static int add_extent_data_ref(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf,
+ struct btrfs_extent_data_ref *dref,
+ u64 bytenr, u64 num_bytes)
+{
+ struct block_entry *be;
+ struct ref_entry *ref;
+ struct root_entry *re;
+ u64 ref_root = btrfs_extent_data_ref_root(leaf, dref);
+ u64 owner = btrfs_extent_data_ref_objectid(leaf, dref);
+ u64 offset = btrfs_extent_data_ref_offset(leaf, dref);
+ u32 num_refs = btrfs_extent_data_ref_count(leaf, dref);
+
+ ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
+ if (!ref)
+ return -ENOMEM;
+ be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
+ if (IS_ERR(be)) {
+ kfree(ref);
+ return PTR_ERR(be);
+ }
+ be->num_refs += num_refs;
+
+ ref->parent = 0;
+ ref->owner = owner;
+ ref->root_objectid = ref_root;
+ ref->offset = offset;
+ ref->num_refs = num_refs;
+ if (insert_ref_entry(&be->refs, ref)) {
+ spin_unlock(&fs_info->ref_verify_lock);
+ btrfs_err(fs_info, "existing ref when reading from disk?");
+ kfree(ref);
+ return -EINVAL;
+ }
+
+ re = lookup_root_entry(&be->roots, ref_root);
+ if (!re) {
+ spin_unlock(&fs_info->ref_verify_lock);
+ btrfs_err(fs_info, "missing root in new block entry?");
+ return -EINVAL;
+ }
+ re->num_refs += num_refs;
+ spin_unlock(&fs_info->ref_verify_lock);
+ return 0;
+}
+
+static int process_extent_item(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path, struct btrfs_key *key,
+ int slot, int *tree_block_level)
+{
+ struct btrfs_extent_item *ei;
+ struct btrfs_extent_inline_ref *iref;
+ struct btrfs_extent_data_ref *dref;
+ struct btrfs_shared_data_ref *sref;
+ struct extent_buffer *leaf = path->nodes[0];
+ u32 item_size = btrfs_item_size_nr(leaf, slot);
+ unsigned long end, ptr;
+ u64 offset, flags, count;
+ int type, ret;
+
+ ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
+ flags = btrfs_extent_flags(leaf, ei);
+
+ if ((key->type == BTRFS_EXTENT_ITEM_KEY) &&
+ flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ struct btrfs_tree_block_info *info;
+
+ info = (struct btrfs_tree_block_info *)(ei + 1);
+ *tree_block_level = btrfs_tree_block_level(leaf, info);
+ iref = (struct btrfs_extent_inline_ref *)(info + 1);
+ } else {
+ if (key->type == BTRFS_METADATA_ITEM_KEY)
+ *tree_block_level = key->offset;
+ iref = (struct btrfs_extent_inline_ref *)(ei + 1);
+ }
+
+ ptr = (unsigned long)iref;
+ end = (unsigned long)ei + item_size;
+ while (ptr < end) {
+ iref = (struct btrfs_extent_inline_ref *)ptr;
+ type = btrfs_extent_inline_ref_type(leaf, iref);
+ offset = btrfs_extent_inline_ref_offset(leaf, iref);
+ switch (type) {
+ case BTRFS_TREE_BLOCK_REF_KEY:
+ ret = add_tree_block(fs_info, offset, 0, key->objectid,
+ *tree_block_level);
+ break;
+ case BTRFS_SHARED_BLOCK_REF_KEY:
+ ret = add_tree_block(fs_info, 0, offset, key->objectid,
+ *tree_block_level);
+ break;
+ case BTRFS_EXTENT_DATA_REF_KEY:
+ dref = (struct btrfs_extent_data_ref *)(&iref->offset);
+ ret = add_extent_data_ref(fs_info, leaf, dref,
+ key->objectid, key->offset);
+ break;
+ case BTRFS_SHARED_DATA_REF_KEY:
+ sref = (struct btrfs_shared_data_ref *)(iref + 1);
+ count = btrfs_shared_data_ref_count(leaf, sref);
+ ret = add_shared_data_ref(fs_info, offset, count,
+ key->objectid, key->offset);
+ break;
+ default:
+ btrfs_err(fs_info, "invalid key type in iref");
+ ret = -EINVAL;
+ break;
+ }
+ if (ret)
+ break;
+ ptr += btrfs_extent_inline_ref_size(type);
+ }
+ return ret;
+}
+
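+/* Record every extent reference found in a single extent tree leaf */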
+static int process_leaf(struct btrfs_root *root,
+ struct btrfs_path *path, u64 *bytenr, u64 *num_bytes)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_extent_data_ref *dref;
+ struct btrfs_shared_data_ref *sref;
+ u32 count;
+ int i = 0, tree_block_level = 0, ret;
+ struct btrfs_key key;
+ int nritems = btrfs_header_nritems(leaf);
+
+ for (i = 0; i < nritems; i++) {
+ btrfs_item_key_to_cpu(leaf, &key, i);
+ switch (key.type) {
+ case BTRFS_EXTENT_ITEM_KEY:
+ *num_bytes = key.offset;
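+ /* fall through */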
+ case BTRFS_METADATA_ITEM_KEY:
+ *bytenr = key.objectid;
+ ret = process_extent_item(fs_info, path, &key, i,
+ &tree_block_level);
+ break;
+ case BTRFS_TREE_BLOCK_REF_KEY:
+ ret = add_tree_block(fs_info, key.offset, 0,
+ key.objectid, tree_block_level);
+ break;
+ case BTRFS_SHARED_BLOCK_REF_KEY:
+ ret = add_tree_block(fs_info, 0, key.offset,
+ key.objectid, tree_block_level);
+ break;
+ case BTRFS_EXTENT_DATA_REF_KEY:
+ dref = btrfs_item_ptr(leaf, i,
+ struct btrfs_extent_data_ref);
+ ret = add_extent_data_ref(fs_info, leaf, dref, *bytenr,
+ *num_bytes);
+ break;
+ case BTRFS_SHARED_DATA_REF_KEY:
+ sref = btrfs_item_ptr(leaf, i,
+ struct btrfs_shared_data_ref);
+ count = btrfs_shared_data_ref_count(leaf, sref);
+ ret = add_shared_data_ref(fs_info, key.offset, count,
+ *bytenr, *num_bytes);
+ break;
+ default:
+ break;
+ }
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/* Walk down to the leaf from the given level */
+static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
+ int level, u64 *bytenr, u64 *num_bytes)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct extent_buffer *eb;
+ u64 block_bytenr, gen;
+ int ret = 0;
+
+ while (level >= 0) {
+ if (level) {
+ block_bytenr = btrfs_node_blockptr(path->nodes[level],
+ path->slots[level]);
+ gen = btrfs_node_ptr_generation(path->nodes[level],
+ path->slots[level]);
+ eb = read_tree_block(fs_info, block_bytenr, gen);
+ if (IS_ERR(eb))
+ return PTR_ERR(eb);
+ if (!extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
+ return -EIO;
+ }
+ btrfs_tree_read_lock(eb);
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ path->nodes[level-1] = eb;
+ path->slots[level-1] = 0;
+ path->locks[level-1] = BTRFS_READ_LOCK_BLOCKING;
+ } else {
+ ret = process_leaf(root, path, bytenr, num_bytes);
+ if (ret)
+ break;
+ }
+ level--;
+ }
+ return ret;
+}
+
+/* Walk up to the next node that needs to be processed */
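+/* Returns 0 when it finds the next node to process (setting *level), 1 when done. */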
+static int walk_up_tree(struct btrfs_root *root, struct btrfs_path *path,
+ int *level)
+{
+ int l;
+
+ for (l = 0; l < BTRFS_MAX_LEVEL; l++) {
+ if (!path->nodes[l])
+ continue;
+ if (l) {
+ path->slots[l]++;
+ if (path->slots[l] <
+ btrfs_header_nritems(path->nodes[l])) {
+ *level = l;
+ return 0;
+ }
+ }
+ btrfs_tree_unlock_rw(path->nodes[l], path->locks[l]);
+ free_extent_buffer(path->nodes[l]);
+ path->nodes[l] = NULL;
+ path->slots[l] = 0;
+ path->locks[l] = 0;
+ }
+
+ return 1;
+}
+
+static void dump_ref_action(struct btrfs_fs_info *fs_info,
+ struct ref_action *ra)
+{
+ btrfs_err(fs_info,
+" Ref action %d, root %llu, ref_root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
+ ra->action, ra->root, ra->ref.root_objectid, ra->ref.parent,
+ ra->ref.owner, ra->ref.offset, ra->ref.num_refs);
+ __print_stack_trace(fs_info, ra);
+}
+
+/*
+ * Dumps all the information from the block entry to printk; it's going to be
+ * awesome.
+ */
+static void dump_block_entry(struct btrfs_fs_info *fs_info,
+ struct block_entry *be)
+{
+ struct ref_entry *ref;
+ struct root_entry *re;
+ struct ref_action *ra;
+ struct rb_node *n;
+
+ btrfs_err(fs_info,
+"dumping block entry [%llu %llu], num_refs %llu, metadata %d, from disk %d",
+ be->bytenr, be->len, be->num_refs, be->metadata,
+ be->from_disk);
+
+ for (n = rb_first(&be->refs); n; n = rb_next(n)) {
+ ref = rb_entry(n, struct ref_entry, node);
+ btrfs_err(fs_info,
+" ref root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
+ ref->root_objectid, ref->parent, ref->owner,
+ ref->offset, ref->num_refs);
+ }
+
+ for (n = rb_first(&be->roots); n; n = rb_next(n)) {
+ re = rb_entry(n, struct root_entry, node);
+ btrfs_err(fs_info, " root entry %llu, num_refs %llu",
+ re->root_objectid, re->num_refs);
+ }
+
+ list_for_each_entry(ra, &be->actions, list)
+ dump_ref_action(fs_info, ra);
+}
+
+/*
+ * btrfs_ref_tree_mod: called when we modify a ref for a bytenr
+ * @root: the root we are making this modification from.
+ * @bytenr: the bytenr we are modifying.
+ * @num_bytes: number of bytes.
+ * @parent: the parent bytenr.
+ * @ref_root: the original root owner of the bytenr.
+ * @owner: level in the case of metadata, inode in the case of data.
+ * @offset: 0 for metadata, file offset for data.
+ * @action: the action that we are doing, this is the same as the delayed ref
+ * action.
+ *
+ * This will add an action item for the given bytenr and do sanity checks to make
+ * sure we haven't messed something up. If we are making a new allocation and
+ * this block entry has history, we will delete all previous actions, as long as
+ * our sanity checks pass, since they are no longer needed.
+ */
+int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
+ u64 parent, u64 ref_root, u64 owner, u64 offset,
+ int action)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct ref_entry *ref = NULL, *exist;
+ struct ref_action *ra = NULL;
+ struct block_entry *be = NULL;
+ struct root_entry *re = NULL;
+ int ret = 0;
+ bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
+
+ if (!btrfs_test_opt(root->fs_info, REF_VERIFY))
+ return 0;
+
+ ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
+ ra = kmalloc(sizeof(struct ref_action), GFP_NOFS);
+ if (!ra || !ref) {
+ kfree(ref);
+ kfree(ra);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (parent) {
+ ref->parent = parent;
+ } else {
+ ref->root_objectid = ref_root;
+ ref->owner = owner;
+ ref->offset = offset;
+ }
+ ref->num_refs = (action == BTRFS_DROP_DELAYED_REF) ? -1 : 1;
+
+ memcpy(&ra->ref, ref, sizeof(struct ref_entry));
+ /*
+ * Save the extra info from the delayed ref in the ref action to make it
+ * easier to figure out what is happening. The real refs we add to the
+ * ref tree need to reflect what we save on disk so they match any
+ * on-disk refs we pre-loaded.
+ */
+ ra->ref.owner = owner;
+ ra->ref.offset = offset;
+ ra->ref.root_objectid = ref_root;
+ __save_stack_trace(ra);
+
+ INIT_LIST_HEAD(&ra->list);
+ ra->action = action;
+ ra->root = root->objectid;
+
+ /*
+ * This is an allocation; preallocate the block_entry in case we haven't
+ * used it before.
+ */
+ ret = -EINVAL;
+ if (action == BTRFS_ADD_DELAYED_EXTENT) {
+ /*
+ * For subvol_create we'll just pass in whatever the parent root
+ * is and the new root objectid, so let's not treat the passed-in
+ * root as if it really has a ref for this bytenr.
+ */
+ be = add_block_entry(root->fs_info, bytenr, num_bytes, ref_root);
+ if (IS_ERR(be)) {
+ kfree(ra);
+ ret = PTR_ERR(be);
+ goto out;
+ }
+ be->num_refs++;
+ if (metadata)
+ be->metadata = 1;
+
+ if (be->num_refs != 1) {
+ btrfs_err(fs_info,
+ "re-allocated a block that still has references to it!");
+ dump_block_entry(fs_info, be);
+ dump_ref_action(fs_info, ra);
+ goto out_unlock;
+ }
+
+ while (!list_empty(&be->actions)) {
+ struct ref_action *tmp;
+
+ tmp = list_first_entry(&be->actions, struct ref_action,
+ list);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ } else {
+ struct root_entry *tmp;
+
+ if (!parent) {
+ re = kmalloc(sizeof(struct root_entry), GFP_NOFS);
+ if (!re) {
+ kfree(ref);
+ kfree(ra);
+ ret = -ENOMEM;
+ goto out;
+ }
+ /*
+ * This is the root that is modifying us, so it's the
+ * one we want to look up below when we modify the
+ * re->num_refs.
+ */
+ ref_root = root->objectid;
+ re->root_objectid = root->objectid;
+ re->num_refs = 0;
+ }
+
+ spin_lock(&root->fs_info->ref_verify_lock);
+ be = lookup_block_entry(&root->fs_info->block_tree, bytenr);
+ if (!be) {
+ btrfs_err(fs_info,
+"trying to do action %d to bytenr %llu num_bytes %llu but there is no existing entry!",
+ action, (unsigned long long)bytenr,
+ (unsigned long long)num_bytes);
+ dump_ref_action(fs_info, ra);
+ kfree(ref);
+ kfree(ra);
+ goto out_unlock;
+ }
+
+ if (!parent) {
+ tmp = insert_root_entry(&be->roots, re);
+ if (tmp) {
+ kfree(re);
+ re = tmp;
+ }
+ }
+ }
+
+ exist = insert_ref_entry(&be->refs, ref);
+ if (exist) {
+ if (action == BTRFS_DROP_DELAYED_REF) {
+ if (exist->num_refs == 0) {
+ btrfs_err(fs_info,
+"dropping a ref for a existing root that doesn't have a ref on the block");
+ dump_block_entry(fs_info, be);
+ dump_ref_action(fs_info, ra);
+ kfree(ra);
+ goto out_unlock;
+ }
+ exist->num_refs--;
+ if (exist->num_refs == 0) {
+ rb_erase(&exist->node, &be->refs);
+ kfree(exist);
+ }
+ } else if (!be->metadata) {
+ exist->num_refs++;
+ } else {
+ btrfs_err(fs_info,
+"attempting to add another ref for an existing ref on a tree block");
+ dump_block_entry(fs_info, be);
+ dump_ref_action(fs_info, ra);
+ kfree(ra);
+ goto out_unlock;
+ }
+ kfree(ref);
+ } else {
+ if (action == BTRFS_DROP_DELAYED_REF) {
+ btrfs_err(fs_info,
+"dropping a ref for a root that doesn't have a ref on the block");
+ dump_block_entry(fs_info, be);
+ dump_ref_action(fs_info, ra);
+ kfree(ra);
+ goto out_unlock;
+ }
+ }
+
+ if (!parent && !re) {
+ re = lookup_root_entry(&be->roots, ref_root);
+ if (!re) {
+ /*
+ * This shouldn't happen because we will add our re
+ * above when we look up the be with !parent, but just in
+ * case catch it here so we don't panic because I
+ * didn't think of some other corner case.
+ */
+ btrfs_err(fs_info, "failed to find root %llu for %llu",
+ root->objectid, be->bytenr);
+ dump_block_entry(fs_info, be);
+ dump_ref_action(fs_info, ra);
+ kfree(ra);
+ goto out_unlock;
+ }
+ }
+ if (action == BTRFS_DROP_DELAYED_REF) {
+ if (re)
+ re->num_refs--;
+ be->num_refs--;
+ } else if (action == BTRFS_ADD_DELAYED_REF) {
+ be->num_refs++;
+ if (re)
+ re->num_refs++;
+ }
+ list_add_tail(&ra->list, &be->actions);
+ ret = 0;
+out_unlock:
+ spin_unlock(&root->fs_info->ref_verify_lock);
+out:
+ if (ret)
+ btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+ return ret;
+}
+
+/* Free up the ref cache */
+void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info)
+{
+ struct block_entry *be;
+ struct rb_node *n;
+
+ if (!btrfs_test_opt(fs_info, REF_VERIFY))
+ return;
+
+ spin_lock(&fs_info->ref_verify_lock);
+ while ((n = rb_first(&fs_info->block_tree))) {
+ be = rb_entry(n, struct block_entry, node);
+ rb_erase(&be->node, &fs_info->block_tree);
+ free_block_entry(be);
+ cond_resched_lock(&fs_info->ref_verify_lock);
+ }
+ spin_unlock(&fs_info->ref_verify_lock);
+}
+
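+/*
+ * Drop all block entries that fall inside [start, start + len), warning about
+ * any entry that overlaps the boundaries of that range.
+ */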
+void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
+ u64 len)
+{
+ struct block_entry *be = NULL, *entry;
+ struct rb_node *n;
+
+ if (!btrfs_test_opt(fs_info, REF_VERIFY))
+ return;
+
+ spin_lock(&fs_info->ref_verify_lock);
+ n = fs_info->block_tree.rb_node;
+ while (n) {
+ entry = rb_entry(n, struct block_entry, node);
+ if (entry->bytenr < start) {
+ n = n->rb_right;
+ } else if (entry->bytenr > start) {
+ n = n->rb_left;
+ } else {
+ be = entry;
+ break;
+ }
+ /* We want to get as close to start as possible */
+ if (be == NULL ||
+ (entry->bytenr < start && be->bytenr > start) ||
+ (entry->bytenr < start && entry->bytenr > be->bytenr))
+ be = entry;
+ }
+
+ /*
+ * We could have an empty block group; maybe add a check for this case to
+ * verify it was actually empty?
+ */
+ if (!be) {
+ spin_unlock(&fs_info->ref_verify_lock);
+ return;
+ }
+
+ n = &be->node;
+ while (n) {
+ be = rb_entry(n, struct block_entry, node);
+ n = rb_next(n);
+ if (be->bytenr < start && be->bytenr + be->len > start) {
+ btrfs_err(fs_info,
+ "block entry overlaps a block group [%llu,%llu]!",
+ start, len);
+ dump_block_entry(fs_info, be);
+ continue;
+ }
+ if (be->bytenr < start)
+ continue;
+ if (be->bytenr >= start + len)
+ break;
+ if (be->bytenr + be->len > start + len) {
+ btrfs_err(fs_info,
+ "block entry overlaps a block group [%llu,%llu]!",
+ start, len);
+ dump_block_entry(fs_info, be);
+ }
+ rb_erase(&be->node, &fs_info->block_tree);
+ free_block_entry(be);
+ }
+ spin_unlock(&fs_info->ref_verify_lock);
+}
+
+/* Walk down all roots and build the ref tree, meant to be called at mount */
+int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_path *path;
+ struct extent_buffer *eb;
+ u64 bytenr = 0, num_bytes = 0;
+ int ret, level;
+
+ if (!btrfs_test_opt(fs_info, REF_VERIFY))
+ return 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ eb = btrfs_read_lock_root_node(fs_info->extent_root);
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ level = btrfs_header_level(eb);
+ path->nodes[level] = eb;
+ path->slots[level] = 0;
+ path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
+
+ while (1) {
+ /*
+ * We have to keep track of the bytenr/num_bytes we last hit
+ * because we could have run out of space for an inline ref, and
+ * would have had to add a ref key item, which may appear on a
+ * different leaf from the original extent item.
+ */
+ ret = walk_down_tree(fs_info->extent_root, path, level,
+ &bytenr, &num_bytes);
+ if (ret)
+ break;
+ ret = walk_up_tree(fs_info->extent_root, path, &level);
+ if (ret < 0)
+ break;
+ if (ret > 0) {
+ ret = 0;
+ break;
+ }
+ }
+ if (ret) {
+ btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+ btrfs_free_ref_cache(fs_info);
+ }
+ btrfs_free_path(path);
+ return ret;
+}
diff --git a/fs/btrfs/ref-verify.h b/fs/btrfs/ref-verify.h
new file mode 100644
index 000000000000..3bf02ce0e1e2
--- /dev/null
+++ b/fs/btrfs/ref-verify.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2014 Facebook. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+#ifndef __REF_VERIFY__
+#define __REF_VERIFY__
+
+#ifdef CONFIG_BTRFS_FS_REF_VERIFY
+int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info);
+void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info);
+int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
+ u64 parent, u64 ref_root, u64 owner, u64 offset,
+ int action);
+void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
+ u64 len);
+
+static inline void btrfs_init_ref_verify(struct btrfs_fs_info *fs_info)
+{
+ spin_lock_init(&fs_info->ref_verify_lock);
+ fs_info->block_tree = RB_ROOT;
+}
+#else
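+/* No-op stubs for kernels built without CONFIG_BTRFS_FS_REF_VERIFY */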
+static inline int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
+{
+ return 0;
+}
+
+static inline void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info)
+{
+}
+
+static inline int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr,
+ u64 num_bytes, u64 parent, u64 ref_root,
+ u64 owner, u64 offset, int action)
+{
+ return 0;
+}
+
+static inline void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info,
+ u64 start, u64 len)
+{
+}
+
+static inline void btrfs_init_ref_verify(struct btrfs_fs_info *fs_info)
+{
+}
+
+#endif /* CONFIG_BTRFS_FS_REF_VERIFY */
+#endif /* __REF_VERIFY__ */
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 9841faef08ea..4cf2eb67eba6 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1742,7 +1742,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
dirty = 1;
key.offset -= btrfs_file_extent_offset(leaf, fi);
- ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
+ ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
num_bytes, parent,
btrfs_header_owner(leaf),
key.objectid, key.offset);
@@ -1751,7 +1751,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
break;
}
- ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
+ ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
parent, btrfs_header_owner(leaf),
key.objectid, key.offset);
if (ret) {
@@ -1952,21 +1952,21 @@ again:
path->slots[level], old_ptr_gen);
btrfs_mark_buffer_dirty(path->nodes[level]);
- ret = btrfs_inc_extent_ref(trans, fs_info, old_bytenr,
+ ret = btrfs_inc_extent_ref(trans, src, old_bytenr,
blocksize, path->nodes[level]->start,
src->root_key.objectid, level - 1, 0);
BUG_ON(ret);
- ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
+ ret = btrfs_inc_extent_ref(trans, dest, new_bytenr,
blocksize, 0, dest->root_key.objectid,
level - 1, 0);
BUG_ON(ret);
- ret = btrfs_free_extent(trans, fs_info, new_bytenr, blocksize,
+ ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
path->nodes[level]->start,
src->root_key.objectid, level - 1, 0);
BUG_ON(ret);
- ret = btrfs_free_extent(trans, fs_info, old_bytenr, blocksize,
+ ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
0, dest->root_key.objectid, level - 1,
0);
BUG_ON(ret);
@@ -2808,7 +2808,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
trans->transid);
btrfs_mark_buffer_dirty(upper->eb);
- ret = btrfs_inc_extent_ref(trans, root->fs_info,
+ ret = btrfs_inc_extent_ref(trans, root,
node->eb->start, blocksize,
upper->eb->start,
btrfs_header_owner(upper->eb),
@@ -3246,6 +3246,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
put_page(page);
btrfs_delalloc_release_metadata(BTRFS_I(inode),
PAGE_SIZE);
+ btrfs_delalloc_release_extents(BTRFS_I(inode),
+ PAGE_SIZE);
ret = -EIO;
goto out;
}
@@ -3275,6 +3277,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
put_page(page);
index++;
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
balance_dirty_pages_ratelimited(inode->i_mapping);
btrfs_throttle(fs_info);
}
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 95bcc3cce78f..3338407ef0f0 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -226,10 +226,6 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
struct btrfs_root *root;
int err = 0;
int ret;
- bool can_recover = true;
-
- if (sb_rdonly(fs_info->sb))
- can_recover = false;
path = btrfs_alloc_path();
if (!path)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index e3f6c49e5c4d..b2f871d80982 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -231,7 +231,7 @@ struct scrub_warning {
struct btrfs_path *path;
u64 extent_item_size;
const char *errstr;
- sector_t sector;
+ u64 physical;
u64 logical;
struct btrfs_device *dev;
};
@@ -797,10 +797,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
*/
for (i = 0; i < ipath->fspath->elem_cnt; ++i)
btrfs_warn_in_rcu(fs_info,
- "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
+"%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %llu, links %u (path: %s)",
swarn->errstr, swarn->logical,
rcu_str_deref(swarn->dev->name),
- (unsigned long long)swarn->sector,
+ swarn->physical,
root, inum, offset,
min(isize - offset, (u64)PAGE_SIZE), nlink,
(char *)(unsigned long)ipath->fspath->val[i]);
@@ -810,10 +810,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
err:
btrfs_warn_in_rcu(fs_info,
- "%s at logical %llu on dev %s, sector %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
+ "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
swarn->errstr, swarn->logical,
rcu_str_deref(swarn->dev->name),
- (unsigned long long)swarn->sector,
+ swarn->physical,
root, inum, offset, ret);
free_ipath(ipath);
@@ -845,7 +845,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
if (!path)
return;
- swarn.sector = (sblock->pagev[0]->physical) >> 9;
+ swarn.physical = sblock->pagev[0]->physical;
swarn.logical = sblock->pagev[0]->logical;
swarn.errstr = errstr;
swarn.dev = NULL;
@@ -868,10 +868,10 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
item_size, &ref_root,
&ref_level);
btrfs_warn_in_rcu(fs_info,
- "%s at logical %llu on dev %s, sector %llu: metadata %s (level %d) in tree %llu",
+"%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
errstr, swarn.logical,
rcu_str_deref(dev->name),
- (unsigned long long)swarn.sector,
+ swarn.physical,
ref_level ? "node" : "leaf",
ret < 0 ? -1 : ref_level,
ret < 0 ? -1 : ref_root);
@@ -883,7 +883,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
swarn.dev = dev;
iterate_extent_inodes(fs_info, found_key.objectid,
extent_item_pos, 1,
- scrub_print_warning_inode, &swarn);
+ scrub_print_warning_inode, &swarn, false);
}
out:
@@ -1047,7 +1047,7 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
* can be found.
*/
ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
- scrub_fixup_readpage, fixup);
+ scrub_fixup_readpage, fixup, false);
if (ret < 0) {
uncorrectable = 1;
goto out;
@@ -4390,7 +4390,7 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
}
ret = iterate_inodes_from_logical(logical, fs_info, path,
- record_inode_for_nocow, nocow_ctx);
+ record_inode_for_nocow, nocow_ctx, false);
if (ret != 0 && ret != -ENOENT) {
btrfs_warn(fs_info,
"iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 8fd195cfe81b..c10e4c70f02d 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -26,6 +26,7 @@
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
+#include <linux/compat.h>
#include "send.h"
#include "backref.h"
@@ -992,7 +993,6 @@ typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
* path must point to the dir item when called.
*/
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *found_key,
iterate_dir_item_t iterate, void *ctx)
{
int ret = 0;
@@ -1271,12 +1271,6 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
*/
if (ino >= bctx->cur_objectid)
return 0;
-#if 0
- if (ino > bctx->cur_objectid)
- return 0;
- if (offset + bctx->extent_len > bctx->cur_offset)
- return 0;
-#endif
}
bctx->found++;
@@ -1429,7 +1423,7 @@ static int find_extent_clone(struct send_ctx *sctx,
extent_item_pos = 0;
ret = iterate_extent_inodes(fs_info, found_key.objectid,
extent_item_pos, 1, __iterate_backrefs,
- backref_ctx);
+ backref_ctx, false);
if (ret < 0)
goto out;
@@ -4106,8 +4100,8 @@ out:
return ret;
}
-static int record_ref(struct btrfs_root *root, int num, u64 dir, int index,
- struct fs_path *name, void *ctx, struct list_head *refs)
+static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
+ void *ctx, struct list_head *refs)
{
int ret = 0;
struct send_ctx *sctx = ctx;
@@ -4143,8 +4137,7 @@ static int __record_new_ref(int num, u64 dir, int index,
void *ctx)
{
struct send_ctx *sctx = ctx;
- return record_ref(sctx->send_root, num, dir, index, name,
- ctx, &sctx->new_refs);
+ return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
}
@@ -4153,8 +4146,8 @@ static int __record_deleted_ref(int num, u64 dir, int index,
void *ctx)
{
struct send_ctx *sctx = ctx;
- return record_ref(sctx->parent_root, num, dir, index, name,
- ctx, &sctx->deleted_refs);
+ return record_ref(sctx->parent_root, dir, name, ctx,
+ &sctx->deleted_refs);
}
static int record_new_ref(struct send_ctx *sctx)
@@ -4498,7 +4491,7 @@ static int process_new_xattr(struct send_ctx *sctx)
int ret = 0;
ret = iterate_dir_item(sctx->send_root, sctx->left_path,
- sctx->cmp_key, __process_new_xattr, sctx);
+ __process_new_xattr, sctx);
return ret;
}
@@ -4506,7 +4499,7 @@ static int process_new_xattr(struct send_ctx *sctx)
static int process_deleted_xattr(struct send_ctx *sctx)
{
return iterate_dir_item(sctx->parent_root, sctx->right_path,
- sctx->cmp_key, __process_deleted_xattr, sctx);
+ __process_deleted_xattr, sctx);
}
struct find_xattr_ctx {
@@ -4551,7 +4544,7 @@ static int find_xattr(struct btrfs_root *root,
ctx.found_data = NULL;
ctx.found_data_len = 0;
- ret = iterate_dir_item(root, path, key, __find_xattr, &ctx);
+ ret = iterate_dir_item(root, path, __find_xattr, &ctx);
if (ret < 0)
return ret;
@@ -4621,11 +4614,11 @@ static int process_changed_xattr(struct send_ctx *sctx)
int ret = 0;
ret = iterate_dir_item(sctx->send_root, sctx->left_path,
- sctx->cmp_key, __process_changed_new_xattr, sctx);
+ __process_changed_new_xattr, sctx);
if (ret < 0)
goto out;
ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
- sctx->cmp_key, __process_changed_deleted_xattr, sctx);
+ __process_changed_deleted_xattr, sctx);
out:
return ret;
@@ -4675,8 +4668,7 @@ static int process_all_new_xattrs(struct send_ctx *sctx)
goto out;
}
- ret = iterate_dir_item(root, path, &found_key,
- __process_new_xattr, sctx);
+ ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
if (ret < 0)
goto out;
@@ -4723,16 +4715,27 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
/* initial readahead */
memset(&sctx->ra, 0, sizeof(struct file_ra_state));
file_ra_state_init(&sctx->ra, inode->i_mapping);
- page_cache_sync_readahead(inode->i_mapping, &sctx->ra, NULL, index,
- last_index - index + 1);
while (index <= last_index) {
unsigned cur_len = min_t(unsigned, len,
PAGE_SIZE - pg_offset);
- page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
+
+ page = find_lock_page(inode->i_mapping, index);
if (!page) {
- ret = -ENOMEM;
- break;
+ page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
+ NULL, index, last_index + 1 - index);
+
+ page = find_or_create_page(inode->i_mapping, index,
+ GFP_KERNEL);
+ if (!page) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ if (PageReadahead(page)) {
+ page_cache_async_readahead(inode->i_mapping, &sctx->ra,
+ NULL, page, index, last_index + 1 - index);
}
if (!PageUptodate(page)) {
@@ -6162,9 +6165,7 @@ out:
* Updates compare related fields in sctx and simply forwards to the actual
* changed_xxx functions.
*/
-static int changed_cb(struct btrfs_root *left_root,
- struct btrfs_root *right_root,
- struct btrfs_path *left_path,
+static int changed_cb(struct btrfs_path *left_path,
struct btrfs_path *right_path,
struct btrfs_key *key,
enum btrfs_compare_tree_result result,
@@ -6246,8 +6247,8 @@ static int full_send_tree(struct send_ctx *sctx)
slot = path->slots[0];
btrfs_item_key_to_cpu(eb, &found_key, slot);
- ret = changed_cb(send_root, NULL, path, NULL,
- &found_key, BTRFS_COMPARE_TREE_NEW, sctx);
+ ret = changed_cb(path, NULL, &found_key,
+ BTRFS_COMPARE_TREE_NEW, sctx);
if (ret < 0)
goto out;
@@ -6365,13 +6366,12 @@ static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
spin_unlock(&root->root_item_lock);
}
-long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
+long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
{
int ret = 0;
struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
struct btrfs_fs_info *fs_info = send_root->fs_info;
struct btrfs_root *clone_root;
- struct btrfs_ioctl_send_args *arg = NULL;
struct btrfs_key key;
struct send_ctx *sctx = NULL;
u32 i;
@@ -6407,13 +6407,6 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
goto out;
}
- arg = memdup_user(arg_, sizeof(*arg));
- if (IS_ERR(arg)) {
- ret = PTR_ERR(arg);
- arg = NULL;
- goto out;
- }
-
/*
* Check that we don't overflow at later allocations, we request
* clone_sources_count + 1 items, and compare to unsigned long inside
@@ -6654,7 +6647,6 @@ out:
if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
btrfs_root_dec_send_in_progress(sctx->parent_root);
- kfree(arg);
kvfree(clone_sources_tmp);
if (sctx) {
diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h
index 02e00166c4da..3aa4bc55754f 100644
--- a/fs/btrfs/send.h
+++ b/fs/btrfs/send.h
@@ -130,5 +130,5 @@ enum {
#define BTRFS_SEND_A_MAX (__BTRFS_SEND_A_MAX - 1)
#ifdef __KERNEL__
-long btrfs_ioctl_send(struct file *mnt_file, void __user *arg);
+long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg);
#endif
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 161694b66038..65af029559b5 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -202,7 +202,6 @@ static struct ratelimit_state printk_limits[] = {
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
- struct super_block *sb = fs_info->sb;
char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0";
struct va_format vaf;
va_list args;
@@ -228,7 +227,8 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
vaf.va = &args;
if (__ratelimit(ratelimit))
- printk("%sBTRFS %s (device %s): %pV\n", lvl, type, sb->s_id, &vaf);
+ printk("%sBTRFS %s (device %s): %pV\n", lvl, type,
+ fs_info ? fs_info->sb->s_id : "<unknown>", &vaf);
va_end(args);
}
@@ -292,7 +292,7 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
vaf.va = &args;
errstr = btrfs_decode_error(errno);
- if (fs_info && (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR))
+ if (fs_info && (btrfs_test_opt(fs_info, PANIC_ON_FATAL_ERROR)))
panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
s_id, function, line, &vaf, errno, errstr);
@@ -326,6 +326,9 @@ enum {
#ifdef CONFIG_BTRFS_DEBUG
Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
#endif
+#ifdef CONFIG_BTRFS_FS_REF_VERIFY
+ Opt_ref_verify,
+#endif
Opt_err,
};
@@ -387,6 +390,9 @@ static const match_table_t tokens = {
{Opt_fragment_metadata, "fragment=metadata"},
{Opt_fragment_all, "fragment=all"},
#endif
+#ifdef CONFIG_BTRFS_FS_REF_VERIFY
+ {Opt_ref_verify, "ref_verify"},
+#endif
{Opt_err, NULL},
};
@@ -502,6 +508,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
strncmp(args[0].from, "zlib", 4) == 0) {
compress_type = "zlib";
info->compress_type = BTRFS_COMPRESS_ZLIB;
+ info->compress_level =
+ btrfs_compress_str2level(args[0].from);
btrfs_set_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM);
@@ -549,9 +557,9 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
compress_force != saved_compress_force)) ||
(!btrfs_test_opt(info, COMPRESS) &&
no_compress == 1)) {
- btrfs_info(info, "%s %s compression",
+ btrfs_info(info, "%s %s compression, level %d",
(compress_force) ? "force" : "use",
- compress_type);
+ compress_type, info->compress_level);
}
compress_force = false;
break;
@@ -825,6 +833,12 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
break;
#endif
+#ifdef CONFIG_BTRFS_FS_REF_VERIFY
+ case Opt_ref_verify:
+ btrfs_info(info, "doing ref verification");
+ btrfs_set_opt(info->mount_opt, REF_VERIFY);
+ break;
+#endif
case Opt_err:
btrfs_info(info, "unrecognized mount option '%s'", p);
ret = -EINVAL;
@@ -1205,8 +1219,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
* happens. The pending operations are delayed to the
* next commit after thawing.
*/
- if (__sb_start_write(sb, SB_FREEZE_WRITE, false))
- __sb_end_write(sb, SB_FREEZE_WRITE);
+ if (sb_start_write_trylock(sb))
+ sb_end_write(sb);
else
return 0;
trans = btrfs_start_transaction(root, 0);
@@ -1246,6 +1260,8 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_printf(seq, ",compress-force=%s", compress_type);
else
seq_printf(seq, ",compress=%s", compress_type);
+ if (info->compress_level)
+ seq_printf(seq, ":%d", info->compress_level);
}
if (btrfs_test_opt(info, NOSSD))
seq_puts(seq, ",nossd");
@@ -1305,6 +1321,8 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
if (btrfs_test_opt(info, FRAGMENT_METADATA))
seq_puts(seq, ",fragment=metadata");
#endif
+ if (btrfs_test_opt(info, REF_VERIFY))
+ seq_puts(seq, ",ref_verify");
seq_printf(seq, ",subvolid=%llu",
BTRFS_I(d_inode(dentry))->root->root_key.objectid);
seq_puts(seq, ",subvol=");
@@ -2112,7 +2130,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
* succeed even if the Avail is zero. But this is better than the other
* way around.
*/
- thresh = 4 * 1024 * 1024;
+ thresh = SZ_4M;
if (!mixed && total_free_meta - thresh < block_rsv->size)
buf->f_bavail = 0;
@@ -2319,6 +2337,9 @@ static void btrfs_print_mod_info(void)
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
", integrity-checker=on"
#endif
+#ifdef CONFIG_BTRFS_FS_REF_VERIFY
+ ", ref-verify=on"
+#endif
"\n",
btrfs_crc32c_impl());
}
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 883881b16c86..a28bba801264 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -247,7 +247,7 @@ static ssize_t global_rsv_size_show(struct kobject *kobj,
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
return btrfs_show_u64(&block_rsv->size, &block_rsv->lock, buf);
}
-BTRFS_ATTR(global_rsv_size, global_rsv_size_show);
+BTRFS_ATTR(allocation, global_rsv_size, global_rsv_size_show);
static ssize_t global_rsv_reserved_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
@@ -256,15 +256,15 @@ static ssize_t global_rsv_reserved_show(struct kobject *kobj,
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
return btrfs_show_u64(&block_rsv->reserved, &block_rsv->lock, buf);
}
-BTRFS_ATTR(global_rsv_reserved, global_rsv_reserved_show);
+BTRFS_ATTR(allocation, global_rsv_reserved, global_rsv_reserved_show);
#define to_space_info(_kobj) container_of(_kobj, struct btrfs_space_info, kobj)
#define to_raid_kobj(_kobj) container_of(_kobj, struct raid_kobject, kobj)
static ssize_t raid_bytes_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf);
-BTRFS_RAID_ATTR(total_bytes, raid_bytes_show);
-BTRFS_RAID_ATTR(used_bytes, raid_bytes_show);
+BTRFS_ATTR(raid, total_bytes, raid_bytes_show);
+BTRFS_ATTR(raid, used_bytes, raid_bytes_show);
static ssize_t raid_bytes_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -277,7 +277,7 @@ static ssize_t raid_bytes_show(struct kobject *kobj,
down_read(&sinfo->groups_sem);
list_for_each_entry(block_group, &sinfo->block_groups[index], list) {
- if (&attr->attr == BTRFS_RAID_ATTR_PTR(total_bytes))
+ if (&attr->attr == BTRFS_ATTR_PTR(raid, total_bytes))
val += block_group->key.offset;
else
val += btrfs_block_group_used(&block_group->item);
@@ -287,8 +287,8 @@ static ssize_t raid_bytes_show(struct kobject *kobj,
}
static struct attribute *raid_attributes[] = {
- BTRFS_RAID_ATTR_PTR(total_bytes),
- BTRFS_RAID_ATTR_PTR(used_bytes),
+ BTRFS_ATTR_PTR(raid, total_bytes),
+ BTRFS_ATTR_PTR(raid, used_bytes),
NULL
};
@@ -311,7 +311,7 @@ static ssize_t btrfs_space_info_show_##field(struct kobject *kobj, \
struct btrfs_space_info *sinfo = to_space_info(kobj); \
return btrfs_show_u64(&sinfo->field, &sinfo->lock, buf); \
} \
-BTRFS_ATTR(field, btrfs_space_info_show_##field)
+BTRFS_ATTR(space_info, field, btrfs_space_info_show_##field)
static ssize_t btrfs_space_info_show_total_bytes_pinned(struct kobject *kobj,
struct kobj_attribute *a,
@@ -331,19 +331,20 @@ SPACE_INFO_ATTR(bytes_may_use);
SPACE_INFO_ATTR(bytes_readonly);
SPACE_INFO_ATTR(disk_used);
SPACE_INFO_ATTR(disk_total);
-BTRFS_ATTR(total_bytes_pinned, btrfs_space_info_show_total_bytes_pinned);
+BTRFS_ATTR(space_info, total_bytes_pinned,
+ btrfs_space_info_show_total_bytes_pinned);
static struct attribute *space_info_attrs[] = {
- BTRFS_ATTR_PTR(flags),
- BTRFS_ATTR_PTR(total_bytes),
- BTRFS_ATTR_PTR(bytes_used),
- BTRFS_ATTR_PTR(bytes_pinned),
- BTRFS_ATTR_PTR(bytes_reserved),
- BTRFS_ATTR_PTR(bytes_may_use),
- BTRFS_ATTR_PTR(bytes_readonly),
- BTRFS_ATTR_PTR(disk_used),
- BTRFS_ATTR_PTR(disk_total),
- BTRFS_ATTR_PTR(total_bytes_pinned),
+ BTRFS_ATTR_PTR(space_info, flags),
+ BTRFS_ATTR_PTR(space_info, total_bytes),
+ BTRFS_ATTR_PTR(space_info, bytes_used),
+ BTRFS_ATTR_PTR(space_info, bytes_pinned),
+ BTRFS_ATTR_PTR(space_info, bytes_reserved),
+ BTRFS_ATTR_PTR(space_info, bytes_may_use),
+ BTRFS_ATTR_PTR(space_info, bytes_readonly),
+ BTRFS_ATTR_PTR(space_info, disk_used),
+ BTRFS_ATTR_PTR(space_info, disk_total),
+ BTRFS_ATTR_PTR(space_info, total_bytes_pinned),
NULL,
};
@@ -361,8 +362,8 @@ struct kobj_type space_info_ktype = {
};
static const struct attribute *allocation_attrs[] = {
- BTRFS_ATTR_PTR(global_rsv_reserved),
- BTRFS_ATTR_PTR(global_rsv_size),
+ BTRFS_ATTR_PTR(allocation, global_rsv_reserved),
+ BTRFS_ATTR_PTR(allocation, global_rsv_size),
NULL,
};
@@ -415,7 +416,7 @@ static ssize_t btrfs_label_store(struct kobject *kobj,
return len;
}
-BTRFS_ATTR_RW(label, btrfs_label_show, btrfs_label_store);
+BTRFS_ATTR_RW(, label, btrfs_label_show, btrfs_label_store);
static ssize_t btrfs_nodesize_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
@@ -425,7 +426,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->nodesize);
}
-BTRFS_ATTR(nodesize, btrfs_nodesize_show);
+BTRFS_ATTR(, nodesize, btrfs_nodesize_show);
static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
@@ -436,7 +437,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
fs_info->super_copy->sectorsize);
}
-BTRFS_ATTR(sectorsize, btrfs_sectorsize_show);
+BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);
static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
@@ -447,7 +448,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
fs_info->super_copy->sectorsize);
}
-BTRFS_ATTR(clone_alignment, btrfs_clone_alignment_show);
+BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show);
static ssize_t quota_override_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
@@ -487,14 +488,14 @@ static ssize_t quota_override_store(struct kobject *kobj,
return len;
}
-BTRFS_ATTR_RW(quota_override, quota_override_show, quota_override_store);
+BTRFS_ATTR_RW(, quota_override, quota_override_show, quota_override_store);
static const struct attribute *btrfs_attrs[] = {
- BTRFS_ATTR_PTR(label),
- BTRFS_ATTR_PTR(nodesize),
- BTRFS_ATTR_PTR(sectorsize),
- BTRFS_ATTR_PTR(clone_alignment),
- BTRFS_ATTR_PTR(quota_override),
+ BTRFS_ATTR_PTR(, label),
+ BTRFS_ATTR_PTR(, nodesize),
+ BTRFS_ATTR_PTR(, sectorsize),
+ BTRFS_ATTR_PTR(, clone_alignment),
+ BTRFS_ATTR_PTR(, quota_override),
NULL,
};
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
index 4cb908305e5d..80457f31c29f 100644
--- a/fs/btrfs/sysfs.h
+++ b/fs/btrfs/sysfs.h
@@ -21,21 +21,16 @@ enum btrfs_feature_set {
.store = _store, \
}
-#define BTRFS_ATTR_RW(_name, _show, _store) \
- static struct kobj_attribute btrfs_attr_##_name = \
+#define BTRFS_ATTR_RW(_prefix, _name, _show, _store) \
+ static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \
__INIT_KOBJ_ATTR(_name, 0644, _show, _store)
-#define BTRFS_ATTR(_name, _show) \
- static struct kobj_attribute btrfs_attr_##_name = \
+#define BTRFS_ATTR(_prefix, _name, _show) \
+ static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \
__INIT_KOBJ_ATTR(_name, 0444, _show, NULL)
-#define BTRFS_ATTR_PTR(_name) (&btrfs_attr_##_name.attr)
-
-#define BTRFS_RAID_ATTR(_name, _show) \
- static struct kobj_attribute btrfs_raid_attr_##_name = \
- __INIT_KOBJ_ATTR(_name, 0444, _show, NULL)
-
-#define BTRFS_RAID_ATTR_PTR(_name) (&btrfs_raid_attr_##_name.attr)
+#define BTRFS_ATTR_PTR(_prefix, _name) \
+ (&btrfs_attr_##_prefix##_##_name.attr)
struct btrfs_feature_attr {
@@ -44,15 +39,16 @@ struct btrfs_feature_attr {
u64 feature_bit;
};
-#define BTRFS_FEAT_ATTR(_name, _feature_set, _prefix, _feature_bit) \
-static struct btrfs_feature_attr btrfs_attr_##_name = { \
+#define BTRFS_FEAT_ATTR(_name, _feature_set, _feature_prefix, _feature_bit) \
+static struct btrfs_feature_attr btrfs_attr_features_##_name = { \
.kobj_attr = __INIT_KOBJ_ATTR(_name, S_IRUGO, \
btrfs_feature_attr_show, \
btrfs_feature_attr_store), \
.feature_set = _feature_set, \
- .feature_bit = _prefix ##_## _feature_bit, \
+ .feature_bit = _feature_prefix ##_## _feature_bit, \
}
-#define BTRFS_FEAT_ATTR_PTR(_name) (&btrfs_attr_##_name.kobj_attr.attr)
+#define BTRFS_FEAT_ATTR_PTR(_name) \
+ (&btrfs_attr_features_##_name.kobj_attr.attr)
#define BTRFS_FEAT_ATTR_COMPAT(name, feature) \
BTRFS_FEAT_ATTR(name, FEAT_COMPAT, BTRFS_FEATURE_COMPAT, feature)
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index 1458bb0ea124..8444a018cca2 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -500,7 +500,8 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
path = btrfs_alloc_path();
if (!path) {
test_msg("Couldn't allocate path\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
ret = add_block_group_free_space(&trans, root->fs_info, cache);
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 8c91d03cc82d..f797642c013d 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -770,7 +770,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
offset = em->start + em->len;
free_extent_map(em);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, 4096 * 1024, 0);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, SZ_4M, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
@@ -968,7 +968,6 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
btrfs_test_inode_set_ops(inode);
/* [BTRFS_MAX_EXTENT_SIZE] */
- BTRFS_I(inode)->outstanding_extents++;
ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1,
NULL, 0);
if (ret) {
@@ -983,7 +982,6 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
}
/* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
- BTRFS_I(inode)->outstanding_extents++;
ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
NULL, 0);
@@ -1003,7 +1001,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
BTRFS_MAX_EXTENT_SIZE >> 1,
(BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
EXTENT_DELALLOC | EXTENT_DIRTY |
- EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0,
+ EXTENT_UPTODATE, 0, 0,
NULL, GFP_KERNEL);
if (ret) {
test_msg("clear_extent_bit returned %d\n", ret);
@@ -1017,7 +1015,6 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
}
/* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
- BTRFS_I(inode)->outstanding_extents++;
ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
(BTRFS_MAX_EXTENT_SIZE >> 1)
+ sectorsize - 1,
@@ -1035,12 +1032,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
/*
* [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize]
- *
- * I'm artificially adding 2 to outstanding_extents because in the
- * buffered IO case we'd add things up as we go, but I don't feel like
- * doing that here, this isn't the interesting case we want to test.
*/
- BTRFS_I(inode)->outstanding_extents += 2;
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
(BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
@@ -1059,7 +1051,6 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
/*
* [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize][BTRFS_MAX_EXTENT_SIZE+sectorsize]
*/
- BTRFS_I(inode)->outstanding_extents++;
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + sectorsize,
BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
@@ -1079,7 +1070,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
BTRFS_MAX_EXTENT_SIZE + sectorsize,
BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
+ EXTENT_UPTODATE, 0, 0,
NULL, GFP_KERNEL);
if (ret) {
test_msg("clear_extent_bit returned %d\n", ret);
@@ -1096,7 +1087,6 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
* Refill the hole again just for good measure, because I thought it
* might fail and I'd rather satisfy my paranoia at this point.
*/
- BTRFS_I(inode)->outstanding_extents++;
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + sectorsize,
BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
@@ -1114,7 +1104,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
/* Empty */
ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
+ EXTENT_UPTODATE, 0, 0,
NULL, GFP_KERNEL);
if (ret) {
test_msg("clear_extent_bit returned %d\n", ret);
@@ -1131,7 +1121,7 @@ out:
if (ret)
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
+ EXTENT_UPTODATE, 0, 0,
NULL, GFP_KERNEL);
iput(inode);
btrfs_free_dummy_root(root);
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 0f4ce970d195..90204b166643 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -240,7 +240,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
* we can only call btrfs_qgroup_account_extent() directly to test
* quota.
*/
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
+ false);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
@@ -252,7 +253,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
if (ret)
return ret;
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
+ false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
@@ -275,7 +277,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
old_roots = NULL;
new_roots = NULL;
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
+ false);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
@@ -286,7 +289,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root,
if (ret)
return -EINVAL;
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
+ false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
@@ -337,7 +341,8 @@ static int test_multiple_refs(struct btrfs_root *root,
return ret;
}
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
+ false);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
@@ -349,7 +354,8 @@ static int test_multiple_refs(struct btrfs_root *root,
if (ret)
return ret;
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
+ false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
@@ -370,7 +376,8 @@ static int test_multiple_refs(struct btrfs_root *root,
return -EINVAL;
}
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
+ false);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
@@ -382,7 +389,8 @@ static int test_multiple_refs(struct btrfs_root *root,
if (ret)
return ret;
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
+ false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
@@ -409,7 +417,8 @@ static int test_multiple_refs(struct btrfs_root *root,
return -EINVAL;
}
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots,
+ false);
if (ret) {
ulist_free(old_roots);
test_msg("Couldn't find old roots: %d\n", ret);
@@ -421,7 +430,8 @@ static int test_multiple_refs(struct btrfs_root *root,
if (ret)
return ret;
- ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
+ ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots,
+ false);
if (ret) {
ulist_free(old_roots);
ulist_free(new_roots);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index f615d59b0489..5a8c2649af2f 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -797,8 +797,7 @@ static int should_end_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- if (fs_info->global_block_rsv.space_info->full &&
- btrfs_check_space_for_delayed_refs(trans, fs_info))
+ if (btrfs_check_space_for_delayed_refs(trans, fs_info))
return 1;
return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
@@ -950,6 +949,7 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
u64 start = 0;
u64 end;
+ atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
while (!find_first_extent_bit(dirty_pages, start, &start, &end,
mark, &cached_state)) {
bool wait_writeback = false;
@@ -985,6 +985,7 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
cond_resched();
start = end + 1;
}
+ atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
return werr;
}
@@ -1915,8 +1916,17 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
+ /*
+ * We use writeback_inodes_sb here because if we used
+ * btrfs_start_delalloc_roots we would deadlock with fs freeze.
+ * We are currently holding the fs freeze lock; if we do an async flush
+ * we'll do btrfs_join_transaction() and deadlock because we need to
+ * wait for the fs freeze lock. Using the direct flushing we benefit
+ * from already being in a transaction and our join_transaction doesn't
+ * have to re-take the fs freeze lock.
+ */
if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
- return btrfs_start_delalloc_roots(fs_info, 1, -1);
+ writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
return 0;
}
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
new file mode 100644
index 000000000000..114fc5f0ecc5
--- /dev/null
+++ b/fs/btrfs/tree-checker.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (C) Qu Wenruo 2017. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program.
+ */
+
+/*
+ * The module is used to catch unexpected/corrupted tree block data.
+ * Such behavior can be caused either by a fuzzed image or bugs.
+ *
+ * The objective is to do leaf/node validation checks when a tree block is read
+ * from disk, and to check *every* possible member, so other code won't
+ * need to check them again.
+ *
+ * Due to the potential for unwanted damage, every checker needs to be
+ * carefully reviewed so that it does not prevent the mount of valid images.
+ */
+
+#include "ctree.h"
+#include "tree-checker.h"
+#include "disk-io.h"
+#include "compression.h"
+
+/*
+ * Error messages should follow this format:
+ * corrupt <type>: <identifier>, <reason>[, <bad_value>]
+ *
+ * @type: leaf or node
+ * @identifier: the necessary info to locate the leaf/node.
+ * It's recommended to decode key.objectid/offset if it's
+ * meaningful.
+ * @reason: describe the error
+ * @bad_value: optional, it's recommended to output the bad value and its
+ * expected value (range).
+ *
+ * Since comma is used to separate the components, only space is allowed
+ * inside each component.
+ */
+
+/*
+ * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt.
+ * Allows callers to customize the output.
+ */
+__printf(4, 5)
+static void generic_err(const struct btrfs_root *root,
+ const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(root->fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d, %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node",
+ root->objectid, btrfs_header_bytenr(eb), slot, &vaf);
+ va_end(args);
+}
+
+/*
+ * Customized reporter for extent data item, since its key objectid and
+ * offset has its own meaning.
+ */
+__printf(4, 5)
+static void file_extent_err(const struct btrfs_root *root,
+ const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ struct btrfs_key key;
+ struct va_format vaf;
+ va_list args;
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(root->fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d ino=%llu file_offset=%llu, %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node", root->objectid,
+ btrfs_header_bytenr(eb), slot, key.objectid, key.offset, &vaf);
+ va_end(args);
+}
+
+/*
+ * Return 0 if the btrfs_file_extent_##name is aligned to @alignment
+ * Else return 1
+ */
+#define CHECK_FE_ALIGNED(root, leaf, slot, fi, name, alignment) \
+({ \
+ if (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))) \
+ file_extent_err((root), (leaf), (slot), \
+ "invalid %s for file extent, have %llu, should be aligned to %u", \
+ (#name), btrfs_file_extent_##name((leaf), (fi)), \
+ (alignment)); \
+ (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))); \
+})
+
+static int check_extent_data_item(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_file_extent_item *fi;
+ u32 sectorsize = root->fs_info->sectorsize;
+ u32 item_size = btrfs_item_size_nr(leaf, slot);
+
+ if (!IS_ALIGNED(key->offset, sectorsize)) {
+ file_extent_err(root, leaf, slot,
+"unaligned file_offset for file extent, have %llu should be aligned to %u",
+ key->offset, sectorsize);
+ return -EUCLEAN;
+ }
+
+ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) {
+ file_extent_err(root, leaf, slot,
+ "invalid type for file extent, have %u expect range [0, %u]",
+ btrfs_file_extent_type(leaf, fi),
+ BTRFS_FILE_EXTENT_TYPES);
+ return -EUCLEAN;
+ }
+
+ /*
+ * Support for new compression/encryption must introduce an incompat flag,
+ * and must be caught in open_ctree().
+ */
+ if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
+ file_extent_err(root, leaf, slot,
+ "invalid compression for file extent, have %u expect range [0, %u]",
+ btrfs_file_extent_compression(leaf, fi),
+ BTRFS_COMPRESS_TYPES);
+ return -EUCLEAN;
+ }
+ if (btrfs_file_extent_encryption(leaf, fi)) {
+ file_extent_err(root, leaf, slot,
+ "invalid encryption for file extent, have %u expect 0",
+ btrfs_file_extent_encryption(leaf, fi));
+ return -EUCLEAN;
+ }
+ if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
+ /* Inline extent must have 0 as key offset */
+ if (key->offset) {
+ file_extent_err(root, leaf, slot,
+ "invalid file_offset for inline file extent, have %llu expect 0",
+ key->offset);
+ return -EUCLEAN;
+ }
+
+ /* Compressed inline extent has no on-disk size, skip it */
+ if (btrfs_file_extent_compression(leaf, fi) !=
+ BTRFS_COMPRESS_NONE)
+ return 0;
+
+ /* Uncompressed inline extent size must match item size */
+ if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
+ btrfs_file_extent_ram_bytes(leaf, fi)) {
+ file_extent_err(root, leaf, slot,
+ "invalid ram_bytes for uncompressed inline extent, have %u expect %llu",
+ item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START +
+ btrfs_file_extent_ram_bytes(leaf, fi));
+ return -EUCLEAN;
+ }
+ return 0;
+ }
+
+ /* Regular or preallocated extent has fixed item size */
+ if (item_size != sizeof(*fi)) {
+ file_extent_err(root, leaf, slot,
+ "invalid item size for reg/prealloc file extent, have %u expect %zu",
+ item_size, sizeof(*fi));
+ return -EUCLEAN;
+ }
+ if (CHECK_FE_ALIGNED(root, leaf, slot, fi, ram_bytes, sectorsize) ||
+ CHECK_FE_ALIGNED(root, leaf, slot, fi, disk_bytenr, sectorsize) ||
+ CHECK_FE_ALIGNED(root, leaf, slot, fi, disk_num_bytes, sectorsize) ||
+ CHECK_FE_ALIGNED(root, leaf, slot, fi, offset, sectorsize) ||
+ CHECK_FE_ALIGNED(root, leaf, slot, fi, num_bytes, sectorsize))
+ return -EUCLEAN;
+ return 0;
+}
+
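+/* Validate a checksum item: its key objectid, offset alignment and item size. */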
+static int check_csum_item(struct btrfs_root *root, struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ u32 sectorsize = root->fs_info->sectorsize;
+ u32 csumsize = btrfs_super_csum_size(root->fs_info->super_copy);
+
+ if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) {
+ generic_err(root, leaf, slot,
+ "invalid key objectid for csum item, have %llu expect %llu",
+ key->objectid, BTRFS_EXTENT_CSUM_OBJECTID);
+ return -EUCLEAN;
+ }
+ if (!IS_ALIGNED(key->offset, sectorsize)) {
+ generic_err(root, leaf, slot,
+ "unaligned key offset for csum item, have %llu should be aligned to %u",
+ key->offset, sectorsize);
+ return -EUCLEAN;
+ }
+ if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) {
+ generic_err(root, leaf, slot,
+ "unaligned item size for csum item, have %u should be aligned to %u",
+ btrfs_item_size_nr(leaf, slot), csumsize);
+ return -EUCLEAN;
+ }
+ return 0;
+}
+
+/*
+ * Common point to switch the item-specific validation.
+ */
+static int check_leaf_item(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ int ret = 0;
+
+ switch (key->type) {
+ case BTRFS_EXTENT_DATA_KEY:
+ ret = check_extent_data_item(root, leaf, key, slot);
+ break;
+ case BTRFS_EXTENT_CSUM_KEY:
+ ret = check_csum_item(root, leaf, key, slot);
+ break;
+ }
+ return ret;
+}
+
+int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ /* No valid key type is 0, so all keys should be larger than this key */
+ struct btrfs_key prev_key = {0, 0, 0};
+ struct btrfs_key key;
+ u32 nritems = btrfs_header_nritems(leaf);
+ int slot;
+
+ /*
+ * Extent buffers from a relocation tree have an owner field that
+ * corresponds to the subvolume tree they are based on. So just from an
+ * extent buffer alone we cannot find out the id of the corresponding
+ * subvolume tree, and thus cannot figure out whether the extent buffer
+ * corresponds to the root of the relocation tree or not. So skip this
+ * check for relocation trees.
+ */
+ if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
+ struct btrfs_root *check_root;
+
+ key.objectid = btrfs_header_owner(leaf);
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
+
+ check_root = btrfs_get_fs_root(fs_info, &key, false);
+ /*
+ * The only reason we also check NULL here is that during
+ * open_ctree() some roots have not yet been set up.
+ */
+ if (!IS_ERR_OR_NULL(check_root)) {
+ struct extent_buffer *eb;
+
+ eb = btrfs_root_node(check_root);
+ /* if leaf is the root, then it's fine */
+ if (leaf != eb) {
+ generic_err(check_root, leaf, 0,
+ "invalid nritems, have %u should not be 0 for non-root leaf",
+ nritems);
+ free_extent_buffer(eb);
+ return -EUCLEAN;
+ }
+ free_extent_buffer(eb);
+ }
+ return 0;
+ }
+
+ if (nritems == 0)
+ return 0;
+
+ /*
+ * Check the following things to make sure this is a good leaf, and
+ * leaf users won't need to bother with similar sanity checks:
+ *
+ * 1) key ordering
+ * 2) item offset and size
+ * No overlap, no hole, all inside the leaf.
+ * 3) item content
+ * If possible, do comprehensive sanity check.
+ * NOTE: All checks must only rely on the item data itself.
+ */
+ for (slot = 0; slot < nritems; slot++) {
+ u32 item_end_expected;
+ int ret;
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+
+ /* Make sure the keys are in the right order */
+ if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) {
+ generic_err(root, leaf, slot,
+ "bad key order, prev (%llu %u %llu) current (%llu %u %llu)",
+ prev_key.objectid, prev_key.type,
+ prev_key.offset, key.objectid, key.type,
+ key.offset);
+ return -EUCLEAN;
+ }
+
+ /*
+ * Make sure the offsets and ends are right; remember that the
+ * item data starts at the end of the leaf and grows towards the
+ * front.
+ */
+ if (slot == 0)
+ item_end_expected = BTRFS_LEAF_DATA_SIZE(fs_info);
+ else
+ item_end_expected = btrfs_item_offset_nr(leaf,
+ slot - 1);
+ if (btrfs_item_end_nr(leaf, slot) != item_end_expected) {
+ generic_err(root, leaf, slot,
+ "unexpected item end, have %u expect %u",
+ btrfs_item_end_nr(leaf, slot),
+ item_end_expected);
+ return -EUCLEAN;
+ }
+
+ /*
+ * Check to make sure that we don't point outside of the leaf,
+ * just in case all the items are consistent with each other, but
+ * all point outside of the leaf.
+ */
+ if (btrfs_item_end_nr(leaf, slot) >
+ BTRFS_LEAF_DATA_SIZE(fs_info)) {
+ generic_err(root, leaf, slot,
+ "slot end outside of leaf, have %u expect range [0, %u]",
+ btrfs_item_end_nr(leaf, slot),
+ BTRFS_LEAF_DATA_SIZE(fs_info));
+ return -EUCLEAN;
+ }
+
+ /* Also check that the item pointer does not overlap with the btrfs item. */
+ if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) >
+ btrfs_item_ptr_offset(leaf, slot)) {
+ generic_err(root, leaf, slot,
+ "slot overlaps with its data, item end %lu data start %lu",
+ btrfs_item_nr_offset(slot) +
+ sizeof(struct btrfs_item),
+ btrfs_item_ptr_offset(leaf, slot));
+ return -EUCLEAN;
+ }
+
+ /* Check if the item size and content meet other criteria */
+ ret = check_leaf_item(root, leaf, &key, slot);
+ if (ret < 0)
+ return ret;
+
+ prev_key.objectid = key.objectid;
+ prev_key.type = key.type;
+ prev_key.offset = key.offset;
+ }
+
+ return 0;
+}
+
+int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node)
+{
+ unsigned long nr = btrfs_header_nritems(node);
+ struct btrfs_key key, next_key;
+ int slot;
+ u64 bytenr;
+ int ret = 0;
+
+ if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root->fs_info)) {
+ btrfs_crit(root->fs_info,
+"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
+ root->objectid, node->start,
+ nr == 0 ? "small" : "large", nr,
+ BTRFS_NODEPTRS_PER_BLOCK(root->fs_info));
+ return -EUCLEAN;
+ }
+
+ for (slot = 0; slot < nr - 1; slot++) {
+ bytenr = btrfs_node_blockptr(node, slot);
+ btrfs_node_key_to_cpu(node, &key, slot);
+ btrfs_node_key_to_cpu(node, &next_key, slot + 1);
+
+ if (!bytenr) {
+ generic_err(root, node, slot,
+ "invalid NULL node pointer");
+ ret = -EUCLEAN;
+ goto out;
+ }
+ if (!IS_ALIGNED(bytenr, root->fs_info->sectorsize)) {
+ generic_err(root, node, slot,
+ "unaligned pointer, have %llu should be aligned to %u",
+ bytenr, root->fs_info->sectorsize);
+ ret = -EUCLEAN;
+ goto out;
+ }
+
+ if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
+ generic_err(root, node, slot,
+ "bad key order, current (%llu %u %llu) next (%llu %u %llu)",
+ key.objectid, key.type, key.offset,
+ next_key.objectid, next_key.type,
+ next_key.offset);
+ ret = -EUCLEAN;
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
diff --git a/fs/btrfs/tree-checker.h b/fs/btrfs/tree-checker.h
new file mode 100644
index 000000000000..96c486e95d70
--- /dev/null
+++ b/fs/btrfs/tree-checker.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) Qu Wenruo 2017. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program.
+ */
+
+#ifndef __BTRFS_TREE_CHECKER__
+#define __BTRFS_TREE_CHECKER__
+
+#include "ctree.h"
+#include "extent_io.h"
+
+int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf);
+int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node);
+
+#endif
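
The two entry points declared above are meant to run right after a tree block has been read in and its header verified. The snippet below is only an illustrative sketch of such a caller (the helper name verify_tree_block() is hypothetical and not part of this patch); it relies on the fact that leaves sit at level 0 and anything higher is an internal node.

	/* Illustrative sketch only: a hypothetical read-time verification hook. */
	static int verify_tree_block(struct btrfs_root *root, struct extent_buffer *eb)
	{
		/* Leaves are level 0; anything higher is an internal node. */
		if (btrfs_header_level(eb) == 0)
			return btrfs_check_leaf(root, eb);
		return btrfs_check_node(root, eb);
	}
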
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index c800d067fcbf..aa7c71cff575 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -717,7 +717,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
ins.offset);
if (ret == 0) {
- ret = btrfs_inc_extent_ref(trans, fs_info,
+ ret = btrfs_inc_extent_ref(trans, root,
ins.objectid, ins.offset,
0, root->root_key.objectid,
key->objectid, offset);
@@ -2699,34 +2699,36 @@ static void wait_log_commit(struct btrfs_root *root, int transid)
* so we know that if ours is more than 2 older than the
* current transaction, we're done
*/
- do {
+ for (;;) {
prepare_to_wait(&root->log_commit_wait[index],
&wait, TASK_UNINTERRUPTIBLE);
- mutex_unlock(&root->log_mutex);
- if (root->log_transid_committed < transid &&
- atomic_read(&root->log_commit[index]))
- schedule();
+ if (!(root->log_transid_committed < transid &&
+ atomic_read(&root->log_commit[index])))
+ break;
- finish_wait(&root->log_commit_wait[index], &wait);
+ mutex_unlock(&root->log_mutex);
+ schedule();
mutex_lock(&root->log_mutex);
- } while (root->log_transid_committed < transid &&
- atomic_read(&root->log_commit[index]));
+ }
+ finish_wait(&root->log_commit_wait[index], &wait);
}
static void wait_for_writer(struct btrfs_root *root)
{
DEFINE_WAIT(wait);
- while (atomic_read(&root->log_writers)) {
- prepare_to_wait(&root->log_writer_wait,
- &wait, TASK_UNINTERRUPTIBLE);
+ for (;;) {
+ prepare_to_wait(&root->log_writer_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (!atomic_read(&root->log_writers))
+ break;
+
mutex_unlock(&root->log_mutex);
- if (atomic_read(&root->log_writers))
- schedule();
- finish_wait(&root->log_writer_wait, &wait);
+ schedule();
mutex_lock(&root->log_mutex);
}
+ finish_wait(&root->log_writer_wait, &wait);
}
static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
@@ -4645,7 +4647,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
struct btrfs_key min_key;
struct btrfs_key max_key;
struct btrfs_root *log = root->log_root;
- struct extent_buffer *src = NULL;
LIST_HEAD(logged_list);
u64 last_extent = 0;
int err = 0;
@@ -4888,7 +4889,6 @@ again:
goto next_slot;
}
- src = path->nodes[0];
if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
ins_nr++;
goto next_slot;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b39737568c22..f1ecb938ba4d 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -360,7 +360,6 @@ static noinline void run_scheduled_bios(struct btrfs_device *device)
int again = 0;
unsigned long num_run;
unsigned long batch_run = 0;
- unsigned long limit;
unsigned long last_waited = 0;
int force_reg = 0;
int sync_pending = 0;
@@ -375,8 +374,6 @@ static noinline void run_scheduled_bios(struct btrfs_device *device)
blk_start_plug(&plug);
bdi = device->bdev->bd_bdi;
- limit = btrfs_async_submit_limit(fs_info);
- limit = limit * 2 / 3;
loop:
spin_lock(&device->io_lock);
@@ -443,13 +440,6 @@ loop_lock:
pending = pending->bi_next;
cur->bi_next = NULL;
- /*
- * atomic_dec_return implies a barrier for waitqueue_active
- */
- if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
- waitqueue_active(&fs_info->async_submit_wait))
- wake_up(&fs_info->async_submit_wait);
-
BUG_ON(atomic_read(&cur->__bi_cnt) == 0);
/*
@@ -517,12 +507,6 @@ loop_lock:
&device->work);
goto done;
}
- /* unplug every 64 requests just for good measure */
- if (batch_run % 64 == 0) {
- blk_finish_plug(&plug);
- blk_start_plug(&plug);
- sync_pending = 0;
- }
}
cond_resched();
@@ -547,7 +531,7 @@ static void pending_bios_fn(struct btrfs_work *work)
}
-void btrfs_free_stale_device(struct btrfs_device *cur_dev)
+static void btrfs_free_stale_device(struct btrfs_device *cur_dev)
{
struct btrfs_fs_devices *fs_devs;
struct btrfs_device *dev;
@@ -1068,14 +1052,15 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
return ret;
}
-void btrfs_release_disk_super(struct page *page)
+static void btrfs_release_disk_super(struct page *page)
{
kunmap(page);
put_page(page);
}
-int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
- struct page **page, struct btrfs_super_block **disk_super)
+static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
+ struct page **page,
+ struct btrfs_super_block **disk_super)
{
void *p;
pgoff_t index;
@@ -1817,8 +1802,8 @@ static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
return 0;
}
-struct btrfs_device *btrfs_find_next_active_device(struct btrfs_fs_devices *fs_devs,
- struct btrfs_device *device)
+static struct btrfs_device *btrfs_find_next_active_device(
+ struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
struct btrfs_device *next_device;
@@ -2031,19 +2016,20 @@ void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
}
btrfs_close_bdev(srcdev);
-
call_rcu(&srcdev->rcu, free_device);
- /*
- * unless fs_devices is seed fs, num_devices shouldn't go
- * zero
- */
- BUG_ON(!fs_devices->num_devices && !fs_devices->seeding);
-
 /* if there are no devs left, we'd rather delete the fs_devices */
if (!fs_devices->num_devices) {
struct btrfs_fs_devices *tmp_fs_devices;
+ /*
+ * On a mounted FS, num_devices can't be zero unless it's a
+ * seed. In case of a seed device being replaced, the replace
+ * target is added to the sprout FS, so there will be no more
+ * devices left under the seed FS.
+ */
+ ASSERT(fs_devices->seeding);
+
tmp_fs_devices = fs_info->fs_devices;
while (tmp_fs_devices) {
if (tmp_fs_devices->seed == fs_devices) {
@@ -2323,6 +2309,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
u64 tmp;
int seeding_dev = 0;
int ret = 0;
+ bool unlocked = false;
if (sb_rdonly(sb) && !fs_info->fs_devices->seeding)
return -EROFS;
@@ -2399,7 +2386,10 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
if (seeding_dev) {
sb->s_flags &= ~MS_RDONLY;
ret = btrfs_prepare_sprout(fs_info);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto error_trans;
+ }
}
device->fs_devices = fs_info->fs_devices;
@@ -2445,14 +2435,14 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
mutex_unlock(&fs_info->chunk_mutex);
if (ret) {
btrfs_abort_transaction(trans, ret);
- goto error_trans;
+ goto error_sysfs;
}
}
ret = btrfs_add_device(trans, fs_info, device);
if (ret) {
btrfs_abort_transaction(trans, ret);
- goto error_trans;
+ goto error_sysfs;
}
if (seeding_dev) {
@@ -2461,7 +2451,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
ret = btrfs_finish_sprout(trans, fs_info);
if (ret) {
btrfs_abort_transaction(trans, ret);
- goto error_trans;
+ goto error_sysfs;
}
/* Sprouting would change fsid of the mounted root,
@@ -2479,6 +2469,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
if (seeding_dev) {
mutex_unlock(&uuid_mutex);
up_write(&sb->s_umount);
+ unlocked = true;
if (ret) /* transaction commit */
return ret;
@@ -2491,7 +2482,9 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
if (IS_ERR(trans)) {
if (PTR_ERR(trans) == -ENOENT)
return 0;
- return PTR_ERR(trans);
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto error_sysfs;
}
ret = btrfs_commit_transaction(trans);
}
@@ -2500,14 +2493,18 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
update_dev_time(device_path);
return ret;
+error_sysfs:
+ btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
error_trans:
- btrfs_end_transaction(trans);
+ if (seeding_dev)
+ sb->s_flags |= MS_RDONLY;
+ if (trans)
+ btrfs_end_transaction(trans);
rcu_string_free(device->name);
- btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
kfree(device);
error:
blkdev_put(bdev, FMODE_EXCL);
- if (seeding_dev) {
+ if (seeding_dev && !unlocked) {
mutex_unlock(&uuid_mutex);
up_write(&sb->s_umount);
}
@@ -4813,16 +4810,16 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
em_tree = &info->mapping_tree.map_tree;
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 0);
- if (!ret) {
- list_add_tail(&em->list, &trans->transaction->pending_chunks);
- refcount_inc(&em->refs);
- }
- write_unlock(&em_tree->lock);
if (ret) {
+ write_unlock(&em_tree->lock);
free_extent_map(em);
goto error;
}
+ list_add_tail(&em->list, &trans->transaction->pending_chunks);
+ refcount_inc(&em->refs);
+ write_unlock(&em_tree->lock);
+
ret = btrfs_make_block_group(trans, info, 0, type, start, num_bytes);
if (ret)
goto error_del_extent;
@@ -5695,10 +5692,10 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
&stripe_index);
- if (op != BTRFS_MAP_WRITE && op != BTRFS_MAP_GET_READ_MIRRORS)
+ if (!need_full_stripe(op))
mirror_num = 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
- if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS)
+ if (need_full_stripe(op))
num_stripes = map->num_stripes;
else if (mirror_num)
stripe_index = mirror_num - 1;
@@ -5711,7 +5708,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
}
} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
- if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS) {
+ if (need_full_stripe(op)) {
num_stripes = map->num_stripes;
} else if (mirror_num) {
stripe_index = mirror_num - 1;
@@ -5725,7 +5722,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
stripe_index *= map->sub_stripes;
- if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS)
+ if (need_full_stripe(op))
num_stripes = map->sub_stripes;
else if (mirror_num)
stripe_index += mirror_num - 1;
@@ -5740,9 +5737,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
}
} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
- if (need_raid_map &&
- (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS ||
- mirror_num > 1)) {
+ if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
/* push stripe_nr back to the start of the full stripe */
stripe_nr = div64_u64(raid56_full_stripe_start,
stripe_len * nr_data_stripes(map));
@@ -5769,9 +5764,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
/* We distribute the parity blocks across stripes */
div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
&stripe_index);
- if ((op != BTRFS_MAP_WRITE &&
- op != BTRFS_MAP_GET_READ_MIRRORS) &&
- mirror_num <= 1)
+ if (!need_full_stripe(op) && mirror_num <= 1)
mirror_num = 1;
}
} else {
@@ -6033,7 +6026,7 @@ static void btrfs_end_bio(struct bio *bio)
* this bio is actually up to date, we didn't
* go over the max number of errors
*/
- bio->bi_status = 0;
+ bio->bi_status = BLK_STS_OK;
}
btrfs_end_bbio(bbio, bio);
@@ -6069,13 +6062,6 @@ static noinline void btrfs_schedule_bio(struct btrfs_device *device,
return;
}
- /*
- * nr_async_bios allows us to reliably return congestion to the
- * higher layers. Otherwise, the async bio makes it appear we have
- * made progress against dirty pages when we've really just put it
- * on a queue for later
- */
- atomic_inc(&fs_info->nr_async_bios);
WARN_ON(bio->bi_next);
bio->bi_next = NULL;
@@ -6144,7 +6130,10 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
bio->bi_iter.bi_sector = logical >> 9;
- bio->bi_status = BLK_STS_IOERR;
+ if (atomic_read(&bbio->error) > bbio->max_errors)
+ bio->bi_status = BLK_STS_IOERR;
+ else
+ bio->bi_status = BLK_STS_OK;
btrfs_end_bbio(bbio, bio);
}
}
@@ -6249,7 +6238,7 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
device = btrfs_alloc_device(NULL, &devid, dev_uuid);
if (IS_ERR(device))
- return NULL;
+ return device;
list_add(&device->dev_list, &fs_devices->devices);
device->fs_devices = fs_devices;
@@ -6377,6 +6366,17 @@ static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
return 0;
}
+static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
+ u64 devid, u8 *uuid, bool error)
+{
+ if (error)
+ btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
+ devid, uuid);
+ else
+ btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
+ devid, uuid);
+}
+
static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk)
@@ -6447,18 +6447,21 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
if (!map->stripes[i].dev &&
!btrfs_test_opt(fs_info, DEGRADED)) {
free_extent_map(em);
- btrfs_report_missing_device(fs_info, devid, uuid);
- return -EIO;
+ btrfs_report_missing_device(fs_info, devid, uuid, true);
+ return -ENOENT;
}
if (!map->stripes[i].dev) {
map->stripes[i].dev =
add_missing_dev(fs_info->fs_devices, devid,
uuid);
- if (!map->stripes[i].dev) {
+ if (IS_ERR(map->stripes[i].dev)) {
free_extent_map(em);
- return -EIO;
+ btrfs_err(fs_info,
+ "failed to init missing dev %llu: %ld",
+ devid, PTR_ERR(map->stripes[i].dev));
+ return PTR_ERR(map->stripes[i].dev);
}
- btrfs_report_missing_device(fs_info, devid, uuid);
+ btrfs_report_missing_device(fs_info, devid, uuid, false);
}
map->stripes[i].dev->in_fs_metadata = 1;
}
@@ -6577,19 +6580,28 @@ static int read_one_dev(struct btrfs_fs_info *fs_info,
device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
if (!device) {
if (!btrfs_test_opt(fs_info, DEGRADED)) {
- btrfs_report_missing_device(fs_info, devid, dev_uuid);
- return -EIO;
+ btrfs_report_missing_device(fs_info, devid,
+ dev_uuid, true);
+ return -ENOENT;
}
device = add_missing_dev(fs_devices, devid, dev_uuid);
- if (!device)
- return -ENOMEM;
- btrfs_report_missing_device(fs_info, devid, dev_uuid);
+ if (IS_ERR(device)) {
+ btrfs_err(fs_info,
+ "failed to add missing dev %llu: %ld",
+ devid, PTR_ERR(device));
+ return PTR_ERR(device);
+ }
+ btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
} else {
if (!device->bdev) {
- btrfs_report_missing_device(fs_info, devid, dev_uuid);
- if (!btrfs_test_opt(fs_info, DEGRADED))
- return -EIO;
+ if (!btrfs_test_opt(fs_info, DEGRADED)) {
+ btrfs_report_missing_device(fs_info,
+ devid, dev_uuid, true);
+ return -ENOENT;
+ }
+ btrfs_report_missing_device(fs_info, devid,
+ dev_uuid, false);
}
if(!device->bdev && !device->missing) {
@@ -6756,12 +6768,6 @@ out_short_read:
return -EIO;
}
-void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, u64 devid,
- u8 *uuid)
-{
- btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing", devid, uuid);
-}
-
/*
* Check if all chunks in the fs are OK for read-write degraded mount
*
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 6108fdfec67f..ff15208344a7 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -542,7 +542,5 @@ void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info);
-void btrfs_report_missing_device(struct btrfs_fs_info *fs_info, u64 devid,
- u8 *uuid);
#endif
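
The volumes.c hunks above also switch add_missing_dev() from returning NULL on failure to the ERR_PTR convention. A minimal, illustrative caller-side sketch of that convention (the surrounding variable names are assumptions, not taken verbatim from the patch):

	device = add_missing_dev(fs_devices, devid, dev_uuid);
	if (IS_ERR(device))
		return PTR_ERR(device);	/* typically -ENOMEM from btrfs_alloc_device() */
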
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index c248f9286366..2b52950dc2c6 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -37,6 +37,7 @@ struct workspace {
z_stream strm;
char *buf;
struct list_head list;
+ int level;
};
static void zlib_free_workspace(struct list_head *ws)
@@ -96,7 +97,7 @@ static int zlib_compress_pages(struct list_head *ws,
*total_out = 0;
*total_in = 0;
- if (Z_OK != zlib_deflateInit(&workspace->strm, 3)) {
+ if (Z_OK != zlib_deflateInit(&workspace->strm, workspace->level)) {
pr_warn("BTRFS: deflateInit failed\n");
ret = -EIO;
goto out;
@@ -402,10 +403,22 @@ next:
return ret;
}
+static void zlib_set_level(struct list_head *ws, unsigned int type)
+{
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+ unsigned level = (type & 0xF0) >> 4;
+
+ if (level > 9)
+ level = 9;
+
+ workspace->level = level > 0 ? level : 3;
+}
+
const struct btrfs_compress_op btrfs_zlib_compress = {
.alloc_workspace = zlib_alloc_workspace,
.free_workspace = zlib_free_workspace,
.compress_pages = zlib_compress_pages,
.decompress_bio = zlib_decompress_bio,
.decompress = zlib_decompress,
+ .set_level = zlib_set_level,
};
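
The new zlib_set_level() above decodes the requested level from the upper nibble of the type word and falls back to the historical default of 3 when no level is requested. A hedged sketch of how a caller would pack a level, assuming that encoding (the constant BTRFS_COMPRESS_ZLIB is the existing compression type value; the packing itself is inferred from the shift used above):

	/* Illustrative only: request zlib at level 7; a level of 0 falls back to 3. */
	unsigned int type = BTRFS_COMPRESS_ZLIB | (7 << 4);
	unsigned int level = (type & 0xF0) >> 4;	/* 7 here, clamped to 9 at most */
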
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 607ce47b483a..17f2dd8fddb8 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -423,10 +423,15 @@ finish:
return ret;
}
+static void zstd_set_level(struct list_head *ws, unsigned int type)
+{
+}
+
const struct btrfs_compress_op btrfs_zstd_compress = {
.alloc_workspace = zstd_alloc_workspace,
.free_workspace = zstd_free_workspace,
.compress_pages = zstd_compress_pages,
.decompress_bio = zstd_decompress_bio,
.decompress = zstd_decompress,
+ .set_level = zstd_set_level,
};
diff --git a/fs/buffer.c b/fs/buffer.c
index 170df856bdb9..0736a6a2e2f0 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -253,27 +253,6 @@ out:
}
/*
- * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
- */
-static void free_more_memory(void)
-{
- struct zoneref *z;
- int nid;
-
- wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
- yield();
-
- for_each_online_node(nid) {
-
- z = first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
- gfp_zone(GFP_NOFS), NULL);
- if (z->zone)
- try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
- GFP_NOFS, NULL);
- }
-}
-
-/*
* I/O completion handler for block_read_full_page() - pages
* which come unlocked at the end of I/O.
*/
@@ -861,16 +840,19 @@ int remove_inode_buffers(struct inode *inode)
* which may not fail from ordinary buffer allocations.
*/
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
- int retry)
+ bool retry)
{
struct buffer_head *bh, *head;
+ gfp_t gfp = GFP_NOFS;
long offset;
-try_again:
+ if (retry)
+ gfp |= __GFP_NOFAIL;
+
head = NULL;
offset = PAGE_SIZE;
while ((offset -= size) >= 0) {
- bh = alloc_buffer_head(GFP_NOFS);
+ bh = alloc_buffer_head(gfp);
if (!bh)
goto no_grow;
@@ -896,23 +878,7 @@ no_grow:
} while (head);
}
- /*
- * Return failure for non-async IO requests. Async IO requests
- * are not allowed to fail, so we have to wait until buffer heads
- * become available. But we don't want tasks sleeping with
- * partially complete buffers, so all were released above.
- */
- if (!retry)
- return NULL;
-
- /* We're _really_ low on memory. Now we just
- * wait for old buffer heads to become free due to
- * finishing IO. Since this is an async request and
- * the reserve list is empty, we're sure there are
- * async buffer heads in use.
- */
- free_more_memory();
- goto try_again;
+ return NULL;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
@@ -1001,8 +967,6 @@ grow_dev_page(struct block_device *bdev, sector_t block,
gfp_mask |= __GFP_NOFAIL;
page = find_or_create_page(inode->i_mapping, index, gfp_mask);
- if (!page)
- return ret;
BUG_ON(!PageLocked(page));
@@ -1021,9 +985,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
/*
* Allocate some buffers for this page
*/
- bh = alloc_page_buffers(page, size, 0);
- if (!bh)
- goto failed;
+ bh = alloc_page_buffers(page, size, true);
/*
* Link the page to the buffers and initialise them. Take the
@@ -1103,8 +1065,6 @@ __getblk_slow(struct block_device *bdev, sector_t block,
ret = grow_buffers(bdev, block, size, gfp);
if (ret < 0)
return NULL;
- if (ret == 0)
- free_more_memory();
}
}
@@ -1575,7 +1535,7 @@ void create_empty_buffers(struct page *page,
{
struct buffer_head *bh, *head, *tail;
- head = alloc_page_buffers(page, blocksize, 1);
+ head = alloc_page_buffers(page, blocksize, true);
bh = head;
do {
bh->b_state |= b_state;
@@ -1632,7 +1592,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
struct buffer_head *head;
end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) {
count = pagevec_count(&pvec);
for (i = 0; i < count; i++) {
@@ -1692,7 +1652,8 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
- create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
+ create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
+ b_state);
return page_buffers(page);
}
@@ -1978,8 +1939,8 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
case IOMAP_MAPPED:
if (offset >= i_size_read(inode))
set_buffer_new(bh);
- bh->b_blocknr = (iomap->blkno >> (inode->i_blkbits - 9)) +
- ((offset - iomap->offset) >> inode->i_blkbits);
+ bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
+ inode->i_blkbits;
set_buffer_mapped(bh);
break;
}
@@ -2638,7 +2599,7 @@ int nobh_write_begin(struct address_space *mapping,
* Be careful: the buffer linked list is a NULL terminated one, rather
* than the circular one we're used to.
*/
- head = alloc_page_buffers(page, blocksize, 0);
+ head = alloc_page_buffers(page, blocksize, false);
if (!head) {
ret = -ENOMEM;
goto out_release;
@@ -3055,8 +3016,16 @@ void guard_bio_eod(int op, struct bio *bio)
sector_t maxsector;
struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
unsigned truncated_bytes;
+ struct hd_struct *part;
+
+ rcu_read_lock();
+ part = __disk_get_part(bio->bi_disk, bio->bi_partno);
+ if (part)
+ maxsector = part_nr_sects_read(part);
+ else
+ maxsector = get_capacity(bio->bi_disk);
+ rcu_read_unlock();
- maxsector = get_capacity(bio->bi_disk);
if (!maxsector)
return;
@@ -3545,7 +3514,7 @@ page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
if (length <= 0)
return -ENOENT;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
do {
unsigned nr_pages, i;
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 18d7aa61ef0f..883bc7bb12c5 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -256,8 +256,7 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
goto backing_page_already_present;
if (!newpage) {
- newpage = __page_cache_alloc(cachefiles_gfp |
- __GFP_COLD);
+ newpage = __page_cache_alloc(cachefiles_gfp);
if (!newpage)
goto nomem_monitor;
}
@@ -493,8 +492,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
goto backing_page_already_present;
if (!newpage) {
- newpage = __page_cache_alloc(cachefiles_gfp |
- __GFP_COLD);
+ newpage = __page_cache_alloc(cachefiles_gfp);
if (!newpage)
goto nomem;
}
@@ -710,7 +708,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
/* calculate the shift required to use bmap */
shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
- pagevec_init(&pagevec, 0);
+ pagevec_init(&pagevec);
op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
op->op.flags |= FSCACHE_OP_ASYNC;
@@ -844,7 +842,7 @@ int cachefiles_allocate_pages(struct fscache_retrieval *op,
ret = cachefiles_has_space(cache, 0, *nr_pages);
if (ret == 0) {
- pagevec_init(&pagevec, 0);
+ pagevec_init(&pagevec);
list_for_each_entry(page, pages, lru) {
if (pagevec_add(&pagevec, page) == 0)
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 4d622654bfbc..dbf07051aacd 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -680,7 +680,7 @@ static void ceph_release_pages(struct page **pages, int num)
struct pagevec pvec;
int i;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
for (i = 0; i < num; i++) {
if (pagevec_add(&pvec, pages[i]) == 0)
pagevec_release(&pvec);
@@ -811,7 +811,7 @@ static int ceph_writepages_start(struct address_space *mapping,
if (fsc->mount_options->wsize < wsize)
wsize = fsc->mount_options->wsize;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
index = start_index;
@@ -870,15 +870,10 @@ retry:
max_pages = wsize >> PAGE_SHIFT;
get_more_pages:
- pvec_pages = min_t(unsigned, PAGEVEC_SIZE,
- max_pages - locked_pages);
- if (end - index < (u64)(pvec_pages - 1))
- pvec_pages = (unsigned)(end - index) + 1;
-
- pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index,
- PAGECACHE_TAG_DIRTY,
- pvec_pages);
- dout("pagevec_lookup_tag got %d\n", pvec_pages);
+ pvec_pages = pagevec_lookup_range_nr_tag(&pvec, mapping, &index,
+ end, PAGECACHE_TAG_DIRTY,
+ max_pages - locked_pages);
+ dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
if (!pvec_pages && !locked_pages)
break;
for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
@@ -896,16 +891,6 @@ get_more_pages:
unlock_page(page);
continue;
}
- if (page->index > end) {
- dout("end of range %p\n", page);
- /* can't be range_cyclic (1st pass) because
- * end == -1 in that case. */
- stop = true;
- if (ceph_wbc.head_snapc)
- done = true;
- unlock_page(page);
- break;
- }
if (strip_unit_end && (page->index > strip_unit_end)) {
dout("end of strip unit %p\n", page);
unlock_page(page);
@@ -1177,8 +1162,7 @@ release_pvec_pages:
index = 0;
while ((index <= end) &&
(nr = pagevec_lookup_tag(&pvec, mapping, &index,
- PAGECACHE_TAG_WRITEBACK,
- PAGEVEC_SIZE))) {
+ PAGECACHE_TAG_WRITEBACK))) {
for (i = 0; i < nr; i++) {
page = pvec.pages[i];
if (page_snap_context(page) != snapc)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 92fdf9c35de2..df9f682708c6 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1963,8 +1963,6 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
pgoff_t end, pgoff_t *index,
unsigned int *found_pages)
{
- unsigned int nr_pages;
- struct page **pages;
struct cifs_writedata *wdata;
wdata = cifs_writedata_alloc((unsigned int)tofind,
@@ -1972,23 +1970,8 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
if (!wdata)
return NULL;
- /*
- * find_get_pages_tag seems to return a max of 256 on each
- * iteration, so we must call it several times in order to
- * fill the array or the wsize is effectively limited to
- * 256 * PAGE_SIZE.
- */
- *found_pages = 0;
- pages = wdata->pages;
- do {
- nr_pages = find_get_pages_tag(mapping, index,
- PAGECACHE_TAG_DIRTY, tofind,
- pages);
- *found_pages += nr_pages;
- tofind -= nr_pages;
- pages += nr_pages;
- } while (nr_pages && tofind && *index <= end);
-
+ *found_pages = find_get_pages_range_tag(mapping, index, end,
+ PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
return wdata;
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index bdb963d0ba32..e06740436b92 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -2087,22 +2087,6 @@ init_sg(struct smb_rqst *rqst, u8 *sign)
return sg;
}
-struct cifs_crypt_result {
- int err;
- struct completion completion;
-};
-
-static void cifs_crypt_complete(struct crypto_async_request *req, int err)
-{
- struct cifs_crypt_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static int
smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
{
@@ -2143,12 +2127,10 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
struct aead_request *req;
char *iv;
unsigned int iv_len;
- struct cifs_crypt_result result = {0, };
+ DECLARE_CRYPTO_WAIT(wait);
struct crypto_aead *tfm;
unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
- init_completion(&result.completion);
-
rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
if (rc) {
cifs_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
@@ -2208,14 +2190,10 @@ crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
aead_request_set_ad(req, assoc_data_len);
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- cifs_crypt_complete, &result);
+ crypto_req_done, &wait);
- rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
-
- if (rc == -EINPROGRESS || rc == -EBUSY) {
- wait_for_completion(&result.completion);
- rc = result.err;
- }
+ rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+ : crypto_aead_decrypt(req), &wait);
if (!rc && enc)
memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
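
The hunk above replaces the hand-rolled completion structure with the generic crypto wait helpers. The same pattern in isolation, as a minimal sketch (assuming req and tfm are already allocated and configured):

	DECLARE_CRYPTO_WAIT(wait);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	/* crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY and returns the final status. */
	rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
				 : crypto_aead_decrypt(req), &wait);
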
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 56fb26127fef..577cff24707b 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -584,7 +584,7 @@ static void detach_attrs(struct config_item * item)
static int populate_attrs(struct config_item *item)
{
- struct config_item_type *t = item->ci_type;
+ const struct config_item_type *t = item->ci_type;
struct configfs_attribute *attr;
struct configfs_bin_attribute *bin_attr;
int error = 0;
@@ -901,7 +901,7 @@ static void configfs_detach_group(struct config_item *item)
static void client_disconnect_notify(struct config_item *parent_item,
struct config_item *item)
{
- struct config_item_type *type;
+ const struct config_item_type *type;
type = parent_item->ci_type;
BUG_ON(!type);
@@ -920,7 +920,7 @@ static void client_disconnect_notify(struct config_item *parent_item,
static void client_drop_item(struct config_item *parent_item,
struct config_item *item)
{
- struct config_item_type *type;
+ const struct config_item_type *type;
type = parent_item->ci_type;
BUG_ON(!type);
@@ -1260,7 +1260,7 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
struct config_item *parent_item;
struct configfs_subsystem *subsys;
struct configfs_dirent *sd;
- struct config_item_type *type;
+ const struct config_item_type *type;
struct module *subsys_owner = NULL, *new_item_owner = NULL;
char *name;
@@ -1810,7 +1810,7 @@ EXPORT_SYMBOL(configfs_unregister_group);
struct config_group *
configfs_register_default_group(struct config_group *parent_group,
const char *name,
- struct config_item_type *item_type)
+ const struct config_item_type *item_type)
{
int ret;
struct config_group *group;
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 39da1103d341..62580dba3552 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -166,7 +166,7 @@ configfs_read_bin_file(struct file *file, char __user *buf,
retval = -ETXTBSY;
goto out;
}
- buffer->read_in_progress = 1;
+ buffer->read_in_progress = true;
if (buffer->needs_read_fill) {
/* perform first read with buf == NULL to get extent */
@@ -325,7 +325,7 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
len = -ETXTBSY;
goto out;
}
- buffer->write_in_progress = 1;
+ buffer->write_in_progress = true;
/* buffer grows? */
if (*ppos + count > buffer->bin_buffer_size) {
@@ -429,8 +429,8 @@ static int check_perm(struct inode * inode, struct file * file, int type)
}
mutex_init(&buffer->mutex);
buffer->needs_read_fill = 1;
- buffer->read_in_progress = 0;
- buffer->write_in_progress = 0;
+ buffer->read_in_progress = false;
+ buffer->write_in_progress = false;
buffer->ops = ops;
file->private_data = buffer;
goto Done;
@@ -488,10 +488,10 @@ static int configfs_release_bin_file(struct inode *inode, struct file *filp)
ssize_t len = 0;
int ret;
- buffer->read_in_progress = 0;
+ buffer->read_in_progress = false;
if (buffer->write_in_progress) {
- buffer->write_in_progress = 0;
+ buffer->write_in_progress = false;
len = bin_attr->write(item, buffer->bin_buffer,
buffer->bin_buffer_size);
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index a66f6624d899..88f266efc09b 100644
--- a/fs/configfs/item.c
+++ b/fs/configfs/item.c
@@ -113,7 +113,7 @@ EXPORT_SYMBOL(config_item_set_name);
void config_item_init_type_name(struct config_item *item,
const char *name,
- struct config_item_type *type)
+ const struct config_item_type *type)
{
config_item_set_name(item, "%s", name);
item->ci_type = type;
@@ -122,7 +122,7 @@ void config_item_init_type_name(struct config_item *item,
EXPORT_SYMBOL(config_item_init_type_name);
void config_group_init_type_name(struct config_group *group, const char *name,
- struct config_item_type *type)
+ const struct config_item_type *type)
{
config_item_set_name(&group->cg_item, "%s", name);
group->cg_item.ci_type = type;
@@ -148,7 +148,7 @@ EXPORT_SYMBOL(config_item_get_unless_zero);
static void config_item_cleanup(struct config_item *item)
{
- struct config_item_type *t = item->ci_type;
+ const struct config_item_type *t = item->ci_type;
struct config_group *s = item->ci_group;
struct config_item *parent = item->ci_parent;
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index c8aabba502f6..78ffc2699993 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -138,7 +138,7 @@ int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symna
struct configfs_dirent *sd;
struct config_item *parent_item;
struct config_item *target_item = NULL;
- struct config_item_type *type;
+ const struct config_item_type *type;
sd = dentry->d_parent->d_fsdata;
/*
@@ -186,7 +186,7 @@ int configfs_unlink(struct inode *dir, struct dentry *dentry)
struct configfs_dirent *sd = dentry->d_fsdata;
struct configfs_symlink *sl;
struct config_item *parent_item;
- struct config_item_type *type;
+ const struct config_item_type *type;
int ret;
ret = -EPERM; /* What lack-of-symlink returns */
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile
index 9f6607f17b53..cb496989a6b6 100644
--- a/fs/crypto/Makefile
+++ b/fs/crypto/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o
-fscrypto-y := crypto.o fname.o policy.o keyinfo.o
+fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o
fscrypto-$(CONFIG_BLOCK) += bio.o
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index c7835df7e7b8..732a786cce9d 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -126,21 +126,6 @@ struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
}
EXPORT_SYMBOL(fscrypt_get_ctx);
-/**
- * page_crypt_complete() - completion callback for page crypto
- * @req: The asynchronous cipher request context
- * @res: The result of the cipher operation
- */
-static void page_crypt_complete(struct crypto_async_request *req, int res)
-{
- struct fscrypt_completion_result *ecr = req->data;
-
- if (res == -EINPROGRESS)
- return;
- ecr->res = res;
- complete(&ecr->completion);
-}
-
int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
u64 lblk_num, struct page *src_page,
struct page *dest_page, unsigned int len,
@@ -151,7 +136,7 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
u8 padding[FS_IV_SIZE - sizeof(__le64)];
} iv;
struct skcipher_request *req = NULL;
- DECLARE_FS_COMPLETION_RESULT(ecr);
+ DECLARE_CRYPTO_WAIT(wait);
struct scatterlist dst, src;
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
@@ -179,7 +164,7 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
skcipher_request_set_callback(
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- page_crypt_complete, &ecr);
+ crypto_req_done, &wait);
sg_init_table(&dst, 1);
sg_set_page(&dst, dest_page, len, offs);
@@ -187,14 +172,9 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
sg_set_page(&src, src_page, len, offs);
skcipher_request_set_crypt(req, &src, &dst, len, &iv);
if (rw == FS_DECRYPT)
- res = crypto_skcipher_decrypt(req);
+ res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
else
- res = crypto_skcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- BUG_ON(req->base.data != &ecr);
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
+ res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
skcipher_request_free(req);
if (res) {
printk_ratelimited(KERN_ERR
@@ -340,7 +320,7 @@ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
return -ECHILD;
dir = dget_parent(dentry);
- if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
+ if (!IS_ENCRYPTED(d_inode(dir))) {
dput(dir);
return 0;
}
@@ -410,11 +390,8 @@ int fscrypt_initialize(unsigned int cop_flags)
{
int i, res = -ENOMEM;
- /*
- * No need to allocate a bounce page pool if there already is one or
- * this FS won't use it.
- */
- if (cop_flags & FS_CFLG_OWN_PAGES || fscrypt_bounce_page_pool)
+ /* No need to allocate a bounce page pool if this FS won't use it. */
+ if (cop_flags & FS_CFLG_OWN_PAGES)
return 0;
mutex_lock(&fscrypt_init_mutex);
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 8606da1df0aa..305541bcd108 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -16,21 +16,6 @@
#include "fscrypt_private.h"
/**
- * fname_crypt_complete() - completion callback for filename crypto
- * @req: The asynchronous cipher request context
- * @res: The result of the cipher operation
- */
-static void fname_crypt_complete(struct crypto_async_request *req, int res)
-{
- struct fscrypt_completion_result *ecr = req->data;
-
- if (res == -EINPROGRESS)
- return;
- ecr->res = res;
- complete(&ecr->completion);
-}
-
-/**
* fname_encrypt() - encrypt a filename
*
* The caller must have allocated sufficient memory for the @oname string.
@@ -41,7 +26,7 @@ static int fname_encrypt(struct inode *inode,
const struct qstr *iname, struct fscrypt_str *oname)
{
struct skcipher_request *req = NULL;
- DECLARE_FS_COMPLETION_RESULT(ecr);
+ DECLARE_CRYPTO_WAIT(wait);
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
int res = 0;
@@ -77,17 +62,12 @@ static int fname_encrypt(struct inode *inode,
}
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- fname_crypt_complete, &ecr);
+ crypto_req_done, &wait);
sg_init_one(&sg, oname->name, cryptlen);
skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv);
/* Do the encryption */
- res = crypto_skcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- /* Request is being completed asynchronously; wait for it */
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
+ res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
skcipher_request_free(req);
if (res < 0) {
printk_ratelimited(KERN_ERR
@@ -111,7 +91,7 @@ static int fname_decrypt(struct inode *inode,
struct fscrypt_str *oname)
{
struct skcipher_request *req = NULL;
- DECLARE_FS_COMPLETION_RESULT(ecr);
+ DECLARE_CRYPTO_WAIT(wait);
struct scatterlist src_sg, dst_sg;
struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_ctfm;
@@ -132,7 +112,7 @@ static int fname_decrypt(struct inode *inode,
}
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- fname_crypt_complete, &ecr);
+ crypto_req_done, &wait);
/* Initialize IV */
memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
@@ -141,11 +121,7 @@ static int fname_decrypt(struct inode *inode,
sg_init_one(&src_sg, iname->name, iname->len);
sg_init_one(&dst_sg, oname->name, oname->len);
skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
- res = crypto_skcipher_decrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
+ res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
skcipher_request_free(req);
if (res < 0) {
printk_ratelimited(KERN_ERR
@@ -383,8 +359,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
memset(fname, 0, sizeof(struct fscrypt_name));
fname->usr_fname = iname;
- if (!dir->i_sb->s_cop->is_encrypted(dir) ||
- fscrypt_is_dot_dotdot(iname)) {
+ if (!IS_ENCRYPTED(dir) || fscrypt_is_dot_dotdot(iname)) {
fname->disk_name.name = (unsigned char *)iname->name;
fname->disk_name.len = iname->len;
return 0;
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 092e9dad1414..c0b4f5597e1a 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -12,7 +12,8 @@
#ifndef _FSCRYPT_PRIVATE_H
#define _FSCRYPT_PRIVATE_H
-#include <linux/fscrypt_supp.h>
+#define __FS_HAS_ENCRYPTION 1
+#include <linux/fscrypt.h>
#include <crypto/hash.h>
/* Encryption parameters */
@@ -70,16 +71,6 @@ typedef enum {
#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
#define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002
-struct fscrypt_completion_result {
- struct completion completion;
- int res;
-};
-
-#define DECLARE_FS_COMPLETION_RESULT(ecr) \
- struct fscrypt_completion_result ecr = { \
- COMPLETION_INITIALIZER_ONSTACK((ecr).completion), 0 }
-
-
/* crypto.c */
extern int fscrypt_initialize(unsigned int cop_flags);
extern struct workqueue_struct *fscrypt_read_workqueue;
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
new file mode 100644
index 000000000000..9f5fb2eb9cf7
--- /dev/null
+++ b/fs/crypto/hooks.c
@@ -0,0 +1,112 @@
+/*
+ * fs/crypto/hooks.c
+ *
+ * Encryption hooks for higher-level filesystem operations.
+ */
+
+#include <linux/ratelimit.h>
+#include "fscrypt_private.h"
+
+/**
+ * fscrypt_file_open - prepare to open a possibly-encrypted regular file
+ * @inode: the inode being opened
+ * @filp: the struct file being set up
+ *
+ * Currently, an encrypted regular file can only be opened if its encryption key
+ * is available; access to the raw encrypted contents is not supported.
+ * Therefore, we first set up the inode's encryption key (if not already done)
+ * and return an error if it's unavailable.
+ *
+ * We also verify that if the parent directory (from the path via which the file
+ * is being opened) is encrypted, then the inode being opened uses the same
+ * encryption policy. This is needed as part of the enforcement that all files
+ * in an encrypted directory tree use the same encryption policy, as a
+ * protection against certain types of offline attacks. Note that this check is
+ * needed even when opening an *unencrypted* file, since it's forbidden to have
+ * an unencrypted file in an encrypted directory.
+ *
+ * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code
+ */
+int fscrypt_file_open(struct inode *inode, struct file *filp)
+{
+ int err;
+ struct dentry *dir;
+
+ err = fscrypt_require_key(inode);
+ if (err)
+ return err;
+
+ dir = dget_parent(file_dentry(filp));
+ if (IS_ENCRYPTED(d_inode(dir)) &&
+ !fscrypt_has_permitted_context(d_inode(dir), inode)) {
+ pr_warn_ratelimited("fscrypt: inconsistent encryption contexts: %lu/%lu",
+ d_inode(dir)->i_ino, inode->i_ino);
+ err = -EPERM;
+ }
+ dput(dir);
+ return err;
+}
+EXPORT_SYMBOL_GPL(fscrypt_file_open);
+
+int __fscrypt_prepare_link(struct inode *inode, struct inode *dir)
+{
+ int err;
+
+ err = fscrypt_require_key(dir);
+ if (err)
+ return err;
+
+ if (!fscrypt_has_permitted_context(dir, inode))
+ return -EPERM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__fscrypt_prepare_link);
+
+int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ int err;
+
+ err = fscrypt_require_key(old_dir);
+ if (err)
+ return err;
+
+ err = fscrypt_require_key(new_dir);
+ if (err)
+ return err;
+
+ if (old_dir != new_dir) {
+ if (IS_ENCRYPTED(new_dir) &&
+ !fscrypt_has_permitted_context(new_dir,
+ d_inode(old_dentry)))
+ return -EPERM;
+
+ if ((flags & RENAME_EXCHANGE) &&
+ IS_ENCRYPTED(old_dir) &&
+ !fscrypt_has_permitted_context(old_dir,
+ d_inode(new_dentry)))
+ return -EPERM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__fscrypt_prepare_rename);
+
+int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry)
+{
+ int err = fscrypt_get_encryption_info(dir);
+
+ if (err)
+ return err;
+
+ if (fscrypt_has_encryption_key(dir)) {
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
+ spin_unlock(&dentry->d_lock);
+ }
+
+ d_set_d_op(dentry, &fscrypt_d_ops);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__fscrypt_prepare_lookup);
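
A filesystem adopting the new hooks file would typically call fscrypt_file_open() from its ->open() method. The sketch below is illustrative only; example_file_open() is a hypothetical name, not part of this patch:

	/* Illustrative only: wiring fscrypt_file_open() into a filesystem's ->open(). */
	static int example_file_open(struct inode *inode, struct file *filp)
	{
		int err;

		/* Checks the key availability and the parent directory's policy. */
		err = fscrypt_file_open(inode, filp);
		if (err)
			return err;
		return generic_file_open(inode, filp);
	}
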
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index a38630214058..5e6e846f5a24 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -18,17 +18,6 @@
static struct crypto_shash *essiv_hash_tfm;
-static void derive_crypt_complete(struct crypto_async_request *req, int rc)
-{
- struct fscrypt_completion_result *ecr = req->data;
-
- if (rc == -EINPROGRESS)
- return;
-
- ecr->res = rc;
- complete(&ecr->completion);
-}
-
/**
* derive_key_aes() - Derive a key using AES-128-ECB
* @deriving_key: Encryption key used for derivation.
@@ -43,7 +32,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
{
int res = 0;
struct skcipher_request *req = NULL;
- DECLARE_FS_COMPLETION_RESULT(ecr);
+ DECLARE_CRYPTO_WAIT(wait);
struct scatterlist src_sg, dst_sg;
struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
@@ -60,7 +49,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
}
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- derive_crypt_complete, &ecr);
+ crypto_req_done, &wait);
res = crypto_skcipher_setkey(tfm, deriving_key,
FS_AES_128_ECB_KEY_SIZE);
if (res < 0)
@@ -70,11 +59,7 @@ static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
sg_init_one(&dst_sg, derived_raw_key, source_key->size);
skcipher_request_set_crypt(req, &src_sg, &dst_sg, source_key->size,
NULL);
- res = crypto_skcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
+ res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
skcipher_request_free(req);
crypto_free_skcipher(tfm);
@@ -274,7 +259,7 @@ int fscrypt_get_encryption_info(struct inode *inode)
res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
if (res < 0) {
if (!fscrypt_dummy_context_enabled(inode) ||
- inode->i_sb->s_cop->is_encrypted(inode))
+ IS_ENCRYPTED(inode))
return res;
/* Fake up a context for an unencrypted directory */
memset(&ctx, 0, sizeof(ctx));
@@ -374,7 +359,7 @@ void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
struct fscrypt_info *prev;
if (ci == NULL)
- ci = ACCESS_ONCE(inode->i_crypt_info);
+ ci = READ_ONCE(inode->i_crypt_info);
if (ci == NULL)
return;
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index a120649beeca..c6d431a5cce9 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -110,7 +110,7 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
struct fscrypt_policy policy;
int res;
- if (!inode->i_sb->s_cop->is_encrypted(inode))
+ if (!IS_ENCRYPTED(inode))
return -ENODATA;
res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
@@ -167,11 +167,11 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
return 1;
/* No restrictions if the parent directory is unencrypted */
- if (!cops->is_encrypted(parent))
+ if (!IS_ENCRYPTED(parent))
return 1;
/* Encrypted directories must not contain unencrypted files */
- if (!cops->is_encrypted(child))
+ if (!IS_ENCRYPTED(child))
return 0;
/*
diff --git a/fs/dax.c b/fs/dax.c
index f001d8c72a06..3652b26a0048 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -565,7 +565,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
ret = __radix_tree_lookup(page_tree, index, &node, &slot);
WARN_ON_ONCE(ret != entry);
__radix_tree_replace(page_tree, node, slot,
- new_entry, NULL, NULL);
+ new_entry, NULL);
entry = new_entry;
}
@@ -614,6 +614,13 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
continue;
+ /*
+ * No need to call mmu_notifier_invalidate_range() as we are
+ * downgrading page table protection not changing it to point
+ * to a new page.
+ *
+ * See Documentation/vm/mmu_notifier.txt
+ */
if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
pmd_t pmd;
@@ -628,7 +635,6 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
pmd = pmd_wrprotect(pmd);
pmd = pmd_mkclean(pmd);
set_pmd_at(vma->vm_mm, address, pmdp, pmd);
- mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pmd:
spin_unlock(ptl);
#endif
@@ -643,7 +649,6 @@ unlock_pmd:
pte = pte_wrprotect(pte);
pte = pte_mkclean(pte);
set_pte_at(vma->vm_mm, address, ptep, pte);
- mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pte:
pte_unmap_unlock(ptep, ptl);
}
@@ -789,7 +794,7 @@ int dax_writeback_mapping_range(struct address_space *mapping,
tag_pages_for_writeback(mapping, start_index, end_index);
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
while (!done) {
pvec.nr = find_get_entries_tag(mapping, start_index,
PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
@@ -938,7 +943,7 @@ EXPORT_SYMBOL_GPL(__dax_zero_page_range);
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
- return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
+ return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}
static loff_t
diff --git a/fs/dcache.c b/fs/dcache.c
index f90141387f01..5c7df1df81ff 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -231,7 +231,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
{
/*
* Be careful about RCU walk racing with rename:
- * use 'lockless_dereference' to fetch the name pointer.
+ * use 'READ_ONCE' to fetch the name pointer.
*
* NOTE! Even if a rename will mean that the length
* was not loaded atomically, we don't care. The
@@ -245,7 +245,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
* early because the data cannot match (there can
* be no NUL in the ct/tcount data)
*/
- const unsigned char *cs = lockless_dereference(dentry->d_name.name);
+ const unsigned char *cs = READ_ONCE(dentry->d_name.name);
return dentry_string_cmp(cs, ct, tcount);
}
@@ -630,7 +630,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
rcu_read_lock();
spin_unlock(&dentry->d_lock);
again:
- parent = ACCESS_ONCE(dentry->d_parent);
+ parent = READ_ONCE(dentry->d_parent);
spin_lock(&parent->d_lock);
/*
* We can't blindly lock dentry until we are sure
@@ -721,7 +721,7 @@ static inline bool fast_dput(struct dentry *dentry)
* around with a zero refcount.
*/
smp_rmb();
- d_flags = ACCESS_ONCE(dentry->d_flags);
+ d_flags = READ_ONCE(dentry->d_flags);
d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
/* Nothing to do? Dropping the reference was all we needed? */
@@ -850,11 +850,11 @@ struct dentry *dget_parent(struct dentry *dentry)
* locking.
*/
rcu_read_lock();
- ret = ACCESS_ONCE(dentry->d_parent);
+ ret = READ_ONCE(dentry->d_parent);
gotref = lockref_get_not_zero(&ret->d_lockref);
rcu_read_unlock();
if (likely(gotref)) {
- if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
+ if (likely(ret == READ_ONCE(dentry->d_parent)))
return ret;
dput(ret);
}
@@ -2705,8 +2705,6 @@ static void swap_names(struct dentry *dentry, struct dentry *target)
*/
unsigned int i;
BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
- kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
- kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
swap(((long *) &dentry->d_iname)[i],
((long *) &target->d_iname)[i]);
@@ -3040,7 +3038,7 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
* @buflen: allocated length of the buffer
* @name: name string and length qstr structure
*
- * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
+ * With RCU path tracing, it may race with d_move(). Use READ_ONCE() to
* make sure that either the old or the new name pointer and length are
* fetched. However, there may be mismatch between length and pointer.
* The length cannot be trusted, we need to copy it byte-by-byte until
@@ -3054,8 +3052,8 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
*/
static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
{
- const char *dname = ACCESS_ONCE(name->name);
- u32 dlen = ACCESS_ONCE(name->len);
+ const char *dname = READ_ONCE(name->name);
+ u32 dlen = READ_ONCE(name->len);
char *p;
smp_read_barrier_depends();
@@ -3120,7 +3118,7 @@ restart:
struct dentry * parent;
if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
- struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
+ struct mount *parent = READ_ONCE(mnt->mnt_parent);
/* Escaped? */
if (dentry != vfsmnt->mnt_root) {
bptr = *buffer;
@@ -3130,7 +3128,7 @@ restart:
}
/* Global root? */
if (mnt != parent) {
- dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
+ dentry = READ_ONCE(mnt->mnt_mountpoint);
mnt = parent;
vfsmnt = &mnt->mnt;
continue;
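The ACCESS_ONCE()/lockless_dereference() to READ_ONCE() conversions above all serve the same purpose: the racy pointer is loaded exactly once, so a reader racing with d_move()/rename sees either the old or the new value, never a compiler-refetched mix of the two. A rough userspace analogue using C11 relaxed atomics (invented names, not kernel API) might look like this:

#include <stdatomic.h>

struct qstr_like {
	_Atomic(const char *) name;   /* updated concurrently by a renamer */
	unsigned int len;
};

static const char *fetch_name_once(const struct qstr_like *q)
{
	/*
	 * One single relaxed load: the compiler may not re-read q->name
	 * later and hand us a mix of old and new pointers.
	 */
	return atomic_load_explicit(&q->name, memory_order_relaxed);
}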
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 6dabc4a10396..cd12e6576b48 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -1,16 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* file.c - part of debugfs, a tiny little debug file system
*
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2004 IBM Inc.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* debugfs is for people to use instead of /proc or /sys.
* See Documentation/filesystems/ for more details.
- *
*/
#include <linux/module.h>
@@ -22,7 +18,6 @@
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/device.h>
-#include <linux/srcu.h>
#include <asm/poll.h>
#include "internal.h"
@@ -48,66 +43,108 @@ const struct file_operations debugfs_noop_file_operations = {
.llseek = noop_llseek,
};
+#define F_DENTRY(filp) ((filp)->f_path.dentry)
+
+const struct file_operations *debugfs_real_fops(const struct file *filp)
+{
+ struct debugfs_fsdata *fsd = F_DENTRY(filp)->d_fsdata;
+
+ if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) {
+ /*
+ * Urgh, we've been called w/o a protecting
+ * debugfs_file_get().
+ */
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return fsd->real_fops;
+}
+EXPORT_SYMBOL_GPL(debugfs_real_fops);
+
/**
- * debugfs_use_file_start - mark the beginning of file data access
+ * debugfs_file_get - mark the beginning of file data access
* @dentry: the dentry object whose data is being accessed.
- * @srcu_idx: a pointer to some memory to store a SRCU index in.
*
- * Up to a matching call to debugfs_use_file_finish(), any
- * successive call into the file removing functions debugfs_remove()
- * and debugfs_remove_recursive() will block. Since associated private
+ * Up to a matching call to debugfs_file_put(), any successive call
+ * into the file removing functions debugfs_remove() and
+ * debugfs_remove_recursive() will block. Since associated private
* file data may only get freed after a successful return of any of
* the removal functions, you may safely access it after a successful
- * call to debugfs_use_file_start() without worrying about
- * lifetime issues.
+ * call to debugfs_file_get() without worrying about lifetime issues.
*
* If -%EIO is returned, the file has already been removed and thus,
* it is not safe to access any of its data. If, on the other hand,
* it is allowed to access the file data, zero is returned.
- *
- * Regardless of the return code, any call to
- * debugfs_use_file_start() must be followed by a matching call
- * to debugfs_use_file_finish().
*/
-int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx)
- __acquires(&debugfs_srcu)
+int debugfs_file_get(struct dentry *dentry)
{
- *srcu_idx = srcu_read_lock(&debugfs_srcu);
- barrier();
+ struct debugfs_fsdata *fsd;
+ void *d_fsd;
+
+ d_fsd = READ_ONCE(dentry->d_fsdata);
+ if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) {
+ fsd = d_fsd;
+ } else {
+ fsd = kmalloc(sizeof(*fsd), GFP_KERNEL);
+ if (!fsd)
+ return -ENOMEM;
+
+ fsd->real_fops = (void *)((unsigned long)d_fsd &
+ ~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
+ refcount_set(&fsd->active_users, 1);
+ init_completion(&fsd->active_users_drained);
+ if (cmpxchg(&dentry->d_fsdata, d_fsd, fsd) != d_fsd) {
+ kfree(fsd);
+ fsd = READ_ONCE(dentry->d_fsdata);
+ }
+ }
+
+ /*
+ * In case of a successful cmpxchg() above, this check is
+ * strictly necessary and must follow it, see the comment in
+ * __debugfs_remove_file().
+ * OTOH, if the cmpxchg() hasn't been executed or wasn't
+ * successful, this serves the purpose of not starving
+ * removers.
+ */
if (d_unlinked(dentry))
return -EIO;
+
+ if (!refcount_inc_not_zero(&fsd->active_users))
+ return -EIO;
+
return 0;
}
-EXPORT_SYMBOL_GPL(debugfs_use_file_start);
+EXPORT_SYMBOL_GPL(debugfs_file_get);
/**
- * debugfs_use_file_finish - mark the end of file data access
- * @srcu_idx: the SRCU index "created" by a former call to
- * debugfs_use_file_start().
+ * debugfs_file_put - mark the end of file data access
+ * @dentry: the dentry object formerly passed to
+ * debugfs_file_get().
*
* Allow any ongoing concurrent call into debugfs_remove() or
* debugfs_remove_recursive() blocked by a former call to
- * debugfs_use_file_start() to proceed and return to its caller.
+ * debugfs_file_get() to proceed and return to its caller.
*/
-void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu)
+void debugfs_file_put(struct dentry *dentry)
{
- srcu_read_unlock(&debugfs_srcu, srcu_idx);
-}
-EXPORT_SYMBOL_GPL(debugfs_use_file_finish);
+ struct debugfs_fsdata *fsd = READ_ONCE(dentry->d_fsdata);
-#define F_DENTRY(filp) ((filp)->f_path.dentry)
+ if (refcount_dec_and_test(&fsd->active_users))
+ complete(&fsd->active_users_drained);
+}
+EXPORT_SYMBOL_GPL(debugfs_file_put);
static int open_proxy_open(struct inode *inode, struct file *filp)
{
- const struct dentry *dentry = F_DENTRY(filp);
+ struct dentry *dentry = F_DENTRY(filp);
const struct file_operations *real_fops = NULL;
- int srcu_idx, r;
+ int r;
- r = debugfs_use_file_start(dentry, &srcu_idx);
- if (r) {
- r = -ENOENT;
- goto out;
- }
+ r = debugfs_file_get(dentry);
+ if (r)
+ return r == -EIO ? -ENOENT : r;
real_fops = debugfs_real_fops(filp);
real_fops = fops_get(real_fops);
@@ -124,7 +161,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
r = real_fops->open(inode, filp);
out:
- debugfs_use_file_finish(srcu_idx);
+ debugfs_file_put(dentry);
return r;
}
@@ -138,16 +175,16 @@ const struct file_operations debugfs_open_proxy_file_operations = {
#define FULL_PROXY_FUNC(name, ret_type, filp, proto, args) \
static ret_type full_proxy_ ## name(proto) \
{ \
- const struct dentry *dentry = F_DENTRY(filp); \
- const struct file_operations *real_fops = \
- debugfs_real_fops(filp); \
- int srcu_idx; \
+ struct dentry *dentry = F_DENTRY(filp); \
+ const struct file_operations *real_fops; \
ret_type r; \
\
- r = debugfs_use_file_start(dentry, &srcu_idx); \
- if (likely(!r)) \
- r = real_fops->name(args); \
- debugfs_use_file_finish(srcu_idx); \
+ r = debugfs_file_get(dentry); \
+ if (unlikely(r)) \
+ return r; \
+ real_fops = debugfs_real_fops(filp); \
+ r = real_fops->name(args); \
+ debugfs_file_put(dentry); \
return r; \
}
@@ -172,18 +209,16 @@ FULL_PROXY_FUNC(unlocked_ioctl, long, filp,
static unsigned int full_proxy_poll(struct file *filp,
struct poll_table_struct *wait)
{
- const struct dentry *dentry = F_DENTRY(filp);
- const struct file_operations *real_fops = debugfs_real_fops(filp);
- int srcu_idx;
+ struct dentry *dentry = F_DENTRY(filp);
unsigned int r = 0;
+ const struct file_operations *real_fops;
- if (debugfs_use_file_start(dentry, &srcu_idx)) {
- debugfs_use_file_finish(srcu_idx);
+ if (debugfs_file_get(dentry))
return POLLHUP;
- }
+ real_fops = debugfs_real_fops(filp);
r = real_fops->poll(filp, wait);
- debugfs_use_file_finish(srcu_idx);
+ debugfs_file_put(dentry);
return r;
}
@@ -227,16 +262,14 @@ static void __full_proxy_fops_init(struct file_operations *proxy_fops,
static int full_proxy_open(struct inode *inode, struct file *filp)
{
- const struct dentry *dentry = F_DENTRY(filp);
+ struct dentry *dentry = F_DENTRY(filp);
const struct file_operations *real_fops = NULL;
struct file_operations *proxy_fops = NULL;
- int srcu_idx, r;
+ int r;
- r = debugfs_use_file_start(dentry, &srcu_idx);
- if (r) {
- r = -ENOENT;
- goto out;
- }
+ r = debugfs_file_get(dentry);
+ if (r)
+ return r == -EIO ? -ENOENT : r;
real_fops = debugfs_real_fops(filp);
real_fops = fops_get(real_fops);
@@ -274,7 +307,7 @@ free_proxy:
kfree(proxy_fops);
fops_put(real_fops);
out:
- debugfs_use_file_finish(srcu_idx);
+ debugfs_file_put(dentry);
return r;
}
@@ -285,13 +318,14 @@ const struct file_operations debugfs_full_proxy_file_operations = {
ssize_t debugfs_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
+ struct dentry *dentry = F_DENTRY(file);
ssize_t ret;
- int srcu_idx;
- ret = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
- if (likely(!ret))
- ret = simple_attr_read(file, buf, len, ppos);
- debugfs_use_file_finish(srcu_idx);
+ ret = debugfs_file_get(dentry);
+ if (unlikely(ret))
+ return ret;
+ ret = simple_attr_read(file, buf, len, ppos);
+ debugfs_file_put(dentry);
return ret;
}
EXPORT_SYMBOL_GPL(debugfs_attr_read);
@@ -299,13 +333,14 @@ EXPORT_SYMBOL_GPL(debugfs_attr_read);
ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
+ struct dentry *dentry = F_DENTRY(file);
ssize_t ret;
- int srcu_idx;
- ret = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
- if (likely(!ret))
- ret = simple_attr_write(file, buf, len, ppos);
- debugfs_use_file_finish(srcu_idx);
+ ret = debugfs_file_get(dentry);
+ if (unlikely(ret))
+ return ret;
+ ret = simple_attr_write(file, buf, len, ppos);
+ debugfs_file_put(dentry);
return ret;
}
EXPORT_SYMBOL_GPL(debugfs_attr_write);
@@ -739,14 +774,14 @@ ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
{
char buf[3];
bool val;
- int r, srcu_idx;
+ int r;
+ struct dentry *dentry = F_DENTRY(file);
- r = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
- if (likely(!r))
- val = *(bool *)file->private_data;
- debugfs_use_file_finish(srcu_idx);
- if (r)
+ r = debugfs_file_get(dentry);
+ if (unlikely(r))
return r;
+ val = *(bool *)file->private_data;
+ debugfs_file_put(dentry);
if (val)
buf[0] = 'Y';
@@ -764,8 +799,9 @@ ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
char buf[32];
size_t buf_size;
bool bv;
- int r, srcu_idx;
+ int r;
bool *val = file->private_data;
+ struct dentry *dentry = F_DENTRY(file);
buf_size = min(count, (sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
@@ -773,12 +809,11 @@ ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
buf[buf_size] = '\0';
if (strtobool(buf, &bv) == 0) {
- r = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
- if (likely(!r))
- *val = bv;
- debugfs_use_file_finish(srcu_idx);
- if (r)
+ r = debugfs_file_get(dentry);
+ if (unlikely(r))
return r;
+ *val = bv;
+ debugfs_file_put(dentry);
}
return count;
@@ -840,14 +875,15 @@ static ssize_t read_file_blob(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct debugfs_blob_wrapper *blob = file->private_data;
+ struct dentry *dentry = F_DENTRY(file);
ssize_t r;
- int srcu_idx;
- r = debugfs_use_file_start(F_DENTRY(file), &srcu_idx);
- if (likely(!r))
- r = simple_read_from_buffer(user_buf, count, ppos, blob->data,
- blob->size);
- debugfs_use_file_finish(srcu_idx);
+ r = debugfs_file_get(dentry);
+ if (unlikely(r))
+ return r;
+ r = simple_read_from_buffer(user_buf, count, ppos, blob->data,
+ blob->size);
+ debugfs_file_put(dentry);
return r;
}
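The debugfs_file_get()/debugfs_file_put() pair above replaces the old SRCU scheme with a per-file refcount plus a completion: the dentry owns one reference, readers take additional references with inc_not_zero(), and the remover drops the initial reference and waits for the completion if readers are still active. A minimal, self-contained userspace sketch of that drain pattern (names such as file_ref are invented for illustration; this is not the kernel API) could look like this:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct file_ref {
	atomic_int      active_users;   /* starts at 1: the owner's reference */
	pthread_mutex_t lock;
	pthread_cond_t  drained;
	bool            done;
};

static void file_ref_init(struct file_ref *r)
{
	atomic_init(&r->active_users, 1);
	pthread_mutex_init(&r->lock, NULL);
	pthread_cond_init(&r->drained, NULL);
	r->done = false;
}

/* Reader side: fails once removal has dropped the initial reference. */
static bool file_ref_get(struct file_ref *r)
{
	int old = atomic_load(&r->active_users);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&r->active_users, &old, old + 1))
			return true;
	}
	return false;   /* object is going away, like -EIO from debugfs_file_get() */
}

static void file_ref_put(struct file_ref *r)
{
	if (atomic_fetch_sub(&r->active_users, 1) == 1) {
		/* last reference gone: wake a waiting remover (like complete()) */
		pthread_mutex_lock(&r->lock);
		r->done = true;
		pthread_cond_signal(&r->drained);
		pthread_mutex_unlock(&r->lock);
	}
}

/* Remover: drop the initial reference, then wait until the last user is gone. */
static void file_ref_remove(struct file_ref *r)
{
	if (atomic_fetch_sub(&r->active_users, 1) != 1) {
		pthread_mutex_lock(&r->lock);
		while (!r->done)   /* like wait_for_completion() */
			pthread_cond_wait(&r->drained, &r->lock);
		pthread_mutex_unlock(&r->lock);
	}
}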
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index c59f015f386e..63a998c3f252 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -1,16 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* inode.c - part of debugfs, a tiny little debug file system
*
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2004 IBM Inc.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* debugfs is for people to use instead of /proc or /sys.
* See ./Documentation/core-api/kernel-api.rst for more details.
- *
*/
#include <linux/module.h>
@@ -27,14 +23,11 @@
#include <linux/parser.h>
#include <linux/magic.h>
#include <linux/slab.h>
-#include <linux/srcu.h>
#include "internal.h"
#define DEBUGFS_DEFAULT_MODE 0700
-DEFINE_SRCU(debugfs_srcu);
-
static struct vfsmount *debugfs_mount;
static int debugfs_mount_count;
static bool debugfs_registered;
@@ -185,6 +178,14 @@ static const struct super_operations debugfs_super_operations = {
.evict_inode = debugfs_evict_inode,
};
+static void debugfs_release_dentry(struct dentry *dentry)
+{
+ void *fsd = dentry->d_fsdata;
+
+ if (!((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))
+ kfree(dentry->d_fsdata);
+}
+
static struct vfsmount *debugfs_automount(struct path *path)
{
debugfs_automount_t f;
@@ -194,6 +195,7 @@ static struct vfsmount *debugfs_automount(struct path *path)
static const struct dentry_operations debugfs_dops = {
.d_delete = always_delete_dentry,
+ .d_release = debugfs_release_dentry,
.d_automount = debugfs_automount,
};
@@ -358,7 +360,8 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
inode->i_private = data;
inode->i_fop = proxy_fops;
- dentry->d_fsdata = (void *)real_fops;
+ dentry->d_fsdata = (void *)((unsigned long)real_fops |
+ DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
d_instantiate(dentry, inode);
fsnotify_create(d_inode(dentry->d_parent), dentry);
@@ -615,18 +618,43 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
}
EXPORT_SYMBOL_GPL(debugfs_create_symlink);
+static void __debugfs_remove_file(struct dentry *dentry, struct dentry *parent)
+{
+ struct debugfs_fsdata *fsd;
+
+ simple_unlink(d_inode(parent), dentry);
+ d_delete(dentry);
+
+ /*
+ * Paired with the closing smp_mb() implied by a successful
+ * cmpxchg() in debugfs_file_get(): either
+ * debugfs_file_get() must see a dead dentry or we must see a
+ * debugfs_fsdata instance at ->d_fsdata here (or both).
+ */
+ smp_mb();
+ fsd = READ_ONCE(dentry->d_fsdata);
+ if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
+ return;
+ if (!refcount_dec_and_test(&fsd->active_users))
+ wait_for_completion(&fsd->active_users_drained);
+}
+
static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
{
int ret = 0;
if (simple_positive(dentry)) {
dget(dentry);
- if (d_is_dir(dentry))
- ret = simple_rmdir(d_inode(parent), dentry);
- else
- simple_unlink(d_inode(parent), dentry);
- if (!ret)
- d_delete(dentry);
+ if (!d_is_reg(dentry)) {
+ if (d_is_dir(dentry))
+ ret = simple_rmdir(d_inode(parent), dentry);
+ else
+ simple_unlink(d_inode(parent), dentry);
+ if (!ret)
+ d_delete(dentry);
+ } else {
+ __debugfs_remove_file(dentry, parent);
+ }
dput(dentry);
}
return ret;
@@ -660,8 +688,6 @@ void debugfs_remove(struct dentry *dentry)
inode_unlock(d_inode(parent));
if (!ret)
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
-
- synchronize_srcu(&debugfs_srcu);
}
EXPORT_SYMBOL_GPL(debugfs_remove);
@@ -735,8 +761,6 @@ void debugfs_remove_recursive(struct dentry *dentry)
if (!__debugfs_remove(child, parent))
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
inode_unlock(d_inode(parent));
-
- synchronize_srcu(&debugfs_srcu);
}
EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
index b3e8443a1f47..f0d73d86cc1a 100644
--- a/fs/debugfs/internal.h
+++ b/fs/debugfs/internal.h
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* internal.h - declarations internal to debugfs
*
* Copyright (C) 2016 Nicolai Stange <nicstange@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
*/
#ifndef _DEBUGFS_INTERNAL_H_
@@ -19,4 +15,18 @@ extern const struct file_operations debugfs_noop_file_operations;
extern const struct file_operations debugfs_open_proxy_file_operations;
extern const struct file_operations debugfs_full_proxy_file_operations;
+struct debugfs_fsdata {
+ const struct file_operations *real_fops;
+ refcount_t active_users;
+ struct completion active_users_drained;
+};
+
+/*
+ * A dentry's ->d_fsdata either points to the real fops or to a
+ * dynamically allocated debugfs_fsdata instance.
+ * In order to distinguish between these two cases, a real fops
+ * pointer gets its lowest bit set.
+ */
+#define DEBUGFS_FSDATA_IS_REAL_FOPS_BIT BIT(0)
+
#endif /* _DEBUGFS_INTERNAL_H_ */
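The DEBUGFS_FSDATA_IS_REAL_FOPS_BIT trick relies on both possible pointees being at least 2-byte aligned, so bit 0 of ->d_fsdata is free to mark "still a raw fops pointer" versus "points to an allocated debugfs_fsdata". A small standalone sketch of the tagging helpers (userspace, invented names) is:

#include <stdbool.h>
#include <stdint.h>

#define IS_REAL_FOPS_BIT 0x1UL

/* Store a real fops pointer with its low bit set as the marker. */
static inline void *tag_real_fops(const void *fops)
{
	return (void *)((uintptr_t)fops | IS_REAL_FOPS_BIT);
}

/* Does d_fsdata still hold a tagged fops pointer (no reader ever attached)? */
static inline bool is_real_fops(const void *d_fsdata)
{
	return (uintptr_t)d_fsdata & IS_REAL_FOPS_BIT;
}

/* Recover the original, untagged fops pointer. */
static inline const void *untag_real_fops(const void *d_fsdata)
{
	return (const void *)((uintptr_t)d_fsdata & ~IS_REAL_FOPS_BIT);
}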
diff --git a/fs/direct-io.c b/fs/direct-io.c
index b53e66d9abd7..3aafb3343a65 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -497,7 +497,7 @@ static struct bio *dio_await_one(struct dio *dio)
dio->waiter = current;
spin_unlock_irqrestore(&dio->bio_lock, flags);
if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
- !blk_mq_poll(dio->bio_disk->queue, dio->bio_cookie))
+ !blk_poll(dio->bio_disk->queue, dio->bio_cookie))
io_schedule();
/* wake up sets us TASK_RUNNING */
spin_lock_irqsave(&dio->bio_lock, flags);
@@ -1152,7 +1152,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int flags)
{
- unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
+ unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
unsigned blkbits = i_blkbits;
unsigned blocksize_mask = (1 << blkbits) - 1;
ssize_t retval = -EINVAL;
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 07fed838d8fd..562fa8c3edff 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -181,6 +181,8 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
spin_lock(&dlm_cb_seq_spin);
new_seq = ++dlm_cb_seq;
+ if (!dlm_cb_seq)
+ new_seq = ++dlm_cb_seq;
spin_unlock(&dlm_cb_seq_spin);
if (lkb->lkb_flags & DLM_IFL_USER) {
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 7211e826d90d..1270551d24e3 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -282,44 +282,44 @@ static struct configfs_item_operations node_ops = {
.release = release_node,
};
-static struct config_item_type clusters_type = {
+static const struct config_item_type clusters_type = {
.ct_group_ops = &clusters_ops,
.ct_owner = THIS_MODULE,
};
-static struct config_item_type cluster_type = {
+static const struct config_item_type cluster_type = {
.ct_item_ops = &cluster_ops,
.ct_attrs = cluster_attrs,
.ct_owner = THIS_MODULE,
};
-static struct config_item_type spaces_type = {
+static const struct config_item_type spaces_type = {
.ct_group_ops = &spaces_ops,
.ct_owner = THIS_MODULE,
};
-static struct config_item_type space_type = {
+static const struct config_item_type space_type = {
.ct_item_ops = &space_ops,
.ct_owner = THIS_MODULE,
};
-static struct config_item_type comms_type = {
+static const struct config_item_type comms_type = {
.ct_group_ops = &comms_ops,
.ct_owner = THIS_MODULE,
};
-static struct config_item_type comm_type = {
+static const struct config_item_type comm_type = {
.ct_item_ops = &comm_ops,
.ct_attrs = comm_attrs,
.ct_owner = THIS_MODULE,
};
-static struct config_item_type nodes_type = {
+static const struct config_item_type nodes_type = {
.ct_group_ops = &nodes_ops,
.ct_owner = THIS_MODULE,
};
-static struct config_item_type node_type = {
+static const struct config_item_type node_type = {
.ct_item_ops = &node_ops,
.ct_attrs = node_attrs,
.ct_owner = THIS_MODULE,
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index d4aaddec1b16..cc91963683de 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1003,7 +1003,6 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
if (r->res_master_nodeid == our_nodeid) {
log_error(ls, "from_master %d our_master", from_nodeid);
dlm_dump_rsb(r);
- dlm_send_rcom_lookup_dump(r, from_nodeid);
goto out_found;
}
@@ -2465,14 +2464,12 @@ static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
lkb->lkb_grmode = DLM_LOCK_NL;
lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
- } else if (!(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
- if (err)
- *err = -EDEADLK;
- else {
- log_print("can_be_granted deadlock %x now %d",
- lkb->lkb_id, now);
- dlm_dump_rsb(r);
- }
+ } else if (err) {
+ *err = -EDEADLK;
+ } else {
+ log_print("can_be_granted deadlock %x now %d",
+ lkb->lkb_id, now);
+ dlm_dump_rsb(r);
}
goto out;
}
@@ -2501,13 +2498,6 @@ static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
return rv;
}
-/* FIXME: I don't think that can_be_granted() can/will demote or find deadlock
- for locks pending on the convert list. Once verified (watch for these
- log_prints), we should be able to just call _can_be_granted() and not
- bother with the demote/deadlk cases here (and there's no easy way to deal
- with a deadlk here, we'd have to generate something like grant_lock with
- the deadlk error.) */
-
/* Returns the highest requested mode of all blocked conversions; sets
cw if there's a blocked conversion to DLM_LOCK_CW. */
@@ -2545,9 +2535,22 @@ static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
}
if (deadlk) {
- log_print("WARN: pending deadlock %x node %d %s",
- lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
- dlm_dump_rsb(r);
+ /*
+ * If the DLM_LKF_NODLCKWT flag is set and a conversion
+ * deadlock is detected, we request a blocking AST so the
+ * conversion can be down-converted (or cancelled).
+ */
+ if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) {
+ if (lkb->lkb_highbast < lkb->lkb_rqmode) {
+ queue_bast(r, lkb, lkb->lkb_rqmode);
+ lkb->lkb_highbast = lkb->lkb_rqmode;
+ }
+ } else {
+ log_print("WARN: pending deadlock %x node %d %s",
+ lkb->lkb_id, lkb->lkb_nodeid,
+ r->res_name);
+ dlm_dump_rsb(r);
+ }
continue;
}
@@ -3123,7 +3126,7 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
deadlock, so we leave it on the granted queue and return EDEADLK in
the ast for the convert. */
- if (deadlk) {
+ if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
/* it's left on the granted queue */
revert_lock(r, lkb);
queue_cast(r, lkb, -EDEADLK);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 4813d0e0cd9b..05707850f93a 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -107,11 +107,11 @@ struct connection {
unsigned long flags;
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
-#define CF_CONNECT_PENDING 3
#define CF_INIT_PENDING 4
#define CF_IS_OTHERCON 5
#define CF_CLOSE 6
#define CF_APP_LIMITED 7
+#define CF_CLOSING 8
struct list_head writequeue; /* List of outgoing writequeue_entries */
spinlock_t writequeue_lock;
int (*rx_action) (struct connection *); /* What to do when active */
@@ -124,10 +124,6 @@ struct connection {
struct connection *othercon;
struct work_struct rwork; /* Receive workqueue */
struct work_struct swork; /* Send workqueue */
- void (*orig_error_report)(struct sock *);
- void (*orig_data_ready)(struct sock *);
- void (*orig_state_change)(struct sock *);
- void (*orig_write_space)(struct sock *);
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)
@@ -150,6 +146,13 @@ struct dlm_node_addr {
struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
};
+static struct listen_sock_callbacks {
+ void (*sk_error_report)(struct sock *);
+ void (*sk_data_ready)(struct sock *);
+ void (*sk_state_change)(struct sock *);
+ void (*sk_write_space)(struct sock *);
+} listen_sock;
+
static LIST_HEAD(dlm_node_addrs);
static DEFINE_SPINLOCK(dlm_node_addrs_spin);
@@ -408,17 +411,23 @@ int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk)
{
- struct connection *con = sock2con(sk);
+ struct connection *con;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ con = sock2con(sk);
if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
queue_work(recv_workqueue, &con->rwork);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static void lowcomms_write_space(struct sock *sk)
{
- struct connection *con = sock2con(sk);
+ struct connection *con;
+ read_lock_bh(&sk->sk_callback_lock);
+ con = sock2con(sk);
if (!con)
- return;
+ goto out;
clear_bit(SOCK_NOSPACE, &con->sock->flags);
@@ -427,16 +436,17 @@ static void lowcomms_write_space(struct sock *sk)
clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
}
- if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
- queue_work(send_workqueue, &con->swork);
+ queue_work(send_workqueue, &con->swork);
+out:
+ read_unlock_bh(&sk->sk_callback_lock);
}
static inline void lowcomms_connect_sock(struct connection *con)
{
if (test_bit(CF_CLOSE, &con->flags))
return;
- if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
- queue_work(send_workqueue, &con->swork);
+ queue_work(send_workqueue, &con->swork);
+ cond_resched();
}
static void lowcomms_state_change(struct sock *sk)
@@ -480,7 +490,7 @@ static void lowcomms_error_report(struct sock *sk)
if (con == NULL)
goto out;
- orig_report = con->orig_error_report;
+ orig_report = listen_sock.sk_error_report;
if (con->sock == NULL ||
kernel_getpeername(con->sock, (struct sockaddr *)&saddr, &buflen)) {
printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
@@ -517,27 +527,31 @@ out:
}
/* Note: sk_callback_lock must be locked before calling this function. */
-static void save_callbacks(struct connection *con, struct sock *sk)
+static void save_listen_callbacks(struct socket *sock)
{
- con->orig_data_ready = sk->sk_data_ready;
- con->orig_state_change = sk->sk_state_change;
- con->orig_write_space = sk->sk_write_space;
- con->orig_error_report = sk->sk_error_report;
+ struct sock *sk = sock->sk;
+
+ listen_sock.sk_data_ready = sk->sk_data_ready;
+ listen_sock.sk_state_change = sk->sk_state_change;
+ listen_sock.sk_write_space = sk->sk_write_space;
+ listen_sock.sk_error_report = sk->sk_error_report;
}
-static void restore_callbacks(struct connection *con, struct sock *sk)
+static void restore_callbacks(struct socket *sock)
{
+ struct sock *sk = sock->sk;
+
write_lock_bh(&sk->sk_callback_lock);
sk->sk_user_data = NULL;
- sk->sk_data_ready = con->orig_data_ready;
- sk->sk_state_change = con->orig_state_change;
- sk->sk_write_space = con->orig_write_space;
- sk->sk_error_report = con->orig_error_report;
+ sk->sk_data_ready = listen_sock.sk_data_ready;
+ sk->sk_state_change = listen_sock.sk_state_change;
+ sk->sk_write_space = listen_sock.sk_write_space;
+ sk->sk_error_report = listen_sock.sk_error_report;
write_unlock_bh(&sk->sk_callback_lock);
}
/* Make a socket active */
-static void add_sock(struct socket *sock, struct connection *con, bool save_cb)
+static void add_sock(struct socket *sock, struct connection *con)
{
struct sock *sk = sock->sk;
@@ -545,8 +559,6 @@ static void add_sock(struct socket *sock, struct connection *con, bool save_cb)
con->sock = sock;
sk->sk_user_data = con;
- if (save_cb)
- save_callbacks(con, sk);
/* Install a data_ready callback */
sk->sk_data_ready = lowcomms_data_ready;
sk->sk_write_space = lowcomms_write_space;
@@ -579,17 +591,20 @@ static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
static void close_connection(struct connection *con, bool and_other,
bool tx, bool rx)
{
- clear_bit(CF_CONNECT_PENDING, &con->flags);
- clear_bit(CF_WRITE_PENDING, &con->flags);
- if (tx && cancel_work_sync(&con->swork))
+ bool closing = test_and_set_bit(CF_CLOSING, &con->flags);
+
+ if (tx && !closing && cancel_work_sync(&con->swork)) {
log_print("canceled swork for node %d", con->nodeid);
- if (rx && cancel_work_sync(&con->rwork))
+ clear_bit(CF_WRITE_PENDING, &con->flags);
+ }
+ if (rx && !closing && cancel_work_sync(&con->rwork)) {
log_print("canceled rwork for node %d", con->nodeid);
+ clear_bit(CF_READ_PENDING, &con->flags);
+ }
mutex_lock(&con->sock_mutex);
if (con->sock) {
- if (!test_bit(CF_IS_OTHERCON, &con->flags))
- restore_callbacks(con, con->sock->sk);
+ restore_callbacks(con->sock);
sock_release(con->sock);
con->sock = NULL;
}
@@ -604,6 +619,7 @@ static void close_connection(struct connection *con, bool and_other,
con->retries = 0;
mutex_unlock(&con->sock_mutex);
+ clear_bit(CF_CLOSING, &con->flags);
}
/* Data received from remote end */
@@ -700,7 +716,7 @@ out_resched:
out_close:
mutex_unlock(&con->sock_mutex);
if (ret != -EAGAIN) {
- close_connection(con, false, true, false);
+ close_connection(con, true, true, false);
/* Reconnect when there is something to send */
}
/* Don't return success if we really got EOF */
@@ -728,22 +744,14 @@ static int tcp_accept_from_sock(struct connection *con)
}
mutex_unlock(&connections_lock);
- memset(&peeraddr, 0, sizeof(peeraddr));
- result = sock_create_lite(dlm_local_addr[0]->ss_family,
- SOCK_STREAM, IPPROTO_TCP, &newsock);
- if (result < 0)
- return -ENOMEM;
-
mutex_lock_nested(&con->sock_mutex, 0);
- result = -ENOTCONN;
- if (con->sock == NULL)
- goto accept_err;
-
- newsock->type = con->sock->type;
- newsock->ops = con->sock->ops;
+ if (!con->sock) {
+ mutex_unlock(&con->sock_mutex);
+ return -ENOTCONN;
+ }
- result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK, true);
+ result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
if (result < 0)
goto accept_err;
@@ -794,31 +802,33 @@ static int tcp_accept_from_sock(struct connection *con)
othercon->nodeid = nodeid;
othercon->rx_action = receive_from_sock;
mutex_init(&othercon->sock_mutex);
+ INIT_LIST_HEAD(&othercon->writequeue);
+ spin_lock_init(&othercon->writequeue_lock);
INIT_WORK(&othercon->swork, process_send_sockets);
INIT_WORK(&othercon->rwork, process_recv_sockets);
set_bit(CF_IS_OTHERCON, &othercon->flags);
}
+ mutex_lock_nested(&othercon->sock_mutex, 2);
if (!othercon->sock) {
newcon->othercon = othercon;
- othercon->sock = newsock;
- newsock->sk->sk_user_data = othercon;
- add_sock(newsock, othercon, false);
+ add_sock(newsock, othercon);
addcon = othercon;
+ mutex_unlock(&othercon->sock_mutex);
}
else {
printk("Extra connection from node %d attempted\n", nodeid);
result = -EAGAIN;
+ mutex_unlock(&othercon->sock_mutex);
mutex_unlock(&newcon->sock_mutex);
goto accept_err;
}
}
else {
- newsock->sk->sk_user_data = newcon;
newcon->rx_action = receive_from_sock;
/* accept copies the sk after we've saved the callbacks, so we
don't want to save them a second time or comm errors will
result in calling sk_error_report recursively. */
- add_sock(newsock, newcon, false);
+ add_sock(newsock, newcon);
addcon = newcon;
}
@@ -837,7 +847,8 @@ static int tcp_accept_from_sock(struct connection *con)
accept_err:
mutex_unlock(&con->sock_mutex);
- sock_release(newsock);
+ if (newsock)
+ sock_release(newsock);
if (result != -EAGAIN)
log_print("error accepting connection from node: %d", result);
@@ -911,26 +922,28 @@ static int sctp_accept_from_sock(struct connection *con)
othercon->nodeid = nodeid;
othercon->rx_action = receive_from_sock;
mutex_init(&othercon->sock_mutex);
+ INIT_LIST_HEAD(&othercon->writequeue);
+ spin_lock_init(&othercon->writequeue_lock);
INIT_WORK(&othercon->swork, process_send_sockets);
INIT_WORK(&othercon->rwork, process_recv_sockets);
set_bit(CF_IS_OTHERCON, &othercon->flags);
}
+ mutex_lock_nested(&othercon->sock_mutex, 2);
if (!othercon->sock) {
newcon->othercon = othercon;
- othercon->sock = newsock;
- newsock->sk->sk_user_data = othercon;
- add_sock(newsock, othercon, false);
+ add_sock(newsock, othercon);
addcon = othercon;
+ mutex_unlock(&othercon->sock_mutex);
} else {
printk("Extra connection from node %d attempted\n", nodeid);
ret = -EAGAIN;
+ mutex_unlock(&othercon->sock_mutex);
mutex_unlock(&newcon->sock_mutex);
goto accept_err;
}
} else {
- newsock->sk->sk_user_data = newcon;
newcon->rx_action = receive_from_sock;
- add_sock(newsock, newcon, false);
+ add_sock(newsock, newcon);
addcon = newcon;
}
@@ -1055,10 +1068,9 @@ static void sctp_connect_to_sock(struct connection *con)
if (result < 0)
goto socket_err;
- sock->sk->sk_user_data = con;
con->rx_action = receive_from_sock;
con->connect_action = sctp_connect_to_sock;
- add_sock(sock, con, true);
+ add_sock(sock, con);
/* Bind to all addresses. */
if (sctp_bind_addrs(con, 0))
@@ -1079,7 +1091,6 @@ static void sctp_connect_to_sock(struct connection *con)
if (result == 0)
goto out;
-
bind_err:
con->sock = NULL;
sock_release(sock);
@@ -1098,14 +1109,12 @@ socket_err:
con->retries, result);
mutex_unlock(&con->sock_mutex);
msleep(1000);
- clear_bit(CF_CONNECT_PENDING, &con->flags);
lowcomms_connect_sock(con);
return;
}
out:
mutex_unlock(&con->sock_mutex);
- set_bit(CF_WRITE_PENDING, &con->flags);
}
/* Connect a new socket to its peer */
@@ -1143,10 +1152,9 @@ static void tcp_connect_to_sock(struct connection *con)
goto out_err;
}
- sock->sk->sk_user_data = con;
con->rx_action = receive_from_sock;
con->connect_action = tcp_connect_to_sock;
- add_sock(sock, con, true);
+ add_sock(sock, con);
/* Bind to our cluster-known address connecting to avoid
routing problems */
@@ -1194,13 +1202,11 @@ out_err:
con->retries, result);
mutex_unlock(&con->sock_mutex);
msleep(1000);
- clear_bit(CF_CONNECT_PENDING, &con->flags);
lowcomms_connect_sock(con);
return;
}
out:
mutex_unlock(&con->sock_mutex);
- set_bit(CF_WRITE_PENDING, &con->flags);
return;
}
@@ -1235,10 +1241,12 @@ static struct socket *tcp_create_listen_sock(struct connection *con,
if (result < 0) {
log_print("Failed to set SO_REUSEADDR on socket: %d", result);
}
+ write_lock_bh(&sock->sk->sk_callback_lock);
sock->sk->sk_user_data = con;
-
+ save_listen_callbacks(sock);
con->rx_action = tcp_accept_from_sock;
con->connect_action = tcp_connect_to_sock;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
/* Bind to our port */
make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
@@ -1320,6 +1328,7 @@ static int sctp_listen_for_all(void)
write_lock_bh(&sock->sk->sk_callback_lock);
/* Init con struct */
sock->sk->sk_user_data = con;
+ save_listen_callbacks(sock);
con->sock = sock;
con->sock->sk->sk_data_ready = lowcomms_data_ready;
con->rx_action = sctp_accept_from_sock;
@@ -1366,7 +1375,7 @@ static int tcp_listen_for_all(void)
sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
if (sock) {
- add_sock(sock, con, true);
+ add_sock(sock, con);
result = 0;
}
else {
@@ -1456,9 +1465,7 @@ void dlm_lowcomms_commit_buffer(void *mh)
e->len = e->end - e->offset;
spin_unlock(&con->writequeue_lock);
- if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
- queue_work(send_workqueue, &con->swork);
- }
+ queue_work(send_workqueue, &con->swork);
return;
out:
@@ -1527,13 +1534,16 @@ out:
send_error:
mutex_unlock(&con->sock_mutex);
- close_connection(con, false, false, true);
- lowcomms_connect_sock(con);
+ close_connection(con, true, false, true);
+ /* Requeue the send work. When the work daemon runs again, it will try
+ a new connection, then call this function again. */
+ queue_work(send_workqueue, &con->swork);
return;
out_connect:
mutex_unlock(&con->sock_mutex);
- lowcomms_connect_sock(con);
+ queue_work(send_workqueue, &con->swork);
+ cond_resched();
}
static void clean_one_writequeue(struct connection *con)
@@ -1593,9 +1603,10 @@ static void process_send_sockets(struct work_struct *work)
{
struct connection *con = container_of(work, struct connection, swork);
- if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags))
+ clear_bit(CF_WRITE_PENDING, &con->flags);
+ if (con->sock == NULL) /* not mutex protected so check it inside too */
con->connect_action(con);
- if (test_and_clear_bit(CF_WRITE_PENDING, &con->flags))
+ if (!list_empty(&con->writequeue))
send_to_sock(con);
}
@@ -1632,11 +1643,25 @@ static int work_start(void)
return 0;
}
-static void stop_conn(struct connection *con)
+static void _stop_conn(struct connection *con, bool and_other)
{
- con->flags |= 0x0F;
- if (con->sock && con->sock->sk)
+ mutex_lock(&con->sock_mutex);
+ set_bit(CF_CLOSE, &con->flags);
+ set_bit(CF_READ_PENDING, &con->flags);
+ set_bit(CF_WRITE_PENDING, &con->flags);
+ if (con->sock && con->sock->sk) {
+ write_lock_bh(&con->sock->sk->sk_callback_lock);
con->sock->sk->sk_user_data = NULL;
+ write_unlock_bh(&con->sock->sk->sk_callback_lock);
+ }
+ if (con->othercon && and_other)
+ _stop_conn(con->othercon, false);
+ mutex_unlock(&con->sock_mutex);
+}
+
+static void stop_conn(struct connection *con)
+{
+ _stop_conn(con, true);
}
static void free_conn(struct connection *con)
@@ -1648,6 +1673,36 @@ static void free_conn(struct connection *con)
kmem_cache_free(con_cache, con);
}
+static void work_flush(void)
+{
+ int ok;
+ int i;
+ struct hlist_node *n;
+ struct connection *con;
+
+ flush_workqueue(recv_workqueue);
+ flush_workqueue(send_workqueue);
+ do {
+ ok = 1;
+ foreach_conn(stop_conn);
+ flush_workqueue(recv_workqueue);
+ flush_workqueue(send_workqueue);
+ for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
+ hlist_for_each_entry_safe(con, n,
+ &connection_hash[i], list) {
+ ok &= test_bit(CF_READ_PENDING, &con->flags);
+ ok &= test_bit(CF_WRITE_PENDING, &con->flags);
+ if (con->othercon) {
+ ok &= test_bit(CF_READ_PENDING,
+ &con->othercon->flags);
+ ok &= test_bit(CF_WRITE_PENDING,
+ &con->othercon->flags);
+ }
+ }
+ }
+ } while (!ok);
+}
+
void dlm_lowcomms_stop(void)
{
/* Set all the flags to prevent any
@@ -1655,11 +1710,10 @@ void dlm_lowcomms_stop(void)
*/
mutex_lock(&connections_lock);
dlm_allow_conn = 0;
- foreach_conn(stop_conn);
+ mutex_unlock(&connections_lock);
+ work_flush();
clean_writequeues();
foreach_conn(free_conn);
- mutex_unlock(&connections_lock);
-
work_stop();
kmem_cache_destroy(con_cache);
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index f3f5e72a29ba..70c625999d36 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -155,6 +155,7 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags)
goto out;
}
+retry:
error = create_rcom(ls, nodeid, DLM_RCOM_STATUS,
sizeof(struct rcom_status), &rc, &mh);
if (error)
@@ -169,6 +170,8 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags)
error = dlm_wait_function(ls, &rcom_response);
disallow_sync_reply(ls);
+ if (error == -ETIMEDOUT)
+ goto retry;
if (error)
goto out;
@@ -276,6 +279,7 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
ls->ls_recover_nodeid = nodeid;
+retry:
error = create_rcom(ls, nodeid, DLM_RCOM_NAMES, last_len, &rc, &mh);
if (error)
goto out;
@@ -288,6 +292,8 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
error = dlm_wait_function(ls, &rcom_response);
disallow_sync_reply(ls);
+ if (error == -ETIMEDOUT)
+ goto retry;
out:
return error;
}
@@ -332,25 +338,6 @@ int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid)
return error;
}
-int dlm_send_rcom_lookup_dump(struct dlm_rsb *r, int to_nodeid)
-{
- struct dlm_rcom *rc;
- struct dlm_mhandle *mh;
- struct dlm_ls *ls = r->res_ls;
- int error;
-
- error = create_rcom(ls, to_nodeid, DLM_RCOM_LOOKUP, r->res_length,
- &rc, &mh);
- if (error)
- goto out;
- memcpy(rc->rc_buf, r->res_name, r->res_length);
- rc->rc_id = 0xFFFFFFFF;
-
- send_rcom(ls, mh, rc);
- out:
- return error;
-}
-
static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
struct dlm_rcom *rc;
@@ -362,6 +349,7 @@ static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
if (error)
return;
+ /* Old code would send this special id to trigger a debug dump. */
if (rc_in->rc_id == 0xFFFFFFFF) {
log_error(ls, "receive_rcom_lookup dump from %d", nodeid);
dlm_dump_rsb_name(ls, rc_in->rc_buf, len);
diff --git a/fs/dlm/rcom.h b/fs/dlm/rcom.h
index f8e243463c15..206723ab744d 100644
--- a/fs/dlm/rcom.h
+++ b/fs/dlm/rcom.h
@@ -17,7 +17,6 @@
int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags);
int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,int last_len);
int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid);
-int dlm_send_rcom_lookup_dump(struct dlm_rsb *r, int to_nodeid);
int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid);
int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in);
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index eaea789bf97d..ce2aa54ca2e2 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -52,6 +52,10 @@ int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
dlm_config.ci_recover_timer * HZ);
if (rv)
break;
+ if (test_bit(LSFL_RCOM_WAIT, &ls->ls_flags)) {
+ log_debug(ls, "dlm_wait_function timed out");
+ return -ETIMEDOUT;
+ }
}
if (dlm_recovery_stopped(ls)) {
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 6859b4bf971e..6f4e1d42d733 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -287,11 +287,23 @@ static int dlm_recoverd(void *arg)
set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
wake_up(&ls->ls_recover_lock_wait);
- while (!kthread_should_stop()) {
+ while (1) {
+ /*
+ * We call kthread_should_stop() after set_current_state().
+ * This way a kthread_stop() issued just before
+ * set_current_state() is still noticed and we do not sleep.
+ */
set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop()) {
+ set_current_state(TASK_RUNNING);
+ break;
+ }
if (!test_bit(LSFL_RECOVER_WORK, &ls->ls_flags) &&
- !test_bit(LSFL_RECOVER_DOWN, &ls->ls_flags))
+ !test_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) {
+ if (kthread_should_stop())
+ break;
schedule();
+ }
set_current_state(TASK_RUNNING);
if (test_and_clear_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) {
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 6b801186baa5..25aeaa7328ba 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -660,7 +660,7 @@ static struct ecryptfs_cache_info {
struct kmem_cache **cache;
const char *name;
size_t size;
- unsigned long flags;
+ slab_flags_t flags;
void (*ctor)(void *obj);
} ecryptfs_cache_infos[] = {
{
diff --git a/fs/exec.c b/fs/exec.c
index 3e14ba25f678..1d6243d9f2b6 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1911,7 +1911,7 @@ void set_dumpable(struct mm_struct *mm, int value)
return;
do {
- old = ACCESS_ONCE(mm->flags);
+ old = READ_ONCE(mm->flags);
new = (old & ~MMF_DUMPABLE_MASK) | value;
} while (cmpxchg(&mm->flags, old, new) != old);
}
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 1442a4c734c8..9b2ac55ac34f 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -821,11 +821,11 @@ static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
if (ret == 0) {
iomap->type = IOMAP_HOLE;
- iomap->blkno = IOMAP_NULL_BLOCK;
+ iomap->addr = IOMAP_NULL_ADDR;
iomap->length = 1 << blkbits;
} else {
iomap->type = IOMAP_MAPPED;
- iomap->blkno = (sector_t)bno << (blkbits - 9);
+ iomap->addr = (u64)bno << blkbits;
iomap->length = (u64)ret << blkbits;
iomap->flags |= IOMAP_F_MERGED;
}
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 1458706bd2ec..e2b6be03e69b 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -479,10 +479,10 @@ static const match_table_t tokens = {
{Opt_err, NULL}
};
-static int parse_options(char *options, struct super_block *sb)
+static int parse_options(char *options, struct super_block *sb,
+ struct ext2_mount_options *opts)
{
char *p;
- struct ext2_sb_info *sbi = EXT2_SB(sb);
substring_t args[MAX_OPT_ARGS];
int option;
kuid_t uid;
@@ -499,16 +499,16 @@ static int parse_options(char *options, struct super_block *sb)
token = match_token(p, tokens, args);
switch (token) {
case Opt_bsd_df:
- clear_opt (sbi->s_mount_opt, MINIX_DF);
+ clear_opt (opts->s_mount_opt, MINIX_DF);
break;
case Opt_minix_df:
- set_opt (sbi->s_mount_opt, MINIX_DF);
+ set_opt (opts->s_mount_opt, MINIX_DF);
break;
case Opt_grpid:
- set_opt (sbi->s_mount_opt, GRPID);
+ set_opt (opts->s_mount_opt, GRPID);
break;
case Opt_nogrpid:
- clear_opt (sbi->s_mount_opt, GRPID);
+ clear_opt (opts->s_mount_opt, GRPID);
break;
case Opt_resuid:
if (match_int(&args[0], &option))
@@ -519,7 +519,7 @@ static int parse_options(char *options, struct super_block *sb)
return 0;
}
- sbi->s_resuid = uid;
+ opts->s_resuid = uid;
break;
case Opt_resgid:
if (match_int(&args[0], &option))
@@ -529,51 +529,51 @@ static int parse_options(char *options, struct super_block *sb)
ext2_msg(sb, KERN_ERR, "Invalid gid value %d", option);
return 0;
}
- sbi->s_resgid = gid;
+ opts->s_resgid = gid;
break;
case Opt_sb:
/* handled by get_sb_block() instead of here */
/* *sb_block = match_int(&args[0]); */
break;
case Opt_err_panic:
- clear_opt (sbi->s_mount_opt, ERRORS_CONT);
- clear_opt (sbi->s_mount_opt, ERRORS_RO);
- set_opt (sbi->s_mount_opt, ERRORS_PANIC);
+ clear_opt (opts->s_mount_opt, ERRORS_CONT);
+ clear_opt (opts->s_mount_opt, ERRORS_RO);
+ set_opt (opts->s_mount_opt, ERRORS_PANIC);
break;
case Opt_err_ro:
- clear_opt (sbi->s_mount_opt, ERRORS_CONT);
- clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
- set_opt (sbi->s_mount_opt, ERRORS_RO);
+ clear_opt (opts->s_mount_opt, ERRORS_CONT);
+ clear_opt (opts->s_mount_opt, ERRORS_PANIC);
+ set_opt (opts->s_mount_opt, ERRORS_RO);
break;
case Opt_err_cont:
- clear_opt (sbi->s_mount_opt, ERRORS_RO);
- clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
- set_opt (sbi->s_mount_opt, ERRORS_CONT);
+ clear_opt (opts->s_mount_opt, ERRORS_RO);
+ clear_opt (opts->s_mount_opt, ERRORS_PANIC);
+ set_opt (opts->s_mount_opt, ERRORS_CONT);
break;
case Opt_nouid32:
- set_opt (sbi->s_mount_opt, NO_UID32);
+ set_opt (opts->s_mount_opt, NO_UID32);
break;
case Opt_nocheck:
- clear_opt (sbi->s_mount_opt, CHECK);
+ clear_opt (opts->s_mount_opt, CHECK);
break;
case Opt_debug:
- set_opt (sbi->s_mount_opt, DEBUG);
+ set_opt (opts->s_mount_opt, DEBUG);
break;
case Opt_oldalloc:
- set_opt (sbi->s_mount_opt, OLDALLOC);
+ set_opt (opts->s_mount_opt, OLDALLOC);
break;
case Opt_orlov:
- clear_opt (sbi->s_mount_opt, OLDALLOC);
+ clear_opt (opts->s_mount_opt, OLDALLOC);
break;
case Opt_nobh:
- set_opt (sbi->s_mount_opt, NOBH);
+ set_opt (opts->s_mount_opt, NOBH);
break;
#ifdef CONFIG_EXT2_FS_XATTR
case Opt_user_xattr:
- set_opt (sbi->s_mount_opt, XATTR_USER);
+ set_opt (opts->s_mount_opt, XATTR_USER);
break;
case Opt_nouser_xattr:
- clear_opt (sbi->s_mount_opt, XATTR_USER);
+ clear_opt (opts->s_mount_opt, XATTR_USER);
break;
#else
case Opt_user_xattr:
@@ -584,10 +584,10 @@ static int parse_options(char *options, struct super_block *sb)
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
case Opt_acl:
- set_opt(sbi->s_mount_opt, POSIX_ACL);
+ set_opt(opts->s_mount_opt, POSIX_ACL);
break;
case Opt_noacl:
- clear_opt(sbi->s_mount_opt, POSIX_ACL);
+ clear_opt(opts->s_mount_opt, POSIX_ACL);
break;
#else
case Opt_acl:
@@ -598,13 +598,13 @@ static int parse_options(char *options, struct super_block *sb)
#endif
case Opt_xip:
ext2_msg(sb, KERN_INFO, "use dax instead of xip");
- set_opt(sbi->s_mount_opt, XIP);
+ set_opt(opts->s_mount_opt, XIP);
/* Fall through */
case Opt_dax:
#ifdef CONFIG_FS_DAX
ext2_msg(sb, KERN_WARNING,
"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
- set_opt(sbi->s_mount_opt, DAX);
+ set_opt(opts->s_mount_opt, DAX);
#else
ext2_msg(sb, KERN_INFO, "dax option not supported");
#endif
@@ -613,11 +613,11 @@ static int parse_options(char *options, struct super_block *sb)
#if defined(CONFIG_QUOTA)
case Opt_quota:
case Opt_usrquota:
- set_opt(sbi->s_mount_opt, USRQUOTA);
+ set_opt(opts->s_mount_opt, USRQUOTA);
break;
case Opt_grpquota:
- set_opt(sbi->s_mount_opt, GRPQUOTA);
+ set_opt(opts->s_mount_opt, GRPQUOTA);
break;
#else
case Opt_quota:
@@ -629,11 +629,11 @@ static int parse_options(char *options, struct super_block *sb)
#endif
case Opt_reservation:
- set_opt(sbi->s_mount_opt, RESERVATION);
+ set_opt(opts->s_mount_opt, RESERVATION);
ext2_msg(sb, KERN_INFO, "reservations ON");
break;
case Opt_noreservation:
- clear_opt(sbi->s_mount_opt, RESERVATION);
+ clear_opt(opts->s_mount_opt, RESERVATION);
ext2_msg(sb, KERN_INFO, "reservations OFF");
break;
case Opt_ignore:
@@ -830,6 +830,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
int i, j;
__le32 features;
int err;
+ struct ext2_mount_options opts;
err = -ENOMEM;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
@@ -890,35 +891,39 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
/* Set defaults before we parse the mount options */
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
if (def_mount_opts & EXT2_DEFM_DEBUG)
- set_opt(sbi->s_mount_opt, DEBUG);
+ set_opt(opts.s_mount_opt, DEBUG);
if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
- set_opt(sbi->s_mount_opt, GRPID);
+ set_opt(opts.s_mount_opt, GRPID);
if (def_mount_opts & EXT2_DEFM_UID16)
- set_opt(sbi->s_mount_opt, NO_UID32);
+ set_opt(opts.s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT2_FS_XATTR
if (def_mount_opts & EXT2_DEFM_XATTR_USER)
- set_opt(sbi->s_mount_opt, XATTR_USER);
+ set_opt(opts.s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
if (def_mount_opts & EXT2_DEFM_ACL)
- set_opt(sbi->s_mount_opt, POSIX_ACL);
+ set_opt(opts.s_mount_opt, POSIX_ACL);
#endif
if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
- set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+ set_opt(opts.s_mount_opt, ERRORS_PANIC);
else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
- set_opt(sbi->s_mount_opt, ERRORS_CONT);
+ set_opt(opts.s_mount_opt, ERRORS_CONT);
else
- set_opt(sbi->s_mount_opt, ERRORS_RO);
+ set_opt(opts.s_mount_opt, ERRORS_RO);
- sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
- sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
+ opts.s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
+ opts.s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
- set_opt(sbi->s_mount_opt, RESERVATION);
+ set_opt(opts.s_mount_opt, RESERVATION);
- if (!parse_options((char *) data, sb))
+ if (!parse_options((char *) data, sb, &opts))
goto failed_mount;
+ sbi->s_mount_opt = opts.s_mount_opt;
+ sbi->s_resuid = opts.s_resuid;
+ sbi->s_resgid = opts.s_resgid;
+
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
MS_POSIXACL : 0);
@@ -1312,46 +1317,36 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
{
struct ext2_sb_info * sbi = EXT2_SB(sb);
struct ext2_super_block * es;
- struct ext2_mount_options old_opts;
- unsigned long old_sb_flags;
+ struct ext2_mount_options new_opts;
int err;
sync_filesystem(sb);
- spin_lock(&sbi->s_lock);
- /* Store the old options */
- old_sb_flags = sb->s_flags;
- old_opts.s_mount_opt = sbi->s_mount_opt;
- old_opts.s_resuid = sbi->s_resuid;
- old_opts.s_resgid = sbi->s_resgid;
+ spin_lock(&sbi->s_lock);
+ new_opts.s_mount_opt = sbi->s_mount_opt;
+ new_opts.s_resuid = sbi->s_resuid;
+ new_opts.s_resgid = sbi->s_resgid;
+ spin_unlock(&sbi->s_lock);
/*
* Allow the "check" option to be passed as a remount option.
*/
- if (!parse_options(data, sb)) {
- err = -EINVAL;
- goto restore_opts;
- }
-
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+ if (!parse_options(data, sb, &new_opts))
+ return -EINVAL;
+ spin_lock(&sbi->s_lock);
es = sbi->s_es;
- if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT2_MOUNT_DAX) {
+ if ((sbi->s_mount_opt ^ new_opts.s_mount_opt) & EXT2_MOUNT_DAX) {
ext2_msg(sb, KERN_WARNING, "warning: refusing change of "
"dax flag with busy inodes while remounting");
- sbi->s_mount_opt ^= EXT2_MOUNT_DAX;
- }
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb)) {
- spin_unlock(&sbi->s_lock);
- return 0;
+ new_opts.s_mount_opt ^= EXT2_MOUNT_DAX;
}
+ if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ goto out_set;
if (*flags & MS_RDONLY) {
if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
- !(sbi->s_mount_state & EXT2_VALID_FS)) {
- spin_unlock(&sbi->s_lock);
- return 0;
- }
+ !(sbi->s_mount_state & EXT2_VALID_FS))
+ goto out_set;
/*
* OK, we are remounting a valid rw partition rdonly, so set
@@ -1362,22 +1357,20 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
spin_unlock(&sbi->s_lock);
err = dquot_suspend(sb, -1);
- if (err < 0) {
- spin_lock(&sbi->s_lock);
- goto restore_opts;
- }
+ if (err < 0)
+ return err;
ext2_sync_super(sb, es, 1);
} else {
__le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
~EXT2_FEATURE_RO_COMPAT_SUPP);
if (ret) {
+ spin_unlock(&sbi->s_lock);
ext2_msg(sb, KERN_WARNING,
"warning: couldn't remount RDWR because of "
"unsupported optional features (%x).",
le32_to_cpu(ret));
- err = -EROFS;
- goto restore_opts;
+ return -EROFS;
}
/*
* Mounting a RDONLY partition read-write, so reread and
@@ -1394,14 +1387,16 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
dquot_resume(sb, -1);
}
- return 0;
-restore_opts:
- sbi->s_mount_opt = old_opts.s_mount_opt;
- sbi->s_resuid = old_opts.s_resuid;
- sbi->s_resgid = old_opts.s_resgid;
- sb->s_flags = old_sb_flags;
+ spin_lock(&sbi->s_lock);
+out_set:
+ sbi->s_mount_opt = new_opts.s_mount_opt;
+ sbi->s_resuid = new_opts.s_resuid;
+ sbi->s_resgid = new_opts.s_resgid;
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
spin_unlock(&sbi->s_lock);
- return err;
+
+ return 0;
}
static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
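The ext2_remount() rework above follows a common pattern: parse the new options into a scratch struct first, so a parse failure can simply return without restoring live state, and copy the result into the superblock only under its lock once everything is valid. A compact userspace sketch of that pattern, with invented types and a trivial stand-in parser, might be:

#include <pthread.h>
#include <string.h>

struct mount_opts {
	unsigned long flags;
};

struct sb_state {
	pthread_mutex_t   lock;
	struct mount_opts opts;   /* live options, protected by lock */
};

/* Trivial stand-in parser: only understands the single option "debug". */
static int parse_opts(const char *data, struct mount_opts *out)
{
	if (!data || !*data)
		return 1;
	if (strcmp(data, "debug") == 0) {
		out->flags |= 0x1;
		return 1;
	}
	return 0;   /* unknown option */
}

static int remount(struct sb_state *sb, const char *data)
{
	struct mount_opts new_opts;

	pthread_mutex_lock(&sb->lock);
	new_opts = sb->opts;            /* start from the current settings */
	pthread_mutex_unlock(&sb->lock);

	if (!parse_opts(data, &new_opts))
		return -1;              /* parse error: live options never touched */

	pthread_mutex_lock(&sb->lock);
	sb->opts = new_opts;            /* commit only after parsing succeeded */
	pthread_mutex_unlock(&sb->lock);
	return 0;
}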
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index e38039fd96ff..73b850f5659c 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -37,6 +37,7 @@ config EXT4_FS
select CRC16
select CRYPTO
select CRYPTO_CRC32C
+ select FS_IOMAP
help
This is the next generation of the ext3 filesystem.
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index d5ddfb96c83c..a943e568292e 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -601,22 +601,21 @@ int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
* ext4_should_retry_alloc() is called when ENOSPC is returned, and if
* it is profitable to retry the operation, this function will wait
* for the current or committing transaction to complete, and then
- * return TRUE.
- *
- * if the total number of retries exceed three times, return FALSE.
+ * return TRUE. We will only retry once.
*/
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
- (*retries)++ > 3 ||
+ (*retries)++ > 1 ||
!EXT4_SB(sb)->s_journal)
return 0;
- jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
-
smp_mb();
- if (EXT4_SB(sb)->s_mb_free_pending)
- jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
+ if (EXT4_SB(sb)->s_mb_free_pending == 0)
+ return 0;
+
+ jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
+ jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
return 1;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 58a0304566db..4e091eae38b1 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -34,17 +34,15 @@
#include <linux/percpu_counter.h>
#include <linux/ratelimit.h>
#include <crypto/hash.h>
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-#include <linux/fscrypt_supp.h>
-#else
-#include <linux/fscrypt_notsupp.h>
-#endif
#include <linux/falloc.h>
#include <linux/percpu-rwsem.h>
#ifdef __KERNEL__
#include <linux/compat.h>
#endif
+#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION)
+#include <linux/fscrypt.h>
+
/*
* The fourth extended filesystem constants/structures
*/
@@ -546,8 +544,8 @@ struct ext4_new_group_data {
__u64 inode_table;
__u32 blocks_count;
__u16 reserved_blocks;
- __u16 unused;
- __u32 free_blocks_count;
+ __u16 mdata_blocks;
+ __u32 free_clusters_count;
};
/* Indexes used to index group tables in ext4_new_group_data */
@@ -645,43 +643,6 @@ enum {
#define EXT4_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT
#define EXT4_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
-#ifndef FS_IOC_FSGETXATTR
-/* Until the uapi changes get merged for project quota... */
-
-#define FS_IOC_FSGETXATTR _IOR('X', 31, struct fsxattr)
-#define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr)
-
-/*
- * Structure for FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR.
- */
-struct fsxattr {
- __u32 fsx_xflags; /* xflags field value (get/set) */
- __u32 fsx_extsize; /* extsize field value (get/set)*/
- __u32 fsx_nextents; /* nextents field value (get) */
- __u32 fsx_projid; /* project identifier (get/set) */
- unsigned char fsx_pad[12];
-};
-
-/*
- * Flags for the fsx_xflags field
- */
-#define FS_XFLAG_REALTIME 0x00000001 /* data in realtime volume */
-#define FS_XFLAG_PREALLOC 0x00000002 /* preallocated file extents */
-#define FS_XFLAG_IMMUTABLE 0x00000008 /* file cannot be modified */
-#define FS_XFLAG_APPEND 0x00000010 /* all writes append */
-#define FS_XFLAG_SYNC 0x00000020 /* all writes synchronous */
-#define FS_XFLAG_NOATIME 0x00000040 /* do not update access time */
-#define FS_XFLAG_NODUMP 0x00000080 /* do not include in backups */
-#define FS_XFLAG_RTINHERIT 0x00000100 /* create with rt bit set */
-#define FS_XFLAG_PROJINHERIT 0x00000200 /* create with parents projid */
-#define FS_XFLAG_NOSYMLINKS 0x00000400 /* disallow symlink creation */
-#define FS_XFLAG_EXTSIZE 0x00000800 /* extent size allocator hint */
-#define FS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */
-#define FS_XFLAG_NODEFRAG 0x00002000 /* do not defragment */
-#define FS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */
-#define FS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */
-#endif /* !defined(FS_IOC_FSGETXATTR) */
-
#define EXT4_IOC_FSGETXATTR FS_IOC_FSGETXATTR
#define EXT4_IOC_FSSETXATTR FS_IOC_FSSETXATTR
@@ -1393,8 +1354,6 @@ struct ext4_sb_info {
int s_first_ino;
unsigned int s_inode_readahead_blks;
unsigned int s_inode_goal;
- spinlock_t s_next_gen_lock;
- u32 s_next_generation;
u32 s_hash_seed[4];
int s_def_hash_version;
int s_hash_unsigned; /* 3 if hash should be signed, 0 if not */
@@ -2516,9 +2475,6 @@ extern void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim);
extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk,
ext4_fsblk_t pblk, ext4_lblk_t len);
-extern int ext4_get_next_extent(struct inode *inode, ext4_lblk_t lblk,
- unsigned int map_len,
- struct extent_status *result);
/* indirect.c */
extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
@@ -3049,6 +3005,10 @@ extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
extern int ext4_inline_data_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
int *has_inline, __u64 start, __u64 len);
+
+struct iomap;
+extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap);
+
extern int ext4_try_to_evict_inline_data(handle_t *handle,
struct inode *inode,
int needed);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 97f0fd06728d..07bca11749d4 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4794,7 +4794,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
}
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- offset + len > i_size_read(inode)) {
+ (offset + len > i_size_read(inode) ||
+ offset + len > EXT4_I(inode)->i_disksize)) {
new_size = offset + len;
ret = inode_newsize_ok(inode, new_size);
if (ret)
@@ -4965,7 +4966,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
}
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- offset + len > i_size_read(inode)) {
+ (offset + len > i_size_read(inode) ||
+ offset + len > EXT4_I(inode)->i_disksize)) {
new_size = offset + len;
ret = inode_newsize_ok(inode, new_size);
if (ret)
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 5cb9aa3ad249..ad204d2724ac 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -21,6 +21,7 @@
#include <linux/time.h>
#include <linux/fs.h>
+#include <linux/iomap.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
@@ -365,7 +366,6 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
struct super_block *sb = inode->i_sb;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct vfsmount *mnt = filp->f_path.mnt;
- struct dentry *dir;
struct path path;
char buf[64], *cp;
int ret;
@@ -405,25 +405,11 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
ext4_journal_stop(handle);
}
}
- if (ext4_encrypted_inode(inode)) {
- ret = fscrypt_get_encryption_info(inode);
- if (ret)
- return -EACCES;
- if (!fscrypt_has_encryption_key(inode))
- return -ENOKEY;
- }
- dir = dget_parent(file_dentry(filp));
- if (ext4_encrypted_inode(d_inode(dir)) &&
- !fscrypt_has_permitted_context(d_inode(dir), inode)) {
- ext4_warning(inode->i_sb,
- "Inconsistent encryption contexts: %lu/%lu",
- (unsigned long) d_inode(dir)->i_ino,
- (unsigned long) inode->i_ino);
- dput(dir);
- return -EPERM;
- }
- dput(dir);
+ ret = fscrypt_file_open(inode, filp);
+ if (ret)
+ return ret;
+
/*
* Set up the jbd2_inode if we are opening the inode for
* writing and the journal is present
@@ -439,248 +425,6 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
}
/*
- * Here we use ext4_map_blocks() to get a block mapping for a extent-based
- * file rather than ext4_ext_walk_space() because we can introduce
- * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped file at the same
- * function. When extent status tree has been fully implemented, it will
- * track all extent status for a file and we can directly use it to
- * retrieve the offset for SEEK_DATA/SEEK_HOLE.
- */
-
-/*
- * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we would need to
- * lookup page cache to check whether or not there has some data between
- * [startoff, endoff] because, if this range contains an unwritten extent,
- * we determine this extent as a data or a hole according to whether the
- * page cache has data or not.
- */
-static int ext4_find_unwritten_pgoff(struct inode *inode,
- int whence,
- ext4_lblk_t end_blk,
- loff_t *offset)
-{
- struct pagevec pvec;
- unsigned int blkbits;
- pgoff_t index;
- pgoff_t end;
- loff_t endoff;
- loff_t startoff;
- loff_t lastoff;
- int found = 0;
-
- blkbits = inode->i_sb->s_blocksize_bits;
- startoff = *offset;
- lastoff = startoff;
- endoff = (loff_t)end_blk << blkbits;
-
- index = startoff >> PAGE_SHIFT;
- end = (endoff - 1) >> PAGE_SHIFT;
-
- pagevec_init(&pvec, 0);
- do {
- int i;
- unsigned long nr_pages;
-
- nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
- &index, end);
- if (nr_pages == 0)
- break;
-
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
- struct buffer_head *bh, *head;
-
- /*
- * If current offset is smaller than the page offset,
- * there is a hole at this offset.
- */
- if (whence == SEEK_HOLE && lastoff < endoff &&
- lastoff < page_offset(pvec.pages[i])) {
- found = 1;
- *offset = lastoff;
- goto out;
- }
-
- lock_page(page);
-
- if (unlikely(page->mapping != inode->i_mapping)) {
- unlock_page(page);
- continue;
- }
-
- if (!page_has_buffers(page)) {
- unlock_page(page);
- continue;
- }
-
- if (page_has_buffers(page)) {
- lastoff = page_offset(page);
- bh = head = page_buffers(page);
- do {
- if (lastoff + bh->b_size <= startoff)
- goto next;
- if (buffer_uptodate(bh) ||
- buffer_unwritten(bh)) {
- if (whence == SEEK_DATA)
- found = 1;
- } else {
- if (whence == SEEK_HOLE)
- found = 1;
- }
- if (found) {
- *offset = max_t(loff_t,
- startoff, lastoff);
- unlock_page(page);
- goto out;
- }
-next:
- lastoff += bh->b_size;
- bh = bh->b_this_page;
- } while (bh != head);
- }
-
- lastoff = page_offset(page) + PAGE_SIZE;
- unlock_page(page);
- }
-
- pagevec_release(&pvec);
- } while (index <= end);
-
- /* There are no pages upto endoff - that would be a hole in there. */
- if (whence == SEEK_HOLE && lastoff < endoff) {
- found = 1;
- *offset = lastoff;
- }
-out:
- pagevec_release(&pvec);
- return found;
-}
-
-/*
- * ext4_seek_data() retrieves the offset for SEEK_DATA.
- */
-static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
-{
- struct inode *inode = file->f_mapping->host;
- struct extent_status es;
- ext4_lblk_t start, last, end;
- loff_t dataoff, isize;
- int blkbits;
- int ret;
-
- inode_lock(inode);
-
- isize = i_size_read(inode);
- if (offset < 0 || offset >= isize) {
- inode_unlock(inode);
- return -ENXIO;
- }
-
- blkbits = inode->i_sb->s_blocksize_bits;
- start = offset >> blkbits;
- last = start;
- end = isize >> blkbits;
- dataoff = offset;
-
- do {
- ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
- if (ret <= 0) {
- /* No extent found -> no data */
- if (ret == 0)
- ret = -ENXIO;
- inode_unlock(inode);
- return ret;
- }
-
- last = es.es_lblk;
- if (last != start)
- dataoff = (loff_t)last << blkbits;
- if (!ext4_es_is_unwritten(&es))
- break;
-
- /*
- * If there is a unwritten extent at this offset,
- * it will be as a data or a hole according to page
- * cache that has data or not.
- */
- if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
- es.es_lblk + es.es_len, &dataoff))
- break;
- last += es.es_len;
- dataoff = (loff_t)last << blkbits;
- cond_resched();
- } while (last <= end);
-
- inode_unlock(inode);
-
- if (dataoff > isize)
- return -ENXIO;
-
- return vfs_setpos(file, dataoff, maxsize);
-}
-
-/*
- * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
- */
-static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
-{
- struct inode *inode = file->f_mapping->host;
- struct extent_status es;
- ext4_lblk_t start, last, end;
- loff_t holeoff, isize;
- int blkbits;
- int ret;
-
- inode_lock(inode);
-
- isize = i_size_read(inode);
- if (offset < 0 || offset >= isize) {
- inode_unlock(inode);
- return -ENXIO;
- }
-
- blkbits = inode->i_sb->s_blocksize_bits;
- start = offset >> blkbits;
- last = start;
- end = isize >> blkbits;
- holeoff = offset;
-
- do {
- ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
- if (ret < 0) {
- inode_unlock(inode);
- return ret;
- }
- /* Found a hole? */
- if (ret == 0 || es.es_lblk > last) {
- if (last != start)
- holeoff = (loff_t)last << blkbits;
- break;
- }
- /*
- * If there is a unwritten extent at this offset,
- * it will be as a data or a hole according to page
- * cache that has data or not.
- */
- if (ext4_es_is_unwritten(&es) &&
- ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
- last + es.es_len, &holeoff))
- break;
-
- last += es.es_len;
- holeoff = (loff_t)last << blkbits;
- cond_resched();
- } while (last <= end);
-
- inode_unlock(inode);
-
- if (holeoff > isize)
- holeoff = isize;
-
- return vfs_setpos(file, holeoff, maxsize);
-}
-
-/*
* ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
* by calling generic_file_llseek_size() with the appropriate maxbytes
* value for each.
@@ -696,18 +440,24 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
maxbytes = inode->i_sb->s_maxbytes;
switch (whence) {
- case SEEK_SET:
- case SEEK_CUR:
- case SEEK_END:
+ default:
return generic_file_llseek_size(file, offset, whence,
maxbytes, i_size_read(inode));
- case SEEK_DATA:
- return ext4_seek_data(file, offset, maxbytes);
case SEEK_HOLE:
- return ext4_seek_hole(file, offset, maxbytes);
+ inode_lock_shared(inode);
+ offset = iomap_seek_hole(inode, offset, &ext4_iomap_ops);
+ inode_unlock_shared(inode);
+ break;
+ case SEEK_DATA:
+ inode_lock_shared(inode);
+ offset = iomap_seek_data(inode, offset, &ext4_iomap_ops);
+ inode_unlock_shared(inode);
+ break;
}
- return -EINVAL;
+ if (offset < 0)
+ return offset;
+ return vfs_setpos(file, offset, maxbytes);
}
const struct file_operations ext4_file_operations = {
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index c5f697a3fad4..b4267d72f249 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1139,9 +1139,7 @@ got:
inode->i_ino);
goto out;
}
- spin_lock(&sbi->s_next_gen_lock);
- inode->i_generation = sbi->s_next_generation++;
- spin_unlock(&sbi->s_next_gen_lock);
+ inode->i_generation = prandom_u32();
/* Precompute checksum seed for inode metadata */
if (ext4_has_metadata_csum(sb)) {
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 28c5c3abddb3..1367553c43bb 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -12,6 +12,7 @@
* GNU General Public License for more details.
*/
+#include <linux/iomap.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
@@ -302,11 +303,6 @@ static int ext4_create_inline_data(handle_t *handle,
EXT4_I(inode)->i_inline_size = len + EXT4_MIN_INLINE_DATA_SIZE;
ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
ext4_set_inode_flag(inode, EXT4_INODE_INLINE_DATA);
- /*
- * Propagate changes to inode->i_flags as well - e.g. S_DAX may
- * get cleared
- */
- ext4_set_inode_flags(inode);
get_bh(is.iloc.bh);
error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
@@ -451,11 +447,6 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
}
}
ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA);
- /*
- * Propagate changes to inode->i_flags as well - e.g. S_DAX may
- * get set.
- */
- ext4_set_inode_flags(inode);
get_bh(is.iloc.bh);
error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
@@ -1827,6 +1818,38 @@ int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
return ret;
}
+int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap)
+{
+ __u64 addr;
+ int error = -EAGAIN;
+ struct ext4_iloc iloc;
+
+ down_read(&EXT4_I(inode)->xattr_sem);
+ if (!ext4_has_inline_data(inode))
+ goto out;
+
+ error = ext4_get_inode_loc(inode, &iloc);
+ if (error)
+ goto out;
+
+ addr = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits;
+ addr += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
+ addr += offsetof(struct ext4_inode, i_block);
+
+ brelse(iloc.bh);
+
+ iomap->addr = addr;
+ iomap->offset = 0;
+ iomap->length = min_t(loff_t, ext4_get_inline_size(inode),
+ i_size_read(inode));
+ iomap->type = 0;
+ iomap->flags = IOMAP_F_DATA_INLINE;
+
+out:
+ up_read(&EXT4_I(inode)->xattr_sem);
+ return error;
+}
+
int ext4_inline_data_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
int *has_inline, __u64 start, __u64 len)
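ext4_inline_data_iomap() above describes inline data by its byte address inside the inode table rather than by data blocks. As a worked example, with a 4 KiB block size (s_blocksize_bits = 12), an inode whose raw table entry sits at byte offset 256 of block 1000, and i_block at its usual 40-byte offset within struct ext4_inode, the reported address is (1000 << 12) + 256 + 40 = 4,096,296. The length is capped at min(inline size, i_size), and IOMAP_F_DATA_INLINE tells callers that the bytes live inside the inode itself.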
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 90afeb7293a6..8d2b582fb141 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1719,7 +1719,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
ext4_es_remove_extent(inode, start, last - start + 1);
}
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
while (index <= end) {
nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
if (nr_pages == 0)
@@ -2345,7 +2345,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
lblk = start << bpp_bits;
pblock = mpd->map.m_pblk;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
while (start <= end) {
nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
&start, end);
@@ -2616,12 +2616,12 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
else
tag = PAGECACHE_TAG_DIRTY;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
mpd->map.m_len = 0;
mpd->next_page = index;
while (index <= end) {
- nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+ tag);
if (nr_pages == 0)
goto out;
@@ -2629,16 +2629,6 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
struct page *page = pvec.pages[i];
/*
- * At this point, the page may be truncated or
- * invalidated (changing page->mapping to NULL), or
- * even swizzled back from swapper_space to tmpfs file
- * mapping. However, page->index will not change
- * because we have a reference on the page.
- */
- if (page->index > end)
- goto out;
-
- /*
* Accumulated enough dirty pages? This doesn't apply
* to WB_SYNC_ALL mode. For integrity sync we have to
* keep going because someone may be concurrently
@@ -3394,7 +3384,6 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
return try_to_free_buffers(page);
}
-#ifdef CONFIG_FS_DAX
static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
unsigned flags, struct iomap *iomap)
{
@@ -3403,17 +3392,54 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
unsigned long first_block = offset >> blkbits;
unsigned long last_block = (offset + length - 1) >> blkbits;
struct ext4_map_blocks map;
+ bool delalloc = false;
int ret;
- if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
- return -ERANGE;
+
+ if (flags & IOMAP_REPORT) {
+ if (ext4_has_inline_data(inode)) {
+ ret = ext4_inline_data_iomap(inode, iomap);
+ if (ret != -EAGAIN) {
+ if (ret == 0 && offset >= iomap->length)
+ ret = -ENOENT;
+ return ret;
+ }
+ }
+ } else {
+ if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
+ return -ERANGE;
+ }
map.m_lblk = first_block;
map.m_len = last_block - first_block + 1;
- if (!(flags & IOMAP_WRITE)) {
+ if (flags & IOMAP_REPORT) {
ret = ext4_map_blocks(NULL, inode, &map, 0);
- } else {
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0) {
+ ext4_lblk_t end = map.m_lblk + map.m_len - 1;
+ struct extent_status es;
+
+ ext4_es_find_delayed_extent_range(inode, map.m_lblk, end, &es);
+
+ if (!es.es_len || es.es_lblk > end) {
+ /* entire range is a hole */
+ } else if (es.es_lblk > map.m_lblk) {
+ /* range starts with a hole */
+ map.m_len = es.es_lblk - map.m_lblk;
+ } else {
+ ext4_lblk_t offs = 0;
+
+ if (es.es_lblk < map.m_lblk)
+ offs = map.m_lblk - es.es_lblk;
+ map.m_lblk = es.es_lblk + offs;
+ map.m_len = es.es_len - offs;
+ delalloc = true;
+ }
+ }
+ } else if (flags & IOMAP_WRITE) {
int dio_credits;
handle_t *handle;
int retries = 0;
@@ -3464,17 +3490,21 @@ retry:
}
}
ext4_journal_stop(handle);
+ } else {
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret < 0)
+ return ret;
}
iomap->flags = 0;
iomap->bdev = inode->i_sb->s_bdev;
iomap->dax_dev = sbi->s_daxdev;
iomap->offset = first_block << blkbits;
+ iomap->length = (u64)map.m_len << blkbits;
if (ret == 0) {
- iomap->type = IOMAP_HOLE;
- iomap->blkno = IOMAP_NULL_BLOCK;
- iomap->length = (u64)map.m_len << blkbits;
+ iomap->type = delalloc ? IOMAP_DELALLOC : IOMAP_HOLE;
+ iomap->addr = IOMAP_NULL_ADDR;
} else {
if (map.m_flags & EXT4_MAP_MAPPED) {
iomap->type = IOMAP_MAPPED;
@@ -3484,12 +3514,12 @@ retry:
WARN_ON_ONCE(1);
return -EIO;
}
- iomap->blkno = (sector_t)map.m_pblk << (blkbits - 9);
- iomap->length = (u64)map.m_len << blkbits;
+ iomap->addr = (u64)map.m_pblk << blkbits;
}
if (map.m_flags & EXT4_MAP_NEW)
iomap->flags |= IOMAP_F_NEW;
+
return 0;
}
@@ -3550,8 +3580,6 @@ const struct iomap_ops ext4_iomap_ops = {
.iomap_end = ext4_iomap_end,
};
-#endif
-
static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
ssize_t size, void *private)
{
@@ -4573,6 +4601,21 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
}
+static bool ext4_should_use_dax(struct inode *inode)
+{
+ if (!test_opt(inode->i_sb, DAX))
+ return false;
+ if (!S_ISREG(inode->i_mode))
+ return false;
+ if (ext4_should_journal_data(inode))
+ return false;
+ if (ext4_has_inline_data(inode))
+ return false;
+ if (ext4_encrypted_inode(inode))
+ return false;
+ return true;
+}
+
void ext4_set_inode_flags(struct inode *inode)
{
unsigned int flags = EXT4_I(inode)->i_flags;
@@ -4588,12 +4631,13 @@ void ext4_set_inode_flags(struct inode *inode)
new_fl |= S_NOATIME;
if (flags & EXT4_DIRSYNC_FL)
new_fl |= S_DIRSYNC;
- if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode) &&
- !ext4_should_journal_data(inode) && !ext4_has_inline_data(inode) &&
- !ext4_encrypted_inode(inode))
+ if (ext4_should_use_dax(inode))
new_fl |= S_DAX;
+ if (flags & EXT4_ENCRYPT_FL)
+ new_fl |= S_ENCRYPTED;
inode_set_flags(inode, new_fl,
- S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX);
+ S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
+ S_ENCRYPTED);
}
static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
@@ -5309,6 +5353,10 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
return error;
+ error = fscrypt_prepare_setattr(dentry, attr);
+ if (error)
+ return error;
+
if (is_quota_modification(inode, attr)) {
error = dquot_initialize(inode);
if (error)
@@ -5354,14 +5402,6 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
loff_t oldsize = inode->i_size;
int shrink = (attr->ia_size <= inode->i_size);
- if (ext4_encrypted_inode(inode)) {
- error = fscrypt_get_encryption_info(inode);
- if (error)
- return error;
- if (!fscrypt_has_encryption_key(inode))
- return -ENOKEY;
- }
-
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -5967,11 +6007,6 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
}
ext4_set_aops(inode);
- /*
- * Update inode->i_flags after EXT4_INODE_JOURNAL_DATA was updated.
- * E.g. S_DAX may get cleared / set.
- */
- ext4_set_inode_flags(inode);
jbd2_journal_unlock_updates(journal);
percpu_up_write(&sbi->s_journal_flag_rwsem);
@@ -6107,70 +6142,3 @@ int ext4_filemap_fault(struct vm_fault *vmf)
return err;
}
-
-/*
- * Find the first extent at or after @lblk in an inode that is not a hole.
- * Search for @map_len blocks at most. The extent is returned in @result.
- *
- * The function returns 1 if we found an extent. The function returns 0 in
- * case there is no extent at or after @lblk and in that case also sets
- * @result->es_len to 0. In case of error, the error code is returned.
- */
-int ext4_get_next_extent(struct inode *inode, ext4_lblk_t lblk,
- unsigned int map_len, struct extent_status *result)
-{
- struct ext4_map_blocks map;
- struct extent_status es = {};
- int ret;
-
- map.m_lblk = lblk;
- map.m_len = map_len;
-
- /*
- * For non-extent based files this loop may iterate several times since
- * we do not determine full hole size.
- */
- while (map.m_len > 0) {
- ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret < 0)
- return ret;
- /* There's extent covering m_lblk? Just return it. */
- if (ret > 0) {
- int status;
-
- ext4_es_store_pblock(result, map.m_pblk);
- result->es_lblk = map.m_lblk;
- result->es_len = map.m_len;
- if (map.m_flags & EXT4_MAP_UNWRITTEN)
- status = EXTENT_STATUS_UNWRITTEN;
- else
- status = EXTENT_STATUS_WRITTEN;
- ext4_es_store_status(result, status);
- return 1;
- }
- ext4_es_find_delayed_extent_range(inode, map.m_lblk,
- map.m_lblk + map.m_len - 1,
- &es);
- /* Is delalloc data before next block in extent tree? */
- if (es.es_len && es.es_lblk < map.m_lblk + map.m_len) {
- ext4_lblk_t offset = 0;
-
- if (es.es_lblk < lblk)
- offset = lblk - es.es_lblk;
- result->es_lblk = es.es_lblk + offset;
- ext4_es_store_pblock(result,
- ext4_es_pblock(&es) + offset);
- result->es_len = es.es_len - offset;
- ext4_es_store_status(result, ext4_es_status(&es));
-
- return 1;
- }
- /* There's a hole at m_lblk, advance us after it */
- map.m_lblk += map.m_len;
- map_len -= map.m_len;
- map.m_len = map_len;
- cond_resched();
- }
- result->es_len = 0;
- return 0;
-}
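One user-visible property the SEEK_HOLE/SEEK_DATA rework must preserve is that delayed-allocation ranges (dirty pages with no blocks allocated yet) are still reported as data; that is what the IOMAP_REPORT branch above feeds from the extent status tree. A hypothetical userspace check of that behaviour (file name and offsets are arbitrary, not part of the patch):

	#define _GNU_SOURCE		/* for SEEK_DATA / SEEK_HOLE */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("testfile", O_RDWR | O_CREAT | O_TRUNC, 0600);
		off_t data, hole;

		if (fd < 0)
			return 1;
		/* Dirty one page at 1 MiB; with delalloc it has no blocks yet. */
		pwrite(fd, "x", 1, 1 << 20);
		data = lseek(fd, 0, SEEK_DATA);	/* expect 1 MiB: delalloc is data */
		hole = lseek(fd, 0, SEEK_HOLE);	/* expect 0: offset 0 is a hole   */
		printf("data at %lld, hole at %lld\n",
		       (long long)data, (long long)hole);
		close(fd);
		return 0;
	}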
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 75d83471f65c..b7558f292420 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -15,6 +15,7 @@
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/quotaops.h>
+#include <linux/random.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
@@ -99,7 +100,6 @@ static long swap_inode_boot_loader(struct super_block *sb,
int err;
struct inode *inode_bl;
struct ext4_inode_info *ei_bl;
- struct ext4_sb_info *sbi = EXT4_SB(sb);
if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode))
return -EINVAL;
@@ -158,10 +158,8 @@ static long swap_inode_boot_loader(struct super_block *sb,
inode->i_ctime = inode_bl->i_ctime = current_time(inode);
- spin_lock(&sbi->s_next_gen_lock);
- inode->i_generation = sbi->s_next_generation++;
- inode_bl->i_generation = sbi->s_next_generation++;
- spin_unlock(&sbi->s_next_gen_lock);
+ inode->i_generation = prandom_u32();
+ inode_bl->i_generation = prandom_u32();
ext4_discard_preallocations(inode);
@@ -291,10 +289,20 @@ flags_err:
if (err)
goto flags_out;
- if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL))
+ if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
+ /*
+ * Changes to the journaling mode can cause unsafe changes to
+ * S_DAX if we are using the DAX mount option.
+ */
+ if (test_opt(inode->i_sb, DAX)) {
+ err = -EBUSY;
+ goto flags_out;
+ }
+
err = ext4_change_inode_journal_flag(inode, jflag);
- if (err)
- goto flags_out;
+ if (err)
+ goto flags_out;
+ }
if (migrate) {
if (flags & EXT4_EXTENTS_FL)
err = ext4_ext_migrate(inode);
@@ -862,12 +870,6 @@ group_add_out:
int err = 0, err2 = 0;
ext4_group_t o_group = EXT4_SB(sb)->s_groups_count;
- if (ext4_has_feature_bigalloc(sb)) {
- ext4_msg(sb, KERN_ERR,
- "Online resizing not (yet) supported with bigalloc");
- return -EOPNOTSUPP;
- }
-
if (copy_from_user(&n_blocks_count, (__u64 __user *)arg,
sizeof(__u64))) {
return -EFAULT;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 701085620cd8..d9f8b90a93ed 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4994,8 +4994,11 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
struct ext4_group_desc *desc;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_buddy e4b;
- int err = 0, ret, blk_free_count;
- ext4_grpblk_t blocks_freed;
+ int err = 0, ret, free_clusters_count;
+ ext4_grpblk_t clusters_freed;
+ ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
+ ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
+ unsigned long cluster_count = last_cluster - first_cluster + 1;
ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
@@ -5007,8 +5010,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
* Check to see if we are freeing blocks across a group
* boundary.
*/
- if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
- ext4_warning(sb, "too much blocks added to group %u",
+ if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
+ ext4_warning(sb, "too many blocks added to group %u",
block_group);
err = -EINVAL;
goto error_return;
@@ -5054,14 +5057,14 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
if (err)
goto error_return;
- for (i = 0, blocks_freed = 0; i < count; i++) {
+ for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
BUFFER_TRACE(bitmap_bh, "clear bit");
if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
ext4_error(sb, "bit already cleared for block %llu",
(ext4_fsblk_t)(block + i));
BUFFER_TRACE(bitmap_bh, "bit already cleared");
} else {
- blocks_freed++;
+ clusters_freed++;
}
}
@@ -5075,19 +5078,20 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
* them with group lock_held
*/
ext4_lock_group(sb, block_group);
- mb_clear_bits(bitmap_bh->b_data, bit, count);
- mb_free_blocks(NULL, &e4b, bit, count);
- blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
- ext4_free_group_clusters_set(sb, desc, blk_free_count);
+ mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
+ mb_free_blocks(NULL, &e4b, bit, cluster_count);
+ free_clusters_count = clusters_freed +
+ ext4_free_group_clusters(sb, desc);
+ ext4_free_group_clusters_set(sb, desc, free_clusters_count);
ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
ext4_group_desc_csum_set(sb, block_group, desc);
ext4_unlock_group(sb, block_group);
percpu_counter_add(&sbi->s_freeclusters_counter,
- EXT4_NUM_B2C(sbi, blocks_freed));
+ clusters_freed);
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
- atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
+ atomic64_add(clusters_freed,
&sbi->s_flex_groups[flex_group].free_clusters);
}
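ext4_group_add_blocks() above now works in cluster units so it behaves correctly on bigalloc filesystems. As a worked example, with a cluster ratio of 4 blocks per cluster (s_cluster_bits = 2), adding blocks 100..115 gives first_cluster = EXT4_B2C(100) = 25 and last_cluster = EXT4_B2C(115) = 28, so cluster_count = 4, and the bitmap bits, the group descriptor, and the free-cluster counters are all updated by 4 clusters rather than by 16 blocks.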
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index bd48a8d83961..798b3ac680db 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1539,24 +1539,14 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
struct inode *inode;
struct ext4_dir_entry_2 *de;
struct buffer_head *bh;
+ int err;
- if (ext4_encrypted_inode(dir)) {
- int res = fscrypt_get_encryption_info(dir);
-
- /*
- * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
- * created while the directory was encrypted and we
- * have access to the key.
- */
- if (fscrypt_has_encryption_key(dir))
- fscrypt_set_encrypted_dentry(dentry);
- fscrypt_set_d_op(dentry);
- if (res && res != -ENOKEY)
- return ERR_PTR(res);
- }
+ err = fscrypt_prepare_lookup(dir, dentry, flags);
+ if (err)
+ return ERR_PTR(err);
- if (dentry->d_name.len > EXT4_NAME_LEN)
- return ERR_PTR(-ENAMETOOLONG);
+ if (dentry->d_name.len > EXT4_NAME_LEN)
+ return ERR_PTR(-ENAMETOOLONG);
bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
if (IS_ERR(bh))
@@ -3222,9 +3212,10 @@ static int ext4_link(struct dentry *old_dentry,
if (inode->i_nlink >= EXT4_LINK_MAX)
return -EMLINK;
- if (ext4_encrypted_inode(dir) &&
- !fscrypt_has_permitted_context(dir, inode))
- return -EPERM;
+
+ err = fscrypt_prepare_link(old_dentry, dir, dentry);
+ if (err)
+ return err;
if ((ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) &&
(!projid_eq(EXT4_I(dir)->i_projid,
@@ -3516,12 +3507,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
EXT4_I(old_dentry->d_inode)->i_projid)))
return -EXDEV;
- if ((ext4_encrypted_inode(old_dir) &&
- !fscrypt_has_encryption_key(old_dir)) ||
- (ext4_encrypted_inode(new_dir) &&
- !fscrypt_has_encryption_key(new_dir)))
- return -ENOKEY;
-
retval = dquot_initialize(old.dir);
if (retval)
return retval;
@@ -3550,13 +3535,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
goto end_rename;
- if ((old.dir != new.dir) &&
- ext4_encrypted_inode(new.dir) &&
- !fscrypt_has_permitted_context(new.dir, old.inode)) {
- retval = -EPERM;
- goto end_rename;
- }
-
new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
&new.de, &new.inlined);
if (IS_ERR(new.bh)) {
@@ -3722,19 +3700,6 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
int retval;
struct timespec ctime;
- if ((ext4_encrypted_inode(old_dir) &&
- !fscrypt_has_encryption_key(old_dir)) ||
- (ext4_encrypted_inode(new_dir) &&
- !fscrypt_has_encryption_key(new_dir)))
- return -ENOKEY;
-
- if ((ext4_encrypted_inode(old_dir) ||
- ext4_encrypted_inode(new_dir)) &&
- (old_dir != new_dir) &&
- (!fscrypt_has_permitted_context(new_dir, old.inode) ||
- !fscrypt_has_permitted_context(old_dir, new.inode)))
- return -EPERM;
-
if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT) &&
!projid_eq(EXT4_I(new_dir)->i_projid,
EXT4_I(old_dentry->d_inode)->i_projid)) ||
@@ -3861,12 +3826,19 @@ static int ext4_rename2(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
+ int err;
+
if (unlikely(ext4_forced_shutdown(EXT4_SB(old_dir->i_sb))))
return -EIO;
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return -EINVAL;
+ err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry,
+ flags);
+ if (err)
+ return err;
+
if (flags & RENAME_EXCHANGE) {
return ext4_cross_rename(old_dir, old_dentry,
new_dir, new_dentry);
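The namei.c hunks above, together with the earlier file.c and inode.c changes, replace open-coded key-presence and permitted-context checks with the common helpers provided by the unified <linux/fscrypt.h> header used in this series. The helpers used here have roughly these prototypes (listed for reference, not part of the patch):

	int fscrypt_file_open(struct inode *inode, struct file *filp);
	int fscrypt_prepare_link(struct dentry *old_dentry, struct inode *dir,
				 struct dentry *dentry);
	int fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
				   struct inode *new_dir, struct dentry *new_dentry,
				   unsigned int flags);
	int fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
				   unsigned int flags);
	int fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr);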
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 1dac59c24792..50443bda8e98 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -107,7 +107,7 @@ static int verify_group_input(struct super_block *sb,
overhead = ext4_group_overhead_blocks(sb, group);
metaend = start + overhead;
- input->free_blocks_count = free_blocks_count =
+ input->free_clusters_count = free_blocks_count =
input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
if (test_opt(sb, DEBUG))
@@ -258,6 +258,7 @@ static int ext4_alloc_group_tables(struct super_block *sb,
ext4_group_t last_group;
unsigned overhead;
__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
+ int i;
BUG_ON(flex_gd->count == 0 || group_data == NULL);
@@ -294,7 +295,7 @@ next_group:
group_data[bb_index].block_bitmap = start_blk++;
group = ext4_get_group_number(sb, start_blk - 1);
group -= group_data[0].group;
- group_data[group].free_blocks_count--;
+ group_data[group].mdata_blocks++;
flex_gd->bg_flags[group] &= uninit_mask;
}
@@ -305,7 +306,7 @@ next_group:
group_data[ib_index].inode_bitmap = start_blk++;
group = ext4_get_group_number(sb, start_blk - 1);
group -= group_data[0].group;
- group_data[group].free_blocks_count--;
+ group_data[group].mdata_blocks++;
flex_gd->bg_flags[group] &= uninit_mask;
}
@@ -324,15 +325,22 @@ next_group:
if (start_blk + itb > next_group_start) {
flex_gd->bg_flags[group + 1] &= uninit_mask;
overhead = start_blk + itb - next_group_start;
- group_data[group + 1].free_blocks_count -= overhead;
+ group_data[group + 1].mdata_blocks += overhead;
itb -= overhead;
}
- group_data[group].free_blocks_count -= itb;
+ group_data[group].mdata_blocks += itb;
flex_gd->bg_flags[group] &= uninit_mask;
start_blk += EXT4_SB(sb)->s_itb_per_group;
}
+ /* Update free clusters count to exclude metadata blocks */
+ for (i = 0; i < flex_gd->count; i++) {
+ group_data[i].free_clusters_count -=
+ EXT4_NUM_B2C(EXT4_SB(sb),
+ group_data[i].mdata_blocks);
+ }
+
if (test_opt(sb, DEBUG)) {
int i;
group = group_data[0].group;
@@ -342,12 +350,13 @@ next_group:
flexbg_size);
for (i = 0; i < flex_gd->count; i++) {
- printk(KERN_DEBUG "adding %s group %u: %u "
- "blocks (%d free)\n",
+ ext4_debug(
+ "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
ext4_bg_has_super(sb, group + i) ? "normal" :
"no-super", group + i,
group_data[i].blocks_count,
- group_data[i].free_blocks_count);
+ group_data[i].free_clusters_count,
+ group_data[i].mdata_blocks);
}
}
return 0;
@@ -399,7 +408,7 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh)
}
/*
- * set_flexbg_block_bitmap() mark @count blocks starting from @block used.
+ * set_flexbg_block_bitmap() mark clusters [@first_cluster, @last_cluster] used.
*
* Helper function for ext4_setup_new_group_blocks() which set .
*
@@ -409,22 +418,26 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh)
*/
static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
struct ext4_new_flex_group_data *flex_gd,
- ext4_fsblk_t block, ext4_group_t count)
+ ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ ext4_group_t count = last_cluster - first_cluster + 1;
ext4_group_t count2;
- ext4_debug("mark blocks [%llu/%u] used\n", block, count);
- for (count2 = count; count > 0; count -= count2, block += count2) {
+ ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
+ last_cluster);
+ for (count2 = count; count > 0;
+ count -= count2, first_cluster += count2) {
ext4_fsblk_t start;
struct buffer_head *bh;
ext4_group_t group;
int err;
- group = ext4_get_group_number(sb, block);
- start = ext4_group_first_block_no(sb, group);
+ group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
+ start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
group -= flex_gd->groups[0].group;
- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
+ count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
if (count2 > count)
count2 = count;
@@ -445,9 +458,9 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
err = ext4_journal_get_write_access(handle, bh);
if (err)
return err;
- ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
- block - start, count2);
- ext4_set_bits(bh->b_data, block - start, count2);
+ ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
+ first_cluster, first_cluster - start, count2);
+ ext4_set_bits(bh->b_data, first_cluster - start, count2);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
if (unlikely(err))
@@ -596,9 +609,10 @@ handle_bb:
if (overhead != 0) {
ext4_debug("mark backup superblock %#04llx (+0)\n",
start);
- ext4_set_bits(bh->b_data, 0, overhead);
+ ext4_set_bits(bh->b_data, 0,
+ EXT4_NUM_B2C(sbi, overhead));
}
- ext4_mark_bitmap_end(group_data[i].blocks_count,
+ ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
sb->s_blocksize * 8, bh->b_data);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
if (err)
@@ -643,7 +657,11 @@ handle_ib:
continue;
}
err = set_flexbg_block_bitmap(sb, handle,
- flex_gd, start, count);
+ flex_gd,
+ EXT4_B2C(sbi, start),
+ EXT4_B2C(sbi,
+ start + count
+ - 1));
if (err)
goto out;
count = group_table_count[j];
@@ -653,7 +671,11 @@ handle_ib:
if (count) {
err = set_flexbg_block_bitmap(sb, handle,
- flex_gd, start, count);
+ flex_gd,
+ EXT4_B2C(sbi, start),
+ EXT4_B2C(sbi,
+ start + count
+ - 1));
if (err)
goto out;
}
@@ -841,7 +863,8 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
ext4_std_error(sb, err);
goto exit_inode;
}
- inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
+ inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
+ (9 - EXT4_SB(sb)->s_cluster_bits);
ext4_mark_iloc_dirty(handle, inode, &iloc);
memset(gdb_bh->b_data, 0, sb->s_blocksize);
err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
@@ -936,6 +959,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
{
struct super_block *sb = inode->i_sb;
int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
+ int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
struct buffer_head **primary;
struct buffer_head *dind;
struct ext4_iloc iloc;
@@ -1011,7 +1035,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
if (!err)
err = err2;
}
- inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9;
+
+ inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
ext4_mark_iloc_dirty(handle, inode, &iloc);
exit_bh:
@@ -1245,7 +1270,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
ext4_group_t group;
__u16 *bg_flags = flex_gd->bg_flags;
int i, gdb_off, gdb_num, err = 0;
-
+
for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
group = group_data->group;
@@ -1272,7 +1297,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
ext4_inode_table_set(sb, gdp, group_data->inode_table);
ext4_free_group_clusters_set(sb, gdp,
- EXT4_NUM_B2C(sbi, group_data->free_blocks_count));
+ group_data->free_clusters_count);
ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
if (ext4_has_group_desc_csum(sb))
ext4_itable_unused_set(sb, gdp,
@@ -1328,7 +1353,7 @@ static void ext4_update_super(struct super_block *sb,
*/
for (i = 0; i < flex_gd->count; i++) {
blocks_count += group_data[i].blocks_count;
- free_blocks += group_data[i].free_blocks_count;
+ free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
}
reserved_blocks = ext4_r_blocks_count(es) * 100;
@@ -1500,17 +1525,18 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
ext4_fsblk_t n_blocks_count,
unsigned long flexbg_size)
{
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
struct ext4_new_group_data *group_data = flex_gd->groups;
ext4_fsblk_t o_blocks_count;
ext4_group_t n_group;
ext4_group_t group;
ext4_group_t last_group;
ext4_grpblk_t last;
- ext4_grpblk_t blocks_per_group;
+ ext4_grpblk_t clusters_per_group;
unsigned long i;
- blocks_per_group = EXT4_BLOCKS_PER_GROUP(sb);
+ clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);
o_blocks_count = ext4_blocks_count(es);
@@ -1531,9 +1557,10 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
int overhead;
group_data[i].group = group + i;
- group_data[i].blocks_count = blocks_per_group;
+ group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
overhead = ext4_group_overhead_blocks(sb, group + i);
- group_data[i].free_blocks_count = blocks_per_group - overhead;
+ group_data[i].mdata_blocks = overhead;
+ group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
if (ext4_has_group_desc_csum(sb)) {
flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
EXT4_BG_INODE_UNINIT;
@@ -1547,10 +1574,10 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
/* We need to initialize block bitmap of last group. */
flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
- if ((last_group == n_group) && (last != blocks_per_group - 1)) {
- group_data[i - 1].blocks_count = last + 1;
- group_data[i - 1].free_blocks_count -= blocks_per_group-
- last - 1;
+ if ((last_group == n_group) && (last != clusters_per_group - 1)) {
+ group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
+ group_data[i - 1].free_clusters_count -= clusters_per_group -
+ last - 1;
}
return 1;
@@ -1797,7 +1824,8 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
}
/* Do a quick sanity check of the resize inode */
- if (inode->i_blocks != 1 << (inode->i_blkbits - 9))
+ if (inode->i_blocks != 1 << (inode->i_blkbits -
+ (9 - sbi->s_cluster_bits)))
goto invalid_resize_inode;
for (i = 0; i < EXT4_N_BLOCKS; i++) {
if (i == EXT4_DIND_BLOCK) {
@@ -1960,7 +1988,7 @@ retry:
if (n_group == o_group)
add = n_blocks_count - o_blocks_count;
else
- add = EXT4_BLOCKS_PER_GROUP(sb) - (offset + 1);
+ add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
if (add > 0) {
err = ext4_group_extend_no_check(sb, o_blocks_count, add);
if (err)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b0915b734a38..0556cd036b69 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1159,6 +1159,9 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
if (inode->i_ino == EXT4_ROOT_INO)
return -EPERM;
+ if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
+ return -EINVAL;
+
res = ext4_convert_inline_data(inode);
if (res)
return res;
@@ -1181,7 +1184,8 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
ext4_clear_inode_state(inode,
EXT4_STATE_MAY_INLINE_DATA);
/*
- * Update inode->i_flags - e.g. S_DAX may get disabled
+ * Update inode->i_flags - S_ENCRYPTED will be enabled,
+ * S_DAX may be disabled
*/
ext4_set_inode_flags(inode);
}
@@ -1206,7 +1210,10 @@ retry:
ctx, len, 0);
if (!res) {
ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
- /* Update inode->i_flags - e.g. S_DAX may get disabled */
+ /*
+ * Update inode->i_flags - S_ENCRYPTED will be enabled,
+ * S_DAX may be disabled
+ */
ext4_set_inode_flags(inode);
res = ext4_mark_inode_dirty(handle, inode);
if (res)
@@ -1237,14 +1244,9 @@ static const struct fscrypt_operations ext4_cryptops = {
.get_context = ext4_get_context,
.set_context = ext4_set_context,
.dummy_context = ext4_dummy_context,
- .is_encrypted = ext4_encrypted_inode,
.empty_dir = ext4_empty_dir,
.max_namelen = ext4_max_namelen,
};
-#else
-static const struct fscrypt_operations ext4_cryptops = {
- .is_encrypted = ext4_encrypted_inode,
-};
#endif
#ifdef CONFIG_QUOTA
@@ -2791,14 +2793,11 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
* This function is called once a day if we have errors logged
* on the file system
*/
-static void print_daily_error_info(unsigned long arg)
+static void print_daily_error_info(struct timer_list *t)
{
- struct super_block *sb = (struct super_block *) arg;
- struct ext4_sb_info *sbi;
- struct ext4_super_block *es;
-
- sbi = EXT4_SB(sb);
- es = sbi->s_es;
+ struct ext4_sb_info *sbi = from_timer(sbi, t, s_err_report);
+ struct super_block *sb = sbi->s_sb;
+ struct ext4_super_block *es = sbi->s_es;
if (es->s_error_count)
/* fsck newer than v1.41.13 is needed to clean this condition. */
@@ -3708,6 +3707,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
+ if (ext4_has_feature_inline_data(sb)) {
+ ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
+ " that may contain inline data");
+ goto failed_mount;
+ }
err = bdev_dax_supported(sb, blocksize);
if (err)
goto failed_mount;
@@ -3977,11 +3981,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
sbi->s_gdb_count = db_count;
- get_random_bytes(&sbi->s_next_generation, sizeof(u32));
- spin_lock_init(&sbi->s_next_gen_lock);
- setup_timer(&sbi->s_err_report, print_daily_error_info,
- (unsigned long) sb);
+ timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
/* Register extent status tree shrinker */
if (ext4_es_register_shrinker(sbi))
@@ -3996,7 +3997,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &ext4_sops;
sb->s_export_op = &ext4_export_ops;
sb->s_xattr = ext4_xattr_handlers;
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
sb->s_cop = &ext4_cryptops;
+#endif
#ifdef CONFIG_QUOTA
sb->dq_op = &ext4_quota_operations;
if (ext4_has_feature_quota(sb))
@@ -4612,7 +4615,8 @@ static int ext4_load_journal(struct super_block *sb,
"required on readonly filesystem");
if (really_read_only) {
ext4_msg(sb, KERN_ERR, "write access "
- "unavailable, cannot proceed");
+ "unavailable, cannot proceed "
+ "(try mounting with noload)");
return -EROFS;
}
ext4_msg(sb, KERN_INFO, "write access will "
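The print_daily_error_info()/timer_setup() hunks above follow the tree-wide timer API conversion: callbacks now take a struct timer_list pointer and recover their containing object with from_timer() instead of an unsigned long cookie. A generic sketch of the pattern (names are made up for illustration):

	#include <linux/timer.h>

	struct my_ctx {
		struct timer_list timer;
		int hits;
	};

	static void my_timer_fn(struct timer_list *t)
	{
		/* from_timer() is container_of() keyed on the timer_list member. */
		struct my_ctx *ctx = from_timer(ctx, t, timer);

		ctx->hits++;
	}

	static void my_ctx_init(struct my_ctx *ctx)
	{
		/* replaces setup_timer(&ctx->timer, old_fn, (unsigned long)ctx) */
		timer_setup(&ctx->timer, my_timer_fn, 0);
	}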
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 04fe1df052b2..0bb8e2c022d3 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -305,25 +305,22 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
long nr_to_write, enum iostat_type io_type)
{
struct address_space *mapping = META_MAPPING(sbi);
- pgoff_t index = 0, end = ULONG_MAX, prev = ULONG_MAX;
+ pgoff_t index = 0, prev = ULONG_MAX;
struct pagevec pvec;
long nwritten = 0;
+ int nr_pages;
struct writeback_control wbc = {
.for_reclaim = 0,
};
struct blk_plug plug;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
blk_start_plug(&plug);
- while (index <= end) {
- int i, nr_pages;
- nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
- PAGECACHE_TAG_DIRTY,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
- if (unlikely(nr_pages == 0))
- break;
+ while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+ PAGECACHE_TAG_DIRTY))) {
+ int i;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
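The pagevec hunks in this and the following files track two API changes: pagevec_init() no longer takes a cold argument, and the tag lookups either drop the nr_pages argument (pagevec_lookup_tag(), which now always fills up to PAGEVEC_SIZE pages) or take an explicit end (pagevec_lookup_range_tag()), which is why the manual page->index > end checks disappear. A sketch of the resulting iteration idiom (illustrative helper, not from the patch):

	#include <linux/pagemap.h>
	#include <linux/pagevec.h>

	static void example_walk_dirty(struct address_space *mapping,
				       pgoff_t index, pgoff_t end)
	{
		struct pagevec pvec;
		int nr_pages, i;

		pagevec_init(&pvec);
		while (index <= end &&
		       (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
							    end, PAGECACHE_TAG_DIRTY))) {
			for (i = 0; i < nr_pages; i++) {
				struct page *page = pvec.pages[i];

				/* every page here satisfies page->index <= end */
				lock_page(page);
				/* ... per-page work ... */
				unlock_page(page);
			}
			pagevec_release(&pvec);
		}
	}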
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 36b535207c88..7b3ad5d8e2e9 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1635,7 +1635,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
int range_whole = 0;
int tag;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
if (get_dirty_pages(mapping->host) <=
SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
@@ -1669,8 +1669,8 @@ retry:
while (!done && (index <= end)) {
int i;
- nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
+ nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+ tag);
if (nr_pages == 0)
break;
@@ -1678,11 +1678,6 @@ retry:
struct page *page = pvec.pages[i];
bool submitted = false;
- if (page->index > end) {
- done = 1;
- break;
- }
-
done_index = page->index;
retry_write:
lock_page(page);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 4b4a72f392be..115204fdefcc 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -23,13 +23,11 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
-#include <linux/fscrypt_supp.h>
-#else
-#include <linux/fscrypt_notsupp.h>
-#endif
#include <crypto/hash.h>
+#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION)
+#include <linux/fscrypt.h>
+
#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition) BUG_ON(condition)
#else
@@ -2949,6 +2947,7 @@ static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_ENCRYPTION
file_set_encrypt(inode);
+ inode->i_flags |= S_ENCRYPTED;
#endif
}
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 517e112c8a9a..f78b76ec4707 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -313,18 +313,19 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
pgoff_t pgofs, int whence)
{
- struct pagevec pvec;
+ struct page *page;
int nr_pages;
if (whence != SEEK_DATA)
return 0;
/* find first dirty page index */
- pagevec_init(&pvec, 0);
- nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
- PAGECACHE_TAG_DIRTY, 1);
- pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
- pagevec_release(&pvec);
+ nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
+ 1, &page);
+ if (!nr_pages)
+ return ULONG_MAX;
+ pgofs = page->index;
+ put_page(page);
return pgofs;
}
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 50c88e37ed66..53fb08810ee9 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -43,8 +43,11 @@ void f2fs_set_inode_flags(struct inode *inode)
new_fl |= S_NOATIME;
if (flags & FS_DIRSYNC_FL)
new_fl |= S_DIRSYNC;
+ if (f2fs_encrypted_inode(inode))
+ new_fl |= S_ENCRYPTED;
inode_set_flags(inode, new_fl,
- S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
+ S_ENCRYPTED);
}
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index fca87835a1da..b33dac9592ca 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1277,21 +1277,17 @@ release_page:
static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
- pgoff_t index, end;
+ pgoff_t index;
struct pagevec pvec;
struct page *last_page = NULL;
+ int nr_pages;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
index = 0;
- end = ULONG_MAX;
-
- while (index <= end) {
- int i, nr_pages;
- nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
- PAGECACHE_TAG_DIRTY,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
- if (nr_pages == 0)
- break;
+
+ while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+ PAGECACHE_TAG_DIRTY))) {
+ int i;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
@@ -1425,13 +1421,14 @@ static int f2fs_write_node_page(struct page *page,
int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct writeback_control *wbc, bool atomic)
{
- pgoff_t index, end;
+ pgoff_t index;
pgoff_t last_idx = ULONG_MAX;
struct pagevec pvec;
int ret = 0;
struct page *last_page = NULL;
bool marked = false;
nid_t ino = inode->i_ino;
+ int nr_pages;
if (atomic) {
last_page = last_fsync_dnode(sbi, ino);
@@ -1439,17 +1436,12 @@ int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
return PTR_ERR_OR_ZERO(last_page);
}
retry:
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
index = 0;
- end = ULONG_MAX;
-
- while (index <= end) {
- int i, nr_pages;
- nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
- PAGECACHE_TAG_DIRTY,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
- if (nr_pages == 0)
- break;
+
+ while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+ PAGECACHE_TAG_DIRTY))) {
+ int i;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
@@ -1548,25 +1540,21 @@ out:
int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc,
bool do_balance, enum iostat_type io_type)
{
- pgoff_t index, end;
+ pgoff_t index;
struct pagevec pvec;
int step = 0;
int nwritten = 0;
int ret = 0;
+ int nr_pages;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
next_step:
index = 0;
- end = ULONG_MAX;
-
- while (index <= end) {
- int i, nr_pages;
- nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
- PAGECACHE_TAG_DIRTY,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
- if (nr_pages == 0)
- break;
+
+ while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+ PAGECACHE_TAG_DIRTY))) {
+ int i;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
@@ -1655,27 +1643,20 @@ out:
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
- pgoff_t index = 0, end = ULONG_MAX;
+ pgoff_t index = 0;
struct pagevec pvec;
int ret2, ret = 0;
+ int nr_pages;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
- while (index <= end) {
- int i, nr_pages;
- nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
- PAGECACHE_TAG_WRITEBACK,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
- if (nr_pages == 0)
- break;
+ while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+ PAGECACHE_TAG_WRITEBACK))) {
+ int i;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- /* until radix tree lookup accepts end_index */
- if (unlikely(page->index > end))
- continue;
-
if (ino && ino_of_node(page) == ino) {
f2fs_wait_on_page_writeback(page, NODE, true);
if (TestClearPageError(page))
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 933c3d529e65..97e03c637e90 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1594,14 +1594,9 @@ static const struct fscrypt_operations f2fs_cryptops = {
.key_prefix = "f2fs:",
.get_context = f2fs_get_context,
.set_context = f2fs_set_context,
- .is_encrypted = f2fs_encrypted_inode,
.empty_dir = f2fs_empty_dir,
.max_namelen = f2fs_max_namelen,
};
-#else
-static const struct fscrypt_operations f2fs_cryptops = {
- .is_encrypted = f2fs_encrypted_inode,
-};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
@@ -2320,7 +2315,9 @@ try_onemore:
#endif
sb->s_op = &f2fs_sops;
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
sb->s_cop = &f2fs_cryptops;
+#endif
sb->s_xattr = f2fs_xattr_handlers;
sb->s_export_op = &f2fs_export_ops;
sb->s_magic = F2FS_SUPER_MAGIC;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 8d78ffd7b399..30f47d0f74a0 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -725,7 +725,7 @@ static void send_sigio_to_task(struct task_struct *p,
* F_SETSIG can change ->signum lockless in parallel, make
* sure we read it once and use the same value throughout.
*/
- int signum = ACCESS_ONCE(fown->signum);
+ int signum = READ_ONCE(fown->signum);
if (!sigio_perm(p, fown, signum))
return;
diff --git a/fs/file_table.c b/fs/file_table.c
index 61517f57f8ef..2dc9f38bd195 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -201,11 +201,11 @@ static void __fput(struct file *file)
eventpoll_release(file);
locks_remove_file(file);
+ ima_file_free(file);
if (unlikely(file->f_flags & FASYNC)) {
if (file->f_op->fasync)
file->f_op->fasync(-1, file, 0);
}
- ima_file_free(file);
if (file->f_op->release)
file->f_op->release(inode, file);
security_file_free(file);
@@ -312,7 +312,7 @@ void put_filp(struct file *file)
void __init files_init(void)
{
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
- SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL);
percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 245c430a2e41..08f5debd07d1 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -933,33 +933,36 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
#endif /* CONFIG_CGROUP_WRITEBACK */
-void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
- bool range_cyclic, enum wb_reason reason)
+/*
+ * Add in the number of potentially dirty inodes, because each inode
+ * write can dirty pagecache in the underlying blockdev.
+ */
+static unsigned long get_nr_dirty_pages(void)
{
- struct wb_writeback_work *work;
+ return global_node_page_state(NR_FILE_DIRTY) +
+ global_node_page_state(NR_UNSTABLE_NFS) +
+ get_nr_dirty_inodes();
+}
+static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
+{
if (!wb_has_dirty_io(wb))
return;
/*
- * This is WB_SYNC_NONE writeback, so if allocation fails just
- * wakeup the thread for old dirty data writeback
+ * All callers of this function want to start writeback of all
+ * dirty pages. Places like vmscan can call this at a very
+ * high frequency, causing pointless allocations of tons of
+ * work items and keeping the flusher threads busy retrieving
+ * that work. Ensure that we only allow one of them pending and
+ * inflight at the time.
*/
- work = kzalloc(sizeof(*work),
- GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
- if (!work) {
- trace_writeback_nowork(wb);
- wb_wakeup(wb);
+ if (test_bit(WB_start_all, &wb->state) ||
+ test_and_set_bit(WB_start_all, &wb->state))
return;
- }
-
- work->sync_mode = WB_SYNC_NONE;
- work->nr_pages = nr_pages;
- work->range_cyclic = range_cyclic;
- work->reason = reason;
- work->auto_free = 1;
- wb_queue_work(wb, work);
+ wb->start_all_reason = reason;
+ wb_wakeup(wb);
}
/**
@@ -1814,17 +1817,6 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
return work;
}
-/*
- * Add in the number of potentially dirty inodes, because each inode
- * write can dirty pagecache in the underlying blockdev.
- */
-static unsigned long get_nr_dirty_pages(void)
-{
- return global_node_page_state(NR_FILE_DIRTY) +
- global_node_page_state(NR_UNSTABLE_NFS) +
- get_nr_dirty_inodes();
-}
-
static long wb_check_background_flush(struct bdi_writeback *wb)
{
if (wb_over_bg_thresh(wb)) {
@@ -1877,6 +1869,30 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
return 0;
}
+static long wb_check_start_all(struct bdi_writeback *wb)
+{
+ long nr_pages;
+
+ if (!test_bit(WB_start_all, &wb->state))
+ return 0;
+
+ nr_pages = get_nr_dirty_pages();
+ if (nr_pages) {
+ struct wb_writeback_work work = {
+ .nr_pages = wb_split_bdi_pages(wb, nr_pages),
+ .sync_mode = WB_SYNC_NONE,
+ .range_cyclic = 1,
+ .reason = wb->start_all_reason,
+ };
+
+ nr_pages = wb_writeback(wb, &work);
+ }
+
+ clear_bit(WB_start_all, &wb->state);
+ return nr_pages;
+}
+
+
/*
* Retrieve work items and do the writeback they describe
*/
@@ -1893,6 +1909,11 @@ static long wb_do_writeback(struct bdi_writeback *wb)
}
/*
+ * Check for a flush-everything request
+ */
+ wrote += wb_check_start_all(wb);
+
+ /*
* Check for periodic writeback, kupdated() style
*/
wrote += wb_check_old_data_flush(wb);
@@ -1947,10 +1968,33 @@ void wb_workfn(struct work_struct *work)
}
/*
- * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
- * the whole world.
+ * Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
+ * write back the whole world.
*/
-void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
+static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
+ enum wb_reason reason)
+{
+ struct bdi_writeback *wb;
+
+ if (!bdi_has_dirty_io(bdi))
+ return;
+
+ list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
+ wb_start_writeback(wb, reason);
+}
+
+void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
+ enum wb_reason reason)
+{
+ rcu_read_lock();
+ __wakeup_flusher_threads_bdi(bdi, reason);
+ rcu_read_unlock();
+}
+
+/*
+ * Wake up the flusher threads to start writeback of all currently dirty pages
+ */
+void wakeup_flusher_threads(enum wb_reason reason)
{
struct backing_dev_info *bdi;
@@ -1960,20 +2004,9 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
if (blk_needs_flush_plug(current))
blk_schedule_flush_plug(current);
- if (!nr_pages)
- nr_pages = get_nr_dirty_pages();
-
rcu_read_lock();
- list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
- struct bdi_writeback *wb;
-
- if (!bdi_has_dirty_io(bdi))
- continue;
-
- list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
- wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages),
- false, reason);
- }
+ list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
+ __wakeup_flusher_threads_bdi(bdi, reason);
rcu_read_unlock();
}
@@ -2343,37 +2376,19 @@ void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
EXPORT_SYMBOL(writeback_inodes_sb);
/**
- * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
+ * try_to_writeback_inodes_sb - try to start writeback if none underway
* @sb: the superblock
- * @nr: the number of pages to write
- * @reason: the reason of writeback
+ * @reason: reason why some writeback work was initiated
*
- * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
- * Returns 1 if writeback was started, 0 if not.
+ * Invoke __writeback_inodes_sb_nr if no writeback is currently underway.
*/
-bool try_to_writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
- enum wb_reason reason)
+void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
if (!down_read_trylock(&sb->s_umount))
- return false;
+ return;
- __writeback_inodes_sb_nr(sb, nr, reason, true);
+ __writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason, true);
up_read(&sb->s_umount);
- return true;
-}
-EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);
-
-/**
- * try_to_writeback_inodes_sb - try to start writeback if none underway
- * @sb: the superblock
- * @reason: reason why some writeback work was initiated
- *
- * Implement by try_to_writeback_inodes_sb_nr()
- * Returns 1 if writeback was started, 0 if not.
- */
-bool try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
-{
- return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);
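
The wb_start_writeback() rework above replaces a per-call work allocation with a single WB_start_all state bit: racing callers may all ask for a flush, but only the first one to claim the bit wakes the flusher, and wb_check_start_all() clears it once a full pass has run. A rough userspace sketch of that coalescing pattern, with invented names and a C11 atomic standing in for test_and_set_bit(), is shown below:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_bool start_all_pending;   /* plays the role of WB_start_all */

    static void wakeup_flusher(void)        /* stands in for wb_wakeup() */
    {
            puts("flusher woken");
    }

    static void start_writeback(void)       /* stands in for wb_start_writeback() */
    {
            /* Cheap unlocked test first, then the atomic claim; if the bit is
             * already set, a flush of everything is pending and we are done. */
            if (atomic_load(&start_all_pending) ||
                atomic_exchange(&start_all_pending, 1))
                    return;
            wakeup_flusher();
    }

    static void flusher_work(void)          /* stands in for wb_check_start_all() */
    {
            if (!atomic_load(&start_all_pending))
                    return;
            puts("writing back everything");
            atomic_store(&start_all_pending, 0);
    }

    int main(void)
    {
            start_writeback();
            start_writeback();              /* coalesced: no second wakeup */
            flusher_work();
            return 0;
    }
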
diff --git a/fs/fs_pin.c b/fs/fs_pin.c
index 0d285fd5b44a..a6497cf8ae53 100644
--- a/fs/fs_pin.c
+++ b/fs/fs_pin.c
@@ -79,7 +79,7 @@ void mnt_pin_kill(struct mount *m)
while (1) {
struct hlist_node *p;
rcu_read_lock();
- p = ACCESS_ONCE(m->mnt_pins.first);
+ p = READ_ONCE(m->mnt_pins.first);
if (!p) {
rcu_read_unlock();
break;
@@ -93,7 +93,7 @@ void group_pin_kill(struct hlist_head *p)
while (1) {
struct hlist_node *q;
rcu_read_lock();
- q = ACCESS_ONCE(p->first);
+ q = READ_ONCE(p->first);
if (!q) {
rcu_read_unlock();
break;
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 0ad3fd3ad0b4..961029e04027 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -1175,7 +1175,7 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
return;
}
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
next = 0;
do {
if (!pagevec_lookup(&pvec, mapping, &next))
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 13c65dd2d37d..17f0d05bfd4c 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -33,7 +33,7 @@ static struct fuse_dev *fuse_get_dev(struct file *file)
* Lockless access is OK, because file->private data is set
* once during mount and is valid until the file is released.
*/
- return ACCESS_ONCE(file->private_data);
+ return READ_ONCE(file->private_data);
}
static void fuse_request_init(struct fuse_req *req, struct page **pages,
@@ -1636,7 +1636,7 @@ out_finish:
static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
- release_pages(req->pages, req->num_pages, false);
+ release_pages(req->pages, req->num_pages);
}
static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 94a745acaef8..2f504d615d92 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -31,7 +31,7 @@ static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);
-static int set_global_limit(const char *val, struct kernel_param *kp);
+static int set_global_limit(const char *val, const struct kernel_param *kp);
unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
@@ -823,7 +823,7 @@ static void sanitize_global_limit(unsigned *limit)
*limit = (1 << 16) - 1;
}
-static int set_global_limit(const char *val, struct kernel_param *kp)
+static int set_global_limit(const char *val, const struct kernel_param *kp)
{
int rv;
@@ -1273,9 +1273,9 @@ static int __init fuse_fs_init(void)
int err;
fuse_inode_cachep = kmem_cache_create("fuse_inode",
- sizeof(struct fuse_inode), 0,
- SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
- fuse_inode_init_once);
+ sizeof(struct fuse_inode), 0,
+ SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT,
+ fuse_inode_init_once);
err = -ENOMEM;
if (!fuse_inode_cachep)
goto out;
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 90c6a8faaecb..43c827a7cce5 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -4,6 +4,7 @@ config GFS2_FS
select FS_POSIX_ACL
select CRC32
select QUOTACTL
+ select FS_IOMAP
help
A cluster filesystem.
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 9d5eecb123de..776717f1eeea 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -141,6 +141,7 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
ret = __gfs2_set_acl(inode, acl, type);
if (!ret && mode != inode->i_mode) {
+ inode->i_ctime = current_time(inode);
inode->i_mode = mode;
mark_inode_dirty(inode);
}
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 68ed06962537..1daf15a1f00c 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -280,22 +280,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
for(i = 0; i < nr_pages; i++) {
struct page *page = pvec->pages[i];
- /*
- * At this point, the page may be truncated or
- * invalidated (changing page->mapping to NULL), or
- * even swizzled back from swapper_space to tmpfs file
- * mapping. However, page->index will not change
- * because we have a reference on the page.
- */
- if (page->index > end) {
- /*
- * can't be range_cyclic (1st pass) because
- * end == -1 in that case.
- */
- ret = 1;
- break;
- }
-
*done_index = page->index;
lock_page(page);
@@ -387,7 +371,7 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
int range_whole = 0;
int tag;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
@@ -413,8 +397,8 @@ retry:
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && (index <= end)) {
- nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+ tag);
if (nr_pages == 0)
break;
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 3dd0cceefa43..d5f0d96169c5 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -13,6 +13,7 @@
#include <linux/blkdev.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
+#include <linux/iomap.h>
#include "gfs2.h"
#include "incore.h"
@@ -36,6 +37,8 @@
struct metapath {
struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
__u16 mp_list[GFS2_MAX_META_HEIGHT];
+ int mp_fheight; /* find_metapath height */
+ int mp_aheight; /* actual height (lookup height) */
};
/**
@@ -235,9 +238,9 @@ static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
{
unsigned int i;
+ mp->mp_fheight = height;
for (i = height; i--;)
mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
-
}
static inline unsigned int metapath_branch_start(const struct metapath *mp)
@@ -248,7 +251,7 @@ static inline unsigned int metapath_branch_start(const struct metapath *mp)
}
/**
- * metaptr1 - Return the first possible metadata pointer in a metaath buffer
+ * metaptr1 - Return the first possible metadata pointer in a metapath buffer
* @height: The metadata height (0 = dinode)
* @mp: The metapath
*/
@@ -345,10 +348,13 @@ static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
for (x = 0; x < end_of_metadata; x++) {
ret = lookup_mp_height(ip, mp, x);
if (ret)
- return ret;
+ goto out;
}
- return ip->i_height;
+ ret = ip->i_height;
+out:
+ mp->mp_aheight = ret;
+ return ret;
}
/**
@@ -480,10 +486,11 @@ static inline unsigned int hptrs(struct gfs2_sbd *sdp, const unsigned int hgt)
* @inode: The GFS2 inode
* @lblock: The logical starting block of the extent
* @bh_map: This is used to return the mapping details
- * @mp: The metapath
- * @sheight: The starting height (i.e. whats already mapped)
- * @height: The height to build to
+ * @zero_new: True if newly allocated blocks should be zeroed
+ * @mp: The metapath, with proper height information calculated
* @maxlen: The max number of data blocks to alloc
+ * @dblock: Pointer to return the resulting new block
+ * @dblks: Pointer to return the number of blocks allocated
*
* In this routine we may have to alloc:
* i) Indirect blocks to grow the metadata tree height
@@ -499,63 +506,63 @@ static inline unsigned int hptrs(struct gfs2_sbd *sdp, const unsigned int hgt)
* Returns: errno on error
*/
-static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
- struct buffer_head *bh_map, struct metapath *mp,
- const unsigned int sheight,
- const unsigned int height,
- const size_t maxlen)
+static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
+ unsigned flags, struct metapath *mp)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct super_block *sb = sdp->sd_vfs;
struct buffer_head *dibh = mp->mp_bh[0];
- u64 bn, dblock = 0;
+ u64 bn;
unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
unsigned dblks = 0;
unsigned ptrs_per_blk;
- const unsigned end_of_metadata = height - 1;
+ const unsigned end_of_metadata = mp->mp_fheight - 1;
int ret;
- int eob = 0;
enum alloc_state state;
__be64 *ptr;
__be64 zero_bn = 0;
+ size_t maxlen = iomap->length >> inode->i_blkbits;
- BUG_ON(sheight < 1);
+ BUG_ON(mp->mp_aheight < 1);
BUG_ON(dibh == NULL);
gfs2_trans_add_meta(ip->i_gl, dibh);
- if (height == sheight) {
+ if (mp->mp_fheight == mp->mp_aheight) {
struct buffer_head *bh;
+ int eob;
+
/* Bottom indirect block exists, find unalloced extent size */
ptr = metapointer(end_of_metadata, mp);
bh = mp->mp_bh[end_of_metadata];
- dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
- &eob);
+ dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr,
+ maxlen, &eob);
BUG_ON(dblks < 1);
state = ALLOC_DATA;
} else {
/* Need to allocate indirect blocks */
- ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
+ ptrs_per_blk = mp->mp_fheight > 1 ? sdp->sd_inptrs :
+ sdp->sd_diptrs;
dblks = min(maxlen, (size_t)(ptrs_per_blk -
mp->mp_list[end_of_metadata]));
- if (height == ip->i_height) {
+ if (mp->mp_fheight == ip->i_height) {
/* Writing into existing tree, extend tree down */
- iblks = height - sheight;
+ iblks = mp->mp_fheight - mp->mp_aheight;
state = ALLOC_GROW_DEPTH;
} else {
/* Building up tree height */
state = ALLOC_GROW_HEIGHT;
- iblks = height - ip->i_height;
+ iblks = mp->mp_fheight - ip->i_height;
branch_start = metapath_branch_start(mp);
- iblks += (height - branch_start);
+ iblks += (mp->mp_fheight - branch_start);
}
}
/* start of the second part of the function (state machine) */
blks = dblks + iblks;
- i = sheight;
+ i = mp->mp_aheight;
do {
int error;
n = blks - alloced;
@@ -573,9 +580,10 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
sizeof(struct gfs2_dinode));
zero_bn = *ptr;
}
- for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
+ for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
+ i++, n--)
gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
- if (i - 1 == height - ip->i_height) {
+ if (i - 1 == mp->mp_fheight - ip->i_height) {
i--;
gfs2_buffer_copy_tail(mp->mp_bh[i],
sizeof(struct gfs2_meta_header),
@@ -587,7 +595,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
sizeof(struct gfs2_meta_header));
*ptr = zero_bn;
state = ALLOC_GROW_DEPTH;
- for(i = branch_start; i < height; i++) {
+ for(i = branch_start; i < mp->mp_fheight; i++) {
if (mp->mp_bh[i] == NULL)
break;
brelse(mp->mp_bh[i]);
@@ -599,12 +607,12 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
break;
/* Branching from existing tree */
case ALLOC_GROW_DEPTH:
- if (i > 1 && i < height)
+ if (i > 1 && i < mp->mp_fheight)
gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
- for (; i < height && n > 0; i++, n--)
+ for (; i < mp->mp_fheight && n > 0; i++, n--)
gfs2_indirect_init(mp, ip->i_gl, i,
mp->mp_list[i-1], bn++);
- if (i == height)
+ if (i == mp->mp_fheight)
state = ALLOC_DATA;
if (n == 0)
break;
@@ -615,119 +623,269 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
dblks = n;
ptr = metapointer(end_of_metadata, mp);
- dblock = bn;
+ iomap->addr = bn << inode->i_blkbits;
+ iomap->flags |= IOMAP_F_NEW;
while (n-- > 0)
*ptr++ = cpu_to_be64(bn++);
- if (buffer_zeronew(bh_map)) {
- ret = sb_issue_zeroout(sb, dblock, dblks,
- GFP_NOFS);
+ if (flags & IOMAP_ZERO) {
+ ret = sb_issue_zeroout(sb, iomap->addr >> inode->i_blkbits,
+ dblks, GFP_NOFS);
if (ret) {
fs_err(sdp,
"Failed to zero data buffers\n");
- clear_buffer_zeronew(bh_map);
+ flags &= ~IOMAP_ZERO;
}
}
break;
}
- } while ((state != ALLOC_DATA) || !dblock);
+ } while (iomap->addr == IOMAP_NULL_ADDR);
- ip->i_height = height;
+ iomap->length = (u64)dblks << inode->i_blkbits;
+ ip->i_height = mp->mp_fheight;
gfs2_add_inode_blocks(&ip->i_inode, alloced);
gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
- map_bh(bh_map, inode->i_sb, dblock);
- bh_map->b_size = dblks << inode->i_blkbits;
- set_buffer_new(bh_map);
return 0;
}
/**
- * gfs2_block_map - Map a block from an inode to a disk block
+ * hole_size - figure out the size of a hole
* @inode: The inode
- * @lblock: The logical block number
- * @bh_map: The bh to be mapped
- * @create: True if its ok to alloc blocks to satify the request
+ * @lblock: The logical starting block number
+ * @mp: The metapath
*
- * Sets buffer_mapped() if successful, sets buffer_boundary() if a
- * read of metadata will be required before the next block can be
- * mapped. Sets buffer_new() if new blocks were allocated.
+ * Returns: The hole size in bytes
*
- * Returns: errno
*/
+static u64 hole_size(struct inode *inode, sector_t lblock, struct metapath *mp)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct metapath mp_eof;
+ u64 factor = 1;
+ int hgt;
+ u64 holesz = 0;
+ const __be64 *first, *end, *ptr;
+ const struct buffer_head *bh;
+ u64 lblock_stop = (i_size_read(inode) - 1) >> inode->i_blkbits;
+ int zeroptrs;
+ bool done = false;
+
+ /* Get another metapath, to the very last byte */
+ find_metapath(sdp, lblock_stop, &mp_eof, ip->i_height);
+ for (hgt = ip->i_height - 1; hgt >= 0 && !done; hgt--) {
+ bh = mp->mp_bh[hgt];
+ if (bh) {
+ zeroptrs = 0;
+ first = metapointer(hgt, mp);
+ end = (const __be64 *)(bh->b_data + bh->b_size);
+
+ for (ptr = first; ptr < end; ptr++) {
+ if (*ptr) {
+ done = true;
+ break;
+ } else {
+ zeroptrs++;
+ }
+ }
+ } else {
+ zeroptrs = sdp->sd_inptrs;
+ }
+ if (factor * zeroptrs >= lblock_stop - lblock + 1) {
+ holesz = lblock_stop - lblock + 1;
+ break;
+ }
+ holesz += factor * zeroptrs;
-int gfs2_block_map(struct inode *inode, sector_t lblock,
- struct buffer_head *bh_map, int create)
+ factor *= sdp->sd_inptrs;
+ if (hgt && (mp->mp_list[hgt - 1] < mp_eof.mp_list[hgt - 1]))
+ (mp->mp_list[hgt - 1])++;
+ }
+ return holesz << inode->i_blkbits;
+}
+
+static void gfs2_stuffed_iomap(struct inode *inode, struct iomap *iomap)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+
+ iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
+ sizeof(struct gfs2_dinode);
+ iomap->offset = 0;
+ iomap->length = i_size_read(inode);
+ iomap->type = IOMAP_MAPPED;
+ iomap->flags = IOMAP_F_DATA_INLINE;
+}
+
+/**
+ * gfs2_iomap_begin - Map blocks from an inode to disk blocks
+ * @inode: The inode
+ * @pos: Starting position in bytes
+ * @length: Length to map, in bytes
+ * @flags: iomap flags
+ * @iomap: The iomap structure
+ *
+ * Returns: errno
+ */
+int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
+ unsigned flags, struct iomap *iomap)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- unsigned int bsize = sdp->sd_sb.sb_bsize;
- const size_t maxlen = bh_map->b_size >> inode->i_blkbits;
+ struct metapath mp = { .mp_aheight = 1, };
+ unsigned int factor = sdp->sd_sb.sb_bsize;
const u64 *arr = sdp->sd_heightsize;
__be64 *ptr;
- u64 size;
- struct metapath mp;
+ sector_t lblock;
+ sector_t lend;
int ret;
int eob;
unsigned int len;
struct buffer_head *bh;
u8 height;
- BUG_ON(maxlen == 0);
+ trace_gfs2_iomap_start(ip, pos, length, flags);
+ if (!length) {
+ ret = -EINVAL;
+ goto out;
+ }
- memset(&mp, 0, sizeof(mp));
- bmap_lock(ip, create);
- clear_buffer_mapped(bh_map);
- clear_buffer_new(bh_map);
- clear_buffer_boundary(bh_map);
- trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
+ if ((flags & IOMAP_REPORT) && gfs2_is_stuffed(ip)) {
+ gfs2_stuffed_iomap(inode, iomap);
+ if (pos >= iomap->length)
+ return -ENOENT;
+ ret = 0;
+ goto out;
+ }
+
+ lblock = pos >> inode->i_blkbits;
+ lend = (pos + length + sdp->sd_sb.sb_bsize - 1) >> inode->i_blkbits;
+
+ iomap->offset = lblock << inode->i_blkbits;
+ iomap->addr = IOMAP_NULL_ADDR;
+ iomap->type = IOMAP_HOLE;
+ iomap->length = (u64)(lend - lblock) << inode->i_blkbits;
+ iomap->flags = IOMAP_F_MERGED;
+ bmap_lock(ip, 0);
+
+ /*
+ * Directory data blocks have a struct gfs2_meta_header header, so the
+ * remaining size is smaller than the filesystem block size. Logical
+ * block numbers for directories are in units of this remaining size!
+ */
if (gfs2_is_dir(ip)) {
- bsize = sdp->sd_jbsize;
+ factor = sdp->sd_jbsize;
arr = sdp->sd_jheightsize;
}
ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
if (ret)
- goto out;
+ goto out_release;
height = ip->i_height;
- size = (lblock + 1) * bsize;
- while (size > arr[height])
+ while ((lblock + 1) * factor > arr[height])
height++;
find_metapath(sdp, lblock, &mp, height);
- ret = 1;
if (height > ip->i_height || gfs2_is_stuffed(ip))
goto do_alloc;
+
ret = lookup_metapath(ip, &mp);
if (ret < 0)
- goto out;
- if (ret != ip->i_height)
+ goto out_release;
+
+ if (mp.mp_aheight != ip->i_height)
goto do_alloc;
+
ptr = metapointer(ip->i_height - 1, &mp);
if (*ptr == 0)
goto do_alloc;
- map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
+
+ iomap->type = IOMAP_MAPPED;
+ iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
+
bh = mp.mp_bh[ip->i_height - 1];
- len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
- bh_map->b_size = (len << inode->i_blkbits);
+ len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, lend - lblock, &eob);
if (eob)
- set_buffer_boundary(bh_map);
+ iomap->flags |= IOMAP_F_BOUNDARY;
+ iomap->length = (u64)len << inode->i_blkbits;
+
ret = 0;
-out:
+
+out_release:
release_metapath(&mp);
- trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
- bmap_unlock(ip, create);
+ bmap_unlock(ip, 0);
+out:
+ trace_gfs2_iomap_end(ip, iomap, ret);
return ret;
do_alloc:
- /* All allocations are done here, firstly check create flag */
- if (!create) {
- BUG_ON(gfs2_is_stuffed(ip));
+ if (!(flags & IOMAP_WRITE)) {
+ if (pos >= i_size_read(inode)) {
+ ret = -ENOENT;
+ goto out_release;
+ }
ret = 0;
+ iomap->length = hole_size(inode, lblock, &mp);
+ goto out_release;
+ }
+
+ ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
+ goto out_release;
+}
+
+/**
+ * gfs2_block_map - Map a block from an inode to a disk block
+ * @inode: The inode
+ * @lblock: The logical block number
+ * @bh_map: The bh to be mapped
+ * @create: True if it's ok to alloc blocks to satisfy the request
+ *
+ * Sets buffer_mapped() if successful, sets buffer_boundary() if a
+ * read of metadata will be required before the next block can be
+ * mapped. Sets buffer_new() if new blocks were allocated.
+ *
+ * Returns: errno
+ */
+
+int gfs2_block_map(struct inode *inode, sector_t lblock,
+ struct buffer_head *bh_map, int create)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct iomap iomap;
+ int ret, flags = 0;
+
+ clear_buffer_mapped(bh_map);
+ clear_buffer_new(bh_map);
+ clear_buffer_boundary(bh_map);
+ trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
+
+ if (create)
+ flags |= IOMAP_WRITE;
+ if (buffer_zeronew(bh_map))
+ flags |= IOMAP_ZERO;
+ ret = gfs2_iomap_begin(inode, (loff_t)lblock << inode->i_blkbits,
+ bh_map->b_size, flags, &iomap);
+ if (ret) {
+ if (!create && ret == -ENOENT) {
+ /* Return unmapped buffer beyond the end of file. */
+ ret = 0;
+ }
goto out;
}
- /* At this point ret is the tree depth of already allocated blocks */
- ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
- goto out;
+ if (iomap.length > bh_map->b_size) {
+ iomap.length = bh_map->b_size;
+ iomap.flags &= ~IOMAP_F_BOUNDARY;
+ }
+ if (iomap.addr != IOMAP_NULL_ADDR)
+ map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
+ bh_map->b_size = iomap.length;
+ if (iomap.flags & IOMAP_F_BOUNDARY)
+ set_buffer_boundary(bh_map);
+ if (iomap.flags & IOMAP_F_NEW)
+ set_buffer_new(bh_map);
+
+out:
+ trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
+ return ret;
}
/*
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
index 81ded5e2aaa2..443cc182cf18 100644
--- a/fs/gfs2/bmap.h
+++ b/fs/gfs2/bmap.h
@@ -10,6 +10,8 @@
#ifndef __BMAP_DOT_H__
#define __BMAP_DOT_H__
+#include <linux/iomap.h>
+
#include "inode.h"
struct inode;
@@ -47,6 +49,8 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
extern int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
extern int gfs2_block_map(struct inode *inode, sector_t lblock,
struct buffer_head *bh, int create);
+extern int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
+ unsigned flags, struct iomap *iomap);
extern int gfs2_extent_map(struct inode *inode, u64 lblock, int *new,
u64 *dblock, unsigned *extlen);
extern int gfs2_setattr_size(struct inode *inode, u64 size);
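
gfs2_iomap_begin(), declared above, reports one extent per call through struct iomap (offset, length, addr, type, flags) instead of mapping a single buffer_head. A stripped-down userspace sketch of that reporting model, with an invented extent_map type and one hard-coded extent, could look like this; it is illustrative only and handles just a hole followed by one mapped extent:

    #include <stdint.h>
    #include <stdio.h>

    enum ext_type { EXT_HOLE, EXT_MAPPED };

    struct extent_map {
            uint64_t offset;        /* logical byte offset of the extent */
            uint64_t length;        /* extent length in bytes */
            uint64_t addr;          /* disk byte address (mapped extents only) */
            enum ext_type type;
    };

    /* Pretend the file has one mapped extent at [4096, 8192) stored at 1 MiB
     * on disk; positions past that extent are not handled in this sketch. */
    static void map_begin(uint64_t pos, uint64_t len, struct extent_map *em)
    {
            const uint64_t data_start = 4096, data_end = 8192, disk = 1 << 20;

            em->offset = pos;
            if (pos < data_start) {                 /* hole before the data */
                    em->length = (data_start - pos < len) ? data_start - pos : len;
                    em->type = EXT_HOLE;
                    em->addr = 0;
            } else {                                /* inside the mapped extent */
                    em->length = (data_end - pos < len) ? data_end - pos : len;
                    em->type = EXT_MAPPED;
                    em->addr = disk + (pos - data_start);
            }
    }

    int main(void)
    {
            struct extent_map em;

            map_begin(0, 65536, &em);
            printf("type %d: %llu+%llu\n", em.type,
                   (unsigned long long)em.offset, (unsigned long long)em.length);
            map_begin(em.offset + em.length, 65536, &em);
            printf("type %d: %llu+%llu -> %llu\n", em.type,
                   (unsigned long long)em.offset, (unsigned long long)em.length,
                   (unsigned long long)em.addr);
            return 0;
    }
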
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 33a0cb5701a3..58705ef8643a 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -60,9 +60,7 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
loff_t error;
switch (whence) {
- case SEEK_END: /* These reference inode->i_size */
- case SEEK_DATA:
- case SEEK_HOLE:
+ case SEEK_END:
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
&i_gh);
if (!error) {
@@ -70,8 +68,21 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
gfs2_glock_dq_uninit(&i_gh);
}
break;
+
+ case SEEK_DATA:
+ error = gfs2_seek_data(file, offset);
+ break;
+
+ case SEEK_HOLE:
+ error = gfs2_seek_hole(file, offset);
+ break;
+
case SEEK_CUR:
case SEEK_SET:
+ /*
+ * These don't reference inode->i_size and don't depend on the
+ * block mapping, so we don't need the glock.
+ */
error = generic_file_llseek(file, offset, whence);
break;
default:
@@ -108,45 +119,22 @@ static int gfs2_readdir(struct file *file, struct dir_context *ctx)
}
/**
- * fsflags_cvt
- * @table: A table of 32 u32 flags
- * @val: a 32 bit value to convert
- *
- * This function can be used to convert between fsflags values and
- * GFS2's own flags values.
+ * fsflag_gfs2flag
*
- * Returns: the converted flags
+ * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
+ * and to GFS2_DIF_JDATA for non-directories.
*/
-static u32 fsflags_cvt(const u32 *table, u32 val)
-{
- u32 res = 0;
- while(val) {
- if (val & 1)
- res |= *table;
- table++;
- val >>= 1;
- }
- return res;
-}
-
-static const u32 fsflags_to_gfs2[32] = {
- [3] = GFS2_DIF_SYNC,
- [4] = GFS2_DIF_IMMUTABLE,
- [5] = GFS2_DIF_APPENDONLY,
- [7] = GFS2_DIF_NOATIME,
- [12] = GFS2_DIF_EXHASH,
- [14] = GFS2_DIF_INHERIT_JDATA,
- [17] = GFS2_DIF_TOPDIR,
-};
-
-static const u32 gfs2_to_fsflags[32] = {
- [gfs2fl_Sync] = FS_SYNC_FL,
- [gfs2fl_Immutable] = FS_IMMUTABLE_FL,
- [gfs2fl_AppendOnly] = FS_APPEND_FL,
- [gfs2fl_NoAtime] = FS_NOATIME_FL,
- [gfs2fl_ExHash] = FS_INDEX_FL,
- [gfs2fl_TopLevel] = FS_TOPDIR_FL,
- [gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
+static struct {
+ u32 fsflag;
+ u32 gfsflag;
+} fsflag_gfs2flag[] = {
+ {FS_SYNC_FL, GFS2_DIF_SYNC},
+ {FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
+ {FS_APPEND_FL, GFS2_DIF_APPENDONLY},
+ {FS_NOATIME_FL, GFS2_DIF_NOATIME},
+ {FS_INDEX_FL, GFS2_DIF_EXHASH},
+ {FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
+ {FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
};
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
@@ -154,17 +142,23 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
struct inode *inode = file_inode(filp);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
- int error;
- u32 fsflags;
+ int i, error;
+ u32 gfsflags, fsflags = 0;
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
error = gfs2_glock_nq(&gh);
if (error)
goto out_uninit;
- fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
- if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
- fsflags |= FS_JOURNAL_DATA_FL;
+ gfsflags = ip->i_diskflags;
+ if (S_ISDIR(inode->i_mode))
+ gfsflags &= ~GFS2_DIF_JDATA;
+ else
+ gfsflags &= ~GFS2_DIF_INHERIT_JDATA;
+ for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
+ if (gfsflags & fsflag_gfs2flag[i].gfsflag)
+ fsflags |= fsflag_gfs2flag[i].fsflag;
+
if (put_user(fsflags, ptr))
error = -EFAULT;
@@ -199,7 +193,6 @@ void gfs2_set_inode_flags(struct inode *inode)
GFS2_DIF_APPENDONLY| \
GFS2_DIF_NOATIME| \
GFS2_DIF_SYNC| \
- GFS2_DIF_SYSTEM| \
GFS2_DIF_TOPDIR| \
GFS2_DIF_INHERIT_JDATA)
@@ -238,10 +231,6 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
if ((new_flags ^ flags) == 0)
goto out;
- error = -EINVAL;
- if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
- goto out;
-
error = -EPERM;
if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
goto out;
@@ -256,7 +245,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
goto out;
}
if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
- if (flags & GFS2_DIF_JDATA)
+ if (new_flags & GFS2_DIF_JDATA)
gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
error = filemap_fdatawrite(inode->i_mapping);
if (error)
@@ -264,6 +253,8 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
error = filemap_fdatawait(inode->i_mapping);
if (error)
goto out;
+ if (new_flags & GFS2_DIF_JDATA)
+ gfs2_ordered_del_inode(ip);
}
error = gfs2_trans_begin(sdp, RES_DINODE, 0);
if (error)
@@ -271,6 +262,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
error = gfs2_meta_inode_buffer(ip, &bh);
if (error)
goto out_trans_end;
+ inode->i_ctime = current_time(inode);
gfs2_trans_add_meta(ip->i_gl, bh);
ip->i_diskflags = new_flags;
gfs2_dinode_out(ip, bh->b_data);
@@ -289,19 +281,33 @@ out_drop_write:
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
struct inode *inode = file_inode(filp);
- u32 fsflags, gfsflags;
+ u32 fsflags, gfsflags = 0;
+ u32 mask;
+ int i;
if (get_user(fsflags, ptr))
return -EFAULT;
- gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
- if (!S_ISDIR(inode->i_mode)) {
- gfsflags &= ~GFS2_DIF_TOPDIR;
- if (gfsflags & GFS2_DIF_INHERIT_JDATA)
- gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
- return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_SYSTEM);
+ for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
+ if (fsflags & fsflag_gfs2flag[i].fsflag) {
+ fsflags &= ~fsflag_gfs2flag[i].fsflag;
+ gfsflags |= fsflag_gfs2flag[i].gfsflag;
+ }
+ }
+ if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
+ return -EINVAL;
+
+ mask = GFS2_FLAGS_USER_SET;
+ if (S_ISDIR(inode->i_mode)) {
+ mask &= ~GFS2_DIF_JDATA;
+ } else {
+ /* The GFS2_DIF_TOPDIR flag is only valid for directories. */
+ if (gfsflags & GFS2_DIF_TOPDIR)
+ return -EINVAL;
+ mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
}
- return do_gfs2_set_flags(filp, gfsflags, ~(GFS2_DIF_SYSTEM | GFS2_DIF_JDATA));
+
+ return do_gfs2_set_flags(filp, gfsflags, mask);
}
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
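
The fsflags rework above replaces positional bit tables with an explicit {fsflag, gfsflag} pair list, and gfs2_set_flags() now rejects any FS_*_FL bit it does not recognise. A small userspace sketch of that translate-through-a-table-and-reject-leftovers approach, with invented flag values, is:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Invented flag values, standing in for FS_*_FL and GFS2_DIF_*. */
    #define IN_SYNC    0x01
    #define IN_APPEND  0x02
    #define OUT_SYNC   0x10
    #define OUT_APPEND 0x20

    static const struct { uint32_t in; uint32_t out; } flagmap[] = {
            { IN_SYNC,   OUT_SYNC },
            { IN_APPEND, OUT_APPEND },
    };

    static int translate_flags(uint32_t in, uint32_t *out)
    {
            size_t i;

            *out = 0;
            for (i = 0; i < ARRAY_SIZE(flagmap); i++) {
                    if (in & flagmap[i].in) {
                            in &= ~flagmap[i].in;
                            *out |= flagmap[i].out;
                    }
            }
            /* Anything left over is a flag we do not support. */
            return in ? -EINVAL : 0;
    }

    int main(void)
    {
            uint32_t out;

            printf("%d 0x%x\n", translate_flags(IN_SYNC | IN_APPEND, &out),
                   (unsigned)out);
            printf("%d\n", translate_flags(0x80, &out));  /* unknown bit -> -EINVAL */
            return 0;
    }
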
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 863749e29bf9..4e971b1c7f92 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -18,7 +18,7 @@
#include <linux/posix_acl.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
-#include <linux/fiemap.h>
+#include <linux/iomap.h>
#include <linux/security.h>
#include <linux/uaccess.h>
@@ -189,7 +189,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
gfs2_set_iop(inode);
- inode->i_atime.tv_sec = 0;
+ /* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
+ inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
inode->i_atime.tv_nsec = 0;
unlock_new_inode(inode);
@@ -1986,6 +1987,7 @@ static int gfs2_getattr(const struct path *path, struct kstat *stat,
struct inode *inode = d_inode(path->dentry);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
+ u32 gfsflags;
int error;
gfs2_holder_mark_uninitialized(&gh);
@@ -1995,13 +1997,30 @@ static int gfs2_getattr(const struct path *path, struct kstat *stat,
return error;
}
+ gfsflags = ip->i_diskflags;
+ if (gfsflags & GFS2_DIF_APPENDONLY)
+ stat->attributes |= STATX_ATTR_APPEND;
+ if (gfsflags & GFS2_DIF_IMMUTABLE)
+ stat->attributes |= STATX_ATTR_IMMUTABLE;
+
+ stat->attributes_mask |= (STATX_ATTR_APPEND |
+ STATX_ATTR_COMPRESSED |
+ STATX_ATTR_ENCRYPTED |
+ STATX_ATTR_IMMUTABLE |
+ STATX_ATTR_NODUMP);
+
generic_fillattr(inode, stat);
+
if (gfs2_holder_initialized(&gh))
gfs2_glock_dq_uninit(&gh);
return 0;
}
+const struct iomap_ops gfs2_iomap_ops = {
+ .iomap_begin = gfs2_iomap_begin,
+};
+
static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
@@ -2009,41 +2028,59 @@ static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
struct gfs2_holder gh;
int ret;
- ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
- if (ret)
- return ret;
-
- inode_lock(inode);
+ inode_lock_shared(inode);
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
if (ret)
goto out;
- if (gfs2_is_stuffed(ip)) {
- u64 phys = ip->i_no_addr << inode->i_blkbits;
- u64 size = i_size_read(inode);
- u32 flags = FIEMAP_EXTENT_LAST|FIEMAP_EXTENT_NOT_ALIGNED|
- FIEMAP_EXTENT_DATA_INLINE;
- phys += sizeof(struct gfs2_dinode);
- phys += start;
- if (start + len > size)
- len = size - start;
- if (start < size)
- ret = fiemap_fill_next_extent(fieinfo, start, phys,
- len, flags);
- if (ret == 1)
- ret = 0;
- } else {
- ret = __generic_block_fiemap(inode, fieinfo, start, len,
- gfs2_block_map);
- }
+ ret = iomap_fiemap(inode, fieinfo, start, len, &gfs2_iomap_ops);
gfs2_glock_dq_uninit(&gh);
+
out:
- inode_unlock(inode);
+ inode_unlock_shared(inode);
return ret;
}
+loff_t gfs2_seek_data(struct file *file, loff_t offset)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_holder gh;
+ loff_t ret;
+
+ inode_lock_shared(inode);
+ ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+ if (!ret)
+ ret = iomap_seek_data(inode, offset, &gfs2_iomap_ops);
+ gfs2_glock_dq_uninit(&gh);
+ inode_unlock_shared(inode);
+
+ if (ret < 0)
+ return ret;
+ return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
+}
+
+loff_t gfs2_seek_hole(struct file *file, loff_t offset)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_holder gh;
+ loff_t ret;
+
+ inode_lock_shared(inode);
+ ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+ if (!ret)
+ ret = iomap_seek_hole(inode, offset, &gfs2_iomap_ops);
+ gfs2_glock_dq_uninit(&gh);
+ inode_unlock_shared(inode);
+
+ if (ret < 0)
+ return ret;
+ return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
+}
+
const struct inode_operations gfs2_file_iops = {
.permission = gfs2_permission,
.setattr = gfs2_setattr,
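
With gfs2_seek_data() and gfs2_seek_hole() above wired into llseek through the iomap helpers, SEEK_DATA and SEEK_HOLE report the start of the next data or hole region at or after the requested offset, as on other iomap-based filesystems. A short userspace check of those semantics (Linux-specific lseek() whence values; the exact data offset returned depends on the filesystem's block size and hole reporting) might be:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Create a sparse file: a 1 MiB hole, then one byte of data. */
            int fd = open("sparse.tmp", O_RDWR | O_CREAT | O_TRUNC, 0600);
            off_t data, hole;

            if (fd < 0)
                    return 1;
            pwrite(fd, "x", 1, 1 << 20);

            data = lseek(fd, 0, SEEK_DATA);   /* first data at or after offset 0 */
            hole = lseek(fd, 0, SEEK_HOLE);   /* first hole at or after offset 0 */
            printf("data at %lld, hole at %lld\n",
                   (long long)data, (long long)hole);

            close(fd);
            unlink("sparse.tmp");
            return 0;
    }
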
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index aace8ce34a18..b5b6341a4f5c 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -109,6 +109,8 @@ extern int gfs2_setattr_simple(struct inode *inode, struct iattr *attr);
extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
extern int gfs2_open_common(struct inode *inode, struct file *file);
+extern loff_t gfs2_seek_data(struct file *file, loff_t offset);
+extern loff_t gfs2_seek_hole(struct file *file, loff_t offset);
extern const struct inode_operations gfs2_file_iops;
extern const struct inode_operations gfs2_dir_iops;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 8e54f2e3a304..9cb5c9a97d69 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -754,14 +754,15 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
int ret = 0;
+ bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));
- if (wbc->sync_mode == WB_SYNC_ALL)
+ if (flush_all)
gfs2_log_flush(GFS2_SB(inode), ip->i_gl, NORMAL_FLUSH);
if (bdi->wb.dirty_exceeded)
gfs2_ail1_flush(sdp, wbc);
else
filemap_fdatawrite(metamapping);
- if (wbc->sync_mode == WB_SYNC_ALL)
+ if (flush_all)
ret = filemap_fdatawait(metamapping);
if (ret)
mark_inode_dirty_sync(inode);
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index 2f159265693b..f67a709589d3 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -13,6 +13,7 @@
#include <linux/gfs2_ondisk.h>
#include <linux/writeback.h>
#include <linux/ktime.h>
+#include <linux/iomap.h>
#include "incore.h"
#include "glock.h"
#include "rgrp.h"
@@ -470,6 +471,70 @@ TRACE_EVENT(gfs2_bmap,
__entry->errno)
);
+TRACE_EVENT(gfs2_iomap_start,
+
+ TP_PROTO(const struct gfs2_inode *ip, loff_t pos, ssize_t length,
+ u16 flags),
+
+ TP_ARGS(ip, pos, length, flags),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( u64, inum )
+ __field( loff_t, pos )
+ __field( ssize_t, length )
+ __field( u16, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
+ __entry->inum = ip->i_no_addr;
+ __entry->pos = pos;
+ __entry->length = length;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("%u,%u bmap %llu iomap start %llu/%lu flags:%08x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->inum,
+ (unsigned long long)__entry->pos,
+ (unsigned long)__entry->length, (u16)__entry->flags)
+);
+
+TRACE_EVENT(gfs2_iomap_end,
+
+ TP_PROTO(const struct gfs2_inode *ip, struct iomap *iomap, int ret),
+
+ TP_ARGS(ip, iomap, ret),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( u64, inum )
+ __field( loff_t, offset )
+ __field( ssize_t, length )
+ __field( u16, flags )
+ __field( u16, type )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
+ __entry->inum = ip->i_no_addr;
+ __entry->offset = iomap->offset;
+ __entry->length = iomap->length;
+ __entry->flags = iomap->flags;
+ __entry->type = iomap->type;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%u,%u bmap %llu iomap end %llu/%lu ty:%d flags:%08x rc:%d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->inum,
+ (unsigned long long)__entry->offset,
+ (unsigned long)__entry->length, (u16)__entry->type,
+ (u16)__entry->flags, __entry->ret)
+);
+
/* Keep track of blocks as they are allocated/freed */
TRACE_EVENT(gfs2_block_alloc,
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index affef3c066e0..a85ca8b2c9ba 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -145,7 +145,7 @@ static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
*
* This is used in two distinct cases:
* i) In ordered write mode
- * We put the data buffer on a list so that we can ensure that its
+ * We put the data buffer on a list so that we can ensure that it's
* synced to disk at the right time
* ii) In journaled data mode
* We need to journal the data block in the same way as metadata in
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index ea09e41dbb49..05de20954659 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -231,7 +231,6 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd;
struct gfs2_holder rg_gh;
- struct buffer_head *dibh;
__be64 *dataptrs;
u64 bn = 0;
u64 bstart = 0;
@@ -308,13 +307,8 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
ea->ea_num_ptrs = 0;
}
- error = gfs2_meta_inode_buffer(ip, &dibh);
- if (!error) {
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
- gfs2_trans_add_meta(ip->i_gl, dibh);
- gfs2_dinode_out(ip, dibh->b_data);
- brelse(dibh);
- }
+ ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ __mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
gfs2_trans_end(sdp);
@@ -616,7 +610,6 @@ static int gfs2_xattr_get(const struct xattr_handler *handler,
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
- bool need_unlock = false;
int ret;
/* During lookup, SELinux calls this function with the glock locked. */
@@ -625,10 +618,11 @@ static int gfs2_xattr_get(const struct xattr_handler *handler,
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
if (ret)
return ret;
- need_unlock = true;
+ } else {
+ gfs2_holder_mark_uninitialized(&gh);
}
ret = __gfs2_xattr_get(inode, name, buffer, size, handler->flags);
- if (need_unlock)
+ if (gfs2_holder_initialized(&gh))
gfs2_glock_dq_uninit(&gh);
return ret;
}
@@ -749,7 +743,6 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
ea_skeleton_call_t skeleton_call, void *private)
{
struct gfs2_alloc_parms ap = { .target = blks };
- struct buffer_head *dibh;
int error;
error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
@@ -774,13 +767,8 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
if (error)
goto out_end_trans;
- error = gfs2_meta_inode_buffer(ip, &dibh);
- if (!error) {
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
- gfs2_trans_add_meta(ip->i_gl, dibh);
- gfs2_dinode_out(ip, dibh->b_data);
- brelse(dibh);
- }
+ ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ __mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
out_end_trans:
gfs2_trans_end(GFS2_SB(&ip->i_inode));
@@ -891,7 +879,6 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea, struct ea_set *es)
{
struct gfs2_ea_request *er = es->es_er;
- struct buffer_head *dibh;
int error;
error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
@@ -908,14 +895,9 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
if (es->es_el)
ea_set_remove_stuffed(ip, es->es_el);
- error = gfs2_meta_inode_buffer(ip, &dibh);
- if (error)
- goto out;
ip->i_inode.i_ctime = current_time(&ip->i_inode);
- gfs2_trans_add_meta(ip->i_gl, dibh);
- gfs2_dinode_out(ip, dibh->b_data);
- brelse(dibh);
-out:
+ __mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
+
gfs2_trans_end(GFS2_SB(&ip->i_inode));
return error;
}
@@ -1111,7 +1093,6 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
struct gfs2_ea_header *ea = el->el_ea;
struct gfs2_ea_header *prev = el->el_prev;
- struct buffer_head *dibh;
int error;
error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
@@ -1132,13 +1113,8 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
ea->ea_type = GFS2_EATYPE_UNUSED;
}
- error = gfs2_meta_inode_buffer(ip, &dibh);
- if (!error) {
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
- gfs2_trans_add_meta(ip->i_gl, dibh);
- gfs2_dinode_out(ip, dibh->b_data);
- brelse(dibh);
- }
+ ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ __mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
gfs2_trans_end(GFS2_SB(&ip->i_inode));
@@ -1268,11 +1244,20 @@ static int gfs2_xattr_set(const struct xattr_handler *handler,
if (ret)
return ret;
- ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
- if (ret)
- return ret;
+ /* May be called from gfs2_setattr with the glock locked. */
+
+ if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
+ ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+ if (ret)
+ return ret;
+ } else {
+ if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
+ return -EIO;
+ gfs2_holder_mark_uninitialized(&gh);
+ }
ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags);
- gfs2_glock_dq_uninit(&gh);
+ if (gfs2_holder_initialized(&gh))
+ gfs2_glock_dq_uninit(&gh);
return ret;
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index ed113ea17aff..1e76730aac0d 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -407,7 +407,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
next = start;
while (next < end) {
/*
@@ -668,7 +668,6 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
return error;
if (ia_valid & ATTR_SIZE) {
- error = -EINVAL;
if (attr->ia_size & ~huge_page_mask(h))
return -EINVAL;
error = hugetlb_vmtruncate(inode, attr->ia_size);
diff --git a/fs/inode.c b/fs/inode.c
index d1e35b53bb23..fd401028a309 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -2090,7 +2090,7 @@ void inode_set_flags(struct inode *inode, unsigned int flags,
WARN_ON_ONCE(flags & ~mask);
do {
- old_flags = ACCESS_ONCE(inode->i_flags);
+ old_flags = READ_ONCE(inode->i_flags);
new_flags = (old_flags & ~mask) | flags;
} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
new_flags) != old_flags));
diff --git a/fs/iomap.c b/fs/iomap.c
index d4801f8dd4fd..b9f74803e56c 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -350,8 +350,8 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
struct iomap *iomap)
{
- sector_t sector = iomap->blkno +
- (((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);
+ sector_t sector = (iomap->addr +
+ (pos & PAGE_MASK) - iomap->offset) >> 9;
return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
offset, bytes);
@@ -510,11 +510,12 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
flags |= FIEMAP_EXTENT_MERGED;
if (iomap->flags & IOMAP_F_SHARED)
flags |= FIEMAP_EXTENT_SHARED;
+ if (iomap->flags & IOMAP_F_DATA_INLINE)
+ flags |= FIEMAP_EXTENT_DATA_INLINE;
return fiemap_fill_next_extent(fi, iomap->offset,
- iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9: 0,
+ iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
iomap->length, flags);
-
}
static loff_t
@@ -830,7 +831,7 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
bio = bio_alloc(GFP_KERNEL, 1);
bio_set_dev(bio, iomap->bdev);
bio->bi_iter.bi_sector =
- iomap->blkno + ((pos - iomap->offset) >> 9);
+ (iomap->addr + pos - iomap->offset) >> 9;
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
@@ -909,7 +910,7 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
bio = bio_alloc(GFP_KERNEL, nr_pages);
bio_set_dev(bio, iomap->bdev);
bio->bi_iter.bi_sector =
- iomap->blkno + ((pos - iomap->offset) >> 9);
+ (iomap->addr + pos - iomap->offset) >> 9;
bio->bi_write_hint = dio->iocb->ki_hint;
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
@@ -1056,7 +1057,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (!(iocb->ki_flags & IOCB_HIPRI) ||
!dio->submit.last_queue ||
- !blk_mq_poll(dio->submit.last_queue,
+ !blk_poll(dio->submit.last_queue,
dio->submit.cookie))
io_schedule();
}
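
The fs/iomap.c hunks above switch from iomap->blkno (a 512-byte block number) to iomap->addr (a byte address), so the bio sector is now derived by byte arithmetic followed by a single shift. A tiny check of that arithmetic with made-up, sector-aligned values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t addr = 1 << 20;        /* extent starts at 1 MiB on disk */
            uint64_t offset = 8192;         /* extent starts at file offset 8 KiB */
            uint64_t pos = 12288;           /* I/O position inside the extent */

            /* (addr + pos - offset) >> 9: the on-disk byte address of pos,
             * expressed in 512-byte sectors. */
            assert(((addr + pos - offset) >> 9) ==
                   (addr >> 9) + (pos - offset) / 512);
            return 0;
    }
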
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index 57d4c3e2e94a..055ec6c586f7 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -73,41 +73,41 @@ static inline struct iso_inode_info *ISOFS_I(struct inode *inode)
return container_of(inode, struct iso_inode_info, vfs_inode);
}
-static inline int isonum_711(char *p)
+static inline int isonum_711(u8 *p)
{
- return *(u8 *)p;
+ return *p;
}
-static inline int isonum_712(char *p)
+static inline int isonum_712(s8 *p)
{
- return *(s8 *)p;
+ return *p;
}
-static inline unsigned int isonum_721(char *p)
+static inline unsigned int isonum_721(u8 *p)
{
return get_unaligned_le16(p);
}
-static inline unsigned int isonum_722(char *p)
+static inline unsigned int isonum_722(u8 *p)
{
return get_unaligned_be16(p);
}
-static inline unsigned int isonum_723(char *p)
+static inline unsigned int isonum_723(u8 *p)
{
/* Ignore bigendian datum due to broken mastering programs */
return get_unaligned_le16(p);
}
-static inline unsigned int isonum_731(char *p)
+static inline unsigned int isonum_731(u8 *p)
{
return get_unaligned_le32(p);
}
-static inline unsigned int isonum_732(char *p)
+static inline unsigned int isonum_732(u8 *p)
{
return get_unaligned_be32(p);
}
-static inline unsigned int isonum_733(char *p)
+static inline unsigned int isonum_733(u8 *p)
{
/* Ignore bigendian datum due to broken mastering programs */
return get_unaligned_le32(p);
}
-extern int iso_date(char *, int);
+extern int iso_date(u8 *, int);
struct inode; /* To make gcc happy */
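
The isofs accessors above now take u8 * and keep using get_unaligned_le16()/get_unaligned_le32(), which read multi-byte on-disk numbers without assuming alignment or host byte order. A userspace equivalent of the 16-bit accessor, assembling the value byte by byte (names invented), could be:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for get_unaligned_le16(): build the value from
     * individual bytes so neither alignment nor host byte order matters. */
    static unsigned int read_le16(const uint8_t *p)
    {
            return (unsigned int)p[0] | ((unsigned int)p[1] << 8);
    }

    int main(void)
    {
            uint8_t raw[] = { 0x34, 0x12, 0xff };   /* little-endian 0x1234, then junk */

            printf("0x%x\n", read_le16(raw));        /* prints 0x1234 */
            return 0;
    }
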
diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
index ef03625431bb..1558cf22ef8a 100644
--- a/fs/isofs/rock.h
+++ b/fs/isofs/rock.h
@@ -7,78 +7,78 @@
*/
struct SU_SP_s {
- unsigned char magic[2];
- unsigned char skip;
+ __u8 magic[2];
+ __u8 skip;
} __attribute__ ((packed));
struct SU_CE_s {
- char extent[8];
- char offset[8];
- char size[8];
+ __u8 extent[8];
+ __u8 offset[8];
+ __u8 size[8];
};
struct SU_ER_s {
- unsigned char len_id;
- unsigned char len_des;
- unsigned char len_src;
- unsigned char ext_ver;
- char data[0];
+ __u8 len_id;
+ __u8 len_des;
+ __u8 len_src;
+ __u8 ext_ver;
+ __u8 data[0];
} __attribute__ ((packed));
struct RR_RR_s {
- char flags[1];
+ __u8 flags[1];
} __attribute__ ((packed));
struct RR_PX_s {
- char mode[8];
- char n_links[8];
- char uid[8];
- char gid[8];
+ __u8 mode[8];
+ __u8 n_links[8];
+ __u8 uid[8];
+ __u8 gid[8];
};
struct RR_PN_s {
- char dev_high[8];
- char dev_low[8];
+ __u8 dev_high[8];
+ __u8 dev_low[8];
};
struct SL_component {
- unsigned char flags;
- unsigned char len;
- char text[0];
+ __u8 flags;
+ __u8 len;
+ __u8 text[0];
} __attribute__ ((packed));
struct RR_SL_s {
- unsigned char flags;
+ __u8 flags;
struct SL_component link;
} __attribute__ ((packed));
struct RR_NM_s {
- unsigned char flags;
+ __u8 flags;
char name[0];
} __attribute__ ((packed));
struct RR_CL_s {
- char location[8];
+ __u8 location[8];
};
struct RR_PL_s {
- char location[8];
+ __u8 location[8];
};
struct stamp {
- char time[7];
+ __u8 time[7]; /* actually 6 unsigned, 1 signed */
} __attribute__ ((packed));
struct RR_TF_s {
- char flags;
+ __u8 flags;
struct stamp times[0]; /* Variable number of these beasts */
} __attribute__ ((packed));
/* Linux-specific extension for transparent decompression */
struct RR_ZF_s {
- char algorithm[2];
- char parms[2];
- char real_size[8];
+ __u8 algorithm[2];
+ __u8 parms[2];
+ __u8 real_size[8];
};
/*
@@ -94,9 +94,9 @@ struct RR_ZF_s {
#define TF_LONG_FORM 128
struct rock_ridge {
- char signature[2];
- unsigned char len;
- unsigned char version;
+ __u8 signature[2];
+ __u8 len;
+ __u8 version;
union {
struct SU_SP_s SP;
struct SU_CE_s CE;
diff --git a/fs/isofs/util.c b/fs/isofs/util.c
index 42544bf0e222..e88dba721661 100644
--- a/fs/isofs/util.c
+++ b/fs/isofs/util.c
@@ -16,7 +16,7 @@
* to GMT. Thus we should always be correct.
*/
-int iso_date(char * p, int flag)
+int iso_date(u8 *p, int flag)
{
int year, month, day, hour, minute, second, tz;
int crtime;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 7d5ef3bf3f3e..d2a85c9720e9 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -165,11 +165,11 @@ static void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
* Helper function used to manage commit timeouts
*/
-static void commit_timeout(unsigned long __data)
+static void commit_timeout(struct timer_list *t)
{
- struct task_struct * p = (struct task_struct *) __data;
+ journal_t *journal = from_timer(journal, t, j_commit_timer);
- wake_up_process(p);
+ wake_up_process(journal->j_task);
}
/*
@@ -197,8 +197,7 @@ static int kjournald2(void *arg)
* Set up an interval timer which can be used to trigger a commit wakeup
* after the commit interval expires
*/
- setup_timer(&journal->j_commit_timer, commit_timeout,
- (unsigned long)current);
+ timer_setup(&journal->j_commit_timer, commit_timeout, 0);
set_freezable();
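
The jbd2 hunk above is part of the timer API conversion: the callback now receives the struct timer_list pointer and uses from_timer(), a container_of() wrapper, to reach the structure embedding the timer rather than passing a pointer through an unsigned long. The underlying container_of() trick can be sketched in plain C with invented types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer {                  /* stand-in for struct timer_list */
            int expires;
    };

    struct journal {                /* stand-in for journal_t */
            const char *name;
            struct timer commit_timer;
    };

    /* The callback gets only the embedded member... */
    static void commit_timeout(struct timer *t)
    {
            /* ...and recovers the containing structure from it. */
            struct journal *j = container_of(t, struct journal, commit_timer);

            printf("waking %s\n", j->name);
    }

    int main(void)
    {
            struct journal j = { .name = "journal0" };

            commit_timeout(&j.commit_timer);
            return 0;
    }
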
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 1c4b9ad4d7ab..1a3b0cc22ad3 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -663,6 +663,8 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
} else {
INCREMENT(mpStat.pagealloc);
mp = alloc_metapage(GFP_NOFS);
+ if (!mp)
+ goto unlock;
mp->page = page;
mp->sb = inode->i_sb;
mp->flag = 0;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 2f14677169c3..2f7b3af5b8b7 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -853,7 +853,6 @@ out:
}
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
- inode->i_version++;
inode->i_mtime = inode->i_ctime = current_time(inode);
mark_inode_dirty(inode);
inode_unlock(inode);
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index b995bdc13976..b837fb7e290a 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -602,7 +602,7 @@ static struct ctl_table nlm_sysctl_root[] = {
*/
#define param_set_min_max(name, type, which_strtol, min, max) \
-static int param_set_##name(const char *val, struct kernel_param *kp) \
+static int param_set_##name(const char *val, const struct kernel_param *kp) \
{ \
char *endp; \
__typeof__(type) num = which_strtol(val, &endp, 0); \
diff --git a/fs/namei.c b/fs/namei.c
index ed8b9488a890..5424b10cfdc4 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1210,7 +1210,7 @@ static int follow_managed(struct path *path, struct nameidata *nd)
/* Given that we're not holding a lock here, we retain the value in a
* local variable for each dentry as we look at it so that we don't see
* the components of that value change under us */
- while (managed = ACCESS_ONCE(path->dentry->d_flags),
+ while (managed = READ_ONCE(path->dentry->d_flags),
managed &= DCACHE_MANAGED_DENTRY,
unlikely(managed != 0)) {
/* Allow the filesystem to manage the transit without i_mutex
@@ -1395,7 +1395,7 @@ int follow_down(struct path *path)
unsigned managed;
int ret;
- while (managed = ACCESS_ONCE(path->dentry->d_flags),
+ while (managed = READ_ONCE(path->dentry->d_flags),
unlikely(managed & DCACHE_MANAGED_DENTRY)) {
/* Allow the filesystem to manage the transit without i_mutex
* being held.
diff --git a/fs/namespace.c b/fs/namespace.c
index d18deb4c410b..e158ec6b527b 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -353,7 +353,7 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
+ while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
cpu_relax();
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index b5ec1d980dc9..0c57c5c5d40a 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -120,10 +120,6 @@ static inline int ncp_case_sensitive(const struct inode *i)
/*
* Note: leave the hash unchanged if the directory
* is case-sensitive.
- *
- * Accessing the parent inode can be racy under RCU pathwalking.
- * Use ACCESS_ONCE() to make sure we use _one_ particular inode,
- * the callers will handle races.
*/
static int
ncp_hash_dentry(const struct dentry *dentry, struct qstr *this)
@@ -148,11 +144,6 @@ ncp_hash_dentry(const struct dentry *dentry, struct qstr *this)
return 0;
}
-/*
- * Accessing the parent inode can be racy under RCU pathwalking.
- * Use ACCESS_ONCE() to make sure we use _one_ particular inode,
- * the callers will handle races.
- */
static int
ncp_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 5ceaeb1f6fb6..f439f1c45008 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1081,7 +1081,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
int error;
if (flags & LOOKUP_RCU) {
- parent = ACCESS_ONCE(dentry->d_parent);
+ parent = READ_ONCE(dentry->d_parent);
dir = d_inode_rcu(parent);
if (!dir)
return -ECHILD;
@@ -1168,7 +1168,7 @@ out_set_verifier:
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
out_valid:
if (flags & LOOKUP_RCU) {
- if (parent != ACCESS_ONCE(dentry->d_parent))
+ if (parent != READ_ONCE(dentry->d_parent))
return -ECHILD;
} else
dput(parent);
@@ -1582,7 +1582,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
struct inode *dir;
if (flags & LOOKUP_RCU) {
- parent = ACCESS_ONCE(dentry->d_parent);
+ parent = READ_ONCE(dentry->d_parent);
dir = d_inode_rcu(parent);
if (!dir)
return -ECHILD;
@@ -1596,7 +1596,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
ret = -ECHILD;
if (!(flags & LOOKUP_RCU))
dput(parent);
- else if (parent != ACCESS_ONCE(dentry->d_parent))
+ else if (parent != READ_ONCE(dentry->d_parent))
return -ECHILD;
goto out;
}
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 3f880ae0966b..70b8bf781fce 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -66,7 +66,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
bex->es = PNFS_BLOCK_READ_DATA;
else
bex->es = PNFS_BLOCK_READWRITE_DATA;
- bex->soff = (iomap.blkno << 9);
+ bex->soff = iomap.addr;
break;
case IOMAP_UNWRITTEN:
if (seg->iomode & IOMODE_RW) {
@@ -79,7 +79,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
}
bex->es = PNFS_BLOCK_INVALID_DATA;
- bex->soff = (iomap.blkno << 9);
+ bex->soff = iomap.addr;
break;
}
/*FALLTHRU*/
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 06ffa135dfa6..16a7a67a11c9 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -2156,10 +2156,10 @@ static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
level++)
INIT_LIST_HEAD(&lists[level]);
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
- while (pagevec_lookup_tag(&pvec, btcache, &index, PAGECACHE_TAG_DIRTY,
- PAGEVEC_SIZE)) {
+ while (pagevec_lookup_tag(&pvec, btcache, &index,
+ PAGECACHE_TAG_DIRTY)) {
for (i = 0; i < pagevec_count(&pvec); i++) {
bh = head = page_buffers(pvec.pages[i]);
do {
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 8616c46d33da..68241512d7c1 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -255,10 +255,9 @@ int nilfs_copy_dirty_pages(struct address_space *dmap,
pgoff_t index = 0;
int err = 0;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
repeat:
- if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY,
- PAGEVEC_SIZE))
+ if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))
return 0;
for (i = 0; i < pagevec_count(&pvec); i++) {
@@ -310,7 +309,7 @@ void nilfs_copy_back_pages(struct address_space *dmap,
pgoff_t index = 0;
int err;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
repeat:
n = pagevec_lookup(&pvec, smap, &index);
if (!n)
@@ -374,10 +373,10 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
unsigned int i;
pgoff_t index = 0;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
- while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
- PAGEVEC_SIZE)) {
+ while (pagevec_lookup_tag(&pvec, mapping, &index,
+ PAGECACHE_TAG_DIRTY)) {
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
@@ -519,7 +518,7 @@ unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
repeat:
pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 70ded52dc1dd..f65392fecb5c 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -708,21 +708,17 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
index = start >> PAGE_SHIFT;
last = end >> PAGE_SHIFT;
}
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
repeat:
if (unlikely(index > last) ||
- !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
- min_t(pgoff_t, last - index,
- PAGEVEC_SIZE - 1) + 1))
+ !pagevec_lookup_range_tag(&pvec, mapping, &index, last,
+ PAGECACHE_TAG_DIRTY))
return ndirties;
for (i = 0; i < pagevec_count(&pvec); i++) {
struct buffer_head *bh, *head;
struct page *page = pvec.pages[i];
- if (unlikely(page->index > last))
- break;
-
lock_page(page);
if (!page_has_buffers(page))
create_empty_buffers(page, i_blocksize(inode), 0);
@@ -757,10 +753,10 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
unsigned int i;
pgoff_t index = 0;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
- while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
- PAGEVEC_SIZE)) {
+ while (pagevec_lookup_tag(&pvec, mapping, &index,
+ PAGECACHE_TAG_DIRTY)) {
for (i = 0; i < pagevec_count(&pvec); i++) {
bh = head = page_buffers(pvec.pages[i]);
do {
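
The nilfs2 hunks above track two pagevec API changes: pagevec_init() loses its cold-page argument, and pagevec_lookup_tag() no longer takes a page count but always fills up to PAGEVEC_SIZE entries, with range termination delegated to pagevec_lookup_range_tag() instead of an explicit "page->index > last" check in the caller. A condensed sketch of the resulting loop shape (illustrative function, per-page work elided):

static void example_walk_dirty_range(struct address_space *mapping,
				     pgoff_t index, pgoff_t last)
{
	struct pagevec pvec;
	unsigned int i;

	pagevec_init(&pvec);
	while (pagevec_lookup_range_tag(&pvec, mapping, &index, last,
					PAGECACHE_TAG_DIRTY)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			/* ... inspect or redirty the page's buffers ... */
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
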
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index cba328315929..63a1ca4b9dee 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -319,7 +319,11 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
spin_lock(&fsn_mark->lock);
} else {
- fsnotify_add_mark_locked(new_fsn_mark, inode, NULL, 0);
+ error = fsnotify_add_mark_locked(new_fsn_mark, inode, NULL, 0);
+ if (error) {
+ mutex_unlock(&dnotify_group->mark_mutex);
+ goto out_err;
+ }
spin_lock(&new_fsn_mark->lock);
fsn_mark = new_fsn_mark;
dn_mark = new_dn_mark;
@@ -345,6 +349,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
*/
if (dn_mark == new_dn_mark)
destroy = 1;
+ error = 0;
goto out;
}
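
The dnotify change above starts checking the return value of fsnotify_add_mark_locked() instead of assuming the attach always succeeds. The error-handling shape in isolation (illustrative helper; dnotify's real path additionally releases its fd and allocations via its out_err label):

static int example_attach_mark(struct fsnotify_group *group,
			       struct fsnotify_mark *mark,
			       struct inode *inode)
{
	int error;

	mutex_lock(&group->mark_mutex);
	error = fsnotify_add_mark_locked(mark, inode, NULL, 0);
	if (error) {
		/* never attached: drop the mutex and our reference */
		mutex_unlock(&group->mark_mutex);
		fsnotify_put_mark(mark);
		return error;
	}

	/* ... configure the now-attached mark under mark_mutex ... */

	mutex_unlock(&group->mark_mutex);
	return 0;
}
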
diff --git a/fs/notify/fanotify/Kconfig b/fs/notify/fanotify/Kconfig
index e5f911bd80d2..41355ce74ac0 100644
--- a/fs/notify/fanotify/Kconfig
+++ b/fs/notify/fanotify/Kconfig
@@ -21,6 +21,6 @@ config FANOTIFY_ACCESS_PERMISSIONS
decisions concerning filesystem events. This is used by some fanotify
listeners which need to scan files before allowing the system access to
use those files. This is used by some anti-malware vendors and by some
- hierarchical storage managent systems.
+ hierarchical storage management systems.
If unsure, say N.
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 09640b546363..6702a6a0bbb5 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -10,6 +10,7 @@
#include <linux/sched/user.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include <linux/audit.h>
#include "fanotify.h"
@@ -36,15 +37,13 @@ static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
pr_debug("%s: list=%p event=%p\n", __func__, list, event);
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
/*
* Don't merge a permission event with any other event so that we know
* the event structure we have created in fanotify_handle_event() is the
* one we should check for permission response.
*/
- if (event->mask & FAN_ALL_PERM_EVENTS)
+ if (fanotify_is_perm_event(event->mask))
return 0;
-#endif
list_for_each_entry_reverse(test_event, list, list) {
if (should_merge(test_event, event)) {
@@ -56,7 +55,6 @@ static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
return 0;
}
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static int fanotify_get_response(struct fsnotify_group *group,
struct fanotify_perm_event_info *event,
struct fsnotify_iter_info *iter_info)
@@ -65,21 +63,10 @@ static int fanotify_get_response(struct fsnotify_group *group,
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
- /*
- * fsnotify_prepare_user_wait() fails if we race with mark deletion.
- * Just let the operation pass in that case.
- */
- if (!fsnotify_prepare_user_wait(iter_info)) {
- event->response = FAN_ALLOW;
- goto out;
- }
-
wait_event(group->fanotify_data.access_waitq, event->response);
- fsnotify_finish_user_wait(iter_info);
-out:
/* userspace responded, convert to something usable */
- switch (event->response) {
+ switch (event->response & ~FAN_AUDIT) {
case FAN_ALLOW:
ret = 0;
break;
@@ -87,6 +74,11 @@ out:
default:
ret = -EPERM;
}
+
+ /* Check if the response should be audited */
+ if (event->response & FAN_AUDIT)
+ audit_fanotify(event->response & ~FAN_AUDIT);
+
event->response = 0;
pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
@@ -94,7 +86,6 @@ out:
return ret;
}
-#endif
static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmnt_mark,
@@ -153,8 +144,7 @@ struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask,
{
struct fanotify_event_info *event;
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
- if (mask & FAN_ALL_PERM_EVENTS) {
+ if (fanotify_is_perm_event(mask)) {
struct fanotify_perm_event_info *pevent;
pevent = kmem_cache_alloc(fanotify_perm_event_cachep,
@@ -165,7 +155,6 @@ struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask,
pevent->response = 0;
goto init;
}
-#endif
event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
if (!event)
return NULL;
@@ -212,9 +201,19 @@ static int fanotify_handle_event(struct fsnotify_group *group,
pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
mask);
+ if (fanotify_is_perm_event(mask)) {
+ /*
+ * fsnotify_prepare_user_wait() fails if we race with mark
+ * deletion. Just let the operation pass in that case.
+ */
+ if (!fsnotify_prepare_user_wait(iter_info))
+ return 0;
+ }
+
event = fanotify_alloc_event(inode, mask, data);
+ ret = -ENOMEM;
if (unlikely(!event))
- return -ENOMEM;
+ goto finish;
fsn_event = &event->fse;
ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
@@ -224,16 +223,16 @@ static int fanotify_handle_event(struct fsnotify_group *group,
/* Our event wasn't used in the end. Free it. */
fsnotify_destroy_event(group, fsn_event);
- return 0;
- }
-
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
- if (mask & FAN_ALL_PERM_EVENTS) {
+ ret = 0;
+ } else if (fanotify_is_perm_event(mask)) {
ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event),
iter_info);
fsnotify_destroy_event(group, fsn_event);
}
-#endif
+finish:
+ if (fanotify_is_perm_event(mask))
+ fsnotify_finish_user_wait(iter_info);
+
return ret;
}
@@ -253,13 +252,11 @@ static void fanotify_free_event(struct fsnotify_event *fsn_event)
event = FANOTIFY_E(fsn_event);
path_put(&event->path);
put_pid(event->tgid);
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
- if (fsn_event->mask & FAN_ALL_PERM_EVENTS) {
+ if (fanotify_is_perm_event(fsn_event->mask)) {
kmem_cache_free(fanotify_perm_event_cachep,
FANOTIFY_PE(fsn_event));
return;
}
-#endif
kmem_cache_free(fanotify_event_cachep, event);
}
diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h
index 7dacb7d80727..256d9d1ddea9 100644
--- a/fs/notify/fanotify/fanotify.h
+++ b/fs/notify/fanotify/fanotify.h
@@ -22,7 +22,6 @@ struct fanotify_event_info {
struct pid *tgid;
};
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
/*
* Structure for permission fanotify events. It gets allocated and freed in
* fanotify_handle_event() since we wait there for user response. When the
@@ -41,7 +40,12 @@ FANOTIFY_PE(struct fsnotify_event *fse)
{
return container_of(fse, struct fanotify_perm_event_info, fae.fse);
}
-#endif
+
+static inline bool fanotify_is_perm_event(u32 mask)
+{
+ return IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS) &&
+ mask & FAN_ALL_PERM_EVENTS;
+}
static inline struct fanotify_event_info *FANOTIFY_E(struct fsnotify_event *fse)
{
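
fanotify_is_perm_event() above is the pivot for dropping most of the CONFIG_FANOTIFY_ACCESS_PERMISSIONS #ifdef blocks: because IS_ENABLED() folds to a compile-time constant, the permission-event branches are always compiled and type-checked but discarded as dead code when the option is off. The same pattern in miniature (hypothetical config symbol and mask bit, purely illustrative):

#define EXAMPLE_MASK	0x00000001	/* hypothetical event bit */

static inline bool example_feature_active(u32 mask)
{
	/* folds to constant false when CONFIG_EXAMPLE_FEATURE is off */
	return IS_ENABLED(CONFIG_EXAMPLE_FEATURE) && (mask & EXAMPLE_MASK);
}

static void example_caller(u32 mask)
{
	if (example_feature_active(mask)) {
		/* always compiled, eliminated as dead code when disabled */
	}
}
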
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 9752e7270e61..d0d4bc4c4b70 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -143,7 +143,6 @@ static int fill_event_metadata(struct fsnotify_group *group,
return ret;
}
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_perm_event_info *dequeue_event(
struct fsnotify_group *group, int fd)
{
@@ -180,7 +179,7 @@ static int process_access_response(struct fsnotify_group *group,
* userspace can send a valid response or we will clean it up after the
* timeout
*/
- switch (response) {
+ switch (response & ~FAN_AUDIT) {
case FAN_ALLOW:
case FAN_DENY:
break;
@@ -191,6 +190,9 @@ static int process_access_response(struct fsnotify_group *group,
if (fd < 0)
return -EINVAL;
+ if ((response & FAN_AUDIT) && !group->fanotify_data.audit)
+ return -EINVAL;
+
event = dequeue_event(group, fd);
if (!event)
return -ENOENT;
@@ -200,7 +202,6 @@ static int process_access_response(struct fsnotify_group *group,
return 0;
}
-#endif
static ssize_t copy_event_to_user(struct fsnotify_group *group,
struct fsnotify_event *event,
@@ -222,10 +223,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
fanotify_event_metadata.event_len))
goto out_close_fd;
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
- if (event->mask & FAN_ALL_PERM_EVENTS)
+ if (fanotify_is_perm_event(event->mask))
FANOTIFY_PE(event)->fd = fd;
-#endif
if (fd != FAN_NOFD)
fd_install(fd, f);
@@ -310,10 +309,9 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
* Permission events get queued to wait for response. Other
* events can be destroyed now.
*/
- if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
+ if (!fanotify_is_perm_event(kevent->mask)) {
fsnotify_destroy_event(group, kevent);
} else {
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
if (ret <= 0) {
FANOTIFY_PE(kevent)->response = FAN_DENY;
wake_up(&group->fanotify_data.access_waitq);
@@ -323,7 +321,6 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
&group->fanotify_data.access_list);
spin_unlock(&group->notification_lock);
}
-#endif
}
if (ret < 0)
break;
@@ -339,11 +336,13 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
struct fanotify_response response = { .fd = -1, .response = -1 };
struct fsnotify_group *group;
int ret;
+ if (!IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
+ return -EINVAL;
+
group = file->private_data;
if (count > sizeof(response))
@@ -359,16 +358,11 @@ static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t
count = ret;
return count;
-#else
- return -EINVAL;
-#endif
}
static int fanotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
-
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
struct fanotify_perm_event_info *event, *next;
struct fsnotify_event *fsn_event;
@@ -404,14 +398,14 @@ static int fanotify_release(struct inode *ignored, struct file *file)
spin_unlock(&group->notification_lock);
fsnotify_destroy_event(group, fsn_event);
spin_lock(&group->notification_lock);
- } else
+ } else {
FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
+ }
}
spin_unlock(&group->notification_lock);
/* Response for all permission events is set, wakeup waiters */
wake_up(&group->fanotify_data.access_waitq);
-#endif
/* matches the fanotify_init->fsnotify_alloc_group */
fsnotify_destroy_group(group);
@@ -722,7 +716,11 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+#ifdef CONFIG_AUDITSYSCALL
+ if (flags & ~(FAN_ALL_INIT_FLAGS | FAN_ENABLE_AUDIT))
+#else
if (flags & ~FAN_ALL_INIT_FLAGS)
+#endif
return -EINVAL;
if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
@@ -769,10 +767,8 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
if (force_o_largefile())
event_f_flags |= O_LARGEFILE;
group->fanotify_data.f_flags = event_f_flags;
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
init_waitqueue_head(&group->fanotify_data.access_waitq);
INIT_LIST_HEAD(&group->fanotify_data.access_list);
-#endif
switch (flags & FAN_ALL_CLASS_BITS) {
case FAN_CLASS_NOTIF:
group->priority = FS_PRIO_0;
@@ -806,6 +802,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
}
+ if (flags & FAN_ENABLE_AUDIT) {
+ fd = -EPERM;
+ if (!capable(CAP_AUDIT_WRITE))
+ goto out_destroy_group;
+ group->fanotify_data.audit = true;
+ }
+
fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
if (fd < 0)
goto out_destroy_group;
@@ -826,6 +829,7 @@ SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
struct fsnotify_group *group;
struct fd f;
struct path path;
+ u32 valid_mask = FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD;
int ret;
pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
@@ -856,11 +860,10 @@ SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
mask &= ~FAN_ONDIR;
}
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
- if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
-#else
- if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
-#endif
+ if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
+ valid_mask |= FAN_ALL_PERM_EVENTS;
+
+ if (mask & ~valid_mask)
return -EINVAL;
f = fdget(fanotify_fd);
@@ -950,10 +953,10 @@ static int __init fanotify_user_setup(void)
{
fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
- fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
- SLAB_PANIC);
-#endif
+ if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) {
+ fanotify_perm_event_cachep =
+ KMEM_CACHE(fanotify_perm_event_info, SLAB_PANIC);
+ }
return 0;
}
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index 517f88c1dbe5..d478629c728b 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -157,6 +157,9 @@ void fanotify_show_fdinfo(struct seq_file *m, struct file *f)
if (group->fanotify_data.max_marks == UINT_MAX)
flags |= FAN_UNLIMITED_MARKS;
+ if (group->fanotify_data.audit)
+ flags |= FAN_ENABLE_AUDIT;
+
seq_printf(m, "fanotify flags:%x event-flags:%x\n",
flags, group->fanotify_data.f_flags);
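
From userspace, the new audit knob shows up in two places: fanotify_init() accepts FAN_ENABLE_AUDIT (which requires CAP_AUDIT_WRITE), and a permission response may OR FAN_AUDIT into FAN_ALLOW or FAN_DENY to request an audit record for that decision. A minimal usage sketch, assuming uapi headers that already carry the two constants introduced by this series:

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>
#include <unistd.h>

static int respond_audited(int fan_fd, int event_fd, int allow)
{
	struct fanotify_response resp = {
		.fd = event_fd,
		/* FAN_AUDIT asks the kernel to log this decision */
		.response = (allow ? FAN_ALLOW : FAN_DENY) | FAN_AUDIT,
	};

	return write(fan_fd, &resp, sizeof(resp)) == sizeof(resp) ? 0 : -1;
}

int main(void)
{
	int fd = fanotify_init(FAN_CLASS_CONTENT | FAN_ENABLE_AUDIT,
			       O_RDONLY | O_LARGEFILE);

	if (fd < 0) {
		perror("fanotify_init");	/* EPERM without CAP_AUDIT_WRITE */
		return 1;
	}

	/*
	 * ... fanotify_mark() with FAN_OPEN_PERM, read events from fd,
	 * then respond_audited(fd, metadata->fd, 1) for each of them ...
	 */

	close(fd);
	return 0;
}
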
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 0c4583b61717..81d8959b6aef 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -243,6 +243,29 @@ static int send_to_group(struct inode *to_tell,
file_name, cookie, iter_info);
}
+static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector **connp)
+{
+ struct fsnotify_mark_connector *conn;
+ struct hlist_node *node = NULL;
+
+ conn = srcu_dereference(*connp, &fsnotify_mark_srcu);
+ if (conn)
+ node = srcu_dereference(conn->list.first, &fsnotify_mark_srcu);
+
+ return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
+}
+
+static struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark)
+{
+ struct hlist_node *node = NULL;
+
+ if (mark)
+ node = srcu_dereference(mark->obj_list.next,
+ &fsnotify_mark_srcu);
+
+ return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
+}
+
/*
* This is the main call to fsnotify. The VFS calls into hook specific functions
* in linux/fsnotify.h. Those functions then in turn call here. Here will call
@@ -252,11 +275,7 @@ static int send_to_group(struct inode *to_tell,
int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
const unsigned char *file_name, u32 cookie)
{
- struct hlist_node *inode_node = NULL, *vfsmount_node = NULL;
- struct fsnotify_mark *inode_mark = NULL, *vfsmount_mark = NULL;
- struct fsnotify_group *inode_group, *vfsmount_group;
- struct fsnotify_mark_connector *inode_conn, *vfsmount_conn;
- struct fsnotify_iter_info iter_info;
+ struct fsnotify_iter_info iter_info = {};
struct mount *mnt;
int ret = 0;
/* global tests shouldn't care about events on child only the specific event */
@@ -291,26 +310,16 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
if ((mask & FS_MODIFY) ||
(test_mask & to_tell->i_fsnotify_mask)) {
- inode_conn = srcu_dereference(to_tell->i_fsnotify_marks,
- &fsnotify_mark_srcu);
- if (inode_conn)
- inode_node = srcu_dereference(inode_conn->list.first,
- &fsnotify_mark_srcu);
+ iter_info.inode_mark =
+ fsnotify_first_mark(&to_tell->i_fsnotify_marks);
}
if (mnt && ((mask & FS_MODIFY) ||
(test_mask & mnt->mnt_fsnotify_mask))) {
- inode_conn = srcu_dereference(to_tell->i_fsnotify_marks,
- &fsnotify_mark_srcu);
- if (inode_conn)
- inode_node = srcu_dereference(inode_conn->list.first,
- &fsnotify_mark_srcu);
- vfsmount_conn = srcu_dereference(mnt->mnt_fsnotify_marks,
- &fsnotify_mark_srcu);
- if (vfsmount_conn)
- vfsmount_node = srcu_dereference(
- vfsmount_conn->list.first,
- &fsnotify_mark_srcu);
+ iter_info.inode_mark =
+ fsnotify_first_mark(&to_tell->i_fsnotify_marks);
+ iter_info.vfsmount_mark =
+ fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
}
/*
@@ -318,39 +327,19 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
* ignore masks are properly reflected for mount mark notifications.
* That's why this traversal is so complicated...
*/
- while (inode_node || vfsmount_node) {
- inode_group = NULL;
- inode_mark = NULL;
- vfsmount_group = NULL;
- vfsmount_mark = NULL;
-
- if (inode_node) {
- inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu),
- struct fsnotify_mark, obj_list);
- inode_group = inode_mark->group;
- }
-
- if (vfsmount_node) {
- vfsmount_mark = hlist_entry(srcu_dereference(vfsmount_node, &fsnotify_mark_srcu),
- struct fsnotify_mark, obj_list);
- vfsmount_group = vfsmount_mark->group;
- }
-
- if (inode_group && vfsmount_group) {
- int cmp = fsnotify_compare_groups(inode_group,
- vfsmount_group);
- if (cmp > 0) {
- inode_group = NULL;
+ while (iter_info.inode_mark || iter_info.vfsmount_mark) {
+ struct fsnotify_mark *inode_mark = iter_info.inode_mark;
+ struct fsnotify_mark *vfsmount_mark = iter_info.vfsmount_mark;
+
+ if (inode_mark && vfsmount_mark) {
+ int cmp = fsnotify_compare_groups(inode_mark->group,
+ vfsmount_mark->group);
+ if (cmp > 0)
inode_mark = NULL;
- } else if (cmp < 0) {
- vfsmount_group = NULL;
+ else if (cmp < 0)
vfsmount_mark = NULL;
- }
}
- iter_info.inode_mark = inode_mark;
- iter_info.vfsmount_mark = vfsmount_mark;
-
ret = send_to_group(to_tell, inode_mark, vfsmount_mark, mask,
data, data_is, cookie, file_name,
&iter_info);
@@ -358,12 +347,12 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS))
goto out;
- if (inode_group)
- inode_node = srcu_dereference(inode_node->next,
- &fsnotify_mark_srcu);
- if (vfsmount_group)
- vfsmount_node = srcu_dereference(vfsmount_node->next,
- &fsnotify_mark_srcu);
+ if (inode_mark)
+ iter_info.inode_mark =
+ fsnotify_next_mark(iter_info.inode_mark);
+ if (vfsmount_mark)
+ iter_info.vfsmount_mark =
+ fsnotify_next_mark(iter_info.vfsmount_mark);
}
ret = 0;
out:
diff --git a/fs/notify/group.c b/fs/notify/group.c
index 32357534de18..b7a4b6a69efa 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -107,7 +107,7 @@ void fsnotify_destroy_group(struct fsnotify_group *group)
*/
void fsnotify_get_group(struct fsnotify_group *group)
{
- atomic_inc(&group->refcnt);
+ refcount_inc(&group->refcnt);
}
/*
@@ -115,7 +115,7 @@ void fsnotify_get_group(struct fsnotify_group *group)
*/
void fsnotify_put_group(struct fsnotify_group *group)
{
- if (atomic_dec_and_test(&group->refcnt))
+ if (refcount_dec_and_test(&group->refcnt))
fsnotify_final_destroy_group(group);
}
@@ -131,7 +131,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
return ERR_PTR(-ENOMEM);
/* set to 0 when there a no external references to this group */
- atomic_set(&group->refcnt, 1);
+ refcount_set(&group->refcnt, 1);
atomic_set(&group->num_marks, 0);
atomic_set(&group->user_waits, 0);
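
The group and mark refcount hunks here are part of the tree-wide atomic_t-to-refcount_t conversion: refcount_t warns on increments from zero and saturates instead of wrapping, which the plain atomic_inc()/atomic_dec_and_test() calls it replaces would silently allow. The conversion pattern in isolation (illustrative object, not an fsnotify structure):

struct example_obj {
	refcount_t refcnt;
};

static struct example_obj *example_alloc(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		refcount_set(&obj->refcnt, 1);	/* caller's reference */
	return obj;
}

static void example_get(struct example_obj *obj)
{
	refcount_inc(&obj->refcnt);		/* WARNs if it was zero */
}

static void example_put(struct example_obj *obj)
{
	if (refcount_dec_and_test(&obj->refcnt))
		kfree(obj);
}
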
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 7cc7d3fb1862..d3c20e0bb046 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -376,7 +376,7 @@ static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group
fsnotify_get_mark(fsn_mark);
/* One ref for being in the idr, one ref we just took */
- BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
+ BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
}
return i_mark;
@@ -446,7 +446,7 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
* One ref for being in the idr
* one ref grabbed by inotify_idr_find
*/
- if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 2)) {
+ if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
/* we can't really recover with bad ref cnting.. */
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 9991f8826734..e9191b416434 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -105,18 +105,8 @@ static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn);
void fsnotify_get_mark(struct fsnotify_mark *mark)
{
- WARN_ON_ONCE(!atomic_read(&mark->refcnt));
- atomic_inc(&mark->refcnt);
-}
-
-/*
- * Get mark reference when we found the mark via lockless traversal of object
- * list. Mark can be already removed from the list by now and on its way to be
- * destroyed once SRCU period ends.
- */
-static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
-{
- return atomic_inc_not_zero(&mark->refcnt);
+ WARN_ON_ONCE(!refcount_read(&mark->refcnt));
+ refcount_inc(&mark->refcnt);
}
static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
@@ -211,7 +201,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
/* Catch marks that were actually never attached to object */
if (!mark->connector) {
- if (atomic_dec_and_test(&mark->refcnt))
+ if (refcount_dec_and_test(&mark->refcnt))
fsnotify_final_mark_destroy(mark);
return;
}
@@ -220,7 +210,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
* We have to be careful so that traversals of obj_list under lock can
* safely grab mark reference.
*/
- if (!atomic_dec_and_lock(&mark->refcnt, &mark->connector->lock))
+ if (!refcount_dec_and_lock(&mark->refcnt, &mark->connector->lock))
return;
conn = mark->connector;
@@ -256,32 +246,60 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
FSNOTIFY_REAPER_DELAY);
}
-bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
+/*
+ * Get mark reference when we found the mark via lockless traversal of object
+ * list. Mark can be already removed from the list by now and on its way to be
+ * destroyed once SRCU period ends.
+ *
+ * Also pin the group so it doesn't disappear under us.
+ */
+static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
{
- struct fsnotify_group *group;
-
- if (WARN_ON_ONCE(!iter_info->inode_mark && !iter_info->vfsmount_mark))
- return false;
-
- if (iter_info->inode_mark)
- group = iter_info->inode_mark->group;
- else
- group = iter_info->vfsmount_mark->group;
+ if (!mark)
+ return true;
+
+ if (refcount_inc_not_zero(&mark->refcnt)) {
+ spin_lock(&mark->lock);
+ if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
+ /* mark is attached, group is still alive then */
+ atomic_inc(&mark->group->user_waits);
+ spin_unlock(&mark->lock);
+ return true;
+ }
+ spin_unlock(&mark->lock);
+ fsnotify_put_mark(mark);
+ }
+ return false;
+}
- /*
- * Since acquisition of mark reference is an atomic op as well, we can
- * be sure this inc is seen before any effect of refcount increment.
- */
- atomic_inc(&group->user_waits);
+/*
+ * Puts marks and wakes up group destruction if necessary.
+ *
+ * Pairs with fsnotify_get_mark_safe()
+ */
+static void fsnotify_put_mark_wake(struct fsnotify_mark *mark)
+{
+ if (mark) {
+ struct fsnotify_group *group = mark->group;
- if (iter_info->inode_mark) {
- /* This can fail if mark is being removed */
- if (!fsnotify_get_mark_safe(iter_info->inode_mark))
- goto out_wait;
+ fsnotify_put_mark(mark);
+ /*
+ * We abuse notification_waitq on group shutdown for waiting for
+ * all marks pinned when waiting for userspace.
+ */
+ if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
+ wake_up(&group->notification_waitq);
}
- if (iter_info->vfsmount_mark) {
- if (!fsnotify_get_mark_safe(iter_info->vfsmount_mark))
- goto out_inode;
+}
+
+bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
+{
+ /* This can fail if mark is being removed */
+ if (!fsnotify_get_mark_safe(iter_info->inode_mark))
+ return false;
+ if (!fsnotify_get_mark_safe(iter_info->vfsmount_mark)) {
+ fsnotify_put_mark_wake(iter_info->inode_mark);
+ return false;
}
/*
@@ -292,34 +310,13 @@ bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx);
return true;
-out_inode:
- if (iter_info->inode_mark)
- fsnotify_put_mark(iter_info->inode_mark);
-out_wait:
- if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
- wake_up(&group->notification_waitq);
- return false;
}
void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info)
{
- struct fsnotify_group *group = NULL;
-
iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
- if (iter_info->inode_mark) {
- group = iter_info->inode_mark->group;
- fsnotify_put_mark(iter_info->inode_mark);
- }
- if (iter_info->vfsmount_mark) {
- group = iter_info->vfsmount_mark->group;
- fsnotify_put_mark(iter_info->vfsmount_mark);
- }
- /*
- * We abuse notification_waitq on group shutdown for waiting for all
- * marks pinned when waiting for userspace.
- */
- if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
- wake_up(&group->notification_waitq);
+ fsnotify_put_mark_wake(iter_info->inode_mark);
+ fsnotify_put_mark_wake(iter_info->vfsmount_mark);
}
/*
@@ -338,7 +335,7 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) &&
- atomic_read(&mark->refcnt) < 1 +
+ refcount_read(&mark->refcnt) < 1 +
!!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));
spin_lock(&mark->lock);
@@ -599,9 +596,11 @@ int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct inode *inode,
return ret;
err:
+ spin_lock(&mark->lock);
mark->flags &= ~(FSNOTIFY_MARK_FLAG_ALIVE |
FSNOTIFY_MARK_FLAG_ATTACHED);
list_del_init(&mark->g_list);
+ spin_unlock(&mark->lock);
atomic_dec(&group->num_marks);
fsnotify_put_mark(mark);
@@ -738,7 +737,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
{
memset(mark, 0, sizeof(*mark));
spin_lock_init(&mark->lock);
- atomic_set(&mark->refcnt, 1);
+ refcount_set(&mark->refcnt, 1);
fsnotify_get_group(group);
mark->group = group;
}
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index cc91856b5e2d..3a2e509c77c5 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -1739,7 +1739,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
spin_lock(&mapping->private_lock);
if (unlikely(!page_has_buffers(page))) {
spin_unlock(&mapping->private_lock);
- bh = head = alloc_page_buffers(page, bh_size, 1);
+ bh = head = alloc_page_buffers(page, bh_size, true);
spin_lock(&mapping->private_lock);
if (likely(!page_has_buffers(page))) {
struct buffer_head *tail;
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index b6f402194f02..ee8392aee9f6 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -507,7 +507,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
if (unlikely(!page_has_buffers(page))) {
struct buffer_head *tail;
- bh = head = alloc_page_buffers(page, blocksize, 1);
+ bh = head = alloc_page_buffers(page, blocksize, true);
do {
set_buffer_uptodate(bh);
tail = bh;
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index addd7c5f2d3e..ab5105f9767e 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -3585,8 +3585,6 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
* The easy case - we can just plop the record right in.
*/
*left_rec = *split_rec;
-
- has_empty_extent = 0;
} else
le16_add_cpu(&left_rec->e_leaf_clusters, split_clusters);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 88a31e9340a0..d1516327b787 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -134,6 +134,19 @@ bail:
return err;
}
+static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ int ret = 0;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+ down_read(&oi->ip_alloc_sem);
+ ret = ocfs2_get_block(inode, iblock, bh_result, create);
+ up_read(&oi->ip_alloc_sem);
+
+ return ret;
+}
+
int ocfs2_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
@@ -2128,7 +2141,7 @@ static void ocfs2_dio_free_write_ctx(struct inode *inode,
* called like this: dio->get_blocks(dio->inode, fs_startblk,
* fs_count, map_bh, dio->rw == WRITE);
*/
-static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock,
+static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
@@ -2154,12 +2167,9 @@ static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock,
* while file size will be changed.
*/
if (pos + total_len <= i_size_read(inode)) {
- down_read(&oi->ip_alloc_sem);
- /* This is the fast path for re-write. */
- ret = ocfs2_get_block(inode, iblock, bh_result, create);
-
- up_read(&oi->ip_alloc_sem);
+ /* This is the fast path for re-write. */
+ ret = ocfs2_lock_get_block(inode, iblock, bh_result, create);
if (buffer_mapped(bh_result) &&
!buffer_new(bh_result) &&
ret == 0)
@@ -2424,9 +2434,9 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
return 0;
if (iov_iter_rw(iter) == READ)
- get_block = ocfs2_get_block;
+ get_block = ocfs2_lock_get_block;
else
- get_block = ocfs2_dio_get_block;
+ get_block = ocfs2_dio_wr_get_block;
return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
iter, get_block,
diff --git a/fs/ocfs2/buffer_head_io.h b/fs/ocfs2/buffer_head_io.h
index b97bcc6dde7c..b1bb70c8ca4d 100644
--- a/fs/ocfs2/buffer_head_io.h
+++ b/fs/ocfs2/buffer_head_io.h
@@ -28,9 +28,6 @@
#include <linux/buffer_head.h>
-void ocfs2_end_buffer_io_sync(struct buffer_head *bh,
- int uptodate);
-
int ocfs2_write_block(struct ocfs2_super *osb,
struct buffer_head *bh,
struct ocfs2_caching_info *ci);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index d0206042d068..ea8c551bcd7e 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -2025,7 +2025,7 @@ static struct configfs_item_operations o2hb_region_item_ops = {
.release = o2hb_region_release,
};
-static struct config_item_type o2hb_region_type = {
+static const struct config_item_type o2hb_region_type = {
.ct_item_ops = &o2hb_region_item_ops,
.ct_attrs = o2hb_region_attrs,
.ct_owner = THIS_MODULE,
@@ -2310,7 +2310,7 @@ static struct configfs_group_operations o2hb_heartbeat_group_group_ops = {
.drop_item = o2hb_heartbeat_group_drop_item,
};
-static struct config_item_type o2hb_heartbeat_group_type = {
+static const struct config_item_type o2hb_heartbeat_group_type = {
.ct_group_ops = &o2hb_heartbeat_group_group_ops,
.ct_attrs = o2hb_heartbeat_group_attrs,
.ct_owner = THIS_MODULE,
diff --git a/fs/ocfs2/cluster/heartbeat.h b/fs/ocfs2/cluster/heartbeat.h
index 3ef5137dc362..a9e67efc0004 100644
--- a/fs/ocfs2/cluster/heartbeat.h
+++ b/fs/ocfs2/cluster/heartbeat.h
@@ -79,10 +79,8 @@ void o2hb_fill_node_map(unsigned long *map,
unsigned bytes);
void o2hb_exit(void);
int o2hb_init(void);
-int o2hb_check_node_heartbeating(u8 node_num);
int o2hb_check_node_heartbeating_no_sem(u8 node_num);
int o2hb_check_node_heartbeating_from_callback(u8 node_num);
-int o2hb_check_local_node_heartbeating(void);
void o2hb_stop_all_regions(void);
int o2hb_get_all_regions(char *region_uuids, u8 numregions);
int o2hb_global_heartbeat_active(void);
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index b17d180bdc16..da64c3a20eeb 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -40,6 +40,9 @@ char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
"panic", /* O2NM_FENCE_PANIC */
};
+static inline void o2nm_lock_subsystem(void);
+static inline void o2nm_unlock_subsystem(void);
+
struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
{
struct o2nm_node *node = NULL;
@@ -181,7 +184,10 @@ static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
/* through the first node_set .parent
* mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
- return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
+ if (node->nd_item.ci_parent)
+ return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
+ else
+ return NULL;
}
enum {
@@ -194,7 +200,7 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
- struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+ struct o2nm_cluster *cluster;
unsigned long tmp;
char *p = (char *)page;
int ret = 0;
@@ -214,6 +220,13 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
!test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EINVAL; /* XXX */
+ o2nm_lock_subsystem();
+ cluster = to_o2nm_cluster_from_node(node);
+ if (!cluster) {
+ o2nm_unlock_subsystem();
+ return -EINVAL;
+ }
+
write_lock(&cluster->cl_nodes_lock);
if (cluster->cl_nodes[tmp])
ret = -EEXIST;
@@ -226,6 +239,8 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
set_bit(tmp, cluster->cl_nodes_bitmap);
}
write_unlock(&cluster->cl_nodes_lock);
+ o2nm_unlock_subsystem();
+
if (ret)
return ret;
@@ -269,7 +284,7 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
- struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+ struct o2nm_cluster *cluster;
int ret, i;
struct rb_node **p, *parent;
unsigned int octets[4];
@@ -286,6 +301,13 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
}
+ o2nm_lock_subsystem();
+ cluster = to_o2nm_cluster_from_node(node);
+ if (!cluster) {
+ o2nm_unlock_subsystem();
+ return -EINVAL;
+ }
+
ret = 0;
write_lock(&cluster->cl_nodes_lock);
if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
@@ -298,6 +320,8 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
}
write_unlock(&cluster->cl_nodes_lock);
+ o2nm_unlock_subsystem();
+
if (ret)
return ret;
@@ -315,7 +339,7 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
- struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+ struct o2nm_cluster *cluster;
unsigned long tmp;
char *p = (char *)page;
ssize_t ret;
@@ -333,17 +357,26 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
!test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EINVAL; /* XXX */
+ o2nm_lock_subsystem();
+ cluster = to_o2nm_cluster_from_node(node);
+ if (!cluster) {
+ ret = -EINVAL;
+ goto out;
+ }
+
/* the only failure case is trying to set a new local node
* when a different one is already set */
if (tmp && tmp == cluster->cl_has_local &&
- cluster->cl_local_node != node->nd_num)
- return -EBUSY;
+ cluster->cl_local_node != node->nd_num) {
+ ret = -EBUSY;
+ goto out;
+ }
/* bring up the rx thread if we're setting the new local node. */
if (tmp && !cluster->cl_has_local) {
ret = o2net_start_listening(node);
if (ret)
- return ret;
+ goto out;
}
if (!tmp && cluster->cl_has_local &&
@@ -358,7 +391,11 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
cluster->cl_local_node = node->nd_num;
}
- return count;
+ ret = count;
+
+out:
+ o2nm_unlock_subsystem();
+ return ret;
}
CONFIGFS_ATTR(o2nm_node_, num);
@@ -378,7 +415,7 @@ static struct configfs_item_operations o2nm_node_item_ops = {
.release = o2nm_node_release,
};
-static struct config_item_type o2nm_node_type = {
+static const struct config_item_type o2nm_node_type = {
.ct_item_ops = &o2nm_node_item_ops,
.ct_attrs = o2nm_node_attrs,
.ct_owner = THIS_MODULE,
@@ -619,7 +656,7 @@ static struct configfs_group_operations o2nm_node_group_group_ops = {
.drop_item = o2nm_node_group_drop_item,
};
-static struct config_item_type o2nm_node_group_type = {
+static const struct config_item_type o2nm_node_group_type = {
.ct_group_ops = &o2nm_node_group_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -637,7 +674,7 @@ static struct configfs_item_operations o2nm_cluster_item_ops = {
.release = o2nm_cluster_release,
};
-static struct config_item_type o2nm_cluster_type = {
+static const struct config_item_type o2nm_cluster_type = {
.ct_item_ops = &o2nm_cluster_item_ops,
.ct_attrs = o2nm_cluster_attrs,
.ct_owner = THIS_MODULE,
@@ -722,7 +759,7 @@ static struct configfs_group_operations o2nm_cluster_group_group_ops = {
.drop_item = o2nm_cluster_group_drop_item,
};
-static struct config_item_type o2nm_cluster_group_type = {
+static const struct config_item_type o2nm_cluster_group_type = {
.ct_group_ops = &o2nm_cluster_group_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -738,6 +775,16 @@ static struct o2nm_cluster_group o2nm_cluster_group = {
},
};
+static inline void o2nm_lock_subsystem(void)
+{
+ mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
+}
+
+static inline void o2nm_unlock_subsystem(void)
+{
+ mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
+}
+
int o2nm_depend_item(struct config_item *item)
{
return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
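
The o2nm store handlers above stop dereferencing ci_parent without protection: a concurrent rmdir on the configfs node can detach it, leaving to_o2nm_cluster_from_node() with a NULL parent. The shape the handlers converge on (condensed and illustrative; the real handlers also parse their input and take cl_nodes_lock where needed):

static ssize_t example_node_attr_store(struct config_item *item,
				       const char *page, size_t count)
{
	struct o2nm_node *node = to_o2nm_node(item);
	struct o2nm_cluster *cluster;
	ssize_t ret;

	o2nm_lock_subsystem();			/* pins the configfs hierarchy */
	cluster = to_o2nm_cluster_from_node(node);
	if (!cluster) {
		ret = -EINVAL;			/* node already detached */
		goto out;
	}

	/* ... validate input and update @cluster under its own locks ... */
	ret = count;
out:
	o2nm_unlock_subsystem();
	return ret;
}
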
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index a2b19fbdcf46..e1fea149f50b 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -394,7 +394,6 @@ int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm)
{
if (dlm->dlm_worker) {
- flush_workqueue(dlm->dlm_worker);
destroy_workqueue(dlm->dlm_worker);
dlm->dlm_worker = NULL;
}
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 3e04279446e8..9c3e0f13ca87 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2616,7 +2616,9 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
* otherwise the assert_master from the new
* master will destroy this.
*/
- dlm_get_mle_inuse(mle);
+ if (ret != -EEXIST)
+ dlm_get_mle_inuse(mle);
+
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 74407c6dd592..ec8f75813beb 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2419,6 +2419,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
dlm_lockres_put(res);
continue;
}
+ dlm_move_lockres_to_recovery_list(dlm, res);
} else if (res->owner == dlm->node_num) {
dlm_free_dead_locks(dlm, res, dead_node);
__dlm_lockres_calc_usage(dlm, res);
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 9ab9e1892b5f..9c7c18c0e129 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -88,13 +88,13 @@ struct workqueue_struct *user_dlm_worker;
*/
#define DLMFS_CAPABILITIES "bast stackglue"
static int param_set_dlmfs_capabilities(const char *val,
- struct kernel_param *kp)
+ const struct kernel_param *kp)
{
printk(KERN_ERR "%s: readonly parameter\n", kp->name);
return -EINVAL;
}
static int param_get_dlmfs_capabilities(char *buffer,
- struct kernel_param *kp)
+ const struct kernel_param *kp)
{
return strlcpy(buffer, DLMFS_CAPABILITIES,
strlen(DLMFS_CAPABILITIES) + 1);
@@ -670,7 +670,6 @@ static void __exit exit_dlmfs_fs(void)
{
unregister_filesystem(&dlmfs_fs_type);
- flush_workqueue(user_dlm_worker);
destroy_workqueue(user_dlm_worker);
/*
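
The dlmfs hunk above only adjusts prototypes: module parameter get/set handlers now take a const struct kernel_param *, matching the constified ops tables. A self-contained sketch of a read-only parameter with the same signatures (parameter name is illustrative):

static int example_param_set(const char *val, const struct kernel_param *kp)
{
	pr_err("%s: read-only parameter\n", kp->name);
	return -EINVAL;
}

static int example_param_get(char *buffer, const struct kernel_param *kp)
{
	return scnprintf(buffer, PAGE_SIZE, "example-capabilities\n");
}

module_param_call(example_capabilities, example_param_set,
		  example_param_get, NULL, 0444);
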
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 6e41fc8fabbe..dc455d45a66a 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1161,6 +1161,13 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
}
size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
if (size_change) {
+ /*
+ * Here we should wait dio to finish before inode lock
+ * to avoid a deadlock between ocfs2_setattr() and
+ * ocfs2_dio_end_io_write()
+ */
+ inode_dio_wait(inode);
+
status = ocfs2_rw_lock(inode, 1);
if (status < 0) {
mlog_errno(status);
@@ -1200,8 +1207,6 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
if (status)
goto bail_unlock;
- inode_dio_wait(inode);
-
if (i_size_read(inode) >= attr->ia_size) {
if (ocfs2_should_order_data(inode)) {
status = ocfs2_begin_ordered_truncate(inode,
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 71f22c8fbffd..9f0b95abc09f 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1147,12 +1147,9 @@ int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT, NULL,
ALLOC_NEW_GROUP);
- if (status < 0 && status != -ENOSPC) {
+ if (status < 0 && status != -ENOSPC)
mlog_errno(status);
- goto bail;
- }
-bail:
return status;
}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 80733496b22a..040bbb6a6e4b 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -2521,10 +2521,8 @@ static void ocfs2_delete_osb(struct ocfs2_super *osb)
/* This function assumes that the caller has the main osb resource */
/* ocfs2_initialize_super has already created this workqueue */
- if (osb->ocfs2_wq) {
- flush_workqueue(osb->ocfs2_wq);
+ if (osb->ocfs2_wq)
destroy_workqueue(osb->ocfs2_wq);
- }
ocfs2_free_slot_info(osb);
diff --git a/fs/ocfs2/super.h b/fs/ocfs2/super.h
index b023e4f3d740..d4550c8bbc41 100644
--- a/fs/ocfs2/super.h
+++ b/fs/ocfs2/super.h
@@ -26,9 +26,6 @@
#ifndef OCFS2_SUPER_H
#define OCFS2_SUPER_H
-int ocfs2_publish_get_mount_state(struct ocfs2_super *osb,
- int node_num);
-
__printf(3, 4)
int __ocfs2_error(struct super_block *sb, const char *function,
const char *fmt, ...);
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 25d9b5adcd42..36b49bd09264 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -77,5 +77,5 @@ static inline struct ovl_inode *OVL_I(struct inode *inode)
static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi)
{
- return lockless_dereference(oi->__upperdentry);
+ return READ_ONCE(oi->__upperdentry);
}
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 698b74dd750e..c310e3ff7f3f 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -754,7 +754,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
struct inode *inode = file_inode(file);
- realfile = lockless_dereference(od->upperfile);
+ realfile = READ_ONCE(od->upperfile);
if (!realfile) {
struct path upperpath;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 9390032a11e1..6f6fc1672ad1 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -138,7 +138,7 @@ static const char * const task_state_array[] = {
static inline const char *get_task_state(struct task_struct *tsk)
{
BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != ARRAY_SIZE(task_state_array));
- return task_state_array[__get_task_state(tsk)];
+ return task_state_array[task_state_index(tsk)];
}
static inline int get_task_umask(struct task_struct *tsk)
@@ -454,7 +454,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
cutime = sig->cutime;
cstime = sig->cstime;
cgtime = sig->cgtime;
- rsslim = ACCESS_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
+ rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
/* add up live thread stats at the group level */
if (whole) {
diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
index 2da657848cfc..d0cf1c50bb6c 100644
--- a/fs/proc/proc_tty.c
+++ b/fs/proc/proc_tty.c
@@ -15,6 +15,7 @@
#include <linux/tty.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
+#include "internal.h"
/*
* The /proc/tty directory inodes...
@@ -165,7 +166,7 @@ void proc_tty_unregister_driver(struct tty_driver *driver)
if (!ent)
return;
- remove_proc_entry(driver->driver_name, proc_tty_driver);
+ remove_proc_entry(ent->name, proc_tty_driver);
driver->proc_entry = NULL;
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 6744bd706ecf..875231c36cb3 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -26,7 +26,7 @@
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
- unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
+ unsigned long text, lib, swap, anon, file, shmem;
unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
anon = get_mm_counter(mm, MM_ANONPAGES);
@@ -50,8 +50,6 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
swap = get_mm_counter(mm, MM_SWAPENTS);
- ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
- pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
seq_printf(m,
"VmPeak:\t%8lu kB\n"
"VmSize:\t%8lu kB\n"
@@ -67,7 +65,6 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
"VmExe:\t%8lu kB\n"
"VmLib:\t%8lu kB\n"
"VmPTE:\t%8lu kB\n"
- "VmPMD:\t%8lu kB\n"
"VmSwap:\t%8lu kB\n",
hiwater_vm << (PAGE_SHIFT-10),
total_vm << (PAGE_SHIFT-10),
@@ -80,8 +77,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
shmem << (PAGE_SHIFT-10),
mm->data_vm << (PAGE_SHIFT-10),
mm->stack_vm << (PAGE_SHIFT-10), text, lib,
- ptes >> 10,
- pmds >> 10,
+ mm_pgtables_bytes(mm) >> 10,
swap << (PAGE_SHIFT-10));
hugetlb_report_usage(m, mm);
}
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 7626ee11b06c..7b635d173213 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -28,7 +28,7 @@ static unsigned mounts_poll(struct file *file, poll_table *wait)
poll_wait(file, &p->ns->poll, wait);
- event = ACCESS_ONCE(ns->event);
+ event = READ_ONCE(ns->event);
if (m->poll_event != event) {
m->poll_event = event;
res |= POLLERR | POLLPRI;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 9f78b5015f2e..39f1b0b0c76f 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -645,8 +645,15 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
spin_unlock(&dq_list_lock);
dqstats_inc(DQST_LOOKUPS);
err = sb->dq_op->write_dquot(dquot);
- if (!ret && err)
- ret = err;
+ if (err) {
+ /*
+ * Clear dirty bit anyway to avoid infinite
+ * loop here.
+ */
+ clear_dquot_dirty(dquot);
+ if (!ret)
+ ret = err;
+ }
dqput(dquot);
spin_lock(&dq_list_lock);
}
@@ -2139,7 +2146,7 @@ int dquot_file_open(struct inode *inode, struct file *file)
error = generic_file_open(inode, file);
if (!error && (file->f_mode & FMODE_WRITE))
- dquot_initialize(inode);
+ error = dquot_initialize(inode);
return error;
}
EXPORT_SYMBOL(dquot_file_open);
diff --git a/fs/readdir.c b/fs/readdir.c
index d336db65a33e..1b83b0ad183b 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -37,13 +37,12 @@ int iterate_dir(struct file *file, struct dir_context *ctx)
if (res)
goto out;
- if (shared) {
- inode_lock_shared(inode);
- } else {
+ if (shared)
+ res = down_read_killable(&inode->i_rwsem);
+ else
res = down_write_killable(&inode->i_rwsem);
- if (res)
- goto out;
- }
+ if (res)
+ goto out;
res = -ENOENT;
if (!IS_DEADDIR(inode)) {
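
iterate_dir() above swaps inode_lock_shared() for down_read_killable(), so a reader stuck behind a wedged directory lock can still be killed; both the shared and the exclusive acquisition now return -EINTR on a fatal signal and the caller bails out either way. The acquisition shape in isolation (illustrative helper):

static int example_lock_dir(struct inode *inode, bool shared)
{
	int res;

	if (shared)
		res = down_read_killable(&inode->i_rwsem);
	else
		res = down_write_killable(&inode->i_rwsem);

	return res;	/* 0 on success, -EINTR if a fatal signal arrived */
}
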
diff --git a/fs/splice.c b/fs/splice.c
index f3084cce0ea6..39e2dc01ac12 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -253,7 +253,7 @@ EXPORT_SYMBOL(add_to_pipe);
*/
int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
- unsigned int buffers = ACCESS_ONCE(pipe->buffers);
+ unsigned int buffers = READ_ONCE(pipe->buffers);
spd->nr_pages_max = buffers;
if (buffers <= PIPE_DEF_BUFFERS)
diff --git a/fs/sync.c b/fs/sync.c
index 83ac79a960dd..6e0a2cbaf6de 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -109,7 +109,7 @@ SYSCALL_DEFINE0(sync)
{
int nowait = 0, wait = 1;
- wakeup_flusher_threads(0, WB_REASON_SYNC);
+ wakeup_flusher_threads(WB_REASON_SYNC);
iterate_supers(sync_inodes_one_sb, NULL);
iterate_supers(sync_fs_one_sb, &nowait);
iterate_supers(sync_fs_one_sb, &wait);
diff --git a/fs/ubifs/crypto.c b/fs/ubifs/crypto.c
index 16a5d5c82073..616a688f5d8f 100644
--- a/fs/ubifs/crypto.c
+++ b/fs/ubifs/crypto.c
@@ -88,7 +88,6 @@ const struct fscrypt_operations ubifs_crypt_operations = {
.key_prefix = "ubifs:",
.get_context = ubifs_crypt_get_context,
.set_context = ubifs_crypt_set_context,
- .is_encrypted = __ubifs_crypt_is_encrypted,
.empty_dir = ubifs_crypt_empty_dir,
.max_namelen = ubifs_crypt_max_namelen,
};
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index fdc311246807..0164bcc827f8 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -38,7 +38,8 @@ void ubifs_set_inode_flags(struct inode *inode)
{
unsigned int flags = ubifs_inode(inode)->flags;
- inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_DIRSYNC);
+ inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_DIRSYNC |
+ S_ENCRYPTED);
if (flags & UBIFS_SYNC_FL)
inode->i_flags |= S_SYNC;
if (flags & UBIFS_APPEND_FL)
@@ -47,6 +48,8 @@ void ubifs_set_inode_flags(struct inode *inode)
inode->i_flags |= S_IMMUTABLE;
if (flags & UBIFS_DIRSYNC_FL)
inode->i_flags |= S_DIRSYNC;
+ if (flags & UBIFS_CRYPT_FL)
+ inode->i_flags |= S_ENCRYPTED;
}
/*
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 5496b17b959c..7503e7cdf870 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2007,12 +2007,6 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
return c;
}
-#ifndef CONFIG_UBIFS_FS_ENCRYPTION
-const struct fscrypt_operations ubifs_crypt_operations = {
- .is_encrypted = __ubifs_crypt_is_encrypted,
-};
-#endif
-
static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
{
struct ubifs_info *c = sb->s_fs_info;
@@ -2055,7 +2049,9 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE;
sb->s_op = &ubifs_super_operations;
sb->s_xattr = ubifs_xattr_handlers;
+#ifdef CONFIG_UBIFS_FS_ENCRYPTION
sb->s_cop = &ubifs_crypt_operations;
+#endif
mutex_lock(&c->umount_mutex);
err = mount_ubifs(c);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index cd43651f1731..63c7468147eb 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -38,12 +38,11 @@
#include <linux/backing-dev.h>
#include <linux/security.h>
#include <linux/xattr.h>
-#ifdef CONFIG_UBIFS_FS_ENCRYPTION
-#include <linux/fscrypt_supp.h>
-#else
-#include <linux/fscrypt_notsupp.h>
-#endif
#include <linux/random.h>
+
+#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_UBIFS_FS_ENCRYPTION)
+#include <linux/fscrypt.h>
+
#include "ubifs-media.h"
/* Version of this UBIFS implementation */
@@ -1835,18 +1834,13 @@ int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,
extern const struct fscrypt_operations ubifs_crypt_operations;
-static inline bool __ubifs_crypt_is_encrypted(struct inode *inode)
+static inline bool ubifs_crypt_is_encrypted(const struct inode *inode)
{
- struct ubifs_inode *ui = ubifs_inode(inode);
+ const struct ubifs_inode *ui = ubifs_inode(inode);
return ui->flags & UBIFS_CRYPT_FL;
}
-static inline bool ubifs_crypt_is_encrypted(const struct inode *inode)
-{
- return __ubifs_crypt_is_encrypted((struct inode *)inode);
-}
-
/* Normal UBIFS messages */
__printf(2, 3)
void ubifs_msg(const struct ubifs_info *c, const char *fmt, ...);
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index c13eae819cbc..5ddc89d564fd 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -170,6 +170,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
err = ubifs_jnl_update(c, host, nm, inode, 0, 1);
if (err)
goto out_cancel;
+ ubifs_set_inode_flags(host);
mutex_unlock(&host_ui->ui_mutex);
ubifs_release_budget(c, &req);
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index e0fd65fe73e8..1b961b1d9699 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -58,7 +58,7 @@ static int __load_block_bitmap(struct super_block *sb,
int nr_groups = bitmap->s_nr_groups;
if (block_group >= nr_groups) {
- udf_debug("block_group (%d) > nr_groups (%d)\n",
+ udf_debug("block_group (%u) > nr_groups (%d)\n",
block_group, nr_groups);
}
@@ -122,7 +122,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
if (bloc->logicalBlockNum + count < count ||
(bloc->logicalBlockNum + count) > partmap->s_partition_len) {
- udf_debug("%d < %d || %d + %d > %d\n",
+ udf_debug("%u < %d || %u + %u > %u\n",
bloc->logicalBlockNum, 0,
bloc->logicalBlockNum, count,
partmap->s_partition_len);
@@ -151,9 +151,9 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
bh = bitmap->s_block_bitmap[bitmap_nr];
for (i = 0; i < count; i++) {
if (udf_set_bit(bit + i, bh->b_data)) {
- udf_debug("bit %ld already set\n", bit + i);
+ udf_debug("bit %lu already set\n", bit + i);
udf_debug("byte=%2x\n",
- ((char *)bh->b_data)[(bit + i) >> 3]);
+ ((__u8 *)bh->b_data)[(bit + i) >> 3]);
}
}
udf_add_free_space(sb, sbi->s_partition, count);
@@ -218,16 +218,18 @@ out:
return alloc_count;
}
-static int udf_bitmap_new_block(struct super_block *sb,
+static udf_pblk_t udf_bitmap_new_block(struct super_block *sb,
struct udf_bitmap *bitmap, uint16_t partition,
uint32_t goal, int *err)
{
struct udf_sb_info *sbi = UDF_SB(sb);
- int newbit, bit = 0, block, block_group, group_start;
+ int newbit, bit = 0;
+ udf_pblk_t block;
+ int block_group, group_start;
int end_goal, nr_groups, bitmap_nr, i;
struct buffer_head *bh = NULL;
char *ptr;
- int newblock = 0;
+ udf_pblk_t newblock = 0;
*err = -ENOSPC;
mutex_lock(&sbi->s_alloc_mutex);
@@ -362,7 +364,7 @@ static void udf_table_free_blocks(struct super_block *sb,
partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
if (bloc->logicalBlockNum + count < count ||
(bloc->logicalBlockNum + count) > partmap->s_partition_len) {
- udf_debug("%d < %d || %d + %d > %d\n",
+ udf_debug("%u < %d || %u + %u > %u\n",
bloc->logicalBlockNum, 0,
bloc->logicalBlockNum, count,
partmap->s_partition_len);
@@ -515,7 +517,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
while (first_block != eloc.logicalBlockNum &&
(etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
- udf_debug("eloc=%d, elen=%d, first_block=%d\n",
+ udf_debug("eloc=%u, elen=%u, first_block=%u\n",
eloc.logicalBlockNum, elen, first_block);
; /* empty loop body */
}
@@ -545,13 +547,14 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
return alloc_count;
}
-static int udf_table_new_block(struct super_block *sb,
+static udf_pblk_t udf_table_new_block(struct super_block *sb,
struct inode *table, uint16_t partition,
uint32_t goal, int *err)
{
struct udf_sb_info *sbi = UDF_SB(sb);
uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
- uint32_t newblock = 0, adsize;
+ udf_pblk_t newblock = 0;
+ uint32_t adsize;
uint32_t elen, goal_elen = 0;
struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
struct extent_position epos, goal_epos;
@@ -700,12 +703,12 @@ inline int udf_prealloc_blocks(struct super_block *sb,
return allocated;
}
-inline int udf_new_block(struct super_block *sb,
+inline udf_pblk_t udf_new_block(struct super_block *sb,
struct inode *inode,
uint16_t partition, uint32_t goal, int *err)
{
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
- int block;
+ udf_pblk_t block;
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
block = udf_bitmap_new_block(sb,
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 2d0e028067eb..c19dba45aa20 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -43,7 +43,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
struct udf_fileident_bh fibh = { .sbh = NULL, .ebh = NULL};
struct fileIdentDesc *fi = NULL;
struct fileIdentDesc cfi;
- int block, iblock;
+ udf_pblk_t block, iblock;
loff_t nf_pos;
int flen;
unsigned char *fname = NULL, *copy_name = NULL;
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index 7aa48bd7cbaf..0a98a2369738 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -26,7 +26,8 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
sector_t *offset)
{
struct fileIdentDesc *fi;
- int i, num, block;
+ int i, num;
+ udf_pblk_t block;
struct buffer_head *tmp, *bha[16];
struct udf_inode_info *iinfo = UDF_I(dir);
@@ -51,7 +52,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
}
if (fibh->eoffset == dir->i_sb->s_blocksize) {
- int lextoffset = epos->offset;
+ uint32_t lextoffset = epos->offset;
unsigned char blocksize_bits = dir->i_sb->s_blocksize_bits;
if (udf_next_aext(dir, epos, eloc, elen, 1) !=
@@ -110,7 +111,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
memcpy((uint8_t *)cfi, (uint8_t *)fi,
sizeof(struct fileIdentDesc));
} else if (fibh->eoffset > dir->i_sb->s_blocksize) {
- int lextoffset = epos->offset;
+ uint32_t lextoffset = epos->offset;
if (udf_next_aext(dir, epos, eloc, elen, 1) !=
(EXT_RECORDED_ALLOCATED >> 30))
@@ -175,7 +176,7 @@ struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize, int *offset)
if (fi->descTag.tagIdent != cpu_to_le16(TAG_IDENT_FID)) {
udf_debug("0x%x != TAG_IDENT_FID\n",
le16_to_cpu(fi->descTag.tagIdent));
- udf_debug("offset: %u sizeof: %lu bufsize: %u\n",
+ udf_debug("offset: %d sizeof: %lu bufsize: %d\n",
*offset, (unsigned long)sizeof(struct fileIdentDesc),
bufsize);
return NULL;
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index c1ed18a10ce4..b6e420c1bfeb 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -50,7 +50,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode)
struct super_block *sb = dir->i_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
struct inode *inode;
- int block;
+ udf_pblk_t block;
uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
struct udf_inode_info *iinfo;
struct udf_inode_info *dinfo = UDF_I(dir);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 8dacf4f57414..c23744d5ae5c 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -52,7 +52,7 @@ static int udf_alloc_i_data(struct inode *inode, size_t size);
static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
static int8_t udf_insert_aext(struct inode *, struct extent_position,
struct kernel_lb_addr, uint32_t);
-static void udf_split_extents(struct inode *, int *, int, int,
+static void udf_split_extents(struct inode *, int *, int, udf_pblk_t,
struct kernel_long_ad *, int *);
static void udf_prealloc_extents(struct inode *, int, int,
struct kernel_long_ad *, int *);
@@ -316,10 +316,10 @@ int udf_expand_file_adinicb(struct inode *inode)
return err;
}
-struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
- int *err)
+struct buffer_head *udf_expand_dir_adinicb(struct inode *inode,
+ udf_pblk_t *block, int *err)
{
- int newblock;
+ udf_pblk_t newblock;
struct buffer_head *dbh = NULL;
struct kernel_lb_addr eloc;
uint8_t alloctype;
@@ -446,7 +446,7 @@ abort:
return err;
}
-static struct buffer_head *udf_getblk(struct inode *inode, long block,
+static struct buffer_head *udf_getblk(struct inode *inode, udf_pblk_t block,
int create, int *err)
{
struct buffer_head *bh;
@@ -480,7 +480,7 @@ static int udf_do_extend_file(struct inode *inode,
int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
struct super_block *sb = inode->i_sb;
struct kernel_lb_addr prealloc_loc = {};
- int prealloc_len = 0;
+ uint32_t prealloc_len = 0;
struct udf_inode_info *iinfo;
int err;
@@ -663,11 +663,11 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
struct kernel_lb_addr eloc, tmpeloc;
int c = 1;
loff_t lbcount = 0, b_off = 0;
- uint32_t newblocknum, newblock;
+ udf_pblk_t newblocknum, newblock;
sector_t offset = 0;
int8_t etype;
struct udf_inode_info *iinfo = UDF_I(inode);
- int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
+ udf_pblk_t goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
int lastblock = 0;
bool isBeyondEOF;
@@ -879,8 +879,8 @@ out_free:
}
static void udf_split_extents(struct inode *inode, int *c, int offset,
- int newblocknum, struct kernel_long_ad *laarr,
- int *endnum)
+ udf_pblk_t newblocknum,
+ struct kernel_long_ad *laarr, int *endnum)
{
unsigned long blocksize = inode->i_sb->s_blocksize;
unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
@@ -1166,7 +1166,7 @@ static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr
}
}
-struct buffer_head *udf_bread(struct inode *inode, int block,
+struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
int create, int *err)
{
struct buffer_head *bh = NULL;
@@ -1193,7 +1193,7 @@ int udf_setsize(struct inode *inode, loff_t newsize)
{
int err;
struct udf_inode_info *iinfo;
- int bsize = i_blocksize(inode);
+ unsigned int bsize = i_blocksize(inode);
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
@@ -1278,14 +1278,14 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode)
reread:
if (iloc->partitionReferenceNum >= sbi->s_partitions) {
- udf_debug("partition reference: %d > logical volume partitions: %d\n",
+ udf_debug("partition reference: %u > logical volume partitions: %u\n",
iloc->partitionReferenceNum, sbi->s_partitions);
return -EIO;
}
if (iloc->logicalBlockNum >=
sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
- udf_debug("block=%d, partition=%d out of range\n",
+ udf_debug("block=%u, partition=%u out of range\n",
iloc->logicalBlockNum, iloc->partitionReferenceNum);
return -EIO;
}
@@ -1304,13 +1304,13 @@ reread:
*/
bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
if (!bh) {
- udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino);
+ udf_err(inode->i_sb, "(ino %lu) failed !bh\n", inode->i_ino);
return -EIO;
}
if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
ident != TAG_IDENT_USE) {
- udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n",
+ udf_err(inode->i_sb, "(ino %lu) failed ident=%u\n",
inode->i_ino, ident);
goto out;
}
@@ -1346,7 +1346,7 @@ reread:
}
brelse(ibh);
} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
- udf_err(inode->i_sb, "unsupported strategy type: %d\n",
+ udf_err(inode->i_sb, "unsupported strategy type: %u\n",
le16_to_cpu(fe->icbTag.strategyType));
goto out;
}
@@ -1547,7 +1547,7 @@ reread:
udf_debug("METADATA BITMAP FILE-----\n");
break;
default:
- udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n",
+ udf_err(inode->i_sb, "(ino %lu) failed unknown file type=%u\n",
inode->i_ino, fe->icbTag.fileType);
goto out;
}
@@ -1852,7 +1852,7 @@ struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
return inode;
}
-int udf_setup_indirect_aext(struct inode *inode, int block,
+int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
struct extent_position *epos)
{
struct super_block *sb = inode->i_sb;
@@ -1994,7 +1994,7 @@ int udf_add_aext(struct inode *inode, struct extent_position *epos,
if (epos->offset + (2 * adsize) > sb->s_blocksize) {
int err;
- int new_block;
+ udf_pblk_t new_block;
new_block = udf_new_block(sb, NULL,
epos->block.partitionReferenceNum,
@@ -2076,7 +2076,7 @@ int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
(EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
- int block;
+ udf_pblk_t block;
if (++indirections > UDF_MAX_INDIR_EXTS) {
udf_err(inode->i_sb,
@@ -2091,7 +2091,7 @@ int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
epos->bh = udf_tread(inode->i_sb, block);
if (!epos->bh) {
- udf_debug("reading block %d failed!\n", block);
+ udf_debug("reading block %u failed!\n", block);
return -1;
}
}
@@ -2146,7 +2146,7 @@ int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
break;
default:
- udf_debug("alloc_type = %d unsupported\n", iinfo->i_alloc_type);
+ udf_debug("alloc_type = %u unsupported\n", iinfo->i_alloc_type);
return -1;
}
@@ -2289,13 +2289,13 @@ int8_t inode_bmap(struct inode *inode, sector_t block,
return etype;
}
-long udf_block_map(struct inode *inode, sector_t block)
+udf_pblk_t udf_block_map(struct inode *inode, sector_t block)
{
struct kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
struct extent_position epos = {};
- int ret;
+ udf_pblk_t ret;
down_read(&UDF_I(inode)->i_data_sem);
diff --git a/fs/udf/misc.c b/fs/udf/misc.c
index 3949c4bec3a3..401e64cde1be 100644
--- a/fs/udf/misc.c
+++ b/fs/udf/misc.c
@@ -28,7 +28,7 @@
#include "udf_i.h"
#include "udf_sb.h"
-struct buffer_head *udf_tgetblk(struct super_block *sb, int block)
+struct buffer_head *udf_tgetblk(struct super_block *sb, udf_pblk_t block)
{
if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
return sb_getblk(sb, udf_fixed_to_variable(block));
@@ -36,7 +36,7 @@ struct buffer_head *udf_tgetblk(struct super_block *sb, int block)
return sb_getblk(sb, block);
}
-struct buffer_head *udf_tread(struct super_block *sb, int block)
+struct buffer_head *udf_tread(struct super_block *sb, udf_pblk_t block)
{
if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
return sb_bread(sb, udf_fixed_to_variable(block));
@@ -209,7 +209,7 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
bh = udf_tread(sb, block);
if (!bh) {
- udf_err(sb, "read failed, block=%u, location=%d\n",
+ udf_err(sb, "read failed, block=%u, location=%u\n",
block, location);
return NULL;
}
@@ -247,7 +247,7 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
le16_to_cpu(tag_p->descCRCLength)))
return bh;
- udf_debug("Crc failure block %d: crc = %d, crclen = %d\n", block,
+ udf_debug("Crc failure block %u: crc = %u, crclen = %u\n", block,
le16_to_cpu(tag_p->descCRC),
le16_to_cpu(tag_p->descCRCLength));
error_out:
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 885198dfd9f8..0458dd47e105 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -164,7 +164,8 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
{
struct fileIdentDesc *fi = NULL;
loff_t f_pos;
- int block, flen;
+ udf_pblk_t block;
+ int flen;
unsigned char *fname = NULL, *copy_name = NULL;
unsigned char *nameptr;
uint8_t lfi;
@@ -352,7 +353,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
int nfidlen;
uint8_t lfi;
uint16_t liu;
- int block;
+ udf_pblk_t block;
struct kernel_lb_addr eloc;
uint32_t elen = 0;
sector_t offset;
@@ -749,7 +750,7 @@ static int empty_dir(struct inode *dir)
struct udf_fileident_bh fibh;
loff_t f_pos;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
- int block;
+ udf_pblk_t block;
struct kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
@@ -839,7 +840,7 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
if (retval)
goto end_rmdir;
if (inode->i_nlink != 2)
- udf_warn(inode->i_sb, "empty directory has nlink != 2 (%d)\n",
+ udf_warn(inode->i_sb, "empty directory has nlink != 2 (%u)\n",
inode->i_nlink);
clear_nlink(inode);
inode->i_size = 0;
@@ -881,7 +882,7 @@ static int udf_unlink(struct inode *dir, struct dentry *dentry)
goto end_unlink;
if (!inode->i_nlink) {
- udf_debug("Deleting nonexistent file (%lu), %d\n",
+ udf_debug("Deleting nonexistent file (%lu), %u\n",
inode->i_ino, inode->i_nlink);
set_nlink(inode, 1);
}
@@ -913,7 +914,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
int eoffset, elen = 0;
uint8_t *ea;
int err;
- int block;
+ udf_pblk_t block;
unsigned char *name = NULL;
int namelen;
struct udf_inode_info *iinfo;
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index 888c364b2fe9..090baff83990 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -32,7 +32,7 @@ uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_part_map *map;
if (partition >= sbi->s_partitions) {
- udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n",
+ udf_debug("block=%u, partition=%u, offset=%u: invalid partition\n",
block, partition, offset);
return 0xFFFFFFFF;
}
@@ -59,7 +59,7 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
vdata = &map->s_type_specific.s_virtual;
if (block > vdata->s_num_entries) {
- udf_debug("Trying to access block beyond end of VAT (%d max %d)\n",
+ udf_debug("Trying to access block beyond end of VAT (%u max %u)\n",
block, vdata->s_num_entries);
return 0xFFFFFFFF;
}
@@ -83,7 +83,7 @@ uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
bh = sb_bread(sb, loc);
if (!bh) {
- udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
+ udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%u,%u) VAT: %u[%u]\n",
sb, block, partition, loc, index);
return 0xFFFFFFFF;
}
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 99cb81d0077f..f80e0a0f24d3 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -366,7 +366,7 @@ static int udf_show_options(struct seq_file *seq, struct dentry *root)
if (sbi->s_dmode != UDF_INVALID_MODE)
seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
- seq_printf(seq, ",session=%u", sbi->s_session);
+ seq_printf(seq, ",session=%d", sbi->s_session);
if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
if (sbi->s_anchor != 0)
@@ -703,9 +703,9 @@ static loff_t udf_check_vsd(struct super_block *sb)
else
sectorsize = sb->s_blocksize;
- sector += (sbi->s_session << sb->s_blocksize_bits);
+ sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
- udf_debug("Starting at sector %u (%ld byte sectors)\n",
+ udf_debug("Starting at sector %u (%lu byte sectors)\n",
(unsigned int)(sector >> sb->s_blocksize_bits),
sb->s_blocksize);
/* Process the sequence (if applicable). The hard limit on the sector
@@ -868,7 +868,7 @@ static int udf_find_fileset(struct super_block *sb,
if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
fileset->partitionReferenceNum != 0xFFFF) && bh) {
- udf_debug("Fileset at block=%d, partition=%d\n",
+ udf_debug("Fileset at block=%u, partition=%u\n",
fileset->logicalBlockNum,
fileset->partitionReferenceNum);
@@ -981,14 +981,14 @@ static int udf_load_metadata_files(struct super_block *sb, int partition,
mdata->s_phys_partition_ref = type1_index;
/* metadata address */
- udf_debug("Metadata file location: block = %d part = %d\n",
+ udf_debug("Metadata file location: block = %u part = %u\n",
mdata->s_meta_file_loc, mdata->s_phys_partition_ref);
fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
mdata->s_phys_partition_ref);
if (IS_ERR(fe)) {
/* mirror file entry */
- udf_debug("Mirror metadata file location: block = %d part = %d\n",
+ udf_debug("Mirror metadata file location: block = %u part = %u\n",
mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);
fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
@@ -1012,7 +1012,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition,
addr.logicalBlockNum = mdata->s_bitmap_file_loc;
addr.partitionReferenceNum = mdata->s_phys_partition_ref;
- udf_debug("Bitmap file location: block = %d part = %d\n",
+ udf_debug("Bitmap file location: block = %u part = %u\n",
addr.logicalBlockNum, addr.partitionReferenceNum);
fe = udf_iget_special(sb, &addr);
@@ -1042,7 +1042,7 @@ static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
- udf_debug("Rootdir at block=%d, partition=%d\n",
+ udf_debug("Rootdir at block=%u, partition=%u\n",
root->logicalBlockNum, root->partitionReferenceNum);
}
@@ -1097,7 +1097,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
- udf_debug("Partition (%d type %x) starts at physical %d, block length %d\n",
+ udf_debug("Partition (%d type %x) starts at physical %u, block length %u\n",
p_index, map->s_partition_type,
map->s_partition_root, map->s_partition_len);
@@ -1122,7 +1122,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
}
map->s_uspace.s_table = inode;
map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
- udf_debug("unallocSpaceTable (part %d) @ %ld\n",
+ udf_debug("unallocSpaceTable (part %d) @ %lu\n",
p_index, map->s_uspace.s_table->i_ino);
}
@@ -1134,7 +1134,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
bitmap->s_extPosition = le32_to_cpu(
phd->unallocSpaceBitmap.extPosition);
map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
- udf_debug("unallocSpaceBitmap (part %d) @ %d\n",
+ udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
p_index, bitmap->s_extPosition);
}
@@ -1157,7 +1157,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
}
map->s_fspace.s_table = inode;
map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
- udf_debug("freedSpaceTable (part %d) @ %ld\n",
+ udf_debug("freedSpaceTable (part %d) @ %lu\n",
p_index, map->s_fspace.s_table->i_ino);
}
@@ -1169,7 +1169,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
bitmap->s_extPosition = le32_to_cpu(
phd->freedSpaceBitmap.extPosition);
map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
- udf_debug("freedSpaceBitmap (part %d) @ %d\n",
+ udf_debug("freedSpaceBitmap (part %d) @ %u\n",
p_index, bitmap->s_extPosition);
}
return 0;
@@ -1282,7 +1282,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
/* First scan for TYPE1 and SPARABLE partitions */
for (i = 0; i < sbi->s_partitions; i++) {
map = &sbi->s_partmaps[i];
- udf_debug("Searching map: (%d == %d)\n",
+ udf_debug("Searching map: (%u == %u)\n",
map->s_partition_num, partitionNumber);
if (map->s_partition_num == partitionNumber &&
(map->s_partition_type == UDF_TYPE1_MAP15 ||
@@ -1291,7 +1291,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
}
if (i >= sbi->s_partitions) {
- udf_debug("Partition (%d) not found in partition map\n",
+ udf_debug("Partition (%u) not found in partition map\n",
partitionNumber);
ret = 0;
goto out_bh;
@@ -1483,7 +1483,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
struct metadataPartitionMap *mdm =
(struct metadataPartitionMap *)
&(lvd->partitionMaps[offset]);
- udf_debug("Parsing Logical vol part %d type %d id=%s\n",
+ udf_debug("Parsing Logical vol part %d type %u id=%s\n",
i, type, UDF_ID_METADATA);
map->s_partition_type = UDF_METADATA_MAP25;
@@ -1505,17 +1505,17 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
udf_debug("Metadata Ident suffix=0x%x\n",
le16_to_cpu(*(__le16 *)
mdm->partIdent.identSuffix));
- udf_debug("Metadata part num=%d\n",
+ udf_debug("Metadata part num=%u\n",
le16_to_cpu(mdm->partitionNum));
- udf_debug("Metadata part alloc unit size=%d\n",
+ udf_debug("Metadata part alloc unit size=%u\n",
le32_to_cpu(mdm->allocUnitSize));
- udf_debug("Metadata file loc=%d\n",
+ udf_debug("Metadata file loc=%u\n",
le32_to_cpu(mdm->metadataFileLoc));
- udf_debug("Mirror file loc=%d\n",
+ udf_debug("Mirror file loc=%u\n",
le32_to_cpu(mdm->metadataMirrorFileLoc));
- udf_debug("Bitmap file loc=%d\n",
+ udf_debug("Bitmap file loc=%u\n",
le32_to_cpu(mdm->metadataBitmapFileLoc));
- udf_debug("Flags: %d %d\n",
+ udf_debug("Flags: %d %u\n",
mdata->s_flags, mdm->flags);
} else {
udf_debug("Unknown ident: %s\n",
@@ -1525,7 +1525,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
map->s_partition_num = le16_to_cpu(upm2->partitionNum);
}
- udf_debug("Partition (%d:%d) type %d on volume %d\n",
+ udf_debug("Partition (%d:%u) type %u on volume %u\n",
i, map->s_partition_num, type, map->s_volumeseqnum);
}
@@ -1533,7 +1533,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
*fileset = lelb_to_cpu(la->extLocation);
- udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n",
+ udf_debug("FileSet found in LogicalVolDesc at block=%u, partition=%u\n",
fileset->logicalBlockNum,
fileset->partitionReferenceNum);
}
@@ -2159,7 +2159,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
ret = udf_load_vrs(sb, &uopt, silent, &fileset);
if (ret < 0) {
if (!silent && ret != -EACCES) {
- pr_notice("Scanning with blocksize %d failed\n",
+ pr_notice("Scanning with blocksize %u failed\n",
uopt.blocksize);
}
brelse(sbi->s_lvid_bh);
@@ -2184,7 +2184,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
goto error_out;
}
- udf_debug("Lastblock=%d\n", sbi->s_last_block);
+ udf_debug("Lastblock=%u\n", sbi->s_last_block);
if (sbi->s_lvid_bh) {
struct logicalVolIntegrityDescImpUse *lvidiu =
@@ -2255,7 +2255,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
/* perhaps it's not extensible enough, but for now ... */
inode = udf_iget(sb, &rootdir);
if (IS_ERR(inode)) {
- udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
+ udf_err(sb, "Error in udf_iget, block=%u, partition=%u\n",
rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
ret = PTR_ERR(inode);
goto error_out;
@@ -2389,7 +2389,7 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb,
struct buffer_head *bh = NULL;
unsigned int accum = 0;
int index;
- int block = 0, newblock;
+ udf_pblk_t block = 0, newblock;
struct kernel_lb_addr loc;
uint32_t bytes;
uint8_t *ptr;
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 42b8c57795cb..b647f0bd150c 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -48,7 +48,7 @@ static void extent_trunc(struct inode *inode, struct extent_position *epos,
if (elen != nelen) {
udf_write_aext(inode, epos, &neloc, nelen, 0);
- if (last_block - first_block > 0) {
+ if (last_block > first_block) {
if (etype == (EXT_RECORDED_ALLOCATED >> 30))
mark_inode_dirty(inode);
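
The rewritten test above compares the two unsigned block counters directly. A small standalone illustration, not taken from the patch, of why last_block - first_block > 0 is a weaker test than last_block > first_block once both operands are unsigned:

	#include <stdio.h>

	int main(void)
	{
		unsigned long first = 10, last = 5;

		/* Unsigned subtraction wraps instead of going negative, so this
		 * branch is taken whenever the two values differ at all. */
		if (last - first > 0)
			printf("subtraction form: taken\n");

		/* The direct comparison expresses the intended condition. */
		if (last > first)
			printf("comparison form: taken\n");
		return 0;
	}
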
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index fa206558128d..f5e0fe78979e 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -74,6 +74,8 @@ static inline size_t udf_ext0_offset(struct inode *inode)
/* computes tag checksum */
u8 udf_tag_checksum(const struct tag *t);
+typedef uint32_t udf_pblk_t;
+
struct dentry;
struct inode;
struct task_struct;
@@ -145,15 +147,17 @@ static inline struct inode *udf_iget(struct super_block *sb,
return __udf_iget(sb, ino, false);
}
extern int udf_expand_file_adinicb(struct inode *);
-extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
-extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
+extern struct buffer_head *udf_expand_dir_adinicb(struct inode *inode,
+ udf_pblk_t *block, int *err);
+extern struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
+ int create, int *err);
extern int udf_setsize(struct inode *, loff_t);
extern void udf_evict_inode(struct inode *);
extern int udf_write_inode(struct inode *, struct writeback_control *wbc);
-extern long udf_block_map(struct inode *, sector_t);
+extern udf_pblk_t udf_block_map(struct inode *inode, sector_t block);
extern int8_t inode_bmap(struct inode *, sector_t, struct extent_position *,
struct kernel_lb_addr *, uint32_t *, sector_t *);
-extern int udf_setup_indirect_aext(struct inode *inode, int block,
+extern int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
struct extent_position *epos);
extern int __udf_add_aext(struct inode *inode, struct extent_position *epos,
struct kernel_lb_addr *eloc, uint32_t elen, int inc);
@@ -169,8 +173,9 @@ extern int8_t udf_current_aext(struct inode *, struct extent_position *,
struct kernel_lb_addr *, uint32_t *, int);
/* misc.c */
-extern struct buffer_head *udf_tgetblk(struct super_block *, int);
-extern struct buffer_head *udf_tread(struct super_block *, int);
+extern struct buffer_head *udf_tgetblk(struct super_block *sb,
+ udf_pblk_t block);
+extern struct buffer_head *udf_tread(struct super_block *sb, udf_pblk_t block);
extern struct genericFormat *udf_add_extendedattr(struct inode *, uint32_t,
uint32_t, uint8_t);
extern struct genericFormat *udf_get_extendedattr(struct inode *, uint32_t,
@@ -229,8 +234,8 @@ extern void udf_free_blocks(struct super_block *, struct inode *,
struct kernel_lb_addr *, uint32_t, uint32_t);
extern int udf_prealloc_blocks(struct super_block *, struct inode *, uint16_t,
uint32_t, uint32_t);
-extern int udf_new_block(struct super_block *, struct inode *, uint16_t,
- uint32_t, int *);
+extern udf_pblk_t udf_new_block(struct super_block *sb, struct inode *inode,
+ uint16_t partition, uint32_t goal, int *err);
/* directory.c */
extern struct fileIdentDesc *udf_fileident_read(struct inode *, loff_t *,
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 695389a4fc23..f897e55f2cd0 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -200,7 +200,7 @@ static int udf_name_from_CS0(uint8_t *str_o, int str_max_len,
cmp_id = ocu[0];
if (cmp_id != 8 && cmp_id != 16) {
memset(str_o, 0, str_max_len);
- pr_err("unknown compression code (%d)\n", cmp_id);
+ pr_err("unknown compression code (%u)\n", cmp_id);
return -EINVAL;
}
u_ch = cmp_id >> 3;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 1c713fd5b3e6..ac9a4e65ca49 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -381,7 +381,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
* in __get_user_pages if userfaultfd_release waits on the
* caller of handle_userfault to release the mmap_sem.
*/
- if (unlikely(ACCESS_ONCE(ctx->released))) {
+ if (unlikely(READ_ONCE(ctx->released))) {
/*
* Don't return VM_FAULT_SIGBUS in this case, so a non
* cooperative manager can close the uffd after the
@@ -477,7 +477,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
vmf->flags, reason);
up_read(&mm->mmap_sem);
- if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
+ if (likely(must_wait && !READ_ONCE(ctx->released) &&
(return_to_userland ? !signal_pending(current) :
!fatal_signal_pending(current)))) {
wake_up_poll(&ctx->fd_wqh, POLLIN);
@@ -586,7 +586,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
set_current_state(TASK_KILLABLE);
if (ewq->msg.event == 0)
break;
- if (ACCESS_ONCE(ctx->released) ||
+ if (READ_ONCE(ctx->released) ||
fatal_signal_pending(current)) {
/*
* &ewq->wq may be queued in fork_event, but
@@ -668,7 +668,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
ctx->features = octx->features;
ctx->released = false;
ctx->mm = vma->vm_mm;
- atomic_inc(&ctx->mm->mm_count);
+ mmgrab(ctx->mm);
userfaultfd_ctx_get(octx);
fctx->orig = octx;
@@ -833,7 +833,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
struct userfaultfd_wake_range range = { .len = 0, };
unsigned long new_flags;
- ACCESS_ONCE(ctx->released) = true;
+ WRITE_ONCE(ctx->released, true);
if (!mmget_not_zero(mm))
goto wakeup;
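
The userfaultfd hunks above replace ACCESS_ONCE() with the READ_ONCE()/WRITE_ONCE() pair and the open-coded atomic_inc(&ctx->mm->mm_count) with mmgrab(). A minimal sketch, assuming a hypothetical flag that mirrors ctx->released, of the explicit load/store split the new macros provide:

	#include <linux/compiler.h>

	static bool released;			/* hypothetical, standing in for ctx->released */

	static void mark_released(void)
	{
		WRITE_ONCE(released, true);	/* was: ACCESS_ONCE(released) = true */
	}

	static bool is_released(void)
	{
		return READ_ONCE(released);	/* was: ACCESS_ONCE(released) */
	}
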
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index 1b98cfa342ab..f42fcf1b5465 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -71,6 +71,23 @@ config XFS_RT
If unsure, say N.
+config XFS_ONLINE_SCRUB
+ bool "XFS online metadata check support"
+ default n
+ depends on XFS_FS
+ help
+ If you say Y here you will be able to check metadata on a
+ mounted XFS filesystem. This feature is intended to reduce
+ filesystem downtime by supplementing xfs_repair. The key
+ advantage here is to look for problems proactively so that
+ they can be dealt with in a controlled manner.
+
+ This feature is considered EXPERIMENTAL. Use with caution!
+
+ See the xfs_scrub man page in section 8 for additional information.
+
+ If unsure, say N.
+
config XFS_WARN
bool "XFS Verbose Warnings"
depends on XFS_FS && !XFS_DEBUG
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index a6e955bfead8..7ceb41a9786a 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -49,6 +49,7 @@ xfs-y += $(addprefix libxfs/, \
xfs_dquot_buf.o \
xfs_ialloc.o \
xfs_ialloc_btree.o \
+ xfs_iext_tree.o \
xfs_inode_fork.o \
xfs_inode_buf.o \
xfs_log_rlimit.o \
@@ -135,3 +136,31 @@ xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
xfs-$(CONFIG_SYSCTL) += xfs_sysctl.o
xfs-$(CONFIG_COMPAT) += xfs_ioctl32.o
xfs-$(CONFIG_EXPORTFS_BLOCK_OPS) += xfs_pnfs.o
+
+# online scrub/repair
+ifeq ($(CONFIG_XFS_ONLINE_SCRUB),y)
+
+# Tracepoints like to blow up, so build that before everything else
+
+xfs-y += $(addprefix scrub/, \
+ trace.o \
+ agheader.o \
+ alloc.o \
+ attr.o \
+ bmap.o \
+ btree.o \
+ common.o \
+ dabtree.o \
+ dir.o \
+ ialloc.o \
+ inode.o \
+ parent.o \
+ refcount.o \
+ rmap.o \
+ scrub.o \
+ symlink.o \
+ )
+
+xfs-$(CONFIG_XFS_RT) += scrub/rtbitmap.o
+xfs-$(CONFIG_XFS_QUOTA) += scrub/quota.o
+endif
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 4d85992d75b2..4b87472f35bc 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -104,7 +104,7 @@ kmem_zone_init(int size, char *zone_name)
}
static inline kmem_zone_t *
-kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
+kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
void (*construct)(void *))
{
return kmem_cache_create(zone_name, size, 0, flags, construct);
@@ -119,8 +119,7 @@ kmem_zone_free(kmem_zone_t *zone, void *ptr)
static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
- if (zone)
- kmem_cache_destroy(zone);
+ kmem_cache_destroy(zone);
}
extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);
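
The kmem_zone_destroy() hunk drops the NULL check because kmem_cache_destroy(), like kfree(), already tolerates a NULL pointer and simply returns, so the wrapper can forward its argument unconditionally. A short sketch of the calling pattern this enables, using the names from the header above:

	kmem_zone_t *zone = NULL;	/* e.g. a zone that was never set up */

	kmem_zone_destroy(zone);	/* safe: kmem_cache_destroy(NULL) is a no-op */
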
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index df3e600835e8..2291f4224e24 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -27,6 +27,7 @@
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_alloc.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index f965ce832bc0..0da80019a917 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -31,6 +31,7 @@
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
@@ -2931,3 +2932,52 @@ xfs_alloc_query_all(
query.fn = fn;
return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
}
+
+/* Find the size of the AG, in blocks. */
+xfs_agblock_t
+xfs_ag_block_count(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno)
+{
+ ASSERT(agno < mp->m_sb.sb_agcount);
+
+ if (agno < mp->m_sb.sb_agcount - 1)
+ return mp->m_sb.sb_agblocks;
+ return mp->m_sb.sb_dblocks - (agno * mp->m_sb.sb_agblocks);
+}
+
+/*
+ * Verify that an AG block number pointer neither points outside the AG
+ * nor points at static metadata.
+ */
+bool
+xfs_verify_agbno(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t agbno)
+{
+ xfs_agblock_t eoag;
+
+ eoag = xfs_ag_block_count(mp, agno);
+ if (agbno >= eoag)
+ return false;
+ if (agbno <= XFS_AGFL_BLOCK(mp))
+ return false;
+ return true;
+}
+
+/*
+ * Verify that an FS block number pointer neither points outside the
+ * filesystem nor points at static AG metadata.
+ */
+bool
+xfs_verify_fsbno(
+ struct xfs_mount *mp,
+ xfs_fsblock_t fsbno)
+{
+ xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, fsbno);
+
+ if (agno >= mp->m_sb.sb_agcount)
+ return false;
+ return xfs_verify_agbno(mp, agno, XFS_FSB_TO_AGBNO(mp, fsbno));
+}
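
The two verifiers added above split a filesystem block number into its AG number and AG-relative block before range-checking both. A hedged usage sketch; the caller is hypothetical, with -EFSCORRUPTED shown as the conventional XFS return for a bad on-disk pointer:

	/* Validate an on-disk block pointer before following it. */
	if (!xfs_verify_fsbno(mp, bno))
		return -EFSCORRUPTED;
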
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index ef26edc2e938..7ba2d129d504 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -232,5 +232,9 @@ int xfs_alloc_query_range(struct xfs_btree_cur *cur,
xfs_alloc_query_range_fn fn, void *priv);
int xfs_alloc_query_all(struct xfs_btree_cur *cur, xfs_alloc_query_range_fn fn,
void *priv);
+xfs_agblock_t xfs_ag_block_count(struct xfs_mount *mp, xfs_agnumber_t agno);
+bool xfs_verify_agbno(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_agblock_t agbno);
+bool xfs_verify_fsbno(struct xfs_mount *mp, xfs_fsblock_t fsbno);
#endif /* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 5c16db86b38f..53cc8b986eac 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -397,13 +397,9 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
/* rounded down */
offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3;
- switch (dp->i_d.di_format) {
- case XFS_DINODE_FMT_DEV:
+ if (dp->i_d.di_format == XFS_DINODE_FMT_DEV) {
minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
return (offset >= minforkoff) ? minforkoff : 0;
- case XFS_DINODE_FMT_UUID:
- minforkoff = roundup(sizeof(uuid_t), 8) >> 3;
- return (offset >= minforkoff) ? minforkoff : 0;
}
/*
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 89263797cf32..08df809e2315 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -38,6 +38,7 @@
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
@@ -112,28 +113,21 @@ xfs_bmap_compute_maxlevels(
STATIC int /* error */
xfs_bmbt_lookup_eq(
struct xfs_btree_cur *cur,
- xfs_fileoff_t off,
- xfs_fsblock_t bno,
- xfs_filblks_t len,
+ struct xfs_bmbt_irec *irec,
int *stat) /* success/failure */
{
- cur->bc_rec.b.br_startoff = off;
- cur->bc_rec.b.br_startblock = bno;
- cur->bc_rec.b.br_blockcount = len;
+ cur->bc_rec.b = *irec;
return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
STATIC int /* error */
-xfs_bmbt_lookup_ge(
+xfs_bmbt_lookup_first(
struct xfs_btree_cur *cur,
- xfs_fileoff_t off,
- xfs_fsblock_t bno,
- xfs_filblks_t len,
int *stat) /* success/failure */
{
- cur->bc_rec.b.br_startoff = off;
- cur->bc_rec.b.br_startblock = bno;
- cur->bc_rec.b.br_blockcount = len;
+ cur->bc_rec.b.br_startoff = 0;
+ cur->bc_rec.b.br_startblock = 0;
+ cur->bc_rec.b.br_blockcount = 0;
return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
@@ -160,21 +154,17 @@ static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
}
/*
- * Update the record referred to by cur to the value given
- * by [off, bno, len, state].
+ * Update the record referred to by cur to the value given by irec
* This either works (return 0) or gets an EFSCORRUPTED error.
*/
STATIC int
xfs_bmbt_update(
struct xfs_btree_cur *cur,
- xfs_fileoff_t off,
- xfs_fsblock_t bno,
- xfs_filblks_t len,
- xfs_exntst_t state)
+ struct xfs_bmbt_irec *irec)
{
union xfs_btree_rec rec;
- xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
+ xfs_bmbt_disk_set_all(&rec.bmbt, irec);
return xfs_btree_update(cur, &rec);
}
@@ -242,7 +232,6 @@ xfs_bmap_forkoff_reset(
{
if (whichfork == XFS_ATTR_FORK &&
ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
- ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
@@ -499,31 +488,6 @@ error_norelse:
}
/*
- * Add bmap trace insert entries for all the contents of the extent records.
- */
-void
-xfs_bmap_trace_exlist(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t cnt, /* count of entries in the list */
- int whichfork, /* data or attr or cow fork */
- unsigned long caller_ip)
-{
- xfs_extnum_t idx; /* extent record index */
- xfs_ifork_t *ifp; /* inode fork pointer */
- int state = 0;
-
- if (whichfork == XFS_ATTR_FORK)
- state |= BMAP_ATTRFORK;
- else if (whichfork == XFS_COW_FORK)
- state |= BMAP_COWFORK;
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- ASSERT(cnt == xfs_iext_count(ifp));
- for (idx = 0; idx < cnt; idx++)
- trace_xfs_extlist(ip, idx, state, caller_ip);
-}
-
-/*
* Validate that the bmbt_irecs being returned from bmapi are valid
* given the caller's original parameters. Specifically check the
* ranges of the returned irecs to ensure that they only extend beyond
@@ -657,8 +621,8 @@ xfs_bmap_btree_to_extents(
cbno = be64_to_cpu(*pp);
*logflagsp = 0;
#ifdef DEBUG
- if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
- return error;
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
+ xfs_btree_check_lptr(cur, cbno, 1));
#endif
error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
&xfs_bmbt_buf_ops);
@@ -703,14 +667,14 @@ xfs_bmap_extents_to_btree(
xfs_bmbt_rec_t *arp; /* child record pointer */
struct xfs_btree_block *block; /* btree root block */
xfs_btree_cur_t *cur; /* bmap btree cursor */
- xfs_bmbt_rec_host_t *ep; /* extent record pointer */
int error; /* error return value */
- xfs_extnum_t i, cnt; /* extent record index */
xfs_ifork_t *ifp; /* inode fork pointer */
xfs_bmbt_key_t *kp; /* root block key pointer */
xfs_mount_t *mp; /* mount structure */
- xfs_extnum_t nextents; /* number of file extents */
xfs_bmbt_ptr_t *pp; /* root block address pointer */
+ struct xfs_iext_cursor icur;
+ struct xfs_bmbt_irec rec;
+ xfs_extnum_t cnt = 0;
mp = ip->i_mount;
ASSERT(whichfork != XFS_COW_FORK);
@@ -789,15 +753,12 @@ xfs_bmap_extents_to_btree(
XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
XFS_BTREE_LONG_PTRS);
- arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
- nextents = xfs_iext_count(ifp);
- for (cnt = i = 0; i < nextents; i++) {
- ep = xfs_iext_get_ext(ifp, i);
- if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
- arp->l0 = cpu_to_be64(ep->l0);
- arp->l1 = cpu_to_be64(ep->l1);
- arp++; cnt++;
- }
+ for_each_xfs_iext(ifp, &icur, &rec) {
+ if (isnullstartblock(rec.br_startblock))
+ continue;
+ arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
+ xfs_bmbt_disk_set_all(arp, &rec);
+ cnt++;
}
ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
xfs_btree_set_numrecs(ablock, cnt);
@@ -845,6 +806,8 @@ xfs_bmap_local_to_extents_empty(
xfs_bmap_forkoff_reset(ip, whichfork);
ifp->if_flags &= ~XFS_IFINLINE;
ifp->if_flags |= XFS_IFEXTENTS;
+ ifp->if_u1.if_root = NULL;
+ ifp->if_height = 0;
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}
@@ -868,6 +831,7 @@ xfs_bmap_local_to_extents(
xfs_alloc_arg_t args; /* allocation arguments */
xfs_buf_t *bp; /* buffer for extent block */
struct xfs_bmbt_irec rec;
+ struct xfs_iext_cursor icur;
/*
* We don't want to deal with the case of keeping inode data inline yet.
@@ -885,8 +849,7 @@ xfs_bmap_local_to_extents(
flags = 0;
error = 0;
- ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
- XFS_IFINLINE);
+ ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
memset(&args, 0, sizeof(args));
args.tp = tp;
args.mp = ip->i_mount;
@@ -930,15 +893,16 @@ xfs_bmap_local_to_extents(
xfs_bmap_local_to_extents_empty(ip, whichfork);
flags |= XFS_ILOG_CORE;
+ ifp->if_u1.if_root = NULL;
+ ifp->if_height = 0;
+
rec.br_startoff = 0;
rec.br_startblock = args.fsbno;
rec.br_blockcount = 1;
rec.br_state = XFS_EXT_NORM;
- xfs_iext_insert(ip, 0, 1, &rec, 0);
+ xfs_iext_first(ifp, &icur);
+ xfs_iext_insert(ip, &icur, &rec, 0);
- trace_xfs_bmap_post_update(ip, 0,
- whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
- _THIS_IP_);
XFS_IFORK_NEXT_SET(ip, whichfork, 1);
ip->i_d.di_nblocks = 1;
xfs_trans_mod_dquot_byino(tp, ip,
@@ -973,7 +937,8 @@ xfs_bmap_add_attrfork_btree(
cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
cur->bc_private.b.dfops = dfops;
cur->bc_private.b.firstblock = *firstblock;
- if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
+ error = xfs_bmbt_lookup_first(cur, &stat);
+ if (error)
goto error0;
/* must be at least one entry */
XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
@@ -1124,9 +1089,6 @@ xfs_bmap_add_attrfork(
case XFS_DINODE_FMT_DEV:
ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
break;
- case XFS_DINODE_FMT_UUID:
- ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
- break;
case XFS_DINODE_FMT_LOCAL:
case XFS_DINODE_FMT_EXTENTS:
case XFS_DINODE_FMT_BTREE:
@@ -1206,32 +1168,35 @@ trans_cancel:
*/
/*
- * Read in the extents to if_extents.
- * All inode fields are set up by caller, we just traverse the btree
- * and copy the records in. If the file system cannot contain unwritten
- * extents, the records are checked for no "state" flags.
+ * Read in extents from a btree-format inode.
*/
-int /* error */
-xfs_bmap_read_extents(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode */
- int whichfork) /* data or attr fork */
+int
+xfs_iread_extents(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ int whichfork)
{
- struct xfs_btree_block *block; /* current btree block */
- xfs_fsblock_t bno; /* block # of "block" */
- xfs_buf_t *bp; /* buffer for "block" */
- int error; /* error return value */
- xfs_extnum_t i, j; /* index into the extents list */
- xfs_ifork_t *ifp; /* fork structure */
- int level; /* btree level, for checking */
- xfs_mount_t *mp; /* file system mount structure */
- __be64 *pp; /* pointer to block address */
- /* REFERENCED */
- xfs_extnum_t room; /* number of entries there's room for */
+ struct xfs_mount *mp = ip->i_mount;
+ int state = xfs_bmap_fork_to_state(whichfork);
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ xfs_extnum_t nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
+ struct xfs_btree_block *block = ifp->if_broot;
+ struct xfs_iext_cursor icur;
+ struct xfs_bmbt_irec new;
+ xfs_fsblock_t bno;
+ struct xfs_buf *bp;
+ xfs_extnum_t i, j;
+ int level;
+ __be64 *pp;
+ int error;
+
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+
+ if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+ return -EFSCORRUPTED;
+ }
- mp = ip->i_mount;
- ifp = XFS_IFORK_PTR(ip, whichfork);
- block = ifp->if_broot;
/*
* Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
*/
@@ -1248,21 +1213,23 @@ xfs_bmap_read_extents(
error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
if (error)
- return error;
+ goto out;
block = XFS_BUF_TO_BLOCK(bp);
if (level == 0)
break;
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
bno = be64_to_cpu(*pp);
XFS_WANT_CORRUPTED_GOTO(mp,
- XFS_FSB_SANITY_CHECK(mp, bno), error0);
+ XFS_FSB_SANITY_CHECK(mp, bno), out_brelse);
xfs_trans_brelse(tp, bp);
}
+
/*
* Here with bp and block set to the leftmost leaf node in the tree.
*/
- room = xfs_iext_count(ifp);
i = 0;
+ xfs_iext_first(ifp, &icur);
+
/*
* Loop over all leaf nodes. Copy information to the extent records.
*/
@@ -1272,14 +1239,15 @@ xfs_bmap_read_extents(
xfs_extnum_t num_recs;
num_recs = xfs_btree_get_numrecs(block);
- if (unlikely(i + num_recs > room)) {
- ASSERT(i + num_recs <= room);
+ if (unlikely(i + num_recs > nextents)) {
+ ASSERT(i + num_recs <= nextents);
xfs_warn(ip->i_mount,
"corrupt dinode %Lu, (btree extents).",
(unsigned long long) ip->i_ino);
- XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
+ XFS_CORRUPTION_ERROR(__func__,
XFS_ERRLEVEL_LOW, ip->i_mount, block);
- goto error0;
+ error = -EFSCORRUPTED;
+ goto out_brelse;
}
/*
* Read-ahead the next leaf block, if any.
@@ -1292,15 +1260,17 @@ xfs_bmap_read_extents(
* Copy records into the extent records.
*/
frp = XFS_BMBT_REC_ADDR(mp, block, 1);
- for (j = 0; j < num_recs; j++, i++, frp++) {
- xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
- trp->l0 = be64_to_cpu(frp->l0);
- trp->l1 = be64_to_cpu(frp->l1);
- if (!xfs_bmbt_validate_extent(mp, whichfork, trp)) {
+ for (j = 0; j < num_recs; j++, frp++, i++) {
+ xfs_bmbt_disk_get_all(frp, &new);
+ if (!xfs_bmbt_validate_extent(mp, whichfork, &new)) {
XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
XFS_ERRLEVEL_LOW, mp);
- goto error0;
+ error = -EFSCORRUPTED;
+ goto out_brelse;
}
+ xfs_iext_insert(ip, &icur, &new, state);
+ trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
+ xfs_iext_next(ifp, &icur);
}
xfs_trans_brelse(tp, bp);
bno = nextbno;
@@ -1312,71 +1282,74 @@ xfs_bmap_read_extents(
error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
if (error)
- return error;
+ goto out;
block = XFS_BUF_TO_BLOCK(bp);
}
- if (i != XFS_IFORK_NEXTENTS(ip, whichfork))
- return -EFSCORRUPTED;
+
+ if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
ASSERT(i == xfs_iext_count(ifp));
- XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
+
+ ifp->if_flags |= XFS_IFEXTENTS;
return 0;
-error0:
+
+out_brelse:
xfs_trans_brelse(tp, bp);
- return -EFSCORRUPTED;
+out:
+ xfs_iext_destroy(ifp);
+ return error;
}
/*
- * Returns the file-relative block number of the first unused block(s)
- * in the file with at least "len" logically contiguous blocks free.
- * This is the lowest-address hole if the file has holes, else the first block
- * past the end of file.
- * Return 0 if the file is currently local (in-inode).
+ * Returns the relative block number of the first unused block(s) in the given
+ * fork with at least "len" logically contiguous blocks free. This is the
+ * lowest-address hole if the fork has holes, else the first block past the end
+ * of fork. Return 0 if the fork is currently local (in-inode).
*/
int /* error */
xfs_bmap_first_unused(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_inode_t *ip, /* incore inode */
- xfs_extlen_t len, /* size of hole to find */
- xfs_fileoff_t *first_unused, /* unused block */
- int whichfork) /* data or attr fork */
+ struct xfs_trans *tp, /* transaction pointer */
+ struct xfs_inode *ip, /* incore inode */
+ xfs_extlen_t len, /* size of hole to find */
+ xfs_fileoff_t *first_unused, /* unused block */
+ int whichfork) /* data or attr fork */
{
- int error; /* error return value */
- int idx; /* extent record index */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_fileoff_t lastaddr; /* last block number seen */
- xfs_fileoff_t lowest; /* lowest useful block */
- xfs_fileoff_t max; /* starting useful block */
- xfs_extnum_t nextents; /* number of extent entries */
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_bmbt_irec got;
+ struct xfs_iext_cursor icur;
+ xfs_fileoff_t lastaddr = 0;
+ xfs_fileoff_t lowest, max;
+ int error;
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
+
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
*first_unused = 0;
return 0;
}
- ifp = XFS_IFORK_PTR(ip, whichfork);
- if (!(ifp->if_flags & XFS_IFEXTENTS) &&
- (error = xfs_iread_extents(tp, ip, whichfork)))
- return error;
- lowest = *first_unused;
- nextents = xfs_iext_count(ifp);
- for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
- struct xfs_bmbt_irec got;
- xfs_iext_get_extent(ifp, idx, &got);
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(tp, ip, whichfork);
+ if (error)
+ return error;
+ }
+ lowest = max = *first_unused;
+ for_each_xfs_iext(ifp, &icur, &got) {
/*
* See if the hole before this extent will work.
*/
if (got.br_startoff >= lowest + len &&
- got.br_startoff - max >= len) {
- *first_unused = max;
- return 0;
- }
+ got.br_startoff - max >= len)
+ break;
lastaddr = got.br_startoff + got.br_blockcount;
max = XFS_FILEOFF_MAX(lastaddr, lowest);
}
+
*first_unused = max;
return 0;
}
@@ -1396,7 +1369,7 @@ xfs_bmap_last_before(
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_bmbt_irec got;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor icur;
int error;
switch (XFS_IFORK_FORMAT(ip, whichfork)) {
@@ -1416,17 +1389,8 @@ xfs_bmap_last_before(
return error;
}
- if (xfs_iext_lookup_extent(ip, ifp, *last_block - 1, &idx, &got)) {
- if (got.br_startoff <= *last_block - 1)
- return 0;
- }
-
- if (xfs_iext_get_extent(ifp, idx - 1, &got)) {
- *last_block = got.br_startoff + got.br_blockcount;
- return 0;
- }
-
- *last_block = 0;
+ if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
+ *last_block = 0;
return 0;
}
@@ -1439,8 +1403,8 @@ xfs_bmap_last_extent(
int *is_empty)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_iext_cursor icur;
int error;
- int nextents;
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(tp, ip, whichfork);
@@ -1448,14 +1412,11 @@ xfs_bmap_last_extent(
return error;
}
- nextents = xfs_iext_count(ifp);
- if (nextents == 0) {
+ xfs_iext_last(ifp, &icur);
+ if (!xfs_iext_get_extent(ifp, &icur, rec))
*is_empty = 1;
- return 0;
- }
-
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
- *is_empty = 0;
+ else
+ *is_empty = 0;
return 0;
}
@@ -1540,10 +1501,10 @@ xfs_bmap_one_block(
xfs_inode_t *ip, /* incore inode */
int whichfork) /* data or attr fork */
{
- xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */
xfs_ifork_t *ifp; /* inode fork pointer */
int rval; /* return value */
xfs_bmbt_irec_t s; /* internal version of extent */
+ struct xfs_iext_cursor icur;
#ifndef DEBUG
if (whichfork == XFS_DATA_FORK)
@@ -1555,8 +1516,8 @@ xfs_bmap_one_block(
return 0;
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
- ep = xfs_iext_get_ext(ifp, 0);
- xfs_bmbt_get_all(ep, &s);
+ xfs_iext_first(ifp, &icur);
+ xfs_iext_get_extent(ifp, &icur, &s);
rval = s.br_startoff == 0 && s.br_blockcount == 1;
if (rval && whichfork == XFS_DATA_FORK)
ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
@@ -1576,8 +1537,6 @@ xfs_bmap_add_extent_delay_real(
int whichfork)
{
struct xfs_bmbt_irec *new = &bma->got;
- int diff; /* temp value */
- xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
int error; /* error return value */
int i; /* temp state */
xfs_ifork_t *ifp; /* inode fork pointer */
@@ -1585,14 +1544,14 @@ xfs_bmap_add_extent_delay_real(
xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
/* left is 0, right is 1, prev is 2 */
int rval=0; /* return value (logging flags) */
- int state = 0;/* state bits, accessed thru macros */
+ int state = xfs_bmap_fork_to_state(whichfork);
xfs_filblks_t da_new; /* new count del alloc blocks used */
xfs_filblks_t da_old; /* old count del alloc blocks used */
xfs_filblks_t temp=0; /* value for da_new calculations */
- xfs_filblks_t temp2=0;/* value for da_new calculations */
int tmp_rval; /* partial logging flags */
struct xfs_mount *mp;
xfs_extnum_t *nextents;
+ struct xfs_bmbt_irec old;
mp = bma->ip->i_mount;
ifp = XFS_IFORK_PTR(bma->ip, whichfork);
@@ -1600,8 +1559,6 @@ xfs_bmap_add_extent_delay_real(
nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
&bma->ip->i_d.di_nextents);
- ASSERT(bma->idx >= 0);
- ASSERT(bma->idx <= xfs_iext_count(ifp));
ASSERT(!isnullstartblock(new->br_startblock));
ASSERT(!bma->cur ||
(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
@@ -1612,15 +1569,12 @@ xfs_bmap_add_extent_delay_real(
#define RIGHT r[1]
#define PREV r[2]
- if (whichfork == XFS_COW_FORK)
- state |= BMAP_COWFORK;
-
/*
* Set up a bunch of variables to make the tests simpler.
*/
- ep = xfs_iext_get_ext(ifp, bma->idx);
- xfs_bmbt_get_all(ep, &PREV);
+ xfs_iext_get_extent(ifp, &bma->icur, &PREV);
new_endoff = new->br_startoff + new->br_blockcount;
+ ASSERT(isnullstartblock(PREV.br_startblock));
ASSERT(PREV.br_startoff <= new->br_startoff);
ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
@@ -1640,10 +1594,8 @@ xfs_bmap_add_extent_delay_real(
* Check and set flags if this segment has a left neighbor.
* Don't set contiguous if the combined extent would be too large.
*/
- if (bma->idx > 0) {
+ if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
state |= BMAP_LEFT_VALID;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
-
if (isnullstartblock(LEFT.br_startblock))
state |= BMAP_LEFT_DELAY;
}
@@ -1660,10 +1612,8 @@ xfs_bmap_add_extent_delay_real(
* Don't set contiguous if the combined extent would be too large.
* Also check for all-three-contiguous being too large.
*/
- if (bma->idx < xfs_iext_count(ifp) - 1) {
+ if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
state |= BMAP_RIGHT_VALID;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
-
if (isnullstartblock(RIGHT.br_startblock))
state |= BMAP_RIGHT_DELAY;
}
@@ -1693,22 +1643,19 @@ xfs_bmap_add_extent_delay_real(
* Filling in all of a previously delayed allocation extent.
* The left and right neighbors are both contiguous with new.
*/
- bma->idx--;
- trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
- LEFT.br_blockcount + PREV.br_blockcount +
- RIGHT.br_blockcount);
- trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
-
- xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
+ LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
+
+ xfs_iext_remove(bma->ip, &bma->icur, state);
+ xfs_iext_remove(bma->ip, &bma->icur, state);
+ xfs_iext_prev(ifp, &bma->icur);
+ xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
(*nextents)--;
+
if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
- RIGHT.br_startblock,
- RIGHT.br_blockcount, &i);
+ error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
@@ -1720,11 +1667,7 @@ xfs_bmap_add_extent_delay_real(
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
- LEFT.br_startblock,
- LEFT.br_blockcount +
- PREV.br_blockcount +
- RIGHT.br_blockcount, LEFT.br_state);
+ error = xfs_bmbt_update(bma->cur, &LEFT);
if (error)
goto done;
}
@@ -1735,28 +1678,22 @@ xfs_bmap_add_extent_delay_real(
* Filling in all of a previously delayed allocation extent.
* The left neighbor is contiguous, the right is not.
*/
- bma->idx--;
+ old = LEFT;
+ LEFT.br_blockcount += PREV.br_blockcount;
- trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
- LEFT.br_blockcount + PREV.br_blockcount);
- trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+ xfs_iext_remove(bma->ip, &bma->icur, state);
+ xfs_iext_prev(ifp, &bma->icur);
+ xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
- xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
if (bma->cur == NULL)
rval = XFS_ILOG_DEXT;
else {
rval = 0;
- error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
- LEFT.br_startblock, LEFT.br_blockcount,
- &i);
+ error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
- LEFT.br_startblock,
- LEFT.br_blockcount +
- PREV.br_blockcount, LEFT.br_state);
+ error = xfs_bmbt_update(bma->cur, &LEFT);
if (error)
goto done;
}
@@ -1767,27 +1704,23 @@ xfs_bmap_add_extent_delay_real(
* Filling in all of a previously delayed allocation extent.
* The right neighbor is contiguous, the left is not.
*/
- trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
- xfs_bmbt_set_startblock(ep, new->br_startblock);
- xfs_bmbt_set_blockcount(ep,
- PREV.br_blockcount + RIGHT.br_blockcount);
- trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+ PREV.br_startblock = new->br_startblock;
+ PREV.br_blockcount += RIGHT.br_blockcount;
+
+ xfs_iext_next(ifp, &bma->icur);
+ xfs_iext_remove(bma->ip, &bma->icur, state);
+ xfs_iext_prev(ifp, &bma->icur);
+ xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
- xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
if (bma->cur == NULL)
rval = XFS_ILOG_DEXT;
else {
rval = 0;
- error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
- RIGHT.br_startblock,
- RIGHT.br_blockcount, &i);
+ error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
- new->br_startblock,
- PREV.br_blockcount +
- RIGHT.br_blockcount, PREV.br_state);
+ error = xfs_bmbt_update(bma->cur, &PREV);
if (error)
goto done;
}
@@ -1799,23 +1732,19 @@ xfs_bmap_add_extent_delay_real(
* Neither the left nor right neighbors are contiguous with
* the new one.
*/
- trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
- xfs_bmbt_set_startblock(ep, new->br_startblock);
- xfs_bmbt_set_state(ep, new->br_state);
- trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+ PREV.br_startblock = new->br_startblock;
+ PREV.br_state = new->br_state;
+ xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
(*nextents)++;
if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
- new->br_startblock, new->br_blockcount,
- &i);
+ error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
- bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
@@ -1828,40 +1757,33 @@ xfs_bmap_add_extent_delay_real(
* Filling in the first part of a previous delayed allocation.
* The left neighbor is contiguous.
*/
- trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
- LEFT.br_blockcount + new->br_blockcount);
- xfs_bmbt_set_startoff(ep,
- PREV.br_startoff + new->br_blockcount);
- trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
-
+ old = LEFT;
temp = PREV.br_blockcount - new->br_blockcount;
- trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep, temp);
+ da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
+ startblockval(PREV.br_startblock));
+
+ LEFT.br_blockcount += new->br_blockcount;
+
+ PREV.br_blockcount = temp;
+ PREV.br_startoff += new->br_blockcount;
+ PREV.br_startblock = nullstartblock(da_new);
+
+ xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
+ xfs_iext_prev(ifp, &bma->icur);
+ xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
+
if (bma->cur == NULL)
rval = XFS_ILOG_DEXT;
else {
rval = 0;
- error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
- LEFT.br_startblock, LEFT.br_blockcount,
- &i);
+ error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
- LEFT.br_startblock,
- LEFT.br_blockcount +
- new->br_blockcount,
- LEFT.br_state);
+ error = xfs_bmbt_update(bma->cur, &LEFT);
if (error)
goto done;
}
- da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
- startblockval(PREV.br_startblock));
- xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
- trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
-
- bma->idx--;
break;
case BMAP_LEFT_FILLING:
@@ -1869,23 +1791,16 @@ xfs_bmap_add_extent_delay_real(
* Filling in the first part of a previous delayed allocation.
* The left neighbor is not contiguous.
*/
- trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
- xfs_bmbt_set_startoff(ep, new_endoff);
- temp = PREV.br_blockcount - new->br_blockcount;
- xfs_bmbt_set_blockcount(ep, temp);
- xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
+ xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
(*nextents)++;
if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
- new->br_startblock, new->br_blockcount,
- &i);
+ error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
- bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
@@ -1900,12 +1815,18 @@ xfs_bmap_add_extent_delay_real(
if (error)
goto done;
}
+
+ temp = PREV.br_blockcount - new->br_blockcount;
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
startblockval(PREV.br_startblock) -
(bma->cur ? bma->cur->bc_private.b.allocated : 0));
- ep = xfs_iext_get_ext(ifp, bma->idx + 1);
- xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
- trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
+
+ PREV.br_startoff = new_endoff;
+ PREV.br_blockcount = temp;
+ PREV.br_startblock = nullstartblock(da_new);
+ xfs_iext_next(ifp, &bma->icur);
+ xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
+ xfs_iext_prev(ifp, &bma->icur);
break;
case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -1913,40 +1834,34 @@ xfs_bmap_add_extent_delay_real(
* Filling in the last part of a previous delayed allocation.
* The right neighbor is contiguous with the new allocation.
*/
- temp = PREV.br_blockcount - new->br_blockcount;
- trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep, temp);
- xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
- new->br_startoff, new->br_startblock,
- new->br_blockcount + RIGHT.br_blockcount,
- RIGHT.br_state);
- trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
+ old = RIGHT;
+ RIGHT.br_startoff = new->br_startoff;
+ RIGHT.br_startblock = new->br_startblock;
+ RIGHT.br_blockcount += new->br_blockcount;
+
if (bma->cur == NULL)
rval = XFS_ILOG_DEXT;
else {
rval = 0;
- error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
- RIGHT.br_startblock,
- RIGHT.br_blockcount, &i);
+ error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_bmbt_update(bma->cur, new->br_startoff,
- new->br_startblock,
- new->br_blockcount +
- RIGHT.br_blockcount,
- RIGHT.br_state);
+ error = xfs_bmbt_update(bma->cur, &RIGHT);
if (error)
goto done;
}
+ temp = PREV.br_blockcount - new->br_blockcount;
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
startblockval(PREV.br_startblock));
- trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
- xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
- trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
- bma->idx++;
+ PREV.br_blockcount = temp;
+ PREV.br_startblock = nullstartblock(da_new);
+
+ xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
+ xfs_iext_next(ifp, &bma->icur);
+ xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
break;
case BMAP_RIGHT_FILLING:
@@ -1954,22 +1869,16 @@ xfs_bmap_add_extent_delay_real(
* Filling in the last part of a previous delayed allocation.
* The right neighbor is not contiguous.
*/
- temp = PREV.br_blockcount - new->br_blockcount;
- trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep, temp);
- xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
+ xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
(*nextents)++;
if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
- new->br_startblock, new->br_blockcount,
- &i);
+ error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
- bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
@@ -1984,14 +1893,16 @@ xfs_bmap_add_extent_delay_real(
if (error)
goto done;
}
+
+ temp = PREV.br_blockcount - new->br_blockcount;
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
startblockval(PREV.br_startblock) -
(bma->cur ? bma->cur->bc_private.b.allocated : 0));
- ep = xfs_iext_get_ext(ifp, bma->idx);
- xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
- trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
- bma->idx++;
+ PREV.br_startblock = nullstartblock(da_new);
+ PREV.br_blockcount = temp;
+ xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
+ xfs_iext_next(ifp, &bma->icur);
break;
case 0:
@@ -2015,30 +1926,40 @@ xfs_bmap_add_extent_delay_real(
* PREV @ idx LEFT RIGHT
* inserted at idx + 1
*/
- temp = new->br_startoff - PREV.br_startoff;
- temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
- trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */
+ old = PREV;
+
+ /* LEFT is the new middle */
LEFT = *new;
+
+ /* RIGHT is the new right */
RIGHT.br_state = PREV.br_state;
- RIGHT.br_startblock = nullstartblock(
- (int)xfs_bmap_worst_indlen(bma->ip, temp2));
RIGHT.br_startoff = new_endoff;
- RIGHT.br_blockcount = temp2;
- /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
- xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
+ RIGHT.br_blockcount =
+ PREV.br_startoff + PREV.br_blockcount - new_endoff;
+ RIGHT.br_startblock =
+ nullstartblock(xfs_bmap_worst_indlen(bma->ip,
+ RIGHT.br_blockcount));
+
+ /* truncate PREV */
+ PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
+ PREV.br_startblock =
+ nullstartblock(xfs_bmap_worst_indlen(bma->ip,
+ PREV.br_blockcount));
+ xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
+
+ xfs_iext_next(ifp, &bma->icur);
+ xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
+ xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
(*nextents)++;
+
if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
- new->br_startblock, new->br_blockcount,
- &i);
+ error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
- bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
@@ -2053,30 +1974,9 @@ xfs_bmap_add_extent_delay_real(
if (error)
goto done;
}
- temp = xfs_bmap_worst_indlen(bma->ip, temp);
- temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
- diff = (int)(temp + temp2 -
- (startblockval(PREV.br_startblock) -
- (bma->cur ?
- bma->cur->bc_private.b.allocated : 0)));
- if (diff > 0) {
- error = xfs_mod_fdblocks(bma->ip->i_mount,
- -((int64_t)diff), false);
- ASSERT(!error);
- if (error)
- goto done;
- }
-
- ep = xfs_iext_get_ext(ifp, bma->idx);
- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
- trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
- xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
- nullstartblock((int)temp2));
- trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
- bma->idx++;
- da_new = temp + temp2;
+ da_new = startblockval(PREV.br_startblock) +
+ startblockval(RIGHT.br_startblock);
break;
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
@@ -2110,19 +2010,17 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
- /* adjust for changes in reserved delayed indirect blocks */
- if (da_old || da_new) {
- temp = da_new;
- if (bma->cur)
- temp += bma->cur->bc_private.b.allocated;
- if (temp < da_old)
- xfs_mod_fdblocks(bma->ip->i_mount,
- (int64_t)(da_old - temp), false);
+ if (bma->cur) {
+ da_new += bma->cur->bc_private.b.allocated;
+ bma->cur->bc_private.b.allocated = 0;
}
- /* clear out the allocated field, done with it now in any case. */
- if (bma->cur)
- bma->cur->bc_private.b.allocated = 0;
+ /* adjust for changes in reserved delayed indirect blocks */
+ if (da_new != da_old) {
+ ASSERT(state == 0 || da_new < da_old);
+ error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
+ false);
+ }
xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
@@ -2142,7 +2040,7 @@ xfs_bmap_add_extent_unwritten_real(
struct xfs_trans *tp,
xfs_inode_t *ip, /* incore inode pointer */
int whichfork,
- xfs_extnum_t *idx, /* extent number to update/insert */
+ struct xfs_iext_cursor *icur,
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
xfs_fsblock_t *first, /* pointer to firstblock variable */
@@ -2150,28 +2048,22 @@ xfs_bmap_add_extent_unwritten_real(
int *logflagsp) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* btree cursor */
- xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
int error; /* error return value */
int i; /* temp state */
xfs_ifork_t *ifp; /* inode fork pointer */
xfs_fileoff_t new_endoff; /* end offset of new entry */
- xfs_exntst_t newext; /* new extent state */
- xfs_exntst_t oldext; /* old extent state */
xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
/* left is 0, right is 1, prev is 2 */
int rval=0; /* return value (logging flags) */
- int state = 0;/* state bits, accessed thru macros */
+ int state = xfs_bmap_fork_to_state(whichfork);
struct xfs_mount *mp = ip->i_mount;
+ struct xfs_bmbt_irec old;
*logflagsp = 0;
cur = *curp;
ifp = XFS_IFORK_PTR(ip, whichfork);
- if (whichfork == XFS_COW_FORK)
- state |= BMAP_COWFORK;
- ASSERT(*idx >= 0);
- ASSERT(*idx <= xfs_iext_count(ifp));
ASSERT(!isnullstartblock(new->br_startblock));
XFS_STATS_INC(mp, xs_add_exlist);
@@ -2184,12 +2076,8 @@ xfs_bmap_add_extent_unwritten_real(
* Set up a bunch of variables to make the tests simpler.
*/
error = 0;
- ep = xfs_iext_get_ext(ifp, *idx);
- xfs_bmbt_get_all(ep, &PREV);
- newext = new->br_state;
- oldext = (newext == XFS_EXT_UNWRITTEN) ?
- XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
- ASSERT(PREV.br_state == oldext);
+ xfs_iext_get_extent(ifp, icur, &PREV);
+ ASSERT(new->br_state != PREV.br_state);
new_endoff = new->br_startoff + new->br_blockcount;
ASSERT(PREV.br_startoff <= new->br_startoff);
ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
@@ -2207,10 +2095,8 @@ xfs_bmap_add_extent_unwritten_real(
* Check and set flags if this segment has a left neighbor.
* Don't set contiguous if the combined extent would be too large.
*/
- if (*idx > 0) {
+ if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
state |= BMAP_LEFT_VALID;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
-
if (isnullstartblock(LEFT.br_startblock))
state |= BMAP_LEFT_DELAY;
}
@@ -2218,7 +2104,7 @@ xfs_bmap_add_extent_unwritten_real(
if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
- LEFT.br_state == newext &&
+ LEFT.br_state == new->br_state &&
LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
state |= BMAP_LEFT_CONTIG;
@@ -2227,9 +2113,8 @@ xfs_bmap_add_extent_unwritten_real(
* Don't set contiguous if the combined extent would be too large.
* Also check for all-three-contiguous being too large.
*/
- if (*idx < xfs_iext_count(ifp) - 1) {
+ if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
state |= BMAP_RIGHT_VALID;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
if (isnullstartblock(RIGHT.br_startblock))
state |= BMAP_RIGHT_DELAY;
}
@@ -2237,7 +2122,7 @@ xfs_bmap_add_extent_unwritten_real(
if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
new_endoff == RIGHT.br_startoff &&
new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
- newext == RIGHT.br_state &&
+ new->br_state == RIGHT.br_state &&
new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
BMAP_RIGHT_FILLING)) !=
@@ -2258,24 +2143,20 @@ xfs_bmap_add_extent_unwritten_real(
* Setting all of a previous oldext extent to newext.
* The left and right neighbors are both contiguous with new.
*/
- --*idx;
-
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
- LEFT.br_blockcount + PREV.br_blockcount +
- RIGHT.br_blockcount);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
- xfs_iext_remove(ip, *idx + 1, 2, state);
+ xfs_iext_remove(ip, icur, state);
+ xfs_iext_remove(ip, icur, state);
+ xfs_iext_prev(ifp, icur);
+ xfs_iext_update_extent(ip, state, icur, &LEFT);
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
- RIGHT.br_startblock,
- RIGHT.br_blockcount, &i)))
+ error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_btree_delete(cur, &i)))
@@ -2290,10 +2171,8 @@ xfs_bmap_add_extent_unwritten_real(
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
- LEFT.br_startblock,
- LEFT.br_blockcount + PREV.br_blockcount +
- RIGHT.br_blockcount, LEFT.br_state)))
+ error = xfs_bmbt_update(cur, &LEFT);
+ if (error)
goto done;
}
break;
@@ -2303,23 +2182,19 @@ xfs_bmap_add_extent_unwritten_real(
* Setting all of a previous oldext extent to newext.
* The left neighbor is contiguous, the right is not.
*/
- --*idx;
+ LEFT.br_blockcount += PREV.br_blockcount;
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
- LEFT.br_blockcount + PREV.br_blockcount);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
-
- xfs_iext_remove(ip, *idx + 1, 1, state);
+ xfs_iext_remove(ip, icur, state);
+ xfs_iext_prev(ifp, icur);
+ xfs_iext_update_extent(ip, state, icur, &LEFT);
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
- PREV.br_startblock, PREV.br_blockcount,
- &i)))
+ error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_btree_delete(cur, &i)))
@@ -2328,10 +2203,8 @@ xfs_bmap_add_extent_unwritten_real(
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
- LEFT.br_startblock,
- LEFT.br_blockcount + PREV.br_blockcount,
- LEFT.br_state)))
+ error = xfs_bmbt_update(cur, &LEFT);
+ if (error)
goto done;
}
break;
@@ -2341,21 +2214,22 @@ xfs_bmap_add_extent_unwritten_real(
* Setting all of a previous oldext extent to newext.
* The right neighbor is contiguous, the left is not.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep,
- PREV.br_blockcount + RIGHT.br_blockcount);
- xfs_bmbt_set_state(ep, newext);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
- xfs_iext_remove(ip, *idx + 1, 1, state);
+ PREV.br_blockcount += RIGHT.br_blockcount;
+ PREV.br_state = new->br_state;
+
+ xfs_iext_next(ifp, icur);
+ xfs_iext_remove(ip, icur, state);
+ xfs_iext_prev(ifp, icur);
+ xfs_iext_update_extent(ip, state, icur, &PREV);
+
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
- RIGHT.br_startblock,
- RIGHT.br_blockcount, &i)))
+ error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
if ((error = xfs_btree_delete(cur, &i)))
@@ -2364,10 +2238,8 @@ xfs_bmap_add_extent_unwritten_real(
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- if ((error = xfs_bmbt_update(cur, new->br_startoff,
- new->br_startblock,
- new->br_blockcount + RIGHT.br_blockcount,
- newext)))
+ error = xfs_bmbt_update(cur, &PREV);
+ if (error)
goto done;
}
break;
@@ -2378,22 +2250,19 @@ xfs_bmap_add_extent_unwritten_real(
* Neither the left nor right neighbors are contiguous with
* the new one.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_state(ep, newext);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ PREV.br_state = new->br_state;
+ xfs_iext_update_extent(ip, state, icur, &PREV);
if (cur == NULL)
rval = XFS_ILOG_DEXT;
else {
rval = 0;
- if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
- new->br_startblock, new->br_blockcount,
- &i)))
+ error = xfs_bmbt_lookup_eq(cur, new, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- if ((error = xfs_bmbt_update(cur, new->br_startoff,
- new->br_startblock, new->br_blockcount,
- newext)))
+ error = xfs_bmbt_update(cur, &PREV);
+ if (error)
goto done;
}
break;
@@ -2403,43 +2272,32 @@ xfs_bmap_add_extent_unwritten_real(
* Setting the first part of a previous oldext extent to newext.
* The left neighbor is contiguous.
*/
- trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
- LEFT.br_blockcount + new->br_blockcount);
- xfs_bmbt_set_startoff(ep,
- PREV.br_startoff + new->br_blockcount);
- trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
-
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_startblock(ep,
- new->br_startblock + new->br_blockcount);
- xfs_bmbt_set_blockcount(ep,
- PREV.br_blockcount - new->br_blockcount);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
-
- --*idx;
+ LEFT.br_blockcount += new->br_blockcount;
+
+ old = PREV;
+ PREV.br_startoff += new->br_blockcount;
+ PREV.br_startblock += new->br_blockcount;
+ PREV.br_blockcount -= new->br_blockcount;
+
+ xfs_iext_update_extent(ip, state, icur, &PREV);
+ xfs_iext_prev(ifp, icur);
+ xfs_iext_update_extent(ip, state, icur, &LEFT);
if (cur == NULL)
rval = XFS_ILOG_DEXT;
else {
rval = 0;
- if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
- PREV.br_startblock, PREV.br_blockcount,
- &i)))
+ error = xfs_bmbt_lookup_eq(cur, &old, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- if ((error = xfs_bmbt_update(cur,
- PREV.br_startoff + new->br_blockcount,
- PREV.br_startblock + new->br_blockcount,
- PREV.br_blockcount - new->br_blockcount,
- oldext)))
+ error = xfs_bmbt_update(cur, &PREV);
+ if (error)
goto done;
- if ((error = xfs_btree_decrement(cur, 0, &i)))
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
goto done;
- error = xfs_bmbt_update(cur, LEFT.br_startoff,
- LEFT.br_startblock,
- LEFT.br_blockcount + new->br_blockcount,
- LEFT.br_state);
+ error = xfs_bmbt_update(cur, &LEFT);
if (error)
goto done;
}
@@ -2450,32 +2308,25 @@ xfs_bmap_add_extent_unwritten_real(
* Setting the first part of a previous oldext extent to newext.
* The left neighbor is not contiguous.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
- xfs_bmbt_set_startoff(ep, new_endoff);
- xfs_bmbt_set_blockcount(ep,
- PREV.br_blockcount - new->br_blockcount);
- xfs_bmbt_set_startblock(ep,
- new->br_startblock + new->br_blockcount);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
-
- xfs_iext_insert(ip, *idx, 1, new, state);
+ old = PREV;
+ PREV.br_startoff += new->br_blockcount;
+ PREV.br_startblock += new->br_blockcount;
+ PREV.br_blockcount -= new->br_blockcount;
+
+ xfs_iext_update_extent(ip, state, icur, &PREV);
+ xfs_iext_insert(ip, icur, new, state);
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
- PREV.br_startblock, PREV.br_blockcount,
- &i)))
+ error = xfs_bmbt_lookup_eq(cur, &old, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- if ((error = xfs_bmbt_update(cur,
- PREV.br_startoff + new->br_blockcount,
- PREV.br_startblock + new->br_blockcount,
- PREV.br_blockcount - new->br_blockcount,
- oldext)))
+ error = xfs_bmbt_update(cur, &PREV);
+ if (error)
goto done;
cur->bc_rec.b = *new;
if ((error = xfs_btree_insert(cur, &i)))
@@ -2489,39 +2340,33 @@ xfs_bmap_add_extent_unwritten_real(
* Setting the last part of a previous oldext extent to newext.
* The right neighbor is contiguous with the new allocation.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep,
- PREV.br_blockcount - new->br_blockcount);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ old = PREV;
+ PREV.br_blockcount -= new->br_blockcount;
- ++*idx;
+ RIGHT.br_startoff = new->br_startoff;
+ RIGHT.br_startblock = new->br_startblock;
+ RIGHT.br_blockcount += new->br_blockcount;
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
- new->br_startoff, new->br_startblock,
- new->br_blockcount + RIGHT.br_blockcount, newext);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ xfs_iext_update_extent(ip, state, icur, &PREV);
+ xfs_iext_next(ifp, icur);
+ xfs_iext_update_extent(ip, state, icur, &RIGHT);
if (cur == NULL)
rval = XFS_ILOG_DEXT;
else {
rval = 0;
- if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
- PREV.br_startblock,
- PREV.br_blockcount, &i)))
+ error = xfs_bmbt_lookup_eq(cur, &old, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
- PREV.br_startblock,
- PREV.br_blockcount - new->br_blockcount,
- oldext)))
+ error = xfs_bmbt_update(cur, &PREV);
+ if (error)
goto done;
- if ((error = xfs_btree_increment(cur, 0, &i)))
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
goto done;
- if ((error = xfs_bmbt_update(cur, new->br_startoff,
- new->br_startblock,
- new->br_blockcount + RIGHT.br_blockcount,
- newext)))
+ error = xfs_bmbt_update(cur, &RIGHT);
+ if (error)
goto done;
}
break;
@@ -2531,13 +2376,12 @@ xfs_bmap_add_extent_unwritten_real(
* Setting the last part of a previous oldext extent to newext.
* The right neighbor is not contiguous.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep,
- PREV.br_blockcount - new->br_blockcount);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ old = PREV;
+ PREV.br_blockcount -= new->br_blockcount;
- ++*idx;
- xfs_iext_insert(ip, *idx, 1, new, state);
+ xfs_iext_update_extent(ip, state, icur, &PREV);
+ xfs_iext_next(ifp, icur);
+ xfs_iext_insert(ip, icur, new, state);
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
@@ -2545,22 +2389,17 @@ xfs_bmap_add_extent_unwritten_real(
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
- PREV.br_startblock, PREV.br_blockcount,
- &i)))
+ error = xfs_bmbt_lookup_eq(cur, &old, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
- PREV.br_startblock,
- PREV.br_blockcount - new->br_blockcount,
- oldext)))
+ error = xfs_bmbt_update(cur, &PREV);
+ if (error)
goto done;
- if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
- new->br_startblock, new->br_blockcount,
- &i)))
+ error = xfs_bmbt_lookup_eq(cur, new, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
- cur->bc_rec.b.br_state = XFS_EXT_NORM;
if ((error = xfs_btree_insert(cur, &i)))
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
@@ -2573,20 +2412,20 @@ xfs_bmap_add_extent_unwritten_real(
* newext. Contiguity is impossible here.
* One extent becomes three extents.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep,
- new->br_startoff - PREV.br_startoff);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ old = PREV;
+ PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
r[0] = *new;
r[1].br_startoff = new_endoff;
r[1].br_blockcount =
- PREV.br_startoff + PREV.br_blockcount - new_endoff;
+ old.br_startoff + old.br_blockcount - new_endoff;
r[1].br_startblock = new->br_startblock + new->br_blockcount;
- r[1].br_state = oldext;
+ r[1].br_state = PREV.br_state;
- ++*idx;
- xfs_iext_insert(ip, *idx, 2, &r[0], state);
+ xfs_iext_update_extent(ip, state, icur, &PREV);
+ xfs_iext_next(ifp, icur);
+ xfs_iext_insert(ip, icur, &r[1], state);
+ xfs_iext_insert(ip, icur, &r[0], state);
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
@@ -2594,20 +2433,16 @@ xfs_bmap_add_extent_unwritten_real(
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
rval = XFS_ILOG_CORE;
- if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
- PREV.br_startblock, PREV.br_blockcount,
- &i)))
+ error = xfs_bmbt_lookup_eq(cur, &old, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
/* new right extent - oldext */
- if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
- r[1].br_startblock, r[1].br_blockcount,
- r[1].br_state)))
+ error = xfs_bmbt_update(cur, &r[1]);
+ if (error)
goto done;
/* new left extent - oldext */
cur->bc_rec.b = PREV;
- cur->bc_rec.b.br_blockcount =
- new->br_startoff - PREV.br_startoff;
if ((error = xfs_btree_insert(cur, &i)))
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
@@ -2616,13 +2451,11 @@ xfs_bmap_add_extent_unwritten_real(
* we are about to insert as we can't trust it after
* the previous insert.
*/
- if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
- new->br_startblock, new->br_blockcount,
- &i)))
+ error = xfs_bmbt_lookup_eq(cur, new, &i);
+ if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
/* new middle extent - newext */
- cur->bc_rec.b.br_state = new->br_state;
if ((error = xfs_btree_insert(cur, &i)))
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
@@ -2681,7 +2514,7 @@ STATIC void
xfs_bmap_add_extent_hole_delay(
xfs_inode_t *ip, /* incore inode pointer */
int whichfork,
- xfs_extnum_t *idx, /* extent number to update/insert */
+ struct xfs_iext_cursor *icur,
xfs_bmbt_irec_t *new) /* new data to add to file extents */
{
xfs_ifork_t *ifp; /* inode fork pointer */
@@ -2689,22 +2522,17 @@ xfs_bmap_add_extent_hole_delay(
xfs_filblks_t newlen=0; /* new indirect size */
xfs_filblks_t oldlen=0; /* old indirect size */
xfs_bmbt_irec_t right; /* right neighbor extent entry */
- int state; /* state bits, accessed thru macros */
- xfs_filblks_t temp=0; /* temp for indirect calculations */
+ int state = xfs_bmap_fork_to_state(whichfork);
+ xfs_filblks_t temp; /* temp for indirect calculations */
ifp = XFS_IFORK_PTR(ip, whichfork);
- state = 0;
- if (whichfork == XFS_COW_FORK)
- state |= BMAP_COWFORK;
ASSERT(isnullstartblock(new->br_startblock));
/*
* Check and set flags if this segment has a left neighbor
*/
- if (*idx > 0) {
+ if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
state |= BMAP_LEFT_VALID;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
-
if (isnullstartblock(left.br_startblock))
state |= BMAP_LEFT_DELAY;
}
@@ -2713,10 +2541,8 @@ xfs_bmap_add_extent_hole_delay(
* Check and set flags if the current (right) segment exists.
* If it doesn't exist, we're converting the hole at end-of-file.
*/
- if (*idx < xfs_iext_count(ifp)) {
+ if (xfs_iext_get_extent(ifp, icur, &right)) {
state |= BMAP_RIGHT_VALID;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
-
if (isnullstartblock(right.br_startblock))
state |= BMAP_RIGHT_DELAY;
}
@@ -2748,22 +2574,20 @@ xfs_bmap_add_extent_hole_delay(
* on the left and on the right.
* Merge all three into a single extent record.
*/
- --*idx;
temp = left.br_blockcount + new->br_blockcount +
right.br_blockcount;
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
oldlen = startblockval(left.br_startblock) +
startblockval(new->br_startblock) +
startblockval(right.br_startblock);
newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
oldlen);
- xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
- nullstartblock((int)newlen));
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ left.br_startblock = nullstartblock(newlen);
+ left.br_blockcount = temp;
- xfs_iext_remove(ip, *idx + 1, 1, state);
+ xfs_iext_remove(ip, icur, state);
+ xfs_iext_prev(ifp, icur);
+ xfs_iext_update_extent(ip, state, icur, &left);
break;
case BMAP_LEFT_CONTIG:
@@ -2772,18 +2596,17 @@ xfs_bmap_add_extent_hole_delay(
* on the left.
* Merge the new allocation with the left neighbor.
*/
- --*idx;
temp = left.br_blockcount + new->br_blockcount;
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
oldlen = startblockval(left.br_startblock) +
startblockval(new->br_startblock);
newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
oldlen);
- xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
- nullstartblock((int)newlen));
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ left.br_blockcount = temp;
+ left.br_startblock = nullstartblock(newlen);
+
+ xfs_iext_prev(ifp, icur);
+ xfs_iext_update_extent(ip, state, icur, &left);
break;
case BMAP_RIGHT_CONTIG:
@@ -2792,16 +2615,15 @@ xfs_bmap_add_extent_hole_delay(
* on the right.
* Merge the new allocation with the right neighbor.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
temp = new->br_blockcount + right.br_blockcount;
oldlen = startblockval(new->br_startblock) +
startblockval(right.br_startblock);
newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
oldlen);
- xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
- new->br_startoff,
- nullstartblock((int)newlen), temp, right.br_state);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ right.br_startoff = new->br_startoff;
+ right.br_startblock = nullstartblock(newlen);
+ right.br_blockcount = temp;
+ xfs_iext_update_extent(ip, state, icur, &right);
break;
case 0:
@@ -2811,7 +2633,7 @@ xfs_bmap_add_extent_hole_delay(
* Insert a new entry.
*/
oldlen = newlen = 0;
- xfs_iext_insert(ip, *idx, 1, new, state);
+ xfs_iext_insert(ip, icur, new, state);
break;
}
if (oldlen != newlen) {
@@ -2832,7 +2654,7 @@ xfs_bmap_add_extent_hole_real(
struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
- xfs_extnum_t *idx,
+ struct xfs_iext_cursor *icur,
struct xfs_btree_cur **curp,
struct xfs_bmbt_irec *new,
xfs_fsblock_t *first,
@@ -2847,27 +2669,19 @@ xfs_bmap_add_extent_hole_real(
xfs_bmbt_irec_t left; /* left neighbor extent entry */
xfs_bmbt_irec_t right; /* right neighbor extent entry */
int rval=0; /* return value (logging flags) */
- int state; /* state bits, accessed thru macros */
+ int state = xfs_bmap_fork_to_state(whichfork);
+ struct xfs_bmbt_irec old;
- ASSERT(*idx >= 0);
- ASSERT(*idx <= xfs_iext_count(ifp));
ASSERT(!isnullstartblock(new->br_startblock));
ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
XFS_STATS_INC(mp, xs_add_exlist);
- state = 0;
- if (whichfork == XFS_ATTR_FORK)
- state |= BMAP_ATTRFORK;
- if (whichfork == XFS_COW_FORK)
- state |= BMAP_COWFORK;
-
/*
* Check and set flags if this segment has a left neighbor.
*/
- if (*idx > 0) {
+ if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
state |= BMAP_LEFT_VALID;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
if (isnullstartblock(left.br_startblock))
state |= BMAP_LEFT_DELAY;
}
@@ -2876,9 +2690,8 @@ xfs_bmap_add_extent_hole_real(
* Check and set flags if this segment has a current value.
* Not true if we're inserting into the "hole" at eof.
*/
- if (*idx < xfs_iext_count(ifp)) {
+ if (xfs_iext_get_extent(ifp, icur, &right)) {
state |= BMAP_RIGHT_VALID;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
if (isnullstartblock(right.br_startblock))
state |= BMAP_RIGHT_DELAY;
}
@@ -2915,14 +2728,11 @@ xfs_bmap_add_extent_hole_real(
* left and on the right.
* Merge all three into a single extent record.
*/
- --*idx;
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
- left.br_blockcount + new->br_blockcount +
- right.br_blockcount);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ left.br_blockcount += new->br_blockcount + right.br_blockcount;
- xfs_iext_remove(ip, *idx + 1, 1, state);
+ xfs_iext_remove(ip, icur, state);
+ xfs_iext_prev(ifp, icur);
+ xfs_iext_update_extent(ip, state, icur, &left);
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
@@ -2930,9 +2740,7 @@ xfs_bmap_add_extent_hole_real(
rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else {
rval = XFS_ILOG_CORE;
- error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
- right.br_startblock, right.br_blockcount,
- &i);
+ error = xfs_bmbt_lookup_eq(cur, &right, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
@@ -2944,12 +2752,7 @@ xfs_bmap_add_extent_hole_real(
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_bmbt_update(cur, left.br_startoff,
- left.br_startblock,
- left.br_blockcount +
- new->br_blockcount +
- right.br_blockcount,
- left.br_state);
+ error = xfs_bmbt_update(cur, &left);
if (error)
goto done;
}
@@ -2961,27 +2764,21 @@ xfs_bmap_add_extent_hole_real(
* on the left.
* Merge the new allocation with the left neighbor.
*/
- --*idx;
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
- left.br_blockcount + new->br_blockcount);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ old = left;
+ left.br_blockcount += new->br_blockcount;
+
+ xfs_iext_prev(ifp, icur);
+ xfs_iext_update_extent(ip, state, icur, &left);
if (cur == NULL) {
rval = xfs_ilog_fext(whichfork);
} else {
rval = 0;
- error = xfs_bmbt_lookup_eq(cur, left.br_startoff,
- left.br_startblock, left.br_blockcount,
- &i);
+ error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_bmbt_update(cur, left.br_startoff,
- left.br_startblock,
- left.br_blockcount +
- new->br_blockcount,
- left.br_state);
+ error = xfs_bmbt_update(cur, &left);
if (error)
goto done;
}
@@ -2993,29 +2790,22 @@ xfs_bmap_add_extent_hole_real(
* on the right.
* Merge the new allocation with the right neighbor.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
- new->br_startoff, new->br_startblock,
- new->br_blockcount + right.br_blockcount,
- right.br_state);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ old = right;
+
+ right.br_startoff = new->br_startoff;
+ right.br_startblock = new->br_startblock;
+ right.br_blockcount += new->br_blockcount;
+ xfs_iext_update_extent(ip, state, icur, &right);
if (cur == NULL) {
rval = xfs_ilog_fext(whichfork);
} else {
rval = 0;
- error = xfs_bmbt_lookup_eq(cur,
- right.br_startoff,
- right.br_startblock,
- right.br_blockcount, &i);
+ error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- error = xfs_bmbt_update(cur, new->br_startoff,
- new->br_startblock,
- new->br_blockcount +
- right.br_blockcount,
- right.br_state);
+ error = xfs_bmbt_update(cur, &right);
if (error)
goto done;
}
@@ -3027,21 +2817,17 @@ xfs_bmap_add_extent_hole_real(
* real allocation.
* Insert a new entry.
*/
- xfs_iext_insert(ip, *idx, 1, new, state);
+ xfs_iext_insert(ip, icur, new, state);
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
if (cur == NULL) {
rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else {
rval = XFS_ILOG_CORE;
- error = xfs_bmbt_lookup_eq(cur,
- new->br_startoff,
- new->br_startblock,
- new->br_blockcount, &i);
+ error = xfs_bmbt_lookup_eq(cur, new, &i);
if (error)
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
- cur->bc_rec.b.br_state = new->br_state;
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
@@ -3981,7 +3767,7 @@ xfs_bmapi_read(
struct xfs_bmbt_irec got;
xfs_fileoff_t obno;
xfs_fileoff_t end;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor icur;
int error;
bool eof = false;
int n = 0;
@@ -4023,7 +3809,7 @@ xfs_bmapi_read(
return error;
}
- if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got))
+ if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
eof = true;
end = bno + len;
obno = bno;
@@ -4055,7 +3841,7 @@ xfs_bmapi_read(
break;
/* Else go on to the next record. */
- if (!xfs_iext_get_extent(ifp, ++idx, &got))
+ if (!xfs_iext_next_extent(ifp, &icur, &got))
eof = true;
}
*nmap = n;
@@ -4083,7 +3869,7 @@ xfs_bmapi_reserve_delalloc(
xfs_filblks_t len,
xfs_filblks_t prealloc,
struct xfs_bmbt_irec *got,
- xfs_extnum_t *lastx,
+ struct xfs_iext_cursor *icur,
int eof)
{
struct xfs_mount *mp = ip->i_mount;
@@ -4113,7 +3899,7 @@ xfs_bmapi_reserve_delalloc(
if (extsz) {
struct xfs_bmbt_irec prev;
- if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev))
+ if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
prev.br_startoff = NULLFILEOFF;
error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof,
@@ -4162,7 +3948,7 @@ xfs_bmapi_reserve_delalloc(
got->br_blockcount = alen;
got->br_state = XFS_EXT_NORM;
- xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
+ xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
/*
* Tag the inode if blocks were preallocated. Note that COW fork
@@ -4207,10 +3993,7 @@ xfs_bmapi_allocate(
if (bma->wasdel) {
bma->length = (xfs_extlen_t)bma->got.br_blockcount;
bma->offset = bma->got.br_startoff;
- if (bma->idx) {
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
- &bma->prev);
- }
+ xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev);
} else {
bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
if (!bma->eof)
@@ -4295,7 +4078,7 @@ xfs_bmapi_allocate(
error = xfs_bmap_add_extent_delay_real(bma, whichfork);
else
error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
- whichfork, &bma->idx, &bma->cur, &bma->got,
+ whichfork, &bma->icur, &bma->cur, &bma->got,
bma->firstblock, bma->dfops, &bma->logflags);
bma->logflags |= tmp_logflags;
@@ -4307,7 +4090,7 @@ xfs_bmapi_allocate(
* or xfs_bmap_add_extent_hole_real might have merged it into one of
* the neighbouring ones.
*/
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
+ xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
ASSERT(bma->got.br_startoff <= bma->offset);
ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
@@ -4365,8 +4148,8 @@ xfs_bmapi_convert_unwritten(
}
error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
- &bma->idx, &bma->cur, mval, bma->firstblock, bma->dfops,
- &tmp_logflags);
+ &bma->icur, &bma->cur, mval, bma->firstblock,
+ bma->dfops, &tmp_logflags);
/*
* Log the inode core unconditionally in the unwritten extent conversion
* path because the conversion might not have done so (e.g., if the
@@ -4388,7 +4171,7 @@ xfs_bmapi_convert_unwritten(
* xfs_bmap_add_extent_unwritten_real might have merged it into one
* of the neighbouring ones.
*/
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
+ xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
/*
* We may have combined previously unwritten space with written space,
@@ -4507,9 +4290,9 @@ xfs_bmapi_write(
end = bno + len;
obno = bno;
- if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.idx, &bma.got))
+ if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
eof = true;
- if (!xfs_iext_get_extent(ifp, bma.idx - 1, &bma.prev))
+ if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
bma.prev.br_startoff = NULLFILEOFF;
bma.tp = tp;
bma.ip = ip;
@@ -4551,7 +4334,8 @@ xfs_bmapi_write(
* First, deal with the hole before the allocated space
* that we found, if any.
*/
- if (need_alloc || wasdelay) {
+ if ((need_alloc || wasdelay) &&
+ !(flags & XFS_BMAPI_CONVERT_ONLY)) {
bma.eof = eof;
bma.conv = !!(flags & XFS_BMAPI_CONVERT);
bma.wasdel = wasdelay;
@@ -4614,7 +4398,7 @@ xfs_bmapi_write(
/* Else go on to the next record. */
bma.prev = bma.got;
- if (!xfs_iext_get_extent(ifp, ++bma.idx, &bma.got))
+ if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
eof = true;
}
*nmap = n;
@@ -4687,7 +4471,7 @@ xfs_bmapi_remap(
struct xfs_btree_cur *cur = NULL;
xfs_fsblock_t firstblock = NULLFSBLOCK;
struct xfs_bmbt_irec got;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor icur;
int logflags = 0, error;
ASSERT(len > 0);
@@ -4711,7 +4495,7 @@ xfs_bmapi_remap(
return error;
}
- if (xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) {
+ if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
/* make sure we only reflink into a hole. */
ASSERT(got.br_startoff > bno);
ASSERT(got.br_startoff - bno >= len);
@@ -4732,8 +4516,8 @@ xfs_bmapi_remap(
got.br_blockcount = len;
got.br_state = XFS_EXT_NORM;
- error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &idx, &cur,
- &got, &firstblock, dfops, &logflags);
+ error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &icur,
+ &cur, &got, &firstblock, dfops, &logflags);
if (error)
goto error0;
@@ -4849,7 +4633,7 @@ int
xfs_bmap_del_extent_delay(
struct xfs_inode *ip,
int whichfork,
- xfs_extnum_t *idx,
+ struct xfs_iext_cursor *icur,
struct xfs_bmbt_irec *got,
struct xfs_bmbt_irec *del)
{
@@ -4859,7 +4643,8 @@ xfs_bmap_del_extent_delay(
int64_t da_old, da_new, da_diff = 0;
xfs_fileoff_t del_endoff, got_endoff;
xfs_filblks_t got_indlen, new_indlen, stolen;
- int error = 0, state = 0;
+ int state = xfs_bmap_fork_to_state(whichfork);
+ int error = 0;
bool isrt;
XFS_STATS_INC(mp, xs_del_exlist);
@@ -4870,8 +4655,6 @@ xfs_bmap_del_extent_delay(
da_old = startblockval(got->br_startblock);
da_new = 0;
- ASSERT(*idx >= 0);
- ASSERT(*idx <= xfs_iext_count(ifp));
ASSERT(del->br_blockcount > 0);
ASSERT(got->br_startoff <= del->br_startoff);
ASSERT(got_endoff >= del_endoff);
@@ -4895,46 +4678,39 @@ xfs_bmap_del_extent_delay(
return error;
ip->i_delayed_blks -= del->br_blockcount;
- if (whichfork == XFS_COW_FORK)
- state |= BMAP_COWFORK;
-
if (got->br_startoff == del->br_startoff)
- state |= BMAP_LEFT_CONTIG;
+ state |= BMAP_LEFT_FILLING;
if (got_endoff == del_endoff)
- state |= BMAP_RIGHT_CONTIG;
+ state |= BMAP_RIGHT_FILLING;
- switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
- case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
/*
* Matches the whole extent. Delete the entry.
*/
- xfs_iext_remove(ip, *idx, 1, state);
- --*idx;
+ xfs_iext_remove(ip, icur, state);
+ xfs_iext_prev(ifp, icur);
break;
- case BMAP_LEFT_CONTIG:
+ case BMAP_LEFT_FILLING:
/*
* Deleting the first part of the extent.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
got->br_startoff = del_endoff;
got->br_blockcount -= del->br_blockcount;
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
got->br_blockcount), da_old);
got->br_startblock = nullstartblock((int)da_new);
- xfs_iext_update_extent(ifp, *idx, got);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ xfs_iext_update_extent(ip, state, icur, got);
break;
- case BMAP_RIGHT_CONTIG:
+ case BMAP_RIGHT_FILLING:
/*
* Deleting the last part of the extent.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
got->br_blockcount = got->br_blockcount - del->br_blockcount;
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
got->br_blockcount), da_old);
got->br_startblock = nullstartblock((int)da_new);
- xfs_iext_update_extent(ifp, *idx, got);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ xfs_iext_update_extent(ip, state, icur, got);
break;
case 0:
/*
@@ -4946,8 +4722,6 @@ xfs_bmap_del_extent_delay(
* Warn if either of the new indlen reservations is zero as this
* can lead to delalloc problems.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
-
got->br_blockcount = del->br_startoff - got->br_startoff;
got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
@@ -4959,15 +4733,14 @@ xfs_bmap_del_extent_delay(
del->br_blockcount);
got->br_startblock = nullstartblock((int)got_indlen);
- xfs_iext_update_extent(ifp, *idx, got);
- trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);
new.br_startoff = del_endoff;
new.br_state = got->br_state;
new.br_startblock = nullstartblock((int)new_indlen);
- ++*idx;
- xfs_iext_insert(ip, *idx, 1, &new, state);
+ xfs_iext_update_extent(ip, state, icur, got);
+ xfs_iext_next(ifp, icur);
+ xfs_iext_insert(ip, icur, &new, state);
da_new = got_indlen + new_indlen - stolen;
del->br_blockcount -= stolen;
@@ -4986,7 +4759,7 @@ xfs_bmap_del_extent_delay(
void
xfs_bmap_del_extent_cow(
struct xfs_inode *ip,
- xfs_extnum_t *idx,
+ struct xfs_iext_cursor *icur,
struct xfs_bmbt_irec *got,
struct xfs_bmbt_irec *del)
{
@@ -5001,75 +4774,67 @@ xfs_bmap_del_extent_cow(
del_endoff = del->br_startoff + del->br_blockcount;
got_endoff = got->br_startoff + got->br_blockcount;
- ASSERT(*idx >= 0);
- ASSERT(*idx <= xfs_iext_count(ifp));
ASSERT(del->br_blockcount > 0);
ASSERT(got->br_startoff <= del->br_startoff);
ASSERT(got_endoff >= del_endoff);
ASSERT(!isnullstartblock(got->br_startblock));
if (got->br_startoff == del->br_startoff)
- state |= BMAP_LEFT_CONTIG;
+ state |= BMAP_LEFT_FILLING;
if (got_endoff == del_endoff)
- state |= BMAP_RIGHT_CONTIG;
+ state |= BMAP_RIGHT_FILLING;
- switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
- case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
/*
* Matches the whole extent. Delete the entry.
*/
- xfs_iext_remove(ip, *idx, 1, state);
- --*idx;
+ xfs_iext_remove(ip, icur, state);
+ xfs_iext_prev(ifp, icur);
break;
- case BMAP_LEFT_CONTIG:
+ case BMAP_LEFT_FILLING:
/*
* Deleting the first part of the extent.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
got->br_startoff = del_endoff;
got->br_blockcount -= del->br_blockcount;
got->br_startblock = del->br_startblock + del->br_blockcount;
- xfs_iext_update_extent(ifp, *idx, got);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ xfs_iext_update_extent(ip, state, icur, got);
break;
- case BMAP_RIGHT_CONTIG:
+ case BMAP_RIGHT_FILLING:
/*
* Deleting the last part of the extent.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
got->br_blockcount -= del->br_blockcount;
- xfs_iext_update_extent(ifp, *idx, got);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ xfs_iext_update_extent(ip, state, icur, got);
break;
case 0:
/*
* Deleting the middle of the extent.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
got->br_blockcount = del->br_startoff - got->br_startoff;
- xfs_iext_update_extent(ifp, *idx, got);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
new.br_startoff = del_endoff;
new.br_blockcount = got_endoff - del_endoff;
new.br_state = got->br_state;
new.br_startblock = del->br_startblock + del->br_blockcount;
- ++*idx;
- xfs_iext_insert(ip, *idx, 1, &new, state);
+ xfs_iext_update_extent(ip, state, icur, got);
+ xfs_iext_next(ifp, icur);
+ xfs_iext_insert(ip, icur, &new, state);
break;
}
}
/*
* Called by xfs_bmapi to update file extent records and the btree
- * after removing space (or undoing a delayed allocation).
+ * after removing space.
*/
STATIC int /* error */
-xfs_bmap_del_extent(
+xfs_bmap_del_extent_real(
xfs_inode_t *ip, /* incore inode pointer */
xfs_trans_t *tp, /* current transaction pointer */
- xfs_extnum_t *idx, /* extent number to update/delete */
+ struct xfs_iext_cursor *icur,
struct xfs_defer_ops *dfops, /* list of extents to be freed */
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *del, /* data to remove from extents */
@@ -5077,16 +4842,12 @@ xfs_bmap_del_extent(
int whichfork, /* data or attr fork */
int bflags) /* bmapi flags */
{
- xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
- xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
xfs_fsblock_t del_endblock=0; /* first block past del */
xfs_fileoff_t del_endoff; /* first offset past del */
- int delay; /* current block is delayed allocated */
int do_fx; /* free extent at end of routine */
- xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */
int error; /* error return value */
- int flags; /* inode logging flags */
- xfs_bmbt_irec_t got; /* current extent entry */
+ int flags = 0;/* inode logging flags */
+ struct xfs_bmbt_irec got; /* current extent entry */
xfs_fileoff_t got_endoff; /* first offset past got */
int i; /* temp state */
xfs_ifork_t *ifp; /* inode fork pointer */
@@ -5095,103 +4856,81 @@ xfs_bmap_del_extent(
xfs_bmbt_irec_t new; /* new record to be inserted */
/* REFERENCED */
uint qfield; /* quota field to update */
- xfs_filblks_t temp; /* for indirect length calculations */
- xfs_filblks_t temp2; /* for indirect length calculations */
- int state = 0;
+ int state = xfs_bmap_fork_to_state(whichfork);
+ struct xfs_bmbt_irec old;
mp = ip->i_mount;
XFS_STATS_INC(mp, xs_del_exlist);
- if (whichfork == XFS_ATTR_FORK)
- state |= BMAP_ATTRFORK;
- else if (whichfork == XFS_COW_FORK)
- state |= BMAP_COWFORK;
-
ifp = XFS_IFORK_PTR(ip, whichfork);
- ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp)));
ASSERT(del->br_blockcount > 0);
- ep = xfs_iext_get_ext(ifp, *idx);
- xfs_bmbt_get_all(ep, &got);
+ xfs_iext_get_extent(ifp, icur, &got);
ASSERT(got.br_startoff <= del->br_startoff);
del_endoff = del->br_startoff + del->br_blockcount;
got_endoff = got.br_startoff + got.br_blockcount;
ASSERT(got_endoff >= del_endoff);
- delay = isnullstartblock(got.br_startblock);
- ASSERT(isnullstartblock(del->br_startblock) == delay);
- flags = 0;
+ ASSERT(!isnullstartblock(got.br_startblock));
qfield = 0;
error = 0;
+
/*
- * If deleting a real allocation, must free up the disk space.
+ * If it's the case where the directory code is running with no block
+ * reservation, and the deleted block is in the middle of its extent,
+ * and the resulting insert of an extent would cause transformation to
+ * btree format, then reject it. The calling code will then swap blocks
+ * around instead. We have to do this now, rather than waiting for the
+ * conversion to btree format, since the transaction will be dirty then.
*/
- if (!delay) {
- flags = XFS_ILOG_CORE;
- /*
- * Realtime allocation. Free it and record di_nblocks update.
- */
- if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
- xfs_fsblock_t bno;
- xfs_filblks_t len;
-
- ASSERT(do_mod(del->br_blockcount,
- mp->m_sb.sb_rextsize) == 0);
- ASSERT(do_mod(del->br_startblock,
- mp->m_sb.sb_rextsize) == 0);
- bno = del->br_startblock;
- len = del->br_blockcount;
- do_div(bno, mp->m_sb.sb_rextsize);
- do_div(len, mp->m_sb.sb_rextsize);
- error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
- if (error)
- goto done;
- do_fx = 0;
- nblks = len * mp->m_sb.sb_rextsize;
- qfield = XFS_TRANS_DQ_RTBCOUNT;
- }
- /*
- * Ordinary allocation.
- */
- else {
- do_fx = 1;
- nblks = del->br_blockcount;
- qfield = XFS_TRANS_DQ_BCOUNT;
- }
- /*
- * Set up del_endblock and cur for later.
- */
- del_endblock = del->br_startblock + del->br_blockcount;
- if (cur) {
- if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
- got.br_startblock, got.br_blockcount,
- &i)))
- goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- }
- da_old = da_new = 0;
- } else {
- da_old = startblockval(got.br_startblock);
- da_new = 0;
- nblks = 0;
+ if (tp->t_blk_res == 0 &&
+ XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_NEXTENTS(ip, whichfork) >=
+ XFS_IFORK_MAXEXT(ip, whichfork) &&
+ del->br_startoff > got.br_startoff && del_endoff < got_endoff)
+ return -ENOSPC;
+
+ flags = XFS_ILOG_CORE;
+ if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
+ xfs_fsblock_t bno;
+ xfs_filblks_t len;
+
+ ASSERT(do_mod(del->br_blockcount, mp->m_sb.sb_rextsize) == 0);
+ ASSERT(do_mod(del->br_startblock, mp->m_sb.sb_rextsize) == 0);
+ bno = del->br_startblock;
+ len = del->br_blockcount;
+ do_div(bno, mp->m_sb.sb_rextsize);
+ do_div(len, mp->m_sb.sb_rextsize);
+ error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
+ if (error)
+ goto done;
do_fx = 0;
+ nblks = len * mp->m_sb.sb_rextsize;
+ qfield = XFS_TRANS_DQ_RTBCOUNT;
+ } else {
+ do_fx = 1;
+ nblks = del->br_blockcount;
+ qfield = XFS_TRANS_DQ_BCOUNT;
}
- /*
- * Set flag value to use in switch statement.
- * Left-contig is 2, right-contig is 1.
- */
- switch (((got.br_startoff == del->br_startoff) << 1) |
- (got_endoff == del_endoff)) {
- case 3:
+ del_endblock = del->br_startblock + del->br_blockcount;
+ if (cur) {
+ error = xfs_bmbt_lookup_eq(cur, &got, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ }
+
+ if (got.br_startoff == del->br_startoff)
+ state |= BMAP_LEFT_FILLING;
+ if (got_endoff == del_endoff)
+ state |= BMAP_RIGHT_FILLING;
+
+ switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
/*
* Matches the whole extent. Delete the entry.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_iext_remove(ip, *idx, 1,
- whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
- --*idx;
- if (delay)
- break;
-
+ xfs_iext_remove(ip, icur, state);
+ xfs_iext_prev(ifp, icur);
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
flags |= XFS_ILOG_CORE;
@@ -5203,168 +4942,106 @@ xfs_bmap_del_extent(
goto done;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
break;
-
- case 2:
+ case BMAP_LEFT_FILLING:
/*
* Deleting the first part of the extent.
*/
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_startoff(ep, del_endoff);
- temp = got.br_blockcount - del->br_blockcount;
- xfs_bmbt_set_blockcount(ep, temp);
- if (delay) {
- temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
- da_old);
- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
- da_new = temp;
- break;
- }
- xfs_bmbt_set_startblock(ep, del_endblock);
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ got.br_startoff = del_endoff;
+ got.br_startblock = del_endblock;
+ got.br_blockcount -= del->br_blockcount;
+ xfs_iext_update_extent(ip, state, icur, &got);
if (!cur) {
flags |= xfs_ilog_fext(whichfork);
break;
}
- if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
- got.br_blockcount - del->br_blockcount,
- got.br_state)))
+ error = xfs_bmbt_update(cur, &got);
+ if (error)
goto done;
break;
-
- case 1:
+ case BMAP_RIGHT_FILLING:
/*
* Deleting the last part of the extent.
*/
- temp = got.br_blockcount - del->br_blockcount;
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep, temp);
- if (delay) {
- temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
- da_old);
- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
- da_new = temp;
- break;
- }
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ got.br_blockcount -= del->br_blockcount;
+ xfs_iext_update_extent(ip, state, icur, &got);
if (!cur) {
flags |= xfs_ilog_fext(whichfork);
break;
}
- if ((error = xfs_bmbt_update(cur, got.br_startoff,
- got.br_startblock,
- got.br_blockcount - del->br_blockcount,
- got.br_state)))
+ error = xfs_bmbt_update(cur, &got);
+ if (error)
goto done;
break;
-
case 0:
/*
* Deleting the middle of the extent.
*/
- temp = del->br_startoff - got.br_startoff;
- trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep, temp);
+ old = got;
+
+ got.br_blockcount = del->br_startoff - got.br_startoff;
+ xfs_iext_update_extent(ip, state, icur, &got);
+
new.br_startoff = del_endoff;
- temp2 = got_endoff - del_endoff;
- new.br_blockcount = temp2;
+ new.br_blockcount = got_endoff - del_endoff;
new.br_state = got.br_state;
- if (!delay) {
- new.br_startblock = del_endblock;
- flags |= XFS_ILOG_CORE;
- if (cur) {
- if ((error = xfs_bmbt_update(cur,
- got.br_startoff,
- got.br_startblock, temp,
- got.br_state)))
- goto done;
- if ((error = xfs_btree_increment(cur, 0, &i)))
- goto done;
- cur->bc_rec.b = new;
- error = xfs_btree_insert(cur, &i);
- if (error && error != -ENOSPC)
- goto done;
+ new.br_startblock = del_endblock;
+
+ flags |= XFS_ILOG_CORE;
+ if (cur) {
+ error = xfs_bmbt_update(cur, &got);
+ if (error)
+ goto done;
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+ goto done;
+ cur->bc_rec.b = new;
+ error = xfs_btree_insert(cur, &i);
+ if (error && error != -ENOSPC)
+ goto done;
+ /*
+ * If we get no-space back from the btree insert, it tried a
+ * split, and we have a zero block reservation. Fix up our
+ * state and return the error.

+ */
+ if (error == -ENOSPC) {
/*
- * If get no-space back from btree insert,
- * it tried a split, and we have a zero
- * block reservation.
- * Fix up our state and return the error.
+ * Reset the cursor, don't trust it after any
+ * insert operation.
*/
- if (error == -ENOSPC) {
- /*
- * Reset the cursor, don't trust
- * it after any insert operation.
- */
- if ((error = xfs_bmbt_lookup_eq(cur,
- got.br_startoff,
- got.br_startblock,
- temp, &i)))
- goto done;
- XFS_WANT_CORRUPTED_GOTO(mp,
- i == 1, done);
- /*
- * Update the btree record back
- * to the original value.
- */
- if ((error = xfs_bmbt_update(cur,
- got.br_startoff,
- got.br_startblock,
- got.br_blockcount,
- got.br_state)))
- goto done;
- /*
- * Reset the extent record back
- * to the original value.
- */
- xfs_bmbt_set_blockcount(ep,
- got.br_blockcount);
- flags = 0;
- error = -ENOSPC;
+ error = xfs_bmbt_lookup_eq(cur, &got, &i);
+ if (error)
goto done;
- }
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- } else
- flags |= xfs_ilog_fext(whichfork);
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
- } else {
- xfs_filblks_t stolen;
- ASSERT(whichfork == XFS_DATA_FORK);
-
- /*
- * Distribute the original indlen reservation across the
- * two new extents. Steal blocks from the deleted extent
- * if necessary. Stealing blocks simply fudges the
- * fdblocks accounting in xfs_bunmapi().
- */
- temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
- temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
- stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
- del->br_blockcount);
- da_new = temp + temp2 - stolen;
- del->br_blockcount -= stolen;
-
- /*
- * Set the reservation for each extent. Warn if either
- * is zero as this can lead to delalloc problems.
- */
- WARN_ON_ONCE(!temp || !temp2);
- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- new.br_startblock = nullstartblock((int)temp2);
- }
- trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
- xfs_iext_insert(ip, *idx + 1, 1, &new, state);
- ++*idx;
+ /*
+ * Update the btree record back
+ * to the original value.
+ */
+ error = xfs_bmbt_update(cur, &old);
+ if (error)
+ goto done;
+ /*
+ * Reset the extent record back
+ * to the original value.
+ */
+ xfs_iext_update_extent(ip, state, icur, &old);
+ flags = 0;
+ error = -ENOSPC;
+ goto done;
+ }
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ } else
+ flags |= xfs_ilog_fext(whichfork);
+ XFS_IFORK_NEXT_SET(ip, whichfork,
+ XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
+ xfs_iext_next(ifp, icur);
+ xfs_iext_insert(ip, icur, &new, state);
break;
}
/* remove reverse mapping */
- if (!delay) {
- error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
- if (error)
- goto done;
- }
+ error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
+ if (error)
+ goto done;
/*
* If we need to, add to list of extents to delete.
@@ -5390,13 +5067,6 @@ xfs_bmap_del_extent(
if (qfield && !(bflags & XFS_BMAPI_REMAP))
xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
- /*
- * Account for change in delayed indirect blocks.
- * Nothing to do for disk quota accounting here.
- */
- ASSERT(da_old >= da_new);
- if (da_old > da_new)
- xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
done:
*logflagsp = flags;
return error;
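For reference, the two filling flags set above encode the four ways the deleted range can overlap the existing extent. An illustrative walk-through (numbers invented):

	/* got covers file blocks [100, 120); the four delete cases:        */
	/* del = [100, 120) -> LEFT|RIGHT FILLING: remove the whole record  */
	/* del = [100, 110) -> LEFT_FILLING:  trim the front of got         */
	/* del = [110, 120) -> RIGHT_FILLING: trim the back of got          */
	/* del = [105, 115) -> 0:             shorten got, insert "new"     */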
@@ -5412,7 +5082,7 @@ int /* error */
__xfs_bunmapi(
xfs_trans_t *tp, /* transaction pointer */
struct xfs_inode *ip, /* incore inode */
- xfs_fileoff_t bno, /* starting offset to unmap */
+ xfs_fileoff_t start, /* first file offset deleted */
xfs_filblks_t *rlen, /* i/o: amount remaining */
int flags, /* misc flags */
xfs_extnum_t nexts, /* number of extents max */
@@ -5427,11 +5097,9 @@ __xfs_bunmapi(
xfs_bmbt_irec_t got; /* current extent record */
xfs_ifork_t *ifp; /* inode fork pointer */
int isrt; /* freeing in rt area */
- xfs_extnum_t lastx; /* last extent index used */
int logflags; /* transaction logging flags */
xfs_extlen_t mod; /* rt extent offset */
xfs_mount_t *mp; /* mount structure */
- xfs_fileoff_t start; /* first file offset deleted */
int tmp_logflags; /* partial logging flags */
int wasdel; /* was a delayed alloc extent */
int whichfork; /* data or attribute fork */
@@ -5439,8 +5107,11 @@ __xfs_bunmapi(
xfs_filblks_t len = *rlen; /* length to unmap in file */
xfs_fileoff_t max_len;
xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
+ xfs_fileoff_t end;
+ struct xfs_iext_cursor icur;
+ bool done = false;
- trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
+ trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
whichfork = xfs_bmapi_whichfork(flags);
ASSERT(whichfork != XFS_COW_FORK);
@@ -5479,18 +5150,13 @@ __xfs_bunmapi(
}
XFS_STATS_INC(mp, xs_blk_unmap);
isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
- start = bno;
- bno = start + len - 1;
+ end = start + len;
- /*
- * Check to see if the given block number is past the end of the
- * file, back up to the last block if so...
- */
- if (!xfs_iext_lookup_extent(ip, ifp, bno, &lastx, &got)) {
- ASSERT(lastx > 0);
- xfs_iext_get_extent(ifp, --lastx, &got);
- bno = got.br_startoff + got.br_blockcount - 1;
+ if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
+ *rlen = 0;
+ return 0;
}
+ end--;
logflags = 0;
if (ifp->if_flags & XFS_IFBROOT) {
@@ -5513,24 +5179,24 @@ __xfs_bunmapi(
}
extno = 0;
- while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
+ while (end != (xfs_fileoff_t)-1 && end >= start &&
(nexts == 0 || extno < nexts) && max_len > 0) {
/*
- * Is the found extent after a hole in which bno lives?
+ * Is the found extent after a hole in which end lives?
* Just back up to the previous extent, if so.
*/
- if (got.br_startoff > bno) {
- if (--lastx < 0)
- break;
- xfs_iext_get_extent(ifp, lastx, &got);
+ if (got.br_startoff > end &&
+ !xfs_iext_prev_extent(ifp, &icur, &got)) {
+ done = true;
+ break;
}
/*
* Is the last block of this extent before the range
* we're supposed to delete? If so, we're done.
*/
- bno = XFS_FILEOFF_MIN(bno,
+ end = XFS_FILEOFF_MIN(end,
got.br_startoff + got.br_blockcount - 1);
- if (bno < start)
+ if (end < start)
break;
/*
* Then deal with the (possibly delayed) allocated space
@@ -5555,8 +5221,8 @@ __xfs_bunmapi(
if (!wasdel)
del.br_startblock += start - got.br_startoff;
}
- if (del.br_startoff + del.br_blockcount > bno + 1)
- del.br_blockcount = bno + 1 - del.br_startoff;
+ if (del.br_startoff + del.br_blockcount > end + 1)
+ del.br_blockcount = end + 1 - del.br_startoff;
/* How much can we safely unmap? */
if (max_len < del.br_blockcount) {
@@ -5582,13 +5248,13 @@ __xfs_bunmapi(
* This piece is unwritten, or we're not
* using unwritten extents. Skip over it.
*/
- ASSERT(bno >= mod);
- bno -= mod > del.br_blockcount ?
+ ASSERT(end >= mod);
+ end -= mod > del.br_blockcount ?
del.br_blockcount : mod;
- if (bno < got.br_startoff) {
- if (--lastx >= 0)
- xfs_bmbt_get_all(xfs_iext_get_ext(
- ifp, lastx), &got);
+ if (end < got.br_startoff &&
+ !xfs_iext_prev_extent(ifp, &icur, &got)) {
+ done = true;
+ break;
}
continue;
}
@@ -5609,7 +5275,7 @@ __xfs_bunmapi(
}
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent_unwritten_real(tp, ip,
- whichfork, &lastx, &cur, &del,
+ whichfork, &icur, &cur, &del,
firstblock, dfops, &logflags);
if (error)
goto error0;
@@ -5634,10 +5300,13 @@ __xfs_bunmapi(
* Can't make it unwritten. There isn't
* a full extent here so just skip it.
*/
- ASSERT(bno >= del.br_blockcount);
- bno -= del.br_blockcount;
- if (got.br_startoff > bno && --lastx >= 0)
- xfs_iext_get_extent(ifp, lastx, &got);
+ ASSERT(end >= del.br_blockcount);
+ end -= del.br_blockcount;
+ if (got.br_startoff > end &&
+ !xfs_iext_prev_extent(ifp, &icur, &got)) {
+ done = true;
+ break;
+ }
continue;
} else if (del.br_state == XFS_EXT_UNWRITTEN) {
struct xfs_bmbt_irec prev;
@@ -5648,8 +5317,8 @@ __xfs_bunmapi(
* Unwrite the killed part of that one and
* try again.
*/
- ASSERT(lastx > 0);
- xfs_iext_get_extent(ifp, lastx - 1, &prev);
+ if (!xfs_iext_prev_extent(ifp, &icur, &prev))
+ ASSERT(0);
ASSERT(prev.br_state == XFS_EXT_NORM);
ASSERT(!isnullstartblock(prev.br_startblock));
ASSERT(del.br_startblock ==
@@ -5661,9 +5330,8 @@ __xfs_bunmapi(
prev.br_startoff = start;
}
prev.br_state = XFS_EXT_UNWRITTEN;
- lastx--;
error = xfs_bmap_add_extent_unwritten_real(tp,
- ip, whichfork, &lastx, &cur,
+ ip, whichfork, &icur, &cur,
&prev, firstblock, dfops,
&logflags);
if (error)
@@ -5673,7 +5341,7 @@ __xfs_bunmapi(
ASSERT(del.br_state == XFS_EXT_NORM);
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent_unwritten_real(tp,
- ip, whichfork, &lastx, &cur,
+ ip, whichfork, &icur, &cur,
&del, firstblock, dfops,
&logflags);
if (error)
@@ -5682,85 +5350,39 @@ __xfs_bunmapi(
}
}
- /*
- * If it's the case where the directory code is running
- * with no block reservation, and the deleted block is in
- * the middle of its extent, and the resulting insert
- * of an extent would cause transformation to btree format,
- * then reject it. The calling code will then swap
- * blocks around instead.
- * We have to do this now, rather than waiting for the
- * conversion to btree format, since the transaction
- * will be dirty.
- */
- if (!wasdel && tp->t_blk_res == 0 &&
- XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
- XFS_IFORK_MAXEXT(ip, whichfork) &&
- del.br_startoff > got.br_startoff &&
- del.br_startoff + del.br_blockcount <
- got.br_startoff + got.br_blockcount) {
- error = -ENOSPC;
- goto error0;
+ if (wasdel) {
+ error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
+ &got, &del);
+ } else {
+ error = xfs_bmap_del_extent_real(ip, tp, &icur, dfops,
+ cur, &del, &tmp_logflags, whichfork,
+ flags);
+ logflags |= tmp_logflags;
}
- /*
- * Unreserve quota and update realtime free space, if
- * appropriate. If delayed allocation, update the inode delalloc
- * counter now and wait to update the sb counters as
- * xfs_bmap_del_extent() might need to borrow some blocks.
- */
- if (wasdel) {
- ASSERT(startblockval(del.br_startblock) > 0);
- if (isrt) {
- xfs_filblks_t rtexts;
-
- rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
- do_div(rtexts, mp->m_sb.sb_rextsize);
- xfs_mod_frextents(mp, (int64_t)rtexts);
- (void)xfs_trans_reserve_quota_nblks(NULL,
- ip, -((long)del.br_blockcount), 0,
- XFS_QMOPT_RES_RTBLKS);
- } else {
- (void)xfs_trans_reserve_quota_nblks(NULL,
- ip, -((long)del.br_blockcount), 0,
- XFS_QMOPT_RES_REGBLKS);
- }
- ip->i_delayed_blks -= del.br_blockcount;
- if (cur)
- cur->bc_private.b.flags |=
- XFS_BTCUR_BPRV_WASDEL;
- } else if (cur)
- cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
-
- error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
- &tmp_logflags, whichfork, flags);
- logflags |= tmp_logflags;
if (error)
goto error0;
- if (!isrt && wasdel)
- xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
-
max_len -= del.br_blockcount;
- bno = del.br_startoff - 1;
+ end = del.br_startoff - 1;
nodelete:
/*
* If not done go on to the next (previous) record.
*/
- if (bno != (xfs_fileoff_t)-1 && bno >= start) {
- if (lastx >= 0) {
- xfs_iext_get_extent(ifp, lastx, &got);
- if (got.br_startoff > bno && --lastx >= 0)
- xfs_iext_get_extent(ifp, lastx, &got);
+ if (end != (xfs_fileoff_t)-1 && end >= start) {
+ if (!xfs_iext_get_extent(ifp, &icur, &got) ||
+ (got.br_startoff > end &&
+ !xfs_iext_prev_extent(ifp, &icur, &got))) {
+ done = true;
+ break;
}
extno++;
}
}
- if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0)
+ if (done || end == (xfs_fileoff_t)-1 || end < start)
*rlen = 0;
else
- *rlen = bno - start + 1;
+ *rlen = end - start + 1;
/*
* Convert to a btree if necessary.
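The unmap loop above now walks the incore extent list backwards through an xfs_iext_cursor instead of juggling the old lastx array index. A minimal sketch of that idiom (ip, ifp, start and end as in __xfs_bunmapi; error handling elided, not the kernel code verbatim):

	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;

	/* position the cursor at the last extent starting before "end" */
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got))
		return 0;		/* nothing mapped in the range */

	do {
		/* ... trim "got" against [start, end] and unmap it ... */
	} while (xfs_iext_prev_extent(ifp, &icur, &got) &&
		 got.br_startoff + got.br_blockcount > start);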
@@ -5878,14 +5500,13 @@ xfs_bmse_merge(
struct xfs_inode *ip,
int whichfork,
xfs_fileoff_t shift, /* shift fsb */
- int current_ext, /* idx of gotp */
+ struct xfs_iext_cursor *icur,
struct xfs_bmbt_irec *got, /* extent to shift */
struct xfs_bmbt_irec *left, /* preceding extent */
struct xfs_btree_cur *cur,
int *logflags, /* output */
struct xfs_defer_ops *dfops)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_bmbt_irec new;
xfs_filblks_t blockcount;
int error, i;
@@ -5913,8 +5534,7 @@ xfs_bmse_merge(
}
/* lookup and remove the extent to merge */
- error = xfs_bmbt_lookup_eq(cur, got->br_startoff, got->br_startblock,
- got->br_blockcount, &i);
+ error = xfs_bmbt_lookup_eq(cur, got, &i);
if (error)
return error;
XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
@@ -5925,20 +5545,20 @@ xfs_bmse_merge(
XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
/* lookup and update size of the previous extent */
- error = xfs_bmbt_lookup_eq(cur, left->br_startoff, left->br_startblock,
- left->br_blockcount, &i);
+ error = xfs_bmbt_lookup_eq(cur, left, &i);
if (error)
return error;
XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
- error = xfs_bmbt_update(cur, new.br_startoff, new.br_startblock,
- new.br_blockcount, new.br_state);
+ error = xfs_bmbt_update(cur, &new);
if (error)
return error;
done:
- xfs_iext_update_extent(ifp, current_ext - 1, &new);
- xfs_iext_remove(ip, current_ext, 1, 0);
+ xfs_iext_remove(ip, icur, 0);
+ xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
+ xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
+ &new);
/* update reverse mapping. rmap functions merge the rmaps for us */
error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got);
@@ -5949,183 +5569,83 @@ done:
return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new);
}
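Note the simplified cursor helpers used throughout this patch: xfs_bmbt_lookup_eq() and xfs_bmbt_update() now take a whole struct xfs_bmbt_irec instead of four unpacked fields. An illustrative call (values invented):

	struct xfs_bmbt_irec	irec = {
		.br_startoff	= 100,
		.br_startblock	= 5000,
		.br_blockcount	= 8,
		.br_state	= XFS_EXT_NORM,
	};
	int			i, error;

	error = xfs_bmbt_lookup_eq(cur, &irec, &i);	/* was (cur, off, bno, len, &i) */
	if (!error && i == 1)				/* i == 1: record found */
		error = xfs_bmbt_update(cur, &irec);	/* was (cur, off, bno, len, state) */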
-/*
- * Shift a single extent.
- */
-STATIC int
-xfs_bmse_shift_one(
- struct xfs_inode *ip,
- int whichfork,
- xfs_fileoff_t offset_shift_fsb,
- int *current_ext,
- struct xfs_bmbt_irec *got,
- struct xfs_btree_cur *cur,
- int *logflags,
- enum shift_direction direction,
- struct xfs_defer_ops *dfops)
+static int
+xfs_bmap_shift_update_extent(
+ struct xfs_inode *ip,
+ int whichfork,
+ struct xfs_iext_cursor *icur,
+ struct xfs_bmbt_irec *got,
+ struct xfs_btree_cur *cur,
+ int *logflags,
+ struct xfs_defer_ops *dfops,
+ xfs_fileoff_t startoff)
{
- struct xfs_ifork *ifp;
- struct xfs_mount *mp;
- xfs_fileoff_t startoff;
- struct xfs_bmbt_irec adj_irec, new;
- int error;
- int i;
- int total_extents;
-
- mp = ip->i_mount;
- ifp = XFS_IFORK_PTR(ip, whichfork);
- total_extents = xfs_iext_count(ifp);
-
- /* delalloc extents should be prevented by caller */
- XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got->br_startblock));
-
- if (direction == SHIFT_LEFT) {
- startoff = got->br_startoff - offset_shift_fsb;
-
- /*
- * Check for merge if we've got an extent to the left,
- * otherwise make sure there's enough room at the start
- * of the file for the shift.
- */
- if (!*current_ext) {
- if (got->br_startoff < offset_shift_fsb)
- return -EINVAL;
- goto update_current_ext;
- }
-
- /*
- * grab the left extent and check for a large enough hole.
- */
- xfs_iext_get_extent(ifp, *current_ext - 1, &adj_irec);
- if (startoff < adj_irec.br_startoff + adj_irec.br_blockcount)
- return -EINVAL;
-
- /* check whether to merge the extent or shift it down */
- if (xfs_bmse_can_merge(&adj_irec, got, offset_shift_fsb)) {
- return xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
- *current_ext, got, &adj_irec,
- cur, logflags, dfops);
- }
- } else {
- startoff = got->br_startoff + offset_shift_fsb;
- /* nothing to move if this is the last extent */
- if (*current_ext >= (total_extents - 1))
- goto update_current_ext;
-
- /*
- * If this is not the last extent in the file, make sure there
- * is enough room between current extent and next extent for
- * accommodating the shift.
- */
- xfs_iext_get_extent(ifp, *current_ext + 1, &adj_irec);
- if (startoff + got->br_blockcount > adj_irec.br_startoff)
- return -EINVAL;
-
- /*
- * Unlike a left shift (which involves a hole punch),
- * a right shift does not modify extent neighbors
- * in any way. We should never find mergeable extents
- * in this scenario. Check anyways and warn if we
- * encounter two extents that could be one.
- */
- if (xfs_bmse_can_merge(got, &adj_irec, offset_shift_fsb))
- WARN_ON_ONCE(1);
- }
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_bmbt_irec prev = *got;
+ int error, i;
- /*
- * Increment the extent index for the next iteration, update the start
- * offset of the in-core extent and update the btree if applicable.
- */
-update_current_ext:
*logflags |= XFS_ILOG_CORE;
- new = *got;
- new.br_startoff = startoff;
+ got->br_startoff = startoff;
if (cur) {
- error = xfs_bmbt_lookup_eq(cur, got->br_startoff,
- got->br_startblock, got->br_blockcount, &i);
+ error = xfs_bmbt_lookup_eq(cur, &prev, &i);
if (error)
return error;
XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
- error = xfs_bmbt_update(cur, new.br_startoff,
- new.br_startblock, new.br_blockcount,
- new.br_state);
+ error = xfs_bmbt_update(cur, got);
if (error)
return error;
} else {
*logflags |= XFS_ILOG_DEXT;
}
- xfs_iext_update_extent(ifp, *current_ext, &new);
-
- if (direction == SHIFT_LEFT)
- (*current_ext)++;
- else
- (*current_ext)--;
+ xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
+ got);
/* update reverse mapping */
- error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got);
+ error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &prev);
if (error)
return error;
- return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new);
+ return xfs_rmap_map_extent(mp, dfops, ip, whichfork, got);
}
-/*
- * Shift extent records to the left/right to cover/create a hole.
- *
- * The maximum number of extents to be shifted in a single operation is
- * @num_exts. @stop_fsb specifies the file offset at which to stop shift and the
- * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb
- * is the length by which each extent is shifted. If there is no hole to shift
- * the extents into, this will be considered invalid operation and we abort
- * immediately.
- */
int
-xfs_bmap_shift_extents(
+xfs_bmap_collapse_extents(
struct xfs_trans *tp,
struct xfs_inode *ip,
xfs_fileoff_t *next_fsb,
xfs_fileoff_t offset_shift_fsb,
- int *done,
+ bool *done,
xfs_fileoff_t stop_fsb,
xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops,
- enum shift_direction direction,
- int num_exts)
+ struct xfs_defer_ops *dfops)
{
- struct xfs_btree_cur *cur = NULL;
- struct xfs_bmbt_irec got;
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp;
- xfs_extnum_t nexts = 0;
- xfs_extnum_t current_ext;
- xfs_extnum_t total_extents;
- xfs_extnum_t stop_extent;
- int error = 0;
- int whichfork = XFS_DATA_FORK;
- int logflags = 0;
+ int whichfork = XFS_DATA_FORK;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_btree_cur *cur = NULL;
+ struct xfs_bmbt_irec got, prev;
+ struct xfs_iext_cursor icur;
+ xfs_fileoff_t new_startoff;
+ int error = 0;
+ int logflags = 0;
if (unlikely(XFS_TEST_ERROR(
(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT("xfs_bmap_shift_extents",
- XFS_ERRLEVEL_LOW, mp);
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
}
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
- ifp = XFS_IFORK_PTR(ip, whichfork);
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
- /* Read in all the extents */
error = xfs_iread_extents(tp, ip, whichfork);
if (error)
return error;
@@ -6138,107 +5658,165 @@ xfs_bmap_shift_extents(
cur->bc_private.b.flags = 0;
}
- /*
- * There may be delalloc extents in the data fork before the range we
- * are collapsing out, so we cannot use the count of real extents here.
- * Instead we have to calculate it from the incore fork.
- */
- total_extents = xfs_iext_count(ifp);
- if (total_extents == 0) {
- *done = 1;
+ if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
+ *done = true;
goto del_cursor;
}
+ XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
- /*
- * In case of first right shift, we need to initialize next_fsb
- */
- if (*next_fsb == NULLFSBLOCK) {
- ASSERT(direction == SHIFT_RIGHT);
-
- current_ext = total_extents - 1;
- xfs_iext_get_extent(ifp, current_ext, &got);
- if (stop_fsb > got.br_startoff) {
- *done = 1;
+ new_startoff = got.br_startoff - offset_shift_fsb;
+ if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
+ if (new_startoff < prev.br_startoff + prev.br_blockcount) {
+ error = -EINVAL;
goto del_cursor;
}
- *next_fsb = got.br_startoff;
+
+ if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
+ error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
+ &icur, &got, &prev, cur, &logflags,
+ dfops);
+ if (error)
+ goto del_cursor;
+ goto done;
+ }
} else {
- /*
- * Look up the extent index for the fsb where we start shifting. We can
- * henceforth iterate with current_ext as extent list changes are locked
- * out via ilock.
- *
- * If next_fsb lies in a hole beyond which there are no extents we are
- * done.
- */
- if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &current_ext,
- &got)) {
- *done = 1;
+ if (got.br_startoff < offset_shift_fsb) {
+ error = -EINVAL;
goto del_cursor;
}
}
- /* Lookup the extent index at which we have to stop */
- if (direction == SHIFT_RIGHT) {
- struct xfs_bmbt_irec s;
+ error = xfs_bmap_shift_update_extent(ip, whichfork, &icur, &got, cur,
+ &logflags, dfops, new_startoff);
+ if (error)
+ goto del_cursor;
+
+done:
+ if (!xfs_iext_next_extent(ifp, &icur, &got)) {
+ *done = true;
+ goto del_cursor;
+ }
+
+ *next_fsb = got.br_startoff;
+del_cursor:
+ if (cur)
+ xfs_btree_del_cursor(cur,
+ error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ if (logflags)
+ xfs_trans_log_inode(tp, ip, logflags);
+ return error;
+}
+
+int
+xfs_bmap_insert_extents(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ xfs_fileoff_t *next_fsb,
+ xfs_fileoff_t offset_shift_fsb,
+ bool *done,
+ xfs_fileoff_t stop_fsb,
+ xfs_fsblock_t *firstblock,
+ struct xfs_defer_ops *dfops)
+{
+ int whichfork = XFS_DATA_FORK;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_btree_cur *cur = NULL;
+ struct xfs_bmbt_irec got, next;
+ struct xfs_iext_cursor icur;
+ xfs_fileoff_t new_startoff;
+ int error = 0;
+ int logflags = 0;
+
+ if (unlikely(XFS_TEST_ERROR(
+ (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
+ mp, XFS_ERRTAG_BMAPIFORMAT))) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+ return -EFSCORRUPTED;
+ }
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
+
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(tp, ip, whichfork);
+ if (error)
+ return error;
+ }
+
+ if (ifp->if_flags & XFS_IFBROOT) {
+ cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
+ cur->bc_private.b.firstblock = *firstblock;
+ cur->bc_private.b.dfops = dfops;
+ cur->bc_private.b.flags = 0;
+ }
- xfs_iext_lookup_extent(ip, ifp, stop_fsb, &stop_extent, &s);
- /* Make stop_extent exclusive of shift range */
- stop_extent--;
- if (current_ext <= stop_extent) {
- error = -EIO;
+ if (*next_fsb == NULLFSBLOCK) {
+ xfs_iext_last(ifp, &icur);
+ if (!xfs_iext_get_extent(ifp, &icur, &got) ||
+ stop_fsb > got.br_startoff) {
+ *done = true;
goto del_cursor;
}
} else {
- stop_extent = total_extents;
- if (current_ext >= stop_extent) {
- error = -EIO;
+ if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
+ *done = true;
goto del_cursor;
}
}
+ XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
- while (nexts++ < num_exts) {
- error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
- &current_ext, &got, cur, &logflags,
- direction, dfops);
- if (error)
+ if (stop_fsb >= got.br_startoff + got.br_blockcount) {
+ error = -EIO;
+ goto del_cursor;
+ }
+
+ new_startoff = got.br_startoff + offset_shift_fsb;
+ if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
+ if (new_startoff + got.br_blockcount > next.br_startoff) {
+ error = -EINVAL;
goto del_cursor;
- /*
- * If there was an extent merge during the shift, the extent
- * count can change. Update the total and grade the next record.
- */
- if (direction == SHIFT_LEFT) {
- total_extents = xfs_iext_count(ifp);
- stop_extent = total_extents;
}
- if (current_ext == stop_extent) {
- *done = 1;
- *next_fsb = NULLFSBLOCK;
- break;
- }
- xfs_iext_get_extent(ifp, current_ext, &got);
+ /*
+ * Unlike a left shift (which involves a hole punch), a right
+ * shift does not modify extent neighbors in any way. We should
+ * never find mergeable extents in this scenario. Check anyway
+ * and warn if we encounter two extents that could be one.
+ */
+ if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
+ WARN_ON_ONCE(1);
}
- if (!*done)
- *next_fsb = got.br_startoff;
+ error = xfs_bmap_shift_update_extent(ip, whichfork, &icur, &got, cur,
+ &logflags, dfops, new_startoff);
+ if (error)
+ goto del_cursor;
+
+ if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
+ stop_fsb >= got.br_startoff + got.br_blockcount) {
+ *done = true;
+ goto del_cursor;
+ }
+ *next_fsb = got.br_startoff;
del_cursor:
if (cur)
xfs_btree_del_cursor(cur,
error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
-
if (logflags)
xfs_trans_log_inode(tp, ip, logflags);
-
return error;
}
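xfs_bmap_collapse_extents() and xfs_bmap_insert_extents() shift a single extent per call and report progress through *next_fsb and *done, so the caller supplies the loop. A hedged sketch of such a driver (the transaction plumbing is an assumption, not part of this patch):

	xfs_fileoff_t	next_fsb = start_fsb;
	bool		done = false;
	int		error = 0;

	while (!done) {
		/* allocate and join a transaction, init dfops/firstblock here */
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb,
				offset_shift_fsb, &done, stop_fsb,
				&firstblock, &dfops);
		if (error)
			break;
		/* finish deferred ops and commit the transaction here */
	}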
/*
- * Splits an extent into two extents at split_fsb block such that it is
- * the first block of the current_ext. @current_ext is a target extent
- * to be split. @split_fsb is a block where the extents is split.
- * If split_fsb lies in a hole or the first block of extents, just return 0.
+ * Splits an extent into two extents at the split_fsb block, such that
+ * split_fsb becomes the first block of the new, second extent. @ext is the
+ * target extent to be split. @split_fsb is the block where the extent is
+ * split. If split_fsb lies in a hole or at the first block of an extent,
+ * just return 0.
*/
STATIC int
xfs_bmap_split_extent_at(
@@ -6255,7 +5833,7 @@ xfs_bmap_split_extent_at(
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp;
xfs_fsblock_t gotblkcnt; /* new block count for got */
- xfs_extnum_t current_ext;
+ struct xfs_iext_cursor icur;
int error = 0;
int logflags = 0;
int i = 0;
@@ -6283,7 +5861,7 @@ xfs_bmap_split_extent_at(
/*
	 * If there are no extents, or if split_fsb lies in a hole, we are done.
*/
- if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &current_ext, &got) ||
+ if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
got.br_startoff >= split_fsb)
return 0;
@@ -6298,44 +5876,35 @@ xfs_bmap_split_extent_at(
cur->bc_private.b.firstblock = *firstfsb;
cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
- error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
- got.br_startblock,
- got.br_blockcount,
- &i);
+ error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
goto del_cursor;
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
}
got.br_blockcount = gotblkcnt;
- xfs_iext_update_extent(ifp, current_ext, &got);
+ xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
+ &got);
logflags = XFS_ILOG_CORE;
if (cur) {
- error = xfs_bmbt_update(cur, got.br_startoff,
- got.br_startblock,
- got.br_blockcount,
- got.br_state);
+ error = xfs_bmbt_update(cur, &got);
if (error)
goto del_cursor;
} else
logflags |= XFS_ILOG_DEXT;
/* Add new extent */
- current_ext++;
- xfs_iext_insert(ip, current_ext, 1, &new, 0);
+ xfs_iext_next(ifp, &icur);
+ xfs_iext_insert(ip, &icur, &new, 0);
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
if (cur) {
- error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
- new.br_startblock, new.br_blockcount,
- &i);
+ error = xfs_bmbt_lookup_eq(cur, &new, &i);
if (error)
goto del_cursor;
XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
- cur->bc_rec.b.br_state = new.br_state;
-
error = xfs_btree_insert(cur, &i);
if (error)
goto del_cursor;
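Worked numbers for the split (illustrative only; the "new" record simply takes the remainder of the old one): splitting got = {startoff 100, startblock 5000, blockcount 50} at split_fsb = 120 gives

	gotblkcnt	  = 120 - 100;	/* 20 blocks stay in got        */
	new.br_startoff   = 120;	/* split_fsb                    */
	new.br_startblock = 5000 + 20;	/* still contiguous on disk     */
	new.br_blockcount = 50 - 20;	/* 30 blocks move to new        */
	got.br_blockcount = gotblkcnt;	/* got is trimmed, new inserted */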
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 502e0d8fb4ff..e36d75799cd5 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -43,7 +43,7 @@ struct xfs_bmalloca {
xfs_fsblock_t blkno; /* starting block of new extent */
struct xfs_btree_cur *cur; /* btree cursor */
- xfs_extnum_t idx; /* current extent index */
+ struct xfs_iext_cursor icur; /* incore extent cursor */
int nallocs;/* number of extents alloc'd */
int logflags;/* flags for transaction logging */
@@ -113,6 +113,9 @@ struct xfs_extent_free_item
/* Only convert delalloc space, don't allocate entirely new extents */
#define XFS_BMAPI_DELALLOC 0x400
+/* Only convert unwritten extents, don't allocate new blocks */
+#define XFS_BMAPI_CONVERT_ONLY 0x800
+
#define XFS_BMAPI_FLAGS \
{ XFS_BMAPI_ENTIRE, "ENTIRE" }, \
{ XFS_BMAPI_METADATA, "METADATA" }, \
@@ -124,7 +127,8 @@ struct xfs_extent_free_item
{ XFS_BMAPI_ZERO, "ZERO" }, \
{ XFS_BMAPI_REMAP, "REMAP" }, \
{ XFS_BMAPI_COWFORK, "COWFORK" }, \
- { XFS_BMAPI_DELALLOC, "DELALLOC" }
+ { XFS_BMAPI_DELALLOC, "DELALLOC" }, \
+ { XFS_BMAPI_CONVERT_ONLY, "CONVERT_ONLY" }
static inline int xfs_bmapi_aflag(int w)
@@ -183,29 +187,6 @@ static inline bool xfs_bmap_is_real_extent(struct xfs_bmbt_irec *irec)
!isnullstartblock(irec->br_startblock);
}
-/*
- * This macro is used to determine how many extents will be shifted
- * in one write transaction. We could require two splits,
- * an extent move on the first and an extent merge on the second,
- * So it is proper that one extent is shifted inside write transaction
- * at a time.
- */
-#define XFS_BMAP_MAX_SHIFT_EXTENTS 1
-
-enum shift_direction {
- SHIFT_LEFT = 0,
- SHIFT_RIGHT,
-};
-
-#ifdef DEBUG
-void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
- int whichfork, unsigned long caller_ip);
-#define XFS_BMAP_TRACE_EXLIST(ip,c,w) \
- xfs_bmap_trace_exlist(ip,c,w, _THIS_IP_)
-#else
-#define XFS_BMAP_TRACE_EXLIST(ip,c,w)
-#endif
-
void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
xfs_filblks_t len);
void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
@@ -222,8 +203,6 @@ int xfs_bmap_last_before(struct xfs_trans *tp, struct xfs_inode *ip,
int xfs_bmap_last_offset(struct xfs_inode *ip, xfs_fileoff_t *unused,
int whichfork);
int xfs_bmap_one_block(struct xfs_inode *ip, int whichfork);
-int xfs_bmap_read_extents(struct xfs_trans *tp, struct xfs_inode *ip,
- int whichfork);
int xfs_bmapi_read(struct xfs_inode *ip, xfs_fileoff_t bno,
xfs_filblks_t len, struct xfs_bmbt_irec *mval,
int *nmap, int flags);
@@ -241,20 +220,25 @@ int xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
struct xfs_defer_ops *dfops, int *done);
int xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
- xfs_extnum_t *idx, struct xfs_bmbt_irec *got,
+ struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *got,
+ struct xfs_bmbt_irec *del);
+void xfs_bmap_del_extent_cow(struct xfs_inode *ip,
+ struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *got,
struct xfs_bmbt_irec *del);
-void xfs_bmap_del_extent_cow(struct xfs_inode *ip, xfs_extnum_t *idx,
- struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *del);
uint xfs_default_attroffset(struct xfs_inode *ip);
-int xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip,
+int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip,
+ xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
+ bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
+ struct xfs_defer_ops *dfops);
+int xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
- int *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
- struct xfs_defer_ops *dfops, enum shift_direction direction,
- int num_exts);
+ bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
+ struct xfs_defer_ops *dfops);
int xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset);
int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
xfs_fileoff_t off, xfs_filblks_t len, xfs_filblks_t prealloc,
- struct xfs_bmbt_irec *got, xfs_extnum_t *lastx, int eof);
+ struct xfs_bmbt_irec *got, struct xfs_iext_cursor *cur,
+ int eof);
enum xfs_bmap_intent_type {
XFS_BMAP_MAP = 1,
@@ -278,4 +262,16 @@ int xfs_bmap_map_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
int xfs_bmap_unmap_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
struct xfs_inode *ip, struct xfs_bmbt_irec *imap);
+static inline int xfs_bmap_fork_to_state(int whichfork)
+{
+ switch (whichfork) {
+ case XFS_ATTR_FORK:
+ return BMAP_ATTRFORK;
+ case XFS_COW_FORK:
+ return BMAP_COWFORK;
+ default:
+ return 0;
+ }
+}
+
#endif /* __XFS_BMAP_H__ */
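The new xfs_bmap_fork_to_state() helper replaces the old ad-hoc whichfork checks when updating incore extents; a typical call site, mirroring the ones in this patch, looks like:

	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;

	/* icur/got filled in by a prior xfs_iext_lookup_extent() call */
	/* state is 0 for the data fork, BMAP_ATTRFORK or BMAP_COWFORK otherwise */
	xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork),
			&icur, &got);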
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index a6331ffa51e3..c10aecaaae44 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -38,22 +38,6 @@
#include "xfs_rmap.h"
/*
- * Determine the extent state.
- */
-/* ARGSUSED */
-STATIC xfs_exntst_t
-xfs_extent_state(
- xfs_filblks_t blks,
- int extent_flag)
-{
- if (extent_flag) {
- ASSERT(blks != 0); /* saved for DMIG */
- return XFS_EXT_UNWRITTEN;
- }
- return XFS_EXT_NORM;
-}
-
-/*
* Convert on-disk form of btree root to in-memory form.
*/
void
@@ -87,84 +71,21 @@ xfs_bmdr_to_bmbt(
memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}
-/*
- * Convert a compressed bmap extent record to an uncompressed form.
- * This code must be in sync with the routines xfs_bmbt_get_startoff,
- * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
- */
-STATIC void
-__xfs_bmbt_get_all(
- uint64_t l0,
- uint64_t l1,
- xfs_bmbt_irec_t *s)
-{
- int ext_flag;
- xfs_exntst_t st;
-
- ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
- s->br_startoff = ((xfs_fileoff_t)l0 &
- xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
- s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) |
- (((xfs_fsblock_t)l1) >> 21);
- s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21));
- /* This is xfs_extent_state() in-line */
- if (ext_flag) {
- ASSERT(s->br_blockcount != 0); /* saved for DMIG */
- st = XFS_EXT_UNWRITTEN;
- } else
- st = XFS_EXT_NORM;
- s->br_state = st;
-}
-
void
-xfs_bmbt_get_all(
- xfs_bmbt_rec_host_t *r,
- xfs_bmbt_irec_t *s)
-{
- __xfs_bmbt_get_all(r->l0, r->l1, s);
-}
-
-/*
- * Extract the blockcount field from an in memory bmap extent record.
- */
-xfs_filblks_t
-xfs_bmbt_get_blockcount(
- xfs_bmbt_rec_host_t *r)
-{
- return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21));
-}
-
-/*
- * Extract the startblock field from an in memory bmap extent record.
- */
-xfs_fsblock_t
-xfs_bmbt_get_startblock(
- xfs_bmbt_rec_host_t *r)
-{
- return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) |
- (((xfs_fsblock_t)r->l1) >> 21);
-}
-
-/*
- * Extract the startoff field from an in memory bmap extent record.
- */
-xfs_fileoff_t
-xfs_bmbt_get_startoff(
- xfs_bmbt_rec_host_t *r)
-{
- return ((xfs_fileoff_t)r->l0 &
- xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
-}
-
-xfs_exntst_t
-xfs_bmbt_get_state(
- xfs_bmbt_rec_host_t *r)
-{
- int ext_flag;
-
- ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
- return xfs_extent_state(xfs_bmbt_get_blockcount(r),
- ext_flag);
+xfs_bmbt_disk_get_all(
+ struct xfs_bmbt_rec *rec,
+ struct xfs_bmbt_irec *irec)
+{
+ uint64_t l0 = get_unaligned_be64(&rec->l0);
+ uint64_t l1 = get_unaligned_be64(&rec->l1);
+
+ irec->br_startoff = (l0 & xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
+ irec->br_startblock = ((l0 & xfs_mask64lo(9)) << 43) | (l1 >> 21);
+ irec->br_blockcount = l1 & xfs_mask64lo(21);
+ if (l0 >> (64 - BMBT_EXNTFLAG_BITLEN))
+ irec->br_state = XFS_EXT_UNWRITTEN;
+ else
+ irec->br_state = XFS_EXT_NORM;
}
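For reference, the on-disk record layout decoded by xfs_bmbt_disk_get_all() above (and packed again by xfs_bmbt_disk_set_all() below) is:

	/*
	 * l0 bit  63:     extent flag (1 = unwritten), BMBT_EXNTFLAG_BITLEN = 1
	 * l0 bits 62-9:   54-bit br_startoff
	 * l0 bits 8-0:    high 9 bits of the 52-bit br_startblock
	 * l1 bits 63-21:  low 43 bits of br_startblock
	 * l1 bits 20-0:   21-bit br_blockcount
	 */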
/*
@@ -188,142 +109,29 @@ xfs_bmbt_disk_get_startoff(
xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}
-
-/*
- * Set all the fields in a bmap extent record from the arguments.
- */
-void
-xfs_bmbt_set_allf(
- xfs_bmbt_rec_host_t *r,
- xfs_fileoff_t startoff,
- xfs_fsblock_t startblock,
- xfs_filblks_t blockcount,
- xfs_exntst_t state)
-{
- int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
-
- ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
- ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
- ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
-
- ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);
-
- r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
- ((xfs_bmbt_rec_base_t)startoff << 9) |
- ((xfs_bmbt_rec_base_t)startblock >> 43);
- r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
- ((xfs_bmbt_rec_base_t)blockcount &
- (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
-}
-
/*
* Set all the fields in a bmap extent record from the uncompressed form.
*/
void
-xfs_bmbt_set_all(
- xfs_bmbt_rec_host_t *r,
- xfs_bmbt_irec_t *s)
-{
- xfs_bmbt_set_allf(r, s->br_startoff, s->br_startblock,
- s->br_blockcount, s->br_state);
-}
-
-
-/*
- * Set all the fields in a disk format bmap extent record from the arguments.
- */
-void
-xfs_bmbt_disk_set_allf(
- xfs_bmbt_rec_t *r,
- xfs_fileoff_t startoff,
- xfs_fsblock_t startblock,
- xfs_filblks_t blockcount,
- xfs_exntst_t state)
-{
- int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;
-
- ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
- ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
- ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);
- ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);
-
- r->l0 = cpu_to_be64(
- ((xfs_bmbt_rec_base_t)extent_flag << 63) |
- ((xfs_bmbt_rec_base_t)startoff << 9) |
- ((xfs_bmbt_rec_base_t)startblock >> 43));
- r->l1 = cpu_to_be64(
- ((xfs_bmbt_rec_base_t)startblock << 21) |
- ((xfs_bmbt_rec_base_t)blockcount &
- (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
-}
-
-/*
- * Set all the fields in a bmap extent record from the uncompressed form.
- */
-STATIC void
xfs_bmbt_disk_set_all(
- xfs_bmbt_rec_t *r,
- xfs_bmbt_irec_t *s)
-{
- xfs_bmbt_disk_set_allf(r, s->br_startoff, s->br_startblock,
- s->br_blockcount, s->br_state);
-}
-
-/*
- * Set the blockcount field in a bmap extent record.
- */
-void
-xfs_bmbt_set_blockcount(
- xfs_bmbt_rec_host_t *r,
- xfs_filblks_t v)
+ struct xfs_bmbt_rec *r,
+ struct xfs_bmbt_irec *s)
{
- ASSERT((v & xfs_mask64hi(43)) == 0);
- r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64hi(43)) |
- (xfs_bmbt_rec_base_t)(v & xfs_mask64lo(21));
-}
-
-/*
- * Set the startblock field in a bmap extent record.
- */
-void
-xfs_bmbt_set_startblock(
- xfs_bmbt_rec_host_t *r,
- xfs_fsblock_t v)
-{
- ASSERT((v & xfs_mask64hi(12)) == 0);
- r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) |
- (xfs_bmbt_rec_base_t)(v >> 43);
- r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) |
- (xfs_bmbt_rec_base_t)(v << 21);
-}
+ int extent_flag = (s->br_state != XFS_EXT_NORM);
-/*
- * Set the startoff field in a bmap extent record.
- */
-void
-xfs_bmbt_set_startoff(
- xfs_bmbt_rec_host_t *r,
- xfs_fileoff_t v)
-{
- ASSERT((v & xfs_mask64hi(9)) == 0);
- r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) xfs_mask64hi(1)) |
- ((xfs_bmbt_rec_base_t)v << 9) |
- (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
-}
+ ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
+ ASSERT(!(s->br_startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)));
+ ASSERT(!(s->br_blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)));
+ ASSERT(!(s->br_startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)));
-/*
- * Set the extent state field in a bmap extent record.
- */
-void
-xfs_bmbt_set_state(
- xfs_bmbt_rec_host_t *r,
- xfs_exntst_t v)
-{
- ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
- if (v == XFS_EXT_NORM)
- r->l0 &= xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN);
- else
- r->l0 |= xfs_mask64hi(BMBT_EXNTFLAG_BITLEN);
+ put_unaligned_be64(
+ ((xfs_bmbt_rec_base_t)extent_flag << 63) |
+ ((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
+ ((xfs_bmbt_rec_base_t)s->br_startblock >> 43), &r->l0);
+ put_unaligned_be64(
+ ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
+ ((xfs_bmbt_rec_base_t)s->br_blockcount &
+ (xfs_bmbt_rec_base_t)xfs_mask64lo(21)), &r->l1);
}
/*
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.h b/fs/xfs/libxfs/xfs_bmap_btree.h
index 9da5a8d4f184..135b8c56d23e 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.h
+++ b/fs/xfs/libxfs/xfs_bmap_btree.h
@@ -98,25 +98,11 @@ struct xfs_trans;
*/
extern void xfs_bmdr_to_bmbt(struct xfs_inode *, xfs_bmdr_block_t *, int,
struct xfs_btree_block *, int);
-extern void xfs_bmbt_get_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s);
-extern xfs_filblks_t xfs_bmbt_get_blockcount(xfs_bmbt_rec_host_t *r);
-extern xfs_fsblock_t xfs_bmbt_get_startblock(xfs_bmbt_rec_host_t *r);
-extern xfs_fileoff_t xfs_bmbt_get_startoff(xfs_bmbt_rec_host_t *r);
-extern xfs_exntst_t xfs_bmbt_get_state(xfs_bmbt_rec_host_t *r);
+void xfs_bmbt_disk_set_all(struct xfs_bmbt_rec *r, struct xfs_bmbt_irec *s);
extern xfs_filblks_t xfs_bmbt_disk_get_blockcount(xfs_bmbt_rec_t *r);
extern xfs_fileoff_t xfs_bmbt_disk_get_startoff(xfs_bmbt_rec_t *r);
-
-extern void xfs_bmbt_set_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s);
-extern void xfs_bmbt_set_allf(xfs_bmbt_rec_host_t *r, xfs_fileoff_t o,
- xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v);
-extern void xfs_bmbt_set_blockcount(xfs_bmbt_rec_host_t *r, xfs_filblks_t v);
-extern void xfs_bmbt_set_startblock(xfs_bmbt_rec_host_t *r, xfs_fsblock_t v);
-extern void xfs_bmbt_set_startoff(xfs_bmbt_rec_host_t *r, xfs_fileoff_t v);
-extern void xfs_bmbt_set_state(xfs_bmbt_rec_host_t *r, xfs_exntst_t v);
-
-extern void xfs_bmbt_disk_set_allf(xfs_bmbt_rec_t *r, xfs_fileoff_t o,
- xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v);
+extern void xfs_bmbt_disk_get_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s);
extern void xfs_bmbt_to_bmdr(struct xfs_mount *, struct xfs_btree_block *, int,
xfs_bmdr_block_t *, int);
@@ -136,9 +122,9 @@ extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *,
* Check that the extent does not contain an invalid unwritten extent flag.
*/
static inline bool xfs_bmbt_validate_extent(struct xfs_mount *mp, int whichfork,
- struct xfs_bmbt_rec_host *ep)
+ struct xfs_bmbt_irec *irec)
{
- if (ep->l0 >> (64 - BMBT_EXNTFLAG_BITLEN) == 0)
+ if (irec->br_state == XFS_EXT_NORM)
return true;
if (whichfork == XFS_DATA_FORK &&
xfs_sb_version_hasextflgbit(&mp->m_sb))
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 5bfb88261c7e..5f33adf8eecb 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -29,6 +29,7 @@
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_btree.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
@@ -63,44 +64,63 @@ xfs_btree_magic(
return magic;
}
-STATIC int /* error (0 or EFSCORRUPTED) */
-xfs_btree_check_lblock(
- struct xfs_btree_cur *cur, /* btree cursor */
- struct xfs_btree_block *block, /* btree long form block pointer */
- int level, /* level of the btree block */
- struct xfs_buf *bp) /* buffer for block, if any */
+/*
+ * Check a long btree block header. Return the address of the failing check,
+ * or NULL if everything is ok.
+ */
+xfs_failaddr_t
+__xfs_btree_check_lblock(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ int level,
+ struct xfs_buf *bp)
{
- int lblock_ok = 1; /* block passes checks */
- struct xfs_mount *mp; /* file system mount point */
+ struct xfs_mount *mp = cur->bc_mp;
xfs_btnum_t btnum = cur->bc_btnum;
- int crc;
-
- mp = cur->bc_mp;
- crc = xfs_sb_version_hascrc(&mp->m_sb);
+ int crc = xfs_sb_version_hascrc(&mp->m_sb);
if (crc) {
- lblock_ok = lblock_ok &&
- uuid_equal(&block->bb_u.l.bb_uuid,
- &mp->m_sb.sb_meta_uuid) &&
- block->bb_u.l.bb_blkno == cpu_to_be64(
- bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
+ if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
+ return __this_address;
+ if (block->bb_u.l.bb_blkno !=
+ cpu_to_be64(bp ? bp->b_bn : XFS_BUF_DADDR_NULL))
+ return __this_address;
+ if (block->bb_u.l.bb_pad != cpu_to_be32(0))
+ return __this_address;
}
- lblock_ok = lblock_ok &&
- be32_to_cpu(block->bb_magic) == xfs_btree_magic(crc, btnum) &&
- be16_to_cpu(block->bb_level) == level &&
- be16_to_cpu(block->bb_numrecs) <=
- cur->bc_ops->get_maxrecs(cur, level) &&
- block->bb_u.l.bb_leftsib &&
- (block->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK) ||
- XFS_FSB_SANITY_CHECK(mp,
- be64_to_cpu(block->bb_u.l.bb_leftsib))) &&
- block->bb_u.l.bb_rightsib &&
- (block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK) ||
- XFS_FSB_SANITY_CHECK(mp,
- be64_to_cpu(block->bb_u.l.bb_rightsib)));
-
- if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp,
+ if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(crc, btnum))
+ return __this_address;
+ if (be16_to_cpu(block->bb_level) != level)
+ return __this_address;
+ if (be16_to_cpu(block->bb_numrecs) >
+ cur->bc_ops->get_maxrecs(cur, level))
+ return __this_address;
+ if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
+ !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_leftsib),
+ level + 1))
+ return __this_address;
+ if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
+ !xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_rightsib),
+ level + 1))
+ return __this_address;
+
+ return NULL;
+}
+
+/* Check a long btree block header. */
+static int
+xfs_btree_check_lblock(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ int level,
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ xfs_failaddr_t fa;
+
+ fa = __xfs_btree_check_lblock(cur, block, level, bp);
+ if (unlikely(XFS_TEST_ERROR(fa != NULL, mp,
XFS_ERRTAG_BTREE_CHECK_LBLOCK))) {
if (bp)
trace_xfs_btree_corrupt(bp, _RET_IP_);
@@ -110,48 +130,61 @@ xfs_btree_check_lblock(
return 0;
}
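The block checks now return an xfs_failaddr_t so corruption reports can name the exact failed test; a minimal sketch of the pattern (the helper below is hypothetical, not part of this patch):

static xfs_failaddr_t
example_check_header(struct xfs_btree_block *block, __be32 magic)
{
	if (block->bb_magic != magic)
		return __this_address;	/* caller logs this address */
	if (be16_to_cpu(block->bb_level) >= XFS_BTREE_MAXLEVELS)
		return __this_address;
	return NULL;			/* everything is ok */
}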
-STATIC int /* error (0 or EFSCORRUPTED) */
-xfs_btree_check_sblock(
- struct xfs_btree_cur *cur, /* btree cursor */
- struct xfs_btree_block *block, /* btree short form block pointer */
- int level, /* level of the btree block */
- struct xfs_buf *bp) /* buffer containing block */
+/*
+ * Check a short btree block header. Return the address of the failing check,
+ * or NULL if everything is ok.
+ */
+xfs_failaddr_t
+__xfs_btree_check_sblock(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ int level,
+ struct xfs_buf *bp)
{
- struct xfs_mount *mp; /* file system mount point */
- struct xfs_buf *agbp; /* buffer for ag. freespace struct */
- struct xfs_agf *agf; /* ag. freespace structure */
- xfs_agblock_t agflen; /* native ag. freespace length */
- int sblock_ok = 1; /* block passes checks */
+ struct xfs_mount *mp = cur->bc_mp;
xfs_btnum_t btnum = cur->bc_btnum;
- int crc;
-
- mp = cur->bc_mp;
- crc = xfs_sb_version_hascrc(&mp->m_sb);
- agbp = cur->bc_private.a.agbp;
- agf = XFS_BUF_TO_AGF(agbp);
- agflen = be32_to_cpu(agf->agf_length);
+ int crc = xfs_sb_version_hascrc(&mp->m_sb);
if (crc) {
- sblock_ok = sblock_ok &&
- uuid_equal(&block->bb_u.s.bb_uuid,
- &mp->m_sb.sb_meta_uuid) &&
- block->bb_u.s.bb_blkno == cpu_to_be64(
- bp ? bp->b_bn : XFS_BUF_DADDR_NULL);
+ if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
+ return __this_address;
+ if (block->bb_u.s.bb_blkno !=
+ cpu_to_be64(bp ? bp->b_bn : XFS_BUF_DADDR_NULL))
+ return __this_address;
}
- sblock_ok = sblock_ok &&
- be32_to_cpu(block->bb_magic) == xfs_btree_magic(crc, btnum) &&
- be16_to_cpu(block->bb_level) == level &&
- be16_to_cpu(block->bb_numrecs) <=
- cur->bc_ops->get_maxrecs(cur, level) &&
- (block->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) ||
- be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) &&
- block->bb_u.s.bb_leftsib &&
- (block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK) ||
- be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) &&
- block->bb_u.s.bb_rightsib;
-
- if (unlikely(XFS_TEST_ERROR(!sblock_ok, mp,
+ if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(crc, btnum))
+ return __this_address;
+ if (be16_to_cpu(block->bb_level) != level)
+ return __this_address;
+ if (be16_to_cpu(block->bb_numrecs) >
+ cur->bc_ops->get_maxrecs(cur, level))
+ return __this_address;
+ if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
+ !xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_leftsib),
+ level + 1))
+ return __this_address;
+ if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) &&
+ !xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_rightsib),
+ level + 1))
+ return __this_address;
+
+ return NULL;
+}
+
+/* Check a short btree block header. */
+STATIC int
+xfs_btree_check_sblock(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ int level,
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ xfs_failaddr_t fa;
+
+ fa = __xfs_btree_check_sblock(cur, block, level, bp);
+ if (unlikely(XFS_TEST_ERROR(fa != NULL, mp,
XFS_ERRTAG_BTREE_CHECK_SBLOCK))) {
if (bp)
trace_xfs_btree_corrupt(bp, _RET_IP_);
@@ -177,59 +210,53 @@ xfs_btree_check_block(
return xfs_btree_check_sblock(cur, block, level, bp);
}
-/*
- * Check that (long) pointer is ok.
- */
-int /* error (0 or EFSCORRUPTED) */
+/* Check that this long pointer is valid and points within the fs. */
+bool
xfs_btree_check_lptr(
- struct xfs_btree_cur *cur, /* btree cursor */
- xfs_fsblock_t bno, /* btree block disk address */
- int level) /* btree block level */
+ struct xfs_btree_cur *cur,
+ xfs_fsblock_t fsbno,
+ int level)
{
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
- level > 0 &&
- bno != NULLFSBLOCK &&
- XFS_FSB_SANITY_CHECK(cur->bc_mp, bno));
- return 0;
+ if (level <= 0)
+ return false;
+ return xfs_verify_fsbno(cur->bc_mp, fsbno);
}
-#ifdef DEBUG
-/*
- * Check that (short) pointer is ok.
- */
-STATIC int /* error (0 or EFSCORRUPTED) */
+/* Check that this short pointer is valid and points within the AG. */
+bool
xfs_btree_check_sptr(
- struct xfs_btree_cur *cur, /* btree cursor */
- xfs_agblock_t bno, /* btree block disk address */
- int level) /* btree block level */
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t agbno,
+ int level)
{
- xfs_agblock_t agblocks = cur->bc_mp->m_sb.sb_agblocks;
-
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
- level > 0 &&
- bno != NULLAGBLOCK &&
- bno != 0 &&
- bno < agblocks);
- return 0;
+ if (level <= 0)
+ return false;
+ return xfs_verify_agbno(cur->bc_mp, cur->bc_private.a.agno, agbno);
}
+#ifdef DEBUG
/*
- * Check that block ptr is ok.
+ * Check that a given (indexed) btree pointer at a certain level of a
+ * btree is valid and doesn't point past where it should.
*/
-STATIC int /* error (0 or EFSCORRUPTED) */
+static int
xfs_btree_check_ptr(
- struct xfs_btree_cur *cur, /* btree cursor */
- union xfs_btree_ptr *ptr, /* btree block disk address */
- int index, /* offset from ptr to check */
- int level) /* btree block level */
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr,
+ int index,
+ int level)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
- return xfs_btree_check_lptr(cur,
- be64_to_cpu((&ptr->l)[index]), level);
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
+ xfs_btree_check_lptr(cur,
+ be64_to_cpu((&ptr->l)[index]), level));
} else {
- return xfs_btree_check_sptr(cur,
- be32_to_cpu((&ptr->s)[index]), level);
+ XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
+ xfs_btree_check_sptr(cur,
+ be32_to_cpu((&ptr->s)[index]), level));
}
+
+ return 0;
}
#endif
@@ -1027,7 +1054,7 @@ xfs_btree_setbuf(
}
}
-STATIC int
+bool
xfs_btree_ptr_is_null(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
@@ -1052,7 +1079,7 @@ xfs_btree_set_ptr_null(
/*
* Get/set/init sibling pointers
*/
-STATIC void
+void
xfs_btree_get_sibling(
struct xfs_btree_cur *cur,
struct xfs_btree_block *block,
@@ -2001,7 +2028,7 @@ error0:
}
/* Find the high key storage area from a regular key. */
-STATIC union xfs_btree_key *
+union xfs_btree_key *
xfs_btree_high_key_from_key(
struct xfs_btree_cur *cur,
union xfs_btree_key *key)
@@ -2075,7 +2102,7 @@ xfs_btree_get_node_keys(
}
/* Derive the keys for any btree block. */
-STATIC void
+void
xfs_btree_get_keys(
struct xfs_btree_cur *cur,
struct xfs_btree_block *block,
@@ -4914,3 +4941,15 @@ xfs_btree_count_blocks(
return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper,
blocks);
}
+
+/* Compare two btree pointers. */
+int64_t
+xfs_btree_diff_two_ptrs(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_ptr *a,
+ const union xfs_btree_ptr *b)
+{
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ return (int64_t)be64_to_cpu(a->l) - be64_to_cpu(b->l);
+ return (int64_t)be32_to_cpu(a->s) - be32_to_cpu(b->s);
+}
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index f2a88c3b1159..b57501c6f71d 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -255,6 +255,14 @@ typedef struct xfs_btree_cur
*/
#define XFS_BUF_TO_BLOCK(bp) ((struct xfs_btree_block *)((bp)->b_addr))
+/*
+ * Internal long and short btree block checks. They return NULL if the
+ * block is ok or the address of the failed check otherwise.
+ */
+xfs_failaddr_t __xfs_btree_check_lblock(struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block, int level, struct xfs_buf *bp);
+xfs_failaddr_t __xfs_btree_check_sblock(struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block, int level, struct xfs_buf *bp);
/*
* Check that block header is ok.
@@ -269,10 +277,19 @@ xfs_btree_check_block(
/*
* Check that (long) pointer is ok.
*/
-int /* error (0 or EFSCORRUPTED) */
+bool					/* true if the pointer is valid */
xfs_btree_check_lptr(
struct xfs_btree_cur *cur, /* btree cursor */
- xfs_fsblock_t ptr, /* btree block disk address */
+ xfs_fsblock_t fsbno, /* btree block disk address */
+ int level); /* btree block level */
+
+/*
+ * Check that (short) pointer is ok.
+ */
+bool					/* true if the pointer is valid */
+xfs_btree_check_sptr(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_agblock_t agbno, /* btree block disk address */
int level); /* btree block level */
/*
@@ -517,5 +534,16 @@ int xfs_btree_lookup_get_block(struct xfs_btree_cur *cur, int level,
union xfs_btree_ptr *pp, struct xfs_btree_block **blkp);
struct xfs_btree_block *xfs_btree_get_block(struct xfs_btree_cur *cur,
int level, struct xfs_buf **bpp);
+bool xfs_btree_ptr_is_null(struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr);
+int64_t xfs_btree_diff_two_ptrs(struct xfs_btree_cur *cur,
+ const union xfs_btree_ptr *a,
+ const union xfs_btree_ptr *b);
+void xfs_btree_get_sibling(struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ union xfs_btree_ptr *ptr, int lr);
+void xfs_btree_get_keys(struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block, union xfs_btree_key *key);
+union xfs_btree_key *xfs_btree_high_key_from_key(struct xfs_btree_cur *cur,
+ union xfs_btree_key *key);
#endif /* __XFS_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 6d4335815c3f..651611530d2f 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -1466,6 +1466,7 @@ xfs_da3_node_lookup_int(
int max;
int error;
int retval;
+ unsigned int expected_level = 0;
struct xfs_inode *dp = state->args->dp;
args = state->args;
@@ -1474,7 +1475,7 @@ xfs_da3_node_lookup_int(
* Descend thru the B-tree searching each level for the right
* node to use, until the right hashval is found.
*/
- blkno = (args->whichfork == XFS_DATA_FORK)? args->geo->leafblk : 0;
+ blkno = args->geo->leafblk;
for (blk = &state->path.blk[0], state->path.active = 1;
state->path.active <= XFS_DA_NODE_MAXDEPTH;
blk++, state->path.active++) {
@@ -1517,6 +1518,18 @@ xfs_da3_node_lookup_int(
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
+ /* Tree taller than we can handle; bail out! */
+ if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
+ return -EFSCORRUPTED;
+
+ /* Check the level from the root. */
+ if (blkno == args->geo->leafblk)
+ expected_level = nodehdr.level - 1;
+ else if (expected_level != nodehdr.level)
+ return -EFSCORRUPTED;
+ else
+ expected_level--;
+
max = nodehdr.count;
blk->hashval = be32_to_cpu(btree[max - 1].hashval);
@@ -1562,8 +1575,15 @@ xfs_da3_node_lookup_int(
blk->index = probe;
blkno = be32_to_cpu(btree[probe].before);
}
+
+ /* We can't point back to the root. */
+ if (blkno == args->geo->leafblk)
+ return -EFSCORRUPTED;
}
+ if (expected_level != 0)
+ return -EFSCORRUPTED;
+
/*
* A leaf block that ends in the hashval that we are interested in
* (final hashval == search hashval) means that the next block may
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index ccf9783fd3f0..e10778c102ea 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -30,6 +30,8 @@
#include "xfs_bmap.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
+#include "xfs_ialloc.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
@@ -38,7 +40,9 @@ struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
/*
* Convert inode mode to directory entry filetype
*/
-unsigned char xfs_mode_to_ftype(int mode)
+unsigned char
+xfs_mode_to_ftype(
+ int mode)
{
switch (mode & S_IFMT) {
case S_IFREG:
@@ -202,22 +206,8 @@ xfs_dir_ino_validate(
xfs_mount_t *mp,
xfs_ino_t ino)
{
- xfs_agblock_t agblkno;
- xfs_agino_t agino;
- xfs_agnumber_t agno;
- int ino_ok;
- int ioff;
-
- agno = XFS_INO_TO_AGNO(mp, ino);
- agblkno = XFS_INO_TO_AGBNO(mp, ino);
- ioff = XFS_INO_TO_OFFSET(mp, ino);
- agino = XFS_OFFBNO_TO_AGINO(mp, agblkno, ioff);
- ino_ok =
- agno < mp->m_sb.sb_agcount &&
- agblkno < mp->m_sb.sb_agblocks &&
- agblkno != 0 &&
- ioff < (1 << mp->m_sb.sb_inopblog) &&
- XFS_AGINO_TO_INO(mp, agno, agino) == ino;
+ bool ino_ok = xfs_verify_dir_ino(mp, ino);
+
if (unlikely(XFS_TEST_ERROR(!ino_ok, mp, XFS_ERRTAG_DIR_INO_VALIDATE))) {
xfs_warn(mp, "Invalid inode number 0x%Lx",
(unsigned long long) ino);
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 21c8f8bf94d5..1a8f2cf977ca 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -324,4 +324,21 @@ xfs_dir2_leaf_tail_p(struct xfs_da_geometry *geo, struct xfs_dir2_leaf *lp)
sizeof(struct xfs_dir2_leaf_tail));
}
+/*
+ * The Linux API doesn't pass the total size of the buffer that we
+ * read into down to the filesystem. With the filldir concept
+ * it's not needed for correctness, but the XFS dir2 leaf
+ * code wants an estimate of the buffer size to calculate its
+ * readahead window and size the buffers used for mapping to
+ * physical blocks.
+ *
+ * Try to give it an estimate that's good enough, maybe at some
+ * point we can change the ->readdir prototype to include the
+ * buffer size. For now we use the current glibc buffer size;
+ * musl libc hardcodes 2k and dietlibc uses PAGE_SIZE.
+ */
+#define XFS_READDIR_BUFSIZE (32768)
+
+unsigned char xfs_dir3_get_dtype(struct xfs_mount *mp, uint8_t filetype);
+
#endif /* __XFS_DIR2_H__ */
diff --git a/fs/xfs/libxfs/xfs_errortag.h b/fs/xfs/libxfs/xfs_errortag.h
new file mode 100644
index 000000000000..bc1789d95152
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_errortag.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
+ * Copyright (C) 2017 Oracle.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __XFS_ERRORTAG_H_
+#define __XFS_ERRORTAG_H_
+
+/*
+ * error injection tags - the labels can be anything you want
+ * but each tag should have its own unique number
+ */
+
+#define XFS_ERRTAG_NOERROR 0
+#define XFS_ERRTAG_IFLUSH_1 1
+#define XFS_ERRTAG_IFLUSH_2 2
+#define XFS_ERRTAG_IFLUSH_3 3
+#define XFS_ERRTAG_IFLUSH_4 4
+#define XFS_ERRTAG_IFLUSH_5 5
+#define XFS_ERRTAG_IFLUSH_6 6
+#define XFS_ERRTAG_DA_READ_BUF 7
+#define XFS_ERRTAG_BTREE_CHECK_LBLOCK 8
+#define XFS_ERRTAG_BTREE_CHECK_SBLOCK 9
+#define XFS_ERRTAG_ALLOC_READ_AGF 10
+#define XFS_ERRTAG_IALLOC_READ_AGI 11
+#define XFS_ERRTAG_ITOBP_INOTOBP 12
+#define XFS_ERRTAG_IUNLINK 13
+#define XFS_ERRTAG_IUNLINK_REMOVE 14
+#define XFS_ERRTAG_DIR_INO_VALIDATE 15
+#define XFS_ERRTAG_BULKSTAT_READ_CHUNK 16
+#define XFS_ERRTAG_IODONE_IOERR 17
+#define XFS_ERRTAG_STRATREAD_IOERR 18
+#define XFS_ERRTAG_STRATCMPL_IOERR 19
+#define XFS_ERRTAG_DIOWRITE_IOERR 20
+#define XFS_ERRTAG_BMAPIFORMAT 21
+#define XFS_ERRTAG_FREE_EXTENT 22
+#define XFS_ERRTAG_RMAP_FINISH_ONE 23
+#define XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE 24
+#define XFS_ERRTAG_REFCOUNT_FINISH_ONE 25
+#define XFS_ERRTAG_BMAP_FINISH_ONE 26
+#define XFS_ERRTAG_AG_RESV_CRITICAL 27
+/*
+ * DEBUG mode instrumentation to test and/or trigger delayed allocation
+ * block killing in the event of failed writes. When enabled, all
+ * buffered writes are silently dropped and handled as if they failed.
+ * All delalloc blocks in the range of the write (including pre-existing
+ * delalloc blocks!) are tossed as part of the write failure error
+ * handling sequence.
+ */
+#define XFS_ERRTAG_DROP_WRITES 28
+#define XFS_ERRTAG_LOG_BAD_CRC 29
+#define XFS_ERRTAG_LOG_ITEM_PIN 30
+#define XFS_ERRTAG_BUF_LRU_REF 31
+#define XFS_ERRTAG_MAX 32
+
+/*
+ * Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
+ */
+#define XFS_RANDOM_DEFAULT 100
+#define XFS_RANDOM_IFLUSH_1 XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_2 XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_3 XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_4 XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_5 XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IFLUSH_6 XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_DA_READ_BUF XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_BTREE_CHECK_LBLOCK (XFS_RANDOM_DEFAULT/4)
+#define XFS_RANDOM_BTREE_CHECK_SBLOCK XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_ALLOC_READ_AGF XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IALLOC_READ_AGI XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_ITOBP_INOTOBP XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IUNLINK XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IUNLINK_REMOVE XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_DIR_INO_VALIDATE XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_BULKSTAT_READ_CHUNK XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_IODONE_IOERR (XFS_RANDOM_DEFAULT/10)
+#define XFS_RANDOM_STRATREAD_IOERR (XFS_RANDOM_DEFAULT/10)
+#define XFS_RANDOM_STRATCMPL_IOERR (XFS_RANDOM_DEFAULT/10)
+#define XFS_RANDOM_DIOWRITE_IOERR (XFS_RANDOM_DEFAULT/10)
+#define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_FREE_EXTENT 1
+#define XFS_RANDOM_RMAP_FINISH_ONE 1
+#define XFS_RANDOM_REFCOUNT_CONTINUE_UPDATE 1
+#define XFS_RANDOM_REFCOUNT_FINISH_ONE 1
+#define XFS_RANDOM_BMAP_FINISH_ONE 1
+#define XFS_RANDOM_AG_RESV_CRITICAL 4
+#define XFS_RANDOM_DROP_WRITES 1
+#define XFS_RANDOM_LOG_BAD_CRC 1
+#define XFS_RANDOM_LOG_ITEM_PIN 1
+#define XFS_RANDOM_BUF_LRU_REF 2
+
+#endif /* __XFS_ERRORTAG_H_ */
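The random factors above are easiest to read as an injection probability: a factor of 1 fires on every evaluation of the tag, a factor of 4 roughly one time in four, and so on. As a rough, hypothetical sketch (not the kernel's actual error-injection plumbing), an armed tag with factor N could be consumed like this:

        #include <stdbool.h>
        #include <stdlib.h>

        /* Hypothetical: decide whether an armed tag with the given factor fires. */
        static bool errortag_should_fire(unsigned int factor)
        {
                if (factor == 0)                /* tag not armed */
                        return false;
                return (rand() % factor) == 0;  /* factor 1 == always, 2 == half the time */
        }

In-kernel the draw would come from a kernel PRNG rather than rand(), but the "one in N" reading of the table above is the same.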
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 23229f0c5b15..1acb584fc5f7 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -315,6 +315,11 @@ static inline bool xfs_sb_good_version(struct xfs_sb *sbp)
return false;
}
+static inline bool xfs_sb_version_hasrealtime(struct xfs_sb *sbp)
+{
+ return sbp->sb_rblocks > 0;
+}
+
/*
* Detect a mismatched features2 field. Older kernels read/wrote
* this into the wrong slot, so to be safe we keep them in sync.
@@ -500,12 +505,12 @@ xfs_sb_has_incompat_log_feature(
/*
* V5 superblock specific feature checks
*/
-static inline int xfs_sb_version_hascrc(struct xfs_sb *sbp)
+static inline bool xfs_sb_version_hascrc(struct xfs_sb *sbp)
{
return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
}
-static inline int xfs_sb_version_has_pquotino(struct xfs_sb *sbp)
+static inline bool xfs_sb_version_has_pquotino(struct xfs_sb *sbp)
{
return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
}
@@ -518,7 +523,7 @@ static inline int xfs_sb_version_hasftype(struct xfs_sb *sbp)
(sbp->sb_features2 & XFS_SB_VERSION2_FTYPE));
}
-static inline int xfs_sb_version_hasfinobt(xfs_sb_t *sbp)
+static inline bool xfs_sb_version_hasfinobt(xfs_sb_t *sbp)
{
return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) &&
(sbp->sb_features_ro_compat & XFS_SB_FEAT_RO_COMPAT_FINOBT);
@@ -941,7 +946,7 @@ typedef enum xfs_dinode_fmt {
XFS_DINODE_FMT_LOCAL, /* bulk data */
XFS_DINODE_FMT_EXTENTS, /* struct xfs_bmbt_rec */
XFS_DINODE_FMT_BTREE, /* struct xfs_bmdr_block */
- XFS_DINODE_FMT_UUID /* uuid_t */
+ XFS_DINODE_FMT_UUID /* added long ago, but never used */
} xfs_dinode_fmt_t;
/*
@@ -1142,7 +1147,7 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
* Dquot and dquot block format definitions
*/
#define XFS_DQUOT_MAGIC 0x4451 /* 'DQ' */
-#define XFS_DQUOT_VERSION (u_int8_t)0x01 /* latest version number */
+#define XFS_DQUOT_VERSION (uint8_t)0x01 /* latest version number */
/*
* This is the main portion of the on-disk representation of quota
@@ -1548,10 +1553,6 @@ typedef struct xfs_bmbt_rec {
typedef uint64_t xfs_bmbt_rec_base_t; /* use this for casts */
typedef xfs_bmbt_rec_t xfs_bmdr_rec_t;
-typedef struct xfs_bmbt_rec_host {
- uint64_t l0, l1;
-} xfs_bmbt_rec_host_t;
-
/*
* Values and macros for delayed-allocation startblock fields.
*/
@@ -1577,24 +1578,6 @@ static inline xfs_filblks_t startblockval(xfs_fsblock_t x)
}
/*
- * Possible extent states.
- */
-typedef enum {
- XFS_EXT_NORM, XFS_EXT_UNWRITTEN,
-} xfs_exntst_t;
-
-/*
- * Incore version of above.
- */
-typedef struct xfs_bmbt_irec
-{
- xfs_fileoff_t br_startoff; /* starting file offset */
- xfs_fsblock_t br_startblock; /* starting block number */
- xfs_filblks_t br_blockcount; /* number of blocks */
- xfs_exntst_t br_state; /* extent state */
-} xfs_bmbt_irec_t;
-
-/*
* Key structure for non-leaf levels of the tree.
*/
typedef struct xfs_bmbt_key {
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 8c61f21535d4..b90924104596 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -468,6 +468,82 @@ typedef struct xfs_swapext
#define XFS_FSOP_GOING_FLAGS_LOGFLUSH 0x1 /* flush log but not data */
#define XFS_FSOP_GOING_FLAGS_NOLOGFLUSH 0x2 /* don't flush log nor data */
+/* metadata scrubbing */
+struct xfs_scrub_metadata {
+ __u32 sm_type; /* What to check? */
+ __u32 sm_flags; /* flags; see below. */
+ __u64 sm_ino; /* inode number. */
+ __u32 sm_gen; /* inode generation. */
+ __u32 sm_agno; /* ag number. */
+ __u64 sm_reserved[5]; /* pad to 64 bytes */
+};
+
+/*
+ * Metadata types and flags for scrub operation.
+ */
+
+/* Scrub subcommands. */
+#define XFS_SCRUB_TYPE_PROBE 0 /* presence test ioctl */
+#define XFS_SCRUB_TYPE_SB 1 /* superblock */
+#define XFS_SCRUB_TYPE_AGF 2 /* AG free header */
+#define XFS_SCRUB_TYPE_AGFL 3 /* AG free list */
+#define XFS_SCRUB_TYPE_AGI 4 /* AG inode header */
+#define XFS_SCRUB_TYPE_BNOBT 5 /* freesp by block btree */
+#define XFS_SCRUB_TYPE_CNTBT 6 /* freesp by length btree */
+#define XFS_SCRUB_TYPE_INOBT 7 /* inode btree */
+#define XFS_SCRUB_TYPE_FINOBT 8 /* free inode btree */
+#define XFS_SCRUB_TYPE_RMAPBT 9 /* reverse mapping btree */
+#define XFS_SCRUB_TYPE_REFCNTBT 10 /* reference count btree */
+#define XFS_SCRUB_TYPE_INODE 11 /* inode record */
+#define XFS_SCRUB_TYPE_BMBTD 12 /* data fork block mapping */
+#define XFS_SCRUB_TYPE_BMBTA 13 /* attr fork block mapping */
+#define XFS_SCRUB_TYPE_BMBTC 14 /* CoW fork block mapping */
+#define XFS_SCRUB_TYPE_DIR 15 /* directory */
+#define XFS_SCRUB_TYPE_XATTR 16 /* extended attribute */
+#define XFS_SCRUB_TYPE_SYMLINK 17 /* symbolic link */
+#define XFS_SCRUB_TYPE_PARENT 18 /* parent pointers */
+#define XFS_SCRUB_TYPE_RTBITMAP 19 /* realtime bitmap */
+#define XFS_SCRUB_TYPE_RTSUM 20 /* realtime summary */
+#define XFS_SCRUB_TYPE_UQUOTA 21 /* user quotas */
+#define XFS_SCRUB_TYPE_GQUOTA 22 /* group quotas */
+#define XFS_SCRUB_TYPE_PQUOTA 23 /* project quotas */
+
+/* Number of scrub subcommands. */
+#define XFS_SCRUB_TYPE_NR 24
+
+/* i: Repair this metadata. */
+#define XFS_SCRUB_IFLAG_REPAIR (1 << 0)
+
+/* o: Metadata object needs repair. */
+#define XFS_SCRUB_OFLAG_CORRUPT (1 << 1)
+
+/*
+ * o: Metadata object could be optimized. It's not corrupt, but
+ * we could improve on it somehow.
+ */
+#define XFS_SCRUB_OFLAG_PREEN (1 << 2)
+
+/* o: Cross-referencing failed. */
+#define XFS_SCRUB_OFLAG_XFAIL (1 << 3)
+
+/* o: Metadata object disagrees with cross-referenced metadata. */
+#define XFS_SCRUB_OFLAG_XCORRUPT (1 << 4)
+
+/* o: Scan was not complete. */
+#define XFS_SCRUB_OFLAG_INCOMPLETE (1 << 5)
+
+/* o: Metadata object looked funny but isn't corrupt. */
+#define XFS_SCRUB_OFLAG_WARNING (1 << 6)
+
+#define XFS_SCRUB_FLAGS_IN (XFS_SCRUB_IFLAG_REPAIR)
+#define XFS_SCRUB_FLAGS_OUT (XFS_SCRUB_OFLAG_CORRUPT | \
+ XFS_SCRUB_OFLAG_PREEN | \
+ XFS_SCRUB_OFLAG_XFAIL | \
+ XFS_SCRUB_OFLAG_XCORRUPT | \
+ XFS_SCRUB_OFLAG_INCOMPLETE | \
+ XFS_SCRUB_OFLAG_WARNING)
+#define XFS_SCRUB_FLAGS_ALL (XFS_SCRUB_FLAGS_IN | XFS_SCRUB_FLAGS_OUT)
+
/*
* ioctl limits
*/
@@ -511,6 +587,7 @@ typedef struct xfs_swapext
#define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64)
#define XFS_IOC_FREE_EOFBLOCKS _IOR ('X', 58, struct xfs_fs_eofblocks)
/* XFS_IOC_GETFSMAP ------ hoisted 59 */
+#define XFS_IOC_SCRUB_METADATA _IOWR('X', 60, struct xfs_scrub_metadata)
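To illustrate how the new scrub interface above is meant to be driven, here is a minimal userspace sketch. It assumes a file descriptor on an XFS filesystem and a header that provides struct xfs_scrub_metadata together with the XFS_SCRUB_* and XFS_IOC_SCRUB_METADATA definitions; the helper name and the error handling are illustrative only.

        #include <string.h>
        #include <stdio.h>
        #include <sys/ioctl.h>

        /* Hypothetical helper: scrub one AG's free space header (AGF). */
        static int scrub_agf(int fd, unsigned int agno)
        {
                struct xfs_scrub_metadata sm;

                memset(&sm, 0, sizeof(sm));
                sm.sm_type = XFS_SCRUB_TYPE_AGF;        /* what to check */
                sm.sm_agno = agno;                      /* which AG */

                if (ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm) < 0)
                        return -1;                      /* scrub couldn't run */
                if (sm.sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                        fprintf(stderr, "AGF %u needs repair\n", agno);
                return 0;
        }

Output state comes back in sm_flags, so a caller can distinguish "corrupt", "could be optimized" and "scan incomplete" from a successful, clean pass.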
/*
* ioctl commands that replace IRIX syssgi()'s
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index dfd643909f85..de3f04a98656 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -31,6 +31,7 @@
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_cksum.h"
@@ -2664,3 +2665,93 @@ xfs_ialloc_pagi_init(
xfs_trans_brelse(tp, bp);
return 0;
}
+
+/* Calculate the first and last possible inode number in an AG. */
+void
+xfs_ialloc_agino_range(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_agino_t *first,
+ xfs_agino_t *last)
+{
+ xfs_agblock_t bno;
+ xfs_agblock_t eoag;
+
+ eoag = xfs_ag_block_count(mp, agno);
+
+ /*
+ * Calculate the first inode, which will be in the first
+ * cluster-aligned block after the AGFL.
+ */
+ bno = round_up(XFS_AGFL_BLOCK(mp) + 1,
+ xfs_ialloc_cluster_alignment(mp));
+ *first = XFS_OFFBNO_TO_AGINO(mp, bno, 0);
+
+ /*
+ * Calculate the last inode, which will be at the end of the
+ * last (aligned) cluster that can be allocated in the AG.
+ */
+ bno = round_down(eoag, xfs_ialloc_cluster_alignment(mp));
+ *last = XFS_OFFBNO_TO_AGINO(mp, bno, 0) - 1;
+}
+
+/*
+ * Verify that an AG inode number pointer neither points outside the AG
+ * nor points at static metadata.
+ */
+bool
+xfs_verify_agino(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_agino_t agino)
+{
+ xfs_agino_t first;
+ xfs_agino_t last;
+
+ xfs_ialloc_agino_range(mp, agno, &first, &last);
+ return agino >= first && agino <= last;
+}
+
+/*
+ * Verify that an FS inode number pointer neither points outside the
+ * filesystem nor points at static AG metadata.
+ */
+bool
+xfs_verify_ino(
+ struct xfs_mount *mp,
+ xfs_ino_t ino)
+{
+ xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ino);
+ xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
+
+ if (agno >= mp->m_sb.sb_agcount)
+ return false;
+ if (XFS_AGINO_TO_INO(mp, agno, agino) != ino)
+ return false;
+ return xfs_verify_agino(mp, agno, agino);
+}
+
+/* Is this an internal inode number? */
+bool
+xfs_internal_inum(
+ struct xfs_mount *mp,
+ xfs_ino_t ino)
+{
+ return ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
+ (xfs_sb_version_hasquota(&mp->m_sb) &&
+ xfs_is_quota_inode(&mp->m_sb, ino));
+}
+
+/*
+ * Verify that a directory entry's inode number doesn't point at an internal
+ * inode, empty space, or static AG metadata.
+ */
+bool
+xfs_verify_dir_ino(
+ struct xfs_mount *mp,
+ xfs_ino_t ino)
+{
+ if (xfs_internal_inum(mp, ino))
+ return false;
+ return xfs_verify_ino(mp, ino);
+}
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index b32cfb5aeb5b..d2bdcd5e7312 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -173,5 +173,12 @@ void xfs_inobt_btrec_to_irec(struct xfs_mount *mp, union xfs_btree_rec *rec,
struct xfs_inobt_rec_incore *irec);
int xfs_ialloc_cluster_alignment(struct xfs_mount *mp);
+void xfs_ialloc_agino_range(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_agino_t *first, xfs_agino_t *last);
+bool xfs_verify_agino(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_agino_t agino);
+bool xfs_verify_ino(struct xfs_mount *mp, xfs_ino_t ino);
+bool xfs_internal_inum(struct xfs_mount *mp, xfs_ino_t ino);
+bool xfs_verify_dir_ino(struct xfs_mount *mp, xfs_ino_t ino);
#endif /* __XFS_IALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_iext_tree.c b/fs/xfs/libxfs/xfs_iext_tree.c
new file mode 100644
index 000000000000..343a94246f5b
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_iext_tree.c
@@ -0,0 +1,1043 @@
+/*
+ * Copyright (c) 2017 Christoph Hellwig.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/cache.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include "xfs.h"
+#include "xfs_format.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_inode.h"
+#include "xfs_inode_fork.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_trace.h"
+
+/*
+ * In-core extent record layout:
+ *
+ * +-------+----------------------------+
+ * | 00:53 | all 54 bits of startoff |
+ * | 54:63 | low 10 bits of startblock |
+ * +-------+----------------------------+
+ * | 00:20 | all 21 bits of length |
+ * | 21 | unwritten extent bit |
+ * | 22:63 | high 42 bits of startblock |
+ * +-------+----------------------------+
+ */
+#define XFS_IEXT_STARTOFF_MASK xfs_mask64lo(BMBT_STARTOFF_BITLEN)
+#define XFS_IEXT_LENGTH_MASK xfs_mask64lo(BMBT_BLOCKCOUNT_BITLEN)
+#define XFS_IEXT_STARTBLOCK_MASK xfs_mask64lo(BMBT_STARTBLOCK_BITLEN)
+
+struct xfs_iext_rec {
+ uint64_t lo;
+ uint64_t hi;
+};
+
+/*
+ * Given that the length can't be zero, only an empty hi value indicates an
+ * unused record.
+ */
+static bool xfs_iext_rec_is_empty(struct xfs_iext_rec *rec)
+{
+ return rec->hi == 0;
+}
+
+static inline void xfs_iext_rec_clear(struct xfs_iext_rec *rec)
+{
+ rec->lo = 0;
+ rec->hi = 0;
+}
+
+static void
+xfs_iext_set(
+ struct xfs_iext_rec *rec,
+ struct xfs_bmbt_irec *irec)
+{
+ ASSERT((irec->br_startoff & ~XFS_IEXT_STARTOFF_MASK) == 0);
+ ASSERT((irec->br_blockcount & ~XFS_IEXT_LENGTH_MASK) == 0);
+ ASSERT((irec->br_startblock & ~XFS_IEXT_STARTBLOCK_MASK) == 0);
+
+ rec->lo = irec->br_startoff & XFS_IEXT_STARTOFF_MASK;
+ rec->hi = irec->br_blockcount & XFS_IEXT_LENGTH_MASK;
+
+ rec->lo |= (irec->br_startblock << 54);
+ rec->hi |= ((irec->br_startblock & ~xfs_mask64lo(10)) << (22 - 10));
+
+ if (irec->br_state == XFS_EXT_UNWRITTEN)
+ rec->hi |= (1 << 21);
+}
+
+static void
+xfs_iext_get(
+ struct xfs_bmbt_irec *irec,
+ struct xfs_iext_rec *rec)
+{
+ irec->br_startoff = rec->lo & XFS_IEXT_STARTOFF_MASK;
+ irec->br_blockcount = rec->hi & XFS_IEXT_LENGTH_MASK;
+
+ irec->br_startblock = rec->lo >> 54;
+ irec->br_startblock |= (rec->hi & xfs_mask64hi(42)) >> (22 - 10);
+
+ if (rec->hi & (1 << 21))
+ irec->br_state = XFS_EXT_UNWRITTEN;
+ else
+ irec->br_state = XFS_EXT_NORM;
+}
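To make the packed layout above concrete, the following standalone userspace sketch repeats the same shifts and masks with the bit widths written as literals (54-bit startoff, 52-bit startblock, 21-bit length) and checks that one record survives a round trip. It is an illustration of the encoding only, not code from the patch; the kernel code above does the same thing via the BMBT_*_BITLEN constants.

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        #define MASK_LO(n)      ((1ULL << (n)) - 1)     /* low n bits set */

        struct rec { uint64_t lo, hi; };

        /* Pack startoff (54 bits), startblock (52 bits), len (21 bits), unwritten flag. */
        static struct rec pack(uint64_t off, uint64_t block, uint64_t len, int unwritten)
        {
                struct rec r;

                r.lo = (off & MASK_LO(54)) | ((block & MASK_LO(10)) << 54);
                r.hi = (len & MASK_LO(21)) | ((uint64_t)!!unwritten << 21) |
                       ((block >> 10) << 22);
                return r;
        }

        static void unpack(struct rec r, uint64_t *off, uint64_t *block,
                           uint64_t *len, int *unwritten)
        {
                *off = r.lo & MASK_LO(54);
                *len = r.hi & MASK_LO(21);
                *block = (r.lo >> 54) | ((r.hi >> 22) << 10);
                *unwritten = (r.hi >> 21) & 1;
        }

        int main(void)
        {
                uint64_t off, block, len;
                int unwr;
                struct rec r = pack(1000, 0x123456789abULL, 8, 1);

                unpack(r, &off, &block, &len, &unwr);
                assert(off == 1000 && block == 0x123456789abULL && len == 8 && unwr == 1);
                printf("round trip ok\n");
                return 0;
        }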
+
+enum {
+ NODE_SIZE = 256,
+ KEYS_PER_NODE = NODE_SIZE / (sizeof(uint64_t) + sizeof(void *)),
+ RECS_PER_LEAF = (NODE_SIZE - (2 * sizeof(struct xfs_iext_leaf *))) /
+ sizeof(struct xfs_iext_rec),
+};
+
+/*
+ * In-core extent btree block layout:
+ *
+ * There are two types of blocks in the btree: leaf and inner (non-leaf) blocks.
+ *
+ * The leaf blocks are made up of %RECS_PER_LEAF extent records, each of which
+ * contains the startoffset, blockcount, startblock and unwritten extent flag
+ * (see above for the exact format), followed by pointers to the previous and
+ * next leaf blocks (if there are any).
+ *
+ * The inner (non-leaf) blocks first contain KEYS_PER_NODE lookup keys, followed
+ * by an equal number of pointers to the btree blocks at the next lower level.
+ *
+ * +-------+-------+-------+-------+-------+----------+----------+
+ * Leaf: | rec 1 | rec 2 | rec 3 | rec 4 | rec N | prev-ptr | next-ptr |
+ * +-------+-------+-------+-------+-------+----------+----------+
+ *
+ * +-------+-------+-------+-------+-------+-------+------+-------+
+ * Inner: | key 1 | key 2 | key 3 | key N | ptr 1 | ptr 2 | ptr3 | ptr N |
+ * +-------+-------+-------+-------+-------+-------+------+-------+
+ */
+struct xfs_iext_node {
+ uint64_t keys[KEYS_PER_NODE];
+#define XFS_IEXT_KEY_INVALID (1ULL << 63)
+ void *ptrs[KEYS_PER_NODE];
+};
+
+struct xfs_iext_leaf {
+ struct xfs_iext_rec recs[RECS_PER_LEAF];
+ struct xfs_iext_leaf *prev;
+ struct xfs_iext_leaf *next;
+};
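For a sense of scale: on a typical 64-bit build, where a key and a pointer are 8 bytes each and an xfs_iext_rec is 16 bytes, the 256-byte NODE_SIZE above gives KEYS_PER_NODE = 256 / 16 = 16 keys (and child pointers) per inner node, and RECS_PER_LEAF = (256 - 2 * 8) / 16 = 15 records per leaf. A throwaway sketch of the same arithmetic, assuming those type sizes:

        #include <assert.h>
        #include <stdint.h>

        struct rec { uint64_t lo, hi; };        /* same shape as xfs_iext_rec */

        int main(void)
        {
                /* 8-byte key + 8-byte child pointer per slot in an inner node */
                assert(256 / (sizeof(uint64_t) + sizeof(void *)) == 16);
                /* leaf: records plus two sibling pointers must fit in 256 bytes */
                assert((256 - 2 * sizeof(void *)) / sizeof(struct rec) == 15);
                return 0;
        }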
+
+inline xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp)
+{
+ return ifp->if_bytes / sizeof(struct xfs_iext_rec);
+}
+
+static inline int xfs_iext_max_recs(struct xfs_ifork *ifp)
+{
+ if (ifp->if_height == 1)
+ return xfs_iext_count(ifp);
+ return RECS_PER_LEAF;
+}
+
+static inline struct xfs_iext_rec *cur_rec(struct xfs_iext_cursor *cur)
+{
+ return &cur->leaf->recs[cur->pos];
+}
+
+static inline bool xfs_iext_valid(struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur)
+{
+ if (!cur->leaf)
+ return false;
+ if (cur->pos < 0 || cur->pos >= xfs_iext_max_recs(ifp))
+ return false;
+ if (xfs_iext_rec_is_empty(cur_rec(cur)))
+ return false;
+ return true;
+}
+
+static void *
+xfs_iext_find_first_leaf(
+ struct xfs_ifork *ifp)
+{
+ struct xfs_iext_node *node = ifp->if_u1.if_root;
+ int height;
+
+ if (!ifp->if_height)
+ return NULL;
+
+ for (height = ifp->if_height; height > 1; height--) {
+ node = node->ptrs[0];
+ ASSERT(node);
+ }
+
+ return node;
+}
+
+static void *
+xfs_iext_find_last_leaf(
+ struct xfs_ifork *ifp)
+{
+ struct xfs_iext_node *node = ifp->if_u1.if_root;
+ int height, i;
+
+ if (!ifp->if_height)
+ return NULL;
+
+ for (height = ifp->if_height; height > 1; height--) {
+ for (i = 1; i < KEYS_PER_NODE; i++)
+ if (!node->ptrs[i])
+ break;
+ node = node->ptrs[i - 1];
+ ASSERT(node);
+ }
+
+ return node;
+}
+
+void
+xfs_iext_first(
+ struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur)
+{
+ cur->pos = 0;
+ cur->leaf = xfs_iext_find_first_leaf(ifp);
+}
+
+void
+xfs_iext_last(
+ struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur)
+{
+ int i;
+
+ cur->leaf = xfs_iext_find_last_leaf(ifp);
+ if (!cur->leaf) {
+ cur->pos = 0;
+ return;
+ }
+
+ for (i = 1; i < xfs_iext_max_recs(ifp); i++) {
+ if (xfs_iext_rec_is_empty(&cur->leaf->recs[i]))
+ break;
+ }
+ cur->pos = i - 1;
+}
+
+void
+xfs_iext_next(
+ struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur)
+{
+ if (!cur->leaf) {
+ ASSERT(cur->pos <= 0 || cur->pos >= RECS_PER_LEAF);
+ xfs_iext_first(ifp, cur);
+ return;
+ }
+
+ ASSERT(cur->pos >= 0);
+ ASSERT(cur->pos < xfs_iext_max_recs(ifp));
+
+ cur->pos++;
+ if (ifp->if_height > 1 && !xfs_iext_valid(ifp, cur) &&
+ cur->leaf->next) {
+ cur->leaf = cur->leaf->next;
+ cur->pos = 0;
+ }
+}
+
+void
+xfs_iext_prev(
+ struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur)
+{
+ if (!cur->leaf) {
+ ASSERT(cur->pos <= 0 || cur->pos >= RECS_PER_LEAF);
+ xfs_iext_last(ifp, cur);
+ return;
+ }
+
+ ASSERT(cur->pos >= 0);
+ ASSERT(cur->pos <= RECS_PER_LEAF);
+
+recurse:
+ do {
+ cur->pos--;
+ if (xfs_iext_valid(ifp, cur))
+ return;
+ } while (cur->pos > 0);
+
+ if (ifp->if_height > 1 && cur->leaf->prev) {
+ cur->leaf = cur->leaf->prev;
+ cur->pos = RECS_PER_LEAF;
+ goto recurse;
+ }
+}
+
+static inline int
+xfs_iext_key_cmp(
+ struct xfs_iext_node *node,
+ int n,
+ xfs_fileoff_t offset)
+{
+ if (node->keys[n] > offset)
+ return 1;
+ if (node->keys[n] < offset)
+ return -1;
+ return 0;
+}
+
+static inline int
+xfs_iext_rec_cmp(
+ struct xfs_iext_rec *rec,
+ xfs_fileoff_t offset)
+{
+ uint64_t rec_offset = rec->lo & XFS_IEXT_STARTOFF_MASK;
+ u32 rec_len = rec->hi & XFS_IEXT_LENGTH_MASK;
+
+ if (rec_offset > offset)
+ return 1;
+ if (rec_offset + rec_len <= offset)
+ return -1;
+ return 0;
+}
+
+static void *
+xfs_iext_find_level(
+ struct xfs_ifork *ifp,
+ xfs_fileoff_t offset,
+ int level)
+{
+ struct xfs_iext_node *node = ifp->if_u1.if_root;
+ int height, i;
+
+ if (!ifp->if_height)
+ return NULL;
+
+ for (height = ifp->if_height; height > level; height--) {
+ for (i = 1; i < KEYS_PER_NODE; i++)
+ if (xfs_iext_key_cmp(node, i, offset) > 0)
+ break;
+
+ node = node->ptrs[i - 1];
+ if (!node)
+ break;
+ }
+
+ return node;
+}
+
+static int
+xfs_iext_node_pos(
+ struct xfs_iext_node *node,
+ xfs_fileoff_t offset)
+{
+ int i;
+
+ for (i = 1; i < KEYS_PER_NODE; i++) {
+ if (xfs_iext_key_cmp(node, i, offset) > 0)
+ break;
+ }
+
+ return i - 1;
+}
+
+static int
+xfs_iext_node_insert_pos(
+ struct xfs_iext_node *node,
+ xfs_fileoff_t offset)
+{
+ int i;
+
+ for (i = 0; i < KEYS_PER_NODE; i++) {
+ if (xfs_iext_key_cmp(node, i, offset) > 0)
+ return i;
+ }
+
+ return KEYS_PER_NODE;
+}
+
+static int
+xfs_iext_node_nr_entries(
+ struct xfs_iext_node *node,
+ int start)
+{
+ int i;
+
+ for (i = start; i < KEYS_PER_NODE; i++) {
+ if (node->keys[i] == XFS_IEXT_KEY_INVALID)
+ break;
+ }
+
+ return i;
+}
+
+static int
+xfs_iext_leaf_nr_entries(
+ struct xfs_ifork *ifp,
+ struct xfs_iext_leaf *leaf,
+ int start)
+{
+ int i;
+
+ for (i = start; i < xfs_iext_max_recs(ifp); i++) {
+ if (xfs_iext_rec_is_empty(&leaf->recs[i]))
+ break;
+ }
+
+ return i;
+}
+
+static inline uint64_t
+xfs_iext_leaf_key(
+ struct xfs_iext_leaf *leaf,
+ int n)
+{
+ return leaf->recs[n].lo & XFS_IEXT_STARTOFF_MASK;
+}
+
+static void
+xfs_iext_grow(
+ struct xfs_ifork *ifp)
+{
+ struct xfs_iext_node *node = kmem_zalloc(NODE_SIZE, KM_NOFS);
+ int i;
+
+ if (ifp->if_height == 1) {
+ struct xfs_iext_leaf *prev = ifp->if_u1.if_root;
+
+ node->keys[0] = xfs_iext_leaf_key(prev, 0);
+ node->ptrs[0] = prev;
+ } else {
+ struct xfs_iext_node *prev = ifp->if_u1.if_root;
+
+ ASSERT(ifp->if_height > 1);
+
+ node->keys[0] = prev->keys[0];
+ node->ptrs[0] = prev;
+ }
+
+ for (i = 1; i < KEYS_PER_NODE; i++)
+ node->keys[i] = XFS_IEXT_KEY_INVALID;
+
+ ifp->if_u1.if_root = node;
+ ifp->if_height++;
+}
+
+static void
+xfs_iext_update_node(
+ struct xfs_ifork *ifp,
+ xfs_fileoff_t old_offset,
+ xfs_fileoff_t new_offset,
+ int level,
+ void *ptr)
+{
+ struct xfs_iext_node *node = ifp->if_u1.if_root;
+ int height, i;
+
+ for (height = ifp->if_height; height > level; height--) {
+ for (i = 0; i < KEYS_PER_NODE; i++) {
+ if (i > 0 && xfs_iext_key_cmp(node, i, old_offset) > 0)
+ break;
+ if (node->keys[i] == old_offset)
+ node->keys[i] = new_offset;
+ }
+ node = node->ptrs[i - 1];
+ ASSERT(node);
+ }
+
+ ASSERT(node == ptr);
+}
+
+static struct xfs_iext_node *
+xfs_iext_split_node(
+ struct xfs_iext_node **nodep,
+ int *pos,
+ int *nr_entries)
+{
+ struct xfs_iext_node *node = *nodep;
+ struct xfs_iext_node *new = kmem_zalloc(NODE_SIZE, KM_NOFS);
+ const int nr_move = KEYS_PER_NODE / 2;
+ int nr_keep = nr_move + (KEYS_PER_NODE & 1);
+ int i = 0;
+
+ /* for sequential append operations just spill over into the new node */
+ if (*pos == KEYS_PER_NODE) {
+ *nodep = new;
+ *pos = 0;
+ *nr_entries = 0;
+ goto done;
+ }
+
+
+ for (i = 0; i < nr_move; i++) {
+ new->keys[i] = node->keys[nr_keep + i];
+ new->ptrs[i] = node->ptrs[nr_keep + i];
+
+ node->keys[nr_keep + i] = XFS_IEXT_KEY_INVALID;
+ node->ptrs[nr_keep + i] = NULL;
+ }
+
+ if (*pos >= nr_keep) {
+ *nodep = new;
+ *pos -= nr_keep;
+ *nr_entries = nr_move;
+ } else {
+ *nr_entries = nr_keep;
+ }
+done:
+ for (; i < KEYS_PER_NODE; i++)
+ new->keys[i] = XFS_IEXT_KEY_INVALID;
+ return new;
+}
+
+static void
+xfs_iext_insert_node(
+ struct xfs_ifork *ifp,
+ uint64_t offset,
+ void *ptr,
+ int level)
+{
+ struct xfs_iext_node *node, *new;
+ int i, pos, nr_entries;
+
+again:
+ if (ifp->if_height < level)
+ xfs_iext_grow(ifp);
+
+ new = NULL;
+ node = xfs_iext_find_level(ifp, offset, level);
+ pos = xfs_iext_node_insert_pos(node, offset);
+ nr_entries = xfs_iext_node_nr_entries(node, pos);
+
+ ASSERT(pos >= nr_entries || xfs_iext_key_cmp(node, pos, offset) != 0);
+ ASSERT(nr_entries <= KEYS_PER_NODE);
+
+ if (nr_entries == KEYS_PER_NODE)
+ new = xfs_iext_split_node(&node, &pos, &nr_entries);
+
+ /*
+ * Update the pointers in higher levels if the first entry changes
+ * in an existing node.
+ */
+ if (node != new && pos == 0 && nr_entries > 0)
+ xfs_iext_update_node(ifp, node->keys[0], offset, level, node);
+
+ for (i = nr_entries; i > pos; i--) {
+ node->keys[i] = node->keys[i - 1];
+ node->ptrs[i] = node->ptrs[i - 1];
+ }
+ node->keys[pos] = offset;
+ node->ptrs[pos] = ptr;
+
+ if (new) {
+ offset = new->keys[0];
+ ptr = new;
+ level++;
+ goto again;
+ }
+}
+
+static struct xfs_iext_leaf *
+xfs_iext_split_leaf(
+ struct xfs_iext_cursor *cur,
+ int *nr_entries)
+{
+ struct xfs_iext_leaf *leaf = cur->leaf;
+ struct xfs_iext_leaf *new = kmem_zalloc(NODE_SIZE, KM_NOFS);
+ const int nr_move = RECS_PER_LEAF / 2;
+ int nr_keep = nr_move + (RECS_PER_LEAF & 1);
+ int i;
+
+ /* for sequential append operations just spill over into the new node */
+ if (cur->pos == RECS_PER_LEAF) {
+ cur->leaf = new;
+ cur->pos = 0;
+ *nr_entries = 0;
+ goto done;
+ }
+
+ for (i = 0; i < nr_move; i++) {
+ new->recs[i] = leaf->recs[nr_keep + i];
+ xfs_iext_rec_clear(&leaf->recs[nr_keep + i]);
+ }
+
+ if (cur->pos >= nr_keep) {
+ cur->leaf = new;
+ cur->pos -= nr_keep;
+ *nr_entries = nr_move;
+ } else {
+ *nr_entries = nr_keep;
+ }
+done:
+ if (leaf->next)
+ leaf->next->prev = new;
+ new->next = leaf->next;
+ new->prev = leaf;
+ leaf->next = new;
+ return new;
+}
+
+static void
+xfs_iext_alloc_root(
+ struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur)
+{
+ ASSERT(ifp->if_bytes == 0);
+
+ ifp->if_u1.if_root = kmem_zalloc(sizeof(struct xfs_iext_rec), KM_NOFS);
+ ifp->if_height = 1;
+
+ /* now that we have a node step into it */
+ cur->leaf = ifp->if_u1.if_root;
+ cur->pos = 0;
+}
+
+static void
+xfs_iext_realloc_root(
+ struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur)
+{
+ size_t new_size = ifp->if_bytes + sizeof(struct xfs_iext_rec);
+ void *new;
+
+ /* account for the prev/next pointers */
+ if (new_size / sizeof(struct xfs_iext_rec) == RECS_PER_LEAF)
+ new_size = NODE_SIZE;
+
+ new = kmem_realloc(ifp->if_u1.if_root, new_size, KM_NOFS);
+ memset(new + ifp->if_bytes, 0, new_size - ifp->if_bytes);
+ ifp->if_u1.if_root = new;
+ cur->leaf = new;
+}
+
+void
+xfs_iext_insert(
+ struct xfs_inode *ip,
+ struct xfs_iext_cursor *cur,
+ struct xfs_bmbt_irec *irec,
+ int state)
+{
+ struct xfs_ifork *ifp = xfs_iext_state_to_fork(ip, state);
+ xfs_fileoff_t offset = irec->br_startoff;
+ struct xfs_iext_leaf *new = NULL;
+ int nr_entries, i;
+
+ trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
+
+ if (ifp->if_height == 0)
+ xfs_iext_alloc_root(ifp, cur);
+ else if (ifp->if_height == 1)
+ xfs_iext_realloc_root(ifp, cur);
+
+ nr_entries = xfs_iext_leaf_nr_entries(ifp, cur->leaf, cur->pos);
+ ASSERT(nr_entries <= RECS_PER_LEAF);
+ ASSERT(cur->pos >= nr_entries ||
+ xfs_iext_rec_cmp(cur_rec(cur), irec->br_startoff) != 0);
+
+ if (nr_entries == RECS_PER_LEAF)
+ new = xfs_iext_split_leaf(cur, &nr_entries);
+
+ /*
+ * Update the pointers in higher levels if the first entry changes
+ * in an existing node.
+ */
+ if (cur->leaf != new && cur->pos == 0 && nr_entries > 0) {
+ xfs_iext_update_node(ifp, xfs_iext_leaf_key(cur->leaf, 0),
+ offset, 1, cur->leaf);
+ }
+
+ for (i = nr_entries; i > cur->pos; i--)
+ cur->leaf->recs[i] = cur->leaf->recs[i - 1];
+ xfs_iext_set(cur_rec(cur), irec);
+ ifp->if_bytes += sizeof(struct xfs_iext_rec);
+
+ if (new)
+ xfs_iext_insert_node(ifp, xfs_iext_leaf_key(new, 0), new, 2);
+}
+
+static struct xfs_iext_node *
+xfs_iext_rebalance_node(
+ struct xfs_iext_node *parent,
+ int *pos,
+ struct xfs_iext_node *node,
+ int nr_entries)
+{
+ /*
+ * If the neighbouring nodes are completely full, or have different
+ * parents, we might never be able to merge our node, and will only
+ * delete it once the number of entries hits zero.
+ */
+ if (nr_entries == 0)
+ return node;
+
+ if (*pos > 0) {
+ struct xfs_iext_node *prev = parent->ptrs[*pos - 1];
+ int nr_prev = xfs_iext_node_nr_entries(prev, 0), i;
+
+ if (nr_prev + nr_entries <= KEYS_PER_NODE) {
+ for (i = 0; i < nr_entries; i++) {
+ prev->keys[nr_prev + i] = node->keys[i];
+ prev->ptrs[nr_prev + i] = node->ptrs[i];
+ }
+ return node;
+ }
+ }
+
+ if (*pos + 1 < xfs_iext_node_nr_entries(parent, *pos)) {
+ struct xfs_iext_node *next = parent->ptrs[*pos + 1];
+ int nr_next = xfs_iext_node_nr_entries(next, 0), i;
+
+ if (nr_entries + nr_next <= KEYS_PER_NODE) {
+ /*
+ * Merge the next node into this node so that we don't
+ * have to do an additional update of the keys in the
+ * higher levels.
+ */
+ for (i = 0; i < nr_next; i++) {
+ node->keys[nr_entries + i] = next->keys[i];
+ node->ptrs[nr_entries + i] = next->ptrs[i];
+ }
+
+ ++*pos;
+ return next;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+xfs_iext_remove_node(
+ struct xfs_ifork *ifp,
+ xfs_fileoff_t offset,
+ void *victim)
+{
+ struct xfs_iext_node *node, *parent;
+ int level = 2, pos, nr_entries, i;
+
+ ASSERT(level <= ifp->if_height);
+ node = xfs_iext_find_level(ifp, offset, level);
+ pos = xfs_iext_node_pos(node, offset);
+again:
+ ASSERT(node->ptrs[pos]);
+ ASSERT(node->ptrs[pos] == victim);
+ kmem_free(victim);
+
+ nr_entries = xfs_iext_node_nr_entries(node, pos) - 1;
+ offset = node->keys[0];
+ for (i = pos; i < nr_entries; i++) {
+ node->keys[i] = node->keys[i + 1];
+ node->ptrs[i] = node->ptrs[i + 1];
+ }
+ node->keys[nr_entries] = XFS_IEXT_KEY_INVALID;
+ node->ptrs[nr_entries] = NULL;
+
+ if (pos == 0 && nr_entries > 0) {
+ xfs_iext_update_node(ifp, offset, node->keys[0], level, node);
+ offset = node->keys[0];
+ }
+
+ if (nr_entries >= KEYS_PER_NODE / 2)
+ return;
+
+ if (level < ifp->if_height) {
+ /*
+ * If we aren't at the root yet try to find a neighbour node to
+ * merge with (or delete the node if it is empty), and then
+ * recurse up to the next level.
+ */
+ level++;
+ parent = xfs_iext_find_level(ifp, offset, level);
+ pos = xfs_iext_node_pos(parent, offset);
+
+ ASSERT(pos != KEYS_PER_NODE);
+ ASSERT(parent->ptrs[pos] == node);
+
+ node = xfs_iext_rebalance_node(parent, &pos, node, nr_entries);
+ if (node) {
+ victim = node;
+ node = parent;
+ goto again;
+ }
+ } else if (nr_entries == 1) {
+ /*
+ * If we are at the root and only one entry is left we can just
+ * free this node and update the root pointer.
+ */
+ ASSERT(node == ifp->if_u1.if_root);
+ ifp->if_u1.if_root = node->ptrs[0];
+ ifp->if_height--;
+ kmem_free(node);
+ }
+}
+
+static void
+xfs_iext_rebalance_leaf(
+ struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur,
+ struct xfs_iext_leaf *leaf,
+ xfs_fileoff_t offset,
+ int nr_entries)
+{
+ /*
+ * If the neighbouring nodes are completely full we might never be able
+ * to merge our node, and will only delete it once the number of
+ * entries hits zero.
+ */
+ if (nr_entries == 0)
+ goto remove_node;
+
+ if (leaf->prev) {
+ int nr_prev = xfs_iext_leaf_nr_entries(ifp, leaf->prev, 0), i;
+
+ if (nr_prev + nr_entries <= RECS_PER_LEAF) {
+ for (i = 0; i < nr_entries; i++)
+ leaf->prev->recs[nr_prev + i] = leaf->recs[i];
+
+ if (cur->leaf == leaf) {
+ cur->leaf = leaf->prev;
+ cur->pos += nr_prev;
+ }
+ goto remove_node;
+ }
+ }
+
+ if (leaf->next) {
+ int nr_next = xfs_iext_leaf_nr_entries(ifp, leaf->next, 0), i;
+
+ if (nr_entries + nr_next <= RECS_PER_LEAF) {
+ /*
+ * Merge the next node into this node so that we don't
+ * have to do an additional update of the keys in the
+ * higher levels.
+ */
+ for (i = 0; i < nr_next; i++) {
+ leaf->recs[nr_entries + i] =
+ leaf->next->recs[i];
+ }
+
+ if (cur->leaf == leaf->next) {
+ cur->leaf = leaf;
+ cur->pos += nr_entries;
+ }
+
+ offset = xfs_iext_leaf_key(leaf->next, 0);
+ leaf = leaf->next;
+ goto remove_node;
+ }
+ }
+
+ return;
+remove_node:
+ if (leaf->prev)
+ leaf->prev->next = leaf->next;
+ if (leaf->next)
+ leaf->next->prev = leaf->prev;
+ xfs_iext_remove_node(ifp, offset, leaf);
+}
+
+static void
+xfs_iext_free_last_leaf(
+ struct xfs_ifork *ifp)
+{
+ ifp->if_height--;
+ kmem_free(ifp->if_u1.if_root);
+ ifp->if_u1.if_root = NULL;
+}
+
+void
+xfs_iext_remove(
+ struct xfs_inode *ip,
+ struct xfs_iext_cursor *cur,
+ int state)
+{
+ struct xfs_ifork *ifp = xfs_iext_state_to_fork(ip, state);
+ struct xfs_iext_leaf *leaf = cur->leaf;
+ xfs_fileoff_t offset = xfs_iext_leaf_key(leaf, 0);
+ int i, nr_entries;
+
+ trace_xfs_iext_remove(ip, cur, state, _RET_IP_);
+
+ ASSERT(ifp->if_height > 0);
+ ASSERT(ifp->if_u1.if_root != NULL);
+ ASSERT(xfs_iext_valid(ifp, cur));
+
+ nr_entries = xfs_iext_leaf_nr_entries(ifp, leaf, cur->pos) - 1;
+ for (i = cur->pos; i < nr_entries; i++)
+ leaf->recs[i] = leaf->recs[i + 1];
+ xfs_iext_rec_clear(&leaf->recs[nr_entries]);
+ ifp->if_bytes -= sizeof(struct xfs_iext_rec);
+
+ if (cur->pos == 0 && nr_entries > 0) {
+ xfs_iext_update_node(ifp, offset, xfs_iext_leaf_key(leaf, 0), 1,
+ leaf);
+ offset = xfs_iext_leaf_key(leaf, 0);
+ } else if (cur->pos == nr_entries) {
+ if (ifp->if_height > 1 && leaf->next)
+ cur->leaf = leaf->next;
+ else
+ cur->leaf = NULL;
+ cur->pos = 0;
+ }
+
+ if (nr_entries >= RECS_PER_LEAF / 2)
+ return;
+
+ if (ifp->if_height > 1)
+ xfs_iext_rebalance_leaf(ifp, cur, leaf, offset, nr_entries);
+ else if (nr_entries == 0)
+ xfs_iext_free_last_leaf(ifp);
+}
+
+/*
+ * Lookup the extent covering bno.
+ *
+ * If there is an extent covering bno, return true, and store the expanded
+ * extent structure in *gotp and the extent cursor in *cur.
+ * If there is no extent covering bno, but there is an extent after it (e.g.
+ * it lies in a hole) return that extent in *gotp and its cursor in *cur
+ * instead.
+ * If bno is beyond the last extent return false, and return an invalid
+ * cursor value.
+ */
+bool
+xfs_iext_lookup_extent(
+ struct xfs_inode *ip,
+ struct xfs_ifork *ifp,
+ xfs_fileoff_t offset,
+ struct xfs_iext_cursor *cur,
+ struct xfs_bmbt_irec *gotp)
+{
+ XFS_STATS_INC(ip->i_mount, xs_look_exlist);
+
+ cur->leaf = xfs_iext_find_level(ifp, offset, 1);
+ if (!cur->leaf) {
+ cur->pos = 0;
+ return false;
+ }
+
+ for (cur->pos = 0; cur->pos < xfs_iext_max_recs(ifp); cur->pos++) {
+ struct xfs_iext_rec *rec = cur_rec(cur);
+
+ if (xfs_iext_rec_is_empty(rec))
+ break;
+ if (xfs_iext_rec_cmp(rec, offset) >= 0)
+ goto found;
+ }
+
+ /* Try looking in the next node for an entry > offset */
+ if (ifp->if_height == 1 || !cur->leaf->next)
+ return false;
+ cur->leaf = cur->leaf->next;
+ cur->pos = 0;
+ if (!xfs_iext_valid(ifp, cur))
+ return false;
+found:
+ xfs_iext_get(gotp, cur_rec(cur));
+ return true;
+}
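As a usage illustration of the semantics described above, a caller could classify a file offset as mapped or sitting in a hole as follows. The helper is hypothetical (not part of the patch) and relies only on xfs_iext_lookup_extent's documented behaviour and the types introduced above.

        /* Hypothetical: is 'off' covered by an extent in this fork? */
        static bool
        xfs_off_is_mapped(
                struct xfs_inode        *ip,
                struct xfs_ifork        *ifp,
                xfs_fileoff_t           off)
        {
                struct xfs_iext_cursor  icur;
                struct xfs_bmbt_irec    got;

                if (!xfs_iext_lookup_extent(ip, ifp, off, &icur, &got))
                        return false;           /* off is beyond the last extent */
                /* the returned extent may start after off if off lies in a hole */
                return got.br_startoff <= off &&
                       off < got.br_startoff + got.br_blockcount;
        }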
+
+/*
+ * Returns the last extent before end, and if this extent doesn't cover
+ * end, update end to the end of the extent.
+ */
+bool
+xfs_iext_lookup_extent_before(
+ struct xfs_inode *ip,
+ struct xfs_ifork *ifp,
+ xfs_fileoff_t *end,
+ struct xfs_iext_cursor *cur,
+ struct xfs_bmbt_irec *gotp)
+{
+ /* could be optimized to not even look up the next on a match.. */
+ if (xfs_iext_lookup_extent(ip, ifp, *end - 1, cur, gotp) &&
+ gotp->br_startoff <= *end - 1)
+ return true;
+ if (!xfs_iext_prev_extent(ifp, cur, gotp))
+ return false;
+ *end = gotp->br_startoff + gotp->br_blockcount;
+ return true;
+}
+
+void
+xfs_iext_update_extent(
+ struct xfs_inode *ip,
+ int state,
+ struct xfs_iext_cursor *cur,
+ struct xfs_bmbt_irec *new)
+{
+ struct xfs_ifork *ifp = xfs_iext_state_to_fork(ip, state);
+
+ if (cur->pos == 0) {
+ struct xfs_bmbt_irec old;
+
+ xfs_iext_get(&old, cur_rec(cur));
+ if (new->br_startoff != old.br_startoff) {
+ xfs_iext_update_node(ifp, old.br_startoff,
+ new->br_startoff, 1, cur->leaf);
+ }
+ }
+
+ trace_xfs_bmap_pre_update(ip, cur, state, _RET_IP_);
+ xfs_iext_set(cur_rec(cur), new);
+ trace_xfs_bmap_post_update(ip, cur, state, _RET_IP_);
+}
+
+/*
+ * Return true if the cursor points at an extent and return the extent structure
+ * in gotp. Else return false.
+ */
+bool
+xfs_iext_get_extent(
+ struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur,
+ struct xfs_bmbt_irec *gotp)
+{
+ if (!xfs_iext_valid(ifp, cur))
+ return false;
+ xfs_iext_get(gotp, cur_rec(cur));
+ return true;
+}
+
+/*
+ * This is a recursive function, so we need to be extremely
+ * careful with stack usage.
+ */
+static void
+xfs_iext_destroy_node(
+ struct xfs_iext_node *node,
+ int level)
+{
+ int i;
+
+ if (level > 1) {
+ for (i = 0; i < KEYS_PER_NODE; i++) {
+ if (node->keys[i] == XFS_IEXT_KEY_INVALID)
+ break;
+ xfs_iext_destroy_node(node->ptrs[i], level - 1);
+ }
+ }
+
+ kmem_free(node);
+}
+
+void
+xfs_iext_destroy(
+ struct xfs_ifork *ifp)
+{
+ xfs_iext_destroy_node(ifp->if_u1.if_root, ifp->if_height);
+
+ ifp->if_bytes = 0;
+ ifp->if_height = 0;
+ ifp->if_u1.if_root = NULL;
+}
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 378f8fbc91a7..6b7989038d75 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -24,6 +24,7 @@
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_icache.h"
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 31840ca24018..1c90ec41e9df 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -42,21 +42,27 @@ STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
+static inline dev_t xfs_to_linux_dev_t(xfs_dev_t dev)
+{
+ return MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev));
+}
+
/*
- * Move inode type and inode format specific information from the
- * on-disk inode to the in-core inode. For fifos, devs, and sockets
- * this means set if_rdev to the proper value. For files, directories,
- * and symlinks this means to bring in the in-line data or extent
- * pointers. For a file in B-tree format, only the root is immediately
- * brought in-core. The rest will be in-lined in if_extents when it
- * is first referenced (see xfs_iread_extents()).
+ * Copy inode type and data and attr format specific information from the
+ * on-disk inode to the in-core inode and fork structures. For fifos, devices,
+ * and sockets this means set i_rdev to the proper value. For files,
+ * directories, and symlinks this means to bring in the in-line data or extent
+ * pointers as well as the attribute fork. For a fork in B-tree format, only
+ * the root is immediately brought in-core. The rest will be read in later when
+ * first referenced (see xfs_iread_extents()).
*/
int
xfs_iformat_fork(
- xfs_inode_t *ip,
- xfs_dinode_t *dip)
+ struct xfs_inode *ip,
+ struct xfs_dinode *dip)
{
- xfs_attr_shortform_t *atp;
+ struct inode *inode = VFS_I(ip);
+ struct xfs_attr_shortform *atp;
int size;
int error = 0;
xfs_fsize_t di_size;
@@ -95,8 +101,7 @@ xfs_iformat_fork(
return -EFSCORRUPTED;
}
- if (unlikely(xfs_is_reflink_inode(ip) &&
- (VFS_I(ip)->i_mode & S_IFMT) != S_IFREG)) {
+ if (unlikely(xfs_is_reflink_inode(ip) && !S_ISREG(inode->i_mode))) {
xfs_warn(ip->i_mount,
"corrupt dinode %llu, wrong file type for reflink.",
ip->i_ino);
@@ -115,7 +120,7 @@ xfs_iformat_fork(
return -EFSCORRUPTED;
}
- switch (VFS_I(ip)->i_mode & S_IFMT) {
+ switch (inode->i_mode & S_IFMT) {
case S_IFIFO:
case S_IFCHR:
case S_IFBLK:
@@ -126,7 +131,7 @@ xfs_iformat_fork(
return -EFSCORRUPTED;
}
ip->i_d.di_size = 0;
- ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
+ inode->i_rdev = xfs_to_linux_dev_t(xfs_dinode_get_rdev(dip));
break;
case S_IFREG:
@@ -184,8 +189,7 @@ xfs_iformat_fork(
return error;
/* Check inline dir contents. */
- if (S_ISDIR(VFS_I(ip)->i_mode) &&
- dip->di_format == XFS_DINODE_FMT_LOCAL) {
+ if (S_ISDIR(inode->i_mode) && dip->di_format == XFS_DINODE_FMT_LOCAL) {
error = xfs_dir2_sf_verify(ip);
if (error) {
xfs_idestroy_fork(ip, XFS_DATA_FORK);
@@ -265,19 +269,14 @@ xfs_init_local_fork(
if (zero_terminate)
mem_size++;
- if (size == 0)
- ifp->if_u1.if_data = NULL;
- else if (mem_size <= sizeof(ifp->if_u2.if_inline_data))
- ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
- else {
+ if (size) {
real_size = roundup(mem_size, 4);
ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
- }
-
- if (size) {
memcpy(ifp->if_u1.if_data, data, size);
if (zero_terminate)
ifp->if_u1.if_data[size] = '\0';
+ } else {
+ ifp->if_u1.if_data = NULL;
}
ifp->if_bytes = size;
@@ -288,13 +287,6 @@ xfs_init_local_fork(
/*
* The file is in-lined in the on-disk inode.
- * If it fits into if_inline_data, then copy
- * it there, otherwise allocate a buffer for it
- * and copy the data there. Either way, set
- * if_data to point at the data.
- * If we allocate a buffer for the data, make
- * sure that its size is a multiple of 4 and
- * record the real size in i_real_bytes.
*/
STATIC int
xfs_iformat_local(
@@ -324,9 +316,7 @@ xfs_iformat_local(
/*
* The file consists of a set of extents all of which fit into the on-disk
- * inode. If there are few enough extents to fit into the if_inline_ext, then
- * copy them there. Otherwise allocate a buffer for them and copy them into it.
- * Either way, set if_extents to point at the extents.
+ * inode.
*/
STATIC int
xfs_iformat_extents(
@@ -336,9 +326,12 @@ xfs_iformat_extents(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ int state = xfs_bmap_fork_to_state(whichfork);
int nex = XFS_DFORK_NEXTENTS(dip, whichfork);
int size = nex * sizeof(xfs_bmbt_rec_t);
+ struct xfs_iext_cursor icur;
struct xfs_bmbt_rec *dp;
+ struct xfs_bmbt_irec new;
int i;
/*
@@ -354,27 +347,25 @@ xfs_iformat_extents(
}
ifp->if_real_bytes = 0;
- if (nex == 0)
- ifp->if_u1.if_extents = NULL;
- else if (nex <= XFS_INLINE_EXTS)
- ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
- else
- xfs_iext_add(ifp, 0, nex);
-
- ifp->if_bytes = size;
+ ifp->if_bytes = 0;
+ ifp->if_u1.if_root = NULL;
+ ifp->if_height = 0;
if (size) {
dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
+
+ xfs_iext_first(ifp, &icur);
for (i = 0; i < nex; i++, dp++) {
- xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
- ep->l0 = get_unaligned_be64(&dp->l0);
- ep->l1 = get_unaligned_be64(&dp->l1);
- if (!xfs_bmbt_validate_extent(mp, whichfork, ep)) {
+ xfs_bmbt_disk_get_all(dp, &new);
+ if (!xfs_bmbt_validate_extent(mp, whichfork, &new)) {
XFS_ERROR_REPORT("xfs_iformat_extents(2)",
XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
}
+
+ xfs_iext_insert(ip, &icur, &new, state);
+ trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
+ xfs_iext_next(ifp, &icur);
}
- XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
}
ifp->if_flags |= XFS_IFEXTENTS;
return 0;
@@ -440,47 +431,14 @@ xfs_iformat_btree(
ifp->if_flags &= ~XFS_IFEXTENTS;
ifp->if_flags |= XFS_IFBROOT;
+ ifp->if_real_bytes = 0;
+ ifp->if_bytes = 0;
+ ifp->if_u1.if_root = NULL;
+ ifp->if_height = 0;
return 0;
}
/*
- * Read in extents from a btree-format inode.
- * Allocate and fill in if_extents. Real work is done in xfs_bmap.c.
- */
-int
-xfs_iread_extents(
- xfs_trans_t *tp,
- xfs_inode_t *ip,
- int whichfork)
-{
- int error;
- xfs_ifork_t *ifp;
- xfs_extnum_t nextents;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
- if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
- XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
- ip->i_mount);
- return -EFSCORRUPTED;
- }
- nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
- ifp = XFS_IFORK_PTR(ip, whichfork);
-
- /*
- * We know that the size is valid (it's checked in iformat_btree)
- */
- ifp->if_bytes = ifp->if_real_bytes = 0;
- xfs_iext_add(ifp, 0, nextents);
- error = xfs_bmap_read_extents(tp, ip, whichfork);
- if (error) {
- xfs_iext_destroy(ifp);
- return error;
- }
- ifp->if_flags |= XFS_IFEXTENTS;
- return 0;
-}
-/*
* Reallocate the space for if_broot based on the number of records
* being added or deleted as indicated in rec_diff. Move the records
* and pointers in if_broot to fit the new size. When shrinking this
@@ -644,26 +602,9 @@ xfs_idata_realloc(
ASSERT(new_size >= 0);
if (new_size == 0) {
- if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
- kmem_free(ifp->if_u1.if_data);
- }
+ kmem_free(ifp->if_u1.if_data);
ifp->if_u1.if_data = NULL;
real_size = 0;
- } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
- /*
- * If the valid extents/data can fit in if_inline_ext/data,
- * copy them from the malloc'd vector and free it.
- */
- if (ifp->if_u1.if_data == NULL) {
- ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
- } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
- ASSERT(ifp->if_real_bytes != 0);
- memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
- new_size);
- kmem_free(ifp->if_u1.if_data);
- ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
- }
- real_size = 0;
} else {
/*
* Stuck with malloc/realloc.
@@ -677,7 +618,7 @@ xfs_idata_realloc(
ASSERT(ifp->if_real_bytes == 0);
ifp->if_u1.if_data = kmem_alloc(real_size,
KM_SLEEP | KM_NOFS);
- } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
+ } else {
/*
* Only do the realloc if the underlying size
* is really changing.
@@ -688,12 +629,6 @@ xfs_idata_realloc(
real_size,
KM_SLEEP | KM_NOFS);
}
- } else {
- ASSERT(ifp->if_real_bytes == 0);
- ifp->if_u1.if_data = kmem_alloc(real_size,
- KM_SLEEP | KM_NOFS);
- memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
- ifp->if_bytes);
}
}
ifp->if_real_bytes = real_size;
@@ -721,23 +656,18 @@ xfs_idestroy_fork(
* so check and free it up if we do.
*/
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
- if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
- (ifp->if_u1.if_data != NULL)) {
+ if (ifp->if_u1.if_data != NULL) {
ASSERT(ifp->if_real_bytes != 0);
kmem_free(ifp->if_u1.if_data);
ifp->if_u1.if_data = NULL;
ifp->if_real_bytes = 0;
}
- } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
- ((ifp->if_flags & XFS_IFEXTIREC) ||
- ((ifp->if_u1.if_extents != NULL) &&
- (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
- ASSERT(ifp->if_real_bytes != 0);
+ } else if ((ifp->if_flags & XFS_IFEXTENTS) && ifp->if_height) {
xfs_iext_destroy(ifp);
}
- ASSERT(ifp->if_u1.if_extents == NULL ||
- ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
+
ASSERT(ifp->if_real_bytes == 0);
+
if (whichfork == XFS_ATTR_FORK) {
kmem_zone_free(xfs_ifork_zone, ip->i_afp);
ip->i_afp = NULL;
@@ -747,19 +677,9 @@ xfs_idestroy_fork(
}
}
-/* Count number of incore extents based on if_bytes */
-xfs_extnum_t
-xfs_iext_count(struct xfs_ifork *ifp)
-{
- return ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
-}
-
/*
* Convert in-core extents to on-disk form
*
- * For either the data or attr fork in extent format, we need to endian convert
- * the in-core extent as we place them into the on-disk inode.
- *
* In the case of the data fork, the in-core and on-disk fork sizes can be
* different due to delayed allocation extents. We only copy on-disk extents
* here, so callers must always use the physical fork size to determine the
@@ -768,53 +688,32 @@ xfs_iext_count(struct xfs_ifork *ifp)
*/
int
xfs_iextents_copy(
- xfs_inode_t *ip,
- xfs_bmbt_rec_t *dp,
+ struct xfs_inode *ip,
+ struct xfs_bmbt_rec *dp,
int whichfork)
{
- int copied;
- int i;
- xfs_ifork_t *ifp;
- int nrecs;
- xfs_fsblock_t start_block;
+ int state = xfs_bmap_fork_to_state(whichfork);
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_iext_cursor icur;
+ struct xfs_bmbt_irec rec;
+ int copied = 0;
- ifp = XFS_IFORK_PTR(ip, whichfork);
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
ASSERT(ifp->if_bytes > 0);
- nrecs = xfs_iext_count(ifp);
- XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
- ASSERT(nrecs > 0);
-
- /*
- * There are some delayed allocation extents in the
- * inode, so copy the extents one at a time and skip
- * the delayed ones. There must be at least one
- * non-delayed extent.
- */
- copied = 0;
- for (i = 0; i < nrecs; i++) {
- xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
-
- ASSERT(xfs_bmbt_validate_extent(ip->i_mount, whichfork, ep));
-
- start_block = xfs_bmbt_get_startblock(ep);
- if (isnullstartblock(start_block)) {
- /*
- * It's a delayed allocation extent, so skip it.
- */
+ for_each_xfs_iext(ifp, &icur, &rec) {
+ if (isnullstartblock(rec.br_startblock))
continue;
- }
-
- /* Translate to on disk format */
- put_unaligned_be64(ep->l0, &dp->l0);
- put_unaligned_be64(ep->l1, &dp->l1);
+ ASSERT(xfs_bmbt_validate_extent(ip->i_mount, whichfork, &rec));
+ xfs_bmbt_disk_set_all(dp, &rec);
+ trace_xfs_write_extent(ip, &icur, state, _RET_IP_);
+ copied += sizeof(struct xfs_bmbt_rec);
dp++;
- copied++;
}
- ASSERT(copied != 0);
- return (copied * (uint)sizeof(xfs_bmbt_rec_t));
+ ASSERT(copied > 0);
+ ASSERT(copied <= ifp->if_bytes);
+ return copied;
}
/*
@@ -872,7 +771,6 @@ xfs_iflush_fork(
!(iip->ili_fields & extflag[whichfork]));
if ((iip->ili_fields & extflag[whichfork]) &&
(ifp->if_bytes > 0)) {
- ASSERT(xfs_iext_get_ext(ifp, 0));
ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
(void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
whichfork);
@@ -894,16 +792,7 @@ xfs_iflush_fork(
case XFS_DINODE_FMT_DEV:
if (iip->ili_fields & XFS_ILOG_DEV) {
ASSERT(whichfork == XFS_DATA_FORK);
- xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev);
- }
- break;
-
- case XFS_DINODE_FMT_UUID:
- if (iip->ili_fields & XFS_ILOG_UUID) {
- ASSERT(whichfork == XFS_DATA_FORK);
- memcpy(XFS_DFORK_DPTR(dip),
- &ip->i_df.if_u2.if_uuid,
- sizeof(uuid_t));
+ xfs_dinode_put_rdev(dip, sysv_encode_dev(VFS_I(ip)->i_rdev));
}
break;
@@ -913,33 +802,6 @@ xfs_iflush_fork(
}
}
-/*
- * Return a pointer to the extent record at file index idx.
- */
-xfs_bmbt_rec_host_t *
-xfs_iext_get_ext(
- xfs_ifork_t *ifp, /* inode fork pointer */
- xfs_extnum_t idx) /* index of target extent */
-{
- ASSERT(idx >= 0);
- ASSERT(idx < xfs_iext_count(ifp));
-
- if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
- return ifp->if_u1.if_ext_irec->er_extbuf;
- } else if (ifp->if_flags & XFS_IFEXTIREC) {
- xfs_ext_irec_t *erp; /* irec pointer */
- int erp_idx = 0; /* irec index */
- xfs_extnum_t page_idx = idx; /* ext index in target list */
-
- erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
- return &erp->er_extbuf[page_idx];
- } else if (ifp->if_bytes) {
- return &ifp->if_u1.if_extents[idx];
- } else {
- return NULL;
- }
-}
-
/* Convert bmap state flags to an inode fork. */
struct xfs_ifork *
xfs_iext_state_to_fork(
@@ -954,1011 +816,6 @@ xfs_iext_state_to_fork(
}
/*
- * Insert new item(s) into the extent records for incore inode
- * fork 'ifp'. 'count' new items are inserted at index 'idx'.
- */
-void
-xfs_iext_insert(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* starting index of new items */
- xfs_extnum_t count, /* number of inserted items */
- xfs_bmbt_irec_t *new, /* items to insert */
- int state) /* type of extent conversion */
-{
- xfs_ifork_t *ifp = xfs_iext_state_to_fork(ip, state);
- xfs_extnum_t i; /* extent record index */
-
- trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
-
- ASSERT(ifp->if_flags & XFS_IFEXTENTS);
- xfs_iext_add(ifp, idx, count);
- for (i = idx; i < idx + count; i++, new++)
- xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
-}
-
-/*
- * This is called when the amount of space required for incore file
- * extents needs to be increased. The ext_diff parameter stores the
- * number of new extents being added and the idx parameter contains
- * the extent index where the new extents will be added. If the new
- * extents are being appended, then we just need to (re)allocate and
- * initialize the space. Otherwise, if the new extents are being
- * inserted into the middle of the existing entries, a bit more work
- * is required to make room for the new extents to be inserted. The
- * caller is responsible for filling in the new extent entries upon
- * return.
- */
-void
-xfs_iext_add(
- xfs_ifork_t *ifp, /* inode fork pointer */
- xfs_extnum_t idx, /* index to begin adding exts */
- int ext_diff) /* number of extents to add */
-{
- int byte_diff; /* new bytes being added */
- int new_size; /* size of extents after adding */
- xfs_extnum_t nextents; /* number of extents in file */
-
- nextents = xfs_iext_count(ifp);
- ASSERT((idx >= 0) && (idx <= nextents));
- byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
- new_size = ifp->if_bytes + byte_diff;
- /*
- * If the new number of extents (nextents + ext_diff)
- * fits inside the inode, then continue to use the inline
- * extent buffer.
- */
- if (nextents + ext_diff <= XFS_INLINE_EXTS) {
- if (idx < nextents) {
- memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
- &ifp->if_u2.if_inline_ext[idx],
- (nextents - idx) * sizeof(xfs_bmbt_rec_t));
- memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
- }
- ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
- ifp->if_real_bytes = 0;
- }
- /*
- * Otherwise use a linear (direct) extent list.
- * If the extents are currently inside the inode,
- * xfs_iext_realloc_direct will switch us from
- * inline to direct extent allocation mode.
- */
- else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
- xfs_iext_realloc_direct(ifp, new_size);
- if (idx < nextents) {
- memmove(&ifp->if_u1.if_extents[idx + ext_diff],
- &ifp->if_u1.if_extents[idx],
- (nextents - idx) * sizeof(xfs_bmbt_rec_t));
- memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
- }
- }
- /* Indirection array */
- else {
- xfs_ext_irec_t *erp;
- int erp_idx = 0;
- int page_idx = idx;
-
- ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
- if (ifp->if_flags & XFS_IFEXTIREC) {
- erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
- } else {
- xfs_iext_irec_init(ifp);
- ASSERT(ifp->if_flags & XFS_IFEXTIREC);
- erp = ifp->if_u1.if_ext_irec;
- }
- /* Extents fit in target extent page */
- if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
- if (page_idx < erp->er_extcount) {
- memmove(&erp->er_extbuf[page_idx + ext_diff],
- &erp->er_extbuf[page_idx],
- (erp->er_extcount - page_idx) *
- sizeof(xfs_bmbt_rec_t));
- memset(&erp->er_extbuf[page_idx], 0, byte_diff);
- }
- erp->er_extcount += ext_diff;
- xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
- }
- /* Insert a new extent page */
- else if (erp) {
- xfs_iext_add_indirect_multi(ifp,
- erp_idx, page_idx, ext_diff);
- }
- /*
- * If extent(s) are being appended to the last page in
- * the indirection array and the new extent(s) don't fit
- * in the page, then erp is NULL and erp_idx is set to
- * the next index needed in the indirection array.
- */
- else {
- uint count = ext_diff;
-
- while (count) {
- erp = xfs_iext_irec_new(ifp, erp_idx);
- erp->er_extcount = min(count, XFS_LINEAR_EXTS);
- count -= erp->er_extcount;
- if (count)
- erp_idx++;
- }
- }
- }
- ifp->if_bytes = new_size;
-}
-
-/*
- * This is called when incore extents are being added to the indirection
- * array and the new extents do not fit in the target extent list. The
- * erp_idx parameter contains the irec index for the target extent list
- * in the indirection array, and the idx parameter contains the extent
- * index within the list. The number of extents being added is stored
- * in the count parameter.
- *
- * |-------| |-------|
- * | | | | idx - number of extents before idx
- * | idx | | count |
- * | | | | count - number of extents being inserted at idx
- * |-------| |-------|
- * | count | | nex2 | nex2 - number of extents after idx + count
- * |-------| |-------|
- */
-void
-xfs_iext_add_indirect_multi(
- xfs_ifork_t *ifp, /* inode fork pointer */
- int erp_idx, /* target extent irec index */
- xfs_extnum_t idx, /* index within target list */
- int count) /* new extents being added */
-{
- int byte_diff; /* new bytes being added */
- xfs_ext_irec_t *erp; /* pointer to irec entry */
- xfs_extnum_t ext_diff; /* number of extents to add */
- xfs_extnum_t ext_cnt; /* new extents still needed */
- xfs_extnum_t nex2; /* extents after idx + count */
- xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */
- int nlists; /* number of irec's (lists) */
-
- ASSERT(ifp->if_flags & XFS_IFEXTIREC);
- erp = &ifp->if_u1.if_ext_irec[erp_idx];
- nex2 = erp->er_extcount - idx;
- nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-
- /*
- * Save the second part of the target extent list
- * (all extents past idx + count).
- */
- if (nex2) {
- byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
- nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
- memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
- erp->er_extcount -= nex2;
- xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
- memset(&erp->er_extbuf[idx], 0, byte_diff);
- }
-
- /*
- * Add the new extents to the end of the target
- * list, then allocate new irec record(s) and
- * extent buffer(s) as needed to store the rest
- * of the new extents.
- */
- ext_cnt = count;
- ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
- if (ext_diff) {
- erp->er_extcount += ext_diff;
- xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
- ext_cnt -= ext_diff;
- }
- while (ext_cnt) {
- erp_idx++;
- erp = xfs_iext_irec_new(ifp, erp_idx);
- ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
- erp->er_extcount = ext_diff;
- xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
- ext_cnt -= ext_diff;
- }
-
- /* Add nex2 extents back to indirection array */
- if (nex2) {
- xfs_extnum_t ext_avail;
- int i;
-
- byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
- ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
- i = 0;
- /*
- * If nex2 extents fit in the current page, append
- * nex2_ep after the new extents.
- */
- if (nex2 <= ext_avail) {
- i = erp->er_extcount;
- }
- /*
- * Otherwise, check if space is available in the
- * next page.
- */
- else if ((erp_idx < nlists - 1) &&
- (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
- ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
- erp_idx++;
- erp++;
- /* Create a hole for nex2 extents */
- memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
- erp->er_extcount * sizeof(xfs_bmbt_rec_t));
- }
- /*
- * Final choice, create a new extent page for
- * nex2 extents.
- */
- else {
- erp_idx++;
- erp = xfs_iext_irec_new(ifp, erp_idx);
- }
- memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
- kmem_free(nex2_ep);
- erp->er_extcount += nex2;
- xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
- }
-}
-
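/*
 * Illustrative sketch, not part of the patch: in the indirection scheme
 * removed above, each irec page holds at most XFS_LINEAR_EXTS records,
 * so once the current page is full, inserting 'count' new extents costs
 * roughly the number of extra pages computed below.  The helper name is
 * hypothetical and only restates the arithmetic of the removed code.
 */
static int
old_iext_pages_needed_sketch(
	int	count,	/* new extents being inserted */
	int	room)	/* free record slots left in the current page */
{
	if (count <= room)
		return 0;
	return DIV_ROUND_UP(count - room, XFS_LINEAR_EXTS);
}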
-/*
- * This is called when the amount of space required for incore file
- * extents needs to be decreased. The ext_diff parameter stores the
- * number of extents to be removed and the idx parameter contains
- * the extent index where the extents will be removed from.
- *
- * If the amount of space needed has decreased below the linear
- * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
- * extent array. Otherwise, use kmem_realloc() to adjust the
- * size to what is needed.
- */
-void
-xfs_iext_remove(
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index to begin removing exts */
- int ext_diff, /* number of extents to remove */
- int state) /* type of extent conversion */
-{
- xfs_ifork_t *ifp = xfs_iext_state_to_fork(ip, state);
- xfs_extnum_t nextents; /* number of extents in file */
- int new_size; /* size of extents after removal */
-
- trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
-
- ASSERT(ext_diff > 0);
- nextents = xfs_iext_count(ifp);
- new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
-
- if (new_size == 0) {
- xfs_iext_destroy(ifp);
- } else if (ifp->if_flags & XFS_IFEXTIREC) {
- xfs_iext_remove_indirect(ifp, idx, ext_diff);
- } else if (ifp->if_real_bytes) {
- xfs_iext_remove_direct(ifp, idx, ext_diff);
- } else {
- xfs_iext_remove_inline(ifp, idx, ext_diff);
- }
- ifp->if_bytes = new_size;
-}
-
-/*
- * This removes ext_diff extents from the inline buffer, beginning
- * at extent index idx.
- */
-void
-xfs_iext_remove_inline(
- xfs_ifork_t *ifp, /* inode fork pointer */
- xfs_extnum_t idx, /* index to begin removing exts */
- int ext_diff) /* number of extents to remove */
-{
- int nextents; /* number of extents in file */
-
- ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
- ASSERT(idx < XFS_INLINE_EXTS);
- nextents = xfs_iext_count(ifp);
- ASSERT(((nextents - ext_diff) > 0) &&
- (nextents - ext_diff) < XFS_INLINE_EXTS);
-
- if (idx + ext_diff < nextents) {
- memmove(&ifp->if_u2.if_inline_ext[idx],
- &ifp->if_u2.if_inline_ext[idx + ext_diff],
- (nextents - (idx + ext_diff)) *
- sizeof(xfs_bmbt_rec_t));
- memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
- 0, ext_diff * sizeof(xfs_bmbt_rec_t));
- } else {
- memset(&ifp->if_u2.if_inline_ext[idx], 0,
- ext_diff * sizeof(xfs_bmbt_rec_t));
- }
-}
-
-/*
- * This removes ext_diff extents from a linear (direct) extent list,
- * beginning at extent index idx. If the extents are being removed
- * from the end of the list (i.e. truncate) then we just need to re-
- * allocate the list to remove the extra space. Otherwise, if the
- * extents are being removed from the middle of the existing extent
- * entries, then we first need to move the extent records beginning
- * at idx + ext_diff up in the list to overwrite the records being
- * removed, then remove the extra space via kmem_realloc.
- */
-void
-xfs_iext_remove_direct(
- xfs_ifork_t *ifp, /* inode fork pointer */
- xfs_extnum_t idx, /* index to begin removing exts */
- int ext_diff) /* number of extents to remove */
-{
- xfs_extnum_t nextents; /* number of extents in file */
- int new_size; /* size of extents after removal */
-
- ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
- new_size = ifp->if_bytes -
- (ext_diff * sizeof(xfs_bmbt_rec_t));
- nextents = xfs_iext_count(ifp);
-
- if (new_size == 0) {
- xfs_iext_destroy(ifp);
- return;
- }
- /* Move extents up in the list (if needed) */
- if (idx + ext_diff < nextents) {
- memmove(&ifp->if_u1.if_extents[idx],
- &ifp->if_u1.if_extents[idx + ext_diff],
- (nextents - (idx + ext_diff)) *
- sizeof(xfs_bmbt_rec_t));
- }
- memset(&ifp->if_u1.if_extents[nextents - ext_diff],
- 0, ext_diff * sizeof(xfs_bmbt_rec_t));
- /*
- * Reallocate the direct extent list. If the extents
- * will fit inside the inode then xfs_iext_realloc_direct
- * will switch from direct to inline extent allocation
- * mode for us.
- */
- xfs_iext_realloc_direct(ifp, new_size);
- ifp->if_bytes = new_size;
-}
-
-/*
- * This is called when incore extents are being removed from the
- * indirection array and the extents being removed span multiple extent
- * buffers. The idx parameter contains the file extent index where we
- * want to begin removing extents, and the count parameter contains
- * how many extents need to be removed.
- *
- * |-------| |-------|
- * | nex1 | | | nex1 - number of extents before idx
- * |-------| | count |
- * | | | | count - number of extents being removed at idx
- * | count | |-------|
- * | | | nex2 | nex2 - number of extents after idx + count
- * |-------| |-------|
- */
-void
-xfs_iext_remove_indirect(
- xfs_ifork_t *ifp, /* inode fork pointer */
- xfs_extnum_t idx, /* index to begin removing extents */
- int count) /* number of extents to remove */
-{
- xfs_ext_irec_t *erp; /* indirection array pointer */
- int erp_idx = 0; /* indirection array index */
- xfs_extnum_t ext_cnt; /* extents left to remove */
- xfs_extnum_t ext_diff; /* extents to remove in current list */
- xfs_extnum_t nex1; /* number of extents before idx */
- xfs_extnum_t nex2; /* extents after idx + count */
- int page_idx = idx; /* index in target extent list */
-
- ASSERT(ifp->if_flags & XFS_IFEXTIREC);
- erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
- ASSERT(erp != NULL);
- nex1 = page_idx;
- ext_cnt = count;
- while (ext_cnt) {
- nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
- ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
- /*
- * Check for deletion of entire list;
- * xfs_iext_irec_remove() updates extent offsets.
- */
- if (ext_diff == erp->er_extcount) {
- xfs_iext_irec_remove(ifp, erp_idx);
- ext_cnt -= ext_diff;
- nex1 = 0;
- if (ext_cnt) {
- ASSERT(erp_idx < ifp->if_real_bytes /
- XFS_IEXT_BUFSZ);
- erp = &ifp->if_u1.if_ext_irec[erp_idx];
- nex1 = 0;
- continue;
- } else {
- break;
- }
- }
- /* Move extents up (if needed) */
- if (nex2) {
- memmove(&erp->er_extbuf[nex1],
- &erp->er_extbuf[nex1 + ext_diff],
- nex2 * sizeof(xfs_bmbt_rec_t));
- }
- /* Zero out rest of page */
- memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
- ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
- /* Update remaining counters */
- erp->er_extcount -= ext_diff;
- xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
- ext_cnt -= ext_diff;
- nex1 = 0;
- erp_idx++;
- erp++;
- }
- ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
- xfs_iext_irec_compact(ifp);
-}
-
-/*
- * Create, destroy, or resize a linear (direct) block of extents.
- */
-void
-xfs_iext_realloc_direct(
- xfs_ifork_t *ifp, /* inode fork pointer */
- int new_size) /* new size of extents after adding */
-{
- int rnew_size; /* real new size of extents */
-
- rnew_size = new_size;
-
- ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
- ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
- (new_size != ifp->if_real_bytes)));
-
- /* Free extent records */
- if (new_size == 0) {
- xfs_iext_destroy(ifp);
- }
- /* Resize direct extent list and zero any new bytes */
- else if (ifp->if_real_bytes) {
- /* Check if extents will fit inside the inode */
- if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
- xfs_iext_direct_to_inline(ifp, new_size /
- (uint)sizeof(xfs_bmbt_rec_t));
- ifp->if_bytes = new_size;
- return;
- }
- if (!is_power_of_2(new_size)){
- rnew_size = roundup_pow_of_two(new_size);
- }
- if (rnew_size != ifp->if_real_bytes) {
- ifp->if_u1.if_extents =
- kmem_realloc(ifp->if_u1.if_extents,
- rnew_size, KM_NOFS);
- }
- if (rnew_size > ifp->if_real_bytes) {
- memset(&ifp->if_u1.if_extents[ifp->if_bytes /
- (uint)sizeof(xfs_bmbt_rec_t)], 0,
- rnew_size - ifp->if_real_bytes);
- }
- }
- /* Switch from the inline extent buffer to a direct extent list */
- else {
- if (!is_power_of_2(new_size)) {
- rnew_size = roundup_pow_of_two(new_size);
- }
- xfs_iext_inline_to_direct(ifp, rnew_size);
- }
- ifp->if_real_bytes = rnew_size;
- ifp->if_bytes = new_size;
-}
-
-/*
- * Switch from linear (direct) extent records to inline buffer.
- */
-void
-xfs_iext_direct_to_inline(
- xfs_ifork_t *ifp, /* inode fork pointer */
- xfs_extnum_t nextents) /* number of extents in file */
-{
- ASSERT(ifp->if_flags & XFS_IFEXTENTS);
- ASSERT(nextents <= XFS_INLINE_EXTS);
- /*
- * The inline buffer was zeroed when we switched
- * from inline to direct extent allocation mode,
- * so we don't need to clear it here.
- */
- memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
- nextents * sizeof(xfs_bmbt_rec_t));
- kmem_free(ifp->if_u1.if_extents);
- ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
- ifp->if_real_bytes = 0;
-}
-
-/*
- * Switch from inline buffer to linear (direct) extent records.
- * new_size should already be rounded up to the next power of 2
- * by the caller (when appropriate), so use new_size as it is.
- * However, since new_size may be rounded up, we can't update
- * if_bytes here. It is the caller's responsibility to update
- * if_bytes upon return.
- */
-void
-xfs_iext_inline_to_direct(
- xfs_ifork_t *ifp, /* inode fork pointer */
- int new_size) /* number of extents in file */
-{
- ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
- memset(ifp->if_u1.if_extents, 0, new_size);
- if (ifp->if_bytes) {
- memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
- ifp->if_bytes);
- memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
- sizeof(xfs_bmbt_rec_t));
- }
- ifp->if_real_bytes = new_size;
-}
-
-/*
- * Resize an extent indirection array to new_size bytes.
- */
-STATIC void
-xfs_iext_realloc_indirect(
- xfs_ifork_t *ifp, /* inode fork pointer */
- int new_size) /* new indirection array size */
-{
- ASSERT(ifp->if_flags & XFS_IFEXTIREC);
- ASSERT(ifp->if_real_bytes);
- ASSERT((new_size >= 0) &&
- (new_size != ((ifp->if_real_bytes / XFS_IEXT_BUFSZ) *
- sizeof(xfs_ext_irec_t))));
- if (new_size == 0) {
- xfs_iext_destroy(ifp);
- } else {
- ifp->if_u1.if_ext_irec =
- kmem_realloc(ifp->if_u1.if_ext_irec, new_size, KM_NOFS);
- }
-}
-
-/*
- * Switch from indirection array to linear (direct) extent allocations.
- */
-STATIC void
-xfs_iext_indirect_to_direct(
- xfs_ifork_t *ifp) /* inode fork pointer */
-{
- xfs_bmbt_rec_host_t *ep; /* extent record pointer */
- xfs_extnum_t nextents; /* number of extents in file */
- int size; /* size of file extents */
-
- ASSERT(ifp->if_flags & XFS_IFEXTIREC);
- nextents = xfs_iext_count(ifp);
- ASSERT(nextents <= XFS_LINEAR_EXTS);
- size = nextents * sizeof(xfs_bmbt_rec_t);
-
- xfs_iext_irec_compact_pages(ifp);
- ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
-
- ep = ifp->if_u1.if_ext_irec->er_extbuf;
- kmem_free(ifp->if_u1.if_ext_irec);
- ifp->if_flags &= ~XFS_IFEXTIREC;
- ifp->if_u1.if_extents = ep;
- ifp->if_bytes = size;
- if (nextents < XFS_LINEAR_EXTS) {
- xfs_iext_realloc_direct(ifp, size);
- }
-}
-
-/*
- * Remove all records from the indirection array.
- */
-STATIC void
-xfs_iext_irec_remove_all(
- struct xfs_ifork *ifp)
-{
- int nlists;
- int i;
-
- ASSERT(ifp->if_flags & XFS_IFEXTIREC);
- nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
- for (i = 0; i < nlists; i++)
- kmem_free(ifp->if_u1.if_ext_irec[i].er_extbuf);
- kmem_free(ifp->if_u1.if_ext_irec);
- ifp->if_flags &= ~XFS_IFEXTIREC;
-}
-
-/*
- * Free incore file extents.
- */
-void
-xfs_iext_destroy(
- xfs_ifork_t *ifp) /* inode fork pointer */
-{
- if (ifp->if_flags & XFS_IFEXTIREC) {
- xfs_iext_irec_remove_all(ifp);
- } else if (ifp->if_real_bytes) {
- kmem_free(ifp->if_u1.if_extents);
- } else if (ifp->if_bytes) {
- memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
- sizeof(xfs_bmbt_rec_t));
- }
- ifp->if_u1.if_extents = NULL;
- ifp->if_real_bytes = 0;
- ifp->if_bytes = 0;
-}
-
-/*
- * Return a pointer to the extent record for file system block bno.
- */
-xfs_bmbt_rec_host_t * /* pointer to found extent record */
-xfs_iext_bno_to_ext(
- xfs_ifork_t *ifp, /* inode fork pointer */
- xfs_fileoff_t bno, /* block number to search for */
- xfs_extnum_t *idxp) /* index of target extent */
-{
- xfs_bmbt_rec_host_t *base; /* pointer to first extent */
- xfs_filblks_t blockcount = 0; /* number of blocks in extent */
- xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */
- xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
- int high; /* upper boundary in search */
- xfs_extnum_t idx = 0; /* index of target extent */
- int low; /* lower boundary in search */
- xfs_extnum_t nextents; /* number of file extents */
- xfs_fileoff_t startoff = 0; /* start offset of extent */
-
- nextents = xfs_iext_count(ifp);
- if (nextents == 0) {
- *idxp = 0;
- return NULL;
- }
- low = 0;
- if (ifp->if_flags & XFS_IFEXTIREC) {
- /* Find target extent list */
- int erp_idx = 0;
- erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
- base = erp->er_extbuf;
- high = erp->er_extcount - 1;
- } else {
- base = ifp->if_u1.if_extents;
- high = nextents - 1;
- }
- /* Binary search extent records */
- while (low <= high) {
- idx = (low + high) >> 1;
- ep = base + idx;
- startoff = xfs_bmbt_get_startoff(ep);
- blockcount = xfs_bmbt_get_blockcount(ep);
- if (bno < startoff) {
- high = idx - 1;
- } else if (bno >= startoff + blockcount) {
- low = idx + 1;
- } else {
- /* Convert back to file-based extent index */
- if (ifp->if_flags & XFS_IFEXTIREC) {
- idx += erp->er_extoff;
- }
- *idxp = idx;
- return ep;
- }
- }
- /* Convert back to file-based extent index */
- if (ifp->if_flags & XFS_IFEXTIREC) {
- idx += erp->er_extoff;
- }
- if (bno >= startoff + blockcount) {
- if (++idx == nextents) {
- ep = NULL;
- } else {
- ep = xfs_iext_get_ext(ifp, idx);
- }
- }
- *idxp = idx;
- return ep;
-}
-
-/*
- * Return a pointer to the indirection array entry containing the
- * extent record for filesystem block bno. Store the index of the
- * target irec in *erp_idxp.
- */
-xfs_ext_irec_t * /* pointer to found extent record */
-xfs_iext_bno_to_irec(
- xfs_ifork_t *ifp, /* inode fork pointer */
- xfs_fileoff_t bno, /* block number to search for */
- int *erp_idxp) /* irec index of target ext list */
-{
- xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
- xfs_ext_irec_t *erp_next; /* next indirection array entry */
- int erp_idx; /* indirection array index */
- int nlists; /* number of extent irec's (lists) */
- int high; /* binary search upper limit */
- int low; /* binary search lower limit */
-
- ASSERT(ifp->if_flags & XFS_IFEXTIREC);
- nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
- erp_idx = 0;
- low = 0;
- high = nlists - 1;
- while (low <= high) {
- erp_idx = (low + high) >> 1;
- erp = &ifp->if_u1.if_ext_irec[erp_idx];
- erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
- if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
- high = erp_idx - 1;
- } else if (erp_next && bno >=
- xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
- low = erp_idx + 1;
- } else {
- break;
- }
- }
- *erp_idxp = erp_idx;
- return erp;
-}
-
-/*
- * Return a pointer to the indirection array entry containing the
- * extent record at file extent index *idxp. Store the index of the
- * target irec in *erp_idxp and store the page index of the target
- * extent record in *idxp.
- */
-xfs_ext_irec_t *
-xfs_iext_idx_to_irec(
- xfs_ifork_t *ifp, /* inode fork pointer */
- xfs_extnum_t *idxp, /* extent index (file -> page) */
- int *erp_idxp, /* pointer to target irec */
- int realloc) /* new bytes were just added */
-{
- xfs_ext_irec_t *prev; /* pointer to previous irec */
- xfs_ext_irec_t *erp = NULL; /* pointer to current irec */
- int erp_idx; /* indirection array index */
- int nlists; /* number of irec's (ex lists) */
- int high; /* binary search upper limit */
- int low; /* binary search lower limit */
- xfs_extnum_t page_idx = *idxp; /* extent index in target list */
-
- ASSERT(ifp->if_flags & XFS_IFEXTIREC);
- ASSERT(page_idx >= 0);
- ASSERT(page_idx <= xfs_iext_count(ifp));
- ASSERT(page_idx < xfs_iext_count(ifp) || realloc);
-
- nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
- erp_idx = 0;
- low = 0;
- high = nlists - 1;
-
- /* Binary search extent irec's */
- while (low <= high) {
- erp_idx = (low + high) >> 1;
- erp = &ifp->if_u1.if_ext_irec[erp_idx];
- prev = erp_idx > 0 ? erp - 1 : NULL;
- if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
- realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
- high = erp_idx - 1;
- } else if (page_idx > erp->er_extoff + erp->er_extcount ||
- (page_idx == erp->er_extoff + erp->er_extcount &&
- !realloc)) {
- low = erp_idx + 1;
- } else if (page_idx == erp->er_extoff + erp->er_extcount &&
- erp->er_extcount == XFS_LINEAR_EXTS) {
- ASSERT(realloc);
- page_idx = 0;
- erp_idx++;
- erp = erp_idx < nlists ? erp + 1 : NULL;
- break;
- } else {
- page_idx -= erp->er_extoff;
- break;
- }
- }
- *idxp = page_idx;
- *erp_idxp = erp_idx;
- return erp;
-}
-
-/*
- * Allocate and initialize an indirection array once the space needed
- * for incore extents increases above XFS_IEXT_BUFSZ.
- */
-void
-xfs_iext_irec_init(
- xfs_ifork_t *ifp) /* inode fork pointer */
-{
- xfs_ext_irec_t *erp; /* indirection array pointer */
- xfs_extnum_t nextents; /* number of extents in file */
-
- ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
- nextents = xfs_iext_count(ifp);
- ASSERT(nextents <= XFS_LINEAR_EXTS);
-
- erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
-
- if (nextents == 0) {
- ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
- } else if (!ifp->if_real_bytes) {
- xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
- } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
- xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
- }
- erp->er_extbuf = ifp->if_u1.if_extents;
- erp->er_extcount = nextents;
- erp->er_extoff = 0;
-
- ifp->if_flags |= XFS_IFEXTIREC;
- ifp->if_real_bytes = XFS_IEXT_BUFSZ;
- ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
- ifp->if_u1.if_ext_irec = erp;
-
- return;
-}
-
-/*
- * Allocate and initialize a new entry in the indirection array.
- */
-xfs_ext_irec_t *
-xfs_iext_irec_new(
- xfs_ifork_t *ifp, /* inode fork pointer */
- int erp_idx) /* index for new irec */
-{
- xfs_ext_irec_t *erp; /* indirection array pointer */
- int i; /* loop counter */
- int nlists; /* number of irec's (ex lists) */
-
- ASSERT(ifp->if_flags & XFS_IFEXTIREC);
- nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-
- /* Resize indirection array */
- xfs_iext_realloc_indirect(ifp, ++nlists *
- sizeof(xfs_ext_irec_t));
- /*
- * Move records down in the array so the
- * new page can use erp_idx.
- */
- erp = ifp->if_u1.if_ext_irec;
- for (i = nlists - 1; i > erp_idx; i--) {
- memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
- }
- ASSERT(i == erp_idx);
-
- /* Initialize new extent record */
- erp = ifp->if_u1.if_ext_irec;
- erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
- ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
- memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
- erp[erp_idx].er_extcount = 0;
- erp[erp_idx].er_extoff = erp_idx > 0 ?
- erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
- return (&erp[erp_idx]);
-}
-
-/*
- * Remove a record from the indirection array.
- */
-void
-xfs_iext_irec_remove(
- xfs_ifork_t *ifp, /* inode fork pointer */
- int erp_idx) /* irec index to remove */
-{
- xfs_ext_irec_t *erp; /* indirection array pointer */
- int i; /* loop counter */
- int nlists; /* number of irec's (ex lists) */
-
- ASSERT(ifp->if_flags & XFS_IFEXTIREC);
- nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
- erp = &ifp->if_u1.if_ext_irec[erp_idx];
- if (erp->er_extbuf) {
- xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
- -erp->er_extcount);
- kmem_free(erp->er_extbuf);
- }
- /* Compact extent records */
- erp = ifp->if_u1.if_ext_irec;
- for (i = erp_idx; i < nlists - 1; i++) {
- memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
- }
- /*
- * Manually free the last extent record from the indirection
- * array. A call to xfs_iext_realloc_indirect() with a size
- * of zero would result in a call to xfs_iext_destroy() which
- * would in turn call this function again, creating a nasty
- * infinite loop.
- */
- if (--nlists) {
- xfs_iext_realloc_indirect(ifp,
- nlists * sizeof(xfs_ext_irec_t));
- } else {
- kmem_free(ifp->if_u1.if_ext_irec);
- }
- ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
-}
-
-/*
- * This is called to clean up large amounts of unused memory allocated
- * by the indirection array. Before compacting anything though, verify
- * that the indirection array is still needed and switch back to the
- * linear extent list (or even the inline buffer) if possible. The
- * compaction policy is as follows:
- *
- * Full Compaction: Extents fit into a single page (or inline buffer)
- * Partial Compaction: Extents occupy less than 50% of allocated space
- * No Compaction: Extents occupy at least 50% of allocated space
- */
-void
-xfs_iext_irec_compact(
- xfs_ifork_t *ifp) /* inode fork pointer */
-{
- xfs_extnum_t nextents; /* number of extents in file */
- int nlists; /* number of irec's (ex lists) */
-
- ASSERT(ifp->if_flags & XFS_IFEXTIREC);
- nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
- nextents = xfs_iext_count(ifp);
-
- if (nextents == 0) {
- xfs_iext_destroy(ifp);
- } else if (nextents <= XFS_INLINE_EXTS) {
- xfs_iext_indirect_to_direct(ifp);
- xfs_iext_direct_to_inline(ifp, nextents);
- } else if (nextents <= XFS_LINEAR_EXTS) {
- xfs_iext_indirect_to_direct(ifp);
- } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
- xfs_iext_irec_compact_pages(ifp);
- }
-}
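/*
 * Illustrative sketch, not part of the patch: the compaction policy
 * described above boils down to a threshold check.  The enum and helper
 * below are hypothetical and simply restate the removed policy.
 */
enum old_iext_compact_action {
	OLD_COMPACT_NONE,	/* extents use >= 50% of allocated space */
	OLD_COMPACT_PAGES,	/* merge neighbouring extent pages */
	OLD_COMPACT_FULL,	/* collapse back to one page or inline */
};

static enum old_iext_compact_action
old_iext_compact_policy_sketch(
	xfs_extnum_t	nextents,	/* extents in the fork */
	int		nlists)		/* irec pages currently allocated */
{
	if (nextents <= XFS_LINEAR_EXTS)
		return OLD_COMPACT_FULL;
	if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1)
		return OLD_COMPACT_PAGES;
	return OLD_COMPACT_NONE;
}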
-
-/*
- * Combine extents from neighboring extent pages.
- */
-void
-xfs_iext_irec_compact_pages(
- xfs_ifork_t *ifp) /* inode fork pointer */
-{
- xfs_ext_irec_t *erp, *erp_next;/* pointers to irec entries */
- int erp_idx = 0; /* indirection array index */
- int nlists; /* number of irec's (ex lists) */
-
- ASSERT(ifp->if_flags & XFS_IFEXTIREC);
- nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
- while (erp_idx < nlists - 1) {
- erp = &ifp->if_u1.if_ext_irec[erp_idx];
- erp_next = erp + 1;
- if (erp_next->er_extcount <=
- (XFS_LINEAR_EXTS - erp->er_extcount)) {
- memcpy(&erp->er_extbuf[erp->er_extcount],
- erp_next->er_extbuf, erp_next->er_extcount *
- sizeof(xfs_bmbt_rec_t));
- erp->er_extcount += erp_next->er_extcount;
- /*
- * Free page before removing extent record
- * so er_extoffs don't get modified in
- * xfs_iext_irec_remove.
- */
- kmem_free(erp_next->er_extbuf);
- erp_next->er_extbuf = NULL;
- xfs_iext_irec_remove(ifp, erp_idx + 1);
- nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
- } else {
- erp_idx++;
- }
- }
-}
-
-/*
- * This is called to update the er_extoff field in the indirection
- * array when extents have been added or removed from one of the
- * extent lists. erp_idx contains the irec index to begin updating
- * at and ext_diff contains the number of extents that were added
- * or removed.
- */
-void
-xfs_iext_irec_update_extoffs(
- xfs_ifork_t *ifp, /* inode fork pointer */
- int erp_idx, /* irec index to update */
- int ext_diff) /* number of new extents */
-{
- int i; /* loop counter */
- int nlists; /* number of irec's (ex lists) */
-
- ASSERT(ifp->if_flags & XFS_IFEXTIREC);
- nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
- for (i = erp_idx; i < nlists; i++) {
- ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
- }
-}
-
-/*
* Initialize an inode's copy-on-write fork.
*/
void
@@ -1974,61 +831,3 @@ xfs_ifork_init_cow(
ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
ip->i_cnextents = 0;
}
-
-/*
- * Lookup the extent covering bno.
- *
- * If there is an extent covering bno return the extent index, and store the
- * expanded extent structure in *gotp, and the extent index in *idx.
- * If there is no extent covering bno, but there is an extent after it (e.g.
- * it lies in a hole) return that extent in *gotp and its index in *idx
- * instead.
- * If bno is beyond the last extent return false, and return the index after
- * the last valid index in *idxp.
- */
-bool
-xfs_iext_lookup_extent(
- struct xfs_inode *ip,
- struct xfs_ifork *ifp,
- xfs_fileoff_t bno,
- xfs_extnum_t *idxp,
- struct xfs_bmbt_irec *gotp)
-{
- struct xfs_bmbt_rec_host *ep;
-
- XFS_STATS_INC(ip->i_mount, xs_look_exlist);
-
- ep = xfs_iext_bno_to_ext(ifp, bno, idxp);
- if (!ep)
- return false;
- xfs_bmbt_get_all(ep, gotp);
- return true;
-}
-
-/*
- * Return true if there is an extent at index idx, and return the expanded
- * extent structure at idx in that case. Else return false.
- */
-bool
-xfs_iext_get_extent(
- struct xfs_ifork *ifp,
- xfs_extnum_t idx,
- struct xfs_bmbt_irec *gotp)
-{
- if (idx < 0 || idx >= xfs_iext_count(ifp))
- return false;
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), gotp);
- return true;
-}
-
-void
-xfs_iext_update_extent(
- struct xfs_ifork *ifp,
- xfs_extnum_t idx,
- struct xfs_bmbt_irec *gotp)
-{
- ASSERT(idx >= 0);
- ASSERT(idx < xfs_iext_count(ifp));
-
- xfs_bmbt_set_all(xfs_iext_get_ext(ifp, idx), gotp);
-}
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 11af705219f6..b9f0098e33b8 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -22,56 +22,19 @@ struct xfs_inode_log_item;
struct xfs_dinode;
/*
- * The following xfs_ext_irec_t struct introduces a second (top) level
- * to the in-core extent allocation scheme. These structs are allocated
- * in a contiguous block, creating an indirection array where each entry
- * (irec) contains a pointer to a buffer of in-core extent records which
- * it manages. Each extent buffer is 4k in size, since 4k is the system
- * page size on Linux i386 and systems with larger page sizes don't seem
- * to gain much, if anything, by using their native page size as the
- * extent buffer size. Also, using 4k extent buffers everywhere provides
- * a consistent interface for CXFS across different platforms.
- *
- * There is currently no limit on the number of irec's (extent lists)
- * allowed, so heavily fragmented files may require an indirection array
- * which spans multiple system pages of memory. The number of extents
- * which would require this amount of contiguous memory is very large
- * and should not cause problems in the foreseeable future. However,
- * if the memory needed for the contiguous array ever becomes a problem,
- * it is possible that a third level of indirection may be required.
- */
-typedef struct xfs_ext_irec {
- xfs_bmbt_rec_host_t *er_extbuf; /* block of extent records */
- xfs_extnum_t er_extoff; /* extent offset in file */
- xfs_extnum_t er_extcount; /* number of extents in page/block */
-} xfs_ext_irec_t;
-
-/*
* File incore extent information, present for each of data & attr forks.
*/
-#define XFS_IEXT_BUFSZ 4096
-#define XFS_LINEAR_EXTS (XFS_IEXT_BUFSZ / (uint)sizeof(xfs_bmbt_rec_t))
-#define XFS_INLINE_EXTS 2
-#define XFS_INLINE_DATA 32
typedef struct xfs_ifork {
int if_bytes; /* bytes in if_u1 */
int if_real_bytes; /* bytes allocated in if_u1 */
struct xfs_btree_block *if_broot; /* file's incore btree root */
short if_broot_bytes; /* bytes allocated for root */
unsigned char if_flags; /* per-fork flags */
+ int if_height; /* height of the extent tree */
union {
- xfs_bmbt_rec_host_t *if_extents;/* linear map file exts */
- xfs_ext_irec_t *if_ext_irec; /* irec map file exts */
+ void *if_root; /* extent tree root */
char *if_data; /* inline file data */
} if_u1;
- union {
- xfs_bmbt_rec_host_t if_inline_ext[XFS_INLINE_EXTS];
- /* very small file extents */
- char if_inline_data[XFS_INLINE_DATA];
- /* very small file data */
- xfs_dev_t if_rdev; /* dev number if special */
- uuid_t if_uuid; /* mount point value */
- } if_u2;
} xfs_ifork_t;
/*
@@ -80,7 +43,6 @@ typedef struct xfs_ifork {
#define XFS_IFINLINE 0x01 /* Inline data is read in */
#define XFS_IFEXTENTS 0x02 /* All extent pointers are read in */
#define XFS_IFBROOT 0x04 /* i_broot points to the bmap b-tree root */
-#define XFS_IFEXTIREC 0x08 /* Indirection array of extent blocks */
/*
* Fork handling.
@@ -150,45 +112,75 @@ int xfs_iextents_copy(struct xfs_inode *, struct xfs_bmbt_rec *,
int);
void xfs_init_local_fork(struct xfs_inode *, int, const void *, int);
-struct xfs_bmbt_rec_host *
- xfs_iext_get_ext(struct xfs_ifork *, xfs_extnum_t);
-xfs_extnum_t xfs_iext_count(struct xfs_ifork *);
-void xfs_iext_insert(struct xfs_inode *, xfs_extnum_t, xfs_extnum_t,
- struct xfs_bmbt_irec *, int);
-void xfs_iext_add(struct xfs_ifork *, xfs_extnum_t, int);
-void xfs_iext_add_indirect_multi(struct xfs_ifork *, int,
- xfs_extnum_t, int);
-void xfs_iext_remove(struct xfs_inode *, xfs_extnum_t, int, int);
-void xfs_iext_remove_inline(struct xfs_ifork *, xfs_extnum_t, int);
-void xfs_iext_remove_direct(struct xfs_ifork *, xfs_extnum_t, int);
-void xfs_iext_remove_indirect(struct xfs_ifork *, xfs_extnum_t, int);
-void xfs_iext_realloc_direct(struct xfs_ifork *, int);
-void xfs_iext_direct_to_inline(struct xfs_ifork *, xfs_extnum_t);
-void xfs_iext_inline_to_direct(struct xfs_ifork *, int);
+xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp);
+void xfs_iext_insert(struct xfs_inode *, struct xfs_iext_cursor *cur,
+ struct xfs_bmbt_irec *, int);
+void xfs_iext_remove(struct xfs_inode *, struct xfs_iext_cursor *,
+ int);
void xfs_iext_destroy(struct xfs_ifork *);
-struct xfs_bmbt_rec_host *
- xfs_iext_bno_to_ext(struct xfs_ifork *, xfs_fileoff_t, int *);
-struct xfs_ext_irec *
- xfs_iext_bno_to_irec(struct xfs_ifork *, xfs_fileoff_t, int *);
-struct xfs_ext_irec *
- xfs_iext_idx_to_irec(struct xfs_ifork *, xfs_extnum_t *, int *,
- int);
-void xfs_iext_irec_init(struct xfs_ifork *);
-struct xfs_ext_irec *
- xfs_iext_irec_new(struct xfs_ifork *, int);
-void xfs_iext_irec_remove(struct xfs_ifork *, int);
-void xfs_iext_irec_compact(struct xfs_ifork *);
-void xfs_iext_irec_compact_pages(struct xfs_ifork *);
-void xfs_iext_irec_compact_full(struct xfs_ifork *);
-void xfs_iext_irec_update_extoffs(struct xfs_ifork *, int, int);
bool xfs_iext_lookup_extent(struct xfs_inode *ip,
struct xfs_ifork *ifp, xfs_fileoff_t bno,
- xfs_extnum_t *idxp, struct xfs_bmbt_irec *gotp);
-bool xfs_iext_get_extent(struct xfs_ifork *ifp, xfs_extnum_t idx,
+ struct xfs_iext_cursor *cur,
struct xfs_bmbt_irec *gotp);
-void xfs_iext_update_extent(struct xfs_ifork *ifp, xfs_extnum_t idx,
+bool xfs_iext_lookup_extent_before(struct xfs_inode *ip,
+ struct xfs_ifork *ifp, xfs_fileoff_t *end,
+ struct xfs_iext_cursor *cur,
struct xfs_bmbt_irec *gotp);
+bool xfs_iext_get_extent(struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur,
+ struct xfs_bmbt_irec *gotp);
+void xfs_iext_update_extent(struct xfs_inode *ip, int state,
+ struct xfs_iext_cursor *cur,
+ struct xfs_bmbt_irec *gotp);
+
+void xfs_iext_first(struct xfs_ifork *, struct xfs_iext_cursor *);
+void xfs_iext_last(struct xfs_ifork *, struct xfs_iext_cursor *);
+void xfs_iext_next(struct xfs_ifork *, struct xfs_iext_cursor *);
+void xfs_iext_prev(struct xfs_ifork *, struct xfs_iext_cursor *);
+
+static inline bool xfs_iext_next_extent(struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *gotp)
+{
+ xfs_iext_next(ifp, cur);
+ return xfs_iext_get_extent(ifp, cur, gotp);
+}
+
+static inline bool xfs_iext_prev_extent(struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *gotp)
+{
+ xfs_iext_prev(ifp, cur);
+ return xfs_iext_get_extent(ifp, cur, gotp);
+}
+
+/*
+ * Return the extent after cur in gotp without updating the cursor.
+ */
+static inline bool xfs_iext_peek_next_extent(struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *gotp)
+{
+ struct xfs_iext_cursor ncur = *cur;
+
+ xfs_iext_next(ifp, &ncur);
+ return xfs_iext_get_extent(ifp, &ncur, gotp);
+}
+
+/*
+ * Return the extent before cur in gotp without updating the cursor.
+ */
+static inline bool xfs_iext_peek_prev_extent(struct xfs_ifork *ifp,
+ struct xfs_iext_cursor *cur, struct xfs_bmbt_irec *gotp)
+{
+ struct xfs_iext_cursor ncur = *cur;
+
+ xfs_iext_prev(ifp, &ncur);
+ return xfs_iext_get_extent(ifp, &ncur, gotp);
+}
+
+#define for_each_xfs_iext(ifp, ext, got) \
+ for (xfs_iext_first((ifp), (ext)); \
+ xfs_iext_get_extent((ifp), (ext), (got)); \
+ xfs_iext_next((ifp), (ext)))
extern struct kmem_zone *xfs_ifork_zone;
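/*
 * Illustrative sketch, not part of the patch: with the cursor helpers
 * declared above, a caller can inspect the neighbours of the current
 * extent without moving the cursor, e.g. to test whether a new record
 * is contiguous with the extent before it.  The helper name is
 * hypothetical, and a real caller would also have to consider
 * delayed-allocation extents.
 */
static bool
iext_prev_is_contiguous_sketch(
	struct xfs_ifork		*ifp,
	struct xfs_iext_cursor		*cur,
	const struct xfs_bmbt_irec	*new)
{
	struct xfs_bmbt_irec		prev;

	if (!xfs_iext_peek_prev_extent(ifp, cur, &prev))
		return false;	/* nothing before the cursor */
	return prev.br_startoff + prev.br_blockcount == new->br_startoff &&
	       prev.br_startblock + prev.br_blockcount == new->br_startblock &&
	       prev.br_state == new->br_state;
}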
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 71de185735e0..996f035ee205 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -264,7 +264,7 @@ typedef struct xfs_trans_header {
* (if any) is indicated in the ilf_dsize field. Changes to this structure
* must be added on to the end.
*/
-typedef struct xfs_inode_log_format {
+struct xfs_inode_log_format {
uint16_t ilf_type; /* inode log item type */
uint16_t ilf_size; /* size of this item */
uint32_t ilf_fields; /* flags for fields logged */
@@ -274,12 +274,12 @@ typedef struct xfs_inode_log_format {
uint64_t ilf_ino; /* inode number */
union {
uint32_t ilfu_rdev; /* rdev value for dev inode*/
- uuid_t ilfu_uuid; /* mount point value */
+ u8 __pad[16]; /* unused */
} ilf_u;
int64_t ilf_blkno; /* blkno of inode buffer */
int32_t ilf_len; /* len of inode buffer */
int32_t ilf_boffset; /* off of inode in buffer */
-} xfs_inode_log_format_t;
+};
/*
* Old 32 bit systems will log in this format without the 64 bit
@@ -295,7 +295,7 @@ struct xfs_inode_log_format_32 {
uint64_t ilf_ino; /* inode number */
union {
uint32_t ilfu_rdev; /* rdev value for dev inode*/
- uuid_t ilfu_uuid; /* mount point value */
+ u8 __pad[16]; /* unused */
} ilf_u;
int64_t ilf_blkno; /* blkno of inode buffer */
int32_t ilf_len; /* len of inode buffer */
@@ -311,7 +311,7 @@ struct xfs_inode_log_format_32 {
#define XFS_ILOG_DEXT 0x004 /* log i_df.if_extents */
#define XFS_ILOG_DBROOT 0x008 /* log i_df.i_broot */
#define XFS_ILOG_DEV 0x010 /* log the dev field */
-#define XFS_ILOG_UUID 0x020 /* log the uuid field */
+#define XFS_ILOG_UUID 0x020 /* added long ago, but never used */
#define XFS_ILOG_ADATA 0x040 /* log i_af.if_data */
#define XFS_ILOG_AEXT 0x080 /* log i_af.if_extents */
#define XFS_ILOG_ABROOT 0x100 /* log i_af.i_broot */
@@ -329,9 +329,9 @@ struct xfs_inode_log_format_32 {
#define XFS_ILOG_NONCORE (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \
XFS_ILOG_DBROOT | XFS_ILOG_DEV | \
- XFS_ILOG_UUID | XFS_ILOG_ADATA | \
- XFS_ILOG_AEXT | XFS_ILOG_ABROOT | \
- XFS_ILOG_DOWNER | XFS_ILOG_AOWNER)
+ XFS_ILOG_ADATA | XFS_ILOG_AEXT | \
+ XFS_ILOG_ABROOT | XFS_ILOG_DOWNER | \
+ XFS_ILOG_AOWNER)
#define XFS_ILOG_DFORK (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \
XFS_ILOG_DBROOT)
@@ -341,10 +341,10 @@ struct xfs_inode_log_format_32 {
#define XFS_ILOG_ALL (XFS_ILOG_CORE | XFS_ILOG_DDATA | \
XFS_ILOG_DEXT | XFS_ILOG_DBROOT | \
- XFS_ILOG_DEV | XFS_ILOG_UUID | \
- XFS_ILOG_ADATA | XFS_ILOG_AEXT | \
- XFS_ILOG_ABROOT | XFS_ILOG_TIMESTAMP | \
- XFS_ILOG_DOWNER | XFS_ILOG_AOWNER)
+ XFS_ILOG_DEV | XFS_ILOG_ADATA | \
+ XFS_ILOG_AEXT | XFS_ILOG_ABROOT | \
+ XFS_ILOG_TIMESTAMP | XFS_ILOG_DOWNER | \
+ XFS_ILOG_AOWNER)
static inline int xfs_ilog_fbroot(int w)
{
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 9d5406b4f663..585b35d34142 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -30,6 +30,7 @@
#include "xfs_bmap.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 55c88a732690..dd019cee1b3b 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -34,6 +34,7 @@
#include "xfs_rmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_bmap.h"
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 5d4e43ef4eea..3fb29a5ea915 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -672,7 +672,6 @@ xfs_rtmodify_range(
/*
* Compute a mask of relevant bits.
*/
- bit = 0;
mask = ((xfs_rtword_t)1 << lastbit) - 1;
/*
* Set/clear the active bits.
@@ -1086,3 +1085,15 @@ xfs_rtalloc_query_all(
return xfs_rtalloc_query_range(tp, &keys[0], &keys[1], fn, priv);
}
+
+/*
+ * Verify that a realtime block number doesn't point off the
+ * end of the realtime device.
+ */
+bool
+xfs_verify_rtbno(
+ struct xfs_mount *mp,
+ xfs_rtblock_t rtbno)
+{
+ return rtbno < mp->m_sb.sb_rblocks;
+}
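/*
 * Illustrative sketch, not part of the patch: a hypothetical caller
 * would use the verifier above to reject an out-of-range realtime block
 * number before trusting it.
 */
static inline int
xfs_check_rtbno_sketch(
	struct xfs_mount	*mp,
	xfs_rtblock_t		rtbno)
{
	if (!xfs_verify_rtbno(mp, rtbno))
		return -EFSCORRUPTED;	/* points past the rt device */
	return 0;
}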
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 0220159bd463..3c560695c546 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -48,6 +48,12 @@ typedef int64_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */
typedef int64_t xfs_sfiloff_t; /* signed block number in a file */
/*
+ * New verifiers will return the instruction address of the failing check.
+ * NULL means everything is ok.
+ */
+typedef void * xfs_failaddr_t;
+
+/*
* Null values for the types.
*/
#define NULLFSBLOCK ((xfs_fsblock_t)-1)
@@ -136,5 +142,21 @@ typedef uint32_t xfs_dqid_t;
#define XFS_NBWORD (1 << XFS_NBWORDLOG)
#define XFS_WORDMASK ((1 << XFS_WORDLOG) - 1)
+struct xfs_iext_cursor {
+ struct xfs_iext_leaf *leaf;
+ int pos;
+};
+
+typedef enum {
+ XFS_EXT_NORM, XFS_EXT_UNWRITTEN,
+} xfs_exntst_t;
+
+typedef struct xfs_bmbt_irec
+{
+ xfs_fileoff_t br_startoff; /* starting file offset */
+ xfs_fsblock_t br_startblock; /* starting block number */
+ xfs_filblks_t br_blockcount; /* number of blocks */
+ xfs_exntst_t br_state; /* extent state */
+} xfs_bmbt_irec_t;
#endif /* __XFS_TYPES_H__ */
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
new file mode 100644
index 000000000000..2a9b4f9e93c6
--- /dev/null
+++ b/fs/xfs/scrub/agheader.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/trace.h"
+
+/*
+ * Set up scrub to check all the static metadata in each AG.
+ * This means the SB, AGF, AGI, and AGFL headers.
+ */
+int
+xfs_scrub_setup_ag_header(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = sc->mp;
+
+ if (sc->sm->sm_agno >= mp->m_sb.sb_agcount ||
+ sc->sm->sm_ino || sc->sm->sm_gen)
+ return -EINVAL;
+ return xfs_scrub_setup_fs(sc, ip);
+}
+
+/* Walk all the blocks in the AGFL. */
+int
+xfs_scrub_walk_agfl(
+ struct xfs_scrub_context *sc,
+ int (*fn)(struct xfs_scrub_context *,
+ xfs_agblock_t bno, void *),
+ void *priv)
+{
+ struct xfs_agf *agf;
+ __be32 *agfl_bno;
+ struct xfs_mount *mp = sc->mp;
+ unsigned int flfirst;
+ unsigned int fllast;
+ int i;
+ int error;
+
+ agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, sc->sa.agfl_bp);
+ flfirst = be32_to_cpu(agf->agf_flfirst);
+ fllast = be32_to_cpu(agf->agf_fllast);
+
+ /* Nothing to walk in an empty AGFL. */
+ if (agf->agf_flcount == cpu_to_be32(0))
+ return 0;
+
+ /* first to last is a consecutive list. */
+ if (fllast >= flfirst) {
+ for (i = flfirst; i <= fllast; i++) {
+ error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
+ if (error)
+ return error;
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return error;
+ }
+
+ return 0;
+ }
+
+ /* first to the end */
+ for (i = flfirst; i < XFS_AGFL_SIZE(mp); i++) {
+ error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
+ if (error)
+ return error;
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return error;
+ }
+
+ /* the start to last. */
+ for (i = 0; i <= fllast; i++) {
+ error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
+ if (error)
+ return error;
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return error;
+ }
+
+ return 0;
+}
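/*
 * Illustrative sketch, not part of the patch: callbacks passed to
 * xfs_scrub_walk_agfl() have this shape.  The trivial counting callback
 * below is hypothetical; the scrubber's real callback is
 * xfs_scrub_agfl_block() further down in this file.
 */
STATIC int
xfs_scrub_agfl_count_block_sketch(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	void				*priv)
{
	unsigned int			*visited = priv;

	(*visited)++;		/* one AGFL slot walked */
	return 0;
}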
+
+/* Superblock */
+
+/*
+ * Scrub the filesystem superblock.
+ *
+ * Note: We do /not/ attempt to check AG 0's superblock. Mount is
+ * responsible for validating all the geometry information in sb 0, so
+ * if the filesystem is capable of initiating online scrub, then clearly
+ * sb 0 is ok and we can use its information to check everything else.
+ */
+int
+xfs_scrub_superblock(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_buf *bp;
+ struct xfs_dsb *sb;
+ xfs_agnumber_t agno;
+ uint32_t v2_ok;
+ __be32 features_mask;
+ int error;
+ __be16 vernum_mask;
+
+ agno = sc->sm->sm_agno;
+ if (agno == 0)
+ return 0;
+
+ error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
+ XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
+ XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_sb_buf_ops);
+ if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
+ return error;
+
+ sb = XFS_BUF_TO_SBP(bp);
+
+ /*
+ * Verify the geometries match. Fields that are permanently
+ * set by mkfs are checked; fields that can be updated later
+ * (and are not propagated to backup superblocks) are preen
+ * checked.
+ */
+ if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
+ xfs_scrub_block_set_preen(sc, bp);
+
+ if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
+ xfs_scrub_block_set_preen(sc, bp);
+
+ if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
+ xfs_scrub_block_set_preen(sc, bp);
+
+ if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
+ xfs_scrub_block_set_preen(sc, bp);
+
+ if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ /* Check sb_versionnum bits that are set at mkfs time. */
+ vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
+ XFS_SB_VERSION_NUMBITS |
+ XFS_SB_VERSION_ALIGNBIT |
+ XFS_SB_VERSION_DALIGNBIT |
+ XFS_SB_VERSION_SHAREDBIT |
+ XFS_SB_VERSION_LOGV2BIT |
+ XFS_SB_VERSION_SECTORBIT |
+ XFS_SB_VERSION_EXTFLGBIT |
+ XFS_SB_VERSION_DIRV2BIT);
+ if ((sb->sb_versionnum & vernum_mask) !=
+ (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ /* Check sb_versionnum bits that can be set after mkfs time. */
+ vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
+ XFS_SB_VERSION_NLINKBIT |
+ XFS_SB_VERSION_QUOTABIT);
+ if ((sb->sb_versionnum & vernum_mask) !=
+ (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
+ xfs_scrub_block_set_preen(sc, bp);
+
+ if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
+ xfs_scrub_block_set_preen(sc, bp);
+
+ if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
+ xfs_scrub_block_set_preen(sc, bp);
+
+ /*
+ * Skip the summary counters since we track them in memory anyway.
+ * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
+ */
+
+ if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
+ xfs_scrub_block_set_preen(sc, bp);
+
+ if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
+ xfs_scrub_block_set_preen(sc, bp);
+
+ /*
+ * Skip the quota flags since repair will force quotacheck.
+ * sb_qflags
+ */
+
+ if (sb->sb_flags != mp->m_sb.sb_flags)
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
+ xfs_scrub_block_set_preen(sc, bp);
+
+ if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
+ xfs_scrub_block_set_preen(sc, bp);
+
+ if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ /* Do we see any invalid bits in sb_features2? */
+ if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
+ if (sb->sb_features2 != 0)
+ xfs_scrub_block_set_corrupt(sc, bp);
+ } else {
+ v2_ok = XFS_SB_VERSION2_OKBITS;
+ if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
+ v2_ok |= XFS_SB_VERSION2_CRCBIT;
+
+ if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_features2 != sb->sb_bad_features2)
+ xfs_scrub_block_set_preen(sc, bp);
+ }
+
+ /* Check sb_features2 flags that are set at mkfs time. */
+ features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
+ XFS_SB_VERSION2_PROJID32BIT |
+ XFS_SB_VERSION2_CRCBIT |
+ XFS_SB_VERSION2_FTYPE);
+ if ((sb->sb_features2 & features_mask) !=
+ (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ /* Check sb_features2 flags that can be set after mkfs time. */
+ features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
+ if ((sb->sb_features2 & features_mask) !=
+ (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (!xfs_sb_version_hascrc(&mp->m_sb)) {
+ /* all v5 fields must be zero */
+ if (memchr_inv(&sb->sb_features_compat, 0,
+ sizeof(struct xfs_dsb) -
+ offsetof(struct xfs_dsb, sb_features_compat)))
+ xfs_scrub_block_set_corrupt(sc, bp);
+ } else {
+ /* Check compat flags; all are set at mkfs time. */
+ features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
+ if ((sb->sb_features_compat & features_mask) !=
+ (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ /* Check ro compat flags; all are set at mkfs time. */
+ features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
+ XFS_SB_FEAT_RO_COMPAT_FINOBT |
+ XFS_SB_FEAT_RO_COMPAT_RMAPBT |
+ XFS_SB_FEAT_RO_COMPAT_REFLINK);
+ if ((sb->sb_features_ro_compat & features_mask) !=
+ (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
+ features_mask))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ /* Check incompat flags; all are set at mkfs time. */
+ features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
+ XFS_SB_FEAT_INCOMPAT_FTYPE |
+ XFS_SB_FEAT_INCOMPAT_SPINODES |
+ XFS_SB_FEAT_INCOMPAT_META_UUID);
+ if ((sb->sb_features_incompat & features_mask) !=
+ (cpu_to_be32(mp->m_sb.sb_features_incompat) &
+ features_mask))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ /* Check log incompat flags; all are set at mkfs time. */
+ features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
+ if ((sb->sb_features_log_incompat & features_mask) !=
+ (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
+ features_mask))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ /* Don't care about sb_crc */
+
+ if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
+ xfs_scrub_block_set_preen(sc, bp);
+
+ /* Don't care about sb_lsn */
+ }
+
+ if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
+ /* The metadata UUID must be the same for all supers */
+ if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
+ xfs_scrub_block_set_corrupt(sc, bp);
+ }
+
+ /* Everything else must be zero. */
+ if (memchr_inv(sb + 1, 0,
+ BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
+ xfs_scrub_block_set_corrupt(sc, bp);
+
+ return error;
+}
+
+/* AGF */
+
+/* Scrub the AGF. */
+int
+xfs_scrub_agf(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_agf *agf;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_agblock_t eoag;
+ xfs_agblock_t agfl_first;
+ xfs_agblock_t agfl_last;
+ xfs_agblock_t agfl_count;
+ xfs_agblock_t fl_count;
+ int level;
+ int error = 0;
+
+ agno = sc->sa.agno = sc->sm->sm_agno;
+ error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
+ &sc->sa.agf_bp, &sc->sa.agfl_bp);
+ if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
+ goto out;
+
+ agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+
+ /* Check the AG length */
+ eoag = be32_to_cpu(agf->agf_length);
+ if (eoag != xfs_ag_block_count(mp, agno))
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+
+ /* Check the AGF btree roots and levels */
+ agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
+ if (!xfs_verify_agbno(mp, agno, agbno))
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+
+ agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
+ if (!xfs_verify_agbno(mp, agno, agbno))
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+
+ level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
+ if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+
+ level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
+ if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
+ if (!xfs_verify_agbno(mp, agno, agbno))
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+
+ level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
+ if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ }
+
+ if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ agbno = be32_to_cpu(agf->agf_refcount_root);
+ if (!xfs_verify_agbno(mp, agno, agbno))
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+
+ level = be32_to_cpu(agf->agf_refcount_level);
+ if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ }
+
+ /* Check the AGFL counters */
+ agfl_first = be32_to_cpu(agf->agf_flfirst);
+ agfl_last = be32_to_cpu(agf->agf_fllast);
+ agfl_count = be32_to_cpu(agf->agf_flcount);
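+ /* flfirst/fllast index a circular list, so account for wraparound. */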
+ if (agfl_last > agfl_first)
+ fl_count = agfl_last - agfl_first + 1;
+ else
+ fl_count = XFS_AGFL_SIZE(mp) - agfl_first + agfl_last + 1;
+ if (agfl_count != 0 && fl_count != agfl_count)
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+
+out:
+ return error;
+}
+
+/* AGFL */
+
+struct xfs_scrub_agfl_info {
+ unsigned int sz_entries;
+ unsigned int nr_entries;
+ xfs_agblock_t *entries;
+};
+
+/* Scrub an AGFL block. */
+STATIC int
+xfs_scrub_agfl_block(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ void *priv)
+{
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_scrub_agfl_info *sai = priv;
+ xfs_agnumber_t agno = sc->sa.agno;
+
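+ /* Record each in-bounds AGFL block; anything else marks the AGFL corrupt. */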
+ if (xfs_verify_agbno(mp, agno, agbno) &&
+ sai->nr_entries < sai->sz_entries)
+ sai->entries[sai->nr_entries++] = agbno;
+ else
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp);
+
+ return 0;
+}
+
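+/* Comparison helper for sorting AGFL block numbers. */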
+static int
+xfs_scrub_agblock_cmp(
+ const void *pa,
+ const void *pb)
+{
+ const xfs_agblock_t *a = pa;
+ const xfs_agblock_t *b = pb;
+
+ return (int)*a - (int)*b;
+}
+
+/* Scrub the AGFL. */
+int
+xfs_scrub_agfl(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_scrub_agfl_info sai = { 0 };
+ struct xfs_agf *agf;
+ xfs_agnumber_t agno;
+ unsigned int agflcount;
+ unsigned int i;
+ int error;
+
+ agno = sc->sa.agno = sc->sm->sm_agno;
+ error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
+ &sc->sa.agf_bp, &sc->sa.agfl_bp);
+ if (!xfs_scrub_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
+ goto out;
+ if (!sc->sa.agf_bp)
+ return -EFSCORRUPTED;
+
+ /* Allocate a buffer so we can check the AGFL entries for uniqueness. */
+ agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ agflcount = be32_to_cpu(agf->agf_flcount);
+ if (agflcount > XFS_AGFL_SIZE(sc->mp)) {
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ goto out;
+ }
+ sai.sz_entries = agflcount;
+ sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, KM_NOFS);
+ if (!sai.entries) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ /* Check the blocks in the AGFL. */
+ error = xfs_scrub_walk_agfl(sc, xfs_scrub_agfl_block, &sai);
+ if (error)
+ goto out_free;
+
+ if (agflcount != sai.nr_entries) {
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ goto out_free;
+ }
+
+ /* Sort entries, check for duplicates. */
+ sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
+ xfs_scrub_agblock_cmp, NULL);
+ for (i = 1; i < sai.nr_entries; i++) {
+ if (sai.entries[i] == sai.entries[i - 1]) {
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ break;
+ }
+ }
+
+out_free:
+ kmem_free(sai.entries);
+out:
+ return error;
+}
+
+/* AGI */
+
+/* Scrub the AGI. */
+int
+xfs_scrub_agi(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_agi *agi;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_agblock_t eoag;
+ xfs_agino_t agino;
+ xfs_agino_t first_agino;
+ xfs_agino_t last_agino;
+ xfs_agino_t icount;
+ int i;
+ int level;
+ int error = 0;
+
+ agno = sc->sa.agno = sc->sm->sm_agno;
+ error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
+ &sc->sa.agf_bp, &sc->sa.agfl_bp);
+ if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
+ goto out;
+
+ agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
+
+ /* Check the AG length */
+ eoag = be32_to_cpu(agi->agi_length);
+ if (eoag != xfs_ag_block_count(mp, agno))
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+
+ /* Check btree roots and levels */
+ agbno = be32_to_cpu(agi->agi_root);
+ if (!xfs_verify_agbno(mp, agno, agbno))
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+
+ level = be32_to_cpu(agi->agi_level);
+ if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+
+ if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
+ agbno = be32_to_cpu(agi->agi_free_root);
+ if (!xfs_verify_agbno(mp, agno, agbno))
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+
+ level = be32_to_cpu(agi->agi_free_level);
+ if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ }
+
+ /* Check inode counters */
+ xfs_ialloc_agino_range(mp, agno, &first_agino, &last_agino);
+ icount = be32_to_cpu(agi->agi_count);
+ if (icount > last_agino - first_agino + 1 ||
+ icount < be32_to_cpu(agi->agi_freecount))
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+
+ /* Check inode pointers */
+ agino = be32_to_cpu(agi->agi_newino);
+ if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+
+ agino = be32_to_cpu(agi->agi_dirino);
+ if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+
+ /* Check unlinked inode buckets */
+ for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
+ agino = be32_to_cpu(agi->agi_unlinked[i]);
+ if (agino == NULLAGINO)
+ continue;
+ if (!xfs_verify_agino(mp, agno, agino))
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ }
+
+ if (agi->agi_pad32 != cpu_to_be32(0))
+ xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+
+out:
+ return error;
+}
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
new file mode 100644
index 000000000000..059663e13414
--- /dev/null
+++ b/fs/xfs/scrub/alloc.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_alloc.h"
+#include "xfs_rmap.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/btree.h"
+#include "scrub/trace.h"
+
+/*
+ * Set us up to scrub free space btrees.
+ */
+int
+xfs_scrub_setup_ag_allocbt(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip)
+{
+ return xfs_scrub_setup_ag_btree(sc, ip, false);
+}
+
+/* Free space btree scrubber. */
+
+/* Scrub a bnobt/cntbt record. */
+STATIC int
+xfs_scrub_allocbt_rec(
+ struct xfs_scrub_btree *bs,
+ union xfs_btree_rec *rec)
+{
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ xfs_agblock_t bno;
+ xfs_extlen_t len;
+ int error = 0;
+
+ bno = be32_to_cpu(rec->alloc.ar_startblock);
+ len = be32_to_cpu(rec->alloc.ar_blockcount);
+
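+ /* The extent must lie within this AG and must not overflow. */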
+ if (bno + len <= bno ||
+ !xfs_verify_agbno(mp, agno, bno) ||
+ !xfs_verify_agbno(mp, agno, bno + len - 1))
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ return error;
+}
+
+/* Scrub the freespace btrees for some AG. */
+STATIC int
+xfs_scrub_allocbt(
+ struct xfs_scrub_context *sc,
+ xfs_btnum_t which)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_btree_cur *cur;
+
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
+ cur = which == XFS_BTNUM_BNO ? sc->sa.bno_cur : sc->sa.cnt_cur;
+ return xfs_scrub_btree(sc, cur, xfs_scrub_allocbt_rec, &oinfo, NULL);
+}
+
+int
+xfs_scrub_bnobt(
+ struct xfs_scrub_context *sc)
+{
+ return xfs_scrub_allocbt(sc, XFS_BTNUM_BNO);
+}
+
+int
+xfs_scrub_cntbt(
+ struct xfs_scrub_context *sc)
+{
+ return xfs_scrub_allocbt(sc, XFS_BTNUM_CNT);
+}
diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c
new file mode 100644
index 000000000000..4ed80474f545
--- /dev/null
+++ b/fs/xfs/scrub/attr.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_inode.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2.h"
+#include "xfs_attr.h"
+#include "xfs_attr_leaf.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/dabtree.h"
+#include "scrub/trace.h"
+
+#include <linux/posix_acl_xattr.h>
+#include <linux/xattr.h>
+
+/* Set us up to scrub an inode's extended attributes. */
+int
+xfs_scrub_setup_xattr(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip)
+{
+ size_t sz;
+
+ /*
+ * Allocate the buffer without the inode lock held. We need enough
+ * space to read every xattr value in the file or enough space to
+ * hold three copies of the xattr free space bitmap. (Not both at
+ * the same time.)
+ */
+ sz = max_t(size_t, XATTR_SIZE_MAX, 3 * sizeof(long) *
+ BITS_TO_LONGS(sc->mp->m_attr_geo->blksize));
+ sc->buf = kmem_zalloc_large(sz, KM_SLEEP);
+ if (!sc->buf)
+ return -ENOMEM;
+
+ return xfs_scrub_setup_inode_contents(sc, ip, 0);
+}
+
+/* Extended Attributes */
+
+struct xfs_scrub_xattr {
+ struct xfs_attr_list_context context;
+ struct xfs_scrub_context *sc;
+};
+
+/*
+ * Check that an extended attribute key can be looked up by hash.
+ *
+ * We use the XFS attribute list iterator (i.e. xfs_attr_list_int_ilocked)
+ * to call this function for every attribute key in an inode. Once
+ * we're here, we load the attribute value to see if any errors happen,
+ * or if we get more or less data than we expected.
+ */
+static void
+xfs_scrub_xattr_listent(
+ struct xfs_attr_list_context *context,
+ int flags,
+ unsigned char *name,
+ int namelen,
+ int valuelen)
+{
+ struct xfs_scrub_xattr *sx;
+ struct xfs_da_args args = { NULL };
+ int error = 0;
+
+ sx = container_of(context, struct xfs_scrub_xattr, context);
+
+ if (flags & XFS_ATTR_INCOMPLETE) {
+ /* Incomplete attr key, just mark the inode for preening. */
+ xfs_scrub_ino_set_preen(sx->sc, context->dp->i_ino, NULL);
+ return;
+ }
+
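+ /* Set up a name-based lookup so we can check the value length. */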
+ args.flags = ATTR_KERNOTIME;
+ if (flags & XFS_ATTR_ROOT)
+ args.flags |= ATTR_ROOT;
+ else if (flags & XFS_ATTR_SECURE)
+ args.flags |= ATTR_SECURE;
+ args.geo = context->dp->i_mount->m_attr_geo;
+ args.whichfork = XFS_ATTR_FORK;
+ args.dp = context->dp;
+ args.name = name;
+ args.namelen = namelen;
+ args.hashval = xfs_da_hashname(args.name, args.namelen);
+ args.trans = context->tp;
+ args.value = sx->sc->buf;
+ args.valuelen = XATTR_SIZE_MAX;
+
+ error = xfs_attr_get_ilocked(context->dp, &args);
+ if (error == -EEXIST)
+ error = 0;
+ if (!xfs_scrub_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
+ &error))
+ goto fail_xref;
+ if (args.valuelen != valuelen)
+ xfs_scrub_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
+ args.blkno);
+
+fail_xref:
+ return;
+}
+
+/*
+ * Mark a range [start, start+len) in this map. Returns true if the
+ * region was free, and false if there's a conflict or a problem.
+ *
+ * Within a char, the lowest bit of the char represents the byte with
+ * the smallest address.
+ */
+STATIC bool
+xfs_scrub_xattr_set_map(
+ struct xfs_scrub_context *sc,
+ unsigned long *map,
+ unsigned int start,
+ unsigned int len)
+{
+ unsigned int mapsize = sc->mp->m_attr_geo->blksize;
+ bool ret = true;
+
+ if (start >= mapsize)
+ return false;
+ if (start + len > mapsize) {
+ len = mapsize - start;
+ ret = false;
+ }
+
+ if (find_next_bit(map, mapsize, start) < start + len)
+ ret = false;
+ bitmap_set(map, start, len);
+
+ return ret;
+}
+
+/*
+ * Check the leaf freemap against the usage bitmap. Returns false if the
+ * attr freemap has problems or points to used space.
+ */
+STATIC bool
+xfs_scrub_xattr_check_freemap(
+ struct xfs_scrub_context *sc,
+ unsigned long *map,
+ struct xfs_attr3_icleaf_hdr *leafhdr)
+{
+ unsigned long *freemap;
+ unsigned long *dstmap;
+ unsigned int mapsize = sc->mp->m_attr_geo->blksize;
+ int i;
+
+ /* Construct bitmap of freemap contents. */
+ freemap = (unsigned long *)sc->buf + BITS_TO_LONGS(mapsize);
+ bitmap_zero(freemap, mapsize);
+ for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
+ if (!xfs_scrub_xattr_set_map(sc, freemap,
+ leafhdr->freemap[i].base,
+ leafhdr->freemap[i].size))
+ return false;
+ }
+
+ /* Look for bits that are set in freemap and are marked in use. */
+ dstmap = freemap + BITS_TO_LONGS(mapsize);
+ return bitmap_and(dstmap, freemap, map, mapsize) == 0;
+}
+
+/*
+ * Check this leaf entry's relations to everything else.
+ * Adds the number of bytes used for the name/value data to *usedbytes.
+ */
+STATIC void
+xfs_scrub_xattr_entry(
+ struct xfs_scrub_da_btree *ds,
+ int level,
+ char *buf_end,
+ struct xfs_attr_leafblock *leaf,
+ struct xfs_attr3_icleaf_hdr *leafhdr,
+ unsigned long *usedmap,
+ struct xfs_attr_leaf_entry *ent,
+ int idx,
+ unsigned int *usedbytes,
+ __u32 *last_hashval)
+{
+ struct xfs_mount *mp = ds->state->mp;
+ char *name_end;
+ struct xfs_attr_leaf_name_local *lentry;
+ struct xfs_attr_leaf_name_remote *rentry;
+ unsigned int nameidx;
+ unsigned int namesize;
+
+ if (ent->pad2 != 0)
+ xfs_scrub_da_set_corrupt(ds, level);
+
+ /* Hash values in order? */
+ if (be32_to_cpu(ent->hashval) < *last_hashval)
+ xfs_scrub_da_set_corrupt(ds, level);
+ *last_hashval = be32_to_cpu(ent->hashval);
+
+ nameidx = be16_to_cpu(ent->nameidx);
+ if (nameidx < leafhdr->firstused ||
+ nameidx >= mp->m_attr_geo->blksize) {
+ xfs_scrub_da_set_corrupt(ds, level);
+ return;
+ }
+
+ /* Check the name information. */
+ if (ent->flags & XFS_ATTR_LOCAL) {
+ lentry = xfs_attr3_leaf_name_local(leaf, idx);
+ namesize = xfs_attr_leaf_entsize_local(lentry->namelen,
+ be16_to_cpu(lentry->valuelen));
+ name_end = (char *)lentry + namesize;
+ if (lentry->namelen == 0)
+ xfs_scrub_da_set_corrupt(ds, level);
+ } else {
+ rentry = xfs_attr3_leaf_name_remote(leaf, idx);
+ namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
+ name_end = (char *)rentry + namesize;
+ if (rentry->namelen == 0 || rentry->valueblk == 0)
+ xfs_scrub_da_set_corrupt(ds, level);
+ }
+ if (name_end > buf_end)
+ xfs_scrub_da_set_corrupt(ds, level);
+
+ if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
+ xfs_scrub_da_set_corrupt(ds, level);
+ if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
+ *usedbytes += namesize;
+}
+
+/* Scrub an attribute leaf. */
+STATIC int
+xfs_scrub_xattr_block(
+ struct xfs_scrub_da_btree *ds,
+ int level)
+{
+ struct xfs_attr3_icleaf_hdr leafhdr;
+ struct xfs_mount *mp = ds->state->mp;
+ struct xfs_da_state_blk *blk = &ds->state->path.blk[level];
+ struct xfs_buf *bp = blk->bp;
+ xfs_dablk_t *last_checked = ds->private;
+ struct xfs_attr_leafblock *leaf = bp->b_addr;
+ struct xfs_attr_leaf_entry *ent;
+ struct xfs_attr_leaf_entry *entries;
+ unsigned long *usedmap = ds->sc->buf;
+ char *buf_end;
+ size_t off;
+ __u32 last_hashval = 0;
+ unsigned int usedbytes = 0;
+ unsigned int hdrsize;
+ int i;
+
+ if (*last_checked == blk->blkno)
+ return 0;
+ *last_checked = blk->blkno;
+ bitmap_zero(usedmap, mp->m_attr_geo->blksize);
+
+ /* Check all the padding. */
+ if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb)) {
+ struct xfs_attr3_leafblock *leaf = bp->b_addr;
+
+ if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 ||
+ leaf->hdr.info.hdr.pad != 0)
+ xfs_scrub_da_set_corrupt(ds, level);
+ } else {
+ if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0)
+ xfs_scrub_da_set_corrupt(ds, level);
+ }
+
+ /* Check the leaf header */
+ xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
+ hdrsize = xfs_attr3_leaf_hdr_size(leaf);
+
+ if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
+ xfs_scrub_da_set_corrupt(ds, level);
+ if (leafhdr.firstused > mp->m_attr_geo->blksize)
+ xfs_scrub_da_set_corrupt(ds, level);
+ if (leafhdr.firstused < hdrsize)
+ xfs_scrub_da_set_corrupt(ds, level);
+ if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
+ xfs_scrub_da_set_corrupt(ds, level);
+
+ if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ goto out;
+
+ entries = xfs_attr3_leaf_entryp(leaf);
+ if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused)
+ xfs_scrub_da_set_corrupt(ds, level);
+
+ buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
+ for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {
+ /* Mark the leaf entry itself. */
+ off = (char *)ent - (char *)leaf;
+ if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, off,
+ sizeof(xfs_attr_leaf_entry_t))) {
+ xfs_scrub_da_set_corrupt(ds, level);
+ goto out;
+ }
+
+ /* Check the entry and nameval. */
+ xfs_scrub_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
+ usedmap, ent, i, &usedbytes, &last_hashval);
+
+ if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ goto out;
+ }
+
+ if (!xfs_scrub_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
+ xfs_scrub_da_set_corrupt(ds, level);
+
+ if (leafhdr.usedbytes != usedbytes)
+ xfs_scrub_da_set_corrupt(ds, level);
+
+out:
+ return 0;
+}
+
+/* Scrub an attribute btree record. */
+STATIC int
+xfs_scrub_xattr_rec(
+ struct xfs_scrub_da_btree *ds,
+ int level,
+ void *rec)
+{
+ struct xfs_mount *mp = ds->state->mp;
+ struct xfs_attr_leaf_entry *ent = rec;
+ struct xfs_da_state_blk *blk;
+ struct xfs_attr_leaf_name_local *lentry;
+ struct xfs_attr_leaf_name_remote *rentry;
+ struct xfs_buf *bp;
+ xfs_dahash_t calc_hash;
+ xfs_dahash_t hash;
+ int nameidx;
+ int hdrsize;
+ unsigned int badflags;
+ int error;
+
+ blk = &ds->state->path.blk[level];
+
+ /* Check the whole block, if necessary. */
+ error = xfs_scrub_xattr_block(ds, level);
+ if (error)
+ goto out;
+ if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ goto out;
+
+ /* Check the hash of the entry. */
+ error = xfs_scrub_da_btree_hash(ds, level, &ent->hashval);
+ if (error)
+ goto out;
+
+ /* Find the attr entry's location. */
+ bp = blk->bp;
+ hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr);
+ nameidx = be16_to_cpu(ent->nameidx);
+ if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) {
+ xfs_scrub_da_set_corrupt(ds, level);
+ goto out;
+ }
+
+ /* Retrieve the entry and check it. */
+ hash = be32_to_cpu(ent->hashval);
+ badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
+ XFS_ATTR_INCOMPLETE);
+ if ((ent->flags & badflags) != 0)
+ xfs_scrub_da_set_corrupt(ds, level);
+ if (ent->flags & XFS_ATTR_LOCAL) {
+ lentry = (struct xfs_attr_leaf_name_local *)
+ (((char *)bp->b_addr) + nameidx);
+ if (lentry->namelen <= 0) {
+ xfs_scrub_da_set_corrupt(ds, level);
+ goto out;
+ }
+ calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen);
+ } else {
+ rentry = (struct xfs_attr_leaf_name_remote *)
+ (((char *)bp->b_addr) + nameidx);
+ if (rentry->namelen <= 0) {
+ xfs_scrub_da_set_corrupt(ds, level);
+ goto out;
+ }
+ calc_hash = xfs_da_hashname(rentry->name, rentry->namelen);
+ }
+ if (calc_hash != hash)
+ xfs_scrub_da_set_corrupt(ds, level);
+
+out:
+ return error;
+}
+
+/* Scrub the extended attribute metadata. */
+int
+xfs_scrub_xattr(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_scrub_xattr sx;
+ struct attrlist_cursor_kern cursor = { 0 };
+ xfs_dablk_t last_checked = -1U;
+ int error = 0;
+
+ if (!xfs_inode_hasattr(sc->ip))
+ return -ENOENT;
+
+ memset(&sx, 0, sizeof(sx));
+ /* Check attribute tree structure */
+ error = xfs_scrub_da_btree(sc, XFS_ATTR_FORK, xfs_scrub_xattr_rec,
+ &last_checked);
+ if (error)
+ goto out;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ goto out;
+
+ /* Check that every attr key can also be looked up by hash. */
+ sx.context.dp = sc->ip;
+ sx.context.cursor = &cursor;
+ sx.context.resynch = 1;
+ sx.context.put_listent = xfs_scrub_xattr_listent;
+ sx.context.tp = sc->tp;
+ sx.context.flags = ATTR_INCOMPLETE;
+ sx.sc = sc;
+
+ /*
+ * Look up every xattr in this file by name.
+ *
+ * Use the backend implementation of xfs_attr_list to call
+ * xfs_scrub_xattr_listent on every attribute key in this inode.
+ * In other words, we use the same iterator/callback mechanism
+ * that listattr uses to scrub extended attributes, though in our
+ * _listent function, we check the value of the attribute.
+ *
+ * The VFS only locks i_rwsem when modifying attrs, so keep all
+ * three locks held because that's the only way to ensure we're
+ * the only thread poking into the da btree. We traverse the da
+ * btree while holding a leaf buffer locked for the xattr name
+ * iteration, which doesn't really follow the usual buffer
+ * locking order.
+ */
+ error = xfs_attr_list_int_ilocked(&sx.context);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
+ goto out;
+out:
+ return error;
+}
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
new file mode 100644
index 000000000000..42fec0bcd9e1
--- /dev/null
+++ b/fs/xfs/scrub/bmap.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_inode.h"
+#include "xfs_inode_fork.h"
+#include "xfs_alloc.h"
+#include "xfs_rtalloc.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_rmap.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/btree.h"
+#include "scrub/trace.h"
+
+/* Set us up with an inode's bmap. */
+int
+xfs_scrub_setup_inode_bmap(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = sc->mp;
+ int error;
+
+ error = xfs_scrub_get_inode(sc, ip);
+ if (error)
+ goto out;
+
+ sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
+ xfs_ilock(sc->ip, sc->ilock_flags);
+
+ /*
+ * We don't want any ephemeral data fork updates sitting around
+ * while we inspect block mappings, so wait for directio to finish
+ * and flush dirty data if we have delalloc reservations.
+ */
+ if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
+ sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
+ inode_dio_wait(VFS_I(sc->ip));
+ error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping);
+ if (error)
+ goto out;
+ }
+
+ /* Got the inode, lock it and we're ready to go. */
+ error = xfs_scrub_trans_alloc(sc->sm, mp, &sc->tp);
+ if (error)
+ goto out;
+ sc->ilock_flags |= XFS_ILOCK_EXCL;
+ xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
+
+out:
+ /* scrub teardown will unlock and release the inode */
+ return error;
+}
+
+/*
+ * Inode fork block mapping (BMBT) scrubber.
+ * More complex than the others because we have to scrub
+ * all the extents regardless of whether or not the fork
+ * is in btree format.
+ */
+
+struct xfs_scrub_bmap_info {
+ struct xfs_scrub_context *sc;
+ xfs_fileoff_t lastoff;
+ bool is_rt;
+ bool is_shared;
+ int whichfork;
+};
+
+/* Scrub a single extent record. */
+STATIC int
+xfs_scrub_bmap_extent(
+ struct xfs_inode *ip,
+ struct xfs_btree_cur *cur,
+ struct xfs_scrub_bmap_info *info,
+ struct xfs_bmbt_irec *irec)
+{
+ struct xfs_mount *mp = info->sc->mp;
+ struct xfs_buf *bp = NULL;
+ int error = 0;
+
+ if (cur)
+ xfs_btree_get_block(cur, 0, &bp);
+
+ /*
+ * Check for out-of-order extents. This record could have come
+ * from the incore list, for which there is no ordering check.
+ */
+ if (irec->br_startoff < info->lastoff)
+ xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+
+ /* There should never be a "hole" extent in either extent list. */
+ if (irec->br_startblock == HOLESTARTBLOCK)
+ xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+
+ /*
+ * Check for delalloc extents. We never iterate the ones in the
+ * in-core extent scan, and we should never see these in the bmbt.
+ */
+ if (isnullstartblock(irec->br_startblock))
+ xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+
+ /* Make sure the extent points to a valid place. */
+ if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
+ xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+ if (info->is_rt &&
+ (!xfs_verify_rtbno(mp, irec->br_startblock) ||
+ !xfs_verify_rtbno(mp, irec->br_startblock +
+ irec->br_blockcount - 1)))
+ xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+ if (!info->is_rt &&
+ (!xfs_verify_fsbno(mp, irec->br_startblock) ||
+ !xfs_verify_fsbno(mp, irec->br_startblock +
+ irec->br_blockcount - 1)))
+ xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+
+ /* We don't allow unwritten extents on attr forks. */
+ if (irec->br_state == XFS_EXT_UNWRITTEN &&
+ info->whichfork == XFS_ATTR_FORK)
+ xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+
+ info->lastoff = irec->br_startoff + irec->br_blockcount;
+ return error;
+}
+
+/* Scrub a bmbt record. */
+STATIC int
+xfs_scrub_bmapbt_rec(
+ struct xfs_scrub_btree *bs,
+ union xfs_btree_rec *rec)
+{
+ struct xfs_bmbt_irec irec;
+ struct xfs_scrub_bmap_info *info = bs->private;
+ struct xfs_inode *ip = bs->cur->bc_private.b.ip;
+ struct xfs_buf *bp = NULL;
+ struct xfs_btree_block *block;
+ uint64_t owner;
+ int i;
+
+ /*
+ * Check the owners of the btree blocks up to the level below
+ * the root since the verifiers don't do that.
+ */
+ if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
+ bs->cur->bc_ptrs[0] == 1) {
+ for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
+ block = xfs_btree_get_block(bs->cur, i, &bp);
+ owner = be64_to_cpu(block->bb_u.l.bb_owner);
+ if (owner != ip->i_ino)
+ xfs_scrub_fblock_set_corrupt(bs->sc,
+ info->whichfork, 0);
+ }
+ }
+
+ /* Set up the in-core record and scrub it. */
+ xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
+ return xfs_scrub_bmap_extent(ip, bs->cur, info, &irec);
+}
+
+/* Scan the btree records. */
+STATIC int
+xfs_scrub_bmap_btree(
+ struct xfs_scrub_context *sc,
+ int whichfork,
+ struct xfs_scrub_bmap_info *info)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_inode *ip = sc->ip;
+ struct xfs_btree_cur *cur;
+ int error;
+
+ cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
+ xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
+ error = xfs_scrub_btree(sc, cur, xfs_scrub_bmapbt_rec, &oinfo, info);
+ xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR :
+ XFS_BTREE_NOERROR);
+ return error;
+}
+
+/*
+ * Scrub an inode fork's block mappings.
+ *
+ * First we scan every record in every btree block, if applicable.
+ * Then we unconditionally scan the incore extent cache.
+ */
+STATIC int
+xfs_scrub_bmap(
+ struct xfs_scrub_context *sc,
+ int whichfork)
+{
+ struct xfs_bmbt_irec irec;
+ struct xfs_scrub_bmap_info info = { NULL };
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_inode *ip = sc->ip;
+ struct xfs_ifork *ifp;
+ xfs_fileoff_t endoff;
+ struct xfs_iext_cursor icur;
+ bool found;
+ int error = 0;
+
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+
+ info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
+ info.whichfork = whichfork;
+ info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
+ info.sc = sc;
+
+ switch (whichfork) {
+ case XFS_COW_FORK:
+ /* Non-existent CoW forks are ignorable. */
+ if (!ifp)
+ goto out;
+ /* No CoW forks on non-reflink inodes/filesystems. */
+ if (!xfs_is_reflink_inode(ip)) {
+ xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino, NULL);
+ goto out;
+ }
+ break;
+ case XFS_ATTR_FORK:
+ if (!ifp)
+ goto out;
+ if (!xfs_sb_version_hasattr(&mp->m_sb) &&
+ !xfs_sb_version_hasattr2(&mp->m_sb))
+ xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino, NULL);
+ break;
+ default:
+ ASSERT(whichfork == XFS_DATA_FORK);
+ break;
+ }
+
+ /* Check the fork values */
+ switch (XFS_IFORK_FORMAT(ip, whichfork)) {
+ case XFS_DINODE_FMT_UUID:
+ case XFS_DINODE_FMT_DEV:
+ case XFS_DINODE_FMT_LOCAL:
+ /* No mappings to check. */
+ goto out;
+ case XFS_DINODE_FMT_EXTENTS:
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
+ goto out;
+ }
+ break;
+ case XFS_DINODE_FMT_BTREE:
+ if (whichfork == XFS_COW_FORK) {
+ xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
+ goto out;
+ }
+
+ error = xfs_scrub_bmap_btree(sc, whichfork, &info);
+ if (error)
+ goto out;
+ break;
+ default:
+ xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
+ goto out;
+ }
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ goto out;
+
+ /* Now try to scrub the in-memory extent list. */
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(sc->tp, ip, whichfork);
+ if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
+ goto out;
+ }
+
+ /* Find the offset of the last extent in the mapping. */
+ error = xfs_bmap_last_offset(ip, &endoff, whichfork);
+ if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
+ goto out;
+
+ /* Scrub extent records. */
+ info.lastoff = 0;
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+ for (found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &irec);
+ found != 0;
+ found = xfs_iext_next_extent(ifp, &icur, &irec)) {
+ if (xfs_scrub_should_terminate(sc, &error))
+ break;
+ if (isnullstartblock(irec.br_startblock))
+ continue;
+ if (irec.br_startoff >= endoff) {
+ xfs_scrub_fblock_set_corrupt(sc, whichfork,
+ irec.br_startoff);
+ goto out;
+ }
+ error = xfs_scrub_bmap_extent(ip, NULL, &info, &irec);
+ if (error)
+ goto out;
+ }
+
+out:
+ return error;
+}
+
+/* Scrub an inode's data fork. */
+int
+xfs_scrub_bmap_data(
+ struct xfs_scrub_context *sc)
+{
+ return xfs_scrub_bmap(sc, XFS_DATA_FORK);
+}
+
+/* Scrub an inode's attr fork. */
+int
+xfs_scrub_bmap_attr(
+ struct xfs_scrub_context *sc)
+{
+ return xfs_scrub_bmap(sc, XFS_ATTR_FORK);
+}
+
+/* Scrub an inode's CoW fork. */
+int
+xfs_scrub_bmap_cow(
+ struct xfs_scrub_context *sc)
+{
+ if (!xfs_is_reflink_inode(sc->ip))
+ return -ENOENT;
+
+ return xfs_scrub_bmap(sc, XFS_COW_FORK);
+}
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
new file mode 100644
index 000000000000..df0766132ace
--- /dev/null
+++ b/fs/xfs/scrub/btree.c
@@ -0,0 +1,516 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/btree.h"
+#include "scrub/trace.h"
+
+/* btree scrubbing */
+
+/*
+ * Check for btree operation errors. See the section about handling
+ * operational errors in common.c.
+ */
+bool
+xfs_scrub_btree_process_error(
+ struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ int level,
+ int *error)
+{
+ if (*error == 0)
+ return true;
+
+ switch (*error) {
+ case -EDEADLOCK:
+ /* Used to restart an op with deadlock avoidance. */
+ trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
+ break;
+ case -EFSBADCRC:
+ case -EFSCORRUPTED:
+ /* Note the badness but don't abort. */
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ *error = 0;
+ /* fall through */
+ default:
+ if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
+ trace_xfs_scrub_ifork_btree_op_error(sc, cur, level,
+ *error, __return_address);
+ else
+ trace_xfs_scrub_btree_op_error(sc, cur, level,
+ *error, __return_address);
+ break;
+ }
+ return false;
+}
+
+/* Record btree block corruption. */
+void
+xfs_scrub_btree_set_corrupt(
+ struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+
+ if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
+ trace_xfs_scrub_ifork_btree_error(sc, cur, level,
+ __return_address);
+ else
+ trace_xfs_scrub_btree_error(sc, cur, level,
+ __return_address);
+}
+
+/*
+ * Make sure this record is in order and doesn't stray outside of the parent
+ * keys.
+ */
+STATIC void
+xfs_scrub_btree_rec(
+ struct xfs_scrub_btree *bs)
+{
+ struct xfs_btree_cur *cur = bs->cur;
+ union xfs_btree_rec *rec;
+ union xfs_btree_key key;
+ union xfs_btree_key hkey;
+ union xfs_btree_key *keyp;
+ struct xfs_btree_block *block;
+ struct xfs_btree_block *keyblock;
+ struct xfs_buf *bp;
+
+ block = xfs_btree_get_block(cur, 0, &bp);
+ rec = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
+
+ trace_xfs_scrub_btree_rec(bs->sc, cur, 0);
+
+ /* If this isn't the first record, are they in order? */
+ if (!bs->firstrec && !cur->bc_ops->recs_inorder(cur, &bs->lastrec, rec))
+ xfs_scrub_btree_set_corrupt(bs->sc, cur, 0);
+ bs->firstrec = false;
+ memcpy(&bs->lastrec, rec, cur->bc_ops->rec_len);
+
+ if (cur->bc_nlevels == 1)
+ return;
+
+ /* Is this at least as large as the parent low key? */
+ cur->bc_ops->init_key_from_rec(&key, rec);
+ keyblock = xfs_btree_get_block(cur, 1, &bp);
+ keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[1], keyblock);
+ if (cur->bc_ops->diff_two_keys(cur, &key, keyp) < 0)
+ xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);
+
+ if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
+ return;
+
+ /* Is this no larger than the parent high key? */
+ cur->bc_ops->init_high_key_from_rec(&hkey, rec);
+ keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[1], keyblock);
+ if (cur->bc_ops->diff_two_keys(cur, keyp, &hkey) < 0)
+ xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);
+}
+
+/*
+ * Make sure this key is in order and doesn't stray outside of the parent
+ * keys.
+ */
+STATIC void
+xfs_scrub_btree_key(
+ struct xfs_scrub_btree *bs,
+ int level)
+{
+ struct xfs_btree_cur *cur = bs->cur;
+ union xfs_btree_key *key;
+ union xfs_btree_key *keyp;
+ struct xfs_btree_block *block;
+ struct xfs_btree_block *keyblock;
+ struct xfs_buf *bp;
+
+ block = xfs_btree_get_block(cur, level, &bp);
+ key = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
+
+ trace_xfs_scrub_btree_key(bs->sc, cur, level);
+
+ /* If this isn't the first key, are they in order? */
+ if (!bs->firstkey[level] &&
+ !cur->bc_ops->keys_inorder(cur, &bs->lastkey[level], key))
+ xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ bs->firstkey[level] = false;
+ memcpy(&bs->lastkey[level], key, cur->bc_ops->key_len);
+
+ if (level + 1 >= cur->bc_nlevels)
+ return;
+
+ /* Is this at least as large as the parent low key? */
+ keyblock = xfs_btree_get_block(cur, level + 1, &bp);
+ keyp = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
+ if (cur->bc_ops->diff_two_keys(cur, key, keyp) < 0)
+ xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+
+ if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
+ return;
+
+ /* Is this no larger than the parent high key? */
+ key = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
+ keyp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1], keyblock);
+ if (cur->bc_ops->diff_two_keys(cur, keyp, key) < 0)
+ xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+}
+
+/*
+ * Check a btree pointer. Returns true if it's ok to use this pointer.
+ * Callers do not need to set the corrupt flag.
+ */
+static bool
+xfs_scrub_btree_ptr_ok(
+ struct xfs_scrub_btree *bs,
+ int level,
+ union xfs_btree_ptr *ptr)
+{
+ bool res;
+
+ /* A btree rooted in an inode has no block pointer to the root. */
+ if ((bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
+ level == bs->cur->bc_nlevels)
+ return true;
+
+ /* Otherwise, check the pointers. */
+ if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ res = xfs_btree_check_lptr(bs->cur, be64_to_cpu(ptr->l), level);
+ else
+ res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level);
+ if (!res)
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level);
+
+ return res;
+}
+
+/* Check that a btree block's sibling pointer matches what we expect it to be. */
+STATIC int
+xfs_scrub_btree_block_check_sibling(
+ struct xfs_scrub_btree *bs,
+ int level,
+ int direction,
+ union xfs_btree_ptr *sibling)
+{
+ struct xfs_btree_cur *cur = bs->cur;
+ struct xfs_btree_block *pblock;
+ struct xfs_buf *pbp;
+ struct xfs_btree_cur *ncur = NULL;
+ union xfs_btree_ptr *pp;
+ int success;
+ int error;
+
+ error = xfs_btree_dup_cursor(cur, &ncur);
+ if (!xfs_scrub_btree_process_error(bs->sc, cur, level + 1, &error) ||
+ !ncur)
+ return error;
+
+ /*
+ * If the pointer is null, we shouldn't be able to move the upper
+ * level pointer anywhere.
+ */
+ if (xfs_btree_ptr_is_null(cur, sibling)) {
+ if (direction > 0)
+ error = xfs_btree_increment(ncur, level + 1, &success);
+ else
+ error = xfs_btree_decrement(ncur, level + 1, &success);
+ if (error == 0 && success)
+ xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ error = 0;
+ goto out;
+ }
+
+ /* Increment upper level pointer. */
+ if (direction > 0)
+ error = xfs_btree_increment(ncur, level + 1, &success);
+ else
+ error = xfs_btree_decrement(ncur, level + 1, &success);
+ if (!xfs_scrub_btree_process_error(bs->sc, cur, level + 1, &error))
+ goto out;
+ if (!success) {
+ xfs_scrub_btree_set_corrupt(bs->sc, cur, level + 1);
+ goto out;
+ }
+
+ /* Compare upper level pointer to sibling pointer. */
+ pblock = xfs_btree_get_block(ncur, level + 1, &pbp);
+ pp = xfs_btree_ptr_addr(ncur, ncur->bc_ptrs[level + 1], pblock);
+ if (!xfs_scrub_btree_ptr_ok(bs, level + 1, pp))
+ goto out;
+
+ if (xfs_btree_diff_two_ptrs(cur, pp, sibling))
+ xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+out:
+ xfs_btree_del_cursor(ncur, XFS_BTREE_ERROR);
+ return error;
+}
+
+/* Check the siblings of a btree block. */
+STATIC int
+xfs_scrub_btree_block_check_siblings(
+ struct xfs_scrub_btree *bs,
+ struct xfs_btree_block *block)
+{
+ struct xfs_btree_cur *cur = bs->cur;
+ union xfs_btree_ptr leftsib;
+ union xfs_btree_ptr rightsib;
+ int level;
+ int error = 0;
+
+ xfs_btree_get_sibling(cur, block, &leftsib, XFS_BB_LEFTSIB);
+ xfs_btree_get_sibling(cur, block, &rightsib, XFS_BB_RIGHTSIB);
+ level = xfs_btree_get_level(block);
+
+ /* Root block should never have siblings. */
+ if (level == cur->bc_nlevels - 1) {
+ if (!xfs_btree_ptr_is_null(cur, &leftsib) ||
+ !xfs_btree_ptr_is_null(cur, &rightsib))
+ xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
+ goto out;
+ }
+
+ /*
+ * Do the left & right sibling pointers match the adjacent
+ * parent level pointers?
+ * (These functions absorb error codes for us.)
+ */
+ error = xfs_scrub_btree_block_check_sibling(bs, level, -1, &leftsib);
+ if (error)
+ return error;
+ error = xfs_scrub_btree_block_check_sibling(bs, level, 1, &rightsib);
+ if (error)
+ return error;
+out:
+ return error;
+}
+
+/*
+ * Grab and scrub a btree block given a btree pointer. Returns block
+ * and buffer pointers (if applicable) if they're ok to use.
+ */
+STATIC int
+xfs_scrub_btree_get_block(
+ struct xfs_scrub_btree *bs,
+ int level,
+ union xfs_btree_ptr *pp,
+ struct xfs_btree_block **pblock,
+ struct xfs_buf **pbp)
+{
+ void *failed_at;
+ int error;
+
+ *pblock = NULL;
+ *pbp = NULL;
+
+ error = xfs_btree_lookup_get_block(bs->cur, level, pp, pblock);
+ if (!xfs_scrub_btree_process_error(bs->sc, bs->cur, level, &error) ||
+ !*pblock)
+ return error;
+
+ xfs_btree_get_block(bs->cur, level, pbp);
+ if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ failed_at = __xfs_btree_check_lblock(bs->cur, *pblock,
+ level, *pbp);
+ else
+ failed_at = __xfs_btree_check_sblock(bs->cur, *pblock,
+ level, *pbp);
+ if (failed_at) {
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level);
+ return 0;
+ }
+
+ /*
+ * Check the block's siblings; this function absorbs error codes
+ * for us.
+ */
+ return xfs_scrub_btree_block_check_siblings(bs, *pblock);
+}
+
+/*
+ * Check that the low and high keys of this block match the keys stored
+ * in the parent block.
+ */
+STATIC void
+xfs_scrub_btree_block_keys(
+ struct xfs_scrub_btree *bs,
+ int level,
+ struct xfs_btree_block *block)
+{
+ union xfs_btree_key block_keys;
+ struct xfs_btree_cur *cur = bs->cur;
+ union xfs_btree_key *high_bk;
+ union xfs_btree_key *parent_keys;
+ union xfs_btree_key *high_pk;
+ struct xfs_btree_block *parent_block;
+ struct xfs_buf *bp;
+
+ if (level >= cur->bc_nlevels - 1)
+ return;
+
+ /* Calculate the keys for this block. */
+ xfs_btree_get_keys(cur, block, &block_keys);
+
+ /* Obtain the parent's copy of the keys for this block. */
+ parent_block = xfs_btree_get_block(cur, level + 1, &bp);
+ parent_keys = xfs_btree_key_addr(cur, cur->bc_ptrs[level + 1],
+ parent_block);
+
+ if (cur->bc_ops->diff_two_keys(cur, &block_keys, parent_keys) != 0)
+ xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);
+
+ if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
+ return;
+
+ /* Get high keys */
+ high_bk = xfs_btree_high_key_from_key(cur, &block_keys);
+ high_pk = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level + 1],
+ parent_block);
+
+ if (cur->bc_ops->diff_two_keys(cur, high_bk, high_pk) != 0)
+ xfs_scrub_btree_set_corrupt(bs->sc, cur, 1);
+}
+
+/*
+ * Visit all nodes and leaves of a btree. Check that all pointers and
+ * records are in order, that the keys reflect the records, and use a callback
+ * so that the caller can verify individual records.
+ */
+int
+xfs_scrub_btree(
+ struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ xfs_scrub_btree_rec_fn scrub_fn,
+ struct xfs_owner_info *oinfo,
+ void *private)
+{
+ struct xfs_scrub_btree bs = { NULL };
+ union xfs_btree_ptr ptr;
+ union xfs_btree_ptr *pp;
+ union xfs_btree_rec *recp;
+ struct xfs_btree_block *block;
+ int level;
+ struct xfs_buf *bp;
+ int i;
+ int error = 0;
+
+ /* Initialize scrub state */
+ bs.cur = cur;
+ bs.scrub_rec = scrub_fn;
+ bs.oinfo = oinfo;
+ bs.firstrec = true;
+ bs.private = private;
+ bs.sc = sc;
+ for (i = 0; i < XFS_BTREE_MAXLEVELS; i++)
+ bs.firstkey[i] = true;
+ INIT_LIST_HEAD(&bs.to_check);
+
+ /* Don't try to check a tree with a height we can't handle. */
+ if (cur->bc_nlevels > XFS_BTREE_MAXLEVELS) {
+ xfs_scrub_btree_set_corrupt(sc, cur, 0);
+ goto out;
+ }
+
+ /*
+ * Load the root of the btree. The helper function absorbs
+ * error codes for us.
+ */
+ level = cur->bc_nlevels - 1;
+ cur->bc_ops->init_ptr_from_cur(cur, &ptr);
+ if (!xfs_scrub_btree_ptr_ok(&bs, cur->bc_nlevels, &ptr))
+ goto out;
+ error = xfs_scrub_btree_get_block(&bs, level, &ptr, &block, &bp);
+ if (error || !block)
+ goto out;
+
+ cur->bc_ptrs[level] = 1;
+
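+ /*
+ * Visit the tree iteratively: scrub records at level 0 and keys
+ * above, popping back toward the root when a block is exhausted.
+ */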
+ while (level < cur->bc_nlevels) {
+ block = xfs_btree_get_block(cur, level, &bp);
+
+ if (level == 0) {
+ /* End of leaf, pop back towards the root. */
+ if (cur->bc_ptrs[level] >
+ be16_to_cpu(block->bb_numrecs)) {
+ xfs_scrub_btree_block_keys(&bs, level, block);
+ if (level < cur->bc_nlevels - 1)
+ cur->bc_ptrs[level + 1]++;
+ level++;
+ continue;
+ }
+
+ /* Records in order for scrub? */
+ xfs_scrub_btree_rec(&bs);
+
+ /* Call out to the record checker. */
+ recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
+ error = bs.scrub_rec(&bs, recp);
+ if (error)
+ break;
+ if (xfs_scrub_should_terminate(sc, &error) ||
+ (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
+ break;
+
+ cur->bc_ptrs[level]++;
+ continue;
+ }
+
+ /* End of node, pop back towards the root. */
+ if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
+ xfs_scrub_btree_block_keys(&bs, level, block);
+ if (level < cur->bc_nlevels - 1)
+ cur->bc_ptrs[level + 1]++;
+ level++;
+ continue;
+ }
+
+ /* Keys in order for scrub? */
+ xfs_scrub_btree_key(&bs, level);
+
+ /* Drill another level deeper. */
+ pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
+ if (!xfs_scrub_btree_ptr_ok(&bs, level, pp)) {
+ cur->bc_ptrs[level]++;
+ continue;
+ }
+ level--;
+ error = xfs_scrub_btree_get_block(&bs, level, pp, &block, &bp);
+ if (error || !block)
+ goto out;
+
+ cur->bc_ptrs[level] = 1;
+ }
+
+out:
+ return error;
+}
diff --git a/fs/xfs/scrub/btree.h b/fs/xfs/scrub/btree.h
new file mode 100644
index 000000000000..4de825a626d1
--- /dev/null
+++ b/fs/xfs/scrub/btree.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __XFS_SCRUB_BTREE_H__
+#define __XFS_SCRUB_BTREE_H__
+
+/* btree scrub */
+
+/* Check for btree operation errors. */
+bool xfs_scrub_btree_process_error(struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur, int level, int *error);
+
+/* Check for btree corruption. */
+void xfs_scrub_btree_set_corrupt(struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur, int level);
+
+struct xfs_scrub_btree;
+typedef int (*xfs_scrub_btree_rec_fn)(
+ struct xfs_scrub_btree *bs,
+ union xfs_btree_rec *rec);
+
+struct xfs_scrub_btree {
+ /* caller-provided scrub state */
+ struct xfs_scrub_context *sc;
+ struct xfs_btree_cur *cur;
+ xfs_scrub_btree_rec_fn scrub_rec;
+ struct xfs_owner_info *oinfo;
+ void *private;
+
+ /* internal scrub state */
+ union xfs_btree_rec lastrec;
+ bool firstrec;
+ union xfs_btree_key lastkey[XFS_BTREE_MAXLEVELS];
+ bool firstkey[XFS_BTREE_MAXLEVELS];
+ struct list_head to_check;
+};
+int xfs_scrub_btree(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+ xfs_scrub_btree_rec_fn scrub_fn,
+ struct xfs_owner_info *oinfo, void *private);
+
+#endif /* __XFS_SCRUB_BTREE_H__ */
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
new file mode 100644
index 000000000000..ac95fe911d96
--- /dev/null
+++ b/fs/xfs/scrub/common.c
@@ -0,0 +1,574 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_inode.h"
+#include "xfs_icache.h"
+#include "xfs_itable.h"
+#include "xfs_alloc.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_refcount.h"
+#include "xfs_refcount_btree.h"
+#include "xfs_rmap.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_log.h"
+#include "xfs_trans_priv.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/trace.h"
+#include "scrub/btree.h"
+
+/* Common code for the metadata scrubbers. */
+
+/*
+ * Handling operational errors.
+ *
+ * The *_process_error() family of functions are used to process error return
+ * codes from functions called as part of a scrub operation.
+ *
+ * If there's no error, we return true to tell the caller that it's ok
+ * to move on to the next check in its list.
+ *
+ * For non-verifier errors (e.g. ENOMEM) we return false to tell the
+ * caller that something bad happened, and we preserve *error so that
+ * the caller can return the *error up the stack to userspace.
+ *
+ * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting
+ * OFLAG_CORRUPT in sm_flags and the *error is cleared. In other words,
+ * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT,
+ * not via return codes. We return false to tell the caller that
+ * something bad happened. Since the error has been cleared, the caller
+ * will (presumably) return that zero and scrubbing will move on to
+ * whatever's next.
+ *
+ * ftrace can be used to record the precise metadata location and the
+ * approximate code location of the failed operation.
+ */
+
+/* Check for operational errors. */
+bool
+xfs_scrub_process_error(
+ struct xfs_scrub_context *sc,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ int *error)
+{
+ switch (*error) {
+ case 0:
+ return true;
+ case -EDEADLOCK:
+ /* Used to restart an op with deadlock avoidance. */
+ trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
+ break;
+ case -EFSBADCRC:
+ case -EFSCORRUPTED:
+ /* Note the badness but don't abort. */
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ *error = 0;
+ /* fall through */
+ default:
+ trace_xfs_scrub_op_error(sc, agno, bno, *error,
+ __return_address);
+ break;
+ }
+ return false;
+}
+
+/* Check for operational errors for a file offset. */
+bool
+xfs_scrub_fblock_process_error(
+ struct xfs_scrub_context *sc,
+ int whichfork,
+ xfs_fileoff_t offset,
+ int *error)
+{
+ switch (*error) {
+ case 0:
+ return true;
+ case -EDEADLOCK:
+ /* Used to restart an op with deadlock avoidance. */
+ trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
+ break;
+ case -EFSBADCRC:
+ case -EFSCORRUPTED:
+ /* Note the badness but don't abort. */
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ *error = 0;
+ /* fall through */
+ default:
+ trace_xfs_scrub_file_op_error(sc, whichfork, offset, *error,
+ __return_address);
+ break;
+ }
+ return false;
+}
+
+/*
+ * Handling scrub corruption/optimization/warning checks.
+ *
+ * The *_set_{corrupt,preen,warning}() family of functions are used to
+ * record the presence of metadata that is incorrect (corrupt), could be
+ * optimized somehow (preen), or should be flagged for administrative
+ * review but is not incorrect (warn).
+ *
+ * ftrace can be used to record the precise metadata location and
+ * approximate code location of the failed check.
+ */
+
+/* Record a block which could be optimized. */
+void
+xfs_scrub_block_set_preen(
+ struct xfs_scrub_context *sc,
+ struct xfs_buf *bp)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
+ trace_xfs_scrub_block_preen(sc, bp->b_bn, __return_address);
+}
+
+/*
+ * Record an inode which could be optimized. The trace data will
+ * include the block given by bp if bp is given; otherwise it will use
+ * the block location of the inode record itself.
+ */
+void
+xfs_scrub_ino_set_preen(
+ struct xfs_scrub_context *sc,
+ xfs_ino_t ino,
+ struct xfs_buf *bp)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
+ trace_xfs_scrub_ino_preen(sc, ino, bp ? bp->b_bn : 0,
+ __return_address);
+}
+
+/* Record a corrupt block. */
+void
+xfs_scrub_block_set_corrupt(
+ struct xfs_scrub_context *sc,
+ struct xfs_buf *bp)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
+}
+
+/*
+ * Record a corrupt inode. The trace data will include the block given
+ * by bp if bp is given; otherwise it will use the block location of the
+ * inode record itself.
+ */
+void
+xfs_scrub_ino_set_corrupt(
+ struct xfs_scrub_context *sc,
+ xfs_ino_t ino,
+ struct xfs_buf *bp)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ trace_xfs_scrub_ino_error(sc, ino, bp ? bp->b_bn : 0, __return_address);
+}
+
+/* Record corruption in a block indexed by a file fork. */
+void
+xfs_scrub_fblock_set_corrupt(
+ struct xfs_scrub_context *sc,
+ int whichfork,
+ xfs_fileoff_t offset)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
+}
+
+/*
+ * Warn about inodes that need administrative review but are not
+ * incorrect.
+ */
+void
+xfs_scrub_ino_set_warning(
+ struct xfs_scrub_context *sc,
+ xfs_ino_t ino,
+ struct xfs_buf *bp)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
+ trace_xfs_scrub_ino_warning(sc, ino, bp ? bp->b_bn : 0,
+ __return_address);
+}
+
+/* Warn about a block indexed by a file fork that needs review. */
+void
+xfs_scrub_fblock_set_warning(
+ struct xfs_scrub_context *sc,
+ int whichfork,
+ xfs_fileoff_t offset)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
+ trace_xfs_scrub_fblock_warning(sc, whichfork, offset, __return_address);
+}
+
+/* Signal an incomplete scrub. */
+void
+xfs_scrub_set_incomplete(
+ struct xfs_scrub_context *sc)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
+ trace_xfs_scrub_incomplete(sc, __return_address);
+}
+
+/*
+ * AG scrubbing
+ *
+ * These helpers facilitate locking an allocation group's header
+ * buffers, setting up cursors for all btrees that are present, and
+ * cleaning everything up once we're through.
+ */
+
+/* Decide if we want to return an AG header read failure. */
+static inline bool
+want_ag_read_header_failure(
+ struct xfs_scrub_context *sc,
+ unsigned int type)
+{
+ /* Return all AG header read failures when scanning btrees. */
+ if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
+ sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
+ sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
+ return true;
+ /*
+ * If we're scanning a given type of AG header, we only want to
+ * see read failures from that specific header. We'd like the
+ * other headers for cross-checking, but they aren't required.
+ */
+ if (sc->sm->sm_type == type)
+ return true;
+ return false;
+}
+
+/*
+ * Grab all the headers for an AG.
+ *
+ * The headers should be released by xfs_scrub_ag_free, but as a
+ * fail-safe we attach all the buffers we grab to the scrub
+ * transaction so they'll all be freed when we cancel it.
+ */
+int
+xfs_scrub_ag_read_headers(
+ struct xfs_scrub_context *sc,
+ xfs_agnumber_t agno,
+ struct xfs_buf **agi,
+ struct xfs_buf **agf,
+ struct xfs_buf **agfl)
+{
+ struct xfs_mount *mp = sc->mp;
+ int error;
+
+ error = xfs_ialloc_read_agi(mp, sc->tp, agno, agi);
+ if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
+ goto out;
+
+ error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, agf);
+ if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
+ goto out;
+
+ error = xfs_alloc_read_agfl(mp, sc->tp, agno, agfl);
+ if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
+ goto out;
+
+out:
+ return error;
+}
+
+/* Release all the AG btree cursors. */
+void
+xfs_scrub_ag_btcur_free(
+ struct xfs_scrub_ag *sa)
+{
+ if (sa->refc_cur)
+ xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
+ if (sa->rmap_cur)
+ xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
+ if (sa->fino_cur)
+ xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
+ if (sa->ino_cur)
+ xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
+ if (sa->cnt_cur)
+ xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
+ if (sa->bno_cur)
+ xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);
+
+ sa->refc_cur = NULL;
+ sa->rmap_cur = NULL;
+ sa->fino_cur = NULL;
+ sa->ino_cur = NULL;
+ sa->bno_cur = NULL;
+ sa->cnt_cur = NULL;
+}
+
+/* Initialize all the btree cursors for an AG. */
+int
+xfs_scrub_ag_btcur_init(
+ struct xfs_scrub_context *sc,
+ struct xfs_scrub_ag *sa)
+{
+ struct xfs_mount *mp = sc->mp;
+ xfs_agnumber_t agno = sa->agno;
+
+ if (sa->agf_bp) {
+ /* Set up a bnobt cursor for cross-referencing. */
+ sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
+ agno, XFS_BTNUM_BNO);
+ if (!sa->bno_cur)
+ goto err;
+
+ /* Set up a cntbt cursor for cross-referencing. */
+ sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
+ agno, XFS_BTNUM_CNT);
+ if (!sa->cnt_cur)
+ goto err;
+ }
+
+ /* Set up an inobt cursor for cross-referencing. */
+ if (sa->agi_bp) {
+ sa->ino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
+ agno, XFS_BTNUM_INO);
+ if (!sa->ino_cur)
+ goto err;
+ }
+
+ /* Set up a finobt cursor for cross-referencing. */
+ if (sa->agi_bp && xfs_sb_version_hasfinobt(&mp->m_sb)) {
+ sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
+ agno, XFS_BTNUM_FINO);
+ if (!sa->fino_cur)
+ goto err;
+ }
+
+ /* Set up a rmapbt cursor for cross-referencing. */
+ if (sa->agf_bp && xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp,
+ agno);
+ if (!sa->rmap_cur)
+ goto err;
+ }
+
+ /* Set up a refcountbt cursor for cross-referencing. */
+ if (sa->agf_bp && xfs_sb_version_hasreflink(&mp->m_sb)) {
+ sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
+ sa->agf_bp, agno, NULL);
+ if (!sa->refc_cur)
+ goto err;
+ }
+
+ return 0;
+err:
+ return -ENOMEM;
+}
+
+/* Release the AG header context and btree cursors. */
+void
+xfs_scrub_ag_free(
+ struct xfs_scrub_context *sc,
+ struct xfs_scrub_ag *sa)
+{
+ xfs_scrub_ag_btcur_free(sa);
+ if (sa->agfl_bp) {
+ xfs_trans_brelse(sc->tp, sa->agfl_bp);
+ sa->agfl_bp = NULL;
+ }
+ if (sa->agf_bp) {
+ xfs_trans_brelse(sc->tp, sa->agf_bp);
+ sa->agf_bp = NULL;
+ }
+ if (sa->agi_bp) {
+ xfs_trans_brelse(sc->tp, sa->agi_bp);
+ sa->agi_bp = NULL;
+ }
+ sa->agno = NULLAGNUMBER;
+}
+
+/*
+ * For scrub, grab the AGI and the AGF headers, in that order. Locking
+ * order requires us to get the AGI before the AGF. We use the
+ * transaction to avoid deadlocking on crosslinked metadata buffers;
+ * either the caller passes one in (bmap scrub) or we have to create a
+ * transaction ourselves.
+ */
+int
+xfs_scrub_ag_init(
+ struct xfs_scrub_context *sc,
+ xfs_agnumber_t agno,
+ struct xfs_scrub_ag *sa)
+{
+ int error;
+
+ sa->agno = agno;
+ error = xfs_scrub_ag_read_headers(sc, agno, &sa->agi_bp,
+ &sa->agf_bp, &sa->agfl_bp);
+ if (error)
+ return error;
+
+ return xfs_scrub_ag_btcur_init(sc, sa);
+}
+
+/* Per-scrubber setup functions */
+
+/* Set us up with a transaction and an empty context. */
+int
+xfs_scrub_setup_fs(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip)
+{
+ return xfs_scrub_trans_alloc(sc->sm, sc->mp, &sc->tp);
+}
+
+/* Set us up with AG headers and btree cursors. */
+int
+xfs_scrub_setup_ag_btree(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip,
+ bool force_log)
+{
+ struct xfs_mount *mp = sc->mp;
+ int error;
+
+ /*
+ * If the caller asks us to checkpoint the log, do so. This
+ * expensive operation should be performed infrequently and only
+ * as a last resort. Any caller that sets force_log should
+ * document why they need to do so.
+ */
+ if (force_log) {
+ error = xfs_scrub_checkpoint_log(mp);
+ if (error)
+ return error;
+ }
+
+ error = xfs_scrub_setup_ag_header(sc, ip);
+ if (error)
+ return error;
+
+ return xfs_scrub_ag_init(sc, sc->sm->sm_agno, &sc->sa);
+}
+
+/* Push everything out of the log onto disk. */
+int
+xfs_scrub_checkpoint_log(
+ struct xfs_mount *mp)
+{
+ int error;
+
+ error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
+ if (error)
+ return error;
+ xfs_ail_push_all_sync(mp->m_ail);
+ return 0;
+}
+
+/*
+ * Given an inode and the scrub control structure, grab either the
+ * inode referenced in the control structure or the inode passed in.
+ * The inode is not locked.
+ */
+int
+xfs_scrub_get_inode(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip_in)
+{
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_inode *ip = NULL;
+ int error;
+
+ /*
+ * If userspace passed us an AG number or a generation number
+ * without an inode number, they haven't got a clue so bail out
+ * immediately.
+ */
+ if (sc->sm->sm_agno || (sc->sm->sm_gen && !sc->sm->sm_ino))
+ return -EINVAL;
+
+ /* We want to scan the inode we already had opened. */
+ if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
+ sc->ip = ip_in;
+ return 0;
+ }
+
+ /* Look up the inode, see if the generation number matches. */
+ if (xfs_internal_inum(mp, sc->sm->sm_ino))
+ return -ENOENT;
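+ /*
+ * The inode number came from userspace, so do an untrusted
+ * lookup (which verifies the number against the inode btrees)
+ * and avoid keeping the inode cached once we let go of it.
+ */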
+ error = xfs_iget(mp, NULL, sc->sm->sm_ino,
+ XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE, 0, &ip);
+ if (error == -ENOENT || error == -EINVAL) {
+ /* inode doesn't exist... */
+ return -ENOENT;
+ } else if (error) {
+ trace_xfs_scrub_op_error(sc,
+ XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
+ XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
+ error, __return_address);
+ return error;
+ }
+ if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
+ iput(VFS_I(ip));
+ return -ENOENT;
+ }
+
+ sc->ip = ip;
+ return 0;
+}
+
+/* Set us up to scrub a file's contents. */
+int
+xfs_scrub_setup_inode_contents(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip,
+ unsigned int resblks)
+{
+ struct xfs_mount *mp = sc->mp;
+ int error;
+
+ error = xfs_scrub_get_inode(sc, ip);
+ if (error)
+ return error;
+
+ /* Got the inode, lock it and we're ready to go. */
+ sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
+ xfs_ilock(sc->ip, sc->ilock_flags);
+ error = xfs_scrub_trans_alloc(sc->sm, mp, &sc->tp);
+ if (error)
+ goto out;
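+ /*
+ * Take the ILOCK only after the transaction has been allocated,
+ * following the usual XFS order of transaction allocation
+ * before ILOCK.
+ */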
+ sc->ilock_flags |= XFS_ILOCK_EXCL;
+ xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
+
+out:
+ /* scrub teardown will unlock and release the inode for us */
+ return error;
+}
diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
new file mode 100644
index 000000000000..5c043855570e
--- /dev/null
+++ b/fs/xfs/scrub/common.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __XFS_SCRUB_COMMON_H__
+#define __XFS_SCRUB_COMMON_H__
+
+/*
+ * We /could/ terminate a scrub/repair operation early. If we're not
+ * in a good place to continue (fatal signal, etc.) then bail out.
+ * Note that we're careful not to make any judgements about *error.
+ */
+static inline bool
+xfs_scrub_should_terminate(
+ struct xfs_scrub_context *sc,
+ int *error)
+{
+ if (fatal_signal_pending(current)) {
+ if (*error == 0)
+ *error = -EAGAIN;
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Grab an empty transaction so that we can re-grab locked buffers if
+ * one of our btrees turns out to be cyclic.
+ */
+static inline int
+xfs_scrub_trans_alloc(
+ struct xfs_scrub_metadata *sm,
+ struct xfs_mount *mp,
+ struct xfs_trans **tpp)
+{
+ return xfs_trans_alloc_empty(mp, tpp);
+}
+
+bool xfs_scrub_process_error(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
+ xfs_agblock_t bno, int *error);
+bool xfs_scrub_fblock_process_error(struct xfs_scrub_context *sc, int whichfork,
+ xfs_fileoff_t offset, int *error);
+
+void xfs_scrub_block_set_preen(struct xfs_scrub_context *sc,
+ struct xfs_buf *bp);
+void xfs_scrub_ino_set_preen(struct xfs_scrub_context *sc, xfs_ino_t ino,
+ struct xfs_buf *bp);
+
+void xfs_scrub_block_set_corrupt(struct xfs_scrub_context *sc,
+ struct xfs_buf *bp);
+void xfs_scrub_ino_set_corrupt(struct xfs_scrub_context *sc, xfs_ino_t ino,
+ struct xfs_buf *bp);
+void xfs_scrub_fblock_set_corrupt(struct xfs_scrub_context *sc, int whichfork,
+ xfs_fileoff_t offset);
+
+void xfs_scrub_ino_set_warning(struct xfs_scrub_context *sc, xfs_ino_t ino,
+ struct xfs_buf *bp);
+void xfs_scrub_fblock_set_warning(struct xfs_scrub_context *sc, int whichfork,
+ xfs_fileoff_t offset);
+
+void xfs_scrub_set_incomplete(struct xfs_scrub_context *sc);
+int xfs_scrub_checkpoint_log(struct xfs_mount *mp);
+
+/* Setup functions */
+int xfs_scrub_setup_fs(struct xfs_scrub_context *sc, struct xfs_inode *ip);
+int xfs_scrub_setup_ag_header(struct xfs_scrub_context *sc,
+ struct xfs_inode *ip);
+int xfs_scrub_setup_ag_allocbt(struct xfs_scrub_context *sc,
+ struct xfs_inode *ip);
+int xfs_scrub_setup_ag_iallocbt(struct xfs_scrub_context *sc,
+ struct xfs_inode *ip);
+int xfs_scrub_setup_ag_rmapbt(struct xfs_scrub_context *sc,
+ struct xfs_inode *ip);
+int xfs_scrub_setup_ag_refcountbt(struct xfs_scrub_context *sc,
+ struct xfs_inode *ip);
+int xfs_scrub_setup_inode(struct xfs_scrub_context *sc,
+ struct xfs_inode *ip);
+int xfs_scrub_setup_inode_bmap(struct xfs_scrub_context *sc,
+ struct xfs_inode *ip);
+int xfs_scrub_setup_inode_bmap_data(struct xfs_scrub_context *sc,
+ struct xfs_inode *ip);
+int xfs_scrub_setup_directory(struct xfs_scrub_context *sc,
+ struct xfs_inode *ip);
+int xfs_scrub_setup_xattr(struct xfs_scrub_context *sc,
+ struct xfs_inode *ip);
+int xfs_scrub_setup_symlink(struct xfs_scrub_context *sc,
+ struct xfs_inode *ip);
+int xfs_scrub_setup_parent(struct xfs_scrub_context *sc,
+ struct xfs_inode *ip);
+#ifdef CONFIG_XFS_RT
+int xfs_scrub_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip);
+#else
+static inline int
+xfs_scrub_setup_rt(struct xfs_scrub_context *sc, struct xfs_inode *ip)
+{
+ return -ENOENT;
+}
+#endif
+#ifdef CONFIG_XFS_QUOTA
+int xfs_scrub_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip);
+#else
+static inline int
+xfs_scrub_setup_quota(struct xfs_scrub_context *sc, struct xfs_inode *ip)
+{
+ return -ENOENT;
+}
+#endif
+
+void xfs_scrub_ag_free(struct xfs_scrub_context *sc, struct xfs_scrub_ag *sa);
+int xfs_scrub_ag_init(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
+ struct xfs_scrub_ag *sa);
+int xfs_scrub_ag_read_headers(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
+ struct xfs_buf **agi, struct xfs_buf **agf,
+ struct xfs_buf **agfl);
+void xfs_scrub_ag_btcur_free(struct xfs_scrub_ag *sa);
+int xfs_scrub_ag_btcur_init(struct xfs_scrub_context *sc,
+ struct xfs_scrub_ag *sa);
+int xfs_scrub_walk_agfl(struct xfs_scrub_context *sc,
+ int (*fn)(struct xfs_scrub_context *, xfs_agblock_t bno,
+ void *),
+ void *priv);
+
+int xfs_scrub_setup_ag_btree(struct xfs_scrub_context *sc,
+ struct xfs_inode *ip, bool force_log);
+int xfs_scrub_get_inode(struct xfs_scrub_context *sc, struct xfs_inode *ip_in);
+int xfs_scrub_setup_inode_contents(struct xfs_scrub_context *sc,
+ struct xfs_inode *ip, unsigned int resblks);
+
+#endif /* __XFS_SCRUB_COMMON_H__ */
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
new file mode 100644
index 000000000000..d94edd93cba8
--- /dev/null
+++ b/fs/xfs/scrub/dabtree.c
@@ -0,0 +1,591 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_inode.h"
+#include "xfs_inode_fork.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2.h"
+#include "xfs_dir2_priv.h"
+#include "xfs_attr_leaf.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/trace.h"
+#include "scrub/dabtree.h"
+
+/* Directory/Attribute Btree */
+
+/*
+ * Check for da btree operation errors. See the section about handling
+ * operational errors in common.c.
+ */
+bool
+xfs_scrub_da_process_error(
+ struct xfs_scrub_da_btree *ds,
+ int level,
+ int *error)
+{
+ struct xfs_scrub_context *sc = ds->sc;
+
+ if (*error == 0)
+ return true;
+
+ switch (*error) {
+ case -EDEADLOCK:
+ /* Used to restart an op with deadlock avoidance. */
+ trace_xfs_scrub_deadlock_retry(sc->ip, sc->sm, *error);
+ break;
+ case -EFSBADCRC:
+ case -EFSCORRUPTED:
+ /* Note the badness but don't abort. */
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ *error = 0;
+ /* fall through */
+ default:
+ trace_xfs_scrub_file_op_error(sc, ds->dargs.whichfork,
+ xfs_dir2_da_to_db(ds->dargs.geo,
+ ds->state->path.blk[level].blkno),
+ *error, __return_address);
+ break;
+ }
+ return false;
+}
+
+/*
+ * Check for da btree corruption. See the section about handling
+ * operational errors in common.c.
+ */
+void
+xfs_scrub_da_set_corrupt(
+ struct xfs_scrub_da_btree *ds,
+ int level)
+{
+ struct xfs_scrub_context *sc = ds->sc;
+
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+
+ trace_xfs_scrub_fblock_error(sc, ds->dargs.whichfork,
+ xfs_dir2_da_to_db(ds->dargs.geo,
+ ds->state->path.blk[level].blkno),
+ __return_address);
+}
+
+/* Find an entry at a certain level in a da btree. */
+STATIC void *
+xfs_scrub_da_btree_entry(
+ struct xfs_scrub_da_btree *ds,
+ int level,
+ int rec)
+{
+ char *ents;
+ struct xfs_da_state_blk *blk;
+ void *baddr;
+
+ /* Dispatch the entry finding function. */
+ blk = &ds->state->path.blk[level];
+ baddr = blk->bp->b_addr;
+ switch (blk->magic) {
+ case XFS_ATTR_LEAF_MAGIC:
+ case XFS_ATTR3_LEAF_MAGIC:
+ ents = (char *)xfs_attr3_leaf_entryp(baddr);
+ return ents + (rec * sizeof(struct xfs_attr_leaf_entry));
+ case XFS_DIR2_LEAFN_MAGIC:
+ case XFS_DIR3_LEAFN_MAGIC:
+ ents = (char *)ds->dargs.dp->d_ops->leaf_ents_p(baddr);
+ return ents + (rec * sizeof(struct xfs_dir2_leaf_entry));
+ case XFS_DIR2_LEAF1_MAGIC:
+ case XFS_DIR3_LEAF1_MAGIC:
+ ents = (char *)ds->dargs.dp->d_ops->leaf_ents_p(baddr);
+ return ents + (rec * sizeof(struct xfs_dir2_leaf_entry));
+ case XFS_DA_NODE_MAGIC:
+ case XFS_DA3_NODE_MAGIC:
+ ents = (char *)ds->dargs.dp->d_ops->node_tree_p(baddr);
+ return ents + (rec * sizeof(struct xfs_da_node_entry));
+ }
+
+ return NULL;
+}
+
+/* Scrub a da btree hash (key). */
+int
+xfs_scrub_da_btree_hash(
+ struct xfs_scrub_da_btree *ds,
+ int level,
+ __be32 *hashp)
+{
+ struct xfs_da_state_blk *blks;
+ struct xfs_da_node_entry *entry;
+ xfs_dahash_t hash;
+ xfs_dahash_t parent_hash;
+
+ /* Is this hash in order? */
+ hash = be32_to_cpu(*hashp);
+ if (hash < ds->hashes[level])
+ xfs_scrub_da_set_corrupt(ds, level);
+ ds->hashes[level] = hash;
+
+ if (level == 0)
+ return 0;
+
+ /* Is this hash no larger than the parent hash? */
+ blks = ds->state->path.blk;
+ entry = xfs_scrub_da_btree_entry(ds, level - 1, blks[level - 1].index);
+ parent_hash = be32_to_cpu(entry->hashval);
+ if (parent_hash < hash)
+ xfs_scrub_da_set_corrupt(ds, level);
+
+ return 0;
+}
+
+/*
+ * Check a da btree pointer. Returns true if it's ok to use this
+ * pointer.
+ */
+STATIC bool
+xfs_scrub_da_btree_ptr_ok(
+ struct xfs_scrub_da_btree *ds,
+ int level,
+ xfs_dablk_t blkno)
+{
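+ /*
+ * Node blocks must fall within [lowest, highest); a highest of
+ * zero (the attr fork case) means there is no upper bound.
+ */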
+ if (blkno < ds->lowest || (ds->highest != 0 && blkno >= ds->highest)) {
+ xfs_scrub_da_set_corrupt(ds, level);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * The da btree scrubber can handle leaf1 blocks as a degenerate
+ * form of leafn blocks. Since the regular da code doesn't handle
+ * leaf1, we must multiplex the verifiers.
+ */
+static void
+xfs_scrub_da_btree_read_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_da_blkinfo *info = bp->b_addr;
+
+ switch (be16_to_cpu(info->magic)) {
+ case XFS_DIR2_LEAF1_MAGIC:
+ case XFS_DIR3_LEAF1_MAGIC:
+ bp->b_ops = &xfs_dir3_leaf1_buf_ops;
+ bp->b_ops->verify_read(bp);
+ return;
+ default:
+ /*
+ * xfs_da3_node_buf_ops already know how to handle
+ * DA*_NODE, ATTR*_LEAF, and DIR*_LEAFN blocks.
+ */
+ bp->b_ops = &xfs_da3_node_buf_ops;
+ bp->b_ops->verify_read(bp);
+ return;
+ }
+}
+
+static void
+xfs_scrub_da_btree_write_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_da_blkinfo *info = bp->b_addr;
+
+ switch (be16_to_cpu(info->magic)) {
+ case XFS_DIR2_LEAF1_MAGIC:
+ case XFS_DIR3_LEAF1_MAGIC:
+ bp->b_ops = &xfs_dir3_leaf1_buf_ops;
+ bp->b_ops->verify_write(bp);
+ return;
+ default:
+ /*
+ * xfs_da3_node_buf_ops already know how to handle
+ * DA*_NODE, ATTR*_LEAF, and DIR*_LEAFN blocks.
+ */
+ bp->b_ops = &xfs_da3_node_buf_ops;
+ bp->b_ops->verify_write(bp);
+ return;
+ }
+}
+
+static const struct xfs_buf_ops xfs_scrub_da_btree_buf_ops = {
+ .name = "xfs_scrub_da_btree",
+ .verify_read = xfs_scrub_da_btree_read_verify,
+ .verify_write = xfs_scrub_da_btree_write_verify,
+};
+
+/* Check a block's sibling. */
+STATIC int
+xfs_scrub_da_btree_block_check_sibling(
+ struct xfs_scrub_da_btree *ds,
+ int level,
+ int direction,
+ xfs_dablk_t sibling)
+{
+ int retval;
+ int error;
+
+ memcpy(&ds->state->altpath, &ds->state->path,
+ sizeof(ds->state->altpath));
+
+ /*
+ * If the pointer is null, we shouldn't be able to move the upper
+ * level pointer anywhere.
+ */
+ if (sibling == 0) {
+ error = xfs_da3_path_shift(ds->state, &ds->state->altpath,
+ direction, false, &retval);
+ if (error == 0 && retval == 0)
+ xfs_scrub_da_set_corrupt(ds, level);
+ error = 0;
+ goto out;
+ }
+
+ /* Move the alternate cursor one block in the direction given. */
+ error = xfs_da3_path_shift(ds->state, &ds->state->altpath,
+ direction, false, &retval);
+ if (!xfs_scrub_da_process_error(ds, level, &error))
+ return error;
+ if (retval) {
+ xfs_scrub_da_set_corrupt(ds, level);
+ return error;
+ }
+
+ /* Compare upper level pointer to sibling pointer. */
+ if (ds->state->altpath.blk[level].blkno != sibling)
+ xfs_scrub_da_set_corrupt(ds, level);
+ xfs_trans_brelse(ds->dargs.trans, ds->state->altpath.blk[level].bp);
+out:
+ return error;
+}
+
+/* Check a block's sibling pointers. */
+STATIC int
+xfs_scrub_da_btree_block_check_siblings(
+ struct xfs_scrub_da_btree *ds,
+ int level,
+ struct xfs_da_blkinfo *hdr)
+{
+ xfs_dablk_t forw;
+ xfs_dablk_t back;
+ int error = 0;
+
+ forw = be32_to_cpu(hdr->forw);
+ back = be32_to_cpu(hdr->back);
+
+ /* Top level blocks should not have sibling pointers. */
+ if (level == 0) {
+ if (forw != 0 || back != 0)
+ xfs_scrub_da_set_corrupt(ds, level);
+ return 0;
+ }
+
+ /*
+ * Check back (left) and forw (right) pointers. These functions
+ * absorb error codes for us.
+ */
+ error = xfs_scrub_da_btree_block_check_sibling(ds, level, 0, back);
+ if (error)
+ goto out;
+ error = xfs_scrub_da_btree_block_check_sibling(ds, level, 1, forw);
+
+out:
+ memset(&ds->state->altpath, 0, sizeof(ds->state->altpath));
+ return error;
+}
+
+/* Load a dir/attribute block from a btree. */
+STATIC int
+xfs_scrub_da_btree_block(
+ struct xfs_scrub_da_btree *ds,
+ int level,
+ xfs_dablk_t blkno)
+{
+ struct xfs_da_state_blk *blk;
+ struct xfs_da_intnode *node;
+ struct xfs_da_node_entry *btree;
+ struct xfs_da3_blkinfo *hdr3;
+ struct xfs_da_args *dargs = &ds->dargs;
+ struct xfs_inode *ip = ds->dargs.dp;
+ xfs_ino_t owner;
+ int *pmaxrecs;
+ struct xfs_da3_icnode_hdr nodehdr;
+ int error = 0;
+
+ blk = &ds->state->path.blk[level];
+ ds->state->path.active = level + 1;
+
+ /* Release old block. */
+ if (blk->bp) {
+ xfs_trans_brelse(dargs->trans, blk->bp);
+ blk->bp = NULL;
+ }
+
+ /* Check the pointer. */
+ blk->blkno = blkno;
+ if (!xfs_scrub_da_btree_ptr_ok(ds, level, blkno))
+ goto out_nobuf;
+
+ /* Read the buffer. */
+ error = xfs_da_read_buf(dargs->trans, dargs->dp, blk->blkno, -2,
+ &blk->bp, dargs->whichfork,
+ &xfs_scrub_da_btree_buf_ops);
+ if (!xfs_scrub_da_process_error(ds, level, &error))
+ goto out_nobuf;
+
+ /*
+ * We didn't find a dir btree root block, which means that
+ * there's no LEAF1/LEAFN tree (at least not where it's supposed
+ * to be), so jump out now.
+ */
+ if (ds->dargs.whichfork == XFS_DATA_FORK && level == 0 &&
+ blk->bp == NULL)
+ goto out_nobuf;
+
+ /* It's /not/ ok for attr trees not to have a da btree. */
+ if (blk->bp == NULL) {
+ xfs_scrub_da_set_corrupt(ds, level);
+ goto out_nobuf;
+ }
+
+ hdr3 = blk->bp->b_addr;
+ blk->magic = be16_to_cpu(hdr3->hdr.magic);
+ pmaxrecs = &ds->maxrecs[level];
+
+ /* We only started zeroing the header on v5 filesystems. */
+ if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb) && hdr3->hdr.pad)
+ xfs_scrub_da_set_corrupt(ds, level);
+
+ /* Check the owner. */
+ if (xfs_sb_version_hascrc(&ip->i_mount->m_sb)) {
+ owner = be64_to_cpu(hdr3->owner);
+ if (owner != ip->i_ino)
+ xfs_scrub_da_set_corrupt(ds, level);
+ }
+
+ /* Check the siblings. */
+ error = xfs_scrub_da_btree_block_check_siblings(ds, level, &hdr3->hdr);
+ if (error)
+ goto out;
+
+ /* Interpret the buffer. */
+ switch (blk->magic) {
+ case XFS_ATTR_LEAF_MAGIC:
+ case XFS_ATTR3_LEAF_MAGIC:
+ xfs_trans_buf_set_type(dargs->trans, blk->bp,
+ XFS_BLFT_ATTR_LEAF_BUF);
+ blk->magic = XFS_ATTR_LEAF_MAGIC;
+ blk->hashval = xfs_attr_leaf_lasthash(blk->bp, pmaxrecs);
+ if (ds->tree_level != 0)
+ xfs_scrub_da_set_corrupt(ds, level);
+ break;
+ case XFS_DIR2_LEAFN_MAGIC:
+ case XFS_DIR3_LEAFN_MAGIC:
+ xfs_trans_buf_set_type(dargs->trans, blk->bp,
+ XFS_BLFT_DIR_LEAFN_BUF);
+ blk->magic = XFS_DIR2_LEAFN_MAGIC;
+ blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs);
+ if (ds->tree_level != 0)
+ xfs_scrub_da_set_corrupt(ds, level);
+ break;
+ case XFS_DIR2_LEAF1_MAGIC:
+ case XFS_DIR3_LEAF1_MAGIC:
+ xfs_trans_buf_set_type(dargs->trans, blk->bp,
+ XFS_BLFT_DIR_LEAF1_BUF);
+ blk->magic = XFS_DIR2_LEAF1_MAGIC;
+ blk->hashval = xfs_dir2_leaf_lasthash(ip, blk->bp, pmaxrecs);
+ if (ds->tree_level != 0)
+ xfs_scrub_da_set_corrupt(ds, level);
+ break;
+ case XFS_DA_NODE_MAGIC:
+ case XFS_DA3_NODE_MAGIC:
+ xfs_trans_buf_set_type(dargs->trans, blk->bp,
+ XFS_BLFT_DA_NODE_BUF);
+ blk->magic = XFS_DA_NODE_MAGIC;
+ node = blk->bp->b_addr;
+ ip->d_ops->node_hdr_from_disk(&nodehdr, node);
+ btree = ip->d_ops->node_tree_p(node);
+ *pmaxrecs = nodehdr.count;
+ blk->hashval = be32_to_cpu(btree[*pmaxrecs - 1].hashval);
+ if (level == 0) {
+ if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
+ xfs_scrub_da_set_corrupt(ds, level);
+ goto out_freebp;
+ }
+ ds->tree_level = nodehdr.level;
+ } else {
+ if (ds->tree_level != nodehdr.level) {
+ xfs_scrub_da_set_corrupt(ds, level);
+ goto out_freebp;
+ }
+ }
+
+ /* XXX: Check hdr3.pad32 once we know how to fix it. */
+ break;
+ default:
+ xfs_scrub_da_set_corrupt(ds, level);
+ goto out_freebp;
+ }
+
+out:
+ return error;
+out_freebp:
+ xfs_trans_brelse(dargs->trans, blk->bp);
+ blk->bp = NULL;
+out_nobuf:
+ blk->blkno = 0;
+ return error;
+}
+
+/* Visit all nodes and leaves of a da btree. */
+int
+xfs_scrub_da_btree(
+ struct xfs_scrub_context *sc,
+ int whichfork,
+ xfs_scrub_da_btree_rec_fn scrub_fn,
+ void *private)
+{
+ struct xfs_scrub_da_btree ds = {};
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_da_state_blk *blks;
+ struct xfs_da_node_entry *key;
+ void *rec;
+ xfs_dablk_t blkno;
+ int level;
+ int error;
+
+ /* Skip short format data structures; no btree to scan. */
+ if (XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_BTREE)
+ return 0;
+
+ /* Set up initial da state. */
+ ds.dargs.dp = sc->ip;
+ ds.dargs.whichfork = whichfork;
+ ds.dargs.trans = sc->tp;
+ ds.dargs.op_flags = XFS_DA_OP_OKNOENT;
+ ds.state = xfs_da_state_alloc();
+ ds.state->args = &ds.dargs;
+ ds.state->mp = mp;
+ ds.sc = sc;
+ ds.private = private;
+ if (whichfork == XFS_ATTR_FORK) {
+ ds.dargs.geo = mp->m_attr_geo;
+ ds.lowest = 0;
+ ds.highest = 0;
+ } else {
+ ds.dargs.geo = mp->m_dir_geo;
+ ds.lowest = ds.dargs.geo->leafblk;
+ ds.highest = ds.dargs.geo->freeblk;
+ }
+ blkno = ds.lowest;
+ level = 0;
+
+ /* Find the root of the da tree, if present. */
+ blks = ds.state->path.blk;
+ error = xfs_scrub_da_btree_block(&ds, level, blkno);
+ if (error)
+ goto out_state;
+ /*
+ * We didn't find a block at ds.lowest, which means that there's
+ * no LEAF1/LEAFN tree (at least not where it's supposed to be),
+ * so jump out now.
+ */
+ if (blks[level].bp == NULL)
+ goto out_state;
+
+ blks[level].index = 0;
+ while (level >= 0 && level < XFS_DA_NODE_MAXDEPTH) {
+ /* Handle leaf block. */
+ if (blks[level].magic != XFS_DA_NODE_MAGIC) {
+ /* End of leaf, pop back towards the root. */
+ if (blks[level].index >= ds.maxrecs[level]) {
+ if (level > 0)
+ blks[level - 1].index++;
+ ds.tree_level++;
+ level--;
+ continue;
+ }
+
+ /* Dispatch record scrubbing. */
+ rec = xfs_scrub_da_btree_entry(&ds, level,
+ blks[level].index);
+ error = scrub_fn(&ds, level, rec);
+ if (error)
+ break;
+ if (xfs_scrub_should_terminate(sc, &error) ||
+ (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
+ break;
+
+ blks[level].index++;
+ continue;
+ }
+
+ /* End of node, pop back towards the root. */
+ if (blks[level].index >= ds.maxrecs[level]) {
+ if (level > 0)
+ blks[level - 1].index++;
+ ds.tree_level++;
+ level--;
+ continue;
+ }
+
+ /* Hashes in order for scrub? */
+ key = xfs_scrub_da_btree_entry(&ds, level, blks[level].index);
+ error = xfs_scrub_da_btree_hash(&ds, level, &key->hashval);
+ if (error)
+ goto out;
+
+ /* Drill another level deeper. */
+ blkno = be32_to_cpu(key->before);
+ level++;
+ ds.tree_level--;
+ error = xfs_scrub_da_btree_block(&ds, level, blkno);
+ if (error)
+ goto out;
+ if (blks[level].bp == NULL)
+ goto out;
+
+ blks[level].index = 0;
+ }
+
+out:
+ /* Release all the buffers we're tracking. */
+ for (level = 0; level < XFS_DA_NODE_MAXDEPTH; level++) {
+ if (blks[level].bp == NULL)
+ continue;
+ xfs_trans_brelse(sc->tp, blks[level].bp);
+ blks[level].bp = NULL;
+ }
+
+out_state:
+ xfs_da_state_free(ds.state);
+ return error;
+}
diff --git a/fs/xfs/scrub/dabtree.h b/fs/xfs/scrub/dabtree.h
new file mode 100644
index 000000000000..d31468d68cef
--- /dev/null
+++ b/fs/xfs/scrub/dabtree.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __XFS_SCRUB_DABTREE_H__
+#define __XFS_SCRUB_DABTREE_H__
+
+/* dir/attr btree */
+
+struct xfs_scrub_da_btree {
+ struct xfs_da_args dargs;
+ xfs_dahash_t hashes[XFS_DA_NODE_MAXDEPTH];
+ int maxrecs[XFS_DA_NODE_MAXDEPTH];
+ struct xfs_da_state *state;
+ struct xfs_scrub_context *sc;
+ void *private;
+
+ /*
+ * Lowest and highest directory block address in which we expect
+ * to find dir/attr btree node blocks. For a directory this
+ * (presumably) means between LEAF_OFFSET and FREE_OFFSET; for
+ * attributes there is no limit.
+ */
+ xfs_dablk_t lowest;
+ xfs_dablk_t highest;
+
+ int tree_level;
+};
+
+typedef int (*xfs_scrub_da_btree_rec_fn)(struct xfs_scrub_da_btree *ds,
+ int level, void *rec);
+
+/* Check for da btree operation errors. */
+bool xfs_scrub_da_process_error(struct xfs_scrub_da_btree *ds, int level, int *error);
+
+/* Check for da btree corruption. */
+void xfs_scrub_da_set_corrupt(struct xfs_scrub_da_btree *ds, int level);
+
+int xfs_scrub_da_btree_hash(struct xfs_scrub_da_btree *ds, int level,
+ __be32 *hashp);
+int xfs_scrub_da_btree(struct xfs_scrub_context *sc, int whichfork,
+ xfs_scrub_da_btree_rec_fn scrub_fn, void *private);
+
+#endif /* __XFS_SCRUB_DABTREE_H__ */
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
new file mode 100644
index 000000000000..69e1efdd4019
--- /dev/null
+++ b/fs/xfs/scrub/dir.c
@@ -0,0 +1,816 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_inode.h"
+#include "xfs_icache.h"
+#include "xfs_itable.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2.h"
+#include "xfs_dir2_priv.h"
+#include "xfs_ialloc.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/trace.h"
+#include "scrub/dabtree.h"
+
+/* Set us up to scrub directories. */
+int
+xfs_scrub_setup_directory(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip)
+{
+ return xfs_scrub_setup_inode_contents(sc, ip, 0);
+}
+
+/* Directories */
+
+/* Scrub a directory entry. */
+
+struct xfs_scrub_dir_ctx {
+ /* VFS fill-directory iterator */
+ struct dir_context dir_iter;
+
+ struct xfs_scrub_context *sc;
+};
+
+/* Check that an inode's mode matches a given DT_ type. */
+STATIC int
+xfs_scrub_dir_check_ftype(
+ struct xfs_scrub_dir_ctx *sdc,
+ xfs_fileoff_t offset,
+ xfs_ino_t inum,
+ int dtype)
+{
+ struct xfs_mount *mp = sdc->sc->mp;
+ struct xfs_inode *ip;
+ int ino_dtype;
+ int error = 0;
+
+ if (!xfs_sb_version_hasftype(&mp->m_sb)) {
+ if (dtype != DT_UNKNOWN && dtype != DT_DIR)
+ xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ offset);
+ goto out;
+ }
+
+ /*
+ * Grab the inode pointed to by the dirent. We release the
+ * inode before we cancel the scrub transaction. Since we
+ * don't know a priori that releasing the inode won't trigger
+ * eofblocks cleanup (which allocates what would be a nested
+ * transaction), we can't use DONTCACHE here because DONTCACHE
+ * inodes can trigger immediate inactive cleanup of the inode.
+ */
+ error = xfs_iget(mp, sdc->sc->tp, inum, 0, 0, &ip);
+ if (!xfs_scrub_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
+ &error))
+ goto out;
+
+ /* Convert mode to the DT_* values that dir_emit uses. */
+ ino_dtype = xfs_dir3_get_dtype(mp,
+ xfs_mode_to_ftype(VFS_I(ip)->i_mode));
+ if (ino_dtype != dtype)
+ xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
+ iput(VFS_I(ip));
+out:
+ return error;
+}
+
+/*
+ * Scrub a single directory entry.
+ *
+ * We use the VFS directory iterator (i.e. readdir) to call this
+ * function for every directory entry in a directory. Once we're here,
+ * we check the inode number to make sure it's sane, then we check that
+ * we can look up this filename. Finally, we check the ftype.
+ */
+STATIC int
+xfs_scrub_dir_actor(
+ struct dir_context *dir_iter,
+ const char *name,
+ int namelen,
+ loff_t pos,
+ u64 ino,
+ unsigned type)
+{
+ struct xfs_mount *mp;
+ struct xfs_inode *ip;
+ struct xfs_scrub_dir_ctx *sdc;
+ struct xfs_name xname;
+ xfs_ino_t lookup_ino;
+ xfs_dablk_t offset;
+ int error = 0;
+
+ sdc = container_of(dir_iter, struct xfs_scrub_dir_ctx, dir_iter);
+ ip = sdc->sc->ip;
+ mp = ip->i_mount;
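+ /*
+ * The readdir cursor is a dir2 dataptr; convert it back to a
+ * da block offset so corruption reports point at this block.
+ */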
+ offset = xfs_dir2_db_to_da(mp->m_dir_geo,
+ xfs_dir2_dataptr_to_db(mp->m_dir_geo, pos));
+
+ /* Does this inode number make sense? */
+ if (!xfs_verify_dir_ino(mp, ino)) {
+ xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
+ goto out;
+ }
+
+ if (!strncmp(".", name, namelen)) {
+ /* If this is "." then check that the inum matches the dir. */
+ if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR)
+ xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ offset);
+ if (ino != ip->i_ino)
+ xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ offset);
+ } else if (!strncmp("..", name, namelen)) {
+ /*
+ * If this is ".." in the root inode, check that the inum
+ * matches this dir.
+ */
+ if (xfs_sb_version_hasftype(&mp->m_sb) && type != DT_DIR)
+ xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ offset);
+ if (ip->i_ino == mp->m_sb.sb_rootino && ino != ip->i_ino)
+ xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK,
+ offset);
+ }
+
+ /* Verify that we can look up this name by hash. */
+ xname.name = name;
+ xname.len = namelen;
+ xname.type = XFS_DIR3_FT_UNKNOWN;
+
+ error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL);
+ if (!xfs_scrub_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
+ &error))
+ goto fail_xref;
+ if (lookup_ino != ino) {
+ xfs_scrub_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
+ goto out;
+ }
+
+ /* Verify the file type. This function absorbs error codes. */
+ error = xfs_scrub_dir_check_ftype(sdc, offset, lookup_ino, type);
+ if (error)
+ goto out;
+out:
+ return error;
+fail_xref:
+ return error;
+}
+
+/* Scrub a directory btree record. */
+STATIC int
+xfs_scrub_dir_rec(
+ struct xfs_scrub_da_btree *ds,
+ int level,
+ void *rec)
+{
+ struct xfs_mount *mp = ds->state->mp;
+ struct xfs_dir2_leaf_entry *ent = rec;
+ struct xfs_inode *dp = ds->dargs.dp;
+ struct xfs_dir2_data_entry *dent;
+ struct xfs_buf *bp;
+ xfs_ino_t ino;
+ xfs_dablk_t rec_bno;
+ xfs_dir2_db_t db;
+ xfs_dir2_data_aoff_t off;
+ xfs_dir2_dataptr_t ptr;
+ xfs_dahash_t calc_hash;
+ xfs_dahash_t hash;
+ unsigned int tag;
+ int error;
+
+ /* Check the hash of the entry. */
+ error = xfs_scrub_da_btree_hash(ds, level, &ent->hashval);
+ if (error)
+ goto out;
+
+ /* Valid hash pointer? */
+ ptr = be32_to_cpu(ent->address);
+ if (ptr == 0)
+ return 0;
+
+ /* Find the directory entry's location. */
+ db = xfs_dir2_dataptr_to_db(mp->m_dir_geo, ptr);
+ off = xfs_dir2_dataptr_to_off(mp->m_dir_geo, ptr);
+ rec_bno = xfs_dir2_db_to_da(mp->m_dir_geo, db);
+
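+ /* Directory data blocks must live below the leaf offset. */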
+ if (rec_bno >= mp->m_dir_geo->leafblk) {
+ xfs_scrub_da_set_corrupt(ds, level);
+ goto out;
+ }
+ error = xfs_dir3_data_read(ds->dargs.trans, dp, rec_bno, -2, &bp);
+ if (!xfs_scrub_fblock_process_error(ds->sc, XFS_DATA_FORK, rec_bno,
+ &error))
+ goto out;
+ if (!bp) {
+ xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ goto out;
+ }
+
+ /* Retrieve the entry, sanity check it, and compare hashes. */
+ dent = (struct xfs_dir2_data_entry *)(((char *)bp->b_addr) + off);
+ ino = be64_to_cpu(dent->inumber);
+ hash = be32_to_cpu(ent->hashval);
+ tag = be16_to_cpup(dp->d_ops->data_entry_tag_p(dent));
+ if (!xfs_verify_dir_ino(mp, ino) || tag != off)
+ xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ if (dent->namelen == 0) {
+ xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ goto out_relse;
+ }
+ calc_hash = xfs_da_hashname(dent->name, dent->namelen);
+ if (calc_hash != hash)
+ xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+
+out_relse:
+ xfs_trans_brelse(ds->dargs.trans, bp);
+out:
+ return error;
+}
+
+/*
+ * Is this unused entry either in the bestfree or smaller than all of
+ * them? We've already checked that the bestfrees are sorted longest to
+ * shortest, and that there aren't any bogus entries.
+ */
+STATIC void
+xfs_scrub_directory_check_free_entry(
+ struct xfs_scrub_context *sc,
+ xfs_dablk_t lblk,
+ struct xfs_dir2_data_free *bf,
+ struct xfs_dir2_data_unused *dup)
+{
+ struct xfs_dir2_data_free *dfp;
+ unsigned int dup_length;
+
+ dup_length = be16_to_cpu(dup->length);
+
+ /* Unused entry is shorter than any of the bestfrees */
+ if (dup_length < be16_to_cpu(bf[XFS_DIR2_DATA_FD_COUNT - 1].length))
+ return;
+
+ for (dfp = &bf[XFS_DIR2_DATA_FD_COUNT - 1]; dfp >= bf; dfp--)
+ if (dup_length == be16_to_cpu(dfp->length))
+ return;
+
+ /* Unused entry should be in the bestfrees but wasn't found. */
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+}
+
+/* Check free space info in a directory data block. */
+STATIC int
+xfs_scrub_directory_data_bestfree(
+ struct xfs_scrub_context *sc,
+ xfs_dablk_t lblk,
+ bool is_block)
+{
+ struct xfs_dir2_data_unused *dup;
+ struct xfs_dir2_data_free *dfp;
+ struct xfs_buf *bp;
+ struct xfs_dir2_data_free *bf;
+ struct xfs_mount *mp = sc->mp;
+ const struct xfs_dir_ops *d_ops;
+ char *ptr;
+ char *endptr;
+ u16 tag;
+ unsigned int nr_bestfrees = 0;
+ unsigned int nr_frees = 0;
+ unsigned int smallest_bestfree;
+ int newlen;
+ int offset;
+ int error;
+
+ d_ops = sc->ip->d_ops;
+
+ if (is_block) {
+ /* dir block format */
+ if (lblk != XFS_B_TO_FSBT(mp, XFS_DIR2_DATA_OFFSET))
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ error = xfs_dir3_block_read(sc->tp, sc->ip, &bp);
+ } else {
+ /* dir data format */
+ error = xfs_dir3_data_read(sc->tp, sc->ip, lblk, -1, &bp);
+ }
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
+ goto out;
+
+ /* XXX: Check xfs_dir3_data_hdr.pad is zero once we start setting it. */
+
+ /* Do the bestfrees correspond to actual free space? */
+ bf = d_ops->data_bestfree_p(bp->b_addr);
+ smallest_bestfree = UINT_MAX;
+ for (dfp = &bf[0]; dfp < &bf[XFS_DIR2_DATA_FD_COUNT]; dfp++) {
+ offset = be16_to_cpu(dfp->offset);
+ if (offset == 0)
+ continue;
+ if (offset >= mp->m_dir_geo->blksize) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ goto out_buf;
+ }
+ dup = (struct xfs_dir2_data_unused *)(bp->b_addr + offset);
+ tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup));
+
+ /* bestfree doesn't match the entry it points at? */
+ if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG) ||
+ be16_to_cpu(dup->length) != be16_to_cpu(dfp->length) ||
+ tag != ((char *)dup - (char *)bp->b_addr)) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ goto out_buf;
+ }
+
+ /* bestfree records should be ordered largest to smallest */
+ if (smallest_bestfree < be16_to_cpu(dfp->length)) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ goto out_buf;
+ }
+
+ smallest_bestfree = be16_to_cpu(dfp->length);
+ nr_bestfrees++;
+ }
+
+ /* Make sure the bestfrees are actually the best free spaces. */
+ ptr = (char *)d_ops->data_entry_p(bp->b_addr);
+ if (is_block) {
+ struct xfs_dir2_block_tail *btp;
+
+ btp = xfs_dir2_block_tail_p(mp->m_dir_geo, bp->b_addr);
+ endptr = (char *)xfs_dir2_block_leaf_p(btp);
+ } else
+ endptr = (char *)bp->b_addr + BBTOB(bp->b_length);
+
+ /* Iterate the entries, stopping when we hit or go past the end. */
+ while (ptr < endptr) {
+ dup = (struct xfs_dir2_data_unused *)ptr;
+ /* Skip real entries */
+ if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG)) {
+ struct xfs_dir2_data_entry *dep;
+
+ dep = (struct xfs_dir2_data_entry *)ptr;
+ newlen = d_ops->data_entsize(dep->namelen);
+ if (newlen <= 0) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ lblk);
+ goto out_buf;
+ }
+ ptr += newlen;
+ continue;
+ }
+
+ /* Spot check this free entry */
+ tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup));
+ if (tag != ((char *)dup - (char *)bp->b_addr))
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+
+ /*
+ * Either this entry is a bestfree or it's smaller than
+ * any of the bestfrees.
+ */
+ xfs_scrub_directory_check_free_entry(sc, lblk, bf, dup);
+
+ /* Move on. */
+ newlen = be16_to_cpu(dup->length);
+ if (newlen <= 0) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ goto out_buf;
+ }
+ ptr += newlen;
+ if (ptr <= endptr)
+ nr_frees++;
+ }
+
+ /* We're required to fill all the space. */
+ if (ptr != endptr)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+
+ /* Did we see at least as many free slots as there are bestfrees? */
+ if (nr_frees < nr_bestfrees)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+out_buf:
+ xfs_trans_brelse(sc->tp, bp);
+out:
+ return error;
+}
+
+/*
+ * Does the free space length in the free space index block ($len) match
+ * the longest length in the directory data block's bestfree array?
+ * Assume that we've already checked that the data block's bestfree
+ * array is in order.
+ */
+STATIC void
+xfs_scrub_directory_check_freesp(
+ struct xfs_scrub_context *sc,
+ xfs_dablk_t lblk,
+ struct xfs_buf *dbp,
+ unsigned int len)
+{
+ struct xfs_dir2_data_free *dfp;
+
+ dfp = sc->ip->d_ops->data_bestfree_p(dbp->b_addr);
+
+ if (len != be16_to_cpu(dfp->length))
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+
+ if (len > 0 && be16_to_cpu(dfp->offset) == 0)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+}
+
+/* Check free space info in a directory leaf1 block. */
+STATIC int
+xfs_scrub_directory_leaf1_bestfree(
+ struct xfs_scrub_context *sc,
+ struct xfs_da_args *args,
+ xfs_dablk_t lblk)
+{
+ struct xfs_dir3_icleaf_hdr leafhdr;
+ struct xfs_dir2_leaf_entry *ents;
+ struct xfs_dir2_leaf_tail *ltp;
+ struct xfs_dir2_leaf *leaf;
+ struct xfs_buf *dbp;
+ struct xfs_buf *bp;
+ const struct xfs_dir_ops *d_ops = sc->ip->d_ops;
+ struct xfs_da_geometry *geo = sc->mp->m_dir_geo;
+ __be16 *bestp;
+ __u16 best;
+ __u32 hash;
+ __u32 lasthash = 0;
+ __u32 bestcount;
+ unsigned int stale = 0;
+ int i;
+ int error;
+
+ /* Read the free space block. */
+ error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, -1, &bp);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
+ goto out;
+
+ leaf = bp->b_addr;
+ d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ ents = d_ops->leaf_ents_p(leaf);
+ ltp = xfs_dir2_leaf_tail_p(geo, leaf);
+ bestcount = be32_to_cpu(ltp->bestcount);
+ bestp = xfs_dir2_leaf_bests_p(ltp);
+
+ if (xfs_sb_version_hascrc(&sc->mp->m_sb)) {
+ struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr;
+
+ if (hdr3->pad != cpu_to_be32(0))
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ }
+
+ /*
+ * There should be as many bestfree slots as there are dir data
+ * blocks that can fit under i_size.
+ */
+ if (bestcount != xfs_dir2_byte_to_db(geo, sc->ip->i_d.di_size)) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ goto out;
+ }
+
+ /* Is the leaf count even remotely sane? */
+ if (leafhdr.count > d_ops->leaf_max_ents(geo)) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ goto out;
+ }
+
+ /* Leaves and bests don't overlap in leaf format. */
+ if ((char *)&ents[leafhdr.count] > (char *)bestp) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ goto out;
+ }
+
+ /* Check hash value order, count stale entries. */
+ for (i = 0; i < leafhdr.count; i++) {
+ hash = be32_to_cpu(ents[i].hashval);
+ if (i > 0 && lasthash > hash)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ lasthash = hash;
+ if (ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
+ stale++;
+ }
+ if (leafhdr.stale != stale)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+
+ /* Check all the bestfree entries. */
+ for (i = 0; i < bestcount; i++, bestp++) {
+ best = be16_to_cpu(*bestp);
+ if (best == NULLDATAOFF)
+ continue;
+ error = xfs_dir3_data_read(sc->tp, sc->ip,
+ i * args->geo->fsbcount, -1, &dbp);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk,
+ &error))
+ continue;
+ xfs_scrub_directory_check_freesp(sc, lblk, dbp, best);
+ xfs_trans_brelse(sc->tp, dbp);
+ }
+out:
+ return error;
+}
+
+/* Check free space info in a directory freespace block. */
+STATIC int
+xfs_scrub_directory_free_bestfree(
+ struct xfs_scrub_context *sc,
+ struct xfs_da_args *args,
+ xfs_dablk_t lblk)
+{
+ struct xfs_dir3_icfree_hdr freehdr;
+ struct xfs_buf *dbp;
+ struct xfs_buf *bp;
+ __be16 *bestp;
+ __u16 best;
+ unsigned int stale = 0;
+ int i;
+ int error;
+
+ /* Read the free space block */
+ error = xfs_dir2_free_read(sc->tp, sc->ip, lblk, &bp);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
+ goto out;
+
+ if (xfs_sb_version_hascrc(&sc->mp->m_sb)) {
+ struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
+
+ if (hdr3->pad != cpu_to_be32(0))
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ }
+
+ /* Check all the entries. */
+ sc->ip->d_ops->free_hdr_from_disk(&freehdr, bp->b_addr);
+ bestp = sc->ip->d_ops->free_bests_p(bp->b_addr);
+ for (i = 0; i < freehdr.nvalid; i++, bestp++) {
+ best = be16_to_cpu(*bestp);
+ if (best == NULLDATAOFF) {
+ stale++;
+ continue;
+ }
+ error = xfs_dir3_data_read(sc->tp, sc->ip,
+ (freehdr.firstdb + i) * args->geo->fsbcount,
+ -1, &dbp);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk,
+ &error))
+ continue;
+ xfs_scrub_directory_check_freesp(sc, lblk, dbp, best);
+ xfs_trans_brelse(sc->tp, dbp);
+ }
+
+ if (freehdr.nused + stale != freehdr.nvalid)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+out:
+ return error;
+}
+
+/* Check free space information in directories. */
+STATIC int
+xfs_scrub_directory_blocks(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_bmbt_irec got;
+ struct xfs_da_args args;
+ struct xfs_ifork *ifp;
+ struct xfs_mount *mp = sc->mp;
+ xfs_fileoff_t leaf_lblk;
+ xfs_fileoff_t free_lblk;
+ xfs_fileoff_t lblk;
+ struct xfs_iext_cursor icur;
+ xfs_dablk_t dabno;
+ bool found;
+ int is_block = 0;
+ int error;
+
+ /* Ignore local format directories. */
+ if (sc->ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
+ sc->ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
+ return 0;
+
+ ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
+ lblk = XFS_B_TO_FSB(mp, XFS_DIR2_DATA_OFFSET);
+ leaf_lblk = XFS_B_TO_FSB(mp, XFS_DIR2_LEAF_OFFSET);
+ free_lblk = XFS_B_TO_FSB(mp, XFS_DIR2_FREE_OFFSET);
+
+ /* Is this a block dir? */
+ args.dp = sc->ip;
+ args.geo = mp->m_dir_geo;
+ args.trans = sc->tp;
+ error = xfs_dir2_isblock(&args, &is_block);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
+ goto out;
+
+ /* Iterate all the data extents in the directory... */
+ found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got);
+ while (found) {
+ /* Block directories only have a single block at offset 0. */
+ if (is_block &&
+ (got.br_startoff > 0 ||
+ got.br_blockcount != args.geo->fsbcount)) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ got.br_startoff);
+ break;
+ }
+
+ /* No more data blocks... */
+ if (got.br_startoff >= leaf_lblk)
+ break;
+
+ /*
+ * Check each data block's bestfree data.
+ *
+ * Iterate all the fsbcount-aligned block offsets in
+ * this directory. The directory block reading code is
+ * smart enough to do its own bmap lookups to handle
+ * discontiguous directory blocks. When we're done
+ * with the extent record, re-query the bmap at the
+ * next fsbcount-aligned offset to avoid redundant
+ * block checks.
+ */
+ for (lblk = roundup((xfs_dablk_t)got.br_startoff,
+ args.geo->fsbcount);
+ lblk < got.br_startoff + got.br_blockcount;
+ lblk += args.geo->fsbcount) {
+ error = xfs_scrub_directory_data_bestfree(sc, lblk,
+ is_block);
+ if (error)
+ goto out;
+ }
+ dabno = got.br_startoff + got.br_blockcount;
+ lblk = roundup(dabno, args.geo->fsbcount);
+ found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got);
+ }
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ goto out;
+
+ /* Look for a leaf1 block, which has free info. */
+ if (xfs_iext_lookup_extent(sc->ip, ifp, leaf_lblk, &icur, &got) &&
+ got.br_startoff == leaf_lblk &&
+ got.br_blockcount == args.geo->fsbcount &&
+ !xfs_iext_next_extent(ifp, &icur, &got)) {
+ if (is_block) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ goto out;
+ }
+ error = xfs_scrub_directory_leaf1_bestfree(sc, &args,
+ leaf_lblk);
+ if (error)
+ goto out;
+ }
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ goto out;
+
+ /* Scan for free blocks */
+ lblk = free_lblk;
+ found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got);
+ while (found) {
+ /*
+ * Dirs can't have blocks mapped above 2^32.
+ * Single-block dirs shouldn't even be here.
+ */
+ lblk = got.br_startoff;
+ if (lblk & ~0xFFFFFFFFULL) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ goto out;
+ }
+ if (is_block) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
+ goto out;
+ }
+
+ /*
+ * Check each dir free block's bestfree data.
+ *
+ * Iterate all the fsbcount-aligned block offsets in
+ * this directory. The directory block reading code is
+ * smart enough to do its own bmap lookups to handle
+ * discontiguous directory blocks. When we're done
+ * with the extent record, re-query the bmap at the
+ * next fsbcount-aligned offset to avoid redundant
+ * block checks.
+ */
+ for (lblk = roundup((xfs_dablk_t)got.br_startoff,
+ args.geo->fsbcount);
+ lblk < got.br_startoff + got.br_blockcount;
+ lblk += args.geo->fsbcount) {
+ error = xfs_scrub_directory_free_bestfree(sc, &args,
+ lblk);
+ if (error)
+ goto out;
+ }
+ dabno = got.br_startoff + got.br_blockcount;
+ lblk = roundup(dabno, args.geo->fsbcount);
+ found = xfs_iext_lookup_extent(sc->ip, ifp, lblk, &icur, &got);
+ }
+out:
+ return error;
+}
+
+/* Scrub a whole directory. */
+int
+xfs_scrub_directory(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_scrub_dir_ctx sdc = {
+ .dir_iter.actor = xfs_scrub_dir_actor,
+ .dir_iter.pos = 0,
+ .sc = sc,
+ };
+ size_t bufsize;
+ loff_t oldpos;
+ int error = 0;
+
+ if (!S_ISDIR(VFS_I(sc->ip)->i_mode))
+ return -ENOENT;
+
+ /* Plausible size? */
+ if (sc->ip->i_d.di_size < xfs_dir2_sf_hdr_size(0)) {
+ xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino, NULL);
+ goto out;
+ }
+
+ /* Check directory tree structure */
+ error = xfs_scrub_da_btree(sc, XFS_DATA_FORK, xfs_scrub_dir_rec, NULL);
+ if (error)
+ return error;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return error;
+
+ /* Check the freespace. */
+ error = xfs_scrub_directory_blocks(sc);
+ if (error)
+ return error;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return error;
+
+ /*
+ * Check that every dirent we see can also be looked up by hash.
+ * Userspace usually asks for a 32k buffer, so we will too.
+ */
+ bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE,
+ sc->ip->i_d.di_size);
+
+ /*
+ * Look up every name in this directory by hash.
+ *
+ * Use the xfs_readdir function to call xfs_scrub_dir_actor on
+ * every directory entry in this directory. In _actor, we check
+ * the name, inode number, and ftype (if applicable) of the
+ * entry. xfs_readdir uses the VFS filldir functions to provide
+ * iteration context.
+ *
+ * The VFS grabs a read or write lock via i_rwsem before it reads
+ * or writes to a directory. If we've gotten this far we've
+ * already obtained IOLOCK_EXCL, which (since 4.10) is the same as
+ * getting a write lock on i_rwsem. Therefore, it is safe for us
+ * to drop the ILOCK here in order to reuse the _readdir and
+ * _dir_lookup routines, which do their own ILOCK locking.
+ */
+ oldpos = 0;
+ sc->ilock_flags &= ~XFS_ILOCK_EXCL;
+ xfs_iunlock(sc->ip, XFS_ILOCK_EXCL);
+ while (true) {
+ error = xfs_readdir(sc->tp, sc->ip, &sdc.dir_iter, bufsize);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0,
+ &error))
+ goto out;
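+ /* Stop when readdir no longer advances the cursor. */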
+ if (oldpos == sdc.dir_iter.pos)
+ break;
+ oldpos = sdc.dir_iter.pos;
+ }
+
+out:
+ return error;
+}
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
new file mode 100644
index 000000000000..496d6f2fbb9e
--- /dev/null
+++ b/fs/xfs/scrub/ialloc.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_icache.h"
+#include "xfs_rmap.h"
+#include "xfs_log.h"
+#include "xfs_trans_priv.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/btree.h"
+#include "scrub/trace.h"
+
+/*
+ * Set us up to scrub inode btrees.
+ * If we detect a discrepancy between the inobt and the inode,
+ * try again after forcing logged inode cores out to disk.
+ */
+int
+xfs_scrub_setup_ag_iallocbt(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip)
+{
+ return xfs_scrub_setup_ag_btree(sc, ip, sc->try_harder);
+}
+
+/* Inode btree scrubber. */
+
+/* Is this chunk worth checking? */
+STATIC bool
+xfs_scrub_iallocbt_chunk(
+ struct xfs_scrub_btree *bs,
+ struct xfs_inobt_rec_incore *irec,
+ xfs_agino_t agino,
+ xfs_extlen_t len)
+{
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ xfs_agblock_t bno;
+
+ bno = XFS_AGINO_TO_AGBNO(mp, agino);
+ if (bno + len <= bno ||
+ !xfs_verify_agbno(mp, agno, bno) ||
+ !xfs_verify_agbno(mp, agno, bno + len - 1))
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ return true;
+}
+
+/* Count the number of free inodes. */
+static unsigned int
+xfs_scrub_iallocbt_freecount(
+ xfs_inofree_t freemask)
+{
+ BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
+ return hweight64(freemask);
+}
+
+/* Check a particular inode with ir_free. */
+STATIC int
+xfs_scrub_iallocbt_check_cluster_freemask(
+ struct xfs_scrub_btree *bs,
+ xfs_ino_t fsino,
+ xfs_agino_t chunkino,
+ xfs_agino_t clusterino,
+ struct xfs_inobt_rec_incore *irec,
+ struct xfs_buf *bp)
+{
+ struct xfs_dinode *dip;
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ bool inode_is_free = false;
+ bool freemask_ok;
+ bool inuse;
+ int error = 0;
+
+ if (xfs_scrub_should_terminate(bs->sc, &error))
+ return error;
+
+ dip = xfs_buf_offset(bp, clusterino * mp->m_sb.sb_inodesize);
+ if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
+ (dip->di_version >= 3 &&
+ be64_to_cpu(dip->di_ino) != fsino + clusterino)) {
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ goto out;
+ }
+
+ if (irec->ir_free & XFS_INOBT_MASK(chunkino + clusterino))
+ inode_is_free = true;
+ error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp,
+ fsino + clusterino, &inuse);
+ if (error == -ENODATA) {
+ /* Not cached, just read the disk buffer */
+ freemask_ok = inode_is_free ^ !!(dip->di_mode);
+ if (!bs->sc->try_harder && !freemask_ok)
+ return -EDEADLOCK;
+ } else if (error < 0) {
+ /*
+ * Inode is only half assembled, or there was an IO error,
+ * or the verifier failed, so don't bother trying to check.
+ * The inode scrubber can deal with this.
+ */
+ goto out;
+ } else {
+ /* Inode is all there. */
+ freemask_ok = inode_is_free ^ inuse;
+ }
+ if (!freemask_ok)
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+out:
+ return 0;
+}
+
+/* Make sure the free mask is consistent with what the inodes think. */
+STATIC int
+xfs_scrub_iallocbt_check_freemask(
+ struct xfs_scrub_btree *bs,
+ struct xfs_inobt_rec_incore *irec)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_imap imap;
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ struct xfs_dinode *dip;
+ struct xfs_buf *bp;
+ xfs_ino_t fsino;
+ xfs_agino_t nr_inodes;
+ xfs_agino_t agino;
+ xfs_agino_t chunkino;
+ xfs_agino_t clusterino;
+ xfs_agblock_t agbno;
+ int blks_per_cluster;
+ uint16_t holemask;
+ uint16_t ir_holemask;
+ int error = 0;
+
+ /* Make sure the freemask matches the inode records. */
+ blks_per_cluster = xfs_icluster_size_fsb(mp);
+ nr_inodes = XFS_OFFBNO_TO_AGINO(mp, blks_per_cluster, 0);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
+
+ for (agino = irec->ir_startino;
+ agino < irec->ir_startino + XFS_INODES_PER_CHUNK;
+ agino += blks_per_cluster * mp->m_sb.sb_inopblock) {
+ fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
+ chunkino = agino - irec->ir_startino;
+ agbno = XFS_AGINO_TO_AGBNO(mp, agino);
+
+ /* Compute the holemask for this cluster. */
+ for (clusterino = 0, holemask = 0; clusterino < nr_inodes;
+ clusterino += XFS_INODES_PER_HOLEMASK_BIT)
+ holemask |= XFS_INOBT_MASK((chunkino + clusterino) /
+ XFS_INODES_PER_HOLEMASK_BIT);
+
+ /* The whole cluster must be a hole or not a hole. */
+ ir_holemask = (irec->ir_holemask & holemask);
+ if (ir_holemask != holemask && ir_holemask != 0) {
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ continue;
+ }
+
+ /* If any part of this is a hole, skip it. */
+ if (ir_holemask)
+ continue;
+
+ /* Grab the inode cluster buffer. */
+ imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno,
+ agbno);
+ imap.im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
+ imap.im_boffset = 0;
+
+ error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap,
+ &dip, &bp, 0, 0);
+ if (!xfs_scrub_btree_process_error(bs->sc, bs->cur, 0, &error))
+ continue;
+
+ /* Which inodes are free? */
+ for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
+ error = xfs_scrub_iallocbt_check_cluster_freemask(bs,
+ fsino, chunkino, clusterino, irec, bp);
+ if (error) {
+ xfs_trans_brelse(bs->cur->bc_tp, bp);
+ return error;
+ }
+ }
+
+ xfs_trans_brelse(bs->cur->bc_tp, bp);
+ }
+
+ return error;
+}
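A quick worked example of the holemask computation above (an illustrative aside, assuming the common geometry where XFS_INODES_PER_HOLEMASK_BIT is 4, i.e. 64 inodes per chunk spread over 16 holemask bits; the chunkino and nr_inodes values are hypothetical):

	uint16_t	holemask = 0;
	xfs_agino_t	chunkino = 16;	/* hypothetical cluster offset in chunk */
	xfs_agino_t	nr_inodes = 8;	/* hypothetical inodes per cluster */
	xfs_agino_t	clusterino;

	for (clusterino = 0; clusterino < nr_inodes;
	     clusterino += XFS_INODES_PER_HOLEMASK_BIT)
		holemask |= XFS_INOBT_MASK((chunkino + clusterino) /
				XFS_INODES_PER_HOLEMASK_BIT);
	/* holemask == 0x0030: this cluster covers holemask bits 4 and 5. */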
+
+/* Scrub an inobt/finobt record. */
+STATIC int
+xfs_scrub_iallocbt_rec(
+ struct xfs_scrub_btree *bs,
+ union xfs_btree_rec *rec)
+{
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ struct xfs_inobt_rec_incore irec;
+ uint64_t holes;
+ xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ xfs_agino_t agino;
+ xfs_agblock_t agbno;
+ xfs_extlen_t len;
+ int holecount;
+ int i;
+ int error = 0;
+ unsigned int real_freecount;
+ uint16_t holemask;
+
+ xfs_inobt_btrec_to_irec(mp, rec, &irec);
+
+ if (irec.ir_count > XFS_INODES_PER_CHUNK ||
+ irec.ir_freecount > XFS_INODES_PER_CHUNK)
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ real_freecount = irec.ir_freecount +
+ (XFS_INODES_PER_CHUNK - irec.ir_count);
+ if (real_freecount != xfs_scrub_iallocbt_freecount(irec.ir_free))
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+

+ agino = irec.ir_startino;
+ /* Record has to be properly aligned within the AG. */
+ if (!xfs_verify_agino(mp, agno, agino) ||
+ !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ goto out;
+ }
+
+ /* Make sure this record is aligned to cluster and inode alignment size. */
+ agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino);
+ if ((agbno & (xfs_ialloc_cluster_alignment(mp) - 1)) ||
+ (agbno & (xfs_icluster_size_fsb(mp) - 1)))
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ /* Handle non-sparse inodes */
+ if (!xfs_inobt_issparse(irec.ir_holemask)) {
+ len = XFS_B_TO_FSB(mp,
+ XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
+ if (irec.ir_count != XFS_INODES_PER_CHUNK)
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len))
+ goto out;
+ goto check_freemask;
+ }
+
+ /* Check each chunk of a sparse inode cluster. */
+ holemask = irec.ir_holemask;
+ holecount = 0;
+ len = XFS_B_TO_FSB(mp,
+ XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
+ holes = ~xfs_inobt_irec_to_allocmask(&irec);
+ if ((holes & irec.ir_free) != holes ||
+ irec.ir_freecount > irec.ir_count)
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
+ if (holemask & 1)
+ holecount += XFS_INODES_PER_HOLEMASK_BIT;
+ else if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len))
+ break;
+ holemask >>= 1;
+ agino += XFS_INODES_PER_HOLEMASK_BIT;
+ }
+
+ if (holecount > XFS_INODES_PER_CHUNK ||
+ holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+check_freemask:
+ error = xfs_scrub_iallocbt_check_freemask(bs, &irec);
+ if (error)
+ goto out;
+
+out:
+ return error;
+}
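To see why the real_freecount comparison near the top of this function also works for sparse chunks (a worked example, editorial aside): a sparse record with ir_count = 32 and ir_freecount = 10 leaves 64 - 32 = 32 slots that were never allocated. Since the hole checks above require every hole to be marked free in ir_free, hweight64(ir_free) must come out to 10 + 32 = 42, which is exactly real_freecount = ir_freecount + (XFS_INODES_PER_CHUNK - ir_count).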
+
+/* Scrub the inode btrees for some AG. */
+STATIC int
+xfs_scrub_iallocbt(
+ struct xfs_scrub_context *sc,
+ xfs_btnum_t which)
+{
+ struct xfs_btree_cur *cur;
+ struct xfs_owner_info oinfo;
+
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
+ cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
+ return xfs_scrub_btree(sc, cur, xfs_scrub_iallocbt_rec, &oinfo, NULL);
+}
+
+int
+xfs_scrub_inobt(
+ struct xfs_scrub_context *sc)
+{
+ return xfs_scrub_iallocbt(sc, XFS_BTNUM_INO);
+}
+
+int
+xfs_scrub_finobt(
+ struct xfs_scrub_context *sc)
+{
+ return xfs_scrub_iallocbt(sc, XFS_BTNUM_FINO);
+}
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
new file mode 100644
index 000000000000..637b7a892313
--- /dev/null
+++ b/fs/xfs/scrub/inode.c
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_inode.h"
+#include "xfs_icache.h"
+#include "xfs_inode_buf.h"
+#include "xfs_inode_fork.h"
+#include "xfs_ialloc.h"
+#include "xfs_da_format.h"
+#include "xfs_reflink.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/trace.h"
+
+/*
+ * Grab total control of the inode metadata. It doesn't matter here if
+ * the file data is still changing; exclusive access to the metadata is
+ * the goal.
+ */
+int
+xfs_scrub_setup_inode(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = sc->mp;
+ int error;
+
+ /*
+ * Try to get the inode. If the verifiers fail, we try again
+ * in raw mode.
+ */
+ error = xfs_scrub_get_inode(sc, ip);
+ switch (error) {
+ case 0:
+ break;
+ case -EFSCORRUPTED:
+ case -EFSBADCRC:
+ return 0;
+ default:
+ return error;
+ }
+
+ /* Got the inode, lock it and we're ready to go. */
+ sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
+ xfs_ilock(sc->ip, sc->ilock_flags);
+ error = xfs_scrub_trans_alloc(sc->sm, mp, &sc->tp);
+ if (error)
+ goto out;
+ sc->ilock_flags |= XFS_ILOCK_EXCL;
+ xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
+
+out:
+ /* scrub teardown will unlock and release the inode for us */
+ return error;
+}
+
+/* Inode core */
+
+/*
+ * Validate di_extsize hint.
+ *
+ * The rules are documented at xfs_ioctl_setattr_check_extsize().
+ * These functions must be kept in sync with each other.
+ */
+STATIC void
+xfs_scrub_inode_extsize(
+ struct xfs_scrub_context *sc,
+ struct xfs_buf *bp,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino,
+ uint16_t mode,
+ uint16_t flags)
+{
+ struct xfs_mount *mp = sc->mp;
+ bool rt_flag;
+ bool hint_flag;
+ bool inherit_flag;
+ uint32_t extsize;
+ uint32_t extsize_bytes;
+ uint32_t blocksize_bytes;
+
+ rt_flag = (flags & XFS_DIFLAG_REALTIME);
+ hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
+ inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
+ extsize = be32_to_cpu(dip->di_extsize);
+ extsize_bytes = XFS_FSB_TO_B(sc->mp, extsize);
+
+ if (rt_flag)
+ blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
+ else
+ blocksize_bytes = mp->m_sb.sb_blocksize;
+
+ if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
+ goto bad;
+
+ if (hint_flag && !S_ISREG(mode))
+ goto bad;
+
+ if (inherit_flag && !S_ISDIR(mode))
+ goto bad;
+
+ if ((hint_flag || inherit_flag) && extsize == 0)
+ goto bad;
+
+ if (!(hint_flag || inherit_flag) && extsize != 0)
+ goto bad;
+
+ if (extsize_bytes % blocksize_bytes)
+ goto bad;
+
+ if (extsize > MAXEXTLEN)
+ goto bad;
+
+ if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
+ goto bad;
+
+ return;
+bad:
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+}
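As a concrete (hypothetical) data point for the rules above: on a filesystem with 4 KiB blocks and AGs of one million blocks, a regular file with XFS_DIFLAG_EXTSIZE set and di_extsize = 16 passes every check here, since the hint is nonzero, 16 blocks is 64 KiB which is a whole multiple of the block size, 16 is far below MAXEXTLEN, and 16 is well under half the AG size.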
+
+/*
+ * Validate di_cowextsize hint.
+ *
+ * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
+ * These functions must be kept in sync with each other.
+ */
+STATIC void
+xfs_scrub_inode_cowextsize(
+ struct xfs_scrub_context *sc,
+ struct xfs_buf *bp,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino,
+ uint16_t mode,
+ uint16_t flags,
+ uint64_t flags2)
+{
+ struct xfs_mount *mp = sc->mp;
+ bool rt_flag;
+ bool hint_flag;
+ uint32_t extsize;
+ uint32_t extsize_bytes;
+
+ rt_flag = (flags & XFS_DIFLAG_REALTIME);
+ hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
+ extsize = be32_to_cpu(dip->di_cowextsize);
+ extsize_bytes = XFS_FSB_TO_B(sc->mp, extsize);
+
+ if (hint_flag && !xfs_sb_version_hasreflink(&mp->m_sb))
+ goto bad;
+
+ if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
+ goto bad;
+
+ if (hint_flag && extsize == 0)
+ goto bad;
+
+ if (!hint_flag && extsize != 0)
+ goto bad;
+
+ if (hint_flag && rt_flag)
+ goto bad;
+
+ if (extsize_bytes % mp->m_sb.sb_blocksize)
+ goto bad;
+
+ if (extsize > MAXEXTLEN)
+ goto bad;
+
+ if (extsize > mp->m_sb.sb_agblocks / 2)
+ goto bad;
+
+ return;
+bad:
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+}
+
+/* Make sure the di_flags make sense for the inode. */
+STATIC void
+xfs_scrub_inode_flags(
+ struct xfs_scrub_context *sc,
+ struct xfs_buf *bp,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino,
+ uint16_t mode,
+ uint16_t flags)
+{
+ struct xfs_mount *mp = sc->mp;
+
+ if (flags & ~XFS_DIFLAG_ANY)
+ goto bad;
+
+ /* rt flags require rt device */
+ if ((flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_RTINHERIT)) &&
+ !mp->m_rtdev_targp)
+ goto bad;
+
+ /* new rt bitmap flag only valid for rbmino */
+ if ((flags & XFS_DIFLAG_NEWRTBM) && ino != mp->m_sb.sb_rbmino)
+ goto bad;
+
+ /* directory-only flags */
+ if ((flags & (XFS_DIFLAG_RTINHERIT |
+ XFS_DIFLAG_EXTSZINHERIT |
+ XFS_DIFLAG_PROJINHERIT |
+ XFS_DIFLAG_NOSYMLINKS)) &&
+ !S_ISDIR(mode))
+ goto bad;
+
+ /* file-only flags */
+ if ((flags & (XFS_DIFLAG_REALTIME | FS_XFLAG_EXTSIZE)) &&
+ !S_ISREG(mode))
+ goto bad;
+
+ /* filestreams and rt make no sense */
+ if ((flags & XFS_DIFLAG_FILESTREAM) && (flags & XFS_DIFLAG_REALTIME))
+ goto bad;
+
+ return;
+bad:
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+}
+
+/* Make sure the di_flags2 make sense for the inode. */
+STATIC void
+xfs_scrub_inode_flags2(
+ struct xfs_scrub_context *sc,
+ struct xfs_buf *bp,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino,
+ uint16_t mode,
+ uint16_t flags,
+ uint64_t flags2)
+{
+ struct xfs_mount *mp = sc->mp;
+
+ if (flags2 & ~XFS_DIFLAG2_ANY)
+ goto bad;
+
+ /* reflink flag requires reflink feature */
+ if ((flags2 & XFS_DIFLAG2_REFLINK) &&
+ !xfs_sb_version_hasreflink(&mp->m_sb))
+ goto bad;
+
+ /* cowextsize flag is checked w.r.t. mode separately */
+
+ /* file/dir-only flags */
+ if ((flags2 & XFS_DIFLAG2_DAX) && !(S_ISREG(mode) || S_ISDIR(mode)))
+ goto bad;
+
+ /* file-only flags */
+ if ((flags2 & XFS_DIFLAG2_REFLINK) && !S_ISREG(mode))
+ goto bad;
+
+ /* realtime and reflink make no sense, currently */
+ if ((flags & XFS_DIFLAG_REALTIME) && (flags2 & XFS_DIFLAG2_REFLINK))
+ goto bad;
+
+ /* dax and reflink make no sense, currently */
+ if ((flags2 & XFS_DIFLAG2_DAX) && (flags2 & XFS_DIFLAG2_REFLINK))
+ goto bad;
+
+ return;
+bad:
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+}
+
+/* Scrub all the ondisk inode fields. */
+STATIC void
+xfs_scrub_dinode(
+ struct xfs_scrub_context *sc,
+ struct xfs_buf *bp,
+ struct xfs_dinode *dip,
+ xfs_ino_t ino)
+{
+ struct xfs_mount *mp = sc->mp;
+ size_t fork_recs;
+ unsigned long long isize;
+ uint64_t flags2;
+ uint32_t nextents;
+ uint16_t flags;
+ uint16_t mode;
+
+ flags = be16_to_cpu(dip->di_flags);
+ if (dip->di_version >= 3)
+ flags2 = be64_to_cpu(dip->di_flags2);
+ else
+ flags2 = 0;
+
+ /* di_mode */
+ mode = be16_to_cpu(dip->di_mode);
+ if (mode & ~(S_IALLUGO | S_IFMT))
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+
+ /* v1/v2 fields */
+ switch (dip->di_version) {
+ case 1:
+ /*
+ * We autoconvert v1 inodes into v2 inodes on writeout,
+ * so just mark this inode for preening.
+ */
+ xfs_scrub_ino_set_preen(sc, ino, bp);
+ break;
+ case 2:
+ case 3:
+ if (dip->di_onlink != 0)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+
+ if (dip->di_mode == 0 && sc->ip)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+
+ if (dip->di_projid_hi != 0 &&
+ !xfs_sb_version_hasprojid32bit(&mp->m_sb))
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ break;
+ default:
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ return;
+ }
+
+ /*
+ * di_uid/di_gid -- -1 isn't invalid, but there's no way that
+ * userspace could have created that.
+ */
+ if (dip->di_uid == cpu_to_be32(-1U) ||
+ dip->di_gid == cpu_to_be32(-1U))
+ xfs_scrub_ino_set_warning(sc, ino, bp);
+
+ /* di_format */
+ switch (dip->di_format) {
+ case XFS_DINODE_FMT_DEV:
+ if (!S_ISCHR(mode) && !S_ISBLK(mode) &&
+ !S_ISFIFO(mode) && !S_ISSOCK(mode))
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ break;
+ case XFS_DINODE_FMT_LOCAL:
+ if (!S_ISDIR(mode) && !S_ISLNK(mode))
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ break;
+ case XFS_DINODE_FMT_EXTENTS:
+ if (!S_ISREG(mode) && !S_ISDIR(mode) && !S_ISLNK(mode))
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ break;
+ case XFS_DINODE_FMT_BTREE:
+ if (!S_ISREG(mode) && !S_ISDIR(mode))
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ break;
+ case XFS_DINODE_FMT_UUID:
+ default:
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ break;
+ }
+
+ /*
+ * di_size. xfs_dinode_verify checks for things that screw up
+ * the VFS such as the upper bit being set and zero-length
+ * symlinks/directories, but we can do more here.
+ */
+ isize = be64_to_cpu(dip->di_size);
+ if (isize & (1ULL << 63))
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+
+ /* Devices, fifos, and sockets must have zero size */
+ if (!S_ISDIR(mode) && !S_ISREG(mode) && !S_ISLNK(mode) && isize != 0)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+
+ /* Directories can't be larger than the data section size (32G) */
+ if (S_ISDIR(mode) && (isize == 0 || isize >= XFS_DIR2_SPACE_SIZE))
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+
+ /* Symlinks can't be larger than SYMLINK_MAXLEN */
+ if (S_ISLNK(mode) && (isize == 0 || isize >= XFS_SYMLINK_MAXLEN))
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+
+ /*
+ * Warn if the running kernel can't handle the kinds of offsets
+ * needed to deal with the file size. In other words, if the
+ * pagecache can't cache all the blocks in this file due to
+ * overly large offsets, flag the inode for admin review.
+ */
+ if (isize >= mp->m_super->s_maxbytes)
+ xfs_scrub_ino_set_warning(sc, ino, bp);
+
+ /* di_nblocks */
+ if (flags2 & XFS_DIFLAG2_REFLINK) {
+ ; /* nblocks can exceed dblocks */
+ } else if (flags & XFS_DIFLAG_REALTIME) {
+ /*
+ * nblocks is the sum of data extents (in the rtdev),
+ * attr extents (in the datadev), and both forks' bmbt
+ * blocks (in the datadev). This clumsy check is the
+ * best we can do without cross-referencing with the
+ * inode forks.
+ */
+ if (be64_to_cpu(dip->di_nblocks) >=
+ mp->m_sb.sb_dblocks + mp->m_sb.sb_rblocks)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ } else {
+ if (be64_to_cpu(dip->di_nblocks) >= mp->m_sb.sb_dblocks)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ }
+
+ xfs_scrub_inode_flags(sc, bp, dip, ino, mode, flags);
+
+ xfs_scrub_inode_extsize(sc, bp, dip, ino, mode, flags);
+
+ /* di_nextents */
+ nextents = be32_to_cpu(dip->di_nextents);
+ fork_recs = XFS_DFORK_DSIZE(dip, mp) / sizeof(struct xfs_bmbt_rec);
+ switch (dip->di_format) {
+ case XFS_DINODE_FMT_EXTENTS:
+ if (nextents > fork_recs)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ break;
+ case XFS_DINODE_FMT_BTREE:
+ if (nextents <= fork_recs)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ break;
+ default:
+ if (nextents != 0)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ break;
+ }
+
+ /* di_forkoff */
+ if (XFS_DFORK_APTR(dip) >= (char *)dip + mp->m_sb.sb_inodesize)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ if (dip->di_anextents != 0 && dip->di_forkoff == 0)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ if (dip->di_forkoff == 0 && dip->di_aformat != XFS_DINODE_FMT_EXTENTS)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+
+ /* di_aformat */
+ if (dip->di_aformat != XFS_DINODE_FMT_LOCAL &&
+ dip->di_aformat != XFS_DINODE_FMT_EXTENTS &&
+ dip->di_aformat != XFS_DINODE_FMT_BTREE)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+
+ /* di_anextents */
+ nextents = be16_to_cpu(dip->di_anextents);
+ fork_recs = XFS_DFORK_ASIZE(dip, mp) / sizeof(struct xfs_bmbt_rec);
+ switch (dip->di_aformat) {
+ case XFS_DINODE_FMT_EXTENTS:
+ if (nextents > fork_recs)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ break;
+ case XFS_DINODE_FMT_BTREE:
+ if (nextents <= fork_recs)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ break;
+ default:
+ if (nextents != 0)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ }
+
+ if (dip->di_version >= 3) {
+ xfs_scrub_inode_flags2(sc, bp, dip, ino, mode, flags, flags2);
+ xfs_scrub_inode_cowextsize(sc, bp, dip, ino, mode, flags,
+ flags2);
+ }
+}
+
+/* Map and read a raw inode. */
+STATIC int
+xfs_scrub_inode_map_raw(
+ struct xfs_scrub_context *sc,
+ xfs_ino_t ino,
+ struct xfs_buf **bpp,
+ struct xfs_dinode **dipp)
+{
+ struct xfs_imap imap;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_buf *bp = NULL;
+ struct xfs_dinode *dip;
+ int error;
+
+ error = xfs_imap(mp, sc->tp, ino, &imap, XFS_IGET_UNTRUSTED);
+ if (error == -EINVAL) {
+ /*
+ * Inode could have gotten deleted out from under us;
+ * just forget about it.
+ */
+ error = -ENOENT;
+ goto out;
+ }
+ if (!xfs_scrub_process_error(sc, XFS_INO_TO_AGNO(mp, ino),
+ XFS_INO_TO_AGBNO(mp, ino), &error))
+ goto out;
+
+ error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
+ imap.im_blkno, imap.im_len, XBF_UNMAPPED, &bp,
+ NULL);
+ if (!xfs_scrub_process_error(sc, XFS_INO_TO_AGNO(mp, ino),
+ XFS_INO_TO_AGBNO(mp, ino), &error))
+ goto out;
+
+ /*
+ * Is this really an inode? We disabled verifiers in the above
+ * xfs_trans_read_buf call because the inode buffer verifier
+ * fails on /any/ inode record in the inode cluster with a bad
+ * magic or version number, not just the one that we're
+ * checking. Therefore, grab the buffer unconditionally, attach
+ * the inode verifiers by hand, and run the inode verifier only
+ * on the one inode we want.
+ */
+ bp->b_ops = &xfs_inode_buf_ops;
+ dip = xfs_buf_offset(bp, imap.im_boffset);
+ if (!xfs_dinode_verify(mp, ino, dip) ||
+ !xfs_dinode_good_version(mp, dip->di_version)) {
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ goto out_buf;
+ }
+
+ /* ...and is it the one we asked for? */
+ if (be32_to_cpu(dip->di_gen) != sc->sm->sm_gen) {
+ error = -ENOENT;
+ goto out_buf;
+ }
+
+ *dipp = dip;
+ *bpp = bp;
+out:
+ return error;
+out_buf:
+ xfs_trans_brelse(sc->tp, bp);
+ return error;
+}
+
+/* Scrub an inode. */
+int
+xfs_scrub_inode(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_dinode di;
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_buf *bp = NULL;
+ struct xfs_dinode *dip;
+ xfs_ino_t ino;
+
+ bool has_shared;
+ int error = 0;
+
+ /* Did we get the in-core inode, or are we doing this manually? */
+ if (sc->ip) {
+ ino = sc->ip->i_ino;
+ xfs_inode_to_disk(sc->ip, &di, 0);
+ dip = &di;
+ } else {
+ /* Map & read inode. */
+ ino = sc->sm->sm_ino;
+ error = xfs_scrub_inode_map_raw(sc, ino, &bp, &dip);
+ if (error || !bp)
+ goto out;
+ }
+
+ xfs_scrub_dinode(sc, bp, dip, ino);
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ goto out;
+
+ /* Now let's do the things that require a live inode. */
+ if (!sc->ip)
+ goto out;
+
+ /*
+ * Does this inode have the reflink flag set but no shared extents?
+ * Set the preening flag if this is the case.
+ */
+ if (xfs_is_reflink_inode(sc->ip)) {
+ error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
+ &has_shared);
+ if (!xfs_scrub_process_error(sc, XFS_INO_TO_AGNO(mp, ino),
+ XFS_INO_TO_AGBNO(mp, ino), &error))
+ goto out;
+ if (!has_shared)
+ xfs_scrub_ino_set_preen(sc, ino, bp);
+ }
+
+out:
+ if (bp)
+ xfs_trans_brelse(sc->tp, bp);
+ return error;
+}
diff --git a/fs/xfs/scrub/parent.c b/fs/xfs/scrub/parent.c
new file mode 100644
index 000000000000..63a25334fc83
--- /dev/null
+++ b/fs/xfs/scrub/parent.c
@@ -0,0 +1,317 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_inode.h"
+#include "xfs_icache.h"
+#include "xfs_dir2.h"
+#include "xfs_dir2_priv.h"
+#include "xfs_ialloc.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/trace.h"
+
+/* Set us up to scrub parents. */
+int
+xfs_scrub_setup_parent(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip)
+{
+ return xfs_scrub_setup_inode_contents(sc, ip, 0);
+}
+
+/* Parent pointers */
+
+/* Look for an entry in a parent pointing to this inode. */
+
+struct xfs_scrub_parent_ctx {
+ struct dir_context dc;
+ xfs_ino_t ino;
+ xfs_nlink_t nlink;
+};
+
+/* Look for a single entry in a directory pointing to an inode. */
+STATIC int
+xfs_scrub_parent_actor(
+ struct dir_context *dc,
+ const char *name,
+ int namelen,
+ loff_t pos,
+ u64 ino,
+ unsigned type)
+{
+ struct xfs_scrub_parent_ctx *spc;
+
+ spc = container_of(dc, struct xfs_scrub_parent_ctx, dc);
+ if (spc->ino == ino)
+ spc->nlink++;
+ return 0;
+}
+
+/* Count the number of dentries in the parent dir that point to this inode. */
+STATIC int
+xfs_scrub_parent_count_parent_dentries(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *parent,
+ xfs_nlink_t *nlink)
+{
+ struct xfs_scrub_parent_ctx spc = {
+ .dc.actor = xfs_scrub_parent_actor,
+ .dc.pos = 0,
+ .ino = sc->ip->i_ino,
+ .nlink = 0,
+ };
+ size_t bufsize;
+ loff_t oldpos;
+ uint lock_mode;
+ int error = 0;
+
+ /*
+ * If there are any blocks, read-ahead block 0 as we're almost
+ * certain to have the next operation be a read there. This is
+ * how we guarantee that the parent's extent map has been loaded,
+ * if there is one.
+ */
+ lock_mode = xfs_ilock_data_map_shared(parent);
+ if (parent->i_d.di_nextents > 0)
+ error = xfs_dir3_data_readahead(parent, 0, -1);
+ xfs_iunlock(parent, lock_mode);
+ if (error)
+ return error;
+
+ /*
+ * Iterate the parent dir to confirm that there is
+ * exactly one entry pointing back to the inode being
+ * scanned.
+ */
+ bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE,
+ parent->i_d.di_size);
+ oldpos = 0;
+ while (true) {
+ error = xfs_readdir(sc->tp, parent, &spc.dc, bufsize);
+ if (error)
+ goto out;
+ if (oldpos == spc.dc.pos)
+ break;
+ oldpos = spc.dc.pos;
+ }
+ *nlink = spc.nlink;
+out:
+ return error;
+}
+
+/*
+ * Given the inode number of the alleged parent of the inode being
+ * scrubbed, try to validate that the parent has exactly one directory
+ * entry pointing back to the inode being scrubbed.
+ */
+STATIC int
+xfs_scrub_parent_validate(
+ struct xfs_scrub_context *sc,
+ xfs_ino_t dnum,
+ bool *try_again)
+{
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_inode *dp = NULL;
+ xfs_nlink_t expected_nlink;
+ xfs_nlink_t nlink;
+ int error = 0;
+
+ *try_again = false;
+
+ /* '..' must not point to ourselves. */
+ if (sc->ip->i_ino == dnum) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ goto out;
+ }
+
+ /*
+ * If we're an unlinked directory, the parent /won't/ have a link
+ * to us. Otherwise, it should have one link.
+ */
+ expected_nlink = VFS_I(sc->ip)->i_nlink == 0 ? 0 : 1;
+
+ /*
+ * Grab this parent inode. We release the inode before we
+ * cancel the scrub transaction. Since we don't know a
+ * priori that releasing the inode won't trigger eofblocks
+ * cleanup (which allocates what would be a nested transaction)
+ * if the parent pointer erroneously points to a file, we
+ * can't use DONTCACHE here because DONTCACHE inodes can trigger
+ * immediate inactive cleanup of the inode.
+ */
+ error = xfs_iget(mp, sc->tp, dnum, 0, 0, &dp);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ goto out;
+ if (dp == sc->ip) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ goto out_rele;
+ }
+
+ /*
+ * We prefer to keep the inode locked while we lock and search
+ * its alleged parent for a forward reference. If we can grab
+ * the iolock, validate the pointers and we're done. We must
+ * use nowait here to avoid an ABBA deadlock on the parent and
+ * the child inodes.
+ */
+ if (xfs_ilock_nowait(dp, XFS_IOLOCK_SHARED)) {
+ error = xfs_scrub_parent_count_parent_dentries(sc, dp, &nlink);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0,
+ &error))
+ goto out_unlock;
+ if (nlink != expected_nlink)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ goto out_unlock;
+ }
+
+ /*
+ * The game changes if we get here. We failed to lock the parent,
+ * so we're going to try to verify both pointers while only holding
+ * one lock so as to avoid deadlocking with something that's actually
+ * trying to traverse down the directory tree.
+ */
+ xfs_iunlock(sc->ip, sc->ilock_flags);
+ sc->ilock_flags = 0;
+ xfs_ilock(dp, XFS_IOLOCK_SHARED);
+
+ /* Go looking for our dentry. */
+ error = xfs_scrub_parent_count_parent_dentries(sc, dp, &nlink);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ goto out_unlock;
+
+ /* Drop the parent lock, relock this inode. */
+ xfs_iunlock(dp, XFS_IOLOCK_SHARED);
+ sc->ilock_flags = XFS_IOLOCK_EXCL;
+ xfs_ilock(sc->ip, sc->ilock_flags);
+
+ /*
+ * If we're an unlinked directory, the parent /won't/ have a link
+ * to us. Otherwise, it should have one link. We have to re-set
+ * it here because we dropped the lock on sc->ip.
+ */
+ expected_nlink = VFS_I(sc->ip)->i_nlink == 0 ? 0 : 1;
+
+ /* Look up '..' to see if the inode changed. */
+ error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ goto out_rele;
+
+ /* Drat, parent changed. Try again! */
+ if (dnum != dp->i_ino) {
+ iput(VFS_I(dp));
+ *try_again = true;
+ return 0;
+ }
+ iput(VFS_I(dp));
+
+ /*
+ * '..' didn't change, so check that there was only one entry
+ * for us in the parent.
+ */
+ if (nlink != expected_nlink)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ return error;
+
+out_unlock:
+ xfs_iunlock(dp, XFS_IOLOCK_SHARED);
+out_rele:
+ iput(VFS_I(dp));
+out:
+ return error;
+}
+
+/* Scrub a parent pointer. */
+int
+xfs_scrub_parent(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_mount *mp = sc->mp;
+ xfs_ino_t dnum;
+ bool try_again;
+ int tries = 0;
+ int error = 0;
+
+ /*
+ * If we're a directory, check that the '..' link points up to
+ * a directory that has one entry pointing to us.
+ */
+ if (!S_ISDIR(VFS_I(sc->ip)->i_mode))
+ return -ENOENT;
+
+ /* We're not a special inode, are we? */
+ if (!xfs_verify_dir_ino(mp, sc->ip->i_ino)) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ goto out;
+ }
+
+ /*
+ * The VFS grabs a read or write lock via i_rwsem before it reads
+ * or writes to a directory. If we've gotten this far we've
+ * already obtained IOLOCK_EXCL, which (since 4.10) is the same as
+ * getting a write lock on i_rwsem. Therefore, it is safe for us
+ * to drop the ILOCK here in order to do directory lookups.
+ */
+ sc->ilock_flags &= ~(XFS_ILOCK_EXCL | XFS_MMAPLOCK_EXCL);
+ xfs_iunlock(sc->ip, XFS_ILOCK_EXCL | XFS_MMAPLOCK_EXCL);
+
+ /* Look up '..' */
+ error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ goto out;
+ if (!xfs_verify_dir_ino(mp, dnum)) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ goto out;
+ }
+
+ /* Is this the root dir? Then '..' must point to itself. */
+ if (sc->ip == mp->m_rootip) {
+ if (sc->ip->i_ino != mp->m_sb.sb_rootino ||
+ sc->ip->i_ino != dnum)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ goto out;
+ }
+
+ do {
+ error = xfs_scrub_parent_validate(sc, dnum, &try_again);
+ if (error)
+ goto out;
+ } while (try_again && ++tries < 20);
+
+ /*
+ * We gave it our best shot but failed, so mark this scrub
+ * incomplete. Userspace can decide if it wants to try again.
+ */
+ if (try_again && tries == 20)
+ xfs_scrub_set_incomplete(sc);
+out:
+ return error;
+}
diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c
new file mode 100644
index 000000000000..8e58ba842946
--- /dev/null
+++ b/fs/xfs/scrub/quota.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_inode.h"
+#include "xfs_inode_fork.h"
+#include "xfs_alloc.h"
+#include "xfs_bmap.h"
+#include "xfs_quota.h"
+#include "xfs_qm.h"
+#include "xfs_dquot.h"
+#include "xfs_dquot_item.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/trace.h"
+
+/* Convert a scrub type code to a DQ flag, or return 0 if error. */
+static inline uint
+xfs_scrub_quota_to_dqtype(
+ struct xfs_scrub_context *sc)
+{
+ switch (sc->sm->sm_type) {
+ case XFS_SCRUB_TYPE_UQUOTA:
+ return XFS_DQ_USER;
+ case XFS_SCRUB_TYPE_GQUOTA:
+ return XFS_DQ_GROUP;
+ case XFS_SCRUB_TYPE_PQUOTA:
+ return XFS_DQ_PROJ;
+ default:
+ return 0;
+ }
+}
+
+/* Set us up to scrub a quota. */
+int
+xfs_scrub_setup_quota(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip)
+{
+ uint dqtype;
+
+ /*
+ * If userspace gave us an AG number or inode data, they don't
+ * know what they're doing. Get out.
+ */
+ if (sc->sm->sm_agno || sc->sm->sm_ino || sc->sm->sm_gen)
+ return -EINVAL;
+
+ dqtype = xfs_scrub_quota_to_dqtype(sc);
+ if (dqtype == 0)
+ return -EINVAL;
+ if (!xfs_this_quota_on(sc->mp, dqtype))
+ return -ENOENT;
+ return 0;
+}
+
+/* Quotas. */
+
+/* Scrub the fields in an individual quota item. */
+STATIC void
+xfs_scrub_quota_item(
+ struct xfs_scrub_context *sc,
+ uint dqtype,
+ struct xfs_dquot *dq,
+ xfs_dqid_t id)
+{
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_disk_dquot *d = &dq->q_core;
+ struct xfs_quotainfo *qi = mp->m_quotainfo;
+ xfs_fileoff_t offset;
+ unsigned long long bsoft;
+ unsigned long long isoft;
+ unsigned long long rsoft;
+ unsigned long long bhard;
+ unsigned long long ihard;
+ unsigned long long rhard;
+ unsigned long long bcount;
+ unsigned long long icount;
+ unsigned long long rcount;
+ xfs_ino_t fs_icount;
+
+ offset = id * qi->qi_dqperchunk;
+
+ /*
+ * We fed $id and DQNEXT into the xfs_qm_dqget call, which means
+ * that the actual dquot we got must either have the same id or
+ * the next higher id.
+ */
+ if (id > be32_to_cpu(d->d_id))
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+
+ /* Did we get the dquot type we wanted? */
+ if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES))
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+
+ if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0))
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+
+ /* Check the limits. */
+ bhard = be64_to_cpu(d->d_blk_hardlimit);
+ ihard = be64_to_cpu(d->d_ino_hardlimit);
+ rhard = be64_to_cpu(d->d_rtb_hardlimit);
+
+ bsoft = be64_to_cpu(d->d_blk_softlimit);
+ isoft = be64_to_cpu(d->d_ino_softlimit);
+ rsoft = be64_to_cpu(d->d_rtb_softlimit);
+
+ /*
+ * Warn if the hard limits are larger than the fs.
+ * Administrators can do this, though in production this seems
+ * suspect, which is why we flag it for review.
+ *
+ * Complain about corruption if the soft limit is greater than
+ * the hard limit.
+ */
+ if (bhard > mp->m_sb.sb_dblocks)
+ xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ if (bsoft > bhard)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+
+ if (ihard > mp->m_maxicount)
+ xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ if (isoft > ihard)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+
+ if (rhard > mp->m_sb.sb_rblocks)
+ xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ if (rsoft > rhard)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+
+ /* Check the resource counts. */
+ bcount = be64_to_cpu(d->d_bcount);
+ icount = be64_to_cpu(d->d_icount);
+ rcount = be64_to_cpu(d->d_rtbcount);
+ fs_icount = percpu_counter_sum(&mp->m_icount);
+
+ /*
+ * Check that usage doesn't exceed physical limits. However, on
+ * a reflink filesystem shared blocks are charged to every file
+ * that maps them, so the block count can legitimately exceed the
+ * physical space; flag that as a warning rather than corruption.
+ */
+ if (xfs_sb_version_hasreflink(&mp->m_sb)) {
+ if (mp->m_sb.sb_dblocks < bcount)
+ xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK,
+ offset);
+ } else {
+ if (mp->m_sb.sb_dblocks < bcount)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ offset);
+ }
+ if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
+
+ /*
+ * We can violate the hard limits if the admin suddenly sets a
+ * lower limit than the actual usage. However, we flag it for
+ * admin review.
+ */
+ if (id != 0 && bhard != 0 && bcount > bhard)
+ xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ if (id != 0 && ihard != 0 && icount > ihard)
+ xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ if (id != 0 && rhard != 0 && rcount > rhard)
+ xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+}
+
+/* Scrub all of a quota type's items. */
+int
+xfs_scrub_quota(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_bmbt_irec irec = { 0 };
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_inode *ip;
+ struct xfs_quotainfo *qi = mp->m_quotainfo;
+ struct xfs_dquot *dq;
+ xfs_fileoff_t max_dqid_off;
+ xfs_fileoff_t off = 0;
+ xfs_dqid_t id = 0;
+ uint dqtype;
+ int nimaps;
+ int error;
+
+ if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
+ return -ENOENT;
+
+ mutex_lock(&qi->qi_quotaofflock);
+ dqtype = xfs_scrub_quota_to_dqtype(sc);
+ if (!xfs_this_quota_on(sc->mp, dqtype)) {
+ error = -ENOENT;
+ goto out_unlock_quota;
+ }
+
+ /* Attach to the quota inode and set sc->ip so that reporting works. */
+ ip = xfs_quota_inode(sc->mp, dqtype);
+ sc->ip = ip;
+
+ /* Look for problem extents. */
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
+ xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino, NULL);
+ goto out_unlock_inode;
+ }
+ max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
+ while (1) {
+ if (xfs_scrub_should_terminate(sc, &error))
+ break;
+
+ off = irec.br_startoff + irec.br_blockcount;
+ nimaps = 1;
+ error = xfs_bmapi_read(ip, off, -1, &irec, &nimaps,
+ XFS_BMAPI_ENTIRE);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, off,
+ &error))
+ goto out_unlock_inode;
+ if (!nimaps)
+ break;
+ if (irec.br_startblock == HOLESTARTBLOCK)
+ continue;
+
+ /* Check the extent record doesn't point to crap. */
+ if (irec.br_startblock + irec.br_blockcount <=
+ irec.br_startblock)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ irec.br_startoff);
+ if (!xfs_verify_fsbno(mp, irec.br_startblock) ||
+ !xfs_verify_fsbno(mp, irec.br_startblock +
+ irec.br_blockcount - 1))
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
+ irec.br_startoff);
+
+ /*
+ * Unwritten extents or blocks mapped above the highest
+ * quota id shouldn't happen.
+ */
+ if (isnullstartblock(irec.br_startblock) ||
+ irec.br_startoff > max_dqid_off ||
+ irec.br_startoff + irec.br_blockcount > max_dqid_off + 1)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, off);
+ }
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ goto out;
+
+ /* Check all the quota items. */
+ while (id < ((xfs_dqid_t)-1ULL)) {
+ if (xfs_scrub_should_terminate(sc, &error))
+ break;
+
+ error = xfs_qm_dqget(mp, NULL, id, dqtype, XFS_QMOPT_DQNEXT,
+ &dq);
+ if (error == -ENOENT)
+ break;
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK,
+ id * qi->qi_dqperchunk, &error))
+ break;
+
+ xfs_scrub_quota_item(sc, dqtype, dq, id);
+
+ id = be32_to_cpu(dq->q_core.d_id) + 1;
+ xfs_qm_dqput(dq);
+ if (!id)
+ break;
+ }
+
+out:
+ /* We set sc->ip earlier, so make sure we clear it now. */
+ sc->ip = NULL;
+out_unlock_quota:
+ mutex_unlock(&qi->qi_quotaofflock);
+ return error;
+
+out_unlock_inode:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ goto out;
+}
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
new file mode 100644
index 000000000000..2f88a8d44bd0
--- /dev/null
+++ b/fs/xfs/scrub/refcount.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_alloc.h"
+#include "xfs_rmap.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/btree.h"
+#include "scrub/trace.h"
+
+/*
+ * Set us up to scrub reference count btrees.
+ */
+int
+xfs_scrub_setup_ag_refcountbt(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip)
+{
+ return xfs_scrub_setup_ag_btree(sc, ip, false);
+}
+
+/* Reference count btree scrubber. */
+
+/* Scrub a refcountbt record. */
+STATIC int
+xfs_scrub_refcountbt_rec(
+ struct xfs_scrub_btree *bs,
+ union xfs_btree_rec *rec)
+{
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ xfs_agblock_t bno;
+ xfs_extlen_t len;
+ xfs_nlink_t refcount;
+ bool has_cowflag;
+ int error = 0;
+
+ bno = be32_to_cpu(rec->refc.rc_startblock);
+ len = be32_to_cpu(rec->refc.rc_blockcount);
+ refcount = be32_to_cpu(rec->refc.rc_refcount);
+
+ /* Only CoW records can have refcount == 1. */
+ has_cowflag = (bno & XFS_REFC_COW_START);
+ if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ /* Check the extent. */
+ bno &= ~XFS_REFC_COW_START;
+ if (bno + len <= bno ||
+ !xfs_verify_agbno(mp, agno, bno) ||
+ !xfs_verify_agbno(mp, agno, bno + len - 1))
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ if (refcount == 0)
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ return error;
+}
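The cowflag handling above relies on how CoW staging extents are keyed. As a brief illustrative aside (assuming the usual definition of XFS_REFC_COW_START as the high bit of the 32-bit AG block number; the values below are hypothetical):

	xfs_agblock_t	real_bno = 100;				/* hypothetical block */
	xfs_agblock_t	key = real_bno | XFS_REFC_COW_START;	/* CoW staging key */
	bool		has_cowflag = (key & XFS_REFC_COW_START) != 0;	/* true */
	xfs_agblock_t	decoded = key & ~XFS_REFC_COW_START;	/* back to 100 */

Only such records may carry a refcount of exactly 1; a shared-extent record with refcount 1, or a CoW record with any other refcount, is flagged as corruption by the check above.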
+
+/* Scrub the refcount btree for some AG. */
+int
+xfs_scrub_refcountbt(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_owner_info oinfo;
+
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
+ return xfs_scrub_btree(sc, sc->sa.refc_cur, xfs_scrub_refcountbt_rec,
+ &oinfo, NULL);
+}
diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c
new file mode 100644
index 000000000000..97846c424690
--- /dev/null
+++ b/fs/xfs/scrub/rmap.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
+#include "xfs_rmap.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/btree.h"
+#include "scrub/trace.h"
+
+/*
+ * Set us up to scrub reverse mapping btrees.
+ */
+int
+xfs_scrub_setup_ag_rmapbt(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip)
+{
+ return xfs_scrub_setup_ag_btree(sc, ip, false);
+}
+
+/* Reverse-mapping scrubber. */
+
+/* Scrub an rmapbt record. */
+STATIC int
+xfs_scrub_rmapbt_rec(
+ struct xfs_scrub_btree *bs,
+ union xfs_btree_rec *rec)
+{
+ struct xfs_mount *mp = bs->cur->bc_mp;
+ struct xfs_rmap_irec irec;
+ xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
+ bool non_inode;
+ bool is_unwritten;
+ bool is_bmbt;
+ bool is_attr;
+ int error;
+
+ error = xfs_rmap_btrec_to_irec(rec, &irec);
+ if (!xfs_scrub_btree_process_error(bs->sc, bs->cur, 0, &error))
+ goto out;
+
+ /* Check extent. */
+ if (irec.rm_startblock + irec.rm_blockcount <= irec.rm_startblock)
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ if (irec.rm_owner == XFS_RMAP_OWN_FS) {
+ /*
+ * xfs_verify_agbno returns false for static fs metadata.
+ * Since that only exists at the start of the AG, validate
+ * that by hand.
+ */
+ if (irec.rm_startblock != 0 ||
+ irec.rm_blockcount != XFS_AGFL_BLOCK(mp) + 1)
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ } else {
+ /*
+ * Otherwise we must point somewhere past the static metadata
+ * but before the end of the FS. Run the regular check.
+ */
+ if (!xfs_verify_agbno(mp, agno, irec.rm_startblock) ||
+ !xfs_verify_agbno(mp, agno, irec.rm_startblock +
+ irec.rm_blockcount - 1))
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ }
+
+ /* Check flags. */
+ non_inode = XFS_RMAP_NON_INODE_OWNER(irec.rm_owner);
+ is_bmbt = irec.rm_flags & XFS_RMAP_BMBT_BLOCK;
+ is_attr = irec.rm_flags & XFS_RMAP_ATTR_FORK;
+ is_unwritten = irec.rm_flags & XFS_RMAP_UNWRITTEN;
+
+ if (is_bmbt && irec.rm_offset != 0)
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ if (non_inode && irec.rm_offset != 0)
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ if (is_unwritten && (is_bmbt || non_inode || is_attr))
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ if (non_inode && (is_bmbt || is_unwritten || is_attr))
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+
+ if (!non_inode) {
+ if (!xfs_verify_ino(mp, irec.rm_owner))
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ } else {
+ /* Non-inode owner within the magic values? */
+ if (irec.rm_owner <= XFS_RMAP_OWN_MIN ||
+ irec.rm_owner > XFS_RMAP_OWN_FS)
+ xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ }
+out:
+ return error;
+}
+
+/* Scrub the rmap btree for some AG. */
+int
+xfs_scrub_rmapbt(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_owner_info oinfo;
+
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
+ return xfs_scrub_btree(sc, sc->sa.rmap_cur, xfs_scrub_rmapbt_rec,
+ &oinfo, NULL);
+}
diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c
new file mode 100644
index 000000000000..c6fedb698008
--- /dev/null
+++ b/fs/xfs/scrub/rtbitmap.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_alloc.h"
+#include "xfs_rtalloc.h"
+#include "xfs_inode.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/trace.h"
+
+/* Set us up with the realtime metadata locked. */
+int
+xfs_scrub_setup_rt(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = sc->mp;
+ int error = 0;
+
+ /*
+ * If userspace gave us an AG number or inode data, they don't
+ * know what they're doing. Get out.
+ */
+ if (sc->sm->sm_agno || sc->sm->sm_ino || sc->sm->sm_gen)
+ return -EINVAL;
+
+ error = xfs_scrub_setup_fs(sc, ip);
+ if (error)
+ return error;
+
+ sc->ilock_flags = XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP;
+ sc->ip = mp->m_rbmip;
+ xfs_ilock(sc->ip, sc->ilock_flags);
+
+ return 0;
+}
+
+/* Realtime bitmap. */
+
+/* Scrub a free extent record from the realtime bitmap. */
+STATIC int
+xfs_scrub_rtbitmap_rec(
+ struct xfs_trans *tp,
+ struct xfs_rtalloc_rec *rec,
+ void *priv)
+{
+ struct xfs_scrub_context *sc = priv;
+
+ if (rec->ar_startblock + rec->ar_blockcount <= rec->ar_startblock ||
+ !xfs_verify_rtbno(sc->mp, rec->ar_startblock) ||
+ !xfs_verify_rtbno(sc->mp, rec->ar_startblock +
+ rec->ar_blockcount - 1))
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ return 0;
+}
+
+/* Scrub the realtime bitmap. */
+int
+xfs_scrub_rtbitmap(
+ struct xfs_scrub_context *sc)
+{
+ int error;
+
+ error = xfs_rtalloc_query_all(sc->tp, xfs_scrub_rtbitmap_rec, sc);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ goto out;
+
+out:
+ return error;
+}
+
+/* Scrub the realtime summary. */
+int
+xfs_scrub_rtsummary(
+ struct xfs_scrub_context *sc)
+{
+ /* XXX: implement this some day */
+ return -ENOENT;
+}
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
new file mode 100644
index 000000000000..9c42c4efd01e
--- /dev/null
+++ b/fs/xfs/scrub/scrub.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_inode.h"
+#include "xfs_icache.h"
+#include "xfs_itable.h"
+#include "xfs_alloc.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_refcount.h"
+#include "xfs_refcount_btree.h"
+#include "xfs_rmap.h"
+#include "xfs_rmap_btree.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/trace.h"
+#include "scrub/scrub.h"
+#include "scrub/btree.h"
+
+/*
+ * Online Scrub and Repair
+ *
+ * Traditionally, XFS (the kernel driver) did not know how to check or
+ * repair on-disk data structures. That task was left to the xfs_check
+ * and xfs_repair tools, both of which require taking the filesystem
+ * offline for a thorough but time consuming examination. Online
+ * scrub & repair, on the other hand, enables us to check the metadata
+ * for obvious errors while carefully stepping around the filesystem's
+ * ongoing operations, locking rules, etc.
+ *
+ * Given that most XFS metadata consist of records stored in a btree,
+ * most of the checking functions iterate the btree blocks themselves
+ * looking for irregularities. When a record block is encountered, each
+ * record can be checked for obviously bad values. Record values can
+ * also be cross-referenced against other btrees to look for potential
+ * misunderstandings between pieces of metadata.
+ *
+ * It is expected that the checkers responsible for per-AG metadata
+ * structures will lock the AG headers (AGI, AGF, AGFL), iterate the
+ * metadata structure, and perform any relevant cross-referencing before
+ * unlocking the AG and returning the results to userspace. These
+ * scrubbers must not keep an AG locked for too long to avoid tying up
+ * the block and inode allocators.
+ *
+ * Block maps and b-trees rooted in an inode present a special challenge
+ * because they can involve extents from any AG. The general scrubber
+ * structure of lock -> check -> xref -> unlock still holds, but AG
+ * locking order rules /must/ be obeyed to avoid deadlocks. The
+ * ordering rule, of course, is that we must lock in increasing AG
+ * order. Helper functions are provided to track which AG headers we've
+ * already locked. If we detect an imminent locking order violation, we
+ * can signal a potential deadlock, in which case the scrubber can jump
+ * out to the top level, lock all the AGs in order, and retry the scrub.
+ *
+ * For file data (directories, extended attributes, symlinks) scrub, we
+ * can simply lock the inode and walk the data. For btree data
+ * (directories and attributes) we follow the same btree-scrubbing
+ * strategy outlined previously to check the records.
+ *
+ * We use a bit of trickery with transactions to avoid buffer deadlocks
+ * if there is a cycle in the metadata. The basic problem is that
+ * travelling down a btree involves locking the current buffer at each
+ * tree level. If a pointer should somehow point back to a buffer that
+ * we've already examined, we will deadlock due to the second buffer
+ * locking attempt. Note however that grabbing a buffer in transaction
+ * context links the locked buffer to the transaction. If we try to
+ * re-grab the buffer in the context of the same transaction, we avoid
+ * the second lock attempt and continue. Between the verifier and the
+ * scrubber, something will notice that something is amiss and report
+ * the corruption. Therefore, each scrubber will allocate an empty
+ * transaction, attach buffers to it, and cancel the transaction at the
+ * end of the scrub run. Cancelling a non-dirty transaction simply
+ * unlocks the buffers.
+ *
+ * Scrub communicates several pieces of data back to userspace. The
+ * first is the error code (errno), which can be used to report
+ * operational errors in performing the scrub. The rest are flags set
+ * in the scrub context: if the data structure itself is corrupt, the
+ * CORRUPT flag will be set; if the metadata is correct but otherwise
+ * suboptimal, the PREEN flag will be set; questionable values that do
+ * not amount to corruption set the WARNING flag; and a scrub that
+ * could not examine everything sets the INCOMPLETE flag.
+ */
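A minimal sketch of the empty-transaction trick described above, assuming the xfs_trans_alloc_empty() helper; the real setup and teardown code lives in scrub/common.c and this fragment is illustrative only:

STATIC int
xfs_scrub_empty_trans_sketch(
	struct xfs_scrub_context	*sc,
	struct xfs_mount		*mp)
{
	int				error;

	/* No log reservation; the transaction only exists to hold buffers. */
	error = xfs_trans_alloc_empty(mp, &sc->tp);
	if (error)
		return error;

	/*
	 * Buffers read through sc->tp stay attached to the transaction, so
	 * re-grabbing the same block later reuses the held lock instead of
	 * deadlocking on a second lock attempt.
	 */

	/* Cancelling a never-dirtied transaction just unlocks the buffers. */
	xfs_trans_cancel(sc->tp);
	sc->tp = NULL;
	return 0;
}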
+
+/*
+ * Scrub probe -- userspace uses this to check whether we're willing to
+ * scrub or repair a given mountpoint. xfs_scrub will use it to probe
+ * the kernel's ability to scrub (and repair) the metadata. We do this
+ * by validating the ioctl inputs from userspace, preparing the
+ * filesystem for a scrub (or a repair) operation, and immediately
+ * returning to userspace. Userspace can use the returned errno and
+ * structure state to decide (in broad terms) whether scrub/repair are
+ * supported by the running kernel.
+ */
+static int
+xfs_scrub_probe(
+ struct xfs_scrub_context *sc)
+{
+ int error = 0;
+
+ if (sc->sm->sm_ino || sc->sm->sm_agno)
+ return -EINVAL;
+ if (xfs_scrub_should_terminate(sc, &error))
+ return error;
+
+ return 0;
+}
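From userspace, the probe could be driven along these lines (a sketch, assuming the XFS_IOC_SCRUB_METADATA ioctl, struct xfs_scrub_metadata, and XFS_SCRUB_TYPE_PROBE definitions added to the uapi headers earlier in this series; the header path is a guess):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>		/* assumed location of the scrub uapi bits */

static int
probe_scrub(const char *mntpoint)
{
	struct xfs_scrub_metadata	sm;
	int				fd, ret;

	fd = open(mntpoint, O_RDONLY | O_DIRECTORY);
	if (fd < 0)
		return -1;

	memset(&sm, 0, sizeof(sm));
	sm.sm_type = XFS_SCRUB_TYPE_PROBE;	/* entry 0 in meta_scrub_ops */

	ret = ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm);
	close(fd);
	return ret;	/* 0 means the kernel is willing to scrub */
}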
+
+/* Scrub setup and teardown */
+
+/* Free all the resources and finish the transactions. */
+STATIC int
+xfs_scrub_teardown(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip_in,
+ int error)
+{
+ xfs_scrub_ag_free(sc, &sc->sa);
+ if (sc->tp) {
+ xfs_trans_cancel(sc->tp);
+ sc->tp = NULL;
+ }
+ if (sc->ip) {
+ xfs_iunlock(sc->ip, sc->ilock_flags);
+ if (sc->ip != ip_in &&
+ !xfs_internal_inum(sc->mp, sc->ip->i_ino))
+ iput(VFS_I(sc->ip));
+ sc->ip = NULL;
+ }
+ if (sc->buf) {
+ kmem_free(sc->buf);
+ sc->buf = NULL;
+ }
+ return error;
+}
+
+/* Scrubbing dispatch. */
+
+static const struct xfs_scrub_meta_ops meta_scrub_ops[] = {
+ { /* ioctl presence test */
+ .setup = xfs_scrub_setup_fs,
+ .scrub = xfs_scrub_probe,
+ },
+ { /* superblock */
+ .setup = xfs_scrub_setup_ag_header,
+ .scrub = xfs_scrub_superblock,
+ },
+ { /* agf */
+ .setup = xfs_scrub_setup_ag_header,
+ .scrub = xfs_scrub_agf,
+ },
+ { /* agfl */
+ .setup = xfs_scrub_setup_ag_header,
+ .scrub = xfs_scrub_agfl,
+ },
+ { /* agi */
+ .setup = xfs_scrub_setup_ag_header,
+ .scrub = xfs_scrub_agi,
+ },
+ { /* bnobt */
+ .setup = xfs_scrub_setup_ag_allocbt,
+ .scrub = xfs_scrub_bnobt,
+ },
+ { /* cntbt */
+ .setup = xfs_scrub_setup_ag_allocbt,
+ .scrub = xfs_scrub_cntbt,
+ },
+ { /* inobt */
+ .setup = xfs_scrub_setup_ag_iallocbt,
+ .scrub = xfs_scrub_inobt,
+ },
+ { /* finobt */
+ .setup = xfs_scrub_setup_ag_iallocbt,
+ .scrub = xfs_scrub_finobt,
+ .has = xfs_sb_version_hasfinobt,
+ },
+ { /* rmapbt */
+ .setup = xfs_scrub_setup_ag_rmapbt,
+ .scrub = xfs_scrub_rmapbt,
+ .has = xfs_sb_version_hasrmapbt,
+ },
+ { /* refcountbt */
+ .setup = xfs_scrub_setup_ag_refcountbt,
+ .scrub = xfs_scrub_refcountbt,
+ .has = xfs_sb_version_hasreflink,
+ },
+ { /* inode record */
+ .setup = xfs_scrub_setup_inode,
+ .scrub = xfs_scrub_inode,
+ },
+ { /* inode data fork */
+ .setup = xfs_scrub_setup_inode_bmap,
+ .scrub = xfs_scrub_bmap_data,
+ },
+ { /* inode attr fork */
+ .setup = xfs_scrub_setup_inode_bmap,
+ .scrub = xfs_scrub_bmap_attr,
+ },
+ { /* inode CoW fork */
+ .setup = xfs_scrub_setup_inode_bmap,
+ .scrub = xfs_scrub_bmap_cow,
+ },
+ { /* directory */
+ .setup = xfs_scrub_setup_directory,
+ .scrub = xfs_scrub_directory,
+ },
+ { /* extended attributes */
+ .setup = xfs_scrub_setup_xattr,
+ .scrub = xfs_scrub_xattr,
+ },
+ { /* symbolic link */
+ .setup = xfs_scrub_setup_symlink,
+ .scrub = xfs_scrub_symlink,
+ },
+ { /* parent pointers */
+ .setup = xfs_scrub_setup_parent,
+ .scrub = xfs_scrub_parent,
+ },
+ { /* realtime bitmap */
+ .setup = xfs_scrub_setup_rt,
+ .scrub = xfs_scrub_rtbitmap,
+ .has = xfs_sb_version_hasrealtime,
+ },
+ { /* realtime summary */
+ .setup = xfs_scrub_setup_rt,
+ .scrub = xfs_scrub_rtsummary,
+ .has = xfs_sb_version_hasrealtime,
+ },
+ { /* user quota */
+ .setup = xfs_scrub_setup_quota,
+ .scrub = xfs_scrub_quota,
+ },
+ { /* group quota */
+ .setup = xfs_scrub_setup_quota,
+ .scrub = xfs_scrub_quota,
+ },
+ { /* project quota */
+ .setup = xfs_scrub_setup_quota,
+ .scrub = xfs_scrub_quota,
+ },
+};
+
+/* This isn't a stable feature, so warn about it at most once per day. */
+static inline void
+xfs_scrub_experimental_warning(
+ struct xfs_mount *mp)
+{
+ static struct ratelimit_state scrub_warning = RATELIMIT_STATE_INIT(
+ "xfs_scrub_warning", 86400 * HZ, 1);
+ ratelimit_set_flags(&scrub_warning, RATELIMIT_MSG_ON_RELEASE);
+
+ if (__ratelimit(&scrub_warning))
+ xfs_alert(mp,
+"EXPERIMENTAL online scrub feature in use. Use at your own risk!");
+}
+
+/* Dispatch metadata scrubbing. */
+int
+xfs_scrub_metadata(
+ struct xfs_inode *ip,
+ struct xfs_scrub_metadata *sm)
+{
+ struct xfs_scrub_context sc;
+ struct xfs_mount *mp = ip->i_mount;
+ const struct xfs_scrub_meta_ops *ops;
+ bool try_harder = false;
+ int error = 0;
+
+ trace_xfs_scrub_start(ip, sm, error);
+
+ /* Forbidden if we are shut down or mounted norecovery. */
+ error = -ESHUTDOWN;
+ if (XFS_FORCED_SHUTDOWN(mp))
+ goto out;
+ error = -ENOTRECOVERABLE;
+ if (mp->m_flags & XFS_MOUNT_NORECOVERY)
+ goto out;
+
+ /* Check our inputs. */
+ error = -EINVAL;
+ sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
+ if (sm->sm_flags & ~XFS_SCRUB_FLAGS_IN)
+ goto out;
+ if (memchr_inv(sm->sm_reserved, 0, sizeof(sm->sm_reserved)))
+ goto out;
+
+ /* Do we know about this type of metadata? */
+ error = -ENOENT;
+ if (sm->sm_type >= XFS_SCRUB_TYPE_NR)
+ goto out;
+ ops = &meta_scrub_ops[sm->sm_type];
+ if (ops->scrub == NULL)
+ goto out;
+
+ /*
+ * We won't scrub any filesystem that doesn't have the ability
+ * to record unwritten extents. The option was made the default in
+ * 2003, removed from mkfs in 2007, and cannot be disabled in
+ * v5, so if we find a filesystem without this flag it's either
+ * really old or totally unsupported. Avoid it either way.
+ * We also don't support v1-v3 filesystems, which aren't
+ * mountable.
+ */
+ error = -EOPNOTSUPP;
+ if (!xfs_sb_version_hasextflgbit(&mp->m_sb))
+ goto out;
+
+ /* Does this fs even support this type of metadata? */
+ error = -ENOENT;
+ if (ops->has && !ops->has(&mp->m_sb))
+ goto out;
+
+ /* We don't know how to repair anything yet. */
+ error = -EOPNOTSUPP;
+ if (sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
+ goto out;
+
+ xfs_scrub_experimental_warning(mp);
+
+retry_op:
+ /* Set up for the operation. */
+ memset(&sc, 0, sizeof(sc));
+ sc.mp = ip->i_mount;
+ sc.sm = sm;
+ sc.ops = ops;
+ sc.try_harder = try_harder;
+ sc.sa.agno = NULLAGNUMBER;
+ error = sc.ops->setup(&sc, ip);
+ if (error)
+ goto out_teardown;
+
+ /* Scrub for errors. */
+ error = sc.ops->scrub(&sc);
+ if (!try_harder && error == -EDEADLOCK) {
+ /*
+ * Scrubbers return -EDEADLOCK to mean 'try harder'.
+ * Tear down everything we hold, then set up again with
+ * preparation for worst-case scenarios.
+ */
+ error = xfs_scrub_teardown(&sc, ip, 0);
+ if (error)
+ goto out;
+ try_harder = true;
+ goto retry_op;
+ } else if (error)
+ goto out_teardown;
+
+ if (sc.sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
+ XFS_SCRUB_OFLAG_XCORRUPT))
+ xfs_alert_ratelimited(mp, "Corruption detected during scrub.");
+
+out_teardown:
+ error = xfs_scrub_teardown(&sc, ip, error);
+out:
+ trace_xfs_scrub_done(ip, sm, error);
+ if (error == -EFSCORRUPTED || error == -EFSBADCRC) {
+ sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ error = 0;
+ }
+ return error;
+}
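The ioctl plumbing that calls xfs_scrub_metadata() lives elsewhere in this series; a sketch of what such a handler might look like (the function name and its placement in xfs_ioctl.c are assumptions):

STATIC int
xfs_ioc_scrub_metadata_sketch(
	struct xfs_inode	*ip,
	void			__user *arg)
{
	struct xfs_scrub_metadata	scrub;
	int				error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&scrub, arg, sizeof(scrub)))
		return -EFAULT;

	error = xfs_scrub_metadata(ip, &scrub);
	if (error)
		return error;

	/* Hand the OFLAG_* results back to userspace. */
	if (copy_to_user(arg, &scrub, sizeof(scrub)))
		return -EFAULT;
	return 0;
}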
diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
new file mode 100644
index 000000000000..e9ec041cf713
--- /dev/null
+++ b/fs/xfs/scrub/scrub.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __XFS_SCRUB_SCRUB_H__
+#define __XFS_SCRUB_SCRUB_H__
+
+struct xfs_scrub_context;
+
+struct xfs_scrub_meta_ops {
+ /* Acquire whatever resources are needed for the operation. */
+ int (*setup)(struct xfs_scrub_context *,
+ struct xfs_inode *);
+
+ /* Examine metadata for errors. */
+ int (*scrub)(struct xfs_scrub_context *);
+
+ /* Decide if we even have this piece of metadata. */
+ bool (*has)(struct xfs_sb *);
+};
+
+/* Buffer pointers and btree cursors for an entire AG. */
+struct xfs_scrub_ag {
+ xfs_agnumber_t agno;
+
+ /* AG btree roots */
+ struct xfs_buf *agf_bp;
+ struct xfs_buf *agfl_bp;
+ struct xfs_buf *agi_bp;
+
+ /* AG btrees */
+ struct xfs_btree_cur *bno_cur;
+ struct xfs_btree_cur *cnt_cur;
+ struct xfs_btree_cur *ino_cur;
+ struct xfs_btree_cur *fino_cur;
+ struct xfs_btree_cur *rmap_cur;
+ struct xfs_btree_cur *refc_cur;
+};
+
+struct xfs_scrub_context {
+ /* General scrub state. */
+ struct xfs_mount *mp;
+ struct xfs_scrub_metadata *sm;
+ const struct xfs_scrub_meta_ops *ops;
+ struct xfs_trans *tp;
+ struct xfs_inode *ip;
+ void *buf;
+ uint ilock_flags;
+ bool try_harder;
+
+ /* State tracking for single-AG operations. */
+ struct xfs_scrub_ag sa;
+};
+
+/* Metadata scrubbers */
+int xfs_scrub_tester(struct xfs_scrub_context *sc);
+int xfs_scrub_superblock(struct xfs_scrub_context *sc);
+int xfs_scrub_agf(struct xfs_scrub_context *sc);
+int xfs_scrub_agfl(struct xfs_scrub_context *sc);
+int xfs_scrub_agi(struct xfs_scrub_context *sc);
+int xfs_scrub_bnobt(struct xfs_scrub_context *sc);
+int xfs_scrub_cntbt(struct xfs_scrub_context *sc);
+int xfs_scrub_inobt(struct xfs_scrub_context *sc);
+int xfs_scrub_finobt(struct xfs_scrub_context *sc);
+int xfs_scrub_rmapbt(struct xfs_scrub_context *sc);
+int xfs_scrub_refcountbt(struct xfs_scrub_context *sc);
+int xfs_scrub_inode(struct xfs_scrub_context *sc);
+int xfs_scrub_bmap_data(struct xfs_scrub_context *sc);
+int xfs_scrub_bmap_attr(struct xfs_scrub_context *sc);
+int xfs_scrub_bmap_cow(struct xfs_scrub_context *sc);
+int xfs_scrub_directory(struct xfs_scrub_context *sc);
+int xfs_scrub_xattr(struct xfs_scrub_context *sc);
+int xfs_scrub_symlink(struct xfs_scrub_context *sc);
+int xfs_scrub_parent(struct xfs_scrub_context *sc);
+#ifdef CONFIG_XFS_RT
+int xfs_scrub_rtbitmap(struct xfs_scrub_context *sc);
+int xfs_scrub_rtsummary(struct xfs_scrub_context *sc);
+#else
+static inline int
+xfs_scrub_rtbitmap(struct xfs_scrub_context *sc)
+{
+ return -ENOENT;
+}
+static inline int
+xfs_scrub_rtsummary(struct xfs_scrub_context *sc)
+{
+ return -ENOENT;
+}
+#endif
+#ifdef CONFIG_XFS_QUOTA
+int xfs_scrub_quota(struct xfs_scrub_context *sc);
+#else
+static inline int
+xfs_scrub_quota(struct xfs_scrub_context *sc)
+{
+ return -ENOENT;
+}
+#endif
+
+#endif /* __XFS_SCRUB_SCRUB_H__ */
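The ops contract above can be illustrated with a hypothetical do-nothing scrubber; none of the names below exist in this patch, and only xfs_scrub_setup_fs() and xfs_sb_version_hascrc() are real helpers from the series:

static int
xfs_scrub_setup_example(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip)
{
	/* Grab the usual empty transaction and nothing else. */
	return xfs_scrub_setup_fs(sc, ip);
}

static int
xfs_scrub_example(
	struct xfs_scrub_context	*sc)
{
	/* Walk the records here; set OFLAG_CORRUPT via the helpers on error. */
	return 0;
}

static const struct xfs_scrub_meta_ops xfs_scrub_example_ops = {
	.setup	= xfs_scrub_setup_example,
	.scrub	= xfs_scrub_example,
	.has	= xfs_sb_version_hascrc,	/* only offer this on v5 filesystems */
};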
diff --git a/fs/xfs/scrub/symlink.c b/fs/xfs/scrub/symlink.c
new file mode 100644
index 000000000000..3aa3d60f7c16
--- /dev/null
+++ b/fs/xfs/scrub/symlink.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_btree.h"
+#include "xfs_bit.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_inode.h"
+#include "xfs_inode_fork.h"
+#include "xfs_symlink.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+#include "scrub/trace.h"
+
+/* Set us up to scrub a symbolic link. */
+int
+xfs_scrub_setup_symlink(
+ struct xfs_scrub_context *sc,
+ struct xfs_inode *ip)
+{
+ /* Allocate the buffer without the inode lock held. */
+ sc->buf = kmem_zalloc_large(XFS_SYMLINK_MAXLEN + 1, KM_SLEEP);
+ if (!sc->buf)
+ return -ENOMEM;
+
+ return xfs_scrub_setup_inode_contents(sc, ip, 0);
+}
+
+/* Symbolic links. */
+
+int
+xfs_scrub_symlink(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_inode *ip = sc->ip;
+ struct xfs_ifork *ifp;
+ loff_t len;
+ int error = 0;
+
+ if (!S_ISLNK(VFS_I(ip)->i_mode))
+ return -ENOENT;
+ ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ len = ip->i_d.di_size;
+
+ /* Plausible size? */
+ if (len > XFS_SYMLINK_MAXLEN || len <= 0) {
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ goto out;
+ }
+
+ /* Inline symlink? */
+ if (ifp->if_flags & XFS_IFINLINE) {
+ if (len > XFS_IFORK_DSIZE(ip) ||
+ len > strnlen(ifp->if_u1.if_data, XFS_IFORK_DSIZE(ip)))
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+ goto out;
+ }
+
+ /* Remote symlink; must read the contents. */
+ error = xfs_readlink_bmap_ilocked(sc->ip, sc->buf);
+ if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ goto out;
+ if (strnlen(sc->buf, XFS_SYMLINK_MAXLEN) < len)
+ xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
+out:
+ return error;
+}
diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c
new file mode 100644
index 000000000000..472080e75788
--- /dev/null
+++ b/fs/xfs/scrub/trace.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_da_format.h"
+#include "xfs_defer.h"
+#include "xfs_inode.h"
+#include "xfs_btree.h"
+#include "xfs_trans.h"
+#include "xfs_bit.h"
+#include "scrub/xfs_scrub.h"
+#include "scrub/scrub.h"
+#include "scrub/common.h"
+
+/* Figure out which block the btree cursor was pointing to. */
+static inline xfs_fsblock_t
+xfs_scrub_btree_cur_fsbno(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ if (level < cur->bc_nlevels && cur->bc_bufs[level])
+ return XFS_DADDR_TO_FSB(cur->bc_mp, cur->bc_bufs[level]->b_bn);
+ else if (level == cur->bc_nlevels - 1 &&
+ cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ return XFS_INO_TO_FSB(cur->bc_mp, cur->bc_private.b.ip->i_ino);
+ else if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS))
+ return XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno, 0);
+ return NULLFSBLOCK;
+}
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "scrub/trace.h"
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
new file mode 100644
index 000000000000..c4ebfb5c1ee8
--- /dev/null
+++ b/fs/xfs/scrub/trace.h
@@ -0,0 +1,499 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xfs_scrub
+
+#if !defined(_TRACE_XFS_SCRUB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_XFS_SCRUB_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "xfs_bit.h"
+
+DECLARE_EVENT_CLASS(xfs_scrub_class,
+ TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm,
+ int error),
+ TP_ARGS(ip, sm, error),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(unsigned int, type)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_ino_t, inum)
+ __field(unsigned int, gen)
+ __field(unsigned int, flags)
+ __field(int, error)
+ ),
+ TP_fast_assign(
+ __entry->dev = ip->i_mount->m_super->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->type = sm->sm_type;
+ __entry->agno = sm->sm_agno;
+ __entry->inum = sm->sm_ino;
+ __entry->gen = sm->sm_gen;
+ __entry->flags = sm->sm_flags;
+ __entry->error = error;
+ ),
+ TP_printk("dev %d:%d ino %llu type %u agno %u inum %llu gen %u flags 0x%x error %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->type,
+ __entry->agno,
+ __entry->inum,
+ __entry->gen,
+ __entry->flags,
+ __entry->error)
+)
+#define DEFINE_SCRUB_EVENT(name) \
+DEFINE_EVENT(xfs_scrub_class, name, \
+ TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm, \
+ int error), \
+ TP_ARGS(ip, sm, error))
+
+DEFINE_SCRUB_EVENT(xfs_scrub_start);
+DEFINE_SCRUB_EVENT(xfs_scrub_done);
+DEFINE_SCRUB_EVENT(xfs_scrub_deadlock_retry);
+
+TRACE_EVENT(xfs_scrub_op_error,
+ TP_PROTO(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
+ xfs_agblock_t bno, int error, void *ret_ip),
+ TP_ARGS(sc, agno, bno, error, ret_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, type)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, bno)
+ __field(int, error)
+ __field(void *, ret_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = sc->mp->m_super->s_dev;
+ __entry->type = sc->sm->sm_type;
+ __entry->agno = agno;
+ __entry->bno = bno;
+ __entry->error = error;
+ __entry->ret_ip = ret_ip;
+ ),
+ TP_printk("dev %d:%d type %u agno %u agbno %u error %d ret_ip %pF",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->type,
+ __entry->agno,
+ __entry->bno,
+ __entry->error,
+ __entry->ret_ip)
+);
+
+TRACE_EVENT(xfs_scrub_file_op_error,
+ TP_PROTO(struct xfs_scrub_context *sc, int whichfork,
+ xfs_fileoff_t offset, int error, void *ret_ip),
+ TP_ARGS(sc, whichfork, offset, error, ret_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(int, whichfork)
+ __field(unsigned int, type)
+ __field(xfs_fileoff_t, offset)
+ __field(int, error)
+ __field(void *, ret_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = sc->ip->i_mount->m_super->s_dev;
+ __entry->ino = sc->ip->i_ino;
+ __entry->whichfork = whichfork;
+ __entry->type = sc->sm->sm_type;
+ __entry->offset = offset;
+ __entry->error = error;
+ __entry->ret_ip = ret_ip;
+ ),
+ TP_printk("dev %d:%d ino %llu fork %d type %u offset %llu error %d ret_ip %pF",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->whichfork,
+ __entry->type,
+ __entry->offset,
+ __entry->error,
+ __entry->ret_ip)
+);
+
+DECLARE_EVENT_CLASS(xfs_scrub_block_error_class,
+ TP_PROTO(struct xfs_scrub_context *sc, xfs_daddr_t daddr, void *ret_ip),
+ TP_ARGS(sc, daddr, ret_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, type)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, bno)
+ __field(void *, ret_ip)
+ ),
+ TP_fast_assign(
+ xfs_fsblock_t fsbno;
+ xfs_agnumber_t agno;
+ xfs_agblock_t bno;
+
+ fsbno = XFS_DADDR_TO_FSB(sc->mp, daddr);
+ agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
+ bno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
+
+ __entry->dev = sc->mp->m_super->s_dev;
+ __entry->type = sc->sm->sm_type;
+ __entry->agno = agno;
+ __entry->bno = bno;
+ __entry->ret_ip = ret_ip;
+ ),
+ TP_printk("dev %d:%d type %u agno %u agbno %u ret_ip %pF",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->type,
+ __entry->agno,
+ __entry->bno,
+ __entry->ret_ip)
+)
+
+#define DEFINE_SCRUB_BLOCK_ERROR_EVENT(name) \
+DEFINE_EVENT(xfs_scrub_block_error_class, name, \
+ TP_PROTO(struct xfs_scrub_context *sc, xfs_daddr_t daddr, \
+ void *ret_ip), \
+ TP_ARGS(sc, daddr, ret_ip))
+
+DEFINE_SCRUB_BLOCK_ERROR_EVENT(xfs_scrub_block_error);
+DEFINE_SCRUB_BLOCK_ERROR_EVENT(xfs_scrub_block_preen);
+
+DECLARE_EVENT_CLASS(xfs_scrub_ino_error_class,
+ TP_PROTO(struct xfs_scrub_context *sc, xfs_ino_t ino, xfs_daddr_t daddr,
+ void *ret_ip),
+ TP_ARGS(sc, ino, daddr, ret_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(unsigned int, type)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, bno)
+ __field(void *, ret_ip)
+ ),
+ TP_fast_assign(
+ xfs_fsblock_t fsbno;
+ xfs_agnumber_t agno;
+ xfs_agblock_t bno;
+
+ if (daddr) {
+ fsbno = XFS_DADDR_TO_FSB(sc->mp, daddr);
+ agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
+ bno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
+ } else {
+ agno = XFS_INO_TO_AGNO(sc->mp, ino);
+ bno = XFS_AGINO_TO_AGBNO(sc->mp,
+ XFS_INO_TO_AGINO(sc->mp, ino));
+ }
+
+ __entry->dev = sc->mp->m_super->s_dev;
+ __entry->ino = ino;
+ __entry->type = sc->sm->sm_type;
+ __entry->agno = agno;
+ __entry->bno = bno;
+ __entry->ret_ip = ret_ip;
+ ),
+ TP_printk("dev %d:%d ino %llu type %u agno %u agbno %u ret_ip %pF",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->type,
+ __entry->agno,
+ __entry->bno,
+ __entry->ret_ip)
+)
+
+#define DEFINE_SCRUB_INO_ERROR_EVENT(name) \
+DEFINE_EVENT(xfs_scrub_ino_error_class, name, \
+ TP_PROTO(struct xfs_scrub_context *sc, xfs_ino_t ino, \
+ xfs_daddr_t daddr, void *ret_ip), \
+ TP_ARGS(sc, ino, daddr, ret_ip))
+
+DEFINE_SCRUB_INO_ERROR_EVENT(xfs_scrub_ino_error);
+DEFINE_SCRUB_INO_ERROR_EVENT(xfs_scrub_ino_preen);
+DEFINE_SCRUB_INO_ERROR_EVENT(xfs_scrub_ino_warning);
+
+DECLARE_EVENT_CLASS(xfs_scrub_fblock_error_class,
+ TP_PROTO(struct xfs_scrub_context *sc, int whichfork,
+ xfs_fileoff_t offset, void *ret_ip),
+ TP_ARGS(sc, whichfork, offset, ret_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(int, whichfork)
+ __field(unsigned int, type)
+ __field(xfs_fileoff_t, offset)
+ __field(void *, ret_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = sc->ip->i_mount->m_super->s_dev;
+ __entry->ino = sc->ip->i_ino;
+ __entry->whichfork = whichfork;
+ __entry->type = sc->sm->sm_type;
+ __entry->offset = offset;
+ __entry->ret_ip = ret_ip;
+ ),
+ TP_printk("dev %d:%d ino %llu fork %d type %u offset %llu ret_ip %pF",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->whichfork,
+ __entry->type,
+ __entry->offset,
+ __entry->ret_ip)
+);
+
+#define DEFINE_SCRUB_FBLOCK_ERROR_EVENT(name) \
+DEFINE_EVENT(xfs_scrub_fblock_error_class, name, \
+ TP_PROTO(struct xfs_scrub_context *sc, int whichfork, \
+ xfs_fileoff_t offset, void *ret_ip), \
+ TP_ARGS(sc, whichfork, offset, ret_ip))
+
+DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xfs_scrub_fblock_error);
+DEFINE_SCRUB_FBLOCK_ERROR_EVENT(xfs_scrub_fblock_warning);
+
+TRACE_EVENT(xfs_scrub_incomplete,
+ TP_PROTO(struct xfs_scrub_context *sc, void *ret_ip),
+ TP_ARGS(sc, ret_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, type)
+ __field(void *, ret_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = sc->mp->m_super->s_dev;
+ __entry->type = sc->sm->sm_type;
+ __entry->ret_ip = ret_ip;
+ ),
+ TP_printk("dev %d:%d type %u ret_ip %pF",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->type,
+ __entry->ret_ip)
+);
+
+TRACE_EVENT(xfs_scrub_btree_op_error,
+ TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+ int level, int error, void *ret_ip),
+ TP_ARGS(sc, cur, level, error, ret_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, type)
+ __field(xfs_btnum_t, btnum)
+ __field(int, level)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, bno)
+ __field(int, ptr)
+ __field(int, error)
+ __field(void *, ret_ip)
+ ),
+ TP_fast_assign(
+ xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+
+ __entry->dev = sc->mp->m_super->s_dev;
+ __entry->type = sc->sm->sm_type;
+ __entry->btnum = cur->bc_btnum;
+ __entry->level = level;
+ __entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
+ __entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
+ __entry->ptr = cur->bc_ptrs[level];
+ __entry->error = error;
+ __entry->ret_ip = ret_ip;
+ ),
+ TP_printk("dev %d:%d type %u btnum %d level %d ptr %d agno %u agbno %u error %d ret_ip %pF",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->type,
+ __entry->btnum,
+ __entry->level,
+ __entry->ptr,
+ __entry->agno,
+ __entry->bno,
+ __entry->error,
+ __entry->ret_ip)
+);
+
+TRACE_EVENT(xfs_scrub_ifork_btree_op_error,
+ TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+ int level, int error, void *ret_ip),
+ TP_ARGS(sc, cur, level, error, ret_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(int, whichfork)
+ __field(unsigned int, type)
+ __field(xfs_btnum_t, btnum)
+ __field(int, level)
+ __field(int, ptr)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, bno)
+ __field(int, error)
+ __field(void *, ret_ip)
+ ),
+ TP_fast_assign(
+ xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+ __entry->dev = sc->mp->m_super->s_dev;
+ __entry->ino = sc->ip->i_ino;
+ __entry->whichfork = cur->bc_private.b.whichfork;
+ __entry->type = sc->sm->sm_type;
+ __entry->btnum = cur->bc_btnum;
+ __entry->level = level;
+ __entry->ptr = cur->bc_ptrs[level];
+ __entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
+ __entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
+ __entry->error = error;
+ __entry->ret_ip = ret_ip;
+ ),
+ TP_printk("dev %d:%d ino %llu fork %d type %u btnum %d level %d ptr %d agno %u agbno %u error %d ret_ip %pF",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->whichfork,
+ __entry->type,
+ __entry->btnum,
+ __entry->level,
+ __entry->ptr,
+ __entry->agno,
+ __entry->bno,
+ __entry->error,
+ __entry->ret_ip)
+);
+
+TRACE_EVENT(xfs_scrub_btree_error,
+ TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+ int level, void *ret_ip),
+ TP_ARGS(sc, cur, level, ret_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, type)
+ __field(xfs_btnum_t, btnum)
+ __field(int, level)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, bno)
+ __field(int, ptr)
+ __field(void *, ret_ip)
+ ),
+ TP_fast_assign(
+ xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+ __entry->dev = sc->mp->m_super->s_dev;
+ __entry->type = sc->sm->sm_type;
+ __entry->btnum = cur->bc_btnum;
+ __entry->level = level;
+ __entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
+ __entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
+ __entry->ptr = cur->bc_ptrs[level];
+ __entry->ret_ip = ret_ip;
+ ),
+ TP_printk("dev %d:%d type %u btnum %d level %d ptr %d agno %u agbno %u ret_ip %pF",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->type,
+ __entry->btnum,
+ __entry->level,
+ __entry->ptr,
+ __entry->agno,
+ __entry->bno,
+ __entry->ret_ip)
+);
+
+TRACE_EVENT(xfs_scrub_ifork_btree_error,
+ TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+ int level, void *ret_ip),
+ TP_ARGS(sc, cur, level, ret_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(int, whichfork)
+ __field(unsigned int, type)
+ __field(xfs_btnum_t, btnum)
+ __field(int, level)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, bno)
+ __field(int, ptr)
+ __field(void *, ret_ip)
+ ),
+ TP_fast_assign(
+ xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+ __entry->dev = sc->mp->m_super->s_dev;
+ __entry->ino = sc->ip->i_ino;
+ __entry->whichfork = cur->bc_private.b.whichfork;
+ __entry->type = sc->sm->sm_type;
+ __entry->btnum = cur->bc_btnum;
+ __entry->level = level;
+ __entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
+ __entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
+ __entry->ptr = cur->bc_ptrs[level];
+ __entry->ret_ip = ret_ip;
+ ),
+ TP_printk("dev %d:%d ino %llu fork %d type %u btnum %d level %d ptr %d agno %u agbno %u ret_ip %pF",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->whichfork,
+ __entry->type,
+ __entry->btnum,
+ __entry->level,
+ __entry->ptr,
+ __entry->agno,
+ __entry->bno,
+ __entry->ret_ip)
+);
+
+DECLARE_EVENT_CLASS(xfs_scrub_sbtree_class,
+ TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur,
+ int level),
+ TP_ARGS(sc, cur, level),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, type)
+ __field(xfs_btnum_t, btnum)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, bno)
+ __field(int, level)
+ __field(int, nlevels)
+ __field(int, ptr)
+ ),
+ TP_fast_assign(
+ xfs_fsblock_t fsbno = xfs_scrub_btree_cur_fsbno(cur, level);
+
+ __entry->dev = sc->mp->m_super->s_dev;
+ __entry->type = sc->sm->sm_type;
+ __entry->btnum = cur->bc_btnum;
+ __entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsbno);
+ __entry->bno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
+ __entry->level = level;
+ __entry->nlevels = cur->bc_nlevels;
+ __entry->ptr = cur->bc_ptrs[level];
+ ),
+ TP_printk("dev %d:%d type %u btnum %d agno %u agbno %u level %d nlevels %d ptr %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->type,
+ __entry->btnum,
+ __entry->agno,
+ __entry->bno,
+ __entry->level,
+ __entry->nlevels,
+ __entry->ptr)
+)
+#define DEFINE_SCRUB_SBTREE_EVENT(name) \
+DEFINE_EVENT(xfs_scrub_sbtree_class, name, \
+ TP_PROTO(struct xfs_scrub_context *sc, struct xfs_btree_cur *cur, \
+ int level), \
+ TP_ARGS(sc, cur, level))
+
+DEFINE_SCRUB_SBTREE_EVENT(xfs_scrub_btree_rec);
+DEFINE_SCRUB_SBTREE_EVENT(xfs_scrub_btree_key);
+
+#endif /* _TRACE_XFS_SCRUB_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE scrub/trace
+#include <trace/define_trace.h>
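A minimal sketch of how a checker might fire one of the events above when it finds a bad btree record; the helper shape is an assumption, only the tracepoint call itself comes from this header:

static void
xfs_scrub_btree_set_corrupt_sketch(
	struct xfs_scrub_context	*sc,
	struct xfs_btree_cur		*cur,
	int				level)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xfs_scrub_btree_error(sc, cur, level, __return_address);
}

With tracing enabled, the TRACE_SYSTEM name means these events show up under /sys/kernel/debug/tracing/events/xfs_scrub/ like any other tracepoint group.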
diff --git a/fs/xfs/scrub/xfs_scrub.h b/fs/xfs/scrub/xfs_scrub.h
new file mode 100644
index 000000000000..e00e0eadac6a
--- /dev/null
+++ b/fs/xfs/scrub/xfs_scrub.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __XFS_SCRUB_H__
+#define __XFS_SCRUB_H__
+
+#ifndef CONFIG_XFS_ONLINE_SCRUB
+# define xfs_scrub_metadata(ip, sm) (-ENOTTY)
+#else
+int xfs_scrub_metadata(struct xfs_inode *ip, struct xfs_scrub_metadata *sm);
+#endif /* CONFIG_XFS_ONLINE_SCRUB */
+
+#endif /* __XFS_SCRUB_H__ */
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index 80cd0fd86783..5ff7f228d616 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -19,7 +19,6 @@
#define __XFS_H__
#ifdef CONFIG_XFS_DEBUG
-#define STATIC
#define DEBUG 1
#define XFS_BUF_LOCK_TRACKING 1
#endif
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index 5d5a5e277f35..d07bf27451c9 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -48,6 +48,8 @@ struct xfs_attr_list_context;
#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */
#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
+#define ATTR_INCOMPLETE 0x4000 /* [kernel] return INCOMPLETE attr keys */
+
#define XFS_ATTR_FLAGS \
{ ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
{ ATTR_ROOT, "ROOT" }, \
@@ -56,7 +58,8 @@ struct xfs_attr_list_context;
{ ATTR_CREATE, "CREATE" }, \
{ ATTR_REPLACE, "REPLACE" }, \
{ ATTR_KERNOTIME, "KERNOTIME" }, \
- { ATTR_KERNOVAL, "KERNOVAL" }
+ { ATTR_KERNOVAL, "KERNOVAL" }, \
+ { ATTR_INCOMPLETE, "INCOMPLETE" }
/*
* The maximum size (into the kernel or returned from the kernel) of an
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index e3a950ed35a8..52818ea2eb50 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -251,47 +251,44 @@ xfs_attr3_node_inactive(
* traversal of the tree so we may deal with many blocks
* before we come back to this one.
*/
- error = xfs_da3_node_read(*trans, dp, child_fsb, -2, &child_bp,
- XFS_ATTR_FORK);
+ error = xfs_da3_node_read(*trans, dp, child_fsb, -1, &child_bp,
+ XFS_ATTR_FORK);
if (error)
return error;
- if (child_bp) {
- /* save for re-read later */
- child_blkno = XFS_BUF_ADDR(child_bp);
- /*
- * Invalidate the subtree, however we have to.
- */
- info = child_bp->b_addr;
- switch (info->magic) {
- case cpu_to_be16(XFS_DA_NODE_MAGIC):
- case cpu_to_be16(XFS_DA3_NODE_MAGIC):
- error = xfs_attr3_node_inactive(trans, dp,
- child_bp, level + 1);
- break;
- case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
- case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
- error = xfs_attr3_leaf_inactive(trans, dp,
- child_bp);
- break;
- default:
- error = -EIO;
- xfs_trans_brelse(*trans, child_bp);
- break;
- }
- if (error)
- return error;
+ /* save for re-read later */
+ child_blkno = XFS_BUF_ADDR(child_bp);
- /*
- * Remove the subsidiary block from the cache
- * and from the log.
- */
- error = xfs_da_get_buf(*trans, dp, 0, child_blkno,
- &child_bp, XFS_ATTR_FORK);
- if (error)
- return error;
- xfs_trans_binval(*trans, child_bp);
+ /*
+ * Invalidate the subtree by whatever means necessary.
+ */
+ info = child_bp->b_addr;
+ switch (info->magic) {
+ case cpu_to_be16(XFS_DA_NODE_MAGIC):
+ case cpu_to_be16(XFS_DA3_NODE_MAGIC):
+ error = xfs_attr3_node_inactive(trans, dp, child_bp,
+ level + 1);
+ break;
+ case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
+ case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
+ error = xfs_attr3_leaf_inactive(trans, dp, child_bp);
+ break;
+ default:
+ error = -EIO;
+ xfs_trans_brelse(*trans, child_bp);
+ break;
}
+ if (error)
+ return error;
+
+ /*
+ * Remove the subsidiary block from the cache and from the log.
+ */
+ error = xfs_da_get_buf(*trans, dp, 0, child_blkno, &child_bp,
+ XFS_ATTR_FORK);
+ if (error)
+ return error;
+ xfs_trans_binval(*trans, child_bp);
/*
* If we're not done, re-read the parent to get the next
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 7740c8a5e736..3e59a348ea71 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -204,19 +204,103 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
return 0;
}
+/*
+ * We didn't find the block & hash mentioned in the cursor state, so
+ * walk down the attr btree looking for the hash.
+ */
STATIC int
-xfs_attr_node_list(xfs_attr_list_context_t *context)
+xfs_attr_node_list_lookup(
+ struct xfs_attr_list_context *context,
+ struct attrlist_cursor_kern *cursor,
+ struct xfs_buf **pbp)
{
- attrlist_cursor_kern_t *cursor;
- xfs_attr_leafblock_t *leaf;
- xfs_da_intnode_t *node;
- struct xfs_attr3_icleaf_hdr leafhdr;
- struct xfs_da3_icnode_hdr nodehdr;
- struct xfs_da_node_entry *btree;
- int error, i;
- struct xfs_buf *bp;
- struct xfs_inode *dp = context->dp;
- struct xfs_mount *mp = dp->i_mount;
+ struct xfs_da3_icnode_hdr nodehdr;
+ struct xfs_da_intnode *node;
+ struct xfs_da_node_entry *btree;
+ struct xfs_inode *dp = context->dp;
+ struct xfs_mount *mp = dp->i_mount;
+ struct xfs_trans *tp = context->tp;
+ struct xfs_buf *bp;
+ int i;
+ int error = 0;
+ unsigned int expected_level = 0;
+ uint16_t magic;
+
+ ASSERT(*pbp == NULL);
+ cursor->blkno = 0;
+ for (;;) {
+ error = xfs_da3_node_read(tp, dp, cursor->blkno, -1, &bp,
+ XFS_ATTR_FORK);
+ if (error)
+ return error;
+ node = bp->b_addr;
+ magic = be16_to_cpu(node->hdr.info.magic);
+ if (magic == XFS_ATTR_LEAF_MAGIC ||
+ magic == XFS_ATTR3_LEAF_MAGIC)
+ break;
+ if (magic != XFS_DA_NODE_MAGIC &&
+ magic != XFS_DA3_NODE_MAGIC) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
+ node);
+ goto out_corruptbuf;
+ }
+
+ dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+
+ /* Tree taller than we can handle; bail out! */
+ if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
+ goto out_corruptbuf;
+
+ /* Check the level from the root node. */
+ if (cursor->blkno == 0)
+ expected_level = nodehdr.level - 1;
+ else if (expected_level != nodehdr.level)
+ goto out_corruptbuf;
+ else
+ expected_level--;
+
+ btree = dp->d_ops->node_tree_p(node);
+ for (i = 0; i < nodehdr.count; btree++, i++) {
+ if (cursor->hashval <= be32_to_cpu(btree->hashval)) {
+ cursor->blkno = be32_to_cpu(btree->before);
+ trace_xfs_attr_list_node_descend(context,
+ btree);
+ break;
+ }
+ }
+ xfs_trans_brelse(tp, bp);
+
+ if (i == nodehdr.count)
+ return 0;
+
+ /* We can't point back to the root. */
+ if (cursor->blkno == 0)
+ return -EFSCORRUPTED;
+ }
+
+ if (expected_level != 0)
+ goto out_corruptbuf;
+
+ *pbp = bp;
+ return 0;
+
+out_corruptbuf:
+ xfs_trans_brelse(tp, bp);
+ return -EFSCORRUPTED;
+}
+
+STATIC int
+xfs_attr_node_list(
+ struct xfs_attr_list_context *context)
+{
+ struct xfs_attr3_icleaf_hdr leafhdr;
+ struct attrlist_cursor_kern *cursor;
+ struct xfs_attr_leafblock *leaf;
+ struct xfs_da_intnode *node;
+ struct xfs_buf *bp;
+ struct xfs_inode *dp = context->dp;
+ struct xfs_mount *mp = dp->i_mount;
+ int error;
trace_xfs_attr_node_list(context);
@@ -277,47 +361,9 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
* Note that start of node block is same as start of leaf block.
*/
if (bp == NULL) {
- cursor->blkno = 0;
- for (;;) {
- uint16_t magic;
-
- error = xfs_da3_node_read(context->tp, dp,
- cursor->blkno, -1, &bp,
- XFS_ATTR_FORK);
- if (error)
- return error;
- node = bp->b_addr;
- magic = be16_to_cpu(node->hdr.info.magic);
- if (magic == XFS_ATTR_LEAF_MAGIC ||
- magic == XFS_ATTR3_LEAF_MAGIC)
- break;
- if (magic != XFS_DA_NODE_MAGIC &&
- magic != XFS_DA3_NODE_MAGIC) {
- XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
- XFS_ERRLEVEL_LOW,
- context->dp->i_mount,
- node);
- xfs_trans_brelse(context->tp, bp);
- return -EFSCORRUPTED;
- }
-
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
- for (i = 0; i < nodehdr.count; btree++, i++) {
- if (cursor->hashval
- <= be32_to_cpu(btree->hashval)) {
- cursor->blkno = be32_to_cpu(btree->before);
- trace_xfs_attr_list_node_descend(context,
- btree);
- break;
- }
- }
- if (i == nodehdr.count) {
- xfs_trans_brelse(context->tp, bp);
- return 0;
- }
- xfs_trans_brelse(context->tp, bp);
- }
+ error = xfs_attr_node_list_lookup(context, cursor, &bp);
+ if (error || !bp)
+ return error;
}
ASSERT(bp != NULL);
@@ -407,7 +453,8 @@ xfs_attr3_leaf_list_int(
cursor->offset = 0;
}
- if (entry->flags & XFS_ATTR_INCOMPLETE)
+ if ((entry->flags & XFS_ATTR_INCOMPLETE) &&
+ !(context->flags & ATTR_INCOMPLETE))
continue; /* skip incomplete entries */
if (entry->flags & XFS_ATTR_LOCAL) {
@@ -499,8 +546,8 @@ xfs_attr_list_int(
#define ATTR_ENTBASESIZE /* minimum bytes used by an attr */ \
(((struct attrlist_ent *) 0)->a_name - (char *) 0)
#define ATTR_ENTSIZE(namelen) /* actual bytes used by an attr */ \
- ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
- & ~(sizeof(u_int32_t)-1))
+ ((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(uint32_t)-1) \
+ & ~(sizeof(uint32_t)-1))
/*
* Format an attribute and copy it out to the user's buffer.
@@ -583,6 +630,10 @@ xfs_attr_list(
(cursor->hashval || cursor->blkno || cursor->offset))
return -EINVAL;
+ /* Only internal consumers can retrieve incomplete attrs. */
+ if (flags & ATTR_INCOMPLETE)
+ return -EINVAL;
+
/*
* Check for a properly aligned buffer.
*/
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 6503cfa44262..6d37ab43195f 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -229,15 +229,17 @@ xfs_bmap_count_leaves(
struct xfs_ifork *ifp,
xfs_filblks_t *count)
{
+ struct xfs_iext_cursor icur;
struct xfs_bmbt_irec got;
- xfs_extnum_t numrecs = 0, i = 0;
+ xfs_extnum_t numrecs = 0;
- while (xfs_iext_get_extent(ifp, i++, &got)) {
+ for_each_xfs_iext(ifp, &icur, &got) {
if (!isnullstartblock(got.br_startblock)) {
*count += got.br_blockcount;
numrecs++;
}
}
+
return numrecs;
}
@@ -405,125 +407,103 @@ xfs_bmap_count_blocks(
return 0;
}
-/*
- * returns 1 for success, 0 if we failed to map the extent.
- */
-STATIC int
-xfs_getbmapx_fix_eof_hole(
- xfs_inode_t *ip, /* xfs incore inode pointer */
- int whichfork,
- struct getbmapx *out, /* output structure */
- int prealloced, /* this is a file with
- * preallocated data space */
- int64_t end, /* last block requested */
- xfs_fsblock_t startblock,
- bool moretocome)
+static int
+xfs_getbmap_report_one(
+ struct xfs_inode *ip,
+ struct getbmapx *bmv,
+ struct kgetbmap *out,
+ int64_t bmv_end,
+ struct xfs_bmbt_irec *got)
{
- int64_t fixlen;
- xfs_mount_t *mp; /* file system mount point */
- xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_extnum_t lastx; /* last extent pointer */
- xfs_fileoff_t fileblock;
-
- if (startblock == HOLESTARTBLOCK) {
- mp = ip->i_mount;
- out->bmv_block = -1;
- fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
- fixlen -= out->bmv_offset;
- if (prealloced && out->bmv_offset + out->bmv_length == end) {
- /* Came to hole at EOF. Trim it. */
- if (fixlen <= 0)
- return 0;
- out->bmv_length = fixlen;
- }
+ struct kgetbmap *p = out + bmv->bmv_entries;
+ bool shared = false, trimmed = false;
+ int error;
+
+ error = xfs_reflink_trim_around_shared(ip, got, &shared, &trimmed);
+ if (error)
+ return error;
+
+ if (isnullstartblock(got->br_startblock) ||
+ got->br_startblock == DELAYSTARTBLOCK) {
+ /*
+ * Delalloc extents that start beyond EOF can occur due to
+ * speculative EOF allocation when the delalloc extent is larger
+ * than the largest freespace extent at conversion time. These
+ * extents cannot be converted by data writeback, so can exist
+ * here even if we are not supposed to be finding delalloc
+ * extents.
+ */
+ if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
+ ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
+
+ p->bmv_oflags |= BMV_OF_DELALLOC;
+ p->bmv_block = -2;
} else {
- if (startblock == DELAYSTARTBLOCK)
- out->bmv_block = -2;
- else
- out->bmv_block = xfs_fsb_to_db(ip, startblock);
- fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
- ifp = XFS_IFORK_PTR(ip, whichfork);
- if (!moretocome &&
- xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
- (lastx == xfs_iext_count(ifp) - 1))
- out->bmv_oflags |= BMV_OF_LAST;
+ p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
}
- return 1;
+ if (got->br_state == XFS_EXT_UNWRITTEN &&
+ (bmv->bmv_iflags & BMV_IF_PREALLOC))
+ p->bmv_oflags |= BMV_OF_PREALLOC;
+
+ if (shared)
+ p->bmv_oflags |= BMV_OF_SHARED;
+
+ p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
+ p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
+
+ bmv->bmv_offset = p->bmv_offset + p->bmv_length;
+ bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
+ bmv->bmv_entries++;
+ return 0;
}
-/* Adjust the reported bmap around shared/unshared extent transitions. */
-STATIC int
-xfs_getbmap_adjust_shared(
- struct xfs_inode *ip,
- int whichfork,
- struct xfs_bmbt_irec *map,
- struct getbmapx *out,
- struct xfs_bmbt_irec *next_map)
+static void
+xfs_getbmap_report_hole(
+ struct xfs_inode *ip,
+ struct getbmapx *bmv,
+ struct kgetbmap *out,
+ int64_t bmv_end,
+ xfs_fileoff_t bno,
+ xfs_fileoff_t end)
{
- struct xfs_mount *mp = ip->i_mount;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- xfs_agblock_t ebno;
- xfs_extlen_t elen;
- xfs_extlen_t nlen;
- int error;
+ struct kgetbmap *p = out + bmv->bmv_entries;
- next_map->br_startblock = NULLFSBLOCK;
- next_map->br_startoff = NULLFILEOFF;
- next_map->br_blockcount = 0;
+ if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
+ return;
- /* Only written data blocks can be shared. */
- if (!xfs_is_reflink_inode(ip) ||
- whichfork != XFS_DATA_FORK ||
- !xfs_bmap_is_real_extent(map))
- return 0;
+ p->bmv_block = -1;
+ p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
+ p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
- agno = XFS_FSB_TO_AGNO(mp, map->br_startblock);
- agbno = XFS_FSB_TO_AGBNO(mp, map->br_startblock);
- error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
- map->br_blockcount, &ebno, &elen, true);
- if (error)
- return error;
+ bmv->bmv_offset = p->bmv_offset + p->bmv_length;
+ bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
+ bmv->bmv_entries++;
+}
- if (ebno == NULLAGBLOCK) {
- /* No shared blocks at all. */
- return 0;
- } else if (agbno == ebno) {
- /*
- * Shared extent at (agbno, elen). Shrink the reported
- * extent length and prepare to move the start of map[i]
- * to agbno+elen, with the aim of (re)formatting the new
- * map[i] the next time through the inner loop.
- */
- out->bmv_length = XFS_FSB_TO_BB(mp, elen);
- out->bmv_oflags |= BMV_OF_SHARED;
- if (elen != map->br_blockcount) {
- *next_map = *map;
- next_map->br_startblock += elen;
- next_map->br_startoff += elen;
- next_map->br_blockcount -= elen;
- }
- map->br_blockcount -= elen;
- } else {
- /*
- * There's an unshared extent (agbno, ebno - agbno)
- * followed by shared extent at (ebno, elen). Shrink
- * the reported extent length to cover only the unshared
- * extent and prepare to move up the start of map[i] to
- * ebno, with the aim of (re)formatting the new map[i]
- * the next time through the inner loop.
- */
- *next_map = *map;
- nlen = ebno - agbno;
- out->bmv_length = XFS_FSB_TO_BB(mp, nlen);
- next_map->br_startblock += nlen;
- next_map->br_startoff += nlen;
- next_map->br_blockcount -= nlen;
- map->br_blockcount -= nlen;
- }
+static inline bool
+xfs_getbmap_full(
+ struct getbmapx *bmv)
+{
+ return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
+}
- return 0;
+static bool
+xfs_getbmap_next_rec(
+ struct xfs_bmbt_irec *rec,
+ xfs_fileoff_t total_end)
+{
+ xfs_fileoff_t end = rec->br_startoff + rec->br_blockcount;
+
+ if (end == total_end)
+ return false;
+
+ rec->br_startoff += rec->br_blockcount;
+ if (!isnullstartblock(rec->br_startblock) &&
+ rec->br_startblock != DELAYSTARTBLOCK)
+ rec->br_startblock += rec->br_blockcount;
+ rec->br_blockcount = total_end - end;
+ return true;
}
/*
@@ -535,33 +515,22 @@ xfs_getbmap_adjust_shared(
*/
int /* error code */
xfs_getbmap(
- xfs_inode_t *ip,
+ struct xfs_inode *ip,
struct getbmapx *bmv, /* user bmap structure */
- xfs_bmap_format_t formatter, /* format to user */
- void *arg) /* formatter arg */
+ struct kgetbmap *out)
{
- int64_t bmvend; /* last block requested */
- int error = 0; /* return value */
- int64_t fixlen; /* length for -1 case */
- int i; /* extent number */
- int lock; /* lock state */
- xfs_bmbt_irec_t *map; /* buffer for user's data */
- xfs_mount_t *mp; /* file system mount point */
- int nex; /* # of user extents can do */
- int subnex; /* # of bmapi's can do */
- int nmap; /* number of map entries */
- struct getbmapx *out; /* output structure */
- int whichfork; /* data or attr fork */
- int prealloced; /* this is a file with
- * preallocated data space */
- int iflags; /* interface flags */
- int bmapi_flags; /* flags for xfs_bmapi */
- int cur_ext = 0;
- struct xfs_bmbt_irec inject_map;
-
- mp = ip->i_mount;
- iflags = bmv->bmv_iflags;
-
+ struct xfs_mount *mp = ip->i_mount;
+ int iflags = bmv->bmv_iflags;
+ int whichfork, lock, error = 0;
+ int64_t bmv_end, max_len;
+ xfs_fileoff_t bno, first_bno;
+ struct xfs_ifork *ifp;
+ struct xfs_bmbt_irec got, rec;
+ xfs_filblks_t len;
+ struct xfs_iext_cursor icur;
+
+ if (bmv->bmv_iflags & ~BMV_IF_VALID)
+ return -EINVAL;
#ifndef DEBUG
/* Only allow CoW fork queries if we're debugging. */
if (iflags & BMV_IF_COWFORK)
@@ -570,89 +539,42 @@ xfs_getbmap(
if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
return -EINVAL;
+ if (bmv->bmv_length < -1)
+ return -EINVAL;
+ bmv->bmv_entries = 0;
+ if (bmv->bmv_length == 0)
+ return 0;
+
if (iflags & BMV_IF_ATTRFORK)
whichfork = XFS_ATTR_FORK;
else if (iflags & BMV_IF_COWFORK)
whichfork = XFS_COW_FORK;
else
whichfork = XFS_DATA_FORK;
+ ifp = XFS_IFORK_PTR(ip, whichfork);
+ xfs_ilock(ip, XFS_IOLOCK_SHARED);
switch (whichfork) {
case XFS_ATTR_FORK:
- if (XFS_IFORK_Q(ip)) {
- if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
- ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
- ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
- return -EINVAL;
- } else if (unlikely(
- ip->i_d.di_aformat != 0 &&
- ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
- XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
- ip->i_mount);
- return -EFSCORRUPTED;
- }
+ if (!XFS_IFORK_Q(ip))
+ goto out_unlock_iolock;
- prealloced = 0;
- fixlen = 1LL << 32;
+ max_len = 1LL << 32;
+ lock = xfs_ilock_attr_map_shared(ip);
break;
case XFS_COW_FORK:
- if (ip->i_cformat != XFS_DINODE_FMT_EXTENTS)
- return -EINVAL;
+ /* No CoW fork? Just return */
+ if (!ifp)
+ goto out_unlock_iolock;
- if (xfs_get_cowextsz_hint(ip)) {
- prealloced = 1;
- fixlen = mp->m_super->s_maxbytes;
- } else {
- prealloced = 0;
- fixlen = XFS_ISIZE(ip);
- }
- break;
- default:
- /* Local format data forks report no extents. */
- if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
- bmv->bmv_entries = 0;
- return 0;
- }
- if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
- ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
- return -EINVAL;
+ if (xfs_get_cowextsz_hint(ip))
+ max_len = mp->m_super->s_maxbytes;
+ else
+ max_len = XFS_ISIZE(ip);
- if (xfs_get_extsz_hint(ip) ||
- ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
- prealloced = 1;
- fixlen = mp->m_super->s_maxbytes;
- } else {
- prealloced = 0;
- fixlen = XFS_ISIZE(ip);
- }
+ lock = XFS_ILOCK_SHARED;
+ xfs_ilock(ip, lock);
break;
- }
-
- if (bmv->bmv_length == -1) {
- fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
- bmv->bmv_length =
- max_t(int64_t, fixlen - bmv->bmv_offset, 0);
- } else if (bmv->bmv_length == 0) {
- bmv->bmv_entries = 0;
- return 0;
- } else if (bmv->bmv_length < 0) {
- return -EINVAL;
- }
-
- nex = bmv->bmv_count - 1;
- if (nex <= 0)
- return -EINVAL;
- bmvend = bmv->bmv_offset + bmv->bmv_length;
-
-
- if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
- return -ENOMEM;
- out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
- if (!out)
- return -ENOMEM;
-
- xfs_ilock(ip, XFS_IOLOCK_SHARED);
- switch (whichfork) {
case XFS_DATA_FORK:
if (!(iflags & BMV_IF_DELALLOC) &&
(ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
@@ -670,154 +592,105 @@ xfs_getbmap(
*/
}
+ if (xfs_get_extsz_hint(ip) ||
+ (ip->i_d.di_flags &
+ (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
+ max_len = mp->m_super->s_maxbytes;
+ else
+ max_len = XFS_ISIZE(ip);
+
lock = xfs_ilock_data_map_shared(ip);
break;
- case XFS_COW_FORK:
- lock = XFS_ILOCK_SHARED;
- xfs_ilock(ip, lock);
- break;
- case XFS_ATTR_FORK:
- lock = xfs_ilock_attr_map_shared(ip);
- break;
}
- /*
- * Don't let nex be bigger than the number of extents
- * we can have assuming alternating holes and real extents.
- */
- if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
- nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
-
- bmapi_flags = xfs_bmapi_aflag(whichfork);
- if (!(iflags & BMV_IF_PREALLOC))
- bmapi_flags |= XFS_BMAPI_IGSTATE;
-
- /*
- * Allocate enough space to handle "subnex" maps at a time.
- */
- error = -ENOMEM;
- subnex = 16;
- map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
- if (!map)
+ switch (XFS_IFORK_FORMAT(ip, whichfork)) {
+ case XFS_DINODE_FMT_EXTENTS:
+ case XFS_DINODE_FMT_BTREE:
+ break;
+ case XFS_DINODE_FMT_LOCAL:
+ /* Local format inode forks report no extents. */
goto out_unlock_ilock;
+ default:
+ error = -EINVAL;
+ goto out_unlock_ilock;
+ }
- bmv->bmv_entries = 0;
-
- if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
- (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
- error = 0;
- goto out_free_map;
+ if (bmv->bmv_length == -1) {
+ max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
+ bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
}
- do {
- nmap = (nex> subnex) ? subnex : nex;
- error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
- XFS_BB_TO_FSB(mp, bmv->bmv_length),
- map, &nmap, bmapi_flags);
- if (error)
- goto out_free_map;
- ASSERT(nmap <= subnex);
-
- for (i = 0; i < nmap && bmv->bmv_length &&
- cur_ext < bmv->bmv_count - 1; i++) {
- out[cur_ext].bmv_oflags = 0;
- if (map[i].br_state == XFS_EXT_UNWRITTEN)
- out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
- else if (map[i].br_startblock == DELAYSTARTBLOCK)
- out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
- out[cur_ext].bmv_offset =
- XFS_FSB_TO_BB(mp, map[i].br_startoff);
- out[cur_ext].bmv_length =
- XFS_FSB_TO_BB(mp, map[i].br_blockcount);
- out[cur_ext].bmv_unused1 = 0;
- out[cur_ext].bmv_unused2 = 0;
+ bmv_end = bmv->bmv_offset + bmv->bmv_length;
- /*
- * delayed allocation extents that start beyond EOF can
- * occur due to speculative EOF allocation when the
- * delalloc extent is larger than the largest freespace
- * extent at conversion time. These extents cannot be
- * converted by data writeback, so can exist here even
- * if we are not supposed to be finding delalloc
- * extents.
- */
- if (map[i].br_startblock == DELAYSTARTBLOCK &&
- map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
- ASSERT((iflags & BMV_IF_DELALLOC) != 0);
-
- if (map[i].br_startblock == HOLESTARTBLOCK &&
- whichfork == XFS_ATTR_FORK) {
- /* came to the end of attribute fork */
- out[cur_ext].bmv_oflags |= BMV_OF_LAST;
- goto out_free_map;
- }
+ first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
+ len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
- /* Is this a shared block? */
- error = xfs_getbmap_adjust_shared(ip, whichfork,
- &map[i], &out[cur_ext], &inject_map);
- if (error)
- goto out_free_map;
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(NULL, ip, whichfork);
+ if (error)
+ goto out_unlock_ilock;
+ }
- if (!xfs_getbmapx_fix_eof_hole(ip, whichfork,
- &out[cur_ext], prealloced, bmvend,
- map[i].br_startblock,
- inject_map.br_startblock != NULLFSBLOCK))
- goto out_free_map;
+ if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
+ /*
+ * To stay compatible with the old implementation, report a
+ * whole-file hole if the delalloc flag is set.
+ */
+ if (iflags & BMV_IF_DELALLOC)
+ xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
+ XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
+ goto out_unlock_ilock;
+ }
- bmv->bmv_offset =
- out[cur_ext].bmv_offset +
- out[cur_ext].bmv_length;
- bmv->bmv_length =
- max_t(int64_t, 0, bmvend - bmv->bmv_offset);
+ while (!xfs_getbmap_full(bmv)) {
+ xfs_trim_extent(&got, first_bno, len);
- /*
- * In case we don't want to return the hole,
- * don't increase cur_ext so that we can reuse
- * it in the next loop.
- */
- if ((iflags & BMV_IF_NO_HOLES) &&
- map[i].br_startblock == HOLESTARTBLOCK) {
- memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
- continue;
- }
+ /*
+ * Report an entry for a hole if this extent doesn't directly
+ * follow the previous one.
+ */
+ if (got.br_startoff > bno) {
+ xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
+ got.br_startoff);
+ if (xfs_getbmap_full(bmv))
+ break;
+ }
- /*
- * In order to report shared extents accurately,
- * we report each distinct shared/unshared part
- * of a single bmbt record using multiple bmap
- * extents. To make that happen, we iterate the
- * same map array item multiple times, each
- * time trimming out the subextent that we just
- * reported.
- *
- * Because of this, we must check the out array
- * index (cur_ext) directly against bmv_count-1
- * to avoid overflows.
- */
- if (inject_map.br_startblock != NULLFSBLOCK) {
- map[i] = inject_map;
- i--;
+ /*
+ * In order to report shared extents accurately, we report each
+ * distinct shared / unshared part of a single bmbt record with
+ * an individual getbmapx record.
+ */
+ bno = got.br_startoff + got.br_blockcount;
+ rec = got;
+ do {
+ error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
+ &rec);
+ if (error || xfs_getbmap_full(bmv))
+ goto out_unlock_ilock;
+ } while (xfs_getbmap_next_rec(&rec, bno));
+
+ if (!xfs_iext_next_extent(ifp, &icur, &got)) {
+ xfs_fileoff_t end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
+
+ out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
+
+ if (whichfork != XFS_ATTR_FORK && bno < end &&
+ !xfs_getbmap_full(bmv)) {
+ xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
+ bno, end);
}
- bmv->bmv_entries++;
- cur_ext++;
+ break;
}
- } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
- out_free_map:
- kmem_free(map);
- out_unlock_ilock:
- xfs_iunlock(ip, lock);
- out_unlock_iolock:
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-
- for (i = 0; i < cur_ext; i++) {
- /* format results & advance arg */
- error = formatter(&arg, &out[i]);
- if (error)
+ if (bno >= first_bno + len)
break;
}
- kmem_free(out);
+out_unlock_ilock:
+ xfs_iunlock(ip, lock);
+out_unlock_iolock:
+ xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return error;
}
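
The rewritten xfs_getbmap above walks the incore extent list with an iext cursor rather than looping over xfs_bmapi_read(). A minimal sketch of that walk, where report_hole(), report_extent() and out_full are hypothetical stand-ins for the xfs_getbmap_report_*() helpers and the xfs_getbmap_full() check:

	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	xfs_fileoff_t		bno = first_bno;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
		return;					/* nothing mapped at or after bno */
	while (!out_full) {
		if (got.br_startoff > bno)		/* gap before this extent */
			report_hole(bno, got.br_startoff);
		report_extent(&got);			/* may emit several shared/unshared pieces */
		bno = got.br_startoff + got.br_blockcount;
		if (!xfs_iext_next_extent(ifp, &icur, &got))
			break;				/* ran off the end of the fork */
	}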
@@ -1389,53 +1262,12 @@ out:
}
-/*
- * @next_fsb will keep track of the extent currently undergoing shift.
- * @stop_fsb will keep track of the extent at which we have to stop.
- * If we are shifting left, we will start with block (offset + len) and
- * shift each extent till last extent.
- * If we are shifting right, we will start with last extent inside file space
- * and continue until we reach the block corresponding to offset.
- */
static int
-xfs_shift_file_space(
- struct xfs_inode *ip,
- xfs_off_t offset,
- xfs_off_t len,
- enum shift_direction direction)
+xfs_prepare_shift(
+ struct xfs_inode *ip,
+ loff_t offset)
{
- int done = 0;
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_trans *tp;
int error;
- struct xfs_defer_ops dfops;
- xfs_fsblock_t first_block;
- xfs_fileoff_t stop_fsb;
- xfs_fileoff_t next_fsb;
- xfs_fileoff_t shift_fsb;
- uint resblks;
-
- ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
-
- if (direction == SHIFT_LEFT) {
- /*
- * Reserve blocks to cover potential extent merges after left
- * shift operations.
- */
- resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
- next_fsb = XFS_B_TO_FSB(mp, offset + len);
- stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
- } else {
- /*
- * If right shift, delegate the work of initialization of
- * next_fsb to xfs_bmap_shift_extent as it has ilock held.
- */
- resblks = 0;
- next_fsb = NULLFSBLOCK;
- stop_fsb = XFS_B_TO_FSB(mp, offset);
- }
-
- shift_fsb = XFS_B_TO_FSB(mp, len);
/*
* Trim eofblocks to avoid shifting uninitialized post-eof preallocation
@@ -1451,8 +1283,7 @@ xfs_shift_file_space(
* Writeback and invalidate cache for the remainder of the file as we're
* about to shift down every extent from offset to EOF.
*/
- error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
- offset, -1);
+ error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
if (error)
return error;
error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
@@ -1472,16 +1303,50 @@ xfs_shift_file_space(
return error;
}
- /*
- * The extent shifting code works on extent granularity. So, if
- * stop_fsb is not the starting block of extent, we need to split
- * the extent at stop_fsb.
- */
- if (direction == SHIFT_RIGHT) {
- error = xfs_bmap_split_extent(ip, stop_fsb);
- if (error)
- return error;
- }
+ return 0;
+}
+
+/*
+ * xfs_collapse_file_space()
+ * This routine frees disk space and shifts extents for the given file.
+ * The first thing we do is to free data blocks in the specified range
+ * by calling xfs_free_file_space(). It also syncs dirty data and
+ * invalidates the page cache over the region on which the collapse is
+ * working, and then shifts extent records to the left to cover the hole.
+ * RETURNS:
+ * 0 on success
+ * errno on error
+ *
+ */
+int
+xfs_collapse_file_space(
+ struct xfs_inode *ip,
+ xfs_off_t offset,
+ xfs_off_t len)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int error;
+ struct xfs_defer_ops dfops;
+ xfs_fsblock_t first_block;
+ xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
+ xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
+ xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
+ uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+ bool done = false;
+
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+ ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
+
+ trace_xfs_collapse_file_space(ip);
+
+ error = xfs_free_file_space(ip, offset, len);
+ if (error)
+ return error;
+
+ error = xfs_prepare_shift(ip, offset);
+ if (error)
+ return error;
while (!error && !done) {
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
@@ -1495,25 +1360,17 @@ xfs_shift_file_space(
XFS_QMOPT_RES_REGBLKS);
if (error)
goto out_trans_cancel;
-
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_defer_init(&dfops, &first_block);
-
- /*
- * We are using the write transaction in which max 2 bmbt
- * updates are allowed
- */
- error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
- &done, stop_fsb, &first_block, &dfops,
- direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
+ error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
+ &done, stop_fsb, &first_block, &dfops);
if (error)
goto out_bmap_cancel;
error = xfs_defer_finish(&tp, &dfops);
if (error)
goto out_bmap_cancel;
-
error = xfs_trans_commit(tp);
}
@@ -1527,36 +1384,6 @@ out_trans_cancel:
}
/*
- * xfs_collapse_file_space()
- * This routine frees disk space and shift extent for the given file.
- * The first thing we do is to free data blocks in the specified range
- * by calling xfs_free_file_space(). It would also sync dirty data
- * and invalidate page cache over the region on which collapse range
- * is working. And Shift extent records to the left to cover a hole.
- * RETURNS:
- * 0 on success
- * errno on error
- *
- */
-int
-xfs_collapse_file_space(
- struct xfs_inode *ip,
- xfs_off_t offset,
- xfs_off_t len)
-{
- int error;
-
- ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
- trace_xfs_collapse_file_space(ip);
-
- error = xfs_free_file_space(ip, offset, len);
- if (error)
- return error;
-
- return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
-}
-
-/*
* xfs_insert_file_space()
 * This routine creates hole space by shifting extents for the given file.
* The first thing we do is to sync dirty data and invalidate page cache
@@ -1574,10 +1401,60 @@ xfs_insert_file_space(
loff_t offset,
loff_t len)
{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int error;
+ struct xfs_defer_ops dfops;
+ xfs_fsblock_t first_block;
+ xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, offset);
+ xfs_fileoff_t next_fsb = NULLFSBLOCK;
+ xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
+ bool done = false;
+
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+ ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
+
trace_xfs_insert_file_space(ip);
- return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
+ error = xfs_prepare_shift(ip, offset);
+ if (error)
+ return error;
+
+ /*
+ * The extent shifting code works on extent granularity. So, if stop_fsb
+ * is not the starting block of an extent, we need to split the extent at
+ * stop_fsb.
+ */
+ error = xfs_bmap_split_extent(ip, stop_fsb);
+ if (error)
+ return error;
+
+ while (!error && !done) {
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
+ &tp);
+ if (error)
+ break;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_defer_init(&dfops, &first_block);
+ error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
+ &done, stop_fsb, &first_block, &dfops);
+ if (error)
+ goto out_bmap_cancel;
+
+ error = xfs_defer_finish(&tp, &dfops);
+ if (error)
+ goto out_bmap_cancel;
+ error = xfs_trans_commit(tp);
+ }
+
+ return error;
+
+out_bmap_cancel:
+ xfs_defer_cancel(&dfops);
+ xfs_trans_cancel(tp);
+ return error;
}
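
With xfs_prepare_shift() factored out, the two fallocate shift paths now differ only in their setup step and in which bmap helper the transaction loop calls; a simplified outline of the flow implemented above, not verbatim kernel code:

	/* collapse range: free the target range, then shift left to fill the hole */
	error = xfs_free_file_space(ip, offset, len);
	error = xfs_prepare_shift(ip, offset);
	/* transaction loop: xfs_bmap_collapse_extents() until done */

	/* insert range: split at the stop offset, then shift right starting at EOF */
	error = xfs_prepare_shift(ip, offset);
	error = xfs_bmap_split_extent(ip, stop_fsb);
	/* transaction loop: xfs_bmap_insert_extents() until done */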
/*
@@ -1832,7 +1709,6 @@ xfs_swap_extent_forks(
xfs_filblks_t aforkblks = 0;
xfs_filblks_t taforkblks = 0;
xfs_extnum_t junk;
- xfs_extnum_t nextents;
uint64_t tmp;
int error;
@@ -1907,13 +1783,6 @@ xfs_swap_extent_forks(
switch (ip->i_d.di_format) {
case XFS_DINODE_FMT_EXTENTS:
- /*
- * If the extents fit in the inode, fix the pointer. Otherwise
- * it's already NULL or pointing to the extent.
- */
- nextents = xfs_iext_count(&ip->i_df);
- if (nextents <= XFS_INLINE_EXTS)
- ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
(*src_log_flags) |= XFS_ILOG_DEXT;
break;
case XFS_DINODE_FMT_BTREE:
@@ -1925,13 +1794,6 @@ xfs_swap_extent_forks(
switch (tip->i_d.di_format) {
case XFS_DINODE_FMT_EXTENTS:
- /*
- * If the extents fit in the inode, fix the pointer. Otherwise
- * it's already NULL or pointing to the extent.
- */
- nextents = xfs_iext_count(&tip->i_df);
- if (nextents <= XFS_INLINE_EXTS)
- tifp->if_u1.if_extents = tifp->if_u2.if_inline_ext;
(*target_log_flags) |= XFS_ILOG_DEXT;
break;
case XFS_DINODE_FMT_BTREE:
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 7d330b3c77c3..4d4ae48bd4f6 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -47,10 +47,14 @@ int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
xfs_fileoff_t start_fsb, xfs_fileoff_t length);
-/* bmap to userspace formatter - copy to user & advance pointer */
-typedef int (*xfs_bmap_format_t)(void **, struct getbmapx *);
+struct kgetbmap {
+ __s64 bmv_offset; /* file offset of segment in blocks */
+ __s64 bmv_block; /* starting block (64-bit daddr_t) */
+ __s64 bmv_length; /* length of segment, blocks */
+ __s32 bmv_oflags; /* output flags */
+};
int xfs_getbmap(struct xfs_inode *ip, struct getbmapx *bmv,
- xfs_bmap_format_t formatter, void *arg);
+ struct kgetbmap *out);
/* functions in xfs_bmap.c that are only needed by xfs_bmap_util.c */
int xfs_bmap_extsize_align(struct xfs_mount *mp, struct xfs_bmbt_irec *gotp,
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 2f97c12ca75e..4db6e8d780f6 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -42,6 +42,8 @@
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"
+#include "xfs_errortag.h"
+#include "xfs_error.h"
static kmem_zone_t *xfs_buf_zone;
@@ -2129,3 +2131,17 @@ xfs_buf_terminate(void)
{
kmem_zone_destroy(xfs_buf_zone);
}
+
+void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
+{
+ /*
+ * Set the lru reference count to 0 based on the error injection tag.
+ * This allows userspace to disrupt buffer caching for debug/testing
+ * purposes.
+ */
+ if (XFS_TEST_ERROR(false, bp->b_target->bt_mount,
+ XFS_ERRTAG_BUF_LRU_REF))
+ lru_ref = 0;
+
+ atomic_set(&bp->b_lru_ref, lru_ref);
+}
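
The tag is registered with the errortag sysfs attributes in the xfs_error.c hunk below, so a test can flip it at run time. A hedged userspace sketch -- the /sys/fs/xfs/<dev>/errortag path is an assumption and depends on the mounted block device:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed path; substitute the real device name for sda1. */
		FILE *f = fopen("/sys/fs/xfs/sda1/errortag/buf_lru_ref", "w");

		if (!f)
			return 1;
		fputs("1\n", f);	/* random factor 1 == trigger on every call */
		fclose(f);
		return 0;
	}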
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index bf71507ddb16..f873bb786824 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -352,10 +352,7 @@ extern void xfs_buf_terminate(void);
#define XFS_BUF_ADDR(bp) ((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))
-static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
-{
- atomic_set(&bp->b_lru_ref, lru_ref);
-}
+void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);
static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index ba2638d37031..0c58918bc0ad 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -41,7 +41,7 @@ static unsigned char xfs_dir3_filetype_table[] = {
DT_FIFO, DT_SOCK, DT_LNK, DT_WHT,
};
-static unsigned char
+unsigned char
xfs_dir3_get_dtype(
struct xfs_mount *mp,
uint8_t filetype)
@@ -266,7 +266,7 @@ xfs_dir2_leaf_readbuf(
xfs_dablk_t next_ra;
xfs_dablk_t map_off;
xfs_dablk_t last_da;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor icur;
int ra_want;
int error = 0;
@@ -283,7 +283,7 @@ xfs_dir2_leaf_readbuf(
*/
last_da = xfs_dir2_byte_to_da(geo, XFS_DIR2_LEAF_OFFSET);
map_off = xfs_dir2_db_to_da(geo, xfs_dir2_byte_to_db(geo, *cur_off));
- if (!xfs_iext_lookup_extent(dp, ifp, map_off, &idx, &map))
+ if (!xfs_iext_lookup_extent(dp, ifp, map_off, &icur, &map))
goto out;
if (map.br_startoff >= last_da)
goto out;
@@ -311,7 +311,7 @@ xfs_dir2_leaf_readbuf(
if (next_ra >= last_da)
goto out_no_ra;
if (map.br_blockcount < geo->fsbcount &&
- !xfs_iext_get_extent(ifp, ++idx, &map))
+ !xfs_iext_next_extent(ifp, &icur, &map))
goto out_no_ra;
if (map.br_startoff >= last_da)
goto out_no_ra;
@@ -334,7 +334,7 @@ xfs_dir2_leaf_readbuf(
ra_want -= geo->fsbcount;
next_ra += geo->fsbcount;
}
- if (!xfs_iext_get_extent(ifp, ++idx, &map)) {
+ if (!xfs_iext_next_extent(ifp, &icur, &map)) {
*ra_blk = last_da;
break;
}
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index cd82429d8df7..d57c2db64e59 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -53,13 +53,6 @@
* otherwise by the lowest id first, see xfs_dqlock2.
*/
-#ifdef DEBUG
-xfs_buftarg_t *xfs_dqerror_target;
-int xfs_do_dqerror;
-int xfs_dqreq_num;
-int xfs_dqerror_mod = 33;
-#endif
-
struct kmem_zone *xfs_qm_dqtrxzone;
static struct kmem_zone *xfs_qm_dqzone;
@@ -703,7 +696,7 @@ xfs_dq_get_next_id(
xfs_dqid_t next_id = *id + 1; /* simple advance */
uint lock_flags;
struct xfs_bmbt_irec got;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor cur;
xfs_fsblock_t start;
int error = 0;
@@ -727,7 +720,7 @@ xfs_dq_get_next_id(
return error;
}
- if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &idx, &got)) {
+ if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
/* contiguous chunk, bump startoff for the id calculation */
if (got.br_startoff < start)
got.br_startoff = start;
@@ -770,15 +763,6 @@ xfs_qm_dqget(
return -ESRCH;
}
-#ifdef DEBUG
- if (xfs_do_dqerror) {
- if ((xfs_dqerror_target == mp->m_ddev_targp) &&
- (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
- xfs_debug(mp, "Returning error in dqget");
- return -EIO;
- }
- }
-
ASSERT(type == XFS_DQ_USER ||
type == XFS_DQ_PROJ ||
type == XFS_DQ_GROUP);
@@ -786,7 +770,6 @@ xfs_qm_dqget(
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(xfs_inode_dquot(ip, type) == NULL);
}
-#endif
restart:
mutex_lock(&qi->qi_tree_lock);
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index eaf86f55b7f2..4c9f35d983b2 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -21,6 +21,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_sysfs.h"
@@ -58,6 +59,7 @@ static unsigned int xfs_errortag_random_default[] = {
XFS_RANDOM_DROP_WRITES,
XFS_RANDOM_LOG_BAD_CRC,
XFS_RANDOM_LOG_ITEM_PIN,
+ XFS_RANDOM_BUF_LRU_REF,
};
struct xfs_errortag_attr {
@@ -163,6 +165,7 @@ XFS_ERRORTAG_ATTR_RW(ag_resv_critical, XFS_ERRTAG_AG_RESV_CRITICAL);
XFS_ERRORTAG_ATTR_RW(drop_writes, XFS_ERRTAG_DROP_WRITES);
XFS_ERRORTAG_ATTR_RW(log_bad_crc, XFS_ERRTAG_LOG_BAD_CRC);
XFS_ERRORTAG_ATTR_RW(log_item_pin, XFS_ERRTAG_LOG_ITEM_PIN);
+XFS_ERRORTAG_ATTR_RW(buf_lru_ref, XFS_ERRTAG_BUF_LRU_REF);
static struct attribute *xfs_errortag_attrs[] = {
XFS_ERRORTAG_ATTR_LIST(noerror),
@@ -196,10 +199,11 @@ static struct attribute *xfs_errortag_attrs[] = {
XFS_ERRORTAG_ATTR_LIST(drop_writes),
XFS_ERRORTAG_ATTR_LIST(log_bad_crc),
XFS_ERRORTAG_ATTR_LIST(log_item_pin),
+ XFS_ERRORTAG_ATTR_LIST(buf_lru_ref),
NULL,
};
-struct kobj_type xfs_errortag_ktype = {
+static struct kobj_type xfs_errortag_ktype = {
.release = xfs_sysfs_release,
.sysfs_ops = &xfs_errortag_sysfs_ops,
.default_attrs = xfs_errortag_attrs,
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 7c4bef3bddb7..ea816c1bf8db 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -63,87 +63,6 @@ extern void xfs_verifier_error(struct xfs_buf *bp);
} \
}
-/*
- * error injection tags - the labels can be anything you want
- * but each tag should have its own unique number
- */
-
-#define XFS_ERRTAG_NOERROR 0
-#define XFS_ERRTAG_IFLUSH_1 1
-#define XFS_ERRTAG_IFLUSH_2 2
-#define XFS_ERRTAG_IFLUSH_3 3
-#define XFS_ERRTAG_IFLUSH_4 4
-#define XFS_ERRTAG_IFLUSH_5 5
-#define XFS_ERRTAG_IFLUSH_6 6
-#define XFS_ERRTAG_DA_READ_BUF 7
-#define XFS_ERRTAG_BTREE_CHECK_LBLOCK 8
-#define XFS_ERRTAG_BTREE_CHECK_SBLOCK 9
-#define XFS_ERRTAG_ALLOC_READ_AGF 10
-#define XFS_ERRTAG_IALLOC_READ_AGI 11
-#define XFS_ERRTAG_ITOBP_INOTOBP 12
-#define XFS_ERRTAG_IUNLINK 13
-#define XFS_ERRTAG_IUNLINK_REMOVE 14
-#define XFS_ERRTAG_DIR_INO_VALIDATE 15
-#define XFS_ERRTAG_BULKSTAT_READ_CHUNK 16
-#define XFS_ERRTAG_IODONE_IOERR 17
-#define XFS_ERRTAG_STRATREAD_IOERR 18
-#define XFS_ERRTAG_STRATCMPL_IOERR 19
-#define XFS_ERRTAG_DIOWRITE_IOERR 20
-#define XFS_ERRTAG_BMAPIFORMAT 21
-#define XFS_ERRTAG_FREE_EXTENT 22
-#define XFS_ERRTAG_RMAP_FINISH_ONE 23
-#define XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE 24
-#define XFS_ERRTAG_REFCOUNT_FINISH_ONE 25
-#define XFS_ERRTAG_BMAP_FINISH_ONE 26
-#define XFS_ERRTAG_AG_RESV_CRITICAL 27
-/*
- * DEBUG mode instrumentation to test and/or trigger delayed allocation
- * block killing in the event of failed writes. When enabled, all
- * buffered writes are silenty dropped and handled as if they failed.
- * All delalloc blocks in the range of the write (including pre-existing
- * delalloc blocks!) are tossed as part of the write failure error
- * handling sequence.
- */
-#define XFS_ERRTAG_DROP_WRITES 28
-#define XFS_ERRTAG_LOG_BAD_CRC 29
-#define XFS_ERRTAG_LOG_ITEM_PIN 30
-#define XFS_ERRTAG_MAX 31
-
-/*
- * Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
- */
-#define XFS_RANDOM_DEFAULT 100
-#define XFS_RANDOM_IFLUSH_1 XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_IFLUSH_2 XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_IFLUSH_3 XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_IFLUSH_4 XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_IFLUSH_5 XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_IFLUSH_6 XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_DA_READ_BUF XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_BTREE_CHECK_LBLOCK (XFS_RANDOM_DEFAULT/4)
-#define XFS_RANDOM_BTREE_CHECK_SBLOCK XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_ALLOC_READ_AGF XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_IALLOC_READ_AGI XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_ITOBP_INOTOBP XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_IUNLINK XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_IUNLINK_REMOVE XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_DIR_INO_VALIDATE XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_BULKSTAT_READ_CHUNK XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_IODONE_IOERR (XFS_RANDOM_DEFAULT/10)
-#define XFS_RANDOM_STRATREAD_IOERR (XFS_RANDOM_DEFAULT/10)
-#define XFS_RANDOM_STRATCMPL_IOERR (XFS_RANDOM_DEFAULT/10)
-#define XFS_RANDOM_DIOWRITE_IOERR (XFS_RANDOM_DEFAULT/10)
-#define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT
-#define XFS_RANDOM_FREE_EXTENT 1
-#define XFS_RANDOM_RMAP_FINISH_ONE 1
-#define XFS_RANDOM_REFCOUNT_CONTINUE_UPDATE 1
-#define XFS_RANDOM_REFCOUNT_FINISH_ONE 1
-#define XFS_RANDOM_BMAP_FINISH_ONE 1
-#define XFS_RANDOM_AG_RESV_CRITICAL 4
-#define XFS_RANDOM_DROP_WRITES 1
-#define XFS_RANDOM_LOG_BAD_CRC 1
-#define XFS_RANDOM_LOG_ITEM_PIN 1
-
#ifdef DEBUG
extern int xfs_errortag_init(struct xfs_mount *mp);
extern void xfs_errortag_del(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 6526ef0e2a23..18146873a8b3 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -984,7 +984,7 @@ xfs_file_readdir(
* point we can change the ->readdir prototype to include the
* buffer size. For now we use the current glibc buffer size.
*/
- bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);
+ bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);
return xfs_readdir(NULL, ip, ctx, bufsize);
}
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 34227115a5d6..43005fbe8b1e 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -610,7 +610,7 @@ again:
} else {
rcu_read_unlock();
if (flags & XFS_IGET_INCORE) {
- error = -ENOENT;
+ error = -ENODATA;
goto out_error_or_again;
}
XFS_STATS_INC(mp, xs_ig_missed);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 4ec5b7f45401..d8226f7a5dde 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -39,6 +39,7 @@
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
@@ -384,14 +385,6 @@ xfs_isilocked(
}
#endif
-#ifdef DEBUG
-int xfs_locked_n;
-int xfs_small_retries;
-int xfs_middle_retries;
-int xfs_lots_retries;
-int xfs_lock_delays;
-#endif
-
/*
* xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
* DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
@@ -544,24 +537,11 @@ again:
if ((attempts % 5) == 0) {
delay(1); /* Don't just spin the CPU */
-#ifdef DEBUG
- xfs_lock_delays++;
-#endif
}
i = 0;
try_lock = 0;
goto again;
}
-
-#ifdef DEBUG
- if (attempts) {
- if (attempts < 5) xfs_small_retries++;
- else if (attempts < 100) xfs_middle_retries++;
- else xfs_lots_retries++;
- } else {
- xfs_locked_n++;
- }
-#endif
}
/*
@@ -767,7 +747,7 @@ xfs_ialloc(
xfs_inode_t *pip,
umode_t mode,
xfs_nlink_t nlink,
- xfs_dev_t rdev,
+ dev_t rdev,
prid_t prid,
int okalloc,
xfs_buf_t **ialloc_context,
@@ -819,6 +799,7 @@ xfs_ialloc(
set_nlink(inode, nlink);
ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
+ inode->i_rdev = rdev;
xfs_set_projid(ip, prid);
if (pip && XFS_INHERIT_GID(pip)) {
@@ -867,7 +848,6 @@ xfs_ialloc(
case S_IFBLK:
case S_IFSOCK:
ip->i_d.di_format = XFS_DINODE_FMT_DEV;
- ip->i_df.if_u2.if_rdev = rdev;
ip->i_df.if_flags = 0;
flags |= XFS_ILOG_DEV;
break;
@@ -933,7 +913,7 @@ xfs_ialloc(
ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
ip->i_df.if_flags = XFS_IFEXTENTS;
ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
- ip->i_df.if_u1.if_extents = NULL;
+ ip->i_df.if_u1.if_root = NULL;
break;
default:
ASSERT(0);
@@ -975,7 +955,7 @@ xfs_dir_ialloc(
the inode. */
umode_t mode,
xfs_nlink_t nlink,
- xfs_dev_t rdev,
+ dev_t rdev,
prid_t prid, /* project id */
int okalloc, /* ok to allocate new space */
xfs_inode_t **ipp, /* pointer to inode; it will be
@@ -1147,7 +1127,7 @@ xfs_create(
xfs_inode_t *dp,
struct xfs_name *name,
umode_t mode,
- xfs_dev_t rdev,
+ dev_t rdev,
xfs_inode_t **ipp)
{
int is_dir = S_ISDIR(mode);
@@ -1183,7 +1163,6 @@ xfs_create(
return error;
if (is_dir) {
- rdev = 0;
resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
tres = &M_RES(mp)->tr_mkdir;
} else {
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 0ee453de239a..cc13c3763721 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -391,7 +391,7 @@ void xfs_inactive(struct xfs_inode *ip);
int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
struct xfs_inode **ipp, struct xfs_name *ci_name);
int xfs_create(struct xfs_inode *dp, struct xfs_name *name,
- umode_t mode, xfs_dev_t rdev, struct xfs_inode **ipp);
+ umode_t mode, dev_t rdev, struct xfs_inode **ipp);
int xfs_create_tmpfile(struct xfs_inode *dp, struct dentry *dentry,
umode_t mode, struct xfs_inode **ipp);
int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
@@ -428,7 +428,7 @@ xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip);
int xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t,
- xfs_nlink_t, xfs_dev_t, prid_t, int,
+ xfs_nlink_t, dev_t, prid_t, int,
struct xfs_inode **, int *);
/* from xfs_file.c */
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 9bbc2d7cc8cb..6ee5c3bf19ad 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -72,7 +72,6 @@ xfs_inode_item_data_fork_size(
break;
case XFS_DINODE_FMT_DEV:
- case XFS_DINODE_FMT_UUID:
break;
default:
ASSERT(0);
@@ -156,15 +155,13 @@ xfs_inode_item_format_data_fork(
switch (ip->i_d.di_format) {
case XFS_DINODE_FMT_EXTENTS:
iip->ili_fields &=
- ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
- XFS_ILOG_DEV | XFS_ILOG_UUID);
+ ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEV);
if ((iip->ili_fields & XFS_ILOG_DEXT) &&
ip->i_d.di_nextents > 0 &&
ip->i_df.if_bytes > 0) {
struct xfs_bmbt_rec *p;
- ASSERT(ip->i_df.if_u1.if_extents != NULL);
ASSERT(xfs_iext_count(&ip->i_df) > 0);
p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT);
@@ -181,8 +178,7 @@ xfs_inode_item_format_data_fork(
break;
case XFS_DINODE_FMT_BTREE:
iip->ili_fields &=
- ~(XFS_ILOG_DDATA | XFS_ILOG_DEXT |
- XFS_ILOG_DEV | XFS_ILOG_UUID);
+ ~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | XFS_ILOG_DEV);
if ((iip->ili_fields & XFS_ILOG_DBROOT) &&
ip->i_df.if_broot_bytes > 0) {
@@ -200,8 +196,7 @@ xfs_inode_item_format_data_fork(
break;
case XFS_DINODE_FMT_LOCAL:
iip->ili_fields &=
- ~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT |
- XFS_ILOG_DEV | XFS_ILOG_UUID);
+ ~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT | XFS_ILOG_DEV);
if ((iip->ili_fields & XFS_ILOG_DDATA) &&
ip->i_df.if_bytes > 0) {
/*
@@ -224,17 +219,9 @@ xfs_inode_item_format_data_fork(
break;
case XFS_DINODE_FMT_DEV:
iip->ili_fields &=
- ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
- XFS_ILOG_DEXT | XFS_ILOG_UUID);
+ ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEXT);
if (iip->ili_fields & XFS_ILOG_DEV)
- ilf->ilf_u.ilfu_rdev = ip->i_df.if_u2.if_rdev;
- break;
- case XFS_DINODE_FMT_UUID:
- iip->ili_fields &=
- ~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
- XFS_ILOG_DEXT | XFS_ILOG_DEV);
- if (iip->ili_fields & XFS_ILOG_UUID)
- ilf->ilf_u.ilfu_uuid = ip->i_df.if_u2.if_uuid;
+ ilf->ilf_u.ilfu_rdev = sysv_encode_dev(VFS_I(ip)->i_rdev);
break;
default:
ASSERT(0);
@@ -264,7 +251,6 @@ xfs_inode_item_format_attr_fork(
ASSERT(xfs_iext_count(ip->i_afp) ==
ip->i_d.di_anextents);
- ASSERT(ip->i_afp->if_u1.if_extents != NULL);
p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_EXT);
data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK);
@@ -441,7 +427,7 @@ xfs_inode_item_format(
ilf->ilf_dsize = 0;
ilf->ilf_asize = 0;
ilf->ilf_pad = 0;
- uuid_copy(&ilf->ilf_u.ilfu_uuid, &uuid_null);
+ memset(&ilf->ilf_u, 0, sizeof(ilf->ilf_u));
xlog_finish_iovec(lv, vecp, sizeof(*ilf));
@@ -892,8 +878,7 @@ xfs_inode_item_format_convert(
in_f->ilf_asize = in_f32->ilf_asize;
in_f->ilf_dsize = in_f32->ilf_dsize;
in_f->ilf_ino = in_f32->ilf_ino;
- /* copy biggest field of ilf_u */
- uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
+ memcpy(&in_f->ilf_u, &in_f32->ilf_u, sizeof(in_f->ilf_u));
in_f->ilf_blkno = in_f32->ilf_blkno;
in_f->ilf_len = in_f32->ilf_len;
in_f->ilf_boffset = in_f32->ilf_boffset;
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index 4c7722e325b3..b72373a33cd9 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -48,7 +48,7 @@ extern void xfs_iflush_done(struct xfs_buf *, struct xfs_log_item *);
extern void xfs_istale_done(struct xfs_buf *, struct xfs_log_item *);
extern void xfs_iflush_abort(struct xfs_inode *, bool);
extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
- xfs_inode_log_format_t *);
+ struct xfs_inode_log_format *);
extern struct kmem_zone *xfs_ili_zone;
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index aa75389be8cf..20dc65fef6a4 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -44,6 +44,7 @@
#include "xfs_btree.h"
#include <linux/fsmap.h>
#include "xfs_fsmap.h"
+#include "scrub/xfs_scrub.h"
#include <linux/capability.h>
#include <linux/cred.h>
@@ -310,8 +311,8 @@ xfs_readlink_by_handle(
int
xfs_set_dmattrs(
xfs_inode_t *ip,
- u_int evmask,
- u_int16_t state)
+ uint evmask,
+ uint16_t state)
{
xfs_mount_t *mp = ip->i_mount;
xfs_trans_t *tp;
@@ -1201,6 +1202,8 @@ out_unlock:
* 8. for non-realtime files, the extent size hint must be limited
* to half the AG size to avoid alignment extending the extent beyond the
* limits of the AG.
+ *
+ * Please keep this function in sync with xfs_scrub_inode_extsize.
*/
static int
xfs_ioctl_setattr_check_extsize(
@@ -1257,6 +1260,8 @@ xfs_ioctl_setattr_check_extsize(
* 5. Extent size must be a multiple of the appropriate block size.
* 6. The extent size hint must be limited to half the AG size to avoid
* alignment extending the extent beyond the limits of the AG.
+ *
+ * Please keep this function in sync with xfs_scrub_inode_cowextsize.
*/
static int
xfs_ioctl_setattr_check_cowextsize(
@@ -1540,17 +1545,26 @@ out_drop_write:
return error;
}
-STATIC int
-xfs_getbmap_format(void **ap, struct getbmapx *bmv)
+static bool
+xfs_getbmap_format(
+ struct kgetbmap *p,
+ struct getbmapx __user *u,
+ size_t recsize)
{
- struct getbmap __user *base = (struct getbmap __user *)*ap;
-
- /* copy only getbmap portion (not getbmapx) */
- if (copy_to_user(base, bmv, sizeof(struct getbmap)))
- return -EFAULT;
-
- *ap += sizeof(struct getbmap);
- return 0;
+ if (put_user(p->bmv_offset, &u->bmv_offset) ||
+ put_user(p->bmv_block, &u->bmv_block) ||
+ put_user(p->bmv_length, &u->bmv_length) ||
+ put_user(0, &u->bmv_count) ||
+ put_user(0, &u->bmv_entries))
+ return false;
+ if (recsize < sizeof(struct getbmapx))
+ return true;
+ if (put_user(0, &u->bmv_iflags) ||
+ put_user(p->bmv_oflags, &u->bmv_oflags) ||
+ put_user(0, &u->bmv_unused1) ||
+ put_user(0, &u->bmv_unused2))
+ return false;
+ return true;
}
STATIC int
@@ -1560,68 +1574,57 @@ xfs_ioc_getbmap(
void __user *arg)
{
struct getbmapx bmx = { 0 };
- int error;
-
- /* struct getbmap is a strict subset of struct getbmapx. */
- if (copy_from_user(&bmx, arg, offsetof(struct getbmapx, bmv_iflags)))
- return -EFAULT;
+ struct kgetbmap *buf;
+ size_t recsize;
+ int error, i;
- if (bmx.bmv_count < 2)
+ switch (cmd) {
+ case XFS_IOC_GETBMAPA:
+ bmx.bmv_iflags = BMV_IF_ATTRFORK;
+ /*FALLTHRU*/
+ case XFS_IOC_GETBMAP:
+ if (file->f_mode & FMODE_NOCMTIME)
+ bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;
+ /* struct getbmap is a strict subset of struct getbmapx. */
+ recsize = sizeof(struct getbmap);
+ break;
+ case XFS_IOC_GETBMAPX:
+ recsize = sizeof(struct getbmapx);
+ break;
+ default:
return -EINVAL;
+ }
- bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
- if (file->f_mode & FMODE_NOCMTIME)
- bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;
-
- error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, xfs_getbmap_format,
- (__force struct getbmap *)arg+1);
- if (error)
- return error;
-
- /* copy back header - only size of getbmap */
- if (copy_to_user(arg, &bmx, sizeof(struct getbmap)))
- return -EFAULT;
- return 0;
-}
-
-STATIC int
-xfs_getbmapx_format(void **ap, struct getbmapx *bmv)
-{
- struct getbmapx __user *base = (struct getbmapx __user *)*ap;
-
- if (copy_to_user(base, bmv, sizeof(struct getbmapx)))
- return -EFAULT;
-
- *ap += sizeof(struct getbmapx);
- return 0;
-}
-
-STATIC int
-xfs_ioc_getbmapx(
- struct xfs_inode *ip,
- void __user *arg)
-{
- struct getbmapx bmx;
- int error;
-
- if (copy_from_user(&bmx, arg, sizeof(bmx)))
+ if (copy_from_user(&bmx, arg, recsize))
return -EFAULT;
if (bmx.bmv_count < 2)
return -EINVAL;
+ if (bmx.bmv_count > ULONG_MAX / recsize)
+ return -ENOMEM;
- if (bmx.bmv_iflags & (~BMV_IF_VALID))
- return -EINVAL;
+ buf = kmem_zalloc_large(bmx.bmv_count * sizeof(*buf), 0);
+ if (!buf)
+ return -ENOMEM;
- error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format,
- (__force struct getbmapx *)arg+1);
+ error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, buf);
if (error)
- return error;
+ goto out_free_buf;
- /* copy back header */
- if (copy_to_user(arg, &bmx, sizeof(struct getbmapx)))
- return -EFAULT;
+ error = -EFAULT;
+ if (copy_to_user(arg, &bmx, recsize))
+ goto out_free_buf;
+ arg += recsize;
+
+ for (i = 0; i < bmx.bmv_entries; i++) {
+ if (!xfs_getbmap_format(buf + i, arg, recsize))
+ goto out_free_buf;
+ arg += recsize;
+ }
+ error = 0;
+out_free_buf:
+ kmem_free(buf);
	return error;
}
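
For reference, the userspace calling convention the reworked ioctl still honours: the caller passes an array whose first element is the request header and whose remaining bmv_count - 1 slots receive extent records, all in 512-byte units. A minimal sketch with error handling trimmed (the userspace header name may differ between distributions):

	#include <stdio.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <xfs/xfs.h>	/* struct getbmap, XFS_IOC_GETBMAP */

	int main(int argc, char **argv)
	{
		struct getbmap *bm = calloc(33, sizeof(*bm));	/* header + 32 records */
		int fd = open(argv[1], O_RDONLY);
		int i;

		bm[0].bmv_offset = 0;
		bm[0].bmv_length = -1;			/* map the whole file */
		bm[0].bmv_count = 33;
		if (ioctl(fd, XFS_IOC_GETBMAP, bm) == 0) {
			for (i = 1; i <= bm[0].bmv_entries; i++)
				printf("offset %lld len %lld block %lld\n",
				       (long long)bm[i].bmv_offset,
				       (long long)bm[i].bmv_length,
				       (long long)bm[i].bmv_block);
		}
		free(bm);
		close(fd);
		return 0;
	}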
@@ -1703,6 +1706,30 @@ xfs_ioc_getfsmap(
return 0;
}
+STATIC int
+xfs_ioc_scrub_metadata(
+ struct xfs_inode *ip,
+ void __user *arg)
+{
+ struct xfs_scrub_metadata scrub;
+ int error;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&scrub, arg, sizeof(scrub)))
+ return -EFAULT;
+
+ error = xfs_scrub_metadata(ip, &scrub);
+ if (error)
+ return error;
+
+ if (copy_to_user(arg, &scrub, sizeof(scrub)))
+ return -EFAULT;
+
+ return 0;
+}
+
int
xfs_ioc_swapext(
xfs_swapext_t *sxp)
@@ -1878,14 +1905,15 @@ xfs_file_ioctl(
case XFS_IOC_GETBMAP:
case XFS_IOC_GETBMAPA:
- return xfs_ioc_getbmap(filp, cmd, arg);
-
case XFS_IOC_GETBMAPX:
- return xfs_ioc_getbmapx(ip, arg);
+ return xfs_ioc_getbmap(filp, cmd, arg);
case FS_IOC_GETFSMAP:
return xfs_ioc_getfsmap(ip, arg);
+ case XFS_IOC_SCRUB_METADATA:
+ return xfs_ioc_scrub_metadata(ip, arg);
+
case XFS_IOC_FD_TO_HANDLE:
case XFS_IOC_PATH_TO_HANDLE:
case XFS_IOC_PATH_TO_FSHANDLE: {
diff --git a/fs/xfs/xfs_ioctl.h b/fs/xfs/xfs_ioctl.h
index e86c3ea137d2..8de879f0c7d5 100644
--- a/fs/xfs/xfs_ioctl.h
+++ b/fs/xfs/xfs_ioctl.h
@@ -86,7 +86,7 @@ xfs_file_compat_ioctl(
extern int
xfs_set_dmattrs(
struct xfs_inode *ip,
- u_int evmask,
- u_int16_t state);
+ uint evmask,
+ uint16_t state);
#endif
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index fa0bc4d46065..35c79e246fde 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -556,6 +556,7 @@ xfs_file_compat_ioctl(
case XFS_IOC_ERROR_INJECTION:
case XFS_IOC_ERROR_CLEARALL:
case FS_IOC_GETFSMAP:
+ case XFS_IOC_SCRUB_METADATA:
return xfs_file_ioctl(filp, cmd, p);
#ifndef BROKEN_X86_ALIGNMENT
/* These are handled fine if no alignment issues */
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index f179bdf1644d..18077e2189a9 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -30,6 +30,7 @@
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
@@ -54,13 +55,13 @@ xfs_bmbt_to_iomap(
struct xfs_mount *mp = ip->i_mount;
if (imap->br_startblock == HOLESTARTBLOCK) {
- iomap->blkno = IOMAP_NULL_BLOCK;
+ iomap->addr = IOMAP_NULL_ADDR;
iomap->type = IOMAP_HOLE;
} else if (imap->br_startblock == DELAYSTARTBLOCK) {
- iomap->blkno = IOMAP_NULL_BLOCK;
+ iomap->addr = IOMAP_NULL_ADDR;
iomap->type = IOMAP_DELALLOC;
} else {
- iomap->blkno = xfs_fsb_to_db(ip, imap->br_startblock);
+ iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
if (imap->br_state == XFS_EXT_UNWRITTEN)
iomap->type = IOMAP_UNWRITTEN;
else
@@ -389,7 +390,7 @@ xfs_iomap_prealloc_size(
struct xfs_inode *ip,
loff_t offset,
loff_t count,
- xfs_extnum_t idx)
+ struct xfs_iext_cursor *icur)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
@@ -414,7 +415,7 @@ xfs_iomap_prealloc_size(
*/
if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
- !xfs_iext_get_extent(ifp, idx - 1, &prev) ||
+ !xfs_iext_peek_prev_extent(ifp, icur, &prev) ||
prev.br_startoff + prev.br_blockcount < offset_fsb)
return mp->m_writeio_blocks;
@@ -532,7 +533,7 @@ xfs_file_iomap_begin_delay(
xfs_fileoff_t end_fsb;
int error = 0, eof = 0;
struct xfs_bmbt_irec got;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor icur;
xfs_fsblock_t prealloc_blocks = 0;
ASSERT(!XFS_IS_REALTIME_INODE(ip));
@@ -557,7 +558,7 @@ xfs_file_iomap_begin_delay(
goto out_unlock;
}
- eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got);
+ eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got);
if (!eof && got.br_startoff <= offset_fsb) {
if (xfs_is_reflink_inode(ip)) {
bool shared;
@@ -591,7 +592,8 @@ xfs_file_iomap_begin_delay(
end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
if (eof) {
- prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count, idx);
+ prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count,
+ &icur);
if (prealloc_blocks) {
xfs_extlen_t align;
xfs_off_t end_offset;
@@ -613,7 +615,8 @@ xfs_file_iomap_begin_delay(
retry:
error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
- end_fsb - offset_fsb, prealloc_blocks, &got, &idx, eof);
+ end_fsb - offset_fsb, prealloc_blocks, &got, &icur,
+ eof);
switch (error) {
case 0:
break;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 17081c77ef86..56475fcd76f2 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -160,7 +160,6 @@ xfs_generic_create(
if (S_ISCHR(mode) || S_ISBLK(mode)) {
if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
return -EINVAL;
- rdev = sysv_encode_dev(rdev);
} else {
rdev = 0;
}
@@ -535,8 +534,7 @@ xfs_vn_getattr(
case S_IFBLK:
case S_IFCHR:
stat->blksize = BLKDEV_IOSIZE;
- stat->rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
- sysv_minor(ip->i_df.if_u2.if_rdev));
+ stat->rdev = inode->i_rdev;
break;
default:
if (XFS_IS_REALTIME_INODE(ip)) {
@@ -886,22 +884,6 @@ xfs_setattr_size(
return error;
/*
- * We are going to log the inode size change in this transaction so
- * any previous writes that are beyond the on disk EOF and the new
- * EOF that have not been written out need to be written here. If we
- * do not write the data out, we expose ourselves to the null files
- * problem. Note that this includes any block zeroing we did above;
- * otherwise those blocks may not be zeroed after a crash.
- */
- if (did_zeroing ||
- (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
- error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
- ip->i_d.di_size, newsize);
- if (error)
- return error;
- }
-
- /*
* We've already locked out new page faults, so now we can safely remove
* pages from the page cache knowing they won't get refaulted until we
* drop the XFS_MMAP_EXCL lock after the extent manipulations are
@@ -917,9 +899,29 @@ xfs_setattr_size(
* user visible changes). There's not much we can do about this, except
* to hope that the caller sees ENOMEM and retries the truncate
* operation.
+ *
+ * And we update in-core i_size and truncate page cache beyond newsize
+ * before writeback the [di_size, newsize] range, so we're guaranteed
+ * not to write stale data past the new EOF on truncate down.
*/
truncate_setsize(inode, newsize);
+ /*
+ * We are going to log the inode size change in this transaction so
+ * any previous writes that are beyond the on disk EOF and the new
+ * EOF that have not been written out need to be written here. If we
+ * do not write the data out, we expose ourselves to the null files
+ * problem. Note that this includes any block zeroing we did above;
+ * otherwise those blocks may not be zeroed after a crash.
+ */
+ if (did_zeroing ||
+ (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
+ error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+ ip->i_d.di_size, newsize - 1);
+ if (error)
+ return error;
+ }
+
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
if (error)
return error;
@@ -1231,18 +1233,6 @@ xfs_setup_inode(
inode->i_uid = xfs_uid_to_kuid(ip->i_d.di_uid);
inode->i_gid = xfs_gid_to_kgid(ip->i_d.di_gid);
- switch (inode->i_mode & S_IFMT) {
- case S_IFBLK:
- case S_IFCHR:
- inode->i_rdev =
- MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
- sysv_minor(ip->i_df.if_u2.if_rdev));
- break;
- default:
- inode->i_rdev = 0;
- break;
- }
-
i_size_write(inode, ip->i_d.di_size);
xfs_diflags_to_iflags(inode, ip);
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index c393a2f6d8c3..d58310514423 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -31,16 +31,6 @@
#include "xfs_trace.h"
#include "xfs_icache.h"
-int
-xfs_internal_inum(
- xfs_mount_t *mp,
- xfs_ino_t ino)
-{
- return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
- (xfs_sb_version_hasquota(&mp->m_sb) &&
- xfs_is_quota_inode(&mp->m_sb, ino)));
-}
-
/*
* Return stat information for one inode.
* Return 0 if ok, else errno.
@@ -119,12 +109,11 @@ xfs_bulkstat_one_int(
switch (dic->di_format) {
case XFS_DINODE_FMT_DEV:
- buf->bs_rdev = ip->i_df.if_u2.if_rdev;
+ buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
buf->bs_blksize = BLKDEV_IOSIZE;
buf->bs_blocks = 0;
break;
case XFS_DINODE_FMT_LOCAL:
- case XFS_DINODE_FMT_UUID:
buf->bs_rdev = 0;
buf->bs_blksize = mp->m_sb.sb_blocksize;
buf->bs_blocks = 0;
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index 17e86e0541af..6ea8b3912fa4 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -96,6 +96,4 @@ xfs_inumbers(
void __user *buffer, /* buffer with inode info */
inumbers_fmt_pf formatter);
-int xfs_internal_inum(struct xfs_mount *mp, xfs_ino_t ino);
-
#endif /* __XFS_ITABLE_H__ */
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index dcd1292664b3..6282bfc1afa9 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -142,6 +142,13 @@ typedef __u32 xfs_nlink_t;
#define SYNCHRONIZE() barrier()
#define __return_address __builtin_return_address(0)
+/*
+ * Return the address of a label. Use barrier() so that the optimizer
+ * won't reorder code to refactor the error jumpouts into a single
+ * return, which throws off the reported address.
+ */
+#define __this_address ({ __label__ __here; __here: barrier(); &&__here; })
+
#define XFS_PROJID_DEFAULT 0
#define MIN(a,b) (min(a,b))
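
A minimal usage sketch for the new __this_address helper; example_check() is hypothetical, and %pS resolves the captured address back to a symbol when the message is printed:

	static void example_check(struct xfs_mount *mp, bool ok)
	{
		if (!ok) {
			void *failaddr = __this_address;	/* records this exact spot */

			xfs_warn(mp, "metadata check failed at %pS", failaddr);
		}
	}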
@@ -243,10 +250,6 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y)
#define ASSERT(expr) \
(likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
-#ifndef STATIC
-# define STATIC noinline
-#endif
-
#else /* !DEBUG */
#ifdef XFS_WARN
@@ -254,21 +257,15 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y)
#define ASSERT(expr) \
(likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
-#ifndef STATIC
-# define STATIC static noinline
-#endif
-
#else /* !DEBUG && !XFS_WARN */
#define ASSERT(expr) ((void)0)
-#ifndef STATIC
-# define STATIC static noinline
-#endif
-
#endif /* XFS_WARN */
#endif /* DEBUG */
+#define STATIC static noinline
+
#ifdef CONFIG_XFS_RT
/*
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index dc95a49d62e7..38d4227895ae 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -22,6 +22,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
@@ -608,6 +609,7 @@ xfs_log_mount(
xfs_daddr_t blk_offset,
int num_bblks)
{
+ bool fatal = xfs_sb_version_hascrc(&mp->m_sb);
int error = 0;
int min_logfsbs;
@@ -659,9 +661,20 @@ xfs_log_mount(
XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
XFS_MAX_LOG_BYTES);
error = -EINVAL;
+ } else if (mp->m_sb.sb_logsunit > 1 &&
+ mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
+ xfs_warn(mp,
+ "log stripe unit %u bytes must be a multiple of block size",
+ mp->m_sb.sb_logsunit);
+ error = -EINVAL;
+ fatal = true;
}
if (error) {
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ /*
+ * Log check errors are always fatal on v5 filesystems, and whenever
+ * bad metadata could otherwise lead to a crash.
+ */
+ if (fatal) {
xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
ASSERT(0);
goto out_free_log;
@@ -744,6 +757,7 @@ xfs_log_mount_finish(
{
int error = 0;
bool readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
+ bool recovered = mp->m_log->l_flags & XLOG_RECOVERY_NEEDED;
if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
@@ -780,6 +794,21 @@ xfs_log_mount_finish(
mp->m_super->s_flags &= ~MS_ACTIVE;
evict_inodes(mp->m_super);
+ /*
+ * Drain the buffer LRU after log recovery. This is required for v4
+ * filesystems to avoid leaving around buffers with NULL verifier ops,
+ * but we do it unconditionally to make sure we're always in a clean
+ * cache state after mount.
+ *
+ * Don't push in the error case because the AIL may have pending intents
+ * that aren't removed until recovery is cancelled.
+ */
+ if (!error && recovered) {
+ xfs_log_force(mp, XFS_LOG_SYNC);
+ xfs_ail_push_all_sync(mp->m_ail);
+ }
+ xfs_wait_buftarg(mp->m_ddev_targp);
+
if (readonly)
mp->m_flags |= XFS_MOUNT_RDONLY;
@@ -3734,7 +3763,7 @@ xlog_ticket_alloc(
* one of the iclogs. This uses backup pointers stored in a different
* part of the log in case we trash the log structure.
*/
-void
+STATIC void
xlog_verify_dest_ptr(
struct xlog *log,
void *ptr)
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 51bf7b827387..129975970d99 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -592,9 +592,9 @@ xlog_valid_lsn(
* a transiently forward state. Instead, we can see the LSN in a
* transiently behind state if we happen to race with a cycle wrap.
*/
- cur_cycle = ACCESS_ONCE(log->l_curr_cycle);
+ cur_cycle = READ_ONCE(log->l_curr_cycle);
smp_rmb();
- cur_block = ACCESS_ONCE(log->l_curr_block);
+ cur_block = READ_ONCE(log->l_curr_block);
if ((CYCLE_LSN(lsn) > cur_cycle) ||
(CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index ee34899396b2..87b1c331f9eb 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -85,17 +85,21 @@ struct xfs_buf_cancel {
*/
/*
- * Verify the given count of basic blocks is valid number of blocks
- * to specify for an operation involving the given XFS log buffer.
- * Returns nonzero if the count is valid, 0 otherwise.
+ * Verify the log-relative block number and length in basic blocks are valid for
+ * an operation involving the given XFS log buffer. Returns true if the fields
+ * are valid, false otherwise.
*/
-
-static inline int
-xlog_buf_bbcount_valid(
+static inline bool
+xlog_verify_bp(
struct xlog *log,
+ xfs_daddr_t blk_no,
int bbcount)
{
- return bbcount > 0 && bbcount <= log->l_logBBsize;
+ if (blk_no < 0 || blk_no >= log->l_logBBsize)
+ return false;
+ if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
+ return false;
+ return true;
}
/*
@@ -110,7 +114,11 @@ xlog_get_bp(
{
struct xfs_buf *bp;
- if (!xlog_buf_bbcount_valid(log, nbblks)) {
+ /*
+ * Pass log block 0 since we don't have an addr yet; the buffer will
+ * be verified on read.
+ */
+ if (!xlog_verify_bp(log, 0, nbblks)) {
xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
nbblks);
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
@@ -180,9 +188,10 @@ xlog_bread_noalign(
{
int error;
- if (!xlog_buf_bbcount_valid(log, nbblks)) {
- xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
- nbblks);
+ if (!xlog_verify_bp(log, blk_no, nbblks)) {
+ xfs_warn(log->l_mp,
+ "Invalid log block/length (0x%llx, 0x%x) for buffer",
+ blk_no, nbblks);
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return -EFSCORRUPTED;
}
@@ -265,9 +274,10 @@ xlog_bwrite(
{
int error;
- if (!xlog_buf_bbcount_valid(log, nbblks)) {
- xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
- nbblks);
+ if (!xlog_verify_bp(log, blk_no, nbblks)) {
+ xfs_warn(log->l_mp,
+ "Invalid log block/length (0x%llx, 0x%x) for buffer",
+ blk_no, nbblks);
XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return -EFSCORRUPTED;
}
@@ -753,7 +763,7 @@ xlog_find_head(
* in the in-core log. The following number can be made tighter if
* we actually look at the block size of the filesystem.
*/
- num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
+ num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
if (head_blk >= num_scan_bblks) {
/*
* We are guaranteed that the entire check can be performed
@@ -2975,7 +2985,7 @@ xlog_recover_inode_pass2(
struct xlog_recover_item *item,
xfs_lsn_t current_lsn)
{
- xfs_inode_log_format_t *in_f;
+ struct xfs_inode_log_format *in_f;
xfs_mount_t *mp = log->l_mp;
xfs_buf_t *bp;
xfs_dinode_t *dip;
@@ -2989,10 +2999,10 @@ xlog_recover_inode_pass2(
uint isize;
int need_free = 0;
- if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
+ if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
in_f = item->ri_buf[0].i_addr;
} else {
- in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
+ in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), KM_SLEEP);
need_free = 1;
error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
if (error)
@@ -3163,16 +3173,8 @@ xlog_recover_inode_pass2(
}
fields = in_f->ilf_fields;
- switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
- case XFS_ILOG_DEV:
+ if (fields & XFS_ILOG_DEV)
xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
- break;
- case XFS_ILOG_UUID:
- memcpy(XFS_DFORK_DPTR(dip),
- &in_f->ilf_u.ilfu_uuid,
- sizeof(uuid_t));
- break;
- }
if (in_f->ilf_size == 2)
goto out_owner_change;
@@ -4297,7 +4299,7 @@ xlog_recover_add_to_trans(
char *dp,
int len)
{
- xfs_inode_log_format_t *in_f; /* any will do */
+ struct xfs_inode_log_format *in_f; /* any will do */
xlog_recover_item_t *item;
char *ptr;
@@ -4331,7 +4333,7 @@ xlog_recover_add_to_trans(
ptr = kmem_alloc(len, KM_SLEEP);
memcpy(ptr, dp, len);
- in_f = (xfs_inode_log_format_t *)ptr;
+ in_f = (struct xfs_inode_log_format *)ptr;
/* take the tail entry */
item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
@@ -5823,7 +5825,7 @@ xlog_recover_cancel(
* Read all of the agf and agi counters and check that they
* are consistent with the superblock counters.
*/
-void
+STATIC void
xlog_recover_check_summary(
struct xlog *log)
{
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index e9727d0a541a..c879b517cc94 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1022,10 +1022,21 @@ xfs_mountfs(
xfs_rtunmount_inodes(mp);
out_rele_rip:
IRELE(rip);
- cancel_delayed_work_sync(&mp->m_reclaim_work);
- xfs_reclaim_inodes(mp, SYNC_WAIT);
/* Clean out dquots that might be in memory after quotacheck. */
xfs_qm_unmount(mp);
+ /*
+ * Cancel all delayed reclaim work and reclaim the inodes directly.
+ * We have to do this /after/ rtunmount and qm_unmount because those
+ * two will have scheduled delayed reclaim for the rt/quota inodes.
+ *
+ * This is slightly different from the unmountfs call sequence
+ * because we could be tearing down a partially set up mount. In
+ * particular, if log_mount_finish fails we bail out without calling
+ * qm_unmount_quotas and therefore rely on qm_unmount to release the
+ * quota inodes.
+ */
+ cancel_delayed_work_sync(&mp->m_reclaim_work);
+ xfs_reclaim_inodes(mp, SYNC_WAIT);
out_log_dealloc:
mp->m_flags |= XFS_MOUNT_UNMOUNTING;
xfs_log_mount_cancel(mp);
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 37e603bf1591..cc041a29eb70 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -273,7 +273,7 @@ xfs_reflink_reserve_cow(
struct xfs_bmbt_irec got;
int error = 0;
bool eof = false, trimmed;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor icur;
/*
* Search the COW fork extent list first. This serves two purposes:
@@ -284,7 +284,7 @@ xfs_reflink_reserve_cow(
* tree.
*/
- if (!xfs_iext_lookup_extent(ip, ifp, imap->br_startoff, &idx, &got))
+ if (!xfs_iext_lookup_extent(ip, ifp, imap->br_startoff, &icur, &got))
eof = true;
if (!eof && got.br_startoff <= imap->br_startoff) {
trace_xfs_reflink_cow_found(ip, imap);
@@ -312,7 +312,7 @@ xfs_reflink_reserve_cow(
return error;
error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, imap->br_startoff,
- imap->br_blockcount, 0, &got, &idx, eof);
+ imap->br_blockcount, 0, &got, &icur, eof);
if (error == -ENOSPC || error == -EDQUOT)
trace_xfs_reflink_cow_enospc(ip, imap);
if (error)
@@ -353,29 +353,22 @@ xfs_reflink_convert_cow(
xfs_off_t offset,
xfs_off_t count)
{
- struct xfs_bmbt_irec got;
- struct xfs_defer_ops dfops;
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count);
- xfs_extnum_t idx;
- bool found;
- int error = 0;
+ xfs_filblks_t count_fsb = end_fsb - offset_fsb;
+ struct xfs_bmbt_irec imap;
+ struct xfs_defer_ops dfops;
+ xfs_fsblock_t first_block = NULLFSBLOCK;
+ int nimaps = 1, error = 0;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
+ ASSERT(count != 0);
- /* Convert all the extents to real from unwritten. */
- for (found = xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got);
- found && got.br_startoff < end_fsb;
- found = xfs_iext_get_extent(ifp, ++idx, &got)) {
- error = xfs_reflink_convert_cow_extent(ip, &got, offset_fsb,
- end_fsb - offset_fsb, &dfops);
- if (error)
- break;
- }
-
- /* Finish up. */
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ error = xfs_bmapi_write(NULL, ip, offset_fsb, count_fsb,
+ XFS_BMAPI_COWFORK | XFS_BMAPI_CONVERT |
+ XFS_BMAPI_CONVERT_ONLY, &first_block, 0, &imap, &nimaps,
+ &dfops);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
@@ -399,7 +392,7 @@ xfs_reflink_allocate_cow(
bool trimmed;
xfs_filblks_t resaligned;
xfs_extlen_t resblks = 0;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor icur;
retry:
ASSERT(xfs_is_reflink_inode(ip));
@@ -409,7 +402,7 @@ retry:
* Even if the extent is not shared we might have a preallocation for
* it in the COW fork. If so use it.
*/
- if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &idx, &got) &&
+ if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) &&
got.br_startoff <= offset_fsb) {
*shared = true;
@@ -496,13 +489,13 @@ xfs_reflink_find_cow_mapping(
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
xfs_fileoff_t offset_fsb;
struct xfs_bmbt_irec got;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor icur;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
ASSERT(xfs_is_reflink_inode(ip));
offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
- if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got))
+ if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
return false;
if (got.br_startoff > offset_fsb)
return false;
@@ -524,18 +517,18 @@ xfs_reflink_trim_irec_to_next_cow(
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
struct xfs_bmbt_irec got;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor icur;
if (!xfs_is_reflink_inode(ip))
return;
/* Find the extent in the CoW fork. */
- if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got))
+ if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
return;
/* This is the extent before; try sliding up one. */
if (got.br_startoff < offset_fsb) {
- if (!xfs_iext_get_extent(ifp, idx + 1, &got))
+ if (!xfs_iext_next_extent(ifp, &icur, &got))
return;
}
@@ -562,24 +555,32 @@ xfs_reflink_cancel_cow_blocks(
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
struct xfs_bmbt_irec got, del;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor icur;
xfs_fsblock_t firstfsb;
struct xfs_defer_ops dfops;
int error = 0;
if (!xfs_is_reflink_inode(ip))
return 0;
- if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got))
+ if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
return 0;
- while (got.br_startoff < end_fsb) {
+ /* Walk backwards until we're out of the I/O range... */
+ while (got.br_startoff + got.br_blockcount > offset_fsb) {
del = got;
xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);
+
+ /* Extent delete may have bumped ext forward */
+ if (!del.br_blockcount) {
+ xfs_iext_prev(ifp, &icur);
+ goto next_extent;
+ }
+
trace_xfs_reflink_cancel_cow(ip, &del);
if (isnullstartblock(del.br_startblock)) {
error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
- &idx, &got, &del);
+ &icur, &got, &del);
if (error)
break;
} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
@@ -610,10 +611,10 @@ xfs_reflink_cancel_cow_blocks(
}
/* Remove the mapping from the CoW fork. */
- xfs_bmap_del_extent_cow(ip, &idx, &got, &del);
+ xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
}
-
- if (!xfs_iext_get_extent(ifp, ++idx, &got))
+next_extent:
+ if (!xfs_iext_get_extent(ifp, &icur, &got))
break;
}
@@ -698,7 +699,7 @@ xfs_reflink_end_cow(
int error;
unsigned int resblks;
xfs_filblks_t rlen;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor icur;
trace_xfs_reflink_end_cow(ip, offset, count);
@@ -733,27 +734,22 @@ xfs_reflink_end_cow(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
- /* If there is a hole at end_fsb - 1 go to the previous extent */
- if (!xfs_iext_lookup_extent(ip, ifp, end_fsb - 1, &idx, &got) ||
- got.br_startoff > end_fsb) {
- /*
- * In case of racing, overlapping AIO writes no COW extents
- * might be left by the time I/O completes for the loser of
- * the race. In that case we are done.
- */
- if (idx <= 0)
- goto out_cancel;
- xfs_iext_get_extent(ifp, --idx, &got);
- }
+ /*
+ * In case of racing, overlapping AIO writes, no COW extents might be
+ * left by the time I/O completes for the loser of the race. In that
+ * case we are done.
+ */
+ if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
+ goto out_cancel;
/* Walk backwards until we're out of the I/O range... */
while (got.br_startoff + got.br_blockcount > offset_fsb) {
del = got;
xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);
- /* Extent delete may have bumped idx forward */
+ /* Extent delete may have bumped ext forward */
if (!del.br_blockcount) {
- idx--;
+ xfs_iext_prev(ifp, &icur);
goto next_extent;
}
@@ -765,7 +761,7 @@ xfs_reflink_end_cow(
* allocated but have not yet been involved in a write.
*/
if (got.br_state == XFS_EXT_UNWRITTEN) {
- idx--;
+ xfs_iext_prev(ifp, &icur);
goto next_extent;
}
@@ -796,14 +792,14 @@ xfs_reflink_end_cow(
goto out_defer;
/* Remove the mapping from the CoW fork. */
- xfs_bmap_del_extent_cow(ip, &idx, &got, &del);
+ xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
xfs_defer_ijoin(&dfops, ip);
error = xfs_defer_finish(&tp, &dfops);
if (error)
goto out_defer;
next_extent:
- if (!xfs_iext_get_extent(ifp, idx, &got))
+ if (!xfs_iext_get_extent(ifp, &icur, &got))
break;
}
@@ -1433,7 +1429,7 @@ xfs_reflink_inode_has_shared_extents(
xfs_extlen_t aglen;
xfs_agblock_t rbno;
xfs_extlen_t rlen;
- xfs_extnum_t idx;
+ struct xfs_iext_cursor icur;
bool found;
int error;
@@ -1445,7 +1441,7 @@ xfs_reflink_inode_has_shared_extents(
}
*has_shared = false;
- found = xfs_iext_lookup_extent(ip, ifp, 0, &idx, &got);
+ found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got);
while (found) {
if (isnullstartblock(got.br_startblock) ||
got.br_state != XFS_EXT_NORM)
@@ -1464,7 +1460,7 @@ xfs_reflink_inode_has_shared_extents(
return 0;
}
next:
- found = xfs_iext_get_extent(ifp, ++idx, &got);
+ found = xfs_iext_next_extent(ifp, &icur, &got);
}
return 0;
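For reference, the xfs_reflink.c hunks above all replace the raw extent index (xfs_extnum_t idx) with struct xfs_iext_cursor. A minimal sketch of the resulting walk, assuming only the helpers visible in this diff; the wrapper function itself is hypothetical:

static int
xfs_example_count_cow_extents(
        struct xfs_inode        *ip,
        xfs_fileoff_t           offset_fsb)
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
        struct xfs_iext_cursor  icur;
        struct xfs_bmbt_irec    got;
        int                     nr = 0;

        /* Position the cursor at the first extent at or after offset_fsb. */
        if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
                return 0;

        /* Step the cursor forward instead of incrementing an index. */
        do {
                nr++;
        } while (xfs_iext_next_extent(ifp, &icur, &got));

        return nr;
}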
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
index 79defa722bf1..3f30f846d7f2 100644
--- a/fs/xfs/xfs_rtalloc.h
+++ b/fs/xfs/xfs_rtalloc.h
@@ -138,6 +138,7 @@ int xfs_rtalloc_query_range(struct xfs_trans *tp,
int xfs_rtalloc_query_all(struct xfs_trans *tp,
xfs_rtalloc_query_range_fn fn,
void *priv);
+bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
#else
# define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb) (ENOSYS)
# define xfs_rtfree_extent(t,b,l) (ENOSYS)
@@ -146,6 +147,7 @@ int xfs_rtalloc_query_all(struct xfs_trans *tp,
# define xfs_rtalloc_query_range(t,l,h,f,p) (ENOSYS)
# define xfs_rtalloc_query_all(t,f,p) (ENOSYS)
# define xfs_rtbuf_get(m,t,b,i,p) (ENOSYS)
+# define xfs_verify_rtbno(m, r) (false)
static inline int /* error */
xfs_rtmount_init(
xfs_mount_t *mp) /* file system mount structure */
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index bb5514688d47..515ba042d75c 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -218,53 +218,15 @@ TRACE_EVENT(xfs_attr_list_node_descend,
__entry->bt_before)
);
-TRACE_EVENT(xfs_iext_insert,
- TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx,
- struct xfs_bmbt_irec *r, int state, unsigned long caller_ip),
- TP_ARGS(ip, idx, r, state, caller_ip),
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(xfs_ino_t, ino)
- __field(xfs_extnum_t, idx)
- __field(xfs_fileoff_t, startoff)
- __field(xfs_fsblock_t, startblock)
- __field(xfs_filblks_t, blockcount)
- __field(xfs_exntst_t, state)
- __field(int, bmap_state)
- __field(unsigned long, caller_ip)
- ),
- TP_fast_assign(
- __entry->dev = VFS_I(ip)->i_sb->s_dev;
- __entry->ino = ip->i_ino;
- __entry->idx = idx;
- __entry->startoff = r->br_startoff;
- __entry->startblock = r->br_startblock;
- __entry->blockcount = r->br_blockcount;
- __entry->state = r->br_state;
- __entry->bmap_state = state;
- __entry->caller_ip = caller_ip;
- ),
- TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
- "offset %lld block %lld count %lld flag %d caller %ps",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino,
- __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
- (long)__entry->idx,
- __entry->startoff,
- (int64_t)__entry->startblock,
- __entry->blockcount,
- __entry->state,
- (char *)__entry->caller_ip)
-);
-
DECLARE_EVENT_CLASS(xfs_bmap_class,
- TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state,
+ TP_PROTO(struct xfs_inode *ip, struct xfs_iext_cursor *cur, int state,
unsigned long caller_ip),
- TP_ARGS(ip, idx, state, caller_ip),
+ TP_ARGS(ip, cur, state, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_ino_t, ino)
- __field(xfs_extnum_t, idx)
+ __field(void *, leaf);
+ __field(int, pos);
__field(xfs_fileoff_t, startoff)
__field(xfs_fsblock_t, startblock)
__field(xfs_filblks_t, blockcount)
@@ -277,10 +239,11 @@ DECLARE_EVENT_CLASS(xfs_bmap_class,
struct xfs_bmbt_irec r;
ifp = xfs_iext_state_to_fork(ip, state);
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r);
+ xfs_iext_get_extent(ifp, cur, &r);
__entry->dev = VFS_I(ip)->i_sb->s_dev;
__entry->ino = ip->i_ino;
- __entry->idx = idx;
+ __entry->leaf = cur->leaf;
+ __entry->pos = cur->pos;
__entry->startoff = r.br_startoff;
__entry->startblock = r.br_startblock;
__entry->blockcount = r.br_blockcount;
@@ -288,12 +251,13 @@ DECLARE_EVENT_CLASS(xfs_bmap_class,
__entry->bmap_state = state;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
+ TP_printk("dev %d:%d ino 0x%llx state %s cur 0x%p/%d "
"offset %lld block %lld count %lld flag %d caller %ps",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
- (long)__entry->idx,
+ __entry->leaf,
+ __entry->pos,
__entry->startoff,
(int64_t)__entry->startblock,
__entry->blockcount,
@@ -303,13 +267,15 @@ DECLARE_EVENT_CLASS(xfs_bmap_class,
#define DEFINE_BMAP_EVENT(name) \
DEFINE_EVENT(xfs_bmap_class, name, \
- TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \
+ TP_PROTO(struct xfs_inode *ip, struct xfs_iext_cursor *cur, int state, \
unsigned long caller_ip), \
- TP_ARGS(ip, idx, state, caller_ip))
+ TP_ARGS(ip, cur, state, caller_ip))
+DEFINE_BMAP_EVENT(xfs_iext_insert);
DEFINE_BMAP_EVENT(xfs_iext_remove);
DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
DEFINE_BMAP_EVENT(xfs_bmap_post_update);
-DEFINE_BMAP_EVENT(xfs_extlist);
+DEFINE_BMAP_EVENT(xfs_read_extent);
+DEFINE_BMAP_EVENT(xfs_write_extent);
DECLARE_EVENT_CLASS(xfs_buf_class,
TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 354368a906e5..cef89f7127d3 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -25,6 +25,7 @@
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
+#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_log.h"
@@ -514,11 +515,26 @@ xfsaild(
current->flags |= PF_MEMALLOC;
set_freezable();
- while (!kthread_should_stop()) {
+ while (1) {
if (tout && tout <= 20)
- __set_current_state(TASK_KILLABLE);
+ set_current_state(TASK_KILLABLE);
else
- __set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /*
+ * Check kthread_should_stop() after we set the task state
+ * to guarantee that we either see the stop bit and exit or
+ * the task state is reset to runnable such that it's not
+ * scheduled out indefinitely and detects the stop bit at
+ * next iteration.
+ *
+ * A memory barrier is included in the above task state set to
+ * serialize against kthread_stop().
+ */
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ break;
+ }
spin_lock(&ailp->xa_lock);
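The comment added above relies on the standard kthread idiom; a minimal, hedged sketch of that generic pattern (not xfsaild itself):

#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread(void *data)
{
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);

                /*
                 * kthread_stop() sets the stop bit and wakes the task, and
                 * set_current_state() provides the memory barrier, so either
                 * the bit is observed here and the thread exits, or the
                 * wakeup makes the task runnable again and the next
                 * iteration sees the bit.
                 */
                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        break;
                }

                schedule();
        }
        return 0;
}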
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index ad54610ea6cd..17d61b1f2511 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -126,8 +126,12 @@ struct acpi_exception_info {
#define AE_NOT_CONFIGURED EXCEP_ENV (0x001C)
#define AE_ACCESS EXCEP_ENV (0x001D)
#define AE_IO_ERROR EXCEP_ENV (0x001E)
+#define AE_NUMERIC_OVERFLOW EXCEP_ENV (0x001F)
+#define AE_HEX_OVERFLOW EXCEP_ENV (0x0020)
+#define AE_DECIMAL_OVERFLOW EXCEP_ENV (0x0021)
+#define AE_OCTAL_OVERFLOW EXCEP_ENV (0x0022)
-#define AE_CODE_ENV_MAX 0x001E
+#define AE_CODE_ENV_MAX 0x0022
/*
* Programmer exceptions
@@ -263,7 +267,15 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = {
EXCEP_TXT("AE_NOT_CONFIGURED",
"The interface is not part of the current subsystem configuration"),
EXCEP_TXT("AE_ACCESS", "Permission denied for the requested operation"),
- EXCEP_TXT("AE_IO_ERROR", "An I/O error occurred")
+ EXCEP_TXT("AE_IO_ERROR", "An I/O error occurred"),
+ EXCEP_TXT("AE_NUMERIC_OVERFLOW",
+ "Overflow during string-to-integer conversion"),
+ EXCEP_TXT("AE_HEX_OVERFLOW",
+ "Overflow during ASCII hex-to-binary conversion"),
+ EXCEP_TXT("AE_DECIMAL_OVERFLOW",
+ "Overflow during ASCII decimal-to-binary conversion"),
+ EXCEP_TXT("AE_OCTAL_OVERFLOW",
+ "Overflow during ASCII octal-to-binary conversion")
};
static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = {
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index fa1505292f6c..f849be28e082 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -211,7 +211,7 @@ struct acpi_device_flags {
u32 of_compatible_ok:1;
u32 coherent_dma:1;
u32 cca_seen:1;
- u32 spi_i2c_slave:1;
+ u32 serial_bus_slave:1;
u32 reserved:19;
};
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index c66eb8ffa454..d5c0f5153c4e 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -287,6 +287,8 @@ acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width);
/*
* Platform and hardware-independent physical memory interfaces
*/
+int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width);
+
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_memory
acpi_status
acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 53c5e2f7bcec..e1dd1a8d42b6 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20170728
+#define ACPI_CA_VERSION 0x20170831
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 6b8714a428b6..7a89e6de94da 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -69,6 +69,7 @@
#define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */
#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
+#define ACPI_SIG_PDTT "PDTT" /* Processor Debug Trigger Table */
#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */
@@ -1282,6 +1283,35 @@ struct acpi_nfit_flush_address {
/*******************************************************************************
*
+ * PDTT - Processor Debug Trigger Table (ACPI 6.2)
+ * Version 0
+ *
+ ******************************************************************************/
+
+struct acpi_table_pdtt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 trigger_count;
+ u8 reserved[3];
+ u32 array_offset;
+};
+
+/*
+ * PDTT Communication Channel Identifier Structure.
+ * The number of these structures is defined by trigger_count above,
+ * starting at array_offset.
+ */
+struct acpi_pdtt_channel {
+ u16 sub_channel_id;
+};
+
+/* Mask and Flags for above */
+
+#define ACPI_PDTT_SUBCHANNEL_ID_MASK 0x00FF
+#define ACPI_PDTT_RUNTIME_TRIGGER (1<<8)
+#define ACPI_PDTT_WAIT_COMPLETION (1<<9)
+
+/*******************************************************************************
+ *
* PPTT - Processor Properties Topology Table (ACPI 6.2)
* Version 1
*
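A hedged sketch of how a consumer might walk the trigger array laid out above (trigger_count entries starting array_offset bytes into the table); the function name and printout are illustrative:

#include <linux/acpi.h>
#include <linux/printk.h>

static void example_dump_pdtt(struct acpi_table_pdtt *pdtt)
{
        struct acpi_pdtt_channel *chan;
        u8 i;

        /* The identifier array begins array_offset bytes into the table. */
        chan = (struct acpi_pdtt_channel *)((u8 *)pdtt + pdtt->array_offset);

        for (i = 0; i < pdtt->trigger_count; i++, chan++)
                pr_info("PDTT trigger %u: sub-channel 0x%x\n", i,
                        chan->sub_channel_id & ACPI_PDTT_SUBCHANNEL_ID_MASK);
}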
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index 1797e81a3204..680f80960c3d 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -51,7 +51,6 @@ int erst_clear(u64 record_id);
int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data);
void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err);
-void arch_apei_flush_tlb_one(unsigned long addr);
#endif
#endif
diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h
index 8caa79c61703..cd6ef45e614e 100644
--- a/include/acpi/pcc.h
+++ b/include/acpi/pcc.h
@@ -13,6 +13,7 @@
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
+#define MAX_PCC_SUBSPACES 256
#ifdef CONFIG_PCC
extern struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
int subspace_id);
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 49be4bba1e96..34a028a7bcc5 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -244,4 +244,7 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
#define atomic_long_inc_not_zero(l) \
ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
+#define atomic_long_cond_read_acquire(v, c) \
+ ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c))
+
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h
index 8fb83b43006a..da09fb986459 100644
--- a/include/asm-generic/audit_dir_write.h
+++ b/include/asm-generic/audit_dir_write.h
@@ -31,3 +31,6 @@ __NR_renameat,
__NR_linkat,
__NR_symlinkat,
#endif
+#ifdef __NR_renameat2
+__NR_renameat2,
+#endif
diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h
index dfc52b3c103f..f9f1d0ae11d9 100644
--- a/include/asm-generic/audit_write.h
+++ b/include/asm-generic/audit_write.h
@@ -20,3 +20,6 @@ __NR_ftruncate64,
#ifdef __NR_bind
__NR_bind, /* bind can affect fs object only in one way... */
#endif
+#ifdef __NR_fallocate
+__NR_fallocate,
+#endif
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
index d2013064dc69..dc9726fdac8f 100644
--- a/include/asm-generic/div64.h
+++ b/include/asm-generic/div64.h
@@ -26,6 +26,20 @@
#if BITS_PER_LONG == 64
+/**
+ * do_div - returns 2 values: calculate remainder and update new dividend
+ * @n: pointer to uint64_t dividend (will be updated)
+ * @base: uint32_t divisor
+ *
+ * Summary:
+ * ``uint32_t remainder = *n % base;``
+ * ``*n = *n / base;``
+ *
+ * Return: (uint32_t)remainder
+ *
+ * NOTE: macro parameter @n is evaluated multiple times,
+ * beware of side effects!
+ */
# define do_div(n,base) ({ \
uint32_t __base = (base); \
uint32_t __rem; \
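A hedged usage example of the semantics documented above; the helper and its arguments are illustrative:

#include <linux/types.h>
#include <asm/div64.h>

static u64 example_bytes_to_blocks(u64 bytes, u32 block_size, u32 *rem)
{
        u64 blocks = bytes;

        /* do_div() divides 'blocks' in place and returns bytes % block_size. */
        *rem = do_div(blocks, block_size);

        return blocks;          /* bytes / block_size */
}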
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 7d026bf27713..0f7062bd55e5 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -26,51 +26,20 @@
/*
* Writer states & reader shift and bias.
- *
- * | +0 | +1 | +2 | +3 |
- * ----+----+----+----+----+
- * LE | 78 | 56 | 34 | 12 | 0x12345678
- * ----+----+----+----+----+
- * | wr | rd |
- * +----+----+----+----+
- *
- * ----+----+----+----+----+
- * BE | 12 | 34 | 56 | 78 | 0x12345678
- * ----+----+----+----+----+
- * | rd | wr |
- * +----+----+----+----+
*/
-#define _QW_WAITING 1 /* A writer is waiting */
-#define _QW_LOCKED 0xff /* A writer holds the lock */
-#define _QW_WMASK 0xff /* Writer mask */
-#define _QR_SHIFT 8 /* Reader count shift */
+#define _QW_WAITING 0x100 /* A writer is waiting */
+#define _QW_LOCKED 0x0ff /* A writer holds the lock */
+#define _QW_WMASK 0x1ff /* Writer mask */
+#define _QR_SHIFT 9 /* Reader count shift */
#define _QR_BIAS (1U << _QR_SHIFT)
/*
* External function declarations
*/
-extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
+extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);
/**
- * queued_read_can_lock- would read_trylock() succeed?
- * @lock: Pointer to queue rwlock structure
- */
-static inline int queued_read_can_lock(struct qrwlock *lock)
-{
- return !(atomic_read(&lock->cnts) & _QW_WMASK);
-}
-
-/**
- * queued_write_can_lock- would write_trylock() succeed?
- * @lock: Pointer to queue rwlock structure
- */
-static inline int queued_write_can_lock(struct qrwlock *lock)
-{
- return !atomic_read(&lock->cnts);
-}
-
-/**
* queued_read_trylock - try to acquire read lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
* Return: 1 if lock acquired, 0 if failed
@@ -118,7 +87,7 @@ static inline void queued_read_lock(struct qrwlock *lock)
return;
/* The slowpath will decrement the reader count, if necessary. */
- queued_read_lock_slowpath(lock, cnts);
+ queued_read_lock_slowpath(lock);
}
/**
@@ -147,30 +116,18 @@ static inline void queued_read_unlock(struct qrwlock *lock)
}
/**
- * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock
- * @lock : Pointer to queue rwlock structure
- * Return: the write byte address of a queue rwlock
- */
-static inline u8 *__qrwlock_write_byte(struct qrwlock *lock)
-{
- return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN);
-}
-
-/**
* queued_write_unlock - release write lock of a queue rwlock
* @lock : Pointer to queue rwlock structure
*/
static inline void queued_write_unlock(struct qrwlock *lock)
{
- smp_store_release(__qrwlock_write_byte(lock), 0);
+ smp_store_release(&lock->wlocked, 0);
}
/*
* Remapping rwlock architecture specific functions to the corresponding
* queue rwlock functions.
*/
-#define arch_read_can_lock(l) queued_read_can_lock(l)
-#define arch_write_can_lock(l) queued_write_can_lock(l)
#define arch_read_lock(l) queued_read_lock(l)
#define arch_write_lock(l) queued_write_lock(l)
#define arch_read_trylock(l) queued_read_trylock(l)
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index d93573eff162..137ecdd16daa 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -10,12 +10,23 @@
*/
typedef struct qrwlock {
- atomic_t cnts;
+ union {
+ atomic_t cnts;
+ struct {
+#ifdef __LITTLE_ENDIAN
+ u8 wlocked; /* Locked for write? */
+ u8 __lstate[3];
+#else
+ u8 __lstate[3];
+ u8 wlocked; /* Locked for write? */
+#endif
+ };
+ };
arch_spinlock_t wait_lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { \
- .cnts = ATOMIC_INIT(0), \
+ { .cnts = ATOMIC_INIT(0), }, \
.wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
}
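A hedged illustration of the reworked layout: the writer byte occupies bits 0-7, the wait bit is bit 8, and readers are counted from bit 9, which is what allows the union above to expose wlocked as a single byte for queued_write_unlock(). The decode helpers below are illustrative, not kernel API:

#include <linux/types.h>

#define EX_QW_LOCKED    0x0ff   /* writer byte, bits 0-7 */
#define EX_QW_WMASK     0x1ff   /* writer byte plus wait bit */
#define EX_QR_SHIFT     9       /* readers start above the wait bit */

static inline bool ex_write_locked(u32 cnts)
{
        return (cnts & EX_QW_WMASK) == EX_QW_LOCKED;
}

static inline u32 ex_reader_count(u32 cnts)
{
        return cnts >> EX_QR_SHIFT;
}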
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 66260777d644..b37b4ad7eb94 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -121,6 +121,5 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
#define arch_spin_lock(l) queued_spin_lock(l)
#define arch_spin_trylock(l) queued_spin_trylock(l)
#define arch_spin_unlock(l) queued_spin_unlock(l)
-#define arch_spin_lock_flags(l, f) queued_spin_lock(l)
#endif /* __ASM_GENERIC_QSPINLOCK_H */
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index bdbe43bac230..93e67a055a4d 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -38,6 +38,16 @@ static inline void __down_read(struct rw_semaphore *sem)
rwsem_down_read_failed(sem);
}
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
+ if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+ return -EINTR;
+ }
+
+ return 0;
+}
+
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
long tmp;
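A hedged usage sketch for the killable read-side acquire added above; the surrounding function is illustrative:

#include <linux/rwsem.h>

static int example_read_op(struct rw_semaphore *sem)
{
        /* Returns -EINTR if a fatal signal arrives while waiting. */
        if (down_read_killable(sem))
                return -EINTR;

        /* ... read-side critical section ... */

        up_read(sem);
        return 0;
}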
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8acfc1e099e1..bdcd1caae092 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -459,6 +459,7 @@
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
*(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
+ *(.text..refcount) \
*(.ref.text) \
MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \
@@ -687,7 +688,7 @@
#define BUG_TABLE
#endif
-#ifdef CONFIG_ORC_UNWINDER
+#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE \
. = ALIGN(4); \
.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
@@ -778,6 +779,24 @@
#endif
/*
+ * Memory encryption operates on a page basis. Since we need to clear
+ * the memory encryption mask for this section, it needs to be aligned
+ * on a page boundary and be a page-size multiple in length.
+ *
+ * Note: We use a separate section so that only this section gets
+ * decrypted to avoid exposing more than we wish.
+ */
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+#define PERCPU_DECRYPTED_SECTION \
+ . = ALIGN(PAGE_SIZE); \
+ *(.data..percpu..decrypted) \
+ . = ALIGN(PAGE_SIZE);
+#else
+#define PERCPU_DECRYPTED_SECTION
+#endif
+
+
+/*
* Default discarded sections.
*
* Some archs want to discard exit text/data at runtime rather than
@@ -815,6 +834,7 @@
. = ALIGN(cacheline); \
*(.data..percpu) \
*(.data..percpu..shared_aligned) \
+ PERCPU_DECRYPTED_SECTION \
VMLINUX_SYMBOL(__per_cpu_end) = .;
/**
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index cc805b72994a..349e5957c949 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -67,7 +67,9 @@ enum arch_timer_spi_nr {
#define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */
#define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */
-#define ARCH_TIMER_EVT_STREAM_FREQ 10000 /* 100us */
+#define ARCH_TIMER_EVT_STREAM_PERIOD_US 100
+#define ARCH_TIMER_EVT_STREAM_FREQ \
+ (USEC_PER_SEC / ARCH_TIMER_EVT_STREAM_PERIOD_US)
struct arch_timer_kvm_info {
struct timecounter timecounter;
@@ -93,6 +95,7 @@ struct arch_timer_mem {
extern u32 arch_timer_get_rate(void);
extern u64 (*arch_timer_read_counter)(void);
extern struct arch_timer_kvm_info *arch_timer_get_kvm_info(void);
+extern bool arch_timer_evtstrm_available(void);
#else
@@ -106,6 +109,11 @@ static inline u64 arch_timer_read_counter(void)
return 0;
}
+static inline bool arch_timer_evtstrm_available(void)
+{
+ return false;
+}
+
#endif
#endif
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index f638998fb6d0..71e1bb24d79f 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -53,7 +53,7 @@ struct dh {
*
* Return: size of the key in bytes
*/
-int crypto_dh_key_len(const struct dh *params);
+unsigned int crypto_dh_key_len(const struct dh *params);
/**
* crypto_dh_encode_key() - encode the private key
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 22f884c97387..8f941102af36 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -126,8 +126,7 @@ struct drbg_state {
__u8 *ctr_null_value; /* CTR mode aligned zero buf */
__u8 *outscratchpadbuf; /* CTR mode output scratchpad */
__u8 *outscratchpad; /* CTR mode aligned outbuf */
- struct completion ctr_completion; /* CTR mode async handler */
- int ctr_async_err; /* CTR mode async error */
+ struct crypto_wait ctr_wait; /* CTR mode async wait obj */
bool seeded; /* DRBG fully seeded? */
bool pr; /* Prediction resistance enabled? */
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
index 1aff2a8a3a68..d696317c43a8 100644
--- a/include/crypto/ecdh.h
+++ b/include/crypto/ecdh.h
@@ -54,7 +54,7 @@ struct ecdh {
*
* Return: size of the key in bytes
*/
-int crypto_ecdh_key_len(const struct ecdh *params);
+unsigned int crypto_ecdh_key_len(const struct ecdh *params);
/**
* crypto_ecdh_encode_key() - encode the private key
diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h
new file mode 100644
index 000000000000..c50e057ea17e
--- /dev/null
+++ b/include/crypto/gcm.h
@@ -0,0 +1,8 @@
+#ifndef _CRYPTO_GCM_H
+#define _CRYPTO_GCM_H
+
+#define GCM_AES_IV_SIZE 12
+#define GCM_RFC4106_IV_SIZE 8
+#define GCM_RFC4543_IV_SIZE 8
+
+#endif
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index 0977fb18ff68..fa0a63d298dc 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -227,7 +227,7 @@ struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
-
+void gf128mul_x8_ble(le128 *r, const le128 *x);
static inline void gf128mul_free_4k(struct gf128mul_4k *t)
{
kzfree(t);
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index b5727bcd2336..0ed31fd80242 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -75,6 +75,7 @@ struct ahash_request {
* state of the HASH transformation at the beginning. This shall fill in
* the internal structures used during the entire duration of the whole
* transformation. No data processing happens at this point.
+ * Note: mandatory.
* @update: Push a chunk of data into the driver for transformation. This
* function actually pushes blocks of data from upper layers into the
* driver, which then passes those to the hardware as seen fit. This
@@ -84,16 +85,20 @@ struct ahash_request {
* context, as this function may be called in parallel with the same
* transformation object. Data processing can happen synchronously
* [SHASH] or asynchronously [AHASH] at this point.
+ * Note: mandatory.
* @final: Retrieve result from the driver. This function finalizes the
* transformation and retrieves the resulting hash from the driver and
* pushes it back to upper layers. No data processing happens at this
- * point.
+ * point unless hardware requires it to finish the transformation
+ * (then the data buffered by the device driver is processed).
+ * Note: mandatory.
* @finup: Combination of @update and @final. This function is effectively a
* combination of @update and @final calls issued in sequence. As some
* hardware cannot do @update and @final separately, this callback was
* added to allow such hardware to be used at least by IPsec. Data
* processing can happen synchronously [SHASH] or asynchronously [AHASH]
* at this point.
+ * Note: optional.
* @digest: Combination of @init and @update and @final. This function
* effectively behaves as the entire chain of operations, @init,
* @update and @final issued in sequence. Just like @finup, this was
@@ -416,11 +421,10 @@ static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
* needed to perform the cipher operation
*
* This function is a "short-hand" for the function calls of
- * crypto_ahash_update and crypto_shash_final. The parameters have the same
+ * crypto_ahash_update and crypto_ahash_final. The parameters have the same
* meaning as discussed for those separate functions.
*
- * Return: 0 if the message digest creation was successful; < 0 if an error
- * occurred
+ * Return: see crypto_ahash_final()
*/
int crypto_ahash_finup(struct ahash_request *req);
@@ -433,8 +437,11 @@ int crypto_ahash_finup(struct ahash_request *req);
* based on all data added to the cipher handle. The message digest is placed
* into the output buffer registered with the ahash_request handle.
*
- * Return: 0 if the message digest creation was successful; < 0 if an error
- * occurred
+ * Return:
+ * 0 if the message digest was successfully calculated;
+ * -EINPROGRESS if data is fed into hardware (DMA) or queued for later;
+ * -EBUSY if queue is full and request should be resubmitted later;
+ * other < 0 if an error occurred
*/
int crypto_ahash_final(struct ahash_request *req);
@@ -447,8 +454,7 @@ int crypto_ahash_final(struct ahash_request *req);
* crypto_ahash_update and crypto_ahash_final. The parameters have the same
* meaning as discussed for those separate three functions.
*
- * Return: 0 if the message digest creation was successful; < 0 if an error
- * occurred
+ * Return: see crypto_ahash_final()
*/
int crypto_ahash_digest(struct ahash_request *req);
@@ -493,8 +499,7 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
* handle. Any potentially existing state created by previous operations is
* discarded.
*
- * Return: 0 if the message digest initialization was successful; < 0 if an
- * error occurred
+ * Return: see crypto_ahash_final()
*/
static inline int crypto_ahash_init(struct ahash_request *req)
{
@@ -510,8 +515,7 @@ static inline int crypto_ahash_init(struct ahash_request *req)
* is pointed to by the scatter/gather list registered in the &ahash_request
* handle
*
- * Return: 0 if the message digest update was successful; < 0 if an error
- * occurred
+ * Return: see crypto_ahash_final()
*/
static inline int crypto_ahash_update(struct ahash_request *req)
{
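A hedged sketch of consuming the return convention documented above together with the crypto_wait helpers this series moves callers to; error handling is trimmed and the function name is illustrative:

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_sha256(const u8 *buf, unsigned int len, u8 *digest)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_ahash(tfm);
                return -ENOMEM;
        }

        /* 'digest' must hold crypto_ahash_digestsize(tfm) bytes (32 here). */
        sg_init_one(&sg, buf, len);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);
        ahash_request_set_crypt(req, &sg, digest, len);

        /* crypto_wait_req() folds -EINPROGRESS/-EBUSY into the final status. */
        err = crypto_wait_req(crypto_ahash_digest(req), &wait);

        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return err;
}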
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 75ec9c662268..6abf0a3604dc 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -40,11 +40,6 @@ struct alg_sock {
void *private;
};
-struct af_alg_completion {
- struct completion completion;
- int err;
-};
-
struct af_alg_control {
struct af_alg_iv *iv;
int op;
@@ -152,7 +147,7 @@ struct af_alg_ctx {
void *iv;
size_t aead_assoclen;
- struct af_alg_completion completion;
+ struct crypto_wait wait;
size_t used;
size_t rcvused;
@@ -177,19 +172,11 @@ void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new);
int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
-int af_alg_wait_for_completion(int err, struct af_alg_completion *completion);
-void af_alg_complete(struct crypto_async_request *req, int err);
-
static inline struct alg_sock *alg_sk(struct sock *sk)
{
return (struct alg_sock *)sk;
}
-static inline void af_alg_init_completion(struct af_alg_completion *completion)
-{
- init_completion(&completion->completion);
-}
-
/**
* Size of available buffer for sending data from user space to kernel.
*
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h
new file mode 100644
index 000000000000..1438942dc773
--- /dev/null
+++ b/include/crypto/sm3.h
@@ -0,0 +1,40 @@
+/*
+ * Common values for SM3 algorithm
+ */
+
+#ifndef _CRYPTO_SM3_H
+#define _CRYPTO_SM3_H
+
+#include <linux/types.h>
+
+#define SM3_DIGEST_SIZE 32
+#define SM3_BLOCK_SIZE 64
+
+#define SM3_T1 0x79CC4519
+#define SM3_T2 0x7A879D8A
+
+#define SM3_IVA 0x7380166f
+#define SM3_IVB 0x4914b2b9
+#define SM3_IVC 0x172442d7
+#define SM3_IVD 0xda8a0600
+#define SM3_IVE 0xa96f30bc
+#define SM3_IVF 0x163138aa
+#define SM3_IVG 0xe38dee4d
+#define SM3_IVH 0xb0fb0e4e
+
+extern const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE];
+
+struct sm3_state {
+ u32 state[SM3_DIGEST_SIZE / 4];
+ u64 count;
+ u8 buffer[SM3_BLOCK_SIZE];
+};
+
+struct shash_desc;
+
+extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+
+extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+#endif
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
new file mode 100644
index 000000000000..256948e39296
--- /dev/null
+++ b/include/crypto/sm3_base.h
@@ -0,0 +1,117 @@
+/*
+ * sm3_base.h - core logic for SM3 implementations
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Written by Gilad Ben-Yossef <gilad@benyossef.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sm3.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+
+typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks);
+
+static inline int sm3_base_init(struct shash_desc *desc)
+{
+ struct sm3_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SM3_IVA;
+ sctx->state[1] = SM3_IVB;
+ sctx->state[2] = SM3_IVC;
+ sctx->state[3] = SM3_IVD;
+ sctx->state[4] = SM3_IVE;
+ sctx->state[5] = SM3_IVF;
+ sctx->state[6] = SM3_IVG;
+ sctx->state[7] = SM3_IVH;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static inline int sm3_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sm3_block_fn *block_fn)
+{
+ struct sm3_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+
+ sctx->count += len;
+
+ if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) {
+ int blocks;
+
+ if (partial) {
+ int p = SM3_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buffer + partial, data, p);
+ data += p;
+ len -= p;
+
+ block_fn(sctx, sctx->buffer, 1);
+ }
+
+ blocks = len / SM3_BLOCK_SIZE;
+ len %= SM3_BLOCK_SIZE;
+
+ if (blocks) {
+ block_fn(sctx, data, blocks);
+ data += blocks * SM3_BLOCK_SIZE;
+ }
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buffer + partial, data, len);
+
+ return 0;
+}
+
+static inline int sm3_base_do_finalize(struct shash_desc *desc,
+ sm3_block_fn *block_fn)
+{
+ const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64);
+ struct sm3_state *sctx = shash_desc_ctx(desc);
+ __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
+ unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+
+ sctx->buffer[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial);
+ partial = 0;
+
+ block_fn(sctx, sctx->buffer, 1);
+ }
+
+ memset(sctx->buffer + partial, 0x0, bit_offset - partial);
+ *bits = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, sctx->buffer, 1);
+
+ return 0;
+}
+
+static inline int sm3_base_finish(struct shash_desc *desc, u8 *out)
+{
+ struct sm3_state *sctx = shash_desc_ctx(desc);
+ __be32 *digest = (__be32 *)out;
+ int i;
+
+ for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], digest++);
+
+ *sctx = (struct sm3_state){};
+ return 0;
+}
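A hedged sketch of the glue a plain SM3 shash driver could layer on the helpers above (assuming crypto/sm3_base.h is included); sm3_example_block() stands in for a real compression routine and is not implemented here:

static void sm3_example_block(struct sm3_state *sst, u8 const *src,
                              int blocks);

static int sm3_example_update(struct shash_desc *desc, const u8 *data,
                              unsigned int len)
{
        return sm3_base_do_update(desc, data, len, sm3_example_block);
}

static int sm3_example_final(struct shash_desc *desc, u8 *out)
{
        /* Pad, append the bit count, then emit the big-endian digest. */
        sm3_base_do_finalize(desc, sm3_example_block);
        return sm3_base_finish(desc, out);
}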
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.h b/include/drm/amd_asic_type.h
index 9df1fea8e971..599028f66585 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.h
+++ b/include/drm/amd_asic_type.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,28 +20,33 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#ifndef __AMD_ASIC_TYPE_H__
+#define __AMD_ASIC_TYPE_H__
/*
- * radeon_kfd.h defines the private interface between the
- * AMD kernel graphics drivers and the AMD KFD.
+ * Supported ASIC types
*/
+enum amd_asic_type {
+ CHIP_TAHITI = 0,
+ CHIP_PITCAIRN,
+ CHIP_VERDE,
+ CHIP_OLAND,
+ CHIP_HAINAN,
+ CHIP_BONAIRE,
+ CHIP_KAVERI,
+ CHIP_KABINI,
+ CHIP_HAWAII,
+ CHIP_MULLINS,
+ CHIP_TOPAZ,
+ CHIP_TONGA,
+ CHIP_FIJI,
+ CHIP_CARRIZO,
+ CHIP_STONEY,
+ CHIP_POLARIS10,
+ CHIP_POLARIS11,
+ CHIP_POLARIS12,
+ CHIP_VEGA10,
+ CHIP_RAVEN,
+ CHIP_LAST,
+};
-#ifndef RADEON_KFD_H_INCLUDED
-#define RADEON_KFD_H_INCLUDED
-
-#include <linux/types.h>
-#include "kgd_kfd_interface.h"
-
-struct radeon_device;
-
-int radeon_kfd_init(void);
-void radeon_kfd_fini(void);
-
-void radeon_kfd_suspend(struct radeon_device *rdev);
-int radeon_kfd_resume(struct radeon_device *rdev);
-void radeon_kfd_interrupt(struct radeon_device *rdev,
- const void *ih_ring_entry);
-void radeon_kfd_device_probe(struct radeon_device *rdev);
-void radeon_kfd_device_init(struct radeon_device *rdev);
-void radeon_kfd_device_fini(struct radeon_device *rdev);
-
-#endif /* RADEON_KFD_H_INCLUDED */
+#endif /*__AMD_ASIC_TYPE_H__ */
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h
index fbdfc8d7f3c7..96a5e0f6ff12 100644
--- a/include/drm/bridge/mhl.h
+++ b/include/drm/bridge/mhl.h
@@ -262,6 +262,10 @@ enum {
#define MHL_RAPK_UNSUPPORTED 0x02 /* Rcvd RAP action code not supported */
#define MHL_RAPK_BUSY 0x03 /* Responder too busy to respond */
+/* Bit masks for RCP messages */
+#define MHL_RCP_KEY_RELEASED_MASK 0x80
+#define MHL_RCP_KEY_ID_MASK 0x7F
+
/*
* Error status codes for RCPE messages
*/
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 7277783a4ff0..59be1232d005 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -136,6 +136,7 @@ struct pci_controller;
#define DRM_UT_ATOMIC 0x10
#define DRM_UT_VBL 0x20
#define DRM_UT_STATE 0x40
+#define DRM_UT_LEASE 0x80
/***********************************************************************/
/** \name DRM template customization defaults */
@@ -250,6 +251,9 @@ struct pci_controller;
#define DRM_DEBUG_VBL(fmt, ...) \
drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_LEASE(fmt, ...) \
+ drm_printk(KERN_DEBUG, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+
#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...) \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 8a5808eb5628..5afd6e364fb6 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -144,7 +144,6 @@ struct __drm_planes_state {
struct __drm_crtcs_state {
struct drm_crtc *ptr;
struct drm_crtc_state *state, *old_state, *new_state;
- struct drm_crtc_commit *commit;
s32 __user *out_fence_ptr;
unsigned last_vblank_count;
};
@@ -237,6 +236,18 @@ struct drm_atomic_state {
struct drm_modeset_acquire_ctx *acquire_ctx;
/**
+ * @fake_commit:
+ *
+ * Used for signaling unbound planes/connectors.
+ * When a connector or plane is not bound to any CRTC, it's still important
+ * to preserve linearity to prevent the atomic states from being freed too early.
+ *
+ * This commit (if set) is not bound to any crtc, but will be completed when
+ * drm_atomic_helper_commit_hw_done() is called.
+ */
+ struct drm_crtc_commit *fake_commit;
+
+ /**
* @commit_work:
*
* Work item which can be used by the driver or helpers to execute the
@@ -252,10 +263,14 @@ void __drm_crtc_commit_free(struct kref *kref);
* @commit: CRTC commit
*
* Increases the reference of @commit.
+ *
+ * Returns:
+ * The pointer to @commit, with reference increased.
*/
-static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
+static inline struct drm_crtc_commit *drm_crtc_commit_get(struct drm_crtc_commit *commit)
{
kref_get(&commit->ref);
+ return commit;
}
/**
@@ -555,31 +570,6 @@ int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
/**
- * for_each_connector_in_state - iterate over all connectors in an atomic update
- * @__state: &struct drm_atomic_state pointer
- * @connector: &struct drm_connector iteration cursor
- * @connector_state: &struct drm_connector_state iteration cursor
- * @__i: int iteration cursor, for macro-internal use
- *
- * This iterates over all connectors in an atomic update. Note that before the
- * software state is committed (by calling drm_atomic_helper_swap_state(), this
- * points to the new state, while afterwards it points to the old state. Due to
- * this tricky confusion this macro is deprecated.
- *
- * FIXME:
- *
- * Replace all usage of this with one of the explicit iterators below and then
- * remove this macro.
- */
-#define for_each_connector_in_state(__state, connector, connector_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->num_connector && \
- ((connector) = (__state)->connectors[__i].ptr, \
- (connector_state) = (__state)->connectors[__i].state, 1); \
- (__i)++) \
- for_each_if (connector)
-
-/**
* for_each_oldnew_connector_in_state - iterate over all connectors in an atomic update
* @__state: &struct drm_atomic_state pointer
* @connector: &struct drm_connector iteration cursor
@@ -595,12 +585,12 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
*/
#define for_each_oldnew_connector_in_state(__state, connector, old_connector_state, new_connector_state, __i) \
for ((__i) = 0; \
- (__i) < (__state)->num_connector && \
- ((connector) = (__state)->connectors[__i].ptr, \
- (old_connector_state) = (__state)->connectors[__i].old_state, \
- (new_connector_state) = (__state)->connectors[__i].new_state, 1); \
- (__i)++) \
- for_each_if (connector)
+ (__i) < (__state)->num_connector; \
+ (__i)++) \
+ for_each_if ((__state)->connectors[__i].ptr && \
+ ((connector) = (__state)->connectors[__i].ptr, \
+ (old_connector_state) = (__state)->connectors[__i].old_state, \
+ (new_connector_state) = (__state)->connectors[__i].new_state, 1))
/**
* for_each_old_connector_in_state - iterate over all connectors in an atomic update
@@ -616,11 +606,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
*/
#define for_each_old_connector_in_state(__state, connector, old_connector_state, __i) \
for ((__i) = 0; \
- (__i) < (__state)->num_connector && \
- ((connector) = (__state)->connectors[__i].ptr, \
- (old_connector_state) = (__state)->connectors[__i].old_state, 1); \
- (__i)++) \
- for_each_if (connector)
+ (__i) < (__state)->num_connector; \
+ (__i)++) \
+ for_each_if ((__state)->connectors[__i].ptr && \
+ ((connector) = (__state)->connectors[__i].ptr, \
+ (old_connector_state) = (__state)->connectors[__i].old_state, 1))
/**
* for_each_new_connector_in_state - iterate over all connectors in an atomic update
@@ -636,36 +626,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
*/
#define for_each_new_connector_in_state(__state, connector, new_connector_state, __i) \
for ((__i) = 0; \
- (__i) < (__state)->num_connector && \
- ((connector) = (__state)->connectors[__i].ptr, \
- (new_connector_state) = (__state)->connectors[__i].new_state, 1); \
- (__i)++) \
- for_each_if (connector)
-
-/**
- * for_each_crtc_in_state - iterate over all connectors in an atomic update
- * @__state: &struct drm_atomic_state pointer
- * @crtc: &struct drm_crtc iteration cursor
- * @crtc_state: &struct drm_crtc_state iteration cursor
- * @__i: int iteration cursor, for macro-internal use
- *
- * This iterates over all CRTCs in an atomic update. Note that before the
- * software state is committed (by calling drm_atomic_helper_swap_state(), this
- * points to the new state, while afterwards it points to the old state. Due to
- * this tricky confusion this macro is deprecated.
- *
- * FIXME:
- *
- * Replace all usage of this with one of the explicit iterators below and then
- * remove this macro.
- */
-#define for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->dev->mode_config.num_crtc && \
- ((crtc) = (__state)->crtcs[__i].ptr, \
- (crtc_state) = (__state)->crtcs[__i].state, 1); \
- (__i)++) \
- for_each_if (crtc_state)
+ (__i) < (__state)->num_connector; \
+ (__i)++) \
+ for_each_if ((__state)->connectors[__i].ptr && \
+ ((connector) = (__state)->connectors[__i].ptr, \
+ (new_connector_state) = (__state)->connectors[__i].new_state, 1))
/**
* for_each_oldnew_crtc_in_state - iterate over all CRTCs in an atomic update
@@ -681,12 +646,12 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
*/
#define for_each_oldnew_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
for ((__i) = 0; \
- (__i) < (__state)->dev->mode_config.num_crtc && \
- ((crtc) = (__state)->crtcs[__i].ptr, \
- (old_crtc_state) = (__state)->crtcs[__i].old_state, \
- (new_crtc_state) = (__state)->crtcs[__i].new_state, 1); \
+ (__i) < (__state)->dev->mode_config.num_crtc; \
(__i)++) \
- for_each_if (crtc)
+ for_each_if ((__state)->crtcs[__i].ptr && \
+ ((crtc) = (__state)->crtcs[__i].ptr, \
+ (old_crtc_state) = (__state)->crtcs[__i].old_state, \
+ (new_crtc_state) = (__state)->crtcs[__i].new_state, 1))
/**
* for_each_old_crtc_in_state - iterate over all CRTCs in an atomic update
@@ -701,11 +666,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
*/
#define for_each_old_crtc_in_state(__state, crtc, old_crtc_state, __i) \
for ((__i) = 0; \
- (__i) < (__state)->dev->mode_config.num_crtc && \
- ((crtc) = (__state)->crtcs[__i].ptr, \
- (old_crtc_state) = (__state)->crtcs[__i].old_state, 1); \
+ (__i) < (__state)->dev->mode_config.num_crtc; \
(__i)++) \
- for_each_if (crtc)
+ for_each_if ((__state)->crtcs[__i].ptr && \
+ ((crtc) = (__state)->crtcs[__i].ptr, \
+ (old_crtc_state) = (__state)->crtcs[__i].old_state, 1))
/**
* for_each_new_crtc_in_state - iterate over all CRTCs in an atomic update
@@ -720,36 +685,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
*/
#define for_each_new_crtc_in_state(__state, crtc, new_crtc_state, __i) \
for ((__i) = 0; \
- (__i) < (__state)->dev->mode_config.num_crtc && \
- ((crtc) = (__state)->crtcs[__i].ptr, \
- (new_crtc_state) = (__state)->crtcs[__i].new_state, 1); \
+ (__i) < (__state)->dev->mode_config.num_crtc; \
(__i)++) \
- for_each_if (crtc)
-
-/**
- * for_each_plane_in_state - iterate over all planes in an atomic update
- * @__state: &struct drm_atomic_state pointer
- * @plane: &struct drm_plane iteration cursor
- * @plane_state: &struct drm_plane_state iteration cursor
- * @__i: int iteration cursor, for macro-internal use
- *
- * This iterates over all planes in an atomic update. Note that before the
- * software state is committed (by calling drm_atomic_helper_swap_state(), this
- * points to the new state, while afterwards it points to the old state. Due to
- * this tricky confusion this macro is deprecated.
- *
- * FIXME:
- *
- * Replace all usage of this with one of the explicit iterators below and then
- * remove this macro.
- */
-#define for_each_plane_in_state(__state, plane, plane_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->dev->mode_config.num_total_plane && \
- ((plane) = (__state)->planes[__i].ptr, \
- (plane_state) = (__state)->planes[__i].state, 1); \
- (__i)++) \
- for_each_if (plane_state)
+ for_each_if ((__state)->crtcs[__i].ptr && \
+ ((crtc) = (__state)->crtcs[__i].ptr, \
+ (new_crtc_state) = (__state)->crtcs[__i].new_state, 1))
/**
* for_each_oldnew_plane_in_state - iterate over all planes in an atomic update
@@ -765,12 +705,12 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
*/
#define for_each_oldnew_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
for ((__i) = 0; \
- (__i) < (__state)->dev->mode_config.num_total_plane && \
- ((plane) = (__state)->planes[__i].ptr, \
- (old_plane_state) = (__state)->planes[__i].old_state, \
- (new_plane_state) = (__state)->planes[__i].new_state, 1); \
+ (__i) < (__state)->dev->mode_config.num_total_plane; \
(__i)++) \
- for_each_if (plane)
+ for_each_if ((__state)->planes[__i].ptr && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (old_plane_state) = (__state)->planes[__i].old_state,\
+ (new_plane_state) = (__state)->planes[__i].new_state, 1))
/**
* for_each_old_plane_in_state - iterate over all planes in an atomic update
@@ -785,12 +725,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
*/
#define for_each_old_plane_in_state(__state, plane, old_plane_state, __i) \
for ((__i) = 0; \
- (__i) < (__state)->dev->mode_config.num_total_plane && \
- ((plane) = (__state)->planes[__i].ptr, \
- (old_plane_state) = (__state)->planes[__i].old_state, 1); \
+ (__i) < (__state)->dev->mode_config.num_total_plane; \
(__i)++) \
- for_each_if (plane)
-
+ for_each_if ((__state)->planes[__i].ptr && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (old_plane_state) = (__state)->planes[__i].old_state, 1))
/**
* for_each_new_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
@@ -804,11 +743,11 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
*/
#define for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
for ((__i) = 0; \
- (__i) < (__state)->dev->mode_config.num_total_plane && \
- ((plane) = (__state)->planes[__i].ptr, \
- (new_plane_state) = (__state)->planes[__i].new_state, 1); \
+ (__i) < (__state)->dev->mode_config.num_total_plane; \
(__i)++) \
- for_each_if (plane)
+ for_each_if ((__state)->planes[__i].ptr && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (new_plane_state) = (__state)->planes[__i].new_state, 1))
/**
* for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update
@@ -828,8 +767,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
((obj) = (__state)->private_objs[__i].ptr, \
(old_obj_state) = (__state)->private_objs[__i].old_state, \
(new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
- (__i)++) \
- for_each_if (obj)
+ (__i)++)
/**
* for_each_old_private_obj_in_state - iterate over all private objects in an atomic update
@@ -847,8 +785,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i) < (__state)->num_private_objs && \
((obj) = (__state)->private_objs[__i].ptr, \
(old_obj_state) = (__state)->private_objs[__i].old_state, 1); \
- (__i)++) \
- for_each_if (obj)
+ (__i)++)
/**
* for_each_new_private_obj_in_state - iterate over all private objects in an atomic update
@@ -866,8 +803,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i) < (__state)->num_private_objs && \
((obj) = (__state)->private_objs[__i].ptr, \
(new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
- (__i)++) \
- for_each_if (obj)
+ (__i)++)
/**
* drm_atomic_crtc_needs_modeset - compute combined modeset need
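A hedged usage sketch for the reworked iterators, which now skip entries with a NULL object pointer inside the macro itself; the commit-tail hook is illustrative:

#include <drm/drmP.h>
#include <drm/drm_atomic.h>

static void example_commit_tail(struct drm_atomic_state *state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int i;

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
                if (drm_atomic_crtc_needs_modeset(new_crtc_state))
                        DRM_DEBUG_ATOMIC("[CRTC:%u] full modeset\n",
                                         crtc->base.id);
        }
}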
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
index 81a40c2a9a3e..86bff9841b54 100644
--- a/include/drm/drm_auth.h
+++ b/include/drm/drm_auth.h
@@ -52,6 +52,12 @@ struct drm_lock_data {
* @dev: Link back to the DRM device
* @lock: DRI1 lock information.
* @driver_priv: Pointer to driver-private information.
+ * @lessor: Lease holder
+ * @lessee_id: id for lessees. Owners always have id 0
+ * @lessee_list: other lessees of the same master
+ * @lessees: drm_masters leasing from this one
+ * @leases: Objects leased to this drm_master.
+ * @lessee_idr: All lessees under this owner (only used where lessor == NULL)
*
* Note that master structures are only relevant for the legacy/primary device
* nodes, hence there can only be one per device, not one per drm_minor.
@@ -76,10 +82,25 @@ struct drm_master {
struct idr magic_map;
struct drm_lock_data lock;
void *driver_priv;
+
+ /* Tree of display resource leases, each of which is a drm_master struct
+ * All of these get activated simultaneously, so drm_device master points
+ * at the top of the tree (for which lessor is NULL). Protected by
+ * &drm_device.mode_config.idr_mutex.
+ */
+
+ struct drm_master *lessor;
+ int lessee_id;
+ struct list_head lessee_list;
+ struct list_head lessees;
+ struct idr leases;
+ struct idr lessee_idr;
};
struct drm_master *drm_master_get(struct drm_master *master);
void drm_master_put(struct drm_master **master);
bool drm_is_current_master(struct drm_file *fpriv);
+struct drm_master *drm_master_create(struct drm_device *dev);
+
#endif
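A hedged helper sketch (not part of this header) that follows the lessor chain documented above up to the owning master, i.e. the node whose lessor is NULL:

static struct drm_master *example_lease_owner(struct drm_master *master)
{
        /* Owners sit at the top of the lease tree and have no lessor. */
        while (master->lessor)
                master = master->lessor;
        return master;
}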
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 6522d4cbc9d9..682d01ba920c 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -245,7 +245,7 @@ struct drm_bridge {
void *driver_private;
};
-int drm_bridge_add(struct drm_bridge *bridge);
+void drm_bridge_add(struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
struct drm_bridge *of_drm_find_bridge(struct device_node *np);
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index ea8da401c93c..7a7140543012 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -347,6 +347,13 @@ struct drm_connector_state {
struct drm_atomic_state *state;
+ /**
+ * @commit: Tracks the pending commit to prevent use-after-free conditions.
+ *
+ * Is only set when @crtc is NULL.
+ */
+ struct drm_crtc_commit *commit;
+
struct drm_tv_connector_state tv;
/**
@@ -888,8 +895,7 @@ struct drm_connector {
* This is protected by @drm_mode_config.connection_mutex. Note that
* nonblocking atomic commits access the current connector state without
* taking locks. Either by going through the &struct drm_atomic_state
- * pointers, see for_each_connector_in_state(),
- * for_each_oldnew_connector_in_state(),
+ * pointers, see for_each_oldnew_connector_in_state(),
* for_each_old_connector_in_state() and
* for_each_new_connector_in_state(). Or through careful ordering of
* atomic commit operations as implemented in the atomic helpers, see
@@ -927,16 +933,18 @@ static inline unsigned drm_connector_index(struct drm_connector *connector)
/**
* drm_connector_lookup - lookup connector object
* @dev: DRM device
+ * @file_priv: drm file to check for lease against.
* @id: connector object id
*
* This function looks up the connector object specified by id
* add takes a reference to it.
*/
static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t id)
{
struct drm_mode_object *mo;
- mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR);
+ mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CONNECTOR);
return mo ? obj_to_connector(mo) : NULL;
}
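A hedged usage sketch of the lease-aware lookup above, written as an ioctl-style helper; the function name and error handling are illustrative:

static int example_check_connector(struct drm_device *dev,
                                   struct drm_file *file_priv, u32 id)
{
        struct drm_connector *connector;

        /* Fails for IDs outside the caller's lease as well as unknown IDs. */
        connector = drm_connector_lookup(dev, file_priv, id);
        if (!connector)
                return -ENOENT;

        /* ... use the connector ... */

        drm_connector_put(connector);
        return 0;
}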
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 1a642020e306..a2d81d2907a9 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -253,6 +253,15 @@ struct drm_crtc_state {
*/
struct drm_pending_vblank_event *event;
+ /**
+ * @commit:
+ *
+ * This tracks how the commit for this update proceeds through the
+ * various phases. This is never cleared, except when we destroy the
+ * state, so that subsequent commits can synchronize with previous ones.
+ */
+ struct drm_crtc_commit *commit;
+
struct drm_atomic_state *state;
};
@@ -797,10 +806,10 @@ struct drm_crtc {
* This is protected by @mutex. Note that nonblocking atomic commits
* access the current CRTC state without taking locks. Either by going
* through the &struct drm_atomic_state pointers, see
- * for_each_crtc_in_state(), for_each_oldnew_crtc_in_state(),
- * for_each_old_crtc_in_state() and for_each_new_crtc_in_state(). Or
- * through careful ordering of atomic commit operations as implemented
- * in the atomic helpers, see &struct drm_crtc_commit.
+ * for_each_oldnew_crtc_in_state(), for_each_old_crtc_in_state() and
+ * for_each_new_crtc_in_state(). Or through careful ordering of atomic
+ * commit operations as implemented in the atomic helpers, see
+ * &struct drm_crtc_commit.
*/
struct drm_crtc_state *state;
@@ -808,10 +817,16 @@ struct drm_crtc {
* @commit_list:
*
* List of &drm_crtc_commit structures tracking pending commits.
- * Protected by @commit_lock. This list doesn't hold its own full
- * reference, but burrows it from the ongoing commit. Commit entries
- * must be removed from this list once the commit is fully completed,
- * but before it's correspoding &drm_atomic_state gets destroyed.
+ * Protected by @commit_lock. This list holds its own full reference,
+ * as does the ongoing commit.
+ *
+ * Note that the commit for a state change is also tracked in
+ * &drm_crtc_state.commit. For accessing the immediately preceding
+ * commit in an atomic update it is recommended to just use that
+ * pointer in the old CRTC state, since accessing that doesn't need
+ * any locking or list-walking. @commit_list should only be used to
+ * stall for framebuffer cleanup that's signalled through
+ * &drm_crtc_commit.cleanup_done.
*/
struct list_head commit_list;
@@ -937,6 +952,7 @@ struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx);
/**
* drm_crtc_find - look up a CRTC object from its ID
* @dev: DRM device
+ * @file_priv: drm file to check for lease against.
* @id: &drm_mode_object ID
*
* This can be used to look up a CRTC from its userspace ID. Only used by
@@ -944,10 +960,11 @@ struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx);
* userspace interface should be done using &drm_property.
*/
static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
- uint32_t id)
+ struct drm_file *file_priv,
+ uint32_t id)
{
struct drm_mode_object *mo;
- mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
+ mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_CRTC);
return mo ? obj_to_crtc(mo) : NULL;
}
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index b17476a6909c..8b9ac321c3bd 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -735,9 +735,20 @@
# define DP_PSR_SINK_INTERNAL_ERROR 7
# define DP_PSR_SINK_STATE_MASK 0x07
+#define DP_SYNCHRONIZATION_LATENCY_IN_SINK 0x2009 /* edp 1.4 */
+# define DP_MAX_RESYNC_FRAME_COUNT_MASK (0xf << 0)
+# define DP_MAX_RESYNC_FRAME_COUNT_SHIFT 0
+# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_MASK (0xf << 4)
+# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_SHIFT 4
+
#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */
# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0)
+#define DP_LANE0_1_STATUS_ESI 0x200c /* status same as 0x202 */
+#define DP_LANE2_3_STATUS_ESI 0x200d /* status same as 0x203 */
+#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */
+#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */
+
#define DP_DPRX_FEATURE_ENUMERATION_LIST 0x2210 /* DP 1.3 */
# define DP_GTC_CAP (1 << 0) /* DP 1.3 */
# define DP_SST_SPLIT_SDP_CAP (1 << 1) /* DP 1.4 */
@@ -871,6 +882,18 @@ void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
u8 drm_dp_link_rate_to_bw_code(int link_rate);
int drm_dp_bw_code_to_link_rate(u8 link_bw);
+#define DP_SDP_AUDIO_TIMESTAMP 0x01
+#define DP_SDP_AUDIO_STREAM 0x02
+#define DP_SDP_EXTENSION 0x04 /* DP 1.1 */
+#define DP_SDP_AUDIO_COPYMANAGEMENT 0x05 /* DP 1.2 */
+#define DP_SDP_ISRC 0x06 /* DP 1.2 */
+#define DP_SDP_VSC 0x07 /* DP 1.2 */
+#define DP_SDP_CAMERA_GENERIC(i) (0x08 + (i)) /* 0-7, DP 1.3 */
+#define DP_SDP_PPS 0x10 /* DP 1.4 */
+#define DP_SDP_VSC_EXT_VESA 0x20 /* DP 1.4 */
+#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */
+/* 0x80+ CEA-861 infoframe types */
+
struct edp_sdp_header {
u8 HB0; /* Secondary Data Packet ID */
u8 HB1; /* Secondary Data Packet Type */
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index d55abb75f29a..7f78d26a0766 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -631,5 +631,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
int slots);
+int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, bool power_up);
#endif
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 71bbaaec836d..412e83a4d3db 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -155,7 +155,7 @@ struct drm_driver {
* reverse order of the initialization. Similarly to the load
* hook, this handler is deprecated and its usage should be
* dropped in favor of an open-coded teardown function at the
- * driver layer. See drm_dev_unregister() and drm_dev_unref()
+ * driver layer. See drm_dev_unregister() and drm_dev_put()
* for the proper way to remove a &struct drm_device.
*
* The unload() hook is called right after unregistering
@@ -324,7 +324,7 @@ struct drm_driver {
*/
bool (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
int *max_error,
- struct timeval *vblank_time,
+ ktime_t *vblank_time,
bool in_vblank_irq);
/**
@@ -611,7 +611,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);
-void drm_dev_ref(struct drm_device *dev);
+void drm_dev_get(struct drm_device *dev);
+void drm_dev_put(struct drm_device *dev);
void drm_dev_unref(struct drm_device *dev);
void drm_put_dev(struct drm_device *dev);
void drm_dev_unplug(struct drm_device *dev);
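
As a rough usage sketch of the renamed reference helpers (drm_dev_unref() stays around as an alias here), assuming a driver already holds a &struct drm_device pointer:

    #include <drm/drm_drv.h>

    static void example_hold_device(struct drm_device *drm)
    {
            drm_dev_get(drm);       /* take an extra reference */
            /* ... use the device ... */
            drm_dev_put(drm);       /* drop it again */
    }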
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 1e1908a6b1d6..6f35909b8add 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -341,6 +341,8 @@ int drm_av_sync_delay(struct drm_connector *connector,
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
struct edid *drm_load_edid_firmware(struct drm_connector *connector);
+int __drm_set_edid_firmware_path(const char *path);
+int __drm_get_edid_firmware_path(char *buf, size_t bufsize);
#else
static inline struct edid *
drm_load_edid_firmware(struct drm_connector *connector)
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 8d8245ec0181..ee4cfbe63c52 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -208,17 +208,19 @@ static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
/**
* drm_encoder_find - find a &drm_encoder
* @dev: DRM device
+ * @file_priv: drm file to check for lease against.
* @id: encoder id
*
* Returns the encoder with @id, NULL if it doesn't exist. Simple wrapper around
* drm_mode_object_find().
*/
static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t id)
{
struct drm_mode_object *mo;
- mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+ mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_ENCODER);
return mo ? obj_to_encoder(mo) : NULL;
}
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 1df291d11710..faf56c53df28 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -29,16 +29,6 @@ void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state);
void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
bool state);
-void drm_fb_cma_destroy(struct drm_framebuffer *fb);
-int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv, unsigned int *handle);
-
-struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
- struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
- const struct drm_framebuffer_funcs *funcs);
-struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
- struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd);
-
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
@@ -46,9 +36,6 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
struct drm_plane_state *state,
unsigned int plane);
-int drm_fb_cma_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *state);
-
#ifdef CONFIG_DEBUG_FS
struct seq_file;
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index b6996ddb19d6..4c5ee4ae54df 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -205,6 +205,7 @@ int drm_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs);
struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t id);
void drm_framebuffer_remove(struct drm_framebuffer *fb);
void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
index db9cfa07235e..5ca7cdc3f527 100644
--- a/include/drm/drm_gem_framebuffer_helper.h
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -2,8 +2,8 @@
#define __DRM_GEM_FB_HELPER_H__
struct drm_device;
-struct drm_file;
struct drm_fb_helper_surface_size;
+struct drm_file;
struct drm_framebuffer;
struct drm_framebuffer_funcs;
struct drm_gem_object;
diff --git a/include/drm/drm_lease.h b/include/drm/drm_lease.h
new file mode 100644
index 000000000000..fbc0ab54855b
--- /dev/null
+++ b/include/drm/drm_lease.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright © 2017 Keith Packard <keithp@keithp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _DRM_LEASE_H_
+#define _DRM_LEASE_H_
+
+struct drm_file;
+struct drm_device;
+struct drm_master;
+
+struct drm_master *drm_lease_owner(struct drm_master *master);
+
+void drm_lease_destroy(struct drm_master *lessee);
+
+bool drm_lease_held(struct drm_file *file_priv, int id);
+
+bool _drm_lease_held(struct drm_file *file_priv, int id);
+
+void drm_lease_revoke(struct drm_master *master);
+
+uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs);
+
+int drm_mode_create_lease_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+int drm_mode_list_lessees_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+int drm_mode_get_lease_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+#endif /* _DRM_LEASE_H_ */
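
A minimal sketch of how the lease check above might be consulted from driver code (the wrapper itself is hypothetical; drm_lease_held() is the call declared in this new header):

    #include <drm/drm_lease.h>

    static bool example_may_touch_object(struct drm_file *file_priv, int object_id)
    {
            /* True if file_priv is the owner or holds a lease covering object_id. */
            return drm_lease_held(file_priv, object_id);
    }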
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 1b37368416c8..0b4ac2ebc610 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -430,19 +430,6 @@ struct drm_mode_config {
struct list_head encoder_list;
/**
- * @num_overlay_plane:
- *
- * Number of overlay planes on this device, excluding primary and cursor
- * planes.
- *
- * Track number of overlay planes separately from number of total
- * planes. By default we only advertise overlay planes to userspace; if
- * userspace sets the "universal plane" capability bit, we'll go ahead
- * and expose all planes. This is invariant over the lifetime of a
- * device and hence doesn't need any locks.
- */
- int num_overlay_plane;
- /**
* @num_total_plane:
*
* Number of universal (i.e. with primary/cursor) planes on this device.
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index a767b4a30a6d..7ba3913f30b5 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -24,9 +24,11 @@
#define __DRM_MODESET_H__
#include <linux/kref.h>
+#include <drm/drm_lease.h>
struct drm_object_properties;
struct drm_property;
struct drm_device;
+struct drm_file;
/**
* struct drm_mode_object - base structure for modeset objects
@@ -113,6 +115,7 @@ struct drm_object_properties {
}
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t id, uint32_t type);
void drm_mode_object_get(struct drm_mode_object *obj);
void drm_mode_object_put(struct drm_mode_object *obj);
@@ -151,4 +154,6 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val);
+
+bool drm_mode_object_lease_required(uint32_t type);
#endif
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index c55cf3ff6847..16646c44b7df 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -314,7 +314,7 @@ struct drm_crtc_helper_funcs {
* implementation in drm_atomic_helper_check().
*
* When using drm_atomic_helper_check_planes() this hook is called
- * after the &drm_plane_helper_funcs.atomc_check hook for planes, which
+ * after the &drm_plane_helper_funcs.atomic_check hook for planes, which
* allows drivers to assign shared resources requested by planes in this
* callback here. For more complicated dependencies the driver can call
* the provided check helpers multiple times until the computed state
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 4b27c2bb955c..a685d1bb21f2 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -34,6 +34,7 @@ struct drm_modeset_lock;
* @contended: used internally for -EDEADLK handling
* @locked: list of held locks
* @trylock_only: trylock mode used in atomic contexts/panic notifiers
+ * @interruptible: whether interruptible locking should be used.
*
* Each thread competing for a set of locks must use one acquire
* ctx. And if any lock fxn returns -EDEADLK, it must backoff and
@@ -59,6 +60,9 @@ struct drm_modeset_acquire_ctx {
* Trylock mode, use only for panic handlers!
*/
bool trylock_only;
+
+ /* Perform interruptible waits on this context. */
+ bool interruptible;
};
/**
@@ -82,12 +86,13 @@ struct drm_modeset_lock {
struct list_head head;
};
+#define DRM_MODESET_ACQUIRE_INTERRUPTIBLE BIT(0)
+
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
uint32_t flags);
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx);
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx);
-void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx);
-int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx);
+int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx);
void drm_modeset_lock_init(struct drm_modeset_lock *lock);
@@ -111,8 +116,7 @@ static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock)
int drm_modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx);
-int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
- struct drm_modeset_acquire_ctx *ctx);
+int __must_check drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock);
void drm_modeset_unlock(struct drm_modeset_lock *lock);
struct drm_device;
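
A minimal sketch of the interruptible acquire/backoff loop implied by the new flag and the int-returning drm_modeset_backoff() (the function name and single-lock scope are illustrative only):

    #include <drm/drm_modeset_lock.h>

    static int example_lock_one(struct drm_modeset_lock *lock)
    {
            struct drm_modeset_acquire_ctx ctx;
            int ret;

            drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
    retry:
            ret = drm_modeset_lock(lock, &ctx);
            if (ret == -EDEADLK) {
                    ret = drm_modeset_backoff(&ctx);        /* may now return -EINTR */
                    if (!ret)
                            goto retry;
            }
            /* ... modeset work under the lock ... */
            drm_modeset_drop_locks(&ctx);
            drm_modeset_acquire_fini(&ctx);
            return ret;
    }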
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 4f835490d77a..b93c239afb60 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -3,6 +3,9 @@
#define __DRM_OF_H__
#include <linux/of_graph.h>
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
+#include <drm/drm_bridge.h>
+#endif
struct component_master_ops;
struct component_match;
@@ -68,6 +71,34 @@ static inline int drm_of_find_panel_or_bridge(const struct device_node *np,
}
#endif
+/*
+ * drm_of_panel_bridge_remove - remove panel bridge
+ * @np: device tree node containing panel bridge output ports
+ *
+ * Remove the panel bridge of a given DT node's port and endpoint number
+ *
+ * Returns zero if successful, or one of the standard error codes if it fails.
+ */
+static inline int drm_of_panel_bridge_remove(const struct device_node *np,
+ int port, int endpoint)
+{
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
+ struct drm_bridge *bridge;
+ struct device_node *remote;
+
+ remote = of_graph_get_remote_node(np, port, endpoint);
+ if (!remote)
+ return -ENODEV;
+
+ bridge = of_drm_find_bridge(remote);
+ drm_panel_bridge_remove(bridge);
+
+ return 0;
+#else
+ return -EINVAL;
+#endif
+}
+
static inline int drm_of_encoder_active_endpoint_id(struct device_node *node,
struct drm_encoder *encoder)
{
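
As an assumed call site for the helper added above, a driver teardown path might simply undo what drm_of_find_panel_or_bridge() set up earlier (the port/endpoint numbers below are illustrative):

    #include <drm/drm_of.h>

    static void example_output_unbind(struct device *dev)
    {
            drm_of_panel_bridge_remove(dev->of_node, 1, 0);
    }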
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 73f90f9d057f..571615079230 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -123,6 +123,14 @@ struct drm_plane_state {
*/
bool visible;
+ /**
+ * @commit: Tracks the pending commit to prevent use-after-free conditions,
+ * and for async plane updates.
+ *
+ * May be NULL.
+ */
+ struct drm_crtc_commit *commit;
+
struct drm_atomic_state *state;
};
@@ -531,10 +539,10 @@ struct drm_plane {
* This is protected by @mutex. Note that nonblocking atomic commits
* access the current plane state without taking locks. Either by going
* through the &struct drm_atomic_state pointers, see
- * for_each_plane_in_state(), for_each_oldnew_plane_in_state(),
- * for_each_old_plane_in_state() and for_each_new_plane_in_state(). Or
- * through careful ordering of atomic commit operations as implemented
- * in the atomic helpers, see &struct drm_crtc_commit.
+ * for_each_oldnew_plane_in_state(), for_each_old_plane_in_state() and
+ * for_each_new_plane_in_state(). Or through careful ordering of atomic
+ * commit operations as implemented in the atomic helpers, see
+ * &struct drm_crtc_commit.
*/
struct drm_plane_state *state;
@@ -583,16 +591,18 @@ int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
/**
* drm_plane_find - find a &drm_plane
* @dev: DRM device
+ * @file_priv: drm file to check for lease against.
* @id: plane id
*
* Returns the plane with @id, NULL if it doesn't exist. Simple wrapper around
* drm_mode_object_find().
*/
static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t id)
{
struct drm_mode_object *mo;
- mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE);
+ mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PLANE);
return mo ? obj_to_plane(mo) : NULL;
}
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 37355c623e6c..8a522b4bed40 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -305,17 +305,19 @@ drm_property_unreference_blob(struct drm_property_blob *blob)
}
/**
- * drm_connector_find - find property object
+ * drm_property_find - find property object
* @dev: DRM device
+ * @file_priv: drm file to check for lease against.
* @id: property object id
*
* This function looks up the property object specified by id and returns it.
*/
static inline struct drm_property *drm_property_find(struct drm_device *dev,
+ struct drm_file *file_priv,
uint32_t id)
{
struct drm_mode_object *mo;
- mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY);
+ mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PROPERTY);
return mo ? obj_to_property(mo) : NULL;
}
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index c00fee539822..43e2f382d2f0 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -136,5 +136,10 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
u32 handle,
struct dma_fence **fence);
void drm_syncobj_free(struct kref *kref);
+int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
+ struct dma_fence *fence);
+int drm_syncobj_get_handle(struct drm_file *file_private,
+ struct drm_syncobj *syncobj, u32 *handle);
+int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd);
#endif
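
A sketch of the export pattern the new declarations enable, assuming a driver wants to wrap a fence in a syncobj and hand a handle to userspace (the helper name is hypothetical; drm_syncobj_put() is the existing release helper):

    #include <drm/drm_syncobj.h>

    static int example_export_fence(struct drm_file *file_priv,
                                    struct dma_fence *fence, u32 *handle)
    {
            struct drm_syncobj *syncobj;
            int ret;

            ret = drm_syncobj_create(&syncobj, 0, fence);
            if (ret)
                    return ret;

            ret = drm_syncobj_get_handle(file_priv, syncobj, handle);
            drm_syncobj_put(syncobj);       /* the handle (if any) keeps its own ref */
            return ret;
    }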
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index 7fba9efe4951..848b463a0af5 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -48,9 +48,17 @@ struct drm_pending_vblank_event {
*/
unsigned int pipe;
/**
+ * @sequence: frame event should be triggered at
+ */
+ u64 sequence;
+ /**
* @event: Actual event which will be sent to userspace.
*/
- struct drm_event_vblank event;
+ union {
+ struct drm_event base;
+ struct drm_event_vblank vbl;
+ struct drm_event_crtc_sequence seq;
+ } event;
};
/**
@@ -88,11 +96,11 @@ struct drm_vblank_crtc {
/**
* @count: Current software vblank counter.
*/
- u32 count;
+ u64 count;
/**
* @time: Vblank timestamp corresponding to @count.
*/
- struct timeval time;
+ ktime_t time;
/**
* @refcount: Number of users/waiters of the vblank interrupt. Only when
@@ -152,13 +160,16 @@ struct drm_vblank_crtc {
};
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
-u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
- struct timeval *vblanktime);
+u64 drm_crtc_vblank_count(struct drm_crtc *crtc);
+u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
+ ktime_t *vblanktime);
void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
+void drm_vblank_set_event(struct drm_pending_vblank_event *e,
+ u64 *seq,
+ ktime_t *now);
bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
int drm_crtc_vblank_get(struct drm_crtc *crtc);
@@ -172,7 +183,7 @@ u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
unsigned int pipe, int *max_error,
- struct timeval *vblank_time,
+ ktime_t *vblank_time,
bool in_vblank_irq);
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
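
For illustration, reading the now 64-bit vblank count together with its ktime_t timestamp (the logging wrapper is hypothetical):

    #include <drm/drm_print.h>
    #include <drm/drm_vblank.h>

    static void example_log_vblank(struct drm_crtc *crtc)
    {
            ktime_t ts;
            u64 count;

            count = drm_crtc_vblank_count_and_time(crtc, &ts);
            DRM_DEBUG("vblank %llu at %lld ns\n", count, ktime_to_ns(ts));
    }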
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 34c8f5600ce0..972a25633525 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -118,92 +118,125 @@
#define INTEL_IRONLAKE_M_IDS(info) \
INTEL_VGA_DEVICE(0x0046, info)
-#define INTEL_SNB_D_IDS(info) \
+#define INTEL_SNB_D_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0102, info), \
- INTEL_VGA_DEVICE(0x0112, info), \
- INTEL_VGA_DEVICE(0x0122, info), \
INTEL_VGA_DEVICE(0x010A, info)
-#define INTEL_SNB_M_IDS(info) \
- INTEL_VGA_DEVICE(0x0106, info), \
+#define INTEL_SNB_D_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x0112, info), \
+ INTEL_VGA_DEVICE(0x0122, info)
+
+#define INTEL_SNB_D_IDS(info) \
+ INTEL_SNB_D_GT1_IDS(info), \
+ INTEL_SNB_D_GT2_IDS(info)
+
+#define INTEL_SNB_M_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x0106, info)
+
+#define INTEL_SNB_M_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0116, info), \
INTEL_VGA_DEVICE(0x0126, info)
+#define INTEL_SNB_M_IDS(info) \
+ INTEL_SNB_M_GT1_IDS(info), \
+ INTEL_SNB_M_GT2_IDS(info)
+
+#define INTEL_IVB_M_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x0156, info) /* GT1 mobile */
+
+#define INTEL_IVB_M_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
+
#define INTEL_IVB_M_IDS(info) \
- INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \
- INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
+ INTEL_IVB_M_GT1_IDS(info), \
+ INTEL_IVB_M_GT2_IDS(info)
-#define INTEL_IVB_D_IDS(info) \
+#define INTEL_IVB_D_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x015a, info) /* GT1 server */
+
+#define INTEL_IVB_D_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
- INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \
INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */
+#define INTEL_IVB_D_IDS(info) \
+ INTEL_IVB_D_GT1_IDS(info), \
+ INTEL_IVB_D_GT2_IDS(info)
+
#define INTEL_IVB_Q_IDS(info) \
INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
-#define INTEL_HSW_IDS(info) \
+#define INTEL_HSW_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
- INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
- INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
- INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
- INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
- INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
- INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
- INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
- INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
- INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
- INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
- INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
- INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
- INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
- INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
- INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
- INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
- INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
- INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
- INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
- INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \
INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0D06, info) /* CRW GT1 mobile */
+
+#define INTEL_HSW_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
+ INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
+ INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
+ INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
+ INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
- INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
- INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \
INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \
+ INTEL_VGA_DEVICE(0x0D16, info) /* CRW GT2 mobile */
+
+#define INTEL_HSW_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
+ INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
+ INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
+ INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
+ INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
+ INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
- INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
+#define INTEL_HSW_IDS(info) \
+ INTEL_HSW_GT1_IDS(info), \
+ INTEL_HSW_GT2_IDS(info), \
+ INTEL_HSW_GT3_IDS(info)
+
#define INTEL_VLV_IDS(info) \
INTEL_VGA_DEVICE(0x0f30, info), \
INTEL_VGA_DEVICE(0x0f31, info), \
@@ -212,17 +245,19 @@
INTEL_VGA_DEVICE(0x0157, info), \
INTEL_VGA_DEVICE(0x0155, info)
-#define INTEL_BDW_GT12_IDS(info) \
+#define INTEL_BDW_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \
- INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
+ INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
+ INTEL_VGA_DEVICE(0x160D, info) /* GT1 Workstation */
+
+#define INTEL_BDW_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
- INTEL_VGA_DEVICE(0x161E, info), /* GT2 ULX */ \
- INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
- INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
+ INTEL_VGA_DEVICE(0x161E, info), /* GT2 ULX */ \
INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
@@ -243,7 +278,8 @@
INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
#define INTEL_BDW_IDS(info) \
- INTEL_BDW_GT12_IDS(info), \
+ INTEL_BDW_GT1_IDS(info), \
+ INTEL_BDW_GT2_IDS(info), \
INTEL_BDW_GT3_IDS(info), \
INTEL_BDW_RSVD_IDS(info)
@@ -303,7 +339,6 @@
#define INTEL_KBL_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \
- INTEL_VGA_DEVICE(0x5917, info), /* DT GT1.5 */ \
INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
@@ -313,6 +348,7 @@
#define INTEL_KBL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
+ INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
@@ -335,20 +371,22 @@
INTEL_KBL_GT4_IDS(info)
/* CFL S */
-#define INTEL_CFL_S_IDS(info) \
+#define INTEL_CFL_S_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \
- INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \
+ INTEL_VGA_DEVICE(0x3E93, info) /* SRV GT1 */
+
+#define INTEL_CFL_S_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E96, info) /* SRV GT2 */
/* CFL H */
-#define INTEL_CFL_H_IDS(info) \
+#define INTEL_CFL_H_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
/* CFL U */
-#define INTEL_CFL_U_IDS(info) \
+#define INTEL_CFL_U_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \
diff --git a/include/drm/ttm/ttm_debug.h b/include/drm/ttm/ttm_debug.h
new file mode 100644
index 000000000000..b5e460fa5086
--- /dev/null
+++ b/include/drm/ttm/ttm_debug.h
@@ -0,0 +1,31 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2017 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Tom St Denis <tom.stdenis@amd.com>
+ */
+extern void ttm_trace_dma_map(struct device *dev, struct ttm_dma_tt *tt);
+extern void ttm_trace_dma_unmap(struct device *dev, struct ttm_dma_tt *tt);
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index c4520890f267..2c1e3598effe 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -150,10 +150,9 @@ extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
extern void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount);
extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page,
- bool no_wait, bool interruptible);
+ struct page *page, uint64_t size);
extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
- struct page *page);
+ struct page *page, uint64_t size);
extern size_t ttm_round_pot(size_t size);
extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
#endif
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 49a828425fa2..38a2b4770c35 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -47,7 +47,7 @@ void ttm_page_alloc_fini(void);
*
* Add backing pages to all of @ttm
*/
-extern int ttm_pool_populate(struct ttm_tt *ttm);
+int ttm_pool_populate(struct ttm_tt *ttm);
/**
* ttm_pool_unpopulate:
@@ -56,12 +56,12 @@ extern int ttm_pool_populate(struct ttm_tt *ttm);
*
* Free all pages of @ttm
*/
-extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
+void ttm_pool_unpopulate(struct ttm_tt *ttm);
/**
* Output the state of pools to debugfs file
*/
-extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
@@ -78,10 +78,21 @@ void ttm_dma_page_alloc_fini(void);
/**
* Output the state of pools to debugfs file
*/
-extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
+int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
-extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
-extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+
+
+/**
+ * Populates and DMA maps pages to fulfil a ttm_dma_populate() request
+ */
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
+
+/**
+ * Unpopulates and DMA unmaps pages as part of a
+ * ttm_dma_unpopulate() request */
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
#else
static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
@@ -105,6 +116,16 @@ static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
struct device *dev)
{
}
+
+static inline int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
+{
+ return -ENOMEM;
+}
+
+static inline void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
+{
+}
+
#endif
#endif
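
A minimal sketch of wiring the combined helpers into a driver's TTM backend (callback names are illustrative; on builds without SWIOTLB or INTEL_IOMMU the stubs above make the populate path fail with -ENOMEM):

    #include <drm/ttm/ttm_page_alloc.h>

    static int example_tt_populate(struct device *dev, struct ttm_dma_tt *dma_tt)
    {
            return ttm_populate_and_map_pages(dev, dma_tt);
    }

    static void example_tt_unpopulate(struct device *dev, struct ttm_dma_tt *dma_tt)
    {
            ttm_unmap_and_unpopulate_pages(dev, dma_tt);
    }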
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index 32dd58a3d13c..dd549ff04295 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -31,6 +31,6 @@
/* Bit 3 express GPIO suspend/resume persistence */
#define GPIO_SLEEP_MAINTAIN_VALUE 0
-#define GPIO_SLEEP_MAY_LOOSE_VALUE 8
+#define GPIO_SLEEP_MAY_LOSE_VALUE 8
#endif
diff --git a/include/dt-bindings/gpio/meson-gxbb-gpio.h b/include/dt-bindings/gpio/meson-gxbb-gpio.h
index 58654fd7aa1e..43a68a1110f0 100644
--- a/include/dt-bindings/gpio/meson-gxbb-gpio.h
+++ b/include/dt-bindings/gpio/meson-gxbb-gpio.h
@@ -29,6 +29,7 @@
#define GPIOAO_11 11
#define GPIOAO_12 12
#define GPIOAO_13 13
+#define GPIO_TEST_N 14
#define GPIOZ_0 0
#define GPIOZ_1 1
@@ -149,6 +150,5 @@
#define GPIOCLK_1 116
#define GPIOCLK_2 117
#define GPIOCLK_3 118
-#define GPIO_TEST_N 119
#endif
diff --git a/include/dt-bindings/gpio/meson-gxl-gpio.h b/include/dt-bindings/gpio/meson-gxl-gpio.h
index 684d0d7add1c..01f2a2abd35e 100644
--- a/include/dt-bindings/gpio/meson-gxl-gpio.h
+++ b/include/dt-bindings/gpio/meson-gxl-gpio.h
@@ -25,6 +25,7 @@
#define GPIOAO_7 7
#define GPIOAO_8 8
#define GPIOAO_9 9
+#define GPIO_TEST_N 10
#define GPIOZ_0 0
#define GPIOZ_1 1
@@ -126,6 +127,5 @@
#define GPIOX_18 97
#define GPIOCLK_0 98
#define GPIOCLK_1 99
-#define GPIO_TEST_N 100
#endif
diff --git a/include/dt-bindings/gpio/uniphier-gpio.h b/include/dt-bindings/gpio/uniphier-gpio.h
new file mode 100644
index 000000000000..9f0ad174f61c
--- /dev/null
+++ b/include/dt-bindings/gpio/uniphier-gpio.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2017 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ */
+
+#ifndef _DT_BINDINGS_GPIO_UNIPHIER_H
+#define _DT_BINDINGS_GPIO_UNIPHIER_H
+
+#define UNIPHIER_GPIO_LINES_PER_BANK 8
+
+#define UNIPHIER_GPIO_IRQ_OFFSET ((UNIPHIER_GPIO_LINES_PER_BANK) * 15)
+
+#define UNIPHIER_GPIO_PORT(bank, line) \
+ ((UNIPHIER_GPIO_LINES_PER_BANK) * (bank) + (line))
+
+#define UNIPHIER_GPIO_IRQ(n) ((UNIPHIER_GPIO_IRQ_OFFSET) + (n))
+
+#endif /* _DT_BINDINGS_GPIO_UNIPHIER_H */
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
new file mode 100644
index 000000000000..a75d304473d5
--- /dev/null
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -0,0 +1,887 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_IDS_H
+#define __MSM_BUS_IDS_H
+
+/* Aggregation types */
+#define AGG_SCHEME_NONE 0
+#define AGG_SCHEME_LEG 1
+#define AGG_SCHEME_1 2
+
+/* Topology related enums */
+#define MSM_BUS_FAB_DEFAULT 0
+#define MSM_BUS_FAB_APPSS 0
+#define MSM_BUS_FAB_SYSTEM 1024
+#define MSM_BUS_FAB_MMSS 2048
+#define MSM_BUS_FAB_SYSTEM_FPB 3072
+#define MSM_BUS_FAB_CPSS_FPB 4096
+
+#define MSM_BUS_FAB_BIMC 0
+#define MSM_BUS_FAB_SYS_NOC 1024
+#define MSM_BUS_FAB_MMSS_NOC 2048
+#define MSM_BUS_FAB_OCMEM_NOC 3072
+#define MSM_BUS_FAB_PERIPH_NOC 4096
+#define MSM_BUS_FAB_CONFIG_NOC 5120
+#define MSM_BUS_FAB_OCMEM_VNOC 6144
+#define MSM_BUS_FAB_MMSS_AHB 2049
+#define MSM_BUS_FAB_A0_NOC 6145
+#define MSM_BUS_FAB_A1_NOC 6146
+#define MSM_BUS_FAB_A2_NOC 6147
+#define MSM_BUS_FAB_GNOC 6148
+#define MSM_BUS_FAB_CR_VIRT 6149
+
+#define MSM_BUS_MASTER_FIRST 1
+#define MSM_BUS_MASTER_AMPSS_M0 1
+#define MSM_BUS_MASTER_AMPSS_M1 2
+#define MSM_BUS_APPSS_MASTER_FAB_MMSS 3
+#define MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
+#define MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
+#define MSM_BUS_MASTER_SPS 6
+#define MSM_BUS_MASTER_ADM_PORT0 7
+#define MSM_BUS_MASTER_ADM_PORT1 8
+#define MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
+#define MSM_BUS_MASTER_ADM1_PORT1 10
+#define MSM_BUS_MASTER_LPASS_PROC 11
+#define MSM_BUS_MASTER_MSS_PROCI 12
+#define MSM_BUS_MASTER_MSS_PROCD 13
+#define MSM_BUS_MASTER_MSS_MDM_PORT0 14
+#define MSM_BUS_MASTER_LPASS 15
+#define MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
+#define MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
+#define MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
+#define MSM_BUS_MASTER_ADM1_CI 19
+#define MSM_BUS_MASTER_ADM0_CI 20
+#define MSM_BUS_MASTER_MSS_MDM_PORT1 21
+#define MSM_BUS_MASTER_MDP_PORT0 22
+#define MSM_BUS_MASTER_MDP_PORT1 23
+#define MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
+#define MSM_BUS_MASTER_ROTATOR 25
+#define MSM_BUS_MASTER_GRAPHICS_3D 26
+#define MSM_BUS_MASTER_JPEG_DEC 27
+#define MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
+#define MSM_BUS_MASTER_VFE 29
+#define MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE
+#define MSM_BUS_MASTER_VPE 30
+#define MSM_BUS_MASTER_JPEG_ENC 31
+#define MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
+#define MSM_BUS_MMSS_MASTER_APPS_FAB 33
+#define MSM_BUS_MASTER_HD_CODEC_PORT0 34
+#define MSM_BUS_MASTER_HD_CODEC_PORT1 35
+#define MSM_BUS_MASTER_SPDM 36
+#define MSM_BUS_MASTER_RPM 37
+#define MSM_BUS_MASTER_MSS 38
+#define MSM_BUS_MASTER_RIVA 39
+#define MSM_BUS_MASTER_SNOC_VMEM 40
+#define MSM_BUS_MASTER_MSS_SW_PROC 41
+#define MSM_BUS_MASTER_MSS_FW_PROC 42
+#define MSM_BUS_MASTER_HMSS 43
+#define MSM_BUS_MASTER_GSS_NAV 44
+#define MSM_BUS_MASTER_PCIE 45
+#define MSM_BUS_MASTER_SATA 46
+#define MSM_BUS_MASTER_CRYPTO 47
+#define MSM_BUS_MASTER_VIDEO_CAP 48
+#define MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
+#define MSM_BUS_MASTER_VIDEO_ENC 50
+#define MSM_BUS_MASTER_VIDEO_DEC 51
+#define MSM_BUS_MASTER_LPASS_AHB 52
+#define MSM_BUS_MASTER_QDSS_BAM 53
+#define MSM_BUS_MASTER_SNOC_CFG 54
+#define MSM_BUS_MASTER_CRYPTO_CORE0 55
+#define MSM_BUS_MASTER_CRYPTO_CORE1 56
+#define MSM_BUS_MASTER_MSS_NAV 57
+#define MSM_BUS_MASTER_OCMEM_DMA 58
+#define MSM_BUS_MASTER_WCSS 59
+#define MSM_BUS_MASTER_QDSS_ETR 60
+#define MSM_BUS_MASTER_USB3 61
+#define MSM_BUS_MASTER_JPEG 62
+#define MSM_BUS_MASTER_VIDEO_P0 63
+#define MSM_BUS_MASTER_VIDEO_P1 64
+#define MSM_BUS_MASTER_MSS_PROC 65
+#define MSM_BUS_MASTER_JPEG_OCMEM 66
+#define MSM_BUS_MASTER_MDP_OCMEM 67
+#define MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
+#define MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
+#define MSM_BUS_MASTER_VFE_OCMEM 70
+#define MSM_BUS_MASTER_CNOC_ONOC_CFG 71
+#define MSM_BUS_MASTER_RPM_INST 72
+#define MSM_BUS_MASTER_RPM_DATA 73
+#define MSM_BUS_MASTER_RPM_SYS 74
+#define MSM_BUS_MASTER_DEHR 75
+#define MSM_BUS_MASTER_QDSS_DAP 76
+#define MSM_BUS_MASTER_TIC 77
+#define MSM_BUS_MASTER_SDCC_1 78
+#define MSM_BUS_MASTER_SDCC_3 79
+#define MSM_BUS_MASTER_SDCC_4 80
+#define MSM_BUS_MASTER_SDCC_2 81
+#define MSM_BUS_MASTER_TSIF 82
+#define MSM_BUS_MASTER_BAM_DMA 83
+#define MSM_BUS_MASTER_BLSP_2 84
+#define MSM_BUS_MASTER_USB_HSIC 85
+#define MSM_BUS_MASTER_BLSP_1 86
+#define MSM_BUS_MASTER_USB_HS 87
+#define MSM_BUS_MASTER_PNOC_CFG 88
+#define MSM_BUS_MASTER_V_OCMEM_GFX3D 89
+#define MSM_BUS_MASTER_IPA 90
+#define MSM_BUS_MASTER_QPIC 91
+#define MSM_BUS_MASTER_MDPE 92
+#define MSM_BUS_MASTER_USB_HS2 93
+#define MSM_BUS_MASTER_VPU 94
+#define MSM_BUS_MASTER_UFS 95
+#define MSM_BUS_MASTER_BCAST 96
+#define MSM_BUS_MASTER_CRYPTO_CORE2 97
+#define MSM_BUS_MASTER_EMAC 98
+#define MSM_BUS_MASTER_VPU_1 99
+#define MSM_BUS_MASTER_PCIE_1 100
+#define MSM_BUS_MASTER_USB3_1 101
+#define MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
+#define MSM_BUS_MASTER_CNOC_MNOC_CFG 103
+#define MSM_BUS_MASTER_TCU_0 104
+#define MSM_BUS_MASTER_TCU_1 105
+#define MSM_BUS_MASTER_CPP 106
+#define MSM_BUS_MASTER_AUDIO 107
+#define MSM_BUS_MASTER_PCIE_2 108
+#define MSM_BUS_MASTER_VFE1 109
+#define MSM_BUS_MASTER_XM_USB_HS1 110
+#define MSM_BUS_MASTER_PCNOC_BIMC_1 111
+#define MSM_BUS_MASTER_BIMC_PCNOC 112
+#define MSM_BUS_MASTER_XI_USB_HSIC 113
+#define MSM_BUS_MASTER_SGMII 114
+#define MSM_BUS_SPMI_FETCHER 115
+#define MSM_BUS_MASTER_GNOC_BIMC 116
+#define MSM_BUS_MASTER_CRVIRT_A2NOC 117
+#define MSM_BUS_MASTER_CNOC_A2NOC 118
+#define MSM_BUS_MASTER_WLAN 119
+#define MSM_BUS_MASTER_MSS_CE 120
+#define MSM_BUS_MASTER_CDSP_PROC 121
+#define MSM_BUS_MASTER_GNOC_SNOC 122
+#define MSM_BUS_MASTER_PIMEM 123
+#define MSM_BUS_MASTER_MASTER_LAST 124
+
+#define MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
+#define MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
+
+#define MSM_BUS_SNOC_MM_INT_0 10000
+#define MSM_BUS_SNOC_MM_INT_1 10001
+#define MSM_BUS_SNOC_MM_INT_2 10002
+#define MSM_BUS_SNOC_MM_INT_BIMC 10003
+#define MSM_BUS_SNOC_INT_0 10004
+#define MSM_BUS_SNOC_INT_1 10005
+#define MSM_BUS_SNOC_INT_BIMC 10006
+#define MSM_BUS_SNOC_BIMC_0_MAS 10007
+#define MSM_BUS_SNOC_BIMC_1_MAS 10008
+#define MSM_BUS_SNOC_QDSS_INT 10009
+#define MSM_BUS_PNOC_SNOC_MAS 10010
+#define MSM_BUS_PNOC_SNOC_SLV 10011
+#define MSM_BUS_PNOC_INT_0 10012
+#define MSM_BUS_PNOC_INT_1 10013
+#define MSM_BUS_PNOC_M_0 10014
+#define MSM_BUS_PNOC_M_1 10015
+#define MSM_BUS_BIMC_SNOC_MAS 10016
+#define MSM_BUS_BIMC_SNOC_SLV 10017
+#define MSM_BUS_PNOC_SLV_0 10018
+#define MSM_BUS_PNOC_SLV_1 10019
+#define MSM_BUS_PNOC_SLV_2 10020
+#define MSM_BUS_PNOC_SLV_3 10021
+#define MSM_BUS_PNOC_SLV_4 10022
+#define MSM_BUS_PNOC_SLV_8 10023
+#define MSM_BUS_PNOC_SLV_9 10024
+#define MSM_BUS_SNOC_BIMC_0_SLV 10025
+#define MSM_BUS_SNOC_BIMC_1_SLV 10026
+#define MSM_BUS_MNOC_BIMC_MAS 10027
+#define MSM_BUS_MNOC_BIMC_SLV 10028
+#define MSM_BUS_BIMC_MNOC_MAS 10029
+#define MSM_BUS_BIMC_MNOC_SLV 10030
+#define MSM_BUS_SNOC_BIMC_MAS 10031
+#define MSM_BUS_SNOC_BIMC_SLV 10032
+#define MSM_BUS_CNOC_SNOC_MAS 10033
+#define MSM_BUS_CNOC_SNOC_SLV 10034
+#define MSM_BUS_SNOC_CNOC_MAS 10035
+#define MSM_BUS_SNOC_CNOC_SLV 10036
+#define MSM_BUS_OVNOC_SNOC_MAS 10037
+#define MSM_BUS_OVNOC_SNOC_SLV 10038
+#define MSM_BUS_SNOC_OVNOC_MAS 10039
+#define MSM_BUS_SNOC_OVNOC_SLV 10040
+#define MSM_BUS_SNOC_PNOC_MAS 10041
+#define MSM_BUS_SNOC_PNOC_SLV 10042
+#define MSM_BUS_BIMC_INT_APPS_EBI 10043
+#define MSM_BUS_BIMC_INT_APPS_SNOC 10044
+#define MSM_BUS_SNOC_BIMC_2_MAS 10045
+#define MSM_BUS_SNOC_BIMC_2_SLV 10046
+#define MSM_BUS_PNOC_SLV_5 10047
+#define MSM_BUS_PNOC_SLV_7 10048
+#define MSM_BUS_PNOC_INT_2 10049
+#define MSM_BUS_PNOC_INT_3 10050
+#define MSM_BUS_PNOC_INT_4 10051
+#define MSM_BUS_PNOC_INT_5 10052
+#define MSM_BUS_PNOC_INT_6 10053
+#define MSM_BUS_PNOC_INT_7 10054
+#define MSM_BUS_BIMC_SNOC_1_MAS 10055
+#define MSM_BUS_BIMC_SNOC_1_SLV 10056
+#define MSM_BUS_PNOC_A1NOC_MAS 10057
+#define MSM_BUS_PNOC_A1NOC_SLV 10058
+#define MSM_BUS_CNOC_A1NOC_MAS 10059
+#define MSM_BUS_A0NOC_SNOC_MAS 10060
+#define MSM_BUS_A0NOC_SNOC_SLV 10061
+#define MSM_BUS_A1NOC_SNOC_SLV 10062
+#define MSM_BUS_A1NOC_SNOC_MAS 10063
+#define MSM_BUS_A2NOC_SNOC_MAS 10064
+#define MSM_BUS_A2NOC_SNOC_SLV 10065
+#define MSM_BUS_SNOC_INT_2 10066
+#define MSM_BUS_A0NOC_QDSS_INT 10067
+#define MSM_BUS_INT_LAST 10068
+
+#define MSM_BUS_INT_TEST_ID 20000
+#define MSM_BUS_INT_TEST_LAST 20050
+
+#define MSM_BUS_SLAVE_FIRST 512
+#define MSM_BUS_SLAVE_EBI_CH0 512
+#define MSM_BUS_SLAVE_EBI_CH1 513
+#define MSM_BUS_SLAVE_AMPSS_L2 514
+#define MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
+#define MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
+#define MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
+#define MSM_BUS_SLAVE_SPS 518
+#define MSM_BUS_SLAVE_SYSTEM_IMEM 519
+#define MSM_BUS_SLAVE_AMPSS 520
+#define MSM_BUS_SLAVE_MSS 521
+#define MSM_BUS_SLAVE_LPASS 522
+#define MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
+#define MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
+#define MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
+#define MSM_BUS_SLAVE_CORESIGHT 526
+#define MSM_BUS_SLAVE_RIVA 527
+#define MSM_BUS_SLAVE_SMI 528
+#define MSM_BUS_MMSS_SLAVE_FAB_APPS 529
+#define MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
+#define MSM_BUS_SLAVE_MM_IMEM 531
+#define MSM_BUS_SLAVE_CRYPTO 532
+#define MSM_BUS_SLAVE_SPDM 533
+#define MSM_BUS_SLAVE_RPM 534
+#define MSM_BUS_SLAVE_RPM_MSG_RAM 535
+#define MSM_BUS_SLAVE_MPM 536
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
+#define MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
+#define MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
+#define MSM_BUS_SLAVE_GSBI1_UART 542
+#define MSM_BUS_SLAVE_GSBI2_UART 543
+#define MSM_BUS_SLAVE_GSBI3_UART 544
+#define MSM_BUS_SLAVE_GSBI4_UART 545
+#define MSM_BUS_SLAVE_GSBI5_UART 546
+#define MSM_BUS_SLAVE_GSBI6_UART 547
+#define MSM_BUS_SLAVE_GSBI7_UART 548
+#define MSM_BUS_SLAVE_GSBI8_UART 549
+#define MSM_BUS_SLAVE_GSBI9_UART 550
+#define MSM_BUS_SLAVE_GSBI10_UART 551
+#define MSM_BUS_SLAVE_GSBI11_UART 552
+#define MSM_BUS_SLAVE_GSBI12_UART 553
+#define MSM_BUS_SLAVE_GSBI1_QUP 554
+#define MSM_BUS_SLAVE_GSBI2_QUP 555
+#define MSM_BUS_SLAVE_GSBI3_QUP 556
+#define MSM_BUS_SLAVE_GSBI4_QUP 557
+#define MSM_BUS_SLAVE_GSBI5_QUP 558
+#define MSM_BUS_SLAVE_GSBI6_QUP 559
+#define MSM_BUS_SLAVE_GSBI7_QUP 560
+#define MSM_BUS_SLAVE_GSBI8_QUP 561
+#define MSM_BUS_SLAVE_GSBI9_QUP 562
+#define MSM_BUS_SLAVE_GSBI10_QUP 563
+#define MSM_BUS_SLAVE_GSBI11_QUP 564
+#define MSM_BUS_SLAVE_GSBI12_QUP 565
+#define MSM_BUS_SLAVE_EBI2_NAND 566
+#define MSM_BUS_SLAVE_EBI2_CS0 567
+#define MSM_BUS_SLAVE_EBI2_CS1 568
+#define MSM_BUS_SLAVE_EBI2_CS2 569
+#define MSM_BUS_SLAVE_EBI2_CS3 570
+#define MSM_BUS_SLAVE_EBI2_CS4 571
+#define MSM_BUS_SLAVE_EBI2_CS5 572
+#define MSM_BUS_SLAVE_USB_FS1 573
+#define MSM_BUS_SLAVE_USB_FS2 574
+#define MSM_BUS_SLAVE_TSIF 575
+#define MSM_BUS_SLAVE_MSM_TSSC 576
+#define MSM_BUS_SLAVE_MSM_PDM 577
+#define MSM_BUS_SLAVE_MSM_DIMEM 578
+#define MSM_BUS_SLAVE_MSM_TCSR 579
+#define MSM_BUS_SLAVE_MSM_PRNG 580
+#define MSM_BUS_SLAVE_GSS 581
+#define MSM_BUS_SLAVE_SATA 582
+#define MSM_BUS_SLAVE_USB3 583
+#define MSM_BUS_SLAVE_WCSS 584
+#define MSM_BUS_SLAVE_OCIMEM 585
+#define MSM_BUS_SLAVE_SNOC_OCMEM 586
+#define MSM_BUS_SLAVE_SERVICE_SNOC 587
+#define MSM_BUS_SLAVE_QDSS_STM 588
+#define MSM_BUS_SLAVE_CAMERA_CFG 589
+#define MSM_BUS_SLAVE_DISPLAY_CFG 590
+#define MSM_BUS_SLAVE_OCMEM_CFG 591
+#define MSM_BUS_SLAVE_CPR_CFG 592
+#define MSM_BUS_SLAVE_CPR_XPU_CFG 593
+#define MSM_BUS_SLAVE_MISC_CFG 594
+#define MSM_BUS_SLAVE_MISC_XPU_CFG 595
+#define MSM_BUS_SLAVE_VENUS_CFG 596
+#define MSM_BUS_SLAVE_MISC_VENUS_CFG 597
+#define MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
+#define MSM_BUS_SLAVE_MMSS_CLK_CFG 599
+#define MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
+#define MSM_BUS_SLAVE_MNOC_MPU_CFG 601
+#define MSM_BUS_SLAVE_ONOC_MPU_CFG 602
+#define MSM_BUS_SLAVE_SERVICE_MNOC 603
+#define MSM_BUS_SLAVE_OCMEM 604
+#define MSM_BUS_SLAVE_SERVICE_ONOC 605
+#define MSM_BUS_SLAVE_SDCC_1 606
+#define MSM_BUS_SLAVE_SDCC_3 607
+#define MSM_BUS_SLAVE_SDCC_2 608
+#define MSM_BUS_SLAVE_SDCC_4 609
+#define MSM_BUS_SLAVE_BAM_DMA 610
+#define MSM_BUS_SLAVE_BLSP_2 611
+#define MSM_BUS_SLAVE_USB_HSIC 612
+#define MSM_BUS_SLAVE_BLSP_1 613
+#define MSM_BUS_SLAVE_USB_HS 614
+#define MSM_BUS_SLAVE_PDM 615
+#define MSM_BUS_SLAVE_PERIPH_APU_CFG 616
+#define MSM_BUS_SLAVE_PNOC_MPU_CFG 617
+#define MSM_BUS_SLAVE_PRNG 618
+#define MSM_BUS_SLAVE_SERVICE_PNOC 619
+#define MSM_BUS_SLAVE_CLK_CTL 620
+#define MSM_BUS_SLAVE_CNOC_MSS 621
+#define MSM_BUS_SLAVE_SECURITY 622
+#define MSM_BUS_SLAVE_TCSR 623
+#define MSM_BUS_SLAVE_TLMM 624
+#define MSM_BUS_SLAVE_CRYPTO_0_CFG 625
+#define MSM_BUS_SLAVE_CRYPTO_1_CFG 626
+#define MSM_BUS_SLAVE_IMEM_CFG 627
+#define MSM_BUS_SLAVE_MESSAGE_RAM 628
+#define MSM_BUS_SLAVE_BIMC_CFG 629
+#define MSM_BUS_SLAVE_BOOT_ROM 630
+#define MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
+#define MSM_BUS_SLAVE_PMIC_ARB 632
+#define MSM_BUS_SLAVE_SPDM_WRAPPER 633
+#define MSM_BUS_SLAVE_DEHR_CFG 634
+#define MSM_BUS_SLAVE_QDSS_CFG 635
+#define MSM_BUS_SLAVE_RBCPR_CFG 636
+#define MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
+#define MSM_BUS_SLAVE_SNOC_MPU_CFG 638
+#define MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
+#define MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
+#define MSM_BUS_SLAVE_PNOC_CFG 641
+#define MSM_BUS_SLAVE_SNOC_CFG 642
+#define MSM_BUS_SLAVE_EBI1_DLL_CFG 643
+#define MSM_BUS_SLAVE_PHY_APU_CFG 644
+#define MSM_BUS_SLAVE_EBI1_PHY_CFG 645
+#define MSM_BUS_SLAVE_SERVICE_CNOC 646
+#define MSM_BUS_SLAVE_IPS_CFG 647
+#define MSM_BUS_SLAVE_QPIC 648
+#define MSM_BUS_SLAVE_DSI_CFG 649
+#define MSM_BUS_SLAVE_UFS_CFG 650
+#define MSM_BUS_SLAVE_RBCPR_CX_CFG 651
+#define MSM_BUS_SLAVE_RBCPR_MX_CFG 652
+#define MSM_BUS_SLAVE_PCIE_CFG 653
+#define MSM_BUS_SLAVE_USB_PHYS_CFG 654
+#define MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
+#define MSM_BUS_SLAVE_AVSYNC_CFG 656
+#define MSM_BUS_SLAVE_CRYPTO_2_CFG 657
+#define MSM_BUS_SLAVE_VPU_CFG 658
+#define MSM_BUS_SLAVE_BCAST_CFG 659
+#define MSM_BUS_SLAVE_KLM_CFG 660
+#define MSM_BUS_SLAVE_GENI_IR_CFG 661
+#define MSM_BUS_SLAVE_OCMEM_GFX 662
+#define MSM_BUS_SLAVE_CATS_128 663
+#define MSM_BUS_SLAVE_OCMEM_64 664
+#define MSM_BUS_SLAVE_PCIE_0 665
+#define MSM_BUS_SLAVE_PCIE_1 666
+#define MSM_BUS_SLAVE_PCIE_0_CFG 667
+#define MSM_BUS_SLAVE_PCIE_1_CFG 668
+#define MSM_BUS_SLAVE_SRVC_MNOC 669
+#define MSM_BUS_SLAVE_USB_HS2 670
+#define MSM_BUS_SLAVE_AUDIO 671
+#define MSM_BUS_SLAVE_TCU 672
+#define MSM_BUS_SLAVE_APPSS 673
+#define MSM_BUS_SLAVE_PCIE_PARF 674
+#define MSM_BUS_SLAVE_USB3_PHY_CFG 675
+#define MSM_BUS_SLAVE_IPA_CFG 676
+#define MSM_BUS_SLAVE_A0NOC_SNOC 677
+#define MSM_BUS_SLAVE_A1NOC_SNOC 678
+#define MSM_BUS_SLAVE_A2NOC_SNOC 679
+#define MSM_BUS_SLAVE_HMSS_L3 680
+#define MSM_BUS_SLAVE_PIMEM_CFG 681
+#define MSM_BUS_SLAVE_DCC_CFG 682
+#define MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683
+#define MSM_BUS_SLAVE_PCIE_2_CFG 684
+#define MSM_BUS_SLAVE_PCIE20_AHB2PHY 685
+#define MSM_BUS_SLAVE_A0NOC_CFG 686
+#define MSM_BUS_SLAVE_A1NOC_CFG 687
+#define MSM_BUS_SLAVE_A2NOC_CFG 688
+#define MSM_BUS_SLAVE_A1NOC_MPU_CFG 689
+#define MSM_BUS_SLAVE_A2NOC_MPU_CFG 690
+#define MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691
+#define MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692
+#define MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693
+#define MSM_BUS_SLAVE_LPASS_SMMU_CFG 694
+#define MSM_BUS_SLAVE_MMAGIC_CFG 695
+#define MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696
+#define MSM_BUS_SLAVE_SSC_CFG 697
+#define MSM_BUS_SLAVE_DSA_CFG 698
+#define MSM_BUS_SLAVE_DSA_MPU_CFG 699
+#define MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700
+#define MSM_BUS_SLAVE_SMMU_CPP_CFG 701
+#define MSM_BUS_SLAVE_SMMU_JPEG_CFG 702
+#define MSM_BUS_SLAVE_SMMU_MDP_CFG 703
+#define MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704
+#define MSM_BUS_SLAVE_SMMU_VENUS_CFG 705
+#define MSM_BUS_SLAVE_SMMU_VFE_CFG 706
+#define MSM_BUS_SLAVE_A0NOC_MPU_CFG 707
+#define MSM_BUS_SLAVE_VMEM_CFG 708
+#define MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709
+#define MSM_BUS_SLAVE_VMEM 710
+#define MSM_BUS_SLAVE_AHB2PHY 711
+#define MSM_BUS_SLAVE_PIMEM 712
+#define MSM_BUS_SLAVE_SNOC_VMEM 713
+#define MSM_BUS_SLAVE_PCIE_2 714
+#define MSM_BUS_SLAVE_RBCPR_MX 715
+#define MSM_BUS_SLAVE_RBCPR_CX 716
+#define MSM_BUS_SLAVE_BIMC_PCNOC 717
+#define MSM_BUS_SLAVE_PCNOC_BIMC_1 718
+#define MSM_BUS_SLAVE_SGMII 719
+#define MSM_BUS_SLAVE_SPMI_FETCHER 720
+#define MSM_BUS_PNOC_SLV_6 721
+#define MSM_BUS_SLAVE_MMSS_SMMU_CFG 722
+#define MSM_BUS_SLAVE_WLAN 723
+#define MSM_BUS_SLAVE_CRVIRT_A2NOC 724
+#define MSM_BUS_SLAVE_CNOC_A2NOC 725
+#define MSM_BUS_SLAVE_GLM 726
+#define MSM_BUS_SLAVE_GNOC_BIMC 727
+#define MSM_BUS_SLAVE_GNOC_SNOC 728
+#define MSM_BUS_SLAVE_QM_CFG 729
+#define MSM_BUS_SLAVE_TLMM_EAST 730
+#define MSM_BUS_SLAVE_TLMM_NORTH 731
+#define MSM_BUS_SLAVE_TLMM_WEST 732
+#define MSM_BUS_SLAVE_SKL 733
+#define MSM_BUS_SLAVE_LPASS_TCM 734
+#define MSM_BUS_SLAVE_TLMM_SOUTH 735
+#define MSM_BUS_SLAVE_TLMM_CENTER 736
+#define MSM_BUS_MSS_NAV_CE_MPU_CFG 737
+#define MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738
+#define MSM_BUS_SLAVE_CDSP 739
+#define MSM_BUS_SLAVE_CDSP_SMMU_CFG 740
+#define MSM_BUS_SLAVE_LPASS_MPU_CFG 741
+#define MSM_BUS_SLAVE_CSI_PHY_CFG 742
+#define MSM_BUS_SLAVE_LAST 743
+
+#define MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
+#define MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
+
+/*
+ * ID's used in RPM messages
+ */
+#define ICBID_MASTER_APPSS_PROC 0
+#define ICBID_MASTER_MSS_PROC 1
+#define ICBID_MASTER_MNOC_BIMC 2
+#define ICBID_MASTER_SNOC_BIMC 3
+#define ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
+#define ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
+#define ICBID_MASTER_CNOC_MNOC_CFG 5
+#define ICBID_MASTER_GFX3D 6
+#define ICBID_MASTER_JPEG 7
+#define ICBID_MASTER_MDP 8
+#define ICBID_MASTER_MDP0 ICBID_MASTER_MDP
+#define ICBID_MASTER_MDPS ICBID_MASTER_MDP
+#define ICBID_MASTER_VIDEO 9
+#define ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
+#define ICBID_MASTER_VIDEO_P1 10
+#define ICBID_MASTER_VFE 11
+#define ICBID_MASTER_VFE0 ICBID_MASTER_VFE
+#define ICBID_MASTER_CNOC_ONOC_CFG 12
+#define ICBID_MASTER_JPEG_OCMEM 13
+#define ICBID_MASTER_MDP_OCMEM 14
+#define ICBID_MASTER_VIDEO_P0_OCMEM 15
+#define ICBID_MASTER_VIDEO_P1_OCMEM 16
+#define ICBID_MASTER_VFE_OCMEM 17
+#define ICBID_MASTER_LPASS_AHB 18
+#define ICBID_MASTER_QDSS_BAM 19
+#define ICBID_MASTER_SNOC_CFG 20
+#define ICBID_MASTER_BIMC_SNOC 21
+#define ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC
+#define ICBID_MASTER_CNOC_SNOC 22
+#define ICBID_MASTER_CRYPTO 23
+#define ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
+#define ICBID_MASTER_CRYPTO_CORE1 24
+#define ICBID_MASTER_LPASS_PROC 25
+#define ICBID_MASTER_MSS 26
+#define ICBID_MASTER_MSS_NAV 27
+#define ICBID_MASTER_OCMEM_DMA 28
+#define ICBID_MASTER_PNOC_SNOC 29
+#define ICBID_MASTER_WCSS 30
+#define ICBID_MASTER_QDSS_ETR 31
+#define ICBID_MASTER_USB3 32
+#define ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
+#define ICBID_MASTER_SDCC_1 33
+#define ICBID_MASTER_SDCC_3 34
+#define ICBID_MASTER_SDCC_2 35
+#define ICBID_MASTER_SDCC_4 36
+#define ICBID_MASTER_TSIF 37
+#define ICBID_MASTER_BAM_DMA 38
+#define ICBID_MASTER_BLSP_2 39
+#define ICBID_MASTER_USB_HSIC 40
+#define ICBID_MASTER_BLSP_1 41
+#define ICBID_MASTER_USB_HS 42
+#define ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
+#define ICBID_MASTER_PNOC_CFG 43
+#define ICBID_MASTER_SNOC_PNOC 44
+#define ICBID_MASTER_RPM_INST 45
+#define ICBID_MASTER_RPM_DATA 46
+#define ICBID_MASTER_RPM_SYS 47
+#define ICBID_MASTER_DEHR 48
+#define ICBID_MASTER_QDSS_DAP 49
+#define ICBID_MASTER_SPDM 50
+#define ICBID_MASTER_TIC 51
+#define ICBID_MASTER_SNOC_CNOC 52
+#define ICBID_MASTER_GFX3D_OCMEM 53
+#define ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
+#define ICBID_MASTER_OVIRT_SNOC 54
+#define ICBID_MASTER_SNOC_OVIRT 55
+#define ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
+#define ICBID_MASTER_ONOC_OVIRT 56
+#define ICBID_MASTER_USB_HS2 57
+#define ICBID_MASTER_QPIC 58
+#define ICBID_MASTER_IPA 59
+#define ICBID_MASTER_DSI 60
+#define ICBID_MASTER_MDP1 61
+#define ICBID_MASTER_MDPE ICBID_MASTER_MDP1
+#define ICBID_MASTER_VPU_PROC 62
+#define ICBID_MASTER_VPU 63
+#define ICBID_MASTER_VPU0 ICBID_MASTER_VPU
+#define ICBID_MASTER_CRYPTO_CORE2 64
+#define ICBID_MASTER_PCIE_0 65
+#define ICBID_MASTER_PCIE_1 66
+#define ICBID_MASTER_SATA 67
+#define ICBID_MASTER_UFS 68
+#define ICBID_MASTER_USB3_1 69
+#define ICBID_MASTER_VIDEO_OCMEM 70
+#define ICBID_MASTER_VPU1 71
+#define ICBID_MASTER_VCAP 72
+#define ICBID_MASTER_EMAC 73
+#define ICBID_MASTER_BCAST 74
+#define ICBID_MASTER_MMSS_PROC 75
+#define ICBID_MASTER_SNOC_BIMC_1 76
+#define ICBID_MASTER_SNOC_PCNOC 77
+#define ICBID_MASTER_AUDIO 78
+#define ICBID_MASTER_MM_INT_0 79
+#define ICBID_MASTER_MM_INT_1 80
+#define ICBID_MASTER_MM_INT_2 81
+#define ICBID_MASTER_MM_INT_BIMC 82
+#define ICBID_MASTER_MSS_INT 83
+#define ICBID_MASTER_PCNOC_CFG 84
+#define ICBID_MASTER_PCNOC_INT_0 85
+#define ICBID_MASTER_PCNOC_INT_1 86
+#define ICBID_MASTER_PCNOC_M_0 87
+#define ICBID_MASTER_PCNOC_M_1 88
+#define ICBID_MASTER_PCNOC_S_0 89
+#define ICBID_MASTER_PCNOC_S_1 90
+#define ICBID_MASTER_PCNOC_S_2 91
+#define ICBID_MASTER_PCNOC_S_3 92
+#define ICBID_MASTER_PCNOC_S_4 93
+#define ICBID_MASTER_PCNOC_S_6 94
+#define ICBID_MASTER_PCNOC_S_7 95
+#define ICBID_MASTER_PCNOC_S_8 96
+#define ICBID_MASTER_PCNOC_S_9 97
+#define ICBID_MASTER_QDSS_INT 98
+#define ICBID_MASTER_SNOC_INT_0 99
+#define ICBID_MASTER_SNOC_INT_1 100
+#define ICBID_MASTER_SNOC_INT_BIMC 101
+#define ICBID_MASTER_TCU_0 102
+#define ICBID_MASTER_TCU_1 103
+#define ICBID_MASTER_BIMC_INT_0 104
+#define ICBID_MASTER_BIMC_INT_1 105
+#define ICBID_MASTER_CAMERA 106
+#define ICBID_MASTER_RICA 107
+#define ICBID_MASTER_SNOC_BIMC_2 108
+#define ICBID_MASTER_BIMC_SNOC_1 109
+#define ICBID_MASTER_A0NOC_SNOC 110
+#define ICBID_MASTER_A1NOC_SNOC 111
+#define ICBID_MASTER_A2NOC_SNOC 112
+#define ICBID_MASTER_PIMEM 113
+#define ICBID_MASTER_SNOC_VMEM 114
+#define ICBID_MASTER_CPP 115
+#define ICBID_MASTER_CNOC_A1NOC 116
+#define ICBID_MASTER_PNOC_A1NOC 117
+#define ICBID_MASTER_HMSS 118
+#define ICBID_MASTER_PCIE_2 119
+#define ICBID_MASTER_ROTATOR 120
+#define ICBID_MASTER_VENUS_VMEM 121
+#define ICBID_MASTER_DCC 122
+#define ICBID_MASTER_MCDMA 123
+#define ICBID_MASTER_PCNOC_INT_2 124
+#define ICBID_MASTER_PCNOC_INT_3 125
+#define ICBID_MASTER_PCNOC_INT_4 126
+#define ICBID_MASTER_PCNOC_INT_5 127
+#define ICBID_MASTER_PCNOC_INT_6 128
+#define ICBID_MASTER_PCNOC_S_5 129
+#define ICBID_MASTER_SENSORS_AHB 130
+#define ICBID_MASTER_SENSORS_PROC 131
+#define ICBID_MASTER_QSPI 132
+#define ICBID_MASTER_VFE1 133
+#define ICBID_MASTER_SNOC_INT_2 134
+#define ICBID_MASTER_SMMNOC_BIMC 135
+#define ICBID_MASTER_CRVIRT_A1NOC 136
+#define ICBID_MASTER_XM_USB_HS1 137
+#define ICBID_MASTER_XI_USB_HS1 138
+#define ICBID_MASTER_PCNOC_BIMC_1 139
+#define ICBID_MASTER_BIMC_PCNOC 140
+#define ICBID_MASTER_XI_HSIC 141
+#define ICBID_MASTER_SGMII 142
+#define ICBID_MASTER_SPMI_FETCHER 143
+#define ICBID_MASTER_GNOC_BIMC 144
+#define ICBID_MASTER_CRVIRT_A2NOC 145
+#define ICBID_MASTER_CNOC_A2NOC 146
+#define ICBID_MASTER_WLAN 147
+#define ICBID_MASTER_MSS_CE 148
+#define ICBID_MASTER_CDSP_PROC 149
+#define ICBID_MASTER_GNOC_SNOC 150
+
+#define ICBID_SLAVE_EBI1 0
+#define ICBID_SLAVE_APPSS_L2 1
+#define ICBID_SLAVE_BIMC_SNOC 2
+#define ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC
+#define ICBID_SLAVE_CAMERA_CFG 3
+#define ICBID_SLAVE_DISPLAY_CFG 4
+#define ICBID_SLAVE_OCMEM_CFG 5
+#define ICBID_SLAVE_CPR_CFG 6
+#define ICBID_SLAVE_CPR_XPU_CFG 7
+#define ICBID_SLAVE_MISC_CFG 8
+#define ICBID_SLAVE_MISC_XPU_CFG 9
+#define ICBID_SLAVE_VENUS_CFG 10
+#define ICBID_SLAVE_GFX3D_CFG 11
+#define ICBID_SLAVE_MMSS_CLK_CFG 12
+#define ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
+#define ICBID_SLAVE_MNOC_MPU_CFG 14
+#define ICBID_SLAVE_ONOC_MPU_CFG 15
+#define ICBID_SLAVE_MNOC_BIMC 16
+#define ICBID_SLAVE_SERVICE_MNOC 17
+#define ICBID_SLAVE_OCMEM 18
+#define ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
+#define ICBID_SLAVE_SERVICE_ONOC 19
+#define ICBID_SLAVE_APPSS 20
+#define ICBID_SLAVE_LPASS 21
+#define ICBID_SLAVE_USB3 22
+#define ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
+#define ICBID_SLAVE_WCSS 23
+#define ICBID_SLAVE_SNOC_BIMC 24
+#define ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
+#define ICBID_SLAVE_SNOC_CNOC 25
+#define ICBID_SLAVE_IMEM 26
+#define ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
+#define ICBID_SLAVE_SNOC_OVIRT 27
+#define ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
+#define ICBID_SLAVE_SNOC_PNOC 28
+#define ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
+#define ICBID_SLAVE_SERVICE_SNOC 29
+#define ICBID_SLAVE_QDSS_STM 30
+#define ICBID_SLAVE_SDCC_1 31
+#define ICBID_SLAVE_SDCC_3 32
+#define ICBID_SLAVE_SDCC_2 33
+#define ICBID_SLAVE_SDCC_4 34
+#define ICBID_SLAVE_TSIF 35
+#define ICBID_SLAVE_BAM_DMA 36
+#define ICBID_SLAVE_BLSP_2 37
+#define ICBID_SLAVE_USB_HSIC 38
+#define ICBID_SLAVE_BLSP_1 39
+#define ICBID_SLAVE_USB_HS 40
+#define ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
+#define ICBID_SLAVE_PDM 41
+#define ICBID_SLAVE_PERIPH_APU_CFG 42
+#define ICBID_SLAVE_PNOC_MPU_CFG 43
+#define ICBID_SLAVE_PRNG 44
+#define ICBID_SLAVE_PNOC_SNOC 45
+#define ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
+#define ICBID_SLAVE_SERVICE_PNOC 46
+#define ICBID_SLAVE_CLK_CTL 47
+#define ICBID_SLAVE_CNOC_MSS 48
+#define ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
+#define ICBID_SLAVE_SECURITY 49
+#define ICBID_SLAVE_TCSR 50
+#define ICBID_SLAVE_TLMM 51
+#define ICBID_SLAVE_CRYPTO_0_CFG 52
+#define ICBID_SLAVE_CRYPTO_1_CFG 53
+#define ICBID_SLAVE_IMEM_CFG 54
+#define ICBID_SLAVE_MESSAGE_RAM 55
+#define ICBID_SLAVE_BIMC_CFG 56
+#define ICBID_SLAVE_BOOT_ROM 57
+#define ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
+#define ICBID_SLAVE_PMIC_ARB 59
+#define ICBID_SLAVE_SPDM_WRAPPER 60
+#define ICBID_SLAVE_DEHR_CFG 61
+#define ICBID_SLAVE_MPM 62
+#define ICBID_SLAVE_QDSS_CFG 63
+#define ICBID_SLAVE_RBCPR_CFG 64
+#define ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
+#define ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
+#define ICBID_SLAVE_CNOC_MNOC_CFG 66
+#define ICBID_SLAVE_SNOC_MPU_CFG 67
+#define ICBID_SLAVE_CNOC_ONOC_CFG 68
+#define ICBID_SLAVE_PNOC_CFG 69
+#define ICBID_SLAVE_SNOC_CFG 70
+#define ICBID_SLAVE_EBI1_DLL_CFG 71
+#define ICBID_SLAVE_PHY_APU_CFG 72
+#define ICBID_SLAVE_EBI1_PHY_CFG 73
+#define ICBID_SLAVE_RPM 74
+#define ICBID_SLAVE_CNOC_SNOC 75
+#define ICBID_SLAVE_SERVICE_CNOC 76
+#define ICBID_SLAVE_OVIRT_SNOC 77
+#define ICBID_SLAVE_OVIRT_OCMEM 78
+#define ICBID_SLAVE_USB_HS2 79
+#define ICBID_SLAVE_QPIC 80
+#define ICBID_SLAVE_IPS_CFG 81
+#define ICBID_SLAVE_DSI_CFG 82
+#define ICBID_SLAVE_USB3_1 83
+#define ICBID_SLAVE_PCIE_0 84
+#define ICBID_SLAVE_PCIE_1 85
+#define ICBID_SLAVE_PSS_SMMU_CFG 86
+#define ICBID_SLAVE_CRYPTO_2_CFG 87
+#define ICBID_SLAVE_PCIE_0_CFG 88
+#define ICBID_SLAVE_PCIE_1_CFG 89
+#define ICBID_SLAVE_SATA_CFG 90
+#define ICBID_SLAVE_SPSS_GENI_IR 91
+#define ICBID_SLAVE_UFS_CFG 92
+#define ICBID_SLAVE_AVSYNC_CFG 93
+#define ICBID_SLAVE_VPU_CFG 94
+#define ICBID_SLAVE_USB_PHY_CFG 95
+#define ICBID_SLAVE_RBCPR_MX_CFG 96
+#define ICBID_SLAVE_PCIE_PARF 97
+#define ICBID_SLAVE_VCAP_CFG 98
+#define ICBID_SLAVE_EMAC_CFG 99
+#define ICBID_SLAVE_BCAST_CFG 100
+#define ICBID_SLAVE_KLM_CFG 101
+#define ICBID_SLAVE_DISPLAY_PWM 102
+#define ICBID_SLAVE_GENI 103
+#define ICBID_SLAVE_SNOC_BIMC_1 104
+#define ICBID_SLAVE_AUDIO 105
+#define ICBID_SLAVE_CATS_0 106
+#define ICBID_SLAVE_CATS_1 107
+#define ICBID_SLAVE_MM_INT_0 108
+#define ICBID_SLAVE_MM_INT_1 109
+#define ICBID_SLAVE_MM_INT_2 110
+#define ICBID_SLAVE_MM_INT_BIMC 111
+#define ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
+#define ICBID_SLAVE_MSS_INT 113
+#define ICBID_SLAVE_PCNOC_INT_0 114
+#define ICBID_SLAVE_PCNOC_INT_1 115
+#define ICBID_SLAVE_PCNOC_M_0 116
+#define ICBID_SLAVE_PCNOC_M_1 117
+#define ICBID_SLAVE_PCNOC_S_0 118
+#define ICBID_SLAVE_PCNOC_S_1 119
+#define ICBID_SLAVE_PCNOC_S_2 120
+#define ICBID_SLAVE_PCNOC_S_3 121
+#define ICBID_SLAVE_PCNOC_S_4 122
+#define ICBID_SLAVE_PCNOC_S_6 123
+#define ICBID_SLAVE_PCNOC_S_7 124
+#define ICBID_SLAVE_PCNOC_S_8 125
+#define ICBID_SLAVE_PCNOC_S_9 126
+#define ICBID_SLAVE_PRNG_XPU_CFG 127
+#define ICBID_SLAVE_QDSS_INT 128
+#define ICBID_SLAVE_RPM_XPU_CFG 129
+#define ICBID_SLAVE_SNOC_INT_0 130
+#define ICBID_SLAVE_SNOC_INT_1 131
+#define ICBID_SLAVE_SNOC_INT_BIMC 132
+#define ICBID_SLAVE_TCU 133
+#define ICBID_SLAVE_BIMC_INT_0 134
+#define ICBID_SLAVE_BIMC_INT_1 135
+#define ICBID_SLAVE_RICA_CFG 136
+#define ICBID_SLAVE_SNOC_BIMC_2 137
+#define ICBID_SLAVE_BIMC_SNOC_1 138
+#define ICBID_SLAVE_PNOC_A1NOC 139
+#define ICBID_SLAVE_SNOC_VMEM 140
+#define ICBID_SLAVE_A0NOC_SNOC 141
+#define ICBID_SLAVE_A1NOC_SNOC 142
+#define ICBID_SLAVE_A2NOC_SNOC 143
+#define ICBID_SLAVE_A0NOC_CFG 144
+#define ICBID_SLAVE_A0NOC_MPU_CFG 145
+#define ICBID_SLAVE_A0NOC_SMMU_CFG 146
+#define ICBID_SLAVE_A1NOC_CFG 147
+#define ICBID_SLAVE_A1NOC_MPU_CFG 148
+#define ICBID_SLAVE_A1NOC_SMMU_CFG 149
+#define ICBID_SLAVE_A2NOC_CFG 150
+#define ICBID_SLAVE_A2NOC_MPU_CFG 151
+#define ICBID_SLAVE_A2NOC_SMMU_CFG 152
+#define ICBID_SLAVE_AHB2PHY 153
+#define ICBID_SLAVE_CAMERA_THROTTLE_CFG 154
+#define ICBID_SLAVE_DCC_CFG 155
+#define ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156
+#define ICBID_SLAVE_DSA_CFG 157
+#define ICBID_SLAVE_DSA_MPU_CFG 158
+#define ICBID_SLAVE_SSC_MPU_CFG 159
+#define ICBID_SLAVE_HMSS_L3 160
+#define ICBID_SLAVE_LPASS_SMMU_CFG 161
+#define ICBID_SLAVE_MMAGIC_CFG 162
+#define ICBID_SLAVE_PCIE20_AHB2PHY 163
+#define ICBID_SLAVE_PCIE_2 164
+#define ICBID_SLAVE_PCIE_2_CFG 165
+#define ICBID_SLAVE_PIMEM 166
+#define ICBID_SLAVE_PIMEM_CFG 167
+#define ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168
+#define ICBID_SLAVE_RBCPR_CX 169
+#define ICBID_SLAVE_RBCPR_MX 170
+#define ICBID_SLAVE_SMMU_CPP_CFG 171
+#define ICBID_SLAVE_SMMU_JPEG_CFG 172
+#define ICBID_SLAVE_SMMU_MDP_CFG 173
+#define ICBID_SLAVE_SMMU_ROTATOR_CFG 174
+#define ICBID_SLAVE_SMMU_VENUS_CFG 175
+#define ICBID_SLAVE_SMMU_VFE_CFG 176
+#define ICBID_SLAVE_SSC_CFG 177
+#define ICBID_SLAVE_VENUS_THROTTLE_CFG 178
+#define ICBID_SLAVE_VMEM 179
+#define ICBID_SLAVE_VMEM_CFG 180
+#define ICBID_SLAVE_QDSS_MPU_CFG 181
+#define ICBID_SLAVE_USB3_PHY_CFG 182
+#define ICBID_SLAVE_IPA_CFG 183
+#define ICBID_SLAVE_PCNOC_INT_2 184
+#define ICBID_SLAVE_PCNOC_INT_3 185
+#define ICBID_SLAVE_PCNOC_INT_4 186
+#define ICBID_SLAVE_PCNOC_INT_5 187
+#define ICBID_SLAVE_PCNOC_INT_6 188
+#define ICBID_SLAVE_PCNOC_S_5 189
+#define ICBID_SLAVE_QSPI 190
+#define ICBID_SLAVE_A1NOC_MS_MPU_CFG 191
+#define ICBID_SLAVE_A2NOC_MS_MPU_CFG 192
+#define ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193
+#define ICBID_SLAVE_MSS_MPU_CFG 194
+#define ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195
+#define ICBID_SLAVE_SKL 196
+#define ICBID_SLAVE_SNOC_INT_2 197
+#define ICBID_SLAVE_SMMNOC_BIMC 198
+#define ICBID_SLAVE_CRVIRT_A1NOC 199
+#define ICBID_SLAVE_SGMII 200
+#define ICBID_SLAVE_QHS4_APPS 201
+#define ICBID_SLAVE_BIMC_PCNOC 202
+#define ICBID_SLAVE_PCNOC_BIMC_1 203
+#define ICBID_SLAVE_SPMI_FETCHER 204
+#define ICBID_SLAVE_MMSS_SMMU_CFG 205
+#define ICBID_SLAVE_WLAN 206
+#define ICBID_SLAVE_CRVIRT_A2NOC 207
+#define ICBID_SLAVE_CNOC_A2NOC 208
+#define ICBID_SLAVE_GLM 209
+#define ICBID_SLAVE_GNOC_BIMC 210
+#define ICBID_SLAVE_GNOC_SNOC 211
+#define ICBID_SLAVE_QM_CFG 212
+#define ICBID_SLAVE_TLMM_EAST 213
+#define ICBID_SLAVE_TLMM_NORTH 214
+#define ICBID_SLAVE_TLMM_WEST 215
+#define ICBID_SLAVE_LPASS_TCM 216
+#define ICBID_SLAVE_TLMM_SOUTH 217
+#define ICBID_SLAVE_TLMM_CENTER 218
+#define ICBID_SLAVE_MSS_NAV_CE_MPU_CFG 219
+#define ICBID_SLAVE_A2NOC_THROTTLE_CFG 220
+#define ICBID_SLAVE_CDSP 221
+#define ICBID_SLAVE_CDSP_SMMU_CFG 222
+#define ICBID_SLAVE_LPASS_MPU_CFG 223
+#define ICBID_SLAVE_CSI_PHY_CFG 224
+#endif
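
The block above adds the full set of Qualcomm interconnect (ICB) master and slave identifiers. Purely as a hedged illustration — the structure, field names and bandwidth figures below are hypothetical and not part of this patch — a platform description might pair one master ID with one slave ID to describe a single bus path for a bandwidth vote:

/* Hypothetical descriptor for one interconnect bandwidth vote. */
struct icb_path_vote {
	int src;			/* an ICBID_MASTER_* value */
	int dst;			/* an ICBID_SLAVE_* value  */
	unsigned long long avg_bw;	/* average bandwidth, bytes/s */
	unsigned long long peak_bw;	/* peak bandwidth, bytes/s */
};

static const struct icb_path_vote sdcc1_to_ddr = {
	.src	 = ICBID_MASTER_SDCC_1,	/* 33, from the header above */
	.dst	 = ICBID_SLAVE_EBI1,	/* 0, the DDR endpoint */
	.avg_bw	 = 100000000ULL,
	.peak_bw = 200000000ULL,
};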
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
index 6c901930eb3e..d16e8755f6a9 100644
--- a/include/dt-bindings/phy/phy.h
+++ b/include/dt-bindings/phy/phy.h
@@ -15,5 +15,6 @@
#define PHY_TYPE_PCIE 2
#define PHY_TYPE_USB2 3
#define PHY_TYPE_USB3 4
+#define PHY_TYPE_UFS 5
#endif /* _DT_BINDINGS_PHY */
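
PHY_TYPE_UFS joins the generic PHY type constants that a device tree can pass in a phys specifier and that a PHY provider can match in its of_xlate() callback. A minimal sketch under that assumption — the myphy_* names are illustrative and not from this patch:

struct myphy_priv {
	struct phy *ufs_phy;
	struct phy *usb3_phy;
};

static struct phy *myphy_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	struct myphy_priv *priv = dev_get_drvdata(dev);

	/* cell 0 of the specifier is assumed to carry a PHY_TYPE_* value */
	switch (args->args[0]) {
	case PHY_TYPE_UFS:
		return priv->ufs_phy;
	case PHY_TYPE_USB3:
		return priv->usb3_phy;
	default:
		return ERR_PTR(-ENODEV);
	}
}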
diff --git a/include/lib/libgcc.h b/include/lib/libgcc.h
new file mode 100644
index 000000000000..32e1e0f4b2d0
--- /dev/null
+++ b/include/lib/libgcc.h
@@ -0,0 +1,43 @@
+/*
+ * include/lib/libgcc.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.
+ */
+
+#ifndef __LIB_LIBGCC_H
+#define __LIB_LIBGCC_H
+
+#include <asm/byteorder.h>
+
+typedef int word_type __attribute__ ((mode (__word__)));
+
+#ifdef __BIG_ENDIAN
+struct DWstruct {
+ int high, low;
+};
+#elif defined(__LITTLE_ENDIAN)
+struct DWstruct {
+ int low, high;
+};
+#else
+#error I feel sick.
+#endif
+
+typedef union {
+ struct DWstruct s;
+ long long ll;
+} DWunion;
+
+#endif /* __LIB_LIBGCC_H */
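
DWstruct/DWunion give the 64-bit arithmetic helpers an endian-correct view of a long long as two machine words. Purely as an illustration (assuming a 32-bit word size, and not claiming to reproduce the in-tree helpers exactly), a 64-bit left-shift routine built on DWunion looks roughly like this:

long long __ashldi3(long long u, word_type b)
{
	DWunion uu, w;
	word_type bm;

	if (b == 0)
		return u;

	uu.ll = u;
	bm = 32 - b;

	if (bm <= 0) {
		/* the whole low word shifts out into the high word */
		w.s.low = 0;
		w.s.high = (unsigned int)uu.s.low << -bm;
	} else {
		const unsigned int carries = (unsigned int)uu.s.low >> bm;

		w.s.low = (unsigned int)uu.s.low << b;
		w.s.high = ((unsigned int)uu.s.high << b) | carries;
	}

	return w.ll;
}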
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 502af53ec012..dc1ebfeeb5ec 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -864,21 +864,16 @@ static inline void arch_reserve_mem_area(acpi_physical_address addr,
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
-int acpi_dev_runtime_suspend(struct device *dev);
-int acpi_dev_runtime_resume(struct device *dev);
+int acpi_dev_suspend(struct device *dev, bool wakeup);
+int acpi_dev_resume(struct device *dev);
int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);
-struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
int acpi_dev_pm_attach(struct device *dev, bool power_on);
#else
static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
-static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
-{
- return NULL;
-}
static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
return -ENODEV;
@@ -887,22 +882,30 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
int acpi_dev_suspend_late(struct device *dev);
-int acpi_dev_resume_early(struct device *dev);
int acpi_subsys_prepare(struct device *dev);
void acpi_subsys_complete(struct device *dev);
int acpi_subsys_suspend_late(struct device *dev);
+int acpi_subsys_suspend_noirq(struct device *dev);
+int acpi_subsys_resume_noirq(struct device *dev);
int acpi_subsys_resume_early(struct device *dev);
int acpi_subsys_suspend(struct device *dev);
int acpi_subsys_freeze(struct device *dev);
+int acpi_subsys_freeze_late(struct device *dev);
+int acpi_subsys_freeze_noirq(struct device *dev);
+int acpi_subsys_thaw_noirq(struct device *dev);
#else
-static inline int acpi_dev_suspend_late(struct device *dev) { return 0; }
static inline int acpi_dev_resume_early(struct device *dev) { return 0; }
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
static inline void acpi_subsys_complete(struct device *dev) {}
static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; }
+static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; }
+static inline int acpi_subsys_resume_noirq(struct device *dev) { return 0; }
static inline int acpi_subsys_resume_early(struct device *dev) { return 0; }
static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
+static inline int acpi_subsys_freeze_late(struct device *dev) { return 0; }
+static inline int acpi_subsys_freeze_noirq(struct device *dev) { return 0; }
+static inline int acpi_subsys_thaw_noirq(struct device *dev) { return 0; }
#endif
#ifdef CONFIG_ACPI
@@ -1254,4 +1257,13 @@ int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
}
#endif
+#ifdef CONFIG_ACPI_LPIT
+int lpit_read_residency_count_address(u64 *address);
+#else
+static inline int lpit_read_residency_count_address(u64 *address)
+{
+ return -EINVAL;
+}
+#endif
+
#endif /*_LINUX_ACPI_H*/
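
lpit_read_residency_count_address() gives drivers a way to locate the Low Power Idle Table residency counter; the !CONFIG_ACPI_LPIT stub returns -EINVAL, so callers can treat any non-zero return as "not available". A minimal hedged usage sketch:

static void probe_lpit_counter(void)
{
	u64 addr;

	if (lpit_read_residency_count_address(&addr))
		return;	/* no LPIT, or CONFIG_ACPI_LPIT=n */

	pr_info("LPIT residency counter at %#llx\n",
		(unsigned long long)addr);
}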
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 8d3f0bf80379..2f7a29242b87 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -49,8 +49,8 @@ static inline void acpi_configure_pmsi_domain(struct device *dev) { }
/* IOMMU interface */
static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
u64 *size) { }
-static inline
-const struct iommu_ops *iort_iommu_configure(struct device *dev)
+static inline const struct iommu_ops *iort_iommu_configure(
+ struct device *dev)
{ return NULL; }
#endif
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index d4fcb0efb896..304511267c82 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -6,15 +6,30 @@
#define _LINUX_ARCH_TOPOLOGY_H_
#include <linux/types.h>
+#include <linux/percpu.h>
void topology_normalize_cpu_scale(void);
struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
+DECLARE_PER_CPU(unsigned long, cpu_scale);
+
struct sched_domain;
-unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu);
+static inline
+unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
+DECLARE_PER_CPU(unsigned long, freq_scale);
+
+static inline
+unsigned long topology_get_freq_scale(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(freq_scale, cpu);
+}
+
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
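
With cpu_scale and the new freq_scale exported as per-cpu variables, the accessors collapse to plain per_cpu() reads that can be inlined into hot paths. A hedged sketch of a consumer scaling a time delta by both factors (SCHED_CAPACITY_SHIFT is assumed to come from the scheduler headers):

static unsigned long arch_scale_delta(unsigned long delta, int cpu)
{
	unsigned long cap  = topology_get_cpu_scale(NULL, cpu);
	unsigned long freq = topology_get_freq_scale(NULL, cpu);

	/* both factors are relative to SCHED_CAPACITY_SCALE (1024) */
	delta = (delta * cap)  >> SCHED_CAPACITY_SHIFT;
	delta = (delta * freq) >> SCHED_CAPACITY_SHIFT;
	return delta;
}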
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index cd18203d6ff3..8b276fd9a127 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -654,6 +654,8 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif
+#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
@@ -1073,6 +1075,8 @@ static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v
}
#endif
+#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+
#include <asm-generic/atomic-long.h>
#endif /* _LINUX_ATOMIC_H */
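
atomic_cond_read_acquire() layers smp_cond_load_acquire() over an atomic_t: it spins until a condition on the freshly loaded value (named VAL inside the expression) holds, and the load that observes the final value has acquire semantics. A hedged usage sketch:

/* Wait for @ready to become non-zero; loads issued by the caller after
 * this call are ordered after the load that observed the non-zero value. */
static void wait_until_ready(atomic_t *ready)
{
	atomic_cond_read_acquire(ready, VAL != 0);
}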
diff --git a/include/linux/audit.h b/include/linux/audit.h
index cb708eb8accc..af410d9fbf2d 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -149,12 +149,6 @@ extern void audit_log_key(struct audit_buffer *ab,
extern void audit_log_link_denied(const char *operation,
const struct path *link);
extern void audit_log_lost(const char *message);
-#ifdef CONFIG_SECURITY
-extern void audit_log_secctx(struct audit_buffer *ab, u32 secid);
-#else
-static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
-{ }
-#endif
extern int audit_log_task_context(struct audit_buffer *ab);
extern void audit_log_task_info(struct audit_buffer *ab,
@@ -203,8 +197,6 @@ static inline void audit_log_key(struct audit_buffer *ab, char *key)
static inline void audit_log_link_denied(const char *string,
const struct path *link)
{ }
-static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
-{ }
static inline int audit_log_task_context(struct audit_buffer *ab)
{
return 0;
@@ -356,6 +348,7 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
extern void __audit_log_capset(const struct cred *new, const struct cred *old);
extern void __audit_mmap_fd(int fd, int flags);
extern void __audit_log_kern_module(char *name);
+extern void __audit_fanotify(unsigned int response);
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
@@ -452,6 +445,12 @@ static inline void audit_log_kern_module(char *name)
__audit_log_kern_module(name);
}
+static inline void audit_fanotify(unsigned int response)
+{
+ if (!audit_dummy_context())
+ __audit_fanotify(response);
+}
+
extern int audit_n_rules;
extern int audit_signals;
#else /* CONFIG_AUDITSYSCALL */
@@ -568,6 +567,9 @@ static inline void audit_log_kern_module(char *name)
{
}
+static inline void audit_fanotify(unsigned int response)
+{ }
+
static inline void audit_ptrace(struct task_struct *t)
{ }
#define audit_n_rules 0
diff --git a/include/linux/average.h b/include/linux/average.h
index 1b6f5560c264..a1a8f09631ce 100644
--- a/include/linux/average.h
+++ b/include/linux/average.h
@@ -2,6 +2,10 @@
#ifndef _LINUX_AVERAGE_H
#define _LINUX_AVERAGE_H
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/log2.h>
+
/*
* Exponentially weighted moving average (EWMA)
*
@@ -49,7 +53,7 @@
static inline void ewma_##name##_add(struct ewma_##name *e, \
unsigned long val) \
{ \
- unsigned long internal = ACCESS_ONCE(e->internal); \
+ unsigned long internal = READ_ONCE(e->internal); \
unsigned long weight_rcp = ilog2(_weight_rcp); \
unsigned long precision = _precision; \
\
@@ -58,10 +62,10 @@
BUILD_BUG_ON((_precision) > 30); \
BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp); \
\
- ACCESS_ONCE(e->internal) = internal ? \
+ WRITE_ONCE(e->internal, internal ? \
(((internal << weight_rcp) - internal) + \
(val << precision)) >> weight_rcp : \
- (val << precision); \
+ (val << precision)); \
}
#endif /* _LINUX_AVERAGE_H */
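
The EWMA helpers now use READ_ONCE()/WRITE_ONCE() rather than the deprecated ACCESS_ONCE(). As a reminder of how DECLARE_EWMA() is consumed (the name "sig" and the precision/weight values below are arbitrary), a minimal sketch:

DECLARE_EWMA(sig, 10, 8)	/* precision 10 bits, new sample weight 1/8 */

static struct ewma_sig sig_avg;

static void sig_init(void)
{
	ewma_sig_init(&sig_avg);
}

static void sig_sample(unsigned long val)
{
	ewma_sig_add(&sig_avg, val);
}

static unsigned long sig_average(void)
{
	return ewma_sig_read(&sig_avg);
}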
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index fff4cfa0c21d..bfe86b54f6c1 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -25,6 +25,7 @@ enum wb_state {
WB_shutting_down, /* wb_shutdown() in progress */
WB_writeback_running, /* Writeback is in progress */
WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
+ WB_start_all, /* nr_pages == 0 (all) work pending */
};
enum wb_congested_state {
@@ -45,6 +46,28 @@ enum wb_stat_item {
#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
/*
+ * why some writeback work was initiated
+ */
+enum wb_reason {
+ WB_REASON_BACKGROUND,
+ WB_REASON_VMSCAN,
+ WB_REASON_SYNC,
+ WB_REASON_PERIODIC,
+ WB_REASON_LAPTOP_TIMER,
+ WB_REASON_FREE_MORE_MEM,
+ WB_REASON_FS_FREE_SPACE,
+ /*
+ * There is no bdi forker thread any more and works are done
+ * by emergency worker. However, these tracepoints are userland
+ * visible and we keep exposing exactly the same information,
+ * so the mismatched name is retained.
+ */
+ WB_REASON_FORKER_THREAD,
+
+ WB_REASON_MAX,
+};
+
+/*
* For cgroup writeback, multiple wb's may map to the same blkcg. Those
* wb's can operate mostly independently but should share the congested
* state. To facilitate such sharing, the congested state is tracked using
@@ -116,6 +139,7 @@ struct bdi_writeback {
struct fprop_local_percpu completions;
int dirty_exceeded;
+ enum wb_reason start_all_reason;
spinlock_t work_lock; /* protects work_list & dwork scheduling */
struct list_head work_list;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 16621579a3db..e54e7e0033eb 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -39,8 +39,6 @@ static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
}
-void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
- bool range_cyclic, enum wb_reason reason);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);
@@ -95,7 +93,7 @@ extern void wb_writeout_inc(struct bdi_writeback *wb);
/*
* maximal error of a stat counter.
*/
-static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
+static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
return nr_cpu_ids * WB_STAT_BATCH;
@@ -124,6 +122,8 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
* BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
*
* BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
+ * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
+ * inefficient.
*/
#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
#define BDI_CAP_NO_WRITEBACK 0x00000002
@@ -131,6 +131,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
#define BDI_CAP_STABLE_WRITES 0x00000008
#define BDI_CAP_STRICTLIMIT 0x00000010
#define BDI_CAP_CGROUP_WRITEBACK 0x00000020
+#define BDI_CAP_SYNCHRONOUS_IO 0x00000040
#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
@@ -175,8 +176,11 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout);
-int pdflush_proc_obsolete(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+
+static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
+{
+ return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
+}
static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 275c91c99516..d4eec19a6d3c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -129,18 +129,6 @@ static inline void *bio_data(struct bio *bio)
#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
/*
- * queues that have highmem support enabled may still need to revert to
- * PIO transfers occasionally and thus map high pages temporarily. For
- * permanent PIO fall back, user is probably better off disabling highmem
- * I/O completely on that queue (see ide-dma for example)
- */
-#define __bio_kmap_atomic(bio, iter) \
- (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) + \
- bio_iter_iovec((bio), (iter)).bv_offset)
-
-#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)
-
-/*
* merge helpers etc
*/
@@ -522,13 +510,11 @@ do { \
#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
-int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
static inline int bio_associate_blkcg(struct bio *bio,
struct cgroup_subsys_state *blkcg_css) { return 0; }
-static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
static inline void bio_clone_blkcg_association(struct bio *dst,
struct bio *src) { }
@@ -575,17 +561,6 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
}
#endif
-static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
- unsigned long *flags)
-{
- return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
-}
-#define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags)
-
-#define bio_kmap_irq(bio, flags) \
- __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
-#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
-
/*
* BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
*
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 19748a5b0e77..3489253e38fc 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -22,65 +22,74 @@
* See lib/bitmap.c for more details.
*/
-/*
+/**
+ * DOC: bitmap overview
+ *
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
* Note that nbits should be always a compile time evaluable constant.
* Otherwise many inlines will generate horrible code.
*
- * bitmap_zero(dst, nbits) *dst = 0UL
- * bitmap_fill(dst, nbits) *dst = ~0UL
- * bitmap_copy(dst, src, nbits) *dst = *src
- * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
- * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
- * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
- * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
- * bitmap_complement(dst, src, nbits) *dst = ~(*src)
- * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
- * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
- * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
- * bitmap_empty(src, nbits) Are all bits zero in *src?
- * bitmap_full(src, nbits) Are all bits set in *src?
- * bitmap_weight(src, nbits) Hamming Weight: number set bits
- * bitmap_set(dst, pos, nbits) Set specified bit area
- * bitmap_clear(dst, pos, nbits) Clear specified bit area
- * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
- * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
- * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
- * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
- * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
- * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
- * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
- * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
- * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
- * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
- * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
- * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
- * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
- * bitmap_release_region(bitmap, pos, order) Free specified bit region
- * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
- * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
- * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
+ * ::
+ *
+ * bitmap_zero(dst, nbits) *dst = 0UL
+ * bitmap_fill(dst, nbits) *dst = ~0UL
+ * bitmap_copy(dst, src, nbits) *dst = *src
+ * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
+ * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
+ * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
+ * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
+ * bitmap_complement(dst, src, nbits) *dst = ~(*src)
+ * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
+ * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
+ * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
+ * bitmap_empty(src, nbits) Are all bits zero in *src?
+ * bitmap_full(src, nbits) Are all bits set in *src?
+ * bitmap_weight(src, nbits) Hamming Weight: number set bits
+ * bitmap_set(dst, pos, nbits) Set specified bit area
+ * bitmap_clear(dst, pos, nbits) Clear specified bit area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
+ * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
+ * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
+ * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
+ * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
+ * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
+ * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
+ * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
+ * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
+ * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
+ * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
+ * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
+ * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
+ * bitmap_release_region(bitmap, pos, order) Free specified bit region
+ * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
+ * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
+ * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
+ *
*/
-/*
- * Also the following operations in asm/bitops.h apply to bitmaps.
+/**
+ * DOC: bitmap bitops
+ *
+ * Also the following operations in asm/bitops.h apply to bitmaps.::
+ *
+ * set_bit(bit, addr) *addr |= bit
+ * clear_bit(bit, addr) *addr &= ~bit
+ * change_bit(bit, addr) *addr ^= bit
+ * test_bit(bit, addr) Is bit set in *addr?
+ * test_and_set_bit(bit, addr) Set bit and return old value
+ * test_and_clear_bit(bit, addr) Clear bit and return old value
+ * test_and_change_bit(bit, addr) Change bit and return old value
+ * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
+ * find_first_bit(addr, nbits) Position first set bit in *addr
+ * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
+ * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
*
- * set_bit(bit, addr) *addr |= bit
- * clear_bit(bit, addr) *addr &= ~bit
- * change_bit(bit, addr) *addr ^= bit
- * test_bit(bit, addr) Is bit set in *addr?
- * test_and_set_bit(bit, addr) Set bit and return old value
- * test_and_clear_bit(bit, addr) Clear bit and return old value
- * test_and_change_bit(bit, addr) Change bit and return old value
- * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
- * find_first_bit(addr, nbits) Position first set bit in *addr
- * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
- * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
*/
-/*
+/**
+ * DOC: declare bitmap
* The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used
* to declare an array named 'name' of just enough unsigned longs to
* contain all bit positions from 0 to 'bits' - 1.
@@ -361,8 +370,9 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen,
return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
-/*
+/**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
+ * @n: u64 value
*
* Linux bitmaps are internally arrays of unsigned longs, i.e. 32-bit
* integers in 32-bit environment, and 64-bit integers in 64-bit one.
@@ -393,14 +403,14 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen,
((unsigned long) ((u64)(n) >> 32))
#endif
-/*
+/**
* bitmap_from_u64 - Check and swap words within u64.
* @mask: source bitmap
* @dst: destination bitmap
*
- * In 32-bit Big Endian kernel, when using (u32 *)(&val)[*]
+ * In 32-bit Big Endian kernel, when using ``(u32 *)(&val)[*]``
* to read u64 mask, we will get the wrong word.
- * That is "(u32 *)(&val)[0]" gets the upper 32 bits,
+ * That is ``(u32 *)(&val)[0]`` gets the upper 32 bits,
* but we expect the lower 32-bits of u64.
*/
static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
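
The comment blocks above are converted to kernel-doc DOC: sections so the operation tables render in the generated documentation. For reference, a small hedged example exercising a few of the listed operations (the id-map use case is illustrative):

#define NR_IDS 128
static DECLARE_BITMAP(id_map, NR_IDS);

/* Allocate the lowest free id, or -ENOSPC if the map is full. */
static int id_alloc(void)
{
	unsigned int id = find_first_zero_bit(id_map, NR_IDS);

	if (id >= NR_IDS)
		return -ENOSPC;
	__set_bit(id, id_map);
	return id;
}

static void id_free(int id)
{
	__clear_bit(id, id_map);
}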
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index d03c5dd6185d..4cac4e1a72ff 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -228,6 +228,30 @@ static inline unsigned long __ffs64(u64 word)
return __ffs((unsigned long)word);
}
+/**
+ * assign_bit - Assign value to a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ * @value: the value to assign
+ */
+static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
+ bool value)
+{
+ if (value)
+ set_bit(nr, addr);
+ else
+ clear_bit(nr, addr);
+}
+
+static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
+ bool value)
+{
+ if (value)
+ __set_bit(nr, addr);
+ else
+ __clear_bit(nr, addr);
+}
+
#ifdef __KERNEL__
#ifndef set_mask_bits
@@ -237,7 +261,7 @@ static inline unsigned long __ffs64(u64 word)
typeof(*ptr) old, new; \
\
do { \
- old = ACCESS_ONCE(*ptr); \
+ old = READ_ONCE(*ptr); \
new = (old & ~mask) | bits; \
} while (cmpxchg(ptr, old, new) != old); \
\
@@ -252,7 +276,7 @@ static inline unsigned long __ffs64(u64 word)
typeof(*ptr) old, new; \
\
do { \
- old = ACCESS_ONCE(*ptr); \
+ old = READ_ONCE(*ptr); \
new = old & ~clear; \
} while (!(old & test) && \
cmpxchg(ptr, old, new) != old); \
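
assign_bit()/__assign_bit() replace the common "if (value) set_bit() else clear_bit()" pattern with a single call, in atomic and non-atomic flavours respectively. A hedged sketch; FEATURE_BIT and the flags word are illustrative:

#define FEATURE_BIT	0	/* hypothetical flag position */

static void set_feature(unsigned long *flags, bool enable)
{
	/* atomic read-modify-write of the single bit */
	assign_bit(FEATURE_BIT, flags, enable);
}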
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 8bbc3716507a..e9825ff57b15 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -20,6 +20,7 @@
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
+#include <linux/kthread.h>
/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
@@ -224,22 +225,16 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
return css ? container_of(css, struct blkcg, css) : NULL;
}
-static inline struct blkcg *task_blkcg(struct task_struct *tsk)
-{
- return css_to_blkcg(task_css(tsk, io_cgrp_id));
-}
-
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
+ struct cgroup_subsys_state *css;
+
if (bio && bio->bi_css)
return css_to_blkcg(bio->bi_css);
- return task_blkcg(current);
-}
-
-static inline struct cgroup_subsys_state *
-task_get_blkcg_css(struct task_struct *task)
-{
- return task_get_css(task, io_cgrp_id);
+ css = kthread_blkcg();
+ if (css)
+ return css_to_blkcg(css);
+ return css_to_blkcg(task_css(current, io_cgrp_id));
}
/**
@@ -736,12 +731,6 @@ struct blkcg_policy {
#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
-static inline struct cgroup_subsys_state *
-task_get_blkcg_css(struct task_struct *task)
-{
- return NULL;
-}
-
#ifdef CONFIG_BLOCK
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 994cbb0f7ffc..95c9a5c862e2 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -31,10 +31,12 @@ struct blk_mq_hw_ctx {
struct sbitmap ctx_map;
+ struct blk_mq_ctx *dispatch_from;
+
struct blk_mq_ctx **ctxs;
unsigned int nr_ctx;
- wait_queue_entry_t dispatch_wait;
+ wait_queue_entry_t dispatch_wait;
atomic_t wait_index;
struct blk_mq_tags *tags;
@@ -91,6 +93,8 @@ struct blk_mq_queue_data {
typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
const struct blk_mq_queue_data *);
+typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
+typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
@@ -113,6 +117,15 @@ struct blk_mq_ops {
queue_rq_fn *queue_rq;
/*
+ * Reserve budget before queueing a request; once .queue_rq is
+ * run, it is the driver's responsibility to release the
+ * reserved budget. The failure case of .get_budget must also
+ * be handled to avoid I/O deadlock.
+ */
+ get_budget_fn *get_budget;
+ put_budget_fn *put_budget;
+
+ /*
* Called on request timeout
*/
timeout_fn *timeout;
@@ -169,8 +182,7 @@ enum {
BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1,
BLK_MQ_S_SCHED_RESTART = 2,
- BLK_MQ_S_TAG_WAITING = 3,
- BLK_MQ_S_START_ON_RUN = 4,
+ BLK_MQ_S_START_ON_RUN = 3,
BLK_MQ_MAX_DEPTH = 10240,
@@ -198,15 +210,21 @@ void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
enum {
- BLK_MQ_REQ_NOWAIT = (1 << 0), /* return when out of requests */
- BLK_MQ_REQ_RESERVED = (1 << 1), /* allocate from reserved pool */
- BLK_MQ_REQ_INTERNAL = (1 << 2), /* allocate internal/sched tag */
+ /* return when out of requests */
+ BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
+ /* allocate from reserved pool */
+ BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
+ /* allocate internal/sched tag */
+ BLK_MQ_REQ_INTERNAL = (__force blk_mq_req_flags_t)(1 << 2),
+ /* set RQF_PREEMPT */
+ BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3),
};
struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
- unsigned int flags);
+ blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
- unsigned int op, unsigned int flags, unsigned int hctx_idx);
+ unsigned int op, blk_mq_req_flags_t flags,
+ unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
enum {
@@ -249,7 +267,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
+bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
@@ -260,8 +278,8 @@ void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
-int blk_mq_reinit_tagset(struct blk_mq_tag_set *set,
- int (reinit_request)(void *, struct request *));
+int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
+ int (reinit_request)(void *, struct request *));
int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
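
The new get_budget/put_budget hooks let a driver reserve a per-device resource (for example a host queue slot) before ->queue_rq() runs, so blk-mq can stop dispatching instead of pushing requests the device cannot take. A hedged sketch of a hypothetical driver implementation; none of the mydrv_* names come from this patch:

struct mydrv {
	atomic_t inflight;
	int max_inflight;
};

static bool mydrv_get_budget(struct blk_mq_hw_ctx *hctx)
{
	struct mydrv *d = hctx->queue->queuedata;

	/* claim one slot unless the device is already saturated */
	return atomic_add_unless(&d->inflight, 1, d->max_inflight);
}

static void mydrv_put_budget(struct blk_mq_hw_ctx *hctx)
{
	struct mydrv *d = hctx->queue->queuedata;

	atomic_dec(&d->inflight);
}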
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 96ac3815542c..a1e628e032da 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -163,6 +163,8 @@ struct bio {
*/
#define BIO_RESET_BITS BVEC_POOL_OFFSET
+typedef __u32 __bitwise blk_mq_req_flags_t;
+
/*
* Operations and flags common to the bio and request structures.
* We use 8 bits for encoding the operation, and the remaining 24 for flags.
@@ -225,11 +227,14 @@ enum req_flag_bits {
__REQ_PREFLUSH, /* request for cache flush */
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
+ __REQ_NOWAIT, /* Don't wait if request will block */
/* command specific flags for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
- __REQ_NOWAIT, /* Don't wait if request will block */
+ /* for driver use */
+ __REQ_DRV,
+
__REQ_NR_BITS, /* stops here */
};
@@ -246,9 +251,11 @@ enum req_flag_bits {
#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
+#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
-#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
+
+#define REQ_DRV (1ULL << __REQ_DRV)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -330,11 +337,10 @@ static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
}
struct blk_rq_stat {
- s64 mean;
+ u64 mean;
u64 min;
u64 max;
- s32 nr_samples;
- s32 nr_batch;
+ u32 nr_samples;
u64 batch;
};
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8da66379f7ea..8089ca17db9a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -267,6 +267,7 @@ struct blk_queue_ctx;
typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
@@ -409,6 +410,7 @@ struct request_queue {
request_fn_proc *request_fn;
make_request_fn *make_request_fn;
+ poll_q_fn *poll_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
softirq_done_fn *softirq_done_fn;
@@ -610,7 +612,6 @@ struct request_queue {
#define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */
-#define QUEUE_FLAG_STACKABLE 8 /* supports request stacking */
#define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 10 /* do IO stats */
@@ -632,14 +633,13 @@ struct request_queue {
#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
+#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_ADD_RANDOM))
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_POLL))
@@ -723,8 +723,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_stackable(q) \
- test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
@@ -736,6 +734,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
+#define blk_queue_preempt_only(q) \
+ test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+
+extern int blk_set_preempt_only(struct request_queue *q);
+extern void blk_clear_preempt_only(struct request_queue *q);
static inline bool blk_account_rq(struct request *rq)
{
@@ -923,24 +926,17 @@ static inline void rq_flush_dcache_pages(struct request *rq)
}
#endif
-#ifdef CONFIG_PRINTK
-#define vfs_msg(sb, level, fmt, ...) \
- __vfs_msg(sb, level, fmt, ##__VA_ARGS__)
-#else
-#define vfs_msg(sb, level, fmt, ...) \
-do { \
- no_printk(fmt, ##__VA_ARGS__); \
- __vfs_msg(sb, "", " "); \
-} while (0)
-#endif
-
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
+extern blk_qc_t direct_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
+extern struct request *blk_get_request_flags(struct request_queue *,
+ unsigned int op,
+ blk_mq_req_flags_t flags);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
gfp_t gfp_mask);
extern void blk_requeue_request(struct request_queue *, struct request *);
@@ -964,7 +960,7 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
-extern int blk_queue_enter(struct request_queue *q, bool nowait);
+extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
@@ -991,7 +987,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
-bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
+bool blk_poll(struct request_queue *q, blk_qc_t cookie);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
@@ -1110,6 +1106,8 @@ extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);
+void blk_steal_bios(struct bio_list *list, struct request *rq);
+
/*
* Request completion related functions.
*
@@ -1372,7 +1370,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
gfp_mask, 0);
}
-extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
+extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index fdf40ca04b3c..a53063e9d7d8 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -161,6 +161,9 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0)
/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */
+void *memblock_virt_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr,
+ phys_addr_t max_addr, int nid);
void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size,
phys_addr_t align, phys_addr_t min_addr,
phys_addr_t max_addr, int nid);
@@ -177,6 +180,14 @@ static inline void * __init memblock_virt_alloc(
NUMA_NO_NODE);
}
+static inline void * __init memblock_virt_alloc_raw(
+ phys_addr_t size, phys_addr_t align)
+{
+ return memblock_virt_alloc_try_nid_raw(size, align, BOOTMEM_LOW_LIMIT,
+ BOOTMEM_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
+
static inline void * __init memblock_virt_alloc_nopanic(
phys_addr_t size, phys_addr_t align)
{
@@ -258,6 +269,14 @@ static inline void * __init memblock_virt_alloc(
return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT);
}
+static inline void * __init memblock_virt_alloc_raw(
+ phys_addr_t size, phys_addr_t align)
+{
+ if (!align)
+ align = SMP_CACHE_BYTES;
+ return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
+}
+
static inline void * __init memblock_virt_alloc_nopanic(
phys_addr_t size, phys_addr_t align)
{
@@ -310,6 +329,14 @@ static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size,
min_addr);
}
+static inline void * __init memblock_virt_alloc_try_nid_raw(
+ phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr, int nid)
+{
+ return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
+ min_addr, max_addr);
+}
+
static inline void * __init memblock_virt_alloc_try_nid_nopanic(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr, int nid)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index afa37f807f12..8b1bf8d3d4a2 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -157,7 +157,7 @@ void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
- int retry);
+ bool retry);
void create_empty_buffers(struct page *, unsigned long,
unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
diff --git a/include/linux/c2port.h b/include/linux/c2port.h
index 4efabcb51347..f2736348ca26 100644
--- a/include/linux/c2port.h
+++ b/include/linux/c2port.h
@@ -9,8 +9,6 @@
* the Free Software Foundation
*/
-#include <linux/kmemcheck.h>
-
#define C2PORT_NAME_LEN 32
struct device;
@@ -22,10 +20,8 @@ struct device;
/* Main struct */
struct c2port_ops;
struct c2port_device {
- kmemcheck_bitfield_begin(flags);
unsigned int access:1;
unsigned int flash_access:1;
- kmemcheck_bitfield_end(flags);
int id;
char name[C2PORT_NAME_LEN];
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 1dff0a478b45..8b7fd8eeccee 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -17,6 +17,7 @@
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
+#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup.h>
@@ -255,6 +256,57 @@ struct css_set {
struct rcu_head rcu_head;
};
+/*
+ * cgroup basic resource usage statistics. Accounting is done per-cpu in
+ * cgroup_cpu_stat which is then lazily propagated up the hierarchy on
+ * reads.
+ *
+ * When a stat gets updated, the cgroup_cpu_stat and its ancestors are
+ * linked into the updated tree. On the following read, propagation only
+ * considers and consumes the updated tree. This makes reading O(the
+ * number of descendants which have been active since last read) instead of
+ * O(the total number of descendants).
+ *
+ * This is important because there can be a lot of (draining) cgroups which
+ * aren't active and stat may be read frequently. The combination can
+ * become very expensive. By propagating selectively, increasing reading
+ * frequency decreases the cost of each read.
+ */
+struct cgroup_cpu_stat {
+ /*
+ * ->sync protects all the current counters. These are the only
+ * fields which get updated in the hot path.
+ */
+ struct u64_stats_sync sync;
+ struct task_cputime cputime;
+
+ /*
+ * Snapshots at the last reading. These are used to calculate the
+ * deltas to propagate to the global counters.
+ */
+ struct task_cputime last_cputime;
+
+ /*
+ * Child cgroups with stat updates on this cpu since the last read
+ * are linked on the parent's ->updated_children through
+ * ->updated_next.
+ *
+ * In addition to being more compact, a singly-linked list pointing
+ * to the cgroup makes it unnecessary for each per-cpu struct to
+ * point back to the associated cgroup.
+ *
+ * Protected by per-cpu cgroup_cpu_stat_lock.
+ */
+ struct cgroup *updated_children; /* terminated by self cgroup */
+ struct cgroup *updated_next; /* NULL iff not on the list */
+};
+
+struct cgroup_stat {
+ /* per-cpu statistics are collected into the following global counters */
+ struct task_cputime cputime;
+ struct prev_cputime prev_cputime;
+};
+
struct cgroup {
/* self css with NULL ->ss, points back to this cgroup */
struct cgroup_subsys_state self;
@@ -354,6 +406,11 @@ struct cgroup {
*/
struct cgroup *dom_cgrp;
+ /* cgroup basic resource statistics */
+ struct cgroup_cpu_stat __percpu *cpu_stat;
+ struct cgroup_stat pending_stat; /* pending from children */
+ struct cgroup_stat stat;
+
/*
* list of pidlists, up to two for each namespace (one for procs, one
* for tasks); created on demand.
@@ -513,6 +570,8 @@ struct cgroup_subsys {
void (*css_released)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
+ int (*css_extra_stat_show)(struct seq_file *seq,
+ struct cgroup_subsys_state *css);
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
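
The updated_children/updated_next links implement the selective propagation described in the comment above: a cgroup is hooked onto its parent's list the first time one of its counters changes, and a reader consumes only that list. A hedged, stand-alone illustration of the pattern (not the kernel implementation; it uses a simple "queued" flag instead of the self-terminated list):

struct node {
	unsigned long long count, last_count;	/* hot counter + read snapshot */
	struct node *parent;
	struct node *updated_children;		/* children with pending deltas */
	struct node *updated_next;
	bool queued;
};

/* Hot path: link the node (and any unqueued ancestors) onto the updated tree. */
static void node_touch(struct node *n)
{
	for (; n->parent && !n->queued; n = n->parent) {
		n->queued = true;
		n->updated_next = n->parent->updated_children;
		n->parent->updated_children = n;
	}
}

/* Read path: walk only the nodes touched since the previous read. */
static void node_flush(struct node *parent)
{
	struct node *n = parent->updated_children;

	parent->updated_children = NULL;
	while (n) {
		struct node *next = n->updated_next;

		node_flush(n);				/* consume grandchildren first */
		parent->count += n->count - n->last_count;
		n->last_count = n->count;
		n->updated_next = NULL;
		n->queued = false;
		n = next;
	}
}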
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index dddbc29e2009..473e0c0abb86 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -23,6 +23,7 @@
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
+#include <linux/kernel_stat.h>
#include <linux/cgroup-defs.h>
@@ -690,6 +691,63 @@ static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
#endif /* !CONFIG_CGROUPS */
/*
+ * Basic resource stats.
+ */
+#ifdef CONFIG_CGROUPS
+
+#ifdef CONFIG_CGROUP_CPUACCT
+void cpuacct_charge(struct task_struct *tsk, u64 cputime);
+void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
+#else
+static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
+static inline void cpuacct_account_field(struct task_struct *tsk, int index,
+ u64 val) {}
+#endif
+
+void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
+void __cgroup_account_cputime_field(struct cgroup *cgrp,
+ enum cpu_usage_stat index, u64 delta_exec);
+
+static inline void cgroup_account_cputime(struct task_struct *task,
+ u64 delta_exec)
+{
+ struct cgroup *cgrp;
+
+ cpuacct_charge(task, delta_exec);
+
+ rcu_read_lock();
+ cgrp = task_dfl_cgroup(task);
+ if (cgroup_parent(cgrp))
+ __cgroup_account_cputime(cgrp, delta_exec);
+ rcu_read_unlock();
+}
+
+static inline void cgroup_account_cputime_field(struct task_struct *task,
+ enum cpu_usage_stat index,
+ u64 delta_exec)
+{
+ struct cgroup *cgrp;
+
+ cpuacct_account_field(task, index, delta_exec);
+
+ rcu_read_lock();
+ cgrp = task_dfl_cgroup(task);
+ if (cgroup_parent(cgrp))
+ __cgroup_account_cputime_field(cgrp, index, delta_exec);
+ rcu_read_unlock();
+}
+
+#else /* CONFIG_CGROUPS */
+
+static inline void cgroup_account_cputime(struct task_struct *task,
+ u64 delta_exec) {}
+static inline void cgroup_account_cputime_field(struct task_struct *task,
+ enum cpu_usage_stat index,
+ u64 delta_exec) {}
+
+#endif /* CONFIG_CGROUPS */
+
+/*
* sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
* definition in cgroup-defs.h.
*/
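
cgroup_account_cputime() and cgroup_account_cputime_field() are thin wrappers meant for scheduler accounting paths: they charge cpuacct and, on the default hierarchy, the cgroup's basic cpu statistics. Two hedged call-site sketches (the surrounding function names are illustrative):

/* e.g. from update_curr()-style runtime accounting */
static void charge_runtime(struct task_struct *curr, u64 delta_exec)
{
	cgroup_account_cputime(curr, delta_exec);
}

/* e.g. when a tick is classified as system time */
static void charge_system_time(struct task_struct *p, u64 cputime)
{
	cgroup_account_cputime_field(p, CPUTIME_SYSTEM, cputime);
}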
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 54dfef70a072..a06583e41f80 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_COMPILER_H
+#ifndef __LINUX_COMPILER_TYPES_H
#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index bb78e5bdff26..2272ded07496 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_COMPILER_H
+#ifndef __LINUX_COMPILER_TYPES_H
#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
#endif
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 523d1b74550f..bfa08160db3a 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_COMPILER_H
+#ifndef __LINUX_COMPILER_TYPES_H
#error "Please don't include <linux/compiler-intel.h> directly, include <linux/compiler.h> instead."
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 202710420d6d..3672353a0acd 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -2,111 +2,12 @@
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H
-#ifndef __ASSEMBLY__
+#include <linux/compiler_types.h>
-#ifdef __CHECKER__
-# define __user __attribute__((noderef, address_space(1)))
-# define __kernel __attribute__((address_space(0)))
-# define __safe __attribute__((safe))
-# define __force __attribute__((force))
-# define __nocast __attribute__((nocast))
-# define __iomem __attribute__((noderef, address_space(2)))
-# define __must_hold(x) __attribute__((context(x,1,1)))
-# define __acquires(x) __attribute__((context(x,0,1)))
-# define __releases(x) __attribute__((context(x,1,0)))
-# define __acquire(x) __context__(x,1)
-# define __release(x) __context__(x,-1)
-# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
-# define __percpu __attribute__((noderef, address_space(3)))
-# define __rcu __attribute__((noderef, address_space(4)))
-# define __private __attribute__((noderef))
-extern void __chk_user_ptr(const volatile void __user *);
-extern void __chk_io_ptr(const volatile void __iomem *);
-# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
-#else /* __CHECKER__ */
-# ifdef STRUCTLEAK_PLUGIN
-# define __user __attribute__((user))
-# else
-# define __user
-# endif
-# define __kernel
-# define __safe
-# define __force
-# define __nocast
-# define __iomem
-# define __chk_user_ptr(x) (void)0
-# define __chk_io_ptr(x) (void)0
-# define __builtin_warning(x, y...) (1)
-# define __must_hold(x)
-# define __acquires(x)
-# define __releases(x)
-# define __acquire(x) (void)0
-# define __release(x) (void)0
-# define __cond_lock(x,c) (c)
-# define __percpu
-# define __rcu
-# define __private
-# define ACCESS_PRIVATE(p, member) ((p)->member)
-#endif /* __CHECKER__ */
-
-/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
-#define ___PASTE(a,b) a##b
-#define __PASTE(a,b) ___PASTE(a,b)
+#ifndef __ASSEMBLY__
#ifdef __KERNEL__
-#ifdef __GNUC__
-#include <linux/compiler-gcc.h>
-#endif
-
-#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
-#define notrace __attribute__((hotpatch(0,0)))
-#else
-#define notrace __attribute__((no_instrument_function))
-#endif
-
-/* Intel compiler defines __GNUC__. So we will overwrite implementations
- * coming from above header files here
- */
-#ifdef __INTEL_COMPILER
-# include <linux/compiler-intel.h>
-#endif
-
-/* Clang compiler defines __GNUC__. So we will overwrite implementations
- * coming from above header files here
- */
-#ifdef __clang__
-#include <linux/compiler-clang.h>
-#endif
-
-/*
- * Generic compiler-dependent macros required for kernel
- * build go below this comment. Actual compiler/compiler version
- * specific implementations come from the above header files
- */
-
-struct ftrace_branch_data {
- const char *func;
- const char *file;
- unsigned line;
- union {
- struct {
- unsigned long correct;
- unsigned long incorrect;
- };
- struct {
- unsigned long miss;
- unsigned long hit;
- };
- unsigned long miss_hit[2];
- };
-};
-
-struct ftrace_likely_data {
- struct ftrace_branch_data data;
- unsigned long constant;
-};
-
/*
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
* to disable branch tracing on a per file basis.
@@ -333,6 +234,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*/
+#include <asm/barrier.h>
#define __READ_ONCE(x, check) \
({ \
@@ -341,6 +243,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
__read_once_size(&(x), __u.__c, sizeof(x)); \
else \
__read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
+ smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
__u.__val; \
})
#define READ_ONCE(x) __READ_ONCE(x, 1)
@@ -363,167 +266,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
#endif /* __ASSEMBLY__ */
-#ifdef __KERNEL__
-/*
- * Allow us to mark functions as 'deprecated' and have gcc emit a nice
- * warning for each use, in hopes of speeding the functions removal.
- * Usage is:
- * int __deprecated foo(void)
- */
-#ifndef __deprecated
-# define __deprecated /* unimplemented */
-#endif
-
-#ifdef MODULE
-#define __deprecated_for_modules __deprecated
-#else
-#define __deprecated_for_modules
-#endif
-
-#ifndef __must_check
-#define __must_check
-#endif
-
-#ifndef CONFIG_ENABLE_MUST_CHECK
-#undef __must_check
-#define __must_check
-#endif
-#ifndef CONFIG_ENABLE_WARN_DEPRECATED
-#undef __deprecated
-#undef __deprecated_for_modules
-#define __deprecated
-#define __deprecated_for_modules
-#endif
-
-#ifndef __malloc
-#define __malloc
-#endif
-
-/*
- * Allow us to avoid 'defined but not used' warnings on functions and data,
- * as well as force them to be emitted to the assembly file.
- *
- * As of gcc 3.4, static functions that are not marked with attribute((used))
- * may be elided from the assembly file. As of gcc 3.4, static data not so
- * marked will not be elided, but this may change in a future gcc version.
- *
- * NOTE: Because distributions shipped with a backported unit-at-a-time
- * compiler in gcc 3.3, we must define __used to be __attribute__((used))
- * for gcc >=3.3 instead of 3.4.
- *
- * In prior versions of gcc, such functions and data would be emitted, but
- * would be warned about except with attribute((unused)).
- *
- * Mark functions that are referenced only in inline assembly as __used so
- * the code is emitted even though it appears to be unreferenced.
- */
-#ifndef __used
-# define __used /* unimplemented */
-#endif
-
-#ifndef __maybe_unused
-# define __maybe_unused /* unimplemented */
-#endif
-
-#ifndef __always_unused
-# define __always_unused /* unimplemented */
-#endif
-
-#ifndef noinline
-#define noinline
-#endif
-
-/*
- * Rather then using noinline to prevent stack consumption, use
- * noinline_for_stack instead. For documentation reasons.
- */
-#define noinline_for_stack noinline
-
-#ifndef __always_inline
-#define __always_inline inline
-#endif
-
-#endif /* __KERNEL__ */
-
-/*
- * From the GCC manual:
- *
- * Many functions do not examine any values except their arguments,
- * and have no effects except the return value. Basically this is
- * just slightly more strict class than the `pure' attribute above,
- * since function is not allowed to read global memory.
- *
- * Note that a function that has pointer arguments and examines the
- * data pointed to must _not_ be declared `const'. Likewise, a
- * function that calls a non-`const' function usually must not be
- * `const'. It does not make sense for a `const' function to return
- * `void'.
- */
-#ifndef __attribute_const__
-# define __attribute_const__ /* unimplemented */
-#endif
-
-#ifndef __designated_init
-# define __designated_init
-#endif
-
-#ifndef __latent_entropy
-# define __latent_entropy
-#endif
-
-#ifndef __randomize_layout
-# define __randomize_layout __designated_init
-#endif
-
-#ifndef __no_randomize_layout
-# define __no_randomize_layout
-#endif
-
-#ifndef randomized_struct_fields_start
-# define randomized_struct_fields_start
-# define randomized_struct_fields_end
-#endif
-
-/*
- * Tell gcc if a function is cold. The compiler will assume any path
- * directly leading to the call is unlikely.
- */
-
-#ifndef __cold
-#define __cold
-#endif
-
-/* Simple shorthand for a section definition */
-#ifndef __section
-# define __section(S) __attribute__ ((__section__(#S)))
-#endif
-
-#ifndef __visible
-#define __visible
-#endif
-
-#ifndef __nostackprotector
-# define __nostackprotector
-#endif
-
-/*
- * Assume alignment of return value.
- */
-#ifndef __assume_aligned
-#define __assume_aligned(a, ...)
-#endif
-
-
-/* Are two types/vars the same type (ignoring qualifiers)? */
-#ifndef __same_type
-# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
-#endif
-
-/* Is this type a native word size -- useful for atomic operations */
-#ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
-#endif
-
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
@@ -605,24 +347,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
-/**
- * lockless_dereference() - safely load a pointer for later dereference
- * @p: The pointer to load
- *
- * Similar to rcu_dereference(), but for situations where the pointed-to
- * object's lifetime is managed by something other than RCU. That
- * "something other" might be reference counting or simple immortality.
- *
- * The seemingly unused variable ___typecheck_p validates that @p is
- * indeed a pointer type by using a pointer to typeof(*p) as the type.
- * Taking a pointer to typeof(*p) again is needed in case p is void *.
- */
-#define lockless_dereference(p) \
-({ \
- typeof(p) _________p1 = READ_ONCE(p); \
- typeof(*(p)) *___typecheck_p __maybe_unused; \
- smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
- (_________p1); \
-})
-
#endif /* __LINUX_COMPILER_H */
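
With smp_read_barrier_depends() folded into READ_ONCE(), lockless_dereference() becomes redundant, which is why it is removed above. Callers can convert one-for-one; a sketch of the conversion (the list head name is illustrative):

    /* Before: */
    p = lockless_dereference(head->next);

    /* After: READ_ONCE() now provides the same dependency ordering. */
    p = READ_ONCE(head->next);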
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
new file mode 100644
index 000000000000..6b79a9bba9a7
--- /dev/null
+++ b/include/linux/compiler_types.h
@@ -0,0 +1,274 @@
+#ifndef __LINUX_COMPILER_TYPES_H
+#define __LINUX_COMPILER_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef __CHECKER__
+# define __user __attribute__((noderef, address_space(1)))
+# define __kernel __attribute__((address_space(0)))
+# define __safe __attribute__((safe))
+# define __force __attribute__((force))
+# define __nocast __attribute__((nocast))
+# define __iomem __attribute__((noderef, address_space(2)))
+# define __must_hold(x) __attribute__((context(x,1,1)))
+# define __acquires(x) __attribute__((context(x,0,1)))
+# define __releases(x) __attribute__((context(x,1,0)))
+# define __acquire(x) __context__(x,1)
+# define __release(x) __context__(x,-1)
+# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
+# define __percpu __attribute__((noderef, address_space(3)))
+# define __rcu __attribute__((noderef, address_space(4)))
+# define __private __attribute__((noderef))
+extern void __chk_user_ptr(const volatile void __user *);
+extern void __chk_io_ptr(const volatile void __iomem *);
+# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
+#else /* __CHECKER__ */
+# ifdef STRUCTLEAK_PLUGIN
+# define __user __attribute__((user))
+# else
+# define __user
+# endif
+# define __kernel
+# define __safe
+# define __force
+# define __nocast
+# define __iomem
+# define __chk_user_ptr(x) (void)0
+# define __chk_io_ptr(x) (void)0
+# define __builtin_warning(x, y...) (1)
+# define __must_hold(x)
+# define __acquires(x)
+# define __releases(x)
+# define __acquire(x) (void)0
+# define __release(x) (void)0
+# define __cond_lock(x,c) (c)
+# define __percpu
+# define __rcu
+# define __private
+# define ACCESS_PRIVATE(p, member) ((p)->member)
+#endif /* __CHECKER__ */
+
+/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
+#define ___PASTE(a,b) a##b
+#define __PASTE(a,b) ___PASTE(a,b)
+
+#ifdef __KERNEL__
+
+#ifdef __GNUC__
+#include <linux/compiler-gcc.h>
+#endif
+
+#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
+#define notrace __attribute__((hotpatch(0,0)))
+#else
+#define notrace __attribute__((no_instrument_function))
+#endif
+
+/* Intel compiler defines __GNUC__. So we will overwrite implementations
+ * coming from above header files here
+ */
+#ifdef __INTEL_COMPILER
+# include <linux/compiler-intel.h>
+#endif
+
+/* Clang compiler defines __GNUC__. So we will overwrite implementations
+ * coming from above header files here
+ */
+#ifdef __clang__
+#include <linux/compiler-clang.h>
+#endif
+
+/*
+ * Generic compiler-dependent macros required for kernel
+ * build go below this comment. Actual compiler/compiler version
+ * specific implementations come from the above header files
+ */
+
+struct ftrace_branch_data {
+ const char *func;
+ const char *file;
+ unsigned line;
+ union {
+ struct {
+ unsigned long correct;
+ unsigned long incorrect;
+ };
+ struct {
+ unsigned long miss;
+ unsigned long hit;
+ };
+ unsigned long miss_hit[2];
+ };
+};
+
+struct ftrace_likely_data {
+ struct ftrace_branch_data data;
+ unsigned long constant;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+/*
+ * Allow us to mark functions as 'deprecated' and have gcc emit a nice
+ * warning for each use, in hopes of speeding the functions removal.
+ * Usage is:
+ * int __deprecated foo(void)
+ */
+#ifndef __deprecated
+# define __deprecated /* unimplemented */
+#endif
+
+#ifdef MODULE
+#define __deprecated_for_modules __deprecated
+#else
+#define __deprecated_for_modules
+#endif
+
+#ifndef __must_check
+#define __must_check
+#endif
+
+#ifndef CONFIG_ENABLE_MUST_CHECK
+#undef __must_check
+#define __must_check
+#endif
+#ifndef CONFIG_ENABLE_WARN_DEPRECATED
+#undef __deprecated
+#undef __deprecated_for_modules
+#define __deprecated
+#define __deprecated_for_modules
+#endif
+
+#ifndef __malloc
+#define __malloc
+#endif
+
+/*
+ * Allow us to avoid 'defined but not used' warnings on functions and data,
+ * as well as force them to be emitted to the assembly file.
+ *
+ * As of gcc 3.4, static functions that are not marked with attribute((used))
+ * may be elided from the assembly file. As of gcc 3.4, static data not so
+ * marked will not be elided, but this may change in a future gcc version.
+ *
+ * NOTE: Because distributions shipped with a backported unit-at-a-time
+ * compiler in gcc 3.3, we must define __used to be __attribute__((used))
+ * for gcc >=3.3 instead of 3.4.
+ *
+ * In prior versions of gcc, such functions and data would be emitted, but
+ * would be warned about except with attribute((unused)).
+ *
+ * Mark functions that are referenced only in inline assembly as __used so
+ * the code is emitted even though it appears to be unreferenced.
+ */
+#ifndef __used
+# define __used /* unimplemented */
+#endif
+
+#ifndef __maybe_unused
+# define __maybe_unused /* unimplemented */
+#endif
+
+#ifndef __always_unused
+# define __always_unused /* unimplemented */
+#endif
+
+#ifndef noinline
+#define noinline
+#endif
+
+/*
+ * Rather than using noinline to prevent stack consumption, use

+ * noinline_for_stack instead. For documentation reasons.
+ */
+#define noinline_for_stack noinline
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif
+
+#endif /* __KERNEL__ */
+
+/*
+ * From the GCC manual:
+ *
+ * Many functions do not examine any values except their arguments,
+ * and have no effects except the return value. Basically this is
+ * just slightly more strict class than the `pure' attribute above,
+ * since function is not allowed to read global memory.
+ *
+ * Note that a function that has pointer arguments and examines the
+ * data pointed to must _not_ be declared `const'. Likewise, a
+ * function that calls a non-`const' function usually must not be
+ * `const'. It does not make sense for a `const' function to return
+ * `void'.
+ */
+#ifndef __attribute_const__
+# define __attribute_const__ /* unimplemented */
+#endif
+
+#ifndef __designated_init
+# define __designated_init
+#endif
+
+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
+#ifndef __randomize_layout
+# define __randomize_layout __designated_init
+#endif
+
+#ifndef __no_randomize_layout
+# define __no_randomize_layout
+#endif
+
+#ifndef randomized_struct_fields_start
+# define randomized_struct_fields_start
+# define randomized_struct_fields_end
+#endif
+
+/*
+ * Tell gcc if a function is cold. The compiler will assume any path
+ * directly leading to the call is unlikely.
+ */
+
+#ifndef __cold
+#define __cold
+#endif
+
+/* Simple shorthand for a section definition */
+#ifndef __section
+# define __section(S) __attribute__ ((__section__(#S)))
+#endif
+
+#ifndef __visible
+#define __visible
+#endif
+
+#ifndef __nostackprotector
+# define __nostackprotector
+#endif
+
+/*
+ * Assume alignment of return value.
+ */
+#ifndef __assume_aligned
+#define __assume_aligned(a, ...)
+#endif
+
+
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#ifndef __same_type
+# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
+/* Is this type a native word size -- useful for atomic operations */
+#ifndef __native_word
+# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+#endif
+
+#endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 7828451e161a..0662a417febe 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -50,15 +50,23 @@ static inline void complete_release_commit(struct completion *x)
lock_commit_crosslock((struct lockdep_map *)&x->map);
}
+#define init_completion_map(x, m) \
+do { \
+ lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
+ (m)->name, (m)->key, 0); \
+ __init_completion(x); \
+} while (0)
+
#define init_completion(x) \
do { \
static struct lock_class_key __key; \
lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
- "(complete)" #x, \
+ "(completion)" #x, \
&__key, 0); \
__init_completion(x); \
} while (0)
#else
+#define init_completion_map(x, m) __init_completion(x)
#define init_completion(x) __init_completion(x)
static inline void complete_acquire(struct completion *x) {}
static inline void complete_release(struct completion *x) {}
@@ -68,12 +76,15 @@ static inline void complete_release_commit(struct completion *x) {}
#ifdef CONFIG_LOCKDEP_COMPLETIONS
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
- STATIC_CROSS_LOCKDEP_MAP_INIT("(complete)" #work, &(work)) }
+ STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) }
#else
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
#endif
+#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
+ (*({ init_completion_map(&(work), &(map)); &(work); }))
+
#define COMPLETION_INITIALIZER_ONSTACK(work) \
(*({ init_completion(&work); &work; }))
@@ -103,8 +114,11 @@ static inline void complete_release_commit(struct completion *x) {}
#ifdef CONFIG_LOCKDEP
# define DECLARE_COMPLETION_ONSTACK(work) \
struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
+# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \
+ struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map)
#else
# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
+# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work)
#endif
/**
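
DECLARE_COMPLETION_ONSTACK_MAP() lets every on-stack completion in a driver share one lockdep class, supplied through a caller-provided struct lockdep_map. A hedged sketch, assuming a device structure that embeds such a map (my_dev and cmd_map are illustrative names):

    struct my_dev {
            struct lockdep_map cmd_map;     /* set up once with lockdep_init_map() */
            /* ... */
    };

    static int my_dev_do_cmd(struct my_dev *dev)
    {
            DECLARE_COMPLETION_ONSTACK_MAP(done, dev->cmd_map);

            /* submit the command; the completion path calls complete(&done) */
            wait_for_completion(&done);
            return 0;
    }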
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index c96709049683..90b90f8baf99 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -58,7 +58,7 @@ struct config_item {
struct list_head ci_entry;
struct config_item *ci_parent;
struct config_group *ci_group;
- struct config_item_type *ci_type;
+ const struct config_item_type *ci_type;
struct dentry *ci_dentry;
};
@@ -72,7 +72,7 @@ static inline char *config_item_name(struct config_item * item)
extern void config_item_init_type_name(struct config_item *item,
const char *name,
- struct config_item_type *type);
+ const struct config_item_type *type);
extern struct config_item *config_item_get(struct config_item *);
extern struct config_item *config_item_get_unless_zero(struct config_item *);
@@ -101,7 +101,7 @@ struct config_group {
extern void config_group_init(struct config_group *group);
extern void config_group_init_type_name(struct config_group *group,
const char *name,
- struct config_item_type *type);
+ const struct config_item_type *type);
static inline struct config_group *to_config_group(struct config_item *item)
{
@@ -261,7 +261,7 @@ void configfs_remove_default_groups(struct config_group *group);
struct config_group *
configfs_register_default_group(struct config_group *parent_group,
const char *name,
- struct config_item_type *item_type);
+ const struct config_item_type *item_type);
void configfs_unregister_default_group(struct config_group *group);
/* These functions can sleep and can alloc with GFP_KERNEL */
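
With ci_type and the registration helpers constified, item types can now be declared const and placed in read-only data. A small illustrative declaration (the attribute array is assumed to exist):

    static const struct config_item_type my_item_type = {
            .ct_owner = THIS_MODULE,
            .ct_attrs = my_item_attrs,      /* assumed attribute list */
    };

    config_item_init_type_name(item, "example", &my_item_type);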
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 938ea8ae0ba4..a04ef7c15c6a 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -56,27 +56,17 @@ extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif
-struct notifier_block;
-
-#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
-#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
-#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
-#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
- * lock is dropped */
-#define CPU_BROKEN 0x000B /* CPU (unsigned)v did not die properly,
- * perhaps due to preemption. */
-
-/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
- * operation in progress
+
+/*
+ * These states are not related to the core CPU hotplug mechanism. They are
+ * used by various (sub)architectures to track internal state
*/
-#define CPU_TASKS_FROZEN 0x0010
-
-#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
-#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
-#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
-#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
-#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
-#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
+#define CPU_ONLINE 0x0002 /* CPU is up */
+#define CPU_UP_PREPARE 0x0003 /* CPU coming up */
+#define CPU_DEAD 0x0007 /* CPU dead */
+#define CPU_DEAD_FROZEN 0x0008 /* CPU timed out on unplug */
+#define CPU_POST_DEAD 0x0009 /* CPU successfully unplugged */
+#define CPU_BROKEN 0x000B /* CPU did not die properly */
#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 537ff842ff73..28734ee185a7 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -919,6 +919,9 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
extern unsigned int arch_freq_get_on_cpu(int cpu);
+extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
+ unsigned long max_freq);
+
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
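
arch_set_freq_scale() gives cpufreq a hook for reporting the current/maximum frequency ratio to the architecture, typically feeding the scheduler's frequency-invariant load tracking. A sketch of what an implementation might do, assuming a per-CPU scale variable like the one used by the generic arch_topology code:

    void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
                             unsigned long max_freq)
    {
            unsigned long scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
            int cpu;

            for_each_cpu(cpu, cpus)
                    per_cpu(freq_scale, cpu) = scale;   /* assumed per-CPU variable */
    }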
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 2477a5cb5bd5..201ab7267986 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -99,6 +99,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_HIP04_STARTING,
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
+ CPUHP_AP_IRQ_MIPS_GIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
@@ -154,6 +155,9 @@ enum cpuhp_state {
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 8d3125c493b2..75b565194437 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -131,6 +131,11 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
return 0;
}
+static inline unsigned int cpumask_last(const struct cpumask *srcp)
+{
+ return 0;
+}
+
/* Valid inputs for n are -1 and 0. */
static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
@@ -179,6 +184,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
}
+/**
+ * cpumask_last - get the last CPU in a cpumask
+ * @srcp: - the cpumask pointer
+ *
+ * Returns >= nr_cpumask_bits if no CPUs set.
+ */
+static inline unsigned int cpumask_last(const struct cpumask *srcp)
+{
+ return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
unsigned int cpumask_next(int n, const struct cpumask *srcp);
/**
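
cpumask_last() is the mirror of cpumask_first(): it returns the highest set CPU, or a value >= nr_cpumask_bits when the mask is empty. A small illustrative use (the mask and work item are assumptions):

    unsigned int cpu = cpumask_last(housekeeping_mask);     /* assumed mask */

    if (cpu < nr_cpu_ids)
            queue_work_on(cpu, system_wq, &my_work);        /* assumed work item */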
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 84da9978e951..78508ca4b108 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
+#include <linux/completion.h>
/*
* Autoloaded crypto modules should only use a prefixed name to avoid allowing
@@ -468,6 +469,45 @@ struct crypto_alg {
} CRYPTO_MINALIGN_ATTR;
/*
+ * A helper struct for waiting for completion of async crypto ops
+ */
+struct crypto_wait {
+ struct completion completion;
+ int err;
+};
+
+/*
+ * Macro for declaring a crypto op async wait object on stack
+ */
+#define DECLARE_CRYPTO_WAIT(_wait) \
+ struct crypto_wait _wait = { \
+ COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }
+
+/*
+ * Async ops completion helper functions
+ */
+void crypto_req_done(struct crypto_async_request *req, int err);
+
+static inline int crypto_wait_req(int err, struct crypto_wait *wait)
+{
+ switch (err) {
+ case -EINPROGRESS:
+ case -EBUSY:
+ wait_for_completion(&wait->completion);
+ reinit_completion(&wait->completion);
+ err = wait->err;
+ break;
+ }
+
+ return err;
+}
+
+static inline void crypto_init_wait(struct crypto_wait *wait)
+{
+ init_completion(&wait->completion);
+}
+
+/*
* Algorithm registration interface.
*/
int crypto_register_alg(struct crypto_alg *alg);
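
The crypto_wait helpers standardize the "submit and wait for an async result" pattern: the request's completion callback is crypto_req_done(), and crypto_wait_req() folds the -EINPROGRESS/-EBUSY cases into a synchronous wait. A sketch of the calling convention for an skcipher request (tfm, req, sg, datalen and iv are assumed to be set up already):

    DECLARE_CRYPTO_WAIT(wait);
    int err;

    skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                  CRYPTO_TFM_REQ_MAY_SLEEP,
                                  crypto_req_done, &wait);
    skcipher_request_set_crypt(req, sg, sg, datalen, iv);

    err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);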
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
index 83925bd12c0b..05ee0f19448a 100644
--- a/include/linux/cyclades.h
+++ b/include/linux/cyclades.h
@@ -157,6 +157,9 @@ struct cyclades_port {
struct cyclades_icount icount;
struct completion shutdown_wait;
int throttle;
+#ifdef CONFIG_CYZ_INTR
+ struct timer_list rx_full_timer;
+#endif
};
#define CLOSING_WAIT_DELAY 30*HZ
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index f05a659cdf34..65cd8ab60b7a 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -520,7 +520,7 @@ static inline struct inode *d_inode(const struct dentry *dentry)
}
/**
- * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE()
+ * d_inode_rcu - Get the actual inode of this dentry with READ_ONCE()
* @dentry: The dentry to query
*
* This is the helper normal filesystems should use to get at their own inodes
@@ -528,7 +528,7 @@ static inline struct inode *d_inode(const struct dentry *dentry)
*/
static inline struct inode *d_inode_rcu(const struct dentry *dentry)
{
- return ACCESS_ONCE(dentry->d_inode);
+ return READ_ONCE(dentry->d_inode);
}
/**
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index b93efc8feecd..f36ecc2a5712 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* debugfs.h - a tiny little debug file system
*
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2004 IBM Inc.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* debugfs is for people to use instead of /proc or /sys.
* See Documentation/filesystems/ for more details.
*/
@@ -23,7 +20,6 @@
struct device;
struct file_operations;
-struct srcu_struct;
struct debugfs_blob_wrapper {
void *data;
@@ -43,25 +39,6 @@ struct debugfs_regset32 {
extern struct dentry *arch_debugfs_dir;
-extern struct srcu_struct debugfs_srcu;
-
-/**
- * debugfs_real_fops - getter for the real file operation
- * @filp: a pointer to a struct file
- *
- * Must only be called under the protection established by
- * debugfs_use_file_start().
- */
-static inline const struct file_operations *debugfs_real_fops(const struct file *filp)
- __must_hold(&debugfs_srcu)
-{
- /*
- * Neither the pointer to the struct file_operations, nor its
- * contents ever change -- srcu_dereference() is not needed here.
- */
- return filp->f_path.dentry->d_fsdata;
-}
-
#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
@@ -107,10 +84,10 @@ struct dentry *debugfs_create_automount(const char *name,
void debugfs_remove(struct dentry *dentry);
void debugfs_remove_recursive(struct dentry *dentry);
-int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx)
- __acquires(&debugfs_srcu);
+const struct file_operations *debugfs_real_fops(const struct file *filp);
-void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu);
+int debugfs_file_get(struct dentry *dentry);
+void debugfs_file_put(struct dentry *dentry);
ssize_t debugfs_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos);
@@ -239,15 +216,12 @@ static inline void debugfs_remove(struct dentry *dentry)
static inline void debugfs_remove_recursive(struct dentry *dentry)
{ }
-static inline int debugfs_use_file_start(const struct dentry *dentry,
- int *srcu_idx)
- __acquires(&debugfs_srcu)
+static inline int debugfs_file_get(struct dentry *dentry)
{
return 0;
}
-static inline void debugfs_use_file_finish(int srcu_idx)
- __releases(&debugfs_srcu)
+static inline void debugfs_file_put(struct dentry *dentry)
{ }
static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf,
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 597294e0cc40..3aae5b3af87c 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -19,6 +19,13 @@
#define DEVFREQ_NAME_LEN 16
+/* DEVFREQ governor name */
+#define DEVFREQ_GOV_SIMPLE_ONDEMAND "simple_ondemand"
+#define DEVFREQ_GOV_PERFORMANCE "performance"
+#define DEVFREQ_GOV_POWERSAVE "powersave"
+#define DEVFREQ_GOV_USERSPACE "userspace"
+#define DEVFREQ_GOV_PASSIVE "passive"
+
/* DEVFREQ notifier interface */
#define DEVFREQ_TRANSITION_NOTIFIER (0)
@@ -84,8 +91,9 @@ struct devfreq_dev_status {
* from devfreq_remove_device() call. If the user
* has registered devfreq->nb at a notifier-head,
* this is the time to unregister it.
- * @freq_table: Optional list of frequencies to support statistics.
- * @max_state: The size of freq_table.
+ * @freq_table: Optional list of frequencies to support statistics;
+ * must be generated in ascending order.
+ * @max_state: The size of freq_table.
*/
struct devfreq_dev_profile {
unsigned long initial_freq;
@@ -120,6 +128,8 @@ struct devfreq_dev_profile {
* touch this.
* @min_freq: Limit minimum frequency requested by user (0: none)
* @max_freq: Limit maximum frequency requested by user (0: none)
+ * @scaling_min_freq: Limit minimum frequency requested by OPP interface
+ * @scaling_max_freq: Limit maximum frequency requested by OPP interface
* @stop_polling: devfreq polling status of a device.
* @total_trans: Number of devfreq transitions
* @trans_table: Statistics of devfreq transitions
@@ -153,6 +163,8 @@ struct devfreq {
unsigned long min_freq;
unsigned long max_freq;
+ unsigned long scaling_min_freq;
+ unsigned long scaling_max_freq;
bool stop_polling;
/* information for device frequency transition */
diff --git a/include/linux/device.h b/include/linux/device.h
index 66fe271c2544..9d32000725da 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -97,6 +97,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @p: The private data of the driver core, only the driver core can
* touch this.
* @lock_key: Lock class key for use by the lock validator
+ * @force_dma: Assume devices on this bus should be set up by dma_configure()
+ * even if DMA capability is not explicitly described by firmware.
*
* A bus is a channel between the processor and one or more devices. For the
* purposes of the device model, all devices are connected via a bus, even if
@@ -135,6 +137,8 @@ struct bus_type {
struct subsys_private *p;
struct lock_class_key lock_key;
+
+ bool force_dma;
};
extern int __must_check bus_register(struct bus_type *bus);
@@ -370,9 +374,6 @@ int subsys_virtual_register(struct bus_type *subsys,
* @devnode: Callback to provide the devtmpfs.
* @class_release: Called to release this class.
* @dev_release: Called to release the device.
- * @suspend: Used to put the device to sleep mode, usually to a low power
- * state.
- * @resume: Used to bring the device from the sleep mode.
* @shutdown_pre: Called at shut-down time before driver shutdown.
 * @ns_type: Callbacks so sysfs can determine namespaces.
* @namespace: Namespace of the device belongs to this class.
@@ -400,8 +401,6 @@ struct class {
void (*class_release)(struct class *class);
void (*dev_release)(struct device *dev);
- int (*suspend)(struct device *dev, pm_message_t state);
- int (*resume)(struct device *dev);
int (*shutdown_pre)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
@@ -1075,6 +1074,16 @@ static inline void dev_pm_syscore_device(struct device *dev, bool val)
#endif
}
+static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags)
+{
+ dev->power.driver_flags = flags;
+}
+
+static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags)
+{
+ return !!(dev->power.driver_flags & flags);
+}
+
static inline void device_lock(struct device *dev)
{
mutex_lock(&dev->mutex);
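
dev_pm_set_driver_flags()/dev_pm_test_driver_flags() give drivers a setter and tester for power.driver_flags. A hedged sketch of a probe-time call; the DPM_FLAG_* constant is an assumption about the flags introduced alongside this field:

    static int my_probe(struct device *dev)
    {
            /* Assumed flag name; the DPM_FLAG_* constants live in linux/pm.h. */
            dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
            return 0;
    }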
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 171895072435..efdabbb64e3c 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -128,7 +128,7 @@ struct dma_fence_cb {
* implementation know that there is another driver waiting on
* the signal (ie. hw->sw case).
*
- * This function can be called called from atomic context, but not
+ * This function can be called from atomic context, but not
* from irq context, so normal spinlocks can be used.
*
* A return value of false indicates the fence already passed,
@@ -248,9 +248,12 @@ dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
struct dma_fence *fence;
fence = rcu_dereference(*fencep);
- if (!fence || !dma_fence_get_rcu(fence))
+ if (!fence)
return NULL;
+ if (!dma_fence_get_rcu(fence))
+ continue;
+
/* The atomic_inc_not_zero() inside dma_fence_get_rcu()
* provides a full memory barrier upon success (such as now).
* This is paired with the write barrier from assigning
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 7653ea66874d..e8f8e8fb244d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -9,7 +9,6 @@
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
-#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>
@@ -127,6 +126,8 @@ struct dma_map_ops {
void (*sync_sg_for_device)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir);
+ void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
@@ -230,7 +231,6 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
- kmemcheck_mark_initialized(ptr, size);
BUG_ON(!valid_dma_direction(dir));
addr = ops->map_page(dev, virt_to_page(ptr),
offset_in_page(ptr), size,
@@ -263,11 +263,8 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
unsigned long attrs)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
- int i, ents;
- struct scatterlist *s;
+ int ents;
- for_each_sg(sg, s, nents, i)
- kmemcheck_mark_initialized(sg_virt(s), s->length);
BUG_ON(!valid_dma_direction(dir));
ents = ops->map_sg(dev, sg, nents, dir, attrs);
BUG_ON(ents < 0);
@@ -297,7 +294,6 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
- kmemcheck_mark_initialized(page_address(page) + offset, size);
BUG_ON(!valid_dma_direction(dir));
addr = ops->map_page(dev, page, offset, size, dir, attrs);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
@@ -437,6 +433,17 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->cache_sync)
+ ops->cache_sync(dev, vaddr, size, dir);
+}
+
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
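
dma_cache_sync() now dispatches through the dma_map_ops ->cache_sync() hook instead of per-architecture implementations; the calling convention is unchanged. A minimal sketch for a driver working on a non-coherent buffer (dev, buf and len are assumed):

    /* Make CPU writes to buf visible to the device before starting DMA. */
    dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);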
diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h
index 3ae300052553..34b98f276ed0 100644
--- a/include/linux/dma/xilinx_dma.h
+++ b/include/linux/dma/xilinx_dma.h
@@ -41,20 +41,6 @@ struct xilinx_vdma_config {
int ext_fsync;
};
-/**
- * enum xdma_ip_type: DMA IP type.
- *
- * XDMA_TYPE_AXIDMA: Axi dma ip.
- * XDMA_TYPE_CDMA: Axi cdma ip.
- * XDMA_TYPE_VDMA: Axi vdma ip.
- *
- */
-enum xdma_ip_type {
- XDMA_TYPE_AXIDMA = 0,
- XDMA_TYPE_CDMA,
- XDMA_TYPE_VDMA,
-};
-
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
struct xilinx_vdma_config *cfg);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8319101170fc..f838764993eb 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -329,7 +329,7 @@ enum dma_slave_buswidth {
* @src_addr_width: this is the width in bytes of the source (RX)
* register where DMA data shall be read. If the source
* is memory this may be ignored depending on architecture.
- * Legal values: 1, 2, 4, 8.
+ * Legal values: 1, 2, 3, 4, 8, 16, 32, 64.
* @dst_addr_width: same as src_addr_width but for destination
* target (TX) mutatis mutandis.
* @src_maxburst: the maximum number of words (note: words, as in
@@ -404,14 +404,16 @@ enum dma_residue_granularity {
DMA_RESIDUE_GRANULARITY_BURST = 2,
};
-/* struct dma_slave_caps - expose capabilities of a slave channel only
- *
- * @src_addr_widths: bit mask of src addr widths the channel supports
- * @dst_addr_widths: bit mask of dstn addr widths the channel supports
- * @directions: bit mask of slave direction the channel supported
- * since the enum dma_transfer_direction is not defined as bits for each
- * type of direction, the dma controller should fill (1 << <TYPE>) and same
- * should be checked by controller as well
+/**
+ * struct dma_slave_caps - expose capabilities of a slave channel only
+ * @src_addr_widths: bit mask of src addr widths the channel supports.
+ * Width is specified in bytes, e.g. for a channel supporting
+ * a width of 4 the mask should have BIT(4) set.
+ * @dst_addr_widths: bit mask of dst addr widths the channel supports
+ * @directions: bit mask of slave directions the channel supports.
+ * Since the enum dma_transfer_direction is not defined as bit flag for
+ * each type, the dma controller should set BIT(<TYPE>) and same
+ * should be checked by controller as well
* @max_burst: max burst capability per-transfer
* @cmd_pause: true, if pause and thereby resume is supported
* @cmd_terminate: true, if terminate cmd is supported
@@ -678,11 +680,13 @@ struct dma_filter {
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
* @src_addr_widths: bit mask of src addr widths the device supports
+ * Width is specified in bytes, e.g. for a device supporting
+ * a width of 4 the mask should have BIT(4) set.
* @dst_addr_widths: bit mask of dst addr widths the device supports
- * @directions: bit mask of slave direction the device supports since
- * the enum dma_transfer_direction is not defined as bits for
- * each type of direction, the dma controller should fill (1 <<
- * <TYPE>) and same should be checked by controller as well
+ * @directions: bit mask of slave directions the device supports.
+ * Since the enum dma_transfer_direction is not defined as bit flag for
+ * each type, the dma controller should set BIT(<TYPE>) and same
+ * should be checked by controller as well
* @max_burst: max burst capability per-transfer
* @residue_granularity: granularity of the transfer residue reported
* by tx_status
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index e8ffba1052d3..e2433bc50210 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -112,6 +112,7 @@ static inline bool dmar_rcu_check(void)
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
+extern void dmar_register_bus_notifier(void);
extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index be1214731505..99fc06f0afc1 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -89,7 +89,7 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
/* Returns how many objects can be queued, < 0 indicates over limit. */
static inline int dql_avail(const struct dql *dql)
{
- return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued);
+ return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued);
}
/* Record number of completed objects and recalculate the limit. */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index ddb7632d73b9..3d794b3dc532 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -145,6 +145,7 @@ struct elevator_type
size_t icq_align; /* ditto */
struct elv_fs_entry *elevator_attrs;
char elevator_name[ELV_NAME_MAX];
+ const char *elevator_alias;
struct module *elevator_owner;
bool uses_mq;
#ifdef CONFIG_BLK_DEBUG_FS
diff --git a/include/linux/extcon-provider.h b/include/linux/extcon-provider.h
new file mode 100644
index 000000000000..2feca5881fa7
--- /dev/null
+++ b/include/linux/extcon-provider.h
@@ -0,0 +1,142 @@
+/*
+ * External Connector (extcon) framework
+ * - linux/include/linux/extcon-provider.h for extcon provider device driver.
+ *
+ * Copyright (C) 2017 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_EXTCON_PROVIDER_H__
+#define __LINUX_EXTCON_PROVIDER_H__
+
+#include <linux/extcon.h>
+
+struct extcon_dev;
+
+#if IS_ENABLED(CONFIG_EXTCON)
+
+/* Following APIs register/unregister the extcon device. */
+extern int extcon_dev_register(struct extcon_dev *edev);
+extern void extcon_dev_unregister(struct extcon_dev *edev);
+extern int devm_extcon_dev_register(struct device *dev,
+ struct extcon_dev *edev);
+extern void devm_extcon_dev_unregister(struct device *dev,
+ struct extcon_dev *edev);
+
+/* Following APIs allocate/free the memory of the extcon device. */
+extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
+extern void extcon_dev_free(struct extcon_dev *edev);
+extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+ const unsigned int *cable);
+extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
+
+/* Synchronize the state and property value for each external connector. */
+extern int extcon_sync(struct extcon_dev *edev, unsigned int id);
+
+/*
+ * Following APIs set the connected state of each external connector.
+ * The 'id' argument indicates the defined external connector.
+ */
+extern int extcon_set_state(struct extcon_dev *edev, unsigned int id,
+ bool state);
+extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
+ bool state);
+
+/*
+ * Following APIs set the property of each external connector.
+ * The 'id' argument indicates the defined external connector
+ * and the 'prop' indicates the extcon property.
+ *
+ * And extcon_set_property_capability() sets the capability of the property
+ * for each external connector. It is used to set the capability of the
+ * property of each external connector based on the id and property.
+ */
+extern int extcon_set_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val);
+extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val);
+extern int extcon_set_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop);
+
+#else /* CONFIG_EXTCON */
+static inline int extcon_dev_register(struct extcon_dev *edev)
+{
+ return 0;
+}
+
+static inline void extcon_dev_unregister(struct extcon_dev *edev) { }
+
+static inline int devm_extcon_dev_register(struct device *dev,
+ struct extcon_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline void devm_extcon_dev_unregister(struct device *dev,
+ struct extcon_dev *edev) { }
+
+static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void extcon_dev_free(struct extcon_dev *edev) { }
+
+static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+ const unsigned int *cable)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void devm_extcon_dev_free(struct extcon_dev *edev) { }
+
+
+static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id,
+ bool state)
+{
+ return 0;
+}
+
+static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
+ bool state)
+{
+ return 0;
+}
+
+static inline int extcon_sync(struct extcon_dev *edev, unsigned int id)
+{
+ return 0;
+}
+
+static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id,
+ unsigned int prop,
+ union extcon_property_value prop_val)
+{
+ return 0;
+}
+
+static inline int extcon_set_property_sync(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop,
+ union extcon_property_value prop_val)
+{
+ return 0;
+}
+
+static inline int extcon_set_property_capability(struct extcon_dev *edev,
+ unsigned int id, unsigned int prop)
+{
+ return 0;
+}
+#endif /* CONFIG_EXTCON */
+#endif /* __LINUX_EXTCON_PROVIDER_H__ */
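
extcon-provider.h now carries the provider-side API (allocation, registration, state and property setters), while extcon.h keeps only the consumer side. A hedged sketch of a provider driver using it; the cable table and probe body are illustrative:

    #include <linux/extcon-provider.h>

    static const unsigned int my_cables[] = { EXTCON_USB, EXTCON_NONE };

    static int my_probe(struct platform_device *pdev)
    {
            struct extcon_dev *edev;
            int ret;

            edev = devm_extcon_dev_allocate(&pdev->dev, my_cables);
            if (IS_ERR(edev))
                    return PTR_ERR(edev);

            ret = devm_extcon_dev_register(&pdev->dev, edev);
            if (ret)
                    return ret;

            /* Later, when the cable is detected: */
            extcon_set_state_sync(edev, EXTCON_USB, true);
            return 0;
    }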
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 744d60ca80c3..6d94e82c8ad9 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -1,5 +1,6 @@
/*
* External Connector (extcon) framework
+ * - linux/include/linux/extcon.h for extcon consumer device driver.
*
* Copyright (C) 2015 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -170,61 +171,29 @@ union extcon_property_value {
int intval; /* type : integer (intval) */
};
-struct extcon_cable;
struct extcon_dev;
#if IS_ENABLED(CONFIG_EXTCON)
-
-/* Following APIs register/unregister the extcon device. */
-extern int extcon_dev_register(struct extcon_dev *edev);
-extern void extcon_dev_unregister(struct extcon_dev *edev);
-extern int devm_extcon_dev_register(struct device *dev,
- struct extcon_dev *edev);
-extern void devm_extcon_dev_unregister(struct device *dev,
- struct extcon_dev *edev);
-
-/* Following APIs allocate/free the memory of the extcon device. */
-extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
-extern void extcon_dev_free(struct extcon_dev *edev);
-extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const unsigned int *cable);
-extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
-
-/* Synchronize the state and property value for each external connector. */
-extern int extcon_sync(struct extcon_dev *edev, unsigned int id);
-
/*
- * Following APIs get/set the connected state of each external connector.
+ * Following APIs get the connected state of each external connector.
* The 'id' argument indicates the defined external connector.
*/
extern int extcon_get_state(struct extcon_dev *edev, unsigned int id);
-extern int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool state);
-extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool state);
/*
- * Following APIs get/set the property of each external connector.
+ * Following APIs get the property of each external connector.
* The 'id' argument indicates the defined external connector
* and the 'prop' indicates the extcon property.
*
- * And extcon_get/set_property_capability() set the capability of the property
- * for each external connector. They are used to set the capability of the
+ * And extcon_get_property_capability() gets the capability of the property
+ * for each external connector. It is used to get the capability of the
* property of each external connector based on the id and property.
*/
extern int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value *prop_val);
-extern int extcon_set_property(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value prop_val);
-extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value prop_val);
extern int extcon_get_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
-extern int extcon_set_property_capability(struct extcon_dev *edev,
- unsigned int id, unsigned int prop);
/*
* Following APIs register the notifier block in order to detect
@@ -268,79 +237,17 @@ extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
extern const char *extcon_get_edev_name(struct extcon_dev *edev);
#else /* CONFIG_EXTCON */
-static inline int extcon_dev_register(struct extcon_dev *edev)
-{
- return 0;
-}
-
-static inline void extcon_dev_unregister(struct extcon_dev *edev) { }
-
-static inline int devm_extcon_dev_register(struct device *dev,
- struct extcon_dev *edev)
-{
- return -EINVAL;
-}
-
-static inline void devm_extcon_dev_unregister(struct device *dev,
- struct extcon_dev *edev) { }
-
-static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static inline void extcon_dev_free(struct extcon_dev *edev) { }
-
-static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const unsigned int *cable)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static inline void devm_extcon_dev_free(struct extcon_dev *edev) { }
-
-
static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id)
{
return 0;
}
-static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool state)
-{
- return 0;
-}
-
-static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool state)
-{
- return 0;
-}
-
-static inline int extcon_sync(struct extcon_dev *edev, unsigned int id)
-{
- return 0;
-}
-
static inline int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value *prop_val)
{
return 0;
}
-static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value prop_val)
-{
- return 0;
-}
-
-static inline int extcon_set_property_sync(struct extcon_dev *edev,
- unsigned int id, unsigned int prop,
- union extcon_property_value prop_val)
-{
- return 0;
-}
static inline int extcon_get_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop)
@@ -348,12 +255,6 @@ static inline int extcon_get_property_capability(struct extcon_dev *edev,
return 0;
}
-static inline int extcon_set_property_capability(struct extcon_dev *edev,
- unsigned int id, unsigned int prop)
-{
- return 0;
-}
-
static inline int extcon_register_notifier(struct extcon_dev *edev,
unsigned int id, struct notifier_block *nb)
{
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 0cd02ff4ae30..80b5b482cb46 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -454,13 +454,11 @@ struct bpf_binary_header {
struct bpf_prog {
u16 pages; /* Number of allocated pages */
- kmemcheck_bitfield_begin(meta);
u16 jited:1, /* Is our filter JIT'ed? */
locked:1, /* Program image locked? */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
dst_needed:1; /* Do we need dst entry? */
- kmemcheck_bitfield_end(meta);
enum bpf_prog_type type; /* Type of BPF program */
u32 len; /* Number of filter blocks */
u32 jited_len; /* Size of jited insns in bytes */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 3995df1d068f..21f5aa0b217f 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -182,7 +182,7 @@ static inline void freezable_schedule_unsafe(void)
}
/*
- * Like freezable_schedule_timeout(), but should not block the freezer. Do not
+ * Like schedule_timeout(), but should not block the freezer. Do not
* call this with locks held.
*/
static inline long freezable_schedule_timeout(long timeout)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 885266aae2d7..269086440071 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1854,6 +1854,7 @@ struct super_operations {
#else
#define S_DAX 0 /* Make all the DAX code disappear */
#endif
+#define S_ENCRYPTED 16384 /* Encrypted file (using fs/crypto/) */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -1893,6 +1894,7 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
#define IS_DAX(inode) ((inode)->i_flags & S_DAX)
+#define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED)
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
@@ -2793,6 +2795,7 @@ extern int do_pipe_flags(int *, int);
id(KEXEC_IMAGE, kexec-image) \
id(KEXEC_INITRAMFS, kexec-initramfs) \
id(POLICY, security-policy) \
+ id(X509_CERTIFICATE, x509-certificate) \
id(MAX_ID, )
#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
new file mode 100644
index 000000000000..08b4b40c5aa8
--- /dev/null
+++ b/include/linux/fscrypt.h
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * fscrypt.h: declarations for per-file encryption
+ *
+ * Filesystems that implement per-file encryption include this header
+ * file with __FS_HAS_ENCRYPTION set according to whether that filesystem
+ * is being built with encryption support or not.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * Written by Michael Halcrow, 2015.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+#ifndef _LINUX_FSCRYPT_H
+#define _LINUX_FSCRYPT_H
+
+#include <linux/key.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/bio.h>
+#include <linux/dcache.h>
+#include <crypto/skcipher.h>
+#include <uapi/linux/fs.h>
+
+#define FS_CRYPTO_BLOCK_SIZE 16
+
+struct fscrypt_info;
+
+struct fscrypt_ctx {
+ union {
+ struct {
+ struct page *bounce_page; /* Ciphertext page */
+ struct page *control_page; /* Original page */
+ } w;
+ struct {
+ struct bio *bio;
+ struct work_struct work;
+ } r;
+ struct list_head free_list; /* Free list */
+ };
+ u8 flags; /* Flags */
+};
+
+/**
+ * For encrypted symlinks, the ciphertext length is stored at the beginning
+ * of the string in little-endian format.
+ */
+struct fscrypt_symlink_data {
+ __le16 len;
+ char encrypted_path[1];
+} __packed;
+
+struct fscrypt_str {
+ unsigned char *name;
+ u32 len;
+};
+
+struct fscrypt_name {
+ const struct qstr *usr_fname;
+ struct fscrypt_str disk_name;
+ u32 hash;
+ u32 minor_hash;
+ struct fscrypt_str crypto_buf;
+};
+
+#define FSTR_INIT(n, l) { .name = n, .len = l }
+#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
+#define fname_name(p) ((p)->disk_name.name)
+#define fname_len(p) ((p)->disk_name.len)
+
+/*
+ * fscrypt superblock flags
+ */
+#define FS_CFLG_OWN_PAGES (1U << 1)
+
+/*
+ * crypto operations for filesystems
+ */
+struct fscrypt_operations {
+ unsigned int flags;
+ const char *key_prefix;
+ int (*get_context)(struct inode *, void *, size_t);
+ int (*set_context)(struct inode *, const void *, size_t, void *);
+ bool (*dummy_context)(struct inode *);
+ bool (*empty_dir)(struct inode *);
+ unsigned (*max_namelen)(struct inode *);
+};
+
+/* Maximum value for the third parameter of fscrypt_operations.set_context(). */
+#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+ if (inode->i_sb->s_cop->dummy_context &&
+ inode->i_sb->s_cop->dummy_context(inode))
+ return true;
+ return false;
+}
+
+static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
+ u32 filenames_mode)
+{
+ if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
+ filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
+ return true;
+
+ if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
+ filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
+ return true;
+
+ return false;
+}
+
+static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
+{
+ if (str->len == 1 && str->name[0] == '.')
+ return true;
+
+ if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
+ return true;
+
+ return false;
+}
+
+#if __FS_HAS_ENCRYPTION
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+ return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+}
+
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+ return (inode->i_crypt_info != NULL);
+}
+
+#include <linux/fscrypt_supp.h>
+
+#else /* !__FS_HAS_ENCRYPTION */
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+}
+
+static inline bool fscrypt_has_encryption_key(const struct inode *inode)
+{
+ return 0;
+}
+
+#include <linux/fscrypt_notsupp.h>
+#endif /* __FS_HAS_ENCRYPTION */
+
+/**
+ * fscrypt_require_key - require an inode's encryption key
+ * @inode: the inode we need the key for
+ *
+ * If the inode is encrypted, set up its encryption key if not already done.
+ * Then require that the key be present and return -ENOKEY otherwise.
+ *
+ * No locks are needed, and the key will live as long as the struct inode --- so
+ * it won't go away from under you.
+ *
+ * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code
+ * if a problem occurred while setting up the encryption key.
+ */
+static inline int fscrypt_require_key(struct inode *inode)
+{
+ if (IS_ENCRYPTED(inode)) {
+ int err = fscrypt_get_encryption_info(inode);
+
+ if (err)
+ return err;
+ if (!fscrypt_has_encryption_key(inode))
+ return -ENOKEY;
+ }
+ return 0;
+}
+
+/**
+ * fscrypt_prepare_link - prepare to link an inode into a possibly-encrypted directory
+ * @old_dentry: an existing dentry for the inode being linked
+ * @dir: the target directory
+ * @dentry: negative dentry for the target filename
+ *
+ * A new link can only be added to an encrypted directory if the directory's
+ * encryption key is available --- since otherwise we'd have no way to encrypt
+ * the filename. Therefore, we first set up the directory's encryption key (if
+ * not already done) and return an error if it's unavailable.
+ *
+ * We also verify that the link will not violate the constraint that all files
+ * in an encrypted directory tree use the same encryption policy.
+ *
+ * Return: 0 on success, -ENOKEY if the directory's encryption key is missing,
+ * -EPERM if the link would result in an inconsistent encryption policy, or
+ * another -errno code.
+ */
+static inline int fscrypt_prepare_link(struct dentry *old_dentry,
+ struct inode *dir,
+ struct dentry *dentry)
+{
+ if (IS_ENCRYPTED(dir))
+ return __fscrypt_prepare_link(d_inode(old_dentry), dir);
+ return 0;
+}
+
+/**
+ * fscrypt_prepare_rename - prepare for a rename between possibly-encrypted directories
+ * @old_dir: source directory
+ * @old_dentry: dentry for source file
+ * @new_dir: target directory
+ * @new_dentry: dentry for target location (may be negative unless exchanging)
+ * @flags: rename flags (we care at least about %RENAME_EXCHANGE)
+ *
+ * Prepare for ->rename() where the source and/or target directories may be
+ * encrypted. A new link can only be added to an encrypted directory if the
+ * directory's encryption key is available --- since otherwise we'd have no way
+ * to encrypt the filename. A rename to an existing name, on the other hand,
+ * *is* cryptographically possible without the key. However, we take the more
+ * conservative approach and just forbid all no-key renames.
+ *
+ * We also verify that the rename will not violate the constraint that all files
+ * in an encrypted directory tree use the same encryption policy.
+ *
+ * Return: 0 on success, -ENOKEY if an encryption key is missing, -EPERM if the
+ * rename would cause inconsistent encryption policies, or another -errno code.
+ */
+static inline int fscrypt_prepare_rename(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry,
+ unsigned int flags)
+{
+ if (IS_ENCRYPTED(old_dir) || IS_ENCRYPTED(new_dir))
+ return __fscrypt_prepare_rename(old_dir, old_dentry,
+ new_dir, new_dentry, flags);
+ return 0;
+}
+
+/**
+ * fscrypt_prepare_lookup - prepare to lookup a name in a possibly-encrypted directory
+ * @dir: directory being searched
+ * @dentry: filename being looked up
+ * @flags: lookup flags
+ *
+ * Prepare for ->lookup() in a directory which may be encrypted. Lookups can be
+ * done with or without the directory's encryption key; without the key,
+ * filenames are presented in encrypted form. Therefore, we'll try to set up
+ * the directory's encryption key, but even without it the lookup can continue.
+ *
+ * To allow invalidating stale dentries if the directory's encryption key is
+ * added later, we also install a custom ->d_revalidate() method and use the
+ * DCACHE_ENCRYPTED_WITH_KEY flag to indicate whether a given dentry is a
+ * plaintext name (flag set) or a ciphertext name (flag cleared).
+ *
+ * Return: 0 on success, -errno if a problem occurred while setting up the
+ * encryption key
+ */
+static inline int fscrypt_prepare_lookup(struct inode *dir,
+ struct dentry *dentry,
+ unsigned int flags)
+{
+ if (IS_ENCRYPTED(dir))
+ return __fscrypt_prepare_lookup(dir, dentry);
+ return 0;
+}
+
+/**
+ * fscrypt_prepare_setattr - prepare to change a possibly-encrypted inode's attributes
+ * @dentry: dentry through which the inode is being changed
+ * @attr: attributes to change
+ *
+ * Prepare for ->setattr() on a possibly-encrypted inode. On an encrypted file,
+ * most attribute changes are allowed even without the encryption key. However,
+ * without the encryption key we do have to forbid truncates. This is needed
+ * because the size being truncated to may not be a multiple of the filesystem
+ * block size, and in that case we'd have to decrypt the final block, zero the
+ * portion past i_size, and re-encrypt it. (We *could* allow truncating to a
+ * filesystem block boundary, but it's simpler to just forbid all truncates ---
+ * and we already forbid all other contents modifications without the key.)
+ *
+ * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code
+ * if a problem occurred while setting up the encryption key.
+ */
+static inline int fscrypt_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ if (attr->ia_valid & ATTR_SIZE)
+ return fscrypt_require_key(d_inode(dentry));
+ return 0;
+}
+
+#endif /* _LINUX_FSCRYPT_H */
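A sketch, not taken from this patch, of how a filesystem's inode operations might call the prepare hooks declared above; the myfs_* names and helpers are hypothetical.

static int myfs_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	int err;

	/* fails with -ENOKEY if dir is encrypted and its key is absent */
	err = fscrypt_prepare_link(old_dentry, dir, dentry);
	if (err)
		return err;
	return myfs_add_link(dir, dentry, d_inode(old_dentry));	/* hypothetical helper */
}

static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	int err;

	/* truncates of encrypted files require the key; other attribute changes do not */
	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;
	return myfs_do_setattr(dentry, attr);	/* hypothetical helper */
}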
diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h
deleted file mode 100644
index 854d724978fa..000000000000
--- a/include/linux/fscrypt_common.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * fscrypt_common.h: common declarations for per-file encryption
- *
- * Copyright (C) 2015, Google, Inc.
- *
- * Written by Michael Halcrow, 2015.
- * Modified by Jaegeuk Kim, 2015.
- */
-
-#ifndef _LINUX_FSCRYPT_COMMON_H
-#define _LINUX_FSCRYPT_COMMON_H
-
-#include <linux/key.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/bio.h>
-#include <linux/dcache.h>
-#include <crypto/skcipher.h>
-#include <uapi/linux/fs.h>
-
-#define FS_CRYPTO_BLOCK_SIZE 16
-
-struct fscrypt_info;
-
-struct fscrypt_ctx {
- union {
- struct {
- struct page *bounce_page; /* Ciphertext page */
- struct page *control_page; /* Original page */
- } w;
- struct {
- struct bio *bio;
- struct work_struct work;
- } r;
- struct list_head free_list; /* Free list */
- };
- u8 flags; /* Flags */
-};
-
-/**
- * For encrypted symlinks, the ciphertext length is stored at the beginning
- * of the string in little-endian format.
- */
-struct fscrypt_symlink_data {
- __le16 len;
- char encrypted_path[1];
-} __packed;
-
-struct fscrypt_str {
- unsigned char *name;
- u32 len;
-};
-
-struct fscrypt_name {
- const struct qstr *usr_fname;
- struct fscrypt_str disk_name;
- u32 hash;
- u32 minor_hash;
- struct fscrypt_str crypto_buf;
-};
-
-#define FSTR_INIT(n, l) { .name = n, .len = l }
-#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
-#define fname_name(p) ((p)->disk_name.name)
-#define fname_len(p) ((p)->disk_name.len)
-
-/*
- * fscrypt superblock flags
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-
-/*
- * crypto opertions for filesystems
- */
-struct fscrypt_operations {
- unsigned int flags;
- const char *key_prefix;
- int (*get_context)(struct inode *, void *, size_t);
- int (*set_context)(struct inode *, const void *, size_t, void *);
- bool (*dummy_context)(struct inode *);
- bool (*is_encrypted)(struct inode *);
- bool (*empty_dir)(struct inode *);
- unsigned (*max_namelen)(struct inode *);
-};
-
-/* Maximum value for the third parameter of fscrypt_operations.set_context(). */
-#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28
-
-static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
-{
- if (inode->i_sb->s_cop->dummy_context &&
- inode->i_sb->s_cop->dummy_context(inode))
- return true;
- return false;
-}
-
-static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
- u32 filenames_mode)
-{
- if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC &&
- filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS)
- return true;
-
- if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS &&
- filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
- return true;
-
- return false;
-}
-
-static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
-{
- if (str->len == 1 && str->name[0] == '.')
- return true;
-
- if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
- return true;
-
- return false;
-}
-
-static inline struct page *fscrypt_control_page(struct page *page)
-{
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
- return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
-#else
- WARN_ON_ONCE(1);
- return ERR_PTR(-EINVAL);
-#endif
-}
-
-static inline int fscrypt_has_encryption_key(const struct inode *inode)
-{
-#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
- return (inode->i_crypt_info != NULL);
-#else
- return 0;
-#endif
-}
-
-#endif /* _LINUX_FSCRYPT_COMMON_H */
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index 19609ceea350..63e58808519a 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -4,13 +4,16 @@
*
* This stubs out the fscrypt functions for filesystems configured without
* encryption support.
+ *
+ * Do not include this file directly. Use fscrypt.h instead!
*/
+#ifndef _LINUX_FSCRYPT_H
+#error "Incorrect include of linux/fscrypt_notsupp.h!"
+#endif
#ifndef _LINUX_FSCRYPT_NOTSUPP_H
#define _LINUX_FSCRYPT_NOTSUPP_H
-#include <linux/fscrypt_common.h>
-
/* crypto.c */
static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
gfp_t gfp_flags)
@@ -98,7 +101,7 @@ static inline int fscrypt_setup_filename(struct inode *dir,
const struct qstr *iname,
int lookup, struct fscrypt_name *fname)
{
- if (dir->i_sb->s_cop->is_encrypted(dir))
+ if (IS_ENCRYPTED(dir))
return -EOPNOTSUPP;
memset(fname, 0, sizeof(struct fscrypt_name));
@@ -175,4 +178,34 @@ static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
return -EOPNOTSUPP;
}
+/* hooks.c */
+
+static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
+{
+ if (IS_ENCRYPTED(inode))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static inline int __fscrypt_prepare_link(struct inode *inode,
+ struct inode *dir)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_rename(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry,
+ unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __fscrypt_prepare_lookup(struct inode *dir,
+ struct dentry *dentry)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* _LINUX_FSCRYPT_NOTSUPP_H */
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index 5153dce22f09..cf9e9fc02f0a 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -2,14 +2,15 @@
/*
* fscrypt_supp.h
*
- * This is included by filesystems configured with encryption support.
+ * Do not include this file directly. Use fscrypt.h instead!
*/
+#ifndef _LINUX_FSCRYPT_H
+#error "Incorrect include of linux/fscrypt_supp.h!"
+#endif
#ifndef _LINUX_FSCRYPT_SUPP_H
#define _LINUX_FSCRYPT_SUPP_H
-#include <linux/fscrypt_common.h>
-
/* crypto.c */
extern struct kmem_cache *fscrypt_info_cachep;
extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
@@ -143,4 +144,14 @@ extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
+/* hooks.c */
+extern int fscrypt_file_open(struct inode *inode, struct file *filp);
+extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
+extern int __fscrypt_prepare_rename(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry,
+ unsigned int flags);
+extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
+
#endif /* _LINUX_FSCRYPT_SUPP_H */
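Per the header comment in the new fscrypt.h above, a filesystem selects the right variant by defining __FS_HAS_ENCRYPTION before the include; a sketch with hypothetical MYFS names:

/* in the filesystem's private header, e.g. fs/myfs/myfs.h (hypothetical) */
#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_MYFS_ENCRYPTION)
#include <linux/fscrypt.h>
/* fscrypt.h then pulls in fscrypt_supp.h or fscrypt_notsupp.h as appropriate;
 * including either of those two headers directly now triggers the #error above. */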
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 3597ef78df4d..067d52e95f02 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/user_namespace.h>
+#include <linux/refcount.h>
/*
* IN_* from inotfy.h lines up EXACTLY with FS_*, this is so we can easily
@@ -136,7 +137,7 @@ struct fsnotify_group {
* inotify_init() and the refcnt will hit 0 only when that fd has been
* closed.
*/
- atomic_t refcnt; /* things with interest in this group */
+ refcount_t refcnt; /* things with interest in this group */
const struct fsnotify_ops *ops; /* how this group handles things */
@@ -183,14 +184,13 @@ struct fsnotify_group {
#endif
#ifdef CONFIG_FANOTIFY
struct fanotify_group_private_data {
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
/* allows a group to block waiting for a userspace response */
struct list_head access_list;
wait_queue_head_t access_waitq;
-#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
int f_flags;
unsigned int max_marks;
struct user_struct *user;
+ bool audit;
} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
@@ -244,7 +244,7 @@ struct fsnotify_mark {
__u32 mask;
/* We hold one for presence in g_list. Also one ref for each 'thing'
* in kernel that found and may be using this mark. */
- atomic_t refcnt;
+ refcount_t refcnt;
/* Group this mark is for. Set on mark creation, stable until last ref
* is dropped */
struct fsnotify_group *group;
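The atomic_t to refcount_t conversion above implies the usual call-site pattern; an illustrative before/after that is not part of this hunk:

	refcount_set(&group->refcnt, 1);		/* was: atomic_set(&group->refcnt, 1) */
	refcount_inc(&group->refcnt);			/* was: atomic_inc(&group->refcnt) */
	if (refcount_dec_and_test(&group->refcnt))	/* was: atomic_dec_and_test(...) */
		free_group(group);			/* hypothetical cleanup helper */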
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 0c35b6caf0f6..411a84c6c400 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -68,7 +68,7 @@ struct fwnode_reference_args {
* @graph_parse_endpoint: Parse endpoint for port and endpoint id.
*/
struct fwnode_operations {
- void (*get)(struct fwnode_handle *fwnode);
+ struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
void (*put)(struct fwnode_handle *fwnode);
bool (*device_is_available)(const struct fwnode_handle *fwnode);
bool (*property_present)(const struct fwnode_handle *fwnode,
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index b96dd4e1e663..ecc2928e8046 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -31,7 +31,7 @@ extern wait_queue_head_t genl_sk_destructing_waitq;
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * both the smp_read_barrier_depends() and the READ_ONCE(), because
* caller holds genl mutex.
*/
#define genl_dereference(p) \
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 44790523057f..5144ebe046c9 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -141,6 +141,7 @@ struct hd_struct {
#define GENHD_FL_NATIVE_CAPACITY 128
#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256
#define GENHD_FL_NO_PART_SCAN 512
+#define GENHD_FL_HIDDEN 1024
enum {
DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
@@ -207,6 +208,7 @@ struct gendisk {
#endif /* CONFIG_BLK_DEV_INTEGRITY */
int node_id;
struct badblocks *bb;
+ struct lockdep_map lockdep_map;
};
static inline struct gendisk *part_to_disk(struct hd_struct *part)
@@ -235,7 +237,7 @@ static inline bool disk_part_scan_enabled(struct gendisk *disk)
static inline dev_t disk_devt(struct gendisk *disk)
{
- return disk_to_dev(disk)->devt;
+ return MKDEV(disk->major, disk->first_minor);
}
static inline dev_t part_devt(struct hd_struct *part)
@@ -243,6 +245,7 @@ static inline dev_t part_devt(struct hd_struct *part)
return part_to_dev(part)->devt;
}
+extern struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno);
static inline void disk_put_part(struct hd_struct *part)
@@ -591,8 +594,7 @@ extern void __delete_partition(struct percpu_ref *);
extern void delete_partition(struct gendisk *, int);
extern void printk_all_partitions(void);
-extern struct gendisk *alloc_disk_node(int minors, int node_id);
-extern struct gendisk *alloc_disk(int minors);
+extern struct gendisk *__alloc_disk_node(int minors, int node_id);
extern struct kobject *get_disk(struct gendisk *disk);
extern void put_disk(struct gendisk *disk);
extern void blk_register_region(dev_t devt, unsigned long range,
@@ -616,6 +618,24 @@ extern ssize_t part_fail_store(struct device *dev,
const char *buf, size_t count);
#endif /* CONFIG_FAIL_MAKE_REQUEST */
+#define alloc_disk_node(minors, node_id) \
+({ \
+ static struct lock_class_key __key; \
+ const char *__name; \
+ struct gendisk *__disk; \
+ \
+ __name = "(gendisk_completion)"#minors"("#node_id")"; \
+ \
+ __disk = __alloc_disk_node(minors, node_id); \
+ \
+ if (__disk) \
+ lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \
+ \
+ __disk; \
+})
+
+#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE)
+
static inline int hd_ref_init(struct hd_struct *part)
{
if (percpu_ref_init(&part->ref, __delete_partition, 0,
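Call sites are unchanged by turning alloc_disk()/alloc_disk_node() into macros; an illustrative caller, assuming a driver probe path:

	struct gendisk *disk;

	/* each call site gets its own static lock_class_key; the macro also
	 * initializes disk->lockdep_map before handing the disk back */
	disk = alloc_disk(16);		/* same as alloc_disk_node(16, NUMA_NO_NODE) */
	if (!disk)
		return -ENOMEM;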
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 710143741eb5..1a4582b44d32 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -24,7 +24,6 @@ struct vm_area_struct;
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
-#define ___GFP_COLD 0x100u
#define ___GFP_NOWARN 0x200u
#define ___GFP_RETRY_MAYFAIL 0x400u
#define ___GFP_NOFAIL 0x800u
@@ -37,7 +36,6 @@ struct vm_area_struct;
#define ___GFP_THISNODE 0x40000u
#define ___GFP_ATOMIC 0x80000u
#define ___GFP_ACCOUNT 0x100000u
-#define ___GFP_NOTRACK 0x200000u
#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_WRITE 0x800000u
#define ___GFP_KSWAPD_RECLAIM 0x1000000u
@@ -193,27 +191,15 @@ struct vm_area_struct;
/*
* Action modifiers
*
- * __GFP_COLD indicates that the caller does not expect to be used in the near
- * future. Where possible, a cache-cold page will be returned.
- *
* __GFP_NOWARN suppresses allocation failure reports.
*
* __GFP_COMP address compound page metadata.
*
* __GFP_ZERO returns a zeroed page on success.
- *
- * __GFP_NOTRACK avoids tracking with kmemcheck.
- *
- * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
- * distinguishing in the source between false positives and allocations that
- * cannot be supported (e.g. page tables).
*/
-#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
-#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
-#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
/* Disable lockdep for GFP context tracking */
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
@@ -539,8 +525,8 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
-extern void free_hot_cold_page(struct page *page, bool cold);
-extern void free_hot_cold_page_list(struct list_head *list, bool cold);
+extern void free_unref_page(struct page *page);
+extern void free_unref_page_list(struct list_head *list);
struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
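Sketch of the call-site conversions implied by the removals and renames above; 'page' and 'pages' are assumed locals, not from this patch.

	/* before: page = alloc_pages(GFP_KERNEL | __GFP_ZERO | __GFP_NOTRACK, 0); */
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);

	/* the rename also drops the 'cold' argument */
	/* before: free_hot_cold_page_list(&pages, true); */
	free_unref_page_list(&pages);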
diff --git a/include/linux/gpio-fan.h b/include/linux/gpio-fan.h
deleted file mode 100644
index 096659169215..000000000000
--- a/include/linux/gpio-fan.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * include/linux/gpio-fan.h
- *
- * Platform data structure for GPIO fan driver
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __LINUX_GPIO_FAN_H
-#define __LINUX_GPIO_FAN_H
-
-struct gpio_fan_alarm {
- unsigned gpio;
- unsigned active_low;
-};
-
-struct gpio_fan_speed {
- int rpm;
- int ctrl_val;
-};
-
-struct gpio_fan_platform_data {
- int num_ctrl;
- unsigned *ctrl; /* fan control GPIOs. */
- struct gpio_fan_alarm *alarm; /* fan alarm GPIO. */
- /*
- * Speed conversion array: rpm from/to GPIO bit field.
- * This array _must_ be sorted in ascending rpm order.
- */
- int num_speed;
- struct gpio_fan_speed *speed;
-};
-
-#endif /* __LINUX_GPIO_FAN_H */
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index c4a350d83578..7447d85dbe2f 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -29,6 +29,7 @@ struct gpio_descs {
#define GPIOD_FLAGS_BIT_DIR_SET BIT(0)
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
+#define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3)
/**
* Optional flags that can be passed to one of gpiod_* to configure direction
@@ -40,6 +41,11 @@ enum gpiod_flags {
GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT,
GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
GPIOD_FLAGS_BIT_DIR_VAL,
+ GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_FLAGS_BIT_DIR_SET |
+ GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_OPEN_DRAIN,
+ GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_FLAGS_BIT_DIR_SET |
+ GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_DIR_VAL |
+ GPIOD_FLAGS_BIT_OPEN_DRAIN,
};
#ifdef CONFIG_GPIOLIB
@@ -100,10 +106,15 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
+int gpiod_get_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array);
void gpiod_set_value(struct gpio_desc *desc, int value);
void gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array, int *value_array);
int gpiod_get_raw_value(const struct gpio_desc *desc);
+int gpiod_get_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
void gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -111,11 +122,17 @@ void gpiod_set_raw_array_value(unsigned int array_size,
/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
+int gpiod_get_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
void gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
+int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -306,6 +323,14 @@ static inline int gpiod_get_value(const struct gpio_desc *desc)
WARN_ON(1);
return 0;
}
+static inline int gpiod_get_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
static inline void gpiod_set_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
@@ -324,6 +349,14 @@ static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
WARN_ON(1);
return 0;
}
+static inline int gpiod_get_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
@@ -343,6 +376,14 @@ static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
WARN_ON(1);
return 0;
}
+static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
@@ -361,6 +402,14 @@ static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
WARN_ON(1);
return 0;
}
+static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
int value)
{
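A sketch of a consumer using the new array "get" calls, assuming a struct gpio_descs obtained from gpiod_get_array(); names are hypothetical and error-path cleanup is omitted for brevity.

static int read_mode_pins(struct device *dev, unsigned int *mode)
{
	struct gpio_descs *pins;
	int values[4];
	int i, ret;

	pins = gpiod_get_array(dev, "mode", GPIOD_IN);
	if (IS_ERR(pins))
		return PTR_ERR(pins);

	if (pins->ndescs > ARRAY_SIZE(values))
		return -EINVAL;

	/* reads all requested lines in one call instead of looping gpiod_get_value() */
	ret = gpiod_get_array_value(pins->ndescs, pins->desc, values);
	if (ret)
		return ret;

	*mode = 0;
	for (i = 0; i < pins->ndescs; i++)
		*mode |= values[i] << i;
	return 0;
}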
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 424e5139ff10..55e672592fa9 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -20,6 +20,131 @@ struct module;
#ifdef CONFIG_GPIOLIB
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+/**
+ * struct gpio_irq_chip - GPIO interrupt controller
+ */
+struct gpio_irq_chip {
+ /**
+ * @chip:
+ *
+ * GPIO IRQ chip implementation, provided by GPIO driver.
+ */
+ struct irq_chip *chip;
+
+ /**
+ * @domain:
+ *
+ * Interrupt translation domain; responsible for mapping between GPIO
+ * hwirq number and Linux IRQ number.
+ */
+ struct irq_domain *domain;
+
+ /**
+ * @domain_ops:
+ *
+ * Table of interrupt domain operations for this IRQ chip.
+ */
+ const struct irq_domain_ops *domain_ops;
+
+ /**
+ * @handler:
+ *
+ * The IRQ handler to use (often a predefined IRQ core function) for
+ * GPIO IRQs, provided by GPIO driver.
+ */
+ irq_flow_handler_t handler;
+
+ /**
+ * @default_type:
+ *
+ * Default IRQ triggering type applied during GPIO driver
+ * initialization, provided by GPIO driver.
+ */
+ unsigned int default_type;
+
+ /**
+ * @lock_key:
+ *
+ * Per GPIO IRQ chip lockdep class.
+ */
+ struct lock_class_key *lock_key;
+
+ /**
+ * @parent_handler:
+ *
+ * The interrupt handler for the GPIO chip's parent interrupts, may be
+ * NULL if the parent interrupts are nested rather than cascaded.
+ */
+ irq_flow_handler_t parent_handler;
+
+ /**
+ * @parent_handler_data:
+ *
+ * Data associated, and passed to, the handler for the parent
+ * interrupt.
+ */
+ void *parent_handler_data;
+
+ /**
+ * @num_parents:
+ *
+ * The number of interrupt parents of a GPIO chip.
+ */
+ unsigned int num_parents;
+
+ /**
+ * @parents:
+ *
+ * A list of interrupt parents of a GPIO chip. This is owned by the
+ * driver, so the core will only reference this list, not modify it.
+ */
+ unsigned int *parents;
+
+ /**
+ * @map:
+ *
+ * A list of interrupt parents for each line of a GPIO chip.
+ */
+ unsigned int *map;
+
+ /**
+ * @threaded:
+ *
+	 * If set, the interrupt handling uses nested threads.
+ */
+ bool threaded;
+
+ /**
+ * @need_valid_mask:
+ *
+	 * If set, the core allocates @valid_mask with all bits set to one.
+ */
+ bool need_valid_mask;
+
+ /**
+ * @valid_mask:
+ *
+ * If not %NULL holds bitmask of GPIOs which are valid to be included
+ * in IRQ domain of the chip.
+ */
+ unsigned long *valid_mask;
+
+ /**
+ * @first:
+ *
+ * Required for static IRQ allocation. If set, irq_domain_add_simple()
+ * will allocate and map all IRQs during initialization.
+ */
+ unsigned int first;
+};
+
+static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
+{
+ return container_of(chip, struct gpio_irq_chip, chip);
+}
+#endif
+
/**
* struct gpio_chip - abstract a GPIO controller
* @label: a functional name for the GPIO device, such as a part
@@ -36,6 +161,8 @@ struct module;
* @direction_input: configures signal "offset" as input, or returns error
* @direction_output: configures signal "offset" as output, or returns error
* @get: returns value for signal "offset", 0=low, 1=high, or negative error
+ * @get_multiple: reads values for multiple signals defined by "mask" and
+ * stores them in "bits", returns 0 on success or negative error
* @set: assigns output value for signal "offset"
* @set_multiple: assigns output values for multiple signals defined by "mask"
* @set_config: optional hook for all kinds of settings. Uses the same
@@ -66,9 +193,9 @@ struct module;
* registers.
* @read_reg: reader function for generic GPIO
* @write_reg: writer function for generic GPIO
- * @pin2mask: some generic GPIO controllers work with the big-endian bits
- * notation, e.g. in a 8-bits register, GPIO7 is the least significant
- * bit. This callback assigns the right bit mask.
+ * @be_bits: if the generic GPIO has big endian bit order (bit 31 is representing
+ * line 0, bit 30 is line 1 ... bit 0 is line 31) this is set to true by the
+ * generic GPIO core. It is for internal housekeeping only.
* @reg_dat: data (in) register for generic GPIO
* @reg_set: output set register (out=high) for generic GPIO
* @reg_clr: output clear register (out=low) for generic GPIO
@@ -81,23 +208,6 @@ struct module;
* safely.
* @bgpio_dir: shadowed direction register for generic GPIO to clear/set
* direction safely.
- * @irqchip: GPIO IRQ chip impl, provided by GPIO driver
- * @irqdomain: Interrupt translation domain; responsible for mapping
- * between GPIO hwirq number and linux irq number
- * @irq_base: first linux IRQ number assigned to GPIO IRQ chip (deprecated)
- * @irq_handler: the irq handler to use (often a predefined irq core function)
- * for GPIO IRQs, provided by GPIO driver
- * @irq_default_type: default IRQ triggering type applied during GPIO driver
- * initialization, provided by GPIO driver
- * @irq_chained_parent: GPIO IRQ chip parent/bank linux irq number,
- * provided by GPIO driver for chained interrupt (not for nested
- * interrupts).
- * @irq_nested: True if set the interrupt handling is nested.
- * @irq_need_valid_mask: If set core allocates @irq_valid_mask with all
- * bits set to one
- * @irq_valid_mask: If not %NULL holds bitmask of GPIOs which are valid to
- * be included in IRQ domain of the chip
- * @lock_key: per GPIO IRQ chip lockdep class
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
* they can all be accessed through a common programing interface.
@@ -127,6 +237,9 @@ struct gpio_chip {
unsigned offset, int value);
int (*get)(struct gpio_chip *chip,
unsigned offset);
+ int (*get_multiple)(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits);
void (*set)(struct gpio_chip *chip,
unsigned offset, int value);
void (*set_multiple)(struct gpio_chip *chip,
@@ -148,7 +261,7 @@ struct gpio_chip {
#if IS_ENABLED(CONFIG_GPIO_GENERIC)
unsigned long (*read_reg)(void __iomem *reg);
void (*write_reg)(void __iomem *reg, unsigned long data);
- unsigned long (*pin2mask)(struct gpio_chip *gc, unsigned int pin);
+ bool be_bits;
void __iomem *reg_dat;
void __iomem *reg_set;
void __iomem *reg_clr;
@@ -164,16 +277,14 @@ struct gpio_chip {
* With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib
* to handle IRQs for most practical cases.
*/
- struct irq_chip *irqchip;
- struct irq_domain *irqdomain;
- unsigned int irq_base;
- irq_flow_handler_t irq_handler;
- unsigned int irq_default_type;
- unsigned int irq_chained_parent;
- bool irq_nested;
- bool irq_need_valid_mask;
- unsigned long *irq_valid_mask;
- struct lock_class_key *lock_key;
+
+ /**
+ * @irq:
+ *
+ * Integrates interrupt chip functionality with the GPIO chip. Can be
+ * used to handle IRQs for most practical cases.
+ */
+ struct gpio_irq_chip irq;
#endif
#if defined(CONFIG_OF_GPIO)
@@ -211,7 +322,41 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip,
unsigned offset);
/* add/remove chips */
-extern int gpiochip_add_data(struct gpio_chip *chip, void *data);
+extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
+ struct lock_class_key *lock_key);
+
+/**
+ * gpiochip_add_data() - register a gpio_chip
+ * @chip: the chip to register, with chip->base initialized
+ * @data: driver-private data associated with this chip
+ *
+ * Context: potentially before irqs will work
+ *
+ * When gpiochip_add_data() is called very early during boot, so that GPIOs
+ * can be freely used, the chip->parent device must be registered before
+ * the gpio framework's arch_initcall(). Otherwise sysfs initialization
+ * for GPIOs will fail rudely.
+ *
+ * gpiochip_add_data() must only be called after gpiolib initialization,
+ * ie after core_initcall().
+ *
+ * If chip->base is negative, this requests dynamic assignment of
+ * a range of valid GPIOs.
+ *
+ * Returns:
+ * A negative errno if the chip can't be registered, such as because the
+ * chip->base is invalid or already associated with a different chip.
+ * Otherwise it returns zero as a success code.
+ */
+#ifdef CONFIG_LOCKDEP
+#define gpiochip_add_data(chip, data) ({ \
+ static struct lock_class_key key; \
+ gpiochip_add_data_with_key(chip, data, &key); \
+ })
+#else
+#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL)
+#endif
+
static inline int gpiochip_add(struct gpio_chip *chip)
{
return gpiochip_add_data(chip, NULL);
@@ -265,6 +410,10 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
#ifdef CONFIG_GPIOLIB_IRQCHIP
+int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq);
+void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq);
+
void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int parent_irq,
@@ -279,7 +428,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type,
- bool nested,
+ bool threaded,
struct lock_class_key *lock_key);
#ifdef CONFIG_LOCKDEP
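A sketch of the new optional get_multiple() callback for a memory-mapped controller, following the kernel-doc added above; the mychip structure and register are hypothetical.

struct mychip {
	void __iomem *dat_reg;	/* hypothetical data-in register */
};

static int mychip_get_multiple(struct gpio_chip *chip,
			       unsigned long *mask, unsigned long *bits)
{
	struct mychip *priv = gpiochip_get_data(chip);
	unsigned long in = readl(priv->dat_reg);

	/* report only the requested lines; leave the other bits untouched */
	*bits &= ~*mask;
	*bits |= in & *mask;
	return 0;
}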
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index b7225369e568..846be7c69a52 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -11,7 +11,7 @@ enum gpio_lookup_flags {
GPIO_OPEN_DRAIN = (1 << 1),
GPIO_OPEN_SOURCE = (1 << 2),
GPIO_SLEEP_MAINTAIN_VALUE = (0 << 3),
- GPIO_SLEEP_MAY_LOOSE_VALUE = (1 << 3),
+ GPIO_SLEEP_MAY_LOSE_VALUE = (1 << 3),
};
/**
diff --git a/include/linux/gpio_mouse.h b/include/linux/gpio_mouse.h
deleted file mode 100644
index 44ed7aa14d85..000000000000
--- a/include/linux/gpio_mouse.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Driver for simulating a mouse on GPIO lines.
- *
- * Copyright (C) 2007 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _GPIO_MOUSE_H
-#define _GPIO_MOUSE_H
-
-#define GPIO_MOUSE_POLARITY_ACT_HIGH 0x00
-#define GPIO_MOUSE_POLARITY_ACT_LOW 0x01
-
-#define GPIO_MOUSE_PIN_UP 0
-#define GPIO_MOUSE_PIN_DOWN 1
-#define GPIO_MOUSE_PIN_LEFT 2
-#define GPIO_MOUSE_PIN_RIGHT 3
-#define GPIO_MOUSE_PIN_BLEFT 4
-#define GPIO_MOUSE_PIN_BMIDDLE 5
-#define GPIO_MOUSE_PIN_BRIGHT 6
-#define GPIO_MOUSE_PIN_MAX 7
-
-/**
- * struct gpio_mouse_platform_data
- * @scan_ms: integer in ms specifying the scan periode.
- * @polarity: Pin polarity, active high or low.
- * @up: GPIO line for up value.
- * @down: GPIO line for down value.
- * @left: GPIO line for left value.
- * @right: GPIO line for right value.
- * @bleft: GPIO line for left button.
- * @bmiddle: GPIO line for middle button.
- * @bright: GPIO line for right button.
- *
- * This struct must be added to the platform_device in the board code.
- * It is used by the gpio_mouse driver to setup GPIO lines and to
- * calculate mouse movement.
- */
-struct gpio_mouse_platform_data {
- int scan_ms;
- int polarity;
-
- union {
- struct {
- int up;
- int down;
- int left;
- int right;
-
- int bleft;
- int bmiddle;
- int bright;
- };
- int pins[GPIO_MOUSE_PIN_MAX];
- };
-};
-
-#endif /* _GPIO_MOUSE_H */
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index fc7aae64dcde..331dc377c275 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -231,6 +231,7 @@ struct hid_sensor_common {
unsigned usage_id;
atomic_t data_ready;
atomic_t user_requested_state;
+ atomic_t runtime_pm_enable;
int poll_interval;
int raw_hystersis;
int latency_ms;
diff --git a/include/linux/hid.h b/include/linux/hid.h
index ab05a86269dc..d491027a7c22 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -289,6 +289,7 @@ struct hid_item {
#define HID_DG_DEVICEINDEX 0x000d0053
#define HID_DG_CONTACTCOUNT 0x000d0054
#define HID_DG_CONTACTMAX 0x000d0055
+#define HID_DG_SCANTIME 0x000d0056
#define HID_DG_BUTTONTYPE 0x000d0059
#define HID_DG_BARRELSWITCH2 0x000d005a
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
@@ -753,6 +754,7 @@ struct hid_driver {
* @stop: called on remove
* @open: called by input layer on open
* @close: called by input layer on close
+ * @power: request underlying hardware to enter requested power mode
* @parse: this method is called only once to parse the device data,
* shouldn't allocate anything to not leak memory
* @request: send report request to device (e.g. feature report)
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 96e69979f84d..325017ad9311 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -471,9 +471,9 @@ static inline void hmm_devmem_page_set_drvdata(struct page *page,
* @page: pointer to struct page
* Return: driver data value
*/
-static inline unsigned long hmm_devmem_page_get_drvdata(struct page *page)
+static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
- unsigned long *drvdata = (unsigned long *)&page->pgmap;
+ const unsigned long *drvdata = (const unsigned long *)&page->pgmap;
return drvdata[1];
}
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 630b1a98ab58..ddf7f9ca86cc 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -157,7 +157,7 @@ int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
u32 *value);
-struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
unsigned long flags);
void host1x_syncpt_free(struct host1x_syncpt *sp);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 87067d23a48b..a8a126259bc4 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -222,7 +222,7 @@ extern struct page *huge_zero_page;
static inline bool is_huge_zero_page(struct page *page)
{
- return ACCESS_ONCE(huge_zero_page) == page;
+ return READ_ONCE(huge_zero_page) == page;
}
static inline bool is_huge_zero_pmd(pmd_t pmd)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6431087816ba..f3e97c5f94c9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -719,6 +719,10 @@ struct vmbus_channel {
struct vmbus_close_msg close_msg;
+ /* Statistics */
+ u64 interrupts; /* Host to Guest interrupts */
+ u64 sig_events; /* Guest to Host events */
+
/* Channel callback's invoked in softirq context */
struct tasklet_struct callback_event;
void (*onchannel_callback)(void *context);
@@ -829,6 +833,11 @@ struct vmbus_channel {
struct rcu_head rcu;
/*
+ * For sysfs per-channel properties.
+ */
+ struct kobject kobj;
+
+ /*
* For performance critical channels (storage, networking
* etc,), Hyper-V has a mechanism to enhance the throughput
* at the expense of latency:
@@ -1089,6 +1098,7 @@ struct hv_device {
struct device device;
struct vmbus_channel *channel;
+ struct kset *channels_kset;
};
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
index b4054fd5b6f6..b19563f9a8eb 100644
--- a/include/linux/hypervisor.h
+++ b/include/linux/hypervisor.h
@@ -7,8 +7,12 @@
* Juergen Gross <jgross@suse.com>
*/
-#ifdef CONFIG_HYPERVISOR_GUEST
-#include <asm/hypervisor.h>
+#ifdef CONFIG_X86
+#include <asm/x86_init.h>
+static inline void hypervisor_pin_vcpu(int cpu)
+{
+ x86_platform.hyper.pin_vcpu(cpu);
+}
#else
static inline void hypervisor_pin_vcpu(int cpu)
{
diff --git a/include/linux/i2c-gpio.h b/include/linux/i2c-gpio.h
index c1bcb1f1d73b..352c1426fd4d 100644
--- a/include/linux/i2c-gpio.h
+++ b/include/linux/i2c-gpio.h
@@ -12,8 +12,6 @@
/**
* struct i2c_gpio_platform_data - Platform-dependent data for i2c-gpio
- * @sda_pin: GPIO pin ID to use for SDA
- * @scl_pin: GPIO pin ID to use for SCL
* @udelay: signal toggle delay. SCL frequency is (500 / udelay) kHz
* @timeout: clock stretching timeout in jiffies. If the slave keeps
* SCL low for longer than this, the transfer will time out.
@@ -26,8 +24,6 @@
* @scl_is_output_only: SCL output drivers cannot be turned off.
*/
struct i2c_gpio_platform_data {
- unsigned int sda_pin;
- unsigned int scl_pin;
int udelay;
int timeout;
unsigned int sda_is_open_drain:1;
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
index a1385023a29b..fb0e040b1abb 100644
--- a/include/linux/i2c-smbus.h
+++ b/include/linux/i2c-smbus.h
@@ -42,7 +42,6 @@
* properly set.
*/
struct i2c_smbus_alert_setup {
- unsigned int alert_edge_triggered:1;
int irq;
};
@@ -50,4 +49,13 @@ struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
struct i2c_smbus_alert_setup *setup);
int i2c_handle_smbus_alert(struct i2c_client *ara);
+#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF)
+int of_i2c_setup_smbus_alert(struct i2c_adapter *adap);
+#else
+static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap)
+{
+ return 0;
+}
+#endif
+
#endif /* _LINUX_I2C_SMBUS_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index d501d3956f13..0f774406fad0 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -304,6 +304,7 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
* @type: chip type, to initialize i2c_client.name
* @flags: to initialize i2c_client.flags
* @addr: stored in i2c_client.addr
+ * @dev_name: Overrides the default <busnr>-<addr> dev_name if set
* @platform_data: stored in i2c_client.dev.platform_data
* @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
@@ -328,6 +329,7 @@ struct i2c_board_info {
char type[I2C_NAME_SIZE];
unsigned short flags;
unsigned short addr;
+ const char *dev_name;
void *platform_data;
struct dev_archdata *archdata;
struct device_node *of_node;
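An illustrative board-file entry using the new dev_name override; the device type, address and name are made up.

static struct i2c_board_info my_rtc_info __initdata = {
	I2C_BOARD_INFO("pcf8563", 0x51),
	.dev_name = "rtc-backup",	/* instead of the default "<busnr>-0051" */
};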
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 30294603526f..d95cae09dea0 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -247,7 +247,7 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
static inline int team_num_to_port_index(struct team *team, unsigned int num)
{
- int en_port_count = ACCESS_ONCE(team->en_port_count);
+ int en_port_count = READ_ONCE(team->en_port_count);
if (unlikely(!en_port_count))
return 0;
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 7b0fa8b5c120..f9bd6e8ab138 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -131,28 +131,39 @@ struct st_sensor_das {
};
/**
+ * struct st_sensor_int_drdy - ST sensor device drdy line parameters
+ * @addr: address of INT drdy register.
+ * @mask: mask to enable drdy line.
+ * @addr_od: address to enable/disable Open Drain on the INT line.
+ * @mask_od: mask to enable/disable Open Drain on the INT line.
+ */
+struct st_sensor_int_drdy {
+ u8 addr;
+ u8 mask;
+ u8 addr_od;
+ u8 mask_od;
+};
+
+/**
* struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt
- * @addr: address of the register.
- * @mask_int1: mask to enable/disable IRQ on INT1 pin.
- * @mask_int2: mask to enable/disable IRQ on INT2 pin.
+ * struct int1 - data-ready configuration register for INT1 pin.
+ * struct int2 - data-ready configuration register for INT2 pin.
* @addr_ihl: address to enable/disable active low on the INT lines.
* @mask_ihl: mask to enable/disable active low on the INT lines.
- * @addr_od: address to enable/disable Open Drain on the INT lines.
- * @mask_od: mask to enable/disable Open Drain on the INT lines.
- * @addr_stat_drdy: address to read status of DRDY (data ready) interrupt
+ * struct stat_drdy - status register of DRDY (data ready) interrupt.
* struct ig1 - represents the Interrupt Generator 1 of sensors.
* @en_addr: address of the enable ig1 register.
* @en_mask: mask to write the on/off value for enable.
*/
struct st_sensor_data_ready_irq {
- u8 addr;
- u8 mask_int1;
- u8 mask_int2;
+ struct st_sensor_int_drdy int1;
+ struct st_sensor_int_drdy int2;
u8 addr_ihl;
u8 mask_ihl;
- u8 addr_od;
- u8 mask_od;
- u8 addr_stat_drdy;
+ struct {
+ u8 addr;
+ u8 mask;
+ } stat_drdy;
struct {
u8 en_addr;
u8 en_mask;
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index c380daa40c0e..20b61347ea58 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -365,12 +365,9 @@ unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
#define INDIO_MAX_RAW_ELEMENTS 4
struct iio_trigger; /* forward declaration */
-struct iio_dev;
/**
* struct iio_info - constant information about device
- * @driver_module: module structure used to ensure correct
- * ownership of chrdevs etc
* @event_attrs: event control attributes
* @attrs: general purpose device attributes
* @read_raw: function to request a value from the device.
@@ -425,7 +422,6 @@ struct iio_dev;
* were flushed and there was an error.
**/
struct iio_info {
- struct module *driver_module;
const struct attribute_group *event_attrs;
const struct attribute_group *attrs;
@@ -518,6 +514,7 @@ struct iio_buffer_setup_ops {
/**
* struct iio_dev - industrial I/O device
* @id: [INTERN] used to identify device internally
+ * @driver_module: [INTERN] used to make it harder to undercut users
* @modes: [DRIVER] operating modes supported by device
* @currentmode: [DRIVER] current operating mode
* @dev: [DRIVER] device structure, should be assigned a parent
@@ -558,6 +555,7 @@ struct iio_buffer_setup_ops {
*/
struct iio_dev {
int id;
+ struct module *driver_module;
int modes;
int currentmode;
@@ -604,9 +602,34 @@ struct iio_dev {
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
-int iio_device_register(struct iio_dev *indio_dev);
+/**
+ * iio_device_register() - register a device with the IIO subsystem
+ * @indio_dev: Device structure filled by the device driver
+ **/
+#define iio_device_register(iio_dev) \
+ __iio_device_register((iio_dev), THIS_MODULE)
+int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
void iio_device_unregister(struct iio_dev *indio_dev);
-int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev);
+/**
+ * devm_iio_device_register - Resource-managed iio_device_register()
+ * @dev: Device to allocate iio_dev for
+ * @indio_dev: Device structure filled by the device driver
+ *
+ * Managed iio_device_register. The IIO device registered with this
+ * function is automatically unregistered on driver detach. This function
+ * calls iio_device_register() internally. Refer to that function for more
+ * information.
+ *
+ * If an iio_dev registered with this function needs to be unregistered
+ * separately, devm_iio_device_unregister() must be used.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+#define devm_iio_device_register(dev, indio_dev) \
+	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
+int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
+ struct module *this_mod);
void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
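Driver call sites stay the same; the registration macros above simply record the owning module. A sketch, assuming an I2C driver probe with 'client' and 'indio_dev' in scope:

	/* expands to __devm_iio_device_register(&client->dev, indio_dev, THIS_MODULE) */
	return devm_iio_device_register(&client->dev, indio_dev);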
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
index fa7931933067..8642b91a7577 100644
--- a/include/linux/iio/sw_device.h
+++ b/include/linux/iio/sw_device.h
@@ -60,7 +60,7 @@ void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt);
static inline
void iio_swd_group_init_type_name(struct iio_sw_device *d,
const char *name,
- struct config_item_type *type)
+ const struct config_item_type *type)
{
#if IS_ENABLED(CONFIG_CONFIGFS_FS)
config_group_init_type_name(&d->group, name, type);
diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h
index c97eab67558f..0c43738a9e24 100644
--- a/include/linux/iio/sw_trigger.h
+++ b/include/linux/iio/sw_trigger.h
@@ -60,7 +60,7 @@ void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt);
static inline
void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
const char *name,
- struct config_item_type *type)
+ const struct config_item_type *type)
{
#if IS_ENABLED(CONFIG_CONFIGFS_FS)
config_group_init_type_name(&t->group, name, type);
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 7142d8d6e470..7d5e44518379 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -23,7 +23,6 @@ struct iio_trigger;
/**
* struct iio_trigger_ops - operations structure for an iio_trigger.
- * @owner: used to monitor usage count of the trigger.
* @set_trigger_state: switch on/off the trigger on demand
* @try_reenable: function to reenable the trigger when the
* use count is zero (may be NULL)
@@ -34,7 +33,6 @@ struct iio_trigger;
* instances of a given device.
**/
struct iio_trigger_ops {
- struct module *owner;
int (*set_trigger_state)(struct iio_trigger *trig, bool state);
int (*try_reenable)(struct iio_trigger *trig);
int (*validate_device)(struct iio_trigger *trig,
@@ -62,6 +60,7 @@ struct iio_trigger_ops {
**/
struct iio_trigger {
const struct iio_trigger_ops *ops;
+ struct module *owner;
int id;
const char *name;
struct device dev;
@@ -87,14 +86,14 @@ static inline struct iio_trigger *to_iio_trigger(struct device *d)
static inline void iio_trigger_put(struct iio_trigger *trig)
{
- module_put(trig->ops->owner);
+ module_put(trig->owner);
put_device(&trig->dev);
}
static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
{
get_device(&trig->dev);
- __module_get(trig->ops->owner);
+ __module_get(trig->owner);
return trig;
}
@@ -127,10 +126,16 @@ static inline void *iio_trigger_get_drvdata(struct iio_trigger *trig)
* iio_trigger_register() - register a trigger with the IIO core
* @trig_info: trigger to be registered
**/
-int iio_trigger_register(struct iio_trigger *trig_info);
-
-int devm_iio_trigger_register(struct device *dev,
- struct iio_trigger *trig_info);
+#define iio_trigger_register(trig_info) \
+ __iio_trigger_register((trig_info), THIS_MODULE)
+int __iio_trigger_register(struct iio_trigger *trig_info,
+ struct module *this_mod);
+
+#define devm_iio_trigger_register(dev, trig_info) \
+ __devm_iio_trigger_register((dev), (trig_info), THIS_MODULE)
+int __devm_iio_trigger_register(struct device *dev,
+ struct iio_trigger *trig_info,
+ struct module *this_mod);
/**
* iio_trigger_unregister() - unregister a trigger from the core
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 485a5b48f038..f3274d9f46a2 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -212,6 +212,7 @@
#define DMA_FSTS_IQE (1 << 4)
#define DMA_FSTS_ICE (1 << 5)
#define DMA_FSTS_ITE (1 << 6)
+#define DMA_FSTS_PRO (1 << 7)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
/* FRCD_REG, 32 bits access */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index baeb872283d9..69c238210325 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -594,21 +594,6 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t)
__tasklet_hi_schedule(t);
}
-extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
-
-/*
- * This version avoids touching any other tasklets. Needed for kmemcheck
- * in order not to take any page faults while enqueueing this tasklet;
- * consider VERY carefully whether you really need this or
- * tasklet_hi_schedule()...
- */
-static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
-{
- if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
- __tasklet_hi_schedule_first(t);
-}
-
-
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
atomic_inc(&t->count);
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 8a7c6d26b147..ca10767ab73d 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -16,27 +16,29 @@ struct vm_fault;
*/
#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */
#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */
-#define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */
-#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
+#define IOMAP_MAPPED 0x03 /* blocks allocated at @addr */
+#define IOMAP_UNWRITTEN 0x04 /* blocks allocated at @addr in unwritten state */
/*
* Flags for all iomap mappings:
*/
-#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
+#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
+#define IOMAP_F_BOUNDARY 0x02 /* mapping ends at metadata boundary */
/*
* Flags that only need to be reported for IOMAP_REPORT requests:
*/
-#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */
-#define IOMAP_F_SHARED 0x20 /* block shared with another file */
+#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */
+#define IOMAP_F_SHARED 0x20 /* block shared with another file */
+#define IOMAP_F_DATA_INLINE 0x40 /* data inline in the inode */
/*
- * Magic value for blkno:
+ * Magic value for addr:
*/
-#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */
+#define IOMAP_NULL_ADDR -1ULL /* addr is not valid */
struct iomap {
- sector_t blkno; /* 1st sector of mapping, 512b units */
+ u64 addr; /* disk offset of mapping, bytes */
loff_t offset; /* file offset of mapping, bytes */
u64 length; /* length of mapping, bytes */
u16 type; /* type of mapping */
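Filesystems converting from the old sector-based blkno to the new byte-based addr typically scale by the block size; an illustrative sketch where fs_block is a hypothetical filesystem block number, not from this patch.

	/* before: iomap->blkno = (sector_t)fs_block << (inode->i_blkbits - 9); */
	iomap->addr = (u64)fs_block << inode->i_blkbits;	/* bytes, not 512-byte sectors */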
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 83c8d6530f0f..93b4183cf53d 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -271,11 +271,14 @@ extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
void *arg, int (*func)(unsigned long, unsigned long, void *));
extern int
+walk_mem_res(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *));
+extern int
walk_system_ram_res(u64 start, u64 end, void *arg,
- int (*func)(u64, u64, void *));
+ int (*func)(struct resource *, void *));
extern int
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
- void *arg, int (*func)(u64, u64, void *));
+ void *arg, int (*func)(struct resource *, void *));
/* True if any part of r1 overlaps r2 */
static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
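
Because the walk callbacks now receive the struct resource itself rather than a start/end pair, callers look roughly like the sketch below; note_ram_region() is a hypothetical callback name.

    static int note_ram_region(struct resource *res, void *arg)
    {
            pr_info("System RAM: %pR\n", res);  /* old callback form was (u64 start, u64 end, void *) */
            return 0;
    }

    ret = walk_system_ram_res(0, ULONG_MAX, NULL, note_ram_region);
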
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 2cdd74809899..627efac73e6d 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -3,6 +3,7 @@
#define IOPRIO_H
#include <linux/sched.h>
+#include <linux/sched/rt.h>
#include <linux/iocontext.h>
/*
@@ -63,7 +64,7 @@ static inline int task_nice_ioclass(struct task_struct *task)
{
if (task->policy == SCHED_IDLE)
return IOPRIO_CLASS_IDLE;
- else if (task->policy == SCHED_FIFO || task->policy == SCHED_RR)
+ else if (task_is_realtime(task))
return IOPRIO_CLASS_RT;
else
return IOPRIO_CLASS_BE;
diff --git a/include/linux/iova.h b/include/linux/iova.h
index d179b9bf7814..928442dda565 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -70,10 +70,12 @@ struct iova_fq {
struct iova_domain {
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
struct rb_root rbroot; /* iova domain rbtree root */
- struct rb_node *cached32_node; /* Save last alloced node */
+ struct rb_node *cached_node; /* Save last alloced node */
+ struct rb_node *cached32_node; /* Save last 32-bit alloced node */
unsigned long granule; /* pfn granularity for this domain */
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
+ struct iova anchor; /* rbtree lookup anchor */
struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU
@@ -148,12 +150,12 @@ void queue_iova(struct iova_domain *iovad,
unsigned long pfn, unsigned long pages,
unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
- unsigned long limit_pfn);
+ unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
- unsigned long start_pfn, unsigned long pfn_32bit);
+ unsigned long start_pfn);
int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -210,7 +212,8 @@ static inline void queue_iova(struct iova_domain *iovad,
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
unsigned long size,
- unsigned long limit_pfn)
+ unsigned long limit_pfn,
+ bool flush_rcache)
{
return 0;
}
@@ -229,8 +232,7 @@ static inline void copy_reserved_iova(struct iova_domain *from,
static inline void init_iova_domain(struct iova_domain *iovad,
unsigned long granule,
- unsigned long start_pfn,
- unsigned long pfn_32bit)
+ unsigned long start_pfn)
{
}
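
Callers drop the pfn_32bit argument when initialising a domain and gain a flush_rcache flag when allocating. A minimal sketch of the updated usage, with order, base_pfn, iova_len and dma_limit standing in for driver-specific values:

    init_iova_domain(&iovad, 1UL << order, base_pfn);   /* pfn_32bit argument is gone */
    iova_pfn = alloc_iova_fast(&iovad, iova_len,
                               dma_limit >> iova_shift(&iovad), true);
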
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index f1045b2c6a00..f4ffacf4fe9d 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -113,9 +113,9 @@ int ipmi_create_user(unsigned int if_num,
int ipmi_destroy_user(ipmi_user_t user);
/* Get the IPMI version of the BMC we are talking to. */
-void ipmi_get_version(ipmi_user_t user,
- unsigned char *major,
- unsigned char *minor);
+int ipmi_get_version(ipmi_user_t user,
+ unsigned char *major,
+ unsigned char *minor);
/* Set and get the slave address and LUN that we will use for our
source messages. Note that this affects the interface, not just
@@ -277,7 +277,7 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len);
*/
enum ipmi_addr_src {
SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
- SI_PCI, SI_DEVICETREE, SI_LAST
+ SI_PCI, SI_DEVICETREE, SI_PLATFORM, SI_LAST
};
const char *ipmi_addr_src_to_str(enum ipmi_addr_src src);
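
Since ipmi_get_version() can now fail, callers are expected to check its return value; a rough sketch:

    unsigned char major, minor;
    int rv;

    rv = ipmi_get_version(user, &major, &minor);
    if (rv)
            pr_err("could not get IPMI version: %d\n", rv);
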
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index f8cea14485dd..5be51281e14d 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -162,27 +162,27 @@ struct ipmi_device_id {
#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
-/* Take a pointer to a raw data buffer and a length and extract device
- id information from it. The first byte of data must point to the
- netfn << 2, the data should be of the format:
- netfn << 2, cmd, completion code, data
- as normally comes from a device interface. */
-static inline int ipmi_demangle_device_id(const unsigned char *data,
+/* Take a pointer to an IPMI response and extract device id information from
+ * it. @netfn is in the IPMI_NETFN_ format, so may need to be shifted from
+ * a SI response.
+ */
+static inline int ipmi_demangle_device_id(uint8_t netfn, uint8_t cmd,
+ const unsigned char *data,
unsigned int data_len,
struct ipmi_device_id *id)
{
- if (data_len < 9)
+ if (data_len < 7)
return -EINVAL;
- if (data[0] != IPMI_NETFN_APP_RESPONSE << 2 ||
- data[1] != IPMI_GET_DEVICE_ID_CMD)
+ if (netfn != IPMI_NETFN_APP_RESPONSE || cmd != IPMI_GET_DEVICE_ID_CMD)
/* Strange, didn't get the response we expected. */
return -EINVAL;
- if (data[2] != 0)
+ if (data[0] != 0)
/* That's odd, it shouldn't be able to fail. */
return -EINVAL;
- data += 3;
- data_len -= 3;
+ data++;
+ data_len--;
+
id->device_id = data[0];
id->device_revision = data[1];
id->firmware_revision_1 = data[2];
@@ -214,7 +214,6 @@ static inline int ipmi_demangle_device_id(const unsigned char *data,
call. */
int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
void *send_info,
- struct ipmi_device_id *device_id,
struct device *dev,
unsigned char slave_addr);
@@ -242,11 +241,13 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
msg->done(msg);
}
+#ifdef CONFIG_IPMI_PROC_INTERFACE
/* Allow the lower layer to add things to the proc filesystem
directory for this interface. Note that the entry will
automatically be destroyed when the interface is destroyed. */
int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
const struct file_operations *proc_ops,
void *data);
+#endif
#endif /* __LINUX_IPMI_SMI_H */
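
With netfn and cmd split out of the data buffer, a caller that previously handed over the raw response now passes them separately, along these lines (a sketch only; msg is assumed to follow the struct kernel_ipmi_msg layout):

    rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
                                 msg->msg.data, msg->msg.data_len, &id);
    if (rv)
            dev_warn(dev, "unexpected device id response\n");
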
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4536286cc4d2..b01d06db9101 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1114,6 +1114,28 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
return readl(gc->reg_base + reg_offset);
}
+struct irq_matrix;
+struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
+ unsigned int alloc_start,
+ unsigned int alloc_end);
+void irq_matrix_online(struct irq_matrix *m);
+void irq_matrix_offline(struct irq_matrix *m);
+void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
+int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
+void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
+int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
+void irq_matrix_reserve(struct irq_matrix *m);
+void irq_matrix_remove_reserved(struct irq_matrix *m);
+int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
+ bool reserved, unsigned int *mapped_cpu);
+void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
+ unsigned int bit, bool managed);
+void irq_matrix_assign(struct irq_matrix *m, unsigned int bit);
+unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown);
+unsigned int irq_matrix_allocated(struct irq_matrix *m);
+unsigned int irq_matrix_reserved(struct irq_matrix *m);
+void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind);
+
/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */
#define INVALID_HWIRQ (~0UL)
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
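
The new matrix allocator is aimed at per-CPU bitmap allocation such as the x86 vector domain; the sketch below only illustrates the call pattern, with the x86 vector constants standing in for the allocation window.

    struct irq_matrix *m;
    unsigned int cpu;
    int bit;

    m = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR, FIRST_SYSTEM_VECTOR);
    irq_matrix_online(m);                           /* bring this CPU's map online */
    bit = irq_matrix_alloc(m, cpu_online_mask, false, &cpu);
    if (bit >= 0)
            irq_matrix_free(m, cpu, bit, false);    /* release the bit again */
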
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 9270d73ea682..0e81035b678f 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -34,10 +34,7 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
bool irq_work_queue(struct irq_work *work);
-
-#ifdef CONFIG_SMP
bool irq_work_queue_on(struct irq_work *work, int cpu);
-#endif
void irq_work_tick(void);
void irq_work_sync(struct irq_work *work);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 14b74f22d43c..c00c4c33e432 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -68,6 +68,7 @@
#define GICD_CTLR_ENABLE_SS_G1 (1U << 1)
#define GICD_CTLR_ENABLE_SS_G0 (1U << 0)
+#define GICD_TYPER_RSS (1U << 26)
#define GICD_TYPER_LPIS (1U << 17)
#define GICD_TYPER_MBIS (1U << 16)
@@ -461,6 +462,7 @@
#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT)
#define ICC_CTLR_EL1_A3V_SHIFT 15
#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT)
+#define ICC_CTLR_EL1_RSS (0x1 << 18)
#define ICC_PMR_EL1_SHIFT 0
#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT)
#define ICC_BPR0_EL1_SHIFT 0
@@ -549,6 +551,8 @@
#define ICC_SGI1R_AFFINITY_2_SHIFT 32
#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
+#define ICC_SGI1R_RS_SHIFT 44
+#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT)
#define ICC_SGI1R_AFFINITY_3_SHIFT 48
#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index 58a4d89aa82c..447da8ca2156 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -20,6 +20,12 @@
struct its_vpe;
+/*
+ * Maximum number of ITTs when GITS_TYPER.VMOVP == 0, using the
+ * ITSList mechanism to perform inter-ITS synchronization.
+ */
+#define GICv4_ITS_LIST_MAX 16
+
/* Embedded in kvm.arch */
struct its_vm {
struct fwnode_handle *fwnode;
@@ -30,6 +36,7 @@ struct its_vm {
irq_hw_number_t db_lpi_base;
unsigned long *db_bitmap;
int nr_db_lpis;
+ u32 vlpi_count[GICv4_ITS_LIST_MAX];
};
/* Embedded in kvm_vcpu.arch */
@@ -64,12 +71,14 @@ struct its_vpe {
* @vm: Pointer to the GICv4 notion of a VM
* @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE)
* @vintid: Virtual LPI number
+ * @properties: Priority and enable bits (as written in the prop table)
* @db_enabled: Is the VPE doorbell to be generated?
*/
struct its_vlpi_map {
struct its_vm *vm;
struct its_vpe *vpe;
u32 vintid;
+ u8 properties;
bool db_enabled;
};
diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h
index 2e3d1afeb674..f19ccee7749f 100644
--- a/include/linux/irqchip/irq-omap-intc.h
+++ b/include/linux/irqchip/irq-omap-intc.h
@@ -18,8 +18,6 @@
#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
-void omap3_init_irq(void);
-
int omap_irq_pending(void);
void omap_intc_save_context(void);
void omap_intc_restore_context(void);
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index b6084898d330..dd418955962b 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -94,6 +94,7 @@ struct irq_desc {
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
struct dentry *debugfs_file;
+ const char *dev_name;
#endif
#ifdef CONFIG_SPARSE_IRQ
struct rcu_head rcu;
@@ -245,6 +246,14 @@ static inline int irq_is_percpu(unsigned int irq)
return desc->status_use_accessors & IRQ_PER_CPU;
}
+static inline int irq_is_percpu_devid(unsigned int irq)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ return desc->status_use_accessors & IRQ_PER_CPU_DEVID;
+}
+
static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
{
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index b1037dfc47e4..a34355d19546 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -33,6 +33,7 @@
#include <linux/types.h>
#include <linux/irqhandler.h>
#include <linux/of.h>
+#include <linux/mutex.h>
#include <linux/radix-tree.h>
struct device_node;
@@ -41,6 +42,7 @@ struct of_device_id;
struct irq_chip;
struct irq_data;
struct cpumask;
+struct seq_file;
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
@@ -105,18 +107,21 @@ struct irq_domain_ops {
int (*xlate)(struct irq_domain *d, struct device_node *node,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type);
-
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/* extended V2 interfaces to support hierarchy irq_domains */
int (*alloc)(struct irq_domain *d, unsigned int virq,
unsigned int nr_irqs, void *arg);
void (*free)(struct irq_domain *d, unsigned int virq,
unsigned int nr_irqs);
- void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
+ int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early);
void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *out_hwirq, unsigned int *out_type);
#endif
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+ void (*debug_show)(struct seq_file *m, struct irq_domain *d,
+ struct irq_data *irqd, int ind);
+#endif
};
extern struct irq_domain_ops irq_generic_chip_ops;
@@ -134,8 +139,8 @@ struct irq_domain_chip_generic;
* @mapcount: The number of mapped interrupts
*
* Optional elements
- * @of_node: Pointer to device tree nodes associated with the irq_domain. Used
- * when decoding device tree interrupt specifiers.
+ * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy
+ * to swap it for the of_node via the irq_domain_get_of_node accessor
* @gc: Pointer to a list of generic chips. There is a helper function for
* setting up one or more generic chips for interrupt controllers
* drivers using the generic chip library which uses this pointer.
@@ -173,6 +178,7 @@ struct irq_domain {
unsigned int revmap_direct_max_irq;
unsigned int revmap_size;
struct radix_tree_root revmap_tree;
+ struct mutex revmap_tree_mutex;
unsigned int linear_revmap[];
};
@@ -438,7 +444,7 @@ extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
bool realloc, const struct cpumask *affinity);
extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
-extern void irq_domain_activate_irq(struct irq_data *irq_data);
+extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
@@ -508,8 +514,6 @@ static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
-static inline void irq_domain_activate_irq(struct irq_data *data) { }
-static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
unsigned int nr_irqs, int node, void *arg)
{
@@ -558,8 +562,6 @@ irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
#else /* CONFIG_IRQ_DOMAIN */
static inline void irq_dispose_mapping(unsigned int virq) { }
-static inline void irq_domain_activate_irq(struct irq_data *data) { }
-static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
static inline struct irq_domain *irq_find_matching_fwnode(
struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token)
{
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 3b7675bcca64..c7b368c734af 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -82,9 +82,9 @@
extern bool static_key_initialized;
-#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized, \
- "%s used before call to jump_label_init", \
- __func__)
+#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized, \
+ "%s(): static key '%pS' used before call to jump_label_init()", \
+ __func__, (key))
#ifdef HAVE_JUMP_LABEL
@@ -212,13 +212,13 @@ static __always_inline bool static_key_true(struct static_key *key)
static inline void static_key_slow_inc(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
atomic_inc(&key->enabled);
}
static inline void static_key_slow_dec(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
atomic_dec(&key->enabled);
}
@@ -237,7 +237,7 @@ static inline int jump_label_apply_nops(struct module *mod)
static inline void static_key_enable(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
if (atomic_read(&key->enabled) != 0) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
@@ -248,7 +248,7 @@ static inline void static_key_enable(struct static_key *key)
static inline void static_key_disable(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
if (atomic_read(&key->enabled) != 1) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index fc13ff289903..baa8eabbaa56 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -25,18 +25,18 @@ struct static_key_deferred {
};
static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
static_key_slow_dec(&key->key);
}
static inline void static_key_deferred_flush(struct static_key_deferred *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
}
static inline void
jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
}
#endif /* HAVE_JUMP_LABEL */
#endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 11dd93e42580..708f337d780b 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -14,6 +14,12 @@
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
+#ifndef CONFIG_64BIT
+# define KALLSYM_FMT "%08lx"
+#else
+# define KALLSYM_FMT "%016lx"
+#endif
+
struct module;
#ifdef CONFIG_KALLSYMS
@@ -46,6 +52,9 @@ extern void __print_symbol(const char *fmt, unsigned long address);
int lookup_symbol_name(unsigned long addr, char *symname);
int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
+/* How and when do we show kallsyms values? */
+extern int kallsyms_show_value(void);
+
#else /* !CONFIG_KALLSYMS */
static inline unsigned long kallsyms_lookup_name(const char *name)
@@ -104,6 +113,11 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
return -ERANGE;
}
+static inline int kallsyms_show_value(void)
+{
+ return false;
+}
+
/* Stupid that this does nothing, but I didn't create this mess. */
#define __print_symbol(fmt, addr)
#endif /*CONFIG_KALLSYMS*/
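
A typical use of the new helpers is to print a symbol address only when the reader is permitted to see it, falling back to zero otherwise; a sketch, where m is the seq_file being written and sym is whatever record is being dumped:

    seq_printf(m, KALLSYM_FMT " %s\n",
               kallsyms_show_value() ? addr : 0UL, sym->name);
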
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5017269e3f04..e3eb834c9a35 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -46,7 +46,7 @@ void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
- unsigned long *flags);
+ slab_flags_t *flags);
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -95,7 +95,7 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
size_t *size,
- unsigned long *flags) {}
+ slab_flags_t *flags) {}
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 1c08c925cefb..f16f6ceb3875 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -160,7 +160,7 @@ struct kexec_buf {
};
int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
- int (*func)(u64, u64, void *));
+ int (*func)(struct resource *, void *));
extern int kexec_add_buffer(struct kexec_buf *kbuf);
int kexec_locate_mem_hole(struct kexec_buf *kbuf);
#endif /* CONFIG_KEXEC_FILE */
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 41eb6fdf87a8..7b45959ebd92 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -325,7 +325,7 @@ __kfifo_uint_must_check_helper( \
*
* This macro dynamically allocates a new fifo buffer.
*
- * The numer of elements will be rounded-up to a power of 2.
+ * The number of elements will be rounded-up to a power of 2.
* The fifo will be released with kfifo_free().
* Return 0 if no error, otherwise an error code.
*/
@@ -358,9 +358,9 @@ __kfifo_int_must_check_helper( \
* @buffer: the preallocated buffer to be used
* @size: the size of the internal buffer, this has to be a power of 2
*
- * This macro initialize a fifo using a preallocated buffer.
+ * This macro initializes a fifo using a preallocated buffer.
*
- * The numer of elements will be rounded-up to a power of 2.
+ * The number of elements will be rounded-up to a power of 2.
* Return 0 if no error, otherwise an error code.
*/
#define kfifo_init(fifo, buffer, size) \
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 7b1d7bead7d9..ea32a7d3cf1b 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -1,172 +1 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_KMEMCHECK_H
-#define LINUX_KMEMCHECK_H
-
-#include <linux/mm_types.h>
-#include <linux/types.h>
-
-#ifdef CONFIG_KMEMCHECK
-extern int kmemcheck_enabled;
-
-/* The slab-related functions. */
-void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
-void kmemcheck_free_shadow(struct page *page, int order);
-void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
- size_t size);
-void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
-
-void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
- gfp_t gfpflags);
-
-void kmemcheck_show_pages(struct page *p, unsigned int n);
-void kmemcheck_hide_pages(struct page *p, unsigned int n);
-
-bool kmemcheck_page_is_tracked(struct page *p);
-
-void kmemcheck_mark_unallocated(void *address, unsigned int n);
-void kmemcheck_mark_uninitialized(void *address, unsigned int n);
-void kmemcheck_mark_initialized(void *address, unsigned int n);
-void kmemcheck_mark_freed(void *address, unsigned int n);
-
-void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
-void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
-void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
-
-int kmemcheck_show_addr(unsigned long address);
-int kmemcheck_hide_addr(unsigned long address);
-
-bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
-
-/*
- * Bitfield annotations
- *
- * How to use: If you have a struct using bitfields, for example
- *
- * struct a {
- * int x:8, y:8;
- * };
- *
- * then this should be rewritten as
- *
- * struct a {
- * kmemcheck_bitfield_begin(flags);
- * int x:8, y:8;
- * kmemcheck_bitfield_end(flags);
- * };
- *
- * Now the "flags_begin" and "flags_end" members may be used to refer to the
- * beginning and end, respectively, of the bitfield (and things like
- * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
- * fields should be annotated:
- *
- * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
- * kmemcheck_annotate_bitfield(a, flags);
- */
-#define kmemcheck_bitfield_begin(name) \
- int name##_begin[0];
-
-#define kmemcheck_bitfield_end(name) \
- int name##_end[0];
-
-#define kmemcheck_annotate_bitfield(ptr, name) \
- do { \
- int _n; \
- \
- if (!ptr) \
- break; \
- \
- _n = (long) &((ptr)->name##_end) \
- - (long) &((ptr)->name##_begin); \
- BUILD_BUG_ON(_n < 0); \
- \
- kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
- } while (0)
-
-#define kmemcheck_annotate_variable(var) \
- do { \
- kmemcheck_mark_initialized(&(var), sizeof(var)); \
- } while (0) \
-
-#else
-#define kmemcheck_enabled 0
-
-static inline void
-kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
-{
-}
-
-static inline void
-kmemcheck_free_shadow(struct page *page, int order)
-{
-}
-
-static inline void
-kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
- size_t size)
-{
-}
-
-static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
- size_t size)
-{
-}
-
-static inline void kmemcheck_pagealloc_alloc(struct page *p,
- unsigned int order, gfp_t gfpflags)
-{
-}
-
-static inline bool kmemcheck_page_is_tracked(struct page *p)
-{
- return false;
-}
-
-static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_freed(void *address, unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_unallocated_pages(struct page *p,
- unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
- unsigned int n)
-{
-}
-
-static inline void kmemcheck_mark_initialized_pages(struct page *p,
- unsigned int n)
-{
-}
-
-static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
-{
- return true;
-}
-
-#define kmemcheck_bitfield_begin(name)
-#define kmemcheck_bitfield_end(name)
-#define kmemcheck_annotate_bitfield(ptr, name) \
- do { \
- } while (0)
-
-#define kmemcheck_annotate_variable(var) \
- do { \
- } while (0)
-
-#endif /* CONFIG_KMEMCHECK */
-
-#endif /* LINUX_KMEMCHECK_H */
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 590343f6c1b1..5ac416e2d339 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -48,14 +48,14 @@ extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
- int min_count, unsigned long flags,
+ int min_count, slab_flags_t flags,
gfp_t gfp)
{
if (!(flags & SLAB_NOLEAKTRACE))
kmemleak_alloc(ptr, size, min_count, gfp);
}
-static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
{
if (!(flags & SLAB_NOLEAKTRACE))
kmemleak_free(ptr);
@@ -76,7 +76,7 @@ static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
{
}
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
- int min_count, unsigned long flags,
+ int min_count, slab_flags_t flags,
gfp_t gfp)
{
}
@@ -94,7 +94,7 @@ static inline void kmemleak_free(const void *ptr)
static inline void kmemleak_free_part(const void *ptr, size_t size)
{
}
-static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
{
}
static inline void kmemleak_free_percpu(const void __percpu *ptr)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index bd2684700b74..9440a2fc8893 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -391,10 +391,6 @@ int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
int longjmp_break_handler(struct kprobe *, struct pt_regs *);
-int register_jprobe(struct jprobe *p);
-void unregister_jprobe(struct jprobe *p);
-int register_jprobes(struct jprobe **jps, int num);
-void unregister_jprobes(struct jprobe **jps, int num);
void jprobe_return(void);
unsigned long arch_deref_entry_point(void *);
@@ -443,20 +439,6 @@ static inline void unregister_kprobe(struct kprobe *p)
static inline void unregister_kprobes(struct kprobe **kps, int num)
{
}
-static inline int register_jprobe(struct jprobe *p)
-{
- return -ENOSYS;
-}
-static inline int register_jprobes(struct jprobe **jps, int num)
-{
- return -ENOSYS;
-}
-static inline void unregister_jprobe(struct jprobe *p)
-{
-}
-static inline void unregister_jprobes(struct jprobe **jps, int num)
-{
-}
static inline void jprobe_return(void)
{
}
@@ -486,6 +468,20 @@ static inline int enable_kprobe(struct kprobe *kp)
return -ENOSYS;
}
#endif /* CONFIG_KPROBES */
+static inline int register_jprobe(struct jprobe *p)
+{
+ return -ENOSYS;
+}
+static inline int register_jprobes(struct jprobe **jps, int num)
+{
+ return -ENOSYS;
+}
+static inline void unregister_jprobe(struct jprobe *p)
+{
+}
+static inline void unregister_jprobes(struct jprobe **jps, int num)
+{
+}
static inline int disable_kretprobe(struct kretprobe *rp)
{
return disable_kprobe(&rp->kp);
@@ -496,11 +492,11 @@ static inline int enable_kretprobe(struct kretprobe *rp)
}
static inline int disable_jprobe(struct jprobe *jp)
{
- return disable_kprobe(&jp->kp);
+ return -ENOSYS;
}
static inline int enable_jprobe(struct jprobe *jp)
{
- return enable_kprobe(&jp->kp);
+ return -ENOSYS;
}
#ifndef CONFIG_KPROBES
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 86d53a3cb497..3203e36b2ee8 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -4,6 +4,7 @@
/* Simple interface for creating and stopping kernel threads without mess. */
#include <linux/err.h>
#include <linux/sched.h>
+#include <linux/cgroup.h>
__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
@@ -199,4 +200,14 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
void kthread_destroy_worker(struct kthread_worker *worker);
+#ifdef CONFIG_BLK_CGROUP
+void kthread_associate_blkcg(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *kthread_blkcg(void);
+#else
+static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
+static inline struct cgroup_subsys_state *kthread_blkcg(void)
+{
+ return NULL;
+}
+#endif
#endif /* _LINUX_KTHREAD_H */
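
The blkcg association is meant for kthreads that submit I/O on behalf of a cgroup (for example a loop worker); a minimal sketch of the intended pattern, where css and do_the_io() are placeholders:

    kthread_associate_blkcg(css);   /* bill subsequent I/O from this kthread to css */
    do_the_io();                    /* hypothetical work that submits bios */
    kthread_associate_blkcg(NULL);  /* drop the association when done */
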
diff --git a/include/linux/leds.h b/include/linux/leds.h
index bf6db4fe895b..5579c64c8fd6 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -40,16 +40,16 @@ struct led_classdev {
int flags;
/* Lower 16 bits reflect status */
-#define LED_SUSPENDED (1 << 0)
-#define LED_UNREGISTERING (1 << 1)
+#define LED_SUSPENDED BIT(0)
+#define LED_UNREGISTERING BIT(1)
/* Upper 16 bits reflect control information */
-#define LED_CORE_SUSPENDRESUME (1 << 16)
-#define LED_SYSFS_DISABLE (1 << 17)
-#define LED_DEV_CAP_FLASH (1 << 18)
-#define LED_HW_PLUGGABLE (1 << 19)
-#define LED_PANIC_INDICATOR (1 << 20)
-#define LED_BRIGHT_HW_CHANGED (1 << 21)
-#define LED_RETAIN_AT_SHUTDOWN (1 << 22)
+#define LED_CORE_SUSPENDRESUME BIT(16)
+#define LED_SYSFS_DISABLE BIT(17)
+#define LED_DEV_CAP_FLASH BIT(18)
+#define LED_HW_PLUGGABLE BIT(19)
+#define LED_PANIC_INDICATOR BIT(20)
+#define LED_BRIGHT_HW_CHANGED BIT(21)
+#define LED_RETAIN_AT_SHUTDOWN BIT(22)
/* set_brightness_work / blink_timer flags, atomic, private. */
unsigned long work_flags;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 931c32f1f18d..ed9826b21c5e 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -522,6 +522,7 @@ enum ata_lpm_policy {
ATA_LPM_UNKNOWN,
ATA_LPM_MAX_POWER,
ATA_LPM_MED_POWER,
+ ATA_LPM_MED_POWER_WITH_DIPM, /* Med power + DIPM as win IRST does */
ATA_LPM_MIN_POWER,
};
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index a29a8db5cc2f..2d1d9de06728 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -57,6 +57,7 @@ typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
@@ -70,6 +71,7 @@ struct nvm_dev_ops {
nvm_op_set_bb_fn *set_bb_tbl;
nvm_submit_io_fn *submit_io;
+ nvm_submit_io_sync_fn *submit_io_sync;
nvm_create_dma_pool_fn *create_dma_pool;
nvm_destroy_dma_pool_fn *destroy_dma_pool;
@@ -461,10 +463,9 @@ struct nvm_tgt_type {
/* For internal use */
struct list_head list;
+ struct module *owner;
};
-extern struct nvm_tgt_type *nvm_find_target_type(const char *, int);
-
extern int nvm_register_tgt_type(struct nvm_tgt_type *);
extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
@@ -479,10 +480,8 @@ extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
int, int);
extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
+extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
-extern int nvm_set_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *,
- const struct ppa_addr *, int, int);
-extern void nvm_free_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
void *);
extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
@@ -491,8 +490,6 @@ extern void nvm_end_io(struct nvm_rq *);
extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
-extern int nvm_dev_factory(struct nvm_dev *, int flags);
-
extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int);
#else /* CONFIG_NVM */
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 2e6f90bd52aa..f68db9e450eb 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -2,7 +2,7 @@
#ifndef _LINUX_LINKAGE_H
#define _LINUX_LINKAGE_H
-#include <linux/compiler.h>
+#include <linux/compiler_types.h>
#include <linux/stringify.h>
#include <linux/export.h>
#include <asm/linkage.h>
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 194991ef9347..fc5c1be3f6f4 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -87,10 +87,35 @@ struct klp_func {
bool transition;
};
+struct klp_object;
+
+/**
+ * struct klp_callbacks - pre/post live-(un)patch callback structure
+ * @pre_patch: executed before code patching
+ * @post_patch: executed after code patching
+ * @pre_unpatch: executed before code unpatching
+ * @post_unpatch: executed after code unpatching
+ * @post_unpatch_enabled: flag indicating if post-unpatch callback
+ * should run
+ *
+ * All callbacks are optional. Only the pre-patch callback, if provided,
+ * will be unconditionally executed. If the parent klp_object fails to
+ * patch for any reason, including a non-zero error status returned from
+ * the pre-patch callback, no further callbacks will be executed.
+ */
+struct klp_callbacks {
+ int (*pre_patch)(struct klp_object *obj);
+ void (*post_patch)(struct klp_object *obj);
+ void (*pre_unpatch)(struct klp_object *obj);
+ void (*post_unpatch)(struct klp_object *obj);
+ bool post_unpatch_enabled;
+};
+
/**
* struct klp_object - kernel object structure for live patching
* @name: module name (or NULL for vmlinux)
* @funcs: function entries for functions to be patched in the object
+ * @callbacks: functions to be executed pre/post (un)patching
* @kobj: kobject for sysfs resources
* @mod: kernel module associated with the patched object
* (NULL for vmlinux)
@@ -100,6 +125,7 @@ struct klp_object {
/* external */
const char *name;
struct klp_func *funcs;
+ struct klp_callbacks callbacks;
/* internal */
struct kobject kobj;
@@ -164,6 +190,14 @@ static inline bool klp_have_reliable_stack(void)
IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
}
+void *klp_shadow_get(void *obj, unsigned long id);
+void *klp_shadow_alloc(void *obj, unsigned long id, void *data,
+ size_t size, gfp_t gfp_flags);
+void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
+ size_t size, gfp_t gfp_flags);
+void klp_shadow_free(void *obj, unsigned long id);
+void klp_shadow_free_all(unsigned long id);
+
#else /* !CONFIG_LIVEPATCH */
static inline int klp_module_coming(struct module *mod) { return 0; }
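
A live patch can now pair the (un)patch callbacks with shadow variables. The sketch below is illustrative only; SHADOW_DATA_ID, funcs[] and the object name stand in for the patch author's own definitions.

    static int demo_pre_patch(struct klp_object *obj)
    {
            int *sv;

            sv = klp_shadow_alloc(obj, SHADOW_DATA_ID, NULL, sizeof(*sv), GFP_KERNEL);
            return sv ? 0 : -ENOMEM;
    }

    static void demo_post_unpatch(struct klp_object *obj)
    {
            klp_shadow_free(obj, SHADOW_DATA_ID);
    }

    static struct klp_object objs[] = {
            {
                    .name = "target_module",
                    .funcs = funcs,
                    .callbacks = {
                            .pre_patch = demo_pre_patch,
                            .post_unpatch = demo_post_unpatch,
                    },
            },
            { }
    };
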
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 1957635e6d5f..85abc2915e8d 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -198,7 +198,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
static inline bool llist_empty(const struct llist_head *head)
{
- return ACCESS_ONCE(head->first) == NULL;
+ return READ_ONCE(head->first) == NULL;
}
static inline struct llist_node *llist_next(struct llist_node *node)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index f301d31b473c..a842551fe044 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -528,6 +528,11 @@ static inline void lockdep_on(void)
*/
struct lock_class_key { };
+/*
+ * The lockdep_map takes no space if lockdep is disabled:
+ */
+struct lockdep_map { };
+
#define lockdep_depth(tsk) (0)
#define lockdep_is_held_type(l, r) (1)
@@ -720,9 +725,24 @@ do { \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0)
+
+#define lockdep_assert_irqs_enabled() do { \
+ WARN_ONCE(debug_locks && !current->lockdep_recursion && \
+ !current->hardirqs_enabled, \
+ "IRQs not enabled as expected\n"); \
+ } while (0)
+
+#define lockdep_assert_irqs_disabled() do { \
+ WARN_ONCE(debug_locks && !current->lockdep_recursion && \
+ current->hardirqs_enabled, \
+ "IRQs not disabled as expected\n"); \
+ } while (0)
+
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
+# define lockdep_assert_irqs_enabled() do { } while (0)
+# define lockdep_assert_irqs_disabled() do { } while (0)
#endif
#ifdef CONFIG_LOCKDEP
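
The new asserts are intended to replace open-coded interrupt-state checks in code with documented context expectations; a minimal illustration:

    /* e.g. at the top of a helper that must run with interrupts off */
    lockdep_assert_irqs_disabled();     /* replaces WARN_ON_ONCE(!irqs_disabled()) */
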
diff --git a/include/linux/log2.h b/include/linux/log2.h
index c373295f359f..41a1ae010993 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -37,19 +37,23 @@ int __ilog2_u64(u64 n)
}
#endif
-/*
- * Determine whether some value is a power of two, where zero is
+/**
+ * is_power_of_2() - check if a value is a power of two
+ * @n: the value to check
+ *
+ * Determine whether some value is a power of two, where zero is
* *not* considered a power of two.
+ * Return: true if @n is a power of 2, otherwise false.
*/
-
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
-/*
- * round up to nearest power of two
+/**
+ * __roundup_pow_of_two() - round up to nearest power of two
+ * @n: value to round up
*/
static inline __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
@@ -57,8 +61,9 @@ unsigned long __roundup_pow_of_two(unsigned long n)
return 1UL << fls_long(n - 1);
}
-/*
- * round down to nearest power of two
+/**
+ * __rounddown_pow_of_two() - round down to nearest power of two
+ * @n: value to round down
*/
static inline __attribute__((const))
unsigned long __rounddown_pow_of_two(unsigned long n)
@@ -67,12 +72,12 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
}
/**
- * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
- * @n - parameter
+ * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value
+ * @n: parameter
*
* constant-capable log of base 2 calculation
* - this can be used to initialise global variables from constant data, hence
- * the massive ternary operator construction
+ * the massive ternary operator construction
*
* selects the appropriately-sized optimised version depending on sizeof(n)
*/
@@ -150,7 +155,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
/**
* roundup_pow_of_two - round the given value up to nearest power of two
- * @n - parameter
+ * @n: parameter
*
* round the given value up to the nearest power of two
* - the result is undefined when n == 0
@@ -167,7 +172,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
/**
* rounddown_pow_of_two - round the given value down to nearest power of two
- * @n - parameter
+ * @n: parameter
*
* round the given value down to the nearest power of two
* - the result is undefined when n == 0
@@ -180,6 +185,12 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
__rounddown_pow_of_two(n) \
)
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+ return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
+
/**
* order_base_2 - calculate the (rounded up) base 2 order of the argument
* @n: parameter
@@ -193,13 +204,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
* ob2(5) = 3
* ... and so on.
*/
-
-static inline __attribute_const__
-int __order_base_2(unsigned long n)
-{
- return n > 1 ? ilog2(n - 1) + 1 : 0;
-}
-
#define order_base_2(n) \
( \
__builtin_constant_p(n) ? ( \
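
Both helpers are constant-foldable, so they work for static sizing as well as runtime values; a couple of illustrative results:

    BUILD_BUG_ON(!is_power_of_2(4096));
    order = order_base_2(33);           /* 6: 33 rounds up to 64 = 2^6 */
    pages = roundup_pow_of_two(33);     /* 64 */
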
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 082de345b73c..837f2f2d1d34 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -12,6 +12,11 @@
/**
* div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
+ * @remainder: pointer to unsigned 32bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*
* This is commonly provided by 32bit archs to provide an optimized 64bit
* divide.
@@ -24,6 +29,11 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
/**
* div_s64_rem - signed 64bit divide with 32bit divisor with remainder
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 32bit divisor
+ * @remainder: pointer to signed 32bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*/
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
@@ -33,6 +43,11 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
/**
* div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 64bit divisor
+ * @remainder: pointer to unsigned 64bit remainder
+ *
+ * Return: sets ``*remainder``, then returns dividend / divisor
*/
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
@@ -42,6 +57,10 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
/**
* div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 64bit divisor
+ *
+ * Return: dividend / divisor
*/
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
@@ -50,6 +69,10 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
/**
* div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 64bit divisor
+ *
+ * Return: dividend / divisor
*/
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
@@ -89,6 +112,8 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
/**
* div_u64 - unsigned 64bit divide with 32bit divisor
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
*
* This is the most common 64bit divide and should be used if possible,
* as many 32bit archs can optimize this variant better than a full 64bit
@@ -104,6 +129,8 @@ static inline u64 div_u64(u64 dividend, u32 divisor)
/**
* div_s64 - signed 64bit divide with 32bit divisor
+ * @dividend: signed 64bit dividend
+ * @divisor: signed 32bit divisor
*/
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
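
The _rem variants return the quotient and store the remainder through the pointer, which is the usual idiom for splitting a nanosecond count into seconds and a sub-second part; a sketch:

    u32 rem;
    u64 secs = div_u64_rem(ns, NSEC_PER_SEC, &rem);  /* secs = ns / 1e9, rem = ns % 1e9 */
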
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 265a9cd21cb4..b310a9c18113 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -23,11 +23,14 @@
#define sme_me_mask 0ULL
+static inline bool sme_active(void) { return false; }
+static inline bool sev_active(void) { return false; }
+
#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
-static inline bool sme_active(void)
+static inline bool mem_encrypt_active(void)
{
- return !!sme_me_mask;
+ return sme_me_mask;
}
static inline u64 sme_get_me_mask(void)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index bae11c7e7bf3..7ed0f7782d16 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -237,6 +237,22 @@ unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);
for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
nid, flags, p_start, p_end, p_nid)
+/**
+ * for_each_resv_unavail_range - iterate through reserved and unavailable memory
+ * @i: u64 used as loop variable
+ * @flags: pick from blocks based on memory attributes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ *
+ * Walks over unavailable but reserved (reserved && !memory) areas of memblock.
+ * Available as soon as memblock is initialized.
+ * Note: because this memory does not belong to any physical node, flags and
+ * nid arguments do not make sense and thus are not exported as arguments.
+ */
+#define for_each_resv_unavail_range(i, p_start, p_end) \
+ for_each_mem_range(i, &memblock.reserved, &memblock.memory, \
+ NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)
+
static inline void memblock_set_region_flags(struct memblock_region *r,
unsigned long flags)
{
@@ -389,10 +405,10 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
region++)
-#define for_each_memblock_type(memblock_type, rgn) \
- for (idx = 0, rgn = &memblock_type->regions[0]; \
- idx < memblock_type->cnt; \
- idx++, rgn = &memblock_type->regions[idx])
+#define for_each_memblock_type(i, memblock_type, rgn) \
+ for (i = 0, rgn = &memblock_type->regions[0]; \
+ i < memblock_type->cnt; \
+ i++, rgn = &memblock_type->regions[i])
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
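
The new iterator walks ranges that memblock has reserved but that are not covered by memblock.memory; a minimal sketch of its use:

    phys_addr_t start, end;
    u64 i;

    for_each_resv_unavail_range(i, &start, &end)
            pr_info("reserved, not in memory map: %pa-%pa\n", &start, &end);
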
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index bfeecf179895..f72dc53848d7 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -174,6 +174,9 @@ struct arizona_pdata {
/** Mode for outputs */
int out_mono[ARIZONA_MAX_OUTPUT];
+ /** Limit output volumes */
+ unsigned int out_vol_limit[2 * ARIZONA_MAX_OUTPUT];
+
/** PDM speaker mute setting */
unsigned int spk_mute[ARIZONA_MAX_PDM_SPK];
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index e9c908c4fba8..78dc85365c4f 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -131,6 +131,9 @@ enum axp20x_variants {
#define AXP803_DCDC6_V_OUT 0x25
#define AXP803_DCDC_FREQ_CTRL 0x3b
+/* Other DCDC regulator control registers are the same as AXP803 */
+#define AXP813_DCDC7_V_OUT 0x26
+
/* Interrupt */
#define AXP152_IRQ1_EN 0x40
#define AXP152_IRQ2_EN 0x41
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index c19303b0ccfd..b8908bf8d315 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -245,10 +245,13 @@ enum max77843_irq_muic {
#define MAX77843_CHG_OVER_CURRENT_BAT (0x06 << 4)
/* MAX77843 CHG_CNFG_00 register */
+#define MAX77843_CHG_MODE_MASK 0x0f
#define MAX77843_CHG_DISABLE 0x00
#define MAX77843_CHG_ENABLE 0x05
#define MAX77843_CHG_MASK 0x01
+#define MAX77843_CHG_OTG_MASK 0x02
#define MAX77843_CHG_BUCK_MASK 0x04
+#define MAX77843_CHG_BOOST_MASK 0x08
/* MAX77843 CHG_CNFG_01 register */
#define MAX77843_CHG_RESTART_THRESHOLD_100 0x00
@@ -347,6 +350,7 @@ enum max77843_irq_muic {
/* MAX77843 CONTROL register */
#define MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT 0
#define MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT 3
+#define MAX77843_MUIC_CONTROL1_NOBCCOMP_SHIFT 6
#define MAX77843_MUIC_CONTROL1_IDBEN_SHIFT 7
#define MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT 0
#define MAX77843_MUIC_CONTROL2_ADCEN_SHIFT 1
@@ -363,6 +367,7 @@ enum max77843_irq_muic {
#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT)
#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)
#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(MAX77843_MUIC_CONTROL1_IDBEN_SHIFT)
+#define MAX77843_MUIC_CONTROL1_NOBCCOMP_MASK BIT(MAX77843_MUIC_CONTROL1_NOBCCOMP_SHIFT)
#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT)
#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(MAX77843_MUIC_CONTROL2_ADCEN_SHIFT)
#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(MAX77843_MUIC_CONTROL2_CPEN_SHIFT)
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 6dec43826303..3c8568aa82a5 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -20,7 +20,7 @@
#include <linux/leds.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/of_gpio.h>
#include <linux/usb/phy_companion.h>
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index 116816fb9110..a2a1318a3d0c 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -334,6 +334,7 @@
#define DCM_DRP_RD_DATA_H 0xFC29
#define SD_VPCLK0_CTL 0xFC2A
#define SD_VPCLK1_CTL 0xFC2B
+#define PHASE_SELECT_MASK 0x1F
#define SD_DCMPS0_CTL 0xFC2C
#define SD_DCMPS1_CTL 0xFC2D
#define SD_VPTX_CTL SD_VPCLK0_CTL
@@ -573,6 +574,12 @@
#define MSGTXDATA3 0xFE47
#define MSGTXCTL 0xFE48
#define LTR_CTL 0xFE4A
+#define LTR_TX_EN_MASK BIT(7)
+#define LTR_TX_EN_1 BIT(7)
+#define LTR_TX_EN_0 0
+#define LTR_LATENCY_MODE_MASK BIT(6)
+#define LTR_LATENCY_MODE_HW 0
+#define LTR_LATENCY_MODE_SW BIT(6)
#define OBFF_CFG 0xFE4C
#define CDRESUMECTL 0xFE52
@@ -616,11 +623,15 @@
#define L1SUB_CONFIG2 0xFE8E
#define L1SUB_AUTO_CFG 0x02
#define L1SUB_CONFIG3 0xFE8F
+#define L1OFF_MBIAS2_EN_5250 BIT(7)
#define DUMMY_REG_RESET_0 0xFE90
#define AUTOLOAD_CFG_BASE 0xFF00
#define PETXCFG 0xFF03
+#define FORCE_CLKREQ_DELINK_MASK BIT(7)
+#define FORCE_CLKREQ_LOW 0x80
+#define FORCE_CLKREQ_HIGH 0x00
#define PM_CTRL1 0xFF44
#define CD_RESUME_EN_MASK 0xF0
@@ -844,6 +855,9 @@
#define PHY_DIG1E_RX_EN_KEEP 0x0001
#define PHY_DUM_REG 0x1F
+#define PCR_ASPM_SETTING_REG1 0x160
+#define PCR_ASPM_SETTING_REG2 0x168
+
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
#define PCR_SETTING_REG3 0x747
@@ -876,14 +890,79 @@ struct pcr_ops {
int (*conv_clk_and_div_n)(int clk, int dir);
void (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
+
+ void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
+ int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency);
+ int (*set_l1off_sub)(struct rtsx_pcr *pcr, u8 val);
+ void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active);
+ void (*full_on)(struct rtsx_pcr *pcr);
+ void (*power_saving)(struct rtsx_pcr *pcr);
};
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
+#define ASPM_L1_1_EN_MASK BIT(3)
+#define ASPM_L1_2_EN_MASK BIT(2)
+#define PM_L1_1_EN_MASK BIT(1)
+#define PM_L1_2_EN_MASK BIT(0)
+
+#define ASPM_L1_1_EN BIT(0)
+#define ASPM_L1_2_EN BIT(1)
+#define PM_L1_1_EN BIT(2)
+#define PM_L1_2_EN BIT(3)
+#define LTR_L1SS_PWR_GATE_EN BIT(4)
+#define L1_SNOOZE_TEST_EN BIT(5)
+#define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6)
+
+enum dev_aspm_mode {
+ DEV_ASPM_DISABLE = 0,
+ DEV_ASPM_DYNAMIC,
+ DEV_ASPM_BACKDOOR,
+ DEV_ASPM_STATIC,
+};
+
+/*
+ * struct rtsx_cr_option - card reader option
+ * @dev_flags: device flags
+ * @force_clkreq_0: force clock request
+ * @ltr_en: enable ltr mode flag
+ * @ltr_enabled: ltr mode in configure space flag
+ * @ltr_active: ltr mode status
+ * @ltr_active_latency: ltr mode active latency
+ * @ltr_idle_latency: ltr mode idle latency
+ * @ltr_l1off_latency: ltr mode l1off latency
+ * @dev_aspm_mode: device aspm mode
+ * @l1_snooze_delay: l1 snooze delay
+ * @ltr_l1off_sspwrgate: ltr l1off sspwrgate
+ * @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate
+ */
+struct rtsx_cr_option {
+ u32 dev_flags;
+ bool force_clkreq_0;
+ bool ltr_en;
+ bool ltr_enabled;
+ bool ltr_active;
+ u32 ltr_active_latency;
+ u32 ltr_idle_latency;
+ u32 ltr_l1off_latency;
+ enum dev_aspm_mode dev_aspm_mode;
+ u32 l1_snooze_delay;
+ u8 ltr_l1off_sspwrgate;
+ u8 ltr_l1off_snooze_sspwrgate;
+};
+
+#define rtsx_set_dev_flag(cr, flag) \
+ ((cr)->option.dev_flags |= (flag))
+#define rtsx_clear_dev_flag(cr, flag) \
+ ((cr)->option.dev_flags &= ~(flag))
+#define rtsx_check_dev_flag(cr, flag) \
+ ((cr)->option.dev_flags & (flag))
+
struct rtsx_pcr {
struct pci_dev *pci;
unsigned int id;
int pcie_cap;
+ struct rtsx_cr_option option;
/* pci resources */
unsigned long addr;
@@ -940,6 +1019,7 @@ struct rtsx_pcr {
u8 card_drive_sel;
#define ASPM_L1_EN 0x02
u8 aspm_en;
+ bool aspm_enabled;
#define PCR_MS_PMOS (1 << 0)
#define PCR_REVERSE_SOCKET (1 << 1)
@@ -964,6 +1044,11 @@ struct rtsx_pcr {
u8 dma_error_count;
};
+#define PID_524A 0x524A
+#define PID_5249 0x5249
+#define PID_5250 0x5250
+#define PID_525A 0x525A
+
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
#define PCI_PID(pcr) ((pcr)->pci->device)
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index eac285756b37..b5dd108421c8 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -263,7 +263,6 @@ struct tps65217_board {
struct tps65217 {
struct device *dev;
struct tps65217_board *pdata;
- unsigned long id;
struct regulator_desc desc[TPS65217_NUM_REGULATOR];
struct regmap *regmap;
u8 *strobes;
@@ -278,11 +277,6 @@ static inline struct tps65217 *dev_to_tps65217(struct device *dev)
return dev_get_drvdata(dev);
}
-static inline unsigned long tps65217_chip_id(struct tps65217 *tps65217)
-{
- return tps65217->id;
-}
-
int tps65217_reg_read(struct tps65217 *tps, unsigned int reg,
unsigned int *val);
int tps65217_reg_write(struct tps65217 *tps, unsigned int reg,
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index bccd2d68b1e3..f069c518c0ed 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -246,24 +246,6 @@ enum tps65218_irqs {
};
/**
- * struct tps_info - packages regulator constraints
- * @id: Id of the regulator
- * @name: Voltage regulator name
- * @min_uV: minimum micro volts
- * @max_uV: minimum micro volts
- * @strobe: sequencing strobe value for the regulator
- *
- * This data is used to check the regualtor voltage limits while setting.
- */
-struct tps_info {
- int id;
- const char *name;
- int min_uV;
- int max_uV;
- int strobe;
-};
-
-/**
* struct tps65218 - tps65218 sub-driver chip access routines
*
* Device data may be used to access the TPS65218 chip
@@ -280,7 +262,6 @@ struct tps65218 {
u32 irq_mask;
struct regmap_irq_chip_data *irq_data;
struct regulator_desc desc[TPS65218_NUM_REGULATOR];
- struct tps_info *info[TPS65218_NUM_REGULATOR];
struct regmap *regmap;
u8 *strobes;
};
diff --git a/include/linux/mfd/wm97xx.h b/include/linux/mfd/wm97xx.h
new file mode 100644
index 000000000000..45fb54f19d09
--- /dev/null
+++ b/include/linux/mfd/wm97xx.h
@@ -0,0 +1,25 @@
+/*
+ * wm97xx client interface
+ *
+ * Copyright (C) 2017 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_MFD_WM97XX_H
+#define __LINUX_MFD_WM97XX_H
+
+struct regmap;
+struct wm97xx_batt_pdata;
+struct snd_ac97;
+
+struct wm97xx_platform_data {
+ struct snd_ac97 *ac97;
+ struct regmap *regmap;
+ struct wm97xx_batt_pdata *batt_pdata;
+};
+
+#endif
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 09cebe528488..508e8cc5ee86 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -136,6 +136,9 @@ enum {
MLX4_CQE_BAD_FCS = 1 << 4,
};
+#define MLX4_MAX_CQ_PERIOD (BIT(16) - 1)
+#define MLX4_MAX_CQ_COUNT (BIT(16) - 1)
+
static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
void __iomem *uar_page,
spinlock_t *doorbell_lock)
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 6a57ec2f1ef7..48c181a2acc9 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -125,11 +125,16 @@ struct mlx5_cq_modify_params {
enum {
CQE_SIZE_64 = 0,
CQE_SIZE_128 = 1,
+ CQE_SIZE_128_PAD = 2,
};
-static inline int cqe_sz_to_mlx_sz(u8 size)
+#define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
+#define MLX5_MAX_CQ_COUNT (BIT(__mlx5_bit_sz(cqc, cq_max_count)) - 1)
+
+static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en)
{
- return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
+ return padding_128_en ? CQE_SIZE_128_PAD :
+ size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
}
static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 3e5363f760dd..38a7577a9ce7 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -614,7 +614,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 swp[0x1];
u8 swp_csum[0x1];
u8 swp_lso[0x1];
- u8 reserved_at_23[0x1d];
+ u8 reserved_at_23[0x1b];
+ u8 max_geneve_opt_len[0x1];
+ u8 tunnel_stateless_geneve_rx[0x1];
u8 reserved_at_40[0x10];
u8 lro_min_mss_size[0x10];
@@ -744,6 +746,7 @@ enum {
MLX5_WQ_TYPE_LINKED_LIST = 0x0,
MLX5_WQ_TYPE_CYCLIC = 0x1,
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2,
+ MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ = 0x3,
};
enum {
@@ -1047,7 +1050,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 num_of_uars_per_page[0x20];
u8 reserved_at_540[0x40];
- u8 reserved_at_580[0x3f];
+ u8 reserved_at_580[0x3d];
+ u8 cqe_128_always[0x1];
+ u8 cqe_compression_128[0x1];
u8 cqe_compression[0x1];
u8 cqe_compression_timeout[0x10];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 43edf659453b..c7b1d617dff6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -96,6 +96,15 @@ extern int mmap_rnd_compat_bits __read_mostly;
#endif
/*
+ * On some architectures it is expensive to call memset() for small sizes.
+ * Those architectures should provide their own implementation of "struct page"
+ * zeroing by defining this macro in <asm/pgtable.h>.
+ */
+#ifndef mm_zero_struct_page
+#define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page)))
+#endif
+
+/*
* Default maximum number of active map areas, this limits the number of vmas
* per mm struct. Users can overwrite this number by sysctl but there is a
* problem.
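mm_zero_struct_page() gives architectures a hook to zero a struct page with something cheaper than a small memset() call. A hypothetical override in <asm/pgtable.h> might look like the sketch below; the unrolled-store approach and helper name are assumptions, not taken from any architecture in this patch:

/*
 * Hypothetical <asm/pgtable.h> override: zero struct page with plain
 * word stores instead of calling memset() for a small size.
 */
#define mm_zero_struct_page(pp) __arch_zero_struct_page(pp)

static inline void __arch_zero_struct_page(struct page *page)
{
        unsigned long *p = (unsigned long *)page;
        unsigned int i;

        BUILD_BUG_ON(sizeof(struct page) % sizeof(unsigned long) != 0);
        for (i = 0; i < sizeof(struct page) / sizeof(unsigned long); i++)
                p[i] = 0;
}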
@@ -1431,7 +1440,13 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
-void cancel_dirty_page(struct page *page);
+void __cancel_dirty_page(struct page *page);
+static inline void cancel_dirty_page(struct page *page)
+{
+ /* Avoid atomic ops, locking, etc. when not actually needed. */
+ if (PageDirty(page))
+ __cancel_dirty_page(page);
+}
int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
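cancel_dirty_page() becomes a cheap inline PageDirty() test with the slow path moved to __cancel_dirty_page(), so existing callers need no change but clean pages no longer pay for locking and atomics. A hedged sketch of the unchanged caller-side pattern (the helper is illustrative, not a real call site):

/*
 * Caller side is unchanged; the inline test makes this cheap when the
 * page is already clean.
 */
static void example_forget_page(struct page *page)
{
        cancel_dirty_page(page);        /* no-op unless PageDirty() */
        ClearPageUptodate(page);
}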
@@ -1599,26 +1614,32 @@ static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif
-#ifdef __PAGETABLE_PUD_FOLDED
+#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
unsigned long address)
{
return 0;
}
+static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
+static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
+
#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
-#endif
-#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
-static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
- unsigned long address)
+static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
- return 0;
+ atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
-static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
+static inline void mm_dec_nr_puds(struct mm_struct *mm)
+{
+ atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
+}
+#endif
-static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
+static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+ unsigned long address)
{
return 0;
}
@@ -1629,25 +1650,47 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
-static inline void mm_nr_pmds_init(struct mm_struct *mm)
+static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
- atomic_long_set(&mm->nr_pmds, 0);
+ atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
-static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
- return atomic_long_read(&mm->nr_pmds);
+ atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
+#endif
-static inline void mm_inc_nr_pmds(struct mm_struct *mm)
+#ifdef CONFIG_MMU
+static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
- atomic_long_inc(&mm->nr_pmds);
+ atomic_long_set(&mm->pgtables_bytes, 0);
}
-static inline void mm_dec_nr_pmds(struct mm_struct *mm)
+static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
+{
+ return atomic_long_read(&mm->pgtables_bytes);
+}
+
+static inline void mm_inc_nr_ptes(struct mm_struct *mm)
+{
+ atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
+}
+
+static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
- atomic_long_dec(&mm->nr_pmds);
+ atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
+#else
+
+static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
+static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
+static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
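The per-level nr_ptes/nr_pmds counters are replaced by a single mm->pgtables_bytes count; each level's inc/dec helper now adds or subtracts the size of one full table (PTRS_PER_x * sizeof(x_t)). A hedged sketch of how an allocation path pairs the new helpers; locking and the pud_present() recheck of the real __pmd_alloc() are omitted for brevity:

/*
 * Hedged sketch only: the real callers live in mm/memory.c and in
 * per-architecture page table code.
 */
static int example_alloc_pmd(struct mm_struct *mm, pud_t *pud,
                             unsigned long addr)
{
        pmd_t *new = pmd_alloc_one(mm, addr);

        if (!new)
                return -ENOMEM;

        mm_inc_nr_pmds(mm);     /* accounts PTRS_PER_PMD * sizeof(pmd_t) */
        pud_populate(mm, pud, new);
        return 0;
}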
@@ -2002,6 +2045,12 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
struct mminit_pfnnid_cache *state);
#endif
+#ifdef CONFIG_HAVE_MEMBLOCK
+void zero_resv_unavail(void);
+#else
+static inline void zero_resv_unavail(void) {}
+#endif
+
extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
unsigned long, enum memmap_context);
@@ -2496,7 +2545,7 @@ void vmemmap_populate_print_last(void);
void vmemmap_free(unsigned long start, unsigned long end);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
- unsigned long size);
+ unsigned long nr_pages);
enum mf_flags {
MF_COUNT_INCREASED = 1 << 0,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index c85f11dafd56..cfd0ac4e5e0e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -48,8 +48,10 @@ struct page {
* inode address_space, or NULL.
* If page mapped as anonymous
* memory, low bit is set, and
- * it points to anon_vma object:
- * see PAGE_MAPPING_ANON below.
+ * it points to anon_vma object
+ * or KSM private structure. See
+ * PAGE_MAPPING_ANON and
+ * PAGE_MAPPING_KSM.
*/
void *s_mem; /* slab first object */
atomic_t compound_mapcount; /* first tail page */
@@ -207,14 +209,6 @@ struct page {
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
-#ifdef CONFIG_KMEMCHECK
- /*
- * kmemcheck wants to track the status of each byte in a page; this
- * is a pointer to such a status block. NULL if not tracked.
- */
- void *shadow;
-#endif
-
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
#endif
@@ -399,9 +393,8 @@ struct mm_struct {
*/
atomic_t mm_count;
- atomic_long_t nr_ptes; /* PTE page table pages */
-#if CONFIG_PGTABLE_LEVELS > 2
- atomic_long_t nr_pmds; /* PMD page table pages */
+#ifdef CONFIG_MMU
+ atomic_long_t pgtables_bytes; /* PTE page table pages */
#endif
int map_count; /* number of VMAs */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 9a43763a68ad..e7743eca1021 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -255,6 +255,10 @@ struct mmc_supply {
struct regulator *vqmmc; /* Optional Vccq supply */
};
+struct mmc_ctx {
+ struct task_struct *task;
+};
+
struct mmc_host {
struct device *parent;
struct device class_dev;
@@ -350,6 +354,8 @@ struct mmc_host {
#define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */
#define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */
+ int fixed_drv_type; /* fixed driver type for non-removable media */
+
mmc_pm_flag_t pm_caps; /* supported pm features */
/* host specific block data */
@@ -388,8 +394,9 @@ struct mmc_host {
struct mmc_card *card; /* device attached to this host */
wait_queue_head_t wq;
- struct task_struct *claimer; /* task that has host claimed */
+ struct mmc_ctx *claimer; /* context that has host claimed */
int claim_cnt; /* "claim" nesting count */
+ struct mmc_ctx default_ctx; /* default context */
struct delayed_work detect;
int detect_change; /* card detect flag */
@@ -469,6 +476,8 @@ void mmc_detect_change(struct mmc_host *, unsigned long delay);
void mmc_request_done(struct mmc_host *, struct mmc_request *);
void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
+void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq);
+
static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
host->ops->enable_sdio_irq(host, 0);
diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h
index 36f986d4a59a..1d42872d22f3 100644
--- a/include/linux/mmc/sdhci-pci-data.h
+++ b/include/linux/mmc/sdhci-pci-data.h
@@ -15,7 +15,4 @@ struct sdhci_pci_data {
extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
int slotno);
-
-extern int sdhci_pci_spt_drive_strength;
-
#endif
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 2cf1c3c807f6..b25dc9db19fc 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -156,7 +156,8 @@ struct mmu_notifier_ops {
* shared page-tables, it not necessary to implement the
* invalidate_range_start()/end() notifiers, as
* invalidate_range() alread catches the points in time when an
- * external TLB range needs to be flushed.
+ * external TLB range needs to be flushed. For more in depth
+ * discussion on this see Documentation/vm/mmu_notifier.txt
*
* The invalidate_range() function is called under the ptl
* spin-lock and not allowed to sleep.
@@ -213,7 +214,8 @@ extern void __mmu_notifier_change_pte(struct mm_struct *mm,
extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end,
+ bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
unsigned long start, unsigned long end);
@@ -267,7 +269,14 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_range_end(mm, start, end);
+ __mmu_notifier_invalidate_range_end(mm, start, end, false);
+}
+
+static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_invalidate_range_end(mm, start, end, true);
}
static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
@@ -438,6 +447,11 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
{
}
+static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
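The new mmu_notifier_invalidate_range_only_end() is for callers that have already flushed secondary TLBs inside the start/end bracket (for example via ptep_clear_flush_notify() under the page table lock) and therefore want _end semantics without a second ->invalidate_range() call. A hedged sketch of that bracketing pattern:

/*
 * Hedged sketch of the bracketing pattern; the PTE manipulation in the
 * middle is only indicated by the comment.
 */
static void example_replace_pte(struct mm_struct *mm, unsigned long start,
                                unsigned long end)
{
        mmu_notifier_invalidate_range_start(mm, start, end);
        /*
         * ... replace the PTE with ptep_clear_flush_notify() under the
         * page table lock, which already issues ->invalidate_range()
         * for this range ...
         */
        mmu_notifier_invalidate_range_only_end(mm, start, end);
}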
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index c9c4a81b9767..67f2e3c38939 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -700,7 +700,8 @@ typedef struct pglist_data {
* is the first PFN that needs to be initialised.
*/
unsigned long first_deferred_pfn;
- unsigned long static_init_size;
+ /* Number of non-deferred pages */
+ unsigned long static_init_pgcnt;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -712,12 +713,6 @@ typedef struct pglist_data {
/* Fields commonly accessed by the page reclaim scanner */
struct lruvec lruvec;
- /*
- * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
- * this node's LRU. Maintained by the pageout code.
- */
- unsigned int inactive_ratio;
-
unsigned long flags;
ZONE_PADDING(_pad2_)
@@ -1151,13 +1146,17 @@ struct mem_section {
#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
#ifdef CONFIG_SPARSEMEM_EXTREME
-extern struct mem_section *mem_section[NR_SECTION_ROOTS];
+extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif
static inline struct mem_section *__nr_to_section(unsigned long nr)
{
+#ifdef CONFIG_SPARSEMEM_EXTREME
+ if (!mem_section)
+ return NULL;
+#endif
if (!mem_section[SECTION_NR_TO_ROOT(nr)])
return NULL;
return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
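With CONFIG_SPARSEMEM_EXTREME the first-level mem_section array is now a pointer allocated at boot rather than a static table, which is why __nr_to_section() must tolerate a NULL root pointer very early in boot. A hedged sketch of the matching boot-time allocation (the real code lives in mm/sparse.c; the alignment choice here is an assumption):

#ifdef CONFIG_SPARSEMEM_EXTREME
static void __init example_alloc_mem_section_roots(void)
{
        unsigned long size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;

        mem_section = memblock_virt_alloc(size, SMP_CACHE_BYTES);
}
#endif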
diff --git a/include/linux/module.h b/include/linux/module.h
index fe5aa3736707..c69b49abe877 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -639,6 +639,8 @@ static inline bool is_livepatch_module(struct module *mod)
}
#endif /* CONFIG_LIVEPATCH */
+bool is_module_sig_enforced(void);
+
#else /* !CONFIG_MODULES... */
static inline struct module *__module_address(unsigned long addr)
@@ -753,6 +755,11 @@ static inline bool module_requested_async_probing(struct module *module)
return false;
}
+static inline bool is_module_sig_enforced(void)
+{
+ return false;
+}
+
#endif /* CONFIG_MODULES */
#ifdef CONFIG_SYSFS
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 1d7140fef154..ba36506db4fb 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -228,19 +228,11 @@ struct kparam_array
VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } }
/* Obsolete - use module_param_cb() */
-#define module_param_call(name, set, get, arg, perm) \
- static const struct kernel_param_ops __param_ops_##name = \
- { .flags = 0, (void *)set, (void *)get }; \
+#define module_param_call(name, _set, _get, arg, perm) \
+ static const struct kernel_param_ops __param_ops_##name = \
+ { .flags = 0, .set = _set, .get = _get }; \
__module_param_call(MODULE_PARAM_PREFIX, \
- name, &__param_ops_##name, arg, \
- (perm) + sizeof(__check_old_set_param(set))*0, -1, 0)
-
-/* We don't get oldget: it's often a new-style param_get_uint, etc. */
-static inline int
-__check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
-{
- return 0;
-}
+ name, &__param_ops_##name, arg, perm, -1, 0)
#ifdef CONFIG_SYSFS
extern void kernel_param_lock(struct module *mod);
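With the __check_old_set_param() shim and the void* casts gone, module_param_call() now requires callbacks with the real kernel_param_ops prototypes, so type mismatches are caught at build time. A hedged usage sketch with a custom setter and the stock integer getter:

/*
 * Hedged usage sketch: the callbacks must now match the
 * kernel_param_ops .set/.get prototypes exactly.
 */
#include <linux/moduleparam.h>

static int example_value;

static int example_set(const char *val, const struct kernel_param *kp)
{
        pr_info("example param set to %s\n", val);
        return param_set_int(val, kp);
}

module_param_call(example, example_set, param_get_int, &example_value, 0644);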
diff --git a/include/linux/msi.h b/include/linux/msi.h
index cdd069cf9ed8..1f1bbb5b4679 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -284,6 +284,11 @@ enum {
MSI_FLAG_PCI_MSIX = (1 << 3),
/* Needs early activate, required for PCI */
MSI_FLAG_ACTIVATE_EARLY = (1 << 4),
+ /*
+ * Must reactivate when irq is started even when
+ * MSI_FLAG_ACTIVATE_EARLY has been set.
+ */
+ MSI_FLAG_MUST_REACTIVATE = (1 << 5),
};
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
diff --git a/include/linux/net.h b/include/linux/net.h
index d97d80d7fdf8..caeb159abda5 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -22,7 +22,6 @@
#include <linux/random.h>
#include <linux/wait.h>
#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
-#include <linux/kmemcheck.h>
#include <linux/rcupdate.h>
#include <linux/once.h>
#include <linux/fs.h>
@@ -111,9 +110,7 @@ struct socket_wq {
struct socket {
socket_state state;
- kmemcheck_bitfield_begin(type);
short type;
- kmemcheck_bitfield_end(type);
unsigned long flags;
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 414a5e769fde..495ba4dd9da5 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -67,7 +67,7 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
* @ss: The nfnetlink subsystem ID
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * both the smp_read_barrier_depends() and the READ_ONCE(), because
* caller holds the NFNL subsystem mutex.
*/
#define nfnl_dereference(p, ss) \
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 6ddb4a5da371..49b4257ce1ea 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -17,9 +17,6 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
}
enum netlink_skb_flags {
- NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */
- NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */
- NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */
NETLINK_SKB_DST = 0x8, /* Dst set in sendto or sendmsg */
};
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index de1c50b93c61..15cab3967d6d 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -104,7 +104,9 @@ extern nodemask_t _unused_nodemask_arg_;
*
* Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
*/
-#define nodemask_pr_args(maskp) MAX_NUMNODES, (maskp)->bits
+#define nodemask_pr_args(maskp) \
+ ((maskp) != NULL) ? MAX_NUMNODES : 0, \
+ ((maskp) != NULL) ? (maskp)->bits : NULL
/*
* The inline keyword gives the compiler room to decide to inline, or
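nodemask_pr_args() now tolerates a NULL mask pointer, printing an empty list instead of dereferencing NULL, so "%*pb[l]" callers no longer need their own NULL checks. A hedged usage sketch:

/*
 * Hedged usage sketch: safe even when the caller passes a NULL mask.
 */
static void example_show_nodes(const nodemask_t *mask)
{
        pr_info("allowed nodes: %*pbl\n", nodemask_pr_args(mask));
}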
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index a726f96010d5..496ff759f84c 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -40,6 +40,8 @@
* @node_name: FC WWNN for the port
* @port_name: FC WWPN for the port
* @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx)
+ * @dev_loss_tmo: maximum delay for reconnects to an association on
+ * this device. Used only on a remoteport.
*
* Initialization values for dynamic port fields:
* @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
@@ -50,6 +52,7 @@ struct nvme_fc_port_info {
u64 port_name;
u32 port_role;
u32 port_id;
+ u32 dev_loss_tmo;
};
@@ -102,8 +105,6 @@ enum nvmefc_fcp_datadir {
};
-#define NVME_FC_MAX_SEGMENTS 256
-
/**
* struct nvmefc_fcp_req - Request structure passed from NVME-FC transport
* to LLDD in order to perform a NVME FCP IO operation.
@@ -202,6 +203,9 @@ enum nvme_fc_obj_state {
* The length of the buffer corresponds to the local_priv_sz
* value specified in the nvme_fc_port_template supplied by
* the LLDD.
+ * @dev_loss_tmo: maximum delay for reconnects to an association on
+ * this device. To modify, lldd must call
+ * nvme_fc_set_remoteport_devloss().
*
* Fields with dynamic values. Values may change base on link state. LLDD
* may reference fields directly to change them. Initialized by the
@@ -259,10 +263,9 @@ struct nvme_fc_remote_port {
u32 port_role;
u64 node_name;
u64 port_name;
-
struct nvme_fc_local_port *localport;
-
void *private;
+ u32 dev_loss_tmo;
/* dynamic fields */
u32 port_id;
@@ -446,6 +449,10 @@ int nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
int nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *remoteport);
+void nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport);
+
+int nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *remoteport,
+ u32 dev_loss_tmo);
/*
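Remote ports gain a dev_loss_tmo field, and LLDDs get nvme_fc_set_remoteport_devloss() to change it after registration instead of poking the structure directly. A hedged LLDD-side sketch propagating a transport timeout change:

/*
 * Hedged sketch: push a new dev_loss_tmo to an already-registered
 * NVMe-FC remote port.
 */
static void example_update_devloss(struct nvme_fc_remote_port *rport,
                                   u32 new_tmo)
{
        int ret = nvme_fc_set_remoteport_devloss(rport, new_tmo);

        if (ret)
                pr_warn("nvme-fc: devloss update failed: %d\n", ret);
}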
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 9310ce77d8e1..aea87f0d917b 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -90,6 +90,14 @@ enum {
};
#define NVME_AQ_DEPTH 32
+#define NVME_NR_AEN_COMMANDS 1
+#define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
+
+/*
+ * Subtract one to leave an empty queue entry for 'Full Queue' condition. See
+ * NVM-Express 1.2 specification, section 4.1.2.
+ */
+#define NVME_AQ_MQ_TAG_DEPTH (NVME_AQ_BLK_MQ_DEPTH - 1)
enum {
NVME_REG_CAP = 0x0000, /* Controller Capabilities */
@@ -267,6 +275,7 @@ enum {
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
+ NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
};
struct nvme_lbaf {
@@ -396,6 +405,21 @@ struct nvme_fw_slot_info_log {
};
enum {
+ NVME_CMD_EFFECTS_CSUPP = 1 << 0,
+ NVME_CMD_EFFECTS_LBCC = 1 << 1,
+ NVME_CMD_EFFECTS_NCC = 1 << 2,
+ NVME_CMD_EFFECTS_NIC = 1 << 3,
+ NVME_CMD_EFFECTS_CCC = 1 << 4,
+ NVME_CMD_EFFECTS_CSE_MASK = 3 << 16,
+};
+
+struct nvme_effects_log {
+ __le32 acs[256];
+ __le32 iocs[256];
+ __u8 resv[2048];
+};
+
+enum {
NVME_SMART_CRIT_SPARE = 1 << 0,
NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
NVME_SMART_CRIT_RELIABILITY = 1 << 2,
@@ -404,6 +428,10 @@ enum {
};
enum {
+ NVME_AER_ERROR = 0,
+ NVME_AER_SMART = 1,
+ NVME_AER_CSS = 6,
+ NVME_AER_VS = 7,
NVME_AER_NOTICE_NS_CHANGED = 0x0002,
NVME_AER_NOTICE_FW_ACT_STARTING = 0x0102,
};
@@ -681,6 +709,7 @@ enum nvme_admin_opcode {
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
nvme_admin_security_recv = 0x82,
+ nvme_admin_sanitize_nvm = 0x84,
};
enum {
@@ -712,6 +741,7 @@ enum {
NVME_LOG_ERROR = 0x01,
NVME_LOG_SMART = 0x02,
NVME_LOG_FW_SLOT = 0x03,
+ NVME_LOG_CMD_EFFECTS = 0x05,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
NVME_FWACT_REPL = (0 << 3),
diff --git a/include/linux/of.h b/include/linux/of.h
index b32d418d011a..d3dea1d1e3a9 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -37,9 +37,15 @@ struct property {
int length;
void *value;
struct property *next;
+#if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC)
unsigned long _flags;
+#endif
+#if defined(CONFIG_OF_PROMTREE)
unsigned int unique_id;
+#endif
+#if defined(CONFIG_OF_KOBJ)
struct bin_attribute attr;
+#endif
};
#if defined(CONFIG_SPARC)
@@ -58,7 +64,9 @@ struct device_node {
struct device_node *parent;
struct device_node *child;
struct device_node *sibling;
+#if defined(CONFIG_OF_KOBJ)
struct kobject kobj;
+#endif
unsigned long _flags;
void *data;
#if defined(CONFIG_SPARC)
@@ -103,21 +111,17 @@ extern struct kobj_type of_node_ktype;
extern const struct fwnode_operations of_fwnode_ops;
static inline void of_node_init(struct device_node *node)
{
+#if defined(CONFIG_OF_KOBJ)
kobject_init(&node->kobj, &of_node_ktype);
+#endif
node->fwnode.ops = &of_fwnode_ops;
}
-/* true when node is initialized */
-static inline int of_node_is_initialized(struct device_node *node)
-{
- return node && node->kobj.state_initialized;
-}
-
-/* true when node is attached (i.e. present on sysfs) */
-static inline int of_node_is_attached(struct device_node *node)
-{
- return node && node->kobj.state_in_sysfs;
-}
+#if defined(CONFIG_OF_KOBJ)
+#define of_node_kobj(n) (&(n)->kobj)
+#else
+#define of_node_kobj(n) NULL
+#endif
#ifdef CONFIG_OF_DYNAMIC
extern struct device_node *of_node_get(struct device_node *node);
@@ -203,6 +207,7 @@ static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
clear_bit(flag, &n->_flags);
}
+#if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC)
static inline int of_property_check_flag(struct property *p, unsigned long flag)
{
return test_bit(flag, &p->_flags);
@@ -217,6 +222,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
{
clear_bit(flag, &p->_flags);
}
+#endif
extern struct device_node *__of_find_all_nodes(struct device_node *prev);
extern struct device_node *of_find_all_nodes(struct device_node *prev);
@@ -1321,9 +1327,6 @@ static inline int of_reconfig_get_state_change(unsigned long action,
}
#endif /* CONFIG_OF_DYNAMIC */
-/* CONFIG_OF_RESOLVE api */
-extern int of_resolve_phandles(struct device_node *tree);
-
/**
* of_device_is_system_power_controller - Tells if system-power-controller is found for device_node
* @np: Pointer to the given device_node
@@ -1340,7 +1343,7 @@ static inline bool of_device_is_system_power_controller(const struct device_node
*/
enum of_overlay_notify_action {
- OF_OVERLAY_PRE_APPLY,
+ OF_OVERLAY_PRE_APPLY = 0,
OF_OVERLAY_POST_APPLY,
OF_OVERLAY_PRE_REMOVE,
OF_OVERLAY_POST_REMOVE,
@@ -1354,26 +1357,26 @@ struct of_overlay_notify_data {
#ifdef CONFIG_OF_OVERLAY
/* ID based overlays; the API for external users */
-int of_overlay_create(struct device_node *tree);
-int of_overlay_destroy(int id);
-int of_overlay_destroy_all(void);
+int of_overlay_apply(struct device_node *tree, int *ovcs_id);
+int of_overlay_remove(int *ovcs_id);
+int of_overlay_remove_all(void);
int of_overlay_notifier_register(struct notifier_block *nb);
int of_overlay_notifier_unregister(struct notifier_block *nb);
#else
-static inline int of_overlay_create(struct device_node *tree)
+static inline int of_overlay_apply(struct device_node *tree, int *ovcs_id)
{
return -ENOTSUPP;
}
-static inline int of_overlay_destroy(int id)
+static inline int of_overlay_remove(int *ovcs_id)
{
return -ENOTSUPP;
}
-static inline int of_overlay_destroy_all(void)
+static inline int of_overlay_remove_all(void)
{
return -ENOTSUPP;
}
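The overlay API is renamed and now returns the changeset id through a pointer argument: of_overlay_apply(tree, &ovcs_id) replaces of_overlay_create(), and of_overlay_remove(&ovcs_id) replaces of_overlay_destroy(). A hedged migration sketch:

/*
 * Hedged migration sketch from create/destroy to apply/remove; error
 * handling trimmed for brevity.
 */
static int example_apply_overlay(struct device_node *overlay)
{
        int ovcs_id = 0;
        int ret;

        ret = of_overlay_apply(overlay, &ovcs_id);
        if (ret)
                return ret;

        /* ... use the overlaid nodes ... */

        return of_overlay_remove(&ovcs_id);
}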
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index b8ac44c9748e..30e40fb6936b 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -50,6 +50,8 @@ extern const __be32 *of_get_address(struct device_node *dev, int index,
extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node);
+extern int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node);
extern struct of_pci_range *of_pci_range_parser_one(
struct of_pci_range_parser *parser,
struct of_pci_range *range);
@@ -86,7 +88,13 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index,
static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node)
{
- return -1;
+ return -ENOSYS;
+}
+
+static inline int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ return -ENOSYS;
}
static inline struct of_pci_range *of_pci_range_parser_one(
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index ca10f43564de..1fe205582111 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -31,7 +31,7 @@ enum of_gpio_flags {
OF_GPIO_ACTIVE_LOW = 0x1,
OF_GPIO_SINGLE_ENDED = 0x2,
OF_GPIO_OPEN_DRAIN = 0x4,
- OF_GPIO_SLEEP_MAY_LOOSE_VALUE = 0x8,
+ OF_GPIO_SLEEP_MAY_LOSE_VALUE = 0x8,
};
#ifdef CONFIG_OF_GPIO
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 2f9c1f93b1ce..5d13d25da2c8 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -37,6 +37,7 @@
* @list: List entry, to attach to the padata lists.
* @pd: Pointer to the internal control structure.
* @cb_cpu: Callback cpu for serializatioon.
+ * @cpu: Cpu for parallelization.
* @seq_nr: Sequence number of the parallelized data object.
* @info: Used to pass information from the parallel to the serial function.
* @parallel: Parallel execution function.
@@ -46,6 +47,7 @@ struct padata_priv {
struct list_head list;
struct parallel_data *pd;
int cb_cpu;
+ int cpu;
int info;
void (*parallel)(struct padata_priv *padata);
void (*serial)(struct padata_priv *padata);
@@ -85,6 +87,7 @@ struct padata_serial_queue {
* @swork: work struct for serialization.
* @pd: Backpointer to the internal control structure.
* @work: work struct for parallelization.
+ * @reorder_work: work struct for reordering.
* @num_obj: Number of objects that are processed by this cpu.
* @cpu_index: Index of the cpu.
*/
@@ -93,6 +96,7 @@ struct padata_parallel_queue {
struct padata_list reorder;
struct parallel_data *pd;
struct work_struct work;
+ struct work_struct reorder_work;
atomic_t num_obj;
int cpu_index;
};
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 584b14c774c1..3ec44e27aa9d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -18,7 +18,7 @@
* Various page->flags bits:
*
* PG_reserved is set for special pages, which can never be swapped out. Some
- * of them might not even exist (eg empty_bad_page)...
+ * of them might not even exist...
*
* The PG_private bitflag is set on pagecache pages if they contain filesystem
* specific data (which is normally at page->private). It can be used by
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 05a04e603686..cdad58bbfd8b 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -31,7 +31,7 @@ static inline bool is_migrate_isolate(int migratetype)
#endif
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
- bool skip_hwpoisoned_pages);
+ int migratetype, bool skip_hwpoisoned_pages);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
int migratetype, int *num_movable);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e08b5339023c..34ce3ebf97d5 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -16,6 +16,8 @@
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>
+struct pagevec;
+
/*
* Bits in mapping->flags.
*/
@@ -116,7 +118,7 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
m->gfp_mask = mask;
}
-void release_pages(struct page **pages, int nr, bool cold);
+void release_pages(struct page **pages, int nr);
/*
* speculatively take a reference to a page.
@@ -232,15 +234,9 @@ static inline struct page *page_cache_alloc(struct address_space *x)
return __page_cache_alloc(mapping_gfp_mask(x));
}
-static inline struct page *page_cache_alloc_cold(struct address_space *x)
-{
- return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
-}
-
static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
- return mapping_gfp_mask(x) |
- __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
+ return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}
typedef int filler_t(void *, struct page *);
@@ -366,8 +362,16 @@ static inline unsigned find_get_pages(struct address_space *mapping,
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
-unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
- int tag, unsigned int nr_pages, struct page **pages);
+unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
+ pgoff_t end, int tag, unsigned int nr_pages,
+ struct page **pages);
+static inline unsigned find_get_pages_tag(struct address_space *mapping,
+ pgoff_t *index, int tag, unsigned int nr_pages,
+ struct page **pages)
+{
+ return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
+ nr_pages, pages);
+}
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
int tag, unsigned int nr_entries,
struct page **entries, pgoff_t *indices);
@@ -616,6 +620,8 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
+void delete_from_page_cache_batch(struct address_space *mapping,
+ struct pagevec *pvec);
/*
* Like add_to_page_cache_locked, but used to add newly allocated pages:
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 2636c0c0f279..5fb6580f7f23 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -17,7 +17,7 @@ struct address_space;
struct pagevec {
unsigned long nr;
- unsigned long cold;
+ bool percpu_pvec_drained;
struct page *pages[PAGEVEC_SIZE];
};
@@ -38,14 +38,22 @@ static inline unsigned pagevec_lookup(struct pagevec *pvec,
return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1);
}
-unsigned pagevec_lookup_tag(struct pagevec *pvec,
- struct address_space *mapping, pgoff_t *index, int tag,
- unsigned nr_pages);
+unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
+ struct address_space *mapping, pgoff_t *index, pgoff_t end,
+ int tag);
+unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
+ struct address_space *mapping, pgoff_t *index, pgoff_t end,
+ int tag, unsigned max_pages);
+static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
+ struct address_space *mapping, pgoff_t *index, int tag)
+{
+ return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag);
+}
-static inline void pagevec_init(struct pagevec *pvec, int cold)
+static inline void pagevec_init(struct pagevec *pvec)
{
pvec->nr = 0;
- pvec->cold = cold;
+ pvec->percpu_pvec_drained = false;
}
static inline void pagevec_reinit(struct pagevec *pvec)
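pagevec_init() loses the "cold" argument (the field is repurposed as percpu_pvec_drained), and the tag lookups become wrappers around range-bounded variants. A hedged sketch of the updated writeback-style iteration, modelled loosely on write_cache_pages():

/*
 * Hedged sketch of the new iteration pattern: no "cold" argument, and
 * the tag lookup can be bounded by an end index.
 */
static void example_scan_dirty(struct address_space *mapping,
                               pgoff_t index, pgoff_t end)
{
        struct pagevec pvec;
        unsigned int i, nr;

        pagevec_init(&pvec);
        while ((nr = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
                                              PAGECACHE_TAG_DIRTY))) {
                for (i = 0; i < nr; i++) {
                        /* ... process pvec.pages[i] ... */
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}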
diff --git a/include/linux/pci.h b/include/linux/pci.h
index d16a7c037ec0..96c94980d1ff 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -206,13 +206,8 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
/* Do not use FLR even if device advertises PCI_AF_CAP */
PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
- /*
- * Resume before calling the driver's system suspend hooks, disabling
- * the direct_complete optimization.
- */
- PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
/* Don't use Relaxed Ordering for TLPs directed at this device */
- PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 12),
+ PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
};
enum pci_irq_reroute_variant {
@@ -597,6 +592,10 @@ static inline bool pci_is_bridge(struct pci_dev *dev)
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
}
+#define for_each_pci_bridge(dev, bus) \
+ list_for_each_entry(dev, &bus->devices, bus_list) \
+ if (!pci_is_bridge(dev)) {} else
+
static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
{
dev = pci_physfn(dev);
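The new for_each_pci_bridge() helper walks a bus's device list and skips everything that is not a bridge. A hedged usage sketch:

/*
 * Hedged usage sketch: visit only the bridges on a bus, e.g. while
 * looking for hotplug-capable slots.
 */
static void example_walk_bridges(struct pci_bus *bus)
{
        struct pci_dev *dev;

        for_each_pci_bridge(dev, bus)
                dev_info(&dev->dev, "bridge to bus %02x\n",
                         dev->subordinate ? dev->subordinate->number : 0);
}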
@@ -1090,7 +1089,6 @@ int pcie_set_mps(struct pci_dev *dev, int mps);
int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
enum pcie_link_width *width);
void pcie_flr(struct pci_dev *dev);
-int __pci_reset_function(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
int pci_reset_function_locked(struct pci_dev *dev);
@@ -1107,6 +1105,8 @@ void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
+void pci_release_resource(struct pci_dev *dev, int resno);
+int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
@@ -1186,6 +1186,7 @@ void pci_assign_unassigned_resources(void);
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
+int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
void pdev_enable_device(struct pci_dev *);
int pci_enable_resources(struct pci_dev *, int mask);
void pci_assign_irq(struct pci_dev *dev);
@@ -1959,8 +1960,8 @@ int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
-int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset);
-void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset);
+int pci_iov_add_virtfn(struct pci_dev *dev, int id);
+void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
int pci_num_vf(struct pci_dev *dev);
int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
@@ -1977,12 +1978,12 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
}
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
-static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
+static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
return -ENOSYS;
}
static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
- int id, int reset) { }
+ int id) { }
static inline void pci_disable_sriov(struct pci_dev *dev) { }
static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
static inline int pci_vfs_assigned(struct pci_dev *dev)
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 8f16299ca068..2d2096ba1cfe 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -173,6 +173,21 @@
DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
/*
+ * Declaration/definition used for per-CPU variables that should be accessed
+ * as decrypted when memory encryption is enabled in the guest.
+ */
+#if defined(CONFIG_VIRTUALIZATION) && defined(CONFIG_AMD_MEM_ENCRYPT)
+
+#define DECLARE_PER_CPU_DECRYPTED(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, "..decrypted")
+
+#define DEFINE_PER_CPU_DECRYPTED(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, "..decrypted")
+#else
+#define DEFINE_PER_CPU_DECRYPTED(type, name) DEFINE_PER_CPU(type, name)
+#endif
+
+/*
* Intermodule exports for per-CPU variables. sparse forgets about
* address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
* noop if __CHECKER__.
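DEFINE_PER_CPU_DECRYPTED places per-CPU data in a "..decrypted" section so it stays readable by the hypervisor when guest memory encryption (SEV) is active. A hedged sketch with a made-up shared structure; the field layout is purely illustrative:

/*
 * Hedged sketch: per-CPU data shared with the hypervisor must not be
 * encrypted, so it goes in the ..decrypted section.
 */
struct example_shared_area {
        u64 seqcount;
        u64 value;
};

static DEFINE_PER_CPU_DECRYPTED(struct example_shared_area, example_area) __aligned(64);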
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 79b18a20cf5d..874b71a70058 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -485,9 +485,9 @@ struct perf_addr_filters_head {
};
/**
- * enum perf_event_active_state - the states of a event
+ * enum perf_event_state - the states of a event
*/
-enum perf_event_active_state {
+enum perf_event_state {
PERF_EVENT_STATE_DEAD = -4,
PERF_EVENT_STATE_EXIT = -3,
PERF_EVENT_STATE_ERROR = -2,
@@ -578,7 +578,7 @@ struct perf_event {
struct pmu *pmu;
void *pmu_private;
- enum perf_event_active_state state;
+ enum perf_event_state state;
unsigned int attach_state;
local64_t count;
atomic64_t child_count;
@@ -588,26 +588,10 @@ struct perf_event {
* has been enabled (i.e. eligible to run, and the task has
* been scheduled in, if this is a per-task event)
* and running (scheduled onto the CPU), respectively.
- *
- * They are computed from tstamp_enabled, tstamp_running and
- * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
*/
u64 total_time_enabled;
u64 total_time_running;
-
- /*
- * These are timestamps used for computing total_time_enabled
- * and total_time_running when the event is in INACTIVE or
- * ACTIVE state, measured in nanoseconds from an arbitrary point
- * in time.
- * tstamp_enabled: the notional time when the event was enabled
- * tstamp_running: the notional time when the event was scheduled on
- * tstamp_stopped: in INACTIVE state, the notional time when the
- * event was scheduled off.
- */
- u64 tstamp_enabled;
- u64 tstamp_running;
- u64 tstamp_stopped;
+ u64 tstamp;
/*
* timestamp shadows the actual context timing but it can
@@ -699,7 +683,6 @@ struct perf_event {
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp; /* cgroup event is attach to */
- int cgrp_defer_enabled;
#endif
struct list_head sb_list;
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
index 35c070ea6ea3..0a2c18a9771d 100644
--- a/include/linux/phy/phy-qcom-ufs.h
+++ b/include/linux/phy/phy-qcom-ufs.h
@@ -31,10 +31,7 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy);
*/
void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
-int ufs_qcom_phy_start_serdes(struct phy *phy);
int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
-int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B);
-int ufs_qcom_phy_is_pcs_ready(struct phy *phy);
void ufs_qcom_phy_save_controller_version(struct phy *phy,
u8 major, u16 minor, u16 step);
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index e694d4008c4a..4f8423a948d5 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -29,6 +29,8 @@ enum phy_mode {
PHY_MODE_USB_OTG,
PHY_MODE_SGMII,
PHY_MODE_10GKR,
+ PHY_MODE_UFS_HS_A,
+ PHY_MODE_UFS_HS_B,
};
/**
@@ -39,6 +41,7 @@ enum phy_mode {
* @power_off: powering off the phy
* @set_mode: set the mode of the phy
* @reset: resetting the phy
+ * @calibrate: calibrate the phy
* @owner: the module owner containing the ops
*/
struct phy_ops {
@@ -48,6 +51,7 @@ struct phy_ops {
int (*power_off)(struct phy *phy);
int (*set_mode)(struct phy *phy, enum phy_mode mode);
int (*reset)(struct phy *phy);
+ int (*calibrate)(struct phy *phy);
struct module *owner;
};
@@ -141,6 +145,7 @@ int phy_power_on(struct phy *phy);
int phy_power_off(struct phy *phy);
int phy_set_mode(struct phy *phy, enum phy_mode mode);
int phy_reset(struct phy *phy);
+int phy_calibrate(struct phy *phy);
static inline int phy_get_bus_width(struct phy *phy)
{
return phy->attrs.bus_width;
@@ -262,6 +267,13 @@ static inline int phy_reset(struct phy *phy)
return -ENOSYS;
}
+static inline int phy_calibrate(struct phy *phy)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
static inline int phy_get_bus_width(struct phy *phy)
{
return -ENOSYS;
@@ -291,7 +303,7 @@ static inline struct phy *devm_phy_get(struct device *dev, const char *string)
static inline struct phy *devm_phy_optional_get(struct device *dev,
const char *string)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline struct phy *devm_of_phy_get(struct device *dev,
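The generic PHY framework gains a ->calibrate op and a phy_calibrate() wrapper, replacing driver-private calibration entry points such as the removed ufs_qcom_phy_calibrate_phy() above. A hedged consumer sketch of a bring-up sequence that calibrates after power-on:

/*
 * Hedged consumer sketch; whether calibration is needed at this point
 * is PHY-specific and assumed here.
 */
static int example_phy_bringup(struct phy *phy)
{
        int ret = phy_init(phy);

        if (ret)
                return ret;

        ret = phy_power_on(phy);
        if (ret)
                return ret;

        return phy_calibrate(phy);
}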
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index a0f2aba72fa9..0412cc9833e9 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -25,8 +25,8 @@ struct device;
#ifdef CONFIG_PINCTRL
/* External interface to pin control */
-extern int pinctrl_request_gpio(unsigned gpio);
-extern void pinctrl_free_gpio(unsigned gpio);
+extern int pinctrl_gpio_request(unsigned gpio);
+extern void pinctrl_gpio_free(unsigned gpio);
extern int pinctrl_gpio_direction_input(unsigned gpio);
extern int pinctrl_gpio_direction_output(unsigned gpio);
extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config);
@@ -62,12 +62,12 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev)
#else /* !CONFIG_PINCTRL */
-static inline int pinctrl_request_gpio(unsigned gpio)
+static inline int pinctrl_gpio_request(unsigned gpio)
{
return 0;
}
-static inline void pinctrl_free_gpio(unsigned gpio)
+static inline void pinctrl_gpio_free(unsigned gpio)
{
}
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 5d8bc7f21c2a..ec6dadcc1fde 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -90,6 +90,10 @@
* @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
* this parameter (on a custom format) tells the driver which alternative
* slew rate to use.
+ * @PIN_CONFIG_SKEW_DELAY: if the pin has programmable skew rate (on inputs)
+ * or latch delay (on outputs) this parameter (in a custom format)
+ * specifies the clock skew or latch delay. It typically controls how
+ * many double inverters are put in front of the line.
* @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
* you need to pass in custom configurations to the pin controller, use
* PIN_CONFIG_END+1 as the base offset.
@@ -117,6 +121,7 @@ enum pin_config_param {
PIN_CONFIG_POWER_SOURCE,
PIN_CONFIG_SLEEP_HARDWARE_STATE,
PIN_CONFIG_SLEW_RATE,
+ PIN_CONFIG_SKEW_DELAY,
PIN_CONFIG_END = 0x7F,
PIN_CONFIG_MAX = 0xFF,
};
diff --git a/include/linux/platform_data/i2c-nuc900.h b/include/linux/platform_data/i2c-nuc900.h
deleted file mode 100644
index 23036273a97e..000000000000
--- a/include/linux/platform_data/i2c-nuc900.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_NUC900_I2C_H
-#define __ASM_ARCH_NUC900_I2C_H
-
-struct nuc900_platform_i2c {
- int bus_num;
- unsigned long bus_freq;
-};
-
-#endif /* __ASM_ARCH_NUC900_I2C_H */
diff --git a/include/linux/platform_data/media/gpio-ir-recv.h b/include/linux/platform_data/media/gpio-ir-recv.h
deleted file mode 100644
index 0c298f569d5a..000000000000
--- a/include/linux/platform_data/media/gpio-ir-recv.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GPIO_IR_RECV_H__
-#define __GPIO_IR_RECV_H__
-
-struct gpio_ir_recv_platform_data {
- int gpio_nr;
- bool active_low;
- u64 allowed_protos;
- const char *map_name;
-};
-
-#endif /* __GPIO_IR_RECV_H__ */
diff --git a/include/linux/platform_data/sht15.h b/include/linux/platform_data/sht15.h
deleted file mode 100644
index 12289c1e9413..000000000000
--- a/include/linux/platform_data/sht15.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * sht15.h - support for the SHT15 Temperature and Humidity Sensor
- *
- * Copyright (c) 2009 Jonathan Cameron
- *
- * Copyright (c) 2007 Wouter Horre
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * For further information, see the Documentation/hwmon/sht15 file.
- */
-
-#ifndef _PDATA_SHT15_H
-#define _PDATA_SHT15_H
-
-/**
- * struct sht15_platform_data - sht15 connectivity info
- * @gpio_data: no. of gpio to which bidirectional data line is
- * connected.
- * @gpio_sck: no. of gpio to which the data clock is connected.
- * @supply_mv: supply voltage in mv. Overridden by regulator if
- * available.
- * @checksum: flag to indicate the checksum should be validated.
- * @no_otp_reload: flag to indicate no reload from OTP.
- * @low_resolution: flag to indicate the temp/humidity resolution to use.
- */
-struct sht15_platform_data {
- int gpio_data;
- int gpio_sck;
- int supply_mv;
- bool checksum;
- bool no_otp_reload;
- bool low_resolution;
-};
-
-#endif /* _PDATA_SHT15_H */
diff --git a/include/linux/platform_data/st1232_pdata.h b/include/linux/platform_data/st1232_pdata.h
deleted file mode 100644
index 1dcd23bee24e..000000000000
--- a/include/linux/platform_data/st1232_pdata.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_ST1232_PDATA_H
-#define _LINUX_ST1232_PDATA_H
-
-/*
- * Optional platform data
- *
- * Use this if you want the driver to drive the reset pin.
- */
-struct st1232_pdata {
- int reset_gpio;
-};
-
-#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 47ded8aa8a5d..65d39115f06d 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -550,6 +550,33 @@ struct pm_subsys_data {
#endif
};
+/*
+ * Driver flags to control system suspend/resume behavior.
+ *
+ * These flags can be set by device drivers at the probe time. They need not be
+ * cleared by the drivers as the driver core will take care of that.
+ *
+ * NEVER_SKIP: Do not skip system suspend/resume callbacks for the device.
+ * SMART_PREPARE: Check the return value of the driver's ->prepare callback.
+ * SMART_SUSPEND: No need to resume the device from runtime suspend.
+ *
+ * Setting SMART_PREPARE instructs bus types and PM domains which may want
+ * system suspend/resume callbacks to be skipped for the device to return 0 from
+ * their ->prepare callbacks if the driver's ->prepare callback returns 0 (in
+ * other words, the system suspend/resume callbacks can only be skipped for the
+ * device if its driver doesn't object against that). This flag has no effect
+ * if NEVER_SKIP is set.
+ *
+ * Setting SMART_SUSPEND instructs bus types and PM domains which may want to
+ * runtime resume the device upfront during system suspend that doing so is not
+ * necessary from the driver's perspective. It also may cause them to skip
+ * invocations of the ->suspend_late and ->suspend_noirq callbacks provided by
+ * the driver if they decide to leave the device in runtime suspend.
+ */
+#define DPM_FLAG_NEVER_SKIP BIT(0)
+#define DPM_FLAG_SMART_PREPARE BIT(1)
+#define DPM_FLAG_SMART_SUSPEND BIT(2)
+
struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
@@ -561,6 +588,7 @@ struct dev_pm_info {
bool is_late_suspended:1;
bool early_init:1; /* Owned by the PM core */
bool direct_complete:1; /* Owned by the PM core */
+ u32 driver_flags;
spinlock_t lock;
#ifdef CONFIG_PM_SLEEP
struct list_head entry;
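Drivers declare the new behaviour by setting DPM_FLAG_* bits in dev_pm_info at probe time. A hedged sketch written as a direct assignment to dev->power.driver_flags, since no setter helper appears in this header; the probe function and flag combination are illustrative only:

/*
 * Hedged probe-time sketch: let the PM core leave a runtime-suspended
 * device suspended across system sleep, and honour ->prepare returning 0.
 */
static int example_probe(struct platform_device *pdev)
{
        pdev->dev.power.driver_flags = DPM_FLAG_SMART_SUSPEND |
                                       DPM_FLAG_SMART_PREPARE;
        pm_runtime_enable(&pdev->dev);
        return 0;
}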
@@ -736,7 +764,8 @@ extern int pm_generic_poweroff_noirq(struct device *dev);
extern int pm_generic_poweroff_late(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);
-extern void pm_complete_with_resume_check(struct device *dev);
+
+extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);
#else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 84f423d5633e..04dbef9847d3 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -18,9 +18,10 @@
#include <linux/spinlock.h>
/* Defines used for the flags field in the struct generic_pm_domain */
-#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
-#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */
-#define GENPD_FLAG_ALWAYS_ON (1U << 2) /* PM domain is always powered on */
+#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
+#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */
+#define GENPD_FLAG_ALWAYS_ON (1U << 2) /* PM domain is always powered on */
+#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3) /* Keep devices active if wakeup */
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
@@ -35,7 +36,6 @@ struct dev_power_governor {
struct gpd_dev_ops {
int (*start)(struct device *dev);
int (*stop)(struct device *dev);
- bool (*active_wakeup)(struct device *dev);
};
struct genpd_power_state {
@@ -64,8 +64,11 @@ struct generic_pm_domain {
unsigned int device_count; /* Number of devices */
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
+ unsigned int performance_state; /* Aggregated max performance state */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
+ int (*set_performance_state)(struct generic_pm_domain *genpd,
+ unsigned int state);
struct gpd_dev_ops dev_ops;
s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
bool max_off_time_changed;
@@ -121,6 +124,7 @@ struct generic_pm_domain_data {
struct pm_domain_data base;
struct gpd_timing_data td;
struct notifier_block nb;
+ unsigned int performance_state;
void *data;
};
@@ -148,6 +152,8 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
extern int pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
extern int pm_genpd_remove(struct generic_pm_domain *genpd);
+extern int dev_pm_genpd_set_performance_state(struct device *dev,
+ unsigned int state);
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
@@ -188,6 +194,12 @@ static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
return -ENOTSUPP;
}
+static inline int dev_pm_genpd_set_performance_state(struct device *dev,
+ unsigned int state)
+{
+ return -ENOTSUPP;
+}
+
#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
#endif
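genpd now aggregates a per-domain performance state: providers implement ->set_performance_state(), and consumers request a state with dev_pm_genpd_set_performance_state(). A hedged consumer sketch; the state values are provider-specific and the numbers below are assumptions:

/*
 * Hedged consumer sketch: raise the domain performance state around a
 * demanding operation, then drop back to the lowest state.
 */
static int example_start_streaming(struct device *dev)
{
        int ret = dev_pm_genpd_set_performance_state(dev, 3);

        if (ret)
                return ret;

        /* ... do the work that needs the higher state ... */

        return dev_pm_genpd_set_performance_state(dev, 0);
}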
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 51ec727b4824..6c2d2e88f066 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -124,7 +124,9 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table);
struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name);
void dev_pm_opp_put_clkname(struct opp_table *opp_table);
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
-void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table);
+void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
+struct opp_table *dev_pm_opp_register_get_pstate_helper(struct device *dev, int (*get_pstate)(struct device *dev, unsigned long rate));
+void dev_pm_opp_unregister_get_pstate_helper(struct opp_table *opp_table);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
@@ -243,7 +245,15 @@ static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device
return ERR_PTR(-ENOTSUPP);
}
-static inline void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table) {}
+static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {}
+
+static inline struct opp_table *dev_pm_opp_register_get_pstate_helper(struct device *dev,
+ int (*get_pstate)(struct device *dev, unsigned long rate))
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline void dev_pm_opp_unregister_get_pstate_helper(struct opp_table *opp_table) {}
static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 2a3acf4dba9a..6ea1ae373d77 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -28,19 +28,21 @@ enum pm_qos_flags_status {
PM_QOS_FLAGS_ALL,
};
-#define PM_QOS_DEFAULT_VALUE -1
+#define PM_QOS_DEFAULT_VALUE (-1)
+#define PM_QOS_LATENCY_ANY S32_MAX
+#define PM_QOS_LATENCY_ANY_NS ((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC)
#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
-#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
+#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE PM_QOS_LATENCY_ANY
+#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
+#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
-#define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1))
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
-#define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1)
struct pm_qos_request {
struct plist_node node;
@@ -175,7 +177,8 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev)
static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
{
return IS_ERR_OR_NULL(dev->power.qos) ?
- 0 : pm_qos_read_value(&dev->power.qos->resume_latency);
+ PM_QOS_RESUME_LATENCY_NO_CONSTRAINT :
+ pm_qos_read_value(&dev->power.qos->resume_latency);
}
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
@@ -185,9 +188,9 @@ static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
s32 mask)
{ return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_read_value(struct device *dev)
- { return 0; }
+ { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
- { return 0; }
+ { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; }
static inline int dev_pm_qos_add_request(struct device *dev,
struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type,
@@ -233,9 +236,15 @@ static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{ return 0; }
static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
-static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
+static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
+{
+ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+}
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
-static inline s32 dev_pm_qos_raw_read_value(struct device *dev) { return 0; }
+static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
+{
+ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+}
#endif
#endif
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 2efb08a60e63..f0fc4700b6ff 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
- ACCESS_ONCE(dev->power.last_busy) = jiffies;
+ WRITE_ONCE(dev->power.last_busy, jiffies);
}
static inline bool pm_runtime_is_irq_safe(struct device *dev)
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 335926039adc..905bba92f015 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -189,7 +189,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
extern int printk_delay_msec;
extern int dmesg_restrict;
-extern int kptr_restrict;
extern int
devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf,
@@ -280,6 +279,8 @@ static inline void printk_safe_flush_on_panic(void)
}
#endif
+extern int kptr_restrict;
+
extern asmlinkage void dump_stack(void) __cold;
#ifndef pr_fmt
diff --git a/include/linux/property.h b/include/linux/property.h
index 6bebee13c5e0..f6189a3ac63c 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -100,7 +100,7 @@ struct fwnode_handle *fwnode_get_named_child_node(
struct fwnode_handle *device_get_named_child_node(struct device *dev,
const char *childname);
-void fwnode_handle_get(struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode);
void fwnode_handle_put(struct fwnode_handle *fwnode);
unsigned int device_get_child_node_count(struct device *dev);
@@ -293,6 +293,10 @@ struct fwnode_handle *
fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port,
u32 endpoint);
+#define fwnode_graph_for_each_endpoint(fwnode, child) \
+ for (child = NULL; \
+ (child = fwnode_graph_get_next_endpoint(fwnode, child)); )
+
int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 567ebb5eaab0..0ca448c1cb42 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -301,18 +301,17 @@ void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
unsigned long index);
-typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *);
+typedef void (*radix_tree_update_node_t)(struct radix_tree_node *);
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
void __rcu **slot, void *entry,
- radix_tree_update_node_t update_node, void *private);
+ radix_tree_update_node_t update_node);
void radix_tree_iter_replace(struct radix_tree_root *,
const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *,
void __rcu **slot, void *entry);
void __radix_tree_delete_node(struct radix_tree_root *,
struct radix_tree_node *,
- radix_tree_update_node_t update_node,
- void *private);
+ radix_tree_update_node_t update_node);
void radix_tree_iter_delete(struct radix_tree_root *,
struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index c2cdd45a880a..127f534fec94 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -275,7 +275,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_entry_rcu(ptr, type, member) \
- container_of(lockless_dereference(ptr), type, member)
+ container_of(READ_ONCE(ptr), type, member)
/*
* Where are list_empty_rcu() and list_first_entry_rcu()?
@@ -368,7 +368,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* example is when items are added to the list, but never deleted.
*/
#define list_entry_lockless(ptr, type, member) \
- container_of((typeof(ptr))lockless_dereference(ptr), type, member)
+ container_of((typeof(ptr))READ_ONCE(ptr), type, member)
/**
* list_for_each_entry_lockless - iterate over rcu list of given type
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 1a9f70d44af9..a6ddc42f87a5 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -346,7 +346,7 @@ static inline void rcu_preempt_sleep_check(void) { }
#define __rcu_dereference_check(p, c, space) \
({ \
/* Dependency order vs. p above. */ \
- typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
+ typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(________p1)); \
@@ -360,7 +360,7 @@ static inline void rcu_preempt_sleep_check(void) { }
#define rcu_dereference_raw(p) \
({ \
/* Dependency order vs. p above. */ \
- typeof(p) ________p1 = lockless_dereference(p); \
+ typeof(p) ________p1 = READ_ONCE(p); \
((typeof(*p) __force __kernel *)(________p1)); \
})
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 978abfbac617..15eddc1353ba 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -120,21 +120,65 @@ struct reg_sequence {
*/
#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
({ \
- ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __sleep_us = (sleep_us); \
+ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ int __ret; \
+ might_sleep_if(__sleep_us); \
+ for (;;) { \
+ __ret = regmap_read((map), (addr), &(val)); \
+ if (__ret) \
+ break; \
+ if (cond) \
+ break; \
+ if ((__timeout_us) && \
+ ktime_compare(ktime_get(), __timeout) > 0) { \
+ __ret = regmap_read((map), (addr), &(val)); \
+ break; \
+ } \
+ if (__sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
+ } \
+ __ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+})
+
+/**
+ * regmap_field_read_poll_timeout - Poll until a condition is met or timeout
+ *
+ * @field: Regmap field to read from
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.txt).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read
+ * error return value in case of a read error. In the two former cases,
+ * the last read value at @field is stored in @val. Must not be called
+ * from atomic context if sleep_us or timeout_us are used.
+ *
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ */
+#define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \
+({ \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __sleep_us = (sleep_us); \
+ ktime_t timeout = ktime_add_us(ktime_get(), __timeout_us); \
int pollret; \
- might_sleep_if(sleep_us); \
+ might_sleep_if(__sleep_us); \
for (;;) { \
- pollret = regmap_read((map), (addr), &(val)); \
+ pollret = regmap_field_read((field), &(val)); \
if (pollret) \
break; \
if (cond) \
break; \
- if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
- pollret = regmap_read((map), (addr), &(val)); \
+ if (__timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+ pollret = regmap_field_read((field), &(val)); \
break; \
} \
- if (sleep_us) \
- usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ if (__sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
} \
pollret ?: ((cond) ? 0 : -ETIMEDOUT); \
})
@@ -273,6 +317,9 @@ typedef void (*regmap_unlock)(void *);
*
* @ranges: Array of configuration entries for virtual address ranges.
* @num_ranges: Number of range configuration entries.
+ * @hwlock_id: Specify the hardware spinlock id.
+ * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
+ * HWLOCK_IRQ or 0.
*/
struct regmap_config {
const char *name;
@@ -317,6 +364,9 @@ struct regmap_config {
const struct regmap_range_cfg *ranges;
unsigned int num_ranges;
+
+ unsigned int hwlock_id;
+ unsigned int hwlock_mode;
};
/**
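/*
 * Illustrative sketch (not part of the patch): polling a regmap_field until a
 * "ready" bit is set, using the regmap_field_read_poll_timeout() macro added
 * above. The field handle, EXAMPLE_READY bit and timeout values are hypothetical.
 */
#include <linux/regmap.h>

#define EXAMPLE_READY	BIT(0)	/* hypothetical status bit */

static int example_wait_ready(struct regmap_field *status_field)
{
	unsigned int val;

	/* Sleep roughly 100 us between reads, give up after 10 ms. */
	return regmap_field_read_poll_timeout(status_field, val,
					      val & EXAMPLE_READY,
					      100, 10000);
}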
diff --git a/include/linux/regset.h b/include/linux/regset.h
index 8e0c9febf495..494cedaafdf2 100644
--- a/include/linux/regset.h
+++ b/include/linux/regset.h
@@ -107,6 +107,28 @@ typedef int user_regset_writeback_fn(struct task_struct *target,
int immediate);
/**
+ * user_regset_get_size_fn - type of @get_size function in &struct user_regset
+ * @target: thread being examined
+ * @regset: regset being examined
+ *
+ * This call is optional; usually the pointer is %NULL.
+ *
+ * When provided, this function must return the current size of regset
+ * data, as observed by the @get function in &struct user_regset. The
+ * value returned must be a multiple of @size. The returned size is
+ * required to be valid only until the next time (if any) @regset is
+ * modified for @target.
+ *
+ * This function is intended for dynamically sized regsets. A regset
+ * that is statically sized does not need to implement it.
+ *
+ * This function should not be called directly: instead, callers should
+ * call regset_size() to determine the current size of a regset.
+ */
+typedef unsigned int user_regset_get_size_fn(struct task_struct *target,
+ const struct user_regset *regset);
+
+/**
* struct user_regset - accessible thread CPU state
* @n: Number of slots (registers).
* @size: Size in bytes of a slot (register).
@@ -117,19 +139,33 @@ typedef int user_regset_writeback_fn(struct task_struct *target,
* @set: Function to store values.
* @active: Function to report if regset is active, or %NULL.
* @writeback: Function to write data back to user memory, or %NULL.
+ * @get_size: Function to return the regset's size, or %NULL.
*
* This data structure describes a machine resource we call a register set.
* This is part of the state of an individual thread, not necessarily
* actual CPU registers per se. A register set consists of a number of
* similar slots, given by @n. Each slot is @size bytes, and aligned to
- * @align bytes (which is at least @size).
+ * @align bytes (which is at least @size). For dynamically-sized
+ * regsets, @n must contain the maximum possible number of slots for the
+ * regset, and @get_size must point to a function that returns the
+ * current regset size.
*
- * These functions must be called only on the current thread or on a
- * thread that is in %TASK_STOPPED or %TASK_TRACED state, that we are
- * guaranteed will not be woken up and return to user mode, and that we
- * have called wait_task_inactive() on. (The target thread always might
- * wake up for SIGKILL while these functions are working, in which case
- * that thread's user_regset state might be scrambled.)
+ * Callers that need to know only the current size of the regset and do
+ * not care about its internal structure should call regset_size()
+ * instead of inspecting @n or calling @get_size.
+ *
+ * For backward compatibility, the @get and @set methods must pad to, or
+ * accept, @n * @size bytes, even if the current regset size is smaller.
+ * The precise semantics of these operations depend on the regset being
+ * accessed.
+ *
+ * The functions to which &struct user_regset members point must be
+ * called only on the current thread or on a thread that is in
+ * %TASK_STOPPED or %TASK_TRACED state, that we are guaranteed will not
+ * be woken up and return to user mode, and that we have called
+ * wait_task_inactive() on. (The target thread always might wake up for
+ * SIGKILL while these functions are working, in which case that
+ * thread's user_regset state might be scrambled.)
*
* The @pos argument must be aligned according to @align; the @count
* argument must be a multiple of @size. These functions are not
@@ -156,6 +192,7 @@ struct user_regset {
user_regset_set_fn *set;
user_regset_active_fn *active;
user_regset_writeback_fn *writeback;
+ user_regset_get_size_fn *get_size;
unsigned int n;
unsigned int size;
unsigned int align;
@@ -371,5 +408,21 @@ static inline int copy_regset_from_user(struct task_struct *target,
return regset->set(target, regset, offset, size, NULL, data);
}
+/**
+ * regset_size - determine the current size of a regset
+ * @target: thread to be examined
+ * @regset: regset to be examined
+ *
+ * Note that the returned size is valid only until the next time
+ * (if any) @regset is modified for @target.
+ */
+static inline unsigned int regset_size(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ if (!regset->get_size)
+ return regset->n * regset->size;
+ else
+ return regset->get_size(target, regset);
+}
#endif /* <linux/regset.h> */
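/*
 * Illustrative sketch (not part of the patch, names example_* are hypothetical):
 * a dynamically sized regset. @n and @size still describe the maximum layout,
 * while ->get_size reports the size currently live for the target task, and
 * regset_size() returns whichever applies.
 */
static unsigned int example_regset_get_size(struct task_struct *target,
					    const struct user_regset *regset)
{
	/* Derived from per-task state; must be a multiple of regset->size. */
	return example_live_slots(target) * regset->size;
}

static const struct user_regset example_regset = {
	.n		= EXAMPLE_MAX_SLOTS,	/* maximum possible slots */
	.size		= sizeof(u64),
	.align		= sizeof(u64),
	.get		= example_regset_get,	/* hypothetical fill callback */
	.set		= example_regset_set,	/* hypothetical store callback */
	.get_size	= example_regset_get_size,
};

/* Callers then use regset_size(task, &example_regset) rather than n * size. */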
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 80cb40b7c88d..f2fd2d3bf58f 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -1,6 +1,6 @@
/*
* da9211.h - Regulator device driver for DA9211/DA9212
- * /DA9213/DA9214/DA9215
+ * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This program is free software; you can redistribute it and/or
@@ -25,8 +25,11 @@ enum da9211_chip_id {
DA9211,
DA9212,
DA9213,
+ DA9223,
DA9214,
+ DA9224,
DA9215,
+ DA9225,
};
struct da9211_pdata {
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index fa6ace66fea5..289e4d54e3e0 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -2,7 +2,6 @@
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H
-#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
@@ -14,9 +13,7 @@ struct ring_buffer_iter;
* Don't refer to this struct directly, use functions below.
*/
struct ring_buffer_event {
- kmemcheck_bitfield_begin(bitfield);
u32 type_len:5, time_delta:27;
- kmemcheck_bitfield_end(bitfield);
u32 array[];
};
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index d090d466cd0b..2032ce2eb20b 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -70,7 +70,7 @@ static inline bool lockdep_rtnl_is_held(void)
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * both the smp_read_barrier_depends() and the READ_ONCE(), because
* caller holds RTNL.
*/
#define rtnl_dereference(p) \
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index bc2994ed66e1..3dcd617e65ae 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -38,6 +38,15 @@ do { \
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
+
+#ifndef arch_read_lock_flags
+# define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#endif
+
+#ifndef arch_write_lock_flags
+# define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+#endif
+
# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_read_lock_flags(lock, flags) \
do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
@@ -50,9 +59,6 @@ do { \
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
#endif
-#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
-
/*
* Define the various rw_lock methods. Note we define these
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 5b9b84b20407..86ebb4bf9c6e 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -211,7 +211,7 @@ static inline void __raw_write_lock(rwlock_t *lock)
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
-#endif /* CONFIG_PREEMPT */
+#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_write_unlock(rwlock_t *lock)
{
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index dfa34d803439..56707d5ff6ad 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -112,6 +112,7 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
* lock for reading
*/
extern void down_read(struct rw_semaphore *sem);
+extern int __must_check down_read_killable(struct rw_semaphore *sem);
/*
* trylock for reading -- returns 1 if successful, 0 if contention
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index a1904aadbc45..0dcc60e820de 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -211,10 +211,14 @@ bool sbitmap_any_bit_set(const struct sbitmap *sb);
*/
bool sbitmap_any_bit_clear(const struct sbitmap *sb);
+#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
+#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
+
typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);
/**
- * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
+ * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
+ * @start: Where to start the iteration.
* @sb: Bitmap to iterate over.
* @fn: Callback. Should return true to continue or false to break early.
* @data: Pointer to pass to callback.
@@ -222,35 +226,61 @@ typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);
* This is inline even though it's non-trivial so that the function calls to the
* callback will hopefully get optimized away.
*/
-static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
- void *data)
+static inline void __sbitmap_for_each_set(struct sbitmap *sb,
+ unsigned int start,
+ sb_for_each_fn fn, void *data)
{
- unsigned int i;
+ unsigned int index;
+ unsigned int nr;
+ unsigned int scanned = 0;
- for (i = 0; i < sb->map_nr; i++) {
- struct sbitmap_word *word = &sb->map[i];
- unsigned int off, nr;
+ if (start >= sb->depth)
+ start = 0;
+ index = SB_NR_TO_INDEX(sb, start);
+ nr = SB_NR_TO_BIT(sb, start);
- if (!word->word)
- continue;
+ while (scanned < sb->depth) {
+ struct sbitmap_word *word = &sb->map[index];
+ unsigned int depth = min_t(unsigned int, word->depth - nr,
+ sb->depth - scanned);
- nr = 0;
- off = i << sb->shift;
+ scanned += depth;
+ if (!word->word)
+ goto next;
+
+ /*
+ * On the first iteration of the outer loop, we need to add the
+ * bit offset back to the size of the word for find_next_bit().
+ * On all other iterations, nr is zero, so this is a noop.
+ */
+ depth += nr;
while (1) {
- nr = find_next_bit(&word->word, word->depth, nr);
- if (nr >= word->depth)
+ nr = find_next_bit(&word->word, depth, nr);
+ if (nr >= depth)
break;
-
- if (!fn(sb, off + nr, data))
+ if (!fn(sb, (index << sb->shift) + nr, data))
return;
nr++;
}
+next:
+ nr = 0;
+ if (++index >= sb->map_nr)
+ index = 0;
}
}
-#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
-#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
+/**
+ * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
+ * @sb: Bitmap to iterate over.
+ * @fn: Callback. Should return true to continue or false to break early.
+ * @data: Pointer to pass to callback.
+ */
+static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
+ void *data)
+{
+ __sbitmap_for_each_set(sb, 0, fn, data);
+}
static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
unsigned int bitnr)
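/*
 * Illustrative sketch (not part of the patch, names example_* are hypothetical):
 * the new __sbitmap_for_each_set() takes a start bit, so a scan can resume
 * where the previous one stopped (round-robin style) instead of always
 * beginning at bit 0; out-of-range start values wrap back to 0.
 */
#include <linux/sbitmap.h>

struct example_ctx {
	unsigned int last_seen;
};

static bool example_fn(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct example_ctx *ctx = data;

	ctx->last_seen = bitnr;
	/* Return true to keep iterating, false to stop early. */
	return true;
}

static void example_scan(struct sbitmap *sb, struct example_ctx *ctx)
{
	/* Start just after the bit the previous scan ended on. */
	__sbitmap_for_each_set(sb, ctx->last_seen + 1, example_fn, ctx);
}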
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index d87dfa41142d..b7c83254c566 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -22,6 +22,12 @@ struct scatterlist {
};
/*
+ * Since the above length field is an unsigned int, below we define the maximum
+ * length in bytes that can be stored in one scatterlist entry.
+ */
+#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
+
+/*
* These macros should be used after a dma_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries dma_map_sg
@@ -262,10 +268,13 @@ void sg_free_table(struct sg_table *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
struct scatterlist *, gfp_t, sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
-int sg_alloc_table_from_pages(struct sg_table *sgt,
- struct page **pages, unsigned int n_pages,
- unsigned long offset, unsigned long size,
- gfp_t gfp_mask);
+int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
+ unsigned int n_pages, unsigned int offset,
+ unsigned long size, unsigned int max_segment,
+ gfp_t gfp_mask);
+int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
+ unsigned int n_pages, unsigned int offset,
+ unsigned long size, gfp_t gfp_mask);
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
size_t buflen, off_t skip, bool to_buffer);
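/*
 * Illustrative sketch (not part of the patch): __sg_alloc_table_from_pages()
 * lets the caller cap how large each coalesced segment may grow, e.g. when the
 * DMA engine cannot handle SCATTERLIST_MAX_SEGMENT-sized entries. The page
 * array and the 1 MiB limit below are hypothetical.
 */
#include <linux/scatterlist.h>

static int example_map_pages(struct sg_table *sgt, struct page **pages,
			     unsigned int n_pages, unsigned long size)
{
	/* Coalesce contiguous pages, but keep every segment below 1 MiB. */
	return __sg_alloc_table_from_pages(sgt, pages, n_pages, 0, size,
					   SZ_1M, GFP_KERNEL);
}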
diff --git a/include/linux/sched.h b/include/linux/sched.h
index fdf74f27acf1..a5dc7c98b0a2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -166,8 +166,6 @@ struct task_group;
/* Task command name length: */
#define TASK_COMM_LEN 16
-extern cpumask_var_t cpu_isolated_map;
-
extern void scheduler_tick(void);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
@@ -332,9 +330,11 @@ struct load_weight {
struct sched_avg {
u64 last_update_time;
u64 load_sum;
+ u64 runnable_load_sum;
u32 util_sum;
u32 period_contrib;
unsigned long load_avg;
+ unsigned long runnable_load_avg;
unsigned long util_avg;
};
@@ -377,6 +377,7 @@ struct sched_statistics {
struct sched_entity {
/* For load-balancing: */
struct load_weight load;
+ unsigned long runnable_weight;
struct rb_node run_node;
struct list_head group_node;
unsigned int on_rq;
@@ -472,10 +473,10 @@ struct sched_dl_entity {
* conditions between the inactive timer handler and the wakeup
* code.
*/
- int dl_throttled;
- int dl_boosted;
- int dl_yielded;
- int dl_non_contending;
+ int dl_throttled : 1;
+ int dl_boosted : 1;
+ int dl_yielded : 1;
+ int dl_non_contending : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -1246,7 +1247,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
-static inline unsigned int __get_task_state(struct task_struct *tsk)
+static inline unsigned int task_state_index(struct task_struct *tsk)
{
unsigned int tsk_state = READ_ONCE(tsk->state);
unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
@@ -1259,7 +1260,7 @@ static inline unsigned int __get_task_state(struct task_struct *tsk)
return fls(state);
}
-static inline char __task_state_to_char(unsigned int state)
+static inline char task_index_to_char(unsigned int state)
{
static const char state_char[] = "RSDTtXZPI";
@@ -1270,7 +1271,7 @@ static inline char __task_state_to_char(unsigned int state)
static inline char task_state_to_char(struct task_struct *tsk)
{
- return __task_state_to_char(__get_task_state(tsk));
+ return task_index_to_char(task_state_index(tsk));
}
/**
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
index d0677f6739f6..53f883f5a2fd 100644
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -54,7 +54,8 @@ static inline void task_cputime_scaled(struct task_struct *t,
extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
-
+extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ u64 *ut, u64 *st);
/*
* Thread group CPU time accounting.
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
new file mode 100644
index 000000000000..d849431c8060
--- /dev/null
+++ b/include/linux/sched/isolation.h
@@ -0,0 +1,51 @@
+#ifndef _LINUX_SCHED_ISOLATION_H
+#define _LINUX_SCHED_ISOLATION_H
+
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/tick.h>
+
+enum hk_flags {
+ HK_FLAG_TIMER = 1,
+ HK_FLAG_RCU = (1 << 1),
+ HK_FLAG_MISC = (1 << 2),
+ HK_FLAG_SCHED = (1 << 3),
+ HK_FLAG_TICK = (1 << 4),
+ HK_FLAG_DOMAIN = (1 << 5),
+};
+
+#ifdef CONFIG_CPU_ISOLATION
+DECLARE_STATIC_KEY_FALSE(housekeeping_overriden);
+extern int housekeeping_any_cpu(enum hk_flags flags);
+extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
+extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
+extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags);
+extern void __init housekeeping_init(void);
+
+#else
+
+static inline int housekeeping_any_cpu(enum hk_flags flags)
+{
+ return smp_processor_id();
+}
+
+static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
+{
+ return cpu_possible_mask;
+}
+
+static inline void housekeeping_affine(struct task_struct *t,
+ enum hk_flags flags) { }
+static inline void housekeeping_init(void) { }
+#endif /* CONFIG_CPU_ISOLATION */
+
+static inline bool housekeeping_cpu(int cpu, enum hk_flags flags)
+{
+#ifdef CONFIG_CPU_ISOLATION
+ if (static_branch_unlikely(&housekeeping_overriden))
+ return housekeeping_test_cpu(cpu, flags);
+#endif
+ return true;
+}
+
+#endif /* _LINUX_SCHED_ISOLATION_H */
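/*
 * Illustrative sketch (not part of the patch, names example_* are hypothetical):
 * users of the old housekeeping_mask helpers now pass an hk_flags mask that
 * describes the kind of housekeeping work involved.
 */
#include <linux/sched/isolation.h>

static int example_pick_timer_cpu(void)
{
	/* Any CPU that has not been isolated from timer housekeeping. */
	return housekeeping_any_cpu(HK_FLAG_TIMER);
}

static void example_bind_worker(struct task_struct *t)
{
	/* Keep a kthread off CPUs isolated from general (misc) housekeeping. */
	housekeeping_affine(t, HK_FLAG_MISC);
}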
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index db865ed25ef3..e5af028c08b4 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -18,6 +18,17 @@ static inline int rt_task(struct task_struct *p)
return rt_prio(p->prio);
}
+static inline bool task_is_realtime(struct task_struct *tsk)
+{
+ int policy = tsk->policy;
+
+ if (policy == SCHED_FIFO || policy == SCHED_RR)
+ return true;
+ if (policy == SCHED_DEADLINE)
+ return true;
+ return false;
+}
+
#ifdef CONFIG_RT_MUTEXES
/*
* Must hold either p->pi_lock or task_rq(p)->lock.
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index d6a18a3839cc..1c1a1512ec55 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -38,9 +38,9 @@ extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;
#ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_migration_cost;
-extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_time_avg;
+extern __read_mostly unsigned int sysctl_sched_migration_cost;
+extern __read_mostly unsigned int sysctl_sched_nr_migrate;
+extern __read_mostly unsigned int sysctl_sched_time_avg;
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 5553e04e59c9..37b044e78333 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -501,4 +501,9 @@ static inline int uart_handle_break(struct uart_port *port)
(cflag) & CRTSCTS || \
!((cflag) & CLOCAL))
+/*
+ * Common device tree parsing helpers
+ */
+void of_get_rs485_mode(struct device_node *np, struct serial_rs485 *rs485conf);
+
#endif /* LINUX_SERIAL_CORE_H */
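/*
 * Illustrative sketch (not part of the patch, names example_* are hypothetical):
 * a UART driver's probe path using the of_get_rs485_mode() helper declared
 * above to populate the port's RS-485 configuration from common device-tree
 * properties before registering the port.
 */
static void example_uart_parse_dt(struct device_node *np,
				  struct uart_port *port)
{
	of_get_rs485_mode(np, &port->rs485);
}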
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index ed91ce57c428..06b295bec00d 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -54,6 +54,8 @@ extern struct file *shmem_file_setup(const char *name,
loff_t size, unsigned long flags);
extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
unsigned long flags);
+extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt,
+ const char *name, loff_t size, unsigned long flags);
extern int shmem_zero_setup(struct vm_area_struct *);
extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 54fe91183a8e..ed06e1c28fc7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -15,7 +15,6 @@
#define _LINUX_SKBUFF_H
#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
@@ -711,7 +710,6 @@ struct sk_buff {
/* Following fields are _not_ copied in __copy_skb_header()
* Note that queue_mapping is here mostly to fill a hole.
*/
- kmemcheck_bitfield_begin(flags1);
__u16 queue_mapping;
/* if you move cloned around you also must adapt those constants */
@@ -730,7 +728,6 @@ struct sk_buff {
head_frag:1,
xmit_more:1,
__unused:1; /* one bit hole */
- kmemcheck_bitfield_end(flags1);
/* fields enclosed in headers_start/headers_end are copied
* using a single memcpy() in __copy_skb_header()
@@ -2664,7 +2661,7 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
* 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
* code in gfp_to_alloc_flags that should be enforcing this.
*/
- gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
+ gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}
diff --git a/include/linux/slab.h b/include/linux/slab.h
index af5aa65c7c18..50697a1d6621 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -21,13 +21,20 @@
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/
-#define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
-#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
-#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
-#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
-#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
-#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
+/* DEBUG: Perform (expensive) checks on alloc/free */
+#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
+/* DEBUG: Red zone objs in a cache */
+#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
+/* DEBUG: Poison objects */
+#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
+/* Align objs on cache lines */
+#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
+/* Use GFP_DMA memory */
+#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
+/* DEBUG: Store the last owner for bug hunting */
+#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
+/* Panic if kmem_cache_create() fails */
+#define SLAB_PANIC ((slab_flags_t __force)0x00040000U)
/*
* SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
@@ -65,44 +72,45 @@
*
* Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
-#define SLAB_TYPESAFE_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
-#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
-#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
+/* Defer freeing slabs to RCU */
+#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U)
+/* Spread some memory over cpuset */
+#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000U)
+/* Trace allocations and frees */
+#define SLAB_TRACE ((slab_flags_t __force)0x00200000U)
/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS 0x00400000UL
+# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
#else
-# define SLAB_DEBUG_OBJECTS 0x00000000UL
+# define SLAB_DEBUG_OBJECTS 0
#endif
-#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
+/* Avoid kmemleak tracing */
+#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)
-/* Don't track use of uninitialized memory */
-#ifdef CONFIG_KMEMCHECK
-# define SLAB_NOTRACK 0x01000000UL
-#else
-# define SLAB_NOTRACK 0x00000000UL
-#endif
+/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */
+# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
#else
-# define SLAB_FAILSLAB 0x00000000UL
+# define SLAB_FAILSLAB 0
#endif
+/* Account to memcg */
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
-# define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */
+# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
#else
-# define SLAB_ACCOUNT 0x00000000UL
+# define SLAB_ACCOUNT 0
#endif
#ifdef CONFIG_KASAN
-#define SLAB_KASAN 0x08000000UL
+#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
#else
-#define SLAB_KASAN 0x00000000UL
+#define SLAB_KASAN 0
#endif
/* The following flags affect the page allocator grouping pages by mobility */
-#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
+/* Objects are reclaimable */
+#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
@@ -128,7 +136,7 @@ void __init kmem_cache_init(void);
bool slab_is_available(void);
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
- unsigned long,
+ slab_flags_t,
void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
@@ -459,9 +467,6 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* Also it is possible to set different flags by OR'ing
* in one or more of the following additional @flags:
*
- * %__GFP_COLD - Request cache-cold pages instead of
- * trying to return cache-warm pages.
- *
* %__GFP_HIGH - This allocation has high priority and may use emergency pools.
*
* %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
@@ -636,6 +641,22 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
+static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
+ int node)
+{
+ if (size != 0 && n > SIZE_MAX / size)
+ return NULL;
+ if (__builtin_constant_p(n) && __builtin_constant_p(size))
+ return kmalloc_node(n * size, flags, node);
+ return __kmalloc_node(n * size, flags, node);
+}
+
+static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+{
+ return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
+}
+
+
#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
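/*
 * Illustrative sketch (not part of the patch, struct example_entry is
 * hypothetical): kmalloc_array_node() and kcalloc_node() are the node-aware
 * counterparts of kmalloc_array() and kcalloc(), with the same n * size
 * overflow check.
 */
static struct example_entry *example_alloc_table(size_t nr_entries, int node)
{
	/* Zeroed array allocated on the requested NUMA node; NULL on overflow. */
	return kcalloc_node(nr_entries, sizeof(struct example_entry),
			    GFP_KERNEL, node);
}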
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 8f7d2b1656d2..072e46e9e1d5 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -20,7 +20,7 @@ struct kmem_cache {
struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */
- unsigned int flags; /* constant flags */
+ slab_flags_t flags; /* constant flags */
unsigned int num; /* # of objs per slab */
/* 3) cache_grow/shrink */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 39fa09bcde23..0adae162dc8f 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -82,7 +82,7 @@ struct kmem_cache_order_objects {
struct kmem_cache {
struct kmem_cache_cpu __percpu *cpu_slab;
/* Used for retriving partial slabs etc */
- unsigned long flags;
+ slab_flags_t flags;
unsigned long min_partial;
int size; /* The size of an object including meta data */
int object_size; /* The size of an object without meta data */
diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h
index 003f3dd5b192..12e548938bbb 100644
--- a/include/linux/soc/brcmstb/brcmstb.h
+++ b/include/linux/soc/brcmstb/brcmstb.h
@@ -2,10 +2,27 @@
#ifndef __BRCMSTB_SOC_H
#define __BRCMSTB_SOC_H
+static inline u32 BRCM_ID(u32 reg)
+{
+ return reg >> 28 ? reg >> 16 : reg >> 8;
+}
+
+static inline u32 BRCM_REV(u32 reg)
+{
+ return reg & 0xff;
+}
+
/*
* Bus Interface Unit control register setup, must happen early during boot,
* before SMP is brought up, called by machine entry point.
*/
void brcmstb_biuctrl_init(void);
+/*
+ * Helper functions for getting family or product id from the
+ * SoC driver.
+ */
+u32 brcmstb_get_family_id(void);
+u32 brcmstb_get_product_id(void);
+
#endif /* __BRCMSTB_SOC_H */
diff --git a/include/linux/spi/spi-fsl-dspi.h b/include/linux/spi/spi-fsl-dspi.h
new file mode 100644
index 000000000000..74c9bae20bf2
--- /dev/null
+++ b/include/linux/spi/spi-fsl-dspi.h
@@ -0,0 +1,31 @@
+/*
+ * Freescale DSPI controller driver
+ *
+ * Copyright (c) 2017 Angelo Dureghello <angelo@sysam.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef SPI_FSL_DSPI_HEADER_H
+#define SPI_FSL_DSPI_HEADER_H
+
+/**
+ * struct fsl_dspi_platform_data - platform data for the Freescale DSPI driver
+ * @bus_num: board specific identifier for this DSPI driver.
+ * @cs_num: number of chip selects supported by this DSPI driver.
+ */
+struct fsl_dspi_platform_data {
+ u32 cs_num;
+ u32 bus_num;
+ u32 sck_cs_delay;
+ u32 cs_sck_delay;
+};
+
+#endif /* SPI_FSL_DSPI_HEADER_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 341e1a12bfc7..a39186194cd6 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -166,6 +166,10 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
arch_spin_lock(&lock->raw_lock);
}
+#ifndef arch_spin_lock_flags
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#endif
+
static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
@@ -279,12 +283,6 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
1 : ({ local_irq_restore(flags); 0; }); \
})
-/**
- * raw_spin_can_lock - would raw_spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
-
/* Include rwlock functions */
#include <linux/rwlock.h>
@@ -397,11 +395,6 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
return raw_spin_is_contended(&lock->rlock);
}
-static __always_inline int spin_can_lock(spinlock_t *lock)
-{
- return raw_spin_can_lock(&lock->rlock);
-}
-
#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
/*
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 612fb530af41..0ac9112c1bbe 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -32,14 +32,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
barrier();
}
-static inline void
-arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
- local_irq_save(flags);
- lock->slock = 0;
- barrier();
-}
-
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
char oldval = lock->slock;
@@ -77,7 +69,4 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#define arch_spin_is_contended(lock) (((void)(lock), 0))
-#define arch_read_can_lock(lock) (((void)(lock), 1))
-#define arch_write_can_lock(lock) (((void)(lock), 1))
-
#endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index f02fb5db8914..c2b8128799c1 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -171,8 +171,9 @@ enum {
SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */
SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */
SWP_STABLE_WRITES = (1 << 10), /* no overwrite PG_writeback pages */
+ SWP_SYNCHRONOUS_IO = (1 << 11), /* synchronous IO is efficient */
/* add others here before... */
- SWP_SCANNING = (1 << 11), /* refcount in scan_swap_map */
+ SWP_SCANNING = (1 << 12), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
@@ -297,7 +298,18 @@ struct vma_swap_readahead {
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
-void workingset_update_node(struct radix_tree_node *node, void *private);
+
+/* Do not use directly, use workingset_lookup_update */
+void workingset_update_node(struct radix_tree_node *node);
+
+/* Returns workingset_update_node() if the mapping has shadow entries. */
+#define workingset_lookup_update(mapping) \
+({ \
+ radix_tree_update_node_t __helper = workingset_update_node; \
+ if (dax_mapping(mapping) || shmem_mapping(mapping)) \
+ __helper = NULL; \
+ __helper; \
+})
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
@@ -462,9 +474,11 @@ extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
+extern int __swap_count(struct swap_info_struct *si, swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
+extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
@@ -473,6 +487,16 @@ extern void exit_swap_address_space(unsigned int type);
#else /* CONFIG_SWAP */
+static inline int swap_readpage(struct page *page, bool do_poll)
+{
+ return 0;
+}
+
+static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
+{
+ return NULL;
+}
+
#define swap_address_space(entry) (NULL)
#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
@@ -486,7 +510,7 @@ extern void exit_swap_address_space(unsigned int type);
#define free_page_and_swap_cache(page) \
put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
- release_pages((pages), (nr), false);
+ release_pages((pages), (nr));
static inline void show_swap_cache_info(void)
{
@@ -577,6 +601,11 @@ static inline int page_swapcount(struct page *page)
return 0;
}
+static inline int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
+{
+ return 0;
+}
+
static inline int __swp_swapcount(swp_entry_t entry)
{
return 0;
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index 0ad87c434ae6..790ca021203a 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -25,8 +25,12 @@
* @file: file representing this fence
* @sync_file_list: membership in global file list
* @wq: wait queue for fence signaling
+ * @flags: flags for the sync_file
* @fence: fence with the fences in the sync_file
* @cb: fence callback information
+ *
+ * flags:
+ * POLL_ENABLED: whether userspace is currently poll()'ing or not
*/
struct sync_file {
struct file *file;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 4bcdf00c110f..34f053a150a9 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -44,10 +44,9 @@ enum {
#endif
#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
-# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
- __GFP_ZERO)
+# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
#else
-# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK)
+# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT)
#endif
/*
diff --git a/include/linux/tick.h b/include/linux/tick.h
index cf413b344ddb..f442d1a42025 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -138,7 +138,6 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
#ifdef CONFIG_NO_HZ_FULL
extern bool tick_nohz_full_running;
extern cpumask_var_t tick_nohz_full_mask;
-extern cpumask_var_t housekeeping_mask;
static inline bool tick_nohz_full_enabled(void)
{
@@ -162,11 +161,6 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
cpumask_or(mask, mask, tick_nohz_full_mask);
}
-static inline int housekeeping_any_cpu(void)
-{
- return cpumask_any_and(housekeeping_mask, cpu_online_mask);
-}
-
extern void tick_nohz_dep_set(enum tick_dep_bits bit);
extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
@@ -235,11 +229,8 @@ static inline void tick_dep_clear_signal(struct signal_struct *signal,
extern void tick_nohz_full_kick_cpu(int cpu);
extern void __tick_nohz_task_switch(void);
+extern void __init tick_nohz_full_setup(cpumask_var_t cpumask);
#else
-static inline int housekeeping_any_cpu(void)
-{
- return smp_processor_id();
-}
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
@@ -259,35 +250,9 @@ static inline void tick_dep_clear_signal(struct signal_struct *signal,
static inline void tick_nohz_full_kick_cpu(int cpu) { }
static inline void __tick_nohz_task_switch(void) { }
+static inline void tick_nohz_full_setup(cpumask_var_t cpumask) { }
#endif
-static inline const struct cpumask *housekeeping_cpumask(void)
-{
-#ifdef CONFIG_NO_HZ_FULL
- if (tick_nohz_full_enabled())
- return housekeeping_mask;
-#endif
- return cpu_possible_mask;
-}
-
-static inline bool is_housekeeping_cpu(int cpu)
-{
-#ifdef CONFIG_NO_HZ_FULL
- if (tick_nohz_full_enabled())
- return cpumask_test_cpu(cpu, housekeeping_mask);
-#endif
- return true;
-}
-
-static inline void housekeeping_affine(struct task_struct *t)
-{
-#ifdef CONFIG_NO_HZ_FULL
- if (tick_nohz_full_enabled())
- set_cpus_allowed_ptr(t, housekeeping_mask);
-
-#endif
-}
-
static inline void tick_nohz_task_switch(void)
{
if (tick_nohz_full_enabled())
diff --git a/include/linux/types.h b/include/linux/types.h
index 34fce54e4f1b..c94d59ef96cc 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -156,6 +156,7 @@ typedef u32 dma_addr_t;
#endif
typedef unsigned __bitwise gfp_t;
+typedef unsigned __bitwise slab_flags_t;
typedef unsigned __bitwise fmode_t;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
deleted file mode 100644
index 75de43da2301..000000000000
--- a/include/linux/uinput.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * User level driver support for input subsystem
- *
- * Heavily based on evdev.c by Vojtech Pavlik
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Author: Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org>
- *
- * Changes/Revisions:
- * 0.5 08/13/2015 (David Herrmann <dh.herrmann@gmail.com> &
- * Benjamin Tissoires <benjamin.tissoires@redhat.com>)
- * - add UI_DEV_SETUP ioctl
- * - add UI_ABS_SETUP ioctl
- * - add UI_GET_VERSION ioctl
- * 0.4 01/09/2014 (Benjamin Tissoires <benjamin.tissoires@redhat.com>)
- * - add UI_GET_SYSNAME ioctl
- * 0.3 24/05/2006 (Anssi Hannula <anssi.hannulagmail.com>)
- * - update ff support for the changes in kernel interface
- * - add UINPUT_VERSION
- * 0.2 16/10/2004 (Micah Dowty <micah@navi.cx>)
- * - added force feedback support
- * - added UI_SET_PHYS
- * 0.1 20/06/2002
- * - first public version
- */
-#ifndef __UINPUT_H_
-#define __UINPUT_H_
-
-#include <uapi/linux/uinput.h>
-
-#define UINPUT_NAME "uinput"
-#define UINPUT_BUFFER_SIZE 16
-#define UINPUT_NUM_REQUESTS 16
-
-enum uinput_state { UIST_NEW_DEVICE, UIST_SETUP_COMPLETE, UIST_CREATED };
-
-struct uinput_request {
- unsigned int id;
- unsigned int code; /* UI_FF_UPLOAD, UI_FF_ERASE */
-
- int retval;
- struct completion done;
-
- union {
- unsigned int effect_id;
- struct {
- struct ff_effect *effect;
- struct ff_effect *old;
- } upload;
- } u;
-};
-
-struct uinput_device {
- struct input_dev *dev;
- struct mutex mutex;
- enum uinput_state state;
- wait_queue_head_t waitq;
- unsigned char ready;
- unsigned char head;
- unsigned char tail;
- struct input_event buff[UINPUT_BUFFER_SIZE];
- unsigned int ff_effects_max;
-
- struct uinput_request *requests[UINPUT_NUM_REQUESTS];
- wait_queue_head_t requests_waitq;
- spinlock_t requests_lock;
-};
-#endif /* __UINPUT_H_ */
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 9c63792a8134..fbbe974661f2 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1729,6 +1729,8 @@ static inline int usb_urb_dir_out(struct urb *urb)
return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT;
}
+int usb_urb_ep_type_check(const struct urb *urb);
+
void *usb_alloc_coherent(struct usb_device *dev, size_t size,
gfp_t mem_flags, dma_addr_t *dma);
void usb_free_coherent(struct usb_device *dev, size_t size,
@@ -1767,7 +1769,21 @@ extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype,
unsigned char descindex, void *buf, int size);
extern int usb_get_status(struct usb_device *dev,
- int type, int target, void *data);
+ int recip, int type, int target, void *data);
+
+static inline int usb_get_std_status(struct usb_device *dev,
+ int recip, int target, void *data)
+{
+ return usb_get_status(dev, recip, USB_STATUS_TYPE_STANDARD, target,
+ data);
+}
+
+static inline int usb_get_ptm_status(struct usb_device *dev, void *data)
+{
+ return usb_get_status(dev, USB_RECIP_DEVICE, USB_STATUS_TYPE_PTM,
+ 0, data);
+}
+
extern int usb_string(struct usb_device *dev, int index,
char *buf, size_t size);
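/*
 * Illustrative sketch (not part of the patch, example_read_device_status is
 * hypothetical): usb_get_status() now takes an explicit recipient and status
 * type, and usb_get_std_status() covers the common standard-status case.
 */
static int example_read_device_status(struct usb_device *udev, u16 *status)
{
	/* Standard GET_STATUS to the device recipient, wIndex 0. */
	return usb_get_std_status(udev, USB_RECIP_DEVICE, 0, status);
}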
diff --git a/include/linux/usb/association.h b/include/linux/usb/association.h
index 0a4a18b3c1bb..d7f3cb9b9db5 100644
--- a/include/linux/usb/association.h
+++ b/include/linux/usb/association.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB - Cable Based Association
*
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h
index fd73bc0e9027..3119d0ace7aa 100644
--- a/include/linux/usb/audio-v2.h
+++ b/include/linux/usb/audio-v2.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2010 Daniel Mack <daniel@caiaq.de>
*
diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h
index 3d84619110a4..170acd500ea1 100644
--- a/include/linux/usb/audio.h
+++ b/include/linux/usb/audio.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* <linux/usb/audio.h> -- USB Audio definitions.
*
diff --git a/include/linux/usb/c67x00.h b/include/linux/usb/c67x00.h
index 83c6b45470ca..2fc39e3b7281 100644
--- a/include/linux/usb/c67x00.h
+++ b/include/linux/usb/c67x00.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* usb_c67x00.h: platform definitions for the Cypress C67X00 USB chip
*
diff --git a/include/linux/usb/cdc-wdm.h b/include/linux/usb/cdc-wdm.h
index 0b3f4295c025..9b895f93d8de 100644
--- a/include/linux/usb/cdc-wdm.h
+++ b/include/linux/usb/cdc-wdm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB CDC Device Management subdriver
*
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h
index b5706f94ee9e..35d784cf32a4 100644
--- a/include/linux/usb/cdc.h
+++ b/include/linux/usb/cdc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB CDC common helpers
*
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 1a59699cf82a..1646c06989df 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
/*
* Copyright (C) ST-Ericsson 2010-2012
* Contact: Alexey Orishko <alexey.orishko@stericsson.com>
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index f665d2ceac20..cef0e44601f8 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* composite.h -- framework for usb gadgets which are composite devices
*
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index e479033bd782..a15ce99dfc2d 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001-2002 by David Brownell
*
diff --git a/include/linux/usb/ehci_pdriver.h b/include/linux/usb/ehci_pdriver.h
index db0431b39a63..dd742afdc03f 100644
--- a/include/linux/usb/ehci_pdriver.h
+++ b/include/linux/usb/ehci_pdriver.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de>
*
diff --git a/include/linux/usb/g_hid.h b/include/linux/usb/g_hid.h
index 50f5745df28c..7581e488c237 100644
--- a/include/linux/usb/g_hid.h
+++ b/include/linux/usb/g_hid.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* g_hid.h -- Header file for USB HID gadget driver
*
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 21468a722c4a..0142f3af0da6 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* <linux/usb/gadget.h>
*
@@ -188,6 +189,8 @@ struct usb_ep_caps {
* @ops: Function pointers used to access hardware-specific operations.
* @ep_list:the gadget's ep_list holds all of its endpoints
* @caps:The structure describing types and directions supported by endoint.
+ * @enabled: The current endpoint enabled/disabled state.
+ * @claimed: True if this endpoint is claimed by a function.
* @maxpacket:The maximum packet size used on this endpoint. The initial
* value can sometimes be reduced (hardware allowing), according to
* the endpoint descriptor used to configure the endpoint.
@@ -349,6 +352,9 @@ struct usb_gadget_ops {
* or B-Peripheral wants to take host role.
* @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
* MaxPacketSize.
+ * @quirk_altset_not_supp: UDC controller doesn't support alt settings.
+ * @quirk_stall_not_supp: UDC controller doesn't support stalling.
+ * @quirk_zlp_not_supp: UDC controller doesn't support ZLP.
* @quirk_avoids_skb_reserve: udc/platform wants to avoid skb_reserve() in
* u_ether.c to improve performance.
* @is_selfpowered: if the gadget is self-powered.
diff --git a/include/linux/usb/gpio_vbus.h b/include/linux/usb/gpio_vbus.h
index 837bba604a0b..804fb06cf6d6 100644
--- a/include/linux/usb/gpio_vbus.h
+++ b/include/linux/usb/gpio_vbus.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* A simple GPIO VBUS sensing driver for B peripheral only devices
* with internal transceivers.
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index a1f03ebfde47..176900528822 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001-2002 by David Brownell
*
diff --git a/include/linux/usb/input.h b/include/linux/usb/input.h
index 0e010b220e85..974befa72ac0 100644
--- a/include/linux/usb/input.h
+++ b/include/linux/usb/input.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005 Dmitry Torokhov
*
diff --git a/include/linux/usb/isp1301.h b/include/linux/usb/isp1301.h
index d3a851c28b6a..dedb3b2473e8 100644
--- a/include/linux/usb/isp1301.h
+++ b/include/linux/usb/isp1301.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* NXP ISP1301 USB transceiver driver
*
diff --git a/include/linux/usb/m66592.h b/include/linux/usb/m66592.h
index a4ba31ab2fed..2dfe68183495 100644
--- a/include/linux/usb/m66592.h
+++ b/include/linux/usb/m66592.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* M66592 driver platform data
*
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
deleted file mode 100644
index 974c3796a23f..000000000000
--- a/include/linux/usb/msm_hsusb_hw.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__
-#define __LINUX_USB_GADGET_MSM72K_UDC_H__
-
-/* USB phy selector - in TCSR address range */
-#define USB2_PHY_SEL 0xfd4ab000
-
-#define USB_AHBBURST (MSM_USB_BASE + 0x0090)
-#define USB_AHBMODE (MSM_USB_BASE + 0x0098)
-#define USB_GENCONFIG_2 (MSM_USB_BASE + 0x00a0)
-#define ULPI_TX_PKT_EN_CLR_FIX BIT(19)
-
-#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */
-
-#define USB_USBCMD (MSM_USB_BASE + 0x0140)
-#define USB_PORTSC (MSM_USB_BASE + 0x0184)
-#define USB_OTGSC (MSM_USB_BASE + 0x01A4)
-#define USB_USBMODE (MSM_USB_BASE + 0x01A8)
-#define USB_PHY_CTRL (MSM_USB_BASE + 0x0240)
-#define USB_PHY_CTRL2 (MSM_USB_BASE + 0x0278)
-
-#define GENCONFIG_2_SESS_VLD_CTRL_EN BIT(7)
-#define USBCMD_SESS_VLD_CTRL BIT(25)
-
-#define USBCMD_RESET 2
-#define USB_USBINTR (MSM_USB_BASE + 0x0148)
-
-#define PORTSC_PHCD (1 << 23) /* phy suspend mode */
-#define PORTSC_PTS_MASK (3 << 30)
-#define PORTSC_PTS_ULPI (2 << 30)
-#define PORTSC_PTS_SERIAL (3 << 30)
-
-#define USB_ULPI_VIEWPORT (MSM_USB_BASE + 0x0170)
-#define ULPI_RUN (1 << 30)
-#define ULPI_WRITE (1 << 29)
-#define ULPI_READ (0 << 29)
-#define ULPI_ADDR(n) (((n) & 255) << 16)
-#define ULPI_DATA(n) ((n) & 255)
-#define ULPI_DATA_READ(n) (((n) >> 8) & 255)
-
-/* synopsys 28nm phy registers */
-#define ULPI_PWR_CLK_MNG_REG 0x88
-#define OTG_COMP_DISABLE BIT(0)
-
-#define ULPI_MISC_A 0x96
-#define ULPI_MISC_A_VBUSVLDEXTSEL BIT(1)
-#define ULPI_MISC_A_VBUSVLDEXT BIT(0)
-
-#define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */
-#define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */
-#define PHY_RETEN (1 << 1) /* PHY retention enable/disable */
-#define PHY_POR_ASSERT (1 << 0) /* USB2 28nm PHY POR ASSERT */
-
-/* OTG definitions */
-#define OTGSC_INTSTS_MASK (0x7f << 16)
-#define OTGSC_ID (1 << 8)
-#define OTGSC_BSV (1 << 11)
-#define OTGSC_IDIS (1 << 16)
-#define OTGSC_BSVIS (1 << 19)
-#define OTGSC_IDIE (1 << 24)
-#define OTGSC_BSVIE (1 << 27)
-
-#endif /* __LINUX_USB_GADGET_MSM72K_UDC_H__ */
diff --git a/include/linux/usb/musb-ux500.h b/include/linux/usb/musb-ux500.h
index 1e2c7130f6e1..c4b7ad9850ca 100644
--- a/include/linux/usb/musb-ux500.h
+++ b/include/linux/usb/musb-ux500.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2013 ST-Ericsson AB
*
diff --git a/include/linux/usb/net2280.h b/include/linux/usb/net2280.h
index 725120224472..08b85caecfaf 100644
--- a/include/linux/usb/net2280.h
+++ b/include/linux/usb/net2280.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* NetChip 2280 high/full speed USB device controller.
* Unlike many such controllers, this one talks PCI.
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index 4031f47629ec..6cbe7a5c2b57 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OF helpers for usb devices.
*
diff --git a/include/linux/usb/ohci_pdriver.h b/include/linux/usb/ohci_pdriver.h
index 012f2b7eb2b6..7eb16cf587ee 100644
--- a/include/linux/usb/ohci_pdriver.h
+++ b/include/linux/usb/ohci_pdriver.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de>
*
diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h
index a0a8f878503c..e78eb577d0fa 100644
--- a/include/linux/usb/otg-fsm.h
+++ b/include/linux/usb/otg-fsm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2007,2008 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/staging/typec/pd.h b/include/linux/usb/pd.h
index 30b32ad72acd..e00051ced806 100644
--- a/drivers/staging/typec/pd.h
+++ b/include/linux/usb/pd.h
@@ -104,6 +104,11 @@ static inline unsigned int pd_header_msgid_le(__le16 header)
#define PD_MAX_PAYLOAD 7
+/**
+ * struct pd_message - PD message as seen on wire
+ * @header: PD message header
+ * @payload: PD message payload
+ */
struct pd_message {
__le16 header;
__le32 payload[PD_MAX_PAYLOAD];
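As a minimal usage sketch for the struct pd_message documented above (the calling context is assumed; only the struct and pd_header_msgid_le(), visible in this hunk, come from the header):

static unsigned int example_pd_msgid(const struct pd_message *msg)
{
	/* the header is stored little-endian on the wire, matching the __le16 field */
	return pd_header_msgid_le(msg->header);
}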
diff --git a/drivers/staging/typec/pd_bdo.h b/include/linux/usb/pd_bdo.h
index 90b94d9fea5d..90b94d9fea5d 100644
--- a/drivers/staging/typec/pd_bdo.h
+++ b/include/linux/usb/pd_bdo.h
diff --git a/drivers/staging/typec/pd_vdo.h b/include/linux/usb/pd_vdo.h
index d92259f8de0a..d92259f8de0a 100644
--- a/drivers/staging/typec/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
diff --git a/include/linux/usb/phy_companion.h b/include/linux/usb/phy_companion.h
index edd2ec23d282..407f530061cd 100644
--- a/include/linux/usb/phy_companion.h
+++ b/include/linux/usb/phy_companion.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* phy-companion.h -- phy companion to indicate the comparator part of PHY
*
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 7cd553a3ce05..f1fcec2fd5f8 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -57,4 +57,10 @@
*/
#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11)
+/*
+ * Device needs to be disconnected before suspend to prevent spurious
+ * wakeup.
+ */
+#define USB_QUIRK_DISCONNECT_SUSPEND BIT(12)
+
#endif /* __LINUX_USB_QUIRKS_H */
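A brief, hedged illustration of how a quirk bit such as the new USB_QUIRK_DISCONNECT_SUSPEND might be tested; udev->quirks is the USB core's per-device bitmask of USB_QUIRK_* flags, but the helper and its placement in a suspend path are assumptions rather than the hub-core implementation:

static bool example_needs_disconnect_before_suspend(struct usb_device *udev)
{
	/* quirks holds the USB_QUIRK_* bits detected or forced for this device */
	return !!(udev->quirks & USB_QUIRK_DISCONNECT_SUSPEND);
}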
diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h
index 55805f9dcf21..c0753d026bbf 100644
--- a/include/linux/usb/r8a66597.h
+++ b/include/linux/usb/r8a66597.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A66597 driver platform data
*
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 00a47d058d83..67102f3d59d4 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB
*
@@ -183,8 +184,9 @@ struct renesas_usbhs_driver_param {
#define USBHS_USB_DMAC_XFER_SIZE 32 /* hardcode the xfer size */
};
-#define USBHS_TYPE_RCAR_GEN2 1
-#define USBHS_TYPE_RCAR_GEN3 2
+#define USBHS_TYPE_RCAR_GEN2 1
+#define USBHS_TYPE_RCAR_GEN3 2
+#define USBHS_TYPE_RCAR_GEN3_WITH_PLL 3
/*
* option:
diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h
index d44ef85db177..809bccd08455 100644
--- a/include/linux/usb/rndis_host.h
+++ b/include/linux/usb/rndis_host.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Host Side support for RNDIS Networking Links
* Copyright (C) 2005 by David Brownell
diff --git a/include/linux/usb/samsung_usb_phy.h b/include/linux/usb/samsung_usb_phy.h
index 916782699f1c..dc0071741695 100644
--- a/include/linux/usb/samsung_usb_phy.h
+++ b/include/linux/usb/samsung_usb_phy.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2012 Samsung Electronics Co.Ltd
* http://www.samsung.com/
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index e2f0ab07eea5..106551a5616e 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial Converter stuff
*
diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h
index 305ee8db7faf..e0240f864548 100644
--- a/include/linux/usb/storage.h
+++ b/include/linux/usb/storage.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#ifndef __LINUX_USB_STORAGE_H
#define __LINUX_USB_STORAGE_H
diff --git a/drivers/staging/typec/tcpm.h b/include/linux/usb/tcpm.h
index 7e9a6b7b5cd6..073197f0d2bb 100644
--- a/drivers/staging/typec/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -54,6 +54,27 @@ enum tcpm_transmit_type {
TCPC_TX_BIST_MODE_2 = 7
};
+/**
+ * struct tcpc_config - Port configuration
+ * @src_pdo: PDO parameters sent to port partner as response to
+ * PD_CTRL_GET_SOURCE_CAP message
+ * @nr_src_pdo: Number of entries in @src_pdo
+ * @snk_pdo: PDO parameters sent to partner as response to
+ * PD_CTRL_GET_SINK_CAP message
+ * @nr_snk_pdo: Number of entries in @snk_pdo
+ * @max_snk_mv: Maximum acceptable sink voltage in mV
+ * @max_snk_ma: Maximum sink current in mA
+ * @max_snk_mw: Maximum required sink power in mW
+ * @operating_snk_mw:
+ * Required operating sink power in mW
+ * @type: Port type (TYPEC_PORT_DFP, TYPEC_PORT_UFP, or
+ * TYPEC_PORT_DRP)
+ * @default_role:
+ * Default port role (TYPEC_SINK or TYPEC_SOURCE).
+ * Set to TYPEC_NO_PREFERRED_ROLE if no default role.
+ * @try_role_hw: True if try.{Src,Snk} is implemented in hardware
+ * @alt_modes: List of supported alternate modes
+ */
struct tcpc_config {
const u32 *src_pdo;
unsigned int nr_src_pdo;
@@ -79,7 +100,6 @@ struct tcpc_config {
enum tcpc_usb_switch {
TCPC_USB_SWITCH_CONNECT,
TCPC_USB_SWITCH_DISCONNECT,
- TCPC_USB_SWITCH_RESTORE, /* TODO FIXME */
};
/* Mux state attributes */
@@ -104,17 +124,40 @@ struct tcpc_mux_dev {
void *priv_data;
};
+/**
+ * struct tcpc_dev - Port configuration and callback functions
+ * @config: Pointer to port configuration
+ * @get_vbus: Called to read current VBUS state
+ * @get_current_limit:
+ * Optional; called by the tcpm core when configured as a snk
+ * and cc=Rp-def. This allows the tcpm to provide a fallback
+ * current-limit detection method for the cc=Rp-def case.
+ * For example, some tcpcs may include BC1.2 charger detection
+ * and use that in this case.
+ * @set_cc: Called to set value of CC pins
+ * @get_cc: Called to read current CC pin values
+ * @set_polarity:
+ * Called to set polarity
+ * @set_vconn: Called to enable or disable VCONN
+ * @set_vbus: Called to enable or disable VBUS
+ * @set_current_limit:
+ * Optional; called to set current limit as negotiated
+ * with partner.
+ * @set_pd_rx: Called to enable or disable reception of PD messages
+ * @set_roles: Called to set power and data roles
+ * @start_drp_toggling:
+ * Optional; if supported by hardware, called to start DRP
+ * toggling. DRP toggling is stopped automatically if
+ * a connection is established.
+ * @try_role: Optional; called to set a preferred role
+ * @pd_transmit: Called to transmit PD message
+ * @mux: Pointer to multiplexer data
+ */
struct tcpc_dev {
const struct tcpc_config *config;
int (*init)(struct tcpc_dev *dev);
int (*get_vbus)(struct tcpc_dev *dev);
- /*
- * This optional callback gets called by the tcpm core when configured
- * as a snk and cc=Rp-def. This allows the tcpm to provide a fallback
- * current-limit detection method for the cc=Rp-def case. E.g. some
- * tcpcs may include BC1.2 charger detection and use that in this case.
- */
int (*get_current_limit)(struct tcpc_dev *dev);
int (*set_cc)(struct tcpc_dev *dev, enum typec_cc_status cc);
int (*get_cc)(struct tcpc_dev *dev, enum typec_cc_status *cc1,
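A minimal sketch of a low-level TCPC driver filling in the callbacks documented above; the example_* functions and the hard-coded return values stand in for real register accesses, and only fields shown in this header are used:

static int example_get_vbus(struct tcpc_dev *dev)
{
	/* a real driver reads its VBUS status register here */
	return 1;	/* VBUS present */
}

static int example_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
{
	/* program the CC pull resistors according to @cc */
	return 0;
}

static const struct tcpc_config example_config = {
	/* src_pdo/snk_pdo tables, sink limits and the port type would go here */
};

static struct tcpc_dev example_tcpc = {
	.config   = &example_config,
	.get_vbus = example_get_vbus,
	.set_cc   = example_set_cc,
};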
diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h
index 1de16c324ec8..d641ea1660b7 100644
--- a/include/linux/usb/tegra_usb_phy.h
+++ b/include/linux/usb/tegra_usb_phy.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Google, Inc.
*
diff --git a/include/linux/usb/tilegx.h b/include/linux/usb/tilegx.h
index 2d65e3435680..817908573fe8 100644
--- a/include/linux/usb/tilegx.h
+++ b/include/linux/usb/tilegx.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
index 5f07407a367a..c515765adab7 100644
--- a/include/linux/usb/ulpi.h
+++ b/include/linux/usb/ulpi.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ulpi.h -- ULPI defines and function prototypes
*
diff --git a/include/linux/usb/usb338x.h b/include/linux/usb/usb338x.h
index 11525d8d89a7..7189e3387bf9 100644
--- a/include/linux/usb/usb338x.h
+++ b/include/linux/usb/usb338x.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB 338x super/high/full speed USB device controller.
* Unlike many such controllers, this one talks PCI.
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 97116379db5f..a69877734c4e 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Networking Link Interface
*
diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h
index c1257130769b..64a840b5106e 100644
--- a/include/linux/usb/wusb-wa.h
+++ b/include/linux/usb/wusb-wa.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Wire Adapter constants and structures.
*
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
index eeb28329fa3c..9e4a3213f2c2 100644
--- a/include/linux/usb/wusb.h
+++ b/include/linux/usb/wusb.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Standard Definitions
* Event Size Tables
diff --git a/include/linux/usb/xhci-dbgp.h b/include/linux/usb/xhci-dbgp.h
index 80c1cca1f529..0a37f1283bf0 100644
--- a/include/linux/usb/xhci-dbgp.h
+++ b/include/linux/usb/xhci-dbgp.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Standalone xHCI debug capability driver
*
diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h
index 04a26285416c..79aab0065ec8 100644
--- a/include/linux/usbdevice_fs.h
+++ b/include/linux/usbdevice_fs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*****************************************************************************/
/*
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 1e0cb72e0598..1779c9817b39 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -7,9 +7,19 @@
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
+#include <linux/static_key.h>
extern int sysctl_stat_interval;
+#ifdef CONFIG_NUMA
+#define ENABLE_NUMA_STAT 1
+#define DISABLE_NUMA_STAT 0
+extern int sysctl_vm_numa_stat;
+DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
+extern int sysctl_vm_numa_stat_handler(struct ctl_table *table,
+ int write, void __user *buffer, size_t *length, loff_t *ppos);
+#endif
+
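As a sketch of how the static key above might be consumed (the helper name and the branch body are assumptions; static_branch_likely() is the standard jump-label API for a key declared with DECLARE_STATIC_KEY_TRUE):

#ifdef CONFIG_NUMA
static inline void example_count_numa_event(void)
{
	if (static_branch_likely(&vm_numa_stat_key)) {
		/* NUMA statistics enabled via sysctl_vm_numa_stat: do the accounting */
	}
}
#endif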
#ifdef CONFIG_VM_EVENT_COUNTERS
/*
* Light weight per cpu counter implementation.
diff --git a/include/linux/w1.h b/include/linux/w1.h
index 5b2972946dda..694101f744c7 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -293,6 +293,7 @@ void w1_unregister_family(struct w1_family *family);
w1_unregister_family)
u8 w1_triplet(struct w1_master *dev, int bdir);
+u8 w1_touch_bit(struct w1_master *dev, int bit);
void w1_write_8(struct w1_master *, u8);
u8 w1_read_8(struct w1_master *);
int w1_reset_bus(struct w1_master *);
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 31fef0db46d4..01a050fc6650 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -219,7 +219,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
\
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
- lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
+ lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->func = (_func); \
} while (0)
@@ -400,7 +400,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
static struct lock_class_key __key; \
const char *__lock_name; \
\
- __lock_name = #fmt#args; \
+ __lock_name = "(wq_completion)"#fmt#args; \
\
__alloc_workqueue_key((fmt), (flags), (max_active), \
&__key, __lock_name, ##args); \
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index e12d92808e98..f42d85631d17 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -43,28 +43,6 @@ enum writeback_sync_modes {
};
/*
- * why some writeback work was initiated
- */
-enum wb_reason {
- WB_REASON_BACKGROUND,
- WB_REASON_VMSCAN,
- WB_REASON_SYNC,
- WB_REASON_PERIODIC,
- WB_REASON_LAPTOP_TIMER,
- WB_REASON_FREE_MORE_MEM,
- WB_REASON_FS_FREE_SPACE,
- /*
- * There is no bdi forker thread any more and works are done
- * by emergency worker, however, this is TPs userland visible
- * and we'll be exposing exactly the same information,
- * so it has a mismatch name.
- */
- WB_REASON_FORKER_THREAD,
-
- WB_REASON_MAX,
-};
-
-/*
* A control structure which tells the writeback code what to do. These are
* always on the stack, and hence need no locking. They are always initialised
* in a manner such that unspecified fields are set to zero.
@@ -186,11 +164,11 @@ struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
enum wb_reason reason);
-bool try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
-bool try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
- enum wb_reason reason);
+void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
-void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
+void wakeup_flusher_threads(enum wb_reason reason);
+void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
+ enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
/* writeback.h requires fs.h; it, too, is not included from here. */
diff --git a/include/media/cec-pin.h b/include/media/cec-pin.h
index f09cc9579d53..83b3e17e0a07 100644
--- a/include/media/cec-pin.h
+++ b/include/media/cec-pin.h
@@ -21,71 +21,8 @@
#define LINUX_CEC_PIN_H
#include <linux/types.h>
-#include <linux/atomic.h>
#include <media/cec.h>
-enum cec_pin_state {
- /* CEC is off */
- CEC_ST_OFF,
- /* CEC is idle, waiting for Rx or Tx */
- CEC_ST_IDLE,
-
- /* Tx states */
-
- /* Pending Tx, waiting for Signal Free Time to expire */
- CEC_ST_TX_WAIT,
- /* Low-drive was detected, wait for bus to go high */
- CEC_ST_TX_WAIT_FOR_HIGH,
- /* Drive CEC low for the start bit */
- CEC_ST_TX_START_BIT_LOW,
- /* Drive CEC high for the start bit */
- CEC_ST_TX_START_BIT_HIGH,
- /* Drive CEC low for the 0 bit */
- CEC_ST_TX_DATA_BIT_0_LOW,
- /* Drive CEC high for the 0 bit */
- CEC_ST_TX_DATA_BIT_0_HIGH,
- /* Drive CEC low for the 1 bit */
- CEC_ST_TX_DATA_BIT_1_LOW,
- /* Drive CEC high for the 1 bit */
- CEC_ST_TX_DATA_BIT_1_HIGH,
- /*
- * Wait for start of sample time to check for Ack bit or first
- * four initiator bits to check for Arbitration Lost.
- */
- CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE,
- /* Wait for end of bit period after sampling */
- CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE,
-
- /* Rx states */
-
- /* Start bit low detected */
- CEC_ST_RX_START_BIT_LOW,
- /* Start bit high detected */
- CEC_ST_RX_START_BIT_HIGH,
- /* Wait for bit sample time */
- CEC_ST_RX_DATA_SAMPLE,
- /* Wait for earliest end of bit period after sampling */
- CEC_ST_RX_DATA_POST_SAMPLE,
- /* Wait for CEC to go high (i.e. end of bit period */
- CEC_ST_RX_DATA_HIGH,
- /* Drive CEC low to send 0 Ack bit */
- CEC_ST_RX_ACK_LOW,
- /* End of 0 Ack time, wait for earliest end of bit period */
- CEC_ST_RX_ACK_LOW_POST,
- /* Wait for CEC to go high (i.e. end of bit period */
- CEC_ST_RX_ACK_HIGH_POST,
- /* Wait for earliest end of bit period and end of message */
- CEC_ST_RX_ACK_FINISH,
-
- /* Start low drive */
- CEC_ST_LOW_DRIVE,
- /* Monitor pin using interrupts */
- CEC_ST_RX_IRQ,
-
- /* Total number of pin states */
- CEC_PIN_STATES
-};
-
/**
* struct cec_pin_ops - low-level CEC pin operations
* @read: read the CEC pin. Return true if high, false if low.
@@ -97,6 +34,9 @@ enum cec_pin_state {
* @free: optional. Free any allocated resources. Called when the
* adapter is deleted.
* @status: optional, log status information.
+ * @read_hpd: read the HPD pin. Returns true if high, false if low, or
+ * a negative error code on failure. If this callback is NULL
+ * or returns -ENOTTY, reading the HPD pin is not supported.
*
* These operations are used by the cec pin framework to manipulate
* the CEC pin.
@@ -109,50 +49,7 @@ struct cec_pin_ops {
void (*disable_irq)(struct cec_adapter *adap);
void (*free)(struct cec_adapter *adap);
void (*status)(struct cec_adapter *adap, struct seq_file *file);
-};
-
-#define CEC_NUM_PIN_EVENTS 128
-
-#define CEC_PIN_IRQ_UNCHANGED 0
-#define CEC_PIN_IRQ_DISABLE 1
-#define CEC_PIN_IRQ_ENABLE 2
-
-struct cec_pin {
- struct cec_adapter *adap;
- const struct cec_pin_ops *ops;
- struct task_struct *kthread;
- wait_queue_head_t kthread_waitq;
- struct hrtimer timer;
- ktime_t ts;
- unsigned int wait_usecs;
- u16 la_mask;
- bool enabled;
- bool monitor_all;
- bool rx_eom;
- bool enable_irq_failed;
- enum cec_pin_state state;
- struct cec_msg tx_msg;
- u32 tx_bit;
- bool tx_nacked;
- u32 tx_signal_free_time;
- struct cec_msg rx_msg;
- u32 rx_bit;
-
- struct cec_msg work_rx_msg;
- u8 work_tx_status;
- ktime_t work_tx_ts;
- atomic_t work_irq_change;
- atomic_t work_pin_events;
- unsigned int work_pin_events_wr;
- unsigned int work_pin_events_rd;
- ktime_t work_pin_ts[CEC_NUM_PIN_EVENTS];
- bool work_pin_is_high[CEC_NUM_PIN_EVENTS];
- ktime_t timer_ts;
- u32 timer_cnt;
- u32 timer_100ms_overruns;
- u32 timer_300ms_overruns;
- u32 timer_max_overrun;
- u32 timer_sum_overrun;
+ int (*read_hpd)(struct cec_adapter *adap);
};
/**
diff --git a/include/media/cec.h b/include/media/cec.h
index df6b3bd31284..16341210d3ba 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -91,7 +91,7 @@ struct cec_event_entry {
};
#define CEC_NUM_CORE_EVENTS 2
-#define CEC_NUM_EVENTS CEC_EVENT_PIN_CEC_HIGH
+#define CEC_NUM_EVENTS CEC_EVENT_PIN_HPD_HIGH
struct cec_fh {
struct list_head list;
@@ -297,6 +297,16 @@ void cec_queue_pin_cec_event(struct cec_adapter *adap,
bool is_high, ktime_t ts);
/**
+ * cec_queue_pin_hpd_event() - queue a pin event with a given timestamp.
+ *
+ * @adap: pointer to the cec adapter
+ * @is_high: when true the HPD pin is high, otherwise it is low
+ * @ts: the timestamp for this event
+ *
+ */
+void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts);
+
+/**
* cec_get_edid_phys_addr() - find and return the physical address
*
* @edid: pointer to the EDID data
@@ -417,6 +427,10 @@ static inline u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
static inline int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
{
+ if (parent)
+ *parent = phys_addr;
+ if (port)
+ *port = 0;
return 0;
}
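A hedged sketch of how a pin driver might report HPD transitions with cec_queue_pin_hpd_event(); the interrupt wiring and the way the pin level is obtained are assumptions, while the CEC call and ktime_get() are established APIs:

static irqreturn_t example_hpd_irq(int irq, void *priv)
{
	struct cec_adapter *adap = priv;
	bool is_high = true;	/* a real handler reads the HPD GPIO level here */

	cec_queue_pin_hpd_event(adap, is_high, ktime_get());
	return IRQ_HANDLED;
}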
diff --git a/include/media/drv-intf/saa7146_vv.h b/include/media/drv-intf/saa7146_vv.h
index 4e89e9f12a1e..6f80fb7f31a5 100644
--- a/include/media/drv-intf/saa7146_vv.h
+++ b/include/media/drv-intf/saa7146_vv.h
@@ -108,6 +108,7 @@ struct saa7146_vv
struct saa7146_dmaqueue vbi_dmaq;
struct v4l2_vbi_format vbi_fmt;
struct timer_list vbi_read_timeout;
+ struct file *vbi_read_timeout_file;
/* vbi workaround interrupt queue */
wait_queue_head_t vbi_wq;
int vbi_fieldcount;
@@ -184,7 +185,7 @@ int saa7146_unregister_device(struct video_device *vid, struct saa7146_dev *dev)
void saa7146_buffer_finish(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, int state);
void saa7146_buffer_next(struct saa7146_dev *dev, struct saa7146_dmaqueue *q,int vbi);
int saa7146_buffer_queue(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, struct saa7146_buf *buf);
-void saa7146_buffer_timeout(unsigned long data);
+void saa7146_buffer_timeout(struct timer_list *t);
void saa7146_dma_free(struct saa7146_dev* dev,struct videobuf_queue *q,
struct saa7146_buf *buf);
@@ -203,14 +204,14 @@ void saa7146_set_gpio(struct saa7146_dev *saa, u8 pin, u8 data);
/* from saa7146_video.c */
extern const struct v4l2_ioctl_ops saa7146_video_ioctl_ops;
extern const struct v4l2_ioctl_ops saa7146_vbi_ioctl_ops;
-extern struct saa7146_use_ops saa7146_video_uops;
+extern const struct saa7146_use_ops saa7146_video_uops;
int saa7146_start_preview(struct saa7146_fh *fh);
int saa7146_stop_preview(struct saa7146_fh *fh);
long saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *arg);
int saa7146_s_ctrl(struct v4l2_ctrl *ctrl);
/* from saa7146_vbi.c */
-extern struct saa7146_use_ops saa7146_vbi_uops;
+extern const struct saa7146_use_ops saa7146_vbi_uops;
/* resource management functions */
int saa7146_res_get(struct saa7146_fh *fh, unsigned int bit);
diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h
index 86d15a9b6c01..857da67bd931 100644
--- a/include/media/lirc_dev.h
+++ b/include/media/lirc_dev.h
@@ -9,7 +9,6 @@
#ifndef _LINUX_LIRC_DEV_H
#define _LINUX_LIRC_DEV_H
-#define MAX_IRCTL_DEVICES 8
#define BUFLEN 16
#include <linux/slab.h>
@@ -18,6 +17,8 @@
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <media/lirc.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
struct lirc_buffer {
wait_queue_head_t wait_poll;
@@ -112,84 +113,69 @@ static inline unsigned int lirc_buffer_write(struct lirc_buffer *buf,
}
/**
- * struct lirc_driver - Defines the parameters on a LIRC driver
- *
- * @name: this string will be used for logs
- *
- * @minor: indicates minor device (/dev/lirc) number for
- * registered driver if caller fills it with negative
- * value, then the first free minor number will be used
- * (if available).
- *
- * @code_length: length of the remote control key code expressed in bits.
- *
- * @buffer_size: Number of FIFO buffers with @chunk_size size. If zero,
- * creates a buffer with BUFLEN size (16 bytes).
+ * struct lirc_dev - represents a LIRC device
*
+ * @name: used for logging
+ * @minor: the minor device (/dev/lircX) number for the device
+ * @code_length: length of a remote control key code expressed in bits
* @features: lirc compatible hardware features, like LIRC_MODE_RAW,
* LIRC_CAN\_\*, as defined at include/media/lirc.h.
- *
+ * @buffer_size: Number of FIFO buffers with @chunk_size size.
+ * Only used if @buf is NULL.
* @chunk_size: Size of each FIFO buffer.
- *
- * @data: it may point to any driver data and this pointer will
- * be passed to all callback functions.
- *
- * @min_timeout: Minimum timeout for record. Valid only if
- * LIRC_CAN_SET_REC_TIMEOUT is defined.
- *
- * @max_timeout: Maximum timeout for record. Valid only if
- * LIRC_CAN_SET_REC_TIMEOUT is defined.
- *
- * @rbuf: if not NULL, it will be used as a read buffer, you will
+ * Only used if @buf is NULL.
+ * @data: private per-driver data
+ * @buf: if %NULL, lirc_dev will allocate and manage the buffer,
+ * otherwise allocated by the caller which will
* have to write to the buffer by other means, like irq's
* (see also lirc_serial.c).
- *
- * @rdev: Pointed to struct rc_dev associated with the LIRC
- * device.
- *
- * @fops: file_operations for drivers which don't fit the current
- * driver model.
- * Some ioctl's can be directly handled by lirc_dev if the
- * driver's ioctl function is NULL or if it returns
- * -ENOIOCTLCMD (see also lirc_serial.c).
- *
- * @dev: pointer to the struct device associated with the LIRC
- * device.
- *
+ * @buf_internal: whether lirc_dev has allocated the read buffer or not
+ * @rdev: &struct rc_dev associated with the device
+ * @fops: &struct file_operations for the device
* @owner: the module owning this struct
+ * @attached: if the device is still live
+ * @open: open count for the device's chardev
+ * @mutex: serialises file_operations calls
+ * @dev: &struct device assigned to the device
+ * @cdev: &struct cdev assigned to the device
*/
-struct lirc_driver {
+struct lirc_dev {
char name[40];
- int minor;
+ unsigned int minor;
__u32 code_length;
- unsigned int buffer_size; /* in chunks holding one code each */
__u32 features;
+ unsigned int buffer_size; /* in chunks holding one code each */
unsigned int chunk_size;
+ struct lirc_buffer *buf;
+ bool buf_internal;
void *data;
- int min_timeout;
- int max_timeout;
- struct lirc_buffer *rbuf;
struct rc_dev *rdev;
const struct file_operations *fops;
- struct device *dev;
struct module *owner;
+
+ bool attached;
+ int open;
+
+ struct mutex mutex; /* protect from simultaneous accesses */
+
+ struct device dev;
+ struct cdev cdev;
};
-/* following functions can be called ONLY from user context
- *
- * returns negative value on error or minor number
- * of the registered device if success
- * contents of the structure pointed by p is copied
- */
-extern int lirc_register_driver(struct lirc_driver *d);
+struct lirc_dev *lirc_allocate_device(void);
+
+void lirc_free_device(struct lirc_dev *d);
+
+int lirc_register_device(struct lirc_dev *d);
+
+void lirc_unregister_device(struct lirc_dev *d);
-/* returns negative value on error or 0 if success
-*/
-extern int lirc_unregister_driver(int minor);
+/* Must be called in the open fop before lirc_get_pdata() can be used */
+void lirc_init_pdata(struct inode *inode, struct file *file);
-/* Returns the private data stored in the lirc_driver
+/* Returns the private data stored in the lirc_dev
* associated with the given device file pointer.
*/
void *lirc_get_pdata(struct file *file);
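A rough sketch of the new registration flow that replaces lirc_register_driver(); the field values are illustrative, and which additional fields (such as @fops) a particular driver must set is not shown here:

static int example_register_lirc(struct rc_dev *rc)
{
	struct lirc_dev *ldev;
	int ret;

	ldev = lirc_allocate_device();
	if (!ldev)
		return -ENOMEM;

	snprintf(ldev->name, sizeof(ldev->name), "example-lirc");
	ldev->code_length = 32;
	ldev->chunk_size  = sizeof(int);
	ldev->buffer_size = 64;		/* illustrative FIFO depth */
	ldev->rdev  = rc;
	ldev->owner = THIS_MODULE;

	ret = lirc_register_device(ldev);
	if (ret)
		lirc_free_device(ldev);	/* only valid before registration succeeds */
	return ret;
}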
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 2a160e6e823c..72197cb43781 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -211,6 +211,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_ALINK_DTU_M "rc-alink-dtu-m"
#define RC_MAP_ANYSEE "rc-anysee"
#define RC_MAP_APAC_VIEWCOMP "rc-apac-viewcomp"
+#define RC_MAP_ASTROMETA_T2HYBRID "rc-astrometa-t2hybrid"
#define RC_MAP_ASUS_PC39 "rc-asus-pc39"
#define RC_MAP_ASUS_PS3_100 "rc-asus-ps3-100"
#define RC_MAP_ATI_TV_WONDER_HD_600 "rc-ati-tv-wonder-hd-600"
@@ -258,6 +259,8 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_GENIUS_TVGO_A11MCE "rc-genius-tvgo-a11mce"
#define RC_MAP_GOTVIEW7135 "rc-gotview7135"
#define RC_MAP_HAUPPAUGE_NEW "rc-hauppauge"
+#define RC_MAP_HISI_POPLAR "rc-hisi-poplar"
+#define RC_MAP_HISI_TV_DEMO "rc-hisi-tv-demo"
#define RC_MAP_IMON_MCE "rc-imon-mce"
#define RC_MAP_IMON_PAD "rc-imon-pad"
#define RC_MAP_IODATA_BCTV7E "rc-iodata-bctv7e"
@@ -300,6 +303,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_REDDO "rc-reddo"
#define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly"
#define RC_MAP_STREAMZAP "rc-streamzap"
+#define RC_MAP_TANGO "rc-tango"
#define RC_MAP_TBS_NEC "rc-tbs-nec"
#define RC_MAP_TECHNISAT_TS35 "rc-technisat-ts35"
#define RC_MAP_TECHNISAT_USB2 "rc-technisat-usb2"
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index c69d8c8a66d0..6152434cbe82 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -50,6 +50,10 @@ enum v4l2_async_match_type {
* @match: union of per-bus type matching data sets
* @list: used to link struct v4l2_async_subdev objects, waiting to be
* probed, to a notifier->waiting list
+ *
+ * When this struct is used as a member in a driver specific struct,
+ * the driver specific struct shall contain the &struct
+ * v4l2_async_subdev as its first member.
*/
struct v4l2_async_subdev {
enum v4l2_async_match_type match_type;
@@ -76,32 +80,47 @@ struct v4l2_async_subdev {
};
/**
+ * struct v4l2_async_notifier_operations - Asynchronous V4L2 notifier operations
+ * @bound: a subdevice driver has successfully probed one of the subdevices
+ * @complete: All subdevices have been probed successfully. The complete
+ * callback is only executed for the root notifier.
+ * @unbind: a subdevice is leaving
+ */
+struct v4l2_async_notifier_operations {
+ int (*bound)(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd);
+ int (*complete)(struct v4l2_async_notifier *notifier);
+ void (*unbind)(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd);
+};
+
+/**
* struct v4l2_async_notifier - v4l2_device notifier data
*
- * @num_subdevs: number of subdevices
+ * @ops: notifier operations
+ * @num_subdevs: number of subdevices used in the subdevs array
+ * @max_subdevs: number of subdevices allocated in the subdevs array
* @subdevs: array of pointers to subdevice descriptors
- * @v4l2_dev: pointer to struct v4l2_device
+ * @v4l2_dev: v4l2_device of the root notifier, NULL otherwise
+ * @sd: sub-device that registered the notifier, NULL otherwise
+ * @parent: parent notifier
* @waiting: list of struct v4l2_async_subdev, waiting for their drivers
* @done: list of struct v4l2_subdev, already probed
* @list: member in a global list of notifiers
- * @bound: a subdevice driver has successfully probed one of subdevices
- * @complete: all subdevices have been probed successfully
- * @unbind: a subdevice is leaving
*/
struct v4l2_async_notifier {
+ const struct v4l2_async_notifier_operations *ops;
unsigned int num_subdevs;
+ unsigned int max_subdevs;
struct v4l2_async_subdev **subdevs;
struct v4l2_device *v4l2_dev;
+ struct v4l2_subdev *sd;
+ struct v4l2_async_notifier *parent;
struct list_head waiting;
struct list_head done;
struct list_head list;
- int (*bound)(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd);
- int (*complete)(struct v4l2_async_notifier *notifier);
- void (*unbind)(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd);
};
/**
@@ -114,6 +133,16 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
struct v4l2_async_notifier *notifier);
/**
+ * v4l2_async_subdev_notifier_register - registers a subdevice asynchronous
+ * notifier for a sub-device
+ *
+ * @sd: pointer to &struct v4l2_subdev
+ * @notifier: pointer to &struct v4l2_async_notifier
+ */
+int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
+ struct v4l2_async_notifier *notifier);
+
+/**
* v4l2_async_notifier_unregister - unregisters a subdevice asynchronous notifier
*
* @notifier: pointer to &struct v4l2_async_notifier
@@ -121,6 +150,22 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier);
/**
+ * v4l2_async_notifier_cleanup - clean up notifier resources
+ * @notifier: the notifier the resources of which are to be cleaned up
+ *
+ * Release memory resources related to a notifier, including the async
+ * sub-devices allocated for the purposes of the notifier but not the notifier
+ * itself. The user is responsible for calling this function to clean up the
+ * notifier after calling @v4l2_async_notifier_parse_fwnode_endpoints or
+ * @v4l2_async_notifier_parse_fwnode_sensor_common.
+ *
+ * There is no harm from calling v4l2_async_notifier_cleanup in other
+ * cases as long as its memory has been zeroed after it has been
+ * allocated.
+ */
+void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier);
+
+/**
* v4l2_async_register_subdev - registers a sub-device to the asynchronous
* subdevice framework
*
@@ -129,6 +174,28 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier);
int v4l2_async_register_subdev(struct v4l2_subdev *sd);
/**
+ * v4l2_async_register_subdev_sensor_common - registers a sensor sub-device to
+ * the asynchronous sub-device
+ * framework and parses and sets up
+ * common sensor-related devices
+ *
+ * @sd: pointer to struct &v4l2_subdev
+ *
+ * This function is just like v4l2_async_register_subdev() with the exception
+ * that calling it will also parse firmware interfaces for remote references
+ * using v4l2_async_notifier_parse_fwnode_sensor_common() and register the
+ * async sub-devices. The sub-device is similarly unregistered by calling
+ * v4l2_async_unregister_subdev().
+ *
+ * While registered, the subdev module is marked as in-use.
+ *
+ * An error is returned if the module is no longer loaded on any attempts
+ * to register it.
+ */
+int __must_check v4l2_async_register_subdev_sensor_common(
+ struct v4l2_subdev *sd);
+
+/**
* v4l2_async_unregister_subdev - unregisters a sub-device to the asynchronous
* subdevice framework
*
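To illustrate the new callback layout, a hedged sketch of a bridge driver providing the operations in a separate const structure and pointing the notifier at them (the callback bodies are placeholders):

static int example_bound(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *subdev,
			 struct v4l2_async_subdev *asd)
{
	/* e.g. create media links and remember the bound sub-device */
	return 0;
}

static int example_complete(struct v4l2_async_notifier *notifier)
{
	/* all sub-devices probed: register the video device nodes */
	return 0;
}

static const struct v4l2_async_notifier_operations example_async_ops = {
	.bound    = example_bound,
	.complete = example_complete,
};

/* before calling v4l2_async_notifier_register():
 *	notifier->ops = &example_async_ops;
 */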
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index 7adec9851d9e..b5b465677d28 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -25,6 +25,8 @@
#include <media/v4l2-mediabus.h>
struct fwnode_handle;
+struct v4l2_async_notifier;
+struct v4l2_async_subdev;
#define V4L2_FWNODE_CSI2_MAX_DATA_LANES 4
@@ -113,13 +115,237 @@ struct v4l2_fwnode_link {
unsigned int remote_port;
};
+/**
+ * v4l2_fwnode_endpoint_parse() - parse all fwnode node properties
+ * @fwnode: pointer to the endpoint's fwnode handle
+ * @vep: pointer to the V4L2 fwnode data structure
+ *
+ * All properties are optional. If none are found, we don't set any flags. This
+ * means the port has a static configuration and no properties have to be
+ * specified explicitly. If any properties that identify the bus as parallel
+ * are found and slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if
+ * we recognise the bus as serial CSI-2 and clock-noncontinuous isn't set, we
+ * set the V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag. The caller should hold a
+ * reference to @fwnode.
+ *
+ * NOTE: This function does not parse properties the size of which is variable
+ * without a low fixed limit. Please use v4l2_fwnode_endpoint_alloc_parse() in
+ * new drivers instead.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep);
+
+/**
+ * v4l2_fwnode_endpoint_free() - free the V4L2 fwnode acquired by
+ * v4l2_fwnode_endpoint_alloc_parse()
+ * @vep: the V4L2 fwnode the resources of which are to be released
+ *
+ * It is safe to call this function with NULL argument or on a V4L2 fwnode the
+ * parsing of which failed.
+ */
+void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep);
+
+/**
+ * v4l2_fwnode_endpoint_alloc_parse() - parse all fwnode node properties
+ * @fwnode: pointer to the endpoint's fwnode handle
+ *
+ * All properties are optional. If none are found, we don't set any flags. This
+ * means the port has a static configuration and no properties have to be
+ * specified explicitly. If any properties that identify the bus as parallel
+ * are found and slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if
+ * we recognise the bus as serial CSI-2 and clock-noncontinuous isn't set, we
+ * set the V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag. The caller should hold a
+ * reference to @fwnode.
+ *
+ * v4l2_fwnode_endpoint_alloc_parse() has two important differences to
+ * v4l2_fwnode_endpoint_parse():
+ *
+ * 1. It also parses variable size data.
+ *
+ * 2. The memory it has allocated to store the variable size data must be freed
+ * using v4l2_fwnode_endpoint_free() when no longer needed.
+ *
+ * Return: Pointer to v4l2_fwnode_endpoint if successful, or an error pointer
+ * on failure.
+ */
struct v4l2_fwnode_endpoint *v4l2_fwnode_endpoint_alloc_parse(
struct fwnode_handle *fwnode);
-void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep);
+
+/**
+ * v4l2_fwnode_parse_link() - parse a link between two endpoints
+ * @fwnode: pointer to the endpoint's fwnode at the local end of the link
+ * @link: pointer to the V4L2 fwnode link data structure
+ *
+ * Fill the link structure with the local and remote nodes and port numbers.
+ * The local_node and remote_node fields are set to point to the local and
+ * remote port's parent nodes respectively (the port parent node being the
+ * parent node of the port node if that node isn't a 'ports' node, or the
+ * grand-parent node of the port node otherwise).
+ *
+ * A reference is taken to both the local and remote nodes, the caller must use
+ * v4l2_fwnode_put_link() to drop the references when done with the
+ * link.
+ *
+ * Return: 0 on success, or -ENOLINK if the remote endpoint fwnode can't be
+ * found.
+ */
int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode,
struct v4l2_fwnode_link *link);
+
+/**
+ * v4l2_fwnode_put_link() - drop references to nodes in a link
+ * @link: pointer to the V4L2 fwnode link data structure
+ *
+ * Drop references to the local and remote nodes in the link. This function
+ * must be called on every link parsed with v4l2_fwnode_parse_link().
+ */
void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link);
+
+/**
+ * typedef parse_endpoint_func - Driver's callback function to be called on
+ * each V4L2 fwnode endpoint.
+ *
+ * @dev: pointer to &struct device
+ * @vep: pointer to &struct v4l2_fwnode_endpoint
+ * @asd: pointer to &struct v4l2_async_subdev
+ *
+ * Return:
+ * * %0 on success
+ * * %-ENOTCONN if the endpoint is to be skipped but this
+ * should not be considered as an error
+ * * %-EINVAL if the endpoint configuration is invalid
+ */
+typedef int (*parse_endpoint_func)(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd);
+
+
+/**
+ * v4l2_async_notifier_parse_fwnode_endpoints - Parse V4L2 fwnode endpoints in a
+ * device node
+ * @dev: the device the endpoints of which are to be parsed
+ * @notifier: notifier for @dev
+ * @asd_struct_size: size of the driver's async sub-device struct, including
+ * sizeof(struct v4l2_async_subdev). The &struct
+ * v4l2_async_subdev shall be the first member of
+ * the driver's async sub-device struct, i.e. both
+ * begin at the same memory address.
+ * @parse_endpoint: Driver's callback function called on each V4L2 fwnode
+ * endpoint. Optional.
+ *
+ * Parse the fwnode endpoints of the @dev device and populate the async sub-
+ * devices array of the notifier. The @parse_endpoint callback function is
+ * called for each endpoint with the corresponding async sub-device pointer to
+ * let the caller initialize the driver-specific part of the async sub-device
+ * structure.
+ *
+ * The notifier memory shall be zeroed before this function is called on the
+ * notifier.
+ *
+ * This function may not be called on a registered notifier and may be called on
+ * a notifier only once.
+ *
+ * Do not change the notifier's subdevs array, take references to the subdevs
+ * array itself or change the notifier's num_subdevs field. This is because this
+ * function allocates and reallocates the subdevs array based on parsing
+ * endpoints.
+ *
+ * The &struct v4l2_fwnode_endpoint passed to the callback function
+ * @parse_endpoint is released once the function is finished. If there is a need
+ * to retain that configuration, the user needs to allocate memory for it.
+ *
+ * Any notifier populated using this function must be released with a call to
+ * v4l2_async_notifier_cleanup() after it has been unregistered and the async
+ * sub-devices are no longer in use, even if the function returned an error.
+ *
+ * Return: %0 on success, including when no async sub-devices are found
+ * %-ENOMEM if memory allocation failed
+ * %-EINVAL if graph or endpoint parsing failed
+ * Other error codes as returned by @parse_endpoint
+ */
+int v4l2_async_notifier_parse_fwnode_endpoints(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size,
+ parse_endpoint_func parse_endpoint);
+
+/**
+ * v4l2_async_notifier_parse_fwnode_endpoints_by_port - Parse V4L2 fwnode
+ * endpoints of a port in a
+ * device node
+ * @dev: the device the endpoints of which are to be parsed
+ * @notifier: notifier for @dev
+ * @asd_struct_size: size of the driver's async sub-device struct, including
+ * sizeof(struct v4l2_async_subdev). The &struct
+ * v4l2_async_subdev shall be the first member of
+ * the driver's async sub-device struct, i.e. both
+ * begin at the same memory address.
+ * @port: port number where endpoints are to be parsed
+ * @parse_endpoint: Driver's callback function called on each V4L2 fwnode
+ * endpoint. Optional.
+ *
+ * This function is just like v4l2_async_notifier_parse_fwnode_endpoints() with
+ * the exception that it only parses endpoints in a given port. This is useful
+ * on devices that have both sinks and sources: the async sub-devices connected
+ * to sources have already been configured by another driver (on capture
+ * devices). In this case the driver must know which ports to parse.
+ *
+ * Parse the fwnode endpoints of the @dev device on a given @port and populate
+ * the async sub-devices array of the notifier. The @parse_endpoint callback
+ * function is called for each endpoint with the corresponding async sub-device
+ * pointer to let the caller initialize the driver-specific part of the async
+ * sub-device structure.
+ *
+ * The notifier memory shall be zeroed before this function is called on the
+ * notifier the first time.
+ *
+ * This function may not be called on a registered notifier and may be called on
+ * a notifier only once per port.
+ *
+ * Do not change the notifier's subdevs array, take references to the subdevs
+ * array itself or change the notifier's num_subdevs field. This is because this
+ * function allocates and reallocates the subdevs array based on parsing
+ * endpoints.
+ *
+ * The &struct v4l2_fwnode_endpoint passed to the callback function
+ * @parse_endpoint is released once the function is finished. If there is a need
+ * to retain that configuration, the user needs to allocate memory for it.
+ *
+ * Any notifier populated using this function must be released with a call to
+ * v4l2_async_notifier_cleanup() after it has been unregistered and the async
+ * sub-devices are no longer in use, even if the function returned an error.
+ *
+ * Return: %0 on success, including when no async sub-devices are found
+ * %-ENOMEM if memory allocation failed
+ * %-EINVAL if graph or endpoint parsing failed
+ * Other error codes as returned by @parse_endpoint
+ */
+int v4l2_async_notifier_parse_fwnode_endpoints_by_port(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size, unsigned int port,
+ parse_endpoint_func parse_endpoint);
+
+/**
+ * v4l2_async_notifier_parse_fwnode_sensor_common - parse common references on
+ * sensors for async sub-devices
+ * @dev: the device node the properties of which are parsed for references
+ * @notifier: the async notifier where the async subdevs will be added
+ *
+ * Parse common sensor properties for remote devices related to the
+ * sensor and set up async sub-devices for them.
+ *
+ * Any notifier populated using this function must be released with a call to
+ * v4l2_async_notifier_cleanup() after it has been unregistered and the async
+ * sub-devices are no longer in use, even in the case the function returned an
+ * error.
+ *
+ * Return: 0 on success
+ * -ENOMEM if memory allocation failed
+ * -EINVAL if property parsing failed
+ */
+int v4l2_async_notifier_parse_fwnode_sensor_common(
+ struct device *dev, struct v4l2_async_notifier *notifier);
+
#endif /* _V4L2_FWNODE_H */
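A sketch of the usage documented above: a driver-specific async sub-device structure embedding struct v4l2_async_subdev as its first member, a parse_endpoint callback matching the typedef, and the parse call itself; all example_* names and the state layout are assumptions:

struct example_asd {
	struct v4l2_async_subdev asd;	/* must be the first member */
	unsigned int port;
};

static int example_parse_endpoint(struct device *dev,
				  struct v4l2_fwnode_endpoint *vep,
				  struct v4l2_async_subdev *asd)
{
	struct example_asd *easd = container_of(asd, struct example_asd, asd);

	easd->port = vep->base.port;
	return 0;			/* or -ENOTCONN to skip the endpoint */
}

/* in probe(), with a zeroed notifier embedded in the driver state:
 *	ret = v4l2_async_notifier_parse_fwnode_endpoints(dev, &state->notifier,
 *			sizeof(struct example_asd), example_parse_endpoint);
 *	...
 *	v4l2_async_notifier_cleanup(&state->notifier);	(on teardown or error)
 */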
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index e83872078376..ec399c770301 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -793,6 +793,8 @@ struct v4l2_subdev_platform_data {
* list.
* @asd: Pointer to respective &struct v4l2_async_subdev.
* @notifier: Pointer to the managing notifier.
+ * @subdev_notifier: A sub-device notifier implicitly registered for the sub-
+ * device using v4l2_device_register_sensor_subdev().
* @pdata: common part of subdevice platform data
*
* Each instance of a subdev driver should create this struct, either
@@ -823,6 +825,7 @@ struct v4l2_subdev {
struct list_head async_list;
struct v4l2_async_subdev *asd;
struct v4l2_async_notifier *notifier;
+ struct v4l2_async_notifier *subdev_notifier;
struct v4l2_subdev_platform_data *pdata;
};
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 2135c9ba6ac3..39efb968b7a4 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -17,7 +17,6 @@
#define _INET_SOCK_H
#include <linux/bitops.h>
-#include <linux/kmemcheck.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/jhash.h>
@@ -84,7 +83,6 @@ struct inet_request_sock {
#define ireq_state req.__req_common.skc_state
#define ireq_family req.__req_common.skc_family
- kmemcheck_bitfield_begin(flags);
u16 snd_wscale : 4,
rcv_wscale : 4,
tstamp_ok : 1,
@@ -94,7 +92,6 @@ struct inet_request_sock {
acked : 1,
no_srccheck: 1,
smc_ok : 1;
- kmemcheck_bitfield_end(flags);
u32 ir_mark;
union {
struct ip_options_rcu __rcu *ireq_opt;
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 6a75d67a30fd..1356fa6a7566 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -15,8 +15,6 @@
#ifndef _INET_TIMEWAIT_SOCK_
#define _INET_TIMEWAIT_SOCK_
-
-#include <linux/kmemcheck.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/types.h>
@@ -69,14 +67,12 @@ struct inet_timewait_sock {
/* Socket demultiplex comparisons on incoming packets. */
/* these three are in inet_sock */
__be16 tw_sport;
- kmemcheck_bitfield_begin(flags);
/* And these are ours. */
unsigned int tw_kill : 1,
tw_transparent : 1,
tw_flowlabel : 20,
tw_pad : 2, /* 2 bits hole */
tw_tos : 8;
- kmemcheck_bitfield_end(flags);
struct timer_list tw_timer;
struct inet_bind_bucket *tw_tb;
};
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 5d08c1950e7d..ff68cf288f9b 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -984,12 +984,12 @@ static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
{
- return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]);
+ return READ_ONCE(ipvs->sysctl_sync_threshold[1]);
}
static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
{
- return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period);
+ return READ_ONCE(ipvs->sysctl_sync_refresh_period);
}
static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
@@ -1014,7 +1014,7 @@ static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
{
- return ACCESS_ONCE(ipvs->sysctl_sync_ports);
+ return READ_ONCE(ipvs->sysctl_sync_ports);
}
static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 792c3f6d30ce..f5223bf2c420 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -285,7 +285,7 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
struct kernel_param;
-int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
+int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp);
int nf_conntrack_hash_resize(unsigned int hashsize);
extern struct hlist_nulls_head *nf_conntrack_hash;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 01570a8f2982..fecc6112c768 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1170,8 +1170,8 @@ static inline u8 nft_genmask_next(const struct net *net)
static inline u8 nft_genmask_cur(const struct net *net)
{
- /* Use ACCESS_ONCE() to prevent refetching the value for atomicity */
- return 1 << ACCESS_ONCE(net->nft.gencursor);
+ /* Use READ_ONCE() to prevent refetching the value for atomicity */
+ return 1 << READ_ONCE(net->nft.gencursor);
}
#define NFT_GENMASK_ANY ((1 << 0) | (1 << 1))
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 5e12975fc658..44668c29701a 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -160,6 +160,7 @@ struct netns_ipv4 {
struct inet_timewait_death_row tcp_death_row;
int sysctl_max_syn_backlog;
int sysctl_tcp_fastopen;
+ const struct tcp_congestion_ops __rcu *tcp_congestion_control;
struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
spinlock_t tcp_fastopen_ctx_lock;
unsigned int sysctl_tcp_fastopen_blackhole_timeout;
diff --git a/include/net/sock.h b/include/net/sock.h
index 688a823dccc3..79e1a2c7912c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -267,6 +267,7 @@ struct sock_common {
* @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
* @sk_gso_max_size: Maximum GSO segment size to build
* @sk_gso_max_segs: Maximum number of GSO segments
+ * @sk_pacing_shift: scaling factor for TCP Small Queues
* @sk_lingertime: %SO_LINGER l_linger setting
* @sk_backlog: always used with the per-socket spinlock held
* @sk_callback_lock: used with the callbacks in the end of this struct
@@ -439,7 +440,6 @@ struct sock {
#define SK_FL_TYPE_MASK 0xffff0000
#endif
- kmemcheck_bitfield_begin(flags);
unsigned int sk_padding : 1,
sk_kern_sock : 1,
sk_no_check_tx : 1,
@@ -448,9 +448,8 @@ struct sock {
sk_protocol : 8,
sk_type : 16;
#define SK_PROTOCOL_MAX U8_MAX
- kmemcheck_bitfield_end(flags);
-
u16 sk_gso_max_segs;
+ u8 sk_pacing_shift;
unsigned long sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
@@ -1112,7 +1111,7 @@ struct proto {
struct kmem_cache *slab;
unsigned int obj_size;
- int slab_flags;
+ slab_flags_t slab_flags;
struct percpu_counter *orphan_count;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index ed71511e67a6..85ea578195d4 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1002,8 +1002,8 @@ void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
-int tcp_set_default_congestion_control(const char *name);
-void tcp_get_default_congestion_control(char *name);
+int tcp_set_default_congestion_control(struct net *net, const char *name);
+void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
@@ -1017,7 +1017,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
-u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
+u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
@@ -1630,9 +1630,6 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
{
if (tcp_write_queue_empty(sk))
tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
-
- if (tcp_sk(sk)->highest_sack == skb_unlinked)
- tcp_sk(sk)->highest_sack = NULL;
}
static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
@@ -1645,12 +1642,8 @@ static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb
__tcp_add_write_queue_tail(sk, skb);
/* Queue it, remembering where we must start sending. */
- if (sk->sk_write_queue.next == skb) {
+ if (sk->sk_write_queue.next == skb)
tcp_chrono_start(sk, TCP_CHRONO_BUSY);
-
- if (tcp_sk(sk)->highest_sack == NULL)
- tcp_sk(sk)->highest_sack = skb;
- }
}
/* Insert new before skb on the write queue of sk. */
@@ -1708,9 +1701,7 @@ static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
- struct sk_buff *next = skb_rb_next(skb);
-
- tcp_sk(sk)->highest_sack = next ?: tcp_send_head(sk);
+ tcp_sk(sk)->highest_sack = skb_rb_next(skb);
}
static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
@@ -1720,9 +1711,7 @@ static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
static inline void tcp_highest_sack_reset(struct sock *sk)
{
- struct sk_buff *skb = tcp_rtx_queue_head(sk);
-
- tcp_sk(sk)->highest_sack = skb ?: tcp_send_head(sk);
+ tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
}
/* Called when old skb is about to be deleted and replaced by new skb */
diff --git a/include/net/tls.h b/include/net/tls.h
index b89d397dd62f..936cfc5cab7d 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -35,6 +35,10 @@
#define _TLS_OFFLOAD_H
#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/socket.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
#include <uapi/linux/tls.h>
@@ -83,6 +87,8 @@ struct tls_context {
void *priv_ctx;
+ u8 tx_conf:2;
+
u16 prepend_size;
u16 tag_size;
u16 overhead_size;
@@ -97,7 +103,6 @@ struct tls_context {
u16 pending_open_record_frags;
int (*push_pending_record)(struct sock *sk, int flags);
- void (*free_resources)(struct sock *sk);
void (*sk_write_space)(struct sock *sk);
void (*sk_proto_close)(struct sock *sk, long timeout);
@@ -122,6 +127,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
void tls_sw_close(struct sock *sk, long timeout);
+void tls_sw_free_tx_resources(struct sock *sk);
void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
void tls_icsk_clean_acked(struct sock *sk);
@@ -212,6 +218,21 @@ static inline void tls_fill_prepend(struct tls_context *ctx,
ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
}
+static inline void tls_make_aad(char *buf,
+ size_t size,
+ char *record_sequence,
+ int record_sequence_size,
+ unsigned char record_type)
+{
+ memcpy(buf, record_sequence, record_sequence_size);
+
+ buf[8] = record_type;
+ buf[9] = TLS_1_2_VERSION_MAJOR;
+ buf[10] = TLS_1_2_VERSION_MINOR;
+ buf[11] = size >> 8;
+ buf[12] = size & 0xFF;
+}
+
static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index ec5008cf5d51..18c564f60e93 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -125,8 +125,9 @@ int rdma_resolve_ip_route(struct sockaddr *src_addr,
void rdma_addr_cancel(struct rdma_dev_addr *addr);
-int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
- const unsigned char *dst_dev_addr);
+void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
+ const struct net_device *dev,
+ const unsigned char *dst_dev_addr);
int rdma_addr_size(struct sockaddr *addr);
@@ -245,10 +246,11 @@ static inline void rdma_addr_set_dgid(struct rdma_dev_addr *dev_addr, union ib_g
static inline enum ib_mtu iboe_get_mtu(int mtu)
{
/*
- * reduce IB headers from effective IBoE MTU. 28 stands for
- * atomic header which is the biggest possible header after BTH
+ * Reduce IB headers from effective IBoE MTU.
*/
- mtu = mtu - IB_GRH_BYTES - IB_BTH_BYTES - 28;
+ mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES +
+ IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES +
+ IB_ICRC_BYTES);
if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096))
return IB_MTU_4096;
@@ -305,12 +307,12 @@ static inline void rdma_get_ll_mac(struct in6_addr *addr, u8 *mac)
static inline int rdma_is_multicast_addr(struct in6_addr *addr)
{
- u32 ipv4_addr;
+ __be32 ipv4_addr;
if (addr->s6_addr[0] == 0xff)
return 1;
- memcpy(&ipv4_addr, addr->s6_addr + 12, 4);
+ ipv4_addr = addr->s6_addr32[3];
return (ipv6_addr_v4mapped(addr) && ipv4_is_multicast(ipv4_addr));
}
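A rough worked example of the reworked iboe_get_mtu() overhead, using the header-size constants this patch adds to ib_pack.h below (the example itself is not part of the patch):

/* Worst-case RoCE header overhead per the new constants:
 *   IB_GRH_BYTES(40) + IB_UDP_BYTES(8) + IB_BTH_BYTES(12) +
 *   IB_EXT_XRC_BYTES(4) + IB_EXT_ATOMICETH_BYTES(28) + IB_ICRC_BYTES(4) = 96
 *
 * iboe_get_mtu(1500) -> 1500 - 96 = 1404 -> IB_MTU_1024
 * iboe_get_mtu(4200) -> 4200 - 96 = 4104 -> IB_MTU_4096
 */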
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index 36655899ee02..7ea1382ad0e5 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -37,14 +37,17 @@
#include <uapi/linux/if_ether.h>
enum {
- IB_LRH_BYTES = 8,
- IB_ETH_BYTES = 14,
- IB_VLAN_BYTES = 4,
- IB_GRH_BYTES = 40,
- IB_IP4_BYTES = 20,
- IB_UDP_BYTES = 8,
- IB_BTH_BYTES = 12,
- IB_DETH_BYTES = 8
+ IB_LRH_BYTES = 8,
+ IB_ETH_BYTES = 14,
+ IB_VLAN_BYTES = 4,
+ IB_GRH_BYTES = 40,
+ IB_IP4_BYTES = 20,
+ IB_UDP_BYTES = 8,
+ IB_BTH_BYTES = 12,
+ IB_DETH_BYTES = 8,
+ IB_EXT_ATOMICETH_BYTES = 28,
+ IB_EXT_XRC_BYTES = 4,
+ IB_ICRC_BYTES = 4
};
struct ib_field {
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 355b81f4242d..1f7f604db5aa 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -590,20 +590,20 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec)
(rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
}
-static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid)
+static inline void sa_path_set_slid(struct sa_path_rec *rec, u32 slid)
{
if (rec->rec_type == SA_PATH_REC_TYPE_IB)
- rec->ib.slid = htons(ntohl(slid));
+ rec->ib.slid = cpu_to_be16(slid);
else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
- rec->opa.slid = slid;
+ rec->opa.slid = cpu_to_be32(slid);
}
-static inline void sa_path_set_dlid(struct sa_path_rec *rec, __be32 dlid)
+static inline void sa_path_set_dlid(struct sa_path_rec *rec, u32 dlid)
{
if (rec->rec_type == SA_PATH_REC_TYPE_IB)
- rec->ib.dlid = htons(ntohl(dlid));
+ rec->ib.dlid = cpu_to_be16(dlid);
else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
- rec->opa.dlid = dlid;
+ rec->opa.dlid = cpu_to_be32(dlid);
}
static inline void sa_path_set_raw_traffic(struct sa_path_rec *rec,
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 5eb7f5bc8248..6a17f856f841 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -111,10 +111,6 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
u64 bound);
-void rbt_ib_umem_insert(struct umem_odp_node *node,
- struct rb_root_cached *root);
-void rbt_ib_umem_remove(struct umem_odp_node *node,
- struct rb_root_cached *root);
typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
void *cookie);
/*
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e8608b2dc844..fd84cda5ed7c 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -229,6 +229,8 @@ enum ib_device_cap_flags {
/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
+ /* The device supports padding incoming writes to cacheline. */
+ IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
};
enum ib_signature_prot_cap {
@@ -309,6 +311,15 @@ struct ib_cq_init_attr {
u32 flags;
};
+enum ib_cq_attr_mask {
+ IB_CQ_MODERATE = 1 << 0,
+};
+
+struct ib_cq_caps {
+ u16 max_cq_moderation_count;
+ u16 max_cq_moderation_period;
+};
+
struct ib_device_attr {
u64 fw_ver;
__be64 sys_image_guid;
@@ -359,6 +370,7 @@ struct ib_device_attr {
u32 max_wq_type_rq;
u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
struct ib_tm_caps tm_caps;
+ struct ib_cq_caps cq_caps;
};
enum ib_mtu {
@@ -1098,6 +1110,7 @@ enum ib_qp_create_flags {
IB_QP_CREATE_SCATTER_FCS = 1 << 8,
IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
IB_QP_CREATE_SOURCE_QPN = 1 << 10,
+ IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
/* reserve bits 26-31 for low level drivers' internal use */
IB_QP_CREATE_RESERVED_START = 1 << 26,
IB_QP_CREATE_RESERVED_END = 1 << 31,
@@ -1621,6 +1634,7 @@ enum ib_wq_flags {
IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
+ IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
};
struct ib_wq_init_attr {
@@ -2858,6 +2872,21 @@ void ib_dealloc_pd(struct ib_pd *pd);
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
/**
+ * rdma_create_user_ah - Creates an address handle for the given address vector.
+ * It resolves the destination MAC address for AH attributes of RoCE type.
+ * @pd: The protection domain associated with the address handle.
+ * @ah_attr: The attributes of the address vector.
+ * @udata: pointer to the user's input/output buffer information needed by
+ * the provider driver.
+ *
+ * It returns a valid address handle on success and an ERR_PTR on failure.
+ * The address handle is used to reference a local or global destination
+ * in all UD QP post sends.
+ */
+struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
+ struct rdma_ah_attr *ah_attr,
+ struct ib_udata *udata);
+/**
* ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
* work completion.
* @hdr: the L3 header to parse
@@ -3140,13 +3169,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
int ib_resize_cq(struct ib_cq *cq, int cqe);
/**
- * ib_modify_cq - Modifies moderation params of the CQ
+ * rdma_set_cq_moderation - Modifies moderation params of the CQ
* @cq: The CQ to modify.
* @cq_count: number of CQEs that will trigger an event
* @cq_period: max period of time in usec before triggering an event
*
*/
-int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
/**
* ib_destroy_cq - Destroys the specified CQ.
@@ -3607,8 +3636,6 @@ void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);
-int ib_resolve_eth_dmac(struct ib_device *device,
- struct rdma_ah_attr *ah_attr);
int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
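A sketch of how a consumer might combine the renamed rdma_set_cq_moderation() with the new cq_caps limits; the helper name and the assumption that the device and CQ pointers already exist are illustrative only:

/* Sketch: clamp the requested moderation to the device's advertised caps. */
static int example_set_cq_moderation(struct ib_device *dev, struct ib_cq *cq,
				     u16 count, u16 period_usec)
{
	const struct ib_cq_caps *caps = &dev->attrs.cq_caps;

	count = min(count, caps->max_cq_moderation_count);
	period_usec = min(period_usec, caps->max_cq_moderation_period);
	return rdma_set_cq_moderation(cq, count, period_usec);
}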
diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h
index e6e90f18e6d5..f68fca296631 100644
--- a/include/rdma/opa_addr.h
+++ b/include/rdma/opa_addr.h
@@ -97,15 +97,15 @@ static inline u32 opa_get_lid_from_gid(const union ib_gid *gid)
* @dlid: The DLID
* @slid: The SLID
*/
-static inline bool opa_is_extended_lid(u32 dlid, u32 slid)
+static inline bool opa_is_extended_lid(__be32 dlid, __be32 slid)
{
if ((be32_to_cpu(dlid) >=
be16_to_cpu(IB_MULTICAST_LID_BASE)) ||
(be32_to_cpu(slid) >=
be16_to_cpu(IB_MULTICAST_LID_BASE)))
return true;
- else
- return false;
+
+ return false;
}
/* Get multicast lid base */
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 0eed3d8752fa..89ab88c342b6 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -282,7 +282,6 @@ struct rvt_qp {
u32 remote_qpn;
u32 qkey; /* QKEY for this QP (for UD or RD) */
u32 s_size; /* send work queue size */
- u32 s_ahgpsn; /* set to the psn in the copy of the header */
u16 pmtu; /* decoded from path_mtu */
u8 log_pmtu; /* shift for pmtu */
@@ -344,7 +343,6 @@ struct rvt_qp {
struct rvt_swqe *s_wqe;
struct rvt_sge_state s_sge; /* current send request data */
struct rvt_mregion *s_rdma_mr;
- u32 s_cur_size; /* size of send packet in bytes */
u32 s_len; /* total length of s_sge */
u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
u32 s_last_psn; /* last response PSN processed */
@@ -358,8 +356,10 @@ struct rvt_qp {
u32 s_acked; /* last un-ACK'ed entry */
u32 s_last; /* last completed entry */
u32 s_lsn; /* limit sequence number (credit) */
- u16 s_hdrwords; /* size of s_hdr in 32 bit words */
+ u32 s_ahgpsn; /* set to the psn in the copy of the header */
+ u16 s_cur_size; /* size of send packet in bytes */
u16 s_rdma_ack_cnt;
+ u8 s_hdrwords; /* size of s_hdr in 32 bit words */
s8 s_ahgidx;
u8 s_state; /* opcode of last packet sent */
u8 s_ack_state; /* opcode of packet to ACK */
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 388aaf72b480..0f9cbf96c093 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -60,38 +60,32 @@ enum sas_phy_type {
* so when updating/adding events here, please also
* update the other file too.
*/
-enum ha_event {
- HAE_RESET = 0U,
- HA_NUM_EVENTS = 1,
-};
-
enum port_event {
PORTE_BYTES_DMAED = 0U,
- PORTE_BROADCAST_RCVD = 1,
- PORTE_LINK_RESET_ERR = 2,
- PORTE_TIMER_EVENT = 3,
- PORTE_HARD_RESET = 4,
- PORT_NUM_EVENTS = 5,
+ PORTE_BROADCAST_RCVD,
+ PORTE_LINK_RESET_ERR,
+ PORTE_TIMER_EVENT,
+ PORTE_HARD_RESET,
+ PORT_NUM_EVENTS,
};
enum phy_event {
PHYE_LOSS_OF_SIGNAL = 0U,
- PHYE_OOB_DONE = 1,
- PHYE_OOB_ERROR = 2,
- PHYE_SPINUP_HOLD = 3, /* hot plug SATA, no COMWAKE sent */
- PHYE_RESUME_TIMEOUT = 4,
- PHY_NUM_EVENTS = 5,
+ PHYE_OOB_DONE,
+ PHYE_OOB_ERROR,
+ PHYE_SPINUP_HOLD, /* hot plug SATA, no COMWAKE sent */
+ PHYE_RESUME_TIMEOUT,
+ PHY_NUM_EVENTS,
};
enum discover_event {
DISCE_DISCOVER_DOMAIN = 0U,
- DISCE_REVALIDATE_DOMAIN = 1,
- DISCE_PORT_GONE = 2,
- DISCE_PROBE = 3,
- DISCE_SUSPEND = 4,
- DISCE_RESUME = 5,
- DISCE_DESTRUCT = 6,
- DISC_NUM_EVENTS = 7,
+ DISCE_REVALIDATE_DOMAIN,
+ DISCE_PROBE,
+ DISCE_SUSPEND,
+ DISCE_RESUME,
+ DISCE_DESTRUCT,
+ DISC_NUM_EVENTS,
};
/* ---------- Expander Devices ---------- */
@@ -261,8 +255,6 @@ struct sas_discovery {
/* The port struct is Class:RW, driver:RO */
struct asd_sas_port {
/* private: */
- struct completion port_gone_completion;
-
struct sas_discovery disc;
struct domain_device *port_dev;
spinlock_t dev_list_lock;
@@ -362,18 +354,6 @@ struct scsi_core {
};
-struct sas_ha_event {
- struct sas_work work;
- struct sas_ha_struct *ha;
-};
-
-static inline struct sas_ha_event *to_sas_ha_event(struct work_struct *work)
-{
- struct sas_ha_event *ev = container_of(work, typeof(*ev), work.work);
-
- return ev;
-}
-
enum sas_ha_state {
SAS_HA_REGISTERED,
SAS_HA_DRAINING,
@@ -383,9 +363,6 @@ enum sas_ha_state {
struct sas_ha_struct {
/* private: */
- struct sas_ha_event ha_events[HA_NUM_EVENTS];
- unsigned long pending;
-
struct list_head defer_q; /* work queued while draining */
struct mutex drain_mutex;
unsigned long state;
@@ -415,7 +392,6 @@ struct sas_ha_struct {
* their siblings when forming wide ports */
/* LLDD calls these to notify the class of an event. */
- int (*notify_ha_event)(struct sas_ha_struct *, enum ha_event);
int (*notify_port_event)(struct asd_sas_phy *, enum port_event);
int (*notify_phy_event)(struct asd_sas_phy *, enum phy_event);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 571ddb49b926..1fb6ad3c5006 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -65,9 +65,10 @@ enum scsi_device_event {
SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED, /* 2A 01 UA reported */
SDEV_EVT_LUN_CHANGE_REPORTED, /* 3F 0E UA reported */
SDEV_EVT_ALUA_STATE_CHANGE_REPORTED, /* 2A 06 UA reported */
+ SDEV_EVT_POWER_ON_RESET_OCCURRED, /* 29 00 UA reported */
SDEV_EVT_FIRST = SDEV_EVT_MEDIA_CHANGE,
- SDEV_EVT_LAST = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED,
+ SDEV_EVT_LAST = SDEV_EVT_POWER_ON_RESET_OCCURRED,
SDEV_EVT_MAXBITS = SDEV_EVT_LAST + 1
};
@@ -221,6 +222,7 @@ struct scsi_device {
unsigned char access_state;
struct mutex state_mutex;
enum scsi_device_state sdev_state;
+ struct task_struct *quiesced_by;
unsigned long sdev_data[0];
} __attribute__((aligned(sizeof(unsigned long))));
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 3575693bb628..3cf125b56c3a 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -4,32 +4,57 @@
/*
* Flags for SCSI devices that need special treatment
*/
-#define BLIST_NOLUN 0x001 /* Only scan LUN 0 */
-#define BLIST_FORCELUN 0x002 /* Known to have LUNs, force scanning,
- deprecated: Use max_luns=N */
-#define BLIST_BORKEN 0x004 /* Flag for broken handshaking */
-#define BLIST_KEY 0x008 /* unlock by special command */
-#define BLIST_SINGLELUN 0x010 /* Do not use LUNs in parallel */
-#define BLIST_NOTQ 0x020 /* Buggy Tagged Command Queuing */
-#define BLIST_SPARSELUN 0x040 /* Non consecutive LUN numbering */
-#define BLIST_MAX5LUN 0x080 /* Avoid LUNS >= 5 */
-#define BLIST_ISROM 0x100 /* Treat as (removable) CD-ROM */
-#define BLIST_LARGELUN 0x200 /* LUNs past 7 on a SCSI-2 device */
-#define BLIST_INQUIRY_36 0x400 /* override additional length field */
-#define BLIST_NOSTARTONADD 0x1000 /* do not do automatic start on add */
-#define BLIST_REPORTLUN2 0x20000 /* try REPORT_LUNS even for SCSI-2 devs
- (if HBA supports more than 8 LUNs) */
-#define BLIST_NOREPORTLUN 0x40000 /* don't try REPORT_LUNS scan (SCSI-3 devs) */
-#define BLIST_NOT_LOCKABLE 0x80000 /* don't use PREVENT-ALLOW commands */
-#define BLIST_NO_ULD_ATTACH 0x100000 /* device is actually for RAID config */
-#define BLIST_SELECT_NO_ATN 0x200000 /* select without ATN */
-#define BLIST_RETRY_HWERROR 0x400000 /* retry HARDWARE_ERROR */
-#define BLIST_MAX_512 0x800000 /* maximum 512 sector cdb length */
-#define BLIST_NO_DIF 0x2000000 /* Disable T10 PI (DIF) */
-#define BLIST_SKIP_VPD_PAGES 0x4000000 /* Ignore SBC-3 VPD pages */
-#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
-#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
-#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
-#define BLIST_UNMAP_LIMIT_WS 0x80000000 /* Use UNMAP limit for WRITE SAME */
+
+/* Only scan LUN 0 */
+#define BLIST_NOLUN ((__force __u32 __bitwise)(1 << 0))
+/* Known to have LUNs, force scanning.
+ * DEPRECATED: Use max_luns=N */
+#define BLIST_FORCELUN ((__force __u32 __bitwise)(1 << 1))
+/* Flag for broken handshaking */
+#define BLIST_BORKEN ((__force __u32 __bitwise)(1 << 2))
+/* unlock by special command */
+#define BLIST_KEY ((__force __u32 __bitwise)(1 << 3))
+/* Do not use LUNs in parallel */
+#define BLIST_SINGLELUN ((__force __u32 __bitwise)(1 << 4))
+/* Buggy Tagged Command Queuing */
+#define BLIST_NOTQ ((__force __u32 __bitwise)(1 << 5))
+/* Non consecutive LUN numbering */
+#define BLIST_SPARSELUN ((__force __u32 __bitwise)(1 << 6))
+/* Avoid LUNS >= 5 */
+#define BLIST_MAX5LUN ((__force __u32 __bitwise)(1 << 7))
+/* Treat as (removable) CD-ROM */
+#define BLIST_ISROM ((__force __u32 __bitwise)(1 << 8))
+/* LUNs past 7 on a SCSI-2 device */
+#define BLIST_LARGELUN ((__force __u32 __bitwise)(1 << 9))
+/* override additional length field */
+#define BLIST_INQUIRY_36 ((__force __u32 __bitwise)(1 << 10))
+/* do not do automatic start on add */
+#define BLIST_NOSTARTONADD ((__force __u32 __bitwise)(1 << 12))
+/* try REPORT_LUNS even for SCSI-2 devs (if HBA supports more than 8 LUNs) */
+#define BLIST_REPORTLUN2 ((__force __u32 __bitwise)(1 << 17))
+/* don't try REPORT_LUNS scan (SCSI-3 devs) */
+#define BLIST_NOREPORTLUN ((__force __u32 __bitwise)(1 << 18))
+/* don't use PREVENT-ALLOW commands */
+#define BLIST_NOT_LOCKABLE ((__force __u32 __bitwise)(1 << 19))
+/* device is actually for RAID config */
+#define BLIST_NO_ULD_ATTACH ((__force __u32 __bitwise)(1 << 20))
+/* select without ATN */
+#define BLIST_SELECT_NO_ATN ((__force __u32 __bitwise)(1 << 21))
+/* retry HARDWARE_ERROR */
+#define BLIST_RETRY_HWERROR ((__force __u32 __bitwise)(1 << 22))
+/* maximum 512 sector cdb length */
+#define BLIST_MAX_512 ((__force __u32 __bitwise)(1 << 23))
+/* Disable T10 PI (DIF) */
+#define BLIST_NO_DIF ((__force __u32 __bitwise)(1 << 25))
+/* Ignore SBC-3 VPD pages */
+#define BLIST_SKIP_VPD_PAGES ((__force __u32 __bitwise)(1 << 26))
+/* Attempt to read VPD pages */
+#define BLIST_TRY_VPD_PAGES ((__force __u32 __bitwise)(1 << 28))
+/* don't try to issue RSOC */
+#define BLIST_NO_RSOC ((__force __u32 __bitwise)(1 << 29))
+/* maximum 1024 sector cdb length */
+#define BLIST_MAX_1024 ((__force __u32 __bitwise)(1 << 30))
+/* Use UNMAP limit for WRITE SAME */
+#define BLIST_UNMAP_LIMIT_WS ((__force __u32 __bitwise)(1 << 31))
#endif
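A short sketch of the intent behind the __bitwise conversion: with a sparse-visible type the flags can no longer be mixed silently with plain integers. The typedef and the flag combination below are hypothetical, for illustration only:

/* Hypothetical typedef mirroring the new flag type. */
typedef __u32 __bitwise example_blist_flags_t;

static const example_blist_flags_t example_flags =
	BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_TRY_VPD_PAGES;
/* Passing a bare integer such as 0x40 here would now be flagged by sparse. */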
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index 1c41dbcfcb35..1df8efb0ee01 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -302,19 +302,42 @@ struct scsi_lun {
/* Reporting options for REPORT ZONES */
enum zbc_zone_reporting_options {
- ZBC_ZONE_REPORTING_OPTION_ALL = 0,
- ZBC_ZONE_REPORTING_OPTION_EMPTY,
- ZBC_ZONE_REPORTING_OPTION_IMPLICIT_OPEN,
- ZBC_ZONE_REPORTING_OPTION_EXPLICIT_OPEN,
- ZBC_ZONE_REPORTING_OPTION_CLOSED,
- ZBC_ZONE_REPORTING_OPTION_FULL,
- ZBC_ZONE_REPORTING_OPTION_READONLY,
- ZBC_ZONE_REPORTING_OPTION_OFFLINE,
- ZBC_ZONE_REPORTING_OPTION_NEED_RESET_WP = 0x10,
- ZBC_ZONE_REPORTING_OPTION_NON_SEQWRITE,
- ZBC_ZONE_REPORTING_OPTION_NON_WP = 0x3f,
+ ZBC_ZONE_REPORTING_OPTION_ALL = 0x00,
+ ZBC_ZONE_REPORTING_OPTION_EMPTY = 0x01,
+ ZBC_ZONE_REPORTING_OPTION_IMPLICIT_OPEN = 0x02,
+ ZBC_ZONE_REPORTING_OPTION_EXPLICIT_OPEN = 0x03,
+ ZBC_ZONE_REPORTING_OPTION_CLOSED = 0x04,
+ ZBC_ZONE_REPORTING_OPTION_FULL = 0x05,
+ ZBC_ZONE_REPORTING_OPTION_READONLY = 0x06,
+ ZBC_ZONE_REPORTING_OPTION_OFFLINE = 0x07,
+ /* 0x08 to 0x0f are reserved */
+ ZBC_ZONE_REPORTING_OPTION_NEED_RESET_WP = 0x10,
+ ZBC_ZONE_REPORTING_OPTION_NON_SEQWRITE = 0x11,
+ /* 0x12 to 0x3e are reserved */
+ ZBC_ZONE_REPORTING_OPTION_NON_WP = 0x3f,
};
#define ZBC_REPORT_ZONE_PARTIAL 0x80
+/* Zone types of REPORT ZONES zone descriptors */
+enum zbc_zone_type {
+ ZBC_ZONE_TYPE_CONV = 0x1,
+ ZBC_ZONE_TYPE_SEQWRITE_REQ = 0x2,
+ ZBC_ZONE_TYPE_SEQWRITE_PREF = 0x3,
+ /* 0x4 to 0xf are reserved */
+};
+
+/* Zone conditions of REPORT ZONES zone descriptors */
+enum zbc_zone_cond {
+ ZBC_ZONE_COND_NO_WP = 0x0,
+ ZBC_ZONE_COND_EMPTY = 0x1,
+ ZBC_ZONE_COND_IMP_OPEN = 0x2,
+ ZBC_ZONE_COND_EXP_OPEN = 0x3,
+ ZBC_ZONE_COND_CLOSED = 0x4,
+ /* 0x5 to 0xc are reserved */
+ ZBC_ZONE_COND_READONLY = 0xd,
+ ZBC_ZONE_COND_FULL = 0xe,
+ ZBC_ZONE_COND_OFFLINE = 0xf,
+};
+
#endif /* _SCSI_PROTO_H_ */
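A sketch of how the new zone type and condition enums map onto a REPORT ZONES zone descriptor; the byte offsets follow the ZBC descriptor layout and the buffer pointer is an assumption:

/* Sketch: decode one zone descriptor into the new enums. */
static void example_decode_zone(const u8 *desc, enum zbc_zone_type *type,
				enum zbc_zone_cond *cond)
{
	*type = desc[0] & 0x0f;		/* byte 0, bits 3:0 */
	*cond = (desc[1] >> 4) & 0x0f;	/* byte 1, bits 7:4 */
}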
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index e8644eea9fe5..8cf30215c177 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -139,6 +139,8 @@ enum fc_vport_state {
#define FC_PORTSPEED_50GBIT 0x200
#define FC_PORTSPEED_100GBIT 0x400
#define FC_PORTSPEED_25GBIT 0x800
+#define FC_PORTSPEED_64BIT 0x1000
+#define FC_PORTSPEED_128BIT 0x2000
#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
/*
diff --git a/include/sound/ac97/codec.h b/include/sound/ac97/codec.h
new file mode 100644
index 000000000000..ec04be9ab119
--- /dev/null
+++ b/include/sound/ac97/codec.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SOUND_AC97_CODEC2_H
+#define __SOUND_AC97_CODEC2_H
+
+#include <linux/device.h>
+
+#define AC97_ID(vendor_id1, vendor_id2) \
+ ((((vendor_id1) & 0xffff) << 16) | ((vendor_id2) & 0xffff))
+#define AC97_DRIVER_ID(vendor_id1, vendor_id2, mask_id1, mask_id2, _data) \
+ { .id = (((vendor_id1) & 0xffff) << 16) | ((vendor_id2) & 0xffff), \
+ .mask = (((mask_id1) & 0xffff) << 16) | ((mask_id2) & 0xffff), \
+ .data = (_data) }
+
+struct ac97_controller;
+struct clk;
+
+/**
+ * struct ac97_id - matches a codec device and driver on an ac97 bus
+ * @id: The significant bits of the codec vendor ID1 and ID2
+ * @mask: Bitmask specifying which bits of the id field are significant when
+ * matching. A driver binds to a device when:
+ * ((vendorID1 << 16 | vendorID2) & (mask_id1 << 16 | mask_id2)) == id.
+ * @data: Private data used by the driver.
+ */
+struct ac97_id {
+ unsigned int id;
+ unsigned int mask;
+ void *data;
+};
+
+/**
+ * ac97_codec_device - an ac97 codec
+ * @dev: the core device
+ * @vendor_id: the vendor_id of the codec, as sensed on the AC-link
+ * @num: the codec number, 0 is primary, 1 is first slave, etc ...
+ * @clk: the clock BIT_CLK provided by the codec
+ * @ac97_ctrl: ac97 digital controller on the same AC-link
+ *
+ * This is the device instantiated for each codec living on an AC-link. There are
+ * normally 0 to 4 codec devices per AC-link, and all of them are controlled by
+ * an AC97 digital controller.
+ */
+struct ac97_codec_device {
+ struct device dev;
+ unsigned int vendor_id;
+ unsigned int num;
+ struct clk *clk;
+ struct ac97_controller *ac97_ctrl;
+};
+
+/**
+ * ac97_codec_driver - an ac97 codec driver
+ * @driver: the device driver structure
+ * @probe: the function called when an ac97_codec_device is matched
+ * @remove: the function called when the device is unbound/removed
+ * @shutdown: shutdown function (might be NULL)
+ * @id_table: ac97 vendor_id match table, { } member terminated
+ */
+struct ac97_codec_driver {
+ struct device_driver driver;
+ int (*probe)(struct ac97_codec_device *);
+ int (*remove)(struct ac97_codec_device *);
+ void (*shutdown)(struct ac97_codec_device *);
+ const struct ac97_id *id_table;
+};
+
+static inline struct ac97_codec_device *to_ac97_device(struct device *d)
+{
+ return container_of(d, struct ac97_codec_device, dev);
+}
+
+static inline struct ac97_codec_driver *to_ac97_driver(struct device_driver *d)
+{
+ return container_of(d, struct ac97_codec_driver, driver);
+}
+
+#if IS_ENABLED(CONFIG_AC97_BUS_NEW)
+int snd_ac97_codec_driver_register(struct ac97_codec_driver *drv);
+void snd_ac97_codec_driver_unregister(struct ac97_codec_driver *drv);
+#else
+static inline int
+snd_ac97_codec_driver_register(struct ac97_codec_driver *drv)
+{
+ return 0;
+}
+static inline void
+snd_ac97_codec_driver_unregister(struct ac97_codec_driver *drv)
+{
+}
+#endif
+
+
+static inline struct device *
+ac97_codec_dev2dev(struct ac97_codec_device *adev)
+{
+ return &adev->dev;
+}
+
+static inline void *ac97_get_drvdata(struct ac97_codec_device *adev)
+{
+ return dev_get_drvdata(ac97_codec_dev2dev(adev));
+}
+
+static inline void ac97_set_drvdata(struct ac97_codec_device *adev,
+ void *data)
+{
+ dev_set_drvdata(ac97_codec_dev2dev(adev), data);
+}
+
+void *snd_ac97_codec_get_platdata(const struct ac97_codec_device *adev);
+
+#endif
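A minimal codec-driver sketch built on the new bus API declared above; the vendor/mask IDs and the driver name are made up for illustration:

/* Sketch: match a hypothetical codec and register the driver. */
static int example_codec_probe(struct ac97_codec_device *adev)
{
	dev_info(ac97_codec_dev2dev(adev), "codec %08x on slot %u\n",
		 adev->vendor_id, adev->num);
	return 0;
}

static int example_codec_remove(struct ac97_codec_device *adev)
{
	return 0;
}

static const struct ac97_id example_ids[] = {
	AC97_DRIVER_ID(0x414b, 0x4d00, 0xffff, 0xff00, NULL),
	{ },				/* { } terminator required by @id_table */
};

static struct ac97_codec_driver example_codec_driver = {
	.driver		= { .name = "example-ac97-codec" },
	.probe		= example_codec_probe,
	.remove		= example_codec_remove,
	.id_table	= example_ids,
};

/* Registered with snd_ac97_codec_driver_register(&example_codec_driver) and
 * torn down with snd_ac97_codec_driver_unregister(&example_codec_driver). */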
diff --git a/include/sound/ac97/compat.h b/include/sound/ac97/compat.h
new file mode 100644
index 000000000000..1351cba40048
--- /dev/null
+++ b/include/sound/ac97/compat.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file is for backward compatibility with snd_ac97 structure and its
+ * multiple usages, such as the snd_ac97_bus and snd_ac97_build_ops.
+ *
+ */
+#ifndef AC97_COMPAT_H
+#define AC97_COMPAT_H
+
+#include <sound/ac97_codec.h>
+
+struct snd_ac97 *snd_ac97_compat_alloc(struct ac97_codec_device *adev);
+void snd_ac97_compat_release(struct snd_ac97 *ac97);
+
+#endif
diff --git a/include/sound/ac97/controller.h b/include/sound/ac97/controller.h
new file mode 100644
index 000000000000..b36ecdd64f14
--- /dev/null
+++ b/include/sound/ac97/controller.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef AC97_CONTROLLER_H
+#define AC97_CONTROLLER_H
+
+#include <linux/device.h>
+#include <linux/list.h>
+
+#define AC97_BUS_MAX_CODECS 4
+#define AC97_SLOTS_AVAILABLE_ALL 0xf
+
+struct ac97_controller_ops;
+
+/**
+ * struct ac97_controller - The AC97 controller of the AC-Link
+ * @ops: the AC97 operations.
+ * @controllers: linked list of all existing controllers.
+ * @adap: the shell device ac97-%d, i.e. the ac97 adapter
+ * @nr: the number of the shell device
+ * @slots_available: the mask of accessible/scannable codecs.
+ * @parent: the device providing the AC97 controller.
+ * @codecs: the 4 possible AC97 codecs (NULL if none found).
+ * @codecs_pdata: platform_data for each codec (NULL if no pdata).
+ *
+ * This structure is internal to the AC97 bus and should not be used by the
+ * controllers themselves, except for using @adap.
+ */
+struct ac97_controller {
+ const struct ac97_controller_ops *ops;
+ struct list_head controllers;
+ struct device adap;
+ int nr;
+ unsigned short slots_available;
+ struct device *parent;
+ struct ac97_codec_device *codecs[AC97_BUS_MAX_CODECS];
+ void *codecs_pdata[AC97_BUS_MAX_CODECS];
+};
+
+/**
+ * struct ac97_controller_ops - The AC97 operations
+ * @reset: Cold reset of the AC97 AC-Link.
+ * @warm_reset: Warm reset of the AC97 AC-Link.
+ * @read: Read of a single AC97 register.
+ * Returns the register value or a negative error code.
+ * @write: Write of a single AC97 register.
+ *
+ * These are the basic operations an AC97 controller must provide for the AC97
+ * access functions. Amongst these, all but the last two are mandatory.
+ * The slot number is also known as the AC97 codec number, between 0 and 3.
+ */
+struct ac97_controller_ops {
+ void (*reset)(struct ac97_controller *adrv);
+ void (*warm_reset)(struct ac97_controller *adrv);
+ int (*write)(struct ac97_controller *adrv, int slot,
+ unsigned short reg, unsigned short val);
+ int (*read)(struct ac97_controller *adrv, int slot, unsigned short reg);
+};
+
+#if IS_ENABLED(CONFIG_AC97_BUS_NEW)
+struct ac97_controller *snd_ac97_controller_register(
+ const struct ac97_controller_ops *ops, struct device *dev,
+ unsigned short slots_available, void **codecs_pdata);
+void snd_ac97_controller_unregister(struct ac97_controller *ac97_ctrl);
+#else
+static inline struct ac97_controller *
+snd_ac97_controller_register(const struct ac97_controller_ops *ops,
+ struct device *dev,
+ unsigned short slots_available,
+ void **codecs_pdata)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void
+snd_ac97_controller_unregister(struct ac97_controller *ac97_ctrl)
+{
+}
+#endif
+
+#endif
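A controller-side sketch pairing with the ops structure above; the register-access bodies are stubs and the probe/remove wiring is only indicated in comments:

/* Sketch: the four AC-link operations a controller driver would provide. */
static void example_ctrl_reset(struct ac97_controller *adrv) { }
static void example_ctrl_warm_reset(struct ac97_controller *adrv) { }

static int example_ctrl_read(struct ac97_controller *adrv, int slot,
			     unsigned short reg)
{
	return 0;	/* would return the register value or a negative errno */
}

static int example_ctrl_write(struct ac97_controller *adrv, int slot,
			      unsigned short reg, unsigned short val)
{
	return 0;
}

static const struct ac97_controller_ops example_ctrl_ops = {
	.reset		= example_ctrl_reset,
	.warm_reset	= example_ctrl_warm_reset,
	.read		= example_ctrl_read,
	.write		= example_ctrl_write,
};

/* In probe():  ctrl = snd_ac97_controller_register(&example_ctrl_ops, dev,
 *					AC97_SLOTS_AVAILABLE_ALL, codecs_pdata);
 * In remove(): snd_ac97_controller_unregister(ctrl);
 */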
diff --git a/include/sound/ac97/regs.h b/include/sound/ac97/regs.h
new file mode 100644
index 000000000000..4bb86d379bd5
--- /dev/null
+++ b/include/sound/ac97/regs.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
+ * Universal interface for Audio Codec '97
+ *
+ * For more details look to AC '97 component specification revision 2.1
+ * by Intel Corporation (http://developer.intel.com).
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/*
+ * AC'97 codec registers
+ */
+
+#define AC97_RESET 0x00 /* Reset */
+#define AC97_MASTER 0x02 /* Master Volume */
+#define AC97_HEADPHONE 0x04 /* Headphone Volume (optional) */
+#define AC97_MASTER_MONO 0x06 /* Master Volume Mono (optional) */
+#define AC97_MASTER_TONE 0x08 /* Master Tone (Bass & Treble) (optional) */
+#define AC97_PC_BEEP 0x0a /* PC Beep Volume (optional) */
+#define AC97_PHONE 0x0c /* Phone Volume (optional) */
+#define AC97_MIC 0x0e /* MIC Volume */
+#define AC97_LINE 0x10 /* Line In Volume */
+#define AC97_CD 0x12 /* CD Volume */
+#define AC97_VIDEO 0x14 /* Video Volume (optional) */
+#define AC97_AUX 0x16 /* AUX Volume (optional) */
+#define AC97_PCM 0x18 /* PCM Volume */
+#define AC97_REC_SEL 0x1a /* Record Select */
+#define AC97_REC_GAIN 0x1c /* Record Gain */
+#define AC97_REC_GAIN_MIC 0x1e /* Record Gain MIC (optional) */
+#define AC97_GENERAL_PURPOSE 0x20 /* General Purpose (optional) */
+#define AC97_3D_CONTROL 0x22 /* 3D Control (optional) */
+#define AC97_INT_PAGING 0x24 /* Audio Interrupt & Paging (AC'97 2.3) */
+#define AC97_POWERDOWN 0x26 /* Powerdown control / status */
+/* range 0x28-0x3a - AUDIO AC'97 2.0 extensions */
+#define AC97_EXTENDED_ID 0x28 /* Extended Audio ID */
+#define AC97_EXTENDED_STATUS 0x2a /* Extended Audio Status and Control */
+#define AC97_PCM_FRONT_DAC_RATE 0x2c /* PCM Front DAC Rate */
+#define AC97_PCM_SURR_DAC_RATE 0x2e /* PCM Surround DAC Rate */
+#define AC97_PCM_LFE_DAC_RATE 0x30 /* PCM LFE DAC Rate */
+#define AC97_PCM_LR_ADC_RATE 0x32 /* PCM LR ADC Rate */
+#define AC97_PCM_MIC_ADC_RATE 0x34 /* PCM MIC ADC Rate */
+#define AC97_CENTER_LFE_MASTER 0x36 /* Center + LFE Master Volume */
+#define AC97_SURROUND_MASTER 0x38 /* Surround (Rear) Master Volume */
+#define AC97_SPDIF 0x3a /* S/PDIF control */
+/* range 0x3c-0x58 - MODEM */
+#define AC97_EXTENDED_MID 0x3c /* Extended Modem ID */
+#define AC97_EXTENDED_MSTATUS 0x3e /* Extended Modem Status and Control */
+#define AC97_LINE1_RATE 0x40 /* Line1 DAC/ADC Rate */
+#define AC97_LINE2_RATE 0x42 /* Line2 DAC/ADC Rate */
+#define AC97_HANDSET_RATE 0x44 /* Handset DAC/ADC Rate */
+#define AC97_LINE1_LEVEL 0x46 /* Line1 DAC/ADC Level */
+#define AC97_LINE2_LEVEL 0x48 /* Line2 DAC/ADC Level */
+#define AC97_HANDSET_LEVEL 0x4a /* Handset DAC/ADC Level */
+#define AC97_GPIO_CFG 0x4c /* GPIO Configuration */
+#define AC97_GPIO_POLARITY 0x4e /* GPIO Pin Polarity/Type, 0=low, 1=high active */
+#define AC97_GPIO_STICKY 0x50 /* GPIO Pin Sticky, 0=not, 1=sticky */
+#define AC97_GPIO_WAKEUP 0x52 /* GPIO Pin Wakeup, 0=no int, 1=yes int */
+#define AC97_GPIO_STATUS 0x54 /* GPIO Pin Status, slot 12 */
+#define AC97_MISC_AFE 0x56 /* Miscellaneous Modem AFE Status and Control */
+/* range 0x5a-0x7b - Vendor Specific */
+#define AC97_VENDOR_ID1 0x7c /* Vendor ID1 */
+#define AC97_VENDOR_ID2 0x7e /* Vendor ID2 / revision */
+/* range 0x60-0x6f (page 1) - extended codec registers */
+#define AC97_CODEC_CLASS_REV 0x60 /* Codec Class/Revision */
+#define AC97_PCI_SVID 0x62 /* PCI Subsystem Vendor ID */
+#define AC97_PCI_SID 0x64 /* PCI Subsystem ID */
+#define AC97_FUNC_SELECT 0x66 /* Function Select */
+#define AC97_FUNC_INFO 0x68 /* Function Information */
+#define AC97_SENSE_INFO 0x6a /* Sense Details */
+
+/* volume controls */
+#define AC97_MUTE_MASK_MONO 0x8000
+#define AC97_MUTE_MASK_STEREO 0x8080
+
+/* slot allocation */
+#define AC97_SLOT_TAG 0
+#define AC97_SLOT_CMD_ADDR 1
+#define AC97_SLOT_CMD_DATA 2
+#define AC97_SLOT_PCM_LEFT 3
+#define AC97_SLOT_PCM_RIGHT 4
+#define AC97_SLOT_MODEM_LINE1 5
+#define AC97_SLOT_PCM_CENTER 6
+#define AC97_SLOT_MIC 6 /* input */
+#define AC97_SLOT_SPDIF_LEFT1 6
+#define AC97_SLOT_PCM_SLEFT 7 /* surround left */
+#define AC97_SLOT_PCM_LEFT_0 7 /* double rate operation */
+#define AC97_SLOT_SPDIF_LEFT 7
+#define AC97_SLOT_PCM_SRIGHT 8 /* surround right */
+#define AC97_SLOT_PCM_RIGHT_0 8 /* double rate operation */
+#define AC97_SLOT_SPDIF_RIGHT 8
+#define AC97_SLOT_LFE 9
+#define AC97_SLOT_SPDIF_RIGHT1 9
+#define AC97_SLOT_MODEM_LINE2 10
+#define AC97_SLOT_PCM_LEFT_1 10 /* double rate operation */
+#define AC97_SLOT_SPDIF_LEFT2 10
+#define AC97_SLOT_HANDSET 11 /* output */
+#define AC97_SLOT_PCM_RIGHT_1 11 /* double rate operation */
+#define AC97_SLOT_SPDIF_RIGHT2 11
+#define AC97_SLOT_MODEM_GPIO 12 /* modem GPIO */
+#define AC97_SLOT_PCM_CENTER_1 12 /* double rate operation */
+
+/* basic capabilities (reset register) */
+#define AC97_BC_DEDICATED_MIC 0x0001 /* Dedicated Mic PCM In Channel */
+#define AC97_BC_RESERVED1 0x0002 /* Reserved (was Modem Line Codec support) */
+#define AC97_BC_BASS_TREBLE 0x0004 /* Bass & Treble Control */
+#define AC97_BC_SIM_STEREO 0x0008 /* Simulated stereo */
+#define AC97_BC_HEADPHONE 0x0010 /* Headphone Out Support */
+#define AC97_BC_LOUDNESS 0x0020 /* Loudness (bass boost) Support */
+#define AC97_BC_16BIT_DAC 0x0000 /* 16-bit DAC resolution */
+#define AC97_BC_18BIT_DAC 0x0040 /* 18-bit DAC resolution */
+#define AC97_BC_20BIT_DAC 0x0080 /* 20-bit DAC resolution */
+#define AC97_BC_DAC_MASK 0x00c0
+#define AC97_BC_16BIT_ADC 0x0000 /* 16-bit ADC resolution */
+#define AC97_BC_18BIT_ADC 0x0100 /* 18-bit ADC resolution */
+#define AC97_BC_20BIT_ADC 0x0200 /* 20-bit ADC resolution */
+#define AC97_BC_ADC_MASK 0x0300
+#define AC97_BC_3D_TECH_ID_MASK 0x7c00 /* Per-vendor ID of 3D enhancement */
+
+/* general purpose */
+#define AC97_GP_DRSS_MASK 0x0c00 /* double rate slot select */
+#define AC97_GP_DRSS_1011 0x0000 /* LR(C) 10+11(+12) */
+#define AC97_GP_DRSS_78 0x0400 /* LR 7+8 */
+
+/* powerdown bits */
+#define AC97_PD_ADC_STATUS 0x0001 /* ADC status (RO) */
+#define AC97_PD_DAC_STATUS 0x0002 /* DAC status (RO) */
+#define AC97_PD_MIXER_STATUS 0x0004 /* Analog mixer status (RO) */
+#define AC97_PD_VREF_STATUS 0x0008 /* Vref status (RO) */
+#define AC97_PD_PR0 0x0100 /* Power down PCM ADCs and input MUX */
+#define AC97_PD_PR1 0x0200 /* Power down PCM front DAC */
+#define AC97_PD_PR2 0x0400 /* Power down Mixer (Vref still on) */
+#define AC97_PD_PR3 0x0800 /* Power down Mixer (Vref off) */
+#define AC97_PD_PR4 0x1000 /* Power down AC-Link */
+#define AC97_PD_PR5 0x2000 /* Disable internal clock usage */
+#define AC97_PD_PR6 0x4000 /* Headphone amplifier */
+#define AC97_PD_EAPD 0x8000 /* External Amplifier Power Down (EAPD) */
+
+/* extended audio ID bit defines */
+#define AC97_EI_VRA 0x0001 /* Variable bit rate supported */
+#define AC97_EI_DRA 0x0002 /* Double rate supported */
+#define AC97_EI_SPDIF 0x0004 /* S/PDIF out supported */
+#define AC97_EI_VRM 0x0008 /* Variable bit rate supported for MIC */
+#define AC97_EI_DACS_SLOT_MASK 0x0030 /* DACs slot assignment */
+#define AC97_EI_DACS_SLOT_SHIFT 4
+#define AC97_EI_CDAC 0x0040 /* PCM Center DAC available */
+#define AC97_EI_SDAC 0x0080 /* PCM Surround DACs available */
+#define AC97_EI_LDAC 0x0100 /* PCM LFE DAC available */
+#define AC97_EI_AMAP 0x0200 /* indicates optional slot/DAC mapping based on codec ID */
+#define AC97_EI_REV_MASK 0x0c00 /* AC'97 revision mask */
+#define AC97_EI_REV_22 0x0400 /* AC'97 revision 2.2 */
+#define AC97_EI_REV_23 0x0800 /* AC'97 revision 2.3 */
+#define AC97_EI_REV_SHIFT 10
+#define AC97_EI_ADDR_MASK 0xc000 /* physical codec ID (address) */
+#define AC97_EI_ADDR_SHIFT 14
+
+/* extended audio status and control bit defines */
+#define AC97_EA_VRA 0x0001 /* Variable bit rate enable bit */
+#define AC97_EA_DRA 0x0002 /* Double-rate audio enable bit */
+#define AC97_EA_SPDIF 0x0004 /* S/PDIF out enable bit */
+#define AC97_EA_VRM 0x0008 /* Variable bit rate for MIC enable bit */
+#define AC97_EA_SPSA_SLOT_MASK 0x0030 /* Mask for slot assignment bits */
+#define AC97_EA_SPSA_SLOT_SHIFT 4
+#define AC97_EA_SPSA_3_4 0x0000 /* Slot assigned to 3 & 4 */
+#define AC97_EA_SPSA_7_8 0x0010 /* Slot assigned to 7 & 8 */
+#define AC97_EA_SPSA_6_9 0x0020 /* Slot assigned to 6 & 9 */
+#define AC97_EA_SPSA_10_11 0x0030 /* Slot assigned to 10 & 11 */
+#define AC97_EA_CDAC 0x0040 /* PCM Center DAC is ready (Read only) */
+#define AC97_EA_SDAC 0x0080 /* PCM Surround DACs are ready (Read only) */
+#define AC97_EA_LDAC 0x0100 /* PCM LFE DAC is ready (Read only) */
+#define AC97_EA_MDAC 0x0200 /* MIC ADC is ready (Read only) */
+#define AC97_EA_SPCV 0x0400 /* S/PDIF configuration valid (Read only) */
+#define AC97_EA_PRI 0x0800 /* Turns the PCM Center DAC off */
+#define AC97_EA_PRJ 0x1000 /* Turns the PCM Surround DACs off */
+#define AC97_EA_PRK 0x2000 /* Turns the PCM LFE DAC off */
+#define AC97_EA_PRL 0x4000 /* Turns the MIC ADC off */
+
+/* S/PDIF control bit defines */
+#define AC97_SC_PRO 0x0001 /* Professional status */
+#define AC97_SC_NAUDIO 0x0002 /* Non audio stream */
+#define AC97_SC_COPY 0x0004 /* Copyright status */
+#define AC97_SC_PRE 0x0008 /* Preemphasis status */
+#define AC97_SC_CC_MASK 0x07f0 /* Category Code mask */
+#define AC97_SC_CC_SHIFT 4
+#define AC97_SC_L 0x0800 /* Generation Level status */
+#define AC97_SC_SPSR_MASK 0x3000 /* S/PDIF Sample Rate bits */
+#define AC97_SC_SPSR_SHIFT 12
+#define AC97_SC_SPSR_44K 0x0000 /* Use 44.1kHz Sample rate */
+#define AC97_SC_SPSR_48K 0x2000 /* Use 48kHz Sample rate */
+#define AC97_SC_SPSR_32K 0x3000 /* Use 32kHz Sample rate */
+#define AC97_SC_DRS 0x4000 /* Double Rate S/PDIF */
+#define AC97_SC_V 0x8000 /* Validity status */
+
+/* Interrupt and Paging bit defines (AC'97 2.3) */
+#define AC97_PAGE_MASK 0x000f /* Page Selector */
+#define AC97_PAGE_VENDOR 0 /* Vendor-specific registers */
+#define AC97_PAGE_1 1 /* Extended Codec Registers page 1 */
+#define AC97_INT_ENABLE 0x0800 /* Interrupt Enable */
+#define AC97_INT_SENSE 0x1000 /* Sense Cycle */
+#define AC97_INT_CAUSE_SENSE 0x2000 /* Sense Cycle Completed (RO) */
+#define AC97_INT_CAUSE_GPIO 0x4000 /* GPIO bits changed (RO) */
+#define AC97_INT_STATUS 0x8000 /* Interrupt Status */
+
+/* extended modem ID bit defines */
+#define AC97_MEI_LINE1 0x0001 /* Line1 present */
+#define AC97_MEI_LINE2 0x0002 /* Line2 present */
+#define AC97_MEI_HANDSET 0x0004 /* Handset present */
+#define AC97_MEI_CID1 0x0008 /* caller ID decode for Line1 is supported */
+#define AC97_MEI_CID2 0x0010 /* caller ID decode for Line2 is supported */
+#define AC97_MEI_ADDR_MASK 0xc000 /* physical codec ID (address) */
+#define AC97_MEI_ADDR_SHIFT 14
+
+/* extended modem status and control bit defines */
+#define AC97_MEA_GPIO 0x0001 /* GPIO is ready (ro) */
+#define AC97_MEA_MREF 0x0002 /* Vref is up to nominal level (ro) */
+#define AC97_MEA_ADC1 0x0004 /* ADC1 operational (ro) */
+#define AC97_MEA_DAC1 0x0008 /* DAC1 operational (ro) */
+#define AC97_MEA_ADC2 0x0010 /* ADC2 operational (ro) */
+#define AC97_MEA_DAC2 0x0020 /* DAC2 operational (ro) */
+#define AC97_MEA_HADC 0x0040 /* HADC operational (ro) */
+#define AC97_MEA_HDAC 0x0080 /* HDAC operational (ro) */
+#define AC97_MEA_PRA 0x0100 /* GPIO power down (high) */
+#define AC97_MEA_PRB 0x0200 /* reserved */
+#define AC97_MEA_PRC 0x0400 /* ADC1 power down (high) */
+#define AC97_MEA_PRD 0x0800 /* DAC1 power down (high) */
+#define AC97_MEA_PRE 0x1000 /* ADC2 power down (high) */
+#define AC97_MEA_PRF 0x2000 /* DAC2 power down (high) */
+#define AC97_MEA_PRG 0x4000 /* HADC power down (high) */
+#define AC97_MEA_PRH 0x8000 /* HDAC power down (high) */
+
+/* modem gpio status defines */
+#define AC97_GPIO_LINE1_OH 0x0001 /* Off Hook Line1 */
+#define AC97_GPIO_LINE1_RI 0x0002 /* Ring Detect Line1 */
+#define AC97_GPIO_LINE1_CID 0x0004 /* Caller ID path enable Line1 */
+#define AC97_GPIO_LINE1_LCS 0x0008 /* Loop Current Sense Line1 */
+#define AC97_GPIO_LINE1_PULSE 0x0010 /* Opt./ Pulse Dial Line1 (out) */
+#define AC97_GPIO_LINE1_HL1R 0x0020 /* Opt./ Handset to Line1 relay control (out) */
+#define AC97_GPIO_LINE1_HOHD 0x0040 /* Opt./ Handset off hook detect Line1 (in) */
+#define AC97_GPIO_LINE12_AC 0x0080 /* Opt./ Int.bit 1 / Line1/2 AC (out) */
+#define AC97_GPIO_LINE12_DC 0x0100 /* Opt./ Int.bit 2 / Line1/2 DC (out) */
+#define AC97_GPIO_LINE12_RS 0x0200 /* Opt./ Int.bit 3 / Line1/2 RS (out) */
+#define AC97_GPIO_LINE2_OH 0x0400 /* Off Hook Line2 */
+#define AC97_GPIO_LINE2_RI 0x0800 /* Ring Detect Line2 */
+#define AC97_GPIO_LINE2_CID 0x1000 /* Caller ID path enable Line2 */
+#define AC97_GPIO_LINE2_LCS 0x2000 /* Loop Current Sense Line2 */
+#define AC97_GPIO_LINE2_PULSE 0x4000 /* Opt./ Pulse Dial Line2 (out) */
+#define AC97_GPIO_LINE2_HL1R 0x8000 /* Opt./ Handset to Line2 relay control (out) */
+
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 15aa5f07c955..89d311a503d3 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -28,6 +28,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/workqueue.h>
+#include <sound/ac97/regs.h>
#include <sound/pcm.h>
#include <sound/control.h>
#include <sound/info.h>
@@ -35,244 +36,6 @@
/* maximum number of devices on the AC97 bus */
#define AC97_BUS_MAX_DEVICES 4
-/*
- * AC'97 codec registers
- */
-
-#define AC97_RESET 0x00 /* Reset */
-#define AC97_MASTER 0x02 /* Master Volume */
-#define AC97_HEADPHONE 0x04 /* Headphone Volume (optional) */
-#define AC97_MASTER_MONO 0x06 /* Master Volume Mono (optional) */
-#define AC97_MASTER_TONE 0x08 /* Master Tone (Bass & Treble) (optional) */
-#define AC97_PC_BEEP 0x0a /* PC Beep Volume (optinal) */
-#define AC97_PHONE 0x0c /* Phone Volume (optional) */
-#define AC97_MIC 0x0e /* MIC Volume */
-#define AC97_LINE 0x10 /* Line In Volume */
-#define AC97_CD 0x12 /* CD Volume */
-#define AC97_VIDEO 0x14 /* Video Volume (optional) */
-#define AC97_AUX 0x16 /* AUX Volume (optional) */
-#define AC97_PCM 0x18 /* PCM Volume */
-#define AC97_REC_SEL 0x1a /* Record Select */
-#define AC97_REC_GAIN 0x1c /* Record Gain */
-#define AC97_REC_GAIN_MIC 0x1e /* Record Gain MIC (optional) */
-#define AC97_GENERAL_PURPOSE 0x20 /* General Purpose (optional) */
-#define AC97_3D_CONTROL 0x22 /* 3D Control (optional) */
-#define AC97_INT_PAGING 0x24 /* Audio Interrupt & Paging (AC'97 2.3) */
-#define AC97_POWERDOWN 0x26 /* Powerdown control / status */
-/* range 0x28-0x3a - AUDIO AC'97 2.0 extensions */
-#define AC97_EXTENDED_ID 0x28 /* Extended Audio ID */
-#define AC97_EXTENDED_STATUS 0x2a /* Extended Audio Status and Control */
-#define AC97_PCM_FRONT_DAC_RATE 0x2c /* PCM Front DAC Rate */
-#define AC97_PCM_SURR_DAC_RATE 0x2e /* PCM Surround DAC Rate */
-#define AC97_PCM_LFE_DAC_RATE 0x30 /* PCM LFE DAC Rate */
-#define AC97_PCM_LR_ADC_RATE 0x32 /* PCM LR ADC Rate */
-#define AC97_PCM_MIC_ADC_RATE 0x34 /* PCM MIC ADC Rate */
-#define AC97_CENTER_LFE_MASTER 0x36 /* Center + LFE Master Volume */
-#define AC97_SURROUND_MASTER 0x38 /* Surround (Rear) Master Volume */
-#define AC97_SPDIF 0x3a /* S/PDIF control */
-/* range 0x3c-0x58 - MODEM */
-#define AC97_EXTENDED_MID 0x3c /* Extended Modem ID */
-#define AC97_EXTENDED_MSTATUS 0x3e /* Extended Modem Status and Control */
-#define AC97_LINE1_RATE 0x40 /* Line1 DAC/ADC Rate */
-#define AC97_LINE2_RATE 0x42 /* Line2 DAC/ADC Rate */
-#define AC97_HANDSET_RATE 0x44 /* Handset DAC/ADC Rate */
-#define AC97_LINE1_LEVEL 0x46 /* Line1 DAC/ADC Level */
-#define AC97_LINE2_LEVEL 0x48 /* Line2 DAC/ADC Level */
-#define AC97_HANDSET_LEVEL 0x4a /* Handset DAC/ADC Level */
-#define AC97_GPIO_CFG 0x4c /* GPIO Configuration */
-#define AC97_GPIO_POLARITY 0x4e /* GPIO Pin Polarity/Type, 0=low, 1=high active */
-#define AC97_GPIO_STICKY 0x50 /* GPIO Pin Sticky, 0=not, 1=sticky */
-#define AC97_GPIO_WAKEUP 0x52 /* GPIO Pin Wakeup, 0=no int, 1=yes int */
-#define AC97_GPIO_STATUS 0x54 /* GPIO Pin Status, slot 12 */
-#define AC97_MISC_AFE 0x56 /* Miscellaneous Modem AFE Status and Control */
-/* range 0x5a-0x7b - Vendor Specific */
-#define AC97_VENDOR_ID1 0x7c /* Vendor ID1 */
-#define AC97_VENDOR_ID2 0x7e /* Vendor ID2 / revision */
-/* range 0x60-0x6f (page 1) - extended codec registers */
-#define AC97_CODEC_CLASS_REV 0x60 /* Codec Class/Revision */
-#define AC97_PCI_SVID 0x62 /* PCI Subsystem Vendor ID */
-#define AC97_PCI_SID 0x64 /* PCI Subsystem ID */
-#define AC97_FUNC_SELECT 0x66 /* Function Select */
-#define AC97_FUNC_INFO 0x68 /* Function Information */
-#define AC97_SENSE_INFO 0x6a /* Sense Details */
-
-/* volume controls */
-#define AC97_MUTE_MASK_MONO 0x8000
-#define AC97_MUTE_MASK_STEREO 0x8080
-
-/* slot allocation */
-#define AC97_SLOT_TAG 0
-#define AC97_SLOT_CMD_ADDR 1
-#define AC97_SLOT_CMD_DATA 2
-#define AC97_SLOT_PCM_LEFT 3
-#define AC97_SLOT_PCM_RIGHT 4
-#define AC97_SLOT_MODEM_LINE1 5
-#define AC97_SLOT_PCM_CENTER 6
-#define AC97_SLOT_MIC 6 /* input */
-#define AC97_SLOT_SPDIF_LEFT1 6
-#define AC97_SLOT_PCM_SLEFT 7 /* surround left */
-#define AC97_SLOT_PCM_LEFT_0 7 /* double rate operation */
-#define AC97_SLOT_SPDIF_LEFT 7
-#define AC97_SLOT_PCM_SRIGHT 8 /* surround right */
-#define AC97_SLOT_PCM_RIGHT_0 8 /* double rate operation */
-#define AC97_SLOT_SPDIF_RIGHT 8
-#define AC97_SLOT_LFE 9
-#define AC97_SLOT_SPDIF_RIGHT1 9
-#define AC97_SLOT_MODEM_LINE2 10
-#define AC97_SLOT_PCM_LEFT_1 10 /* double rate operation */
-#define AC97_SLOT_SPDIF_LEFT2 10
-#define AC97_SLOT_HANDSET 11 /* output */
-#define AC97_SLOT_PCM_RIGHT_1 11 /* double rate operation */
-#define AC97_SLOT_SPDIF_RIGHT2 11
-#define AC97_SLOT_MODEM_GPIO 12 /* modem GPIO */
-#define AC97_SLOT_PCM_CENTER_1 12 /* double rate operation */
-
-/* basic capabilities (reset register) */
-#define AC97_BC_DEDICATED_MIC 0x0001 /* Dedicated Mic PCM In Channel */
-#define AC97_BC_RESERVED1 0x0002 /* Reserved (was Modem Line Codec support) */
-#define AC97_BC_BASS_TREBLE 0x0004 /* Bass & Treble Control */
-#define AC97_BC_SIM_STEREO 0x0008 /* Simulated stereo */
-#define AC97_BC_HEADPHONE 0x0010 /* Headphone Out Support */
-#define AC97_BC_LOUDNESS 0x0020 /* Loudness (bass boost) Support */
-#define AC97_BC_16BIT_DAC 0x0000 /* 16-bit DAC resolution */
-#define AC97_BC_18BIT_DAC 0x0040 /* 18-bit DAC resolution */
-#define AC97_BC_20BIT_DAC 0x0080 /* 20-bit DAC resolution */
-#define AC97_BC_DAC_MASK 0x00c0
-#define AC97_BC_16BIT_ADC 0x0000 /* 16-bit ADC resolution */
-#define AC97_BC_18BIT_ADC 0x0100 /* 18-bit ADC resolution */
-#define AC97_BC_20BIT_ADC 0x0200 /* 20-bit ADC resolution */
-#define AC97_BC_ADC_MASK 0x0300
-#define AC97_BC_3D_TECH_ID_MASK 0x7c00 /* Per-vendor ID of 3D enhancement */
-
-/* general purpose */
-#define AC97_GP_DRSS_MASK 0x0c00 /* double rate slot select */
-#define AC97_GP_DRSS_1011 0x0000 /* LR(C) 10+11(+12) */
-#define AC97_GP_DRSS_78 0x0400 /* LR 7+8 */
-
-/* powerdown bits */
-#define AC97_PD_ADC_STATUS 0x0001 /* ADC status (RO) */
-#define AC97_PD_DAC_STATUS 0x0002 /* DAC status (RO) */
-#define AC97_PD_MIXER_STATUS 0x0004 /* Analog mixer status (RO) */
-#define AC97_PD_VREF_STATUS 0x0008 /* Vref status (RO) */
-#define AC97_PD_PR0 0x0100 /* Power down PCM ADCs and input MUX */
-#define AC97_PD_PR1 0x0200 /* Power down PCM front DAC */
-#define AC97_PD_PR2 0x0400 /* Power down Mixer (Vref still on) */
-#define AC97_PD_PR3 0x0800 /* Power down Mixer (Vref off) */
-#define AC97_PD_PR4 0x1000 /* Power down AC-Link */
-#define AC97_PD_PR5 0x2000 /* Disable internal clock usage */
-#define AC97_PD_PR6 0x4000 /* Headphone amplifier */
-#define AC97_PD_EAPD 0x8000 /* External Amplifer Power Down (EAPD) */
-
-/* extended audio ID bit defines */
-#define AC97_EI_VRA 0x0001 /* Variable bit rate supported */
-#define AC97_EI_DRA 0x0002 /* Double rate supported */
-#define AC97_EI_SPDIF 0x0004 /* S/PDIF out supported */
-#define AC97_EI_VRM 0x0008 /* Variable bit rate supported for MIC */
-#define AC97_EI_DACS_SLOT_MASK 0x0030 /* DACs slot assignment */
-#define AC97_EI_DACS_SLOT_SHIFT 4
-#define AC97_EI_CDAC 0x0040 /* PCM Center DAC available */
-#define AC97_EI_SDAC 0x0080 /* PCM Surround DACs available */
-#define AC97_EI_LDAC 0x0100 /* PCM LFE DAC available */
-#define AC97_EI_AMAP 0x0200 /* indicates optional slot/DAC mapping based on codec ID */
-#define AC97_EI_REV_MASK 0x0c00 /* AC'97 revision mask */
-#define AC97_EI_REV_22 0x0400 /* AC'97 revision 2.2 */
-#define AC97_EI_REV_23 0x0800 /* AC'97 revision 2.3 */
-#define AC97_EI_REV_SHIFT 10
-#define AC97_EI_ADDR_MASK 0xc000 /* physical codec ID (address) */
-#define AC97_EI_ADDR_SHIFT 14
-
-/* extended audio status and control bit defines */
-#define AC97_EA_VRA 0x0001 /* Variable bit rate enable bit */
-#define AC97_EA_DRA 0x0002 /* Double-rate audio enable bit */
-#define AC97_EA_SPDIF 0x0004 /* S/PDIF out enable bit */
-#define AC97_EA_VRM 0x0008 /* Variable bit rate for MIC enable bit */
-#define AC97_EA_SPSA_SLOT_MASK 0x0030 /* Mask for slot assignment bits */
-#define AC97_EA_SPSA_SLOT_SHIFT 4
-#define AC97_EA_SPSA_3_4 0x0000 /* Slot assigned to 3 & 4 */
-#define AC97_EA_SPSA_7_8 0x0010 /* Slot assigned to 7 & 8 */
-#define AC97_EA_SPSA_6_9 0x0020 /* Slot assigned to 6 & 9 */
-#define AC97_EA_SPSA_10_11 0x0030 /* Slot assigned to 10 & 11 */
-#define AC97_EA_CDAC 0x0040 /* PCM Center DAC is ready (Read only) */
-#define AC97_EA_SDAC 0x0080 /* PCM Surround DACs are ready (Read only) */
-#define AC97_EA_LDAC 0x0100 /* PCM LFE DAC is ready (Read only) */
-#define AC97_EA_MDAC 0x0200 /* MIC ADC is ready (Read only) */
-#define AC97_EA_SPCV 0x0400 /* S/PDIF configuration valid (Read only) */
-#define AC97_EA_PRI 0x0800 /* Turns the PCM Center DAC off */
-#define AC97_EA_PRJ 0x1000 /* Turns the PCM Surround DACs off */
-#define AC97_EA_PRK 0x2000 /* Turns the PCM LFE DAC off */
-#define AC97_EA_PRL 0x4000 /* Turns the MIC ADC off */
-
-/* S/PDIF control bit defines */
-#define AC97_SC_PRO 0x0001 /* Professional status */
-#define AC97_SC_NAUDIO 0x0002 /* Non audio stream */
-#define AC97_SC_COPY 0x0004 /* Copyright status */
-#define AC97_SC_PRE 0x0008 /* Preemphasis status */
-#define AC97_SC_CC_MASK 0x07f0 /* Category Code mask */
-#define AC97_SC_CC_SHIFT 4
-#define AC97_SC_L 0x0800 /* Generation Level status */
-#define AC97_SC_SPSR_MASK 0x3000 /* S/PDIF Sample Rate bits */
-#define AC97_SC_SPSR_SHIFT 12
-#define AC97_SC_SPSR_44K 0x0000 /* Use 44.1kHz Sample rate */
-#define AC97_SC_SPSR_48K 0x2000 /* Use 48kHz Sample rate */
-#define AC97_SC_SPSR_32K 0x3000 /* Use 32kHz Sample rate */
-#define AC97_SC_DRS 0x4000 /* Double Rate S/PDIF */
-#define AC97_SC_V 0x8000 /* Validity status */
-
-/* Interrupt and Paging bit defines (AC'97 2.3) */
-#define AC97_PAGE_MASK 0x000f /* Page Selector */
-#define AC97_PAGE_VENDOR 0 /* Vendor-specific registers */
-#define AC97_PAGE_1 1 /* Extended Codec Registers page 1 */
-#define AC97_INT_ENABLE 0x0800 /* Interrupt Enable */
-#define AC97_INT_SENSE 0x1000 /* Sense Cycle */
-#define AC97_INT_CAUSE_SENSE 0x2000 /* Sense Cycle Completed (RO) */
-#define AC97_INT_CAUSE_GPIO 0x4000 /* GPIO bits changed (RO) */
-#define AC97_INT_STATUS 0x8000 /* Interrupt Status */
-
-/* extended modem ID bit defines */
-#define AC97_MEI_LINE1 0x0001 /* Line1 present */
-#define AC97_MEI_LINE2 0x0002 /* Line2 present */
-#define AC97_MEI_HANDSET 0x0004 /* Handset present */
-#define AC97_MEI_CID1 0x0008 /* caller ID decode for Line1 is supported */
-#define AC97_MEI_CID2 0x0010 /* caller ID decode for Line2 is supported */
-#define AC97_MEI_ADDR_MASK 0xc000 /* physical codec ID (address) */
-#define AC97_MEI_ADDR_SHIFT 14
-
-/* extended modem status and control bit defines */
-#define AC97_MEA_GPIO 0x0001 /* GPIO is ready (ro) */
-#define AC97_MEA_MREF 0x0002 /* Vref is up to nominal level (ro) */
-#define AC97_MEA_ADC1 0x0004 /* ADC1 operational (ro) */
-#define AC97_MEA_DAC1 0x0008 /* DAC1 operational (ro) */
-#define AC97_MEA_ADC2 0x0010 /* ADC2 operational (ro) */
-#define AC97_MEA_DAC2 0x0020 /* DAC2 operational (ro) */
-#define AC97_MEA_HADC 0x0040 /* HADC operational (ro) */
-#define AC97_MEA_HDAC 0x0080 /* HDAC operational (ro) */
-#define AC97_MEA_PRA 0x0100 /* GPIO power down (high) */
-#define AC97_MEA_PRB 0x0200 /* reserved */
-#define AC97_MEA_PRC 0x0400 /* ADC1 power down (high) */
-#define AC97_MEA_PRD 0x0800 /* DAC1 power down (high) */
-#define AC97_MEA_PRE 0x1000 /* ADC2 power down (high) */
-#define AC97_MEA_PRF 0x2000 /* DAC2 power down (high) */
-#define AC97_MEA_PRG 0x4000 /* HADC power down (high) */
-#define AC97_MEA_PRH 0x8000 /* HDAC power down (high) */
-
-/* modem gpio status defines */
-#define AC97_GPIO_LINE1_OH 0x0001 /* Off Hook Line1 */
-#define AC97_GPIO_LINE1_RI 0x0002 /* Ring Detect Line1 */
-#define AC97_GPIO_LINE1_CID 0x0004 /* Caller ID path enable Line1 */
-#define AC97_GPIO_LINE1_LCS 0x0008 /* Loop Current Sense Line1 */
-#define AC97_GPIO_LINE1_PULSE 0x0010 /* Opt./ Pulse Dial Line1 (out) */
-#define AC97_GPIO_LINE1_HL1R 0x0020 /* Opt./ Handset to Line1 relay control (out) */
-#define AC97_GPIO_LINE1_HOHD 0x0040 /* Opt./ Handset off hook detect Line1 (in) */
-#define AC97_GPIO_LINE12_AC 0x0080 /* Opt./ Int.bit 1 / Line1/2 AC (out) */
-#define AC97_GPIO_LINE12_DC 0x0100 /* Opt./ Int.bit 2 / Line1/2 DC (out) */
-#define AC97_GPIO_LINE12_RS 0x0200 /* Opt./ Int.bit 3 / Line1/2 RS (out) */
-#define AC97_GPIO_LINE2_OH 0x0400 /* Off Hook Line2 */
-#define AC97_GPIO_LINE2_RI 0x0800 /* Ring Detect Line2 */
-#define AC97_GPIO_LINE2_CID 0x1000 /* Caller ID path enable Line2 */
-#define AC97_GPIO_LINE2_LCS 0x2000 /* Loop Current Sense Line2 */
-#define AC97_GPIO_LINE2_PULSE 0x4000 /* Opt./ Pulse Dial Line2 (out) */
-#define AC97_GPIO_LINE2_HL1R 0x8000 /* Opt./ Handset to Line2 relay control (out) */
-
/* specific - SigmaTel */
#define AC97_SIGMATEL_OUTSEL 0x64 /* Output Select, STAC9758 */
#define AC97_SIGMATEL_INSEL 0x66 /* Input Select, STAC9758 */
diff --git a/include/sound/core.h b/include/sound/core.h
index 4104a9d1001f..5f181b875c2f 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -133,6 +133,7 @@ struct snd_card {
struct device card_dev; /* cardX object for sysfs */
const struct attribute_group *dev_groups[4]; /* assigned sysfs attr */
bool registered; /* card_dev is registered? */
+ wait_queue_head_t remove_sleep;
#ifdef CONFIG_PM
unsigned int power_state; /* power state */
@@ -240,6 +241,7 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
struct snd_card **card_ret);
int snd_card_disconnect(struct snd_card *card);
+void snd_card_disconnect_sync(struct snd_card *card);
int snd_card_free(struct snd_card *card);
int snd_card_free_when_closed(struct snd_card *card);
void snd_card_set_id(struct snd_card *card, const char *id);
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index d8afd8a5bd76..68169e3749de 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -112,8 +112,7 @@ void snd_hdac_device_unregister(struct hdac_device *codec);
int snd_hdac_device_set_chip_name(struct hdac_device *codec, const char *name);
int snd_hdac_codec_modalias(struct hdac_device *hdac, char *buf, size_t size);
-int snd_hdac_refresh_widgets(struct hdac_device *codec);
-int snd_hdac_refresh_widget_sysfs(struct hdac_device *codec);
+int snd_hdac_refresh_widgets(struct hdac_device *codec, bool sysfs);
unsigned int snd_hdac_make_cmd(struct hdac_device *codec, hda_nid_t nid,
unsigned int verb, unsigned int parm);
diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h
index 5e710d848bd3..63f75450d3db 100644
--- a/include/sound/pxa2xx-lib.h
+++ b/include/sound/pxa2xx-lib.h
@@ -2,10 +2,13 @@
#ifndef PXA2XX_LIB_H
#define PXA2XX_LIB_H
+#include <uapi/sound/asound.h>
#include <linux/platform_device.h>
-#include <sound/ac97_codec.h>
/* PCM */
+struct snd_pcm_substream;
+struct snd_pcm_hw_params;
+struct snd_pcm;
extern int __pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
@@ -22,12 +25,12 @@ extern void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm);
/* AC97 */
-extern unsigned short pxa2xx_ac97_read(struct snd_ac97 *ac97, unsigned short reg);
-extern void pxa2xx_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val);
+extern int pxa2xx_ac97_read(int slot, unsigned short reg);
+extern int pxa2xx_ac97_write(int slot, unsigned short reg, unsigned short val);
-extern bool pxa2xx_ac97_try_warm_reset(struct snd_ac97 *ac97);
-extern bool pxa2xx_ac97_try_cold_reset(struct snd_ac97 *ac97);
-extern void pxa2xx_ac97_finish_reset(struct snd_ac97 *ac97);
+extern bool pxa2xx_ac97_try_warm_reset(void);
+extern bool pxa2xx_ac97_try_cold_reset(void);
+extern void pxa2xx_ac97_finish_reset(void);
extern int pxa2xx_ac97_hw_suspend(void);
extern int pxa2xx_ac97_hw_resume(void);
diff --git a/include/sound/rt5651.h b/include/sound/rt5651.h
index d35de758dfb5..18b79a761f10 100644
--- a/include/sound/rt5651.h
+++ b/include/sound/rt5651.h
@@ -11,11 +11,19 @@
#ifndef __LINUX_SND_RT5651_H
#define __LINUX_SND_RT5651_H
+enum rt5651_jd_src {
+ RT5651_JD_NULL,
+ RT5651_JD1_1,
+ RT5651_JD1_2,
+ RT5651_JD2,
+};
+
struct rt5651_platform_data {
/* IN2 can optionally be differential */
bool in2_diff;
bool dmic_en;
+ enum rt5651_jd_src jd_src;
};
#endif
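A board-file sketch showing the new jack-detect source field in use; the chosen values are illustrative only:

static struct rt5651_platform_data example_rt5651_pdata = {
	.in2_diff	= false,
	.dmic_en	= true,
	.jd_src		= RT5651_JD1_1,	/* assumed board wiring */
};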
diff --git a/include/sound/rt5663.h b/include/sound/rt5663.h
index 7d00e5849706..7b90a8f1034c 100644
--- a/include/sound/rt5663.h
+++ b/include/sound/rt5663.h
@@ -16,6 +16,9 @@ struct rt5663_platform_data {
unsigned int dc_offset_r_manual;
unsigned int dc_offset_l_manual_mic;
unsigned int dc_offset_r_manual_mic;
+
+ unsigned int impedance_sensing_num;
+ unsigned int *impedance_sensing_table;
};
#endif
diff --git a/include/sound/snd_wavefront.h b/include/sound/snd_wavefront.h
index 6231eb57361f..55053557c898 100644
--- a/include/sound/snd_wavefront.h
+++ b/include/sound/snd_wavefront.h
@@ -29,6 +29,7 @@ struct _snd_wavefront_midi {
struct snd_rawmidi_substream *substream_output[2];
struct snd_rawmidi_substream *substream_input[2];
struct timer_list timer;
+ snd_wavefront_card_t *timer_card;
spinlock_t open;
spinlock_t virtual; /* protects isvirtual */
};
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
new file mode 100644
index 000000000000..1a9191cd4bb3
--- /dev/null
+++ b/include/sound/soc-acpi-intel-match.h
@@ -0,0 +1,32 @@
+
+/*
+ * Copyright (C) 2017, Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
+#define __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
+
+#include <linux/stddef.h>
+#include <linux/acpi.h>
+
+/*
+ * these tables are not constant; some fields can be updated at
+ * run-time for pdata or machine ops
+ */
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_haswell_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_legacy_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[];
+
+#endif
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
new file mode 100644
index 000000000000..a7d8d335b043
--- /dev/null
+++ b/include/sound/soc-acpi.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2013-15, Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_SND_SOC_ACPI_H
+#define __LINUX_SND_SOC_ACPI_H
+
+#include <linux/stddef.h>
+#include <linux/acpi.h>
+
+struct snd_soc_acpi_package_context {
+ char *name; /* package name */
+ int length; /* number of elements */
+ struct acpi_buffer *format;
+ struct acpi_buffer *state;
+ bool data_valid;
+};
+
+#if IS_ENABLED(CONFIG_ACPI)
+/* translation from HID to I2C name, needed for DAI codec_name */
+const char *snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN]);
+bool snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
+ struct snd_soc_acpi_package_context *ctx);
+#else
+static inline const char *
+snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
+{
+ return NULL;
+}
+static inline bool
+snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
+ struct snd_soc_acpi_package_context *ctx)
+{
+ return false;
+}
+#endif
+
+/* acpi match */
+struct snd_soc_acpi_mach *
+snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines);
+
+/* acpi check hid */
+bool snd_soc_acpi_check_hid(const u8 hid[ACPI_ID_LEN]);
+
+/**
+ * snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are
+ * related to the hardware, except for the firmware and topology file names.
+ * A platform supported by legacy and Sound Open Firmware (SOF) would expose
+ * all firmware/topology related fields.
+ *
+ * @id: ACPI ID (usually the codec's) used to find a matching machine driver.
+ * @drv_name: machine driver name
+ * @fw_filename: firmware file name. Used when SOF is not enabled.
+ * @board: board name
+ * @machine_quirk: pointer to quirk, usually based on DMI information when
+ * the ACPI ID alone is not sufficient, or is wrong or misleading
+ * @quirk_data: data used to uniquely identify a machine, usually a list of
+ * audio codecs whose presence is checked with ACPI
+ * @pdata: intended for platform data or machine-specific ops. This structure
+ * is not constant since this field may be updated at run-time
+ * @sof_fw_filename: Sound Open Firmware file name, if enabled
+ * @sof_tplg_filename: Sound Open Firmware topology file name, if enabled
+ * @asoc_plat_name: ASoC platform name, used for binding machine drivers
+ * if non NULL
+ * @new_mach_data: machine driver private data fixup
+ */
+/* Descriptor for SST ASoC machine driver */
+struct snd_soc_acpi_mach {
+ const u8 id[ACPI_ID_LEN];
+ const char *drv_name;
+ const char *fw_filename;
+ const char *board;
+ struct snd_soc_acpi_mach * (*machine_quirk)(void *arg);
+ const void *quirk_data;
+ void *pdata;
+ const char *sof_fw_filename;
+ const char *sof_tplg_filename;
+ const char *asoc_plat_name;
+ struct platform_device * (*new_mach_data)(void *pdata);
+};
+
+#define SND_SOC_ACPI_MAX_CODECS 3
+
+/**
+ * struct snd_soc_acpi_codecs: Structure to hold secondary codec information
+ * apart from the matched one; this data is passed to the quirk function
+ * to match against the ACPI-detected devices
+ *
+ * @num_codecs: number of secondary codecs used in the platform
+ * @codecs: holds the codec IDs
+ *
+ */
+struct snd_soc_acpi_codecs {
+ int num_codecs;
+ u8 codecs[SND_SOC_ACPI_MAX_CODECS][ACPI_ID_LEN];
+};
+
+/* check all codecs */
+struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg);
+
+#endif
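A hedged sketch of how a platform driver might fill one of these tables and resolve it against the ACPI namespace. The HID, driver name and firmware path are illustrative, not taken from an existing driver:

static struct snd_soc_acpi_mach my_machines[] = {
        {
                .id = "10EC5651",                  /* codec ACPI HID (illustrative) */
                .drv_name = "my_machine_rt5651",   /* hypothetical machine driver */
                .fw_filename = "intel/my_fw.bin",  /* hypothetical firmware */
        },
        { /* sentinel */ }
};

static struct snd_soc_acpi_mach *my_find_machine(void)
{
        /* returns the first entry whose HID is present and enabled in ACPI,
         * after applying any machine_quirk callback, or NULL if none match */
        return snd_soc_acpi_find_machine(my_machines);
}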
diff --git a/include/sound/soc.h b/include/sound/soc.h
index d22de9712c45..1a7323238c49 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -468,6 +468,11 @@ int snd_soc_register_codec(struct device *dev,
const struct snd_soc_codec_driver *codec_drv,
struct snd_soc_dai_driver *dai_drv, int num_dai);
void snd_soc_unregister_codec(struct device *dev);
+int snd_soc_add_component(struct device *dev,
+ struct snd_soc_component *component,
+ const struct snd_soc_component_driver *component_driver,
+ struct snd_soc_dai_driver *dai_drv,
+ int num_dai);
int snd_soc_register_component(struct device *dev,
const struct snd_soc_component_driver *component_driver,
struct snd_soc_dai_driver *dai_drv, int num_dai);
@@ -475,6 +480,8 @@ int devm_snd_soc_register_component(struct device *dev,
const struct snd_soc_component_driver *component_driver,
struct snd_soc_dai_driver *dai_drv, int num_dai);
void snd_soc_unregister_component(struct device *dev);
+struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
+ const char *driver_name);
int snd_soc_cache_init(struct snd_soc_codec *codec);
int snd_soc_cache_exit(struct snd_soc_codec *codec);
@@ -795,6 +802,10 @@ struct snd_soc_component_driver {
int (*suspend)(struct snd_soc_component *);
int (*resume)(struct snd_soc_component *);
+ /* pcm creation and destruction */
+ int (*pcm_new)(struct snd_soc_pcm_runtime *);
+ void (*pcm_free)(struct snd_pcm *);
+
/* component wide operations */
int (*set_sysclk)(struct snd_soc_component *component,
int clk_id, int source, unsigned int freq, int dir);
@@ -812,10 +823,22 @@ struct snd_soc_component_driver {
void (*seq_notifier)(struct snd_soc_component *, enum snd_soc_dapm_type,
int subseq);
int (*stream_event)(struct snd_soc_component *, int event);
+ int (*set_bias_level)(struct snd_soc_component *component,
+ enum snd_soc_bias_level level);
+
+ const struct snd_pcm_ops *ops;
+ const struct snd_compr_ops *compr_ops;
/* probe ordering - for components with runtime dependencies */
int probe_order;
int remove_order;
+
+ /* bits */
+ unsigned int idle_bias_on:1;
+ unsigned int suspend_bias_off:1;
+ unsigned int pmdown_time:1; /* honor pmdown_time at stop */
+ unsigned int endianness:1;
+ unsigned int non_legacy_dai_naming:1;
};
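With pcm_new/pcm_free, the snd_pcm_ops/compr_ops pointers and the new flag bits, a driver can be expressed directly as a component. A hedged, minimal sketch (names hypothetical, PCM ops omitted):

static int my_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
        /* e.g. preallocate DMA buffers for rtd->pcm */
        return 0;
}

static void my_pcm_free(struct snd_pcm *pcm)
{
        /* release whatever my_pcm_new() set up */
}

static const struct snd_soc_component_driver my_component_drv = {
        .name                   = "my-component",      /* hypothetical */
        .pcm_new                = my_pcm_new,
        .pcm_free               = my_pcm_free,
        .idle_bias_on           = 1,
        .non_legacy_dai_naming  = 1,
};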
struct snd_soc_component {
@@ -872,6 +895,8 @@ struct snd_soc_component {
void (*remove)(struct snd_soc_component *);
int (*suspend)(struct snd_soc_component *);
int (*resume)(struct snd_soc_component *);
+ int (*pcm_new)(struct snd_soc_component *, struct snd_soc_pcm_runtime *);
+ void (*pcm_free)(struct snd_soc_component *, struct snd_pcm *);
int (*set_sysclk)(struct snd_soc_component *component,
int clk_id, int source, unsigned int freq, int dir);
@@ -879,6 +904,8 @@ struct snd_soc_component {
int source, unsigned int freq_in, unsigned int freq_out);
int (*set_jack)(struct snd_soc_component *component,
struct snd_soc_jack *jack, void *data);
+ int (*set_bias_level)(struct snd_soc_component *component,
+ enum snd_soc_bias_level level);
/* machine specific init */
int (*init)(struct snd_soc_component *component);
@@ -1413,6 +1440,21 @@ static inline void snd_soc_codec_init_bias_level(struct snd_soc_codec *codec,
}
/**
+ * snd_soc_component_init_bias_level() - Initialize COMPONENT DAPM bias level
+ * @component: The COMPONENT for which to initialize the DAPM bias level
+ * @level: The DAPM level to initialize to
+ *
+ * Initializes the COMPONENT DAPM bias level. See snd_soc_dapm_init_bias_level().
+ */
+static inline void
+snd_soc_component_init_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ snd_soc_dapm_init_bias_level(
+ snd_soc_component_get_dapm(component), level);
+}
+
+/**
* snd_soc_dapm_get_bias_level() - Get current CODEC DAPM bias level
* @codec: The CODEC for which to get the DAPM bias level
*
@@ -1425,6 +1467,19 @@ static inline enum snd_soc_bias_level snd_soc_codec_get_bias_level(
}
/**
+ * snd_soc_component_get_bias_level() - Get current COMPONENT DAPM bias level
+ * @component: The COMPONENT for which to get the DAPM bias level
+ *
+ * Returns: The current DAPM bias level of the COMPONENT.
+ */
+static inline enum snd_soc_bias_level
+snd_soc_component_get_bias_level(struct snd_soc_component *component)
+{
+ return snd_soc_dapm_get_bias_level(
+ snd_soc_component_get_dapm(component));
+}
+
+/**
* snd_soc_codec_force_bias_level() - Set the CODEC DAPM bias level
* @codec: The CODEC for which to set the level
* @level: The level to set to
@@ -1440,6 +1495,23 @@ static inline int snd_soc_codec_force_bias_level(struct snd_soc_codec *codec,
}
/**
+ * snd_soc_component_force_bias_level() - Set the COMPONENT DAPM bias level
+ * @component: The COMPONENT for which to set the level
+ * @level: The level to set to
+ *
+ * Forces the COMPONENT bias level to a specific state. See
+ * snd_soc_dapm_force_bias_level().
+ */
+static inline int
+snd_soc_component_force_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ return snd_soc_dapm_force_bias_level(
+ snd_soc_component_get_dapm(component),
+ level);
+}
+
+/**
* snd_soc_dapm_kcontrol_codec() - Returns the codec associated to a kcontrol
* @kcontrol: The kcontrol
*
@@ -1452,6 +1524,19 @@ static inline struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(
return snd_soc_dapm_to_codec(snd_soc_dapm_kcontrol_dapm(kcontrol));
}
+/**
+ * snd_soc_dapm_kcontrol_component() - Returns the component associated to a kcontrol
+ * @kcontrol: The kcontrol
+ *
+ * This function must only be used on DAPM contexts that are known to be part of
+ * a COMPONENT (e.g. in a COMPONENT driver). Otherwise the behavior is undefined.
+ */
+static inline struct snd_soc_component *snd_soc_dapm_kcontrol_component(
+ struct snd_kcontrol *kcontrol)
+{
+ return snd_soc_dapm_to_component(snd_soc_dapm_kcontrol_dapm(kcontrol));
+}
+
/* codec IO */
unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg);
int snd_soc_write(struct snd_soc_codec *codec, unsigned int reg,
@@ -1468,9 +1553,23 @@ static inline int snd_soc_cache_sync(struct snd_soc_codec *codec)
return regcache_sync(codec->component.regmap);
}
+/**
+ * snd_soc_component_cache_sync() - Sync the register cache with the hardware
+ * @component: COMPONENT to sync
+ *
+ * Note: This function will call regcache_sync()
+ */
+static inline int snd_soc_component_cache_sync(
+ struct snd_soc_component *component)
+{
+ return regcache_sync(component->regmap);
+}
+
/* component IO */
int snd_soc_component_read(struct snd_soc_component *component,
unsigned int reg, unsigned int *val);
+unsigned int snd_soc_component_read32(struct snd_soc_component *component,
+ unsigned int reg);
int snd_soc_component_write(struct snd_soc_component *component,
unsigned int reg, unsigned int val);
int snd_soc_component_update_bits(struct snd_soc_component *component,
@@ -1487,6 +1586,8 @@ int snd_soc_component_set_sysclk(struct snd_soc_component *component,
int snd_soc_component_set_pll(struct snd_soc_component *component, int pll_id,
int source, unsigned int freq_in,
unsigned int freq_out);
+int snd_soc_component_set_jack(struct snd_soc_component *component,
+ struct snd_soc_jack *jack, void *data);
#ifdef CONFIG_REGMAP
@@ -1720,6 +1821,20 @@ struct snd_soc_dai *snd_soc_find_dai(
#include <sound/soc-dai.h>
+static inline
+struct snd_soc_dai *snd_soc_card_get_codec_dai(struct snd_soc_card *card,
+ const char *dai_name)
+{
+ struct snd_soc_pcm_runtime *rtd;
+
+ list_for_each_entry(rtd, &card->rtd_list, list) {
+ if (!strcmp(rtd->codec_dai->name, dai_name))
+ return rtd->codec_dai;
+ }
+
+ return NULL;
+}
+
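A hedged sketch of a machine driver using the new snd_soc_card_get_codec_dai() helper, e.g. to retune the codec clock from a jack handler; the DAI name and clock values are illustrative:

static int my_set_codec_clock(struct snd_soc_card *card)
{
        struct snd_soc_dai *codec_dai;

        codec_dai = snd_soc_card_get_codec_dai(card, "rt5651-aif1");
        if (!codec_dai)
                return -ENODEV;

        /* feed the codec a 19.2 MHz reference clock (illustrative values) */
        return snd_soc_dai_set_sysclk(codec_dai, 0, 19200000, SND_SOC_CLOCK_IN);
}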
#ifdef CONFIG_DEBUG_FS
extern struct dentry *snd_soc_debugfs_root;
#endif
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
index b77d579e3ecf..4d75a2c426ca 100644
--- a/include/target/iscsi/iscsi_target_stat.h
+++ b/include/target/iscsi/iscsi_target_stat.h
@@ -9,16 +9,16 @@
/*
* For struct iscsi_tiqn->tiqn_wwn default groups
*/
-extern struct config_item_type iscsi_stat_instance_cit;
-extern struct config_item_type iscsi_stat_sess_err_cit;
-extern struct config_item_type iscsi_stat_tgt_attr_cit;
-extern struct config_item_type iscsi_stat_login_cit;
-extern struct config_item_type iscsi_stat_logout_cit;
+extern const struct config_item_type iscsi_stat_instance_cit;
+extern const struct config_item_type iscsi_stat_sess_err_cit;
+extern const struct config_item_type iscsi_stat_tgt_attr_cit;
+extern const struct config_item_type iscsi_stat_login_cit;
+extern const struct config_item_type iscsi_stat_logout_cit;
/*
* For struct iscsi_session->se_sess default groups
*/
-extern struct config_item_type iscsi_stat_sess_cit;
+extern const struct config_item_type iscsi_stat_sess_cit;
/* iSCSI session error types */
#define ISCSI_SESS_ERR_UNKNOWN 0
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 32d0c1fe2bfa..4342a329821f 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -29,6 +29,13 @@ struct btrfs_qgroup_extent_record;
struct btrfs_qgroup;
struct prelim_ref;
+TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS_NR);
+TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS);
+TRACE_DEFINE_ENUM(FLUSH_DELALLOC);
+TRACE_DEFINE_ENUM(FLUSH_DELALLOC_WAIT);
+TRACE_DEFINE_ENUM(ALLOC_CHUNK);
+TRACE_DEFINE_ENUM(COMMIT_TRANS);
+
#define show_ref_type(type) \
__print_symbolic(type, \
{ BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \
@@ -792,11 +799,10 @@ DEFINE_EVENT(btrfs_delayed_data_ref, run_delayed_data_ref,
DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
const struct btrfs_delayed_ref_head *head_ref,
int action),
- TP_ARGS(fs_info, ref, head_ref, action),
+ TP_ARGS(fs_info, head_ref, action),
TP_STRUCT__entry_btrfs(
__field( u64, bytenr )
@@ -806,8 +812,8 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
),
TP_fast_assign_btrfs(fs_info,
- __entry->bytenr = ref->bytenr;
- __entry->num_bytes = ref->num_bytes;
+ __entry->bytenr = head_ref->bytenr;
+ __entry->num_bytes = head_ref->num_bytes;
__entry->action = action;
__entry->is_data = head_ref->is_data;
),
@@ -822,21 +828,19 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
DEFINE_EVENT(btrfs_delayed_ref_head, add_delayed_ref_head,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
const struct btrfs_delayed_ref_head *head_ref,
int action),
- TP_ARGS(fs_info, ref, head_ref, action)
+ TP_ARGS(fs_info, head_ref, action)
);
DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
const struct btrfs_delayed_ref_head *head_ref,
int action),
- TP_ARGS(fs_info, ref, head_ref, action)
+ TP_ARGS(fs_info, head_ref, action)
);
#define show_chunk_type(type) \
@@ -1692,6 +1696,27 @@ DEFINE_EVENT(btrfs__prelim_ref, btrfs_prelim_ref_insert,
TP_ARGS(fs_info, oldref, newref, tree_size)
);
+TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
+ TP_PROTO(struct btrfs_root *root, u64 ino, int mod),
+
+ TP_ARGS(root, ino, mod),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, root_objectid )
+ __field( u64, ino )
+ __field( int, mod )
+ ),
+
+ TP_fast_assign_btrfs(root->fs_info,
+ __entry->root_objectid = root->objectid;
+ __entry->ino = ino;
+ __entry->mod = mod;
+ ),
+
+ TP_printk_btrfs("root=%llu(%s) ino=%llu mod=%d",
+ show_root_type(__entry->root_objectid),
+ (unsigned long long)__entry->ino, __entry->mod)
+);
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/irq_matrix.h b/include/trace/events/irq_matrix.h
new file mode 100644
index 000000000000..267d4cbbf360
--- /dev/null
+++ b/include/trace/events/irq_matrix.h
@@ -0,0 +1,201 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM irq_matrix
+
+#if !defined(_TRACE_IRQ_MATRIX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IRQ_MATRIX_H
+
+#include <linux/tracepoint.h>
+
+struct irq_matrix;
+struct cpumap;
+
+DECLARE_EVENT_CLASS(irq_matrix_global,
+
+ TP_PROTO(struct irq_matrix *matrix),
+
+ TP_ARGS(matrix),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, online_maps )
+ __field( unsigned int, global_available )
+ __field( unsigned int, global_reserved )
+ __field( unsigned int, total_allocated )
+ ),
+
+ TP_fast_assign(
+ __entry->online_maps = matrix->online_maps;
+ __entry->global_available = matrix->global_available;
+ __entry->global_reserved = matrix->global_reserved;
+ __entry->total_allocated = matrix->total_allocated;
+ ),
+
+ TP_printk("online_maps=%d global_avl=%u, global_rsvd=%u, total_alloc=%u",
+ __entry->online_maps, __entry->global_available,
+ __entry->global_reserved, __entry->total_allocated)
+);
+
+DECLARE_EVENT_CLASS(irq_matrix_global_update,
+
+ TP_PROTO(int bit, struct irq_matrix *matrix),
+
+ TP_ARGS(bit, matrix),
+
+ TP_STRUCT__entry(
+ __field( int, bit )
+ __field( unsigned int, online_maps )
+ __field( unsigned int, global_available )
+ __field( unsigned int, global_reserved )
+ __field( unsigned int, total_allocated )
+ ),
+
+ TP_fast_assign(
+ __entry->bit = bit;
+ __entry->online_maps = matrix->online_maps;
+ __entry->global_available = matrix->global_available;
+ __entry->global_reserved = matrix->global_reserved;
+ __entry->total_allocated = matrix->total_allocated;
+ ),
+
+ TP_printk("bit=%d online_maps=%d global_avl=%u, global_rsvd=%u, total_alloc=%u",
+ __entry->bit, __entry->online_maps,
+ __entry->global_available, __entry->global_reserved,
+ __entry->total_allocated)
+);
+
+DECLARE_EVENT_CLASS(irq_matrix_cpu,
+
+ TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix,
+ struct cpumap *cmap),
+
+ TP_ARGS(bit, cpu, matrix, cmap),
+
+ TP_STRUCT__entry(
+ __field( int, bit )
+ __field( unsigned int, cpu )
+ __field( bool, online )
+ __field( unsigned int, available )
+ __field( unsigned int, allocated )
+ __field( unsigned int, managed )
+ __field( unsigned int, online_maps )
+ __field( unsigned int, global_available )
+ __field( unsigned int, global_reserved )
+ __field( unsigned int, total_allocated )
+ ),
+
+ TP_fast_assign(
+ __entry->bit = bit;
+ __entry->cpu = cpu;
+ __entry->online = cmap->online;
+ __entry->available = cmap->available;
+ __entry->allocated = cmap->allocated;
+ __entry->managed = cmap->managed;
+ __entry->online_maps = matrix->online_maps;
+ __entry->global_available = matrix->global_available;
+ __entry->global_reserved = matrix->global_reserved;
+ __entry->total_allocated = matrix->total_allocated;
+ ),
+
+ TP_printk("bit=%d cpu=%u online=%d avl=%u alloc=%u managed=%u online_maps=%u global_avl=%u, global_rsvd=%u, total_alloc=%u",
+ __entry->bit, __entry->cpu, __entry->online,
+ __entry->available, __entry->allocated,
+ __entry->managed, __entry->online_maps,
+ __entry->global_available, __entry->global_reserved,
+ __entry->total_allocated)
+);
+
+DEFINE_EVENT(irq_matrix_global, irq_matrix_online,
+
+ TP_PROTO(struct irq_matrix *matrix),
+
+ TP_ARGS(matrix)
+);
+
+DEFINE_EVENT(irq_matrix_global, irq_matrix_offline,
+
+ TP_PROTO(struct irq_matrix *matrix),
+
+ TP_ARGS(matrix)
+);
+
+DEFINE_EVENT(irq_matrix_global, irq_matrix_reserve,
+
+ TP_PROTO(struct irq_matrix *matrix),
+
+ TP_ARGS(matrix)
+);
+
+DEFINE_EVENT(irq_matrix_global, irq_matrix_remove_reserved,
+
+ TP_PROTO(struct irq_matrix *matrix),
+
+ TP_ARGS(matrix)
+);
+
+DEFINE_EVENT(irq_matrix_global_update, irq_matrix_assign_system,
+
+ TP_PROTO(int bit, struct irq_matrix *matrix),
+
+ TP_ARGS(bit, matrix)
+);
+
+DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc_reserved,
+
+ TP_PROTO(int bit, unsigned int cpu,
+ struct irq_matrix *matrix, struct cpumap *cmap),
+
+ TP_ARGS(bit, cpu, matrix, cmap)
+);
+
+DEFINE_EVENT(irq_matrix_cpu, irq_matrix_reserve_managed,
+
+ TP_PROTO(int bit, unsigned int cpu,
+ struct irq_matrix *matrix, struct cpumap *cmap),
+
+ TP_ARGS(bit, cpu, matrix, cmap)
+);
+
+DEFINE_EVENT(irq_matrix_cpu, irq_matrix_remove_managed,
+
+ TP_PROTO(int bit, unsigned int cpu,
+ struct irq_matrix *matrix, struct cpumap *cmap),
+
+ TP_ARGS(bit, cpu, matrix, cmap)
+);
+
+DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc_managed,
+
+ TP_PROTO(int bit, unsigned int cpu,
+ struct irq_matrix *matrix, struct cpumap *cmap),
+
+ TP_ARGS(bit, cpu, matrix, cmap)
+);
+
+DEFINE_EVENT(irq_matrix_cpu, irq_matrix_assign,
+
+ TP_PROTO(int bit, unsigned int cpu,
+ struct irq_matrix *matrix, struct cpumap *cmap),
+
+ TP_ARGS(bit, cpu, matrix, cmap)
+);
+
+DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc,
+
+ TP_PROTO(int bit, unsigned int cpu,
+ struct irq_matrix *matrix, struct cpumap *cmap),
+
+ TP_ARGS(bit, cpu, matrix, cmap)
+);
+
+DEFINE_EVENT(irq_matrix_cpu, irq_matrix_free,
+
+ TP_PROTO(int bit, unsigned int cpu,
+ struct irq_matrix *matrix, struct cpumap *cmap),
+
+ TP_ARGS(bit, cpu, matrix, cmap)
+);
+
+
+#endif /* _TRACE_IRQ_MATRIX_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 285feeadac39..eb57e3037deb 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -172,24 +172,21 @@ TRACE_EVENT(mm_page_free,
TRACE_EVENT(mm_page_free_batched,
- TP_PROTO(struct page *page, int cold),
+ TP_PROTO(struct page *page),
- TP_ARGS(page, cold),
+ TP_ARGS(page),
TP_STRUCT__entry(
__field( unsigned long, pfn )
- __field( int, cold )
),
TP_fast_assign(
__entry->pfn = page_to_pfn(page);
- __entry->cold = cold;
),
- TP_printk("page=%p pfn=%lu order=0 cold=%d",
+ TP_printk("page=%p pfn=%lu order=0",
pfn_to_page(__entry->pfn),
- __entry->pfn,
- __entry->cold)
+ __entry->pfn)
);
TRACE_EVENT(mm_page_alloc,
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 648cbf603736..dbe1bb058c09 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -32,7 +32,6 @@
{(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
{(unsigned long)__GFP_IO, "__GFP_IO"}, \
{(unsigned long)__GFP_FS, "__GFP_FS"}, \
- {(unsigned long)__GFP_COLD, "__GFP_COLD"}, \
{(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
{(unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
{(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
@@ -46,7 +45,6 @@
{(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
{(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
{(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
- {(unsigned long)__GFP_NOTRACK, "__GFP_NOTRACK"}, \
{(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
{(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
{(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index da10aa21bebc..306b31de5194 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -118,7 +118,7 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
if (preempt)
return TASK_STATE_MAX;
- return __get_task_state(p);
+ return task_state_index(p);
}
#endif /* CREATE_TRACE_POINTS */
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 2e1fa7910306..32db72c7c055 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -287,7 +287,6 @@ DEFINE_EVENT(writeback_class, name, \
TP_PROTO(struct bdi_writeback *wb), \
TP_ARGS(wb))
-DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
TRACE_EVENT(writeback_bdi_register,
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 7b8fa11c2285..919248fb4028 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -52,6 +52,8 @@ extern "C" {
#define DRM_AMDGPU_GEM_USERPTR 0x11
#define DRM_AMDGPU_WAIT_FENCES 0x12
#define DRM_AMDGPU_VM 0x13
+#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
+#define DRM_AMDGPU_SCHED 0x15
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -67,6 +69,8 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
+#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
+#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -87,6 +91,10 @@ extern "C" {
#define AMDGPU_GEM_CREATE_SHADOW (1 << 4)
/* Flag that allocating the BO should use linear VRAM */
#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5)
+/* Flag that BO is always valid in this VM */
+#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
+/* Flag that BO sharing will be explicitly synchronized */
+#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -162,13 +170,22 @@ union drm_amdgpu_bo_list {
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET 3
+/* Context priority level */
+#define AMDGPU_CTX_PRIORITY_UNSET -2048
+#define AMDGPU_CTX_PRIORITY_VERY_LOW -1023
+#define AMDGPU_CTX_PRIORITY_LOW -512
+#define AMDGPU_CTX_PRIORITY_NORMAL 0
+/* Selecting a priority above NORMAL requires CAP_SYS_NICE or DRM_MASTER */
+#define AMDGPU_CTX_PRIORITY_HIGH 512
+#define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023
+
struct drm_amdgpu_ctx_in {
/** AMDGPU_CTX_OP_* */
__u32 op;
/** For future use, no flags defined so far */
__u32 flags;
__u32 ctx_id;
- __u32 _pad;
+ __s32 priority;
};
union drm_amdgpu_ctx_out {
@@ -212,6 +229,21 @@ union drm_amdgpu_vm {
struct drm_amdgpu_vm_out out;
};
+/* sched ioctl */
+#define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE 1
+
+struct drm_amdgpu_sched_in {
+ /* AMDGPU_SCHED_OP_* */
+ __u32 op;
+ __u32 fd;
+ __s32 priority;
+ __u32 flags;
+};
+
+union drm_amdgpu_sched {
+ struct drm_amdgpu_sched_in in;
+};
+
/*
* This is not a reliable API and you should expect it to fail for any
* number of reasons and have fallback path that do not use userptr to
@@ -513,6 +545,21 @@ struct drm_amdgpu_cs_chunk_sem {
__u32 handle;
};
+#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0
+#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1
+#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2
+
+union drm_amdgpu_fence_to_handle {
+ struct {
+ struct drm_amdgpu_fence fence;
+ __u32 what;
+ __u32 pad;
+ } in;
+ struct {
+ __u32 handle;
+ } out;
+};
+
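A hedged user-space sketch converting a previously submitted fence into a sync_file fd via the new ioctl; "fd" is an open render node and the caller fills in the fence from its command submission:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int amdgpu_fence_to_sync_file(int fd, const struct drm_amdgpu_fence *fence)
{
        union drm_amdgpu_fence_to_handle args;

        memset(&args, 0, sizeof(args));
        args.in.fence = *fence;
        args.in.what = AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD;

        if (ioctl(fd, DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE, &args) < 0)
                return -1;

        return args.out.handle;   /* a sync_file fd that can be poll()ed */
}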
struct drm_amdgpu_cs_chunk_data {
union {
struct drm_amdgpu_cs_chunk_ib ib_data;
@@ -611,6 +658,7 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_SENSOR_VDDGFX 0x7
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
+#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 97677cd6964d..6fdff5945c8a 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -737,6 +737,28 @@ struct drm_syncobj_array {
__u32 pad;
};
+/* Query current scanout sequence number */
+struct drm_crtc_get_sequence {
+ __u32 crtc_id; /* requested crtc_id */
+ __u32 active; /* return: crtc output is active */
+ __u64 sequence; /* return: most recent vblank sequence */
+ __s64 sequence_ns; /* return: most recent time of first pixel out */
+};
+
+/* Queue event to be delivered at specified sequence. Time stamp marks
+ * when the first pixel of the refresh cycle leaves the display engine
+ * for the display
+ */
+#define DRM_CRTC_SEQUENCE_RELATIVE 0x00000001 /* sequence is relative to current */
+#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS 0x00000002 /* Use next sequence if we've missed */
+
+struct drm_crtc_queue_sequence {
+ __u32 crtc_id;
+ __u32 flags;
+ __u64 sequence; /* on input, target sequence. on output, actual sequence */
+ __u64 user_data; /* user data passed to event */
+};
+
#if defined(__cplusplus)
}
#endif
@@ -819,6 +841,9 @@ extern "C" {
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
+#define DRM_IOCTL_CRTC_GET_SEQUENCE DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
+#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
+
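A hedged user-space sketch of the two new CRTC sequence ioctls: read the current scanout count, then queue an event for the next frame; crtc_id is assumed to come from DRM_IOCTL_MODE_GETRESOURCES:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int queue_next_vblank_event(int fd, __u32 crtc_id)
{
        struct drm_crtc_get_sequence get;
        struct drm_crtc_queue_sequence queue;

        memset(&get, 0, sizeof(get));
        get.crtc_id = crtc_id;
        if (ioctl(fd, DRM_IOCTL_CRTC_GET_SEQUENCE, &get) < 0)
                return -1;                     /* on success, get.sequence holds the count */

        memset(&queue, 0, sizeof(queue));
        queue.crtc_id = crtc_id;
        queue.flags = DRM_CRTC_SEQUENCE_RELATIVE;
        queue.sequence = 1;                    /* one frame from now */
        queue.user_data = 0x1234;              /* echoed back in the event */

        /* a DRM_EVENT_CRTC_SEQUENCE event will later be readable from fd */
        return ioctl(fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, &queue);
}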
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
@@ -863,6 +888,11 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array)
#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array)
+#define DRM_IOCTL_MODE_CREATE_LEASE DRM_IOWR(0xC6, struct drm_mode_create_lease)
+#define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees)
+#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
+#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
+
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
@@ -893,6 +923,7 @@ struct drm_event {
#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02
+#define DRM_EVENT_CRTC_SEQUENCE 0x03
struct drm_event_vblank {
struct drm_event base;
@@ -903,6 +934,16 @@ struct drm_event_vblank {
__u32 crtc_id; /* 0 on older kernels that do not support this */
};
+/* Event delivered at sequence. Time stamp marks when the first pixel
+ * of the refresh cycle leaves the display engine for the display
+ */
+struct drm_event_crtc_sequence {
+ struct drm_event base;
+ __u64 user_data;
+ __s64 time_ns;
+ __u64 sequence;
+};
+
/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 54fc38c3c3f1..5597a87154e5 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -749,9 +749,9 @@ struct drm_format_modifier {
* If the number formats grew to 128, and formats 98-102 are
* supported with the modifier:
*
- * 0x0000003c00000000 0000000000000000
+ * 0x0000007c00000000 0000000000000000
* ^
- * |__offset = 64, formats = 0x3c00000000
+ * |__offset = 64, formats = 0x7c00000000
*
*/
__u64 formats;
@@ -782,6 +782,72 @@ struct drm_mode_destroy_blob {
__u32 blob_id;
};
+/**
+ * Lease mode resources, creating another drm_master.
+ */
+struct drm_mode_create_lease {
+ /** Pointer to array of object ids (__u32) */
+ __u64 object_ids;
+ /** Number of object ids */
+ __u32 object_count;
+ /** flags for new FD (O_CLOEXEC, etc) */
+ __u32 flags;
+
+ /** Return: unique identifier for lessee. */
+ __u32 lessee_id;
+ /** Return: file descriptor to new drm_master file */
+ __u32 fd;
+};
+
+/**
+ * List lessees from a drm_master
+ */
+struct drm_mode_list_lessees {
+ /** Number of lessees.
+ * On input, provides length of the array.
+ * On output, provides total number. No
+ * more than the input number will be written
+ * back, so two calls can be used to get
+ * the size and then the data.
+ */
+ __u32 count_lessees;
+ __u32 pad;
+
+ /** Pointer to lessees.
+ * pointer to __u64 array of lessee ids
+ */
+ __u64 lessees_ptr;
+};
+
+/**
+ * Get leased objects
+ */
+struct drm_mode_get_lease {
+ /** Number of leased objects.
+ * On input, provides length of the array.
+ * On output, provides total number. No
+ * more than the input number will be written
+ * back, so two calls can be used to get
+ * the size and then the data.
+ */
+ __u32 count_objects;
+ __u32 pad;
+
+ /** Pointer to objects.
+ * pointer to __u32 array of object ids
+ */
+ __u64 objects_ptr;
+};
+
+/**
+ * Revoke lease
+ */
+struct drm_mode_revoke_lease {
+ /** Unique ID of lessee
+ */
+ __u32 lessee_id;
+};
+
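A hedged user-space sketch creating a lease for one CRTC, connector and plane; the object ids are assumed to have been discovered beforehand:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int lease_display_objects(int fd, __u32 crtc, __u32 connector, __u32 plane)
{
        __u32 objects[3] = { crtc, connector, plane };
        struct drm_mode_create_lease lease;

        memset(&lease, 0, sizeof(lease));
        lease.object_ids = (__u64)(uintptr_t)objects;
        lease.object_count = 3;
        lease.flags = O_CLOEXEC;

        if (ioctl(fd, DRM_IOCTL_MODE_CREATE_LEASE, &lease) < 0)
                return -1;

        /* lease.fd is a new drm_master fd limited to the leased objects;
         * lease.lessee_id can later be passed to DRM_IOCTL_MODE_REVOKE_LEASE */
        return lease.fd;
}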
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index d4463f3fa427..e9b997a0ef27 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -151,6 +151,19 @@ struct drm_etnaviv_gem_submit_bo {
__u64 presumed; /* in/out, presumed buffer address */
};
+/* performance monitor request (pmr) */
+#define ETNA_PM_PROCESS_PRE 0x0001
+#define ETNA_PM_PROCESS_POST 0x0002
+struct drm_etnaviv_gem_submit_pmr {
+ __u32 flags; /* in, when to process request (ETNA_PM_PROCESS_x) */
+ __u8 domain; /* in, pm domain */
+ __u8 pad;
+ __u16 signal; /* in, pm signal */
+ __u32 sequence; /* in, sequence number */
+ __u32 read_offset; /* in, offset from read_bo */
+ __u32 read_idx; /* in, index of read_bo buffer */
+};
+
/* Each cmdstream submit consists of a table of buffers involved, and
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
@@ -176,6 +189,9 @@ struct drm_etnaviv_gem_submit {
__u64 stream; /* in, ptr to cmdstream */
__u32 flags; /* in, mask of ETNA_SUBMIT_x */
__s32 fence_fd; /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
+ __u64 pmrs; /* in, ptr to array of submit_pmr's */
+ __u32 nr_pmrs; /* in, number of submit_pmr's */
+ __u32 pad;
};
/* The normal way to synchronize with the GPU is just to CPU_PREP on
@@ -211,6 +227,27 @@ struct drm_etnaviv_gem_wait {
struct drm_etnaviv_timespec timeout; /* in */
};
+/*
+ * Performance Monitor (PM):
+ */
+
+struct drm_etnaviv_pm_domain {
+ __u32 pipe; /* in */
+ __u8 iter; /* in/out, select pm domain at index iter */
+ __u8 id; /* out, id of domain */
+ __u16 nr_signals; /* out, how many signals does this domain provide */
+ char name[64]; /* out, name of domain */
+};
+
+struct drm_etnaviv_pm_signal {
+ __u32 pipe; /* in */
+ __u8 domain; /* in, pm domain index */
+ __u8 pad;
+ __u16 iter; /* in/out, select pm source at index iter */
+ __u16 id; /* out, id of signal */
+ char name[64]; /* out, name of signal */
+};
+
#define DRM_ETNAVIV_GET_PARAM 0x00
/* placeholder:
#define DRM_ETNAVIV_SET_PARAM 0x01
@@ -223,7 +260,9 @@ struct drm_etnaviv_gem_wait {
#define DRM_ETNAVIV_WAIT_FENCE 0x07
#define DRM_ETNAVIV_GEM_USERPTR 0x08
#define DRM_ETNAVIV_GEM_WAIT 0x09
-#define DRM_ETNAVIV_NUM_IOCTLS 0x0a
+#define DRM_ETNAVIV_PM_QUERY_DOM 0x0a
+#define DRM_ETNAVIV_PM_QUERY_SIG 0x0b
+#define DRM_ETNAVIV_NUM_IOCTLS 0x0c
#define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
#define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
@@ -234,6 +273,8 @@ struct drm_etnaviv_gem_wait {
#define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
+#define DRM_IOCTL_ETNAVIV_PM_QUERY_DOM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_DOM, struct drm_etnaviv_pm_domain)
+#define DRM_IOCTL_ETNAVIV_PM_QUERY_SIG DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_SIG, struct drm_etnaviv_pm_signal)
#if defined(__cplusplus)
}
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 9816590d3ad2..ac3c6503ca27 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -397,10 +397,20 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_MIN_EU_IN_POOL 39
#define I915_PARAM_MMAP_GTT_VERSION 40
-/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
* priorities and the driver will attempt to execute batches in priority order.
+ * The param returns a capability bitmask; a nonzero value implies that the
+ * scheduler is enabled, with different features present according to the mask.
+ *
+ * The initial priority for each batch is supplied by the context and is
+ * controlled via I915_CONTEXT_PARAM_PRIORITY.
*/
#define I915_PARAM_HAS_SCHEDULER 41
+#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
+#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
+#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
+
#define I915_PARAM_HUC_STATUS 42
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -1309,14 +1319,16 @@ struct drm_i915_reg_read {
* be specified
*/
__u64 offset;
+#define I915_REG_READ_8B_WA (1ul << 0)
+
__u64 val; /* Return value */
};
/* Known registers:
*
* Render engine timestamp - 0x2358 + 64bit - gen7+
* - Note this register returns an invalid value if using the default
- * single instruction 8byte read, in order to workaround that use
- * offset (0x2538 | 1) instead.
+ * single instruction 8 byte read; to work around that, pass the
+ * I915_REG_READ_8B_WA flag in the offset field.
*
*/
@@ -1359,6 +1371,10 @@ struct drm_i915_gem_context_param {
#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
#define I915_CONTEXT_PARAM_BANNABLE 0x5
+#define I915_CONTEXT_PARAM_PRIORITY 0x6
+#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
+#define I915_CONTEXT_DEFAULT_PRIORITY 0
+#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
__u64 value;
};
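A hedged user-space sketch bumping a context's scheduling priority via the existing context-setparam ioctl; raising it above the default is expected to need CAP_SYS_NICE:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int i915_set_ctx_priority(int fd, __u32 ctx_id, int prio)
{
        struct drm_i915_gem_context_param p;

        memset(&p, 0, sizeof(p));
        p.ctx_id = ctx_id;
        p.param = I915_CONTEXT_PARAM_PRIORITY;
        p.value = prio;   /* I915_CONTEXT_MIN/MAX_USER_PRIORITY bound the range */

        return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}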
@@ -1510,9 +1526,14 @@ struct drm_i915_perf_oa_config {
__u32 n_boolean_regs;
__u32 n_flex_regs;
- __u64 __user mux_regs_ptr;
- __u64 __user boolean_regs_ptr;
- __u64 __user flex_regs_ptr;
+ /*
+ * These fields are pointers to tuples of u32 values (register
+ * address, value). For example the expected length of the buffer
+ * pointed by mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+ */
+ __u64 mux_regs_ptr;
+ __u64 boolean_regs_ptr;
+ __u64 flex_regs_ptr;
};
#if defined(__cplusplus)
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index ad4eb2863e70..bbbaffad772d 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -73,6 +73,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_MAX_FREQ 0x04
#define MSM_PARAM_TIMESTAMP 0x05
#define MSM_PARAM_GMEM_BASE 0x06
+#define MSM_PARAM_NR_RINGS 0x07
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
@@ -218,6 +219,7 @@ struct drm_msm_gem_submit {
__u64 bos; /* in, ptr to array of submit_bo's */
__u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
+ __u32 queueid; /* in, submitqueue id */
};
/* The normal way to synchronize with the GPU is just to CPU_PREP on
@@ -231,6 +233,7 @@ struct drm_msm_wait_fence {
__u32 fence; /* in */
__u32 pad;
struct drm_msm_timespec timeout; /* in */
+ __u32 queueid; /* in, submitqueue id */
};
/* madvise provides a way to tell the kernel in case a buffers contents
@@ -254,6 +257,20 @@ struct drm_msm_gem_madvise {
__u32 retained; /* out, whether backing store still exists */
};
+/*
+ * Draw queues allow the user to set specific submission parameters. Command
+ * submissions specify a specific submitqueue to use. ID 0 is reserved for
+ * backwards compatibility as a "default" submitqueue.
+ */
+
+#define MSM_SUBMITQUEUE_FLAGS (0)
+
+struct drm_msm_submitqueue {
+ __u32 flags; /* in, MSM_SUBMITQUEUE_x */
+ __u32 prio; /* in, Priority level */
+ __u32 id; /* out, identifier */
+};
+
#define DRM_MSM_GET_PARAM 0x00
/* placeholder:
#define DRM_MSM_SET_PARAM 0x01
@@ -265,6 +282,11 @@ struct drm_msm_gem_madvise {
#define DRM_MSM_GEM_SUBMIT 0x06
#define DRM_MSM_WAIT_FENCE 0x07
#define DRM_MSM_GEM_MADVISE 0x08
+/* placeholder:
+#define DRM_MSM_GEM_SVM_NEW 0x09
+ */
+#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
+#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
@@ -274,6 +296,8 @@ struct drm_msm_gem_madvise {
#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
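A hedged user-space sketch creating a submitqueue and returning its id for use in drm_msm_gem_submit.queueid; the priority value is device dependent:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

static int msm_submitqueue_new(int fd, __u32 prio, __u32 *queueid)
{
        struct drm_msm_submitqueue req;

        memset(&req, 0, sizeof(req));
        req.flags = 0;          /* no MSM_SUBMITQUEUE_x flags defined yet */
        req.prio = prio;

        if (ioctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req) < 0)
                return -1;

        *queueid = req.id;      /* pass this in drm_msm_gem_submit.queueid */
        return 0;
}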
#if defined(__cplusplus)
}
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
index afae87004963..52263b575bdc 100644
--- a/include/uapi/drm/vc4_drm.h
+++ b/include/uapi/drm/vc4_drm.h
@@ -41,6 +41,7 @@ extern "C" {
#define DRM_VC4_SET_TILING 0x08
#define DRM_VC4_GET_TILING 0x09
#define DRM_VC4_LABEL_BO 0x0a
+#define DRM_VC4_GEM_MADVISE 0x0b
#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
@@ -53,6 +54,7 @@ extern "C" {
#define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
+#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
struct drm_vc4_submit_rcl_surface {
__u32 hindex; /* Handle index, or ~0 if not present. */
@@ -305,6 +307,7 @@ struct drm_vc4_get_hang_state {
#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
+#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7
struct drm_vc4_get_param {
__u32 param;
@@ -333,6 +336,22 @@ struct drm_vc4_label_bo {
__u64 name;
};
+/*
+ * States prefixed with '__' are internal states and cannot be passed to the
+ * DRM_IOCTL_VC4_GEM_MADVISE ioctl.
+ */
+#define VC4_MADV_WILLNEED 0
+#define VC4_MADV_DONTNEED 1
+#define __VC4_MADV_PURGED 2
+#define __VC4_MADV_NOTSUPP 3
+
+struct drm_vc4_gem_madvise {
+ __u32 handle;
+ __u32 madv;
+ __u32 retained;
+ __u32 pad;
+};
+
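A hedged user-space sketch of the madvise flow: mark a BO purgeable while it is idle, and check retained when marking it needed again:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/vc4_drm.h>

static int vc4_bo_set_needed(int fd, __u32 handle, int needed)
{
        struct drm_vc4_gem_madvise args;

        memset(&args, 0, sizeof(args));
        args.handle = handle;
        args.madv = needed ? VC4_MADV_WILLNEED : VC4_MADV_DONTNEED;

        if (ioctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &args) < 0)
                return -1;

        /* when asking for WILLNEED, retained == 0 means the kernel purged
         * the backing storage and the contents must be regenerated */
        return args.retained;
}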
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 7668582db6ba..4e61a9e05132 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -113,6 +113,7 @@
#define AUDIT_FEATURE_CHANGE 1328 /* audit log listing feature changes */
#define AUDIT_REPLACE 1329 /* Replace auditd if this packet unanswerd */
#define AUDIT_KERN_MODULE 1330 /* Kernel Module events */
+#define AUDIT_FANOTIFY 1331 /* Fanotify access decision */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
@@ -156,8 +157,9 @@
#define AUDIT_FILTER_WATCH 0x03 /* Apply rule to file system watches */
#define AUDIT_FILTER_EXIT 0x04 /* Apply rule at syscall exit */
#define AUDIT_FILTER_TYPE 0x05 /* Apply rule at audit_log_start */
+#define AUDIT_FILTER_FS 0x06 /* Apply rule at __audit_inode_child */
-#define AUDIT_NR_FILTERS 6
+#define AUDIT_NR_FILTERS 7
#define AUDIT_FILTER_PREPEND 0x10 /* Prepend to front of list */
@@ -257,6 +259,7 @@
#define AUDIT_OBJ_LEV_HIGH 23
#define AUDIT_LOGINUID_SET 24
#define AUDIT_SESSIONID 25 /* Session ID */
+#define AUDIT_FSTYPE 26 /* FileSystem Type */
/* These are ONLY useful when checking
* at syscall exit time (AUDIT_AT_EXIT). */
@@ -336,13 +339,15 @@ enum {
#define AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND 0x00000008
#define AUDIT_FEATURE_BITMAP_SESSIONID_FILTER 0x00000010
#define AUDIT_FEATURE_BITMAP_LOST_RESET 0x00000020
+#define AUDIT_FEATURE_BITMAP_FILTER_FS 0x00000040
#define AUDIT_FEATURE_BITMAP_ALL (AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT | \
AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME | \
AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH | \
AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND | \
AUDIT_FEATURE_BITMAP_SESSIONID_FILTER | \
- AUDIT_FEATURE_BITMAP_LOST_RESET)
+ AUDIT_FEATURE_BITMAP_LOST_RESET | \
+ AUDIT_FEATURE_BITMAP_FILTER_FS)
/* deprecated: AUDIT_VERSION_* */
#define AUDIT_VERSION_LATEST AUDIT_FEATURE_BITMAP_ALL
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 6cdfd12cd14c..ce615b75e855 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -609,10 +609,14 @@ struct btrfs_ioctl_ino_path_args {
struct btrfs_ioctl_logical_ino_args {
__u64 logical; /* in */
__u64 size; /* in */
- __u64 reserved[4];
+ __u64 reserved[3]; /* must be 0 for now */
+ __u64 flags; /* in, v2 only */
/* struct btrfs_data_container *inodes; out */
__u64 inodes;
};
+/* Return every ref to the extent, not just those containing logical block.
+ * Requires logical == extent bytenr. */
+#define BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET (1ULL << 0)
enum btrfs_dev_stat_values {
/* disk I/O failure stats */
@@ -836,5 +840,7 @@ enum btrfs_err_code {
struct btrfs_ioctl_feature_flags[3])
#define BTRFS_IOC_RM_DEV_V2 _IOW(BTRFS_IOCTL_MAGIC, 58, \
struct btrfs_ioctl_vol_args_v2)
+#define BTRFS_IOC_LOGICAL_INO_V2 _IOWR(BTRFS_IOCTL_MAGIC, 59, \
+ struct btrfs_ioctl_logical_ino_args)
#endif /* _UAPI_LINUX_BTRFS_H */
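A hedged user-space sketch of the v2 ioctl with the ignore-offset flag; fd is any file descriptor on the filesystem and logical must be an extent bytenr. Parsing of the returned btrfs_data_container is omitted:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int list_extent_refs(int fd, __u64 logical)
{
        struct btrfs_ioctl_logical_ino_args args;
        void *buf = calloc(1, 64 * 1024);
        int ret;

        if (!buf)
                return -1;

        memset(&args, 0, sizeof(args));
        args.logical = logical;
        args.size = 64 * 1024;
        args.flags = BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET;
        args.inodes = (__u64)(uintptr_t)buf;

        ret = ioctl(fd, BTRFS_IOC_LOGICAL_INO_V2, &args);
        /* on success buf holds a struct btrfs_data_container with
         * (root, inode, offset) triples for every ref to the extent */
        free(buf);
        return ret;
}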
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index 8f659bb7badc..6d6e5da51527 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -733,6 +733,7 @@ struct btrfs_balance_item {
#define BTRFS_FILE_EXTENT_INLINE 0
#define BTRFS_FILE_EXTENT_REG 1
#define BTRFS_FILE_EXTENT_PREALLOC 2
+#define BTRFS_FILE_EXTENT_TYPES 2
struct btrfs_file_extent_item {
/*
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index c3114c989e91..b51fbe1941a7 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -411,6 +411,8 @@ struct cec_log_addrs {
#define CEC_EVENT_LOST_MSGS 2
#define CEC_EVENT_PIN_CEC_LOW 3
#define CEC_EVENT_PIN_CEC_HIGH 4
+#define CEC_EVENT_PIN_HPD_LOW 5
+#define CEC_EVENT_PIN_HPD_HIGH 6
#define CEC_EVENT_FL_INITIAL_STATE (1 << 0)
#define CEC_EVENT_FL_DROPPED_EVENTS (1 << 1)
diff --git a/include/uapi/linux/dvb/frontend.h b/include/uapi/linux/dvb/frontend.h
index f46de499b51b..b297b65845d6 100644
--- a/include/uapi/linux/dvb/frontend.h
+++ b/include/uapi/linux/dvb/frontend.h
@@ -831,7 +831,7 @@ struct dtv_fe_stats {
* @cmd: Digital TV command.
* @reserved: Not used.
* @u: Union with the values for the command.
- * @result: Result of the command set (currently unused).
+ * @result: Unused
*
* The @u union may have either one of the values below:
*
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index c58627c0d6fb..bb6836986200 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -412,11 +412,13 @@ typedef struct elf64_shdr {
#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
#define NT_S390_GS_BC 0x30c /* s390 guarded storage broadcast control block */
+#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */
#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
#define NT_ARM_TLS 0x401 /* ARM TLS register */
#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
+#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
#define NT_METAG_CBUF 0x500 /* Metag catch buffer registers */
#define NT_METAG_RPIPE 0x501 /* Metag read pipeline state */
#define NT_METAG_TLS 0x502 /* Metag TLS pointer */
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
index f79c4e1a84b9..74247917de04 100644
--- a/include/uapi/linux/fanotify.h
+++ b/include/uapi/linux/fanotify.h
@@ -36,6 +36,7 @@
#define FAN_UNLIMITED_QUEUE 0x00000010
#define FAN_UNLIMITED_MARKS 0x00000020
+#define FAN_ENABLE_AUDIT 0x00000040
#define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK | \
FAN_ALL_CLASS_BITS | FAN_UNLIMITED_QUEUE |\
@@ -100,6 +101,8 @@ struct fanotify_response {
/* Legit userspace responses to a _PERM event */
#define FAN_ALLOW 0x01
#define FAN_DENY 0x02
+#define FAN_AUDIT 0x10 /* Bit mask to create audit record for result */
+
/* No fd set in event */
#define FAN_NOFD -1
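A hedged user-space sketch of the new audit support: open the group with FAN_ENABLE_AUDIT (which is expected to require CAP_AUDIT_WRITE), then OR FAN_AUDIT into permission responses that should be logged. Path and mask are illustrative:

#include <fcntl.h>
#include <sys/fanotify.h>

static int start_audited_watch(const char *path)
{
        int fd = fanotify_init(FAN_CLASS_CONTENT | FAN_ENABLE_AUDIT, O_RDONLY);

        if (fd < 0)
                return -1;

        if (fanotify_mark(fd, FAN_MARK_ADD, FAN_OPEN_PERM, AT_FDCWD, path) < 0)
                return -1;

        /*
         * For each permission event read from fd, reply with
         *   response.response = FAN_ALLOW | FAN_AUDIT;   (or FAN_DENY | FAN_AUDIT)
         * to have the decision recorded by the audit subsystem.
         */
        return fd;
}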
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index f4058bd4c373..061fa62958a2 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -407,6 +407,7 @@
#define BTN_TOOL_MOUSE 0x146
#define BTN_TOOL_LENS 0x147
#define BTN_TOOL_QUINTTAP 0x148 /* Five fingers on trackpad */
+#define BTN_STYLUS3 0x149
#define BTN_TOUCH 0x14a
#define BTN_STYLUS 0x14b
#define BTN_STYLUS2 0x14c
diff --git a/include/uapi/linux/iso_fs.h b/include/uapi/linux/iso_fs.h
index 78b4ebcf8ab0..a2555176f6d1 100644
--- a/include/uapi/linux/iso_fs.h
+++ b/include/uapi/linux/iso_fs.h
@@ -13,10 +13,10 @@
#define ISODCL(from, to) (to - from + 1)
struct iso_volume_descriptor {
- char type[ISODCL(1,1)]; /* 711 */
+ __u8 type[ISODCL(1,1)]; /* 711 */
char id[ISODCL(2,6)];
- char version[ISODCL(7,7)];
- char data[ISODCL(8,2048)];
+ __u8 version[ISODCL(7,7)];
+ __u8 data[ISODCL(8,2048)];
};
/* volume descriptor types */
@@ -27,24 +27,24 @@ struct iso_volume_descriptor {
#define ISO_STANDARD_ID "CD001"
struct iso_primary_descriptor {
- char type [ISODCL ( 1, 1)]; /* 711 */
+ __u8 type [ISODCL ( 1, 1)]; /* 711 */
char id [ISODCL ( 2, 6)];
- char version [ISODCL ( 7, 7)]; /* 711 */
- char unused1 [ISODCL ( 8, 8)];
+ __u8 version [ISODCL ( 7, 7)]; /* 711 */
+ __u8 unused1 [ISODCL ( 8, 8)];
char system_id [ISODCL ( 9, 40)]; /* achars */
char volume_id [ISODCL ( 41, 72)]; /* dchars */
- char unused2 [ISODCL ( 73, 80)];
- char volume_space_size [ISODCL ( 81, 88)]; /* 733 */
- char unused3 [ISODCL ( 89, 120)];
- char volume_set_size [ISODCL (121, 124)]; /* 723 */
- char volume_sequence_number [ISODCL (125, 128)]; /* 723 */
- char logical_block_size [ISODCL (129, 132)]; /* 723 */
- char path_table_size [ISODCL (133, 140)]; /* 733 */
- char type_l_path_table [ISODCL (141, 144)]; /* 731 */
- char opt_type_l_path_table [ISODCL (145, 148)]; /* 731 */
- char type_m_path_table [ISODCL (149, 152)]; /* 732 */
- char opt_type_m_path_table [ISODCL (153, 156)]; /* 732 */
- char root_directory_record [ISODCL (157, 190)]; /* 9.1 */
+ __u8 unused2 [ISODCL ( 73, 80)];
+ __u8 volume_space_size [ISODCL ( 81, 88)]; /* 733 */
+ __u8 unused3 [ISODCL ( 89, 120)];
+ __u8 volume_set_size [ISODCL (121, 124)]; /* 723 */
+ __u8 volume_sequence_number [ISODCL (125, 128)]; /* 723 */
+ __u8 logical_block_size [ISODCL (129, 132)]; /* 723 */
+ __u8 path_table_size [ISODCL (133, 140)]; /* 733 */
+ __u8 type_l_path_table [ISODCL (141, 144)]; /* 731 */
+ __u8 opt_type_l_path_table [ISODCL (145, 148)]; /* 731 */
+ __u8 type_m_path_table [ISODCL (149, 152)]; /* 732 */
+ __u8 opt_type_m_path_table [ISODCL (153, 156)]; /* 732 */
+ __u8 root_directory_record [ISODCL (157, 190)]; /* 9.1 */
char volume_set_id [ISODCL (191, 318)]; /* dchars */
char publisher_id [ISODCL (319, 446)]; /* achars */
char preparer_id [ISODCL (447, 574)]; /* achars */
@@ -52,36 +52,36 @@ struct iso_primary_descriptor {
char copyright_file_id [ISODCL (703, 739)]; /* 7.5 dchars */
char abstract_file_id [ISODCL (740, 776)]; /* 7.5 dchars */
char bibliographic_file_id [ISODCL (777, 813)]; /* 7.5 dchars */
- char creation_date [ISODCL (814, 830)]; /* 8.4.26.1 */
- char modification_date [ISODCL (831, 847)]; /* 8.4.26.1 */
- char expiration_date [ISODCL (848, 864)]; /* 8.4.26.1 */
- char effective_date [ISODCL (865, 881)]; /* 8.4.26.1 */
- char file_structure_version [ISODCL (882, 882)]; /* 711 */
- char unused4 [ISODCL (883, 883)];
- char application_data [ISODCL (884, 1395)];
- char unused5 [ISODCL (1396, 2048)];
+ __u8 creation_date [ISODCL (814, 830)]; /* 8.4.26.1 */
+ __u8 modification_date [ISODCL (831, 847)]; /* 8.4.26.1 */
+ __u8 expiration_date [ISODCL (848, 864)]; /* 8.4.26.1 */
+ __u8 effective_date [ISODCL (865, 881)]; /* 8.4.26.1 */
+ __u8 file_structure_version [ISODCL (882, 882)]; /* 711 */
+ __u8 unused4 [ISODCL (883, 883)];
+ __u8 application_data [ISODCL (884, 1395)];
+ __u8 unused5 [ISODCL (1396, 2048)];
};
/* Almost the same as the primary descriptor but two fields are specified */
struct iso_supplementary_descriptor {
- char type [ISODCL ( 1, 1)]; /* 711 */
+ __u8 type [ISODCL ( 1, 1)]; /* 711 */
char id [ISODCL ( 2, 6)];
- char version [ISODCL ( 7, 7)]; /* 711 */
- char flags [ISODCL ( 8, 8)]; /* 853 */
+ __u8 version [ISODCL ( 7, 7)]; /* 711 */
+ __u8 flags [ISODCL ( 8, 8)]; /* 853 */
char system_id [ISODCL ( 9, 40)]; /* achars */
char volume_id [ISODCL ( 41, 72)]; /* dchars */
- char unused2 [ISODCL ( 73, 80)];
- char volume_space_size [ISODCL ( 81, 88)]; /* 733 */
- char escape [ISODCL ( 89, 120)]; /* 856 */
- char volume_set_size [ISODCL (121, 124)]; /* 723 */
- char volume_sequence_number [ISODCL (125, 128)]; /* 723 */
- char logical_block_size [ISODCL (129, 132)]; /* 723 */
- char path_table_size [ISODCL (133, 140)]; /* 733 */
- char type_l_path_table [ISODCL (141, 144)]; /* 731 */
- char opt_type_l_path_table [ISODCL (145, 148)]; /* 731 */
- char type_m_path_table [ISODCL (149, 152)]; /* 732 */
- char opt_type_m_path_table [ISODCL (153, 156)]; /* 732 */
- char root_directory_record [ISODCL (157, 190)]; /* 9.1 */
+ __u8 unused2 [ISODCL ( 73, 80)];
+ __u8 volume_space_size [ISODCL ( 81, 88)]; /* 733 */
+ __u8 escape [ISODCL ( 89, 120)]; /* 856 */
+ __u8 volume_set_size [ISODCL (121, 124)]; /* 723 */
+ __u8 volume_sequence_number [ISODCL (125, 128)]; /* 723 */
+ __u8 logical_block_size [ISODCL (129, 132)]; /* 723 */
+ __u8 path_table_size [ISODCL (133, 140)]; /* 733 */
+ __u8 type_l_path_table [ISODCL (141, 144)]; /* 731 */
+ __u8 opt_type_l_path_table [ISODCL (145, 148)]; /* 731 */
+ __u8 type_m_path_table [ISODCL (149, 152)]; /* 732 */
+ __u8 opt_type_m_path_table [ISODCL (153, 156)]; /* 732 */
+ __u8 root_directory_record [ISODCL (157, 190)]; /* 9.1 */
char volume_set_id [ISODCL (191, 318)]; /* dchars */
char publisher_id [ISODCL (319, 446)]; /* achars */
char preparer_id [ISODCL (447, 574)]; /* achars */
@@ -89,54 +89,54 @@ struct iso_supplementary_descriptor {
char copyright_file_id [ISODCL (703, 739)]; /* 7.5 dchars */
char abstract_file_id [ISODCL (740, 776)]; /* 7.5 dchars */
char bibliographic_file_id [ISODCL (777, 813)]; /* 7.5 dchars */
- char creation_date [ISODCL (814, 830)]; /* 8.4.26.1 */
- char modification_date [ISODCL (831, 847)]; /* 8.4.26.1 */
- char expiration_date [ISODCL (848, 864)]; /* 8.4.26.1 */
- char effective_date [ISODCL (865, 881)]; /* 8.4.26.1 */
- char file_structure_version [ISODCL (882, 882)]; /* 711 */
- char unused4 [ISODCL (883, 883)];
- char application_data [ISODCL (884, 1395)];
- char unused5 [ISODCL (1396, 2048)];
+ __u8 creation_date [ISODCL (814, 830)]; /* 8.4.26.1 */
+ __u8 modification_date [ISODCL (831, 847)]; /* 8.4.26.1 */
+ __u8 expiration_date [ISODCL (848, 864)]; /* 8.4.26.1 */
+ __u8 effective_date [ISODCL (865, 881)]; /* 8.4.26.1 */
+ __u8 file_structure_version [ISODCL (882, 882)]; /* 711 */
+ __u8 unused4 [ISODCL (883, 883)];
+ __u8 application_data [ISODCL (884, 1395)];
+ __u8 unused5 [ISODCL (1396, 2048)];
};
#define HS_STANDARD_ID "CDROM"
struct hs_volume_descriptor {
- char foo [ISODCL ( 1, 8)]; /* 733 */
- char type [ISODCL ( 9, 9)]; /* 711 */
+ __u8 foo [ISODCL ( 1, 8)]; /* 733 */
+ __u8 type [ISODCL ( 9, 9)]; /* 711 */
char id [ISODCL ( 10, 14)];
- char version [ISODCL ( 15, 15)]; /* 711 */
- char data[ISODCL(16,2048)];
+ __u8 version [ISODCL ( 15, 15)]; /* 711 */
+ __u8 data[ISODCL(16,2048)];
};
struct hs_primary_descriptor {
- char foo [ISODCL ( 1, 8)]; /* 733 */
- char type [ISODCL ( 9, 9)]; /* 711 */
- char id [ISODCL ( 10, 14)];
- char version [ISODCL ( 15, 15)]; /* 711 */
- char unused1 [ISODCL ( 16, 16)]; /* 711 */
+ __u8 foo [ISODCL ( 1, 8)]; /* 733 */
+ __u8 type [ISODCL ( 9, 9)]; /* 711 */
+ __u8 id [ISODCL ( 10, 14)];
+ __u8 version [ISODCL ( 15, 15)]; /* 711 */
+ __u8 unused1 [ISODCL ( 16, 16)]; /* 711 */
char system_id [ISODCL ( 17, 48)]; /* achars */
char volume_id [ISODCL ( 49, 80)]; /* dchars */
- char unused2 [ISODCL ( 81, 88)]; /* 733 */
- char volume_space_size [ISODCL ( 89, 96)]; /* 733 */
- char unused3 [ISODCL ( 97, 128)]; /* 733 */
- char volume_set_size [ISODCL (129, 132)]; /* 723 */
- char volume_sequence_number [ISODCL (133, 136)]; /* 723 */
- char logical_block_size [ISODCL (137, 140)]; /* 723 */
- char path_table_size [ISODCL (141, 148)]; /* 733 */
- char type_l_path_table [ISODCL (149, 152)]; /* 731 */
- char unused4 [ISODCL (153, 180)]; /* 733 */
- char root_directory_record [ISODCL (181, 214)]; /* 9.1 */
+ __u8 unused2 [ISODCL ( 81, 88)]; /* 733 */
+ __u8 volume_space_size [ISODCL ( 89, 96)]; /* 733 */
+ __u8 unused3 [ISODCL ( 97, 128)]; /* 733 */
+ __u8 volume_set_size [ISODCL (129, 132)]; /* 723 */
+ __u8 volume_sequence_number [ISODCL (133, 136)]; /* 723 */
+ __u8 logical_block_size [ISODCL (137, 140)]; /* 723 */
+ __u8 path_table_size [ISODCL (141, 148)]; /* 733 */
+ __u8 type_l_path_table [ISODCL (149, 152)]; /* 731 */
+ __u8 unused4 [ISODCL (153, 180)]; /* 733 */
+ __u8 root_directory_record [ISODCL (181, 214)]; /* 9.1 */
};
/* We use this to help us look up the parent inode numbers. */
struct iso_path_table{
- unsigned char name_len[2]; /* 721 */
- char extent[4]; /* 731 */
- char parent[2]; /* 721 */
+ __u8 name_len[2]; /* 721 */
+ __u8 extent[4]; /* 731 */
+ __u8 parent[2]; /* 721 */
char name[0];
} __attribute__((packed));
@@ -144,16 +144,16 @@ struct iso_path_table{
there is an extra reserved byte after the flags */
struct iso_directory_record {
- char length [ISODCL (1, 1)]; /* 711 */
- char ext_attr_length [ISODCL (2, 2)]; /* 711 */
- char extent [ISODCL (3, 10)]; /* 733 */
- char size [ISODCL (11, 18)]; /* 733 */
- char date [ISODCL (19, 25)]; /* 7 by 711 */
- char flags [ISODCL (26, 26)];
- char file_unit_size [ISODCL (27, 27)]; /* 711 */
- char interleave [ISODCL (28, 28)]; /* 711 */
- char volume_sequence_number [ISODCL (29, 32)]; /* 723 */
- unsigned char name_len [ISODCL (33, 33)]; /* 711 */
+ __u8 length [ISODCL (1, 1)]; /* 711 */
+ __u8 ext_attr_length [ISODCL (2, 2)]; /* 711 */
+ __u8 extent [ISODCL (3, 10)]; /* 733 */
+ __u8 size [ISODCL (11, 18)]; /* 733 */
+ __u8 date [ISODCL (19, 25)]; /* 7 by 711 */
+ __u8 flags [ISODCL (26, 26)];
+ __u8 file_unit_size [ISODCL (27, 27)]; /* 711 */
+ __u8 interleave [ISODCL (28, 28)]; /* 711 */
+ __u8 volume_sequence_number [ISODCL (29, 32)]; /* 723 */
+ __u8 name_len [ISODCL (33, 33)]; /* 711 */
char name [0];
} __attribute__((packed));
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 26283fefdf5f..731d0df722e3 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -169,7 +169,7 @@ struct kfd_ioctl_dbg_wave_control_args {
#define KFD_IOC_WAIT_RESULT_TIMEOUT 1
#define KFD_IOC_WAIT_RESULT_FAIL 2
-#define KFD_SIGNAL_EVENT_LIMIT 256
+#define KFD_SIGNAL_EVENT_LIMIT 4096
struct kfd_ioctl_create_event_args {
__u64 event_page_offset; /* from KFD */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 87c2c840b27d..70c2b2ade048 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -747,6 +747,7 @@
#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First UNC is Fatal */
#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
+#define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */
#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */
/* Virtual Channel */
@@ -940,9 +941,13 @@
#define PCI_SATA_SIZEOF_LONG 16
/* Resizable BARs */
+#define PCI_REBAR_CAP 4 /* capability register */
+#define PCI_REBAR_CAP_SIZES 0x00FFFFF0 /* supported BAR sizes */
#define PCI_REBAR_CTRL 8 /* control register */
-#define PCI_REBAR_CTRL_NBAR_MASK (7 << 5) /* mask for # bars */
-#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # bars */
+#define PCI_REBAR_CTRL_BAR_IDX 0x00000007 /* BAR index */
+#define PCI_REBAR_CTRL_NBAR_MASK 0x000000E0 /* # of resizable BARs */
+#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # of BARs */
+#define PCI_REBAR_CTRL_BAR_SIZE 0x00001F00 /* BAR size */
/* Dynamic Power Allocation */
#define PCI_DPA_CAP 4 /* capability register */
@@ -961,6 +966,7 @@
/* Downstream Port Containment */
#define PCI_EXP_DPC_CAP 4 /* DPC Capability */
+#define PCI_EXP_DPC_IRQ 0x1f /* DPC Interrupt Message Number */
#define PCI_EXP_DPC_CAP_RP_EXT 0x20 /* Root Port Extensions for DPC */
#define PCI_EXP_DPC_CAP_POISONED_TLP 0x40 /* Poisoned TLP Egress Blocking Supported */
#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x80 /* Software Triggering Supported */
@@ -996,19 +1002,25 @@
#define PCI_PTM_CTRL_ENABLE 0x00000001 /* PTM enable */
#define PCI_PTM_CTRL_ROOT 0x00000002 /* Root select */
-/* L1 PM Substates */
-#define PCI_L1SS_CAP 4 /* capability register */
-#define PCI_L1SS_CAP_PCIPM_L1_2 1 /* PCI PM L1.2 Support */
-#define PCI_L1SS_CAP_PCIPM_L1_1 2 /* PCI PM L1.1 Support */
-#define PCI_L1SS_CAP_ASPM_L1_2 4 /* ASPM L1.2 Support */
-#define PCI_L1SS_CAP_ASPM_L1_1 8 /* ASPM L1.1 Support */
-#define PCI_L1SS_CAP_L1_PM_SS 16 /* L1 PM Substates Support */
-#define PCI_L1SS_CTL1 8 /* Control Register 1 */
-#define PCI_L1SS_CTL1_PCIPM_L1_2 1 /* PCI PM L1.2 Enable */
-#define PCI_L1SS_CTL1_PCIPM_L1_1 2 /* PCI PM L1.1 Support */
-#define PCI_L1SS_CTL1_ASPM_L1_2 4 /* ASPM L1.2 Support */
-#define PCI_L1SS_CTL1_ASPM_L1_1 8 /* ASPM L1.1 Support */
-#define PCI_L1SS_CTL1_L1SS_MASK 0x0000000F
-#define PCI_L1SS_CTL2 0xC /* Control Register 2 */
+/* ASPM L1 PM Substates */
+#define PCI_L1SS_CAP 0x04 /* Capabilities Register */
+#define PCI_L1SS_CAP_PCIPM_L1_2 0x00000001 /* PCI-PM L1.2 Supported */
+#define PCI_L1SS_CAP_PCIPM_L1_1 0x00000002 /* PCI-PM L1.1 Supported */
+#define PCI_L1SS_CAP_ASPM_L1_2 0x00000004 /* ASPM L1.2 Supported */
+#define PCI_L1SS_CAP_ASPM_L1_1 0x00000008 /* ASPM L1.1 Supported */
+#define PCI_L1SS_CAP_L1_PM_SS 0x00000010 /* L1 PM Substates Supported */
+#define PCI_L1SS_CAP_CM_RESTORE_TIME 0x0000ff00 /* Port Common_Mode_Restore_Time */
+#define PCI_L1SS_CAP_P_PWR_ON_SCALE 0x00030000 /* Port T_POWER_ON scale */
+#define PCI_L1SS_CAP_P_PWR_ON_VALUE 0x00f80000 /* Port T_POWER_ON value */
+#define PCI_L1SS_CTL1 0x08 /* Control 1 Register */
+#define PCI_L1SS_CTL1_PCIPM_L1_2 0x00000001 /* PCI-PM L1.2 Enable */
+#define PCI_L1SS_CTL1_PCIPM_L1_1 0x00000002 /* PCI-PM L1.1 Enable */
+#define PCI_L1SS_CTL1_ASPM_L1_2 0x00000004 /* ASPM L1.2 Enable */
+#define PCI_L1SS_CTL1_ASPM_L1_1 0x00000008 /* ASPM L1.1 Enable */
+#define PCI_L1SS_CTL1_L1SS_MASK 0x0000000f
+#define PCI_L1SS_CTL1_CM_RESTORE_TIME 0x0000ff00 /* Common_Mode_Restore_Time */
+#define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */
+#define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */
+#define PCI_L1SS_CTL2 0x0c /* Control 2 Register */
#endif /* LINUX_PCI_REGS_H */
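The Resizable BAR and L1 PM Substates additions above are plain mask definitions; a minimal sketch of pulling such fields out of a config-space dword (the field_get() helper and the example register names are illustrative, not part of the header):

#include <stdint.h>
#include <linux/pci_regs.h>

/* Shift a value down by the position of the mask's lowest set bit. */
static inline uint32_t field_get(uint32_t reg, uint32_t mask)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

/*
 * For example, given the Resizable BAR control register and the L1SS
 * capability register read from extended config space:
 *
 *	nbars    = field_get(rebar_ctrl, PCI_REBAR_CTRL_NBAR_MASK);
 *	bar_size = field_get(rebar_ctrl, PCI_REBAR_CTRL_BAR_SIZE);
 *	t_cm     = field_get(l1ss_cap,  PCI_L1SS_CAP_CM_RESTORE_TIME);
 */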
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 362493a2f950..b9a4953018ed 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -942,6 +942,7 @@ enum perf_callchain_context {
#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
+#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT (1UL << 1)
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index b640071421f7..af5f8c2df87a 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -198,4 +198,13 @@ struct prctl_mm_map {
# define PR_CAP_AMBIENT_LOWER 3
# define PR_CAP_AMBIENT_CLEAR_ALL 4
+/* arm64 Scalable Vector Extension controls */
+/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */
+#define PR_SVE_SET_VL 50 /* set task vector length */
+# define PR_SVE_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
+#define PR_SVE_GET_VL 51 /* get task vector length */
+/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */
+# define PR_SVE_VL_LEN_MASK 0xffff
+# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
+
#endif /* _LINUX_PRCTL_H */
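A minimal userspace sketch of the new SVE controls, assuming headers that already carry these definitions; the 32-byte vector length is an arbitrary example value:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Request a 32-byte vector length and keep it across exec. */
	if (prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT) == -1)
		perror("PR_SVE_SET_VL");

	int ret = prctl(PR_SVE_GET_VL);
	if (ret != -1)
		printf("vl=%d inherit=%d\n",
		       ret & PR_SVE_VL_LEN_MASK,
		       !!(ret & PR_SVE_VL_INHERIT));
	return 0;
}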
diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h
index 9656aad8f8f7..9d4afea308a4 100644
--- a/include/uapi/linux/rxrpc.h
+++ b/include/uapi/linux/rxrpc.h
@@ -20,12 +20,12 @@
* RxRPC socket address
*/
struct sockaddr_rxrpc {
- sa_family_t srx_family; /* address family */
- u16 srx_service; /* service desired */
- u16 transport_type; /* type of transport socket (SOCK_DGRAM) */
- u16 transport_len; /* length of transport address */
+ __kernel_sa_family_t srx_family; /* address family */
+ __u16 srx_service; /* service desired */
+ __u16 transport_type; /* type of transport socket (SOCK_DGRAM) */
+ __u16 transport_len; /* length of transport address */
union {
- sa_family_t family; /* transport address family */
+ __kernel_sa_family_t family; /* transport address family */
struct sockaddr_in sin; /* IPv4 transport address */
struct sockaddr_in6 sin6; /* IPv6 transport address */
} transport;
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index 619fe6111dc9..be07b5470f4b 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -158,6 +158,7 @@
*/
#define UART_DLL 0 /* Out: Divisor Latch Low */
#define UART_DLM 1 /* Out: Divisor Latch High */
+#define UART_DIV_MAX 0xFFFF /* Max divisor value */
/*
* LCR=0xBF (or DLAB=1 for 16C660)
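UART_DIV_MAX gives callers an explicit upper bound for the 16-bit divisor latch; a sketch of how a computed divisor might be clamped against it (the function name and nearest-rounding are illustrative assumptions):

#include <linux/serial_reg.h>

static unsigned int uart_divisor(unsigned int uartclk, unsigned int baud)
{
	unsigned int quot = (uartclk + 8 * baud) / (16 * baud);	/* round to nearest */

	return quot > UART_DIV_MAX ? UART_DIV_MAX : quot;
}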
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index f65b92e0e1f9..ee8220f8dcf5 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <linux/compiler.h>
+#include <linux/compiler_types.h>
#ifndef __always_inline
#define __always_inline inline
diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h
index d5e0682ab837..293b2cdad88d 100644
--- a/include/uapi/linux/tls.h
+++ b/include/uapi/linux/tls.h
@@ -35,10 +35,6 @@
#define _UAPI_LINUX_TLS_H
#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <linux/socket.h>
-#include <linux/tcp.h>
-#include <net/tcp.h>
/* TLS socket options */
#define TLS_TX 1 /* Set transmit parameters */
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index cec06625f407..41a0a81b01e6 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -144,6 +144,10 @@
#define TEST_PACKET 4
#define TEST_FORCE_EN 5
+/* Status Type */
+#define USB_STATUS_TYPE_STANDARD 0
+#define USB_STATUS_TYPE_PTM 1
+
/*
* New Feature Selectors as added by USB 3.0
* See USB 3.0 spec Table 9-7
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index a92be0f492a9..c1395b5bd432 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -66,6 +66,9 @@
#define XATTR_NAME_SMACKTRANSMUTE XATTR_SECURITY_PREFIX XATTR_SMACK_TRANSMUTE
#define XATTR_NAME_SMACKMMAP XATTR_SECURITY_PREFIX XATTR_SMACK_MMAP
+#define XATTR_APPARMOR_SUFFIX "apparmor"
+#define XATTR_NAME_APPARMOR XATTR_SECURITY_PREFIX XATTR_APPARMOR_SUFFIX
+
#define XATTR_CAPS_SUFFIX "capability"
#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index e0e83a105953..7e11bb8651b6 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -101,7 +101,8 @@ enum {
IB_USER_VERBS_EX_CMD_MODIFY_WQ,
IB_USER_VERBS_EX_CMD_DESTROY_WQ,
IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
- IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL
+ IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
+ IB_USER_VERBS_EX_CMD_MODIFY_CQ
};
/*
@@ -125,6 +126,12 @@ struct ib_uverbs_comp_event_desc {
__u64 cq_handle;
};
+struct ib_uverbs_cq_moderation_caps {
+ __u16 max_cq_moderation_count;
+ __u16 max_cq_moderation_period;
+ __u32 reserved;
+};
+
/*
* All commands from userspace should start with a __u32 command field
* followed by __u16 in_words and out_words fields (which give the
@@ -263,6 +270,7 @@ struct ib_uverbs_ex_query_device_resp {
__u32 max_wq_type_rq;
__u32 raw_packet_caps;
struct ib_uverbs_tm_caps tm_caps;
+ struct ib_uverbs_cq_moderation_caps cq_moderation_caps;
};
struct ib_uverbs_query_port {
@@ -1151,6 +1159,18 @@ struct ib_uverbs_ex_destroy_rwq_ind_table {
__u32 ind_tbl_handle;
};
+struct ib_uverbs_cq_moderation {
+ __u16 cq_count;
+ __u16 cq_period;
+};
+
+struct ib_uverbs_ex_modify_cq {
+ __u32 cq_handle;
+ __u32 attr_mask;
+ struct ib_uverbs_cq_moderation attr;
+ __u32 reserved;
+};
+
#define IB_DEVICE_NAME_MAX 64
#endif /* IB_USER_VERBS_H */
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 23dba2d40907..a33e0517d3fd 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -40,6 +40,7 @@
enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
+ MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2,
};
enum {
@@ -191,6 +192,32 @@ struct mlx5_ib_sw_parsing_caps {
__u32 supported_qpts;
};
+struct mlx5_ib_striding_rq_caps {
+ __u32 min_single_stride_log_num_of_bytes;
+ __u32 max_single_stride_log_num_of_bytes;
+ __u32 min_single_wqe_log_num_of_strides;
+ __u32 max_single_wqe_log_num_of_strides;
+
+ /* Corresponding bit will be set if qp type from
+ * 'enum ib_qp_type' is supported, e.g.
+ * supported_qpts |= 1 << IB_QPT_RAW_PACKET
+ */
+ __u32 supported_qpts;
+ __u32 reserved;
+};
+
+enum mlx5_ib_query_dev_resp_flags {
+ /* Support 128B CQE compression */
+ MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
+ MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1,
+};
+
+enum mlx5_ib_tunnel_offloads {
+ MLX5_IB_TUNNELED_OFFLOADS_VXLAN = 1 << 0,
+ MLX5_IB_TUNNELED_OFFLOADS_GRE = 1 << 1,
+ MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2
+};
+
struct mlx5_ib_query_device_resp {
__u32 comp_mask;
__u32 response_length;
@@ -199,8 +226,15 @@ struct mlx5_ib_query_device_resp {
struct mlx5_ib_cqe_comp_caps cqe_comp_caps;
struct mlx5_packet_pacing_caps packet_pacing_caps;
__u32 mlx5_ib_support_multi_pkt_send_wqes;
- __u32 reserved;
+ __u32 flags; /* Use enum mlx5_ib_query_dev_resp_flags */
struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
+ struct mlx5_ib_striding_rq_caps striding_rq_caps;
+ __u32 tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */
+ __u32 reserved;
+};
+
+enum mlx5_ib_create_cq_flags {
+ MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0,
};
struct mlx5_ib_create_cq {
@@ -209,7 +243,7 @@ struct mlx5_ib_create_cq {
__u32 cqe_size;
__u8 cqe_comp_en;
__u8 cqe_comp_res_format;
- __u16 reserved; /* explicit padding (optional on i386) */
+ __u16 flags;
};
struct mlx5_ib_create_cq_resp {
@@ -271,7 +305,9 @@ enum mlx5_rx_hash_fields {
MLX5_RX_HASH_SRC_PORT_TCP = 1 << 4,
MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
- MLX5_RX_HASH_DST_PORT_UDP = 1 << 7
+ MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
+ /* Save bits for future fields */
+ MLX5_RX_HASH_INNER = 1 << 31
};
struct mlx5_ib_create_qp_rss {
@@ -281,7 +317,7 @@ struct mlx5_ib_create_qp_rss {
__u8 reserved[6];
__u8 rx_hash_key[128]; /* valid only for Toeplitz */
__u32 comp_mask;
- __u32 reserved1;
+ __u32 flags;
};
struct mlx5_ib_create_qp_resp {
@@ -295,6 +331,10 @@ struct mlx5_ib_alloc_mw {
__u16 reserved2;
};
+enum mlx5_ib_create_wq_mask {
+ MLX5_IB_CREATE_WQ_STRIDING_RQ = (1 << 0),
+};
+
struct mlx5_ib_create_wq {
__u64 buf_addr;
__u64 db_addr;
@@ -303,7 +343,9 @@ struct mlx5_ib_create_wq {
__u32 user_index;
__u32 flags;
__u32 comp_mask;
- __u32 reserved;
+ __u32 single_stride_log_num_of_bytes;
+ __u32 single_wqe_log_num_of_strides;
+ __u32 two_byte_shift_en;
};
struct mlx5_ib_create_ah_resp {
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index 912ea1556a0b..aaa352f2f110 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -159,6 +159,8 @@ struct pvrdma_resize_cq {
struct pvrdma_create_srq {
__u64 buf_addr;
+ __u32 buf_size;
+ __u32 reserved;
};
struct pvrdma_create_srq_resp {
diff --git a/init/Kconfig b/init/Kconfig
index 3c1faaa2af4a..7d5a6fbac56a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -472,6 +472,13 @@ config TASK_IO_ACCOUNTING
endmenu # "CPU/Task time and stats accounting"
+config CPU_ISOLATION
+ bool "CPU isolation"
+ help
+ Make sure that CPUs running critical tasks are not disturbed by
+ any source of "noise" such as unbound workqueues, timers, kthreads...
+ Unbound jobs get offloaded to housekeeping CPUs.
+
source "kernel/rcu/Kconfig"
config BUILD_BIN2C
@@ -1379,15 +1386,6 @@ config USERFAULTFD
Enable the userfaultfd() system call that allows to intercept and
handle page faults in userland.
-config PCI_QUIRKS
- default y
- bool "Enable PCI quirk workarounds" if EXPERT
- depends on PCI
- help
- This enables workarounds for various PCI chipset
- bugs/quirks. Disable this only if your target machine is
- unaffected by PCI quirks.
-
config MEMBARRIER
bool "Enable membarrier() system call" if EXPERT
default y
@@ -1657,12 +1655,6 @@ config HAVE_GENERIC_DMA_COHERENT
bool
default n
-config SLABINFO
- bool
- depends on PROC_FS
- depends on SLAB || SLUB_DEBUG
- default y
-
config RT_MUTEXES
bool
@@ -1752,7 +1744,7 @@ config MODULE_SIG
help
Check modules for valid signatures upon load: the signature
is simply appended to the module. For more information see
- Documentation/module-signing.txt.
+ <file:Documentation/admin-guide/module-signing.rst>.
Note that this option adds the OpenSSL development packages as a
kernel build dependency so that the signing tool can use its crypto
diff --git a/init/do_mounts.c b/init/do_mounts.c
index f6d4dd764a52..7cf4f6dafd5f 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -380,8 +380,7 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
void __init mount_block_root(char *name, int flags)
{
- struct page *page = alloc_page(GFP_KERNEL |
- __GFP_NOTRACK_FALSE_POSITIVE);
+ struct page *page = alloc_page(GFP_KERNEL);
char *fs_names = page_address(page);
char *p;
#ifdef CONFIG_BLOCK
diff --git a/init/main.c b/init/main.c
index 0ee9c6866ada..859a786f7c0a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -46,6 +46,7 @@
#include <linux/cgroup.h>
#include <linux/efi.h>
#include <linux/tick.h>
+#include <linux/sched/isolation.h>
#include <linux/interrupt.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
@@ -69,7 +70,6 @@
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/async.h>
-#include <linux/kmemcheck.h>
#include <linux/sfi.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
@@ -606,6 +606,7 @@ asmlinkage __visible void __init start_kernel(void)
early_irq_init();
init_IRQ();
tick_init();
+ housekeeping_init();
rcu_init_nohz();
init_timers();
hrtimers_init();
@@ -664,12 +665,12 @@ asmlinkage __visible void __init start_kernel(void)
debug_objects_mem_init();
setup_per_cpu_pageset();
numa_policy_init();
+ acpi_early_init();
if (late_time_init)
late_time_init();
calibrate_delay();
pidmap_init();
anon_vma_init();
- acpi_early_init();
#ifdef CONFIG_X86
if (efi_enabled(EFI_RUNTIME_SERVICES))
efi_enter_virtual_mode();
diff --git a/kernel/acct.c b/kernel/acct.c
index 6670fbd3e466..d15c0ee4d955 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -147,7 +147,7 @@ static struct bsd_acct_struct *acct_get(struct pid_namespace *ns)
again:
smp_rmb();
rcu_read_lock();
- res = to_acct(ACCESS_ONCE(ns->bacct));
+ res = to_acct(READ_ONCE(ns->bacct));
if (!res) {
rcu_read_unlock();
return NULL;
@@ -159,7 +159,7 @@ again:
}
rcu_read_unlock();
mutex_lock(&res->lock);
- if (res != to_acct(ACCESS_ONCE(ns->bacct))) {
+ if (res != to_acct(READ_ONCE(ns->bacct))) {
mutex_unlock(&res->lock);
acct_put(res);
goto again;
diff --git a/kernel/audit.c b/kernel/audit.c
index be1c28fd4d57..227db99b0f19 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -85,13 +85,13 @@ static int audit_initialized;
#define AUDIT_OFF 0
#define AUDIT_ON 1
#define AUDIT_LOCKED 2
-u32 audit_enabled;
-u32 audit_ever_enabled;
+u32 audit_enabled = AUDIT_OFF;
+bool audit_ever_enabled = !!AUDIT_OFF;
EXPORT_SYMBOL_GPL(audit_enabled);
/* Default state when kernel boots without any parameters. */
-static u32 audit_default;
+static u32 audit_default = AUDIT_OFF;
/* If auditing cannot proceed, audit_failure selects what happens. */
static u32 audit_failure = AUDIT_FAIL_PRINTK;
@@ -1197,25 +1197,28 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
pid_t auditd_pid;
struct pid *req_pid = task_tgid(current);
- /* sanity check - PID values must match */
- if (new_pid != pid_vnr(req_pid))
+ /* Sanity check - PID values must match. Setting
+ * pid to 0 is how auditd ends auditing. */
+ if (new_pid && (new_pid != pid_vnr(req_pid)))
return -EINVAL;
/* test the auditd connection */
audit_replace(req_pid);
auditd_pid = auditd_pid_vnr();
- /* only the current auditd can unregister itself */
- if ((!new_pid) && (new_pid != auditd_pid)) {
- audit_log_config_change("audit_pid", new_pid,
- auditd_pid, 0);
- return -EACCES;
- }
- /* replacing a healthy auditd is not allowed */
- if (auditd_pid && new_pid) {
- audit_log_config_change("audit_pid", new_pid,
- auditd_pid, 0);
- return -EEXIST;
+ if (auditd_pid) {
+ /* replacing a healthy auditd is not allowed */
+ if (new_pid) {
+ audit_log_config_change("audit_pid",
+ new_pid, auditd_pid, 0);
+ return -EEXIST;
+ }
+ /* only current auditd can unregister itself */
+ if (pid_vnr(req_pid) != auditd_pid) {
+ audit_log_config_change("audit_pid",
+ new_pid, auditd_pid, 0);
+ return -EACCES;
+ }
}
if (new_pid) {
@@ -1549,8 +1552,6 @@ static int __init audit_init(void)
register_pernet_subsys(&audit_net_ops);
audit_initialized = AUDIT_INITIALIZED;
- audit_enabled = audit_default;
- audit_ever_enabled |= !!audit_default;
kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd");
if (IS_ERR(kauditd_task)) {
@@ -1564,14 +1565,21 @@ static int __init audit_init(void)
return 0;
}
-__initcall(audit_init);
+postcore_initcall(audit_init);
/* Process kernel command-line parameter at boot time. audit=0 or audit=1. */
static int __init audit_enable(char *str)
{
- audit_default = !!simple_strtol(str, NULL, 0);
- if (!audit_default)
+ long val;
+
+ if (kstrtol(str, 0, &val))
+ panic("audit: invalid 'audit' parameter value (%s)\n", str);
+ audit_default = (val ? AUDIT_ON : AUDIT_OFF);
+
+ if (audit_default == AUDIT_OFF)
audit_initialized = AUDIT_DISABLED;
+ if (audit_set_enabled(audit_default))
+ panic("audit: error setting audit state (%d)\n", audit_default);
pr_info("%s\n", audit_default ?
"enabled (after initialization)" : "disabled (until reboot)");
@@ -2337,32 +2345,6 @@ void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
}
}
-#ifdef CONFIG_SECURITY
-/**
- * audit_log_secctx - Converts and logs SELinux context
- * @ab: audit_buffer
- * @secid: security number
- *
- * This is a helper function that calls security_secid_to_secctx to convert
- * secid to secctx and then adds the (converted) SELinux context to the audit
- * log by calling audit_log_format, thus also preventing leak of internal secid
- * to userspace. If secid cannot be converted audit_panic is called.
- */
-void audit_log_secctx(struct audit_buffer *ab, u32 secid)
-{
- u32 len;
- char *secctx;
-
- if (security_secid_to_secctx(secid, &secctx, &len)) {
- audit_panic("Cannot convert secid to context");
- } else {
- audit_log_format(ab, " obj=%s", secctx);
- security_release_secctx(secctx, len);
- }
-}
-EXPORT_SYMBOL(audit_log_secctx);
-#endif
-
EXPORT_SYMBOL(audit_log_start);
EXPORT_SYMBOL(audit_log_end);
EXPORT_SYMBOL(audit_log_format);
diff --git a/kernel/audit.h b/kernel/audit.h
index 9b110ae17ee3..af5bc59487ed 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -208,7 +208,7 @@ struct audit_context {
struct audit_proctitle proctitle;
};
-extern u32 audit_ever_enabled;
+extern bool audit_ever_enabled;
extern void audit_copy_inode(struct audit_names *name,
const struct dentry *dentry,
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index d4b050d9a66e..fd353120e0d9 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -1008,7 +1008,7 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify
* We are guaranteed to have at least one reference to the mark from
* either the inode or the caller of fsnotify_destroy_mark().
*/
- BUG_ON(atomic_read(&entry->refcnt) < 1);
+ BUG_ON(refcount_read(&entry->refcnt) < 1);
}
static const struct fsnotify_ops audit_tree_ops = {
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 0b0aa5854dac..4a1758adb222 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -56,7 +56,8 @@ struct list_head audit_filter_list[AUDIT_NR_FILTERS] = {
LIST_HEAD_INIT(audit_filter_list[3]),
LIST_HEAD_INIT(audit_filter_list[4]),
LIST_HEAD_INIT(audit_filter_list[5]),
-#if AUDIT_NR_FILTERS != 6
+ LIST_HEAD_INIT(audit_filter_list[6]),
+#if AUDIT_NR_FILTERS != 7
#error Fix audit_filter_list initialiser
#endif
};
@@ -67,6 +68,7 @@ static struct list_head audit_rules_list[AUDIT_NR_FILTERS] = {
LIST_HEAD_INIT(audit_rules_list[3]),
LIST_HEAD_INIT(audit_rules_list[4]),
LIST_HEAD_INIT(audit_rules_list[5]),
+ LIST_HEAD_INIT(audit_rules_list[6]),
};
DEFINE_MUTEX(audit_filter_mutex);
@@ -263,6 +265,7 @@ static inline struct audit_entry *audit_to_entry_common(struct audit_rule_data *
#endif
case AUDIT_FILTER_USER:
case AUDIT_FILTER_TYPE:
+ case AUDIT_FILTER_FS:
;
}
if (unlikely(rule->action == AUDIT_POSSIBLE)) {
@@ -338,6 +341,21 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
entry->rule.listnr != AUDIT_FILTER_USER)
return -EINVAL;
break;
+ case AUDIT_FSTYPE:
+ if (entry->rule.listnr != AUDIT_FILTER_FS)
+ return -EINVAL;
+ break;
+ }
+
+ switch(entry->rule.listnr) {
+ case AUDIT_FILTER_FS:
+ switch(f->type) {
+ case AUDIT_FSTYPE:
+ case AUDIT_FILTERKEY:
+ break;
+ default:
+ return -EINVAL;
+ }
}
switch(f->type) {
@@ -391,6 +409,7 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
return -EINVAL;
/* FALL THROUGH */
case AUDIT_ARCH:
+ case AUDIT_FSTYPE:
if (f->op != Audit_not_equal && f->op != Audit_equal)
return -EINVAL;
break;
@@ -910,10 +929,13 @@ static inline int audit_add_rule(struct audit_entry *entry)
#ifdef CONFIG_AUDITSYSCALL
int dont_count = 0;
- /* If either of these, don't count towards total */
- if (entry->rule.listnr == AUDIT_FILTER_USER ||
- entry->rule.listnr == AUDIT_FILTER_TYPE)
+ /* If any of these, don't count towards total */
+ switch(entry->rule.listnr) {
+ case AUDIT_FILTER_USER:
+ case AUDIT_FILTER_TYPE:
+ case AUDIT_FILTER_FS:
dont_count = 1;
+ }
#endif
mutex_lock(&audit_filter_mutex);
@@ -989,10 +1011,13 @@ int audit_del_rule(struct audit_entry *entry)
#ifdef CONFIG_AUDITSYSCALL
int dont_count = 0;
- /* If either of these, don't count towards total */
- if (entry->rule.listnr == AUDIT_FILTER_USER ||
- entry->rule.listnr == AUDIT_FILTER_TYPE)
+ /* If any of these, don't count towards total */
+ switch(entry->rule.listnr) {
+ case AUDIT_FILTER_USER:
+ case AUDIT_FILTER_TYPE:
+ case AUDIT_FILTER_FS:
dont_count = 1;
+ }
#endif
mutex_lock(&audit_filter_mutex);
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index ecc23e25c9eb..e80459f7e132 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1869,10 +1869,33 @@ void __audit_inode_child(struct inode *parent,
struct inode *inode = d_backing_inode(dentry);
const char *dname = dentry->d_name.name;
struct audit_names *n, *found_parent = NULL, *found_child = NULL;
+ struct audit_entry *e;
+ struct list_head *list = &audit_filter_list[AUDIT_FILTER_FS];
+ int i;
if (!context->in_syscall)
return;
+ rcu_read_lock();
+ if (!list_empty(list)) {
+ list_for_each_entry_rcu(e, list, list) {
+ for (i = 0; i < e->rule.field_count; i++) {
+ struct audit_field *f = &e->rule.fields[i];
+
+ if (f->type == AUDIT_FSTYPE) {
+ if (audit_comparator(parent->i_sb->s_magic,
+ f->op, f->val)) {
+ if (e->rule.action == AUDIT_NEVER) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ }
+ }
+ }
+ }
+ rcu_read_unlock();
+
if (inode)
handle_one(inode);
@@ -2390,6 +2413,12 @@ void __audit_log_kern_module(char *name)
context->type = AUDIT_KERN_MODULE;
}
+void __audit_fanotify(unsigned int response)
+{
+ audit_log(current->audit_context, GFP_KERNEL,
+ AUDIT_FANOTIFY, "resp=%u", response);
+}
+
static void audit_log_task(struct audit_buffer *ab)
{
kuid_t auid, uid;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 8a6c37762330..b9f8686a84cf 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -85,8 +85,6 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
if (fp == NULL)
return NULL;
- kmemcheck_annotate_bitfield(fp, meta);
-
aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
if (aux == NULL) {
vfree(fp);
@@ -127,8 +125,6 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
if (fp == NULL) {
__bpf_prog_uncharge(fp_old->aux->user, delta);
} else {
- kmemcheck_annotate_bitfield(fp, meta);
-
memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
fp->pages = pages;
fp->aux->prog = fp;
@@ -675,8 +671,6 @@ static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
if (fp != NULL) {
- kmemcheck_annotate_bitfield(fp, meta);
-
/* aux->prog still points to the fp_other one, so
* when promoting the clone to the real program,
* this still needs to be adapted.
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index 5c51d1985b51..673fa6fe2d73 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -78,8 +78,10 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
struct pcpu_freelist_head *head;
struct pcpu_freelist_node *node;
+ unsigned long flags;
int orig_cpu, cpu;
+ local_irq_save(flags);
orig_cpu = cpu = raw_smp_processor_id();
while (1) {
head = per_cpu_ptr(s->freelist, cpu);
@@ -87,14 +89,16 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
node = head->first;
if (node) {
head->first = node->next;
- raw_spin_unlock(&head->lock);
+ raw_spin_unlock_irqrestore(&head->lock, flags);
return node;
}
raw_spin_unlock(&head->lock);
cpu = cpumask_next(cpu, cpu_possible_mask);
if (cpu >= nr_cpu_ids)
cpu = 0;
- if (cpu == orig_cpu)
+ if (cpu == orig_cpu) {
+ local_irq_restore(flags);
return NULL;
+ }
}
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4a942e2e753d..dd54d20ace2f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -799,12 +799,13 @@ static int check_stack_read(struct bpf_verifier_env *env,
/* check read/write into map element returned by bpf_map_lookup_elem() */
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
- int size)
+ int size, bool zero_size_allowed)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_map *map = regs[regno].map_ptr;
- if (off < 0 || size <= 0 || off + size > map->value_size) {
+ if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
+ off + size > map->value_size) {
verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
map->value_size, off, size);
return -EACCES;
@@ -814,7 +815,7 @@ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
/* check read/write into a map element with possible variable offset */
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
- int off, int size)
+ int off, int size, bool zero_size_allowed)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_reg_state *reg = &state->regs[regno];
@@ -837,7 +838,8 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
regno);
return -EACCES;
}
- err = __check_map_access(env, regno, reg->smin_value + off, size);
+ err = __check_map_access(env, regno, reg->smin_value + off, size,
+ zero_size_allowed);
if (err) {
verbose(env, "R%d min value is outside of the array range\n",
regno);
@@ -853,7 +855,8 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
regno);
return -EACCES;
}
- err = __check_map_access(env, regno, reg->umax_value + off, size);
+ err = __check_map_access(env, regno, reg->umax_value + off, size,
+ zero_size_allowed);
if (err)
verbose(env, "R%d max value is outside of the array range\n",
regno);
@@ -889,12 +892,13 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
}
static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
- int off, int size)
+ int off, int size, bool zero_size_allowed)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = &regs[regno];
- if (off < 0 || size <= 0 || (u64)off + size > reg->range) {
+ if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
+ (u64)off + size > reg->range) {
verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
off, size, regno, reg->id, reg->off, reg->range);
return -EACCES;
@@ -903,7 +907,7 @@ static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
}
static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
- int size)
+ int size, bool zero_size_allowed)
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = &regs[regno];
@@ -922,7 +926,7 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
regno);
return -EACCES;
}
- err = __check_packet_access(env, regno, off, size);
+ err = __check_packet_access(env, regno, off, size, zero_size_allowed);
if (err) {
verbose(env, "R%d offset is outside of the packet\n", regno);
return err;
@@ -1097,7 +1101,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
return -EACCES;
}
- err = check_map_access(env, regno, off, size);
+ err = check_map_access(env, regno, off, size, false);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
@@ -1184,7 +1188,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
value_regno);
return -EACCES;
}
- err = check_packet_access(env, regno, off, size);
+ err = check_packet_access(env, regno, off, size, false);
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else {
@@ -1281,7 +1285,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
}
off = regs[regno].off + regs[regno].var_off.value;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
- access_size <= 0) {
+ access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
regno, off, access_size);
return -EACCES;
@@ -1319,9 +1323,11 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
switch (reg->type) {
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
- return check_packet_access(env, regno, reg->off, access_size);
+ return check_packet_access(env, regno, reg->off, access_size,
+ zero_size_allowed);
case PTR_TO_MAP_VALUE:
- return check_map_access(env, regno, reg->off, access_size);
+ return check_map_access(env, regno, reg->off, access_size,
+ zero_size_allowed);
default: /* scalar_value|ptr_to_stack or invalid ptr */
return check_stack_boundary(env, regno, access_size,
zero_size_allowed, meta);
@@ -1415,7 +1421,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
}
if (type_is_pkt_pointer(type))
err = check_packet_access(env, regno, reg->off,
- meta->map_ptr->key_size);
+ meta->map_ptr->key_size,
+ false);
else
err = check_stack_boundary(env, regno,
meta->map_ptr->key_size,
@@ -1431,7 +1438,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
}
if (type_is_pkt_pointer(type))
err = check_packet_access(env, regno, reg->off,
- meta->map_ptr->value_size);
+ meta->map_ptr->value_size,
+ false);
else
err = check_stack_boundary(env, regno,
meta->map_ptr->value_size,
diff --git a/kernel/cgroup/Makefile b/kernel/cgroup/Makefile
index ae448f7632cc..2be89a003185 100644
--- a/kernel/cgroup/Makefile
+++ b/kernel/cgroup/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y := cgroup.o namespace.o cgroup-v1.o
+obj-y := cgroup.o stat.o namespace.o cgroup-v1.o
obj-$(CONFIG_CGROUP_FREEZER) += freezer.o
obj-$(CONFIG_CGROUP_PIDS) += pids.o
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index bf54ade001be..b928b27050c6 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -201,6 +201,15 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
int cgroup_task_count(const struct cgroup *cgrp);
/*
+ * stat.c
+ */
+void cgroup_stat_flush(struct cgroup *cgrp);
+int cgroup_stat_init(struct cgroup *cgrp);
+void cgroup_stat_exit(struct cgroup *cgrp);
+void cgroup_stat_show_cputime(struct seq_file *seq);
+void cgroup_stat_boot(void);
+
+/*
* namespace.c
*/
extern const struct proc_ns_operations cgroupns_operations;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 00f5b358aeac..0b1ffe147f24 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -142,12 +142,14 @@ static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
};
#undef SUBSYS
+static DEFINE_PER_CPU(struct cgroup_cpu_stat, cgrp_dfl_root_cpu_stat);
+
/*
* The default hierarchy, reserved for the subsystems that are otherwise
* unattached - it never has more than a single cgroup, and all tasks are
* part of that cgroup.
*/
-struct cgroup_root cgrp_dfl_root;
+struct cgroup_root cgrp_dfl_root = { .cgrp.cpu_stat = &cgrp_dfl_root_cpu_stat };
EXPORT_SYMBOL_GPL(cgrp_dfl_root);
/*
@@ -462,6 +464,28 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
}
/**
+ * cgroup_tryget_css - try to get a cgroup's css for the specified subsystem
+ * @cgrp: the cgroup of interest
+ * @ss: the subsystem of interest
+ *
+ * Find and get @cgrp's css associated with @ss. If the css doesn't exist
+ * or is offline, %NULL is returned.
+ */
+static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
+{
+ struct cgroup_subsys_state *css;
+
+ rcu_read_lock();
+ css = cgroup_css(cgrp, ss);
+ if (!css || !css_tryget_online(css))
+ css = NULL;
+ rcu_read_unlock();
+
+ return css;
+}
+
+/**
* cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest (%NULL returns @cgrp->self)
@@ -647,6 +671,14 @@ struct css_set init_css_set = {
.cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
.mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node),
.mg_node = LIST_HEAD_INIT(init_css_set.mg_node),
+
+ /*
+ * The following field is re-initialized when this cset gets linked
+ * in cgroup_init(). However, let's initialize the field
+ * statically too so that the default cgroup can be accessed safely
+ * early during boot.
+ */
+ .dfl_cgrp = &cgrp_dfl_root.cgrp,
};
static int css_set_count = 1; /* 1 for init_css_set */
@@ -3315,6 +3347,37 @@ static int cgroup_stat_show(struct seq_file *seq, void *v)
return 0;
}
+static int __maybe_unused cgroup_extra_stat_show(struct seq_file *seq,
+ struct cgroup *cgrp, int ssid)
+{
+ struct cgroup_subsys *ss = cgroup_subsys[ssid];
+ struct cgroup_subsys_state *css;
+ int ret;
+
+ if (!ss->css_extra_stat_show)
+ return 0;
+
+ css = cgroup_tryget_css(cgrp, ss);
+ if (!css)
+ return 0;
+
+ ret = ss->css_extra_stat_show(seq, css);
+ css_put(css);
+ return ret;
+}
+
+static int cpu_stat_show(struct seq_file *seq, void *v)
+{
+ struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup;
+ int ret = 0;
+
+ cgroup_stat_show_cputime(seq);
+#ifdef CONFIG_CGROUP_SCHED
+ ret = cgroup_extra_stat_show(seq, cgrp, cpu_cgrp_id);
+#endif
+ return ret;
+}
+
static int cgroup_file_open(struct kernfs_open_file *of)
{
struct cftype *cft = of->kn->priv;
@@ -4422,6 +4485,11 @@ static struct cftype cgroup_base_files[] = {
.name = "cgroup.stat",
.seq_show = cgroup_stat_show,
},
+ {
+ .name = "cpu.stat",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = cpu_stat_show,
+ },
{ } /* terminate */
};
@@ -4482,6 +4550,8 @@ static void css_free_work_fn(struct work_struct *work)
*/
cgroup_put(cgroup_parent(cgrp));
kernfs_put(cgrp->kn);
+ if (cgroup_on_dfl(cgrp))
+ cgroup_stat_exit(cgrp);
kfree(cgrp);
} else {
/*
@@ -4526,6 +4596,9 @@ static void css_release_work_fn(struct work_struct *work)
/* cgroup release path */
trace_cgroup_release(cgrp);
+ if (cgroup_on_dfl(cgrp))
+ cgroup_stat_flush(cgrp);
+
for (tcgrp = cgroup_parent(cgrp); tcgrp;
tcgrp = cgroup_parent(tcgrp))
tcgrp->nr_dying_descendants--;
@@ -4709,6 +4782,12 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
if (ret)
goto out_free_cgrp;
+ if (cgroup_on_dfl(parent)) {
+ ret = cgroup_stat_init(cgrp);
+ if (ret)
+ goto out_cancel_ref;
+ }
+
/*
* Temporarily set the pointer to NULL, so idr_find() won't return
* a half-baked cgroup.
@@ -4716,7 +4795,7 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
if (cgrp->id < 0) {
ret = -ENOMEM;
- goto out_cancel_ref;
+ goto out_stat_exit;
}
init_cgroup_housekeeping(cgrp);
@@ -4767,6 +4846,9 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
out_idr_free:
cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
+out_stat_exit:
+ if (cgroup_on_dfl(parent))
+ cgroup_stat_exit(cgrp);
out_cancel_ref:
percpu_ref_exit(&cgrp->self.refcnt);
out_free_cgrp:
@@ -5161,6 +5243,8 @@ int __init cgroup_init(void)
BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));
+ cgroup_stat_boot();
+
/*
* The latency of the synchronize_sched() is too high for cgroups,
* avoid it at the cost of forcing all readers into the slow path.
@@ -5780,3 +5864,72 @@ int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
return ret;
}
#endif /* CONFIG_CGROUP_BPF */
+
+#ifdef CONFIG_SYSFS
+static ssize_t show_delegatable_files(struct cftype *files, char *buf,
+ ssize_t size, const char *prefix)
+{
+ struct cftype *cft;
+ ssize_t ret = 0;
+
+ for (cft = files; cft && cft->name[0] != '\0'; cft++) {
+ if (!(cft->flags & CFTYPE_NS_DELEGATABLE))
+ continue;
+
+ if (prefix)
+ ret += snprintf(buf + ret, size - ret, "%s.", prefix);
+
+ ret += snprintf(buf + ret, size - ret, "%s\n", cft->name);
+
+ if (unlikely(ret >= size)) {
+ WARN_ON(1);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t delegate_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct cgroup_subsys *ss;
+ int ssid;
+ ssize_t ret = 0;
+
+ ret = show_delegatable_files(cgroup_base_files, buf, PAGE_SIZE - ret,
+ NULL);
+
+ for_each_subsys(ss, ssid)
+ ret += show_delegatable_files(ss->dfl_cftypes, buf + ret,
+ PAGE_SIZE - ret,
+ cgroup_subsys_name[ssid]);
+
+ return ret;
+}
+static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate);
+
+static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "nsdelegate\n");
+}
+static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);
+
+static struct attribute *cgroup_sysfs_attrs[] = {
+ &cgroup_delegate_attr.attr,
+ &cgroup_features_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group cgroup_sysfs_attr_group = {
+ .attrs = cgroup_sysfs_attrs,
+ .name = "cgroup",
+};
+
+static int __init cgroup_sysfs_init(void)
+{
+ return sysfs_create_group(kernel_kobj, &cgroup_sysfs_attr_group);
+}
+subsys_initcall(cgroup_sysfs_init);
+#endif /* CONFIG_SYSFS */
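The attribute group registered above shows up under /sys/kernel/cgroup/; a small sketch that dumps the two new files (paths follow from the group name, error handling kept minimal):

#include <stdio.h>

static void dump(const char *path)
{
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f)
		return;
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
}

int main(void)
{
	dump("/sys/kernel/cgroup/delegate");	/* delegatable cgroup2 files */
	dump("/sys/kernel/cgroup/features");	/* e.g. "nsdelegate" */
	return 0;
}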
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 4657e2924ecb..f7efa7b4d825 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -57,7 +57,7 @@
#include <linux/backing-dev.h>
#include <linux/sort.h>
#include <linux/oom.h>
-
+#include <linux/sched/isolation.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
@@ -656,7 +656,6 @@ static int generate_sched_domains(cpumask_var_t **domains,
int csn; /* how many cpuset ptrs in csa so far */
int i, j, k; /* indices for partition finding loops */
cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
- cpumask_var_t non_isolated_cpus; /* load balanced CPUs */
struct sched_domain_attr *dattr; /* attributes for custom domains */
int ndoms = 0; /* number of sched domains in result */
int nslot; /* next empty doms[] struct cpumask slot */
@@ -666,10 +665,6 @@ static int generate_sched_domains(cpumask_var_t **domains,
dattr = NULL;
csa = NULL;
- if (!alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL))
- goto done;
- cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
-
/* Special case for the 99% of systems with one, full, sched domain */
if (is_sched_load_balance(&top_cpuset)) {
ndoms = 1;
@@ -683,7 +678,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
update_domain_attr_tree(dattr, &top_cpuset);
}
cpumask_and(doms[0], top_cpuset.effective_cpus,
- non_isolated_cpus);
+ housekeeping_cpumask(HK_FLAG_DOMAIN));
goto done;
}
@@ -707,7 +702,8 @@ static int generate_sched_domains(cpumask_var_t **domains,
*/
if (!cpumask_empty(cp->cpus_allowed) &&
!(is_sched_load_balance(cp) &&
- cpumask_intersects(cp->cpus_allowed, non_isolated_cpus)))
+ cpumask_intersects(cp->cpus_allowed,
+ housekeeping_cpumask(HK_FLAG_DOMAIN))))
continue;
if (is_sched_load_balance(cp))
@@ -789,7 +785,7 @@ restart:
if (apn == b->pn) {
cpumask_or(dp, dp, b->effective_cpus);
- cpumask_and(dp, dp, non_isolated_cpus);
+ cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN));
if (dattr)
update_domain_attr_tree(dattr + nslot, b);
@@ -802,7 +798,6 @@ restart:
BUG_ON(nslot != ndoms);
done:
- free_cpumask_var(non_isolated_cpus);
kfree(csa);
/*
diff --git a/kernel/cgroup/stat.c b/kernel/cgroup/stat.c
new file mode 100644
index 000000000000..133b465691d6
--- /dev/null
+++ b/kernel/cgroup/stat.c
@@ -0,0 +1,334 @@
+#include "cgroup-internal.h"
+
+#include <linux/sched/cputime.h>
+
+static DEFINE_MUTEX(cgroup_stat_mutex);
+static DEFINE_PER_CPU(raw_spinlock_t, cgroup_cpu_stat_lock);
+
+static struct cgroup_cpu_stat *cgroup_cpu_stat(struct cgroup *cgrp, int cpu)
+{
+ return per_cpu_ptr(cgrp->cpu_stat, cpu);
+}
+
+/**
+ * cgroup_cpu_stat_updated - keep track of updated cpu_stat
+ * @cgrp: target cgroup
+ * @cpu: cpu on which cpu_stat was updated
+ *
+ * @cgrp's cpu_stat on @cpu was updated. Put it on the parent's matching
+ * cpu_stat->updated_children list. See the comment on top of
+ * cgroup_cpu_stat definition for details.
+ */
+static void cgroup_cpu_stat_updated(struct cgroup *cgrp, int cpu)
+{
+ raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_cpu_stat_lock, cpu);
+ struct cgroup *parent;
+ unsigned long flags;
+
+ /*
+ * Speculative already-on-list test. This may race leading to
+ * temporary inaccuracies, which is fine.
+ *
+ * Because @parent's updated_children is terminated with @parent
+ * instead of NULL, we can tell whether @cgrp is on the list by
+ * testing the next pointer for NULL.
+ */
+ if (cgroup_cpu_stat(cgrp, cpu)->updated_next)
+ return;
+
+ raw_spin_lock_irqsave(cpu_lock, flags);
+
+ /* put @cgrp and all ancestors on the corresponding updated lists */
+ for (parent = cgroup_parent(cgrp); parent;
+ cgrp = parent, parent = cgroup_parent(cgrp)) {
+ struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu);
+ struct cgroup_cpu_stat *pcstat = cgroup_cpu_stat(parent, cpu);
+
+ /*
+ * Both additions and removals are bottom-up. If a cgroup
+ * is already in the tree, all ancestors are.
+ */
+ if (cstat->updated_next)
+ break;
+
+ cstat->updated_next = pcstat->updated_children;
+ pcstat->updated_children = cgrp;
+ }
+
+ raw_spin_unlock_irqrestore(cpu_lock, flags);
+}
+
+/**
+ * cgroup_cpu_stat_pop_updated - iterate and dismantle cpu_stat updated tree
+ * @pos: current position
+ * @root: root of the tree to traverse
+ * @cpu: target cpu
+ *
+ * Walks the updated cpu_stat tree on @cpu from @root. %NULL @pos starts
+ * the traversal and %NULL return indicates the end. During traversal,
+ * each returned cgroup is unlinked from the tree. Must be called with the
+ * matching cgroup_cpu_stat_lock held.
+ *
+ * The only ordering guarantee is that, for a parent and a child pair
+ * covered by a given traversal, if a child is visited, its parent is
+ * guaranteed to be visited afterwards.
+ */
+static struct cgroup *cgroup_cpu_stat_pop_updated(struct cgroup *pos,
+ struct cgroup *root, int cpu)
+{
+ struct cgroup_cpu_stat *cstat;
+ struct cgroup *parent;
+
+ if (pos == root)
+ return NULL;
+
+ /*
+ * We're gonna walk down to the first leaf and visit/remove it. We
+ * can pick whatever unvisited node as the starting point.
+ */
+ if (!pos)
+ pos = root;
+ else
+ pos = cgroup_parent(pos);
+
+ /* walk down to the first leaf */
+ while (true) {
+ cstat = cgroup_cpu_stat(pos, cpu);
+ if (cstat->updated_children == pos)
+ break;
+ pos = cstat->updated_children;
+ }
+
+ /*
+ * Unlink @pos from the tree. As the updated_children list is
+ * singly linked, we have to walk it to find the removal point.
+ * However, due to the way we traverse, @pos will be the first
+ * child in most cases. The only exception is @root.
+ */
+ parent = cgroup_parent(pos);
+ if (parent && cstat->updated_next) {
+ struct cgroup_cpu_stat *pcstat = cgroup_cpu_stat(parent, cpu);
+ struct cgroup_cpu_stat *ncstat;
+ struct cgroup **nextp;
+
+ nextp = &pcstat->updated_children;
+ while (true) {
+ ncstat = cgroup_cpu_stat(*nextp, cpu);
+ if (*nextp == pos)
+ break;
+
+ WARN_ON_ONCE(*nextp == parent);
+ nextp = &ncstat->updated_next;
+ }
+
+ *nextp = cstat->updated_next;
+ cstat->updated_next = NULL;
+ }
+
+ return pos;
+}
+
+static void cgroup_stat_accumulate(struct cgroup_stat *dst_stat,
+ struct cgroup_stat *src_stat)
+{
+ dst_stat->cputime.utime += src_stat->cputime.utime;
+ dst_stat->cputime.stime += src_stat->cputime.stime;
+ dst_stat->cputime.sum_exec_runtime += src_stat->cputime.sum_exec_runtime;
+}
+
+static void cgroup_cpu_stat_flush_one(struct cgroup *cgrp, int cpu)
+{
+ struct cgroup *parent = cgroup_parent(cgrp);
+ struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu);
+ struct task_cputime *last_cputime = &cstat->last_cputime;
+ struct task_cputime cputime;
+ struct cgroup_stat delta;
+ unsigned seq;
+
+ lockdep_assert_held(&cgroup_stat_mutex);
+
+ /* fetch the current per-cpu values */
+ do {
+ seq = __u64_stats_fetch_begin(&cstat->sync);
+ cputime = cstat->cputime;
+ } while (__u64_stats_fetch_retry(&cstat->sync, seq));
+
+	/* accumulate the deltas to propagate */
+ delta.cputime.utime = cputime.utime - last_cputime->utime;
+ delta.cputime.stime = cputime.stime - last_cputime->stime;
+ delta.cputime.sum_exec_runtime = cputime.sum_exec_runtime -
+ last_cputime->sum_exec_runtime;
+ *last_cputime = cputime;
+
+ /* transfer the pending stat into delta */
+ cgroup_stat_accumulate(&delta, &cgrp->pending_stat);
+ memset(&cgrp->pending_stat, 0, sizeof(cgrp->pending_stat));
+
+ /* propagate delta into the global stat and the parent's pending */
+ cgroup_stat_accumulate(&cgrp->stat, &delta);
+ if (parent)
+ cgroup_stat_accumulate(&parent->pending_stat, &delta);
+}
+
+/* see cgroup_stat_flush() */
+static void cgroup_stat_flush_locked(struct cgroup *cgrp)
+{
+ int cpu;
+
+ lockdep_assert_held(&cgroup_stat_mutex);
+
+ for_each_possible_cpu(cpu) {
+ raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_cpu_stat_lock, cpu);
+ struct cgroup *pos = NULL;
+
+ raw_spin_lock_irq(cpu_lock);
+ while ((pos = cgroup_cpu_stat_pop_updated(pos, cgrp, cpu)))
+ cgroup_cpu_stat_flush_one(pos, cpu);
+ raw_spin_unlock_irq(cpu_lock);
+ }
+}
+
+/**
+ * cgroup_stat_flush - flush stats in @cgrp's subtree
+ * @cgrp: target cgroup
+ *
+ * Collect all per-cpu stats in @cgrp's subtree into the global counters
+ * and propagate them upwards. After this function returns, all cgroups in
+ * the subtree have up-to-date ->stat.
+ *
+ * This also gets all cgroups in the subtree including @cgrp off the
+ * ->updated_children lists.
+ */
+void cgroup_stat_flush(struct cgroup *cgrp)
+{
+ mutex_lock(&cgroup_stat_mutex);
+ cgroup_stat_flush_locked(cgrp);
+ mutex_unlock(&cgroup_stat_mutex);
+}
+
+static struct cgroup_cpu_stat *cgroup_cpu_stat_account_begin(struct cgroup *cgrp)
+{
+ struct cgroup_cpu_stat *cstat;
+
+ cstat = get_cpu_ptr(cgrp->cpu_stat);
+ u64_stats_update_begin(&cstat->sync);
+ return cstat;
+}
+
+static void cgroup_cpu_stat_account_end(struct cgroup *cgrp,
+ struct cgroup_cpu_stat *cstat)
+{
+ u64_stats_update_end(&cstat->sync);
+ cgroup_cpu_stat_updated(cgrp, smp_processor_id());
+ put_cpu_ptr(cstat);
+}
+
+void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
+{
+ struct cgroup_cpu_stat *cstat;
+
+ cstat = cgroup_cpu_stat_account_begin(cgrp);
+ cstat->cputime.sum_exec_runtime += delta_exec;
+ cgroup_cpu_stat_account_end(cgrp, cstat);
+}
+
+void __cgroup_account_cputime_field(struct cgroup *cgrp,
+ enum cpu_usage_stat index, u64 delta_exec)
+{
+ struct cgroup_cpu_stat *cstat;
+
+ cstat = cgroup_cpu_stat_account_begin(cgrp);
+
+ switch (index) {
+ case CPUTIME_USER:
+ case CPUTIME_NICE:
+ cstat->cputime.utime += delta_exec;
+ break;
+ case CPUTIME_SYSTEM:
+ case CPUTIME_IRQ:
+ case CPUTIME_SOFTIRQ:
+ cstat->cputime.stime += delta_exec;
+ break;
+ default:
+ break;
+ }
+
+ cgroup_cpu_stat_account_end(cgrp, cstat);
+}
+
+void cgroup_stat_show_cputime(struct seq_file *seq)
+{
+ struct cgroup *cgrp = seq_css(seq)->cgroup;
+ u64 usage, utime, stime;
+
+ if (!cgroup_parent(cgrp))
+ return;
+
+ mutex_lock(&cgroup_stat_mutex);
+
+ cgroup_stat_flush_locked(cgrp);
+
+ usage = cgrp->stat.cputime.sum_exec_runtime;
+ cputime_adjust(&cgrp->stat.cputime, &cgrp->stat.prev_cputime,
+ &utime, &stime);
+
+ mutex_unlock(&cgroup_stat_mutex);
+
+ do_div(usage, NSEC_PER_USEC);
+ do_div(utime, NSEC_PER_USEC);
+ do_div(stime, NSEC_PER_USEC);
+
+ seq_printf(seq, "usage_usec %llu\n"
+ "user_usec %llu\n"
+ "system_usec %llu\n",
+ usage, utime, stime);
+}
+
+int cgroup_stat_init(struct cgroup *cgrp)
+{
+ int cpu;
+
+ /* the root cgrp has cpu_stat preallocated */
+ if (!cgrp->cpu_stat) {
+ cgrp->cpu_stat = alloc_percpu(struct cgroup_cpu_stat);
+ if (!cgrp->cpu_stat)
+ return -ENOMEM;
+ }
+
+ /* ->updated_children list is self terminated */
+ for_each_possible_cpu(cpu)
+ cgroup_cpu_stat(cgrp, cpu)->updated_children = cgrp;
+
+ prev_cputime_init(&cgrp->stat.prev_cputime);
+
+ return 0;
+}
+
+void cgroup_stat_exit(struct cgroup *cgrp)
+{
+ int cpu;
+
+ cgroup_stat_flush(cgrp);
+
+ /* sanity check */
+ for_each_possible_cpu(cpu) {
+ struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu);
+
+ if (WARN_ON_ONCE(cstat->updated_children != cgrp) ||
+ WARN_ON_ONCE(cstat->updated_next))
+ return;
+ }
+
+ free_percpu(cgrp->cpu_stat);
+ cgrp->cpu_stat = NULL;
+}
+
+void __init cgroup_stat_boot(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ raw_spin_lock_init(per_cpu_ptr(&cgroup_cpu_stat_lock, cpu));
+
+ BUG_ON(cgroup_stat_init(&cgrp_dfl_root.cgrp));
+}
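cgroup_stat_show_cputime() above backs the new cgroup v2 cpu.stat keys (usage_usec, user_usec, system_usec); a sketch of reading them from userspace, with the cgroup path as a placeholder:

#include <stdio.h>

int main(void)
{
	unsigned long long usage, user, sys;
	FILE *f = fopen("/sys/fs/cgroup/mygroup/cpu.stat", "r");	/* placeholder path */

	if (!f)
		return 1;
	if (fscanf(f, "usage_usec %llu user_usec %llu system_usec %llu",
		   &usage, &user, &sys) == 3)
		printf("usage=%lluus user=%lluus system=%lluus\n",
		       usage, user, sys);
	fclose(f);
	return 0;
}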
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 42d24bd64ea4..3939a4674e0a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -209,7 +209,7 @@ static int event_function(void *info)
struct perf_event_context *task_ctx = cpuctx->task_ctx;
int ret = 0;
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
perf_ctx_lock(cpuctx, task_ctx);
/*
@@ -306,7 +306,7 @@ static void event_function_local(struct perf_event *event, event_f func, void *d
struct task_struct *task = READ_ONCE(ctx->task);
struct perf_event_context *task_ctx = NULL;
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
if (task) {
if (task == TASK_TOMBSTONE)
@@ -582,6 +582,88 @@ static inline u64 perf_event_clock(struct perf_event *event)
return event->clock();
}
+/*
+ * State based event timekeeping...
+ *
+ * The basic idea is to use event->state to determine which (if any) time
+ * fields to increment with the current delta. This means we only need to
+ * update timestamps when we change state or when they are explicitly requested
+ * (read).
+ *
+ * Event groups make things a little more complicated, but not terribly so. The
+ * rules for a group are that if the group leader is OFF the entire group is
+ * OFF, irrespective of what the group member states are. This results in
+ * __perf_effective_state().
+ *
+ * A further ramification is that when a group leader flips between OFF and
+ * !OFF, we need to update all group member times.
+ *
+ *
+ * NOTE: perf_event_time() is based on the (cgroup) context time, and thus we
+ * need to make sure the relevant context time is updated before we try and
+ * update our timestamps.
+ */
+
+static __always_inline enum perf_event_state
+__perf_effective_state(struct perf_event *event)
+{
+ struct perf_event *leader = event->group_leader;
+
+ if (leader->state <= PERF_EVENT_STATE_OFF)
+ return leader->state;
+
+ return event->state;
+}
+
+static __always_inline void
+__perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
+{
+ enum perf_event_state state = __perf_effective_state(event);
+ u64 delta = now - event->tstamp;
+
+ *enabled = event->total_time_enabled;
+ if (state >= PERF_EVENT_STATE_INACTIVE)
+ *enabled += delta;
+
+ *running = event->total_time_running;
+ if (state >= PERF_EVENT_STATE_ACTIVE)
+ *running += delta;
+}
+
+static void perf_event_update_time(struct perf_event *event)
+{
+ u64 now = perf_event_time(event);
+
+ __perf_update_times(event, now, &event->total_time_enabled,
+ &event->total_time_running);
+ event->tstamp = now;
+}
+
+static void perf_event_update_sibling_time(struct perf_event *leader)
+{
+ struct perf_event *sibling;
+
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry)
+ perf_event_update_time(sibling);
+}
+
+static void
+perf_event_set_state(struct perf_event *event, enum perf_event_state state)
+{
+ if (event->state == state)
+ return;
+
+ perf_event_update_time(event);
+ /*
+ * If a group leader gets enabled/disabled all its siblings
+ * are affected too.
+ */
+ if ((event->state < 0) ^ (state < 0))
+ perf_event_update_sibling_time(event);
+
+ WRITE_ONCE(event->state, state);
+}
+
#ifdef CONFIG_CGROUP_PERF
static inline bool
@@ -841,40 +923,6 @@ perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
event->shadow_ctx_time = now - t->timestamp;
}
-static inline void
-perf_cgroup_defer_enabled(struct perf_event *event)
-{
- /*
- * when the current task's perf cgroup does not match
- * the event's, we need to remember to call the
- * perf_mark_enable() function the first time a task with
- * a matching perf cgroup is scheduled in.
- */
- if (is_cgroup_event(event) && !perf_cgroup_match(event))
- event->cgrp_defer_enabled = 1;
-}
-
-static inline void
-perf_cgroup_mark_enabled(struct perf_event *event,
- struct perf_event_context *ctx)
-{
- struct perf_event *sub;
- u64 tstamp = perf_event_time(event);
-
- if (!event->cgrp_defer_enabled)
- return;
-
- event->cgrp_defer_enabled = 0;
-
- event->tstamp_enabled = tstamp - event->total_time_enabled;
- list_for_each_entry(sub, &event->sibling_list, group_entry) {
- if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
- sub->tstamp_enabled = tstamp - sub->total_time_enabled;
- sub->cgrp_defer_enabled = 0;
- }
- }
-}
-
/*
* Update cpuctx->cgrp so that it is set when first cgroup event is added and
* cleared when last cgroup event is removed.
@@ -975,17 +1023,6 @@ static inline u64 perf_cgroup_event_time(struct perf_event *event)
}
static inline void
-perf_cgroup_defer_enabled(struct perf_event *event)
-{
-}
-
-static inline void
-perf_cgroup_mark_enabled(struct perf_event *event,
- struct perf_event_context *ctx)
-{
-}
-
-static inline void
list_update_cgroup_event(struct perf_event *event,
struct perf_event_context *ctx, bool add)
{
@@ -1006,7 +1043,7 @@ static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
struct perf_cpu_context *cpuctx;
int rotations = 0;
- WARN_ON(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
rotations = perf_rotate_context(cpuctx);
@@ -1093,7 +1130,7 @@ static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
struct list_head *head = this_cpu_ptr(&active_ctx_list);
- WARN_ON(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
WARN_ON(!list_empty(&ctx->active_ctx_list));
@@ -1102,7 +1139,7 @@ static void perf_event_ctx_activate(struct perf_event_context *ctx)
static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
{
- WARN_ON(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
WARN_ON(list_empty(&ctx->active_ctx_list));
@@ -1202,7 +1239,7 @@ perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
again:
rcu_read_lock();
- ctx = ACCESS_ONCE(event->ctx);
+ ctx = READ_ONCE(event->ctx);
if (!atomic_inc_not_zero(&ctx->refcount)) {
rcu_read_unlock();
goto again;
@@ -1398,60 +1435,6 @@ static u64 perf_event_time(struct perf_event *event)
return ctx ? ctx->time : 0;
}
-/*
- * Update the total_time_enabled and total_time_running fields for a event.
- */
-static void update_event_times(struct perf_event *event)
-{
- struct perf_event_context *ctx = event->ctx;
- u64 run_end;
-
- lockdep_assert_held(&ctx->lock);
-
- if (event->state < PERF_EVENT_STATE_INACTIVE ||
- event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
- return;
-
- /*
- * in cgroup mode, time_enabled represents
- * the time the event was enabled AND active
- * tasks were in the monitored cgroup. This is
- * independent of the activity of the context as
- * there may be a mix of cgroup and non-cgroup events.
- *
- * That is why we treat cgroup events differently
- * here.
- */
- if (is_cgroup_event(event))
- run_end = perf_cgroup_event_time(event);
- else if (ctx->is_active)
- run_end = ctx->time;
- else
- run_end = event->tstamp_stopped;
-
- event->total_time_enabled = run_end - event->tstamp_enabled;
-
- if (event->state == PERF_EVENT_STATE_INACTIVE)
- run_end = event->tstamp_stopped;
- else
- run_end = perf_event_time(event);
-
- event->total_time_running = run_end - event->tstamp_running;
-
-}
-
-/*
- * Update total_time_enabled and total_time_running for all events in a group.
- */
-static void update_group_times(struct perf_event *leader)
-{
- struct perf_event *event;
-
- update_event_times(leader);
- list_for_each_entry(event, &leader->sibling_list, group_entry)
- update_event_times(event);
-}
-
static enum event_type_t get_event_type(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
@@ -1494,6 +1477,8 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
event->attach_state |= PERF_ATTACH_CONTEXT;
+ event->tstamp = perf_event_time(event);
+
/*
* If we're a stand alone event or group leader, we go to the context
* list, group events are kept attached to the group so that
@@ -1701,8 +1686,6 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
if (event->group_leader == event)
list_del_init(&event->group_entry);
- update_group_times(event);
-
/*
* If event was in error state, then keep it
* that way, otherwise bogus counts will be
@@ -1711,7 +1694,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
* of the event
*/
if (event->state > PERF_EVENT_STATE_OFF)
- event->state = PERF_EVENT_STATE_OFF;
+ perf_event_set_state(event, PERF_EVENT_STATE_OFF);
ctx->generation++;
}
@@ -1810,38 +1793,24 @@ event_sched_out(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
- u64 tstamp = perf_event_time(event);
- u64 delta;
+ enum perf_event_state state = PERF_EVENT_STATE_INACTIVE;
WARN_ON_ONCE(event->ctx != ctx);
lockdep_assert_held(&ctx->lock);
- /*
- * An event which could not be activated because of
- * filter mismatch still needs to have its timings
- * maintained, otherwise bogus information is return
- * via read() for time_enabled, time_running:
- */
- if (event->state == PERF_EVENT_STATE_INACTIVE &&
- !event_filter_match(event)) {
- delta = tstamp - event->tstamp_stopped;
- event->tstamp_running += delta;
- event->tstamp_stopped = tstamp;
- }
-
if (event->state != PERF_EVENT_STATE_ACTIVE)
return;
perf_pmu_disable(event->pmu);
- event->tstamp_stopped = tstamp;
event->pmu->del(event, 0);
event->oncpu = -1;
- event->state = PERF_EVENT_STATE_INACTIVE;
+
if (event->pending_disable) {
event->pending_disable = 0;
- event->state = PERF_EVENT_STATE_OFF;
+ state = PERF_EVENT_STATE_OFF;
}
+ perf_event_set_state(event, state);
if (!is_software_event(event))
cpuctx->active_oncpu--;
@@ -1861,7 +1830,9 @@ group_sched_out(struct perf_event *group_event,
struct perf_event_context *ctx)
{
struct perf_event *event;
- int state = group_event->state;
+
+ if (group_event->state != PERF_EVENT_STATE_ACTIVE)
+ return;
perf_pmu_disable(ctx->pmu);
@@ -1875,7 +1846,7 @@ group_sched_out(struct perf_event *group_event,
perf_pmu_enable(ctx->pmu);
- if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
+ if (group_event->attr.exclusive)
cpuctx->exclusive = 0;
}
@@ -1895,6 +1866,11 @@ __perf_remove_from_context(struct perf_event *event,
{
unsigned long flags = (unsigned long)info;
+ if (ctx->is_active & EVENT_TIME) {
+ update_context_time(ctx);
+ update_cgrp_time_from_cpuctx(cpuctx);
+ }
+
event_sched_out(event, cpuctx, ctx);
if (flags & DETACH_GROUP)
perf_group_detach(event);
@@ -1957,14 +1933,17 @@ static void __perf_event_disable(struct perf_event *event,
if (event->state < PERF_EVENT_STATE_INACTIVE)
return;
- update_context_time(ctx);
- update_cgrp_time_from_event(event);
- update_group_times(event);
+ if (ctx->is_active & EVENT_TIME) {
+ update_context_time(ctx);
+ update_cgrp_time_from_event(event);
+ }
+
if (event == event->group_leader)
group_sched_out(event, cpuctx, ctx);
else
event_sched_out(event, cpuctx, ctx);
- event->state = PERF_EVENT_STATE_OFF;
+
+ perf_event_set_state(event, PERF_EVENT_STATE_OFF);
}
/*
@@ -2021,8 +2000,7 @@ void perf_event_disable_inatomic(struct perf_event *event)
}
static void perf_set_shadow_time(struct perf_event *event,
- struct perf_event_context *ctx,
- u64 tstamp)
+ struct perf_event_context *ctx)
{
/*
* use the correct time source for the time snapshot
@@ -2050,9 +2028,9 @@ static void perf_set_shadow_time(struct perf_event *event,
* is cleaner and simpler to understand.
*/
if (is_cgroup_event(event))
- perf_cgroup_set_shadow_time(event, tstamp);
+ perf_cgroup_set_shadow_time(event, event->tstamp);
else
- event->shadow_ctx_time = tstamp - ctx->timestamp;
+ event->shadow_ctx_time = event->tstamp - ctx->timestamp;
}
#define MAX_INTERRUPTS (~0ULL)
@@ -2065,7 +2043,6 @@ event_sched_in(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
- u64 tstamp = perf_event_time(event);
int ret = 0;
lockdep_assert_held(&ctx->lock);
@@ -2075,11 +2052,12 @@ event_sched_in(struct perf_event *event,
WRITE_ONCE(event->oncpu, smp_processor_id());
/*
- * Order event::oncpu write to happen before the ACTIVE state
- * is visible.
+ * Order event::oncpu write to happen before the ACTIVE state is
+ * visible. This allows perf_event_{stop,read}() to observe the correct
+ * ->oncpu if it sees ACTIVE.
*/
smp_wmb();
- WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
+ perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE);
/*
* Unthrottle events, since we scheduled we might have missed several
@@ -2091,26 +2069,19 @@ event_sched_in(struct perf_event *event,
event->hw.interrupts = 0;
}
- /*
- * The new state must be visible before we turn it on in the hardware:
- */
- smp_wmb();
-
perf_pmu_disable(event->pmu);
- perf_set_shadow_time(event, ctx, tstamp);
+ perf_set_shadow_time(event, ctx);
perf_log_itrace_start(event);
if (event->pmu->add(event, PERF_EF_START)) {
- event->state = PERF_EVENT_STATE_INACTIVE;
+ perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
event->oncpu = -1;
ret = -EAGAIN;
goto out;
}
- event->tstamp_running += tstamp - event->tstamp_stopped;
-
if (!is_software_event(event))
cpuctx->active_oncpu++;
if (!ctx->nr_active++)
@@ -2134,8 +2105,6 @@ group_sched_in(struct perf_event *group_event,
{
struct perf_event *event, *partial_group = NULL;
struct pmu *pmu = ctx->pmu;
- u64 now = ctx->time;
- bool simulate = false;
if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;
@@ -2165,27 +2134,13 @@ group_error:
/*
* Groups can be scheduled in as one unit only, so undo any
* partial group before returning:
- * The events up to the failed event are scheduled out normally,
- * tstamp_stopped will be updated.
- *
- * The failed events and the remaining siblings need to have
- * their timings updated as if they had gone thru event_sched_in()
- * and event_sched_out(). This is required to get consistent timings
- * across the group. This also takes care of the case where the group
- * could never be scheduled by ensuring tstamp_stopped is set to mark
- * the time the event was actually stopped, such that time delta
- * calculation in update_event_times() is correct.
+ * The events up to the failed event are scheduled out normally.
*/
list_for_each_entry(event, &group_event->sibling_list, group_entry) {
if (event == partial_group)
- simulate = true;
+ break;
- if (simulate) {
- event->tstamp_running += now - event->tstamp_stopped;
- event->tstamp_stopped = now;
- } else {
- event_sched_out(event, cpuctx, ctx);
- }
+ event_sched_out(event, cpuctx, ctx);
}
event_sched_out(group_event, cpuctx, ctx);
@@ -2227,46 +2182,11 @@ static int group_can_go_on(struct perf_event *event,
return can_add_hw;
}
-/*
- * Complement to update_event_times(). This computes the tstamp_* values to
- * continue 'enabled' state from @now, and effectively discards the time
- * between the prior tstamp_stopped and now (as we were in the OFF state, or
- * just switched (context) time base).
- *
- * This further assumes '@event->state == INACTIVE' (we just came from OFF) and
- * cannot have been scheduled in yet. And going into INACTIVE state means
- * '@event->tstamp_stopped = @now'.
- *
- * Thus given the rules of update_event_times():
- *
- * total_time_enabled = tstamp_stopped - tstamp_enabled
- * total_time_running = tstamp_stopped - tstamp_running
- *
- * We can insert 'tstamp_stopped == now' and reverse them to compute new
- * tstamp_* values.
- */
-static void __perf_event_enable_time(struct perf_event *event, u64 now)
-{
- WARN_ON_ONCE(event->state != PERF_EVENT_STATE_INACTIVE);
-
- event->tstamp_stopped = now;
- event->tstamp_enabled = now - event->total_time_enabled;
- event->tstamp_running = now - event->total_time_running;
-}
-
static void add_event_to_ctx(struct perf_event *event,
struct perf_event_context *ctx)
{
- u64 tstamp = perf_event_time(event);
-
list_add_event(event, ctx);
perf_group_attach(event);
- /*
- * We can be called with event->state == STATE_OFF when we create with
- * .disabled = 1. In that case the IOC_ENABLE will call this function.
- */
- if (event->state == PERF_EVENT_STATE_INACTIVE)
- __perf_event_enable_time(event, tstamp);
}
static void ctx_sched_out(struct perf_event_context *ctx,
@@ -2498,28 +2418,6 @@ again:
}
/*
- * Put a event into inactive state and update time fields.
- * Enabling the leader of a group effectively enables all
- * the group members that aren't explicitly disabled, so we
- * have to update their ->tstamp_enabled also.
- * Note: this works for group members as well as group leaders
- * since the non-leader members' sibling_lists will be empty.
- */
-static void __perf_event_mark_enabled(struct perf_event *event)
-{
- struct perf_event *sub;
- u64 tstamp = perf_event_time(event);
-
- event->state = PERF_EVENT_STATE_INACTIVE;
- __perf_event_enable_time(event, tstamp);
- list_for_each_entry(sub, &event->sibling_list, group_entry) {
- /* XXX should not be > INACTIVE if event isn't */
- if (sub->state >= PERF_EVENT_STATE_INACTIVE)
- __perf_event_enable_time(sub, tstamp);
- }
-}
-
-/*
* Cross CPU call to enable a performance event
*/
static void __perf_event_enable(struct perf_event *event,
@@ -2537,14 +2435,12 @@ static void __perf_event_enable(struct perf_event *event,
if (ctx->is_active)
ctx_sched_out(ctx, cpuctx, EVENT_TIME);
- __perf_event_mark_enabled(event);
+ perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
if (!ctx->is_active)
return;
if (!event_filter_match(event)) {
- if (is_cgroup_event(event))
- perf_cgroup_defer_enabled(event);
ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
return;
}
@@ -2864,18 +2760,10 @@ static void __perf_event_sync_stat(struct perf_event *event,
* we know the event must be on the current CPU, therefore we
* don't need to use it.
*/
- switch (event->state) {
- case PERF_EVENT_STATE_ACTIVE:
+ if (event->state == PERF_EVENT_STATE_ACTIVE)
event->pmu->read(event);
- /* fall-through */
- case PERF_EVENT_STATE_INACTIVE:
- update_event_times(event);
- break;
-
- default:
- break;
- }
+ perf_event_update_time(event);
/*
* In order to keep per-task stats reliable we need to flip the event
@@ -3112,10 +3000,6 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
if (!event_filter_match(event))
continue;
- /* may need to reset tstamp_enabled */
- if (is_cgroup_event(event))
- perf_cgroup_mark_enabled(event, ctx);
-
if (group_can_go_on(event, cpuctx, 1))
group_sched_in(event, cpuctx, ctx);
@@ -3123,10 +3007,8 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
* If this pinned group hasn't been scheduled,
* put it in error state.
*/
- if (event->state == PERF_EVENT_STATE_INACTIVE) {
- update_group_times(event);
- event->state = PERF_EVENT_STATE_ERROR;
- }
+ if (event->state == PERF_EVENT_STATE_INACTIVE)
+ perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
}
}
@@ -3148,10 +3030,6 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
if (!event_filter_match(event))
continue;
- /* may need to reset tstamp_enabled */
- if (is_cgroup_event(event))
- perf_cgroup_mark_enabled(event, ctx);
-
if (group_can_go_on(event, cpuctx, can_add_hw)) {
if (group_sched_in(event, cpuctx, ctx))
can_add_hw = 0;
@@ -3523,7 +3401,7 @@ void perf_event_task_tick(void)
struct perf_event_context *ctx, *tmp;
int throttled;
- WARN_ON(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
__this_cpu_inc(perf_throttled_seq);
throttled = __this_cpu_xchg(perf_throttled_count, 0);
@@ -3543,7 +3421,7 @@ static int event_enable_on_exec(struct perf_event *event,
if (event->state >= PERF_EVENT_STATE_INACTIVE)
return 0;
- __perf_event_mark_enabled(event);
+ perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
return 1;
}
@@ -3637,12 +3515,15 @@ static void __perf_event_read(void *info)
return;
raw_spin_lock(&ctx->lock);
- if (ctx->is_active) {
+ if (ctx->is_active & EVENT_TIME) {
update_context_time(ctx);
update_cgrp_time_from_event(event);
}
- update_event_times(event);
+ perf_event_update_time(event);
+ if (data->group)
+ perf_event_update_sibling_time(event);
+
if (event->state != PERF_EVENT_STATE_ACTIVE)
goto unlock;
@@ -3657,7 +3538,6 @@ static void __perf_event_read(void *info)
pmu->read(event);
list_for_each_entry(sub, &event->sibling_list, group_entry) {
- update_event_times(sub);
if (sub->state == PERF_EVENT_STATE_ACTIVE) {
/*
* Use sibling's PMU rather than @event's since
@@ -3691,7 +3571,6 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
{
unsigned long flags;
int ret = 0;
- u64 now;
/*
* Disabling interrupts avoids all counter scheduling (context
@@ -3722,23 +3601,25 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
goto out;
}
- now = event->shadow_ctx_time + perf_clock();
- if (enabled)
- *enabled = now - event->tstamp_enabled;
/*
* If the event is currently on this CPU, its either a per-task event,
* or local to this CPU. Furthermore it means its ACTIVE (otherwise
* oncpu == -1).
*/
- if (event->oncpu == smp_processor_id()) {
+ if (event->oncpu == smp_processor_id())
event->pmu->read(event);
- if (running)
- *running = now - event->tstamp_running;
- } else if (running) {
- *running = event->total_time_running;
- }
*value = local64_read(&event->count);
+ if (enabled || running) {
+ u64 now = event->shadow_ctx_time + perf_clock();
+ u64 __enabled, __running;
+
+ __perf_update_times(event, now, &__enabled, &__running);
+ if (enabled)
+ *enabled = __enabled;
+ if (running)
+ *running = __running;
+ }
out:
local_irq_restore(flags);
@@ -3747,23 +3628,35 @@ out:
static int perf_event_read(struct perf_event *event, bool group)
{
+ enum perf_event_state state = READ_ONCE(event->state);
int event_cpu, ret = 0;
/*
* If event is enabled and currently active on a CPU, update the
* value in the event structure:
*/
- if (event->state == PERF_EVENT_STATE_ACTIVE) {
- struct perf_read_data data = {
- .event = event,
- .group = group,
- .ret = 0,
- };
+again:
+ if (state == PERF_EVENT_STATE_ACTIVE) {
+ struct perf_read_data data;
+
+ /*
+ * Orders the ->state and ->oncpu loads such that if we see
+ * ACTIVE we must also see the right ->oncpu.
+ *
+ * Matches the smp_wmb() from event_sched_in().
+ */
+ smp_rmb();
event_cpu = READ_ONCE(event->oncpu);
if ((unsigned)event_cpu >= nr_cpu_ids)
return 0;
+ data = (struct perf_read_data){
+ .event = event,
+ .group = group,
+ .ret = 0,
+ };
+
preempt_disable();
event_cpu = __perf_event_read_cpu(event, event_cpu);
@@ -3780,24 +3673,30 @@ static int perf_event_read(struct perf_event *event, bool group)
(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
preempt_enable();
ret = data.ret;
- } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
+
+ } else if (state == PERF_EVENT_STATE_INACTIVE) {
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
raw_spin_lock_irqsave(&ctx->lock, flags);
+ state = event->state;
+ if (state != PERF_EVENT_STATE_INACTIVE) {
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
+ goto again;
+ }
+
/*
- * may read while context is not active
- * (e.g., thread is blocked), in that case
- * we cannot update context time
+ * May read while context is not active (e.g., thread is
+ * blocked), in that case we cannot update context time
*/
- if (ctx->is_active) {
+ if (ctx->is_active & EVENT_TIME) {
update_context_time(ctx);
update_cgrp_time_from_event(event);
}
+
+ perf_event_update_time(event);
if (group)
- update_group_times(event);
- else
- update_event_times(event);
+ perf_event_update_sibling_time(event);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
@@ -4243,7 +4142,7 @@ static void perf_remove_from_owner(struct perf_event *event)
* indeed free this event, otherwise we need to serialize on
* owner->perf_event_mutex.
*/
- owner = lockless_dereference(event->owner);
+ owner = READ_ONCE(event->owner);
if (owner) {
/*
* Since delayed_put_task_struct() also drops the last
@@ -4340,7 +4239,7 @@ again:
* Cannot change, child events are not migrated, see the
* comment with perf_event_ctx_lock_nested().
*/
- ctx = lockless_dereference(child->ctx);
+ ctx = READ_ONCE(child->ctx);
/*
* Since child_mutex nests inside ctx::mutex, we must jump
* through hoops. We start by grabbing a reference on the ctx.
@@ -4400,7 +4299,7 @@ static int perf_release(struct inode *inode, struct file *file)
return 0;
}
-u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
struct perf_event *child;
u64 total = 0;
@@ -4428,6 +4327,18 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
return total;
}
+
+u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+{
+ struct perf_event_context *ctx;
+ u64 count;
+
+ ctx = perf_event_ctx_lock(event);
+ count = __perf_event_read_value(event, enabled, running);
+ perf_event_ctx_unlock(event, ctx);
+
+ return count;
+}
EXPORT_SYMBOL_GPL(perf_event_read_value);
static int __perf_read_group_add(struct perf_event *leader,
@@ -4443,6 +4354,8 @@ static int __perf_read_group_add(struct perf_event *leader,
if (ret)
return ret;
+ raw_spin_lock_irqsave(&ctx->lock, flags);
+
/*
* Since we co-schedule groups, {enabled,running} times of siblings
* will be identical to those of the leader, so we only publish one
@@ -4465,8 +4378,6 @@ static int __perf_read_group_add(struct perf_event *leader,
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
- raw_spin_lock_irqsave(&ctx->lock, flags);
-
list_for_each_entry(sub, &leader->sibling_list, group_entry) {
values[n++] += perf_event_count(sub);
if (read_format & PERF_FORMAT_ID)
@@ -4530,7 +4441,7 @@ static int perf_read_one(struct perf_event *event,
u64 values[4];
int n = 0;
- values[n++] = perf_event_read_value(event, &enabled, &running);
+ values[n++] = __perf_event_read_value(event, &enabled, &running);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = enabled;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
@@ -4909,8 +4820,7 @@ static void calc_timer_values(struct perf_event *event,
*now = perf_clock();
ctx_time = event->shadow_ctx_time + *now;
- *enabled = ctx_time - event->tstamp_enabled;
- *running = ctx_time - event->tstamp_running;
+ __perf_update_times(event, ctx_time, enabled, running);
}
static void perf_event_init_userpage(struct perf_event *event)
@@ -5314,8 +5224,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
if (!rb)
goto aux_unlock;
- aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
- aux_size = ACCESS_ONCE(rb->user_page->aux_size);
+ aux_offset = READ_ONCE(rb->user_page->aux_offset);
+ aux_size = READ_ONCE(rb->user_page->aux_size);
if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
goto aux_unlock;
@@ -9405,6 +9315,11 @@ static void account_event(struct perf_event *event)
inc = true;
if (inc) {
+ /*
+ * We need the mutex here because static_branch_enable()
+ * must complete *before* the perf_sched_count increment
+ * becomes visible.
+ */
if (atomic_inc_not_zero(&perf_sched_count))
goto enabled;
@@ -10530,7 +10445,7 @@ perf_event_exit_event(struct perf_event *child_event,
if (parent_event)
perf_group_detach(child_event);
list_del_event(child_event, child_ctx);
- child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
+ perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
raw_spin_unlock_irq(&child_ctx->lock);
/*
@@ -10768,7 +10683,7 @@ inherit_event(struct perf_event *parent_event,
struct perf_event *group_leader,
struct perf_event_context *child_ctx)
{
- enum perf_event_active_state parent_state = parent_event->state;
+ enum perf_event_state parent_state = parent_event->state;
struct perf_event *child_event;
unsigned long flags;
@@ -11104,6 +11019,7 @@ static void __perf_event_exit_context(void *__info)
struct perf_event *event;
raw_spin_lock(&ctx->lock);
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME);
list_for_each_entry(event, &ctx->event_list, event_entry)
__perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
raw_spin_unlock(&ctx->lock);
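
The state-based timekeeping introduced in this file reduces to a small model: a single ->tstamp records the last state change, and enabled/running time is derived on demand from the current state. The sketch below is an illustrative userspace model, not the kernel code; the three-state enum and the helper names are simplified stand-ins for __perf_update_times() and perf_event_set_state().

#include <stdint.h>
#include <stdio.h>

enum ev_state { EV_OFF = -1, EV_INACTIVE = 0, EV_ACTIVE = 1 };

struct event {
	enum ev_state state;
	uint64_t tstamp;		/* time of the last state change */
	uint64_t total_time_enabled;
	uint64_t total_time_running;
};

static void update_times(struct event *e, uint64_t now)
{
	uint64_t delta = now - e->tstamp;

	if (e->state >= EV_INACTIVE)
		e->total_time_enabled += delta;
	if (e->state >= EV_ACTIVE)
		e->total_time_running += delta;
	e->tstamp = now;
}

static void set_state(struct event *e, enum ev_state state, uint64_t now)
{
	/* Fold the elapsed time into the old state before switching. */
	update_times(e, now);
	e->state = state;
}

int main(void)
{
	struct event e = { .state = EV_OFF };

	set_state(&e, EV_INACTIVE, 100);	/* enabled at t=100 */
	set_state(&e, EV_ACTIVE, 150);		/* scheduled in at t=150 */
	update_times(&e, 200);			/* read at t=200 */
	printf("enabled=%llu running=%llu\n",	/* prints 100 and 50 */
	       (unsigned long long)e.total_time_enabled,
	       (unsigned long long)e.total_time_running);
	return 0;
}
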
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index f684d8e5fa2b..141aa2ca8728 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -381,7 +381,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
* (B) <-> (C) ordering is still observed by the pmu driver.
*/
if (!rb->aux_overwrite) {
- aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
+ aux_tail = READ_ONCE(rb->user_page->aux_tail);
handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
if (aux_head - aux_tail < perf_aux_size(rb))
handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
@@ -411,6 +411,7 @@ err:
return NULL;
}
+EXPORT_SYMBOL_GPL(perf_aux_output_begin);
static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
{
@@ -480,6 +481,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
rb_free_aux(rb);
ring_buffer_put(rb);
}
+EXPORT_SYMBOL_GPL(perf_aux_output_end);
/*
* Skip over a given number of bytes in the AUX buffer, due to, for example,
@@ -505,6 +507,7 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
return 0;
}
+EXPORT_SYMBOL_GPL(perf_aux_output_skip);
void *perf_get_aux(struct perf_output_handle *handle)
{
@@ -514,6 +517,7 @@ void *perf_get_aux(struct perf_output_handle *handle)
return handle->rb->aux_priv;
}
+EXPORT_SYMBOL_GPL(perf_get_aux);
#define PERF_AUX_GFP (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
diff --git a/kernel/exit.c b/kernel/exit.c
index f6cad39f35df..6b4298a41167 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1339,7 +1339,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
* Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
* can't confuse the checks below.
*/
- int exit_state = ACCESS_ONCE(p->exit_state);
+ int exit_state = READ_ONCE(p->exit_state);
int ret;
if (unlikely(exit_state == EXIT_DEAD))
diff --git a/kernel/extable.c b/kernel/extable.c
index 9aa1cc41ecf7..a17fdb63dc3e 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -31,6 +31,8 @@
* mutex protecting text section modification (dynamic code patching).
* some users need to sleep (allocating memory...) while they hold this lock.
*
+ * Note: Also protects SMP-alternatives modification on x86.
+ *
* NOT exported to modules - patching kernel text is a really delicate matter.
*/
DEFINE_MUTEX(text_mutex);
diff --git a/kernel/fork.c b/kernel/fork.c
index 07cc743698d3..4e55eedba8d6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -469,7 +469,7 @@ void __init fork_init(void)
/* create a slab on which task_structs can be allocated */
task_struct_cachep = kmem_cache_create("task_struct",
arch_task_struct_size, align,
- SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
+ SLAB_PANIC|SLAB_ACCOUNT, NULL);
#endif
/* do the arch specific task caches init */
@@ -817,8 +817,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
init_rwsem(&mm->mmap_sem);
INIT_LIST_HEAD(&mm->mmlist);
mm->core_state = NULL;
- atomic_long_set(&mm->nr_ptes, 0);
- mm_nr_pmds_init(mm);
+ mm_pgtables_bytes_init(mm);
mm->map_count = 0;
mm->locked_vm = 0;
mm->pinned_vm = 0;
@@ -872,12 +871,9 @@ static void check_mm(struct mm_struct *mm)
"mm:%p idx:%d val:%ld\n", mm, i, x);
}
- if (atomic_long_read(&mm->nr_ptes))
- pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n",
- atomic_long_read(&mm->nr_ptes));
- if (mm_nr_pmds(mm))
- pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n",
- mm_nr_pmds(mm));
+ if (mm_pgtables_bytes(mm))
+ pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
+ mm_pgtables_bytes(mm));
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
@@ -2209,18 +2205,18 @@ void __init proc_caches_init(void)
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
- SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
+ SLAB_ACCOUNT, sighand_ctor);
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
NULL);
files_cachep = kmem_cache_create("files_cache",
sizeof(struct files_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
NULL);
fs_cachep = kmem_cache_create("fs_cache",
sizeof(struct fs_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
NULL);
/*
* FIXME! The "sizeof(struct mm_struct)" currently includes the
@@ -2231,7 +2227,7 @@ void __init proc_caches_init(void)
*/
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
NULL);
vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
mmap_init();
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index a117adf7084b..89e355866450 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -97,6 +97,12 @@ config HANDLE_DOMAIN_IRQ
config IRQ_TIMINGS
bool
+config GENERIC_IRQ_MATRIX_ALLOCATOR
+ bool
+
+config GENERIC_IRQ_RESERVATION_MODE
+ bool
+
config IRQ_DOMAIN_DEBUG
bool "Expose hardware/virtual IRQ mapping via debugfs"
depends on IRQ_DOMAIN && DEBUG_FS
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index ed15d142694b..ff6e352e3a6c 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
obj-$(CONFIG_SMP) += affinity.o
obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o
+obj-$(CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR) += matrix.o
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index befa671fba64..4e8089b319ae 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -54,7 +54,7 @@ unsigned long probe_irq_on(void)
if (desc->irq_data.chip->irq_set_type)
desc->irq_data.chip->irq_set_type(&desc->irq_data,
IRQ_TYPE_PROBE);
- irq_startup(desc, IRQ_NORESEND, IRQ_START_FORCE);
+ irq_activate_and_startup(desc, IRQ_NORESEND);
}
raw_spin_unlock_irq(&desc->lock);
}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 5a2ef92c2782..043bfc35b353 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -207,20 +207,24 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
* Catch code which fiddles with enable_irq() on a managed
* and potentially shutdown IRQ. Chained interrupt
* installment or irq auto probing should not happen on
- * managed irqs either. Emit a warning, break the affinity
- * and start it up as a normal interrupt.
+ * managed irqs either.
*/
if (WARN_ON_ONCE(force))
- return IRQ_STARTUP_NORMAL;
+ return IRQ_STARTUP_ABORT;
/*
* The interrupt was requested, but there is no online CPU
* in it's affinity mask. Put it into managed shutdown
* state and let the cpu hotplug mechanism start it up once
* a CPU in the mask becomes available.
*/
- irqd_set_managed_shutdown(d);
return IRQ_STARTUP_ABORT;
}
+ /*
+ * Managed interrupts have reserved resources, so this should not
+ * happen.
+ */
+ if (WARN_ON(irq_domain_activate_irq(d, false)))
+ return IRQ_STARTUP_ABORT;
return IRQ_STARTUP_MANAGED;
}
#else
@@ -236,7 +240,9 @@ static int __irq_startup(struct irq_desc *desc)
struct irq_data *d = irq_desc_get_irq_data(desc);
int ret = 0;
- irq_domain_activate_irq(d);
+ /* Warn if this interrupt is not activated but try nevertheless */
+ WARN_ON_ONCE(!irqd_is_activated(d));
+
if (d->chip->irq_startup) {
ret = d->chip->irq_startup(d);
irq_state_clr_disabled(desc);
@@ -269,6 +275,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
ret = __irq_startup(desc);
break;
case IRQ_STARTUP_ABORT:
+ irqd_set_managed_shutdown(d);
return 0;
}
}
@@ -278,6 +285,22 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
return ret;
}
+int irq_activate(struct irq_desc *desc)
+{
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+
+ if (!irqd_affinity_is_managed(d))
+ return irq_domain_activate_irq(d, false);
+ return 0;
+}
+
+void irq_activate_and_startup(struct irq_desc *desc, bool resend)
+{
+ if (WARN_ON(irq_activate(desc)))
+ return;
+ irq_startup(desc, resend, IRQ_START_FORCE);
+}
+
static void __irq_disable(struct irq_desc *desc, bool mask);
void irq_shutdown(struct irq_desc *desc)
@@ -953,7 +976,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
irq_settings_set_norequest(desc);
irq_settings_set_nothread(desc);
desc->action = &chained_action;
- irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
+ irq_activate_and_startup(desc, IRQ_RESEND);
}
}
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index c3fdb36dec30..7f608ac39653 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -81,6 +81,8 @@ irq_debug_show_data(struct seq_file *m, struct irq_data *data, int ind)
data->domain ? data->domain->name : "");
seq_printf(m, "%*shwirq: 0x%lx\n", ind + 1, "", data->hwirq);
irq_debug_show_chip(m, data, ind + 1);
+ if (data->domain && data->domain->ops && data->domain->ops->debug_show)
+ data->domain->ops->debug_show(m, NULL, data, ind + 1);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
if (!data->parent_data)
return;
@@ -149,6 +151,7 @@ static int irq_debug_show(struct seq_file *m, void *p)
raw_spin_lock_irq(&desc->lock);
data = irq_desc_get_irq_data(desc);
seq_printf(m, "handler: %pf\n", desc->handle_irq);
+ seq_printf(m, "device: %s\n", desc->dev_name);
seq_printf(m, "status: 0x%08x\n", desc->status_use_accessors);
irq_debug_show_bits(m, 0, desc->status_use_accessors, irqdesc_states,
ARRAY_SIZE(irqdesc_states));
@@ -226,6 +229,15 @@ static const struct file_operations dfs_irq_ops = {
.release = single_release,
};
+void irq_debugfs_copy_devname(int irq, struct device *dev)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ const char *name = dev_name(dev);
+
+ if (name)
+ desc->dev_name = kstrdup(name, GFP_KERNEL);
+}
+
void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
{
char name [10];
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 44ed5f8c8759..07d08ca701ec 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -75,6 +75,8 @@ extern void __enable_irq(struct irq_desc *desc);
#define IRQ_START_FORCE true
#define IRQ_START_COND false
+extern int irq_activate(struct irq_desc *desc);
+extern void irq_activate_and_startup(struct irq_desc *desc, bool resend);
extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
extern void irq_shutdown(struct irq_desc *desc);
@@ -437,6 +439,18 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
}
#endif /* !CONFIG_GENERIC_PENDING_IRQ */
+#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
+static inline int irq_domain_activate_irq(struct irq_data *data, bool early)
+{
+ irqd_set_activated(data);
+ return 0;
+}
+static inline void irq_domain_deactivate_irq(struct irq_data *data)
+{
+ irqd_clr_activated(data);
+}
+#endif
+
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
#include <linux/debugfs.h>
@@ -444,7 +458,9 @@ void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
{
debugfs_remove(desc->debugfs_file);
+ kfree(desc->dev_name);
}
+void irq_debugfs_copy_devname(int irq, struct device *dev);
# ifdef CONFIG_IRQ_DOMAIN
void irq_domain_debugfs_init(struct dentry *root);
# else
@@ -459,4 +475,7 @@ static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
static inline void irq_remove_debugfs_entry(struct irq_desc *d)
{
}
+static inline void irq_debugfs_copy_devname(int irq, struct device *dev)
+{
+}
#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 82afb7ed369f..49b54e9979cc 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -27,7 +27,7 @@ static struct lock_class_key irq_desc_lock_class;
#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
- zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+ alloc_bootmem_cpumask_var(&irq_default_affinity);
cpulist_parse(str, irq_default_affinity);
/*
* Set at least the boot cpu. We don't want to end up with
@@ -40,10 +40,8 @@ __setup("irqaffinity=", irq_affinity_setup);
static void __init init_irq_default_affinity(void)
{
-#ifdef CONFIG_CPUMASK_OFFSTACK
- if (!irq_default_affinity)
+ if (!cpumask_available(irq_default_affinity))
zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-#endif
if (cpumask_empty(irq_default_affinity))
cpumask_setall(irq_default_affinity);
}
@@ -448,7 +446,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
}
}
- flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
+ flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
mask = NULL;
for (i = 0; i < cnt; i++) {
@@ -462,6 +460,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
goto err;
irq_insert_desc(start + i, desc);
irq_sysfs_add(start + i, desc);
+ irq_add_debugfs_entry(start + i, desc);
}
bitmap_set(allocated_irqs, start, cnt);
return start;
@@ -863,6 +862,7 @@ int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
return 0;
}
+EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);
void kstat_incr_irq_this_cpu(unsigned int irq)
{
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index ac4644e92b49..4f4f60015e8a 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -21,7 +21,6 @@
static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);
-static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_domain *irq_default_domain;
static void irq_domain_check_hierarchy(struct irq_domain *domain);
@@ -211,6 +210,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
/* Fill structure */
INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
+ mutex_init(&domain->revmap_tree_mutex);
domain->ops = ops;
domain->host_data = host_data;
domain->hwirq_max = hwirq_max;
@@ -462,9 +462,9 @@ static void irq_domain_clear_mapping(struct irq_domain *domain,
if (hwirq < domain->revmap_size) {
domain->linear_revmap[hwirq] = 0;
} else {
- mutex_lock(&revmap_trees_mutex);
+ mutex_lock(&domain->revmap_tree_mutex);
radix_tree_delete(&domain->revmap_tree, hwirq);
- mutex_unlock(&revmap_trees_mutex);
+ mutex_unlock(&domain->revmap_tree_mutex);
}
}
@@ -475,9 +475,9 @@ static void irq_domain_set_mapping(struct irq_domain *domain,
if (hwirq < domain->revmap_size) {
domain->linear_revmap[hwirq] = irq_data->irq;
} else {
- mutex_lock(&revmap_trees_mutex);
+ mutex_lock(&domain->revmap_tree_mutex);
radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
- mutex_unlock(&revmap_trees_mutex);
+ mutex_unlock(&domain->revmap_tree_mutex);
}
}
@@ -921,8 +921,7 @@ static void virq_debug_show_one(struct seq_file *m, struct irq_desc *desc)
chip = irq_data_get_irq_chip(data);
seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");
- seq_printf(m, data ? "0x%p " : " %p ",
- irq_data_get_irq_chip_data(data));
+ seq_printf(m, "0x%p ", irq_data_get_irq_chip_data(data));
seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
direct = (irq == hwirq) && (irq < domain->revmap_direct_max_irq);
@@ -1459,11 +1458,11 @@ static void irq_domain_fix_revmap(struct irq_data *d)
return; /* Not using radix tree. */
/* Fix up the revmap. */
- mutex_lock(&revmap_trees_mutex);
+ mutex_lock(&d->domain->revmap_tree_mutex);
slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
if (slot)
radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
- mutex_unlock(&revmap_trees_mutex);
+ mutex_unlock(&d->domain->revmap_tree_mutex);
}
/**
@@ -1682,28 +1681,36 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
}
EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
-static void __irq_domain_activate_irq(struct irq_data *irq_data)
+static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
{
if (irq_data && irq_data->domain) {
struct irq_domain *domain = irq_data->domain;
+ if (domain->ops->deactivate)
+ domain->ops->deactivate(domain, irq_data);
if (irq_data->parent_data)
- __irq_domain_activate_irq(irq_data->parent_data);
- if (domain->ops->activate)
- domain->ops->activate(domain, irq_data);
+ __irq_domain_deactivate_irq(irq_data->parent_data);
}
}
-static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
+static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
{
- if (irq_data && irq_data->domain) {
- struct irq_domain *domain = irq_data->domain;
+ int ret = 0;
- if (domain->ops->deactivate)
- domain->ops->deactivate(domain, irq_data);
- if (irq_data->parent_data)
- __irq_domain_deactivate_irq(irq_data->parent_data);
+ if (irqd && irqd->domain) {
+ struct irq_domain *domain = irqd->domain;
+
+ if (irqd->parent_data)
+ ret = __irq_domain_activate_irq(irqd->parent_data,
+ early);
+ if (!ret && domain->ops->activate) {
+ ret = domain->ops->activate(domain, irqd, early);
+ /* Rollback in case of error */
+ if (ret && irqd->parent_data)
+ __irq_domain_deactivate_irq(irqd->parent_data);
+ }
}
+ return ret;
}
/**
@@ -1714,12 +1721,15 @@ static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
* This is the second step to call domain_ops->activate to program interrupt
* controllers, so the interrupt could actually get delivered.
*/
-void irq_domain_activate_irq(struct irq_data *irq_data)
+int irq_domain_activate_irq(struct irq_data *irq_data, bool early)
{
- if (!irqd_is_activated(irq_data)) {
- __irq_domain_activate_irq(irq_data);
+ int ret = 0;
+
+ if (!irqd_is_activated(irq_data))
+ ret = __irq_domain_activate_irq(irq_data, early);
+ if (!ret)
irqd_set_activated(irq_data);
- }
+ return ret;
}
/**
@@ -1810,6 +1820,8 @@ irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
d->revmap_size + d->revmap_direct_max_irq);
seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
seq_printf(m, "%*sflags: 0x%08x\n", ind +1 , "", d->flags);
+ if (d->ops && d->ops->debug_show)
+ d->ops->debug_show(m, d, NULL, ind + 1);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
if (!d->parent)
return;
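
The reworked __irq_domain_activate_irq() above activates the hierarchy parent-first and tears the parents down again if a level fails. A standalone sketch of that activate-with-rollback pattern, with invented node and callback types, looks like this:

#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;
	int (*activate)(struct node *n);
	void (*deactivate)(struct node *n);
};

static void deactivate_chain(struct node *n)
{
	for (; n; n = n->parent)
		if (n->deactivate)
			n->deactivate(n);
}

static int activate_chain(struct node *n)
{
	int ret;

	if (!n)
		return 0;

	ret = activate_chain(n->parent);	/* parent first */
	if (ret)
		return ret;

	if (n->activate) {
		ret = n->activate(n);
		if (ret)			/* roll the parents back */
			deactivate_chain(n->parent);
	}
	return ret;
}

static int ok(struct node *n)    { printf("activate %s\n", n->name); return 0; }
static int fail(struct node *n)  { printf("activate %s: no resources\n", n->name); return -1; }
static void undo(struct node *n) { printf("deactivate %s\n", n->name); }

int main(void)
{
	struct node root  = { "root",  NULL,  ok,   undo };
	struct node child = { "child", &root, fail, undo };

	return activate_chain(&child) ? 1 : 0;
}
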
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4bff6a10ae8e..2ff1c0c82fc9 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -398,7 +398,8 @@ int irq_select_affinity_usr(unsigned int irq)
/**
* irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
* @irq: interrupt number to set affinity
- * @vcpu_info: vCPU specific data
+ * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
+ * specific data for percpu_devid interrupts
*
* This function uses the vCPU specific data to set the vCPU
* affinity for an irq. The vCPU specific data is passed from
@@ -536,7 +537,7 @@ void __enable_irq(struct irq_desc *desc)
* time. If it was already started up, then irq_startup()
* will invoke irq_enable() under the hood.
*/
- irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
+ irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
break;
}
default:
@@ -1305,7 +1306,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* thread_mask assigned. See the loop above which or's
* all existing action->thread_mask bits.
*/
- new->thread_mask = 1 << ffz(thread_mask);
+ new->thread_mask = 1UL << ffz(thread_mask);
} else if (new->handler == irq_default_primary_handler &&
!(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
@@ -1342,6 +1343,21 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
goto out_unlock;
}
+ /*
+ * Activate the interrupt. That activation must happen
+ * independently of IRQ_NOAUTOEN. request_irq() can fail
+ * and the callers are supposed to handle
+ * that. enable_irq() of an interrupt requested with
+ * IRQ_NOAUTOEN is not supposed to fail. The activation
+ * keeps it in shutdown mode, it merely associates
+ * resources if necessary and fails if that is not
+ * possible. Interrupts which are in managed shutdown mode
+ * will simply ignore that activation request.
+ */
+ ret = irq_activate(desc);
+ if (ret)
+ goto out_unlock;
+
desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
IRQS_ONESHOT | IRQS_WAITING);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
@@ -1417,7 +1433,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
wake_up_process(new->secondary->thread);
register_irq_proc(irq, desc);
- irq_add_debugfs_entry(irq, desc);
new->dir = NULL;
register_handler_proc(irq, new);
return 0;
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
new file mode 100644
index 000000000000..a3cbbc8191c5
--- /dev/null
+++ b/kernel/irq/matrix.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#include <linux/spinlock.h>
+#include <linux/seq_file.h>
+#include <linux/bitmap.h>
+#include <linux/percpu.h>
+#include <linux/cpu.h>
+#include <linux/irq.h>
+
+#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS) * sizeof(unsigned long))
+
+struct cpumap {
+ unsigned int available;
+ unsigned int allocated;
+ unsigned int managed;
+ bool online;
+ unsigned long alloc_map[IRQ_MATRIX_SIZE];
+ unsigned long managed_map[IRQ_MATRIX_SIZE];
+};
+
+struct irq_matrix {
+ unsigned int matrix_bits;
+ unsigned int alloc_start;
+ unsigned int alloc_end;
+ unsigned int alloc_size;
+ unsigned int global_available;
+ unsigned int global_reserved;
+ unsigned int systembits_inalloc;
+ unsigned int total_allocated;
+ unsigned int online_maps;
+ struct cpumap __percpu *maps;
+ unsigned long scratch_map[IRQ_MATRIX_SIZE];
+ unsigned long system_map[IRQ_MATRIX_SIZE];
+};
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/irq_matrix.h>
+
+/**
+ * irq_alloc_matrix - Allocate an irq_matrix structure and initialize it
+ * @matrix_bits: Number of matrix bits must be <= IRQ_MATRIX_BITS
+ * @alloc_start: From which bit the allocation search starts
+ * @alloc_end: At which bit the allocation search ends, i.e. the first
+ * invalid bit
+ */
+__init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
+ unsigned int alloc_start,
+ unsigned int alloc_end)
+{
+ struct irq_matrix *m;
+
+ if (matrix_bits > IRQ_MATRIX_BITS)
+ return NULL;
+
+ m = kzalloc(sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return NULL;
+
+ m->matrix_bits = matrix_bits;
+ m->alloc_start = alloc_start;
+ m->alloc_end = alloc_end;
+ m->alloc_size = alloc_end - alloc_start;
+ m->maps = alloc_percpu(*m->maps);
+ if (!m->maps) {
+ kfree(m);
+ return NULL;
+ }
+ return m;
+}
+
+/**
+ * irq_matrix_online - Bring the local CPU matrix online
+ * @m: Matrix pointer
+ */
+void irq_matrix_online(struct irq_matrix *m)
+{
+ struct cpumap *cm = this_cpu_ptr(m->maps);
+
+ BUG_ON(cm->online);
+
+ bitmap_zero(cm->alloc_map, m->matrix_bits);
+ cm->available = m->alloc_size - (cm->managed + m->systembits_inalloc);
+ cm->allocated = 0;
+ m->global_available += cm->available;
+ cm->online = true;
+ m->online_maps++;
+ trace_irq_matrix_online(m);
+}
+
+/**
+ * irq_matrix_offline - Bring the local CPU matrix offline
+ * @m: Matrix pointer
+ */
+void irq_matrix_offline(struct irq_matrix *m)
+{
+ struct cpumap *cm = this_cpu_ptr(m->maps);
+
+ /* Update the global available size */
+ m->global_available -= cm->available;
+ cm->online = false;
+ m->online_maps--;
+ trace_irq_matrix_offline(m);
+}
+
+static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
+ unsigned int num, bool managed)
+{
+ unsigned int area, start = m->alloc_start;
+ unsigned int end = m->alloc_end;
+
+ bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
+ bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
+ area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
+ if (area >= end)
+ return area;
+ if (managed)
+ bitmap_set(cm->managed_map, area, num);
+ else
+ bitmap_set(cm->alloc_map, area, num);
+ return area;
+}
+
+/**
+ * irq_matrix_assign_system - Assign system wide entry in the matrix
+ * @m: Matrix pointer
+ * @bit: Which bit to reserve
+ * @replace: Replace an already allocated vector with a system
+ * vector at the same bit position.
+ *
+ * The BUG_ON()s below are on purpose. If this goes wrong in the
+ * early boot process, then the chance to survive is about zero.
+ * If this happens when the system is live, it's not much better.
+ */
+void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
+ bool replace)
+{
+ struct cpumap *cm = this_cpu_ptr(m->maps);
+
+ BUG_ON(bit > m->matrix_bits);
+ BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));
+
+ set_bit(bit, m->system_map);
+ if (replace) {
+ BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
+ cm->allocated--;
+ m->total_allocated--;
+ }
+ if (bit >= m->alloc_start && bit < m->alloc_end)
+ m->systembits_inalloc++;
+
+ trace_irq_matrix_assign_system(bit, m);
+}
+
+/**
+ * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
+ * @m: Matrix pointer
+ * @msk: On which CPUs the bits should be reserved.
+ *
+ * Can be called for offline CPUs. Note, this will only reserve one bit
+ * on all CPUs in @msk, but it's not guaranteed that the bits are at the
+ * same offset on all CPUs
+ */
+int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)
+{
+ unsigned int cpu, failed_cpu;
+
+ for_each_cpu(cpu, msk) {
+ struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
+ unsigned int bit;
+
+ bit = matrix_alloc_area(m, cm, 1, true);
+ if (bit >= m->alloc_end)
+ goto cleanup;
+ cm->managed++;
+ if (cm->online) {
+ cm->available--;
+ m->global_available--;
+ }
+ trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
+ }
+ return 0;
+cleanup:
+ failed_cpu = cpu;
+ for_each_cpu(cpu, msk) {
+ if (cpu == failed_cpu)
+ break;
+ irq_matrix_remove_managed(m, cpumask_of(cpu));
+ }
+ return -ENOSPC;
+}
+
+/**
+ * irq_matrix_remove_managed - Remove managed interrupts in a CPU map
+ * @m: Matrix pointer
+ * @msk: On which CPUs the bits should be removed
+ *
+ * Can be called for offline CPUs
+ *
+ * This removes unallocated managed interrupts from the map. It does
+ * not matter which one, because managed interrupts free their
+ * allocation when they shut down. If not, the accounting is off,
+ * but all that can be done at this point is warn about it.
+ */
+void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, msk) {
+ struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
+ unsigned int bit, end = m->alloc_end;
+
+ if (WARN_ON_ONCE(!cm->managed))
+ continue;
+
+ /* Get managed bits which are not allocated */
+ bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
+
+ bit = find_first_bit(m->scratch_map, end);
+ if (WARN_ON_ONCE(bit >= end))
+ continue;
+
+ clear_bit(bit, cm->managed_map);
+
+ cm->managed--;
+ if (cm->online) {
+ cm->available++;
+ m->global_available++;
+ }
+ trace_irq_matrix_remove_managed(bit, cpu, m, cm);
+ }
+}
+
+/**
+ * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
+ * @m: Matrix pointer
+ * @cpu: On which CPU the interrupt should be allocated
+ */
+int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
+{
+ struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
+ unsigned int bit, end = m->alloc_end;
+
+ /* Get managed bits which are not allocated */
+ bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
+ bit = find_first_bit(m->scratch_map, end);
+ if (bit >= end)
+ return -ENOSPC;
+ set_bit(bit, cm->alloc_map);
+ cm->allocated++;
+ m->total_allocated++;
+ trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
+ return bit;
+}
+
+/**
+ * irq_matrix_assign - Assign a preallocated interrupt in the local CPU map
+ * @m: Matrix pointer
+ * @bit: Which bit to mark
+ *
+ * This should only be used to mark preallocated vectors
+ */
+void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
+{
+ struct cpumap *cm = this_cpu_ptr(m->maps);
+
+ if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
+ return;
+ if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map)))
+ return;
+ cm->allocated++;
+ m->total_allocated++;
+ cm->available--;
+ m->global_available--;
+ trace_irq_matrix_assign(bit, smp_processor_id(), m, cm);
+}
+
+/**
+ * irq_matrix_reserve - Reserve interrupts
+ * @m: Matrix pointer
+ *
+ * This is merely a bookkeeping call. It increments the number of globally
+ * reserved interrupt bits w/o actually allocating them. This allows
+ * interrupt descriptors to be set up w/o assigning low level resources to them.
+ * The actual allocation happens when the interrupt gets activated.
+ */
+void irq_matrix_reserve(struct irq_matrix *m)
+{
+ if (m->global_reserved <= m->global_available &&
+ m->global_reserved + 1 > m->global_available)
+ pr_warn("Interrupt reservation exceeds available resources\n");
+
+ m->global_reserved++;
+ trace_irq_matrix_reserve(m);
+}
+
+/**
+ * irq_matrix_remove_reserved - Remove interrupt reservation
+ * @m: Matrix pointer
+ *
+ * This is merely a bookkeeping call. It decrements the number of globally
+ * reserved interrupt bits. This is used to undo irq_matrix_reserve() when the
+ * interrupt was never in use and a real vector allocated, which undid the
+ * reservation.
+ */
+void irq_matrix_remove_reserved(struct irq_matrix *m)
+{
+ m->global_reserved--;
+ trace_irq_matrix_remove_reserved(m);
+}
+
+/**
+ * irq_matrix_alloc - Allocate a regular interrupt in a CPU map
+ * @m: Matrix pointer
+ * @msk: Which CPUs to search in
+ * @reserved: Allocate previously reserved interrupts
+ * @mapped_cpu: Pointer to store the CPU for which the irq was allocated
+ */
+int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
+ bool reserved, unsigned int *mapped_cpu)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, msk) {
+ struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
+ unsigned int bit;
+
+ if (!cm->online)
+ continue;
+
+ bit = matrix_alloc_area(m, cm, 1, false);
+ if (bit < m->alloc_end) {
+ cm->allocated++;
+ cm->available--;
+ m->total_allocated++;
+ m->global_available--;
+ if (reserved)
+ m->global_reserved--;
+ *mapped_cpu = cpu;
+ trace_irq_matrix_alloc(bit, cpu, m, cm);
+ return bit;
+ }
+ }
+ return -ENOSPC;
+}
+
+/**
+ * irq_matrix_free - Free allocated interrupt in the matrix
+ * @m: Matrix pointer
+ * @cpu: Which CPU map needs to be updated
+ * @bit: The bit to remove
+ * @managed: If true, the interrupt is managed and not accounted
+ * as available.
+ */
+void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
+ unsigned int bit, bool managed)
+{
+ struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
+
+ if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
+ return;
+
+ if (cm->online) {
+ clear_bit(bit, cm->alloc_map);
+ cm->allocated--;
+ m->total_allocated--;
+ if (!managed) {
+ cm->available++;
+ m->global_available++;
+ }
+ }
+ trace_irq_matrix_free(bit, cpu, m, cm);
+}
+
+/**
+ * irq_matrix_available - Get the number of globally available irqs
+ * @m: Pointer to the matrix to query
+ * @cpudown: If true, the local CPU is about to go down, adjust
+ * the number of available irqs accordingly
+ */
+unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
+{
+ struct cpumap *cm = this_cpu_ptr(m->maps);
+
+ return m->global_available - (cpudown ? cm->available : 0);
+}
+
+/**
+ * irq_matrix_reserved - Get the number of globally reserved irqs
+ * @m: Pointer to the matrix to query
+ */
+unsigned int irq_matrix_reserved(struct irq_matrix *m)
+{
+ return m->global_reserved;
+}
+
+/**
+ * irq_matrix_allocated - Get the number of allocated irqs on the local cpu
+ * @m: Pointer to the matrix to search
+ *
+ * This returns the number of allocated irqs
+ */
+unsigned int irq_matrix_allocated(struct irq_matrix *m)
+{
+ struct cpumap *cm = this_cpu_ptr(m->maps);
+
+ return cm->allocated;
+}
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+/**
+ * irq_matrix_debug_show - Show detailed allocation information
+ * @sf: Pointer to the seq_file to print to
+ * @m: Pointer to the matrix allocator
+ * @ind: Indentation for the print format
+ *
+ * Note, this is a lockless snapshot.
+ */
+void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
+{
+ unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits);
+ int cpu;
+
+ seq_printf(sf, "Online bitmaps: %6u\n", m->online_maps);
+ seq_printf(sf, "Global available: %6u\n", m->global_available);
+ seq_printf(sf, "Global reserved: %6u\n", m->global_reserved);
+ seq_printf(sf, "Total allocated: %6u\n", m->total_allocated);
+ seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
+ m->system_map);
+ seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
+
+ seq_printf(sf, "%*s %4d %4u %4u %4u %*pbl\n", ind, " ",
+ cpu, cm->available, cm->managed, cm->allocated,
+ m->matrix_bits, cm->alloc_map);
+ }
+ cpus_read_unlock();
+}
+#endif
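
Taken together, the new allocator calls above follow a simple lifecycle. The sketch below is illustrative only: the irq_matrix_* functions are the ones added in this file, irq_alloc_matrix()/irq_matrix_online() are assumed from earlier in the same file, and the surrounding helper and constants (NR_VECTORS, FIRST_EXTERNAL_VECTOR, FIRST_SYSTEM_VECTOR) are x86 examples, not part of this patch.

/* Illustrative lifecycle sketch (hypothetical caller, not actual x86 code) */
static int example_setup_vector(void)
{
	struct irq_matrix *m;
	unsigned int cpu;
	int vec;

	/* Assumes irq_alloc_matrix() and irq_matrix_online() from earlier in this file */
	m = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR, FIRST_SYSTEM_VECTOR);
	if (!m)
		return -ENOMEM;

	irq_matrix_online(m);		/* bring the local CPU map online */
	irq_matrix_reserve(m);		/* bookkeeping at interrupt setup time */

	vec = irq_matrix_alloc(m, cpu_online_mask, true, &cpu);
	if (vec < 0)
		return vec;		/* -ENOSPC: no free vector on any online CPU */

	/* ... program the hardware with the <cpu, vec> pair ... */

	irq_matrix_free(m, cpu, vec, false);	/* give the vector back */
	return 0;
}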
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 3fa4bd59f569..edb987b2c58d 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -16,6 +16,8 @@
#include <linux/msi.h>
#include <linux/slab.h>
+#include "internals.h"
+
/**
 * alloc_msi_entry - Allocate and initialize an msi_entry
* @dev: Pointer to the device for which this is allocated
@@ -100,13 +102,14 @@ int msi_domain_set_affinity(struct irq_data *irq_data,
return ret;
}
-static void msi_domain_activate(struct irq_domain *domain,
- struct irq_data *irq_data)
+static int msi_domain_activate(struct irq_domain *domain,
+ struct irq_data *irq_data, bool early)
{
struct msi_msg msg;
BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg));
irq_chip_write_msi_msg(irq_data, &msg);
+ return 0;
}
static void msi_domain_deactivate(struct irq_domain *domain,
@@ -373,8 +376,10 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
return ret;
}
- for (i = 0; i < desc->nvec_used; i++)
+ for (i = 0; i < desc->nvec_used; i++) {
irq_set_msi_desc_off(virq, i, desc);
+ irq_debugfs_copy_devname(virq + i, dev);
+ }
}
if (ops->msi_finish)
@@ -396,11 +401,28 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
struct irq_data *irq_data;
irq_data = irq_domain_get_irq_data(domain, desc->irq);
- irq_domain_activate_irq(irq_data);
+ ret = irq_domain_activate_irq(irq_data, true);
+ if (ret)
+ goto cleanup;
+ if (info->flags & MSI_FLAG_MUST_REACTIVATE)
+ irqd_clr_activated(irq_data);
}
}
-
return 0;
+
+cleanup:
+ for_each_msi_entry(desc, dev) {
+ struct irq_data *irqd;
+
+ if (desc->irq == virq)
+ break;
+
+ irqd = irq_domain_get_irq_data(domain, desc->irq);
+ if (irqd_is_activated(irqd))
+ irq_domain_deactivate_irq(irqd);
+ }
+ msi_domain_free_irqs(domain, dev);
+ return ret;
}
/**
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index c010cc0daf79..e8f374971e37 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -155,8 +155,9 @@ static ssize_t write_irq_affinity(int type, struct file *file,
*/
err = irq_select_affinity_usr(irq) ? -EINVAL : count;
} else {
- irq_set_affinity(irq, new_value);
- err = count;
+ err = irq_set_affinity(irq, new_value);
+ if (!err)
+ err = count;
}
free_cpumask:
diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c
index c8c1d073fbf1..e0923fa4927a 100644
--- a/kernel/irq/timings.c
+++ b/kernel/irq/timings.c
@@ -264,7 +264,7 @@ u64 irq_timings_next_event(u64 now)
* order to prevent the timings circular buffer to be updated
* while we are reading it.
*/
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
/*
* Number of elements in the circular buffer: If it happens it
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index bcf107ce0854..40e9d739c169 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -56,7 +56,6 @@ void __weak arch_irq_work_raise(void)
*/
}
-#ifdef CONFIG_SMP
/*
* Enqueue the irq_work @work on @cpu unless it's already pending
* somewhere.
@@ -68,6 +67,8 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(cpu));
+#ifdef CONFIG_SMP
+
/* Arch remote IPI send/receive backend aren't NMI safe */
WARN_ON_ONCE(in_nmi());
@@ -78,10 +79,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
arch_send_call_function_single_ipi(cpu);
+#else /* #ifdef CONFIG_SMP */
+ irq_work_queue(work);
+#endif /* #else #ifdef CONFIG_SMP */
+
return true;
}
-EXPORT_SYMBOL_GPL(irq_work_queue_on);
-#endif
/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
@@ -128,9 +131,9 @@ bool irq_work_needs_cpu(void)
static void irq_work_run_list(struct llist_head *list)
{
- unsigned long flags;
- struct irq_work *work;
+ struct irq_work *work, *tmp;
struct llist_node *llnode;
+ unsigned long flags;
BUG_ON(!irqs_disabled());
@@ -138,11 +141,7 @@ static void irq_work_run_list(struct llist_head *list)
return;
llnode = llist_del_all(list);
- while (llnode != NULL) {
- work = llist_entry(llnode, struct irq_work, llnode);
-
- llnode = llist_next(llnode);
-
+ llist_for_each_entry_safe(work, tmp, llnode, llnode) {
/*
* Clear the PENDING bit, after this point the @work
* can be re-used.
@@ -188,7 +187,7 @@ void irq_work_tick(void)
*/
void irq_work_sync(struct irq_work *work)
{
- WARN_ON_ONCE(irqs_disabled());
+ lockdep_assert_irqs_enabled();
while (work->flags & IRQ_WORK_BUSY)
cpu_relax();
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 0bf2e8f5244a..8ff4ca4665ff 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -83,7 +83,7 @@ static void static_key_slow_inc_cpuslocked(struct static_key *key)
{
int v, v1;
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
/*
* Careful if we get concurrent static_key_slow_inc() calls;
@@ -128,7 +128,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
void static_key_enable_cpuslocked(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
if (atomic_read(&key->enabled) > 0) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
@@ -158,7 +158,7 @@ EXPORT_SYMBOL_GPL(static_key_enable);
void static_key_disable_cpuslocked(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
if (atomic_read(&key->enabled) != 1) {
WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
@@ -224,21 +224,21 @@ static void jump_label_update_timeout(struct work_struct *work)
void static_key_slow_dec(struct static_key *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);
void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
void static_key_deferred_flush(struct static_key_deferred *key)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);
@@ -246,7 +246,7 @@ EXPORT_SYMBOL_GPL(static_key_deferred_flush);
void jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
- STATIC_KEY_CHECK_USE();
+ STATIC_KEY_CHECK_USE(key);
key->timeout = rl;
INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 127e7cfafa55..1e6ae66c6244 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -480,6 +480,7 @@ struct kallsym_iter {
char name[KSYM_NAME_LEN];
char module_name[MODULE_NAME_LEN];
int exported;
+ int show_value;
};
static int get_ksymbol_mod(struct kallsym_iter *iter)
@@ -582,12 +583,15 @@ static void s_stop(struct seq_file *m, void *p)
static int s_show(struct seq_file *m, void *p)
{
+ unsigned long value;
struct kallsym_iter *iter = m->private;
/* Some debugging symbols have no name. Ignore them. */
if (!iter->name[0])
return 0;
+ value = iter->show_value ? iter->value : 0;
+
if (iter->module_name[0]) {
char type;
@@ -597,10 +601,10 @@ static int s_show(struct seq_file *m, void *p)
*/
type = iter->exported ? toupper(iter->type) :
tolower(iter->type);
- seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
+ seq_printf(m, KALLSYM_FMT " %c %s\t[%s]\n", value,
type, iter->name, iter->module_name);
} else
- seq_printf(m, "%pK %c %s\n", (void *)iter->value,
+ seq_printf(m, KALLSYM_FMT " %c %s\n", value,
iter->type, iter->name);
return 0;
}
@@ -612,6 +616,40 @@ static const struct seq_operations kallsyms_op = {
.show = s_show
};
+static inline int kallsyms_for_perf(void)
+{
+#ifdef CONFIG_PERF_EVENTS
+ extern int sysctl_perf_event_paranoid;
+ if (sysctl_perf_event_paranoid <= 1)
+ return 1;
+#endif
+ return 0;
+}
+
+/*
+ * We show kallsyms information even to normal users if we've enabled
+ * kernel profiling and are explicitly not paranoid (so kptr_restrict
+ * is clear, and sysctl_perf_event_paranoid isn't set).
+ *
+ * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
+ * block even that).
+ */
+int kallsyms_show_value(void)
+{
+ switch (kptr_restrict) {
+ case 0:
+ if (kallsyms_for_perf())
+ return 1;
+ /* fallthrough */
+ case 1:
+ if (has_capability_noaudit(current, CAP_SYSLOG))
+ return 1;
+ /* fallthrough */
+ default:
+ return 0;
+ }
+}
+
static int kallsyms_open(struct inode *inode, struct file *file)
{
/*
@@ -625,6 +663,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
return -ENOMEM;
reset_iter(iter, 0);
+ iter->show_value = kallsyms_show_value();
return 0;
}
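
For reference, the visibility decision added above can be summarized as in the sketch below. Only kallsyms_show_value() and kptr_restrict are real; the helper is hypothetical and simply mirrors what s_show() does with iter->show_value.

/*
 * kptr_restrict == 0: addresses are shown if perf profiling is
 *                     unrestricted (perf_event_paranoid <= 1) or the
 *                     reader has CAP_SYSLOG.
 * kptr_restrict == 1: addresses are shown only with CAP_SYSLOG.
 * kptr_restrict >= 2: addresses are never shown (printed as 0).
 */
static unsigned long example_symbol_value(unsigned long addr)
{
	/* Hypothetical helper: mask the address exactly like s_show() does */
	return kallsyms_show_value() ? addr : 0;
}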
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 9f48f4412297..e5bcd94c1efb 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -406,9 +406,10 @@ static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
return 1;
}
-static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
+static int locate_mem_hole_callback(struct resource *res, void *arg)
{
struct kexec_buf *kbuf = (struct kexec_buf *)arg;
+ u64 start = res->start, end = res->end;
unsigned long sz = end - start + 1;
/* Returning 0 will take to next memory range */
@@ -437,7 +438,7 @@ static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
* func returning non-zero, then zero will be returned.
*/
int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
- int (*func)(u64, u64, void *))
+ int (*func)(struct resource *, void *))
{
if (kbuf->image->type == KEXEC_TYPE_CRASH)
return walk_iomem_res_desc(crashk_res.desc,
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a1606a4224e1..da2ccf142358 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -117,7 +117,7 @@ enum kprobe_slot_state {
SLOT_USED = 2,
};
-static void *alloc_insn_page(void)
+void __weak *alloc_insn_page(void)
{
return module_alloc(PAGE_SIZE);
}
@@ -573,13 +573,15 @@ static void kprobe_optimizer(struct work_struct *work)
do_unoptimize_kprobes();
/*
- * Step 2: Wait for quiesence period to ensure all running interrupts
- * are done. Because optprobe may modify multiple instructions
- * there is a chance that Nth instruction is interrupted. In that
- * case, running interrupt can return to 2nd-Nth byte of jump
- * instruction. This wait is for avoiding it.
+ * Step 2: Wait for a quiescence period to ensure that all potentially
+ * preempted tasks have been scheduled normally. Because an optprobe
+ * may modify multiple instructions, there is a chance that the Nth
+ * instruction is preempted. In that case, such tasks can return
+ * to the 2nd-Nth byte of the jump instruction. This wait avoids that.
+ * Note that on a non-preemptive kernel, this is transparently converted
+ * to synchronize_sched() to wait for all interrupts to have completed.
*/
- synchronize_sched();
+ synchronize_rcu_tasks();
 /* Step 3: Optimize kprobes after quiescence period */
do_optimize_kprobes();
@@ -1769,6 +1771,7 @@ unsigned long __weak arch_deref_entry_point(void *entry)
return (unsigned long)entry;
}
+#if 0
int register_jprobes(struct jprobe **jps, int num)
{
int ret = 0, i;
@@ -1837,6 +1840,7 @@ void unregister_jprobes(struct jprobe **jps, int num)
}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);
+#endif
#ifdef CONFIG_KRETPROBES
/*
diff --git a/kernel/kthread.c b/kernel/kthread.c
index ba3992c8c375..8af313081b0d 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -20,7 +20,6 @@
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
-#include <linux/cgroup.h>
#include <trace/events/sched.h>
static DEFINE_SPINLOCK(kthread_create_lock);
@@ -47,6 +46,9 @@ struct kthread {
void *data;
struct completion parked;
struct completion exited;
+#ifdef CONFIG_BLK_CGROUP
+ struct cgroup_subsys_state *blkcg_css;
+#endif
};
enum KTHREAD_BITS {
@@ -74,11 +76,17 @@ static inline struct kthread *to_kthread(struct task_struct *k)
void free_kthread_struct(struct task_struct *k)
{
+ struct kthread *kthread;
+
/*
* Can be NULL if this kthread was created by kernel_thread()
* or if kmalloc() in kthread() failed.
*/
- kfree(to_kthread(k));
+ kthread = to_kthread(k);
+#ifdef CONFIG_BLK_CGROUP
+ WARN_ON_ONCE(kthread && kthread->blkcg_css);
+#endif
+ kfree(kthread);
}
/**
@@ -196,7 +204,7 @@ static int kthread(void *_create)
struct kthread *self;
int ret;
- self = kmalloc(sizeof(*self), GFP_KERNEL);
+ self = kzalloc(sizeof(*self), GFP_KERNEL);
set_kthread_struct(self);
/* If user was SIGKILLed, I release the structure. */
@@ -212,7 +220,6 @@ static int kthread(void *_create)
do_exit(-ENOMEM);
}
- self->flags = 0;
self->data = data;
init_completion(&self->exited);
init_completion(&self->parked);
@@ -1152,3 +1159,54 @@ void kthread_destroy_worker(struct kthread_worker *worker)
kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
+
+#ifdef CONFIG_BLK_CGROUP
+/**
+ * kthread_associate_blkcg - associate blkcg to current kthread
+ * @css: the cgroup info
+ *
+ * Current thread must be a kthread. The thread is running jobs on behalf of
+ * other threads. In some cases, we expect the jobs to attach the cgroup info
+ * of the original threads instead of that of the current thread. This
+ * function stores the original thread's cgroup info in the current kthread
+ * context for later retrieval.
+ */
+void kthread_associate_blkcg(struct cgroup_subsys_state *css)
+{
+ struct kthread *kthread;
+
+ if (!(current->flags & PF_KTHREAD))
+ return;
+ kthread = to_kthread(current);
+ if (!kthread)
+ return;
+
+ if (kthread->blkcg_css) {
+ css_put(kthread->blkcg_css);
+ kthread->blkcg_css = NULL;
+ }
+ if (css) {
+ css_get(css);
+ kthread->blkcg_css = css;
+ }
+}
+EXPORT_SYMBOL(kthread_associate_blkcg);
+
+/**
+ * kthread_blkcg - get associated blkcg css of current kthread
+ *
+ * Current thread must be a kthread.
+ */
+struct cgroup_subsys_state *kthread_blkcg(void)
+{
+ struct kthread *kthread;
+
+ if (current->flags & PF_KTHREAD) {
+ kthread = to_kthread(current);
+ if (kthread)
+ return kthread->blkcg_css;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(kthread_blkcg);
+#endif
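
As a usage illustration, a worker kthread doing block I/O on behalf of another task might bracket the work as in the sketch below. Only kthread_associate_blkcg() and kthread_blkcg() are from this patch; the surrounding function is hypothetical.

/* Hypothetical sketch of a kthread issuing I/O on behalf of a submitter */
static void example_do_work_for(struct cgroup_subsys_state *submitter_css)
{
	/* Charge subsequent block I/O to the submitter's blkcg */
	kthread_associate_blkcg(submitter_css);

	/* ... submit bios; blk-cgroup code can pick up kthread_blkcg() ... */

	/* Drop the association (and the css reference taken above) */
	kthread_associate_blkcg(NULL);
}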
diff --git a/kernel/livepatch/Makefile b/kernel/livepatch/Makefile
index 2b8bdb1925da..b36ceda6488e 100644
--- a/kernel/livepatch/Makefile
+++ b/kernel/livepatch/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_LIVEPATCH) += livepatch.o
-livepatch-objs := core.o patch.o transition.o
+livepatch-objs := core.o patch.o shadow.o transition.o
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index bf8c8fd72589..de9e45dca70f 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -54,11 +54,6 @@ static bool klp_is_module(struct klp_object *obj)
return obj->name;
}
-static bool klp_is_object_loaded(struct klp_object *obj)
-{
- return !obj->name || obj->mod;
-}
-
/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
@@ -285,6 +280,11 @@ static int klp_write_object_relocations(struct module *pmod,
static int __klp_disable_patch(struct klp_patch *patch)
{
+ struct klp_object *obj;
+
+ if (WARN_ON(!patch->enabled))
+ return -EINVAL;
+
if (klp_transition_patch)
return -EBUSY;
@@ -295,6 +295,10 @@ static int __klp_disable_patch(struct klp_patch *patch)
klp_init_transition(patch, KLP_UNPATCHED);
+ klp_for_each_object(patch, obj)
+ if (obj->patched)
+ klp_pre_unpatch_callback(obj);
+
/*
* Enforce the order of the func->transition writes in
* klp_init_transition() and the TIF_PATCH_PENDING writes in
@@ -388,13 +392,18 @@ static int __klp_enable_patch(struct klp_patch *patch)
if (!klp_is_object_loaded(obj))
continue;
- ret = klp_patch_object(obj);
+ ret = klp_pre_patch_callback(obj);
if (ret) {
- pr_warn("failed to enable patch '%s'\n",
- patch->mod->name);
+ pr_warn("pre-patch callback failed for object '%s'\n",
+ klp_is_module(obj) ? obj->name : "vmlinux");
+ goto err;
+ }
- klp_cancel_transition();
- return ret;
+ ret = klp_patch_object(obj);
+ if (ret) {
+ pr_warn("failed to patch object '%s'\n",
+ klp_is_module(obj) ? obj->name : "vmlinux");
+ goto err;
}
}
@@ -403,6 +412,11 @@ static int __klp_enable_patch(struct klp_patch *patch)
patch->enabled = true;
return 0;
+err:
+ pr_warn("failed to enable patch '%s'\n", patch->mod->name);
+
+ klp_cancel_transition();
+ return ret;
}
/**
@@ -854,9 +868,15 @@ static void klp_cleanup_module_patches_limited(struct module *mod,
* is in transition.
*/
if (patch->enabled || patch == klp_transition_patch) {
+
+ if (patch != klp_transition_patch)
+ klp_pre_unpatch_callback(obj);
+
pr_notice("reverting patch '%s' on unloading module '%s'\n",
patch->mod->name, obj->mod->name);
klp_unpatch_object(obj);
+
+ klp_post_unpatch_callback(obj);
}
klp_free_object_loaded(obj);
@@ -906,13 +926,25 @@ int klp_module_coming(struct module *mod)
pr_notice("applying patch '%s' to loading module '%s'\n",
patch->mod->name, obj->mod->name);
+ ret = klp_pre_patch_callback(obj);
+ if (ret) {
+ pr_warn("pre-patch callback failed for object '%s'\n",
+ obj->name);
+ goto err;
+ }
+
ret = klp_patch_object(obj);
if (ret) {
pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
patch->mod->name, obj->mod->name, ret);
+
+ klp_post_unpatch_callback(obj);
goto err;
}
+ if (patch != klp_transition_patch)
+ klp_post_patch_callback(obj);
+
break;
}
}
diff --git a/kernel/livepatch/core.h b/kernel/livepatch/core.h
index a351601d7f76..48a83d4364cf 100644
--- a/kernel/livepatch/core.h
+++ b/kernel/livepatch/core.h
@@ -2,6 +2,46 @@
#ifndef _LIVEPATCH_CORE_H
#define _LIVEPATCH_CORE_H
+#include <linux/livepatch.h>
+
extern struct mutex klp_mutex;
+static inline bool klp_is_object_loaded(struct klp_object *obj)
+{
+ return !obj->name || obj->mod;
+}
+
+static inline int klp_pre_patch_callback(struct klp_object *obj)
+{
+ int ret = 0;
+
+ if (obj->callbacks.pre_patch)
+ ret = (*obj->callbacks.pre_patch)(obj);
+
+ obj->callbacks.post_unpatch_enabled = !ret;
+
+ return ret;
+}
+
+static inline void klp_post_patch_callback(struct klp_object *obj)
+{
+ if (obj->callbacks.post_patch)
+ (*obj->callbacks.post_patch)(obj);
+}
+
+static inline void klp_pre_unpatch_callback(struct klp_object *obj)
+{
+ if (obj->callbacks.pre_unpatch)
+ (*obj->callbacks.pre_unpatch)(obj);
+}
+
+static inline void klp_post_unpatch_callback(struct klp_object *obj)
+{
+ if (obj->callbacks.post_unpatch_enabled &&
+ obj->callbacks.post_unpatch)
+ (*obj->callbacks.post_unpatch)(obj);
+
+ obj->callbacks.post_unpatch_enabled = false;
+}
+
#endif /* _LIVEPATCH_CORE_H */
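
A patch module wires these callbacks up through struct klp_object. The sketch below is illustrative only: it assumes the struct klp_callbacks fields used by the helpers above, and the callback bodies and the funcs array are hypothetical.

/* Illustrative patch-module snippet (hypothetical callback bodies) */
static int example_pre_patch(struct klp_object *obj)
{
	pr_info("preparing object %s\n", obj->name ? obj->name : "vmlinux");
	return 0;	/* non-zero would abort patching of this object */
}

static void example_post_unpatch(struct klp_object *obj)
{
	pr_info("cleaned up object %s\n", obj->name ? obj->name : "vmlinux");
}

static struct klp_object objs[] = {
	{
		.name = NULL,		/* vmlinux */
		.funcs = funcs,		/* assumed to be defined elsewhere */
		.callbacks = {
			.pre_patch = example_pre_patch,
			.post_unpatch = example_post_unpatch,
		},
	},
	{ }
};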
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 52c4e907c14b..82d584225dc6 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
+#include "core.h"
#include "patch.h"
#include "transition.h"
diff --git a/kernel/livepatch/shadow.c b/kernel/livepatch/shadow.c
new file mode 100644
index 000000000000..fdac27588d60
--- /dev/null
+++ b/kernel/livepatch/shadow.c
@@ -0,0 +1,277 @@
+/*
+ * shadow.c - Shadow Variables
+ *
+ * Copyright (C) 2014 Josh Poimboeuf <jpoimboe@redhat.com>
+ * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
+ * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ * DOC: Shadow variable API concurrency notes:
+ *
+ * The shadow variable API provides a simple relationship between an
+ * <obj, id> pair and a pointer value. It is the responsibility of the
+ * caller to provide any mutual exclusion required of the shadow data.
+ *
+ * Once a shadow variable is attached to its parent object via the
+ * klp_shadow_*alloc() API calls, it is considered live: any subsequent
+ * call to klp_shadow_get() may then return the shadow variable's data
+ * pointer. Callers of klp_shadow_*alloc() should prepare shadow data
+ * accordingly.
+ *
+ * The klp_shadow_*alloc() API calls may allocate memory for new shadow
+ * variable structures. Their implementation does not call kmalloc
+ * inside any spinlocks, but API callers should pass GFP flags according
+ * to their specific needs.
+ *
+ * The klp_shadow_hash is an RCU-enabled hashtable and is safe against
+ * concurrent klp_shadow_free() and klp_shadow_get() operations.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/hashtable.h>
+#include <linux/slab.h>
+#include <linux/livepatch.h>
+
+static DEFINE_HASHTABLE(klp_shadow_hash, 12);
+
+/*
+ * klp_shadow_lock provides exclusive access to the klp_shadow_hash and
+ * the shadow variables it references.
+ */
+static DEFINE_SPINLOCK(klp_shadow_lock);
+
+/**
+ * struct klp_shadow - shadow variable structure
+ * @node: klp_shadow_hash hash table node
+ * @rcu_head: RCU is used to safely free this structure
+ * @obj: pointer to parent object
+ * @id: data identifier
+ * @data: data area
+ */
+struct klp_shadow {
+ struct hlist_node node;
+ struct rcu_head rcu_head;
+ void *obj;
+ unsigned long id;
+ char data[];
+};
+
+/**
+ * klp_shadow_match() - verify a shadow variable matches given <obj, id>
+ * @shadow: shadow variable to match
+ * @obj: pointer to parent object
+ * @id: data identifier
+ *
+ * Return: true if the shadow variable matches.
+ */
+static inline bool klp_shadow_match(struct klp_shadow *shadow, void *obj,
+ unsigned long id)
+{
+ return shadow->obj == obj && shadow->id == id;
+}
+
+/**
+ * klp_shadow_get() - retrieve a shadow variable data pointer
+ * @obj: pointer to parent object
+ * @id: data identifier
+ *
+ * Return: the shadow variable data element, NULL on failure.
+ */
+void *klp_shadow_get(void *obj, unsigned long id)
+{
+ struct klp_shadow *shadow;
+
+ rcu_read_lock();
+
+ hash_for_each_possible_rcu(klp_shadow_hash, shadow, node,
+ (unsigned long)obj) {
+
+ if (klp_shadow_match(shadow, obj, id)) {
+ rcu_read_unlock();
+ return shadow->data;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(klp_shadow_get);
+
+static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
+ size_t size, gfp_t gfp_flags, bool warn_on_exist)
+{
+ struct klp_shadow *new_shadow;
+ void *shadow_data;
+ unsigned long flags;
+
+ /* Check if the shadow variable already exists */
+ shadow_data = klp_shadow_get(obj, id);
+ if (shadow_data)
+ goto exists;
+
+ /* Allocate a new shadow variable for use inside the lock below */
+ new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags);
+ if (!new_shadow)
+ return NULL;
+
+ new_shadow->obj = obj;
+ new_shadow->id = id;
+
+ /* Initialize the shadow variable if data provided */
+ if (data)
+ memcpy(new_shadow->data, data, size);
+
+ /* Look for <obj, id> again under the lock */
+ spin_lock_irqsave(&klp_shadow_lock, flags);
+ shadow_data = klp_shadow_get(obj, id);
+ if (unlikely(shadow_data)) {
+ /*
+ * Shadow variable was found, throw away speculative
+ * allocation.
+ */
+ spin_unlock_irqrestore(&klp_shadow_lock, flags);
+ kfree(new_shadow);
+ goto exists;
+ }
+
+ /* No <obj, id> found, so attach the newly allocated one */
+ hash_add_rcu(klp_shadow_hash, &new_shadow->node,
+ (unsigned long)new_shadow->obj);
+ spin_unlock_irqrestore(&klp_shadow_lock, flags);
+
+ return new_shadow->data;
+
+exists:
+ if (warn_on_exist) {
+ WARN(1, "Duplicate shadow variable <%p, %lx>\n", obj, id);
+ return NULL;
+ }
+
+ return shadow_data;
+}
+
+/**
+ * klp_shadow_alloc() - allocate and add a new shadow variable
+ * @obj: pointer to parent object
+ * @id: data identifier
+ * @data: pointer to data to attach to parent
+ * @size: size of attached data
+ * @gfp_flags: GFP mask for allocation
+ *
+ * Allocates @size bytes for new shadow variable data using @gfp_flags
+ * and copies @size bytes from @data into the new shadow variable's own
+ * data space. If @data is NULL, @size bytes are still allocated, but
+ * no copy is performed. The new shadow variable is then added to the
+ * global hashtable.
+ *
+ * If an existing <obj, id> shadow variable can be found, this routine
+ * will issue a WARN, exit early and return NULL.
+ *
+ * Return: the shadow variable data element, NULL on duplicate or
+ * failure.
+ */
+void *klp_shadow_alloc(void *obj, unsigned long id, void *data,
+ size_t size, gfp_t gfp_flags)
+{
+ return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, true);
+}
+EXPORT_SYMBOL_GPL(klp_shadow_alloc);
+
+/**
+ * klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable
+ * @obj: pointer to parent object
+ * @id: data identifier
+ * @data: pointer to data to attach to parent
+ * @size: size of attached data
+ * @gfp_flags: GFP mask for allocation
+ *
+ * Returns a pointer to existing shadow data if an <obj, id> shadow
+ * variable is already present. Otherwise, it creates a new shadow
+ * variable like klp_shadow_alloc().
+ *
+ * This function guarantees that only one shadow variable exists with
+ * the given @id for the given @obj. It also guarantees that the shadow
+ * variable will be initialized by the given @data only when it did not
+ * exist before.
+ *
+ * Return: the shadow variable data element, NULL on failure.
+ */
+void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
+ size_t size, gfp_t gfp_flags)
+{
+ return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, false);
+}
+EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc);
+
+/**
+ * klp_shadow_free() - detach and free a <obj, id> shadow variable
+ * @obj: pointer to parent object
+ * @id: data identifier
+ *
+ * This function releases the memory for this <obj, id> shadow variable
+ * instance; callers should stop referencing it accordingly.
+ */
+void klp_shadow_free(void *obj, unsigned long id)
+{
+ struct klp_shadow *shadow;
+ unsigned long flags;
+
+ spin_lock_irqsave(&klp_shadow_lock, flags);
+
+ /* Delete <obj, id> from hash */
+ hash_for_each_possible(klp_shadow_hash, shadow, node,
+ (unsigned long)obj) {
+
+ if (klp_shadow_match(shadow, obj, id)) {
+ hash_del_rcu(&shadow->node);
+ kfree_rcu(shadow, rcu_head);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&klp_shadow_lock, flags);
+}
+EXPORT_SYMBOL_GPL(klp_shadow_free);
+
+/**
+ * klp_shadow_free_all() - detach and free all <*, id> shadow variables
+ * @id: data identifier
+ *
+ * This function releases the memory for all <*, id> shadow variable
+ * instances; callers should stop referencing them accordingly.
+ */
+void klp_shadow_free_all(unsigned long id)
+{
+ struct klp_shadow *shadow;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&klp_shadow_lock, flags);
+
+ /* Delete all <*, id> from hash */
+ hash_for_each(klp_shadow_hash, i, shadow, node) {
+ if (klp_shadow_match(shadow, shadow->obj, id)) {
+ hash_del_rcu(&shadow->node);
+ kfree_rcu(shadow, rcu_head);
+ }
+ }
+
+ spin_unlock_irqrestore(&klp_shadow_lock, flags);
+}
+EXPORT_SYMBOL_GPL(klp_shadow_free_all);
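
As a usage illustration of the API added above, a livepatch module might attach, read and detach a shadow field as in the sketch below. The parent pointer, the SV_COUNTER id and the example functions are hypothetical; only the klp_shadow_* calls are real.

/* Hypothetical shadow variable usage from a livepatch module */
#define SV_COUNTER	1	/* arbitrary data identifier */

static void example_attach(void *parent)
{
	int start = 0;
	int *counter;

	/* Attach a zero-initialized counter to @parent (copied from &start) */
	counter = klp_shadow_alloc(parent, SV_COUNTER, &start, sizeof(start),
				   GFP_KERNEL);
	if (!counter)
		return;
}

static void example_use(void *parent)
{
	int *counter = klp_shadow_get(parent, SV_COUNTER);

	if (counter)
		(*counter)++;	/* caller provides its own locking */
}

static void example_detach(void *parent)
{
	klp_shadow_free(parent, SV_COUNTER);
}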
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index b004a1fb6032..56add6327736 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -82,6 +82,10 @@ static void klp_complete_transition(void)
unsigned int cpu;
bool immediate_func = false;
+ pr_debug("'%s': completing %s transition\n",
+ klp_transition_patch->mod->name,
+ klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
+
if (klp_target_state == KLP_UNPATCHED) {
/*
* All tasks have transitioned to KLP_UNPATCHED so we can now
@@ -109,9 +113,6 @@ static void klp_complete_transition(void)
}
}
- if (klp_target_state == KLP_UNPATCHED && !immediate_func)
- module_put(klp_transition_patch->mod);
-
/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
if (klp_target_state == KLP_PATCHED)
klp_synchronize_transition();
@@ -130,6 +131,27 @@ static void klp_complete_transition(void)
}
done:
+ klp_for_each_object(klp_transition_patch, obj) {
+ if (!klp_is_object_loaded(obj))
+ continue;
+ if (klp_target_state == KLP_PATCHED)
+ klp_post_patch_callback(obj);
+ else if (klp_target_state == KLP_UNPATCHED)
+ klp_post_unpatch_callback(obj);
+ }
+
+ pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
+ klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
+
+ /*
+ * See complementary comment in __klp_enable_patch() for why we
+ * keep the module reference for immediate patches.
+ */
+ if (!klp_transition_patch->immediate && !immediate_func &&
+ klp_target_state == KLP_UNPATCHED) {
+ module_put(klp_transition_patch->mod);
+ }
+
klp_target_state = KLP_UNDEFINED;
klp_transition_patch = NULL;
}
@@ -145,6 +167,9 @@ void klp_cancel_transition(void)
if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
return;
+ pr_debug("'%s': canceling patching transition, going to unpatch\n",
+ klp_transition_patch->mod->name);
+
klp_target_state = KLP_UNPATCHED;
klp_complete_transition();
}
@@ -408,9 +433,6 @@ void klp_try_complete_transition(void)
}
success:
- pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
- klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
-
/* we're done, now cleanup the data structures */
klp_complete_transition();
}
@@ -426,7 +448,8 @@ void klp_start_transition(void)
WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
- pr_notice("'%s': %s...\n", klp_transition_patch->mod->name,
+ pr_notice("'%s': starting %s transition\n",
+ klp_transition_patch->mod->name,
klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
/*
@@ -482,6 +505,9 @@ void klp_init_transition(struct klp_patch *patch, int state)
*/
klp_target_state = state;
+ pr_debug("'%s': initializing %s transition\n", patch->mod->name,
+ klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
+
/*
* If the patch can be applied or reverted immediately, skip the
* per-task transitions.
@@ -547,6 +573,11 @@ void klp_reverse_transition(void)
unsigned int cpu;
struct task_struct *g, *task;
+ pr_debug("'%s': reversing transition from %s\n",
+ klp_transition_patch->mod->name,
+ klp_target_state == KLP_PATCHED ? "patching to unpatching" :
+ "unpatching to patching");
+
klp_transition_patch->enabled = !klp_transition_patch->enabled;
klp_target_state = !klp_target_state;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e36e652d996f..9776da8db180 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -47,7 +47,6 @@
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
-#include <linux/kmemcheck.h>
#include <linux/random.h>
#include <linux/jhash.h>
@@ -76,6 +75,19 @@ module_param(lock_stat, int, 0644);
#define lock_stat 0
#endif
+#ifdef CONFIG_BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
+static int crossrelease_fullstack = 1;
+#else
+static int crossrelease_fullstack;
+#endif
+static int __init allow_crossrelease_fullstack(char *str)
+{
+ crossrelease_fullstack = 1;
+ return 0;
+}
+
+early_param("crossrelease_fullstack", allow_crossrelease_fullstack);
+
/*
* lockdep_lock: protects the lockdep graph, the hashes and the
* class/list/hash allocators.
@@ -3225,8 +3237,6 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
{
int i;
- kmemcheck_mark_initialized(lock, sizeof(*lock));
-
for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
lock->class_cache[i] = NULL;
@@ -4863,8 +4873,14 @@ static void add_xhlock(struct held_lock *hlock)
xhlock->trace.nr_entries = 0;
xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
xhlock->trace.entries = xhlock->trace_entries;
- xhlock->trace.skip = 3;
- save_stack_trace(&xhlock->trace);
+
+ if (crossrelease_fullstack) {
+ xhlock->trace.skip = 3;
+ save_stack_trace(&xhlock->trace);
+ } else {
+ xhlock->trace.nr_entries = 1;
+ xhlock->trace.entries[0] = hlock->acquire_ip;
+ }
}
static inline int same_context_xhlock(struct hist_lock *xhlock)
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 2655f26ec882..c7471c3fb798 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -23,49 +23,11 @@
#include <linux/spinlock.h>
#include <asm/qrwlock.h>
-/*
- * This internal data structure is used for optimizing access to some of
- * the subfields within the atomic_t cnts.
- */
-struct __qrwlock {
- union {
- atomic_t cnts;
- struct {
-#ifdef __LITTLE_ENDIAN
- u8 wmode; /* Writer mode */
- u8 rcnts[3]; /* Reader counts */
-#else
- u8 rcnts[3]; /* Reader counts */
- u8 wmode; /* Writer mode */
-#endif
- };
- };
- arch_spinlock_t lock;
-};
-
-/**
- * rspin_until_writer_unlock - inc reader count & spin until writer is gone
- * @lock : Pointer to queue rwlock structure
- * @writer: Current queue rwlock writer status byte
- *
- * In interrupt context or at the head of the queue, the reader will just
- * increment the reader count & wait until the writer releases the lock.
- */
-static __always_inline void
-rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
-{
- while ((cnts & _QW_WMASK) == _QW_LOCKED) {
- cpu_relax();
- cnts = atomic_read_acquire(&lock->cnts);
- }
-}
-
/**
* queued_read_lock_slowpath - acquire read lock of a queue rwlock
* @lock: Pointer to queue rwlock structure
- * @cnts: Current qrwlock lock value
*/
-void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
+void queued_read_lock_slowpath(struct qrwlock *lock)
{
/*
* Readers come here when they cannot get the lock without waiting
@@ -73,13 +35,11 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
if (unlikely(in_interrupt())) {
/*
* Readers in interrupt context will get the lock immediately
- * if the writer is just waiting (not holding the lock yet).
- * The rspin_until_writer_unlock() function returns immediately
- * in this case. Otherwise, they will spin (with ACQUIRE
- * semantics) until the lock is available without waiting in
- * the queue.
+ * if the writer is just waiting (not holding the lock yet),
+ * so spin with ACQUIRE semantics until the lock is available
+ * without waiting in the queue.
*/
- rspin_until_writer_unlock(lock, cnts);
+ atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
return;
}
atomic_sub(_QR_BIAS, &lock->cnts);
@@ -88,14 +48,14 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
* Put the reader into the wait queue
*/
arch_spin_lock(&lock->wait_lock);
+ atomic_add(_QR_BIAS, &lock->cnts);
/*
* The ACQUIRE semantics of the following spinning code ensure
* that accesses can't leak upwards out of our subsequent critical
* section in the case that the lock is currently held for write.
*/
- cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
- rspin_until_writer_unlock(lock, cnts);
+ atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
/*
* Signal the next one in queue to become queue head
@@ -110,8 +70,6 @@ EXPORT_SYMBOL(queued_read_lock_slowpath);
*/
void queued_write_lock_slowpath(struct qrwlock *lock)
{
- u32 cnts;
-
/* Put the writer into the wait queue */
arch_spin_lock(&lock->wait_lock);
@@ -120,30 +78,14 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
(atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
goto unlock;
- /*
- * Set the waiting flag to notify readers that a writer is pending,
- * or wait for a previous writer to go away.
- */
- for (;;) {
- struct __qrwlock *l = (struct __qrwlock *)lock;
-
- if (!READ_ONCE(l->wmode) &&
- (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
- break;
+ /* Set the waiting flag to notify readers that a writer is pending */
+ atomic_add(_QW_WAITING, &lock->cnts);
- cpu_relax();
- }
-
- /* When no more readers, set the locked flag */
- for (;;) {
- cnts = atomic_read(&lock->cnts);
- if ((cnts == _QW_WAITING) &&
- (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
- _QW_LOCKED) == _QW_WAITING))
- break;
-
- cpu_relax();
- }
+ /* When no more readers or writers, set the locked flag */
+ do {
+ atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
+ } while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
+ _QW_LOCKED) != _QW_WAITING);
unlock:
arch_spin_unlock(&lock->wait_lock);
}
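
For readers unfamiliar with the helper, atomic_cond_read_acquire() used above behaves roughly like the open-coded loop below. This is a simplified sketch: the real macro is built on smp_cond_load_acquire(), which only needs acquire ordering on the final load and may use architecture-specific wait instructions instead of plain cpu_relax().

/* Simplified sketch of atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED)) */
static inline u32 cond_read_acquire_sketch(atomic_t *v)
{
	u32 val;

	for (;;) {
		val = atomic_read_acquire(v);
		if (!(val & _QW_LOCKED))	/* the "!(VAL & _QW_LOCKED)" condition */
			break;
		cpu_relax();
	}
	return val;
}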
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 15b6a39366c6..6ee477765e6c 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -61,21 +61,50 @@ struct pv_node {
#include "qspinlock_stat.h"
/*
+ * Hybrid PV queued/unfair lock
+ *
* By replacing the regular queued_spin_trylock() with the function below,
 * it will be called once when a lock waiter enters the PV slowpath before
- * being queued. By allowing one lock stealing attempt here when the pending
- * bit is off, it helps to reduce the performance impact of lock waiter
- * preemption without the drawback of lock starvation.
+ * being queued.
+ *
+ * The pending bit is set by the queue head vCPU of the MCS wait queue in
+ * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
+ * When that bit becomes visible to the incoming waiters, no lock stealing
+ * is allowed. The function will return immediately to make the waiters
+ * enter the MCS wait queue. So lock starvation shouldn't happen as long
+ * as the queued mode vCPUs are actively running to set the pending bit
+ * and hence disabling lock stealing.
+ *
+ * When the pending bit isn't set, the lock waiters will stay in the unfair
+ * mode spinning on the lock unless the MCS wait queue is empty. In this
+ * case, the lock waiters will enter the queued mode slowpath trying to
+ * become the queue head and set the pending bit.
+ *
+ * This hybrid PV queued/unfair lock combines the best attributes of a
+ * queued lock (no lock starvation) and an unfair lock (good performance
+ * on not heavily contended locks).
*/
-#define queued_spin_trylock(l) pv_queued_spin_steal_lock(l)
-static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
+#define queued_spin_trylock(l) pv_hybrid_queued_unfair_trylock(l)
+static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
struct __qspinlock *l = (void *)lock;
- if (!(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
- (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
- qstat_inc(qstat_pv_lock_stealing, true);
- return true;
+ /*
+ * Stay in unfair lock mode as long as queued mode waiters are
+ * present in the MCS wait queue but the pending bit isn't set.
+ */
+ for (;;) {
+ int val = atomic_read(&lock->val);
+
+ if (!(val & _Q_LOCKED_PENDING_MASK) &&
+ (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
+ qstat_inc(qstat_pv_lock_stealing, true);
+ return true;
+ }
+ if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
+ break;
+
+ cpu_relax();
}
return false;
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index a6c76a4832b4..f549c552dbf1 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -29,6 +29,22 @@ void __sched down_read(struct rw_semaphore *sem)
EXPORT_SYMBOL(down_read);
+int __sched down_read_killable(struct rw_semaphore *sem)
+{
+ might_sleep();
+ rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
+
+ if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
+ rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ return -EINTR;
+ }
+
+ rwsem_set_reader_owned(sem);
+ return 0;
+}
+
+EXPORT_SYMBOL(down_read_killable);
+
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
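
A typical caller of the new killable variant checks for -EINTR and bails out when a fatal signal arrived while sleeping. The sketch below is a hypothetical caller; only down_read_killable() and up_read() are real.

/* Hypothetical caller of down_read_killable() */
static int example_read_locked(struct mm_struct *mm)
{
	if (down_read_killable(&mm->mmap_sem))
		return -EINTR;		/* killed while waiting for the rwsem */

	/* ... read-side critical section ... */

	up_read(&mm->mmap_sem);
	return 0;
}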
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 6e40fdfba326..1fd1a7543cdd 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -30,11 +30,10 @@
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
* The __lock_function inlines are taken from
- * include/linux/spinlock_api_smp.h
+ * spinlock : include/linux/spinlock_api_smp.h
+ * rwlock : include/linux/rwlock_api_smp.h
*/
#else
-#define raw_read_can_lock(l) read_can_lock(l)
-#define raw_write_can_lock(l) write_can_lock(l)
/*
* Some architectures can relax in favour of the CPU owning the lock.
@@ -69,7 +68,7 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
\
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+ while ((lock)->break_lock) \
arch_##op##_relax(&lock->raw_lock); \
} \
(lock)->break_lock = 0; \
@@ -89,7 +88,7 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
\
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+ while ((lock)->break_lock) \
arch_##op##_relax(&lock->raw_lock); \
} \
(lock)->break_lock = 0; \
diff --git a/kernel/module.c b/kernel/module.c
index de66ec825992..222aba4aa960 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -278,6 +278,16 @@ static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);
#endif /* !CONFIG_MODULE_SIG_FORCE */
+/*
+ * Export sig_enforce kernel cmdline parameter to allow other subsystems rely
+ * on that instead of directly to CONFIG_MODULE_SIG_FORCE config.
+ */
+bool is_module_sig_enforced(void)
+{
+ return sig_enforce;
+}
+EXPORT_SYMBOL(is_module_sig_enforced);
+
/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);
@@ -837,10 +847,8 @@ static int add_module_usage(struct module *a, struct module *b)
pr_debug("Allocating new usage for %s.\n", a->name);
use = kmalloc(sizeof(*use), GFP_ATOMIC);
- if (!use) {
- pr_warn("%s: out of memory loading\n", a->name);
+ if (!use)
return -ENOMEM;
- }
use->source = a;
use->target = b;
@@ -1516,7 +1524,7 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
sattr->mattr.show = module_sect_show;
sattr->mattr.store = NULL;
sattr->mattr.attr.name = sattr->name;
- sattr->mattr.attr.mode = S_IRUGO;
+ sattr->mattr.attr.mode = S_IRUSR;
*(gattr++) = &(sattr++)->mattr.attr;
}
*gattr = NULL;
@@ -4147,6 +4155,7 @@ static int m_show(struct seq_file *m, void *p)
{
struct module *mod = list_entry(p, struct module, list);
char buf[MODULE_FLAGS_BUF_SIZE];
+ unsigned long value;
/* We always ignore unformed modules. */
if (mod->state == MODULE_STATE_UNFORMED)
@@ -4162,7 +4171,8 @@ static int m_show(struct seq_file *m, void *p)
mod->state == MODULE_STATE_COMING ? "Loading" :
"Live");
/* Used by oprofile and other similar tools. */
- seq_printf(m, " 0x%pK", mod->core_layout.base);
+ value = m->private ? 0 : (unsigned long)mod->core_layout.base;
+ seq_printf(m, " 0x" KALLSYM_FMT, value);
/* Taints info */
if (mod->taints)
@@ -4184,9 +4194,23 @@ static const struct seq_operations modules_op = {
.show = m_show
};
+/*
+ * This also sets the "private" pointer to non-NULL if the
+ * kernel pointers should be hidden (so you can just test
+ * "m->private" to see if you should keep the values private).
+ *
+ * We use the same logic as for /proc/kallsyms.
+ */
static int modules_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &modules_op);
+ int err = seq_open(file, &modules_op);
+
+ if (!err) {
+ struct seq_file *m = file->private_data;
+ m->private = kallsyms_show_value() ? NULL : (void *)8ul;
+ }
+
+ return 0;
}
static const struct file_operations proc_modules_operations = {
diff --git a/kernel/padata.c b/kernel/padata.c
index 868f947166d7..f262c9a4e70a 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -131,6 +131,7 @@ int padata_do_parallel(struct padata_instance *pinst,
padata->cb_cpu = cb_cpu;
target_cpu = padata_cpu_hash(pd);
+ padata->cpu = target_cpu;
queue = per_cpu_ptr(pd->pqueue, target_cpu);
spin_lock(&queue->parallel.lock);
@@ -275,11 +276,51 @@ static void padata_reorder(struct parallel_data *pd)
return;
}
+static void invoke_padata_reorder(struct work_struct *work)
+{
+ struct padata_parallel_queue *pqueue;
+ struct parallel_data *pd;
+
+ local_bh_disable();
+ pqueue = container_of(work, struct padata_parallel_queue, reorder_work);
+ pd = pqueue->pd;
+ padata_reorder(pd);
+ local_bh_enable();
+}
+
static void padata_reorder_timer(unsigned long arg)
{
struct parallel_data *pd = (struct parallel_data *)arg;
+ unsigned int weight;
+ int target_cpu, cpu;
- padata_reorder(pd);
+ cpu = get_cpu();
+
+ /* We don't lock pd here to not interfere with parallel processing
+ * padata_reorder() calls on other CPUs. We just need any CPU out of
+ * the cpumask.pcpu set. It would be nice if it's the right one but
+ * it doesn't matter if we're off to the next one by using an outdated
+ * pd->processed value.
+ */
+ weight = cpumask_weight(pd->cpumask.pcpu);
+ target_cpu = padata_index_to_cpu(pd, pd->processed % weight);
+
+ /* ensure to call the reorder callback on the correct CPU */
+ if (cpu != target_cpu) {
+ struct padata_parallel_queue *pqueue;
+ struct padata_instance *pinst;
+
+ /* The timer function is serialized wrt itself -- no locking
+ * needed.
+ */
+ pinst = pd->pinst;
+ pqueue = per_cpu_ptr(pd->pqueue, target_cpu);
+ queue_work_on(target_cpu, pinst->wq, &pqueue->reorder_work);
+ } else {
+ padata_reorder(pd);
+ }
+
+ put_cpu();
}
static void padata_serial_worker(struct work_struct *serial_work)
@@ -323,10 +364,21 @@ void padata_do_serial(struct padata_priv *padata)
int cpu;
struct padata_parallel_queue *pqueue;
struct parallel_data *pd;
+ int reorder_via_wq = 0;
pd = padata->pd;
cpu = get_cpu();
+
+ /* We need to run on the same CPU padata_do_parallel(.., padata, ..)
+ * was called on -- or, at least, enqueue the padata object into the
+ * correct per-cpu queue.
+ */
+ if (cpu != padata->cpu) {
+ reorder_via_wq = 1;
+ cpu = padata->cpu;
+ }
+
pqueue = per_cpu_ptr(pd->pqueue, cpu);
spin_lock(&pqueue->reorder.lock);
@@ -336,7 +388,13 @@ void padata_do_serial(struct padata_priv *padata)
put_cpu();
- padata_reorder(pd);
+ /* If we're running on the wrong CPU, call padata_reorder() via a
+ * kernel worker.
+ */
+ if (reorder_via_wq)
+ queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work);
+ else
+ padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
@@ -384,8 +442,14 @@ static void padata_init_pqueues(struct parallel_data *pd)
struct padata_parallel_queue *pqueue;
cpu_index = 0;
- for_each_cpu(cpu, pd->cpumask.pcpu) {
+ for_each_possible_cpu(cpu) {
pqueue = per_cpu_ptr(pd->pqueue, cpu);
+
+ if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) {
+ pqueue->cpu_index = -1;
+ continue;
+ }
+
pqueue->pd = pd;
pqueue->cpu_index = cpu_index;
cpu_index++;
@@ -393,6 +457,7 @@ static void padata_init_pqueues(struct parallel_data *pd)
__padata_list_init(&pqueue->reorder);
__padata_list_init(&pqueue->parallel);
INIT_WORK(&pqueue->work, padata_parallel_worker);
+ INIT_WORK(&pqueue->reorder_work, invoke_padata_reorder);
atomic_set(&pqueue->num_obj, 0);
}
}
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index e8517b63eb37..e880ca22c5a5 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -259,20 +259,6 @@ config APM_EMULATION
anything, try disabling/enabling this option (or disabling/enabling
APM in your BIOS).
-config PM_OPP
- bool
- select SRCU
- ---help---
- SOCs have a standard set of tuples consisting of frequency and
- voltage pairs that the device will support per voltage domain. This
- is called Operating Performance Point or OPP. The actual definitions
- of OPP varies over silicon within the same family of devices.
-
- OPP layer organizes the data internally using device pointers
- representing individual voltage domains and provides SOC
- implementations a ready to use framework to manage OPPs.
- For more information, read <file:Documentation/power/opp.txt>
-
config PM_CLK
def_bool y
depends on PM && HAVE_CLK
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 97b0df71303e..9d7503910ce2 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -701,8 +701,8 @@ static int __init pm_qos_power_init(void)
for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
ret = register_pm_qos_misc(pm_qos_array[i], d);
if (ret < 0) {
- printk(KERN_ERR "pm_qos_param: %s setup failed\n",
- pm_qos_array[i]->name);
+ pr_err("%s: %s setup failed\n",
+ __func__, pm_qos_array[i]->name);
return ret;
}
}
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 0972a8e09d08..bce0464524d8 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -10,6 +10,8 @@
*
*/
+#define pr_fmt(fmt) "PM: " fmt
+
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -967,7 +969,7 @@ void __init __register_nosave_region(unsigned long start_pfn,
region->end_pfn = end_pfn;
list_add_tail(&region->list, &nosave_regions);
Report:
- printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
+ pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
(unsigned long long) start_pfn << PAGE_SHIFT,
((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}
@@ -1039,7 +1041,7 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
list_for_each_entry(region, &nosave_regions, list) {
unsigned long pfn;
- pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
+ pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
(unsigned long long) region->start_pfn << PAGE_SHIFT,
((unsigned long long) region->end_pfn << PAGE_SHIFT)
- 1);
@@ -1095,7 +1097,7 @@ int create_basic_memory_bitmaps(void)
free_pages_map = bm2;
mark_nosave_pages(forbidden_pages_map);
- pr_debug("PM: Basic memory bitmaps created\n");
+ pr_debug("Basic memory bitmaps created\n");
return 0;
@@ -1131,7 +1133,7 @@ void free_basic_memory_bitmaps(void)
memory_bm_free(bm2, PG_UNSAFE_CLEAR);
kfree(bm2);
- pr_debug("PM: Basic memory bitmaps freed\n");
+ pr_debug("Basic memory bitmaps freed\n");
}
void clear_free_pages(void)
@@ -1152,7 +1154,7 @@ void clear_free_pages(void)
pfn = memory_bm_next_pfn(bm);
}
memory_bm_position_reset(bm);
- pr_info("PM: free pages cleared after restore\n");
+ pr_info("free pages cleared after restore\n");
#endif /* PAGE_POISONING_ZERO */
}
@@ -1690,7 +1692,7 @@ int hibernate_preallocate_memory(void)
ktime_t start, stop;
int error;
- printk(KERN_INFO "PM: Preallocating image memory... ");
+ pr_info("Preallocating image memory... ");
start = ktime_get();
error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
@@ -1821,13 +1823,13 @@ int hibernate_preallocate_memory(void)
out:
stop = ktime_get();
- printk(KERN_CONT "done (allocated %lu pages)\n", pages);
+ pr_cont("done (allocated %lu pages)\n", pages);
swsusp_show_speed(start, stop, pages, "Allocated");
return 0;
err_out:
- printk(KERN_CONT "\n");
+ pr_cont("\n");
swsusp_free();
return -ENOMEM;
}
@@ -1867,8 +1869,8 @@ static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
free += zone_page_state(zone, NR_FREE_PAGES);
nr_pages += count_pages_for_highmem(nr_highmem);
- pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
- nr_pages, PAGES_FOR_IO, free);
+ pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
+ nr_pages, PAGES_FOR_IO, free);
return free > nr_pages + PAGES_FOR_IO;
}
@@ -1882,7 +1884,7 @@ static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
*/
static inline int get_highmem_buffer(int safe_needed)
{
- buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
+ buffer = get_image_page(GFP_ATOMIC, safe_needed);
return buffer ? 0 : -ENOMEM;
}
@@ -1943,7 +1945,7 @@ static int swsusp_alloc(struct memory_bitmap *copy_bm,
while (nr_pages-- > 0) {
struct page *page;
- page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
+ page = alloc_image_page(GFP_ATOMIC);
if (!page)
goto err_out;
memory_bm_set_bit(copy_bm, page_to_pfn(page));
@@ -1961,20 +1963,20 @@ asmlinkage __visible int swsusp_save(void)
{
unsigned int nr_pages, nr_highmem;
- printk(KERN_INFO "PM: Creating hibernation image:\n");
+ pr_info("Creating hibernation image:\n");
drain_local_pages(NULL);
nr_pages = count_data_pages();
nr_highmem = count_highmem_pages();
- printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
+ pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
if (!enough_free_mem(nr_pages, nr_highmem)) {
- printk(KERN_ERR "PM: Not enough free memory\n");
+ pr_err("Not enough free memory\n");
return -ENOMEM;
}
if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
- printk(KERN_ERR "PM: Memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
@@ -1995,8 +1997,7 @@ asmlinkage __visible int swsusp_save(void)
nr_copy_pages = nr_pages;
nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
- printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
- nr_pages);
+ pr_info("Hibernation image created (%d pages copied)\n", nr_pages);
return 0;
}
@@ -2170,7 +2171,7 @@ static int check_header(struct swsusp_info *info)
if (!reason && info->num_physpages != get_num_physpages())
reason = "memory size";
if (reason) {
- printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
+ pr_err("Image mismatch: %s\n", reason);
return -EPERM;
}
return 0;
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index ccd2d20e6b06..0685c4499431 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -437,7 +437,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
error = suspend_ops->enter(state);
trace_suspend_resume(TPS("machine_suspend"),
state, false);
- events_check_enabled = false;
} else if (*wakeup) {
error = -EBUSY;
}
@@ -582,6 +581,7 @@ static int enter_state(suspend_state_t state)
pm_restore_gfp_mask();
Finish:
+ events_check_enabled = false;
pm_pr_dbg("Finishing wakeup.\n");
suspend_finish();
Unlock:
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index d7cdc426ee38..293ead59eccc 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -12,6 +12,8 @@
*
*/
+#define pr_fmt(fmt) "PM: " fmt
+
#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
@@ -241,9 +243,9 @@ static void hib_end_io(struct bio *bio)
struct page *page = bio->bi_io_vec[0].bv_page;
if (bio->bi_status) {
- printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
- MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
- (unsigned long long)bio->bi_iter.bi_sector);
+ pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
+ MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
+ (unsigned long long)bio->bi_iter.bi_sector);
}
if (bio_data_dir(bio) == WRITE)
@@ -273,8 +275,8 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
bio_set_op_attrs(bio, op, op_flags);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
- printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
- (unsigned long long)bio->bi_iter.bi_sector);
+ pr_err("Adding page to bio failed at %llu\n",
+ (unsigned long long)bio->bi_iter.bi_sector);
bio_put(bio);
return -EFAULT;
}
@@ -319,7 +321,7 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
swsusp_resume_block, swsusp_header, NULL);
} else {
- printk(KERN_ERR "PM: Swap header not found!\n");
+ pr_err("Swap header not found!\n");
error = -ENODEV;
}
return error;
@@ -413,8 +415,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
ret = swsusp_swap_check();
if (ret) {
if (ret != -ENOSPC)
- printk(KERN_ERR "PM: Cannot find swap device, try "
- "swapon -a.\n");
+ pr_err("Cannot find swap device, try swapon -a\n");
return ret;
}
handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
@@ -491,9 +492,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
{
if (!error) {
flush_swap_writer(handle);
- printk(KERN_INFO "PM: S");
+ pr_info("S");
error = mark_swapfiles(handle, flags);
- printk("|\n");
+ pr_cont("|\n");
}
if (error)
@@ -542,7 +543,7 @@ static int save_image(struct swap_map_handle *handle,
hib_init_batch(&hb);
- printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n",
+ pr_info("Saving image data pages (%u pages)...\n",
nr_to_write);
m = nr_to_write / 10;
if (!m)
@@ -557,8 +558,8 @@ static int save_image(struct swap_map_handle *handle,
if (ret)
break;
if (!(nr_pages % m))
- printk(KERN_INFO "PM: Image saving progress: %3d%%\n",
- nr_pages / m * 10);
+ pr_info("Image saving progress: %3d%%\n",
+ nr_pages / m * 10);
nr_pages++;
}
err2 = hib_wait_io(&hb);
@@ -566,7 +567,7 @@ static int save_image(struct swap_map_handle *handle,
if (!ret)
ret = err2;
if (!ret)
- printk(KERN_INFO "PM: Image saving done.\n");
+ pr_info("Image saving done\n");
swsusp_show_speed(start, stop, nr_to_write, "Wrote");
return ret;
}
@@ -692,14 +693,14 @@ static int save_image_lzo(struct swap_map_handle *handle,
page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
if (!page) {
- printk(KERN_ERR "PM: Failed to allocate LZO page\n");
+ pr_err("Failed to allocate LZO page\n");
ret = -ENOMEM;
goto out_clean;
}
data = vmalloc(sizeof(*data) * nr_threads);
if (!data) {
- printk(KERN_ERR "PM: Failed to allocate LZO data\n");
+ pr_err("Failed to allocate LZO data\n");
ret = -ENOMEM;
goto out_clean;
}
@@ -708,7 +709,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
crc = kmalloc(sizeof(*crc), GFP_KERNEL);
if (!crc) {
- printk(KERN_ERR "PM: Failed to allocate crc\n");
+ pr_err("Failed to allocate crc\n");
ret = -ENOMEM;
goto out_clean;
}
@@ -726,8 +727,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
"image_compress/%u", thr);
if (IS_ERR(data[thr].thr)) {
data[thr].thr = NULL;
- printk(KERN_ERR
- "PM: Cannot start compression threads\n");
+ pr_err("Cannot start compression threads\n");
ret = -ENOMEM;
goto out_clean;
}
@@ -749,7 +749,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
if (IS_ERR(crc->thr)) {
crc->thr = NULL;
- printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
+ pr_err("Cannot start CRC32 thread\n");
ret = -ENOMEM;
goto out_clean;
}
@@ -760,10 +760,9 @@ static int save_image_lzo(struct swap_map_handle *handle,
*/
handle->reqd_free_pages = reqd_free_pages();
- printk(KERN_INFO
- "PM: Using %u thread(s) for compression.\n"
- "PM: Compressing and saving image data (%u pages)...\n",
- nr_threads, nr_to_write);
+ pr_info("Using %u thread(s) for compression\n", nr_threads);
+ pr_info("Compressing and saving image data (%u pages)...\n",
+ nr_to_write);
m = nr_to_write / 10;
if (!m)
m = 1;
@@ -783,10 +782,8 @@ static int save_image_lzo(struct swap_map_handle *handle,
data_of(*snapshot), PAGE_SIZE);
if (!(nr_pages % m))
- printk(KERN_INFO
- "PM: Image saving progress: "
- "%3d%%\n",
- nr_pages / m * 10);
+ pr_info("Image saving progress: %3d%%\n",
+ nr_pages / m * 10);
nr_pages++;
}
if (!off)
@@ -813,15 +810,14 @@ static int save_image_lzo(struct swap_map_handle *handle,
ret = data[thr].ret;
if (ret < 0) {
- printk(KERN_ERR "PM: LZO compression failed\n");
+ pr_err("LZO compression failed\n");
goto out_finish;
}
if (unlikely(!data[thr].cmp_len ||
data[thr].cmp_len >
lzo1x_worst_compress(data[thr].unc_len))) {
- printk(KERN_ERR
- "PM: Invalid LZO compressed length\n");
+ pr_err("Invalid LZO compressed length\n");
ret = -1;
goto out_finish;
}
@@ -857,7 +853,7 @@ out_finish:
if (!ret)
ret = err2;
if (!ret)
- printk(KERN_INFO "PM: Image saving done.\n");
+ pr_info("Image saving done\n");
swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
if (crc) {
@@ -888,7 +884,7 @@ static int enough_swap(unsigned int nr_pages, unsigned int flags)
unsigned int free_swap = count_swap_pages(root_swap, 1);
unsigned int required;
- pr_debug("PM: Free swap pages: %u\n", free_swap);
+ pr_debug("Free swap pages: %u\n", free_swap);
required = PAGES_FOR_IO + nr_pages;
return free_swap > required;
@@ -915,12 +911,12 @@ int swsusp_write(unsigned int flags)
pages = snapshot_get_image_size();
error = get_swap_writer(&handle);
if (error) {
- printk(KERN_ERR "PM: Cannot get swap writer\n");
+ pr_err("Cannot get swap writer\n");
return error;
}
if (flags & SF_NOCOMPRESS_MODE) {
if (!enough_swap(pages, flags)) {
- printk(KERN_ERR "PM: Not enough free swap\n");
+ pr_err("Not enough free swap\n");
error = -ENOSPC;
goto out_finish;
}
@@ -1068,8 +1064,7 @@ static int load_image(struct swap_map_handle *handle,
hib_init_batch(&hb);
clean_pages_on_read = true;
- printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
- nr_to_read);
+ pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
m = nr_to_read / 10;
if (!m)
m = 1;
@@ -1087,8 +1082,8 @@ static int load_image(struct swap_map_handle *handle,
if (ret)
break;
if (!(nr_pages % m))
- printk(KERN_INFO "PM: Image loading progress: %3d%%\n",
- nr_pages / m * 10);
+ pr_info("Image loading progress: %3d%%\n",
+ nr_pages / m * 10);
nr_pages++;
}
err2 = hib_wait_io(&hb);
@@ -1096,7 +1091,7 @@ static int load_image(struct swap_map_handle *handle,
if (!ret)
ret = err2;
if (!ret) {
- printk(KERN_INFO "PM: Image loading done.\n");
+ pr_info("Image loading done\n");
snapshot_write_finalize(snapshot);
if (!snapshot_image_loaded(snapshot))
ret = -ENODATA;
@@ -1190,14 +1185,14 @@ static int load_image_lzo(struct swap_map_handle *handle,
page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
if (!page) {
- printk(KERN_ERR "PM: Failed to allocate LZO page\n");
+ pr_err("Failed to allocate LZO page\n");
ret = -ENOMEM;
goto out_clean;
}
data = vmalloc(sizeof(*data) * nr_threads);
if (!data) {
- printk(KERN_ERR "PM: Failed to allocate LZO data\n");
+ pr_err("Failed to allocate LZO data\n");
ret = -ENOMEM;
goto out_clean;
}
@@ -1206,7 +1201,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
crc = kmalloc(sizeof(*crc), GFP_KERNEL);
if (!crc) {
- printk(KERN_ERR "PM: Failed to allocate crc\n");
+ pr_err("Failed to allocate crc\n");
ret = -ENOMEM;
goto out_clean;
}
@@ -1226,8 +1221,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
"image_decompress/%u", thr);
if (IS_ERR(data[thr].thr)) {
data[thr].thr = NULL;
- printk(KERN_ERR
- "PM: Cannot start decompression threads\n");
+ pr_err("Cannot start decompression threads\n");
ret = -ENOMEM;
goto out_clean;
}
@@ -1249,7 +1243,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
if (IS_ERR(crc->thr)) {
crc->thr = NULL;
- printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
+ pr_err("Cannot start CRC32 thread\n");
ret = -ENOMEM;
goto out_clean;
}
@@ -1274,8 +1268,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
if (!page[i]) {
if (i < LZO_CMP_PAGES) {
ring_size = i;
- printk(KERN_ERR
- "PM: Failed to allocate LZO pages\n");
+ pr_err("Failed to allocate LZO pages\n");
ret = -ENOMEM;
goto out_clean;
} else {
@@ -1285,10 +1278,9 @@ static int load_image_lzo(struct swap_map_handle *handle,
}
want = ring_size = i;
- printk(KERN_INFO
- "PM: Using %u thread(s) for decompression.\n"
- "PM: Loading and decompressing image data (%u pages)...\n",
- nr_threads, nr_to_read);
+ pr_info("Using %u thread(s) for decompression\n", nr_threads);
+ pr_info("Loading and decompressing image data (%u pages)...\n",
+ nr_to_read);
m = nr_to_read / 10;
if (!m)
m = 1;
@@ -1348,8 +1340,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
if (unlikely(!data[thr].cmp_len ||
data[thr].cmp_len >
lzo1x_worst_compress(LZO_UNC_SIZE))) {
- printk(KERN_ERR
- "PM: Invalid LZO compressed length\n");
+ pr_err("Invalid LZO compressed length\n");
ret = -1;
goto out_finish;
}
@@ -1400,16 +1391,14 @@ static int load_image_lzo(struct swap_map_handle *handle,
ret = data[thr].ret;
if (ret < 0) {
- printk(KERN_ERR
- "PM: LZO decompression failed\n");
+ pr_err("LZO decompression failed\n");
goto out_finish;
}
if (unlikely(!data[thr].unc_len ||
data[thr].unc_len > LZO_UNC_SIZE ||
data[thr].unc_len & (PAGE_SIZE - 1))) {
- printk(KERN_ERR
- "PM: Invalid LZO uncompressed length\n");
+ pr_err("Invalid LZO uncompressed length\n");
ret = -1;
goto out_finish;
}
@@ -1420,10 +1409,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
data[thr].unc + off, PAGE_SIZE);
if (!(nr_pages % m))
- printk(KERN_INFO
- "PM: Image loading progress: "
- "%3d%%\n",
- nr_pages / m * 10);
+ pr_info("Image loading progress: %3d%%\n",
+ nr_pages / m * 10);
nr_pages++;
ret = snapshot_write_next(snapshot);
@@ -1448,15 +1435,14 @@ out_finish:
}
stop = ktime_get();
if (!ret) {
- printk(KERN_INFO "PM: Image loading done.\n");
+ pr_info("Image loading done\n");
snapshot_write_finalize(snapshot);
if (!snapshot_image_loaded(snapshot))
ret = -ENODATA;
if (!ret) {
if (swsusp_header->flags & SF_CRC32_MODE) {
if(handle->crc32 != swsusp_header->crc32) {
- printk(KERN_ERR
- "PM: Invalid image CRC32!\n");
+ pr_err("Invalid image CRC32!\n");
ret = -ENODATA;
}
}
@@ -1513,9 +1499,9 @@ int swsusp_read(unsigned int *flags_p)
swap_reader_finish(&handle);
end:
if (!error)
- pr_debug("PM: Image successfully loaded\n");
+ pr_debug("Image successfully loaded\n");
else
- pr_debug("PM: Error %d resuming\n", error);
+ pr_debug("Error %d resuming\n", error);
return error;
}
@@ -1552,13 +1538,13 @@ put:
if (error)
blkdev_put(hib_resume_bdev, FMODE_READ);
else
- pr_debug("PM: Image signature found, resuming\n");
+ pr_debug("Image signature found, resuming\n");
} else {
error = PTR_ERR(hib_resume_bdev);
}
if (error)
- pr_debug("PM: Image not found (code %d)\n", error);
+ pr_debug("Image not found (code %d)\n", error);
return error;
}
@@ -1570,7 +1556,7 @@ put:
void swsusp_close(fmode_t mode)
{
if (IS_ERR(hib_resume_bdev)) {
- pr_debug("PM: Image device not initialised\n");
+ pr_debug("Image device not initialised\n");
return;
}
@@ -1594,7 +1580,7 @@ int swsusp_unmark(void)
swsusp_resume_block,
swsusp_header, NULL);
} else {
- printk(KERN_ERR "PM: Cannot find swsusp signature!\n");
+ pr_err("Cannot find swsusp signature!\n");
error = -ENODEV;
}
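The kernel/power/swap.c hunks above rely on the pr_fmt() mechanism: defining pr_fmt(fmt) as "PM: " fmt before the first include makes every pr_info()/pr_err()/pr_debug() in the file prepend the "PM: " prefix, which is why the hand-written prefixes disappear from the format strings. A minimal sketch of the idiom, separate from the patch itself:

	/* Must precede the includes that pull in <linux/printk.h>. */
	#define pr_fmt(fmt) "PM: " fmt

	#include <linux/printk.h>

	static void example(void)
	{
		pr_info("Image saving done\n");		/* logged as "PM: Image saving done" */
		pr_err("Not enough free swap\n");	/* logged as "PM: Not enough free swap" */
	}

Note that pr_cont() does not apply pr_fmt(), so continuation output such as pr_cont("|\n") still attaches to the preceding line without picking up a second prefix.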
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index e4b43fef89f5..59c471de342a 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -203,6 +203,21 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
extern int rcu_cpu_stall_suppress;
int rcu_jiffies_till_stall_check(void);
+#define rcu_ftrace_dump_stall_suppress() \
+do { \
+ if (!rcu_cpu_stall_suppress) \
+ rcu_cpu_stall_suppress = 3; \
+} while (0)
+
+#define rcu_ftrace_dump_stall_unsuppress() \
+do { \
+ if (rcu_cpu_stall_suppress == 3) \
+ rcu_cpu_stall_suppress = 0; \
+} while (0)
+
+#else /* #ifdef CONFIG_RCU_STALL_COMMON */
+#define rcu_ftrace_dump_stall_suppress()
+#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
/*
@@ -220,8 +235,12 @@ do { \
static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
\
if (!atomic_read(&___rfd_beenhere) && \
- !atomic_xchg(&___rfd_beenhere, 1)) \
+ !atomic_xchg(&___rfd_beenhere, 1)) { \
+ tracing_off(); \
+ rcu_ftrace_dump_stall_suppress(); \
ftrace_dump(oops_dump_mode); \
+ rcu_ftrace_dump_stall_unsuppress(); \
+ } \
} while (0)
void rcu_early_boot_tests(void);
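The rcu_ftrace_dump() change above brackets the ftrace dump with suppress/unsuppress macros so that the dump itself, which can take a long time, does not trigger further RCU CPU stall warnings; the sentinel value 3 ensures that an administrator-set rcu_cpu_stall_suppress is left untouched. A simplified, hypothetical restatement of the resulting one-shot sequence (the function name is invented for illustration):

	static void rcu_dump_trace_once(void)
	{
		static atomic_t beenhere = ATOMIC_INIT(0);

		/* Only the first caller wins the xchg and performs the dump. */
		if (!atomic_read(&beenhere) && !atomic_xchg(&beenhere, 1)) {
			tracing_off();				/* freeze the trace buffer first */
			rcu_ftrace_dump_stall_suppress();	/* 0 -> 3, otherwise untouched */
			ftrace_dump(DUMP_ALL);
			rcu_ftrace_dump_stall_unsuppress();	/* 3 -> 0, otherwise untouched */
		}
	}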
diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c
index 7649fcd2c4c7..88cba7c2956c 100644
--- a/kernel/rcu/rcu_segcblist.c
+++ b/kernel/rcu/rcu_segcblist.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/rcupdate.h>
#include "rcu_segcblist.h"
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 96a3cdaeed91..74f6b0146b98 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -51,6 +51,7 @@
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
+#include <linux/sched/debug.h>
#include "rcu.h"
@@ -89,6 +90,7 @@ torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
"Time to wait before starting stall (s).");
+torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
"Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
@@ -1239,6 +1241,7 @@ rcu_torture_stats_print(void)
long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
static unsigned long rtcv_snap = ULONG_MAX;
+ static bool splatted;
struct task_struct *wtp;
for_each_possible_cpu(cpu) {
@@ -1324,6 +1327,10 @@ rcu_torture_stats_print(void)
gpnum, completed, flags,
wtp == NULL ? ~0UL : wtp->state,
wtp == NULL ? -1 : (int)task_cpu(wtp));
+ if (!splatted && wtp) {
+ sched_show_task(wtp);
+ splatted = true;
+ }
show_rcu_gp_kthreads();
rcu_ftrace_dump(DUMP_ALL);
}
@@ -1357,7 +1364,7 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
"fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
"test_boost=%d/%d test_boost_interval=%d "
"test_boost_duration=%d shutdown_secs=%d "
- "stall_cpu=%d stall_cpu_holdoff=%d "
+ "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
"n_barrier_cbs=%d "
"onoff_interval=%d onoff_holdoff=%d\n",
torture_type, tag, nrealreaders, nfakewriters,
@@ -1365,7 +1372,7 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
test_boost, cur_ops->can_boost,
test_boost_interval, test_boost_duration, shutdown_secs,
- stall_cpu, stall_cpu_holdoff,
+ stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
n_barrier_cbs,
onoff_interval, onoff_holdoff);
}
@@ -1430,12 +1437,19 @@ static int rcu_torture_stall(void *args)
if (!kthread_should_stop()) {
stop_at = get_seconds() + stall_cpu;
/* RCU CPU stall is expected behavior in following code. */
- pr_alert("rcu_torture_stall start.\n");
rcu_read_lock();
- preempt_disable();
+ if (stall_cpu_irqsoff)
+ local_irq_disable();
+ else
+ preempt_disable();
+ pr_alert("rcu_torture_stall start on CPU %d.\n",
+ smp_processor_id());
while (ULONG_CMP_LT(get_seconds(), stop_at))
continue; /* Induce RCU CPU stall warning. */
- preempt_enable();
+ if (stall_cpu_irqsoff)
+ local_irq_enable();
+ else
+ preempt_enable();
rcu_read_unlock();
pr_alert("rcu_torture_stall end.\n");
}
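The new stall_cpu_irqsoff parameter makes rcu_torture_stall() spin with local interrupts disabled rather than merely with preemption disabled, which exercises the irq-work based stall diagnostics added to kernel/rcu/tree.c below; for example, something like "modprobe rcutorture stall_cpu=30 stall_cpu_holdoff=10 stall_cpu_irqsoff=1" (values chosen purely for illustration) would provoke an interrupts-off stall thirty seconds long after a ten-second holdoff.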
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3e3650e94ae6..f9c0ca2ccf0c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -534,8 +534,8 @@ module_param(rcu_kick_kthreads, bool, 0644);
* How long the grace period must be before we start recruiting
* quiescent-state help from rcu_note_context_switch().
*/
-static ulong jiffies_till_sched_qs = HZ / 20;
-module_param(jiffies_till_sched_qs, ulong, 0644);
+static ulong jiffies_till_sched_qs = HZ / 10;
+module_param(jiffies_till_sched_qs, ulong, 0444);
static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
struct rcu_data *rdp);
@@ -734,7 +734,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
int *fp = &rnp->need_future_gp[idx];
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_future_needs_gp() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
return READ_ONCE(*fp);
}
@@ -746,7 +746,7 @@ static int rcu_future_needs_gp(struct rcu_state *rsp)
static bool
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
- RCU_LOCKDEP_WARN(!irqs_disabled(), "cpu_needs_another_gp() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
if (rcu_gp_in_progress(rsp))
return false; /* No, a grace period is already in progress. */
if (rcu_future_needs_gp(rsp))
@@ -773,7 +773,7 @@ static void rcu_eqs_enter_common(bool user)
struct rcu_data *rdp;
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_enter_common() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
!user && !is_idle_task(current)) {
@@ -837,10 +837,13 @@ static void rcu_eqs_enter(bool user)
* We crowbar the ->dynticks_nesting field to zero to allow for
* the possibility of usermode upcalls having messed up our count
* of interrupt nesting level during the prior busy period.
+ *
+ * If you add or remove a call to rcu_idle_enter(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
*/
void rcu_idle_enter(void)
{
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_idle_enter() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
rcu_eqs_enter(false);
}
@@ -852,10 +855,13 @@ void rcu_idle_enter(void)
* is permitted between this call and rcu_user_exit(). This way the
* CPU doesn't need to maintain the tick for RCU maintenance purposes
* when the CPU runs in userspace.
+ *
+ * If you add or remove a call to rcu_user_enter(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
*/
void rcu_user_enter(void)
{
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_user_enter() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
rcu_eqs_enter(true);
}
#endif /* CONFIG_NO_HZ_FULL */
@@ -875,12 +881,15 @@ void rcu_user_enter(void)
* Use things like work queues to work around this limitation.
*
* You have been warned.
+ *
+ * If you add or remove a call to rcu_irq_exit(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
*/
void rcu_irq_exit(void)
{
struct rcu_dynticks *rdtp;
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
rdtp = this_cpu_ptr(&rcu_dynticks);
/* Page faults can happen in NMI handlers, so check... */
@@ -899,6 +908,9 @@ void rcu_irq_exit(void)
/*
* Wrapper for rcu_irq_exit() where interrupts are enabled.
+ *
+ * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
*/
void rcu_irq_exit_irqson(void)
{
@@ -947,7 +959,7 @@ static void rcu_eqs_exit(bool user)
struct rcu_dynticks *rdtp;
long long oldval;
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_exit() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
rdtp = this_cpu_ptr(&rcu_dynticks);
oldval = rdtp->dynticks_nesting;
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
@@ -971,6 +983,9 @@ static void rcu_eqs_exit(bool user)
* allow for the possibility of usermode upcalls messing up our count
* of interrupt nesting level during the busy period that is just
* now starting.
+ *
+ * If you add or remove a call to rcu_idle_exit(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
*/
void rcu_idle_exit(void)
{
@@ -987,6 +1002,9 @@ void rcu_idle_exit(void)
*
* Exit RCU idle mode while entering the kernel because it can
* run a RCU read side critical section anytime.
+ *
+ * If you add or remove a call to rcu_user_exit(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
*/
void rcu_user_exit(void)
{
@@ -1012,13 +1030,16 @@ void rcu_user_exit(void)
* Use things like work queues to work around this limitation.
*
* You have been warned.
+ *
+ * If you add or remove a call to rcu_irq_enter(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
*/
void rcu_irq_enter(void)
{
struct rcu_dynticks *rdtp;
long long oldval;
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
rdtp = this_cpu_ptr(&rcu_dynticks);
/* Page faults can happen in NMI handlers, so check... */
@@ -1037,6 +1058,9 @@ void rcu_irq_enter(void)
/*
* Wrapper for rcu_irq_enter() where interrupts are enabled.
+ *
+ * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
*/
void rcu_irq_enter_irqson(void)
{
@@ -1055,6 +1079,9 @@ void rcu_irq_enter_irqson(void)
* that the CPU is active. This implementation permits nested NMIs, as
* long as the nesting level does not overflow an int. (You will probably
* run out of stack space first.)
+ *
+ * If you add or remove a call to rcu_nmi_enter(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
*/
void rcu_nmi_enter(void)
{
@@ -1087,6 +1114,9 @@ void rcu_nmi_enter(void)
* RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
* to let the RCU grace-period handling know that the CPU is back to
* being RCU-idle.
+ *
+ * If you add or remove a call to rcu_nmi_exit(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
*/
void rcu_nmi_exit(void)
{
@@ -1207,6 +1237,22 @@ static int rcu_is_cpu_rrupt_from_idle(void)
}
/*
+ * We are reporting a quiescent state on behalf of some other CPU, so
+ * it is our responsibility to check for and handle potential overflow
+ * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
+ * After all, the CPU might be in deep idle state, and thus executing no
+ * code whatsoever.
+ */
+static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
+{
+ lockdep_assert_held(&rnp->lock);
+ if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
+ WRITE_ONCE(rdp->gpwrap, true);
+ if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
+ rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
+}
+
+/*
* Snapshot the specified CPU's dynticks counter so that we can later
* credit them with an implicit quiescent state. Return 1 if this CPU
* is in dynticks idle mode, which is an extended quiescent state.
@@ -1216,15 +1262,34 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
- if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
- rdp->mynode->gpnum))
- WRITE_ONCE(rdp->gpwrap, true);
+ rcu_gpnum_ovf(rdp->mynode, rdp);
return 1;
}
return 0;
}
/*
+ * Handler for the irq_work request posted when a grace period has
+ * gone on for too long, but not yet long enough for an RCU CPU
+ * stall warning. Set state appropriately, but just complain if
+ * there is unexpected state on entry.
+ */
+static void rcu_iw_handler(struct irq_work *iwp)
+{
+ struct rcu_data *rdp;
+ struct rcu_node *rnp;
+
+ rdp = container_of(iwp, struct rcu_data, rcu_iw);
+ rnp = rdp->mynode;
+ raw_spin_lock_rcu_node(rnp);
+ if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
+ rdp->rcu_iw_gpnum = rnp->gpnum;
+ rdp->rcu_iw_pending = false;
+ }
+ raw_spin_unlock_rcu_node(rnp);
+}
+
+/*
* Return true if the specified CPU has passed through a quiescent
* state by virtue of being in or having passed through an dynticks
* idle state since the last call to dyntick_save_progress_counter()
@@ -1235,8 +1300,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
unsigned long jtsq;
bool *rnhqp;
bool *ruqp;
- unsigned long rjtsc;
- struct rcu_node *rnp;
+ struct rcu_node *rnp = rdp->mynode;
/*
* If the CPU passed through or entered a dynticks idle phase with
@@ -1249,34 +1313,25 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
rdp->dynticks_fqs++;
+ rcu_gpnum_ovf(rnp, rdp);
return 1;
}
- /* Compute and saturate jiffies_till_sched_qs. */
- jtsq = jiffies_till_sched_qs;
- rjtsc = rcu_jiffies_till_stall_check();
- if (jtsq > rjtsc / 2) {
- WRITE_ONCE(jiffies_till_sched_qs, rjtsc);
- jtsq = rjtsc / 2;
- } else if (jtsq < 1) {
- WRITE_ONCE(jiffies_till_sched_qs, 1);
- jtsq = 1;
- }
-
/*
* Has this CPU encountered a cond_resched_rcu_qs() since the
* beginning of the grace period? For this to be the case,
* the CPU has to have noticed the current grace period. This
* might not be the case for nohz_full CPUs looping in the kernel.
*/
- rnp = rdp->mynode;
+ jtsq = jiffies_till_sched_qs;
ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
+ rcu_gpnum_ovf(rnp, rdp);
return 1;
- } else {
+ } else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
/* Load rcu_qs_ctr before store to rcu_urgent_qs. */
smp_store_release(ruqp, true);
}
@@ -1285,6 +1340,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
rdp->offline_fqs++;
+ rcu_gpnum_ovf(rnp, rdp);
return 1;
}
@@ -1304,10 +1360,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
* updates are only once every few jiffies, the probability of
* lossage (and thus of slight grace-period extension) is
* quite low.
- *
- * Note that if the jiffies_till_sched_qs boot/sysfs parameter
- * is set too high, we override with half of the RCU CPU stall
- * warning delay.
*/
rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
if (!READ_ONCE(*rnhqp) &&
@@ -1316,15 +1368,26 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
WRITE_ONCE(*rnhqp, true);
/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
smp_store_release(ruqp, true);
- rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
+ rdp->rsp->jiffies_resched += jtsq; /* Re-enable beating. */
}
/*
- * If more than halfway to RCU CPU stall-warning time, do
- * a resched_cpu() to try to loosen things up a bit.
+ * If more than halfway to RCU CPU stall-warning time, do a
+ * resched_cpu() to try to loosen things up a bit. Also check to
+ * see if the CPU is getting hammered with interrupts, but only
+ * once per grace period, just to keep the IPIs down to a dull roar.
*/
- if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2)
+ if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
resched_cpu(rdp->cpu);
+ if (IS_ENABLED(CONFIG_IRQ_WORK) &&
+ !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
+ (rnp->ffmask & rdp->grpmask)) {
+ init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
+ rdp->rcu_iw_pending = true;
+ rdp->rcu_iw_gpnum = rnp->gpnum;
+ irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
+ }
+ }
return 0;
}
@@ -1513,6 +1576,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
{
int cpu;
unsigned long flags;
+ struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
struct rcu_node *rnp = rcu_get_root(rsp);
long totqlen = 0;
@@ -1528,7 +1592,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
*/
pr_err("INFO: %s self-detected stall on CPU", rsp->name);
print_cpu_stall_info_begin();
+ raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
print_cpu_stall_info(rsp, smp_processor_id());
+ raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
print_cpu_stall_info_end();
for_each_possible_cpu(cpu)
totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
@@ -1922,6 +1988,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
rdp->core_needs_qs = need_gp;
zero_cpu_stall_ticks(rdp);
WRITE_ONCE(rdp->gpwrap, false);
+ rcu_gpnum_ovf(rnp, rdp);
}
return ret;
}
@@ -3702,6 +3769,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
rdp->cpu_no_qs.b.norm = true;
rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
rdp->core_needs_qs = false;
+ rdp->rcu_iw_pending = false;
+ rdp->rcu_iw_gpnum = rnp->gpnum - 1;
trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
@@ -3739,10 +3808,24 @@ static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
*/
int rcutree_online_cpu(unsigned int cpu)
{
- sync_sched_exp_online_cleanup(cpu);
- rcutree_affinity_setting(cpu, -1);
+ unsigned long flags;
+ struct rcu_data *rdp;
+ struct rcu_node *rnp;
+ struct rcu_state *rsp;
+
+ for_each_rcu_flavor(rsp) {
+ rdp = per_cpu_ptr(rsp->rda, cpu);
+ rnp = rdp->mynode;
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ rnp->ffmask |= rdp->grpmask;
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ }
if (IS_ENABLED(CONFIG_TREE_SRCU))
srcu_online_cpu(cpu);
+ if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
+ return 0; /* Too early in boot for scheduler work. */
+ sync_sched_exp_online_cleanup(cpu);
+ rcutree_affinity_setting(cpu, -1);
return 0;
}
@@ -3752,6 +3835,19 @@ int rcutree_online_cpu(unsigned int cpu)
*/
int rcutree_offline_cpu(unsigned int cpu)
{
+ unsigned long flags;
+ struct rcu_data *rdp;
+ struct rcu_node *rnp;
+ struct rcu_state *rsp;
+
+ for_each_rcu_flavor(rsp) {
+ rdp = per_cpu_ptr(rsp->rda, cpu);
+ rnp = rdp->mynode;
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+ rnp->ffmask &= ~rdp->grpmask;
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ }
+
rcutree_affinity_setting(cpu, cpu);
if (IS_ENABLED(CONFIG_TREE_SRCU))
srcu_offline_cpu(cpu);
@@ -4200,8 +4296,7 @@ void __init rcu_init(void)
for_each_online_cpu(cpu) {
rcutree_prepare_cpu(cpu);
rcu_cpu_starting(cpu);
- if (IS_ENABLED(CONFIG_TREE_SRCU))
- srcu_online_cpu(cpu);
+ rcutree_online_cpu(cpu);
}
}
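rcu_gpnum_ovf() above flags a CPU whose snapshots have fallen more than ULONG_MAX/4 grace periods behind the rcu_node ->gpnum counter, using the kernel's wrap-tolerant ULONG_CMP_LT() comparison. A stand-alone sketch of that comparison, with the macro body mirroring the kernel's definition and the numbers chosen only for illustration:

	#include <limits.h>
	#include <stdio.h>

	/* "a < b" interpreted modulo ULONG_MAX + 1, so it survives counter wrap. */
	#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (unsigned long)((a) - (b)))

	int main(void)
	{
		unsigned long rdp_gpnum = 10;				/* CPU's stale snapshot */
		unsigned long rnp_gpnum = 10 + ULONG_MAX / 4 + 1;	/* node far ahead */

		/* Mirrors rcu_gpnum_ovf(): once the node is more than ULONG_MAX/4
		 * grace periods ahead, ->gpwrap is set for that CPU. */
		printf("%d\n", ULONG_CMP_LT(rdp_gpnum + ULONG_MAX / 4, rnp_gpnum)); /* prints 1 */
		return 0;
	}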
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 8e1f285f0a70..46a5d1991450 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -103,6 +103,7 @@ struct rcu_node {
/* Online CPUs for next expedited GP. */
/* Any CPU that has ever been online will */
/* have its bit set. */
+ unsigned long ffmask; /* Fully functional CPUs. */
unsigned long grpmask; /* Mask to apply to parent qsmask. */
/* Only one bit will be set in this mask. */
int grplo; /* lowest-numbered CPU or group here. */
@@ -285,6 +286,10 @@ struct rcu_data {
/* 8) RCU CPU stall data. */
unsigned int softirq_snap; /* Snapshot of softirq activity. */
+ /* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
+ struct irq_work rcu_iw; /* Check for non-irq activity. */
+ bool rcu_iw_pending; /* Is ->rcu_iw pending? */
+ unsigned long rcu_iw_gpnum; /* ->gpnum associated with ->rcu_iw. */
int cpu;
struct rcu_state *rsp;
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index e85946d9843b..db85ca3975f1 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -29,6 +29,7 @@
#include <linux/oom.h>
#include <linux/sched/debug.h>
#include <linux/smpboot.h>
+#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include "../time/tick-internal.h"
@@ -54,6 +55,7 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
* This probably needs to be excluded from -rt builds.
*/
#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
+#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1)
#endif /* #else #ifdef CONFIG_RCU_BOOST */
@@ -325,7 +327,7 @@ static void rcu_preempt_note_context_switch(bool preempt)
struct rcu_data *rdp;
struct rcu_node *rnp;
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_preempt_note_context_switch() invoked with interrupts enabled!!!\n");
+ lockdep_assert_irqs_disabled();
WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
if (t->rcu_read_lock_nesting > 0 &&
!t->rcu_read_unlock_special.b.blocked) {
@@ -530,7 +532,7 @@ void rcu_read_unlock_special(struct task_struct *t)
/* Unboost if we were boosted. */
if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
- rt_mutex_unlock(&rnp->boost_mtx);
+ rt_mutex_futex_unlock(&rnp->boost_mtx);
/*
* If this was the last task on the expedited lists,
@@ -911,8 +913,6 @@ void exit_rcu(void)
#ifdef CONFIG_RCU_BOOST
-#include "../locking/rtmutex_common.h"
-
static void rcu_wake_cond(struct task_struct *t, int status)
{
/*
@@ -1421,7 +1421,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
unsigned long dj;
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_needs_cpu() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
/* Snapshot to detect later posting of non-lazy callback. */
rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
@@ -1470,7 +1470,7 @@ static void rcu_prepare_for_idle(void)
struct rcu_state *rsp;
int tne;
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_prepare_for_idle() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
if (rcu_is_nocb_cpu(smp_processor_id()))
return;
@@ -1507,7 +1507,7 @@ static void rcu_prepare_for_idle(void)
rdtp->last_accelerate = jiffies;
for_each_rcu_flavor(rsp) {
rdp = this_cpu_ptr(rsp->rda);
- if (rcu_segcblist_pend_cbs(&rdp->cblist))
+ if (!rcu_segcblist_pend_cbs(&rdp->cblist))
continue;
rnp = rdp->mynode;
raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
@@ -1525,7 +1525,7 @@ static void rcu_prepare_for_idle(void)
*/
static void rcu_cleanup_after_idle(void)
{
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_cleanup_after_idle() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
if (rcu_is_nocb_cpu(smp_processor_id()))
return;
if (rcu_try_advance_all_cbs())
@@ -1671,6 +1671,7 @@ static void print_cpu_stall_info_begin(void)
*/
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
+ unsigned long delta;
char fast_no_hz[72];
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_dynticks *rdtp = rdp->dynticks;
@@ -1685,11 +1686,15 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
ticks_value = rsp->gpnum - rdp->gpnum;
}
print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
- pr_err("\t%d-%c%c%c: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
+ delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum;
+ pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
cpu,
"O."[!!cpu_online(cpu)],
"o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
"N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
+ !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
+ rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
+ "!."[!delta],
ticks_value, ticks_title,
rcu_dynticks_snap(rdtp) & 0xfff,
rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
@@ -2012,7 +2017,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
struct rcu_data *rdp,
unsigned long flags)
{
- RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_nocb_adopt_orphan_cbs() invoked with irqs enabled!!!");
+ lockdep_assert_irqs_disabled();
if (!rcu_is_nocb_cpu(smp_processor_id()))
return false; /* Not NOCBs CPU, caller must migrate CBs. */
__call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist),
@@ -2584,7 +2589,7 @@ static void rcu_bind_gp_kthread(void)
if (!tick_nohz_full_enabled())
return;
- housekeeping_affine(current);
+ housekeeping_affine(current, HK_FLAG_RCU);
}
/* Record the current task on dyntick-idle entry. */
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 5033b66d2753..fbd56d6e575b 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -51,6 +51,7 @@
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
+#include <linux/sched/isolation.h>
#define CREATE_TRACE_POINTS
@@ -494,6 +495,7 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#endif
int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
+EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_suppress, int, 0644);
@@ -575,7 +577,6 @@ DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
-static void rcu_spawn_tasks_kthread(void);
static struct task_struct *rcu_tasks_kthread_ptr;
/**
@@ -600,7 +601,6 @@ void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
unsigned long flags;
bool needwake;
- bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);
rhp->next = NULL;
rhp->func = func;
@@ -610,11 +610,8 @@ void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
rcu_tasks_cbs_tail = &rhp->next;
raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
/* We can't create the thread unless interrupts are enabled. */
- if ((needwake && havetask) ||
- (!havetask && !irqs_disabled_flags(flags))) {
- rcu_spawn_tasks_kthread();
+ if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
wake_up(&rcu_tasks_cbs_wq);
- }
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
@@ -718,7 +715,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
LIST_HEAD(rcu_tasks_holdouts);
/* Run on housekeeping CPUs by default. Sysadm can move if desired. */
- housekeeping_affine(current);
+ housekeeping_affine(current, HK_FLAG_RCU);
/*
* Each pass through the following loop makes one check for
@@ -853,27 +850,18 @@ static int __noreturn rcu_tasks_kthread(void *arg)
}
}
-/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
-static void rcu_spawn_tasks_kthread(void)
+/* Spawn rcu_tasks_kthread() at core_initcall() time. */
+static int __init rcu_spawn_tasks_kthread(void)
{
- static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
struct task_struct *t;
- if (READ_ONCE(rcu_tasks_kthread_ptr)) {
- smp_mb(); /* Ensure caller sees full kthread. */
- return;
- }
- mutex_lock(&rcu_tasks_kthread_mutex);
- if (rcu_tasks_kthread_ptr) {
- mutex_unlock(&rcu_tasks_kthread_mutex);
- return;
- }
t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
BUG_ON(IS_ERR(t));
smp_mb(); /* Ensure others see full kthread. */
WRITE_ONCE(rcu_tasks_kthread_ptr, t);
- mutex_unlock(&rcu_tasks_kthread_mutex);
+ return 0;
}
+core_initcall(rcu_spawn_tasks_kthread);
/* Do the srcu_read_lock() for the above synchronize_srcu(). */
void exit_tasks_rcu_start(void)
diff --git a/kernel/resource.c b/kernel/resource.c
index 9b5f04404152..54ba6de3757c 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -397,9 +397,32 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
res->start = p->start;
if (res->end > p->end)
res->end = p->end;
+ res->flags = p->flags;
+ res->desc = p->desc;
return 0;
}
+static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
+ bool first_level_children_only,
+ void *arg,
+ int (*func)(struct resource *, void *))
+{
+ u64 orig_end = res->end;
+ int ret = -1;
+
+ while ((res->start < res->end) &&
+ !find_next_iomem_res(res, desc, first_level_children_only)) {
+ ret = (*func)(res, arg);
+ if (ret)
+ break;
+
+ res->start = res->end + 1;
+ res->end = orig_end;
+ }
+
+ return ret;
+}
+
/*
* Walks through iomem resources and calls func() with matching resource
* ranges. This walks through whole tree and not just first level children.
@@ -415,29 +438,15 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
* <linux/ioport.h> and set it in 'desc' of a target resource entry.
*/
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
- u64 end, void *arg, int (*func)(u64, u64, void *))
+ u64 end, void *arg, int (*func)(struct resource *, void *))
{
struct resource res;
- u64 orig_end;
- int ret = -1;
res.start = start;
res.end = end;
res.flags = flags;
- orig_end = res.end;
-
- while ((res.start < res.end) &&
- (!find_next_iomem_res(&res, desc, false))) {
-
- ret = (*func)(res.start, res.end, arg);
- if (ret)
- break;
-
- res.start = res.end + 1;
- res.end = orig_end;
- }
- return ret;
+ return __walk_iomem_res_desc(&res, desc, false, arg, func);
}
/*
@@ -448,25 +457,33 @@ int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
* ranges.
*/
int walk_system_ram_res(u64 start, u64 end, void *arg,
- int (*func)(u64, u64, void *))
+ int (*func)(struct resource *, void *))
{
struct resource res;
- u64 orig_end;
- int ret = -1;
res.start = start;
res.end = end;
res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
- orig_end = res.end;
- while ((res.start < res.end) &&
- (!find_next_iomem_res(&res, IORES_DESC_NONE, true))) {
- ret = (*func)(res.start, res.end, arg);
- if (ret)
- break;
- res.start = res.end + 1;
- res.end = orig_end;
- }
- return ret;
+
+ return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
+ arg, func);
+}
+
+/*
+ * This function calls the @func callback against all memory ranges, which
+ * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
+ */
+int walk_mem_res(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *))
+{
+ struct resource res;
+
+ res.start = start;
+ res.end = end;
+ res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+ return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
+ arg, func);
}
#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
@@ -508,6 +525,7 @@ static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
return 1;
}
+
/*
* This generic page_is_ram() returns true if specified address is
* registered as System RAM in iomem_resource list.
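With this kernel/resource.c change the walk_iomem_res_desc()/walk_system_ram_res() callbacks receive the matching struct resource, including the flags and desc now copied by find_next_iomem_res(), instead of a bare (start, end) pair. A hypothetical caller sketching the new callback signature (count_ram_bytes() and total_system_ram() are invented names):

	static int count_ram_bytes(struct resource *res, void *arg)
	{
		u64 *total = arg;

		*total += resource_size(res);	/* res->end - res->start + 1 */
		return 0;			/* a non-zero return stops the walk */
	}

	static u64 total_system_ram(void)
	{
		u64 total = 0;

		walk_system_ram_res(0, ULLONG_MAX, &total, count_ram_bytes);
		return total;
	}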
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index a9ee16bbc693..e2f9d4feff40 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -27,3 +27,4 @@ obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
obj-$(CONFIG_MEMBARRIER) += membarrier.o
+obj-$(CONFIG_CPU_ISOLATION) += isolation.o
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index ca0f8fc945c6..e086babe6c61 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -388,7 +388,7 @@ void sched_clock_tick(void)
if (unlikely(!sched_clock_running))
return;
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
scd = this_scd();
__scd_stamp(scd);
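This clock.c hunk, like the RCU hunks above and the cputime.c hunk below, replaces open-coded WARN_ON_ONCE(!irqs_disabled()) style checks with lockdep_assert_irqs_disabled(). The helper leans on lockdep's irq-state tracking (CONFIG_TRACE_IRQFLAGS), so it warns when interrupts are unexpectedly enabled in debug builds and compiles away entirely otherwise. A minimal usage sketch, with a hypothetical function name:

	#include <linux/lockdep.h>

	static void must_run_with_irqs_off(void)
	{
		/* Warns under lockdep if local interrupts are enabled here;
		 * a no-op when irq-state tracking is not configured. */
		lockdep_assert_irqs_disabled();

		/* ... code that relies on local interrupts being masked ... */
	}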
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d17c5da523a0..a092f350f3a2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -26,6 +26,7 @@
#include <linux/profile.h>
#include <linux/security.h>
#include <linux/syscalls.h>
+#include <linux/sched/isolation.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
@@ -42,18 +43,21 @@
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
/*
* Debugging: various feature bits
+ *
+ * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
+ * sysctl_sched_features, defined in sched.h, to allow constants propagation
+ * at compile time and compiler optimization based on features default.
*/
-
#define SCHED_FEAT(name, enabled) \
(1UL << __SCHED_FEAT_##name) * enabled |
-
const_debug unsigned int sysctl_sched_features =
#include "features.h"
0;
-
#undef SCHED_FEAT
+#endif
/*
* Number of tasks to iterate in a single balance run.
@@ -83,9 +87,6 @@ __read_mostly int scheduler_running;
*/
int sysctl_sched_rt_runtime = 950000;
-/* CPUs with isolated domains */
-cpumask_var_t cpu_isolated_map;
-
/*
* __task_rq_lock - lock the rq @p resides on.
*/
@@ -505,8 +506,7 @@ void resched_cpu(int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- if (!raw_spin_trylock_irqsave(&rq->lock, flags))
- return;
+ raw_spin_lock_irqsave(&rq->lock, flags);
resched_curr(rq);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -526,7 +526,7 @@ int get_nohz_timer_target(void)
int i, cpu = smp_processor_id();
struct sched_domain *sd;
- if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
+ if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER))
return cpu;
rcu_read_lock();
@@ -535,15 +535,15 @@ int get_nohz_timer_target(void)
if (cpu == i)
continue;
- if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
+ if (!idle_cpu(i) && housekeeping_cpu(i, HK_FLAG_TIMER)) {
cpu = i;
goto unlock;
}
}
}
- if (!is_housekeeping_cpu(cpu))
- cpu = housekeeping_any_cpu();
+ if (!housekeeping_cpu(cpu, HK_FLAG_TIMER))
+ cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
unlock:
rcu_read_unlock();
return cpu;
@@ -733,7 +733,7 @@ int tg_nop(struct task_group *tg, void *data)
}
#endif
-static void set_load_weight(struct task_struct *p)
+static void set_load_weight(struct task_struct *p, bool update_load)
{
int prio = p->static_prio - MAX_RT_PRIO;
struct load_weight *load = &p->se.load;
@@ -747,8 +747,16 @@ static void set_load_weight(struct task_struct *p)
return;
}
- load->weight = scale_load(sched_prio_to_weight[prio]);
- load->inv_weight = sched_prio_to_wmult[prio];
+ /*
+ * SCHED_OTHER tasks have to update their load when changing their
+ * weight
+ */
+ if (update_load && p->sched_class == &fair_sched_class) {
+ reweight_task(p, prio);
+ } else {
+ load->weight = scale_load(sched_prio_to_weight[prio]);
+ load->inv_weight = sched_prio_to_wmult[prio];
+ }
}
static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -2358,7 +2366,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->static_prio = NICE_TO_PRIO(0);
p->prio = p->normal_prio = __normal_prio(p);
- set_load_weight(p);
+ set_load_weight(p, false);
/*
* We don't need the reset flag anymore after the fork. It has
@@ -3805,7 +3813,7 @@ void set_user_nice(struct task_struct *p, long nice)
put_prev_task(rq, p);
p->static_prio = NICE_TO_PRIO(nice);
- set_load_weight(p);
+ set_load_weight(p, true);
old_prio = p->prio;
p->prio = effective_prio(p);
delta = p->prio - old_prio;
@@ -3962,7 +3970,7 @@ static void __setscheduler_params(struct task_struct *p,
*/
p->rt_priority = attr->sched_priority;
p->normal_prio = normal_prio(p);
- set_load_weight(p);
+ set_load_weight(p, true);
}
/* Actually do priority change: must hold pi & rq lock. */
@@ -4842,6 +4850,7 @@ int __sched _cond_resched(void)
preempt_schedule_common();
return 1;
}
+ rcu_all_qs();
return 0;
}
EXPORT_SYMBOL(_cond_resched);
@@ -5165,6 +5174,7 @@ void sched_show_task(struct task_struct *p)
show_stack(p, NULL);
put_task_stack(p);
}
+EXPORT_SYMBOL_GPL(sched_show_task);
static inline bool
state_filter_match(unsigned long state_filter, struct task_struct *p)
@@ -5726,10 +5736,6 @@ static inline void sched_init_smt(void) { }
void __init sched_init_smp(void)
{
- cpumask_var_t non_isolated_cpus;
-
- alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
-
sched_init_numa();
/*
@@ -5739,16 +5745,12 @@ void __init sched_init_smp(void)
*/
mutex_lock(&sched_domains_mutex);
sched_init_domains(cpu_active_mask);
- cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
- if (cpumask_empty(non_isolated_cpus))
- cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
mutex_unlock(&sched_domains_mutex);
/* Move init over to a non-isolated CPU */
- if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
+ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
BUG();
sched_init_granularity();
- free_cpumask_var(non_isolated_cpus);
init_sched_rt_class();
init_sched_dl_class();
@@ -5933,7 +5935,7 @@ void __init sched_init(void)
atomic_set(&rq->nr_iowait, 0);
}
- set_load_weight(&init_task);
+ set_load_weight(&init_task, false);
/*
* The boot idle thread does lazy MMU switching as well:
@@ -5952,9 +5954,6 @@ void __init sched_init(void)
calc_load_update = jiffies + LOAD_FREQ;
#ifdef CONFIG_SMP
- /* May be allocated at isolcpus cmdline parse time */
- if (cpu_isolated_map == NULL)
- zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
idle_thread_set_boot_cpu();
set_cpu_rq_start_time(smp_processor_id());
#endif
@@ -6621,7 +6620,7 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
return ret;
}
-static int cpu_stats_show(struct seq_file *sf, void *v)
+static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
{
struct task_group *tg = css_tg(seq_css(sf));
struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
@@ -6661,7 +6660,7 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
}
#endif /* CONFIG_RT_GROUP_SCHED */
-static struct cftype cpu_files[] = {
+static struct cftype cpu_legacy_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
{
.name = "shares",
@@ -6682,7 +6681,7 @@ static struct cftype cpu_files[] = {
},
{
.name = "stat",
- .seq_show = cpu_stats_show,
+ .seq_show = cpu_cfs_stat_show,
},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
@@ -6700,16 +6699,182 @@ static struct cftype cpu_files[] = {
{ } /* Terminate */
};
+static int cpu_extra_stat_show(struct seq_file *sf,
+ struct cgroup_subsys_state *css)
+{
+#ifdef CONFIG_CFS_BANDWIDTH
+ {
+ struct task_group *tg = css_tg(css);
+ struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
+ u64 throttled_usec;
+
+ throttled_usec = cfs_b->throttled_time;
+ do_div(throttled_usec, NSEC_PER_USEC);
+
+ seq_printf(sf, "nr_periods %d\n"
+ "nr_throttled %d\n"
+ "throttled_usec %llu\n",
+ cfs_b->nr_periods, cfs_b->nr_throttled,
+ throttled_usec);
+ }
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
+{
+ struct task_group *tg = css_tg(css);
+ u64 weight = scale_load_down(tg->shares);
+
+ return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024);
+}
+
+static int cpu_weight_write_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 weight)
+{
+ /*
+ * cgroup weight knobs should use the common MIN, DFL and MAX
+ * values which are 1, 100 and 10000 respectively. While it loses
+ * a bit of range on both ends, it maps pretty well onto the shares
+ * value used by scheduler and the round-trip conversions preserve
+ * the original value over the entire range.
+ */
+ if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
+ return -ERANGE;
+
+ weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);
+
+ return sched_group_set_shares(css_tg(css), scale_load(weight));
+}
+
+static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
+{
+ unsigned long weight = scale_load_down(css_tg(css)->shares);
+ int last_delta = INT_MAX;
+ int prio, delta;
+
+ /* find the closest nice value to the current weight */
+ for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) {
+ delta = abs(sched_prio_to_weight[prio] - weight);
+ if (delta >= last_delta)
+ break;
+ last_delta = delta;
+ }
+
+ return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO);
+}
+
+static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
+ struct cftype *cft, s64 nice)
+{
+ unsigned long weight;
+
+ if (nice < MIN_NICE || nice > MAX_NICE)
+ return -ERANGE;
+
+ weight = sched_prio_to_weight[NICE_TO_PRIO(nice) - MAX_RT_PRIO];
+ return sched_group_set_shares(css_tg(css), scale_load(weight));
+}
+#endif
+
+static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
+ long period, long quota)
+{
+ if (quota < 0)
+ seq_puts(sf, "max");
+ else
+ seq_printf(sf, "%ld", quota);
+
+ seq_printf(sf, " %ld\n", period);
+}
+
+/* caller should put the current value in *@periodp before calling */
+static int __maybe_unused cpu_period_quota_parse(char *buf,
+ u64 *periodp, u64 *quotap)
+{
+ char tok[21]; /* U64_MAX */
+
+ if (!sscanf(buf, "%s %llu", tok, periodp))
+ return -EINVAL;
+
+ *periodp *= NSEC_PER_USEC;
+
+ if (sscanf(tok, "%llu", quotap))
+ *quotap *= NSEC_PER_USEC;
+ else if (!strcmp(tok, "max"))
+ *quotap = RUNTIME_INF;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+#ifdef CONFIG_CFS_BANDWIDTH
+static int cpu_max_show(struct seq_file *sf, void *v)
+{
+ struct task_group *tg = css_tg(seq_css(sf));
+
+ cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg));
+ return 0;
+}
+
+static ssize_t cpu_max_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct task_group *tg = css_tg(of_css(of));
+ u64 period = tg_get_cfs_period(tg);
+ u64 quota;
+ int ret;
+
+ ret = cpu_period_quota_parse(buf, &period, &quota);
+ if (!ret)
+ ret = tg_set_cfs_bandwidth(tg, period, quota);
+ return ret ?: nbytes;
+}
+#endif
+
+static struct cftype cpu_files[] = {
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ {
+ .name = "weight",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .read_u64 = cpu_weight_read_u64,
+ .write_u64 = cpu_weight_write_u64,
+ },
+ {
+ .name = "weight.nice",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .read_s64 = cpu_weight_nice_read_s64,
+ .write_s64 = cpu_weight_nice_write_s64,
+ },
+#endif
+#ifdef CONFIG_CFS_BANDWIDTH
+ {
+ .name = "max",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = cpu_max_show,
+ .write = cpu_max_write,
+ },
+#endif
+ { } /* terminate */
+};
+
struct cgroup_subsys cpu_cgrp_subsys = {
.css_alloc = cpu_cgroup_css_alloc,
.css_online = cpu_cgroup_css_online,
.css_released = cpu_cgroup_css_released,
.css_free = cpu_cgroup_css_free,
+ .css_extra_stat_show = cpu_extra_stat_show,
.fork = cpu_cgroup_fork,
.can_attach = cpu_cgroup_can_attach,
.attach = cpu_cgroup_attach,
- .legacy_cftypes = cpu_files,
+ .legacy_cftypes = cpu_legacy_files,
+ .dfl_cftypes = cpu_files,
.early_init = true,
+ .threaded = true,
};
#endif /* CONFIG_CGROUP_SCHED */
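The new cgroup-v2 cpu interface above exposes cpu.weight, cpu.weight.nice and cpu.max while keeping the scheduler's internal shares scale (default 1024); user space sees the common cgroup weight range of 1..10000 with a default of 100. The round trip is a scaled division, as in this sketch of the mapping performed by cpu_weight_read_u64()/cpu_weight_write_u64() (helper names invented, scale_load()/scale_load_down() omitted for brevity):

	/* shares 1024 <-> weight 100, shares 2048 <-> weight 200, and so on. */
	static u64 weight_from_shares(unsigned long shares)
	{
		return DIV_ROUND_CLOSEST_ULL((u64)shares * CGROUP_WEIGHT_DFL, 1024);
	}

	static unsigned long shares_from_weight(u64 weight)
	{
		return DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL);
	}

cpu.max uses the usual "$QUOTA $PERIOD" format in microseconds, with the literal "max" standing for an unlimited quota, which is exactly what cpu_period_quota_print() and cpu_period_quota_parse() emit and accept.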
diff --git a/kernel/sched/cpuacct.h b/kernel/sched/cpuacct.h
deleted file mode 100644
index a8358a57a316..000000000000
--- a/kernel/sched/cpuacct.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_CGROUP_CPUACCT
-
-extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
-extern void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
-
-#else
-
-static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime)
-{
-}
-
-static inline void
-cpuacct_account_field(struct task_struct *tsk, int index, u64 val)
-{
-}
-
-#endif
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index ba0da243fdd8..2f52ec0f1539 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -282,8 +282,12 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
* Do not reduce the frequency if the CPU has not been idle
* recently, as the reduction is likely to be premature then.
*/
- if (busy && next_f < sg_policy->next_freq)
+ if (busy && next_f < sg_policy->next_freq) {
next_f = sg_policy->next_freq;
+
+ /* Reset cached freq as next_freq has changed */
+ sg_policy->cached_raw_freq = 0;
+ }
}
sugov_update_commit(sg_policy, time, next_f);
}
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 14d2dbf97c53..bac6ac9a4ec7 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -109,7 +109,7 @@ static inline void task_group_account_field(struct task_struct *p, int index,
*/
__this_cpu_add(kernel_cpustat.cpustat[index], tmp);
- cpuacct_account_field(p, index, tmp);
+ cgroup_account_cputime_field(p, index, tmp);
}
/*
@@ -259,8 +259,7 @@ static inline u64 account_other_time(u64 max)
{
u64 accounted;
- /* Shall be converted to a lockdep-enabled lightweight check */
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
accounted = steal_account_process_time(max);
@@ -447,6 +446,13 @@ void vtime_account_irq_enter(struct task_struct *tsk)
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
+void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ u64 *ut, u64 *st)
+{
+ *ut = curr->utime;
+ *st = curr->stime;
+}
+
void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
*ut = p->utime;
@@ -585,9 +591,8 @@ drop_precision:
*
* Assuming that rtime_i+1 >= rtime_i.
*/
-static void cputime_adjust(struct task_cputime *curr,
- struct prev_cputime *prev,
- u64 *ut, u64 *st)
+void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ u64 *ut, u64 *st)
{
u64 rtime, stime, utime;
unsigned long flags;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 4ae5c1ea90e2..2473736c7616 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -243,7 +243,7 @@ static void task_non_contending(struct task_struct *p)
if (p->state == TASK_DEAD)
sub_rq_bw(p->dl.dl_bw, &rq->dl);
raw_spin_lock(&dl_b->lock);
- __dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+ __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
__dl_clear_params(p);
raw_spin_unlock(&dl_b->lock);
}
@@ -1144,7 +1144,7 @@ static void update_curr_dl(struct rq *rq)
account_group_exec_runtime(curr, delta_exec);
curr->se.exec_start = rq_clock_task(rq);
- cpuacct_charge(curr, delta_exec);
+ cgroup_account_cputime(curr, delta_exec);
sched_rt_avg_update(rq, delta_exec);
@@ -1210,7 +1210,7 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
}
raw_spin_lock(&dl_b->lock);
- __dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+ __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
raw_spin_unlock(&dl_b->lock);
__dl_clear_params(p);
@@ -1365,6 +1365,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
update_dl_entity(dl_se, pi_se);
} else if (flags & ENQUEUE_REPLENISH) {
replenish_dl_entity(dl_se, pi_se);
+ } else if ((flags & ENQUEUE_RESTORE) &&
+ dl_time_before(dl_se->deadline,
+ rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
+ setup_new_dl_entity(dl_se);
}
__enqueue_dl_entity(dl_se);
@@ -2167,7 +2171,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
* until we complete the update.
*/
raw_spin_lock(&src_dl_b->lock);
- __dl_clear(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+ __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
raw_spin_unlock(&src_dl_b->lock);
}
@@ -2256,13 +2260,6 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
return;
}
- /*
- * If p is boosted we already updated its params in
- * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH),
- * p's deadline being now already after rq_clock(rq).
- */
- if (dl_time_before(p->dl.deadline, rq_clock(rq)))
- setup_new_dl_entity(&p->dl);
if (rq->curr != p) {
#ifdef CONFIG_SMP
@@ -2452,7 +2449,7 @@ int sched_dl_overflow(struct task_struct *p, int policy,
if (dl_policy(policy) && !task_has_dl_policy(p) &&
!__dl_overflow(dl_b, cpus, 0, new_bw)) {
if (hrtimer_active(&p->dl.inactive_timer))
- __dl_clear(dl_b, p->dl.dl_bw, cpus);
+ __dl_sub(dl_b, p->dl.dl_bw, cpus);
__dl_add(dl_b, new_bw, cpus);
err = 0;
} else if (dl_policy(policy) && task_has_dl_policy(p) &&
@@ -2464,7 +2461,7 @@ int sched_dl_overflow(struct task_struct *p, int policy,
* But this would require to set the task's "inactive
* timer" when the task is not inactive.
*/
- __dl_clear(dl_b, p->dl.dl_bw, cpus);
+ __dl_sub(dl_b, p->dl.dl_bw, cpus);
__dl_add(dl_b, new_bw, cpus);
dl_change_utilization(p, new_bw);
err = 0;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2f93e4a2d9f6..1ca0130ed4f9 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -441,9 +441,11 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
P_SCHEDSTAT(se->statistics.wait_count);
}
P(se->load.weight);
+ P(se->runnable_weight);
#ifdef CONFIG_SMP
P(se->avg.load_avg);
P(se->avg.util_avg);
+ P(se->avg.runnable_load_avg);
#endif
#undef PN_SCHEDSTAT
@@ -558,16 +560,19 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
+ SEQ_printf(m, " .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
SEQ_printf(m, " .%-30s: %lu\n", "load_avg",
cfs_rq->avg.load_avg);
SEQ_printf(m, " .%-30s: %lu\n", "runnable_load_avg",
- cfs_rq->runnable_load_avg);
+ cfs_rq->avg.runnable_load_avg);
SEQ_printf(m, " .%-30s: %lu\n", "util_avg",
cfs_rq->avg.util_avg);
- SEQ_printf(m, " .%-30s: %ld\n", "removed_load_avg",
- atomic_long_read(&cfs_rq->removed_load_avg));
- SEQ_printf(m, " .%-30s: %ld\n", "removed_util_avg",
- atomic_long_read(&cfs_rq->removed_util_avg));
+ SEQ_printf(m, " .%-30s: %ld\n", "removed.load_avg",
+ cfs_rq->removed.load_avg);
+ SEQ_printf(m, " .%-30s: %ld\n", "removed.util_avg",
+ cfs_rq->removed.util_avg);
+ SEQ_printf(m, " .%-30s: %ld\n", "removed.runnable_sum",
+ cfs_rq->removed.runnable_sum);
#ifdef CONFIG_FAIR_GROUP_SCHED
SEQ_printf(m, " .%-30s: %lu\n", "tg_load_avg_contrib",
cfs_rq->tg_load_avg_contrib);
@@ -1004,10 +1009,13 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
"nr_involuntary_switches", (long long)p->nivcsw);
P(se.load.weight);
+ P(se.runnable_weight);
#ifdef CONFIG_SMP
P(se.avg.load_sum);
+ P(se.avg.runnable_load_sum);
P(se.avg.util_sum);
P(se.avg.load_avg);
+ P(se.avg.runnable_load_avg);
P(se.avg.util_avg);
P(se.avg.last_update_time);
#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5c09ddf8c832..4037e19bbca2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -33,6 +33,7 @@
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>
+#include <linux/sched/isolation.h>
#include <trace/events/sched.h>
@@ -717,13 +718,8 @@ void init_entity_runnable_average(struct sched_entity *se)
{
struct sched_avg *sa = &se->avg;
- sa->last_update_time = 0;
- /*
- * sched_avg's period_contrib should be strictly less then 1024, so
- * we give it 1023 to make sure it is almost a period (1024us), and
- * will definitely be update (after enqueue).
- */
- sa->period_contrib = 1023;
+ memset(sa, 0, sizeof(*sa));
+
/*
 * Tasks are initialized with full load to be seen as heavy tasks until
* they get a chance to stabilize to their real load level.
@@ -731,13 +727,10 @@ void init_entity_runnable_average(struct sched_entity *se)
* nothing has been attached to the task group yet.
*/
if (entity_is_task(se))
- sa->load_avg = scale_load_down(se->load.weight);
- sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
- /*
- * At this point, util_avg won't be used in select_task_rq_fair anyway
- */
- sa->util_avg = 0;
- sa->util_sum = 0;
+ sa->runnable_load_avg = sa->load_avg = scale_load_down(se->load.weight);
+
+ se->runnable_weight = se->load.weight;
+
 /* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}
@@ -785,7 +778,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
} else {
sa->util_avg = cap;
}
- sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
}
if (entity_is_task(se)) {
@@ -852,7 +844,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
struct task_struct *curtask = task_of(curr);
trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
- cpuacct_charge(curtask, delta_exec);
+ cgroup_account_cputime(curtask, delta_exec);
account_group_exec_runtime(curtask, delta_exec);
}
@@ -2026,7 +2018,7 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
delta = runtime - p->last_sum_exec_runtime;
*period = now - p->last_task_numa_placement;
} else {
- delta = p->se.avg.load_sum / p->se.load.weight;
+ delta = p->se.avg.load_sum;
*period = LOAD_AVG_MAX;
}
@@ -2693,18 +2685,226 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq->nr_running--;
}
+/*
+ * Signed add and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define add_positive(_ptr, _val) do { \
+ typeof(_ptr) ptr = (_ptr); \
+ typeof(_val) val = (_val); \
+ typeof(*ptr) res, var = READ_ONCE(*ptr); \
+ \
+ res = var + val; \
+ \
+ if (val < 0 && res > var) \
+ res = 0; \
+ \
+ WRITE_ONCE(*ptr, res); \
+} while (0)
+
+/*
+ * Unsigned subtract and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define sub_positive(_ptr, _val) do { \
+ typeof(_ptr) ptr = (_ptr); \
+ typeof(*ptr) val = (_val); \
+ typeof(*ptr) res, var = READ_ONCE(*ptr); \
+ res = var - val; \
+ if (res > var) \
+ res = 0; \
+ WRITE_ONCE(*ptr, res); \
+} while (0)
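
For illustration only (a hypothetical sketch with made-up values, not part of this patch), the clamp keeps a stale or racy decrement from leaving a huge unsigned value behind:

	unsigned long avg = 5;

	sub_positive(&avg, 7);		/* 5 - 7 wraps, so res > var and avg is clamped to 0 */
	add_positive(&avg, -3);		/* a negative delta on 0 is clamped the same way */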
+
+#ifdef CONFIG_SMP
+/*
+ * XXX we want to get rid of these helpers and use the full load resolution.
+ */
+static inline long se_weight(struct sched_entity *se)
+{
+ return scale_load_down(se->load.weight);
+}
+
+static inline long se_runnable(struct sched_entity *se)
+{
+ return scale_load_down(se->runnable_weight);
+}
+
+static inline void
+enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ cfs_rq->runnable_weight += se->runnable_weight;
+
+ cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg;
+ cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum;
+}
+
+static inline void
+dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ cfs_rq->runnable_weight -= se->runnable_weight;
+
+ sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg);
+ sub_positive(&cfs_rq->avg.runnable_load_sum,
+ se_runnable(se) * se->avg.runnable_load_sum);
+}
+
+static inline void
+enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ cfs_rq->avg.load_avg += se->avg.load_avg;
+ cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
+}
+
+static inline void
+dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
+ sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+}
+#else
+static inline void
+enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+static inline void
+dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+static inline void
+enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+static inline void
+dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+#endif
+
+static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+ unsigned long weight, unsigned long runnable)
+{
+ if (se->on_rq) {
+ /* commit outstanding execution time */
+ if (cfs_rq->curr == se)
+ update_curr(cfs_rq);
+ account_entity_dequeue(cfs_rq, se);
+ dequeue_runnable_load_avg(cfs_rq, se);
+ }
+ dequeue_load_avg(cfs_rq, se);
+
+ se->runnable_weight = runnable;
+ update_load_set(&se->load, weight);
+
+#ifdef CONFIG_SMP
+ do {
+ u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;
+
+ se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
+ se->avg.runnable_load_avg =
+ div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);
+ } while (0);
+#endif
+
+ enqueue_load_avg(cfs_rq, se);
+ if (se->on_rq) {
+ account_entity_enqueue(cfs_rq, se);
+ enqueue_runnable_load_avg(cfs_rq, se);
+ }
+}
+
+void reweight_task(struct task_struct *p, int prio)
+{
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ struct load_weight *load = &se->load;
+ unsigned long weight = scale_load(sched_prio_to_weight[prio]);
+
+ reweight_entity(cfs_rq, se, weight, weight);
+ load->inv_weight = sched_prio_to_wmult[prio];
+}
+
#ifdef CONFIG_FAIR_GROUP_SCHED
# ifdef CONFIG_SMP
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
+/*
+ * All this does is approximate the hierarchical proportion which includes that
+ * global sum we all love to hate.
+ *
+ * That is, the weight of a group entity, is the proportional share of the
+ * group weight based on the group runqueue weights. That is:
+ *
+ * tg->weight * grq->load.weight
+ * ge->load.weight = ----------------------------- (1)
+ * \Sum grq->load.weight
+ *
+ * Now, because that sum is prohibitively expensive to compute (been
+ * there, done that) we approximate it with this average stuff. The average
+ * moves slower and therefore the approximation is cheaper and more stable.
+ *
+ * So instead of the above, we substitute:
+ *
+ * grq->load.weight -> grq->avg.load_avg (2)
+ *
+ * which yields the following:
+ *
+ * tg->weight * grq->avg.load_avg
+ * ge->load.weight = ------------------------------ (3)
+ * tg->load_avg
+ *
+ * Where: tg->load_avg ~= \Sum grq->avg.load_avg
+ *
+ * That is shares_avg, and it is right (given the approximation (2)).
+ *
+ * The problem with it is that because the average is slow -- it was designed
+ * to be exactly that of course -- this leads to transients in boundary
+ * conditions. Specifically, the case where the group was idle and we start
+ * one task. It takes time for our CPU's grq->avg.load_avg to build up,
+ * yielding bad latency etc.
+ *
+ * Now, in that special case (1) reduces to:
+ *
+ * tg->weight * grq->load.weight
+ * ge->load.weight = ----------------------------- = tg->weight (4)
+ *                         grq->load.weight
+ *
+ * That is, the sum collapses because all other CPUs are idle; the UP scenario.
+ *
+ * So what we do is modify our approximation (3) to approach (4) in the (near)
+ * UP case, like:
+ *
+ * ge->load.weight =
+ *
+ * tg->weight * grq->load.weight
+ * --------------------------------------------------- (5)
+ * tg->load_avg - grq->avg.load_avg + grq->load.weight
+ *
+ * But because grq->load.weight can drop to 0, resulting in a divide by zero,
+ * we need to use grq->avg.load_avg as its lower bound, which then gives:
+ *
+ *
+ * tg->weight * grq->load.weight
+ * ge->load.weight = ----------------------------- (6)
+ * tg_load_avg'
+ *
+ * Where:
+ *
+ * tg_load_avg' = tg->load_avg - grq->avg.load_avg +
+ * max(grq->load.weight, grq->avg.load_avg)
+ *
+ * And that is shares_weight and is icky. In the (near) UP case it approaches
+ * (4) while in the normal case it approaches (3). It consistently
+ * overestimates the ge->load.weight and therefore:
+ *
+ * \Sum ge->load.weight >= tg->weight
+ *
+ * hence icky!
+ */
+static long calc_group_shares(struct cfs_rq *cfs_rq)
{
- long tg_weight, load, shares;
+ long tg_weight, tg_shares, load, shares;
+ struct task_group *tg = cfs_rq->tg;
- /*
- * This really should be: cfs_rq->avg.load_avg, but instead we use
- * cfs_rq->load.weight, which is its upper bound. This helps ramp up
- * the shares for small weight interactive tasks.
- */
- load = scale_load_down(cfs_rq->load.weight);
+ tg_shares = READ_ONCE(tg->shares);
+
+ load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
tg_weight = atomic_long_read(&tg->load_avg);
@@ -2712,7 +2912,7 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
tg_weight -= cfs_rq->tg_load_avg_contrib;
tg_weight += load;
- shares = (tg->shares * load);
+ shares = (tg_shares * load);
if (tg_weight)
shares /= tg_weight;
@@ -2728,63 +2928,86 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
* case no task is runnable on a CPU MIN_SHARES=2 should be returned
* instead of 0.
*/
- if (shares < MIN_SHARES)
- shares = MIN_SHARES;
- if (shares > tg->shares)
- shares = tg->shares;
-
- return shares;
-}
-# else /* CONFIG_SMP */
-static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
- return tg->shares;
+ return clamp_t(long, shares, MIN_SHARES, tg_shares);
}
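
A worked example with made-up numbers (not taken from the changelog) may help. Take tg->shares = 1024 on two CPUs, where CPU1 has been busy for a while (grq1->avg.load_avg = 512, which is all of tg->load_avg) and a single task has only just been enqueued on CPU0 (grq0->load.weight = 1024, grq0->avg.load_avg still ~0). For CPU0's group entity:

	load      = max(1024, 0)        = 1024
	tg_weight = 512 - 0 + 1024      = 1536
	shares    = 1024 * 1024 / 1536  ~= 682

The pure average form (3) would instead give 1024 * 0 / 512 = 0, clamped to MIN_SHARES, i.e. the freshly busy group entity would start with almost no weight; bounding the contribution by the instantaneous grq->load.weight is what removes that transient.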
-# endif /* CONFIG_SMP */
-static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
- unsigned long weight)
+/*
+ * This calculates the effective runnable weight for a group entity based on
+ * the group entity weight calculated above.
+ *
+ * Because of the above approximation (2), our group entity weight is
+ * a load_avg based ratio (3). This means that it includes blocked load and
+ * does not represent the runnable weight.
+ *
+ * Approximate the group entity's runnable weight per ratio from the group
+ * runqueue:
+ *
+ * grq->avg.runnable_load_avg
+ * ge->runnable_weight = ge->load.weight * -------------------------- (7)
+ * grq->avg.load_avg
+ *
+ * However, analogous to above, since the avg numbers are slow, this leads to
+ * transients in the from-idle case. Instead we use:
+ *
+ * ge->runnable_weight = ge->load.weight *
+ *
+ * max(grq->avg.runnable_load_avg, grq->runnable_weight)
+ * ----------------------------------------------------- (8)
+ * max(grq->avg.load_avg, grq->load.weight)
+ *
+ * Where these max() serve both to use the 'instant' values to fix the slow
+ * from-idle case and to avoid the /0 on to-idle, similar to (6).
+ */
+static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares)
{
- if (se->on_rq) {
- /* commit outstanding execution time */
- if (cfs_rq->curr == se)
- update_curr(cfs_rq);
- account_entity_dequeue(cfs_rq, se);
- }
+ long runnable, load_avg;
- update_load_set(&se->load, weight);
+ load_avg = max(cfs_rq->avg.load_avg,
+ scale_load_down(cfs_rq->load.weight));
- if (se->on_rq)
- account_entity_enqueue(cfs_rq, se);
+ runnable = max(cfs_rq->avg.runnable_load_avg,
+ scale_load_down(cfs_rq->runnable_weight));
+
+ runnable *= shares;
+ if (load_avg)
+ runnable /= load_avg;
+
+ return clamp_t(long, runnable, MIN_SHARES, shares);
}
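
Continuing the hypothetical numbers from calc_group_shares() above (shares ~= 682): if everything queued on CPU0 is runnable (runnable_weight = load.weight = 1024, both averages still ~0), then runnable = 682 * max(0, 1024) / max(0, 1024) = 682, i.e. the group entity's runnable weight equals its full weight. If half of the queue's load were blocked instead (runnable part 512 vs. load part 1024), it would drop to 682 * 512 / 1024 = 341.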
+# endif /* CONFIG_SMP */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
-static void update_cfs_shares(struct sched_entity *se)
+/*
+ * Recomputes the group entity based on the current state of its group
+ * runqueue.
+ */
+static void update_cfs_group(struct sched_entity *se)
{
- struct cfs_rq *cfs_rq = group_cfs_rq(se);
- struct task_group *tg;
- long shares;
+ struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+ long shares, runnable;
- if (!cfs_rq)
+ if (!gcfs_rq)
return;
- if (throttled_hierarchy(cfs_rq))
+ if (throttled_hierarchy(gcfs_rq))
return;
- tg = cfs_rq->tg;
-
#ifndef CONFIG_SMP
- if (likely(se->load.weight == tg->shares))
+ runnable = shares = READ_ONCE(gcfs_rq->tg->shares);
+
+ if (likely(se->load.weight == shares))
return;
+#else
+ shares = calc_group_shares(gcfs_rq);
+ runnable = calc_group_runnable(gcfs_rq, shares);
#endif
- shares = calc_cfs_shares(cfs_rq, tg);
- reweight_entity(cfs_rq_of(se), se, shares);
+ reweight_entity(cfs_rq_of(se), se, shares, runnable);
}
#else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_cfs_shares(struct sched_entity *se)
+static inline void update_cfs_group(struct sched_entity *se)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -2893,7 +3116,7 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
*/
static __always_inline u32
accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
- unsigned long weight, int running, struct cfs_rq *cfs_rq)
+ unsigned long load, unsigned long runnable, int running)
{
unsigned long scale_freq, scale_cpu;
u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
@@ -2910,10 +3133,8 @@ accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
*/
if (periods) {
sa->load_sum = decay_load(sa->load_sum, periods);
- if (cfs_rq) {
- cfs_rq->runnable_load_sum =
- decay_load(cfs_rq->runnable_load_sum, periods);
- }
+ sa->runnable_load_sum =
+ decay_load(sa->runnable_load_sum, periods);
sa->util_sum = decay_load((u64)(sa->util_sum), periods);
/*
@@ -2926,11 +3147,10 @@ accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
sa->period_contrib = delta;
contrib = cap_scale(contrib, scale_freq);
- if (weight) {
- sa->load_sum += weight * contrib;
- if (cfs_rq)
- cfs_rq->runnable_load_sum += weight * contrib;
- }
+ if (load)
+ sa->load_sum += load * contrib;
+ if (runnable)
+ sa->runnable_load_sum += runnable * contrib;
if (running)
sa->util_sum += contrib * scale_cpu;
@@ -2966,8 +3186,8 @@ accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
* = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
*/
static __always_inline int
-___update_load_avg(u64 now, int cpu, struct sched_avg *sa,
- unsigned long weight, int running, struct cfs_rq *cfs_rq)
+___update_load_sum(u64 now, int cpu, struct sched_avg *sa,
+ unsigned long load, unsigned long runnable, int running)
{
u64 delta;
@@ -3000,8 +3220,8 @@ ___update_load_avg(u64 now, int cpu, struct sched_avg *sa,
* this happens during idle_balance() which calls
* update_blocked_averages()
*/
- if (!weight)
- running = 0;
+ if (!load)
+ runnable = running = 0;
/*
* Now we know we crossed measurement unit boundaries. The *_avg
@@ -3010,63 +3230,96 @@ ___update_load_avg(u64 now, int cpu, struct sched_avg *sa,
* Step 1: accumulate *_sum since last_update_time. If we haven't
* crossed period boundaries, finish.
*/
- if (!accumulate_sum(delta, cpu, sa, weight, running, cfs_rq))
+ if (!accumulate_sum(delta, cpu, sa, load, runnable, running))
return 0;
+ return 1;
+}
+
+static __always_inline void
+___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)
+{
+ u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
+
/*
* Step 2: update *_avg.
*/
- sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX - 1024 + sa->period_contrib);
- if (cfs_rq) {
- cfs_rq->runnable_load_avg =
- div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX - 1024 + sa->period_contrib);
- }
- sa->util_avg = sa->util_sum / (LOAD_AVG_MAX - 1024 + sa->period_contrib);
-
- return 1;
+ sa->load_avg = div_u64(load * sa->load_sum, divider);
+ sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
+ sa->util_avg = sa->util_sum / divider;
}
+/*
+ * sched_entity:
+ *
+ * task:
+ * se_runnable() == se_weight()
+ *
+ * group: [ see update_cfs_group() ]
+ * se_weight() = tg->weight * grq->load_avg / tg->load_avg
+ * se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
+ *
+ * load_sum := runnable_sum
+ * load_avg = se_weight(se) * runnable_avg
+ *
+ * runnable_load_sum := runnable_sum
+ * runnable_load_avg = se_runnable(se) * runnable_avg
+ *
+ * XXX collapse load_sum and runnable_load_sum
+ *
+ * cfs_rq:
+ *
+ * load_sum = \Sum se_weight(se) * se->avg.load_sum
+ * load_avg = \Sum se->avg.load_avg
+ *
+ * runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
+ *   runnable_load_avg = \Sum se->avg.runnable_load_avg
+ */
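
As a rough illustration (hypothetical numbers): a nice-0 task (se_weight() == se_runnable() == 1024) that has been runnable about half the time has runnable_avg ~= 1/2, so load_sum == runnable_load_sum ~= LOAD_AVG_MAX / 2 while load_avg == runnable_load_avg ~= 512; the weights only enter when the *_avg values are formed, or when the entity's sums are folded into its cfs_rq per the relations above.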
+
static int
__update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)
{
- return ___update_load_avg(now, cpu, &se->avg, 0, 0, NULL);
+ if (entity_is_task(se))
+ se->runnable_weight = se->load.weight;
+
+ if (___update_load_sum(now, cpu, &se->avg, 0, 0, 0)) {
+ ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+ return 1;
+ }
+
+ return 0;
}
static int
__update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- return ___update_load_avg(now, cpu, &se->avg,
- se->on_rq * scale_load_down(se->load.weight),
- cfs_rq->curr == se, NULL);
+ if (entity_is_task(se))
+ se->runnable_weight = se->load.weight;
+
+ if (___update_load_sum(now, cpu, &se->avg, !!se->on_rq, !!se->on_rq,
+ cfs_rq->curr == se)) {
+
+ ___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
+ return 1;
+ }
+
+ return 0;
}
static int
__update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
{
- return ___update_load_avg(now, cpu, &cfs_rq->avg,
- scale_load_down(cfs_rq->load.weight),
- cfs_rq->curr != NULL, cfs_rq);
-}
+ if (___update_load_sum(now, cpu, &cfs_rq->avg,
+ scale_load_down(cfs_rq->load.weight),
+ scale_load_down(cfs_rq->runnable_weight),
+ cfs_rq->curr != NULL)) {
-/*
- * Signed add and clamp on underflow.
- *
- * Explicitly do a load-store to ensure the intermediate value never hits
- * memory. This allows lockless observations without ever seeing the negative
- * values.
- */
-#define add_positive(_ptr, _val) do { \
- typeof(_ptr) ptr = (_ptr); \
- typeof(_val) val = (_val); \
- typeof(*ptr) res, var = READ_ONCE(*ptr); \
- \
- res = var + val; \
- \
- if (val < 0 && res > var) \
- res = 0; \
- \
- WRITE_ONCE(*ptr, res); \
-} while (0)
+ ___update_load_avg(&cfs_rq->avg, 1, 1);
+ return 1;
+ }
+
+ return 0;
+}
#ifdef CONFIG_FAIR_GROUP_SCHED
/**
@@ -3149,11 +3402,77 @@ void set_task_rq_fair(struct sched_entity *se,
se->avg.last_update_time = n_last_update_time;
}
-/* Take into account change of utilization of a child task group */
+
+/*
+ * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
+ * propagate its contribution. The key to this propagation is the invariant
+ * that for each group:
+ *
+ * ge->avg == grq->avg (1)
+ *
+ * _IFF_ we look at the pure running and runnable sums. Because they
+ * represent the very same entity, just at different points in the hierarchy.
+ *
+ *
+ * Per the above update_tg_cfs_util() is trivial (and still 'wrong') and
+ * simply copies the running sum over.
+ *
+ * However, update_tg_cfs_runnable() is more complex. So we have:
+ *
+ * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
+ *
+ * And since, like util, the runnable part should be directly transferable,
+ * the following would _appear_ to be the straightforward approach:
+ *
+ * grq->avg.load_avg = grq->load.weight * grq->avg.running_avg (3)
+ *
+ * And per (1) we have:
+ *
+ * ge->avg.running_avg == grq->avg.running_avg
+ *
+ * Which gives:
+ *
+ * ge->load.weight * grq->avg.load_avg
+ * ge->avg.load_avg = ----------------------------------- (4)
+ * grq->load.weight
+ *
+ * Except that is wrong!
+ *
+ * Because while for entities historical weight is not important and we
+ * really only care about our future and therefore can consider a pure
+ * runnable sum, runqueues can NOT do this.
+ *
+ * We specifically want runqueues to have a load_avg that includes
+ * historical weights. Those represent the blocked load, the load we expect
+ * to (shortly) return to us. This only works by keeping the weights as an
+ * integral part of the sum. We therefore cannot decompose as per (3).
+ *
+ * OK, so what then?
+ *
+ *
+ * Another way to look at things is:
+ *
+ * grq->avg.load_avg = \Sum se->avg.load_avg
+ *
+ * Therefore, per (2):
+ *
+ * grq->avg.load_avg = \Sum se->load.weight * se->avg.runnable_avg
+ *
+ * And the very thing we're propagating is a change in that sum (someone
+ * joined/left). So we can easily know the runnable change, which would be, per
+ * (2) the already tracked se->load_avg divided by the corresponding
+ * se->weight.
+ *
+ * Basically (4) but in differential form:
+ *
+ * d(runnable_avg) += se->avg.load_avg / se->load.weight
+ * (5)
+ * ge->avg.load_avg += ge->load.weight * d(runnable_avg)
+ */
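
As a sketch with made-up numbers: if a task with se->load.weight = 1024 and se->avg.load_avg = 512 joins a group runqueue whose group entity has ge->load.weight = 2048, then per (5) d(runnable_avg) = 512 / 1024 = 1/2 and ge->avg.load_avg is increased by 2048 * 1/2 = 1024; the same delta, with opposite sign, would be applied when the task leaves.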
+
static inline void
-update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se)
+update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
- struct cfs_rq *gcfs_rq = group_cfs_rq(se);
long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
/* Nothing to update */
@@ -3169,102 +3488,65 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX;
}
-/* Take into account change of load of a child task group */
static inline void
-update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se)
+update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
- struct cfs_rq *gcfs_rq = group_cfs_rq(se);
- long delta, load = gcfs_rq->avg.load_avg;
+ long runnable_sum = gcfs_rq->prop_runnable_sum;
+ long runnable_load_avg, load_avg;
+ s64 runnable_load_sum, load_sum;
- /*
- * If the load of group cfs_rq is null, the load of the
- * sched_entity will also be null so we can skip the formula
- */
- if (load) {
- long tg_load;
+ if (!runnable_sum)
+ return;
- /* Get tg's load and ensure tg_load > 0 */
- tg_load = atomic_long_read(&gcfs_rq->tg->load_avg) + 1;
+ gcfs_rq->prop_runnable_sum = 0;
- /* Ensure tg_load >= load and updated with current load*/
- tg_load -= gcfs_rq->tg_load_avg_contrib;
- tg_load += load;
+ load_sum = (s64)se_weight(se) * runnable_sum;
+ load_avg = div_s64(load_sum, LOAD_AVG_MAX);
- /*
- * We need to compute a correction term in the case that the
- * task group is consuming more CPU than a task of equal
- * weight. A task with a weight equals to tg->shares will have
- * a load less or equal to scale_load_down(tg->shares).
- * Similarly, the sched_entities that represent the task group
- * at parent level, can't have a load higher than
- * scale_load_down(tg->shares). And the Sum of sched_entities'
- * load must be <= scale_load_down(tg->shares).
- */
- if (tg_load > scale_load_down(gcfs_rq->tg->shares)) {
- /* scale gcfs_rq's load into tg's shares*/
- load *= scale_load_down(gcfs_rq->tg->shares);
- load /= tg_load;
- }
- }
+ add_positive(&se->avg.load_sum, runnable_sum);
+ add_positive(&se->avg.load_avg, load_avg);
- delta = load - se->avg.load_avg;
+ add_positive(&cfs_rq->avg.load_avg, load_avg);
+ add_positive(&cfs_rq->avg.load_sum, load_sum);
- /* Nothing to update */
- if (!delta)
- return;
-
- /* Set new sched_entity's load */
- se->avg.load_avg = load;
- se->avg.load_sum = se->avg.load_avg * LOAD_AVG_MAX;
+ runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
+ runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);
- /* Update parent cfs_rq load */
- add_positive(&cfs_rq->avg.load_avg, delta);
- cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * LOAD_AVG_MAX;
+ add_positive(&se->avg.runnable_load_sum, runnable_sum);
+ add_positive(&se->avg.runnable_load_avg, runnable_load_avg);
- /*
- * If the sched_entity is already enqueued, we also have to update the
- * runnable load avg.
- */
if (se->on_rq) {
- /* Update parent cfs_rq runnable_load_avg */
- add_positive(&cfs_rq->runnable_load_avg, delta);
- cfs_rq->runnable_load_sum = cfs_rq->runnable_load_avg * LOAD_AVG_MAX;
+ add_positive(&cfs_rq->avg.runnable_load_avg, runnable_load_avg);
+ add_positive(&cfs_rq->avg.runnable_load_sum, runnable_load_sum);
}
}
-static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq)
-{
- cfs_rq->propagate_avg = 1;
-}
-
-static inline int test_and_clear_tg_cfs_propagate(struct sched_entity *se)
+static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
{
- struct cfs_rq *cfs_rq = group_cfs_rq(se);
-
- if (!cfs_rq->propagate_avg)
- return 0;
-
- cfs_rq->propagate_avg = 0;
- return 1;
+ cfs_rq->propagate = 1;
+ cfs_rq->prop_runnable_sum += runnable_sum;
}
/* Update task and its cfs_rq load average */
static inline int propagate_entity_load_avg(struct sched_entity *se)
{
- struct cfs_rq *cfs_rq;
+ struct cfs_rq *cfs_rq, *gcfs_rq;
if (entity_is_task(se))
return 0;
- if (!test_and_clear_tg_cfs_propagate(se))
+ gcfs_rq = group_cfs_rq(se);
+ if (!gcfs_rq->propagate)
return 0;
+ gcfs_rq->propagate = 0;
+
cfs_rq = cfs_rq_of(se);
- set_tg_cfs_propagate(cfs_rq);
+ add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
- update_tg_cfs_util(cfs_rq, se);
- update_tg_cfs_load(cfs_rq, se);
+ update_tg_cfs_util(cfs_rq, se, gcfs_rq);
+ update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
return 1;
}
@@ -3288,7 +3570,7 @@ static inline bool skip_blocked_update(struct sched_entity *se)
* If there is a pending propagation, we have to update the load and
* the utilization of the sched_entity:
*/
- if (gcfs_rq->propagate_avg)
+ if (gcfs_rq->propagate)
return false;
/*
@@ -3308,27 +3590,10 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
return 0;
}
-static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
+static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
#endif /* CONFIG_FAIR_GROUP_SCHED */
-/*
- * Unsigned subtract and clamp on underflow.
- *
- * Explicitly do a load-store to ensure the intermediate value never hits
- * memory. This allows lockless observations without ever seeing the negative
- * values.
- */
-#define sub_positive(_ptr, _val) do { \
- typeof(_ptr) ptr = (_ptr); \
- typeof(*ptr) val = (_val); \
- typeof(*ptr) res, var = READ_ONCE(*ptr); \
- res = var - val; \
- if (res > var) \
- res = 0; \
- WRITE_ONCE(*ptr, res); \
-} while (0)
-
/**
* update_cfs_rq_load_avg - update the cfs_rq's load/util averages
* @now: current time, as per cfs_rq_clock_task()
@@ -3348,65 +3613,45 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
+ unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0;
struct sched_avg *sa = &cfs_rq->avg;
- int decayed, removed_load = 0, removed_util = 0;
+ int decayed = 0;
- if (atomic_long_read(&cfs_rq->removed_load_avg)) {
- s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
+ if (cfs_rq->removed.nr) {
+ unsigned long r;
+ u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
+
+ raw_spin_lock(&cfs_rq->removed.lock);
+ swap(cfs_rq->removed.util_avg, removed_util);
+ swap(cfs_rq->removed.load_avg, removed_load);
+ swap(cfs_rq->removed.runnable_sum, removed_runnable_sum);
+ cfs_rq->removed.nr = 0;
+ raw_spin_unlock(&cfs_rq->removed.lock);
+
+ r = removed_load;
sub_positive(&sa->load_avg, r);
- sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
- removed_load = 1;
- set_tg_cfs_propagate(cfs_rq);
- }
+ sub_positive(&sa->load_sum, r * divider);
- if (atomic_long_read(&cfs_rq->removed_util_avg)) {
- long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
+ r = removed_util;
sub_positive(&sa->util_avg, r);
- sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
- removed_util = 1;
- set_tg_cfs_propagate(cfs_rq);
+ sub_positive(&sa->util_sum, r * divider);
+
+ add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum);
+
+ decayed = 1;
}
- decayed = __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq);
+ decayed |= __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq);
#ifndef CONFIG_64BIT
smp_wmb();
cfs_rq->load_last_update_time_copy = sa->last_update_time;
#endif
- if (decayed || removed_util)
+ if (decayed)
cfs_rq_util_change(cfs_rq);
- return decayed || removed_load;
-}
-
-/*
- * Optional action to be done while updating the load average
- */
-#define UPDATE_TG 0x1
-#define SKIP_AGE_LOAD 0x2
-
-/* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct sched_entity *se, int flags)
-{
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- u64 now = cfs_rq_clock_task(cfs_rq);
- struct rq *rq = rq_of(cfs_rq);
- int cpu = cpu_of(rq);
- int decayed;
-
- /*
- * Track task load average for carrying it to new CPU after migrated, and
- * track group sched_entity load average for task_h_load calc in migration
- */
- if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
- __update_load_avg_se(now, cpu, cfs_rq, se);
-
- decayed = update_cfs_rq_load_avg(now, cfs_rq);
- decayed |= propagate_entity_load_avg(se);
-
- if (decayed && (flags & UPDATE_TG))
- update_tg_load_avg(cfs_rq, 0);
+ return decayed;
}
/**
@@ -3419,12 +3664,39 @@ static inline void update_load_avg(struct sched_entity *se, int flags)
*/
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
+ u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
+
+ /*
+ * When we attach the @se to the @cfs_rq, we must align the decay
+ * window because without that, really weird and wonderful things can
+ * happen.
+ *
+ * XXX illustrate
+ */
se->avg.last_update_time = cfs_rq->avg.last_update_time;
- cfs_rq->avg.load_avg += se->avg.load_avg;
- cfs_rq->avg.load_sum += se->avg.load_sum;
+ se->avg.period_contrib = cfs_rq->avg.period_contrib;
+
+ /*
+ * Hell(o) Nasty stuff.. we need to recompute _sum based on the new
+ * period_contrib. This isn't strictly correct, but since we're
+ * entirely outside of the PELT hierarchy, nobody cares if we truncate
+ * _sum a little.
+ */
+ se->avg.util_sum = se->avg.util_avg * divider;
+
+ se->avg.load_sum = divider;
+ if (se_weight(se)) {
+ se->avg.load_sum =
+ div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
+ }
+
+ se->avg.runnable_load_sum = se->avg.load_sum;
+
+ enqueue_load_avg(cfs_rq, se);
cfs_rq->avg.util_avg += se->avg.util_avg;
cfs_rq->avg.util_sum += se->avg.util_sum;
- set_tg_cfs_propagate(cfs_rq);
+
+ add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
cfs_rq_util_change(cfs_rq);
}
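
In other words, the recomputation above just inverts the averaging step load_avg = se_weight * load_sum / divider, so load_avg is preserved under the attached runqueue's divider. With hypothetical numbers: se_weight = 1024, load_avg = 512 and a fully built-up divider give load_sum = 512 * divider / 1024 = divider / 2.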
@@ -3439,39 +3711,47 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
*/
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
-
- sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
- sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
+ dequeue_load_avg(cfs_rq, se);
sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
- set_tg_cfs_propagate(cfs_rq);
+
+ add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
cfs_rq_util_change(cfs_rq);
}
-/* Add the load generated by se into cfs_rq's load average */
-static inline void
-enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+/*
+ * Optional action to be done while updating the load average
+ */
+#define UPDATE_TG 0x1
+#define SKIP_AGE_LOAD 0x2
+#define DO_ATTACH 0x4
+
+/* Update task and its cfs_rq load average */
+static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
- struct sched_avg *sa = &se->avg;
+ u64 now = cfs_rq_clock_task(cfs_rq);
+ struct rq *rq = rq_of(cfs_rq);
+ int cpu = cpu_of(rq);
+ int decayed;
+
+ /*
+ * Track task load average for carrying it to new CPU after migrated, and
+ * track group sched_entity load average for task_h_load calc in migration
+ */
+ if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
+ __update_load_avg_se(now, cpu, cfs_rq, se);
- cfs_rq->runnable_load_avg += sa->load_avg;
- cfs_rq->runnable_load_sum += sa->load_sum;
+ decayed = update_cfs_rq_load_avg(now, cfs_rq);
+ decayed |= propagate_entity_load_avg(se);
+
+ if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
- if (!sa->last_update_time) {
attach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, 0);
- }
-}
-/* Remove the runnable load generated by se from cfs_rq's runnable load average */
-static inline void
-dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- cfs_rq->runnable_load_avg =
- max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
- cfs_rq->runnable_load_sum =
- max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
+ } else if (decayed && (flags & UPDATE_TG))
+ update_tg_load_avg(cfs_rq, 0);
}
#ifndef CONFIG_64BIT
@@ -3515,6 +3795,7 @@ void sync_entity_load_avg(struct sched_entity *se)
void remove_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ unsigned long flags;
/*
* tasks cannot exit without having gone through wake_up_new_task() ->
@@ -3527,13 +3808,18 @@ void remove_entity_load_avg(struct sched_entity *se)
*/
sync_entity_load_avg(se);
- atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
- atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
+
+ raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
+ ++cfs_rq->removed.nr;
+ cfs_rq->removed.util_avg += se->avg.util_avg;
+ cfs_rq->removed.load_avg += se->avg.load_avg;
+ cfs_rq->removed.runnable_sum += se->avg.load_sum; /* == runnable_sum */
+ raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
}
static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
{
- return cfs_rq->runnable_load_avg;
+ return cfs_rq->avg.runnable_load_avg;
}
static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
@@ -3553,16 +3839,13 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
#define UPDATE_TG 0x0
#define SKIP_AGE_LOAD 0x0
+#define DO_ATTACH 0x0
-static inline void update_load_avg(struct sched_entity *se, int not_used1)
+static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
{
- cfs_rq_util_change(cfs_rq_of(se));
+ cfs_rq_util_change(cfs_rq);
}
-static inline void
-enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
-static inline void
-dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
static inline void remove_entity_load_avg(struct sched_entity *se) {}
static inline void
@@ -3707,9 +3990,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* its group cfs_rq
* - Add its new weight to cfs_rq->load.weight
*/
- update_load_avg(se, UPDATE_TG);
- enqueue_entity_load_avg(cfs_rq, se);
- update_cfs_shares(se);
+ update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
+ update_cfs_group(se);
+ enqueue_runnable_load_avg(cfs_rq, se);
account_entity_enqueue(cfs_rq, se);
if (flags & ENQUEUE_WAKEUP)
@@ -3791,8 +4074,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* - For group entity, update its weight to reflect the new share
* of its group cfs_rq.
*/
- update_load_avg(se, UPDATE_TG);
- dequeue_entity_load_avg(cfs_rq, se);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
+ dequeue_runnable_load_avg(cfs_rq, se);
update_stats_dequeue(cfs_rq, se, flags);
@@ -3815,7 +4098,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
/* return excess runtime on last dequeue */
return_cfs_rq_runtime(cfs_rq);
- update_cfs_shares(se);
+ update_cfs_group(se);
/*
* Now advance min_vruntime if @se was the entity holding it back,
@@ -3879,7 +4162,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
*/
update_stats_wait_end(cfs_rq, se);
__dequeue_entity(cfs_rq, se);
- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
}
update_stats_curr_start(cfs_rq, se);
@@ -3981,7 +4264,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
/* Put 'current' back into the tree. */
__enqueue_entity(cfs_rq, prev);
/* in !on_rq case, update occurred at dequeue */
- update_load_avg(prev, 0);
+ update_load_avg(cfs_rq, prev, 0);
}
cfs_rq->curr = NULL;
}
@@ -3997,8 +4280,8 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
/*
* Ensure that runnable average is periodically updated.
*/
- update_load_avg(curr, UPDATE_TG);
- update_cfs_shares(curr);
+ update_load_avg(cfs_rq, curr, UPDATE_TG);
+ update_cfs_group(curr);
#ifdef CONFIG_SCHED_HRTICK
/*
@@ -4915,8 +5198,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
- update_load_avg(se, UPDATE_TG);
- update_cfs_shares(se);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
+ update_cfs_group(se);
}
if (!se)
@@ -4974,8 +5257,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
- update_load_avg(se, UPDATE_TG);
- update_cfs_shares(se);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
+ update_cfs_group(se);
}
if (!se)
@@ -5449,6 +5732,8 @@ static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.
+ *
+ * Assumes p is allowed on at least one CPU in sd.
*/
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
@@ -5456,8 +5741,9 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
{
struct sched_group *idlest = NULL, *group = sd->groups;
struct sched_group *most_spare_sg = NULL;
- unsigned long min_runnable_load = ULONG_MAX, this_runnable_load = 0;
- unsigned long min_avg_load = ULONG_MAX, this_avg_load = 0;
+ unsigned long min_runnable_load = ULONG_MAX;
+ unsigned long this_runnable_load = ULONG_MAX;
+ unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
unsigned long most_spare = 0, this_spare = 0;
int load_idx = sd->forkexec_idx;
int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
@@ -5578,10 +5864,10 @@ skip_spare:
}
/*
- * find_idlest_cpu - find the idlest cpu among the cpus in group.
+ * find_idlest_group_cpu - find the idlest cpu among the cpus in group.
*/
static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
unsigned long load, min_load = ULONG_MAX;
unsigned int min_exit_latency = UINT_MAX;
@@ -5630,6 +5916,53 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
}
+static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
+ int cpu, int prev_cpu, int sd_flag)
+{
+ int new_cpu = cpu;
+
+ if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
+ return prev_cpu;
+
+ while (sd) {
+ struct sched_group *group;
+ struct sched_domain *tmp;
+ int weight;
+
+ if (!(sd->flags & sd_flag)) {
+ sd = sd->child;
+ continue;
+ }
+
+ group = find_idlest_group(sd, p, cpu, sd_flag);
+ if (!group) {
+ sd = sd->child;
+ continue;
+ }
+
+ new_cpu = find_idlest_group_cpu(group, p, cpu);
+ if (new_cpu == cpu) {
+ /* Now try balancing at a lower domain level of cpu */
+ sd = sd->child;
+ continue;
+ }
+
+ /* Now try balancing at a lower domain level of new_cpu */
+ cpu = new_cpu;
+ weight = sd->span_weight;
+ sd = NULL;
+ for_each_domain(cpu, tmp) {
+ if (weight <= tmp->span_weight)
+ break;
+ if (tmp->flags & sd_flag)
+ sd = tmp;
+ }
+ /* while loop will break here if sd == NULL */
+ }
+
+ return new_cpu;
+}
+
#ifdef CONFIG_SCHED_SMT
static inline void set_idle_cores(int cpu, int val)
@@ -5982,50 +6315,30 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
new_cpu = cpu;
}
+ if (sd && !(sd_flag & SD_BALANCE_FORK)) {
+ /*
+ * We're going to need the task's util for capacity_spare_wake
+ * in find_idlest_group. Sync it up to prev_cpu's
+ * last_update_time.
+ */
+ sync_entity_load_avg(&p->se);
+ }
+
if (!sd) {
- pick_cpu:
+pick_cpu:
if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
- } else while (sd) {
- struct sched_group *group;
- int weight;
-
- if (!(sd->flags & sd_flag)) {
- sd = sd->child;
- continue;
- }
-
- group = find_idlest_group(sd, p, cpu, sd_flag);
- if (!group) {
- sd = sd->child;
- continue;
- }
-
- new_cpu = find_idlest_cpu(group, p, cpu);
- if (new_cpu == -1 || new_cpu == cpu) {
- /* Now try balancing at a lower domain level of cpu */
- sd = sd->child;
- continue;
- }
-
- /* Now try balancing at a lower domain level of new_cpu */
- cpu = new_cpu;
- weight = sd->span_weight;
- sd = NULL;
- for_each_domain(cpu, tmp) {
- if (weight <= tmp->span_weight)
- break;
- if (tmp->flags & sd_flag)
- sd = tmp;
- }
- /* while loop will break here if sd == NULL */
+ } else {
+ new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
}
rcu_read_unlock();
return new_cpu;
}
+static void detach_entity_cfs_rq(struct sched_entity *se);
+
/*
* Called immediately before a task is migrated to a new cpu; task_cpu(p) and
* cfs_rq_of(p) references at time of call are still valid and identify the
@@ -6059,14 +6372,25 @@ static void migrate_task_rq_fair(struct task_struct *p)
se->vruntime -= min_vruntime;
}
- /*
- * We are supposed to update the task to "current" time, then its up to date
- * and ready to go to new CPU/cfs_rq. But we have difficulty in getting
- * what current time is, so simply throw away the out-of-date time. This
- * will result in the wakee task is less decayed, but giving the wakee more
- * load sounds not bad.
- */
- remove_entity_load_avg(&p->se);
+ if (p->on_rq == TASK_ON_RQ_MIGRATING) {
+ /*
+ * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
+ * rq->lock and can modify state directly.
+ */
+ lockdep_assert_held(&task_rq(p)->lock);
+ detach_entity_cfs_rq(&p->se);
+
+ } else {
+ /*
+ * We are supposed to update the task to "current" time, then
+		 * We are supposed to update the task to "current" time, then
+		 * it's up to date and ready to go to the new CPU/cfs_rq. But we
+		 * have difficulty getting what the current time is, so simply
+		 * throw away the out-of-date time. This will result in the
+		 * wakee task being less decayed, but giving the wakee more
+		 * load does not sound bad.
+ remove_entity_load_avg(&p->se);
+ }
/* Tell new CPU we are migrated */
p->se.avg.last_update_time = 0;
@@ -6334,10 +6658,7 @@ again:
set_next_entity(cfs_rq, se);
}
- if (hrtick_enabled(rq))
- hrtick_start_fair(rq, p);
-
- return p;
+ goto done;
simple:
#endif
@@ -6351,6 +6672,16 @@ simple:
p = task_of(se);
+done: __maybe_unused
+#ifdef CONFIG_SMP
+ /*
+ * Move the next running task to the front of
+	 * Move the next running task to the front of
+	 * the list, so that our cfs_tasks list becomes
+	 * an MRU one.
+ list_move(&p->se.group_node, &rq->cfs_tasks);
+#endif
+
if (hrtick_enabled(rq))
hrtick_start_fair(rq, p);
@@ -6786,11 +7117,12 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
*/
static struct task_struct *detach_one_task(struct lb_env *env)
{
- struct task_struct *p, *n;
+ struct task_struct *p;
lockdep_assert_held(&env->src_rq->lock);
- list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
+ list_for_each_entry_reverse(p,
+ &env->src_rq->cfs_tasks, se.group_node) {
if (!can_migrate_task(p, env))
continue;
@@ -6836,7 +7168,7 @@ static int detach_tasks(struct lb_env *env)
if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
break;
- p = list_first_entry(tasks, struct task_struct, se.group_node);
+ p = list_last_entry(tasks, struct task_struct, se.group_node);
env->loop++;
/* We've more or less seen every task there is, call it quits */
@@ -6886,7 +7218,7 @@ static int detach_tasks(struct lb_env *env)
continue;
next:
- list_move_tail(&p->se.group_node, tasks);
+ list_move(&p->se.group_node, tasks);
}
/*
@@ -6962,7 +7294,7 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
if (cfs_rq->avg.util_sum)
return false;
- if (cfs_rq->runnable_load_sum)
+ if (cfs_rq->avg.runnable_load_sum)
return false;
return true;
@@ -6994,7 +7326,7 @@ static void update_blocked_averages(int cpu)
/* Propagate pending load changes to the parent, if any: */
se = cfs_rq->tg->se[cpu];
if (se && !skip_blocked_update(se))
- update_load_avg(se, 0);
+ update_load_avg(cfs_rq_of(se), se, 0);
/*
* There can be a lot of idle CPU cgroups. Don't let fully
@@ -7875,8 +8207,11 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
if (busiest->group_type == group_imbalanced)
goto force_balance;
- /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
- if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
+ /*
+ * When dst_cpu is idle, prevent SMP nice and/or asymmetric group
+ * capacities from resulting in underutilization due to avg_load.
+ */
+ if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) &&
busiest->group_no_capacity)
goto force_balance;
@@ -8693,7 +9028,7 @@ void nohz_balance_enter_idle(int cpu)
return;
/* Spare idle load balancing on CPUs that don't want to be disturbed: */
- if (!is_housekeeping_cpu(cpu))
+ if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
return;
if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
@@ -9158,7 +9493,7 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
if (cfs_rq_throttled(cfs_rq))
break;
- update_load_avg(se, UPDATE_TG);
+ update_load_avg(cfs_rq, se, UPDATE_TG);
}
}
#else
@@ -9170,7 +9505,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
/* Catch up with the cfs_rq and remove our load when we leave */
- update_load_avg(se, 0);
+ update_load_avg(cfs_rq, se, 0);
detach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, false);
propagate_entity_cfs_rq(se);
@@ -9189,7 +9524,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
#endif
/* Synchronize entity with its cfs_rq */
- update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
+ update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
attach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, false);
propagate_entity_cfs_rq(se);
@@ -9271,11 +9606,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
#ifdef CONFIG_SMP
-#ifdef CONFIG_FAIR_GROUP_SCHED
- cfs_rq->propagate_avg = 0;
-#endif
- atomic_long_set(&cfs_rq->removed_load_avg, 0);
- atomic_long_set(&cfs_rq->removed_util_avg, 0);
+ raw_spin_lock_init(&cfs_rq->removed.lock);
#endif
}
@@ -9473,8 +9804,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
for_each_sched_entity(se) {
- update_load_avg(se, UPDATE_TG);
- update_cfs_shares(se);
+ update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
+ update_cfs_group(se);
}
rq_unlock_irqrestore(rq, &rf);
}
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 257f4f0b4532..7dae9eb8c042 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -209,6 +209,7 @@ exit_idle:
*/
static void do_idle(void)
{
+ int cpu = smp_processor_id();
/*
* If the arch has a polling bit, we maintain an invariant:
*
@@ -219,14 +220,13 @@ static void do_idle(void)
*/
__current_set_polling();
- quiet_vmstat();
tick_nohz_idle_enter();
while (!need_resched()) {
check_pgt_cache();
rmb();
- if (cpu_is_offline(smp_processor_id())) {
+ if (cpu_is_offline(cpu)) {
cpuhp_report_idle_dead();
arch_cpu_idle_dead();
}
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
new file mode 100644
index 000000000000..b71b436f59f2
--- /dev/null
+++ b/kernel/sched/isolation.c
@@ -0,0 +1,155 @@
+/*
+ * Housekeeping management. Manage the targets for routine code that can run on
+ * any CPU: unbound workqueues, timers, kthreads and any offloadable work.
+ *
+ * Copyright (C) 2017 Red Hat, Inc., Frederic Weisbecker
+ *
+ */
+
+#include <linux/sched/isolation.h>
+#include <linux/tick.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/static_key.h>
+#include <linux/ctype.h>
+
+DEFINE_STATIC_KEY_FALSE(housekeeping_overriden);
+EXPORT_SYMBOL_GPL(housekeeping_overriden);
+static cpumask_var_t housekeeping_mask;
+static unsigned int housekeeping_flags;
+
+int housekeeping_any_cpu(enum hk_flags flags)
+{
+ if (static_branch_unlikely(&housekeeping_overriden))
+ if (housekeeping_flags & flags)
+ return cpumask_any_and(housekeeping_mask, cpu_online_mask);
+ return smp_processor_id();
+}
+EXPORT_SYMBOL_GPL(housekeeping_any_cpu);
+
+const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
+{
+ if (static_branch_unlikely(&housekeeping_overriden))
+ if (housekeeping_flags & flags)
+ return housekeeping_mask;
+ return cpu_possible_mask;
+}
+EXPORT_SYMBOL_GPL(housekeeping_cpumask);
+
+void housekeeping_affine(struct task_struct *t, enum hk_flags flags)
+{
+ if (static_branch_unlikely(&housekeeping_overriden))
+ if (housekeeping_flags & flags)
+ set_cpus_allowed_ptr(t, housekeeping_mask);
+}
+EXPORT_SYMBOL_GPL(housekeeping_affine);
+
+bool housekeeping_test_cpu(int cpu, enum hk_flags flags)
+{
+ if (static_branch_unlikely(&housekeeping_overriden))
+ if (housekeeping_flags & flags)
+ return cpumask_test_cpu(cpu, housekeeping_mask);
+ return true;
+}
+EXPORT_SYMBOL_GPL(housekeeping_test_cpu);
+
+void __init housekeeping_init(void)
+{
+ if (!housekeeping_flags)
+ return;
+
+ static_branch_enable(&housekeeping_overriden);
+
+ /* We need at least one CPU to handle housekeeping work */
+ WARN_ON_ONCE(cpumask_empty(housekeeping_mask));
+}
+
+static int __init housekeeping_setup(char *str, enum hk_flags flags)
+{
+ cpumask_var_t non_housekeeping_mask;
+ int err;
+
+ alloc_bootmem_cpumask_var(&non_housekeeping_mask);
+ err = cpulist_parse(str, non_housekeeping_mask);
+ if (err < 0 || cpumask_last(non_housekeeping_mask) >= nr_cpu_ids) {
+ pr_warn("Housekeeping: nohz_full= or isolcpus= incorrect CPU range\n");
+ free_bootmem_cpumask_var(non_housekeeping_mask);
+ return 0;
+ }
+
+ if (!housekeeping_flags) {
+ alloc_bootmem_cpumask_var(&housekeeping_mask);
+ cpumask_andnot(housekeeping_mask,
+ cpu_possible_mask, non_housekeeping_mask);
+ if (cpumask_empty(housekeeping_mask))
+ cpumask_set_cpu(smp_processor_id(), housekeeping_mask);
+ } else {
+ cpumask_var_t tmp;
+
+ alloc_bootmem_cpumask_var(&tmp);
+ cpumask_andnot(tmp, cpu_possible_mask, non_housekeeping_mask);
+ if (!cpumask_equal(tmp, housekeeping_mask)) {
+ pr_warn("Housekeeping: nohz_full= must match isolcpus=\n");
+ free_bootmem_cpumask_var(tmp);
+ free_bootmem_cpumask_var(non_housekeeping_mask);
+ return 0;
+ }
+ free_bootmem_cpumask_var(tmp);
+ }
+
+ if ((flags & HK_FLAG_TICK) && !(housekeeping_flags & HK_FLAG_TICK)) {
+ if (IS_ENABLED(CONFIG_NO_HZ_FULL)) {
+ tick_nohz_full_setup(non_housekeeping_mask);
+ } else {
+ pr_warn("Housekeeping: nohz unsupported."
+ " Build with CONFIG_NO_HZ_FULL\n");
+ free_bootmem_cpumask_var(non_housekeeping_mask);
+ return 0;
+ }
+ }
+
+ housekeeping_flags |= flags;
+
+ free_bootmem_cpumask_var(non_housekeeping_mask);
+
+ return 1;
+}
+
+static int __init housekeeping_nohz_full_setup(char *str)
+{
+ unsigned int flags;
+
+ flags = HK_FLAG_TICK | HK_FLAG_TIMER | HK_FLAG_RCU | HK_FLAG_MISC;
+
+ return housekeeping_setup(str, flags);
+}
+__setup("nohz_full=", housekeeping_nohz_full_setup);
+
+static int __init housekeeping_isolcpus_setup(char *str)
+{
+ unsigned int flags = 0;
+
+ while (isalpha(*str)) {
+ if (!strncmp(str, "nohz,", 5)) {
+ str += 5;
+ flags |= HK_FLAG_TICK;
+ continue;
+ }
+
+ if (!strncmp(str, "domain,", 7)) {
+ str += 7;
+ flags |= HK_FLAG_DOMAIN;
+ continue;
+ }
+
+ pr_warn("isolcpus: Error, unknown flag\n");
+ return 0;
+ }
+
+ /* Default behaviour for isolcpus without flags */
+ if (!flags)
+ flags |= HK_FLAG_DOMAIN;
+
+ return housekeeping_setup(str, flags);
+}
+__setup("isolcpus=", housekeeping_isolcpus_setup);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 3c96c80e0992..4056c19ca3f0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -74,10 +74,6 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
-#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
-static void push_irq_work_func(struct irq_work *work);
-#endif
-
void init_rt_rq(struct rt_rq *rt_rq)
{
struct rt_prio_array *array;
@@ -97,13 +93,6 @@ void init_rt_rq(struct rt_rq *rt_rq)
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
plist_head_init(&rt_rq->pushable_tasks);
-
-#ifdef HAVE_RT_PUSH_IPI
- rt_rq->push_flags = 0;
- rt_rq->push_cpu = nr_cpu_ids;
- raw_spin_lock_init(&rt_rq->push_lock);
- init_irq_work(&rt_rq->push_work, push_irq_work_func);
-#endif
#endif /* CONFIG_SMP */
 /* We start in dequeued state, because no RT tasks are queued */
rt_rq->rt_queued = 0;
@@ -980,7 +969,7 @@ static void update_curr_rt(struct rq *rq)
account_group_exec_runtime(curr, delta_exec);
curr->se.exec_start = rq_clock_task(rq);
- cpuacct_charge(curr, delta_exec);
+ cgroup_account_cputime(curr, delta_exec);
sched_rt_avg_update(rq, delta_exec);
@@ -1876,241 +1865,166 @@ static void push_rt_tasks(struct rq *rq)
}
#ifdef HAVE_RT_PUSH_IPI
+
/*
- * The search for the next cpu always starts at rq->cpu and ends
- * when we reach rq->cpu again. It will never return rq->cpu.
- * This returns the next cpu to check, or nr_cpu_ids if the loop
- * is complete.
+ * When a high priority task schedules out from a CPU and a lower priority
+ * task is scheduled in, a check is made to see if there's any RT tasks
+ * on other CPUs that are waiting to run because a higher priority RT task
+ * is currently running on its CPU. In this case, the CPU with multiple RT
+ * tasks queued on it (overloaded) needs to be notified that a CPU has opened
+ * up that may be able to run one of its non-running queued RT tasks.
+ *
+ * All CPUs with overloaded RT tasks need to be notified as there is currently
+ * no way to know which of these CPUs have the highest priority task waiting
+ * to run. Instead of trying to take a spinlock on each of these CPUs,
+ * which has been shown to cause large latency when done on machines with
+ * many CPUs, an IPI is sent to the CPUs to have them push off the
+ * overloaded RT tasks waiting to run.
+ *
+ * Just sending an IPI to each of the CPUs is also an issue, as on large
+ * count CPU machines, this can cause an IPI storm on a CPU, especially
+ * if it's the only CPU with multiple RT tasks queued, and a large number
+ * of CPUs scheduling a lower priority task at the same time.
+ *
+ * Each root domain has its own irq work function that can iterate over
+ * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
+ * tasks must be checked whether one or many CPUs are lowering their
+ * priority, there's a single irq work iterator that will try to
+ * push off RT tasks that are waiting to run.
+ *
+ * When a CPU schedules a lower priority task, it will kick off the
+ * irq work iterator that will jump to each CPU with overloaded RT tasks.
+ * As it only takes the first CPU that schedules a lower priority task
+ * to start the process, the rto_start variable is incremented and if
+ * the atomic result is one, then that CPU will try to take the rto_lock.
+ * This prevents high contention on the lock as the process handles all
+ * CPUs scheduling lower priority tasks.
+ *
+ * All CPUs that are scheduling a lower priority task will increment the
+ * rto_loop_next variable. This will make sure that the irq work iterator
+ * checks all RT overloaded CPUs whenever a CPU schedules a new lower
+ * priority task, even if the iterator is in the middle of a scan. Incrementing
+ * the rto_loop_next will cause the iterator to perform another scan.
*
- * rq->rt.push_cpu holds the last cpu returned by this function,
- * or if this is the first instance, it must hold rq->cpu.
*/
static int rto_next_cpu(struct rq *rq)
{
- int prev_cpu = rq->rt.push_cpu;
+ struct root_domain *rd = rq->rd;
+ int next;
int cpu;
- cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
-
/*
- * If the previous cpu is less than the rq's CPU, then it already
- * passed the end of the mask, and has started from the beginning.
- * We end if the next CPU is greater or equal to rq's CPU.
+ * When starting the IPI RT pushing, the rto_cpu is set to -1,
+ * rto_next_cpu() will simply return the first CPU found in
+ * the rto_mask.
+ *
+ * If rto_next_cpu() is called with rto_cpu being a valid CPU, it
+ * will return the next CPU found in the rto_mask.
+ *
+ * If there are no more CPUs left in the rto_mask, then a check is made
+ * against rto_loop and rto_loop_next. rto_loop is only updated with
+ * the rto_lock held, but any CPU may increment the rto_loop_next
+ * without any locking.
*/
- if (prev_cpu < rq->cpu) {
- if (cpu >= rq->cpu)
- return nr_cpu_ids;
+ for (;;) {
- } else if (cpu >= nr_cpu_ids) {
- /*
- * We passed the end of the mask, start at the beginning.
- * If the result is greater or equal to the rq's CPU, then
- * the loop is finished.
- */
- cpu = cpumask_first(rq->rd->rto_mask);
- if (cpu >= rq->cpu)
- return nr_cpu_ids;
- }
- rq->rt.push_cpu = cpu;
+ /* When rto_cpu is -1 this acts like cpumask_first() */
+ cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
- /* Return cpu to let the caller know if the loop is finished or not */
- return cpu;
-}
+ rd->rto_cpu = cpu;
-static int find_next_push_cpu(struct rq *rq)
-{
- struct rq *next_rq;
- int cpu;
+ if (cpu < nr_cpu_ids)
+ return cpu;
- while (1) {
- cpu = rto_next_cpu(rq);
- if (cpu >= nr_cpu_ids)
- break;
- next_rq = cpu_rq(cpu);
+ rd->rto_cpu = -1;
+
+ /*
+ * ACQUIRE ensures we see the @rto_mask changes
+ * made prior to the @next value observed.
+ *
+ * Matches WMB in rt_set_overload().
+ */
+ next = atomic_read_acquire(&rd->rto_loop_next);
- /* Make sure the next rq can push to this rq */
- if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
+ if (rd->rto_loop == next)
break;
+
+ rd->rto_loop = next;
}
- return cpu;
+ return -1;
}
-#define RT_PUSH_IPI_EXECUTING 1
-#define RT_PUSH_IPI_RESTART 2
+static inline bool rto_start_trylock(atomic_t *v)
+{
+ return !atomic_cmpxchg_acquire(v, 0, 1);
+}
-/*
- * When a high priority task schedules out from a CPU and a lower priority
- * task is scheduled in, a check is made to see if there's any RT tasks
- * on other CPUs that are waiting to run because a higher priority RT task
- * is currently running on its CPU. In this case, the CPU with multiple RT
- * tasks queued on it (overloaded) needs to be notified that a CPU has opened
- * up that may be able to run one of its non-running queued RT tasks.
- *
- * On large CPU boxes, there's the case that several CPUs could schedule
- * a lower priority task at the same time, in which case it will look for
- * any overloaded CPUs that it could pull a task from. To do this, the runqueue
- * lock must be taken from that overloaded CPU. Having 10s of CPUs all fighting
- * for a single overloaded CPU's runqueue lock can produce a large latency.
- * (This has actually been observed on large boxes running cyclictest).
- * Instead of taking the runqueue lock of the overloaded CPU, each of the
- * CPUs that scheduled a lower priority task simply sends an IPI to the
- * overloaded CPU. An IPI is much cheaper than taking an runqueue lock with
- * lots of contention. The overloaded CPU will look to push its non-running
- * RT task off, and if it does, it can then ignore the other IPIs coming
- * in, and just pass those IPIs off to any other overloaded CPU.
- *
- * When a CPU schedules a lower priority task, it only sends an IPI to
- * the "next" CPU that has overloaded RT tasks. This prevents IPI storms,
- * as having 10 CPUs scheduling lower priority tasks and 10 CPUs with
- * RT overloaded tasks, would cause 100 IPIs to go out at once.
- *
- * The overloaded RT CPU, when receiving an IPI, will try to push off its
- * overloaded RT tasks and then send an IPI to the next CPU that has
- * overloaded RT tasks. This stops when all CPUs with overloaded RT tasks
- * have completed. Just because a CPU may have pushed off its own overloaded
- * RT task does not mean it should stop sending the IPI around to other
- * overloaded CPUs. There may be another RT task waiting to run on one of
- * those CPUs that are of higher priority than the one that was just
- * pushed.
- *
- * An optimization that could possibly be made is to make a CPU array similar
- * to the cpupri array mask of all running RT tasks, but for the overloaded
- * case, then the IPI could be sent to only the CPU with the highest priority
- * RT task waiting, and that CPU could send off further IPIs to the CPU with
- * the next highest waiting task. Since the overloaded case is much less likely
- * to happen, the complexity of this implementation may not be worth it.
- * Instead, just send an IPI around to all overloaded CPUs.
- *
- * The rq->rt.push_flags holds the status of the IPI that is going around.
- * A run queue can only send out a single IPI at a time. The possible flags
- * for rq->rt.push_flags are:
- *
- * (None or zero): No IPI is going around for the current rq
- * RT_PUSH_IPI_EXECUTING: An IPI for the rq is being passed around
- * RT_PUSH_IPI_RESTART: The priority of the running task for the rq
- * has changed, and the IPI should restart
- * circulating the overloaded CPUs again.
- *
- * rq->rt.push_cpu contains the CPU that is being sent the IPI. It is updated
- * before sending to the next CPU.
- *
- * Instead of having all CPUs that schedule a lower priority task send
- * an IPI to the same "first" CPU in the RT overload mask, they send it
- * to the next overloaded CPU after their own CPU. This helps distribute
- * the work when there's more than one overloaded CPU and multiple CPUs
- * scheduling in lower priority tasks.
- *
- * When a rq schedules a lower priority task than what was currently
- * running, the next CPU with overloaded RT tasks is examined first.
- * That is, if CPU 1 and 5 are overloaded, and CPU 3 schedules a lower
- * priority task, it will send an IPI first to CPU 5, then CPU 5 will
- * send to CPU 1 if it is still overloaded. CPU 1 will clear the
- * rq->rt.push_flags if RT_PUSH_IPI_RESTART is not set.
- *
- * The first CPU to notice IPI_RESTART is set, will clear that flag and then
- * send an IPI to the next overloaded CPU after the rq->cpu and not the next
- * CPU after push_cpu. That is, if CPU 1, 4 and 5 are overloaded when CPU 3
- * schedules a lower priority task, and the IPI_RESTART gets set while the
- * handling is being done on CPU 5, it will clear the flag and send it back to
- * CPU 4 instead of CPU 1.
- *
- * Note, the above logic can be disabled by turning off the sched_feature
- * RT_PUSH_IPI. Then the rq lock of the overloaded CPU will simply be
- * taken by the CPU requesting a pull and the waiting RT task will be pulled
- * by that CPU. This may be fine for machines with few CPUs.
- */
-static void tell_cpu_to_push(struct rq *rq)
+static inline void rto_start_unlock(atomic_t *v)
{
- int cpu;
+ atomic_set_release(v, 0);
+}
- if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
- raw_spin_lock(&rq->rt.push_lock);
- /* Make sure it's still executing */
- if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
- /*
- * Tell the IPI to restart the loop as things have
- * changed since it started.
- */
- rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
- raw_spin_unlock(&rq->rt.push_lock);
- return;
- }
- raw_spin_unlock(&rq->rt.push_lock);
- }
+static void tell_cpu_to_push(struct rq *rq)
+{
+ int cpu = -1;
- /* When here, there's no IPI going around */
+ /* Keep the loop going if the IPI is currently active */
+ atomic_inc(&rq->rd->rto_loop_next);
- rq->rt.push_cpu = rq->cpu;
- cpu = find_next_push_cpu(rq);
- if (cpu >= nr_cpu_ids)
+ /* Only one CPU can initiate a loop at a time */
+ if (!rto_start_trylock(&rq->rd->rto_loop_start))
return;
- rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
+ raw_spin_lock(&rq->rd->rto_lock);
+
+ /*
+ * The rto_cpu is updated under the lock; if it has a valid cpu
+ * then the IPI is still running and will continue due to the
+ * update to loop_next, and nothing needs to be done here.
+ * Otherwise it is finishing up and an IPI needs to be sent.
+ */
+ if (rq->rd->rto_cpu < 0)
+ cpu = rto_next_cpu(rq);
- irq_work_queue_on(&rq->rt.push_work, cpu);
+ raw_spin_unlock(&rq->rd->rto_lock);
+
+ rto_start_unlock(&rq->rd->rto_loop_start);
+
+ if (cpu >= 0)
+ irq_work_queue_on(&rq->rd->rto_push_work, cpu);
}
/* Called from hardirq context */
-static void try_to_push_tasks(void *arg)
+void rto_push_irq_work_func(struct irq_work *work)
{
- struct rt_rq *rt_rq = arg;
- struct rq *rq, *src_rq;
- int this_cpu;
+ struct rq *rq;
int cpu;
- this_cpu = rt_rq->push_cpu;
+ rq = this_rq();
- /* Paranoid check */
- BUG_ON(this_cpu != smp_processor_id());
-
- rq = cpu_rq(this_cpu);
- src_rq = rq_of_rt_rq(rt_rq);
-
-again:
+ /*
+ * We do not need to grab the lock to check for has_pushable_tasks.
+ * When it gets updated, a check is made if a push is possible.
+ */
if (has_pushable_tasks(rq)) {
raw_spin_lock(&rq->lock);
- push_rt_task(rq);
+ push_rt_tasks(rq);
raw_spin_unlock(&rq->lock);
}
- /* Pass the IPI to the next rt overloaded queue */
- raw_spin_lock(&rt_rq->push_lock);
- /*
- * If the source queue changed since the IPI went out,
- * we need to restart the search from that CPU again.
- */
- if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
- rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
- rt_rq->push_cpu = src_rq->cpu;
- }
+ raw_spin_lock(&rq->rd->rto_lock);
- cpu = find_next_push_cpu(src_rq);
+ /* Pass the IPI to the next rt overloaded queue */
+ cpu = rto_next_cpu(rq);
- if (cpu >= nr_cpu_ids)
- rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
- raw_spin_unlock(&rt_rq->push_lock);
+ raw_spin_unlock(&rq->rd->rto_lock);
- if (cpu >= nr_cpu_ids)
+ if (cpu < 0)
return;
- /*
- * It is possible that a restart caused this CPU to be
- * chosen again. Don't bother with an IPI, just see if we
- * have more to push.
- */
- if (unlikely(cpu == rq->cpu))
- goto again;
-
/* Try the next RT overloaded CPU */
- irq_work_queue_on(&rt_rq->push_work, cpu);
-}
-
-static void push_irq_work_func(struct irq_work *work)
-{
- struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
-
- try_to_push_tasks(rt_rq);
+ irq_work_queue_on(&rq->rd->rto_push_work, cpu);
}
#endif /* HAVE_RT_PUSH_IPI */
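
The rto_loop/rto_loop_start/rto_loop_next scheme described above is easier to see in isolation. Below is a minimal single-threaded userspace model of the generation counter (names shortened, not kernel code): one initiator runs the iterator, and any CPU that lowers its priority mid-scan bumps the generation so the iterator rescans before giving up.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int loop_start;	/* only one CPU may run the iterator */
static atomic_int loop_next;	/* bumped by every CPU wanting a rescan */
static int loop;		/* last generation the iterator completed */

static bool start_trylock(void)
{
	int zero = 0;

	return atomic_compare_exchange_strong(&loop_start, &zero, 1);
}

static void start_unlock(void)
{
	atomic_store(&loop_start, 0);
}

/* Mirrors the rto_loop/rto_loop_next comparison in rto_next_cpu(). */
static bool another_pass_needed(void)
{
	int next = atomic_load(&loop_next);

	if (loop == next)
		return false;	/* nothing changed since the last pass */
	loop = next;		/* record the generation we are serving */
	return true;
}

int main(void)
{
	atomic_fetch_add(&loop_next, 1);	/* a CPU lowered its priority */

	if (start_trylock()) {
		while (another_pass_needed())
			printf("scan all RT-overloaded CPUs once\n");
		start_unlock();
	}
	return 0;
}
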
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3b448ba82225..b19552a212de 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -30,6 +30,7 @@
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/slab.h>
+#include <linux/cgroup.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
@@ -37,7 +38,6 @@
#include "cpupri.h"
#include "cpudeadline.h"
-#include "cpuacct.h"
#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
@@ -227,7 +227,7 @@ struct dl_bw {
static inline void __dl_update(struct dl_bw *dl_b, s64 bw);
static inline
-void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
+void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
dl_b->total_bw -= tsk_bw;
__dl_update(dl_b, (s32)tsk_bw / cpus);
@@ -256,7 +256,6 @@ extern int sched_dl_overflow(struct task_struct *p, int policy,
extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
-extern void __dl_clear_params(struct task_struct *p);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int dl_task_can_attach(struct task_struct *p,
const struct cpumask *cs_cpus_allowed);
@@ -419,6 +418,7 @@ struct cfs_bandwidth { };
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct load_weight load;
+ unsigned long runnable_weight;
unsigned int nr_running, h_nr_running;
u64 exec_clock;
@@ -444,18 +444,22 @@ struct cfs_rq {
* CFS load tracking
*/
struct sched_avg avg;
- u64 runnable_load_sum;
- unsigned long runnable_load_avg;
-#ifdef CONFIG_FAIR_GROUP_SCHED
- unsigned long tg_load_avg_contrib;
- unsigned long propagate_avg;
-#endif
- atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
u64 load_last_update_time_copy;
#endif
+ struct {
+ raw_spinlock_t lock ____cacheline_aligned;
+ int nr;
+ unsigned long load_avg;
+ unsigned long util_avg;
+ unsigned long runnable_sum;
+ } removed;
#ifdef CONFIG_FAIR_GROUP_SCHED
+ unsigned long tg_load_avg_contrib;
+ long propagate;
+ long prop_runnable_sum;
+
/*
* h_load = weight * f(tg)
*
@@ -502,7 +506,7 @@ static inline int rt_bandwidth_enabled(void)
}
/* RT IPI pull logic requires IRQ_WORK */
-#ifdef CONFIG_IRQ_WORK
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
# define HAVE_RT_PUSH_IPI
#endif
@@ -524,12 +528,6 @@ struct rt_rq {
unsigned long rt_nr_total;
int overloaded;
struct plist_head pushable_tasks;
-#ifdef HAVE_RT_PUSH_IPI
- int push_flags;
- int push_cpu;
- struct irq_work push_work;
- raw_spinlock_t push_lock;
-#endif
#endif /* CONFIG_SMP */
int rt_queued;
@@ -638,6 +636,19 @@ struct root_domain {
struct dl_bw dl_bw;
struct cpudl cpudl;
+#ifdef HAVE_RT_PUSH_IPI
+ /*
+ * For IPI pull requests, loop across the rto_mask.
+ */
+ struct irq_work rto_push_work;
+ raw_spinlock_t rto_lock;
+ /* These are only updated and read within rto_lock */
+ int rto_loop;
+ int rto_cpu;
+ /* These atomics are updated outside of a lock */
+ atomic_t rto_loop_next;
+ atomic_t rto_loop_start;
+#endif
/*
* The "RT overload" flag: it gets set if a CPU has more than
* one runnable RT task.
@@ -655,6 +666,9 @@ extern void init_defrootdomain(void);
extern int sched_init_domains(const struct cpumask *cpu_map);
extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
+#ifdef HAVE_RT_PUSH_IPI
+extern void rto_push_irq_work_func(struct irq_work *work);
+#endif
#endif /* CONFIG_SMP */
/*
@@ -1219,8 +1233,6 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
# define const_debug const
#endif
-extern const_debug unsigned int sysctl_sched_features;
-
#define SCHED_FEAT(name, enabled) \
__SCHED_FEAT_##name ,
@@ -1232,6 +1244,13 @@ enum {
#undef SCHED_FEAT
#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
+
+/*
+ * To support run-time toggling of sched features, all the translation units
+ * (except core.c) reference the sysctl_sched_features defined in core.c.
+ */
+extern const_debug unsigned int sysctl_sched_features;
+
#define SCHED_FEAT(name, enabled) \
static __always_inline bool static_branch_##name(struct static_key *key) \
{ \
@@ -1239,13 +1258,27 @@ static __always_inline bool static_branch_##name(struct static_key *key) \
}
#include "features.h"
-
#undef SCHED_FEAT
extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
+
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
+
+/*
+ * Each translation unit has its own copy of sysctl_sched_features to allow
+ * constant propagation at compile time and compiler optimization based on
+ * the features' default values.
+ */
+#define SCHED_FEAT(name, enabled) \
+ (1UL << __SCHED_FEAT_##name) * enabled |
+static const_debug __maybe_unused unsigned int sysctl_sched_features =
+#include "features.h"
+ 0;
+#undef SCHED_FEAT
+
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
+
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
extern struct static_key_false sched_numa_balancing;
@@ -1530,6 +1563,8 @@ extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);
+extern void reweight_task(struct task_struct *p, int prio);
+
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
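
The per-translation-unit fallback above works because the SCHED_FEAT() x-macro expands features.h twice: once into an enum of feature indices and once into a compile-time constant bitmask, which lets the compiler fold sched_feat() tests. A standalone sketch of the same trick, with invented feature names:

#include <stdio.h>

#define FEATURES(F)	\
	F(FOO, 1)	\
	F(BAR, 0)	\
	F(BAZ, 1)

enum {
#define F_ENUM(name, enabled)	__FEAT_##name,
	FEATURES(F_ENUM)
#undef F_ENUM
	__FEAT_NR,
};

/* Same shape as the !HAVE_JUMP_LABEL fallback: a per-file constant mask. */
static const unsigned int features =
#define F_MASK(name, enabled)	(1UL << __FEAT_##name) * (enabled) |
	FEATURES(F_MASK)
#undef F_MASK
	0;

#define feat(x)	(features & (1UL << __FEAT_##x))

int main(void)
{
	printf("FOO=%d BAR=%d BAZ=%d\n", !!feat(FOO), !!feat(BAR), !!feat(BAZ));
	return 0;
}
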
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 45caf90b24cd..210b1f2146ff 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -72,7 +72,7 @@ static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
account_group_exec_runtime(curr, delta_exec);
curr->se.exec_start = rq_clock_task(rq);
- cpuacct_charge(curr, delta_exec);
+ cgroup_account_cputime(curr, delta_exec);
}
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 6798276d29af..034cbed7f88b 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -4,6 +4,7 @@
*/
#include <linux/sched.h>
#include <linux/mutex.h>
+#include <linux/sched/isolation.h>
#include "sched.h"
@@ -269,6 +270,12 @@ static int init_rootdomain(struct root_domain *rd)
if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
goto free_dlo_mask;
+#ifdef HAVE_RT_PUSH_IPI
+ rd->rto_cpu = -1;
+ raw_spin_lock_init(&rd->rto_lock);
+ init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
+#endif
+
init_dl_bw(&rd->dl_bw);
if (cpudl_init(&rd->cpudl) != 0)
goto free_rto_mask;
@@ -464,21 +471,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
update_top_cache_domain(cpu);
}
-/* Setup the mask of CPUs configured for isolated domains */
-static int __init isolated_cpu_setup(char *str)
-{
- int ret;
-
- alloc_bootmem_cpumask_var(&cpu_isolated_map);
- ret = cpulist_parse(str, cpu_isolated_map);
- if (ret) {
- pr_err("sched: Error, all isolcpus= values must be between 0 and %u\n", nr_cpu_ids);
- return 0;
- }
- return 1;
-}
-__setup("isolcpus=", isolated_cpu_setup);
-
struct s_data {
struct sched_domain ** __percpu sd;
struct root_domain *rd;
@@ -1158,6 +1150,7 @@ sd_init(struct sched_domain_topology_level *tl,
sd->smt_gain = 1178; /* ~15% */
} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
+ sd->flags |= SD_PREFER_SIBLING;
sd->imbalance_pct = 117;
sd->cache_nice_tries = 1;
sd->busy_idx = 2;
@@ -1332,6 +1325,10 @@ void sched_init_numa(void)
if (!sched_domains_numa_distance)
return;
+ /* Includes NUMA identity node at level 0. */
+ sched_domains_numa_distance[level++] = curr_distance;
+ sched_domains_numa_levels = level;
+
/*
* O(nr_nodes^2) deduplicating selection sort -- in order to find the
* unique distances in the node_distance() table.
@@ -1379,8 +1376,7 @@ void sched_init_numa(void)
return;
/*
- * 'level' contains the number of unique distances, excluding the
- * identity distance node_distance(i,i).
+ * 'level' contains the number of unique distances
*
* The sched_domains_numa_distance[] array includes the actual distance
* numbers.
@@ -1442,9 +1438,18 @@ void sched_init_numa(void)
tl[i] = sched_domain_topology[i];
/*
+ * Add the NUMA identity distance, aka single NODE.
+ */
+ tl[i++] = (struct sched_domain_topology_level){
+ .mask = sd_numa_mask,
+ .numa_level = 0,
+ SD_INIT_NAME(NODE)
+ };
+
+ /*
* .. and append 'j' levels of NUMA goodness.
*/
- for (j = 0; j < level; i++, j++) {
+ for (j = 1; j < level; i++, j++) {
tl[i] = (struct sched_domain_topology_level){
.mask = sd_numa_mask,
.sd_flags = cpu_numa_flags,
@@ -1774,7 +1779,7 @@ int sched_init_domains(const struct cpumask *cpu_map)
doms_cur = alloc_sched_domains(ndoms_cur);
if (!doms_cur)
doms_cur = &fallback_doms;
- cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
+ cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN));
err = build_sched_domains(doms_cur[0], NULL);
register_sched_domain_sysctl();
@@ -1857,7 +1862,8 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
doms_new = alloc_sched_domains(1);
if (doms_new) {
n = 1;
- cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
+ cpumask_and(doms_new[0], cpu_active_mask,
+ housekeeping_cpumask(HK_FLAG_DOMAIN));
}
} else {
n = ndoms_new;
@@ -1880,7 +1886,8 @@ match1:
if (!doms_new) {
n = 0;
doms_new = &fallback_doms;
- cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
+ cpumask_and(doms_new[0], cpu_active_mask,
+ housekeeping_cpumask(HK_FLAG_DOMAIN));
}
/* Build new domains: */
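
The "NUMA identity node at level 0" change above seeds the distance array with node_distance(i, i) before the deduplicating scan, so the per-node identity domain becomes topology level 0 and the former level 0 NUMA distance moves to level 1. A toy model with a hypothetical three-node distance table (dedup only, no sorting, not the kernel's selection sort):

#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 3

static const int node_distance[NR_NODES][NR_NODES] = {
	{ 10, 20, 30 },
	{ 20, 10, 20 },
	{ 30, 20, 10 },
};

int main(void)
{
	int levels[NR_NODES * NR_NODES];
	int nr = 0, i, j, k;

	/* Identity distance is recorded first, as level 0. */
	levels[nr++] = node_distance[0][0];

	for (i = 0; i < NR_NODES; i++) {
		for (j = 0; j < NR_NODES; j++) {
			bool seen = false;

			for (k = 0; k < nr; k++)
				if (levels[k] == node_distance[i][j])
					seen = true;
			if (!seen)
				levels[nr++] = node_distance[i][j];
		}
	}

	for (i = 0; i < nr; i++)
		printf("level %d: distance %d\n", i, levels[i]);
	return 0;
}
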
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 418a1c045933..5f0dfb2abb8d 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -190,7 +190,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
u32 ret = SECCOMP_RET_ALLOW;
/* Make sure cross-thread synced filter points somewhere sane. */
struct seccomp_filter *f =
- lockless_dereference(current->seccomp.filter);
+ READ_ONCE(current->seccomp.filter);
/* Ensure unexpected behavior doesn't result in failing open. */
if (unlikely(WARN_ON(f == NULL)))
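
The lockless_dereference() call becomes READ_ONCE() because READ_ONCE() now also provides the dependency ordering the old helper existed for. A hedged sketch of the publish/consume pattern this relies on (the struct and function names are illustrative, not seccomp code):

#include <linux/compiler.h>
#include <linux/atomic.h>

struct cfg {
	int value;
};

static struct cfg *current_cfg;		/* shared, read locklessly */

/* Publisher: initialize the object before making the pointer visible. */
static void publish_cfg(struct cfg *new)
{
	new->value = 42;
	smp_store_release(&current_cfg, new);
}

/*
 * Consumer: READ_ONCE() is enough; the address dependency orders the
 * subsequent field load after the pointer load.
 */
static int read_cfg(void)
{
	struct cfg *c = READ_ONCE(current_cfg);

	return c ? c->value : -1;
}
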
diff --git a/kernel/signal.c b/kernel/signal.c
index 8dcd8825b2de..aa1fb9f905db 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1036,8 +1036,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
else
override_rlimit = 0;
- q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
- override_rlimit);
+ q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
if (q) {
list_add_tail(&q->list, &pending->list);
switch ((unsigned long) info) {
diff --git a/kernel/smp.c b/kernel/smp.c
index c94dd85c8d41..084c8b3a2681 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -213,7 +213,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
call_single_data_t *csd, *csd_next;
static bool warned;
- WARN_ON(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
head = this_cpu_ptr(&call_single_queue);
entry = llist_del_all(head);
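
This is one of several conversions in this series from WARN_ON(!irqs_disabled()) to lockdep_assert_irqs_disabled(), which compiles away when lockdep is disabled and checks lockdep's own view of the IRQ state when it is enabled. A hedged sketch of the usual way such an assertion documents a calling convention (function and lock names are invented):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static int example_state;

/* Caller contract: interrupts must already be disabled. */
static void example_update_locked(void)
{
	lockdep_assert_irqs_disabled();

	raw_spin_lock(&example_lock);
	example_state++;	/* state also touched from hard IRQ context */
	raw_spin_unlock(&example_lock);
}
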
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 4e09821f9d9e..2f5e87f1bae2 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(__local_bh_disable_ip);
static void __local_bh_enable(unsigned int cnt)
{
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
if (softirq_count() == (cnt & SOFTIRQ_MASK))
trace_softirqs_on(_RET_IP_);
@@ -158,7 +158,8 @@ EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
- WARN_ON_ONCE(in_irq() || irqs_disabled());
+ WARN_ON_ONCE(in_irq());
+ lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
local_irq_disable();
#endif
@@ -396,9 +397,8 @@ void irq_exit(void)
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
local_irq_disable();
#else
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
#endif
-
account_irq_exit_time(current);
preempt_count_sub(HARDIRQ_OFFSET);
if (!in_interrupt() && local_softirq_pending())
@@ -486,16 +486,6 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
-void __tasklet_hi_schedule_first(struct tasklet_struct *t)
-{
- BUG_ON(!irqs_disabled());
-
- t->next = __this_cpu_read(tasklet_hi_vec.head);
- __this_cpu_write(tasklet_hi_vec.head, t);
- __raise_softirq_irqoff(HI_SOFTIRQ);
-}
-EXPORT_SYMBOL(__tasklet_hi_schedule_first);
-
static __latent_entropy void tasklet_action(struct softirq_action *a)
{
struct tasklet_struct *list;
diff --git a/kernel/sys.c b/kernel/sys.c
index 524a4cb9bbe2..83ffd7dccf23 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -111,6 +111,12 @@
#ifndef SET_FP_MODE
# define SET_FP_MODE(a,b) (-EINVAL)
#endif
+#ifndef SVE_SET_VL
+# define SVE_SET_VL(a) (-EINVAL)
+#endif
+#ifndef SVE_GET_VL
+# define SVE_GET_VL() (-EINVAL)
+#endif
/*
* this is where the system-wide overflow UID and GID are defined, for
@@ -2386,6 +2392,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
case PR_GET_FP_MODE:
error = GET_FP_MODE(me);
break;
+ case PR_SVE_SET_VL:
+ error = SVE_SET_VL(arg2);
+ break;
+ case PR_SVE_GET_VL:
+ error = SVE_GET_VL();
+ break;
default:
error = -EINVAL;
break;
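
The two prctl() options are plumbed through to the arm64 SVE code; on other architectures they fall back to -EINVAL as defined above. A hedged userspace sketch (assuming a libc that already exposes PR_SVE_SET_VL/PR_SVE_GET_VL; otherwise the constants come from the kernel's <linux/prctl.h>):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	int vl = prctl(PR_SVE_GET_VL);

	if (vl < 0) {
		perror("PR_SVE_GET_VL (no SVE support?)");
		return 1;
	}
	printf("current SVE vector length configuration: %d\n", vl);

	/* Ask for a 32-byte vector length for this thread. */
	if (prctl(PR_SVE_SET_VL, 32) < 0)
		perror("PR_SVE_SET_VL");

	return 0;
}
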
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index d9c31bc2eaea..4a13a389e99b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -30,7 +30,6 @@
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/ctype.h>
-#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/fs.h>
#include <linux/init.h>
@@ -1174,15 +1173,6 @@ static struct ctl_table kern_table[] = {
.extra2 = &one_thousand,
},
#endif
-#ifdef CONFIG_KMEMCHECK
- {
- .procname = "kmemcheck",
- .data = &kmemcheck_enabled,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
-#endif
{
.procname = "panic_on_warn",
.data = &panic_on_warn,
@@ -1342,11 +1332,6 @@ static struct ctl_table vm_table[] = {
.extra1 = &zero,
},
{
- .procname = "nr_pdflush_threads",
- .mode = 0444 /* read-only */,
- .proc_handler = pdflush_proc_obsolete,
- },
- {
.procname = "swappiness",
.data = &vm_swappiness,
.maxlen = sizeof(vm_swappiness),
@@ -1371,6 +1356,15 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = &hugetlb_mempolicy_sysctl_handler,
},
+ {
+ .procname = "numa_stat",
+ .data = &sysctl_vm_numa_stat,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = sysctl_vm_numa_stat_handler,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
#endif
{
.procname = "hugetlb_shm_group",
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 5718b3ea202a..0fef395662a6 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -68,7 +68,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
* we raced with task_work_run(), *pprev == NULL/exited.
*/
raw_spin_lock_irqsave(&task->pi_lock, flags);
- while ((work = lockless_dereference(*pprev))) {
+ while ((work = READ_ONCE(*pprev))) {
if (work->func != func)
pprev = &work->next;
else if (cmpxchg(pprev, work, work->next) == work)
diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index 0dbab6d1acb4..dd53e354f630 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -22,7 +22,7 @@
#define div_factor 3
-static u32 rand1, preh_val, posth_val, jph_val;
+static u32 rand1, preh_val, posth_val;
static int errors, handler_errors, num_tests;
static u32 (*target)(u32 value);
static u32 (*target2)(u32 value);
@@ -34,6 +34,10 @@ static noinline u32 kprobe_target(u32 value)
static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
+ if (preemptible()) {
+ handler_errors++;
+ pr_err("pre-handler is preemptible\n");
+ }
preh_val = (rand1 / div_factor);
return 0;
}
@@ -41,6 +45,10 @@ static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
+ if (preemptible()) {
+ handler_errors++;
+ pr_err("post-handler is preemptible\n");
+ }
if (preh_val != (rand1 / div_factor)) {
handler_errors++;
pr_err("incorrect value in post_handler\n");
@@ -154,8 +162,15 @@ static int test_kprobes(void)
}
+#if 0
+static u32 jph_val;
+
static u32 j_kprobe_target(u32 value)
{
+ if (preemptible()) {
+ handler_errors++;
+ pr_err("jprobe-handler is preemptible\n");
+ }
if (value != rand1) {
handler_errors++;
pr_err("incorrect value in jprobe handler\n");
@@ -227,11 +242,19 @@ static int test_jprobes(void)
return 0;
}
+#else
+#define test_jprobe() (0)
+#define test_jprobes() (0)
+#endif
#ifdef CONFIG_KRETPROBES
static u32 krph_val;
static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
+ if (preemptible()) {
+ handler_errors++;
+ pr_err("kretprobe entry handler is preemptible\n");
+ }
krph_val = (rand1 / div_factor);
return 0;
}
@@ -240,6 +263,10 @@ static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long ret = regs_return_value(regs);
+ if (preemptible()) {
+ handler_errors++;
+ pr_err("kretprobe return handler is preemptible\n");
+ }
if (ret != (rand1 / div_factor)) {
handler_errors++;
pr_err("incorrect value in kretprobe handler\n");
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 88f75f92ef36..d32520840fde 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -758,9 +758,7 @@ void clock_was_set(void)
*/
void hrtimers_resume(void)
{
- WARN_ONCE(!irqs_disabled(),
- KERN_INFO "hrtimers_resume() called with IRQs enabled!");
-
+ lockdep_assert_irqs_disabled();
/* Retrigger on the local CPU */
retrigger_next_event(NULL);
/* And schedule a retrigger for all others */
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 5b117110b55b..1f27887aa194 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -603,7 +603,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
/*
* Disarm any old timer after extracting its expiry time.
*/
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
ret = 0;
old_incr = timer->it.cpu.incr;
@@ -1034,7 +1034,7 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
/*
* Now re-arm for the new expiry time.
*/
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
arm_timer(timer);
unlock:
unlock_task_sighand(p, &flags);
@@ -1125,7 +1125,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
struct k_itimer *timer, *next;
unsigned long flags;
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
/*
* The fast path checks that there are no expired thread or thread
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index c7a899c5ce64..99578f06c8d4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -27,6 +27,7 @@
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
+#include <linux/mm.h>
#include <asm/irq_regs.h>
@@ -165,7 +166,6 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
-cpumask_var_t housekeeping_mask;
bool tick_nohz_full_running;
static atomic_t tick_dep_mask;
@@ -198,7 +198,7 @@ static bool check_tick_dependency(atomic_t *dep)
static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
if (unlikely(!cpu_online(cpu)))
return false;
@@ -385,20 +385,13 @@ out:
local_irq_restore(flags);
}
-/* Parse the boot-time nohz CPU list from the kernel parameters. */
-static int __init tick_nohz_full_setup(char *str)
+/* Get the boot-time nohz CPU list from the kernel parameters. */
+void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
- if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
- pr_warn("NO_HZ: Incorrect nohz_full cpumask\n");
- free_bootmem_cpumask_var(tick_nohz_full_mask);
- return 1;
- }
+ cpumask_copy(tick_nohz_full_mask, cpumask);
tick_nohz_full_running = true;
-
- return 1;
}
-__setup("nohz_full=", tick_nohz_full_setup);
static int tick_nohz_cpu_down(unsigned int cpu)
{
@@ -437,13 +430,6 @@ void __init tick_nohz_init(void)
return;
}
- if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
- WARN(1, "NO_HZ: Can't allocate not-full dynticks cpumask\n");
- cpumask_clear(tick_nohz_full_mask);
- tick_nohz_full_running = false;
- return;
- }
-
/*
* Full dynticks uses irq work to drive the tick rescheduling on safe
* locking contexts. But then we need irq work to raise its own
@@ -452,7 +438,6 @@ void __init tick_nohz_init(void)
if (!arch_irq_work_has_interrupt()) {
pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
cpumask_clear(tick_nohz_full_mask);
- cpumask_copy(housekeeping_mask, cpu_possible_mask);
tick_nohz_full_running = false;
return;
}
@@ -465,9 +450,6 @@ void __init tick_nohz_init(void)
cpumask_clear_cpu(cpu, tick_nohz_full_mask);
}
- cpumask_andnot(housekeeping_mask,
- cpu_possible_mask, tick_nohz_full_mask);
-
for_each_cpu(cpu, tick_nohz_full_mask)
context_tracking_cpu_set(cpu);
@@ -477,12 +459,6 @@ void __init tick_nohz_init(void)
WARN_ON(ret < 0);
pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
cpumask_pr_args(tick_nohz_full_mask));
-
- /*
- * We need at least one CPU to handle housekeeping work such
- * as timekeeping, unbound timers, workqueues, ...
- */
- WARN_ON_ONCE(cpumask_empty(housekeeping_mask));
}
#endif
@@ -787,6 +763,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
if (!ts->tick_stopped) {
calc_load_nohz_start();
cpu_load_update_nohz_start();
+ quiet_vmstat();
ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
ts->tick_stopped = 1;
@@ -960,8 +937,7 @@ void tick_nohz_idle_enter(void)
{
struct tick_sched *ts;
- WARN_ON_ONCE(irqs_disabled());
-
+ lockdep_assert_irqs_enabled();
/*
* Update the idle state in the scheduler domain hierarchy
* when tick_nohz_stop_sched_tick() is called from the idle loop.
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 434c840e2d82..f54b7b6b4a4b 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -224,7 +224,7 @@ config HWLAT_TRACER
select GENERIC_TRACER
help
This tracer, when enabled will create one or more kernel threads,
- depening on what the cpumask file is set to, which each thread
+	  depending on what the cpumask file is set to, with each thread
spinning in a loop looking for interruptions caused by
something other than the kernel. For example, if a
System Management Interrupt (SMI) takes a noticeable amount of
@@ -239,7 +239,7 @@ config HWLAT_TRACER
iteration
A kernel thread is created that will spin with interrupts disabled
- for "width" microseconds in every "widow" cycle. It will not spin
+ for "width" microseconds in every "window" cycle. It will not spin
for "window - width" microseconds, where the system can
continue to operate.
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 45a3928544ce..206e0e2ace53 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -66,7 +66,8 @@ static struct tracer_flags blk_tracer_flags = {
};
/* Global reference count of probes */
-static atomic_t blk_probes_ref = ATOMIC_INIT(0);
+static DEFINE_MUTEX(blk_probe_mutex);
+static int blk_probes_ref;
static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);
@@ -329,14 +330,29 @@ static void blk_trace_free(struct blk_trace *bt)
kfree(bt);
}
+static void get_probe_ref(void)
+{
+ mutex_lock(&blk_probe_mutex);
+ if (++blk_probes_ref == 1)
+ blk_register_tracepoints();
+ mutex_unlock(&blk_probe_mutex);
+}
+
+static void put_probe_ref(void)
+{
+ mutex_lock(&blk_probe_mutex);
+ if (!--blk_probes_ref)
+ blk_unregister_tracepoints();
+ mutex_unlock(&blk_probe_mutex);
+}
+
static void blk_trace_cleanup(struct blk_trace *bt)
{
blk_trace_free(bt);
- if (atomic_dec_and_test(&blk_probes_ref))
- blk_unregister_tracepoints();
+ put_probe_ref();
}
-int blk_trace_remove(struct request_queue *q)
+static int __blk_trace_remove(struct request_queue *q)
{
struct blk_trace *bt;
@@ -349,6 +365,17 @@ int blk_trace_remove(struct request_queue *q)
return 0;
}
+
+int blk_trace_remove(struct request_queue *q)
+{
+ int ret;
+
+ mutex_lock(&q->blk_trace_mutex);
+ ret = __blk_trace_remove(q);
+ mutex_unlock(&q->blk_trace_mutex);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(blk_trace_remove);
static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
@@ -538,8 +565,7 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
if (cmpxchg(&q->blk_trace, NULL, bt))
goto err;
- if (atomic_inc_return(&blk_probes_ref) == 1)
- blk_register_tracepoints();
+ get_probe_ref();
ret = 0;
err:
@@ -550,9 +576,8 @@ err:
return ret;
}
-int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
- struct block_device *bdev,
- char __user *arg)
+static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ struct block_device *bdev, char __user *arg)
{
struct blk_user_trace_setup buts;
int ret;
@@ -571,6 +596,19 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
}
return 0;
}
+
+int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ struct block_device *bdev,
+ char __user *arg)
+{
+ int ret;
+
+ mutex_lock(&q->blk_trace_mutex);
+ ret = __blk_trace_setup(q, name, dev, bdev, arg);
+ mutex_unlock(&q->blk_trace_mutex);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(blk_trace_setup);
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
@@ -607,7 +645,7 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
}
#endif
-int blk_trace_startstop(struct request_queue *q, int start)
+static int __blk_trace_startstop(struct request_queue *q, int start)
{
int ret;
struct blk_trace *bt = q->blk_trace;
@@ -646,6 +684,17 @@ int blk_trace_startstop(struct request_queue *q, int start)
return ret;
}
+
+int blk_trace_startstop(struct request_queue *q, int start)
+{
+ int ret;
+
+ mutex_lock(&q->blk_trace_mutex);
+ ret = __blk_trace_startstop(q, start);
+ mutex_unlock(&q->blk_trace_mutex);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
/*
@@ -676,7 +725,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
switch (cmd) {
case BLKTRACESETUP:
bdevname(bdev, b);
- ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
+ ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
case BLKTRACESETUP32:
@@ -687,10 +736,10 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
case BLKTRACESTART:
start = 1;
case BLKTRACESTOP:
- ret = blk_trace_startstop(q, start);
+ ret = __blk_trace_startstop(q, start);
break;
case BLKTRACETEARDOWN:
- ret = blk_trace_remove(q);
+ ret = __blk_trace_remove(q);
break;
default:
ret = -ENOTTY;
@@ -708,10 +757,14 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
**/
void blk_trace_shutdown(struct request_queue *q)
{
+ mutex_lock(&q->blk_trace_mutex);
+
if (q->blk_trace) {
- blk_trace_startstop(q, 0);
- blk_trace_remove(q);
+ __blk_trace_startstop(q, 0);
+ __blk_trace_remove(q);
}
+
+ mutex_unlock(&q->blk_trace_mutex);
}
#ifdef CONFIG_BLK_CGROUP
@@ -1558,9 +1611,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
if (bt == NULL)
return -EINVAL;
- if (atomic_dec_and_test(&blk_probes_ref))
- blk_unregister_tracepoints();
-
+ put_probe_ref();
blk_trace_free(bt);
return 0;
}
@@ -1591,8 +1642,7 @@ static int blk_trace_setup_queue(struct request_queue *q,
if (cmpxchg(&q->blk_trace, NULL, bt))
goto free_bt;
- if (atomic_inc_return(&blk_probes_ref) == 1)
- blk_register_tracepoints();
+ get_probe_ref();
return 0;
free_bt:
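
blk_trace_ioctl() and blk_trace_shutdown() call the new __ variants because they already hold q->blk_trace_mutex, while the exported blk_trace_setup()/blk_trace_startstop()/blk_trace_remove() take the mutex themselves. A generic sketch of that locked/unlocked wrapper convention (names are illustrative, not block-layer API):

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(obj_mutex);
static int obj_state;

/* Callers of the __ variant must already hold obj_mutex. */
static int __obj_configure(int value)
{
	lockdep_assert_held(&obj_mutex);
	obj_state = value;
	return 0;
}

/* External entry point: takes the lock, then reuses the __ variant. */
static int obj_configure(int value)
{
	int ret;

	mutex_lock(&obj_mutex);
	ret = __obj_configure(value);
	mutex_unlock(&obj_mutex);

	return ret;
}
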
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 506efe6e8ed9..a5580c670866 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -78,12 +78,16 @@ EXPORT_SYMBOL_GPL(trace_call_bpf);
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
- int ret;
+ int ret = 0;
+
+ if (unlikely(size == 0))
+ goto out;
ret = probe_kernel_read(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
memset(dst, 0, size);
+ out:
return ret;
}
@@ -92,7 +96,7 @@ static const struct bpf_func_proto bpf_probe_read_proto = {
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_UNINIT_MEM,
- .arg2_type = ARG_CONST_SIZE,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
.arg3_type = ARG_ANYTHING,
};
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 81279c6602ff..d57fede84b38 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -13,7 +13,6 @@
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h> /* for self test */
-#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
@@ -2055,7 +2054,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
}
event = __rb_page_index(tail_page, tail);
- kmemcheck_annotate_bitfield(event, bitfield);
/* account for padding bytes */
local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
@@ -2686,7 +2684,6 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
/* We reserved something on the buffer */
event = __rb_page_index(tail_page, tail);
- kmemcheck_annotate_bitfield(event, bitfield);
rb_update_event(cpu_buffer, event, info);
local_inc(&tail_page->entries);
@@ -2724,7 +2721,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
* if it happened, we have to fail the write.
*/
barrier();
- if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
+ if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
local_dec(&cpu_buffer->committing);
local_dec(&cpu_buffer->commits);
return NULL;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 401b0639116f..6b0b343a36a2 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1460,7 +1460,7 @@ extern struct trace_event_file *find_event_file(struct trace_array *tr,
static inline void *event_file_data(struct file *filp)
{
- return ACCESS_ONCE(file_inode(filp)->i_private);
+ return READ_ONCE(file_inode(filp)->i_private);
}
extern struct mutex event_mutex;
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index c738e764e2a5..90db994ac900 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -921,8 +921,8 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
trace_assign_type(field, iter->ent);
- T = __task_state_to_char(field->next_state);
- S = __task_state_to_char(field->prev_state);
+ T = task_index_to_char(field->next_state);
+ S = task_index_to_char(field->prev_state);
trace_find_cmdline(field->next_pid, comm);
trace_seq_printf(&iter->seq,
" %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
@@ -957,8 +957,8 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
trace_assign_type(field, iter->ent);
if (!S)
- S = __task_state_to_char(field->prev_state);
- T = __task_state_to_char(field->next_state);
+ S = task_index_to_char(field->prev_state);
+ T = task_index_to_char(field->next_state);
trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
field->prev_pid,
field->prev_prio,
@@ -993,8 +993,8 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
trace_assign_type(field, iter->ent);
if (!S)
- S = __task_state_to_char(field->prev_state);
- T = __task_state_to_char(field->next_state);
+ S = task_index_to_char(field->prev_state);
+ T = task_index_to_char(field->next_state);
SEQ_PUT_HEX_FIELD(s, field->prev_pid);
SEQ_PUT_HEX_FIELD(s, field->prev_prio);
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 7d461dcd4831..a86b303e6c67 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -398,10 +398,10 @@ tracing_sched_switch_trace(struct trace_array *tr,
entry = ring_buffer_event_data(event);
entry->prev_pid = prev->pid;
entry->prev_prio = prev->prio;
- entry->prev_state = __get_task_state(prev);
+ entry->prev_state = task_state_index(prev);
entry->next_pid = next->pid;
entry->next_prio = next->prio;
- entry->next_state = __get_task_state(next);
+ entry->next_state = task_state_index(next);
entry->next_cpu = task_cpu(next);
if (!call_filter_check_discard(call, entry, buffer, event))
@@ -426,10 +426,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry = ring_buffer_event_data(event);
entry->prev_pid = curr->pid;
entry->prev_prio = curr->prio;
- entry->prev_state = __get_task_state(curr);
+ entry->prev_state = task_state_index(curr);
entry->next_pid = wakee->pid;
entry->next_prio = wakee->prio;
- entry->next_state = __get_task_state(wakee);
+ entry->next_state = task_state_index(wakee);
entry->next_cpu = task_cpu(wakee);
if (!call_filter_check_discard(call, entry, buffer, event))
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 719a52a4064a..734accc02418 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -78,7 +78,7 @@ check_stack(unsigned long ip, unsigned long *stack)
{
unsigned long this_size, flags; unsigned long *p, *top, *start;
static int tracer_frame;
- int frame_size = ACCESS_ONCE(tracer_frame);
+ int frame_size = READ_ONCE(tracer_frame);
int i, x;
this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index c490f1e4313b..d32b45662fb6 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -894,7 +894,7 @@ static bool new_idmap_permitted(const struct file *file,
int proc_setgroups_show(struct seq_file *seq, void *v)
{
struct user_namespace *ns = seq->private;
- unsigned long userns_flags = ACCESS_ONCE(ns->flags);
+ unsigned long userns_flags = READ_ONCE(ns->flags);
seq_printf(seq, "%s\n",
(userns_flags & USERNS_SETGROUPS_ALLOWED) ?
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index c8e06703e44c..576d18045811 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -25,6 +25,7 @@
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
+#include <linux/sched/isolation.h>
#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
@@ -774,15 +775,11 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write,
void __init lockup_detector_init(void)
{
-#ifdef CONFIG_NO_HZ_FULL
- if (tick_nohz_full_enabled()) {
+ if (tick_nohz_full_enabled())
pr_info("Disabling watchdog on nohz_full cores by default\n");
- cpumask_copy(&watchdog_cpumask, housekeeping_mask);
- } else
- cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
-#else
- cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
-#endif
+
+ cpumask_copy(&watchdog_cpumask,
+ housekeeping_cpumask(HK_FLAG_TIMER));
if (!watchdog_nmi_probe())
nmi_watchdog_available = true;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3b67c0a0df16..dde6298f6b22 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1376,7 +1376,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
debug_work_activate(work);
@@ -2490,15 +2490,8 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
- /*
- * Explicitly init the crosslock for wq_barrier::done, make its lock
- * key a subkey of the corresponding work. As a result we won't
- * build a dependency between wq_barrier::done and unrelated work.
- */
- lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map,
- "(complete)wq_barr::done",
- target->lockdep_map.key, 1);
- __init_completion(&barr->done);
+ init_completion_map(&barr->done, &target->lockdep_map);
+
barr->task = current;
/*
@@ -2604,16 +2597,13 @@ void flush_workqueue(struct workqueue_struct *wq)
struct wq_flusher this_flusher = {
.list = LIST_HEAD_INIT(this_flusher.list),
.flush_color = -1,
- .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
+ .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
};
int next_color;
if (WARN_ON(!wq_online))
return;
- lock_map_acquire(&wq->lockdep_map);
- lock_map_release(&wq->lockdep_map);
-
mutex_lock(&wq->mutex);
/*
@@ -2876,9 +2866,6 @@ bool flush_work(struct work_struct *work)
if (WARN_ON(!wq_online))
return false;
- lock_map_acquire(&work->lockdep_map);
- lock_map_release(&work->lockdep_map);
-
if (start_flush_work(work, &barr)) {
wait_for_completion(&barr.done);
destroy_work_on_stack(&barr.work);
@@ -4637,7 +4624,7 @@ static void rebind_workers(struct worker_pool *pool)
* concurrency management. Note that when or whether
* @worker clears REBOUND doesn't affect correctness.
*
- * ACCESS_ONCE() is necessary because @worker->flags may be
+ * WRITE_ONCE() is necessary because @worker->flags may be
* tested without holding any lock in
* wq_worker_waking_up(). Without it, NOT_RUNNING test may
* fail incorrectly leading to premature concurrency
@@ -4646,7 +4633,7 @@ static void rebind_workers(struct worker_pool *pool)
WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
worker_flags |= WORKER_REBOUND;
worker_flags &= ~WORKER_UNBOUND;
- ACCESS_ONCE(worker->flags) = worker_flags;
+ WRITE_ONCE(worker->flags, worker_flags);
}
spin_unlock_irq(&pool->lock);
@@ -5003,9 +4990,10 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
*
* Unbound workqueues have the following extra attributes.
*
- * id RO int : the associated pool ID
+ * pool_ids RO int : the associated pool IDs for each node
* nice RW int : nice value of the workers
* cpumask RW mask : bitmask of allowed CPUs for the workers
+ * numa RW bool : whether to enable NUMA affinity
*/
struct wq_device {
struct workqueue_struct *wq;
diff --git a/lib/Kconfig b/lib/Kconfig
index b1445b22a6de..a2b6745324ab 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -587,3 +587,21 @@ config STRING_SELFTEST
bool "Test string functions"
endmenu
+
+config GENERIC_ASHLDI3
+ bool
+
+config GENERIC_ASHRDI3
+ bool
+
+config GENERIC_LSHRDI3
+ bool
+
+config GENERIC_MULDI3
+ bool
+
+config GENERIC_CMPDI2
+ bool
+
+config GENERIC_UCMPDI2
+ bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dfdad67d8f6c..8ffd891857ab 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -280,7 +280,6 @@ config PAGE_OWNER
config DEBUG_FS
bool "Debug Filesystem"
- select SRCU
help
debugfs is a virtual file system that kernel developers use to put
debugging files into. Enable this option to be able to read and
@@ -376,7 +375,7 @@ config STACK_VALIDATION
that runtime stack traces are more reliable.
This is also a prerequisite for generation of ORC unwind data, which
- is needed for CONFIG_ORC_UNWINDER.
+ is needed for CONFIG_UNWINDER_ORC.
For more information, see
tools/objtool/Documentation/stack-validation.txt.
@@ -504,7 +503,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
config DEBUG_SLAB
bool "Debug slab memory allocations"
- depends on DEBUG_KERNEL && SLAB && !KMEMCHECK
+ depends on DEBUG_KERNEL && SLAB
help
Say Y here to have the kernel do limited verification on memory
allocation as well as poisoning memory on free to catch use of freed
@@ -516,7 +515,7 @@ config DEBUG_SLAB_LEAK
config SLUB_DEBUG_ON
bool "SLUB debugging on by default"
- depends on SLUB && SLUB_DEBUG && !KMEMCHECK
+ depends on SLUB && SLUB_DEBUG
default n
help
Boot with debugging on by default. SLUB boots by default with
@@ -730,8 +729,6 @@ config DEBUG_STACKOVERFLOW
If in doubt, say "N".
-source "lib/Kconfig.kmemcheck"
-
source "lib/Kconfig.kasan"
endmenu # "Memory Debugging"
@@ -1092,8 +1089,8 @@ config PROVE_LOCKING
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_LOCK_ALLOC
- select LOCKDEP_CROSSRELEASE if BROKEN
- select LOCKDEP_COMPLETIONS if BROKEN
+ select LOCKDEP_CROSSRELEASE
+ select LOCKDEP_COMPLETIONS
select TRACE_IRQFLAGS
default n
help
@@ -1179,6 +1176,21 @@ config LOCKDEP_COMPLETIONS
A deadlock caused by wait_for_completion() and complete() can be
detected by lockdep using crossrelease feature.
+config BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
+ bool "Enable the boot parameter, crossrelease_fullstack"
+ depends on LOCKDEP_CROSSRELEASE
+ default n
+ help
+ The lockdep "cross-release" feature needs to record stack traces
+ (of calling functions) for all acquisitions, for eventual later
+ use during analysis. By default only a single caller is recorded,
+ because the unwind operation can be very expensive with deeper
+ stack chains.
+
+	  However, a boot parameter, crossrelease_fullstack, was
+	  introduced because deeper traces are sometimes required for full
+	  analysis. This option turns on the boot parameter.
+
config DEBUG_LOCKDEP
bool "Lock dependency engine debugging"
depends on DEBUG_KERNEL && LOCKDEP
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
deleted file mode 100644
index 846e039a86b4..000000000000
--- a/lib/Kconfig.kmemcheck
+++ /dev/null
@@ -1,94 +0,0 @@
-config HAVE_ARCH_KMEMCHECK
- bool
-
-if HAVE_ARCH_KMEMCHECK
-
-menuconfig KMEMCHECK
- bool "kmemcheck: trap use of uninitialized memory"
- depends on DEBUG_KERNEL
- depends on !X86_USE_3DNOW
- depends on SLUB || SLAB
- depends on !CC_OPTIMIZE_FOR_SIZE
- depends on !FUNCTION_TRACER
- select FRAME_POINTER
- select STACKTRACE
- default n
- help
- This option enables tracing of dynamically allocated kernel memory
- to see if memory is used before it has been given an initial value.
- Be aware that this requires half of your memory for bookkeeping and
- will insert extra code at *every* read and write to tracked memory
- thus slow down the kernel code (but user code is unaffected).
-
- The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable
- or enable kmemcheck at boot-time. If the kernel is started with
- kmemcheck=0, the large memory and CPU overhead is not incurred.
-
-choice
- prompt "kmemcheck: default mode at boot"
- depends on KMEMCHECK
- default KMEMCHECK_ONESHOT_BY_DEFAULT
- help
- This option controls the default behaviour of kmemcheck when the
- kernel boots and no kmemcheck= parameter is given.
-
-config KMEMCHECK_DISABLED_BY_DEFAULT
- bool "disabled"
- depends on KMEMCHECK
-
-config KMEMCHECK_ENABLED_BY_DEFAULT
- bool "enabled"
- depends on KMEMCHECK
-
-config KMEMCHECK_ONESHOT_BY_DEFAULT
- bool "one-shot"
- depends on KMEMCHECK
- help
- In one-shot mode, only the first error detected is reported before
- kmemcheck is disabled.
-
-endchoice
-
-config KMEMCHECK_QUEUE_SIZE
- int "kmemcheck: error queue size"
- depends on KMEMCHECK
- default 64
- help
- Select the maximum number of errors to store in the queue. Since
- errors can occur virtually anywhere and in any context, we need a
- temporary storage area which is guarantueed not to generate any
- other faults. The queue will be emptied as soon as a tasklet may
- be scheduled. If the queue is full, new error reports will be
- lost.
-
-config KMEMCHECK_SHADOW_COPY_SHIFT
- int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)"
- depends on KMEMCHECK
- range 2 8
- default 5
- help
- Select the number of shadow bytes to save along with each entry of
- the queue. These bytes indicate what parts of an allocation are
- initialized, uninitialized, etc. and will be displayed when an
- error is detected to help the debugging of a particular problem.
-
-config KMEMCHECK_PARTIAL_OK
- bool "kmemcheck: allow partially uninitialized memory"
- depends on KMEMCHECK
- default y
- help
- This option works around certain GCC optimizations that produce
- 32-bit reads from 16-bit variables where the upper 16 bits are
- thrown away afterwards. This may of course also hide some real
- bugs.
-
-config KMEMCHECK_BITOPS_OK
- bool "kmemcheck: allow bit-field manipulation"
- depends on KMEMCHECK
- default n
- help
- This option silences warnings that would be generated for bit-field
- accesses where not all the bits are initialized at the same time.
- This may also hide some real bugs.
-
-endif
diff --git a/lib/Makefile b/lib/Makefile
index b8f2c16fccaa..136a0b254564 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -248,3 +248,11 @@ UBSAN_SANITIZE_ubsan.o := n
obj-$(CONFIG_SBITMAP) += sbitmap.o
obj-$(CONFIG_PARMAN) += parman.o
+
+# GCC library routines
+obj-$(CONFIG_GENERIC_ASHLDI3) += ashldi3.o
+obj-$(CONFIG_GENERIC_ASHRDI3) += ashrdi3.o
+obj-$(CONFIG_GENERIC_LSHRDI3) += lshrdi3.o
+obj-$(CONFIG_GENERIC_MULDI3) += muldi3.o
+obj-$(CONFIG_GENERIC_CMPDI2) += cmpdi2.o
+obj-$(CONFIG_GENERIC_UCMPDI2) += ucmpdi2.o
diff --git a/lib/ashldi3.c b/lib/ashldi3.c
new file mode 100644
index 000000000000..1b6087db95a5
--- /dev/null
+++ b/lib/ashldi3.c
@@ -0,0 +1,44 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.
+ */
+
+#include <linux/export.h>
+
+#include <lib/libgcc.h>
+
+long long notrace __ashldi3(long long u, word_type b)
+{
+ DWunion uu, w;
+ word_type bm;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+ bm = 32 - b;
+
+ if (bm <= 0) {
+ w.s.low = 0;
+ w.s.high = (unsigned int) uu.s.low << -bm;
+ } else {
+ const unsigned int carries = (unsigned int) uu.s.low >> bm;
+
+ w.s.low = (unsigned int) uu.s.low << b;
+ w.s.high = ((unsigned int) uu.s.high << b) | carries;
+ }
+
+ return w.ll;
+}
+EXPORT_SYMBOL(__ashldi3);
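__ashldi3() and the helpers that follow let 32-bit architectures resolve the libgcc calls the compiler emits for 64-bit shifts, comparisons and multiplies without linking against libgcc itself. A small user-space sketch that cross-checks the routine against the native shift (the prototype is written out by hand here and a 32-bit word_type is assumed):

	#include <assert.h>

	long long __ashldi3(long long u, int b);	/* routine shown above */

	int main(void)
	{
		unsigned long long v = 0x12345678abcdefULL;
		int b;

		for (b = 0; b < 64; b++)
			assert((unsigned long long)__ashldi3((long long)v, b) ==
			       (v << b));
		return 0;
	}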
diff --git a/lib/ashrdi3.c b/lib/ashrdi3.c
new file mode 100644
index 000000000000..2e67c97ac65a
--- /dev/null
+++ b/lib/ashrdi3.c
@@ -0,0 +1,46 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.
+ */
+
+#include <linux/export.h>
+
+#include <lib/libgcc.h>
+
+long long notrace __ashrdi3(long long u, word_type b)
+{
+ DWunion uu, w;
+ word_type bm;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+ bm = 32 - b;
+
+ if (bm <= 0) {
+ /* w.s.high = 1..1 or 0..0 */
+ w.s.high =
+ uu.s.high >> 31;
+ w.s.low = uu.s.high >> -bm;
+ } else {
+ const unsigned int carries = (unsigned int) uu.s.high << bm;
+
+ w.s.high = uu.s.high >> b;
+ w.s.low = ((unsigned int) uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+EXPORT_SYMBOL(__ashrdi3);
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 4e53be8bc590..b77d51da8c73 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -39,7 +39,7 @@ begin_node:
/* Descend through a shortcut */
shortcut = assoc_array_ptr_to_shortcut(cursor);
smp_read_barrier_depends();
- cursor = ACCESS_ONCE(shortcut->next_node);
+ cursor = READ_ONCE(shortcut->next_node);
}
node = assoc_array_ptr_to_node(cursor);
@@ -55,7 +55,7 @@ begin_node:
*/
has_meta = 0;
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = ACCESS_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]);
has_meta |= (unsigned long)ptr;
if (ptr && assoc_array_ptr_is_leaf(ptr)) {
/* We need a barrier between the read of the pointer
@@ -89,7 +89,7 @@ continue_node:
smp_read_barrier_depends();
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = ACCESS_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]);
if (assoc_array_ptr_is_meta(ptr)) {
cursor = ptr;
goto begin_node;
@@ -98,7 +98,7 @@ continue_node:
finished_node:
/* Move up to the parent (may need to skip back over a shortcut) */
- parent = ACCESS_ONCE(node->back_pointer);
+ parent = READ_ONCE(node->back_pointer);
slot = node->parent_slot;
if (parent == stop)
return 0;
@@ -107,7 +107,7 @@ finished_node:
shortcut = assoc_array_ptr_to_shortcut(parent);
smp_read_barrier_depends();
cursor = parent;
- parent = ACCESS_ONCE(shortcut->back_pointer);
+ parent = READ_ONCE(shortcut->back_pointer);
slot = shortcut->parent_slot;
if (parent == stop)
return 0;
@@ -147,7 +147,7 @@ int assoc_array_iterate(const struct assoc_array *array,
void *iterator_data),
void *iterator_data)
{
- struct assoc_array_ptr *root = ACCESS_ONCE(array->root);
+ struct assoc_array_ptr *root = READ_ONCE(array->root);
if (!root)
return 0;
@@ -194,7 +194,7 @@ assoc_array_walk(const struct assoc_array *array,
pr_devel("-->%s()\n", __func__);
- cursor = ACCESS_ONCE(array->root);
+ cursor = READ_ONCE(array->root);
if (!cursor)
return assoc_array_walk_tree_empty;
@@ -220,7 +220,7 @@ consider_node:
slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
slot &= ASSOC_ARRAY_FAN_MASK;
- ptr = ACCESS_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]);
pr_devel("consider slot %x [ix=%d type=%lu]\n",
slot, level, (unsigned long)ptr & 3);
@@ -294,7 +294,7 @@ follow_shortcut:
} while (sc_level < shortcut->skip_to_level);
/* The shortcut matches the leaf's index to this point. */
- cursor = ACCESS_ONCE(shortcut->next_node);
+ cursor = READ_ONCE(shortcut->next_node);
if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
level = sc_level;
goto jumped;
@@ -337,7 +337,7 @@ void *assoc_array_find(const struct assoc_array *array,
* the terminal node.
*/
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = ACCESS_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]);
if (ptr && assoc_array_ptr_is_leaf(ptr)) {
/* We need a barrier between the read of the pointer
* and dereferencing the pointer - but only if we are
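The ACCESS_ONCE() to READ_ONCE() conversions in this file (and in dynamic_queue_limits.c, llist.c, vsprintf.c and huge_memory.c further down) are mechanical: READ_ONCE() likewise forces the compiler to perform the load exactly once, but also works on non-scalar types, so the change is a plain substitution, for example:

	/* before */
	cursor = ACCESS_ONCE(shortcut->next_node);

	/* after */
	cursor = READ_ONCE(shortcut->next_node);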
diff --git a/lib/bitmap.c b/lib/bitmap.c
index c82c61b66e16..d8f0c094b18e 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -18,7 +18,9 @@
#include <asm/page.h>
-/*
+/**
+ * DOC: bitmap introduction
+ *
* bitmaps provide an array of bits, implemented using an an
* array of unsigned longs. The number of valid bits in a
* given bitmap does _not_ need to be an exact multiple of
diff --git a/lib/cmpdi2.c b/lib/cmpdi2.c
new file mode 100644
index 000000000000..6d7ebf6c2b86
--- /dev/null
+++ b/lib/cmpdi2.c
@@ -0,0 +1,42 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.
+ */
+
+#include <linux/export.h>
+
+#include <lib/libgcc.h>
+
+word_type notrace __cmpdi2(long long a, long long b)
+{
+ const DWunion au = {
+ .ll = a
+ };
+ const DWunion bu = {
+ .ll = b
+ };
+
+ if (au.s.high < bu.s.high)
+ return 0;
+ else if (au.s.high > bu.s.high)
+ return 2;
+
+ if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
+ return 0;
+ else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
+ return 2;
+
+ return 1;
+}
+EXPORT_SYMBOL(__cmpdi2);
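__cmpdi2() follows the libgcc convention of returning 0, 1 or 2 for "less than", "equal to" and "greater than" rather than a signed difference. A hypothetical wrapper (the cmp64() name is made up, and the word_type typedef is assumed to come from lib/libgcc.h) that converts the result to the usual strcmp()-style sign:

	word_type __cmpdi2(long long a, long long b);

	static inline int cmp64(long long a, long long b)
	{
		return (int)__cmpdi2(a, b) - 1;	/* <0, 0 or >0 */
	}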
diff --git a/lib/crc32.c b/lib/crc32.c
index 6ddc92bc1460..2ef20fe84b69 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -225,7 +225,7 @@ static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
}
/**
- * crc32_generic_shift - Append len 0 bytes to crc, in logarithmic time
+ * crc32_generic_shift - Append @len 0 bytes to crc, in logarithmic time
* @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient)
* @len: The number of bytes. @crc is multiplied by x^(8*@len)
* @polynomial: The modulus used to reduce the result to 32 bits.
diff --git a/lib/crc4.c b/lib/crc4.c
index cf6db46661be..164ed9444cd3 100644
--- a/lib/crc4.c
+++ b/lib/crc4.c
@@ -15,7 +15,7 @@ static const uint8_t crc4_tab[] = {
/**
* crc4 - calculate the 4-bit crc of a value.
- * @crc: starting crc4
+ * @c: starting crc4
* @x: value to checksum
* @bits: number of bits in @x to checksum
*
diff --git a/lib/crc8.c b/lib/crc8.c
index 87b59cafdb83..595a5a75e3cd 100644
--- a/lib/crc8.c
+++ b/lib/crc8.c
@@ -20,11 +20,11 @@
#include <linux/crc8.h>
#include <linux/printk.h>
-/*
+/**
* crc8_populate_msb - fill crc table for given polynomial in reverse bit order.
*
- * table: table to be filled.
- * polynomial: polynomial for which table is to be filled.
+ * @table: table to be filled.
+ * @polynomial: polynomial for which table is to be filled.
*/
void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
{
@@ -42,11 +42,11 @@ void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
}
EXPORT_SYMBOL(crc8_populate_msb);
-/*
+/**
* crc8_populate_lsb - fill crc table for given polynomial in regular bit order.
*
- * table: table to be filled.
- * polynomial: polynomial for which table is to be filled.
+ * @table: table to be filled.
+ * @polynomial: polynomial for which table is to be filled.
*/
void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
{
@@ -63,13 +63,13 @@ void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
}
EXPORT_SYMBOL(crc8_populate_lsb);
-/*
+/**
* crc8 - calculate a crc8 over the given input data.
*
- * table: crc table used for calculation.
- * pdata: pointer to data buffer.
- * nbytes: number of bytes in data buffer.
- * crc: previous returned crc8 value.
+ * @table: crc table used for calculation.
+ * @pdata: pointer to data buffer.
+ * @nbytes: number of bytes in data buffer.
+ * @crc: previous returned crc8 value.
*/
u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc)
{
diff --git a/lib/div64.c b/lib/div64.c
index 58e2a404097e..01c8602bb6ff 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -61,6 +61,12 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
EXPORT_SYMBOL(__div64_32);
#endif
+/**
+ * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
+ * @dividend: 64bit dividend
+ * @divisor: 32bit divisor
+ * @remainder: 32bit remainder
+ */
#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index 8dbfdf6445f8..e659a027036e 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -21,7 +21,7 @@ void dql_completed(struct dql *dql, unsigned int count)
unsigned int ovlimit, completed, num_queued;
bool all_prev_completed;
- num_queued = ACCESS_ONCE(dql->num_queued);
+ num_queued = READ_ONCE(dql->num_queued);
/* Can't complete more than what's in queue */
BUG_ON(count > num_queued - dql->num_completed);
diff --git a/lib/gcd.c b/lib/gcd.c
index 135ee6407a5e..227dea924425 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -13,6 +13,12 @@
#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS) && !defined(CPU_NO_EFFICIENT_FFS)
/* If __ffs is available, the even/odd algorithm benchmarks slower. */
+
+/**
+ * gcd - calculate and return the greatest common divisor of 2 unsigned longs
+ * @a: first value
+ * @b: second value
+ */
unsigned long gcd(unsigned long a, unsigned long b)
{
unsigned long r = a | b;
diff --git a/lib/idr.c b/lib/idr.c
index edd9b2be1651..2593ce513a18 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -171,7 +171,7 @@ void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id)
if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
return ERR_PTR(-ENOENT);
- __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL, NULL);
+ __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL);
return entry;
}
diff --git a/lib/llist.c b/lib/llist.c
index ae5872b1df0c..7062e931a7bb 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -41,7 +41,7 @@ bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
struct llist_node *first;
do {
- new_last->next = first = ACCESS_ONCE(head->first);
+ new_last->next = first = READ_ONCE(head->first);
} while (cmpxchg(&head->first, first, new_first) != first);
return !first;
diff --git a/lib/lshrdi3.c b/lib/lshrdi3.c
new file mode 100644
index 000000000000..8e845f4bb65f
--- /dev/null
+++ b/lib/lshrdi3.c
@@ -0,0 +1,45 @@
+/*
+ * lib/lshrdi3.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.
+ */
+
+#include <linux/module.h>
+#include <lib/libgcc.h>
+
+long long notrace __lshrdi3(long long u, word_type b)
+{
+ DWunion uu, w;
+ word_type bm;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+ bm = 32 - b;
+
+ if (bm <= 0) {
+ w.s.high = 0;
+ w.s.low = (unsigned int) uu.s.high >> -bm;
+ } else {
+ const unsigned int carries = (unsigned int) uu.s.high << bm;
+
+ w.s.high = (unsigned int) uu.s.high >> b;
+ w.s.low = ((unsigned int) uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+EXPORT_SYMBOL(__lshrdi3);
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index e24388a863a7..468fb7cd1221 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -26,6 +26,7 @@
* however I decided to publish this code under the plain GPL.
*/
+#include <linux/sched.h>
#include <linux/string.h>
#include "mpi-internal.h"
#include "longlong.h"
@@ -256,6 +257,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
}
e <<= 1;
c--;
+ cond_resched();
}
i--;
diff --git a/lib/muldi3.c b/lib/muldi3.c
new file mode 100644
index 000000000000..88938543e10a
--- /dev/null
+++ b/lib/muldi3.c
@@ -0,0 +1,72 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.
+ */
+
+#include <linux/export.h>
+#include <lib/libgcc.h>
+
+#define W_TYPE_SIZE 32
+
+#define __ll_B ((unsigned long) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((unsigned long) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((unsigned long) (t) >> (W_TYPE_SIZE / 2))
+
+/* If we still don't have umul_ppmm, define it using plain C. */
+#if !defined(umul_ppmm)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ unsigned long __x0, __x1, __x2, __x3; \
+ unsigned short __ul, __vl, __uh, __vh; \
+ \
+ __ul = __ll_lowpart(u); \
+ __uh = __ll_highpart(u); \
+ __vl = __ll_lowpart(v); \
+ __vh = __ll_highpart(v); \
+ \
+ __x0 = (unsigned long) __ul * __vl; \
+ __x1 = (unsigned long) __ul * __vh; \
+ __x2 = (unsigned long) __uh * __vl; \
+ __x3 = (unsigned long) __uh * __vh; \
+ \
+ __x1 += __ll_highpart(__x0); /* this can't give carry */\
+ __x1 += __x2; /* but this indeed can */ \
+ if (__x1 < __x2) /* did we get it? */ \
+ __x3 += __ll_B; /* yes, add it in the proper pos */ \
+ \
+ (w1) = __x3 + __ll_highpart(__x1); \
+ (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
+ } while (0)
+#endif
+
+#if !defined(__umulsidi3)
+#define __umulsidi3(u, v) ({ \
+ DWunion __w; \
+ umul_ppmm(__w.s.high, __w.s.low, u, v); \
+ __w.ll; \
+ })
+#endif
+
+long long notrace __muldi3(long long u, long long v)
+{
+ const DWunion uu = {.ll = u};
+ const DWunion vv = {.ll = v};
+ DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low)};
+
+ w.s.high += ((unsigned long) uu.s.low * (unsigned long) vv.s.high
+ + (unsigned long) uu.s.high * (unsigned long) vv.s.low);
+
+ return w.ll;
+}
+EXPORT_SYMBOL(__muldi3);
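umul_ppmm() above is the schoolbook decomposition of a 32x32->64 multiply into 16-bit halves, u*v = (uh*vh << 32) + ((uh*vl + ul*vh) << 16) + ul*vl, with the carry out of the middle terms folded into the high word. A sketch that checks the macro against a native 64-bit multiply (it assumes a 32-bit unsigned long, as the macro itself does):

	static unsigned long long check_umul(unsigned long u, unsigned long v)
	{
		unsigned long hi, lo;

		umul_ppmm(hi, lo, u, v);
		return ((unsigned long long)hi << 32) | lo;	/* == (u64)u * v */
	}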
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8b1feca1230a..c8d55565fafa 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -677,8 +677,7 @@ out:
* @root radix tree root
*/
static inline bool radix_tree_shrink(struct radix_tree_root *root,
- radix_tree_update_node_t update_node,
- void *private)
+ radix_tree_update_node_t update_node)
{
bool shrunk = false;
@@ -739,7 +738,7 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
if (!radix_tree_is_internal_node(child)) {
node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
if (update_node)
- update_node(node, private);
+ update_node(node);
}
WARN_ON_ONCE(!list_empty(&node->private_list));
@@ -752,7 +751,7 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
static bool delete_node(struct radix_tree_root *root,
struct radix_tree_node *node,
- radix_tree_update_node_t update_node, void *private)
+ radix_tree_update_node_t update_node)
{
bool deleted = false;
@@ -762,8 +761,8 @@ static bool delete_node(struct radix_tree_root *root,
if (node->count) {
if (node_to_entry(node) ==
rcu_dereference_raw(root->rnode))
- deleted |= radix_tree_shrink(root, update_node,
- private);
+ deleted |= radix_tree_shrink(root,
+ update_node);
return deleted;
}
@@ -1173,7 +1172,6 @@ static int calculate_count(struct radix_tree_root *root,
* @slot: pointer to slot in @node
* @item: new item to store in the slot.
* @update_node: callback for changing leaf nodes
- * @private: private data to pass to @update_node
*
* For use with __radix_tree_lookup(). Caller must hold tree write locked
* across slot lookup and replacement.
@@ -1181,7 +1179,7 @@ static int calculate_count(struct radix_tree_root *root,
void __radix_tree_replace(struct radix_tree_root *root,
struct radix_tree_node *node,
void __rcu **slot, void *item,
- radix_tree_update_node_t update_node, void *private)
+ radix_tree_update_node_t update_node)
{
void *old = rcu_dereference_raw(*slot);
int exceptional = !!radix_tree_exceptional_entry(item) -
@@ -1201,9 +1199,9 @@ void __radix_tree_replace(struct radix_tree_root *root,
return;
if (update_node)
- update_node(node, private);
+ update_node(node);
- delete_node(root, node, update_node, private);
+ delete_node(root, node, update_node);
}
/**
@@ -1225,7 +1223,7 @@ void __radix_tree_replace(struct radix_tree_root *root,
void radix_tree_replace_slot(struct radix_tree_root *root,
void __rcu **slot, void *item)
{
- __radix_tree_replace(root, NULL, slot, item, NULL, NULL);
+ __radix_tree_replace(root, NULL, slot, item, NULL);
}
EXPORT_SYMBOL(radix_tree_replace_slot);
@@ -1242,7 +1240,7 @@ void radix_tree_iter_replace(struct radix_tree_root *root,
const struct radix_tree_iter *iter,
void __rcu **slot, void *item)
{
- __radix_tree_replace(root, iter->node, slot, item, NULL, NULL);
+ __radix_tree_replace(root, iter->node, slot, item, NULL);
}
#ifdef CONFIG_RADIX_TREE_MULTIORDER
@@ -1972,7 +1970,6 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
* @root: radix tree root
* @node: node containing @index
* @update_node: callback for changing leaf nodes
- * @private: private data to pass to @update_node
*
* After clearing the slot at @index in @node from radix tree
* rooted at @root, call this function to attempt freeing the
@@ -1980,10 +1977,9 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
*/
void __radix_tree_delete_node(struct radix_tree_root *root,
struct radix_tree_node *node,
- radix_tree_update_node_t update_node,
- void *private)
+ radix_tree_update_node_t update_node)
{
- delete_node(root, node, update_node, private);
+ delete_node(root, node, update_node);
}
static bool __radix_tree_delete(struct radix_tree_root *root,
@@ -2001,7 +1997,7 @@ static bool __radix_tree_delete(struct radix_tree_root *root,
node_tag_clear(root, node, tag, offset);
replace_slot(slot, NULL, node, -1, exceptional);
- return node && delete_node(root, node, NULL, NULL);
+ return node && delete_node(root, node, NULL);
}
/**
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index be7b4dd6b68d..7c1c55f7daaa 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -370,41 +370,49 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
EXPORT_SYMBOL(sg_alloc_table);
/**
- * sg_alloc_table_from_pages - Allocate and initialize an sg table from
- * an array of pages
- * @sgt: The sg table header to use
- * @pages: Pointer to an array of page pointers
- * @n_pages: Number of pages in the pages array
- * @offset: Offset from start of the first page to the start of a buffer
- * @size: Number of valid bytes in the buffer (after offset)
- * @gfp_mask: GFP allocation mask
+ * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
+ * an array of pages
+ * @sgt: The sg table header to use
+ * @pages: Pointer to an array of page pointers
+ * @n_pages: Number of pages in the pages array
+ * @offset: Offset from start of the first page to the start of a buffer
+ * @size: Number of valid bytes in the buffer (after offset)
+ * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
+ * @gfp_mask: GFP allocation mask
*
* Description:
* Allocate and initialize an sg table from a list of pages. Contiguous
- * ranges of the pages are squashed into a single scatterlist node. A user
- * may provide an offset at a start and a size of valid data in a buffer
- * specified by the page array. The returned sg table is released by
- * sg_free_table.
+ * ranges of the pages are squashed into a single scatterlist node up to the
+ * maximum size specified in @max_segment. A user may provide an offset at a
+ * start and a size of valid data in a buffer specified by the page array.
+ * The returned sg table is released by sg_free_table.
*
* Returns:
* 0 on success, negative error on failure
*/
-int sg_alloc_table_from_pages(struct sg_table *sgt,
- struct page **pages, unsigned int n_pages,
- unsigned long offset, unsigned long size,
- gfp_t gfp_mask)
+int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
+ unsigned int n_pages, unsigned int offset,
+ unsigned long size, unsigned int max_segment,
+ gfp_t gfp_mask)
{
- unsigned int chunks;
- unsigned int i;
- unsigned int cur_page;
+ unsigned int chunks, cur_page, seg_len, i;
int ret;
struct scatterlist *s;
+ if (WARN_ON(!max_segment || offset_in_page(max_segment)))
+ return -EINVAL;
+
/* compute number of contiguous chunks */
chunks = 1;
- for (i = 1; i < n_pages; ++i)
- if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
- ++chunks;
+ seg_len = 0;
+ for (i = 1; i < n_pages; i++) {
+ seg_len += PAGE_SIZE;
+ if (seg_len >= max_segment ||
+ page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
+ chunks++;
+ seg_len = 0;
+ }
+ }
ret = sg_alloc_table(sgt, chunks, gfp_mask);
if (unlikely(ret))
@@ -413,17 +421,21 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
/* merging chunks and putting them into the scatterlist */
cur_page = 0;
for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
- unsigned long chunk_size;
- unsigned int j;
+ unsigned int j, chunk_size;
/* look for the end of the current chunk */
- for (j = cur_page + 1; j < n_pages; ++j)
- if (page_to_pfn(pages[j]) !=
+ seg_len = 0;
+ for (j = cur_page + 1; j < n_pages; j++) {
+ seg_len += PAGE_SIZE;
+ if (seg_len >= max_segment ||
+ page_to_pfn(pages[j]) !=
page_to_pfn(pages[j - 1]) + 1)
break;
+ }
chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
- sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
+ sg_set_page(s, pages[cur_page],
+ min_t(unsigned long, size, chunk_size), offset);
size -= chunk_size;
offset = 0;
cur_page = j;
@@ -431,6 +443,35 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
return 0;
}
+EXPORT_SYMBOL(__sg_alloc_table_from_pages);
+
+/**
+ * sg_alloc_table_from_pages - Allocate and initialize an sg table from
+ * an array of pages
+ * @sgt: The sg table header to use
+ * @pages: Pointer to an array of page pointers
+ * @n_pages: Number of pages in the pages array
+ * @offset: Offset from start of the first page to the start of a buffer
+ * @size: Number of valid bytes in the buffer (after offset)
+ * @gfp_mask: GFP allocation mask
+ *
+ * Description:
+ * Allocate and initialize an sg table from a list of pages. Contiguous
+ * ranges of the pages are squashed into a single scatterlist node. A user
+ * may provide an offset at a start and a size of valid data in a buffer
+ * specified by the page array. The returned sg table is released by
+ * sg_free_table.
+ *
+ * Returns:
+ * 0 on success, negative error on failure
+ */
+int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
+ unsigned int n_pages, unsigned int offset,
+ unsigned long size, gfp_t gfp_mask)
+{
+ return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
+ SCATTERLIST_MAX_SEGMENT, gfp_mask);
+}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
void __sg_page_iter_start(struct sg_page_iter *piter,
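__sg_alloc_table_from_pages() lets a caller cap the size of each coalesced segment, which matters for hardware with a per-segment DMA limit, while sg_alloc_table_from_pages() keeps its old behaviour by passing SCATTERLIST_MAX_SEGMENT. A hypothetical caller (the map_user_pages() name and the surrounding error handling are made up for illustration):

	static int map_user_pages(struct sg_table *sgt, struct page **pages,
				  unsigned int n_pages, unsigned long size,
				  unsigned int max_seg)
	{
		/* max_segment must be non-zero and page aligned */
		return __sg_alloc_table_from_pages(sgt, pages, n_pages, 0, size,
						   rounddown(max_seg, PAGE_SIZE),
						   GFP_KERNEL);
	}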
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 8c6c83ef57a4..cea19aaf303c 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -507,8 +507,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
if (no_iotlb_memory)
panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
- if (sme_active())
- pr_warn_once("SME is active and system is using DMA bounce buffers\n");
+ if (mem_encrypt_active())
+ pr_warn_once("%s is active and system is using DMA bounce buffers\n",
+ sme_active() ? "SME" : "SEV");
mask = dma_get_seg_boundary(hwdev);
diff --git a/lib/ucmpdi2.c b/lib/ucmpdi2.c
new file mode 100644
index 000000000000..49a53505c8e3
--- /dev/null
+++ b/lib/ucmpdi2.c
@@ -0,0 +1,35 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.
+ */
+
+#include <linux/module.h>
+#include <lib/libgcc.h>
+
+word_type __ucmpdi2(unsigned long long a, unsigned long long b)
+{
+ const DWunion au = {.ll = a};
+ const DWunion bu = {.ll = b};
+
+ if ((unsigned int) au.s.high < (unsigned int) bu.s.high)
+ return 0;
+ else if ((unsigned int) au.s.high > (unsigned int) bu.s.high)
+ return 2;
+ if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
+ return 0;
+ else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
+ return 2;
+ return 1;
+}
+EXPORT_SYMBOL(__ucmpdi2);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 86c3385b9eb3..1746bae94d41 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -620,8 +620,8 @@ char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_sp
rcu_read_lock();
for (i = 0; i < depth; i++, d = p) {
- p = ACCESS_ONCE(d->d_parent);
- array[i] = ACCESS_ONCE(d->d_name.name);
+ p = READ_ONCE(d->d_parent);
+ array[i] = READ_ONCE(d->d_name.name);
if (p == d) {
if (i)
array[i] = "";
diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c
index ac809b1e64f7..bd1d182419d7 100644
--- a/lib/xz/xz_dec_stream.c
+++ b/lib/xz/xz_dec_stream.c
@@ -583,6 +583,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
if (ret != XZ_OK)
return ret;
+ /* Fall through */
+
case SEQ_BLOCK_START:
/* We need one byte of input to continue. */
if (b->in_pos == b->in_size)
@@ -606,6 +608,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->temp.pos = 0;
s->sequence = SEQ_BLOCK_HEADER;
+ /* Fall through */
+
case SEQ_BLOCK_HEADER:
if (!fill_temp(s, b))
return XZ_OK;
@@ -616,6 +620,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_BLOCK_UNCOMPRESS;
+ /* Fall through */
+
case SEQ_BLOCK_UNCOMPRESS:
ret = dec_block(s, b);
if (ret != XZ_STREAM_END)
@@ -623,6 +629,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_BLOCK_PADDING;
+ /* Fall through */
+
case SEQ_BLOCK_PADDING:
/*
* Size of Compressed Data + Block Padding
@@ -643,6 +651,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_BLOCK_CHECK;
+ /* Fall through */
+
case SEQ_BLOCK_CHECK:
if (s->check_type == XZ_CHECK_CRC32) {
ret = crc32_validate(s, b);
@@ -665,6 +675,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_INDEX_PADDING;
+ /* Fall through */
+
case SEQ_INDEX_PADDING:
while ((s->index.size + (b->in_pos - s->in_start))
& 3) {
@@ -687,6 +699,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->sequence = SEQ_INDEX_CRC32;
+ /* Fall through */
+
case SEQ_INDEX_CRC32:
ret = crc32_validate(s, b);
if (ret != XZ_STREAM_END)
@@ -695,6 +709,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
s->temp.size = STREAM_HEADER_SIZE;
s->sequence = SEQ_STREAM_FOOTER;
+ /* Fall through */
+
case SEQ_STREAM_FOOTER:
if (!fill_temp(s, b))
return XZ_OK;
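The /* Fall through */ comments added throughout dec_main() document that each case of the state machine deliberately continues into the next one; with GCC's -Wimplicit-fallthrough enabled, a comment of this form is what suppresses the warning. A self-contained toy example of the same pattern (unrelated to the xz code itself):

	static int steps_remaining(int state)
	{
		int steps = 0;

		switch (state) {
		case 0:
			steps++;
			/* Fall through */
		case 1:
			steps++;
			/* Fall through */
		case 2:
			steps++;
			break;
		}
		return steps;
	}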
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 5b0adf1435de..e5e606ee5f71 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -11,7 +11,6 @@ config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
depends on DEBUG_KERNEL
depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
- depends on !KMEMCHECK
select PAGE_EXTENSION
select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
---help---
diff --git a/mm/Makefile b/mm/Makefile
index 4659b93cba43..e7ebd176fb93 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -17,7 +17,6 @@ KCOV_INSTRUMENT_slub.o := n
KCOV_INSTRUMENT_page_alloc.o := n
KCOV_INSTRUMENT_debug-pagealloc.o := n
KCOV_INSTRUMENT_kmemleak.o := n
-KCOV_INSTRUMENT_kmemcheck.o := n
KCOV_INSTRUMENT_memcontrol.o := n
KCOV_INSTRUMENT_mmzone.o := n
KCOV_INSTRUMENT_vmstat.o := n
@@ -70,7 +69,6 @@ obj-$(CONFIG_KSM) += ksm.o
obj-$(CONFIG_PAGE_POISONING) += page_poison.o
obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o
-obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
obj-$(CONFIG_KASAN) += kasan/
obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index e19606bb41a0..74b52dfd5852 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -1072,23 +1072,3 @@ out:
return ret;
}
EXPORT_SYMBOL(wait_iff_congested);
-
-int pdflush_proc_obsolete(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- char kbuf[] = "0\n";
-
- if (*ppos || *lenp < sizeof(kbuf)) {
- *lenp = 0;
- return 0;
- }
-
- if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
- return -EFAULT;
- pr_warn_once("%s exported in /proc is scheduled for removal\n",
- table->procname);
-
- *lenp = 2;
- *ppos += *lenp;
- return 2;
-}
diff --git a/mm/cma.c b/mm/cma.c
index 022e52bd8370..0607729abf3b 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -461,7 +461,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
trace_cma_alloc(pfn, page, count, align);
if (ret && !(gfp_mask & __GFP_NOWARN)) {
- pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
+ pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
__func__, count, ret);
cma_debug_show_areas(cma);
}
diff --git a/mm/debug.c b/mm/debug.c
index 6726bec731c9..d947f3e03b0d 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -105,7 +105,7 @@ void dump_mm(const struct mm_struct *mm)
"get_unmapped_area %p\n"
#endif
"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
- "pgd %p mm_users %d mm_count %d nr_ptes %lu nr_pmds %lu map_count %d\n"
+ "pgd %p mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
"start_code %lx end_code %lx start_data %lx end_data %lx\n"
@@ -135,8 +135,7 @@ void dump_mm(const struct mm_struct *mm)
mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
mm->pgd, atomic_read(&mm->mm_users),
atomic_read(&mm->mm_count),
- atomic_long_read((atomic_long_t *)&mm->nr_ptes),
- mm_nr_pmds((struct mm_struct *)mm),
+ mm_pgtables_bytes(mm),
mm->map_count,
mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
diff --git a/mm/filemap.c b/mm/filemap.c
index 5bcc87adbeeb..ee83baaf855d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -35,6 +35,7 @@
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
+#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include "internal.h"
@@ -134,7 +135,7 @@ static int page_cache_tree_insert(struct address_space *mapping,
*shadowp = p;
}
__radix_tree_replace(&mapping->page_tree, node, slot, page,
- workingset_update_node, mapping);
+ workingset_lookup_update(mapping));
mapping->nrpages++;
return 0;
}
@@ -162,9 +163,12 @@ static void page_cache_tree_delete(struct address_space *mapping,
radix_tree_clear_tags(&mapping->page_tree, node, slot);
__radix_tree_replace(&mapping->page_tree, node, slot, shadow,
- workingset_update_node, mapping);
+ workingset_lookup_update(mapping));
}
+ page->mapping = NULL;
+ /* Leave page->index set: truncation lookup relies upon it */
+
if (shadow) {
mapping->nrexceptional += nr;
/*
@@ -178,17 +182,11 @@ static void page_cache_tree_delete(struct address_space *mapping,
mapping->nrpages -= nr;
}
-/*
- * Delete a page from the page cache and free it. Caller has to make
- * sure the page is locked and that nobody else uses it - or that usage
- * is safe. The caller must hold the mapping's tree_lock.
- */
-void __delete_from_page_cache(struct page *page, void *shadow)
+static void unaccount_page_cache_page(struct address_space *mapping,
+ struct page *page)
{
- struct address_space *mapping = page->mapping;
- int nr = hpage_nr_pages(page);
+ int nr;
- trace_mm_filemap_delete_from_page_cache(page);
/*
* if we're uptodate, flush out into the cleancache, otherwise
* invalidate any existing cleancache entries. We can't leave
@@ -224,15 +222,12 @@ void __delete_from_page_cache(struct page *page, void *shadow)
}
}
- page_cache_tree_delete(mapping, page, shadow);
-
- page->mapping = NULL;
- /* Leave page->index set: truncation lookup relies upon it */
-
/* hugetlb pages do not participate in page cache accounting. */
if (PageHuge(page))
return;
+ nr = hpage_nr_pages(page);
+
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
if (PageSwapBacked(page)) {
__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
@@ -243,17 +238,51 @@ void __delete_from_page_cache(struct page *page, void *shadow)
}
/*
- * At this point page must be either written or cleaned by truncate.
- * Dirty page here signals a bug and loss of unwritten data.
+ * At this point page must be either written or cleaned by
+ * truncate. Dirty page here signals a bug and loss of
+ * unwritten data.
*
- * This fixes dirty accounting after removing the page entirely but
- * leaves PageDirty set: it has no effect for truncated page and
- * anyway will be cleared before returning page into buddy allocator.
+ * This fixes dirty accounting after removing the page entirely
+ * but leaves PageDirty set: it has no effect for truncated
+ * page and anyway will be cleared before returning page into
+ * buddy allocator.
*/
if (WARN_ON_ONCE(PageDirty(page)))
account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}
+/*
+ * Delete a page from the page cache and free it. Caller has to make
+ * sure the page is locked and that nobody else uses it - or that usage
+ * is safe. The caller must hold the mapping's tree_lock.
+ */
+void __delete_from_page_cache(struct page *page, void *shadow)
+{
+ struct address_space *mapping = page->mapping;
+
+ trace_mm_filemap_delete_from_page_cache(page);
+
+ unaccount_page_cache_page(mapping, page);
+ page_cache_tree_delete(mapping, page, shadow);
+}
+
+static void page_cache_free_page(struct address_space *mapping,
+ struct page *page)
+{
+ void (*freepage)(struct page *);
+
+ freepage = mapping->a_ops->freepage;
+ if (freepage)
+ freepage(page);
+
+ if (PageTransHuge(page) && !PageHuge(page)) {
+ page_ref_sub(page, HPAGE_PMD_NR);
+ VM_BUG_ON_PAGE(page_count(page) <= 0, page);
+ } else {
+ put_page(page);
+ }
+}
+
/**
* delete_from_page_cache - delete page from page cache
* @page: the page which the kernel is trying to remove from page cache
@@ -266,27 +295,98 @@ void delete_from_page_cache(struct page *page)
{
struct address_space *mapping = page_mapping(page);
unsigned long flags;
- void (*freepage)(struct page *);
BUG_ON(!PageLocked(page));
-
- freepage = mapping->a_ops->freepage;
-
spin_lock_irqsave(&mapping->tree_lock, flags);
__delete_from_page_cache(page, NULL);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
- if (freepage)
- freepage(page);
+ page_cache_free_page(mapping, page);
+}
+EXPORT_SYMBOL(delete_from_page_cache);
- if (PageTransHuge(page) && !PageHuge(page)) {
- page_ref_sub(page, HPAGE_PMD_NR);
- VM_BUG_ON_PAGE(page_count(page) <= 0, page);
- } else {
- put_page(page);
+/*
+ * page_cache_tree_delete_batch - delete several pages from page cache
+ * @mapping: the mapping to which pages belong
+ * @pvec: pagevec with pages to delete
+ *
+ * The function walks over mapping->page_tree and removes pages passed in @pvec
+ * from the radix tree. The function expects @pvec to be sorted by page index.
+ * It tolerates holes in @pvec (radix tree entries at those indices are not
+ * modified). The function expects only THP head pages to be present in the
+ * @pvec and takes care to delete all corresponding tail pages from the radix
+ * tree as well.
+ *
+ * The function expects mapping->tree_lock to be held.
+ */
+static void
+page_cache_tree_delete_batch(struct address_space *mapping,
+ struct pagevec *pvec)
+{
+ struct radix_tree_iter iter;
+ void **slot;
+ int total_pages = 0;
+ int i = 0, tail_pages = 0;
+ struct page *page;
+ pgoff_t start;
+
+ start = pvec->pages[0]->index;
+ radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
+ if (i >= pagevec_count(pvec) && !tail_pages)
+ break;
+ page = radix_tree_deref_slot_protected(slot,
+ &mapping->tree_lock);
+ if (radix_tree_exceptional_entry(page))
+ continue;
+ if (!tail_pages) {
+ /*
+ * Some page got inserted in our range? Skip it. We
+ * have our pages locked so they are protected from
+ * being removed.
+ */
+ if (page != pvec->pages[i])
+ continue;
+ WARN_ON_ONCE(!PageLocked(page));
+ if (PageTransHuge(page) && !PageHuge(page))
+ tail_pages = HPAGE_PMD_NR - 1;
+ page->mapping = NULL;
+ /*
+ * Leave page->index set: truncation lookup relies
+ * upon it
+ */
+ i++;
+ } else {
+ tail_pages--;
+ }
+ radix_tree_clear_tags(&mapping->page_tree, iter.node, slot);
+ __radix_tree_replace(&mapping->page_tree, iter.node, slot, NULL,
+ workingset_lookup_update(mapping));
+ total_pages++;
}
+ mapping->nrpages -= total_pages;
+}
+
+void delete_from_page_cache_batch(struct address_space *mapping,
+ struct pagevec *pvec)
+{
+ int i;
+ unsigned long flags;
+
+ if (!pagevec_count(pvec))
+ return;
+
+ spin_lock_irqsave(&mapping->tree_lock, flags);
+ for (i = 0; i < pagevec_count(pvec); i++) {
+ trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
+
+ unaccount_page_cache_page(mapping, pvec->pages[i]);
+ }
+ page_cache_tree_delete_batch(mapping, pvec);
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
+
+ for (i = 0; i < pagevec_count(pvec); i++)
+ page_cache_free_page(mapping, pvec->pages[i]);
}
-EXPORT_SYMBOL(delete_from_page_cache);
int filemap_check_errors(struct address_space *mapping)
{
@@ -419,20 +519,18 @@ static void __filemap_fdatawait_range(struct address_space *mapping,
if (end_byte < start_byte)
return;
- pagevec_init(&pvec, 0);
- while ((index <= end) &&
- (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
- PAGECACHE_TAG_WRITEBACK,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
+ pagevec_init(&pvec);
+ while (index <= end) {
unsigned i;
+ nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
+ end, PAGECACHE_TAG_WRITEBACK);
+ if (!nr_pages)
+ break;
+
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- /* until radix tree lookup accepts end_index */
- if (page->index > end)
- continue;
-
wait_on_page_writeback(page);
ClearPageError(page);
}
@@ -1755,9 +1853,10 @@ repeat:
EXPORT_SYMBOL(find_get_pages_contig);
/**
- * find_get_pages_tag - find and return pages that match @tag
+ * find_get_pages_range_tag - find and return pages in given range matching @tag
* @mapping: the address_space to search
* @index: the starting page index
+ * @end: The final page index (inclusive)
* @tag: the tag index
* @nr_pages: the maximum number of pages
* @pages: where the resulting pages are placed
@@ -1765,8 +1864,9 @@ EXPORT_SYMBOL(find_get_pages_contig);
* Like find_get_pages, except we only return pages which are tagged with
* @tag. We update @index to index the next page for the traversal.
*/
-unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
- int tag, unsigned int nr_pages, struct page **pages)
+unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
+ pgoff_t end, int tag, unsigned int nr_pages,
+ struct page **pages)
{
struct radix_tree_iter iter;
void **slot;
@@ -1779,6 +1879,9 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
radix_tree_for_each_tagged(slot, &mapping->page_tree,
&iter, *index, tag) {
struct page *head, *page;
+
+ if (iter.index > end)
+ break;
repeat:
page = radix_tree_deref_slot(slot);
if (unlikely(!page))
@@ -1820,18 +1923,28 @@ repeat:
}
pages[ret] = page;
- if (++ret == nr_pages)
- break;
+ if (++ret == nr_pages) {
+ *index = pages[ret - 1]->index + 1;
+ goto out;
+ }
}
+ /*
+ * We come here when we reach @end. We take care not to overflow the
+ * index @index as it confuses some of the callers. This breaks the
+ * iteration when there is a page at index -1, but that is already
+ * broken anyway.
+ */
+ if (end == (pgoff_t)-1)
+ *index = (pgoff_t)-1;
+ else
+ *index = end + 1;
+out:
rcu_read_unlock();
- if (ret)
- *index = pages[ret - 1]->index + 1;
-
return ret;
}
-EXPORT_SYMBOL(find_get_pages_tag);
+EXPORT_SYMBOL(find_get_pages_range_tag);
/**
* find_get_entries_tag - find and return entries that match @tag
@@ -2160,7 +2273,7 @@ no_cached_page:
* Ok, it wasn't cached, so we need to create a new
* page..
*/
- page = page_cache_alloc_cold(mapping);
+ page = page_cache_alloc(mapping);
if (!page) {
error = -ENOMEM;
goto out;
@@ -2272,7 +2385,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
int ret;
do {
- page = __page_cache_alloc(gfp_mask|__GFP_COLD);
+ page = __page_cache_alloc(gfp_mask);
if (!page)
return -ENOMEM;
@@ -2676,7 +2789,7 @@ static struct page *do_read_cache_page(struct address_space *mapping,
repeat:
page = find_get_page(mapping, index);
if (!page) {
- page = __page_cache_alloc(gfp | __GFP_COLD);
+ page = __page_cache_alloc(gfp);
if (!page)
return ERR_PTR(-ENOMEM);
err = add_to_page_cache_lru(page, mapping, index, gfp);
diff --git a/mm/gup.c b/mm/gup.c
index b2b4d4263768..dfcde13f289a 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1643,6 +1643,47 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
return 1;
}
+static void gup_pgd_range(unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ pgd_t *pgdp;
+
+ pgdp = pgd_offset(current->mm, addr);
+ do {
+ pgd_t pgd = READ_ONCE(*pgdp);
+
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ return;
+ if (unlikely(pgd_huge(pgd))) {
+ if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
+ pages, nr))
+ return;
+ } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
+ if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
+ PGDIR_SHIFT, next, write, pages, nr))
+ return;
+ } else if (!gup_p4d_range(pgd, addr, next, write, pages, nr))
+ return;
+ } while (pgdp++, addr = next, addr != end);
+}
+
+#ifndef gup_fast_permitted
+/*
+ * Check if it's allowed to use __get_user_pages_fast() for the range, or
+ * we need to fall back to the slow version:
+ */
+bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
+{
+ unsigned long len, end;
+
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+ return end >= start;
+}
+#endif
+
/*
* Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
* the regular GUP. It will only return non-negative values.
@@ -1650,10 +1691,8 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
- struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
- unsigned long next, flags;
- pgd_t *pgdp;
+ unsigned long flags;
int nr = 0;
start &= PAGE_MASK;
@@ -1677,45 +1716,15 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* block IPIs that come from THPs splitting.
*/
- local_irq_save(flags);
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = READ_ONCE(*pgdp);
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- break;
- if (unlikely(pgd_huge(pgd))) {
- if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
- pages, &nr))
- break;
- } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
- if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
- PGDIR_SHIFT, next, write, pages, &nr))
- break;
- } else if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
- break;
- } while (pgdp++, addr = next, addr != end);
- local_irq_restore(flags);
+ if (gup_fast_permitted(start, nr_pages, write)) {
+ local_irq_save(flags);
+ gup_pgd_range(addr, end, write, pages, &nr);
+ local_irq_restore(flags);
+ }
return nr;
}
-#ifndef gup_fast_permitted
-/*
- * Check if it's allowed to use __get_user_pages_fast() for the range, or
- * we need to fall back to the slow version:
- */
-bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
-{
- unsigned long len, end;
-
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- return end >= start;
-}
-#endif
-
/**
* get_user_pages_fast() - pin user pages in memory
* @start: starting user address
@@ -1735,12 +1744,22 @@ bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
+ unsigned long addr, len, end;
int nr = 0, ret = 0;
start &= PAGE_MASK;
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+
+ if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+ (void __user *)start, len)))
+ return 0;
if (gup_fast_permitted(start, nr_pages, write)) {
- nr = __get_user_pages_fast(start, nr_pages, write, pages);
+ local_irq_disable();
+ gup_pgd_range(addr, end, write, pages, &nr);
+ local_irq_enable();
ret = nr;
}
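With this rework, get_user_pages_fast() checks the range with access_ok() and then walks the page tables via gup_pgd_range() with interrupts disabled when gup_fast_permitted() allows it. A hypothetical driver-style caller, shown only to illustrate the calling convention (the pin_user_buf() name and error handling are made up):

	static int pin_user_buf(unsigned long uaddr, int nr_pages,
				struct page **pages)
	{
		int pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);

		if (pinned < nr_pages) {
			while (pinned > 0)
				put_page(pages[--pinned]);
			return -EFAULT;
		}
		return 0;
	}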
diff --git a/mm/hmm.c b/mm/hmm.c
index a88a847bccba..ea19742a5d60 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -803,11 +803,10 @@ static RADIX_TREE(hmm_devmem_radix, GFP_KERNEL);
static void hmm_devmem_radix_release(struct resource *resource)
{
- resource_size_t key, align_start, align_size, align_end;
+ resource_size_t key, align_start, align_size;
align_start = resource->start & ~(PA_SECTION_SIZE - 1);
align_size = ALIGN(resource_size(resource), PA_SECTION_SIZE);
- align_end = align_start + align_size - 1;
mutex_lock(&hmm_devmem_lock);
for (key = resource->start;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1981ed697dab..86fe697e8bfb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -39,10 +39,10 @@
#include "internal.h"
/*
- * By default transparent hugepage support is disabled in order that avoid
- * to risk increase the memory footprint of applications without a guaranteed
- * benefit. When transparent hugepage support is enabled, is for all mappings,
- * and khugepaged scans all mappings.
+ * By default, transparent hugepage support is disabled in order to avoid
+ * risking an increased memory footprint for applications that are not
+ * guaranteed to benefit from it. When transparent hugepage support is
+ * enabled, it is for all mappings, and khugepaged scans all mappings.
* Defrag is invoked by khugepaged hugepage allocations and by page faults
* for all hugepage allocations.
*/
@@ -606,7 +606,7 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
- atomic_long_inc(&vma->vm_mm->nr_ptes);
+ mm_inc_nr_ptes(vma->vm_mm);
spin_unlock(vmf->ptl);
count_vm_event(THP_FAULT_ALLOC);
}
@@ -662,7 +662,7 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
if (pgtable)
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, haddr, pmd, entry);
- atomic_long_inc(&mm->nr_ptes);
+ mm_inc_nr_ptes(mm);
return true;
}
@@ -747,7 +747,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
if (pgtable) {
pgtable_trans_huge_deposit(mm, pmd, pgtable);
- atomic_long_inc(&mm->nr_ptes);
+ mm_inc_nr_ptes(mm);
}
set_pmd_at(mm, addr, pmd, entry);
@@ -942,7 +942,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
set_pmd_at(src_mm, addr, src_pmd, pmd);
}
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
- atomic_long_inc(&dst_mm->nr_ptes);
+ mm_inc_nr_ptes(dst_mm);
pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
ret = 0;
@@ -978,7 +978,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
get_page(src_page);
page_dup_rmap(src_page, true);
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
- atomic_long_inc(&dst_mm->nr_ptes);
+ mm_inc_nr_ptes(dst_mm);
pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
pmdp_set_wrprotect(src_mm, addr, src_pmd);
@@ -1189,8 +1189,15 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
goto out_free_pages;
VM_BUG_ON_PAGE(!PageHead(page), page);
+ /*
+ * Leave pmd empty until pte is filled. Note that we must notify here,
+ * as a concurrent CPU thread might write to the new page before the
+ * call to mmu_notifier_invalidate_range_end() happens, which can lead
+ * to a device seeing memory writes in a different order than the CPU.
+ *
+ * See Documentation/vm/mmu_notifier.txt
+ */
pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
- /* leave pmd empty until pte is filled */
pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
pmd_populate(vma->vm_mm, &_pmd, pgtable);
@@ -1216,7 +1223,12 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
page_remove_rmap(page, true);
spin_unlock(vmf->ptl);
- mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
+ /*
+ * No need to double call mmu_notifier->invalidate_range() callback as
+ * the above pmdp_huge_clear_flush_notify() did already call it.
+ */
+ mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
+ mmun_end);
ret |= VM_FAULT_WRITE;
put_page(page);
@@ -1365,7 +1377,12 @@ alloc:
}
spin_unlock(vmf->ptl);
out_mn:
- mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
+ /*
+ * No need to double call mmu_notifier->invalidate_range() callback as
+ * the above pmdp_huge_clear_flush_notify() did already call it.
+ */
+ mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
+ mmun_end);
out:
return ret;
out_unlock:
@@ -1678,7 +1695,7 @@ static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pte_free(mm, pgtable);
- atomic_long_dec(&mm->nr_ptes);
+ mm_dec_nr_ptes(mm);
}
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
@@ -2017,7 +2034,12 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
out:
spin_unlock(ptl);
- mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PUD_SIZE);
+ /*
+ * No need to double call mmu_notifier->invalidate_range() callback as
+ * the above pudp_huge_clear_flush_notify() did already call it.
+ */
+ mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
+ HPAGE_PUD_SIZE);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
@@ -2029,8 +2051,15 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
pmd_t _pmd;
int i;
- /* leave pmd empty until pte is filled */
- pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+ /*
+ * Leave pmd empty until pte is filled. Note that it is fine to delay
+ * notification until mmu_notifier_invalidate_range_end() as we are
+ * replacing a write-protected zero page at the pmd level with a
+ * write-protected zero page at the pte level.
+ *
+ * See Documentation/vm/mmu_notifier.txt
+ */
+ pmdp_huge_clear_flush(vma, haddr, pmd);
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd_populate(mm, &_pmd, pgtable);
@@ -2085,6 +2114,15 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR);
return;
} else if (is_huge_zero_pmd(*pmd)) {
+ /*
+ * FIXME: Do we want to invalidate the secondary mmu by calling
+ * mmu_notifier_invalidate_range()? See the comments below inside
+ * __split_huge_pmd().
+ *
+ * We are going from a write-protected zero huge page to
+ * write-protected zero small pages, so it does not seem useful to
+ * invalidate the secondary mmu at this time.
+ */
return __split_huge_zero_page_pmd(vma, haddr, pmd);
}
@@ -2220,7 +2258,21 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
__split_huge_pmd_locked(vma, pmd, haddr, freeze);
out:
spin_unlock(ptl);
- mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
+ /*
+ * No need to double call mmu_notifier->invalidate_range() callback.
+ * There are 3 cases to consider inside __split_huge_pmd_locked():
+ * 1) pmdp_huge_clear_flush_notify() calls invalidate_range(), obviously.
+ * 2) __split_huge_zero_page_pmd() deals with a read-only zero page; any
+ * write fault will trigger a flush_notify before pointing to a new page
+ * (it is fine if the secondary mmu keeps pointing to the old zero
+ * page in the meantime).
+ * 3) Splitting a huge pmd into ptes pointing to the same page. No need
+ * to invalidate the secondary tlb entries; they are all still valid.
+ * Any further change to an individual pte will notify, so there is
+ * no need to call mmu_notifier->invalidate_range().
+ */
+ mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
+ HPAGE_PMD_SIZE);
}
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
@@ -2718,7 +2770,7 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
struct shrink_control *sc)
{
struct pglist_data *pgdata = NODE_DATA(sc->nid);
- return ACCESS_ONCE(pgdata->split_queue_len);
+ return READ_ONCE(pgdata->split_queue_len);
}
static unsigned long deferred_split_scan(struct shrinker *shrink,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2d2ff5e8bf2b..681b300185c0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3256,9 +3256,14 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
} else {
if (cow) {
+ /*
+ * No need to notify as we are downgrading page
+ * table protection, not changing it to point
+ * to a new page.
+ *
+ * See Documentation/vm/mmu_notifier.txt
+ */
huge_ptep_set_wrprotect(src, addr, src_pte);
- mmu_notifier_invalidate_range(src, mmun_start,
- mmun_end);
}
entry = huge_ptep_get(src_pte);
ptepage = pte_page(entry);
@@ -4318,7 +4323,12 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* and that page table be reused and filled with junk.
*/
flush_hugetlb_tlb_range(vma, start, end);
- mmu_notifier_invalidate_range(mm, start, end);
+ /*
+ * No need to call mmu_notifier_invalidate_range() as we are downgrading
+ * page table protection, not changing it to point to a new page.
+ *
+ * See Documentation/vm/mmu_notifier.txt
+ */
i_mmap_unlock_write(vma->vm_file->f_mapping);
mmu_notifier_invalidate_range_end(mm, start, end);
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 6f319fb81718..405bba487df5 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -337,7 +337,7 @@ static size_t optimal_redzone(size_t object_size)
}
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
- unsigned long *flags)
+ slab_flags_t *flags)
{
int redzone_adjust;
int orig_size = *size;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 43cb3043311b..ea4ff259b671 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1270,7 +1270,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
_pmd = pmdp_collapse_flush(vma, addr, pmd);
spin_unlock(ptl);
up_write(&vma->vm_mm->mmap_sem);
- atomic_long_dec(&vma->vm_mm->nr_ptes);
+ mm_dec_nr_ptes(vma->vm_mm);
pte_free(vma->vm_mm, pmd_pgtable(_pmd));
}
}
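The atomic_long_dec(&mm->nr_ptes) conversions above (and the mm_inc_nr_ptes()/mm_inc_nr_puds() calls later in mm/memory.c) switch page-table accounting to byte-based counters consumed by mm_pgtables_bytes(). The helpers themselves live in include/linux/mm.h, outside these hunks; a presumed sketch of their shape, only to make the call-site changes easier to follow:

/* Presumed helpers (include/linux/mm.h, not part of these hunks). */
static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return atomic_long_read(&mm->pgtables_bytes);
}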
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
index 800d64b854ea..cec594032515 100644
--- a/mm/kmemcheck.c
+++ b/mm/kmemcheck.c
@@ -1,126 +1 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/gfp.h>
-#include <linux/mm_types.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include "slab.h"
-#include <linux/kmemcheck.h>
-
-void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
-{
- struct page *shadow;
- int pages;
- int i;
-
- pages = 1 << order;
-
- /*
- * With kmemcheck enabled, we need to allocate a memory area for the
- * shadow bits as well.
- */
- shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
- if (!shadow) {
- if (printk_ratelimit())
- pr_err("kmemcheck: failed to allocate shadow bitmap\n");
- return;
- }
-
- for(i = 0; i < pages; ++i)
- page[i].shadow = page_address(&shadow[i]);
-
- /*
- * Mark it as non-present for the MMU so that our accesses to
- * this memory will trigger a page fault and let us analyze
- * the memory accesses.
- */
- kmemcheck_hide_pages(page, pages);
-}
-
-void kmemcheck_free_shadow(struct page *page, int order)
-{
- struct page *shadow;
- int pages;
- int i;
-
- if (!kmemcheck_page_is_tracked(page))
- return;
-
- pages = 1 << order;
-
- kmemcheck_show_pages(page, pages);
-
- shadow = virt_to_page(page[0].shadow);
-
- for(i = 0; i < pages; ++i)
- page[i].shadow = NULL;
-
- __free_pages(shadow, order);
-}
-
-void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
- size_t size)
-{
- if (unlikely(!object)) /* Skip object if allocation failed */
- return;
-
- /*
- * Has already been memset(), which initializes the shadow for us
- * as well.
- */
- if (gfpflags & __GFP_ZERO)
- return;
-
- /* No need to initialize the shadow of a non-tracked slab. */
- if (s->flags & SLAB_NOTRACK)
- return;
-
- if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
- /*
- * Allow notracked objects to be allocated from
- * tracked caches. Note however that these objects
- * will still get page faults on access, they just
- * won't ever be flagged as uninitialized. If page
- * faults are not acceptable, the slab cache itself
- * should be marked NOTRACK.
- */
- kmemcheck_mark_initialized(object, size);
- } else if (!s->ctor) {
- /*
- * New objects should be marked uninitialized before
- * they're returned to the called.
- */
- kmemcheck_mark_uninitialized(object, size);
- }
-}
-
-void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
-{
- /* TODO: RCU freeing is unsupported for now; hide false positives. */
- if (!s->ctor && !(s->flags & SLAB_TYPESAFE_BY_RCU))
- kmemcheck_mark_freed(object, size);
-}
-
-void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
- gfp_t gfpflags)
-{
- int pages;
-
- if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
- return;
-
- pages = 1 << order;
-
- /*
- * NOTE: We choose to track GFP_ZERO pages too; in fact, they
- * can become uninitialized by copying uninitialized memory
- * into them.
- */
-
- /* XXX: Can use zone->node for node? */
- kmemcheck_alloc_shadow(page, order, gfpflags, -1);
-
- if (gfpflags & __GFP_ZERO)
- kmemcheck_mark_initialized_pages(page, pages);
- else
- kmemcheck_mark_uninitialized_pages(page, pages);
-}
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 7780cd83a495..e4738d5e9b8c 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -110,7 +110,6 @@
#include <linux/atomic.h>
#include <linux/kasan.h>
-#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>
@@ -1238,9 +1237,6 @@ static bool update_checksum(struct kmemleak_object *object)
{
u32 old_csum = object->checksum;
- if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
- return false;
-
kasan_disable_current();
object->checksum = crc32(0, (void *)object->pointer, object->size);
kasan_enable_current();
@@ -1314,11 +1310,6 @@ static void scan_block(void *_start, void *_end,
if (scan_should_stop())
break;
- /* don't scan uninitialized memory */
- if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
- BYTES_PER_POINTER))
- continue;
-
kasan_disable_current();
pointer = *ptr;
kasan_enable_current();
@@ -2104,7 +2095,7 @@ static int __init kmemleak_late_init(void)
return -ENOMEM;
}
- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
+ dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
&kmemleak_fops);
if (!dentry)
pr_warn("Failed to create the debugfs kmemleak file\n");
diff --git a/mm/ksm.c b/mm/ksm.c
index 6cb60f46cce5..be8f4576f842 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1052,8 +1052,13 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
* So we clear the pte and flush the tlb before the check
* this assure us that no O_DIRECT can happen after the check
* or in the middle of the check.
+ *
+ * No need to notify as we are downgrading the page table to read
+ * only, not changing it to point to a new page.
+ *
+ * See Documentation/vm/mmu_notifier.txt
*/
- entry = ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
+ entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
/*
* Check that no O_DIRECT or similar I/O is in progress on the
* page
@@ -1136,7 +1141,13 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
}
flush_cache_page(vma, addr, pte_pfn(*ptep));
- ptep_clear_flush_notify(vma, addr, ptep);
+ /*
+ * No need to notify as we are replacing a read only page with another
+ * read only page with the same content.
+ *
+ * See Documentation/vm/mmu_notifier.txt
+ */
+ ptep_clear_flush(vma, addr, ptep);
set_pte_at_notify(mm, addr, ptep, newpte);
page_remove_rmap(page, false);
diff --git a/mm/list_lru.c b/mm/list_lru.c
index f141f0c80ff3..fd41e969ede5 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -221,6 +221,7 @@ restart:
switch (ret) {
case LRU_REMOVED_RETRY:
assert_spin_locked(&nlru->lock);
+ /* fall through */
case LRU_REMOVED:
isolated++;
nlru->nr_items--;
diff --git a/mm/memblock.c b/mm/memblock.c
index 91205780e6b1..46aacdfa4f4d 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -533,7 +533,7 @@ repeat:
base = obase;
nr_new = 0;
- for_each_memblock_type(type, rgn) {
+ for_each_memblock_type(idx, type, rgn) {
phys_addr_t rbase = rgn->base;
phys_addr_t rend = rbase + rgn->size;
@@ -637,7 +637,7 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,
if (memblock_double_array(type, base, size) < 0)
return -ENOMEM;
- for_each_memblock_type(type, rgn) {
+ for_each_memblock_type(idx, type, rgn) {
phys_addr_t rbase = rgn->base;
phys_addr_t rend = rbase + rgn->size;
@@ -1327,7 +1327,6 @@ again:
return NULL;
done:
ptr = phys_to_virt(alloc);
- memset(ptr, 0, size);
/*
* The min_count is set to 0 so that bootmem allocated blocks
@@ -1341,6 +1340,45 @@ done:
}
/**
+ * memblock_virt_alloc_try_nid_raw - allocate boot memory block without zeroing
+ * memory and without panicking
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @min_addr: the lower bound of the memory region from where the allocation
+ * is preferred (phys address)
+ * @max_addr: the upper bound of the memory region from where the allocation
+ * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
+ * allocate only from memory limited by memblock.current_limit value
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * Public function, provides additional debug information (including caller
+ * info), if enabled. Does not zero allocated memory, does not panic if request
+ * cannot be satisfied.
+ *
+ * RETURNS:
+ * Virtual address of allocated memory block on success, NULL on failure.
+ */
+void * __init memblock_virt_alloc_try_nid_raw(
+ phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid)
+{
+ void *ptr;
+
+ memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
+ __func__, (u64)size, (u64)align, nid, (u64)min_addr,
+ (u64)max_addr, (void *)_RET_IP_);
+
+ ptr = memblock_virt_alloc_internal(size, align,
+ min_addr, max_addr, nid);
+#ifdef CONFIG_DEBUG_VM
+ if (ptr && size > 0)
+ memset(ptr, 0xff, size);
+#endif
+ return ptr;
+}
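A hypothetical caller of the new raw allocator, only to illustrate its contract (no zeroing, no panic on failure); the function name below is made up for the example:

/* Hypothetical example: grab a node-local buffer at early boot and skip the
 * memset() because every byte will be overwritten by the caller anyway. */
static void * __init early_scratch_alloc(phys_addr_t size, int nid)
{
	void *buf;

	buf = memblock_virt_alloc_try_nid_raw(size, SMP_CACHE_BYTES,
					      0, BOOTMEM_ALLOC_ACCESSIBLE, nid);
	if (!buf)
		pr_warn("early_scratch_alloc: %llu bytes on node %d failed\n",
			(u64)size, nid);
	return buf;
}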
+
+/**
* memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
* @size: size of memory block to be allocated in bytes
* @align: alignment of the region and block's size
@@ -1351,8 +1389,8 @@ done:
* allocate only from memory limited by memblock.current_limit value
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
- * Public version of _memblock_virt_alloc_try_nid_nopanic() which provides
- * additional debug information (including caller info), if enabled.
+ * Public function, provides additional debug information (including caller
+ * info), if enabled. This function zeroes the allocated memory.
*
* RETURNS:
* Virtual address of allocated memory block on success, NULL on failure.
@@ -1362,11 +1400,17 @@ void * __init memblock_virt_alloc_try_nid_nopanic(
phys_addr_t min_addr, phys_addr_t max_addr,
int nid)
{
+ void *ptr;
+
memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
__func__, (u64)size, (u64)align, nid, (u64)min_addr,
(u64)max_addr, (void *)_RET_IP_);
- return memblock_virt_alloc_internal(size, align, min_addr,
- max_addr, nid);
+
+ ptr = memblock_virt_alloc_internal(size, align,
+ min_addr, max_addr, nid);
+ if (ptr)
+ memset(ptr, 0, size);
+ return ptr;
}
/**
@@ -1380,7 +1424,7 @@ void * __init memblock_virt_alloc_try_nid_nopanic(
* allocate only from memory limited by memblock.current_limit value
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
*
- * Public panicking version of _memblock_virt_alloc_try_nid_nopanic()
+ * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
* which provides debug information (including caller info), if enabled,
* and panics if the request can not be satisfied.
*
@@ -1399,8 +1443,10 @@ void * __init memblock_virt_alloc_try_nid(
(u64)max_addr, (void *)_RET_IP_);
ptr = memblock_virt_alloc_internal(size, align,
min_addr, max_addr, nid);
- if (ptr)
+ if (ptr) {
+ memset(ptr, 0, size);
return ptr;
+ }
panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
__func__, (u64)size, (u64)align, nid, (u64)min_addr,
@@ -1715,7 +1761,7 @@ static void __init_memblock memblock_dump(struct memblock_type *type)
pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);
- for_each_memblock_type(type, rgn) {
+ for_each_memblock_type(idx, type, rgn) {
char nid_buf[32] = "";
base = rgn->base;
@@ -1739,7 +1785,7 @@ memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
unsigned long size = 0;
int idx;
- for_each_memblock_type((&memblock.reserved), rgn) {
+ for_each_memblock_type(idx, (&memblock.reserved), rgn) {
phys_addr_t start, end;
if (rgn->base + rgn->size < start_addr)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 661f046ad318..50e6906314f8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4049,7 +4049,7 @@ static struct cftype mem_cgroup_legacy_files[] = {
.write = mem_cgroup_reset,
.read_u64 = mem_cgroup_read_u64,
},
-#ifdef CONFIG_SLABINFO
+#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
{
.name = "kmem.slabinfo",
.seq_start = memcg_slab_start,
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 88366626c0b7..4acdf393a801 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1587,7 +1587,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
MIGRATE_SYNC, MR_MEMORY_FAILURE);
if (ret) {
- pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n",
+ pr_info("soft offline: %#lx: hugepage migration failed %d, type %lx (%pGp)\n",
pfn, ret, page->flags, &page->flags);
if (!list_empty(&pagelist))
putback_movable_pages(&pagelist);
diff --git a/mm/memory.c b/mm/memory.c
index a728bed16c20..85e7a87da79f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -438,7 +438,7 @@ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
pgtable_t token = pmd_pgtable(*pmd);
pmd_clear(pmd);
pte_free_tlb(tlb, token, addr);
- atomic_long_dec(&tlb->mm->nr_ptes);
+ mm_dec_nr_ptes(tlb->mm);
}
static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -506,6 +506,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
pud = pud_offset(p4d, start);
p4d_clear(p4d);
pud_free_tlb(tlb, pud, start);
+ mm_dec_nr_puds(tlb->mm);
}
static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -665,7 +666,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
ptl = pmd_lock(mm, pmd);
if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
- atomic_long_inc(&mm->nr_ptes);
+ mm_inc_nr_ptes(mm);
pmd_populate(mm, pmd, new);
new = NULL;
}
@@ -2554,7 +2555,11 @@ static int wp_page_copy(struct vm_fault *vmf)
put_page(new_page);
pte_unmap_unlock(vmf->pte, vmf->ptl);
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+ /*
+ * No need to double call mmu_notifier->invalidate_range() callback as
+ * the above ptep_clear_flush_notify() did already call it.
+ */
+ mmu_notifier_invalidate_range_only_end(mm, mmun_start, mmun_end);
if (old_page) {
/*
* Don't let another task, with possibly unlocked vma,
@@ -2842,7 +2847,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
int do_swap_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- struct page *page = NULL, *swapcache;
+ struct page *page = NULL, *swapcache = NULL;
struct mem_cgroup *memcg;
struct vma_swap_readahead swap_ra;
swp_entry_t entry;
@@ -2881,17 +2886,36 @@ int do_swap_page(struct vm_fault *vmf)
}
goto out;
}
+
+
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
if (!page)
page = lookup_swap_cache(entry, vma_readahead ? vma : NULL,
vmf->address);
if (!page) {
- if (vma_readahead)
- page = do_swap_page_readahead(entry,
- GFP_HIGHUSER_MOVABLE, vmf, &swap_ra);
- else
- page = swapin_readahead(entry,
- GFP_HIGHUSER_MOVABLE, vma, vmf->address);
+ struct swap_info_struct *si = swp_swap_info(entry);
+
+ if (si->flags & SWP_SYNCHRONOUS_IO &&
+ __swap_count(si, entry) == 1) {
+ /* skip swapcache */
+ page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
+ if (page) {
+ __SetPageLocked(page);
+ __SetPageSwapBacked(page);
+ set_page_private(page, entry.val);
+ lru_cache_add_anon(page);
+ swap_readpage(page, true);
+ }
+ } else {
+ if (vma_readahead)
+ page = do_swap_page_readahead(entry,
+ GFP_HIGHUSER_MOVABLE, vmf, &swap_ra);
+ else
+ page = swapin_readahead(entry,
+ GFP_HIGHUSER_MOVABLE, vma, vmf->address);
+ swapcache = page;
+ }
+
if (!page) {
/*
* Back out if somebody else faulted in this pte
@@ -2920,7 +2944,6 @@ int do_swap_page(struct vm_fault *vmf)
goto out_release;
}
- swapcache = page;
locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -2935,7 +2958,8 @@ int do_swap_page(struct vm_fault *vmf)
* test below, are not enough to exclude that. Even if it is still
* swapcache, we need to check that the page's swap has not changed.
*/
- if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
+ if (unlikely((!PageSwapCache(page) ||
+ page_private(page) != entry.val)) && swapcache)
goto out_page;
page = ksm_might_need_to_copy(page, vma, vmf->address);
@@ -2988,14 +3012,16 @@ int do_swap_page(struct vm_fault *vmf)
pte = pte_mksoft_dirty(pte);
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
vmf->orig_pte = pte;
- if (page == swapcache) {
- do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
- mem_cgroup_commit_charge(page, memcg, true, false);
- activate_page(page);
- } else { /* ksm created a completely new copy */
+
+ /* ksm created a completely new copy */
+ if (unlikely(page != swapcache && swapcache)) {
page_add_new_anon_rmap(page, vma, vmf->address, false);
mem_cgroup_commit_charge(page, memcg, false, false);
lru_cache_add_active_or_unevictable(page, vma);
+ } else {
+ do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
+ mem_cgroup_commit_charge(page, memcg, true, false);
+ activate_page(page);
}
swap_free(entry);
@@ -3003,7 +3029,7 @@ int do_swap_page(struct vm_fault *vmf)
(vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
unlock_page(page);
- if (page != swapcache) {
+ if (page != swapcache && swapcache) {
/*
* Hold the lock to avoid the swap entry to be reused
* until we take the PT lock for the pte_same() check
@@ -3036,7 +3062,7 @@ out_page:
unlock_page(page);
out_release:
put_page(page);
- if (page != swapcache) {
+ if (page != swapcache && swapcache) {
unlock_page(swapcache);
put_page(swapcache);
}
@@ -3212,7 +3238,7 @@ static int pte_alloc_one_map(struct vm_fault *vmf)
goto map_pte;
}
- atomic_long_inc(&vma->vm_mm->nr_ptes);
+ mm_inc_nr_ptes(vma->vm_mm);
pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
spin_unlock(vmf->ptl);
vmf->prealloc_pte = NULL;
@@ -3271,7 +3297,7 @@ static void deposit_prealloc_pte(struct vm_fault *vmf)
* We are going to consume the prealloc table,
* count that as nr_ptes.
*/
- atomic_long_inc(&vma->vm_mm->nr_ptes);
+ mm_inc_nr_ptes(vma->vm_mm);
vmf->prealloc_pte = NULL;
}
@@ -3891,9 +3917,9 @@ static int handle_pte_fault(struct vm_fault *vmf)
/*
* some architectures can have larger ptes than wordsize,
* e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and
- * CONFIG_32BIT=y, so READ_ONCE or ACCESS_ONCE cannot guarantee
- * atomic accesses. The code below just needs a consistent
- * view for the ifs and we later double check anyway with the
+ * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
+ * accesses. The code below just needs a consistent view
+ * for the ifs and we later double check anyway with the
* ptl lock held. So here a barrier will do.
*/
barrier();
@@ -4124,15 +4150,17 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_5LEVEL_HACK
- if (p4d_present(*p4d)) /* Another has populated it */
- pud_free(mm, new);
- else
+ if (!p4d_present(*p4d)) {
+ mm_inc_nr_puds(mm);
p4d_populate(mm, p4d, new);
-#else
- if (pgd_present(*p4d)) /* Another has populated it */
+ } else /* Another has populated it */
pud_free(mm, new);
- else
+#else
+ if (!pgd_present(*p4d)) {
+ mm_inc_nr_puds(mm);
pgd_populate(mm, p4d, new);
+ } else /* Another has populated it */
+ pud_free(mm, new);
#endif /* __ARCH_HAS_5LEVEL_HACK */
spin_unlock(&mm->page_table_lock);
return 0;
@@ -4457,17 +4485,15 @@ void print_vma_addr(char *prefix, unsigned long ip)
struct vm_area_struct *vma;
/*
- * Do not print if we are in atomic
- * contexts (in exception stacks, etc.):
+ * we might be running from an atomic context so we cannot sleep
*/
- if (preempt_count())
+ if (!down_read_trylock(&mm->mmap_sem))
return;
- down_read(&mm->mmap_sem);
vma = find_vma(mm, ip);
if (vma && vma->vm_file) {
struct file *f = vma->vm_file;
- char *buf = (char *)__get_free_page(GFP_KERNEL);
+ char *buf = (char *)__get_free_page(GFP_NOWAIT);
if (buf) {
char *p;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index d4b5f29906b9..c52aa05b106c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -265,7 +265,7 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
/*
* Make all the pages reserved so that nobody will stumble over half
* initialized state.
- * FIXME: We also have to associate it with a node because pfn_to_node
+ * FIXME: We also have to associate it with a node because page_to_nid
* relies on having page with the proper node.
*/
for (i = 0; i < PAGES_PER_SECTION; i++) {
@@ -1590,11 +1590,11 @@ static void node_states_clear_node(int node, struct memory_notify *arg)
}
static int __ref __offline_pages(unsigned long start_pfn,
- unsigned long end_pfn, unsigned long timeout)
+ unsigned long end_pfn)
{
- unsigned long pfn, nr_pages, expire;
+ unsigned long pfn, nr_pages;
long offlined_pages;
- int ret, drain, retry_max, node;
+ int ret, node;
unsigned long flags;
unsigned long valid_start, valid_end;
struct zone *zone;
@@ -1630,44 +1630,22 @@ static int __ref __offline_pages(unsigned long start_pfn,
goto failed_removal;
pfn = start_pfn;
- expire = jiffies + timeout;
- drain = 0;
- retry_max = 5;
repeat:
/* start memory hot removal */
- ret = -EAGAIN;
- if (time_after(jiffies, expire))
- goto failed_removal;
ret = -EINTR;
if (signal_pending(current))
goto failed_removal;
- ret = 0;
- if (drain) {
- lru_add_drain_all_cpuslocked();
- cond_resched();
- drain_all_pages(zone);
- }
+
+ cond_resched();
+ lru_add_drain_all_cpuslocked();
+ drain_all_pages(zone);
pfn = scan_movable_pages(start_pfn, end_pfn);
if (pfn) { /* We have movable pages */
ret = do_migrate_range(pfn, end_pfn);
- if (!ret) {
- drain = 1;
- goto repeat;
- } else {
- if (ret < 0)
- if (--retry_max == 0)
- goto failed_removal;
- yield();
- drain = 1;
- goto repeat;
- }
+ goto repeat;
}
- /* drain all zone's lru pagevec, this is asynchronous... */
- lru_add_drain_all_cpuslocked();
- yield();
- /* drain pcp pages, this is synchronous. */
- drain_all_pages(zone);
+
/*
* dissolve free hugepages in the memory block before doing offlining
* actually in order to make hugetlbfs's object counting consistent.
@@ -1677,10 +1655,8 @@ repeat:
goto failed_removal;
/* check again */
offlined_pages = check_pages_isolated(start_pfn, end_pfn);
- if (offlined_pages < 0) {
- ret = -EBUSY;
- goto failed_removal;
- }
+ if (offlined_pages < 0)
+ goto repeat;
pr_info("Offlined Pages %ld\n", offlined_pages);
/* Ok, all of our target is isolated.
We cannot do rollback at this point. */
@@ -1728,7 +1704,7 @@ failed_removal:
/* Must be protected by mem_hotplug_begin() or a device_lock */
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
- return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
+ return __offline_pages(start_pfn, start_pfn + nr_pages);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index a2af6d58a68f..4ce44d3ff03d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -85,6 +85,7 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
+#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
@@ -1365,7 +1366,6 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
const unsigned long __user *, old_nodes,
const unsigned long __user *, new_nodes)
{
- const struct cred *cred = current_cred(), *tcred;
struct mm_struct *mm = NULL;
struct task_struct *task;
nodemask_t task_nodes;
@@ -1401,15 +1401,10 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
err = -EINVAL;
/*
- * Check if this process has the right to modify the specified
- * process. The right exists if the process has administrative
- * capabilities, superuser privileges or the same
- * userid as the target process.
+ * Check if this process has the right to modify the specified process.
+ * Use the regular "ptrace_may_access()" checks.
*/
- tcred = __task_cred(task);
- if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
- !capable(CAP_SYS_NICE)) {
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
rcu_read_unlock();
err = -EPERM;
goto out_put;
@@ -1920,6 +1915,9 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
struct page *page;
page = __alloc_pages(gfp, order, nid);
+ /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
+ if (!static_branch_likely(&vm_numa_stat_key))
+ return page;
if (page && page_to_nid(page) == nid) {
preempt_disable();
__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
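The static_branch_likely(&vm_numa_stat_key) check here (and the matching one added to zone_statistics() in mm/page_alloc.c below, where the key is defined with DEFINE_STATIC_KEY_TRUE) is a static key, so disabling NUMA stats reduces both checks to a patched jump. The knob that flips the key is outside these hunks; a presumed control path, for illustration only:

/* Presumed control path (not part of these hunks): a sysctl handler flips the
 * key, turning the NUMA stat updates above into a single patched branch. */
static void vm_numa_stat_set(bool enabled)
{
	if (enabled)
		static_branch_enable(&vm_numa_stat_key);
	else
		static_branch_disable(&vm_numa_stat_key);
}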
diff --git a/mm/mempool.c b/mm/mempool.c
index c4a23cdae3f0..7d8c5a0010a2 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -189,7 +189,7 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
if (!pool)
return NULL;
- pool->elements = kmalloc_node(min_nr * sizeof(void *),
+ pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
gfp_mask, node_id);
if (!pool->elements) {
kfree(pool);
diff --git a/mm/migrate.c b/mm/migrate.c
index 1236449b4777..4d0be47a322a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2089,7 +2089,11 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
spin_unlock(ptl);
- mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+ /*
+ * No need to double call mmu_notifier->invalidate_range() callback as
+ * the above pmdp_huge_clear_flush_notify() did already call it.
+ */
+ mmu_notifier_invalidate_range_only_end(mm, mmun_start, mmun_end);
/* Take an "isolate" reference and put new page on the LRU. */
get_page(new_page);
@@ -2805,9 +2809,14 @@ static void migrate_vma_pages(struct migrate_vma *migrate)
migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
}
+ /*
+ * No need to double call mmu_notifier->invalidate_range() callback as
+ * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
+ * did already call it.
+ */
if (notified)
- mmu_notifier_invalidate_range_end(mm, mmu_start,
- migrate->end);
+ mmu_notifier_invalidate_range_only_end(mm, mmu_start,
+ migrate->end);
}
/*
diff --git a/mm/mlock.c b/mm/mlock.c
index 46af369c13e5..30472d438794 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -289,7 +289,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
struct pagevec pvec_putback;
int pgrescued = 0;
- pagevec_init(&pvec_putback, 0);
+ pagevec_init(&pvec_putback);
/* Phase 1: page isolation */
spin_lock_irq(zone_lru_lock(zone));
@@ -448,7 +448,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
struct pagevec pvec;
struct zone *zone;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
/*
* Although FOLL_DUMP is intended for get_dump_page(),
* it just so happens that its special treatment of the
@@ -670,8 +670,6 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
if (!can_do_mlock())
return -EPERM;
- lru_add_drain_all(); /* flush pagevec */
-
len = PAGE_ALIGN(len + (offset_in_page(start)));
start &= PAGE_MASK;
@@ -798,9 +796,6 @@ SYSCALL_DEFINE1(mlockall, int, flags)
if (!can_do_mlock())
return -EPERM;
- if (flags & MCL_CURRENT)
- lru_add_drain_all(); /* flush pagevec */
-
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 314285284e6e..96edb33fd09a 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -190,7 +190,9 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);
void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+ unsigned long start,
+ unsigned long end,
+ bool only_end)
{
struct mmu_notifier *mn;
int id;
@@ -204,8 +206,13 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
* subsystem registers either invalidate_range_start()/end() or
* invalidate_range(), so this will be no additional overhead
* (besides the pointer check).
+ *
+ * We skip the call to invalidate_range() if we know it is safe, i.e.
+ * the call site used mmu_notifier_invalidate_range_only_end(), which
+ * is only safe when a call to invalidate_range() has already happened
+ * under the page table lock.
*/
- if (mn->ops->invalidate_range)
+ if (!only_end && mn->ops->invalidate_range)
mn->ops->invalidate_range(mn, mm, start, end);
if (mn->ops->invalidate_range_end)
mn->ops->invalidate_range_end(mn, mm, start, end);
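The new only_end argument is what the mmu_notifier_invalidate_range_only_end() call sites earlier in this series rely on. The wrapper inlines live in include/linux/mmu_notifier.h, which is not part of this hunk; a presumed sketch, only to connect the call sites to the flag:

/* Presumed wrappers (include/linux/mmu_notifier.h, outside this hunk). */
static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, false);
}

static inline void
mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, true);
}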
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index dee0f75c3013..c86fbd1b590e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -44,6 +44,7 @@
#include <asm/tlb.h>
#include "internal.h"
+#include "slab.h"
#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>
@@ -161,6 +162,25 @@ static bool oom_unkillable_task(struct task_struct *p,
return false;
}
+/*
+ * Print out unreclaimable slab info when the amount of unreclaimable slab
+ * memory is greater than all user memory (LRU pages)
+ */
+static bool is_dump_unreclaim_slabs(void)
+{
+ unsigned long nr_lru;
+
+ nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
+ global_node_page_state(NR_INACTIVE_ANON) +
+ global_node_page_state(NR_ACTIVE_FILE) +
+ global_node_page_state(NR_INACTIVE_FILE) +
+ global_node_page_state(NR_ISOLATED_ANON) +
+ global_node_page_state(NR_ISOLATED_FILE) +
+ global_node_page_state(NR_UNEVICTABLE);
+
+ return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru);
+}
+
/**
* oom_badness - heuristic function to determine which candidate task to kill
* @p: task struct of which task we should calculate
@@ -201,7 +221,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
* task's rss, pagetable and swap space use.
*/
points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
- atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
+ mm_pgtables_bytes(p->mm) / PAGE_SIZE;
task_unlock(p);
/*
@@ -369,15 +389,15 @@ static void select_bad_process(struct oom_control *oc)
* Dumps the current memory state of all eligible tasks. Tasks not in the same
* memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
* are not shown.
- * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
- * swapents, oom_score_adj value, and name.
+ * State information includes task's pid, uid, tgid, vm size, rss,
+ * pgtables_bytes, swapents, oom_score_adj value, and name.
*/
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
struct task_struct *p;
struct task_struct *task;
- pr_info("[ pid ] uid tgid total_vm rss nr_ptes nr_pmds swapents oom_score_adj name\n");
+ pr_info("[ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name\n");
rcu_read_lock();
for_each_process(p) {
if (oom_unkillable_task(p, memcg, nodemask))
@@ -393,11 +413,10 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
continue;
}
- pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu %5hd %s\n",
+ pr_info("[%5d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
task->pid, from_kuid(&init_user_ns, task_uid(task)),
task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
- atomic_long_read(&task->mm->nr_ptes),
- mm_nr_pmds(task->mm),
+ mm_pgtables_bytes(task->mm),
get_mm_counter(task->mm, MM_SWAPENTS),
task->signal->oom_score_adj, task->comm);
task_unlock(task);
@@ -407,23 +426,22 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
static void dump_header(struct oom_control *oc, struct task_struct *p)
{
- pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=",
- current->comm, oc->gfp_mask, &oc->gfp_mask);
- if (oc->nodemask)
- pr_cont("%*pbl", nodemask_pr_args(oc->nodemask));
- else
- pr_cont("(null)");
- pr_cont(", order=%d, oom_score_adj=%hd\n",
- oc->order, current->signal->oom_score_adj);
+ pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n",
+ current->comm, oc->gfp_mask, &oc->gfp_mask,
+ nodemask_pr_args(oc->nodemask), oc->order,
+ current->signal->oom_score_adj);
if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
pr_warn("COMPACTION is disabled!!!\n");
cpuset_print_current_mems_allowed();
dump_stack();
- if (oc->memcg)
+ if (is_memcg_oom(oc))
mem_cgroup_print_oom_info(oc->memcg, p);
- else
+ else {
show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
+ if (is_dump_unreclaim_slabs())
+ dump_unreclaimable_slab();
+ }
if (sysctl_oom_dump_tasks)
dump_tasks(oc->memcg, oc->nodemask);
}
@@ -618,9 +636,6 @@ static int oom_reaper(void *unused)
static void wake_oom_reaper(struct task_struct *tsk)
{
- if (!oom_reaper_th)
- return;
-
/* tsk is already queued? */
if (tsk == oom_reaper_list || tsk->oom_reaper_list)
return;
@@ -638,11 +653,6 @@ static void wake_oom_reaper(struct task_struct *tsk)
static int __init oom_init(void)
{
oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
- if (IS_ERR(oom_reaper_th)) {
- pr_err("Unable to start OOM reaper %ld. Continuing regardless\n",
- PTR_ERR(oom_reaper_th));
- oom_reaper_th = NULL;
- }
return 0;
}
subsys_initcall(oom_init)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0b9c5cbe8eba..8a1551154285 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -433,8 +433,11 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
else
bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
- if (bg_thresh >= thresh)
+ if (unlikely(bg_thresh >= thresh)) {
+ pr_warn("vm direct limit must be set greater than background limit.\n");
bg_thresh = thresh / 2;
+ }
+
tsk = current;
if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
@@ -625,9 +628,9 @@ EXPORT_SYMBOL_GPL(wb_writeout_inc);
* On idle system, we can be called long after we scheduled because we use
* deferred timers so count with missed periods.
*/
-static void writeout_period(unsigned long t)
+static void writeout_period(struct timer_list *t)
{
- struct wb_domain *dom = (void *)t;
+ struct wb_domain *dom = from_timer(dom, t, period_timer);
int miss_periods = (jiffies - dom->period_time) /
VM_COMPLETIONS_PERIOD_LEN;
@@ -650,8 +653,7 @@ int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
spin_lock_init(&dom->lock);
- setup_deferrable_timer(&dom->period_timer, writeout_period,
- (unsigned long)dom);
+ timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);
dom->dirty_limit_tstamp = jiffies;
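writeout_period() above is converted to the timer_list-based callback API: the callback now receives the struct timer_list pointer and recovers its container with from_timer(), while timer_setup() with TIMER_DEFERRABLE replaces the open-coded deferrable setup. A minimal sketch of the same pattern on a made-up structure (foo/foo_poll are illustrative names only):

struct foo {
	struct timer_list poll_timer;
	unsigned long last_jiffies;
};

static void foo_poll(struct timer_list *t)
{
	struct foo *f = from_timer(f, t, poll_timer);	/* container from timer */

	f->last_jiffies = jiffies;
	mod_timer(&f->poll_timer, jiffies + HZ);	/* re-arm for the next period */
}

static void foo_init(struct foo *f)
{
	timer_setup(&f->poll_timer, foo_poll, TIMER_DEFERRABLE);
	mod_timer(&f->poll_timer, jiffies + HZ);
}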
@@ -1543,7 +1545,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
* actually dirty; with m+n sitting in the percpu
* deltas.
*/
- if (dtc->wb_thresh < 2 * wb_stat_error(wb)) {
+ if (dtc->wb_thresh < 2 * wb_stat_error()) {
wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
} else {
@@ -1559,8 +1561,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
* If we're over `background_thresh' then the writeback threads are woken to
* perform some writeout.
*/
-static void balance_dirty_pages(struct address_space *mapping,
- struct bdi_writeback *wb,
+static void balance_dirty_pages(struct bdi_writeback *wb,
unsigned long pages_dirtied)
{
struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
@@ -1802,7 +1803,7 @@ pause:
* more page. However wb_dirty has accounting errors. So use
* the larger and more IO friendly wb_stat_error.
*/
- if (sdtc->wb_dirty <= wb_stat_error(wb))
+ if (sdtc->wb_dirty <= wb_stat_error())
break;
if (fatal_signal_pending(current))
@@ -1910,7 +1911,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
preempt_enable();
if (unlikely(current->nr_dirtied >= ratelimit))
- balance_dirty_pages(mapping, wb, current->nr_dirtied);
+ balance_dirty_pages(wb, current->nr_dirtied);
wb_put(wb);
}
@@ -1972,31 +1973,31 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec(table, write, buffer, length, ppos);
- return 0;
+ unsigned int old_interval = dirty_writeback_interval;
+ int ret;
+
+ ret = proc_dointvec(table, write, buffer, length, ppos);
+
+ /*
+ * Writing 0 to dirty_writeback_interval will disable periodic writeback
+ * and a different non-zero value will wakeup the writeback threads.
+ * wb_wakeup_delayed() would be more appropriate, but it's a pain to
+ * iterate over all bdis and wbs.
+ * The reason we do this is to make the change take effect immediately.
+ */
+ if (!ret && write && dirty_writeback_interval &&
+ dirty_writeback_interval != old_interval)
+ wakeup_flusher_threads(WB_REASON_PERIODIC);
+
+ return ret;
}
#ifdef CONFIG_BLOCK
void laptop_mode_timer_fn(unsigned long data)
{
struct request_queue *q = (struct request_queue *)data;
- int nr_pages = global_node_page_state(NR_FILE_DIRTY) +
- global_node_page_state(NR_UNSTABLE_NFS);
- struct bdi_writeback *wb;
-
- /*
- * We want to write everything out, not just down to the dirty
- * threshold
- */
- if (!bdi_has_dirty_io(q->backing_dev_info))
- return;
- rcu_read_lock();
- list_for_each_entry_rcu(wb, &q->backing_dev_info->wb_list, bdi_node)
- if (wb_has_dirty_io(wb))
- wb_start_writeback(wb, nr_pages, true,
- WB_REASON_LAPTOP_TIMER);
- rcu_read_unlock();
+ wakeup_flusher_threads_bdi(q->backing_dev_info, WB_REASON_LAPTOP_TIMER);
}
/*
@@ -2167,7 +2168,7 @@ int write_cache_pages(struct address_space *mapping,
int range_whole = 0;
int tag;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
@@ -2194,30 +2195,14 @@ retry:
while (!done && (index <= end)) {
int i;
- nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
+ tag);
if (nr_pages == 0)
break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- /*
- * At this point, the page may be truncated or
- * invalidated (changing page->mapping to NULL), or
- * even swizzled back from swapper_space to tmpfs file
- * mapping. However, page->index will not change
- * because we have a reference on the page.
- */
- if (page->index > end) {
- /*
- * can't be range_cyclic (1st pass) because
- * end == -1 in that case.
- */
- done = 1;
- break;
- }
-
done_index = page->index;
lock_page(page);
@@ -2623,7 +2608,7 @@ EXPORT_SYMBOL(set_page_dirty_lock);
* page without actually doing it through the VM. Can you say "ext3 is
* horribly ugly"? Thought you could.
*/
-void cancel_dirty_page(struct page *page)
+void __cancel_dirty_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
@@ -2644,7 +2629,7 @@ void cancel_dirty_page(struct page *page)
ClearPageDirty(page);
}
}
-EXPORT_SYMBOL(cancel_dirty_page);
+EXPORT_SYMBOL(__cancel_dirty_page);
/*
* Clear a page's dirty flag, while caring for dirty memory accounting.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 77e4d3c5c57b..55ded92f9809 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -24,7 +24,6 @@
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
@@ -83,6 +82,8 @@ DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif
+DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
+
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
* N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
@@ -290,28 +291,37 @@ EXPORT_SYMBOL(nr_online_nodes);
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+
+/*
+ * Determine how many pages need to be initialized during early boot
+ * (non-deferred initialization).
+ * The value of first_deferred_pfn will be set later, once non-deferred pages
+ * are initialized, but for now set it to ULONG_MAX.
+ */
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
- unsigned long max_initialise;
- unsigned long reserved_lowmem;
+ phys_addr_t start_addr, end_addr;
+ unsigned long max_pgcnt;
+ unsigned long reserved;
/*
* Initialise at least 2G of a node but also take into account that
* two large system hashes that can take up 1GB for 0.25TB/node.
*/
- max_initialise = max(2UL << (30 - PAGE_SHIFT),
- (pgdat->node_spanned_pages >> 8));
+ max_pgcnt = max(2UL << (30 - PAGE_SHIFT),
+ (pgdat->node_spanned_pages >> 8));
/*
* Compensate the all the memblock reservations (e.g. crash kernel)
* from the initial estimation to make sure we will initialize enough
* memory to boot.
*/
- reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
- pgdat->node_start_pfn + max_initialise);
- max_initialise += reserved_lowmem;
+ start_addr = PFN_PHYS(pgdat->node_start_pfn);
+ end_addr = PFN_PHYS(pgdat->node_start_pfn + max_pgcnt);
+ reserved = memblock_reserved_memory_within(start_addr, end_addr);
+ max_pgcnt += PHYS_PFN(reserved);
- pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
+ pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages);
pgdat->first_deferred_pfn = ULONG_MAX;
}
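As a rough worked example of the sizing above (assuming 4 KiB pages, so PAGE_SHIFT = 12): 2UL << (30 - PAGE_SHIFT) is 524288 pages, i.e. 2 GiB, while node_spanned_pages >> 8 is 1/256 of the node. For a 256 GiB node that is 262144 pages (1 GiB), so the 2 GiB floor wins; for a 1 TiB node it is 1048576 pages (4 GiB), which wins instead. The memblock reservations found inside that window are then added on top before clamping to node_spanned_pages.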
@@ -338,7 +348,7 @@ static inline bool update_defer_init(pg_data_t *pgdat,
if (zone_end < pgdat_end_pfn(pgdat))
return true;
(*nr_initialised)++;
- if ((*nr_initialised > pgdat->static_init_size) &&
+ if ((*nr_initialised > pgdat->static_init_pgcnt) &&
(pfn & (PAGES_PER_SECTION - 1)) == 0) {
pgdat->first_deferred_pfn = pfn;
return false;
@@ -1013,7 +1023,6 @@ static __always_inline bool free_pages_prepare(struct page *page,
VM_BUG_ON_PAGE(PageTail(page), page);
trace_mm_page_free(page, order);
- kmemcheck_free_shadow(page, order);
/*
* Check tail pages before head page information is cleared to
@@ -1170,6 +1179,7 @@ static void free_one_page(struct zone *zone,
static void __meminit __init_single_page(struct page *page, unsigned long pfn,
unsigned long zone, int nid)
{
+ mm_zero_struct_page(page);
set_page_links(page, zone, nid, pfn);
init_page_count(page);
page_mapcount_reset(page);
@@ -1410,14 +1420,17 @@ void clear_zone_contiguous(struct zone *zone)
}
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-static void __init deferred_free_range(struct page *page,
- unsigned long pfn, int nr_pages)
+static void __init deferred_free_range(unsigned long pfn,
+ unsigned long nr_pages)
{
- int i;
+ struct page *page;
+ unsigned long i;
- if (!page)
+ if (!nr_pages)
return;
+ page = pfn_to_page(pfn);
+
/* Free a large naturally-aligned chunk if possible */
if (nr_pages == pageblock_nr_pages &&
(pfn & (pageblock_nr_pages - 1)) == 0) {
@@ -1443,19 +1456,109 @@ static inline void __init pgdat_init_report_one_done(void)
complete(&pgdat_init_all_done_comp);
}
+/*
+ * Helper for deferred_init_range(): free the given range, reset the counters,
+ * and return the number of pages freed.
+ */
+static inline unsigned long __init __def_free(unsigned long *nr_free,
+ unsigned long *free_base_pfn,
+ struct page **page)
+{
+ unsigned long nr = *nr_free;
+
+ deferred_free_range(*free_base_pfn, nr);
+ *free_base_pfn = 0;
+ *nr_free = 0;
+ *page = NULL;
+
+ return nr;
+}
+
+static unsigned long __init deferred_init_range(int nid, int zid,
+ unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ struct mminit_pfnnid_cache nid_init_state = { };
+ unsigned long nr_pgmask = pageblock_nr_pages - 1;
+ unsigned long free_base_pfn = 0;
+ unsigned long nr_pages = 0;
+ unsigned long nr_free = 0;
+ struct page *page = NULL;
+ unsigned long pfn;
+
+ /*
+ * First we check if pfn is valid on architectures where it is possible
+ * to have holes within pageblock_nr_pages. On systems where it is not
+ * possible, this function is optimized out.
+ *
+ * Then, we check if a current large page is valid by only checking the
+ * validity of the head pfn.
+ *
+ * meminit_pfn_in_nid is checked on systems where pfns can interleave
+ * within a node: a pfn is between start and end of a node, but does not
+ * belong to this memory node.
+ *
+ * Finally, we minimize pfn page lookups and scheduler checks by
+ * performing them only once every pageblock_nr_pages.
+ *
+ * We do it in two loops: first we initialize struct page, then free to
+ * the buddy allocator, because while we are freeing pages we can access
+ * pages that are ahead (computing the buddy page in __free_one_page()).
+ */
+ for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+ if (!pfn_valid_within(pfn))
+ continue;
+ if ((pfn & nr_pgmask) || pfn_valid(pfn)) {
+ if (meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
+ if (page && (pfn & nr_pgmask))
+ page++;
+ else
+ page = pfn_to_page(pfn);
+ __init_single_page(page, pfn, zid, nid);
+ cond_resched();
+ }
+ }
+ }
+
+ page = NULL;
+ for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+ if (!pfn_valid_within(pfn)) {
+ nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
+ } else if (!(pfn & nr_pgmask) && !pfn_valid(pfn)) {
+ nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
+ } else if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
+ nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
+ } else if (page && (pfn & nr_pgmask)) {
+ page++;
+ nr_free++;
+ } else {
+ nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
+ page = pfn_to_page(pfn);
+ free_base_pfn = pfn;
+ nr_free = 1;
+ cond_resched();
+ }
+ }
+ /* Free the last block of pages to allocator */
+ nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
+
+ return nr_pages;
+}
+
/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
{
pg_data_t *pgdat = data;
int nid = pgdat->node_id;
- struct mminit_pfnnid_cache nid_init_state = { };
unsigned long start = jiffies;
unsigned long nr_pages = 0;
- unsigned long walk_start, walk_end;
- int i, zid;
+ unsigned long spfn, epfn;
+ phys_addr_t spa, epa;
+ int zid;
struct zone *zone;
unsigned long first_init_pfn = pgdat->first_deferred_pfn;
const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
+ u64 i;
if (first_init_pfn == ULONG_MAX) {
pgdat_init_report_one_done();
@@ -1477,83 +1580,12 @@ static int __init deferred_init_memmap(void *data)
if (first_init_pfn < zone_end_pfn(zone))
break;
}
+ first_init_pfn = max(zone->zone_start_pfn, first_init_pfn);
- for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
- unsigned long pfn, end_pfn;
- struct page *page = NULL;
- struct page *free_base_page = NULL;
- unsigned long free_base_pfn = 0;
- int nr_to_free = 0;
-
- end_pfn = min(walk_end, zone_end_pfn(zone));
- pfn = first_init_pfn;
- if (pfn < walk_start)
- pfn = walk_start;
- if (pfn < zone->zone_start_pfn)
- pfn = zone->zone_start_pfn;
-
- for (; pfn < end_pfn; pfn++) {
- if (!pfn_valid_within(pfn))
- goto free_range;
-
- /*
- * Ensure pfn_valid is checked every
- * pageblock_nr_pages for memory holes
- */
- if ((pfn & (pageblock_nr_pages - 1)) == 0) {
- if (!pfn_valid(pfn)) {
- page = NULL;
- goto free_range;
- }
- }
-
- if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
- page = NULL;
- goto free_range;
- }
-
- /* Minimise pfn page lookups and scheduler checks */
- if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
- page++;
- } else {
- nr_pages += nr_to_free;
- deferred_free_range(free_base_page,
- free_base_pfn, nr_to_free);
- free_base_page = NULL;
- free_base_pfn = nr_to_free = 0;
-
- page = pfn_to_page(pfn);
- cond_resched();
- }
-
- if (page->flags) {
- VM_BUG_ON(page_zone(page) != zone);
- goto free_range;
- }
-
- __init_single_page(page, pfn, zid, nid);
- if (!free_base_page) {
- free_base_page = page;
- free_base_pfn = pfn;
- nr_to_free = 0;
- }
- nr_to_free++;
-
- /* Where possible, batch up pages for a single free */
- continue;
-free_range:
- /* Free the current block of pages to allocator */
- nr_pages += nr_to_free;
- deferred_free_range(free_base_page, free_base_pfn,
- nr_to_free);
- free_base_page = NULL;
- free_base_pfn = nr_to_free = 0;
- }
- /* Free the last block of pages to allocator */
- nr_pages += nr_to_free;
- deferred_free_range(free_base_page, free_base_pfn, nr_to_free);
-
- first_init_pfn = max(end_pfn, first_init_pfn);
+ for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
+ spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
+ epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
+ nr_pages += deferred_init_range(nid, zid, spfn, epfn);
}
/* Sanity check that the next zone really is unpopulated */
@@ -1792,7 +1824,7 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
* Go through the free lists for the given migratetype and remove
* the smallest available page from the freelists
*/
-static inline
+static __always_inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
int migratetype)
{
@@ -1836,7 +1868,7 @@ static int fallbacks[MIGRATE_TYPES][4] = {
};
#ifdef CONFIG_CMA
-static struct page *__rmqueue_cma_fallback(struct zone *zone,
+static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
unsigned int order)
{
return __rmqueue_smallest(zone, order, MIGRATE_CMA);
@@ -2217,7 +2249,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
* deviation from the rest of this file, to make the for loop
* condition simpler.
*/
-static inline bool
+static __always_inline bool
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
struct free_area *area;
@@ -2289,8 +2321,8 @@ do_steal:
* Do the hard work of removing an element from the buddy allocator.
* Call me with the zone->lock already held.
*/
-static struct page *__rmqueue(struct zone *zone, unsigned int order,
- int migratetype)
+static __always_inline struct page *
+__rmqueue(struct zone *zone, unsigned int order, int migratetype)
{
struct page *page;
@@ -2315,7 +2347,7 @@ retry:
*/
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
- int migratetype, bool cold)
+ int migratetype)
{
int i, alloced = 0;
@@ -2329,19 +2361,16 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
continue;
/*
- * Split buddy pages returned by expand() are received here
- * in physical page order. The page is added to the callers and
- * list and the list head then moves forward. From the callers
- * perspective, the linked list is ordered by page number in
- * some conditions. This is useful for IO devices that can
- * merge IO requests if the physical pages are ordered
- * properly.
+ * Split buddy pages returned by expand() are received here in
+ * physical page order. The page is added to the tail of the
+ * caller's list. From the caller's perspective, the linked list
+ * is ordered by page number under some conditions. This is
+ * useful for IO devices that can forward from the head of the
+ * list, and thus also access pages in physical page order, and
+ * for IO devices that can merge IO requests if the physical
+ * pages are ordered properly.
*/
- if (likely(!cold))
- list_add(&page->lru, list);
- else
- list_add_tail(&page->lru, list);
- list = &page->lru;
+ list_add_tail(&page->lru, list);
alloced++;
if (is_migrate_cma(get_pcppage_migratetype(page)))
__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
@@ -2590,24 +2619,25 @@ void mark_free_pages(struct zone *zone)
}
#endif /* CONFIG_PM */
-/*
- * Free a 0-order page
- * cold == true ? free a cold page : free a hot page
- */
-void free_hot_cold_page(struct page *page, bool cold)
+static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
{
- struct zone *zone = page_zone(page);
- struct per_cpu_pages *pcp;
- unsigned long flags;
- unsigned long pfn = page_to_pfn(page);
int migratetype;
if (!free_pcp_prepare(page))
- return;
+ return false;
migratetype = get_pfnblock_migratetype(page, pfn);
set_pcppage_migratetype(page, migratetype);
- local_irq_save(flags);
+ return true;
+}
+
+static void free_unref_page_commit(struct page *page, unsigned long pfn)
+{
+ struct zone *zone = page_zone(page);
+ struct per_cpu_pages *pcp;
+ int migratetype;
+
+ migratetype = get_pcppage_migratetype(page);
__count_vm_event(PGFREE);
/*
@@ -2620,38 +2650,62 @@ void free_hot_cold_page(struct page *page, bool cold)
if (migratetype >= MIGRATE_PCPTYPES) {
if (unlikely(is_migrate_isolate(migratetype))) {
free_one_page(zone, page, pfn, 0, migratetype);
- goto out;
+ return;
}
migratetype = MIGRATE_MOVABLE;
}
pcp = &this_cpu_ptr(zone->pageset)->pcp;
- if (!cold)
- list_add(&page->lru, &pcp->lists[migratetype]);
- else
- list_add_tail(&page->lru, &pcp->lists[migratetype]);
+ list_add(&page->lru, &pcp->lists[migratetype]);
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
free_pcppages_bulk(zone, batch, pcp);
pcp->count -= batch;
}
+}
-out:
+/*
+ * Free a 0-order page
+ */
+void free_unref_page(struct page *page)
+{
+ unsigned long flags;
+ unsigned long pfn = page_to_pfn(page);
+
+ if (!free_unref_page_prepare(page, pfn))
+ return;
+
+ local_irq_save(flags);
+ free_unref_page_commit(page, pfn);
local_irq_restore(flags);
}
/*
* Free a list of 0-order pages
*/
-void free_hot_cold_page_list(struct list_head *list, bool cold)
+void free_unref_page_list(struct list_head *list)
{
struct page *page, *next;
+ unsigned long flags, pfn;
+
+ /* Prepare pages for freeing */
+ list_for_each_entry_safe(page, next, list, lru) {
+ pfn = page_to_pfn(page);
+ if (!free_unref_page_prepare(page, pfn))
+ list_del(&page->lru);
+ set_page_private(page, pfn);
+ }
+ local_irq_save(flags);
list_for_each_entry_safe(page, next, list, lru) {
- trace_mm_page_free_batched(page, cold);
- free_hot_cold_page(page, cold);
+ unsigned long pfn = page_private(page);
+
+ set_page_private(page, 0);
+ trace_mm_page_free_batched(page);
+ free_unref_page_commit(page, pfn);
}
+ local_irq_restore(flags);
}
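free_hot_cold_page() is split above into a prepare step (done before interrupts are disabled) and a commit step (done with IRQs off); free_unref_page_list() exploits that split to pay for a single local_irq_save()/restore() across the whole batch, stashing each pfn in page_private so it does not need to be recomputed under IRQ-off. A hypothetical caller, for illustration only:

/* Hypothetical caller: free a private batch of 0-order pages in one go,
 * with a single IRQ-disabled section instead of one per page. */
static void drop_page_batch(struct page **pages, unsigned int nr)
{
	LIST_HEAD(batch);
	unsigned int i;

	for (i = 0; i < nr; i++)
		list_add(&pages[i]->lru, &batch);
	free_unref_page_list(&batch);
}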
/*
@@ -2669,15 +2723,6 @@ void split_page(struct page *page, unsigned int order)
VM_BUG_ON_PAGE(PageCompound(page), page);
VM_BUG_ON_PAGE(!page_count(page), page);
-#ifdef CONFIG_KMEMCHECK
- /*
- * Split shadow pages too, because free(page[0]) would
- * otherwise free the whole shadow.
- */
- if (kmemcheck_page_is_tracked(page))
- split_page(virt_to_page(page[0].shadow), order);
-#endif
-
for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i);
split_page_owner(page, order);
@@ -2743,6 +2788,10 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
#ifdef CONFIG_NUMA
enum numa_stat_item local_stat = NUMA_LOCAL;
+ /* skip numa counters update if numa stats is disabled */
+ if (!static_branch_likely(&vm_numa_stat_key))
+ return;
+
if (z->node != numa_node_id())
local_stat = NUMA_OTHER;
@@ -2758,7 +2807,7 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
/* Remove page from the per-cpu list, caller must protect the list */
static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
- bool cold, struct per_cpu_pages *pcp,
+ struct per_cpu_pages *pcp,
struct list_head *list)
{
struct page *page;
@@ -2767,16 +2816,12 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
if (list_empty(list)) {
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
- migratetype, cold);
+ migratetype);
if (unlikely(list_empty(list)))
return NULL;
}
- if (cold)
- page = list_last_entry(list, struct page, lru);
- else
- page = list_first_entry(list, struct page, lru);
-
+ page = list_first_entry(list, struct page, lru);
list_del(&page->lru);
pcp->count--;
} while (check_new_pcp(page));
@@ -2791,14 +2836,13 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
{
struct per_cpu_pages *pcp;
struct list_head *list;
- bool cold = ((gfp_flags & __GFP_COLD) != 0);
struct page *page;
unsigned long flags;
local_irq_save(flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
- page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
+ page = __rmqueue_pcplist(zone, migratetype, pcp, list);
if (page) {
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
@@ -3006,9 +3050,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
if (!area->nr_free)
continue;
- if (alloc_harder)
- return true;
-
for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
if (!list_empty(&area->free_list[mt]))
return true;
@@ -3020,6 +3061,9 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
return true;
}
#endif
+ if (alloc_harder &&
+ !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
+ return true;
}
return false;
}
@@ -3235,20 +3279,14 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
return;
- pr_warn("%s: ", current->comm);
-
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- pr_cont("%pV", &vaf);
+ pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl\n",
+ current->comm, &vaf, gfp_mask, &gfp_mask,
+ nodemask_pr_args(nodemask));
va_end(args);
- pr_cont(", mode:%#x(%pGg), nodemask=", gfp_mask, &gfp_mask);
- if (nodemask)
- pr_cont("%*pbl\n", nodemask_pr_args(nodemask));
- else
- pr_cont("(null)\n");
-
cpuset_print_current_mems_allowed();
dump_stack();
@@ -3868,8 +3906,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
enum compact_result compact_result;
int compaction_retries;
int no_progress_loops;
- unsigned long alloc_start = jiffies;
- unsigned int stall_timeout = 10 * HZ;
unsigned int cpuset_mems_cookie;
int reserve_flags;
@@ -4001,14 +4037,6 @@ retry:
if (!can_direct_reclaim)
goto nopage;
- /* Make sure we know about allocations which stall for too long */
- if (time_after(jiffies, alloc_start + stall_timeout)) {
- warn_alloc(gfp_mask & ~__GFP_NOWARN, ac->nodemask,
- "page allocation stalls for %ums, order:%u",
- jiffies_to_msecs(jiffies-alloc_start), order);
- stall_timeout += 10 * HZ;
- }
-
/* Avoid recursion of direct reclaim */
if (current->flags & PF_MEMALLOC)
goto nopage;
@@ -4223,9 +4251,6 @@ out:
page = NULL;
}
- if (kmemcheck_enabled && page)
- kmemcheck_pagealloc_alloc(page, order, gfp_mask);
-
trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
return page;
@@ -4262,7 +4287,7 @@ void __free_pages(struct page *page, unsigned int order)
{
if (put_page_testzero(page)) {
if (order == 0)
- free_hot_cold_page(page, false);
+ free_unref_page(page);
else
__free_pages_ok(page, order);
}
@@ -4320,7 +4345,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
unsigned int order = compound_order(page);
if (order == 0)
- free_hot_cold_page(page, false);
+ free_unref_page(page);
else
__free_pages_ok(page, order);
}
@@ -6126,6 +6151,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
}
}
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
{
unsigned long __maybe_unused start = 0;
@@ -6135,7 +6161,6 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
if (!pgdat->node_spanned_pages)
return;
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
offset = pgdat->node_start_pfn - start;
/* ia64 gets its own node_mem_map, before this, without bootmem */
@@ -6157,6 +6182,9 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
pgdat->node_id);
pgdat->node_mem_map = map + offset;
}
+ pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
+ __func__, pgdat->node_id, (unsigned long)pgdat,
+ (unsigned long)pgdat->node_mem_map);
#ifndef CONFIG_NEED_MULTIPLE_NODES
/*
* With no DISCONTIG, the global mem_map is just set as node 0's
@@ -6169,8 +6197,10 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
}
#endif
-#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}
+#else
+static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
+#endif /* CONFIG_FLAT_NODE_MEM_MAP */
void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
unsigned long node_start_pfn, unsigned long *zholes_size)
@@ -6197,16 +6227,49 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
zones_size, zholes_size);
alloc_node_mem_map(pgdat);
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
- printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
- nid, (unsigned long)pgdat,
- (unsigned long)pgdat->node_mem_map);
-#endif
reset_deferred_meminit(pgdat);
free_area_init_core(pgdat);
}
+#ifdef CONFIG_HAVE_MEMBLOCK
+/*
+ * Only struct pages that are backed by physical memory are zeroed and
+ * initialized by going through __init_single_page(). But there are some
+ * struct pages which are reserved in the memblock allocator whose fields
+ * may still be accessed (for example, page_to_pfn() on some configurations
+ * accesses the flags field). We must explicitly zero those struct pages.
+ */
+void __paginginit zero_resv_unavail(void)
+{
+ phys_addr_t start, end;
+ unsigned long pfn;
+ u64 i, pgcnt;
+
+ /*
+ * Loop through ranges that are reserved, but do not have reported
+ * physical memory backing.
+ */
+ pgcnt = 0;
+ for_each_resv_unavail_range(i, &start, &end) {
+ for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
+ mm_zero_struct_page(pfn_to_page(pfn));
+ pgcnt++;
+ }
+ }
+
+ /*
+ * Struct pages that do not have backing memory. This could be because
+ * firmware is using some of this memory, or for some other reason.
+ * Once memblock is changed so that such behaviour is no longer allowed
+ * (i.e. the list of "reserved" memory is a subset of the list of
+ * "memory"), this code can be removed.
+ */
+ if (pgcnt)
+ pr_info("Reserved but unavailable: %lld pages", pgcnt);
+}
+#endif /* CONFIG_HAVE_MEMBLOCK */
+
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#if MAX_NUMNODES > 1
@@ -6630,6 +6693,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
node_set_state(nid, N_MEMORY);
check_for_memory(pgdat, nid);
}
+ zero_resv_unavail();
}
static int __init cmdline_parse_core(char *p, unsigned long *core)
@@ -6793,6 +6857,7 @@ void __init free_area_init(unsigned long *zones_size)
{
free_area_init_node(0, zones_size,
__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
+ zero_resv_unavail();
}
static int page_alloc_cpu_dead(unsigned int cpu)
@@ -7305,18 +7370,17 @@ void *__init alloc_large_system_hash(const char *tablename,
log2qty = ilog2(numentries);
- /*
- * memblock allocator returns zeroed memory already, so HASH_ZERO is
- * currently not used when HASH_EARLY is specified.
- */
gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
do {
size = bucketsize << log2qty;
- if (flags & HASH_EARLY)
- table = memblock_virt_alloc_nopanic(size, 0);
- else if (hashdist)
+ if (flags & HASH_EARLY) {
+ if (flags & HASH_ZERO)
+ table = memblock_virt_alloc_nopanic(size, 0);
+ else
+ table = memblock_virt_alloc_raw(size, 0);
+ } else if (hashdist) {
table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
- else {
+ } else {
/*
* If bucketsize is not a power-of-two, we may free
* some pages at the end of hash table which
@@ -7353,10 +7417,10 @@ void *__init alloc_large_system_hash(const char *tablename,
* race condition. So you can't expect this function should be exact.
*/
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
+ int migratetype,
bool skip_hwpoisoned_pages)
{
unsigned long pfn, iter, found;
- int mt;
/*
* For avoiding noise data, lru_add_drain_all() should be called
@@ -7364,8 +7428,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
*/
if (zone_idx(zone) == ZONE_MOVABLE)
return false;
- mt = get_pageblock_migratetype(page);
- if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
+
+ /*
+ * CMA allocations (alloc_contig_range) really need to mark isolated
+ * CMA pageblocks even when they are not in fact movable, so consider
+ * them movable here.
+ */
+ if (is_migrate_cma(migratetype) &&
+ is_migrate_cma(get_pageblock_migratetype(page)))
return false;
pfn = page_to_pfn(page);
@@ -7377,6 +7447,9 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
page = pfn_to_page(check);
+ if (PageReserved(page))
+ return true;
+
/*
* Hugepages are not in LRU lists, but they're movable.
* We need not scan over tail pages because we don't
@@ -7450,7 +7523,7 @@ bool is_pageblock_removable_nolock(struct page *page)
if (!zone_spans_pfn(zone, pfn))
return false;
- return !has_unmovable_pages(zone, page, 0, true);
+ return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, true);
}
#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 4f0367d472c4..2c16216c29b6 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -125,7 +125,6 @@ struct page_ext *lookup_page_ext(struct page *page)
struct page_ext *base;
base = NODE_DATA(page_to_nid(page))->node_page_ext;
-#if defined(CONFIG_DEBUG_VM)
/*
* The sanity checks the page allocator does upon freeing a
* page can reach here before the page_ext arrays are
@@ -134,7 +133,6 @@ struct page_ext *lookup_page_ext(struct page *page)
*/
if (unlikely(!base))
return NULL;
-#endif
index = pfn - round_down(node_start_pfn(page_to_nid(page)),
MAX_ORDER_NR_PAGES);
return get_entry(base, index);
@@ -199,7 +197,6 @@ struct page_ext *lookup_page_ext(struct page *page)
{
unsigned long pfn = page_to_pfn(page);
struct mem_section *section = __pfn_to_section(pfn);
-#if defined(CONFIG_DEBUG_VM)
/*
* The sanity checks the page allocator does upon freeing a
* page can reach here before the page_ext arrays are
@@ -208,7 +205,6 @@ struct page_ext *lookup_page_ext(struct page *page)
*/
if (!section->page_ext)
return NULL;
-#endif
return get_entry(section->page_ext, pfn);
}
diff --git a/mm/page_io.c b/mm/page_io.c
index 5d882de3fbfd..e93f1a4cacd7 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -347,7 +347,7 @@ out:
return ret;
}
-int swap_readpage(struct page *page, bool do_poll)
+int swap_readpage(struct page *page, bool synchronous)
{
struct bio *bio;
int ret = 0;
@@ -355,7 +355,7 @@ int swap_readpage(struct page *page, bool do_poll)
blk_qc_t qc;
struct gendisk *disk;
- VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+ VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageUptodate(page), page);
if (frontswap_load(page) == 0) {
@@ -403,12 +403,12 @@ int swap_readpage(struct page *page, bool do_poll)
count_vm_event(PSWPIN);
bio_get(bio);
qc = submit_bio(bio);
- while (do_poll) {
+ while (synchronous) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (!READ_ONCE(bio->bi_private))
break;
- if (!blk_mq_poll(disk->queue, qc))
+ if (!blk_poll(disk->queue, qc))
break;
}
__set_current_state(TASK_RUNNING);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 44f213935bf6..165ed8117bd1 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -15,7 +15,7 @@
#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>
-static int set_migratetype_isolate(struct page *page,
+static int set_migratetype_isolate(struct page *page, int migratetype,
bool skip_hwpoisoned_pages)
{
struct zone *zone;
@@ -52,7 +52,7 @@ static int set_migratetype_isolate(struct page *page,
* FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
* We just check MOVABLE pages.
*/
- if (!has_unmovable_pages(zone, page, arg.pages_found,
+ if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
skip_hwpoisoned_pages))
ret = 0;
@@ -64,14 +64,14 @@ static int set_migratetype_isolate(struct page *page,
out:
if (!ret) {
unsigned long nr_pages;
- int migratetype = get_pageblock_migratetype(page);
+ int mt = get_pageblock_migratetype(page);
set_pageblock_migratetype(page, MIGRATE_ISOLATE);
zone->nr_isolate_pageblock++;
nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
NULL);
- __mod_zone_freepage_state(zone, -nr_pages, migratetype);
+ __mod_zone_freepage_state(zone, -nr_pages, mt);
}
spin_unlock_irqrestore(&zone->lock, flags);
@@ -183,7 +183,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
pfn += pageblock_nr_pages) {
page = __first_valid_page(pfn, pageblock_nr_pages);
if (page &&
- set_migratetype_isolate(page, skip_hwpoisoned_pages)) {
+ set_migratetype_isolate(page, migratetype, skip_hwpoisoned_pages)) {
undo_pfn = pfn;
goto undo;
}
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 4f44b95b9d1e..8592543a0f15 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -20,9 +20,9 @@
#define PAGE_OWNER_STACK_DEPTH (16)
struct page_owner {
- unsigned int order;
+ unsigned short order;
+ short last_migrate_reason;
gfp_t gfp_mask;
- int last_migrate_reason;
depot_stack_handle_t handle;
};
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 8bd4afa83cb8..23a3e415ac2c 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -188,8 +188,12 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
do {
next = hugetlb_entry_end(h, addr, end);
pte = huge_pte_offset(walk->mm, addr & hmask, sz);
- if (pte && walk->hugetlb_entry)
+
+ if (pte)
err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
+ else if (walk->pte_hole)
+ err = walk->pte_hole(addr, next, walk);
+
if (err)
break;
} while (addr = next, addr != end);
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 15dab691ea70..9158e5a81391 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -81,7 +81,7 @@ static void pcpu_free_pages(struct pcpu_chunk *chunk,
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
struct page **pages, int page_start, int page_end)
{
- const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
+ const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM;
unsigned int cpu, tcpu;
int i;
diff --git a/mm/percpu.c b/mm/percpu.c
index a0e0c82c1e4c..79e3549cab0f 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1856,7 +1856,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
__alignof__(ai->groups[0].cpu_map[0]));
ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
- ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
+ ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE);
if (!ptr)
return NULL;
ai = ptr;
@@ -2719,6 +2719,7 @@ void __init setup_per_cpu_areas(void)
if (pcpu_setup_first_chunk(ai, fc) < 0)
panic("Failed to initialize percpu areas.");
+ pcpu_free_alloc_info(ai);
}
#endif /* CONFIG_SMP */
diff --git a/mm/rmap.c b/mm/rmap.c
index b874c4761e84..47db27f8049e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -899,7 +899,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
while (page_vma_mapped_walk(&pvmw)) {
- unsigned long cstart, cend;
+ unsigned long cstart;
int ret = 0;
cstart = address = pvmw.address;
@@ -915,7 +915,6 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
entry = pte_wrprotect(entry);
entry = pte_mkclean(entry);
set_pte_at(vma->vm_mm, address, pte, entry);
- cend = cstart + PAGE_SIZE;
ret = 1;
} else {
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -931,7 +930,6 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
entry = pmd_mkclean(entry);
set_pmd_at(vma->vm_mm, address, pmd, entry);
cstart &= PMD_MASK;
- cend = cstart + PMD_SIZE;
ret = 1;
#else
/* unexpected pmd-mapped page? */
@@ -939,10 +937,15 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
#endif
}
- if (ret) {
- mmu_notifier_invalidate_range(vma->vm_mm, cstart, cend);
+ /*
+ * No need to call mmu_notifier_invalidate_range() as we are
+ * downgrading page table protection, not changing it to point
+ * to a new page.
+ *
+ * See Documentation/vm/mmu_notifier.txt
+ */
+ if (ret)
(*cleaned)++;
- }
}
mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
@@ -1318,7 +1321,7 @@ void page_remove_rmap(struct page *page, bool compound)
* It would be tidy to reset the PageAnon mapping here,
* but that might overwrite a racing page_add_anon_rmap
* which increments mapcount after us but sets mapping
- * before us: so leave the reset to free_hot_cold_page,
+ * before us: so leave the reset to free_unref_page,
* and remember that it's only reliable while mapped.
* Leaving it set also helps swapoff to reinstate ptes
* faster for those pages still in swapcache.
@@ -1426,6 +1429,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
+ /*
+ * No need to invalidate here; it will synchronize
+ * against the special swap migration pte.
+ */
goto discard;
}
@@ -1483,6 +1490,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* will take care of the rest.
*/
dec_mm_counter(mm, mm_counter(page));
+ /* We have to invalidate as we cleared the pte */
+ mmu_notifier_invalidate_range(mm, address,
+ address + PAGE_SIZE);
} else if (IS_ENABLED(CONFIG_MIGRATION) &&
(flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
swp_entry_t entry;
@@ -1498,6 +1508,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
set_pte_at(mm, address, pvmw.pte, swp_pte);
+ /*
+ * No need to invalidate here; it will synchronize
+ * against the special swap migration pte.
+ */
} else if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(subpage) };
pte_t swp_pte;
@@ -1509,6 +1523,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
WARN_ON_ONCE(1);
ret = false;
/* We have to invalidate as we cleared the pte */
+ mmu_notifier_invalidate_range(mm, address,
+ address + PAGE_SIZE);
page_vma_mapped_walk_done(&pvmw);
break;
}
@@ -1516,6 +1532,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
/* MADV_FREE page check */
if (!PageSwapBacked(page)) {
if (!PageDirty(page)) {
+ /* Invalidate as we cleared the pte */
+ mmu_notifier_invalidate_range(mm,
+ address, address + PAGE_SIZE);
dec_mm_counter(mm, MM_ANONPAGES);
goto discard;
}
@@ -1549,13 +1568,39 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
set_pte_at(mm, address, pvmw.pte, swp_pte);
- } else
+ /* Invalidate as we cleared the pte */
+ mmu_notifier_invalidate_range(mm, address,
+ address + PAGE_SIZE);
+ } else {
+ /*
+ * We should not need to notify here as we reach this
+ * case only from freeze_page(), which itself is only
+ * called from split_huge_page_to_list(), so everything
+ * below must be true:
+ * - page is not anonymous
+ * - page is locked
+ *
+ * Since it is a locked file-backed page, it cannot be
+ * removed from the page cache and replaced by a new
+ * page before mmu_notifier_invalidate_range_end(), so no
+ * concurrent thread can update its page table to
+ * point at a new page while a device is still using this
+ * page.
+ *
+ * See Documentation/vm/mmu_notifier.txt
+ */
dec_mm_counter(mm, mm_counter_file(page));
+ }
discard:
+ /*
+ * No need to call mmu_notifier_invalidate_range(); it has been
+ * done above for all cases requiring it to happen under the page
+ * table lock, before mmu_notifier_invalidate_range_end().
+ *
+ * See Documentation/vm/mmu_notifier.txt
+ */
page_remove_rmap(subpage, PageHuge(page));
put_page(page);
- mmu_notifier_invalidate_range(mm, address,
- address + PAGE_SIZE);
}
mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
diff --git a/mm/shmem.c b/mm/shmem.c
index 07a1d22807be..1f97d77551c3 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -338,7 +338,7 @@ static int shmem_radix_tree_replace(struct address_space *mapping,
if (item != expected)
return -ENOENT;
__radix_tree_replace(&mapping->page_tree, node, pslot,
- replacement, NULL, NULL);
+ replacement, NULL);
return 0;
}
@@ -747,7 +747,7 @@ void shmem_unlock_mapping(struct address_space *mapping)
pgoff_t indices[PAGEVEC_SIZE];
pgoff_t index = 0;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
/*
* Minor point, but we might as well stop if someone else SHM_LOCKs it.
*/
@@ -790,7 +790,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
if (lend == -1)
end = -1; /* unsigned, so actually very big */
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
index = start;
while (index < end) {
pvec.nr = find_get_entries(mapping, index,
@@ -2528,7 +2528,7 @@ static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
bool done = false;
int i;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
pvec.nr = 1; /* start small: we may be there already */
while (!done) {
pvec.nr = find_get_entries(mapping, index,
@@ -3862,12 +3862,11 @@ static void shmem_init_inode(void *foo)
inode_init_once(&info->vfs_inode);
}
-static int shmem_init_inodecache(void)
+static void shmem_init_inodecache(void)
{
shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
sizeof(struct shmem_inode_info),
0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
- return 0;
}
static void shmem_destroy_inodecache(void)
@@ -3991,9 +3990,7 @@ int __init shmem_init(void)
if (shmem_inode_cachep)
return 0;
- error = shmem_init_inodecache();
- if (error)
- goto out3;
+ shmem_init_inodecache();
error = register_filesystem(&shmem_fs_type);
if (error) {
@@ -4020,7 +4017,6 @@ out1:
unregister_filesystem(&shmem_fs_type);
out2:
shmem_destroy_inodecache();
-out3:
shm_mnt = ERR_PTR(error);
return error;
}
@@ -4102,6 +4098,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma)
if (i_size >= HPAGE_PMD_SIZE &&
i_size >> PAGE_SHIFT >= off)
return true;
+ /* fall through */
case SHMEM_HUGE_ADVISE:
/* TODO: implement fadvise() hints */
return (vma->vm_flags & VM_HUGEPAGE);
@@ -4183,7 +4180,7 @@ static const struct dentry_operations anon_ops = {
.d_dname = simple_dname
};
-static struct file *__shmem_file_setup(const char *name, loff_t size,
+static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
unsigned long flags, unsigned int i_flags)
{
struct file *res;
@@ -4192,8 +4189,8 @@ static struct file *__shmem_file_setup(const char *name, loff_t size,
struct super_block *sb;
struct qstr this;
- if (IS_ERR(shm_mnt))
- return ERR_CAST(shm_mnt);
+ if (IS_ERR(mnt))
+ return ERR_CAST(mnt);
if (size < 0 || size > MAX_LFS_FILESIZE)
return ERR_PTR(-EINVAL);
@@ -4205,8 +4202,8 @@ static struct file *__shmem_file_setup(const char *name, loff_t size,
this.name = name;
this.len = strlen(name);
this.hash = 0; /* will go */
- sb = shm_mnt->mnt_sb;
- path.mnt = mntget(shm_mnt);
+ sb = mnt->mnt_sb;
+ path.mnt = mntget(mnt);
path.dentry = d_alloc_pseudo(sb, &this);
if (!path.dentry)
goto put_memory;
@@ -4251,7 +4248,7 @@ put_path:
*/
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
- return __shmem_file_setup(name, size, flags, S_PRIVATE);
+ return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
}
/**
@@ -4262,11 +4259,25 @@ struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned lon
*/
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
- return __shmem_file_setup(name, size, flags, 0);
+ return __shmem_file_setup(shm_mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
/**
+ * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
+ * @mnt: the tmpfs mount where the file will be created
+ * @name: name for dentry (to be seen in /proc/<pid>/maps)
+ * @size: size to be set for the file
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
+ */
+struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
+ loff_t size, unsigned long flags)
+{
+ return __shmem_file_setup(mnt, name, size, flags, 0);
+}
+EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
+
+/**
* shmem_zero_setup - setup a shared anonymous mapping
* @vma: the vma to be mmapped is prepared by do_mmap_pgoff
*/
@@ -4281,7 +4292,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
* accessible to the user through its mapping, use S_PRIVATE flag to
* bypass file security, in the same way as shmem_kernel_file_setup().
*/
- file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE);
+ file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
if (IS_ERR(file))
return PTR_ERR(file);
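The new shmem_file_setup_with_mnt() export lets a caller supply its own tmpfs mount instead of the global shm_mnt. A minimal usage sketch (illustrative only; my_mnt is assumed to be a vfsmount obtained elsewhere, e.g. via kern_mount()):
	struct file *filp;
	filp = shmem_file_setup_with_mnt(my_mnt, "my-object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);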
diff --git a/mm/slab.c b/mm/slab.c
index b7095884fd93..183e996dde5f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -114,7 +114,6 @@
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
-#include <linux/kmemcheck.h>
#include <linux/memory.h>
#include <linux/prefetch.h>
#include <linux/sched/task_stack.h>
@@ -252,8 +251,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
} while (0)
-#define CFLGS_OBJFREELIST_SLAB (0x40000000UL)
-#define CFLGS_OFF_SLAB (0x80000000UL)
+#define CFLGS_OBJFREELIST_SLAB ((slab_flags_t __force)0x40000000U)
+#define CFLGS_OFF_SLAB ((slab_flags_t __force)0x80000000U)
#define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
@@ -441,7 +440,7 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
* Calculate the number of objects and left-over bytes for a given buffer size.
*/
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
- unsigned long flags, size_t *left_over)
+ slab_flags_t flags, size_t *left_over)
{
unsigned int num;
size_t slab_size = PAGE_SIZE << gfporder;
@@ -1410,10 +1409,8 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
int nr_pages;
flags |= cachep->allocflags;
- if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- flags |= __GFP_RECLAIMABLE;
- page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
+ page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
if (!page) {
slab_out_of_memory(cachep, flags, nodeid);
return NULL;
@@ -1435,15 +1432,6 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
if (sk_memalloc_socks() && page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page);
- if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
- kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
-
- if (cachep->ctor)
- kmemcheck_mark_uninitialized_pages(page, nr_pages);
- else
- kmemcheck_mark_unallocated_pages(page, nr_pages);
- }
-
return page;
}
@@ -1455,8 +1443,6 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
int order = cachep->gfporder;
unsigned long nr_freed = (1 << order);
- kmemcheck_free_shadow(page, order);
-
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
else
@@ -1761,7 +1747,7 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
* towards high-order requests, this should be changed.
*/
static size_t calculate_slab_order(struct kmem_cache *cachep,
- size_t size, unsigned long flags)
+ size_t size, slab_flags_t flags)
{
size_t left_over = 0;
int gfporder;
@@ -1888,8 +1874,8 @@ static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
return 0;
}
-unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *))
{
return flags;
@@ -1897,7 +1883,7 @@ unsigned long kmem_cache_flags(unsigned long object_size,
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+ slab_flags_t flags, void (*ctor)(void *))
{
struct kmem_cache *cachep;
@@ -1915,7 +1901,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
}
static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
- size_t size, unsigned long flags)
+ size_t size, slab_flags_t flags)
{
size_t left;
@@ -1938,7 +1924,7 @@ static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
}
static bool set_off_slab_cache(struct kmem_cache *cachep,
- size_t size, unsigned long flags)
+ size_t size, slab_flags_t flags)
{
size_t left;
@@ -1972,7 +1958,7 @@ static bool set_off_slab_cache(struct kmem_cache *cachep,
}
static bool set_on_slab_cache(struct kmem_cache *cachep,
- size_t size, unsigned long flags)
+ size_t size, slab_flags_t flags)
{
size_t left;
@@ -2008,8 +1994,7 @@ static bool set_on_slab_cache(struct kmem_cache *cachep,
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
*/
-int
-__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
{
size_t ralign = BYTES_PER_WORD;
gfp_t gfp;
@@ -2144,6 +2129,8 @@ done:
cachep->allocflags = __GFP_COMP;
if (flags & SLAB_CACHE_DMA)
cachep->allocflags |= GFP_DMA;
+ if (flags & SLAB_RECLAIM_ACCOUNT)
+ cachep->allocflags |= __GFP_RECLAIMABLE;
cachep->size = size;
cachep->reciprocal_buffer_size = reciprocal_value(size);
@@ -3516,8 +3503,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
- kmemcheck_slab_free(cachep, objp, cachep->object_size);
-
/*
* Skip calling cache_free_alien() when the platform is not numa.
* This will avoid cache misses that happen while accessing slabp (which
@@ -4097,7 +4082,6 @@ out:
schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
}
-#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
unsigned long active_objs, num_objs, active_slabs;
@@ -4405,7 +4389,6 @@ static int __init slab_proc_init(void)
return 0;
}
module_init(slab_proc_init);
-#endif
#ifdef CONFIG_HARDENED_USERCOPY
/*
diff --git a/mm/slab.h b/mm/slab.h
index 028cdc7df67e..ad657ffa44e5 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -21,7 +21,7 @@ struct kmem_cache {
unsigned int object_size;/* The original size of the object */
unsigned int size; /* The aligned/padded/added on size */
unsigned int align; /* Alignment as calculated */
- unsigned long flags; /* Active flags on the slab */
+ slab_flags_t flags; /* Active flags on the slab */
const char *name; /* Slab name for sysfs */
int refcount; /* Use counter */
void (*ctor)(void *); /* Called on object slot creation */
@@ -40,7 +40,6 @@ struct kmem_cache {
#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
-#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
@@ -79,13 +78,13 @@ extern const struct kmalloc_info_struct {
unsigned long size;
} kmalloc_info[];
-unsigned long calculate_alignment(unsigned long flags,
+unsigned long calculate_alignment(slab_flags_t flags,
unsigned long align, unsigned long size);
#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
-void create_kmalloc_caches(unsigned long);
+void create_kmalloc_caches(slab_flags_t);
/* Find the kmalloc slab corresponding for a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
@@ -93,32 +92,32 @@ struct kmem_cache *kmalloc_slab(size_t, gfp_t);
/* Functions provided by the slab allocators */
-extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
+int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
- unsigned long flags);
+ slab_flags_t flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
- size_t size, unsigned long flags);
+ size_t size, slab_flags_t flags);
int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
- unsigned long flags, const char *name, void (*ctor)(void *));
+ slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *));
+ slab_flags_t flags, void (*ctor)(void *));
-unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+ slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }
-static inline unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+static inline slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *))
{
return flags;
@@ -142,10 +141,10 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
- SLAB_NOTRACK | SLAB_ACCOUNT)
+ SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
- SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
+ SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif
@@ -164,7 +163,6 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
SLAB_NOLEAKTRACE | \
SLAB_RECLAIM_ACCOUNT | \
SLAB_TEMPORARY | \
- SLAB_NOTRACK | \
SLAB_ACCOUNT)
int __kmem_cache_shutdown(struct kmem_cache *);
@@ -259,7 +257,7 @@ cache_from_memcg_idx(struct kmem_cache *s, int idx)
* memcg_caches issues a write barrier to match this (see
* memcg_create_kmem_cache()).
*/
- cachep = lockless_dereference(arr->entries[idx]);
+ cachep = READ_ONCE(arr->entries[idx]);
rcu_read_unlock();
return cachep;
@@ -439,7 +437,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
for (i = 0; i < size; i++) {
void *object = p[i];
- kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
kmemleak_alloc_recursive(object, s->object_size, 1,
s->flags, flags);
kasan_slab_alloc(s, object, flags);
@@ -506,6 +503,14 @@ void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);
+#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
+void dump_unreclaimable_slab(void);
+#else
+static inline void dump_unreclaimable_slab(void)
+{
+}
+#endif
+
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
#ifdef CONFIG_SLAB_FREELIST_RANDOM
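Throughout mm/slab*, the cache flags type changes from unsigned long to slab_flags_t. The typedef itself is introduced elsewhere in this series; presumably it is a sparse __bitwise type along the lines of the sketch below (an assumption, not shown in this hunk), which is why internal flags such as CFLGS_OFF_SLAB and __OBJECT_POISON gain (slab_flags_t __force) casts:
	/* sketch: sparse-checked slab flags type */
	typedef unsigned __bitwise slab_flags_t;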
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 0d7fe71ff5e4..c8cb36774ba1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -44,7 +44,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
SLAB_FAILSLAB | SLAB_KASAN)
#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
- SLAB_NOTRACK | SLAB_ACCOUNT)
+ SLAB_ACCOUNT)
/*
* Merge control. If this is set then no merging of slab caches will occur.
@@ -291,7 +291,7 @@ int slab_unmergeable(struct kmem_cache *s)
}
struct kmem_cache *find_mergeable(size_t size, size_t align,
- unsigned long flags, const char *name, void (*ctor)(void *))
+ slab_flags_t flags, const char *name, void (*ctor)(void *))
{
struct kmem_cache *s;
@@ -341,7 +341,7 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
* Figure out what the alignment of the objects will be given a set of
* flags, a user specified alignment and the size of the objects.
*/
-unsigned long calculate_alignment(unsigned long flags,
+unsigned long calculate_alignment(slab_flags_t flags,
unsigned long align, unsigned long size)
{
/*
@@ -366,7 +366,7 @@ unsigned long calculate_alignment(unsigned long flags,
static struct kmem_cache *create_cache(const char *name,
size_t object_size, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *),
+ slab_flags_t flags, void (*ctor)(void *),
struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
struct kmem_cache *s;
@@ -431,7 +431,7 @@ out_free_cache:
*/
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+ slab_flags_t flags, void (*ctor)(void *))
{
struct kmem_cache *s = NULL;
const char *cache_name;
@@ -879,7 +879,7 @@ bool slab_is_available(void)
#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
- unsigned long flags)
+ slab_flags_t flags)
{
int err;
@@ -899,7 +899,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
}
struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
- unsigned long flags)
+ slab_flags_t flags)
{
struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
@@ -1057,7 +1057,7 @@ void __init setup_kmalloc_cache_index_table(void)
}
}
-static void __init new_kmalloc_cache(int idx, unsigned long flags)
+static void __init new_kmalloc_cache(int idx, slab_flags_t flags)
{
kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
kmalloc_info[idx].size, flags);
@@ -1068,7 +1068,7 @@ static void __init new_kmalloc_cache(int idx, unsigned long flags)
* may already have been created because they were needed to
* enable allocations for slab creation.
*/
-void __init create_kmalloc_caches(unsigned long flags)
+void __init create_kmalloc_caches(slab_flags_t flags)
{
int i;
@@ -1184,8 +1184,7 @@ void cache_random_seq_destroy(struct kmem_cache *cachep)
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
-#ifdef CONFIG_SLABINFO
-
+#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
@@ -1281,7 +1280,41 @@ static int slab_show(struct seq_file *m, void *p)
return 0;
}
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+void dump_unreclaimable_slab(void)
+{
+ struct kmem_cache *s, *s2;
+ struct slabinfo sinfo;
+
+ /*
+ * Acquiring slab_mutex here is risky since we don't want to
+ * sleep in the OOM path. But without holding the mutex, we risk
+ * a crash.
+ * Use mutex_trylock to protect the list traversal, and dump
+ * nothing if the mutex cannot be acquired.
+ */
+ if (!mutex_trylock(&slab_mutex)) {
+ pr_warn("excessive unreclaimable slab but cannot dump stats\n");
+ return;
+ }
+
+ pr_info("Unreclaimable slab info:\n");
+ pr_info("Name Used Total\n");
+
+ list_for_each_entry_safe(s, s2, &slab_caches, list) {
+ if (!is_root_cache(s) || (s->flags & SLAB_RECLAIM_ACCOUNT))
+ continue;
+
+ get_slabinfo(s, &sinfo);
+
+ if (sinfo.num_objs > 0)
+ pr_info("%-17s %10luKB %10luKB\n", cache_name(s),
+ (sinfo.active_objs * s->size) / 1024,
+ (sinfo.num_objs * s->size) / 1024);
+ }
+ mutex_unlock(&slab_mutex);
+}
+
+#if defined(CONFIG_MEMCG)
void *memcg_slab_start(struct seq_file *m, loff_t *pos)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
@@ -1355,7 +1388,7 @@ static int __init slab_proc_init(void)
return 0;
}
module_init(slab_proc_init);
-#endif /* CONFIG_SLABINFO */
+#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
static __always_inline void *__do_krealloc(const void *p, size_t new_size,
gfp_t flags)
diff --git a/mm/slob.c b/mm/slob.c
index 10249160b693..623e8a5c46ce 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -330,7 +330,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
BUG_ON(!b);
spin_unlock_irqrestore(&slob_lock, flags);
}
- if (unlikely((gfp & __GFP_ZERO) && b))
+ if (unlikely(gfp & __GFP_ZERO))
memset(b, 0, size);
return b;
}
@@ -524,7 +524,7 @@ size_t ksize(const void *block)
}
EXPORT_SYMBOL(ksize);
-int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
+int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
{
if (flags & SLAB_TYPESAFE_BY_RCU) {
/* leave room for rcu footer at the end of object */
diff --git a/mm/slub.c b/mm/slub.c
index 1efbb8123037..cfd56e5a35fb 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -22,7 +22,6 @@
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
-#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
@@ -193,8 +192,10 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
/* Internal SLUB flags */
-#define __OBJECT_POISON 0x80000000UL /* Poison object */
-#define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */
+/* Poison object */
+#define __OBJECT_POISON ((slab_flags_t __force)0x80000000U)
+/* Use cmpxchg_double */
+#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U)
/*
* Tracking user of a slab.
@@ -485,9 +486,9 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
* Debug settings:
*/
#if defined(CONFIG_SLUB_DEBUG_ON)
-static int slub_debug = DEBUG_DEFAULT_FLAGS;
+static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
-static int slub_debug;
+static slab_flags_t slub_debug;
#endif
static char *slub_debug_slabs;
@@ -1289,8 +1290,8 @@ out:
__setup("slub_debug", setup_slub_debug);
-unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *))
{
/*
@@ -1322,8 +1323,8 @@ static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
struct page *page) {}
-unsigned long kmem_cache_flags(unsigned long object_size,
- unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+ slab_flags_t flags, const char *name,
void (*ctor)(void *))
{
return flags;
@@ -1370,12 +1371,11 @@ static inline void *slab_free_hook(struct kmem_cache *s, void *x)
* So in order to make the debug calls that expect irqs to be
* disabled we need to disable interrupts temporarily.
*/
-#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
+#ifdef CONFIG_LOCKDEP
{
unsigned long flags;
local_irq_save(flags);
- kmemcheck_slab_free(s, x, s->object_size);
debug_check_no_locks_freed(x, s->object_size);
local_irq_restore(flags);
}
@@ -1399,8 +1399,7 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
* Compiler cannot detect this function can be removed if slab_free_hook()
* evaluates to nothing. Thus, catch all relevant config debug options here.
*/
-#if defined(CONFIG_KMEMCHECK) || \
- defined(CONFIG_LOCKDEP) || \
+#if defined(CONFIG_LOCKDEP) || \
defined(CONFIG_DEBUG_KMEMLEAK) || \
defined(CONFIG_DEBUG_OBJECTS_FREE) || \
defined(CONFIG_KASAN)
@@ -1436,8 +1435,6 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
struct page *page;
int order = oo_order(oo);
- flags |= __GFP_NOTRACK;
-
if (node == NUMA_NO_NODE)
page = alloc_pages(flags, order);
else
@@ -1596,22 +1593,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
stat(s, ORDER_FALLBACK);
}
- if (kmemcheck_enabled &&
- !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
- int pages = 1 << oo_order(oo);
-
- kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
-
- /*
- * Objects from caches that have a constructor don't get
- * cleared when they're allocated, so we need to do it here.
- */
- if (s->ctor)
- kmemcheck_mark_uninitialized_pages(page, pages);
- else
- kmemcheck_mark_unallocated_pages(page, pages);
- }
-
page->objects = oo_objects(oo);
order = compound_order(page);
@@ -1687,8 +1668,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
check_object(s, page, p, SLUB_RED_INACTIVE);
}
- kmemcheck_free_shadow(page, compound_order(page));
-
mod_lruvec_page_state(page,
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
@@ -3477,7 +3456,7 @@ static void set_cpu_partial(struct kmem_cache *s)
*/
static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
- unsigned long flags = s->flags;
+ slab_flags_t flags = s->flags;
size_t size = s->object_size;
int order;
@@ -3593,7 +3572,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
return !!oo_objects(s->oo);
}
-static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
+static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
{
s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
s->reserved = 0;
@@ -3655,7 +3634,7 @@ error:
if (flags & SLAB_PANIC)
panic("Cannot create slab %s size=%lu realsize=%u order=%u offset=%u flags=%lx\n",
s->name, (unsigned long)s->size, s->size,
- oo_order(s->oo), s->offset, flags);
+ oo_order(s->oo), s->offset, (unsigned long)flags);
return -EINVAL;
}
@@ -3792,7 +3771,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
struct page *page;
void *ptr = NULL;
- flags |= __GFP_COMP | __GFP_NOTRACK;
+ flags |= __GFP_COMP;
page = alloc_pages_node(node, flags, get_order(size));
if (page)
ptr = page_address(page);
@@ -4245,7 +4224,7 @@ void __init kmem_cache_init_late(void)
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void *))
+ slab_flags_t flags, void (*ctor)(void *))
{
struct kmem_cache *s, *c;
@@ -4275,7 +4254,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
return s;
}
-int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
+int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
{
int err;
@@ -5655,8 +5634,6 @@ static char *create_unique_id(struct kmem_cache *s)
*p++ = 'a';
if (s->flags & SLAB_CONSISTENCY_CHECKS)
*p++ = 'F';
- if (!(s->flags & SLAB_NOTRACK))
- *p++ = 't';
if (s->flags & SLAB_ACCOUNT)
*p++ = 'A';
if (p != name + 1)
@@ -5704,6 +5681,10 @@ static int sysfs_slab_add(struct kmem_cache *s)
return 0;
}
+ if (!unmergeable && disable_higher_order_debug &&
+ (slub_debug & DEBUG_METADATA_FLAGS))
+ unmergeable = 1;
+
if (unmergeable) {
/*
* Slabcache can never be merged so we can use the name proper.
@@ -5852,7 +5833,7 @@ __initcall(slab_sysfs_init);
/*
* The /proc/slabinfo ABI
*/
-#ifdef CONFIG_SLABINFO
+#ifdef CONFIG_SLUB_DEBUG
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
{
unsigned long nr_slabs = 0;
@@ -5884,4 +5865,4 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
{
return -EIO;
}
-#endif /* CONFIG_SLABINFO */
+#endif /* CONFIG_SLUB_DEBUG */
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 478ce6d4a2c4..17acf01791fa 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -42,7 +42,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
unsigned long align,
unsigned long goal)
{
- return memblock_virt_alloc_try_nid(size, align, goal,
+ return memblock_virt_alloc_try_nid_raw(size, align, goal,
BOOTMEM_ALLOC_ACCESSIBLE, node);
}
@@ -53,13 +53,20 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
/* If the main allocator is up use that, fallback to bootmem. */
if (slab_is_available()) {
+ gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
+ int order = get_order(size);
+ static bool warned;
struct page *page;
- page = alloc_pages_node(node,
- GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
- get_order(size));
+ page = alloc_pages_node(node, gfp_mask, order);
if (page)
return page_address(page);
+
+ if (!warned) {
+ warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
+ "vmemmap alloc failure: order:%u", order);
+ warned = true;
+ }
return NULL;
} else
return __earlyonly_bootmem_alloc(node, size, size,
@@ -180,11 +187,22 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
return pte;
}
+static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
+{
+ void *p = vmemmap_alloc_block(size, node);
+
+ if (!p)
+ return NULL;
+ memset(p, 0, size);
+
+ return p;
+}
+
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
pmd_t *pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
- void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+ void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
if (!p)
return NULL;
pmd_populate_kernel(&init_mm, pmd, p);
@@ -196,7 +214,7 @@ pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
pud_t *pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
- void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+ void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
if (!p)
return NULL;
pud_populate(&init_mm, pud, p);
@@ -208,7 +226,7 @@ p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
p4d_t *p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d)) {
- void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+ void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
if (!p)
return NULL;
p4d_populate(&init_mm, p4d, p);
@@ -220,7 +238,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
pgd_t *pgd = pgd_offset_k(addr);
if (pgd_none(*pgd)) {
- void *p = vmemmap_alloc_block(PAGE_SIZE, node);
+ void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
if (!p)
return NULL;
pgd_populate(&init_mm, pgd, p);
diff --git a/mm/sparse.c b/mm/sparse.c
index 4900707ae146..7a5dacaa06e3 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -23,8 +23,7 @@
* 1) mem_section - memory sections, mem_map's for valid memory
*/
#ifdef CONFIG_SPARSEMEM_EXTREME
-struct mem_section *mem_section[NR_SECTION_ROOTS]
- ____cacheline_internodealigned_in_smp;
+struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
____cacheline_internodealigned_in_smp;
@@ -101,7 +100,7 @@ static inline int sparse_index_init(unsigned long section_nr, int nid)
int __section_nr(struct mem_section* ms)
{
unsigned long root_nr;
- struct mem_section* root;
+ struct mem_section *root = NULL;
for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
@@ -112,7 +111,7 @@ int __section_nr(struct mem_section* ms)
break;
}
- VM_BUG_ON(root_nr == NR_SECTION_ROOTS);
+ VM_BUG_ON(!root);
return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
@@ -208,6 +207,16 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
{
unsigned long pfn;
+#ifdef CONFIG_SPARSEMEM_EXTREME
+ if (unlikely(!mem_section)) {
+ unsigned long size, align;
+
+ size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
+ align = 1 << (INTERNODE_CACHE_SHIFT);
+ mem_section = memblock_virt_alloc(size, align);
+ }
+#endif
+
start &= PAGE_SECTION_MASK;
mminit_validate_memmodel_limits(&start, &end);
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
@@ -330,11 +339,17 @@ again:
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
unsigned long usemap_snr, pgdat_snr;
- static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
- static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
+ static unsigned long old_usemap_snr;
+ static unsigned long old_pgdat_snr;
struct pglist_data *pgdat = NODE_DATA(nid);
int usemap_nid;
+ /* First call */
+ if (!old_usemap_snr) {
+ old_usemap_snr = NR_MEM_SECTIONS;
+ old_pgdat_snr = NR_MEM_SECTIONS;
+ }
+
usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
if (usemap_snr == pgdat_snr)
@@ -438,9 +453,9 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
}
size = PAGE_ALIGN(size);
- map = memblock_virt_alloc_try_nid(size * map_count,
- PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
- BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
+ map = memblock_virt_alloc_try_nid_raw(size * map_count,
+ PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+ BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
if (map) {
for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
if (!present_section_nr(pnum))
diff --git a/mm/swap.c b/mm/swap.c
index a77d68f2c1b6..38e1b6374a97 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -76,7 +76,7 @@ static void __page_cache_release(struct page *page)
static void __put_single_page(struct page *page)
{
__page_cache_release(page);
- free_hot_cold_page(page, false);
+ free_unref_page(page);
}
static void __put_compound_page(struct page *page)
@@ -210,7 +210,7 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
}
if (pgdat)
spin_unlock_irqrestore(&pgdat->lru_lock, flags);
- release_pages(pvec->pages, pvec->nr, pvec->cold);
+ release_pages(pvec->pages, pvec->nr);
pagevec_reinit(pvec);
}
@@ -740,7 +740,7 @@ void lru_add_drain_all(void)
* Decrement the reference count on all the pages in @pages. If it
* fell to zero, remove the page from the LRU and free it.
*/
-void release_pages(struct page **pages, int nr, bool cold)
+void release_pages(struct page **pages, int nr)
{
int i;
LIST_HEAD(pages_to_free);
@@ -817,7 +817,7 @@ void release_pages(struct page **pages, int nr, bool cold)
spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
mem_cgroup_uncharge_list(&pages_to_free);
- free_hot_cold_page_list(&pages_to_free, cold);
+ free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
@@ -833,8 +833,11 @@ EXPORT_SYMBOL(release_pages);
*/
void __pagevec_release(struct pagevec *pvec)
{
- lru_add_drain();
- release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
+ if (!pvec->percpu_pvec_drained) {
+ lru_add_drain();
+ pvec->percpu_pvec_drained = true;
+ }
+ release_pages(pvec->pages, pagevec_count(pvec));
pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);
@@ -986,15 +989,25 @@ unsigned pagevec_lookup_range(struct pagevec *pvec,
}
EXPORT_SYMBOL(pagevec_lookup_range);
-unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
- pgoff_t *index, int tag, unsigned nr_pages)
+unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
+ struct address_space *mapping, pgoff_t *index, pgoff_t end,
+ int tag)
{
- pvec->nr = find_get_pages_tag(mapping, index, tag,
- nr_pages, pvec->pages);
+ pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
+ PAGEVEC_SIZE, pvec->pages);
return pagevec_count(pvec);
}
-EXPORT_SYMBOL(pagevec_lookup_tag);
+EXPORT_SYMBOL(pagevec_lookup_range_tag);
+unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
+ struct address_space *mapping, pgoff_t *index, pgoff_t end,
+ int tag, unsigned max_pages)
+{
+ pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
+ min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
+ return pagevec_count(pvec);
+}
+EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);
/*
* Perform any setup for the swap system
*/
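pagevec_lookup_tag() is replaced by range-aware variants: pagevec_lookup_range_tag() always fills up to a full pagevec within [*index, end], while pagevec_lookup_range_nr_tag() caps the result at max_pages. A minimal writeback-style caller under the new signature (illustrative only; handle_page() is a hypothetical helper):
	struct pagevec pvec;
	pgoff_t index = start;
	pagevec_init(&pvec);
	while (pagevec_lookup_range_tag(&pvec, mapping, &index, end,
					PAGECACHE_TAG_DIRTY)) {
		int i;
		for (i = 0; i < pagevec_count(&pvec); i++)
			handle_page(pvec.pages[i]);
		pagevec_release(&pvec);
		cond_resched();
	}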
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index d81cfc5a43d5..bebc19292018 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -149,6 +149,13 @@ static int alloc_swap_slot_cache(unsigned int cpu)
cache->nr = 0;
cache->cur = 0;
cache->n_ret = 0;
+ /*
+ * We initialized alloc_lock and free_lock earlier. We use
+ * !cache->slots or !cache->slots_ret to know if it is safe to acquire
+ * the corresponding lock and use the cache. The memory barrier
+ * below ensures that this assumption holds.
+ */
+ mb();
cache->slots = slots;
slots = NULL;
cache->slots_ret = slots_ret;
@@ -275,7 +282,7 @@ int free_swap_slot(swp_entry_t entry)
struct swap_slots_cache *cache;
cache = raw_cpu_ptr(&swp_slots);
- if (use_swap_slot_cache && cache->slots_ret) {
+ if (likely(use_swap_slot_cache && cache->slots_ret)) {
spin_lock_irq(&cache->free_lock);
/* Swap slots cache may be deactivated before acquiring lock */
if (!use_swap_slot_cache || !cache->slots_ret) {
@@ -326,7 +333,7 @@ swp_entry_t get_swap_page(struct page *page)
*/
cache = raw_cpu_ptr(&swp_slots);
- if (check_cache_active()) {
+ if (likely(check_cache_active() && cache->slots)) {
mutex_lock(&cache->alloc_lock);
if (cache->slots) {
repeat:
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 326439428daf..39ae7cfad90f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -36,9 +36,9 @@ static const struct address_space_operations swap_aops = {
#endif
};
-struct address_space *swapper_spaces[MAX_SWAPFILES];
-static unsigned int nr_swapper_spaces[MAX_SWAPFILES];
-bool swap_vma_readahead = true;
+struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
+static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
+bool swap_vma_readahead __read_mostly = true;
#define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1)
@@ -319,7 +319,7 @@ void free_pages_and_swap_cache(struct page **pages, int nr)
lru_add_drain();
for (i = 0; i < nr; i++)
free_swap_cache(pagep[i]);
- release_pages(pagep, nr, false);
+ release_pages(pagep, nr);
}
/*
@@ -559,6 +559,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
unsigned long offset = entry_offset;
unsigned long start_offset, end_offset;
unsigned long mask;
+ struct swap_info_struct *si = swp_swap_info(entry);
struct blk_plug plug;
bool do_poll = true, page_allocated;
@@ -572,6 +573,8 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
end_offset = offset | mask;
if (!start_offset) /* First page is swap header. */
start_offset++;
+ if (end_offset >= si->max)
+ end_offset = si->max - 1;
blk_start_plug(&plug);
for (offset = start_offset; offset <= end_offset ; offset++) {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e47a21e64764..3074b02eaa09 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1328,6 +1328,13 @@ int page_swapcount(struct page *page)
return count;
}
+int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
+{
+ pgoff_t offset = swp_offset(entry);
+
+ return swap_count(si->swap_map[offset]);
+}
+
static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
{
int count = 0;
@@ -3169,6 +3176,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
p->flags |= SWP_STABLE_WRITES;
+ if (bdi_cap_synchronous_io(inode_to_bdi(inode)))
+ p->flags |= SWP_SYNCHRONOUS_IO;
+
if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
int cpu;
unsigned long ci, nr_cluster;
@@ -3452,10 +3462,15 @@ int swapcache_prepare(swp_entry_t entry)
return __swap_duplicate(entry, SWAP_HAS_CACHE);
}
+struct swap_info_struct *swp_swap_info(swp_entry_t entry)
+{
+ return swap_info[swp_type(entry)];
+}
+
struct swap_info_struct *page_swap_info(struct page *page)
{
- swp_entry_t swap = { .val = page_private(page) };
- return swap_info[swp_type(swap)];
+ swp_entry_t entry = { .val = page_private(page) };
+ return swp_swap_info(entry);
}
/*
@@ -3463,7 +3478,6 @@ struct swap_info_struct *page_swap_info(struct page *page)
*/
struct address_space *__page_file_mapping(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSwapCache(page), page);
return page_swap_info(page)->swap_file->f_mapping;
}
EXPORT_SYMBOL_GPL(__page_file_mapping);
@@ -3471,7 +3485,6 @@ EXPORT_SYMBOL_GPL(__page_file_mapping);
pgoff_t __page_file_index(struct page *page)
{
swp_entry_t swap = { .val = page_private(page) };
- VM_BUG_ON_PAGE(!PageSwapCache(page), page);
return swp_offset(swap);
}
EXPORT_SYMBOL_GPL(__page_file_index);
diff --git a/mm/truncate.c b/mm/truncate.c
index 2330223841fb..e4b4cf0f4070 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -25,44 +25,85 @@
#include <linux/rmap.h>
#include "internal.h"
-static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
- void *entry)
+/*
+ * Regular page slots are stabilized by the page lock even without the tree
+ * itself locked. These unlocked entries need verification under the tree
+ * lock.
+ */
+static inline void __clear_shadow_entry(struct address_space *mapping,
+ pgoff_t index, void *entry)
{
struct radix_tree_node *node;
void **slot;
- spin_lock_irq(&mapping->tree_lock);
- /*
- * Regular page slots are stabilized by the page lock even
- * without the tree itself locked. These unlocked entries
- * need verification under the tree lock.
- */
if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
- goto unlock;
+ return;
if (*slot != entry)
- goto unlock;
+ return;
__radix_tree_replace(&mapping->page_tree, node, slot, NULL,
- workingset_update_node, mapping);
+ workingset_update_node);
mapping->nrexceptional--;
-unlock:
+}
+
+static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
+ void *entry)
+{
+ spin_lock_irq(&mapping->tree_lock);
+ __clear_shadow_entry(mapping, index, entry);
spin_unlock_irq(&mapping->tree_lock);
}
/*
- * Unconditionally remove exceptional entry. Usually called from truncate path.
+ * Unconditionally remove exceptional entries. Usually called from truncate
+ * path. Note that the pagevec may be altered by this function by removing
+ * exceptional entries similar to what pagevec_remove_exceptionals does.
*/
-static void truncate_exceptional_entry(struct address_space *mapping,
- pgoff_t index, void *entry)
+static void truncate_exceptional_pvec_entries(struct address_space *mapping,
+ struct pagevec *pvec, pgoff_t *indices,
+ pgoff_t end)
{
+ int i, j;
+ bool dax, lock;
+
/* Handled by shmem itself */
if (shmem_mapping(mapping))
return;
- if (dax_mapping(mapping)) {
- dax_delete_mapping_entry(mapping, index);
+ for (j = 0; j < pagevec_count(pvec); j++)
+ if (radix_tree_exceptional_entry(pvec->pages[j]))
+ break;
+
+ if (j == pagevec_count(pvec))
return;
+
+ dax = dax_mapping(mapping);
+ lock = !dax && indices[j] < end;
+ if (lock)
+ spin_lock_irq(&mapping->tree_lock);
+
+ for (i = j; i < pagevec_count(pvec); i++) {
+ struct page *page = pvec->pages[i];
+ pgoff_t index = indices[i];
+
+ if (!radix_tree_exceptional_entry(page)) {
+ pvec->pages[j++] = page;
+ continue;
+ }
+
+ if (index >= end)
+ continue;
+
+ if (unlikely(dax)) {
+ dax_delete_mapping_entry(mapping, index);
+ continue;
+ }
+
+ __clear_shadow_entry(mapping, index, page);
}
- clear_shadow_entry(mapping, index, entry);
+
+ if (lock)
+ spin_unlock_irq(&mapping->tree_lock);
+ pvec->nr = j;
}
/*
@@ -134,11 +175,17 @@ void do_invalidatepage(struct page *page, unsigned int offset,
* its lock, b) when a concurrent invalidate_mapping_pages got there first and
* c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
*/
-static int
-truncate_complete_page(struct address_space *mapping, struct page *page)
+static void
+truncate_cleanup_page(struct address_space *mapping, struct page *page)
{
- if (page->mapping != mapping)
- return -EIO;
+ if (page_mapped(page)) {
+ loff_t holelen;
+
+ holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
+ unmap_mapping_range(mapping,
+ (loff_t)page->index << PAGE_SHIFT,
+ holelen, 0);
+ }
if (page_has_private(page))
do_invalidatepage(page, 0, PAGE_SIZE);
@@ -150,8 +197,6 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
*/
cancel_dirty_page(page);
ClearPageMappedToDisk(page);
- delete_from_page_cache(page);
- return 0;
}
/*
@@ -180,16 +225,14 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
int truncate_inode_page(struct address_space *mapping, struct page *page)
{
- loff_t holelen;
VM_BUG_ON_PAGE(PageTail(page), page);
- holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
- if (page_mapped(page)) {
- unmap_mapping_range(mapping,
- (loff_t)page->index << PAGE_SHIFT,
- holelen, 0);
- }
- return truncate_complete_page(mapping, page);
+ if (page->mapping != mapping)
+ return -EIO;
+
+ truncate_cleanup_page(mapping, page);
+ delete_from_page_cache(page);
+ return 0;
}
/*
@@ -287,11 +330,19 @@ void truncate_inode_pages_range(struct address_space *mapping,
else
end = (lend + 1) >> PAGE_SHIFT;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
index = start;
while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE),
indices)) {
+ /*
+ * Pagevec array has exceptional entries and we may also fail
+ * to lock some pages. So we store pages that can be deleted
+ * in a new pagevec.
+ */
+ struct pagevec locked_pvec;
+
+ pagevec_init(&locked_pvec);
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
@@ -300,11 +351,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
if (index >= end)
break;
- if (radix_tree_exceptional_entry(page)) {
- truncate_exceptional_entry(mapping, index,
- page);
+ if (radix_tree_exceptional_entry(page))
continue;
- }
if (!trylock_page(page))
continue;
@@ -313,15 +361,22 @@ void truncate_inode_pages_range(struct address_space *mapping,
unlock_page(page);
continue;
}
- truncate_inode_page(mapping, page);
- unlock_page(page);
+ if (page->mapping != mapping) {
+ unlock_page(page);
+ continue;
+ }
+ pagevec_add(&locked_pvec, page);
}
- pagevec_remove_exceptionals(&pvec);
+ for (i = 0; i < pagevec_count(&locked_pvec); i++)
+ truncate_cleanup_page(mapping, locked_pvec.pages[i]);
+ delete_from_page_cache_batch(mapping, &locked_pvec);
+ for (i = 0; i < pagevec_count(&locked_pvec); i++)
+ unlock_page(locked_pvec.pages[i]);
+ truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
pagevec_release(&pvec);
cond_resched();
index++;
}
-
if (partial_start) {
struct page *page = find_lock_page(mapping, start - 1);
if (page) {
@@ -379,6 +434,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
pagevec_release(&pvec);
break;
}
+
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
@@ -390,11 +446,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
break;
}
- if (radix_tree_exceptional_entry(page)) {
- truncate_exceptional_entry(mapping, index,
- page);
+ if (radix_tree_exceptional_entry(page))
continue;
- }
lock_page(page);
WARN_ON(page_to_index(page) != index);
@@ -402,7 +455,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
truncate_inode_page(mapping, page);
unlock_page(page);
}
- pagevec_remove_exceptionals(&pvec);
+ truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
pagevec_release(&pvec);
index++;
}
@@ -500,7 +553,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
unsigned long count = 0;
int i;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
indices)) {
@@ -630,7 +683,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
goto out;
- pagevec_init(&pvec, 0);
+ pagevec_init(&pvec);
index = start;
while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eb2f0315b8c0..c02c850ea349 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1349,7 +1349,7 @@ keep:
mem_cgroup_uncharge_list(&free_pages);
try_to_unmap_flush();
- free_hot_cold_page_list(&free_pages, true);
+ free_unref_page_list(&free_pages);
list_splice(&ret_pages, page_list);
count_vm_events(PGACTIVATE, pgactivate);
@@ -1824,7 +1824,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
spin_unlock_irq(&pgdat->lru_lock);
mem_cgroup_uncharge_list(&page_list);
- free_hot_cold_page_list(&page_list, true);
+ free_unref_page_list(&page_list);
/*
* If reclaim is isolating dirty pages under writeback, it implies
@@ -1868,7 +1868,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
* also allow kswapd to start writing pages during reclaim.
*/
if (stat.nr_unqueued_dirty == nr_taken) {
- wakeup_flusher_threads(0, WB_REASON_VMSCAN);
+ wakeup_flusher_threads(WB_REASON_VMSCAN);
set_bit(PGDAT_DIRTY, &pgdat->flags);
}
@@ -2063,7 +2063,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
spin_unlock_irq(&pgdat->lru_lock);
mem_cgroup_uncharge_list(&l_hold);
- free_hot_cold_page_list(&l_hold, true);
+ free_unref_page_list(&l_hold);
trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
nr_deactivate, nr_rotated, sc->priority, file);
}
@@ -2082,7 +2082,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
* If that fails and refaulting is observed, the inactive list grows.
*
* The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
- * on this LRU, maintained by the pageout code. A zone->inactive_ratio
+ * on this LRU, maintained by the pageout code. An inactive_ratio
* of 3 means 3:1 or 25% of the pages are kept on the inactive list.
*
* total target max
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4bb13e72ac97..40b2db6db6b1 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -32,6 +32,77 @@
#define NUMA_STATS_THRESHOLD (U16_MAX - 2)
+#ifdef CONFIG_NUMA
+int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
+
+/* zero numa counters within a zone */
+static void zero_zone_numa_counters(struct zone *zone)
+{
+ int item, cpu;
+
+ for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++) {
+ atomic_long_set(&zone->vm_numa_stat[item], 0);
+ for_each_online_cpu(cpu)
+ per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
+ = 0;
+ }
+}
+
+/* zero numa counters of all the populated zones */
+static void zero_zones_numa_counters(void)
+{
+ struct zone *zone;
+
+ for_each_populated_zone(zone)
+ zero_zone_numa_counters(zone);
+}
+
+/* zero global numa counters */
+static void zero_global_numa_counters(void)
+{
+ int item;
+
+ for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++)
+ atomic_long_set(&vm_numa_stat[item], 0);
+}
+
+static void invalid_numa_statistics(void)
+{
+ zero_zones_numa_counters();
+ zero_global_numa_counters();
+}
+
+static DEFINE_MUTEX(vm_numa_stat_lock);
+
+int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
+{
+ int ret, oldval;
+
+ mutex_lock(&vm_numa_stat_lock);
+ if (write)
+ oldval = sysctl_vm_numa_stat;
+ ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+ if (ret || !write)
+ goto out;
+
+ if (oldval == sysctl_vm_numa_stat)
+ goto out;
+ else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
+ static_branch_enable(&vm_numa_stat_key);
+ pr_info("enable numa statistics\n");
+ } else {
+ static_branch_disable(&vm_numa_stat_key);
+ invalid_numa_statistics();
+ pr_info("disable numa statistics, and clear numa counters\n");
+ }
+
+out:
+ mutex_unlock(&vm_numa_stat_lock);
+ return ret;
+}
+#endif
+
#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);
@@ -1564,11 +1635,9 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
}
seq_printf(m,
"\n node_unreclaimable: %u"
- "\n start_pfn: %lu"
- "\n node_inactive_ratio: %u",
+ "\n start_pfn: %lu",
pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
- zone->zone_start_pfn,
- zone->zone_pgdat->inactive_ratio);
+ zone->zone_start_pfn);
seq_putc(m, '\n');
}
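The handler above sits behind a vm sysctl; the table entry itself is wired up elsewhere in the series (kernel/sysctl.c), so the following is only an indicative reconstruction, not taken from this hunk:

	static int zero;
	static int one = 1;

	static struct ctl_table vm_table[] = {
		{
			.procname	= "numa_stat",
			.data		= &sysctl_vm_numa_stat,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= sysctl_vm_numa_stat_handler,
			.extra1		= &zero,
			.extra2		= &one,
		},
		/* ... */
	};

Disabling the knob flips vm_numa_stat_key off and zeroes both the per-zone and the global NUMA counters, so a later re-enable starts counting from a clean slate.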
diff --git a/mm/workingset.c b/mm/workingset.c
index b997c9de28f6..b7d616a3bbbe 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -340,14 +340,8 @@ out:
static struct list_lru shadow_nodes;
-void workingset_update_node(struct radix_tree_node *node, void *private)
+void workingset_update_node(struct radix_tree_node *node)
{
- struct address_space *mapping = private;
-
- /* Only regular page cache has shadow entries */
- if (dax_mapping(mapping) || shmem_mapping(mapping))
- return;
-
/*
* Track non-empty nodes that contain only shadow entries;
* unlink those that contain pages or are being freed.
@@ -475,7 +469,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
goto out_invalid;
inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
__radix_tree_delete_node(&mapping->page_tree, node,
- workingset_update_node, mapping);
+ workingset_lookup_update(mapping));
out_invalid:
spin_unlock(&mapping->tree_lock);
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 7c38e850a8fc..685049a9048d 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1349,7 +1349,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
* pools/users, we can't allow mapping in interrupt context
* because it can corrupt another users mappings.
*/
- WARN_ON_ONCE(in_interrupt());
+ BUG_ON(in_interrupt());
/* From now on, migration cannot move the object */
pin_tag(handle);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 67bae0f11c67..d0ef0a8e8831 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -657,6 +657,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
+ [IFLA_BRPORT_VLAN_TUNNEL] = { .type = NLA_U8 },
[IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
[IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
};
diff --git a/net/core/dev.c b/net/core/dev.c
index 30b5fe32c525..8ee29f4f5fa9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1064,7 +1064,10 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
unsigned long *inuse;
struct net_device *d;
- p = strnchr(name, IFNAMSIZ-1, '%');
+ if (!dev_valid_name(name))
+ return -EINVAL;
+
+ p = strchr(name, '%');
if (p) {
/*
* Verify the string as this thing may have come from
@@ -1095,8 +1098,7 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
free_page((unsigned long) inuse);
}
- if (buf != name)
- snprintf(buf, IFNAMSIZ, name, i);
+ snprintf(buf, IFNAMSIZ, name, i);
if (!__dev_get_by_name(net, buf))
return i;
@@ -1104,7 +1106,21 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
* when the name is long and there isn't enough space left
* for the digits, or if all bits are used.
*/
- return -ENFILE;
+ return p ? -ENFILE : -EEXIST;
+}
+
+static int dev_alloc_name_ns(struct net *net,
+ struct net_device *dev,
+ const char *name)
+{
+ char buf[IFNAMSIZ];
+ int ret;
+
+ BUG_ON(!net);
+ ret = __dev_alloc_name(net, name, buf);
+ if (ret >= 0)
+ strlcpy(dev->name, buf, IFNAMSIZ);
+ return ret;
}
/**
@@ -1123,48 +1139,14 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
int dev_alloc_name(struct net_device *dev, const char *name)
{
- char buf[IFNAMSIZ];
- struct net *net;
- int ret;
-
- BUG_ON(!dev_net(dev));
- net = dev_net(dev);
- ret = __dev_alloc_name(net, name, buf);
- if (ret >= 0)
- strlcpy(dev->name, buf, IFNAMSIZ);
- return ret;
+ return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
-static int dev_alloc_name_ns(struct net *net,
- struct net_device *dev,
- const char *name)
-{
- char buf[IFNAMSIZ];
- int ret;
-
- ret = __dev_alloc_name(net, name, buf);
- if (ret >= 0)
- strlcpy(dev->name, buf, IFNAMSIZ);
- return ret;
-}
-
int dev_get_valid_name(struct net *net, struct net_device *dev,
const char *name)
{
- BUG_ON(!net);
-
- if (!dev_valid_name(name))
- return -EINVAL;
-
- if (strchr(name, '%'))
- return dev_alloc_name_ns(net, dev, name);
- else if (__dev_get_by_name(net, name))
- return -EEXIST;
- else if (dev->name != name)
- strlcpy(dev->name, name, IFNAMSIZ);
-
- return 0;
+ return dev_alloc_name_ns(net, dev, name);
}
EXPORT_SYMBOL(dev_get_valid_name);
@@ -3754,7 +3736,7 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
flow_table = rcu_dereference(rxqueue->rps_flow_table);
if (flow_table && flow_id <= flow_table->mask) {
rflow = &flow_table->flows[flow_id];
- cpu = ACCESS_ONCE(rflow->cpu);
+ cpu = READ_ONCE(rflow->cpu);
if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
((int)(per_cpu(softnet_data, cpu).input_queue_head -
rflow->last_qtail) <
@@ -8667,6 +8649,8 @@ static void __net_exit netdev_exit(struct net *net)
{
kfree(net->dev_name_head);
kfree(net->dev_index_head);
+ if (net != &init_net)
+ WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}
static struct pernet_operations __net_initdata netdev_net_ops = {
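With the allocation paths funnelled through dev_alloc_name_ns(), the externally visible behaviour is: a template containing '%d' still picks the first free unit, invalid names are rejected up front with -EINVAL, and a literal name that is already taken is reported as -EEXIST rather than -ENFILE (the "p ? -ENFILE : -EEXIST" return above). A caller looks the same as before; a minimal sketch:

	err = dev_alloc_name(dev, "dummy%d");	/* returns the unit number chosen, or -errno */
	if (err < 0)
		return err;
	/* dev->name now holds e.g. "dummy3" */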
diff --git a/net/core/fib_notifier.c b/net/core/fib_notifier.c
index 4fc202dbdfb6..0c048bdeb016 100644
--- a/net/core/fib_notifier.c
+++ b/net/core/fib_notifier.c
@@ -34,12 +34,14 @@ static unsigned int fib_seq_sum(void)
rtnl_lock();
for_each_net(net) {
- list_for_each_entry(ops, &net->fib_notifier_ops, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ops, &net->fib_notifier_ops, list) {
if (!try_module_get(ops->owner))
continue;
fib_seq += ops->fib_seq_read(net);
module_put(ops->owner);
}
+ rcu_read_unlock();
}
rtnl_unlock();
@@ -161,8 +163,14 @@ static int __net_init fib_notifier_net_init(struct net *net)
return 0;
}
+static void __net_exit fib_notifier_net_exit(struct net *net)
+{
+ WARN_ON_ONCE(!list_empty(&net->fib_notifier_ops));
+}
+
static struct pernet_operations fib_notifier_net_ops = {
.init = fib_notifier_net_init,
+ .exit = fib_notifier_net_exit,
};
static int __init fib_notifier_init(void)
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index fafd0a41e3f7..98e1066c3d55 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -1022,8 +1022,14 @@ static int __net_init fib_rules_net_init(struct net *net)
return 0;
}
+static void __net_exit fib_rules_net_exit(struct net *net)
+{
+ WARN_ON_ONCE(!list_empty(&net->rules_ops));
+}
+
static struct pernet_operations fib_rules_net_ops = {
.init = fib_rules_net_init,
+ .exit = fib_rules_net_exit,
};
static int __init fib_rules_init(void)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 51d5836d8fb9..799b75268291 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -382,7 +382,7 @@ static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
struct net_device *netdev = to_net_dev(dev);
struct net *net = dev_net(netdev);
size_t count = len;
- ssize_t ret;
+ ssize_t ret = 0;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
@@ -391,9 +391,20 @@ static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
if (len > 0 && buf[len - 1] == '\n')
--count;
- ret = dev_set_alias(netdev, buf, count);
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (dev_isalive(netdev)) {
+ ret = dev_set_alias(netdev, buf, count);
+ if (ret < 0)
+ goto err;
+ ret = len;
+ netdev_state_change(netdev);
+ }
+err:
+ rtnl_unlock();
- return ret < 0 ? ret : len;
+ return ret;
}
static ssize_t ifalias_show(struct device *dev,
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 912731bed7b7..57557a6a950c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -334,7 +334,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
/* It is up to the caller to keep npinfo alive. */
struct netpoll_info *npinfo;
- WARN_ON_ONCE(!irqs_disabled());
+ lockdep_assert_irqs_disabled();
npinfo = rcu_dereference_bh(np->dev->npinfo);
if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 40db0b7e37ac..f95a15086225 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3385,7 +3385,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
- unsigned int burst = ACCESS_ONCE(pkt_dev->burst);
+ unsigned int burst = READ_ONCE(pkt_dev->burst);
struct net_device *odev = pkt_dev->odev;
struct netdev_queue *txq;
struct sk_buff *skb;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8134c00df6c2..6b0ff396fa9d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -41,7 +41,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
@@ -234,14 +233,12 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
- kmemcheck_annotate_variable(shinfo->destructor_arg);
if (flags & SKB_ALLOC_FCLONE) {
struct sk_buff_fclones *fclones;
fclones = container_of(skb, struct sk_buff_fclones, skb1);
- kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
skb->fclone = SKB_FCLONE_ORIG;
refcount_set(&fclones->fclone_ref, 1);
@@ -301,7 +298,6 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
shinfo = skb_shinfo(skb);
memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
atomic_set(&shinfo->dataref, 1);
- kmemcheck_annotate_variable(shinfo->destructor_arg);
return skb;
}
@@ -357,7 +353,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
*/
void *netdev_alloc_frag(unsigned int fragsz)
{
- return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
+ return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(netdev_alloc_frag);
@@ -370,7 +366,7 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
void *napi_alloc_frag(unsigned int fragsz)
{
- return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
+ return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);
@@ -1283,7 +1279,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
if (!n)
return NULL;
- kmemcheck_annotate_bitfield(n, flags1);
n->fclone = SKB_FCLONE_UNAVAILABLE;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 57bbd6040eb6..c0b5b2f17412 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1469,8 +1469,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
sk = kmalloc(prot->obj_size, priority);
if (sk != NULL) {
- kmemcheck_annotate_bitfield(sk, flags);
-
if (security_sk_alloc(sk, family, priority))
goto out_free;
@@ -2746,6 +2744,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_max_pacing_rate = ~0U;
sk->sk_pacing_rate = ~0U;
+ sk->sk_pacing_shift = 10;
sk->sk_incoming_cpu = -1;
/*
* Before updating sk_refcnt, we must commit prior changes to memory
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 2fed892094bc..03c3bdf25468 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -7,6 +7,7 @@ config HAVE_NET_DSA
config NET_DSA
tristate "Distributed Switch Architecture"
depends on HAVE_NET_DSA && MAY_USE_DEVLINK
+ depends on BRIDGE || BRIDGE=n
select NET_SWITCHDEV
select PHYLIB
---help---
diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
index b8c5e52b2eff..548c00254c07 100644
--- a/net/dsa/tag_lan9303.c
+++ b/net/dsa/tag_lan9303.c
@@ -42,6 +42,10 @@
#define LAN9303_TAG_LEN 4
# define LAN9303_TAG_TX_USE_ALR BIT(3)
# define LAN9303_TAG_TX_STP_OVERRIDE BIT(4)
+# define LAN9303_TAG_RX_IGMP BIT(3)
+# define LAN9303_TAG_RX_STP BIT(4)
+# define LAN9303_TAG_RX_TRAPPED_TO_CPU (LAN9303_TAG_RX_IGMP | \
+ LAN9303_TAG_RX_STP)
/* Decide whether to transmit using ALR lookup, or transmit directly to
* port using tag. ALR learning is performed only when using ALR lookup.
@@ -91,9 +95,8 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt)
{
u16 *lan9303_tag;
+ u16 lan9303_tag1;
unsigned int source_port;
- u16 ether_type_nw;
- u8 ip_protocol;
if (unlikely(!pskb_may_pull(skb, LAN9303_TAG_LEN))) {
dev_warn_ratelimited(&dev->dev,
@@ -114,7 +117,8 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
return NULL;
}
- source_port = ntohs(lan9303_tag[1]) & 0x3;
+ lan9303_tag1 = ntohs(lan9303_tag[1]);
+ source_port = lan9303_tag1 & 0x3;
skb->dev = dsa_master_find_slave(dev, 0, source_port);
if (!skb->dev) {
@@ -128,19 +132,7 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
skb_pull_rcsum(skb, 2 + 2);
memmove(skb->data - ETH_HLEN, skb->data - (ETH_HLEN + LAN9303_TAG_LEN),
2 * ETH_ALEN);
- skb->offload_fwd_mark = !ether_addr_equal(skb->data - ETH_HLEN,
- eth_stp_addr);
-
- /* We also need IGMP packets to have skb->offload_fwd_mark = 0.
- * Solving this for all conceivable situations would add more cost to
- * every packet. Instead we handle just the common case:
- * No VLAN tag + Ethernet II framing.
- * Test least probable term first.
- */
- ether_type_nw = lan9303_tag[2];
- ip_protocol = *(skb->data + 9);
- if (ip_protocol == IPPROTO_IGMP && ether_type_nw == htons(ETH_P_IP))
- skb->offload_fwd_mark = 0;
+ skb->offload_fwd_mark = !(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU);
return skb;
}
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 37db44f60718..4dd95cdd8070 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -240,7 +240,7 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
if (err == -EINPROGRESS)
goto out;
- if (err == -EBUSY)
+ if (err == -ENOSPC)
err = NET_XMIT_DROP;
goto out_free;
}
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index b00e4a43b4dc..d57aa64fa7c7 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -432,7 +432,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
case -EINPROGRESS:
goto error;
- case -EBUSY:
+ case -ENOSPC:
err = NET_XMIT_DROP;
break;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 589caaa90613..f04d944f8abe 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -710,7 +710,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
bool ecn_ca = false;
nla_strlcpy(tmp, nla, sizeof(tmp));
- val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
+ val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca);
} else {
val = nla_get_u32(nla);
}
@@ -1030,7 +1030,7 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
char tmp[TCP_CA_NAME_MAX];
nla_strlcpy(tmp, nla, sizeof(tmp));
- val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
+ val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca);
if (val == TCP_CA_UNSPEC)
return -EINVAL;
} else {
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 7f3ef5c287a1..26a3d0315728 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -164,7 +164,7 @@ static void inet_frag_worker(struct work_struct *work)
local_bh_disable();
- for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
+ for (i = READ_ONCE(f->next_bucket); budget; --budget) {
evicted += inet_evict_bucket(f, &f->hash[i]);
i = (i + 1) & (INETFRAGS_HASHSZ - 1);
if (evicted > INETFRAGS_EVICT_MAX)
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index a4bab81f1462..c690cd0d9b3f 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -9,7 +9,6 @@
*/
#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
@@ -167,8 +166,6 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
if (tw) {
const struct inet_sock *inet = inet_sk(sk);
- kmemcheck_annotate_bitfield(tw, flags);
-
tw->tw_dr = dr;
/* Give us an identity. */
tw->tw_daddr = inet->inet_daddr;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index bc40bd411196..3b427757b1f8 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -495,7 +495,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
{
u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
- u32 old = ACCESS_ONCE(*p_tstamp);
+ u32 old = READ_ONCE(*p_tstamp);
u32 now = (u32)jiffies;
u32 new, delta = 0;
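The ACCESS_ONCE() conversions recurring through this series are mechanical: READ_ONCE()/WRITE_ONCE() give the same single-access, no-tearing guarantee, but make the direction explicit and also work on non-scalar types. The idiom in isolation (illustrative only):

	static u32 shared;		/* updated by one path, read locklessly by another */
	u32 new_val = 42;

	/* writer */
	WRITE_ONCE(shared, new_val);

	/* lockless reader: exactly one load, the compiler may not refetch or tear it */
	u32 snapshot = READ_ONCE(shared);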
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index ef0ff3357a44..93e172118a94 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -201,6 +201,8 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
+ struct net *net = container_of(ctl->data, struct net,
+ ipv4.tcp_congestion_control);
char val[TCP_CA_NAME_MAX];
struct ctl_table tbl = {
.data = val,
@@ -208,11 +210,11 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
};
int ret;
- tcp_get_default_congestion_control(val);
+ tcp_get_default_congestion_control(net, val);
ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
if (write && ret == 0)
- ret = tcp_set_default_congestion_control(val);
+ ret = tcp_set_default_congestion_control(net, val);
return ret;
}
@@ -447,12 +449,6 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
- {
- .procname = "tcp_congestion_control",
- .mode = 0644,
- .maxlen = TCP_CA_NAME_MAX,
- .proc_handler = proc_tcp_congestion_control,
- },
#ifdef CONFIG_NETLABEL
{
.procname = "cipso_cache_enable",
@@ -764,6 +760,13 @@ static struct ctl_table ipv4_net_table[] = {
},
#endif
{
+ .procname = "tcp_congestion_control",
+ .data = &init_net.ipv4.tcp_congestion_control,
+ .mode = 0644,
+ .maxlen = TCP_CA_NAME_MAX,
+ .proc_handler = proc_tcp_congestion_control,
+ },
+ {
.procname = "tcp_keepalive_time",
.data = &init_net.ipv4.sysctl_tcp_keepalive_time,
.maxlen = sizeof(int),
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 2f26124fd160..bc6c02f16243 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -33,9 +33,11 @@ static struct tcp_congestion_ops *tcp_ca_find(const char *name)
}
/* Must be called with rcu lock held */
-static const struct tcp_congestion_ops *__tcp_ca_find_autoload(const char *name)
+static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
+ const char *name)
{
- const struct tcp_congestion_ops *ca = tcp_ca_find(name);
+ struct tcp_congestion_ops *ca = tcp_ca_find(name);
+
#ifdef CONFIG_MODULES
if (!ca && capable(CAP_NET_ADMIN)) {
rcu_read_unlock();
@@ -115,7 +117,7 @@ void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
-u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
+u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
{
const struct tcp_congestion_ops *ca;
u32 key = TCP_CA_UNSPEC;
@@ -123,7 +125,7 @@ u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
might_sleep();
rcu_read_lock();
- ca = __tcp_ca_find_autoload(name);
+ ca = tcp_ca_find_autoload(net, name);
if (ca) {
key = ca->key;
*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
@@ -153,23 +155,18 @@ EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);
/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
+ struct net *net = sock_net(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
- struct tcp_congestion_ops *ca;
+ const struct tcp_congestion_ops *ca;
rcu_read_lock();
- list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
- if (likely(try_module_get(ca->owner))) {
- icsk->icsk_ca_ops = ca;
- goto out;
- }
- /* Fallback to next available. The last really
- * guaranteed fallback is Reno from this list.
- */
- }
-out:
+ ca = rcu_dereference(net->ipv4.tcp_congestion_control);
+ if (unlikely(!try_module_get(ca->owner)))
+ ca = &tcp_reno;
+ icsk->icsk_ca_ops = ca;
rcu_read_unlock();
- memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+ memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
if (ca->flags & TCP_CONG_NEEDS_ECN)
INET_ECN_xmit(sk);
else
@@ -214,29 +211,27 @@ void tcp_cleanup_congestion_control(struct sock *sk)
}
/* Used by sysctl to change default congestion control */
-int tcp_set_default_congestion_control(const char *name)
+int tcp_set_default_congestion_control(struct net *net, const char *name)
{
struct tcp_congestion_ops *ca;
- int ret = -ENOENT;
-
- spin_lock(&tcp_cong_list_lock);
- ca = tcp_ca_find(name);
-#ifdef CONFIG_MODULES
- if (!ca && capable(CAP_NET_ADMIN)) {
- spin_unlock(&tcp_cong_list_lock);
+ const struct tcp_congestion_ops *prev;
+ int ret;
- request_module("tcp_%s", name);
- spin_lock(&tcp_cong_list_lock);
- ca = tcp_ca_find(name);
- }
-#endif
+ rcu_read_lock();
+ ca = tcp_ca_find_autoload(net, name);
+ if (!ca) {
+ ret = -ENOENT;
+ } else if (!try_module_get(ca->owner)) {
+ ret = -EBUSY;
+ } else {
+ prev = xchg(&net->ipv4.tcp_congestion_control, ca);
+ if (prev)
+ module_put(prev->owner);
- if (ca) {
- ca->flags |= TCP_CONG_NON_RESTRICTED; /* default is always allowed */
- list_move(&ca->list, &tcp_cong_list);
+ ca->flags |= TCP_CONG_NON_RESTRICTED;
ret = 0;
}
- spin_unlock(&tcp_cong_list_lock);
+ rcu_read_unlock();
return ret;
}
@@ -244,7 +239,8 @@ int tcp_set_default_congestion_control(const char *name)
/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
- return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
+ return tcp_set_default_congestion_control(&init_net,
+ CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);
@@ -264,14 +260,12 @@ void tcp_get_available_congestion_control(char *buf, size_t maxlen)
}
/* Get current default congestion control */
-void tcp_get_default_congestion_control(char *name)
+void tcp_get_default_congestion_control(struct net *net, char *name)
{
- struct tcp_congestion_ops *ca;
- /* We will always have reno... */
- BUG_ON(list_empty(&tcp_cong_list));
+ const struct tcp_congestion_ops *ca;
rcu_read_lock();
- ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
+ ca = rcu_dereference(net->ipv4.tcp_congestion_control);
strncpy(name, ca->name, TCP_CA_NAME_MAX);
rcu_read_unlock();
}
@@ -351,12 +345,14 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, boo
if (!load)
ca = tcp_ca_find(name);
else
- ca = __tcp_ca_find_autoload(name);
+ ca = tcp_ca_find_autoload(sock_net(sk), name);
+
/* No change asking for existing value */
if (ca == icsk->icsk_ca_ops) {
icsk->icsk_ca_setsockopt = 1;
goto out;
}
+
if (!ca) {
err = -ENOENT;
} else if (!load) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c3447c5512fd..f844c06c0676 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -795,12 +795,12 @@ static void tcp_update_pacing_rate(struct sock *sk)
if (likely(tp->srtt_us))
do_div(rate, tp->srtt_us);
- /* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate
+ /* WRITE_ONCE() is needed because sch_fq fetches sk_pacing_rate
* without any lock. We want to make sure compiler wont store
* intermediate values in this location.
*/
- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
- sk->sk_max_pacing_rate);
+ WRITE_ONCE(sk->sk_pacing_rate, min_t(u64, rate,
+ sk->sk_max_pacing_rate));
}
/* Calculate rto without backoff. This is the second half of Van Jacobson's
@@ -3534,7 +3534,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
icsk->icsk_retransmits = 0;
}
- prior_fack = tcp_highest_sack_seq(tp);
+ prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una;
rs.prior_in_flight = tcp_packets_in_flight(tp);
/* ts_recent update must be made after we are sure that the packet
@@ -6130,7 +6130,6 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
if (req) {
struct inet_request_sock *ireq = inet_rsk(req);
- kmemcheck_annotate_bitfield(ireq, flags);
ireq->ireq_opt = NULL;
#if IS_ENABLED(CONFIG_IPV6)
ireq->pktopts = NULL;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1eac84b8044e..c6bc0c4d19c6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2430,6 +2430,8 @@ static void __net_exit tcp_sk_exit(struct net *net)
{
int cpu;
+ module_put(net->ipv4.tcp_congestion_control->owner);
+
for_each_possible_cpu(cpu)
inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
free_percpu(net->ipv4.tcp_sk);
@@ -2522,6 +2524,13 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
atomic_set(&net->ipv4.tfo_active_disable_times, 0);
+ /* Reno is always built in */
+ if (!net_eq(net, &init_net) &&
+ try_module_get(init_net.ipv4.tcp_congestion_control->owner))
+ net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
+ else
+ net->ipv4.tcp_congestion_control = &tcp_reno;
+
return 0;
fail:
tcp_sk_exit(net);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0256f7a41041..540b7d92cc70 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1720,7 +1720,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
{
u32 bytes, segs;
- bytes = min(sk->sk_pacing_rate >> 10,
+ bytes = min(sk->sk_pacing_rate >> sk->sk_pacing_shift,
sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
/* Goal is to send at least one packet per ms,
@@ -1961,7 +1961,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
goto send_now;
- win_divisor = ACCESS_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
+ win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
if (win_divisor) {
u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
@@ -2198,7 +2198,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
{
unsigned int limit;
- limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10);
+ limit = max(2 * skb->truesize, sk->sk_pacing_rate >> sk->sk_pacing_shift);
limit = min_t(u32, limit,
sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
limit <<= factor;
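To put sk_pacing_shift in numbers: the default of 10 (set in sock_init_data() earlier in this series) budgets sk_pacing_rate >> 10, roughly 1/1024 of a second's worth of data. At 1 Gbit/s the pacing rate is about 125 MB/s, so the TSO-autosize and small-queue budget is 125,000,000 >> 10 ≈ 122 KB, close to 1 ms of data on the wire. A subsystem that needs deeper queues (e.g. for link-layer aggregation) can use a smaller shift to enlarge the budget.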
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index a6699af05539..e4ff25c947c5 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1852,7 +1852,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
*/
/* if we're overly short, let UDP handle it */
- encap_rcv = ACCESS_ONCE(up->encap_rcv);
+ encap_rcv = READ_ONCE(up->encap_rcv);
if (encap_rcv) {
int ret;
@@ -2297,7 +2297,7 @@ void udp_destroy_sock(struct sock *sk)
unlock_sock_fast(sk, slow);
if (static_key_false(&udp_encap_needed) && up->encap_type) {
void (*encap_destroy)(struct sock *sk);
- encap_destroy = ACCESS_ONCE(up->encap_destroy);
+ encap_destroy = READ_ONCE(up->encap_destroy);
if (encap_destroy)
encap_destroy(sk);
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a6dffd65eb9d..a0ae1c9d37df 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -231,7 +231,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.proxy_ndp = 0,
.accept_source_route = 0, /* we do not accept RH0 by default. */
.disable_ipv6 = 0,
- .accept_dad = 1,
+ .accept_dad = 0,
.suppress_frag_ndisc = 1,
.accept_ra_mtu = 1,
.stable_secret = {
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 37bb33fbc742..78c974391567 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -444,7 +444,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
if (err == -EINPROGRESS)
goto out;
- if (err == -EBUSY)
+ if (err == -ENOSPC)
err = NET_XMIT_DROP;
goto out_free;
}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 4000b71bfdc5..a902ff8f59be 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -396,7 +396,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
case -EINPROGRESS:
goto error;
- case -EBUSY:
+ case -ENOSPC:
err = NET_XMIT_DROP;
break;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 00882fdb1223..3d3092adf1d2 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -491,7 +491,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
if (!t)
goto out;
- tproto = ACCESS_ONCE(t->parms.proto);
+ tproto = READ_ONCE(t->parms.proto);
if (tproto != ipproto && tproto != 0)
goto out;
@@ -891,7 +891,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
if (t) {
- u8 tproto = ACCESS_ONCE(t->parms.proto);
+ u8 tproto = READ_ONCE(t->parms.proto);
if (tproto != ipproto && tproto != 0)
goto drop;
@@ -1226,7 +1226,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
- tproto = ACCESS_ONCE(t->parms.proto);
+ tproto = READ_ONCE(t->parms.proto);
if (tproto != IPPROTO_IPIP && tproto != 0)
return -1;
@@ -1296,7 +1296,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
- tproto = ACCESS_ONCE(t->parms.proto);
+ tproto = READ_ONCE(t->parms.proto);
if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
ip6_tnl_addr_conflict(t, ipv6h))
return -1;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 70d9659fc1e9..05eb7bc36156 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2378,6 +2378,7 @@ out:
static int ip6_convert_metrics(struct mx6_config *mxc,
const struct fib6_config *cfg)
{
+ struct net *net = cfg->fc_nlinfo.nl_net;
bool ecn_ca = false;
struct nlattr *nla;
int remaining;
@@ -2403,7 +2404,7 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
char tmp[TCP_CA_NAME_MAX];
nla_strlcpy(tmp, nla, sizeof(tmp));
- val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
+ val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca);
if (val == TCP_CA_UNSPEC)
goto err;
} else {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 40d7234c27b9..3f30fa313bf2 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -606,7 +606,7 @@ static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
*/
/* if we're overly short, let UDP handle it */
- encap_rcv = ACCESS_ONCE(up->encap_rcv);
+ encap_rcv = READ_ONCE(up->encap_rcv);
if (encap_rcv) {
int ret;
@@ -1432,7 +1432,7 @@ void udpv6_destroy_sock(struct sock *sk)
if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
void (*encap_destroy)(struct sock *sk);
- encap_destroy = ACCESS_ONCE(up->encap_destroy);
+ encap_destroy = READ_ONCE(up->encap_destroy);
if (encap_destroy)
encap_destroy(sk);
}
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 4e438bc7ee87..f85f0d7480ac 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -338,6 +338,14 @@ static int __net_init xfrm6_tunnel_net_init(struct net *net)
static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
{
+ struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
+ unsigned int i;
+
+ for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
+ WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
+
+ for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
+ WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byspi[i]));
}
static struct pernet_operations xfrm6_tunnel_net_ops = {
diff --git a/net/key/af_key.c b/net/key/af_key.c
index a00d607e7224..3dffb892d52c 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3845,7 +3845,7 @@ static void __net_exit pfkey_net_exit(struct net *net)
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
pfkey_exit_proc(net);
- BUG_ON(!hlist_empty(&net_pfkey->table));
+ WARN_ON(!hlist_empty(&net_pfkey->table));
}
static struct pernet_operations pfkey_net_ops = {
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 350fcd39ebd8..115918ad8eca 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1833,6 +1833,7 @@ static __net_exit void l2tp_exit_net(struct net *net)
{
struct l2tp_net *pn = l2tp_pernet(net);
struct l2tp_tunnel *tunnel = NULL;
+ int hash;
rcu_read_lock_bh();
list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
@@ -1842,6 +1843,9 @@ static __net_exit void l2tp_exit_net(struct net *net)
flush_workqueue(l2tp_wq);
rcu_barrier();
+
+ for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
+ WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
}
static struct pernet_operations l2tp_net_ops = {
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index dd3e83328ad5..82cb93f66b9b 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -193,7 +193,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
*/
rcv = rcu_dereference(sap->rcv_func);
dest = llc_pdu_type(skb);
- sap_handler = dest ? ACCESS_ONCE(llc_type_handlers[dest - 1]) : NULL;
+ sap_handler = dest ? READ_ONCE(llc_type_handlers[dest - 1]) : NULL;
if (unlikely(!sap_handler)) {
if (rcv)
rcv(skb, dev, pt, orig_dev);
@@ -214,7 +214,7 @@ drop:
kfree_skb(skb);
goto out;
handle_station:
- sta_handler = ACCESS_ONCE(llc_station_handler);
+ sta_handler = READ_ONCE(llc_station_handler);
if (!sta_handler)
goto drop;
sta_handler(skb);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 9673e157bf8f..a3060e55122c 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2017,7 +2017,7 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate,
static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
{
- u16 rate = ACCESS_ONCE(sta_get_last_rx_stats(sta)->last_rate);
+ u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
if (rate == STA_STATS_RATE_INVALID)
return -EINVAL;
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 13f740875507..9ee71cb276d7 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -458,7 +458,7 @@ static inline bool in_persistence(struct ip_vs_conn *cp)
static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs,
struct ip_vs_conn *cp, int pkts)
{
- unsigned long orig = ACCESS_ONCE(cp->sync_endtime);
+ unsigned long orig = READ_ONCE(cp->sync_endtime);
unsigned long now = jiffies;
unsigned long n = (now + cp->timeout) & ~3UL;
unsigned int sync_refresh_period;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5749fcaa2770..85f643c1e227 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1945,7 +1945,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
return 0;
}
-int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
+int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
{
unsigned int hashsize;
int rc;
diff --git a/net/netfilter/nf_nat_ftp.c b/net/netfilter/nf_nat_ftp.c
index e84a578dbe35..d76afafdc699 100644
--- a/net/netfilter/nf_nat_ftp.c
+++ b/net/netfilter/nf_nat_ftp.c
@@ -134,7 +134,7 @@ static int __init nf_nat_ftp_init(void)
}
/* Prior to 2.6.11, we had a ports param. No longer, but don't break users. */
-static int warn_set(const char *val, struct kernel_param *kp)
+static int warn_set(const char *val, const struct kernel_param *kp)
{
printk(KERN_INFO KBUILD_MODNAME
": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c
index 0648cb096bd8..dcb5f6375d9d 100644
--- a/net/netfilter/nf_nat_irc.c
+++ b/net/netfilter/nf_nat_irc.c
@@ -106,7 +106,7 @@ static int __init nf_nat_irc_init(void)
}
/* Prior to 2.6.11, we had a ports param. No longer, but don't break users. */
-static int warn_set(const char *val, struct kernel_param *kp)
+static int warn_set(const char *val, const struct kernel_param *kp)
{
printk(KERN_INFO KBUILD_MODNAME
": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index c9796629858f..a16356cacec3 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -401,7 +401,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
outdev = entry->state.out;
- switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
+ switch ((enum nfqnl_config_mode)READ_ONCE(queue->copy_mode)) {
case NFQNL_COPY_META:
case NFQNL_COPY_NONE:
break;
@@ -412,7 +412,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
skb_checksum_help(entskb))
return NULL;
- data_len = ACCESS_ONCE(queue->copy_range);
+ data_len = READ_ONCE(queue->copy_range);
if (data_len > entskb->len)
data_len = entskb->len;
diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c
index d177dd066504..4d748975117d 100644
--- a/net/netlabel/netlabel_calipso.c
+++ b/net/netlabel/netlabel_calipso.c
@@ -393,7 +393,7 @@ EXPORT_SYMBOL(netlbl_calipso_ops_register);
static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
{
- return ACCESS_ONCE(calipso_ops);
+ return READ_ONCE(calipso_ops);
}
/**
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a3eab903a9cd..b9e0ee4e22f5 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -128,7 +128,6 @@ static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
};
static int netlink_dump(struct sock *sk);
-static void netlink_skb_destructor(struct sk_buff *skb);
/* nl_table locking explained:
* Lookup and traversal are protected with an RCU read-side lock. Insertion
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index bb4dae198c78..dc424798ba6f 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -338,7 +338,7 @@ size_t ovs_tun_key_attr_size(void)
+ nla_total_size(4); /* OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS */
}
-size_t ovs_nsh_key_attr_size(void)
+static size_t ovs_nsh_key_attr_size(void)
{
/* Whenever adding new OVS_NSH_KEY_ FIELDS, we should consider
* updating this function.
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index 2a5ba356c472..3fbfc78991ac 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -42,19 +42,12 @@ static const struct nla_policy band_policy[OVS_BAND_ATTR_MAX + 1] = {
[OVS_BAND_ATTR_STATS] = { .len = sizeof(struct ovs_flow_stats) },
};
-static void rcu_free_ovs_meter_callback(struct rcu_head *rcu)
-{
- struct dp_meter *meter = container_of(rcu, struct dp_meter, rcu);
-
- kfree(meter);
-}
-
static void ovs_meter_free(struct dp_meter *meter)
{
if (!meter)
return;
- call_rcu(&meter->rcu, rcu_free_ovs_meter_callback);
+ kfree_rcu(meter, rcu);
}
static struct hlist_head *meter_hash_bucket(const struct datapath *dp,
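kfree_rcu() is the generic replacement for the open-coded callback deleted above: it only requires that the object embed a struct rcu_head (which dp_meter already does, as the old container_of() showed) and it frees the object after a grace period, equivalent to call_rcu() followed by kfree(). Condensed sketch:

	struct dp_meter {
		/* ... other fields ... */
		struct rcu_head rcu;		/* used only to defer the free */
	};

	static void ovs_meter_free(struct dp_meter *meter)
	{
		if (!meter)
			return;
		kfree_rcu(meter, rcu);		/* kfree(meter) once all RCU readers are done */
	}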
@@ -106,7 +99,7 @@ ovs_meter_cmd_reply_start(struct genl_info *info, u8 cmd,
*ovs_reply_header = genlmsg_put(skb, info->snd_portid,
info->snd_seq,
&dp_meter_genl_family, 0, cmd);
- if (!ovs_reply_header) {
+ if (!*ovs_reply_header) {
nlmsg_free(skb);
return ERR_PTR(-EMSGSIZE);
}
@@ -166,7 +159,7 @@ static int ovs_meter_cmd_features(struct sk_buff *skb, struct genl_info *info)
reply = ovs_meter_cmd_reply_start(info, OVS_METER_CMD_FEATURES,
&ovs_reply_header);
- if (!reply)
+ if (IS_ERR(reply))
return PTR_ERR(reply);
if (nla_put_u32(reply, OVS_METER_ATTR_MAX_METERS, U32_MAX) ||
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9603f6ff17a4..737092ca9b4e 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4560,6 +4560,7 @@ static int __net_init packet_net_init(struct net *net)
static void __net_exit packet_net_exit(struct net *net)
{
remove_proc_entry("packet", net->proc_net);
+ WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}
static struct pernet_operations packet_net_ops = {
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 2cb4c5dfad6f..77787512fc32 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -331,7 +331,10 @@ static int __net_init phonet_init_net(struct net *net)
static void __net_exit phonet_exit_net(struct net *net)
{
+ struct phonet_net *pnn = phonet_pernet(net);
+
remove_proc_entry("phonet", net->proc_net);
+ WARN_ON_ONCE(!list_empty(&pnn->pndevs.list));
}
static struct pernet_operations phonet_net_ops = {
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
index 86ef907067bb..e0f70c4051b6 100644
--- a/net/rds/ib_fmr.c
+++ b/net/rds/ib_fmr.c
@@ -139,8 +139,8 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
return -EINVAL;
}
- dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
- rdsibdev_to_node(rds_ibdev));
+ dma_pages = kmalloc_array_node(sizeof(u64), page_cnt, GFP_ATOMIC,
+ rdsibdev_to_node(rds_ibdev));
if (!dma_pages) {
ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
return -ENOMEM;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 344b2dcad52d..9b5c46b052fd 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -322,6 +322,14 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
}
EXPORT_SYMBOL(rxrpc_kernel_begin_call);
+/*
+ * Dummy function used to stop the notifier talking to recvmsg().
+ */
+static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
+ unsigned long call_user_ID)
+{
+}
+
/**
* rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
* @sock: The socket the call is on
@@ -336,6 +344,14 @@ void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
mutex_lock(&call->user_mutex);
rxrpc_release_call(rxrpc_sk(sock->sk), call);
+
+ /* Make sure we're not going to call back into a kernel service */
+ if (call->notify_rx) {
+ spin_lock_bh(&call->notify_lock);
+ call->notify_rx = rxrpc_dummy_notify_rx;
+ spin_unlock_bh(&call->notify_lock);
+ }
+
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put_kernel);
}
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index ea5600b747cc..b2151993d384 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -525,6 +525,7 @@ struct rxrpc_call {
unsigned long flags;
unsigned long events;
spinlock_t lock;
+ spinlock_t notify_lock; /* Kernel notification lock */
rwlock_t state_lock; /* lock for state transition */
u32 abort_code; /* Local/remote abort code */
int error; /* Local error incurred */
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 7a77844aab16..3574508baf9a 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -386,7 +386,7 @@ recheck_state:
now = ktime_get_real();
if (ktime_before(call->expire_at, now)) {
- rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME);
+ rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
goto recheck_state;
}
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index fcdd6555a820..4c7fbc6dcce7 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -124,6 +124,7 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
INIT_LIST_HEAD(&call->sock_link);
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->lock);
+ spin_lock_init(&call->notify_lock);
rwlock_init(&call->state_lock);
atomic_set(&call->usage, 1);
call->debug_id = atomic_inc_return(&rxrpc_debug_id);
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 1e37eb1c0c66..1b592073ec96 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -298,8 +298,6 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
write_unlock(&call->state_lock);
if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
- rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, 0, false, true,
- rxrpc_propose_ack_client_tx_end);
trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
} else {
trace_rxrpc_transmit(call, rxrpc_transmit_end);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 71e6f713fbe7..f47659c7b224 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -35,7 +35,8 @@ struct rxrpc_abort_buffer {
/*
* Fill out an ACK packet.
*/
-static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
+static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
+ struct rxrpc_call *call,
struct rxrpc_ack_buffer *pkt,
rxrpc_seq_t *_hard_ack,
rxrpc_seq_t *_top,
@@ -77,8 +78,8 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
} while (before_eq(seq, top));
}
- mtu = call->conn->params.peer->if_mtu;
- mtu -= call->conn->params.peer->hdrsize;
+ mtu = conn->params.peer->if_mtu;
+ mtu -= conn->params.peer->hdrsize;
jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
pkt->ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
pkt->ackinfo.maxMTU = htonl(mtu);
@@ -148,7 +149,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
}
call->ackr_reason = 0;
}
- n = rxrpc_fill_out_ack(call, pkt, &hard_ack, &top, reason);
+ n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason);
spin_unlock_bh(&call->lock);
@@ -221,6 +222,16 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
rxrpc_serial_t serial;
int ret;
+ /* Don't bother sending aborts for a client call once the server has
+ * hard-ACK'd all of its request data. After that point, we're not
+ * going to stop the operation proceeding, and whilst we might limit
+ * the reply, it's not worth it if we can send a new call on the same
+ * channel instead, thereby closing off this call.
+ */
+ if (rxrpc_is_client_call(call) &&
+ test_bit(RXRPC_CALL_TX_LAST, &call->flags))
+ return 0;
+
spin_lock_bh(&call->lock);
if (call->conn)
conn = rxrpc_get_connection_maybe(call->conn);
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index e4937b3f3685..8510a98b87e1 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -40,7 +40,9 @@ void rxrpc_notify_socket(struct rxrpc_call *call)
sk = &rx->sk;
if (rx && sk->sk_state < RXRPC_CLOSE) {
if (call->notify_rx) {
+ spin_lock_bh(&call->notify_lock);
call->notify_rx(sk, call, call->user_call_ID);
+ spin_unlock_bh(&call->notify_lock);
} else {
write_lock_bh(&rx->recvmsg_lock);
if (list_empty(&call->recvmsg_link)) {
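Taken together, the rxrpc changes above form one mechanism: rxrpc_notify_socket() now invokes ->notify_rx under the new call->notify_lock, and rxrpc_kernel_end_call() swaps in rxrpc_dummy_notify_rx under that same lock, so once the swap has been made no notification can still be running in, or later reach, the kernel service that is ending the call.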
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index b686e755fda9..dd70924cbcdf 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -312,9 +312,9 @@ static bool loss_event(struct netem_sched_data *q)
* std deviation sigma. Uses table lookup to approximate the desired
* distribution, and a uniformly-distributed pseudo-random source.
*/
-static s64 tabledist(s64 mu, s64 sigma,
+static s64 tabledist(s64 mu, s32 sigma,
struct crndstate *state,
- const struct disttable *dist)
+ const struct disttable *dist)
{
s64 x;
long t;
@@ -327,7 +327,7 @@ static s64 tabledist(s64 mu, s64 sigma,
/* default uniform distribution */
if (dist == NULL)
- return (rnd % (2*sigma)) - sigma + mu;
+ return (rnd % (2 * sigma)) - sigma + mu;
t = dist->table[rnd % dist->size];
x = (sigma % NETEM_DIST_SCALE) * t;
@@ -339,10 +339,8 @@ static s64 tabledist(s64 mu, s64 sigma,
return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
-static u64 packet_len_2_sched_time(unsigned int len,
- struct netem_sched_data *q)
+static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
- u64 offset;
len += q->packet_overhead;
if (q->cell_size) {
@@ -352,9 +350,8 @@ static u64 packet_len_2_sched_time(unsigned int len,
cells++;
len = cells * (q->cell_size + q->cell_overhead);
}
- offset = (u64)len * NSEC_PER_SEC;
- do_div(offset, q->rate);
- return offset;
+
+ return div64_u64(len * NSEC_PER_SEC, q->rate);
}
static void tfifo_reset(struct Qdisc *sch)
@@ -556,7 +553,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
now = last->time_to_send;
}
- delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
+ delay += packet_time_ns(qdisc_pkt_len(skb), q);
}
cb->time_to_send = now + delay;
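Worked example for the renamed helper (assuming q->rate is kept in bytes per second, as netem stores it): with packet_overhead and cell accounting left at zero, a 1500-byte packet at rate = 125000000 (1 Gbit/s) costs 1500 * NSEC_PER_SEC / 125000000 = 12000 ns of serialization delay; div64_u64() replaces do_div() because both the scaled length and the rate may now exceed 32 bits.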
diff --git a/net/socket.c b/net/socket.c
index c729625eb5d3..42d8e9c9ccd5 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -568,7 +568,6 @@ struct socket *sock_alloc(void)
sock = SOCKET_I(inode);
- kmemcheck_annotate_bitfield(sock, type);
inode->i_ino = get_next_ino();
inode->i_mode = S_IFSOCK | S_IRWXUGO;
inode->i_uid = current_fsuid();
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 33f4ae68426d..387cc4add6f6 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -50,7 +50,7 @@ EXPORT_SYMBOL_GPL(svc_pool_map);
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */
static int
-param_set_pool_mode(const char *val, struct kernel_param *kp)
+param_set_pool_mode(const char *val, const struct kernel_param *kp)
{
int *ip = (int *)kp->arg;
struct svc_pool_map *m = &svc_pool_map;
@@ -80,7 +80,7 @@ out:
}
static int
-param_get_pool_mode(char *buf, struct kernel_param *kp)
+param_get_pool_mode(char *buf, const struct kernel_param *kp)
{
int *ip = (int *)kp->arg;
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 60aff60e30ad..e07ee3ae0023 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -45,8 +45,18 @@ MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
-static struct proto tls_base_prot;
-static struct proto tls_sw_prot;
+enum {
+ TLS_BASE_TX,
+ TLS_SW_TX,
+ TLS_NUM_CONFIG,
+};
+
+static struct proto tls_prots[TLS_NUM_CONFIG];
+
+static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx)
+{
+ sk->sk_prot = &tls_prots[ctx->tx_conf];
+}
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
@@ -216,6 +226,12 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
void (*sk_proto_close)(struct sock *sk, long timeout);
lock_sock(sk);
+ sk_proto_close = ctx->sk_proto_close;
+
+ if (ctx->tx_conf == TLS_BASE_TX) {
+ kfree(ctx);
+ goto skip_tx_cleanup;
+ }
if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
tls_handle_open_record(sk, 0);
@@ -232,13 +248,14 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
sg++;
}
}
- ctx->free_resources(sk);
+
kfree(ctx->rec_seq);
kfree(ctx->iv);
- sk_proto_close = ctx->sk_proto_close;
- kfree(ctx);
+ if (ctx->tx_conf == TLS_SW_TX)
+ tls_sw_free_tx_resources(sk);
+skip_tx_cleanup:
release_sock(sk);
sk_proto_close(sk, timeout);
}
@@ -338,46 +355,41 @@ static int tls_getsockopt(struct sock *sk, int level, int optname,
static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
unsigned int optlen)
{
- struct tls_crypto_info *crypto_info, tmp_crypto_info;
+ struct tls_crypto_info *crypto_info;
struct tls_context *ctx = tls_get_ctx(sk);
- struct proto *prot = NULL;
int rc = 0;
+ int tx_conf;
if (!optval || (optlen < sizeof(*crypto_info))) {
rc = -EINVAL;
goto out;
}
- rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info));
+ crypto_info = &ctx->crypto_send;
+ /* Currently we don't support setting crypto info more than once */
+ if (TLS_CRYPTO_INFO_READY(crypto_info))
+ goto out;
+
+ rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
if (rc) {
rc = -EFAULT;
goto out;
}
/* check version */
- if (tmp_crypto_info.version != TLS_1_2_VERSION) {
+ if (crypto_info->version != TLS_1_2_VERSION) {
rc = -ENOTSUPP;
- goto out;
+ goto err_crypto_info;
}
- /* get user crypto info */
- crypto_info = &ctx->crypto_send;
-
- /* Currently we don't support set crypto info more than one time */
- if (TLS_CRYPTO_INFO_READY(crypto_info))
- goto out;
-
- switch (tmp_crypto_info.cipher_type) {
+ switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
rc = -EINVAL;
goto out;
}
- rc = copy_from_user(
- crypto_info,
- optval,
- sizeof(struct tls12_crypto_info_aes_gcm_128));
-
+ rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
+ optlen - sizeof(*crypto_info));
if (rc) {
rc = -EFAULT;
goto err_crypto_info;
@@ -389,18 +401,16 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
goto out;
}
- ctx->sk_write_space = sk->sk_write_space;
- sk->sk_write_space = tls_write_space;
-
- ctx->sk_proto_close = sk->sk_prot->close;
-
/* currently SW is default, we will have ethtool in future */
rc = tls_set_sw_offload(sk, ctx);
- prot = &tls_sw_prot;
+ tx_conf = TLS_SW_TX;
if (rc)
goto err_crypto_info;
- sk->sk_prot = prot;
+ ctx->tx_conf = tx_conf;
+ update_sk_prot(sk, ctx);
+ ctx->sk_write_space = sk->sk_write_space;
+ sk->sk_write_space = tls_write_space;
goto out;
err_crypto_info:
@@ -453,7 +463,10 @@ static int tls_init(struct sock *sk)
icsk->icsk_ulp_data = ctx;
ctx->setsockopt = sk->sk_prot->setsockopt;
ctx->getsockopt = sk->sk_prot->getsockopt;
- sk->sk_prot = &tls_base_prot;
+ ctx->sk_proto_close = sk->sk_prot->close;
+
+ ctx->tx_conf = TLS_BASE_TX;
+ update_sk_prot(sk, ctx);
out:
return rc;
}
@@ -464,16 +477,21 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
.init = tls_init,
};
+static void build_protos(struct proto *prot, struct proto *base)
+{
+ prot[TLS_BASE_TX] = *base;
+ prot[TLS_BASE_TX].setsockopt = tls_setsockopt;
+ prot[TLS_BASE_TX].getsockopt = tls_getsockopt;
+ prot[TLS_BASE_TX].close = tls_sk_proto_close;
+
+ prot[TLS_SW_TX] = prot[TLS_BASE_TX];
+ prot[TLS_SW_TX].sendmsg = tls_sw_sendmsg;
+ prot[TLS_SW_TX].sendpage = tls_sw_sendpage;
+}
+
static int __init tls_register(void)
{
- tls_base_prot = tcp_prot;
- tls_base_prot.setsockopt = tls_setsockopt;
- tls_base_prot.getsockopt = tls_getsockopt;
-
- tls_sw_prot = tls_base_prot;
- tls_sw_prot.sendmsg = tls_sw_sendmsg;
- tls_sw_prot.sendpage = tls_sw_sendpage;
- tls_sw_prot.close = tls_sk_proto_close;
+ build_protos(tls_prots, &tcp_prot);
tcp_register_ulp(&tcp_tls_ulp_ops);
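For context, a minimal sketch of the userspace sequence that exercises this setsockopt path, using the uapi names from include/uapi/linux/tls.h; the fallback #defines and the enable_ktls_tx() helper are illustrative, not part of the patch:

    #include <sys/socket.h>
    #include <netinet/tcp.h>
    #include <linux/tls.h>

    #ifndef TCP_ULP
    #define TCP_ULP 31              /* value from include/uapi/linux/tcp.h */
    #endif
    #ifndef SOL_TLS
    #define SOL_TLS 282             /* value from include/linux/socket.h */
    #endif

    /* Attach the TLS ULP, then hand the kernel the TX crypto state. */
    static int enable_ktls_tx(int fd)
    {
            struct tls12_crypto_info_aes_gcm_128 ci = {
                    .info.version     = TLS_1_2_VERSION,
                    .info.cipher_type = TLS_CIPHER_AES_GCM_128,
            };

            /* ci.key, ci.iv, ci.salt and ci.rec_seq must be filled in
             * from the finished TLS handshake before the second call.
             */
            if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")) < 0)
                    return -1;      /* ULP attach failed, tls_init() did not run */

            /* Lands in do_tls_setsockopt_tx() and, on success, switches
             * the socket's proto to tls_prots[TLS_SW_TX].
             */
            return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
    }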
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 7d80040a37b6..73d19210dd49 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -39,22 +39,6 @@
#include <net/tls.h>
-static inline void tls_make_aad(int recv,
- char *buf,
- size_t size,
- char *record_sequence,
- int record_sequence_size,
- unsigned char record_type)
-{
- memcpy(buf, record_sequence, record_sequence_size);
-
- buf[8] = record_type;
- buf[9] = TLS_1_2_VERSION_MAJOR;
- buf[10] = TLS_1_2_VERSION_MINOR;
- buf[11] = size >> 8;
- buf[12] = size & 0xFF;
-}
-
static void trim_sg(struct sock *sk, struct scatterlist *sg,
int *sg_num_elem, unsigned int *sg_size, int target_size)
{
@@ -219,7 +203,7 @@ static int tls_do_encryption(struct tls_context *tls_ctx,
struct aead_request *aead_req;
int rc;
- aead_req = kmalloc(req_size, flags);
+ aead_req = kzalloc(req_size, flags);
if (!aead_req)
return -ENOMEM;
@@ -249,7 +233,7 @@ static int tls_push_record(struct sock *sk, int flags,
sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);
- tls_make_aad(0, ctx->aad_space, ctx->sg_plaintext_size,
+ tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
tls_ctx->rec_seq, tls_ctx->rec_seq_size,
record_type);
@@ -639,7 +623,7 @@ sendpage_end:
return ret;
}
-static void tls_sw_free_resources(struct sock *sk)
+void tls_sw_free_tx_resources(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
@@ -650,6 +634,7 @@ static void tls_sw_free_resources(struct sock *sk)
tls_free_both_sg(sk);
kfree(ctx);
+ kfree(tls_ctx);
}
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
@@ -679,7 +664,6 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
}
ctx->priv_ctx = (struct tls_offload_context *)sw_ctx;
- ctx->free_resources = tls_sw_free_resources;
crypto_info = &ctx->crypto_send;
switch (crypto_info->cipher_type) {
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index fce2cbe6a193..bb16f1ec766e 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -14274,7 +14274,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct sk_buff *msg;
void *hdr;
- u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid);
+ u32 nlportid = READ_ONCE(wdev->ap_unexpected_nlportid);
if (!nlportid)
return false;
diff --git a/samples/Kconfig b/samples/Kconfig
index 9cb63188d3ef..c332a3b9de05 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -71,11 +71,10 @@ config SAMPLE_RPMSG_CLIENT
the rpmsg bus.
config SAMPLE_LIVEPATCH
- tristate "Build live patching sample -- loadable modules only"
+ tristate "Build live patching samples -- loadable modules only"
depends on LIVEPATCH && m
help
- Builds a sample live patch that replaces the procfs handler
- for /proc/cmdline to print "this has been live patched".
+ Build sample live patch demonstrations.
config SAMPLE_CONFIGFS
tristate "Build configfs patching sample -- loadable modules only"
diff --git a/samples/bpf/xdp_router_ipv4_user.c b/samples/bpf/xdp_router_ipv4_user.c
index 2c1fe3f4b1a4..916462112d55 100644
--- a/samples/bpf/xdp_router_ipv4_user.c
+++ b/samples/bpf/xdp_router_ipv4_user.c
@@ -206,12 +206,13 @@ static void read_route(struct nlmsghdr *nh, int nll)
direct_entry.arp.mac = 0;
direct_entry.arp.dst = 0;
if (route.dst_len == 32) {
- if (nh->nlmsg_type == RTM_DELROUTE)
+ if (nh->nlmsg_type == RTM_DELROUTE) {
assert(bpf_map_delete_elem(map_fd[3], &route.dst) == 0);
- else
+ } else {
if (bpf_map_lookup_elem(map_fd[2], &route.dst, &direct_entry.arp.mac) == 0)
direct_entry.arp.dst = route.dst;
assert(bpf_map_update_elem(map_fd[3], &route.dst, &direct_entry, 0) == 0);
+ }
}
for (i = 0; i < 4; i++)
prefix_key->data[i] = (route.dst >> i * 8) & 0xff;
diff --git a/samples/configfs/configfs_sample.c b/samples/configfs/configfs_sample.c
index 1ea33119e532..004a4e201476 100644
--- a/samples/configfs/configfs_sample.c
+++ b/samples/configfs/configfs_sample.c
@@ -115,7 +115,7 @@ static struct configfs_attribute *childless_attrs[] = {
NULL,
};
-static struct config_item_type childless_type = {
+static const struct config_item_type childless_type = {
.ct_attrs = childless_attrs,
.ct_owner = THIS_MODULE,
};
@@ -193,7 +193,7 @@ static struct configfs_item_operations simple_child_item_ops = {
.release = simple_child_release,
};
-static struct config_item_type simple_child_type = {
+static const struct config_item_type simple_child_type = {
.ct_item_ops = &simple_child_item_ops,
.ct_attrs = simple_child_attrs,
.ct_owner = THIS_MODULE,
@@ -261,7 +261,7 @@ static struct configfs_group_operations simple_children_group_ops = {
.make_item = simple_children_make_item,
};
-static struct config_item_type simple_children_type = {
+static const struct config_item_type simple_children_type = {
.ct_item_ops = &simple_children_item_ops,
.ct_group_ops = &simple_children_group_ops,
.ct_attrs = simple_children_attrs,
@@ -331,7 +331,7 @@ static struct configfs_group_operations group_children_group_ops = {
.make_group = group_children_make_group,
};
-static struct config_item_type group_children_type = {
+static const struct config_item_type group_children_type = {
.ct_group_ops = &group_children_group_ops,
.ct_attrs = group_children_attrs,
.ct_owner = THIS_MODULE,
diff --git a/samples/connector/cn_test.c b/samples/connector/cn_test.c
index d12cc944b696..95cd06f4ec1e 100644
--- a/samples/connector/cn_test.c
+++ b/samples/connector/cn_test.c
@@ -125,12 +125,12 @@ static int cn_test_want_notify(void)
#endif
static u32 cn_test_timer_counter;
-static void cn_test_timer_func(unsigned long __data)
+static void cn_test_timer_func(struct timer_list *unused)
{
struct cn_msg *m;
char data[32];
- pr_debug("%s: timer fired with data %lu\n", __func__, __data);
+ pr_debug("%s: timer fired\n", __func__);
m = kzalloc(sizeof(*m) + sizeof(data), GFP_ATOMIC);
if (m) {
@@ -168,7 +168,7 @@ static int cn_test_init(void)
goto err_out;
}
- setup_timer(&cn_test_timer, cn_test_timer_func, 0);
+ timer_setup(&cn_test_timer, cn_test_timer_func, 0);
mod_timer(&cn_test_timer, jiffies + msecs_to_jiffies(1000));
pr_info("initialized with id={%u.%u}\n",
diff --git a/samples/kprobes/Makefile b/samples/kprobes/Makefile
index 68739bc4fc6a..880e54d2c082 100644
--- a/samples/kprobes/Makefile
+++ b/samples/kprobes/Makefile
@@ -1,5 +1,5 @@
# builds the kprobes example kernel modules;
# then to use one (as root): insmod <module_name.ko>
-obj-$(CONFIG_SAMPLE_KPROBES) += kprobe_example.o jprobe_example.o
+obj-$(CONFIG_SAMPLE_KPROBES) += kprobe_example.o
obj-$(CONFIG_SAMPLE_KRETPROBES) += kretprobe_example.o
diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c
deleted file mode 100644
index e3c0a40909f7..000000000000
--- a/samples/kprobes/jprobe_example.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Here's a sample kernel module showing the use of jprobes to dump
- * the arguments of _do_fork().
- *
- * For more information on theory of operation of jprobes, see
- * Documentation/kprobes.txt
- *
- * Build and insert the kernel module as done in the kprobe example.
- * You will see the trace data in /var/log/messages and on the
- * console whenever _do_fork() is invoked to create a new process.
- * (Some messages may be suppressed if syslogd is configured to
- * eliminate duplicate messages.)
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/kprobes.h>
-
-/*
- * Jumper probe for _do_fork.
- * Mirror principle enables access to arguments of the probed routine
- * from the probe handler.
- */
-
-/* Proxy routine having the same arguments as actual _do_fork() routine */
-static long j_do_fork(unsigned long clone_flags, unsigned long stack_start,
- unsigned long stack_size, int __user *parent_tidptr,
- int __user *child_tidptr, unsigned long tls)
-{
- pr_info("jprobe: clone_flags = 0x%lx, stack_start = 0x%lx "
- "stack_size = 0x%lx\n", clone_flags, stack_start, stack_size);
-
- /* Always end with a call to jprobe_return(). */
- jprobe_return();
- return 0;
-}
-
-static struct jprobe my_jprobe = {
- .entry = j_do_fork,
- .kp = {
- .symbol_name = "_do_fork",
- },
-};
-
-static int __init jprobe_init(void)
-{
- int ret;
-
- ret = register_jprobe(&my_jprobe);
- if (ret < 0) {
- pr_err("register_jprobe failed, returned %d\n", ret);
- return -1;
- }
- pr_info("Planted jprobe at %p, handler addr %p\n",
- my_jprobe.kp.addr, my_jprobe.entry);
- return 0;
-}
-
-static void __exit jprobe_exit(void)
-{
- unregister_jprobe(&my_jprobe);
- pr_info("jprobe at %p unregistered\n", my_jprobe.kp.addr);
-}
-
-module_init(jprobe_init)
-module_exit(jprobe_exit)
-MODULE_LICENSE("GPL");
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c
index 88b3e2d227ae..67de3b774bc9 100644
--- a/samples/kprobes/kprobe_example.c
+++ b/samples/kprobes/kprobe_example.c
@@ -47,6 +47,10 @@ static int handler_pre(struct kprobe *p, struct pt_regs *regs)
" pstate = 0x%lx\n",
p->symbol_name, p->addr, (long)regs->pc, (long)regs->pstate);
#endif
+#ifdef CONFIG_S390
+ pr_info("<%s> pre_handler: p->addr, 0x%p, ip = 0x%lx, flags = 0x%lx\n",
+ p->symbol_name, p->addr, regs->psw.addr, regs->flags);
+#endif
/* A dump_stack() here will give a stack backtrace */
return 0;
@@ -76,6 +80,10 @@ static void handler_post(struct kprobe *p, struct pt_regs *regs,
pr_info("<%s> post_handler: p->addr = 0x%p, pstate = 0x%lx\n",
p->symbol_name, p->addr, (long)regs->pstate);
#endif
+#ifdef CONFIG_S390
+ pr_info("<%s> pre_handler: p->addr, 0x%p, flags = 0x%lx\n",
+ p->symbol_name, p->addr, regs->flags);
+#endif
}
/*
diff --git a/samples/livepatch/Makefile b/samples/livepatch/Makefile
index 10319d7ea0b1..2472ce39a18d 100644
--- a/samples/livepatch/Makefile
+++ b/samples/livepatch/Makefile
@@ -1 +1,7 @@
obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-sample.o
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-mod.o
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix1.o
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-shadow-fix2.o
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-demo.o
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-mod.o
+obj-$(CONFIG_SAMPLE_LIVEPATCH) += livepatch-callbacks-busymod.o
diff --git a/samples/livepatch/livepatch-callbacks-busymod.c b/samples/livepatch/livepatch-callbacks-busymod.c
new file mode 100644
index 000000000000..80d06e103f1b
--- /dev/null
+++ b/samples/livepatch/livepatch-callbacks-busymod.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * livepatch-callbacks-busymod.c - (un)patching callbacks demo support module
+ *
+ *
+ * Purpose
+ * -------
+ *
+ * Simple module to demonstrate livepatch (un)patching callbacks.
+ *
+ *
+ * Usage
+ * -----
+ *
+ * This module is not intended to be standalone. See the "Usage"
+ * section of livepatch-callbacks-mod.c.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+
+static int sleep_secs;
+module_param(sleep_secs, int, 0644);
+MODULE_PARM_DESC(sleep_secs, "sleep_secs (default=0)");
+
+static void busymod_work_func(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work, busymod_work_func);
+
+static void busymod_work_func(struct work_struct *work)
+{
+ pr_info("%s, sleeping %d seconds ...\n", __func__, sleep_secs);
+ msleep(sleep_secs * 1000);
+ pr_info("%s exit\n", __func__);
+}
+
+static int livepatch_callbacks_mod_init(void)
+{
+ pr_info("%s\n", __func__);
+ schedule_delayed_work(&work,
+ msecs_to_jiffies(1000 * 0));
+ return 0;
+}
+
+static void livepatch_callbacks_mod_exit(void)
+{
+ cancel_delayed_work_sync(&work);
+ pr_info("%s\n", __func__);
+}
+
+module_init(livepatch_callbacks_mod_init);
+module_exit(livepatch_callbacks_mod_exit);
+MODULE_LICENSE("GPL");
diff --git a/samples/livepatch/livepatch-callbacks-demo.c b/samples/livepatch/livepatch-callbacks-demo.c
new file mode 100644
index 000000000000..3d115bd68442
--- /dev/null
+++ b/samples/livepatch/livepatch-callbacks-demo.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * livepatch-callbacks-demo.c - (un)patching callbacks livepatch demo
+ *
+ *
+ * Purpose
+ * -------
+ *
+ * Demonstration of registering livepatch (un)patching callbacks.
+ *
+ *
+ * Usage
+ * -----
+ *
+ * Step 1 - load the simple module
+ *
+ * insmod samples/livepatch/livepatch-callbacks-mod.ko
+ *
+ *
+ * Step 2 - load the demonstration livepatch (with callbacks)
+ *
+ * insmod samples/livepatch/livepatch-callbacks-demo.ko
+ *
+ *
+ * Step 3 - cleanup
+ *
+ * echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled
+ * rmmod livepatch_callbacks_demo
+ * rmmod livepatch_callbacks_mod
+ *
+ * Watch dmesg output to see livepatch enablement, callback execution
+ * and patching operations for both vmlinux and module targets.
+ *
+ * NOTE: swap the insmod order of livepatch-callbacks-mod.ko and
+ * livepatch-callbacks-demo.ko to observe what happens when a
+ * target module is loaded after a livepatch with callbacks.
+ *
+ * NOTE: 'pre_patch_ret' is a module parameter that sets the pre-patch
+ * callback return status. Try setting up a non-zero status
+ * such as -19 (-ENODEV):
+ *
+ * # Load demo livepatch, vmlinux is patched
+ * insmod samples/livepatch/livepatch-callbacks-demo.ko
+ *
+ * # Setup next pre-patch callback to return -ENODEV
+ * echo -19 > /sys/module/livepatch_callbacks_demo/parameters/pre_patch_ret
+ *
+ * # Module loader refuses to load the target module
+ * insmod samples/livepatch/livepatch-callbacks-mod.ko
+ * insmod: ERROR: could not insert module samples/livepatch/livepatch-callbacks-mod.ko: No such device
+ *
+ * NOTE: There is a second target module,
+ * livepatch-callbacks-busymod.ko, available for experimenting
+ * with livepatch (un)patch callbacks. This module contains
+ * a 'sleep_secs' parameter that parks the module on one of the
+ * functions that the livepatch demo module wants to patch.
+ * Modifying this value and tweaking the order of module loads can
+ * effectively demonstrate stalled patch transitions:
+ *
+ * # Load a target module, let it park on 'busymod_work_func' for
+ * # thirty seconds
+ * insmod samples/livepatch/livepatch-callbacks-busymod.ko sleep_secs=30
+ *
+ * # Meanwhile load the livepatch
+ * insmod samples/livepatch/livepatch-callbacks-demo.ko
+ *
+ * # ... then load and unload another target module while the
+ * # transition is in progress
+ * insmod samples/livepatch/livepatch-callbacks-mod.ko
+ * rmmod samples/livepatch/livepatch-callbacks-mod.ko
+ *
+ * # Finally cleanup
+ * echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled
+ * rmmod samples/livepatch/livepatch-callbacks-demo.ko
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+
+static int pre_patch_ret;
+module_param(pre_patch_ret, int, 0644);
+MODULE_PARM_DESC(pre_patch_ret, "pre_patch_ret (default=0)");
+
+static const char *const module_state[] = {
+ [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
+ [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
+ [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
+ [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
+};
+
+static void callback_info(const char *callback, struct klp_object *obj)
+{
+ if (obj->mod)
+ pr_info("%s: %s -> %s\n", callback, obj->mod->name,
+ module_state[obj->mod->state]);
+ else
+ pr_info("%s: vmlinux\n", callback);
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static int pre_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ return pre_patch_ret;
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void post_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void pre_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void post_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+static void patched_work_func(struct work_struct *work)
+{
+ pr_info("%s\n", __func__);
+}
+
+static struct klp_func no_funcs[] = {
+ { }
+};
+
+static struct klp_func busymod_funcs[] = {
+ {
+ .old_name = "busymod_work_func",
+ .new_func = patched_work_func,
+ }, { }
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = NULL, /* vmlinux */
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, {
+ .name = "livepatch_callbacks_mod",
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, {
+ .name = "livepatch_callbacks_busymod",
+ .funcs = busymod_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+};
+
+static int livepatch_callbacks_demo_init(void)
+{
+ int ret;
+
+ if (!klp_have_reliable_stack() && !patch.immediate) {
+ /*
+ * WARNING: Be very careful when using 'patch.immediate' in
+ * your patches. It's ok to use it for simple patches like
+ * this, but for more complex patches which change function
+ * semantics, locking semantics, or data structures, it may not
+ * be safe. Use of this option will also prevent removal of
+ * the patch.
+ *
+ * See Documentation/livepatch/livepatch.txt for more details.
+ */
+ patch.immediate = true;
+ pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
+ }
+
+ ret = klp_register_patch(&patch);
+ if (ret)
+ return ret;
+ ret = klp_enable_patch(&patch);
+ if (ret) {
+ WARN_ON(klp_unregister_patch(&patch));
+ return ret;
+ }
+ return 0;
+}
+
+static void livepatch_callbacks_demo_exit(void)
+{
+ WARN_ON(klp_unregister_patch(&patch));
+}
+
+module_init(livepatch_callbacks_demo_init);
+module_exit(livepatch_callbacks_demo_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
diff --git a/samples/livepatch/livepatch-callbacks-mod.c b/samples/livepatch/livepatch-callbacks-mod.c
new file mode 100644
index 000000000000..e610ce29ba44
--- /dev/null
+++ b/samples/livepatch/livepatch-callbacks-mod.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * livepatch-callbacks-mod.c - (un)patching callbacks demo support module
+ *
+ *
+ * Purpose
+ * -------
+ *
+ * Simple module to demonstrate livepatch (un)patching callbacks.
+ *
+ *
+ * Usage
+ * -----
+ *
+ * This module is not intended to be standalone. See the "Usage"
+ * section of livepatch-callbacks-demo.c.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+static int livepatch_callbacks_mod_init(void)
+{
+ pr_info("%s\n", __func__);
+ return 0;
+}
+
+static void livepatch_callbacks_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+}
+
+module_init(livepatch_callbacks_mod_init);
+module_exit(livepatch_callbacks_mod_exit);
+MODULE_LICENSE("GPL");
diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c
new file mode 100644
index 000000000000..fbe0a1f3d99b
--- /dev/null
+++ b/samples/livepatch/livepatch-shadow-fix1.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * livepatch-shadow-fix1.c - Shadow variables, livepatch demo
+ *
+ * Purpose
+ * -------
+ *
+ * Fixes the memory leak introduced in livepatch-shadow-mod through the
+ * use of a shadow variable. This fix demonstrates the "extending" of
+ * short-lived data structures by patching their allocation and release
+ * functions.
+ *
+ *
+ * Usage
+ * -----
+ *
+ * This module is not intended to be standalone. See the "Usage"
+ * section of livepatch-shadow-mod.c.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+#include <linux/slab.h>
+
+/* Shadow variable enums */
+#define SV_LEAK 1
+
+/* Allocate new dummies every second */
+#define ALLOC_PERIOD 1
+/* Check for expired dummies after a few new ones have been allocated */
+#define CLEANUP_PERIOD (3 * ALLOC_PERIOD)
+/* Dummies expire after a few cleanup instances */
+#define EXPIRE_PERIOD (4 * CLEANUP_PERIOD)
+
+struct dummy {
+ struct list_head list;
+ unsigned long jiffies_expire;
+};
+
+struct dummy *livepatch_fix1_dummy_alloc(void)
+{
+ struct dummy *d;
+ void *leak;
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return NULL;
+
+ d->jiffies_expire = jiffies +
+ msecs_to_jiffies(1000 * EXPIRE_PERIOD);
+
+ /*
+ * Patch: save the extra memory location into a SV_LEAK shadow
+ * variable. A patched dummy_free routine can later fetch this
+ * pointer to handle resource release.
+ */
+ leak = kzalloc(sizeof(int), GFP_KERNEL);
+ klp_shadow_alloc(d, SV_LEAK, &leak, sizeof(leak), GFP_KERNEL);
+
+ pr_info("%s: dummy @ %p, expires @ %lx\n",
+ __func__, d, d->jiffies_expire);
+
+ return d;
+}
+
+void livepatch_fix1_dummy_free(struct dummy *d)
+{
+ void **shadow_leak, *leak;
+
+ /*
+ * Patch: fetch the saved SV_LEAK shadow variable, detach and
+ * free it. Note: handle cases where this shadow variable does
+ * not exist (ie, dummy structures allocated before this livepatch
+ * was loaded.)
+ */
+ shadow_leak = klp_shadow_get(d, SV_LEAK);
+ if (shadow_leak) {
+ leak = *shadow_leak;
+ klp_shadow_free(d, SV_LEAK);
+ kfree(leak);
+ pr_info("%s: dummy @ %p, prevented leak @ %p\n",
+ __func__, d, leak);
+ } else {
+ pr_info("%s: dummy @ %p leaked!\n", __func__, d);
+ }
+
+ kfree(d);
+}
+
+static struct klp_func funcs[] = {
+ {
+ .old_name = "dummy_alloc",
+ .new_func = livepatch_fix1_dummy_alloc,
+ },
+ {
+ .old_name = "dummy_free",
+ .new_func = livepatch_fix1_dummy_free,
+ }, { }
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = "livepatch_shadow_mod",
+ .funcs = funcs,
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+};
+
+static int livepatch_shadow_fix1_init(void)
+{
+ int ret;
+
+ if (!klp_have_reliable_stack() && !patch.immediate) {
+ /*
+ * WARNING: Be very careful when using 'patch.immediate' in
+ * your patches. It's ok to use it for simple patches like
+ * this, but for more complex patches which change function
+ * semantics, locking semantics, or data structures, it may not
+ * be safe. Use of this option will also prevent removal of
+ * the patch.
+ *
+ * See Documentation/livepatch/livepatch.txt for more details.
+ */
+ patch.immediate = true;
+ pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
+ }
+
+ ret = klp_register_patch(&patch);
+ if (ret)
+ return ret;
+ ret = klp_enable_patch(&patch);
+ if (ret) {
+ WARN_ON(klp_unregister_patch(&patch));
+ return ret;
+ }
+ return 0;
+}
+
+static void livepatch_shadow_fix1_exit(void)
+{
+ /* Cleanup any existing SV_LEAK shadow variables */
+ klp_shadow_free_all(SV_LEAK);
+
+ WARN_ON(klp_unregister_patch(&patch));
+}
+
+module_init(livepatch_shadow_fix1_init);
+module_exit(livepatch_shadow_fix1_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
diff --git a/samples/livepatch/livepatch-shadow-fix2.c b/samples/livepatch/livepatch-shadow-fix2.c
new file mode 100644
index 000000000000..53c1794bdc5f
--- /dev/null
+++ b/samples/livepatch/livepatch-shadow-fix2.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * livepatch-shadow-fix2.c - Shadow variables, livepatch demo
+ *
+ * Purpose
+ * -------
+ *
+ * Adds functionality to livepatch-shadow-mod's in-flight data
+ * structures through a shadow variable. The livepatch patches a
+ * routine that periodically inspects data structures, incrementing a
+ * per-data-structure counter, creating the counter if needed.
+ *
+ *
+ * Usage
+ * -----
+ *
+ * This module is not intended to be standalone. See the "Usage"
+ * section of livepatch-shadow-mod.c.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+#include <linux/slab.h>
+
+/* Shadow variable enums */
+#define SV_LEAK 1
+#define SV_COUNTER 2
+
+struct dummy {
+ struct list_head list;
+ unsigned long jiffies_expire;
+};
+
+bool livepatch_fix2_dummy_check(struct dummy *d, unsigned long jiffies)
+{
+ int *shadow_count;
+ int count;
+
+ /*
+ * Patch: handle in-flight dummy structures, if they do not
+ * already have a SV_COUNTER shadow variable, then attach a
+ * new one.
+ */
+ count = 0;
+ shadow_count = klp_shadow_get_or_alloc(d, SV_COUNTER,
+ &count, sizeof(count),
+ GFP_NOWAIT);
+ if (shadow_count)
+ *shadow_count += 1;
+
+ return time_after(jiffies, d->jiffies_expire);
+}
+
+void livepatch_fix2_dummy_free(struct dummy *d)
+{
+ void **shadow_leak, *leak;
+ int *shadow_count;
+
+ /* Patch: copy the memory leak patch from the fix1 module. */
+ shadow_leak = klp_shadow_get(d, SV_LEAK);
+ if (shadow_leak) {
+ leak = *shadow_leak;
+ klp_shadow_free(d, SV_LEAK);
+ kfree(leak);
+ pr_info("%s: dummy @ %p, prevented leak @ %p\n",
+ __func__, d, leak);
+ } else {
+ pr_info("%s: dummy @ %p leaked!\n", __func__, d);
+ }
+
+ /*
+ * Patch: fetch the SV_COUNTER shadow variable and display
+ * the final count. Detach the shadow variable.
+ */
+ shadow_count = klp_shadow_get(d, SV_COUNTER);
+ if (shadow_count) {
+ pr_info("%s: dummy @ %p, check counter = %d\n",
+ __func__, d, *shadow_count);
+ klp_shadow_free(d, SV_COUNTER);
+ }
+
+ kfree(d);
+}
+
+static struct klp_func funcs[] = {
+ {
+ .old_name = "dummy_check",
+ .new_func = livepatch_fix2_dummy_check,
+ },
+ {
+ .old_name = "dummy_free",
+ .new_func = livepatch_fix2_dummy_free,
+ }, { }
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = "livepatch_shadow_mod",
+ .funcs = funcs,
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+};
+
+static int livepatch_shadow_fix2_init(void)
+{
+ int ret;
+
+ if (!klp_have_reliable_stack() && !patch.immediate) {
+ /*
+ * WARNING: Be very careful when using 'patch.immediate' in
+ * your patches. It's ok to use it for simple patches like
+ * this, but for more complex patches which change function
+ * semantics, locking semantics, or data structures, it may not
+ * be safe. Use of this option will also prevent removal of
+ * the patch.
+ *
+ * See Documentation/livepatch/livepatch.txt for more details.
+ */
+ patch.immediate = true;
+ pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
+ }
+
+ ret = klp_register_patch(&patch);
+ if (ret)
+ return ret;
+ ret = klp_enable_patch(&patch);
+ if (ret) {
+ WARN_ON(klp_unregister_patch(&patch));
+ return ret;
+ }
+ return 0;
+}
+
+static void livepatch_shadow_fix2_exit(void)
+{
+ /* Cleanup any existing SV_COUNTER shadow variables */
+ klp_shadow_free_all(SV_COUNTER);
+
+ WARN_ON(klp_unregister_patch(&patch));
+}
+
+module_init(livepatch_shadow_fix2_init);
+module_exit(livepatch_shadow_fix2_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
diff --git a/samples/livepatch/livepatch-shadow-mod.c b/samples/livepatch/livepatch-shadow-mod.c
new file mode 100644
index 000000000000..4c54b250332d
--- /dev/null
+++ b/samples/livepatch/livepatch-shadow-mod.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * livepatch-shadow-mod.c - Shadow variables, buggy module demo
+ *
+ * Purpose
+ * -------
+ *
+ * As a demonstration of livepatch shadow variable API, this module
+ * introduces memory leak behavior that livepatch modules
+ * livepatch-shadow-fix1.ko and livepatch-shadow-fix2.ko correct and
+ * enhance.
+ *
+ * WARNING - even though the livepatch-shadow-fix modules patch the
+ * memory leak, please load these modules at your own risk -- some
+ * amount of memory may be leaked before the bug is patched.
+ *
+ *
+ * Usage
+ * -----
+ *
+ * Step 1 - Load the buggy demonstration module:
+ *
+ * insmod samples/livepatch/livepatch-shadow-mod.ko
+ *
+ * Watch dmesg output for a few moments to see new dummies being allocated
+ * and a periodic cleanup check. (Note: a small amount of memory is
+ * being leaked.)
+ *
+ *
+ * Step 2 - Load livepatch fix1:
+ *
+ * insmod samples/livepatch/livepatch-shadow-fix1.ko
+ *
+ * Continue watching dmesg and note that now livepatch_fix1_dummy_free()
+ * and livepatch_fix1_dummy_alloc() are logging messages about leaked
+ * memory and eventually leaks prevented.
+ *
+ *
+ * Step 3 - Load livepatch fix2 (on top of fix1):
+ *
+ * insmod samples/livepatch/livepatch-shadow-fix2.ko
+ *
+ * This module extends functionality through shadow variables, as a new
+ * "check" counter is added to the dummy structure. Periodic dmesg
+ * messages will log these as dummies are cleaned up.
+ *
+ *
+ * Step 4 - Cleanup
+ *
+ * Unwind the demonstration by disabling the livepatch fix modules, then
+ * removing them and the demo module:
+ *
+ * echo 0 > /sys/kernel/livepatch/livepatch_shadow_fix2/enabled
+ * echo 0 > /sys/kernel/livepatch/livepatch_shadow_fix1/enabled
+ * rmmod livepatch-shadow-fix2
+ * rmmod livepatch-shadow-fix1
+ * rmmod livepatch-shadow-mod
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/workqueue.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
+MODULE_DESCRIPTION("Buggy module for shadow variable demo");
+
+/* Allocate new dummies every second */
+#define ALLOC_PERIOD 1
+/* Check for expired dummies after a few new ones have been allocated */
+#define CLEANUP_PERIOD (3 * ALLOC_PERIOD)
+/* Dummies expire after a few cleanup instances */
+#define EXPIRE_PERIOD (4 * CLEANUP_PERIOD)
+
+/*
+ * Keep a list of all the dummies so we can clean up any residual ones
+ * on module exit
+ */
+LIST_HEAD(dummy_list);
+DEFINE_MUTEX(dummy_list_mutex);
+
+struct dummy {
+ struct list_head list;
+ unsigned long jiffies_expire;
+};
+
+noinline struct dummy *dummy_alloc(void)
+{
+ struct dummy *d;
+ void *leak;
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return NULL;
+
+ d->jiffies_expire = jiffies +
+ msecs_to_jiffies(1000 * EXPIRE_PERIOD);
+
+ /* Oops, forgot to save leak! */
+ leak = kzalloc(sizeof(int), GFP_KERNEL);
+
+ pr_info("%s: dummy @ %p, expires @ %lx\n",
+ __func__, d, d->jiffies_expire);
+
+ return d;
+}
+
+noinline void dummy_free(struct dummy *d)
+{
+ pr_info("%s: dummy @ %p, expired = %lx\n",
+ __func__, d, d->jiffies_expire);
+
+ kfree(d);
+}
+
+noinline bool dummy_check(struct dummy *d, unsigned long jiffies)
+{
+ return time_after(jiffies, d->jiffies_expire);
+}
+
+/*
+ * alloc_work_func: allocates new dummy structures, allocates additional
+ * memory, aptly named "leak", but doesn't keep
+ * permanent record of it.
+ */
+
+static void alloc_work_func(struct work_struct *work);
+static DECLARE_DELAYED_WORK(alloc_dwork, alloc_work_func);
+
+static void alloc_work_func(struct work_struct *work)
+{
+ struct dummy *d;
+
+ d = dummy_alloc();
+ if (!d)
+ return;
+
+ mutex_lock(&dummy_list_mutex);
+ list_add(&d->list, &dummy_list);
+ mutex_unlock(&dummy_list_mutex);
+
+ schedule_delayed_work(&alloc_dwork,
+ msecs_to_jiffies(1000 * ALLOC_PERIOD));
+}
+
+/*
+ * cleanup_work_func: frees dummy structures. Without knowledge of
+ * "leak", it leaks the additional memory that
+ * alloc_work_func created.
+ */
+
+static void cleanup_work_func(struct work_struct *work);
+static DECLARE_DELAYED_WORK(cleanup_dwork, cleanup_work_func);
+
+static void cleanup_work_func(struct work_struct *work)
+{
+ struct dummy *d, *tmp;
+ unsigned long j;
+
+ j = jiffies;
+ pr_info("%s: jiffies = %lx\n", __func__, j);
+
+ mutex_lock(&dummy_list_mutex);
+ list_for_each_entry_safe(d, tmp, &dummy_list, list) {
+
+ /* Kick out and free any expired dummies */
+ if (dummy_check(d, j)) {
+ list_del(&d->list);
+ dummy_free(d);
+ }
+ }
+ mutex_unlock(&dummy_list_mutex);
+
+ schedule_delayed_work(&cleanup_dwork,
+ msecs_to_jiffies(1000 * CLEANUP_PERIOD));
+}
+
+static int livepatch_shadow_mod_init(void)
+{
+ schedule_delayed_work(&alloc_dwork,
+ msecs_to_jiffies(1000 * ALLOC_PERIOD));
+ schedule_delayed_work(&cleanup_dwork,
+ msecs_to_jiffies(1000 * CLEANUP_PERIOD));
+
+ return 0;
+}
+
+static void livepatch_shadow_mod_exit(void)
+{
+ struct dummy *d, *tmp;
+
+ /* Wait for any dummies at work */
+ cancel_delayed_work_sync(&alloc_dwork);
+ cancel_delayed_work_sync(&cleanup_dwork);
+
+ /* Cleanup residual dummies */
+ list_for_each_entry_safe(d, tmp, &dummy_list, list) {
+ list_del(&d->list);
+ dummy_free(d);
+ }
+}
+
+module_init(livepatch_shadow_mod_init);
+module_exit(livepatch_shadow_mod_exit);
diff --git a/samples/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c
index 49db1def1721..f42ce551bb48 100644
--- a/samples/mic/mpssd/mpssd.c
+++ b/samples/mic/mpssd/mpssd.c
@@ -65,7 +65,7 @@ static struct mic_info mic_list;
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))
#define GSO_ENABLED 1
#define MAX_GSO_SIZE (64 * 1024)
@@ -382,7 +382,7 @@ disp_iovec(struct mic_info *mic, struct mic_copy_desc *copy,
static inline __u16 read_avail_idx(struct mic_vring *vr)
{
- return ACCESS_ONCE(vr->info->avail_idx);
+ return READ_ONCE(vr->info->avail_idx);
}
static inline void txrx_prepare(int type, bool tx, struct mic_vring *vr,
@@ -523,7 +523,7 @@ spin_for_descriptors(struct mic_info *mic, struct mic_vring *vr)
{
__u16 avail_idx = read_avail_idx(vr);
- while (avail_idx == le16toh(ACCESS_ONCE(vr->vr.avail->idx))) {
+ while (avail_idx == le16toh(READ_ONCE(vr->vr.avail->idx))) {
#ifdef DEBUG
mpsslog("%s %s waiting for desc avail %d info_avail %d\n",
mic->name, __func__,
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index 5bcd91455ec8..80b4a70315b6 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -96,7 +96,7 @@
* __entry->bar.x = y;
* __array: There are three fields (type, name, size). The type is the
- * type of elements in teh array, the name is the name of the array.
+ * type of elements in the array, the name is the name of the array.
* size is the number of items in the array (not the total size).
*
* __array( char, foo, 10) is the same as saying: char foo[10];
@@ -113,7 +113,7 @@
* type is the type of the element, name is the name of the array.
* The size is different than __array. It is not a static number,
* but the algorithm to figure out the length of the array for the
- * specific instance of tracepoint. Again, size is the numebr of
+ * specific instance of tracepoint. Again, size is the number of
* items in the array, not the total length in bytes.
*
* __dynamic_array( int, foo, bar) is similar to: int foo[bar];
@@ -126,9 +126,9 @@
* Notice, that "__entry" is not needed here.
*
* __string: This is a special kind of __dynamic_array. It expects to
- * have a nul terminated character array passed to it (it allows
+ * have a null terminated character array passed to it (it allows
* for NULL too, which would be converted into "(null)"). __string
- * takes two paramenter (name, src), where name is the name of
+ * takes two parameters (name, src), where name is the name of
* the string saved, and src is the string to copy into the
* ring buffer.
*
@@ -445,7 +445,7 @@ DECLARE_EVENT_CLASS(foo_template,
/*
* Here's a better way for the previous samples (except, the first
- * exmaple had more fields and could not be used here).
+ * example had more fields and could not be used here).
*/
DEFINE_EVENT(foo_template, foo_with_template_simple,
TP_PROTO(const char *foo, int bar),
diff --git a/samples/v4l/v4l2-pci-skeleton.c b/samples/v4l/v4l2-pci-skeleton.c
index 483e9bca9444..f520e3aef9c6 100644
--- a/samples/v4l/v4l2-pci-skeleton.c
+++ b/samples/v4l/v4l2-pci-skeleton.c
@@ -772,8 +772,10 @@ static int skeleton_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Allocate a new instance */
skel = devm_kzalloc(&pdev->dev, sizeof(struct skeleton), GFP_KERNEL);
- if (!skel)
- return -ENOMEM;
+ if (!skel) {
+ ret = -ENOMEM;
+ goto disable_pci;
+ }
/* Allocate the interrupt */
ret = devm_request_irq(&pdev->dev, pdev->irq,
diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
index ca495686b9c3..09f255bdf3ac 100644
--- a/samples/vfio-mdev/mtty.c
+++ b/samples/vfio-mdev/mtty.c
@@ -1413,7 +1413,7 @@ struct attribute_group *mdev_type_groups[] = {
NULL,
};
-struct mdev_parent_ops mdev_fops = {
+static const struct mdev_parent_ops mdev_fops = {
.owner = THIS_MODULE,
.dev_attr_groups = mtty_dev_groups,
.mdev_attr_groups = mdev_dev_groups,
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index bb831d49bcfd..e63af4e19382 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -259,7 +259,7 @@ ifneq ($(SKIP_STACK_VALIDATION),1)
__objtool_obj := $(objtree)/tools/objtool/objtool
-objtool_args = $(if $(CONFIG_ORC_UNWINDER),orc generate,check)
+objtool_args = $(if $(CONFIG_UNWINDER_ORC),orc generate,check)
ifndef CONFIG_FRAME_POINTER
objtool_args += --no-fp
diff --git a/scripts/Makefile.dtbinst b/scripts/Makefile.dtbinst
index c8ba6e7f9868..7301ab5e2e06 100644
--- a/scripts/Makefile.dtbinst
+++ b/scripts/Makefile.dtbinst
@@ -6,8 +6,6 @@
# INSTALL_DTBS_PATH directory or the default location:
#
# $INSTALL_PATH/dtbs/$KERNELRELEASE
-#
-# Traverse through subdirectories listed in $(dts-dirs).
# ==========================================================================
src := $(obj)
@@ -21,8 +19,8 @@ include include/config/auto.conf
include scripts/Kbuild.include
include $(src)/Makefile
-dtbinst-files := $(dtb-y)
-dtbinst-dirs := $(dts-dirs)
+dtbinst-files := $(sort $(dtb-y) $(if $(CONFIG_OF_ALL_DTBS), $(dtb-)))
+dtbinst-dirs := $(subdir-y) $(subdir-m)
# Helper targets for Installing DTBs into the boot directory
quiet_cmd_dtb_install = INSTALL $<
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 04b5633df1cf..2278405cbc80 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -70,6 +70,11 @@ obj-dirs := $(dir $(multi-objs) $(obj-y))
real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) $(extra-y)
real-objs-m := $(foreach m, $(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))),$($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m)),$(m)))
+# DTB
+# If CONFIG_OF_ALL_DTBS is enabled, all DT blobs are built
+extra-y += $(dtb-y)
+extra-$(CONFIG_OF_ALL_DTBS) += $(dtb-)
+
# Add subdir path
extra-y := $(addprefix $(obj)/,$(extra-y))
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index a27677146410..6f099f915dcf 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -12,18 +12,22 @@ from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
-if len(sys.argv) != 3:
- sys.stderr.write("usage: %s file1 file2\n" % sys.argv[0])
+if len(sys.argv) < 3:
+ sys.stderr.write("usage: %s [option] file1 file2\n" % sys.argv[0])
+ sys.stderr.write("The options are:\n")
+ sys.stderr.write("-c cateogrize output based on symbole type\n")
+ sys.stderr.write("-d Show delta of Data Section\n")
+ sys.stderr.write("-t Show delta of text Section\n")
sys.exit(-1)
re_NUMBER = re.compile(r'\.[0-9]+')
-def getsizes(file):
+def getsizes(file, format):
sym = {}
with os.popen("nm --size-sort " + file) as f:
for line in f:
size, type, name = line.split()
- if type in "tTdDbBrR":
+ if type in format:
# strip generated symbols
if name.startswith("__mod_"): continue
if name.startswith("SyS_"): continue
@@ -34,44 +38,61 @@ def getsizes(file):
sym[name] = sym.get(name, 0) + int(size, 16)
return sym
-old = getsizes(sys.argv[1])
-new = getsizes(sys.argv[2])
-grow, shrink, add, remove, up, down = 0, 0, 0, 0, 0, 0
-delta, common = [], {}
-otot, ntot = 0, 0
+def calc(oldfile, newfile, format):
+ old = getsizes(oldfile, format)
+ new = getsizes(newfile, format)
+ grow, shrink, add, remove, up, down = 0, 0, 0, 0, 0, 0
+ delta, common = [], {}
+ otot, ntot = 0, 0
-for a in old:
- if a in new:
- common[a] = 1
+ for a in old:
+ if a in new:
+ common[a] = 1
-for name in old:
- otot += old[name]
- if name not in common:
- remove += 1
- down += old[name]
- delta.append((-old[name], name))
+ for name in old:
+ otot += old[name]
+ if name not in common:
+ remove += 1
+ down += old[name]
+ delta.append((-old[name], name))
-for name in new:
- ntot += new[name]
- if name not in common:
- add += 1
- up += new[name]
- delta.append((new[name], name))
+ for name in new:
+ ntot += new[name]
+ if name not in common:
+ add += 1
+ up += new[name]
+ delta.append((new[name], name))
-for name in common:
+ for name in common:
d = new.get(name, 0) - old.get(name, 0)
if d>0: grow, up = grow+1, up+d
if d<0: shrink, down = shrink+1, down-d
delta.append((d, name))
-delta.sort()
-delta.reverse()
+ delta.sort()
+ delta.reverse()
+ return grow, shrink, add, remove, up, down, delta, old, new, otot, ntot
-print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
- (add, remove, grow, shrink, up, -down, up-down))
-print("%-40s %7s %7s %+7s" % ("function", "old", "new", "delta"))
-for d, n in delta:
- if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
+def print_result(symboltype, symbolformat, argc):
+ grow, shrink, add, remove, up, down, delta, old, new, otot, ntot = \
+ calc(sys.argv[argc - 1], sys.argv[argc], symbolformat)
-print("Total: Before=%d, After=%d, chg %+.2f%%" % \
- (otot, ntot, (ntot - otot)*100.0/otot))
+ print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
+ (add, remove, grow, shrink, up, -down, up-down))
+ print("%-40s %7s %7s %+7s" % (symboltype, "old", "new", "delta"))
+ for d, n in delta:
+ if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
+
+ print("Total: Before=%d, After=%d, chg %+.2f%%" % \
+ (otot, ntot, (ntot - otot)*100.0/otot))
+
+if sys.argv[1] == "-c":
+ print_result("Function", "tT", 3)
+ print_result("Data", "dDbB", 3)
+ print_result("RO Data", "rR", 3)
+elif sys.argv[1] == "-d":
+ print_result("Data", "dDbBrR", 3)
+elif sys.argv[1] == "-t":
+ print_result("Function", "tT", 3)
+else:
+ print_result("Function", "tTdDbBrR", 2)
diff --git a/scripts/coccinelle/api/drm-get-put.cocci b/scripts/coccinelle/api/drm-get-put.cocci
index bf1313274f0b..91fceb8f1fa2 100644
--- a/scripts/coccinelle/api/drm-get-put.cocci
+++ b/scripts/coccinelle/api/drm-get-put.cocci
@@ -51,6 +51,9 @@ expression object;
|
- drm_property_unreference_blob(object)
+ drm_property_blob_put(object)
+|
+- drm_dev_unref(object)
++ drm_dev_put(object)
)
@r depends on report@
@@ -82,6 +85,8 @@ drm_gem_object_unreference_unlocked(object)
drm_property_unreference_blob@p(object)
|
drm_property_reference_blob@p(object)
+|
+drm_dev_unref@p(object)
)
@script:python depends on report@
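
For orientation (not part of the patch): with the new alternative, the semantic patch in patch mode rewrites calls such as the one below. The surrounding function and the ddev variable are hypothetical; only the drm_dev_unref() -> drm_dev_put() substitution comes from the rule above.

#include <drm/drm_drv.h>

static void example_unload(struct drm_device *ddev)
{
	drm_dev_unref(ddev);	/* rewritten to drm_dev_put(ddev) by the rule above */
}
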
diff --git a/scripts/documentation-file-ref-check b/scripts/documentation-file-ref-check
new file mode 100755
index 000000000000..bc1659900e89
--- /dev/null
+++ b/scripts/documentation-file-ref-check
@@ -0,0 +1,15 @@
+#!/bin/sh
+# Treewide grep for references to files under Documentation, and report
+# non-existent files on stderr.
+
+for f in $(git ls-files); do
+ for ref in $(grep -ho "Documentation/[A-Za-z0-9_.,~/*+-]*" "$f"); do
+ # presume trailing . and , are not part of the name
+ ref=${ref%%[.,]}
+
+ # use ls to handle wildcards
+ if ! ls $ref >/dev/null 2>&1; then
+ echo "$f: $ref" >&2
+ fi
+ done
+done
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c
index 62ea8f83d4a0..e66138449886 100644
--- a/scripts/dtc/checks.c
+++ b/scripts/dtc/checks.c
@@ -873,7 +873,7 @@ static void check_simple_bus_reg(struct check *c, struct dt_info *dti, struct no
while (size--)
reg = (reg << 32) | fdt32_to_cpu(*(cells++));
- snprintf(unit_addr, sizeof(unit_addr), "%llx", (unsigned long long)reg);
+ snprintf(unit_addr, sizeof(unit_addr), "%"PRIx64, reg);
if (!streq(unitname, unit_addr))
FAIL(c, dti, "Node %s simple-bus unit address format error, expected \"%s\"",
node->fullpath, unit_addr);
@@ -956,6 +956,274 @@ static void check_obsolete_chosen_interrupt_controller(struct check *c,
WARNING(obsolete_chosen_interrupt_controller,
check_obsolete_chosen_interrupt_controller, NULL);
+struct provider {
+ const char *prop_name;
+ const char *cell_name;
+ bool optional;
+};
+
+static void check_property_phandle_args(struct check *c,
+ struct dt_info *dti,
+ struct node *node,
+ struct property *prop,
+ const struct provider *provider)
+{
+ struct node *root = dti->dt;
+ int cell, cellsize = 0;
+
+ if (prop->val.len % sizeof(cell_t)) {
+ FAIL(c, dti, "property '%s' size (%d) is invalid, expected multiple of %zu in node %s",
+ prop->name, prop->val.len, sizeof(cell_t), node->fullpath);
+ return;
+ }
+
+ for (cell = 0; cell < prop->val.len / sizeof(cell_t); cell += cellsize + 1) {
+ struct node *provider_node;
+ struct property *cellprop;
+ int phandle;
+
+ phandle = propval_cell_n(prop, cell);
+ /*
+ * Some bindings use a cell value 0 or -1 to skip over optional
+ * entries when each index position has a specific definition.
+ */
+ if (phandle == 0 || phandle == -1) {
+ /* Give up if this is an overlay with external references */
+ if (dti->dtsflags & DTSF_PLUGIN)
+ break;
+
+ cellsize = 0;
+ continue;
+ }
+
+ /* If we have markers, verify the current cell is a phandle */
+ if (prop->val.markers) {
+ struct marker *m = prop->val.markers;
+ for_each_marker_of_type(m, REF_PHANDLE) {
+ if (m->offset == (cell * sizeof(cell_t)))
+ break;
+ }
+ if (!m)
+ FAIL(c, dti, "Property '%s', cell %d is not a phandle reference in %s",
+ prop->name, cell, node->fullpath);
+ }
+
+ provider_node = get_node_by_phandle(root, phandle);
+ if (!provider_node) {
+ FAIL(c, dti, "Could not get phandle node for %s:%s(cell %d)",
+ node->fullpath, prop->name, cell);
+ break;
+ }
+
+ cellprop = get_property(provider_node, provider->cell_name);
+ if (cellprop) {
+ cellsize = propval_cell(cellprop);
+ } else if (provider->optional) {
+ cellsize = 0;
+ } else {
+ FAIL(c, dti, "Missing property '%s' in node %s or bad phandle (referred from %s:%s[%d])",
+ provider->cell_name,
+ provider_node->fullpath,
+ node->fullpath, prop->name, cell);
+ break;
+ }
+
+ if (prop->val.len < ((cell + cellsize + 1) * sizeof(cell_t))) {
+ FAIL(c, dti, "%s property size (%d) too small for cell size %d in %s",
+ prop->name, prop->val.len, cellsize, node->fullpath);
+ }
+ }
+}
+
+static void check_provider_cells_property(struct check *c,
+ struct dt_info *dti,
+ struct node *node)
+{
+ struct provider *provider = c->data;
+ struct property *prop;
+
+ prop = get_property(node, provider->prop_name);
+ if (!prop)
+ return;
+
+ check_property_phandle_args(c, dti, node, prop, provider);
+}
+#define WARNING_PROPERTY_PHANDLE_CELLS(nm, propname, cells_name, ...) \
+ static struct provider nm##_provider = { (propname), (cells_name), __VA_ARGS__ }; \
+ WARNING(nm##_property, check_provider_cells_property, &nm##_provider, &phandle_references);
+
+WARNING_PROPERTY_PHANDLE_CELLS(clocks, "clocks", "#clock-cells");
+WARNING_PROPERTY_PHANDLE_CELLS(cooling_device, "cooling-device", "#cooling-cells");
+WARNING_PROPERTY_PHANDLE_CELLS(dmas, "dmas", "#dma-cells");
+WARNING_PROPERTY_PHANDLE_CELLS(hwlocks, "hwlocks", "#hwlock-cells");
+WARNING_PROPERTY_PHANDLE_CELLS(interrupts_extended, "interrupts-extended", "#interrupt-cells");
+WARNING_PROPERTY_PHANDLE_CELLS(io_channels, "io-channels", "#io-channel-cells");
+WARNING_PROPERTY_PHANDLE_CELLS(iommus, "iommus", "#iommu-cells");
+WARNING_PROPERTY_PHANDLE_CELLS(mboxes, "mboxes", "#mbox-cells");
+WARNING_PROPERTY_PHANDLE_CELLS(msi_parent, "msi-parent", "#msi-cells", true);
+WARNING_PROPERTY_PHANDLE_CELLS(mux_controls, "mux-controls", "#mux-control-cells");
+WARNING_PROPERTY_PHANDLE_CELLS(phys, "phys", "#phy-cells");
+WARNING_PROPERTY_PHANDLE_CELLS(power_domains, "power-domains", "#power-domain-cells");
+WARNING_PROPERTY_PHANDLE_CELLS(pwms, "pwms", "#pwm-cells");
+WARNING_PROPERTY_PHANDLE_CELLS(resets, "resets", "#reset-cells");
+WARNING_PROPERTY_PHANDLE_CELLS(sound_dais, "sound-dais", "#sound-dai-cells");
+WARNING_PROPERTY_PHANDLE_CELLS(thermal_sensors, "thermal-sensors", "#thermal-sensor-cells");
+
+static bool prop_is_gpio(struct property *prop)
+{
+ char *str;
+
+ /*
+ * *-gpios and *-gpio can appear in property names,
+ * so skip over any false matches (only one known ATM)
+ */
+ if (strstr(prop->name, "nr-gpio"))
+ return false;
+
+ str = strrchr(prop->name, '-');
+ if (str)
+ str++;
+ else
+ str = prop->name;
+ if (!(streq(str, "gpios") || streq(str, "gpio")))
+ return false;
+
+ return true;
+}
+
+static void check_gpios_property(struct check *c,
+ struct dt_info *dti,
+ struct node *node)
+{
+ struct property *prop;
+
+ /* Skip GPIO hog nodes which have 'gpios' property */
+ if (get_property(node, "gpio-hog"))
+ return;
+
+ for_each_property(node, prop) {
+ struct provider provider;
+
+ if (!prop_is_gpio(prop))
+ continue;
+
+ provider.prop_name = prop->name;
+ provider.cell_name = "#gpio-cells";
+ provider.optional = false;
+ check_property_phandle_args(c, dti, node, prop, &provider);
+ }
+
+}
+WARNING(gpios_property, check_gpios_property, NULL, &phandle_references);
+
+static void check_deprecated_gpio_property(struct check *c,
+ struct dt_info *dti,
+ struct node *node)
+{
+ struct property *prop;
+
+ for_each_property(node, prop) {
+ char *str;
+
+ if (!prop_is_gpio(prop))
+ continue;
+
+ str = strstr(prop->name, "gpio");
+ if (!streq(str, "gpio"))
+ continue;
+
+ FAIL(c, dti, "'[*-]gpio' is deprecated, use '[*-]gpios' instead for %s:%s",
+ node->fullpath, prop->name);
+ }
+
+}
+CHECK(deprecated_gpio_property, check_deprecated_gpio_property, NULL);
+
+static bool node_is_interrupt_provider(struct node *node)
+{
+ struct property *prop;
+
+ prop = get_property(node, "interrupt-controller");
+ if (prop)
+ return true;
+
+ prop = get_property(node, "interrupt-map");
+ if (prop)
+ return true;
+
+ return false;
+}
+static void check_interrupts_property(struct check *c,
+ struct dt_info *dti,
+ struct node *node)
+{
+ struct node *root = dti->dt;
+ struct node *irq_node = NULL, *parent = node;
+ struct property *irq_prop, *prop = NULL;
+ int irq_cells, phandle;
+
+ irq_prop = get_property(node, "interrupts");
+ if (!irq_prop)
+ return;
+
+ if (irq_prop->val.len % sizeof(cell_t))
+ FAIL(c, dti, "property '%s' size (%d) is invalid, expected multiple of %zu in node %s",
+ irq_prop->name, irq_prop->val.len, sizeof(cell_t),
+ node->fullpath);
+
+ while (parent && !prop) {
+ if (parent != node && node_is_interrupt_provider(parent)) {
+ irq_node = parent;
+ break;
+ }
+
+ prop = get_property(parent, "interrupt-parent");
+ if (prop) {
+ phandle = propval_cell(prop);
+ /* Give up if this is an overlay with external references */
+ if ((phandle == 0 || phandle == -1) &&
+ (dti->dtsflags & DTSF_PLUGIN))
+ return;
+
+ irq_node = get_node_by_phandle(root, phandle);
+ if (!irq_node) {
+ FAIL(c, dti, "Bad interrupt-parent phandle for %s",
+ node->fullpath);
+ return;
+ }
+ if (!node_is_interrupt_provider(irq_node))
+ FAIL(c, dti,
+ "Missing interrupt-controller or interrupt-map property in %s",
+ irq_node->fullpath);
+
+ break;
+ }
+
+ parent = parent->parent;
+ }
+
+ if (!irq_node) {
+ FAIL(c, dti, "Missing interrupt-parent for %s", node->fullpath);
+ return;
+ }
+
+ prop = get_property(irq_node, "#interrupt-cells");
+ if (!prop) {
+ FAIL(c, dti, "Missing #interrupt-cells in interrupt-parent %s",
+ irq_node->fullpath);
+ return;
+ }
+
+ irq_cells = propval_cell(prop);
+ if (irq_prop->val.len % (irq_cells * sizeof(cell_t))) {
+ FAIL(c, dti,
+ "interrupts size is (%d), expected multiple of %d in %s",
+ irq_prop->val.len, (int)(irq_cells * sizeof(cell_t)),
+ node->fullpath);
+ }
+}
+WARNING(interrupts_property, check_interrupts_property, &phandle_references);
+
static struct check *check_table[] = {
&duplicate_node_names, &duplicate_property_names,
&node_name_chars, &node_name_format, &property_name_chars,
@@ -987,6 +1255,27 @@ static struct check *check_table[] = {
&avoid_default_addr_size,
&obsolete_chosen_interrupt_controller,
+ &clocks_property,
+ &cooling_device_property,
+ &dmas_property,
+ &hwlocks_property,
+ &interrupts_extended_property,
+ &io_channels_property,
+ &iommus_property,
+ &mboxes_property,
+ &msi_parent_property,
+ &mux_controls_property,
+ &phys_property,
+ &power_domains_property,
+ &pwms_property,
+ &resets_property,
+ &sound_dais_property,
+ &thermal_sensors_property,
+
+ &deprecated_gpio_property,
+ &gpios_property,
+ &interrupts_property,
+
&always_fail,
};
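
As a reading aid (not part of the diff), the WARNING_PROPERTY_PHANDLE_CELLS() helper introduced above expands, for the first entry and for the msi-parent entry, to roughly the following; WARNING() is pre-existing dtc check machinery not shown in this hunk:

static struct provider clocks_provider = { "clocks", "#clock-cells" };
WARNING(clocks_property, check_provider_cells_property, &clocks_provider,
	&phandle_references);

/* the optional trailing argument sets provider.optional */
static struct provider msi_parent_provider = { "msi-parent", "#msi-cells", true };
WARNING(msi_parent_property, check_provider_cells_property,
	&msi_parent_provider, &phandle_references);
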
diff --git a/scripts/dtc/dtc-lexer.lex.c_shipped b/scripts/dtc/dtc-lexer.lex.c_shipped
index 64c243772398..011bb9632ff2 100644
--- a/scripts/dtc/dtc-lexer.lex.c_shipped
+++ b/scripts/dtc/dtc-lexer.lex.c_shipped
@@ -1397,7 +1397,7 @@ static int yy_get_next_buffer (void)
{
char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
char *source = (yytext_ptr);
- yy_size_t number_to_move, i;
+ int number_to_move, i;
int ret_val;
if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] )
@@ -1426,7 +1426,7 @@ static int yy_get_next_buffer (void)
/* Try to read more data. */
/* First move last chars to start of buffer. */
- number_to_move = (yy_size_t) ((yy_c_buf_p) - (yytext_ptr)) - 1;
+ number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr) - 1);
for ( i = 0; i < number_to_move; ++i )
*(dest++) = *(source++);
@@ -1508,7 +1508,7 @@ static int yy_get_next_buffer (void)
else
ret_val = EOB_ACT_CONTINUE_SCAN;
- if ((int) ((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
+ if (((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
/* Extend the array by 50%, plus the number we really need. */
int new_size = (yy_n_chars) + number_to_move + ((yy_n_chars) >> 1);
YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) yyrealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size );
@@ -1987,10 +1987,10 @@ YY_BUFFER_STATE yy_scan_bytes (yyconst char * yybytes, int _yybytes_len )
YY_BUFFER_STATE b;
char *buf;
yy_size_t n;
- yy_size_t i;
+ int i;
/* Get memory for full buffer, including space for trailing EOB's. */
- n = (yy_size_t) _yybytes_len + 2;
+ n = (yy_size_t) (_yybytes_len + 2);
buf = (char *) yyalloc(n );
if ( ! buf )
YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" );
diff --git a/scripts/dtc/dtc-parser.tab.c_shipped b/scripts/dtc/dtc-parser.tab.c_shipped
index 0a7a5ed86f04..aea514fa6928 100644
--- a/scripts/dtc/dtc-parser.tab.c_shipped
+++ b/scripts/dtc/dtc-parser.tab.c_shipped
@@ -448,7 +448,7 @@ union yyalloc
/* YYNNTS -- Number of nonterminals. */
#define YYNNTS 30
/* YYNRULES -- Number of rules. */
-#define YYNRULES 84
+#define YYNRULES 85
/* YYNSTATES -- Number of states. */
#define YYNSTATES 149
@@ -499,14 +499,14 @@ static const yytype_uint8 yytranslate[] =
static const yytype_uint16 yyrline[] =
{
0, 109, 109, 117, 121, 128, 129, 139, 142, 149,
- 153, 161, 165, 170, 181, 191, 206, 214, 217, 224,
- 228, 232, 236, 244, 248, 252, 256, 260, 276, 286,
- 294, 297, 301, 308, 324, 329, 348, 362, 369, 370,
- 371, 378, 382, 383, 387, 388, 392, 393, 397, 398,
- 402, 403, 407, 408, 412, 413, 414, 418, 419, 420,
- 421, 422, 426, 427, 428, 432, 433, 434, 438, 439,
- 448, 457, 461, 462, 463, 464, 469, 472, 476, 484,
- 487, 491, 499, 503, 507
+ 153, 161, 165, 170, 181, 200, 213, 220, 228, 231,
+ 238, 242, 246, 250, 258, 262, 266, 270, 274, 290,
+ 300, 308, 311, 315, 322, 338, 343, 362, 376, 383,
+ 384, 385, 392, 396, 397, 401, 402, 406, 407, 411,
+ 412, 416, 417, 421, 422, 426, 427, 428, 432, 433,
+ 434, 435, 436, 440, 441, 442, 446, 447, 448, 452,
+ 453, 462, 471, 475, 476, 477, 478, 483, 486, 490,
+ 498, 501, 505, 513, 517, 521
};
#endif
@@ -582,20 +582,20 @@ static const yytype_int8 yypact[] =
static const yytype_uint8 yydefact[] =
{
0, 0, 0, 5, 7, 3, 1, 6, 0, 0,
- 0, 7, 0, 38, 39, 0, 0, 10, 0, 2,
- 8, 4, 0, 0, 0, 72, 0, 41, 42, 44,
- 46, 48, 50, 52, 54, 57, 64, 67, 71, 0,
- 17, 11, 0, 0, 0, 0, 73, 74, 75, 40,
+ 16, 7, 0, 39, 40, 0, 0, 10, 0, 2,
+ 8, 4, 0, 0, 0, 73, 0, 42, 43, 45,
+ 47, 49, 51, 53, 55, 58, 65, 68, 72, 0,
+ 18, 11, 0, 0, 0, 0, 74, 75, 76, 41,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 9,
- 79, 0, 0, 14, 12, 45, 0, 47, 49, 51,
- 53, 55, 56, 60, 61, 59, 58, 62, 63, 65,
- 66, 69, 68, 70, 0, 0, 0, 0, 18, 0,
- 79, 15, 13, 0, 0, 0, 20, 30, 82, 22,
- 84, 0, 81, 80, 43, 21, 83, 0, 0, 16,
- 29, 19, 31, 0, 23, 32, 26, 0, 76, 34,
- 0, 0, 0, 0, 37, 36, 24, 35, 33, 0,
- 77, 78, 25, 0, 28, 0, 0, 0, 27
+ 80, 0, 0, 14, 12, 46, 0, 48, 50, 52,
+ 54, 56, 57, 61, 62, 60, 59, 63, 64, 66,
+ 67, 70, 69, 71, 0, 0, 0, 0, 19, 0,
+ 80, 15, 13, 0, 0, 0, 21, 31, 83, 23,
+ 85, 0, 82, 81, 44, 22, 84, 0, 0, 17,
+ 30, 20, 32, 0, 24, 33, 27, 0, 77, 35,
+ 0, 0, 0, 0, 38, 37, 25, 36, 34, 0,
+ 78, 79, 26, 0, 29, 0, 0, 0, 28
};
/* YYPGOTO[NTERM-NUM]. */
@@ -678,28 +678,28 @@ static const yytype_uint8 yystos[] =
static const yytype_uint8 yyr1[] =
{
0, 48, 49, 50, 50, 51, 51, 52, 52, 53,
- 53, 54, 54, 54, 54, 54, 55, 56, 56, 57,
- 57, 57, 57, 58, 58, 58, 58, 58, 58, 58,
- 59, 59, 59, 60, 60, 60, 60, 60, 61, 61,
- 61, 62, 63, 63, 64, 64, 65, 65, 66, 66,
- 67, 67, 68, 68, 69, 69, 69, 70, 70, 70,
- 70, 70, 71, 71, 71, 72, 72, 72, 73, 73,
- 73, 73, 74, 74, 74, 74, 75, 75, 75, 76,
- 76, 76, 77, 77, 77
+ 53, 54, 54, 54, 54, 54, 54, 55, 56, 56,
+ 57, 57, 57, 57, 58, 58, 58, 58, 58, 58,
+ 58, 59, 59, 59, 60, 60, 60, 60, 60, 61,
+ 61, 61, 62, 63, 63, 64, 64, 65, 65, 66,
+ 66, 67, 67, 68, 68, 69, 69, 69, 70, 70,
+ 70, 70, 70, 71, 71, 71, 72, 72, 72, 73,
+ 73, 73, 73, 74, 74, 74, 74, 75, 75, 75,
+ 76, 76, 76, 77, 77, 77
};
/* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */
static const yytype_uint8 yyr2[] =
{
0, 2, 3, 2, 4, 1, 2, 0, 2, 4,
- 2, 2, 3, 4, 3, 4, 5, 0, 2, 4,
- 2, 3, 2, 2, 3, 4, 2, 9, 5, 2,
- 0, 2, 2, 3, 1, 2, 2, 2, 1, 1,
- 3, 1, 1, 5, 1, 3, 1, 3, 1, 3,
- 1, 3, 1, 3, 1, 3, 3, 1, 3, 3,
- 3, 3, 3, 3, 1, 3, 3, 1, 3, 3,
- 3, 1, 1, 2, 2, 2, 0, 2, 2, 0,
- 2, 2, 2, 3, 2
+ 2, 2, 3, 4, 3, 4, 0, 5, 0, 2,
+ 4, 2, 3, 2, 2, 3, 4, 2, 9, 5,
+ 2, 0, 2, 2, 3, 1, 2, 2, 2, 1,
+ 1, 3, 1, 1, 5, 1, 3, 1, 3, 1,
+ 3, 1, 3, 1, 3, 1, 3, 3, 1, 3,
+ 3, 3, 3, 3, 3, 1, 3, 3, 1, 3,
+ 3, 3, 1, 1, 2, 2, 2, 0, 2, 2,
+ 0, 2, 2, 2, 3, 2
};
@@ -1572,17 +1572,26 @@ yyreduce:
{
struct node *target = get_node_by_ref((yyvsp[-2].node), (yyvsp[-1].labelref));
- if (target)
+ if (target) {
merge_nodes(target, (yyvsp[0].node));
- else
- ERROR(&(yylsp[-1]), "Label or path %s not found", (yyvsp[-1].labelref));
+ } else {
+ /*
+ * We rely on the rule being always:
+ * versioninfo plugindecl memreserves devicetree
+ * so $-1 is what we want (plugindecl)
+ */
+ if ((yyvsp[(-1) - (3)].flags) & DTSF_PLUGIN)
+ add_orphan_node((yyvsp[-2].node), (yyvsp[0].node), (yyvsp[-1].labelref));
+ else
+ ERROR(&(yylsp[-1]), "Label or path %s not found", (yyvsp[-1].labelref));
+ }
(yyval.node) = (yyvsp[-2].node);
}
-#line 1582 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1591 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
case 15:
-#line 192 "dtc-parser.y" /* yacc.c:1646 */
+#line 201 "dtc-parser.y" /* yacc.c:1646 */
{
struct node *target = get_node_by_ref((yyvsp[-3].node), (yyvsp[-1].labelref));
@@ -1594,100 +1603,109 @@ yyreduce:
(yyval.node) = (yyvsp[-3].node);
}
-#line 1598 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1607 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
case 16:
-#line 207 "dtc-parser.y" /* yacc.c:1646 */
+#line 213 "dtc-parser.y" /* yacc.c:1646 */
{
- (yyval.node) = build_node((yyvsp[-3].proplist), (yyvsp[-2].nodelist));
+ /* build empty node */
+ (yyval.node) = name_node(build_node(NULL, NULL), "");
}
-#line 1606 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1616 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
case 17:
-#line 214 "dtc-parser.y" /* yacc.c:1646 */
+#line 221 "dtc-parser.y" /* yacc.c:1646 */
{
- (yyval.proplist) = NULL;
+ (yyval.node) = build_node((yyvsp[-3].proplist), (yyvsp[-2].nodelist));
}
-#line 1614 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1624 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
case 18:
-#line 218 "dtc-parser.y" /* yacc.c:1646 */
+#line 228 "dtc-parser.y" /* yacc.c:1646 */
{
- (yyval.proplist) = chain_property((yyvsp[0].prop), (yyvsp[-1].proplist));
+ (yyval.proplist) = NULL;
}
-#line 1622 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1632 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
case 19:
-#line 225 "dtc-parser.y" /* yacc.c:1646 */
+#line 232 "dtc-parser.y" /* yacc.c:1646 */
{
- (yyval.prop) = build_property((yyvsp[-3].propnodename), (yyvsp[-1].data));
+ (yyval.proplist) = chain_property((yyvsp[0].prop), (yyvsp[-1].proplist));
}
-#line 1630 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1640 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
case 20:
-#line 229 "dtc-parser.y" /* yacc.c:1646 */
+#line 239 "dtc-parser.y" /* yacc.c:1646 */
{
- (yyval.prop) = build_property((yyvsp[-1].propnodename), empty_data);
+ (yyval.prop) = build_property((yyvsp[-3].propnodename), (yyvsp[-1].data));
}
-#line 1638 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1648 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
case 21:
-#line 233 "dtc-parser.y" /* yacc.c:1646 */
+#line 243 "dtc-parser.y" /* yacc.c:1646 */
{
- (yyval.prop) = build_property_delete((yyvsp[-1].propnodename));
+ (yyval.prop) = build_property((yyvsp[-1].propnodename), empty_data);
}
-#line 1646 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1656 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
case 22:
-#line 237 "dtc-parser.y" /* yacc.c:1646 */
+#line 247 "dtc-parser.y" /* yacc.c:1646 */
+ {
+ (yyval.prop) = build_property_delete((yyvsp[-1].propnodename));
+ }
+#line 1664 "dtc-parser.tab.c" /* yacc.c:1646 */
+ break;
+
+ case 23:
+#line 251 "dtc-parser.y" /* yacc.c:1646 */
{
add_label(&(yyvsp[0].prop)->labels, (yyvsp[-1].labelref));
(yyval.prop) = (yyvsp[0].prop);
}
-#line 1655 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1673 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 23:
-#line 245 "dtc-parser.y" /* yacc.c:1646 */
+ case 24:
+#line 259 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.data) = data_merge((yyvsp[-1].data), (yyvsp[0].data));
}
-#line 1663 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1681 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 24:
-#line 249 "dtc-parser.y" /* yacc.c:1646 */
+ case 25:
+#line 263 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.data) = data_merge((yyvsp[-2].data), (yyvsp[-1].array).data);
}
-#line 1671 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1689 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 25:
-#line 253 "dtc-parser.y" /* yacc.c:1646 */
+ case 26:
+#line 267 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.data) = data_merge((yyvsp[-3].data), (yyvsp[-1].data));
}
-#line 1679 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1697 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 26:
-#line 257 "dtc-parser.y" /* yacc.c:1646 */
+ case 27:
+#line 271 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.data) = data_add_marker((yyvsp[-1].data), REF_PATH, (yyvsp[0].labelref));
}
-#line 1687 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1705 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 27:
-#line 261 "dtc-parser.y" /* yacc.c:1646 */
+ case 28:
+#line 275 "dtc-parser.y" /* yacc.c:1646 */
{
FILE *f = srcfile_relative_open((yyvsp[-5].data).val, NULL);
struct data d;
@@ -1703,11 +1721,11 @@ yyreduce:
(yyval.data) = data_merge((yyvsp[-8].data), d);
fclose(f);
}
-#line 1707 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1725 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 28:
-#line 277 "dtc-parser.y" /* yacc.c:1646 */
+ case 29:
+#line 291 "dtc-parser.y" /* yacc.c:1646 */
{
FILE *f = srcfile_relative_open((yyvsp[-1].data).val, NULL);
struct data d = empty_data;
@@ -1717,43 +1735,43 @@ yyreduce:
(yyval.data) = data_merge((yyvsp[-4].data), d);
fclose(f);
}
-#line 1721 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1739 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 29:
-#line 287 "dtc-parser.y" /* yacc.c:1646 */
+ case 30:
+#line 301 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.data) = data_add_marker((yyvsp[-1].data), LABEL, (yyvsp[0].labelref));
}
-#line 1729 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1747 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 30:
-#line 294 "dtc-parser.y" /* yacc.c:1646 */
+ case 31:
+#line 308 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.data) = empty_data;
}
-#line 1737 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1755 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 31:
-#line 298 "dtc-parser.y" /* yacc.c:1646 */
+ case 32:
+#line 312 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.data) = (yyvsp[-1].data);
}
-#line 1745 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1763 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 32:
-#line 302 "dtc-parser.y" /* yacc.c:1646 */
+ case 33:
+#line 316 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.data) = data_add_marker((yyvsp[-1].data), LABEL, (yyvsp[0].labelref));
}
-#line 1753 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1771 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 33:
-#line 309 "dtc-parser.y" /* yacc.c:1646 */
+ case 34:
+#line 323 "dtc-parser.y" /* yacc.c:1646 */
{
unsigned long long bits;
@@ -1769,20 +1787,20 @@ yyreduce:
(yyval.array).data = empty_data;
(yyval.array).bits = bits;
}
-#line 1773 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1791 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 34:
-#line 325 "dtc-parser.y" /* yacc.c:1646 */
+ case 35:
+#line 339 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.array).data = empty_data;
(yyval.array).bits = 32;
}
-#line 1782 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1800 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 35:
-#line 330 "dtc-parser.y" /* yacc.c:1646 */
+ case 36:
+#line 344 "dtc-parser.y" /* yacc.c:1646 */
{
if ((yyvsp[-1].array).bits < 64) {
uint64_t mask = (1ULL << (yyvsp[-1].array).bits) - 1;
@@ -1801,11 +1819,11 @@ yyreduce:
(yyval.array).data = data_append_integer((yyvsp[-1].array).data, (yyvsp[0].integer), (yyvsp[-1].array).bits);
}
-#line 1805 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1823 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 36:
-#line 349 "dtc-parser.y" /* yacc.c:1646 */
+ case 37:
+#line 363 "dtc-parser.y" /* yacc.c:1646 */
{
uint64_t val = ~0ULL >> (64 - (yyvsp[-1].array).bits);
@@ -1819,129 +1837,129 @@ yyreduce:
(yyval.array).data = data_append_integer((yyvsp[-1].array).data, val, (yyvsp[-1].array).bits);
}
-#line 1823 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1841 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 37:
-#line 363 "dtc-parser.y" /* yacc.c:1646 */
+ case 38:
+#line 377 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.array).data = data_add_marker((yyvsp[-1].array).data, LABEL, (yyvsp[0].labelref));
}
-#line 1831 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1849 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 40:
-#line 372 "dtc-parser.y" /* yacc.c:1646 */
+ case 41:
+#line 386 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.integer) = (yyvsp[-1].integer);
}
-#line 1839 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1857 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 43:
-#line 383 "dtc-parser.y" /* yacc.c:1646 */
+ case 44:
+#line 397 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-4].integer) ? (yyvsp[-2].integer) : (yyvsp[0].integer); }
-#line 1845 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1863 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 45:
-#line 388 "dtc-parser.y" /* yacc.c:1646 */
+ case 46:
+#line 402 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) || (yyvsp[0].integer); }
-#line 1851 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1869 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 47:
-#line 393 "dtc-parser.y" /* yacc.c:1646 */
+ case 48:
+#line 407 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) && (yyvsp[0].integer); }
-#line 1857 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1875 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 49:
-#line 398 "dtc-parser.y" /* yacc.c:1646 */
+ case 50:
+#line 412 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) | (yyvsp[0].integer); }
-#line 1863 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1881 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 51:
-#line 403 "dtc-parser.y" /* yacc.c:1646 */
+ case 52:
+#line 417 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) ^ (yyvsp[0].integer); }
-#line 1869 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1887 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 53:
-#line 408 "dtc-parser.y" /* yacc.c:1646 */
+ case 54:
+#line 422 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) & (yyvsp[0].integer); }
-#line 1875 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1893 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 55:
-#line 413 "dtc-parser.y" /* yacc.c:1646 */
+ case 56:
+#line 427 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) == (yyvsp[0].integer); }
-#line 1881 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1899 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 56:
-#line 414 "dtc-parser.y" /* yacc.c:1646 */
+ case 57:
+#line 428 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) != (yyvsp[0].integer); }
-#line 1887 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1905 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 58:
-#line 419 "dtc-parser.y" /* yacc.c:1646 */
+ case 59:
+#line 433 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) < (yyvsp[0].integer); }
-#line 1893 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1911 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 59:
-#line 420 "dtc-parser.y" /* yacc.c:1646 */
+ case 60:
+#line 434 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) > (yyvsp[0].integer); }
-#line 1899 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1917 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 60:
-#line 421 "dtc-parser.y" /* yacc.c:1646 */
+ case 61:
+#line 435 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) <= (yyvsp[0].integer); }
-#line 1905 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1923 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 61:
-#line 422 "dtc-parser.y" /* yacc.c:1646 */
+ case 62:
+#line 436 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) >= (yyvsp[0].integer); }
-#line 1911 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1929 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 62:
-#line 426 "dtc-parser.y" /* yacc.c:1646 */
+ case 63:
+#line 440 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) << (yyvsp[0].integer); }
-#line 1917 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1935 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 63:
-#line 427 "dtc-parser.y" /* yacc.c:1646 */
+ case 64:
+#line 441 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) >> (yyvsp[0].integer); }
-#line 1923 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1941 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 65:
-#line 432 "dtc-parser.y" /* yacc.c:1646 */
+ case 66:
+#line 446 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) + (yyvsp[0].integer); }
-#line 1929 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1947 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 66:
-#line 433 "dtc-parser.y" /* yacc.c:1646 */
+ case 67:
+#line 447 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) - (yyvsp[0].integer); }
-#line 1935 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1953 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 68:
-#line 438 "dtc-parser.y" /* yacc.c:1646 */
+ case 69:
+#line 452 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = (yyvsp[-2].integer) * (yyvsp[0].integer); }
-#line 1941 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1959 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 69:
-#line 440 "dtc-parser.y" /* yacc.c:1646 */
+ case 70:
+#line 454 "dtc-parser.y" /* yacc.c:1646 */
{
if ((yyvsp[0].integer) != 0) {
(yyval.integer) = (yyvsp[-2].integer) / (yyvsp[0].integer);
@@ -1950,11 +1968,11 @@ yyreduce:
(yyval.integer) = 0;
}
}
-#line 1954 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1972 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 70:
-#line 449 "dtc-parser.y" /* yacc.c:1646 */
+ case 71:
+#line 463 "dtc-parser.y" /* yacc.c:1646 */
{
if ((yyvsp[0].integer) != 0) {
(yyval.integer) = (yyvsp[-2].integer) % (yyvsp[0].integer);
@@ -1963,103 +1981,103 @@ yyreduce:
(yyval.integer) = 0;
}
}
-#line 1967 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1985 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 73:
-#line 462 "dtc-parser.y" /* yacc.c:1646 */
+ case 74:
+#line 476 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = -(yyvsp[0].integer); }
-#line 1973 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1991 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 74:
-#line 463 "dtc-parser.y" /* yacc.c:1646 */
+ case 75:
+#line 477 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = ~(yyvsp[0].integer); }
-#line 1979 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 1997 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 75:
-#line 464 "dtc-parser.y" /* yacc.c:1646 */
+ case 76:
+#line 478 "dtc-parser.y" /* yacc.c:1646 */
{ (yyval.integer) = !(yyvsp[0].integer); }
-#line 1985 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 2003 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 76:
-#line 469 "dtc-parser.y" /* yacc.c:1646 */
+ case 77:
+#line 483 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.data) = empty_data;
}
-#line 1993 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 2011 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 77:
-#line 473 "dtc-parser.y" /* yacc.c:1646 */
+ case 78:
+#line 487 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.data) = data_append_byte((yyvsp[-1].data), (yyvsp[0].byte));
}
-#line 2001 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 2019 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 78:
-#line 477 "dtc-parser.y" /* yacc.c:1646 */
+ case 79:
+#line 491 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.data) = data_add_marker((yyvsp[-1].data), LABEL, (yyvsp[0].labelref));
}
-#line 2009 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 2027 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 79:
-#line 484 "dtc-parser.y" /* yacc.c:1646 */
+ case 80:
+#line 498 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.nodelist) = NULL;
}
-#line 2017 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 2035 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 80:
-#line 488 "dtc-parser.y" /* yacc.c:1646 */
+ case 81:
+#line 502 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.nodelist) = chain_node((yyvsp[-1].node), (yyvsp[0].nodelist));
}
-#line 2025 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 2043 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 81:
-#line 492 "dtc-parser.y" /* yacc.c:1646 */
+ case 82:
+#line 506 "dtc-parser.y" /* yacc.c:1646 */
{
ERROR(&(yylsp[0]), "Properties must precede subnodes");
YYERROR;
}
-#line 2034 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 2052 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 82:
-#line 500 "dtc-parser.y" /* yacc.c:1646 */
+ case 83:
+#line 514 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.node) = name_node((yyvsp[0].node), (yyvsp[-1].propnodename));
}
-#line 2042 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 2060 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 83:
-#line 504 "dtc-parser.y" /* yacc.c:1646 */
+ case 84:
+#line 518 "dtc-parser.y" /* yacc.c:1646 */
{
(yyval.node) = name_node(build_node_delete(), (yyvsp[-1].propnodename));
}
-#line 2050 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 2068 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
- case 84:
-#line 508 "dtc-parser.y" /* yacc.c:1646 */
+ case 85:
+#line 522 "dtc-parser.y" /* yacc.c:1646 */
{
add_label(&(yyvsp[0].node)->labels, (yyvsp[-1].labelref));
(yyval.node) = (yyvsp[0].node);
}
-#line 2059 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 2077 "dtc-parser.tab.c" /* yacc.c:1646 */
break;
-#line 2063 "dtc-parser.tab.c" /* yacc.c:1646 */
+#line 2081 "dtc-parser.tab.c" /* yacc.c:1646 */
default: break;
}
/* User semantic actions sometimes alter yychar, and that requires
@@ -2294,7 +2312,7 @@ yyreturn:
#endif
return yyresult;
}
-#line 514 "dtc-parser.y" /* yacc.c:1906 */
+#line 528 "dtc-parser.y" /* yacc.c:1906 */
void yyerror(char const *s)
diff --git a/scripts/dtc/dtc-parser.y b/scripts/dtc/dtc-parser.y
index ca3f5003427c..affc81a8f9ab 100644
--- a/scripts/dtc/dtc-parser.y
+++ b/scripts/dtc/dtc-parser.y
@@ -182,10 +182,19 @@ devicetree:
{
struct node *target = get_node_by_ref($1, $2);
- if (target)
+ if (target) {
merge_nodes(target, $3);
- else
- ERROR(&@2, "Label or path %s not found", $2);
+ } else {
+ /*
+ * We rely on the rule being always:
+ * versioninfo plugindecl memreserves devicetree
+ * so $-1 is what we want (plugindecl)
+ */
+ if ($<flags>-1 & DTSF_PLUGIN)
+ add_orphan_node($1, $3, $2);
+ else
+ ERROR(&@2, "Label or path %s not found", $2);
+ }
$$ = $1;
}
| devicetree DT_DEL_NODE DT_REF ';'
@@ -200,6 +209,11 @@ devicetree:
$$ = $1;
}
+ | /* empty */
+ {
+ /* build empty node */
+ $$ = name_node(build_node(NULL, NULL), "");
+ }
;
nodedef:
diff --git a/scripts/dtc/dtc.c b/scripts/dtc/dtc.c
index f5eed9d72c02..5ed873c72ad1 100644
--- a/scripts/dtc/dtc.c
+++ b/scripts/dtc/dtc.c
@@ -31,7 +31,7 @@ int reservenum; /* Number of memory reservation slots */
int minsize; /* Minimum blob size */
int padsize; /* Additional padding to blob */
int alignsize;	/* Additional padding to blob according to the alignsize */
-int phandle_format = PHANDLE_BOTH; /* Use linux,phandle or phandle properties */
+int phandle_format = PHANDLE_EPAPR; /* Use linux,phandle or phandle properties */
int generate_symbols; /* enable symbols & fixup support */
int generate_fixups; /* suppress generation of fixups on symbol support */
int auto_label_aliases; /* auto generate labels -> aliases */
diff --git a/scripts/dtc/dtc.h b/scripts/dtc/dtc.h
index fc24e17510fd..35cf926cc14a 100644
--- a/scripts/dtc/dtc.h
+++ b/scripts/dtc/dtc.h
@@ -31,6 +31,7 @@
#include <ctype.h>
#include <errno.h>
#include <unistd.h>
+#include <inttypes.h>
#include <libfdt_env.h>
#include <fdt.h>
@@ -202,6 +203,7 @@ struct node *build_node_delete(void);
struct node *name_node(struct node *node, char *name);
struct node *chain_node(struct node *first, struct node *list);
struct node *merge_nodes(struct node *old_node, struct node *new_node);
+void add_orphan_node(struct node *old_node, struct node *new_node, char *ref);
void add_property(struct node *node, struct property *prop);
void delete_property_by_name(struct node *node, char *name);
@@ -215,6 +217,7 @@ void append_to_property(struct node *node,
const char *get_unitname(struct node *node);
struct property *get_property(struct node *node, const char *propname);
cell_t propval_cell(struct property *prop);
+cell_t propval_cell_n(struct property *prop, int n);
struct property *get_property_by_label(struct node *tree, const char *label,
struct node **node);
struct marker *get_marker_label(struct node *tree, const char *label,
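
The newly declared propval_cell_n() is what check_property_phandle_args() uses above to read the n-th cell of a property value. Its definition is not part of this excerpt (it presumably sits alongside propval_cell() in livetree.c); a minimal sketch, assuming dtc's struct data layout (prop->val.len / prop->val.val) as referenced elsewhere in this diff:

/* Sketch only: return the n-th 32-bit cell of a property value,
 * converted from FDT (big-endian) to host byte order. */
cell_t propval_cell_n(struct property *prop, int n)
{
	assert(n >= 0 && (n + 1) * sizeof(cell_t) <= prop->val.len);
	return fdt32_to_cpu(*((fdt32_t *)prop->val.val + n));
}
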
diff --git a/scripts/dtc/libfdt/fdt_addresses.c b/scripts/dtc/libfdt/fdt_addresses.c
new file mode 100644
index 000000000000..eff4dbcc729d
--- /dev/null
+++ b/scripts/dtc/libfdt/fdt_addresses.c
@@ -0,0 +1,96 @@
+/*
+ * libfdt - Flat Device Tree manipulation
+ * Copyright (C) 2014 David Gibson <david@gibson.dropbear.id.au>
+ *
+ * libfdt is dual licensed: you can use it either under the terms of
+ * the GPL, or the BSD license, at your option.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Alternatively,
+ *
+ * b) Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+int fdt_address_cells(const void *fdt, int nodeoffset)
+{
+ const fdt32_t *ac;
+ int val;
+ int len;
+
+ ac = fdt_getprop(fdt, nodeoffset, "#address-cells", &len);
+ if (!ac)
+ return 2;
+
+ if (len != sizeof(*ac))
+ return -FDT_ERR_BADNCELLS;
+
+ val = fdt32_to_cpu(*ac);
+ if ((val <= 0) || (val > FDT_MAX_NCELLS))
+ return -FDT_ERR_BADNCELLS;
+
+ return val;
+}
+
+int fdt_size_cells(const void *fdt, int nodeoffset)
+{
+ const fdt32_t *sc;
+ int val;
+ int len;
+
+ sc = fdt_getprop(fdt, nodeoffset, "#size-cells", &len);
+ if (!sc)
+ return 2;
+
+ if (len != sizeof(*sc))
+ return -FDT_ERR_BADNCELLS;
+
+ val = fdt32_to_cpu(*sc);
+ if ((val < 0) || (val > FDT_MAX_NCELLS))
+ return -FDT_ERR_BADNCELLS;
+
+ return val;
+}
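
A hedged usage sketch (not part of the file above): computing how many cells a single "reg" entry of a child node occupies, from its parent's #address-cells/#size-cells. The blob pointer and parent offset are assumed to come from the caller; note that, as implemented above, both helpers fall back to 2 when the property is absent.

#include <libfdt.h>

/* Cells per "reg" entry of a child of @parent, or a negative libfdt
 * error code (-FDT_ERR_BADNCELLS on malformed cell counts). */
static int reg_entry_cells(const void *fdt, int parent)
{
	int ac = fdt_address_cells(fdt, parent);
	int sc = fdt_size_cells(fdt, parent);

	if (ac < 0)
		return ac;
	if (sc < 0)
		return sc;

	return ac + sc;
}
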
diff --git a/scripts/dtc/libfdt/fdt_empty_tree.c b/scripts/dtc/libfdt/fdt_empty_tree.c
index f72d13b1d19c..f2ae9b77c285 100644
--- a/scripts/dtc/libfdt/fdt_empty_tree.c
+++ b/scripts/dtc/libfdt/fdt_empty_tree.c
@@ -81,4 +81,3 @@ int fdt_create_empty_tree(void *buf, int bufsize)
return fdt_open_into(buf, buf, bufsize);
}
-
diff --git a/scripts/dtc/libfdt/fdt_overlay.c b/scripts/dtc/libfdt/fdt_overlay.c
new file mode 100644
index 000000000000..bd81241e6658
--- /dev/null
+++ b/scripts/dtc/libfdt/fdt_overlay.c
@@ -0,0 +1,861 @@
+#include "libfdt_env.h"
+
+#include <fdt.h>
+#include <libfdt.h>
+
+#include "libfdt_internal.h"
+
+/**
+ * overlay_get_target_phandle - retrieves the target phandle of a fragment
+ * @fdto: pointer to the device tree overlay blob
+ * @fragment: node offset of the fragment in the overlay
+ *
+ * overlay_get_target_phandle() retrieves the target phandle of an
+ * overlay fragment when that fragment uses a phandle (target
+ * property) instead of a path (target-path property).
+ *
+ * returns:
+ *	the phandle pointed to by the target property
+ * 0, if the phandle was not found
+ * -1, if the phandle was malformed
+ */
+static uint32_t overlay_get_target_phandle(const void *fdto, int fragment)
+{
+ const fdt32_t *val;
+ int len;
+
+ val = fdt_getprop(fdto, fragment, "target", &len);
+ if (!val)
+ return 0;
+
+ if ((len != sizeof(*val)) || (fdt32_to_cpu(*val) == (uint32_t)-1))
+ return (uint32_t)-1;
+
+ return fdt32_to_cpu(*val);
+}
+
+/**
+ * overlay_get_target - retrieves the offset of a fragment's target
+ * @fdt: Base device tree blob
+ * @fdto: Device tree overlay blob
+ * @fragment: node offset of the fragment in the overlay
+ * @pathp: pointer which receives the path of the target (or NULL)
+ *
+ * overlay_get_target() retrieves the target offset in the base
+ * device tree of a fragment, no matter how the actual targeting is
+ * done (through a phandle or a path)
+ *
+ * returns:
+ *	the targeted node offset in the base device tree
+ * Negative error code on error
+ */
+static int overlay_get_target(const void *fdt, const void *fdto,
+ int fragment, char const **pathp)
+{
+ uint32_t phandle;
+ const char *path = NULL;
+ int path_len = 0, ret;
+
+ /* Try first to do a phandle based lookup */
+ phandle = overlay_get_target_phandle(fdto, fragment);
+ if (phandle == (uint32_t)-1)
+ return -FDT_ERR_BADPHANDLE;
+
+ /* no phandle, try path */
+ if (!phandle) {
+ /* And then a path based lookup */
+ path = fdt_getprop(fdto, fragment, "target-path", &path_len);
+ if (path)
+ ret = fdt_path_offset(fdt, path);
+ else
+ ret = path_len;
+ } else
+ ret = fdt_node_offset_by_phandle(fdt, phandle);
+
+ /*
+ * If we haven't found either a target or a
+ * target-path property in a node that contains a
+ * __overlay__ subnode (we wouldn't be called
+	 * otherwise), consider it an improperly written
+ * overlay
+ */
+ if (ret < 0 && path_len == -FDT_ERR_NOTFOUND)
+ ret = -FDT_ERR_BADOVERLAY;
+
+ /* return on error */
+ if (ret < 0)
+ return ret;
+
+ /* return pointer to path (if available) */
+ if (pathp)
+ *pathp = path ? path : NULL;
+
+ return ret;
+}
+
+/**
+ * overlay_phandle_add_offset - Increases a phandle by an offset
+ * @fdt: Base device tree blob
+ * @node:	Offset of the node whose phandle property is adjusted
+ * @name: Name of the property to modify (phandle or linux,phandle)
+ * @delta: offset to apply
+ *
+ * overlay_phandle_add_offset() increments a node phandle by a given
+ * offset.
+ *
+ * returns:
+ * 0 on success.
+ * Negative error code on error
+ */
+static int overlay_phandle_add_offset(void *fdt, int node,
+ const char *name, uint32_t delta)
+{
+ const fdt32_t *val;
+ uint32_t adj_val;
+ int len;
+
+ val = fdt_getprop(fdt, node, name, &len);
+ if (!val)
+ return len;
+
+ if (len != sizeof(*val))
+ return -FDT_ERR_BADPHANDLE;
+
+ adj_val = fdt32_to_cpu(*val);
+ if ((adj_val + delta) < adj_val)
+ return -FDT_ERR_NOPHANDLES;
+
+ adj_val += delta;
+ if (adj_val == (uint32_t)-1)
+ return -FDT_ERR_NOPHANDLES;
+
+ return fdt_setprop_inplace_u32(fdt, node, name, adj_val);
+}
+
+/**
+ * overlay_adjust_node_phandles - Offsets the phandles of a node
+ * @fdto: Device tree overlay blob
+ * @node: Offset of the node we want to adjust
+ * @delta: Offset to shift the phandles of
+ *
+ * overlay_adjust_node_phandles() adds a constant to all the phandles
+ * of a given node. This is mainly used as part of the overlay
+ * application process, when we want to update all the overlay
+ * phandles so they do not conflict with those of the base device tree.
+ *
+ * returns:
+ * 0 on success
+ * Negative error code on failure
+ */
+static int overlay_adjust_node_phandles(void *fdto, int node,
+ uint32_t delta)
+{
+ int child;
+ int ret;
+
+ ret = overlay_phandle_add_offset(fdto, node, "phandle", delta);
+ if (ret && ret != -FDT_ERR_NOTFOUND)
+ return ret;
+
+ ret = overlay_phandle_add_offset(fdto, node, "linux,phandle", delta);
+ if (ret && ret != -FDT_ERR_NOTFOUND)
+ return ret;
+
+ fdt_for_each_subnode(child, fdto, node) {
+ ret = overlay_adjust_node_phandles(fdto, child, delta);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * overlay_adjust_local_phandles - Adjust the phandles of a whole overlay
+ * @fdto: Device tree overlay blob
+ * @delta: Offset to shift the phandles of
+ *
+ * overlay_adjust_local_phandles() adds a constant to all the
+ * phandles of an overlay. This is mainly used as part of the overlay
+ * application process, when we want to update all the overlay
+ * phandles so they do not conflict with those of the base device tree.
+ *
+ * returns:
+ * 0 on success
+ * Negative error code on failure
+ */
+static int overlay_adjust_local_phandles(void *fdto, uint32_t delta)
+{
+ /*
+ * Start adjusting the phandles from the overlay root
+ */
+ return overlay_adjust_node_phandles(fdto, 0, delta);
+}
+
+/**
+ * overlay_update_local_node_references - Adjust the overlay references
+ * @fdto: Device tree overlay blob
+ * @tree_node: Node offset of the node to operate on
+ * @fixup_node: Node offset of the matching local fixups node
+ * @delta: Offset to shift the phandles of
+ *
+ * overlay_update_local_node_references() updates the phandles
+ * pointing to a node within the device tree overlay by adding a
+ * constant delta.
+ *
+ * This is mainly used as part of the device tree overlay application
+ * process, where you want the overlay's phandles not to conflict
+ * with the ones from the base device tree before merging them.
+ *
+ * returns:
+ * 0 on success
+ * Negative error code on failure
+ */
+static int overlay_update_local_node_references(void *fdto,
+ int tree_node,
+ int fixup_node,
+ uint32_t delta)
+{
+ int fixup_prop;
+ int fixup_child;
+ int ret;
+
+ fdt_for_each_property_offset(fixup_prop, fdto, fixup_node) {
+ const fdt32_t *fixup_val;
+ const char *tree_val;
+ const char *name;
+ int fixup_len;
+ int tree_len;
+ int i;
+
+ fixup_val = fdt_getprop_by_offset(fdto, fixup_prop,
+ &name, &fixup_len);
+ if (!fixup_val)
+ return fixup_len;
+
+ if (fixup_len % sizeof(uint32_t))
+ return -FDT_ERR_BADOVERLAY;
+
+ tree_val = fdt_getprop(fdto, tree_node, name, &tree_len);
+ if (!tree_val) {
+ if (tree_len == -FDT_ERR_NOTFOUND)
+ return -FDT_ERR_BADOVERLAY;
+
+ return tree_len;
+ }
+
+ for (i = 0; i < (fixup_len / sizeof(uint32_t)); i++) {
+ fdt32_t adj_val;
+ uint32_t poffset;
+
+ poffset = fdt32_to_cpu(fixup_val[i]);
+
+ /*
+ * phandles to fixup can be unaligned.
+ *
+ * Use a memcpy for the architectures that do
+ * not support unaligned accesses.
+ */
+ memcpy(&adj_val, tree_val + poffset, sizeof(adj_val));
+
+ adj_val = cpu_to_fdt32(fdt32_to_cpu(adj_val) + delta);
+
+ ret = fdt_setprop_inplace_namelen_partial(fdto,
+ tree_node,
+ name,
+ strlen(name),
+ poffset,
+ &adj_val,
+ sizeof(adj_val));
+ if (ret == -FDT_ERR_NOSPACE)
+ return -FDT_ERR_BADOVERLAY;
+
+ if (ret)
+ return ret;
+ }
+ }
+
+ fdt_for_each_subnode(fixup_child, fdto, fixup_node) {
+ const char *fixup_child_name = fdt_get_name(fdto, fixup_child,
+ NULL);
+ int tree_child;
+
+ tree_child = fdt_subnode_offset(fdto, tree_node,
+ fixup_child_name);
+ if (tree_child == -FDT_ERR_NOTFOUND)
+ return -FDT_ERR_BADOVERLAY;
+ if (tree_child < 0)
+ return tree_child;
+
+ ret = overlay_update_local_node_references(fdto,
+ tree_child,
+ fixup_child,
+ delta);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * overlay_update_local_references - Adjust the overlay references
+ * @fdto: Device tree overlay blob
+ * @delta: Offset to shift the phandles of
+ *
+ * overlay_update_local_references() updates all the phandles pointing
+ * to a node within the device tree overlay by adding a constant
+ * delta so they do not conflict with the base device tree.
+ *
+ * This is mainly used as part of the device tree overlay application
+ * process, where you want the overlay's phandles not to conflict
+ * with the ones from the base device tree before merging them.
+ *
+ * returns:
+ * 0 on success
+ * Negative error code on failure
+ */
+static int overlay_update_local_references(void *fdto, uint32_t delta)
+{
+ int fixups;
+
+ fixups = fdt_path_offset(fdto, "/__local_fixups__");
+ if (fixups < 0) {
+		/* There are no local phandles to adjust, bail out */
+ if (fixups == -FDT_ERR_NOTFOUND)
+ return 0;
+
+ return fixups;
+ }
+
+ /*
+ * Update our local references from the root of the tree
+ */
+ return overlay_update_local_node_references(fdto, 0, fixups,
+ delta);
+}
+
+/**
+ * overlay_fixup_one_phandle - Set an overlay phandle to the base one
+ * @fdt: Base Device Tree blob
+ * @fdto: Device tree overlay blob
+ * @symbols_off: Node offset of the symbols node in the base device tree
+ * @path: Path to a node holding a phandle in the overlay
+ * @path_len: number of path characters to consider
+ * @name: Name of the property holding the phandle reference in the overlay
+ * @name_len: number of name characters to consider
+ * @poffset: Offset within the overlay property where the phandle is stored
+ * @label: Label of the node referenced by the phandle
+ *
+ * overlay_fixup_one_phandle() resolves an overlay phandle pointing to
+ * a node in the base device tree.
+ *
+ * This is part of the device tree overlay application process, when
+ * you want all the phandles in the overlay to point to the actual
+ * base dt nodes.
+ *
+ * returns:
+ * 0 on success
+ * Negative error code on failure
+ */
+static int overlay_fixup_one_phandle(void *fdt, void *fdto,
+ int symbols_off,
+ const char *path, uint32_t path_len,
+ const char *name, uint32_t name_len,
+ int poffset, const char *label)
+{
+ const char *symbol_path;
+ uint32_t phandle;
+ fdt32_t phandle_prop;
+ int symbol_off, fixup_off;
+ int prop_len;
+
+ if (symbols_off < 0)
+ return symbols_off;
+
+ symbol_path = fdt_getprop(fdt, symbols_off, label,
+ &prop_len);
+ if (!symbol_path)
+ return prop_len;
+
+ symbol_off = fdt_path_offset(fdt, symbol_path);
+ if (symbol_off < 0)
+ return symbol_off;
+
+ phandle = fdt_get_phandle(fdt, symbol_off);
+ if (!phandle)
+ return -FDT_ERR_NOTFOUND;
+
+ fixup_off = fdt_path_offset_namelen(fdto, path, path_len);
+ if (fixup_off == -FDT_ERR_NOTFOUND)
+ return -FDT_ERR_BADOVERLAY;
+ if (fixup_off < 0)
+ return fixup_off;
+
+ phandle_prop = cpu_to_fdt32(phandle);
+ return fdt_setprop_inplace_namelen_partial(fdto, fixup_off,
+ name, name_len, poffset,
+ &phandle_prop,
+ sizeof(phandle_prop));
+};
+
+/**
+ * overlay_fixup_phandle - Set an overlay phandle to the base one
+ * @fdt: Base Device Tree blob
+ * @fdto: Device tree overlay blob
+ * @symbols_off: Node offset of the symbols node in the base device tree
+ * @property: Property offset in the overlay holding the list of fixups
+ *
+ * overlay_fixup_phandle() resolves all the overlay phandles pointed
+ * to in a __fixups__ property, and updates them to match the phandles
+ * in use in the base device tree.
+ *
+ * This is part of the device tree overlay application process, when
+ * you want all the phandles in the overlay to point to the actual
+ * base dt nodes.
+ *
+ * returns:
+ * 0 on success
+ * Negative error code on failure
+ */
+static int overlay_fixup_phandle(void *fdt, void *fdto, int symbols_off,
+ int property)
+{
+ const char *value;
+ const char *label;
+ int len;
+
+ value = fdt_getprop_by_offset(fdto, property,
+ &label, &len);
+ if (!value) {
+ if (len == -FDT_ERR_NOTFOUND)
+ return -FDT_ERR_INTERNAL;
+
+ return len;
+ }
+
+ do {
+ const char *path, *name, *fixup_end;
+ const char *fixup_str = value;
+ uint32_t path_len, name_len;
+ uint32_t fixup_len;
+ char *sep, *endptr;
+ int poffset, ret;
+
+ fixup_end = memchr(value, '\0', len);
+ if (!fixup_end)
+ return -FDT_ERR_BADOVERLAY;
+ fixup_len = fixup_end - fixup_str;
+
+ len -= fixup_len + 1;
+ value += fixup_len + 1;
+
+ path = fixup_str;
+ sep = memchr(fixup_str, ':', fixup_len);
+ if (!sep || *sep != ':')
+ return -FDT_ERR_BADOVERLAY;
+
+ path_len = sep - path;
+ if (path_len == (fixup_len - 1))
+ return -FDT_ERR_BADOVERLAY;
+
+ fixup_len -= path_len + 1;
+ name = sep + 1;
+ sep = memchr(name, ':', fixup_len);
+ if (!sep || *sep != ':')
+ return -FDT_ERR_BADOVERLAY;
+
+ name_len = sep - name;
+ if (!name_len)
+ return -FDT_ERR_BADOVERLAY;
+
+ poffset = strtoul(sep + 1, &endptr, 10);
+ if ((*endptr != '\0') || (endptr <= (sep + 1)))
+ return -FDT_ERR_BADOVERLAY;
+
+ ret = overlay_fixup_one_phandle(fdt, fdto, symbols_off,
+ path, path_len, name, name_len,
+ poffset, label);
+ if (ret)
+ return ret;
+ } while (len > 0);
+
+ return 0;
+}
+
+/**
+ * overlay_fixup_phandles - Resolve the overlay phandles to the base
+ * device tree
+ * @fdt: Base Device Tree blob
+ * @fdto: Device tree overlay blob
+ *
+ * overlay_fixup_phandles() resolves all the overlay phandles pointing
+ * to nodes in the base device tree.
+ *
+ * This is one of the steps of the device tree overlay application
+ * process, when you want all the phandles in the overlay to point to
+ * the actual base dt nodes.
+ *
+ * returns:
+ * 0 on success
+ * Negative error code on failure
+ */
+static int overlay_fixup_phandles(void *fdt, void *fdto)
+{
+ int fixups_off, symbols_off;
+ int property;
+
+ /* We can have overlays without any fixups */
+ fixups_off = fdt_path_offset(fdto, "/__fixups__");
+ if (fixups_off == -FDT_ERR_NOTFOUND)
+ return 0; /* nothing to do */
+ if (fixups_off < 0)
+ return fixups_off;
+
+ /* And base DTs without symbols */
+ symbols_off = fdt_path_offset(fdt, "/__symbols__");
+ if ((symbols_off < 0 && (symbols_off != -FDT_ERR_NOTFOUND)))
+ return symbols_off;
+
+ fdt_for_each_property_offset(property, fdto, fixups_off) {
+ int ret;
+
+ ret = overlay_fixup_phandle(fdt, fdto, symbols_off, property);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * overlay_apply_node - Merges a node into the base device tree
+ * @fdt: Base Device Tree blob
+ * @target: Node offset in the base device tree to apply the fragment to
+ * @fdto: Device tree overlay blob
+ * @node: Node offset in the overlay holding the changes to merge
+ *
+ * overlay_apply_node() merges an overlay node into the target node of
+ * the base device tree.
+ *
+ * This is part of the final step in the device tree overlay
+ * application process, when all the phandles have been adjusted and
+ * resolved and you just have to merge the overlay into the base device
+ * tree.
+ *
+ * returns:
+ * 0 on success
+ * Negative error code on failure
+ */
+static int overlay_apply_node(void *fdt, int target,
+ void *fdto, int node)
+{
+ int property;
+ int subnode;
+
+ fdt_for_each_property_offset(property, fdto, node) {
+ const char *name;
+ const void *prop;
+ int prop_len;
+ int ret;
+
+ prop = fdt_getprop_by_offset(fdto, property, &name,
+ &prop_len);
+ if (prop_len == -FDT_ERR_NOTFOUND)
+ return -FDT_ERR_INTERNAL;
+ if (prop_len < 0)
+ return prop_len;
+
+ ret = fdt_setprop(fdt, target, name, prop, prop_len);
+ if (ret)
+ return ret;
+ }
+
+ fdt_for_each_subnode(subnode, fdto, node) {
+ const char *name = fdt_get_name(fdto, subnode, NULL);
+ int nnode;
+ int ret;
+
+ nnode = fdt_add_subnode(fdt, target, name);
+ if (nnode == -FDT_ERR_EXISTS) {
+ nnode = fdt_subnode_offset(fdt, target, name);
+ if (nnode == -FDT_ERR_NOTFOUND)
+ return -FDT_ERR_INTERNAL;
+ }
+
+ if (nnode < 0)
+ return nnode;
+
+ ret = overlay_apply_node(fdt, nnode, fdto, subnode);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * overlay_merge - Merge an overlay into its base device tree
+ * @fdt: Base Device Tree blob
+ * @fdto: Device tree overlay blob
+ *
+ * overlay_merge() merges an overlay into its base device tree.
+ *
+ * This is the next to last step in the device tree overlay application
+ * process, when all the phandles have been adjusted and resolved and
+ * you just have to merge the overlay into the base device tree.
+ *
+ * returns:
+ * 0 on success
+ * Negative error code on failure
+ */
+static int overlay_merge(void *fdt, void *fdto)
+{
+ int fragment;
+
+ fdt_for_each_subnode(fragment, fdto, 0) {
+ int overlay;
+ int target;
+ int ret;
+
+ /*
+		 * Each fragment will have an __overlay__ node. If
+		 * it doesn't, it's not supposed to be merged
+ */
+ overlay = fdt_subnode_offset(fdto, fragment, "__overlay__");
+ if (overlay == -FDT_ERR_NOTFOUND)
+ continue;
+
+ if (overlay < 0)
+ return overlay;
+
+ target = overlay_get_target(fdt, fdto, fragment, NULL);
+ if (target < 0)
+ return target;
+
+ ret = overlay_apply_node(fdt, target, fdto, overlay);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int get_path_len(const void *fdt, int nodeoffset)
+{
+ int len = 0, namelen;
+ const char *name;
+
+ FDT_CHECK_HEADER(fdt);
+
+ for (;;) {
+ name = fdt_get_name(fdt, nodeoffset, &namelen);
+ if (!name)
+ return namelen;
+
+ /* root? we're done */
+ if (namelen == 0)
+ break;
+
+ nodeoffset = fdt_parent_offset(fdt, nodeoffset);
+ if (nodeoffset < 0)
+ return nodeoffset;
+ len += namelen + 1;
+ }
+
+ /* in case of root pretend it's "/" */
+ if (len == 0)
+ len++;
+ return len;
+}
+
+/**
+ * overlay_symbol_update - Update the symbols of base tree after a merge
+ * @fdt: Base Device Tree blob
+ * @fdto: Device tree overlay blob
+ *
+ * overlay_symbol_update() updates the symbols of the base tree with the
+ * symbols of the applied overlay
+ *
+ * This is the last step in the device tree overlay application
+ * process, allowing the reference of overlay symbols by subsequent
+ * overlay operations.
+ *
+ * returns:
+ * 0 on success
+ * Negative error code on failure
+ */
+static int overlay_symbol_update(void *fdt, void *fdto)
+{
+ int root_sym, ov_sym, prop, path_len, fragment, target;
+ int len, frag_name_len, ret, rel_path_len;
+ const char *s, *e;
+ const char *path;
+ const char *name;
+ const char *frag_name;
+ const char *rel_path;
+ const char *target_path;
+ char *buf;
+ void *p;
+
+ ov_sym = fdt_subnode_offset(fdto, 0, "__symbols__");
+
+ /* if no overlay symbols exist no problem */
+ if (ov_sym < 0)
+ return 0;
+
+ root_sym = fdt_subnode_offset(fdt, 0, "__symbols__");
+
+ /* if no root symbols exist we should create them */
+ if (root_sym == -FDT_ERR_NOTFOUND)
+ root_sym = fdt_add_subnode(fdt, 0, "__symbols__");
+
+ /* any error is fatal now */
+ if (root_sym < 0)
+ return root_sym;
+
+ /* iterate over each overlay symbol */
+ fdt_for_each_property_offset(prop, fdto, ov_sym) {
+ path = fdt_getprop_by_offset(fdto, prop, &name, &path_len);
+ if (!path)
+ return path_len;
+
+ /* verify it's a string property (terminated by a single \0) */
+ if (path_len < 1 || memchr(path, '\0', path_len) != &path[path_len - 1])
+ return -FDT_ERR_BADVALUE;
+
+ /* keep end marker to avoid strlen() */
+ e = path + path_len;
+
+ /* format: /<fragment-name>/__overlay__/<relative-subnode-path> */
+
+ if (*path != '/')
+ return -FDT_ERR_BADVALUE;
+
+ /* get fragment name first */
+ s = strchr(path + 1, '/');
+ if (!s)
+ return -FDT_ERR_BADOVERLAY;
+
+ frag_name = path + 1;
+ frag_name_len = s - path - 1;
+
+ /* verify format; safe since "s" lies in \0 terminated prop */
+ len = sizeof("/__overlay__/") - 1;
+ if ((e - s) < len || memcmp(s, "/__overlay__/", len))
+ return -FDT_ERR_BADOVERLAY;
+
+ rel_path = s + len;
+ rel_path_len = e - rel_path;
+
+ /* find the fragment index in which the symbol lies */
+ ret = fdt_subnode_offset_namelen(fdto, 0, frag_name,
+ frag_name_len);
+ /* not found? */
+ if (ret < 0)
+ return -FDT_ERR_BADOVERLAY;
+ fragment = ret;
+
+ /* an __overlay__ subnode must exist */
+ ret = fdt_subnode_offset(fdto, fragment, "__overlay__");
+ if (ret < 0)
+ return -FDT_ERR_BADOVERLAY;
+
+ /* get the target of the fragment */
+ ret = overlay_get_target(fdt, fdto, fragment, &target_path);
+ if (ret < 0)
+ return ret;
+ target = ret;
+
+ /* if we have a target path use it */
+ if (!target_path) {
+ ret = get_path_len(fdt, target);
+ if (ret < 0)
+ return ret;
+ len = ret;
+ } else {
+ len = strlen(target_path);
+ }
+
+ ret = fdt_setprop_placeholder(fdt, root_sym, name,
+ len + (len > 1) + rel_path_len + 1, &p);
+ if (ret < 0)
+ return ret;
+
+ if (!target_path) {
+ /* again in case setprop_placeholder changed it */
+ ret = overlay_get_target(fdt, fdto, fragment, &target_path);
+ if (ret < 0)
+ return ret;
+ target = ret;
+ }
+
+ buf = p;
+ if (len > 1) { /* target is not root */
+ if (!target_path) {
+ ret = fdt_get_path(fdt, target, buf, len + 1);
+ if (ret < 0)
+ return ret;
+ } else
+ memcpy(buf, target_path, len + 1);
+
+ } else
+ len--;
+
+ buf[len] = '/';
+ memcpy(buf + len + 1, rel_path, rel_path_len);
+ buf[len + 1 + rel_path_len] = '\0';
+ }
+
+ return 0;
+}
+
+int fdt_overlay_apply(void *fdt, void *fdto)
+{
+ uint32_t delta = fdt_get_max_phandle(fdt);
+ int ret;
+
+ FDT_CHECK_HEADER(fdt);
+ FDT_CHECK_HEADER(fdto);
+
+ ret = overlay_adjust_local_phandles(fdto, delta);
+ if (ret)
+ goto err;
+
+ ret = overlay_update_local_references(fdto, delta);
+ if (ret)
+ goto err;
+
+ ret = overlay_fixup_phandles(fdt, fdto);
+ if (ret)
+ goto err;
+
+ ret = overlay_merge(fdt, fdto);
+ if (ret)
+ goto err;
+
+ ret = overlay_symbol_update(fdt, fdto);
+ if (ret)
+ goto err;
+
+ /*
+ * The overlay was modified while being applied; erase its magic so it cannot be reused.
+ */
+ fdt_set_magic(fdto, ~0);
+
+ return 0;
+
+err:
+ /*
+ * The overlay might have been damaged, erase its magic.
+ */
+ fdt_set_magic(fdto, ~0);
+
+ /*
+ * The base device tree might have been damaged, erase its
+ * magic.
+ */
+ fdt_set_magic(fdt, ~0);
+
+ return ret;
+}
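/*
 * Illustrative sketch, not part of the patch: one way a caller might
 * drive the new fdt_overlay_apply() entry point.  The helper name, the
 * buffer arguments and the use of fdt_open_into() to make room in the
 * base blob are assumptions for this example, not code from the series.
 */
#include <libfdt.h>

static int apply_overlay_example(void *base, int base_bufsize, void *overlay)
{
	int err;

	/* Make sure the base blob has spare room before merging. */
	err = fdt_open_into(base, base, base_bufsize);
	if (err)
		return err;

	/*
	 * On failure both blobs may have been damaged (their magics are
	 * erased), so callers should work on copies they can throw away.
	 */
	return fdt_overlay_apply(base, overlay);
}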
diff --git a/scripts/dtc/libfdt/fdt_ro.c b/scripts/dtc/libfdt/fdt_ro.c
index 3d00d2eee0e3..08de2cce674d 100644
--- a/scripts/dtc/libfdt/fdt_ro.c
+++ b/scripts/dtc/libfdt/fdt_ro.c
@@ -60,7 +60,7 @@ static int _fdt_nodename_eq(const void *fdt, int offset,
{
const char *p = fdt_offset_ptr(fdt, offset + FDT_TAGSIZE, len+1);
- if (! p)
+ if (!p)
/* short match */
return 0;
@@ -327,7 +327,7 @@ const void *fdt_getprop_namelen(const void *fdt, int nodeoffset,
const struct fdt_property *prop;
prop = fdt_get_property_namelen(fdt, nodeoffset, name, namelen, lenp);
- if (! prop)
+ if (!prop)
return NULL;
return prop->data;
diff --git a/scripts/dtc/libfdt/fdt_rw.c b/scripts/dtc/libfdt/fdt_rw.c
index 3fd5847377c9..5c3a2bb0bc6b 100644
--- a/scripts/dtc/libfdt/fdt_rw.c
+++ b/scripts/dtc/libfdt/fdt_rw.c
@@ -207,7 +207,7 @@ static int _fdt_resize_property(void *fdt, int nodeoffset, const char *name,
int err;
*prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen);
- if (! (*prop))
+ if (!*prop)
return oldlen;
if ((err = _fdt_splice_struct(fdt, (*prop)->data, FDT_TAGALIGN(oldlen),
@@ -269,8 +269,8 @@ int fdt_set_name(void *fdt, int nodeoffset, const char *name)
return 0;
}
-int fdt_setprop(void *fdt, int nodeoffset, const char *name,
- const void *val, int len)
+int fdt_setprop_placeholder(void *fdt, int nodeoffset, const char *name,
+ int len, void **prop_data)
{
struct fdt_property *prop;
int err;
@@ -283,8 +283,22 @@ int fdt_setprop(void *fdt, int nodeoffset, const char *name,
if (err)
return err;
+ *prop_data = prop->data;
+ return 0;
+}
+
+int fdt_setprop(void *fdt, int nodeoffset, const char *name,
+ const void *val, int len)
+{
+ void *prop_data;
+ int err;
+
+ err = fdt_setprop_placeholder(fdt, nodeoffset, name, len, &prop_data);
+ if (err)
+ return err;
+
if (len)
- memcpy(prop->data, val, len);
+ memcpy(prop_data, val, len);
return 0;
}
@@ -323,7 +337,7 @@ int fdt_delprop(void *fdt, int nodeoffset, const char *name)
FDT_RW_CHECK_HEADER(fdt);
prop = fdt_get_property_w(fdt, nodeoffset, name, &len);
- if (! prop)
+ if (!prop)
return len;
proplen = sizeof(*prop) + FDT_TAGALIGN(len);
diff --git a/scripts/dtc/libfdt/fdt_sw.c b/scripts/dtc/libfdt/fdt_sw.c
index 6a804859fd0c..2bd15e7aef87 100644
--- a/scripts/dtc/libfdt/fdt_sw.c
+++ b/scripts/dtc/libfdt/fdt_sw.c
@@ -220,7 +220,7 @@ static int _fdt_find_add_string(void *fdt, const char *s)
return offset;
}
-int fdt_property(void *fdt, const char *name, const void *val, int len)
+int fdt_property_placeholder(void *fdt, const char *name, int len, void **valp)
{
struct fdt_property *prop;
int nameoff;
@@ -238,7 +238,19 @@ int fdt_property(void *fdt, const char *name, const void *val, int len)
prop->tag = cpu_to_fdt32(FDT_PROP);
prop->nameoff = cpu_to_fdt32(nameoff);
prop->len = cpu_to_fdt32(len);
- memcpy(prop->data, val, len);
+ *valp = prop->data;
+ return 0;
+}
+
+int fdt_property(void *fdt, const char *name, const void *val, int len)
+{
+ void *ptr;
+ int ret;
+
+ ret = fdt_property_placeholder(fdt, name, len, &ptr);
+ if (ret)
+ return ret;
+ memcpy(ptr, val, len);
return 0;
}
diff --git a/scripts/dtc/libfdt/fdt_wip.c b/scripts/dtc/libfdt/fdt_wip.c
index 6aaab399929c..5e859198622b 100644
--- a/scripts/dtc/libfdt/fdt_wip.c
+++ b/scripts/dtc/libfdt/fdt_wip.c
@@ -82,7 +82,7 @@ int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
int proplen;
propval = fdt_getprop(fdt, nodeoffset, name, &proplen);
- if (! propval)
+ if (!propval)
return proplen;
if (proplen != len)
@@ -107,7 +107,7 @@ int fdt_nop_property(void *fdt, int nodeoffset, const char *name)
int len;
prop = fdt_get_property_w(fdt, nodeoffset, name, &len);
- if (! prop)
+ if (!prop)
return len;
_fdt_nop_region(prop, len + sizeof(*prop));
diff --git a/scripts/dtc/libfdt/libfdt.h b/scripts/dtc/libfdt/libfdt.h
index ba86caa73d01..7f83023ee109 100644
--- a/scripts/dtc/libfdt/libfdt.h
+++ b/scripts/dtc/libfdt/libfdt.h
@@ -1314,6 +1314,22 @@ static inline int fdt_property_cell(void *fdt, const char *name, uint32_t val)
{
return fdt_property_u32(fdt, name, val);
}
+
+/**
+ * fdt_property_placeholder - add a new property and return a ptr to its value
+ *
+ * @fdt: pointer to the device tree blob
+ * @name: name of property to add
+ * @len: length of property value in bytes
+ * @valp: returns a pointer to where the value should be placed
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_NOSPACE, standard meanings
+ */
+int fdt_property_placeholder(void *fdt, const char *name, int len, void **valp);
+
#define fdt_property_string(fdt, name, str) \
fdt_property(fdt, name, str, strlen(str)+1)
int fdt_end_node(void *fdt);
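/*
 * Illustrative sketch, not part of the patch: using the new
 * fdt_property_placeholder() while building a blob with the
 * sequential-write interface.  The property name and length are
 * assumptions for this example.
 */
#include <string.h>
#include <libfdt.h>

static int sw_placeholder_example(void *buf, int bufsize)
{
	void *val;
	int err;

	err = fdt_create(buf, bufsize);
	if (err)
		return err;
	err = fdt_finish_reservemap(buf);
	if (err)
		return err;
	err = fdt_begin_node(buf, "");
	if (err)
		return err;

	/* Reserve 16 bytes now; the caller fills the value in afterwards. */
	err = fdt_property_placeholder(buf, "example-data", 16, &val);
	if (err)
		return err;
	memset(val, 0, 16);

	err = fdt_end_node(buf);
	if (err)
		return err;
	return fdt_finish(buf);
}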
@@ -1433,6 +1449,37 @@ int fdt_setprop(void *fdt, int nodeoffset, const char *name,
const void *val, int len);
/**
+ * fdt_setprop_placeholder - allocate space for a property
+ * @fdt: pointer to the device tree blob
+ * @nodeoffset: offset of the node whose property to change
+ * @name: name of the property to change
+ * @len: length of the property value
+ * @prop_data: return pointer to property data
+ *
+ * fdt_setprop_placeholder() allocates the named property in the given node.
+ * If the property exists it is resized. In either case a pointer to the
+ * property data is returned.
+ *
+ * This function may insert or delete data from the blob, and will
+ * therefore change the offsets of some existing nodes.
+ *
+ * returns:
+ * 0, on success
+ * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to
+ * contain the new property value
+ * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_BADMAGIC,
+ * -FDT_ERR_BADVERSION,
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_BADSTRUCTURE,
+ * -FDT_ERR_BADLAYOUT,
+ * -FDT_ERR_TRUNCATED, standard meanings
+ */
+int fdt_setprop_placeholder(void *fdt, int nodeoffset, const char *name,
+ int len, void **prop_data);
+
+/**
* fdt_setprop_u32 - set a property to a 32-bit integer
* @fdt: pointer to the device tree blob
* @nodeoffset: offset of the node whose property to change
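/*
 * Illustrative sketch, not part of the patch: reserving space for a
 * property on a read-write tree with fdt_setprop_placeholder().  The
 * node path, property name and length are assumptions for this example.
 */
#include <string.h>
#include <libfdt.h>

static int rw_placeholder_example(void *fdt)
{
	void *data;
	int node, err;

	node = fdt_path_offset(fdt, "/example-node");
	if (node < 0)
		return node;

	/* Allocate 8 bytes of property value, then fill them in place. */
	err = fdt_setprop_placeholder(fdt, node, "example-prop", 8, &data);
	if (err)
		return err;

	memset(data, 0, 8);
	return 0;
}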
diff --git a/scripts/dtc/livetree.c b/scripts/dtc/livetree.c
index 3673de07e4e5..6846ad2fd6d2 100644
--- a/scripts/dtc/livetree.c
+++ b/scripts/dtc/livetree.c
@@ -216,6 +216,28 @@ struct node *merge_nodes(struct node *old_node, struct node *new_node)
return old_node;
}
+void add_orphan_node(struct node *dt, struct node *new_node, char *ref)
+{
+ static unsigned int next_orphan_fragment = 0;
+ struct node *node;
+ struct property *p;
+ struct data d = empty_data;
+ char *name;
+
+ d = data_add_marker(d, REF_PHANDLE, ref);
+ d = data_append_integer(d, 0xffffffff, 32);
+
+ p = build_property("target", d);
+
+ xasprintf(&name, "fragment@%u",
+ next_orphan_fragment++);
+ name_node(new_node, "__overlay__");
+ node = build_node(p, new_node);
+ name_node(node, name);
+
+ add_child(dt, node);
+}
+
struct node *chain_node(struct node *first, struct node *list)
{
assert(first->next_sibling == NULL);
@@ -396,6 +418,12 @@ cell_t propval_cell(struct property *prop)
return fdt32_to_cpu(*((fdt32_t *)prop->val.val));
}
+cell_t propval_cell_n(struct property *prop, int n)
+{
+ assert(prop->val.len / sizeof(cell_t) >= n);
+ return fdt32_to_cpu(*((fdt32_t *)prop->val.val + n));
+}
+
struct property *get_property_by_label(struct node *tree, const char *label,
struct node **node)
{
@@ -478,7 +506,8 @@ struct node *get_node_by_path(struct node *tree, const char *path)
p = strchr(path, '/');
for_each_child(tree, child) {
- if (p && strneq(path, child->name, p-path))
+ if (p && (strlen(child->name) == p-path) &&
+ strneq(path, child->name, p-path))
return get_node_by_path(child, p+1);
else if (!p && streq(path, child->name))
return child;
diff --git a/scripts/dtc/update-dtc-source.sh b/scripts/dtc/update-dtc-source.sh
index 62f0d5325fd3..fe8926bb3b54 100755
--- a/scripts/dtc/update-dtc-source.sh
+++ b/scripts/dtc/update-dtc-source.sh
@@ -35,7 +35,9 @@ DTC_SOURCE="checks.c data.c dtc.c dtc.h flattree.c fstree.c livetree.c srcpos.c
srcpos.h treesource.c util.c util.h version_gen.h Makefile.dtc \
dtc-lexer.l dtc-parser.y"
DTC_GENERATED="dtc-lexer.lex.c dtc-parser.tab.c dtc-parser.tab.h"
-LIBFDT_SOURCE="Makefile.libfdt fdt.c fdt.h fdt_empty_tree.c fdt_ro.c fdt_rw.c fdt_strerror.c fdt_sw.c fdt_wip.c libfdt.h libfdt_env.h libfdt_internal.h"
+LIBFDT_SOURCE="Makefile.libfdt fdt.c fdt.h fdt_addresses.c fdt_empty_tree.c \
+ fdt_overlay.c fdt_ro.c fdt_rw.c fdt_strerror.c fdt_sw.c \
+ fdt_wip.c libfdt.h libfdt_env.h libfdt_internal.h"
get_last_dtc_version() {
git log --oneline scripts/dtc/ | grep 'upstream' | head -1 | sed -e 's/^.* \(.*\)/\1/'
diff --git a/scripts/dtc/version_gen.h b/scripts/dtc/version_gen.h
index 1229e07b4912..6a4e84798966 100644
--- a/scripts/dtc/version_gen.h
+++ b/scripts/dtc/version_gen.h
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.4.4-g756ffc4f"
+#define DTC_VERSION "DTC 1.4.5-gc1e55a55"
diff --git a/scripts/find-unused-docs.sh b/scripts/find-unused-docs.sh
new file mode 100755
index 000000000000..3f46f8977dc4
--- /dev/null
+++ b/scripts/find-unused-docs.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# (c) 2017, Jonathan Corbet <corbet@lwn.net>
+# sayli karnik <karniksayli1995@gmail.com>
+#
+# This script detects files with kernel-doc comments for exported functions
+# that are not included in documentation.
+#
+# usage: Run 'scripts/find-unused-docs.sh directory' from top level of kernel
+# tree.
+#
+# example: $ scripts/find-unused-docs.sh drivers/scsi
+#
+# Licensed under the terms of the GNU GPL License
+
+if ! [ -d "Documentation" ]; then
+ echo "Run from top level of kernel tree"
+ exit 1
+fi
+
+if [ "$#" -ne 1 ]; then
+ echo "Usage: scripts/find-unused-docs.sh directory"
+ exit 1
+fi
+
+if ! [ -d "$1" ]; then
+ echo "Directory $1 doesn't exist"
+ exit 1
+fi
+
+cd "$( dirname "${BASH_SOURCE[0]}" )"
+cd ..
+
+cd Documentation/
+
+echo "The following files contain kerneldoc comments for exported functions \
+that are not used in the formatted documentation"
+
+# FILES INCLUDED
+
+files_included=($(grep -rHR ".. kernel-doc" --include \*.rst | cut -d " " -f 3))
+
+declare -A FILES_INCLUDED
+
+for each in "${files_included[@]}"; do
+ FILES_INCLUDED[$each]="$each"
+ done
+
+cd ..
+
+# FILES NOT INCLUDED
+
+for file in `find $1 -name '*.c'`; do
+
+ if [[ ${FILES_INCLUDED[$file]+_} ]]; then
+ continue;
+ fi
+ str=$(scripts/kernel-doc -text -export "$file" 2>/dev/null)
+ if [[ -n "$str" ]]; then
+ echo "$file"
+ fi
+ done
+
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
index 4d1ea96e8794..a18bca720995 100755
--- a/scripts/headers_install.sh
+++ b/scripts/headers_install.sh
@@ -34,7 +34,7 @@ do
sed -r \
-e 's/([ \t(])(__user|__force|__iomem)[ \t]/\1/g' \
-e 's/__attribute_const__([ \t]|$)/\1/g' \
- -e 's@^#include <linux/compiler.h>@@' \
+ -e 's@^#include <linux/compiler(|_types).h>@@' \
-e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \
-e 's/(^|[ \t(])(inline|asm|volatile)([ \t(]|$)/\1__\2__\3/g' \
-e 's@#(ifndef|define|endif[ \t]*/[*])[ \t]*_UAPI@#\1 @' \
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 9d3eafea58f0..7bd52b8f63d4 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -2168,7 +2168,7 @@ sub dump_struct($$) {
my $nested;
if ($x =~ /(struct|union)\s+(\w+)\s*{(.*)}/) {
- #my $decl_type = $1;
+ my $decl_type = $1;
$declaration_name = $2;
my $members = $3;
@@ -2182,8 +2182,6 @@ sub dump_struct($$) {
# strip comments:
$members =~ s/\/\*.*?\*\///gos;
$nested =~ s/\/\*.*?\*\///gos;
- # strip kmemcheck_bitfield_{begin,end}.*;
- $members =~ s/kmemcheck_bitfield_.*?;//gos;
# strip attributes
$members =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i;
$members =~ s/__aligned\s*\([^;]*\)//gos;
@@ -2194,7 +2192,7 @@ sub dump_struct($$) {
$members =~ s/DECLARE_HASHTABLE\s*\(([^,)]+), ([^,)]+)\)/unsigned long $1\[1 << (($2) - 1)\]/gos;
create_parameterlist($members, ';', $file);
- check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested);
+ check_sections($file, $declaration_name, $decl_type, $sectcheck, $struct_actual, $nested);
output_declaration($declaration_name,
'struct',
@@ -2226,6 +2224,8 @@ sub dump_enum($$) {
if ($x =~ /enum\s+(\w+)\s*{(.*)}/) {
$declaration_name = $1;
my $members = $2;
+ my %_members;
+
$members =~ s/\s+$//;
foreach my $arg (split ',', $members) {
@@ -2236,9 +2236,16 @@ sub dump_enum($$) {
print STDERR "${file}:$.: warning: Enum value '$arg' ".
"not described in enum '$declaration_name'\n";
}
-
+ $_members{$arg} = 1;
}
+ while (my ($k, $v) = each %parameterdescs) {
+ if (!exists($_members{$k})) {
+ print STDERR "${file}:$.: warning: Excess enum value " .
+ "'$k' description in '$declaration_name'\n";
+ }
+ }
+
output_declaration($declaration_name,
'enum',
{'enum' => $declaration_name,
@@ -2506,7 +2513,7 @@ sub check_sections($$$$$$) {
} else {
if ($nested !~ m/\Q$sects[$sx]\E/) {
print STDERR "${file}:$.: warning: " .
- "Excess struct/union/enum/typedef member " .
+ "Excess $decl_type member " .
"'$sects[$sx]' " .
"description in '$decl_name'\n";
++$warnings;
diff --git a/scripts/leaking_addresses.pl b/scripts/leaking_addresses.pl
index 2977371b2956..bc5788000018 100755
--- a/scripts/leaking_addresses.pl
+++ b/scripts/leaking_addresses.pl
@@ -7,25 +7,6 @@
# - Scans dmesg output.
# - Walks directory tree and parses each file (for each directory in @DIRS).
#
-# You can configure the behaviour of the script;
-#
-# - By adding paths, for directories you do not want to walk;
-# absolute paths: @skip_walk_dirs_abs
-# directory names: @skip_walk_dirs_any
-#
-# - By adding paths, for files you do not want to parse;
-# absolute paths: @skip_parse_files_abs
-# file names: @skip_parse_files_any
-#
-# The use of @skip_xxx_xxx_any causes files to be skipped where ever they occur.
-# For example adding 'fd' to @skip_walk_dirs_any causes the fd/ directory to be
-# skipped for all PID sub-directories of /proc
-#
-# The same thing can be achieved by passing command line options to --dont-walk
-# and --dont-parse. If absolute paths are supplied to these options they are
-# appended to the @skip_xxx_xxx_abs arrays. If file names are supplied to these
-# options, they are appended to the @skip_xxx_xxx_any arrays.
-#
# Use --debug to output path before parsing, this is useful to find files that
# cause the script to choke.
#
@@ -40,6 +21,7 @@ use File::Spec;
use Cwd 'abs_path';
use Term::ANSIColor qw(:constants);
use Getopt::Long qw(:config no_auto_abbrev);
+use Config;
my $P = $0;
my $V = '0.01';
@@ -47,21 +29,36 @@ my $V = '0.01';
# Directories to scan.
my @DIRS = ('/proc', '/sys');
+# Timer for parsing each file, in seconds.
+my $TIMEOUT = 10;
+
+# Script can only grep for kernel addresses on the following architectures. If
+# your architecture is not listed here and has a grep'able kernel address please
+# consider submitting a patch.
+my @SUPPORTED_ARCHITECTURES = ('x86_64', 'ppc64');
+
# Command line options.
my $help = 0;
my $debug = 0;
-my @dont_walk = ();
-my @dont_parse = ();
+my $raw = 0;
+my $output_raw = ""; # Write raw results to file.
+my $input_raw = ""; # Read raw results from file instead of scanning.
+
+my $suppress_dmesg = 0; # Don't show dmesg in output.
+my $squash_by_path = 0; # Summary report grouped by absolute path.
+my $squash_by_filename = 0; # Summary report grouped by filename.
# Do not parse these files (absolute path).
my @skip_parse_files_abs = ('/proc/kmsg',
'/proc/kcore',
'/proc/fs/ext4/sdb1/mb_groups',
'/proc/1/fd/3',
+ '/sys/firmware/devicetree',
+ '/proc/device-tree',
'/sys/kernel/debug/tracing/trace_pipe',
'/sys/kernel/security/apparmor/revision');
-# Do not parse thes files under any subdirectory.
+# Do not parse these files under any subdirectory.
my @skip_parse_files_any = ('0',
'1',
'2',
@@ -82,6 +79,7 @@ my @skip_walk_dirs_any = ('self',
'thread-self',
'cwd',
'fd',
+ 'usbmon',
'stderr',
'stdin',
'stdout');
@@ -91,24 +89,31 @@ sub help
my ($exitcode) = @_;
print << "EOM";
+
Usage: $P [OPTIONS]
Version: $V
Options:
- --dont-walk=<dir> Don't walk tree starting at <dir>.
- --dont-parse=<file> Don't parse <file>.
- -d, --debug Display debugging output.
- -h, --help, --version Display this help and exit.
+ -o, --output-raw=<file> Save results for future processing.
+ -i, --input-raw=<file> Read results from file instead of scanning.
+ --raw Show raw results (default).
+ --suppress-dmesg Do not show dmesg results.
+ --squash-by-path Show one result per unique path.
+ --squash-by-filename Show one result per unique filename.
+ -d, --debug Display debugging output.
+ -h, --help, --version Display this help and exit.
+
+Examples:
-If an absolute path is passed to --dont_XXX then this path is skipped. If a
-single filename is passed then this file/directory will be skipped when
-appearing under any subdirectory.
+ # Scan kernel and dump raw results.
+ $0
-Example:
+ # Scan kernel and save results to file.
+ $0 --output-raw scan.out
- # Just scan dmesg output.
- scripts/leaking_addresses.pl --dont_walk_abs /proc --dont_walk_abs /sys
+ # View summary report.
+ $0 --input-raw scan.out --squash-by-filename
Scans the running (64 bit) kernel for potential leaking addresses.
@@ -117,99 +122,136 @@ EOM
}
GetOptions(
- 'dont-walk=s' => \@dont_walk,
- 'dont-parse=s' => \@dont_parse,
'd|debug' => \$debug,
'h|help' => \$help,
- 'version' => \$help
+ 'version' => \$help,
+ 'o|output-raw=s' => \$output_raw,
+ 'i|input-raw=s' => \$input_raw,
+ 'suppress-dmesg' => \$suppress_dmesg,
+ 'squash-by-path' => \$squash_by_path,
+ 'squash-by-filename' => \$squash_by_filename,
+ 'raw' => \$raw,
) or help(1);
help(0) if ($help);
-push_to_global();
+if ($input_raw) {
+ format_output($input_raw);
+ exit(0);
+}
+
+if (!$input_raw and ($squash_by_path or $squash_by_filename)) {
+ printf "\nSummary reporting only available with --input-raw=<file>\n";
+ printf "(First run scan with --output-raw=<file>.)\n";
+ exit(128);
+}
+
+if (!is_supported_architecture()) {
+ printf "\nScript does not support your architecture, sorry.\n";
+ printf "\nCurrently we support: \n\n";
+ foreach(@SUPPORTED_ARCHITECTURES) {
+ printf "\t%s\n", $_;
+ }
+
+ my $archname = $Config{archname};
+ printf "\n\$ perl -MConfig -e \'print \"\$Config{archname}\\n\"\'\n";
+ printf "%s\n", $archname;
+
+ exit(129);
+}
+
+if ($output_raw) {
+ open my $fh, '>', $output_raw or die "$0: $output_raw: $!\n";
+ select $fh;
+}
parse_dmesg();
walk(@DIRS);
exit 0;
-sub debug_arrays
+sub dprint
{
- print 'dirs_any: ' . join(", ", @skip_walk_dirs_any) . "\n";
- print 'dirs_abs: ' . join(", ", @skip_walk_dirs_abs) . "\n";
- print 'parse_any: ' . join(", ", @skip_parse_files_any) . "\n";
- print 'parse_abs: ' . join(", ", @skip_parse_files_abs) . "\n";
+ printf(STDERR @_) if $debug;
}
-sub dprint
+sub is_supported_architecture
{
- printf(STDERR @_) if $debug;
+ return (is_x86_64() or is_ppc64());
}
-sub push_in_abs_any
+sub is_x86_64
{
- my ($in, $abs, $any) = @_;
-
- foreach my $path (@$in) {
- if (File::Spec->file_name_is_absolute($path)) {
- push @$abs, $path;
- } elsif (index($path,'/') == -1) {
- push @$any, $path;
- } else {
- print 'path error: ' . $path;
- }
+ my $archname = $Config{archname};
+
+ if ($archname =~ m/x86_64/) {
+ return 1;
}
+ return 0;
}
-# Push command line options to global arrays.
-sub push_to_global
+sub is_ppc64
{
- push_in_abs_any(\@dont_walk, \@skip_walk_dirs_abs, \@skip_walk_dirs_any);
- push_in_abs_any(\@dont_parse, \@skip_parse_files_abs, \@skip_parse_files_any);
+ my $archname = $Config{archname};
+
+ if ($archname =~ m/powerpc/ and $archname =~ m/64/) {
+ return 1;
+ }
+ return 0;
}
sub is_false_positive
{
- my ($match) = @_;
+ my ($match) = @_;
- if ($match =~ '\b(0x)?(f|F){16}\b' or
- $match =~ '\b(0x)?0{16}\b') {
- return 1;
- }
+ if ($match =~ '\b(0x)?(f|F){16}\b' or
+ $match =~ '\b(0x)?0{16}\b') {
+ return 1;
+ }
- # vsyscall memory region, we should probably check against a range here.
- if ($match =~ '\bf{10}600000\b' or
- $match =~ '\bf{10}601000\b') {
- return 1;
- }
+ if (is_x86_64) {
+ # vsyscall memory region, we should probably check against a range here.
+ if ($match =~ '\bf{10}600000\b' or
+ $match =~ '\bf{10}601000\b') {
+ return 1;
+ }
+ }
- return 0;
+ return 0;
}
# True if argument potentially contains a kernel address.
sub may_leak_address
{
- my ($line) = @_;
- my $address = '\b(0x)?ffff[[:xdigit:]]{12}\b';
+ my ($line) = @_;
+ my $address_re;
- # Signal masks.
- if ($line =~ '^SigBlk:' or
- $line =~ '^SigCgt:') {
- return 0;
- }
+ # Signal masks.
+ if ($line =~ '^SigBlk:' or
+ $line =~ '^SigIgn:' or
+ $line =~ '^SigCgt:') {
+ return 0;
+ }
- if ($line =~ '\bKEY=[[:xdigit:]]{14} [[:xdigit:]]{16} [[:xdigit:]]{16}\b' or
- $line =~ '\b[[:xdigit:]]{14} [[:xdigit:]]{16} [[:xdigit:]]{16}\b') {
+ if ($line =~ '\bKEY=[[:xdigit:]]{14} [[:xdigit:]]{16} [[:xdigit:]]{16}\b' or
+ $line =~ '\b[[:xdigit:]]{14} [[:xdigit:]]{16} [[:xdigit:]]{16}\b') {
return 0;
- }
+ }
- while (/($address)/g) {
- if (!is_false_positive($1)) {
- return 1;
- }
- }
+ # One of these is guaranteed to be true.
+ if (is_x86_64()) {
+ $address_re = '\b(0x)?ffff[[:xdigit:]]{12}\b';
+ } elsif (is_ppc64()) {
+ $address_re = '\b(0x)?[89abcdef]00[[:xdigit:]]{13}\b';
+ }
- return 0;
+ while (/($address_re)/g) {
+ if (!is_false_positive($1)) {
+ return 1;
+ }
+ }
+
+ return 0;
}
sub parse_dmesg
@@ -246,6 +288,23 @@ sub skip_parse
return skip($path, \@skip_parse_files_abs, \@skip_parse_files_any);
}
+sub timed_parse_file
+{
+ my ($file) = @_;
+
+ eval {
+ local $SIG{ALRM} = sub { die "alarm\n" }; # NB: \n required.
+ alarm $TIMEOUT;
+ parse_file($file);
+ alarm 0;
+ };
+
+ if ($@) {
+ die unless $@ eq "alarm\n"; # Propagate unexpected errors.
+ printf STDERR "timed out parsing: %s\n", $file;
+ }
+}
+
sub parse_file
{
my ($file) = @_;
@@ -281,7 +340,6 @@ sub skip_walk
sub walk
{
my @dirs = @_;
- my %seen;
while (my $pwd = shift @dirs) {
next if (skip_walk($pwd));
@@ -298,8 +356,146 @@ sub walk
if (-d $path) {
push @dirs, $path;
} else {
- parse_file($path);
+ timed_parse_file($path);
}
}
}
}
+
+sub format_output
+{
+ my ($file) = @_;
+
+ # Default is to show raw results.
+ if ($raw or (!$squash_by_path and !$squash_by_filename)) {
+ dump_raw_output($file);
+ return;
+ }
+
+ my ($total, $dmesg, $paths, $files) = parse_raw_file($file);
+
+ printf "\nTotal number of results from scan (incl dmesg): %d\n", $total;
+
+ if (!$suppress_dmesg) {
+ print_dmesg($dmesg);
+ }
+
+ if ($squash_by_filename) {
+ squash_by($files, 'filename');
+ }
+
+ if ($squash_by_path) {
+ squash_by($paths, 'path');
+ }
+}
+
+sub dump_raw_output
+{
+ my ($file) = @_;
+
+ open (my $fh, '<', $file) or die "$0: $file: $!\n";
+ while (<$fh>) {
+ if ($suppress_dmesg) {
+ if ("dmesg:" eq substr($_, 0, 6)) {
+ next;
+ }
+ }
+ print $_;
+ }
+ close $fh;
+}
+
+sub parse_raw_file
+{
+ my ($file) = @_;
+
+ my $total = 0; # Total number of lines parsed.
+ my @dmesg; # dmesg output.
+ my %files; # Unique filenames containing leaks.
+ my %paths; # Unique paths containing leaks.
+
+ open (my $fh, '<', $file) or die "$0: $file: $!\n";
+ while (my $line = <$fh>) {
+ $total++;
+
+ if ("dmesg:" eq substr($line, 0, 6)) {
+ push @dmesg, $line;
+ next;
+ }
+
+ cache_path(\%paths, $line);
+ cache_filename(\%files, $line);
+ }
+
+ return $total, \@dmesg, \%paths, \%files;
+}
+
+sub print_dmesg
+{
+ my ($dmesg) = @_;
+
+ print "\ndmesg output:\n";
+
+ if (@$dmesg == 0) {
+ print "<no results>\n";
+ return;
+ }
+
+ foreach(@$dmesg) {
+ my $index = index($_, ': ');
+ $index += 2; # skip ': '
+ print substr($_, $index);
+ }
+}
+
+sub squash_by
+{
+ my ($ref, $desc) = @_;
+
+ print "\nResults squashed by $desc (excl dmesg). ";
+ print "Displaying [<number of results> <$desc>], <example result>\n";
+
+ if (keys %$ref == 0) {
+ print "<no results>\n";
+ return;
+ }
+
+ foreach(keys %$ref) {
+ my $lines = $ref->{$_};
+ my $length = @$lines;
+ printf "[%d %s] %s", $length, $_, @$lines[0];
+ }
+}
+
+sub cache_path
+{
+ my ($paths, $line) = @_;
+
+ my $index = index($line, ': ');
+ my $path = substr($line, 0, $index);
+
+ $index += 2; # skip ': '
+ add_to_cache($paths, $path, substr($line, $index));
+}
+
+sub cache_filename
+{
+ my ($files, $line) = @_;
+
+ my $index = index($line, ': ');
+ my $path = substr($line, 0, $index);
+ my $filename = basename($path);
+
+ $index += 2; # skip ': '
+ add_to_cache($files, $filename, substr($line, $index));
+}
+
+sub add_to_cache
+{
+ my ($cache, $key, $value) = @_;
+
+ if (!$cache->{$key}) {
+ $cache->{$key} = ();
+ }
+ push @{$cache->{$key}}, $value;
+}
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 98314b400a95..f51cf977c65b 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1963,7 +1963,7 @@ static void read_symbols(char *modname)
}
license = get_modinfo(info.modinfo, info.modinfo_len, "license");
- if (info.modinfo && !license && !is_vmlinux(modname))
+ if (!license && !is_vmlinux(modname))
warn("modpost: missing MODULE_LICENSE() in %s\n"
"see include/linux/module.h for "
"more information\n", modname);
diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h
index 436b3a722357..f546707a2bbb 100644
--- a/security/apparmor/include/lib.h
+++ b/security/apparmor/include/lib.h
@@ -19,17 +19,6 @@
#include "match.h"
-/* Provide our own test for whether a write lock is held for asserts
- * this is because on none SMP systems write_can_lock will always
- * resolve to true, which is what you want for code making decisions
- * based on it, but wrong for asserts checking that the lock is held
- */
-#ifdef CONFIG_SMP
-#define write_is_locked(X) !write_can_lock(X)
-#else
-#define write_is_locked(X) (1)
-#endif /* CONFIG_SMP */
-
/*
* DEBUG remains global (no per profile flag) since it is mostly used in sysctl
* which is not related to profile accesses.
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index c5b99b954580..ad28e03a6f30 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -80,7 +80,7 @@ void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new)
AA_BUG(!orig);
AA_BUG(!new);
- AA_BUG(!write_is_locked(&labels_set(orig)->lock));
+ lockdep_assert_held_exclusive(&labels_set(orig)->lock);
tmp = rcu_dereference_protected(orig->proxy->label,
&labels_ns(orig)->lock);
@@ -571,7 +571,7 @@ static bool __label_remove(struct aa_label *label, struct aa_label *new)
AA_BUG(!ls);
AA_BUG(!label);
- AA_BUG(!write_is_locked(&ls->lock));
+ lockdep_assert_held_exclusive(&ls->lock);
if (new)
__aa_proxy_redirect(label, new);
@@ -608,7 +608,7 @@ static bool __label_replace(struct aa_label *old, struct aa_label *new)
AA_BUG(!ls);
AA_BUG(!old);
AA_BUG(!new);
- AA_BUG(!write_is_locked(&ls->lock));
+ lockdep_assert_held_exclusive(&ls->lock);
AA_BUG(new->flags & FLAG_IN_TREE);
if (!label_is_stale(old))
@@ -645,7 +645,7 @@ static struct aa_label *__label_insert(struct aa_labelset *ls,
AA_BUG(!ls);
AA_BUG(!label);
AA_BUG(labels_set(label) != ls);
- AA_BUG(!write_is_locked(&ls->lock));
+ lockdep_assert_held_exclusive(&ls->lock);
AA_BUG(label->flags & FLAG_IN_TREE);
/* Figure out where to put new node */
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 1346ee5be04f..17893fde4487 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -813,11 +813,11 @@ static const struct kernel_param_ops param_ops_aalockpolicy = {
.get = param_get_aalockpolicy
};
-static int param_set_audit(const char *val, struct kernel_param *kp);
-static int param_get_audit(char *buffer, struct kernel_param *kp);
+static int param_set_audit(const char *val, const struct kernel_param *kp);
+static int param_get_audit(char *buffer, const struct kernel_param *kp);
-static int param_set_mode(const char *val, struct kernel_param *kp);
-static int param_get_mode(char *buffer, struct kernel_param *kp);
+static int param_set_mode(const char *val, const struct kernel_param *kp);
+static int param_get_mode(char *buffer, const struct kernel_param *kp);
/* Flag values, also controllable via /sys/module/apparmor/parameters
* We define special types as we want to do additional mediation.
@@ -951,7 +951,7 @@ static int param_get_aauint(char *buffer, const struct kernel_param *kp)
return param_get_uint(buffer, kp);
}
-static int param_get_audit(char *buffer, struct kernel_param *kp)
+static int param_get_audit(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
@@ -960,7 +960,7 @@ static int param_get_audit(char *buffer, struct kernel_param *kp)
return sprintf(buffer, "%s", audit_mode_names[aa_g_audit]);
}
-static int param_set_audit(const char *val, struct kernel_param *kp)
+static int param_set_audit(const char *val, const struct kernel_param *kp)
{
int i;
@@ -981,7 +981,7 @@ static int param_set_audit(const char *val, struct kernel_param *kp)
return -EINVAL;
}
-static int param_get_mode(char *buffer, struct kernel_param *kp)
+static int param_get_mode(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
return -EINVAL;
@@ -991,7 +991,7 @@ static int param_get_mode(char *buffer, struct kernel_param *kp)
return sprintf(buffer, "%s", aa_profile_mode_names[aa_g_profile_mode]);
}
-static int param_set_mode(const char *val, struct kernel_param *kp)
+static int param_set_mode(const char *val, const struct kernel_param *kp)
{
int i;
diff --git a/security/commoncap.c b/security/commoncap.c
index fc46f5b85251..4f8e09340956 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -536,7 +536,7 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size)
static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
struct linux_binprm *bprm,
bool *effective,
- bool *has_cap)
+ bool *has_fcap)
{
struct cred *new = bprm->cred;
unsigned i;
@@ -546,7 +546,7 @@ static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
*effective = true;
if (caps->magic_etc & VFS_CAP_REVISION_MASK)
- *has_cap = true;
+ *has_fcap = true;
CAP_FOR_EACH_U32(i) {
__u32 permitted = caps->permitted.cap[i];
@@ -653,7 +653,7 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
* its xattrs and, if present, apply them to the proposed credentials being
* constructed by execve().
*/
-static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_cap)
+static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_fcap)
{
int rc = 0;
struct cpu_vfs_cap_data vcaps;
@@ -684,7 +684,7 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_c
goto out;
}
- rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_cap);
+ rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_fcap);
if (rc == -EINVAL)
printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
__func__, rc, bprm->filename);
@@ -696,6 +696,115 @@ out:
return rc;
}
+static inline bool root_privileged(void) { return !issecure(SECURE_NOROOT); }
+
+static inline bool __is_real(kuid_t uid, struct cred *cred)
+{ return uid_eq(cred->uid, uid); }
+
+static inline bool __is_eff(kuid_t uid, struct cred *cred)
+{ return uid_eq(cred->euid, uid); }
+
+static inline bool __is_suid(kuid_t uid, struct cred *cred)
+{ return !__is_real(uid, cred) && __is_eff(uid, cred); }
+
+/*
+ * handle_privileged_root - Handle case of privileged root
+ * @bprm: The execution parameters, including the proposed creds
+ * @has_fcap: Are any file capabilities set?
+ * @effective: Do we have effective root privilege?
+ * @root_uid: This namespace's root UID WRT the initial USER namespace
+ *
+ * Handle the case where root is privileged and hasn't been neutered by
+ * SECURE_NOROOT. If file capabilities are set, they won't be combined with
+ * set UID root and nothing is changed. If we are root, cap_permitted is
+ * updated. If we have become set UID root, the effective bit is set.
+ */
+static void handle_privileged_root(struct linux_binprm *bprm, bool has_fcap,
+ bool *effective, kuid_t root_uid)
+{
+ const struct cred *old = current_cred();
+ struct cred *new = bprm->cred;
+
+ if (!root_privileged())
+ return;
+ /*
+ * If the legacy file capability is set, then don't set privs
+ * for a setuid root binary run by a non-root user. Do set it
+ * for a root user just to cause least surprise to an admin.
+ */
+ if (has_fcap && __is_suid(root_uid, new)) {
+ warn_setuid_and_fcaps_mixed(bprm->filename);
+ return;
+ }
+ /*
+ * To support inheritance of root-permissions and suid-root
+ * executables under compatibility mode, we override the
+ * capability sets for the file.
+ */
+ if (__is_eff(root_uid, new) || __is_real(root_uid, new)) {
+ /* pP' = (cap_bset & ~0) | (pI & ~0) */
+ new->cap_permitted = cap_combine(old->cap_bset,
+ old->cap_inheritable);
+ }
+ /*
+ * If only the real uid is 0, we do not set the effective bit.
+ */
+ if (__is_eff(root_uid, new))
+ *effective = true;
+}
+
+#define __cap_gained(field, target, source) \
+ !cap_issubset(target->cap_##field, source->cap_##field)
+#define __cap_grew(target, source, cred) \
+ !cap_issubset(cred->cap_##target, cred->cap_##source)
+#define __cap_full(field, cred) \
+ cap_issubset(CAP_FULL_SET, cred->cap_##field)
+
+static inline bool __is_setuid(struct cred *new, const struct cred *old)
+{ return !uid_eq(new->euid, old->uid); }
+
+static inline bool __is_setgid(struct cred *new, const struct cred *old)
+{ return !gid_eq(new->egid, old->gid); }
+
+/*
+ * 1) Audit candidate if current->cap_effective is set
+ *
+ * We do not bother to audit if 3 things are true:
+ * 1) cap_effective has all caps
+ * 2) we became root *OR* were already root
+ * 3) root is supposed to have all caps (SECURE_NOROOT)
+ * Since this is just a normal root execing a process.
+ *
+ * Number 1 above might fail if you don't have a full bset, but I think
+ * that is interesting information to audit.
+ *
+ * A number of other conditions require logging:
+ * 2) something prevented setuid root getting all caps
+ * 3) non-setuid root gets fcaps
+ * 4) non-setuid root gets ambient
+ */
+static inline bool nonroot_raised_pE(struct cred *new, const struct cred *old,
+ kuid_t root, bool has_fcap)
+{
+ bool ret = false;
+
+ if ((__cap_grew(effective, ambient, new) &&
+ !(__cap_full(effective, new) &&
+ (__is_eff(root, new) || __is_real(root, new)) &&
+ root_privileged())) ||
+ (root_privileged() &&
+ __is_suid(root, new) &&
+ !__cap_full(effective, new)) ||
+ (!__is_setuid(new, old) &&
+ ((has_fcap &&
+ __cap_gained(permitted, new, old)) ||
+ __cap_gained(ambient, new, old))))
+
+ ret = true;
+
+ return ret;
+}
+
/**
* cap_bprm_set_creds - Set up the proposed credentials for execve().
* @bprm: The execution parameters, including the proposed creds
@@ -708,61 +817,33 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
{
const struct cred *old = current_cred();
struct cred *new = bprm->cred;
- bool effective, has_cap = false, is_setid;
+ bool effective = false, has_fcap = false, is_setid;
int ret;
kuid_t root_uid;
if (WARN_ON(!cap_ambient_invariant_ok(old)))
return -EPERM;
- effective = false;
- ret = get_file_caps(bprm, &effective, &has_cap);
+ ret = get_file_caps(bprm, &effective, &has_fcap);
if (ret < 0)
return ret;
root_uid = make_kuid(new->user_ns, 0);
- if (!issecure(SECURE_NOROOT)) {
- /*
- * If the legacy file capability is set, then don't set privs
- * for a setuid root binary run by a non-root user. Do set it
- * for a root user just to cause least surprise to an admin.
- */
- if (has_cap && !uid_eq(new->uid, root_uid) && uid_eq(new->euid, root_uid)) {
- warn_setuid_and_fcaps_mixed(bprm->filename);
- goto skip;
- }
- /*
- * To support inheritance of root-permissions and suid-root
- * executables under compatibility mode, we override the
- * capability sets for the file.
- *
- * If only the real uid is 0, we do not set the effective bit.
- */
- if (uid_eq(new->euid, root_uid) || uid_eq(new->uid, root_uid)) {
- /* pP' = (cap_bset & ~0) | (pI & ~0) */
- new->cap_permitted = cap_combine(old->cap_bset,
- old->cap_inheritable);
- }
- if (uid_eq(new->euid, root_uid))
- effective = true;
- }
-skip:
+ handle_privileged_root(bprm, has_fcap, &effective, root_uid);
/* if we have fs caps, clear dangerous personality flags */
- if (!cap_issubset(new->cap_permitted, old->cap_permitted))
+ if (__cap_gained(permitted, new, old))
bprm->per_clear |= PER_CLEAR_ON_SETID;
-
/* Don't let someone trace a set[ug]id/setpcap binary with the revised
* credentials unless they have the appropriate permit.
*
* In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
*/
- is_setid = !uid_eq(new->euid, old->uid) || !gid_eq(new->egid, old->gid);
+ is_setid = __is_setuid(new, old) || __is_setgid(new, old);
- if ((is_setid ||
- !cap_issubset(new->cap_permitted, old->cap_permitted)) &&
+ if ((is_setid || __cap_gained(permitted, new, old)) &&
((bprm->unsafe & ~LSM_UNSAFE_PTRACE) ||
!ptracer_capable(current, new->user_ns))) {
/* downgrade; they get no more than they had, and maybe less */
@@ -779,7 +860,7 @@ skip:
new->sgid = new->fsgid = new->egid;
/* File caps or setid cancels ambient. */
- if (has_cap || is_setid)
+ if (has_fcap || is_setid)
cap_clear(new->cap_ambient);
/*
@@ -800,26 +881,10 @@ skip:
if (WARN_ON(!cap_ambient_invariant_ok(new)))
return -EPERM;
- /*
- * Audit candidate if current->cap_effective is set
- *
- * We do not bother to audit if 3 things are true:
- * 1) cap_effective has all caps
- * 2) we are root
- * 3) root is supposed to have all caps (SECURE_NOROOT)
- * Since this is just a normal root execing a process.
- *
- * Number 1 above might fail if you don't have a full bset, but I think
- * that is interesting information to audit.
- */
- if (!cap_issubset(new->cap_effective, new->cap_ambient)) {
- if (!cap_issubset(CAP_FULL_SET, new->cap_effective) ||
- !uid_eq(new->euid, root_uid) || !uid_eq(new->uid, root_uid) ||
- issecure(SECURE_NOROOT)) {
- ret = audit_log_bprm_fcaps(bprm, new, old);
- if (ret < 0)
- return ret;
- }
+ if (nonroot_raised_pE(new, old, root_uid, has_fcap)) {
+ ret = audit_log_bprm_fcaps(bprm, new, old);
+ if (ret < 0)
+ return ret;
}
new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
@@ -829,13 +894,11 @@ skip:
/* Check for privilege-elevated exec. */
bprm->cap_elevated = 0;
- if (is_setid) {
+ if (is_setid ||
+ (!__is_real(root_uid, new) &&
+ (effective ||
+ __cap_grew(permitted, ambient, new))))
bprm->cap_elevated = 1;
- } else if (!uid_eq(new->uid, root_uid)) {
- if (effective ||
- !cap_issubset(new->cap_permitted, new->cap_ambient))
- bprm->cap_elevated = 1;
- }
return 0;
}
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 06554c448dce..6f9e4ce568cd 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -112,21 +112,25 @@ int __init integrity_init_keyring(const unsigned int id)
int __init integrity_load_x509(const unsigned int id, const char *path)
{
key_ref_t key;
- char *data;
+ void *data;
+ loff_t size;
int rc;
if (!keyring[id])
return -EINVAL;
- rc = integrity_read_file(path, &data);
- if (rc < 0)
+ rc = kernel_read_file_from_path(path, &data, &size, 0,
+ READING_X509_CERTIFICATE);
+ if (rc < 0) {
+ pr_err("Unable to open file: %s (%d)", path, rc);
return rc;
+ }
key = key_create_or_update(make_key_ref(keyring[id], 1),
"asymmetric",
NULL,
data,
- rc,
+ size,
((KEY_POS_ALL & ~KEY_POS_SETATTR) |
KEY_USR_VIEW | KEY_USR_READ),
KEY_ALLOC_NOT_IN_QUOTA);
@@ -139,6 +143,6 @@ int __init integrity_load_x509(const unsigned int id, const char *path)
key_ref_to_ptr(key)->description, path);
key_ref_put(key);
}
- kfree(data);
+ vfree(data);
return 0;
}
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
index f5f12727771a..241aca315b0c 100644
--- a/security/integrity/evm/evm.h
+++ b/security/integrity/evm/evm.h
@@ -23,6 +23,9 @@
#define EVM_INIT_HMAC 0x0001
#define EVM_INIT_X509 0x0002
+#define EVM_SETUP 0x80000000 /* userland has signaled key load */
+
+#define EVM_INIT_MASK (EVM_INIT_HMAC | EVM_INIT_X509 | EVM_SETUP)
extern int evm_initialized;
extern char *evm_hmac;
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 1d32cd20009a..bcd64baf8788 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -80,7 +80,7 @@ static struct shash_desc *init_desc(char type)
if (type == EVM_XATTR_HMAC) {
if (!(evm_initialized & EVM_INIT_HMAC)) {
- pr_err("HMAC key is not set\n");
+ pr_err_once("HMAC key is not set\n");
return ERR_PTR(-ENOKEY);
}
tfm = &hmac_tfm;
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 063d38aef64e..9826c02e2db8 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -49,6 +49,9 @@ char *evm_config_xattrnames[] = {
XATTR_NAME_SMACKMMAP,
#endif
#endif
+#ifdef CONFIG_SECURITY_APPARMOR
+ XATTR_NAME_APPARMOR,
+#endif
#ifdef CONFIG_IMA_APPRAISE
XATTR_NAME_IMA,
#endif
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
index c8dccd54d501..319cf16d6603 100644
--- a/security/integrity/evm/evm_secfs.c
+++ b/security/integrity/evm/evm_secfs.c
@@ -40,7 +40,7 @@ static ssize_t evm_read_key(struct file *filp, char __user *buf,
if (*ppos != 0)
return 0;
- sprintf(temp, "%d", evm_initialized);
+ sprintf(temp, "%d", (evm_initialized & ~EVM_SETUP));
rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
return rc;
@@ -61,24 +61,29 @@ static ssize_t evm_read_key(struct file *filp, char __user *buf,
static ssize_t evm_write_key(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- char temp[80];
- int i;
+ int i, ret;
- if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_INIT_HMAC))
+ if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_SETUP))
return -EPERM;
- if (count >= sizeof(temp) || count == 0)
- return -EINVAL;
-
- if (copy_from_user(temp, buf, count) != 0)
- return -EFAULT;
+ ret = kstrtoint_from_user(buf, count, 0, &i);
- temp[count] = '\0';
+ if (ret)
+ return ret;
- if ((sscanf(temp, "%d", &i) != 1) || (i != 1))
+ /* Reject invalid values */
+ if (!i || (i & ~EVM_INIT_MASK) != 0)
return -EINVAL;
- evm_init_key();
+ if (i & EVM_INIT_HMAC) {
+ ret = evm_init_key();
+ if (ret != 0)
+ return ret;
+ /* Forbid further writes after the symmetric key is loaded */
+ i |= EVM_SETUP;
+ }
+
+ evm_initialized |= i;
return count;
}
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index 6fc888ca468e..c84e05866052 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -200,55 +200,6 @@ int integrity_kernel_read(struct file *file, loff_t offset,
}
/*
- * integrity_read_file - read entire file content into the buffer
- *
- * This is function opens a file, allocates the buffer of required
- * size, read entire file content to the buffer and closes the file
- *
- * It is used only by init code.
- *
- */
-int __init integrity_read_file(const char *path, char **data)
-{
- struct file *file;
- loff_t size;
- char *buf;
- int rc = -EINVAL;
-
- if (!path || !*path)
- return -EINVAL;
-
- file = filp_open(path, O_RDONLY, 0);
- if (IS_ERR(file)) {
- rc = PTR_ERR(file);
- pr_err("Unable to open file: %s (%d)", path, rc);
- return rc;
- }
-
- size = i_size_read(file_inode(file));
- if (size <= 0)
- goto out;
-
- buf = kmalloc(size, GFP_KERNEL);
- if (!buf) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = integrity_kernel_read(file, 0, buf, size);
- if (rc == size) {
- *data = buf;
- } else {
- kfree(buf);
- if (rc >= 0)
- rc = -EIO;
- }
-out:
- fput(file);
- return rc;
-}
-
-/*
* integrity_load_keys - load integrity keys hook
*
* Hooks is called from init/main.c:kernel_init_freeable()
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index c2edba8de35e..c7e8db0ea4c0 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -199,42 +199,59 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
struct inode *inode = file_inode(file);
const char *filename = file->f_path.dentry->d_name.name;
int result = 0;
+ int length;
+ void *tmpbuf;
+ u64 i_version;
struct {
struct ima_digest_data hdr;
char digest[IMA_MAX_DIGEST_SIZE];
} hash;
- if (!(iint->flags & IMA_COLLECTED)) {
- u64 i_version = file_inode(file)->i_version;
+ if (iint->flags & IMA_COLLECTED)
+ goto out;
- if (file->f_flags & O_DIRECT) {
- audit_cause = "failed(directio)";
- result = -EACCES;
- goto out;
- }
+ /*
+ * Detecting file change is based on i_version. On filesystems
+ * which do not support i_version, support is limited to an initial
+ * measurement/appraisal/audit.
+ */
+ i_version = file_inode(file)->i_version;
+ hash.hdr.algo = algo;
- hash.hdr.algo = algo;
-
- result = (!buf) ? ima_calc_file_hash(file, &hash.hdr) :
- ima_calc_buffer_hash(buf, size, &hash.hdr);
- if (!result) {
- int length = sizeof(hash.hdr) + hash.hdr.length;
- void *tmpbuf = krealloc(iint->ima_hash, length,
- GFP_NOFS);
- if (tmpbuf) {
- iint->ima_hash = tmpbuf;
- memcpy(iint->ima_hash, &hash, length);
- iint->version = i_version;
- iint->flags |= IMA_COLLECTED;
- } else
- result = -ENOMEM;
- }
+ /* Initialize hash digest to 0's in case of failure */
+ memset(&hash.digest, 0, sizeof(hash.digest));
+
+ if (buf)
+ result = ima_calc_buffer_hash(buf, size, &hash.hdr);
+ else
+ result = ima_calc_file_hash(file, &hash.hdr);
+
+ if (result && result != -EBADF && result != -EINVAL)
+ goto out;
+
+ length = sizeof(hash.hdr) + hash.hdr.length;
+ tmpbuf = krealloc(iint->ima_hash, length, GFP_NOFS);
+ if (!tmpbuf) {
+ result = -ENOMEM;
+ goto out;
}
+
+ iint->ima_hash = tmpbuf;
+ memcpy(iint->ima_hash, &hash, length);
+ iint->version = i_version;
+
+ /* Possibly temporary failure due to type of read (eg. O_DIRECT) */
+ if (!result)
+ iint->flags |= IMA_COLLECTED;
out:
- if (result)
+ if (result) {
+ if (file->f_flags & O_DIRECT)
+ audit_cause = "failed(directio)";
+
integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
filename, "collect_data", audit_cause,
result, 0);
+ }
return result;
}
@@ -278,7 +295,7 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
}
result = ima_store_template(entry, violation, inode, filename, pcr);
- if (!result || result == -EEXIST) {
+ if ((!result || result == -EEXIST) && !(file->f_flags & O_DIRECT)) {
iint->flags |= IMA_MEASURED;
iint->measured_pcrs |= (0x1 << pcr);
}
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 809ba70fbbbf..ec7dfa02c051 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -40,7 +40,7 @@ __setup("ima_appraise=", default_appraise_setup);
*/
bool is_ima_appraise_enabled(void)
{
- return (ima_appraise & IMA_APPRAISE_ENFORCE) ? 1 : 0;
+ return ima_appraise & IMA_APPRAISE_ENFORCE;
}
/*
@@ -405,7 +405,7 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
return -EINVAL;
ima_reset_appraise_flags(d_backing_inode(dentry),
- (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
+ xvalue->type == EVM_IMA_XATTR_DIGSIG);
result = 0;
}
return result;
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 802d5d20f36f..9057b163c378 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -27,11 +27,6 @@
#include "ima.h"
-struct ahash_completion {
- struct completion completion;
- int err;
-};
-
/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
@@ -196,30 +191,13 @@ static void ima_free_atfm(struct crypto_ahash *tfm)
crypto_free_ahash(tfm);
}
-static void ahash_complete(struct crypto_async_request *req, int err)
+static inline int ahash_wait(int err, struct crypto_wait *wait)
{
- struct ahash_completion *res = req->data;
- if (err == -EINPROGRESS)
- return;
- res->err = err;
- complete(&res->completion);
-}
+ err = crypto_wait_req(err, wait);
-static int ahash_wait(int err, struct ahash_completion *res)
-{
- switch (err) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&res->completion);
- reinit_completion(&res->completion);
- err = res->err;
- /* fall through */
- default:
+ if (err)
pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);
- }
return err;
}
@@ -233,7 +211,7 @@ static int ima_calc_file_hash_atfm(struct file *file,
int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0;
struct ahash_request *req;
struct scatterlist sg[1];
- struct ahash_completion res;
+ struct crypto_wait wait;
size_t rbuf_size[2];
hash->length = crypto_ahash_digestsize(tfm);
@@ -242,12 +220,12 @@ static int ima_calc_file_hash_atfm(struct file *file,
if (!req)
return -ENOMEM;
- init_completion(&res.completion);
+ crypto_init_wait(&wait);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
- ahash_complete, &res);
+ crypto_req_done, &wait);
- rc = ahash_wait(crypto_ahash_init(req), &res);
+ rc = ahash_wait(crypto_ahash_init(req), &wait);
if (rc)
goto out1;
@@ -288,7 +266,7 @@ static int ima_calc_file_hash_atfm(struct file *file,
* read/request, wait for the completion of the
* previous ahash_update() request.
*/
- rc = ahash_wait(ahash_rc, &res);
+ rc = ahash_wait(ahash_rc, &wait);
if (rc)
goto out3;
}
@@ -304,7 +282,7 @@ static int ima_calc_file_hash_atfm(struct file *file,
* read/request, wait for the completion of the
* previous ahash_update() request.
*/
- rc = ahash_wait(ahash_rc, &res);
+ rc = ahash_wait(ahash_rc, &wait);
if (rc)
goto out3;
}
@@ -318,7 +296,7 @@ static int ima_calc_file_hash_atfm(struct file *file,
active = !active; /* swap buffers, if we use two */
}
/* wait for the last update request to complete */
- rc = ahash_wait(ahash_rc, &res);
+ rc = ahash_wait(ahash_rc, &wait);
out3:
if (read)
file->f_mode &= ~FMODE_READ;
@@ -327,7 +305,7 @@ out3:
out2:
if (!rc) {
ahash_request_set_crypt(req, NULL, hash->digest, 0);
- rc = ahash_wait(crypto_ahash_final(req), &res);
+ rc = ahash_wait(crypto_ahash_final(req), &wait);
}
out1:
ahash_request_free(req);
@@ -441,6 +419,16 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
loff_t i_size;
int rc;
+ /*
+ * For consistency, fail files opened with the O_DIRECT flag on
+ * filesystems mounted with/without DAX option.
+ */
+ if (file->f_flags & O_DIRECT) {
+ hash->length = hash_digest_size[ima_hash_algo];
+ hash->algo = ima_hash_algo;
+ return -EINVAL;
+ }
+
i_size = i_size_read(file_inode(file));
if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
@@ -527,7 +515,7 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
{
struct ahash_request *req;
struct scatterlist sg;
- struct ahash_completion res;
+ struct crypto_wait wait;
int rc, ahash_rc = 0;
hash->length = crypto_ahash_digestsize(tfm);
@@ -536,12 +524,12 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
if (!req)
return -ENOMEM;
- init_completion(&res.completion);
+ crypto_init_wait(&wait);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
- ahash_complete, &res);
+ crypto_req_done, &wait);
- rc = ahash_wait(crypto_ahash_init(req), &res);
+ rc = ahash_wait(crypto_ahash_init(req), &wait);
if (rc)
goto out;
@@ -551,10 +539,10 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
ahash_rc = crypto_ahash_update(req);
/* wait for the update request to complete */
- rc = ahash_wait(ahash_rc, &res);
+ rc = ahash_wait(ahash_rc, &wait);
if (!rc) {
ahash_request_set_crypt(req, NULL, hash->digest, 0);
- rc = ahash_wait(crypto_ahash_final(req), &res);
+ rc = ahash_wait(crypto_ahash_final(req), &wait);
}
out:
ahash_request_free(req);
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index ad491c51e833..fa540c0469da 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -32,7 +32,7 @@ bool ima_canonical_fmt;
static int __init default_canonical_fmt_setup(char *str)
{
#ifdef __BIG_ENDIAN
- ima_canonical_fmt = 1;
+ ima_canonical_fmt = true;
#endif
return 1;
}
@@ -429,10 +429,10 @@ static int ima_release_policy(struct inode *inode, struct file *file)
}
ima_update_policy();
-#ifndef CONFIG_IMA_WRITE_POLICY
+#if !defined(CONFIG_IMA_WRITE_POLICY) && !defined(CONFIG_IMA_READ_POLICY)
securityfs_remove(ima_policy);
ima_policy = NULL;
-#else
+#elif defined(CONFIG_IMA_WRITE_POLICY)
clear_bit(IMA_FS_BUSY, &ima_fs_flags);
#endif
return 0;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 2aebb7984437..770654694efc 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -51,6 +51,8 @@ static int __init hash_setup(char *str)
ima_hash_algo = HASH_ALGO_SHA1;
else if (strncmp(str, "md5", 3) == 0)
ima_hash_algo = HASH_ALGO_MD5;
+ else
+ return 1;
goto out;
}
@@ -60,6 +62,8 @@ static int __init hash_setup(char *str)
break;
}
}
+ if (i == HASH_ALGO__LAST)
+ return 1;
out:
hash_setup_done = 1;
return 1;
@@ -235,11 +239,8 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
hash_algo = ima_get_hash_algo(xattr_value, xattr_len);
rc = ima_collect_measurement(iint, file, buf, size, hash_algo);
- if (rc != 0) {
- if (file->f_flags & O_DIRECT)
- rc = (iint->flags & IMA_PERMIT_DIRECTIO) ? 0 : -EACCES;
+ if (rc != 0 && rc != -EBADF && rc != -EINVAL)
goto out_digsig;
- }
if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */
pathname = ima_d_path(&file->f_path, &pathbuf, filename);
@@ -247,12 +248,14 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
if (action & IMA_MEASURE)
ima_store_measurement(iint, file, pathname,
xattr_value, xattr_len, pcr);
- if (action & IMA_APPRAISE_SUBMASK)
+ if (rc == 0 && (action & IMA_APPRAISE_SUBMASK))
rc = ima_appraise_measurement(func, iint, file, pathname,
xattr_value, xattr_len, opened);
if (action & IMA_AUDIT)
ima_audit_measurement(iint, pathname);
+ if ((file->f_flags & O_DIRECT) && (iint->flags & IMA_PERMIT_DIRECTIO))
+ rc = 0;
out_digsig:
if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG) &&
!(iint->flags & IMA_NEW_FILE))
@@ -359,12 +362,12 @@ void ima_post_path_mknod(struct dentry *dentry)
*/
int ima_read_file(struct file *file, enum kernel_read_file_id read_id)
{
+ bool sig_enforce = is_module_sig_enforced();
+
if (!file && read_id == READING_MODULE) {
-#ifndef CONFIG_MODULE_SIG_FORCE
- if ((ima_appraise & IMA_APPRAISE_MODULES) &&
+ if (!sig_enforce && (ima_appraise & IMA_APPRAISE_MODULES) &&
(ima_appraise & IMA_APPRAISE_ENFORCE))
return -EACCES; /* INTEGRITY_UNKNOWN */
-#endif
return 0; /* We rely on module signature checking */
}
return 0;
@@ -406,6 +409,10 @@ int ima_post_read_file(struct file *file, void *buf, loff_t size,
if (!file && read_id == READING_MODULE) /* MODULE_SIG_FORCE enabled */
return 0;
+ /* permit signed certs */
+ if (!file && read_id == READING_X509_CERTIFICATE)
+ return 0;
+
if (!file || !buf || size == 0) { /* should never happen */
if (ima_appraise & IMA_APPRAISE_ENFORCE)
return -EACCES;
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 95209a5f8595..ee4613fa5840 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -196,9 +196,9 @@ static int __init policy_setup(char *str)
if ((strcmp(p, "tcb") == 0) && !ima_policy)
ima_policy = DEFAULT_TCB;
else if (strcmp(p, "appraise_tcb") == 0)
- ima_use_appraise_tcb = 1;
+ ima_use_appraise_tcb = true;
else if (strcmp(p, "secure_boot") == 0)
- ima_use_secure_boot = 1;
+ ima_use_secure_boot = true;
}
return 1;
@@ -207,7 +207,7 @@ __setup("ima_policy=", policy_setup);
static int __init default_appraise_policy_setup(char *str)
{
- ima_use_appraise_tcb = 1;
+ ima_use_appraise_tcb = true;
return 1;
}
__setup("ima_appraise_tcb", default_appraise_policy_setup);
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index a53e7e4ab06c..e1bf040fb110 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -120,8 +120,6 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
int integrity_kernel_read(struct file *file, loff_t offset,
void *addr, unsigned long count);
-int __init integrity_read_file(const char *path, char **data);
-
#define INTEGRITY_KEYRING_EVM 0
#define INTEGRITY_KEYRING_IMA 1
#define INTEGRITY_KEYRING_MODULE 2
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 2e3a627fc0b1..8644d864e3c1 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2935,13 +2935,12 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
{
const struct task_security_struct *tsec = current_security();
struct superblock_security_struct *sbsec;
- u32 sid, newsid, clen;
+ u32 newsid, clen;
int rc;
char *context;
sbsec = dir->i_sb->s_security;
- sid = tsec->sid;
newsid = tsec->create_sid;
rc = selinux_determine_inode_label(current_security(),
@@ -3141,27 +3140,6 @@ static int selinux_inode_getattr(const struct path *path)
return path_has_perm(current_cred(), path, FILE__GETATTR);
}
-static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name)
-{
- const struct cred *cred = current_cred();
-
- if (!strncmp(name, XATTR_SECURITY_PREFIX,
- sizeof XATTR_SECURITY_PREFIX - 1)) {
- if (!strcmp(name, XATTR_NAME_CAPS)) {
- if (!capable(CAP_SETFCAP))
- return -EPERM;
- } else if (!capable(CAP_SYS_ADMIN)) {
- /* A different attribute in the security namespace.
- Restrict to administrator. */
- return -EPERM;
- }
- }
-
- /* Not an attribute we recognize, so just check the
- ordinary setattr permission. */
- return dentry_has_perm(cred, dentry, FILE__SETATTR);
-}
-
static bool has_cap_mac_admin(bool audit)
{
const struct cred *cred = current_cred();
@@ -3184,8 +3162,15 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
u32 newsid, sid = current_sid();
int rc = 0;
- if (strcmp(name, XATTR_NAME_SELINUX))
- return selinux_inode_setotherxattr(dentry, name);
+ if (strcmp(name, XATTR_NAME_SELINUX)) {
+ rc = cap_inode_setxattr(dentry, name, value, size, flags);
+ if (rc)
+ return rc;
+
+ /* Not an attribute we recognize, so just check the
+ ordinary setattr permission. */
+ return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
+ }
sbsec = inode->i_sb->s_security;
if (!(sbsec->flags & SBLABEL_MNT))
@@ -3208,18 +3193,17 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
if (!has_cap_mac_admin(true)) {
struct audit_buffer *ab;
size_t audit_size;
- const char *str;
/* We strip a nul only if it is at the end, otherwise the
* context contains a nul and we should audit that */
if (value) {
- str = value;
+ const char *str = value;
+
if (str[size - 1] == '\0')
audit_size = size - 1;
else
audit_size = size;
} else {
- str = "";
audit_size = 0;
}
ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR);
@@ -3299,8 +3283,15 @@ static int selinux_inode_listxattr(struct dentry *dentry)
static int selinux_inode_removexattr(struct dentry *dentry, const char *name)
{
- if (strcmp(name, XATTR_NAME_SELINUX))
- return selinux_inode_setotherxattr(dentry, name);
+ if (strcmp(name, XATTR_NAME_SELINUX)) {
+ int rc = cap_inode_removexattr(dentry, name);
+ if (rc)
+ return rc;
+
+ /* Not an attribute we recognize, so just check the
+ ordinary setattr permission. */
+ return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
+ }
/* No one is allowed to remove a SELinux security label.
You can change the label, but all data must be labeled. */
@@ -3995,8 +3986,8 @@ static int selinux_task_getioprio(struct task_struct *p)
PROCESS__GETSCHED, NULL);
}
-int selinux_task_prlimit(const struct cred *cred, const struct cred *tcred,
- unsigned int flags)
+static int selinux_task_prlimit(const struct cred *cred, const struct cred *tcred,
+ unsigned int flags)
{
u32 av = 0;
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 771c96afe1d5..c91543a617ac 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -361,7 +361,6 @@ static int cond_read_av_list(struct policydb *p, void *fp, struct cond_av_list *
*ret_list = NULL;
- len = 0;
rc = next_entry(buf, fp, sizeof(u32));
if (rc)
return rc;
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
index 6bd6dcd954fa..fe25b3fb2154 100644
--- a/security/selinux/ss/hashtab.c
+++ b/security/selinux/ss/hashtab.c
@@ -10,6 +10,8 @@
#include <linux/sched.h>
#include "hashtab.h"
+static struct kmem_cache *hashtab_node_cachep;
+
struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key),
int (*keycmp)(struct hashtab *h, const void *key1, const void *key2),
u32 size)
@@ -58,7 +60,7 @@ int hashtab_insert(struct hashtab *h, void *key, void *datum)
if (cur && (h->keycmp(h, key, cur->key) == 0))
return -EEXIST;
- newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
+ newnode = kmem_cache_zalloc(hashtab_node_cachep, GFP_KERNEL);
if (!newnode)
return -ENOMEM;
newnode->key = key;
@@ -107,7 +109,7 @@ void hashtab_destroy(struct hashtab *h)
while (cur) {
temp = cur;
cur = cur->next;
- kfree(temp);
+ kmem_cache_free(hashtab_node_cachep, temp);
}
h->htable[i] = NULL;
}
@@ -149,7 +151,7 @@ void hashtab_stat(struct hashtab *h, struct hashtab_info *info)
slots_used = 0;
max_chain_len = 0;
- for (slots_used = max_chain_len = i = 0; i < h->size; i++) {
+ for (i = 0; i < h->size; i++) {
cur = h->htable[i];
if (cur) {
slots_used++;
@@ -167,3 +169,14 @@ void hashtab_stat(struct hashtab *h, struct hashtab_info *info)
info->slots_used = slots_used;
info->max_chain_len = max_chain_len;
}
+void hashtab_cache_init(void)
+{
+ hashtab_node_cachep = kmem_cache_create("hashtab_node",
+ sizeof(struct hashtab_node),
+ 0, SLAB_PANIC, NULL);
+}
+
+void hashtab_cache_destroy(void)
+{
+ kmem_cache_destroy(hashtab_node_cachep);
+}
diff --git a/security/selinux/ss/hashtab.h b/security/selinux/ss/hashtab.h
index 3e3e42bfd150..6183ee2a2e7a 100644
--- a/security/selinux/ss/hashtab.h
+++ b/security/selinux/ss/hashtab.h
@@ -85,4 +85,8 @@ int hashtab_map(struct hashtab *h,
/* Fill info with some hash table statistics */
void hashtab_stat(struct hashtab *h, struct hashtab_info *info);
+/* Use kmem_cache for hashtab_node */
+void hashtab_cache_init(void);
+void hashtab_cache_destroy(void);
+
#endif /* _SS_HASHTAB_H */
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index e4a1c0dc561a..33cfe5d3d6cb 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -2060,10 +2060,12 @@ int security_load_policy(void *data, size_t len)
if (!ss_initialized) {
avtab_cache_init();
ebitmap_cache_init();
+ hashtab_cache_init();
rc = policydb_read(&policydb, fp);
if (rc) {
avtab_cache_destroy();
ebitmap_cache_destroy();
+ hashtab_cache_destroy();
goto out;
}
@@ -2075,6 +2077,7 @@ int security_load_policy(void *data, size_t len)
policydb_destroy(&policydb);
avtab_cache_destroy();
ebitmap_cache_destroy();
+ hashtab_cache_destroy();
goto out;
}
@@ -2083,6 +2086,7 @@ int security_load_policy(void *data, size_t len)
policydb_destroy(&policydb);
avtab_cache_destroy();
ebitmap_cache_destroy();
+ hashtab_cache_destroy();
goto out;
}
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 286171a16ed2..14cc7940b36d 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -4600,6 +4600,82 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
return 0;
}
+static int smack_inode_copy_up(struct dentry *dentry, struct cred **new)
+{
+
+ struct task_smack *tsp;
+ struct smack_known *skp;
+ struct inode_smack *isp;
+ struct cred *new_creds = *new;
+
+ if (new_creds == NULL) {
+ new_creds = prepare_creds();
+ if (new_creds == NULL)
+ return -ENOMEM;
+ }
+
+ tsp = new_creds->security;
+
+ /*
+ * Get label from overlay inode and set it in create_sid
+ */
+ isp = d_inode(dentry->d_parent)->i_security;
+ skp = isp->smk_inode;
+ tsp->smk_task = skp;
+ *new = new_creds;
+ return 0;
+}
+
+static int smack_inode_copy_up_xattr(const char *name)
+{
+ /*
+ * Return 1 if this is the Smack access attribute.
+ */
+ if (strcmp(name, XATTR_NAME_SMACK) == 0)
+ return 1;
+
+ return -EOPNOTSUPP;
+}
+
+static int smack_dentry_create_files_as(struct dentry *dentry, int mode,
+ struct qstr *name,
+ const struct cred *old,
+ struct cred *new)
+{
+ struct task_smack *otsp = old->security;
+ struct task_smack *ntsp = new->security;
+ struct inode_smack *isp;
+ int may;
+
+ /*
+ * Use the process credential unless all of
+ * the transmuting criteria are met
+ */
+ ntsp->smk_task = otsp->smk_task;
+
+ /*
+ * the attribute of the containing directory
+ */
+ isp = d_inode(dentry->d_parent)->i_security;
+
+ if (isp->smk_flags & SMK_INODE_TRANSMUTE) {
+ rcu_read_lock();
+ may = smk_access_entry(otsp->smk_task->smk_known,
+ isp->smk_inode->smk_known,
+ &otsp->smk_task->smk_rules);
+ rcu_read_unlock();
+
+ /*
+ * If the directory is transmuting and the rule
+ * providing access is transmuting use the containing
+ * directory label instead of the process label.
+ */
+ if (may > 0 && (may & MAY_TRANSMUTE))
+ ntsp->smk_task = isp->smk_inode;
+ }
+ return 0;
+}
+
static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, smack_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, smack_ptrace_traceme),
@@ -4735,6 +4811,9 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(inode_notifysecctx, smack_inode_notifysecctx),
LSM_HOOK_INIT(inode_setsecctx, smack_inode_setsecctx),
LSM_HOOK_INIT(inode_getsecctx, smack_inode_getsecctx),
+ LSM_HOOK_INIT(inode_copy_up, smack_inode_copy_up),
+ LSM_HOOK_INIT(inode_copy_up_xattr, smack_inode_copy_up_xattr),
+ LSM_HOOK_INIT(dentry_create_files_as, smack_dentry_create_files_as),
};
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
index d330b060dcff..0f73fe30e37a 100644
--- a/security/tomoyo/audit.c
+++ b/security/tomoyo/audit.c
@@ -157,7 +157,7 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
if (!buffer)
return NULL;
- tomoyo_convert_time(get_seconds(), &stamp);
+ tomoyo_convert_time(ktime_get_real_seconds(), &stamp);
pos = snprintf(buffer, tomoyo_buffer_len - 1,
"#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s "
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 21691b99e61f..25eed4b0b0e8 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -2257,7 +2257,7 @@ static const char * const tomoyo_memory_headers[TOMOYO_MAX_MEMORY_STAT] = {
/* Timestamp counter for last updated. */
static unsigned int tomoyo_stat_updated[TOMOYO_MAX_POLICY_STAT];
/* Counter for number of updates. */
-static unsigned int tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT];
+static time64_t tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT];
/**
* tomoyo_update_stat - Update statistic counters.
@@ -2272,7 +2272,7 @@ void tomoyo_update_stat(const u8 index)
* I don't use atomic operations because race condition is not fatal.
*/
tomoyo_stat_updated[index]++;
- tomoyo_stat_modified[index] = get_seconds();
+ tomoyo_stat_modified[index] = ktime_get_real_seconds();
}
/**
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index e4097d7994b1..7adccdd8e36d 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -1037,7 +1037,7 @@ void tomoyo_check_acl(struct tomoyo_request_info *r,
bool (*check_entry) (struct tomoyo_request_info *,
const struct tomoyo_acl_info *));
void tomoyo_check_profile(void);
-void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp);
+void tomoyo_convert_time(time64_t time, struct tomoyo_time *stamp);
void tomoyo_del_condition(struct list_head *element);
void tomoyo_fill_path_info(struct tomoyo_path_info *ptr);
void tomoyo_get_attributes(struct tomoyo_obj_info *obj);
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index 580b318910f1..d3d9d9f1edb0 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -87,38 +87,17 @@ const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX] = {
* @stamp: Pointer to "struct tomoyo_time".
*
* Returns nothing.
- *
- * This function does not handle Y2038 problem.
*/
-void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp)
+void tomoyo_convert_time(time64_t time64, struct tomoyo_time *stamp)
{
- static const u16 tomoyo_eom[2][12] = {
- { 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
- { 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
- };
- u16 y;
- u8 m;
- bool r;
- stamp->sec = time % 60;
- time /= 60;
- stamp->min = time % 60;
- time /= 60;
- stamp->hour = time % 24;
- time /= 24;
- for (y = 1970; ; y++) {
- const unsigned short days = (y & 3) ? 365 : 366;
- if (time < days)
- break;
- time -= days;
- }
- r = (y & 3) == 0;
- for (m = 0; m < 11 && time >= tomoyo_eom[r][m]; m++)
- ;
- if (m)
- time -= tomoyo_eom[r][m - 1];
- stamp->year = y;
- stamp->month = ++m;
- stamp->day = ++time;
+ struct tm tm;
+ time64_to_tm(time64, 0, &tm);
+ stamp->sec = tm.tm_sec;
+ stamp->min = tm.tm_min;
+ stamp->hour = tm.tm_hour;
+ stamp->day = tm.tm_mday;
+ stamp->month = tm.tm_mon + 1;
+ stamp->year = tm.tm_year + 1900;
}
/**
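
The tomoyo_convert_time() rewrite above drops the open-coded calendar arithmetic in favour of the generic time64_to_tm() helper, which also removes the Y2038 limitation noted in the deleted comment. A minimal sketch of the same idiom follows; the example_print_wallclock() helper is an illustrative assumption.

#include <linux/printk.h>
#include <linux/time.h>
#include <linux/timekeeping.h>

static void example_print_wallclock(void)
{
	struct tm tm;

	/* Break the current wall-clock seconds (UTC, offset 0) into fields. */
	time64_to_tm(ktime_get_real_seconds(), 0, &tm);

	pr_info("%04ld/%02d/%02d %02d:%02d:%02d\n",
		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		tm.tm_hour, tm.tm_min, tm.tm_sec);
}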
diff --git a/sound/Kconfig b/sound/Kconfig
index d7d2aac9542e..6833db9002ec 100644
--- a/sound/Kconfig
+++ b/sound/Kconfig
@@ -3,25 +3,7 @@ menuconfig SOUND
depends on HAS_IOMEM
help
If you have a sound card in your computer, i.e. if it can say more
- than an occasional beep, say Y. Be sure to have all the information
- about your sound card and its configuration down (I/O port,
- interrupt and DMA channel), because you will be asked for it.
-
- You want to read the Sound-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. General information about
- the modular sound system is contained in the files
- <file:Documentation/sound/oss/Introduction>. The file
- <file:Documentation/sound/oss/README.OSS> contains some slightly
- outdated but still useful information as well. Newer sound
- driver documentation is found in <file:Documentation/sound/alsa/*>.
-
- If you have a PnP sound card and you want to configure it at boot
- time using the ISA PnP tools (read
- <http://www.roestock.demon.co.uk/isapnptools/>), then you need to
- compile the sound card support as a module and load that module
- after the PnP configuration is finished. To do this, choose M here
- and read <file:Documentation/sound/oss/README.modules>; the module
- will be called soundcore.
+ than an occasional beep, say Y.
if SOUND
@@ -80,6 +62,8 @@ source "sound/hda/Kconfig"
source "sound/ppc/Kconfig"
+source "sound/ac97/Kconfig"
+
source "sound/aoa/Kconfig"
source "sound/arm/Kconfig"
@@ -114,19 +98,6 @@ source "sound/synth/Kconfig"
endif # SND
-menuconfig SOUND_PRIME
- tristate "Open Sound System (DEPRECATED)"
- select SOUND_OSS_CORE
- depends on BROKEN
- help
- Say 'Y' or 'M' to enable Open Sound System drivers.
-
-if SOUND_PRIME
-
-source "sound/oss/Kconfig"
-
-endif # SOUND_PRIME
-
endif # !UML
endif # SOUND
diff --git a/sound/Makefile b/sound/Makefile
index f2d1d093bcdc..99d8c31262c8 100644
--- a/sound/Makefile
+++ b/sound/Makefile
@@ -3,14 +3,14 @@
#
obj-$(CONFIG_SOUND) += soundcore.o
-obj-$(CONFIG_SOUND_PRIME) += oss/
-obj-$(CONFIG_DMASOUND) += oss/
+obj-$(CONFIG_DMASOUND) += oss/dmasound/
obj-$(CONFIG_SND) += core/ i2c/ drivers/ isa/ pci/ ppc/ arm/ sh/ synth/ usb/ \
firewire/ sparc/ spi/ parisc/ pcmcia/ mips/ soc/ atmel/ hda/ x86/
obj-$(CONFIG_SND_AOA) += aoa/
# This one must be compilable even if sound is configured out
obj-$(CONFIG_AC97_BUS) += ac97_bus.o
+obj-$(CONFIG_AC97_BUS_NEW) += ac97/
ifeq ($(CONFIG_SND),y)
obj-y += last.o
diff --git a/sound/ac97/Kconfig b/sound/ac97/Kconfig
new file mode 100644
index 000000000000..f8a64e15e5bf
--- /dev/null
+++ b/sound/ac97/Kconfig
@@ -0,0 +1,19 @@
+#
+# AC97 configuration
+#
+
+
+config AC97_BUS_NEW
+ tristate
+ select AC97
+ help
+ This is the new AC97 bus type, the successor of AC97_BUS. Ported
+ drivers that benefit from automatic AC97 probing should select
+ this symbol instead of AC97_BUS.
+ Say Y here if you want support for AC97 devices, i.e. sound-oriented
+ devices attached to an AC-Link.
+
+config AC97_BUS_COMPAT
+ bool
+ depends on AC97_BUS_NEW
+ depends on !AC97_BUS
diff --git a/sound/ac97/Makefile b/sound/ac97/Makefile
new file mode 100644
index 000000000000..f9c2640bfb59
--- /dev/null
+++ b/sound/ac97/Makefile
@@ -0,0 +1,8 @@
+#
+# make for AC97 bus drivers
+#
+
+obj-$(CONFIG_AC97_BUS_NEW) += ac97.o
+
+ac97-y += bus.o codec.o
+ac97-$(CONFIG_AC97_BUS_COMPAT) += snd_ac97_compat.o
diff --git a/sound/ac97/ac97_core.h b/sound/ac97/ac97_core.h
new file mode 100644
index 000000000000..08441a4fda7c
--- /dev/null
+++ b/sound/ac97/ac97_core.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+unsigned int snd_ac97_bus_scan_one(struct ac97_controller *ac97,
+ unsigned int codec_num);
+
+static inline bool ac97_ids_match(unsigned int id1, unsigned int id2,
+ unsigned int mask)
+{
+ return (id1 & mask) == (id2 & mask);
+}
diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
new file mode 100644
index 000000000000..31f858eceffc
--- /dev/null
+++ b/sound/ac97/bus.c
@@ -0,0 +1,539 @@
+/*
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <sound/ac97/codec.h>
+#include <sound/ac97/controller.h>
+#include <sound/ac97/regs.h>
+
+#include "ac97_core.h"
+
+/*
+ * Protects ac97_controllers and each ac97_controller structure.
+ */
+static DEFINE_MUTEX(ac97_controllers_mutex);
+static DEFINE_IDR(ac97_adapter_idr);
+static LIST_HEAD(ac97_controllers);
+
+static struct bus_type ac97_bus_type;
+
+static inline struct ac97_controller*
+to_ac97_controller(struct device *ac97_adapter)
+{
+ return container_of(ac97_adapter, struct ac97_controller, adap);
+}
+
+static int ac97_unbound_ctrl_write(struct ac97_controller *adrv, int slot,
+ unsigned short reg, unsigned short val)
+{
+ return -ENODEV;
+}
+
+static int ac97_unbound_ctrl_read(struct ac97_controller *adrv, int slot,
+ unsigned short reg)
+{
+ return -ENODEV;
+}
+
+static const struct ac97_controller_ops ac97_unbound_ctrl_ops = {
+ .write = ac97_unbound_ctrl_write,
+ .read = ac97_unbound_ctrl_read,
+};
+
+static struct ac97_controller ac97_unbound_ctrl = {
+ .ops = &ac97_unbound_ctrl_ops,
+};
+
+static struct ac97_codec_device *
+ac97_codec_find(struct ac97_controller *ac97_ctrl, unsigned int codec_num)
+{
+ if (codec_num >= AC97_BUS_MAX_CODECS)
+ return ERR_PTR(-EINVAL);
+
+ return ac97_ctrl->codecs[codec_num];
+}
+
+static void ac97_codec_release(struct device *dev)
+{
+ struct ac97_codec_device *adev;
+ struct ac97_controller *ac97_ctrl;
+
+ adev = to_ac97_device(dev);
+ ac97_ctrl = adev->ac97_ctrl;
+ ac97_ctrl->codecs[adev->num] = NULL;
+ kfree(adev);
+}
+
+static int ac97_codec_add(struct ac97_controller *ac97_ctrl, int idx,
+ unsigned int vendor_id)
+{
+ struct ac97_codec_device *codec;
+ int ret;
+
+ codec = kzalloc(sizeof(*codec), GFP_KERNEL);
+ if (!codec)
+ return -ENOMEM;
+ ac97_ctrl->codecs[idx] = codec;
+ codec->vendor_id = vendor_id;
+ codec->dev.release = ac97_codec_release;
+ codec->dev.bus = &ac97_bus_type;
+ codec->dev.parent = &ac97_ctrl->adap;
+ codec->num = idx;
+ codec->ac97_ctrl = ac97_ctrl;
+
+ device_initialize(&codec->dev);
+ dev_set_name(&codec->dev, "%s:%u", dev_name(ac97_ctrl->parent), idx);
+
+ ret = device_add(&codec->dev);
+ if (ret)
+ goto err_free_codec;
+
+ return 0;
+err_free_codec:
+ put_device(&codec->dev);
+ kfree(codec);
+ ac97_ctrl->codecs[idx] = NULL;
+
+ return ret;
+}
+
+unsigned int snd_ac97_bus_scan_one(struct ac97_controller *adrv,
+ unsigned int codec_num)
+{
+ unsigned short vid1, vid2;
+ int ret;
+
+ ret = adrv->ops->read(adrv, codec_num, AC97_VENDOR_ID1);
+ vid1 = (ret & 0xffff);
+ if (ret < 0)
+ return 0;
+
+ ret = adrv->ops->read(adrv, codec_num, AC97_VENDOR_ID2);
+ vid2 = (ret & 0xffff);
+ if (ret < 0)
+ return 0;
+
+ dev_dbg(&adrv->adap, "%s(codec_num=%u): vendor_id=0x%08x\n",
+ __func__, codec_num, AC97_ID(vid1, vid2));
+ return AC97_ID(vid1, vid2);
+}
+
+static int ac97_bus_scan(struct ac97_controller *ac97_ctrl)
+{
+ int ret, i;
+ unsigned int vendor_id;
+
+ for (i = 0; i < AC97_BUS_MAX_CODECS; i++) {
+ if (ac97_codec_find(ac97_ctrl, i))
+ continue;
+ if (!(ac97_ctrl->slots_available & BIT(i)))
+ continue;
+ vendor_id = snd_ac97_bus_scan_one(ac97_ctrl, i);
+ if (!vendor_id)
+ continue;
+
+ ret = ac97_codec_add(ac97_ctrl, i, vendor_id);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int ac97_bus_reset(struct ac97_controller *ac97_ctrl)
+{
+ ac97_ctrl->ops->reset(ac97_ctrl);
+
+ return 0;
+}
+
+/**
+ * snd_ac97_codec_driver_register - register an AC97 codec driver
+ * @drv: AC97 codec driver to register
+ *
+ * Register an AC97 codec driver to the ac97 bus driver, aka. the AC97 digital
+ * controller.
+ *
+ * Returns 0 on success or a negative error code
+ */
+int snd_ac97_codec_driver_register(struct ac97_codec_driver *drv)
+{
+ drv->driver.bus = &ac97_bus_type;
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(snd_ac97_codec_driver_register);
+
+/**
+ * snd_ac97_codec_driver_unregister - unregister an AC97 codec driver
+ * @drv: AC97 codec driver to unregister
+ *
+ * Unregister a previously registered ac97 codec driver.
+ */
+void snd_ac97_codec_driver_unregister(struct ac97_codec_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(snd_ac97_codec_driver_unregister);
+
+/**
+ * snd_ac97_codec_get_platdata - get platform_data
+ * @adev: the ac97 codec device
+ *
+ * On legacy platforms, ac97 codec devices are auto-created upon probe, so a
+ * codec driver cannot be handed platform_data directly; this retrieves the
+ * platform data that was set up at ac97 controller registration time.
+ *
+ * Returns the platform data pointer
+ */
+void *snd_ac97_codec_get_platdata(const struct ac97_codec_device *adev)
+{
+ struct ac97_controller *ac97_ctrl = adev->ac97_ctrl;
+
+ return ac97_ctrl->codecs_pdata[adev->num];
+}
+EXPORT_SYMBOL_GPL(snd_ac97_codec_get_platdata);
+
+static void ac97_ctrl_codecs_unregister(struct ac97_controller *ac97_ctrl)
+{
+ int i;
+
+ for (i = 0; i < AC97_BUS_MAX_CODECS; i++)
+ if (ac97_ctrl->codecs[i]) {
+ ac97_ctrl->codecs[i]->ac97_ctrl = &ac97_unbound_ctrl;
+ device_unregister(&ac97_ctrl->codecs[i]->dev);
+ }
+}
+
+static ssize_t cold_reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ struct ac97_controller *ac97_ctrl;
+
+ mutex_lock(&ac97_controllers_mutex);
+ ac97_ctrl = to_ac97_controller(dev);
+ ac97_ctrl->ops->reset(ac97_ctrl);
+ mutex_unlock(&ac97_controllers_mutex);
+ return len;
+}
+static DEVICE_ATTR_WO(cold_reset);
+
+static ssize_t warm_reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ struct ac97_controller *ac97_ctrl;
+
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&ac97_controllers_mutex);
+ ac97_ctrl = to_ac97_controller(dev);
+ ac97_ctrl->ops->warm_reset(ac97_ctrl);
+ mutex_unlock(&ac97_controllers_mutex);
+ return len;
+}
+static DEVICE_ATTR_WO(warm_reset);
+
+static struct attribute *ac97_controller_device_attrs[] = {
+ &dev_attr_cold_reset.attr,
+ &dev_attr_warm_reset.attr,
+ NULL
+};
+
+static struct attribute_group ac97_adapter_attr_group = {
+ .name = "ac97_operations",
+ .attrs = ac97_controller_device_attrs,
+};
+
+static const struct attribute_group *ac97_adapter_groups[] = {
+ &ac97_adapter_attr_group,
+ NULL,
+};
+
+static void ac97_del_adapter(struct ac97_controller *ac97_ctrl)
+{
+ mutex_lock(&ac97_controllers_mutex);
+ ac97_ctrl_codecs_unregister(ac97_ctrl);
+ list_del(&ac97_ctrl->controllers);
+ mutex_unlock(&ac97_controllers_mutex);
+
+ device_unregister(&ac97_ctrl->adap);
+}
+
+static void ac97_adapter_release(struct device *dev)
+{
+ struct ac97_controller *ac97_ctrl;
+
+ ac97_ctrl = to_ac97_controller(dev);
+ idr_remove(&ac97_adapter_idr, ac97_ctrl->nr);
+ dev_dbg(&ac97_ctrl->adap, "adapter unregistered by %s\n",
+ dev_name(ac97_ctrl->parent));
+}
+
+static const struct device_type ac97_adapter_type = {
+ .groups = ac97_adapter_groups,
+ .release = ac97_adapter_release,
+};
+
+static int ac97_add_adapter(struct ac97_controller *ac97_ctrl)
+{
+ int ret;
+
+ mutex_lock(&ac97_controllers_mutex);
+ ret = idr_alloc(&ac97_adapter_idr, ac97_ctrl, 0, 0, GFP_KERNEL);
+ ac97_ctrl->nr = ret;
+ if (ret >= 0) {
+ dev_set_name(&ac97_ctrl->adap, "ac97-%d", ret);
+ ac97_ctrl->adap.type = &ac97_adapter_type;
+ ac97_ctrl->adap.parent = ac97_ctrl->parent;
+ ret = device_register(&ac97_ctrl->adap);
+ if (ret)
+ put_device(&ac97_ctrl->adap);
+ }
+ if (!ret)
+ list_add(&ac97_ctrl->controllers, &ac97_controllers);
+ mutex_unlock(&ac97_controllers_mutex);
+
+ if (!ret)
+ dev_dbg(&ac97_ctrl->adap, "adapter registered by %s\n",
+ dev_name(ac97_ctrl->parent));
+ return ret;
+}
+
+/**
+ * snd_ac97_controller_register - register an ac97 controller
+ * @ops: the ac97 bus operations
+ * @dev: the device providing the ac97 digital controller (DC) function
+ * @slots_available: mask of the ac97 codecs that can be scanned and probed;
+ * bit0 => codec 0, bit1 => codec 1 ... bit3 => codec 3
+ * @codecs_pdata: optional table of per-codec platform data pointers
+ *
+ * Register a digital controller which can control up to 4 ac97 codecs. This is
+ * the controller side of the AC97 AC-link; the codecs are the slave side.
+ *
+ * Returns a valid controller upon success, or an ERR_PTR()-encoded error code
+ * upon failure
+ */
+struct ac97_controller *snd_ac97_controller_register(
+ const struct ac97_controller_ops *ops, struct device *dev,
+ unsigned short slots_available, void **codecs_pdata)
+{
+ struct ac97_controller *ac97_ctrl;
+ int ret, i;
+
+ ac97_ctrl = kzalloc(sizeof(*ac97_ctrl), GFP_KERNEL);
+ if (!ac97_ctrl)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < AC97_BUS_MAX_CODECS && codecs_pdata; i++)
+ ac97_ctrl->codecs_pdata[i] = codecs_pdata[i];
+
+ ac97_ctrl->ops = ops;
+ ac97_ctrl->slots_available = slots_available;
+ ac97_ctrl->parent = dev;
+ ret = ac97_add_adapter(ac97_ctrl);
+
+ if (ret)
+ goto err;
+ ac97_bus_reset(ac97_ctrl);
+ ac97_bus_scan(ac97_ctrl);
+
+ return ac97_ctrl;
+err:
+ kfree(ac97_ctrl);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(snd_ac97_controller_register);
+
+/**
+ * snd_ac97_controller_unregister - unregister an ac97 controller
+ * @ac97_ctrl: the controller previously returned by snd_ac97_controller_register()
+ *
+ */
+void snd_ac97_controller_unregister(struct ac97_controller *ac97_ctrl)
+{
+ ac97_del_adapter(ac97_ctrl);
+}
+EXPORT_SYMBOL_GPL(snd_ac97_controller_unregister);
+
+#ifdef CONFIG_PM
+static int ac97_pm_runtime_suspend(struct device *dev)
+{
+ struct ac97_codec_device *codec = to_ac97_device(dev);
+ int ret = pm_generic_runtime_suspend(dev);
+
+ if (ret == 0 && dev->driver) {
+ if (pm_runtime_is_irq_safe(dev))
+ clk_disable(codec->clk);
+ else
+ clk_disable_unprepare(codec->clk);
+ }
+
+ return ret;
+}
+
+static int ac97_pm_runtime_resume(struct device *dev)
+{
+ struct ac97_codec_device *codec = to_ac97_device(dev);
+ int ret;
+
+ if (dev->driver) {
+ if (pm_runtime_is_irq_safe(dev))
+ ret = clk_enable(codec->clk);
+ else
+ ret = clk_prepare_enable(codec->clk);
+ if (ret)
+ return ret;
+ }
+
+ return pm_generic_runtime_resume(dev);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops ac97_pm = {
+ .suspend = pm_generic_suspend,
+ .resume = pm_generic_resume,
+ .freeze = pm_generic_freeze,
+ .thaw = pm_generic_thaw,
+ .poweroff = pm_generic_poweroff,
+ .restore = pm_generic_restore,
+ SET_RUNTIME_PM_OPS(
+ ac97_pm_runtime_suspend,
+ ac97_pm_runtime_resume,
+ NULL)
+};
+
+static int ac97_get_enable_clk(struct ac97_codec_device *adev)
+{
+ int ret;
+
+ adev->clk = clk_get(&adev->dev, "ac97_clk");
+ if (IS_ERR(adev->clk))
+ return PTR_ERR(adev->clk);
+
+ ret = clk_prepare_enable(adev->clk);
+ if (ret)
+ clk_put(adev->clk);
+
+ return ret;
+}
+
+static void ac97_put_disable_clk(struct ac97_codec_device *adev)
+{
+ clk_disable_unprepare(adev->clk);
+ clk_put(adev->clk);
+}
+
+static ssize_t vendor_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ac97_codec_device *codec = to_ac97_device(dev);
+
+ return sprintf(buf, "%08x", codec->vendor_id);
+}
+DEVICE_ATTR_RO(vendor_id);
+
+static struct attribute *ac97_dev_attrs[] = {
+ &dev_attr_vendor_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ac97_dev);
+
+static int ac97_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct ac97_codec_device *adev = to_ac97_device(dev);
+ struct ac97_codec_driver *adrv = to_ac97_driver(drv);
+ const struct ac97_id *id = adrv->id_table;
+ int i = 0;
+
+ if (adev->vendor_id == 0x0 || adev->vendor_id == 0xffffffff)
+ return false;
+
+ do {
+ if (ac97_ids_match(id[i].id, adev->vendor_id, id[i].mask))
+ return true;
+ } while (id[i++].id);
+
+ return false;
+}
+
+static int ac97_bus_probe(struct device *dev)
+{
+ struct ac97_codec_device *adev = to_ac97_device(dev);
+ struct ac97_codec_driver *adrv = to_ac97_driver(dev->driver);
+ int ret;
+
+ ret = ac97_get_enable_clk(adev);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = adrv->probe(adev);
+ if (ret == 0)
+ return 0;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+ ac97_put_disable_clk(adev);
+
+ return ret;
+}
+
+static int ac97_bus_remove(struct device *dev)
+{
+ struct ac97_codec_device *adev = to_ac97_device(dev);
+ struct ac97_codec_driver *adrv = to_ac97_driver(dev->driver);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret)
+ return ret;
+
+ ret = adrv->remove(adev);
+ pm_runtime_put_noidle(dev);
+ if (ret == 0)
+ ac97_put_disable_clk(adev);
+
+ return ret;
+}
+
+static struct bus_type ac97_bus_type = {
+ .name = "ac97bus",
+ .dev_groups = ac97_dev_groups,
+ .match = ac97_bus_match,
+ .pm = &ac97_pm,
+ .probe = ac97_bus_probe,
+ .remove = ac97_bus_remove,
+};
+
+static int __init ac97_bus_init(void)
+{
+ return bus_register(&ac97_bus_type);
+}
+subsys_initcall(ac97_bus_init);
+
+static void __exit ac97_bus_exit(void)
+{
+ bus_unregister(&ac97_bus_type);
+}
+module_exit(ac97_bus_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
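
bus.c above provides only the bus-side plumbing; an AC97 digital-controller driver is expected to hand it an ac97_controller_ops table via snd_ac97_controller_register(). The following is a hedged sketch of such a registration: the my_* names, the empty hardware stubs and the single-slot mask are hypothetical, and only the ops members visible in this patch (.read, .write, .reset) are used.

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <sound/ac97/controller.h>

static int my_ac97_read(struct ac97_controller *adrv, int slot,
			unsigned short reg)
{
	/* Drive an AC-link read cycle here; return the 16-bit value or -errno. */
	return 0;
}

static int my_ac97_write(struct ac97_controller *adrv, int slot,
			 unsigned short reg, unsigned short val)
{
	/* Drive an AC-link write cycle here. */
	return 0;
}

static void my_ac97_reset(struct ac97_controller *adrv)
{
	/* Cold-reset the AC-link here. */
}

static const struct ac97_controller_ops my_ac97_ops = {
	.read	= my_ac97_read,
	.write	= my_ac97_write,
	.reset	= my_ac97_reset,
};

static int my_ac97_probe(struct platform_device *pdev)
{
	struct ac97_controller *ctrl;

	/* Only codec slot 0 is wired on this board; no per-codec platform data. */
	ctrl = snd_ac97_controller_register(&my_ac97_ops, &pdev->dev,
					    BIT(0), NULL);
	return PTR_ERR_OR_ZERO(ctrl);
}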
diff --git a/sound/ac97/codec.c b/sound/ac97/codec.c
new file mode 100644
index 000000000000..a835f03744bf
--- /dev/null
+++ b/sound/ac97/codec.c
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <sound/ac97_codec.h>
+#include <sound/ac97/codec.h>
+#include <sound/ac97/controller.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <sound/soc.h> /* For compat_ac97_* */
+
diff --git a/sound/ac97/snd_ac97_compat.c b/sound/ac97/snd_ac97_compat.c
new file mode 100644
index 000000000000..61544e0d8de4
--- /dev/null
+++ b/sound/ac97/snd_ac97_compat.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <sound/ac97/codec.h>
+#include <sound/ac97/compat.h>
+#include <sound/ac97/controller.h>
+#include <sound/soc.h>
+
+#include "ac97_core.h"
+
+static void compat_ac97_reset(struct snd_ac97 *ac97)
+{
+ struct ac97_codec_device *adev = to_ac97_device(ac97->private_data);
+ struct ac97_controller *actrl = adev->ac97_ctrl;
+
+ if (actrl->ops->reset)
+ actrl->ops->reset(actrl);
+}
+
+static void compat_ac97_warm_reset(struct snd_ac97 *ac97)
+{
+ struct ac97_codec_device *adev = to_ac97_device(ac97->private_data);
+ struct ac97_controller *actrl = adev->ac97_ctrl;
+
+ if (actrl->ops->warm_reset)
+ actrl->ops->warm_reset(actrl);
+}
+
+static void compat_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
+ unsigned short val)
+{
+ struct ac97_codec_device *adev = to_ac97_device(ac97->private_data);
+ struct ac97_controller *actrl = adev->ac97_ctrl;
+
+ actrl->ops->write(actrl, ac97->num, reg, val);
+}
+
+static unsigned short compat_ac97_read(struct snd_ac97 *ac97,
+ unsigned short reg)
+{
+ struct ac97_codec_device *adev = to_ac97_device(ac97->private_data);
+ struct ac97_controller *actrl = adev->ac97_ctrl;
+
+ return actrl->ops->read(actrl, ac97->num, reg);
+}
+
+static struct snd_ac97_bus_ops compat_snd_ac97_bus_ops = {
+ .reset = compat_ac97_reset,
+ .warm_reset = compat_ac97_warm_reset,
+ .write = compat_ac97_write,
+ .read = compat_ac97_read,
+};
+
+static struct snd_ac97_bus compat_soc_ac97_bus = {
+ .ops = &compat_snd_ac97_bus_ops,
+};
+
+struct snd_ac97 *snd_ac97_compat_alloc(struct ac97_codec_device *adev)
+{
+ struct snd_ac97 *ac97;
+
+ ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
+ if (ac97 == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ac97->dev = adev->dev;
+ ac97->private_data = adev;
+ ac97->bus = &compat_soc_ac97_bus;
+ return ac97;
+}
+EXPORT_SYMBOL_GPL(snd_ac97_compat_alloc);
+
+void snd_ac97_compat_release(struct snd_ac97 *ac97)
+{
+ kfree(ac97);
+}
+EXPORT_SYMBOL_GPL(snd_ac97_compat_release);
+
+int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
+ unsigned int id_mask)
+{
+ struct ac97_codec_device *adev = to_ac97_device(ac97->private_data);
+ struct ac97_controller *actrl = adev->ac97_ctrl;
+ unsigned int scanned;
+
+ if (try_warm) {
+ compat_ac97_warm_reset(ac97);
+ scanned = snd_ac97_bus_scan_one(actrl, adev->num);
+ if (ac97_ids_match(scanned, adev->vendor_id, id_mask))
+ return 1;
+ }
+
+ compat_ac97_reset(ac97);
+ compat_ac97_warm_reset(ac97);
+ scanned = snd_ac97_bus_scan_one(actrl, adev->num);
+ if (ac97_ids_match(scanned, adev->vendor_id, id_mask))
+ return 0;
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(snd_ac97_reset);
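
For codec drivers that still want a legacy struct snd_ac97, the compat layer above can wrap a new-style ac97_codec_device. A minimal sketch, assuming an already-probed adev and matching on the full vendor ID; my_codec_attach() is a hypothetical name.

#include <linux/err.h>
#include <sound/ac97/codec.h>
#include <sound/ac97/compat.h>

static int my_codec_attach(struct ac97_codec_device *adev)
{
	struct snd_ac97 *ac97;
	int ret;

	ac97 = snd_ac97_compat_alloc(adev);
	if (IS_ERR(ac97))
		return PTR_ERR(ac97);

	/* Try a warm reset first; fall back to a cold reset on ID mismatch. */
	ret = snd_ac97_reset(ac97, true, adev->vendor_id, 0xffffffff);
	if (ret < 0) {
		snd_ac97_compat_release(ac97);
		return ret;
	}
	return 0;
}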
diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c
index 39c3969ac1c7..5950a9e218d9 100644
--- a/sound/arm/pxa2xx-ac97-lib.c
+++ b/sound/arm/pxa2xx-ac97-lib.c
@@ -20,7 +20,6 @@
#include <linux/io.h>
#include <linux/gpio.h>
-#include <sound/ac97_codec.h>
#include <sound/pxa2xx-lib.h>
#include <mach/irqs.h>
@@ -46,38 +45,41 @@ extern void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio);
* 1 jiffy timeout if interrupt never comes).
*/
-unsigned short pxa2xx_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
+int pxa2xx_ac97_read(int slot, unsigned short reg)
{
- unsigned short val = -1;
+ int val = -ENODEV;
volatile u32 *reg_addr;
+ if (slot > 0)
+ return -ENODEV;
+
mutex_lock(&car_mutex);
/* set up primary or secondary codec space */
if (cpu_is_pxa25x() && reg == AC97_GPIO_STATUS)
- reg_addr = ac97->num ? &SMC_REG_BASE : &PMC_REG_BASE;
+ reg_addr = slot ? &SMC_REG_BASE : &PMC_REG_BASE;
else
- reg_addr = ac97->num ? &SAC_REG_BASE : &PAC_REG_BASE;
+ reg_addr = slot ? &SAC_REG_BASE : &PAC_REG_BASE;
reg_addr += (reg >> 1);
/* start read access across the ac97 link */
GSR = GSR_CDONE | GSR_SDONE;
gsr_bits = 0;
- val = *reg_addr;
+ val = (*reg_addr & 0xffff);
if (reg == AC97_GPIO_STATUS)
goto out;
if (wait_event_timeout(gsr_wq, (GSR | gsr_bits) & GSR_SDONE, 1) <= 0 &&
!((GSR | gsr_bits) & GSR_SDONE)) {
printk(KERN_ERR "%s: read error (ac97_reg=%d GSR=%#lx)\n",
__func__, reg, GSR | gsr_bits);
- val = -1;
+ val = -ETIMEDOUT;
goto out;
}
/* valid data now */
GSR = GSR_CDONE | GSR_SDONE;
gsr_bits = 0;
- val = *reg_addr;
+ val = (*reg_addr & 0xffff);
/* but we've just started another cycle... */
wait_event_timeout(gsr_wq, (GSR | gsr_bits) & GSR_SDONE, 1);
@@ -86,29 +88,32 @@ out: mutex_unlock(&car_mutex);
}
EXPORT_SYMBOL_GPL(pxa2xx_ac97_read);
-void pxa2xx_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
- unsigned short val)
+int pxa2xx_ac97_write(int slot, unsigned short reg, unsigned short val)
{
volatile u32 *reg_addr;
+ int ret = 0;
mutex_lock(&car_mutex);
/* set up primary or secondary codec space */
if (cpu_is_pxa25x() && reg == AC97_GPIO_STATUS)
- reg_addr = ac97->num ? &SMC_REG_BASE : &PMC_REG_BASE;
+ reg_addr = slot ? &SMC_REG_BASE : &PMC_REG_BASE;
else
- reg_addr = ac97->num ? &SAC_REG_BASE : &PAC_REG_BASE;
+ reg_addr = slot ? &SAC_REG_BASE : &PAC_REG_BASE;
reg_addr += (reg >> 1);
GSR = GSR_CDONE | GSR_SDONE;
gsr_bits = 0;
*reg_addr = val;
if (wait_event_timeout(gsr_wq, (GSR | gsr_bits) & GSR_CDONE, 1) <= 0 &&
- !((GSR | gsr_bits) & GSR_CDONE))
+ !((GSR | gsr_bits) & GSR_CDONE)) {
printk(KERN_ERR "%s: write error (ac97_reg=%d GSR=%#lx)\n",
__func__, reg, GSR | gsr_bits);
+ ret = -EIO;
+ }
mutex_unlock(&car_mutex);
+ return ret;
}
EXPORT_SYMBOL_GPL(pxa2xx_ac97_write);
@@ -188,7 +193,7 @@ static inline void pxa_ac97_cold_pxa3xx(void)
}
#endif
-bool pxa2xx_ac97_try_warm_reset(struct snd_ac97 *ac97)
+bool pxa2xx_ac97_try_warm_reset(void)
{
unsigned long gsr;
unsigned int timeout = 100;
@@ -225,7 +230,7 @@ bool pxa2xx_ac97_try_warm_reset(struct snd_ac97 *ac97)
}
EXPORT_SYMBOL_GPL(pxa2xx_ac97_try_warm_reset);
-bool pxa2xx_ac97_try_cold_reset(struct snd_ac97 *ac97)
+bool pxa2xx_ac97_try_cold_reset(void)
{
unsigned long gsr;
unsigned int timeout = 1000;
@@ -263,7 +268,7 @@ bool pxa2xx_ac97_try_cold_reset(struct snd_ac97 *ac97)
EXPORT_SYMBOL_GPL(pxa2xx_ac97_try_cold_reset);
-void pxa2xx_ac97_finish_reset(struct snd_ac97 *ac97)
+void pxa2xx_ac97_finish_reset(void)
{
GCR &= ~(GCR_PRIRDY_IEN|GCR_SECRDY_IEN);
GCR |= GCR_SDONE_IE|GCR_CDONE_IE;
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index fbd5dad0c484..4bc244c40f80 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -29,19 +29,38 @@
#include "pxa2xx-pcm.h"
-static void pxa2xx_ac97_reset(struct snd_ac97 *ac97)
+static void pxa2xx_ac97_legacy_reset(struct snd_ac97 *ac97)
{
- if (!pxa2xx_ac97_try_cold_reset(ac97)) {
- pxa2xx_ac97_try_warm_reset(ac97);
- }
+ if (!pxa2xx_ac97_try_cold_reset())
+ pxa2xx_ac97_try_warm_reset();
+
+ pxa2xx_ac97_finish_reset();
+}
+
+static unsigned short pxa2xx_ac97_legacy_read(struct snd_ac97 *ac97,
+ unsigned short reg)
+{
+ int ret;
+
+ ret = pxa2xx_ac97_read(ac97->num, reg);
+ if (ret < 0)
+ return 0;
+ else
+ return (unsigned short)(ret & 0xffff);
+}
+
+static void pxa2xx_ac97_legacy_write(struct snd_ac97 *ac97,
+ unsigned short reg, unsigned short val)
+{
+ int __always_unused ret;
- pxa2xx_ac97_finish_reset(ac97);
+ ret = pxa2xx_ac97_write(ac97->num, reg, val);
}
static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
- .read = pxa2xx_ac97_read,
- .write = pxa2xx_ac97_write,
- .reset = pxa2xx_ac97_reset,
+ .read = pxa2xx_ac97_legacy_read,
+ .write = pxa2xx_ac97_legacy_write,
+ .reset = pxa2xx_ac97_legacy_reset,
};
static struct pxad_param pxa2xx_ac97_pcm_out_req = {
diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
index 6e47b823bcaa..18cb6f476bf4 100644
--- a/sound/core/hrtimer.c
+++ b/sound/core/hrtimer.c
@@ -127,7 +127,7 @@ static int snd_hrtimer_stop(struct snd_timer *t)
return 0;
}
-static struct snd_timer_hardware hrtimer_hw = {
+static const struct snd_timer_hardware hrtimer_hw __initconst = {
.flags = SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_TASKLET,
.open = snd_hrtimer_open,
.close = snd_hrtimer_close,
diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c
index a73baa1242be..8faae3d1455d 100644
--- a/sound/core/hwdep.c
+++ b/sound/core/hwdep.c
@@ -228,6 +228,8 @@ static int snd_hwdep_dsp_load(struct snd_hwdep *hw,
memset(&info, 0, sizeof(info));
if (copy_from_user(&info, _info, sizeof(info)))
return -EFAULT;
+ if (info.index >= 32)
+ return -EINVAL;
/* check whether the dsp was already loaded */
if (hw->dsp_loaded & (1 << info.index))
return -EBUSY;
diff --git a/sound/core/init.c b/sound/core/init.c
index 32ebe2f6bc59..168ae03d3a1c 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -255,6 +255,7 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
#ifdef CONFIG_PM
init_waitqueue_head(&card->power_sleep);
#endif
+ init_waitqueue_head(&card->remove_sleep);
device_initialize(&card->card_dev);
card->card_dev.parent = parent;
@@ -452,6 +453,35 @@ int snd_card_disconnect(struct snd_card *card)
}
EXPORT_SYMBOL(snd_card_disconnect);
+/**
+ * snd_card_disconnect_sync - disconnect card and wait until files get closed
+ * @card: card object to disconnect
+ *
+ * This calls snd_card_disconnect() to disconnect all of the card's components
+ * and then waits until all pending files get closed.
+ * It ensures that all user-space accesses have finished, so that the driver
+ * can release its resources gracefully.
+ */
+void snd_card_disconnect_sync(struct snd_card *card)
+{
+ int err;
+
+ err = snd_card_disconnect(card);
+ if (err < 0) {
+ dev_err(card->dev,
+ "snd_card_disconnect error (%d), skipping sync\n",
+ err);
+ return;
+ }
+
+ spin_lock_irq(&card->files_lock);
+ wait_event_lock_irq(card->remove_sleep,
+ list_empty(&card->files_list),
+ card->files_lock);
+ spin_unlock_irq(&card->files_lock);
+}
+EXPORT_SYMBOL_GPL(snd_card_disconnect_sync);
+
static int snd_card_do_free(struct snd_card *card)
{
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
@@ -957,6 +987,8 @@ int snd_card_file_remove(struct snd_card *card, struct file *file)
break;
}
}
+ if (list_empty(&card->files_list))
+ wake_up_all(&card->remove_sleep);
spin_unlock(&card->files_lock);
if (!found) {
dev_err(card->dev, "card file remove problem (%p)\n", file);
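
A typical consumer of the new snd_card_disconnect_sync() helper is a driver remove path that must not tear hardware down while user space still holds files on the card open. A hedged sketch follows; struct my_chip, my_driver_remove() and the platform-driver framing are illustrative assumptions, not taken from this patch.

#include <linux/platform_device.h>
#include <sound/core.h>

struct my_chip {
	struct snd_card *card;
	/* ... DMA buffers, iomem, irqs ... */
};

static int my_driver_remove(struct platform_device *pdev)
{
	struct my_chip *chip = platform_get_drvdata(pdev);

	/* Block until every open file on this card has been closed. */
	snd_card_disconnect_sync(chip->card);

	/* Now it is safe to tear the hardware down and free the card. */
	snd_card_free(chip->card);
	return 0;
}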
diff --git a/sound/core/jack.c b/sound/core/jack.c
index f652e90efd7e..84c2a17c56ee 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -310,7 +310,7 @@ EXPORT_SYMBOL(snd_jack_set_parent);
* @type: Jack report type for this key
* @keytype: Input layer key type to be reported
*
- * Map a SND_JACK_BTN_ button type to an input layer key, allowing
+ * Map a SND_JACK_BTN_* button type to an input layer key, allowing
* reporting of keys on accessories via the jack abstraction. If no
* mapping is provided but keys are enabled in the jack type then
* BTN_n numeric buttons will be reported.
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 7eadb7fd8074..9070f277f8db 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -775,6 +775,9 @@ static int _snd_pcm_new(struct snd_card *card, const char *id, int device,
.dev_register = snd_pcm_dev_register,
.dev_disconnect = snd_pcm_dev_disconnect,
};
+ static struct snd_device_ops internal_ops = {
+ .dev_free = snd_pcm_dev_free,
+ };
if (snd_BUG_ON(!card))
return -ENXIO;
@@ -801,7 +804,8 @@ static int _snd_pcm_new(struct snd_card *card, const char *id, int device,
if (err < 0)
goto free_pcm;
- err = snd_device_new(card, SNDRV_DEV_PCM, pcm, &ops);
+ err = snd_device_new(card, SNDRV_DEV_PCM, pcm,
+ internal ? &internal_ops : &ops);
if (err < 0)
goto free_pcm;
@@ -1099,8 +1103,6 @@ static int snd_pcm_dev_register(struct snd_device *device)
if (snd_BUG_ON(!device || !device->device_data))
return -ENXIO;
pcm = device->device_data;
- if (pcm->internal)
- return 0;
mutex_lock(&register_mutex);
err = snd_pcm_add(pcm);
@@ -1152,6 +1154,10 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
for (substream = pcm->streams[cidx].substream; substream; substream = substream->next) {
snd_pcm_stream_lock_irq(substream);
if (substream->runtime) {
+ if (snd_pcm_running(substream))
+ snd_pcm_stop(substream,
+ SNDRV_PCM_STATE_DISCONNECTED);
+ /* to be sure, set the state unconditionally */
substream->runtime->status->state = SNDRV_PCM_STATE_DISCONNECTED;
wake_up(&substream->runtime->sleep);
wake_up(&substream->runtime->tsleep);
@@ -1159,12 +1165,10 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
snd_pcm_stream_unlock_irq(substream);
}
}
- if (!pcm->internal) {
- pcm_call_notify(pcm, n_disconnect);
- }
+
+ pcm_call_notify(pcm, n_disconnect);
for (cidx = 0; cidx < 2; cidx++) {
- if (!pcm->internal)
- snd_unregister_device(&pcm->streams[cidx].dev);
+ snd_unregister_device(&pcm->streams[cidx].dev);
free_chmap(&pcm->streams[cidx]);
}
mutex_unlock(&pcm->open_mutex);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 2fec2feac387..a4d92e46c459 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -195,7 +195,6 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
{
- struct snd_pcm_runtime *runtime;
struct snd_pcm *pcm = substream->pcm;
struct snd_pcm_str *pstr = substream->pstr;
@@ -211,7 +210,6 @@ int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
info->subdevices_count = pstr->substream_count;
info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
strlcpy(info->subname, substream->name, sizeof(info->subname));
- runtime = substream->runtime;
return 0;
}
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index d10c780dfd54..6e22eea72654 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -802,6 +802,10 @@ static int snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_e
return -EMLINK;
}
+ if (snd_seq_ev_is_variable(event) &&
+ snd_BUG_ON(atomic && (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR)))
+ return -EINVAL;
+
if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS ||
event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS)
result = deliver_to_subscribers(client, event, atomic, hop);
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 15e82a656d96..ee09dace8bb1 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1069,15 +1069,17 @@ EXPORT_SYMBOL(snd_timer_global_register);
struct snd_timer_system_private {
struct timer_list tlist;
+ struct snd_timer *snd_timer;
unsigned long last_expires;
unsigned long last_jiffies;
unsigned long correction;
};
-static void snd_timer_s_function(unsigned long data)
+static void snd_timer_s_function(struct timer_list *t)
{
- struct snd_timer *timer = (struct snd_timer *)data;
- struct snd_timer_system_private *priv = timer->private_data;
+ struct snd_timer_system_private *priv = from_timer(priv, t,
+ tlist);
+ struct snd_timer *timer = priv->snd_timer;
unsigned long jiff = jiffies;
if (time_after(jiff, priv->last_expires))
priv->correction += (long)jiff - (long)priv->last_expires;
@@ -1159,7 +1161,8 @@ static int snd_timer_register_system(void)
snd_timer_free(timer);
return -ENOMEM;
}
- setup_timer(&priv->tlist, snd_timer_s_function, (unsigned long) timer);
+ priv->snd_timer = timer;
+ timer_setup(&priv->tlist, snd_timer_s_function, 0);
timer->private_data = priv;
timer->private_free = snd_timer_free_system;
return snd_timer_global_register(timer);
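
The sound/core and sound/drivers hunks that follow all apply the same timer API conversion: setup_timer() with a cast data argument becomes timer_setup(), and the callback recovers its container with from_timer(). A generic sketch of the pattern; struct my_state and its fields are illustrative.

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_state {
	struct timer_list timer;
	unsigned long ticks;
};

static void my_timer_fn(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the timer_list member name. */
	struct my_state *st = from_timer(st, t, timer);

	st->ticks++;
	mod_timer(&st->timer, jiffies + HZ);
}

static void my_state_start(struct my_state *st)
{
	timer_setup(&st->timer, my_timer_fn, 0);
	mod_timer(&st->timer, jiffies + HZ);
}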
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 135adb17703c..afac886ffa28 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -529,9 +529,9 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable)
return running;
}
-static void loopback_timer_function(unsigned long data)
+static void loopback_timer_function(struct timer_list *t)
{
- struct loopback_pcm *dpcm = (struct loopback_pcm *)data;
+ struct loopback_pcm *dpcm = from_timer(dpcm, t, timer);
unsigned long flags;
spin_lock_irqsave(&dpcm->cable->lock, flags);
@@ -675,8 +675,7 @@ static int loopback_open(struct snd_pcm_substream *substream)
}
dpcm->loopback = loopback;
dpcm->substream = substream;
- setup_timer(&dpcm->timer, loopback_timer_function,
- (unsigned long)dpcm);
+ timer_setup(&dpcm->timer, loopback_timer_function, 0);
cable = loopback->cables[substream->number][dev];
if (!cable) {
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index c0939a0164a6..7b2b1f766b00 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -306,9 +306,9 @@ static int dummy_systimer_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static void dummy_systimer_callback(unsigned long data)
+static void dummy_systimer_callback(struct timer_list *t)
{
- struct dummy_systimer_pcm *dpcm = (struct dummy_systimer_pcm *)data;
+ struct dummy_systimer_pcm *dpcm = from_timer(dpcm, t, timer);
unsigned long flags;
int elapsed = 0;
@@ -343,8 +343,7 @@ static int dummy_systimer_create(struct snd_pcm_substream *substream)
if (!dpcm)
return -ENOMEM;
substream->runtime->private_data = dpcm;
- setup_timer(&dpcm->timer, dummy_systimer_callback,
- (unsigned long) dpcm);
+ timer_setup(&dpcm->timer, dummy_systimer_callback, 0);
spin_lock_init(&dpcm->lock);
dpcm->substream = substream;
return 0;
diff --git a/sound/drivers/mpu401/mpu401_uart.c b/sound/drivers/mpu401/mpu401_uart.c
index b997222274bd..3e745f47dd2f 100644
--- a/sound/drivers/mpu401/mpu401_uart.c
+++ b/sound/drivers/mpu401/mpu401_uart.c
@@ -169,9 +169,9 @@ EXPORT_SYMBOL(snd_mpu401_uart_interrupt_tx);
* timer callback
* reprogram the timer and call the interrupt job
*/
-static void snd_mpu401_uart_timer(unsigned long data)
+static void snd_mpu401_uart_timer(struct timer_list *t)
{
- struct snd_mpu401 *mpu = (struct snd_mpu401 *)data;
+ struct snd_mpu401 *mpu = from_timer(mpu, t, timer);
unsigned long flags;
spin_lock_irqsave(&mpu->timer_lock, flags);
@@ -191,8 +191,7 @@ static void snd_mpu401_uart_add_timer (struct snd_mpu401 *mpu, int input)
spin_lock_irqsave (&mpu->timer_lock, flags);
if (mpu->timer_invoked == 0) {
- setup_timer(&mpu->timer, snd_mpu401_uart_timer,
- (unsigned long)mpu);
+ timer_setup(&mpu->timer, snd_mpu401_uart_timer, 0);
mod_timer(&mpu->timer, 1 + jiffies);
}
mpu->timer_invoked |= input ? MPU401_MODE_INPUT_TIMER :
diff --git a/sound/drivers/mtpav.c b/sound/drivers/mtpav.c
index 0f6392001e30..547662e02fcc 100644
--- a/sound/drivers/mtpav.c
+++ b/sound/drivers/mtpav.c
@@ -406,10 +406,10 @@ static void snd_mtpav_input_trigger(struct snd_rawmidi_substream *substream, int
* timer interrupt for outputs
*/
-static void snd_mtpav_output_timer(unsigned long data)
+static void snd_mtpav_output_timer(struct timer_list *t)
{
unsigned long flags;
- struct mtpav *chip = (struct mtpav *)data;
+ struct mtpav *chip = from_timer(chip, t, timer);
int p;
spin_lock_irqsave(&chip->spinlock, flags);
@@ -707,8 +707,7 @@ static int snd_mtpav_probe(struct platform_device *dev)
mtp_card->share_irq = 0;
mtp_card->inmidistate = 0;
mtp_card->outmidihwport = 0xffffffff;
- setup_timer(&mtp_card->timer, snd_mtpav_output_timer,
- (unsigned long) mtp_card);
+ timer_setup(&mtp_card->timer, snd_mtpav_output_timer, 0);
card->private_free = snd_mtpav_free;
diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c
index 13c0a7e1bc2b..bb3f3a5a6951 100644
--- a/sound/drivers/opl3/opl3_midi.c
+++ b/sound/drivers/opl3/opl3_midi.c
@@ -238,10 +238,10 @@ static int opl3_get_voice(struct snd_opl3 *opl3, int instr_4op,
/*
* System timer interrupt function
*/
-void snd_opl3_timer_func(unsigned long data)
+void snd_opl3_timer_func(struct timer_list *t)
{
- struct snd_opl3 *opl3 = (struct snd_opl3 *)data;
+ struct snd_opl3 *opl3 = from_timer(opl3, t, tlist);
unsigned long flags;
int again = 0;
int i;
diff --git a/sound/drivers/opl3/opl3_seq.c b/sound/drivers/opl3/opl3_seq.c
index d3e91be8b23a..5f881c4cdf1f 100644
--- a/sound/drivers/opl3/opl3_seq.c
+++ b/sound/drivers/opl3/opl3_seq.c
@@ -248,7 +248,7 @@ static int snd_opl3_seq_probe(struct device *_dev)
}
/* setup system timer */
- setup_timer(&opl3->tlist, snd_opl3_timer_func, (unsigned long) opl3);
+ timer_setup(&opl3->tlist, snd_opl3_timer_func, 0);
spin_lock_init(&opl3->sys_timer_lock);
opl3->sys_timer_status = 0;
diff --git a/sound/drivers/opl3/opl3_voice.h b/sound/drivers/opl3/opl3_voice.h
index eaef435e0528..a2445163008e 100644
--- a/sound/drivers/opl3/opl3_voice.h
+++ b/sound/drivers/opl3/opl3_voice.h
@@ -37,7 +37,7 @@ void snd_opl3_nrpn(void *p, struct snd_midi_channel *chan, struct snd_midi_chann
void snd_opl3_sysex(void *p, unsigned char *buf, int len, int parsed, struct snd_midi_channel_set *chset);
void snd_opl3_calc_volume(unsigned char *reg, int vel, struct snd_midi_channel *chan);
-void snd_opl3_timer_func(unsigned long data);
+void snd_opl3_timer_func(struct timer_list *t);
/* Prototypes for opl3_drums.c */
void snd_opl3_load_drums(struct snd_opl3 *opl3);
diff --git a/sound/drivers/serial-u16550.c b/sound/drivers/serial-u16550.c
index 88e66ea0306d..0a67b8b9f176 100644
--- a/sound/drivers/serial-u16550.c
+++ b/sound/drivers/serial-u16550.c
@@ -309,12 +309,12 @@ static irqreturn_t snd_uart16550_interrupt(int irq, void *dev_id)
}
/* When the polling mode, this function calls snd_uart16550_io_loop. */
-static void snd_uart16550_buffer_timer(unsigned long data)
+static void snd_uart16550_buffer_timer(struct timer_list *t)
{
unsigned long flags;
struct snd_uart16550 *uart;
- uart = (struct snd_uart16550 *)data;
+ uart = from_timer(uart, t, buffer_timer);
spin_lock_irqsave(&uart->open_lock, flags);
snd_uart16550_del_timer(uart);
snd_uart16550_io_loop(uart);
@@ -828,8 +828,7 @@ static int snd_uart16550_create(struct snd_card *card,
uart->prev_in = 0;
uart->rstatus = 0;
memset(uart->prev_status, 0x80, sizeof(unsigned char) * SNDRV_SERIAL_MAX_OUTS);
- setup_timer(&uart->buffer_timer, snd_uart16550_buffer_timer,
- (unsigned long)uart);
+ timer_setup(&uart->buffer_timer, snd_uart16550_buffer_timer, 0);
uart->timer_running = 0;
/* Register device */
diff --git a/sound/firewire/amdtp-am824.c b/sound/firewire/amdtp-am824.c
index 23ccddb20de1..4210e5c6262e 100644
--- a/sound/firewire/amdtp-am824.c
+++ b/sound/firewire/amdtp-am824.c
@@ -247,7 +247,7 @@ void amdtp_am824_midi_trigger(struct amdtp_stream *s, unsigned int port,
struct amdtp_am824 *p = s->protocol;
if (port < p->midi_ports)
- ACCESS_ONCE(p->midi[port]) = midi;
+ WRITE_ONCE(p->midi[port], midi);
}
EXPORT_SYMBOL_GPL(amdtp_am824_midi_trigger);
@@ -336,7 +336,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s, __be32 *buffe
unsigned int data_blocks, unsigned int *syt)
{
struct amdtp_am824 *p = s->protocol;
- struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
unsigned int pcm_frames;
if (pcm) {
@@ -357,7 +357,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s, __be32 *buffe
unsigned int data_blocks, unsigned int *syt)
{
struct amdtp_am824 *p = s->protocol;
- struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
unsigned int pcm_frames;
if (pcm) {
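Throughout the firewire drivers the ACCESS_ONCE() macro is replaced by the directional READ_ONCE()/WRITE_ONCE() helpers from <linux/compiler.h>; the intent (a single access the compiler may not merge, cache, or re-fetch) is unchanged, only loads and stores are now spelled out explicitly. A minimal sketch, with s->pcm standing in for any such shared pointer:

    /* old style: one macro for both directions */
    pcm = ACCESS_ONCE(s->pcm);
    ACCESS_ONCE(s->pcm) = substream;

    /* new style: explicit load and explicit store */
    pcm = READ_ONCE(s->pcm);
    WRITE_ONCE(s->pcm, substream);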
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 3fc581a5ad62..4a1dc145327b 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -376,7 +376,7 @@ static void update_pcm_pointers(struct amdtp_stream *s,
ptr = s->pcm_buffer_pointer + frames;
if (ptr >= pcm->runtime->buffer_size)
ptr -= pcm->runtime->buffer_size;
- ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
+ WRITE_ONCE(s->pcm_buffer_pointer, ptr);
s->pcm_period_pointer += frames;
if (s->pcm_period_pointer >= pcm->runtime->period_size) {
@@ -388,7 +388,7 @@ static void update_pcm_pointers(struct amdtp_stream *s,
static void pcm_period_tasklet(unsigned long data)
{
struct amdtp_stream *s = (void *)data;
- struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
if (pcm)
snd_pcm_period_elapsed(pcm);
@@ -453,7 +453,7 @@ static int handle_out_packet(struct amdtp_stream *s,
s->data_block_counter =
(s->data_block_counter + data_blocks) & 0xff;
- buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) |
+ buffer[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
(s->data_block_quadlets << CIP_DBS_SHIFT) |
((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
s->data_block_counter);
@@ -472,7 +472,7 @@ static int handle_out_packet(struct amdtp_stream *s,
if (queue_out_packet(s, payload_length) < 0)
return -EIO;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm && pcm_frames > 0)
update_pcm_pointers(s, pcm, pcm_frames);
@@ -504,7 +504,7 @@ static int handle_out_packet_without_header(struct amdtp_stream *s,
if (queue_out_packet(s, payload_length) < 0)
return -EIO;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm && pcm_frames > 0)
update_pcm_pointers(s, pcm, pcm_frames);
@@ -621,7 +621,7 @@ end:
if (queue_in_packet(s) < 0)
return -EIO;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm && pcm_frames > 0)
update_pcm_pointers(s, pcm, pcm_frames);
@@ -649,7 +649,7 @@ static int handle_in_packet_without_header(struct amdtp_stream *s,
if (queue_in_packet(s) < 0)
return -EIO;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm && pcm_frames > 0)
update_pcm_pointers(s, pcm, pcm_frames);
@@ -947,7 +947,7 @@ unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s)
if (!in_interrupt() && amdtp_stream_running(s))
fw_iso_context_flush_completions(s->context);
- return ACCESS_ONCE(s->pcm_buffer_pointer);
+ return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
@@ -977,9 +977,8 @@ EXPORT_SYMBOL(amdtp_stream_pcm_ack);
void amdtp_stream_update(struct amdtp_stream *s)
{
/* Precomputing. */
- ACCESS_ONCE(s->source_node_id_field) =
- (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) &
- CIP_SID_MASK;
+ WRITE_ONCE(s->source_node_id_field,
+ (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);
@@ -1022,7 +1021,7 @@ void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
struct snd_pcm_substream *pcm;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm)
snd_pcm_stop_xrun(pcm);
}
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index a608dae83348..e45de3eecfe3 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -221,7 +221,7 @@ static inline bool amdtp_stream_pcm_running(struct amdtp_stream *s)
static inline void amdtp_stream_pcm_trigger(struct amdtp_stream *s,
struct snd_pcm_substream *pcm)
{
- ACCESS_ONCE(s->pcm) = pcm;
+ WRITE_ONCE(s->pcm, pcm);
}
static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c
index 1453c34ce99f..4a884a335248 100644
--- a/sound/firewire/digi00x/amdtp-dot.c
+++ b/sound/firewire/digi00x/amdtp-dot.c
@@ -327,7 +327,7 @@ void amdtp_dot_midi_trigger(struct amdtp_stream *s, unsigned int port,
struct amdtp_dot *p = s->protocol;
if (port < MAX_MIDI_PORTS)
- ACCESS_ONCE(p->midi[port]) = midi;
+ WRITE_ONCE(p->midi[port], midi);
}
static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
@@ -338,7 +338,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
struct snd_pcm_substream *pcm;
unsigned int pcm_frames;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm) {
read_pcm_s32(s, pcm, buffer, data_blocks);
pcm_frames = data_blocks;
@@ -359,7 +359,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
struct snd_pcm_substream *pcm;
unsigned int pcm_frames;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm) {
write_pcm_s32(s, pcm, buffer, data_blocks);
pcm_frames = data_blocks;
diff --git a/sound/firewire/fireface/amdtp-ff.c b/sound/firewire/fireface/amdtp-ff.c
index 780da9deb2f0..77c7598b61ab 100644
--- a/sound/firewire/fireface/amdtp-ff.c
+++ b/sound/firewire/fireface/amdtp-ff.c
@@ -108,7 +108,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
unsigned int data_blocks,
unsigned int *syt)
{
- struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
unsigned int pcm_frames;
if (pcm) {
@@ -127,7 +127,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
unsigned int data_blocks,
unsigned int *syt)
{
- struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
unsigned int pcm_frames;
if (pcm) {
diff --git a/sound/firewire/fireface/ff-midi.c b/sound/firewire/fireface/ff-midi.c
index 949ee56b4e0e..6a49611ee462 100644
--- a/sound/firewire/fireface/ff-midi.c
+++ b/sound/firewire/fireface/ff-midi.c
@@ -22,7 +22,7 @@ static int midi_playback_open(struct snd_rawmidi_substream *substream)
ff->running_status[substream->number] = 0;
ff->rx_midi_error[substream->number] = false;
- ACCESS_ONCE(ff->rx_midi_substreams[substream->number]) = substream;
+ WRITE_ONCE(ff->rx_midi_substreams[substream->number], substream);
return 0;
}
@@ -38,7 +38,7 @@ static int midi_playback_close(struct snd_rawmidi_substream *substream)
struct snd_ff *ff = substream->rmidi->private_data;
cancel_work_sync(&ff->rx_midi_work[substream->number]);
- ACCESS_ONCE(ff->rx_midi_substreams[substream->number]) = NULL;
+ WRITE_ONCE(ff->rx_midi_substreams[substream->number], NULL);
return 0;
}
@@ -52,10 +52,10 @@ static void midi_capture_trigger(struct snd_rawmidi_substream *substream,
spin_lock_irqsave(&ff->lock, flags);
if (up)
- ACCESS_ONCE(ff->tx_midi_substreams[substream->number]) =
- substream;
+ WRITE_ONCE(ff->tx_midi_substreams[substream->number],
+ substream);
else
- ACCESS_ONCE(ff->tx_midi_substreams[substream->number]) = NULL;
+ WRITE_ONCE(ff->tx_midi_substreams[substream->number], NULL);
spin_unlock_irqrestore(&ff->lock, flags);
}
diff --git a/sound/firewire/fireface/ff-transaction.c b/sound/firewire/fireface/ff-transaction.c
index dd6c8e839647..332b29f8ed75 100644
--- a/sound/firewire/fireface/ff-transaction.c
+++ b/sound/firewire/fireface/ff-transaction.c
@@ -12,7 +12,7 @@ static void finish_transmit_midi_msg(struct snd_ff *ff, unsigned int port,
int rcode)
{
struct snd_rawmidi_substream *substream =
- ACCESS_ONCE(ff->rx_midi_substreams[port]);
+ READ_ONCE(ff->rx_midi_substreams[port]);
if (rcode_is_permanent_error(rcode)) {
ff->rx_midi_error[port] = true;
@@ -60,7 +60,7 @@ static inline void fill_midi_buf(struct snd_ff *ff, unsigned int port,
static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
{
struct snd_rawmidi_substream *substream =
- ACCESS_ONCE(ff->rx_midi_substreams[port]);
+ READ_ONCE(ff->rx_midi_substreams[port]);
u8 *buf = (u8 *)ff->msg_buf[port];
int i, len;
@@ -159,7 +159,7 @@ static void handle_midi_msg(struct fw_card *card, struct fw_request *request,
*/
index = (quad >> 8) & 0xff;
if (index > 0) {
- substream = ACCESS_ONCE(ff->tx_midi_substreams[0]);
+ substream = READ_ONCE(ff->tx_midi_substreams[0]);
if (substream != NULL) {
byte = quad & 0xff;
snd_rawmidi_receive(substream, &byte, 1);
@@ -169,7 +169,7 @@ static void handle_midi_msg(struct fw_card *card, struct fw_request *request,
/* Message in second port. */
index = (quad >> 24) & 0xff;
if (index > 0) {
- substream = ACCESS_ONCE(ff->tx_midi_substreams[1]);
+ substream = READ_ONCE(ff->tx_midi_substreams[1]);
if (substream != NULL) {
byte = (quad >> 16) & 0xff;
snd_rawmidi_receive(substream, &byte, 1);
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index 5826aa8362f1..46092fa3ff9b 100644
--- a/sound/firewire/isight.c
+++ b/sound/firewire/isight.c
@@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
ptr += count;
if (ptr >= runtime->buffer_size)
ptr -= runtime->buffer_size;
- ACCESS_ONCE(isight->buffer_pointer) = ptr;
+ WRITE_ONCE(isight->buffer_pointer, ptr);
isight->period_counter += count;
if (isight->period_counter >= runtime->period_size) {
@@ -111,7 +111,7 @@ static void isight_samples(struct isight *isight,
struct snd_pcm_runtime *runtime;
unsigned int count1;
- if (!ACCESS_ONCE(isight->pcm_running))
+ if (!READ_ONCE(isight->pcm_running))
return;
runtime = isight->pcm->runtime;
@@ -131,7 +131,7 @@ static void isight_samples(struct isight *isight,
static void isight_pcm_abort(struct isight *isight)
{
- if (ACCESS_ONCE(isight->pcm_active))
+ if (READ_ONCE(isight->pcm_active))
snd_pcm_stop_xrun(isight->pcm);
}
@@ -141,7 +141,7 @@ static void isight_dropped_samples(struct isight *isight, unsigned int total)
u32 dropped;
unsigned int count1;
- if (!ACCESS_ONCE(isight->pcm_running))
+ if (!READ_ONCE(isight->pcm_running))
return;
runtime = isight->pcm->runtime;
@@ -293,7 +293,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
if (err < 0)
return err;
- ACCESS_ONCE(isight->pcm_active) = true;
+ WRITE_ONCE(isight->pcm_active, true);
return 0;
}
@@ -331,7 +331,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
{
struct isight *isight = substream->private_data;
- ACCESS_ONCE(isight->pcm_active) = false;
+ WRITE_ONCE(isight->pcm_active, false);
mutex_lock(&isight->mutex);
isight_stop_streaming(isight);
@@ -424,10 +424,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- ACCESS_ONCE(isight->pcm_running) = true;
+ WRITE_ONCE(isight->pcm_running, true);
break;
case SNDRV_PCM_TRIGGER_STOP:
- ACCESS_ONCE(isight->pcm_running) = false;
+ WRITE_ONCE(isight->pcm_running, false);
break;
default:
return -EINVAL;
@@ -439,7 +439,7 @@ static snd_pcm_uframes_t isight_pointer(struct snd_pcm_substream *substream)
{
struct isight *isight = substream->private_data;
- return ACCESS_ONCE(isight->buffer_pointer);
+ return READ_ONCE(isight->buffer_pointer);
}
static int isight_create_pcm(struct isight *isight)
diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
index 96f0091144bb..f0555a24d90e 100644
--- a/sound/firewire/motu/amdtp-motu.c
+++ b/sound/firewire/motu/amdtp-motu.c
@@ -310,7 +310,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
if (p->midi_ports)
read_midi_messages(s, buffer, data_blocks);
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (data_blocks > 0 && pcm)
read_pcm_s32(s, pcm->runtime, buffer, data_blocks);
@@ -374,7 +374,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
if (p->midi_ports)
write_midi_messages(s, buffer, data_blocks);
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm)
write_pcm_s32(s, pcm->runtime, buffer, data_blocks);
else
diff --git a/sound/firewire/oxfw/oxfw-scs1x.c b/sound/firewire/oxfw/oxfw-scs1x.c
index 02d595665898..f33497cdc706 100644
--- a/sound/firewire/oxfw/oxfw-scs1x.c
+++ b/sound/firewire/oxfw/oxfw-scs1x.c
@@ -112,7 +112,7 @@ static void handle_hss(struct fw_card *card, struct fw_request *request,
}
if (length >= 1) {
- stream = ACCESS_ONCE(scs->input);
+ stream = READ_ONCE(scs->input);
if (stream)
midi_input_packet(scs, stream, data, length);
}
@@ -183,7 +183,7 @@ static void scs_output_work(struct work_struct *work)
if (scs->transaction_running)
return;
- stream = ACCESS_ONCE(scs->output);
+ stream = READ_ONCE(scs->output);
if (!stream || scs->error) {
scs->output_idle = true;
wake_up(&scs->idle_wait);
@@ -291,9 +291,9 @@ static void midi_capture_trigger(struct snd_rawmidi_substream *stream, int up)
if (up) {
scs->input_escape_count = 0;
- ACCESS_ONCE(scs->input) = stream;
+ WRITE_ONCE(scs->input, stream);
} else {
- ACCESS_ONCE(scs->input) = NULL;
+ WRITE_ONCE(scs->input, NULL);
}
}
@@ -319,10 +319,10 @@ static void midi_playback_trigger(struct snd_rawmidi_substream *stream, int up)
scs->transaction_bytes = 0;
scs->error = false;
- ACCESS_ONCE(scs->output) = stream;
+ WRITE_ONCE(scs->output, stream);
schedule_work(&scs->work);
} else {
- ACCESS_ONCE(scs->output) = NULL;
+ WRITE_ONCE(scs->output, NULL);
}
}
static void midi_playback_drain(struct snd_rawmidi_substream *stream)
diff --git a/sound/firewire/tascam/amdtp-tascam.c b/sound/firewire/tascam/amdtp-tascam.c
index 6aff1fc1c72d..ab482423c165 100644
--- a/sound/firewire/tascam/amdtp-tascam.c
+++ b/sound/firewire/tascam/amdtp-tascam.c
@@ -124,7 +124,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
{
struct snd_pcm_substream *pcm;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (data_blocks > 0 && pcm)
read_pcm_s32(s, pcm, buffer, data_blocks);
@@ -143,7 +143,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
/* This field is not used. */
*syt = 0x0000;
- pcm = ACCESS_ONCE(s->pcm);
+ pcm = READ_ONCE(s->pcm);
if (pcm)
write_pcm_s32(s, pcm, buffer, data_blocks);
else
diff --git a/sound/firewire/tascam/tascam-transaction.c b/sound/firewire/tascam/tascam-transaction.c
index 8967c52f5032..2ad692dd4b13 100644
--- a/sound/firewire/tascam/tascam-transaction.c
+++ b/sound/firewire/tascam/tascam-transaction.c
@@ -148,7 +148,7 @@ static void async_midi_port_callback(struct fw_card *card, int rcode,
void *callback_data)
{
struct snd_fw_async_midi_port *port = callback_data;
- struct snd_rawmidi_substream *substream = ACCESS_ONCE(port->substream);
+ struct snd_rawmidi_substream *substream = READ_ONCE(port->substream);
/* This port is closed. */
if (substream == NULL)
@@ -173,7 +173,7 @@ static void midi_port_work(struct work_struct *work)
{
struct snd_fw_async_midi_port *port =
container_of(work, struct snd_fw_async_midi_port, work);
- struct snd_rawmidi_substream *substream = ACCESS_ONCE(port->substream);
+ struct snd_rawmidi_substream *substream = READ_ONCE(port->substream);
int generation;
/* Under transacting or error state. */
@@ -282,7 +282,7 @@ static void handle_midi_tx(struct fw_card *card, struct fw_request *request,
bytes = 3;
}
- substream = ACCESS_ONCE(tscm->tx_midi_substreams[port]);
+ substream = READ_ONCE(tscm->tx_midi_substreams[port]);
if (substream != NULL)
snd_rawmidi_receive(substream, b + 1, bytes);
}
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index f6d2985b2520..560ec0986e1a 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -319,7 +319,8 @@ int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus)
break;
default:
- dev_dbg(bus->dev, "Unknown capability %d\n", cur_cap);
+ dev_err(bus->dev, "Unknown capability %d\n", cur_cap);
+ cur_cap = 0;
break;
}
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index 19deb306facb..06f845e293cb 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -87,7 +87,7 @@ int snd_hdac_device_init(struct hdac_device *codec, struct hdac_bus *bus,
fg = codec->afg ? codec->afg : codec->mfg;
- err = snd_hdac_refresh_widgets(codec);
+ err = snd_hdac_refresh_widgets(codec, false);
if (err < 0)
goto error;
@@ -388,11 +388,12 @@ static void setup_fg_nodes(struct hdac_device *codec)
/**
* snd_hdac_refresh_widgets - Reset the widget start/end nodes
* @codec: the codec object
+ * @sysfs: re-initialize sysfs tree, too
*/
-int snd_hdac_refresh_widgets(struct hdac_device *codec)
+int snd_hdac_refresh_widgets(struct hdac_device *codec, bool sysfs)
{
hda_nid_t start_nid;
- int nums;
+ int nums, err;
nums = snd_hdac_get_sub_nodes(codec, codec->afg, &start_nid);
if (!start_nid || nums <= 0 || nums >= 0xff) {
@@ -401,6 +402,12 @@ int snd_hdac_refresh_widgets(struct hdac_device *codec)
return -EINVAL;
}
+ if (sysfs) {
+ err = hda_widget_sysfs_reinit(codec, start_nid, nums);
+ if (err < 0)
+ return err;
+ }
+
codec->num_nodes = nums;
codec->start_nid = start_nid;
codec->end_nid = start_nid + nums;
@@ -408,36 +415,6 @@ int snd_hdac_refresh_widgets(struct hdac_device *codec)
}
EXPORT_SYMBOL_GPL(snd_hdac_refresh_widgets);
-/**
- * snd_hdac_refresh_widget_sysfs - Reset the codec widgets and reinit the
- * codec sysfs
- * @codec: the codec object
- *
- * first we need to remove sysfs, then refresh widgets and lastly
- * recreate it
- */
-int snd_hdac_refresh_widget_sysfs(struct hdac_device *codec)
-{
- int ret;
-
- if (device_is_registered(&codec->dev))
- hda_widget_sysfs_exit(codec);
- ret = snd_hdac_refresh_widgets(codec);
- if (ret) {
- dev_err(&codec->dev, "failed to refresh widget: %d\n", ret);
- return ret;
- }
- if (device_is_registered(&codec->dev)) {
- ret = hda_widget_sysfs_init(codec);
- if (ret) {
- dev_err(&codec->dev, "failed to init sysfs: %d\n", ret);
- return ret;
- }
- }
- return ret;
-}
-EXPORT_SYMBOL_GPL(snd_hdac_refresh_widget_sysfs);
-
/* return CONNLIST_LEN parameter of the given widget */
static unsigned int get_num_conns(struct hdac_device *codec, hda_nid_t nid)
{
diff --git a/sound/hda/hdac_sysfs.c b/sound/hda/hdac_sysfs.c
index 3c2d45ee6ab8..fb2aa344981e 100644
--- a/sound/hda/hdac_sysfs.c
+++ b/sound/hda/hdac_sysfs.c
@@ -415,3 +415,50 @@ void hda_widget_sysfs_exit(struct hdac_device *codec)
{
widget_tree_free(codec);
}
+
+int hda_widget_sysfs_reinit(struct hdac_device *codec,
+ hda_nid_t start_nid, int num_nodes)
+{
+ struct hdac_widget_tree *tree;
+ hda_nid_t end_nid = start_nid + num_nodes;
+ hda_nid_t nid;
+ int i;
+
+ if (!codec->widgets)
+ return hda_widget_sysfs_init(codec);
+
+ tree = kmemdup(codec->widgets, sizeof(*tree), GFP_KERNEL);
+ if (!tree)
+ return -ENOMEM;
+
+ tree->nodes = kcalloc(num_nodes + 1, sizeof(*tree->nodes), GFP_KERNEL);
+ if (!tree->nodes) {
+ kfree(tree);
+ return -ENOMEM;
+ }
+
+ /* prune non-existing nodes */
+ for (i = 0, nid = codec->start_nid; i < codec->num_nodes; i++, nid++) {
+ if (nid < start_nid || nid >= end_nid)
+ free_widget_node(codec->widgets->nodes[i],
+ &widget_node_group);
+ }
+
+ /* add new nodes */
+ for (i = 0, nid = start_nid; i < num_nodes; i++, nid++) {
+ if (nid < codec->start_nid || nid >= codec->end_nid)
+ add_widget_node(tree->root, nid, &widget_node_group,
+ &tree->nodes[i]);
+ else
+ tree->nodes[i] =
+ codec->widgets->nodes[nid - codec->start_nid];
+ }
+
+ /* replace with the new tree */
+ kfree(codec->widgets->nodes);
+ kfree(codec->widgets);
+ codec->widgets = tree;
+
+ kobject_uevent(tree->root, KOBJ_CHANGE);
+ return 0;
+}
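With the sysfs re-initialisation folded into snd_hdac_refresh_widgets(), hda_widget_sysfs_reinit() above rebuilds only what changed: nodes whose NIDs fall outside the new range are freed, nodes still inside the old range are reused, the remainder are created fresh, and the new tree then replaces the old one with a KOBJ_CHANGE uevent. A caller that previously used snd_hdac_refresh_widget_sysfs() would now, hypothetically, do something like:

    /* refresh the widget range only */
    err = snd_hdac_refresh_widgets(codec, false);

    /* refresh the widgets and rebuild the sysfs widget tree in one call */
    err = snd_hdac_refresh_widgets(codec, true);
    if (err < 0)
        dev_err(&codec->dev, "failed to refresh widgets: %d\n", err);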
diff --git a/sound/hda/local.h b/sound/hda/local.h
index 7258fa8ce268..877631e39373 100644
--- a/sound/hda/local.h
+++ b/sound/hda/local.h
@@ -29,6 +29,8 @@ static inline unsigned int get_wcaps_channels(u32 wcaps)
extern const struct attribute_group *hdac_dev_attr_groups[];
int hda_widget_sysfs_init(struct hdac_device *codec);
+int hda_widget_sysfs_reinit(struct hdac_device *codec, hda_nid_t start_nid,
+ int num_nodes);
void hda_widget_sysfs_exit(struct hdac_device *codec);
#endif /* __HDAC_LOCAL_H */
diff --git a/sound/i2c/other/ak4117.c b/sound/i2c/other/ak4117.c
index 3ab099fb8c15..b923342cadf4 100644
--- a/sound/i2c/other/ak4117.c
+++ b/sound/i2c/other/ak4117.c
@@ -35,7 +35,7 @@ MODULE_LICENSE("GPL");
#define AK4117_ADDR 0x00 /* fixed address */
-static void snd_ak4117_timer(unsigned long data);
+static void snd_ak4117_timer(struct timer_list *t);
static void reg_write(struct ak4117 *ak4117, unsigned char reg, unsigned char val)
{
@@ -91,7 +91,7 @@ int snd_ak4117_create(struct snd_card *card, ak4117_read_t *read, ak4117_write_t
chip->read = read;
chip->write = write;
chip->private_data = private_data;
- setup_timer(&chip->timer, snd_ak4117_timer, (unsigned long)chip);
+ timer_setup(&chip->timer, snd_ak4117_timer, 0);
for (reg = 0; reg < 5; reg++)
chip->regmap[reg] = pgm[reg];
@@ -529,9 +529,9 @@ int snd_ak4117_check_rate_and_errors(struct ak4117 *ak4117, unsigned int flags)
return res;
}
-static void snd_ak4117_timer(unsigned long data)
+static void snd_ak4117_timer(struct timer_list *t)
{
- struct ak4117 *chip = (struct ak4117 *)data;
+ struct ak4117 *chip = from_timer(chip, t, timer);
if (chip->init)
return;
diff --git a/sound/isa/sb/emu8000_pcm.c b/sound/isa/sb/emu8000_pcm.c
index 8f34551abd8d..bc5af71d3bdb 100644
--- a/sound/isa/sb/emu8000_pcm.c
+++ b/sound/isa/sb/emu8000_pcm.c
@@ -193,9 +193,9 @@ static inline int emu8k_get_curpos(struct snd_emu8k_pcm *rec, int ch)
* timer interrupt handler
* check the current position and update the period if necessary.
*/
-static void emu8k_pcm_timer_func(unsigned long data)
+static void emu8k_pcm_timer_func(struct timer_list *t)
{
- struct snd_emu8k_pcm *rec = (struct snd_emu8k_pcm *)data;
+ struct snd_emu8k_pcm *rec = from_timer(rec, t, timer);
int ptr, delta;
spin_lock(&rec->timer_lock);
@@ -241,7 +241,7 @@ static int emu8k_pcm_open(struct snd_pcm_substream *subs)
runtime->private_data = rec;
spin_lock_init(&rec->timer_lock);
- setup_timer(&rec->timer, emu8k_pcm_timer_func, (unsigned long)rec);
+ timer_setup(&rec->timer, emu8k_pcm_timer_func, 0);
runtime->hw = emu8k_pcm_hw;
runtime->hw.buffer_bytes_max = emu->mem_size - LOOP_BLANK_SIZE * 3;
diff --git a/sound/isa/sb/sb8_midi.c b/sound/isa/sb/sb8_midi.c
index bd672abb4854..4affdcb78f72 100644
--- a/sound/isa/sb/sb8_midi.c
+++ b/sound/isa/sb/sb8_midi.c
@@ -138,6 +138,7 @@ static int snd_sb8dsp_midi_output_close(struct snd_rawmidi_substream *substream)
struct snd_sb *chip;
chip = substream->rmidi->private_data;
+ del_timer_sync(&chip->midi_timer);
spin_lock_irqsave(&chip->open_lock, flags);
chip->open &= ~(SB_OPEN_MIDI_OUTPUT | SB_OPEN_MIDI_OUTPUT_TRIGGER);
chip->midi_substream_output = NULL;
@@ -209,10 +210,10 @@ static void snd_sb8dsp_midi_output_write(struct snd_rawmidi_substream *substream
}
}
-static void snd_sb8dsp_midi_output_timer(unsigned long data)
+static void snd_sb8dsp_midi_output_timer(struct timer_list *t)
{
- struct snd_rawmidi_substream *substream = (struct snd_rawmidi_substream *) data;
- struct snd_sb * chip = substream->rmidi->private_data;
+ struct snd_sb *chip = from_timer(chip, t, midi_timer);
+ struct snd_rawmidi_substream *substream = chip->midi_substream_output;
unsigned long flags;
spin_lock_irqsave(&chip->open_lock, flags);
@@ -230,9 +231,6 @@ static void snd_sb8dsp_midi_output_trigger(struct snd_rawmidi_substream *substre
spin_lock_irqsave(&chip->open_lock, flags);
if (up) {
if (!(chip->open & SB_OPEN_MIDI_OUTPUT_TRIGGER)) {
- setup_timer(&chip->midi_timer,
- snd_sb8dsp_midi_output_timer,
- (unsigned long) substream);
mod_timer(&chip->midi_timer, 1 + jiffies);
chip->open |= SB_OPEN_MIDI_OUTPUT_TRIGGER;
}
@@ -275,6 +273,7 @@ int snd_sb8dsp_midi(struct snd_sb *chip, int device)
if (chip->hardware >= SB_HW_20)
rmidi->info_flags |= SNDRV_RAWMIDI_INFO_DUPLEX;
rmidi->private_data = chip;
+ timer_setup(&chip->midi_timer, snd_sb8dsp_midi_output_timer, 0);
chip->rmidi = rmidi;
return 0;
}
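In sb8_midi.c the timer lifecycle is also reorganised: the timer is set up once when the rawmidi device is created, the output trigger merely arms it with mod_timer(), and the output close path gains a del_timer_sync() so the callback cannot still be running when chip->midi_substream_output is cleared; the callback itself now recovers the chip with from_timer() and takes the substream from that field rather than from a cast cookie. The resulting lifecycle, sketched from the hunks above:

    timer_setup(&chip->midi_timer, snd_sb8dsp_midi_output_timer, 0); /* at device creation */
    mod_timer(&chip->midi_timer, 1 + jiffies);                       /* when output is triggered */
    del_timer_sync(&chip->midi_timer);                               /* before the output is closed */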
diff --git a/sound/isa/wavefront/wavefront_midi.c b/sound/isa/wavefront/wavefront_midi.c
index 2aa05f3aaa38..556b14738970 100644
--- a/sound/isa/wavefront/wavefront_midi.c
+++ b/sound/isa/wavefront/wavefront_midi.c
@@ -349,10 +349,10 @@ static void snd_wavefront_midi_input_trigger(struct snd_rawmidi_substream *subst
spin_unlock_irqrestore (&midi->virtual, flags);
}
-static void snd_wavefront_midi_output_timer(unsigned long data)
+static void snd_wavefront_midi_output_timer(struct timer_list *t)
{
- snd_wavefront_card_t *card = (snd_wavefront_card_t *)data;
- snd_wavefront_midi_t *midi = &card->wavefront.midi;
+ snd_wavefront_midi_t *midi = from_timer(midi, t, timer);
+ snd_wavefront_card_t *card = midi->timer_card;
unsigned long flags;
spin_lock_irqsave (&midi->virtual, flags);
@@ -383,9 +383,9 @@ static void snd_wavefront_midi_output_trigger(struct snd_rawmidi_substream *subs
if (up) {
if ((midi->mode[mpu] & MPU401_MODE_OUTPUT_TRIGGER) == 0) {
if (!midi->istimer) {
- setup_timer(&midi->timer,
+ timer_setup(&midi->timer,
snd_wavefront_midi_output_timer,
- (unsigned long) substream->rmidi->card->private_data);
+ 0);
mod_timer(&midi->timer, 1 + jiffies);
}
midi->istimer++;
diff --git a/sound/oss/CHANGELOG b/sound/oss/CHANGELOG
deleted file mode 100644
index 8706cd66ca1f..000000000000
--- a/sound/oss/CHANGELOG
+++ /dev/null
@@ -1,369 +0,0 @@
-Note these changes relate to Hannu's code and don't include the changes
-made outside of this for modularising the sound
-
-Changelog for version 3.8o
---------------------------
-
-Since 3.8h
-- Included support for OPL3-SA1 and SoftOSS
-
-Since 3.8
-- Fixed SNDCTL_DSP_GETOSPACE
-- Compatibility fixes for Linux 2.1.47
-
-Since 3.8-beta21
-- Fixed all known bugs (I think).
-
-Since 3.8-beta8
-- Lot of fixes to audio playback code in dmabuf.c
-
-Since 3.8-beta6
-- Fixed the famous Quake delay bug.
-
-Since 3.8-beta5
-- Fixed many bugs in audio playback.
-
-Since 3.8-beta4
-- Just minor changes.
-
-Since 3.8-beta1
-- Major rewrite of audio playback handling.
-- Added AWE32 support by Takashi Iwai (in ./lowlevel/).
-
-Since 3.7-beta#
-- Passing of ioctl() parameters between soundcard.c and other modules has been
-changed so that arg always points to kernel space.
-- Some bugfixes.
-
-Since 3.7-beta5
-- Disabled MIDI input with GUS PnP (Interwave). There seems to be constant
-stream of received 0x00 bytes when the MIDI receiver is enabled.
-
-Since 3.5
-- Changes almost everywhere.
-- Support for OPTi 82C924-based sound cards.
-
-Since 3.5.4-beta8
-- Fixed a bug in handling of non-fragment sized writes in 16 bit/stereo mode
- with GUS.
-- Limited minimum fragment size with some audio devices (GUS=512 and
- SB=32). These devices require more time to "recover" from processing
- of each fragment.
-
-Since 3.5.4-beta6/7
-- There seems to be problems in the OPTi 82C930 so cards based on this
- chip don't necessarily work yet. There are problems in detecting the
- MIDI interface. Also mixer volumes may be seriously wrong on some systems.
- You can safely use this driver version with C930 if it looks to work.
- However please don't complain if you have problems with it. C930 support
- should be fixed in future releases.
-- Got initialization of GUS PnP to work. With this version GUS PnP should
- work in GUS compatible mode after initialization using isapnptools.
-- Fixed a bug in handling of full duplex cards in write only mode. This has
- been causing "audio device opening" errors with RealAudio player.
-
-Since 3.5.4.beta5
-- Changes to OPTi 82C930 driver.
-- Major changes to the Soundscape driver. The driver requires now just one
- DMA channel. The extra audio/dsp device (the "Not functional" one) used
- for code download in the earlier versions has been eliminated. There is now
- just one /dev/dsp# device which is used both for code download and audio.
-
-Since 3.5.4.beta4
-- Minor changes.
-
-Since 3.5.4-beta2
-- Fixed silent playback with ESS 688/1688.
-- Got SB16 to work without the 16 bit DMA channel (only the 8 bit one
- is required for 8 and 16 bit modes).
-- Added the "lowlevel" subdirectory for additional low level drivers that
- are not part of USS core. See lowlevel/README for more info.
-- Included support for ACI mixer (by Markus Kuhn). ACI is a mixer used in
- miroPCM sound cards. See lowlevel/aci.readme for more info.
-- Support for Aztech Washington chipset (AZT2316 ASIC).
-
-Since 3.5.4-beta1
-- Reduced clicking with AD1848.
-- Support for OPTi 82C930. Only half duplex at this time. 16 bit playback
- is sometimes just white noise (occurs randomly).
-
-Since 3.5.2
-- Major changes to the SB/Jazz16/ESS driver (most parts rewritten).
- The most noticeable new feature is support for multiple SB cards at the same
- time.
-- Renamed sb16_midi.c to uart401.c. Also modified it to work also with
- other MPU401 UART compatible cards than SB16/ESS/Jazz.
-- Some changes which reduce clicking in audio playback.
-- Copying policy is now GPL.
-
-Since 3.5.1
-- TB Maui initialization support
-Since 3.5
-- Improved handling of playback underrun situations.
-
-Since 3.5-beta10
-- Bug fixing
-
-Since 3.5-beta9
-- Fixed for compatibility with Linux 1.3.70 and later.
-- Changed boot time passing of 16 bit DMA channel number to SB driver.
-
-Since 3.5-beta8
-- Minor changes
-
-Since 3.5-beta7
-- enhancements to configure program (by Jeff Tranter):
- - prompts are in same format as 1.3.x Linux kernel config program
- - on-line help for each question
- - fixed some compile warnings detected by gcc/g++ -Wall
- - minor grammatical changes to prompts
-
-Since 3.5-beta6
-- Fixed bugs in mmap() support.
-- Minor changes to Maui driver.
-
-Since 3.5-beta5
-- Fixed crash after recording with ESS688. It's generally a good
- idea to stop inbound DMA transfers before freeing the memory
- buffer.
-- Fixed handling of AD1845 codec (for example Shuttle Sound System).
-- Few other fixes.
-
-Since 3.5-beta4
-- Fixed bug in handling of uninitialized instruments with GUS.
-
-Since 3.5-beta3
-- Few changes which decrease popping at end/beginning of audio playback.
-
-Since 3.5-beta2
-- Removed MAD16+CS4231 hack made in previous version since it didn't
- help.
-- Fixed the above bug in proper way and in proper place. Many thanks
- to James Hightower.
-
-Since 3.5-beta1
-- Bug fixes.
-- Full duplex audio with MAD16+CS4231 may work now. The driver configures
- SB DMA of MAD16 so that it doesn't conflict with codec's DMA channels.
- The side effect is that all 8 bit DMA channels (0,1,3) are populated in
- duplex mode.
-
-Since 3.5-alpha9
-- Bug fixes (mostly in Jazz16 and ESS1688/688 supports).
-- Temporarily disabled recording with ESS1688/688 since it causes crash.
-- Changed audio buffer partitioning algorithm so that it selects
- smaller fragment size than earlier. This improves real time capabilities
- of the driver and makes recording to disk to work better. Unfortunately
- this change breaks some programs which assume that fragments cannot be
- shorter than 4096 bytes.
-
-Since 3.5-alpha8
-- Bug fixes
-
-Since 3.5-alpha7
-- Linux kernel compatible configuration (_EXPERIMENTAL_). Enable
- using command "cd /linux/drivers/sound;make script" and then
- just run kernel's make config normally.
-- Minor fixes to the SB support. Hopefully the driver works with
- all SB models now.
-- Added support for ESS ES1688 "AudioDrive" based cards.
-
-Since 3.5-alpha6
-- SB Pro and SB16 supports are no longer separately selectable options.
- Enabling SB enables them too.
-- Changed all #ifndef EXCLUDE_xx stuff to #ifdef CONFIG_xx. Modified
-configure to handle this.
-- Removed initialization messages from the
-modularized version. They can be enabled by using init_trace=1 in
-the insmod command line (insmod sound init_trace=1).
-- More AIX stuff.
-- Added support for synchronizing dsp/audio devices with /dev/sequencer.
-- mmap() support for dsp/audio devices.
-
-Since 3.5-alpha5
-- AIX port.
-- Changed some xxx_PATCH macros in soundcard.h to work with
- big endian machines.
-
-Since 3.5-alpha4
-- Removed the 'setfx' stuff from the version distributed with kernel
- sources. Running 'setfx' is required again.
-
-Since 3.5-alpha3
-- Moved stuff from the 'setfx' program to the AudioTrix Pro driver.
-
-Since 3.5-alpha2
-- Modifications to makefile and configure.c. Unnecessary sources
- are no longer compiled. Newly created local.h is also copied to
- /etc/soundconf. "make oldconfig" reads /etc/soundconf and produces
- new local.h which is compatible with current version of the driver.
-- Some fixes to the SB16 support.
-- Fixed random protection fault in gus_wave.c
-
-Since 3.5-alpha1
-- Modified to work with Linux-1.3.33 and later
-- Some minor changes
-
-Since 3.0.2
-- Support for CS4232 based PnP cards (AcerMagic S23 etc).
-- Full duplex support for some CS4231, CS4232 and AD1845 based cards
-(GUS MAX, AudioTrix Pro, AcerMagic S23 and many MAD16/Mozart cards
-having a codec mentioned above).
-- Almost fully rewritten loadable modules support.
-- Fixed some bugs.
-- Huge amount of testing (more testing is still required).
-- mmap() support (works with some cards). Requires much more testing.
-- Sample/patch/program loading for TB Maui/Tropez. No initialization
-since TB doesn't allow me to release that code.
-- Using CS4231 compatible codecs as timer for /dev/music.
-
-Since 3.0.1
-- Added allocation of I/O ports, DMA channels and interrupts
-to the initialization code. This may break modules support since
-the driver may not free some resources on unload. Should be fixed soon.
-
-Since 3.0
-- Some important bug fixes.
-- select() for /dev/dsp and /dev/audio (Linux only).
-(To use select() with read, you have to call read() to start
-the recording. Calling write() kills recording immediately so
-use select() carefully when you are writing a half duplex app.
-Full duplex mode is not implemented yet.) Select works also with
-/dev/sequencer and /dev/music. Maybe with /dev/midi## too.
-
-Since 3.0-beta2
-- Minor fixes.
-- Added Readme.cards
-
-Since 3.0-beta1
-- Minor fixes to the modules support.
-- Eliminated call to sb_free_irq() in ad1848.c
-- Rewritten MAD16&Mozart support (not tested with MAD16 Pro).
-- Fix to DMA initialization of PSS cards.
-- Some fixes to ad1848/cs42xx mixer support (GUS MAX, MSS, etc.)
-- Fixed some bugs in the PSS driver which caused I/O errors with
- the MSS mode (/dev/dsp).
-
-Since 3.0-950506
-- Recording with GUS MAX fixed. It works when the driver is configured
- to use two DMA channels with GUS MAX (16 bit ones recommended).
-
-Since 3.0-94xxxx
-- Too many changes
-
-Since 3.0-940818
-- Fixes for Linux 1.1.4x.
-- Disables Disney Sound System with SG NX Pro 16 (less noise).
-
-Since 2.90-2
-- Fixes to soundcard.h
-- Non blocking mode to /dev/sequencer
-- Experimental detection code for Ensoniq Soundscape.
-
-Since 2.90
-- Minor and major bug fixes
-
-Since pre-3.0-940712
-- GUS MAX support
-- Partially working MSS/WSS support (could work with some cards).
-- Hardware u-Law and A-Law support with AD1848/CS4248 and CS4231 codecs
- (GUS MAX, GUS16, WSS etc). Hardware ADPCM is possible with GUS16 and
- GUS MAX, but it doesn't work yet.
-Since pre-3.0-940426
-- AD1848/CS4248/CS4231 codec support (MSS, GUS MAX, Aztec, Orchid etc).
-This codec chip is used in various sound cards. This version is developed
-for the 16 bit daughtercard of GUS. It should work with other cards also
-if the following requirements are met:
- - The I/O, IRQ and DMA settings are jumper selectable or
- the card is initialized by booting DOS before booting Linux (etc.).
- - You add the IO, IRQ and DMA settings manually to the local.h.
- (Just define GUS16_BASE, GUS16_IRQ and GUS16_DMA). Note that
- the base address bust be the base address of the codec chip not the
- card itself. For the GUS16 these are the same but most MSS compatible
- cards have the codec located at card_base+4.
-- Some minor changes
-
-Since 2.5 (******* MAJOR REWRITE ***********)
-
-This version is based on v2.3. I have tried to maintain two versions
-together so that this one should have the same features than v2.5.
-Something may still be missing. If you notice such things, please let me
-know.
-
-The Readme.v30 contains more details.
-
-- /dev/midi## devices.
-- /dev/sequencer2
-
-Since 2.5-beta2
-- Some fine tuning to the GUS v3.7 mixer code.
-- Fixed speed limits for the plain SB (1.0 to 2.0).
-
-Since 2.5-beta
-- Fixed OPL-3 detection with SB. Caused problems with PAS16.
-- GUS v3.7 mixer support.
-
-Since 2.4
-- Mixer support for Sound Galaxy NX Pro (define __SGNXPRO__ on your local.h).
-- Fixed truncated sound on /dev/dsp when the device is closed.
-- Linear volume mode for GUS
-- Pitch bends larger than +/- 2 octaves.
-- MIDI recording for SB and SB Pro. (Untested).
-- Some other fixes.
-- SB16 MIDI and DSP drivers only initialized if SB16 actually installed.
-- Implemented better detection for OPL-3. This should be useful if you
- have an old SB Pro (the non-OPL-3 one) or a SB 2.0 clone which has a OPL-3.
-- SVR4.2 support by Ian Hartas. Initial ALPHA TEST version (untested).
-
-Since 2.3b
-- Fixed bug which made it impossible to make long recordings to disk.
- Recording was not restarted after a buffer overflow situation.
-- Limited mixer support for GUS.
-- Numerous improvements to the GUS driver by Andrew Robinson. Including
- some click removal etc.
-
-Since 2.3
-- Fixed some minor bugs in the SB16 driver.
-
-Since 2.2b
-- Full SB16 DSP support. 8/16 bit, mono/stereo
-- The SCO and FreeBSD versions should be in sync now. There are some
- problems with SB16 and GUS in the FreeBSD versions.
- The DMA buffer allocation of the SCO version has been polished but
- there could still be some problems. At least it hogs memory.
- The DMA channel
- configuration method used in the SCO/System is a hack.
-- Support for the MPU emulation of the SB16.
-- Some big arrays are now allocated boot time. This makes the BSS segment
- smaller which makes it possible to use the full driver with
- NetBSD. These arrays are not allocated if no suitable sound card is available.
-- Fixed a bug in the compute_and_set_volume in gus_wave.c
-- Fixed the too fast mono playback problem of SB Pro and PAS16.
-
-Since 2.2
-- Stereo recording for SB Pro. Somehow it was missing and nobody
- had noticed it earlier.
-- Minor polishing.
-- Interpreting of boot time arguments (sound=) for Linux.
-- Breakup of sb_dsp.c. Parts of the code has been moved to
- sb_mixer.c and sb_midi.c
-
-Since 2.1
-- Preliminary support for SB16.
- - The SB16 mixer is supported in its native mode.
- - Digitized voice capability up to 44.1 kHz/8 bit/mono
- (16 bit and stereo support coming in the next release).
-- Fixed some bugs in the digitized voice driver for PAS16.
-- Proper initialization of the SB emulation of latest PAS16 models.
-
-- Significantly improved /dev/dsp and /dev/audio support.
- - Now supports half duplex mode. It's now possible to record and
- playback without closing and reopening the device.
- - It's possible to use smaller buffers than earlier. There is a new
- ioctl(fd, SNDCTL_DSP_SUBDIVIDE, &n) where n should be 1, 2 or 4.
- This call instructs the driver to use smaller buffers. The default
- buffer size (0.5 to 1.0 seconds) is divided by n. Should be called
- immediately after opening the device.
-
-Since 2.0
-Just cosmetic changes.
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
deleted file mode 100644
index 4033fe58f0cf..000000000000
--- a/sound/oss/Kconfig
+++ /dev/null
@@ -1,533 +0,0 @@
-# 18 Apr 1998, Michael Elizabeth Chastain, <mailto:mec@shout.net>
-# More hacking for modularisation.
-#
-# Prompt user for primary drivers.
-
-config SOUND_BCM_CS4297A
- tristate "Crystal Sound CS4297a (for Swarm)"
- depends on SIBYTE_SWARM
- help
- The BCM91250A has a Crystal CS4297a on synchronous serial
- port B (in addition to the DB-9 serial port). Say Y or M
- here to enable the sound chip instead of the UART. Also
- note that CONFIG_KGDB should not be enabled at the same
- time, since it also attempts to use this UART port.
-
-config SOUND_MSNDCLAS
- tristate "Support for Turtle Beach MultiSound Classic, Tahiti, Monterey"
- depends on (m || !STANDALONE) && ISA
- help
- Say M here if you have a Turtle Beach MultiSound Classic, Tahiti or
- Monterey (not for the Pinnacle or Fiji).
-
- See <file:Documentation/sound/oss/MultiSound> for important information
- about this driver. Note that it has been discontinued, but the
- Voyetra Turtle Beach knowledge base entry for it is still available
- at <http://www.turtlebeach.com/site/kb_ftp/790.asp>.
-
-comment "Compiled-in MSND Classic support requires firmware during compilation."
- depends on SOUND_PRIME && SOUND_MSNDCLAS=y
-
-config MSNDCLAS_HAVE_BOOT
- bool
- depends on SOUND_MSNDCLAS=y && !STANDALONE
- default y
-
-config MSNDCLAS_INIT_FILE
- string "Full pathname of MSNDINIT.BIN firmware file"
- depends on SOUND_MSNDCLAS
- default "/etc/sound/msndinit.bin"
- help
- The MultiSound cards have two firmware files which are required for
- operation, and are not currently included. These files can be
- obtained from Turtle Beach. See
- <file:Documentation/sound/oss/MultiSound> for information on how to
- obtain this.
-
-config MSNDCLAS_PERM_FILE
- string "Full pathname of MSNDPERM.BIN firmware file"
- depends on SOUND_MSNDCLAS
- default "/etc/sound/msndperm.bin"
- help
- The MultiSound cards have two firmware files which are required for
- operation, and are not currently included. These files can be
- obtained from Turtle Beach. See
- <file:Documentation/sound/oss/MultiSound> for information on how to
- obtain this.
-
-config MSNDCLAS_IRQ
- int "MSND Classic IRQ 5, 7, 9, 10, 11, 12"
- depends on SOUND_MSNDCLAS=y
- default "5"
- help
- Interrupt Request line for the MultiSound Classic and related cards.
-
-config MSNDCLAS_MEM
- hex "MSND Classic memory B0000, C8000, D0000, D8000, E0000, E8000"
- depends on SOUND_MSNDCLAS=y
- default "D0000"
- help
- Memory-mapped I/O base address for the MultiSound Classic and
- related cards.
-
-config MSNDCLAS_IO
- hex "MSND Classic I/O 210, 220, 230, 240, 250, 260, 290, 3E0"
- depends on SOUND_MSNDCLAS=y
- default "290"
- help
- I/O port address for the MultiSound Classic and related cards.
-
-config SOUND_MSNDPIN
- tristate "Support for Turtle Beach MultiSound Pinnacle, Fiji"
- depends on (m || !STANDALONE) && ISA
- help
- Say M here if you have a Turtle Beach MultiSound Pinnacle or Fiji.
- See <file:Documentation/sound/oss/MultiSound> for important information
- about this driver. Note that it has been discontinued, but the
- Voyetra Turtle Beach knowledge base entry for it is still available
- at <http://www.turtlebeach.com/site/kb_ftp/600.asp>.
-
-comment "Compiled-in MSND Pinnacle support requires firmware during compilation."
- depends on SOUND_PRIME && SOUND_MSNDPIN=y
-
-config MSNDPIN_HAVE_BOOT
- bool
- depends on SOUND_MSNDPIN=y
- default y
-
-config MSNDPIN_INIT_FILE
- string "Full pathname of PNDSPINI.BIN firmware file"
- depends on SOUND_MSNDPIN
- default "/etc/sound/pndspini.bin"
- help
- The MultiSound cards have two firmware files which are required
- for operation, and are not currently included. These files can be
- obtained from Turtle Beach. See
- <file:Documentation/sound/oss/MultiSound> for information on how to
- obtain this.
-
-config MSNDPIN_PERM_FILE
- string "Full pathname of PNDSPERM.BIN firmware file"
- depends on SOUND_MSNDPIN
- default "/etc/sound/pndsperm.bin"
- help
- The MultiSound cards have two firmware files which are required for
- operation, and are not currently included. These files can be
- obtained from Turtle Beach. See
- <file:Documentation/sound/oss/MultiSound> for information on how to
- obtain this.
-
-config MSNDPIN_IRQ
- int "MSND Pinnacle IRQ 5, 7, 9, 10, 11, 12"
- depends on SOUND_MSNDPIN=y
- default "5"
- help
- Interrupt request line for the primary synthesizer on MultiSound
- Pinnacle and Fiji sound cards.
-
-config MSNDPIN_MEM
- hex "MSND Pinnacle memory B0000, C8000, D0000, D8000, E0000, E8000"
- depends on SOUND_MSNDPIN=y
- default "D0000"
- help
- Memory-mapped I/O base address for the primary synthesizer on
- MultiSound Pinnacle and Fiji sound cards.
-
-config MSNDPIN_IO
- hex "MSND Pinnacle I/O 210, 220, 230, 240, 250, 260, 290, 3E0"
- depends on SOUND_MSNDPIN=y
- default "290"
- help
- Memory-mapped I/O base address for the primary synthesizer on
- MultiSound Pinnacle and Fiji sound cards.
-
-config MSNDPIN_DIGITAL
- bool "MSND Pinnacle has S/PDIF I/O"
- depends on SOUND_MSNDPIN=y
- help
- If you have the S/PDIF daughter board for the Pinnacle or Fiji,
- answer Y here; otherwise, say N. If you have this, you will be able
- to play and record from the S/PDIF port (digital signal). See
- <file:Documentation/sound/oss/MultiSound> for information on how to make
- use of this capability.
-
-config MSNDPIN_NONPNP
- bool "MSND Pinnacle non-PnP Mode"
- depends on SOUND_MSNDPIN=y
- help
- The Pinnacle and Fiji card resources can be configured either with
- PnP, or through a configuration port. Say Y here if your card is NOT
- in PnP mode. For the Pinnacle, configuration in non-PnP mode allows
- use of the IDE and joystick peripherals on the card as well; these
- do not show up when the card is in PnP mode. Specifying zero for any
- resource of a device will disable the device. If you are running the
- card in PnP mode, you must say N here and use isapnptools to
- configure the card's resources.
-
-comment "MSND Pinnacle DSP section will be configured to above parameters."
- depends on SOUND_MSNDPIN=y && MSNDPIN_NONPNP
-
-config MSNDPIN_CFG
- hex "MSND Pinnacle config port 250,260,270"
- depends on MSNDPIN_NONPNP
- default "250"
- help
- This is the port which the Pinnacle and Fiji uses to configure the
- card's resources when not in PnP mode. If your card is in PnP mode,
- then be sure to say N to the previous option, "MSND Pinnacle Non-PnP
- Mode".
-
-comment "Pinnacle-specific Device Configuration (0 disables)"
- depends on SOUND_MSNDPIN=y && MSNDPIN_NONPNP
-
-config MSNDPIN_MPU_IO
- hex "MSND Pinnacle MPU I/O (e.g. 330)"
- depends on MSNDPIN_NONPNP
- default "0"
- help
- Memory-mapped I/O base address for the Kurzweil daughterboard
- synthesizer on MultiSound Pinnacle and Fiji sound cards.
-
-config MSNDPIN_MPU_IRQ
- int "MSND Pinnacle MPU IRQ (e.g. 9)"
- depends on MSNDPIN_NONPNP
- default "0"
- help
- Interrupt request number for the Kurzweil daughterboard
- synthesizer on MultiSound Pinnacle and Fiji sound cards.
-
-config MSNDPIN_IDE_IO0
- hex "MSND Pinnacle IDE I/O 0 (e.g. 170)"
- depends on MSNDPIN_NONPNP
- default "0"
- help
- CD-ROM drive 0 memory-mapped I/O base address for the MultiSound
- Pinnacle and Fiji sound cards.
-
-config MSNDPIN_IDE_IO1
- hex "MSND Pinnacle IDE I/O 1 (e.g. 376)"
- depends on MSNDPIN_NONPNP
- default "0"
- help
- CD-ROM drive 1 memory-mapped I/O base address for the MultiSound
- Pinnacle and Fiji sound cards.
-
-config MSNDPIN_IDE_IRQ
- int "MSND Pinnacle IDE IRQ (e.g. 15)"
- depends on MSNDPIN_NONPNP
- default "0"
- help
- Interrupt request number for the IDE CD-ROM interface on the
- MultiSound Pinnacle and Fiji sound cards.
-
-config MSNDPIN_JOYSTICK_IO
- hex "MSND Pinnacle joystick I/O (e.g. 200)"
- depends on MSNDPIN_NONPNP
- default "0"
- help
- Memory-mapped I/O base address for the joystick port on MultiSound
- Pinnacle and Fiji sound cards.
-
-config MSND_FIFOSIZE
- int "MSND buffer size (kB)"
- depends on SOUND_MSNDPIN=y || SOUND_MSNDCLAS=y
- default "128"
- help
- Configures the size of each audio buffer, in kilobytes, for
- recording and playing in the MultiSound drivers (both the Classic
- and Pinnacle). Larger values reduce the chance of data overruns at
- the expense of overall latency. If unsure, use the default.
-
-menuconfig SOUND_OSS
- tristate "OSS sound modules"
- depends on ISA_DMA_API && (VIRT_TO_BUS || ARCH_RPC || ARCH_NETWINDER)
- depends on !GENERIC_ISA_DMA_SUPPORT_BROKEN
- help
- OSS is the Open Sound System suite of sound card drivers. They make
- sound programming easier since they provide a common API. Say Y or
- M here (the module will be called sound) if you haven't found a
- driver for your sound card above, then pick your driver from the
- list below.
-
-if SOUND_OSS
-
-config SOUND_TRACEINIT
- bool "Verbose initialisation"
- help
- Verbose soundcard initialization -- affects the format of autoprobe
- and initialization messages at boot time.
-
-config SOUND_DMAP
- bool "Persistent DMA buffers"
- ---help---
- Linux can often have problems allocating DMA buffers for ISA sound
- cards on machines with more than 16MB of RAM. This is because ISA
- DMA buffers must exist below the 16MB boundary and it is quite
- possible that a large enough free block in this region cannot be
- found after the machine has been running for a while. If you say Y
- here the DMA buffers (64Kb) will be allocated at boot time and kept
- until the shutdown. This option is only useful if you said Y to
- "OSS sound modules", above. If you said M to "OSS sound modules"
- then you can get the persistent DMA buffer functionality by passing
- the command-line argument "dmabuf=1" to the sound module.
-
- Say Y unless you have 16MB or more RAM or a PCI sound card.
-
-config SOUND_VMIDI
- tristate "Loopback MIDI device support"
- help
- Support for MIDI loopback on port 1 or 2.
-
-config SOUND_TRIX
- tristate "MediaTrix AudioTrix Pro support"
- help
- Answer Y if you have the AudioTriX Pro sound card manufactured
- by MediaTrix.
-
-config TRIX_HAVE_BOOT
- bool "Have TRXPRO.HEX firmware file"
- depends on SOUND_TRIX=y && !STANDALONE
- help
- The MediaTrix AudioTrix Pro has an on-board microcontroller which
- needs to be initialized by downloading the code from the file
- TRXPRO.HEX in the DOS driver directory. If you don't have the
- TRXPRO.HEX file handy you may skip this step. However, the SB and
- MPU-401 modes of AudioTrix Pro will not work without this file!
-
-config TRIX_BOOT_FILE
- string "Full pathname of TRXPRO.HEX firmware file"
- depends on TRIX_HAVE_BOOT
- default "/etc/sound/trxpro.hex"
- help
- Enter the full pathname of your TRXPRO.HEX file, starting from /.
-
-config SOUND_MSS
- tristate "Microsoft Sound System support"
- ---help---
- Again think carefully before answering Y to this question. It's
- safe to answer Y if you have the original Windows Sound System card
- made by Microsoft or Aztech SG 16 Pro (or NX16 Pro). Also you may
- say Y in case your card is NOT among these:
-
- ATI Stereo F/X, AdLib, Audio Excell DSP16, Cardinal DSP16,
- Ensoniq SoundScape (and compatibles made by Reveal and Spea),
- Gravis Ultrasound, Gravis Ultrasound ACE, Gravis Ultrasound Max,
- Gravis Ultrasound with 16 bit option, Logitech Sound Man 16,
- Logitech SoundMan Games, Logitech SoundMan Wave, MAD16 Pro (OPTi
- 82C929), Media Vision Jazz16, MediaTriX AudioTriX Pro, Microsoft
- Windows Sound System (MSS/WSS), Mozart (OAK OTI-601), Orchid
- SW32, Personal Sound System (PSS), Pro Audio Spectrum 16, Pro
- Audio Studio 16, Pro Sonic 16, Roland MPU-401 MIDI interface,
- Sound Blaster 1.0, Sound Blaster 16, Sound Blaster 16ASP, Sound
- Blaster 2.0, Sound Blaster AWE32, Sound Blaster Pro, TI TM4000M
- notebook, ThunderBoard, Turtle Beach Tropez, Yamaha FM
- synthesizers (OPL2, OPL3 and OPL4), 6850 UART MIDI Interface.
-
- For cards having native support in VoxWare, consult the card
- specific instructions in <file:Documentation/sound/oss/README.OSS>.
- Some drivers have their own MSS support and saying Y to this option
- will cause a conflict.
-
- If you compile the driver into the kernel, you have to add
- "ad1848=<io>,<irq>,<dma>,<dma2>[,<type>]" to the kernel command
- line.
-
-config SOUND_MPU401
- tristate "MPU-401 support (NOT for SB16)"
- ---help---
- Be careful with this question. The MPU401 interface is supported by
- all sound cards. However, some natively supported cards have their
- own driver for MPU401. Enabling this MPU401 option with these cards
- will cause a conflict. Also, enabling MPU401 on a system that
- doesn't really have a MPU401 could cause some trouble. If your card
- was in the list of supported cards, look at the card specific
- instructions in the <file:Documentation/sound/oss/README.OSS> file. It
- is safe to answer Y if you have a true MPU401 MIDI interface card.
-
- If you compile the driver into the kernel, you have to add
- "mpu401=<io>,<irq>" to the kernel command line.
-
-config SOUND_PAS
- tristate "ProAudioSpectrum 16 support"
- ---help---
- Answer Y only if you have a Pro Audio Spectrum 16, ProAudio Studio
- 16 or Logitech SoundMan 16 sound card. Answer N if you have some
- other card made by Media Vision or Logitech since those are not
- PAS16 compatible. Please read <file:Documentation/sound/oss/PAS16>.
- It is not necessary to add Sound Blaster support separately; it
- is included in PAS support.
-
- If you compile the driver into the kernel, you have to add
- "pas2=<io>,<irq>,<dma>,<dma2>,<sbio>,<sbirq>,<sbdma>,<sbdma2>
- to the kernel command line.
-
-config PAS_JOYSTICK
- bool "Enable PAS16 joystick port"
- depends on SOUND_PAS=y
- help
- Say Y here to enable the Pro Audio Spectrum 16's auxiliary joystick
- port.
-
-config SOUND_PSS
- tristate "PSS (AD1848, ADSP-2115, ESC614) support"
- help
- Answer Y or M if you have an Orchid SW32, Cardinal DSP16, Beethoven
- ADSP-16 or some other card based on the PSS chipset (AD1848 codec +
- ADSP-2115 DSP chip + Echo ESC614 ASIC CHIP). For more information on
- how to compile it into the kernel or as a module see the file
- <file:Documentation/sound/oss/PSS>.
-
- If you compile the driver into the kernel, you have to add
- "pss=<io>,<mssio>,<mssirq>,<mssdma>,<mpuio>,<mpuirq>" to the kernel
- command line.
-
-config PSS_MIXER
- bool "Enable PSS mixer (Beethoven ADSP-16 and other compatible)"
- depends on SOUND_PSS
- help
- Answer Y for Beethoven ADSP-16. You may try to say Y also for other
- cards if they have master volume, bass, treble, and you can't
- control it under Linux. If you answer N for Beethoven ADSP-16, you
- can't control master volume, bass, treble and synth volume.
-
- If you said M to "PSS support" above, you may enable or disable this
- PSS mixer with the module parameter pss_mixer. For more information
- see the file <file:Documentation/sound/oss/PSS>.
-
-config PSS_HAVE_BOOT
- bool "Have DSPxxx.LD firmware file"
- depends on SOUND_PSS && !STANDALONE
- help
- If you have the DSPxxx.LD file or SYNTH.LD file for you card, say Y
- to include this file. Without this file the synth device (OPL) may
- not work.
-
-config PSS_BOOT_FILE
- string "Full pathname of DSPxxx.LD firmware file"
- depends on PSS_HAVE_BOOT
- default "/etc/sound/dsp001.ld"
- help
- Enter the full pathname of your DSPxxx.LD file or SYNTH.LD file,
- starting from /.
-
-config SOUND_SB
- tristate "100% Sound Blaster compatibles (SB16/32/64, ESS, Jazz16) support"
- ---help---
- Answer Y if you have an original Sound Blaster card made by Creative
- Labs or a 100% hardware compatible clone (like the Thunderboard or
- SM Games). For an unknown card you may answer Y if the card claims
- to be Sound Blaster-compatible.
-
- Please read the file <file:Documentation/sound/oss/Soundblaster>.
-
- You should also say Y here for cards based on the Avance Logic
- ALS-007 and ALS-1X0 chips (read <file:Documentation/sound/oss/ALS>) and
- for cards based on ESS chips (read
- <file:Documentation/sound/oss/ESS1868> and
- <file:Documentation/sound/oss/ESS>). If you have an IBM Mwave
- card, say Y here and read <file:Documentation/sound/oss/mwave>.
-
- If you compile the driver into the kernel and don't want to use
- isapnp, you have to add "sb=<io>,<irq>,<dma>,<dma2>" to the kernel
- command line.
-
- You can say M here to compile this driver as a module; the module is
- called sb.
-
-config SOUND_YM3812
- tristate "Yamaha FM synthesizer (YM3812/OPL-3) support"
- ---help---
- Answer Y if your card has an FM chip made by Yamaha (OPL2/OPL3/OPL4).
- Answering Y is usually a safe and recommended choice; however, some
- cards may have software (TSR) FM emulation. Enabling FM support with
- these cards may cause trouble (I don't currently know of any such
- cards, however). Please read the file
- <file:Documentation/sound/oss/OPL3> if your card has an OPL3 chip.
-
- If you compile the driver into the kernel, you have to add
- "opl3=<io>" to the kernel command line.
-
- If unsure, say Y.
-
-config SOUND_UART6850
- tristate "6850 UART support"
- help
- This option enables support for MIDI interfaces based on the 6850
- UART chip. This interface is rarely found on sound cards. It's safe
- to answer N to this question.
-
- If you compile the driver into the kernel, you have to add
- "uart6850=<io>,<irq>" to the kernel command line.
-
-config SOUND_AEDSP16
- tristate "Gallant Audio Cards (SC-6000 and SC-6600 based)"
- ---help---
- Answer Y if you have a Gallant's Audio Excel DSP 16 card. This
- driver supports Audio Excel DSP 16 but not the III or PnP versions
- of this card.
-
- The Gallant's Audio Excel DSP 16 card can emulate either an SBPro or
- a Microsoft Sound System card, so you should have said Y to either
- "100% Sound Blaster compatibles (SB16/32/64, ESS, Jazz16) support"
- or "Microsoft Sound System support", above, and you need to answer
- the "MSS emulation" and "SBPro emulation" questions below
- accordingly. You should say Y to one and only one of these two
- questions.
-
- Read the <file:Documentation/sound/oss/README.OSS> file and the head of
- <file:sound/oss/aedsp16.c> as well as
- <file:Documentation/sound/oss/AudioExcelDSP16> to get more information
- about this driver and its configuration.
-
-config SC6600
- bool "SC-6600 based audio cards (new Audio Excel DSP 16)"
- depends on SOUND_AEDSP16
- help
- The SC6600 is the new version of the DSP mounted on the Audio Excel
- DSP 16 cards. Find the FCC ID of your audio card in the manual and
- answer Y if you have an SC6600 DSP.
-
-config SC6600_JOY
- bool "Activate SC-6600 Joystick Interface"
- depends on SC6600
- help
- Say Y here in order to use the joystick interface of the Audio Excel
- DSP 16 card.
-
-config SC6600_CDROM
- int "SC-6600 CDROM Interface (4=None, 3=IDE, 1=Panasonic, 0=?Sony?)"
- depends on SC6600
- default "4"
- help
- This is used to activate the CD-ROM interface of the Audio Excel
- DSP 16 card. Enter: 0 for Sony, 1 for Panasonic, 2 for IDE, 4 for no
- CD-ROM present.
-
-config SC6600_CDROMBASE
- hex "SC-6600 CDROM Interface I/O Address"
- depends on SC6600
- default "0"
- help
- Base I/O port address for the CD-ROM interface of the Audio Excel
- DSP 16 card.
-
-config SOUND_VIDC
- tristate "VIDC 16-bit sound"
- depends on ARM && ARCH_ACORN
- help
- 16-bit support for the VIDC onboard sound hardware found on Acorn
- machines.
-
-config SOUND_WAVEARTIST
- tristate "Netwinder WaveArtist"
- depends on ARM && ARCH_NETWINDER
- help
- Say Y here to include support for the Rockwell WaveArtist sound
- system. This driver is mainly for the NetWinder.
-
-config SOUND_KAHLUA
- tristate "XpressAudio Sound Blaster emulation"
- depends on SOUND_SB
-
-endif # SOUND_OSS
-
diff --git a/sound/oss/Makefile b/sound/oss/Makefile
deleted file mode 100644
index 6564eace4749..000000000000
--- a/sound/oss/Makefile
+++ /dev/null
@@ -1,108 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Makefile for the Linux sound card driver
-#
-# 18 Apr 1998, Michael Elizabeth Chastain, <mailto:mec@shout.net>
-# Rewritten to use lists instead of if-statements.
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_SOUND_OSS) += sound.o
-
-# Please leave it as is, because the link order is significant!
-
-obj-$(CONFIG_SOUND_AEDSP16) += aedsp16.o
-obj-$(CONFIG_SOUND_PSS) += pss.o ad1848.o mpu401.o
-obj-$(CONFIG_SOUND_TRIX) += trix.o ad1848.o sb_lib.o uart401.o
-obj-$(CONFIG_SOUND_MSS) += ad1848.o
-obj-$(CONFIG_SOUND_PAS) += pas2.o sb.o sb_lib.o uart401.o
-obj-$(CONFIG_SOUND_SB) += sb.o sb_lib.o uart401.o
-obj-$(CONFIG_SOUND_KAHLUA) += kahlua.o
-obj-$(CONFIG_SOUND_MPU401) += mpu401.o
-obj-$(CONFIG_SOUND_UART6850) += uart6850.o
-obj-$(CONFIG_SOUND_YM3812) += opl3.o
-obj-$(CONFIG_SOUND_VMIDI) += v_midi.o
-obj-$(CONFIG_SOUND_VIDC) += vidc_mod.o
-obj-$(CONFIG_SOUND_WAVEARTIST) += waveartist.o
-obj-$(CONFIG_SOUND_MSNDCLAS) += msnd.o msnd_classic.o
-obj-$(CONFIG_SOUND_MSNDPIN) += msnd.o msnd_pinnacle.o
-obj-$(CONFIG_SOUND_BCM_CS4297A) += swarm_cs4297a.o
-
-obj-$(CONFIG_DMASOUND) += dmasound/
-
-# Declare multi-part drivers.
-
-sound-objs := \
- dev_table.o soundcard.o \
- audio.o dmabuf.o \
- midi_synth.o midibuf.o \
- sequencer.o sound_timer.o sys_timer.o
-
-pas2-objs := pas2_card.o pas2_midi.o pas2_mixer.o pas2_pcm.o
-sb-objs := sb_card.o
-sb_lib-objs := sb_common.o sb_audio.o sb_midi.o sb_mixer.o sb_ess.o
-vidc_mod-objs := vidc.o vidc_fill.o
-
-hostprogs-y := bin2hex hex2hex
-
-# Files generated that shall be removed upon make clean
-clean-files := msndperm.c msndinit.c pndsperm.c pndspini.c \
- pss_boot.h trix_boot.h
-
-# Firmware files that need translation
-#
-# The translated files are protected by a file that keeps track
-# of what name was used to build them. If the name changes, they
-# will be forced to be remade.
-#
-
-# Turtle Beach MultiSound
-
-ifeq ($(CONFIG_MSNDCLAS_HAVE_BOOT),y)
- $(obj)/msnd_classic.o: $(obj)/msndperm.c $(obj)/msndinit.c
-
- $(obj)/msndperm.c: $(patsubst "%", %, $(CONFIG_MSNDCLAS_PERM_FILE)) $(obj)/bin2hex
- $(obj)/bin2hex msndperm < $< > $@
-
- $(obj)/msndinit.c: $(patsubst "%", %, $(CONFIG_MSNDCLAS_INIT_FILE)) $(obj)/bin2hex
- $(obj)/bin2hex msndinit < $< > $@
-endif
-
-ifeq ($(CONFIG_MSNDPIN_HAVE_BOOT),y)
- $(obj)/msnd_pinnacle.o: $(obj)/pndsperm.c $(obj)/pndspini.c
-
- $(obj)/pndsperm.c: $(patsubst "%", %, $(CONFIG_MSNDPIN_PERM_FILE)) $(obj)/bin2hex
- $(obj)/bin2hex pndsperm < $< > $@
-
- $(obj)/pndspini.c: $(patsubst "%", %, $(CONFIG_MSNDPIN_INIT_FILE)) $(obj)/bin2hex
- $(obj)/bin2hex pndspini < $< > $@
-endif
-
-# PSS (ECHO-ADI2111)
-
-$(obj)/pss.o: $(obj)/pss_boot.h
-
-ifeq ($(CONFIG_PSS_HAVE_BOOT),y)
- $(obj)/pss_boot.h: $(patsubst "%", %, $(CONFIG_PSS_BOOT_FILE)) $(obj)/bin2hex
- $(obj)/bin2hex pss_synth < $< > $@
-else
- $(obj)/pss_boot.h:
- $(Q)( \
- echo 'static unsigned char * pss_synth = NULL;'; \
- echo 'static int pss_synthLen = 0;'; \
- ) > $@
-endif
-
-# MediaTrix AudioTrix Pro
-
-$(obj)/trix.o: $(obj)/trix_boot.h
-
-ifeq ($(CONFIG_TRIX_HAVE_BOOT),y)
- $(obj)/trix_boot.h: $(patsubst "%", %, $(CONFIG_TRIX_BOOT_FILE)) $(obj)/hex2hex
- $(obj)/hex2hex -i trix_boot < $< > $@
-else
- $(obj)/trix_boot.h:
- $(Q)( \
- echo 'static unsigned char * trix_boot = NULL;'; \
- echo 'static int trix_boot_len = 0;'; \
- ) > $@
-endif
diff --git a/sound/oss/README.FIRST b/sound/oss/README.FIRST
deleted file mode 100644
index 90fdcf063d2d..000000000000
--- a/sound/oss/README.FIRST
+++ /dev/null
@@ -1,6 +0,0 @@
-The modular sound driver patches were funded by Red Hat Software
-(www.redhat.com). The sound driver here is thus a modified version of
-Hannu's code. Please bear that in mind when considering the appropriate
-forums for bug reporting.
-
-Alan Cox
diff --git a/sound/oss/ad1848.c b/sound/oss/ad1848.c
deleted file mode 100644
index 2421f59cf279..000000000000
--- a/sound/oss/ad1848.c
+++ /dev/null
@@ -1,3062 +0,0 @@
-/*
- * sound/oss/ad1848.c
- *
- * The low level driver for the AD1848/CS4248 codec chip which
- * is used for example in the MS Sound System.
- *
- * The CS4231 which is used in the GUS MAX and some other cards is
- * upwards compatible with AD1848 and this driver is able to drive it.
- *
- * CS4231A and AD1845 are upward compatible with CS4231. However
- * the new features of these chips are different.
- *
- * CS4232 is a PnP audio chip which contains a CS4231A (and SB, MPU).
- * CS4232A is an improved version of CS4232.
- *
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- *
- * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
- * general sleep/wakeup clean up.
- * Alan Cox : reformatted. Fixed SMP bugs. Moved to kernel alloc/free
- * of irqs. Use dev_id.
- * Christoph Hellwig : adapted to module_init/module_exit
- * Aki Laukkanen : added power management support
- * Arnaldo C. de Melo : added missing restore_flags in ad1848_resume
- * Miguel Freitas : added ISA PnP support
- * Alan Cox : Added CS4236->4239 identification
- * Daniel T. Cobra : Alternate config/mixer for later chips
- * Alan Cox : Merged chip idents and config code
- *
- * TODO
- * APM save restore assist code on IBM thinkpad
- *
- * Status:
- * Tested. Believed fully functional.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/stddef.h>
-#include <linux/slab.h>
-#include <linux/isapnp.h>
-#include <linux/pnp.h>
-#include <linux/spinlock.h>
-
-#include "sound_config.h"
-
-#include "ad1848.h"
-#include "ad1848_mixer.h"
-
-typedef struct
-{
- spinlock_t lock;
- int base;
- int irq;
- int dma1, dma2;
- int dual_dma; /* 1, when two DMA channels allocated */
- int subtype;
- unsigned char MCE_bit;
- unsigned char saved_regs[64]; /* Includes extended register space */
- int debug_flag;
-
- int audio_flags;
- int record_dev, playback_dev;
-
- int xfer_count;
- int audio_mode;
- int open_mode;
- int intr_active;
- char *chip_name, *name;
- int model;
-#define MD_1848 1
-#define MD_4231 2
-#define MD_4231A 3
-#define MD_1845 4
-#define MD_4232 5
-#define MD_C930 6
-#define MD_IWAVE 7
-#define MD_4235 8 /* Crystal Audio CS4235 */
-#define MD_1845_SSCAPE 9 /* Ensoniq Soundscape PNP*/
-#define MD_4236 10 /* 4236 and higher */
-#define MD_42xB 11 /* CS 42xB */
-#define MD_4239 12 /* CS4239 */
-
- /* Mixer parameters */
- int recmask;
- int supported_devices, orig_devices;
- int supported_rec_devices, orig_rec_devices;
- int *levels;
- short mixer_reroute[32];
- int dev_no;
- volatile unsigned long timer_ticks;
- int timer_running;
- int irq_ok;
- mixer_ents *mix_devices;
- int mixer_output_port;
-} ad1848_info;
-
-typedef struct ad1848_port_info
-{
- int open_mode;
- int speed;
- unsigned char speed_bits;
- int channels;
- int audio_format;
- unsigned char format_bits;
-}
-ad1848_port_info;
-
-static struct address_info cfg;
-static int nr_ad1848_devs;
-
-static bool deskpro_xl;
-static bool deskpro_m;
-static bool soundpro;
-
-#ifndef EXCLUDE_TIMERS
-static int timer_installed = -1;
-#endif
-
-static int loaded;
-
-static int ad_format_mask[13 /*devc->model */ ] =
-{
- 0,
- AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW,
- AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM,
- AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM,
- AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW, /* AD1845 */
- AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM,
- AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM,
- AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM,
- AFMT_U8 | AFMT_S16_LE /* CS4235 */,
- AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW /* Ensoniq Soundscape*/,
- AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM,
- AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM,
- AFMT_U8 | AFMT_S16_LE | AFMT_MU_LAW | AFMT_A_LAW | AFMT_S16_BE | AFMT_IMA_ADPCM
-};
-
-static ad1848_info adev_info[MAX_AUDIO_DEV];
-
-#define io_Index_Addr(d) ((d)->base)
-#define io_Indexed_Data(d) ((d)->base+1)
-#define io_Status(d) ((d)->base+2)
-#define io_Polled_IO(d) ((d)->base+3)
-
-static struct {
- unsigned char flags;
-#define CAP_F_TIMER 0x01
-} capabilities [10 /*devc->model */ ] = {
- {0}
- ,{0} /* MD_1848 */
- ,{CAP_F_TIMER} /* MD_4231 */
- ,{CAP_F_TIMER} /* MD_4231A */
- ,{CAP_F_TIMER} /* MD_1845 */
- ,{CAP_F_TIMER} /* MD_4232 */
- ,{0} /* MD_C930 */
- ,{CAP_F_TIMER} /* MD_IWAVE */
- ,{0} /* MD_4235 */
- ,{CAP_F_TIMER} /* MD_1845_SSCAPE */
-};
-
-#ifdef CONFIG_PNP
-static int isapnp = 1;
-static int isapnpjump;
-static bool reverse;
-
-static int audio_activated;
-#else
-static int isapnp;
-#endif
-
-
-
-static int ad1848_open(int dev, int mode);
-static void ad1848_close(int dev);
-static void ad1848_output_block(int dev, unsigned long buf, int count, int intrflag);
-static void ad1848_start_input(int dev, unsigned long buf, int count, int intrflag);
-static int ad1848_prepare_for_output(int dev, int bsize, int bcount);
-static int ad1848_prepare_for_input(int dev, int bsize, int bcount);
-static void ad1848_halt(int dev);
-static void ad1848_halt_input(int dev);
-static void ad1848_halt_output(int dev);
-static void ad1848_trigger(int dev, int bits);
-static irqreturn_t adintr(int irq, void *dev_id);
-
-#ifndef EXCLUDE_TIMERS
-static int ad1848_tmr_install(int dev);
-static void ad1848_tmr_reprogram(int dev);
-#endif
-
-static int ad_read(ad1848_info * devc, int reg)
-{
- int x;
- int timeout = 900000;
-
- while (timeout > 0 && inb(devc->base) == 0x80) /*Are we initializing */
- timeout--;
-
- if(reg < 32)
- {
- outb(((unsigned char) (reg & 0xff) | devc->MCE_bit), io_Index_Addr(devc));
- x = inb(io_Indexed_Data(devc));
- }
- else
- {
- int xreg, xra;
-
- xreg = (reg & 0xff) - 32;
- xra = (((xreg & 0x0f) << 4) & 0xf0) | 0x08 | ((xreg & 0x10) >> 2);
- outb(((unsigned char) (23 & 0xff) | devc->MCE_bit), io_Index_Addr(devc));
- outb(((unsigned char) (xra & 0xff)), io_Indexed_Data(devc));
- x = inb(io_Indexed_Data(devc));
- }
-
- return x;
-}
-
-static void ad_write(ad1848_info * devc, int reg, int data)
-{
- int timeout = 900000;
-
- while (timeout > 0 && inb(devc->base) == 0x80) /* Are we initializing */
- timeout--;
-
- if(reg < 32)
- {
- outb(((unsigned char) (reg & 0xff) | devc->MCE_bit), io_Index_Addr(devc));
- outb(((unsigned char) (data & 0xff)), io_Indexed_Data(devc));
- }
- else
- {
- int xreg, xra;
-
- xreg = (reg & 0xff) - 32;
- xra = (((xreg & 0x0f) << 4) & 0xf0) | 0x08 | ((xreg & 0x10) >> 2);
- outb(((unsigned char) (23 & 0xff) | devc->MCE_bit), io_Index_Addr(devc));
- outb(((unsigned char) (xra & 0xff)), io_Indexed_Data(devc));
- outb((unsigned char) (data & 0xff), io_Indexed_Data(devc));
- }
-}
-
-static void wait_for_calibration(ad1848_info * devc)
-{
- int timeout;
-
- /*
- * Wait until the auto calibration process has finished.
- *
- * 1) Wait until the chip becomes ready (reads don't return 0x80).
- * 2) Wait until the ACI bit of I11 gets on and then off.
- */
-
- timeout = 100000;
- while (timeout > 0 && inb(devc->base) == 0x80)
- timeout--;
- if (inb(devc->base) & 0x80)
- printk(KERN_WARNING "ad1848: Auto calibration timed out(1).\n");
-
- timeout = 100;
- while (timeout > 0 && !(ad_read(devc, 11) & 0x20))
- timeout--;
- if (!(ad_read(devc, 11) & 0x20))
- return;
-
- timeout = 80000;
- while (timeout > 0 && (ad_read(devc, 11) & 0x20))
- timeout--;
- if (ad_read(devc, 11) & 0x20)
- if ((devc->model != MD_1845) && (devc->model != MD_1845_SSCAPE))
- printk(KERN_WARNING "ad1848: Auto calibration timed out(3).\n");
-}
-
-static void ad_mute(ad1848_info * devc)
-{
- int i;
- unsigned char prev;
-
- /*
- * Save old register settings and mute output channels
- */
-
- for (i = 6; i < 8; i++)
- {
- prev = devc->saved_regs[i] = ad_read(devc, i);
- }
-
-}
-
-static void ad_unmute(ad1848_info * devc)
-{
-}
-
-static void ad_enter_MCE(ad1848_info * devc)
-{
- int timeout = 1000;
- unsigned short prev;
-
- while (timeout > 0 && inb(devc->base) == 0x80) /*Are we initializing */
- timeout--;
-
- devc->MCE_bit = 0x40;
- prev = inb(io_Index_Addr(devc));
- if (prev & 0x40)
- {
- return;
- }
- outb((devc->MCE_bit), io_Index_Addr(devc));
-}
-
-static void ad_leave_MCE(ad1848_info * devc)
-{
- unsigned char prev, acal;
- int timeout = 1000;
-
- while (timeout > 0 && inb(devc->base) == 0x80) /*Are we initializing */
- timeout--;
-
- acal = ad_read(devc, 9);
-
- devc->MCE_bit = 0x00;
- prev = inb(io_Index_Addr(devc));
- outb((0x00), io_Index_Addr(devc)); /* Clear the MCE bit */
-
- if ((prev & 0x40) == 0) /* Not in MCE mode */
- {
- return;
- }
- outb((0x00), io_Index_Addr(devc)); /* Clear the MCE bit */
- if (acal & 0x08) /* Auto calibration is enabled */
- wait_for_calibration(devc);
-}
-
-static int ad1848_set_recmask(ad1848_info * devc, int mask)
-{
- unsigned char recdev;
- int i, n;
- unsigned long flags;
-
- mask &= devc->supported_rec_devices;
-
- /* Rename the mixer bits if necessary */
- for (i = 0; i < 32; i++)
- {
- if (devc->mixer_reroute[i] != i)
- {
- if (mask & (1 << i))
- {
- mask &= ~(1 << i);
- mask |= (1 << devc->mixer_reroute[i]);
- }
- }
- }
-
- n = 0;
- for (i = 0; i < 32; i++) /* Count selected device bits */
- if (mask & (1 << i))
- n++;
-
- spin_lock_irqsave(&devc->lock,flags);
- if (!soundpro) {
- if (n == 0)
- mask = SOUND_MASK_MIC;
- else if (n != 1) { /* Too many devices selected */
- mask &= ~devc->recmask; /* Filter out active settings */
-
- n = 0;
- for (i = 0; i < 32; i++) /* Count selected device bits */
- if (mask & (1 << i))
- n++;
-
- if (n != 1)
- mask = SOUND_MASK_MIC;
- }
- switch (mask) {
- case SOUND_MASK_MIC:
- recdev = 2;
- break;
-
- case SOUND_MASK_LINE:
- case SOUND_MASK_LINE3:
- recdev = 0;
- break;
-
- case SOUND_MASK_CD:
- case SOUND_MASK_LINE1:
- recdev = 1;
- break;
-
- case SOUND_MASK_IMIX:
- recdev = 3;
- break;
-
- default:
- mask = SOUND_MASK_MIC;
- recdev = 2;
- }
-
- recdev <<= 6;
- ad_write(devc, 0, (ad_read(devc, 0) & 0x3f) | recdev);
- ad_write(devc, 1, (ad_read(devc, 1) & 0x3f) | recdev);
- } else { /* soundpro */
- unsigned char val;
- int set_rec_bit;
- int j;
-
- for (i = 0; i < 32; i++) { /* For each bit */
- if ((devc->supported_rec_devices & (1 << i)) == 0)
- continue; /* Device not supported */
-
- for (j = LEFT_CHN; j <= RIGHT_CHN; j++) {
- if (devc->mix_devices[i][j].nbits == 0) /* Inexistent channel */
- continue;
-
- /*
- * This is tricky:
- * set_rec_bit becomes 1 if the corresponding bit in mask is set
- * then it gets flipped if the polarity is inverse
- */
- set_rec_bit = ((mask & (1 << i)) != 0) ^ devc->mix_devices[i][j].recpol;
-
- val = ad_read(devc, devc->mix_devices[i][j].recreg);
- val &= ~(1 << devc->mix_devices[i][j].recpos);
- val |= (set_rec_bit << devc->mix_devices[i][j].recpos);
- ad_write(devc, devc->mix_devices[i][j].recreg, val);
- }
- }
- }
- spin_unlock_irqrestore(&devc->lock,flags);
-
- /* Rename the mixer bits back if necessary */
- for (i = 0; i < 32; i++)
- {
- if (devc->mixer_reroute[i] != i)
- {
- if (mask & (1 << devc->mixer_reroute[i]))
- {
- mask &= ~(1 << devc->mixer_reroute[i]);
- mask |= (1 << i);
- }
- }
- }
- devc->recmask = mask;
- return mask;
-}
-
-static void oss_change_bits(ad1848_info *devc, unsigned char *regval,
- unsigned char *muteval, int dev, int chn, int newval)
-{
- unsigned char mask;
- int shift;
- int mute;
- int mutemask;
- int set_mute_bit;
-
- set_mute_bit = (newval == 0) ^ devc->mix_devices[dev][chn].mutepol;
-
- if (devc->mix_devices[dev][chn].polarity == 1) /* Reverse */
- newval = 100 - newval;
-
- mask = (1 << devc->mix_devices[dev][chn].nbits) - 1;
- shift = devc->mix_devices[dev][chn].bitpos;
-
- if (devc->mix_devices[dev][chn].mutepos == 8)
- { /* if there is no mute bit */
- mute = 0; /* No mute bit; do nothing special */
- mutemask = ~0; /* No mute bit; do nothing special */
- }
- else
- {
- mute = (set_mute_bit << devc->mix_devices[dev][chn].mutepos);
- mutemask = ~(1 << devc->mix_devices[dev][chn].mutepos);
- }
-
- newval = (int) ((newval * mask) + 50) / 100; /* Scale it */
- *regval &= ~(mask << shift); /* Clear bits */
- *regval |= (newval & mask) << shift; /* Set new value */
-
- *muteval &= mutemask;
- *muteval |= mute;
-}
-
-static int ad1848_mixer_get(ad1848_info * devc, int dev)
-{
- if (!((1 << dev) & devc->supported_devices))
- return -EINVAL;
-
- dev = devc->mixer_reroute[dev];
-
- return devc->levels[dev];
-}
-
-static void ad1848_mixer_set_channel(ad1848_info *devc, int dev, int value, int channel)
-{
- int regoffs, muteregoffs;
- unsigned char val, muteval;
- unsigned long flags;
-
- regoffs = devc->mix_devices[dev][channel].regno;
- muteregoffs = devc->mix_devices[dev][channel].mutereg;
- val = ad_read(devc, regoffs);
-
- if (muteregoffs != regoffs) {
- muteval = ad_read(devc, muteregoffs);
- oss_change_bits(devc, &val, &muteval, dev, channel, value);
- }
- else
- oss_change_bits(devc, &val, &val, dev, channel, value);
-
- spin_lock_irqsave(&devc->lock,flags);
- ad_write(devc, regoffs, val);
- devc->saved_regs[regoffs] = val;
- if (muteregoffs != regoffs) {
- ad_write(devc, muteregoffs, muteval);
- devc->saved_regs[muteregoffs] = muteval;
- }
- spin_unlock_irqrestore(&devc->lock,flags);
-}
-
-static int ad1848_mixer_set(ad1848_info * devc, int dev, int value)
-{
- int left = value & 0x000000ff;
- int right = (value & 0x0000ff00) >> 8;
- int retvol;
-
- if (dev > 31)
- return -EINVAL;
-
- if (!(devc->supported_devices & (1 << dev)))
- return -EINVAL;
-
- dev = devc->mixer_reroute[dev];
-
- if (devc->mix_devices[dev][LEFT_CHN].nbits == 0)
- return -EINVAL;
-
- if (left > 100)
- left = 100;
- if (right > 100)
- right = 100;
-
- if (devc->mix_devices[dev][RIGHT_CHN].nbits == 0) /* Mono control */
- right = left;
-
- retvol = left | (right << 8);
-
- /* Scale volumes */
- left = mix_cvt[left];
- right = mix_cvt[right];
-
- devc->levels[dev] = retvol;
-
- /*
- * Set the left channel
- */
- ad1848_mixer_set_channel(devc, dev, left, LEFT_CHN);
-
- /*
- * Set the right channel
- */
- if (devc->mix_devices[dev][RIGHT_CHN].nbits == 0)
- goto out;
- ad1848_mixer_set_channel(devc, dev, right, RIGHT_CHN);
-
- out:
- return retvol;
-}
-
-static void ad1848_mixer_reset(ad1848_info * devc)
-{
- int i;
- char name[32];
- unsigned long flags;
-
- devc->mix_devices = &(ad1848_mix_devices[0]);
-
- sprintf(name, "%s_%d", devc->chip_name, nr_ad1848_devs);
-
- for (i = 0; i < 32; i++)
- devc->mixer_reroute[i] = i;
-
- devc->supported_rec_devices = MODE1_REC_DEVICES;
-
- switch (devc->model)
- {
- case MD_4231:
- case MD_4231A:
- case MD_1845:
- case MD_1845_SSCAPE:
- devc->supported_devices = MODE2_MIXER_DEVICES;
- break;
-
- case MD_C930:
- devc->supported_devices = C930_MIXER_DEVICES;
- devc->mix_devices = &(c930_mix_devices[0]);
- break;
-
- case MD_IWAVE:
- devc->supported_devices = MODE3_MIXER_DEVICES;
- devc->mix_devices = &(iwave_mix_devices[0]);
- break;
-
- case MD_42xB:
- case MD_4239:
- devc->mix_devices = &(cs42xb_mix_devices[0]);
- devc->supported_devices = MODE3_MIXER_DEVICES;
- break;
- case MD_4232:
- case MD_4235:
- case MD_4236:
- devc->supported_devices = MODE3_MIXER_DEVICES;
- break;
-
- case MD_1848:
- if (soundpro) {
- devc->supported_devices = SPRO_MIXER_DEVICES;
- devc->supported_rec_devices = SPRO_REC_DEVICES;
- devc->mix_devices = &(spro_mix_devices[0]);
- break;
- }
-
- default:
- devc->supported_devices = MODE1_MIXER_DEVICES;
- }
-
- devc->orig_devices = devc->supported_devices;
- devc->orig_rec_devices = devc->supported_rec_devices;
-
- devc->levels = load_mixer_volumes(name, default_mixer_levels, 1);
-
- for (i = 0; i < SOUND_MIXER_NRDEVICES; i++)
- {
- if (devc->supported_devices & (1 << i))
- ad1848_mixer_set(devc, i, devc->levels[i]);
- }
-
- ad1848_set_recmask(devc, SOUND_MASK_MIC);
-
- devc->mixer_output_port = devc->levels[31] | AUDIO_HEADPHONE | AUDIO_LINE_OUT;
-
- spin_lock_irqsave(&devc->lock,flags);
- if (!soundpro) {
- if (devc->mixer_output_port & AUDIO_SPEAKER)
- ad_write(devc, 26, ad_read(devc, 26) & ~0x40); /* Unmute mono out */
- else
- ad_write(devc, 26, ad_read(devc, 26) | 0x40); /* Mute mono out */
- } else {
- /*
- * From the "wouldn't it be nice if the mixer API had (better)
- * support for custom stuff" category
- */
- /* Enable surround mode and SB16 mixer */
- ad_write(devc, 16, 0x60);
- }
- spin_unlock_irqrestore(&devc->lock,flags);
-}
-
-static int ad1848_mixer_ioctl(int dev, unsigned int cmd, void __user *arg)
-{
- ad1848_info *devc = mixer_devs[dev]->devc;
- int val;
-
- if (cmd == SOUND_MIXER_PRIVATE1)
- {
- if (get_user(val, (int __user *)arg))
- return -EFAULT;
-
- if (val != 0xffff)
- {
- unsigned long flags;
- val &= (AUDIO_SPEAKER | AUDIO_HEADPHONE | AUDIO_LINE_OUT);
- devc->mixer_output_port = val;
- val |= AUDIO_HEADPHONE | AUDIO_LINE_OUT; /* Always on */
- devc->mixer_output_port = val;
- spin_lock_irqsave(&devc->lock,flags);
- if (val & AUDIO_SPEAKER)
- ad_write(devc, 26, ad_read(devc, 26) & ~0x40); /* Unmute mono out */
- else
- ad_write(devc, 26, ad_read(devc, 26) | 0x40); /* Mute mono out */
- spin_unlock_irqrestore(&devc->lock,flags);
- }
- val = devc->mixer_output_port;
- return put_user(val, (int __user *)arg);
- }
- if (cmd == SOUND_MIXER_PRIVATE2)
- {
- if (get_user(val, (int __user *)arg))
- return -EFAULT;
- return(ad1848_control(AD1848_MIXER_REROUTE, val));
- }
- if (((cmd >> 8) & 0xff) == 'M')
- {
- if (_SIOC_DIR(cmd) & _SIOC_WRITE)
- {
- switch (cmd & 0xff)
- {
- case SOUND_MIXER_RECSRC:
- if (get_user(val, (int __user *)arg))
- return -EFAULT;
- val = ad1848_set_recmask(devc, val);
- break;
-
- default:
- if (get_user(val, (int __user *)arg))
- return -EFAULT;
- val = ad1848_mixer_set(devc, cmd & 0xff, val);
- break;
- }
- return put_user(val, (int __user *)arg);
- }
- else
- {
- switch (cmd & 0xff)
- {
- /*
- * Return parameters
- */
-
- case SOUND_MIXER_RECSRC:
- val = devc->recmask;
- break;
-
- case SOUND_MIXER_DEVMASK:
- val = devc->supported_devices;
- break;
-
- case SOUND_MIXER_STEREODEVS:
- val = devc->supported_devices;
- if (devc->model != MD_C930)
- val &= ~(SOUND_MASK_SPEAKER | SOUND_MASK_IMIX);
- break;
-
- case SOUND_MIXER_RECMASK:
- val = devc->supported_rec_devices;
- break;
-
- case SOUND_MIXER_CAPS:
- val=SOUND_CAP_EXCL_INPUT;
- break;
-
- default:
- val = ad1848_mixer_get(devc, cmd & 0xff);
- break;
- }
- return put_user(val, (int __user *)arg);
- }
- }
- else
- return -EINVAL;
-}
-
-static int ad1848_set_speed(int dev, int arg)
-{
- ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
- ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;
-
- /*
- * The sampling speed is encoded in the least significant nibble of I8. The
- * LSB selects the clock source (0=24.576 MHz, 1=16.9344 MHz) and the
- * other three bits select the divisor (indirectly):
- *
- * The available speeds are in the following table. Keep the speeds in
- * the increasing order.
- */
- typedef struct
- {
- int speed;
- unsigned char bits;
- }
- speed_struct;
-
- static speed_struct speed_table[] =
- {
- {5510, (0 << 1) | 1},
- {5510, (0 << 1) | 1},
- {6620, (7 << 1) | 1},
- {8000, (0 << 1) | 0},
- {9600, (7 << 1) | 0},
- {11025, (1 << 1) | 1},
- {16000, (1 << 1) | 0},
- {18900, (2 << 1) | 1},
- {22050, (3 << 1) | 1},
- {27420, (2 << 1) | 0},
- {32000, (3 << 1) | 0},
- {33075, (6 << 1) | 1},
- {37800, (4 << 1) | 1},
- {44100, (5 << 1) | 1},
- {48000, (6 << 1) | 0}
- };
-
- int i, n, selected = -1;
-
- n = sizeof(speed_table) / sizeof(speed_struct);
-
- if (arg <= 0)
- return portc->speed;
-
- if (devc->model == MD_1845 || devc->model == MD_1845_SSCAPE) /* AD1845 has a different timer than the others */
- {
- if (arg < 4000)
- arg = 4000;
- if (arg > 50000)
- arg = 50000;
-
- portc->speed = arg;
- portc->speed_bits = speed_table[3].bits;
- return portc->speed;
- }
- if (arg < speed_table[0].speed)
- selected = 0;
- if (arg > speed_table[n - 1].speed)
- selected = n - 1;
-
- for (i = 1 /*really */ ; selected == -1 && i < n; i++)
- {
- if (speed_table[i].speed == arg)
- selected = i;
- else if (speed_table[i].speed > arg)
- {
- int diff1, diff2;
-
- diff1 = arg - speed_table[i - 1].speed;
- diff2 = speed_table[i].speed - arg;
-
- if (diff1 < diff2)
- selected = i - 1;
- else
- selected = i;
- }
- }
- if (selected == -1)
- {
- printk(KERN_WARNING "ad1848: Can't find speed???\n");
- selected = 3;
- }
- portc->speed = speed_table[selected].speed;
- portc->speed_bits = speed_table[selected].bits;
- return portc->speed;
-}
-
-static short ad1848_set_channels(int dev, short arg)
-{
- ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;
-
- if (arg != 1 && arg != 2)
- return portc->channels;
-
- portc->channels = arg;
- return arg;
-}
-
-static unsigned int ad1848_set_bits(int dev, unsigned int arg)
-{
- ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
- ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;
-
- static struct format_tbl
- {
- int format;
- unsigned char bits;
- }
- format2bits[] =
- {
- {
- 0, 0
- }
- ,
- {
- AFMT_MU_LAW, 1
- }
- ,
- {
- AFMT_A_LAW, 3
- }
- ,
- {
- AFMT_IMA_ADPCM, 5
- }
- ,
- {
- AFMT_U8, 0
- }
- ,
- {
- AFMT_S16_LE, 2
- }
- ,
- {
- AFMT_S16_BE, 6
- }
- ,
- {
- AFMT_S8, 0
- }
- ,
- {
- AFMT_U16_LE, 0
- }
- ,
- {
- AFMT_U16_BE, 0
- }
- };
- int i, n = sizeof(format2bits) / sizeof(struct format_tbl);
-
- if (arg == 0)
- return portc->audio_format;
-
- if (!(arg & ad_format_mask[devc->model]))
- arg = AFMT_U8;
-
- portc->audio_format = arg;
-
- for (i = 0; i < n; i++)
- if (format2bits[i].format == arg)
- {
- if ((portc->format_bits = format2bits[i].bits) == 0)
- return portc->audio_format = AFMT_U8; /* Was not supported */
-
- return arg;
- }
- /* Still hanging here. Something must be terribly wrong */
- portc->format_bits = 0;
- return portc->audio_format = AFMT_U8;
-}
-
-static struct audio_driver ad1848_audio_driver =
-{
- .owner = THIS_MODULE,
- .open = ad1848_open,
- .close = ad1848_close,
- .output_block = ad1848_output_block,
- .start_input = ad1848_start_input,
- .prepare_for_input = ad1848_prepare_for_input,
- .prepare_for_output = ad1848_prepare_for_output,
- .halt_io = ad1848_halt,
- .halt_input = ad1848_halt_input,
- .halt_output = ad1848_halt_output,
- .trigger = ad1848_trigger,
- .set_speed = ad1848_set_speed,
- .set_bits = ad1848_set_bits,
- .set_channels = ad1848_set_channels
-};
-
-static struct mixer_operations ad1848_mixer_operations =
-{
- .owner = THIS_MODULE,
- .id = "SOUNDPORT",
- .name = "AD1848/CS4248/CS4231",
- .ioctl = ad1848_mixer_ioctl
-};
-
-static int ad1848_open(int dev, int mode)
-{
- ad1848_info *devc;
- ad1848_port_info *portc;
- unsigned long flags;
-
- if (dev < 0 || dev >= num_audiodevs)
- return -ENXIO;
-
- devc = (ad1848_info *) audio_devs[dev]->devc;
- portc = (ad1848_port_info *) audio_devs[dev]->portc;
-
- /* here we don't have to protect against intr */
- spin_lock(&devc->lock);
- if (portc->open_mode || (devc->open_mode & mode))
- {
- spin_unlock(&devc->lock);
- return -EBUSY;
- }
- devc->dual_dma = 0;
-
- if (audio_devs[dev]->flags & DMA_DUPLEX)
- {
- devc->dual_dma = 1;
- }
- devc->intr_active = 0;
- devc->audio_mode = 0;
- devc->open_mode |= mode;
- portc->open_mode = mode;
- spin_unlock(&devc->lock);
- ad1848_trigger(dev, 0);
-
- if (mode & OPEN_READ)
- devc->record_dev = dev;
- if (mode & OPEN_WRITE)
- devc->playback_dev = dev;
-/*
- * Mute output until the playback really starts. This decreases clicking (hope so).
- */
- spin_lock_irqsave(&devc->lock,flags);
- ad_mute(devc);
- spin_unlock_irqrestore(&devc->lock,flags);
-
- return 0;
-}
-
-static void ad1848_close(int dev)
-{
- unsigned long flags;
- ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
- ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;
-
- devc->intr_active = 0;
- ad1848_halt(dev);
-
- spin_lock_irqsave(&devc->lock,flags);
-
- devc->audio_mode = 0;
- devc->open_mode &= ~portc->open_mode;
- portc->open_mode = 0;
-
- ad_unmute(devc);
- spin_unlock_irqrestore(&devc->lock,flags);
-}
-
-static void ad1848_output_block(int dev, unsigned long buf, int count, int intrflag)
-{
- unsigned long flags, cnt;
- ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
- ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;
-
- cnt = count;
-
- if (portc->audio_format == AFMT_IMA_ADPCM)
- {
- cnt /= 4;
- }
- else
- {
- if (portc->audio_format & (AFMT_S16_LE | AFMT_S16_BE)) /* 16 bit data */
- cnt >>= 1;
- }
- if (portc->channels > 1)
- cnt >>= 1;
- cnt--;
-
- if ((devc->audio_mode & PCM_ENABLE_OUTPUT) && (audio_devs[dev]->flags & DMA_AUTOMODE) &&
- intrflag &&
- cnt == devc->xfer_count)
- {
- devc->audio_mode |= PCM_ENABLE_OUTPUT;
- devc->intr_active = 1;
- return; /*
- * Auto DMA mode on. No need to react
- */
- }
- spin_lock_irqsave(&devc->lock,flags);
-
- ad_write(devc, 15, (unsigned char) (cnt & 0xff));
- ad_write(devc, 14, (unsigned char) ((cnt >> 8) & 0xff));
-
- devc->xfer_count = cnt;
- devc->audio_mode |= PCM_ENABLE_OUTPUT;
- devc->intr_active = 1;
- spin_unlock_irqrestore(&devc->lock,flags);
-}
-
-static void ad1848_start_input(int dev, unsigned long buf, int count, int intrflag)
-{
- unsigned long flags, cnt;
- ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
- ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;
-
- cnt = count;
- if (portc->audio_format == AFMT_IMA_ADPCM)
- {
- cnt /= 4;
- }
- else
- {
- if (portc->audio_format & (AFMT_S16_LE | AFMT_S16_BE)) /* 16 bit data */
- cnt >>= 1;
- }
- if (portc->channels > 1)
- cnt >>= 1;
- cnt--;
-
- if ((devc->audio_mode & PCM_ENABLE_INPUT) && (audio_devs[dev]->flags & DMA_AUTOMODE) &&
- intrflag &&
- cnt == devc->xfer_count)
- {
- devc->audio_mode |= PCM_ENABLE_INPUT;
- devc->intr_active = 1;
- return; /*
- * Auto DMA mode on. No need to react
- */
- }
- spin_lock_irqsave(&devc->lock,flags);
-
- if (devc->model == MD_1848)
- {
- ad_write(devc, 15, (unsigned char) (cnt & 0xff));
- ad_write(devc, 14, (unsigned char) ((cnt >> 8) & 0xff));
- }
- else
- {
- ad_write(devc, 31, (unsigned char) (cnt & 0xff));
- ad_write(devc, 30, (unsigned char) ((cnt >> 8) & 0xff));
- }
-
- ad_unmute(devc);
-
- devc->xfer_count = cnt;
- devc->audio_mode |= PCM_ENABLE_INPUT;
- devc->intr_active = 1;
- spin_unlock_irqrestore(&devc->lock,flags);
-}
-
-static int ad1848_prepare_for_output(int dev, int bsize, int bcount)
-{
- int timeout;
- unsigned char fs, old_fs, tmp = 0;
- unsigned long flags;
- ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
- ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;
-
- ad_mute(devc);
-
- spin_lock_irqsave(&devc->lock,flags);
- fs = portc->speed_bits | (portc->format_bits << 5);
-
- if (portc->channels > 1)
- fs |= 0x10;
-
- ad_enter_MCE(devc); /* Enables changes to the format select reg */
-
- if (devc->model == MD_1845 || devc->model == MD_1845_SSCAPE) /* Use alternate speed select registers */
- {
- fs &= 0xf0; /* Mask off the rate select bits */
-
- ad_write(devc, 22, (portc->speed >> 8) & 0xff); /* Speed MSB */
- ad_write(devc, 23, portc->speed & 0xff); /* Speed LSB */
- }
- old_fs = ad_read(devc, 8);
-
- if (devc->model == MD_4232 || devc->model >= MD_4236)
- {
- tmp = ad_read(devc, 16);
- ad_write(devc, 16, tmp | 0x30);
- }
- if (devc->model == MD_IWAVE)
- ad_write(devc, 17, 0xc2); /* Disable variable frequency select */
-
- ad_write(devc, 8, fs);
-
- /*
- * Write to I8 starts resynchronization. Wait until it completes.
- */
-
- timeout = 0;
- while (timeout < 100 && inb(devc->base) != 0x80)
- timeout++;
- timeout = 0;
- while (timeout < 10000 && inb(devc->base) == 0x80)
- timeout++;
-
- if (devc->model >= MD_4232)
- ad_write(devc, 16, tmp & ~0x30);
-
- ad_leave_MCE(devc); /*
- * Starts the calibration process.
- */
- spin_unlock_irqrestore(&devc->lock,flags);
- devc->xfer_count = 0;
-
-#ifndef EXCLUDE_TIMERS
- if (dev == timer_installed && devc->timer_running)
- if ((fs & 0x01) != (old_fs & 0x01))
- {
- ad1848_tmr_reprogram(dev);
- }
-#endif
- ad1848_halt_output(dev);
- return 0;
-}
-
-static int ad1848_prepare_for_input(int dev, int bsize, int bcount)
-{
- int timeout;
- unsigned char fs, old_fs, tmp = 0;
- unsigned long flags;
- ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
- ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;
-
- if (devc->audio_mode)
- return 0;
-
- spin_lock_irqsave(&devc->lock,flags);
- fs = portc->speed_bits | (portc->format_bits << 5);
-
- if (portc->channels > 1)
- fs |= 0x10;
-
- ad_enter_MCE(devc); /* Enables changes to the format select reg */
-
- if ((devc->model == MD_1845) || (devc->model == MD_1845_SSCAPE)) /* Use alternate speed select registers */
- {
- fs &= 0xf0; /* Mask off the rate select bits */
-
- ad_write(devc, 22, (portc->speed >> 8) & 0xff); /* Speed MSB */
- ad_write(devc, 23, portc->speed & 0xff); /* Speed LSB */
- }
- if (devc->model == MD_4232)
- {
- tmp = ad_read(devc, 16);
- ad_write(devc, 16, tmp | 0x30);
- }
- if (devc->model == MD_IWAVE)
- ad_write(devc, 17, 0xc2); /* Disable variable frequency select */
-
- /*
- * If mode >= 2 (CS4231), set I28. It's the capture format register.
- */
-
- if (devc->model != MD_1848)
- {
- old_fs = ad_read(devc, 28);
- ad_write(devc, 28, fs);
-
- /*
- * Write to I28 starts resynchronization. Wait until it completes.
- */
-
- timeout = 0;
- while (timeout < 100 && inb(devc->base) != 0x80)
- timeout++;
-
- timeout = 0;
- while (timeout < 10000 && inb(devc->base) == 0x80)
- timeout++;
-
- if (devc->model != MD_1848 && devc->model != MD_1845 && devc->model != MD_1845_SSCAPE)
- {
- /*
- * CS4231 compatible devices don't have a separate sampling rate selection
- * register for recording and playback. The I8 register is shared so we have to
- * set the speed encoding bits of it too.
- */
- unsigned char tmp = portc->speed_bits | (ad_read(devc, 8) & 0xf0);
-
- ad_write(devc, 8, tmp);
- /*
- * Write to I8 starts resynchronization. Wait until it completes.
- */
- timeout = 0;
- while (timeout < 100 && inb(devc->base) != 0x80)
- timeout++;
-
- timeout = 0;
- while (timeout < 10000 && inb(devc->base) == 0x80)
- timeout++;
- }
- }
- else
- { /* For AD1848 set I8. */
-
- old_fs = ad_read(devc, 8);
- ad_write(devc, 8, fs);
- /*
- * Write to I8 starts resynchronization. Wait until it completes.
- */
- timeout = 0;
- while (timeout < 100 && inb(devc->base) != 0x80)
- timeout++;
- timeout = 0;
- while (timeout < 10000 && inb(devc->base) == 0x80)
- timeout++;
- }
-
- if (devc->model == MD_4232)
- ad_write(devc, 16, tmp & ~0x30);
-
- ad_leave_MCE(devc); /*
- * Starts the calibration process.
- */
- spin_unlock_irqrestore(&devc->lock,flags);
- devc->xfer_count = 0;
-
-#ifndef EXCLUDE_TIMERS
- if (dev == timer_installed && devc->timer_running)
- {
- if ((fs & 0x01) != (old_fs & 0x01))
- {
- ad1848_tmr_reprogram(dev);
- }
- }
-#endif
- ad1848_halt_input(dev);
- return 0;
-}
-
-static void ad1848_halt(int dev)
-{
- ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
- ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;
-
- unsigned char bits = ad_read(devc, 9);
-
- if (bits & 0x01 && (portc->open_mode & OPEN_WRITE))
- ad1848_halt_output(dev);
-
- if (bits & 0x02 && (portc->open_mode & OPEN_READ))
- ad1848_halt_input(dev);
- devc->audio_mode = 0;
-}
-
-static void ad1848_halt_input(int dev)
-{
- ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
- unsigned long flags;
-
- if (!(ad_read(devc, 9) & 0x02))
- return; /* Capture not enabled */
-
- spin_lock_irqsave(&devc->lock,flags);
-
- ad_mute(devc);
-
- {
- int tmout;
-
- if(!isa_dma_bridge_buggy)
- disable_dma(audio_devs[dev]->dmap_in->dma);
-
- for (tmout = 0; tmout < 100000; tmout++)
- if (ad_read(devc, 11) & 0x10)
- break;
- ad_write(devc, 9, ad_read(devc, 9) & ~0x02); /* Stop capture */
-
- if(!isa_dma_bridge_buggy)
- enable_dma(audio_devs[dev]->dmap_in->dma);
- devc->audio_mode &= ~PCM_ENABLE_INPUT;
- }
-
- outb(0, io_Status(devc)); /* Clear interrupt status */
- outb(0, io_Status(devc)); /* Clear interrupt status */
-
- devc->audio_mode &= ~PCM_ENABLE_INPUT;
-
- spin_unlock_irqrestore(&devc->lock,flags);
-}
-
-static void ad1848_halt_output(int dev)
-{
- ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
- unsigned long flags;
-
- if (!(ad_read(devc, 9) & 0x01))
- return; /* Playback not enabled */
-
- spin_lock_irqsave(&devc->lock,flags);
-
- ad_mute(devc);
- {
- int tmout;
-
- if(!isa_dma_bridge_buggy)
- disable_dma(audio_devs[dev]->dmap_out->dma);
-
- for (tmout = 0; tmout < 100000; tmout++)
- if (ad_read(devc, 11) & 0x10)
- break;
- ad_write(devc, 9, ad_read(devc, 9) & ~0x01); /* Stop playback */
-
- if(!isa_dma_bridge_buggy)
- enable_dma(audio_devs[dev]->dmap_out->dma);
-
- devc->audio_mode &= ~PCM_ENABLE_OUTPUT;
- }
-
- outb((0), io_Status(devc)); /* Clear interrupt status */
- outb((0), io_Status(devc)); /* Clear interrupt status */
-
- devc->audio_mode &= ~PCM_ENABLE_OUTPUT;
-
- spin_unlock_irqrestore(&devc->lock,flags);
-}
-
-static void ad1848_trigger(int dev, int state)
-{
- ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
- ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;
- unsigned long flags;
- unsigned char tmp, old;
-
- spin_lock_irqsave(&devc->lock,flags);
- state &= devc->audio_mode;
-
- tmp = old = ad_read(devc, 9);
-
- if (portc->open_mode & OPEN_READ)
- {
- if (state & PCM_ENABLE_INPUT)
- tmp |= 0x02;
- else
- tmp &= ~0x02;
- }
- if (portc->open_mode & OPEN_WRITE)
- {
- if (state & PCM_ENABLE_OUTPUT)
- tmp |= 0x01;
- else
- tmp &= ~0x01;
- }
- /* ad_mute(devc); */
- if (tmp != old)
- {
- ad_write(devc, 9, tmp);
- ad_unmute(devc);
- }
- spin_unlock_irqrestore(&devc->lock,flags);
-}
-
-static void ad1848_init_hw(ad1848_info * devc)
-{
- int i;
- int *init_values;
-
- /*
- * Initial values for the indirect registers of CS4248/AD1848.
- */
- static int init_values_a[] =
- {
- 0xa8, 0xa8, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00,
- 0x00, 0x0c, 0x02, 0x00, 0x8a, 0x01, 0x00, 0x00,
-
- /* Positions 16 to 31 just for CS4231/2 and ad1845 */
- 0x80, 0x00, 0x10, 0x10, 0x00, 0x00, 0x1f, 0x40,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
- };
-
- static int init_values_b[] =
- {
- /*
- Values for the newer chips
- Some of the register initialization values were changed. In
- order to get rid of the click that preceded PCM playback,
- calibration was disabled on the 10th byte. On that same byte,
- dual DMA was enabled; on the 11th byte, ADC dithering was
- enabled, since that is theoretically desirable; on the 13th
- byte, Mode 3 was selected, to enable access to extended
- registers.
- */
- 0xa8, 0xa8, 0x08, 0x08, 0x08, 0x08, 0x00, 0x00,
- 0x00, 0x00, 0x06, 0x00, 0xe0, 0x01, 0x00, 0x00,
- 0x80, 0x00, 0x10, 0x10, 0x00, 0x00, 0x1f, 0x40,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
- };
-
- /*
- * Select initialisation data
- */
-
- init_values = init_values_a;
- if(devc->model >= MD_4236)
- init_values = init_values_b;
-
- for (i = 0; i < 16; i++)
- ad_write(devc, i, init_values[i]);
-
-
- ad_mute(devc); /* Initialize some variables */
- ad_unmute(devc); /* Leave it unmuted now */
-
- if (devc->model > MD_1848)
- {
- if (devc->model == MD_1845_SSCAPE)
- ad_write(devc, 12, ad_read(devc, 12) | 0x50);
- else
- ad_write(devc, 12, ad_read(devc, 12) | 0x40); /* Mode2 = enabled */
-
- if (devc->model == MD_IWAVE)
- ad_write(devc, 12, 0x6c); /* Select codec mode 3 */
-
- if (devc->model != MD_1845_SSCAPE)
- for (i = 16; i < 32; i++)
- ad_write(devc, i, init_values[i]);
-
- if (devc->model == MD_IWAVE)
- ad_write(devc, 16, 0x30); /* Playback and capture counters enabled */
- }
- if (devc->model > MD_1848)
- {
- if (devc->audio_flags & DMA_DUPLEX)
- ad_write(devc, 9, ad_read(devc, 9) & ~0x04); /* Dual DMA mode */
- else
- ad_write(devc, 9, ad_read(devc, 9) | 0x04); /* Single DMA mode */
-
- if (devc->model == MD_1845 || devc->model == MD_1845_SSCAPE)
- ad_write(devc, 27, ad_read(devc, 27) | 0x08); /* Alternate freq select enabled */
-
- if (devc->model == MD_IWAVE)
- { /* Some magic Interwave specific initialization */
- ad_write(devc, 12, 0x6c); /* Select codec mode 3 */
- ad_write(devc, 16, 0x30); /* Playback and capture counters enabled */
- ad_write(devc, 17, 0xc2); /* Alternate feature enable */
- }
- }
- else
- {
- devc->audio_flags &= ~DMA_DUPLEX;
- ad_write(devc, 9, ad_read(devc, 9) | 0x04); /* Single DMA mode */
- if (soundpro)
- ad_write(devc, 12, ad_read(devc, 12) | 0x40); /* Mode2 = enabled */
- }
-
- outb((0), io_Status(devc)); /* Clear pending interrupts */
-
- /*
- * Toggle the MCE bit. It completes the initialization phase.
- */
-
- ad_enter_MCE(devc); /* In case the bit was off */
- ad_leave_MCE(devc);
-
- ad1848_mixer_reset(devc);
-}
-
-int ad1848_detect(struct resource *ports, int *ad_flags, int *osp)
-{
- unsigned char tmp;
- ad1848_info *devc = &adev_info[nr_ad1848_devs];
- unsigned char tmp1 = 0xff, tmp2 = 0xff;
- int optiC930 = 0; /* OPTi 82C930 flag */
- int interwave = 0;
- int ad1847_flag = 0;
- int cs4248_flag = 0;
- int sscape_flag = 0;
- int io_base = ports->start;
-
- int i;
-
- DDB(printk("ad1848_detect(%x)\n", io_base));
-
- if (ad_flags)
- {
- if (*ad_flags == 0x12345678)
- {
- interwave = 1;
- *ad_flags = 0;
- }
-
- if (*ad_flags == 0x87654321)
- {
- sscape_flag = 1;
- *ad_flags = 0;
- }
-
- if (*ad_flags == 0x12345677)
- {
- cs4248_flag = 1;
- *ad_flags = 0;
- }
- }
- if (nr_ad1848_devs >= MAX_AUDIO_DEV)
- {
- printk(KERN_ERR "ad1848 - Too many audio devices\n");
- return 0;
- }
- spin_lock_init(&devc->lock);
- devc->base = io_base;
- devc->irq_ok = 0;
- devc->timer_running = 0;
- devc->MCE_bit = 0x40;
- devc->irq = 0;
- devc->open_mode = 0;
- devc->chip_name = devc->name = "AD1848";
- devc->model = MD_1848; /* AD1848 or CS4248 */
- devc->levels = NULL;
- devc->debug_flag = 0;
-
- /*
- * Check that the I/O address is in use.
- *
- * The bit 0x80 of the base I/O port is known to be 0 after the
- * chip has performed its power on initialization. Just assume
- * this has happened before the OS is starting.
- *
- * If the I/O address is unused, it typically returns 0xff.
- */
-
- if (inb(devc->base) == 0xff)
- {
- DDB(printk("ad1848_detect: The base I/O address appears to be dead\n"));
- }
-
- /*
- * Wait for the device to stop initialization
- */
-
- DDB(printk("ad1848_detect() - step 0\n"));
-
- for (i = 0; i < 10000000; i++)
- {
- unsigned char x = inb(devc->base);
-
- if (x == 0xff || !(x & 0x80))
- break;
- }
-
- DDB(printk("ad1848_detect() - step A\n"));
-
- if (inb(devc->base) == 0x80) /* Not ready. Let's wait */
- ad_leave_MCE(devc);
-
- if ((inb(devc->base) & 0x80) != 0x00) /* Not a AD1848 */
- {
- DDB(printk("ad1848 detect error - step A (%02x)\n", (int) inb(devc->base)));
- return 0;
- }
-
- /*
- * Test if it's possible to change contents of the indirect registers.
- * Registers 0 and 1 are ADC volume registers. The bit 0x10 is read only
- * so try to avoid using it.
- */
-
- DDB(printk("ad1848_detect() - step B\n"));
- ad_write(devc, 0, 0xaa);
- ad_write(devc, 1, 0x45); /* 0x55 with bit 0x10 clear */
-
- if ((tmp1 = ad_read(devc, 0)) != 0xaa || (tmp2 = ad_read(devc, 1)) != 0x45)
- {
- if (tmp2 == 0x65) /* AD1847 has couple of bits hardcoded to 1 */
- ad1847_flag = 1;
- else
- {
- DDB(printk("ad1848 detect error - step B (%x/%x)\n", tmp1, tmp2));
- return 0;
- }
- }
- DDB(printk("ad1848_detect() - step C\n"));
- ad_write(devc, 0, 0x45);
- ad_write(devc, 1, 0xaa);
-
- if ((tmp1 = ad_read(devc, 0)) != 0x45 || (tmp2 = ad_read(devc, 1)) != 0xaa)
- {
- if (tmp2 == 0x8a) /* AD1847 has few bits hardcoded to 1 */
- ad1847_flag = 1;
- else
- {
- DDB(printk("ad1848 detect error - step C (%x/%x)\n", tmp1, tmp2));
- return 0;
- }
- }
-
- /*
- * The indirect register I12 has some read only bits. Let's
- * try to change them.
- */
-
- DDB(printk("ad1848_detect() - step D\n"));
- tmp = ad_read(devc, 12);
- ad_write(devc, 12, (~tmp) & 0x0f);
-
- if ((tmp & 0x0f) != ((tmp1 = ad_read(devc, 12)) & 0x0f))
- {
- DDB(printk("ad1848 detect error - step D (%x)\n", tmp1));
- return 0;
- }
-
- /*
- * NOTE! Last 4 bits of the reg I12 tell the chip revision.
- * 0x01=RevB and 0x0A=RevC.
- */
-
- /*
- * The original AD1848/CS4248 has just 15 indirect registers. This means
- * that I0 and I16 should return the same value (etc.).
- * However this doesn't work with CS4248. Actually it seems to be impossible
- * to detect if the chip is a CS4231 or CS4248.
- * Ensure that the Mode2 enable bit of I12 is 0. Otherwise this test fails
- * with CS4231.
- */
-
- /*
- * OPTi 82C930 has mode2 control bit in another place. This test will fail
- * with it. Accept this situation as a possible indication of this chip.
- */
-
- DDB(printk("ad1848_detect() - step F\n"));
- ad_write(devc, 12, 0); /* Mode2=disabled */
-
- for (i = 0; i < 16; i++)
- {
- if ((tmp1 = ad_read(devc, i)) != (tmp2 = ad_read(devc, i + 16)))
- {
- DDB(printk("ad1848 detect step F(%d/%x/%x) - OPTi chip???\n", i, tmp1, tmp2));
- if (!ad1847_flag)
- optiC930 = 1;
- break;
- }
- }
-
- /*
- * Try to switch the chip to mode2 (CS4231) by setting the MODE2 bit (0x40).
- * The bit 0x80 is always 1 in CS4248 and CS4231.
- */
-
- DDB(printk("ad1848_detect() - step G\n"));
-
- if (ad_flags && *ad_flags == 400)
- *ad_flags = 0;
- else
- ad_write(devc, 12, 0x40); /* Set mode2, clear 0x80 */
-
-
- if (ad_flags)
- *ad_flags = 0;
-
- tmp1 = ad_read(devc, 12);
- if (tmp1 & 0x80)
- {
- if (ad_flags)
- *ad_flags |= AD_F_CS4248;
-
- devc->chip_name = "CS4248"; /* Our best knowledge just now */
- }
- if (optiC930 || (tmp1 & 0xc0) == (0x80 | 0x40))
- {
- /*
- * CS4231 detected - is it?
- *
- * Verify that setting I0 doesn't change I16.
- */
-
- DDB(printk("ad1848_detect() - step H\n"));
- ad_write(devc, 16, 0); /* Set I16 to known value */
-
- ad_write(devc, 0, 0x45);
- if ((tmp1 = ad_read(devc, 16)) != 0x45) /* No change -> CS4231? */
- {
- ad_write(devc, 0, 0xaa);
- if ((tmp1 = ad_read(devc, 16)) == 0xaa) /* Rotten bits? */
- {
- DDB(printk("ad1848 detect error - step H(%x)\n", tmp1));
- return 0;
- }
-
- /*
- * Verify that some bits of I25 are read only.
- */
-
- DDB(printk("ad1848_detect() - step I\n"));
- tmp1 = ad_read(devc, 25); /* Original bits */
- ad_write(devc, 25, ~tmp1); /* Invert all bits */
- if ((ad_read(devc, 25) & 0xe7) == (tmp1 & 0xe7))
- {
- int id;
-
- /*
- * It's at least CS4231
- */
-
- devc->chip_name = "CS4231";
- devc->model = MD_4231;
-
- /*
- * It could be an AD1845 or CS4231A as well.
- * CS4231 and AD1845 report the same revision info in I25
- * while the CS4231A reports different.
- */
-
- id = ad_read(devc, 25);
- if ((id & 0xe7) == 0x80) /* Device busy??? */
- id = ad_read(devc, 25);
- if ((id & 0xe7) == 0x80) /* Device still busy??? */
- id = ad_read(devc, 25);
- DDB(printk("ad1848_detect() - step J (%02x/%02x)\n", id, ad_read(devc, 25)));
-
- if ((id & 0xe7) == 0x80) {
- /*
- * It must be a CS4231 or AD1845. The register I23 of
- * CS4231 is undefined and it appears to be read only.
- * AD1845 uses I23 for setting sample rate. Assume
- * the chip is AD1845 if I23 is changeable.
- */
-
- unsigned char tmp = ad_read(devc, 23);
- ad_write(devc, 23, ~tmp);
-
- if (interwave)
- {
- devc->model = MD_IWAVE;
- devc->chip_name = "IWave";
- }
- else if (ad_read(devc, 23) != tmp) /* AD1845 ? */
- {
- devc->chip_name = "AD1845";
- devc->model = MD_1845;
- }
- else if (cs4248_flag)
- {
- if (ad_flags)
- *ad_flags |= AD_F_CS4248;
- devc->chip_name = "CS4248";
- devc->model = MD_1848;
- ad_write(devc, 12, ad_read(devc, 12) & ~0x40); /* Mode2 off */
- }
- ad_write(devc, 23, tmp); /* Restore */
- }
- else
- {
- switch (id & 0x1f) {
- case 3: /* CS4236/CS4235/CS42xB/CS4239 */
- {
- int xid;
- ad_write(devc, 12, ad_read(devc, 12) | 0x60); /* switch to mode 3 */
- ad_write(devc, 23, 0x9c); /* select extended register 25 */
- xid = inb(io_Indexed_Data(devc));
- ad_write(devc, 12, ad_read(devc, 12) & ~0x60); /* back to mode 0 */
- switch (xid & 0x1f)
- {
- case 0x00:
- devc->chip_name = "CS4237B(B)";
- devc->model = MD_42xB;
- break;
- case 0x08:
- /* Seems to be a 4238 ?? */
- devc->chip_name = "CS4238";
- devc->model = MD_42xB;
- break;
- case 0x09:
- devc->chip_name = "CS4238B";
- devc->model = MD_42xB;
- break;
- case 0x0b:
- devc->chip_name = "CS4236B";
- devc->model = MD_4236;
- break;
- case 0x10:
- devc->chip_name = "CS4237B";
- devc->model = MD_42xB;
- break;
- case 0x1d:
- devc->chip_name = "CS4235";
- devc->model = MD_4235;
- break;
- case 0x1e:
- devc->chip_name = "CS4239";
- devc->model = MD_4239;
- break;
- default:
- printk("Chip ident is %X.\n", xid&0x1F);
- devc->chip_name = "CS42xx";
- devc->model = MD_4232;
- break;
- }
- }
- break;
-
- case 2: /* CS4232/CS4232A */
- devc->chip_name = "CS4232";
- devc->model = MD_4232;
- break;
-
- case 0:
- if ((id & 0xe0) == 0xa0)
- {
- devc->chip_name = "CS4231A";
- devc->model = MD_4231A;
- }
- else
- {
- devc->chip_name = "CS4321";
- devc->model = MD_4231;
- }
- break;
-
- default: /* maybe */
- DDB(printk("ad1848: I25 = %02x/%02x\n", ad_read(devc, 25), ad_read(devc, 25) & 0xe7));
- if (optiC930)
- {
- devc->chip_name = "82C930";
- devc->model = MD_C930;
- }
- else
- {
- devc->chip_name = "CS4231";
- devc->model = MD_4231;
- }
- }
- }
- }
- ad_write(devc, 25, tmp1); /* Restore bits */
-
- DDB(printk("ad1848_detect() - step K\n"));
- }
- } else if (tmp1 == 0x0a) {
- /*
- * Is it perhaps a SoundPro CMI8330?
- * If so, then we should be able to change indirect registers
- * greater than I15 after activating MODE2, even though reading
- * back I12 does not show it.
- */
-
- /*
- * Let's try comparing register values
- */
- for (i = 0; i < 16; i++) {
- if ((tmp1 = ad_read(devc, i)) != (tmp2 = ad_read(devc, i + 16))) {
- DDB(printk("ad1848 detect step H(%d/%x/%x) - SoundPro chip?\n", i, tmp1, tmp2));
- soundpro = 1;
- devc->chip_name = "SoundPro CMI 8330";
- break;
- }
- }
- }
-
- DDB(printk("ad1848_detect() - step L\n"));
- if (ad_flags)
- {
- if (devc->model != MD_1848)
- *ad_flags |= AD_F_CS4231;
- }
- DDB(printk("ad1848_detect() - Detected OK\n"));
-
- if (devc->model == MD_1848 && ad1847_flag)
- devc->chip_name = "AD1847";
-
-
- if (sscape_flag == 1)
- devc->model = MD_1845_SSCAPE;
-
- return 1;
-}
-
-int ad1848_init (char *name, struct resource *ports, int irq, int dma_playback,
- int dma_capture, int share_dma, int *osp, struct module *owner)
-{
- /*
- * NOTE! If irq < 0, there is another driver which has allocated the IRQ
- * so that this driver doesn't need to allocate/deallocate it.
- * The actually used IRQ is ABS(irq).
- */
-
- int my_dev;
- char dev_name[100];
- int e;
-
- ad1848_info *devc = &adev_info[nr_ad1848_devs];
-
- ad1848_port_info *portc = NULL;
-
- devc->irq = (irq > 0) ? irq : 0;
- devc->open_mode = 0;
- devc->timer_ticks = 0;
- devc->dma1 = dma_playback;
- devc->dma2 = dma_capture;
- devc->subtype = cfg.card_subtype;
- devc->audio_flags = DMA_AUTOMODE;
- devc->playback_dev = devc->record_dev = 0;
- if (name != NULL)
- devc->name = name;
-
- if (name != NULL && name[0] != 0)
- sprintf(dev_name,
- "%s (%s)", name, devc->chip_name);
- else
- sprintf(dev_name,
- "Generic audio codec (%s)", devc->chip_name);
-
- rename_region(ports, devc->name);
-
- conf_printf2(dev_name, devc->base, devc->irq, dma_playback, dma_capture);
-
- if (devc->model == MD_1848 || devc->model == MD_C930)
- devc->audio_flags |= DMA_HARDSTOP;
-
- if (devc->model > MD_1848)
- {
- if (devc->dma1 == devc->dma2 || devc->dma2 == -1 || devc->dma1 == -1)
- devc->audio_flags &= ~DMA_DUPLEX;
- else
- devc->audio_flags |= DMA_DUPLEX;
- }
-
- portc = kmalloc(sizeof(ad1848_port_info), GFP_KERNEL);
- if(portc==NULL) {
- release_region(devc->base, 4);
- return -1;
- }
-
- if ((my_dev = sound_install_audiodrv(AUDIO_DRIVER_VERSION,
- dev_name,
- &ad1848_audio_driver,
- sizeof(struct audio_driver),
- devc->audio_flags,
- ad_format_mask[devc->model],
- devc,
- dma_playback,
- dma_capture)) < 0)
- {
- release_region(devc->base, 4);
- kfree(portc);
- return -1;
- }
-
- audio_devs[my_dev]->portc = portc;
- audio_devs[my_dev]->mixer_dev = -1;
- if (owner)
- audio_devs[my_dev]->d->owner = owner;
- memset((char *) portc, 0, sizeof(*portc));
-
- nr_ad1848_devs++;
-
- ad1848_init_hw(devc);
-
- if (irq > 0)
- {
- devc->dev_no = my_dev;
- if (request_irq(devc->irq, adintr, 0, devc->name,
- (void *)(long)my_dev) < 0)
- {
- printk(KERN_WARNING "ad1848: Unable to allocate IRQ\n");
- /* Don't free it either then.. */
- devc->irq = 0;
- }
- if (capabilities[devc->model].flags & CAP_F_TIMER)
- {
-#ifndef CONFIG_SMP
- int x;
- unsigned char tmp = ad_read(devc, 16);
-#endif
-
- devc->timer_ticks = 0;
-
- ad_write(devc, 21, 0x00); /* Timer MSB */
- ad_write(devc, 20, 0x10); /* Timer LSB */
-#ifndef CONFIG_SMP
- ad_write(devc, 16, tmp | 0x40); /* Enable timer */
- for (x = 0; x < 100000 && devc->timer_ticks == 0; x++);
- ad_write(devc, 16, tmp & ~0x40); /* Disable timer */
-
- if (devc->timer_ticks == 0)
- printk(KERN_WARNING "ad1848: Interrupt test failed (IRQ%d)\n", irq);
- else
- {
- DDB(printk("Interrupt test OK\n"));
- devc->irq_ok = 1;
- }
-#else
- devc->irq_ok = 1;
-#endif
- }
- else
- devc->irq_ok = 1; /* Couldn't test; assume it's OK */
- } else if (irq < 0)
- devc->dev_no = my_dev;
-
-#ifndef EXCLUDE_TIMERS
- if ((capabilities[devc->model].flags & CAP_F_TIMER) &&
- devc->irq_ok)
- ad1848_tmr_install(my_dev);
-#endif
-
- if (!share_dma)
- {
- if (sound_alloc_dma(dma_playback, devc->name))
- printk(KERN_WARNING "ad1848.c: Can't allocate DMA%d\n", dma_playback);
-
- if (dma_capture != dma_playback)
- if (sound_alloc_dma(dma_capture, devc->name))
- printk(KERN_WARNING "ad1848.c: Can't allocate DMA%d\n", dma_capture);
- }
-
- if ((e = sound_install_mixer(MIXER_DRIVER_VERSION,
- dev_name,
- &ad1848_mixer_operations,
- sizeof(struct mixer_operations),
- devc)) >= 0)
- {
- audio_devs[my_dev]->mixer_dev = e;
- if (owner)
- mixer_devs[e]->owner = owner;
- }
- return my_dev;
-}
-
-int ad1848_control(int cmd, int arg)
-{
- ad1848_info *devc;
- unsigned long flags;
-
- if (nr_ad1848_devs < 1)
- return -ENODEV;
-
- devc = &adev_info[nr_ad1848_devs - 1];
-
- switch (cmd)
- {
-		case AD1848_SET_XTAL:	/* Change clock frequency of the AD1845 (only) */
- if (devc->model != MD_1845 && devc->model != MD_1845_SSCAPE)
- return -EINVAL;
- spin_lock_irqsave(&devc->lock,flags);
- ad_enter_MCE(devc);
- ad_write(devc, 29, (ad_read(devc, 29) & 0x1f) | (arg << 5));
- ad_leave_MCE(devc);
- spin_unlock_irqrestore(&devc->lock,flags);
- break;
-
- case AD1848_MIXER_REROUTE:
- {
- int o = (arg >> 8) & 0xff;
- int n = arg & 0xff;
-
- if (o < 0 || o >= SOUND_MIXER_NRDEVICES)
- return -EINVAL;
-
- if (!(devc->supported_devices & (1 << o)) &&
- !(devc->supported_rec_devices & (1 << o)))
- return -EINVAL;
-
- if (n == SOUND_MIXER_NONE)
- { /* Just hide this control */
-				  ad1848_mixer_set(devc, o, 0);	/* Mute it */
- devc->supported_devices &= ~(1 << o);
- devc->supported_rec_devices &= ~(1 << o);
- break;
- }
-
-			  /* Make the mixer control identified by o appear as n */
- if (n < 0 || n >= SOUND_MIXER_NRDEVICES)
- return -EINVAL;
-
- devc->mixer_reroute[n] = o; /* Rename the control */
- if (devc->supported_devices & (1 << o))
- devc->supported_devices |= (1 << n);
- if (devc->supported_rec_devices & (1 << o))
- devc->supported_rec_devices |= (1 << n);
-
- devc->supported_devices &= ~(1 << o);
- devc->supported_rec_devices &= ~(1 << o);
- }
- break;
- }
- return 0;
-}
-
-void ad1848_unload(int io_base, int irq, int dma_playback, int dma_capture, int share_dma)
-{
- int i, mixer, dev = 0;
- ad1848_info *devc = NULL;
-
- for (i = 0; devc == NULL && i < nr_ad1848_devs; i++)
- {
- if (adev_info[i].base == io_base)
- {
- devc = &adev_info[i];
- dev = devc->dev_no;
- }
- }
-
- if (devc != NULL)
- {
- kfree(audio_devs[dev]->portc);
- release_region(devc->base, 4);
-
- if (!share_dma)
- {
-			if (devc->irq > 0) /* There is no point in freeing the IRQ if it wasn't allocated */
- free_irq(devc->irq, (void *)(long)devc->dev_no);
-
- sound_free_dma(dma_playback);
-
- if (dma_playback != dma_capture)
- sound_free_dma(dma_capture);
-
- }
- mixer = audio_devs[devc->dev_no]->mixer_dev;
- if(mixer>=0)
- sound_unload_mixerdev(mixer);
-
- nr_ad1848_devs--;
- for ( ; i < nr_ad1848_devs ; i++)
- adev_info[i] = adev_info[i+1];
- }
- else
- printk(KERN_ERR "ad1848: Can't find device to be unloaded. Base=%x\n", io_base);
-}
-
-static irqreturn_t adintr(int irq, void *dev_id)
-{
- unsigned char status;
- ad1848_info *devc;
- int dev;
- int alt_stat = 0xff;
- unsigned char c930_stat = 0;
- int cnt = 0;
-
- dev = (long)dev_id;
- devc = (ad1848_info *) audio_devs[dev]->devc;
-
-interrupt_again: /* Jump back here if int status doesn't reset */
-
- status = inb(io_Status(devc));
-
- if (status == 0x80)
- printk(KERN_DEBUG "adintr: Why?\n");
- if (devc->model == MD_1848)
- outb((0), io_Status(devc)); /* Clear interrupt status */
-
- if (status & 0x01)
- {
- if (devc->model == MD_C930)
- { /* 82C930 has interrupt status register in MAD16 register MC11 */
-
- spin_lock(&devc->lock);
-
- /* 0xe0e is C930 address port
- * 0xe0f is C930 data port
- */
- outb(11, 0xe0e);
- c930_stat = inb(0xe0f);
- outb((~c930_stat), 0xe0f);
-
- spin_unlock(&devc->lock);
-
- alt_stat = (c930_stat << 2) & 0x30;
- }
- else if (devc->model != MD_1848)
- {
- spin_lock(&devc->lock);
- alt_stat = ad_read(devc, 24);
- ad_write(devc, 24, ad_read(devc, 24) & ~alt_stat); /* Selective ack */
- spin_unlock(&devc->lock);
- }
-
- if ((devc->open_mode & OPEN_READ) && (devc->audio_mode & PCM_ENABLE_INPUT) && (alt_stat & 0x20))
- {
- DMAbuf_inputintr(devc->record_dev);
- }
- if ((devc->open_mode & OPEN_WRITE) && (devc->audio_mode & PCM_ENABLE_OUTPUT) &&
- (alt_stat & 0x10))
- {
- DMAbuf_outputintr(devc->playback_dev, 1);
- }
- if (devc->model != MD_1848 && (alt_stat & 0x40)) /* Timer interrupt */
- {
- devc->timer_ticks++;
-#ifndef EXCLUDE_TIMERS
- if (timer_installed == dev && devc->timer_running)
- sound_timer_interrupt();
-#endif
- }
- }
-/*
- * Sometimes playback or capture interrupts occur while a timer interrupt
- * is being handled. The interrupt will not be retriggered if we don't
- * handle it now. Check if an interrupt is still pending and restart
- * the handler in this case.
- */
- if (inb(io_Status(devc)) & 0x01 && cnt++ < 4)
- {
- goto interrupt_again;
- }
- return IRQ_HANDLED;
-}
-
-/*
- * Experimental initialization sequence for the integrated sound system
- * of the Compaq Deskpro M.
- */
-
-static int init_deskpro_m(struct address_info *hw_config)
-{
- unsigned char tmp;
-
- if ((tmp = inb(0xc44)) == 0xff)
- {
- DDB(printk("init_deskpro_m: Dead port 0xc44\n"));
- return 0;
- }
-
- outb(0x10, 0xc44);
- outb(0x40, 0xc45);
- outb(0x00, 0xc46);
- outb(0xe8, 0xc47);
- outb(0x14, 0xc44);
- outb(0x40, 0xc45);
- outb(0x00, 0xc46);
- outb(0xe8, 0xc47);
- outb(0x10, 0xc44);
-
- return 1;
-}
-
-/*
- * Experimental initialization sequence for the integrated sound system
- * of Compaq Deskpro XL.
- */
-
-static int init_deskpro(struct address_info *hw_config)
-{
- unsigned char tmp;
-
- if ((tmp = inb(0xc44)) == 0xff)
- {
- DDB(printk("init_deskpro: Dead port 0xc44\n"));
- return 0;
- }
- outb((tmp | 0x04), 0xc44); /* Select bank 1 */
- if (inb(0xc44) != 0x04)
- {
- DDB(printk("init_deskpro: Invalid bank1 signature in port 0xc44\n"));
- return 0;
- }
- /*
- * OK. It looks like a Deskpro so let's proceed.
- */
-
- /*
- * I/O port 0xc44 Audio configuration register.
- *
- * bits 0xc0: Audio revision bits
- * 0x00 = Compaq Business Audio
- * 0x40 = MS Sound System Compatible (reset default)
- * 0x80 = Reserved
- * 0xc0 = Reserved
- * bit 0x20: No Wait State Enable
- * 0x00 = Disabled (reset default, DMA mode)
- * 0x20 = Enabled (programmed I/O mode)
- * bit 0x10: MS Sound System Decode Enable
- * 0x00 = Decoding disabled (reset default)
- * 0x10 = Decoding enabled
- * bit 0x08: FM Synthesis Decode Enable
- * 0x00 = Decoding Disabled (reset default)
- * 0x08 = Decoding enabled
- * bit 0x04 Bank select
- * 0x00 = Bank 0
- * 0x04 = Bank 1
- * bits 0x03 MSS Base address
- * 0x00 = 0x530 (reset default)
- * 0x01 = 0x604
- * 0x02 = 0xf40
- * 0x03 = 0xe80
- */
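-
-	/*
-	 * As a worked example of the bit map above (illustrative only): the
-	 * value 0x58 used below decodes as 0x40 (MS Sound System compatible)
-	 * | 0x10 (MSS decoding enabled) | 0x08 (FM decoding enabled), with
-	 * bank 0 and the MSS base 0x530 still selected.
-	 */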
-
-#ifdef DEBUGXL
- /* Debug printing */
- printk("Port 0xc44 (before): ");
- outb((tmp & ~0x04), 0xc44);
- printk("%02x ", inb(0xc44));
- outb((tmp | 0x04), 0xc44);
- printk("%02x\n", inb(0xc44));
-#endif
-
- /* Set bank 1 of the register */
- tmp = 0x58; /* MSS Mode, MSS&FM decode enabled */
-
- switch (hw_config->io_base)
- {
- case 0x530:
- tmp |= 0x00;
- break;
- case 0x604:
- tmp |= 0x01;
- break;
- case 0xf40:
- tmp |= 0x02;
- break;
- case 0xe80:
- tmp |= 0x03;
- break;
- default:
- DDB(printk("init_deskpro: Invalid MSS port %x\n", hw_config->io_base));
- return 0;
- }
- outb((tmp & ~0x04), 0xc44); /* Write to bank=0 */
-
-#ifdef DEBUGXL
- /* Debug printing */
- printk("Port 0xc44 (after): ");
- outb((tmp & ~0x04), 0xc44); /* Select bank=0 */
- printk("%02x ", inb(0xc44));
- outb((tmp | 0x04), 0xc44); /* Select bank=1 */
- printk("%02x\n", inb(0xc44));
-#endif
-
- /*
- * I/O port 0xc45 FM Address Decode/MSS ID Register.
- *
- * bank=0, bits 0xfe: FM synthesis Decode Compare bits 7:1 (default=0x88)
- * bank=0, bit 0x01: SBIC Power Control Bit
- * 0x00 = Powered up
- * 0x01 = Powered down
- * bank=1, bits 0xfc: MSS ID (default=0x40)
- */
-
-#ifdef DEBUGXL
- /* Debug printing */
- printk("Port 0xc45 (before): ");
- outb((tmp & ~0x04), 0xc44); /* Select bank=0 */
- printk("%02x ", inb(0xc45));
- outb((tmp | 0x04), 0xc44); /* Select bank=1 */
- printk("%02x\n", inb(0xc45));
-#endif
-
- outb((tmp & ~0x04), 0xc44); /* Select bank=0 */
- outb((0x88), 0xc45); /* FM base 7:0 = 0x88 */
- outb((tmp | 0x04), 0xc44); /* Select bank=1 */
- outb((0x10), 0xc45); /* MSS ID = 0x10 (MSS port returns 0x04) */
-
-#ifdef DEBUGXL
- /* Debug printing */
- printk("Port 0xc45 (after): ");
- outb((tmp & ~0x04), 0xc44); /* Select bank=0 */
- printk("%02x ", inb(0xc45));
- outb((tmp | 0x04), 0xc44); /* Select bank=1 */
- printk("%02x\n", inb(0xc45));
-#endif
-
-
- /*
- * I/O port 0xc46 FM Address Decode/Address ASIC Revision Register.
- *
- * bank=0, bits 0xff: FM synthesis Decode Compare bits 15:8 (default=0x03)
- * bank=1, bits 0xff: Audio addressing ASIC id
- */
-
-#ifdef DEBUGXL
- /* Debug printing */
- printk("Port 0xc46 (before): ");
- outb((tmp & ~0x04), 0xc44); /* Select bank=0 */
- printk("%02x ", inb(0xc46));
- outb((tmp | 0x04), 0xc44); /* Select bank=1 */
- printk("%02x\n", inb(0xc46));
-#endif
-
- outb((tmp & ~0x04), 0xc44); /* Select bank=0 */
- outb((0x03), 0xc46); /* FM base 15:8 = 0x03 */
- outb((tmp | 0x04), 0xc44); /* Select bank=1 */
- outb((0x11), 0xc46); /* ASIC ID = 0x11 */
-
-#ifdef DEBUGXL
- /* Debug printing */
- printk("Port 0xc46 (after): ");
- outb((tmp & ~0x04), 0xc44); /* Select bank=0 */
- printk("%02x ", inb(0xc46));
- outb((tmp | 0x04), 0xc44); /* Select bank=1 */
- printk("%02x\n", inb(0xc46));
-#endif
-
- /*
- * I/O port 0xc47 FM Address Decode Register.
- *
- * bank=0, bits 0xff: Decode enable selection for various FM address bits
- * bank=1, bits 0xff: Reserved
- */
-
-#ifdef DEBUGXL
- /* Debug printing */
- printk("Port 0xc47 (before): ");
- outb((tmp & ~0x04), 0xc44); /* Select bank=0 */
- printk("%02x ", inb(0xc47));
- outb((tmp | 0x04), 0xc44); /* Select bank=1 */
- printk("%02x\n", inb(0xc47));
-#endif
-
- outb((tmp & ~0x04), 0xc44); /* Select bank=0 */
- outb((0x7c), 0xc47); /* FM decode enable bits = 0x7c */
- outb((tmp | 0x04), 0xc44); /* Select bank=1 */
- outb((0x00), 0xc47); /* Reserved bank1 = 0x00 */
-
-#ifdef DEBUGXL
- /* Debug printing */
- printk("Port 0xc47 (after): ");
- outb((tmp & ~0x04), 0xc44); /* Select bank=0 */
- printk("%02x ", inb(0xc47));
- outb((tmp | 0x04), 0xc44); /* Select bank=1 */
- printk("%02x\n", inb(0xc47));
-#endif
-
- /*
- * I/O port 0xc6f = Audio Disable Function Register
- */
-
-#ifdef DEBUGXL
- printk("Port 0xc6f (before) = %02x\n", inb(0xc6f));
-#endif
-
- outb((0x80), 0xc6f);
-
-#ifdef DEBUGXL
- printk("Port 0xc6f (after) = %02x\n", inb(0xc6f));
-#endif
-
- return 1;
-}
-
-int probe_ms_sound(struct address_info *hw_config, struct resource *ports)
-{
- unsigned char tmp;
-
- DDB(printk("Entered probe_ms_sound(%x, %d)\n", hw_config->io_base, hw_config->card_subtype));
-
- if (hw_config->card_subtype == 1) /* Has no IRQ/DMA registers */
- {
- /* check_opl3(0x388, hw_config); */
- return ad1848_detect(ports, NULL, hw_config->osp);
- }
-
- if (deskpro_xl && hw_config->card_subtype == 2) /* Compaq Deskpro XL */
- {
- if (!init_deskpro(hw_config))
- return 0;
- }
-
- if (deskpro_m) /* Compaq Deskpro M */
- {
- if (!init_deskpro_m(hw_config))
- return 0;
- }
-
- /*
-	 * Check if the I/O port returns a valid signature. The original MS Sound
- * system returns 0x04 while some cards (AudioTrix Pro for example)
- * return 0x00 or 0x0f.
- */
-
- if ((tmp = inb(hw_config->io_base + 3)) == 0xff) /* Bus float */
- {
- int ret;
-
- DDB(printk("I/O address is inactive (%x)\n", tmp));
- if (!(ret = ad1848_detect(ports, NULL, hw_config->osp)))
- return 0;
- return 1;
- }
- DDB(printk("MSS signature = %x\n", tmp & 0x3f));
- if ((tmp & 0x3f) != 0x04 &&
- (tmp & 0x3f) != 0x0f &&
- (tmp & 0x3f) != 0x00)
- {
- int ret;
-
- MDB(printk(KERN_ERR "No MSS signature detected on port 0x%x (0x%x)\n", hw_config->io_base, (int) inb(hw_config->io_base + 3)));
- DDB(printk("Trying to detect codec anyway but IRQ/DMA may not work\n"));
- if (!(ret = ad1848_detect(ports, NULL, hw_config->osp)))
- return 0;
-
- hw_config->card_subtype = 1;
- return 1;
- }
- if ((hw_config->irq != 5) &&
- (hw_config->irq != 7) &&
- (hw_config->irq != 9) &&
- (hw_config->irq != 10) &&
- (hw_config->irq != 11) &&
- (hw_config->irq != 12))
- {
- printk(KERN_ERR "MSS: Bad IRQ %d\n", hw_config->irq);
- return 0;
- }
- if (hw_config->dma != 0 && hw_config->dma != 1 && hw_config->dma != 3)
- {
- printk(KERN_ERR "MSS: Bad DMA %d\n", hw_config->dma);
- return 0;
- }
- /*
-	 * Check that DMA0 is not in use with an 8-bit board.
- */
-
- if (hw_config->dma == 0 && inb(hw_config->io_base + 3) & 0x80)
- {
- printk(KERN_ERR "MSS: Can't use DMA0 with a 8 bit card/slot\n");
- return 0;
- }
- if (hw_config->irq > 7 && hw_config->irq != 9 && inb(hw_config->io_base + 3) & 0x80)
- {
- printk(KERN_ERR "MSS: Can't use IRQ%d with a 8 bit card/slot\n", hw_config->irq);
- return 0;
- }
- return ad1848_detect(ports, NULL, hw_config->osp);
-}
-
-void attach_ms_sound(struct address_info *hw_config, struct resource *ports, struct module *owner)
-{
- static signed char interrupt_bits[12] =
- {
- -1, -1, -1, -1, -1, 0x00, -1, 0x08, -1, 0x10, 0x18, 0x20
- };
- signed char bits;
- char dma2_bit = 0;
-
- static char dma_bits[4] =
- {
- 1, 2, 0, 3
- };
-
- int config_port = hw_config->io_base + 0;
- int version_port = hw_config->io_base + 3;
- int dma = hw_config->dma;
- int dma2 = hw_config->dma2;
-
- if (hw_config->card_subtype == 1) /* Has no IRQ/DMA registers */
- {
- hw_config->slots[0] = ad1848_init("MS Sound System", ports,
- hw_config->irq,
- hw_config->dma,
- hw_config->dma2, 0,
- hw_config->osp,
- owner);
- return;
- }
- /*
- * Set the IRQ and DMA addresses.
- */
-
- bits = interrupt_bits[hw_config->irq];
- if (bits == -1)
- {
- printk(KERN_ERR "MSS: Bad IRQ %d\n", hw_config->irq);
- release_region(ports->start, 4);
- release_region(ports->start - 4, 4);
- return;
- }
- outb((bits | 0x40), config_port);
- if ((inb(version_port) & 0x40) == 0)
- printk(KERN_ERR "[MSS: IRQ Conflict?]\n");
-
-/*
- * Handle the capture DMA channel
- */
-
- if (dma2 != -1 && dma2 != dma)
- {
- if (!((dma == 0 && dma2 == 1) ||
- (dma == 1 && dma2 == 0) ||
- (dma == 3 && dma2 == 0)))
- { /* Unsupported combination. Try to swap channels */
- int tmp = dma;
-
- dma = dma2;
- dma2 = tmp;
- }
- if ((dma == 0 && dma2 == 1) ||
- (dma == 1 && dma2 == 0) ||
- (dma == 3 && dma2 == 0))
- {
- dma2_bit = 0x04; /* Enable capture DMA */
- }
- else
- {
- printk(KERN_WARNING "MSS: Invalid capture DMA\n");
- dma2 = dma;
- }
- }
- else
- {
- dma2 = dma;
- }
-
- hw_config->dma = dma;
- hw_config->dma2 = dma2;
-
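-	/*
-	 * For example, irq 9, dma 1, dma2 0 would give 0x10 | 0x02 | 0x04 =
-	 * 0x16 in the write below (illustrative values only).
-	 */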
- outb((bits | dma_bits[dma] | dma2_bit), config_port); /* Write IRQ+DMA setup */
-
- hw_config->slots[0] = ad1848_init("MS Sound System", ports,
- hw_config->irq,
- dma, dma2, 0,
- hw_config->osp,
- THIS_MODULE);
-}
-
-void unload_ms_sound(struct address_info *hw_config)
-{
- ad1848_unload(hw_config->io_base + 4,
- hw_config->irq,
- hw_config->dma,
- hw_config->dma2, 0);
- sound_unload_audiodev(hw_config->slots[0]);
- release_region(hw_config->io_base, 4);
-}
-
-#ifndef EXCLUDE_TIMERS
-
-/*
- * Timer stuff (for /dev/music).
- */
-
-static unsigned int current_interval;
-
-static unsigned int ad1848_tmr_start(int dev, unsigned int usecs)
-{
- unsigned long flags;
- ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
- unsigned long xtal_nsecs; /* nanoseconds per xtal oscillator tick */
- unsigned long divider;
-
- spin_lock_irqsave(&devc->lock,flags);
-
- /*
- * Length of the timer interval (in nanoseconds) depends on the
- * selected crystal oscillator. Check this from bit 0x01 of I8.
- *
- * AD1845 has just one oscillator which has cycle time of 10.050 us
- * (when a 24.576 MHz xtal oscillator is used).
- *
- * Convert requested interval to nanoseconds before computing
- * the timer divider.
- */
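-
-	/*
-	 * Rough worked example (assuming the 9969 ns oscillator): a request
-	 * of 10000 us gives divider = (10000000 + 4984) / 9969 = 1003, and
-	 * the interval actually returned is (1003 * 9969 + 500) / 1000 =
-	 * 9999 us.
-	 */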
-
- if (devc->model == MD_1845 || devc->model == MD_1845_SSCAPE)
- xtal_nsecs = 10050;
- else if (ad_read(devc, 8) & 0x01)
- xtal_nsecs = 9920;
- else
- xtal_nsecs = 9969;
-
- divider = (usecs * 1000 + xtal_nsecs / 2) / xtal_nsecs;
-
- if (divider < 100) /* Don't allow shorter intervals than about 1ms */
- divider = 100;
-
- if (divider > 65535) /* Overflow check */
- divider = 65535;
-
- ad_write(devc, 21, (divider >> 8) & 0xff); /* Set upper bits */
- ad_write(devc, 20, divider & 0xff); /* Set lower bits */
- ad_write(devc, 16, ad_read(devc, 16) | 0x40); /* Start the timer */
- devc->timer_running = 1;
- spin_unlock_irqrestore(&devc->lock,flags);
-
- return current_interval = (divider * xtal_nsecs + 500) / 1000;
-}
-
-static void ad1848_tmr_reprogram(int dev)
-{
- /*
- * Audio driver has changed sampling rate so that a different xtal
- * oscillator was selected. We have to reprogram the timer rate.
- */
-
- ad1848_tmr_start(dev, current_interval);
- sound_timer_syncinterval(current_interval);
-}
-
-static void ad1848_tmr_disable(int dev)
-{
- unsigned long flags;
- ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
-
- spin_lock_irqsave(&devc->lock,flags);
- ad_write(devc, 16, ad_read(devc, 16) & ~0x40);
- devc->timer_running = 0;
- spin_unlock_irqrestore(&devc->lock,flags);
-}
-
-static void ad1848_tmr_restart(int dev)
-{
- unsigned long flags;
- ad1848_info *devc = (ad1848_info *) audio_devs[dev]->devc;
-
- if (current_interval == 0)
- return;
-
- spin_lock_irqsave(&devc->lock,flags);
- ad_write(devc, 16, ad_read(devc, 16) | 0x40);
- devc->timer_running = 1;
- spin_unlock_irqrestore(&devc->lock,flags);
-}
-
-static struct sound_lowlev_timer ad1848_tmr =
-{
- 0,
- 2,
- ad1848_tmr_start,
- ad1848_tmr_disable,
- ad1848_tmr_restart
-};
-
-static int ad1848_tmr_install(int dev)
-{
- if (timer_installed != -1)
- return 0; /* Don't install another timer */
-
- timer_installed = ad1848_tmr.dev = dev;
- sound_timer_init(&ad1848_tmr, audio_devs[dev]->name);
-
- return 1;
-}
-#endif /* EXCLUDE_TIMERS */
-
-EXPORT_SYMBOL(ad1848_detect);
-EXPORT_SYMBOL(ad1848_init);
-EXPORT_SYMBOL(ad1848_unload);
-EXPORT_SYMBOL(ad1848_control);
-EXPORT_SYMBOL(probe_ms_sound);
-EXPORT_SYMBOL(attach_ms_sound);
-EXPORT_SYMBOL(unload_ms_sound);
-
-static int __initdata io = -1;
-static int __initdata irq = -1;
-static int __initdata dma = -1;
-static int __initdata dma2 = -1;
-static int __initdata type = 0;
-
-module_param_hw(io, int, ioport, 0); /* I/O for a raw AD1848 card */
-module_param_hw(irq, int, irq, 0); /* IRQ to use */
-module_param_hw(dma, int, dma, 0); /* First DMA channel */
-module_param_hw(dma2, int, dma, 0); /* Second DMA channel */
-module_param(type, int, 0); /* Card type */
-module_param(deskpro_xl, bool, 0); /* Special magic for Deskpro XL boxen */
-module_param(deskpro_m, bool, 0); /* Special magic for Deskpro M box */
-module_param(soundpro, bool, 0); /* More special magic for SoundPro chips */
-
-#ifdef CONFIG_PNP
-module_param(isapnp, int, 0);
-module_param(isapnpjump, int, 0);
-module_param(reverse, bool, 0);
-MODULE_PARM_DESC(isapnp, "When set to 0, Plug & Play support will be disabled");
-MODULE_PARM_DESC(isapnpjump, "Jumps to a specific slot in the driver's PnP table. Use the source, Luke.");
-MODULE_PARM_DESC(reverse, "When set to 1, will reverse ISAPnP search order");
-
-static struct pnp_dev *ad1848_dev = NULL;
-
-/* Please add new entries at the end of the table */
-static struct {
- char *name;
- unsigned short card_vendor, card_device,
- vendor, function;
- short mss_io, irq, dma, dma2; /* index into isapnp table */
- int type;
-} ad1848_isapnp_list[] __initdata = {
- {"CMI 8330 SoundPRO",
- ISAPNP_VENDOR('C','M','I'), ISAPNP_DEVICE(0x0001),
- ISAPNP_VENDOR('@','@','@'), ISAPNP_FUNCTION(0x0001),
- 0, 0, 0,-1, 0},
- {"CS4232 based card",
- ISAPNP_ANY_ID, ISAPNP_ANY_ID,
- ISAPNP_VENDOR('C','S','C'), ISAPNP_FUNCTION(0x0000),
- 0, 0, 0, 1, 0},
- {"CS4232 based card",
- ISAPNP_ANY_ID, ISAPNP_ANY_ID,
- ISAPNP_VENDOR('C','S','C'), ISAPNP_FUNCTION(0x0100),
- 0, 0, 0, 1, 0},
- {"OPL3-SA2 WSS mode",
- ISAPNP_ANY_ID, ISAPNP_ANY_ID,
- ISAPNP_VENDOR('Y','M','H'), ISAPNP_FUNCTION(0x0021),
- 1, 0, 0, 1, 1},
- {"Advanced Gravis InterWave Audio",
- ISAPNP_VENDOR('G','R','V'), ISAPNP_DEVICE(0x0001),
- ISAPNP_VENDOR('G','R','V'), ISAPNP_FUNCTION(0x0000),
- 0, 0, 0, 1, 0},
- {NULL}
-};
-
-#ifdef MODULE
-static struct isapnp_device_id id_table[] = {
- { ISAPNP_VENDOR('C','M','I'), ISAPNP_DEVICE(0x0001),
- ISAPNP_VENDOR('@','@','@'), ISAPNP_FUNCTION(0x0001), 0 },
- { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
- ISAPNP_VENDOR('C','S','C'), ISAPNP_FUNCTION(0x0000), 0 },
- { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
- ISAPNP_VENDOR('C','S','C'), ISAPNP_FUNCTION(0x0100), 0 },
- /* The main driver for this card is opl3sa2
- { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
- ISAPNP_VENDOR('Y','M','H'), ISAPNP_FUNCTION(0x0021), 0 },
- */
- { ISAPNP_VENDOR('G','R','V'), ISAPNP_DEVICE(0x0001),
- ISAPNP_VENDOR('G','R','V'), ISAPNP_FUNCTION(0x0000), 0 },
- {0}
-};
-
-MODULE_DEVICE_TABLE(isapnp, id_table);
-#endif
-
-static struct pnp_dev *activate_dev(char *devname, char *resname, struct pnp_dev *dev)
-{
- int err;
-
- err = pnp_device_attach(dev);
- if (err < 0)
- return(NULL);
-
- if((err = pnp_activate_dev(dev)) < 0) {
- printk(KERN_ERR "ad1848: %s %s config failed (out of resources?)[%d]\n", devname, resname, err);
-
- pnp_device_detach(dev);
-
- return(NULL);
- }
- audio_activated = 1;
- return(dev);
-}
-
-static struct pnp_dev __init *ad1848_init_generic(struct pnp_card *bus,
- struct address_info *hw_config, int slot)
-{
-
- /* Configure Audio device */
- if((ad1848_dev = pnp_find_dev(bus, ad1848_isapnp_list[slot].vendor, ad1848_isapnp_list[slot].function, NULL)))
- {
- if((ad1848_dev = activate_dev(ad1848_isapnp_list[slot].name, "ad1848", ad1848_dev)))
- {
- hw_config->io_base = pnp_port_start(ad1848_dev, ad1848_isapnp_list[slot].mss_io);
- hw_config->irq = pnp_irq(ad1848_dev, ad1848_isapnp_list[slot].irq);
- hw_config->dma = pnp_dma(ad1848_dev, ad1848_isapnp_list[slot].dma);
- if(ad1848_isapnp_list[slot].dma2 != -1)
- hw_config->dma2 = pnp_dma(ad1848_dev, ad1848_isapnp_list[slot].dma2);
- else
- hw_config->dma2 = -1;
- hw_config->card_subtype = ad1848_isapnp_list[slot].type;
- } else
- return(NULL);
- } else
- return(NULL);
-
- return(ad1848_dev);
-}
-
-static int __init ad1848_isapnp_init(struct address_info *hw_config, struct pnp_card *bus, int slot)
-{
- char *busname = bus->name[0] ? bus->name : ad1848_isapnp_list[slot].name;
-
- /* Initialize this baby. */
-
- if(ad1848_init_generic(bus, hw_config, slot)) {
- /* We got it. */
-
- printk(KERN_NOTICE "ad1848: PnP reports '%s' at i/o %#x, irq %d, dma %d, %d\n",
- busname,
- hw_config->io_base, hw_config->irq, hw_config->dma,
- hw_config->dma2);
- return 1;
- }
- return 0;
-}
-
-static int __init ad1848_isapnp_probe(struct address_info *hw_config)
-{
- static int first = 1;
- int i;
-
-	/* Count entries in ad1848_isapnp_list */
- for (i = 0; ad1848_isapnp_list[i].card_vendor != 0; i++);
- i--;
-
- /* Check and adjust isapnpjump */
- if( isapnpjump < 0 || isapnpjump > i) {
- isapnpjump = reverse ? i : 0;
- printk(KERN_ERR "ad1848: Valid range for isapnpjump is 0-%d. Adjusted to %d.\n", i, isapnpjump);
- }
-
- if(!first || !reverse)
- i = isapnpjump;
- first = 0;
- while(ad1848_isapnp_list[i].card_vendor != 0) {
- static struct pnp_card *bus = NULL;
-
- while ((bus = pnp_find_card(
- ad1848_isapnp_list[i].card_vendor,
- ad1848_isapnp_list[i].card_device,
- bus))) {
-
- if(ad1848_isapnp_init(hw_config, bus, i)) {
- isapnpjump = i; /* start next search from here */
- return 0;
- }
- }
- i += reverse ? -1 : 1;
- }
-
- return -ENODEV;
-}
-#endif
-
-
-static int __init init_ad1848(void)
-{
- printk(KERN_INFO "ad1848/cs4248 codec driver Copyright (C) by Hannu Savolainen 1993-1996\n");
-
-#ifdef CONFIG_PNP
- if(isapnp && (ad1848_isapnp_probe(&cfg) < 0) ) {
- printk(KERN_NOTICE "ad1848: No ISAPnP cards found, trying standard ones...\n");
- isapnp = 0;
- }
-#endif
-
- if(io != -1) {
- struct resource *ports;
- if( isapnp == 0 )
- {
- if(irq == -1 || dma == -1) {
- printk(KERN_WARNING "ad1848: must give I/O , IRQ and DMA.\n");
- return -EINVAL;
- }
-
- cfg.irq = irq;
- cfg.io_base = io;
- cfg.dma = dma;
- cfg.dma2 = dma2;
- cfg.card_subtype = type;
- }
-
- ports = request_region(io + 4, 4, "ad1848");
-
- if (!ports)
- return -EBUSY;
-
- if (!request_region(io, 4, "WSS config")) {
- release_region(io + 4, 4);
- return -EBUSY;
- }
-
- if (!probe_ms_sound(&cfg, ports)) {
- release_region(io + 4, 4);
- release_region(io, 4);
- return -ENODEV;
- }
- attach_ms_sound(&cfg, ports, THIS_MODULE);
- loaded = 1;
- }
- return 0;
-}
-
-static void __exit cleanup_ad1848(void)
-{
- if(loaded)
- unload_ms_sound(&cfg);
-
-#ifdef CONFIG_PNP
- if(ad1848_dev){
- if(audio_activated)
- pnp_device_detach(ad1848_dev);
- }
-#endif
-}
-
-module_init(init_ad1848);
-module_exit(cleanup_ad1848);
-
-#ifndef MODULE
-static int __init setup_ad1848(char *str)
-{
- /* io, irq, dma, dma2, type */
- int ints[6];
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- io = ints[1];
- irq = ints[2];
- dma = ints[3];
- dma2 = ints[4];
- type = ints[5];
-
- return 1;
-}
-
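-/*
- * For example, booting with "ad1848=0x530,11,1,3,0" would select I/O base
- * 0x530, irq 11, dma 1, dma2 3 and card type 0 (illustrative values only).
- */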
-__setup("ad1848=", setup_ad1848);
-#endif
-MODULE_LICENSE("GPL");
diff --git a/sound/oss/ad1848.h b/sound/oss/ad1848.h
deleted file mode 100644
index 390f03e13d09..000000000000
--- a/sound/oss/ad1848.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#include <linux/interrupt.h>
-
-#define AD_F_CS4231 0x0001 /* Returned if a CS4232 (or compatible) detected */
-#define AD_F_CS4248 0x0001 /* Returned if a CS4248 (or compatible) detected */
-
-#define AD1848_SET_XTAL 1
-#define AD1848_MIXER_REROUTE 2
-
-#define AD1848_REROUTE(oldctl, newctl) \
- ad1848_control(AD1848_MIXER_REROUTE, ((oldctl)<<8)|(newctl))
-
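-/*
- * For example, a card driver could call
- *	AD1848_REROUTE(SOUND_MIXER_LINE1, SOUND_MIXER_CD);
- * to make the control wired to LINE1 show up as the CD mixer channel.
- */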
-
-int ad1848_init(char *name, struct resource *ports, int irq, int dma_playback,
- int dma_capture, int share_dma, int *osp, struct module *owner);
-void ad1848_unload (int io_base, int irq, int dma_playback, int dma_capture, int share_dma);
-
-int ad1848_detect (struct resource *ports, int *flags, int *osp);
-int ad1848_control(int cmd, int arg);
-
-void attach_ms_sound(struct address_info * hw_config, struct resource *ports, struct module * owner);
-
-int probe_ms_sound(struct address_info *hw_config, struct resource *ports);
-void unload_ms_sound(struct address_info *hw_info);
diff --git a/sound/oss/ad1848_mixer.h b/sound/oss/ad1848_mixer.h
deleted file mode 100644
index 2cf719b5fbbc..000000000000
--- a/sound/oss/ad1848_mixer.h
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * sound/oss/ad1848_mixer.h
- *
- * Definitions for the mixer of AD1848 and compatible codecs.
- */
-
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- */
-
-
-/*
- * The AD1848 codec has generic input lines called Line, Aux1 and Aux2.
- * Sound card manufacturers have connected actual inputs (CD, synth, line,
- * etc) to these inputs in a different order. Therefore it's difficult
- * to assign mixer channels to these inputs correctly. The following
- * contains two alternative mappings. The first one is for GUS MAX and
- * the second is just a generic one (line1, line2 and line3).
- * (Actually this is not a mapping but rather some kind of interleaving
- * solution).
- */
-#define MODE1_REC_DEVICES (SOUND_MASK_LINE3 | SOUND_MASK_MIC | \
- SOUND_MASK_LINE1 | SOUND_MASK_IMIX)
-
-#define SPRO_REC_DEVICES (SOUND_MASK_LINE | SOUND_MASK_MIC | \
- SOUND_MASK_CD | SOUND_MASK_LINE1)
-
-#define MODE1_MIXER_DEVICES (SOUND_MASK_LINE1 | SOUND_MASK_MIC | \
- SOUND_MASK_LINE2 | \
- SOUND_MASK_IGAIN | \
- SOUND_MASK_PCM | SOUND_MASK_IMIX)
-
-#define MODE2_MIXER_DEVICES (SOUND_MASK_LINE1 | SOUND_MASK_LINE2 | \
- SOUND_MASK_MIC | \
- SOUND_MASK_LINE3 | SOUND_MASK_SPEAKER | \
- SOUND_MASK_IGAIN | \
- SOUND_MASK_PCM | SOUND_MASK_IMIX)
-
-#define MODE3_MIXER_DEVICES (MODE2_MIXER_DEVICES | SOUND_MASK_VOLUME)
-
-/* OPTi 82C930 has no IMIX level control, but it can still be selected as an
- * input
- */
-#define C930_MIXER_DEVICES (SOUND_MASK_LINE1 | SOUND_MASK_LINE2 | \
- SOUND_MASK_MIC | SOUND_MASK_VOLUME | \
- SOUND_MASK_LINE3 | \
- SOUND_MASK_IGAIN | SOUND_MASK_PCM)
-
-#define SPRO_MIXER_DEVICES (SOUND_MASK_VOLUME | SOUND_MASK_PCM | \
- SOUND_MASK_LINE | SOUND_MASK_SYNTH | \
- SOUND_MASK_CD | SOUND_MASK_MIC | \
- SOUND_MASK_SPEAKER | SOUND_MASK_LINE1 | \
- SOUND_MASK_OGAIN)
-
-struct mixer_def {
- unsigned int regno:6; /* register number for volume */
- unsigned int polarity:1; /* volume polarity: 0=normal, 1=reversed */
- unsigned int bitpos:3; /* position of bits in register for volume */
- unsigned int nbits:3; /* number of bits in register for volume */
- unsigned int mutereg:6; /* register number for mute bit */
- unsigned int mutepol:1; /* mute polarity: 0=normal, 1=reversed */
- unsigned int mutepos:4; /* position of mute bit in register */
- unsigned int recreg:6; /* register number for recording bit */
- unsigned int recpol:1; /* recording polarity: 0=normal, 1=reversed */
- unsigned int recpos:4; /* position of recording bit in register */
-};
-
-static char mix_cvt[101] = {
- 0, 0, 3, 7,10,13,16,19,21,23,26,28,30,32,34,35,37,39,40,42,
- 43,45,46,47,49,50,51,52,53,55,56,57,58,59,60,61,62,63,64,65,
- 65,66,67,68,69,70,70,71,72,73,73,74,75,75,76,77,77,78,79,79,
- 80,81,81,82,82,83,84,84,85,85,86,86,87,87,88,88,89,89,90,90,
- 91,91,92,92,93,93,94,94,95,95,96,96,96,97,97,98,98,98,99,99,
- 100
-};
-
-typedef struct mixer_def mixer_ent;
-typedef mixer_ent mixer_ents[2];
-
-/*
- * Most of the mixer entries work backwards. Setting the polarity field
- * makes them work correctly.
- *
- * The channel numbering used by individual sound cards is not fixed. Some
- * cards have assigned different meanings for the AUX1, AUX2 and LINE inputs.
- * The current version doesn't try to compensate for this.
- */
-
-#define MIX_ENT(name, reg_l, pola_l, pos_l, len_l, reg_r, pola_r, pos_r, len_r, mute_bit) \
- [name] = {{reg_l, pola_l, pos_l, len_l, reg_l, 0, mute_bit, 0, 0, 8}, \
- {reg_r, pola_r, pos_r, len_r, reg_r, 0, mute_bit, 0, 0, 8}}
-
-#define MIX_ENT2(name, reg_l, pola_l, pos_l, len_l, mute_reg_l, mute_pola_l, mute_pos_l, \
- rec_reg_l, rec_pola_l, rec_pos_l, \
- reg_r, pola_r, pos_r, len_r, mute_reg_r, mute_pola_r, mute_pos_r, \
- rec_reg_r, rec_pola_r, rec_pos_r) \
- [name] = {{reg_l, pola_l, pos_l, len_l, mute_reg_l, mute_pola_l, mute_pos_l, \
- rec_reg_l, rec_pola_l, rec_pos_l}, \
- {reg_r, pola_r, pos_r, len_r, mute_reg_r, mute_pola_r, mute_pos_r, \
- rec_reg_r, rec_pola_r, rec_pos_r}}
-
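-/*
- * For example (reading off the MIX_ENT macro above), the SOUND_MIXER_VOLUME
- * entry below describes a stereo control whose left level lives in register
- * 27 and right level in register 29, each as a 4-bit reversed-polarity field
- * starting at bit 0; the last argument fills the mutepos field for both
- * channels (a value of 8 presumably meaning there is no mute bit).
- */
-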
-static mixer_ents ad1848_mix_devices[32] = {
- MIX_ENT(SOUND_MIXER_VOLUME, 27, 1, 0, 4, 29, 1, 0, 4, 8),
- MIX_ENT(SOUND_MIXER_BASS, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_TREBLE, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_SYNTH, 4, 1, 0, 5, 5, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_PCM, 6, 1, 0, 6, 7, 1, 0, 6, 7),
- MIX_ENT(SOUND_MIXER_SPEAKER, 26, 1, 0, 4, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_LINE, 18, 1, 0, 5, 19, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_MIC, 0, 0, 5, 1, 1, 0, 5, 1, 8),
- MIX_ENT(SOUND_MIXER_CD, 2, 1, 0, 5, 3, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_IMIX, 13, 1, 2, 6, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_ALTPCM, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_RECLEV, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_IGAIN, 0, 0, 0, 4, 1, 0, 0, 4, 8),
- MIX_ENT(SOUND_MIXER_OGAIN, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_LINE1, 2, 1, 0, 5, 3, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_LINE2, 4, 1, 0, 5, 5, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_LINE3, 18, 1, 0, 5, 19, 1, 0, 5, 7)
-};
-
-static mixer_ents iwave_mix_devices[32] = {
- MIX_ENT(SOUND_MIXER_VOLUME, 25, 1, 0, 5, 27, 1, 0, 5, 8),
- MIX_ENT(SOUND_MIXER_BASS, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_TREBLE, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_SYNTH, 4, 1, 0, 5, 5, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_PCM, 6, 1, 0, 6, 7, 1, 0, 6, 7),
- MIX_ENT(SOUND_MIXER_SPEAKER, 26, 1, 0, 4, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_LINE, 18, 1, 0, 5, 19, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_MIC, 0, 0, 5, 1, 1, 0, 5, 1, 8),
- MIX_ENT(SOUND_MIXER_CD, 2, 1, 0, 5, 3, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_IMIX, 16, 1, 0, 5, 17, 1, 0, 5, 8),
- MIX_ENT(SOUND_MIXER_ALTPCM, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_RECLEV, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_IGAIN, 0, 0, 0, 4, 1, 0, 0, 4, 8),
- MIX_ENT(SOUND_MIXER_OGAIN, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_LINE1, 2, 1, 0, 5, 3, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_LINE2, 4, 1, 0, 5, 5, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_LINE3, 18, 1, 0, 5, 19, 1, 0, 5, 7)
-};
-
-static mixer_ents cs42xb_mix_devices[32] = {
- /* Digital master volume actually has seven bits, but we only use
- six to avoid the discontinuity when the analog gain kicks in. */
- MIX_ENT(SOUND_MIXER_VOLUME, 46, 1, 0, 6, 47, 1, 0, 6, 7),
- MIX_ENT(SOUND_MIXER_BASS, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_TREBLE, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_SYNTH, 4, 1, 0, 5, 5, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_PCM, 6, 1, 0, 6, 7, 1, 0, 6, 7),
- MIX_ENT(SOUND_MIXER_SPEAKER, 26, 1, 0, 4, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_LINE, 18, 1, 0, 5, 19, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_MIC, 34, 1, 0, 5, 35, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_CD, 2, 1, 0, 5, 3, 1, 0, 5, 7),
- /* For the IMIX entry, it was not possible to use the MIX_ENT macro
- because the mute bit is in different positions for the two
- channels and requires reverse polarity. */
- [SOUND_MIXER_IMIX] = {{13, 1, 2, 6, 13, 1, 0, 0, 0, 8},
- {42, 1, 0, 6, 42, 1, 7, 0, 0, 8}},
- MIX_ENT(SOUND_MIXER_ALTPCM, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_RECLEV, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_IGAIN, 0, 0, 0, 4, 1, 0, 0, 4, 8),
- MIX_ENT(SOUND_MIXER_OGAIN, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_LINE1, 2, 1, 0, 5, 3, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_LINE2, 4, 1, 0, 5, 5, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_LINE3, 38, 1, 0, 6, 39, 1, 0, 6, 7)
-};
-
-/* OPTi 82C930 has somewhat different port addresses.
- * Note: VOLUME == SPEAKER, SYNTH == LINE2, LINE == LINE3, CD == LINE1
- * VOLUME, SYNTH, LINE, CD are not enabled above.
- * MIC is level of mic monitoring direct to output. Same for CD, LINE, etc.
- */
-static mixer_ents c930_mix_devices[32] = {
- MIX_ENT(SOUND_MIXER_VOLUME, 22, 1, 1, 5, 23, 1, 1, 5, 7),
- MIX_ENT(SOUND_MIXER_BASS, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_TREBLE, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_SYNTH, 4, 1, 1, 4, 5, 1, 1, 4, 7),
- MIX_ENT(SOUND_MIXER_PCM, 6, 1, 0, 5, 7, 1, 0, 5, 7),
- MIX_ENT(SOUND_MIXER_SPEAKER, 22, 1, 1, 5, 23, 1, 1, 5, 7),
- MIX_ENT(SOUND_MIXER_LINE, 18, 1, 1, 4, 19, 1, 1, 4, 7),
- MIX_ENT(SOUND_MIXER_MIC, 20, 1, 1, 4, 21, 1, 1, 4, 7),
- MIX_ENT(SOUND_MIXER_CD, 2, 1, 1, 4, 3, 1, 1, 4, 7),
- MIX_ENT(SOUND_MIXER_IMIX, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_ALTPCM, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_RECLEV, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_IGAIN, 0, 0, 0, 4, 1, 0, 0, 4, 8),
- MIX_ENT(SOUND_MIXER_OGAIN, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT(SOUND_MIXER_LINE1, 2, 1, 1, 4, 3, 1, 1, 4, 7),
- MIX_ENT(SOUND_MIXER_LINE2, 4, 1, 1, 4, 5, 1, 1, 4, 7),
- MIX_ENT(SOUND_MIXER_LINE3, 18, 1, 1, 4, 19, 1, 1, 4, 7)
-};
-
-static mixer_ents spro_mix_devices[32] = {
- MIX_ENT (SOUND_MIXER_VOLUME, 19, 0, 4, 4, 19, 0, 0, 4, 8),
- MIX_ENT (SOUND_MIXER_BASS, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT (SOUND_MIXER_TREBLE, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT2(SOUND_MIXER_SYNTH, 4, 1, 1, 4, 23, 0, 3, 0, 0, 8,
- 5, 1, 1, 4, 23, 0, 3, 0, 0, 8),
- MIX_ENT (SOUND_MIXER_PCM, 6, 1, 1, 4, 7, 1, 1, 4, 8),
- MIX_ENT (SOUND_MIXER_SPEAKER, 18, 0, 3, 2, 0, 0, 0, 0, 8),
- MIX_ENT2(SOUND_MIXER_LINE, 20, 0, 4, 4, 17, 1, 4, 16, 0, 2,
- 20, 0, 0, 4, 17, 1, 3, 16, 0, 1),
- MIX_ENT2(SOUND_MIXER_MIC, 18, 0, 0, 3, 17, 1, 0, 16, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
- MIX_ENT2(SOUND_MIXER_CD, 21, 0, 4, 4, 17, 1, 2, 16, 0, 4,
- 21, 0, 0, 4, 17, 1, 1, 16, 0, 3),
- MIX_ENT (SOUND_MIXER_IMIX, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT (SOUND_MIXER_ALTPCM, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT (SOUND_MIXER_RECLEV, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT (SOUND_MIXER_IGAIN, 0, 0, 0, 0, 0, 0, 0, 0, 8),
- MIX_ENT (SOUND_MIXER_OGAIN, 17, 1, 6, 1, 0, 0, 0, 0, 8),
- /* This is external wavetable */
- MIX_ENT2(SOUND_MIXER_LINE1, 22, 0, 4, 4, 23, 1, 1, 23, 0, 4,
- 22, 0, 0, 4, 23, 1, 0, 23, 0, 5),
-};
-
-static int default_mixer_levels[32] =
-{
- 0x3232, /* Master Volume */
- 0x3232, /* Bass */
- 0x3232, /* Treble */
- 0x4b4b, /* FM */
- 0x3232, /* PCM */
- 0x1515, /* PC Speaker */
- 0x2020, /* Ext Line */
- 0x1010, /* Mic */
- 0x4b4b, /* CD */
- 0x0000, /* Recording monitor */
- 0x4b4b, /* Second PCM */
- 0x4b4b, /* Recording level */
- 0x4b4b, /* Input gain */
- 0x4b4b, /* Output gain */
- 0x2020, /* Line1 */
- 0x2020, /* Line2 */
- 0x1515 /* Line3 (usually line in)*/
-};
-
-#define LEFT_CHN 0
-#define RIGHT_CHN 1
-
-/*
- * Channel enable bits for ioctl(SOUND_MIXER_PRIVATE1)
- */
-
-#ifndef AUDIO_SPEAKER
-#define AUDIO_SPEAKER 0x01 /* Enable mono output */
-#define AUDIO_HEADPHONE 0x02 /* Sparc only */
-#define AUDIO_LINE_OUT 0x04 /* Sparc only */
-#endif
diff --git a/sound/oss/aedsp16.c b/sound/oss/aedsp16.c
deleted file mode 100644
index f058ed6bdb69..000000000000
--- a/sound/oss/aedsp16.c
+++ /dev/null
@@ -1,1373 +0,0 @@
-/*
- sound/oss/aedsp16.c
-
- Audio Excel DSP 16 software configuration routines
- Copyright (C) 1995,1996,1997,1998 Riccardo Facchetti (fizban@tin.it)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- */
-/*
- * Include the main OSS Lite header file. It includes all the OS, OSS Lite, etc.
- * headers needed by this source.
- */
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include "sound_config.h"
-
-/*
-
- READ THIS
-
-   This module was started to configure the Audio Excel DSP 16 Sound Card.
-   It now works with the SC-6000 (old aedsp16) and the new SC-6600 based cards.
-
- NOTE: I have NO idea about Audio Excel DSP 16 III. If someone owns this
-   audio card and wants to see kernel support for it, please contact me.
-
- Audio Excel DSP 16 is an SB pro II, Microsoft Sound System and MPU-401
- compatible card.
- It is software-only configurable (no jumpers to hard-set irq/dma/mpu-irq),
-   so before this module, the only way to configure the DSP under linux was
-   to boot MS-DOS, load the sound.sys device driver (this driver soft-
-   configures the sound board hardware by massaging some of its registers),
-   and then ctrl-alt-del to boot linux with the DSP configured by the DOS
-   driver.
-
-   This module works by configuring your Audio Excel DSP 16's irq, dma and
- mpu-401-irq. The OSS Lite routines rely on the fact that if the
- hardware is there, they can detect it. The problem with AEDSP16 is
- that no hardware can be found by the probe routines if the sound card
- is not configured properly. Sometimes the kernel probe routines can find
- an SBPRO even when the card is not configured (this is the standard setup
-   of the card), but the SBPRO emulation doesn't work well if the card is not
- properly initialized. For this reason
-
- aedsp16_init_board()
-
- routine is called before the OSS Lite probe routines try to detect the
- hardware.
-
-   NOTE (READ THE NOTE TOO, IT CONTAINS USEFUL INFORMATION)
-
- NOTE: Now it works with SC-6000 and SC-6600 based audio cards. The new cards
- have no jumper switch at all. No more WSS or MPU-401 I/O port switches. They
- have to be configured by software.
-
- NOTE: The driver is merged with the new OSS Lite sound driver. It works
- as a lowlevel driver.
-
- The Audio Excel DSP 16 Sound Card emulates both SBPRO and MSS;
- the OSS Lite sound driver can be configured for SBPRO and MSS cards
- at the same time, but the aedsp16 can't be two cards!!
- When we configure it, we have to choose the SBPRO or the MSS emulation
-   for AEDSP16. We can also install a *REAL* card of the other type (see [1]).
-
- NOTE: If someone can test the combination AEDSP16+MSS or AEDSP16+SBPRO
- please let me know if it works.
-
- The MPU-401 support can be compiled in together with one of the other
- two operating modes.
-
- NOTE: This is something like plug-and-play: we have only to plug
- the AEDSP16 board in the socket, and then configure and compile
- a kernel that uses the AEDSP16 software configuration capability.
- No jumper setting is needed!
-
- For example, if you want AEDSP16 to be an SBPro, on irq 10, dma 3
-   you just have to run make config on the OSS Lite package, configure
-   the AEDSP16 sound card, then activate the SBPro emulation mode
-   and finally configure IRQ and DMA.
- Compile the kernel and run it.
-
- NOTE: This means for SC-6000 cards that you can choose irq and dma,
- but not the I/O addresses. To change I/O addresses you have to set
- them with jumpers. For SC-6600 cards you have no jumpers so you have
- to set up your full card configuration in the make config.
-
- You can change the irq/dma/mirq settings WITHOUT THE NEED to open
- your computer and massage the jumpers (there are no irq/dma/mirq
- jumpers to be configured anyway, only I/O BASE values have to be
- configured with jumpers)
-
-   For some incomprehensible reason, the card default of irq 7, dma 1
-   doesn't work for me. It seems to be an IRQ or DMA conflict. Under heavy
-   HDD work, the kernel starts to spew out a lot of messages like:
-
- 'Sound: DMA timed out - IRQ/DRQ config error?'
-
-   As far as I can tell, I do NOT have any conflict at irq 7 (under linux I'm
-   using the lp polling driver), and dma line 1 is unused, as reported by
-   /proc/dma. I suppose this is a bug in the AEDSP16. I know my hardware, so
-   I'm pretty sure I have no conflict, but maybe I'm wrong. Who knows!
-   Anyway, a setting of irq 10, dma 3 works really fine.
-
- NOTE: if someone can use AEDSP16 with irq 7, dma 1, please let me know
- the emulation mode, all the installed hardware and the hardware
- configuration (irq and dma settings of all the hardware).
-
- This init module should work with SBPRO+MSS, when one of the two is
- the AEDSP16 emulation and the other the real card. (see [1])
- For example:
-
- AEDSP16 (0x220) in SBPRO emu (0x220) + real MSS + other
- AEDSP16 (0x220) in MSS emu + real SBPRO (0x240) + other
-
- MPU401 should work. (see [2])
-
- [1]
- ---
- Date: Mon, 29 Jul 1997 08:35:40 +0100
- From: Mr S J Greenaway <sjg95@unixfe.rl.ac.uk>
-
- [...]
- Just to let you know got my Audio Excel (emulating a MSS) working
- with my original SB16, thanks for the driver!
- [...]
- ---
-
- [2] Not tested by me for lack of hardware.
-
- TODO, WISHES AND TECH
-
- - About I/O ports allocation -
-
- Request the 2x0h region (port base) in any case if we are using this card.
-
- NOTE: the "aedsp16 (base)" string with which we are requesting the aedsp16
-   port base region (see code) does not necessarily mean that we are emulating
- sbpro. Even if this region is the sbpro I/O ports region, we use this
- region to access the control registers of the card, and if emulating
-   sbpro, the sbpro I/O registers too. If we are emulating MSS, the sbpro
-   registers are not used in any way to emulate an sbpro: they are
- used only for configuration purposes.
-
- Started Fri Mar 17 16:13:18 MET 1995
-
- v0.1 (ALPHA, was a user-level program called AudioExcelDSP16.c)
- - Initial code.
- v0.2 (ALPHA)
- - Cleanups.
- - Integrated with Linux voxware v 2.90-2 kernel sound driver.
- - SoundBlaster Pro mode configuration.
- - Microsoft Sound System mode configuration.
- - MPU-401 mode configuration.
- v0.3 (ALPHA)
- - Cleanups.
- - Rearranged the code to let aedsp16_init_board be more general.
- - Erased the REALLY_SLOW_IO. We don't need it. Erased the linux/io.h
- inclusion too. We rely on os.h
- - Used the to get a variable
- len string (we are not sure about the len of Copyright string).
- This works with any SB and compatible.
- - Added the code to request_region at device init (should go in
- the main body of voxware).
- v0.4 (BETA)
- - Better configure.c patch for aedsp16 configuration (better
- logic of inclusion of AEDSP16 support)
- - Modified the conditional compilation to better support more than
- one sound card of the emulated type (read the NOTES above)
- - Moved the sb init routine from the attach to the very first
- probe in sb_card.c
- - Rearrangements and cleanups
- - Wiped out some unnecessary code and variables: this is kernel
-	  code, so it is better to save some TEXT and DATA
- - Fixed the request_region code. We must allocate the aedsp16 (sbpro)
- I/O ports in any case because they are used to access the DSP
- configuration registers and we can not allow anyone to get them.
- v0.5
- - cleanups on comments
- - prep for diffs against v3.0-proto-950402
- v0.6
- - removed the request_region()s when compiling the MODULE sound.o
- because we are not allowed (by the actual voxware structure) to
- release_region()
- v0.7 (pre ALPHA, not distributed)
- - started porting this module to kernel 1.3.84. Dummy probe/attach
- routines.
- v0.8 (ALPHA)
- - attached all the init routines.
- v0.9 (BETA)
- - Integrated with linux-pre2.0.7
- - Integrated with configuration scripts.
-	- Cleaned up and beautified the code.
- v0.9.9 (BETA)
-	- Thanks to Piercarlo Grandi: corrected the conditional compilation code.
- Now only the code configured is compiled in, with some memory saving.
- v0.9.10
- - Integration into the sound/lowlevel/ section of the sound driver.
- - Re-organized the code.
- v0.9.11 (not distributed)
- - Rewritten the init interface-routines to initialize the AEDSP16 in
- one shot.
- - More cosmetics.
- - SC-6600 support.
- - More soft/hard configuration.
- v0.9.12
- - Refined the v0.9.11 code with conditional compilation to distinguish
- between SC-6000 and SC-6600 code.
- v1.0.0
- - Prep for merging with OSS Lite and Linux kernel 2.1.13
- - Corrected a bug in request/check/release region calls (thanks to the
- new kernel exception handling).
- v1.1
- - Revamped for integration with new modularized sound drivers: to enhance
-	  the flexibility of the modular version, I have removed all the conditional
- compilation for SBPRO, MPU and MSS code. Now it is all managed with
- the ae_config structure.
- v1.2
-	- Module information added.
- - Removed aedsp16_delay_10msec(), now using mdelay(10)
- - All data and funcs moved to .*.init section.
- v1.3
- Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 2000/09/27
- - got rid of check_region
-
- Known Problems:
-	- Audio Excel DSP 16 III doesn't work with this driver.
-
- Credits:
- Many thanks to Gerald Britton <gbritton@CapAccess.org>. He helped me a
- lot in testing the 0.9.11 and 0.9.12 versions of this driver.
-
- */
-
-
-#define VERSION "1.3" /* Version of Audio Excel DSP 16 driver */
-
-#undef AEDSP16_DEBUG /* Define this to 1 to enable debug code */
-#undef AEDSP16_DEBUG_MORE /* Define this to 1 to enable more debug */
-#undef AEDSP16_INFO /* Define this to 1 to enable info code */
-
-#if defined(AEDSP16_DEBUG)
-# define DBG(x) printk x
-# if defined(AEDSP16_DEBUG_MORE)
-# define DBG1(x) printk x
-# else
-# define DBG1(x)
-# endif
-#else
-# define DBG(x)
-# define DBG1(x)
-#endif
-
-/*
- * Misc definitions
- */
-#define TRUE 1
-#define FALSE 0
-
-/*
- * Region Size for request/check/release region.
- */
-#define IOBASE_REGION_SIZE 0x10
-
-/*
- * Hardware related defaults
- */
-#define DEF_AEDSP16_IOB 0x220 /* 0x220(default) 0x240 */
-#define DEF_AEDSP16_IRQ 7 /* 5 7(default) 9 10 11 */
-#define DEF_AEDSP16_MRQ 0 /* 5 7 9 10 0(default), 0 means disable */
-#define DEF_AEDSP16_DMA 1 /* 0 1(default) 3 */
-
-/*
- * Commands of AEDSP16's DSP (SBPRO+special).
- * Some of them are COMMAND_xx, in the future they may change.
- */
-#define WRITE_MDIRQ_CFG 0x50 /* Set M&I&DRQ mask (the real config) */
-#define COMMAND_52 0x52 /* */
-#define READ_HARD_CFG 0x58 /* Read Hardware Config (I/O base etc) */
-#define COMMAND_5C 0x5c /* */
-#define COMMAND_60 0x60 /* */
-#define COMMAND_66 0x66 /* */
-#define COMMAND_6C 0x6c /* */
-#define COMMAND_6E 0x6e /* */
-#define COMMAND_88 0x88 /* */
-#define DSP_INIT_MSS 0x8c /* Enable Microsoft Sound System mode */
-#define COMMAND_C5 0xc5 /* */
-#define GET_DSP_VERSION 0xe1 /* Get DSP Version */
-#define GET_DSP_COPYRIGHT 0xe3 /* Get DSP Copyright */
-
-/*
- * Offsets of AEDSP16 DSP I/O ports. The offset is added to base I/O port
- * to have the actual I/O port.
- * Register permissions are:
- * (wo) == Write Only
- * (ro) == Read Only
- * (w-) == Write
- * (r-) == Read
- */
-#define DSP_RESET 0x06 /* offset of DSP RESET (wo) */
-#define DSP_READ 0x0a /* offset of DSP READ (ro) */
-#define DSP_WRITE 0x0c /* offset of DSP WRITE (w-) */
-#define DSP_COMMAND 0x0c /* offset of DSP COMMAND (w-) */
-#define DSP_STATUS 0x0c /* offset of DSP STATUS (r-) */
-#define DSP_DATAVAIL 0x0e /* offset of DSP DATA AVAILABLE (ro) */
-
-
-#define RETRY 10 /* Various retry values on I/O opera- */
-#define STATUSRETRY 1000 /* tions. Sometimes we have to */
-#define HARDRETRY 500000 /* wait for previous cmd to complete */
-
-/*
- * Size of character arrays that store name and version of sound card
- */
-#define CARDNAMELEN 15 /* Size of the card's name in chars */
-#define CARDVERLEN 10 /* Size of the card's version in chars */
-#define CARDVERDIGITS 2 /* Number of digits in the version */
-
-#if defined(CONFIG_SC6600)
-/*
- * Bitmapped flags of hard configuration
- */
-/*
- * Decode macros (xl == low byte, xh = high byte)
- */
-#define IOBASE(xl) ((xl & 0x01)?0x240:0x220)
-#define JOY(xl) (xl & 0x02)
-#define MPUADDR(xl) ( \
- (xl & 0x0C)?0x330: \
- (xl & 0x08)?0x320: \
- (xl & 0x04)?0x310: \
- 0x300)
-#define WSSADDR(xl) ((xl & 0x10)?0xE80:0x530)
-#define CDROM(xh) (xh & 0x20)
-#define CDROMADDR(xh) (((xh & 0x1F) << 4) + 0x200)
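-/*
- * For illustration (hypothetical value): a low config byte of 0x0d would
- * decode as IOBASE 0x240, no joystick, MPUADDR 0x330 and WSSADDR 0x530.
- */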
-/*
- * Encode macros
- */
-#define BLDIOBASE(xl, val) { \
- xl &= ~0x01; \
- if (val == 0x240) \
- xl |= 0x01; \
- }
-#define BLDJOY(xl, val) { \
- xl &= ~0x02; \
- if (val == 1) \
- xl |= 0x02; \
- }
-#define BLDMPUADDR(xl, val) { \
- xl &= ~0x0C; \
- switch (val) { \
- case 0x330: \
- xl |= 0x0C; \
- break; \
- case 0x320: \
- xl |= 0x08; \
- break; \
- case 0x310: \
- xl |= 0x04; \
- break; \
- case 0x300: \
- xl |= 0x00; \
- break; \
- default: \
- xl |= 0x00; \
- break; \
- } \
- }
-#define BLDWSSADDR(xl, val) { \
- xl &= ~0x10; \
- if (val == 0xE80) \
- xl |= 0x10; \
- }
-#define BLDCDROM(xh, val) { \
- xh &= ~0x20; \
- if (val == 1) \
- xh |= 0x20; \
- }
-#define BLDCDROMADDR(xh, val) { \
- int tmp = val; \
- tmp -= 0x200; \
- tmp >>= 4; \
- tmp &= 0x1F; \
- xh |= tmp; \
- xh &= 0x7F; \
- xh |= 0x40; \
- }
-#endif /* CONFIG_SC6600 */
-
-/*
- * Bit mapped flags for calling aedsp16_init_board(), and saving the current
- * emulation mode.
- */
-#define INIT_NONE (0 )
-#define INIT_SBPRO (1<<0)
-#define INIT_MSS (1<<1)
-#define INIT_MPU401 (1<<2)
-
-static int soft_cfg __initdata = 0; /* bitmapped config */
-static int soft_cfg_mss __initdata = 0; /* bitmapped mss config */
-static int ver[CARDVERDIGITS] __initdata = {0, 0}; /* DSP Ver:
- hi->ver[0] lo->ver[1] */
-
-#if defined(CONFIG_SC6600)
-static int hard_cfg[2] /* lo<-hard_cfg[0] hi<-hard_cfg[1] */
- __initdata = { 0, 0};
-#endif /* CONFIG_SC6600 */
-
-#if defined(CONFIG_SC6600)
-/* Decoded hard configuration */
-struct d_hcfg {
- int iobase;
- int joystick;
- int mpubase;
- int wssbase;
- int cdrom;
- int cdrombase;
-};
-
-static struct d_hcfg decoded_hcfg __initdata = {0, };
-
-#endif /* CONFIG_SC6600 */
-
-/* orVals contain the values to be or'ed */
-struct orVals {
- int val; /* irq|mirq|dma */
- int or; /* soft_cfg |= TheStruct.or */
-};
-
-/* aedsp16_info contain the audio card configuration */
-struct aedsp16_info {
- int base_io; /* base I/O address for accessing card */
- int irq; /* irq value for DSP I/O */
- int mpu_irq; /* irq for mpu401 interface I/O */
- int dma; /* dma value for DSP I/O */
- int mss_base; /* base I/O for Microsoft Sound System */
- int mpu_base; /* base I/O for MPU-401 emulation */
- int init; /* Initialization status of the card */
-};
-
-/*
- * Magic values that the DSP will eat when configuring irq/mirq/dma
- */
-/* DSP IRQ conversion array */
-static struct orVals orIRQ[] __initdata = {
- {0x05, 0x28},
- {0x07, 0x08},
- {0x09, 0x10},
- {0x0a, 0x18},
- {0x0b, 0x20},
- {0x00, 0x00}
-};
-
-/* MPU-401 IRQ conversion array */
-static struct orVals orMIRQ[] __initdata = {
- {0x05, 0x04},
- {0x07, 0x44},
- {0x09, 0x84},
- {0x0a, 0xc4},
- {0x00, 0x00}
-};
-
-/* DMA Channels conversion array */
-static struct orVals orDMA[] __initdata = {
- {0x00, 0x01},
- {0x01, 0x02},
- {0x03, 0x03},
- {0x00, 0x00}
-};
-
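-/*
- * For example (using the tables above and the "soft_cfg |= or" rule): irq 10
- * contributes 0x18 and dma 3 contributes 0x03, so with the MPU-401 irq
- * disabled these tables alone would yield a soft_cfg of 0x1b.
- */
-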
-static struct aedsp16_info ae_config = {
- .base_io = DEF_AEDSP16_IOB,
- .irq = DEF_AEDSP16_IRQ,
- .mpu_irq = DEF_AEDSP16_MRQ,
- .dma = DEF_AEDSP16_DMA,
- .mss_base = -1,
- .mpu_base = -1,
- .init = INIT_NONE
-};
-
-/*
- * Buffers to store audio card information
- */
-static char DSPCopyright[CARDNAMELEN + 1] __initdata = {0, };
-static char DSPVersion[CARDVERLEN + 1] __initdata = {0, };
-
-static int __init aedsp16_wait_data(int port)
-{
- int loop = STATUSRETRY;
- unsigned char ret = 0;
-
- DBG1(("aedsp16_wait_data (0x%x): ", port));
-
- do {
- ret = inb(port + DSP_DATAVAIL);
- /*
- * Wait for data available (bit 7 of ret == 1)
- */
- } while (!(ret & 0x80) && loop--);
-
- if (ret & 0x80) {
- DBG1(("success.\n"));
- return TRUE;
- }
-
- DBG1(("failure.\n"));
- return FALSE;
-}
-
-static int __init aedsp16_read(int port)
-{
- int inbyte;
-
- DBG((" Read DSP Byte (0x%x): ", port));
-
- if (aedsp16_wait_data(port) == FALSE) {
- DBG(("failure.\n"));
- return -1;
- }
-
- inbyte = inb(port + DSP_READ);
-
- DBG(("read [0x%x]/{%c}.\n", inbyte, inbyte));
-
- return inbyte;
-}
-
-static int __init aedsp16_test_dsp(int port)
-{
- return ((aedsp16_read(port) == 0xaa) ? TRUE : FALSE);
-}
-
-static int __init aedsp16_dsp_reset(int port)
-{
- /*
- * Reset DSP
- */
-
- DBG(("Reset DSP:\n"));
-
- outb(1, (port + DSP_RESET));
- udelay(10);
- outb(0, (port + DSP_RESET));
- udelay(10);
- udelay(10);
- if (aedsp16_test_dsp(port) == TRUE) {
- DBG(("success.\n"));
- return TRUE;
- } else
- DBG(("failure.\n"));
- return FALSE;
-}
-
-static int __init aedsp16_write(int port, int cmd)
-{
- unsigned char ret;
- int loop = HARDRETRY;
-
- DBG((" Write DSP Byte (0x%x) [0x%x]: ", port, cmd));
-
- do {
- ret = inb(port + DSP_STATUS);
- /*
- * DSP ready to receive data if bit 7 of ret == 0
- */
- if (!(ret & 0x80)) {
- outb(cmd, port + DSP_COMMAND);
- DBG(("success.\n"));
- return 0;
- }
- } while (loop--);
-
- DBG(("timeout.\n"));
- printk("[AEDSP16] DSP Command (0x%x) timeout.\n", cmd);
-
- return -1;
-}
-
-#if defined(CONFIG_SC6600)
-
-#if defined(AEDSP16_INFO) || defined(AEDSP16_DEBUG)
-void __init aedsp16_pinfo(void) {
- DBG(("\n Base address: %x\n", decoded_hcfg.iobase));
- DBG((" Joystick : %s present\n", decoded_hcfg.joystick?"":" not"));
- DBG((" WSS addr : %x\n", decoded_hcfg.wssbase));
- DBG((" MPU-401 addr: %x\n", decoded_hcfg.mpubase));
- DBG((" CDROM : %s present\n", (decoded_hcfg.cdrom!=4)?"":" not"));
- DBG((" CDROMADDR : %x\n\n", decoded_hcfg.cdrombase));
-}
-#endif
-
-static void __init aedsp16_hard_decode(void) {
-
- DBG((" aedsp16_hard_decode: 0x%x, 0x%x\n", hard_cfg[0], hard_cfg[1]));
-
-/*
- * Decode Cfg Bytes.
- */
- decoded_hcfg.iobase = IOBASE(hard_cfg[0]);
- decoded_hcfg.joystick = JOY(hard_cfg[0]);
- decoded_hcfg.wssbase = WSSADDR(hard_cfg[0]);
- decoded_hcfg.mpubase = MPUADDR(hard_cfg[0]);
- decoded_hcfg.cdrom = CDROM(hard_cfg[1]);
- decoded_hcfg.cdrombase = CDROMADDR(hard_cfg[1]);
-
-#if defined(AEDSP16_INFO) || defined(AEDSP16_DEBUG)
- printk(" Original sound card configuration:\n");
- aedsp16_pinfo();
-#endif
-
-/*
- * Now set up the real kernel configuration.
- */
- decoded_hcfg.iobase = ae_config.base_io;
- decoded_hcfg.wssbase = ae_config.mss_base;
- decoded_hcfg.mpubase = ae_config.mpu_base;
-
-#if defined(CONFIG_SC6600_JOY)
- decoded_hcfg.joystick = CONFIG_SC6600_JOY; /* Enable */
-#endif
-#if defined(CONFIG_SC6600_CDROM)
- decoded_hcfg.cdrom = CONFIG_SC6600_CDROM; /* 4:N-3:I-2:G-1:P-0:S */
-#endif
-#if defined(CONFIG_SC6600_CDROMBASE)
- decoded_hcfg.cdrombase = CONFIG_SC6600_CDROMBASE; /* 0 Disable */
-#endif
-
-#if defined(AEDSP16_DEBUG)
- DBG((" New Values:\n"));
- aedsp16_pinfo();
-#endif
-
- DBG(("success.\n"));
-}
-
-static void __init aedsp16_hard_encode(void) {
-
- DBG((" aedsp16_hard_encode: 0x%x, 0x%x\n", hard_cfg[0], hard_cfg[1]));
-
- hard_cfg[0] = 0;
- hard_cfg[1] = 0;
-
- hard_cfg[0] |= 0x20;
-
- BLDIOBASE (hard_cfg[0], decoded_hcfg.iobase);
- BLDWSSADDR(hard_cfg[0], decoded_hcfg.wssbase);
- BLDMPUADDR(hard_cfg[0], decoded_hcfg.mpubase);
- BLDJOY(hard_cfg[0], decoded_hcfg.joystick);
- BLDCDROM(hard_cfg[1], decoded_hcfg.cdrom);
- BLDCDROMADDR(hard_cfg[1], decoded_hcfg.cdrombase);
-
-#if defined(AEDSP16_DEBUG)
- aedsp16_pinfo();
-#endif
-
- DBG((" aedsp16_hard_encode: 0x%x, 0x%x\n", hard_cfg[0], hard_cfg[1]));
- DBG(("success.\n"));
-
-}
-
-static int __init aedsp16_hard_write(int port) {
-
- DBG(("aedsp16_hard_write:\n"));
-
- if (aedsp16_write(port, COMMAND_6C)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_6C);
- DBG(("failure.\n"));
- return FALSE;
- }
- if (aedsp16_write(port, COMMAND_5C)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_5C);
- DBG(("failure.\n"));
- return FALSE;
- }
- if (aedsp16_write(port, hard_cfg[0])) {
- printk("[AEDSP16] DATA 0x%x: failed!\n", hard_cfg[0]);
- DBG(("failure.\n"));
- return FALSE;
- }
- if (aedsp16_write(port, hard_cfg[1])) {
- printk("[AEDSP16] DATA 0x%x: failed!\n", hard_cfg[1]);
- DBG(("failure.\n"));
- return FALSE;
- }
- if (aedsp16_write(port, COMMAND_C5)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_C5);
- DBG(("failure.\n"));
- return FALSE;
- }
-
- DBG(("success.\n"));
-
- return TRUE;
-}
-
-static int __init aedsp16_hard_read(int port) {
-
- DBG(("aedsp16_hard_read:\n"));
-
- if (aedsp16_write(port, READ_HARD_CFG)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", READ_HARD_CFG);
- DBG(("failure.\n"));
- return FALSE;
- }
-
- if ((hard_cfg[0] = aedsp16_read(port)) == -1) {
- printk("[AEDSP16] aedsp16_read after CMD 0x%x: failed\n",
- READ_HARD_CFG);
- DBG(("failure.\n"));
- return FALSE;
- }
- if ((hard_cfg[1] = aedsp16_read(port)) == -1) {
- printk("[AEDSP16] aedsp16_read after CMD 0x%x: failed\n",
- READ_HARD_CFG);
- DBG(("failure.\n"));
- return FALSE;
- }
- if (aedsp16_read(port) == -1) {
- printk("[AEDSP16] aedsp16_read after CMD 0x%x: failed\n",
- READ_HARD_CFG);
- DBG(("failure.\n"));
- return FALSE;
- }
-
- DBG(("success.\n"));
-
- return TRUE;
-}
-
-static int __init aedsp16_ext_cfg_write(int port) {
-
- int extcfg, val;
-
- if (aedsp16_write(port, COMMAND_66)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_66);
- return FALSE;
- }
-
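-	/*
-	 * Build the extended configuration byte from the decoded hardware
-	 * configuration; individual bits are cleared when the CD-ROM or
-	 * the MPU-401 resources are not configured.
-	 */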
- extcfg = 7;
- if (decoded_hcfg.cdrom != 2)
- extcfg = 0x0F;
- if ((decoded_hcfg.cdrom == 4) ||
- (decoded_hcfg.cdrom == 3))
- extcfg &= ~2;
- if (decoded_hcfg.cdrombase == 0)
- extcfg &= ~2;
- if (decoded_hcfg.mpubase == 0)
- extcfg &= ~1;
-
- if (aedsp16_write(port, extcfg)) {
- printk("[AEDSP16] Write extcfg: failed!\n");
- return FALSE;
- }
- if (aedsp16_write(port, 0)) {
- printk("[AEDSP16] Write extcfg: failed!\n");
- return FALSE;
- }
- if (decoded_hcfg.cdrom == 3) {
- if (aedsp16_write(port, COMMAND_52)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_52);
- return FALSE;
- }
- if ((val = aedsp16_read(port)) == -1) {
- printk("[AEDSP16] aedsp16_read after CMD 0x%x: failed\n"
- , COMMAND_52);
- return FALSE;
- }
- val &= 0x7F;
- if (aedsp16_write(port, COMMAND_60)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_60);
- return FALSE;
- }
- if (aedsp16_write(port, val)) {
- printk("[AEDSP16] Write val: failed!\n");
- return FALSE;
- }
- }
-
- return TRUE;
-}
-
-#endif /* CONFIG_SC6600 */
-
-static int __init aedsp16_cfg_write(int port) {
- if (aedsp16_write(port, WRITE_MDIRQ_CFG)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", WRITE_MDIRQ_CFG);
- return FALSE;
- }
- if (aedsp16_write(port, soft_cfg)) {
- printk("[AEDSP16] Initialization of (M)IRQ and DMA: failed!\n");
- return FALSE;
- }
- return TRUE;
-}
-
-static int __init aedsp16_init_mss(int port)
-{
- DBG(("aedsp16_init_mss:\n"));
-
- mdelay(10);
-
- if (aedsp16_write(port, DSP_INIT_MSS)) {
- printk("[AEDSP16] aedsp16_init_mss [0x%x]: failed!\n",
- DSP_INIT_MSS);
- DBG(("failure.\n"));
- return FALSE;
- }
-
- mdelay(10);
-
- if (aedsp16_cfg_write(port) == FALSE)
- return FALSE;
-
- outb(soft_cfg_mss, ae_config.mss_base);
-
- DBG(("success.\n"));
-
- return TRUE;
-}
-
-static int __init aedsp16_setup_board(int port) {
- int loop = RETRY;
-
-#if defined(CONFIG_SC6600)
- int val = 0;
-
- if (aedsp16_hard_read(port) == FALSE) {
- printk("[AEDSP16] aedsp16_hard_read: failed!\n");
- return FALSE;
- }
-
- if (aedsp16_write(port, COMMAND_52)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_52);
- return FALSE;
- }
-
- if ((val = aedsp16_read(port)) == -1) {
- printk("[AEDSP16] aedsp16_read after CMD 0x%x: failed\n",
- COMMAND_52);
- return FALSE;
- }
-#endif
-
- do {
- if (aedsp16_write(port, COMMAND_88)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_88);
- return FALSE;
- }
- mdelay(10);
- } while ((aedsp16_wait_data(port) == FALSE) && loop--);
-
- if (aedsp16_read(port) == -1) {
- printk("[AEDSP16] aedsp16_read after CMD 0x%x: failed\n",
- COMMAND_88);
- return FALSE;
- }
-
-#if !defined(CONFIG_SC6600)
- if (aedsp16_write(port, COMMAND_5C)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_5C);
- return FALSE;
- }
-#endif
-
- if (aedsp16_cfg_write(port) == FALSE)
- return FALSE;
-
-#if defined(CONFIG_SC6600)
- if (aedsp16_write(port, COMMAND_60)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_60);
- return FALSE;
- }
- if (aedsp16_write(port, val)) {
- printk("[AEDSP16] DATA 0x%x: failed!\n", val);
- return FALSE;
- }
- if (aedsp16_write(port, COMMAND_6E)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_6E);
- return FALSE;
- }
- if (aedsp16_write(port, ver[0])) {
- printk("[AEDSP16] DATA 0x%x: failed!\n", ver[0]);
- return FALSE;
- }
- if (aedsp16_write(port, ver[1])) {
- printk("[AEDSP16] DATA 0x%x: failed!\n", ver[1]);
- return FALSE;
- }
-
- if (aedsp16_hard_write(port) == FALSE) {
- printk("[AEDSP16] aedsp16_hard_write: failed!\n");
- return FALSE;
- }
-
- if (aedsp16_write(port, COMMAND_5C)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", COMMAND_5C);
- return FALSE;
- }
-
-#if defined(THIS_IS_A_THING_I_HAVE_NOT_TESTED_YET)
- if (aedsp16_cfg_write(port) == FALSE)
- return FALSE;
-#endif
-
-#endif
-
- return TRUE;
-}
-
-static int __init aedsp16_stdcfg(int port) {
- if (aedsp16_write(port, WRITE_MDIRQ_CFG)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", WRITE_MDIRQ_CFG);
- return FALSE;
- }
- /*
- * 0x0A == (IRQ 7, DMA 1, MIRQ 0)
- */
- if (aedsp16_write(port, 0x0A)) {
- printk("[AEDSP16] aedsp16_stdcfg: failed!\n");
- return FALSE;
- }
- return TRUE;
-}
-
-static int __init aedsp16_dsp_version(int port)
-{
- int len = 0;
- int ret;
-
- DBG(("Get DSP Version:\n"));
-
- if (aedsp16_write(ae_config.base_io, GET_DSP_VERSION)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", GET_DSP_VERSION);
- DBG(("failed.\n"));
- return FALSE;
- }
-
- do {
- if ((ret = aedsp16_read(port)) == -1) {
- DBG(("failed.\n"));
- return FALSE;
- }
- /*
-		 * We already know how many ints are stored (2), so we know when the
- * string is finished.
- */
- ver[len++] = ret;
- } while (len < CARDVERDIGITS);
- sprintf(DSPVersion, "%d.%d", ver[0], ver[1]);
-
- DBG(("success.\n"));
-
- return TRUE;
-}
-
-static int __init aedsp16_dsp_copyright(int port)
-{
- int len = 0;
- int ret;
-
- DBG(("Get DSP Copyright:\n"));
-
- if (aedsp16_write(ae_config.base_io, GET_DSP_COPYRIGHT)) {
- printk("[AEDSP16] CMD 0x%x: failed!\n", GET_DSP_COPYRIGHT);
- DBG(("failed.\n"));
- return FALSE;
- }
-
- do {
- if ((ret = aedsp16_read(port)) == -1) {
- /*
-			 * If no more data is available, return to the caller;
-			 * this is not an error if len > 0.  We have no other
-			 * way to know when the string is finished.
- */
- if (len)
- break;
- else {
- DBG(("failed.\n"));
- return FALSE;
- }
- }
-
- DSPCopyright[len++] = ret;
-
- } while (len < CARDNAMELEN);
-
- DBG(("success.\n"));
-
- return TRUE;
-}
-
-static void __init aedsp16_init_tables(void)
-{
- int i = 0;
-
- memset(DSPCopyright, 0, CARDNAMELEN + 1);
- memset(DSPVersion, 0, CARDVERLEN + 1);
-
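-	/*
-	 * Translate the configured IRQ, MPU IRQ and DMA values into the
-	 * bit patterns expected by the soft configuration bytes, using
-	 * the orIRQ/orMIRQ/orDMA lookup tables.
-	 */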
- for (i = 0; orIRQ[i].or; i++)
- if (orIRQ[i].val == ae_config.irq) {
- soft_cfg |= orIRQ[i].or;
- soft_cfg_mss |= orIRQ[i].or;
- }
-
- for (i = 0; orMIRQ[i].or; i++)
- if (orMIRQ[i].or == ae_config.mpu_irq)
- soft_cfg |= orMIRQ[i].or;
-
- for (i = 0; orDMA[i].or; i++)
- if (orDMA[i].val == ae_config.dma) {
- soft_cfg |= orDMA[i].or;
- soft_cfg_mss |= orDMA[i].or;
- }
-}
-
-static int __init aedsp16_init_board(void)
-{
- aedsp16_init_tables();
-
- if (aedsp16_dsp_reset(ae_config.base_io) == FALSE) {
- printk("[AEDSP16] aedsp16_dsp_reset: failed!\n");
- return FALSE;
- }
- if (aedsp16_dsp_copyright(ae_config.base_io) == FALSE) {
- printk("[AEDSP16] aedsp16_dsp_copyright: failed!\n");
- return FALSE;
- }
-
- /*
-	 * My AEDSP16 card returns SC-6000 in DSPCopyright, so
-	 * if we get something different, print a warning.
- */
- if (strcmp("SC-6000", DSPCopyright))
- printk("[AEDSP16] Warning: non SC-6000 audio card!\n");
-
- if (aedsp16_dsp_version(ae_config.base_io) == FALSE) {
- printk("[AEDSP16] aedsp16_dsp_version: failed!\n");
- return FALSE;
- }
-
- if (aedsp16_stdcfg(ae_config.base_io) == FALSE) {
- printk("[AEDSP16] aedsp16_stdcfg: failed!\n");
- return FALSE;
- }
-
-#if defined(CONFIG_SC6600)
- if (aedsp16_hard_read(ae_config.base_io) == FALSE) {
- printk("[AEDSP16] aedsp16_hard_read: failed!\n");
- return FALSE;
- }
-
- aedsp16_hard_decode();
-
- aedsp16_hard_encode();
-
- if (aedsp16_hard_write(ae_config.base_io) == FALSE) {
- printk("[AEDSP16] aedsp16_hard_write: failed!\n");
- return FALSE;
- }
-
- if (aedsp16_ext_cfg_write(ae_config.base_io) == FALSE) {
- printk("[AEDSP16] aedsp16_ext_cfg_write: failed!\n");
- return FALSE;
- }
-#endif /* CONFIG_SC6600 */
-
- if (aedsp16_setup_board(ae_config.base_io) == FALSE) {
- printk("[AEDSP16] aedsp16_setup_board: failed!\n");
- return FALSE;
- }
-
- if (ae_config.mss_base != -1) {
- if (ae_config.init & INIT_MSS) {
- if (aedsp16_init_mss(ae_config.base_io) == FALSE) {
-				printk("[AEDSP16] Can not initialize "
-				       "Microsoft Sound System mode.\n");
- return FALSE;
- }
- }
- }
-
-#if !defined(MODULE) || defined(AEDSP16_INFO) || defined(AEDSP16_DEBUG)
-
- printk("Audio Excel DSP 16 init v%s (%s %s) [",
- VERSION, DSPCopyright,
- DSPVersion);
-
- if (ae_config.mpu_base != -1) {
- if (ae_config.init & INIT_MPU401) {
- printk("MPU401");
- if ((ae_config.init & INIT_MSS) ||
- (ae_config.init & INIT_SBPRO))
- printk(" ");
- }
- }
-
- if (ae_config.mss_base == -1) {
- if (ae_config.init & INIT_SBPRO) {
- printk("SBPro");
- if (ae_config.init & INIT_MSS)
- printk(" ");
- }
- }
-
- if (ae_config.mss_base != -1)
- if (ae_config.init & INIT_MSS)
- printk("MSS");
-
- printk("]\n");
-#endif /* MODULE || AEDSP16_INFO || AEDSP16_DEBUG */
-
- mdelay(10);
-
- return TRUE;
-}
-
-static int __init init_aedsp16_sb(void)
-{
- DBG(("init_aedsp16_sb: "));
-
-/*
- * If the card has already been initialized for MSS, we cannot also
- * initialize it for SBPRO, because the board cannot emulate MSS and
- * SBPRO simultaneously.
- */
- if (ae_config.init & INIT_MSS)
- return FALSE;
- if (ae_config.init & INIT_SBPRO)
- return FALSE;
-
- ae_config.init |= INIT_SBPRO;
-
- DBG(("done.\n"));
-
- return TRUE;
-}
-
-static void uninit_aedsp16_sb(void)
-{
- DBG(("uninit_aedsp16_sb: "));
-
- ae_config.init &= ~INIT_SBPRO;
-
- DBG(("done.\n"));
-}
-
-static int __init init_aedsp16_mss(void)
-{
- DBG(("init_aedsp16_mss: "));
-
-/*
- * If the card has already been initialized for SBPRO, we cannot also
- * initialize it for MSS, because the board cannot emulate MSS and
- * SBPRO simultaneously.
- */
- if (ae_config.init & INIT_SBPRO)
- return FALSE;
- if (ae_config.init & INIT_MSS)
- return FALSE;
-/*
- * We must allocate the CONFIG_AEDSP16_BASE region too because these are the
- * I/O ports used to access the card's control registers.
- */
- if (!(ae_config.init & INIT_MPU401)) {
- if (!request_region(ae_config.base_io, IOBASE_REGION_SIZE,
- "aedsp16 (base)")) {
- printk(
- "AEDSP16 BASE I/O port region is already in use.\n");
- return FALSE;
- }
- }
-
- ae_config.init |= INIT_MSS;
-
- DBG(("done.\n"));
-
- return TRUE;
-}
-
-static void uninit_aedsp16_mss(void)
-{
- DBG(("uninit_aedsp16_mss: "));
-
- if ((!(ae_config.init & INIT_MPU401)) &&
- (ae_config.init & INIT_MSS)) {
- release_region(ae_config.base_io, IOBASE_REGION_SIZE);
- DBG(("AEDSP16 base region released.\n"));
- }
-
- ae_config.init &= ~INIT_MSS;
- DBG(("done.\n"));
-}
-
-static int __init init_aedsp16_mpu(void)
-{
- DBG(("init_aedsp16_mpu: "));
-
- if (ae_config.init & INIT_MPU401)
- return FALSE;
-
-/*
- * We must request the CONFIG_AEDSP16_BASE region too because these are the I/O
- * ports used to access the card's control registers.
- */
- if (!(ae_config.init & (INIT_MSS | INIT_SBPRO))) {
- if (!request_region(ae_config.base_io, IOBASE_REGION_SIZE,
- "aedsp16 (base)")) {
- printk(
- "AEDSP16 BASE I/O port region is already in use.\n");
- return FALSE;
- }
- }
-
- ae_config.init |= INIT_MPU401;
-
- DBG(("done.\n"));
-
- return TRUE;
-}
-
-static void uninit_aedsp16_mpu(void)
-{
- DBG(("uninit_aedsp16_mpu: "));
-
- if ((!(ae_config.init & (INIT_MSS | INIT_SBPRO))) &&
- (ae_config.init & INIT_MPU401)) {
- release_region(ae_config.base_io, IOBASE_REGION_SIZE);
- DBG(("AEDSP16 base region released.\n"));
- }
-
- ae_config.init &= ~INIT_MPU401;
-
- DBG(("done.\n"));
-}
-
-static int __init init_aedsp16(void)
-{
- int initialized = FALSE;
-
- DBG(("Initializing BASE[0x%x] IRQ[%d] DMA[%d] MIRQ[%d]\n",
- ae_config.base_io,ae_config.irq,ae_config.dma,ae_config.mpu_irq));
-
- if (ae_config.mss_base == -1) {
- if (init_aedsp16_sb() == FALSE) {
- uninit_aedsp16_sb();
- } else {
- initialized = TRUE;
- }
- }
-
- if (ae_config.mpu_base != -1) {
- if (init_aedsp16_mpu() == FALSE) {
- uninit_aedsp16_mpu();
- } else {
- initialized = TRUE;
- }
- }
-
-/*
- * In the sequence of init routines, the MSS init MUST be the last!
- * This is because of the special register programming that MSS mode needs.
- * A board reset would disable MSS mode, restoring the default SBPRO
- * mode.
- */
- if (ae_config.mss_base != -1) {
- if (init_aedsp16_mss() == FALSE) {
- uninit_aedsp16_mss();
- } else {
- initialized = TRUE;
- }
- }
-
- if (initialized)
- initialized = aedsp16_init_board();
- return initialized;
-}
-
-static void __exit uninit_aedsp16(void)
-{
- if (ae_config.mss_base != -1)
- uninit_aedsp16_mss();
- else
- uninit_aedsp16_sb();
- if (ae_config.mpu_base != -1)
- uninit_aedsp16_mpu();
-}
-
-static int __initdata io = -1;
-static int __initdata irq = -1;
-static int __initdata dma = -1;
-static int __initdata mpu_irq = -1;
-static int __initdata mss_base = -1;
-static int __initdata mpu_base = -1;
-
-module_param_hw(io, int, ioport, 0);
-MODULE_PARM_DESC(io, "I/O base address (0x220 0x240)");
-module_param_hw(irq, int, irq, 0);
-MODULE_PARM_DESC(irq, "IRQ line (5 7 9 10 11)");
-module_param_hw(dma, int, dma, 0);
-MODULE_PARM_DESC(dma, "dma line (0 1 3)");
-module_param_hw(mpu_irq, int, irq, 0);
-MODULE_PARM_DESC(mpu_irq, "MPU-401 IRQ line (5 7 9 10 0)");
-module_param_hw(mss_base, int, ioport, 0);
-MODULE_PARM_DESC(mss_base, "MSS emulation I/O base address (0x530 0xE80)");
-module_param_hw(mpu_base, int, ioport, 0);
-MODULE_PARM_DESC(mpu_base,"MPU-401 I/O base address (0x300 0x310 0x320 0x330)");
-MODULE_AUTHOR("Riccardo Facchetti <fizban@tin.it>");
-MODULE_DESCRIPTION("Audio Excel DSP 16 Driver Version " VERSION);
-MODULE_LICENSE("GPL");
-
-static int __init do_init_aedsp16(void) {
- printk("Audio Excel DSP 16 init driver Copyright (C) Riccardo Facchetti 1995-98\n");
- if (io == -1 || dma == -1 || irq == -1) {
- printk(KERN_INFO "aedsp16: I/O, IRQ and DMA are mandatory\n");
- return -EINVAL;
- }
-
- ae_config.base_io = io;
- ae_config.irq = irq;
- ae_config.dma = dma;
-
- ae_config.mss_base = mss_base;
- ae_config.mpu_base = mpu_base;
- ae_config.mpu_irq = mpu_irq;
-
- if (init_aedsp16() == FALSE) {
- printk(KERN_ERR "aedsp16: initialization failed\n");
- /*
- * XXX
- * What error should we return here ?
- */
- return -EINVAL;
- }
- return 0;
-}
-
-static void __exit cleanup_aedsp16(void) {
- uninit_aedsp16();
-}
-
-module_init(do_init_aedsp16);
-module_exit(cleanup_aedsp16);
-
-#ifndef MODULE
-static int __init setup_aedsp16(char *str)
-{
- /* io, irq, dma, mss_io, mpu_io, mpu_irq */
- int ints[7];
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- io = ints[1];
- irq = ints[2];
- dma = ints[3];
- mss_base = ints[4];
- mpu_base = ints[5];
- mpu_irq = ints[6];
- return 1;
-}
-
-__setup("aedsp16=", setup_aedsp16);
-#endif
diff --git a/sound/oss/audio.c b/sound/oss/audio.c
deleted file mode 100644
index 09c932f899b8..000000000000
--- a/sound/oss/audio.c
+++ /dev/null
@@ -1,985 +0,0 @@
-/*
- * sound/oss/audio.c
- *
- * Device file manager for /dev/audio
- */
-
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- */
-/*
- * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
- * Thomas Sailer : moved several static variables into struct audio_operations
- * (which is grossly misnamed btw.) because they have the same
- * lifetime as the rest in there and dynamic allocation saves
- * 12k or so
- * Thomas Sailer : use more logical O_NONBLOCK semantics
- * Daniel Rodriksson: reworked the use of the device specific copy_user
- * still generic
- * Horst von Brand: Add missing #include <linux/string.h>
- * Chris Rankin : Update the module-usage counter for the coprocessor,
- * and decrement the counters again if we cannot open
- * the audio device.
- */
-
-#include <linux/stddef.h>
-#include <linux/string.h>
-#include <linux/kmod.h>
-
-#include "sound_config.h"
-#include "ulaw.h"
-#include "coproc.h"
-
-#define NEUTRAL8 0x80
-#define NEUTRAL16 0x00
-
-
-static int dma_ioctl(int dev, unsigned int cmd, void __user *arg);
-
-static int set_format(int dev, int fmt)
-{
- if (fmt != AFMT_QUERY)
- {
- audio_devs[dev]->local_conversion = 0;
-
- if (!(audio_devs[dev]->format_mask & fmt)) /* Not supported */
- {
- if (fmt == AFMT_MU_LAW)
- {
- fmt = AFMT_U8;
- audio_devs[dev]->local_conversion = CNV_MU_LAW;
- }
- else
- fmt = AFMT_U8; /* This is always supported */
- }
- audio_devs[dev]->audio_format = audio_devs[dev]->d->set_bits(dev, fmt);
- audio_devs[dev]->local_format = fmt;
- }
- else
- return audio_devs[dev]->local_format;
-
- if (audio_devs[dev]->local_conversion)
- return audio_devs[dev]->local_conversion;
- else
- return audio_devs[dev]->local_format;
-}
-
-int audio_open(int dev, struct file *file)
-{
- int ret;
- int bits;
- int dev_type = dev & 0x0f;
- int mode = translate_mode(file);
- const struct audio_driver *driver;
- const struct coproc_operations *coprocessor;
-
- dev = dev >> 4;
-
- if (dev_type == SND_DEV_DSP16)
- bits = 16;
- else
- bits = 8;
-
- if (dev < 0 || dev >= num_audiodevs)
- return -ENXIO;
-
- driver = audio_devs[dev]->d;
-
- if (!try_module_get(driver->owner))
- return -ENODEV;
-
- if ((ret = DMAbuf_open(dev, mode)) < 0)
- goto error_1;
-
- if ( (coprocessor = audio_devs[dev]->coproc) != NULL ) {
- if (!try_module_get(coprocessor->owner))
- goto error_2;
-
- if ((ret = coprocessor->open(coprocessor->devc, COPR_PCM)) < 0) {
- printk(KERN_WARNING "Sound: Can't access coprocessor device\n");
- goto error_3;
- }
- }
-
- audio_devs[dev]->local_conversion = 0;
-
- if (dev_type == SND_DEV_AUDIO)
- set_format(dev, AFMT_MU_LAW);
- else
- set_format(dev, bits);
-
- audio_devs[dev]->audio_mode = AM_NONE;
-
- return 0;
-
- /*
- * Clean-up stack: this is what needs (un)doing if
- * we can't open the audio device ...
- */
- error_3:
- module_put(coprocessor->owner);
-
- error_2:
- DMAbuf_release(dev, mode);
-
- error_1:
- module_put(driver->owner);
-
- return ret;
-}
-
-static void sync_output(int dev)
-{
- int p, i;
- int l;
- struct dma_buffparms *dmap = audio_devs[dev]->dmap_out;
-
- if (dmap->fragment_size <= 0)
- return;
- dmap->flags |= DMA_POST;
-
- /* Align the write pointer with fragment boundaries */
-
- if ((l = dmap->user_counter % dmap->fragment_size) > 0)
- {
- int len;
- unsigned long offs = dmap->user_counter % dmap->bytes_in_use;
-
- len = dmap->fragment_size - l;
- memset(dmap->raw_buf + offs, dmap->neutral_byte, len);
- DMAbuf_move_wrpointer(dev, len);
- }
-
- /*
- * Clean all unused buffer fragments.
- */
-
- p = dmap->qtail;
- dmap->flags |= DMA_POST;
-
- for (i = dmap->qlen + 1; i < dmap->nbufs; i++)
- {
- p = (p + 1) % dmap->nbufs;
- if (((dmap->raw_buf + p * dmap->fragment_size) + dmap->fragment_size) >
- (dmap->raw_buf + dmap->buffsize))
- printk(KERN_ERR "audio: Buffer error 2\n");
-
- memset(dmap->raw_buf + p * dmap->fragment_size,
- dmap->neutral_byte,
- dmap->fragment_size);
- }
-
- dmap->flags |= DMA_DIRTY;
-}
-
-void audio_release(int dev, struct file *file)
-{
- const struct coproc_operations *coprocessor;
- int mode = translate_mode(file);
-
- dev = dev >> 4;
-
- /*
- * We do this in DMAbuf_release(). Why are we doing it
- * here? Why don't we test the file mode before setting
- * both flags? DMAbuf_release() does.
- * ...pester...pester...pester...
- */
- audio_devs[dev]->dmap_out->closing = 1;
- audio_devs[dev]->dmap_in->closing = 1;
-
- /*
- * We need to make sure we allocated the dmap_out buffer
- * before we go mucking around with it in sync_output().
- */
- if (mode & OPEN_WRITE)
- sync_output(dev);
-
- if ( (coprocessor = audio_devs[dev]->coproc) != NULL ) {
- coprocessor->close(coprocessor->devc, COPR_PCM);
- module_put(coprocessor->owner);
- }
- DMAbuf_release(dev, mode);
-
- module_put(audio_devs[dev]->d->owner);
-}
-
-static void translate_bytes(const unsigned char *table, unsigned char *buff, int n)
-{
- unsigned long i;
-
- if (n <= 0)
- return;
-
- for (i = 0; i < n; ++i)
- buff[i] = table[buff[i]];
-}
-
-int audio_write(int dev, struct file *file, const char __user *buf, int count)
-{
- int c, p, l, buf_size, used, returned;
- int err;
- char *dma_buf;
-
- dev = dev >> 4;
-
- p = 0;
- c = count;
-
- if(count < 0)
- return -EINVAL;
-
- if (!(audio_devs[dev]->open_mode & OPEN_WRITE))
- return -EPERM;
-
- if (audio_devs[dev]->flags & DMA_DUPLEX)
- audio_devs[dev]->audio_mode |= AM_WRITE;
- else
- audio_devs[dev]->audio_mode = AM_WRITE;
-
- if (!count) /* Flush output */
- {
- sync_output(dev);
- return 0;
- }
-
- while (c)
- {
- if ((err = DMAbuf_getwrbuffer(dev, &dma_buf, &buf_size, !!(file->f_flags & O_NONBLOCK))) < 0)
- {
- /* Handle nonblocking mode */
- if ((file->f_flags & O_NONBLOCK) && err == -EAGAIN)
- return p? p : -EAGAIN; /* No more space. Return # of accepted bytes */
- return err;
- }
- l = c;
-
- if (l > buf_size)
- l = buf_size;
-
- returned = l;
- used = l;
- if (!audio_devs[dev]->d->copy_user)
- {
- if ((dma_buf + l) >
- (audio_devs[dev]->dmap_out->raw_buf + audio_devs[dev]->dmap_out->buffsize))
- {
- printk(KERN_ERR "audio: Buffer error 3 (%lx,%d), (%lx, %d)\n", (long) dma_buf, l, (long) audio_devs[dev]->dmap_out->raw_buf, (int) audio_devs[dev]->dmap_out->buffsize);
- return -EDOM;
- }
- if (dma_buf < audio_devs[dev]->dmap_out->raw_buf)
- {
- printk(KERN_ERR "audio: Buffer error 13 (%lx<%lx)\n", (long) dma_buf, (long) audio_devs[dev]->dmap_out->raw_buf);
- return -EDOM;
- }
- if(copy_from_user(dma_buf, &(buf)[p], l))
- return -EFAULT;
- }
- else audio_devs[dev]->d->copy_user (dev,
- dma_buf, 0,
- buf, p,
- c, buf_size,
- &used, &returned,
- l);
- l = returned;
-
- if (audio_devs[dev]->local_conversion & CNV_MU_LAW)
- {
- translate_bytes(ulaw_dsp, (unsigned char *) dma_buf, l);
- }
- c -= used;
- p += used;
- DMAbuf_move_wrpointer(dev, l);
-
- }
-
- return count;
-}
-
-int audio_read(int dev, struct file *file, char __user *buf, int count)
-{
- int c, p, l;
- char *dmabuf;
- int buf_no;
-
- dev = dev >> 4;
- p = 0;
- c = count;
-
- if (!(audio_devs[dev]->open_mode & OPEN_READ))
- return -EPERM;
-
- if ((audio_devs[dev]->audio_mode & AM_WRITE) && !(audio_devs[dev]->flags & DMA_DUPLEX))
- sync_output(dev);
-
- if (audio_devs[dev]->flags & DMA_DUPLEX)
- audio_devs[dev]->audio_mode |= AM_READ;
- else
- audio_devs[dev]->audio_mode = AM_READ;
-
- while(c)
- {
- if ((buf_no = DMAbuf_getrdbuffer(dev, &dmabuf, &l, !!(file->f_flags & O_NONBLOCK))) < 0)
- {
- /*
- * Nonblocking mode handling. Return current # of bytes
- */
-
- if (p > 0) /* Avoid throwing away data */
- return p; /* Return it instead */
-
- if ((file->f_flags & O_NONBLOCK) && buf_no == -EAGAIN)
- return -EAGAIN;
-
- return buf_no;
- }
- if (l > c)
- l = c;
-
- /*
- * Insert any local processing here.
- */
-
- if (audio_devs[dev]->local_conversion & CNV_MU_LAW)
- {
- translate_bytes(dsp_ulaw, (unsigned char *) dmabuf, l);
- }
-
- {
- char *fixit = dmabuf;
-
- if(copy_to_user(&(buf)[p], fixit, l))
- return -EFAULT;
- }
-
- DMAbuf_rmchars(dev, buf_no, l);
-
- p += l;
- c -= l;
- }
-
- return count - c;
-}
-
-int audio_ioctl(int dev, struct file *file, unsigned int cmd, void __user *arg)
-{
- int val, count;
- unsigned long flags;
- struct dma_buffparms *dmap;
- int __user *p = arg;
-
- dev = dev >> 4;
-
- if (_IOC_TYPE(cmd) == 'C') {
- if (audio_devs[dev]->coproc) /* Coprocessor ioctl */
- return audio_devs[dev]->coproc->ioctl(audio_devs[dev]->coproc->devc, cmd, arg, 0);
- /* else
- printk(KERN_DEBUG"/dev/dsp%d: No coprocessor for this device\n", dev); */
- return -ENXIO;
- }
- else switch (cmd)
- {
- case SNDCTL_DSP_SYNC:
- if (!(audio_devs[dev]->open_mode & OPEN_WRITE))
- return 0;
- if (audio_devs[dev]->dmap_out->fragment_size == 0)
- return 0;
- sync_output(dev);
- DMAbuf_sync(dev);
- DMAbuf_reset(dev);
- return 0;
-
- case SNDCTL_DSP_POST:
- if (!(audio_devs[dev]->open_mode & OPEN_WRITE))
- return 0;
- if (audio_devs[dev]->dmap_out->fragment_size == 0)
- return 0;
- audio_devs[dev]->dmap_out->flags |= DMA_POST | DMA_DIRTY;
- sync_output(dev);
- dma_ioctl(dev, SNDCTL_DSP_POST, NULL);
- return 0;
-
- case SNDCTL_DSP_RESET:
- audio_devs[dev]->audio_mode = AM_NONE;
- DMAbuf_reset(dev);
- return 0;
-
- case SNDCTL_DSP_GETFMTS:
- val = audio_devs[dev]->format_mask | AFMT_MU_LAW;
- break;
-
- case SNDCTL_DSP_SETFMT:
- if (get_user(val, p))
- return -EFAULT;
- val = set_format(dev, val);
- break;
-
- case SNDCTL_DSP_GETISPACE:
- if (!(audio_devs[dev]->open_mode & OPEN_READ))
- return 0;
- if ((audio_devs[dev]->audio_mode & AM_WRITE) && !(audio_devs[dev]->flags & DMA_DUPLEX))
- return -EBUSY;
- return dma_ioctl(dev, cmd, arg);
-
- case SNDCTL_DSP_GETOSPACE:
- if (!(audio_devs[dev]->open_mode & OPEN_WRITE))
- return -EPERM;
- if ((audio_devs[dev]->audio_mode & AM_READ) && !(audio_devs[dev]->flags & DMA_DUPLEX))
- return -EBUSY;
- return dma_ioctl(dev, cmd, arg);
-
- case SNDCTL_DSP_NONBLOCK:
- spin_lock(&file->f_lock);
- file->f_flags |= O_NONBLOCK;
- spin_unlock(&file->f_lock);
- return 0;
-
- case SNDCTL_DSP_GETCAPS:
- val = 1 | DSP_CAP_MMAP; /* Revision level of this ioctl() */
- if (audio_devs[dev]->flags & DMA_DUPLEX &&
- audio_devs[dev]->open_mode == OPEN_READWRITE)
- val |= DSP_CAP_DUPLEX;
- if (audio_devs[dev]->coproc)
- val |= DSP_CAP_COPROC;
- if (audio_devs[dev]->d->local_qlen) /* Device has hidden buffers */
- val |= DSP_CAP_BATCH;
- if (audio_devs[dev]->d->trigger) /* Supports SETTRIGGER */
- val |= DSP_CAP_TRIGGER;
- break;
-
- case SOUND_PCM_WRITE_RATE:
- if (get_user(val, p))
- return -EFAULT;
- val = audio_devs[dev]->d->set_speed(dev, val);
- break;
-
- case SOUND_PCM_READ_RATE:
- val = audio_devs[dev]->d->set_speed(dev, 0);
- break;
-
- case SNDCTL_DSP_STEREO:
- if (get_user(val, p))
- return -EFAULT;
- if (val > 1 || val < 0)
- return -EINVAL;
- val = audio_devs[dev]->d->set_channels(dev, val + 1) - 1;
- break;
-
- case SOUND_PCM_WRITE_CHANNELS:
- if (get_user(val, p))
- return -EFAULT;
- val = audio_devs[dev]->d->set_channels(dev, val);
- break;
-
- case SOUND_PCM_READ_CHANNELS:
- val = audio_devs[dev]->d->set_channels(dev, 0);
- break;
-
- case SOUND_PCM_READ_BITS:
- val = audio_devs[dev]->d->set_bits(dev, 0);
- break;
-
- case SNDCTL_DSP_SETDUPLEX:
- if (audio_devs[dev]->open_mode != OPEN_READWRITE)
- return -EPERM;
- return (audio_devs[dev]->flags & DMA_DUPLEX) ? 0 : -EIO;
-
- case SNDCTL_DSP_PROFILE:
- if (get_user(val, p))
- return -EFAULT;
- if (audio_devs[dev]->open_mode & OPEN_WRITE)
- audio_devs[dev]->dmap_out->applic_profile = val;
- if (audio_devs[dev]->open_mode & OPEN_READ)
- audio_devs[dev]->dmap_in->applic_profile = val;
- return 0;
-
- case SNDCTL_DSP_GETODELAY:
- dmap = audio_devs[dev]->dmap_out;
- if (!(audio_devs[dev]->open_mode & OPEN_WRITE))
- return -EINVAL;
- if (!(dmap->flags & DMA_ALLOC_DONE))
- {
- val=0;
- break;
- }
-
- spin_lock_irqsave(&dmap->lock,flags);
- /* Compute number of bytes that have been played */
- count = DMAbuf_get_buffer_pointer (dev, dmap, DMODE_OUTPUT);
- if (count < dmap->fragment_size && dmap->qhead != 0)
- count += dmap->bytes_in_use; /* Pointer wrap not handled yet */
- count += dmap->byte_counter;
-
- /* Subtract current count from the number of bytes written by app */
- count = dmap->user_counter - count;
- if (count < 0)
- count = 0;
- spin_unlock_irqrestore(&dmap->lock,flags);
- val = count;
- break;
-
- default:
- return dma_ioctl(dev, cmd, arg);
- }
- return put_user(val, p);
-}
-
-void audio_init_devices(void)
-{
- /*
- * NOTE! This routine could be called several times during boot.
- */
-}
-
-void reorganize_buffers(int dev, struct dma_buffparms *dmap, int recording)
-{
- /*
- * This routine breaks the physical device buffers to logical ones.
- */
-
- struct audio_operations *dsp_dev = audio_devs[dev];
-
- unsigned i, n;
- unsigned sr, nc, sz, bsz;
-
- sr = dsp_dev->d->set_speed(dev, 0);
- nc = dsp_dev->d->set_channels(dev, 0);
- sz = dsp_dev->d->set_bits(dev, 0);
-
- if (sz == 8)
- dmap->neutral_byte = NEUTRAL8;
- else
- dmap->neutral_byte = NEUTRAL16;
-
- if (sr < 1 || nc < 1 || sz < 1)
- {
-/* printk(KERN_DEBUG "Warning: Invalid PCM parameters[%d] sr=%d, nc=%d, sz=%d\n", dev, sr, nc, sz);*/
- sr = DSP_DEFAULT_SPEED;
- nc = 1;
- sz = 8;
- }
-
- sz = sr * nc * sz;
-
- sz /= 8; /* #bits -> #bytes */
- dmap->data_rate = sz;
-
- if (!dmap->needs_reorg)
- return;
- dmap->needs_reorg = 0;
-
- if (dmap->fragment_size == 0)
- {
- /* Compute the fragment size using the default algorithm */
-
- /*
- * Compute a buffer size for time not exceeding 1 second.
- * Usually this algorithm gives a buffer size for 0.5 to 1.0 seconds
- * of sound (using the current speed, sample size and #channels).
- */
-
- bsz = dmap->buffsize;
- while (bsz > sz)
- bsz /= 2;
-
- if (bsz == dmap->buffsize)
- bsz /= 2; /* Needs at least 2 buffers */
-
- /*
- * Split the computed fragment to smaller parts. After 3.5a9
- * the default subdivision is 4 which should give better
- * results when recording.
- */
-
- if (dmap->subdivision == 0) /* Not already set */
- {
- dmap->subdivision = 4; /* Init to the default value */
-
- if ((bsz / dmap->subdivision) > 4096)
- dmap->subdivision *= 2;
- if ((bsz / dmap->subdivision) < 4096)
- dmap->subdivision = 1;
- }
- bsz /= dmap->subdivision;
-
- if (bsz < 16)
- bsz = 16; /* Just a sanity check */
-
- dmap->fragment_size = bsz;
- }
- else
- {
- /*
- * The process has specified the buffer size with SNDCTL_DSP_SETFRAGMENT or
- * the buffer size computation has already been done.
- */
- if (dmap->fragment_size > (dmap->buffsize / 2))
- dmap->fragment_size = (dmap->buffsize / 2);
- bsz = dmap->fragment_size;
- }
-
- if (audio_devs[dev]->min_fragment)
- if (bsz < (1 << audio_devs[dev]->min_fragment))
- bsz = 1 << audio_devs[dev]->min_fragment;
- if (audio_devs[dev]->max_fragment)
- if (bsz > (1 << audio_devs[dev]->max_fragment))
- bsz = 1 << audio_devs[dev]->max_fragment;
- bsz &= ~0x07; /* Force size which is multiple of 8 bytes */
-#ifdef OS_DMA_ALIGN_CHECK
- OS_DMA_ALIGN_CHECK(bsz);
-#endif
-
- n = dmap->buffsize / bsz;
- if (n > MAX_SUB_BUFFERS)
- n = MAX_SUB_BUFFERS;
- if (n > dmap->max_fragments)
- n = dmap->max_fragments;
-
- if (n < 2)
- {
- n = 2;
- bsz /= 2;
- }
- dmap->nbufs = n;
- dmap->bytes_in_use = n * bsz;
- dmap->fragment_size = bsz;
- dmap->max_byte_counter = (dmap->data_rate * 60 * 60) +
- dmap->bytes_in_use; /* Approximately one hour */
-
- if (dmap->raw_buf)
- {
- memset(dmap->raw_buf, dmap->neutral_byte, dmap->bytes_in_use);
- }
-
- for (i = 0; i < dmap->nbufs; i++)
- {
- dmap->counts[i] = 0;
- }
-
- dmap->flags |= DMA_ALLOC_DONE | DMA_EMPTY;
-}
-
-static int dma_subdivide(int dev, struct dma_buffparms *dmap, int fact)
-{
- if (fact == 0)
- {
- fact = dmap->subdivision;
- if (fact == 0)
- fact = 1;
- return fact;
- }
- if (dmap->subdivision != 0 || dmap->fragment_size) /* Too late to change */
- return -EINVAL;
-
- if (fact > MAX_REALTIME_FACTOR)
- return -EINVAL;
-
- if (fact != 1 && fact != 2 && fact != 4 && fact != 8 && fact != 16)
- return -EINVAL;
-
- dmap->subdivision = fact;
- return fact;
-}
-
-static int dma_set_fragment(int dev, struct dma_buffparms *dmap, int fact)
-{
- int bytes, count;
-
- if (fact == 0)
- return -EIO;
-
- if (dmap->subdivision != 0 ||
- dmap->fragment_size) /* Too late to change */
- return -EINVAL;
-
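-	/*
-	 * SNDCTL_DSP_SETFRAGMENT argument encoding: the low 16 bits give
-	 * the fragment size as a power of two (fragment_size = 1 << bytes),
-	 * the upper bits give the maximum number of fragments.
-	 */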
- bytes = fact & 0xffff;
- count = (fact >> 16) & 0x7fff;
-
- if (count == 0)
- count = MAX_SUB_BUFFERS;
- else if (count < MAX_SUB_BUFFERS)
- count++;
-
- if (bytes < 4 || bytes > 17) /* <16 || > 512k */
- return -EINVAL;
-
- if (count < 2)
- return -EINVAL;
-
- if (audio_devs[dev]->min_fragment > 0)
- if (bytes < audio_devs[dev]->min_fragment)
- bytes = audio_devs[dev]->min_fragment;
-
- if (audio_devs[dev]->max_fragment > 0)
- if (bytes > audio_devs[dev]->max_fragment)
- bytes = audio_devs[dev]->max_fragment;
-
-#ifdef OS_DMA_MINBITS
- if (bytes < OS_DMA_MINBITS)
- bytes = OS_DMA_MINBITS;
-#endif
-
- dmap->fragment_size = (1 << bytes);
- dmap->max_fragments = count;
-
- if (dmap->fragment_size > dmap->buffsize)
- dmap->fragment_size = dmap->buffsize;
-
- if (dmap->fragment_size == dmap->buffsize &&
- audio_devs[dev]->flags & DMA_AUTOMODE)
- dmap->fragment_size /= 2; /* Needs at least 2 buffers */
-
- dmap->subdivision = 1; /* Disable SNDCTL_DSP_SUBDIVIDE */
- return bytes | ((count - 1) << 16);
-}
-
-static int dma_ioctl(int dev, unsigned int cmd, void __user *arg)
-{
- struct dma_buffparms *dmap_out = audio_devs[dev]->dmap_out;
- struct dma_buffparms *dmap_in = audio_devs[dev]->dmap_in;
- struct dma_buffparms *dmap;
- audio_buf_info info;
- count_info cinfo;
- int fact, ret, changed, bits, count, err;
- unsigned long flags;
-
- switch (cmd)
- {
- case SNDCTL_DSP_SUBDIVIDE:
- ret = 0;
- if (get_user(fact, (int __user *)arg))
- return -EFAULT;
- if (audio_devs[dev]->open_mode & OPEN_WRITE)
- ret = dma_subdivide(dev, dmap_out, fact);
- if (ret < 0)
- return ret;
- if (audio_devs[dev]->open_mode != OPEN_WRITE ||
- (audio_devs[dev]->flags & DMA_DUPLEX &&
- audio_devs[dev]->open_mode & OPEN_READ))
- ret = dma_subdivide(dev, dmap_in, fact);
- if (ret < 0)
- return ret;
- break;
-
- case SNDCTL_DSP_GETISPACE:
- case SNDCTL_DSP_GETOSPACE:
- dmap = dmap_out;
- if (cmd == SNDCTL_DSP_GETISPACE && !(audio_devs[dev]->open_mode & OPEN_READ))
- return -EINVAL;
- if (cmd == SNDCTL_DSP_GETOSPACE && !(audio_devs[dev]->open_mode & OPEN_WRITE))
- return -EINVAL;
- if (cmd == SNDCTL_DSP_GETISPACE && audio_devs[dev]->flags & DMA_DUPLEX)
- dmap = dmap_in;
- if (dmap->mapping_flags & DMA_MAP_MAPPED)
- return -EINVAL;
- if (!(dmap->flags & DMA_ALLOC_DONE))
- reorganize_buffers(dev, dmap, (cmd == SNDCTL_DSP_GETISPACE));
- info.fragstotal = dmap->nbufs;
- if (cmd == SNDCTL_DSP_GETISPACE)
- info.fragments = dmap->qlen;
- else
- {
- if (!DMAbuf_space_in_queue(dev))
- info.fragments = 0;
- else
- {
- info.fragments = DMAbuf_space_in_queue(dev);
- if (audio_devs[dev]->d->local_qlen)
- {
- int tmp = audio_devs[dev]->d->local_qlen(dev);
- if (tmp && info.fragments)
- tmp--; /*
- * This buffer has been counted twice
- */
- info.fragments -= tmp;
- }
- }
- }
- if (info.fragments < 0)
- info.fragments = 0;
- else if (info.fragments > dmap->nbufs)
- info.fragments = dmap->nbufs;
-
- info.fragsize = dmap->fragment_size;
- info.bytes = info.fragments * dmap->fragment_size;
-
- if (cmd == SNDCTL_DSP_GETISPACE && dmap->qlen)
- info.bytes -= dmap->counts[dmap->qhead];
- else
- {
- info.fragments = info.bytes / dmap->fragment_size;
- info.bytes -= dmap->user_counter % dmap->fragment_size;
- }
- if (copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
- return 0;
-
- case SNDCTL_DSP_SETTRIGGER:
- if (get_user(bits, (int __user *)arg))
- return -EFAULT;
- bits &= audio_devs[dev]->open_mode;
- if (audio_devs[dev]->d->trigger == NULL)
- return -EINVAL;
- if (!(audio_devs[dev]->flags & DMA_DUPLEX) && (bits & PCM_ENABLE_INPUT) &&
- (bits & PCM_ENABLE_OUTPUT))
- return -EINVAL;
-
- if (bits & PCM_ENABLE_INPUT)
- {
- spin_lock_irqsave(&dmap_in->lock,flags);
- changed = (audio_devs[dev]->enable_bits ^ bits) & PCM_ENABLE_INPUT;
- if (changed && audio_devs[dev]->go)
- {
- reorganize_buffers(dev, dmap_in, 1);
- if ((err = audio_devs[dev]->d->prepare_for_input(dev,
- dmap_in->fragment_size, dmap_in->nbufs)) < 0) {
- spin_unlock_irqrestore(&dmap_in->lock,flags);
- return err;
- }
- dmap_in->dma_mode = DMODE_INPUT;
- audio_devs[dev]->enable_bits |= PCM_ENABLE_INPUT;
- DMAbuf_activate_recording(dev, dmap_in);
- } else
- audio_devs[dev]->enable_bits &= ~PCM_ENABLE_INPUT;
- spin_unlock_irqrestore(&dmap_in->lock,flags);
- }
- if (bits & PCM_ENABLE_OUTPUT)
- {
- spin_lock_irqsave(&dmap_out->lock,flags);
- changed = (audio_devs[dev]->enable_bits ^ bits) & PCM_ENABLE_OUTPUT;
- if (changed &&
- (dmap_out->mapping_flags & DMA_MAP_MAPPED || dmap_out->qlen > 0) &&
- audio_devs[dev]->go)
- {
- if (!(dmap_out->flags & DMA_ALLOC_DONE))
- reorganize_buffers(dev, dmap_out, 0);
- dmap_out->dma_mode = DMODE_OUTPUT;
- audio_devs[dev]->enable_bits |= PCM_ENABLE_OUTPUT;
- dmap_out->counts[dmap_out->qhead] = dmap_out->fragment_size;
- DMAbuf_launch_output(dev, dmap_out);
- } else
- audio_devs[dev]->enable_bits &= ~PCM_ENABLE_OUTPUT;
- spin_unlock_irqrestore(&dmap_out->lock,flags);
- }
-#if 0
- if (changed && audio_devs[dev]->d->trigger)
- audio_devs[dev]->d->trigger(dev, bits * audio_devs[dev]->go);
-#endif
- /* Falls through... */
-
- case SNDCTL_DSP_GETTRIGGER:
- ret = audio_devs[dev]->enable_bits;
- break;
-
- case SNDCTL_DSP_SETSYNCRO:
- if (!audio_devs[dev]->d->trigger)
- return -EINVAL;
- audio_devs[dev]->d->trigger(dev, 0);
- audio_devs[dev]->go = 0;
- return 0;
-
- case SNDCTL_DSP_GETIPTR:
- if (!(audio_devs[dev]->open_mode & OPEN_READ))
- return -EINVAL;
- spin_lock_irqsave(&dmap_in->lock,flags);
- cinfo.bytes = dmap_in->byte_counter;
- cinfo.ptr = DMAbuf_get_buffer_pointer(dev, dmap_in, DMODE_INPUT) & ~3;
- if (cinfo.ptr < dmap_in->fragment_size && dmap_in->qtail != 0)
- cinfo.bytes += dmap_in->bytes_in_use; /* Pointer wrap not handled yet */
- cinfo.blocks = dmap_in->qlen;
- cinfo.bytes += cinfo.ptr;
- if (dmap_in->mapping_flags & DMA_MAP_MAPPED)
- dmap_in->qlen = 0; /* Reset interrupt counter */
- spin_unlock_irqrestore(&dmap_in->lock,flags);
- if (copy_to_user(arg, &cinfo, sizeof(cinfo)))
- return -EFAULT;
- return 0;
-
- case SNDCTL_DSP_GETOPTR:
- if (!(audio_devs[dev]->open_mode & OPEN_WRITE))
- return -EINVAL;
-
- spin_lock_irqsave(&dmap_out->lock,flags);
- cinfo.bytes = dmap_out->byte_counter;
- cinfo.ptr = DMAbuf_get_buffer_pointer(dev, dmap_out, DMODE_OUTPUT) & ~3;
- if (cinfo.ptr < dmap_out->fragment_size && dmap_out->qhead != 0)
- cinfo.bytes += dmap_out->bytes_in_use; /* Pointer wrap not handled yet */
- cinfo.blocks = dmap_out->qlen;
- cinfo.bytes += cinfo.ptr;
- if (dmap_out->mapping_flags & DMA_MAP_MAPPED)
- dmap_out->qlen = 0; /* Reset interrupt counter */
- spin_unlock_irqrestore(&dmap_out->lock,flags);
- if (copy_to_user(arg, &cinfo, sizeof(cinfo)))
- return -EFAULT;
- return 0;
-
- case SNDCTL_DSP_GETODELAY:
- if (!(audio_devs[dev]->open_mode & OPEN_WRITE))
- return -EINVAL;
- if (!(dmap_out->flags & DMA_ALLOC_DONE))
- {
- ret=0;
- break;
- }
- spin_lock_irqsave(&dmap_out->lock,flags);
- /* Compute number of bytes that have been played */
- count = DMAbuf_get_buffer_pointer (dev, dmap_out, DMODE_OUTPUT);
- if (count < dmap_out->fragment_size && dmap_out->qhead != 0)
- count += dmap_out->bytes_in_use; /* Pointer wrap not handled yet */
- count += dmap_out->byte_counter;
- /* Subtract current count from the number of bytes written by app */
- count = dmap_out->user_counter - count;
- if (count < 0)
- count = 0;
- spin_unlock_irqrestore(&dmap_out->lock,flags);
- ret = count;
- break;
-
- case SNDCTL_DSP_POST:
- if (audio_devs[dev]->dmap_out->qlen > 0)
- if (!(audio_devs[dev]->dmap_out->flags & DMA_ACTIVE))
- DMAbuf_launch_output(dev, audio_devs[dev]->dmap_out);
- return 0;
-
- case SNDCTL_DSP_GETBLKSIZE:
- dmap = dmap_out;
- if (audio_devs[dev]->open_mode & OPEN_WRITE)
- reorganize_buffers(dev, dmap_out, (audio_devs[dev]->open_mode == OPEN_READ));
- if (audio_devs[dev]->open_mode == OPEN_READ ||
- (audio_devs[dev]->flags & DMA_DUPLEX &&
- audio_devs[dev]->open_mode & OPEN_READ))
- reorganize_buffers(dev, dmap_in, (audio_devs[dev]->open_mode == OPEN_READ));
- if (audio_devs[dev]->open_mode == OPEN_READ)
- dmap = dmap_in;
- ret = dmap->fragment_size;
- break;
-
- case SNDCTL_DSP_SETFRAGMENT:
- ret = 0;
- if (get_user(fact, (int __user *)arg))
- return -EFAULT;
- if (audio_devs[dev]->open_mode & OPEN_WRITE)
- ret = dma_set_fragment(dev, dmap_out, fact);
- if (ret < 0)
- return ret;
- if (audio_devs[dev]->open_mode == OPEN_READ ||
- (audio_devs[dev]->flags & DMA_DUPLEX &&
- audio_devs[dev]->open_mode & OPEN_READ))
- ret = dma_set_fragment(dev, dmap_in, fact);
- if (ret < 0)
- return ret;
- if (!arg) /* don't know what this is good for, but preserve old semantics */
- return 0;
- break;
-
- default:
- if (!audio_devs[dev]->d->ioctl)
- return -EINVAL;
- return audio_devs[dev]->d->ioctl(dev, cmd, arg);
- }
- return put_user(ret, (int __user *)arg);
-}
diff --git a/sound/oss/bin2hex.c b/sound/oss/bin2hex.c
deleted file mode 100644
index 26c04ce04d71..000000000000
--- a/sound/oss/bin2hex.c
+++ /dev/null
@@ -1,40 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-
-int main( int argc, const char * argv [] )
-{
- const char * varname;
- int i = 0;
- int c;
- int id = 0;
-
- if(argv[1] && strcmp(argv[1],"-i")==0)
- {
- argv++;
- argc--;
- id=1;
- }
-
- if(argc==1)
- {
- fprintf(stderr, "bin2hex: [-i] firmware\n");
- exit(1);
- }
-
- varname = argv[1];
- printf( "/* automatically generated by bin2hex */\n" );
- printf( "static unsigned char %s [] %s =\n{\n", varname , id?"__initdata":"");
-
- while ( ( c = getchar( ) ) != EOF )
- {
- if ( i != 0 && i % 10 == 0 )
- printf( "\n" );
- printf( "0x%02lx,", c & 0xFFl );
- i++;
- }
-
- printf( "};\nstatic int %sLen = %d;\n", varname, i );
- return 0;
-}
diff --git a/sound/oss/coproc.h b/sound/oss/coproc.h
deleted file mode 100644
index 7bec21bbdd88..000000000000
--- a/sound/oss/coproc.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Definitions for various on-board processors on the sound cards,
- * for example DSP processors.
- */
-
-/*
- * Coprocessor access types
- */
-#define COPR_CUSTOM 0x0001 /* Custom applications */
-#define COPR_MIDI 0x0002 /* MIDI (MPU-401) emulation */
-#define COPR_PCM 0x0004 /* Digitized voice applications */
-#define COPR_SYNTH 0x0008 /* Music synthesis */
diff --git a/sound/oss/dev_table.c b/sound/oss/dev_table.c
deleted file mode 100644
index 6dad51596b70..000000000000
--- a/sound/oss/dev_table.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * sound/oss/dev_table.c
- *
- * Device call tables.
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- */
-
-#include <linux/init.h>
-
-#include "sound_config.h"
-
-struct audio_operations *audio_devs[MAX_AUDIO_DEV];
-EXPORT_SYMBOL(audio_devs);
-
-int num_audiodevs;
-EXPORT_SYMBOL(num_audiodevs);
-
-struct mixer_operations *mixer_devs[MAX_MIXER_DEV];
-EXPORT_SYMBOL(mixer_devs);
-
-int num_mixers;
-EXPORT_SYMBOL(num_mixers);
-
-struct synth_operations *synth_devs[MAX_SYNTH_DEV+MAX_MIDI_DEV];
-EXPORT_SYMBOL(synth_devs);
-
-int num_synths;
-
-struct midi_operations *midi_devs[MAX_MIDI_DEV];
-EXPORT_SYMBOL(midi_devs);
-
-int num_midis;
-EXPORT_SYMBOL(num_midis);
-
-struct sound_timer_operations *sound_timer_devs[MAX_TIMER_DEV] = {
- &default_sound_timer, NULL
-};
-EXPORT_SYMBOL(sound_timer_devs);
-
-int num_sound_timers = 1;
-
-
-static int sound_alloc_audiodev(void);
-
-int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver,
- int driver_size, int flags, unsigned int format_mask,
- void *devc, int dma1, int dma2)
-{
- struct audio_driver *d;
- struct audio_operations *op;
- int num;
-
- if (vers != AUDIO_DRIVER_VERSION || driver_size > sizeof(struct audio_driver)) {
- printk(KERN_ERR "Sound: Incompatible audio driver for %s\n", name);
- return -EINVAL;
- }
- num = sound_alloc_audiodev();
-
- if (num == -1) {
- printk(KERN_ERR "sound: Too many audio drivers\n");
- return -EBUSY;
- }
- d = (struct audio_driver *) (sound_mem_blocks[sound_nblocks] = vmalloc(sizeof(struct audio_driver)));
- sound_nblocks++;
- if (sound_nblocks >= MAX_MEM_BLOCKS)
- sound_nblocks = MAX_MEM_BLOCKS - 1;
-
- op = (struct audio_operations *) (sound_mem_blocks[sound_nblocks] = vzalloc(sizeof(struct audio_operations)));
- sound_nblocks++;
- if (sound_nblocks >= MAX_MEM_BLOCKS)
- sound_nblocks = MAX_MEM_BLOCKS - 1;
-
- if (d == NULL || op == NULL) {
- printk(KERN_ERR "Sound: Can't allocate driver for (%s)\n", name);
- sound_unload_audiodev(num);
- return -ENOMEM;
- }
- init_waitqueue_head(&op->in_sleeper);
- init_waitqueue_head(&op->out_sleeper);
- init_waitqueue_head(&op->poll_sleeper);
- if (driver_size < sizeof(struct audio_driver))
- memset((char *) d, 0, sizeof(struct audio_driver));
-
- memcpy((char *) d, (char *) driver, driver_size);
-
- op->d = d;
- strlcpy(op->name, name, sizeof(op->name));
- op->flags = flags;
- op->format_mask = format_mask;
- op->devc = devc;
-
- /*
- * Hardcoded defaults
- */
- audio_devs[num] = op;
-
- DMAbuf_init(num, dma1, dma2);
-
- audio_init_devices();
- return num;
-}
-EXPORT_SYMBOL(sound_install_audiodrv);
-
-int sound_install_mixer(int vers, char *name, struct mixer_operations *driver,
- int driver_size, void *devc)
-{
- struct mixer_operations *op;
-
- int n = sound_alloc_mixerdev();
-
- if (n == -1) {
- printk(KERN_ERR "Sound: Too many mixer drivers\n");
- return -EBUSY;
- }
- if (vers != MIXER_DRIVER_VERSION ||
- driver_size > sizeof(struct mixer_operations)) {
- printk(KERN_ERR "Sound: Incompatible mixer driver for %s\n", name);
- return -EINVAL;
- }
-
-	/* FIXME: This leaks a mixer_operations struct every time it's called
- until you unload sound! */
-
- op = (struct mixer_operations *) (sound_mem_blocks[sound_nblocks] = vzalloc(sizeof(struct mixer_operations)));
- sound_nblocks++;
- if (sound_nblocks >= MAX_MEM_BLOCKS)
- sound_nblocks = MAX_MEM_BLOCKS - 1;
-
- if (op == NULL) {
- printk(KERN_ERR "Sound: Can't allocate mixer driver for (%s)\n", name);
- return -ENOMEM;
- }
- memcpy((char *) op, (char *) driver, driver_size);
-
- strlcpy(op->name, name, sizeof(op->name));
- op->devc = devc;
-
- mixer_devs[n] = op;
- return n;
-}
-EXPORT_SYMBOL(sound_install_mixer);
-
-void sound_unload_audiodev(int dev)
-{
- if (dev != -1) {
- DMAbuf_deinit(dev);
- audio_devs[dev] = NULL;
- unregister_sound_dsp((dev<<4)+3);
- }
-}
-EXPORT_SYMBOL(sound_unload_audiodev);
-
-static int sound_alloc_audiodev(void)
-{
- int i = register_sound_dsp(&oss_sound_fops, -1);
- if(i==-1)
- return i;
- i>>=4;
- if(i>=num_audiodevs)
- num_audiodevs = i + 1;
- return i;
-}
-
-int sound_alloc_mididev(void)
-{
- int i = register_sound_midi(&oss_sound_fops, -1);
- if(i==-1)
- return i;
- i>>=4;
- if(i>=num_midis)
- num_midis = i + 1;
- return i;
-}
-EXPORT_SYMBOL(sound_alloc_mididev);
-
-int sound_alloc_synthdev(void)
-{
- int i;
-
- for (i = 0; i < MAX_SYNTH_DEV; i++) {
- if (synth_devs[i] == NULL) {
- if (i >= num_synths)
- num_synths++;
- return i;
- }
- }
- return -1;
-}
-EXPORT_SYMBOL(sound_alloc_synthdev);
-
-int sound_alloc_mixerdev(void)
-{
- int i = register_sound_mixer(&oss_sound_fops, -1);
- if(i==-1)
- return -1;
- i>>=4;
- if(i>=num_mixers)
- num_mixers = i + 1;
- return i;
-}
-EXPORT_SYMBOL(sound_alloc_mixerdev);
-
-int sound_alloc_timerdev(void)
-{
- int i;
-
- for (i = 0; i < MAX_TIMER_DEV; i++) {
- if (sound_timer_devs[i] == NULL) {
- if (i >= num_sound_timers)
- num_sound_timers++;
- return i;
- }
- }
- return -1;
-}
-EXPORT_SYMBOL(sound_alloc_timerdev);
-
-void sound_unload_mixerdev(int dev)
-{
- if (dev != -1) {
- mixer_devs[dev] = NULL;
- unregister_sound_mixer(dev<<4);
- num_mixers--;
- }
-}
-EXPORT_SYMBOL(sound_unload_mixerdev);
-
-void sound_unload_mididev(int dev)
-{
- if (dev != -1) {
- midi_devs[dev] = NULL;
- unregister_sound_midi((dev<<4)+2);
- }
-}
-EXPORT_SYMBOL(sound_unload_mididev);
-
-void sound_unload_synthdev(int dev)
-{
- if (dev != -1)
- synth_devs[dev] = NULL;
-}
-EXPORT_SYMBOL(sound_unload_synthdev);
-
-void sound_unload_timerdev(int dev)
-{
- if (dev != -1)
- sound_timer_devs[dev] = NULL;
-}
-EXPORT_SYMBOL(sound_unload_timerdev);
-
diff --git a/sound/oss/dev_table.h b/sound/oss/dev_table.h
deleted file mode 100644
index 0199a317c5a9..000000000000
--- a/sound/oss/dev_table.h
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * dev_table.h
- *
- * Global definitions for device call tables
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- */
-
-
-#ifndef _DEV_TABLE_H_
-#define _DEV_TABLE_H_
-
-#include <linux/spinlock.h>
-/*
- * Sound card numbers 27 to 999. (1 to 26 are defined in soundcard.h)
- * Numbers 1000 to N are reserved for driver's internal use.
- */
-
-#define SNDCARD_DESKPROXL 27 /* Compaq Deskpro XL */
-#define SNDCARD_VIDC 28 /* ARMs VIDC */
-#define SNDCARD_SBPNP 29
-#define SNDCARD_SOFTOSS 36
-#define SNDCARD_VMIDI 37
-#define SNDCARD_OPL3SA1 38 /* Note: clash in msnd.h */
-#define SNDCARD_OPL3SA1_SB 39
-#define SNDCARD_OPL3SA1_MPU 40
-#define SNDCARD_WAVEFRONT 41
-#define SNDCARD_OPL3SA2 42
-#define SNDCARD_OPL3SA2_MPU 43
-#define SNDCARD_WAVEARTIST 44 /* Waveartist */
-#define SNDCARD_OPL3SA2_MSS 45 /* Originally missed */
-#define SNDCARD_AD1816 88
-
-/*
- * NOTE! NOTE! NOTE! NOTE!
- *
- * If you modify this file, please check the dev_table.c also.
- *
- * NOTE! NOTE! NOTE! NOTE!
- */
-
-struct driver_info
-{
- char *driver_id;
- int card_subtype; /* Driver specific. Usually 0 */
- int card_type; /* From soundcard.h */
- char *name;
- void (*attach) (struct address_info *hw_config);
- int (*probe) (struct address_info *hw_config);
- void (*unload) (struct address_info *hw_config);
-};
-
-struct card_info
-{
- int card_type; /* Link (search key) to the driver list */
- struct address_info config;
- int enabled;
- void *for_driver_use;
-};
-
-
-/*
- * Device specific parameters (used only by dmabuf.c)
- */
-#define MAX_SUB_BUFFERS (32*MAX_REALTIME_FACTOR)
-
-#define DMODE_NONE 0
-#define DMODE_OUTPUT PCM_ENABLE_OUTPUT
-#define DMODE_INPUT PCM_ENABLE_INPUT
-
-struct dma_buffparms
-{
- int dma_mode; /* DMODE_INPUT, DMODE_OUTPUT or DMODE_NONE */
- int closing;
-
- /*
- * Pointers to raw buffers
- */
-
- char *raw_buf;
- unsigned long raw_buf_phys;
- int buffsize;
-
- /*
- * Device state tables
- */
-
- unsigned long flags;
-#define DMA_BUSY 0x00000001
-#define DMA_RESTART 0x00000002
-#define DMA_ACTIVE 0x00000004
-#define DMA_STARTED 0x00000008
-#define DMA_EMPTY 0x00000010
-#define DMA_ALLOC_DONE 0x00000020
-#define DMA_SYNCING 0x00000040
-#define DMA_DIRTY 0x00000080
-#define DMA_POST 0x00000100
-#define DMA_NODMA 0x00000200
-#define DMA_NOTIMEOUT 0x00000400
-
- int open_mode;
-
- /*
- * Queue parameters.
- */
- int qlen;
- int qhead;
- int qtail;
- spinlock_t lock;
-
- int cfrag; /* Current incomplete fragment (write) */
-
- int nbufs;
- int counts[MAX_SUB_BUFFERS];
- int subdivision;
-
- int fragment_size;
- int needs_reorg;
- int max_fragments;
-
- int bytes_in_use;
-
- int underrun_count;
- unsigned long byte_counter;
- unsigned long user_counter;
- unsigned long max_byte_counter;
- int data_rate; /* Bytes/second */
-
- int mapping_flags;
-#define DMA_MAP_MAPPED 0x00000001
- char neutral_byte;
- int dma; /* DMA channel */
-
- int applic_profile; /* Application profile (APF_*) */
- /* Interrupt callback stuff */
- void (*audio_callback) (int dev, int parm);
- int callback_parm;
-
- int buf_flags[MAX_SUB_BUFFERS];
-#define BUFF_EOF 0x00000001 /* Increment eof count */
-#define BUFF_DIRTY 0x00000002 /* Buffer written */
-};
-
-/*
- * Structure for use with various microcontrollers and DSP processors
- * in the recent sound cards.
- */
-typedef struct coproc_operations
-{
- char name[64];
- struct module *owner;
- int (*open) (void *devc, int sub_device);
- void (*close) (void *devc, int sub_device);
- int (*ioctl) (void *devc, unsigned int cmd, void __user * arg, int local);
- void (*reset) (void *devc);
-
- void *devc; /* Driver specific info */
-} coproc_operations;
-
-struct audio_driver
-{
- struct module *owner;
- int (*open) (int dev, int mode);
- void (*close) (int dev);
- void (*output_block) (int dev, unsigned long buf,
- int count, int intrflag);
- void (*start_input) (int dev, unsigned long buf,
- int count, int intrflag);
- int (*ioctl) (int dev, unsigned int cmd, void __user * arg);
- int (*prepare_for_input) (int dev, int bufsize, int nbufs);
- int (*prepare_for_output) (int dev, int bufsize, int nbufs);
- void (*halt_io) (int dev);
- int (*local_qlen)(int dev);
- void (*copy_user) (int dev,
- char *localbuf, int localoffs,
- const char __user *userbuf, int useroffs,
- int max_in, int max_out,
- int *used, int *returned,
- int len);
- void (*halt_input) (int dev);
- void (*halt_output) (int dev);
- void (*trigger) (int dev, int bits);
- int (*set_speed)(int dev, int speed);
- unsigned int (*set_bits)(int dev, unsigned int bits);
- short (*set_channels)(int dev, short channels);
-	void (*postprocess_write)(int dev);	/* Device specific postprocessing for written data */
-	void (*preprocess_read)(int dev);	/* Device specific preprocessing for read data */
- void (*mmap)(int dev);
-};
-
-struct audio_operations
-{
- char name[128];
- int flags;
-#define NOTHING_SPECIAL 0x00
-#define NEEDS_RESTART 0x01
-#define DMA_AUTOMODE 0x02
-#define DMA_DUPLEX 0x04
-#define DMA_PSEUDO_AUTOMODE 0x08
-#define DMA_HARDSTOP 0x10
-#define DMA_EXACT 0x40
-#define DMA_NORESET 0x80
- int format_mask; /* Bitmask for supported audio formats */
- void *devc; /* Driver specific info */
- struct audio_driver *d;
- void *portc; /* Driver specific info */
- struct dma_buffparms *dmap_in, *dmap_out;
- struct coproc_operations *coproc;
- int mixer_dev;
- int enable_bits;
- int open_mode;
- int go;
- int min_fragment; /* 0 == unlimited */
- int max_fragment; /* 0 == unlimited */
- int parent_dev; /* 0 -> no parent, 1 to n -> parent=parent_dev+1 */
-
- /* fields formerly in dmabuf.c */
- wait_queue_head_t in_sleeper;
- wait_queue_head_t out_sleeper;
- wait_queue_head_t poll_sleeper;
-
- /* fields formerly in audio.c */
- int audio_mode;
-
-#define AM_NONE 0
-#define AM_WRITE OPEN_WRITE
-#define AM_READ OPEN_READ
-
- int local_format;
- int audio_format;
- int local_conversion;
-#define CNV_MU_LAW 0x00000001
-
- /* large structures at the end to keep offsets small */
- struct dma_buffparms dmaps[2];
-};
-
-int *load_mixer_volumes(char *name, int *levels, int present);
-
-struct mixer_operations
-{
- struct module *owner;
- char id[16];
- char name[64];
- int (*ioctl) (int dev, unsigned int cmd, void __user * arg);
-
- void *devc;
- int modify_counter;
-};
-
-struct synth_operations
-{
- struct module *owner;
- char *id; /* Unique identifier (ASCII) max 29 char */
- struct synth_info *info;
- int midi_dev;
- int synth_type;
- int synth_subtype;
-
- int (*open) (int dev, int mode);
- void (*close) (int dev);
- int (*ioctl) (int dev, unsigned int cmd, void __user * arg);
- int (*kill_note) (int dev, int voice, int note, int velocity);
- int (*start_note) (int dev, int voice, int note, int velocity);
- int (*set_instr) (int dev, int voice, int instr);
- void (*reset) (int dev);
- void (*hw_control) (int dev, unsigned char *event);
- int (*load_patch) (int dev, int format, const char __user *addr,
- int count, int pmgr_flag);
- void (*aftertouch) (int dev, int voice, int pressure);
- void (*controller) (int dev, int voice, int ctrl_num, int value);
- void (*panning) (int dev, int voice, int value);
- void (*volume_method) (int dev, int mode);
- void (*bender) (int dev, int chn, int value);
- int (*alloc_voice) (int dev, int chn, int note, struct voice_alloc_info *alloc);
- void (*setup_voice) (int dev, int voice, int chn);
- int (*send_sysex)(int dev, unsigned char *bytes, int len);
-
- struct voice_alloc_info alloc;
- struct channel_info chn_info[16];
- int emulation;
-#define EMU_GM 1 /* General MIDI */
-#define EMU_XG 2 /* Yamaha XG */
-#define MAX_SYSEX_BUF 64
- unsigned char sysex_buf[MAX_SYSEX_BUF];
- int sysex_ptr;
-};
-
-struct midi_input_info
-{
- /* MIDI input scanner variables */
-#define MI_MAX 10
- volatile int m_busy;
- unsigned char m_buf[MI_MAX];
- unsigned char m_prev_status; /* For running status */
- int m_ptr;
-#define MST_INIT 0
-#define MST_DATA 1
-#define MST_SYSEX 2
- int m_state;
- int m_left;
-};
-
-struct midi_operations
-{
- struct module *owner;
- struct midi_info info;
- struct synth_operations *converter;
- struct midi_input_info in_info;
- int (*open) (int dev, int mode,
- void (*inputintr)(int dev, unsigned char data),
- void (*outputintr)(int dev)
- );
- void (*close) (int dev);
- int (*ioctl) (int dev, unsigned int cmd, void __user * arg);
- int (*outputc) (int dev, unsigned char data);
- int (*start_read) (int dev);
- int (*end_read) (int dev);
- void (*kick)(int dev);
- int (*command) (int dev, unsigned char *data);
- int (*buffer_status) (int dev);
- int (*prefix_cmd) (int dev, unsigned char status);
- struct coproc_operations *coproc;
- void *devc;
-};
-
-struct sound_lowlev_timer
-{
- int dev;
- int priority;
- unsigned int (*tmr_start)(int dev, unsigned int usecs);
- void (*tmr_disable)(int dev);
- void (*tmr_restart)(int dev);
-};
-
-struct sound_timer_operations
-{
- struct module *owner;
- struct sound_timer_info info;
- int priority;
- int devlink;
- int (*open)(int dev, int mode);
- void (*close)(int dev);
- int (*event)(int dev, unsigned char *ev);
- unsigned long (*get_time)(int dev);
- int (*ioctl) (int dev, unsigned int cmd, void __user * arg);
- void (*arm_timer)(int dev, long time);
-};
-
-extern struct sound_timer_operations default_sound_timer;
-
-extern struct audio_operations *audio_devs[MAX_AUDIO_DEV];
-extern int num_audiodevs;
-extern struct mixer_operations *mixer_devs[MAX_MIXER_DEV];
-extern int num_mixers;
-extern struct synth_operations *synth_devs[MAX_SYNTH_DEV+MAX_MIDI_DEV];
-extern int num_synths;
-extern struct midi_operations *midi_devs[MAX_MIDI_DEV];
-extern int num_midis;
-extern struct sound_timer_operations * sound_timer_devs[MAX_TIMER_DEV];
-extern int num_sound_timers;
-
-extern int sound_map_buffer (int dev, struct dma_buffparms *dmap, buffmem_desc *info);
-void sound_timer_init (struct sound_lowlev_timer *t, char *name);
-void sound_dma_intr (int dev, struct dma_buffparms *dmap, int chan);
-
-#define AUDIO_DRIVER_VERSION 2
-#define MIXER_DRIVER_VERSION 2
-int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver,
- int driver_size, int flags, unsigned int format_mask,
- void *devc, int dma1, int dma2);
-int sound_install_mixer(int vers, char *name, struct mixer_operations *driver,
- int driver_size, void *devc);
-
-void sound_unload_audiodev(int dev);
-void sound_unload_mixerdev(int dev);
-void sound_unload_mididev(int dev);
-void sound_unload_synthdev(int dev);
-void sound_unload_timerdev(int dev);
-int sound_alloc_mixerdev(void);
-int sound_alloc_timerdev(void);
-int sound_alloc_synthdev(void);
-int sound_alloc_mididev(void);
-#endif /* _DEV_TABLE_H_ */
-
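The header removed above declared the registration interface for OSS low-level audio drivers. As a rough, hypothetical sketch of how a card driver hooked into it (the mydrv_* callbacks, devc cookie and DMA channel numbers are invented for illustration; only the prototypes, version and flag macros come from dev_table.h):

static int  mydrv_open(int dev, int mode);
static void mydrv_close(int dev);
static void mydrv_output_block(int dev, unsigned long buf, int count, int intrflag);
static void mydrv_start_input(int dev, unsigned long buf, int count, int intrflag);
static int  mydrv_prepare_for_input(int dev, int bufsize, int nbufs);
static int  mydrv_prepare_for_output(int dev, int bufsize, int nbufs);
static void mydrv_halt_io(int dev);

static struct audio_driver mydrv_audio_driver = {
	.open               = mydrv_open,
	.close              = mydrv_close,
	.output_block       = mydrv_output_block,
	.start_input        = mydrv_start_input,
	.prepare_for_input  = mydrv_prepare_for_input,
	.prepare_for_output = mydrv_prepare_for_output,
	.halt_io            = mydrv_halt_io,
	/* set_speed/set_bits/set_channels may be left NULL; dmabuf.c (below)
	 * fills in ioctl-based defaults via check_driver(). */
};

static int mydrv_dev = -1;

/* Register the device; a negative return value indicates failure. */
static int mydrv_attach(void *devc, int dma1, int dma2)
{
	mydrv_dev = sound_install_audiodrv(AUDIO_DRIVER_VERSION, "Example Card",
					   &mydrv_audio_driver,
					   sizeof(struct audio_driver),
					   DMA_DUPLEX | DMA_AUTOMODE,
					   AFMT_U8 | AFMT_S16_LE,
					   devc, dma1, dma2);
	return mydrv_dev;
}

static void mydrv_detach(void)
{
	if (mydrv_dev >= 0)
		sound_unload_audiodev(mydrv_dev);
}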
diff --git a/sound/oss/dmabuf.c b/sound/oss/dmabuf.c
deleted file mode 100644
index c5dd396c66a2..000000000000
--- a/sound/oss/dmabuf.c
+++ /dev/null
@@ -1,1268 +0,0 @@
-/*
- * sound/oss/dmabuf.c
- *
- * The DMA buffer manager for digitized voice applications
- */
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- * Thomas Sailer : moved several static variables into struct audio_operations
- * (which is grossly misnamed btw.) because they have the same
- * lifetime as the rest in there and dynamic allocation saves
- * 12k or so
- * Thomas Sailer : remove {in,out}_sleep_flag. It was used for the sleeper to
- * determine if it was woken up by the expiring timeout or by
- * an explicit wake_up. The return value from schedule_timeout
- * can be used instead; if 0, the wakeup was due to the timeout.
- *
- * Rob Riggs Added persistent DMA buffers (1998/10/17)
- */
-
-#define BE_CONSERVATIVE
-#define SAMPLE_ROUNDUP 0
-
-#include <linux/mm.h>
-#include <linux/gfp.h>
-#include <linux/sched/signal.h>
-
-#include "sound_config.h"
-#include "sleep.h"
-
-#define DMAP_FREE_ON_CLOSE 0
-#define DMAP_KEEP_ON_CLOSE 1
-extern int sound_dmap_flag;
-
-static void dma_reset_output(int dev);
-static void dma_reset_input(int dev);
-static int local_start_dma(struct audio_operations *adev, unsigned long physaddr, int count, int dma_mode);
-
-
-
-static int debugmem; /* switched off by default */
-static int dma_buffsize = DSP_BUFFSIZE;
-
-static long dmabuf_timeout(struct dma_buffparms *dmap)
-{
- long tmout;
-
- tmout = (dmap->fragment_size * HZ) / dmap->data_rate;
- tmout += HZ / 5; /* Some safety distance */
- if (tmout < (HZ / 2))
- tmout = HZ / 2;
- if (tmout > 20 * HZ)
- tmout = 20 * HZ;
- return tmout;
-}
-
-static int sound_alloc_dmap(struct dma_buffparms *dmap)
-{
- char *start_addr, *end_addr;
- int dma_pagesize;
- int sz, size;
- struct page *page;
-
- dmap->mapping_flags &= ~DMA_MAP_MAPPED;
-
- if (dmap->raw_buf != NULL)
- return 0; /* Already done */
- if (dma_buffsize < 4096)
- dma_buffsize = 4096;
- dma_pagesize = (dmap->dma < 4) ? (64 * 1024) : (128 * 1024);
-
- /*
- * Now check for the Cyrix problem.
- */
-
- if(isa_dma_bridge_buggy==2)
- dma_pagesize=32768;
-
- dmap->raw_buf = NULL;
- dmap->buffsize = dma_buffsize;
- if (dmap->buffsize > dma_pagesize)
- dmap->buffsize = dma_pagesize;
- start_addr = NULL;
- /*
- * Now loop until we get a free buffer. Try to get smaller buffer if
- * it fails. Don't accept smaller than 8k buffer for performance
- * reasons.
- */
- while (start_addr == NULL && dmap->buffsize > PAGE_SIZE) {
- for (sz = 0, size = PAGE_SIZE; size < dmap->buffsize; sz++, size <<= 1);
- dmap->buffsize = PAGE_SIZE * (1 << sz);
- start_addr = (char *) __get_free_pages(GFP_ATOMIC|GFP_DMA|__GFP_NOWARN, sz);
- if (start_addr == NULL)
- dmap->buffsize /= 2;
- }
-
- if (start_addr == NULL) {
- printk(KERN_WARNING "Sound error: Couldn't allocate DMA buffer\n");
- return -ENOMEM;
- } else {
- /* make some checks */
- end_addr = start_addr + dmap->buffsize - 1;
-
- if (debugmem)
- printk(KERN_DEBUG "sound: start 0x%lx, end 0x%lx\n", (long) start_addr, (long) end_addr);
-
- /* now check if it fits into the same dma-pagesize */
-
- if (((long) start_addr & ~(dma_pagesize - 1)) != ((long) end_addr & ~(dma_pagesize - 1))
- || end_addr >= (char *) (MAX_DMA_ADDRESS)) {
- printk(KERN_ERR "sound: Got invalid address 0x%lx for %db DMA-buffer\n", (long) start_addr, dmap->buffsize);
- return -EFAULT;
- }
- }
- dmap->raw_buf = start_addr;
- dmap->raw_buf_phys = dma_map_single(NULL, start_addr, dmap->buffsize, DMA_BIDIRECTIONAL);
-
- for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
- SetPageReserved(page);
- return 0;
-}
-
-static void sound_free_dmap(struct dma_buffparms *dmap)
-{
- int sz, size;
- struct page *page;
- unsigned long start_addr, end_addr;
-
- if (dmap->raw_buf == NULL)
- return;
- if (dmap->mapping_flags & DMA_MAP_MAPPED)
- return; /* Don't free mmapped buffer. Will use it next time */
- for (sz = 0, size = PAGE_SIZE; size < dmap->buffsize; sz++, size <<= 1);
-
- start_addr = (unsigned long) dmap->raw_buf;
- end_addr = start_addr + dmap->buffsize;
-
- for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
- ClearPageReserved(page);
-
- dma_unmap_single(NULL, dmap->raw_buf_phys, dmap->buffsize, DMA_BIDIRECTIONAL);
- free_pages((unsigned long) dmap->raw_buf, sz);
- dmap->raw_buf = NULL;
-}
-
-
-/* Intel version !!!!!!!!! */
-
-static int sound_start_dma(struct dma_buffparms *dmap, unsigned long physaddr, int count, int dma_mode)
-{
- unsigned long flags;
- int chan = dmap->dma;
-
- /* printk( "Start DMA%d %d, %d\n", chan, (int)(physaddr-dmap->raw_buf_phys), count); */
-
- flags = claim_dma_lock();
- disable_dma(chan);
- clear_dma_ff(chan);
- set_dma_mode(chan, dma_mode);
- set_dma_addr(chan, physaddr);
- set_dma_count(chan, count);
- enable_dma(chan);
- release_dma_lock(flags);
-
- return 0;
-}
-
-static void dma_init_buffers(struct dma_buffparms *dmap)
-{
- dmap->qlen = dmap->qhead = dmap->qtail = dmap->user_counter = 0;
- dmap->byte_counter = 0;
- dmap->max_byte_counter = 8000 * 60 * 60;
- dmap->bytes_in_use = dmap->buffsize;
-
- dmap->dma_mode = DMODE_NONE;
- dmap->mapping_flags = 0;
- dmap->neutral_byte = 0x80;
- dmap->data_rate = 8000;
- dmap->cfrag = -1;
- dmap->closing = 0;
- dmap->nbufs = 1;
- dmap->flags = DMA_BUSY; /* Other flags off */
-}
-
-static int open_dmap(struct audio_operations *adev, int mode, struct dma_buffparms *dmap)
-{
- int err;
-
- if (dmap->flags & DMA_BUSY)
- return -EBUSY;
- if ((err = sound_alloc_dmap(dmap)) < 0)
- return err;
-
- if (dmap->raw_buf == NULL) {
- printk(KERN_WARNING "Sound: DMA buffers not available\n");
- return -ENOSPC; /* Memory allocation failed during boot */
- }
- if (dmap->dma >= 0 && sound_open_dma(dmap->dma, adev->name)) {
- printk(KERN_WARNING "Unable to grab(2) DMA%d for the audio driver\n", dmap->dma);
- return -EBUSY;
- }
- dma_init_buffers(dmap);
- spin_lock_init(&dmap->lock);
- dmap->open_mode = mode;
- dmap->subdivision = dmap->underrun_count = 0;
- dmap->fragment_size = 0;
- dmap->max_fragments = 65536; /* Just a large value */
- dmap->byte_counter = 0;
- dmap->max_byte_counter = 8000 * 60 * 60;
- dmap->applic_profile = APF_NORMAL;
- dmap->needs_reorg = 1;
- dmap->audio_callback = NULL;
- dmap->callback_parm = 0;
- return 0;
-}
-
-static void close_dmap(struct audio_operations *adev, struct dma_buffparms *dmap)
-{
- unsigned long flags;
-
- if (dmap->dma >= 0) {
- sound_close_dma(dmap->dma);
- flags=claim_dma_lock();
- disable_dma(dmap->dma);
- release_dma_lock(flags);
- }
- if (dmap->flags & DMA_BUSY)
- dmap->dma_mode = DMODE_NONE;
- dmap->flags &= ~DMA_BUSY;
-
- if (sound_dmap_flag == DMAP_FREE_ON_CLOSE)
- sound_free_dmap(dmap);
-}
-
-
-static unsigned int default_set_bits(int dev, unsigned int bits)
-{
- mm_segment_t fs = get_fs();
-
- set_fs(get_ds());
- audio_devs[dev]->d->ioctl(dev, SNDCTL_DSP_SETFMT, (void __user *)&bits);
- set_fs(fs);
- return bits;
-}
-
-static int default_set_speed(int dev, int speed)
-{
- mm_segment_t fs = get_fs();
-
- set_fs(get_ds());
- audio_devs[dev]->d->ioctl(dev, SNDCTL_DSP_SPEED, (void __user *)&speed);
- set_fs(fs);
- return speed;
-}
-
-static short default_set_channels(int dev, short channels)
-{
- int c = channels;
- mm_segment_t fs = get_fs();
-
- set_fs(get_ds());
- audio_devs[dev]->d->ioctl(dev, SNDCTL_DSP_CHANNELS, (void __user *)&c);
- set_fs(fs);
- return c;
-}
-
-static void check_driver(struct audio_driver *d)
-{
- if (d->set_speed == NULL)
- d->set_speed = default_set_speed;
- if (d->set_bits == NULL)
- d->set_bits = default_set_bits;
- if (d->set_channels == NULL)
- d->set_channels = default_set_channels;
-}
-
-int DMAbuf_open(int dev, int mode)
-{
- struct audio_operations *adev = audio_devs[dev];
- int retval;
- struct dma_buffparms *dmap_in = NULL;
- struct dma_buffparms *dmap_out = NULL;
-
- if (!adev)
- return -ENXIO;
- if (!(adev->flags & DMA_DUPLEX))
- adev->dmap_in = adev->dmap_out;
- check_driver(adev->d);
-
- if ((retval = adev->d->open(dev, mode)) < 0)
- return retval;
- dmap_out = adev->dmap_out;
- dmap_in = adev->dmap_in;
- if (dmap_in == dmap_out)
- adev->flags &= ~DMA_DUPLEX;
-
- if (mode & OPEN_WRITE) {
- if ((retval = open_dmap(adev, mode, dmap_out)) < 0) {
- adev->d->close(dev);
- return retval;
- }
- }
- adev->enable_bits = mode;
-
- if (mode == OPEN_READ || (mode != OPEN_WRITE && (adev->flags & DMA_DUPLEX))) {
- if ((retval = open_dmap(adev, mode, dmap_in)) < 0) {
- adev->d->close(dev);
- if (mode & OPEN_WRITE)
- close_dmap(adev, dmap_out);
- return retval;
- }
- }
- adev->open_mode = mode;
- adev->go = 1;
-
- adev->d->set_bits(dev, 8);
- adev->d->set_channels(dev, 1);
- adev->d->set_speed(dev, DSP_DEFAULT_SPEED);
- if (adev->dmap_out->dma_mode == DMODE_OUTPUT)
- memset(adev->dmap_out->raw_buf, adev->dmap_out->neutral_byte,
- adev->dmap_out->bytes_in_use);
- return 0;
-}
-/* MUST not hold the spinlock */
-void DMAbuf_reset(int dev)
-{
- if (audio_devs[dev]->open_mode & OPEN_WRITE)
- dma_reset_output(dev);
-
- if (audio_devs[dev]->open_mode & OPEN_READ)
- dma_reset_input(dev);
-}
-
-static void dma_reset_output(int dev)
-{
- struct audio_operations *adev = audio_devs[dev];
- unsigned long flags,f ;
- struct dma_buffparms *dmap = adev->dmap_out;
-
- if (!(dmap->flags & DMA_STARTED)) /* DMA is not active */
- return;
-
- /*
- * First wait until the current fragment has been played completely
- */
- spin_lock_irqsave(&dmap->lock,flags);
- adev->dmap_out->flags |= DMA_SYNCING;
-
- adev->dmap_out->underrun_count = 0;
- if (!signal_pending(current) && adev->dmap_out->qlen &&
- adev->dmap_out->underrun_count == 0){
- spin_unlock_irqrestore(&dmap->lock,flags);
- oss_broken_sleep_on(&adev->out_sleeper, dmabuf_timeout(dmap));
- spin_lock_irqsave(&dmap->lock,flags);
- }
- adev->dmap_out->flags &= ~(DMA_SYNCING | DMA_ACTIVE);
-
- /*
- * Finally shut the device off
- */
- if (!(adev->flags & DMA_DUPLEX) || !adev->d->halt_output)
- adev->d->halt_io(dev);
- else
- adev->d->halt_output(dev);
- adev->dmap_out->flags &= ~DMA_STARTED;
-
- f=claim_dma_lock();
- clear_dma_ff(dmap->dma);
- disable_dma(dmap->dma);
- release_dma_lock(f);
-
- dmap->byte_counter = 0;
- reorganize_buffers(dev, adev->dmap_out, 0);
- dmap->qlen = dmap->qhead = dmap->qtail = dmap->user_counter = 0;
- spin_unlock_irqrestore(&dmap->lock,flags);
-}
-
-static void dma_reset_input(int dev)
-{
- struct audio_operations *adev = audio_devs[dev];
- unsigned long flags;
- struct dma_buffparms *dmap = adev->dmap_in;
-
- spin_lock_irqsave(&dmap->lock,flags);
- if (!(adev->flags & DMA_DUPLEX) || !adev->d->halt_input)
- adev->d->halt_io(dev);
- else
- adev->d->halt_input(dev);
- adev->dmap_in->flags &= ~DMA_STARTED;
-
- dmap->qlen = dmap->qhead = dmap->qtail = dmap->user_counter = 0;
- dmap->byte_counter = 0;
- reorganize_buffers(dev, adev->dmap_in, 1);
- spin_unlock_irqrestore(&dmap->lock,flags);
-}
-/* MUST be called while holding the dmap->lock */
-void DMAbuf_launch_output(int dev, struct dma_buffparms *dmap)
-{
- struct audio_operations *adev = audio_devs[dev];
-
- if (!((adev->enable_bits * adev->go) & PCM_ENABLE_OUTPUT))
- return; /* Don't start DMA yet */
- dmap->dma_mode = DMODE_OUTPUT;
-
- if (!(dmap->flags & DMA_ACTIVE) || !(adev->flags & DMA_AUTOMODE) || (dmap->flags & DMA_NODMA)) {
- if (!(dmap->flags & DMA_STARTED)) {
- reorganize_buffers(dev, dmap, 0);
- if (adev->d->prepare_for_output(dev, dmap->fragment_size, dmap->nbufs))
- return;
- if (!(dmap->flags & DMA_NODMA))
- local_start_dma(adev, dmap->raw_buf_phys, dmap->bytes_in_use,DMA_MODE_WRITE);
- dmap->flags |= DMA_STARTED;
- }
- if (dmap->counts[dmap->qhead] == 0)
- dmap->counts[dmap->qhead] = dmap->fragment_size;
- dmap->dma_mode = DMODE_OUTPUT;
- adev->d->output_block(dev, dmap->raw_buf_phys + dmap->qhead * dmap->fragment_size,
- dmap->counts[dmap->qhead], 1);
- if (adev->d->trigger)
- adev->d->trigger(dev,adev->enable_bits * adev->go);
- }
- dmap->flags |= DMA_ACTIVE;
-}
-
-int DMAbuf_sync(int dev)
-{
- struct audio_operations *adev = audio_devs[dev];
- unsigned long flags;
- int n = 0;
- struct dma_buffparms *dmap;
-
- if (!adev->go && !(adev->enable_bits & PCM_ENABLE_OUTPUT))
- return 0;
-
- if (adev->dmap_out->dma_mode == DMODE_OUTPUT) {
- dmap = adev->dmap_out;
- spin_lock_irqsave(&dmap->lock,flags);
- if (dmap->qlen > 0 && !(dmap->flags & DMA_ACTIVE))
- DMAbuf_launch_output(dev, dmap);
- adev->dmap_out->flags |= DMA_SYNCING;
- adev->dmap_out->underrun_count = 0;
- while (!signal_pending(current) && n++ < adev->dmap_out->nbufs &&
- adev->dmap_out->qlen && adev->dmap_out->underrun_count == 0) {
- long t = dmabuf_timeout(dmap);
- spin_unlock_irqrestore(&dmap->lock,flags);
- /* FIXME: not safe may miss events */
- t = oss_broken_sleep_on(&adev->out_sleeper, t);
- spin_lock_irqsave(&dmap->lock,flags);
- if (!t) {
- adev->dmap_out->flags &= ~DMA_SYNCING;
- spin_unlock_irqrestore(&dmap->lock,flags);
- return adev->dmap_out->qlen;
- }
- }
- adev->dmap_out->flags &= ~(DMA_SYNCING | DMA_ACTIVE);
-
- /*
- * Some devices such as GUS have huge amounts of on-board RAM for the
- * audio data. We have to wait until the device has finished playing.
- */
-
- /* still holding the lock */
- if (adev->d->local_qlen) { /* Device has hidden buffers */
- while (!signal_pending(current) &&
- adev->d->local_qlen(dev)){
- spin_unlock_irqrestore(&dmap->lock,flags);
- oss_broken_sleep_on(&adev->out_sleeper,
- dmabuf_timeout(dmap));
- spin_lock_irqsave(&dmap->lock,flags);
- }
- }
- spin_unlock_irqrestore(&dmap->lock,flags);
- }
- adev->dmap_out->dma_mode = DMODE_NONE;
- return adev->dmap_out->qlen;
-}
-
-int DMAbuf_release(int dev, int mode)
-{
- struct audio_operations *adev = audio_devs[dev];
- struct dma_buffparms *dmap;
- unsigned long flags;
-
- dmap = adev->dmap_out;
- if (adev->open_mode & OPEN_WRITE)
- adev->dmap_out->closing = 1;
-
- if (adev->open_mode & OPEN_READ){
- adev->dmap_in->closing = 1;
- dmap = adev->dmap_in;
- }
- if (adev->open_mode & OPEN_WRITE)
- if (!(adev->dmap_out->mapping_flags & DMA_MAP_MAPPED))
- if (!signal_pending(current) && (adev->dmap_out->dma_mode == DMODE_OUTPUT))
- DMAbuf_sync(dev);
- if (adev->dmap_out->dma_mode == DMODE_OUTPUT)
- memset(adev->dmap_out->raw_buf, adev->dmap_out->neutral_byte, adev->dmap_out->bytes_in_use);
-
- DMAbuf_reset(dev);
- spin_lock_irqsave(&dmap->lock,flags);
- adev->d->close(dev);
-
- if (adev->open_mode & OPEN_WRITE)
- close_dmap(adev, adev->dmap_out);
-
- if (adev->open_mode == OPEN_READ ||
- (adev->open_mode != OPEN_WRITE &&
- (adev->flags & DMA_DUPLEX)))
- close_dmap(adev, adev->dmap_in);
- adev->open_mode = 0;
- spin_unlock_irqrestore(&dmap->lock,flags);
- return 0;
-}
-/* called with dmap->lock held */
-int DMAbuf_activate_recording(int dev, struct dma_buffparms *dmap)
-{
- struct audio_operations *adev = audio_devs[dev];
- int err;
-
- if (!(adev->open_mode & OPEN_READ))
- return 0;
- if (!(adev->enable_bits & PCM_ENABLE_INPUT))
- return 0;
- if (dmap->dma_mode == DMODE_OUTPUT) { /* Direction change */
- /* release lock - it's not recursive */
- spin_unlock_irq(&dmap->lock);
- DMAbuf_sync(dev);
- DMAbuf_reset(dev);
- spin_lock_irq(&dmap->lock);
- dmap->dma_mode = DMODE_NONE;
- }
- if (!dmap->dma_mode) {
- reorganize_buffers(dev, dmap, 1);
- if ((err = adev->d->prepare_for_input(dev,
- dmap->fragment_size, dmap->nbufs)) < 0)
- return err;
- dmap->dma_mode = DMODE_INPUT;
- }
- if (!(dmap->flags & DMA_ACTIVE)) {
- if (dmap->needs_reorg)
- reorganize_buffers(dev, dmap, 0);
- local_start_dma(adev, dmap->raw_buf_phys, dmap->bytes_in_use, DMA_MODE_READ);
- adev->d->start_input(dev, dmap->raw_buf_phys + dmap->qtail * dmap->fragment_size,
- dmap->fragment_size, 0);
- dmap->flags |= DMA_ACTIVE;
- if (adev->d->trigger)
- adev->d->trigger(dev, adev->enable_bits * adev->go);
- }
- return 0;
-}
-/* acquires lock */
-int DMAbuf_getrdbuffer(int dev, char **buf, int *len, int dontblock)
-{
- struct audio_operations *adev = audio_devs[dev];
- unsigned long flags;
- int err = 0, n = 0;
- struct dma_buffparms *dmap = adev->dmap_in;
-
- if (!(adev->open_mode & OPEN_READ))
- return -EIO;
- spin_lock_irqsave(&dmap->lock,flags);
- if (dmap->needs_reorg)
- reorganize_buffers(dev, dmap, 0);
- if (adev->dmap_in->mapping_flags & DMA_MAP_MAPPED) {
-/* printk(KERN_WARNING "Sound: Can't read from mmapped device (1)\n");*/
- spin_unlock_irqrestore(&dmap->lock,flags);
- return -EINVAL;
- } else while (dmap->qlen <= 0 && n++ < 10) {
- long timeout = MAX_SCHEDULE_TIMEOUT;
- if (!(adev->enable_bits & PCM_ENABLE_INPUT) || !adev->go) {
- spin_unlock_irqrestore(&dmap->lock,flags);
- return -EAGAIN;
- }
- if ((err = DMAbuf_activate_recording(dev, dmap)) < 0) {
- spin_unlock_irqrestore(&dmap->lock,flags);
- return err;
- }
- /* Wait for the next block */
-
- if (dontblock) {
- spin_unlock_irqrestore(&dmap->lock,flags);
- return -EAGAIN;
- }
- if (adev->go)
- timeout = dmabuf_timeout(dmap);
-
- spin_unlock_irqrestore(&dmap->lock,flags);
- timeout = oss_broken_sleep_on(&adev->in_sleeper, timeout);
- if (!timeout) {
- /* FIXME: include device name */
- err = -EIO;
- printk(KERN_WARNING "Sound: DMA (input) timed out - IRQ/DRQ config error?\n");
- dma_reset_input(dev);
- } else
- err = -EINTR;
- spin_lock_irqsave(&dmap->lock,flags);
- }
- spin_unlock_irqrestore(&dmap->lock,flags);
-
- if (dmap->qlen <= 0)
- return err ? err : -EINTR;
- *buf = &dmap->raw_buf[dmap->qhead * dmap->fragment_size + dmap->counts[dmap->qhead]];
- *len = dmap->fragment_size - dmap->counts[dmap->qhead];
-
- return dmap->qhead;
-}
-
-int DMAbuf_rmchars(int dev, int buff_no, int c)
-{
- struct audio_operations *adev = audio_devs[dev];
- struct dma_buffparms *dmap = adev->dmap_in;
- int p = dmap->counts[dmap->qhead] + c;
-
- if (dmap->mapping_flags & DMA_MAP_MAPPED)
- {
-/* printk("Sound: Can't read from mmapped device (2)\n");*/
- return -EINVAL;
- }
- else if (dmap->qlen <= 0)
- return -EIO;
- else if (p >= dmap->fragment_size) { /* This buffer is completely empty */
- dmap->counts[dmap->qhead] = 0;
- dmap->qlen--;
- dmap->qhead = (dmap->qhead + 1) % dmap->nbufs;
- }
- else dmap->counts[dmap->qhead] = p;
-
- return 0;
-}
-/* MUST be called with dmap->lock held */
-int DMAbuf_get_buffer_pointer(int dev, struct dma_buffparms *dmap, int direction)
-{
- /*
- * Try to approximate the active byte position of the DMA pointer within the
- * buffer area as well as possible.
- */
-
- int pos;
- unsigned long f;
-
- if (!(dmap->flags & DMA_ACTIVE))
- pos = 0;
- else {
- int chan = dmap->dma;
-
- f=claim_dma_lock();
- clear_dma_ff(chan);
-
- if(!isa_dma_bridge_buggy)
- disable_dma(dmap->dma);
-
- pos = get_dma_residue(chan);
-
- pos = dmap->bytes_in_use - pos;
-
- if (!(dmap->mapping_flags & DMA_MAP_MAPPED)) {
- if (direction == DMODE_OUTPUT) {
- if (dmap->qhead == 0)
- if (pos > dmap->fragment_size)
- pos = 0;
- } else {
- if (dmap->qtail == 0)
- if (pos > dmap->fragment_size)
- pos = 0;
- }
- }
- if (pos < 0)
- pos = 0;
- if (pos >= dmap->bytes_in_use)
- pos = 0;
-
- if(!isa_dma_bridge_buggy)
- enable_dma(dmap->dma);
-
- release_dma_lock(f);
- }
- /* printk( "%04x ", pos); */
-
- return pos;
-}
-
-/*
- * DMAbuf_start_devices() is called by the /dev/music driver to start
- * one or more audio devices at the desired moment.
- */
-
-void DMAbuf_start_devices(unsigned int devmask)
-{
- struct audio_operations *adev;
- int dev;
-
- for (dev = 0; dev < num_audiodevs; dev++) {
- if (!(devmask & (1 << dev)))
- continue;
- if (!(adev = audio_devs[dev]))
- continue;
- if (adev->open_mode == 0)
- continue;
- if (adev->go)
- continue;
- /* OK to start the device */
- adev->go = 1;
- if (adev->d->trigger)
- adev->d->trigger(dev,adev->enable_bits * adev->go);
- }
-}
-/* called via poll without a lock? */
-int DMAbuf_space_in_queue(int dev)
-{
- struct audio_operations *adev = audio_devs[dev];
- int len, max, tmp;
- struct dma_buffparms *dmap = adev->dmap_out;
- int lim = dmap->nbufs;
-
- if (lim < 2)
- lim = 2;
-
- if (dmap->qlen >= lim) /* No space at all */
- return 0;
-
- /*
- * Verify that there are no more pending buffers than the limit
- * defined by the process.
- */
-
- max = dmap->max_fragments;
- if (max > lim)
- max = lim;
- len = dmap->qlen;
-
- if (adev->d->local_qlen) {
- tmp = adev->d->local_qlen(dev);
- if (tmp && len)
- tmp--; /* This buffer has been counted twice */
- len += tmp;
- }
- if (dmap->byte_counter % dmap->fragment_size) /* There is a partial fragment */
- len = len + 1;
-
- if (len >= max)
- return 0;
- return max - len;
-}
-/* MUST not hold the spinlock - this function may sleep */
-static int output_sleep(int dev, int dontblock)
-{
- struct audio_operations *adev = audio_devs[dev];
- int err = 0;
- struct dma_buffparms *dmap = adev->dmap_out;
- long timeout;
- long timeout_value;
-
- if (dontblock)
- return -EAGAIN;
- if (!(adev->enable_bits & PCM_ENABLE_OUTPUT))
- return -EAGAIN;
-
- /*
- * Wait for free space
- */
- if (signal_pending(current))
- return -EINTR;
- timeout = (adev->go && !(dmap->flags & DMA_NOTIMEOUT));
- if (timeout)
- timeout_value = dmabuf_timeout(dmap);
- else
- timeout_value = MAX_SCHEDULE_TIMEOUT;
- timeout_value = oss_broken_sleep_on(&adev->out_sleeper, timeout_value);
- if (timeout != MAX_SCHEDULE_TIMEOUT && !timeout_value) {
- printk(KERN_WARNING "Sound: DMA (output) timed out - IRQ/DRQ config error?\n");
- dma_reset_output(dev);
- } else {
- if (signal_pending(current))
- err = -EINTR;
- }
- return err;
-}
-/* called with the lock held */
-static int find_output_space(int dev, char **buf, int *size)
-{
- struct audio_operations *adev = audio_devs[dev];
- struct dma_buffparms *dmap = adev->dmap_out;
- unsigned long active_offs;
- long len, offs;
- int maxfrags;
- int occupied_bytes = (dmap->user_counter % dmap->fragment_size);
-
- *buf = dmap->raw_buf;
- if (!(maxfrags = DMAbuf_space_in_queue(dev)) && !occupied_bytes)
- return 0;
-
-#ifdef BE_CONSERVATIVE
- active_offs = dmap->byte_counter + dmap->qhead * dmap->fragment_size;
-#else
- active_offs = max(DMAbuf_get_buffer_pointer(dev, dmap, DMODE_OUTPUT), 0);
- /* Check for pointer wrapping situation */
- if (active_offs >= dmap->bytes_in_use)
- active_offs = 0;
- active_offs += dmap->byte_counter;
-#endif
-
- offs = (dmap->user_counter % dmap->bytes_in_use) & ~SAMPLE_ROUNDUP;
- if (offs < 0 || offs >= dmap->bytes_in_use) {
- printk(KERN_ERR "Sound: Got unexpected offs %ld. Giving up.\n", offs);
- printk("Counter = %ld, bytes=%d\n", dmap->user_counter, dmap->bytes_in_use);
- return 0;
- }
- *buf = dmap->raw_buf + offs;
-
- len = active_offs + dmap->bytes_in_use - dmap->user_counter; /* Number of unused bytes in buffer */
-
- if ((offs + len) > dmap->bytes_in_use)
- len = dmap->bytes_in_use - offs;
- if (len < 0) {
- return 0;
- }
- if (len > ((maxfrags * dmap->fragment_size) - occupied_bytes))
- len = (maxfrags * dmap->fragment_size) - occupied_bytes;
- *size = len & ~SAMPLE_ROUNDUP;
- return (*size > 0);
-}
-/* acquires lock */
-int DMAbuf_getwrbuffer(int dev, char **buf, int *size, int dontblock)
-{
- struct audio_operations *adev = audio_devs[dev];
- unsigned long flags;
- int err = -EIO;
- struct dma_buffparms *dmap = adev->dmap_out;
-
- if (dmap->mapping_flags & DMA_MAP_MAPPED) {
-/* printk(KERN_DEBUG "Sound: Can't write to mmapped device (3)\n");*/
- return -EINVAL;
- }
- spin_lock_irqsave(&dmap->lock,flags);
- if (dmap->needs_reorg)
- reorganize_buffers(dev, dmap, 0);
-
- if (dmap->dma_mode == DMODE_INPUT) { /* Direction change */
- spin_unlock_irqrestore(&dmap->lock,flags);
- DMAbuf_reset(dev);
- spin_lock_irqsave(&dmap->lock,flags);
- }
- dmap->dma_mode = DMODE_OUTPUT;
-
- while (find_output_space(dev, buf, size) <= 0) {
- spin_unlock_irqrestore(&dmap->lock,flags);
- if ((err = output_sleep(dev, dontblock)) < 0) {
- return err;
- }
- spin_lock_irqsave(&dmap->lock,flags);
- }
-
- spin_unlock_irqrestore(&dmap->lock,flags);
- return 0;
-}
-/* has to acquire dmap->lock */
-int DMAbuf_move_wrpointer(int dev, int l)
-{
- struct audio_operations *adev = audio_devs[dev];
- struct dma_buffparms *dmap = adev->dmap_out;
- unsigned long ptr;
- unsigned long end_ptr, p;
- int post;
- unsigned long flags;
-
- spin_lock_irqsave(&dmap->lock,flags);
- post= (dmap->flags & DMA_POST);
- ptr = (dmap->user_counter / dmap->fragment_size) * dmap->fragment_size;
-
- dmap->flags &= ~DMA_POST;
- dmap->cfrag = -1;
- dmap->user_counter += l;
- dmap->flags |= DMA_DIRTY;
-
- if (dmap->byte_counter >= dmap->max_byte_counter) {
- /* Wrap the byte counters */
- long decr = dmap->byte_counter;
- dmap->byte_counter = (dmap->byte_counter % dmap->bytes_in_use);
- decr -= dmap->byte_counter;
- dmap->user_counter -= decr;
- }
- end_ptr = (dmap->user_counter / dmap->fragment_size) * dmap->fragment_size;
-
- p = (dmap->user_counter - 1) % dmap->bytes_in_use;
- dmap->neutral_byte = dmap->raw_buf[p];
-
- /* Update the fragment based bookkeeping too */
- while (ptr < end_ptr) {
- dmap->counts[dmap->qtail] = dmap->fragment_size;
- dmap->qtail = (dmap->qtail + 1) % dmap->nbufs;
- dmap->qlen++;
- ptr += dmap->fragment_size;
- }
-
- dmap->counts[dmap->qtail] = dmap->user_counter - ptr;
-
- /*
- * Let the low level driver perform some postprocessing to
- * the written data.
- */
- if (adev->d->postprocess_write)
- adev->d->postprocess_write(dev);
-
- if (!(dmap->flags & DMA_ACTIVE))
- if (dmap->qlen > 1 || (dmap->qlen > 0 && (post || dmap->qlen >= dmap->nbufs - 1)))
- DMAbuf_launch_output(dev, dmap);
-
- spin_unlock_irqrestore(&dmap->lock,flags);
- return 0;
-}
-
-int DMAbuf_start_dma(int dev, unsigned long physaddr, int count, int dma_mode)
-{
- struct audio_operations *adev = audio_devs[dev];
- struct dma_buffparms *dmap = (dma_mode == DMA_MODE_WRITE) ? adev->dmap_out : adev->dmap_in;
-
- if (dmap->raw_buf == NULL) {
- printk(KERN_ERR "sound: DMA buffer(1) == NULL\n");
- printk("Device %d, chn=%s\n", dev, (dmap == adev->dmap_out) ? "out" : "in");
- return 0;
- }
- if (dmap->dma < 0)
- return 0;
- sound_start_dma(dmap, physaddr, count, dma_mode);
- return count;
-}
-EXPORT_SYMBOL(DMAbuf_start_dma);
-
-static int local_start_dma(struct audio_operations *adev, unsigned long physaddr, int count, int dma_mode)
-{
- struct dma_buffparms *dmap = (dma_mode == DMA_MODE_WRITE) ? adev->dmap_out : adev->dmap_in;
-
- if (dmap->raw_buf == NULL) {
- printk(KERN_ERR "sound: DMA buffer(2) == NULL\n");
- printk(KERN_ERR "Device %s, chn=%s\n", adev->name, (dmap == adev->dmap_out) ? "out" : "in");
- return 0;
- }
- if (dmap->flags & DMA_NODMA)
- return 1;
- if (dmap->dma < 0)
- return 0;
- sound_start_dma(dmap, dmap->raw_buf_phys, dmap->bytes_in_use, dma_mode | DMA_AUTOINIT);
- dmap->flags |= DMA_STARTED;
- return count;
-}
-
-static void finish_output_interrupt(int dev, struct dma_buffparms *dmap)
-{
- struct audio_operations *adev = audio_devs[dev];
-
- if (dmap->audio_callback != NULL)
- dmap->audio_callback(dev, dmap->callback_parm);
- wake_up(&adev->out_sleeper);
- wake_up(&adev->poll_sleeper);
-}
-/* called with dmap->lock held in irq context */
-static void do_outputintr(int dev, int dummy)
-{
- struct audio_operations *adev = audio_devs[dev];
- struct dma_buffparms *dmap = adev->dmap_out;
- int this_fragment;
-
- if (dmap->raw_buf == NULL) {
- printk(KERN_ERR "Sound: Error. Audio interrupt (%d) after freeing buffers.\n", dev);
- return;
- }
- if (dmap->mapping_flags & DMA_MAP_MAPPED) { /* Virtual memory mapped access */
- /* mmapped access */
- dmap->qhead = (dmap->qhead + 1) % dmap->nbufs;
- if (dmap->qhead == 0) { /* Wrapped */
- dmap->byte_counter += dmap->bytes_in_use;
- if (dmap->byte_counter >= dmap->max_byte_counter) { /* Overflow */
- long decr = dmap->byte_counter;
- dmap->byte_counter = (dmap->byte_counter % dmap->bytes_in_use);
- decr -= dmap->byte_counter;
- dmap->user_counter -= decr;
- }
- }
- dmap->qlen++; /* Yes increment it (don't decrement) */
- if (!(adev->flags & DMA_AUTOMODE))
- dmap->flags &= ~DMA_ACTIVE;
- dmap->counts[dmap->qhead] = dmap->fragment_size;
- DMAbuf_launch_output(dev, dmap);
- finish_output_interrupt(dev, dmap);
- return;
- }
-
- dmap->qlen--;
- this_fragment = dmap->qhead;
- dmap->qhead = (dmap->qhead + 1) % dmap->nbufs;
-
- if (dmap->qhead == 0) { /* Wrapped */
- dmap->byte_counter += dmap->bytes_in_use;
- if (dmap->byte_counter >= dmap->max_byte_counter) { /* Overflow */
- long decr = dmap->byte_counter;
- dmap->byte_counter = (dmap->byte_counter % dmap->bytes_in_use);
- decr -= dmap->byte_counter;
- dmap->user_counter -= decr;
- }
- }
- if (!(adev->flags & DMA_AUTOMODE))
- dmap->flags &= ~DMA_ACTIVE;
-
- /*
- * The loop condition is dmap->qlen <= 0, except while closing,
- * when it becomes dmap->qlen < 0
- */
-
- while (dmap->qlen <= -dmap->closing) {
- dmap->underrun_count++;
- dmap->qlen++;
- if ((dmap->flags & DMA_DIRTY) && dmap->applic_profile != APF_CPUINTENS) {
- dmap->flags &= ~DMA_DIRTY;
- memset(adev->dmap_out->raw_buf, adev->dmap_out->neutral_byte,
- adev->dmap_out->buffsize);
- }
- dmap->user_counter += dmap->fragment_size;
- dmap->qtail = (dmap->qtail + 1) % dmap->nbufs;
- }
- if (dmap->qlen > 0)
- DMAbuf_launch_output(dev, dmap);
- finish_output_interrupt(dev, dmap);
-}
-/* called in irq context */
-void DMAbuf_outputintr(int dev, int notify_only)
-{
- struct audio_operations *adev = audio_devs[dev];
- unsigned long flags;
- struct dma_buffparms *dmap = adev->dmap_out;
-
- spin_lock_irqsave(&dmap->lock,flags);
- if (!(dmap->flags & DMA_NODMA)) {
- int chan = dmap->dma, pos, n;
- unsigned long f;
-
- f=claim_dma_lock();
-
- if(!isa_dma_bridge_buggy)
- disable_dma(dmap->dma);
- clear_dma_ff(chan);
- pos = dmap->bytes_in_use - get_dma_residue(chan);
- if(!isa_dma_bridge_buggy)
- enable_dma(dmap->dma);
- release_dma_lock(f);
-
- pos = pos / dmap->fragment_size; /* Actual qhead */
- if (pos < 0 || pos >= dmap->nbufs)
- pos = 0;
- n = 0;
- while (dmap->qhead != pos && n++ < dmap->nbufs)
- do_outputintr(dev, notify_only);
- }
- else
- do_outputintr(dev, notify_only);
- spin_unlock_irqrestore(&dmap->lock,flags);
-}
-EXPORT_SYMBOL(DMAbuf_outputintr);
-
-/* called with dmap->lock held in irq context */
-static void do_inputintr(int dev)
-{
- struct audio_operations *adev = audio_devs[dev];
- struct dma_buffparms *dmap = adev->dmap_in;
-
- if (dmap->raw_buf == NULL) {
- printk(KERN_ERR "Sound: Fatal error. Audio interrupt after freeing buffers.\n");
- return;
- }
- if (dmap->mapping_flags & DMA_MAP_MAPPED) {
- dmap->qtail = (dmap->qtail + 1) % dmap->nbufs;
- if (dmap->qtail == 0) { /* Wrapped */
- dmap->byte_counter += dmap->bytes_in_use;
- if (dmap->byte_counter >= dmap->max_byte_counter) { /* Overflow */
- long decr = dmap->byte_counter;
- dmap->byte_counter = (dmap->byte_counter % dmap->bytes_in_use) + dmap->bytes_in_use;
- decr -= dmap->byte_counter;
- dmap->user_counter -= decr;
- }
- }
- dmap->qlen++;
-
- if (!(adev->flags & DMA_AUTOMODE)) {
- if (dmap->needs_reorg)
- reorganize_buffers(dev, dmap, 0);
- local_start_dma(adev, dmap->raw_buf_phys, dmap->bytes_in_use,DMA_MODE_READ);
- adev->d->start_input(dev, dmap->raw_buf_phys + dmap->qtail * dmap->fragment_size,
- dmap->fragment_size, 1);
- if (adev->d->trigger)
- adev->d->trigger(dev, adev->enable_bits * adev->go);
- }
- dmap->flags |= DMA_ACTIVE;
- } else if (dmap->qlen >= (dmap->nbufs - 1)) {
- printk(KERN_WARNING "Sound: Recording overrun\n");
- dmap->underrun_count++;
-
- /* Just throw away the oldest fragment but keep the engine running */
- dmap->qhead = (dmap->qhead + 1) % dmap->nbufs;
- dmap->qtail = (dmap->qtail + 1) % dmap->nbufs;
- } else if (dmap->qlen >= 0 && dmap->qlen < dmap->nbufs) {
- dmap->qlen++;
- dmap->qtail = (dmap->qtail + 1) % dmap->nbufs;
- if (dmap->qtail == 0) { /* Wrapped */
- dmap->byte_counter += dmap->bytes_in_use;
- if (dmap->byte_counter >= dmap->max_byte_counter) { /* Overflow */
- long decr = dmap->byte_counter;
- dmap->byte_counter = (dmap->byte_counter % dmap->bytes_in_use) + dmap->bytes_in_use;
- decr -= dmap->byte_counter;
- dmap->user_counter -= decr;
- }
- }
- }
- if (!(adev->flags & DMA_AUTOMODE) || (dmap->flags & DMA_NODMA)) {
- local_start_dma(adev, dmap->raw_buf_phys, dmap->bytes_in_use, DMA_MODE_READ);
- adev->d->start_input(dev, dmap->raw_buf_phys + dmap->qtail * dmap->fragment_size, dmap->fragment_size, 1);
- if (adev->d->trigger)
- adev->d->trigger(dev,adev->enable_bits * adev->go);
- }
- dmap->flags |= DMA_ACTIVE;
- if (dmap->qlen > 0)
- {
- wake_up(&adev->in_sleeper);
- wake_up(&adev->poll_sleeper);
- }
-}
-/* called in irq context */
-void DMAbuf_inputintr(int dev)
-{
- struct audio_operations *adev = audio_devs[dev];
- struct dma_buffparms *dmap = adev->dmap_in;
- unsigned long flags;
-
- spin_lock_irqsave(&dmap->lock,flags);
-
- if (!(dmap->flags & DMA_NODMA)) {
- int chan = dmap->dma, pos, n;
- unsigned long f;
-
- f=claim_dma_lock();
- if(!isa_dma_bridge_buggy)
- disable_dma(dmap->dma);
- clear_dma_ff(chan);
- pos = dmap->bytes_in_use - get_dma_residue(chan);
- if(!isa_dma_bridge_buggy)
- enable_dma(dmap->dma);
- release_dma_lock(f);
-
- pos = pos / dmap->fragment_size; /* Actual qhead */
- if (pos < 0 || pos >= dmap->nbufs)
- pos = 0;
-
- n = 0;
- while (dmap->qtail != pos && ++n < dmap->nbufs)
- do_inputintr(dev);
- } else
- do_inputintr(dev);
- spin_unlock_irqrestore(&dmap->lock,flags);
-}
-EXPORT_SYMBOL(DMAbuf_inputintr);
-
-void DMAbuf_init(int dev, int dma1, int dma2)
-{
- struct audio_operations *adev = audio_devs[dev];
- /*
- * NOTE! This routine could be called several times.
- */
-
- if (adev && adev->dmap_out == NULL) {
- if (adev->d == NULL)
- panic("OSS: audio_devs[%d]->d == NULL\n", dev);
-
- if (adev->parent_dev) { /* Use DMA map of the parent dev */
- int parent = adev->parent_dev - 1;
- adev->dmap_out = audio_devs[parent]->dmap_out;
- adev->dmap_in = audio_devs[parent]->dmap_in;
- } else {
- adev->dmap_out = adev->dmap_in = &adev->dmaps[0];
- adev->dmap_out->dma = dma1;
- if (adev->flags & DMA_DUPLEX) {
- adev->dmap_in = &adev->dmaps[1];
- adev->dmap_in->dma = dma2;
- }
- }
- /* Persistent DMA buffers allocated here */
- if (sound_dmap_flag == DMAP_KEEP_ON_CLOSE) {
- if (adev->dmap_in->raw_buf == NULL)
- sound_alloc_dmap(adev->dmap_in);
- if (adev->dmap_out->raw_buf == NULL)
- sound_alloc_dmap(adev->dmap_out);
- }
- }
-}
-
-/* No kernel lock - DMAbuf_activate_recording protected by global cli/sti */
-static unsigned int poll_input(struct file * file, int dev, poll_table *wait)
-{
- struct audio_operations *adev = audio_devs[dev];
- struct dma_buffparms *dmap = adev->dmap_in;
-
- if (!(adev->open_mode & OPEN_READ))
- return 0;
- if (dmap->mapping_flags & DMA_MAP_MAPPED) {
- if (dmap->qlen)
- return POLLIN | POLLRDNORM;
- return 0;
- }
- if (dmap->dma_mode != DMODE_INPUT) {
- if (dmap->dma_mode == DMODE_NONE &&
- adev->enable_bits & PCM_ENABLE_INPUT &&
- !dmap->qlen && adev->go) {
- unsigned long flags;
-
- spin_lock_irqsave(&dmap->lock,flags);
- DMAbuf_activate_recording(dev, dmap);
- spin_unlock_irqrestore(&dmap->lock,flags);
- }
- return 0;
- }
- if (!dmap->qlen)
- return 0;
- return POLLIN | POLLRDNORM;
-}
-
-static unsigned int poll_output(struct file * file, int dev, poll_table *wait)
-{
- struct audio_operations *adev = audio_devs[dev];
- struct dma_buffparms *dmap = adev->dmap_out;
-
- if (!(adev->open_mode & OPEN_WRITE))
- return 0;
- if (dmap->mapping_flags & DMA_MAP_MAPPED) {
- if (dmap->qlen)
- return POLLOUT | POLLWRNORM;
- return 0;
- }
- if (dmap->dma_mode == DMODE_INPUT)
- return 0;
- if (dmap->dma_mode == DMODE_NONE)
- return POLLOUT | POLLWRNORM;
- if (!DMAbuf_space_in_queue(dev))
- return 0;
- return POLLOUT | POLLWRNORM;
-}
-
-unsigned int DMAbuf_poll(struct file * file, int dev, poll_table *wait)
-{
- struct audio_operations *adev = audio_devs[dev];
- poll_wait(file, &adev->poll_sleeper, wait);
- return poll_input(file, dev, wait) | poll_output(file, dev, wait);
-}
-
-void DMAbuf_deinit(int dev)
-{
- struct audio_operations *adev = audio_devs[dev];
- /* This routine is called when driver is being unloaded */
- if (!adev)
- return;
-
- /* Persistent DMA buffers deallocated here */
- if (sound_dmap_flag == DMAP_KEEP_ON_CLOSE) {
- sound_free_dmap(adev->dmap_out);
- if (adev->flags & DMA_DUPLEX)
- sound_free_dmap(adev->dmap_in);
- }
-}
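The buffer manager removed above keeps two kinds of bookkeeping per direction: a fragment queue (qhead/qtail/qlen advanced modulo nbufs) and free-running byte counters that are periodically wrapped so they never overflow. A small stand-alone sketch of that wrap step, mirroring the logic in DMAbuf_move_wrpointer() and do_outputintr() (plain user-space C, sample values invented):

#include <stdio.h>

/* Mirrors the counter wrap in dmabuf.c: once byte_counter passes
 * max_byte_counter it is reduced modulo bytes_in_use and user_counter
 * is reduced by the same amount, so user_counter - byte_counter (the
 * amount of queued but unplayed data) is preserved. */
static void wrap_counters(long *byte_counter, long *user_counter,
			  int bytes_in_use, long max_byte_counter)
{
	if (*byte_counter >= max_byte_counter) {
		long decr = *byte_counter;

		*byte_counter %= bytes_in_use;
		decr -= *byte_counter;
		*user_counter -= decr;
	}
}

int main(void)
{
	long byte_counter = 8000L * 60 * 60 + 123;	/* just past the wrap limit */
	long user_counter = byte_counter + 4096;	/* 4096 bytes still queued */

	wrap_counters(&byte_counter, &user_counter, 65536, 8000L * 60 * 60);
	printf("byte=%ld user=%ld queued=%ld\n",
	       byte_counter, user_counter, user_counter - byte_counter);
	return 0;
}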
diff --git a/sound/oss/hex2hex.c b/sound/oss/hex2hex.c
deleted file mode 100644
index f76d729b0196..000000000000
--- a/sound/oss/hex2hex.c
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * hex2hex reads stdin in Intel HEX format and produces an
- * (unsigned char) array which contains the bytes and writes it
- * to stdout using C syntax
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-
-#define ABANDON(why) { fprintf(stderr, "%s\n", why); exit(1); }
-#define MAX_SIZE (256*1024)
-unsigned char buf[MAX_SIZE];
-
-static int loadhex(FILE *inf, unsigned char *buf)
-{
- int l=0, c, i;
-
- while ((c=getc(inf))!=EOF)
- {
- if (c == ':') /* Sync with beginning of line */
- {
- int n, check;
- unsigned char sum;
- int addr;
- int linetype;
-
- if (fscanf(inf, "%02x", &n) != 1)
- ABANDON("File format error");
- sum = n;
-
- if (fscanf(inf, "%04x", &addr) != 1)
- ABANDON("File format error");
- sum += addr/256;
- sum += addr%256;
-
- if (fscanf(inf, "%02x", &linetype) != 1)
- ABANDON("File format error");
- sum += linetype;
-
- if (linetype != 0)
- continue;
-
- for (i=0;i<n;i++)
- {
- if (fscanf(inf, "%02x", &c) != 1)
- ABANDON("File format error");
- if (addr >= MAX_SIZE)
- ABANDON("File too large");
- buf[addr++] = c;
- if (addr > l)
- l = addr;
- sum += c;
- }
-
- if (fscanf(inf, "%02x", &check) != 1)
- ABANDON("File format error");
-
- sum = ~sum + 1;
- if (check != sum)
- ABANDON("Line checksum error");
- }
- }
-
- return l;
-}
-
-int main( int argc, const char * argv [] )
-{
- const char * varline;
- int i,l;
- int id=0;
-
- if(argv[1] && strcmp(argv[1], "-i")==0)
- {
- argv++;
- argc--;
- id=1;
- }
- if(argv[1]==NULL)
- {
- fprintf(stderr,"hex2hex: [-i] filename\n");
- exit(1);
- }
- varline = argv[1];
- l = loadhex(stdin, buf);
-
- printf("/*\n *\t Computer generated file. Do not edit.\n */\n");
- printf("static int %s_len = %d;\n", varline, l);
- printf("static unsigned char %s[] %s = {\n", varline, id?"__initdata":"");
-
- for (i=0;i<l;i++)
- {
- if (i) printf(",");
- if (i && !(i % 16)) printf("\n");
- printf("0x%02x", buf[i]);
- }
-
- printf("\n};\n\n");
- return 0;
-}
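hex2hex above validates each record with the usual Intel HEX checksum: the byte-wise sum of the length, address, type and data bytes, plus the trailing checksum byte, must be zero modulo 256, which loadhex() tests as check == ~sum + 1. A stand-alone check for one example record:

#include <stdio.h>

int main(void)
{
	/* Example record ":0300300002337A1E": 3 data bytes at address 0x0030, type 00. */
	unsigned char rec[] = { 0x03, 0x00, 0x30, 0x00, 0x02, 0x33, 0x7A };
	unsigned char sum = 0;
	unsigned int i;

	for (i = 0; i < sizeof(rec); i++)
		sum += rec[i];

	/* Same test as loadhex(): the trailing checksum byte must equal ~sum + 1. */
	printf("computed checksum 0x%02X (record carries 0x1E)\n",
	       (unsigned char)(~sum + 1));
	return 0;
}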
diff --git a/sound/oss/kahlua.c b/sound/oss/kahlua.c
deleted file mode 100644
index c4b0434c7604..000000000000
--- a/sound/oss/kahlua.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Initialisation code for Cyrix/NatSemi VSA1 softaudio
- *
- * (C) Copyright 2003 Red Hat Inc <alan@lxorguk.ukuu.org.uk>
- *
- * XpressAudio(tm) is used on the Cyrix MediaGX (now NatSemi Geode) systems.
- * The older version (VSA1) provides fairly good soundblaster emulation
- * although there are a couple of bugs: large DMA buffers break record,
- * and the MPU event handling seems suspect. VSA2 allows the native driver
- * to control the AC97 audio engine directly and requires a different driver.
- *
- * Thanks to National Semiconductor for providing the needed information
- * on the XpressAudio(tm) internals.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * TO DO:
- * Investigate whether we can portably support Cognac (5520) in the
- * same manner.
- */
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-#include "sound_config.h"
-
-#include "sb.h"
-
-/*
- * Read a soundblaster compatible mixer register.
- * In this case we are actually reading an SMI trap
- * not real hardware.
- */
-
-static u8 mixer_read(unsigned long io, u8 reg)
-{
- outb(reg, io + 4);
- udelay(20);
- reg = inb(io + 5);
- udelay(20);
- return reg;
-}
-
-static int probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct address_info *hw_config;
- unsigned long base;
- void __iomem *mem;
- unsigned long io;
- u16 map;
- u8 irq, dma8, dma16;
- int oldquiet;
- extern int sb_be_quiet;
-
- base = pci_resource_start(pdev, 0);
- if(base == 0UL)
- return 1;
-
- mem = ioremap(base, 128);
- if (!mem)
- return 1;
- map = readw(mem + 0x18); /* Read the SMI enables */
- iounmap(mem);
-
- /* Map bits
- 0:1 * 0x20 + 0x200 = sb base
- 2 sb enable
- 3 adlib enable
- 5 MPU enable 0x330
- 6 MPU enable 0x300
-
- The other bits may be used internally so must be masked */
-
- io = 0x220 + 0x20 * (map & 3);
-
- if(map & (1<<2))
- printk(KERN_INFO "kahlua: XpressAudio at 0x%lx\n", io);
- else
- return 1;
-
- if(map & (1<<5))
- printk(KERN_INFO "kahlua: MPU at 0x300\n");
- else if(map & (1<<6))
- printk(KERN_INFO "kahlua: MPU at 0x330\n");
-
- irq = mixer_read(io, 0x80) & 0x0F;
- dma8 = mixer_read(io, 0x81);
-
- // printk("IRQ=%x MAP=%x DMA=%x\n", irq, map, dma8);
-
- if(dma8 & 0x20)
- dma16 = 5;
- else if(dma8 & 0x40)
- dma16 = 6;
- else if(dma8 & 0x80)
- dma16 = 7;
- else
- {
- printk(KERN_ERR "kahlua: No 16bit DMA enabled.\n");
- return 1;
- }
-
- if(dma8 & 0x01)
- dma8 = 0;
- else if(dma8 & 0x02)
- dma8 = 1;
- else if(dma8 & 0x08)
- dma8 = 3;
- else
- {
- printk(KERN_ERR "kahlua: No 8bit DMA enabled.\n");
- return 1;
- }
-
- if(irq & 1)
- irq = 9;
- else if(irq & 2)
- irq = 5;
- else if(irq & 4)
- irq = 7;
- else if(irq & 8)
- irq = 10;
- else
- {
- printk(KERN_ERR "kahlua: SB IRQ not set.\n");
- return 1;
- }
-
- printk(KERN_INFO "kahlua: XpressAudio on IRQ %d, DMA %d, %d\n",
- irq, dma8, dma16);
-
- hw_config = kzalloc(sizeof(struct address_info), GFP_KERNEL);
- if(hw_config == NULL)
- {
- printk(KERN_ERR "kahlua: out of memory.\n");
- return 1;
- }
-
- pci_set_drvdata(pdev, hw_config);
-
- hw_config->io_base = io;
- hw_config->irq = irq;
- hw_config->dma = dma8;
- hw_config->dma2 = dma16;
- hw_config->name = "Cyrix XpressAudio";
- hw_config->driver_use_1 = SB_NO_MIDI | SB_PCI_IRQ;
-
- if (!request_region(io, 16, "soundblaster"))
- goto err_out_free;
-
- if(sb_dsp_detect(hw_config, 0, 0, NULL)==0)
- {
- printk(KERN_ERR "kahlua: audio not responding.\n");
- release_region(io, 16);
- goto err_out_free;
- }
-
- oldquiet = sb_be_quiet;
- sb_be_quiet = 1;
- if(sb_dsp_init(hw_config, THIS_MODULE))
- {
- sb_be_quiet = oldquiet;
- goto err_out_free;
- }
- sb_be_quiet = oldquiet;
-
- return 0;
-
-err_out_free:
- kfree(hw_config);
- return 1;
-}
-
-static void remove_one(struct pci_dev *pdev)
-{
- struct address_info *hw_config = pci_get_drvdata(pdev);
- sb_dsp_unload(hw_config, 0);
- kfree(hw_config);
-}
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("Kahlua VSA1 PCI Audio");
-MODULE_LICENSE("GPL");
-
-/*
- * 5530 only. The 5510/5520 decode is different.
- */
-
-static const struct pci_device_id id_tbl[] = {
- { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_AUDIO), 0 },
- { }
-};
-
-MODULE_DEVICE_TABLE(pci, id_tbl);
-
-static struct pci_driver kahlua_driver = {
- .name = "kahlua",
- .id_table = id_tbl,
- .probe = probe_one,
- .remove = remove_one,
-};
-
-
-static int __init kahlua_init_module(void)
-{
- printk(KERN_INFO "Cyrix Kahlua VSA1 XpressAudio support (c) Copyright 2003 Red Hat Inc\n");
- return pci_register_driver(&kahlua_driver);
-}
-
-static void kahlua_cleanup_module(void)
-{
- pci_unregister_driver(&kahlua_driver);
-}
-
-
-module_init(kahlua_init_module);
-module_exit(kahlua_cleanup_module);
-
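The probe above reads the VSA1 SMI enable word and derives the emulated SoundBlaster base from bits 0-1 (0x220 + 0x20 * n), with bit 2 as the enable. A trivial stand-alone decode of that part (the sample map value is invented):

#include <stdio.h>

int main(void)
{
	unsigned short map = 0x0005;	/* invented sample: base index 1, SB enable set */
	unsigned long io = 0x220 + 0x20 * (map & 3);

	if (map & (1 << 2))
		printf("XpressAudio SB emulation at 0x%lx\n", io);	/* 0x240 here */
	else
		printf("SB emulation disabled\n");
	return 0;
}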
diff --git a/sound/oss/midi_ctrl.h b/sound/oss/midi_ctrl.h
deleted file mode 100644
index 240d0c719f1e..000000000000
--- a/sound/oss/midi_ctrl.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-static unsigned char ctrl_def_values[128] =
-{
- 0x40,0x00,0x40,0x40, 0x40,0x40,0x40,0x7f, /* 0 to 7 */
- 0x40,0x40,0x40,0x7f, 0x40,0x40,0x40,0x40, /* 8 to 15 */
- 0x40,0x40,0x40,0x40, 0x40,0x40,0x40,0x40, /* 16 to 23 */
- 0x40,0x40,0x40,0x40, 0x40,0x40,0x40,0x40, /* 24 to 31 */
-
- 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, /* 32 to 39 */
- 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, /* 40 to 47 */
- 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, /* 48 to 55 */
- 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, /* 56 to 63 */
-
- 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, /* 64 to 71 */
- 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, /* 72 to 79 */
- 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, /* 80 to 87 */
- 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, /* 88 to 95 */
-
- 0x00,0x00,0x7f,0x7f, 0x7f,0x7f,0x00,0x00, /* 96 to 103 */
- 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, /* 104 to 111 */
- 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, /* 112 to 119 */
- 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, /* 120 to 127 */
-};
diff --git a/sound/oss/midi_synth.c b/sound/oss/midi_synth.c
deleted file mode 100644
index 2292c230d7e6..000000000000
--- a/sound/oss/midi_synth.c
+++ /dev/null
@@ -1,712 +0,0 @@
-/*
- * sound/oss/midi_synth.c
- *
- * High level midi sequencer manager for dumb MIDI interfaces.
- */
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- */
-/*
- * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
- * Andrew Veliath : fixed running status in MIDI input state machine
- */
-#define USE_SEQ_MACROS
-#define USE_SIMPLE_MACROS
-
-#include "sound_config.h"
-
-#define _MIDI_SYNTH_C_
-
-#include "midi_synth.h"
-
-static int midi2synth[MAX_MIDI_DEV];
-static int sysex_state[MAX_MIDI_DEV] =
-{0};
-static unsigned char prev_out_status[MAX_MIDI_DEV];
-
-#define STORE(cmd) \
-{ \
- int len; \
- unsigned char obuf[8]; \
- cmd; \
- seq_input_event(obuf, len); \
-}
-
-#define _seqbuf obuf
-#define _seqbufptr 0
-#define _SEQ_ADVBUF(x) len=x
-
-void
-do_midi_msg(int synthno, unsigned char *msg, int mlen)
-{
- switch (msg[0] & 0xf0)
- {
- case 0x90:
- if (msg[2] != 0)
- {
- STORE(SEQ_START_NOTE(synthno, msg[0] & 0x0f, msg[1], msg[2]));
- break;
- }
- msg[2] = 64;
-
- case 0x80:
- STORE(SEQ_STOP_NOTE(synthno, msg[0] & 0x0f, msg[1], msg[2]));
- break;
-
- case 0xA0:
- STORE(SEQ_KEY_PRESSURE(synthno, msg[0] & 0x0f, msg[1], msg[2]));
- break;
-
- case 0xB0:
- STORE(SEQ_CONTROL(synthno, msg[0] & 0x0f,
- msg[1], msg[2]));
- break;
-
- case 0xC0:
- STORE(SEQ_SET_PATCH(synthno, msg[0] & 0x0f, msg[1]));
- break;
-
- case 0xD0:
- STORE(SEQ_CHN_PRESSURE(synthno, msg[0] & 0x0f, msg[1]));
- break;
-
- case 0xE0:
- STORE(SEQ_BENDER(synthno, msg[0] & 0x0f,
- (msg[1] & 0x7f) | ((msg[2] & 0x7f) << 7)));
- break;
-
- default:
- /* printk( "MPU: Unknown midi channel message %02x\n", msg[0]); */
- ;
- }
-}
-EXPORT_SYMBOL(do_midi_msg);
-
-static void
-midi_outc(int midi_dev, int data)
-{
- int timeout;
-
- for (timeout = 0; timeout < 3200; timeout++)
- if (midi_devs[midi_dev]->outputc(midi_dev, (unsigned char) (data & 0xff)))
- {
- if (data & 0x80) /*
- * Status byte
- */
- prev_out_status[midi_dev] =
- (unsigned char) (data & 0xff); /*
- * Store for running status
- */
- return; /*
- * Mission complete
- */
- }
- /*
- * Sorry! No space left in the output buffer.
- */
- printk("Midi send timed out\n");
-}
-
-static int
-prefix_cmd(int midi_dev, unsigned char status)
-{
- if ((char *) midi_devs[midi_dev]->prefix_cmd == NULL)
- return 1;
-
- return midi_devs[midi_dev]->prefix_cmd(midi_dev, status);
-}
-
-static void
-midi_synth_input(int orig_dev, unsigned char data)
-{
- int dev;
- struct midi_input_info *inc;
-
- static unsigned char len_tab[] = /* # of data bytes following a status byte */
- {
- 2, /* 8x */
- 2, /* 9x */
- 2, /* Ax */
- 2, /* Bx */
- 1, /* Cx */
- 1, /* Dx */
- 2, /* Ex */
- 0 /* Fx */
- };
-
- if (orig_dev < 0 || orig_dev >= num_midis || midi_devs[orig_dev] == NULL)
- return;
-
- if (data == 0xfe) /* Ignore active sensing */
- return;
-
- dev = midi2synth[orig_dev];
- inc = &midi_devs[orig_dev]->in_info;
-
- switch (inc->m_state)
- {
- case MST_INIT:
- if (data & 0x80) /* MIDI status byte */
- {
- if ((data & 0xf0) == 0xf0) /* Common message */
- {
- switch (data)
- {
- case 0xf0: /* Sysex */
- inc->m_state = MST_SYSEX;
- break; /* Sysex */
-
- case 0xf1: /* MTC quarter frame */
- case 0xf3: /* Song select */
- inc->m_state = MST_DATA;
- inc->m_ptr = 1;
- inc->m_left = 1;
- inc->m_buf[0] = data;
- break;
-
- case 0xf2: /* Song position pointer */
- inc->m_state = MST_DATA;
- inc->m_ptr = 1;
- inc->m_left = 2;
- inc->m_buf[0] = data;
- break;
-
- default:
- inc->m_buf[0] = data;
- inc->m_ptr = 1;
- do_midi_msg(dev, inc->m_buf, inc->m_ptr);
- inc->m_ptr = 0;
- inc->m_left = 0;
- }
- } else
- {
- inc->m_state = MST_DATA;
- inc->m_ptr = 1;
- inc->m_left = len_tab[(data >> 4) - 8];
- inc->m_buf[0] = inc->m_prev_status = data;
- }
- } else if (inc->m_prev_status & 0x80) {
- /* Data byte (use running status) */
- inc->m_ptr = 2;
- inc->m_buf[1] = data;
- inc->m_buf[0] = inc->m_prev_status;
- inc->m_left = len_tab[(inc->m_buf[0] >> 4) - 8] - 1;
- if (inc->m_left > 0)
- inc->m_state = MST_DATA; /* Not done yet */
- else {
- inc->m_state = MST_INIT;
- do_midi_msg(dev, inc->m_buf, inc->m_ptr);
- inc->m_ptr = 0;
- }
- }
- break; /* MST_INIT */
-
- case MST_DATA:
- inc->m_buf[inc->m_ptr++] = data;
- if (--inc->m_left <= 0)
- {
- inc->m_state = MST_INIT;
- do_midi_msg(dev, inc->m_buf, inc->m_ptr);
- inc->m_ptr = 0;
- }
- break; /* MST_DATA */
-
- case MST_SYSEX:
- if (data == 0xf7) /* Sysex end */
- {
- inc->m_state = MST_INIT;
- inc->m_left = 0;
- inc->m_ptr = 0;
- }
- break; /* MST_SYSEX */
-
- default:
- printk("MIDI%d: Unexpected state %d (%02x)\n", orig_dev, inc->m_state, (int) data);
- inc->m_state = MST_INIT;
- }
-}
-
-static void
-leave_sysex(int dev)
-{
- int orig_dev = synth_devs[dev]->midi_dev;
- int timeout = 0;
-
- if (!sysex_state[dev])
- return;
-
- sysex_state[dev] = 0;
-
- while (!midi_devs[orig_dev]->outputc(orig_dev, 0xf7) &&
- timeout < 1000)
- timeout++;
-
- sysex_state[dev] = 0;
-}
-
-static void
-midi_synth_output(int dev)
-{
- /*
- * Currently NOP
- */
-}
-
-int midi_synth_ioctl(int dev, unsigned int cmd, void __user *arg)
-{
- /*
- * int orig_dev = synth_devs[dev]->midi_dev;
- */
-
- switch (cmd) {
-
- case SNDCTL_SYNTH_INFO:
- if (__copy_to_user(arg, synth_devs[dev]->info, sizeof(struct synth_info)))
- return -EFAULT;
- return 0;
-
- case SNDCTL_SYNTH_MEMAVL:
- return 0x7fffffff;
-
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(midi_synth_ioctl);
-
-int
-midi_synth_kill_note(int dev, int channel, int note, int velocity)
-{
- int orig_dev = synth_devs[dev]->midi_dev;
- int msg, chn;
-
- if (note < 0 || note > 127)
- return 0;
- if (channel < 0 || channel > 15)
- return 0;
- if (velocity < 0)
- velocity = 0;
- if (velocity > 127)
- velocity = 127;
-
- leave_sysex(dev);
-
- msg = prev_out_status[orig_dev] & 0xf0;
- chn = prev_out_status[orig_dev] & 0x0f;
-
- if (chn == channel && ((msg == 0x90 && velocity == 64) || msg == 0x80))
- { /*
- * Use running status
- */
- if (!prefix_cmd(orig_dev, note))
- return 0;
-
- midi_outc(orig_dev, note);
-
- if (msg == 0x90) /*
- * Running status = Note on
- */
- midi_outc(orig_dev, 0); /*
- * Note on with velocity 0 == note
- * off
- */
- else
- midi_outc(orig_dev, velocity);
- } else
- {
- if (velocity == 64)
- {
- if (!prefix_cmd(orig_dev, 0x90 | (channel & 0x0f)))
- return 0;
- midi_outc(orig_dev, 0x90 | (channel & 0x0f)); /*
- * Note on
- */
- midi_outc(orig_dev, note);
- midi_outc(orig_dev, 0); /*
- * Zero G
- */
- } else
- {
- if (!prefix_cmd(orig_dev, 0x80 | (channel & 0x0f)))
- return 0;
- midi_outc(orig_dev, 0x80 | (channel & 0x0f)); /*
- * Note off
- */
- midi_outc(orig_dev, note);
- midi_outc(orig_dev, velocity);
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(midi_synth_kill_note);
-
-int
-midi_synth_set_instr(int dev, int channel, int instr_no)
-{
- int orig_dev = synth_devs[dev]->midi_dev;
-
- if (instr_no < 0 || instr_no > 127)
- instr_no = 0;
- if (channel < 0 || channel > 15)
- return 0;
-
- leave_sysex(dev);
-
- if (!prefix_cmd(orig_dev, 0xc0 | (channel & 0x0f)))
- return 0;
- midi_outc(orig_dev, 0xc0 | (channel & 0x0f)); /*
- * Program change
- */
- midi_outc(orig_dev, instr_no);
-
- return 0;
-}
-EXPORT_SYMBOL(midi_synth_set_instr);
-
-int
-midi_synth_start_note(int dev, int channel, int note, int velocity)
-{
- int orig_dev = synth_devs[dev]->midi_dev;
- int msg, chn;
-
- if (note < 0 || note > 127)
- return 0;
- if (channel < 0 || channel > 15)
- return 0;
- if (velocity < 0)
- velocity = 0;
- if (velocity > 127)
- velocity = 127;
-
- leave_sysex(dev);
-
- msg = prev_out_status[orig_dev] & 0xf0;
- chn = prev_out_status[orig_dev] & 0x0f;
-
- if (chn == channel && msg == 0x90)
- { /*
- * Use running status
- */
- if (!prefix_cmd(orig_dev, note))
- return 0;
- midi_outc(orig_dev, note);
- midi_outc(orig_dev, velocity);
- } else
- {
- if (!prefix_cmd(orig_dev, 0x90 | (channel & 0x0f)))
- return 0;
- midi_outc(orig_dev, 0x90 | (channel & 0x0f)); /*
- * Note on
- */
- midi_outc(orig_dev, note);
- midi_outc(orig_dev, velocity);
- }
- return 0;
-}
-EXPORT_SYMBOL(midi_synth_start_note);
-
-void
-midi_synth_reset(int dev)
-{
-
- leave_sysex(dev);
-}
-EXPORT_SYMBOL(midi_synth_reset);
-
-int
-midi_synth_open(int dev, int mode)
-{
- int orig_dev = synth_devs[dev]->midi_dev;
- int err;
- struct midi_input_info *inc;
-
- if (orig_dev < 0 || orig_dev >= num_midis || midi_devs[orig_dev] == NULL)
- return -ENXIO;
-
- midi2synth[orig_dev] = dev;
- sysex_state[dev] = 0;
- prev_out_status[orig_dev] = 0;
-
- if ((err = midi_devs[orig_dev]->open(orig_dev, mode,
- midi_synth_input, midi_synth_output)) < 0)
- return err;
- inc = &midi_devs[orig_dev]->in_info;
-
- /* save_flags(flags);
- cli();
- don't know against what irqhandler to protect*/
- inc->m_busy = 0;
- inc->m_state = MST_INIT;
- inc->m_ptr = 0;
- inc->m_left = 0;
- inc->m_prev_status = 0x00;
- /* restore_flags(flags); */
-
- return 1;
-}
-EXPORT_SYMBOL(midi_synth_open);
-
-void
-midi_synth_close(int dev)
-{
- int orig_dev = synth_devs[dev]->midi_dev;
-
- leave_sysex(dev);
-
- /*
- * Shut up the synths by sending just single active sensing message.
- */
- midi_devs[orig_dev]->outputc(orig_dev, 0xfe);
-
- midi_devs[orig_dev]->close(orig_dev);
-}
-EXPORT_SYMBOL(midi_synth_close);
-
-void
-midi_synth_hw_control(int dev, unsigned char *event)
-{
-}
-EXPORT_SYMBOL(midi_synth_hw_control);
-
-int
-midi_synth_load_patch(int dev, int format, const char __user *addr,
- int count, int pmgr_flag)
-{
- int orig_dev = synth_devs[dev]->midi_dev;
-
- struct sysex_info sysex;
- int i;
- unsigned long left, src_offs, eox_seen = 0;
- int first_byte = 1;
- int hdr_size = (unsigned long) &sysex.data[0] - (unsigned long) &sysex;
-
- leave_sysex(dev);
-
- if (!prefix_cmd(orig_dev, 0xf0))
- return 0;
-
- /* Invalid patch format */
- if (format != SYSEX_PATCH)
- return -EINVAL;
-
- /* Patch header too short */
- if (count < hdr_size)
- return -EINVAL;
-
- count -= hdr_size;
-
- /*
- * Copy the header from user space
- */
-
- if (copy_from_user(&sysex, addr, hdr_size))
- return -EFAULT;
-
- /* Sysex record too short */
- if ((unsigned)count < (unsigned)sysex.len)
- sysex.len = count;
-
- left = sysex.len;
- src_offs = 0;
-
- for (i = 0; i < left && !signal_pending(current); i++)
- {
- unsigned char data;
-
- if (get_user(data,
- (unsigned char __user *)(addr + hdr_size + i)))
- return -EFAULT;
-
- eox_seen = (i > 0 && data & 0x80); /* End of sysex */
-
- if (eox_seen && data != 0xf7)
- data = 0xf7;
-
- if (i == 0)
- {
- if (data != 0xf0)
- {
- printk(KERN_WARNING "midi_synth: Sysex start missing\n");
- return -EINVAL;
- }
- }
- while (!midi_devs[orig_dev]->outputc(orig_dev, (unsigned char) (data & 0xff)) &&
- !signal_pending(current))
- schedule();
-
- if (!first_byte && data & 0x80)
- return 0;
- first_byte = 0;
- }
-
- if (!eox_seen)
- midi_outc(orig_dev, 0xf7);
- return 0;
-}
-EXPORT_SYMBOL(midi_synth_load_patch);
-
-void midi_synth_panning(int dev, int channel, int pressure)
-{
-}
-EXPORT_SYMBOL(midi_synth_panning);
-
-void midi_synth_aftertouch(int dev, int channel, int pressure)
-{
- int orig_dev = synth_devs[dev]->midi_dev;
- int msg, chn;
-
- if (pressure < 0 || pressure > 127)
- return;
- if (channel < 0 || channel > 15)
- return;
-
- leave_sysex(dev);
-
- msg = prev_out_status[orig_dev] & 0xf0;
- chn = prev_out_status[orig_dev] & 0x0f;
-
- if (msg != 0xd0 || chn != channel) /*
- * Test for running status
- */
- {
- if (!prefix_cmd(orig_dev, 0xd0 | (channel & 0x0f)))
- return;
- midi_outc(orig_dev, 0xd0 | (channel & 0x0f)); /*
- * Channel pressure
- */
- } else if (!prefix_cmd(orig_dev, pressure))
- return;
-
- midi_outc(orig_dev, pressure);
-}
-EXPORT_SYMBOL(midi_synth_aftertouch);
-
-void
-midi_synth_controller(int dev, int channel, int ctrl_num, int value)
-{
- int orig_dev = synth_devs[dev]->midi_dev;
- int chn, msg;
-
- if (ctrl_num < 0 || ctrl_num > 127)
- return;
- if (channel < 0 || channel > 15)
- return;
-
- leave_sysex(dev);
-
- msg = prev_out_status[orig_dev] & 0xf0;
- chn = prev_out_status[orig_dev] & 0x0f;
-
- if (msg != 0xb0 || chn != channel)
- {
- if (!prefix_cmd(orig_dev, 0xb0 | (channel & 0x0f)))
- return;
- midi_outc(orig_dev, 0xb0 | (channel & 0x0f));
- } else if (!prefix_cmd(orig_dev, ctrl_num))
- return;
-
- midi_outc(orig_dev, ctrl_num);
- midi_outc(orig_dev, value & 0x7f);
-}
-EXPORT_SYMBOL(midi_synth_controller);
-
-void
-midi_synth_bender(int dev, int channel, int value)
-{
- int orig_dev = synth_devs[dev]->midi_dev;
- int msg, prev_chn;
-
- if (channel < 0 || channel > 15)
- return;
-
- if (value < 0 || value > 16383)
- return;
-
- leave_sysex(dev);
-
- msg = prev_out_status[orig_dev] & 0xf0;
- prev_chn = prev_out_status[orig_dev] & 0x0f;
-
-	if (msg != 0xe0 || prev_chn != channel)	/*
- * Test for running status
- */
- {
- if (!prefix_cmd(orig_dev, 0xe0 | (channel & 0x0f)))
- return;
- midi_outc(orig_dev, 0xe0 | (channel & 0x0f));
- } else if (!prefix_cmd(orig_dev, value & 0x7f))
- return;
-
- midi_outc(orig_dev, value & 0x7f);
- midi_outc(orig_dev, (value >> 7) & 0x7f);
-}
-EXPORT_SYMBOL(midi_synth_bender);
-
-void
-midi_synth_setup_voice(int dev, int voice, int channel)
-{
-}
-EXPORT_SYMBOL(midi_synth_setup_voice);
-
-int
-midi_synth_send_sysex(int dev, unsigned char *bytes, int len)
-{
- int orig_dev = synth_devs[dev]->midi_dev;
- int i;
-
- for (i = 0; i < len; i++)
- {
- switch (bytes[i])
- {
- case 0xf0: /* Start sysex */
- if (!prefix_cmd(orig_dev, 0xf0))
- return 0;
- sysex_state[dev] = 1;
- break;
-
- case 0xf7: /* End sysex */
- if (!sysex_state[dev]) /* Orphan sysex end */
- return 0;
- sysex_state[dev] = 0;
- break;
-
- default:
- if (!sysex_state[dev])
- return 0;
-
- if (bytes[i] & 0x80) /* Error. Another message before sysex end */
- {
- bytes[i] = 0xf7; /* Sysex end */
- sysex_state[dev] = 0;
- }
- }
-
- if (!midi_devs[orig_dev]->outputc(orig_dev, bytes[i]))
- {
-/*
- * Hardware level buffer is full. Abort the sysex message.
- */
-
- int timeout = 0;
-
- bytes[i] = 0xf7;
- sysex_state[dev] = 0;
-
- while (!midi_devs[orig_dev]->outputc(orig_dev, bytes[i]) &&
- timeout < 1000)
- timeout++;
- }
- if (!sysex_state[dev])
- return 0;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(midi_synth_send_sysex);
-
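The kill_note/start_note paths above lean on the MIDI "running status" rule: when consecutive channel-voice messages carry the same status byte, the status byte may be omitted, and a Note On with velocity 0 doubles as a Note Off. A minimal, self-contained sketch of that encoding follows; it is illustrative only, is not taken from the removed driver, and all names in it are hypothetical.

#include <stdio.h>

/* Last status byte actually sent on the wire (0 = none yet). */
static unsigned char prev_status;

static void send_byte(unsigned char b)
{
	printf("%02x ", b);
}

/*
 * Encode a note event, omitting the status byte when it matches the
 * previously transmitted one (MIDI running status).  A Note On with
 * velocity 0 is used instead of an explicit Note Off so that a run of
 * on/off events can share a single 0x9n status byte.
 */
static void send_note(int channel, int note, int velocity, int on)
{
	unsigned char status = 0x90 | (channel & 0x0f);

	if (!on)
		velocity = 0;

	if (status != prev_status) {
		send_byte(status);
		prev_status = status;
	}
	send_byte(note & 0x7f);
	send_byte(velocity & 0x7f);
}

int main(void)
{
	send_note(0, 60, 100, 1);	/* prints: 90 3c 64 */
	send_note(0, 60, 0, 0);		/* prints: 3c 00  (running status) */
	return 0;
}

With this scheme a long run of note events on one channel needs only one status byte, which is why midi_synth_kill_note() above emits a Note On with velocity 0 rather than an explicit Note Off whenever that lets it reuse the previous status byte.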
diff --git a/sound/oss/midi_synth.h b/sound/oss/midi_synth.h
deleted file mode 100644
index 1cf676c7510e..000000000000
--- a/sound/oss/midi_synth.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-int midi_synth_ioctl (int dev,
- unsigned int cmd, void __user * arg);
-int midi_synth_kill_note (int dev, int channel, int note, int velocity);
-int midi_synth_set_instr (int dev, int channel, int instr_no);
-int midi_synth_start_note (int dev, int channel, int note, int volume);
-void midi_synth_reset (int dev);
-int midi_synth_open (int dev, int mode);
-void midi_synth_close (int dev);
-void midi_synth_hw_control (int dev, unsigned char *event);
-int midi_synth_load_patch (int dev, int format, const char __user * addr,
- int count, int pmgr_flag);
-void midi_synth_panning (int dev, int channel, int pressure);
-void midi_synth_aftertouch (int dev, int channel, int pressure);
-void midi_synth_controller (int dev, int channel, int ctrl_num, int value);
-void midi_synth_bender (int dev, int chn, int value);
-void midi_synth_setup_voice (int dev, int voice, int chn);
-int midi_synth_send_sysex(int dev, unsigned char *bytes,int len);
-
-#ifndef _MIDI_SYNTH_C_
-static struct synth_info std_synth_info =
-{MIDI_SYNTH_NAME, 0, SYNTH_TYPE_MIDI, 0, 0, 128, 0, 128, MIDI_SYNTH_CAPS};
-
-static struct synth_operations std_midi_synth =
-{
- .owner = THIS_MODULE,
- .id = "MIDI",
- .info = &std_synth_info,
- .midi_dev = 0,
- .synth_type = SYNTH_TYPE_MIDI,
- .synth_subtype = 0,
- .open = midi_synth_open,
- .close = midi_synth_close,
- .ioctl = midi_synth_ioctl,
- .kill_note = midi_synth_kill_note,
- .start_note = midi_synth_start_note,
- .set_instr = midi_synth_set_instr,
- .reset = midi_synth_reset,
- .hw_control = midi_synth_hw_control,
- .load_patch = midi_synth_load_patch,
- .aftertouch = midi_synth_aftertouch,
- .controller = midi_synth_controller,
- .panning = midi_synth_panning,
- .bender = midi_synth_bender,
- .setup_voice = midi_synth_setup_voice,
- .send_sysex = midi_synth_send_sysex
-};
-#endif
diff --git a/sound/oss/midibuf.c b/sound/oss/midibuf.c
deleted file mode 100644
index 1277df815d5b..000000000000
--- a/sound/oss/midibuf.c
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * sound/oss/midibuf.c
- *
- * Device file manager for /dev/midi#
- */
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- */
-/*
- * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
- */
-#include <linux/stddef.h>
-#include <linux/kmod.h>
-#include <linux/spinlock.h>
-#include <linux/sched/signal.h>
-
-#define MIDIBUF_C
-
-#include "sound_config.h"
-
-
-/*
- * Don't make MAX_QUEUE_SIZE larger than 4000
- */
-
-#define MAX_QUEUE_SIZE 4000
-
-static wait_queue_head_t midi_sleeper[MAX_MIDI_DEV];
-static wait_queue_head_t input_sleeper[MAX_MIDI_DEV];
-
-struct midi_buf
-{
- int len, head, tail;
- unsigned char queue[MAX_QUEUE_SIZE];
-};
-
-struct midi_parms
-{
- long prech_timeout; /*
- * Timeout before the first ch
- */
-};
-
-static struct midi_buf *midi_out_buf[MAX_MIDI_DEV] = {NULL};
-static struct midi_buf *midi_in_buf[MAX_MIDI_DEV] = {NULL};
-static struct midi_parms parms[MAX_MIDI_DEV];
-
-static void midi_poll(unsigned long dummy);
-
-
-static DEFINE_TIMER(poll_timer, midi_poll);
-
-static volatile int open_devs;
-static DEFINE_SPINLOCK(lock);
-
-#define DATA_AVAIL(q) (q->len)
-#define SPACE_AVAIL(q) (MAX_QUEUE_SIZE - q->len)
-
-#define QUEUE_BYTE(q, data) \
- if (SPACE_AVAIL(q)) \
- { \
- unsigned long flags; \
- spin_lock_irqsave(&lock, flags); \
- q->queue[q->tail] = (data); \
- q->len++; q->tail = (q->tail+1) % MAX_QUEUE_SIZE; \
- spin_unlock_irqrestore(&lock, flags); \
- }
-
-#define REMOVE_BYTE(q, data) \
- if (DATA_AVAIL(q)) \
- { \
- unsigned long flags; \
- spin_lock_irqsave(&lock, flags); \
- data = q->queue[q->head]; \
- q->len--; q->head = (q->head+1) % MAX_QUEUE_SIZE; \
- spin_unlock_irqrestore(&lock, flags); \
- }
-
-static void drain_midi_queue(int dev)
-{
-
- /*
- * Give the Midi driver time to drain its output queues
- */
-
- if (midi_devs[dev]->buffer_status != NULL)
- wait_event_interruptible_timeout(midi_sleeper[dev],
- !midi_devs[dev]->buffer_status(dev), HZ/10);
-}
-
-static void midi_input_intr(int dev, unsigned char data)
-{
- if (midi_in_buf[dev] == NULL)
- return;
-
- if (data == 0xfe) /*
- * Active sensing
- */
- return; /*
- * Ignore
- */
-
- if (SPACE_AVAIL(midi_in_buf[dev])) {
- QUEUE_BYTE(midi_in_buf[dev], data);
- wake_up(&input_sleeper[dev]);
- }
-}
-
-static void midi_output_intr(int dev)
-{
- /*
- * Currently NOP
- */
-}
-
-static void midi_poll(unsigned long dummy)
-{
- unsigned long flags;
- int dev;
-
- spin_lock_irqsave(&lock, flags);
- if (open_devs)
- {
- for (dev = 0; dev < num_midis; dev++)
- if (midi_devs[dev] != NULL && midi_out_buf[dev] != NULL)
- {
- while (DATA_AVAIL(midi_out_buf[dev]))
- {
- int ok;
- int c = midi_out_buf[dev]->queue[midi_out_buf[dev]->head];
-
- spin_unlock_irqrestore(&lock,flags);/* Give some time to others */
- ok = midi_devs[dev]->outputc(dev, c);
- spin_lock_irqsave(&lock, flags);
- if (!ok)
- break;
- midi_out_buf[dev]->head = (midi_out_buf[dev]->head + 1) % MAX_QUEUE_SIZE;
- midi_out_buf[dev]->len--;
- }
-
- if (DATA_AVAIL(midi_out_buf[dev]) < 100)
- wake_up(&midi_sleeper[dev]);
- }
- poll_timer.expires = (1) + jiffies;
- add_timer(&poll_timer);
- /*
- * Come back later
- */
- }
- spin_unlock_irqrestore(&lock, flags);
-}
-
-int MIDIbuf_open(int dev, struct file *file)
-{
- int mode, err;
-
- dev = dev >> 4;
- mode = translate_mode(file);
-
- if (num_midis > MAX_MIDI_DEV)
- {
- printk(KERN_ERR "midi: Too many midi interfaces\n");
- num_midis = MAX_MIDI_DEV;
- }
- if (dev < 0 || dev >= num_midis || midi_devs[dev] == NULL)
- return -ENXIO;
- /*
- * Interrupts disabled. Be careful
- */
-
- module_put(midi_devs[dev]->owner);
-
- if ((err = midi_devs[dev]->open(dev, mode,
- midi_input_intr, midi_output_intr)) < 0)
- return err;
-
- parms[dev].prech_timeout = MAX_SCHEDULE_TIMEOUT;
- midi_in_buf[dev] = vmalloc(sizeof(struct midi_buf));
-
- if (midi_in_buf[dev] == NULL)
- {
- printk(KERN_WARNING "midi: Can't allocate buffer\n");
- midi_devs[dev]->close(dev);
- return -EIO;
- }
- midi_in_buf[dev]->len = midi_in_buf[dev]->head = midi_in_buf[dev]->tail = 0;
-
- midi_out_buf[dev] = vmalloc(sizeof(struct midi_buf));
-
- if (midi_out_buf[dev] == NULL)
- {
- printk(KERN_WARNING "midi: Can't allocate buffer\n");
- midi_devs[dev]->close(dev);
- vfree(midi_in_buf[dev]);
- midi_in_buf[dev] = NULL;
- return -EIO;
- }
- midi_out_buf[dev]->len = midi_out_buf[dev]->head = midi_out_buf[dev]->tail = 0;
- open_devs++;
-
- init_waitqueue_head(&midi_sleeper[dev]);
- init_waitqueue_head(&input_sleeper[dev]);
-
- if (open_devs < 2) /* This was first open */
- {
- poll_timer.expires = 1 + jiffies;
- add_timer(&poll_timer); /* Start polling */
- }
- return err;
-}
-
-void MIDIbuf_release(int dev, struct file *file)
-{
- int mode;
-
- dev = dev >> 4;
- mode = translate_mode(file);
-
- if (dev < 0 || dev >= num_midis || midi_devs[dev] == NULL)
- return;
-
- /*
- * Wait until the queue is empty
- */
-
- if (mode != OPEN_READ)
- {
- midi_devs[dev]->outputc(dev, 0xfe); /*
- * Active sensing to shut the
- * devices
- */
-
- wait_event_interruptible(midi_sleeper[dev],
- !DATA_AVAIL(midi_out_buf[dev]));
- /*
- * Sync
- */
-
- drain_midi_queue(dev); /*
- * Ensure the output queues are empty
- */
- }
-
- midi_devs[dev]->close(dev);
-
- open_devs--;
- if (open_devs == 0)
- del_timer_sync(&poll_timer);
- vfree(midi_in_buf[dev]);
- vfree(midi_out_buf[dev]);
- midi_in_buf[dev] = NULL;
- midi_out_buf[dev] = NULL;
-
- module_put(midi_devs[dev]->owner);
-}
-
-int MIDIbuf_write(int dev, struct file *file, const char __user *buf, int count)
-{
- int c, n, i;
- unsigned char tmp_data;
-
- dev = dev >> 4;
-
- if (!count)
- return 0;
-
- c = 0;
-
- while (c < count)
- {
- n = SPACE_AVAIL(midi_out_buf[dev]);
-
- if (n == 0) { /*
- * No space just now.
- */
-
- if (file->f_flags & O_NONBLOCK) {
- c = -EAGAIN;
- goto out;
- }
-
- if (wait_event_interruptible(midi_sleeper[dev],
- SPACE_AVAIL(midi_out_buf[dev])))
- {
- c = -EINTR;
- goto out;
- }
- n = SPACE_AVAIL(midi_out_buf[dev]);
- }
- if (n > (count - c))
- n = count - c;
-
- for (i = 0; i < n; i++)
- {
- /* BROKE BROKE BROKE - CAN'T DO THIS WITH CLI !! */
- /* yes, think the same, so I removed the cli() brackets
- QUEUE_BYTE is protected against interrupts */
- if (copy_from_user((char *) &tmp_data, &(buf)[c], 1)) {
- c = -EFAULT;
- goto out;
- }
- QUEUE_BYTE(midi_out_buf[dev], tmp_data);
- c++;
- }
- }
-out:
- return c;
-}
-
-
-int MIDIbuf_read(int dev, struct file *file, char __user *buf, int count)
-{
- int n, c = 0;
- unsigned char tmp_data;
-
- dev = dev >> 4;
-
- if (!DATA_AVAIL(midi_in_buf[dev])) { /*
- * No data yet, wait
- */
- if (file->f_flags & O_NONBLOCK) {
- c = -EAGAIN;
- goto out;
- }
- wait_event_interruptible_timeout(input_sleeper[dev],
- DATA_AVAIL(midi_in_buf[dev]),
- parms[dev].prech_timeout);
-
- if (signal_pending(current))
- c = -EINTR; /* The user is getting restless */
- }
- if (c == 0 && DATA_AVAIL(midi_in_buf[dev])) /*
- * Got some bytes
- */
- {
- n = DATA_AVAIL(midi_in_buf[dev]);
- if (n > count)
- n = count;
- c = 0;
-
- while (c < n)
- {
- char *fixit;
- REMOVE_BYTE(midi_in_buf[dev], tmp_data);
- fixit = (char *) &tmp_data;
- /* BROKE BROKE BROKE */
- /* yes removed the cli() brackets again
- should q->len,tail&head be atomic_t? */
- if (copy_to_user(&(buf)[c], fixit, 1)) {
- c = -EFAULT;
- goto out;
- }
- c++;
- }
- }
-out:
- return c;
-}
-
-int MIDIbuf_ioctl(int dev, struct file *file,
- unsigned int cmd, void __user *arg)
-{
- int val;
-
- dev = dev >> 4;
-
- if (((cmd >> 8) & 0xff) == 'C')
- {
- if (midi_devs[dev]->coproc) /* Coprocessor ioctl */
- return midi_devs[dev]->coproc->ioctl(midi_devs[dev]->coproc->devc, cmd, arg, 0);
-/* printk("/dev/midi%d: No coprocessor for this device\n", dev);*/
- return -ENXIO;
- }
- else
- {
- switch (cmd)
- {
- case SNDCTL_MIDI_PRETIME:
- if (get_user(val, (int __user *)arg))
- return -EFAULT;
- if (val < 0)
- val = 0;
- val = (HZ * val) / 10;
- parms[dev].prech_timeout = val;
- return put_user(val, (int __user *)arg);
-
- default:
- if (!midi_devs[dev]->ioctl)
- return -EINVAL;
- return midi_devs[dev]->ioctl(dev, cmd, arg);
- }
- }
-}
-
-/* No kernel lock - fine */
-unsigned int MIDIbuf_poll(int dev, struct file *file, poll_table * wait)
-{
- unsigned int mask = 0;
-
- dev = dev >> 4;
-
- /* input */
- poll_wait(file, &input_sleeper[dev], wait);
- if (DATA_AVAIL(midi_in_buf[dev]))
- mask |= POLLIN | POLLRDNORM;
-
- /* output */
- poll_wait(file, &midi_sleeper[dev], wait);
- if (!SPACE_AVAIL(midi_out_buf[dev]))
- mask |= POLLOUT | POLLWRNORM;
-
- return mask;
-}
-
-
-int MIDIbuf_avail(int dev)
-{
- if (midi_in_buf[dev])
- return DATA_AVAIL (midi_in_buf[dev]);
- return 0;
-}
-EXPORT_SYMBOL(MIDIbuf_avail);
-
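midibuf.c queues bytes through the QUEUE_BYTE/REMOVE_BYTE macros above, which implement a fixed-size ring buffer indexed by head, tail and len under a spinlock. The following stand-alone sketch shows the same ring-buffer arithmetic with the locking omitted; the names are illustrative and do not come from the removed file.

#include <stdio.h>

#define QUEUE_SIZE 4000

struct byte_fifo {
	int len, head, tail;
	unsigned char queue[QUEUE_SIZE];
};

/* Append one byte; returns 1 on success, 0 if the buffer is full. */
static int fifo_put(struct byte_fifo *q, unsigned char data)
{
	if (q->len == QUEUE_SIZE)
		return 0;
	q->queue[q->tail] = data;
	q->tail = (q->tail + 1) % QUEUE_SIZE;
	q->len++;
	return 1;
}

/* Remove one byte; returns 1 on success, 0 if the buffer is empty. */
static int fifo_get(struct byte_fifo *q, unsigned char *data)
{
	if (q->len == 0)
		return 0;
	*data = q->queue[q->head];
	q->head = (q->head + 1) % QUEUE_SIZE;
	q->len--;
	return 1;
}

int main(void)
{
	struct byte_fifo q = { 0 };
	unsigned char c;

	fifo_put(&q, 0x90);
	fifo_put(&q, 0x3c);
	while (fifo_get(&q, &c))
		printf("%02x\n", c);
	return 0;
}

Tracking len explicitly, as the original macros also do, lets a full buffer (len == QUEUE_SIZE) be distinguished from an empty one (len == 0) without sacrificing a slot.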
diff --git a/sound/oss/mpu401.c b/sound/oss/mpu401.c
deleted file mode 100644
index 20e8fa46f647..000000000000
--- a/sound/oss/mpu401.c
+++ /dev/null
@@ -1,1804 +0,0 @@
-/*
- * sound/oss/mpu401.c
- *
- * The low level driver for Roland MPU-401 compatible Midi cards.
- */
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- *
- * Thomas Sailer ioctl code reworked (vmalloc/vfree removed)
- * Alan Cox modularisation, use normal request_irq, use dev_id
- * Bartlomiej Zolnierkiewicz removed some __init to allow using many drivers
- * Chris Rankin Update the module-usage counter for the coprocessor
- * Zwane Mwaikambo Changed attach/unload resource freeing
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#define USE_SEQ_MACROS
-#define USE_SIMPLE_MACROS
-
-#include "sound_config.h"
-
-#include "coproc.h"
-#include "mpu401.h"
-
-static int timer_mode = TMR_INTERNAL, timer_caps = TMR_INTERNAL;
-
-struct mpu_config
-{
- int base; /*
- * I/O base
- */
- int irq;
- int opened; /*
- * Open mode
- */
- int devno;
- int synthno;
- int uart_mode;
- int initialized;
- int mode;
-#define MODE_MIDI 1
-#define MODE_SYNTH 2
- unsigned char version, revision;
- unsigned int capabilities;
-#define MPU_CAP_INTLG 0x10000000
-#define MPU_CAP_SYNC 0x00000010
-#define MPU_CAP_FSK 0x00000020
-#define MPU_CAP_CLS 0x00000040
-#define MPU_CAP_SMPTE 0x00000080
-#define MPU_CAP_2PORT 0x00000001
- int timer_flag;
-
-#define MBUF_MAX 10
-#define BUFTEST(dc) if (dc->m_ptr >= MBUF_MAX || dc->m_ptr < 0) \
- {printk( "MPU: Invalid buffer pointer %d/%d, s=%d\n", dc->m_ptr, dc->m_left, dc->m_state);dc->m_ptr--;}
- int m_busy;
- unsigned char m_buf[MBUF_MAX];
- int m_ptr;
- int m_state;
- int m_left;
- unsigned char last_status;
- void (*inputintr) (int dev, unsigned char data);
- int shared_irq;
- int *osp;
- spinlock_t lock;
- };
-
-#define DATAPORT(base) (base)
-#define COMDPORT(base) (base+1)
-#define STATPORT(base) (base+1)
-
-
-static void mpu401_close(int dev);
-
-static inline int mpu401_status(struct mpu_config *devc)
-{
- return inb(STATPORT(devc->base));
-}
-
-#define input_avail(devc) (!(mpu401_status(devc)&INPUT_AVAIL))
-#define output_ready(devc) (!(mpu401_status(devc)&OUTPUT_READY))
-
-static inline void write_command(struct mpu_config *devc, unsigned char cmd)
-{
- outb(cmd, COMDPORT(devc->base));
-}
-
-static inline int read_data(struct mpu_config *devc)
-{
- return inb(DATAPORT(devc->base));
-}
-
-static inline void write_data(struct mpu_config *devc, unsigned char byte)
-{
- outb(byte, DATAPORT(devc->base));
-}
-
-#define OUTPUT_READY 0x40
-#define INPUT_AVAIL 0x80
-#define MPU_ACK 0xFE
-#define MPU_RESET 0xFF
-#define UART_MODE_ON 0x3F
-
-static struct mpu_config dev_conf[MAX_MIDI_DEV];
-
-static int n_mpu_devs;
-
-static int reset_mpu401(struct mpu_config *devc);
-static void set_uart_mode(int dev, struct mpu_config *devc, int arg);
-
-static int mpu_timer_init(int midi_dev);
-static void mpu_timer_interrupt(void);
-static void timer_ext_event(struct mpu_config *devc, int event, int parm);
-
-static struct synth_info mpu_synth_info_proto = {
- "MPU-401 MIDI interface",
- 0,
- SYNTH_TYPE_MIDI,
- MIDI_TYPE_MPU401,
- 0, 128,
- 0, 128,
- SYNTH_CAP_INPUT
-};
-
-static struct synth_info mpu_synth_info[MAX_MIDI_DEV];
-
-/*
- * States for the input scanner
- */
-
-#define ST_INIT 0 /* Ready for timing byte or msg */
-#define ST_TIMED 1 /* Leading timing byte rcvd */
-#define ST_DATABYTE 2 /* Waiting for (nr_left) data bytes */
-
-#define ST_SYSMSG 100 /* System message (sysx etc). */
-#define ST_SYSEX 101 /* System exclusive msg */
-#define ST_MTC 102 /* Midi Time Code (MTC) qframe msg */
-#define ST_SONGSEL 103 /* Song select */
-#define ST_SONGPOS 104 /* Song position pointer */
-
-static unsigned char len_tab[] = /* # of data bytes following a status
- */
-{
- 2, /* 8x */
- 2, /* 9x */
- 2, /* Ax */
- 2, /* Bx */
- 1, /* Cx */
- 1, /* Dx */
- 2, /* Ex */
- 0 /* Fx */
-};
-
-#define STORE(cmd) \
-{ \
- int len; \
- unsigned char obuf[8]; \
- cmd; \
- seq_input_event(obuf, len); \
-}
-
-#define _seqbuf obuf
-#define _seqbufptr 0
-#define _SEQ_ADVBUF(x) len=x
-
-static int mpu_input_scanner(struct mpu_config *devc, unsigned char midic)
-{
-
- switch (devc->m_state)
- {
- case ST_INIT:
- switch (midic)
- {
- case 0xf8:
- /* Timer overflow */
- break;
-
- case 0xfc:
- printk("<all end>");
- break;
-
- case 0xfd:
- if (devc->timer_flag)
- mpu_timer_interrupt();
- break;
-
- case 0xfe:
- return MPU_ACK;
-
- case 0xf0:
- case 0xf1:
- case 0xf2:
- case 0xf3:
- case 0xf4:
- case 0xf5:
- case 0xf6:
- case 0xf7:
- printk("<Trk data rq #%d>", midic & 0x0f);
- break;
-
- case 0xf9:
- printk("<conductor rq>");
- break;
-
- case 0xff:
- devc->m_state = ST_SYSMSG;
- break;
-
- default:
- if (midic <= 0xef)
- {
- /* printk( "mpu time: %d ", midic); */
- devc->m_state = ST_TIMED;
- }
- else
- printk("<MPU: Unknown event %02x> ", midic);
- }
- break;
-
- case ST_TIMED:
- {
- int msg = ((int) (midic & 0xf0) >> 4);
-
- devc->m_state = ST_DATABYTE;
-
- if (msg < 8) /* Data byte */
- {
- /* printk( "midi msg (running status) "); */
- msg = ((int) (devc->last_status & 0xf0) >> 4);
- msg -= 8;
- devc->m_left = len_tab[msg] - 1;
-
- devc->m_ptr = 2;
- devc->m_buf[0] = devc->last_status;
- devc->m_buf[1] = midic;
-
- if (devc->m_left <= 0)
- {
- devc->m_state = ST_INIT;
- do_midi_msg(devc->synthno, devc->m_buf, devc->m_ptr);
- devc->m_ptr = 0;
- }
- }
- else if (msg == 0xf) /* MPU MARK */
- {
- devc->m_state = ST_INIT;
-
- switch (midic)
-	 * Shut up the synths by sending just a single active sensing message.
- case 0xf8:
- /* printk( "NOP "); */
- break;
-
- case 0xf9:
- /* printk( "meas end "); */
- break;
-
- case 0xfc:
- /* printk( "data end "); */
- break;
-
- default:
- printk("Unknown MPU mark %02x\n", midic);
- }
- }
- else
- {
- devc->last_status = midic;
- /* printk( "midi msg "); */
- msg -= 8;
- devc->m_left = len_tab[msg];
-
- devc->m_ptr = 1;
- devc->m_buf[0] = midic;
-
- if (devc->m_left <= 0)
- {
- devc->m_state = ST_INIT;
- do_midi_msg(devc->synthno, devc->m_buf, devc->m_ptr);
- devc->m_ptr = 0;
- }
- }
- }
- break;
-
- case ST_SYSMSG:
- switch (midic)
- {
- case 0xf0:
- printk("<SYX>");
- devc->m_state = ST_SYSEX;
- break;
-
- case 0xf1:
- devc->m_state = ST_MTC;
- break;
-
- case 0xf2:
- devc->m_state = ST_SONGPOS;
- devc->m_ptr = 0;
- break;
-
- case 0xf3:
- devc->m_state = ST_SONGSEL;
- break;
-
- case 0xf6:
- /* printk( "tune_request\n"); */
- devc->m_state = ST_INIT;
- break;
-
- /*
- * Real time messages
- */
- case 0xf8:
- /* midi clock */
- devc->m_state = ST_INIT;
- timer_ext_event(devc, TMR_CLOCK, 0);
- break;
-
- case 0xfA:
- devc->m_state = ST_INIT;
- timer_ext_event(devc, TMR_START, 0);
- break;
-
- case 0xFB:
- devc->m_state = ST_INIT;
- timer_ext_event(devc, TMR_CONTINUE, 0);
- break;
-
- case 0xFC:
- devc->m_state = ST_INIT;
- timer_ext_event(devc, TMR_STOP, 0);
- break;
-
- case 0xFE:
- /* active sensing */
- devc->m_state = ST_INIT;
- break;
-
- case 0xff:
- /* printk( "midi hard reset"); */
- devc->m_state = ST_INIT;
- break;
-
- default:
- printk("unknown MIDI sysmsg %0x\n", midic);
- devc->m_state = ST_INIT;
- }
- break;
-
- case ST_MTC:
- devc->m_state = ST_INIT;
-		printk("MTC frame %02x\n", midic);
- break;
-
- case ST_SYSEX:
- if (midic == 0xf7)
- {
- printk("<EOX>");
- devc->m_state = ST_INIT;
- }
- else
- printk("%02x ", midic);
- break;
-
- case ST_SONGPOS:
- BUFTEST(devc);
- devc->m_buf[devc->m_ptr++] = midic;
- if (devc->m_ptr == 2)
- {
- devc->m_state = ST_INIT;
- devc->m_ptr = 0;
- timer_ext_event(devc, TMR_SPP,
- ((devc->m_buf[1] & 0x7f) << 7) |
- (devc->m_buf[0] & 0x7f));
- }
- break;
-
- case ST_DATABYTE:
- BUFTEST(devc);
- devc->m_buf[devc->m_ptr++] = midic;
- if ((--devc->m_left) <= 0)
- {
- devc->m_state = ST_INIT;
- do_midi_msg(devc->synthno, devc->m_buf, devc->m_ptr);
- devc->m_ptr = 0;
- }
- break;
-
- default:
- printk("Bad state %d ", devc->m_state);
- devc->m_state = ST_INIT;
- }
- return 1;
-}
-
-static void mpu401_input_loop(struct mpu_config *devc)
-{
- unsigned long flags;
- int busy;
- int n;
-
- spin_lock_irqsave(&devc->lock,flags);
- busy = devc->m_busy;
- devc->m_busy = 1;
- spin_unlock_irqrestore(&devc->lock,flags);
-
- if (busy) /* Already inside the scanner */
- return;
-
- n = 50;
-
- while (input_avail(devc) && n-- > 0)
- {
- unsigned char c = read_data(devc);
-
- if (devc->mode == MODE_SYNTH)
- {
- mpu_input_scanner(devc, c);
- }
- else if (devc->opened & OPEN_READ && devc->inputintr != NULL)
- devc->inputintr(devc->devno, c);
- }
- devc->m_busy = 0;
-}
-
-static irqreturn_t mpuintr(int irq, void *dev_id)
-{
- struct mpu_config *devc;
- int dev = (int)(unsigned long) dev_id;
- int handled = 0;
-
- devc = &dev_conf[dev];
-
- if (input_avail(devc))
- {
- handled = 1;
- if (devc->base != 0 && (devc->opened & OPEN_READ || devc->mode == MODE_SYNTH))
- mpu401_input_loop(devc);
- else
- {
- /* Dummy read (just to acknowledge the interrupt) */
- read_data(devc);
- }
- }
- return IRQ_RETVAL(handled);
-}
-
-static int mpu401_open(int dev, int mode,
- void (*input) (int dev, unsigned char data),
- void (*output) (int dev)
-)
-{
- int err;
- struct mpu_config *devc;
- struct coproc_operations *coprocessor;
-
- if (dev < 0 || dev >= num_midis || midi_devs[dev] == NULL)
- return -ENXIO;
-
- devc = &dev_conf[dev];
-
- if (devc->opened)
- return -EBUSY;
- /*
- * Verify that the device is really running.
-	 * Some devices (such as the Ensoniq SoundScape) don't
-	 * work before the on-board processor (OBP) is initialized
-	 * by downloading its microcode.
- */
-
- if (!devc->initialized)
- {
- if (mpu401_status(devc) == 0xff) /* Bus float */
- {
- printk(KERN_ERR "mpu401: Device not initialized properly\n");
- return -EIO;
- }
- reset_mpu401(devc);
- }
-
- if ( (coprocessor = midi_devs[dev]->coproc) != NULL )
- {
- if (!try_module_get(coprocessor->owner)) {
- mpu401_close(dev);
- return -ENODEV;
- }
-
- if ((err = coprocessor->open(coprocessor->devc, COPR_MIDI)) < 0)
- {
- printk(KERN_WARNING "MPU-401: Can't access coprocessor device\n");
- mpu401_close(dev);
- return err;
- }
- }
-
- set_uart_mode(dev, devc, 1);
- devc->mode = MODE_MIDI;
- devc->synthno = 0;
-
- mpu401_input_loop(devc);
-
- devc->inputintr = input;
- devc->opened = mode;
-
- return 0;
-}
-
-static void mpu401_close(int dev)
-{
- struct mpu_config *devc;
- struct coproc_operations *coprocessor;
-
- devc = &dev_conf[dev];
- if (devc->uart_mode)
- reset_mpu401(devc); /*
- * This disables the UART mode
- */
- devc->mode = 0;
- devc->inputintr = NULL;
-
- coprocessor = midi_devs[dev]->coproc;
- if (coprocessor) {
- coprocessor->close(coprocessor->devc, COPR_MIDI);
- module_put(coprocessor->owner);
- }
- devc->opened = 0;
-}
-
-static int mpu401_out(int dev, unsigned char midi_byte)
-{
- int timeout;
- unsigned long flags;
-
- struct mpu_config *devc;
-
- devc = &dev_conf[dev];
-
- /*
- * Sometimes it takes about 30000 loops before the output becomes ready
- * (After reset). Normally it takes just about 10 loops.
- */
-
- for (timeout = 30000; timeout > 0 && !output_ready(devc); timeout--);
-
- spin_lock_irqsave(&devc->lock,flags);
- if (!output_ready(devc))
- {
- printk(KERN_WARNING "mpu401: Send data timeout\n");
- spin_unlock_irqrestore(&devc->lock,flags);
- return 0;
- }
- write_data(devc, midi_byte);
- spin_unlock_irqrestore(&devc->lock,flags);
- return 1;
-}
-
-static int mpu401_command(int dev, mpu_command_rec * cmd)
-{
- int i, timeout, ok;
- unsigned long flags;
- struct mpu_config *devc;
-
- devc = &dev_conf[dev];
-
- if (devc->uart_mode) /*
- * Not possible in UART mode
- */
- {
- printk(KERN_WARNING "mpu401: commands not possible in the UART mode\n");
- return -EINVAL;
- }
- /*
- * Test for input since pending input seems to block the output.
- */
- if (input_avail(devc))
- mpu401_input_loop(devc);
-
- /*
- * Sometimes it takes about 50000 loops before the output becomes ready
- * (After reset). Normally it takes just about 10 loops.
- */
-
- timeout = 50000;
-retry:
- if (timeout-- <= 0)
- {
- printk(KERN_WARNING "mpu401: Command (0x%x) timeout\n", (int) cmd->cmd);
- return -EIO;
- }
- spin_lock_irqsave(&devc->lock,flags);
-
- if (!output_ready(devc))
- {
- spin_unlock_irqrestore(&devc->lock,flags);
- goto retry;
- }
- write_command(devc, cmd->cmd);
-
- ok = 0;
- for (timeout = 50000; timeout > 0 && !ok; timeout--)
- {
- if (input_avail(devc))
- {
- if (devc->opened && devc->mode == MODE_SYNTH)
- {
- if (mpu_input_scanner(devc, read_data(devc)) == MPU_ACK)
- ok = 1;
- }
- else
- {
- /* Device is not currently open. Use simpler method */
- if (read_data(devc) == MPU_ACK)
- ok = 1;
- }
- }
- }
- if (!ok)
- {
- spin_unlock_irqrestore(&devc->lock,flags);
- return -EIO;
- }
- if (cmd->nr_args)
- {
- for (i = 0; i < cmd->nr_args; i++)
- {
- for (timeout = 3000; timeout > 0 && !output_ready(devc); timeout--);
-
- if (!mpu401_out(dev, cmd->data[i]))
- {
- spin_unlock_irqrestore(&devc->lock,flags);
- printk(KERN_WARNING "mpu401: Command (0x%x), parm send failed.\n", (int) cmd->cmd);
- return -EIO;
- }
- }
- }
- cmd->data[0] = 0;
-
- if (cmd->nr_returns)
- {
- for (i = 0; i < cmd->nr_returns; i++)
- {
- ok = 0;
- for (timeout = 5000; timeout > 0 && !ok; timeout--)
- if (input_avail(devc))
- {
- cmd->data[i] = read_data(devc);
- ok = 1;
- }
- if (!ok)
- {
- spin_unlock_irqrestore(&devc->lock,flags);
- return -EIO;
- }
- }
- }
- spin_unlock_irqrestore(&devc->lock,flags);
- return 0;
-}
-
-static int mpu_cmd(int dev, int cmd, int data)
-{
- int ret;
-
- static mpu_command_rec rec;
-
- rec.cmd = cmd & 0xff;
- rec.nr_args = ((cmd & 0xf0) == 0xE0);
- rec.nr_returns = ((cmd & 0xf0) == 0xA0);
- rec.data[0] = data & 0xff;
-
- if ((ret = mpu401_command(dev, &rec)) < 0)
- return ret;
- return (unsigned char) rec.data[0];
-}
-
-static int mpu401_prefix_cmd(int dev, unsigned char status)
-{
- struct mpu_config *devc = &dev_conf[dev];
-
- if (devc->uart_mode)
- return 1;
-
- if (status < 0xf0)
- {
- if (mpu_cmd(dev, 0xD0, 0) < 0)
- return 0;
- return 1;
- }
- switch (status)
- {
- case 0xF0:
- if (mpu_cmd(dev, 0xDF, 0) < 0)
- return 0;
- return 1;
-
- default:
- return 0;
- }
-}
-
-static int mpu401_start_read(int dev)
-{
- return 0;
-}
-
-static int mpu401_end_read(int dev)
-{
- return 0;
-}
-
-static int mpu401_ioctl(int dev, unsigned cmd, void __user *arg)
-{
- struct mpu_config *devc;
- mpu_command_rec rec;
- int val, ret;
-
- devc = &dev_conf[dev];
- switch (cmd)
- {
- case SNDCTL_MIDI_MPUMODE:
- if (!(devc->capabilities & MPU_CAP_INTLG)) { /* No intelligent mode */
- printk(KERN_WARNING "mpu401: Intelligent mode not supported by the HW\n");
- return -EINVAL;
- }
- if (get_user(val, (int __user *)arg))
- return -EFAULT;
- set_uart_mode(dev, devc, !val);
- return 0;
-
- case SNDCTL_MIDI_MPUCMD:
- if (copy_from_user(&rec, arg, sizeof(rec)))
- return -EFAULT;
- if ((ret = mpu401_command(dev, &rec)) < 0)
- return ret;
- if (copy_to_user(arg, &rec, sizeof(rec)))
- return -EFAULT;
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
-static void mpu401_kick(int dev)
-{
-}
-
-static int mpu401_buffer_status(int dev)
-{
- return 0; /*
- * No data in buffers
- */
-}
-
-static int mpu_synth_ioctl(int dev, unsigned int cmd, void __user *arg)
-{
- int midi_dev;
- struct mpu_config *devc;
-
- midi_dev = synth_devs[dev]->midi_dev;
-
- if (midi_dev < 0 || midi_dev >= num_midis || midi_devs[midi_dev] == NULL)
- return -ENXIO;
-
- devc = &dev_conf[midi_dev];
-
- switch (cmd)
- {
-
- case SNDCTL_SYNTH_INFO:
- if (copy_to_user(arg, &mpu_synth_info[midi_dev],
- sizeof(struct synth_info)))
- return -EFAULT;
- return 0;
-
- case SNDCTL_SYNTH_MEMAVL:
- return 0x7fffffff;
-
- default:
- return -EINVAL;
- }
-}
-
-static int mpu_synth_open(int dev, int mode)
-{
- int midi_dev, err;
- struct mpu_config *devc;
- struct coproc_operations *coprocessor;
-
- midi_dev = synth_devs[dev]->midi_dev;
-
-	if (midi_dev < 0 || midi_dev >= num_midis || midi_devs[midi_dev] == NULL)
- return -ENXIO;
-
- devc = &dev_conf[midi_dev];
-
- /*
- * Verify that the device is really running.
-	 * Some devices (such as the Ensoniq SoundScape) don't
-	 * work before the on-board processor (OBP) is initialized
-	 * by downloading its microcode.
- */
-
- if (!devc->initialized)
- {
- if (mpu401_status(devc) == 0xff) /* Bus float */
- {
- printk(KERN_ERR "mpu401: Device not initialized properly\n");
- return -EIO;
- }
- reset_mpu401(devc);
- }
- if (devc->opened)
- return -EBUSY;
- devc->mode = MODE_SYNTH;
- devc->synthno = dev;
-
- devc->inputintr = NULL;
-
- coprocessor = midi_devs[midi_dev]->coproc;
- if (coprocessor) {
- if (!try_module_get(coprocessor->owner))
- return -ENODEV;
-
- if ((err = coprocessor->open(coprocessor->devc, COPR_MIDI)) < 0)
- {
- printk(KERN_WARNING "mpu401: Can't access coprocessor device\n");
- return err;
- }
- }
- devc->opened = mode;
- reset_mpu401(devc);
-
- if (mode & OPEN_READ)
- {
- mpu_cmd(midi_dev, 0x8B, 0); /* Enable data in stop mode */
- mpu_cmd(midi_dev, 0x34, 0); /* Return timing bytes in stop mode */
- mpu_cmd(midi_dev, 0x87, 0); /* Enable pitch & controller */
- }
- return 0;
-}
-
-static void mpu_synth_close(int dev)
-{
- int midi_dev;
- struct mpu_config *devc;
- struct coproc_operations *coprocessor;
-
- midi_dev = synth_devs[dev]->midi_dev;
-
- devc = &dev_conf[midi_dev];
- mpu_cmd(midi_dev, 0x15, 0); /* Stop recording, playback and MIDI */
- mpu_cmd(midi_dev, 0x8a, 0); /* Disable data in stopped mode */
-
- devc->inputintr = NULL;
-
- coprocessor = midi_devs[midi_dev]->coproc;
- if (coprocessor) {
- coprocessor->close(coprocessor->devc, COPR_MIDI);
- module_put(coprocessor->owner);
- }
- devc->opened = 0;
- devc->mode = 0;
-}
-
-#define MIDI_SYNTH_NAME "MPU-401 UART Midi"
-#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
-#include "midi_synth.h"
-
-static struct synth_operations mpu401_synth_proto =
-{
- .owner = THIS_MODULE,
- .id = "MPU401",
- .info = NULL,
- .midi_dev = 0,
- .synth_type = SYNTH_TYPE_MIDI,
- .synth_subtype = 0,
- .open = mpu_synth_open,
- .close = mpu_synth_close,
- .ioctl = mpu_synth_ioctl,
- .kill_note = midi_synth_kill_note,
- .start_note = midi_synth_start_note,
- .set_instr = midi_synth_set_instr,
- .reset = midi_synth_reset,
- .hw_control = midi_synth_hw_control,
- .load_patch = midi_synth_load_patch,
- .aftertouch = midi_synth_aftertouch,
- .controller = midi_synth_controller,
- .panning = midi_synth_panning,
- .bender = midi_synth_bender,
- .setup_voice = midi_synth_setup_voice,
- .send_sysex = midi_synth_send_sysex
-};
-
-static struct synth_operations *mpu401_synth_operations[MAX_MIDI_DEV];
-
-static struct midi_operations mpu401_midi_proto =
-{
- .owner = THIS_MODULE,
- .info = {"MPU-401 Midi", 0, MIDI_CAP_MPU401, SNDCARD_MPU401},
- .in_info = {0},
- .open = mpu401_open,
- .close = mpu401_close,
- .ioctl = mpu401_ioctl,
- .outputc = mpu401_out,
- .start_read = mpu401_start_read,
- .end_read = mpu401_end_read,
- .kick = mpu401_kick,
- .buffer_status = mpu401_buffer_status,
- .prefix_cmd = mpu401_prefix_cmd
-};
-
-static struct midi_operations mpu401_midi_operations[MAX_MIDI_DEV];
-
-static void mpu401_chk_version(int n, struct mpu_config *devc)
-{
- int tmp;
-
- devc->version = devc->revision = 0;
-
- tmp = mpu_cmd(n, 0xAC, 0);
- if (tmp < 0)
- return;
- if ((tmp & 0xf0) > 0x20) /* Why it's larger than 2.x ??? */
- return;
- devc->version = tmp;
-
- if ((tmp = mpu_cmd(n, 0xAD, 0)) < 0) {
- devc->version = 0;
- return;
- }
- devc->revision = tmp;
-}
-
-int attach_mpu401(struct address_info *hw_config, struct module *owner)
-{
- unsigned long flags;
- char revision_char;
-
- int m, ret;
- struct mpu_config *devc;
-
- hw_config->slots[1] = -1;
- m = sound_alloc_mididev();
- if (m == -1)
- {
- printk(KERN_WARNING "MPU-401: Too many midi devices detected\n");
- ret = -ENOMEM;
- goto out_err;
- }
- devc = &dev_conf[m];
- devc->base = hw_config->io_base;
- devc->osp = hw_config->osp;
- devc->irq = hw_config->irq;
- devc->opened = 0;
- devc->uart_mode = 0;
- devc->initialized = 0;
- devc->version = 0;
- devc->revision = 0;
- devc->capabilities = 0;
- devc->timer_flag = 0;
- devc->m_busy = 0;
- devc->m_state = ST_INIT;
- devc->shared_irq = hw_config->always_detect;
- spin_lock_init(&devc->lock);
-
- if (devc->irq < 0)
- {
- devc->irq *= -1;
- devc->shared_irq = 1;
- }
-
- if (!hw_config->always_detect)
- {
- /* Verify the hardware again */
- if (!reset_mpu401(devc))
- {
- printk(KERN_WARNING "mpu401: Device didn't respond\n");
- ret = -ENODEV;
- goto out_mididev;
- }
- if (!devc->shared_irq)
- {
- if (request_irq(devc->irq, mpuintr, 0, "mpu401",
- hw_config) < 0)
- {
- printk(KERN_WARNING "mpu401: Failed to allocate IRQ%d\n", devc->irq);
- ret = -ENOMEM;
- goto out_mididev;
- }
- }
- spin_lock_irqsave(&devc->lock,flags);
- mpu401_chk_version(m, devc);
- if (devc->version == 0)
- mpu401_chk_version(m, devc);
- spin_unlock_irqrestore(&devc->lock, flags);
- }
-
- if (devc->version != 0)
- if (mpu_cmd(m, 0xC5, 0) >= 0) /* Set timebase OK */
- if (mpu_cmd(m, 0xE0, 120) >= 0) /* Set tempo OK */
- devc->capabilities |= MPU_CAP_INTLG; /* Supports intelligent mode */
-
-
- mpu401_synth_operations[m] = kmalloc(sizeof(struct synth_operations), GFP_KERNEL);
-
- if (mpu401_synth_operations[m] == NULL)
- {
- printk(KERN_ERR "mpu401: Can't allocate memory\n");
- ret = -ENOMEM;
- goto out_irq;
- }
- if (!(devc->capabilities & MPU_CAP_INTLG)) /* No intelligent mode */
- {
- memcpy((char *) mpu401_synth_operations[m],
- (char *) &std_midi_synth,
- sizeof(struct synth_operations));
- }
- else
- {
- memcpy((char *) mpu401_synth_operations[m],
- (char *) &mpu401_synth_proto,
- sizeof(struct synth_operations));
- }
- if (owner)
- mpu401_synth_operations[m]->owner = owner;
-
- memcpy((char *) &mpu401_midi_operations[m],
- (char *) &mpu401_midi_proto,
- sizeof(struct midi_operations));
-
- mpu401_midi_operations[m].converter = mpu401_synth_operations[m];
-
- memcpy((char *) &mpu_synth_info[m],
- (char *) &mpu_synth_info_proto,
- sizeof(struct synth_info));
-
- n_mpu_devs++;
-
- if (devc->version == 0x20 && devc->revision >= 0x07) /* MusicQuest interface */
- {
- int ports = (devc->revision & 0x08) ? 32 : 16;
-
- devc->capabilities |= MPU_CAP_SYNC | MPU_CAP_SMPTE |
- MPU_CAP_CLS | MPU_CAP_2PORT;
-
- revision_char = (devc->revision == 0x7f) ? 'M' : ' ';
- sprintf(mpu_synth_info[m].name, "MQX-%d%c MIDI Interface #%d",
- ports,
- revision_char,
- n_mpu_devs);
- }
- else
- {
- revision_char = devc->revision ? devc->revision + '@' : ' ';
- if ((int) devc->revision > ('Z' - '@'))
- revision_char = '+';
-
- devc->capabilities |= MPU_CAP_SYNC | MPU_CAP_FSK;
-
- if (hw_config->name)
- sprintf(mpu_synth_info[m].name, "%s (MPU401)", hw_config->name);
- else
- sprintf(mpu_synth_info[m].name,
- "MPU-401 %d.%d%c MIDI #%d",
- (int) (devc->version & 0xf0) >> 4,
- devc->version & 0x0f,
- revision_char,
- n_mpu_devs);
- }
-
- strcpy(mpu401_midi_operations[m].info.name,
- mpu_synth_info[m].name);
-
- conf_printf(mpu_synth_info[m].name, hw_config);
-
- mpu401_synth_operations[m]->midi_dev = devc->devno = m;
- mpu401_synth_operations[devc->devno]->info = &mpu_synth_info[devc->devno];
-
- if (devc->capabilities & MPU_CAP_INTLG) /* Intelligent mode */
- hw_config->slots[2] = mpu_timer_init(m);
-
- midi_devs[m] = &mpu401_midi_operations[devc->devno];
-
- if (owner)
- midi_devs[m]->owner = owner;
-
- hw_config->slots[1] = m;
- sequencer_init();
-
- return 0;
-
-out_irq:
- free_irq(devc->irq, hw_config);
-out_mididev:
- sound_unload_mididev(m);
-out_err:
- release_region(hw_config->io_base, 2);
- return ret;
-}
-
-static int reset_mpu401(struct mpu_config *devc)
-{
- unsigned long flags;
- int ok, timeout, n;
- int timeout_limit;
-
- /*
- * Send the RESET command. Try again if no success at the first time.
- * (If the device is in the UART mode, it will not ack the reset cmd).
- */
-
- ok = 0;
-
- timeout_limit = devc->initialized ? 30000 : 100000;
- devc->initialized = 1;
-
- for (n = 0; n < 2 && !ok; n++)
- {
- for (timeout = timeout_limit; timeout > 0 && !ok; timeout--)
- ok = output_ready(devc);
-
- write_command(devc, MPU_RESET); /*
- * Send MPU-401 RESET Command
- */
-
- /*
-		 * Wait at least 25 msec. This method is not accurate, so make the
-		 * loop a bit longer. Cannot sleep since this is called during boot.
- */
-
- for (timeout = timeout_limit * 2; timeout > 0 && !ok; timeout--)
- {
- spin_lock_irqsave(&devc->lock,flags);
- if (input_avail(devc))
- if (read_data(devc) == MPU_ACK)
- ok = 1;
- spin_unlock_irqrestore(&devc->lock,flags);
- }
-
- }
-
- devc->m_state = ST_INIT;
- devc->m_ptr = 0;
- devc->m_left = 0;
- devc->last_status = 0;
- devc->uart_mode = 0;
-
- return ok;
-}
-
-static void set_uart_mode(int dev, struct mpu_config *devc, int arg)
-{
- if (!arg && (devc->capabilities & MPU_CAP_INTLG))
- return;
- if ((devc->uart_mode == 0) == (arg == 0))
- return; /* Already set */
- reset_mpu401(devc); /* This exits the uart mode */
-
- if (arg)
- {
- if (mpu_cmd(dev, UART_MODE_ON, 0) < 0)
- {
- printk(KERN_ERR "mpu401: Can't enter UART mode\n");
- devc->uart_mode = 0;
- return;
- }
- }
- devc->uart_mode = arg;
-
-}
-
-int probe_mpu401(struct address_info *hw_config, struct resource *ports)
-{
- int ok = 0;
- struct mpu_config tmp_devc;
-
- tmp_devc.base = hw_config->io_base;
- tmp_devc.irq = hw_config->irq;
- tmp_devc.initialized = 0;
- tmp_devc.opened = 0;
- tmp_devc.osp = hw_config->osp;
-
- if (hw_config->always_detect)
- return 1;
-
- if (inb(hw_config->io_base + 1) == 0xff)
- {
- DDB(printk("MPU401: Port %x looks dead.\n", hw_config->io_base));
- return 0; /* Just bus float? */
- }
- ok = reset_mpu401(&tmp_devc);
-
- if (!ok)
- {
- DDB(printk("MPU401: Reset failed on port %x\n", hw_config->io_base));
- }
- return ok;
-}
-
-void unload_mpu401(struct address_info *hw_config)
-{
- void *p;
- int n=hw_config->slots[1];
-
- if (n != -1) {
- release_region(hw_config->io_base, 2);
- if (hw_config->always_detect == 0 && hw_config->irq > 0)
- free_irq(hw_config->irq, hw_config);
- p=mpu401_synth_operations[n];
- sound_unload_mididev(n);
- sound_unload_timerdev(hw_config->slots[2]);
- kfree(p);
- }
-}
-
-/*****************************************************
- * Timer stuff
- ****************************************************/
-
-static volatile int timer_initialized = 0, timer_open = 0, tmr_running = 0;
-static volatile int curr_tempo, curr_timebase, hw_timebase;
-static int max_timebase = 8; /* 8*24=192 ppqn */
-static volatile unsigned long next_event_time;
-static volatile unsigned long curr_ticks, curr_clocks;
-static unsigned long prev_event_time;
-static int metronome_mode;
-
-static unsigned long clocks2ticks(unsigned long clocks)
-{
- /*
- * The MPU-401 supports just a limited set of possible timebase values.
- * Since the applications require more choices, the driver has to
- * program the HW to do its best and to convert between the HW and
- * actual timebases.
- */
- return ((clocks * curr_timebase) + (hw_timebase / 2)) / hw_timebase;
-}
-
-static void set_timebase(int midi_dev, int val)
-{
- int hw_val;
-
- if (val < 48)
- val = 48;
- if (val > 1000)
- val = 1000;
-
- hw_val = val;
- hw_val = (hw_val + 12) / 24;
- if (hw_val > max_timebase)
- hw_val = max_timebase;
-
- if (mpu_cmd(midi_dev, 0xC0 | (hw_val & 0x0f), 0) < 0)
- {
- printk(KERN_WARNING "mpu401: Can't set HW timebase to %d\n", hw_val * 24);
- return;
- }
- hw_timebase = hw_val * 24;
- curr_timebase = val;
-
-}
-
-static void tmr_reset(struct mpu_config *devc)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&devc->lock,flags);
- next_event_time = (unsigned long) -1;
- prev_event_time = 0;
- curr_ticks = curr_clocks = 0;
- spin_unlock_irqrestore(&devc->lock,flags);
-}
-
-static void set_timer_mode(int midi_dev)
-{
- if (timer_mode & TMR_MODE_CLS)
- mpu_cmd(midi_dev, 0x3c, 0); /* Use CLS sync */
- else if (timer_mode & TMR_MODE_SMPTE)
- mpu_cmd(midi_dev, 0x3d, 0); /* Use SMPTE sync */
-
- if (timer_mode & TMR_INTERNAL)
- {
- mpu_cmd(midi_dev, 0x80, 0); /* Use MIDI sync */
- }
- else
- {
- if (timer_mode & (TMR_MODE_MIDI | TMR_MODE_CLS))
- {
- mpu_cmd(midi_dev, 0x82, 0); /* Use MIDI sync */
- mpu_cmd(midi_dev, 0x91, 0); /* Enable ext MIDI ctrl */
- }
- else if (timer_mode & TMR_MODE_FSK)
- mpu_cmd(midi_dev, 0x81, 0); /* Use FSK sync */
- }
-}
-
-static void stop_metronome(int midi_dev)
-{
- mpu_cmd(midi_dev, 0x84, 0); /* Disable metronome */
-}
-
-static void setup_metronome(int midi_dev)
-{
- int numerator, denominator;
- int clks_per_click, num_32nds_per_beat;
- int beats_per_measure;
-
- numerator = ((unsigned) metronome_mode >> 24) & 0xff;
- denominator = ((unsigned) metronome_mode >> 16) & 0xff;
- clks_per_click = ((unsigned) metronome_mode >> 8) & 0xff;
- num_32nds_per_beat = (unsigned) metronome_mode & 0xff;
- beats_per_measure = (numerator * 4) >> denominator;
-
- if (!metronome_mode)
- mpu_cmd(midi_dev, 0x84, 0); /* Disable metronome */
- else
- {
- mpu_cmd(midi_dev, 0xE4, clks_per_click);
- mpu_cmd(midi_dev, 0xE6, beats_per_measure);
- mpu_cmd(midi_dev, 0x83, 0); /* Enable metronome without accents */
- }
-}
-
-static int mpu_start_timer(int midi_dev)
-{
- struct mpu_config *devc= &dev_conf[midi_dev];
-
- tmr_reset(devc);
- set_timer_mode(midi_dev);
-
- if (tmr_running)
- return TIMER_NOT_ARMED; /* Already running */
-
- if (timer_mode & TMR_INTERNAL)
- {
- mpu_cmd(midi_dev, 0x02, 0); /* Send MIDI start */
- tmr_running = 1;
- return TIMER_NOT_ARMED;
- }
- else
- {
- mpu_cmd(midi_dev, 0x35, 0); /* Enable mode messages to PC */
- mpu_cmd(midi_dev, 0x38, 0); /* Enable sys common messages to PC */
- mpu_cmd(midi_dev, 0x39, 0); /* Enable real time messages to PC */
- mpu_cmd(midi_dev, 0x97, 0); /* Enable system exclusive messages to PC */
- }
- return TIMER_ARMED;
-}
-
-static int mpu_timer_open(int dev, int mode)
-{
- int midi_dev = sound_timer_devs[dev]->devlink;
- struct mpu_config *devc= &dev_conf[midi_dev];
-
- if (timer_open)
- return -EBUSY;
-
- tmr_reset(devc);
- curr_tempo = 50;
- mpu_cmd(midi_dev, 0xE0, 50);
- curr_timebase = hw_timebase = 120;
- set_timebase(midi_dev, 120);
- timer_open = 1;
- metronome_mode = 0;
- set_timer_mode(midi_dev);
-
- mpu_cmd(midi_dev, 0xe7, 0x04); /* Send all clocks to host */
- mpu_cmd(midi_dev, 0x95, 0); /* Enable clock to host */
-
- return 0;
-}
-
-static void mpu_timer_close(int dev)
-{
- int midi_dev = sound_timer_devs[dev]->devlink;
-
- timer_open = tmr_running = 0;
- mpu_cmd(midi_dev, 0x15, 0); /* Stop all */
- mpu_cmd(midi_dev, 0x94, 0); /* Disable clock to host */
- mpu_cmd(midi_dev, 0x8c, 0); /* Disable measure end messages to host */
- stop_metronome(midi_dev);
-}
-
-static int mpu_timer_event(int dev, unsigned char *event)
-{
- unsigned char command = event[1];
- unsigned long parm = *(unsigned int *) &event[4];
- int midi_dev = sound_timer_devs[dev]->devlink;
-
- switch (command)
- {
- case TMR_WAIT_REL:
- parm += prev_event_time;
- case TMR_WAIT_ABS:
- if (parm > 0)
- {
- long time;
-
- if (parm <= curr_ticks) /* It's the time */
- return TIMER_NOT_ARMED;
- time = parm;
- next_event_time = prev_event_time = time;
-
- return TIMER_ARMED;
- }
- break;
-
- case TMR_START:
- if (tmr_running)
- break;
- return mpu_start_timer(midi_dev);
-
- case TMR_STOP:
- mpu_cmd(midi_dev, 0x01, 0); /* Send MIDI stop */
- stop_metronome(midi_dev);
- tmr_running = 0;
- break;
-
- case TMR_CONTINUE:
- if (tmr_running)
- break;
- mpu_cmd(midi_dev, 0x03, 0); /* Send MIDI continue */
- setup_metronome(midi_dev);
- tmr_running = 1;
- break;
-
- case TMR_TEMPO:
- if (parm)
- {
- if (parm < 8)
- parm = 8;
- if (parm > 250)
- parm = 250;
- if (mpu_cmd(midi_dev, 0xE0, parm) < 0)
- printk(KERN_WARNING "mpu401: Can't set tempo to %d\n", (int) parm);
- curr_tempo = parm;
- }
- break;
-
- case TMR_ECHO:
- seq_copy_to_input(event, 8);
- break;
-
- case TMR_TIMESIG:
- if (metronome_mode) /* Metronome enabled */
- {
- metronome_mode = parm;
- setup_metronome(midi_dev);
- }
- break;
-
- default:;
- }
- return TIMER_NOT_ARMED;
-}
-
-static unsigned long mpu_timer_get_time(int dev)
-{
- if (!timer_open)
- return 0;
-
- return curr_ticks;
-}
-
-static int mpu_timer_ioctl(int dev, unsigned int command, void __user *arg)
-{
- int midi_dev = sound_timer_devs[dev]->devlink;
- int __user *p = (int __user *)arg;
-
- switch (command)
- {
- case SNDCTL_TMR_SOURCE:
- {
- int parm;
-
- if (get_user(parm, p))
- return -EFAULT;
- parm &= timer_caps;
-
- if (parm != 0)
- {
- timer_mode = parm;
-
- if (timer_mode & TMR_MODE_CLS)
- mpu_cmd(midi_dev, 0x3c, 0); /* Use CLS sync */
- else if (timer_mode & TMR_MODE_SMPTE)
- mpu_cmd(midi_dev, 0x3d, 0); /* Use SMPTE sync */
- }
- if (put_user(timer_mode, p))
- return -EFAULT;
- return timer_mode;
- }
- break;
-
- case SNDCTL_TMR_START:
- mpu_start_timer(midi_dev);
- return 0;
-
- case SNDCTL_TMR_STOP:
- tmr_running = 0;
- mpu_cmd(midi_dev, 0x01, 0); /* Send MIDI stop */
- stop_metronome(midi_dev);
- return 0;
-
- case SNDCTL_TMR_CONTINUE:
- if (tmr_running)
- return 0;
- tmr_running = 1;
- mpu_cmd(midi_dev, 0x03, 0); /* Send MIDI continue */
- return 0;
-
- case SNDCTL_TMR_TIMEBASE:
- {
- int val;
- if (get_user(val, p))
- return -EFAULT;
- if (val)
- set_timebase(midi_dev, val);
- if (put_user(curr_timebase, p))
- return -EFAULT;
- return curr_timebase;
- }
- break;
-
- case SNDCTL_TMR_TEMPO:
- {
- int val;
- int ret;
-
- if (get_user(val, p))
- return -EFAULT;
-
- if (val)
- {
- if (val < 8)
- val = 8;
- if (val > 250)
- val = 250;
- if ((ret = mpu_cmd(midi_dev, 0xE0, val)) < 0)
- {
- printk(KERN_WARNING "mpu401: Can't set tempo to %d\n", (int) val);
- return ret;
- }
- curr_tempo = val;
- }
- if (put_user(curr_tempo, p))
- return -EFAULT;
- return curr_tempo;
- }
- break;
-
- case SNDCTL_SEQ_CTRLRATE:
- {
- int val;
- if (get_user(val, p))
- return -EFAULT;
-
- if (val != 0) /* Can't change */
- return -EINVAL;
- val = ((curr_tempo * curr_timebase) + 30)/60;
- if (put_user(val, p))
- return -EFAULT;
- return val;
- }
- break;
-
- case SNDCTL_SEQ_GETTIME:
- if (put_user(curr_ticks, p))
- return -EFAULT;
- return curr_ticks;
-
- case SNDCTL_TMR_METRONOME:
- if (get_user(metronome_mode, p))
- return -EFAULT;
- setup_metronome(midi_dev);
- return 0;
-
- default:;
- }
- return -EINVAL;
-}
-
-static void mpu_timer_arm(int dev, long time)
-{
- if (time < 0)
- time = curr_ticks + 1;
- else if (time <= curr_ticks) /* It's the time */
- return;
- next_event_time = prev_event_time = time;
- return;
-}
-
-static struct sound_timer_operations mpu_timer =
-{
- .owner = THIS_MODULE,
- .info = {"MPU-401 Timer", 0},
- .priority = 10, /* Priority */
- .devlink = 0, /* Local device link */
- .open = mpu_timer_open,
- .close = mpu_timer_close,
- .event = mpu_timer_event,
- .get_time = mpu_timer_get_time,
- .ioctl = mpu_timer_ioctl,
- .arm_timer = mpu_timer_arm
-};
-
-static void mpu_timer_interrupt(void)
-{
- if (!timer_open)
- return;
-
- if (!tmr_running)
- return;
-
- curr_clocks++;
- curr_ticks = clocks2ticks(curr_clocks);
-
- if (curr_ticks >= next_event_time)
- {
- next_event_time = (unsigned long) -1;
- sequencer_timer(0);
- }
-}
-
-static void timer_ext_event(struct mpu_config *devc, int event, int parm)
-{
- int midi_dev = devc->devno;
-
- if (!devc->timer_flag)
- return;
-
- switch (event)
- {
- case TMR_CLOCK:
- printk("<MIDI clk>");
- break;
-
- case TMR_START:
- printk("Ext MIDI start\n");
- if (!tmr_running)
- {
- if (timer_mode & TMR_EXTERNAL)
- {
- tmr_running = 1;
- setup_metronome(midi_dev);
- next_event_time = 0;
- STORE(SEQ_START_TIMER());
- }
- }
- break;
-
- case TMR_STOP:
- printk("Ext MIDI stop\n");
- if (timer_mode & TMR_EXTERNAL)
- {
- tmr_running = 0;
- stop_metronome(midi_dev);
- STORE(SEQ_STOP_TIMER());
- }
- break;
-
- case TMR_CONTINUE:
- printk("Ext MIDI continue\n");
- if (timer_mode & TMR_EXTERNAL)
- {
- tmr_running = 1;
- setup_metronome(midi_dev);
- STORE(SEQ_CONTINUE_TIMER());
- }
- break;
-
- case TMR_SPP:
- printk("Songpos: %d\n", parm);
- if (timer_mode & TMR_EXTERNAL)
- {
- STORE(SEQ_SONGPOS(parm));
- }
- break;
- }
-}
-
-static int mpu_timer_init(int midi_dev)
-{
- struct mpu_config *devc;
- int n;
-
- devc = &dev_conf[midi_dev];
-
- if (timer_initialized)
- return -1; /* There is already a similar timer */
-
- timer_initialized = 1;
-
- mpu_timer.devlink = midi_dev;
- dev_conf[midi_dev].timer_flag = 1;
-
- n = sound_alloc_timerdev();
- if (n == -1)
- n = 0;
- sound_timer_devs[n] = &mpu_timer;
-
- if (devc->version < 0x20) /* Original MPU-401 */
- timer_caps = TMR_INTERNAL | TMR_EXTERNAL | TMR_MODE_FSK | TMR_MODE_MIDI;
- else
- {
- /*
- * The version number 2.0 is used (at least) by the
- * MusicQuest cards and the Roland Super-MPU.
- *
- * MusicQuest has given a special meaning to the bits of the
- * revision number. The Super-MPU returns 0.
- */
-
- if (devc->revision)
- timer_caps |= TMR_EXTERNAL | TMR_MODE_MIDI;
-
- if (devc->revision & 0x02)
- timer_caps |= TMR_MODE_CLS;
-
-
- if (devc->revision & 0x40)
- max_timebase = 10; /* Has the 216 and 240 ppqn modes */
- }
-
- timer_mode = (TMR_INTERNAL | TMR_MODE_MIDI) & timer_caps;
- return n;
-
-}
-
-EXPORT_SYMBOL(probe_mpu401);
-EXPORT_SYMBOL(attach_mpu401);
-EXPORT_SYMBOL(unload_mpu401);
-
-static struct address_info cfg;
-
-static int io = -1;
-static int irq = -1;
-
-module_param_hw(irq, int, irq, 0);
-module_param_hw(io, int, ioport, 0);
-
-static int __init init_mpu401(void)
-{
- int ret;
- /* Can be loaded either for module use or to provide functions
- to others */
- if (io != -1 && irq != -1) {
- struct resource *ports;
- cfg.irq = irq;
- cfg.io_base = io;
- ports = request_region(io, 2, "mpu401");
- if (!ports)
- return -EBUSY;
- if (probe_mpu401(&cfg, ports) == 0) {
- release_region(io, 2);
- return -ENODEV;
- }
- if ((ret = attach_mpu401(&cfg, THIS_MODULE)))
- return ret;
- }
-
- return 0;
-}
-
-static void __exit cleanup_mpu401(void)
-{
- if (io != -1 && irq != -1) {
- /* Check for use by, for example, sscape driver */
- unload_mpu401(&cfg);
- }
-}
-
-module_init(init_mpu401);
-module_exit(cleanup_mpu401);
-
-#ifndef MODULE
-static int __init setup_mpu401(char *str)
-{
- /* io, irq */
- int ints[3];
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- io = ints[1];
- irq = ints[2];
-
- return 1;
-}
-
-__setup("mpu401=", setup_mpu401);
-#endif
-MODULE_LICENSE("GPL");
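The MPU-401 timer code above can only program hardware timebases that are multiples of 24 ppqn, so set_timebase() picks the nearest supported multiple and clocks2ticks() rescales hardware clocks to the timebase the application actually requested, with rounding. A small worked sketch of that conversion follows; the constants are examples chosen for illustration, not values taken from the driver.

#include <stdio.h>

/*
 * Rounded scaling from hardware MIDI clocks to application ticks,
 * mirroring clocks2ticks(): ticks = clocks * app_tb / hw_tb, rounded
 * to the nearest integer by adding half the divisor first.
 */
static unsigned long to_ticks(unsigned long clocks, int app_timebase,
			      int hw_timebase)
{
	return (clocks * app_timebase + hw_timebase / 2) / hw_timebase;
}

int main(void)
{
	/*
	 * The application asks for 100 ppqn; the nearest hardware setting
	 * is (100 + 12) / 24 = 4, i.e. 96 ppqn, as in set_timebase().
	 */
	int hw_timebase = 96, app_timebase = 100;

	printf("%lu\n", to_ticks(48, app_timebase, hw_timebase));	/* 50 */
	printf("%lu\n", to_ticks(96, app_timebase, hw_timebase));	/* 100 */
	return 0;
}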
diff --git a/sound/oss/mpu401.h b/sound/oss/mpu401.h
deleted file mode 100644
index 6beb8c2ae405..000000000000
--- a/sound/oss/mpu401.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/* From uart401.c */
-int probe_uart401 (struct address_info *hw_config, struct module *owner);
-void unload_uart401 (struct address_info *hw_config);
-
-irqreturn_t uart401intr (int irq, void *dev_id);
-
-/* From mpu401.c */
-int probe_mpu401(struct address_info *hw_config, struct resource *ports);
-int attach_mpu401(struct address_info * hw_config, struct module *owner);
-void unload_mpu401(struct address_info *hw_info);
diff --git a/sound/oss/msnd.c b/sound/oss/msnd.c
deleted file mode 100644
index b63010ad22f1..000000000000
--- a/sound/oss/msnd.c
+++ /dev/null
@@ -1,413 +0,0 @@
-/*********************************************************************
- *
- * msnd.c - Driver Base
- *
- * Turtle Beach MultiSound Sound Card Driver for Linux
- *
- * Copyright (C) 1998 Andrew Veliath
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ********************************************************************/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/vmalloc.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <linux/spinlock.h>
-#include <asm/irq.h>
-#include "msnd.h"
-
-#define LOGNAME "msnd"
-
-#define MSND_MAX_DEVS 4
-
-static multisound_dev_t *devs[MSND_MAX_DEVS];
-static int num_devs;
-
-int msnd_register(multisound_dev_t *dev)
-{
- int i;
-
- for (i = 0; i < MSND_MAX_DEVS; ++i)
- if (devs[i] == NULL)
- break;
-
- if (i == MSND_MAX_DEVS)
- return -ENOMEM;
-
- devs[i] = dev;
- ++num_devs;
- return 0;
-}
-
-void msnd_unregister(multisound_dev_t *dev)
-{
- int i;
-
- for (i = 0; i < MSND_MAX_DEVS; ++i)
- if (devs[i] == dev)
- break;
-
- if (i == MSND_MAX_DEVS) {
- printk(KERN_WARNING LOGNAME ": Unregistering unknown device\n");
- return;
- }
-
- devs[i] = NULL;
- --num_devs;
-}
-
-void msnd_init_queue(void __iomem *base, int start, int size)
-{
- writew(PCTODSP_BASED(start), base + JQS_wStart);
- writew(PCTODSP_OFFSET(size) - 1, base + JQS_wSize);
- writew(0, base + JQS_wHead);
- writew(0, base + JQS_wTail);
-}
-
-void msnd_fifo_init(msnd_fifo *f)
-{
- f->data = NULL;
-}
-
-void msnd_fifo_free(msnd_fifo *f)
-{
- vfree(f->data);
- f->data = NULL;
-}
-
-int msnd_fifo_alloc(msnd_fifo *f, size_t n)
-{
- msnd_fifo_free(f);
- f->data = vmalloc(n);
- f->n = n;
- f->tail = 0;
- f->head = 0;
- f->len = 0;
-
- if (!f->data)
- return -ENOMEM;
-
- return 0;
-}
-
-void msnd_fifo_make_empty(msnd_fifo *f)
-{
- f->len = f->tail = f->head = 0;
-}
-
-int msnd_fifo_write_io(msnd_fifo *f, char __iomem *buf, size_t len)
-{
- int count = 0;
-
- while ((count < len) && (f->len != f->n)) {
-
- int nwritten;
-
- if (f->head <= f->tail) {
- nwritten = len - count;
- if (nwritten > f->n - f->tail)
- nwritten = f->n - f->tail;
- }
- else {
- nwritten = f->head - f->tail;
- if (nwritten > len - count)
- nwritten = len - count;
- }
-
- memcpy_fromio(f->data + f->tail, buf, nwritten);
-
- count += nwritten;
- buf += nwritten;
- f->len += nwritten;
- f->tail += nwritten;
- f->tail %= f->n;
- }
-
- return count;
-}
-
-int msnd_fifo_write(msnd_fifo *f, const char *buf, size_t len)
-{
- int count = 0;
-
- while ((count < len) && (f->len != f->n)) {
-
- int nwritten;
-
- if (f->head <= f->tail) {
- nwritten = len - count;
- if (nwritten > f->n - f->tail)
- nwritten = f->n - f->tail;
- }
- else {
- nwritten = f->head - f->tail;
- if (nwritten > len - count)
- nwritten = len - count;
- }
-
- memcpy(f->data + f->tail, buf, nwritten);
-
- count += nwritten;
- buf += nwritten;
- f->len += nwritten;
- f->tail += nwritten;
- f->tail %= f->n;
- }
-
- return count;
-}
-
-int msnd_fifo_read_io(msnd_fifo *f, char __iomem *buf, size_t len)
-{
- int count = 0;
-
- while ((count < len) && (f->len > 0)) {
-
- int nread;
-
- if (f->tail <= f->head) {
- nread = len - count;
- if (nread > f->n - f->head)
- nread = f->n - f->head;
- }
- else {
- nread = f->tail - f->head;
- if (nread > len - count)
- nread = len - count;
- }
-
- memcpy_toio(buf, f->data + f->head, nread);
-
- count += nread;
- buf += nread;
- f->len -= nread;
- f->head += nread;
- f->head %= f->n;
- }
-
- return count;
-}
-
-int msnd_fifo_read(msnd_fifo *f, char *buf, size_t len)
-{
- int count = 0;
-
- while ((count < len) && (f->len > 0)) {
-
- int nread;
-
- if (f->tail <= f->head) {
- nread = len - count;
- if (nread > f->n - f->head)
- nread = f->n - f->head;
- }
- else {
- nread = f->tail - f->head;
- if (nread > len - count)
- nread = len - count;
- }
-
- memcpy(buf, f->data + f->head, nread);
-
- count += nread;
- buf += nread;
- f->len -= nread;
- f->head += nread;
- f->head %= f->n;
- }
-
- return count;
-}
-
-static int msnd_wait_TXDE(multisound_dev_t *dev)
-{
- register unsigned int io = dev->io;
- register int timeout = 1000;
-
- while(timeout-- > 0)
- if (msnd_inb(io + HP_ISR) & HPISR_TXDE)
- return 0;
-
- return -EIO;
-}
-
-static int msnd_wait_HC0(multisound_dev_t *dev)
-{
- register unsigned int io = dev->io;
- register int timeout = 1000;
-
- while(timeout-- > 0)
- if (!(msnd_inb(io + HP_CVR) & HPCVR_HC))
- return 0;
-
- return -EIO;
-}
-
-int msnd_send_dsp_cmd(multisound_dev_t *dev, BYTE cmd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->lock, flags);
- if (msnd_wait_HC0(dev) == 0) {
- msnd_outb(cmd, dev->io + HP_CVR);
- spin_unlock_irqrestore(&dev->lock, flags);
- return 0;
- }
- spin_unlock_irqrestore(&dev->lock, flags);
-
- printk(KERN_DEBUG LOGNAME ": Send DSP command timeout\n");
-
- return -EIO;
-}
-
-int msnd_send_word(multisound_dev_t *dev, unsigned char high,
- unsigned char mid, unsigned char low)
-{
- register unsigned int io = dev->io;
-
- if (msnd_wait_TXDE(dev) == 0) {
- msnd_outb(high, io + HP_TXH);
- msnd_outb(mid, io + HP_TXM);
- msnd_outb(low, io + HP_TXL);
- return 0;
- }
-
- printk(KERN_DEBUG LOGNAME ": Send host word timeout\n");
-
- return -EIO;
-}
-
-int msnd_upload_host(multisound_dev_t *dev, char *bin, int len)
-{
- int i;
-
- if (len % 3 != 0) {
- printk(KERN_WARNING LOGNAME ": Upload host data not multiple of 3!\n");
- return -EINVAL;
- }
-
- for (i = 0; i < len; i += 3)
- if (msnd_send_word(dev, bin[i], bin[i + 1], bin[i + 2]) != 0)
- return -EIO;
-
- msnd_inb(dev->io + HP_RXL);
- msnd_inb(dev->io + HP_CVR);
-
- return 0;
-}
-
-int msnd_enable_irq(multisound_dev_t *dev)
-{
- unsigned long flags;
-
- if (dev->irq_ref++)
- return 0;
-
- printk(KERN_DEBUG LOGNAME ": Enabling IRQ\n");
-
- spin_lock_irqsave(&dev->lock, flags);
- if (msnd_wait_TXDE(dev) == 0) {
- msnd_outb(msnd_inb(dev->io + HP_ICR) | HPICR_TREQ, dev->io + HP_ICR);
- if (dev->type == msndClassic)
- msnd_outb(dev->irqid, dev->io + HP_IRQM);
- msnd_outb(msnd_inb(dev->io + HP_ICR) & ~HPICR_TREQ, dev->io + HP_ICR);
- msnd_outb(msnd_inb(dev->io + HP_ICR) | HPICR_RREQ, dev->io + HP_ICR);
- enable_irq(dev->irq);
- msnd_init_queue(dev->DSPQ, dev->dspq_data_buff, dev->dspq_buff_size);
- spin_unlock_irqrestore(&dev->lock, flags);
- return 0;
- }
- spin_unlock_irqrestore(&dev->lock, flags);
-
- printk(KERN_DEBUG LOGNAME ": Enable IRQ failed\n");
-
- return -EIO;
-}
-
-int msnd_disable_irq(multisound_dev_t *dev)
-{
- unsigned long flags;
-
- if (--dev->irq_ref > 0)
- return 0;
-
- if (dev->irq_ref < 0)
- printk(KERN_DEBUG LOGNAME ": IRQ ref count is %d\n", dev->irq_ref);
-
- printk(KERN_DEBUG LOGNAME ": Disabling IRQ\n");
-
- spin_lock_irqsave(&dev->lock, flags);
- if (msnd_wait_TXDE(dev) == 0) {
- msnd_outb(msnd_inb(dev->io + HP_ICR) & ~HPICR_RREQ, dev->io + HP_ICR);
- if (dev->type == msndClassic)
- msnd_outb(HPIRQ_NONE, dev->io + HP_IRQM);
- disable_irq(dev->irq);
- spin_unlock_irqrestore(&dev->lock, flags);
- return 0;
- }
- spin_unlock_irqrestore(&dev->lock, flags);
-
- printk(KERN_DEBUG LOGNAME ": Disable IRQ failed\n");
-
- return -EIO;
-}
-
-#ifndef LINUX20
-EXPORT_SYMBOL(msnd_register);
-EXPORT_SYMBOL(msnd_unregister);
-
-EXPORT_SYMBOL(msnd_init_queue);
-
-EXPORT_SYMBOL(msnd_fifo_init);
-EXPORT_SYMBOL(msnd_fifo_free);
-EXPORT_SYMBOL(msnd_fifo_alloc);
-EXPORT_SYMBOL(msnd_fifo_make_empty);
-EXPORT_SYMBOL(msnd_fifo_write_io);
-EXPORT_SYMBOL(msnd_fifo_read_io);
-EXPORT_SYMBOL(msnd_fifo_write);
-EXPORT_SYMBOL(msnd_fifo_read);
-
-EXPORT_SYMBOL(msnd_send_dsp_cmd);
-EXPORT_SYMBOL(msnd_send_word);
-EXPORT_SYMBOL(msnd_upload_host);
-
-EXPORT_SYMBOL(msnd_enable_irq);
-EXPORT_SYMBOL(msnd_disable_irq);
-#endif
-
-#ifdef MODULE
-MODULE_AUTHOR ("Andrew Veliath <andrewtv@usa.net>");
-MODULE_DESCRIPTION ("Turtle Beach MultiSound Driver Base");
-MODULE_LICENSE("GPL");
-
-
-int init_module(void)
-{
- return 0;
-}
-
-void cleanup_module(void)
-{
-}
-#endif
diff --git a/sound/oss/msnd.h b/sound/oss/msnd.h
deleted file mode 100644
index c8be47ec2b7e..000000000000
--- a/sound/oss/msnd.h
+++ /dev/null
@@ -1,278 +0,0 @@
-/*********************************************************************
- *
- * msnd.h
- *
- * Turtle Beach MultiSound Sound Card Driver for Linux
- *
- * Some parts of this header file were derived from the Turtle Beach
- * MultiSound Driver Development Kit.
- *
- * Copyright (C) 1998 Andrew Veliath
- * Copyright (C) 1993 Turtle Beach Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ********************************************************************/
-#ifndef __MSND_H
-#define __MSND_H
-
-#define VERSION "0.8.3.1"
-
-#define DEFSAMPLERATE DSP_DEFAULT_SPEED
-#define DEFSAMPLESIZE AFMT_U8
-#define DEFCHANNELS 1
-
-#define DEFFIFOSIZE 128
-
-#define SNDCARD_MSND 38
-
-#define SRAM_BANK_SIZE 0x8000
-#define SRAM_CNTL_START 0x7F00
-
-#define DSP_BASE_ADDR 0x4000
-#define DSP_BANK_BASE 0x4000
-
-#define HP_ICR 0x00
-#define HP_CVR 0x01
-#define HP_ISR 0x02
-#define HP_IVR 0x03
-#define HP_NU 0x04
-#define HP_INFO 0x04
-#define HP_TXH 0x05
-#define HP_RXH 0x05
-#define HP_TXM 0x06
-#define HP_RXM 0x06
-#define HP_TXL 0x07
-#define HP_RXL 0x07
-
-#define HP_ICR_DEF 0x00
-#define HP_CVR_DEF 0x12
-#define HP_ISR_DEF 0x06
-#define HP_IVR_DEF 0x0f
-#define HP_NU_DEF 0x00
-
-#define HP_IRQM 0x09
-
-#define HPR_BLRC 0x08
-#define HPR_SPR1 0x09
-#define HPR_SPR2 0x0A
-#define HPR_TCL0 0x0B
-#define HPR_TCL1 0x0C
-#define HPR_TCL2 0x0D
-#define HPR_TCL3 0x0E
-#define HPR_TCL4 0x0F
-
-#define HPICR_INIT 0x80
-#define HPICR_HM1 0x40
-#define HPICR_HM0 0x20
-#define HPICR_HF1 0x10
-#define HPICR_HF0 0x08
-#define HPICR_TREQ 0x02
-#define HPICR_RREQ 0x01
-
-#define HPCVR_HC 0x80
-
-#define HPISR_HREQ 0x80
-#define HPISR_DMA 0x40
-#define HPISR_HF3 0x10
-#define HPISR_HF2 0x08
-#define HPISR_TRDY 0x04
-#define HPISR_TXDE 0x02
-#define HPISR_RXDF 0x01
-
-#define HPIO_290 0
-#define HPIO_260 1
-#define HPIO_250 2
-#define HPIO_240 3
-#define HPIO_230 4
-#define HPIO_220 5
-#define HPIO_210 6
-#define HPIO_3E0 7
-
-#define HPMEM_NONE 0
-#define HPMEM_B000 1
-#define HPMEM_C800 2
-#define HPMEM_D000 3
-#define HPMEM_D400 4
-#define HPMEM_D800 5
-#define HPMEM_E000 6
-#define HPMEM_E800 7
-
-#define HPIRQ_NONE 0
-#define HPIRQ_5 1
-#define HPIRQ_7 2
-#define HPIRQ_9 3
-#define HPIRQ_10 4
-#define HPIRQ_11 5
-#define HPIRQ_12 6
-#define HPIRQ_15 7
-
-#define HIMT_PLAY_DONE 0x00
-#define HIMT_RECORD_DONE 0x01
-#define HIMT_MIDI_EOS 0x02
-#define HIMT_MIDI_OUT 0x03
-
-#define HIMT_MIDI_IN_UCHAR 0x0E
-#define HIMT_DSP 0x0F
-
-#define HDEX_BASE 0x92
-#define HDEX_PLAY_START (0 + HDEX_BASE)
-#define HDEX_PLAY_STOP (1 + HDEX_BASE)
-#define HDEX_PLAY_PAUSE (2 + HDEX_BASE)
-#define HDEX_PLAY_RESUME (3 + HDEX_BASE)
-#define HDEX_RECORD_START (4 + HDEX_BASE)
-#define HDEX_RECORD_STOP (5 + HDEX_BASE)
-#define HDEX_MIDI_IN_START (6 + HDEX_BASE)
-#define HDEX_MIDI_IN_STOP (7 + HDEX_BASE)
-#define HDEX_MIDI_OUT_START (8 + HDEX_BASE)
-#define HDEX_MIDI_OUT_STOP (9 + HDEX_BASE)
-#define HDEX_AUX_REQ (10 + HDEX_BASE)
-
-#define HIWORD(l) ((WORD)((((DWORD)(l)) >> 16) & 0xFFFF))
-#define LOWORD(l) ((WORD)(DWORD)(l))
-#define HIBYTE(w) ((BYTE)(((WORD)(w) >> 8) & 0xFF))
-#define LOBYTE(w) ((BYTE)(w))
-#define MAKELONG(low,hi) ((long)(((WORD)(low))|(((DWORD)((WORD)(hi)))<<16)))
-#define MAKEWORD(low,hi) ((WORD)(((BYTE)(low))|(((WORD)((BYTE)(hi)))<<8)))
-
-#define PCTODSP_OFFSET(w) (USHORT)((w)/2)
-#define PCTODSP_BASED(w) (USHORT)(((w)/2) + DSP_BASE_ADDR)
-#define DSPTOPC_BASED(w) (((w) - DSP_BASE_ADDR) * 2)
-
-#ifdef SLOWIO
-#define msnd_outb outb_p
-#define msnd_inb inb_p
-#else
-#define msnd_outb outb
-#define msnd_inb inb
-#endif
-
-/* JobQueueStruct */
-#define JQS_wStart 0x00
-#define JQS_wSize 0x02
-#define JQS_wHead 0x04
-#define JQS_wTail 0x06
-#define JQS__size 0x08
-
-/* DAQueueDataStruct */
-#define DAQDS_wStart 0x00
-#define DAQDS_wSize 0x02
-#define DAQDS_wFormat 0x04
-#define DAQDS_wSampleSize 0x06
-#define DAQDS_wChannels 0x08
-#define DAQDS_wSampleRate 0x0A
-#define DAQDS_wIntMsg 0x0C
-#define DAQDS_wFlags 0x0E
-#define DAQDS__size 0x10
-
-typedef u8 BYTE;
-typedef u16 USHORT;
-typedef u16 WORD;
-typedef u32 DWORD;
-typedef void __iomem * LPDAQD;
-
-/* Generic FIFO */
-typedef struct {
- size_t n, len;
- char *data;
- int head, tail;
-} msnd_fifo;
-
-typedef struct multisound_dev {
- /* Linux device info */
- char *name;
- int dsp_minor, mixer_minor;
- int ext_midi_dev, hdr_midi_dev;
-
- /* Hardware resources */
- int io, numio;
- int memid, irqid;
- int irq, irq_ref;
- unsigned char info;
- void __iomem *base;
-
- /* Motorola 56k DSP SMA */
- void __iomem *SMA;
- void __iomem *DAPQ, *DARQ, *MODQ, *MIDQ, *DSPQ;
- void __iomem *pwDSPQData, *pwMIDQData, *pwMODQData;
- int dspq_data_buff, dspq_buff_size;
-
- /* State variables */
- enum { msndClassic, msndPinnacle } type;
- fmode_t mode;
- unsigned long flags;
-#define F_RESETTING 0
-#define F_HAVEDIGITAL 1
-#define F_AUDIO_WRITE_INUSE 2
-#define F_WRITING 3
-#define F_WRITEBLOCK 4
-#define F_WRITEFLUSH 5
-#define F_AUDIO_READ_INUSE 6
-#define F_READING 7
-#define F_READBLOCK 8
-#define F_EXT_MIDI_INUSE 9
-#define F_HDR_MIDI_INUSE 10
-#define F_DISABLE_WRITE_NDELAY 11
- wait_queue_head_t writeblock;
- wait_queue_head_t readblock;
- wait_queue_head_t writeflush;
- spinlock_t lock;
- int nresets;
- unsigned long recsrc;
- int left_levels[32];
- int right_levels[32];
- int mixer_mod_count;
- int calibrate_signal;
- int play_sample_size, play_sample_rate, play_channels;
- int play_ndelay;
- int rec_sample_size, rec_sample_rate, rec_channels;
- int rec_ndelay;
- BYTE bCurrentMidiPatch;
-
- /* Digital audio FIFOs */
- msnd_fifo DAPF, DARF;
- int fifosize;
- int last_playbank, last_recbank;
-
- /* MIDI in callback */
- void (*midi_in_interrupt)(struct multisound_dev *);
-} multisound_dev_t;
-
-#ifndef mdelay
-# define mdelay(a) udelay((a) * 1000)
-#endif
-
-int msnd_register(multisound_dev_t *dev);
-void msnd_unregister(multisound_dev_t *dev);
-
-void msnd_init_queue(void __iomem *, int start, int size);
-
-void msnd_fifo_init(msnd_fifo *f);
-void msnd_fifo_free(msnd_fifo *f);
-int msnd_fifo_alloc(msnd_fifo *f, size_t n);
-void msnd_fifo_make_empty(msnd_fifo *f);
-int msnd_fifo_write_io(msnd_fifo *f, char __iomem *buf, size_t len);
-int msnd_fifo_read_io(msnd_fifo *f, char __iomem *buf, size_t len);
-int msnd_fifo_write(msnd_fifo *f, const char *buf, size_t len);
-int msnd_fifo_read(msnd_fifo *f, char *buf, size_t len);
-
-int msnd_send_dsp_cmd(multisound_dev_t *dev, BYTE cmd);
-int msnd_send_word(multisound_dev_t *dev, unsigned char high,
- unsigned char mid, unsigned char low);
-int msnd_upload_host(multisound_dev_t *dev, char *bin, int len);
-int msnd_enable_irq(multisound_dev_t *dev);
-int msnd_disable_irq(multisound_dev_t *dev);
-
-#endif /* __MSND_H */
diff --git a/sound/oss/msnd_classic.c b/sound/oss/msnd_classic.c
deleted file mode 100644
index 3b23a096fa4e..000000000000
--- a/sound/oss/msnd_classic.c
+++ /dev/null
@@ -1,3 +0,0 @@
-/* The work is in msnd_pinnacle.c, just define MSND_CLASSIC before it. */
-#define MSND_CLASSIC
-#include "msnd_pinnacle.c"
diff --git a/sound/oss/msnd_classic.h b/sound/oss/msnd_classic.h
deleted file mode 100644
index 1a17dde2f650..000000000000
--- a/sound/oss/msnd_classic.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/*********************************************************************
- *
- * msnd_classic.h
- *
- * Turtle Beach MultiSound Sound Card Driver for Linux
- *
- * Some parts of this header file were derived from the Turtle Beach
- * MultiSound Driver Development Kit.
- *
- * Copyright (C) 1998 Andrew Veliath
- * Copyright (C) 1993 Turtle Beach Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ********************************************************************/
-#ifndef __MSND_CLASSIC_H
-#define __MSND_CLASSIC_H
-
-
-#define DSP_NUMIO 0x10
-
-#define HP_MEMM 0x08
-
-#define HP_BITM 0x0E
-#define HP_WAIT 0x0D
-#define HP_DSPR 0x0A
-#define HP_PROR 0x0B
-#define HP_BLKS 0x0C
-
-#define HPPRORESET_OFF 0
-#define HPPRORESET_ON 1
-
-#define HPDSPRESET_OFF 0
-#define HPDSPRESET_ON 1
-
-#define HPBLKSEL_0 0
-#define HPBLKSEL_1 1
-
-#define HPWAITSTATE_0 0
-#define HPWAITSTATE_1 1
-
-#define HPBITMODE_16 0
-#define HPBITMODE_8 1
-
-#define HIDSP_INT_PLAY_UNDER 0x00
-#define HIDSP_INT_RECORD_OVER 0x01
-#define HIDSP_INPUT_CLIPPING 0x02
-#define HIDSP_MIDI_IN_OVER 0x10
-#define HIDSP_MIDI_OVERRUN_ERR 0x13
-
-#define HDEXAR_CLEAR_PEAKS 1
-#define HDEXAR_IN_SET_POTS 2
-#define HDEXAR_AUX_SET_POTS 3
-#define HDEXAR_CAL_A_TO_D 4
-#define HDEXAR_RD_EXT_DSP_BITS 5
-
-#define TIME_PRO_RESET_DONE 0x028A
-#define TIME_PRO_SYSEX 0x0040
-#define TIME_PRO_RESET 0x0032
-
-#define AGND 0x01
-#define SIGNAL 0x02
-
-#define EXT_DSP_BIT_DCAL 0x0001
-#define EXT_DSP_BIT_MIDI_CON 0x0002
-
-#define BUFFSIZE 0x8000
-#define HOSTQ_SIZE 0x40
-
-#define SRAM_CNTL_START 0x7F00
-#define SMA_STRUCT_START 0x7F40
-
-#define DAP_BUFF_SIZE 0x2400
-#define DAR_BUFF_SIZE 0x2000
-
-#define DAPQ_STRUCT_SIZE 0x10
-#define DARQ_STRUCT_SIZE 0x10
-#define DAPQ_BUFF_SIZE (3 * 0x10)
-#define DARQ_BUFF_SIZE (3 * 0x10)
-#define MODQ_BUFF_SIZE 0x400
-#define MIDQ_BUFF_SIZE 0x200
-#define DSPQ_BUFF_SIZE 0x40
-
-#define DAPQ_DATA_BUFF 0x6C00
-#define DARQ_DATA_BUFF 0x6C30
-#define MODQ_DATA_BUFF 0x6C60
-#define MIDQ_DATA_BUFF 0x7060
-#define DSPQ_DATA_BUFF 0x7260
-
-#define DAPQ_OFFSET SRAM_CNTL_START
-#define DARQ_OFFSET (SRAM_CNTL_START + 0x08)
-#define MODQ_OFFSET (SRAM_CNTL_START + 0x10)
-#define MIDQ_OFFSET (SRAM_CNTL_START + 0x18)
-#define DSPQ_OFFSET (SRAM_CNTL_START + 0x20)
-
-#define MOP_SYNTH 0x10
-#define MOP_EXTOUT 0x32
-#define MOP_EXTTHRU 0x02
-#define MOP_OUTMASK 0x01
-
-#define MIP_EXTIN 0x01
-#define MIP_SYNTH 0x00
-#define MIP_INMASK 0x32
-
-/* Classic SMA Common Data */
-#define SMA_wCurrPlayBytes 0x0000
-#define SMA_wCurrRecordBytes 0x0002
-#define SMA_wCurrPlayVolLeft 0x0004
-#define SMA_wCurrPlayVolRight 0x0006
-#define SMA_wCurrInVolLeft 0x0008
-#define SMA_wCurrInVolRight 0x000a
-#define SMA_wUser_3 0x000c
-#define SMA_wUser_4 0x000e
-#define SMA_dwUser_5 0x0010
-#define SMA_dwUser_6 0x0014
-#define SMA_wUser_7 0x0018
-#define SMA_wReserved_A 0x001a
-#define SMA_wReserved_B 0x001c
-#define SMA_wReserved_C 0x001e
-#define SMA_wReserved_D 0x0020
-#define SMA_wReserved_E 0x0022
-#define SMA_wReserved_F 0x0024
-#define SMA_wReserved_G 0x0026
-#define SMA_wReserved_H 0x0028
-#define SMA_wCurrDSPStatusFlags 0x002a
-#define SMA_wCurrHostStatusFlags 0x002c
-#define SMA_wCurrInputTagBits 0x002e
-#define SMA_wCurrLeftPeak 0x0030
-#define SMA_wCurrRightPeak 0x0032
-#define SMA_wExtDSPbits 0x0034
-#define SMA_bExtHostbits 0x0036
-#define SMA_bBoardLevel 0x0037
-#define SMA_bInPotPosRight 0x0038
-#define SMA_bInPotPosLeft 0x0039
-#define SMA_bAuxPotPosRight 0x003a
-#define SMA_bAuxPotPosLeft 0x003b
-#define SMA_wCurrMastVolLeft 0x003c
-#define SMA_wCurrMastVolRight 0x003e
-#define SMA_bUser_12 0x0040
-#define SMA_bUser_13 0x0041
-#define SMA_wUser_14 0x0042
-#define SMA_wUser_15 0x0044
-#define SMA_wCalFreqAtoD 0x0046
-#define SMA_wUser_16 0x0048
-#define SMA_wUser_17 0x004a
-#define SMA__size 0x004c
-
-#ifdef HAVE_DSPCODEH
-# include "msndperm.c"
-# include "msndinit.c"
-# define PERMCODE msndperm
-# define INITCODE msndinit
-# define PERMCODESIZE sizeof(msndperm)
-# define INITCODESIZE sizeof(msndinit)
-#else
-# ifndef CONFIG_MSNDCLAS_INIT_FILE
-# define CONFIG_MSNDCLAS_INIT_FILE \
- "/etc/sound/msndinit.bin"
-# endif
-# ifndef CONFIG_MSNDCLAS_PERM_FILE
-# define CONFIG_MSNDCLAS_PERM_FILE \
- "/etc/sound/msndperm.bin"
-# endif
-# define PERMCODEFILE CONFIG_MSNDCLAS_PERM_FILE
-# define INITCODEFILE CONFIG_MSNDCLAS_INIT_FILE
-# define PERMCODE dspini
-# define INITCODE permini
-# define PERMCODESIZE sizeof_dspini
-# define INITCODESIZE sizeof_permini
-#endif
-#define LONGNAME "MultiSound (Classic/Monterey/Tahiti)"
-
-#endif /* __MSND_CLASSIC_H */
diff --git a/sound/oss/msnd_pinnacle.c b/sound/oss/msnd_pinnacle.c
deleted file mode 100644
index d2abc2cf3213..000000000000
--- a/sound/oss/msnd_pinnacle.c
+++ /dev/null
@@ -1,1941 +0,0 @@
-/*********************************************************************
- *
- * Turtle Beach MultiSound Sound Card Driver for Linux
- * Linux 2.0/2.2 Version
- *
- * msnd_pinnacle.c / msnd_classic.c
- *
- * -- If MSND_CLASSIC is defined:
- *
- * -> driver for Turtle Beach Classic/Monterey/Tahiti
- *
- * -- Else
- *
- * -> driver for Turtle Beach Pinnacle/Fiji
- *
- * Copyright (C) 1998 Andrew Veliath
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * 12-3-2000 Modified IO port validation Steve Sycamore
- *
- ********************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/gfp.h>
-#include <linux/sched/signal.h>
-
-#include <asm/irq.h>
-#include <asm/io.h>
-#include "sound_config.h"
-#include "sound_firmware.h"
-#ifdef MSND_CLASSIC
-# ifndef __alpha__
-# define SLOWIO
-# endif
-#endif
-#include "msnd.h"
-#ifdef MSND_CLASSIC
-# ifdef CONFIG_MSNDCLAS_HAVE_BOOT
-# define HAVE_DSPCODEH
-# endif
-# include "msnd_classic.h"
-# define LOGNAME "msnd_classic"
-#else
-# ifdef CONFIG_MSNDPIN_HAVE_BOOT
-# define HAVE_DSPCODEH
-# endif
-# include "msnd_pinnacle.h"
-# define LOGNAME "msnd_pinnacle"
-#endif
-
-#ifndef CONFIG_MSND_WRITE_NDELAY
-# define CONFIG_MSND_WRITE_NDELAY 1
-#endif
-
-#define get_play_delay_jiffies(size) ((size) * HZ * \
- dev.play_sample_size / 8 / \
- dev.play_sample_rate / \
- dev.play_channels)
-
-#define get_rec_delay_jiffies(size) ((size) * HZ * \
- dev.rec_sample_size / 8 / \
- dev.rec_sample_rate / \
- dev.rec_channels)
-
-static DEFINE_MUTEX(msnd_pinnacle_mutex);
-static multisound_dev_t dev;
-
-#ifndef HAVE_DSPCODEH
-static char *dspini, *permini;
-static int sizeof_dspini, sizeof_permini;
-#endif
-
-static int dsp_full_reset(void);
-static void dsp_write_flush(void);
-
-static __inline__ int chk_send_dsp_cmd(multisound_dev_t *dev, register BYTE cmd)
-{
- if (msnd_send_dsp_cmd(dev, cmd) == 0)
- return 0;
- dsp_full_reset();
- return msnd_send_dsp_cmd(dev, cmd);
-}
-
-static void reset_play_queue(void)
-{
- int n;
- LPDAQD lpDAQ;
-
- dev.last_playbank = -1;
- writew(PCTODSP_OFFSET(0 * DAQDS__size), dev.DAPQ + JQS_wHead);
- writew(PCTODSP_OFFSET(0 * DAQDS__size), dev.DAPQ + JQS_wTail);
-
- for (n = 0, lpDAQ = dev.base + DAPQ_DATA_BUFF; n < 3; ++n, lpDAQ += DAQDS__size) {
- writew(PCTODSP_BASED((DWORD)(DAP_BUFF_SIZE * n)), lpDAQ + DAQDS_wStart);
- writew(0, lpDAQ + DAQDS_wSize);
- writew(1, lpDAQ + DAQDS_wFormat);
- writew(dev.play_sample_size, lpDAQ + DAQDS_wSampleSize);
- writew(dev.play_channels, lpDAQ + DAQDS_wChannels);
- writew(dev.play_sample_rate, lpDAQ + DAQDS_wSampleRate);
- writew(HIMT_PLAY_DONE * 0x100 + n, lpDAQ + DAQDS_wIntMsg);
- writew(n, lpDAQ + DAQDS_wFlags);
- }
-}
-
-static void reset_record_queue(void)
-{
- int n;
- LPDAQD lpDAQ;
- unsigned long flags;
-
- dev.last_recbank = 2;
- writew(PCTODSP_OFFSET(0 * DAQDS__size), dev.DARQ + JQS_wHead);
- writew(PCTODSP_OFFSET(dev.last_recbank * DAQDS__size), dev.DARQ + JQS_wTail);
-
- /* Critical section: bank 1 access */
- spin_lock_irqsave(&dev.lock, flags);
- msnd_outb(HPBLKSEL_1, dev.io + HP_BLKS);
- memset_io(dev.base, 0, DAR_BUFF_SIZE * 3);
- msnd_outb(HPBLKSEL_0, dev.io + HP_BLKS);
- spin_unlock_irqrestore(&dev.lock, flags);
-
- for (n = 0, lpDAQ = dev.base + DARQ_DATA_BUFF; n < 3; ++n, lpDAQ += DAQDS__size) {
- writew(PCTODSP_BASED((DWORD)(DAR_BUFF_SIZE * n)) + 0x4000, lpDAQ + DAQDS_wStart);
- writew(DAR_BUFF_SIZE, lpDAQ + DAQDS_wSize);
- writew(1, lpDAQ + DAQDS_wFormat);
- writew(dev.rec_sample_size, lpDAQ + DAQDS_wSampleSize);
- writew(dev.rec_channels, lpDAQ + DAQDS_wChannels);
- writew(dev.rec_sample_rate, lpDAQ + DAQDS_wSampleRate);
- writew(HIMT_RECORD_DONE * 0x100 + n, lpDAQ + DAQDS_wIntMsg);
- writew(n, lpDAQ + DAQDS_wFlags);
- }
-}
-
-static void reset_queues(void)
-{
- if (dev.mode & FMODE_WRITE) {
- msnd_fifo_make_empty(&dev.DAPF);
- reset_play_queue();
- }
- if (dev.mode & FMODE_READ) {
- msnd_fifo_make_empty(&dev.DARF);
- reset_record_queue();
- }
-}
-
-static int dsp_set_format(struct file *file, int val)
-{
- int data, i;
- LPDAQD lpDAQ, lpDARQ;
-
- lpDAQ = dev.base + DAPQ_DATA_BUFF;
- lpDARQ = dev.base + DARQ_DATA_BUFF;
-
- switch (val) {
- case AFMT_U8:
- case AFMT_S16_LE:
- data = val;
- break;
- default:
- data = DEFSAMPLESIZE;
- break;
- }
-
- for (i = 0; i < 3; ++i, lpDAQ += DAQDS__size, lpDARQ += DAQDS__size) {
- if (file->f_mode & FMODE_WRITE)
- writew(data, lpDAQ + DAQDS_wSampleSize);
- if (file->f_mode & FMODE_READ)
- writew(data, lpDARQ + DAQDS_wSampleSize);
- }
- if (file->f_mode & FMODE_WRITE)
- dev.play_sample_size = data;
- if (file->f_mode & FMODE_READ)
- dev.rec_sample_size = data;
-
- return data;
-}
-
-static int dsp_get_frag_size(void)
-{
- int size;
- size = dev.fifosize / 4;
- if (size > 32 * 1024)
- size = 32 * 1024;
- return size;
-}
-
-static int dsp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int val, i, data, tmp;
- LPDAQD lpDAQ, lpDARQ;
- audio_buf_info abinfo;
- unsigned long flags;
- int __user *p = (int __user *)arg;
-
- lpDAQ = dev.base + DAPQ_DATA_BUFF;
- lpDARQ = dev.base + DARQ_DATA_BUFF;
-
- switch (cmd) {
- case SNDCTL_DSP_SUBDIVIDE:
- case SNDCTL_DSP_SETFRAGMENT:
- case SNDCTL_DSP_SETDUPLEX:
- case SNDCTL_DSP_POST:
- return 0;
-
- case SNDCTL_DSP_GETIPTR:
- case SNDCTL_DSP_GETOPTR:
- case SNDCTL_DSP_MAPINBUF:
- case SNDCTL_DSP_MAPOUTBUF:
- return -EINVAL;
-
- case SNDCTL_DSP_GETOSPACE:
- if (!(file->f_mode & FMODE_WRITE))
- return -EINVAL;
- spin_lock_irqsave(&dev.lock, flags);
- abinfo.fragsize = dsp_get_frag_size();
- abinfo.bytes = dev.DAPF.n - dev.DAPF.len;
- abinfo.fragstotal = dev.DAPF.n / abinfo.fragsize;
- abinfo.fragments = abinfo.bytes / abinfo.fragsize;
- spin_unlock_irqrestore(&dev.lock, flags);
- return copy_to_user((void __user *)arg, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
-
- case SNDCTL_DSP_GETISPACE:
- if (!(file->f_mode & FMODE_READ))
- return -EINVAL;
- spin_lock_irqsave(&dev.lock, flags);
- abinfo.fragsize = dsp_get_frag_size();
- abinfo.bytes = dev.DARF.n - dev.DARF.len;
- abinfo.fragstotal = dev.DARF.n / abinfo.fragsize;
- abinfo.fragments = abinfo.bytes / abinfo.fragsize;
- spin_unlock_irqrestore(&dev.lock, flags);
- return copy_to_user((void __user *)arg, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
-
- case SNDCTL_DSP_RESET:
- dev.nresets = 0;
- reset_queues();
- return 0;
-
- case SNDCTL_DSP_SYNC:
- dsp_write_flush();
- return 0;
-
- case SNDCTL_DSP_GETBLKSIZE:
- tmp = dsp_get_frag_size();
- if (put_user(tmp, p))
- return -EFAULT;
- return 0;
-
- case SNDCTL_DSP_GETFMTS:
- val = AFMT_S16_LE | AFMT_U8;
- if (put_user(val, p))
- return -EFAULT;
- return 0;
-
- case SNDCTL_DSP_SETFMT:
- if (get_user(val, p))
- return -EFAULT;
-
- if (file->f_mode & FMODE_WRITE)
- data = val == AFMT_QUERY
- ? dev.play_sample_size
- : dsp_set_format(file, val);
- else
- data = val == AFMT_QUERY
- ? dev.rec_sample_size
- : dsp_set_format(file, val);
-
- if (put_user(data, p))
- return -EFAULT;
- return 0;
-
- case SNDCTL_DSP_NONBLOCK:
- if (!test_bit(F_DISABLE_WRITE_NDELAY, &dev.flags) &&
- file->f_mode & FMODE_WRITE)
- dev.play_ndelay = 1;
- if (file->f_mode & FMODE_READ)
- dev.rec_ndelay = 1;
- return 0;
-
- case SNDCTL_DSP_GETCAPS:
- val = DSP_CAP_DUPLEX | DSP_CAP_BATCH;
- if (put_user(val, p))
- return -EFAULT;
- return 0;
-
- case SNDCTL_DSP_SPEED:
- if (get_user(val, p))
- return -EFAULT;
-
- if (val < 8000)
- val = 8000;
-
- if (val > 48000)
- val = 48000;
-
- data = val;
-
- for (i = 0; i < 3; ++i, lpDAQ += DAQDS__size, lpDARQ += DAQDS__size) {
- if (file->f_mode & FMODE_WRITE)
- writew(data, lpDAQ + DAQDS_wSampleRate);
- if (file->f_mode & FMODE_READ)
- writew(data, lpDARQ + DAQDS_wSampleRate);
- }
- if (file->f_mode & FMODE_WRITE)
- dev.play_sample_rate = data;
- if (file->f_mode & FMODE_READ)
- dev.rec_sample_rate = data;
-
- if (put_user(data, p))
- return -EFAULT;
- return 0;
-
- case SNDCTL_DSP_CHANNELS:
- case SNDCTL_DSP_STEREO:
- if (get_user(val, p))
- return -EFAULT;
-
- if (cmd == SNDCTL_DSP_CHANNELS) {
- switch (val) {
- case 1:
- case 2:
- data = val;
- break;
- default:
- val = data = 2;
- break;
- }
- } else {
- switch (val) {
- case 0:
- data = 1;
- break;
- default:
- val = 1;
- case 1:
- data = 2;
- break;
- }
- }
-
- for (i = 0; i < 3; ++i, lpDAQ += DAQDS__size, lpDARQ += DAQDS__size) {
- if (file->f_mode & FMODE_WRITE)
- writew(data, lpDAQ + DAQDS_wChannels);
- if (file->f_mode & FMODE_READ)
- writew(data, lpDARQ + DAQDS_wChannels);
- }
- if (file->f_mode & FMODE_WRITE)
- dev.play_channels = data;
- if (file->f_mode & FMODE_READ)
- dev.rec_channels = data;
-
- if (put_user(val, p))
- return -EFAULT;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int mixer_get(int d)
-{
- if (d > 31)
- return -EINVAL;
-
- switch (d) {
- case SOUND_MIXER_VOLUME:
- case SOUND_MIXER_PCM:
- case SOUND_MIXER_LINE:
- case SOUND_MIXER_IMIX:
- case SOUND_MIXER_LINE1:
-#ifndef MSND_CLASSIC
- case SOUND_MIXER_MIC:
- case SOUND_MIXER_SYNTH:
-#endif
- return (dev.left_levels[d] >> 8) * 100 / 0xff |
- (((dev.right_levels[d] >> 8) * 100 / 0xff) << 8);
- default:
- return 0;
- }
-}
-
-#define update_volm(a,b) \
- writew((dev.left_levels[a] >> 1) * \
- readw(dev.SMA + SMA_wCurrMastVolLeft) / 0xffff, \
- dev.SMA + SMA_##b##Left); \
- writew((dev.right_levels[a] >> 1) * \
- readw(dev.SMA + SMA_wCurrMastVolRight) / 0xffff, \
- dev.SMA + SMA_##b##Right);
-
-#define update_potm(d,s,ar) \
- writeb((dev.left_levels[d] >> 8) * \
- readw(dev.SMA + SMA_wCurrMastVolLeft) / 0xffff, \
- dev.SMA + SMA_##s##Left); \
- writeb((dev.right_levels[d] >> 8) * \
- readw(dev.SMA + SMA_wCurrMastVolRight) / 0xffff, \
- dev.SMA + SMA_##s##Right); \
- if (msnd_send_word(&dev, 0, 0, ar) == 0) \
- chk_send_dsp_cmd(&dev, HDEX_AUX_REQ);
-
-#define update_pot(d,s,ar) \
- writeb(dev.left_levels[d] >> 8, \
- dev.SMA + SMA_##s##Left); \
- writeb(dev.right_levels[d] >> 8, \
- dev.SMA + SMA_##s##Right); \
- if (msnd_send_word(&dev, 0, 0, ar) == 0) \
- chk_send_dsp_cmd(&dev, HDEX_AUX_REQ);
-
-static int mixer_set(int d, int value)
-{
- int left = value & 0x000000ff;
- int right = (value & 0x0000ff00) >> 8;
- int bLeft, bRight;
- int wLeft, wRight;
- int updatemaster = 0;
-
- if (d > 31)
- return -EINVAL;
-
- bLeft = left * 0xff / 100;
- wLeft = left * 0xffff / 100;
-
- bRight = right * 0xff / 100;
- wRight = right * 0xffff / 100;
-
- dev.left_levels[d] = wLeft;
- dev.right_levels[d] = wRight;
-
- switch (d) {
- /* master volume unscaled controls */
- case SOUND_MIXER_LINE: /* line pot control */
- /* scaled by IMIX in digital mix */
- writeb(bLeft, dev.SMA + SMA_bInPotPosLeft);
- writeb(bRight, dev.SMA + SMA_bInPotPosRight);
- if (msnd_send_word(&dev, 0, 0, HDEXAR_IN_SET_POTS) == 0)
- chk_send_dsp_cmd(&dev, HDEX_AUX_REQ);
- break;
-#ifndef MSND_CLASSIC
- case SOUND_MIXER_MIC: /* mic pot control */
- /* scaled by IMIX in digital mix */
- writeb(bLeft, dev.SMA + SMA_bMicPotPosLeft);
- writeb(bRight, dev.SMA + SMA_bMicPotPosRight);
- if (msnd_send_word(&dev, 0, 0, HDEXAR_MIC_SET_POTS) == 0)
- chk_send_dsp_cmd(&dev, HDEX_AUX_REQ);
- break;
-#endif
- case SOUND_MIXER_VOLUME: /* master volume */
- writew(wLeft, dev.SMA + SMA_wCurrMastVolLeft);
- writew(wRight, dev.SMA + SMA_wCurrMastVolRight);
- /* fall through */
-
- case SOUND_MIXER_LINE1: /* aux pot control */
- /* scaled by master volume */
- /* fall through */
-
- /* digital controls */
- case SOUND_MIXER_SYNTH: /* synth vol (dsp mix) */
- case SOUND_MIXER_PCM: /* pcm vol (dsp mix) */
- case SOUND_MIXER_IMIX: /* input monitor (dsp mix) */
- /* scaled by master volume */
- updatemaster = 1;
- break;
-
- default:
- return 0;
- }
-
- if (updatemaster) {
- /* update master volume scaled controls */
- update_volm(SOUND_MIXER_PCM, wCurrPlayVol);
- update_volm(SOUND_MIXER_IMIX, wCurrInVol);
-#ifndef MSND_CLASSIC
- update_volm(SOUND_MIXER_SYNTH, wCurrMHdrVol);
-#endif
- update_potm(SOUND_MIXER_LINE1, bAuxPotPos, HDEXAR_AUX_SET_POTS);
- }
-
- return mixer_get(d);
-}
-
-static void mixer_setup(void)
-{
- update_pot(SOUND_MIXER_LINE, bInPotPos, HDEXAR_IN_SET_POTS);
- update_potm(SOUND_MIXER_LINE1, bAuxPotPos, HDEXAR_AUX_SET_POTS);
- update_volm(SOUND_MIXER_PCM, wCurrPlayVol);
- update_volm(SOUND_MIXER_IMIX, wCurrInVol);
-#ifndef MSND_CLASSIC
- update_pot(SOUND_MIXER_MIC, bMicPotPos, HDEXAR_MIC_SET_POTS);
- update_volm(SOUND_MIXER_SYNTH, wCurrMHdrVol);
-#endif
-}
-
-static unsigned long set_recsrc(unsigned long recsrc)
-{
- if (dev.recsrc == recsrc)
- return dev.recsrc;
-#ifdef HAVE_NORECSRC
- else if (recsrc == 0)
- dev.recsrc = 0;
-#endif
- else
- dev.recsrc ^= recsrc;
-
-#ifndef MSND_CLASSIC
- if (dev.recsrc & SOUND_MASK_IMIX) {
- if (msnd_send_word(&dev, 0, 0, HDEXAR_SET_ANA_IN) == 0)
- chk_send_dsp_cmd(&dev, HDEX_AUX_REQ);
- }
- else if (dev.recsrc & SOUND_MASK_SYNTH) {
- if (msnd_send_word(&dev, 0, 0, HDEXAR_SET_SYNTH_IN) == 0)
- chk_send_dsp_cmd(&dev, HDEX_AUX_REQ);
- }
- else if ((dev.recsrc & SOUND_MASK_DIGITAL1) && test_bit(F_HAVEDIGITAL, &dev.flags)) {
- if (msnd_send_word(&dev, 0, 0, HDEXAR_SET_DAT_IN) == 0)
- chk_send_dsp_cmd(&dev, HDEX_AUX_REQ);
- }
- else {
-#ifdef HAVE_NORECSRC
- /* Select no input (?) */
- dev.recsrc = 0;
-#else
- dev.recsrc = SOUND_MASK_IMIX;
- if (msnd_send_word(&dev, 0, 0, HDEXAR_SET_ANA_IN) == 0)
- chk_send_dsp_cmd(&dev, HDEX_AUX_REQ);
-#endif
- }
-#endif /* MSND_CLASSIC */
-
- return dev.recsrc;
-}
-
-static unsigned long force_recsrc(unsigned long recsrc)
-{
- dev.recsrc = 0;
- return set_recsrc(recsrc);
-}
-
-#define set_mixer_info() \
- memset(&info, 0, sizeof(info)); \
- strlcpy(info.id, "MSNDMIXER", sizeof(info.id)); \
- strlcpy(info.name, "MultiSound Mixer", sizeof(info.name));
-
-static int mixer_ioctl(unsigned int cmd, unsigned long arg)
-{
- if (cmd == SOUND_MIXER_INFO) {
- mixer_info info;
- set_mixer_info();
- info.modify_counter = dev.mixer_mod_count;
- if (copy_to_user((void __user *)arg, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- } else if (cmd == SOUND_OLD_MIXER_INFO) {
- _old_mixer_info info;
- set_mixer_info();
- if (copy_to_user((void __user *)arg, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- } else if (cmd == SOUND_MIXER_PRIVATE1) {
- dev.nresets = 0;
- dsp_full_reset();
- return 0;
- } else if (((cmd >> 8) & 0xff) == 'M') {
- int val = 0;
-
- if (_SIOC_DIR(cmd) & _SIOC_WRITE) {
- switch (cmd & 0xff) {
- case SOUND_MIXER_RECSRC:
- if (get_user(val, (int __user *)arg))
- return -EFAULT;
- val = set_recsrc(val);
- break;
-
- default:
- if (get_user(val, (int __user *)arg))
- return -EFAULT;
- val = mixer_set(cmd & 0xff, val);
- break;
- }
- ++dev.mixer_mod_count;
- return put_user(val, (int __user *)arg);
- } else {
- switch (cmd & 0xff) {
- case SOUND_MIXER_RECSRC:
- val = dev.recsrc;
- break;
-
- case SOUND_MIXER_DEVMASK:
- case SOUND_MIXER_STEREODEVS:
- val = SOUND_MASK_PCM |
- SOUND_MASK_LINE |
- SOUND_MASK_IMIX |
- SOUND_MASK_LINE1 |
-#ifndef MSND_CLASSIC
- SOUND_MASK_MIC |
- SOUND_MASK_SYNTH |
-#endif
- SOUND_MASK_VOLUME;
- break;
-
- case SOUND_MIXER_RECMASK:
-#ifdef MSND_CLASSIC
- val = 0;
-#else
- val = SOUND_MASK_IMIX |
- SOUND_MASK_SYNTH;
- if (test_bit(F_HAVEDIGITAL, &dev.flags))
- val |= SOUND_MASK_DIGITAL1;
-#endif
- break;
-
- case SOUND_MIXER_CAPS:
- val = SOUND_CAP_EXCL_INPUT;
- break;
-
- default:
- if ((val = mixer_get(cmd & 0xff)) < 0)
- return -EINVAL;
- break;
- }
- }
-
- return put_user(val, (int __user *)arg);
- }
-
- return -EINVAL;
-}
-
-static long dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int minor = iminor(file_inode(file));
- int ret;
-
- if (cmd == OSS_GETVERSION) {
- int sound_version = SOUND_VERSION;
- return put_user(sound_version, (int __user *)arg);
- }
-
- ret = -EINVAL;
-
- mutex_lock(&msnd_pinnacle_mutex);
- if (minor == dev.dsp_minor)
- ret = dsp_ioctl(file, cmd, arg);
- else if (minor == dev.mixer_minor)
- ret = mixer_ioctl(cmd, arg);
- mutex_unlock(&msnd_pinnacle_mutex);
-
- return ret;
-}
-
-static void dsp_write_flush(void)
-{
- int timeout = get_play_delay_jiffies(dev.DAPF.len);
-
- if (!(dev.mode & FMODE_WRITE) || !test_bit(F_WRITING, &dev.flags))
- return;
- set_bit(F_WRITEFLUSH, &dev.flags);
- wait_event_interruptible_timeout(
- dev.writeflush,
- !test_bit(F_WRITEFLUSH, &dev.flags),
- timeout);
- clear_bit(F_WRITEFLUSH, &dev.flags);
- if (!signal_pending(current)) {
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(get_play_delay_jiffies(DAP_BUFF_SIZE));
- }
- clear_bit(F_WRITING, &dev.flags);
-}
-
-static void dsp_halt(struct file *file)
-{
- if ((file ? file->f_mode : dev.mode) & FMODE_READ) {
- clear_bit(F_READING, &dev.flags);
- chk_send_dsp_cmd(&dev, HDEX_RECORD_STOP);
- msnd_disable_irq(&dev);
- if (file) {
- printk(KERN_DEBUG LOGNAME ": Stopping read for %p\n", file);
- dev.mode &= ~FMODE_READ;
- }
- clear_bit(F_AUDIO_READ_INUSE, &dev.flags);
- }
- if ((file ? file->f_mode : dev.mode) & FMODE_WRITE) {
- if (test_bit(F_WRITING, &dev.flags)) {
- dsp_write_flush();
- chk_send_dsp_cmd(&dev, HDEX_PLAY_STOP);
- }
- msnd_disable_irq(&dev);
- if (file) {
- printk(KERN_DEBUG LOGNAME ": Stopping write for %p\n", file);
- dev.mode &= ~FMODE_WRITE;
- }
- clear_bit(F_AUDIO_WRITE_INUSE, &dev.flags);
- }
-}
-
-static int dsp_release(struct file *file)
-{
- dsp_halt(file);
- return 0;
-}
-
-static int dsp_open(struct file *file)
-{
- if ((file ? file->f_mode : dev.mode) & FMODE_WRITE) {
- set_bit(F_AUDIO_WRITE_INUSE, &dev.flags);
- clear_bit(F_WRITING, &dev.flags);
- msnd_fifo_make_empty(&dev.DAPF);
- reset_play_queue();
- if (file) {
- printk(KERN_DEBUG LOGNAME ": Starting write for %p\n", file);
- dev.mode |= FMODE_WRITE;
- }
- msnd_enable_irq(&dev);
- }
- if ((file ? file->f_mode : dev.mode) & FMODE_READ) {
- set_bit(F_AUDIO_READ_INUSE, &dev.flags);
- clear_bit(F_READING, &dev.flags);
- msnd_fifo_make_empty(&dev.DARF);
- reset_record_queue();
- if (file) {
- printk(KERN_DEBUG LOGNAME ": Starting read for %p\n", file);
- dev.mode |= FMODE_READ;
- }
- msnd_enable_irq(&dev);
- }
- return 0;
-}
-
-static void set_default_play_audio_parameters(void)
-{
- dev.play_sample_size = DEFSAMPLESIZE;
- dev.play_sample_rate = DEFSAMPLERATE;
- dev.play_channels = DEFCHANNELS;
-}
-
-static void set_default_rec_audio_parameters(void)
-{
- dev.rec_sample_size = DEFSAMPLESIZE;
- dev.rec_sample_rate = DEFSAMPLERATE;
- dev.rec_channels = DEFCHANNELS;
-}
-
-static void set_default_audio_parameters(void)
-{
- set_default_play_audio_parameters();
- set_default_rec_audio_parameters();
-}
-
-static int dev_open(struct inode *inode, struct file *file)
-{
- int minor = iminor(inode);
- int err = 0;
-
- mutex_lock(&msnd_pinnacle_mutex);
- if (minor == dev.dsp_minor) {
- if ((file->f_mode & FMODE_WRITE &&
- test_bit(F_AUDIO_WRITE_INUSE, &dev.flags)) ||
- (file->f_mode & FMODE_READ &&
- test_bit(F_AUDIO_READ_INUSE, &dev.flags))) {
- err = -EBUSY;
- goto out;
- }
-
- if ((err = dsp_open(file)) >= 0) {
- dev.nresets = 0;
- if (file->f_mode & FMODE_WRITE) {
- set_default_play_audio_parameters();
- if (!test_bit(F_DISABLE_WRITE_NDELAY, &dev.flags))
- dev.play_ndelay = (file->f_flags & O_NDELAY) ? 1 : 0;
- else
- dev.play_ndelay = 0;
- }
- if (file->f_mode & FMODE_READ) {
- set_default_rec_audio_parameters();
- dev.rec_ndelay = (file->f_flags & O_NDELAY) ? 1 : 0;
- }
- }
- }
- else if (minor == dev.mixer_minor) {
- /* nothing */
- } else
- err = -EINVAL;
-out:
- mutex_unlock(&msnd_pinnacle_mutex);
- return err;
-}
-
-static int dev_release(struct inode *inode, struct file *file)
-{
- int minor = iminor(inode);
- int err = 0;
-
- mutex_lock(&msnd_pinnacle_mutex);
- if (minor == dev.dsp_minor)
- err = dsp_release(file);
- else if (minor == dev.mixer_minor) {
- /* nothing */
- } else
- err = -EINVAL;
- mutex_unlock(&msnd_pinnacle_mutex);
- return err;
-}
-
-static __inline__ int pack_DARQ_to_DARF(register int bank)
-{
- register int size, timeout = 3;
- register WORD wTmp;
- LPDAQD DAQD;
-
- /* Increment the tail and check for queue wrap */
- wTmp = readw(dev.DARQ + JQS_wTail) + PCTODSP_OFFSET(DAQDS__size);
- if (wTmp > readw(dev.DARQ + JQS_wSize))
- wTmp = 0;
- while (wTmp == readw(dev.DARQ + JQS_wHead) && timeout--)
- udelay(1);
- writew(wTmp, dev.DARQ + JQS_wTail);
-
- /* Get our digital audio queue struct */
- DAQD = bank * DAQDS__size + dev.base + DARQ_DATA_BUFF;
-
- /* Get length of data */
- size = readw(DAQD + DAQDS_wSize);
-
- /* Read data from the head (unprotected bank 1 access okay
- since this is only called inside an interrupt) */
- msnd_outb(HPBLKSEL_1, dev.io + HP_BLKS);
- msnd_fifo_write_io(
- &dev.DARF,
- dev.base + bank * DAR_BUFF_SIZE,
- size);
- msnd_outb(HPBLKSEL_0, dev.io + HP_BLKS);
-
- return 1;
-}
-
-static __inline__ int pack_DAPF_to_DAPQ(register int start)
-{
- register WORD DAPQ_tail;
- register int protect = start, nbanks = 0;
- LPDAQD DAQD;
-
- DAPQ_tail = readw(dev.DAPQ + JQS_wTail);
- while (DAPQ_tail != readw(dev.DAPQ + JQS_wHead) || start) {
- register int bank_num = DAPQ_tail / PCTODSP_OFFSET(DAQDS__size);
- register int n;
- unsigned long flags;
-
- /* Write the data to the new tail */
- if (protect) {
- /* Critical section: protect fifo in non-interrupt */
- spin_lock_irqsave(&dev.lock, flags);
- n = msnd_fifo_read_io(
- &dev.DAPF,
- dev.base + bank_num * DAP_BUFF_SIZE,
- DAP_BUFF_SIZE);
- spin_unlock_irqrestore(&dev.lock, flags);
- } else {
- n = msnd_fifo_read_io(
- &dev.DAPF,
- dev.base + bank_num * DAP_BUFF_SIZE,
- DAP_BUFF_SIZE);
- }
- if (!n)
- break;
-
- if (start)
- start = 0;
-
- /* Get our digital audio queue struct */
- DAQD = bank_num * DAQDS__size + dev.base + DAPQ_DATA_BUFF;
-
- /* Write size of this bank */
- writew(n, DAQD + DAQDS_wSize);
- ++nbanks;
-
- /* Then advance the tail */
- DAPQ_tail = (++bank_num % 3) * PCTODSP_OFFSET(DAQDS__size);
- writew(DAPQ_tail, dev.DAPQ + JQS_wTail);
- /* Tell the DSP to play the bank */
- msnd_send_dsp_cmd(&dev, HDEX_PLAY_START);
- }
- return nbanks;
-}
-
-static int dsp_read(char __user *buf, size_t len)
-{
- int count = len;
- char *page = (char *)__get_free_page(GFP_KERNEL);
- int timeout = get_rec_delay_jiffies(DAR_BUFF_SIZE);
-
- if (!page)
- return -ENOMEM;
-
- while (count > 0) {
- int n, k;
- unsigned long flags;
-
- k = PAGE_SIZE;
- if (k > count)
- k = count;
-
- /* Critical section: protect fifo in non-interrupt */
- spin_lock_irqsave(&dev.lock, flags);
- n = msnd_fifo_read(&dev.DARF, page, k);
- spin_unlock_irqrestore(&dev.lock, flags);
- if (copy_to_user(buf, page, n)) {
- free_page((unsigned long)page);
- return -EFAULT;
- }
- buf += n;
- count -= n;
-
- if (n == k && count)
- continue;
-
- if (!test_bit(F_READING, &dev.flags) && dev.mode & FMODE_READ) {
- dev.last_recbank = -1;
- if (chk_send_dsp_cmd(&dev, HDEX_RECORD_START) == 0)
- set_bit(F_READING, &dev.flags);
- }
-
- if (dev.rec_ndelay) {
- free_page((unsigned long)page);
- return count == len ? -EAGAIN : len - count;
- }
-
- if (count > 0) {
- set_bit(F_READBLOCK, &dev.flags);
- if (wait_event_interruptible_timeout(
- dev.readblock,
- test_bit(F_READBLOCK, &dev.flags),
- timeout) <= 0)
- clear_bit(F_READING, &dev.flags);
- if (signal_pending(current)) {
- free_page((unsigned long)page);
- return -EINTR;
- }
- }
- }
- free_page((unsigned long)page);
- return len - count;
-}
-
-static int dsp_write(const char __user *buf, size_t len)
-{
- int count = len;
- char *page = (char *)__get_free_page(GFP_KERNEL);
- int timeout = get_play_delay_jiffies(DAP_BUFF_SIZE);
-
- if (!page)
- return -ENOMEM;
-
- while (count > 0) {
- int n, k;
- unsigned long flags;
-
- k = PAGE_SIZE;
- if (k > count)
- k = count;
-
- if (copy_from_user(page, buf, k)) {
- free_page((unsigned long)page);
- return -EFAULT;
- }
-
- /* Critical section: protect fifo in non-interrupt */
- spin_lock_irqsave(&dev.lock, flags);
- n = msnd_fifo_write(&dev.DAPF, page, k);
- spin_unlock_irqrestore(&dev.lock, flags);
- buf += n;
- count -= n;
-
- if (count && n == k)
- continue;
-
- if (!test_bit(F_WRITING, &dev.flags) && (dev.mode & FMODE_WRITE)) {
- dev.last_playbank = -1;
- if (pack_DAPF_to_DAPQ(1) > 0)
- set_bit(F_WRITING, &dev.flags);
- }
-
- if (dev.play_ndelay) {
- free_page((unsigned long)page);
- return count == len ? -EAGAIN : len - count;
- }
-
- if (count > 0) {
- set_bit(F_WRITEBLOCK, &dev.flags);
- wait_event_interruptible_timeout(
- dev.writeblock,
- test_bit(F_WRITEBLOCK, &dev.flags),
- timeout);
- if (signal_pending(current)) {
- free_page((unsigned long)page);
- return -EINTR;
- }
- }
- }
-
- free_page((unsigned long)page);
- return len - count;
-}
-
-static ssize_t dev_read(struct file *file, char __user *buf, size_t count, loff_t *off)
-{
- int minor = iminor(file_inode(file));
- if (minor == dev.dsp_minor)
- return dsp_read(buf, count);
- else
- return -EINVAL;
-}
-
-static ssize_t dev_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
-{
- int minor = iminor(file_inode(file));
- if (minor == dev.dsp_minor)
- return dsp_write(buf, count);
- else
- return -EINVAL;
-}
-
-static __inline__ void eval_dsp_msg(register WORD wMessage)
-{
- switch (HIBYTE(wMessage)) {
- case HIMT_PLAY_DONE:
- if (dev.last_playbank == LOBYTE(wMessage) || !test_bit(F_WRITING, &dev.flags))
- break;
- dev.last_playbank = LOBYTE(wMessage);
-
- if (pack_DAPF_to_DAPQ(0) <= 0) {
- if (!test_bit(F_WRITEBLOCK, &dev.flags)) {
- if (test_and_clear_bit(F_WRITEFLUSH, &dev.flags))
- wake_up_interruptible(&dev.writeflush);
- }
- clear_bit(F_WRITING, &dev.flags);
- }
-
- if (test_and_clear_bit(F_WRITEBLOCK, &dev.flags))
- wake_up_interruptible(&dev.writeblock);
- break;
-
- case HIMT_RECORD_DONE:
- if (dev.last_recbank == LOBYTE(wMessage))
- break;
- dev.last_recbank = LOBYTE(wMessage);
-
- pack_DARQ_to_DARF(dev.last_recbank);
-
- if (test_and_clear_bit(F_READBLOCK, &dev.flags))
- wake_up_interruptible(&dev.readblock);
- break;
-
- case HIMT_DSP:
- switch (LOBYTE(wMessage)) {
-#ifndef MSND_CLASSIC
- case HIDSP_PLAY_UNDER:
-#endif
- case HIDSP_INT_PLAY_UNDER:
-/* printk(KERN_DEBUG LOGNAME ": Play underflow\n"); */
- clear_bit(F_WRITING, &dev.flags);
- break;
-
- case HIDSP_INT_RECORD_OVER:
-/* printk(KERN_DEBUG LOGNAME ": Record overflow\n"); */
- clear_bit(F_READING, &dev.flags);
- break;
-
- default:
-/* printk(KERN_DEBUG LOGNAME ": DSP message %d 0x%02x\n",
- LOBYTE(wMessage), LOBYTE(wMessage)); */
- break;
- }
- break;
-
- case HIMT_MIDI_IN_UCHAR:
- if (dev.midi_in_interrupt)
- (*dev.midi_in_interrupt)(&dev);
- break;
-
- default:
-/* printk(KERN_DEBUG LOGNAME ": HIMT message %d 0x%02x\n", HIBYTE(wMessage), HIBYTE(wMessage)); */
- break;
- }
-}
-
-static irqreturn_t intr(int irq, void *dev_id)
-{
- /* Send ack to DSP */
- msnd_inb(dev.io + HP_RXL);
-
- /* Evaluate queued DSP messages */
- while (readw(dev.DSPQ + JQS_wTail) != readw(dev.DSPQ + JQS_wHead)) {
- register WORD wTmp;
-
- eval_dsp_msg(readw(dev.pwDSPQData + 2*readw(dev.DSPQ + JQS_wHead)));
-
- if ((wTmp = readw(dev.DSPQ + JQS_wHead) + 1) > readw(dev.DSPQ + JQS_wSize))
- writew(0, dev.DSPQ + JQS_wHead);
- else
- writew(wTmp, dev.DSPQ + JQS_wHead);
- }
- return IRQ_HANDLED;
-}
-
-static const struct file_operations dev_fileops = {
- .owner = THIS_MODULE,
- .read = dev_read,
- .write = dev_write,
- .unlocked_ioctl = dev_ioctl,
- .open = dev_open,
- .release = dev_release,
- .llseek = noop_llseek,
-};
-
-static int reset_dsp(void)
-{
- int timeout = 100;
-
- msnd_outb(HPDSPRESET_ON, dev.io + HP_DSPR);
- mdelay(1);
-#ifndef MSND_CLASSIC
- dev.info = msnd_inb(dev.io + HP_INFO);
-#endif
- msnd_outb(HPDSPRESET_OFF, dev.io + HP_DSPR);
- mdelay(1);
- while (timeout-- > 0) {
- if (msnd_inb(dev.io + HP_CVR) == HP_CVR_DEF)
- return 0;
- mdelay(1);
- }
- printk(KERN_ERR LOGNAME ": Cannot reset DSP\n");
-
- return -EIO;
-}
-
-static int __init probe_multisound(void)
-{
-#ifndef MSND_CLASSIC
- char *xv, *rev = NULL;
- char *pin = "Pinnacle", *fiji = "Fiji";
- char *pinfiji = "Pinnacle/Fiji";
-#endif
-
- if (!request_region(dev.io, dev.numio, "probing")) {
- printk(KERN_ERR LOGNAME ": I/O port conflict\n");
- return -ENODEV;
- }
-
- if (reset_dsp() < 0) {
- release_region(dev.io, dev.numio);
- return -ENODEV;
- }
-
-#ifdef MSND_CLASSIC
- dev.name = "Classic/Tahiti/Monterey";
- printk(KERN_INFO LOGNAME ": %s, "
-#else
- switch (dev.info >> 4) {
- case 0xf: xv = "<= 1.15"; break;
- case 0x1: xv = "1.18/1.2"; break;
- case 0x2: xv = "1.3"; break;
- case 0x3: xv = "1.4"; break;
- default: xv = "unknown"; break;
- }
-
- switch (dev.info & 0x7) {
- case 0x0: rev = "I"; dev.name = pin; break;
- case 0x1: rev = "F"; dev.name = pin; break;
- case 0x2: rev = "G"; dev.name = pin; break;
- case 0x3: rev = "H"; dev.name = pin; break;
- case 0x4: rev = "E"; dev.name = fiji; break;
- case 0x5: rev = "C"; dev.name = fiji; break;
- case 0x6: rev = "D"; dev.name = fiji; break;
- case 0x7:
- rev = "A-B (Fiji) or A-E (Pinnacle)";
- dev.name = pinfiji;
- break;
- }
- printk(KERN_INFO LOGNAME ": %s revision %s, Xilinx version %s, "
-#endif /* MSND_CLASSIC */
- "I/O 0x%x-0x%x, IRQ %d, memory mapped to %p-%p\n",
- dev.name,
-#ifndef MSND_CLASSIC
- rev, xv,
-#endif
- dev.io, dev.io + dev.numio - 1,
- dev.irq,
- dev.base, dev.base + 0x7fff);
-
- release_region(dev.io, dev.numio);
- return 0;
-}
-
-static int init_sma(void)
-{
- static int initted;
- WORD mastVolLeft, mastVolRight;
- unsigned long flags;
-
-#ifdef MSND_CLASSIC
- msnd_outb(dev.memid, dev.io + HP_MEMM);
-#endif
- msnd_outb(HPBLKSEL_0, dev.io + HP_BLKS);
- if (initted) {
- mastVolLeft = readw(dev.SMA + SMA_wCurrMastVolLeft);
- mastVolRight = readw(dev.SMA + SMA_wCurrMastVolRight);
- } else
- mastVolLeft = mastVolRight = 0;
- memset_io(dev.base, 0, 0x8000);
-
- /* Critical section: bank 1 access */
- spin_lock_irqsave(&dev.lock, flags);
- msnd_outb(HPBLKSEL_1, dev.io + HP_BLKS);
- memset_io(dev.base, 0, 0x8000);
- msnd_outb(HPBLKSEL_0, dev.io + HP_BLKS);
- spin_unlock_irqrestore(&dev.lock, flags);
-
- dev.pwDSPQData = (dev.base + DSPQ_DATA_BUFF);
- dev.pwMODQData = (dev.base + MODQ_DATA_BUFF);
- dev.pwMIDQData = (dev.base + MIDQ_DATA_BUFF);
-
- /* Motorola 56k shared memory base */
- dev.SMA = dev.base + SMA_STRUCT_START;
-
- /* Digital audio play queue */
- dev.DAPQ = dev.base + DAPQ_OFFSET;
- msnd_init_queue(dev.DAPQ, DAPQ_DATA_BUFF, DAPQ_BUFF_SIZE);
-
- /* Digital audio record queue */
- dev.DARQ = dev.base + DARQ_OFFSET;
- msnd_init_queue(dev.DARQ, DARQ_DATA_BUFF, DARQ_BUFF_SIZE);
-
- /* MIDI out queue */
- dev.MODQ = dev.base + MODQ_OFFSET;
- msnd_init_queue(dev.MODQ, MODQ_DATA_BUFF, MODQ_BUFF_SIZE);
-
- /* MIDI in queue */
- dev.MIDQ = dev.base + MIDQ_OFFSET;
- msnd_init_queue(dev.MIDQ, MIDQ_DATA_BUFF, MIDQ_BUFF_SIZE);
-
- /* DSP -> host message queue */
- dev.DSPQ = dev.base + DSPQ_OFFSET;
- msnd_init_queue(dev.DSPQ, DSPQ_DATA_BUFF, DSPQ_BUFF_SIZE);
-
- /* Setup some DSP values */
-#ifndef MSND_CLASSIC
- writew(1, dev.SMA + SMA_wCurrPlayFormat);
- writew(dev.play_sample_size, dev.SMA + SMA_wCurrPlaySampleSize);
- writew(dev.play_channels, dev.SMA + SMA_wCurrPlayChannels);
- writew(dev.play_sample_rate, dev.SMA + SMA_wCurrPlaySampleRate);
-#endif
- writew(dev.play_sample_rate, dev.SMA + SMA_wCalFreqAtoD);
- writew(mastVolLeft, dev.SMA + SMA_wCurrMastVolLeft);
- writew(mastVolRight, dev.SMA + SMA_wCurrMastVolRight);
-#ifndef MSND_CLASSIC
- writel(0x00010000, dev.SMA + SMA_dwCurrPlayPitch);
- writel(0x00000001, dev.SMA + SMA_dwCurrPlayRate);
-#endif
- writew(0x303, dev.SMA + SMA_wCurrInputTagBits);
-
- initted = 1;
-
- return 0;
-}
-
-static int __init calibrate_adc(WORD srate)
-{
- writew(srate, dev.SMA + SMA_wCalFreqAtoD);
- if (dev.calibrate_signal == 0)
- writew(readw(dev.SMA + SMA_wCurrHostStatusFlags)
- | 0x0001, dev.SMA + SMA_wCurrHostStatusFlags);
- else
- writew(readw(dev.SMA + SMA_wCurrHostStatusFlags)
- & ~0x0001, dev.SMA + SMA_wCurrHostStatusFlags);
- if (msnd_send_word(&dev, 0, 0, HDEXAR_CAL_A_TO_D) == 0 &&
- chk_send_dsp_cmd(&dev, HDEX_AUX_REQ) == 0) {
- schedule_timeout_interruptible(HZ / 3);
- return 0;
- }
- printk(KERN_WARNING LOGNAME ": ADC calibration failed\n");
-
- return -EIO;
-}
-
-static int upload_dsp_code(void)
-{
- int ret = 0;
-
- msnd_outb(HPBLKSEL_0, dev.io + HP_BLKS);
-#ifndef HAVE_DSPCODEH
- INITCODESIZE = mod_firmware_load(INITCODEFILE, &INITCODE);
- if (!INITCODE) {
- printk(KERN_ERR LOGNAME ": Error loading " INITCODEFILE);
- return -EBUSY;
- }
-
- PERMCODESIZE = mod_firmware_load(PERMCODEFILE, &PERMCODE);
- if (!PERMCODE) {
- printk(KERN_ERR LOGNAME ": Error loading " PERMCODEFILE);
- vfree(INITCODE);
- return -EBUSY;
- }
-#endif
- memcpy_toio(dev.base, PERMCODE, PERMCODESIZE);
- if (msnd_upload_host(&dev, INITCODE, INITCODESIZE) < 0) {
- printk(KERN_WARNING LOGNAME ": Error uploading to DSP\n");
- ret = -ENODEV;
- goto out;
- }
-#ifdef HAVE_DSPCODEH
- printk(KERN_INFO LOGNAME ": DSP firmware uploaded (resident)\n");
-#else
- printk(KERN_INFO LOGNAME ": DSP firmware uploaded\n");
-#endif
-
-out:
-#ifndef HAVE_DSPCODEH
- vfree(INITCODE);
- vfree(PERMCODE);
-#endif
-
- return ret;
-}
-
-#ifdef MSND_CLASSIC
-static void reset_proteus(void)
-{
- msnd_outb(HPPRORESET_ON, dev.io + HP_PROR);
- mdelay(TIME_PRO_RESET);
- msnd_outb(HPPRORESET_OFF, dev.io + HP_PROR);
- mdelay(TIME_PRO_RESET_DONE);
-}
-#endif
-
-static int initialize(void)
-{
- int err, timeout;
-
-#ifdef MSND_CLASSIC
- msnd_outb(HPWAITSTATE_0, dev.io + HP_WAIT);
- msnd_outb(HPBITMODE_16, dev.io + HP_BITM);
-
- reset_proteus();
-#endif
- if ((err = init_sma()) < 0) {
- printk(KERN_WARNING LOGNAME ": Cannot initialize SMA\n");
- return err;
- }
-
- if ((err = reset_dsp()) < 0)
- return err;
-
- if ((err = upload_dsp_code()) < 0) {
- printk(KERN_WARNING LOGNAME ": Cannot upload DSP code\n");
- return err;
- }
-
- timeout = 200;
- while (readw(dev.base)) {
- mdelay(1);
- if (!timeout--) {
- printk(KERN_DEBUG LOGNAME ": DSP reset timeout\n");
- return -EIO;
- }
- }
-
- mixer_setup();
-
- return 0;
-}
-
-static int dsp_full_reset(void)
-{
- int rv;
-
- if (test_bit(F_RESETTING, &dev.flags) || ++dev.nresets > 10)
- return 0;
-
- set_bit(F_RESETTING, &dev.flags);
- printk(KERN_INFO LOGNAME ": DSP reset\n");
- dsp_halt(NULL); /* Unconditionally halt */
- if ((rv = initialize()))
- printk(KERN_WARNING LOGNAME ": DSP reset failed\n");
- force_recsrc(dev.recsrc);
- dsp_open(NULL);
- clear_bit(F_RESETTING, &dev.flags);
-
- return rv;
-}
-
-static int __init attach_multisound(void)
-{
- int err;
-
- if ((err = request_irq(dev.irq, intr, 0, dev.name, &dev)) < 0) {
- printk(KERN_ERR LOGNAME ": Couldn't grab IRQ %d\n", dev.irq);
- return err;
- }
- if (request_region(dev.io, dev.numio, dev.name) == NULL) {
- free_irq(dev.irq, &dev);
- return -EBUSY;
- }
-
- err = dsp_full_reset();
- if (err < 0) {
- release_region(dev.io, dev.numio);
- free_irq(dev.irq, &dev);
- return err;
- }
-
- if ((err = msnd_register(&dev)) < 0) {
- printk(KERN_ERR LOGNAME ": Unable to register MultiSound\n");
- release_region(dev.io, dev.numio);
- free_irq(dev.irq, &dev);
- return err;
- }
-
- if ((dev.dsp_minor = register_sound_dsp(&dev_fileops, -1)) < 0) {
- printk(KERN_ERR LOGNAME ": Unable to register DSP operations\n");
- msnd_unregister(&dev);
- release_region(dev.io, dev.numio);
- free_irq(dev.irq, &dev);
- return dev.dsp_minor;
- }
-
- if ((dev.mixer_minor = register_sound_mixer(&dev_fileops, -1)) < 0) {
- printk(KERN_ERR LOGNAME ": Unable to register mixer operations\n");
- unregister_sound_mixer(dev.mixer_minor);
- msnd_unregister(&dev);
- release_region(dev.io, dev.numio);
- free_irq(dev.irq, &dev);
- return dev.mixer_minor;
- }
-
- dev.ext_midi_dev = dev.hdr_midi_dev = -1;
-
- disable_irq(dev.irq);
- calibrate_adc(dev.play_sample_rate);
-#ifndef MSND_CLASSIC
- force_recsrc(SOUND_MASK_IMIX);
-#endif
-
- return 0;
-}
-
-static void __exit unload_multisound(void)
-{
- release_region(dev.io, dev.numio);
- free_irq(dev.irq, &dev);
- unregister_sound_mixer(dev.mixer_minor);
- unregister_sound_dsp(dev.dsp_minor);
- msnd_unregister(&dev);
-}
-
-#ifndef MSND_CLASSIC
-
-/* Pinnacle/Fiji Logical Device Configuration */
-
-static int __init msnd_write_cfg(int cfg, int reg, int value)
-{
- msnd_outb(reg, cfg);
- msnd_outb(value, cfg + 1);
- if (value != msnd_inb(cfg + 1)) {
- printk(KERN_ERR LOGNAME ": msnd_write_cfg: I/O error\n");
- return -EIO;
- }
- return 0;
-}
-
-static int __init msnd_write_cfg_io0(int cfg, int num, WORD io)
-{
- if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
- return -EIO;
- if (msnd_write_cfg(cfg, IREG_IO0_BASEHI, HIBYTE(io)))
- return -EIO;
- if (msnd_write_cfg(cfg, IREG_IO0_BASELO, LOBYTE(io)))
- return -EIO;
- return 0;
-}
-
-static int __init msnd_write_cfg_io1(int cfg, int num, WORD io)
-{
- if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
- return -EIO;
- if (msnd_write_cfg(cfg, IREG_IO1_BASEHI, HIBYTE(io)))
- return -EIO;
- if (msnd_write_cfg(cfg, IREG_IO1_BASELO, LOBYTE(io)))
- return -EIO;
- return 0;
-}
-
-static int __init msnd_write_cfg_irq(int cfg, int num, WORD irq)
-{
- if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
- return -EIO;
- if (msnd_write_cfg(cfg, IREG_IRQ_NUMBER, LOBYTE(irq)))
- return -EIO;
- if (msnd_write_cfg(cfg, IREG_IRQ_TYPE, IRQTYPE_EDGE))
- return -EIO;
- return 0;
-}
-
-static int __init msnd_write_cfg_mem(int cfg, int num, int mem)
-{
- WORD wmem;
-
- mem >>= 8;
- mem &= 0xfff;
- wmem = (WORD)mem;
- if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
- return -EIO;
- if (msnd_write_cfg(cfg, IREG_MEMBASEHI, HIBYTE(wmem)))
- return -EIO;
- if (msnd_write_cfg(cfg, IREG_MEMBASELO, LOBYTE(wmem)))
- return -EIO;
- if (wmem && msnd_write_cfg(cfg, IREG_MEMCONTROL, (MEMTYPE_HIADDR | MEMTYPE_16BIT)))
- return -EIO;
- return 0;
-}
-
-static int __init msnd_activate_logical(int cfg, int num)
-{
- if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
- return -EIO;
- if (msnd_write_cfg(cfg, IREG_ACTIVATE, LD_ACTIVATE))
- return -EIO;
- return 0;
-}
-
-static int __init msnd_write_cfg_logical(int cfg, int num, WORD io0, WORD io1, WORD irq, int mem)
-{
- if (msnd_write_cfg(cfg, IREG_LOGDEVICE, num))
- return -EIO;
- if (msnd_write_cfg_io0(cfg, num, io0))
- return -EIO;
- if (msnd_write_cfg_io1(cfg, num, io1))
- return -EIO;
- if (msnd_write_cfg_irq(cfg, num, irq))
- return -EIO;
- if (msnd_write_cfg_mem(cfg, num, mem))
- return -EIO;
- if (msnd_activate_logical(cfg, num))
- return -EIO;
- return 0;
-}
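Taken together, the helpers above follow the usual PnP-style register protocol: select a logical device through IREG_LOGDEVICE, program its I/O, IRQ and memory resources, then activate it. A minimal usage sketch, with hypothetical resource values (0x250, 0x290, IRQ 5 and 0xd0000 are merely examples that would pass the checks in msnd_init below):

	/* Hypothetical sketch: configure logical device 0 (the DSP) via a
	 * non-PnP config port at 0x250, then activate it. */
	if (msnd_write_cfg_logical(0x250, 0, 0x290, 0, 5, 0xd0000))
		printk(KERN_ERR LOGNAME ": DSP logical device setup failed\n");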
-
-typedef struct msnd_pinnacle_cfg_device {
- WORD io0, io1, irq;
- int mem;
-} msnd_pinnacle_cfg_t[4];
-
-static int __init msnd_pinnacle_cfg_devices(int cfg, int reset, msnd_pinnacle_cfg_t device)
-{
- int i;
-
- /* Reset devices if told to */
- if (reset) {
- printk(KERN_INFO LOGNAME ": Resetting all devices\n");
- for (i = 0; i < 4; ++i)
- if (msnd_write_cfg_logical(cfg, i, 0, 0, 0, 0))
- return -EIO;
- }
-
- /* Configure specified devices */
- for (i = 0; i < 4; ++i) {
-
- switch (i) {
- case 0: /* DSP */
- if (!(device[i].io0 && device[i].irq && device[i].mem))
- continue;
- break;
- case 1: /* MPU */
- if (!(device[i].io0 && device[i].irq))
- continue;
- printk(KERN_INFO LOGNAME
- ": Configuring MPU to I/O 0x%x IRQ %d\n",
- device[i].io0, device[i].irq);
- break;
- case 2: /* IDE */
- if (!(device[i].io0 && device[i].io1 && device[i].irq))
- continue;
- printk(KERN_INFO LOGNAME
- ": Configuring IDE to I/O 0x%x, 0x%x IRQ %d\n",
- device[i].io0, device[i].io1, device[i].irq);
- break;
- case 3: /* Joystick */
- if (!(device[i].io0))
- continue;
- printk(KERN_INFO LOGNAME
- ": Configuring joystick to I/O 0x%x\n",
- device[i].io0);
- break;
- }
-
- /* Configure the device */
- if (msnd_write_cfg_logical(cfg, i, device[i].io0, device[i].io1, device[i].irq, device[i].mem))
- return -EIO;
- }
-
- return 0;
-}
-#endif
-
-#ifdef MODULE
-MODULE_AUTHOR ("Andrew Veliath <andrewtv@usa.net>");
-MODULE_DESCRIPTION ("Turtle Beach " LONGNAME " Linux Driver");
-MODULE_LICENSE("GPL");
-
-static int io __initdata = -1;
-static int irq __initdata = -1;
-static int mem __initdata = -1;
-static int write_ndelay __initdata = -1;
-
-#ifndef MSND_CLASSIC
-/* Pinnacle/Fiji non-PnP Config Port */
-static int cfg __initdata = -1;
-
-/* Extra Peripheral Configuration */
-static int reset __initdata = 0;
-static int mpu_io __initdata = 0;
-static int mpu_irq __initdata = 0;
-static int ide_io0 __initdata = 0;
-static int ide_io1 __initdata = 0;
-static int ide_irq __initdata = 0;
-static int joystick_io __initdata = 0;
-
-/* If we have the digital daughterboard... */
-static bool digital __initdata = false;
-#endif
-
-static int fifosize __initdata = DEFFIFOSIZE;
-static int calibrate_signal __initdata = 0;
-
-#else /* not a module */
-
-static int write_ndelay __initdata = -1;
-
-#ifdef MSND_CLASSIC
-static int io __initdata = CONFIG_MSNDCLAS_IO;
-static int irq __initdata = CONFIG_MSNDCLAS_IRQ;
-static int mem __initdata = CONFIG_MSNDCLAS_MEM;
-#else /* Pinnacle/Fiji */
-
-static int io __initdata = CONFIG_MSNDPIN_IO;
-static int irq __initdata = CONFIG_MSNDPIN_IRQ;
-static int mem __initdata = CONFIG_MSNDPIN_MEM;
-
-/* Pinnacle/Fiji non-PnP Config Port */
-#ifdef CONFIG_MSNDPIN_NONPNP
-# ifndef CONFIG_MSNDPIN_CFG
-# define CONFIG_MSNDPIN_CFG 0x250
-# endif
-#else
-# ifdef CONFIG_MSNDPIN_CFG
-# undef CONFIG_MSNDPIN_CFG
-# endif
-# define CONFIG_MSNDPIN_CFG -1
-#endif
-static int cfg __initdata = CONFIG_MSNDPIN_CFG;
-/* If not a module, we don't need to bother with reset=1 */
-static int reset;
-
-/* Extra Peripheral Configuration (Default: Disable) */
-#ifndef CONFIG_MSNDPIN_MPU_IO
-# define CONFIG_MSNDPIN_MPU_IO 0
-#endif
-static int mpu_io __initdata = CONFIG_MSNDPIN_MPU_IO;
-
-#ifndef CONFIG_MSNDPIN_MPU_IRQ
-# define CONFIG_MSNDPIN_MPU_IRQ 0
-#endif
-static int mpu_irq __initdata = CONFIG_MSNDPIN_MPU_IRQ;
-
-#ifndef CONFIG_MSNDPIN_IDE_IO0
-# define CONFIG_MSNDPIN_IDE_IO0 0
-#endif
-static int ide_io0 __initdata = CONFIG_MSNDPIN_IDE_IO0;
-
-#ifndef CONFIG_MSNDPIN_IDE_IO1
-# define CONFIG_MSNDPIN_IDE_IO1 0
-#endif
-static int ide_io1 __initdata = CONFIG_MSNDPIN_IDE_IO1;
-
-#ifndef CONFIG_MSNDPIN_IDE_IRQ
-# define CONFIG_MSNDPIN_IDE_IRQ 0
-#endif
-static int ide_irq __initdata = CONFIG_MSNDPIN_IDE_IRQ;
-
-#ifndef CONFIG_MSNDPIN_JOYSTICK_IO
-# define CONFIG_MSNDPIN_JOYSTICK_IO 0
-#endif
-static int joystick_io __initdata = CONFIG_MSNDPIN_JOYSTICK_IO;
-
-/* Have SPDIF (Digital) Daughterboard */
-#ifndef CONFIG_MSNDPIN_DIGITAL
-# define CONFIG_MSNDPIN_DIGITAL 0
-#endif
-static bool digital __initdata = CONFIG_MSNDPIN_DIGITAL;
-
-#endif /* MSND_CLASSIC */
-
-#ifndef CONFIG_MSND_FIFOSIZE
-# define CONFIG_MSND_FIFOSIZE DEFFIFOSIZE
-#endif
-static int fifosize __initdata = CONFIG_MSND_FIFOSIZE;
-
-#ifndef CONFIG_MSND_CALSIGNAL
-# define CONFIG_MSND_CALSIGNAL 0
-#endif
-static int calibrate_signal __initdata = CONFIG_MSND_CALSIGNAL;
-#endif /* MODULE */
-
-module_param_hw (io, int, ioport, 0);
-module_param_hw (irq, int, irq, 0);
-module_param_hw (mem, int, iomem, 0);
-module_param (write_ndelay, int, 0);
-module_param (fifosize, int, 0);
-module_param (calibrate_signal, int, 0);
-#ifndef MSND_CLASSIC
-module_param (digital, bool, 0);
-module_param_hw (cfg, int, ioport, 0);
-module_param (reset, int, 0);
-module_param_hw (mpu_io, int, ioport, 0);
-module_param_hw (mpu_irq, int, irq, 0);
-module_param_hw (ide_io0, int, ioport, 0);
-module_param_hw (ide_io1, int, ioport, 0);
-module_param_hw (ide_irq, int, irq, 0);
-module_param_hw (joystick_io, int, ioport, 0);
-#endif
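For reference, loading the driver as a module would combine these parameters on the modprobe line, e.g. (a hypothetical non-PnP Pinnacle setup, with values chosen only to satisfy the range checks in msnd_init below): modprobe msnd_pinnacle io=0x290 irq=5 mem=0xd0000 cfg=0x250 digital=1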
-
-static int __init msnd_init(void)
-{
- int err;
-#ifndef MSND_CLASSIC
- static msnd_pinnacle_cfg_t pinnacle_devs;
-#endif /* MSND_CLASSIC */
-
- printk(KERN_INFO LOGNAME ": Turtle Beach " LONGNAME " Linux Driver Version "
- VERSION ", Copyright (C) 1998 Andrew Veliath\n");
-
- if (io == -1 || irq == -1 || mem == -1)
- printk(KERN_WARNING LOGNAME ": io, irq and mem must be set\n");
-
-#ifdef MSND_CLASSIC
- if (io == -1 ||
- !(io == 0x290 ||
- io == 0x260 ||
- io == 0x250 ||
- io == 0x240 ||
- io == 0x230 ||
- io == 0x220 ||
- io == 0x210 ||
- io == 0x3e0)) {
- printk(KERN_ERR LOGNAME ": \"io\" - DSP I/O base must be set to 0x210, 0x220, 0x230, 0x240, 0x250, 0x260, 0x290, or 0x3E0\n");
- return -EINVAL;
- }
-#else
- if (io == -1 ||
- io < 0x100 ||
- io > 0x3e0 ||
- (io % 0x10) != 0) {
-		printk(KERN_ERR LOGNAME ": \"io\" - DSP I/O base must be within the range 0x100 to 0x3E0 and must be evenly divisible by 0x10\n");
- return -EINVAL;
- }
-#endif /* MSND_CLASSIC */
-
- if (irq == -1 ||
- !(irq == 5 ||
- irq == 7 ||
- irq == 9 ||
- irq == 10 ||
- irq == 11 ||
- irq == 12)) {
- printk(KERN_ERR LOGNAME ": \"irq\" - must be set to 5, 7, 9, 10, 11 or 12\n");
- return -EINVAL;
- }
-
- if (mem == -1 ||
- !(mem == 0xb0000 ||
- mem == 0xc8000 ||
- mem == 0xd0000 ||
- mem == 0xd8000 ||
- mem == 0xe0000 ||
- mem == 0xe8000)) {
- printk(KERN_ERR LOGNAME ": \"mem\" - must be set to "
- "0xb0000, 0xc8000, 0xd0000, 0xd8000, 0xe0000 or 0xe8000\n");
- return -EINVAL;
- }
-
-#ifdef MSND_CLASSIC
- switch (irq) {
- case 5: dev.irqid = HPIRQ_5; break;
- case 7: dev.irqid = HPIRQ_7; break;
- case 9: dev.irqid = HPIRQ_9; break;
- case 10: dev.irqid = HPIRQ_10; break;
- case 11: dev.irqid = HPIRQ_11; break;
- case 12: dev.irqid = HPIRQ_12; break;
- }
-
- switch (mem) {
- case 0xb0000: dev.memid = HPMEM_B000; break;
- case 0xc8000: dev.memid = HPMEM_C800; break;
- case 0xd0000: dev.memid = HPMEM_D000; break;
- case 0xd8000: dev.memid = HPMEM_D800; break;
- case 0xe0000: dev.memid = HPMEM_E000; break;
- case 0xe8000: dev.memid = HPMEM_E800; break;
- }
-#else
- if (cfg == -1) {
- printk(KERN_INFO LOGNAME ": Assuming PnP mode\n");
- } else if (cfg != 0x250 && cfg != 0x260 && cfg != 0x270) {
- printk(KERN_INFO LOGNAME ": Config port must be 0x250, 0x260 or 0x270 (or unspecified for PnP mode)\n");
- return -EINVAL;
- } else {
- printk(KERN_INFO LOGNAME ": Non-PnP mode: configuring at port 0x%x\n", cfg);
-
- /* DSP */
- pinnacle_devs[0].io0 = io;
- pinnacle_devs[0].irq = irq;
- pinnacle_devs[0].mem = mem;
-
- /* The following are Pinnacle specific */
-
- /* MPU */
- pinnacle_devs[1].io0 = mpu_io;
- pinnacle_devs[1].irq = mpu_irq;
-
- /* IDE */
- pinnacle_devs[2].io0 = ide_io0;
- pinnacle_devs[2].io1 = ide_io1;
- pinnacle_devs[2].irq = ide_irq;
-
- /* Joystick */
- pinnacle_devs[3].io0 = joystick_io;
-
- if (!request_region(cfg, 2, "Pinnacle/Fiji Config")) {
- printk(KERN_ERR LOGNAME ": Config port 0x%x conflict\n", cfg);
- return -EIO;
- }
-
- if (msnd_pinnacle_cfg_devices(cfg, reset, pinnacle_devs)) {
- printk(KERN_ERR LOGNAME ": Device configuration error\n");
- release_region(cfg, 2);
- return -EIO;
- }
- release_region(cfg, 2);
- }
-#endif /* MSND_CLASSIC */
-
- if (fifosize < 16)
- fifosize = 16;
-
- if (fifosize > 1024)
- fifosize = 1024;
-
- set_default_audio_parameters();
-#ifdef MSND_CLASSIC
- dev.type = msndClassic;
-#else
- dev.type = msndPinnacle;
-#endif
- dev.io = io;
- dev.numio = DSP_NUMIO;
- dev.irq = irq;
- dev.base = ioremap(mem, 0x8000);
- dev.fifosize = fifosize * 1024;
- dev.calibrate_signal = calibrate_signal ? 1 : 0;
- dev.recsrc = 0;
- dev.dspq_data_buff = DSPQ_DATA_BUFF;
- dev.dspq_buff_size = DSPQ_BUFF_SIZE;
- if (write_ndelay == -1)
- write_ndelay = CONFIG_MSND_WRITE_NDELAY;
- if (write_ndelay)
- clear_bit(F_DISABLE_WRITE_NDELAY, &dev.flags);
- else
- set_bit(F_DISABLE_WRITE_NDELAY, &dev.flags);
-#ifndef MSND_CLASSIC
- if (digital)
- set_bit(F_HAVEDIGITAL, &dev.flags);
-#endif
- init_waitqueue_head(&dev.writeblock);
- init_waitqueue_head(&dev.readblock);
- init_waitqueue_head(&dev.writeflush);
- msnd_fifo_init(&dev.DAPF);
- msnd_fifo_init(&dev.DARF);
- spin_lock_init(&dev.lock);
- printk(KERN_INFO LOGNAME ": %u byte audio FIFOs (x2)\n", dev.fifosize);
- if ((err = msnd_fifo_alloc(&dev.DAPF, dev.fifosize)) < 0) {
- printk(KERN_ERR LOGNAME ": Couldn't allocate write FIFO\n");
- return err;
- }
-
- if ((err = msnd_fifo_alloc(&dev.DARF, dev.fifosize)) < 0) {
- printk(KERN_ERR LOGNAME ": Couldn't allocate read FIFO\n");
- msnd_fifo_free(&dev.DAPF);
- return err;
- }
-
- if ((err = probe_multisound()) < 0) {
- printk(KERN_ERR LOGNAME ": Probe failed\n");
- msnd_fifo_free(&dev.DAPF);
- msnd_fifo_free(&dev.DARF);
- return err;
- }
-
- if ((err = attach_multisound()) < 0) {
- printk(KERN_ERR LOGNAME ": Attach failed\n");
- msnd_fifo_free(&dev.DAPF);
- msnd_fifo_free(&dev.DARF);
- return err;
- }
-
- return 0;
-}
-
-static void __exit msdn_cleanup(void)
-{
- unload_multisound();
- msnd_fifo_free(&dev.DAPF);
- msnd_fifo_free(&dev.DARF);
-}
-
-module_init(msnd_init);
-module_exit(msdn_cleanup);
diff --git a/sound/oss/msnd_pinnacle.h b/sound/oss/msnd_pinnacle.h
deleted file mode 100644
index c18d66cbbe3f..000000000000
--- a/sound/oss/msnd_pinnacle.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/*********************************************************************
- *
- * msnd_pinnacle.h
- *
- * Turtle Beach MultiSound Sound Card Driver for Linux
- *
- * Some parts of this header file were derived from the Turtle Beach
- * MultiSound Driver Development Kit.
- *
- * Copyright (C) 1998 Andrew Veliath
- * Copyright (C) 1993 Turtle Beach Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- ********************************************************************/
-#ifndef __MSND_PINNACLE_H
-#define __MSND_PINNACLE_H
-
-
-#define DSP_NUMIO 0x08
-
-#define IREG_LOGDEVICE 0x07
-#define IREG_ACTIVATE 0x30
-#define LD_ACTIVATE 0x01
-#define LD_DISACTIVATE 0x00
-#define IREG_EECONTROL 0x3F
-#define IREG_MEMBASEHI 0x40
-#define IREG_MEMBASELO 0x41
-#define IREG_MEMCONTROL 0x42
-#define IREG_MEMRANGEHI 0x43
-#define IREG_MEMRANGELO 0x44
-#define MEMTYPE_8BIT 0x00
-#define MEMTYPE_16BIT 0x02
-#define MEMTYPE_RANGE 0x00
-#define MEMTYPE_HIADDR 0x01
-#define IREG_IO0_BASEHI 0x60
-#define IREG_IO0_BASELO 0x61
-#define IREG_IO1_BASEHI 0x62
-#define IREG_IO1_BASELO 0x63
-#define IREG_IRQ_NUMBER 0x70
-#define IREG_IRQ_TYPE 0x71
-#define IRQTYPE_HIGH 0x02
-#define IRQTYPE_LOW 0x00
-#define IRQTYPE_LEVEL 0x01
-#define IRQTYPE_EDGE 0x00
-
-#define HP_DSPR 0x04
-#define HP_BLKS 0x04
-
-#define HPDSPRESET_OFF 2
-#define HPDSPRESET_ON 0
-
-#define HPBLKSEL_0 2
-#define HPBLKSEL_1 3
-
-#define HIMT_DAT_OFF 0x03
-
-#define HIDSP_PLAY_UNDER 0x00
-#define HIDSP_INT_PLAY_UNDER 0x01
-#define HIDSP_SSI_TX_UNDER 0x02
-#define HIDSP_RECQ_OVERFLOW 0x08
-#define HIDSP_INT_RECORD_OVER 0x09
-#define HIDSP_SSI_RX_OVERFLOW 0x0a
-
-#define HIDSP_MIDI_IN_OVER 0x10
-
-#define HIDSP_MIDI_FRAME_ERR 0x11
-#define HIDSP_MIDI_PARITY_ERR 0x12
-#define HIDSP_MIDI_OVERRUN_ERR 0x13
-
-#define HIDSP_INPUT_CLIPPING 0x20
-#define HIDSP_MIX_CLIPPING 0x30
-#define HIDSP_DAT_IN_OFF 0x21
-
-#define HDEXAR_SET_ANA_IN 0
-#define HDEXAR_CLEAR_PEAKS 1
-#define HDEXAR_IN_SET_POTS 2
-#define HDEXAR_AUX_SET_POTS 3
-#define HDEXAR_CAL_A_TO_D 4
-#define HDEXAR_RD_EXT_DSP_BITS 5
-
-#define HDEXAR_SET_SYNTH_IN 4
-#define HDEXAR_READ_DAT_IN 5
-#define HDEXAR_MIC_SET_POTS 6
-#define HDEXAR_SET_DAT_IN 7
-
-#define HDEXAR_SET_SYNTH_48 8
-#define HDEXAR_SET_SYNTH_44 9
-
-#define TIME_PRO_RESET_DONE 0x028A
-#define TIME_PRO_SYSEX 0x001E
-#define TIME_PRO_RESET 0x0032
-
-#define AGND 0x01
-#define SIGNAL 0x02
-
-#define EXT_DSP_BIT_DCAL 0x0001
-#define EXT_DSP_BIT_MIDI_CON 0x0002
-
-#define BUFFSIZE 0x8000
-#define HOSTQ_SIZE 0x40
-
-#define SRAM_CNTL_START 0x7F00
-#define SMA_STRUCT_START 0x7F40
-
-#define DAP_BUFF_SIZE 0x2400
-#define DAR_BUFF_SIZE 0x2000
-
-#define DAPQ_STRUCT_SIZE 0x10
-#define DARQ_STRUCT_SIZE 0x10
-#define DAPQ_BUFF_SIZE (3 * 0x10)
-#define DARQ_BUFF_SIZE (3 * 0x10)
-#define MODQ_BUFF_SIZE 0x400
-#define MIDQ_BUFF_SIZE 0x800
-#define DSPQ_BUFF_SIZE 0x5A0
-
-#define DAPQ_DATA_BUFF 0x6C00
-#define DARQ_DATA_BUFF 0x6C30
-#define MODQ_DATA_BUFF 0x6C60
-#define MIDQ_DATA_BUFF 0x7060
-#define DSPQ_DATA_BUFF 0x7860
-
-#define DAPQ_OFFSET SRAM_CNTL_START
-#define DARQ_OFFSET (SRAM_CNTL_START + 0x08)
-#define MODQ_OFFSET (SRAM_CNTL_START + 0x10)
-#define MIDQ_OFFSET (SRAM_CNTL_START + 0x18)
-#define DSPQ_OFFSET (SRAM_CNTL_START + 0x20)
-
-#define MOP_WAVEHDR 0
-#define MOP_EXTOUT 1
-#define MOP_HWINIT 0xfe
-#define MOP_NONE 0xff
-#define MOP_MAX 1
-
-#define MIP_EXTIN 0
-#define MIP_WAVEHDR 1
-#define MIP_HWINIT 0xfe
-#define MIP_MAX 1
-
-/* Pinnacle/Fiji SMA Common Data */
-#define SMA_wCurrPlayBytes 0x0000
-#define SMA_wCurrRecordBytes 0x0002
-#define SMA_wCurrPlayVolLeft 0x0004
-#define SMA_wCurrPlayVolRight 0x0006
-#define SMA_wCurrInVolLeft 0x0008
-#define SMA_wCurrInVolRight 0x000a
-#define SMA_wCurrMHdrVolLeft 0x000c
-#define SMA_wCurrMHdrVolRight 0x000e
-#define SMA_dwCurrPlayPitch 0x0010
-#define SMA_dwCurrPlayRate 0x0014
-#define SMA_wCurrMIDIIOPatch 0x0018
-#define SMA_wCurrPlayFormat 0x001a
-#define SMA_wCurrPlaySampleSize 0x001c
-#define SMA_wCurrPlayChannels 0x001e
-#define SMA_wCurrPlaySampleRate 0x0020
-#define SMA_wCurrRecordFormat 0x0022
-#define SMA_wCurrRecordSampleSize 0x0024
-#define SMA_wCurrRecordChannels 0x0026
-#define SMA_wCurrRecordSampleRate 0x0028
-#define SMA_wCurrDSPStatusFlags 0x002a
-#define SMA_wCurrHostStatusFlags 0x002c
-#define SMA_wCurrInputTagBits 0x002e
-#define SMA_wCurrLeftPeak 0x0030
-#define SMA_wCurrRightPeak 0x0032
-#define SMA_bMicPotPosLeft 0x0034
-#define SMA_bMicPotPosRight 0x0035
-#define SMA_bMicPotMaxLeft 0x0036
-#define SMA_bMicPotMaxRight 0x0037
-#define SMA_bInPotPosLeft 0x0038
-#define SMA_bInPotPosRight 0x0039
-#define SMA_bAuxPotPosLeft 0x003a
-#define SMA_bAuxPotPosRight 0x003b
-#define SMA_bInPotMaxLeft 0x003c
-#define SMA_bInPotMaxRight 0x003d
-#define SMA_bAuxPotMaxLeft 0x003e
-#define SMA_bAuxPotMaxRight 0x003f
-#define SMA_bInPotMaxMethod 0x0040
-#define SMA_bAuxPotMaxMethod 0x0041
-#define SMA_wCurrMastVolLeft 0x0042
-#define SMA_wCurrMastVolRight 0x0044
-#define SMA_wCalFreqAtoD 0x0046
-#define SMA_wCurrAuxVolLeft 0x0048
-#define SMA_wCurrAuxVolRight 0x004a
-#define SMA_wCurrPlay1VolLeft 0x004c
-#define SMA_wCurrPlay1VolRight 0x004e
-#define SMA_wCurrPlay2VolLeft 0x0050
-#define SMA_wCurrPlay2VolRight 0x0052
-#define SMA_wCurrPlay3VolLeft 0x0054
-#define SMA_wCurrPlay3VolRight 0x0056
-#define SMA_wCurrPlay4VolLeft 0x0058
-#define SMA_wCurrPlay4VolRight 0x005a
-#define SMA_wCurrPlay1PeakLeft 0x005c
-#define SMA_wCurrPlay1PeakRight 0x005e
-#define SMA_wCurrPlay2PeakLeft 0x0060
-#define SMA_wCurrPlay2PeakRight 0x0062
-#define SMA_wCurrPlay3PeakLeft 0x0064
-#define SMA_wCurrPlay3PeakRight 0x0066
-#define SMA_wCurrPlay4PeakLeft 0x0068
-#define SMA_wCurrPlay4PeakRight 0x006a
-#define SMA_wCurrPlayPeakLeft 0x006c
-#define SMA_wCurrPlayPeakRight 0x006e
-#define SMA_wCurrDATSR 0x0070
-#define SMA_wCurrDATRXCHNL 0x0072
-#define SMA_wCurrDATTXCHNL 0x0074
-#define SMA_wCurrDATRXRate 0x0076
-#define SMA_dwDSPPlayCount 0x0078
-#define SMA__size 0x007c
-
-#ifdef HAVE_DSPCODEH
-# include "pndsperm.c"
-# include "pndspini.c"
-# define PERMCODE pndsperm
-# define INITCODE pndspini
-# define PERMCODESIZE sizeof(pndsperm)
-# define INITCODESIZE sizeof(pndspini)
-#else
-# ifndef CONFIG_MSNDPIN_INIT_FILE
-# define CONFIG_MSNDPIN_INIT_FILE \
- "/etc/sound/pndspini.bin"
-# endif
-# ifndef CONFIG_MSNDPIN_PERM_FILE
-# define CONFIG_MSNDPIN_PERM_FILE \
- "/etc/sound/pndsperm.bin"
-# endif
-# define PERMCODEFILE CONFIG_MSNDPIN_PERM_FILE
-# define INITCODEFILE CONFIG_MSNDPIN_INIT_FILE
-# define PERMCODE dspini
-# define INITCODE permini
-# define PERMCODESIZE sizeof_dspini
-# define INITCODESIZE sizeof_permini
-#endif
-#define LONGNAME "MultiSound (Pinnacle/Fiji)"
-
-#endif /* __MSND_PINNACLE_H */
diff --git a/sound/oss/opl3.c b/sound/oss/opl3.c
deleted file mode 100644
index f0f5b5be6314..000000000000
--- a/sound/oss/opl3.c
+++ /dev/null
@@ -1,1255 +0,0 @@
-/*
- * sound/oss/opl3.c
- *
- * A low level driver for the Yamaha YM3812 and OPL-3 chips
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- *
- * Changes
- * Thomas Sailer ioctl code reworked (vmalloc/vfree removed)
- * Alan Cox modularisation, fixed sound_mem allocs.
- * Christoph Hellwig Adapted to module_init/module_exit
- * Arnaldo C. de Melo get rid of check_region, use request_region for
- * OPL4, release it on exit, some cleanups.
- *
- * Status
- * Believed to work. Badly needs a rewrite to support multiple
- * OPL3 devices.
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-
-/*
- * Major improvements to the FM handling 30AUG92 by Rob Hooft,
- * hooft@chem.ruu.nl
- */
-
-#include "sound_config.h"
-
-#include "opl3_hw.h"
-
-#define MAX_VOICE 18
-#define OFFS_4OP 11
-
-struct voice_info
-{
- unsigned char keyon_byte;
- long bender;
- long bender_range;
- unsigned long orig_freq;
- unsigned long current_freq;
- int volume;
- int mode;
- int panning; /* 0xffff means not set */
-};
-
-struct opl_devinfo
-{
- int base;
- int left_io, right_io;
- int nr_voice;
- int lv_map[MAX_VOICE];
-
- struct voice_info voc[MAX_VOICE];
- struct voice_alloc_info *v_alloc;
- struct channel_info *chn_info;
-
- struct sbi_instrument i_map[SBFM_MAXINSTR];
- struct sbi_instrument *act_i[MAX_VOICE];
-
- struct synth_info fm_info;
-
- int busy;
- int model;
- unsigned char cmask;
-
- int is_opl4;
-};
-
-static struct opl_devinfo *devc = NULL;
-
-static int detected_model;
-
-static int store_instr(int instr_no, struct sbi_instrument *instr);
-static void freq_to_fnum(int freq, int *block, int *fnum);
-static void opl3_command(int io_addr, unsigned int addr, unsigned int val);
-static int opl3_kill_note(int dev, int voice, int note, int velocity);
-
-static void enter_4op_mode(void)
-{
- int i;
- static int v4op[MAX_VOICE] = {
- 0, 1, 2, 9, 10, 11, 6, 7, 8, 15, 16, 17
- };
-
- devc->cmask = 0x3f; /* Connect all possible 4 OP voice operators */
- opl3_command(devc->right_io, CONNECTION_SELECT_REGISTER, 0x3f);
-
- for (i = 0; i < 3; i++)
- pv_map[i].voice_mode = 4;
- for (i = 3; i < 6; i++)
- pv_map[i].voice_mode = 0;
-
- for (i = 9; i < 12; i++)
- pv_map[i].voice_mode = 4;
- for (i = 12; i < 15; i++)
- pv_map[i].voice_mode = 0;
-
- for (i = 0; i < 12; i++)
- devc->lv_map[i] = v4op[i];
- devc->v_alloc->max_voice = devc->nr_voice = 12;
-}
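Note that the 0x3f written to CONNECTION_SELECT_REGISTER above is simply every pairing bit set: in terms of the constants defined in opl3_hw.h it equals (RIGHT_4OP_0 | RIGHT_4OP_1 | RIGHT_4OP_2 | LEFT_4OP_0 | LEFT_4OP_1 | LEFT_4OP_2), i.e. all six pairable voices on both halves are switched into 4 OP mode, which is why only 12 logical voices remain available afterwards.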
-
-static int opl3_ioctl(int dev, unsigned int cmd, void __user * arg)
-{
- struct sbi_instrument ins;
-
- switch (cmd) {
- case SNDCTL_FM_LOAD_INSTR:
- printk(KERN_WARNING "Warning: Obsolete ioctl(SNDCTL_FM_LOAD_INSTR) used. Fix the program.\n");
- if (copy_from_user(&ins, arg, sizeof(ins)))
- return -EFAULT;
- if (ins.channel < 0 || ins.channel >= SBFM_MAXINSTR) {
- printk(KERN_WARNING "FM Error: Invalid instrument number %d\n", ins.channel);
- return -EINVAL;
- }
- return store_instr(ins.channel, &ins);
-
- case SNDCTL_SYNTH_INFO:
- devc->fm_info.nr_voices = (devc->nr_voice == 12) ? 6 : devc->nr_voice;
- if (copy_to_user(arg, &devc->fm_info, sizeof(devc->fm_info)))
- return -EFAULT;
- return 0;
-
- case SNDCTL_SYNTH_MEMAVL:
- return 0x7fffffff;
-
- case SNDCTL_FM_4OP_ENABLE:
- if (devc->model == 2)
- enter_4op_mode();
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
-static int opl3_detect(int ioaddr)
-{
- /*
- * This function returns 1 if the FM chip is present at the given I/O port
- * The detection algorithm plays with the timer built in the FM chip and
- * looks for a change in the status register.
- *
- * Note! The timers of the FM chip are not connected to AdLib (and compatible)
- * boards.
- *
- * Note2! The chip is initialized if detected.
- */
-
- unsigned char stat1, signature;
- int i;
-
- if (devc != NULL)
- {
- printk(KERN_ERR "opl3: Only one OPL3 supported.\n");
- return 0;
- }
-
- devc = kzalloc(sizeof(*devc), GFP_KERNEL);
-
- if (devc == NULL)
- {
- printk(KERN_ERR "opl3: Can't allocate memory for the device control "
-		       "structure\n");
- return 0;
- }
-
- strcpy(devc->fm_info.name, "OPL2");
-
- if (!request_region(ioaddr, 4, devc->fm_info.name)) {
- printk(KERN_WARNING "opl3: I/O port 0x%x already in use\n", ioaddr);
- goto cleanup_devc;
- }
-
- devc->base = ioaddr;
-
- /* Reset timers 1 and 2 */
- opl3_command(ioaddr, TIMER_CONTROL_REGISTER, TIMER1_MASK | TIMER2_MASK);
-
- /* Reset the IRQ of the FM chip */
- opl3_command(ioaddr, TIMER_CONTROL_REGISTER, IRQ_RESET);
-
- signature = stat1 = inb(ioaddr); /* Status register */
-
- if (signature != 0x00 && signature != 0x06 && signature != 0x02 &&
- signature != 0x0f)
- {
- MDB(printk(KERN_INFO "OPL3 not detected %x\n", signature));
- goto cleanup_region;
- }
-
- if (signature == 0x06) /* OPL2 */
- {
- detected_model = 2;
- }
- else if (signature == 0x00 || signature == 0x0f) /* OPL3 or OPL4 */
- {
- unsigned char tmp;
-
- detected_model = 3;
-
- /*
-		 * Detect availability of OPL4 (_experimental_). This probably works
-		 * only after a cold boot. In addition, the OPL4 port
- * of the chip may not be connected to the PC bus at all.
- */
-
- opl3_command(ioaddr + 2, OPL3_MODE_REGISTER, 0x00);
- opl3_command(ioaddr + 2, OPL3_MODE_REGISTER, OPL3_ENABLE | OPL4_ENABLE);
-
-		if ((tmp = inb(ioaddr)) == 0x02)	/* Have an OPL4 */
- {
- detected_model = 4;
- }
-
- if (request_region(ioaddr - 8, 2, "OPL4")) /* OPL4 port was free */
- {
- int tmp;
-
- outb((0x02), ioaddr - 8); /* Select OPL4 ID register */
- udelay(10);
- tmp = inb(ioaddr - 7); /* Read it */
- udelay(10);
-
- if (tmp == 0x20) /* OPL4 should return 0x20 here */
- {
- detected_model = 4;
- outb((0xF8), ioaddr - 8); /* Select OPL4 FM mixer control */
- udelay(10);
- outb((0x1B), ioaddr - 7); /* Write value */
- udelay(10);
- }
- else
- { /* release OPL4 port */
- release_region(ioaddr - 8, 2);
- detected_model = 3;
- }
- }
- opl3_command(ioaddr + 2, OPL3_MODE_REGISTER, 0);
- }
- for (i = 0; i < 9; i++)
- opl3_command(ioaddr, KEYON_BLOCK + i, 0); /*
- * Note off
- */
-
- opl3_command(ioaddr, TEST_REGISTER, ENABLE_WAVE_SELECT);
- opl3_command(ioaddr, PERCOSSION_REGISTER, 0x00); /*
- * Melodic mode.
- */
- return 1;
-cleanup_region:
- release_region(ioaddr, 4);
-cleanup_devc:
- kfree(devc);
- devc = NULL;
- return 0;
-}
-
-static int opl3_kill_note (int devno, int voice, int note, int velocity)
-{
- struct physical_voice_info *map;
-
- if (voice < 0 || voice >= devc->nr_voice)
- return 0;
-
- devc->v_alloc->map[voice] = 0;
-
- map = &pv_map[devc->lv_map[voice]];
-
- if (map->voice_mode == 0)
- return 0;
-
- opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num, devc->voc[voice].keyon_byte & ~0x20);
- devc->voc[voice].keyon_byte = 0;
- devc->voc[voice].bender = 0;
- devc->voc[voice].volume = 64;
- devc->voc[voice].panning = 0xffff; /* Not set */
- devc->voc[voice].bender_range = 200;
- devc->voc[voice].orig_freq = 0;
- devc->voc[voice].current_freq = 0;
- devc->voc[voice].mode = 0;
- return 0;
-}
-
-#define HIHAT 0
-#define CYMBAL 1
-#define TOMTOM 2
-#define SNARE 3
-#define BDRUM 4
-#define UNDEFINED TOMTOM
-#define DEFAULT TOMTOM
-
-static int store_instr(int instr_no, struct sbi_instrument *instr)
-{
- if (instr->key != FM_PATCH && (instr->key != OPL3_PATCH || devc->model != 2))
- printk(KERN_WARNING "FM warning: Invalid patch format field (key) 0x%x\n", instr->key);
- memcpy((char *) &(devc->i_map[instr_no]), (char *) instr, sizeof(*instr));
- return 0;
-}
-
-static int opl3_set_instr (int dev, int voice, int instr_no)
-{
- if (voice < 0 || voice >= devc->nr_voice)
- return 0;
- if (instr_no < 0 || instr_no >= SBFM_MAXINSTR)
- instr_no = 0; /* Acoustic piano (usually) */
-
- devc->act_i[voice] = &devc->i_map[instr_no];
- return 0;
-}
-
-/*
- * The next table looks magical, but it certainly is not. Its values have
- * been calculated as table[i]=8*log(i/64)/log(2) with an obvious exception
- * for i=0. This log-table converts a linear volume-scaling (0..127) to a
- * logarithmic scaling as present in the FM-synthesizer chips. So: volume
- * 64 = 0 dB = relative volume 0, and volume 32 = -6 dB = relative
- * volume -8. It was implemented as a table because it is only 128 bytes
- * and it saves a lot of log() calculations. (RH)
- */
-
-static char fm_volume_table[128] =
-{
- -64, -48, -40, -35, -32, -29, -27, -26,
- -24, -23, -21, -20, -19, -18, -18, -17,
- -16, -15, -15, -14, -13, -13, -12, -12,
- -11, -11, -10, -10, -10, -9, -9, -8,
- -8, -8, -7, -7, -7, -6, -6, -6,
- -5, -5, -5, -5, -4, -4, -4, -4,
- -3, -3, -3, -3, -2, -2, -2, -2,
- -2, -1, -1, -1, -1, 0, 0, 0,
- 0, 0, 0, 1, 1, 1, 1, 1,
- 1, 2, 2, 2, 2, 2, 2, 2,
- 3, 3, 3, 3, 3, 3, 3, 4,
- 4, 4, 4, 4, 4, 4, 4, 5,
- 5, 5, 5, 5, 5, 5, 5, 5,
- 6, 6, 6, 6, 6, 6, 6, 6,
- 6, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 8, 8, 8, 8, 8
-};
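As a hedged sanity check (not part of the driver), the table can be regenerated from the formula quoted in the comment above; rounding to the nearest integer appears to reproduce the stored values:

/* Standalone sketch: print round(8 * log2(i / 64.0)) for i = 0..127,
 * pinning the i == 0 entry to -64 as the comment's "obvious exception". */
#include <math.h>
#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 128; i++) {
		int v = i ? (int)lrint(8.0 * log2(i / 64.0)) : -64;

		printf("%4d,%s", v, (i % 8 == 7) ? "\n" : " ");
	}
	return 0;
}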
-
-static void calc_vol(unsigned char *regbyte, int volume, int main_vol)
-{
- int level = (~*regbyte & 0x3f);
-
- if (main_vol > 127)
- main_vol = 127;
- volume = (volume * main_vol) / 127;
-
- if (level)
- level += fm_volume_table[volume];
-
- if (level > 0x3f)
- level = 0x3f;
- if (level < 0)
- level = 0;
-
- *regbyte = (*regbyte & 0xc0) | (~level & 0x3f);
-}
-
-static void set_voice_volume(int voice, int volume, int main_vol)
-{
- unsigned char vol1, vol2, vol3, vol4;
- struct sbi_instrument *instr;
- struct physical_voice_info *map;
-
- if (voice < 0 || voice >= devc->nr_voice)
- return;
-
- map = &pv_map[devc->lv_map[voice]];
- instr = devc->act_i[voice];
-
- if (!instr)
- instr = &devc->i_map[0];
-
- if (instr->channel < 0)
- return;
-
- if (devc->voc[voice].mode == 0)
- return;
-
- if (devc->voc[voice].mode == 2)
- {
- vol1 = instr->operators[2];
- vol2 = instr->operators[3];
- if ((instr->operators[10] & 0x01))
- {
- calc_vol(&vol1, volume, main_vol);
- calc_vol(&vol2, volume, main_vol);
- }
- else
- {
- calc_vol(&vol2, volume, main_vol);
- }
- opl3_command(map->ioaddr, KSL_LEVEL + map->op[0], vol1);
- opl3_command(map->ioaddr, KSL_LEVEL + map->op[1], vol2);
- }
- else
- { /*
- * 4 OP voice
- */
- int connection;
-
- vol1 = instr->operators[2];
- vol2 = instr->operators[3];
- vol3 = instr->operators[OFFS_4OP + 2];
- vol4 = instr->operators[OFFS_4OP + 3];
-
- /*
- * The connection method for 4 OP devc->voc is defined by the rightmost
- * bits at the offsets 10 and 10+OFFS_4OP
- */
-
- connection = ((instr->operators[10] & 0x01) << 1) | (instr->operators[10 + OFFS_4OP] & 0x01);
-
- switch (connection)
- {
- case 0:
- calc_vol(&vol4, volume, main_vol);
- break;
-
- case 1:
- calc_vol(&vol2, volume, main_vol);
- calc_vol(&vol4, volume, main_vol);
- break;
-
- case 2:
- calc_vol(&vol1, volume, main_vol);
- calc_vol(&vol4, volume, main_vol);
- break;
-
- case 3:
- calc_vol(&vol1, volume, main_vol);
- calc_vol(&vol3, volume, main_vol);
- calc_vol(&vol4, volume, main_vol);
- break;
-
- default:
- ;
- }
- opl3_command(map->ioaddr, KSL_LEVEL + map->op[0], vol1);
- opl3_command(map->ioaddr, KSL_LEVEL + map->op[1], vol2);
- opl3_command(map->ioaddr, KSL_LEVEL + map->op[2], vol3);
- opl3_command(map->ioaddr, KSL_LEVEL + map->op[3], vol4);
- }
-}
-
-static int opl3_start_note (int dev, int voice, int note, int volume)
-{
- unsigned char data, fpc;
- int block, fnum, freq, voice_mode, pan;
- struct sbi_instrument *instr;
- struct physical_voice_info *map;
-
- if (voice < 0 || voice >= devc->nr_voice)
- return 0;
-
- map = &pv_map[devc->lv_map[voice]];
- pan = devc->voc[voice].panning;
-
- if (map->voice_mode == 0)
- return 0;
-
- if (note == 255) /*
- * Just change the volume
- */
- {
- set_voice_volume(voice, volume, devc->voc[voice].volume);
- return 0;
- }
-
- /*
- * Kill previous note before playing
- */
-
- opl3_command(map->ioaddr, KSL_LEVEL + map->op[1], 0xff); /*
- * Carrier
- * volume to
- * min
- */
- opl3_command(map->ioaddr, KSL_LEVEL + map->op[0], 0xff); /*
- * Modulator
- * volume to
- */
-
- if (map->voice_mode == 4)
- {
- opl3_command(map->ioaddr, KSL_LEVEL + map->op[2], 0xff);
- opl3_command(map->ioaddr, KSL_LEVEL + map->op[3], 0xff);
- }
-
- opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num, 0x00); /*
- * Note
- * off
- */
-
- instr = devc->act_i[voice];
-
- if (!instr)
- instr = &devc->i_map[0];
-
- if (instr->channel < 0)
- {
- printk(KERN_WARNING "opl3: Initializing voice %d with undefined instrument\n", voice);
- return 0;
- }
-
- if (map->voice_mode == 2 && instr->key == OPL3_PATCH)
- return 0; /*
- * Cannot play
- */
-
- voice_mode = map->voice_mode;
-
- if (voice_mode == 4)
- {
- int voice_shift;
-
- voice_shift = (map->ioaddr == devc->left_io) ? 0 : 3;
- voice_shift += map->voice_num;
-
- if (instr->key != OPL3_PATCH) /*
- * Just 2 OP patch
- */
- {
- voice_mode = 2;
- devc->cmask &= ~(1 << voice_shift);
- }
- else
- {
- devc->cmask |= (1 << voice_shift);
- }
-
- opl3_command(devc->right_io, CONNECTION_SELECT_REGISTER, devc->cmask);
- }
-
- /*
- * Set Sound Characteristics
- */
-
- opl3_command(map->ioaddr, AM_VIB + map->op[0], instr->operators[0]);
- opl3_command(map->ioaddr, AM_VIB + map->op[1], instr->operators[1]);
-
- /*
- * Set Attack/Decay
- */
-
- opl3_command(map->ioaddr, ATTACK_DECAY + map->op[0], instr->operators[4]);
- opl3_command(map->ioaddr, ATTACK_DECAY + map->op[1], instr->operators[5]);
-
- /*
- * Set Sustain/Release
- */
-
- opl3_command(map->ioaddr, SUSTAIN_RELEASE + map->op[0], instr->operators[6]);
- opl3_command(map->ioaddr, SUSTAIN_RELEASE + map->op[1], instr->operators[7]);
-
- /*
- * Set Wave Select
- */
-
- opl3_command(map->ioaddr, WAVE_SELECT + map->op[0], instr->operators[8]);
- opl3_command(map->ioaddr, WAVE_SELECT + map->op[1], instr->operators[9]);
-
- /*
- * Set Feedback/Connection
- */
-
- fpc = instr->operators[10];
-
- if (pan != 0xffff)
- {
- fpc &= ~STEREO_BITS;
- if (pan < -64)
- fpc |= VOICE_TO_LEFT;
- else
- if (pan > 64)
- fpc |= VOICE_TO_RIGHT;
- else
- fpc |= (VOICE_TO_LEFT | VOICE_TO_RIGHT);
- }
-
- if (!(fpc & 0x30))
- fpc |= 0x30; /*
- * Ensure that at least one chn is enabled
- */
- opl3_command(map->ioaddr, FEEDBACK_CONNECTION + map->voice_num, fpc);
-
- /*
- * If the voice is a 4 OP one, initialize the operators 3 and 4 also
- */
-
- if (voice_mode == 4)
- {
- /*
- * Set Sound Characteristics
- */
-
- opl3_command(map->ioaddr, AM_VIB + map->op[2], instr->operators[OFFS_4OP + 0]);
- opl3_command(map->ioaddr, AM_VIB + map->op[3], instr->operators[OFFS_4OP + 1]);
-
- /*
- * Set Attack/Decay
- */
-
- opl3_command(map->ioaddr, ATTACK_DECAY + map->op[2], instr->operators[OFFS_4OP + 4]);
- opl3_command(map->ioaddr, ATTACK_DECAY + map->op[3], instr->operators[OFFS_4OP + 5]);
-
- /*
- * Set Sustain/Release
- */
-
- opl3_command(map->ioaddr, SUSTAIN_RELEASE + map->op[2], instr->operators[OFFS_4OP + 6]);
- opl3_command(map->ioaddr, SUSTAIN_RELEASE + map->op[3], instr->operators[OFFS_4OP + 7]);
-
- /*
- * Set Wave Select
- */
-
- opl3_command(map->ioaddr, WAVE_SELECT + map->op[2], instr->operators[OFFS_4OP + 8]);
- opl3_command(map->ioaddr, WAVE_SELECT + map->op[3], instr->operators[OFFS_4OP + 9]);
-
- /*
- * Set Feedback/Connection
- */
-
- fpc = instr->operators[OFFS_4OP + 10];
- if (!(fpc & 0x30))
- fpc |= 0x30; /*
- * Ensure that at least one chn is enabled
- */
- opl3_command(map->ioaddr, FEEDBACK_CONNECTION + map->voice_num + 3, fpc);
- }
-
- devc->voc[voice].mode = voice_mode;
- set_voice_volume(voice, volume, devc->voc[voice].volume);
-
- freq = devc->voc[voice].orig_freq = note_to_freq(note) / 1000;
-
- /*
- * Since the pitch bender may have been set before playing the note, we
- * have to calculate the bending now.
- */
-
- freq = compute_finetune(devc->voc[voice].orig_freq, devc->voc[voice].bender, devc->voc[voice].bender_range, 0);
- devc->voc[voice].current_freq = freq;
-
- freq_to_fnum(freq, &block, &fnum);
-
- /*
- * Play note
- */
-
- data = fnum & 0xff; /*
- * Least significant bits of fnumber
- */
- opl3_command(map->ioaddr, FNUM_LOW + map->voice_num, data);
-
- data = 0x20 | ((block & 0x7) << 2) | ((fnum >> 8) & 0x3);
- devc->voc[voice].keyon_byte = data;
- opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num, data);
- if (voice_mode == 4)
- opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num + 3, data);
-
- return 0;
-}
-
-static void freq_to_fnum (int freq, int *block, int *fnum)
-{
- int f, octave;
-
- /*
- * Converts the note frequency to block and fnum values for the FM chip
- */
- /*
-	 * First try to compute the block value (octave) where the note belongs
- */
-
- f = freq;
-
- octave = 5;
-
- if (f == 0)
- octave = 0;
- else if (f < 261)
- {
- while (f < 261)
- {
- octave--;
- f <<= 1;
- }
- }
- else if (f > 493)
- {
- while (f > 493)
- {
- octave++;
- f >>= 1;
- }
- }
-
- if (octave > 7)
- octave = 7;
-
- *fnum = freq * (1 << (20 - octave)) / 49716;
- *block = octave;
-}
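A quick worked example (not from the source): for a 440 Hz note, 440 already lies between 261 and 493, so the octave stays at 5, and *fnum = 440 * 2^(20 - 5) / 49716 = 440 * 32768 / 49716, which is roughly 290; the caller then writes F-number 290 into FNUM_LOW and block 5 (plus the key-on bit and the high F-number bits) into KEYON_BLOCK.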
-
-static void opl3_command (int io_addr, unsigned int addr, unsigned int val)
-{
- int i;
-
- /*
-	 * The original 2-OP synth requires quite a long delay after writing to a
-	 * register. The OPL-3 survives with just two INBs.
- */
-
- outb(((unsigned char) (addr & 0xff)), io_addr);
-
- if (devc->model != 2)
- udelay(10);
- else
- for (i = 0; i < 2; i++)
- inb(io_addr);
-
- outb(((unsigned char) (val & 0xff)), io_addr + 1);
-
- if (devc->model != 2)
- udelay(30);
- else
- for (i = 0; i < 2; i++)
- inb(io_addr);
-}
-
-static void opl3_reset(int devno)
-{
- int i;
-
- for (i = 0; i < 18; i++)
- devc->lv_map[i] = i;
-
- for (i = 0; i < devc->nr_voice; i++)
- {
- opl3_command(pv_map[devc->lv_map[i]].ioaddr,
- KSL_LEVEL + pv_map[devc->lv_map[i]].op[0], 0xff);
-
- opl3_command(pv_map[devc->lv_map[i]].ioaddr,
- KSL_LEVEL + pv_map[devc->lv_map[i]].op[1], 0xff);
-
- if (pv_map[devc->lv_map[i]].voice_mode == 4)
- {
- opl3_command(pv_map[devc->lv_map[i]].ioaddr,
- KSL_LEVEL + pv_map[devc->lv_map[i]].op[2], 0xff);
-
- opl3_command(pv_map[devc->lv_map[i]].ioaddr,
- KSL_LEVEL + pv_map[devc->lv_map[i]].op[3], 0xff);
- }
-
- opl3_kill_note(devno, i, 0, 64);
- }
-
- if (devc->model == 2)
- {
- devc->v_alloc->max_voice = devc->nr_voice = 18;
-
- for (i = 0; i < 18; i++)
- pv_map[i].voice_mode = 2;
-
- }
-}
-
-static int opl3_open(int dev, int mode)
-{
- int i;
-
- if (devc->busy)
- return -EBUSY;
- devc->busy = 1;
-
- devc->v_alloc->max_voice = devc->nr_voice = (devc->model == 2) ? 18 : 9;
- devc->v_alloc->timestamp = 0;
-
- for (i = 0; i < 18; i++)
- {
- devc->v_alloc->map[i] = 0;
- devc->v_alloc->alloc_times[i] = 0;
- }
-
- devc->cmask = 0x00; /*
- * Just 2 OP mode
- */
- if (devc->model == 2)
- opl3_command(devc->right_io, CONNECTION_SELECT_REGISTER, devc->cmask);
- return 0;
-}
-
-static void opl3_close(int dev)
-{
- devc->busy = 0;
- devc->v_alloc->max_voice = devc->nr_voice = (devc->model == 2) ? 18 : 9;
-
- devc->fm_info.nr_drums = 0;
- devc->fm_info.perc_mode = 0;
-
- opl3_reset(dev);
-}
-
-static void opl3_hw_control(int dev, unsigned char *event)
-{
-}
-
-static int opl3_load_patch(int dev, int format, const char __user *addr,
- int count, int pmgr_flag)
-{
- struct sbi_instrument ins;
-
-	if (count < sizeof(ins))
- {
- printk(KERN_WARNING "FM Error: Patch record too short\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&ins, addr, sizeof(ins)))
- return -EFAULT;
-
- if (ins.channel < 0 || ins.channel >= SBFM_MAXINSTR)
- {
- printk(KERN_WARNING "FM Error: Invalid instrument number %d\n", ins.channel);
- return -EINVAL;
- }
- ins.key = format;
-
- return store_instr(ins.channel, &ins);
-}
-
-static void opl3_panning(int dev, int voice, int value)
-{
-
- if (voice < 0 || voice >= devc->nr_voice)
- return;
-
- devc->voc[voice].panning = value;
-}
-
-static void opl3_volume_method(int dev, int mode)
-{
-}
-
-#define SET_VIBRATO(cell) { \
- tmp = instr->operators[(cell-1)+(((cell-1)/2)*OFFS_4OP)]; \
- if (pressure > 110) \
- tmp |= 0x40; /* Vibrato on */ \
- opl3_command (map->ioaddr, AM_VIB + map->op[cell-1], tmp);}
-
-static void opl3_aftertouch(int dev, int voice, int pressure)
-{
- int tmp;
- struct sbi_instrument *instr;
- struct physical_voice_info *map;
-
- if (voice < 0 || voice >= devc->nr_voice)
- return;
-
- map = &pv_map[devc->lv_map[voice]];
-
- if (map->voice_mode == 0)
- return;
-
- /*
-	 * Adjust the amount of vibrato depending on the pressure
- */
-
- instr = devc->act_i[voice];
-
- if (!instr)
- instr = &devc->i_map[0];
-
- if (devc->voc[voice].mode == 4)
- {
- int connection = ((instr->operators[10] & 0x01) << 1) | (instr->operators[10 + OFFS_4OP] & 0x01);
-
- switch (connection)
- {
- case 0:
- SET_VIBRATO(4);
- break;
-
- case 1:
- SET_VIBRATO(2);
- SET_VIBRATO(4);
- break;
-
- case 2:
- SET_VIBRATO(1);
- SET_VIBRATO(4);
- break;
-
- case 3:
- SET_VIBRATO(1);
- SET_VIBRATO(3);
- SET_VIBRATO(4);
- break;
-
- }
- /*
- * Not implemented yet
- */
- }
- else
- {
- SET_VIBRATO(1);
-
- if ((instr->operators[10] & 0x01)) /*
- * Additive synthesis
- */
- SET_VIBRATO(2);
- }
-}
-
-#undef SET_VIBRATO
-
-static void bend_pitch(int dev, int voice, int value)
-{
- unsigned char data;
- int block, fnum, freq;
- struct physical_voice_info *map;
-
- map = &pv_map[devc->lv_map[voice]];
-
- if (map->voice_mode == 0)
- return;
-
- devc->voc[voice].bender = value;
- if (!value)
- return;
- if (!(devc->voc[voice].keyon_byte & 0x20))
- return; /*
- * Not keyed on
- */
-
- freq = compute_finetune(devc->voc[voice].orig_freq, devc->voc[voice].bender, devc->voc[voice].bender_range, 0);
- devc->voc[voice].current_freq = freq;
-
- freq_to_fnum(freq, &block, &fnum);
-
- data = fnum & 0xff; /*
- * Least significant bits of fnumber
- */
- opl3_command(map->ioaddr, FNUM_LOW + map->voice_num, data);
-
- data = 0x20 | ((block & 0x7) << 2) | ((fnum >> 8) & 0x3);
- devc->voc[voice].keyon_byte = data;
- opl3_command(map->ioaddr, KEYON_BLOCK + map->voice_num, data);
-}
-
-static void opl3_controller (int dev, int voice, int ctrl_num, int value)
-{
- if (voice < 0 || voice >= devc->nr_voice)
- return;
-
- switch (ctrl_num)
- {
- case CTRL_PITCH_BENDER:
- bend_pitch(dev, voice, value);
- break;
-
- case CTRL_PITCH_BENDER_RANGE:
- devc->voc[voice].bender_range = value;
- break;
-
- case CTL_MAIN_VOLUME:
- devc->voc[voice].volume = value / 128;
- break;
-
- case CTL_PAN:
- devc->voc[voice].panning = (value * 2) - 128;
- break;
- }
-}
-
-static void opl3_bender(int dev, int voice, int value)
-{
- if (voice < 0 || voice >= devc->nr_voice)
- return;
-
- bend_pitch(dev, voice, value - 8192);
-}
-
-static int opl3_alloc_voice(int dev, int chn, int note, struct voice_alloc_info *alloc)
-{
- int i, p, best, first, avail, best_time = 0x7fffffff;
- struct sbi_instrument *instr;
- int is4op;
- int instr_no;
-
- if (chn < 0 || chn > 15)
- instr_no = 0;
- else
- instr_no = devc->chn_info[chn].pgm_num;
-
- instr = &devc->i_map[instr_no];
- if (instr->channel < 0 || /* Instrument not loaded */
- devc->nr_voice != 12) /* Not in 4 OP mode */
- is4op = 0;
- else if (devc->nr_voice == 12) /* 4 OP mode */
- is4op = (instr->key == OPL3_PATCH);
- else
- is4op = 0;
-
- if (is4op)
- {
- first = p = 0;
- avail = 6;
- }
- else
- {
- if (devc->nr_voice == 12) /* 4 OP mode. Use the '2 OP only' operators first */
- first = p = 6;
- else
- first = p = 0;
- avail = devc->nr_voice;
- }
-
- /*
- * Now try to find a free voice
- */
- best = first;
-
- for (i = 0; i < avail; i++)
- {
- if (alloc->map[p] == 0)
- {
- return p;
- }
- if (alloc->alloc_times[p] < best_time) /* Find oldest playing note */
- {
- best_time = alloc->alloc_times[p];
- best = p;
- }
- p = (p + 1) % avail;
- }
-
- /*
- * Insert some kind of priority mechanism here.
- */
-
- if (best < 0)
- best = 0;
- if (best > devc->nr_voice)
- best -= devc->nr_voice;
-
- return best; /* All devc->voc in use. Select the first one. */
-}
-
-static void opl3_setup_voice(int dev, int voice, int chn)
-{
- struct channel_info *info;
-
- if (voice < 0 || voice >= devc->nr_voice)
- return;
-
- if (chn < 0 || chn > 15)
- return;
-
- info = &synth_devs[dev]->chn_info[chn];
-
- opl3_set_instr(dev, voice, info->pgm_num);
-
- devc->voc[voice].bender = 0;
- devc->voc[voice].bender_range = info->bender_range;
- devc->voc[voice].volume = info->controllers[CTL_MAIN_VOLUME];
- devc->voc[voice].panning = (info->controllers[CTL_PAN] * 2) - 128;
-}
-
-static struct synth_operations opl3_operations =
-{
- .owner = THIS_MODULE,
- .id = "OPL",
- .info = NULL,
- .midi_dev = 0,
- .synth_type = SYNTH_TYPE_FM,
- .synth_subtype = FM_TYPE_ADLIB,
- .open = opl3_open,
- .close = opl3_close,
- .ioctl = opl3_ioctl,
- .kill_note = opl3_kill_note,
- .start_note = opl3_start_note,
- .set_instr = opl3_set_instr,
- .reset = opl3_reset,
- .hw_control = opl3_hw_control,
- .load_patch = opl3_load_patch,
- .aftertouch = opl3_aftertouch,
- .controller = opl3_controller,
- .panning = opl3_panning,
- .volume_method = opl3_volume_method,
- .bender = opl3_bender,
- .alloc_voice = opl3_alloc_voice,
- .setup_voice = opl3_setup_voice
-};
-
-static int opl3_init(int ioaddr, struct module *owner)
-{
- int i;
- int me;
-
- if (devc == NULL)
- {
- printk(KERN_ERR "opl3: Device control structure not initialized.\n");
- return -1;
- }
-
- if ((me = sound_alloc_synthdev()) == -1)
- {
- printk(KERN_WARNING "opl3: Too many synthesizers\n");
- return -1;
- }
-
- devc->nr_voice = 9;
-
- devc->fm_info.device = 0;
- devc->fm_info.synth_type = SYNTH_TYPE_FM;
- devc->fm_info.synth_subtype = FM_TYPE_ADLIB;
- devc->fm_info.perc_mode = 0;
- devc->fm_info.nr_voices = 9;
- devc->fm_info.nr_drums = 0;
- devc->fm_info.instr_bank_size = SBFM_MAXINSTR;
- devc->fm_info.capabilities = 0;
- devc->left_io = ioaddr;
- devc->right_io = ioaddr + 2;
-
- if (detected_model <= 2)
- devc->model = 1;
- else
- {
- devc->model = 2;
- if (detected_model == 4)
- devc->is_opl4 = 1;
- }
-
- opl3_operations.info = &devc->fm_info;
-
- synth_devs[me] = &opl3_operations;
-
- if (owner)
- synth_devs[me]->owner = owner;
-
- sequencer_init();
- devc->v_alloc = &opl3_operations.alloc;
- devc->chn_info = &opl3_operations.chn_info[0];
-
- if (devc->model == 2)
- {
- if (devc->is_opl4)
- strcpy(devc->fm_info.name, "Yamaha OPL4/OPL3 FM");
- else
- strcpy(devc->fm_info.name, "Yamaha OPL3");
-
- devc->v_alloc->max_voice = devc->nr_voice = 18;
- devc->fm_info.nr_drums = 0;
- devc->fm_info.synth_subtype = FM_TYPE_OPL3;
- devc->fm_info.capabilities |= SYNTH_CAP_OPL3;
-
- for (i = 0; i < 18; i++)
- {
- if (pv_map[i].ioaddr == USE_LEFT)
- pv_map[i].ioaddr = devc->left_io;
- else
- pv_map[i].ioaddr = devc->right_io;
- }
- opl3_command(devc->right_io, OPL3_MODE_REGISTER, OPL3_ENABLE);
- opl3_command(devc->right_io, CONNECTION_SELECT_REGISTER, 0x00);
- }
- else
- {
- strcpy(devc->fm_info.name, "Yamaha OPL2");
- devc->v_alloc->max_voice = devc->nr_voice = 9;
- devc->fm_info.nr_drums = 0;
-
- for (i = 0; i < 18; i++)
- pv_map[i].ioaddr = devc->left_io;
- }
- conf_printf2(devc->fm_info.name, ioaddr, 0, -1, -1);
-
- for (i = 0; i < SBFM_MAXINSTR; i++)
- devc->i_map[i].channel = -1;
-
- return me;
-}
-
-static int me;
-
-static int io = -1;
-
-module_param_hw(io, int, ioport, 0);
-
-static int __init init_opl3 (void)
-{
- printk(KERN_INFO "YM3812 and OPL-3 driver Copyright (C) by Hannu Savolainen, Rob Hooft 1993-1996\n");
-
- if (io != -1) /* User loading pure OPL3 module */
- {
- if (!opl3_detect(io))
- {
- return -ENODEV;
- }
-
- me = opl3_init(io, THIS_MODULE);
- }
-
- return 0;
-}
-
-static void __exit cleanup_opl3(void)
-{
- if (devc && io != -1)
- {
- if (devc->base) {
- release_region(devc->base,4);
- if (devc->is_opl4)
- release_region(devc->base - 8, 2);
- }
- kfree(devc);
- devc = NULL;
- sound_unload_synthdev(me);
- }
-}
-
-module_init(init_opl3);
-module_exit(cleanup_opl3);
-
-#ifndef MODULE
-static int __init setup_opl3(char *str)
-{
- /* io */
- int ints[2];
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- io = ints[1];
-
- return 1;
-}
-
-__setup("opl3=", setup_opl3);
-#endif
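For a built-in (non-modular) kernel, the __setup hook above means the I/O port can be passed on the kernel command line, e.g. opl3=0x388 (a hypothetical example; 0x388 is the traditional AdLib base port rather than a value mandated by this driver).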
-MODULE_LICENSE("GPL");
diff --git a/sound/oss/opl3_hw.h b/sound/oss/opl3_hw.h
deleted file mode 100644
index 8b11c893e869..000000000000
--- a/sound/oss/opl3_hw.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * opl3_hw.h - Definitions of the OPL-3 registers
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- *
- * The OPL-3 mode is switched on by writing 0x01 to offset 5
- * of the right side.
- *
- * Another special register at the right side is at offset 4. It contains
- * a bit mask defining which voices are used as 4 OP voices.
- *
- * The percussive mode is implemented in the left side only.
- *
- * With the above exceptions, both sides can be operated independently.
- *
- * A 4 OP voice can be created by setting the corresponding
- * bit at offset 4 of the right side.
- *
- * For example setting the rightmost bit (0x01) changes the
- * first voice on the right side to the 4 OP mode. The fourth
- * voice is made inaccessible.
- *
- * If a voice is set to the 2 OP mode, it works like the 2 OP mode
- * of the original YM3812 (AdLib). In addition, the voice can
- * be connected to the left, right or both stereo channels. It can
- * even be left unconnected. This works with 4 OP voices also.
- *
- * The stereo connection bits are located in the FEEDBACK_CONNECTION
- * register of the voice (0xC0-0xC8). In 4 OP voices these bits are
- * in the second half of the voice.
- */
-
-/*
- * Register numbers for the global registers
- */
-
-#define TEST_REGISTER 0x01
-#define ENABLE_WAVE_SELECT 0x20
-
-#define TIMER1_REGISTER 0x02
-#define TIMER2_REGISTER 0x03
-#define TIMER_CONTROL_REGISTER 0x04 /* Left side */
-#define IRQ_RESET 0x80
-#define TIMER1_MASK 0x40
-#define TIMER2_MASK 0x20
-#define TIMER1_START 0x01
-#define TIMER2_START 0x02
-
-#define CONNECTION_SELECT_REGISTER 0x04 /* Right side */
-#define RIGHT_4OP_0 0x01
-#define RIGHT_4OP_1 0x02
-#define RIGHT_4OP_2 0x04
-#define LEFT_4OP_0 0x08
-#define LEFT_4OP_1 0x10
-#define LEFT_4OP_2 0x20
-
-#define OPL3_MODE_REGISTER 0x05 /* Right side */
-#define OPL3_ENABLE 0x01
-#define OPL4_ENABLE 0x02
-
-#define KBD_SPLIT_REGISTER 0x08 /* Left side */
-#define COMPOSITE_SINE_WAVE_MODE 0x80 /* Don't use with OPL-3? */
-#define KEYBOARD_SPLIT 0x40
-
-#define PERCOSSION_REGISTER 0xbd /* Left side only */
-#define TREMOLO_DEPTH 0x80
-#define VIBRATO_DEPTH 0x40
-#define PERCOSSION_ENABLE 0x20
-#define BASSDRUM_ON 0x10
-#define SNAREDRUM_ON 0x08
-#define TOMTOM_ON 0x04
-#define CYMBAL_ON 0x02
-#define HIHAT_ON 0x01
-
-/*
- * Offsets to the register banks for operators. To get the
- * register number just add the operator offset to the bank offset
- *
- * AM/VIB/EG/KSR/Multiple (0x20 to 0x35)
- */
-#define AM_VIB 0x20
-#define TREMOLO_ON 0x80
-#define VIBRATO_ON 0x40
-#define SUSTAIN_ON 0x20
-#define KSR 0x10 /* Key scaling rate */
-#define MULTIPLE_MASK 0x0f /* Frequency multiplier */
-
- /*
- * KSL/Total level (0x40 to 0x55)
- */
-#define KSL_LEVEL 0x40
-#define KSL_MASK 0xc0 /* Envelope scaling bits */
-#define TOTAL_LEVEL_MASK 0x3f /* Strength (volume) of OP */
-
-/*
- * Attack / Decay rate (0x60 to 0x75)
- */
-#define ATTACK_DECAY 0x60
-#define ATTACK_MASK 0xf0
-#define DECAY_MASK 0x0f
-
-/*
- * Sustain level / Release rate (0x80 to 0x95)
- */
-#define SUSTAIN_RELEASE 0x80
-#define SUSTAIN_MASK 0xf0
-#define RELEASE_MASK 0x0f
-
-/*
- * Wave select (0xE0 to 0xF5)
- */
-#define WAVE_SELECT 0xe0
-
-/*
- * Offsets to the register banks for voices. Just add the offset
- * to the voice number to get the register number.
- *
- * F-Number low bits (0xA0 to 0xA8).
- */
-#define FNUM_LOW 0xa0
-
-/*
- * F-number high bits / Key on / Block (octave) (0xB0 to 0xB8)
- */
-#define KEYON_BLOCK 0xb0
-#define KEYON_BIT 0x20
-#define BLOCKNUM_MASK 0x1c
-#define FNUM_HIGH_MASK 0x03
-
-/*
- * Feedback / Connection (0xc0 to 0xc8)
- *
- * These registers have two new bits when the OPL-3 mode
- * is selected. These bits control connecting the voice
- * to the stereo channels. For 4 OP voices this bit is
- * defined in the second half of the voice (add 3 to the
- * register offset).
- *
- * For 4 OP voices the connection bit is used in
- * both halves (gives 4 ways to connect the operators).
- */
-#define FEEDBACK_CONNECTION 0xc0
-#define FEEDBACK_MASK 0x0e /* Valid just for 1st OP of a voice */
-#define CONNECTION_BIT 0x01
-/*
- * In the 4 OP mode there are four possible configurations of how the
- * operators can be connected together (in 2 OP modes there is just
- * AM or FM). The 4 OP connection mode is defined by the rightmost
- * bit of the FEEDBACK_CONNECTION (0xC0-0xC8) on both halves.
- *
- * First half Second half Mode
- *
- * +---+
- * v |
- * 0 0 >+-1-+--2--3--4-->
- *
- *
- *
- * +---+
- * | |
- * 0 1 >+-1-+--2-+
- * |->
- * >--3----4-+
- *
- * +---+
- * | |
- * 1 0 >+-1-+-----+
- * |->
- * >--2--3--4-+
- *
- * +---+
- * | |
- * 1 1 >+-1-+--+
- * |
- * >--2--3-+->
- * |
- * >--4----+
- */
-#define STEREO_BITS 0x30 /* OPL-3 only */
-#define VOICE_TO_LEFT 0x10
-#define VOICE_TO_RIGHT 0x20
-
-/*
- * Definition table for the physical voices
- */
-
-struct physical_voice_info {
- unsigned char voice_num;
- unsigned char voice_mode; /* 0=unavailable, 2=2 OP, 4=4 OP */
- unsigned short ioaddr; /* I/O port (left or right side) */
- unsigned char op[4]; /* Operator offsets */
- };
-
-/*
- * There are 18 possible 2 OP voices
- * (9 on the left side and 9 on the right).
- * The first OP is the modulator and the 2nd is the carrier.
- *
- * The first three voices on both sides may be connected
- * with another voice to a 4 OP voice. For example voice 0
- * can be connected with voice 3. The operators of voice 3 are
- * used as operators 3 and 4 of the new 4 OP voice.
- * In this case the 2 OP voice number 0 is the 'first half' and
- * voice 3 is the second.
- */
-
-#define USE_LEFT 0
-#define USE_RIGHT 1
-
-static struct physical_voice_info pv_map[18] =
-{
-/* No Mode Side OP1 OP2 OP3 OP4 */
-/* --------------------------------------------------- */
- { 0, 2, USE_LEFT, {0x00, 0x03, 0x08, 0x0b}},
- { 1, 2, USE_LEFT, {0x01, 0x04, 0x09, 0x0c}},
- { 2, 2, USE_LEFT, {0x02, 0x05, 0x0a, 0x0d}},
-
- { 3, 2, USE_LEFT, {0x08, 0x0b, 0x00, 0x00}},
- { 4, 2, USE_LEFT, {0x09, 0x0c, 0x00, 0x00}},
- { 5, 2, USE_LEFT, {0x0a, 0x0d, 0x00, 0x00}},
-
- { 6, 2, USE_LEFT, {0x10, 0x13, 0x00, 0x00}}, /* Used by percussive voices */
- { 7, 2, USE_LEFT, {0x11, 0x14, 0x00, 0x00}}, /* if the percussive mode */
- { 8, 2, USE_LEFT, {0x12, 0x15, 0x00, 0x00}}, /* is selected */
-
- { 0, 2, USE_RIGHT, {0x00, 0x03, 0x08, 0x0b}},
- { 1, 2, USE_RIGHT, {0x01, 0x04, 0x09, 0x0c}},
- { 2, 2, USE_RIGHT, {0x02, 0x05, 0x0a, 0x0d}},
-
- { 3, 2, USE_RIGHT, {0x08, 0x0b, 0x00, 0x00}},
- { 4, 2, USE_RIGHT, {0x09, 0x0c, 0x00, 0x00}},
- { 5, 2, USE_RIGHT, {0x0a, 0x0d, 0x00, 0x00}},
-
- { 6, 2, USE_RIGHT, {0x10, 0x13, 0x00, 0x00}},
- { 7, 2, USE_RIGHT, {0x11, 0x14, 0x00, 0x00}},
- { 8, 2, USE_RIGHT, {0x12, 0x15, 0x00, 0x00}}
-};
-/*
- * DMA buffer calls
- */
diff --git a/sound/oss/os.h b/sound/oss/os.h
deleted file mode 100644
index 16f3a069b85c..000000000000
--- a/sound/oss/os.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#define ALLOW_SELECT
-#undef NO_INLINE_ASM
-#define SHORT_BANNERS
-#define MANUAL_PNP
-#undef DO_TIMINGS
-
-#include <linux/module.h>
-
-#ifdef __KERNEL__
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/param.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <asm/page.h>
-#include <linux/vmalloc.h>
-#include <linux/uaccess.h>
-#include <linux/poll.h>
-#include <linux/pci.h>
-#endif
-
-#include <linux/soundcard.h>
-
-#define FALSE 0
-#define TRUE 1
-
-extern int sound_alloc_dma(int chn, char *deviceID);
-extern int sound_open_dma(int chn, char *deviceID);
-extern void sound_free_dma(int chn);
-extern void sound_close_dma(int chn);
-
-extern void reprogram_timer(void);
-
-#define USE_AUTOINIT_DMA
-
-extern void *sound_mem_blocks[1024];
-extern int sound_nblocks;
-
-#undef PSEUDO_DMA_AUTOINIT
-#define ALLOW_BUFFER_MAPPING
-
-extern const struct file_operations oss_sound_fops;
diff --git a/sound/oss/pas2.h b/sound/oss/pas2.h
deleted file mode 100644
index 57f476238309..000000000000
--- a/sound/oss/pas2.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/* From pas_card.c */
-int pas_set_intr(int mask);
-int pas_remove_intr(int mask);
-unsigned char pas_read(int ioaddr);
-void pas_write(unsigned char data, int ioaddr);
-
-/* From pas_audio.c */
-void pas_pcm_interrupt(unsigned char status, int cause);
-void pas_pcm_init(struct address_info *hw_config);
-
-/* From pas_mixer.c */
-int pas_init_mixer(void);
-
-/* From pas_midi.c */
-void pas_midi_init(void);
-void pas_midi_interrupt(void);
-
-/* From pas2_mixer.c*/
-void mix_write(unsigned char data, int ioaddr);
diff --git a/sound/oss/pas2_card.c b/sound/oss/pas2_card.c
deleted file mode 100644
index 769fca692d2a..000000000000
--- a/sound/oss/pas2_card.c
+++ /dev/null
@@ -1,458 +0,0 @@
-/*
- * sound/oss/pas2_card.c
- *
- * Detection routine for the Pro Audio Spectrum cards.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include "sound_config.h"
-
-#include "pas2.h"
-#include "sb.h"
-
-static unsigned char dma_bits[] = {
- 4, 1, 2, 3, 0, 5, 6, 7
-};
-
-static unsigned char irq_bits[] = {
- 0, 0, 1, 2, 3, 4, 5, 6, 0, 1, 7, 8, 9, 0, 10, 11
-};
-
-static unsigned char sb_irq_bits[] = {
- 0x00, 0x00, 0x08, 0x10, 0x00, 0x18, 0x00, 0x20,
- 0x00, 0x08, 0x28, 0x30, 0x38, 0, 0
-};
-
-static unsigned char sb_dma_bits[] = {
- 0x00, 0x40, 0x80, 0xC0, 0, 0, 0, 0
-};
-
-/*
- * The Address Translation code is used to convert I/O register addresses to
- * be relative to the given base register.
- */
-
-int pas_translate_code = 0;
-static int pas_intr_mask;
-static int pas_irq;
-static int pas_sb_base;
-DEFINE_SPINLOCK(pas_lock);
-#ifndef CONFIG_PAS_JOYSTICK
-static bool joystick;
-#else
-static bool joystick = 1;
-#endif
-#ifdef SYMPHONY_PAS
-static bool symphony = 1;
-#else
-static bool symphony;
-#endif
-#ifdef BROKEN_BUS_CLOCK
-static bool broken_bus_clock = 1;
-#else
-static bool broken_bus_clock;
-#endif
-
-static struct address_info cfg;
-static struct address_info cfg2;
-
-char pas_model = 0;
-static char *pas_model_names[] = {
- "",
- "Pro AudioSpectrum+",
- "CDPC",
- "Pro AudioSpectrum 16",
- "Pro AudioSpectrum 16D"
-};
-
-/*
- * pas_read() and pas_write() are the equivalents of inb and outb.
- * These routines perform the I/O address translation required
- * to support base addresses other than the default.
- */
-
-unsigned char pas_read(int ioaddr)
-{
- return inb(ioaddr + pas_translate_code);
-}
-
-void pas_write(unsigned char data, int ioaddr)
-{
- outb((data), ioaddr + pas_translate_code);
-}
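For illustration (the 0x384 base is an arbitrary example, not a value taken from this file): detect_pas_hw() below sets pas_translate_code = io_base - 0x388, so a card configured at 0x384 gets a translate code of -4 and pas_read(0x0B8B) ends up reading port 0x0B87. A hypothetical helper showing the same thing:

static unsigned char pas_example_read_id(void)
{
	pas_translate_code = 0x384 - 0x388;	/* = -4, as detect_pas_hw() would set it */
	return pas_read(0x0B8B);		/* performs inb(0x0B87) */
}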
-
-/******************* Begin of the Interrupt Handler ********************/
-
-static irqreturn_t pasintr(int irq, void *dev_id)
-{
- int status;
-
- status = pas_read(0x0B89);
- pas_write(status, 0x0B89); /* Clear interrupt */
-
- if (status & 0x08)
- {
- pas_pcm_interrupt(status, 1);
- status &= ~0x08;
- }
- if (status & 0x10)
- {
- pas_midi_interrupt();
- status &= ~0x10;
- }
- return IRQ_HANDLED;
-}
-
-int pas_set_intr(int mask)
-{
- if (!mask)
- return 0;
-
- pas_intr_mask |= mask;
-
- pas_write(pas_intr_mask, 0x0B8B);
- return 0;
-}
-
-int pas_remove_intr(int mask)
-{
- if (!mask)
- return 0;
-
- pas_intr_mask &= ~mask;
- pas_write(pas_intr_mask, 0x0B8B);
-
- return 0;
-}
-
-/******************* End of the Interrupt handler **********************/
-
-/******************* Begin of the Initialization Code ******************/
-
-static int __init config_pas_hw(struct address_info *hw_config)
-{
- char ok = 1;
- unsigned int_ptrs; /* scsi/sound interrupt pointers */
-
- pas_irq = hw_config->irq;
-
- pas_write(0x00, 0x0B8B);
- pas_write(0x36, 0x138B);
- pas_write(0x36, 0x1388);
- pas_write(0, 0x1388);
- pas_write(0x74, 0x138B);
- pas_write(0x74, 0x1389);
- pas_write(0, 0x1389);
-
- pas_write(0x80 | 0x40 | 0x20 | 1, 0x0B8A);
- pas_write(0x80 | 0x20 | 0x10 | 0x08 | 0x01, 0xF8A);
- pas_write(0x01 | 0x02 | 0x04 | 0x10 /*
- * |
- * 0x80
- */ , 0xB88);
-
- pas_write(0x80 | (joystick ? 0x40 : 0), 0xF388);
-
- if (pas_irq < 0 || pas_irq > 15)
- {
- printk(KERN_ERR "PAS16: Invalid IRQ %d", pas_irq);
- hw_config->irq=-1;
- ok = 0;
- }
- else
- {
- int_ptrs = pas_read(0xF38A);
- int_ptrs = (int_ptrs & 0xf0) | irq_bits[pas_irq];
- pas_write(int_ptrs, 0xF38A);
- if (!irq_bits[pas_irq])
- {
- printk(KERN_ERR "PAS16: Invalid IRQ %d", pas_irq);
- hw_config->irq=-1;
- ok = 0;
- }
- else
- {
- if (request_irq(pas_irq, pasintr, 0, "PAS16",hw_config) < 0) {
- printk(KERN_ERR "PAS16: Cannot allocate IRQ %d\n",pas_irq);
- hw_config->irq=-1;
- ok = 0;
- }
- }
- }
-
- if (hw_config->dma < 0 || hw_config->dma > 7)
- {
- printk(KERN_ERR "PAS16: Invalid DMA selection %d", hw_config->dma);
- hw_config->dma=-1;
- ok = 0;
- }
- else
- {
- pas_write(dma_bits[hw_config->dma], 0xF389);
- if (!dma_bits[hw_config->dma])
- {
- printk(KERN_ERR "PAS16: Invalid DMA selection %d", hw_config->dma);
- hw_config->dma=-1;
- ok = 0;
- }
- else
- {
- if (sound_alloc_dma(hw_config->dma, "PAS16"))
- {
- printk(KERN_ERR "pas2_card.c: Can't allocate DMA channel\n");
- hw_config->dma=-1;
- ok = 0;
- }
- }
- }
-
- /*
- * This fixes the timing problems of the PAS due to the Symphony chipset
- * as per Media Vision. Only define this if your PAS doesn't work correctly.
- */
-
- if(symphony)
- {
- outb((0x05), 0xa8);
- outb((0x60), 0xa9);
- }
-
- if(broken_bus_clock)
- pas_write(0x01 | 0x10 | 0x20 | 0x04, 0x8388);
- else
- /*
- * pas_write(0x01, 0x8388);
- */
- pas_write(0x01 | 0x10 | 0x20, 0x8388);
-
- pas_write(0x18, 0x838A); /* ??? */
- pas_write(0x20 | 0x01, 0x0B8A); /* Mute off, filter = 17.897 kHz */
- pas_write(8, 0xBF8A);
-
- mix_write(0x80 | 5, 0x078B);
- mix_write(5, 0x078B);
-
- {
- struct address_info *sb_config;
-
- sb_config = &cfg2;
- if (sb_config->io_base)
- {
- unsigned char irq_dma;
-
- /*
- * Turn on Sound Blaster compatibility
- * bit 1 = SB emulation
- * bit 0 = MPU401 emulation (CDPC only :-( )
- */
-
- pas_write(0x02, 0xF788);
-
- /*
- * "Emulation address"
- */
-
- pas_write((sb_config->io_base >> 4) & 0x0f, 0xF789);
- pas_sb_base = sb_config->io_base;
-
- if (!sb_dma_bits[sb_config->dma])
- printk(KERN_ERR "PAS16 Warning: Invalid SB DMA %d\n\n", sb_config->dma);
-
- if (!sb_irq_bits[sb_config->irq])
- printk(KERN_ERR "PAS16 Warning: Invalid SB IRQ %d\n\n", sb_config->irq);
-
- irq_dma = sb_dma_bits[sb_config->dma] |
- sb_irq_bits[sb_config->irq];
-
- pas_write(irq_dma, 0xFB8A);
- }
- else
- pas_write(0x00, 0xF788);
- }
-
- if (!ok)
- printk(KERN_WARNING "PAS16: Driver not enabled\n");
-
- return ok;
-}
-
-static int __init detect_pas_hw(struct address_info *hw_config)
-{
- unsigned char board_id, foo;
-
- /*
- * WARNING: Setting an option like W:1 or so that disables warm boot reset
- * of the card will screw up this detect code something fierce. Adding code
- * to handle this means possibly interfering with other cards on the bus if
-	 * you have something on base port 0x388. So be forewarned.
- */
-
- outb((0xBC), 0x9A01); /* Activate first board */
- outb((hw_config->io_base >> 2), 0x9A01); /* Set base address */
- pas_translate_code = hw_config->io_base - 0x388;
-	pas_write(1, 0xBF88);	/* Select one wait state */
-
- board_id = pas_read(0x0B8B);
-
- if (board_id == 0xff)
- return 0;
-
- /*
- * We probably have a PAS-series board, now check for a PAS16-series board
- * by trying to change the board revision bits. PAS16-series hardware won't
- * let you do this - the bits are read-only.
- */
-
- foo = board_id ^ 0xe0;
-
- pas_write(foo, 0x0B8B);
- foo = pas_read(0x0B8B);
- pas_write(board_id, 0x0B8B);
-
- if (board_id != foo)
- return 0;
-
- pas_model = pas_read(0xFF88);
-
- return pas_model;
-}
-
-static void __init attach_pas_card(struct address_info *hw_config)
-{
- pas_irq = hw_config->irq;
-
- if (detect_pas_hw(hw_config))
- {
-
- if ((pas_model = pas_read(0xFF88)))
- {
- char temp[100];
-
- if (pas_model < 0 ||
- pas_model >= ARRAY_SIZE(pas_model_names)) {
- printk(KERN_ERR "pas2 unrecognized model.\n");
- return;
- }
- sprintf(temp,
- "%s rev %d", pas_model_names[(int) pas_model],
- pas_read(0x2789));
- conf_printf(temp, hw_config);
- }
- if (config_pas_hw(hw_config))
- {
- pas_pcm_init(hw_config);
- pas_midi_init();
- pas_init_mixer();
- }
- }
-}
-
-static inline int __init probe_pas(struct address_info *hw_config)
-{
- return detect_pas_hw(hw_config);
-}
-
-static void __exit unload_pas(struct address_info *hw_config)
-{
- extern int pas_audiodev;
- extern int pas2_mididev;
-
- if (hw_config->dma>0)
- sound_free_dma(hw_config->dma);
- if (hw_config->irq>0)
- free_irq(hw_config->irq, hw_config);
-
- if(pas_audiodev!=-1)
- sound_unload_mixerdev(audio_devs[pas_audiodev]->mixer_dev);
- if(pas2_mididev!=-1)
- sound_unload_mididev(pas2_mididev);
- if(pas_audiodev!=-1)
- sound_unload_audiodev(pas_audiodev);
-}
-
-static int __initdata io = -1;
-static int __initdata irq = -1;
-static int __initdata dma = -1;
-static int __initdata dma16 = -1; /* Set this for modules that need it */
-
-static int __initdata sb_io = 0;
-static int __initdata sb_irq = -1;
-static int __initdata sb_dma = -1;
-static int __initdata sb_dma16 = -1;
-
-module_param_hw(io, int, ioport, 0);
-module_param_hw(irq, int, irq, 0);
-module_param_hw(dma, int, dma, 0);
-module_param_hw(dma16, int, dma, 0);
-
-module_param_hw(sb_io, int, ioport, 0);
-module_param_hw(sb_irq, int, irq, 0);
-module_param_hw(sb_dma, int, dma, 0);
-module_param_hw(sb_dma16, int, dma, 0);
-
-module_param(joystick, bool, 0);
-module_param(symphony, bool, 0);
-module_param(broken_bus_clock, bool, 0);
-
-MODULE_LICENSE("GPL");
-
-static int __init init_pas2(void)
-{
- printk(KERN_INFO "Pro Audio Spectrum driver Copyright (C) by Hannu Savolainen 1993-1996\n");
-
- cfg.io_base = io;
- cfg.irq = irq;
- cfg.dma = dma;
- cfg.dma2 = dma16;
-
- cfg2.io_base = sb_io;
- cfg2.irq = sb_irq;
- cfg2.dma = sb_dma;
- cfg2.dma2 = sb_dma16;
-
- if (cfg.io_base == -1 || cfg.dma == -1 || cfg.irq == -1) {
- printk(KERN_INFO "I/O, IRQ, DMA and type are mandatory\n");
- return -EINVAL;
- }
-
- if (!probe_pas(&cfg))
- return -ENODEV;
- attach_pas_card(&cfg);
-
- return 0;
-}
-
-static void __exit cleanup_pas2(void)
-{
- unload_pas(&cfg);
-}
-
-module_init(init_pas2);
-module_exit(cleanup_pas2);
-
-#ifndef MODULE
-static int __init setup_pas2(char *str)
-{
- /* io, irq, dma, dma2, sb_io, sb_irq, sb_dma, sb_dma2 */
- int ints[9];
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- io = ints[1];
- irq = ints[2];
- dma = ints[3];
- dma16 = ints[4];
-
- sb_io = ints[5];
- sb_irq = ints[6];
- sb_dma = ints[7];
- sb_dma16 = ints[8];
-
- return 1;
-}
-
-__setup("pas2=", setup_pas2);
-#endif
diff --git a/sound/oss/pas2_midi.c b/sound/oss/pas2_midi.c
deleted file mode 100644
index 1122d10a20c3..000000000000
--- a/sound/oss/pas2_midi.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * sound/oss/pas2_midi.c
- *
- * The low level driver for the PAS Midi Interface.
- */
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- * Bartlomiej Zolnierkiewicz : Added __init to pas_init_mixer()
- */
-
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include "sound_config.h"
-
-#include "pas2.h"
-
-extern spinlock_t pas_lock;
-
-static int midi_busy, input_opened;
-static int my_dev;
-
-int pas2_mididev=-1;
-
-static unsigned char tmp_queue[256];
-static volatile int qlen;
-static volatile unsigned char qhead, qtail;
-
-static void (*midi_input_intr) (int dev, unsigned char data);
-
-static int pas_midi_open(int dev, int mode,
- void (*input) (int dev, unsigned char data),
- void (*output) (int dev)
-)
-{
- int err;
- unsigned long flags;
- unsigned char ctrl;
-
-
- if (midi_busy)
- return -EBUSY;
-
- /*
- * Reset input and output FIFO pointers
- */
- pas_write(0x20 | 0x40,
- 0x178b);
-
- spin_lock_irqsave(&pas_lock, flags);
-
- if ((err = pas_set_intr(0x10)) < 0)
- {
- spin_unlock_irqrestore(&pas_lock, flags);
- return err;
- }
- /*
- * Enable input available and output FIFO empty interrupts
- */
-
- ctrl = 0;
- input_opened = 0;
- midi_input_intr = input;
-
- if (mode == OPEN_READ || mode == OPEN_READWRITE)
- {
- ctrl |= 0x04; /* Enable input */
- input_opened = 1;
- }
- if (mode == OPEN_WRITE || mode == OPEN_READWRITE)
- {
- ctrl |= 0x08 | 0x10; /* Enable output */
- }
- pas_write(ctrl, 0x178b);
-
- /*
- * Acknowledge any pending interrupts
- */
-
- pas_write(0xff, 0x1B88);
-
- spin_unlock_irqrestore(&pas_lock, flags);
-
- midi_busy = 1;
- qlen = qhead = qtail = 0;
- return 0;
-}
-
-static void pas_midi_close(int dev)
-{
-
- /*
- * Reset FIFO pointers, disable intrs
- */
- pas_write(0x20 | 0x40, 0x178b);
-
- pas_remove_intr(0x10);
- midi_busy = 0;
-}
-
-static int dump_to_midi(unsigned char midi_byte)
-{
- int fifo_space, x;
-
- fifo_space = ((x = pas_read(0x1B89)) >> 4) & 0x0f;
-
- /*
-	 * The MIDI FIFO space register and its documentation are hard to understand.
-	 * There seems to be no way to differentiate between buffer full and buffer
-	 * empty situations. For this reason we never write the buffer
-	 * completely full. This way we can assume that 0 (or is it 15)
-	 * means that the buffer is empty.
- */
-
- if (fifo_space < 2 && fifo_space != 0) /* Full (almost) */
- return 0; /* Ask upper layers to retry after some time */
-
- pas_write(midi_byte, 0x178A);
-
- return 1;
-}
-
-static int pas_midi_out(int dev, unsigned char midi_byte)
-{
-
- unsigned long flags;
-
- /*
- * Drain the local queue first
- */
-
- spin_lock_irqsave(&pas_lock, flags);
-
- while (qlen && dump_to_midi(tmp_queue[qhead]))
- {
- qlen--;
- qhead++;
- }
-
- spin_unlock_irqrestore(&pas_lock, flags);
-
- /*
- * Output the byte if the local queue is empty.
- */
-
- if (!qlen)
- if (dump_to_midi(midi_byte))
- return 1;
-
- /*
- * Put to the local queue
- */
-
- if (qlen >= 256)
- return 0; /* Local queue full */
-
- spin_lock_irqsave(&pas_lock, flags);
-
- tmp_queue[qtail] = midi_byte;
- qlen++;
- qtail++;
-
- spin_unlock_irqrestore(&pas_lock, flags);
-
- return 1;
-}
-
-static int pas_midi_start_read(int dev)
-{
- return 0;
-}
-
-static int pas_midi_end_read(int dev)
-{
- return 0;
-}
-
-static void pas_midi_kick(int dev)
-{
-}
-
-static int pas_buffer_status(int dev)
-{
- return qlen;
-}
-
-#define MIDI_SYNTH_NAME "Pro Audio Spectrum Midi"
-#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
-#include "midi_synth.h"
-
-static struct midi_operations pas_midi_operations =
-{
- .owner = THIS_MODULE,
- .info = {"Pro Audio Spectrum", 0, 0, SNDCARD_PAS},
- .converter = &std_midi_synth,
- .in_info = {0},
- .open = pas_midi_open,
- .close = pas_midi_close,
- .outputc = pas_midi_out,
- .start_read = pas_midi_start_read,
- .end_read = pas_midi_end_read,
- .kick = pas_midi_kick,
- .buffer_status = pas_buffer_status,
-};
-
-void __init pas_midi_init(void)
-{
- int dev = sound_alloc_mididev();
-
- if (dev == -1)
- {
- printk(KERN_WARNING "pas_midi_init: Too many midi devices detected\n");
- return;
- }
- std_midi_synth.midi_dev = my_dev = dev;
- midi_devs[dev] = &pas_midi_operations;
- pas2_mididev = dev;
- sequencer_init();
-}
-
-void pas_midi_interrupt(void)
-{
- unsigned char stat;
- int i, incount;
-
- stat = pas_read(0x1B88);
-
- if (stat & 0x04) /* Input data available */
- {
- incount = pas_read(0x1B89) & 0x0f; /* Input FIFO size */
- if (!incount)
- incount = 16;
-
- for (i = 0; i < incount; i++)
- if (input_opened)
- {
- midi_input_intr(my_dev, pas_read(0x178A));
- } else
- pas_read(0x178A); /* Flush */
- }
- if (stat & (0x08 | 0x10))
- {
- spin_lock(&pas_lock);/* called in irq context */
-
- while (qlen && dump_to_midi(tmp_queue[qhead]))
- {
- qlen--;
- qhead++;
- }
-
- spin_unlock(&pas_lock);
- }
- if (stat & 0x40)
- {
- printk(KERN_WARNING "MIDI output overrun %x,%x\n", pas_read(0x1B89), stat);
- }
- pas_write(stat, 0x1B88); /* Acknowledge interrupts */
-}
diff --git a/sound/oss/pas2_mixer.c b/sound/oss/pas2_mixer.c
deleted file mode 100644
index 50b5bd501247..000000000000
--- a/sound/oss/pas2_mixer.c
+++ /dev/null
@@ -1,327 +0,0 @@
-
-/*
- * sound/oss/pas2_mixer.c
- *
- * Mixer routines for the Pro Audio Spectrum cards.
- */
-
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- */
-/*
- * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
- * Bartlomiej Zolnierkiewicz : added __init to pas_init_mixer()
- */
-#include <linux/init.h>
-#include "sound_config.h"
-
-#include "pas2.h"
-
-extern int pas_translate_code;
-extern char pas_model;
-extern int *pas_osp;
-extern int pas_audiodev;
-
-static int rec_devices = (SOUND_MASK_MIC); /* Default recording source */
-static int mode_control;
-
-#define POSSIBLE_RECORDING_DEVICES (SOUND_MASK_SYNTH | SOUND_MASK_SPEAKER | SOUND_MASK_LINE | SOUND_MASK_MIC | \
- SOUND_MASK_CD | SOUND_MASK_ALTPCM)
-
-#define SUPPORTED_MIXER_DEVICES (SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_SPEAKER | SOUND_MASK_LINE | SOUND_MASK_MIC | \
- SOUND_MASK_CD | SOUND_MASK_ALTPCM | SOUND_MASK_IMIX | \
- SOUND_MASK_VOLUME | SOUND_MASK_BASS | SOUND_MASK_TREBLE | SOUND_MASK_RECLEV)
-
-static int *levels;
-
-static int default_levels[32] =
-{
- 0x3232, /* Master Volume */
- 0x3232, /* Bass */
- 0x3232, /* Treble */
- 0x5050, /* FM */
- 0x4b4b, /* PCM */
- 0x3232, /* PC Speaker */
- 0x4b4b, /* Ext Line */
- 0x4b4b, /* Mic */
- 0x4b4b, /* CD */
- 0x6464, /* Recording monitor */
- 0x4b4b, /* SB PCM */
- 0x6464 /* Recording level */
-};
-
-void
-mix_write(unsigned char data, int ioaddr)
-{
- /*
- * The Revision D cards have a problem with their MVA508 interface. The
- * kludge-o-rama fix is to make a 16-bit quantity with identical LSB and
-	 * MSB out of the output byte and to do a 16-bit out to the mixer port
-	 * minus 1. We need to do this because it isn't a timing problem but a
-	 * chip access sequence problem.
- */
-
- if (pas_model == 4)
- {
- outw(data | (data << 8), (ioaddr + pas_translate_code) - 1);
- outb((0x80), 0);
- } else
- pas_write(data, ioaddr);
-}
-
-static int
-mixer_output(int right_vol, int left_vol, int div, int bits,
- int mixer) /* Input or output mixer */
-{
- int left = left_vol * div / 100;
- int right = right_vol * div / 100;
-
-
- if (bits & 0x10)
- {
- left |= mixer;
- right |= mixer;
- }
- if (bits == 0x03 || bits == 0x04)
- {
- mix_write(0x80 | bits, 0x078B);
- mix_write(left, 0x078B);
- right_vol = left_vol;
- } else
- {
- mix_write(0x80 | 0x20 | bits, 0x078B);
- mix_write(left, 0x078B);
- mix_write(0x80 | 0x40 | bits, 0x078B);
- mix_write(right, 0x078B);
- }
-
- return (left_vol | (right_vol << 8));
-}
-
-static void
-set_mode(int new_mode)
-{
- mix_write(0x80 | 0x05, 0x078B);
- mix_write(new_mode, 0x078B);
-
- mode_control = new_mode;
-}
-
-static int
-pas_mixer_set(int whichDev, unsigned int level)
-{
- int left, right, devmask, changed, i, mixer = 0;
-
- left = level & 0x7f;
- right = (level & 0x7f00) >> 8;
-
- if (whichDev < SOUND_MIXER_NRDEVICES) {
- if ((1 << whichDev) & rec_devices)
- mixer = 0x20;
- else
- mixer = 0x00;
- }
-
- switch (whichDev)
- {
- case SOUND_MIXER_VOLUME: /* Master volume (0-63) */
- levels[whichDev] = mixer_output(right, left, 63, 0x01, 0);
- break;
-
- /*
- * Note! Bass and Treble are mono devices. Will use just the left
- * channel.
- */
- case SOUND_MIXER_BASS: /* Bass (0-12) */
- levels[whichDev] = mixer_output(right, left, 12, 0x03, 0);
- break;
- case SOUND_MIXER_TREBLE: /* Treble (0-12) */
- levels[whichDev] = mixer_output(right, left, 12, 0x04, 0);
- break;
-
- case SOUND_MIXER_SYNTH: /* Internal synthesizer (0-31) */
- levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x00, mixer);
- break;
- case SOUND_MIXER_PCM: /* PAS PCM (0-31) */
- levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x05, mixer);
- break;
- case SOUND_MIXER_ALTPCM: /* SB PCM (0-31) */
- levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x07, mixer);
- break;
- case SOUND_MIXER_SPEAKER: /* PC speaker (0-31) */
- levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x06, mixer);
- break;
- case SOUND_MIXER_LINE: /* External line (0-31) */
- levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x02, mixer);
- break;
- case SOUND_MIXER_CD: /* CD (0-31) */
- levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x03, mixer);
- break;
- case SOUND_MIXER_MIC: /* External microphone (0-31) */
- levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x04, mixer);
- break;
- case SOUND_MIXER_IMIX: /* Recording monitor (0-31) (Output mixer only) */
- levels[whichDev] = mixer_output(right, left, 31, 0x10 | 0x01,
- 0x00);
- break;
- case SOUND_MIXER_RECLEV: /* Recording level (0-15) */
- levels[whichDev] = mixer_output(right, left, 15, 0x02, 0);
- break;
-
-
- case SOUND_MIXER_RECSRC:
- devmask = level & POSSIBLE_RECORDING_DEVICES;
-
- changed = devmask ^ rec_devices;
- rec_devices = devmask;
-
- for (i = 0; i < SOUND_MIXER_NRDEVICES; i++)
- if (changed & (1 << i))
- {
- pas_mixer_set(i, levels[i]);
- }
- return rec_devices;
- break;
-
- default:
- return -EINVAL;
- }
-
- return (levels[whichDev]);
-}
-
-/*****/
-
-static void
-pas_mixer_reset(void)
-{
- int foo;
-
- for (foo = 0; foo < SOUND_MIXER_NRDEVICES; foo++)
- pas_mixer_set(foo, levels[foo]);
-
- set_mode(0x04 | 0x01);
-}
-
-static int pas_mixer_ioctl(int dev, unsigned int cmd, void __user *arg)
-{
- int level,v ;
- int __user *p = (int __user *)arg;
-
- if (cmd == SOUND_MIXER_PRIVATE1) { /* Set loudness bit */
- if (get_user(level, p))
- return -EFAULT;
- if (level == -1) /* Return current settings */
- level = (mode_control & 0x04);
- else {
- mode_control &= ~0x04;
- if (level)
- mode_control |= 0x04;
- set_mode(mode_control);
- }
- level = !!level;
- return put_user(level, p);
- }
- if (cmd == SOUND_MIXER_PRIVATE2) { /* Set enhance bit */
- if (get_user(level, p))
- return -EFAULT;
- if (level == -1) { /* Return current settings */
- if (!(mode_control & 0x03))
- level = 0;
- else
- level = ((mode_control & 0x03) + 1) * 20;
- } else {
- int i = 0;
-
- level &= 0x7f;
- if (level)
- i = (level / 20) - 1;
- mode_control &= ~0x03;
- mode_control |= i & 0x03;
- set_mode(mode_control);
- if (i)
- i = (i + 1) * 20;
- level = i;
- }
- return put_user(level, p);
- }
- if (cmd == SOUND_MIXER_PRIVATE3) { /* Set mute bit */
- if (get_user(level, p))
- return -EFAULT;
- if (level == -1) /* Return current settings */
- level = !(pas_read(0x0B8A) & 0x20);
- else {
- if (level)
- pas_write(pas_read(0x0B8A) & (~0x20), 0x0B8A);
- else
- pas_write(pas_read(0x0B8A) | 0x20, 0x0B8A);
-
- level = !(pas_read(0x0B8A) & 0x20);
- }
- return put_user(level, p);
- }
- if (((cmd >> 8) & 0xff) == 'M') {
- if (get_user(v, p))
- return -EFAULT;
- if (_SIOC_DIR(cmd) & _SIOC_WRITE) {
- v = pas_mixer_set(cmd & 0xff, v);
- } else {
- switch (cmd & 0xff) {
- case SOUND_MIXER_RECSRC:
- v = rec_devices;
- break;
-
- case SOUND_MIXER_STEREODEVS:
- v = SUPPORTED_MIXER_DEVICES & ~(SOUND_MASK_BASS | SOUND_MASK_TREBLE);
- break;
-
- case SOUND_MIXER_DEVMASK:
- v = SUPPORTED_MIXER_DEVICES;
- break;
-
- case SOUND_MIXER_RECMASK:
- v = POSSIBLE_RECORDING_DEVICES & SUPPORTED_MIXER_DEVICES;
- break;
-
- case SOUND_MIXER_CAPS:
- v = 0; /* No special capabilities */
- break;
-
- default:
- v = levels[cmd & 0xff];
- break;
- }
- }
- return put_user(v, p);
- }
- return -EINVAL;
-}
-
-static struct mixer_operations pas_mixer_operations =
-{
- .owner = THIS_MODULE,
- .id = "PAS16",
- .name = "Pro Audio Spectrum 16",
- .ioctl = pas_mixer_ioctl
-};
-
-int __init
-pas_init_mixer(void)
-{
- int d;
-
- levels = load_mixer_volumes("PAS16_1", default_levels, 1);
-
- pas_mixer_reset();
-
- if ((d = sound_alloc_mixerdev()) != -1)
- {
- audio_devs[pas_audiodev]->mixer_dev = d;
- mixer_devs[d] = &pas_mixer_operations;
- }
- return 1;
-}
diff --git a/sound/oss/pas2_pcm.c b/sound/oss/pas2_pcm.c
deleted file mode 100644
index 474803b52f7d..000000000000
--- a/sound/oss/pas2_pcm.c
+++ /dev/null
@@ -1,419 +0,0 @@
-/*
- * pas2_pcm.c Audio routines for PAS16
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- *
- * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
- * Alan Cox : Swatted a double allocation of device bug. Made a few
- * more things module options.
- * Bartlomiej Zolnierkiewicz : Added __init to pas_pcm_init()
- */
-
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/timex.h>
-#include "sound_config.h"
-
-#include "pas2.h"
-
-#define PAS_PCM_INTRBITS (0x08)
-/*
- * Sample buffer timer interrupt enable
- */
-
-#define PCM_NON 0
-#define PCM_DAC 1
-#define PCM_ADC 2
-
-static unsigned long pcm_speed; /* sampling rate */
-static unsigned char pcm_channels = 1; /* channels (1 or 2) */
-static unsigned char pcm_bits = 8; /* bits/sample (8 or 16) */
-static unsigned char pcm_filter; /* filter FLAG */
-static unsigned char pcm_mode = PCM_NON;
-static unsigned long pcm_count;
-static unsigned short pcm_bitsok = 8; /* mask of OK bits */
-static int pcm_busy;
-int pas_audiodev = -1;
-static int open_mode;
-
-extern spinlock_t pas_lock;
-
-static int pcm_set_speed(int arg)
-{
- int foo, tmp;
- unsigned long flags;
-
- if (arg == 0)
- return pcm_speed;
-
- if (arg > 44100)
- arg = 44100;
- if (arg < 5000)
- arg = 5000;
-
- if (pcm_channels & 2)
- {
- foo = ((PIT_TICK_RATE / 2) + (arg / 2)) / arg;
- arg = ((PIT_TICK_RATE / 2) + (foo / 2)) / foo;
- }
- else
- {
- foo = (PIT_TICK_RATE + (arg / 2)) / arg;
- arg = (PIT_TICK_RATE + (foo / 2)) / foo;
- }
-
- pcm_speed = arg;
-
- tmp = pas_read(0x0B8A);
-
- /*
- * Set anti-aliasing filters according to sample rate. You really *NEED*
- * to enable this feature for all normal recording unless you want to
- * experiment with aliasing effects.
- * These filters apply to the selected "recording" source.
- * I (pfw) don't know the encoding of these 5 bits. The values shown
- * come from the SDK found on ftp.uwp.edu:/pub/msdos/proaudio/.
- *
- * I cleared bit 5 of these values, since that bit controls the master
- * mute flag. (Olav Wölfelschneider)
- *
- */
-#if !defined NO_AUTO_FILTER_SET
- tmp &= 0xe0;
- if (pcm_speed >= 2 * 17897)
- tmp |= 0x01;
- else if (pcm_speed >= 2 * 15909)
- tmp |= 0x02;
- else if (pcm_speed >= 2 * 11931)
- tmp |= 0x09;
- else if (pcm_speed >= 2 * 8948)
- tmp |= 0x11;
- else if (pcm_speed >= 2 * 5965)
- tmp |= 0x19;
- else if (pcm_speed >= 2 * 2982)
- tmp |= 0x04;
- pcm_filter = tmp;
-#endif
-
- spin_lock_irqsave(&pas_lock, flags);
-
- pas_write(tmp & ~(0x40 | 0x80), 0x0B8A);
- pas_write(0x00 | 0x30 | 0x04, 0x138B);
- pas_write(foo & 0xff, 0x1388);
- pas_write((foo >> 8) & 0xff, 0x1388);
- pas_write(tmp, 0x0B8A);
-
- spin_unlock_irqrestore(&pas_lock, flags);
-
- return pcm_speed;
-}
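A worked example of the divisor rounding in pcm_set_speed() above, assuming the standard i8254 PIT_TICK_RATE of 1193182 Hz (arithmetic only, not driver output):

/*
 * Mono request of 44100 Hz:
 *   foo  = (1193182 + 22050) / 44100 = 27     (timer divisor)
 *   rate = (1193182 + 13)    / 27    = 44192  (effective sample rate)
 * Stereo halves the tick rate, so the same request gives foo = 14 and
 * an effective rate of 42614 Hz.
 */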
-
-static int pcm_set_channels(int arg)
-{
-
- if ((arg != 1) && (arg != 2))
- return pcm_channels;
-
- if (arg != pcm_channels)
- {
- pas_write(pas_read(0xF8A) ^ 0x20, 0xF8A);
-
- pcm_channels = arg;
- pcm_set_speed(pcm_speed); /* The speed must be reinitialized */
- }
- return pcm_channels;
-}
-
-static int pcm_set_bits(int arg)
-{
- if (arg == 0)
- return pcm_bits;
-
- if ((arg & pcm_bitsok) != arg)
- return pcm_bits;
-
- if (arg != pcm_bits)
- {
- pas_write(pas_read(0x8389) ^ 0x04, 0x8389);
-
- pcm_bits = arg;
- }
- return pcm_bits;
-}
-
-static int pas_audio_ioctl(int dev, unsigned int cmd, void __user *arg)
-{
- int val, ret;
- int __user *p = arg;
-
- switch (cmd)
- {
- case SOUND_PCM_WRITE_RATE:
- if (get_user(val, p))
- return -EFAULT;
- ret = pcm_set_speed(val);
- break;
-
- case SOUND_PCM_READ_RATE:
- ret = pcm_speed;
- break;
-
- case SNDCTL_DSP_STEREO:
- if (get_user(val, p))
- return -EFAULT;
- ret = pcm_set_channels(val + 1) - 1;
- break;
-
- case SOUND_PCM_WRITE_CHANNELS:
- if (get_user(val, p))
- return -EFAULT;
- ret = pcm_set_channels(val);
- break;
-
- case SOUND_PCM_READ_CHANNELS:
- ret = pcm_channels;
- break;
-
- case SNDCTL_DSP_SETFMT:
- if (get_user(val, p))
- return -EFAULT;
- ret = pcm_set_bits(val);
- break;
-
- case SOUND_PCM_READ_BITS:
- ret = pcm_bits;
- break;
-
- default:
- return -EINVAL;
- }
- return put_user(ret, p);
-}
-
-static void pas_audio_reset(int dev)
-{
- pas_write(pas_read(0xF8A) & ~0x40, 0xF8A); /* Disable PCM */
-}
-
-static int pas_audio_open(int dev, int mode)
-{
- int err;
- unsigned long flags;
-
- spin_lock_irqsave(&pas_lock, flags);
- if (pcm_busy)
- {
- spin_unlock_irqrestore(&pas_lock, flags);
- return -EBUSY;
- }
- pcm_busy = 1;
- spin_unlock_irqrestore(&pas_lock, flags);
-
- if ((err = pas_set_intr(PAS_PCM_INTRBITS)) < 0)
- return err;
-
-
- pcm_count = 0;
- open_mode = mode;
-
- return 0;
-}
-
-static void pas_audio_close(int dev)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pas_lock, flags);
-
- pas_audio_reset(dev);
- pas_remove_intr(PAS_PCM_INTRBITS);
- pcm_mode = PCM_NON;
-
- pcm_busy = 0;
- spin_unlock_irqrestore(&pas_lock, flags);
-}
-
-static void pas_audio_output_block(int dev, unsigned long buf, int count,
- int intrflag)
-{
- unsigned long flags, cnt;
-
- cnt = count;
- if (audio_devs[dev]->dmap_out->dma > 3)
- cnt >>= 1;
-
- if (audio_devs[dev]->flags & DMA_AUTOMODE &&
- intrflag &&
- cnt == pcm_count)
- return;
-
- spin_lock_irqsave(&pas_lock, flags);
-
- pas_write(pas_read(0xF8A) & ~0x40,
- 0xF8A);
-
- /* DMAbuf_start_dma (dev, buf, count, DMA_MODE_WRITE); */
-
- if (audio_devs[dev]->dmap_out->dma > 3)
- count >>= 1;
-
- if (count != pcm_count)
- {
- pas_write(pas_read(0x0B8A) & ~0x80, 0x0B8A);
- pas_write(0x40 | 0x30 | 0x04, 0x138B);
- pas_write(count & 0xff, 0x1389);
- pas_write((count >> 8) & 0xff, 0x1389);
- pas_write(pas_read(0x0B8A) | 0x80, 0x0B8A);
-
- pcm_count = count;
- }
- pas_write(pas_read(0x0B8A) | 0x80 | 0x40, 0x0B8A);
-#ifdef NO_TRIGGER
- pas_write(pas_read(0xF8A) | 0x40 | 0x10, 0xF8A);
-#endif
-
- pcm_mode = PCM_DAC;
-
- spin_unlock_irqrestore(&pas_lock, flags);
-}
-
-static void pas_audio_start_input(int dev, unsigned long buf, int count,
- int intrflag)
-{
- unsigned long flags;
- int cnt;
-
- cnt = count;
- if (audio_devs[dev]->dmap_out->dma > 3)
- cnt >>= 1;
-
- if (audio_devs[pas_audiodev]->flags & DMA_AUTOMODE &&
- intrflag &&
- cnt == pcm_count)
- return;
-
- spin_lock_irqsave(&pas_lock, flags);
-
- /* DMAbuf_start_dma (dev, buf, count, DMA_MODE_READ); */
-
- if (audio_devs[dev]->dmap_out->dma > 3)
- count >>= 1;
-
- if (count != pcm_count)
- {
- pas_write(pas_read(0x0B8A) & ~0x80, 0x0B8A);
- pas_write(0x40 | 0x30 | 0x04, 0x138B);
- pas_write(count & 0xff, 0x1389);
- pas_write((count >> 8) & 0xff, 0x1389);
- pas_write(pas_read(0x0B8A) | 0x80, 0x0B8A);
-
- pcm_count = count;
- }
- pas_write(pas_read(0x0B8A) | 0x80 | 0x40, 0x0B8A);
-#ifdef NO_TRIGGER
- pas_write((pas_read(0xF8A) | 0x40) & ~0x10, 0xF8A);
-#endif
-
- pcm_mode = PCM_ADC;
-
- spin_unlock_irqrestore(&pas_lock, flags);
-}
-
-#ifndef NO_TRIGGER
-static void pas_audio_trigger(int dev, int state)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pas_lock, flags);
- state &= open_mode;
-
- if (state & PCM_ENABLE_OUTPUT)
- pas_write(pas_read(0xF8A) | 0x40 | 0x10, 0xF8A);
- else if (state & PCM_ENABLE_INPUT)
- pas_write((pas_read(0xF8A) | 0x40) & ~0x10, 0xF8A);
- else
- pas_write(pas_read(0xF8A) & ~0x40, 0xF8A);
-
- spin_unlock_irqrestore(&pas_lock, flags);
-}
-#endif
-
-static int pas_audio_prepare_for_input(int dev, int bsize, int bcount)
-{
- pas_audio_reset(dev);
- return 0;
-}
-
-static int pas_audio_prepare_for_output(int dev, int bsize, int bcount)
-{
- pas_audio_reset(dev);
- return 0;
-}
-
-static struct audio_driver pas_audio_driver =
-{
- .owner = THIS_MODULE,
- .open = pas_audio_open,
- .close = pas_audio_close,
- .output_block = pas_audio_output_block,
- .start_input = pas_audio_start_input,
- .ioctl = pas_audio_ioctl,
- .prepare_for_input = pas_audio_prepare_for_input,
- .prepare_for_output = pas_audio_prepare_for_output,
- .halt_io = pas_audio_reset,
- .trigger = pas_audio_trigger
-};
-
-void __init pas_pcm_init(struct address_info *hw_config)
-{
- pcm_bitsok = 8;
- if (pas_read(0xEF8B) & 0x08)
- pcm_bitsok |= 16;
-
- pcm_set_speed(DSP_DEFAULT_SPEED);
-
- if ((pas_audiodev = sound_install_audiodrv(AUDIO_DRIVER_VERSION,
- "Pro Audio Spectrum",
- &pas_audio_driver,
- sizeof(struct audio_driver),
- DMA_AUTOMODE,
- AFMT_U8 | AFMT_S16_LE,
- NULL,
- hw_config->dma,
- hw_config->dma)) < 0)
- printk(KERN_WARNING "PAS16: Too many PCM devices available\n");
-}
-
-void pas_pcm_interrupt(unsigned char status, int cause)
-{
- if (cause == 1)
- {
- /*
- * Halt the PCM first. Otherwise we don't have time to start a new
- * block before the PCM chip proceeds to the next sample
- */
-
- if (!(audio_devs[pas_audiodev]->flags & DMA_AUTOMODE))
- pas_write(pas_read(0xF8A) & ~0x40, 0xF8A);
-
- switch (pcm_mode)
- {
- case PCM_DAC:
- DMAbuf_outputintr(pas_audiodev, 1);
- break;
-
- case PCM_ADC:
- DMAbuf_inputintr(pas_audiodev);
- break;
-
- default:
- printk(KERN_WARNING "PAS: Unexpected PCM interrupt\n");
- }
- }
-}
diff --git a/sound/oss/pss.c b/sound/oss/pss.c
deleted file mode 100644
index 33c3a442e162..000000000000
--- a/sound/oss/pss.c
+++ /dev/null
@@ -1,1270 +0,0 @@
-/*
- * sound/oss/pss.c
- *
- * The low level driver for the Personal Sound System (ECHO ESC614).
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- *
- * Thomas Sailer ioctl code reworked (vmalloc/vfree removed)
- * Alan Cox modularisation, clean up.
- *
- * 98-02-21: Vladimir Michl <vladimir.michl@upol.cz>
- * Added mixer device for Beethoven ADSP-16 (master volume,
- * bass, treble, synth), only for speakers.
- * Fixed bug in pss_write (exchange parameters)
- * Fixed config port of SB
- * Requested two regions for PSS (PSS mixer, PSS config)
- * Modified pss_download_boot
- * To probe_pss_mss added test for initialize AD1848
- * 98-05-28: Vladimir Michl <vladimir.michl@upol.cz>
- * Fixed computation of mixer volumes
- * 04-05-1999: Anthony Barbachan <barbcode@xmen.cis.fordham.edu>
- * Added code that allows the user to enable his cdrom and/or
- * joystick through the module parameters pss_cdrom_port and
- * pss_enable_joystick. pss_cdrom_port takes a port address as its
- * argument. pss_enable_joystick takes either a 0 or a non-0 as its
- * argument.
- * 04-06-1999: Anthony Barbachan <barbcode@xmen.cis.fordham.edu>
- * Separated some code into new functions for easier reuse.
- * Cleaned up and streamlined new code. Added code to allow a user
- * to only use this driver for enabling non-sound components
- * through the new module parameter pss_no_sound (flag). Added
- * code that would allow a user to decide whether the driver should
- * reset the configured hardware settings for the PSS board through
- * the module parameter pss_keep_settings (flag). This flag will
- * allow a user to free up resources in use by this card if needbe,
- * furthermore it allows him to use this driver to just enable the
- * emulations and then be unloaded as it is no longer needed. Both
- * new settings are only available to this driver if compiled as a
- * module. The default settings of all new parameters are set to
- * load the driver as it did in previous versions.
- * 04-07-1999: Anthony Barbachan <barbcode@xmen.cis.fordham.edu>
- * Added module parameter pss_firmware to allow the user to tell
- * the driver where the firmware file is located. The default
- * setting is the previous hardcoded setting "/etc/sound/pss_synth".
- * 00-03-03: Christoph Hellwig <chhellwig@infradead.org>
- * Adapted to module_init/module_exit
- * 11-10-2000: Bartlomiej Zolnierkiewicz <bkz@linux-ide.org>
- * Added __init to probe_pss(), attach_pss() and probe_pss_mpu()
- * 02-Jan-2001: Chris Rankin
- * Specify that this module owns the coprocessor
- */
-
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-
-#include "sound_config.h"
-#include "sound_firmware.h"
-
-#include "ad1848.h"
-#include "mpu401.h"
-
-/*
- * PSS registers.
- */
-#define REG(x) (devc->base+x)
-#define PSS_DATA 0
-#define PSS_STATUS 2
-#define PSS_CONTROL 2
-#define PSS_ID 4
-#define PSS_IRQACK 4
-#define PSS_PIO 0x1a
-
-/*
- * Config registers
- */
-#define CONF_PSS 0x10
-#define CONF_WSS 0x12
-#define CONF_SB 0x14
-#define CONF_CDROM 0x16
-#define CONF_MIDI 0x18
-
-/*
- * Status bits.
- */
-#define PSS_FLAG3 0x0800
-#define PSS_FLAG2 0x0400
-#define PSS_FLAG1 0x1000
-#define PSS_FLAG0 0x0800
-#define PSS_WRITE_EMPTY 0x8000
-#define PSS_READ_FULL 0x4000
-
-/*
- * WSS registers
- */
-#define WSS_INDEX 4
-#define WSS_DATA 5
-
-/*
- * WSS status bits
- */
-#define WSS_INITIALIZING 0x80
-#define WSS_AUTOCALIBRATION 0x20
-
-#define NO_WSS_MIXER -1
-
-#include "coproc.h"
-
-#include "pss_boot.h"
-
-/* If compiled into the kernel, this enables or disables the PSS mixer */
-#ifdef CONFIG_PSS_MIXER
-static bool pss_mixer = 1;
-#else
-static bool pss_mixer;
-#endif
-
-
-struct pss_mixerdata {
- unsigned int volume_l;
- unsigned int volume_r;
- unsigned int bass;
- unsigned int treble;
- unsigned int synth;
-};
-
-struct pss_confdata {
- int base;
- int irq;
- int dma;
- int *osp;
- struct pss_mixerdata mixer;
- int ad_mixer_dev;
-};
-
-static struct pss_confdata pss_data;
-static struct pss_confdata *devc = &pss_data;
-static DEFINE_SPINLOCK(lock);
-
-static int pss_initialized;
-static int nonstandard_microcode;
-static int pss_cdrom_port = -1; /* Parameter for the PSS cdrom port */
-static bool pss_enable_joystick; /* Parameter for enabling the joystick */
-static coproc_operations pss_coproc_operations;
-
-static void pss_write(struct pss_confdata *devc, int data)
-{
- unsigned long i, limit;
-
- limit = jiffies + HZ/10; /* The timeout is 0.1 seconds */
- /*
-	 * Note! The i < 5000000 is an emergency exit. The dsp_command() is sometimes
-	 * called while interrupts are disabled. This means that the timer is
-	 * disabled also. However, the timeout situation is an abnormal condition.
-	 * Normally the DSP should be ready to accept commands after just a couple
-	 * of loops.
- */
-
- for (i = 0; i < 5000000 && time_before(jiffies, limit); i++)
- {
- if (inw(REG(PSS_STATUS)) & PSS_WRITE_EMPTY)
- {
- outw(data, REG(PSS_DATA));
- return;
- }
- }
- printk(KERN_WARNING "PSS: DSP Command (%04x) Timeout.\n", data);
-}
-
-static int __init probe_pss(struct address_info *hw_config)
-{
- unsigned short id;
- int irq, dma;
-
- devc->base = hw_config->io_base;
- irq = devc->irq = hw_config->irq;
- dma = devc->dma = hw_config->dma;
- devc->osp = hw_config->osp;
-
- if (devc->base != 0x220 && devc->base != 0x240)
- if (devc->base != 0x230 && devc->base != 0x250) /* Some cards use these */
- return 0;
-
- if (!request_region(devc->base, 0x10, "PSS mixer, SB emulation")) {
- printk(KERN_ERR "PSS: I/O port conflict\n");
- return 0;
- }
- id = inw(REG(PSS_ID));
- if ((id >> 8) != 'E') {
- printk(KERN_ERR "No PSS signature detected at 0x%x (0x%x)\n", devc->base, id);
- release_region(devc->base, 0x10);
- return 0;
- }
- if (!request_region(devc->base + 0x10, 0x9, "PSS config")) {
- printk(KERN_ERR "PSS: I/O port conflict\n");
- release_region(devc->base, 0x10);
- return 0;
- }
- return 1;
-}
-
-static int set_irq(struct pss_confdata *devc, int dev, int irq)
-{
- static unsigned short irq_bits[16] =
- {
- 0x0000, 0x0000, 0x0000, 0x0008,
- 0x0000, 0x0010, 0x0000, 0x0018,
- 0x0000, 0x0020, 0x0028, 0x0030,
- 0x0038, 0x0000, 0x0000, 0x0000
- };
-
- unsigned short tmp, bits;
-
- if (irq < 0 || irq > 15)
- return 0;
-
- tmp = inw(REG(dev)) & ~0x38; /* Load confreg, mask IRQ bits out */
-
- if ((bits = irq_bits[irq]) == 0 && irq != 0)
- {
- printk(KERN_ERR "PSS: Invalid IRQ %d\n", irq);
- return 0;
- }
- outw(tmp | bits, REG(dev));
- return 1;
-}
-
-static void set_io_base(struct pss_confdata *devc, int dev, int base)
-{
- unsigned short tmp = inw(REG(dev)) & 0x003f;
- unsigned short bits = (base & 0x0ffc) << 4;
-
- outw(bits | tmp, REG(dev));
-}
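A worked example for set_io_base() above (the 0x340 base is an arbitrary illustration, not a value from this driver):

/*
 * bits = (0x340 & 0x0ffc) << 4 = 0x3400, so
 * set_io_base(devc, CONF_WSS, 0x340) writes 0x3400 | (old & 0x003f),
 * replacing the address field while keeping the low six config bits.
 */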
-
-static int set_dma(struct pss_confdata *devc, int dev, int dma)
-{
- static unsigned short dma_bits[8] =
- {
- 0x0001, 0x0002, 0x0000, 0x0003,
- 0x0000, 0x0005, 0x0006, 0x0007
- };
-
- unsigned short tmp, bits;
-
- if (dma < 0 || dma > 7)
- return 0;
-
- tmp = inw(REG(dev)) & ~0x07; /* Load confreg, mask DMA bits out */
-
- if ((bits = dma_bits[dma]) == 0 && dma != 4)
- {
- printk(KERN_ERR "PSS: Invalid DMA %d\n", dma);
- return 0;
- }
- outw(tmp | bits, REG(dev));
- return 1;
-}
-
-static int pss_reset_dsp(struct pss_confdata *devc)
-{
- unsigned long i, limit = jiffies + HZ/10;
-
- outw(0x2000, REG(PSS_CONTROL));
- for (i = 0; i < 32768 && time_after_eq(limit, jiffies); i++)
- inw(REG(PSS_CONTROL));
- outw(0x0000, REG(PSS_CONTROL));
- return 1;
-}
-
-static int pss_put_dspword(struct pss_confdata *devc, unsigned short word)
-{
- int i, val;
-
- for (i = 0; i < 327680; i++)
- {
- val = inw(REG(PSS_STATUS));
- if (val & PSS_WRITE_EMPTY)
- {
- outw(word, REG(PSS_DATA));
- return 1;
- }
- }
- return 0;
-}
-
-static int pss_get_dspword(struct pss_confdata *devc, unsigned short *word)
-{
- int i, val;
-
- for (i = 0; i < 327680; i++)
- {
- val = inw(REG(PSS_STATUS));
- if (val & PSS_READ_FULL)
- {
- *word = inw(REG(PSS_DATA));
- return 1;
- }
- }
- return 0;
-}
-
-static int pss_download_boot(struct pss_confdata *devc, unsigned char *block,
- int size, int flags)
-{
- int i, val, count;
- unsigned long limit;
-
- if (flags & CPF_FIRST)
- {
-/*_____ Warn DSP software that a boot is coming */
- outw(0x00fe, REG(PSS_DATA));
-
- limit = jiffies + HZ/10;
- for (i = 0; i < 32768 && time_before(jiffies, limit); i++)
- if (inw(REG(PSS_DATA)) == 0x5500)
- break;
-
- outw(*block++, REG(PSS_DATA));
- pss_reset_dsp(devc);
- }
- count = 1;
- while ((flags&CPF_LAST) || count<size )
- {
- int j;
-
- for (j = 0; j < 327670; j++)
- {
-/*_____ Wait for BG to appear */
- if (inw(REG(PSS_STATUS)) & PSS_FLAG3)
- break;
- }
-
- if (j == 327670)
- {
-			/* It's OK that we timed out - the whole block has already been sent */
- if (count >= size && flags & CPF_LAST)
- break;
- else
- {
- printk("\n");
- printk(KERN_ERR "PSS: Download timeout problems, byte %d=%d\n", count, size);
- return 0;
- }
- }
-/*_____ Send the next byte */
- if (count >= size)
- {
-			/* If no data left in the block, send 0xffff */
- outw (0xffff, REG (PSS_DATA));
- }
- else
- {
- /*_____ Send the next byte */
- outw (*block++, REG (PSS_DATA));
- }
- count++;
- }
-
- if (flags & CPF_LAST)
- {
-/*_____ Why */
- outw(0, REG(PSS_DATA));
-
- limit = jiffies + HZ/10;
- for (i = 0; i < 32768 && time_after_eq(limit, jiffies); i++)
- val = inw(REG(PSS_STATUS));
-
- limit = jiffies + HZ/10;
- for (i = 0; i < 32768 && time_after_eq(limit, jiffies); i++)
- {
- val = inw(REG(PSS_STATUS));
- if (val & 0x4000)
- break;
- }
-
- /* now read the version */
- for (i = 0; i < 32000; i++)
- {
- val = inw(REG(PSS_STATUS));
- if (val & PSS_READ_FULL)
- break;
- }
- if (i == 32000)
- return 0;
-
- val = inw(REG(PSS_DATA));
- /* printk( "<PSS: microcode version %d.%d loaded>", val/16, val % 16); */
- }
- return 1;
-}
-
-/* Mixer */
-static void set_master_volume(struct pss_confdata *devc, int left, int right)
-{
- static unsigned char log_scale[101] = {
- 0xdb, 0xe0, 0xe3, 0xe5, 0xe7, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xed, 0xee,
- 0xef, 0xef, 0xf0, 0xf0, 0xf1, 0xf1, 0xf2, 0xf2, 0xf2, 0xf3, 0xf3, 0xf3,
- 0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7,
- 0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9,
- 0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb,
- 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc,
- 0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd,
- 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe,
- 0xfe, 0xfe, 0xff, 0xff, 0xff
- };
- pss_write(devc, 0x0010);
- pss_write(devc, log_scale[left] | 0x0000);
- pss_write(devc, 0x0010);
- pss_write(devc, log_scale[right] | 0x0100);
-}
-
-static void set_synth_volume(struct pss_confdata *devc, int volume)
-{
- int vol = ((0x8000*volume)/100L);
- pss_write(devc, 0x0080);
- pss_write(devc, vol);
- pss_write(devc, 0x0081);
- pss_write(devc, vol);
-}
-
-static void set_bass(struct pss_confdata *devc, int level)
-{
- int vol = (int)(((0xfd - 0xf0) * level)/100L) + 0xf0;
- pss_write(devc, 0x0010);
- pss_write(devc, vol | 0x0200);
-};
-
-static void set_treble(struct pss_confdata *devc, int level)
-{
- int vol = (((0xfd - 0xf0) * level)/100L) + 0xf0;
- pss_write(devc, 0x0010);
- pss_write(devc, vol | 0x0300);
-};
-
-static void pss_mixer_reset(struct pss_confdata *devc)
-{
- set_master_volume(devc, 33, 33);
- set_bass(devc, 50);
- set_treble(devc, 50);
- set_synth_volume(devc, 30);
- pss_write (devc, 0x0010);
- pss_write (devc, 0x0800 | 0xce); /* Stereo */
-
- if(pss_mixer)
- {
- devc->mixer.volume_l = devc->mixer.volume_r = 33;
- devc->mixer.bass = 50;
- devc->mixer.treble = 50;
- devc->mixer.synth = 30;
- }
-}
-
-static int set_volume_mono(unsigned __user *p, unsigned int *aleft)
-{
- unsigned int left, volume;
- if (get_user(volume, p))
- return -EFAULT;
-
- left = volume & 0xff;
- if (left > 100)
- left = 100;
- *aleft = left;
- return 0;
-}
-
-static int set_volume_stereo(unsigned __user *p,
- unsigned int *aleft,
- unsigned int *aright)
-{
- unsigned int left, right, volume;
- if (get_user(volume, p))
- return -EFAULT;
-
- left = volume & 0xff;
- if (left > 100)
- left = 100;
- right = (volume >> 8) & 0xff;
- if (right > 100)
- right = 100;
- *aleft = left;
- *aright = right;
- return 0;
-}
-
-static int ret_vol_mono(int left)
-{
- return ((left << 8) | left);
-}
-
-static int ret_vol_stereo(int left, int right)
-{
- return ((right << 8) | left);
-}
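A short illustration of the OSS volume word handled by the helpers above (the concrete levels are made up): the left channel lives in the low byte and the right channel in the byte above it.

static int pss_example_pack_volume(void)
{
	return ret_vol_stereo(50, 70);	/* == 0x4632: left 50, right 70 */
}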
-
-static int call_ad_mixer(struct pss_confdata *devc, unsigned int cmd,
- void __user *arg)
-{
- if (devc->ad_mixer_dev != NO_WSS_MIXER)
- return mixer_devs[devc->ad_mixer_dev]->ioctl(devc->ad_mixer_dev, cmd, arg);
- else
- return -EINVAL;
-}
-
-static int pss_mixer_ioctl (int dev, unsigned int cmd, void __user *arg)
-{
- struct pss_confdata *devc = mixer_devs[dev]->devc;
- int cmdf = cmd & 0xff;
-
- if ((cmdf != SOUND_MIXER_VOLUME) && (cmdf != SOUND_MIXER_BASS) &&
- (cmdf != SOUND_MIXER_TREBLE) && (cmdf != SOUND_MIXER_SYNTH) &&
- (cmdf != SOUND_MIXER_DEVMASK) && (cmdf != SOUND_MIXER_STEREODEVS) &&
- (cmdf != SOUND_MIXER_RECMASK) && (cmdf != SOUND_MIXER_CAPS) &&
- (cmdf != SOUND_MIXER_RECSRC))
- {
- return call_ad_mixer(devc, cmd, arg);
- }
-
- if (((cmd >> 8) & 0xff) != 'M')
- return -EINVAL;
-
- if (_SIOC_DIR (cmd) & _SIOC_WRITE)
- {
- switch (cmdf)
- {
- case SOUND_MIXER_RECSRC:
- if (devc->ad_mixer_dev != NO_WSS_MIXER)
- return call_ad_mixer(devc, cmd, arg);
- else
- {
- int v;
- if (get_user(v, (int __user *)arg))
- return -EFAULT;
- if (v != 0)
- return -EINVAL;
- return 0;
- }
- case SOUND_MIXER_VOLUME:
- if (set_volume_stereo(arg,
- &devc->mixer.volume_l,
- &devc->mixer.volume_r))
- return -EFAULT;
- set_master_volume(devc, devc->mixer.volume_l,
- devc->mixer.volume_r);
- return ret_vol_stereo(devc->mixer.volume_l,
- devc->mixer.volume_r);
-
- case SOUND_MIXER_BASS:
- if (set_volume_mono(arg, &devc->mixer.bass))
- return -EFAULT;
- set_bass(devc, devc->mixer.bass);
- return ret_vol_mono(devc->mixer.bass);
-
- case SOUND_MIXER_TREBLE:
- if (set_volume_mono(arg, &devc->mixer.treble))
- return -EFAULT;
- set_treble(devc, devc->mixer.treble);
- return ret_vol_mono(devc->mixer.treble);
-
- case SOUND_MIXER_SYNTH:
- if (set_volume_mono(arg, &devc->mixer.synth))
- return -EFAULT;
- set_synth_volume(devc, devc->mixer.synth);
- return ret_vol_mono(devc->mixer.synth);
-
- default:
- return -EINVAL;
- }
- }
- else
- {
- int val, and_mask = 0, or_mask = 0;
- /*
- * Return parameters
- */
- switch (cmdf)
- {
- case SOUND_MIXER_DEVMASK:
- if (call_ad_mixer(devc, cmd, arg) == -EINVAL)
- break;
- and_mask = ~0;
- or_mask = SOUND_MASK_VOLUME | SOUND_MASK_BASS | SOUND_MASK_TREBLE | SOUND_MASK_SYNTH;
- break;
-
- case SOUND_MIXER_STEREODEVS:
- if (call_ad_mixer(devc, cmd, arg) == -EINVAL)
- break;
- and_mask = ~0;
- or_mask = SOUND_MASK_VOLUME;
- break;
-
- case SOUND_MIXER_RECMASK:
- if (devc->ad_mixer_dev != NO_WSS_MIXER)
- return call_ad_mixer(devc, cmd, arg);
- break;
-
- case SOUND_MIXER_CAPS:
- if (devc->ad_mixer_dev != NO_WSS_MIXER)
- return call_ad_mixer(devc, cmd, arg);
- or_mask = SOUND_CAP_EXCL_INPUT;
- break;
-
- case SOUND_MIXER_RECSRC:
- if (devc->ad_mixer_dev != NO_WSS_MIXER)
- return call_ad_mixer(devc, cmd, arg);
- break;
-
- case SOUND_MIXER_VOLUME:
- or_mask = ret_vol_stereo(devc->mixer.volume_l, devc->mixer.volume_r);
- break;
-
- case SOUND_MIXER_BASS:
- or_mask = ret_vol_mono(devc->mixer.bass);
- break;
-
- case SOUND_MIXER_TREBLE:
- or_mask = ret_vol_mono(devc->mixer.treble);
- break;
-
- case SOUND_MIXER_SYNTH:
- or_mask = ret_vol_mono(devc->mixer.synth);
- break;
- default:
- return -EINVAL;
- }
- if (get_user(val, (int __user *)arg))
- return -EFAULT;
- val &= and_mask;
- val |= or_mask;
- if (put_user(val, (int __user *)arg))
- return -EFAULT;
- return val;
- }
-}
-
-static struct mixer_operations pss_mixer_operations =
-{
- .owner = THIS_MODULE,
- .id = "SOUNDPORT",
- .name = "PSS-AD1848",
- .ioctl = pss_mixer_ioctl
-};
-
-static void disable_all_emulations(void)
-{
- outw(0x0000, REG(CONF_PSS)); /* 0x0400 enables joystick */
- outw(0x0000, REG(CONF_WSS));
- outw(0x0000, REG(CONF_SB));
- outw(0x0000, REG(CONF_MIDI));
- outw(0x0000, REG(CONF_CDROM));
-}
-
-static void configure_nonsound_components(void)
-{
- /* Configure Joystick port */
-
- if(pss_enable_joystick)
- {
- outw(0x0400, REG(CONF_PSS)); /* 0x0400 enables joystick */
- printk(KERN_INFO "PSS: joystick enabled.\n");
- }
- else
- {
- printk(KERN_INFO "PSS: joystick port not enabled.\n");
- }
-
- /* Configure CDROM port */
-
-	if (pss_cdrom_port == -1) {	/* Enabling the CDROM port wasn't requested */
- printk(KERN_INFO "PSS: CDROM port not enabled.\n");
- } else if (!request_region(pss_cdrom_port, 2, "PSS CDROM")) {
- pss_cdrom_port = -1;
- printk(KERN_ERR "PSS: CDROM I/O port conflict.\n");
- } else {
- set_io_base(devc, CONF_CDROM, pss_cdrom_port);
- printk(KERN_INFO "PSS: CDROM I/O port set to 0x%x.\n", pss_cdrom_port);
- }
-}
-
-static int __init attach_pss(struct address_info *hw_config)
-{
- unsigned short id;
- char tmp[100];
-
- devc->base = hw_config->io_base;
- devc->irq = hw_config->irq;
- devc->dma = hw_config->dma;
- devc->osp = hw_config->osp;
- devc->ad_mixer_dev = NO_WSS_MIXER;
-
- if (!probe_pss(hw_config))
- return 0;
-
- id = inw(REG(PSS_ID)) & 0x00ff;
-
- /*
- * Disable all emulations. Will be enabled later (if required).
- */
-
- disable_all_emulations();
-
-#ifdef YOU_REALLY_WANT_TO_ALLOCATE_THESE_RESOURCES
- if (sound_alloc_dma(hw_config->dma, "PSS"))
- {
- printk("pss.c: Can't allocate DMA channel.\n");
- release_region(hw_config->io_base, 0x10);
- release_region(hw_config->io_base+0x10, 0x9);
- return 0;
- }
- if (!set_irq(devc, CONF_PSS, devc->irq))
- {
- printk("PSS: IRQ allocation error.\n");
- release_region(hw_config->io_base, 0x10);
- release_region(hw_config->io_base+0x10, 0x9);
- return 0;
- }
- if (!set_dma(devc, CONF_PSS, devc->dma))
- {
- printk(KERN_ERR "PSS: DMA allocation error\n");
- release_region(hw_config->io_base, 0x10);
- release_region(hw_config->io_base+0x10, 0x9);
- return 0;
- }
-#endif
-
- configure_nonsound_components();
- pss_initialized = 1;
- sprintf(tmp, "ECHO-PSS Rev. %d", id);
- conf_printf(tmp, hw_config);
- return 1;
-}
-
-static int __init probe_pss_mpu(struct address_info *hw_config)
-{
- struct resource *ports;
- int timeout;
-
- if (!pss_initialized)
- return 0;
-
- ports = request_region(hw_config->io_base, 2, "mpu401");
-
- if (!ports) {
- printk(KERN_ERR "PSS: MPU I/O port conflict\n");
- return 0;
- }
- set_io_base(devc, CONF_MIDI, hw_config->io_base);
- if (!set_irq(devc, CONF_MIDI, hw_config->irq)) {
- printk(KERN_ERR "PSS: MIDI IRQ allocation error.\n");
- goto fail;
- }
- if (!pss_synthLen) {
- printk(KERN_ERR "PSS: Can't enable MPU. MIDI synth microcode not available.\n");
- goto fail;
- }
- if (!pss_download_boot(devc, pss_synth, pss_synthLen, CPF_FIRST | CPF_LAST)) {
- printk(KERN_ERR "PSS: Unable to load MIDI synth microcode to DSP.\n");
- goto fail;
- }
-
- /*
- * Finally wait until the DSP algorithm has initialized itself and
-	 * deactivates the receive interrupt.
- */
-
- for (timeout = 900000; timeout > 0; timeout--)
- {
- if ((inb(hw_config->io_base + 1) & 0x80) == 0) /* Input data avail */
- inb(hw_config->io_base); /* Discard it */
- else
- break; /* No more input */
- }
-
- if (!probe_mpu401(hw_config, ports))
- goto fail;
-
- attach_mpu401(hw_config, THIS_MODULE); /* Slot 1 */
- if (hw_config->slots[1] != -1) /* The MPU driver installed itself */
- midi_devs[hw_config->slots[1]]->coproc = &pss_coproc_operations;
- return 1;
-fail:
- release_region(hw_config->io_base, 2);
- return 0;
-}
-
-static int pss_coproc_open(void *dev_info, int sub_device)
-{
- switch (sub_device)
- {
- case COPR_MIDI:
- if (pss_synthLen == 0)
- {
- printk(KERN_ERR "PSS: MIDI synth microcode not available.\n");
- return -EIO;
- }
- if (nonstandard_microcode)
- if (!pss_download_boot(devc, pss_synth, pss_synthLen, CPF_FIRST | CPF_LAST))
- {
- printk(KERN_ERR "PSS: Unable to load MIDI synth microcode to DSP.\n");
- return -EIO;
- }
- nonstandard_microcode = 0;
- break;
-
- default:
- break;
- }
- return 0;
-}
-
-static void pss_coproc_close(void *dev_info, int sub_device)
-{
- return;
-}
-
-static void pss_coproc_reset(void *dev_info)
-{
- if (pss_synthLen)
- if (!pss_download_boot(devc, pss_synth, pss_synthLen, CPF_FIRST | CPF_LAST))
- {
- printk(KERN_ERR "PSS: Unable to load MIDI synth microcode to DSP.\n");
- }
- nonstandard_microcode = 0;
-}
-
-static int download_boot_block(void *dev_info, copr_buffer * buf)
-{
- if (buf->len <= 0 || buf->len > sizeof(buf->data))
- return -EINVAL;
-
- if (!pss_download_boot(devc, buf->data, buf->len, buf->flags))
- {
- printk(KERN_ERR "PSS: Unable to load microcode block to DSP.\n");
- return -EIO;
- }
- nonstandard_microcode = 1; /* The MIDI microcode has been overwritten */
- return 0;
-}
-
-static int pss_coproc_ioctl(void *dev_info, unsigned int cmd, void __user *arg, int local)
-{
- copr_buffer *buf;
- copr_msg *mbuf;
- copr_debug_buf dbuf;
- unsigned short tmp;
- unsigned long flags;
- unsigned short *data;
- int i, err;
- /* printk( "PSS coproc ioctl %x %x %d\n", cmd, arg, local); */
-
- switch (cmd)
- {
- case SNDCTL_COPR_RESET:
- pss_coproc_reset(dev_info);
- return 0;
-
- case SNDCTL_COPR_LOAD:
- buf = vmalloc(sizeof(copr_buffer));
- if (buf == NULL)
- return -ENOSPC;
- if (copy_from_user(buf, arg, sizeof(copr_buffer))) {
- vfree(buf);
- return -EFAULT;
- }
- err = download_boot_block(dev_info, buf);
- vfree(buf);
- return err;
-
- case SNDCTL_COPR_SENDMSG:
- mbuf = vmalloc(sizeof(copr_msg));
- if (mbuf == NULL)
- return -ENOSPC;
- if (copy_from_user(mbuf, arg, sizeof(copr_msg))) {
- vfree(mbuf);
- return -EFAULT;
- }
- data = (unsigned short *)(mbuf->data);
- spin_lock_irqsave(&lock, flags);
- for (i = 0; i < mbuf->len; i++) {
- if (!pss_put_dspword(devc, *data++)) {
- spin_unlock_irqrestore(&lock,flags);
- mbuf->len = i; /* feed back number of WORDs sent */
- err = copy_to_user(arg, mbuf, sizeof(copr_msg));
- vfree(mbuf);
- return err ? -EFAULT : -EIO;
- }
- }
- spin_unlock_irqrestore(&lock,flags);
- vfree(mbuf);
- return 0;
-
- case SNDCTL_COPR_RCVMSG:
- err = 0;
- mbuf = vmalloc(sizeof(copr_msg));
- if (mbuf == NULL)
- return -ENOSPC;
- data = (unsigned short *)mbuf->data;
- spin_lock_irqsave(&lock, flags);
- for (i = 0; i < sizeof(mbuf->data)/sizeof(unsigned short); i++) {
- mbuf->len = i; /* feed back number of WORDs read */
- if (!pss_get_dspword(devc, data++)) {
- if (i == 0)
- err = -EIO;
- break;
- }
- }
- spin_unlock_irqrestore(&lock,flags);
- if (copy_to_user(arg, mbuf, sizeof(copr_msg)))
- err = -EFAULT;
- vfree(mbuf);
- return err;
-
- case SNDCTL_COPR_RDATA:
- if (copy_from_user(&dbuf, arg, sizeof(dbuf)))
- return -EFAULT;
- spin_lock_irqsave(&lock, flags);
- if (!pss_put_dspword(devc, 0x00d0)) {
- spin_unlock_irqrestore(&lock,flags);
- return -EIO;
- }
- if (!pss_put_dspword(devc, (unsigned short)(dbuf.parm1 & 0xffff))) {
- spin_unlock_irqrestore(&lock,flags);
- return -EIO;
- }
- if (!pss_get_dspword(devc, &tmp)) {
- spin_unlock_irqrestore(&lock,flags);
- return -EIO;
- }
- dbuf.parm1 = tmp;
- spin_unlock_irqrestore(&lock,flags);
- if (copy_to_user(arg, &dbuf, sizeof(dbuf)))
- return -EFAULT;
- return 0;
-
- case SNDCTL_COPR_WDATA:
- if (copy_from_user(&dbuf, arg, sizeof(dbuf)))
- return -EFAULT;
- spin_lock_irqsave(&lock, flags);
- if (!pss_put_dspword(devc, 0x00d1)) {
- spin_unlock_irqrestore(&lock,flags);
- return -EIO;
- }
- if (!pss_put_dspword(devc, (unsigned short) (dbuf.parm1 & 0xffff))) {
- spin_unlock_irqrestore(&lock,flags);
- return -EIO;
- }
- tmp = (unsigned int)dbuf.parm2 & 0xffff;
- if (!pss_put_dspword(devc, tmp)) {
- spin_unlock_irqrestore(&lock,flags);
- return -EIO;
- }
- spin_unlock_irqrestore(&lock,flags);
- return 0;
-
- case SNDCTL_COPR_WCODE:
- if (copy_from_user(&dbuf, arg, sizeof(dbuf)))
- return -EFAULT;
- spin_lock_irqsave(&lock, flags);
- if (!pss_put_dspword(devc, 0x00d3)) {
- spin_unlock_irqrestore(&lock,flags);
- return -EIO;
- }
- if (!pss_put_dspword(devc, (unsigned short)(dbuf.parm1 & 0xffff))) {
- spin_unlock_irqrestore(&lock,flags);
- return -EIO;
- }
- tmp = (unsigned int)dbuf.parm2 & 0x00ff;
- if (!pss_put_dspword(devc, tmp)) {
- spin_unlock_irqrestore(&lock,flags);
- return -EIO;
- }
- tmp = ((unsigned int)dbuf.parm2 >> 8) & 0xffff;
- if (!pss_put_dspword(devc, tmp)) {
- spin_unlock_irqrestore(&lock,flags);
- return -EIO;
- }
- spin_unlock_irqrestore(&lock,flags);
- return 0;
-
- case SNDCTL_COPR_RCODE:
- if (copy_from_user(&dbuf, arg, sizeof(dbuf)))
- return -EFAULT;
- spin_lock_irqsave(&lock, flags);
- if (!pss_put_dspword(devc, 0x00d2)) {
- spin_unlock_irqrestore(&lock,flags);
- return -EIO;
- }
- if (!pss_put_dspword(devc, (unsigned short)(dbuf.parm1 & 0xffff))) {
- spin_unlock_irqrestore(&lock,flags);
- return -EIO;
- }
- if (!pss_get_dspword(devc, &tmp)) { /* Read MSB */
- spin_unlock_irqrestore(&lock,flags);
- return -EIO;
- }
- dbuf.parm1 = tmp << 8;
- if (!pss_get_dspword(devc, &tmp)) { /* Read LSB */
- spin_unlock_irqrestore(&lock,flags);
- return -EIO;
- }
- dbuf.parm1 |= tmp & 0x00ff;
- spin_unlock_irqrestore(&lock,flags);
- if (copy_to_user(arg, &dbuf, sizeof(dbuf)))
- return -EFAULT;
- return 0;
-
- default:
- return -EINVAL;
- }
- return -EINVAL;
-}
-
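The WCODE and RCODE cases above move a 24-bit ADSP-2115 program word over the 16-bit DSP port in two pieces. A small standalone sketch of the byte layout they assume (the helper names are invented for illustration and are not driver code):

#include <assert.h>

/* Split a 24-bit program word the way SNDCTL_COPR_WCODE transfers it:
 * the low 8 bits are written first, then the upper 16 bits. */
static void split_adsp_word(unsigned int insn24,
                            unsigned short *low8, unsigned short *high16)
{
	*low8   = insn24 & 0x00ff;
	*high16 = (insn24 >> 8) & 0xffff;
}

/* Reassemble it the way SNDCTL_COPR_RCODE does: the DSP returns the
 * upper 16 bits first, then the low 8 bits. */
static unsigned int join_adsp_word(unsigned short high16, unsigned short low8)
{
	return ((unsigned int)high16 << 8) | (low8 & 0x00ff);
}

int main(void)
{
	unsigned short lo, hi;

	split_adsp_word(0x123456, &lo, &hi);
	assert(lo == 0x56 && hi == 0x1234);
	assert(join_adsp_word(hi, lo) == 0x123456);
	return 0;
}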
-static coproc_operations pss_coproc_operations =
-{
- "ADSP-2115",
- THIS_MODULE,
- pss_coproc_open,
- pss_coproc_close,
- pss_coproc_ioctl,
- pss_coproc_reset,
- &pss_data
-};
-
-static int __init probe_pss_mss(struct address_info *hw_config)
-{
- volatile int timeout;
- struct resource *ports;
- int my_mix = -999; /* gcc shut up */
-
- if (!pss_initialized)
- return 0;
-
- if (!request_region(hw_config->io_base, 4, "WSS config")) {
- printk(KERN_ERR "PSS: WSS I/O port conflicts.\n");
- return 0;
- }
- ports = request_region(hw_config->io_base + 4, 4, "ad1848");
- if (!ports) {
- printk(KERN_ERR "PSS: WSS I/O port conflicts.\n");
- release_region(hw_config->io_base, 4);
- return 0;
- }
- set_io_base(devc, CONF_WSS, hw_config->io_base);
- if (!set_irq(devc, CONF_WSS, hw_config->irq)) {
- printk("PSS: WSS IRQ allocation error.\n");
- goto fail;
- }
- if (!set_dma(devc, CONF_WSS, hw_config->dma)) {
- printk(KERN_ERR "PSS: WSS DMA allocation error\n");
- goto fail;
- }
- /*
- * For some reason the card returns 0xff in the WSS status register
- * immediately after boot. Probably MIDI+SB emulation algorithm
- * downloaded to the ADSP2115 spends some time initializing the card.
- * Let's try to wait until it finishes this task.
- */
- for (timeout = 0; timeout < 100000 && (inb(hw_config->io_base + WSS_INDEX) &
- WSS_INITIALIZING); timeout++)
- ;
-
- outb((0x0b), hw_config->io_base + WSS_INDEX); /* Required by some cards */
-
- for (timeout = 0; (inb(hw_config->io_base + WSS_DATA) & WSS_AUTOCALIBRATION) &&
- (timeout < 100000); timeout++)
- ;
-
- if (!probe_ms_sound(hw_config, ports))
- goto fail;
-
- devc->ad_mixer_dev = NO_WSS_MIXER;
- if (pss_mixer)
- {
- if ((my_mix = sound_install_mixer (MIXER_DRIVER_VERSION,
- "PSS-SPEAKERS and AD1848 (through MSS audio codec)",
- &pss_mixer_operations,
- sizeof (struct mixer_operations),
- devc)) < 0)
- {
- printk(KERN_ERR "Could not install PSS mixer\n");
- goto fail;
- }
- }
- pss_mixer_reset(devc);
- attach_ms_sound(hw_config, ports, THIS_MODULE); /* Slot 0 */
-
- if (hw_config->slots[0] != -1)
- {
- /* The MSS driver installed itself */
- audio_devs[hw_config->slots[0]]->coproc = &pss_coproc_operations;
- if (pss_mixer && (num_mixers == (my_mix + 2)))
- {
- /* The MSS mixer installed */
- devc->ad_mixer_dev = audio_devs[hw_config->slots[0]]->mixer_dev;
- }
- }
- return 1;
-fail:
- release_region(hw_config->io_base + 4, 4);
- release_region(hw_config->io_base, 4);
- return 0;
-}
-
-static inline void __exit unload_pss(struct address_info *hw_config)
-{
- release_region(hw_config->io_base, 0x10);
- release_region(hw_config->io_base+0x10, 0x9);
-}
-
-static inline void __exit unload_pss_mpu(struct address_info *hw_config)
-{
- unload_mpu401(hw_config);
-}
-
-static inline void __exit unload_pss_mss(struct address_info *hw_config)
-{
- unload_ms_sound(hw_config);
-}
-
-
-static struct address_info cfg;
-static struct address_info cfg2;
-static struct address_info cfg_mpu;
-
-static int pss_io __initdata = -1;
-static int mss_io __initdata = -1;
-static int mss_irq __initdata = -1;
-static int mss_dma __initdata = -1;
-static int mpu_io __initdata = -1;
-static int mpu_irq __initdata = -1;
-static bool pss_no_sound = 0; /* Just configure non-sound components */
-static bool pss_keep_settings = 1; /* Keep hardware settings at module exit */
-static char *pss_firmware = "/etc/sound/pss_synth";
-
-module_param_hw(pss_io, int, ioport, 0);
-MODULE_PARM_DESC(pss_io, "Set i/o base of PSS card (probably 0x220 or 0x240)");
-module_param_hw(mss_io, int, ioport, 0);
-MODULE_PARM_DESC(mss_io, "Set WSS (audio) i/o base (0x530, 0x604, 0xE80, 0xF40, or other. Address must end in 0 or 4 and must be from 0x100 to 0xFF4)");
-module_param_hw(mss_irq, int, irq, 0);
-MODULE_PARM_DESC(mss_irq, "Set WSS (audio) IRQ (3, 5, 7, 9, 10, 11, 12)");
-module_param_hw(mss_dma, int, dma, 0);
-MODULE_PARM_DESC(mss_dma, "Set WSS (audio) DMA (0, 1, 3)");
-module_param_hw(mpu_io, int, ioport, 0);
-MODULE_PARM_DESC(mpu_io, "Set MIDI i/o base (0x330 or other. Address must be on 4 location boundaries and must be from 0x100 to 0xFFC)");
-module_param_hw(mpu_irq, int, irq, 0);
-MODULE_PARM_DESC(mpu_irq, "Set MIDI IRQ (3, 5, 7, 9, 10, 11, 12)");
-module_param_hw(pss_cdrom_port, int, ioport, 0);
-MODULE_PARM_DESC(pss_cdrom_port, "Set the PSS CDROM port i/o base (0x340 or other)");
-module_param(pss_enable_joystick, bool, 0);
-MODULE_PARM_DESC(pss_enable_joystick, "Enables the PSS joystick port (1 to enable, 0 to disable)");
-module_param(pss_no_sound, bool, 0);
-MODULE_PARM_DESC(pss_no_sound, "Configure only non-sound components (0 - no, 1 - yes)");
-module_param(pss_keep_settings, bool, 0);
-MODULE_PARM_DESC(pss_keep_settings, "Keep hardware settings at driver unloading (0 - no, 1 - yes)");
-module_param(pss_firmware, charp, 0);
-MODULE_PARM_DESC(pss_firmware, "Location of the firmware file (default - /etc/sound/pss_synth)");
-module_param(pss_mixer, bool, 0);
-MODULE_PARM_DESC(pss_mixer, "Enable (1) or disable (0) PSS mixer (controlling of output volume, bass, treble, synth volume). The mixer is not available on all PSS cards.");
-MODULE_AUTHOR("Hannu Savolainen, Vladimir Michl");
-MODULE_DESCRIPTION("Module for PSS sound cards (based on AD1848, ADSP-2115 and ESC614). This module includes control of output amplifier and synth volume of the Beethoven ADSP-16 card (this may work with other PSS cards).");
-MODULE_LICENSE("GPL");
-
-
-static int fw_load = 0;
-static int pssmpu = 0, pssmss = 0;
-
-/*
- * Load a PSS sound card module
- */
-
-static int __init init_pss(void)
-{
-
- if(pss_no_sound) /* If configuring only nonsound components */
- {
- cfg.io_base = pss_io;
- if(!probe_pss(&cfg))
- return -ENODEV;
- printk(KERN_INFO "ECHO-PSS Rev. %d\n", inw(REG(PSS_ID)) & 0x00ff);
- printk(KERN_INFO "PSS: loading in no sound mode.\n");
- disable_all_emulations();
- configure_nonsound_components();
- release_region(pss_io, 0x10);
- release_region(pss_io + 0x10, 0x9);
- return 0;
- }
-
- cfg.io_base = pss_io;
-
- cfg2.io_base = mss_io;
- cfg2.irq = mss_irq;
- cfg2.dma = mss_dma;
-
- cfg_mpu.io_base = mpu_io;
- cfg_mpu.irq = mpu_irq;
-
- if (cfg.io_base == -1 || cfg2.io_base == -1 || cfg2.irq == -1 || cfg2.dma == -1) {
- printk(KERN_INFO "pss: mss_io, mss_dma, mss_irq and pss_io must be set.\n");
- return -EINVAL;
- }
-
- if (!pss_synth) {
- fw_load = 1;
- pss_synthLen = mod_firmware_load(pss_firmware, (void *) &pss_synth);
- }
- if (!attach_pss(&cfg))
- return -ENODEV;
- /*
- * Attach stuff
- */
- if (probe_pss_mpu(&cfg_mpu))
- pssmpu = 1;
-
- if (probe_pss_mss(&cfg2))
- pssmss = 1;
-
- return 0;
-}
-
-static void __exit cleanup_pss(void)
-{
- if(!pss_no_sound)
- {
- if (fw_load)
- vfree(pss_synth);
- if(pssmss)
- unload_pss_mss(&cfg2);
- if(pssmpu)
- unload_pss_mpu(&cfg_mpu);
- unload_pss(&cfg);
- } else if (pss_cdrom_port != -1)
- release_region(pss_cdrom_port, 2);
-
- if(!pss_keep_settings) /* Keep hardware settings if asked */
- {
- disable_all_emulations();
- printk(KERN_INFO "Resetting PSS sound card configurations.\n");
- }
-}
-
-module_init(init_pss);
-module_exit(cleanup_pss);
-
-#ifndef MODULE
-static int __init setup_pss(char *str)
-{
- /* io, mss_io, mss_irq, mss_dma, mpu_io, mpu_irq */
- int ints[7];
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- pss_io = ints[1];
- mss_io = ints[2];
- mss_irq = ints[3];
- mss_dma = ints[4];
- mpu_io = ints[5];
- mpu_irq = ints[6];
-
- return 1;
-}
-
-__setup("pss=", setup_pss);
-#endif
diff --git a/sound/oss/sb.h b/sound/oss/sb.h
deleted file mode 100644
index bb1d18709b36..000000000000
--- a/sound/oss/sb.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#define DSP_RESET (devc->base + 0x6)
-#define DSP_READ (devc->base + 0xA)
-#define DSP_WRITE (devc->base + 0xC)
-#define DSP_COMMAND (devc->base + 0xC)
-#define DSP_STATUS (devc->base + 0xC)
-#define DSP_DATA_AVAIL (devc->base + 0xE)
-#define DSP_DATA_AVL16 (devc->base + 0xF)
-#define MIXER_ADDR (devc->base + 0x4)
-#define MIXER_DATA (devc->base + 0x5)
-#define OPL3_LEFT (devc->base + 0x0)
-#define OPL3_RIGHT (devc->base + 0x2)
-#define OPL3_BOTH (devc->base + 0x8)
-/* DSP Commands */
-
-#define DSP_CMD_SPKON 0xD1
-#define DSP_CMD_SPKOFF 0xD3
-#define DSP_CMD_DMAON 0xD0
-#define DSP_CMD_DMAOFF 0xD4
-
-#define IMODE_NONE 0
-#define IMODE_OUTPUT PCM_ENABLE_OUTPUT
-#define IMODE_INPUT PCM_ENABLE_INPUT
-#define IMODE_INIT 3
-#define IMODE_MIDI 4
-
-#define NORMAL_MIDI 0
-#define UART_MIDI 1
-
-
-/*
- * Device models
- */
-#define MDL_NONE 0
-#define MDL_SB1 1 /* SB1.0 or 1.5 */
-#define MDL_SB2 2 /* SB2.0 */
-#define MDL_SB201 3 /* SB2.01 */
-#define MDL_SBPRO 4 /* SB Pro */
-#define MDL_SB16 5 /* SB16/32/AWE */
-#define MDL_SBPNP 6 /* SB16/32/AWE PnP */
-#define MDL_JAZZ 10 /* Media Vision Jazz16 */
-#define MDL_SMW 11 /* Logitech SoundMan Wave (Jazz16) */
-#define MDL_ESS 12 /* ESS ES688 and ES1688 */
-#define MDL_AZTECH 13 /* Aztech Sound Galaxy family */
-#define MDL_ES1868MIDI 14 /* MIDI port of ESS1868 */
-#define MDL_AEDSP 15 /* Audio Excel DSP 16 */
-#define MDL_ESSPCI 16 /* ESS PCI card */
-#define MDL_YMPCI 17 /* Yamaha PCI sb in emulation */
-
-#define SUBMDL_ALS007 42 /* ALS-007 differs from SB16 only in mixer */
- /* register assignment */
-#define SUBMDL_ALS100 43 /* ALS-100 allows sampling rates of up */
- /* to 48kHz */
-
-/*
- * Config flags
- */
-#define SB_NO_MIDI 0x00000001
-#define SB_NO_MIXER 0x00000002
-#define SB_NO_AUDIO 0x00000004
-#define SB_NO_RECORDING 0x00000008 /* No audio recording */
-#define SB_MIDI_ONLY (SB_NO_AUDIO|SB_NO_MIXER)
-#define SB_PCI_IRQ 0x00000010 /* PCI shared IRQ */
-
-struct mixer_def {
- unsigned int regno: 8;
- unsigned int bitoffs:4;
- unsigned int nbits:4;
-};
-
-typedef struct mixer_def mixer_tab[32][2];
-typedef struct mixer_def mixer_ent;
-
-struct sb_module_options
-{
- int esstype; /* ESS chip type */
- int acer; /* Do acer notebook init? */
- int sm_games; /* Logitech soundman games? */
-};
-
-typedef struct sb_devc {
- int dev;
-
- /* Hardware parameters */
- int *osp;
- int minor, major;
- int type;
- int model, submodel;
- int caps;
-# define SBCAP_STEREO 0x00000001
-# define SBCAP_16BITS 0x00000002
-
- /* Hardware resources */
- int base;
- int irq;
- int dma8, dma16;
-
- int pcibase; /* For ESS Maestro etc */
-
- /* State variables */
- int opened;
- /* new audio fields for full duplex support */
- int fullduplex;
- int duplex;
- int speed, bits, channels;
- volatile int irq_ok;
- volatile int intr_active, irq_mode;
- /* duplicate audio fields for full duplex support */
- volatile int intr_active_16, irq_mode_16;
-
- /* Mixer fields */
- int *levels;
- mixer_tab *iomap;
- size_t iomap_sz; /* number of records in the iomap table */
- int mixer_caps, recmask, outmask, supported_devices;
- int supported_rec_devices, supported_out_devices;
- int my_mixerdev;
- int sbmixnum;
-
- /* Audio fields */
- unsigned long trg_buf;
- int trigger_bits;
- int trg_bytes;
- int trg_intrflag;
- int trg_restart;
- /* duplicate audio fields for full duplex support */
- unsigned long trg_buf_16;
- int trigger_bits_16;
- int trg_bytes_16;
- int trg_intrflag_16;
- int trg_restart_16;
-
- unsigned char tconst;
-
- /* MIDI fields */
- int my_mididev;
- int input_opened;
- int midi_broken;
- void (*midi_input_intr) (int dev, unsigned char data);
- void *midi_irq_cookie; /* IRQ cookie for the midi */
-
- spinlock_t lock;
-
- struct sb_module_options sbmo; /* Module options */
-
- } sb_devc;
-
-/*
- * PCI card types
- */
-
-#define SB_PCI_ESSMAESTRO 1 /* ESS Maestro Legacy */
-#define SB_PCI_YAMAHA 2 /* Yamaha Legacy */
-
-/*
- * Functions
- */
-
-int sb_dsp_command (sb_devc *devc, unsigned char val);
-int sb_dsp_get_byte(sb_devc * devc);
-int sb_dsp_reset (sb_devc *devc);
-void sb_setmixer (sb_devc *devc, unsigned int port, unsigned int value);
-unsigned int sb_getmixer (sb_devc *devc, unsigned int port);
-int sb_dsp_detect (struct address_info *hw_config, int pci, int pciio, struct sb_module_options *sbmo);
-int sb_dsp_init (struct address_info *hw_config, struct module *owner);
-void sb_dsp_unload(struct address_info *hw_config, int sbmpu);
-int sb_mixer_init(sb_devc *devc, struct module *owner);
-void sb_mixer_unload(sb_devc *devc);
-void sb_mixer_set_stereo (sb_devc *devc, int mode);
-void smw_mixer_init(sb_devc *devc);
-void sb_dsp_midi_init (sb_devc *devc, struct module *owner);
-void sb_audio_init (sb_devc *devc, char *name, struct module *owner);
-void sb_midi_interrupt (sb_devc *devc);
-void sb_chgmixer (sb_devc * devc, unsigned int reg, unsigned int mask, unsigned int val);
-int sb_common_mixer_set(sb_devc * devc, int dev, int left, int right);
-
-int sb_audio_open(int dev, int mode);
-void sb_audio_close(int dev);
-
-/* From sb_common.c */
-void sb_dsp_disable_midi(int port);
-int probe_sbmpu (struct address_info *hw_config, struct module *owner);
-void unload_sbmpu (struct address_info *hw_config);
-
-void unload_sb16(struct address_info *hw_info);
-void unload_sb16midi(struct address_info *hw_info);
diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
deleted file mode 100644
index dc91072f4d82..000000000000
--- a/sound/oss/sb_audio.c
+++ /dev/null
@@ -1,1097 +0,0 @@
-/*
- * sound/oss/sb_audio.c
- *
- * Audio routines for Sound Blaster compatible cards.
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- * Changes
- * Alan Cox : Formatting and clean ups
- *
- * Status
- * Mostly working. Weird uart bug causing irq storms
- *
- * Daniel J. Rodriksson: Changes to make sb16 work full duplex.
- * Maybe other 16 bit cards in this code could behave
- * the same.
- * Chris Rankin: Use spinlocks instead of CLI/STI
- */
-
-#include <linux/spinlock.h>
-
-#include "sound_config.h"
-
-#include "sb_mixer.h"
-#include "sb.h"
-
-#include "sb_ess.h"
-
-int sb_audio_open(int dev, int mode)
-{
- sb_devc *devc = audio_devs[dev]->devc;
- unsigned long flags;
-
- if (devc == NULL)
- {
- printk(KERN_ERR "Sound Blaster: incomplete initialization.\n");
- return -ENXIO;
- }
- if (devc->caps & SB_NO_RECORDING && mode & OPEN_READ)
- {
- if (mode == OPEN_READ)
- return -EPERM;
- }
- spin_lock_irqsave(&devc->lock, flags);
- if (devc->opened)
- {
- spin_unlock_irqrestore(&devc->lock, flags);
- return -EBUSY;
- }
- if (devc->dma16 != -1 && devc->dma16 != devc->dma8 && !devc->duplex)
- {
- if (sound_open_dma(devc->dma16, "Sound Blaster 16 bit"))
- {
- spin_unlock_irqrestore(&devc->lock, flags);
- return -EBUSY;
- }
- }
- devc->opened = mode;
- spin_unlock_irqrestore(&devc->lock, flags);
-
- devc->irq_mode = IMODE_NONE;
- devc->irq_mode_16 = IMODE_NONE;
- devc->fullduplex = devc->duplex &&
- ((mode & OPEN_READ) && (mode & OPEN_WRITE));
- sb_dsp_reset(devc);
-
- /* At first glance this check isn't enough: some ESS chips might not
- * have a RECLEV. However, if they don't, common_mixer_set will refuse
- * because devc->iomap has no register mapping for RECLEV.
- */
- if (devc->model == MDL_ESS) ess_mixer_reload (devc, SOUND_MIXER_RECLEV);
-
- /* The ALS007 seems to require that the DSP be removed from the output */
- /* in order for recording to be activated properly. This is done by */
- /* setting the appropriate bits of the output control register 4ch to */
- /* zero. This code assumes that the output control registers are not */
- /* used anywhere else and therefore the DSP bits are *always* ON for */
- /* output and OFF for sampling. */
-
- if (devc->submodel == SUBMDL_ALS007)
- {
- if (mode & OPEN_READ)
- sb_setmixer(devc,ALS007_OUTPUT_CTRL2,
- sb_getmixer(devc,ALS007_OUTPUT_CTRL2) & 0xf9);
- else
- sb_setmixer(devc,ALS007_OUTPUT_CTRL2,
- sb_getmixer(devc,ALS007_OUTPUT_CTRL2) | 0x06);
- }
- return 0;
-}
-
-void sb_audio_close(int dev)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- /* fix things if mmap turned off fullduplex */
- if(devc->duplex
- && !devc->fullduplex
- && (devc->opened & OPEN_READ) && (devc->opened & OPEN_WRITE))
- swap(audio_devs[dev]->dmap_out, audio_devs[dev]->dmap_in);
-
- audio_devs[dev]->dmap_out->dma = devc->dma8;
- audio_devs[dev]->dmap_in->dma = ( devc->duplex ) ?
- devc->dma16 : devc->dma8;
-
- if (devc->dma16 != -1 && devc->dma16 != devc->dma8 && !devc->duplex)
- sound_close_dma(devc->dma16);
-
- /* For ALS007, turn DSP output back on if closing the device for read */
-
- if ((devc->submodel == SUBMDL_ALS007) && (devc->opened & OPEN_READ))
- {
- sb_setmixer(devc,ALS007_OUTPUT_CTRL2,
- sb_getmixer(devc,ALS007_OUTPUT_CTRL2) | 0x06);
- }
- devc->opened = 0;
-}
-
-static void sb_set_output_parms(int dev, unsigned long buf, int nr_bytes,
- int intrflag)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- if (!devc->fullduplex || devc->bits == AFMT_S16_LE)
- {
- devc->trg_buf = buf;
- devc->trg_bytes = nr_bytes;
- devc->trg_intrflag = intrflag;
- devc->irq_mode = IMODE_OUTPUT;
- }
- else
- {
- devc->trg_buf_16 = buf;
- devc->trg_bytes_16 = nr_bytes;
- devc->trg_intrflag_16 = intrflag;
- devc->irq_mode_16 = IMODE_OUTPUT;
- }
-}
-
-static void sb_set_input_parms(int dev, unsigned long buf, int count, int intrflag)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- if (!devc->fullduplex || devc->bits != AFMT_S16_LE)
- {
- devc->trg_buf = buf;
- devc->trg_bytes = count;
- devc->trg_intrflag = intrflag;
- devc->irq_mode = IMODE_INPUT;
- }
- else
- {
- devc->trg_buf_16 = buf;
- devc->trg_bytes_16 = count;
- devc->trg_intrflag_16 = intrflag;
- devc->irq_mode_16 = IMODE_INPUT;
- }
-}
-
-/*
- * SB1.x compatible routines
- */
-
-static void sb1_audio_output_block(int dev, unsigned long buf, int nr_bytes, int intrflag)
-{
- unsigned long flags;
- int count = nr_bytes;
- sb_devc *devc = audio_devs[dev]->devc;
-
- /* DMAbuf_start_dma (dev, buf, count, DMA_MODE_WRITE); */
-
- if (audio_devs[dev]->dmap_out->dma > 3)
- count >>= 1;
- count--;
-
- devc->irq_mode = IMODE_OUTPUT;
-
- spin_lock_irqsave(&devc->lock, flags);
- if (sb_dsp_command(devc, 0x14)) /* 8 bit DAC using DMA */
- {
- sb_dsp_command(devc, (unsigned char) (count & 0xff));
- sb_dsp_command(devc, (unsigned char) ((count >> 8) & 0xff));
- }
- else
- printk(KERN_WARNING "Sound Blaster: unable to start DAC.\n");
- spin_unlock_irqrestore(&devc->lock, flags);
- devc->intr_active = 1;
-}
-
-static void sb1_audio_start_input(int dev, unsigned long buf, int nr_bytes, int intrflag)
-{
- unsigned long flags;
- int count = nr_bytes;
- sb_devc *devc = audio_devs[dev]->devc;
-
- /*
- * Start a DMA input to the buffer pointed by dmaqtail
- */
-
- /* DMAbuf_start_dma (dev, buf, count, DMA_MODE_READ); */
-
- if (audio_devs[dev]->dmap_out->dma > 3)
- count >>= 1;
- count--;
-
- devc->irq_mode = IMODE_INPUT;
-
- spin_lock_irqsave(&devc->lock, flags);
- if (sb_dsp_command(devc, 0x24)) /* 8 bit ADC using DMA */
- {
- sb_dsp_command(devc, (unsigned char) (count & 0xff));
- sb_dsp_command(devc, (unsigned char) ((count >> 8) & 0xff));
- }
- else
- printk(KERN_ERR "Sound Blaster: unable to start ADC.\n");
- spin_unlock_irqrestore(&devc->lock, flags);
-
- devc->intr_active = 1;
-}
-
-static void sb1_audio_trigger(int dev, int bits)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- bits &= devc->irq_mode;
-
- if (!bits)
- sb_dsp_command(devc, 0xd0); /* Halt DMA */
- else
- {
- switch (devc->irq_mode)
- {
- case IMODE_INPUT:
- sb1_audio_start_input(dev, devc->trg_buf, devc->trg_bytes,
- devc->trg_intrflag);
- break;
-
- case IMODE_OUTPUT:
- sb1_audio_output_block(dev, devc->trg_buf, devc->trg_bytes,
- devc->trg_intrflag);
- break;
- }
- }
- devc->trigger_bits = bits;
-}
-
-static int sb1_audio_prepare_for_input(int dev, int bsize, int bcount)
-{
- sb_devc *devc = audio_devs[dev]->devc;
- unsigned long flags;
-
- spin_lock_irqsave(&devc->lock, flags);
- if (sb_dsp_command(devc, 0x40))
- sb_dsp_command(devc, devc->tconst);
- sb_dsp_command(devc, DSP_CMD_SPKOFF);
- spin_unlock_irqrestore(&devc->lock, flags);
-
- devc->trigger_bits = 0;
- return 0;
-}
-
-static int sb1_audio_prepare_for_output(int dev, int bsize, int bcount)
-{
- sb_devc *devc = audio_devs[dev]->devc;
- unsigned long flags;
-
- spin_lock_irqsave(&devc->lock, flags);
- if (sb_dsp_command(devc, 0x40))
- sb_dsp_command(devc, devc->tconst);
- sb_dsp_command(devc, DSP_CMD_SPKON);
- spin_unlock_irqrestore(&devc->lock, flags);
- devc->trigger_bits = 0;
- return 0;
-}
-
-static int sb1_audio_set_speed(int dev, int speed)
-{
- int max_speed = 23000;
- sb_devc *devc = audio_devs[dev]->devc;
- int tmp;
-
- if (devc->opened & OPEN_READ)
- max_speed = 13000;
-
- if (speed > 0)
- {
- if (speed < 4000)
- speed = 4000;
-
- if (speed > max_speed)
- speed = max_speed;
-
- devc->tconst = (256 - ((1000000 + speed / 2) / speed)) & 0xff;
- tmp = 256 - devc->tconst;
- speed = (1000000 + tmp / 2) / tmp;
-
- devc->speed = speed;
- }
- return devc->speed;
-}
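The set_speed handlers above and below all reduce the requested rate to the Sound Blaster DSP's 8-bit time constant and then report the rate the hardware will actually run at. A minimal standalone sketch of that rounding (the helper names and the 22050 Hz value are only illustrative, not part of the driver):

#include <stdio.h>

/* tc = 256 - 1000000/rate, rounded to the nearest integer */
static unsigned char sb_time_constant(int rate)
{
	return (256 - ((1000000 + rate / 2) / rate)) & 0xff;
}

/* Rate the DSP will really use for a given time constant */
static int sb_effective_rate(unsigned char tconst)
{
	int tmp = 256 - tconst;

	return (1000000 + tmp / 2) / tmp;
}

int main(void)
{
	int requested = 22050;	/* example rate */
	unsigned char tc = sb_time_constant(requested);

	/* Prints "requested 22050 -> tc 211 -> actual 22222" */
	printf("requested %d -> tc %d -> actual %d\n",
	       requested, tc, sb_effective_rate(tc));
	return 0;
}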
-
-static short sb1_audio_set_channels(int dev, short channels)
-{
- sb_devc *devc = audio_devs[dev]->devc;
- return devc->channels = 1;
-}
-
-static unsigned int sb1_audio_set_bits(int dev, unsigned int bits)
-{
- sb_devc *devc = audio_devs[dev]->devc;
- return devc->bits = 8;
-}
-
-static void sb1_audio_halt_xfer(int dev)
-{
- unsigned long flags;
- sb_devc *devc = audio_devs[dev]->devc;
-
- spin_lock_irqsave(&devc->lock, flags);
- sb_dsp_reset(devc);
- spin_unlock_irqrestore(&devc->lock, flags);
-}
-
-/*
- * SB 2.0 and SB 2.01 compatible routines
- */
-
-static void sb20_audio_output_block(int dev, unsigned long buf, int nr_bytes,
- int intrflag)
-{
- unsigned long flags;
- int count = nr_bytes;
- sb_devc *devc = audio_devs[dev]->devc;
- unsigned char cmd;
-
- /* DMAbuf_start_dma (dev, buf, count, DMA_MODE_WRITE); */
-
- if (audio_devs[dev]->dmap_out->dma > 3)
- count >>= 1;
- count--;
-
- devc->irq_mode = IMODE_OUTPUT;
-
- spin_lock_irqsave(&devc->lock, flags);
- if (sb_dsp_command(devc, 0x48)) /* DSP Block size */
- {
- sb_dsp_command(devc, (unsigned char) (count & 0xff));
- sb_dsp_command(devc, (unsigned char) ((count >> 8) & 0xff));
-
- if (devc->speed * devc->channels <= 23000)
- cmd = 0x1c; /* 8 bit PCM output */
- else
- cmd = 0x90; /* 8 bit high speed PCM output (SB2.01/Pro) */
-
- if (!sb_dsp_command(devc, cmd))
- printk(KERN_ERR "Sound Blaster: unable to start DAC.\n");
- }
- else
- printk(KERN_ERR "Sound Blaster: unable to start DAC.\n");
- spin_unlock_irqrestore(&devc->lock, flags);
- devc->intr_active = 1;
-}
-
-static void sb20_audio_start_input(int dev, unsigned long buf, int nr_bytes, int intrflag)
-{
- unsigned long flags;
- int count = nr_bytes;
- sb_devc *devc = audio_devs[dev]->devc;
- unsigned char cmd;
-
- /*
- * Start a DMA input to the buffer pointed by dmaqtail
- */
-
- /* DMAbuf_start_dma (dev, buf, count, DMA_MODE_READ); */
-
- if (audio_devs[dev]->dmap_out->dma > 3)
- count >>= 1;
- count--;
-
- devc->irq_mode = IMODE_INPUT;
-
- spin_lock_irqsave(&devc->lock, flags);
- if (sb_dsp_command(devc, 0x48)) /* DSP Block size */
- {
- sb_dsp_command(devc, (unsigned char) (count & 0xff));
- sb_dsp_command(devc, (unsigned char) ((count >> 8) & 0xff));
-
- if (devc->speed * devc->channels <= (devc->major == 3 ? 23000 : 13000))
- cmd = 0x2c; /* 8 bit PCM input */
- else
- cmd = 0x98; /* 8 bit high speed PCM input (SB2.01/Pro) */
-
- if (!sb_dsp_command(devc, cmd))
- printk(KERN_ERR "Sound Blaster: unable to start ADC.\n");
- }
- else
- printk(KERN_ERR "Sound Blaster: unable to start ADC.\n");
- spin_unlock_irqrestore(&devc->lock, flags);
- devc->intr_active = 1;
-}
-
-static void sb20_audio_trigger(int dev, int bits)
-{
- sb_devc *devc = audio_devs[dev]->devc;
- bits &= devc->irq_mode;
-
- if (!bits)
- sb_dsp_command(devc, 0xd0); /* Halt DMA */
- else
- {
- switch (devc->irq_mode)
- {
- case IMODE_INPUT:
- sb20_audio_start_input(dev, devc->trg_buf, devc->trg_bytes,
- devc->trg_intrflag);
- break;
-
- case IMODE_OUTPUT:
- sb20_audio_output_block(dev, devc->trg_buf, devc->trg_bytes,
- devc->trg_intrflag);
- break;
- }
- }
- devc->trigger_bits = bits;
-}
-
-/*
- * SB2.01 specific speed setup
- */
-
-static int sb201_audio_set_speed(int dev, int speed)
-{
- sb_devc *devc = audio_devs[dev]->devc;
- int tmp;
- int s;
-
- if (speed > 0)
- {
- if (speed < 4000)
- speed = 4000;
- if (speed > 44100)
- speed = 44100;
- if (devc->opened & OPEN_READ && speed > 15000)
- speed = 15000;
- s = speed * devc->channels;
- devc->tconst = (256 - ((1000000 + s / 2) / s)) & 0xff;
- tmp = 256 - devc->tconst;
- speed = ((1000000 + tmp / 2) / tmp) / devc->channels;
-
- devc->speed = speed;
- }
- return devc->speed;
-}
-
-/*
- * SB Pro specific routines
- */
-
-static int sbpro_audio_prepare_for_input(int dev, int bsize, int bcount)
-{ /* For SB Pro and Jazz16 */
- sb_devc *devc = audio_devs[dev]->devc;
- unsigned long flags;
- unsigned char bits = 0;
-
- if (devc->dma16 >= 0 && devc->dma16 != devc->dma8)
- audio_devs[dev]->dmap_out->dma = audio_devs[dev]->dmap_in->dma =
- devc->bits == 16 ? devc->dma16 : devc->dma8;
-
- if (devc->model == MDL_JAZZ || devc->model == MDL_SMW)
- if (devc->bits == AFMT_S16_LE)
- bits = 0x04; /* 16 bit mode */
-
- spin_lock_irqsave(&devc->lock, flags);
- if (sb_dsp_command(devc, 0x40))
- sb_dsp_command(devc, devc->tconst);
- sb_dsp_command(devc, DSP_CMD_SPKOFF);
- if (devc->channels == 1)
- sb_dsp_command(devc, 0xa0 | bits); /* Mono input */
- else
- sb_dsp_command(devc, 0xa8 | bits); /* Stereo input */
- spin_unlock_irqrestore(&devc->lock, flags);
-
- devc->trigger_bits = 0;
- return 0;
-}
-
-static int sbpro_audio_prepare_for_output(int dev, int bsize, int bcount)
-{ /* For SB Pro and Jazz16 */
- sb_devc *devc = audio_devs[dev]->devc;
- unsigned long flags;
- unsigned char tmp;
- unsigned char bits = 0;
-
- if (devc->dma16 >= 0 && devc->dma16 != devc->dma8)
- audio_devs[dev]->dmap_out->dma = audio_devs[dev]->dmap_in->dma = devc->bits == 16 ? devc->dma16 : devc->dma8;
- if (devc->model == MDL_SBPRO)
- sb_mixer_set_stereo(devc, devc->channels == 2);
-
- spin_lock_irqsave(&devc->lock, flags);
- if (sb_dsp_command(devc, 0x40))
- sb_dsp_command(devc, devc->tconst);
- sb_dsp_command(devc, DSP_CMD_SPKON);
-
- if (devc->model == MDL_JAZZ || devc->model == MDL_SMW)
- {
- if (devc->bits == AFMT_S16_LE)
- bits = 0x04; /* 16 bit mode */
-
- if (devc->channels == 1)
- sb_dsp_command(devc, 0xa0 | bits); /* Mono output */
- else
- sb_dsp_command(devc, 0xa8 | bits); /* Stereo output */
- spin_unlock_irqrestore(&devc->lock, flags);
- }
- else
- {
- spin_unlock_irqrestore(&devc->lock, flags);
- tmp = sb_getmixer(devc, 0x0e);
- if (devc->channels == 1)
- tmp &= ~0x02;
- else
- tmp |= 0x02;
- sb_setmixer(devc, 0x0e, tmp);
- }
- devc->trigger_bits = 0;
- return 0;
-}
-
-static int sbpro_audio_set_speed(int dev, int speed)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- if (speed > 0)
- {
- if (speed < 4000)
- speed = 4000;
- if (speed > 44100)
- speed = 44100;
- if (devc->channels > 1 && speed > 22050)
- speed = 22050;
- sb201_audio_set_speed(dev, speed);
- }
- return devc->speed;
-}
-
-static short sbpro_audio_set_channels(int dev, short channels)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- if (channels == 1 || channels == 2)
- {
- if (channels != devc->channels)
- {
- devc->channels = channels;
- if (devc->model == MDL_SBPRO && devc->channels == 2)
- sbpro_audio_set_speed(dev, devc->speed);
- }
- }
- return devc->channels;
-}
-
-static int jazz16_audio_set_speed(int dev, int speed)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- if (speed > 0)
- {
- int tmp;
- int s;
-
- if (speed < 5000)
- speed = 5000;
- if (speed > 44100)
- speed = 44100;
-
- s = speed * devc->channels;
-
- devc->tconst = (256 - ((1000000 + s / 2) / s)) & 0xff;
-
- tmp = 256 - devc->tconst;
- speed = ((1000000 + tmp / 2) / tmp) / devc->channels;
-
- devc->speed = speed;
- }
- return devc->speed;
-}
-
-/*
- * SB16 specific routines
- */
-
-static int sb16_audio_set_speed(int dev, int speed)
-{
- sb_devc *devc = audio_devs[dev]->devc;
- int max_speed = devc->submodel == SUBMDL_ALS100 ? 48000 : 44100;
-
- if (speed > 0)
- {
- if (speed < 5000)
- speed = 5000;
-
- if (speed > max_speed)
- speed = max_speed;
-
- devc->speed = speed;
- }
- return devc->speed;
-}
-
-static unsigned int sb16_audio_set_bits(int dev, unsigned int bits)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- if (bits != 0)
- {
- if (bits == AFMT_U8 || bits == AFMT_S16_LE)
- devc->bits = bits;
- else
- devc->bits = AFMT_U8;
- }
-
- return devc->bits;
-}
-
-static int sb16_audio_prepare_for_input(int dev, int bsize, int bcount)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- if (!devc->fullduplex)
- {
- audio_devs[dev]->dmap_out->dma =
- audio_devs[dev]->dmap_in->dma =
- devc->bits == AFMT_S16_LE ?
- devc->dma16 : devc->dma8;
- }
- else if (devc->bits == AFMT_S16_LE)
- {
- audio_devs[dev]->dmap_out->dma = devc->dma8;
- audio_devs[dev]->dmap_in->dma = devc->dma16;
- }
- else
- {
- audio_devs[dev]->dmap_out->dma = devc->dma16;
- audio_devs[dev]->dmap_in->dma = devc->dma8;
- }
-
- devc->trigger_bits = 0;
- return 0;
-}
-
-static int sb16_audio_prepare_for_output(int dev, int bsize, int bcount)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- if (!devc->fullduplex)
- {
- audio_devs[dev]->dmap_out->dma =
- audio_devs[dev]->dmap_in->dma =
- devc->bits == AFMT_S16_LE ?
- devc->dma16 : devc->dma8;
- }
- else if (devc->bits == AFMT_S16_LE)
- {
- audio_devs[dev]->dmap_out->dma = devc->dma8;
- audio_devs[dev]->dmap_in->dma = devc->dma16;
- }
- else
- {
- audio_devs[dev]->dmap_out->dma = devc->dma16;
- audio_devs[dev]->dmap_in->dma = devc->dma8;
- }
-
- devc->trigger_bits = 0;
- return 0;
-}
-
-static void sb16_audio_output_block(int dev, unsigned long buf, int count,
- int intrflag)
-{
- unsigned long flags, cnt;
- sb_devc *devc = audio_devs[dev]->devc;
- unsigned long bits;
-
- if (!devc->fullduplex || devc->bits == AFMT_S16_LE)
- {
- devc->irq_mode = IMODE_OUTPUT;
- devc->intr_active = 1;
- }
- else
- {
- devc->irq_mode_16 = IMODE_OUTPUT;
- devc->intr_active_16 = 1;
- }
-
- /* save value */
- spin_lock_irqsave(&devc->lock, flags);
- bits = devc->bits;
- if (devc->fullduplex)
- devc->bits = (devc->bits == AFMT_S16_LE) ?
- AFMT_U8 : AFMT_S16_LE;
- spin_unlock_irqrestore(&devc->lock, flags);
-
- cnt = count;
- if (devc->bits == AFMT_S16_LE)
- cnt >>= 1;
- cnt--;
-
- spin_lock_irqsave(&devc->lock, flags);
-
- /* DMAbuf_start_dma (dev, buf, count, DMA_MODE_WRITE); */
-
- sb_dsp_command(devc, 0x41);
- sb_dsp_command(devc, (unsigned char) ((devc->speed >> 8) & 0xff));
- sb_dsp_command(devc, (unsigned char) (devc->speed & 0xff));
-
- sb_dsp_command(devc, (devc->bits == AFMT_S16_LE ? 0xb6 : 0xc6));
- sb_dsp_command(devc, ((devc->channels == 2 ? 0x20 : 0) +
- (devc->bits == AFMT_S16_LE ? 0x10 : 0)));
- sb_dsp_command(devc, (unsigned char) (cnt & 0xff));
- sb_dsp_command(devc, (unsigned char) (cnt >> 8));
-
- /* restore real value after all programming */
- devc->bits = bits;
- spin_unlock_irqrestore(&devc->lock, flags);
-}
-
-
-/*
- * This fails on the Cyrix MediaGX. If you don't have the DMA enabled
- * before the first sample arrives it locks up. However even if you
- * do enable the DMA in time you just get DMA timeouts and missing
- * interrupts and stuff, so for now I've not bothered fixing this either.
- */
-
-static void sb16_audio_start_input(int dev, unsigned long buf, int count, int intrflag)
-{
- unsigned long flags, cnt;
- sb_devc *devc = audio_devs[dev]->devc;
-
- if (!devc->fullduplex || devc->bits != AFMT_S16_LE)
- {
- devc->irq_mode = IMODE_INPUT;
- devc->intr_active = 1;
- }
- else
- {
- devc->irq_mode_16 = IMODE_INPUT;
- devc->intr_active_16 = 1;
- }
-
- cnt = count;
- if (devc->bits == AFMT_S16_LE)
- cnt >>= 1;
- cnt--;
-
- spin_lock_irqsave(&devc->lock, flags);
-
- /* DMAbuf_start_dma (dev, buf, count, DMA_MODE_READ); */
-
- sb_dsp_command(devc, 0x42);
- sb_dsp_command(devc, (unsigned char) ((devc->speed >> 8) & 0xff));
- sb_dsp_command(devc, (unsigned char) (devc->speed & 0xff));
-
- sb_dsp_command(devc, (devc->bits == AFMT_S16_LE ? 0xbe : 0xce));
- sb_dsp_command(devc, ((devc->channels == 2 ? 0x20 : 0) +
- (devc->bits == AFMT_S16_LE ? 0x10 : 0)));
- sb_dsp_command(devc, (unsigned char) (cnt & 0xff));
- sb_dsp_command(devc, (unsigned char) (cnt >> 8));
-
- spin_unlock_irqrestore(&devc->lock, flags);
-}
-
-static void sb16_audio_trigger(int dev, int bits)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- int bits_16 = bits & devc->irq_mode_16;
- bits &= devc->irq_mode;
-
- if (!bits && !bits_16)
- sb_dsp_command(devc, 0xd0); /* Halt DMA */
- else
- {
- if (bits)
- {
- switch (devc->irq_mode)
- {
- case IMODE_INPUT:
- sb16_audio_start_input(dev,
- devc->trg_buf,
- devc->trg_bytes,
- devc->trg_intrflag);
- break;
-
- case IMODE_OUTPUT:
- sb16_audio_output_block(dev,
- devc->trg_buf,
- devc->trg_bytes,
- devc->trg_intrflag);
- break;
- }
- }
- if (bits_16)
- {
- switch (devc->irq_mode_16)
- {
- case IMODE_INPUT:
- sb16_audio_start_input(dev,
- devc->trg_buf_16,
- devc->trg_bytes_16,
- devc->trg_intrflag_16);
- break;
-
- case IMODE_OUTPUT:
- sb16_audio_output_block(dev,
- devc->trg_buf_16,
- devc->trg_bytes_16,
- devc->trg_intrflag_16);
- break;
- }
- }
- }
-
- devc->trigger_bits = bits | bits_16;
-}
-
-static unsigned char lbuf8[2048];
-static signed short *lbuf16 = (signed short *)lbuf8;
-#define LBUFCOPYSIZE 1024
-static void
-sb16_copy_from_user(int dev,
- char *localbuf, int localoffs,
- const char __user *userbuf, int useroffs,
- int max_in, int max_out,
- int *used, int *returned,
- int len)
-{
- sb_devc *devc = audio_devs[dev]->devc;
- int i, c, p, locallen;
- unsigned char *buf8;
- signed short *buf16;
-
- /* if not duplex no conversion */
- if (!devc->fullduplex)
- {
- if (copy_from_user(localbuf + localoffs,
- userbuf + useroffs, len))
- return;
- *used = len;
- *returned = len;
- }
- else if (devc->bits == AFMT_S16_LE)
- {
- /* 16 -> 8 */
- /* max_in >> 1, max number of samples in ( 16 bits ) */
- /* max_out, max number of samples out ( 8 bits ) */
- /* len, number of samples that will be taken ( 16 bits )*/
- /* c, count of samples remaining in buffer ( 16 bits )*/
- /* p, count of samples already processed ( 16 bits )*/
- len = ( (max_in >> 1) > max_out) ? max_out : (max_in >> 1);
- c = len;
- p = 0;
- buf8 = (unsigned char *)(localbuf + localoffs);
- while (c)
- {
- locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
- /* << 1 in order to get 16 bit samples */
- if (copy_from_user(lbuf16,
- userbuf + useroffs + (p << 1),
- locallen << 1))
- return;
- for (i = 0; i < locallen; i++)
- {
- buf8[p+i] = ~((lbuf16[i] >> 8) & 0xff) ^ 0x80;
- }
- c -= locallen; p += locallen;
- }
- /* used = ( samples * 16 bits size ) */
- *used = max_in > ( max_out << 1) ? (max_out << 1) : max_in;
- /* returned = ( samples * 8 bits size ) */
- *returned = len;
- }
- else
- {
- /* 8 -> 16 */
- /* max_in, max number of samples in ( 8 bits ) */
- /* max_out >> 1, max number of samples out ( 16 bits ) */
- /* len, number of samples that will be taken ( 8 bits )*/
- /* c, count of samples remaining in buffer ( 8 bits )*/
- /* p, count of samples already processed ( 8 bits )*/
- len = max_in > (max_out >> 1) ? (max_out >> 1) : max_in;
- c = len;
- p = 0;
- buf16 = (signed short *)(localbuf + localoffs);
- while (c)
- {
- locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
- if (copy_from_user(lbuf8,
- userbuf+useroffs + p,
- locallen))
- return;
- for (i = 0; i < locallen; i++)
- {
- buf16[p+i] = (~lbuf8[i] ^ 0x80) << 8;
- }
- c -= locallen; p += locallen;
- }
- /* used = ( samples * 8 bits size ) */
- *used = len;
- /* returned = ( samples * 16 bits size ) */
- *returned = len << 1;
- }
-}
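When sb16_copy_from_user() fakes full duplex above, it converts every sample between 16-bit signed and 8-bit unsigned using the bit manipulations shown in the two loops. A standalone sketch of those conversions (helper names are invented; this is an illustration, not driver code):

#include <assert.h>

static unsigned char s16_to_u8(signed short s)
{
	/* Same expression as the 16 -> 8 loop above */
	return (unsigned char)(~((s >> 8) & 0xff) ^ 0x80);
}

static signed short u8_to_s16(unsigned char u)
{
	/* Same expression as the 8 -> 16 loop above; the 0xff mask only
	 * keeps the shift well-defined in this standalone example. */
	return (signed short)(((~u ^ 0x80) & 0xff) << 8);
}

int main(void)
{
	unsigned char u = 0x5a;

	/* Round-tripping an 8-bit sample through 16 bits gives it back. */
	assert(s16_to_u8(u8_to_s16(u)) == u);
	return 0;
}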
-
-static void
-sb16_audio_mmap(int dev)
-{
- sb_devc *devc = audio_devs[dev]->devc;
- devc->fullduplex = 0;
-}
-
-static struct audio_driver sb1_audio_driver = /* SB1.x */
-{
- .owner = THIS_MODULE,
- .open = sb_audio_open,
- .close = sb_audio_close,
- .output_block = sb_set_output_parms,
- .start_input = sb_set_input_parms,
- .prepare_for_input = sb1_audio_prepare_for_input,
- .prepare_for_output = sb1_audio_prepare_for_output,
- .halt_io = sb1_audio_halt_xfer,
- .trigger = sb1_audio_trigger,
- .set_speed = sb1_audio_set_speed,
- .set_bits = sb1_audio_set_bits,
- .set_channels = sb1_audio_set_channels
-};
-
-static struct audio_driver sb20_audio_driver = /* SB2.0 */
-{
- .owner = THIS_MODULE,
- .open = sb_audio_open,
- .close = sb_audio_close,
- .output_block = sb_set_output_parms,
- .start_input = sb_set_input_parms,
- .prepare_for_input = sb1_audio_prepare_for_input,
- .prepare_for_output = sb1_audio_prepare_for_output,
- .halt_io = sb1_audio_halt_xfer,
- .trigger = sb20_audio_trigger,
- .set_speed = sb1_audio_set_speed,
- .set_bits = sb1_audio_set_bits,
- .set_channels = sb1_audio_set_channels
-};
-
-static struct audio_driver sb201_audio_driver = /* SB2.01 */
-{
- .owner = THIS_MODULE,
- .open = sb_audio_open,
- .close = sb_audio_close,
- .output_block = sb_set_output_parms,
- .start_input = sb_set_input_parms,
- .prepare_for_input = sb1_audio_prepare_for_input,
- .prepare_for_output = sb1_audio_prepare_for_output,
- .halt_io = sb1_audio_halt_xfer,
- .trigger = sb20_audio_trigger,
- .set_speed = sb201_audio_set_speed,
- .set_bits = sb1_audio_set_bits,
- .set_channels = sb1_audio_set_channels
-};
-
-static struct audio_driver sbpro_audio_driver = /* SB Pro */
-{
- .owner = THIS_MODULE,
- .open = sb_audio_open,
- .close = sb_audio_close,
- .output_block = sb_set_output_parms,
- .start_input = sb_set_input_parms,
- .prepare_for_input = sbpro_audio_prepare_for_input,
- .prepare_for_output = sbpro_audio_prepare_for_output,
- .halt_io = sb1_audio_halt_xfer,
- .trigger = sb20_audio_trigger,
- .set_speed = sbpro_audio_set_speed,
- .set_bits = sb1_audio_set_bits,
- .set_channels = sbpro_audio_set_channels
-};
-
-static struct audio_driver jazz16_audio_driver = /* Jazz16 and SM Wave */
-{
- .owner = THIS_MODULE,
- .open = sb_audio_open,
- .close = sb_audio_close,
- .output_block = sb_set_output_parms,
- .start_input = sb_set_input_parms,
- .prepare_for_input = sbpro_audio_prepare_for_input,
- .prepare_for_output = sbpro_audio_prepare_for_output,
- .halt_io = sb1_audio_halt_xfer,
- .trigger = sb20_audio_trigger,
- .set_speed = jazz16_audio_set_speed,
- .set_bits = sb16_audio_set_bits,
- .set_channels = sbpro_audio_set_channels
-};
-
-static struct audio_driver sb16_audio_driver = /* SB16 */
-{
- .owner = THIS_MODULE,
- .open = sb_audio_open,
- .close = sb_audio_close,
- .output_block = sb_set_output_parms,
- .start_input = sb_set_input_parms,
- .prepare_for_input = sb16_audio_prepare_for_input,
- .prepare_for_output = sb16_audio_prepare_for_output,
- .halt_io = sb1_audio_halt_xfer,
- .copy_user = sb16_copy_from_user,
- .trigger = sb16_audio_trigger,
- .set_speed = sb16_audio_set_speed,
- .set_bits = sb16_audio_set_bits,
- .set_channels = sbpro_audio_set_channels,
- .mmap = sb16_audio_mmap
-};
-
-void sb_audio_init(sb_devc * devc, char *name, struct module *owner)
-{
- int audio_flags = 0;
- int format_mask = AFMT_U8;
-
- struct audio_driver *driver = &sb1_audio_driver;
-
- switch (devc->model)
- {
- case MDL_SB1: /* SB1.0 or SB 1.5 */
- DDB(printk("Will use standard SB1.x driver\n"));
- audio_flags = DMA_HARDSTOP;
- break;
-
- case MDL_SB2:
- DDB(printk("Will use SB2.0 driver\n"));
- audio_flags = DMA_AUTOMODE;
- driver = &sb20_audio_driver;
- break;
-
- case MDL_SB201:
- DDB(printk("Will use SB2.01 (high speed) driver\n"));
- audio_flags = DMA_AUTOMODE;
- driver = &sb201_audio_driver;
- break;
-
- case MDL_JAZZ:
- case MDL_SMW:
- DDB(printk("Will use Jazz16 driver\n"));
- audio_flags = DMA_AUTOMODE;
- format_mask |= AFMT_S16_LE;
- driver = &jazz16_audio_driver;
- break;
-
- case MDL_ESS:
- DDB(printk("Will use ESS ES688/1688 driver\n"));
- driver = ess_audio_init (devc, &audio_flags, &format_mask);
- break;
-
- case MDL_SB16:
- DDB(printk("Will use SB16 driver\n"));
- audio_flags = DMA_AUTOMODE;
- format_mask |= AFMT_S16_LE;
- if (devc->dma8 != devc->dma16 && devc->dma16 != -1)
- {
- audio_flags |= DMA_DUPLEX;
- devc->duplex = 1;
- }
- driver = &sb16_audio_driver;
- break;
-
- default:
- DDB(printk("Will use SB Pro driver\n"));
- audio_flags = DMA_AUTOMODE;
- driver = &sbpro_audio_driver;
- }
-
- if (owner)
- driver->owner = owner;
-
- if ((devc->dev = sound_install_audiodrv(AUDIO_DRIVER_VERSION,
- name,driver, sizeof(struct audio_driver),
- audio_flags, format_mask, devc,
- devc->dma8,
- devc->duplex ? devc->dma16 : devc->dma8)) < 0)
- {
- printk(KERN_ERR "Sound Blaster: unable to install audio.\n");
- return;
- }
- audio_devs[devc->dev]->mixer_dev = devc->my_mixerdev;
- audio_devs[devc->dev]->min_fragment = 5;
-}
diff --git a/sound/oss/sb_card.c b/sound/oss/sb_card.c
deleted file mode 100644
index 2a92cfe6cfe9..000000000000
--- a/sound/oss/sb_card.c
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * sound/oss/sb_card.c
- *
- * Detection routine for the ISA Sound Blaster and compatible sound
- * cards.
- *
- * This file is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this
- * software for more info.
- *
- * This is a complete rewrite of the detection routines. This was
- * prompted by the PnP API change during v2.5 and the ugly state the
- * code was in.
- *
- * Copyright (C) by Paul Laufer 2002. Based on code originally by
- * Hannu Savolainen which was modified by many others over the
- * years. Authors specifically mentioned in the previous version were:
- * Daniel Stone, Alessandro Zummo, Jeff Garzik, Arnaldo Carvalho de
- * Melo, Daniel Church, and myself.
- *
- * 02-05-2003 Original Release, Paul Laufer <paul@laufernet.com>
- * 02-07-2003 Bug made it into first release. Take two.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include "sound_config.h"
-#include "sb_mixer.h"
-#include "sb.h"
-#ifdef CONFIG_PNP
-#include <linux/pnp.h>
-#endif /* CONFIG_PNP */
-#include "sb_card.h"
-
-MODULE_DESCRIPTION("OSS Soundblaster ISA PnP and legacy sound driver");
-MODULE_LICENSE("GPL");
-
-extern void *smw_free;
-
-static int __initdata mpu_io = 0;
-static int __initdata io = -1;
-static int __initdata irq = -1;
-static int __initdata dma = -1;
-static int __initdata dma16 = -1;
-static int __initdata type = 0; /* Can set this to a specific card type */
-static int __initdata esstype = 0; /* ESS chip type */
-static int __initdata acer = 0; /* Do acer notebook init? */
-static int __initdata sm_games = 0; /* Logitech soundman games? */
-
-static struct sb_card_config *legacy = NULL;
-
-#ifdef CONFIG_PNP
-static int pnp_registered;
-static int __initdata pnp = 1;
-/*
-static int __initdata uart401 = 0;
-*/
-#else
-static int __initdata pnp = 0;
-#endif
-
-module_param_hw(io, int, ioport, 000);
-MODULE_PARM_DESC(io, "Soundblaster i/o base address (0x220,0x240,0x260,0x280)");
-module_param_hw(irq, int, irq, 000);
-MODULE_PARM_DESC(irq, "IRQ (5,7,9,10)");
-module_param_hw(dma, int, dma, 000);
-MODULE_PARM_DESC(dma, "8-bit DMA channel (0,1,3)");
-module_param_hw(dma16, int, dma, 000);
-MODULE_PARM_DESC(dma16, "16-bit DMA channel (5,6,7)");
-module_param_hw(mpu_io, int, ioport, 000);
-MODULE_PARM_DESC(mpu_io, "MPU base address");
-module_param(type, int, 000);
-MODULE_PARM_DESC(type, "You can set this to a specific card type (doesn't " \
- "work with pnp)");
-module_param(sm_games, int, 000);
-MODULE_PARM_DESC(sm_games, "Enable support for Logitech soundman games " \
- "(doesn't work with pnp)");
-module_param(esstype, int, 000);
-MODULE_PARM_DESC(esstype, "ESS chip type (doesn't work with pnp)");
-module_param(acer, int, 000);
-MODULE_PARM_DESC(acer, "Set this to detect cards in some ACER notebooks "\
- "(doesn't work with pnp)");
-
-#ifdef CONFIG_PNP
-module_param(pnp, int, 000);
-MODULE_PARM_DESC(pnp, "Went set to 0 will disable detection using PnP. "\
- "Default is 1.\n");
-/* Not done yet.... */
-/*
-module_param(uart401, int, 000);
-MODULE_PARM_DESC(uart401, "When set to 1, will attempt to detect and enable "\
- "the MPU on some clones");
-*/
-#endif /* CONFIG_PNP */
-
-/* OSS subsystem card registration shared by PnP and legacy routines */
-static int sb_register_oss(struct sb_card_config *scc, struct sb_module_options *sbmo)
-{
- if (!request_region(scc->conf.io_base, 16, "soundblaster")) {
- printk(KERN_ERR "sb: ports busy.\n");
- kfree(scc);
- return -EBUSY;
- }
-
- if (!sb_dsp_detect(&scc->conf, 0, 0, sbmo)) {
- release_region(scc->conf.io_base, 16);
- printk(KERN_ERR "sb: Failed DSP Detect.\n");
- kfree(scc);
- return -ENODEV;
- }
- if(!sb_dsp_init(&scc->conf, THIS_MODULE)) {
- printk(KERN_ERR "sb: Failed DSP init.\n");
- kfree(scc);
- return -ENODEV;
- }
- if(scc->mpucnf.io_base > 0) {
- scc->mpu = 1;
- printk(KERN_INFO "sb: Turning on MPU\n");
- if(!probe_sbmpu(&scc->mpucnf, THIS_MODULE))
- scc->mpu = 0;
- }
-
- return 1;
-}
-
-static void sb_unload(struct sb_card_config *scc)
-{
- sb_dsp_unload(&scc->conf, 0);
- if(scc->mpu)
- unload_sbmpu(&scc->mpucnf);
- kfree(scc);
-}
-
-/* Register legacy card with OSS subsystem */
-static int __init sb_init_legacy(void)
-{
- struct sb_module_options sbmo = {0};
-
- if((legacy = kzalloc(sizeof(struct sb_card_config), GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "sb: Error: Could not allocate memory\n");
- return -ENOMEM;
- }
-
- legacy->conf.io_base = io;
- legacy->conf.irq = irq;
- legacy->conf.dma = dma;
- legacy->conf.dma2 = dma16;
- legacy->conf.card_subtype = type;
-
- legacy->mpucnf.io_base = mpu_io;
- legacy->mpucnf.irq = -1;
- legacy->mpucnf.dma = -1;
- legacy->mpucnf.dma2 = -1;
-
- sbmo.esstype = esstype;
- sbmo.sm_games = sm_games;
- sbmo.acer = acer;
-
- return sb_register_oss(legacy, &sbmo);
-}
-
-#ifdef CONFIG_PNP
-
-/* Populate the OSS subsystem structures with information from PnP */
-static void sb_dev2cfg(struct pnp_dev *dev, struct sb_card_config *scc)
-{
- scc->conf.io_base = -1;
- scc->conf.irq = -1;
- scc->conf.dma = -1;
- scc->conf.dma2 = -1;
- scc->mpucnf.io_base = -1;
- scc->mpucnf.irq = -1;
- scc->mpucnf.dma = -1;
- scc->mpucnf.dma2 = -1;
-
- /* All clones lay out their PnP tables differently and some use
- different logical devices for the MPU */
- if(!strncmp("CTL",scc->card_id,3)) {
- scc->conf.io_base = pnp_port_start(dev,0);
- scc->conf.irq = pnp_irq(dev,0);
- scc->conf.dma = pnp_dma(dev,0);
- scc->conf.dma2 = pnp_dma(dev,1);
- scc->mpucnf.io_base = pnp_port_start(dev,1);
- return;
- }
- if(!strncmp("tBA",scc->card_id,3)) {
- scc->conf.io_base = pnp_port_start(dev,0);
- scc->conf.irq = pnp_irq(dev,0);
- scc->conf.dma = pnp_dma(dev,0);
- scc->conf.dma2 = pnp_dma(dev,1);
- return;
- }
- if(!strncmp("ESS",scc->card_id,3)) {
- scc->conf.io_base = pnp_port_start(dev,0);
- scc->conf.irq = pnp_irq(dev,0);
- scc->conf.dma = pnp_dma(dev,0);
- scc->conf.dma2 = pnp_dma(dev,1);
- scc->mpucnf.io_base = pnp_port_start(dev,2);
- return;
- }
- if(!strncmp("CMI",scc->card_id,3)) {
- scc->conf.io_base = pnp_port_start(dev,0);
- scc->conf.irq = pnp_irq(dev,0);
- scc->conf.dma = pnp_dma(dev,0);
- scc->conf.dma2 = pnp_dma(dev,1);
- return;
- }
- if(!strncmp("RWB",scc->card_id,3)) {
- scc->conf.io_base = pnp_port_start(dev,0);
- scc->conf.irq = pnp_irq(dev,0);
- scc->conf.dma = pnp_dma(dev,0);
- return;
- }
- if(!strncmp("ALS",scc->card_id,3)) {
- if(!strncmp("ALS0007",scc->card_id,7)) {
- scc->conf.io_base = pnp_port_start(dev,0);
- scc->conf.irq = pnp_irq(dev,0);
- scc->conf.dma = pnp_dma(dev,0);
- } else {
- scc->conf.io_base = pnp_port_start(dev,0);
- scc->conf.irq = pnp_irq(dev,0);
- scc->conf.dma = pnp_dma(dev,1);
- scc->conf.dma2 = pnp_dma(dev,0);
- }
- return;
- }
- if(!strncmp("RTL",scc->card_id,3)) {
- scc->conf.io_base = pnp_port_start(dev,0);
- scc->conf.irq = pnp_irq(dev,0);
- scc->conf.dma = pnp_dma(dev,1);
- scc->conf.dma2 = pnp_dma(dev,0);
- }
-}
-
-static unsigned int sb_pnp_devices;
-
-/* Probe callback function for the PnP API */
-static int sb_pnp_probe(struct pnp_card_link *card, const struct pnp_card_device_id *card_id)
-{
- struct sb_card_config *scc;
- struct sb_module_options sbmo = {0}; /* Default to 0 for PnP */
- struct pnp_dev *dev = pnp_request_card_device(card, card_id->devs[0].id, NULL);
-
- if(!dev){
- return -EBUSY;
- }
-
- if((scc = kzalloc(sizeof(struct sb_card_config), GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "sb: Error: Could not allocate memory\n");
- return -ENOMEM;
- }
-
- printk(KERN_INFO "sb: PnP: Found Card Named = \"%s\", Card PnP id = " \
- "%s, Device PnP id = %s\n", card->card->name, card_id->id,
- dev->id->id);
-
- scc->card_id = card_id->id;
- scc->dev_id = dev->id->id;
- sb_dev2cfg(dev, scc);
-
- printk(KERN_INFO "sb: PnP: Detected at: io=0x%x, irq=%d, " \
- "dma=%d, dma16=%d\n", scc->conf.io_base, scc->conf.irq,
- scc->conf.dma, scc->conf.dma2);
-
- pnp_set_card_drvdata(card, scc);
- sb_pnp_devices++;
-
- return sb_register_oss(scc, &sbmo);
-}
-
-static void sb_pnp_remove(struct pnp_card_link *card)
-{
- struct sb_card_config *scc = pnp_get_card_drvdata(card);
-
- if(!scc)
- return;
-
- printk(KERN_INFO "sb: PnP: Removing %s\n", scc->card_id);
-
- sb_unload(scc);
-}
-
-static struct pnp_card_driver sb_pnp_driver = {
- .name = "OSS SndBlstr", /* 16 character limit */
- .id_table = sb_pnp_card_table,
- .probe = sb_pnp_probe,
- .remove = sb_pnp_remove,
-};
-MODULE_DEVICE_TABLE(pnp_card, sb_pnp_card_table);
-#endif /* CONFIG_PNP */
-
-static void sb_unregister_all(void)
-{
-#ifdef CONFIG_PNP
- if (pnp_registered)
- pnp_unregister_card_driver(&sb_pnp_driver);
-#endif
-}
-
-static int __init sb_init(void)
-{
- int lres = 0;
- int pres = 0;
-
- printk(KERN_INFO "sb: Init: Starting Probe...\n");
-
- if(io != -1 && irq != -1 && dma != -1) {
- printk(KERN_INFO "sb: Probing legacy card with io=%x, "\
- "irq=%d, dma=%d, dma16=%d\n",io, irq, dma, dma16);
- lres = sb_init_legacy();
- } else if((io != -1 || irq != -1 || dma != -1) ||
- (!pnp && (io == -1 && irq == -1 && dma == -1)))
- printk(KERN_ERR "sb: Error: At least io, irq, and dma "\
- "must be set for legacy cards.\n");
-
-#ifdef CONFIG_PNP
- if(pnp) {
- int err = pnp_register_card_driver(&sb_pnp_driver);
- if (!err)
- pnp_registered = 1;
- pres = sb_pnp_devices;
- }
-#endif
- printk(KERN_INFO "sb: Init: Done\n");
-
- /* If either PnP or Legacy registered a card then return
- * success */
- if (pres == 0 && lres <= 0) {
- sb_unregister_all();
- return -ENODEV;
- }
- return 0;
-}
-
-static void __exit sb_exit(void)
-{
- printk(KERN_INFO "sb: Unloading...\n");
-
- /* Unload legacy card */
- if (legacy) {
- printk (KERN_INFO "sb: Unloading legacy card\n");
- sb_unload(legacy);
- }
-
- sb_unregister_all();
-
- vfree(smw_free);
- smw_free = NULL;
-}
-
-module_init(sb_init);
-module_exit(sb_exit);
diff --git a/sound/oss/sb_card.h b/sound/oss/sb_card.h
deleted file mode 100644
index 5535cff800df..000000000000
--- a/sound/oss/sb_card.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * sound/oss/sb_card.h
- *
- * This file is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this
- * software for more info.
- *
- * 02-05-2002 Original Release, Paul Laufer <paul@laufernet.com>
- */
-
-struct sb_card_config {
- struct address_info conf;
- struct address_info mpucnf;
- const char *card_id;
- const char *dev_id;
- int mpu;
-};
-
-#ifdef CONFIG_PNP
-
-/*
- * SoundBlaster PnP tables and structures.
- */
-
-/* Card PnP ID Table */
-static struct pnp_card_device_id sb_pnp_card_table[] = {
- /* Sound Blaster 16 */
- {.id = "CTL0024", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster 16 */
- {.id = "CTL0025", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster 16 */
- {.id = "CTL0026", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster 16 */
- {.id = "CTL0027", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster 16 */
- {.id = "CTL0028", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster 16 */
- {.id = "CTL0029", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster 16 */
- {.id = "CTL002a", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster 16 */
- {.id = "CTL002b", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster 16 */
- {.id = "CTL002c", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster 16 */
- {.id = "CTL00ed", .driver_data = 0, .devs = { {.id="CTL0041"}, } },
- /* Sound Blaster 16 */
- {.id = "CTL0086", .driver_data = 0, .devs = { {.id="CTL0041"}, } },
- /* Sound Blaster Vibra16S */
- {.id = "CTL0051", .driver_data = 0, .devs = { {.id="CTL0001"}, } },
- /* Sound Blaster Vibra16C */
- {.id = "CTL0070", .driver_data = 0, .devs = { {.id="CTL0001"}, } },
- /* Sound Blaster Vibra16CL */
- {.id = "CTL0080", .driver_data = 0, .devs = { {.id="CTL0041"}, } },
- /* Sound Blaster Vibra16CL */
- {.id = "CTL00F0", .driver_data = 0, .devs = { {.id="CTL0043"}, } },
- /* Sound Blaster AWE 32 */
- {.id = "CTL0039", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster AWE 32 */
- {.id = "CTL0042", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster AWE 32 */
- {.id = "CTL0043", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster AWE 32 */
- {.id = "CTL0044", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster AWE 32 */
- {.id = "CTL0045", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster AWE 32 */
- {.id = "CTL0046", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster AWE 32 */
- {.id = "CTL0047", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster AWE 32 */
- {.id = "CTL0048", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster AWE 32 */
- {.id = "CTL0054", .driver_data = 0, .devs = { {.id="CTL0031"}, } },
- /* Sound Blaster AWE 32 */
- {.id = "CTL009C", .driver_data = 0, .devs = { {.id="CTL0041"}, } },
- /* Creative SB32 PnP */
- {.id = "CTL009F", .driver_data = 0, .devs = { {.id="CTL0041"}, } },
- /* Sound Blaster AWE 64 */
- {.id = "CTL009D", .driver_data = 0, .devs = { {.id="CTL0042"}, } },
- /* Sound Blaster AWE 64 Gold */
- {.id = "CTL009E", .driver_data = 0, .devs = { {.id="CTL0044"}, } },
- /* Sound Blaster AWE 64 Gold */
- {.id = "CTL00B2", .driver_data = 0, .devs = { {.id="CTL0044"}, } },
- /* Sound Blaster AWE 64 */
- {.id = "CTL00C1", .driver_data = 0, .devs = { {.id="CTL0042"}, } },
- /* Sound Blaster AWE 64 */
- {.id = "CTL00C3", .driver_data = 0, .devs = { {.id="CTL0045"}, } },
- /* Sound Blaster AWE 64 */
- {.id = "CTL00C5", .driver_data = 0, .devs = { {.id="CTL0045"}, } },
- /* Sound Blaster AWE 64 */
- {.id = "CTL00C7", .driver_data = 0, .devs = { {.id="CTL0045"}, } },
- /* Sound Blaster AWE 64 */
- {.id = "CTL00E4", .driver_data = 0, .devs = { {.id="CTL0045"}, } },
- /* Sound Blaster AWE 64 */
- {.id = "CTL00E9", .driver_data = 0, .devs = { {.id="CTL0045"}, } },
- /* ESS 1868 */
- {.id = "ESS0968", .driver_data = 0, .devs = { {.id="ESS0968"}, } },
- /* ESS 1868 */
- {.id = "ESS1868", .driver_data = 0, .devs = { {.id="ESS1868"}, } },
- /* ESS 1868 */
- {.id = "ESS1868", .driver_data = 0, .devs = { {.id="ESS8611"}, } },
- /* ESS 1869 PnP AudioDrive */
- {.id = "ESS0003", .driver_data = 0, .devs = { {.id="ESS1869"}, } },
- /* ESS 1869 */
- {.id = "ESS1869", .driver_data = 0, .devs = { {.id="ESS1869"}, } },
- /* ESS 1878 */
- {.id = "ESS1878", .driver_data = 0, .devs = { {.id="ESS1878"}, } },
- /* ESS 1879 */
- {.id = "ESS1879", .driver_data = 0, .devs = { {.id="ESS1879"}, } },
- /* CMI 8330 SoundPRO */
- {.id = "CMI0001", .driver_data = 0, .devs = { {.id="@X@0001"},
- {.id="@H@0001"},
- {.id="@@@0001"}, } },
- /* Diamond DT0197H */
- {.id = "RWR1688", .driver_data = 0, .devs = { {.id="@@@0001"},
- {.id="@X@0001"},
- {.id="@H@0001"}, } },
- /* ALS007 */
- {.id = "ALS0007", .driver_data = 0, .devs = { {.id="@@@0001"},
- {.id="@X@0001"},
- {.id="@H@0001"}, } },
- /* ALS100 */
- {.id = "ALS0001", .driver_data = 0, .devs = { {.id="@@@0001"},
- {.id="@X@0001"},
- {.id="@H@0001"}, } },
- /* ALS110 */
- {.id = "ALS0110", .driver_data = 0, .devs = { {.id="@@@1001"},
- {.id="@X@1001"},
- {.id="@H@0001"}, } },
- /* ALS120 */
- {.id = "ALS0120", .driver_data = 0, .devs = { {.id="@@@2001"},
- {.id="@X@2001"},
- {.id="@H@0001"}, } },
- /* ALS200 */
- {.id = "ALS0200", .driver_data = 0, .devs = { {.id="@@@0020"},
- {.id="@X@0030"},
- {.id="@H@0001"}, } },
- /* ALS200 */
- {.id = "RTL3000", .driver_data = 0, .devs = { {.id="@@@2001"},
- {.id="@X@2001"},
- {.id="@H@0001"}, } },
- /* Sound Blaster 16 (Virtual PC 2004) */
- {.id = "tBA03b0", .driver_data = 0, .devs = { {.id="PNPb003"}, } },
- /* -end- */
- {.id = "", }
-};
-
-#endif
diff --git a/sound/oss/sb_common.c b/sound/oss/sb_common.c
deleted file mode 100644
index 3d50fb4236ed..000000000000
--- a/sound/oss/sb_common.c
+++ /dev/null
@@ -1,1287 +0,0 @@
-/*
- * sound/oss/sb_common.c
- *
- * Common routines for Sound Blaster compatible cards.
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- *
- * Daniel J. Rodriksson: Modified sbintr to handle 8 and 16 bit interrupts
- * for full duplex support ( only sb16 by now )
- * Rolf Fokkens: Added (BETA?) support for ES1887 chips.
- * (fokkensr@vertis.nl) Which means: You can adjust the recording levels.
- *
- * 2000/01/18 - separated sb_card and sb_common -
- * Jeff Garzik <jgarzik@pobox.com>
- *
- * 2000/09/18 - got rid of attach_uart401
- * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
- *
- * 2001/01/26 - replaced CLI/STI with spinlocks
- * Chris Rankin <rankinc@zipworld.com.au>
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-
-#include "sound_config.h"
-#include "sound_firmware.h"
-
-#include "mpu401.h"
-
-#include "sb_mixer.h"
-#include "sb.h"
-#include "sb_ess.h"
-
-/*
- * global module flag
- */
-
-int sb_be_quiet;
-
-static sb_devc *detected_devc; /* For communication from probe to init */
-static sb_devc *last_devc; /* For MPU401 initialization */
-
-static unsigned char jazz_irq_bits[] = {
- 0, 0, 2, 3, 0, 1, 0, 4, 0, 2, 5, 0, 0, 0, 0, 6
-};
-
-static unsigned char jazz_dma_bits[] = {
- 0, 1, 0, 2, 0, 3, 0, 4
-};
-
-void *smw_free;
-
-/*
- * Jazz16 chipset specific control variables
- */
-
-static int jazz16_base; /* Not detected */
-static unsigned char jazz16_bits; /* I/O relocation bits */
-static DEFINE_SPINLOCK(jazz16_lock);
-
-/*
- * Logitech Soundman Wave specific initialization code
- */
-
-#ifdef SMW_MIDI0001_INCLUDED
-#include "smw-midi0001.h"
-#else
-static unsigned char *smw_ucode;
-static int smw_ucodeLen;
-
-#endif
-
-static sb_devc *last_sb; /* Last sb loaded */
-
-int sb_dsp_command(sb_devc * devc, unsigned char val)
-{
- int i;
- unsigned long limit;
-
- limit = jiffies + HZ / 10; /* Timeout */
-
- /*
- * Note! The i<500000 is an emergency exit. sb_dsp_command() is sometimes
- * called while interrupts are disabled. This means that the timer is
- * disabled also. However, the timeout situation is an abnormal condition.
- * Normally the DSP should be ready to accept commands after just a couple
- * of loops.
- */
-
- for (i = 0; i < 500000 && (limit-jiffies)>0; i++)
- {
- if ((inb(DSP_STATUS) & 0x80) == 0)
- {
- outb((val), DSP_COMMAND);
- return 1;
- }
- }
- printk(KERN_WARNING "Sound Blaster: DSP command(%x) timeout.\n", val);
- return 0;
-}
-
-int sb_dsp_get_byte(sb_devc * devc)
-{
- int i;
-
- for (i = 1000; i; i--)
- {
- if (inb(DSP_DATA_AVAIL) & 0x80)
- return inb(DSP_READ);
- }
- return 0xffff;
-}
-
-static void sb_intr (sb_devc *devc)
-{
- int status;
- unsigned char src = 0xff;
-
- if (devc->model == MDL_SB16)
- {
- src = sb_getmixer(devc, IRQ_STAT); /* Interrupt source register */
-
- if (src & 4) /* MPU401 interrupt */
- if(devc->midi_irq_cookie)
- uart401intr(devc->irq, devc->midi_irq_cookie);
-
- if (!(src & 3))
- return; /* Not a DSP interrupt */
- }
- if (devc->intr_active && (!devc->fullduplex || (src & 0x01)))
- {
- switch (devc->irq_mode)
- {
- case IMODE_OUTPUT:
- DMAbuf_outputintr(devc->dev, 1);
- break;
-
- case IMODE_INPUT:
- DMAbuf_inputintr(devc->dev);
- break;
-
- case IMODE_INIT:
- break;
-
- case IMODE_MIDI:
- sb_midi_interrupt(devc);
- break;
-
- default:
- /* printk(KERN_WARNING "Sound Blaster: Unexpected interrupt\n"); */
- ;
- }
- }
- else if (devc->intr_active_16 && (src & 0x02))
- {
- switch (devc->irq_mode_16)
- {
- case IMODE_OUTPUT:
- DMAbuf_outputintr(devc->dev, 1);
- break;
-
- case IMODE_INPUT:
- DMAbuf_inputintr(devc->dev);
- break;
-
- case IMODE_INIT:
- break;
-
- default:
- /* printk(KERN_WARNING "Sound Blaster: Unexpected interrupt\n"); */
- ;
- }
- }
- /*
- * Acknowledge interrupts
- */
-
- if (src & 0x01)
- status = inb(DSP_DATA_AVAIL);
-
- if (devc->model == MDL_SB16 && src & 0x02)
- status = inb(DSP_DATA_AVL16);
-}
-
-static void pci_intr(sb_devc *devc)
-{
- int src = inb(devc->pcibase+0x1A);
- src&=3;
- if(src)
- sb_intr(devc);
-}
-
-static irqreturn_t sbintr(int irq, void *dev_id)
-{
- sb_devc *devc = dev_id;
-
- devc->irq_ok = 1;
-
- switch (devc->model) {
- case MDL_ESSPCI:
- pci_intr (devc);
- break;
-
- case MDL_ESS:
- ess_intr (devc);
- break;
- default:
- sb_intr (devc);
- break;
- }
- return IRQ_HANDLED;
-}
-
-int sb_dsp_reset(sb_devc * devc)
-{
- int loopc;
-
- if (devc->model == MDL_ESS) return ess_dsp_reset (devc);
-
- /* This is only for non-ESS chips */
-
- outb(1, DSP_RESET);
-
- udelay(10);
- outb(0, DSP_RESET);
- udelay(30);
-
- for (loopc = 0; loopc < 1000 && !(inb(DSP_DATA_AVAIL) & 0x80); loopc++);
-
- if (inb(DSP_READ) != 0xAA)
- {
- DDB(printk("sb: No response to RESET\n"));
- return 0; /* Sorry */
- }
-
- return 1;
-}
-
-static void dsp_get_vers(sb_devc * devc)
-{
- int i;
-
- unsigned long flags;
-
- DDB(printk("Entered dsp_get_vers()\n"));
- spin_lock_irqsave(&devc->lock, flags);
- devc->major = devc->minor = 0;
- sb_dsp_command(devc, 0xe1); /* Get version */
-
- for (i = 100000; i; i--)
- {
- if (inb(DSP_DATA_AVAIL) & 0x80)
- {
- if (devc->major == 0)
- devc->major = inb(DSP_READ);
- else
- {
- devc->minor = inb(DSP_READ);
- break;
- }
- }
- }
- spin_unlock_irqrestore(&devc->lock, flags);
- DDB(printk("DSP version %d.%02d\n", devc->major, devc->minor));
-}
-
-static int sb16_set_dma_hw(sb_devc * devc)
-{
- int bits;
-
- if (devc->dma8 != 0 && devc->dma8 != 1 && devc->dma8 != 3)
- {
- printk(KERN_ERR "SB16: Invalid 8 bit DMA (%d)\n", devc->dma8);
- return 0;
- }
- bits = (1 << devc->dma8);
-
- if (devc->dma16 >= 5 && devc->dma16 <= 7)
- bits |= (1 << devc->dma16);
-
- sb_setmixer(devc, DMA_NR, bits);
- return 1;
-}
-
-static void sb16_set_mpu_port(sb_devc * devc, struct address_info *hw_config)
-{
- /*
- * This routine initializes the MIDI port setup register of the SB Vibra (CT2502).
- */
- unsigned char bits = sb_getmixer(devc, 0x84) & ~0x06;
-
- switch (hw_config->io_base)
- {
- case 0x300:
- sb_setmixer(devc, 0x84, bits | 0x04);
- break;
-
- case 0x330:
- sb_setmixer(devc, 0x84, bits | 0x00);
- break;
-
- default:
- sb_setmixer(devc, 0x84, bits | 0x02); /* Disable MPU */
- printk(KERN_ERR "SB16: Invalid MIDI I/O port %x\n", hw_config->io_base);
- }
-}
-
-static int sb16_set_irq_hw(sb_devc * devc, int level)
-{
- int ival;
-
- switch (level)
- {
- case 5:
- ival = 2;
- break;
- case 7:
- ival = 4;
- break;
- case 9:
- ival = 1;
- break;
- case 10:
- ival = 8;
- break;
- default:
- printk(KERN_ERR "SB16: Invalid IRQ%d\n", level);
- return 0;
- }
- sb_setmixer(devc, IRQ_NR, ival);
- return 1;
-}
-
-static void relocate_Jazz16(sb_devc * devc, struct address_info *hw_config)
-{
- unsigned char bits = 0;
- unsigned long flags;
-
- if (jazz16_base != 0 && jazz16_base != hw_config->io_base)
- return;
-
- switch (hw_config->io_base)
- {
- case 0x220:
- bits = 1;
- break;
- case 0x240:
- bits = 2;
- break;
- case 0x260:
- bits = 3;
- break;
- default:
- return;
- }
- bits = jazz16_bits = bits << 5;
- jazz16_base = hw_config->io_base;
-
- /*
- * Magic wake up sequence by writing to 0x201 (aka Joystick port)
- */
- spin_lock_irqsave(&jazz16_lock, flags);
- outb((0xAF), 0x201);
- outb((0x50), 0x201);
- outb((bits), 0x201);
- spin_unlock_irqrestore(&jazz16_lock, flags);
-}
-
-static int init_Jazz16(sb_devc * devc, struct address_info *hw_config)
-{
- char name[100];
- /*
- * First check that the card has a Jazz16 chip. It identifies itself
- * by returning 0x12 in response to DSP command 0xfa.
- */
-
- if (!sb_dsp_command(devc, 0xfa))
- return 0;
-
- if (sb_dsp_get_byte(devc) != 0x12)
- return 0;
-
- /*
- * OK so far. Now configure the IRQ and DMA channel used by the card.
- */
- if (hw_config->irq < 1 || hw_config->irq > 15 || jazz_irq_bits[hw_config->irq] == 0)
- {
- printk(KERN_ERR "Jazz16: Invalid interrupt (IRQ%d)\n", hw_config->irq);
- return 0;
- }
- if (hw_config->dma < 0 || hw_config->dma > 3 || jazz_dma_bits[hw_config->dma] == 0)
- {
- printk(KERN_ERR "Jazz16: Invalid 8 bit DMA (DMA%d)\n", hw_config->dma);
- return 0;
- }
- if (hw_config->dma2 < 0)
- {
- printk(KERN_ERR "Jazz16: No 16 bit DMA channel defined\n");
- return 0;
- }
- if (hw_config->dma2 < 5 || hw_config->dma2 > 7 || jazz_dma_bits[hw_config->dma2] == 0)
- {
- printk(KERN_ERR "Jazz16: Invalid 16 bit DMA (DMA%d)\n", hw_config->dma2);
- return 0;
- }
- devc->dma16 = hw_config->dma2;
-
- if (!sb_dsp_command(devc, 0xfb))
- return 0;
-
- if (!sb_dsp_command(devc, jazz_dma_bits[hw_config->dma] |
- (jazz_dma_bits[hw_config->dma2] << 4)))
- return 0;
-
- if (!sb_dsp_command(devc, jazz_irq_bits[hw_config->irq]))
- return 0;
-
- /*
- * Now we have configured a standard Jazz16 device.
- */
- devc->model = MDL_JAZZ;
- strcpy(name, "Jazz16");
-
- hw_config->name = "Jazz16";
- devc->caps |= SB_NO_MIDI;
- return 1;
-}
-
-static void relocate_ess1688(sb_devc * devc)
-{
- unsigned char bits;
-
- switch (devc->base)
- {
- case 0x220:
- bits = 0x04;
- break;
- case 0x230:
- bits = 0x05;
- break;
- case 0x240:
- bits = 0x06;
- break;
- case 0x250:
- bits = 0x07;
- break;
- default:
- return; /* Wrong port */
- }
-
- DDB(printk("Doing ESS1688 address selection\n"));
-
- /*
- * The ES1688 supports two alternative methods for software address configuration.
- * First try the so-called Read-Sequence-Key method.
- */
-
- /* Reset the sequence logic */
- inb(0x229);
- inb(0x229);
- inb(0x229);
-
- /* Perform the read sequence */
- inb(0x22b);
- inb(0x229);
- inb(0x22b);
- inb(0x229);
- inb(0x229);
- inb(0x22b);
- inb(0x229);
-
- /* Select the base address by reading from it. Then probe using the port. */
- inb(devc->base);
- if (sb_dsp_reset(devc)) /* Bingo */
- return;
-
-#if 0 /* This causes system lockups (Nokia 386/25 at least) */
- /*
- * The last resort is the system control register method.
- */
-
- outb((0x00), 0xfb); /* 0xFB is the unlock register */
- outb((0x00), 0xe0); /* Select index 0 */
- outb((bits), 0xe1); /* Write the config bits */
- outb((0x00), 0xf9); /* 0xF9 is the lock register */
-#endif
-}
-
-int sb_dsp_detect(struct address_info *hw_config, int pci, int pciio, struct sb_module_options *sbmo)
-{
- sb_devc sb_info;
- sb_devc *devc = &sb_info;
-
- memset((char *) &sb_info, 0, sizeof(sb_info)); /* Zero everything */
-
- /* Copy module options in place */
- if(sbmo) memcpy(&devc->sbmo, sbmo, sizeof(struct sb_module_options));
-
- sb_info.my_mididev = -1;
- sb_info.my_mixerdev = -1;
- sb_info.dev = -1;
-
- /*
- * Initialize variables
- */
-
- DDB(printk("sb_dsp_detect(%x) entered\n", hw_config->io_base));
-
- spin_lock_init(&devc->lock);
- devc->type = hw_config->card_subtype;
-
- devc->base = hw_config->io_base;
- devc->irq = hw_config->irq;
- devc->dma8 = hw_config->dma;
-
- devc->dma16 = -1;
- devc->pcibase = pciio;
-
- if(pci == SB_PCI_ESSMAESTRO)
- {
- devc->model = MDL_ESSPCI;
- devc->caps |= SB_PCI_IRQ;
- hw_config->driver_use_1 |= SB_PCI_IRQ;
- hw_config->card_subtype = MDL_ESSPCI;
- }
-
- if(pci == SB_PCI_YAMAHA)
- {
- devc->model = MDL_YMPCI;
- devc->caps |= SB_PCI_IRQ;
- hw_config->driver_use_1 |= SB_PCI_IRQ;
- hw_config->card_subtype = MDL_YMPCI;
-
- printk("Yamaha PCI mode.\n");
- }
-
- if (devc->sbmo.acer)
- {
- unsigned long flags;
-
- spin_lock_irqsave(&devc->lock, flags);
- inb(devc->base + 0x09);
- inb(devc->base + 0x09);
- inb(devc->base + 0x09);
- inb(devc->base + 0x0b);
- inb(devc->base + 0x09);
- inb(devc->base + 0x0b);
- inb(devc->base + 0x09);
- inb(devc->base + 0x09);
- inb(devc->base + 0x0b);
- inb(devc->base + 0x09);
- inb(devc->base + 0x00);
- spin_unlock_irqrestore(&devc->lock, flags);
- }
- /*
- * Detect the device
- */
-
- if (sb_dsp_reset(devc))
- dsp_get_vers(devc);
- else
- devc->major = 0;
-
- if (devc->type == 0 || devc->type == MDL_JAZZ || devc->type == MDL_SMW)
- if (devc->major == 0 || (devc->major == 3 && devc->minor == 1))
- relocate_Jazz16(devc, hw_config);
-
- if (devc->major == 0 && (devc->type == MDL_ESS || devc->type == 0))
- relocate_ess1688(devc);
-
- if (!sb_dsp_reset(devc))
- {
- DDB(printk("SB reset failed\n"));
-#ifdef MODULE
- printk(KERN_INFO "sb: dsp reset failed.\n");
-#endif
- return 0;
- }
- if (devc->major == 0)
- dsp_get_vers(devc);
-
- if (devc->major == 3 && devc->minor == 1)
- {
- if (devc->type == MDL_AZTECH) /* SG Washington? */
- {
- if (sb_dsp_command(devc, 0x09))
- if (sb_dsp_command(devc, 0x00)) /* Enter WSS mode */
- {
- int i;
-
- /* Have some delay */
- for (i = 0; i < 10000; i++)
- inb(DSP_DATA_AVAIL);
- devc->caps = SB_NO_AUDIO | SB_NO_MIDI; /* Mixer only */
- devc->model = MDL_AZTECH;
- }
- }
- }
-
- if(devc->type == MDL_ESSPCI)
- devc->model = MDL_ESSPCI;
-
- if(devc->type == MDL_YMPCI)
- {
- printk("YMPCI selected\n");
- devc->model = MDL_YMPCI;
- }
-
- /*
- * Save device information for sb_dsp_init()
- */
-
-
- detected_devc = kmemdup(devc, sizeof(sb_devc), GFP_KERNEL);
- if (detected_devc == NULL)
- {
- printk(KERN_ERR "sb: Can't allocate memory for device information\n");
- return 0;
- }
- MDB(printk(KERN_INFO "SB %d.%02d detected OK (%x)\n", devc->major, devc->minor, hw_config->io_base));
- return 1;
-}
-
-int sb_dsp_init(struct address_info *hw_config, struct module *owner)
-{
- sb_devc *devc;
- char name[100];
- extern int sb_be_quiet;
- int mixer22, mixer30;
-
-/*
- * Check if we had detected a SB device earlier
- */
- DDB(printk("sb_dsp_init(%x) entered\n", hw_config->io_base));
- name[0] = 0;
-
- if (detected_devc == NULL)
- {
- MDB(printk("No detected device\n"));
- return 0;
- }
- devc = detected_devc;
- detected_devc = NULL;
-
- if (devc->base != hw_config->io_base)
- {
- DDB(printk("I/O port mismatch\n"));
- release_region(devc->base, 16);
- return 0;
- }
- /*
- * Now continue initialization of the device
- */
-
- devc->caps = hw_config->driver_use_1;
-
- if (!((devc->caps & SB_NO_AUDIO) && (devc->caps & SB_NO_MIDI)) && hw_config->irq > 0)
- { /* IRQ setup */
-
- /*
- * ESS PCI cards use shared PCI IRQ lines, so we must
- * request the interrupt as shared.
- */
-
- int i=(devc->caps&SB_PCI_IRQ)?IRQF_SHARED:0;
-
- if (request_irq(hw_config->irq, sbintr, i, "soundblaster", devc) < 0)
- {
- printk(KERN_ERR "SB: Can't allocate IRQ%d\n", hw_config->irq);
- release_region(devc->base, 16);
- return 0;
- }
- devc->irq_ok = 0;
-
- if (devc->major == 4)
- if (!sb16_set_irq_hw(devc, devc->irq)) /* Unsupported IRQ */
- {
- free_irq(devc->irq, devc);
- release_region(devc->base, 16);
- return 0;
- }
- if ((devc->type == 0 || devc->type == MDL_ESS) &&
- devc->major == 3 && devc->minor == 1)
- { /* Handle various chipsets which claim they are SB Pro compatible */
- if ((devc->type != 0 && devc->type != MDL_ESS) ||
- !ess_init(devc, hw_config))
- {
- if ((devc->type != 0 && devc->type != MDL_JAZZ &&
- devc->type != MDL_SMW) || !init_Jazz16(devc, hw_config))
- {
- DDB(printk("This is a genuine SB Pro\n"));
- }
- }
- }
- if (devc->major == 4 && devc->minor <= 11 ) /* Won't work */
- devc->irq_ok = 1;
- else
- {
- int n;
-
- for (n = 0; n < 3 && devc->irq_ok == 0; n++)
- {
- if (sb_dsp_command(devc, 0xf2)) /* Cause interrupt immediately */
- {
- int i;
-
- for (i = 0; !devc->irq_ok && i < 10000; i++);
- }
- }
- if (!devc->irq_ok)
- printk(KERN_WARNING "sb: Interrupt test on IRQ%d failed - Probable IRQ conflict\n", devc->irq);
- else
- {
- DDB(printk("IRQ test OK (IRQ%d)\n", devc->irq));
- }
- }
- } /* IRQ setup */
-
- last_sb = devc;
-
- switch (devc->major)
- {
- case 1: /* SB 1.0 or 1.5 */
- devc->model = hw_config->card_subtype = MDL_SB1;
- break;
-
- case 2: /* SB 2.x */
- if (devc->minor == 0)
- devc->model = hw_config->card_subtype = MDL_SB2;
- else
- devc->model = hw_config->card_subtype = MDL_SB201;
- break;
-
- case 3: /* SB Pro and most clones */
- switch (devc->model) {
- case 0:
- devc->model = hw_config->card_subtype = MDL_SBPRO;
- if (hw_config->name == NULL)
- hw_config->name = "Sound Blaster Pro (8 BIT ONLY)";
- break;
- case MDL_ESS:
- ess_dsp_init(devc, hw_config);
- break;
- }
- break;
-
- case 4:
- devc->model = hw_config->card_subtype = MDL_SB16;
- /*
- * ALS007 and ALS100 return DSP version 4.2 and have 2 post-reset !=0
- * registers at 0x3c and 0x4c (output ctrl registers on ALS007) whereas
- * a "standard" SB16 doesn't have a register at 0x4c. ALS100 actively
- * updates register 0x22 whenever 0x30 changes, as per the SB16 spec.
- * Since ALS007 doesn't, this can be used to differentiate the 2 cards.
- */
- if ((devc->minor == 2) && sb_getmixer(devc,0x3c) && sb_getmixer(devc,0x4c))
- {
- mixer30 = sb_getmixer(devc,0x30);
- sb_setmixer(devc,0x22,(mixer22=sb_getmixer(devc,0x22)) & 0x0f);
- sb_setmixer(devc,0x30,0xff);
- /* ALS100 will force 0x30 to 0xf8 like SB16; ALS007 will allow 0xff. */
- /* Register 0x22 & 0xf0 on ALS100 == 0xf0; on ALS007 it == 0x10. */
- if ((sb_getmixer(devc,0x30) != 0xff) || ((sb_getmixer(devc,0x22) & 0xf0) != 0x10))
- {
- devc->submodel = SUBMDL_ALS100;
- if (hw_config->name == NULL)
- hw_config->name = "Sound Blaster 16 (ALS-100)";
- }
- else
- {
- sb_setmixer(devc,0x3c,0x1f); /* Enable all inputs */
- sb_setmixer(devc,0x4c,0x1f);
- sb_setmixer(devc,0x22,mixer22); /* Restore 0x22 to original value */
- devc->submodel = SUBMDL_ALS007;
- if (hw_config->name == NULL)
- hw_config->name = "Sound Blaster 16 (ALS-007)";
- }
- sb_setmixer(devc,0x30,mixer30);
- }
- else if (hw_config->name == NULL)
- hw_config->name = "Sound Blaster 16";
-
- if (hw_config->dma2 == -1)
- devc->dma16 = devc->dma8;
- else if (hw_config->dma2 < 5 || hw_config->dma2 > 7)
- {
- printk(KERN_WARNING "SB16: Bad or missing 16 bit DMA channel\n");
- devc->dma16 = devc->dma8;
- }
- else
- devc->dma16 = hw_config->dma2;
-
- if(!sb16_set_dma_hw(devc)) {
- free_irq(devc->irq, devc);
- release_region(hw_config->io_base, 16);
- return 0;
- }
-
- devc->caps |= SB_NO_MIDI;
- }
-
- if (!(devc->caps & SB_NO_MIXER))
- if (devc->major == 3 || devc->major == 4)
- sb_mixer_init(devc, owner);
-
- if (!(devc->caps & SB_NO_MIDI))
- sb_dsp_midi_init(devc, owner);
-
- if (hw_config->name == NULL)
- hw_config->name = "Sound Blaster (8 BIT/MONO ONLY)";
-
- sprintf(name, "%s (%d.%02d)", hw_config->name, devc->major, devc->minor);
- conf_printf(name, hw_config);
-
- /*
- * Assuming that a sound card is Sound Blaster (compatible) is the most common
- * configuration error and the mother of all problems. Usually sound cards
- * emulate SB Pro but in addition they have a 16 bit native mode which should be
- * used in Unix. See Readme.cards for more information about configuring OSS/Free
- * properly.
- */
- if (devc->model <= MDL_SBPRO)
- {
- if (devc->major == 3 && devc->minor != 1) /* "True" SB Pro should have v3.1 (rare ones may have 3.2). */
- {
- printk(KERN_INFO "This sound card may not be fully Sound Blaster Pro compatible.\n");
- printk(KERN_INFO "In many cases there is another way to configure OSS so that\n");
- printk(KERN_INFO "it works properly with OSS (for example in 16 bit mode).\n");
- printk(KERN_INFO "Please ignore this message if you _really_ have a SB Pro.\n");
- }
- else if (!sb_be_quiet && devc->model == MDL_SBPRO)
- {
- printk(KERN_INFO "SB DSP version is just %d.%02d which means that your card is\n", devc->major, devc->minor);
- printk(KERN_INFO "several years old (8 bit only device) or alternatively the sound driver\n");
- printk(KERN_INFO "is incorrectly configured.\n");
- }
- }
- hw_config->card_subtype = devc->model;
- hw_config->slots[0]=devc->dev;
- last_devc = devc; /* For SB MPU detection */
-
- if (!(devc->caps & SB_NO_AUDIO) && devc->dma8 >= 0)
- {
- if (sound_alloc_dma(devc->dma8, "SoundBlaster8"))
- {
- printk(KERN_WARNING "Sound Blaster: Can't allocate 8 bit DMA channel %d\n", devc->dma8);
- }
- if (devc->dma16 >= 0 && devc->dma16 != devc->dma8)
- {
- if (sound_alloc_dma(devc->dma16, "SoundBlaster16"))
- printk(KERN_WARNING "Sound Blaster: can't allocate 16 bit DMA channel %d.\n", devc->dma16);
- }
- sb_audio_init(devc, name, owner);
- hw_config->slots[0]=devc->dev;
- }
- else
- {
- MDB(printk("Sound Blaster: no audio devices found.\n"));
- }
- return 1;
-}
-
-/* If (sbmpu) below, we allow mpu401 to manage the MIDI devices;
- otherwise we have to unload them. (Andrzej Krzysztofowicz) */
-
-void sb_dsp_unload(struct address_info *hw_config, int sbmpu)
-{
- sb_devc *devc;
-
- devc = audio_devs[hw_config->slots[0]]->devc;
-
- if (devc && devc->base == hw_config->io_base)
- {
- if ((devc->model & MDL_ESS) && devc->pcibase)
- release_region(devc->pcibase, 8);
-
- release_region(devc->base, 16);
-
- if (!(devc->caps & SB_NO_AUDIO))
- {
- sound_free_dma(devc->dma8);
- if (devc->dma16 >= 0)
- sound_free_dma(devc->dma16);
- }
- if (!(devc->caps & SB_NO_AUDIO && devc->caps & SB_NO_MIDI))
- {
- if (devc->irq > 0)
- free_irq(devc->irq, devc);
-
- sb_mixer_unload(devc);
- /* We don't have to do this bit any more; the UART401 is its own
- master -- Krzysztof Halasa */
- /* But we do have to do it if the UART401 is not detected */
- if (!sbmpu)
- sound_unload_mididev(devc->my_mididev);
- sound_unload_audiodev(devc->dev);
- }
- kfree(devc);
- }
- else
- release_region(hw_config->io_base, 16);
-
- kfree(detected_devc);
-}
-
-/*
- * Mixer access routines
- *
- * ES1887 modifications: some mixer registers reside in the
- * range above 0xa0. These must be accessed in another way.
- */
-
-void sb_setmixer(sb_devc * devc, unsigned int port, unsigned int value)
-{
- unsigned long flags;
-
- if (devc->model == MDL_ESS) {
- ess_setmixer (devc, port, value);
- return;
- }
-
- spin_lock_irqsave(&devc->lock, flags);
-
- outb(((unsigned char) (port & 0xff)), MIXER_ADDR);
- udelay(20);
- outb(((unsigned char) (value & 0xff)), MIXER_DATA);
- udelay(20);
-
- spin_unlock_irqrestore(&devc->lock, flags);
-}
-
-unsigned int sb_getmixer(sb_devc * devc, unsigned int port)
-{
- unsigned int val;
- unsigned long flags;
-
- if (devc->model == MDL_ESS) return ess_getmixer (devc, port);
-
- spin_lock_irqsave(&devc->lock, flags);
-
- outb(((unsigned char) (port & 0xff)), MIXER_ADDR);
- udelay(20);
- val = inb(MIXER_DATA);
- udelay(20);
-
- spin_unlock_irqrestore(&devc->lock, flags);
-
- return val;
-}
-
-void sb_chgmixer
- (sb_devc * devc, unsigned int reg, unsigned int mask, unsigned int val)
-{
- int value;
-
- value = sb_getmixer(devc, reg);
- value = (value & ~mask) | (val & mask);
- sb_setmixer(devc, reg, value);
-}
-
-/*
- * MPU401 MIDI initialization.
- */
-
-static void smw_putmem(sb_devc * devc, int base, int addr, unsigned char val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&jazz16_lock, flags); /* NOT the SB card? */
-
- outb((addr & 0xff), base + 1); /* Low address bits */
- outb((addr >> 8), base + 2); /* High address bits */
- outb((val), base); /* Data */
-
- spin_unlock_irqrestore(&jazz16_lock, flags);
-}
-
-static unsigned char smw_getmem(sb_devc * devc, int base, int addr)
-{
- unsigned long flags;
- unsigned char val;
-
- spin_lock_irqsave(&jazz16_lock, flags); /* NOT the SB card? */
-
- outb((addr & 0xff), base + 1); /* Low address bits */
- outb((addr >> 8), base + 2); /* High address bits */
- val = inb(base); /* Data */
-
- spin_unlock_irqrestore(&jazz16_lock, flags);
- return val;
-}
-
-static int smw_midi_init(sb_devc * devc, struct address_info *hw_config)
-{
- int mpu_base = hw_config->io_base;
- int mp_base = mpu_base + 4; /* Microcontroller base */
- int i;
- unsigned char control;
-
-
- /*
- * Reset the microcontroller so that the RAM can be accessed
- */
-
- control = inb(mpu_base + 7);
- outb((control | 3), mpu_base + 7); /* Set last two bits to 1 (?) */
- outb(((control & 0xfe) | 2), mpu_base + 7); /* xxxxxxx0 resets the mc */
-
- mdelay(3); /* Wait at least 1ms */
-
- outb((control & 0xfc), mpu_base + 7); /* xxxxxx00 enables RAM */
-
- /*
- * Detect microcontroller by probing the 8k RAM area
- */
- smw_putmem(devc, mp_base, 0, 0x00);
- smw_putmem(devc, mp_base, 1, 0xff);
- udelay(10);
-
- if (smw_getmem(devc, mp_base, 0) != 0x00 || smw_getmem(devc, mp_base, 1) != 0xff)
- {
- DDB(printk("SM Wave: No microcontroller RAM detected (%02x, %02x)\n", smw_getmem(devc, mp_base, 0), smw_getmem(devc, mp_base, 1)));
- return 0; /* No RAM */
- }
- /*
- * There is RAM, so assume it's really an SM Wave
- */
-
- devc->model = MDL_SMW;
- smw_mixer_init(devc);
-
-#ifdef MODULE
- if (!smw_ucode)
- {
- smw_ucodeLen = mod_firmware_load("/etc/sound/midi0001.bin", (void *) &smw_ucode);
- smw_free = smw_ucode;
- }
-#endif
- if (smw_ucodeLen > 0)
- {
- if (smw_ucodeLen != 8192)
- {
- printk(KERN_ERR "SM Wave: Invalid microcode (MIDI0001.BIN) length\n");
- return 1;
- }
- /*
- * Download microcode
- */
-
- for (i = 0; i < 8192; i++)
- smw_putmem(devc, mp_base, i, smw_ucode[i]);
-
- /*
- * Verify microcode
- */
-
- for (i = 0; i < 8192; i++)
- if (smw_getmem(devc, mp_base, i) != smw_ucode[i])
- {
- printk(KERN_ERR "SM Wave: Microcode verification failed\n");
- return 0;
- }
- }
- control = 0;
-#ifdef SMW_SCSI_IRQ
- /*
- * Set the SCSI interrupt (IRQ2/9, IRQ3 or IRQ10). The SCSI interrupt
- * is disabled by default.
- *
- * FIXME - make this a module option
- *
- * BTW the Zilog 5380 SCSI controller is located at MPU base + 0x10.
- */
- {
- static unsigned char scsi_irq_bits[] = {
- 0, 0, 3, 1, 0, 0, 0, 0, 0, 3, 2, 0, 0, 0, 0, 0
- };
- control |= scsi_irq_bits[SMW_SCSI_IRQ] << 6;
- }
-#endif
-
-#ifdef SMW_OPL4_ENABLE
- /*
- * Make the OPL4 chip visible on the PC bus at 0x380.
- *
- * There is no need to enable this feature since this driver
- * doesn't support OPL4 yet. Also there is no RAM in SM Wave so
- * enabling OPL4 is pretty useless.
- */
- control |= 0x10; /* Uses IRQ12 if bit 0x20 == 0 */
- /* control |= 0x20; Uncomment this if you want to use IRQ7 */
-#endif
- outb((control | 0x03), mpu_base + 7); /* xxxxxx11 restarts */
- hw_config->name = "SoundMan Wave";
- return 1;
-}
-
-static int init_Jazz16_midi(sb_devc * devc, struct address_info *hw_config)
-{
- int mpu_base = hw_config->io_base;
- int sb_base = devc->base;
- int irq = hw_config->irq;
-
- unsigned char bits = 0;
- unsigned long flags;
-
- if (irq < 0)
- irq *= -1;
-
- if (irq < 1 || irq > 15 ||
- jazz_irq_bits[irq] == 0)
- {
- printk(KERN_ERR "Jazz16: Invalid MIDI interrupt (IRQ%d)\n", irq);
- return 0;
- }
- switch (sb_base)
- {
- case 0x220:
- bits = 1;
- break;
- case 0x240:
- bits = 2;
- break;
- case 0x260:
- bits = 3;
- break;
- default:
- return 0;
- }
- bits = jazz16_bits = bits << 5;
- switch (mpu_base)
- {
- case 0x310:
- bits |= 1;
- break;
- case 0x320:
- bits |= 2;
- break;
- case 0x330:
- bits |= 3;
- break;
- default:
- printk(KERN_ERR "Jazz16: Invalid MIDI I/O port %x\n", mpu_base);
- return 0;
- }
- /*
- * Magic wake up sequence by writing to 0x201 (aka Joystick port)
- */
- spin_lock_irqsave(&jazz16_lock, flags);
- outb(0xAF, 0x201);
- outb(0x50, 0x201);
- outb(bits, 0x201);
- spin_unlock_irqrestore(&jazz16_lock, flags);
-
- hw_config->name = "Jazz16";
- smw_midi_init(devc, hw_config);
-
- if (!sb_dsp_command(devc, 0xfb))
- return 0;
-
- if (!sb_dsp_command(devc, jazz_dma_bits[devc->dma8] |
- (jazz_dma_bits[devc->dma16] << 4)))
- return 0;
-
- if (!sb_dsp_command(devc, jazz_irq_bits[devc->irq] |
- (jazz_irq_bits[irq] << 4)))
- return 0;
-
- return 1;
-}
-
-int probe_sbmpu(struct address_info *hw_config, struct module *owner)
-{
- sb_devc *devc = last_devc;
- int ret;
-
- if (last_devc == NULL)
- return 0;
-
- last_devc = NULL;
-
- if (hw_config->io_base <= 0)
- {
- /* The real vibra16 is fine about this, but we have to go
- wipe up after Cyrix again */
-
- if(devc->model == MDL_SB16 && devc->minor >= 12)
- {
- unsigned char bits = sb_getmixer(devc, 0x84) & ~0x06;
- sb_setmixer(devc, 0x84, bits | 0x02); /* Disable MPU */
- }
- return 0;
- }
-
-#if defined(CONFIG_SOUND_MPU401)
- if (devc->model == MDL_ESS)
- {
- struct resource *ports;
- ports = request_region(hw_config->io_base, 2, "mpu401");
- if (!ports) {
- printk(KERN_ERR "sbmpu: I/O port conflict (%x)\n", hw_config->io_base);
- return 0;
- }
- if (!ess_midi_init(devc, hw_config)) {
- release_region(hw_config->io_base, 2);
- return 0;
- }
- hw_config->name = "ESS1xxx MPU";
- devc->midi_irq_cookie = NULL;
- if (!probe_mpu401(hw_config, ports)) {
- release_region(hw_config->io_base, 2);
- return 0;
- }
- attach_mpu401(hw_config, owner);
- if (last_sb->irq == -hw_config->irq)
- last_sb->midi_irq_cookie =
- (void *)(long) hw_config->slots[1];
- return 1;
- }
-#endif
-
- switch (devc->model)
- {
- case MDL_SB16:
- if (hw_config->io_base != 0x300 && hw_config->io_base != 0x330)
- {
- printk(KERN_ERR "SB16: Invalid MIDI port %x\n", hw_config->io_base);
- return 0;
- }
- hw_config->name = "Sound Blaster 16";
- if (hw_config->irq < 3 || hw_config->irq == devc->irq)
- hw_config->irq = -devc->irq;
- if (devc->minor > 12) /* What is Vibra's version??? */
- sb16_set_mpu_port(devc, hw_config);
- break;
-
- case MDL_JAZZ:
- if (hw_config->irq < 3 || hw_config->irq == devc->irq)
- hw_config->irq = -devc->irq;
- if (!init_Jazz16_midi(devc, hw_config))
- return 0;
- break;
-
- case MDL_YMPCI:
- hw_config->name = "Yamaha PCI Legacy";
- printk("Yamaha PCI legacy UART401 check.\n");
- break;
- default:
- return 0;
- }
-
- ret = probe_uart401(hw_config, owner);
- if (ret)
- last_sb->midi_irq_cookie=midi_devs[hw_config->slots[4]]->devc;
- return ret;
-}
-
-void unload_sbmpu(struct address_info *hw_config)
-{
-#if defined(CONFIG_SOUND_MPU401)
- if (!strcmp (hw_config->name, "ESS1xxx MPU")) {
- unload_mpu401(hw_config);
- return;
- }
-#endif
- unload_uart401(hw_config);
-}
-
-EXPORT_SYMBOL(sb_dsp_init);
-EXPORT_SYMBOL(sb_dsp_detect);
-EXPORT_SYMBOL(sb_dsp_unload);
-EXPORT_SYMBOL(sb_be_quiet);
-EXPORT_SYMBOL(probe_sbmpu);
-EXPORT_SYMBOL(unload_sbmpu);
-EXPORT_SYMBOL(smw_free);
-MODULE_LICENSE("GPL");
diff --git a/sound/oss/sb_ess.c b/sound/oss/sb_ess.c
deleted file mode 100644
index 17e3f14318cd..000000000000
--- a/sound/oss/sb_ess.c
+++ /dev/null
@@ -1,1823 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#undef FKS_LOGGING
-#undef FKS_TEST
-
-/*
- * tabs should be 4 spaces, in vi(m): set tabstop=4
- *
- * TODO: consistent speed calculations!!
- * cleanup!
- * ????: Did I break MIDI support?
- *
- * History:
- *
- * Rolf Fokkens (Dec 20 1998): ES188x recording level support on a per
- * fokkensr@vertis.nl input basis.
- * (Dec 24 1998): Recognition of ES1788, ES1887, ES1888,
- * ES1868, ES1869 and ES1878. Could be used for
- * specific handling in the future. All except
- * ES1887 and ES1888 and ES688 are handled like
- * ES1688.
- * (Dec 27 1998): RECLEV for all (?) ES1688+ chips. ES188x now
- * have the "Dec 20" support + RECLEV
- * (Jan 2 1999): Preparation for Full Duplex. This means
- * Audio 2 is now used for playback when dma16
- * is specified. The next step would be to use
- * Audio 1 and Audio 2 at the same time.
- * (Jan 9 1999): Put all ESS stuff into sb_ess.[ch], this
- * includes both the ESS stuff that has been in
- * sb_*[ch] before I touched it and the ESS support
- * I added later
- * (Jan 23 1999): Full Duplex seems to work. I wrote a small
- * test proggy which works OK. Haven't found
- * any applications to test it though. So why did
- * I bother to create it anyway?? :) Just for
- * fun.
- * (May 2 1999): I tried to be too smart by "introducing"
- * ess_calc_best_speed (). The idea was that two
- * dividers could be used to setup a samplerate,
- * ess_calc_best_speed () would choose the best.
- * This works for playback, but results in
- * recording problems for high samplerates. I
- * fixed this by removing ess_calc_best_speed ()
- * and just doing what the documentation says.
- * Andy Sloane (Jun 4 1999): Stole some code from ALSA to fix the playback
- * andy@guildsoftware.com speed on ES1869, ES1879, ES1887, and ES1888.
- * 1879's were previously ignored by this driver;
- * added (untested) support for those.
- * Cvetan Ivanov (Oct 27 1999): Fixed ess_dsp_init to call ess_set_dma_hw for
- * zezo@inet.bg _ALL_ ESS models, not only ES1887
- *
- * This file contains ESS chip specifics. It's based on the existing ESS
- * handling as it resided in sb_common.c, sb_mixer.c and sb_audio.c. This
- * file adds features like:
- * - Chip Identification (as shown in /proc/sound)
- * - RECLEV support for ES1688 and later
- * - 6 bits playback level support chips later than ES1688
- * - Recording level support on a per-device basis for ES1887
- * - Full-Duplex for ES1887
- *
- * Full duplex is enabled by specifying dma16. While the normal dma must
- * be one of 0, 1 or 3, dma16 can be one of 0, 1, 3 or 5. DMA 5 is a 16 bit
- * DMA channel, while the others are 8 bit.
- *
- * ESS detection isn't foolproof (yet). If it fails, the additional module
- * parameter esstype can be specified as one of the following:
- * -1, 0, 688, 1688, 1868, 1869, 1788, 1887, 1888
- * -1 means: mimic 2.0 behaviour,
- * 0 means: auto detect.
- * others: explicitly specify the chip
- * -1 is the default, because auto detect still doesn't work.
- */
-
-/*
- * About the documentation
- *
- * I don't know if the chips are all OK, but the documentation is buggy. Because
- * I don't have all the chips myself, there's a lot I cannot verify. I'll try to
- * keep track of my latest insights about this here. If you have additional info,
- * please enlighten me (fokkensr@vertis.nl)!
- *
- * I had the impression that ES1688 also has 6 bit master volume control. The
- * documentation about ES1888 (rev C, october '95) claims that ES1888 has
- * the following features ES1688 doesn't have:
- * - 6 bit master volume
- * - Full Duplex
- * So ES1688 apparently doesn't have 6 bit master volume control, but the
- * ES1688 does have RECLEV control. Makes me wonder: does ES688 have it too?
- * Without RECLEV ES688 won't be much fun I guess.
- *
- * From the ES1888 (rev C, october '95) documentation I got the impression
- * that registers 0x68 to 0x6e don't exist which means: no recording volume
- * controls. To my surprise the ES888 documentation (1/14/96) claims that
- * ES888 does have these record mixer registers, but that ES1888 doesn't have
- * 0x69 and 0x6b. So the rest should be there.
- *
- * I'm trying to get ES1887 Full Duplex. Audio 2 is playback only, while Audio 1
- * is both record and playback. I think I should use Audio 2 for all playback.
- *
- * The documentation is an adventure: it's close but not fully accurate. I
- * found out that after a reset some registers are *NOT* reset, though the
- * docs say they would be. Interesting ones are 0x7f, 0x7d and 0x7a. They are
- * related to the Audio 2 channel. I also was surprised about the consequences
- * of writing 0x00 to 0x7f (which should be done by reset): The ES1887 moves
- * into ES1888 mode. This means that it claims IRQ 11, which happens to be my
- * ISDN adapter. Needless to say it no longer worked. I now understand why
- * after rebooting 0x7f already was 0x05, the value of my choice: the BIOS
- * did it.
- *
- * Oh, and this is another trap: in ES1887 docs mixer register 0x70 is
- * described as if it's exactly the same as register 0xa1. This is *NOT* true.
- * The description of 0x70 in ES1869 docs is accurate however.
- * Well, the assumption about ES1869 was wrong: register 0x70 is very much
- * like register 0xa1, except that bit 7 is always 1, whatever you want
- * it to be.
- *
- * When using audio 2, mixer register 0x72 seems to be meaningless. Only 0xa2
- * has effect.
- *
- * Software reset not being able to reset all registers is great! Especially
- * the fact that register 0x78 isn't reset is great when you wanna change back
- * to single dma operation (simplex): audio 2 is still operational, and uses
- * the same dma as audio 1: your ess changes into a funny echo machine.
- *
- * Received the news that an ES1688 is detected as an ES1788. Did some thinking:
- * the ES1887 detection scheme suggests in step 2 to try whether bit 3 of register
- * 0x64 can be changed. This is inaccurate. First I inverted the check: "If it
- * can be modified, it's a 1688", which led to a correct detection
- * of my ES1887. It resulted however in bad detection of 1688 (reported by mail)
- * and 1868 (if no PnP detection first): they result in a 1788 being detected.
- * I don't have docs on 1688, but I do have docs on 1868: The documentation is
- * probably inaccurate in the fact that I should check bit 2, not bit 3. This
- * is what I do now.
- */
-
-/*
- * About recognition of ESS chips
- *
- * The distinction of ES688, ES1688, ES1788, ES1887 and ES1888 is described in
- * a (preliminary ??) datasheet on ES1887. Its aim is to identify ES1887, but
- * during detection the text claims that "this chip may be ..." when a step
- * fails. This scheme is used to distinguish between the above chips.
- * It appears however that some PnP chips like ES1868 are recognized as ES1788
- * by the ES1887 detection scheme. These PnP chips can be detected in another
- * way however: ES1868, ES1869 and ES1878 can be recognized (foolproof I think)
- * by repeatedly reading mixer register 0x40. This is done by ess_identify in
- * sb_common.c.
- * This results in the following detection steps:
- * - distinguish between ES688 and ES1688+ (as always done in this driver)
- * if ES688 we're ready
- * - try to detect ES1868, ES1869 or ES1878
- * if successful we're ready
- * - try to detect ES1888, ES1887 or ES1788
- * if successful we're ready
- * - Dunno. Must be 1688. Will do in general
- *
- * About RECLEV support:
- *
- * The existing ES1688 support didn't take care of the ES1688+ recording
- * levels very well. Whenever a device was selected (recmask) for recording
- * its recording level was loud, and it couldn't be changed. The fact that
- * internal register 0xb4 could take care of RECLEV didn't mean much unless
- * its value was restored every time the chip was reset; the reset cleared the
- * value of 0xb4 too. I guess that's what 4front also had (have?) trouble with.
- *
- * About ES1887 support:
- *
- * The ES1887 has separate registers to control the recording levels, for all
- * inputs. The ES1887 specific software makes these levels the same as their
- * corresponding playback levels, unless recmask says they aren't recorded. In
- * the latter case the recording volumes are 0.
- * Now recording levels of inputs can be controlled, by changing the playback
- * levels. Furthermore several devices can be recorded together (which is not
- * possible with the ES1688).
- * Besides the separate recording level control for each input, the common
- * recording level can also be controlled by RECLEV as described above.
- *
- * Not only the ES1887 has this recording mixer. I know the following from the
- * documentation:
- * ES688 no
- * ES1688 no
- * ES1868 no
- * ES1869 yes
- * ES1878 no
- * ES1879 yes
- * ES1888 no/yes Contradicting documentation; most recent: yes
- * ES1946 yes This is a PCI chip; not handled by this driver
- */
-
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-#include "sound_config.h"
-#include "sb_mixer.h"
-#include "sb.h"
-
-#include "sb_ess.h"
-
-#define ESSTYPE_LIKE20 -1 /* Mimic 2.0 behaviour */
-#define ESSTYPE_DETECT 0 /* Auto detect */
-
-#define SUBMDL_ES1788 0x10 /* Subtype ES1788 for specific handling */
-#define SUBMDL_ES1868 0x11 /* Subtype ES1868 for specific handling */
-#define SUBMDL_ES1869 0x12 /* Subtype ES1869 for specific handling */
-#define SUBMDL_ES1878 0x13 /* Subtype ES1878 for specific handling */
-#define SUBMDL_ES1879 0x16 /* ES1879 was initially forgotten */
-#define SUBMDL_ES1887 0x14 /* Subtype ES1887 for specific handling */
-#define SUBMDL_ES1888 0x15 /* Subtype ES1888 for specific handling */
-
-#define SB_CAP_ES18XX_RATE 0x100
-
-#define ES1688_CLOCK1 795444 /* 128 - div */
-#define ES1688_CLOCK2 397722 /* 256 - div */
-#define ES18XX_CLOCK1 793800 /* 128 - div */
-#define ES18XX_CLOCK2 768000 /* 256 - div */
-
-#ifdef FKS_LOGGING
-static void ess_show_mixerregs (sb_devc *devc);
-#endif
-static int ess_read (sb_devc * devc, unsigned char reg);
-static int ess_write (sb_devc * devc, unsigned char reg, unsigned char data);
-static void ess_chgmixer
- (sb_devc * devc, unsigned int reg, unsigned int mask, unsigned int val);
-
-/****************************************************************************
- * *
- * ESS audio *
- * *
- ****************************************************************************/
-
-struct ess_command {short cmd; short data;};
-
-/*
- * Commands for initializing Audio 1 for input (record)
- */
-static struct ess_command ess_i08m[] = /* input 8 bit mono */
- { {0xb7, 0x51}, {0xb7, 0xd0}, {-1, 0} };
-static struct ess_command ess_i16m[] = /* input 16 bit mono */
- { {0xb7, 0x71}, {0xb7, 0xf4}, {-1, 0} };
-static struct ess_command ess_i08s[] = /* input 8 bit stereo */
- { {0xb7, 0x51}, {0xb7, 0x98}, {-1, 0} };
-static struct ess_command ess_i16s[] = /* input 16 bit stereo */
- { {0xb7, 0x71}, {0xb7, 0xbc}, {-1, 0} };
-
-static struct ess_command *ess_inp_cmds[] =
- { ess_i08m, ess_i16m, ess_i08s, ess_i16s };
-
-
-/*
- * Commands for initializing Audio 1 for output (playback)
- */
-static struct ess_command ess_o08m[] = /* output 8 bit mono */
- { {0xb6, 0x80}, {0xb7, 0x51}, {0xb7, 0xd0}, {-1, 0} };
-static struct ess_command ess_o16m[] = /* output 16 bit mono */
- { {0xb6, 0x00}, {0xb7, 0x71}, {0xb7, 0xf4}, {-1, 0} };
-static struct ess_command ess_o08s[] = /* output 8 bit stereo */
- { {0xb6, 0x80}, {0xb7, 0x51}, {0xb7, 0x98}, {-1, 0} };
-static struct ess_command ess_o16s[] = /* output 16 bit stereo */
- { {0xb6, 0x00}, {0xb7, 0x71}, {0xb7, 0xbc}, {-1, 0} };
-
-static struct ess_command *ess_out_cmds[] =
- { ess_o08m, ess_o16m, ess_o08s, ess_o16s };
-
-static void ess_exec_commands
- (sb_devc *devc, struct ess_command *cmdtab[])
-{
- struct ess_command *cmd;
-
- cmd = cmdtab [ ((devc->channels != 1) << 1) + (devc->bits != AFMT_U8) ];
-
- while (cmd->cmd != -1) {
- ess_write (devc, cmd->cmd, cmd->data);
- cmd++;
- }
-}
-
-static void ess_change
- (sb_devc *devc, unsigned int reg, unsigned int mask, unsigned int val)
-{
- int value;
-
- value = ess_read (devc, reg);
- value = (value & ~mask) | (val & mask);
- ess_write (devc, reg, value);
-}
-
-static void ess_set_output_parms
- (int dev, unsigned long buf, int nr_bytes, int intrflag)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- if (devc->duplex) {
- devc->trg_buf_16 = buf;
- devc->trg_bytes_16 = nr_bytes;
- devc->trg_intrflag_16 = intrflag;
- devc->irq_mode_16 = IMODE_OUTPUT;
- } else {
- devc->trg_buf = buf;
- devc->trg_bytes = nr_bytes;
- devc->trg_intrflag = intrflag;
- devc->irq_mode = IMODE_OUTPUT;
- }
-}
-
-static void ess_set_input_parms
- (int dev, unsigned long buf, int count, int intrflag)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- devc->trg_buf = buf;
- devc->trg_bytes = count;
- devc->trg_intrflag = intrflag;
- devc->irq_mode = IMODE_INPUT;
-}
-
-static int ess_calc_div (int clock, int revert, int *speedp, int *diffp)
-{
- int divider;
- int speed, diff;
- int retval;
-
- speed = *speedp;
- divider = (clock + speed / 2) / speed;
- retval = revert - divider;
- if (retval > revert - 1) {
- retval = revert - 1;
- divider = revert - retval;
- }
- /* This line is suggested. Must be wrong I think
- *speedp = (clock + divider / 2) / divider;
- So I chose the next one */
-
- *speedp = clock / divider;
- diff = speed - *speedp;
- if (diff < 0) diff =-diff;
- *diffp = diff;
-
- return retval;
-}
-
-static int ess_calc_best_speed
- (int clock1, int rev1, int clock2, int rev2, int *divp, int *speedp)
-{
- int speed1 = *speedp, speed2 = *speedp;
- int div1, div2;
- int diff1, diff2;
- int retval;
-
- div1 = ess_calc_div (clock1, rev1, &speed1, &diff1);
- div2 = ess_calc_div (clock2, rev2, &speed2, &diff2);
-
- if (diff1 < diff2) {
- *divp = div1;
- *speedp = speed1;
- retval = 1;
- } else {
- /* *divp = div2; */
- *divp = 0x80 | div2;
- *speedp = speed2;
- retval = 2;
- }
-
- return retval;
-}
-
-/*
- * Depending on the audio channel, ESS devices can
- * have different clock settings. These are made consistent for duplex,
- * however.
- * Callers of ess_speed only pass an audionum suggestion, which means
- * input suggests 1 and output suggests 2. This suggestion only holds,
- * however, when doing duplex.
- */
-static void ess_common_speed (sb_devc *devc, int *speedp, int *divp)
-{
- int diff = 0, div;
-
- if (devc->duplex) {
- /*
- * The 0x80 is important for the first audio channel
- */
- if (devc->submodel == SUBMDL_ES1888) {
- div = 0x80 | ess_calc_div (795500, 256, speedp, &diff);
- } else {
- div = 0x80 | ess_calc_div (795500, 128, speedp, &diff);
- }
- } else if(devc->caps & SB_CAP_ES18XX_RATE) {
- if (devc->submodel == SUBMDL_ES1888) {
- ess_calc_best_speed(397700, 128, 795500, 256,
- &div, speedp);
- } else {
- ess_calc_best_speed(ES18XX_CLOCK1, 128, ES18XX_CLOCK2, 256,
- &div, speedp);
- }
- } else {
- if (*speedp > 22000) {
- div = 0x80 | ess_calc_div (ES1688_CLOCK1, 256, speedp, &diff);
- } else {
- div = 0x00 | ess_calc_div (ES1688_CLOCK2, 128, speedp, &diff);
- }
- }
- *divp = div;
-}
-
-static void ess_speed (sb_devc *devc, int audionum)
-{
- int speed;
- int div, div2;
-
- ess_common_speed (devc, &(devc->speed), &div);
-
-#ifdef FKS_REG_LOGGING
-printk (KERN_INFO "FKS: ess_speed (%d) b speed = %d, div=%x\n", audionum, devc->speed, div);
-#endif
-
- /* Set filter roll-off to 90% of speed/2 */
- speed = (devc->speed * 9) / 20;
-
- div2 = 256 - 7160000 / (speed * 82);
-
- if (!devc->duplex) audionum = 1;
-
- if (audionum == 1) {
- /* Change behaviour of register A1 *
- sb_chg_mixer(devc, 0x71, 0x20, 0x20)
- * For ES1869 only??? */
- ess_write (devc, 0xa1, div);
- ess_write (devc, 0xa2, div2);
- } else {
- ess_setmixer (devc, 0x70, div);
- /*
- * FKS: fascinating: 0x72 doesn't seem to work.
- */
- ess_write (devc, 0xa2, div2);
- ess_setmixer (devc, 0x72, div2);
- }
-}
-
-static int ess_audio_prepare_for_input(int dev, int bsize, int bcount)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- ess_speed(devc, 1);
-
- sb_dsp_command(devc, DSP_CMD_SPKOFF);
-
- ess_write (devc, 0xb8, 0x0e); /* Auto init DMA mode */
- ess_change (devc, 0xa8, 0x03, 3 - devc->channels); /* Mono/stereo */
- ess_write (devc, 0xb9, 2); /* Demand mode (4 bytes/DMA request) */
-
- ess_exec_commands (devc, ess_inp_cmds);
-
- ess_change (devc, 0xb1, 0xf0, 0x50);
- ess_change (devc, 0xb2, 0xf0, 0x50);
-
- devc->trigger_bits = 0;
- return 0;
-}
-
-static int ess_audio_prepare_for_output_audio1 (int dev, int bsize, int bcount)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- sb_dsp_reset(devc);
- ess_speed(devc, 1);
- ess_write (devc, 0xb8, 4); /* Auto init DMA mode */
- ess_change (devc, 0xa8, 0x03, 3 - devc->channels); /* Mono/stereo */
- ess_write (devc, 0xb9, 2); /* Demand mode (4 bytes/request) */
-
- ess_exec_commands (devc, ess_out_cmds);
-
- ess_change (devc, 0xb1, 0xf0, 0x50); /* Enable DMA */
- ess_change (devc, 0xb2, 0xf0, 0x50); /* Enable IRQ */
-
- sb_dsp_command(devc, DSP_CMD_SPKON); /* There be sound! */
-
- devc->trigger_bits = 0;
- return 0;
-}
-
-static int ess_audio_prepare_for_output_audio2 (int dev, int bsize, int bcount)
-{
- sb_devc *devc = audio_devs[dev]->devc;
- unsigned char bits;
-
-/* FKS: qqq
- sb_dsp_reset(devc);
-*/
-
- /*
- * Auto-Initialize:
- * DMA mode + demand mode (8 bytes/request, yes I want it all!)
- * But leave 16-bit DMA bit untouched!
- */
- ess_chgmixer (devc, 0x78, 0xd0, 0xd0);
-
- ess_speed(devc, 2);
-
- /* bits 4:3 on ES1887 represent recording source. Keep them! */
- bits = ess_getmixer (devc, 0x7a) & 0x18;
-
- /* Set stereo/mono */
- if (devc->channels != 1) bits |= 0x02;
-
- /* Init DACs; UNSIGNED mode for 8 bit; SIGNED mode for 16 bit */
- if (devc->bits != AFMT_U8) bits |= 0x05; /* 16 bit */
-
- /* Enable DMA, IRQ will be shared (hopefully)*/
- bits |= 0x60;
-
- ess_setmixer (devc, 0x7a, bits);
-
- ess_mixer_reload (devc, SOUND_MIXER_PCM); /* There be sound! */
-
- devc->trigger_bits = 0;
- return 0;
-}
-
-static int ess_audio_prepare_for_output(int dev, int bsize, int bcount)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
-#ifdef FKS_REG_LOGGING
-printk(KERN_INFO "ess_audio_prepare_for_output: dma_out=%d,dma_in=%d\n"
-, audio_devs[dev]->dmap_out->dma, audio_devs[dev]->dmap_in->dma);
-#endif
-
- if (devc->duplex) {
- return ess_audio_prepare_for_output_audio2 (dev, bsize, bcount);
- } else {
- return ess_audio_prepare_for_output_audio1 (dev, bsize, bcount);
- }
-}
-
-static void ess_audio_halt_xfer(int dev)
-{
- unsigned long flags;
- sb_devc *devc = audio_devs[dev]->devc;
-
- spin_lock_irqsave(&devc->lock, flags);
- sb_dsp_reset(devc);
- spin_unlock_irqrestore(&devc->lock, flags);
-
- /*
- * Audio 2 may still be operational! Creates awful sounds!
- */
- if (devc->duplex) ess_chgmixer(devc, 0x78, 0x03, 0x00);
-}
-
-static void ess_audio_start_input
- (int dev, unsigned long buf, int nr_bytes, int intrflag)
-{
- int count = nr_bytes;
- sb_devc *devc = audio_devs[dev]->devc;
- short c = -nr_bytes;
-
- /*
- * Start a DMA input to the buffer pointed by dmaqtail
- */
-
- if (audio_devs[dev]->dmap_in->dma > 3) count >>= 1;
- count--;
-
- devc->irq_mode = IMODE_INPUT;
-
- ess_write (devc, 0xa4, (unsigned char) ((unsigned short) c & 0xff));
- ess_write (devc, 0xa5, (unsigned char) (((unsigned short) c >> 8) & 0xff));
-
- ess_change (devc, 0xb8, 0x0f, 0x0f); /* Go */
- devc->intr_active = 1;
-}
-
-static void ess_audio_output_block_audio1
- (int dev, unsigned long buf, int nr_bytes, int intrflag)
-{
- int count = nr_bytes;
- sb_devc *devc = audio_devs[dev]->devc;
- short c = -nr_bytes;
-
- if (audio_devs[dev]->dmap_out->dma > 3)
- count >>= 1;
- count--;
-
- devc->irq_mode = IMODE_OUTPUT;
-
- ess_write (devc, 0xa4, (unsigned char) ((unsigned short) c & 0xff));
- ess_write (devc, 0xa5, (unsigned char) (((unsigned short) c >> 8) & 0xff));
-
- ess_change (devc, 0xb8, 0x05, 0x05); /* Go */
- devc->intr_active = 1;
-}
-
-static void ess_audio_output_block_audio2
- (int dev, unsigned long buf, int nr_bytes, int intrflag)
-{
- int count = nr_bytes;
- sb_devc *devc = audio_devs[dev]->devc;
- short c = -nr_bytes;
-
- if (audio_devs[dev]->dmap_out->dma > 3) count >>= 1;
- count--;
-
- ess_setmixer (devc, 0x74, (unsigned char) ((unsigned short) c & 0xff));
- ess_setmixer (devc, 0x76, (unsigned char) (((unsigned short) c >> 8) & 0xff));
- ess_chgmixer (devc, 0x78, 0x03, 0x03); /* Go */
-
- devc->irq_mode_16 = IMODE_OUTPUT;
- devc->intr_active_16 = 1;
-}
-
-static void ess_audio_output_block
- (int dev, unsigned long buf, int nr_bytes, int intrflag)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- if (devc->duplex) {
- ess_audio_output_block_audio2 (dev, buf, nr_bytes, intrflag);
- } else {
- ess_audio_output_block_audio1 (dev, buf, nr_bytes, intrflag);
- }
-}
-
-/*
- * FKS: the if-statements for both bits and bits_16 are quite alike.
- * Combine this...
- */
-static void ess_audio_trigger(int dev, int bits)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- int bits_16 = bits & devc->irq_mode_16;
- bits &= devc->irq_mode;
-
- if (!bits && !bits_16) {
- /* FKS oh oh.... wrong?? for dma 16? */
- sb_dsp_command(devc, 0xd0); /* Halt DMA */
- }
-
- if (bits) {
- switch (devc->irq_mode)
- {
- case IMODE_INPUT:
- ess_audio_start_input(dev, devc->trg_buf, devc->trg_bytes,
- devc->trg_intrflag);
- break;
-
- case IMODE_OUTPUT:
- ess_audio_output_block(dev, devc->trg_buf, devc->trg_bytes,
- devc->trg_intrflag);
- break;
- }
- }
-
- if (bits_16) {
- switch (devc->irq_mode_16) {
- case IMODE_INPUT:
- ess_audio_start_input(dev, devc->trg_buf_16, devc->trg_bytes_16,
- devc->trg_intrflag_16);
- break;
-
- case IMODE_OUTPUT:
- ess_audio_output_block(dev, devc->trg_buf_16, devc->trg_bytes_16,
- devc->trg_intrflag_16);
- break;
- }
- }
-
- devc->trigger_bits = bits | bits_16;
-}
-
-static int ess_audio_set_speed(int dev, int speed)
-{
- sb_devc *devc = audio_devs[dev]->devc;
- int minspeed, maxspeed, dummydiv;
-
- if (speed > 0) {
- minspeed = (devc->duplex ? 6215 : 5000 );
- maxspeed = (devc->duplex ? 44100 : 48000);
- if (speed < minspeed) speed = minspeed;
- if (speed > maxspeed) speed = maxspeed;
-
- ess_common_speed (devc, &speed, &dummydiv);
-
- devc->speed = speed;
- }
- return devc->speed;
-}
-
-/*
- * FKS: This is a one-on-one copy of sb1_audio_set_bits
- */
-static unsigned int ess_audio_set_bits(int dev, unsigned int bits)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- if (bits != 0) {
- if (bits == AFMT_U8 || bits == AFMT_S16_LE) {
- devc->bits = bits;
- } else {
- devc->bits = AFMT_U8;
- }
- }
-
- return devc->bits;
-}
-
-/*
- * FKS: This is a one-on-one copy of sbpro_audio_set_channels
- * (*) Modified it!!
- */
-static short ess_audio_set_channels(int dev, short channels)
-{
- sb_devc *devc = audio_devs[dev]->devc;
-
- if (channels == 1 || channels == 2) devc->channels = channels;
-
- return devc->channels;
-}
-
-static struct audio_driver ess_audio_driver = /* ESS ES688/1688 */
-{
- .owner = THIS_MODULE,
- .open = sb_audio_open,
- .close = sb_audio_close,
- .output_block = ess_set_output_parms,
- .start_input = ess_set_input_parms,
- .prepare_for_input = ess_audio_prepare_for_input,
- .prepare_for_output = ess_audio_prepare_for_output,
- .halt_io = ess_audio_halt_xfer,
- .trigger = ess_audio_trigger,
- .set_speed = ess_audio_set_speed,
- .set_bits = ess_audio_set_bits,
- .set_channels = ess_audio_set_channels
-};
-
-/*
- * ess_audio_init must be called from sb_audio_init
- */
-struct audio_driver *ess_audio_init
- (sb_devc *devc, int *audio_flags, int *format_mask)
-{
- *audio_flags = DMA_AUTOMODE;
- *format_mask |= AFMT_S16_LE;
-
- if (devc->duplex) {
- int tmp_dma;
- /*
- * sb_audio_init thinks dma8 is for playback and
- * dma16 is for record. Not now! So swap them.
- */
- tmp_dma = devc->dma16;
- devc->dma16 = devc->dma8;
- devc->dma8 = tmp_dma;
-
- *audio_flags |= DMA_DUPLEX;
- }
-
- return &ess_audio_driver;
-}
-
-/****************************************************************************
- * *
- * ESS common *
- * *
- ****************************************************************************/
-static void ess_handle_channel
- (char *channel, int dev, int intr_active, unsigned char flag, int irq_mode)
-{
- if (!intr_active || !flag) return;
-#ifdef FKS_REG_LOGGING
-printk(KERN_INFO "FKS: ess_handle_channel %s irq_mode=%d\n", channel, irq_mode);
-#endif
- switch (irq_mode) {
- case IMODE_OUTPUT:
- DMAbuf_outputintr (dev, 1);
- break;
-
- case IMODE_INPUT:
- DMAbuf_inputintr (dev);
- break;
-
- case IMODE_INIT:
- break;
-
- default:;
- /* printk(KERN_WARNING "ESS: Unexpected interrupt\n"); */
- }
-}
-
-/*
- * FKS: TODO!!! Finish this!
- *
- * I think midi stuff uses uart401, without interrupts.
- * So IMODE_MIDI isn't a value for devc->irq_mode.
- */
-void ess_intr (sb_devc *devc)
-{
- int status;
- unsigned char src;
-
- if (devc->submodel == SUBMDL_ES1887) {
- src = ess_getmixer (devc, 0x7f) >> 4;
- } else {
- src = 0xff;
- }
-
-#ifdef FKS_REG_LOGGING
-printk(KERN_INFO "FKS: sbintr src=%x\n",(int)src);
-#endif
- ess_handle_channel
- ( "Audio 1"
- , devc->dev, devc->intr_active , src & 0x01, devc->irq_mode );
- ess_handle_channel
- ( "Audio 2"
- , devc->dev, devc->intr_active_16, src & 0x02, devc->irq_mode_16);
- /*
- * Acknowledge interrupts
- */
- if (devc->submodel == SUBMDL_ES1887 && (src & 0x02)) {
- ess_chgmixer (devc, 0x7a, 0x80, 0x00);
- }
-
- if (src & 0x01) {
- status = inb(DSP_DATA_AVAIL);
- }
-}
-
-static void ess_extended (sb_devc * devc)
-{
- /* Enable extended mode */
-
- sb_dsp_command(devc, 0xc6);
-}
-
-static int ess_write (sb_devc * devc, unsigned char reg, unsigned char data)
-{
-#ifdef FKS_REG_LOGGING
-printk(KERN_INFO "FKS: write reg %x: %x\n", reg, data);
-#endif
- /* Write a byte to an extended mode register of ES1688 */
-
- if (!sb_dsp_command(devc, reg))
- return 0;
-
- return sb_dsp_command(devc, data);
-}
-
-static int ess_read (sb_devc * devc, unsigned char reg)
-{
- /* Read a byte from an extended mode register of ES1688 */
-
- /* Read register command */
- if (!sb_dsp_command(devc, 0xc0)) return -1;
-
- if (!sb_dsp_command(devc, reg )) return -1;
-
- return sb_dsp_get_byte(devc);
-}
-
-int ess_dsp_reset(sb_devc * devc)
-{
- int loopc;
-
-#ifdef FKS_REG_LOGGING
-printk(KERN_INFO "FKS: ess_dsp_reset 1\n");
-ess_show_mixerregs (devc);
-#endif
-
- outb(3, DSP_RESET); /* Reset FIFO too */
-
- udelay(10);
- outb(0, DSP_RESET);
- udelay(30);
-
- for (loopc = 0; loopc < 1000 && !(inb(DSP_DATA_AVAIL) & 0x80); loopc++);
-
- if (inb(DSP_READ) != 0xAA) {
- DDB(printk("sb: No response to RESET\n"));
- return 0; /* Sorry */
- }
- ess_extended (devc);
-
-#ifdef FKS_LOGGING
-printk(KERN_INFO "FKS: dsp_reset 2\n");
-ess_show_mixerregs (devc);
-#endif
-
- return 1;
-}
-
-static int ess_irq_bits (int irq)
-{
- switch (irq) {
- case 2:
- case 9:
- return 0;
-
- case 5:
- return 1;
-
- case 7:
- return 2;
-
- case 10:
- return 3;
-
- default:
- printk(KERN_ERR "ESS1688: Invalid IRQ %d\n", irq);
- return -1;
- }
-}
-
-/*
- * Set IRQ configuration register for all ESS models
- */
-static int ess_common_set_irq_hw (sb_devc * devc)
-{
- int irq_bits;
-
- if ((irq_bits = ess_irq_bits (devc->irq)) == -1) return 0;
-
- if (!ess_write (devc, 0xb1, 0x50 | (irq_bits << 2))) {
- printk(KERN_ERR "ES1688: Failed to write to IRQ config register\n");
- return 0;
- }
- return 1;
-}
-
-/*
- * I want to use the modern ES1887 mixer IRQ handling. Interestingly,
- * my BIOS wants the same; but suppose someone's BIOS doesn't!
- * This is independent of duplex. If there's an ES1887, this will
- * prevent it from dropping into ES1888 mode.
- */
-static void ess_es1887_set_irq_hw (sb_devc * devc)
-{
- int irq_bits;
-
- if ((irq_bits = ess_irq_bits (devc->irq)) == -1) return;
-
- ess_chgmixer (devc, 0x7f, 0x0f, 0x01 | ((irq_bits + 1) << 1));
-}
-
-static int ess_set_irq_hw (sb_devc * devc)
-{
- if (devc->submodel == SUBMDL_ES1887) ess_es1887_set_irq_hw (devc);
-
- return ess_common_set_irq_hw (devc);
-}
-
-#ifdef FKS_TEST
-
-/*
- * FKS_test:
- * for ES1887: 00, 18, non wr bits: 0001 1000
- * for ES1868: 00, b8, non wr bits: 1011 1000
- * for ES1888: 00, f8, non wr bits: 1111 1000
- * for ES1688: 00, f8, non wr bits: 1111 1000
- * + ES968
- */
-
-static void FKS_test (sb_devc * devc)
-{
- int val1, val2;
- val1 = ess_getmixer (devc, 0x64);
- ess_setmixer (devc, 0x64, ~val1);
- val2 = ess_getmixer (devc, 0x64) ^ ~val1;
- ess_setmixer (devc, 0x64, val1);
- val1 ^= ess_getmixer (devc, 0x64);
-printk (KERN_INFO "FKS: FKS_test %02x, %02x\n", (val1 & 0x0ff), (val2 & 0x0ff));
-}
-#endif
-
-static unsigned int ess_identify (sb_devc * devc)
-{
- unsigned int val;
- unsigned long flags;
-
- spin_lock_irqsave(&devc->lock, flags);
- outb(((unsigned char) (0x40 & 0xff)), MIXER_ADDR);
-
- udelay(20);
- val = inb(MIXER_DATA) << 8;
- udelay(20);
- val |= inb(MIXER_DATA);
- udelay(20);
- spin_unlock_irqrestore(&devc->lock, flags);
-
- return val;
-}
-
-/*
- * ESS Technology describes a detection scheme in their docs. It involves
- * fiddling with the bits in certain mixer registers. ess_probe is supposed
- * to help with that.
- *
- * FKS: tracing shows ess_probe writes a wrong value to 0x64. Bit 3 reads as 1,
- * but should only ever be written as 0. Check this.
- */
-static int ess_probe (sb_devc * devc, int reg, int xorval)
-{
- int val1, val2, val3;
-
- val1 = ess_getmixer (devc, reg);
- val2 = val1 ^ xorval;
- ess_setmixer (devc, reg, val2);
- val3 = ess_getmixer (devc, reg);
- ess_setmixer (devc, reg, val1);
-
- return (val2 == val3);
-}
-
-int ess_init(sb_devc * devc, struct address_info *hw_config)
-{
- unsigned char cfg;
- int ess_major = 0, ess_minor = 0;
- int i;
- static char name[100], modelname[10];
-
- /*
- * Try to detect ESS chips.
- */
-
- sb_dsp_command(devc, 0xe7); /* Return identification */
-
- for (i = 1000; i; i--) {
- if (inb(DSP_DATA_AVAIL) & 0x80) {
- if (ess_major == 0) {
- ess_major = inb(DSP_READ);
- } else {
- ess_minor = inb(DSP_READ);
- break;
- }
- }
- }
-
- if (ess_major == 0) return 0;
-
- if (ess_major == 0x48 && (ess_minor & 0xf0) == 0x80) {
- sprintf(name, "ESS ES488 AudioDrive (rev %d)",
- ess_minor & 0x0f);
- hw_config->name = name;
- devc->model = MDL_SBPRO;
- return 1;
- }
-
- /*
-	 * This is the detection heuristic of ESS Technology, though somewhat
-	 * changed to actually make it work.
-	 * It results in the following detection steps:
-	 * - distinguish between ES688 and ES1688+ (as always done in this driver);
-	 *   if it's an ES688 we're ready
-	 * - try to detect an ES1868, ES1869 or ES1878 (ess_identify);
-	 *   if successful we're ready
-	 * - try to detect an ES1888, ES1887 or ES1788 (aim: detect the ES1887);
-	 *   if successful we're ready
-	 * - otherwise assume it's an ES1688, which will do in general
-	 *
-	 * This is the most BETA part of the software: will the detection
-	 * always work?
- */
- devc->model = MDL_ESS;
- devc->submodel = ess_minor & 0x0f;
-
- if (ess_major == 0x68 && (ess_minor & 0xf0) == 0x80) {
- char *chip = NULL;
- int submodel = -1;
-
- switch (devc->sbmo.esstype) {
- case ESSTYPE_DETECT:
- case ESSTYPE_LIKE20:
- break;
- case 688:
- submodel = 0x00;
- break;
- case 1688:
- submodel = 0x08;
- break;
- case 1868:
- submodel = SUBMDL_ES1868;
- break;
- case 1869:
- submodel = SUBMDL_ES1869;
- break;
- case 1788:
- submodel = SUBMDL_ES1788;
- break;
- case 1878:
- submodel = SUBMDL_ES1878;
- break;
- case 1879:
- submodel = SUBMDL_ES1879;
- break;
- case 1887:
- submodel = SUBMDL_ES1887;
- break;
- case 1888:
- submodel = SUBMDL_ES1888;
- break;
- default:
- printk (KERN_ERR "Invalid esstype=%d specified\n", devc->sbmo.esstype);
- return 0;
- }
- if (submodel != -1) {
- devc->submodel = submodel;
- sprintf (modelname, "ES%d", devc->sbmo.esstype);
- chip = modelname;
- }
- if (chip == NULL && (ess_minor & 0x0f) < 8) {
- chip = "ES688";
- }
-#ifdef FKS_TEST
-FKS_test (devc);
-#endif
- /*
-		 * If nothing has been detected yet, and we want 2.0 behaviour,
-		 * then let's assume it's an ES1688.
- */
- if (chip == NULL && devc->sbmo.esstype == ESSTYPE_LIKE20) {
- chip = "ES1688";
- }
-
- if (chip == NULL) {
- int type;
-
- type = ess_identify (devc);
-
- switch (type) {
- case 0x1868:
- chip = "ES1868";
- devc->submodel = SUBMDL_ES1868;
- break;
- case 0x1869:
- chip = "ES1869";
- devc->submodel = SUBMDL_ES1869;
- break;
- case 0x1878:
- chip = "ES1878";
- devc->submodel = SUBMDL_ES1878;
- break;
- case 0x1879:
- chip = "ES1879";
- devc->submodel = SUBMDL_ES1879;
- break;
- default:
- if ((type & 0x00ff) != ((type >> 8) & 0x00ff)) {
- printk ("ess_init: Unrecognized %04x\n", type);
- }
- }
- }
-#if 0
- /*
-	 * This one failed.
-	 * Probing bit 4 is another idea: from the ES1788 and up, all chips
-	 * seem to have hardware volume control. Bit 4 is read-only and
-	 * indicates whether a hardware volume interrupt has fired.
-	 * Because ES688/ES1688 chips don't have this feature, bit 4 might be
-	 * writable on them.
- */
- if (chip == NULL && !ess_probe(devc, 0x64, (1 << 4))) {
-#endif
- /*
-	 * Probing bit 2 is my idea. The ES1887 docs want me to probe bit 3,
-	 * but that results in the ES1688 being detected as an ES1788.
-	 * Bit 2 is "Enable HWV IRQE"; as ES(1)688 chips don't have HardWare
-	 * Volume, I assume they don't have this IRQE either.
- */
- if (chip == NULL && ess_probe(devc, 0x64, (1 << 2))) {
- if (ess_probe (devc, 0x70, 0x7f)) {
- if (ess_probe (devc, 0x64, (1 << 5))) {
- chip = "ES1887";
- devc->submodel = SUBMDL_ES1887;
- } else {
- chip = "ES1888";
- devc->submodel = SUBMDL_ES1888;
- }
- } else {
- chip = "ES1788";
- devc->submodel = SUBMDL_ES1788;
- }
- }
- if (chip == NULL) {
- chip = "ES1688";
- }
-
- printk(KERN_INFO "ESS chip %s %s%s\n", chip,
- (devc->sbmo.esstype == ESSTYPE_DETECT ||
- devc->sbmo.esstype == ESSTYPE_LIKE20) ?
- "detected" : "specified",
- devc->sbmo.esstype == ESSTYPE_LIKE20 ?
- " (kernel 2.0 compatible)" : "");
-
- sprintf(name,"ESS %s AudioDrive (rev %d)", chip, ess_minor & 0x0f);
- } else {
- strcpy(name, "Jazz16");
- }
-
- /* AAS: info stolen from ALSA: these boards have different clocks */
- switch(devc->submodel) {
-/* APPARENTLY NOT 1869 AND 1887
- case SUBMDL_ES1869:
- case SUBMDL_ES1887:
-*/
- case SUBMDL_ES1888:
- devc->caps |= SB_CAP_ES18XX_RATE;
- break;
- }
-
- hw_config->name = name;
- /* FKS: sb_dsp_reset to enable extended mode???? */
- sb_dsp_reset(devc); /* Turn on extended mode */
-
- /*
- * Enable joystick and OPL3
- */
- cfg = ess_getmixer (devc, 0x40);
- ess_setmixer (devc, 0x40, cfg | 0x03);
- if (devc->submodel >= 8) { /* ES1688 */
- devc->caps |= SB_NO_MIDI; /* ES1688 uses MPU401 MIDI mode */
- }
- sb_dsp_reset (devc);
-
- /*
- * This is important! If it's not done, the IRQ probe in sb_dsp_init
- * may fail.
- */
- return ess_set_irq_hw (devc);
-}
-
-static int ess_set_dma_hw(sb_devc * devc)
-{
- unsigned char cfg, dma_bits = 0, dma16_bits;
- int dma;
-
-#ifdef FKS_LOGGING
-printk(KERN_INFO "ess_set_dma_hw: dma8=%d,dma16=%d,dup=%d\n"
-, devc->dma8, devc->dma16, devc->duplex);
-#endif
-
- /*
- * FKS: It seems as if this duplex flag isn't set yet. Check it.
- */
- dma = devc->dma8;
-
- if (dma > 3 || dma < 0 || dma == 2) {
- dma_bits = 0;
- printk(KERN_ERR "ESS1688: Invalid DMA8 %d\n", dma);
- return 0;
- } else {
- /* Extended mode DMA enable */
- cfg = 0x50;
-
- if (dma == 3) {
- dma_bits = 3;
- } else {
- dma_bits = dma + 1;
- }
- }
-
- if (!ess_write (devc, 0xb2, cfg | (dma_bits << 2))) {
- printk(KERN_ERR "ESS1688: Failed to write to DMA config register\n");
- return 0;
- }
-
- if (devc->duplex) {
- dma = devc->dma16;
- dma16_bits = 0;
-
- if (dma >= 0) {
- switch (dma) {
- case 0:
- dma_bits = 0x04;
- break;
- case 1:
- dma_bits = 0x05;
- break;
- case 3:
- dma_bits = 0x06;
- break;
- case 5:
- dma_bits = 0x07;
- dma16_bits = 0x20;
- break;
- default:
- printk(KERN_ERR "ESS1887: Invalid DMA16 %d\n", dma);
- return 0;
- }
- ess_chgmixer (devc, 0x78, 0x20, dma16_bits);
- ess_chgmixer (devc, 0x7d, 0x07, dma_bits);
- }
- }
- return 1;
-}
-
-/*
- * This one is called from sb_dsp_init.
- *
- * Return values:
- * 0: Failed
- * 1: Succeeded or doesn't apply (not SUBMDL_ES1887)
- */
-int ess_dsp_init (sb_devc *devc, struct address_info *hw_config)
-{
- /*
- * Caller also checks this, but anyway
- */
- if (devc->model != MDL_ESS) {
- printk (KERN_INFO "ess_dsp_init for non ESS chip\n");
- return 1;
- }
- /*
-	 * This is for the ES1887 to run full duplex. Actually the ES1888
-	 * is allowed to do so too, but I have no idea yet whether it
-	 * will work there.
-	 *
-	 * For the SB16, having both dma8 and dma16 means enabling full
-	 * duplex. Let's try the same for the ES1887.
- *
- */
- if (devc->submodel == SUBMDL_ES1887) {
- if (hw_config->dma2 != -1) {
- devc->dma16 = hw_config->dma2;
- }
- /*
-		 * devc->duplex is initialized here, because
-		 * ess_set_dma_hw needs it.
- */
- if (devc->dma8 != devc->dma16 && devc->dma16 != -1) {
- devc->duplex = 1;
- }
- }
- if (!ess_set_dma_hw (devc)) {
- free_irq(devc->irq, devc);
- return 0;
- }
- return 1;
-}
-
-/****************************************************************************
- * *
- * ESS mixer *
- * *
- ****************************************************************************/
-
-#define ES688_RECORDING_DEVICES \
- ( SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD )
-#define ES688_MIXER_DEVICES \
- ( SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_LINE \
- | SOUND_MASK_MIC | SOUND_MASK_CD | SOUND_MASK_VOLUME \
- | SOUND_MASK_LINE2 | SOUND_MASK_SPEAKER )
-
-#define ES1688_RECORDING_DEVICES \
- ( ES688_RECORDING_DEVICES )
-#define ES1688_MIXER_DEVICES \
- ( ES688_MIXER_DEVICES | SOUND_MASK_RECLEV )
-
-#define ES1887_RECORDING_DEVICES \
- ( ES1688_RECORDING_DEVICES | SOUND_MASK_LINE2 | SOUND_MASK_SYNTH)
-#define ES1887_MIXER_DEVICES \
- ( ES1688_MIXER_DEVICES )
-
-/*
- * Mixer registers of ES1887
- *
- * These registers specifically take care of recording levels. To map a
- * playback device to its recording counterpart:
- * recording device = playback device + ES_REC_MIXER_RECDIFF
- */
-#define ES_REC_MIXER_RECBASE (SOUND_MIXER_LINE3 + 1)
-#define ES_REC_MIXER_RECDIFF (ES_REC_MIXER_RECBASE - SOUND_MIXER_SYNTH)
-
-#define ES_REC_MIXER_RECSYNTH (SOUND_MIXER_SYNTH + ES_REC_MIXER_RECDIFF)
-#define ES_REC_MIXER_RECPCM (SOUND_MIXER_PCM + ES_REC_MIXER_RECDIFF)
-#define ES_REC_MIXER_RECSPEAKER (SOUND_MIXER_SPEAKER + ES_REC_MIXER_RECDIFF)
-#define ES_REC_MIXER_RECLINE (SOUND_MIXER_LINE + ES_REC_MIXER_RECDIFF)
-#define ES_REC_MIXER_RECMIC (SOUND_MIXER_MIC + ES_REC_MIXER_RECDIFF)
-#define ES_REC_MIXER_RECCD (SOUND_MIXER_CD + ES_REC_MIXER_RECDIFF)
-#define ES_REC_MIXER_RECIMIX (SOUND_MIXER_IMIX + ES_REC_MIXER_RECDIFF)
-#define ES_REC_MIXER_RECALTPCM (SOUND_MIXER_ALTPCM + ES_REC_MIXER_RECDIFF)
-#define ES_REC_MIXER_RECRECLEV (SOUND_MIXER_RECLEV + ES_REC_MIXER_RECDIFF)
-#define ES_REC_MIXER_RECIGAIN (SOUND_MIXER_IGAIN + ES_REC_MIXER_RECDIFF)
-#define ES_REC_MIXER_RECOGAIN (SOUND_MIXER_OGAIN + ES_REC_MIXER_RECDIFF)
-#define ES_REC_MIXER_RECLINE1 (SOUND_MIXER_LINE1 + ES_REC_MIXER_RECDIFF)
-#define ES_REC_MIXER_RECLINE2 (SOUND_MIXER_LINE2 + ES_REC_MIXER_RECDIFF)
-#define ES_REC_MIXER_RECLINE3 (SOUND_MIXER_LINE3 + ES_REC_MIXER_RECDIFF)
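
A short aside on the arithmetic above: the RECDIFF offset just relocates a playback index into the record-mixer range, which ess_mixer_set (further below) relies on. The sketch assumes the usual OSS SOUND_MIXER_* numbering from <sys/soundcard.h>; the record_slot_of() helper is purely illustrative and not part of the driver.

	#include <stdio.h>
	#include <sys/soundcard.h>

	#define REC_BASE	(SOUND_MIXER_LINE3 + 1)
	#define REC_DIFF	(REC_BASE - SOUND_MIXER_SYNTH)

	/* Map a playback mixer device to its record-level companion slot. */
	static int record_slot_of(int playback_dev)
	{
		return playback_dev + REC_DIFF;
	}

	int main(void)
	{
		/* For SOUND_MIXER_LINE this yields the index the driver
		 * calls ES_REC_MIXER_RECLINE. */
		printf("%d\n", record_slot_of(SOUND_MIXER_LINE));
		return 0;
	}
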
-
-static mixer_tab es688_mix = {
-MIX_ENT(SOUND_MIXER_VOLUME, 0x32, 7, 4, 0x32, 3, 4),
-MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4),
-MIX_ENT(SOUND_MIXER_PCM, 0x14, 7, 4, 0x14, 3, 4),
-MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4),
-MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4),
-MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4),
-MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_RECLEV, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4),
-MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0)
-};
-
-/*
- * The ES1688 specifics... hopefully correct...
- * - 6-bit master volume
- *   (I was wrong: the ES1888 docs say the ES1688 didn't have it.)
- * - RECLEV control
- * These may apply to the ES688 too. I have no idea.
- */
-static mixer_tab es1688_mix = {
-MIX_ENT(SOUND_MIXER_VOLUME, 0x32, 7, 4, 0x32, 3, 4),
-MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4),
-MIX_ENT(SOUND_MIXER_PCM, 0x14, 7, 4, 0x14, 3, 4),
-MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4),
-MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4),
-MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4),
-MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_RECLEV, 0xb4, 7, 4, 0xb4, 3, 4),
-MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4),
-MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0)
-};
-
-static mixer_tab es1688later_mix = {
-MIX_ENT(SOUND_MIXER_VOLUME, 0x60, 5, 6, 0x62, 5, 6),
-MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4),
-MIX_ENT(SOUND_MIXER_PCM, 0x14, 7, 4, 0x14, 3, 4),
-MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4),
-MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4),
-MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4),
-MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_RECLEV, 0xb4, 7, 4, 0xb4, 3, 4),
-MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4),
-MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0)
-};
-
-/*
- * This one is for all ESS chips with a record mixer.
- * It's not used (yet) however
- */
-static mixer_tab es_rec_mix = {
-MIX_ENT(SOUND_MIXER_VOLUME, 0x60, 5, 6, 0x62, 5, 6),
-MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4),
-MIX_ENT(SOUND_MIXER_PCM, 0x14, 7, 4, 0x14, 3, 4),
-MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4),
-MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4),
-MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4),
-MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_RECLEV, 0xb4, 7, 4, 0xb4, 3, 4),
-MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4),
-MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECSYNTH, 0x6b, 7, 4, 0x6b, 3, 4),
-MIX_ENT(ES_REC_MIXER_RECPCM, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECSPEAKER, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECLINE, 0x6e, 7, 4, 0x6e, 3, 4),
-MIX_ENT(ES_REC_MIXER_RECMIC, 0x68, 7, 4, 0x68, 3, 4),
-MIX_ENT(ES_REC_MIXER_RECCD, 0x6a, 7, 4, 0x6a, 3, 4),
-MIX_ENT(ES_REC_MIXER_RECIMIX, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECALTPCM, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECRECLEV, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECIGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECOGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECLINE1, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECLINE2, 0x6c, 7, 4, 0x6c, 3, 4),
-MIX_ENT(ES_REC_MIXER_RECLINE3, 0x00, 0, 0, 0x00, 0, 0)
-};
-
-/*
- * This one is for the ES1887. It's a little different from es_rec_mix:
- * it uses 0x7c for the PCM playback level, because the ES1887 uses
- * Audio 2 for playback.
- */
-static mixer_tab es1887_mix = {
-MIX_ENT(SOUND_MIXER_VOLUME, 0x60, 5, 6, 0x62, 5, 6),
-MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4),
-MIX_ENT(SOUND_MIXER_PCM, 0x7c, 7, 4, 0x7c, 3, 4),
-MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4),
-MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4),
-MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4),
-MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_RECLEV, 0xb4, 7, 4, 0xb4, 3, 4),
-MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4),
-MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECSYNTH, 0x6b, 7, 4, 0x6b, 3, 4),
-MIX_ENT(ES_REC_MIXER_RECPCM, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECSPEAKER, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECLINE, 0x6e, 7, 4, 0x6e, 3, 4),
-MIX_ENT(ES_REC_MIXER_RECMIC, 0x68, 7, 4, 0x68, 3, 4),
-MIX_ENT(ES_REC_MIXER_RECCD, 0x6a, 7, 4, 0x6a, 3, 4),
-MIX_ENT(ES_REC_MIXER_RECIMIX, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECALTPCM, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECRECLEV, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECIGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECOGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECLINE1, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(ES_REC_MIXER_RECLINE2, 0x6c, 7, 4, 0x6c, 3, 4),
-MIX_ENT(ES_REC_MIXER_RECLINE3, 0x00, 0, 0, 0x00, 0, 0)
-};
-
-static int ess_has_rec_mixer (int submodel)
-{
- switch (submodel) {
- case SUBMDL_ES1887:
- return 1;
- default:
- return 0;
- }
-}
-
-#ifdef FKS_LOGGING
-static int ess_mixer_mon_regs[]
- = { 0x70, 0x71, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7d, 0x7f
- , 0xa1, 0xa2, 0xa4, 0xa5, 0xa8, 0xa9
- , 0xb1, 0xb2, 0xb4, 0xb5, 0xb6, 0xb7, 0xb9
- , 0x00};
-
-static void ess_show_mixerregs (sb_devc *devc)
-{
- int *mp = ess_mixer_mon_regs;
-
-return;
-
- while (*mp != 0) {
- printk (KERN_INFO "res (%x)=%x\n", *mp, (int)(ess_getmixer (devc, *mp)));
- mp++;
- }
-}
-#endif
-
-void ess_setmixer (sb_devc * devc, unsigned int port, unsigned int value)
-{
- unsigned long flags;
-
-#ifdef FKS_LOGGING
-printk(KERN_INFO "FKS: write mixer %x: %x\n", port, value);
-#endif
-
- spin_lock_irqsave(&devc->lock, flags);
- if (port >= 0xa0) {
- ess_write (devc, port, value);
- } else {
- outb(((unsigned char) (port & 0xff)), MIXER_ADDR);
-
- udelay(20);
- outb(((unsigned char) (value & 0xff)), MIXER_DATA);
- udelay(20);
- }
- spin_unlock_irqrestore(&devc->lock, flags);
-}
-
-unsigned int ess_getmixer (sb_devc * devc, unsigned int port)
-{
- unsigned int val;
- unsigned long flags;
-
- spin_lock_irqsave(&devc->lock, flags);
-
- if (port >= 0xa0) {
- val = ess_read (devc, port);
- } else {
- outb(((unsigned char) (port & 0xff)), MIXER_ADDR);
-
- udelay(20);
- val = inb(MIXER_DATA);
- udelay(20);
- }
- spin_unlock_irqrestore(&devc->lock, flags);
-
- return val;
-}
-
-static void ess_chgmixer
- (sb_devc * devc, unsigned int reg, unsigned int mask, unsigned int val)
-{
- int value;
-
- value = ess_getmixer (devc, reg);
- value = (value & ~mask) | (val & mask);
- ess_setmixer (devc, reg, value);
-}
-
-/*
- * ess_mixer_init must be called from sb_mixer_init
- */
-void ess_mixer_init (sb_devc * devc)
-{
- devc->mixer_caps = SOUND_CAP_EXCL_INPUT;
-
- /*
- * Take care of ES1887 specifics...
- */
- switch (devc->submodel) {
- case SUBMDL_ES1887:
- devc->supported_devices = ES1887_MIXER_DEVICES;
- devc->supported_rec_devices = ES1887_RECORDING_DEVICES;
-#ifdef FKS_LOGGING
-printk (KERN_INFO "FKS: ess_mixer_init dup = %d\n", devc->duplex);
-#endif
- if (devc->duplex) {
- devc->iomap = &es1887_mix;
- devc->iomap_sz = ARRAY_SIZE(es1887_mix);
- } else {
- devc->iomap = &es_rec_mix;
- devc->iomap_sz = ARRAY_SIZE(es_rec_mix);
- }
- break;
- default:
- if (devc->submodel < 8) {
- devc->supported_devices = ES688_MIXER_DEVICES;
- devc->supported_rec_devices = ES688_RECORDING_DEVICES;
- devc->iomap = &es688_mix;
- devc->iomap_sz = ARRAY_SIZE(es688_mix);
- } else {
- /*
- * es1688 has 4 bits master vol.
- * later chips have 6 bits (?)
- */
- devc->supported_devices = ES1688_MIXER_DEVICES;
- devc->supported_rec_devices = ES1688_RECORDING_DEVICES;
- if (devc->submodel < 0x10) {
- devc->iomap = &es1688_mix;
-				devc->iomap_sz = ARRAY_SIZE(es1688_mix);
- } else {
- devc->iomap = &es1688later_mix;
- devc->iomap_sz = ARRAY_SIZE(es1688later_mix);
- }
- }
- }
-}
-
-/*
- * Changing playback levels on an ESS chip with a record mixer means the
- * recording levels of the recorded inputs (devc->recmask) must be updated too!
- */
-int ess_mixer_set(sb_devc *devc, int dev, int left, int right)
-{
- if (ess_has_rec_mixer (devc->submodel) && (devc->recmask & (1 << dev))) {
- sb_common_mixer_set (devc, dev + ES_REC_MIXER_RECDIFF, left, right);
- }
- return sb_common_mixer_set (devc, dev, left, right);
-}
-
-/*
- * After an sb_dsp_reset, extended register 0xb4 (RECLEV) is reset too, so
- * RECLEV has to be restored afterwards. That is what ess_mixer_reload
- * is for.
- */
-void ess_mixer_reload (sb_devc *devc, int dev)
-{
- int left, right, value;
-
- value = devc->levels[dev];
- left = value & 0x000000ff;
- right = (value & 0x0000ff00) >> 8;
-
- sb_common_mixer_set(devc, dev, left, right);
-}
-
-static int es_rec_set_recmask(sb_devc * devc, int mask)
-{
- int i, i_mask, cur_mask, diff_mask;
- int value, left, right;
-
-#ifdef FKS_LOGGING
-printk (KERN_INFO "FKS: es_rec_set_recmask mask = %x\n", mask);
-#endif
- /*
- * Changing the recmask on an ESS chip with recording mixer means:
- * (1) Find the differences
- * (2) For "turned-on" inputs: make the recording level the playback level
- * (3) For "turned-off" inputs: make the recording level zero
- */
- cur_mask = devc->recmask;
- diff_mask = (cur_mask ^ mask);
-
- for (i = 0; i < 32; i++) {
- i_mask = (1 << i);
- if (diff_mask & i_mask) { /* Difference? (1) */
- if (mask & i_mask) { /* Turn it on (2) */
- value = devc->levels[i];
- left = value & 0x000000ff;
- right = (value & 0x0000ff00) >> 8;
- } else { /* Turn it off (3) */
- left = 0;
- right = 0;
- }
- sb_common_mixer_set(devc, i + ES_REC_MIXER_RECDIFF, left, right);
- }
- }
- return mask;
-}
-
-int ess_set_recmask(sb_devc * devc, int *mask)
-{
- /* This applies to ESS chips with record mixers only! */
-
- if (ess_has_rec_mixer (devc->submodel)) {
- *mask = es_rec_set_recmask (devc, *mask);
- return 1; /* Applied */
- } else {
- return 0; /* Not applied */
- }
-}
-
-/*
- * ess_mixer_reset must be called from sb_mixer_reset
- */
-int ess_mixer_reset (sb_devc * devc)
-{
- /*
- * Separate actions for ESS chips with a record mixer:
- */
- if (ess_has_rec_mixer (devc->submodel)) {
- switch (devc->submodel) {
- case SUBMDL_ES1887:
- /*
- * Separate actions for ES1887:
- * Change registers 7a and 1c to make the record mixer the
- * actual recording source.
- */
- ess_chgmixer(devc, 0x7a, 0x18, 0x08);
- ess_chgmixer(devc, 0x1c, 0x07, 0x07);
- break;
- }
- /*
- * Call set_recmask for proper initialization
- */
- devc->recmask = devc->supported_rec_devices;
- es_rec_set_recmask(devc, 0);
- devc->recmask = 0;
-
- return 1; /* We took care of recmask. */
- } else {
- return 0; /* We didn't take care; caller do it */
- }
-}
-
-/****************************************************************************
- * *
- * ESS midi *
- * *
- ****************************************************************************/
-
-/*
- * FKS: The IRQ may be shared. Hm. And if so, then what?
- */
-int ess_midi_init(sb_devc * devc, struct address_info *hw_config)
-{
- unsigned char cfg, tmp;
-
- cfg = ess_getmixer (devc, 0x40) & 0x03;
-
- if (devc->submodel < 8) {
- ess_setmixer (devc, 0x40, cfg | 0x03); /* Enable OPL3 & joystick */
- return 0; /* ES688 doesn't support MPU401 mode */
- }
- tmp = (hw_config->io_base & 0x0f0) >> 4;
-
- if (tmp > 3) {
- ess_setmixer (devc, 0x40, cfg);
- return 0;
- }
- cfg |= tmp << 3;
-
- tmp = 1; /* MPU enabled without interrupts */
-
-	/* The IRQ may be shared: if so, the value is negative */
-
- switch (abs(hw_config->irq)) {
- case 9:
- tmp = 0x4;
- break;
- case 5:
- tmp = 0x5;
- break;
- case 7:
- tmp = 0x6;
- break;
- case 10:
- tmp = 0x7;
- break;
- default:
- return 0;
- }
-
- cfg |= tmp << 5;
- ess_setmixer (devc, 0x40, cfg | 0x03);
-
- return 1;
-}
-
diff --git a/sound/oss/sb_ess.h b/sound/oss/sb_ess.h
deleted file mode 100644
index 1c741212bcfc..000000000000
--- a/sound/oss/sb_ess.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Created: 9-Jan-1999 Rolf Fokkens
- */
-
-extern void ess_intr
- (sb_devc *devc);
-extern int ess_dsp_init
- (sb_devc *devc, struct address_info *hw_config);
-
-extern struct audio_driver *ess_audio_init
- (sb_devc *devc, int *audio_flags, int *format_mask);
-extern int ess_midi_init
- (sb_devc *devc, struct address_info *hw_config);
-extern void ess_mixer_init
- (sb_devc *devc);
-
-extern int ess_init
- (sb_devc *devc, struct address_info *hw_config);
-extern int ess_dsp_reset
- (sb_devc *devc);
-
-extern void ess_setmixer
- (sb_devc *devc, unsigned int port, unsigned int value);
-extern unsigned int ess_getmixer
- (sb_devc *devc, unsigned int port);
-extern int ess_mixer_set
- (sb_devc *devc, int dev, int left, int right);
-extern int ess_mixer_reset
- (sb_devc *devc);
-extern void ess_mixer_reload
- (sb_devc * devc, int dev);
-extern int ess_set_recmask
- (sb_devc *devc, int *mask);
-
diff --git a/sound/oss/sb_midi.c b/sound/oss/sb_midi.c
deleted file mode 100644
index 551ee7557b4e..000000000000
--- a/sound/oss/sb_midi.c
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * sound/oss/sb_midi.c
- *
- * The low level driver for the Sound Blaster DS chips.
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- */
-
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-
-#include "sound_config.h"
-
-#include "sb.h"
-#undef SB_TEST_IRQ
-
-/*
- * The DSP channel can be used either for input or for output. The variable
- * 'sb_irq_mode' is set the first time the program calls read or write after
- * open. The current version doesn't support mode changes without closing
- * and reopening the device. Support for this feature may be implemented in a
- * future version of this driver.
- */
-
-
-static int sb_midi_open(int dev, int mode,
- void (*input) (int dev, unsigned char data),
- void (*output) (int dev)
-)
-{
- sb_devc *devc = midi_devs[dev]->devc;
- unsigned long flags;
-
- if (devc == NULL)
- return -ENXIO;
-
- spin_lock_irqsave(&devc->lock, flags);
- if (devc->opened)
- {
- spin_unlock_irqrestore(&devc->lock, flags);
- return -EBUSY;
- }
- devc->opened = 1;
- spin_unlock_irqrestore(&devc->lock, flags);
-
- devc->irq_mode = IMODE_MIDI;
- devc->midi_broken = 0;
-
- sb_dsp_reset(devc);
-
- if (!sb_dsp_command(devc, 0x35)) /* Start MIDI UART mode */
- {
- devc->opened = 0;
- return -EIO;
- }
- devc->intr_active = 1;
-
- if (mode & OPEN_READ)
- {
- devc->input_opened = 1;
- devc->midi_input_intr = input;
- }
- return 0;
-}
-
-static void sb_midi_close(int dev)
-{
- sb_devc *devc = midi_devs[dev]->devc;
- unsigned long flags;
-
- if (devc == NULL)
- return;
-
- spin_lock_irqsave(&devc->lock, flags);
- sb_dsp_reset(devc);
- devc->intr_active = 0;
- devc->input_opened = 0;
- devc->opened = 0;
- spin_unlock_irqrestore(&devc->lock, flags);
-}
-
-static int sb_midi_out(int dev, unsigned char midi_byte)
-{
- sb_devc *devc = midi_devs[dev]->devc;
-
- if (devc == NULL)
- return 1;
-
- if (devc->midi_broken)
- return 1;
-
- if (!sb_dsp_command(devc, midi_byte))
- {
- devc->midi_broken = 1;
- return 1;
- }
- return 1;
-}
-
-static int sb_midi_start_read(int dev)
-{
- return 0;
-}
-
-static int sb_midi_end_read(int dev)
-{
- sb_devc *devc = midi_devs[dev]->devc;
-
- if (devc == NULL)
- return -ENXIO;
-
- sb_dsp_reset(devc);
- devc->intr_active = 0;
- return 0;
-}
-
-static int sb_midi_ioctl(int dev, unsigned cmd, void __user *arg)
-{
- return -EINVAL;
-}
-
-void sb_midi_interrupt(sb_devc * devc)
-{
- unsigned long flags;
- unsigned char data;
-
- if (devc == NULL)
- return;
-
- spin_lock_irqsave(&devc->lock, flags);
-
- data = inb(DSP_READ);
- if (devc->input_opened)
- devc->midi_input_intr(devc->my_mididev, data);
-
- spin_unlock_irqrestore(&devc->lock, flags);
-}
-
-#define MIDI_SYNTH_NAME "Sound Blaster Midi"
-#define MIDI_SYNTH_CAPS 0
-#include "midi_synth.h"
-
-static struct midi_operations sb_midi_operations =
-{
- .owner = THIS_MODULE,
- .info = {"Sound Blaster", 0, 0, SNDCARD_SB},
- .converter = &std_midi_synth,
- .in_info = {0},
- .open = sb_midi_open,
- .close = sb_midi_close,
- .ioctl = sb_midi_ioctl,
- .outputc = sb_midi_out,
- .start_read = sb_midi_start_read,
- .end_read = sb_midi_end_read,
-};
-
-void sb_dsp_midi_init(sb_devc * devc, struct module *owner)
-{
- int dev;
-
- if (devc->model < 2) /* No MIDI support for SB 1.x */
- return;
-
- dev = sound_alloc_mididev();
-
- if (dev == -1)
- {
- printk(KERN_ERR "sb_midi: too many MIDI devices detected\n");
- return;
- }
- std_midi_synth.midi_dev = devc->my_mididev = dev;
- midi_devs[dev] = kmalloc(sizeof(struct midi_operations), GFP_KERNEL);
- if (midi_devs[dev] == NULL)
- {
- printk(KERN_WARNING "Sound Blaster: failed to allocate MIDI memory.\n");
- sound_unload_mididev(dev);
- return;
- }
- memcpy((char *) midi_devs[dev], (char *) &sb_midi_operations,
- sizeof(struct midi_operations));
-
- if (owner)
- midi_devs[dev]->owner = owner;
-
- midi_devs[dev]->devc = devc;
-
-
- midi_devs[dev]->converter = kmalloc(sizeof(struct synth_operations), GFP_KERNEL);
- if (midi_devs[dev]->converter == NULL)
- {
- printk(KERN_WARNING "Sound Blaster: failed to allocate MIDI memory.\n");
- kfree(midi_devs[dev]);
- sound_unload_mididev(dev);
- return;
- }
- memcpy((char *) midi_devs[dev]->converter, (char *) &std_midi_synth,
- sizeof(struct synth_operations));
-
- midi_devs[dev]->converter->id = "SBMIDI";
- sequencer_init();
-}
diff --git a/sound/oss/sb_mixer.c b/sound/oss/sb_mixer.c
deleted file mode 100644
index acf7586aeb47..000000000000
--- a/sound/oss/sb_mixer.c
+++ /dev/null
@@ -1,770 +0,0 @@
-/*
- * sound/oss/sb_mixer.c
- *
- * The low level mixer driver for the Sound Blaster compatible cards.
- */
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- *
- * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
- * Rolf Fokkens (Dec 20 1998) : Moved ESS stuff into sb_ess.[ch]
- * Stanislav Voronyi <stas@esc.kharkov.com> : Support for AWE 3DSE device (Jun 7 1999)
- */
-
-#include <linux/slab.h>
-
-#include "sound_config.h"
-
-#define __SB_MIXER_C__
-
-#include "sb.h"
-#include "sb_mixer.h"
-
-#include "sb_ess.h"
-
-#define SBPRO_RECORDING_DEVICES (SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD)
-
-/* Same as SB Pro, unless I find otherwise */
-#define SGNXPRO_RECORDING_DEVICES SBPRO_RECORDING_DEVICES
-
-#define SBPRO_MIXER_DEVICES (SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_LINE | SOUND_MASK_MIC | \
- SOUND_MASK_CD | SOUND_MASK_VOLUME)
-
-/* SG NX Pro has treble and bass settings on the mixer. The 'speaker'
- * channel is the COVOX/DisneySoundSource emulation volume control
- * on the mixer. It does NOT control speaker volume. Should it have its
- * own mask eventually?
- */
-#define SGNXPRO_MIXER_DEVICES (SBPRO_MIXER_DEVICES|SOUND_MASK_BASS| \
- SOUND_MASK_TREBLE|SOUND_MASK_SPEAKER )
-
-#define SB16_RECORDING_DEVICES (SOUND_MASK_SYNTH | SOUND_MASK_LINE | SOUND_MASK_MIC | \
- SOUND_MASK_CD)
-
-#define SB16_OUTFILTER_DEVICES (SOUND_MASK_LINE | SOUND_MASK_MIC | \
- SOUND_MASK_CD)
-
-#define SB16_MIXER_DEVICES (SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_SPEAKER | SOUND_MASK_LINE | SOUND_MASK_MIC | \
- SOUND_MASK_CD | \
- SOUND_MASK_IGAIN | SOUND_MASK_OGAIN | \
- SOUND_MASK_VOLUME | SOUND_MASK_BASS | SOUND_MASK_TREBLE | \
- SOUND_MASK_IMIX)
-
-/* These are the only devices that are working at the moment. Others could
- * be added once they are identified and a method is found to control them.
- */
-#define ALS007_MIXER_DEVICES (SOUND_MASK_SYNTH | SOUND_MASK_LINE | \
- SOUND_MASK_PCM | SOUND_MASK_MIC | \
- SOUND_MASK_CD | \
- SOUND_MASK_VOLUME)
-
-static mixer_tab sbpro_mix = {
-MIX_ENT(SOUND_MIXER_VOLUME, 0x22, 7, 4, 0x22, 3, 4),
-MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_SYNTH, 0x26, 7, 4, 0x26, 3, 4),
-MIX_ENT(SOUND_MIXER_PCM, 0x04, 7, 4, 0x04, 3, 4),
-MIX_ENT(SOUND_MIXER_SPEAKER, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE, 0x2e, 7, 4, 0x2e, 3, 4),
-MIX_ENT(SOUND_MIXER_MIC, 0x0a, 2, 3, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_CD, 0x28, 7, 4, 0x28, 3, 4),
-MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_RECLEV, 0x00, 0, 0, 0x00, 0, 0)
-};
-
-static mixer_tab sb16_mix = {
-MIX_ENT(SOUND_MIXER_VOLUME, 0x30, 7, 5, 0x31, 7, 5),
-MIX_ENT(SOUND_MIXER_BASS, 0x46, 7, 4, 0x47, 7, 4),
-MIX_ENT(SOUND_MIXER_TREBLE, 0x44, 7, 4, 0x45, 7, 4),
-MIX_ENT(SOUND_MIXER_SYNTH, 0x34, 7, 5, 0x35, 7, 5),
-MIX_ENT(SOUND_MIXER_PCM, 0x32, 7, 5, 0x33, 7, 5),
-MIX_ENT(SOUND_MIXER_SPEAKER, 0x3b, 7, 2, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE, 0x38, 7, 5, 0x39, 7, 5),
-MIX_ENT(SOUND_MIXER_MIC, 0x3a, 7, 5, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_CD, 0x36, 7, 5, 0x37, 7, 5),
-MIX_ENT(SOUND_MIXER_IMIX, 0x3c, 0, 1, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_RECLEV, 0x3f, 7, 2, 0x40, 7, 2), /* Obsolete. Use IGAIN */
-MIX_ENT(SOUND_MIXER_IGAIN, 0x3f, 7, 2, 0x40, 7, 2),
-MIX_ENT(SOUND_MIXER_OGAIN, 0x41, 7, 2, 0x42, 7, 2)
-};
-
-static mixer_tab als007_mix =
-{
-MIX_ENT(SOUND_MIXER_VOLUME, 0x62, 7, 4, 0x62, 3, 4),
-MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_SYNTH, 0x66, 7, 4, 0x66, 3, 4),
-MIX_ENT(SOUND_MIXER_PCM, 0x64, 7, 4, 0x64, 3, 4),
-MIX_ENT(SOUND_MIXER_SPEAKER, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_LINE, 0x6e, 7, 4, 0x6e, 3, 4),
-MIX_ENT(SOUND_MIXER_MIC, 0x6a, 2, 3, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_CD, 0x68, 7, 4, 0x68, 3, 4),
-MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_RECLEV, 0x00, 0, 0, 0x00, 0, 0), /* Obsolete. Use IGAIN */
-MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0),
-MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0)
-};
-
-
-/* With SM_GAMES the master volume is lower and the PCM & FM volumes
-   are higher than with the SB Pro. This improves the
-   sound quality. */
-
-static int smg_default_levels[32] =
-{
- 0x2020, /* Master Volume */
- 0x4b4b, /* Bass */
- 0x4b4b, /* Treble */
- 0x6464, /* FM */
- 0x6464, /* PCM */
- 0x4b4b, /* PC Speaker */
- 0x4b4b, /* Ext Line */
- 0x0000, /* Mic */
- 0x4b4b, /* CD */
- 0x4b4b, /* Recording monitor */
- 0x4b4b, /* SB PCM */
- 0x4b4b, /* Recording level */
- 0x4b4b, /* Input gain */
- 0x4b4b, /* Output gain */
- 0x4040, /* Line1 */
- 0x4040, /* Line2 */
- 0x1515 /* Line3 */
-};
-
-static int sb_default_levels[32] =
-{
- 0x5a5a, /* Master Volume */
- 0x4b4b, /* Bass */
- 0x4b4b, /* Treble */
- 0x4b4b, /* FM */
- 0x4b4b, /* PCM */
- 0x4b4b, /* PC Speaker */
- 0x4b4b, /* Ext Line */
- 0x1010, /* Mic */
- 0x4b4b, /* CD */
- 0x0000, /* Recording monitor */
- 0x4b4b, /* SB PCM */
- 0x4b4b, /* Recording level */
- 0x4b4b, /* Input gain */
- 0x4b4b, /* Output gain */
- 0x4040, /* Line1 */
- 0x4040, /* Line2 */
- 0x1515 /* Line3 */
-};
-
-static unsigned char sb16_recmasks_L[SOUND_MIXER_NRDEVICES] =
-{
- 0x00, /* SOUND_MIXER_VOLUME */
- 0x00, /* SOUND_MIXER_BASS */
- 0x00, /* SOUND_MIXER_TREBLE */
- 0x40, /* SOUND_MIXER_SYNTH */
- 0x00, /* SOUND_MIXER_PCM */
- 0x00, /* SOUND_MIXER_SPEAKER */
- 0x10, /* SOUND_MIXER_LINE */
- 0x01, /* SOUND_MIXER_MIC */
- 0x04, /* SOUND_MIXER_CD */
- 0x00, /* SOUND_MIXER_IMIX */
- 0x00, /* SOUND_MIXER_ALTPCM */
- 0x00, /* SOUND_MIXER_RECLEV */
- 0x00, /* SOUND_MIXER_IGAIN */
- 0x00 /* SOUND_MIXER_OGAIN */
-};
-
-static unsigned char sb16_recmasks_R[SOUND_MIXER_NRDEVICES] =
-{
- 0x00, /* SOUND_MIXER_VOLUME */
- 0x00, /* SOUND_MIXER_BASS */
- 0x00, /* SOUND_MIXER_TREBLE */
- 0x20, /* SOUND_MIXER_SYNTH */
- 0x00, /* SOUND_MIXER_PCM */
- 0x00, /* SOUND_MIXER_SPEAKER */
- 0x08, /* SOUND_MIXER_LINE */
- 0x01, /* SOUND_MIXER_MIC */
- 0x02, /* SOUND_MIXER_CD */
- 0x00, /* SOUND_MIXER_IMIX */
- 0x00, /* SOUND_MIXER_ALTPCM */
- 0x00, /* SOUND_MIXER_RECLEV */
- 0x00, /* SOUND_MIXER_IGAIN */
- 0x00 /* SOUND_MIXER_OGAIN */
-};
-
-static char smw_mix_regs[] = /* Left mixer registers */
-{
- 0x0b, /* SOUND_MIXER_VOLUME */
- 0x0d, /* SOUND_MIXER_BASS */
- 0x0d, /* SOUND_MIXER_TREBLE */
- 0x05, /* SOUND_MIXER_SYNTH */
- 0x09, /* SOUND_MIXER_PCM */
- 0x00, /* SOUND_MIXER_SPEAKER */
- 0x03, /* SOUND_MIXER_LINE */
- 0x01, /* SOUND_MIXER_MIC */
- 0x07, /* SOUND_MIXER_CD */
- 0x00, /* SOUND_MIXER_IMIX */
- 0x00, /* SOUND_MIXER_ALTPCM */
- 0x00, /* SOUND_MIXER_RECLEV */
- 0x00, /* SOUND_MIXER_IGAIN */
- 0x00, /* SOUND_MIXER_OGAIN */
- 0x00, /* SOUND_MIXER_LINE1 */
- 0x00, /* SOUND_MIXER_LINE2 */
- 0x00 /* SOUND_MIXER_LINE3 */
-};
-
-static int sbmixnum = 1;
-
-static void sb_mixer_reset(sb_devc * devc);
-
-void sb_mixer_set_stereo(sb_devc * devc, int mode)
-{
- sb_chgmixer(devc, OUT_FILTER, STEREO_DAC, (mode ? STEREO_DAC : MONO_DAC));
-}
-
-static int detect_mixer(sb_devc * devc)
-{
- /* Just trust the mixer is there */
- return 1;
-}
-
-static void oss_change_bits(sb_devc *devc, unsigned char *regval, int dev, int chn, int newval)
-{
- unsigned char mask;
- int shift;
-
- mask = (1 << (*devc->iomap)[dev][chn].nbits) - 1;
- newval = (int) ((newval * mask) + 50) / 100; /* Scale */
-
- shift = (*devc->iomap)[dev][chn].bitoffs - (*devc->iomap)[dev][LEFT_CHN].nbits + 1;
-
- *regval &= ~(mask << shift); /* Mask out previous value */
- *regval |= (newval & mask) << shift; /* Set the new value */
-}
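
The scaling in oss_change_bits() is easier to follow with one worked value. The sketch below is a stand-alone user-space program, not part of the driver: the set_field() helper is hypothetical, and the "register 0x22, bits 7..4" layout is simply the left master-volume entry from sbpro_mix further below.

	#include <stdio.h>

	/* Replays the arithmetic of oss_change_bits() for one field:
	 * a 4-bit volume field whose most significant bit is bit 7
	 * (bits 7..4), as in the sbpro_mix VOLUME entry.
	 */
	static unsigned char set_field(unsigned char regval, int bitoffs,
				       int nbits, int level /* 0..100 */)
	{
		unsigned char mask = (1 << nbits) - 1;
		int newval = (level * mask + 50) / 100;	/* scale 0..100 -> 0..mask */
		int shift = bitoffs - nbits + 1;	/* LSB position of the field */

		regval &= ~(mask << shift);		/* mask out the old value */
		regval |= (newval & mask) << shift;	/* insert the new one */
		return regval;
	}

	int main(void)
	{
		/* 50% on a 4-bit field at bits 7..4 becomes 0x8 in the
		 * high nibble, i.e. the register reads back as 0x80. */
		printf("0x%02x\n", set_field(0x00, 7, 4, 50));
		return 0;
	}
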
-
-static int sb_mixer_get(sb_devc * devc, int dev)
-{
- if (!((1 << dev) & devc->supported_devices))
- return -EINVAL;
- return devc->levels[dev];
-}
-
-void smw_mixer_init(sb_devc * devc)
-{
- int i;
-
- sb_setmixer(devc, 0x00, 0x18); /* Mute unused (Telephone) line */
- sb_setmixer(devc, 0x10, 0x38); /* Config register 2 */
-
- devc->supported_devices = 0;
- for (i = 0; i < sizeof(smw_mix_regs); i++)
- if (smw_mix_regs[i] != 0)
- devc->supported_devices |= (1 << i);
-
- devc->supported_rec_devices = devc->supported_devices &
- ~(SOUND_MASK_BASS | SOUND_MASK_TREBLE | SOUND_MASK_PCM | SOUND_MASK_VOLUME);
- sb_mixer_reset(devc);
-}
-
-int sb_common_mixer_set(sb_devc * devc, int dev, int left, int right)
-{
- int regoffs;
- unsigned char val;
-
- if ((dev < 0) || (dev >= devc->iomap_sz))
- return -EINVAL;
-
- regoffs = (*devc->iomap)[dev][LEFT_CHN].regno;
-
- if (regoffs == 0)
- return -EINVAL;
-
- val = sb_getmixer(devc, regoffs);
- oss_change_bits(devc, &val, dev, LEFT_CHN, left);
-
- if ((*devc->iomap)[dev][RIGHT_CHN].regno != regoffs) /*
- * Change register
- */
- {
- sb_setmixer(devc, regoffs, val); /*
- * Save the old one
- */
- regoffs = (*devc->iomap)[dev][RIGHT_CHN].regno;
-
- if (regoffs == 0)
- return left | (left << 8); /*
- * Just left channel present
- */
-
- val = sb_getmixer(devc, regoffs); /*
- * Read the new one
- */
- }
- oss_change_bits(devc, &val, dev, RIGHT_CHN, right);
-
- sb_setmixer(devc, regoffs, val);
-
- return left | (right << 8);
-}
-
-static int smw_mixer_set(sb_devc * devc, int dev, int left, int right)
-{
- int reg, val;
-
- switch (dev)
- {
- case SOUND_MIXER_VOLUME:
- sb_setmixer(devc, 0x0b, 96 - (96 * left / 100)); /* 96=mute, 0=max */
- sb_setmixer(devc, 0x0c, 96 - (96 * right / 100));
- break;
-
- case SOUND_MIXER_BASS:
- case SOUND_MIXER_TREBLE:
- devc->levels[dev] = left | (right << 8);
- /* Set left bass and treble values */
- val = ((devc->levels[SOUND_MIXER_TREBLE] & 0xff) * 16 / (unsigned) 100) << 4;
- val |= ((devc->levels[SOUND_MIXER_BASS] & 0xff) * 16 / (unsigned) 100) & 0x0f;
- sb_setmixer(devc, 0x0d, val);
-
- /* Set right bass and treble values */
- val = (((devc->levels[SOUND_MIXER_TREBLE] >> 8) & 0xff) * 16 / (unsigned) 100) << 4;
- val |= (((devc->levels[SOUND_MIXER_BASS] >> 8) & 0xff) * 16 / (unsigned) 100) & 0x0f;
- sb_setmixer(devc, 0x0e, val);
-
- break;
-
- default:
- /* bounds check */
- if (dev < 0 || dev >= ARRAY_SIZE(smw_mix_regs))
- return -EINVAL;
- reg = smw_mix_regs[dev];
- if (reg == 0)
- return -EINVAL;
- sb_setmixer(devc, reg, (24 - (24 * left / 100)) | 0x20); /* 24=mute, 0=max */
- sb_setmixer(devc, reg + 1, (24 - (24 * right / 100)) | 0x40);
- }
-
- devc->levels[dev] = left | (right << 8);
- return left | (right << 8);
-}
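
The bass/treble nibble packing in smw_mixer_set() above is easy to misread, so here is a tiny stand-alone program that replays the same arithmetic for one assumed pair of levels (75% treble, 50% bass). The values and the program itself are illustrative only.

	#include <stdio.h>

	/* High nibble = treble, low nibble = bass, each scaled from
	 * 0..100 down to 0..15, as written to registers 0x0d/0x0e. */
	int main(void)
	{
		unsigned int treble = 75, bass = 50;	/* 0..100 each */
		unsigned char val = ((treble * 16 / 100) << 4) |
				    ((bass * 16 / 100) & 0x0f);

		printf("0x%02x\n", val);		/* prints 0xc8 */
		return 0;
	}
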
-
-static int sb_mixer_set(sb_devc * devc, int dev, int value)
-{
- int left = value & 0x000000ff;
- int right = (value & 0x0000ff00) >> 8;
- int retval;
-
- if (left > 100)
- left = 100;
- if (right > 100)
- right = 100;
-
- if ((dev < 0) || (dev > 31))
- return -EINVAL;
-
- if (!(devc->supported_devices & (1 << dev))) /*
- * Not supported
- */
- return -EINVAL;
-
- /* Differentiate depending on the chipsets */
- switch (devc->model) {
- case MDL_SMW:
- retval = smw_mixer_set(devc, dev, left, right);
- break;
- case MDL_ESS:
- retval = ess_mixer_set(devc, dev, left, right);
- break;
- default:
- retval = sb_common_mixer_set(devc, dev, left, right);
- }
- if (retval >= 0) devc->levels[dev] = retval;
-
- return retval;
-}
-
-/*
- * set_recsrc doesn't apply to ES188x
- */
-static void set_recsrc(sb_devc * devc, int src)
-{
- sb_setmixer(devc, RECORD_SRC, (sb_getmixer(devc, RECORD_SRC) & ~7) | (src & 0x7));
-}
-
-static int set_recmask(sb_devc * devc, int mask)
-{
- int devmask, i;
- unsigned char regimageL, regimageR;
-
- devmask = mask & devc->supported_rec_devices;
-
- switch (devc->model)
- {
- case MDL_SBPRO:
- case MDL_ESS:
- case MDL_JAZZ:
- case MDL_SMW:
- if (devc->model == MDL_ESS && ess_set_recmask (devc, &devmask)) {
- break;
- }
- if (devmask != SOUND_MASK_MIC &&
- devmask != SOUND_MASK_LINE &&
- devmask != SOUND_MASK_CD)
- {
- /*
- * More than one device selected. Drop the
- * previous selection
- */
- devmask &= ~devc->recmask;
- }
- if (devmask != SOUND_MASK_MIC &&
- devmask != SOUND_MASK_LINE &&
- devmask != SOUND_MASK_CD)
- {
- /*
- * More than one device selected. Default to
- * mic
- */
- devmask = SOUND_MASK_MIC;
- }
- if (devmask ^ devc->recmask) /*
- * Input source changed
- */
- {
- switch (devmask)
- {
- case SOUND_MASK_MIC:
- set_recsrc(devc, SRC__MIC);
- break;
-
- case SOUND_MASK_LINE:
- set_recsrc(devc, SRC__LINE);
- break;
-
- case SOUND_MASK_CD:
- set_recsrc(devc, SRC__CD);
- break;
-
- default:
- set_recsrc(devc, SRC__MIC);
- }
- }
- break;
-
- case MDL_SB16:
- if (!devmask)
- devmask = SOUND_MASK_MIC;
-
- if (devc->submodel == SUBMDL_ALS007)
- {
- switch (devmask)
- {
- case SOUND_MASK_LINE:
- sb_setmixer(devc, ALS007_RECORD_SRC, ALS007_LINE);
- break;
- case SOUND_MASK_CD:
- sb_setmixer(devc, ALS007_RECORD_SRC, ALS007_CD);
- break;
- case SOUND_MASK_SYNTH:
- sb_setmixer(devc, ALS007_RECORD_SRC, ALS007_SYNTH);
- break;
- default: /* Also takes care of SOUND_MASK_MIC case */
- sb_setmixer(devc, ALS007_RECORD_SRC, ALS007_MIC);
- break;
- }
- }
- else
- {
- regimageL = regimageR = 0;
- for (i = 0; i < SOUND_MIXER_NRDEVICES; i++)
- {
- if ((1 << i) & devmask)
- {
- regimageL |= sb16_recmasks_L[i];
- regimageR |= sb16_recmasks_R[i];
- }
- sb_setmixer (devc, SB16_IMASK_L, regimageL);
- sb_setmixer (devc, SB16_IMASK_R, regimageR);
- }
- }
- break;
- }
- devc->recmask = devmask;
- return devc->recmask;
-}
-
-static int set_outmask(sb_devc * devc, int mask)
-{
- int devmask, i;
- unsigned char regimage;
-
- devmask = mask & devc->supported_out_devices;
-
- switch (devc->model)
- {
- case MDL_SB16:
- if (devc->submodel == SUBMDL_ALS007)
- break;
- else
- {
- regimage = 0;
- for (i = 0; i < SOUND_MIXER_NRDEVICES; i++)
- {
- if ((1 << i) & devmask)
- {
- regimage |= (sb16_recmasks_L[i] | sb16_recmasks_R[i]);
- }
- sb_setmixer (devc, SB16_OMASK, regimage);
- }
- }
- break;
- default:
- break;
- }
-
- devc->outmask = devmask;
- return devc->outmask;
-}
-
-static int sb_mixer_ioctl(int dev, unsigned int cmd, void __user *arg)
-{
- sb_devc *devc = mixer_devs[dev]->devc;
- int val, ret;
- int __user *p = arg;
-
- /*
- * Use ioctl(fd, SOUND_MIXER_AGC, &mode) to turn AGC off (0) or on (1).
-	 * Use ioctl(fd, SOUND_MIXER_3DSE, &mode) to turn 3DSE off (0) or on (1);
-	 * with mode==2 the current 3DSE state is read back into mode.
- */
- if (devc->model == MDL_SB16) {
- if (cmd == SOUND_MIXER_AGC)
- {
- if (get_user(val, p))
- return -EFAULT;
- sb_setmixer(devc, 0x43, (~val) & 0x01);
- return 0;
- }
- if (cmd == SOUND_MIXER_3DSE)
- {
-			/* I put 15 here, but I don't know the exact version.
-			   At least my 4.13 doesn't have 3DSE, while 4.16 has it. */
- if (devc->minor < 15)
- return -EINVAL;
- if (get_user(val, p))
- return -EFAULT;
- if (val == 0 || val == 1)
- sb_chgmixer(devc, AWE_3DSE, 0x01, val);
- else if (val == 2)
- {
- ret = sb_getmixer(devc, AWE_3DSE)&0x01;
- return put_user(ret, p);
- }
- else
- return -EINVAL;
- return 0;
- }
- }
- if (((cmd >> 8) & 0xff) == 'M')
- {
- if (_SIOC_DIR(cmd) & _SIOC_WRITE)
- {
- if (get_user(val, p))
- return -EFAULT;
- switch (cmd & 0xff)
- {
- case SOUND_MIXER_RECSRC:
- ret = set_recmask(devc, val);
- break;
-
- case SOUND_MIXER_OUTSRC:
- ret = set_outmask(devc, val);
- break;
-
- default:
- ret = sb_mixer_set(devc, cmd & 0xff, val);
- }
- }
- else switch (cmd & 0xff)
- {
- case SOUND_MIXER_RECSRC:
- ret = devc->recmask;
- break;
-
- case SOUND_MIXER_OUTSRC:
- ret = devc->outmask;
- break;
-
- case SOUND_MIXER_DEVMASK:
- ret = devc->supported_devices;
- break;
-
- case SOUND_MIXER_STEREODEVS:
- ret = devc->supported_devices;
- /* The ESS seems to have stereo mic controls */
- if (devc->model == MDL_ESS)
- ret &= ~(SOUND_MASK_SPEAKER|SOUND_MASK_IMIX);
- else if (devc->model != MDL_JAZZ && devc->model != MDL_SMW)
- ret &= ~(SOUND_MASK_MIC | SOUND_MASK_SPEAKER | SOUND_MASK_IMIX);
- break;
-
- case SOUND_MIXER_RECMASK:
- ret = devc->supported_rec_devices;
- break;
-
- case SOUND_MIXER_OUTMASK:
- ret = devc->supported_out_devices;
- break;
-
- case SOUND_MIXER_CAPS:
- ret = devc->mixer_caps;
- break;
-
- default:
- ret = sb_mixer_get(devc, cmd & 0xff);
- break;
- }
- return put_user(ret, p);
- } else
- return -EINVAL;
-}
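
The comment at the top of sb_mixer_ioctl() describes a user-space calling convention, so a minimal sketch of how an application might use it may help. This is an assumption-laden illustration: the /dev/mixer path and error handling are assumptions, the SOUND_MIXER_AGC / SOUND_MIXER_3DSE requests come from the OSS <sys/soundcard.h> header, and only an SB16-family card honours them.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/soundcard.h>

	int main(void)
	{
		int fd = open("/dev/mixer", O_RDWR);	/* path is an assumption */
		int mode;

		if (fd < 0)
			return 1;

		mode = 1;				/* turn AGC on (SB16 only) */
		ioctl(fd, SOUND_MIXER_AGC, &mode);

		mode = 2;				/* query the current 3DSE state */
		if (ioctl(fd, SOUND_MIXER_3DSE, &mode) == 0)
			printf("3DSE is %s\n", mode ? "on" : "off");

		close(fd);
		return 0;
	}
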
-
-static struct mixer_operations sb_mixer_operations =
-{
- .owner = THIS_MODULE,
- .id = "SB",
- .name = "Sound Blaster",
- .ioctl = sb_mixer_ioctl
-};
-
-static struct mixer_operations als007_mixer_operations =
-{
- .owner = THIS_MODULE,
- .id = "ALS007",
- .name = "Avance ALS-007",
- .ioctl = sb_mixer_ioctl
-};
-
-static void sb_mixer_reset(sb_devc * devc)
-{
- char name[32];
- int i;
-
- sprintf(name, "SB_%d", devc->sbmixnum);
-
- if (devc->sbmo.sm_games)
- devc->levels = load_mixer_volumes(name, smg_default_levels, 1);
- else
- devc->levels = load_mixer_volumes(name, sb_default_levels, 1);
-
- for (i = 0; i < SOUND_MIXER_NRDEVICES; i++)
- sb_mixer_set(devc, i, devc->levels[i]);
-
- if (devc->model != MDL_ESS || !ess_mixer_reset (devc)) {
- set_recmask(devc, SOUND_MASK_MIC);
- }
-}
-
-int sb_mixer_init(sb_devc * devc, struct module *owner)
-{
- int mixer_type = 0;
- int m;
-
- devc->sbmixnum = sbmixnum++;
- devc->levels = NULL;
-
- sb_setmixer(devc, 0x00, 0); /* Reset mixer */
-
- if (!(mixer_type = detect_mixer(devc)))
- return 0; /* No mixer. Why? */
-
- switch (devc->model)
- {
- case MDL_ESSPCI:
- case MDL_YMPCI:
- case MDL_SBPRO:
- case MDL_AZTECH:
- case MDL_JAZZ:
- devc->mixer_caps = SOUND_CAP_EXCL_INPUT;
- devc->supported_devices = SBPRO_MIXER_DEVICES;
- devc->supported_rec_devices = SBPRO_RECORDING_DEVICES;
- devc->iomap = &sbpro_mix;
- devc->iomap_sz = ARRAY_SIZE(sbpro_mix);
- break;
-
- case MDL_ESS:
- ess_mixer_init (devc);
- break;
-
- case MDL_SMW:
- devc->mixer_caps = SOUND_CAP_EXCL_INPUT;
- devc->supported_devices = 0;
- devc->supported_rec_devices = 0;
- devc->iomap = &sbpro_mix;
- devc->iomap_sz = ARRAY_SIZE(sbpro_mix);
- smw_mixer_init(devc);
- break;
-
- case MDL_SB16:
- devc->mixer_caps = 0;
- devc->supported_rec_devices = SB16_RECORDING_DEVICES;
- devc->supported_out_devices = SB16_OUTFILTER_DEVICES;
- if (devc->submodel != SUBMDL_ALS007)
- {
- devc->supported_devices = SB16_MIXER_DEVICES;
- devc->iomap = &sb16_mix;
- devc->iomap_sz = ARRAY_SIZE(sb16_mix);
- }
- else
- {
- devc->supported_devices = ALS007_MIXER_DEVICES;
- devc->iomap = &als007_mix;
- devc->iomap_sz = ARRAY_SIZE(als007_mix);
- }
- break;
-
- default:
- printk(KERN_WARNING "sb_mixer: Unsupported mixer type %d\n", devc->model);
- return 0;
- }
-
- m = sound_alloc_mixerdev();
- if (m == -1)
- return 0;
-
- mixer_devs[m] = kmalloc(sizeof(struct mixer_operations), GFP_KERNEL);
- if (mixer_devs[m] == NULL)
- {
- printk(KERN_ERR "sb_mixer: Can't allocate memory\n");
- sound_unload_mixerdev(m);
- return 0;
- }
-
- if (devc->submodel != SUBMDL_ALS007)
- memcpy ((char *) mixer_devs[m], (char *) &sb_mixer_operations, sizeof (struct mixer_operations));
- else
- memcpy ((char *) mixer_devs[m], (char *) &als007_mixer_operations, sizeof (struct mixer_operations));
-
- mixer_devs[m]->devc = devc;
-
- if (owner)
- mixer_devs[m]->owner = owner;
-
- devc->my_mixerdev = m;
- sb_mixer_reset(devc);
- return 1;
-}
-
-void sb_mixer_unload(sb_devc *devc)
-{
- if (devc->my_mixerdev == -1)
- return;
-
- kfree(mixer_devs[devc->my_mixerdev]);
- sound_unload_mixerdev(devc->my_mixerdev);
- sbmixnum--;
-}
diff --git a/sound/oss/sb_mixer.h b/sound/oss/sb_mixer.h
deleted file mode 100644
index 4b9425f085e3..000000000000
--- a/sound/oss/sb_mixer.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * sound/oss/sb_mixer.h
- *
- * Definitions for the SB Pro and SB16 mixers
- */
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- */
-
-/*
- * Modified:
- * Hunyue Yau Jan 6 1994
- * Added defines for the Sound Galaxy NX Pro mixer.
- *
- * Rolf Fokkens Dec 20 1998
- * Added defines for some ES188x chips.
- *
- * Rolf Fokkens Dec 27 1998
- * Moved static stuff to sb_mixer.c
- *
- */
-/*
- * Mixer registers
- *
- * NOTE! RECORD_SRC == IN_FILTER
- */
-
-/*
- * Mixer registers of SB Pro
- */
-#define VOC_VOL 0x04
-#define MIC_VOL 0x0A
-#define MIC_MIX 0x0A
-#define RECORD_SRC 0x0C
-#define IN_FILTER 0x0C
-#define OUT_FILTER 0x0E
-#define MASTER_VOL 0x22
-#define FM_VOL 0x26
-#define CD_VOL 0x28
-#define LINE_VOL 0x2E
-#define IRQ_NR 0x80
-#define DMA_NR 0x81
-#define IRQ_STAT 0x82
-#define OPSW 0x3c
-
-/*
- * Additional registers on the SG NX Pro
- */
-#define COVOX_VOL 0x42
-#define TREBLE_LVL 0x44
-#define BASS_LVL 0x46
-
-#define FREQ_HI (1 << 3)/* Use High-frequency ANFI filters */
-#define FREQ_LOW 0 /* Use Low-frequency ANFI filters */
-#define FILT_ON 0 /* Yes, 0 to turn it on, 1 for off */
-#define FILT_OFF (1 << 5)
-
-#define MONO_DAC 0x00
-#define STEREO_DAC 0x02
-
-/*
- * Mixer registers of SB16
- */
-#define SB16_OMASK 0x3c
-#define SB16_IMASK_L 0x3d
-#define SB16_IMASK_R 0x3e
-
-#define LEFT_CHN 0
-#define RIGHT_CHN 1
-
-/*
- * 3DSE register of AWE32/64
- */
-#define AWE_3DSE 0x90
-
-/*
- * Mixer registers of ALS007
- */
-#define ALS007_RECORD_SRC 0x6c
-#define ALS007_OUTPUT_CTRL1 0x3c
-#define ALS007_OUTPUT_CTRL2 0x4c
-
-#define MIX_ENT(name, reg_l, bit_l, len_l, reg_r, bit_r, len_r) \
- {{reg_l, bit_l, len_l}, {reg_r, bit_r, len_r}}
-
-/*
- * Recording sources (SB Pro)
- */
-
-#define SRC__MIC 1 /* Select Microphone recording source */
-#define SRC__CD 3 /* Select CD recording source */
-#define SRC__LINE 7 /* Use Line-in for recording source */
-
-/*
- * Recording sources for ALS-007
- */
-
-#define ALS007_MIC 4
-#define ALS007_LINE 6
-#define ALS007_CD 2
-#define ALS007_SYNTH 7
diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c
deleted file mode 100644
index f19da4b47c1d..000000000000
--- a/sound/oss/sequencer.c
+++ /dev/null
@@ -1,1661 +0,0 @@
-/*
- * sound/oss/sequencer.c
- *
- * The sequencer personality manager.
- */
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- */
-/*
- * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
- * Alan Cox : reformatted and fixed a pair of null pointer bugs
- */
-#include <linux/kmod.h>
-#include <linux/spinlock.h>
-#include "sound_config.h"
-
-#include "midi_ctrl.h"
-#include "sleep.h"
-
-static int sequencer_ok;
-static struct sound_timer_operations *tmr;
-static int tmr_no = -1; /* Currently selected timer */
-static int pending_timer = -1; /* For timer change operation */
-extern unsigned long seq_time;
-
-static int obsolete_api_used;
-static DEFINE_SPINLOCK(lock);
-
-/*
- * Local counts of the number of synth and MIDI devices. These are
- * initialized by sequencer_open.
- */
-static int max_mididev;
-static int max_synthdev;
-
-/*
- * The seq_mode gives the operating mode of the sequencer:
- * 1 = level1 (the default)
- * 2 = level2 (extended capabilities)
- */
-
-#define SEQ_1 1
-#define SEQ_2 2
-static int seq_mode = SEQ_1;
-
-static DECLARE_WAIT_QUEUE_HEAD(seq_sleeper);
-static DECLARE_WAIT_QUEUE_HEAD(midi_sleeper);
-
-static int midi_opened[MAX_MIDI_DEV];
-
-static int midi_written[MAX_MIDI_DEV];
-
-static unsigned long prev_input_time;
-static int prev_event_time;
-
-#include "tuning.h"
-
-#define EV_SZ 8
-#define IEV_SZ 8
-
-static unsigned char *queue;
-static unsigned char *iqueue;
-
-static volatile int qhead, qtail, qlen;
-static volatile int iqhead, iqtail, iqlen;
-static volatile int seq_playing;
-static volatile int sequencer_busy;
-static int output_threshold;
-static long pre_event_timeout;
-static unsigned synth_open_mask;
-
-static int seq_queue(unsigned char *note, char nonblock);
-static void seq_startplay(void);
-static int seq_sync(void);
-static void seq_reset(void);
-
-#if MAX_SYNTH_DEV > 15
-#error Too many synthesizer devices enabled.
-#endif
-
-int sequencer_read(int dev, struct file *file, char __user *buf, int count)
-{
- int c = count, p = 0;
- int ev_len;
- unsigned long flags;
-
- dev = dev >> 4;
-
- ev_len = seq_mode == SEQ_1 ? 4 : 8;
-
- spin_lock_irqsave(&lock,flags);
-
- if (!iqlen)
- {
- spin_unlock_irqrestore(&lock,flags);
- if (file->f_flags & O_NONBLOCK) {
- return -EAGAIN;
- }
-
- oss_broken_sleep_on(&midi_sleeper, pre_event_timeout);
- spin_lock_irqsave(&lock,flags);
- if (!iqlen)
- {
- spin_unlock_irqrestore(&lock,flags);
- return 0;
- }
- }
- while (iqlen && c >= ev_len)
- {
- char *fixit = (char *) &iqueue[iqhead * IEV_SZ];
- spin_unlock_irqrestore(&lock,flags);
- if (copy_to_user(&(buf)[p], fixit, ev_len))
- return count - c;
- p += ev_len;
- c -= ev_len;
-
- spin_lock_irqsave(&lock,flags);
- iqhead = (iqhead + 1) % SEQ_MAX_QUEUE;
- iqlen--;
- }
- spin_unlock_irqrestore(&lock,flags);
- return count - c;
-}
-
-static void sequencer_midi_output(int dev)
-{
- /*
- * Currently NOP
- */
-}
-
-void seq_copy_to_input(unsigned char *event_rec, int len)
-{
- unsigned long flags;
-
- /*
- * Verify that the len is valid for the current mode.
- */
-
- if (len != 4 && len != 8)
- return;
- if ((seq_mode == SEQ_1) != (len == 4))
- return;
-
- if (iqlen >= (SEQ_MAX_QUEUE - 1))
- return; /* Overflow */
-
- spin_lock_irqsave(&lock,flags);
- memcpy(&iqueue[iqtail * IEV_SZ], event_rec, len);
- iqlen++;
- iqtail = (iqtail + 1) % SEQ_MAX_QUEUE;
- wake_up(&midi_sleeper);
- spin_unlock_irqrestore(&lock,flags);
-}
-EXPORT_SYMBOL(seq_copy_to_input);
-
-static void sequencer_midi_input(int dev, unsigned char data)
-{
- unsigned int tstamp;
- unsigned char event_rec[4];
-
- if (data == 0xfe) /* Ignore active sensing */
- return;
-
- tstamp = jiffies - seq_time;
-
- if (tstamp != prev_input_time)
- {
- tstamp = (tstamp << 8) | SEQ_WAIT;
- seq_copy_to_input((unsigned char *) &tstamp, 4);
- prev_input_time = tstamp;
- }
- event_rec[0] = SEQ_MIDIPUTC;
- event_rec[1] = data;
- event_rec[2] = dev;
- event_rec[3] = 0;
-
- seq_copy_to_input(event_rec, 4);
-}
-
-void seq_input_event(unsigned char *event_rec, int len)
-{
- unsigned long this_time;
-
- if (seq_mode == SEQ_2)
- this_time = tmr->get_time(tmr_no);
- else
- this_time = jiffies - seq_time;
-
- if (this_time != prev_input_time)
- {
- unsigned char tmp_event[8];
-
- tmp_event[0] = EV_TIMING;
- tmp_event[1] = TMR_WAIT_ABS;
- tmp_event[2] = 0;
- tmp_event[3] = 0;
- *(unsigned int *) &tmp_event[4] = this_time;
-
- seq_copy_to_input(tmp_event, 8);
- prev_input_time = this_time;
- }
- seq_copy_to_input(event_rec, len);
-}
-EXPORT_SYMBOL(seq_input_event);
-
-int sequencer_write(int dev, struct file *file, const char __user *buf, int count)
-{
- unsigned char event_rec[EV_SZ], ev_code;
- int p = 0, c, ev_size;
- int mode = translate_mode(file);
-
- dev = dev >> 4;
-
- if (mode == OPEN_READ)
- return -EIO;
-
- c = count;
-
- while (c >= 4)
- {
- if (copy_from_user((char *) event_rec, &(buf)[p], 4))
- goto out;
- ev_code = event_rec[0];
-
- if (ev_code == SEQ_FULLSIZE)
- {
- int err, fmt;
-
- dev = *(unsigned short *) &event_rec[2];
- if (dev < 0 || dev >= max_synthdev || synth_devs[dev] == NULL)
- return -ENXIO;
-
- if (!(synth_open_mask & (1 << dev)))
- return -ENXIO;
-
- fmt = (*(short *) &event_rec[0]) & 0xffff;
- err = synth_devs[dev]->load_patch(dev, fmt, buf + p, c, 0);
- if (err < 0)
- return err;
-
- return err;
- }
- if (ev_code >= 128)
- {
- if (seq_mode == SEQ_2 && ev_code == SEQ_EXTENDED)
- {
- printk(KERN_WARNING "Sequencer: Invalid level 2 event %x\n", ev_code);
- return -EINVAL;
- }
- ev_size = 8;
-
- if (c < ev_size)
- {
- if (!seq_playing)
- seq_startplay();
- return count - c;
- }
- if (copy_from_user((char *)&event_rec[4],
- &(buf)[p + 4], 4))
- goto out;
-
- }
- else
- {
- if (seq_mode == SEQ_2)
- {
- printk(KERN_WARNING "Sequencer: 4 byte event in level 2 mode\n");
- return -EINVAL;
- }
- ev_size = 4;
-
- if (event_rec[0] != SEQ_MIDIPUTC)
- obsolete_api_used = 1;
- }
-
- if (event_rec[0] == SEQ_MIDIPUTC)
- {
- if (!midi_opened[event_rec[2]])
- {
- int err, mode;
- int dev = event_rec[2];
-
- if (dev >= max_mididev || midi_devs[dev]==NULL)
- {
- /*printk("Sequencer Error: Nonexistent MIDI device %d\n", dev);*/
- return -ENXIO;
- }
- mode = translate_mode(file);
-
- if ((err = midi_devs[dev]->open(dev, mode,
- sequencer_midi_input, sequencer_midi_output)) < 0)
- {
- seq_reset();
- printk(KERN_WARNING "Sequencer Error: Unable to open Midi #%d\n", dev);
- return err;
- }
- midi_opened[dev] = 1;
- }
- }
- if (!seq_queue(event_rec, (file->f_flags & (O_NONBLOCK) ? 1 : 0)))
- {
- int processed = count - c;
-
- if (!seq_playing)
- seq_startplay();
-
- if (!processed && (file->f_flags & O_NONBLOCK))
- return -EAGAIN;
- else
- return processed;
- }
- p += ev_size;
- c -= ev_size;
- }
-
- if (!seq_playing)
- seq_startplay();
-out:
- return count;
-}
-
-static int seq_queue(unsigned char *note, char nonblock)
-{
-
- /*
- * Test if there is space in the queue
- */
-
- if (qlen >= SEQ_MAX_QUEUE)
- if (!seq_playing)
- seq_startplay(); /*
-						 * Give the queue a chance to drain
- */
-
- if (!nonblock && qlen >= SEQ_MAX_QUEUE && !waitqueue_active(&seq_sleeper)) {
- /*
- * Sleep until there is enough space on the queue
- */
- oss_broken_sleep_on(&seq_sleeper, MAX_SCHEDULE_TIMEOUT);
- }
- if (qlen >= SEQ_MAX_QUEUE)
- {
- return 0; /*
- * To be sure
- */
- }
- memcpy(&queue[qtail * EV_SZ], note, EV_SZ);
-
- qtail = (qtail + 1) % SEQ_MAX_QUEUE;
- qlen++;
-
- return 1;
-}
-
-static int extended_event(unsigned char *q)
-{
- int dev = q[2];
-
- if (dev < 0 || dev >= max_synthdev)
- return -ENXIO;
-
- if (!(synth_open_mask & (1 << dev)))
- return -ENXIO;
-
- switch (q[1])
- {
- case SEQ_NOTEOFF:
- synth_devs[dev]->kill_note(dev, q[3], q[4], q[5]);
- break;
-
- case SEQ_NOTEON:
- if (q[4] > 127 && q[4] != 255)
- return 0;
-
- if (q[5] == 0)
- {
- synth_devs[dev]->kill_note(dev, q[3], q[4], q[5]);
- break;
- }
- synth_devs[dev]->start_note(dev, q[3], q[4], q[5]);
- break;
-
- case SEQ_PGMCHANGE:
- synth_devs[dev]->set_instr(dev, q[3], q[4]);
- break;
-
- case SEQ_AFTERTOUCH:
- synth_devs[dev]->aftertouch(dev, q[3], q[4]);
- break;
-
- case SEQ_BALANCE:
- synth_devs[dev]->panning(dev, q[3], (char) q[4]);
- break;
-
- case SEQ_CONTROLLER:
- synth_devs[dev]->controller(dev, q[3], q[4], (short) (q[5] | (q[6] << 8)));
- break;
-
- case SEQ_VOLMODE:
- if (synth_devs[dev]->volume_method != NULL)
- synth_devs[dev]->volume_method(dev, q[3]);
- break;
-
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int find_voice(int dev, int chn, int note)
-{
- unsigned short key;
- int i;
-
- key = (chn << 8) | (note + 1);
- for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++)
- if (synth_devs[dev]->alloc.map[i] == key)
- return i;
- return -1;
-}
-
-static int alloc_voice(int dev, int chn, int note)
-{
- unsigned short key;
- int voice;
-
- key = (chn << 8) | (note + 1);
-
- voice = synth_devs[dev]->alloc_voice(dev, chn, note,
- &synth_devs[dev]->alloc);
- synth_devs[dev]->alloc.map[voice] = key;
- synth_devs[dev]->alloc.alloc_times[voice] =
- synth_devs[dev]->alloc.timestamp++;
- return voice;
-}
-
-static void seq_chn_voice_event(unsigned char *event_rec)
-{
-#define dev event_rec[1]
-#define cmd event_rec[2]
-#define chn event_rec[3]
-#define note event_rec[4]
-#define parm event_rec[5]
-
- int voice = -1;
-
- if ((int) dev > max_synthdev || synth_devs[dev] == NULL)
- return;
- if (!(synth_open_mask & (1 << dev)))
- return;
- if (!synth_devs[dev])
- return;
-
- if (seq_mode == SEQ_2)
- {
- if (synth_devs[dev]->alloc_voice)
- voice = find_voice(dev, chn, note);
-
- if (cmd == MIDI_NOTEON && parm == 0)
- {
- cmd = MIDI_NOTEOFF;
- parm = 64;
- }
- }
-
- switch (cmd)
- {
- case MIDI_NOTEON:
- if (note > 127 && note != 255) /* Not a seq2 feature */
- return;
-
- if (voice == -1 && seq_mode == SEQ_2 && synth_devs[dev]->alloc_voice)
- {
- /* Internal synthesizer (FM, GUS, etc) */
- voice = alloc_voice(dev, chn, note);
- }
- if (voice == -1)
- voice = chn;
-
- if (seq_mode == SEQ_2 && (int) dev < num_synths)
- {
- /*
-				 * MIDI channel 10 is the percussion channel. Use the note
-				 * number to select the proper patch (128 to 255) to play.
- */
-
- if (chn == 9)
- {
- synth_devs[dev]->set_instr(dev, voice, 128 + note);
- synth_devs[dev]->chn_info[chn].pgm_num = 128 + note;
- }
- synth_devs[dev]->setup_voice(dev, voice, chn);
- }
- synth_devs[dev]->start_note(dev, voice, note, parm);
- break;
-
- case MIDI_NOTEOFF:
- if (voice == -1)
- voice = chn;
- synth_devs[dev]->kill_note(dev, voice, note, parm);
- break;
-
- case MIDI_KEY_PRESSURE:
- if (voice == -1)
- voice = chn;
- synth_devs[dev]->aftertouch(dev, voice, parm);
- break;
-
- default:;
- }
-#undef dev
-#undef cmd
-#undef chn
-#undef note
-#undef parm
-}
-
-
-static void seq_chn_common_event(unsigned char *event_rec)
-{
- unsigned char dev = event_rec[1];
- unsigned char cmd = event_rec[2];
- unsigned char chn = event_rec[3];
- unsigned char p1 = event_rec[4];
-
- /* unsigned char p2 = event_rec[5]; */
- unsigned short w14 = *(short *) &event_rec[6];
-
- if ((int) dev > max_synthdev || synth_devs[dev] == NULL)
- return;
- if (!(synth_open_mask & (1 << dev)))
- return;
- if (!synth_devs[dev])
- return;
-
- switch (cmd)
- {
- case MIDI_PGM_CHANGE:
- if (seq_mode == SEQ_2)
- {
- if (chn > 15)
- break;
-
- synth_devs[dev]->chn_info[chn].pgm_num = p1;
- if ((int) dev >= num_synths)
- synth_devs[dev]->set_instr(dev, chn, p1);
- }
- else
- synth_devs[dev]->set_instr(dev, chn, p1);
-
- break;
-
- case MIDI_CTL_CHANGE:
- if (seq_mode == SEQ_2)
- {
- if (chn > 15 || p1 > 127)
- break;
-
- synth_devs[dev]->chn_info[chn].controllers[p1] = w14 & 0x7f;
-
- if (p1 < 32) /* Setting MSB should clear LSB to 0 */
- synth_devs[dev]->chn_info[chn].controllers[p1 + 32] = 0;
-
- if ((int) dev < num_synths)
- {
- int val = w14 & 0x7f;
- int i, key;
-
- if (p1 < 64) /* Combine MSB and LSB */
- {
- val = ((synth_devs[dev]->
- chn_info[chn].controllers[p1 & ~32] & 0x7f) << 7)
- | (synth_devs[dev]->
- chn_info[chn].controllers[p1 | 32] & 0x7f);
- p1 &= ~32;
- }
- /* Handle all playing notes on this channel */
-
- key = ((int) chn << 8);
-
- for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++)
- if ((synth_devs[dev]->alloc.map[i] & 0xff00) == key)
- synth_devs[dev]->controller(dev, i, p1, val);
- }
- else
- synth_devs[dev]->controller(dev, chn, p1, w14);
- }
- else /* Mode 1 */
- synth_devs[dev]->controller(dev, chn, p1, w14);
- break;
-
- case MIDI_PITCH_BEND:
- if (seq_mode == SEQ_2)
- {
- if (chn > 15)
- break;
-
- synth_devs[dev]->chn_info[chn].bender_value = w14;
-
- if ((int) dev < num_synths)
- {
- /* Handle all playing notes on this channel */
- int i, key;
-
- key = (chn << 8);
-
- for (i = 0; i < synth_devs[dev]->alloc.max_voice; i++)
- if ((synth_devs[dev]->alloc.map[i] & 0xff00) == key)
- synth_devs[dev]->bender(dev, i, w14);
- }
- else
- synth_devs[dev]->bender(dev, chn, w14);
- }
- else /* MODE 1 */
- synth_devs[dev]->bender(dev, chn, w14);
- break;
-
- default:;
- }
-}
-
-static int seq_timing_event(unsigned char *event_rec)
-{
- unsigned char cmd = event_rec[1];
- unsigned int parm = *(int *) &event_rec[4];
-
- if (seq_mode == SEQ_2)
- {
- int ret;
-
- if ((ret = tmr->event(tmr_no, event_rec)) == TIMER_ARMED)
- if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
- wake_up(&seq_sleeper);
- return ret;
- }
- switch (cmd)
- {
- case TMR_WAIT_REL:
- parm += prev_event_time;
-
- /*
- * NOTE! No break here. Execution of TMR_WAIT_REL continues in the
- * next case (TMR_WAIT_ABS)
- */
-
- case TMR_WAIT_ABS:
- if (parm > 0)
- {
- long time;
-
- time = parm;
- prev_event_time = time;
-
- seq_playing = 1;
- request_sound_timer(time);
-
- if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
- wake_up(&seq_sleeper);
- return TIMER_ARMED;
- }
- break;
-
- case TMR_START:
- seq_time = jiffies;
- prev_input_time = 0;
- prev_event_time = 0;
- break;
-
- case TMR_STOP:
- break;
-
- case TMR_CONTINUE:
- break;
-
- case TMR_TEMPO:
- break;
-
- case TMR_ECHO:
- parm = (parm << 8 | SEQ_ECHO);
- seq_copy_to_input((unsigned char *) &parm, 4);
- break;
-
- default:;
- }
-
- return TIMER_NOT_ARMED;
-}
-
-static void seq_local_event(unsigned char *event_rec)
-{
- unsigned char cmd = event_rec[1];
- unsigned int parm = *((unsigned int *) &event_rec[4]);
-
- switch (cmd)
- {
- case LOCL_STARTAUDIO:
- DMAbuf_start_devices(parm);
- break;
-
- default:;
- }
-}
-
-static void seq_sysex_message(unsigned char *event_rec)
-{
- unsigned int dev = event_rec[1];
- int i, l = 0;
- unsigned char *buf = &event_rec[2];
-
- if (dev > max_synthdev)
- return;
- if (!(synth_open_mask & (1 << dev)))
- return;
- if (!synth_devs[dev])
- return;
-
- l = 0;
- for (i = 0; i < 6 && buf[i] != 0xff; i++)
- l = i + 1;
-
- if (!synth_devs[dev]->send_sysex)
- return;
- if (l > 0)
- synth_devs[dev]->send_sysex(dev, buf, l);
-}
-
-static int play_event(unsigned char *q)
-{
- /*
- * NOTE! This routine returns
- * 0 = normal event played.
- * 1 = Timer armed. Suspend playback until timer callback.
- * 2 = MIDI output buffer full. Restore queue and suspend until timer
- */
- unsigned int *delay;
-
- switch (q[0])
- {
- case SEQ_NOTEOFF:
- if (synth_open_mask & (1 << 0))
- if (synth_devs[0])
- synth_devs[0]->kill_note(0, q[1], 255, q[3]);
- break;
-
- case SEQ_NOTEON:
- if (q[4] < 128 || q[4] == 255)
- if (synth_open_mask & (1 << 0))
- if (synth_devs[0])
- synth_devs[0]->start_note(0, q[1], q[2], q[3]);
- break;
-
- case SEQ_WAIT:
-		delay = (unsigned int *) q;	/*
-						 * Bytes 1 to 3 contain the
-						 * delay in 'ticks'
-						 */
- *delay = (*delay >> 8) & 0xffffff;
-
- if (*delay > 0)
- {
- long time;
-
- seq_playing = 1;
- time = *delay;
- prev_event_time = time;
-
- request_sound_timer(time);
-
- if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
- wake_up(&seq_sleeper);
- /*
- * The timer is now active and will reinvoke this function
- * after the timer expires. Return to the caller now.
- */
- return 1;
- }
- break;
-
- case SEQ_PGMCHANGE:
- if (synth_open_mask & (1 << 0))
- if (synth_devs[0])
- synth_devs[0]->set_instr(0, q[1], q[2]);
- break;
-
- case SEQ_SYNCTIMER: /*
- * Reset timer
- */
- seq_time = jiffies;
- prev_input_time = 0;
- prev_event_time = 0;
- break;
-
- case SEQ_MIDIPUTC: /*
- * Put a midi character
- */
- if (midi_opened[q[2]])
- {
- int dev;
-
- dev = q[2];
-
- if (dev < 0 || dev >= num_midis || midi_devs[dev] == NULL)
- break;
-
- if (!midi_devs[dev]->outputc(dev, q[1]))
- {
- /*
- * Output FIFO is full. Wait one timer cycle and try again.
- */
-
- seq_playing = 1;
- request_sound_timer(-1);
- return 2;
- }
- else
- midi_written[dev] = 1;
- }
- break;
-
- case SEQ_ECHO:
- seq_copy_to_input(q, 4); /*
- * Echo back to the process
- */
- break;
-
- case SEQ_PRIVATE:
- if ((int) q[1] < max_synthdev)
- synth_devs[q[1]]->hw_control(q[1], q);
- break;
-
- case SEQ_EXTENDED:
- extended_event(q);
- break;
-
- case EV_CHN_VOICE:
- seq_chn_voice_event(q);
- break;
-
- case EV_CHN_COMMON:
- seq_chn_common_event(q);
- break;
-
- case EV_TIMING:
- if (seq_timing_event(q) == TIMER_ARMED)
- {
- return 1;
- }
- break;
-
- case EV_SEQ_LOCAL:
- seq_local_event(q);
- break;
-
- case EV_SYSEX:
- seq_sysex_message(q);
- break;
-
- default:;
- }
- return 0;
-}
-
-/* called also as timer in irq context */
-static void seq_startplay(void)
-{
- int this_one, action;
- unsigned long flags;
-
- while (qlen > 0)
- {
-
- spin_lock_irqsave(&lock,flags);
- qhead = ((this_one = qhead) + 1) % SEQ_MAX_QUEUE;
- qlen--;
- spin_unlock_irqrestore(&lock,flags);
-
- seq_playing = 1;
-
- if ((action = play_event(&queue[this_one * EV_SZ])))
- { /* Suspend playback. Next timer routine invokes this routine again */
- if (action == 2)
- {
- qlen++;
- qhead = this_one;
- }
- return;
- }
- }
-
- seq_playing = 0;
-
- if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
- wake_up(&seq_sleeper);
-}
-
-static void reset_controllers(int dev, unsigned char *controller, int update_dev)
-{
- int i;
- for (i = 0; i < 128; i++)
- controller[i] = ctrl_def_values[i];
-}
-
-static void setup_mode2(void)
-{
- int dev;
-
- max_synthdev = num_synths;
-
- for (dev = 0; dev < num_midis; dev++)
- {
- if (midi_devs[dev] && midi_devs[dev]->converter != NULL)
- {
- synth_devs[max_synthdev++] = midi_devs[dev]->converter;
- }
- }
-
- for (dev = 0; dev < max_synthdev; dev++)
- {
- int chn;
-
- synth_devs[dev]->sysex_ptr = 0;
- synth_devs[dev]->emulation = 0;
-
- for (chn = 0; chn < 16; chn++)
- {
- synth_devs[dev]->chn_info[chn].pgm_num = 0;
- reset_controllers(dev,
- synth_devs[dev]->chn_info[chn].controllers,0);
- synth_devs[dev]->chn_info[chn].bender_value = (1 << 7); /* Neutral */
- synth_devs[dev]->chn_info[chn].bender_range = 200;
- }
- }
- max_mididev = 0;
- seq_mode = SEQ_2;
-}
-
-int sequencer_open(int dev, struct file *file)
-{
- int retval, mode, i;
- int level, tmp;
-
- if (!sequencer_ok)
- sequencer_init();
-
- level = ((dev & 0x0f) == SND_DEV_SEQ2) ? 2 : 1;
-
- dev = dev >> 4;
- mode = translate_mode(file);
-
- if (!sequencer_ok)
- {
-/* printk("Sound card: sequencer not initialized\n");*/
- return -ENXIO;
- }
- if (dev) /* Patch manager device (obsolete) */
- return -ENXIO;
-
- if(synth_devs[dev] == NULL)
- request_module("synth0");
-
- if (mode == OPEN_READ)
- {
- if (!num_midis)
- {
- /*printk("Sequencer: No MIDI devices. Input not possible\n");*/
- sequencer_busy = 0;
- return -ENXIO;
- }
- }
- if (sequencer_busy)
- {
- return -EBUSY;
- }
- sequencer_busy = 1;
- obsolete_api_used = 0;
-
- max_mididev = num_midis;
- max_synthdev = num_synths;
- pre_event_timeout = MAX_SCHEDULE_TIMEOUT;
- seq_mode = SEQ_1;
-
- if (pending_timer != -1)
- {
- tmr_no = pending_timer;
- pending_timer = -1;
- }
- if (tmr_no == -1) /* Not selected yet */
- {
- int i, best;
-
- best = -1;
- for (i = 0; i < num_sound_timers; i++)
- if (sound_timer_devs[i] && sound_timer_devs[i]->priority > best)
- {
- tmr_no = i;
- best = sound_timer_devs[i]->priority;
- }
- if (tmr_no == -1) /* Should not be */
- tmr_no = 0;
- }
- tmr = sound_timer_devs[tmr_no];
-
- if (level == 2)
- {
- if (tmr == NULL)
- {
- /*printk("sequencer: No timer for level 2\n");*/
- sequencer_busy = 0;
- return -ENXIO;
- }
- setup_mode2();
- }
- if (!max_synthdev && !max_mididev)
- {
- sequencer_busy=0;
- return -ENXIO;
- }
-
- synth_open_mask = 0;
-
- for (i = 0; i < max_mididev; i++)
- {
- midi_opened[i] = 0;
- midi_written[i] = 0;
- }
-
- for (i = 0; i < max_synthdev; i++)
- {
- if (synth_devs[i]==NULL)
- continue;
-
- if (!try_module_get(synth_devs[i]->owner))
- continue;
-
- if ((tmp = synth_devs[i]->open(i, mode)) < 0)
- {
- printk(KERN_WARNING "Sequencer: Warning! Cannot open synth device #%d (%d)\n", i, tmp);
- if (synth_devs[i]->midi_dev)
- printk(KERN_WARNING "(Maps to MIDI dev #%d)\n", synth_devs[i]->midi_dev);
- }
- else
- {
- synth_open_mask |= (1 << i);
- if (synth_devs[i]->midi_dev)
- midi_opened[synth_devs[i]->midi_dev] = 1;
- }
- }
-
- seq_time = jiffies;
-
- prev_input_time = 0;
- prev_event_time = 0;
-
- if (seq_mode == SEQ_1 && (mode == OPEN_READ || mode == OPEN_READWRITE))
- {
- /*
- * Initialize midi input devices
- */
-
- for (i = 0; i < max_mididev; i++)
- if (!midi_opened[i] && midi_devs[i])
- {
- if (!try_module_get(midi_devs[i]->owner))
- continue;
-
- if ((retval = midi_devs[i]->open(i, mode,
- sequencer_midi_input, sequencer_midi_output)) >= 0)
- {
- midi_opened[i] = 1;
- }
- }
- }
-
- if (seq_mode == SEQ_2) {
- if (try_module_get(tmr->owner))
- tmr->open(tmr_no, seq_mode);
- }
-
- init_waitqueue_head(&seq_sleeper);
- init_waitqueue_head(&midi_sleeper);
- output_threshold = SEQ_MAX_QUEUE / 2;
-
- return 0;
-}
-
-static void seq_drain_midi_queues(void)
-{
- int i, n;
-
- /*
- * Give the Midi drivers time to drain their output queues
- */
-
- n = 1;
-
- while (!signal_pending(current) && n)
- {
- n = 0;
-
- for (i = 0; i < max_mididev; i++)
- if (midi_opened[i] && midi_written[i])
- if (midi_devs[i]->buffer_status != NULL)
- if (midi_devs[i]->buffer_status(i))
- n++;
-
- /*
- * Let's have a delay
- */
-
- if (n)
- oss_broken_sleep_on(&seq_sleeper, HZ/10);
- }
-}
-
-void sequencer_release(int dev, struct file *file)
-{
- int i;
- int mode = translate_mode(file);
-
- dev = dev >> 4;
-
- /*
- * Wait until the queue is empty (if we don't have nonblock)
- */
-
- if (mode != OPEN_READ && !(file->f_flags & O_NONBLOCK))
- {
- while (!signal_pending(current) && qlen > 0)
- {
- seq_sync();
- oss_broken_sleep_on(&seq_sleeper, 3*HZ);
- /* Extra delay */
- }
- }
-
- if (mode != OPEN_READ)
- seq_drain_midi_queues(); /*
- * Ensure the output queues are empty
- */
- seq_reset();
- if (mode != OPEN_READ)
- seq_drain_midi_queues(); /*
-						 * Flush the all-notes-off messages
- */
-
- for (i = 0; i < max_synthdev; i++)
- {
- if (synth_open_mask & (1 << i)) /*
- * Actually opened
- */
- if (synth_devs[i])
- {
- synth_devs[i]->close(i);
-
- module_put(synth_devs[i]->owner);
-
- if (synth_devs[i]->midi_dev)
- midi_opened[synth_devs[i]->midi_dev] = 0;
- }
- }
-
- for (i = 0; i < max_mididev; i++)
- {
- if (midi_opened[i]) {
- midi_devs[i]->close(i);
- module_put(midi_devs[i]->owner);
- }
- }
-
- if (seq_mode == SEQ_2) {
- tmr->close(tmr_no);
- module_put(tmr->owner);
- }
-
- if (obsolete_api_used)
- printk(KERN_WARNING "/dev/music: Obsolete (4 byte) API was used by %s\n", current->comm);
- sequencer_busy = 0;
-}
-
-static int seq_sync(void)
-{
- if (qlen && !seq_playing && !signal_pending(current))
- seq_startplay();
-
- if (qlen > 0)
- oss_broken_sleep_on(&seq_sleeper, HZ);
- return qlen;
-}
-
-static void midi_outc(int dev, unsigned char data)
-{
- /*
- * NOTE! Calls sleep(). Don't call this from interrupt.
- */
-
- int n;
- unsigned long flags;
-
- /*
- * This routine sends one byte to the Midi channel.
- * If the output FIFO is full, it waits until there
- * is space in the queue
- */
-
- n = 3 * HZ; /* Timeout */
-
- spin_lock_irqsave(&lock,flags);
- while (n && !midi_devs[dev]->outputc(dev, data)) {
- oss_broken_sleep_on(&seq_sleeper, HZ/25);
- n--;
- }
- spin_unlock_irqrestore(&lock,flags);
-}
-
-static void seq_reset(void)
-{
- /*
- * NOTE! Calls sleep(). Don't call this from interrupt.
- */
-
- int i;
- int chn;
- unsigned long flags;
-
- sound_stop_timer();
-
- seq_time = jiffies;
- prev_input_time = 0;
- prev_event_time = 0;
-
- qlen = qhead = qtail = 0;
- iqlen = iqhead = iqtail = 0;
-
- for (i = 0; i < max_synthdev; i++)
- if (synth_open_mask & (1 << i))
- if (synth_devs[i])
- synth_devs[i]->reset(i);
-
- if (seq_mode == SEQ_2)
- {
- for (chn = 0; chn < 16; chn++)
- for (i = 0; i < max_synthdev; i++)
- if (synth_open_mask & (1 << i))
- if (synth_devs[i])
- {
- synth_devs[i]->controller(i, chn, 123, 0); /* All notes off */
- synth_devs[i]->controller(i, chn, 121, 0); /* Reset all ctl */
- synth_devs[i]->bender(i, chn, 1 << 13); /* Bender off */
- }
- }
- else /* seq_mode == SEQ_1 */
- {
- for (i = 0; i < max_mididev; i++)
- if (midi_written[i]) /*
- * Midi used. Some notes may still be playing
- */
- {
- /*
-				 * Sending just an ACTIVE SENSING message should be enough to stop all
-				 * playing notes. Since some devices do not recognize active sensing,
-				 * we also have to send all-notes-off messages.
- */
- midi_outc(i, 0xfe);
-
- for (chn = 0; chn < 16; chn++)
- {
- midi_outc(i, (unsigned char) (0xb0 + (chn & 0x0f))); /* control change */
- midi_outc(i, 0x7b); /* All notes off */
- midi_outc(i, 0); /* Dummy parameter */
- }
-
- midi_devs[i]->close(i);
-
- midi_written[i] = 0;
- midi_opened[i] = 0;
- }
- }
-
- seq_playing = 0;
-
- spin_lock_irqsave(&lock,flags);
-
- if (waitqueue_active(&seq_sleeper)) {
- /* printk( "Sequencer Warning: Unexpected sleeping process - Waking up\n"); */
- wake_up(&seq_sleeper);
- }
- spin_unlock_irqrestore(&lock,flags);
-}
-
-static void seq_panic(void)
-{
- /*
- * This routine is called by the application in case the user
- * wants to reset the system to the default state.
- */
-
- seq_reset();
-
- /*
-	 * Since some of the devices don't recognize the active sensing and
-	 * all-notes-off messages, we would have to shut off all notes manually.
- *
- * TO BE IMPLEMENTED LATER
- */
-
- /*
- * Also return the controllers to their default states
- */
-}
-
-int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *arg)
-{
- int midi_dev, orig_dev, val, err;
- int mode = translate_mode(file);
- struct synth_info inf;
- struct seq_event_rec event_rec;
- int __user *p = arg;
-
- orig_dev = dev = dev >> 4;
-
- switch (cmd)
- {
- case SNDCTL_TMR_TIMEBASE:
- case SNDCTL_TMR_TEMPO:
- case SNDCTL_TMR_START:
- case SNDCTL_TMR_STOP:
- case SNDCTL_TMR_CONTINUE:
- case SNDCTL_TMR_METRONOME:
- case SNDCTL_TMR_SOURCE:
- if (seq_mode != SEQ_2)
- return -EINVAL;
- return tmr->ioctl(tmr_no, cmd, arg);
-
- case SNDCTL_TMR_SELECT:
- if (seq_mode != SEQ_2)
- return -EINVAL;
- if (get_user(pending_timer, p))
- return -EFAULT;
- if (pending_timer < 0 || pending_timer >= num_sound_timers || sound_timer_devs[pending_timer] == NULL)
- {
- pending_timer = -1;
- return -EINVAL;
- }
- val = pending_timer;
- break;
-
- case SNDCTL_SEQ_PANIC:
- seq_panic();
- return -EINVAL;
-
- case SNDCTL_SEQ_SYNC:
- if (mode == OPEN_READ)
- return 0;
- while (qlen > 0 && !signal_pending(current))
- seq_sync();
- return qlen ? -EINTR : 0;
-
- case SNDCTL_SEQ_RESET:
- seq_reset();
- return 0;
-
- case SNDCTL_SEQ_TESTMIDI:
- if (__get_user(midi_dev, p))
- return -EFAULT;
- if (midi_dev < 0 || midi_dev >= max_mididev || !midi_devs[midi_dev])
- return -ENXIO;
-
- if (!midi_opened[midi_dev] &&
- (err = midi_devs[midi_dev]->open(midi_dev, mode, sequencer_midi_input,
- sequencer_midi_output)) < 0)
- return err;
- midi_opened[midi_dev] = 1;
- return 0;
-
- case SNDCTL_SEQ_GETINCOUNT:
- if (mode == OPEN_WRITE)
- return 0;
- val = iqlen;
- break;
-
- case SNDCTL_SEQ_GETOUTCOUNT:
- if (mode == OPEN_READ)
- return 0;
- val = SEQ_MAX_QUEUE - qlen;
- break;
-
- case SNDCTL_SEQ_GETTIME:
- if (seq_mode == SEQ_2)
- return tmr->ioctl(tmr_no, cmd, arg);
- val = jiffies - seq_time;
- break;
-
- case SNDCTL_SEQ_CTRLRATE:
- /*
- * If *arg == 0, just return the current rate
- */
- if (seq_mode == SEQ_2)
- return tmr->ioctl(tmr_no, cmd, arg);
-
- if (get_user(val, p))
- return -EFAULT;
- if (val != 0)
- return -EINVAL;
- val = HZ;
- break;
-
- case SNDCTL_SEQ_RESETSAMPLES:
- case SNDCTL_SYNTH_REMOVESAMPLE:
- case SNDCTL_SYNTH_CONTROL:
- if (get_user(dev, p))
- return -EFAULT;
- if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL)
- return -ENXIO;
- if (!(synth_open_mask & (1 << dev)) && !orig_dev)
- return -EBUSY;
- return synth_devs[dev]->ioctl(dev, cmd, arg);
-
- case SNDCTL_SEQ_NRSYNTHS:
- val = max_synthdev;
- break;
-
- case SNDCTL_SEQ_NRMIDIS:
- val = max_mididev;
- break;
-
- case SNDCTL_SYNTH_MEMAVL:
- if (get_user(dev, p))
- return -EFAULT;
- if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL)
- return -ENXIO;
- if (!(synth_open_mask & (1 << dev)) && !orig_dev)
- return -EBUSY;
- val = synth_devs[dev]->ioctl(dev, cmd, arg);
- break;
-
- case SNDCTL_FM_4OP_ENABLE:
- if (get_user(dev, p))
- return -EFAULT;
- if (dev < 0 || dev >= num_synths || synth_devs[dev] == NULL)
- return -ENXIO;
- if (!(synth_open_mask & (1 << dev)))
- return -ENXIO;
- synth_devs[dev]->ioctl(dev, cmd, arg);
- return 0;
-
- case SNDCTL_SYNTH_INFO:
- if (get_user(dev, &((struct synth_info __user *)arg)->device))
- return -EFAULT;
- if (dev < 0 || dev >= max_synthdev)
- return -ENXIO;
- if (!(synth_open_mask & (1 << dev)) && !orig_dev)
- return -EBUSY;
- return synth_devs[dev]->ioctl(dev, cmd, arg);
-
- /* Like SYNTH_INFO but returns ID in the name field */
- case SNDCTL_SYNTH_ID:
- if (get_user(dev, &((struct synth_info __user *)arg)->device))
- return -EFAULT;
- if (dev < 0 || dev >= max_synthdev)
- return -ENXIO;
- if (!(synth_open_mask & (1 << dev)) && !orig_dev)
- return -EBUSY;
- memcpy(&inf, synth_devs[dev]->info, sizeof(inf));
- strlcpy(inf.name, synth_devs[dev]->id, sizeof(inf.name));
- inf.device = dev;
- return copy_to_user(arg, &inf, sizeof(inf))?-EFAULT:0;
-
- case SNDCTL_SEQ_OUTOFBAND:
- if (copy_from_user(&event_rec, arg, sizeof(event_rec)))
- return -EFAULT;
- play_event(event_rec.arr);
- return 0;
-
- case SNDCTL_MIDI_INFO:
- if (get_user(dev, &((struct midi_info __user *)arg)->device))
- return -EFAULT;
- if (dev < 0 || dev >= max_mididev || !midi_devs[dev])
- return -ENXIO;
- midi_devs[dev]->info.device = dev;
- return copy_to_user(arg, &midi_devs[dev]->info, sizeof(struct midi_info))?-EFAULT:0;
-
- case SNDCTL_SEQ_THRESHOLD:
- if (get_user(val, p))
- return -EFAULT;
- if (val < 1)
- val = 1;
- if (val >= SEQ_MAX_QUEUE)
- val = SEQ_MAX_QUEUE - 1;
- output_threshold = val;
- return 0;
-
- case SNDCTL_MIDI_PRETIME:
- if (get_user(val, p))
- return -EFAULT;
- if (val < 0)
- val = 0;
- val = (HZ * val) / 10;
- pre_event_timeout = val;
- break;
-
- default:
- if (mode == OPEN_READ)
- return -EIO;
- if (!synth_devs[0])
- return -ENXIO;
- if (!(synth_open_mask & (1 << 0)))
- return -ENXIO;
- if (!synth_devs[0]->ioctl)
- return -EINVAL;
- return synth_devs[0]->ioctl(0, cmd, arg);
- }
- return put_user(val, p);
-}
-
-/* No kernel lock - we're using the global irq lock here */
-unsigned int sequencer_poll(int dev, struct file *file, poll_table * wait)
-{
- unsigned long flags;
- unsigned int mask = 0;
-
- dev = dev >> 4;
-
- spin_lock_irqsave(&lock,flags);
- /* input */
- poll_wait(file, &midi_sleeper, wait);
- if (iqlen)
- mask |= POLLIN | POLLRDNORM;
-
- /* output */
- poll_wait(file, &seq_sleeper, wait);
- if ((SEQ_MAX_QUEUE - qlen) >= output_threshold)
- mask |= POLLOUT | POLLWRNORM;
- spin_unlock_irqrestore(&lock,flags);
- return mask;
-}
-
-
-void sequencer_timer(unsigned long dummy)
-{
- seq_startplay();
-}
-EXPORT_SYMBOL(sequencer_timer);
-
-int note_to_freq(int note_num)
-{
-
- /*
-	 * This routine converts a MIDI note number to a frequency (multiplied by 1000)
- */
-
- int note, octave, note_freq;
- static int notes[] =
- {
- 261632, 277189, 293671, 311132, 329632, 349232,
- 369998, 391998, 415306, 440000, 466162, 493880
- };
-
-#define BASE_OCTAVE 5
-
- octave = note_num / 12;
- note = note_num % 12;
-
- note_freq = notes[note];
-
- if (octave < BASE_OCTAVE)
- note_freq >>= (BASE_OCTAVE - octave);
- else if (octave > BASE_OCTAVE)
- note_freq <<= (octave - BASE_OCTAVE);
-
- /*
- * note_freq >>= 1;
- */
-
- return note_freq;
-}
-EXPORT_SYMBOL(note_to_freq);
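For reference, note_to_freq() above returns the pitch in millihertz: the notes[] table holds octave 5 of the scale (so MIDI note 69, the A above middle C, maps directly to 440000), and other octaves are reached by shifting right or left. A minimal user-space sketch of the same lookup-and-shift calculation, offered only as an illustration (the function name and the printf harness are not part of the driver):

    #include <stdio.h>

    /* Same table as the driver: frequencies of octave 5, scaled by 1000. */
    static const int notes[12] = {
            261632, 277189, 293671, 311132, 329632, 349232,
            369998, 391998, 415306, 440000, 466162, 493880
    };

    #define BASE_OCTAVE 5

    static int midi_note_to_millihertz(int note_num)
    {
            int octave = note_num / 12;
            int freq = notes[note_num % 12];

            if (octave < BASE_OCTAVE)
                    freq >>= (BASE_OCTAVE - octave);
            else if (octave > BASE_OCTAVE)
                    freq <<= (octave - BASE_OCTAVE);
            return freq;
    }

    int main(void)
    {
            /* MIDI note 69: 69 / 12 == 5, 69 % 12 == 9 -> 440000, i.e. 440 Hz. */
            printf("%d\n", midi_note_to_millihertz(69));
            return 0;
    }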
-
-unsigned long compute_finetune(unsigned long base_freq, int bend, int range,
- int vibrato_cents)
-{
- unsigned long amount;
- int negative, semitones, cents, multiplier = 1;
-
- if (!bend)
- return base_freq;
- if (!range)
- return base_freq;
-
- if (!base_freq)
- return base_freq;
-
- if (range >= 8192)
- range = 8192;
-
- bend = bend * range / 8192; /* Convert to cents */
- bend += vibrato_cents;
-
- if (!bend)
- return base_freq;
-
- negative = bend < 0 ? 1 : 0;
-
- if (bend < 0)
- bend *= -1;
- if (bend > range)
- bend = range;
-
- /*
- if (bend > 2399)
- bend = 2399;
- */
- while (bend > 2399)
- {
- multiplier *= 4;
- bend -= 2400;
- }
-
- semitones = bend / 100;
- cents = bend % 100;
-
- amount = (int) (semitone_tuning[semitones] * multiplier * cent_tuning[cents]) / 10000;
-
- if (negative)
- return (base_freq * 10000) / amount; /* Bend down */
- else
- return (base_freq * amount) / 10000; /* Bend up */
-}
-EXPORT_SYMBOL(compute_finetune);
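The routine above works in cents: the signed 14-bit bend value is scaled to at most +/- range cents, every full 2400 cents (two octaves) contributes a x4 multiplier, and the semitone_tuning[] / cent_tuning[] tables from tuning.h hold frequency ratios scaled by 10000. A hedged floating-point cross-check of the same math (this mirrors the intent, not the driver's table-driven fixed-point code, and ignores the vibrato term):

    #include <math.h>
    #include <stdio.h>

    /* A bend of 'cents' cents scales the frequency by 2^(cents/1200). */
    static unsigned long finetune_float(unsigned long base_freq, int bend, int range)
    {
            double cents = (double)bend * range / 8192.0;   /* same scaling as above */

            return (unsigned long)(base_freq * pow(2.0, cents / 1200.0) + 0.5);
    }

    int main(void)
    {
            /* A full-scale bend over a 200-cent range is one whole tone up:
             * 440 Hz * 2^(200/1200) is roughly 494 Hz. */
            printf("%lu\n", finetune_float(440, 8192, 200));
            return 0;
    }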
-
-void sequencer_init(void)
-{
- if (sequencer_ok)
- return;
- queue = vmalloc(SEQ_MAX_QUEUE * EV_SZ);
- if (queue == NULL)
- {
- printk(KERN_ERR "sequencer: Can't allocate memory for sequencer output queue\n");
- return;
- }
- iqueue = vmalloc(SEQ_MAX_QUEUE * IEV_SZ);
- if (iqueue == NULL)
- {
- printk(KERN_ERR "sequencer: Can't allocate memory for sequencer input queue\n");
- vfree(queue);
- return;
- }
- sequencer_ok = 1;
-}
-EXPORT_SYMBOL(sequencer_init);
-
-void sequencer_unload(void)
-{
- vfree(queue);
- vfree(iqueue);
- queue = iqueue = NULL;
-}
diff --git a/sound/oss/sleep.h b/sound/oss/sleep.h
deleted file mode 100644
index fd17d44d13dd..000000000000
--- a/sound/oss/sleep.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/wait.h>
-
-/*
- * Do not use. This is a replacement for the old
- * "interruptible_sleep_on_timeout" function that has been
- * deprecated for ages. All users should instead try to use
- * wait_event_interruptible_timeout.
- */
-
-static inline long
-oss_broken_sleep_on(wait_queue_head_t *q, long timeout)
-{
- DEFINE_WAIT(wait);
- prepare_to_wait(q, &wait, TASK_INTERRUPTIBLE);
- timeout = schedule_timeout(timeout);
- finish_wait(q, &wait);
- return timeout;
-}
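The comment above names the intended replacement. A hedged sketch of how a caller such as the sequencer's blocking read could be rewritten around wait_event_interruptible_timeout(), waiting on an explicit condition instead of sleeping unconditionally; it relies on <linux/wait.h>, which this header already includes, and the variable names are taken from sequencer.c earlier in this diff (the helper itself is illustrative, not part of the patch):

    /*
     * Wait up to 'timeout' jiffies for input to arrive. Returns a positive
     * value if the condition became true, 0 on timeout, or -ERESTARTSYS if
     * the sleep was interrupted by a signal.
     */
    static long seq_wait_for_input(long timeout)
    {
            return wait_event_interruptible_timeout(midi_sleeper, iqlen != 0,
                                                    timeout);
    }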
diff --git a/sound/oss/sound_calls.h b/sound/oss/sound_calls.h
deleted file mode 100644
index bcd3f7340ef7..000000000000
--- a/sound/oss/sound_calls.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * DMA buffer calls
- */
-
-int DMAbuf_open(int dev, int mode);
-int DMAbuf_release(int dev, int mode);
-int DMAbuf_getwrbuffer(int dev, char **buf, int *size, int dontblock);
-int DMAbuf_getrdbuffer(int dev, char **buf, int *len, int dontblock);
-int DMAbuf_rmchars(int dev, int buff_no, int c);
-int DMAbuf_start_output(int dev, int buff_no, int l);
-int DMAbuf_move_wrpointer(int dev, int l);
-/* int DMAbuf_ioctl(int dev, unsigned int cmd, void __user *arg, int local); */
-void DMAbuf_init(int dev, int dma1, int dma2);
-void DMAbuf_deinit(int dev);
-int DMAbuf_start_dma (int dev, unsigned long physaddr, int count, int dma_mode);
-void DMAbuf_inputintr(int dev);
-void DMAbuf_outputintr(int dev, int underflow_flag);
-struct dma_buffparms;
-int DMAbuf_space_in_queue (int dev);
-int DMAbuf_activate_recording (int dev, struct dma_buffparms *dmap);
-int DMAbuf_get_buffer_pointer (int dev, struct dma_buffparms *dmap, int direction);
-void DMAbuf_launch_output(int dev, struct dma_buffparms *dmap);
-unsigned int DMAbuf_poll(struct file *file, int dev, poll_table *wait);
-void DMAbuf_start_devices(unsigned int devmask);
-void DMAbuf_reset (int dev);
-int DMAbuf_sync (int dev);
-
-/*
- * System calls for /dev/dsp and /dev/audio (audio.c)
- */
-
-int audio_read (int dev, struct file *file, char __user *buf, int count);
-int audio_write (int dev, struct file *file, const char __user *buf, int count);
-int audio_open (int dev, struct file *file);
-void audio_release (int dev, struct file *file);
-int audio_ioctl (int dev, struct file *file,
- unsigned int cmd, void __user *arg);
-void audio_init_devices (void);
-void reorganize_buffers (int dev, struct dma_buffparms *dmap, int recording);
-
-/*
- * System calls for the /dev/sequencer
- */
-
-int sequencer_read (int dev, struct file *file, char __user *buf, int count);
-int sequencer_write (int dev, struct file *file, const char __user *buf, int count);
-int sequencer_open (int dev, struct file *file);
-void sequencer_release (int dev, struct file *file);
-int sequencer_ioctl (int dev, struct file *file, unsigned int cmd, void __user *arg);
-unsigned int sequencer_poll(int dev, struct file *file, poll_table * wait);
-
-void sequencer_init (void);
-void sequencer_unload (void);
-void sequencer_timer(unsigned long dummy);
-int note_to_freq(int note_num);
-unsigned long compute_finetune(unsigned long base_freq, int bend, int range,
- int vibrato_bend);
-void seq_input_event(unsigned char *event, int len);
-void seq_copy_to_input (unsigned char *event, int len);
-
-/*
- * System calls for the /dev/midi
- */
-
-int MIDIbuf_read (int dev, struct file *file, char __user *buf, int count);
-int MIDIbuf_write (int dev, struct file *file, const char __user *buf, int count);
-int MIDIbuf_open (int dev, struct file *file);
-void MIDIbuf_release (int dev, struct file *file);
-int MIDIbuf_ioctl (int dev, struct file *file, unsigned int cmd, void __user *arg);
-unsigned int MIDIbuf_poll(int dev, struct file *file, poll_table * wait);
-int MIDIbuf_avail(int dev);
-
-void MIDIbuf_bytes_received(int dev, unsigned char *buf, int count);
-
-
-/* From soundcard.c */
-void request_sound_timer (int count);
-void sound_stop_timer(void);
-void conf_printf(char *name, struct address_info *hw_config);
-void conf_printf2(char *name, int base, int irq, int dma, int dma2);
-
-/* From sound_timer.c */
-void sound_timer_interrupt(void);
-void sound_timer_syncinterval(unsigned int new_usecs);
-
-/* From midi_synth.c */
-void do_midi_msg (int synthno, unsigned char *msg, int mlen);
diff --git a/sound/oss/sound_config.h b/sound/oss/sound_config.h
deleted file mode 100644
index 5253b0a70437..000000000000
--- a/sound/oss/sound_config.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/* sound_config.h
- *
- * Miscellaneous configuration parameters for the sound card drivers.
- */
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- */
-
-
-#ifndef _SOUND_CONFIG_H_
-#define _SOUND_CONFIG_H_
-
-#include <linux/fs.h>
-#include <linux/sound.h>
-#include <linux/sched/signal.h>
-
-#include "os.h"
-#include "soundvers.h"
-
-
-#ifndef SND_DEFAULT_ENABLE
-#define SND_DEFAULT_ENABLE 1
-#endif
-
-#ifndef MAX_REALTIME_FACTOR
-#define MAX_REALTIME_FACTOR 4
-#endif
-
-/*
- * Always use a 64k buffer size. There is no reason to use a smaller one.
- */
-#undef DSP_BUFFSIZE
-#define DSP_BUFFSIZE (64*1024)
-
-#ifndef DSP_BUFFCOUNT
-#define DSP_BUFFCOUNT 1 /* 1 is recommended. */
-#endif
-
-#define FM_MONO 0x388 /* This is the I/O address used by AdLib */
-
-#ifndef CONFIG_PAS_BASE
-#define CONFIG_PAS_BASE 0x388
-#endif
-
-/* SEQ_MAX_QUEUE is the maximum number of sequencer events buffered by the
- driver. (There is no need to alter this) */
-#define SEQ_MAX_QUEUE 1024
-
-#define SBFM_MAXINSTR (256) /* Size of the FM Instrument bank */
-/* 128 instruments for general MIDI setup and 16 unassigned */
-
-#define SND_NDEVS 256 /* Number of supported devices */
-
-#define DSP_DEFAULT_SPEED 8000
-
-#define MAX_AUDIO_DEV 5
-#define MAX_MIXER_DEV 5
-#define MAX_SYNTH_DEV 5
-#define MAX_MIDI_DEV 6
-#define MAX_TIMER_DEV 4
-
-struct address_info {
- int io_base;
- int irq;
- int dma;
- int dma2;
- int always_detect; /* 1=Trust me, it's there */
- char *name;
- int driver_use_1; /* Driver defined field 1 */
- int driver_use_2; /* Driver defined field 2 */
- int *osp; /* OS specific info */
- int card_subtype; /* Driver specific. Usually 0 */
- void *memptr; /* Module memory chainer */
- int slots[6]; /* To remember driver slot ids */
-};
-
-#define SYNTH_MAX_VOICES 32
-
-struct voice_alloc_info {
- int max_voice;
- int used_voices;
- int ptr; /* For device specific use */
- unsigned short map[SYNTH_MAX_VOICES]; /* (ch << 8) | (note+1) */
- int timestamp;
- int alloc_times[SYNTH_MAX_VOICES];
- };
-
-struct channel_info {
- int pgm_num;
- int bender_value;
- int bender_range;
- unsigned char controllers[128];
- };
-
-/*
- * Process wakeup reasons
- */
-#define WK_NONE 0x00
-#define WK_WAKEUP 0x01
-#define WK_TIMEOUT 0x02
-#define WK_SIGNAL 0x04
-#define WK_SLEEP 0x08
-#define WK_SELECT 0x10
-#define WK_ABORT 0x20
-
-#define OPEN_READ PCM_ENABLE_INPUT
-#define OPEN_WRITE PCM_ENABLE_OUTPUT
-#define OPEN_READWRITE (OPEN_READ|OPEN_WRITE)
-
-static inline int translate_mode(struct file *file)
-{
- if (OPEN_READ == (__force int)FMODE_READ &&
- OPEN_WRITE == (__force int)FMODE_WRITE)
- return (__force int)(file->f_mode & (FMODE_READ | FMODE_WRITE));
- else
- return ((file->f_mode & FMODE_READ) ? OPEN_READ : 0) |
- ((file->f_mode & FMODE_WRITE) ? OPEN_WRITE : 0);
-}
-
-#include "sound_calls.h"
-#include "dev_table.h"
-
-#ifndef DDB
-#define DDB(x) do {} while (0)
-#endif
-
-#ifndef MDB
-#ifdef MODULE
-#define MDB(x) x
-#else
-#define MDB(x)
-#endif
-#endif
-
-#define TIMER_ARMED 121234
-#define TIMER_NOT_ARMED 1
-
-#define MAX_MEM_BLOCKS 1024
-
-#endif
diff --git a/sound/oss/sound_firmware.h b/sound/oss/sound_firmware.h
deleted file mode 100644
index ebcbded0e8c2..000000000000
--- a/sound/oss/sound_firmware.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/fs.h>
-
-/**
- * mod_firmware_load - load sound driver firmware
- * @fn: filename
- * @fp: pointer used to return the buffer.
- *
- * Load the firmware for a sound module (up to 128K) into a buffer.
- * The buffer is returned in *fp. It is allocated with vmalloc, so it is
- * virtually contiguous and not DMA-able. The caller should free it with
- * vfree when finished.
- *
- * The length of the buffer is returned on a successful load; zero is
- * returned on failure.
- *
- * Caution: This API is not recommended. Firmware should be loaded via
- * request_firmware.
- */
-static inline int mod_firmware_load(const char *fn, char **fp)
-{
- loff_t size;
- int err;
-
- err = kernel_read_file_from_path(fn, (void **)fp, &size,
- 131072, READING_FIRMWARE);
- if (err < 0)
- return 0;
- return size;
-}
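A hedged usage sketch of the helper documented above; only the load/vfree contract comes from the comment, while the caller, the firmware path and the download step are placeholders (a real caller would also need <linux/vmalloc.h> for vfree):

    static int example_load_firmware(void)
    {
            char *data;
            int len;

            /* The path is a placeholder; a real driver passes its own file. */
            len = mod_firmware_load("/lib/firmware/examplecard.bin", &data);
            if (!len)
                    return -EIO;            /* zero means the load failed */

            /* ... push 'len' bytes from the vmalloc'd buffer to the card ... */

            vfree(data);                    /* the caller owns the buffer */
            return 0;
    }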
diff --git a/sound/oss/sound_timer.c b/sound/oss/sound_timer.c
deleted file mode 100644
index 3a444a6f10eb..000000000000
--- a/sound/oss/sound_timer.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * sound/oss/sound_timer.c
- */
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- */
-/*
- * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
- */
-#include <linux/string.h>
-#include <linux/spinlock.h>
-
-#include "sound_config.h"
-
-static volatile int initialized, opened, tmr_running;
-static volatile unsigned int tmr_offs, tmr_ctr;
-static volatile unsigned long ticks_offs;
-static volatile int curr_tempo, curr_timebase;
-static volatile unsigned long curr_ticks;
-static volatile unsigned long next_event_time;
-static unsigned long prev_event_time;
-static volatile unsigned long usecs_per_tmr; /* Length of the current interval */
-
-static struct sound_lowlev_timer *tmr;
-static DEFINE_SPINLOCK(lock);
-
-static unsigned long tmr2ticks(int tmr_value)
-{
- /*
- * Convert timer ticks to MIDI ticks
- */
-
- unsigned long tmp;
- unsigned long scale;
-
- tmp = tmr_value * usecs_per_tmr; /* Convert to usecs */
- scale = (60 * 1000000) / (curr_tempo * curr_timebase); /* usecs per MIDI tick */
- return (tmp + (scale / 2)) / scale;
-}
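With the defaults that timer_open() below establishes (tempo 60, timebase 100), scale works out to 60,000,000 / (60 * 100) = 10,000 microseconds, i.e. one MIDI tick every 10 ms; the scale/2 term rounds the division to the nearest tick instead of always truncating.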
-
-void reprogram_timer(void)
-{
- unsigned long usecs_per_tick;
-
- /*
-	 * The user is trying to change the timer rate before a low-level
-	 * timer has been attached; that is not allowed, so just return.
- */
-
- if(!tmr)
- return;
-
- usecs_per_tick = (60 * 1000000) / (curr_tempo * curr_timebase);
-
- /*
-	 * Don't kill the system by setting the timer rate too high
- */
- if (usecs_per_tick < 2000)
- usecs_per_tick = 2000;
-
- usecs_per_tmr = tmr->tmr_start(tmr->dev, usecs_per_tick);
-}
-
-void sound_timer_syncinterval(unsigned int new_usecs)
-{
- /*
- * This routine is called by the hardware level if
- * the clock frequency has changed for some reason.
- */
- tmr_offs = tmr_ctr;
- ticks_offs += tmr2ticks(tmr_ctr);
- tmr_ctr = 0;
- usecs_per_tmr = new_usecs;
-}
-EXPORT_SYMBOL(sound_timer_syncinterval);
-
-static void tmr_reset(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&lock,flags);
- tmr_offs = 0;
- ticks_offs = 0;
- tmr_ctr = 0;
- next_event_time = (unsigned long) -1;
- prev_event_time = 0;
- curr_ticks = 0;
- spin_unlock_irqrestore(&lock,flags);
-}
-
-static int timer_open(int dev, int mode)
-{
- if (opened)
- return -EBUSY;
- tmr_reset();
- curr_tempo = 60;
- curr_timebase = 100;
- opened = 1;
- reprogram_timer();
- return 0;
-}
-
-static void timer_close(int dev)
-{
- opened = tmr_running = 0;
- tmr->tmr_disable(tmr->dev);
-}
-
-static int timer_event(int dev, unsigned char *event)
-{
- unsigned char cmd = event[1];
- unsigned long parm = *(int *) &event[4];
-
- switch (cmd)
- {
- case TMR_WAIT_REL:
- parm += prev_event_time;
- case TMR_WAIT_ABS:
- if (parm > 0)
- {
- long time;
-
- if (parm <= curr_ticks) /* It's the time */
- return TIMER_NOT_ARMED;
- time = parm;
- next_event_time = prev_event_time = time;
- return TIMER_ARMED;
- }
- break;
-
- case TMR_START:
- tmr_reset();
- tmr_running = 1;
- reprogram_timer();
- break;
-
- case TMR_STOP:
- tmr_running = 0;
- break;
-
- case TMR_CONTINUE:
- tmr_running = 1;
- reprogram_timer();
- break;
-
- case TMR_TEMPO:
- if (parm)
- {
- if (parm < 8)
- parm = 8;
- if (parm > 250)
- parm = 250;
- tmr_offs = tmr_ctr;
- ticks_offs += tmr2ticks(tmr_ctr);
- tmr_ctr = 0;
- curr_tempo = parm;
- reprogram_timer();
- }
- break;
-
- case TMR_ECHO:
- seq_copy_to_input(event, 8);
- break;
-
- default:;
- }
- return TIMER_NOT_ARMED;
-}
-
-static unsigned long timer_get_time(int dev)
-{
- if (!opened)
- return 0;
- return curr_ticks;
-}
-
-static int timer_ioctl(int dev, unsigned int cmd, void __user *arg)
-{
- int __user *p = arg;
- int val;
-
- switch (cmd)
- {
- case SNDCTL_TMR_SOURCE:
- val = TMR_INTERNAL;
- break;
-
- case SNDCTL_TMR_START:
- tmr_reset();
- tmr_running = 1;
- return 0;
-
- case SNDCTL_TMR_STOP:
- tmr_running = 0;
- return 0;
-
- case SNDCTL_TMR_CONTINUE:
- tmr_running = 1;
- return 0;
-
- case SNDCTL_TMR_TIMEBASE:
- if (get_user(val, p))
- return -EFAULT;
- if (val)
- {
- if (val < 1)
- val = 1;
- if (val > 1000)
- val = 1000;
- curr_timebase = val;
- }
- val = curr_timebase;
- break;
-
- case SNDCTL_TMR_TEMPO:
- if (get_user(val, p))
- return -EFAULT;
- if (val)
- {
- if (val < 8)
- val = 8;
- if (val > 250)
- val = 250;
- tmr_offs = tmr_ctr;
- ticks_offs += tmr2ticks(tmr_ctr);
- tmr_ctr = 0;
- curr_tempo = val;
- reprogram_timer();
- }
- val = curr_tempo;
- break;
-
- case SNDCTL_SEQ_CTRLRATE:
- if (get_user(val, p))
- return -EFAULT;
- if (val != 0) /* Can't change */
- return -EINVAL;
- val = ((curr_tempo * curr_timebase) + 30) / 60;
- break;
-
- case SNDCTL_SEQ_GETTIME:
- val = curr_ticks;
- break;
-
- case SNDCTL_TMR_METRONOME:
- default:
- return -EINVAL;
- }
- return put_user(val, p);
-}
-
-static void timer_arm(int dev, long time)
-{
- if (time < 0)
- time = curr_ticks + 1;
- else if (time <= curr_ticks) /* It's the time */
- return;
-
- next_event_time = prev_event_time = time;
- return;
-}
-
-static struct sound_timer_operations sound_timer =
-{
- .owner = THIS_MODULE,
- .info = {"Sound Timer", 0},
- .priority = 1, /* Priority */
- .devlink = 0, /* Local device link */
- .open = timer_open,
- .close = timer_close,
- .event = timer_event,
- .get_time = timer_get_time,
- .ioctl = timer_ioctl,
- .arm_timer = timer_arm
-};
-
-void sound_timer_interrupt(void)
-{
- unsigned long flags;
-
- if (!opened)
- return;
-
- tmr->tmr_restart(tmr->dev);
-
- if (!tmr_running)
- return;
-
- spin_lock_irqsave(&lock,flags);
- tmr_ctr++;
- curr_ticks = ticks_offs + tmr2ticks(tmr_ctr);
-
- if (curr_ticks >= next_event_time)
- {
- next_event_time = (unsigned long) -1;
- sequencer_timer(0);
- }
- spin_unlock_irqrestore(&lock,flags);
-}
-EXPORT_SYMBOL(sound_timer_interrupt);
-
-void sound_timer_init(struct sound_lowlev_timer *t, char *name)
-{
- int n;
-
- if (initialized)
- {
- if (t->priority <= tmr->priority)
- return; /* There is already a similar or better timer */
- tmr = t;
- return;
- }
- initialized = 1;
- tmr = t;
-
- n = sound_alloc_timerdev();
- if (n == -1)
- n = 0; /* Overwrite the system timer */
- strlcpy(sound_timer.info.name, name, sizeof(sound_timer.info.name));
- sound_timer_devs[n] = &sound_timer;
-}
-EXPORT_SYMBOL(sound_timer_init);
-
diff --git a/sound/oss/soundcard.c b/sound/oss/soundcard.c
deleted file mode 100644
index 4391062e5cfd..000000000000
--- a/sound/oss/soundcard.c
+++ /dev/null
@@ -1,733 +0,0 @@
-/*
- * linux/sound/oss/soundcard.c
- *
- * Sound card driver for Linux
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- *
- * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
- * integrated sound_switch.c
- * Stefan Reinauer : integrated /proc/sound (equals to /dev/sndstat,
- * which should disappear in the near future)
- * Eric Dumas : devfs support (22-Jan-98) <dumas@linux.eu.org> with
- * fixups by C. Scott Ananian <cananian@alumni.princeton.edu>
- * Richard Gooch : moved common (non OSS-specific) devices to sound_core.c
- * Rob Riggs : Added persistent DMA buffers support (1998/10/17)
- * Christoph Hellwig : Some cleanup work (2000/03/01)
- */
-
-
-#include "sound_config.h"
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/fcntl.h>
-#include <linux/ctype.h>
-#include <linux/stddef.h>
-#include <linux/kmod.h>
-#include <linux/kernel.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <linux/wait.h>
-#include <linux/ioport.h>
-#include <linux/major.h>
-#include <linux/delay.h>
-#include <linux/proc_fs.h>
-#include <linux/mutex.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/device.h>
-
-/*
- * This ought to be moved into include/asm/dma.h
- */
-#ifndef valid_dma
-#define valid_dma(n) ((n) >= 0 && (n) < MAX_DMA_CHANNELS && (n) != 4)
-#endif
-
-/*
- * Table for permanently allocated memory (used when unloading the module)
- */
-void * sound_mem_blocks[MAX_MEM_BLOCKS];
-static DEFINE_MUTEX(soundcard_mutex);
-int sound_nblocks = 0;
-
-/* Persistent DMA buffers */
-#ifdef CONFIG_SOUND_DMAP
-int sound_dmap_flag = 1;
-#else
-int sound_dmap_flag = 0;
-#endif
-
-static char dma_alloc_map[MAX_DMA_CHANNELS];
-
-#define DMA_MAP_UNAVAIL 0
-#define DMA_MAP_FREE 1
-#define DMA_MAP_BUSY 2
-
-
-unsigned long seq_time = 0; /* Time for /dev/sequencer */
-extern struct class *sound_class;
-
-/*
- * Table for configurable mixer volume handling
- */
-static mixer_vol_table mixer_vols[MAX_MIXER_DEV];
-static int num_mixer_volumes;
-
-int *load_mixer_volumes(char *name, int *levels, int present)
-{
- int i, n;
-
- for (i = 0; i < num_mixer_volumes; i++) {
- if (strncmp(name, mixer_vols[i].name, 32) == 0) {
- if (present)
- mixer_vols[i].num = i;
- return mixer_vols[i].levels;
- }
- }
- if (num_mixer_volumes >= MAX_MIXER_DEV) {
- printk(KERN_ERR "Sound: Too many mixers (%s)\n", name);
- return levels;
- }
- n = num_mixer_volumes++;
-
- strncpy(mixer_vols[n].name, name, 32);
-
- if (present)
- mixer_vols[n].num = n;
- else
- mixer_vols[n].num = -1;
-
- for (i = 0; i < 32; i++)
- mixer_vols[n].levels[i] = levels[i];
- return mixer_vols[n].levels;
-}
-EXPORT_SYMBOL(load_mixer_volumes);
-
-static int set_mixer_levels(void __user * arg)
-{
-	/* mixer_vol_table is 174 bytes, so IMHO no reason not to allocate it on the stack */
- mixer_vol_table buf;
-
- if (__copy_from_user(&buf, arg, sizeof(buf)))
- return -EFAULT;
- load_mixer_volumes(buf.name, buf.levels, 0);
- if (__copy_to_user(arg, &buf, sizeof(buf)))
- return -EFAULT;
- return 0;
-}
-
-static int get_mixer_levels(void __user * arg)
-{
- int n;
-
- if (__get_user(n, (int __user *)(&(((mixer_vol_table __user *)arg)->num))))
- return -EFAULT;
- if (n < 0 || n >= num_mixer_volumes)
- return -EINVAL;
- if (__copy_to_user(arg, &mixer_vols[n], sizeof(mixer_vol_table)))
- return -EFAULT;
- return 0;
-}
-
-/* 4K page size but our output routines use some slack for overruns */
-#define PROC_BLOCK_SIZE (3*1024)
-
-static ssize_t sound_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
-{
- int dev = iminor(file_inode(file));
- int ret = -EINVAL;
-
- /*
- * The OSS drivers aren't remotely happy without this locking,
- * and unless someone fixes them when they are about to bite the
- * big one anyway, we might as well bandage here..
- */
-
- mutex_lock(&soundcard_mutex);
-
- switch (dev & 0x0f) {
- case SND_DEV_DSP:
- case SND_DEV_DSP16:
- case SND_DEV_AUDIO:
- ret = audio_read(dev, file, buf, count);
- break;
-
- case SND_DEV_SEQ:
- case SND_DEV_SEQ2:
- ret = sequencer_read(dev, file, buf, count);
- break;
-
- case SND_DEV_MIDIN:
- ret = MIDIbuf_read(dev, file, buf, count);
- }
- mutex_unlock(&soundcard_mutex);
- return ret;
-}
-
-static ssize_t sound_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-{
- int dev = iminor(file_inode(file));
- int ret = -EINVAL;
-
- mutex_lock(&soundcard_mutex);
- switch (dev & 0x0f) {
- case SND_DEV_SEQ:
- case SND_DEV_SEQ2:
- ret = sequencer_write(dev, file, buf, count);
- break;
-
- case SND_DEV_DSP:
- case SND_DEV_DSP16:
- case SND_DEV_AUDIO:
- ret = audio_write(dev, file, buf, count);
- break;
-
- case SND_DEV_MIDIN:
- ret = MIDIbuf_write(dev, file, buf, count);
- break;
- }
- mutex_unlock(&soundcard_mutex);
- return ret;
-}
-
-static int sound_open(struct inode *inode, struct file *file)
-{
- int dev = iminor(inode);
- int retval;
-
- if ((dev >= SND_NDEVS) || (dev < 0)) {
- printk(KERN_ERR "Invalid minor device %d\n", dev);
- return -ENXIO;
- }
- mutex_lock(&soundcard_mutex);
- switch (dev & 0x0f) {
- case SND_DEV_CTL:
- dev >>= 4;
- if (dev >= 0 && dev < MAX_MIXER_DEV && mixer_devs[dev] == NULL) {
- request_module("mixer%d", dev);
- }
- retval = -ENXIO;
- if (dev && (dev >= num_mixers || mixer_devs[dev] == NULL))
- break;
-
- if (!try_module_get(mixer_devs[dev]->owner))
- break;
-
- retval = 0;
- break;
-
- case SND_DEV_SEQ:
- case SND_DEV_SEQ2:
- retval = sequencer_open(dev, file);
- break;
-
- case SND_DEV_MIDIN:
- retval = MIDIbuf_open(dev, file);
- break;
-
- case SND_DEV_DSP:
- case SND_DEV_DSP16:
- case SND_DEV_AUDIO:
- retval = audio_open(dev, file);
- break;
-
- default:
- printk(KERN_ERR "Invalid minor device %d\n", dev);
- retval = -ENXIO;
- }
-
- mutex_unlock(&soundcard_mutex);
- return retval;
-}
-
-static int sound_release(struct inode *inode, struct file *file)
-{
- int dev = iminor(inode);
-
- mutex_lock(&soundcard_mutex);
- switch (dev & 0x0f) {
- case SND_DEV_CTL:
- module_put(mixer_devs[dev >> 4]->owner);
- break;
-
- case SND_DEV_SEQ:
- case SND_DEV_SEQ2:
- sequencer_release(dev, file);
- break;
-
- case SND_DEV_MIDIN:
- MIDIbuf_release(dev, file);
- break;
-
- case SND_DEV_DSP:
- case SND_DEV_DSP16:
- case SND_DEV_AUDIO:
- audio_release(dev, file);
- break;
-
- default:
- printk(KERN_ERR "Sound error: Releasing unknown device 0x%02x\n", dev);
- }
- mutex_unlock(&soundcard_mutex);
-
- return 0;
-}
-
-static int get_mixer_info(int dev, void __user *arg)
-{
- mixer_info info;
- memset(&info, 0, sizeof(info));
- strlcpy(info.id, mixer_devs[dev]->id, sizeof(info.id));
- strlcpy(info.name, mixer_devs[dev]->name, sizeof(info.name));
- info.modify_counter = mixer_devs[dev]->modify_counter;
- if (__copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
- return 0;
-}
-
-static int get_old_mixer_info(int dev, void __user *arg)
-{
- _old_mixer_info info;
- memset(&info, 0, sizeof(info));
- strlcpy(info.id, mixer_devs[dev]->id, sizeof(info.id));
- strlcpy(info.name, mixer_devs[dev]->name, sizeof(info.name));
- if (copy_to_user(arg, &info, sizeof(info)))
- return -EFAULT;
- return 0;
-}
-
-static int sound_mixer_ioctl(int mixdev, unsigned int cmd, void __user *arg)
-{
- if (mixdev < 0 || mixdev >= MAX_MIXER_DEV)
- return -ENXIO;
- /* Try to load the mixer... */
- if (mixer_devs[mixdev] == NULL) {
- request_module("mixer%d", mixdev);
- }
- if (mixdev >= num_mixers || !mixer_devs[mixdev])
- return -ENXIO;
- if (cmd == SOUND_MIXER_INFO)
- return get_mixer_info(mixdev, arg);
- if (cmd == SOUND_OLD_MIXER_INFO)
- return get_old_mixer_info(mixdev, arg);
- if (_SIOC_DIR(cmd) & _SIOC_WRITE)
- mixer_devs[mixdev]->modify_counter++;
- if (!mixer_devs[mixdev]->ioctl)
- return -EINVAL;
- return mixer_devs[mixdev]->ioctl(mixdev, cmd, arg);
-}
-
-static long sound_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int len = 0, dtype;
- int dev = iminor(file_inode(file));
- long ret = -EINVAL;
- void __user *p = (void __user *)arg;
-
- if (_SIOC_DIR(cmd) != _SIOC_NONE && _SIOC_DIR(cmd) != 0) {
- /*
- * Have to validate the address given by the process.
- */
- len = _SIOC_SIZE(cmd);
- if (len < 1 || len > 65536 || !p)
- return -EFAULT;
- if (_SIOC_DIR(cmd) & _SIOC_WRITE)
- if (!access_ok(VERIFY_READ, p, len))
- return -EFAULT;
- if (_SIOC_DIR(cmd) & _SIOC_READ)
- if (!access_ok(VERIFY_WRITE, p, len))
- return -EFAULT;
- }
- if (cmd == OSS_GETVERSION)
- return __put_user(SOUND_VERSION, (int __user *)p);
-
- mutex_lock(&soundcard_mutex);
- if (_IOC_TYPE(cmd) == 'M' && num_mixers > 0 && /* Mixer ioctl */
- (dev & 0x0f) != SND_DEV_CTL) {
- dtype = dev & 0x0f;
- switch (dtype) {
- case SND_DEV_DSP:
- case SND_DEV_DSP16:
- case SND_DEV_AUDIO:
- ret = sound_mixer_ioctl(audio_devs[dev >> 4]->mixer_dev,
- cmd, p);
- break;
- default:
- ret = sound_mixer_ioctl(dev >> 4, cmd, p);
- break;
- }
- mutex_unlock(&soundcard_mutex);
- return ret;
- }
-
- switch (dev & 0x0f) {
- case SND_DEV_CTL:
- if (cmd == SOUND_MIXER_GETLEVELS)
- ret = get_mixer_levels(p);
- else if (cmd == SOUND_MIXER_SETLEVELS)
- ret = set_mixer_levels(p);
- else
- ret = sound_mixer_ioctl(dev >> 4, cmd, p);
- break;
-
- case SND_DEV_SEQ:
- case SND_DEV_SEQ2:
- ret = sequencer_ioctl(dev, file, cmd, p);
- break;
-
- case SND_DEV_DSP:
- case SND_DEV_DSP16:
- case SND_DEV_AUDIO:
- ret = audio_ioctl(dev, file, cmd, p);
- break;
-
- case SND_DEV_MIDIN:
- ret = MIDIbuf_ioctl(dev, file, cmd, p);
- break;
-
- }
- mutex_unlock(&soundcard_mutex);
- return ret;
-}
-
-static unsigned int sound_poll(struct file *file, poll_table * wait)
-{
- struct inode *inode = file_inode(file);
- int dev = iminor(inode);
-
- switch (dev & 0x0f) {
- case SND_DEV_SEQ:
- case SND_DEV_SEQ2:
- return sequencer_poll(dev, file, wait);
-
- case SND_DEV_MIDIN:
- return MIDIbuf_poll(dev, file, wait);
-
- case SND_DEV_DSP:
- case SND_DEV_DSP16:
- case SND_DEV_AUDIO:
- return DMAbuf_poll(file, dev >> 4, wait);
- }
- return 0;
-}
-
-static int sound_mmap(struct file *file, struct vm_area_struct *vma)
-{
- int dev_class;
- unsigned long size;
- struct dma_buffparms *dmap = NULL;
- int dev = iminor(file_inode(file));
-
- dev_class = dev & 0x0f;
- dev >>= 4;
-
- if (dev_class != SND_DEV_DSP && dev_class != SND_DEV_DSP16 && dev_class != SND_DEV_AUDIO) {
- printk(KERN_ERR "Sound: mmap() not supported for other than audio devices\n");
- return -EINVAL;
- }
- mutex_lock(&soundcard_mutex);
- if (vma->vm_flags & VM_WRITE) /* Map write and read/write to the output buf */
- dmap = audio_devs[dev]->dmap_out;
- else if (vma->vm_flags & VM_READ)
- dmap = audio_devs[dev]->dmap_in;
- else {
- printk(KERN_ERR "Sound: Undefined mmap() access\n");
- mutex_unlock(&soundcard_mutex);
- return -EINVAL;
- }
-
- if (dmap == NULL) {
- printk(KERN_ERR "Sound: mmap() error. dmap == NULL\n");
- mutex_unlock(&soundcard_mutex);
- return -EIO;
- }
- if (dmap->raw_buf == NULL) {
- printk(KERN_ERR "Sound: mmap() called when raw_buf == NULL\n");
- mutex_unlock(&soundcard_mutex);
- return -EIO;
- }
- if (dmap->mapping_flags) {
- printk(KERN_ERR "Sound: mmap() called twice for the same DMA buffer\n");
- mutex_unlock(&soundcard_mutex);
- return -EIO;
- }
- if (vma->vm_pgoff != 0) {
- printk(KERN_ERR "Sound: mmap() offset must be 0.\n");
- mutex_unlock(&soundcard_mutex);
- return -EINVAL;
- }
- size = vma->vm_end - vma->vm_start;
-
- if (size != dmap->bytes_in_use) {
- printk(KERN_WARNING "Sound: mmap() size = %ld. Should be %d\n", size, dmap->bytes_in_use);
- }
- if (remap_pfn_range(vma, vma->vm_start,
- virt_to_phys(dmap->raw_buf) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
- mutex_unlock(&soundcard_mutex);
- return -EAGAIN;
- }
-
- dmap->mapping_flags |= DMA_MAP_MAPPED;
-
- if( audio_devs[dev]->d->mmap)
- audio_devs[dev]->d->mmap(dev);
-
- memset(dmap->raw_buf,
- dmap->neutral_byte,
- dmap->bytes_in_use);
- mutex_unlock(&soundcard_mutex);
- return 0;
-}
-
-const struct file_operations oss_sound_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = sound_read,
- .write = sound_write,
- .poll = sound_poll,
- .unlocked_ioctl = sound_ioctl,
- .mmap = sound_mmap,
- .open = sound_open,
- .release = sound_release,
-};
-
-/*
- * Create the required special subdevices
- */
-
-static int create_special_devices(void)
-{
- int seq1,seq2;
- seq1=register_sound_special(&oss_sound_fops, 1);
- if(seq1==-1)
- goto bad;
- seq2=register_sound_special(&oss_sound_fops, 8);
- if(seq2!=-1)
- return 0;
- unregister_sound_special(1);
-bad:
- return -1;
-}
-
-
-static int dmabuf;
-static int dmabug;
-
-module_param(dmabuf, int, 0444);
-module_param(dmabug, int, 0444);
-
-/* additional minors for compatibility */
-struct oss_minor_dev {
- unsigned short minor;
- unsigned int enabled;
-} dev_list[] = {
- { SND_DEV_DSP16 },
- { SND_DEV_AUDIO },
-};
-
-static int __init oss_init(void)
-{
- int err;
- int i, j;
-
-#ifdef CONFIG_PCI
- if(dmabug)
- isa_dma_bridge_buggy = dmabug;
-#endif
-
- err = create_special_devices();
- if (err) {
- printk(KERN_ERR "sound: driver already loaded/included in kernel\n");
- return err;
- }
-
- /* Protecting the innocent */
- sound_dmap_flag = (dmabuf > 0 ? 1 : 0);
-
- for (i = 0; i < ARRAY_SIZE(dev_list); i++) {
- j = 0;
- do {
- unsigned short minor = dev_list[i].minor + j * 0x10;
- if (!register_sound_special(&oss_sound_fops, minor))
- dev_list[i].enabled = (1 << j);
- } while (++j < num_audiodevs);
- }
-
- if (sound_nblocks >= MAX_MEM_BLOCKS - 1)
- printk(KERN_ERR "Sound warning: Deallocation table was too small.\n");
-
- return 0;
-}
-
-static void __exit oss_cleanup(void)
-{
- int i, j;
-
- for (i = 0; i < ARRAY_SIZE(dev_list); i++) {
- j = 0;
- do {
- if (dev_list[i].enabled & (1 << j))
- unregister_sound_special(dev_list[i].minor);
- } while (++j < num_audiodevs);
- }
-
- unregister_sound_special(1);
- unregister_sound_special(8);
-
- sound_stop_timer();
-
- sequencer_unload();
-
- for (i = 0; i < MAX_DMA_CHANNELS; i++)
- if (dma_alloc_map[i] != DMA_MAP_UNAVAIL) {
- printk(KERN_ERR "Sound: Hmm, DMA%d was left allocated - fixed\n", i);
- sound_free_dma(i);
- }
-
- for (i = 0; i < sound_nblocks; i++)
- vfree(sound_mem_blocks[i]);
-
-}
-
-module_init(oss_init);
-module_exit(oss_cleanup);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("OSS Sound subsystem");
-MODULE_AUTHOR("Hannu Savolainen, et al.");
-
-
-int sound_alloc_dma(int chn, char *deviceID)
-{
- int err;
-
- if ((err = request_dma(chn, deviceID)) != 0)
- return err;
-
- dma_alloc_map[chn] = DMA_MAP_FREE;
-
- return 0;
-}
-EXPORT_SYMBOL(sound_alloc_dma);
-
-int sound_open_dma(int chn, char *deviceID)
-{
- if (!valid_dma(chn)) {
- printk(KERN_ERR "sound_open_dma: Invalid DMA channel %d\n", chn);
- return 1;
- }
-
- if (dma_alloc_map[chn] != DMA_MAP_FREE) {
- printk("sound_open_dma: DMA channel %d busy or not allocated (%d)\n", chn, dma_alloc_map[chn]);
- return 1;
- }
- dma_alloc_map[chn] = DMA_MAP_BUSY;
- return 0;
-}
-EXPORT_SYMBOL(sound_open_dma);
-
-void sound_free_dma(int chn)
-{
- if (dma_alloc_map[chn] == DMA_MAP_UNAVAIL) {
- /* printk( "sound_free_dma: Bad access to DMA channel %d\n", chn); */
- return;
- }
- free_dma(chn);
- dma_alloc_map[chn] = DMA_MAP_UNAVAIL;
-}
-EXPORT_SYMBOL(sound_free_dma);
-
-void sound_close_dma(int chn)
-{
- if (dma_alloc_map[chn] != DMA_MAP_BUSY) {
- printk(KERN_ERR "sound_close_dma: Bad access to DMA channel %d\n", chn);
- return;
- }
- dma_alloc_map[chn] = DMA_MAP_FREE;
-}
-EXPORT_SYMBOL(sound_close_dma);
-
-static void do_sequencer_timer(unsigned long dummy)
-{
- sequencer_timer(0);
-}
-
-
-static DEFINE_TIMER(seq_timer, do_sequencer_timer);
-
-void request_sound_timer(int count)
-{
- extern unsigned long seq_time;
-
- if (count < 0) {
- seq_timer.expires = (-count) + jiffies;
- add_timer(&seq_timer);
- return;
- }
- count += seq_time;
-
- count -= jiffies;
-
- if (count < 1)
- count = 1;
-
- seq_timer.expires = (count) + jiffies;
- add_timer(&seq_timer);
-}
-
-void sound_stop_timer(void)
-{
- del_timer(&seq_timer);
-}
-
-void conf_printf(char *name, struct address_info *hw_config)
-{
-#ifndef CONFIG_SOUND_TRACEINIT
- return;
-#else
- printk("<%s> at 0x%03x", name, hw_config->io_base);
-
- if (hw_config->irq)
- printk(" irq %d", (hw_config->irq > 0) ? hw_config->irq : -hw_config->irq);
-
- if (hw_config->dma != -1 || hw_config->dma2 != -1)
- {
- printk(" dma %d", hw_config->dma);
- if (hw_config->dma2 != -1)
- printk(",%d", hw_config->dma2);
- }
- printk("\n");
-#endif
-}
-EXPORT_SYMBOL(conf_printf);
-
-void conf_printf2(char *name, int base, int irq, int dma, int dma2)
-{
-#ifndef CONFIG_SOUND_TRACEINIT
- return;
-#else
- printk("<%s> at 0x%03x", name, base);
-
- if (irq)
- printk(" irq %d", (irq > 0) ? irq : -irq);
-
- if (dma != -1 || dma2 != -1)
- {
- printk(" dma %d", dma);
- if (dma2 != -1)
- printk(",%d", dma2);
- }
- printk("\n");
-#endif
-}
-EXPORT_SYMBOL(conf_printf2);
-
diff --git a/sound/oss/soundvers.h b/sound/oss/soundvers.h
deleted file mode 100644
index e9084d2f46a9..000000000000
--- a/sound/oss/soundvers.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#define SOUND_VERSION_STRING "3.8s2++-971130"
-#define SOUND_INTERNAL_VERSION 0x030804
diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
deleted file mode 100644
index 97899352b15f..000000000000
--- a/sound/oss/swarm_cs4297a.c
+++ /dev/null
@@ -1,2781 +0,0 @@
-/*******************************************************************************
-*
-* "swarm_cs4297a.c" -- Cirrus Logic-Crystal CS4297a linux audio driver.
-*
-* Copyright (C) 2001 Broadcom Corporation.
-* Copyright (C) 2000,2001 Cirrus Logic Corp.
-* -- adapted from drivers by Thomas Sailer,
-* -- but don't bug him; Problems should go to:
-* -- tom woller (twoller@crystal.cirrus.com) or
-* (audio@crystal.cirrus.com).
-* -- adapted from cs4281 PCI driver for cs4297a on
-* BCM1250 Synchronous Serial interface
-* (Kip Walker, Broadcom Corp.)
-* Copyright (C) 2004 Maciej W. Rozycki
-* Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*
-* Module command line parameters:
-* none
-*
-* Supported devices:
-* /dev/dsp standard /dev/dsp device, (mostly) OSS compatible
-* /dev/mixer standard /dev/mixer device, (mostly) OSS compatible
-* /dev/midi simple MIDI UART interface, no ioctl
-*
-* Modification History
-* 08/20/00 trw - silence and no stopping DAC until release
-* 08/23/00 trw - added CS_DBG statements, fix interrupt hang issue on DAC stop.
-* 09/18/00 trw - added 16bit only record with conversion
-* 09/24/00 trw - added Enhanced Full duplex (separate simultaneous
-* capture/playback rates)
-* 10/03/00 trw - fixed mmap (fixed GRECORD and the XMMS mmap test plugin
-* libOSSm.so)
-* 10/11/00 trw - modified for 2.4.0-test9 kernel enhancements (NR_MAP removal)
-* 11/03/00 trw - fixed interrupt loss/stutter, added debug.
-* 11/10/00 bkz - added __devinit to cs4297a_hw_init()
-* 11/10/00 trw - fixed SMP and capture spinlock hang.
-* 12/04/00 trw - cleaned up CSDEBUG flags and added "defaultorder" moduleparm.
-* 12/05/00 trw - fixed polling (myth2), and added underrun swptr fix.
-* 12/08/00 trw - added PM support.
-* 12/14/00 trw - added wrapper code, builds under 2.4.0, 2.2.17-20, 2.2.17-8
-* (RH/Dell base), 2.2.18, 2.2.12. cleaned up code mods by ident.
-* 12/19/00 trw - added PM support for 2.2 base (apm_callback). other PM cleanup.
-* 12/21/00 trw - added fractional "defaultorder" inputs. if >100 then use
-* defaultorder-100 as power of 2 for the buffer size. example:
-* 106 = 2^(106-100) = 2^6 = 64 bytes for the buffer size.
-*
-*******************************************************************************/
-
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/sched/signal.h>
-#include <linux/delay.h>
-#include <linux/sound.h>
-#include <linux/slab.h>
-#include <linux/soundcard.h>
-#include <linux/pci.h>
-#include <linux/bitops.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/mutex.h>
-#include <linux/kernel.h>
-
-#include <asm/byteorder.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-
-#include <asm/sibyte/sb1250_regs.h>
-#include <asm/sibyte/sb1250_int.h>
-#include <asm/sibyte/sb1250_dma.h>
-#include <asm/sibyte/sb1250_scd.h>
-#include <asm/sibyte/sb1250_syncser.h>
-#include <asm/sibyte/sb1250_mac.h>
-#include <asm/sibyte/sb1250.h>
-
-#include "sleep.h"
-
-struct cs4297a_state;
-
-static DEFINE_MUTEX(swarm_cs4297a_mutex);
-static void stop_dac(struct cs4297a_state *s);
-static void stop_adc(struct cs4297a_state *s);
-static void start_dac(struct cs4297a_state *s);
-static void start_adc(struct cs4297a_state *s);
-#undef OSS_DOCUMENTED_MIXER_SEMANTICS
-
-// ---------------------------------------------------------------------
-
-#define CS4297a_MAGIC 0xf00beef1
-
-// buffer order determines the size of the dma buffer for the driver.
-// under Linux, a smaller buffer allows more responsiveness from many of the
-// applications (e.g. games). A larger buffer allows some of the apps (esound)
-// to not underrun the dma buffer as easily. As default, use 32k (order=3)
-// rather than 64k as some of the games work more responsively.
-// log base 2( buff sz = 32k).
-
-//
-// Turn on/off debugging compilation by commenting out "#define CSDEBUG"
-//
-#define CSDEBUG 0
-#if CSDEBUG
-#define CSDEBUG_INTERFACE 1
-#else
-#undef CSDEBUG_INTERFACE
-#endif
-//
-// cs_debugmask areas
-//
-#define CS_INIT 0x00000001 // initialization and probe functions
-#define CS_ERROR 0x00000002 // tmp debugging bit placeholder
-#define CS_INTERRUPT 0x00000004 // interrupt handler (separate from all other)
-#define CS_FUNCTION 0x00000008 // enter/leave functions
-#define CS_WAVE_WRITE 0x00000010 // write information for wave
-#define CS_WAVE_READ 0x00000020 // read information for wave
-#define CS_AC97 0x00000040 // AC97 register access
-#define CS_DESCR 0x00000080 // descriptor management
-#define CS_OPEN 0x00000400 // all open functions in the driver
-#define CS_RELEASE 0x00000800 // all release functions in the driver
-#define CS_PARMS 0x00001000 // functional and operational parameters
-#define CS_IOCTL 0x00002000 // ioctl (non-mixer)
-#define CS_TMP 0x10000000 // tmp debug mask bit
-
-//
-// CSDEBUG in the usual mode is set to 1; then use the
-// cs_debuglevel and cs_debugmask to turn debugging on or off.
-// Debug level of 1 has been defined to be kernel errors and info
-// that should be printed on any released driver.
-//
-#if CSDEBUG
-#define CS_DBGOUT(mask,level,x) if((cs_debuglevel >= (level)) && ((mask) & cs_debugmask) ) {x;}
-#else
-#define CS_DBGOUT(mask,level,x)
-#endif
-
-#if CSDEBUG
-static unsigned long cs_debuglevel = 4; // levels range from 1-9
-static unsigned long cs_debugmask = CS_INIT /*| CS_IOCTL*/;
-module_param(cs_debuglevel, int, 0);
-module_param(cs_debugmask, int, 0);
-#endif
-#define CS_TRUE 1
-#define CS_FALSE 0
-
-#define CS_TYPE_ADC 0
-#define CS_TYPE_DAC 1
-
-#define SER_BASE (A_SER_BASE_1 + KSEG1)
-#define SS_CSR(t) (SER_BASE+t)
-#define SS_TXTBL(t) (SER_BASE+R_SER_TX_TABLE_BASE+(t*8))
-#define SS_RXTBL(t) (SER_BASE+R_SER_RX_TABLE_BASE+(t*8))
-
-#define FRAME_BYTES 32
-#define FRAME_SAMPLE_BYTES 4
-
-/* Should this be variable? */
-#define SAMPLE_BUF_SIZE (16*1024)
-#define SAMPLE_FRAME_COUNT (SAMPLE_BUF_SIZE / FRAME_SAMPLE_BYTES)
-/* The driver can explode/shrink the frames to/from a smaller sample
- buffer */
-#define DMA_BLOAT_FACTOR 1
-#define DMA_DESCR (SAMPLE_FRAME_COUNT / DMA_BLOAT_FACTOR)
-#define DMA_BUF_SIZE (DMA_DESCR * FRAME_BYTES)
-
-/* Use the maximum count (255 == 5.1 ms between interrupts) */
-#define DMA_INT_CNT ((1 << S_DMA_INT_PKTCNT) - 1)
-
-/* Figure this out: how many TX DMAs ahead to schedule a reg access */
-#define REG_LATENCY 150
-
-#define FRAME_TX_US 20
-
-#define SERDMA_NEXTBUF(d,f) (((d)->f+1) % (d)->ringsz)
-
-static const char invalid_magic[] =
- KERN_CRIT "cs4297a: invalid magic value\n";
-
-#define VALIDATE_STATE(s) \
-({ \
- if (!(s) || (s)->magic != CS4297a_MAGIC) { \
- printk(invalid_magic); \
- return -ENXIO; \
- } \
-})
-
-/* AC97 registers */
-#define AC97_MASTER_VOL_STEREO 0x0002 /* Line Out */
-#define AC97_PCBEEP_VOL 0x000a /* none */
-#define AC97_PHONE_VOL 0x000c /* TAD Input (mono) */
-#define AC97_MIC_VOL 0x000e /* MIC Input (mono) */
-#define AC97_LINEIN_VOL 0x0010 /* Line Input (stereo) */
-#define AC97_CD_VOL 0x0012 /* CD Input (stereo) */
-#define AC97_AUX_VOL 0x0016 /* Aux Input (stereo) */
-#define AC97_PCMOUT_VOL 0x0018 /* Wave Output (stereo) */
-#define AC97_RECORD_SELECT 0x001a /* */
-#define AC97_RECORD_GAIN 0x001c
-#define AC97_GENERAL_PURPOSE 0x0020
-#define AC97_3D_CONTROL 0x0022
-#define AC97_POWER_CONTROL 0x0026
-#define AC97_VENDOR_ID1 0x007c
-
-struct list_head cs4297a_devs = { &cs4297a_devs, &cs4297a_devs };
-
-typedef struct serdma_descr_s {
- u64 descr_a;
- u64 descr_b;
-} serdma_descr_t;
-
-typedef unsigned long paddr_t;
-
-typedef struct serdma_s {
- unsigned ringsz;
- serdma_descr_t *descrtab;
- serdma_descr_t *descrtab_end;
- paddr_t descrtab_phys;
-
- serdma_descr_t *descr_add;
- serdma_descr_t *descr_rem;
-
- u64 *dma_buf; // buffer for DMA contents (frames)
- paddr_t dma_buf_phys;
- u16 *sample_buf; // tmp buffer for sample conversions
- u16 *sb_swptr;
- u16 *sb_hwptr;
- u16 *sb_end;
-
- dma_addr_t dmaaddr;
-// unsigned buforder; // Log base 2 of 'dma_buf' size in bytes..
- unsigned numfrag; // # of 'fragments' in the buffer.
- unsigned fragshift; // Log base 2 of fragment size.
- unsigned hwptr, swptr;
-	unsigned total_bytes;	// # bytes processed since open.
- unsigned blocks; // last returned blocks value GETOPTR
- unsigned wakeup; // interrupt occurred on block
- int count;
- unsigned underrun; // underrun flag
- unsigned error; // over/underrun
- wait_queue_head_t wait;
- wait_queue_head_t reg_wait;
- // redundant, but makes calculations easier
- unsigned fragsize; // 2**fragshift..
- unsigned sbufsz; // 2**buforder.
- unsigned fragsamples;
- // OSS stuff
- unsigned mapped:1; // Buffer mapped in cs4297a_mmap()?
- unsigned ready:1; // prog_dmabuf_dac()/adc() successful?
- unsigned endcleared:1;
- unsigned type:1; // adc or dac buffer (CS_TYPE_XXX)
- unsigned ossfragshift;
- int ossmaxfrags;
- unsigned subdivision;
-} serdma_t;
-
-struct cs4297a_state {
- // magic
- unsigned int magic;
-
- struct list_head list;
-
- // soundcore stuff
- int dev_audio;
- int dev_mixer;
-
- // hardware resources
- unsigned int irq;
-
- struct {
- unsigned int rx_ovrrn; /* FIFO */
- unsigned int rx_overflow; /* staging buffer */
- unsigned int tx_underrun;
- unsigned int rx_bad;
- unsigned int rx_good;
- } stats;
-
- // mixer registers
- struct {
- unsigned short vol[10];
- unsigned int recsrc;
- unsigned int modcnt;
- unsigned short micpreamp;
- } mix;
-
- // wave stuff
- struct properties {
- unsigned fmt;
- unsigned fmt_original; // original requested format
- unsigned channels;
- unsigned rate;
- } prop_dac, prop_adc;
- unsigned conversion:1; // conversion from 16 to 8 bit in progress
- unsigned ena;
- spinlock_t lock;
- struct mutex open_mutex;
- struct mutex open_sem_adc;
- struct mutex open_sem_dac;
- fmode_t open_mode;
- wait_queue_head_t open_wait;
- wait_queue_head_t open_wait_adc;
- wait_queue_head_t open_wait_dac;
-
- dma_addr_t dmaaddr_sample_buf;
- unsigned buforder_sample_buf; // Log base 2 of 'dma_buf' size in bytes..
-
- serdma_t dma_dac, dma_adc;
-
- volatile u16 read_value;
- volatile u16 read_reg;
- volatile u64 reg_request;
-};
-
-#if 1
-#define prog_codec(a,b)
-#define dealloc_dmabuf(a,b);
-#endif
-
-static int prog_dmabuf_adc(struct cs4297a_state *s)
-{
- s->dma_adc.ready = 1;
- return 0;
-}
-
-
-static int prog_dmabuf_dac(struct cs4297a_state *s)
-{
- s->dma_dac.ready = 1;
- return 0;
-}
-
-static void clear_advance(void *buf, unsigned bsize, unsigned bptr,
- unsigned len, unsigned char c)
-{
- if (bptr + len > bsize) {
- unsigned x = bsize - bptr;
- memset(((char *) buf) + bptr, c, x);
- bptr = 0;
- len -= x;
- }
- CS_DBGOUT(CS_WAVE_WRITE, 4, printk(KERN_INFO
- "cs4297a: clear_advance(): memset %d at 0x%.8x for %d size \n",
- (unsigned)c, (unsigned)((char *) buf) + bptr, len));
- memset(((char *) buf) + bptr, c, len);
-}
-
-#if CSDEBUG
-
-// DEBUG ROUTINES
-
-#define SOUND_MIXER_CS_GETDBGLEVEL _SIOWR('M',120, int)
-#define SOUND_MIXER_CS_SETDBGLEVEL _SIOWR('M',121, int)
-#define SOUND_MIXER_CS_GETDBGMASK _SIOWR('M',122, int)
-#define SOUND_MIXER_CS_SETDBGMASK _SIOWR('M',123, int)
-
-static void cs_printioctl(unsigned int x)
-{
- unsigned int i;
- unsigned char vidx;
- // Index of mixtable1[] member is Device ID
- // and must be <= SOUND_MIXER_NRDEVICES.
- // Value of array member is index into s->mix.vol[]
- static const unsigned char mixtable1[SOUND_MIXER_NRDEVICES] = {
- [SOUND_MIXER_PCM] = 1, // voice
- [SOUND_MIXER_LINE1] = 2, // AUX
- [SOUND_MIXER_CD] = 3, // CD
- [SOUND_MIXER_LINE] = 4, // Line
- [SOUND_MIXER_SYNTH] = 5, // FM
- [SOUND_MIXER_MIC] = 6, // Mic
- [SOUND_MIXER_SPEAKER] = 7, // Speaker
- [SOUND_MIXER_RECLEV] = 8, // Recording level
- [SOUND_MIXER_VOLUME] = 9 // Master Volume
- };
-
- switch (x) {
- case SOUND_MIXER_CS_GETDBGMASK:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_MIXER_CS_GETDBGMASK:\n"));
- break;
- case SOUND_MIXER_CS_GETDBGLEVEL:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_MIXER_CS_GETDBGLEVEL:\n"));
- break;
- case SOUND_MIXER_CS_SETDBGMASK:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_MIXER_CS_SETDBGMASK:\n"));
- break;
- case SOUND_MIXER_CS_SETDBGLEVEL:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_MIXER_CS_SETDBGLEVEL:\n"));
- break;
- case OSS_GETVERSION:
- CS_DBGOUT(CS_IOCTL, 4, printk("OSS_GETVERSION:\n"));
- break;
- case SNDCTL_DSP_SYNC:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SYNC:\n"));
- break;
- case SNDCTL_DSP_SETDUPLEX:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SETDUPLEX:\n"));
- break;
- case SNDCTL_DSP_GETCAPS:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETCAPS:\n"));
- break;
- case SNDCTL_DSP_RESET:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_RESET:\n"));
- break;
- case SNDCTL_DSP_SPEED:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SPEED:\n"));
- break;
- case SNDCTL_DSP_STEREO:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_STEREO:\n"));
- break;
- case SNDCTL_DSP_CHANNELS:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_CHANNELS:\n"));
- break;
- case SNDCTL_DSP_GETFMTS:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETFMTS:\n"));
- break;
- case SNDCTL_DSP_SETFMT:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SETFMT:\n"));
- break;
- case SNDCTL_DSP_POST:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_POST:\n"));
- break;
- case SNDCTL_DSP_GETTRIGGER:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETTRIGGER:\n"));
- break;
- case SNDCTL_DSP_SETTRIGGER:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SETTRIGGER:\n"));
- break;
- case SNDCTL_DSP_GETOSPACE:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETOSPACE:\n"));
- break;
- case SNDCTL_DSP_GETISPACE:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETISPACE:\n"));
- break;
- case SNDCTL_DSP_NONBLOCK:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_NONBLOCK:\n"));
- break;
- case SNDCTL_DSP_GETODELAY:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETODELAY:\n"));
- break;
- case SNDCTL_DSP_GETIPTR:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETIPTR:\n"));
- break;
- case SNDCTL_DSP_GETOPTR:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETOPTR:\n"));
- break;
- case SNDCTL_DSP_GETBLKSIZE:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETBLKSIZE:\n"));
- break;
- case SNDCTL_DSP_SETFRAGMENT:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SNDCTL_DSP_SETFRAGMENT:\n"));
- break;
- case SNDCTL_DSP_SUBDIVIDE:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SUBDIVIDE:\n"));
- break;
- case SOUND_PCM_READ_RATE:
- CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_PCM_READ_RATE:\n"));
- break;
- case SOUND_PCM_READ_CHANNELS:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_PCM_READ_CHANNELS:\n"));
- break;
- case SOUND_PCM_READ_BITS:
- CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_PCM_READ_BITS:\n"));
- break;
- case SOUND_PCM_WRITE_FILTER:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_PCM_WRITE_FILTER:\n"));
- break;
- case SNDCTL_DSP_SETSYNCRO:
- CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SETSYNCRO:\n"));
- break;
- case SOUND_PCM_READ_FILTER:
- CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_PCM_READ_FILTER:\n"));
- break;
- case SOUND_MIXER_PRIVATE1:
- CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE1:\n"));
- break;
- case SOUND_MIXER_PRIVATE2:
- CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE2:\n"));
- break;
- case SOUND_MIXER_PRIVATE3:
- CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE3:\n"));
- break;
- case SOUND_MIXER_PRIVATE4:
- CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE4:\n"));
- break;
- case SOUND_MIXER_PRIVATE5:
- CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE5:\n"));
- break;
- case SOUND_MIXER_INFO:
- CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_INFO:\n"));
- break;
- case SOUND_OLD_MIXER_INFO:
- CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_OLD_MIXER_INFO:\n"));
- break;
-
- default:
- switch (_IOC_NR(x)) {
- case SOUND_MIXER_VOLUME:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_MIXER_VOLUME:\n"));
- break;
- case SOUND_MIXER_SPEAKER:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_MIXER_SPEAKER:\n"));
- break;
- case SOUND_MIXER_RECLEV:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_MIXER_RECLEV:\n"));
- break;
- case SOUND_MIXER_MIC:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_MIXER_MIC:\n"));
- break;
- case SOUND_MIXER_SYNTH:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_MIXER_SYNTH:\n"));
- break;
- case SOUND_MIXER_RECSRC:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_MIXER_RECSRC:\n"));
- break;
- case SOUND_MIXER_DEVMASK:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_MIXER_DEVMASK:\n"));
- break;
- case SOUND_MIXER_RECMASK:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_MIXER_RECMASK:\n"));
- break;
- case SOUND_MIXER_STEREODEVS:
- CS_DBGOUT(CS_IOCTL, 4,
- printk("SOUND_MIXER_STEREODEVS:\n"));
- break;
- case SOUND_MIXER_CAPS:
- CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_CAPS:\n"));
- break;
- default:
- i = _IOC_NR(x);
- if (i >= SOUND_MIXER_NRDEVICES
- || !(vidx = mixtable1[i])) {
- CS_DBGOUT(CS_IOCTL, 4, printk
- ("UNKNOWN IOCTL: 0x%.8x NR=%d\n",
- x, i));
- } else {
- CS_DBGOUT(CS_IOCTL, 4, printk
- ("SOUND_MIXER_IOCTL AC9x: 0x%.8x NR=%d\n",
- x, i));
- }
- break;
- }
- }
-}
-#endif
-
-
-static int ser_init(struct cs4297a_state *s)
-{
- int i;
-
- CS_DBGOUT(CS_INIT, 2,
- printk(KERN_INFO "cs4297a: Setting up serial parameters\n"));
-
- __raw_writeq(M_SYNCSER_CMD_RX_RESET | M_SYNCSER_CMD_TX_RESET, SS_CSR(R_SER_CMD));
-
- __raw_writeq(M_SYNCSER_MSB_FIRST, SS_CSR(R_SER_MODE));
- __raw_writeq(32, SS_CSR(R_SER_MINFRM_SZ));
- __raw_writeq(32, SS_CSR(R_SER_MAXFRM_SZ));
-
- __raw_writeq(1, SS_CSR(R_SER_TX_RD_THRSH));
- __raw_writeq(4, SS_CSR(R_SER_TX_WR_THRSH));
- __raw_writeq(8, SS_CSR(R_SER_RX_RD_THRSH));
-
- /* This looks good from experimentation */
- __raw_writeq((M_SYNCSER_TXSYNC_INT | V_SYNCSER_TXSYNC_DLY(0) | M_SYNCSER_TXCLK_EXT |
- M_SYNCSER_RXSYNC_INT | V_SYNCSER_RXSYNC_DLY(1) | M_SYNCSER_RXCLK_EXT | M_SYNCSER_RXSYNC_EDGE),
- SS_CSR(R_SER_LINE_MODE));
-
- /* This looks good from experimentation */
- __raw_writeq(V_SYNCSER_SEQ_COUNT(14) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_STROBE,
- SS_TXTBL(0));
- __raw_writeq(V_SYNCSER_SEQ_COUNT(15) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_BYTE,
- SS_TXTBL(1));
- __raw_writeq(V_SYNCSER_SEQ_COUNT(13) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_BYTE,
- SS_TXTBL(2));
- __raw_writeq(V_SYNCSER_SEQ_COUNT( 0) | M_SYNCSER_SEQ_ENABLE |
- M_SYNCSER_SEQ_STROBE | M_SYNCSER_SEQ_LAST, SS_TXTBL(3));
-
- __raw_writeq(V_SYNCSER_SEQ_COUNT(14) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_STROBE,
- SS_RXTBL(0));
- __raw_writeq(V_SYNCSER_SEQ_COUNT(15) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_BYTE,
- SS_RXTBL(1));
- __raw_writeq(V_SYNCSER_SEQ_COUNT(13) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_BYTE,
- SS_RXTBL(2));
- __raw_writeq(V_SYNCSER_SEQ_COUNT( 0) | M_SYNCSER_SEQ_ENABLE | M_SYNCSER_SEQ_STROBE |
- M_SYNCSER_SEQ_LAST, SS_RXTBL(3));
-
- for (i=4; i<16; i++) {
- /* Just in case... */
- __raw_writeq(M_SYNCSER_SEQ_LAST, SS_TXTBL(i));
- __raw_writeq(M_SYNCSER_SEQ_LAST, SS_RXTBL(i));
- }
-
- return 0;
-}
-
-static int init_serdma(serdma_t *dma)
-{
- CS_DBGOUT(CS_INIT, 2,
- printk(KERN_ERR "cs4297a: desc - %d sbufsize - %d dbufsize - %d\n",
- DMA_DESCR, SAMPLE_BUF_SIZE, DMA_BUF_SIZE));
-
- /* Descriptors */
- dma->ringsz = DMA_DESCR;
- dma->descrtab = kzalloc(dma->ringsz * sizeof(serdma_descr_t), GFP_KERNEL);
- if (!dma->descrtab) {
- printk(KERN_ERR "cs4297a: kzalloc descrtab failed\n");
- return -1;
- }
- dma->descrtab_end = dma->descrtab + dma->ringsz;
-	/* XXX bloody mess, use proper DMA API here ... */
- dma->descrtab_phys = CPHYSADDR((long)dma->descrtab);
- dma->descr_add = dma->descr_rem = dma->descrtab;
-
- /* Frame buffer area */
- dma->dma_buf = kzalloc(DMA_BUF_SIZE, GFP_KERNEL);
- if (!dma->dma_buf) {
- printk(KERN_ERR "cs4297a: kzalloc dma_buf failed\n");
- kfree(dma->descrtab);
- return -1;
- }
- dma->dma_buf_phys = CPHYSADDR((long)dma->dma_buf);
-
- /* Samples buffer area */
- dma->sbufsz = SAMPLE_BUF_SIZE;
- dma->sample_buf = kmalloc(dma->sbufsz, GFP_KERNEL);
- if (!dma->sample_buf) {
- printk(KERN_ERR "cs4297a: kmalloc sample_buf failed\n");
- kfree(dma->descrtab);
- kfree(dma->dma_buf);
- return -1;
- }
- dma->sb_swptr = dma->sb_hwptr = dma->sample_buf;
- dma->sb_end = (u16 *)((void *)dma->sample_buf + dma->sbufsz);
- dma->fragsize = dma->sbufsz >> 1;
-
- CS_DBGOUT(CS_INIT, 4,
- printk(KERN_ERR "cs4297a: descrtab - %08x dma_buf - %x sample_buf - %x\n",
- (int)dma->descrtab, (int)dma->dma_buf,
- (int)dma->sample_buf));
-
- return 0;
-}
-
-static int dma_init(struct cs4297a_state *s)
-{
- int i;
-
- CS_DBGOUT(CS_INIT, 2,
- printk(KERN_INFO "cs4297a: Setting up DMA\n"));
-
- if (init_serdma(&s->dma_adc) ||
- init_serdma(&s->dma_dac))
- return -1;
-
- if (__raw_readq(SS_CSR(R_SER_DMA_DSCR_COUNT_RX))||
- __raw_readq(SS_CSR(R_SER_DMA_DSCR_COUNT_TX))) {
- panic("DMA state corrupted?!");
- }
-
- /* Initialize now - the descr/buffer pairings will never
- change... */
- for (i=0; i<DMA_DESCR; i++) {
- s->dma_dac.descrtab[i].descr_a = M_DMA_SERRX_SOP | V_DMA_DSCRA_A_SIZE(1) |
- (s->dma_dac.dma_buf_phys + i*FRAME_BYTES);
- s->dma_dac.descrtab[i].descr_b = V_DMA_DSCRB_PKT_SIZE(FRAME_BYTES);
- s->dma_adc.descrtab[i].descr_a = V_DMA_DSCRA_A_SIZE(1) |
- (s->dma_adc.dma_buf_phys + i*FRAME_BYTES);
- s->dma_adc.descrtab[i].descr_b = 0;
- }
-
- __raw_writeq((M_DMA_EOP_INT_EN | V_DMA_INT_PKTCNT(DMA_INT_CNT) |
- V_DMA_RINGSZ(DMA_DESCR) | M_DMA_TDX_EN),
- SS_CSR(R_SER_DMA_CONFIG0_RX));
- __raw_writeq(M_DMA_L2CA, SS_CSR(R_SER_DMA_CONFIG1_RX));
- __raw_writeq(s->dma_adc.descrtab_phys, SS_CSR(R_SER_DMA_DSCR_BASE_RX));
-
- __raw_writeq(V_DMA_RINGSZ(DMA_DESCR), SS_CSR(R_SER_DMA_CONFIG0_TX));
- __raw_writeq(M_DMA_L2CA | M_DMA_NO_DSCR_UPDT, SS_CSR(R_SER_DMA_CONFIG1_TX));
- __raw_writeq(s->dma_dac.descrtab_phys, SS_CSR(R_SER_DMA_DSCR_BASE_TX));
-
- /* Prep the receive DMA descriptor ring */
- __raw_writeq(DMA_DESCR, SS_CSR(R_SER_DMA_DSCR_COUNT_RX));
-
- __raw_writeq(M_SYNCSER_DMA_RX_EN | M_SYNCSER_DMA_TX_EN, SS_CSR(R_SER_DMA_ENABLE));
-
- __raw_writeq((M_SYNCSER_RX_SYNC_ERR | M_SYNCSER_RX_OVERRUN | M_SYNCSER_RX_EOP_COUNT),
- SS_CSR(R_SER_INT_MASK));
-
- /* Enable the rx/tx; let the codec warm up to the sync and
- start sending good frames before the receive FIFO is
- enabled */
- __raw_writeq(M_SYNCSER_CMD_TX_EN, SS_CSR(R_SER_CMD));
- udelay(1000);
- __raw_writeq(M_SYNCSER_CMD_RX_EN | M_SYNCSER_CMD_TX_EN, SS_CSR(R_SER_CMD));
-
- /* XXXKW is this magic? (the "1" part) */
- while ((__raw_readq(SS_CSR(R_SER_STATUS)) & 0xf1) != 1)
- ;
-
- CS_DBGOUT(CS_INIT, 4,
- printk(KERN_INFO "cs4297a: status: %08x\n",
- (unsigned int)(__raw_readq(SS_CSR(R_SER_STATUS)) & 0xffffffff)));
-
- return 0;
-}
-
-static int serdma_reg_access(struct cs4297a_state *s, u64 data)
-{
- serdma_t *d = &s->dma_dac;
- u64 *data_p;
- unsigned swptr;
- unsigned long flags;
- serdma_descr_t *descr;
-
- if (s->reg_request) {
- printk(KERN_ERR "cs4297a: attempt to issue multiple reg_access\n");
- return -1;
- }
-
- if (s->ena & FMODE_WRITE) {
- /* Since a writer has the DSP open, we have to mux the
- request in */
- s->reg_request = data;
- oss_broken_sleep_on(&s->dma_dac.reg_wait, MAX_SCHEDULE_TIMEOUT);
- /* XXXKW how can I deal with the starvation case where
- the opener isn't writing? */
- } else {
- /* Be safe when changing ring pointers */
- spin_lock_irqsave(&s->lock, flags);
- if (d->hwptr != d->swptr) {
- printk(KERN_ERR "cs4297a: reg access found bookkeeping error (hw/sw = %d/%d\n",
- d->hwptr, d->swptr);
- spin_unlock_irqrestore(&s->lock, flags);
- return -1;
- }
- swptr = d->swptr;
- d->hwptr = d->swptr = (d->swptr + 1) % d->ringsz;
- spin_unlock_irqrestore(&s->lock, flags);
-
- descr = &d->descrtab[swptr];
- data_p = &d->dma_buf[swptr * 4];
- *data_p = cpu_to_be64(data);
- __raw_writeq(1, SS_CSR(R_SER_DMA_DSCR_COUNT_TX));
- CS_DBGOUT(CS_DESCR, 4,
- printk(KERN_INFO "cs4297a: add_tx %p (%x -> %x)\n",
- data_p, swptr, d->hwptr));
- }
-
- CS_DBGOUT(CS_FUNCTION, 6,
- printk(KERN_INFO "cs4297a: serdma_reg_access()-\n"));
-
- return 0;
-}
-
-//****************************************************************************
-// "cs4297a_read_ac97" -- Reads an AC97 register
-//****************************************************************************
-static int cs4297a_read_ac97(struct cs4297a_state *s, u32 offset,
- u32 * value)
-{
- CS_DBGOUT(CS_AC97, 1,
- printk(KERN_INFO "cs4297a: read reg %2x\n", offset));
- if (serdma_reg_access(s, (0xCLL << 60) | (1LL << 47) | ((u64)(offset & 0x7F) << 40)))
- return -1;
-
- oss_broken_sleep_on(&s->dma_adc.reg_wait, MAX_SCHEDULE_TIMEOUT);
- *value = s->read_value;
- CS_DBGOUT(CS_AC97, 2,
- printk(KERN_INFO "cs4297a: rdr reg %x -> %x\n", s->read_reg, s->read_value));
-
- return 0;
-}
-
-
-//****************************************************************************
-// "cs4297a_write_ac97()"-- writes an AC97 register
-//****************************************************************************
-static int cs4297a_write_ac97(struct cs4297a_state *s, u32 offset,
- u32 value)
-{
- CS_DBGOUT(CS_AC97, 1,
- printk(KERN_INFO "cs4297a: write reg %2x -> %04x\n", offset, value));
- return (serdma_reg_access(s, (0xELL << 60) | ((u64)(offset & 0x7F) << 40) | ((value & 0xffff) << 12)));
-}
-
-static void stop_dac(struct cs4297a_state *s)
-{
- unsigned long flags;
-
- CS_DBGOUT(CS_WAVE_WRITE, 3, printk(KERN_INFO "cs4297a: stop_dac():\n"));
- spin_lock_irqsave(&s->lock, flags);
- s->ena &= ~FMODE_WRITE;
-#if 0
- /* XXXKW what do I really want here? My theory for now is
- that I just flip the "ena" bit, and the interrupt handler
- will stop processing the xmit channel */
- __raw_writeq((s->ena & FMODE_READ) ? M_SYNCSER_DMA_RX_EN : 0,
- SS_CSR(R_SER_DMA_ENABLE));
-#endif
-
- spin_unlock_irqrestore(&s->lock, flags);
-}
-
-
-static void start_dac(struct cs4297a_state *s)
-{
- unsigned long flags;
-
- CS_DBGOUT(CS_FUNCTION, 3, printk(KERN_INFO "cs4297a: start_dac()+\n"));
- spin_lock_irqsave(&s->lock, flags);
- if (!(s->ena & FMODE_WRITE) && (s->dma_dac.mapped ||
- (s->dma_dac.count > 0
- && s->dma_dac.ready))) {
- s->ena |= FMODE_WRITE;
- /* XXXKW what do I really want here? My theory for
- now is that I just flip the "ena" bit, and the
- interrupt handler will start processing the xmit
- channel */
-
- CS_DBGOUT(CS_WAVE_WRITE | CS_PARMS, 8, printk(KERN_INFO
- "cs4297a: start_dac(): start dma\n"));
-
- }
- spin_unlock_irqrestore(&s->lock, flags);
- CS_DBGOUT(CS_FUNCTION, 3,
- printk(KERN_INFO "cs4297a: start_dac()-\n"));
-}
-
-
-static void stop_adc(struct cs4297a_state *s)
-{
- unsigned long flags;
-
- CS_DBGOUT(CS_FUNCTION, 3,
- printk(KERN_INFO "cs4297a: stop_adc()+\n"));
-
- spin_lock_irqsave(&s->lock, flags);
- s->ena &= ~FMODE_READ;
-
- if (s->conversion == 1) {
- s->conversion = 0;
- s->prop_adc.fmt = s->prop_adc.fmt_original;
- }
- /* Nothing to do really, I need to keep the DMA going
- XXXKW when do I get here, and is there more I should do? */
- spin_unlock_irqrestore(&s->lock, flags);
- CS_DBGOUT(CS_FUNCTION, 3,
- printk(KERN_INFO "cs4297a: stop_adc()-\n"));
-}
-
-
-static void start_adc(struct cs4297a_state *s)
-{
- unsigned long flags;
-
- CS_DBGOUT(CS_FUNCTION, 2,
- printk(KERN_INFO "cs4297a: start_adc()+\n"));
-
- if (!(s->ena & FMODE_READ) &&
- (s->dma_adc.mapped || s->dma_adc.count <=
- (signed) (s->dma_adc.sbufsz - 2 * s->dma_adc.fragsize))
- && s->dma_adc.ready) {
- if (s->prop_adc.fmt & AFMT_S8 || s->prop_adc.fmt & AFMT_U8) {
- //
-			// now only use 16 bit capture; due to a truncation issue
-			// in the chip, noticeable distortion occurs.
-			// allocate a buffer and then convert from 16 bit to
-			// 8 bit for the user buffer.
- //
- s->prop_adc.fmt_original = s->prop_adc.fmt;
- if (s->prop_adc.fmt & AFMT_S8) {
- s->prop_adc.fmt &= ~AFMT_S8;
- s->prop_adc.fmt |= AFMT_S16_LE;
- }
- if (s->prop_adc.fmt & AFMT_U8) {
- s->prop_adc.fmt &= ~AFMT_U8;
- s->prop_adc.fmt |= AFMT_U16_LE;
- }
- //
- // prog_dmabuf_adc performs a stop_adc() but that is
- // ok since we really haven't started the DMA yet.
- //
- prog_codec(s, CS_TYPE_ADC);
-
- prog_dmabuf_adc(s);
- s->conversion = 1;
- }
- spin_lock_irqsave(&s->lock, flags);
- s->ena |= FMODE_READ;
- /* Nothing to do really, I am probably already
- DMAing... XXXKW when do I get here, and is there
- more I should do? */
- spin_unlock_irqrestore(&s->lock, flags);
-
- CS_DBGOUT(CS_PARMS, 6, printk(KERN_INFO
- "cs4297a: start_adc(): start adc\n"));
- }
- CS_DBGOUT(CS_FUNCTION, 2,
- printk(KERN_INFO "cs4297a: start_adc()-\n"));
-
-}
-
-
-// call with spinlock held!
-static void cs4297a_update_ptr(struct cs4297a_state *s, int intflag)
-{
- int good_diff, diff, diff2;
- u64 *data_p, data;
- u32 *s_ptr;
- unsigned hwptr;
- u32 status;
- serdma_t *d;
- serdma_descr_t *descr;
-
- // update ADC pointer
- status = intflag ? __raw_readq(SS_CSR(R_SER_STATUS)) : 0;
-
- if ((s->ena & FMODE_READ) || (status & (M_SYNCSER_RX_EOP_COUNT))) {
- d = &s->dma_adc;
- hwptr = (unsigned) (((__raw_readq(SS_CSR(R_SER_DMA_CUR_DSCR_ADDR_RX)) & M_DMA_CURDSCR_ADDR) -
- d->descrtab_phys) / sizeof(serdma_descr_t));
-
- if (s->ena & FMODE_READ) {
- CS_DBGOUT(CS_FUNCTION, 2,
-				  printk(KERN_INFO "cs4297a: upd_rcv sw->hw->hw %x/%x/%x (int-%d)\n",
- d->swptr, d->hwptr, hwptr, intflag));
- /* Number of DMA buffers available for software: */
- diff2 = diff = (d->ringsz + hwptr - d->hwptr) % d->ringsz;
- d->hwptr = hwptr;
- good_diff = 0;
- s_ptr = (u32 *)&(d->dma_buf[d->swptr*4]);
- descr = &d->descrtab[d->swptr];
- while (diff2--) {
- u64 data = be64_to_cpu(*(u64 *)s_ptr);
- u64 descr_a;
- u16 left, right;
- descr_a = descr->descr_a;
- descr->descr_a &= ~M_DMA_SERRX_SOP;
- if ((descr_a & M_DMA_DSCRA_A_ADDR) != CPHYSADDR((long)s_ptr)) {
- printk(KERN_ERR "cs4297a: RX Bad address (read)\n");
- }
- if (((data & 0x9800000000000000) != 0x9800000000000000) ||
- (!(descr_a & M_DMA_SERRX_SOP)) ||
- (G_DMA_DSCRB_PKT_SIZE(descr->descr_b) != FRAME_BYTES)) {
- s->stats.rx_bad++;
- printk(KERN_DEBUG "cs4297a: RX Bad attributes (read)\n");
- continue;
- }
- s->stats.rx_good++;
- if ((data >> 61) == 7) {
- s->read_value = (data >> 12) & 0xffff;
- s->read_reg = (data >> 40) & 0x7f;
- wake_up(&d->reg_wait);
- }
- if (d->count && (d->sb_hwptr == d->sb_swptr)) {
- s->stats.rx_overflow++;
- printk(KERN_DEBUG "cs4297a: RX overflow\n");
- continue;
- }
- good_diff++;
- left = ((be32_to_cpu(s_ptr[1]) & 0xff) << 8) |
- ((be32_to_cpu(s_ptr[2]) >> 24) & 0xff);
- right = (be32_to_cpu(s_ptr[2]) >> 4) & 0xffff;
- *d->sb_hwptr++ = cpu_to_be16(left);
- *d->sb_hwptr++ = cpu_to_be16(right);
- if (d->sb_hwptr == d->sb_end)
- d->sb_hwptr = d->sample_buf;
- descr++;
- if (descr == d->descrtab_end) {
- descr = d->descrtab;
- s_ptr = (u32 *)s->dma_adc.dma_buf;
- } else {
- s_ptr += 8;
- }
- }
- d->total_bytes += good_diff * FRAME_SAMPLE_BYTES;
- d->count += good_diff * FRAME_SAMPLE_BYTES;
- if (d->count > d->sbufsz) {
- printk(KERN_ERR "cs4297a: bogus receive overflow!!\n");
- }
- d->swptr = (d->swptr + diff) % d->ringsz;
- __raw_writeq(diff, SS_CSR(R_SER_DMA_DSCR_COUNT_RX));
- if (d->mapped) {
- if (d->count >= (signed) d->fragsize)
- wake_up(&d->wait);
- } else {
- if (d->count > 0) {
- CS_DBGOUT(CS_WAVE_READ, 4,
- printk(KERN_INFO
- "cs4297a: update count -> %d\n", d->count));
- wake_up(&d->wait);
- }
- }
- } else {
- /* Receive is going even if no one is
- listening (for register accesses and to
- avoid FIFO overrun) */
- diff2 = diff = (hwptr + d->ringsz - d->hwptr) % d->ringsz;
- if (!diff) {
- printk(KERN_ERR "cs4297a: RX full or empty?\n");
- }
-
- descr = &d->descrtab[d->swptr];
- data_p = &d->dma_buf[d->swptr*4];
-
- /* Force this to happen at least once; I got
- here because of an interrupt, so there must
- be a buffer to process. */
- do {
- data = be64_to_cpu(*data_p);
- if ((descr->descr_a & M_DMA_DSCRA_A_ADDR) != CPHYSADDR((long)data_p)) {
- printk(KERN_ERR "cs4297a: RX Bad address %d (%llx %lx)\n", d->swptr,
- (long long)(descr->descr_a & M_DMA_DSCRA_A_ADDR),
- (long)CPHYSADDR((long)data_p));
- }
- if (!(data & (1LL << 63)) ||
- !(descr->descr_a & M_DMA_SERRX_SOP) ||
- (G_DMA_DSCRB_PKT_SIZE(descr->descr_b) != FRAME_BYTES)) {
- s->stats.rx_bad++;
- printk(KERN_DEBUG "cs4297a: RX Bad attributes\n");
- } else {
- s->stats.rx_good++;
- if ((data >> 61) == 7) {
- s->read_value = (data >> 12) & 0xffff;
- s->read_reg = (data >> 40) & 0x7f;
- wake_up(&d->reg_wait);
- }
- }
- descr->descr_a &= ~M_DMA_SERRX_SOP;
- descr++;
- d->swptr++;
- data_p += 4;
- if (descr == d->descrtab_end) {
- descr = d->descrtab;
- d->swptr = 0;
- data_p = d->dma_buf;
- }
- __raw_writeq(1, SS_CSR(R_SER_DMA_DSCR_COUNT_RX));
- } while (--diff);
- d->hwptr = hwptr;
-
- CS_DBGOUT(CS_DESCR, 6,
- printk(KERN_INFO "cs4297a: hw/sw %x/%x\n", d->hwptr, d->swptr));
- }
-
- CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO
- "cs4297a: cs4297a_update_ptr(): s=0x%.8x hwptr=%d total_bytes=%d count=%d \n",
- (unsigned)s, d->hwptr,
- d->total_bytes, d->count));
- }
-
- /* XXXKW worry about s->reg_request -- there is a starvation
- case if s->ena has FMODE_WRITE on, but the client isn't
- doing writes */
-
- // update DAC pointer
- //
-	// check for end of buffer, which means that we are going to wait for another interrupt
- // to allow silence to fill the fifos on the part, to keep pops down to a minimum.
- //
- if (s->ena & FMODE_WRITE) {
- serdma_t *d = &s->dma_dac;
- hwptr = (unsigned) (((__raw_readq(SS_CSR(R_SER_DMA_CUR_DSCR_ADDR_TX)) & M_DMA_CURDSCR_ADDR) -
- d->descrtab_phys) / sizeof(serdma_descr_t));
- diff = (d->ringsz + hwptr - d->hwptr) % d->ringsz;
- CS_DBGOUT(CS_WAVE_WRITE, 4, printk(KERN_INFO
- "cs4297a: cs4297a_update_ptr(): hw/hw/sw %x/%x/%x diff %d count %d\n",
- d->hwptr, hwptr, d->swptr, diff, d->count));
- d->hwptr = hwptr;
- /* XXXKW stereo? conversion? Just assume 2 16-bit samples for now */
- d->total_bytes += diff * FRAME_SAMPLE_BYTES;
- if (d->mapped) {
- d->count += diff * FRAME_SAMPLE_BYTES;
- if (d->count >= d->fragsize) {
- d->wakeup = 1;
- wake_up(&d->wait);
- if (d->count > d->sbufsz)
- d->count &= d->sbufsz - 1;
- }
- } else {
- d->count -= diff * FRAME_SAMPLE_BYTES;
- if (d->count <= 0) {
- //
- // fill with silence, and do not shut down the DAC.
- // Continue to play silence until the _release.
- //
- CS_DBGOUT(CS_WAVE_WRITE, 6, printk(KERN_INFO
- "cs4297a: cs4297a_update_ptr(): memset %d at 0x%.8x for %d size \n",
- (unsigned)(s->prop_dac.fmt &
- (AFMT_U8 | AFMT_U16_LE)) ? 0x80 : 0,
- (unsigned)d->dma_buf,
- d->ringsz));
- memset(d->dma_buf, 0, d->ringsz * FRAME_BYTES);
- if (d->count < 0) {
- d->underrun = 1;
- s->stats.tx_underrun++;
- d->count = 0;
- CS_DBGOUT(CS_ERROR, 9, printk(KERN_INFO
- "cs4297a: cs4297a_update_ptr(): underrun\n"));
- }
- } else if (d->count <=
- (signed) d->fragsize
- && !d->endcleared) {
- /* XXXKW what is this for? */
- clear_advance(d->dma_buf,
- d->sbufsz,
- d->swptr,
- d->fragsize,
- 0);
- d->endcleared = 1;
- }
- if ( (d->count <= (signed) d->sbufsz/2) || intflag)
- {
- CS_DBGOUT(CS_WAVE_WRITE, 4,
- printk(KERN_INFO
- "cs4297a: update count -> %d\n", d->count));
- wake_up(&d->wait);
- }
- }
- CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO
- "cs4297a: cs4297a_update_ptr(): s=0x%.8x hwptr=%d total_bytes=%d count=%d \n",
- (unsigned) s, d->hwptr,
- d->total_bytes, d->count));
- }
-}
-
-static int mixer_ioctl(struct cs4297a_state *s, unsigned int cmd,
- unsigned long arg)
-{
- // Index to mixer_src[] is value of AC97 Input Mux Select Reg.
- // Value of array member is recording source Device ID Mask.
- static const unsigned int mixer_src[8] = {
- SOUND_MASK_MIC, SOUND_MASK_CD, 0, SOUND_MASK_LINE1,
- SOUND_MASK_LINE, SOUND_MASK_VOLUME, 0, 0
- };
-
- // Index of mixtable1[] member is Device ID
- // and must be <= SOUND_MIXER_NRDEVICES.
- // Value of array member is index into s->mix.vol[]
- static const unsigned char mixtable1[SOUND_MIXER_NRDEVICES] = {
- [SOUND_MIXER_PCM] = 1, // voice
- [SOUND_MIXER_LINE1] = 2, // AUX
- [SOUND_MIXER_CD] = 3, // CD
- [SOUND_MIXER_LINE] = 4, // Line
- [SOUND_MIXER_SYNTH] = 5, // FM
- [SOUND_MIXER_MIC] = 6, // Mic
- [SOUND_MIXER_SPEAKER] = 7, // Speaker
- [SOUND_MIXER_RECLEV] = 8, // Recording level
- [SOUND_MIXER_VOLUME] = 9 // Master Volume
- };
-
- static const unsigned mixreg[] = {
- AC97_PCMOUT_VOL,
- AC97_AUX_VOL,
- AC97_CD_VOL,
- AC97_LINEIN_VOL
- };
- unsigned char l, r, rl, rr, vidx;
- unsigned char attentbl[11] =
- { 63, 42, 26, 17, 14, 11, 8, 6, 4, 2, 0 };
- unsigned temp1;
- int i, val;
-
- VALIDATE_STATE(s);
- CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO
- "cs4297a: mixer_ioctl(): s=0x%.8x cmd=0x%.8x\n",
- (unsigned) s, cmd));
-#if CSDEBUG
- cs_printioctl(cmd);
-#endif
-#if CSDEBUG_INTERFACE
-
- if ((cmd == SOUND_MIXER_CS_GETDBGMASK) ||
- (cmd == SOUND_MIXER_CS_SETDBGMASK) ||
- (cmd == SOUND_MIXER_CS_GETDBGLEVEL) ||
- (cmd == SOUND_MIXER_CS_SETDBGLEVEL))
- {
- switch (cmd) {
-
- case SOUND_MIXER_CS_GETDBGMASK:
- return put_user(cs_debugmask,
- (unsigned long *) arg);
-
- case SOUND_MIXER_CS_GETDBGLEVEL:
- return put_user(cs_debuglevel,
- (unsigned long *) arg);
-
- case SOUND_MIXER_CS_SETDBGMASK:
- if (get_user(val, (unsigned long *) arg))
- return -EFAULT;
- cs_debugmask = val;
- return 0;
-
- case SOUND_MIXER_CS_SETDBGLEVEL:
- if (get_user(val, (unsigned long *) arg))
- return -EFAULT;
- cs_debuglevel = val;
- return 0;
- default:
- CS_DBGOUT(CS_ERROR, 1, printk(KERN_INFO
- "cs4297a: mixer_ioctl(): ERROR unknown debug cmd\n"));
- return 0;
- }
- }
-#endif
-
- if (cmd == SOUND_MIXER_PRIVATE1) {
- return -EINVAL;
- }
- if (cmd == SOUND_MIXER_PRIVATE2) {
- // enable/disable/query spatializer
- if (get_user(val, (int *) arg))
- return -EFAULT;
- if (val != -1) {
- temp1 = (val & 0x3f) >> 2;
- cs4297a_write_ac97(s, AC97_3D_CONTROL, temp1);
- cs4297a_read_ac97(s, AC97_GENERAL_PURPOSE,
- &temp1);
- cs4297a_write_ac97(s, AC97_GENERAL_PURPOSE,
- temp1 | 0x2000);
- }
- cs4297a_read_ac97(s, AC97_3D_CONTROL, &temp1);
- return put_user((temp1 << 2) | 3, (int *) arg);
- }
- if (cmd == SOUND_MIXER_INFO) {
- mixer_info info;
- memset(&info, 0, sizeof(info));
- strlcpy(info.id, "CS4297a", sizeof(info.id));
- strlcpy(info.name, "Crystal CS4297a", sizeof(info.name));
- info.modify_counter = s->mix.modcnt;
- if (copy_to_user((void *) arg, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- }
- if (cmd == SOUND_OLD_MIXER_INFO) {
- _old_mixer_info info;
- memset(&info, 0, sizeof(info));
- strlcpy(info.id, "CS4297a", sizeof(info.id));
- strlcpy(info.name, "Crystal CS4297a", sizeof(info.name));
- if (copy_to_user((void *) arg, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- }
- if (cmd == OSS_GETVERSION)
- return put_user(SOUND_VERSION, (int *) arg);
-
- if (_IOC_TYPE(cmd) != 'M' || _SIOC_SIZE(cmd) != sizeof(int))
- return -EINVAL;
-
-	// If the ioctl has only the SIOC_READ bit (bit 31)
-	// on, process the read-only commands.
- if (_SIOC_DIR(cmd) == _SIOC_READ) {
- switch (_IOC_NR(cmd)) {
- case SOUND_MIXER_RECSRC: // Arg contains a bit for each recording source
- cs4297a_read_ac97(s, AC97_RECORD_SELECT,
- &temp1);
- return put_user(mixer_src[temp1 & 7], (int *) arg);
-
- case SOUND_MIXER_DEVMASK: // Arg contains a bit for each supported device
- return put_user(SOUND_MASK_PCM | SOUND_MASK_LINE |
- SOUND_MASK_VOLUME | SOUND_MASK_RECLEV,
- (int *) arg);
-
- case SOUND_MIXER_RECMASK: // Arg contains a bit for each supported recording source
- return put_user(SOUND_MASK_LINE | SOUND_MASK_VOLUME,
- (int *) arg);
-
- case SOUND_MIXER_STEREODEVS: // Mixer channels supporting stereo
- return put_user(SOUND_MASK_PCM | SOUND_MASK_LINE |
- SOUND_MASK_VOLUME | SOUND_MASK_RECLEV,
- (int *) arg);
-
- case SOUND_MIXER_CAPS:
- return put_user(SOUND_CAP_EXCL_INPUT, (int *) arg);
-
- default:
- i = _IOC_NR(cmd);
- if (i >= SOUND_MIXER_NRDEVICES
- || !(vidx = mixtable1[i]))
- return -EINVAL;
- return put_user(s->mix.vol[vidx - 1], (int *) arg);
- }
- }
- // If ioctl doesn't have both the SIOC_READ and
- // the SIOC_WRITE bit set, return invalid.
- if (_SIOC_DIR(cmd) != (_SIOC_READ | _SIOC_WRITE))
- return -EINVAL;
-
- // Increment the count of volume writes.
- s->mix.modcnt++;
-
- // Isolate the command; it must be a write.
- switch (_IOC_NR(cmd)) {
-
- case SOUND_MIXER_RECSRC: // Arg contains a bit for each recording source
- if (get_user(val, (int *) arg))
- return -EFAULT;
- i = hweight32(val); // i = # bits on in val.
- if (i != 1) // One & only 1 bit must be on.
- return 0;
- for (i = 0; i < sizeof(mixer_src) / sizeof(int); i++) {
- if (val == mixer_src[i]) {
- temp1 = (i << 8) | i;
- cs4297a_write_ac97(s,
- AC97_RECORD_SELECT,
- temp1);
- return 0;
- }
- }
- return 0;
-
- case SOUND_MIXER_VOLUME:
- if (get_user(val, (int *) arg))
- return -EFAULT;
- l = val & 0xff;
- if (l > 100)
- l = 100; // Max soundcard.h vol is 100.
- if (l < 6) {
- rl = 63;
- l = 0;
- } else
- rl = attentbl[(10 * l) / 100]; // Convert 0-100 vol to 63-0 atten.
-
- r = (val >> 8) & 0xff;
- if (r > 100)
- r = 100; // Max right volume is 100, too
- if (r < 6) {
- rr = 63;
- r = 0;
- } else
- rr = attentbl[(10 * r) / 100]; // Convert volume to attenuation.
-
- if ((rl > 60) && (rr > 60)) // If both l & r are 'low',
- temp1 = 0x8000; // turn on the mute bit.
- else
- temp1 = 0;
-
- temp1 |= (rl << 8) | rr;
-
- cs4297a_write_ac97(s, AC97_MASTER_VOL_STEREO, temp1);
- cs4297a_write_ac97(s, AC97_PHONE_VOL, temp1);
-
-#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
- s->mix.vol[8] = ((unsigned int) r << 8) | l;
-#else
- s->mix.vol[8] = val;
-#endif
- return put_user(s->mix.vol[8], (int *) arg);
-
- case SOUND_MIXER_SPEAKER:
- if (get_user(val, (int *) arg))
- return -EFAULT;
- l = val & 0xff;
- if (l > 100)
- l = 100;
- if (l < 3) {
- rl = 0;
- l = 0;
- } else {
- rl = (l * 2 - 5) / 13; // Convert 0-100 range to 0-15.
- l = (rl * 13 + 5) / 2;
- }
-
- if (rl < 3) {
- temp1 = 0x8000;
- rl = 0;
- } else
- temp1 = 0;
- rl = 15 - rl; // Convert volume to attenuation.
- temp1 |= rl << 1;
- cs4297a_write_ac97(s, AC97_PCBEEP_VOL, temp1);
-
-#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
- s->mix.vol[6] = l << 8;
-#else
- s->mix.vol[6] = val;
-#endif
- return put_user(s->mix.vol[6], (int *) arg);
-
- case SOUND_MIXER_RECLEV:
- if (get_user(val, (int *) arg))
- return -EFAULT;
- l = val & 0xff;
- if (l > 100)
- l = 100;
- r = (val >> 8) & 0xff;
- if (r > 100)
- r = 100;
- rl = (l * 2 - 5) / 13; // Convert 0-100 scale to 0-15.
- rr = (r * 2 - 5) / 13;
- if (rl < 3 && rr < 3)
- temp1 = 0x8000;
- else
- temp1 = 0;
-
- temp1 = temp1 | (rl << 8) | rr;
- cs4297a_write_ac97(s, AC97_RECORD_GAIN, temp1);
-
-#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
- s->mix.vol[7] = ((unsigned int) r << 8) | l;
-#else
- s->mix.vol[7] = val;
-#endif
- return put_user(s->mix.vol[7], (int *) arg);
-
- case SOUND_MIXER_MIC:
- if (get_user(val, (int *) arg))
- return -EFAULT;
- l = val & 0xff;
- if (l > 100)
- l = 100;
- if (l < 1) {
- l = 0;
- rl = 0;
- } else {
- rl = ((unsigned) l * 5 - 4) / 16; // Convert 0-100 range to 0-31.
- l = (rl * 16 + 4) / 5;
- }
- cs4297a_read_ac97(s, AC97_MIC_VOL, &temp1);
- temp1 &= 0x40; // Isolate 20db gain bit.
- if (rl < 3) {
- temp1 |= 0x8000;
- rl = 0;
- }
- rl = 31 - rl; // Convert volume to attenuation.
- temp1 |= rl;
- cs4297a_write_ac97(s, AC97_MIC_VOL, temp1);
-
-#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
- s->mix.vol[5] = val << 8;
-#else
- s->mix.vol[5] = val;
-#endif
- return put_user(s->mix.vol[5], (int *) arg);
-
-
- case SOUND_MIXER_SYNTH:
- if (get_user(val, (int *) arg))
- return -EFAULT;
- l = val & 0xff;
- if (l > 100)
- l = 100;
- if (get_user(val, (int *) arg))
- return -EFAULT;
- r = (val >> 8) & 0xff;
- if (r > 100)
- r = 100;
- rl = (l * 2 - 11) / 3; // Convert 0-100 range to 0-63.
- rr = (r * 2 - 11) / 3;
- if (rl < 3) // If l is low, turn on
- temp1 = 0x0080; // the mute bit.
- else
- temp1 = 0;
-
- rl = 63 - rl; // Convert vol to attenuation.
-// writel(temp1 | rl, s->pBA0 + FMLVC);
- if (rr < 3) // If rr is low, turn on
- temp1 = 0x0080; // the mute bit.
- else
- temp1 = 0;
- rr = 63 - rr; // Convert vol to attenuation.
-// writel(temp1 | rr, s->pBA0 + FMRVC);
-
-#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
- s->mix.vol[4] = (r << 8) | l;
-#else
- s->mix.vol[4] = val;
-#endif
- return put_user(s->mix.vol[4], (int *) arg);
-
-
- default:
- CS_DBGOUT(CS_IOCTL, 4, printk(KERN_INFO
- "cs4297a: mixer_ioctl(): default\n"));
-
- i = _IOC_NR(cmd);
- if (i >= SOUND_MIXER_NRDEVICES || !(vidx = mixtable1[i]))
- return -EINVAL;
- if (get_user(val, (int *) arg))
- return -EFAULT;
- l = val & 0xff;
- if (l > 100)
- l = 100;
- if (l < 1) {
- l = 0;
- rl = 31;
- } else
- rl = (attentbl[(l * 10) / 100]) >> 1;
-
- r = (val >> 8) & 0xff;
- if (r > 100)
- r = 100;
- if (r < 1) {
- r = 0;
- rr = 31;
- } else
- rr = (attentbl[(r * 10) / 100]) >> 1;
- if ((rl > 30) && (rr > 30))
- temp1 = 0x8000;
- else
- temp1 = 0;
- temp1 = temp1 | (rl << 8) | rr;
- cs4297a_write_ac97(s, mixreg[vidx - 1], temp1);
-
-#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
- s->mix.vol[vidx - 1] = ((unsigned int) r << 8) | l;
-#else
- s->mix.vol[vidx - 1] = val;
-#endif
- return put_user(s->mix.vol[vidx - 1], (int *) arg);
- }
-}
-
-
-// ---------------------------------------------------------------------
-
-static int cs4297a_open_mixdev(struct inode *inode, struct file *file)
-{
- int minor = iminor(inode);
- struct cs4297a_state *s=NULL;
- struct list_head *entry;
-
- CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
- printk(KERN_INFO "cs4297a: cs4297a_open_mixdev()+\n"));
-
- mutex_lock(&swarm_cs4297a_mutex);
- list_for_each(entry, &cs4297a_devs)
- {
- s = list_entry(entry, struct cs4297a_state, list);
- if(s->dev_mixer == minor)
- break;
- }
- if (!s)
- {
- CS_DBGOUT(CS_FUNCTION | CS_OPEN | CS_ERROR, 2,
- printk(KERN_INFO "cs4297a: cs4297a_open_mixdev()- -ENODEV\n"));
-
- mutex_unlock(&swarm_cs4297a_mutex);
- return -ENODEV;
- }
- VALIDATE_STATE(s);
- file->private_data = s;
-
- CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
- printk(KERN_INFO "cs4297a: cs4297a_open_mixdev()- 0\n"));
- mutex_unlock(&swarm_cs4297a_mutex);
-
- return nonseekable_open(inode, file);
-}
-
-
-static int cs4297a_release_mixdev(struct inode *inode, struct file *file)
-{
- struct cs4297a_state *s =
- (struct cs4297a_state *) file->private_data;
-
- VALIDATE_STATE(s);
- return 0;
-}
-
-
-static int cs4297a_ioctl_mixdev(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- int ret;
- mutex_lock(&swarm_cs4297a_mutex);
- ret = mixer_ioctl((struct cs4297a_state *) file->private_data, cmd,
- arg);
- mutex_unlock(&swarm_cs4297a_mutex);
- return ret;
-}
-
-
-// ******************************************************************************************
-// Mixer file operations struct.
-// ******************************************************************************************
-static const struct file_operations cs4297a_mixer_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .unlocked_ioctl = cs4297a_ioctl_mixdev,
- .open = cs4297a_open_mixdev,
- .release = cs4297a_release_mixdev,
-};
-
-// ---------------------------------------------------------------------
-
-
-static int drain_adc(struct cs4297a_state *s, int nonblock)
-{
- /* This routine serves no purpose currently - any samples
- sitting in the receive queue will just be processed by the
- background consumer. This would be different if DMA
- actually stopped when there were no clients. */
- return 0;
-}
-
-static int drain_dac(struct cs4297a_state *s, int nonblock)
-{
- DECLARE_WAITQUEUE(wait, current);
- unsigned long flags;
- unsigned hwptr;
- unsigned tmo;
- int count;
-
- if (s->dma_dac.mapped)
- return 0;
- if (nonblock)
- return -EBUSY;
- add_wait_queue(&s->dma_dac.wait, &wait);
- while ((count = __raw_readq(SS_CSR(R_SER_DMA_DSCR_COUNT_TX))) ||
- (s->dma_dac.count > 0)) {
- if (!signal_pending(current)) {
- set_current_state(TASK_INTERRUPTIBLE);
- /* XXXKW is this calculation working? */
- tmo = ((count * FRAME_TX_US) * HZ) / 1000000;
- schedule_timeout(tmo + 1);
- } else {
- /* XXXKW do I care if there is a signal pending? */
- }
- }
- spin_lock_irqsave(&s->lock, flags);
- /* Reset the bookkeeping */
- hwptr = (int)(((__raw_readq(SS_CSR(R_SER_DMA_CUR_DSCR_ADDR_TX)) & M_DMA_CURDSCR_ADDR) -
- s->dma_dac.descrtab_phys) / sizeof(serdma_descr_t));
- s->dma_dac.hwptr = s->dma_dac.swptr = hwptr;
- spin_unlock_irqrestore(&s->lock, flags);
- remove_wait_queue(&s->dma_dac.wait, &wait);
- __set_current_state(TASK_RUNNING);
- return 0;
-}
-
-
-// ---------------------------------------------------------------------
-
-static ssize_t cs4297a_read(struct file *file, char *buffer, size_t count,
- loff_t * ppos)
-{
- struct cs4297a_state *s =
- (struct cs4297a_state *) file->private_data;
- ssize_t ret;
- unsigned long flags;
- int cnt, count_fr, cnt_by;
- unsigned copied = 0;
-
- CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2,
- printk(KERN_INFO "cs4297a: cs4297a_read()+ %d \n", count));
-
- VALIDATE_STATE(s);
- if (s->dma_adc.mapped)
- return -ENXIO;
- if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
- return ret;
- if (!access_ok(VERIFY_WRITE, buffer, count))
- return -EFAULT;
- ret = 0;
-//
-// "count" is the amount of bytes to read (from app), is decremented each loop
-// by the amount of bytes that have been returned to the user buffer.
-// "cnt" is the running total of each read from the buffer (changes each loop)
-// "buffer" points to the app's buffer
-// "ret" keeps a running total of the amount of bytes that have been copied
-// to the user buffer.
-// "copied" is the total bytes copied into the user buffer for each loop.
-//
- while (count > 0) {
- CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
- "_read() count>0 count=%d .count=%d .swptr=%d .hwptr=%d \n",
- count, s->dma_adc.count,
- s->dma_adc.swptr, s->dma_adc.hwptr));
- spin_lock_irqsave(&s->lock, flags);
-
- /* cnt will be the number of available samples (16-bit
-		   stereo); it starts out as the maximum consecutive
- samples */
- cnt = (s->dma_adc.sb_end - s->dma_adc.sb_swptr) / 2;
- count_fr = s->dma_adc.count / FRAME_SAMPLE_BYTES;
-
-		// dma_adc.count is the current total of bytes that have not been read.
-		// "cnt" starts out as the contiguous span from the current sw pointer
-		// to the end of the buffer; if that span is larger than the number of
-		// unread frames, limit "cnt" to the number of unread frames so we
-		// never copy past the valid data.
-
- if (count_fr < cnt)
- cnt = count_fr;
- cnt_by = cnt * FRAME_SAMPLE_BYTES;
- spin_unlock_irqrestore(&s->lock, flags);
-		//
-		// if we are converting from 8 to 16 bit then we need to copy
-		// twice as many 16-bit bytes as 8-bit bytes.
-		//
- if (s->conversion) {
- if (cnt_by > (count * 2)) {
- cnt = (count * 2) / FRAME_SAMPLE_BYTES;
- cnt_by = count * 2;
- }
- } else {
- if (cnt_by > count) {
- cnt = count / FRAME_SAMPLE_BYTES;
- cnt_by = count;
- }
- }
- //
- // "cnt" NOW is the smaller of the amount that will be read,
- // and the amount that is requested in this read (or partial).
- // if there are no bytes in the buffer to read, then start the
- // ADC and wait for the interrupt handler to wake us up.
- //
- if (cnt <= 0) {
-
- // start up the dma engine and then continue back to the top of
- // the loop when wake up occurs.
- start_adc(s);
- if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EAGAIN;
- oss_broken_sleep_on(&s->dma_adc.wait, MAX_SCHEDULE_TIMEOUT);
- if (signal_pending(current))
- return ret ? ret : -ERESTARTSYS;
- continue;
- }
- // there are bytes in the buffer to read.
- // copy from the hw buffer over to the user buffer.
- // user buffer is designated by "buffer"
- // virtual address to copy from is dma_buf+swptr
- // the "cnt" is the number of bytes to read.
-
- CS_DBGOUT(CS_WAVE_READ, 2, printk(KERN_INFO
- "_read() copy_to cnt=%d count=%d ", cnt_by, count));
- CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
- " .sbufsz=%d .count=%d buffer=0x%.8x ret=%d\n",
- s->dma_adc.sbufsz, s->dma_adc.count,
- (unsigned) buffer, ret));
-
- if (copy_to_user (buffer, ((void *)s->dma_adc.sb_swptr), cnt_by))
- return ret ? ret : -EFAULT;
- copied = cnt_by;
-
- /* Return the descriptors */
- spin_lock_irqsave(&s->lock, flags);
- CS_DBGOUT(CS_FUNCTION, 2,
- printk(KERN_INFO "cs4297a: upd_rcv sw->hw %x/%x\n", s->dma_adc.swptr, s->dma_adc.hwptr));
- s->dma_adc.count -= cnt_by;
- s->dma_adc.sb_swptr += cnt * 2;
- if (s->dma_adc.sb_swptr == s->dma_adc.sb_end)
- s->dma_adc.sb_swptr = s->dma_adc.sample_buf;
- spin_unlock_irqrestore(&s->lock, flags);
- count -= copied;
- buffer += copied;
- ret += copied;
- start_adc(s);
- }
- CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2,
- printk(KERN_INFO "cs4297a: cs4297a_read()- %d\n", ret));
- return ret;
-}
-
-
-static ssize_t cs4297a_write(struct file *file, const char *buffer,
- size_t count, loff_t * ppos)
-{
- struct cs4297a_state *s =
- (struct cs4297a_state *) file->private_data;
- ssize_t ret;
- unsigned long flags;
- unsigned swptr, hwptr;
- int cnt;
-
- CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2,
- printk(KERN_INFO "cs4297a: cs4297a_write()+ count=%d\n",
- count));
- VALIDATE_STATE(s);
-
- if (s->dma_dac.mapped)
- return -ENXIO;
- if (!s->dma_dac.ready && (ret = prog_dmabuf_dac(s)))
- return ret;
- if (!access_ok(VERIFY_READ, buffer, count))
- return -EFAULT;
- ret = 0;
- while (count > 0) {
- serdma_t *d = &s->dma_dac;
- int copy_cnt;
- u32 *s_tmpl;
- u32 *t_tmpl;
- u32 left, right;
- int swap = (s->prop_dac.fmt == AFMT_S16_LE) || (s->prop_dac.fmt == AFMT_U16_LE);
-
- /* XXXXXX this is broken for BLOAT_FACTOR */
- spin_lock_irqsave(&s->lock, flags);
- if (d->count < 0) {
- d->count = 0;
- d->swptr = d->hwptr;
- }
- if (d->underrun) {
- d->underrun = 0;
- hwptr = (unsigned) (((__raw_readq(SS_CSR(R_SER_DMA_CUR_DSCR_ADDR_TX)) & M_DMA_CURDSCR_ADDR) -
- d->descrtab_phys) / sizeof(serdma_descr_t));
- d->swptr = d->hwptr = hwptr;
- }
- swptr = d->swptr;
- cnt = d->sbufsz - (swptr * FRAME_SAMPLE_BYTES);
- /* Will this write fill up the buffer? */
- if (d->count + cnt > d->sbufsz)
- cnt = d->sbufsz - d->count;
- spin_unlock_irqrestore(&s->lock, flags);
- if (cnt > count)
- cnt = count;
- if (cnt <= 0) {
- start_dac(s);
- if (file->f_flags & O_NONBLOCK)
- return ret ? ret : -EAGAIN;
- oss_broken_sleep_on(&d->wait, MAX_SCHEDULE_TIMEOUT);
- if (signal_pending(current))
- return ret ? ret : -ERESTARTSYS;
- continue;
- }
- if (copy_from_user(d->sample_buf, buffer, cnt))
- return ret ? ret : -EFAULT;
-
- copy_cnt = cnt;
- s_tmpl = (u32 *)d->sample_buf;
- t_tmpl = (u32 *)(d->dma_buf + (swptr * 4));
-
- /* XXXKW assuming 16-bit stereo! */
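-		/* Each 4-byte stereo sample pair expands into one 8-word output
-		   frame: a tag word followed by the left and right samples split
-		   across the next two words. */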
- do {
- u32 tmp;
-
- t_tmpl[0] = cpu_to_be32(0x98000000);
-
- tmp = be32_to_cpu(s_tmpl[0]);
- left = tmp & 0xffff;
- right = tmp >> 16;
- if (swap) {
- left = swab16(left);
- right = swab16(right);
- }
- t_tmpl[1] = cpu_to_be32(left >> 8);
- t_tmpl[2] = cpu_to_be32(((left & 0xff) << 24) |
- (right << 4));
-
- s_tmpl++;
- t_tmpl += 8;
- copy_cnt -= 4;
- } while (copy_cnt);
-
- /* Mux in any pending read/write accesses */
- if (s->reg_request) {
- *(u64 *)(d->dma_buf + (swptr * 4)) |=
- cpu_to_be64(s->reg_request);
- s->reg_request = 0;
- wake_up(&s->dma_dac.reg_wait);
- }
-
- CS_DBGOUT(CS_WAVE_WRITE, 4,
- printk(KERN_INFO
- "cs4297a: copy in %d to swptr %x\n", cnt, swptr));
-
- swptr = (swptr + (cnt/FRAME_SAMPLE_BYTES)) % d->ringsz;
- __raw_writeq(cnt/FRAME_SAMPLE_BYTES, SS_CSR(R_SER_DMA_DSCR_COUNT_TX));
- spin_lock_irqsave(&s->lock, flags);
- d->swptr = swptr;
- d->count += cnt;
- d->endcleared = 0;
- spin_unlock_irqrestore(&s->lock, flags);
- count -= cnt;
- buffer += cnt;
- ret += cnt;
- start_dac(s);
- }
- CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2,
- printk(KERN_INFO "cs4297a: cs4297a_write()- %d\n", ret));
- return ret;
-}
-
-
-static unsigned int cs4297a_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct cs4297a_state *s =
- (struct cs4297a_state *) file->private_data;
- unsigned long flags;
- unsigned int mask = 0;
-
- CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE | CS_WAVE_READ, 4,
- printk(KERN_INFO "cs4297a: cs4297a_poll()+\n"));
- VALIDATE_STATE(s);
- if (file->f_mode & FMODE_WRITE) {
- CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE | CS_WAVE_READ, 4,
- printk(KERN_INFO
- "cs4297a: cs4297a_poll() wait on FMODE_WRITE\n"));
- if(!s->dma_dac.ready && prog_dmabuf_dac(s))
- return 0;
- poll_wait(file, &s->dma_dac.wait, wait);
- }
- if (file->f_mode & FMODE_READ) {
- CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE | CS_WAVE_READ, 4,
- printk(KERN_INFO
- "cs4297a: cs4297a_poll() wait on FMODE_READ\n"));
-		if(!s->dma_adc.ready && prog_dmabuf_adc(s))
- return 0;
- poll_wait(file, &s->dma_adc.wait, wait);
- }
- spin_lock_irqsave(&s->lock, flags);
- cs4297a_update_ptr(s,CS_FALSE);
- if (file->f_mode & FMODE_WRITE) {
- if (s->dma_dac.mapped) {
- if (s->dma_dac.count >=
- (signed) s->dma_dac.fragsize) {
- if (s->dma_dac.wakeup)
- mask |= POLLOUT | POLLWRNORM;
- else
- mask = 0;
- s->dma_dac.wakeup = 0;
- }
- } else {
- if ((signed) (s->dma_dac.sbufsz/2) >= s->dma_dac.count)
- mask |= POLLOUT | POLLWRNORM;
- }
- } else if (file->f_mode & FMODE_READ) {
- if (s->dma_adc.mapped) {
- if (s->dma_adc.count >= (signed) s->dma_adc.fragsize)
- mask |= POLLIN | POLLRDNORM;
- } else {
- if (s->dma_adc.count > 0)
- mask |= POLLIN | POLLRDNORM;
- }
- }
- spin_unlock_irqrestore(&s->lock, flags);
- CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE | CS_WAVE_READ, 4,
- printk(KERN_INFO "cs4297a: cs4297a_poll()- 0x%.8x\n",
- mask));
- return mask;
-}
-
-
-static int cs4297a_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	/* XXXKW currently no mmap support */
-	return -EINVAL;
-}
-
-
-static int cs4297a_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct cs4297a_state *s =
- (struct cs4297a_state *) file->private_data;
- unsigned long flags;
- audio_buf_info abinfo;
- count_info cinfo;
- int val, mapped, ret;
-
- CS_DBGOUT(CS_FUNCTION|CS_IOCTL, 4, printk(KERN_INFO
- "cs4297a: cs4297a_ioctl(): file=0x%.8x cmd=0x%.8x\n",
- (unsigned) file, cmd));
-#if CSDEBUG
- cs_printioctl(cmd);
-#endif
- VALIDATE_STATE(s);
- mapped = ((file->f_mode & FMODE_WRITE) && s->dma_dac.mapped) ||
- ((file->f_mode & FMODE_READ) && s->dma_adc.mapped);
- switch (cmd) {
- case OSS_GETVERSION:
- CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
- "cs4297a: cs4297a_ioctl(): SOUND_VERSION=0x%.8x\n",
- SOUND_VERSION));
- return put_user(SOUND_VERSION, (int *) arg);
-
- case SNDCTL_DSP_SYNC:
- CS_DBGOUT(CS_IOCTL, 4, printk(KERN_INFO
- "cs4297a: cs4297a_ioctl(): DSP_SYNC\n"));
- if (file->f_mode & FMODE_WRITE)
- return drain_dac(s,
- 0 /*file->f_flags & O_NONBLOCK */
- );
- return 0;
-
- case SNDCTL_DSP_SETDUPLEX:
- return 0;
-
- case SNDCTL_DSP_GETCAPS:
- return put_user(DSP_CAP_DUPLEX | DSP_CAP_REALTIME |
- DSP_CAP_TRIGGER | DSP_CAP_MMAP,
- (int *) arg);
-
- case SNDCTL_DSP_RESET:
- CS_DBGOUT(CS_IOCTL, 4, printk(KERN_INFO
- "cs4297a: cs4297a_ioctl(): DSP_RESET\n"));
- if (file->f_mode & FMODE_WRITE) {
- stop_dac(s);
- synchronize_irq(s->irq);
- s->dma_dac.count = s->dma_dac.total_bytes =
- s->dma_dac.blocks = s->dma_dac.wakeup = 0;
- s->dma_dac.swptr = s->dma_dac.hwptr =
- (int)(((__raw_readq(SS_CSR(R_SER_DMA_CUR_DSCR_ADDR_TX)) & M_DMA_CURDSCR_ADDR) -
- s->dma_dac.descrtab_phys) / sizeof(serdma_descr_t));
- }
- if (file->f_mode & FMODE_READ) {
- stop_adc(s);
- synchronize_irq(s->irq);
- s->dma_adc.count = s->dma_adc.total_bytes =
-			s->dma_adc.blocks = s->dma_adc.wakeup = 0;
- s->dma_adc.swptr = s->dma_adc.hwptr =
- (int)(((__raw_readq(SS_CSR(R_SER_DMA_CUR_DSCR_ADDR_RX)) & M_DMA_CURDSCR_ADDR) -
- s->dma_adc.descrtab_phys) / sizeof(serdma_descr_t));
- }
- return 0;
-
- case SNDCTL_DSP_SPEED:
- if (get_user(val, (int *) arg))
- return -EFAULT;
- CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
- "cs4297a: cs4297a_ioctl(): DSP_SPEED val=%d -> 48000\n", val));
- val = 48000;
- return put_user(val, (int *) arg);
-
- case SNDCTL_DSP_STEREO:
- if (get_user(val, (int *) arg))
- return -EFAULT;
- CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
- "cs4297a: cs4297a_ioctl(): DSP_STEREO val=%d\n", val));
- if (file->f_mode & FMODE_READ) {
- stop_adc(s);
- s->dma_adc.ready = 0;
- s->prop_adc.channels = val ? 2 : 1;
- }
- if (file->f_mode & FMODE_WRITE) {
- stop_dac(s);
- s->dma_dac.ready = 0;
- s->prop_dac.channels = val ? 2 : 1;
- }
- return 0;
-
- case SNDCTL_DSP_CHANNELS:
- if (get_user(val, (int *) arg))
- return -EFAULT;
- CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
- "cs4297a: cs4297a_ioctl(): DSP_CHANNELS val=%d\n",
- val));
- if (val != 0) {
- if (file->f_mode & FMODE_READ) {
- stop_adc(s);
- s->dma_adc.ready = 0;
- if (val >= 2)
- s->prop_adc.channels = 2;
- else
- s->prop_adc.channels = 1;
- }
- if (file->f_mode & FMODE_WRITE) {
- stop_dac(s);
- s->dma_dac.ready = 0;
- if (val >= 2)
- s->prop_dac.channels = 2;
- else
- s->prop_dac.channels = 1;
- }
- }
-
- if (file->f_mode & FMODE_WRITE)
- val = s->prop_dac.channels;
- else if (file->f_mode & FMODE_READ)
- val = s->prop_adc.channels;
-
- return put_user(val, (int *) arg);
-
- case SNDCTL_DSP_GETFMTS: // Returns a mask
- CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
- "cs4297a: cs4297a_ioctl(): DSP_GETFMT val=0x%.8x\n",
- AFMT_S16_LE | AFMT_U16_LE | AFMT_S8 |
- AFMT_U8));
- return put_user(AFMT_S16_LE | AFMT_U16_LE | AFMT_S8 |
- AFMT_U8, (int *) arg);
-
- case SNDCTL_DSP_SETFMT:
- if (get_user(val, (int *) arg))
- return -EFAULT;
- CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
- "cs4297a: cs4297a_ioctl(): DSP_SETFMT val=0x%.8x\n",
- val));
- if (val != AFMT_QUERY) {
- if (file->f_mode & FMODE_READ) {
- stop_adc(s);
- s->dma_adc.ready = 0;
- if (val != AFMT_S16_LE
- && val != AFMT_U16_LE && val != AFMT_S8
- && val != AFMT_U8)
- val = AFMT_U8;
- s->prop_adc.fmt = val;
- s->prop_adc.fmt_original = s->prop_adc.fmt;
- }
- if (file->f_mode & FMODE_WRITE) {
- stop_dac(s);
- s->dma_dac.ready = 0;
- if (val != AFMT_S16_LE
- && val != AFMT_U16_LE && val != AFMT_S8
- && val != AFMT_U8)
- val = AFMT_U8;
- s->prop_dac.fmt = val;
- s->prop_dac.fmt_original = s->prop_dac.fmt;
- }
- } else {
- if (file->f_mode & FMODE_WRITE)
- val = s->prop_dac.fmt_original;
- else if (file->f_mode & FMODE_READ)
- val = s->prop_adc.fmt_original;
- }
- CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
- "cs4297a: cs4297a_ioctl(): DSP_SETFMT return val=0x%.8x\n",
- val));
- return put_user(val, (int *) arg);
-
- case SNDCTL_DSP_POST:
- CS_DBGOUT(CS_IOCTL, 4, printk(KERN_INFO
- "cs4297a: cs4297a_ioctl(): DSP_POST\n"));
- return 0;
-
- case SNDCTL_DSP_GETTRIGGER:
- val = 0;
- if (file->f_mode & s->ena & FMODE_READ)
- val |= PCM_ENABLE_INPUT;
- if (file->f_mode & s->ena & FMODE_WRITE)
- val |= PCM_ENABLE_OUTPUT;
- return put_user(val, (int *) arg);
-
- case SNDCTL_DSP_SETTRIGGER:
- if (get_user(val, (int *) arg))
- return -EFAULT;
- if (file->f_mode & FMODE_READ) {
- if (val & PCM_ENABLE_INPUT) {
- if (!s->dma_adc.ready
- && (ret = prog_dmabuf_adc(s)))
- return ret;
- start_adc(s);
- } else
- stop_adc(s);
- }
- if (file->f_mode & FMODE_WRITE) {
- if (val & PCM_ENABLE_OUTPUT) {
- if (!s->dma_dac.ready
- && (ret = prog_dmabuf_dac(s)))
- return ret;
- start_dac(s);
- } else
- stop_dac(s);
- }
- return 0;
-
- case SNDCTL_DSP_GETOSPACE:
- if (!(file->f_mode & FMODE_WRITE))
- return -EINVAL;
- if (!s->dma_dac.ready && (val = prog_dmabuf_dac(s)))
- return val;
- spin_lock_irqsave(&s->lock, flags);
- cs4297a_update_ptr(s,CS_FALSE);
- abinfo.fragsize = s->dma_dac.fragsize;
- if (s->dma_dac.mapped)
- abinfo.bytes = s->dma_dac.sbufsz;
- else
- abinfo.bytes =
- s->dma_dac.sbufsz - s->dma_dac.count;
- abinfo.fragstotal = s->dma_dac.numfrag;
- abinfo.fragments = abinfo.bytes >> s->dma_dac.fragshift;
- CS_DBGOUT(CS_FUNCTION | CS_PARMS, 4, printk(KERN_INFO
- "cs4297a: cs4297a_ioctl(): GETOSPACE .fragsize=%d .bytes=%d .fragstotal=%d .fragments=%d\n",
- abinfo.fragsize,abinfo.bytes,abinfo.fragstotal,
- abinfo.fragments));
- spin_unlock_irqrestore(&s->lock, flags);
- return copy_to_user((void *) arg, &abinfo,
- sizeof(abinfo)) ? -EFAULT : 0;
-
- case SNDCTL_DSP_GETISPACE:
- if (!(file->f_mode & FMODE_READ))
- return -EINVAL;
- if (!s->dma_adc.ready && (val = prog_dmabuf_adc(s)))
- return val;
- spin_lock_irqsave(&s->lock, flags);
- cs4297a_update_ptr(s,CS_FALSE);
- if (s->conversion) {
- abinfo.fragsize = s->dma_adc.fragsize / 2;
- abinfo.bytes = s->dma_adc.count / 2;
- abinfo.fragstotal = s->dma_adc.numfrag;
- abinfo.fragments =
- abinfo.bytes >> (s->dma_adc.fragshift - 1);
- } else {
- abinfo.fragsize = s->dma_adc.fragsize;
- abinfo.bytes = s->dma_adc.count;
- abinfo.fragstotal = s->dma_adc.numfrag;
- abinfo.fragments =
- abinfo.bytes >> s->dma_adc.fragshift;
- }
- spin_unlock_irqrestore(&s->lock, flags);
- return copy_to_user((void *) arg, &abinfo,
- sizeof(abinfo)) ? -EFAULT : 0;
-
- case SNDCTL_DSP_NONBLOCK:
- spin_lock(&file->f_lock);
- file->f_flags |= O_NONBLOCK;
- spin_unlock(&file->f_lock);
- return 0;
-
- case SNDCTL_DSP_GETODELAY:
- if (!(file->f_mode & FMODE_WRITE))
- return -EINVAL;
- if(!s->dma_dac.ready && prog_dmabuf_dac(s))
- return 0;
- spin_lock_irqsave(&s->lock, flags);
- cs4297a_update_ptr(s,CS_FALSE);
- val = s->dma_dac.count;
- spin_unlock_irqrestore(&s->lock, flags);
- return put_user(val, (int *) arg);
-
- case SNDCTL_DSP_GETIPTR:
- if (!(file->f_mode & FMODE_READ))
- return -EINVAL;
- if(!s->dma_adc.ready && prog_dmabuf_adc(s))
- return 0;
- spin_lock_irqsave(&s->lock, flags);
- cs4297a_update_ptr(s,CS_FALSE);
- cinfo.bytes = s->dma_adc.total_bytes;
- if (s->dma_adc.mapped) {
- cinfo.blocks =
- (cinfo.bytes >> s->dma_adc.fragshift) -
- s->dma_adc.blocks;
- s->dma_adc.blocks =
- cinfo.bytes >> s->dma_adc.fragshift;
- } else {
- if (s->conversion) {
- cinfo.blocks =
- s->dma_adc.count /
- 2 >> (s->dma_adc.fragshift - 1);
- } else
- cinfo.blocks =
- s->dma_adc.count >> s->dma_adc.
- fragshift;
- }
- if (s->conversion)
- cinfo.ptr = s->dma_adc.hwptr / 2;
- else
- cinfo.ptr = s->dma_adc.hwptr;
- if (s->dma_adc.mapped)
- s->dma_adc.count &= s->dma_adc.fragsize - 1;
- spin_unlock_irqrestore(&s->lock, flags);
- return copy_to_user((void *) arg, &cinfo, sizeof(cinfo)) ? -EFAULT : 0;
-
- case SNDCTL_DSP_GETOPTR:
- if (!(file->f_mode & FMODE_WRITE))
- return -EINVAL;
- if(!s->dma_dac.ready && prog_dmabuf_dac(s))
- return 0;
- spin_lock_irqsave(&s->lock, flags);
- cs4297a_update_ptr(s,CS_FALSE);
- cinfo.bytes = s->dma_dac.total_bytes;
- if (s->dma_dac.mapped) {
- cinfo.blocks =
- (cinfo.bytes >> s->dma_dac.fragshift) -
- s->dma_dac.blocks;
- s->dma_dac.blocks =
- cinfo.bytes >> s->dma_dac.fragshift;
- } else {
- cinfo.blocks =
- s->dma_dac.count >> s->dma_dac.fragshift;
- }
- cinfo.ptr = s->dma_dac.hwptr;
- if (s->dma_dac.mapped)
- s->dma_dac.count &= s->dma_dac.fragsize - 1;
- spin_unlock_irqrestore(&s->lock, flags);
- return copy_to_user((void *) arg, &cinfo, sizeof(cinfo)) ? -EFAULT : 0;
-
- case SNDCTL_DSP_GETBLKSIZE:
- if (file->f_mode & FMODE_WRITE) {
- if ((val = prog_dmabuf_dac(s)))
- return val;
- return put_user(s->dma_dac.fragsize, (int *) arg);
- }
- if ((val = prog_dmabuf_adc(s)))
- return val;
- if (s->conversion)
- return put_user(s->dma_adc.fragsize / 2,
- (int *) arg);
- else
- return put_user(s->dma_adc.fragsize, (int *) arg);
-
- case SNDCTL_DSP_SETFRAGMENT:
- if (get_user(val, (int *) arg))
- return -EFAULT;
- return 0; // Say OK, but do nothing.
-
- case SNDCTL_DSP_SUBDIVIDE:
- if ((file->f_mode & FMODE_READ && s->dma_adc.subdivision)
- || (file->f_mode & FMODE_WRITE
- && s->dma_dac.subdivision)) return -EINVAL;
- if (get_user(val, (int *) arg))
- return -EFAULT;
- if (val != 1 && val != 2 && val != 4)
- return -EINVAL;
- if (file->f_mode & FMODE_READ)
- s->dma_adc.subdivision = val;
- else if (file->f_mode & FMODE_WRITE)
- s->dma_dac.subdivision = val;
- return 0;
-
- case SOUND_PCM_READ_RATE:
- if (file->f_mode & FMODE_READ)
- return put_user(s->prop_adc.rate, (int *) arg);
- else if (file->f_mode & FMODE_WRITE)
- return put_user(s->prop_dac.rate, (int *) arg);
-
- case SOUND_PCM_READ_CHANNELS:
- if (file->f_mode & FMODE_READ)
- return put_user(s->prop_adc.channels, (int *) arg);
- else if (file->f_mode & FMODE_WRITE)
- return put_user(s->prop_dac.channels, (int *) arg);
-
- case SOUND_PCM_READ_BITS:
- if (file->f_mode & FMODE_READ)
- return
- put_user(
- (s->prop_adc.
- fmt & (AFMT_S8 | AFMT_U8)) ? 8 : 16,
- (int *) arg);
- else if (file->f_mode & FMODE_WRITE)
- return
- put_user(
- (s->prop_dac.
- fmt & (AFMT_S8 | AFMT_U8)) ? 8 : 16,
- (int *) arg);
-
- case SOUND_PCM_WRITE_FILTER:
- case SNDCTL_DSP_SETSYNCRO:
- case SOUND_PCM_READ_FILTER:
- return -EINVAL;
- }
- return mixer_ioctl(s, cmd, arg);
-}
-
-static long cs4297a_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
-{
- int ret;
-
- mutex_lock(&swarm_cs4297a_mutex);
- ret = cs4297a_ioctl(file, cmd, arg);
- mutex_unlock(&swarm_cs4297a_mutex);
-
- return ret;
-}
-
-static int cs4297a_release(struct inode *inode, struct file *file)
-{
- struct cs4297a_state *s =
- (struct cs4297a_state *) file->private_data;
-
- CS_DBGOUT(CS_FUNCTION | CS_RELEASE, 2, printk(KERN_INFO
- "cs4297a: cs4297a_release(): inode=0x%.8x file=0x%.8x f_mode=0x%x\n",
- (unsigned) inode, (unsigned) file, file->f_mode));
- VALIDATE_STATE(s);
-
- if (file->f_mode & FMODE_WRITE) {
- drain_dac(s, file->f_flags & O_NONBLOCK);
- mutex_lock(&s->open_sem_dac);
- stop_dac(s);
- dealloc_dmabuf(s, &s->dma_dac);
- s->open_mode &= ~FMODE_WRITE;
- mutex_unlock(&s->open_sem_dac);
- wake_up(&s->open_wait_dac);
- }
- if (file->f_mode & FMODE_READ) {
- drain_adc(s, file->f_flags & O_NONBLOCK);
- mutex_lock(&s->open_sem_adc);
- stop_adc(s);
- dealloc_dmabuf(s, &s->dma_adc);
- s->open_mode &= ~FMODE_READ;
- mutex_unlock(&s->open_sem_adc);
- wake_up(&s->open_wait_adc);
- }
- return 0;
-}
-
-static int cs4297a_locked_open(struct inode *inode, struct file *file)
-{
- int minor = iminor(inode);
- struct cs4297a_state *s=NULL;
- struct list_head *entry;
-
- CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO
- "cs4297a: cs4297a_open(): inode=0x%.8x file=0x%.8x f_mode=0x%x\n",
- (unsigned) inode, (unsigned) file, file->f_mode));
- CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO
- "cs4297a: status = %08x\n", (int)__raw_readq(SS_CSR(R_SER_STATUS_DEBUG))));
-
- list_for_each(entry, &cs4297a_devs)
- {
- s = list_entry(entry, struct cs4297a_state, list);
-
- if (!((s->dev_audio ^ minor) & ~0xf))
- break;
- }
- if (entry == &cs4297a_devs)
- return -ENODEV;
- if (!s) {
- CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO
- "cs4297a: cs4297a_open(): Error - unable to find audio state struct\n"));
- return -ENODEV;
- }
- VALIDATE_STATE(s);
- file->private_data = s;
-
- // wait for device to become free
- if (!(file->f_mode & (FMODE_WRITE | FMODE_READ))) {
- CS_DBGOUT(CS_FUNCTION | CS_OPEN | CS_ERROR, 2, printk(KERN_INFO
- "cs4297a: cs4297a_open(): Error - must open READ and/or WRITE\n"));
- return -ENODEV;
- }
- if (file->f_mode & FMODE_WRITE) {
- if (__raw_readq(SS_CSR(R_SER_DMA_DSCR_COUNT_TX)) != 0) {
- printk(KERN_ERR "cs4297a: TX pipe needs to drain\n");
- while (__raw_readq(SS_CSR(R_SER_DMA_DSCR_COUNT_TX)))
- ;
- }
-
- mutex_lock(&s->open_sem_dac);
- while (s->open_mode & FMODE_WRITE) {
- if (file->f_flags & O_NONBLOCK) {
- mutex_unlock(&s->open_sem_dac);
- return -EBUSY;
- }
- mutex_unlock(&s->open_sem_dac);
- oss_broken_sleep_on(&s->open_wait_dac, MAX_SCHEDULE_TIMEOUT);
-
- if (signal_pending(current)) {
- printk("open - sig pending\n");
- return -ERESTARTSYS;
- }
- mutex_lock(&s->open_sem_dac);
- }
- }
- if (file->f_mode & FMODE_READ) {
- mutex_lock(&s->open_sem_adc);
- while (s->open_mode & FMODE_READ) {
- if (file->f_flags & O_NONBLOCK) {
- mutex_unlock(&s->open_sem_adc);
- return -EBUSY;
- }
- mutex_unlock(&s->open_sem_adc);
- oss_broken_sleep_on(&s->open_wait_adc, MAX_SCHEDULE_TIMEOUT);
-
- if (signal_pending(current)) {
- printk("open - sig pending\n");
- return -ERESTARTSYS;
- }
- mutex_lock(&s->open_sem_adc);
- }
- }
- s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
- if (file->f_mode & FMODE_READ) {
- s->prop_adc.fmt = AFMT_S16_BE;
- s->prop_adc.fmt_original = s->prop_adc.fmt;
- s->prop_adc.channels = 2;
- s->prop_adc.rate = 48000;
- s->conversion = 0;
- s->ena &= ~FMODE_READ;
- s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags =
- s->dma_adc.subdivision = 0;
- mutex_unlock(&s->open_sem_adc);
-
- if (prog_dmabuf_adc(s)) {
- CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR
- "cs4297a: adc Program dmabufs failed.\n"));
- cs4297a_release(inode, file);
- return -ENOMEM;
- }
- }
- if (file->f_mode & FMODE_WRITE) {
- s->prop_dac.fmt = AFMT_S16_BE;
- s->prop_dac.fmt_original = s->prop_dac.fmt;
- s->prop_dac.channels = 2;
- s->prop_dac.rate = 48000;
- s->conversion = 0;
- s->ena &= ~FMODE_WRITE;
- s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags =
- s->dma_dac.subdivision = 0;
- mutex_unlock(&s->open_sem_dac);
-
- if (prog_dmabuf_dac(s)) {
- CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR
- "cs4297a: dac Program dmabufs failed.\n"));
- cs4297a_release(inode, file);
- return -ENOMEM;
- }
- }
- CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2,
- printk(KERN_INFO "cs4297a: cs4297a_open()- 0\n"));
- return nonseekable_open(inode, file);
-}
-
-static int cs4297a_open(struct inode *inode, struct file *file)
-{
- int ret;
-
- mutex_lock(&swarm_cs4297a_mutex);
-	ret = cs4297a_locked_open(inode, file);
- mutex_unlock(&swarm_cs4297a_mutex);
-
- return ret;
-}
-
-// ******************************************************************************************
-// Wave (audio) file operations struct.
-// ******************************************************************************************
-static const struct file_operations cs4297a_audio_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = cs4297a_read,
- .write = cs4297a_write,
- .poll = cs4297a_poll,
- .unlocked_ioctl = cs4297a_unlocked_ioctl,
- .mmap = cs4297a_mmap,
- .open = cs4297a_open,
- .release = cs4297a_release,
-};
-
-static irqreturn_t cs4297a_interrupt(int irq, void *dev_id)
-{
- struct cs4297a_state *s = (struct cs4297a_state *) dev_id;
- u32 status;
-
- status = __raw_readq(SS_CSR(R_SER_STATUS_DEBUG));
-
- CS_DBGOUT(CS_INTERRUPT, 6, printk(KERN_INFO
- "cs4297a: cs4297a_interrupt() HISR=0x%.8x\n", status));
-
-#if 0
- /* XXXKW what check *should* be done here? */
- if (!(status & (M_SYNCSER_RX_EOP_COUNT | M_SYNCSER_RX_OVERRUN | M_SYNCSER_RX_SYNC_ERR))) {
- status = __raw_readq(SS_CSR(R_SER_STATUS));
- printk(KERN_ERR "cs4297a: unexpected interrupt (status %08x)\n", status);
- return;
- }
-#endif
-
- if (status & M_SYNCSER_RX_SYNC_ERR) {
- status = __raw_readq(SS_CSR(R_SER_STATUS));
- printk(KERN_ERR "cs4297a: rx sync error (status %08x)\n", status);
-		return IRQ_HANDLED;
- }
-
- if (status & M_SYNCSER_RX_OVERRUN) {
- int newptr, i;
- s->stats.rx_ovrrn++;
- printk(KERN_ERR "cs4297a: receive FIFO overrun\n");
-
- /* Fix things up: get the receive descriptor pool
- clean and give them back to the hardware */
- while (__raw_readq(SS_CSR(R_SER_DMA_DSCR_COUNT_RX)))
- ;
- newptr = (unsigned) (((__raw_readq(SS_CSR(R_SER_DMA_CUR_DSCR_ADDR_RX)) & M_DMA_CURDSCR_ADDR) -
- s->dma_adc.descrtab_phys) / sizeof(serdma_descr_t));
- for (i=0; i<DMA_DESCR; i++) {
- s->dma_adc.descrtab[i].descr_a &= ~M_DMA_SERRX_SOP;
- }
- s->dma_adc.swptr = s->dma_adc.hwptr = newptr;
- s->dma_adc.count = 0;
- s->dma_adc.sb_swptr = s->dma_adc.sb_hwptr = s->dma_adc.sample_buf;
- __raw_writeq(DMA_DESCR, SS_CSR(R_SER_DMA_DSCR_COUNT_RX));
- }
-
- spin_lock(&s->lock);
- cs4297a_update_ptr(s,CS_TRUE);
- spin_unlock(&s->lock);
-
-	CS_DBGOUT(CS_INTERRUPT, 6, printk(KERN_INFO
-		"cs4297a: cs4297a_interrupt()-\n"));
-
-	return IRQ_HANDLED;
-}
-
-#if 0
-static struct initvol {
- int mixch;
- int vol;
-} initvol[] __initdata = {
-
- {SOUND_MIXER_WRITE_VOLUME, 0x4040},
- {SOUND_MIXER_WRITE_PCM, 0x4040},
- {SOUND_MIXER_WRITE_SYNTH, 0x4040},
- {SOUND_MIXER_WRITE_CD, 0x4040},
- {SOUND_MIXER_WRITE_LINE, 0x4040},
- {SOUND_MIXER_WRITE_LINE1, 0x4040},
- {SOUND_MIXER_WRITE_RECLEV, 0x0000},
- {SOUND_MIXER_WRITE_SPEAKER, 0x4040},
- {SOUND_MIXER_WRITE_MIC, 0x0000}
-};
-#endif
-
-static int __init cs4297a_init(void)
-{
- struct cs4297a_state *s;
- u32 pwr, id;
- mm_segment_t fs;
- int rval;
- u64 cfg;
- int mdio_val;
-
- CS_DBGOUT(CS_INIT | CS_FUNCTION, 2, printk(KERN_INFO
- "cs4297a: cs4297a_init_module()+ \n"));
-
- mdio_val = __raw_readq(KSEG1 + A_MAC_REGISTER(2, R_MAC_MDIO)) &
- (M_MAC_MDIO_DIR|M_MAC_MDIO_OUT);
-
- /* Check syscfg for synchronous serial on port 1 */
- cfg = __raw_readq(KSEG1 + A_SCD_SYSTEM_CFG);
- if (!(cfg & M_SYS_SER1_ENABLE)) {
- __raw_writeq(cfg | M_SYS_SER1_ENABLE, KSEG1+A_SCD_SYSTEM_CFG);
- cfg = __raw_readq(KSEG1 + A_SCD_SYSTEM_CFG);
- if (!(cfg & M_SYS_SER1_ENABLE)) {
- printk(KERN_INFO "cs4297a: serial port 1 not configured for synchronous operation\n");
- return -1;
- }
-
- printk(KERN_INFO "cs4297a: serial port 1 switching to synchronous operation\n");
-
- /* Force the codec (on SWARM) to reset by clearing
- GENO, preserving MDIO (no effect on CSWARM) */
- __raw_writeq(mdio_val, KSEG1+A_MAC_REGISTER(2, R_MAC_MDIO));
- udelay(10);
- }
-
- /* Now set GENO */
- __raw_writeq(mdio_val | M_MAC_GENC, KSEG1+A_MAC_REGISTER(2, R_MAC_MDIO));
- /* Give the codec some time to finish resetting (start the bit clock) */
- udelay(100);
-
- if (!(s = kzalloc(sizeof(struct cs4297a_state), GFP_KERNEL))) {
- CS_DBGOUT(CS_ERROR, 1, printk(KERN_ERR
- "cs4297a: probe() no memory for state struct.\n"));
- return -1;
- }
- s->magic = CS4297a_MAGIC;
- init_waitqueue_head(&s->dma_adc.wait);
- init_waitqueue_head(&s->dma_dac.wait);
- init_waitqueue_head(&s->dma_adc.reg_wait);
- init_waitqueue_head(&s->dma_dac.reg_wait);
- init_waitqueue_head(&s->open_wait);
- init_waitqueue_head(&s->open_wait_adc);
- init_waitqueue_head(&s->open_wait_dac);
- mutex_init(&s->open_sem_adc);
- mutex_init(&s->open_sem_dac);
- spin_lock_init(&s->lock);
-
- s->irq = K_INT_SER_1;
-
- if (request_irq
- (s->irq, cs4297a_interrupt, 0, "Crystal CS4297a", s)) {
- CS_DBGOUT(CS_INIT | CS_ERROR, 1,
- printk(KERN_ERR "cs4297a: irq %u in use\n", s->irq));
- goto err_irq;
- }
- if ((s->dev_audio = register_sound_dsp(&cs4297a_audio_fops, -1)) <
- 0) {
- CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_ERR
- "cs4297a: probe() register_sound_dsp() failed.\n"));
- goto err_dev1;
- }
- if ((s->dev_mixer = register_sound_mixer(&cs4297a_mixer_fops, -1)) <
- 0) {
- CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_ERR
- "cs4297a: probe() register_sound_mixer() failed.\n"));
- goto err_dev2;
- }
-
- if (ser_init(s) || dma_init(s)) {
- CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_ERR
- "cs4297a: ser_init failed.\n"));
- goto err_dev3;
- }
-
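-	/* Poll the codec's power-control register until it reports all
-	   sections ready (0xf) or a read fails. */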
- do {
- udelay(4000);
- rval = cs4297a_read_ac97(s, AC97_POWER_CONTROL, &pwr);
- } while (!rval && (pwr != 0xf));
-
- if (!rval) {
- char *sb1250_duart_present;
-
- fs = get_fs();
- set_fs(KERNEL_DS);
-#if 0
- val = SOUND_MASK_LINE;
- mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
- for (i = 0; i < ARRAY_SIZE(initvol); i++) {
- val = initvol[i].vol;
- mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
- }
-// cs4297a_write_ac97(s, 0x18, 0x0808);
-#else
- // cs4297a_write_ac97(s, 0x5e, 0x180);
- cs4297a_write_ac97(s, 0x02, 0x0808);
- cs4297a_write_ac97(s, 0x18, 0x0808);
-#endif
- set_fs(fs);
-
- list_add(&s->list, &cs4297a_devs);
-
- cs4297a_read_ac97(s, AC97_VENDOR_ID1, &id);
-
- sb1250_duart_present = symbol_get(sb1250_duart_present);
- if (sb1250_duart_present)
- sb1250_duart_present[1] = 0;
-
- printk(KERN_INFO "cs4297a: initialized (vendor id = %x)\n", id);
-
- CS_DBGOUT(CS_INIT | CS_FUNCTION, 2,
- printk(KERN_INFO "cs4297a: cs4297a_init_module()-\n"));
-
- return 0;
- }
-
- err_dev3:
- unregister_sound_mixer(s->dev_mixer);
- err_dev2:
- unregister_sound_dsp(s->dev_audio);
- err_dev1:
- free_irq(s->irq, s);
- err_irq:
- kfree(s);
-
- printk(KERN_INFO "cs4297a: initialization failed\n");
-
- return -1;
-}
-
-static void __exit cs4297a_cleanup(void)
-{
- /*
- XXXKW
- disable_irq, free_irq
- drain DMA queue
- disable DMA
- disable TX/RX
- free memory
- */
- CS_DBGOUT(CS_INIT | CS_FUNCTION, 2,
- printk(KERN_INFO "cs4297a: cleanup_cs4297a() finished\n"));
-}
-
-// ---------------------------------------------------------------------
-
-MODULE_AUTHOR("Kip Walker, Broadcom Corp.");
-MODULE_DESCRIPTION("Cirrus Logic CS4297a Driver for Broadcom SWARM board");
-
-// ---------------------------------------------------------------------
-
-module_init(cs4297a_init);
-module_exit(cs4297a_cleanup);
diff --git a/sound/oss/sys_timer.c b/sound/oss/sys_timer.c
deleted file mode 100644
index 8a4b5625dba6..000000000000
--- a/sound/oss/sys_timer.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * sound/oss/sys_timer.c
- *
- * The default timer for the Level 2 sequencer interface
- * Uses the (1/HZ sec) timer of kernel.
- */
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- */
-/*
- * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
- * Andrew Veliath : adapted tmr2ticks from level 1 sequencer (avoid overflow)
- */
-#include <linux/spinlock.h>
-#include "sound_config.h"
-
-static volatile int opened, tmr_running;
-static volatile unsigned int tmr_offs, tmr_ctr;
-static volatile unsigned long ticks_offs;
-static volatile int curr_tempo, curr_timebase;
-static volatile unsigned long curr_ticks;
-static volatile unsigned long next_event_time;
-static unsigned long prev_event_time;
-
-static void poll_def_tmr(unsigned long dummy);
-static DEFINE_SPINLOCK(lock);
-static DEFINE_TIMER(def_tmr, poll_def_tmr);
-
-static unsigned long
-tmr2ticks(int tmr_value)
-{
- /*
- * Convert timer ticks to MIDI ticks
- */
-
- unsigned long tmp;
- unsigned long scale;
-
- /* tmr_value (ticks per sec) *
- 1000000 (usecs per sec) / HZ (ticks per sec) -=> usecs */
- tmp = tmr_value * (1000000 / HZ);
- scale = (60 * 1000000) / (curr_tempo * curr_timebase); /* usecs per MIDI tick */
- return (tmp + scale / 2) / scale;
-}
-
-static void
-poll_def_tmr(unsigned long dummy)
-{
- if (!opened)
- return;
- def_tmr.expires = (1) + jiffies;
- add_timer(&def_tmr);
-
- if (!tmr_running)
- return;
-
- spin_lock(&lock);
- tmr_ctr++;
- curr_ticks = ticks_offs + tmr2ticks(tmr_ctr);
-
- if (curr_ticks >= next_event_time) {
- next_event_time = (unsigned long) -1;
- sequencer_timer(0);
- }
-
- spin_unlock(&lock);
-}
-
-static void
-tmr_reset(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&lock,flags);
- tmr_offs = 0;
- ticks_offs = 0;
- tmr_ctr = 0;
- next_event_time = (unsigned long) -1;
- prev_event_time = 0;
- curr_ticks = 0;
- spin_unlock_irqrestore(&lock,flags);
-}
-
-static int
-def_tmr_open(int dev, int mode)
-{
- if (opened)
- return -EBUSY;
-
- tmr_reset();
- curr_tempo = 60;
- curr_timebase = 100;
- opened = 1;
- {
- def_tmr.expires = (1) + jiffies;
- add_timer(&def_tmr);
- }
-
- return 0;
-}
-
-static void
-def_tmr_close(int dev)
-{
- opened = tmr_running = 0;
- del_timer(&def_tmr);
-}
-
-static int
-def_tmr_event(int dev, unsigned char *event)
-{
- unsigned char cmd = event[1];
- unsigned long parm = *(int *) &event[4];
-
- switch (cmd)
- {
- case TMR_WAIT_REL:
- parm += prev_event_time;
- case TMR_WAIT_ABS:
- if (parm > 0)
- {
- long time;
-
- if (parm <= curr_ticks) /* It's the time */
- return TIMER_NOT_ARMED;
-
- time = parm;
- next_event_time = prev_event_time = time;
-
- return TIMER_ARMED;
- }
- break;
-
- case TMR_START:
- tmr_reset();
- tmr_running = 1;
- break;
-
- case TMR_STOP:
- tmr_running = 0;
- break;
-
- case TMR_CONTINUE:
- tmr_running = 1;
- break;
-
- case TMR_TEMPO:
- if (parm)
- {
- if (parm < 8)
- parm = 8;
- if (parm > 360)
- parm = 360;
- tmr_offs = tmr_ctr;
- ticks_offs += tmr2ticks(tmr_ctr);
- tmr_ctr = 0;
- curr_tempo = parm;
- }
- break;
-
- case TMR_ECHO:
- seq_copy_to_input(event, 8);
- break;
-
- default:;
- }
-
- return TIMER_NOT_ARMED;
-}
-
-static unsigned long
-def_tmr_get_time(int dev)
-{
- if (!opened)
- return 0;
-
- return curr_ticks;
-}
-
-/* same as sound_timer.c:timer_ioctl!? */
-static int def_tmr_ioctl(int dev, unsigned int cmd, void __user *arg)
-{
- int __user *p = arg;
- int val;
-
- switch (cmd) {
- case SNDCTL_TMR_SOURCE:
- return __put_user(TMR_INTERNAL, p);
-
- case SNDCTL_TMR_START:
- tmr_reset();
- tmr_running = 1;
- return 0;
-
- case SNDCTL_TMR_STOP:
- tmr_running = 0;
- return 0;
-
- case SNDCTL_TMR_CONTINUE:
- tmr_running = 1;
- return 0;
-
- case SNDCTL_TMR_TIMEBASE:
- if (__get_user(val, p))
- return -EFAULT;
- if (val) {
- if (val < 1)
- val = 1;
- if (val > 1000)
- val = 1000;
- curr_timebase = val;
- }
- return __put_user(curr_timebase, p);
-
- case SNDCTL_TMR_TEMPO:
- if (__get_user(val, p))
- return -EFAULT;
- if (val) {
- if (val < 8)
- val = 8;
- if (val > 250)
- val = 250;
- tmr_offs = tmr_ctr;
- ticks_offs += tmr2ticks(tmr_ctr);
- tmr_ctr = 0;
- curr_tempo = val;
- reprogram_timer();
- }
- return __put_user(curr_tempo, p);
-
- case SNDCTL_SEQ_CTRLRATE:
- if (__get_user(val, p))
- return -EFAULT;
- if (val != 0) /* Can't change */
- return -EINVAL;
- val = ((curr_tempo * curr_timebase) + 30) / 60;
- return __put_user(val, p);
-
- case SNDCTL_SEQ_GETTIME:
- return __put_user(curr_ticks, p);
-
- case SNDCTL_TMR_METRONOME:
- /* NOP */
- break;
-
- default:;
- }
- return -EINVAL;
-}
-
-static void
-def_tmr_arm(int dev, long time)
-{
- if (time < 0)
- time = curr_ticks + 1;
- else if (time <= curr_ticks) /* It's the time */
- return;
-
- next_event_time = prev_event_time = time;
-
- return;
-}
-
-struct sound_timer_operations default_sound_timer =
-{
- .owner = THIS_MODULE,
- .info = {"System clock", 0},
- .priority = 0, /* Priority */
- .devlink = 0, /* Local device link */
- .open = def_tmr_open,
- .close = def_tmr_close,
- .event = def_tmr_event,
- .get_time = def_tmr_get_time,
- .ioctl = def_tmr_ioctl,
- .arm_timer = def_tmr_arm
-};
diff --git a/sound/oss/trix.c b/sound/oss/trix.c
deleted file mode 100644
index a57bc635d758..000000000000
--- a/sound/oss/trix.c
+++ /dev/null
@@ -1,525 +0,0 @@
-/*
- * sound/oss/trix.c
- *
- * Low level driver for the MediaTrix AudioTrix Pro
- * (MT-0002-PC Control Chip)
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- * Changes
- * Alan Cox Modularisation, cleanup.
- * Christoph Hellwig Adapted to module_init/module_exit
- * Arnaldo C. de Melo Got rid of attach_uart401
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include "sound_config.h"
-#include "sb.h"
-#include "sound_firmware.h"
-
-#include "ad1848.h"
-#include "mpu401.h"
-
-#include "trix_boot.h"
-
-static int mpu;
-
-static bool joystick;
-
-static unsigned char trix_read(int addr)
-{
- outb(((unsigned char) addr), 0x390); /* MT-0002-PC ASIC address */
- return inb(0x391); /* MT-0002-PC ASIC data */
-}
-
-static void trix_write(int addr, int data)
-{
- outb(((unsigned char) addr), 0x390); /* MT-0002-PC ASIC address */
- outb(((unsigned char) data), 0x391); /* MT-0002-PC ASIC data */
-}
-
-static void download_boot(int base)
-{
- int i = 0, n = trix_boot_len;
-
- if (trix_boot_len == 0)
- return;
-
- trix_write(0xf8, 0x00); /* ??????? */
- outb((0x01), base + 6); /* Clear the internal data pointer */
- outb((0x00), base + 6); /* Restart */
-
- /*
- * Write the boot code to the RAM upload/download register.
- * Each write increments the internal data pointer.
- */
- outb((0x01), base + 6); /* Clear the internal data pointer */
- outb((0x1A), 0x390); /* Select RAM download/upload port */
-
- for (i = 0; i < n; i++)
- outb((trix_boot[i]), 0x391);
- for (i = n; i < 10016; i++) /* Clear up to first 16 bytes of data RAM */
- outb((0x00), 0x391);
- outb((0x00), base + 6); /* Reset */
- outb((0x50), 0x390); /* ?????? */
-
-}
-
-static int trix_set_wss_port(struct address_info *hw_config)
-{
- unsigned char addr_bits;
-
- if (trix_read(0x15) != 0x71) /* No ASIC signature */
- {
- MDB(printk(KERN_ERR "No AudioTrix ASIC signature found\n"));
- return 0;
- }
-
- /*
- * Reset some registers.
- */
-
- trix_write(0x13, 0);
- trix_write(0x14, 0);
-
- /*
- * Configure the ASIC to place the codec to the proper I/O location
- */
-
- switch (hw_config->io_base)
- {
- case 0x530:
- addr_bits = 0;
- break;
- case 0x604:
- addr_bits = 1;
- break;
- case 0xE80:
- addr_bits = 2;
- break;
- case 0xF40:
- addr_bits = 3;
- break;
- default:
- return 0;
- }
-
- trix_write(0x19, (trix_read(0x19) & 0x03) | addr_bits);
- return 1;
-}
-
-/*
- * Probe and attach routines for the Windows Sound System mode of
- * AudioTrix Pro
- */
-
-static int __init init_trix_wss(struct address_info *hw_config)
-{
- static unsigned char dma_bits[4] = {
- 1, 2, 0, 3
- };
- struct resource *ports;
- int config_port = hw_config->io_base + 0;
- int dma1 = hw_config->dma, dma2 = hw_config->dma2;
- int old_num_mixers = num_mixers;
- u8 config, bits;
- int ret;
-
- switch(hw_config->irq) {
- case 7:
- bits = 8;
- break;
- case 9:
- bits = 0x10;
- break;
- case 10:
- bits = 0x18;
- break;
- case 11:
- bits = 0x20;
- break;
- default:
- printk(KERN_ERR "AudioTrix: Bad WSS IRQ %d\n", hw_config->irq);
- return 0;
- }
-
- switch (dma1) {
- case 0:
- case 1:
- case 3:
- break;
- default:
- printk(KERN_ERR "AudioTrix: Bad WSS DMA %d\n", dma1);
- return 0;
- }
-
- switch (dma2) {
- case -1:
- case 0:
- case 1:
- case 3:
- break;
- default:
- printk(KERN_ERR "AudioTrix: Bad capture DMA %d\n", dma2);
- return 0;
- }
-
- /*
- * Check if the IO port returns valid signature. The original MS Sound
- * system returns 0x04 while some cards (AudioTrix Pro for example)
- * return 0x00.
- */
- ports = request_region(hw_config->io_base + 4, 4, "ad1848");
- if (!ports) {
- printk(KERN_ERR "AudioTrix: MSS I/O port conflict (%x)\n", hw_config->io_base);
- return 0;
- }
-
- if (!request_region(hw_config->io_base, 4, "MSS config")) {
- printk(KERN_ERR "AudioTrix: MSS I/O port conflict (%x)\n", hw_config->io_base);
- release_region(hw_config->io_base + 4, 4);
- return 0;
- }
-
- if (!trix_set_wss_port(hw_config))
- goto fail;
-
- config = inb(hw_config->io_base + 3);
-
- if ((config & 0x3f) != 0x00)
- {
- MDB(printk(KERN_ERR "No MSS signature detected on port 0x%x\n", hw_config->io_base));
- goto fail;
- }
-
- /*
-	 * Check that DMA0 is not in use with an 8 bit board.
- */
-
- if (dma1 == 0 && config & 0x80)
- {
-		printk(KERN_ERR "AudioTrix: Can't use DMA0 with an 8 bit card slot\n");
- goto fail;
- }
- if (hw_config->irq > 9 && config & 0x80)
- {
-		printk(KERN_ERR "AudioTrix: Can't use IRQ%d with an 8 bit card slot\n", hw_config->irq);
- goto fail;
- }
-
- ret = ad1848_detect(ports, NULL, hw_config->osp);
- if (!ret)
- goto fail;
-
- if (joystick==1)
- trix_write(0x15, 0x80);
-
- /*
- * Set the IRQ and DMA addresses.
- */
-
- outb((bits | 0x40), config_port);
-
- if (dma2 == -1 || dma2 == dma1)
- {
- bits |= dma_bits[dma1];
- dma2 = dma1;
- }
- else
- {
- unsigned char tmp;
-
- tmp = trix_read(0x13) & ~30;
- trix_write(0x13, tmp | 0x80 | (dma1 << 4));
-
- tmp = trix_read(0x14) & ~30;
- trix_write(0x14, tmp | 0x80 | (dma2 << 4));
- }
-
- outb((bits), config_port); /* Write IRQ+DMA setup */
-
- hw_config->slots[0] = ad1848_init("AudioTrix Pro", ports,
- hw_config->irq,
- dma1,
- dma2,
- 0,
- hw_config->osp,
- THIS_MODULE);
-
- if (num_mixers > old_num_mixers) /* Mixer got installed */
- {
- AD1848_REROUTE(SOUND_MIXER_LINE1, SOUND_MIXER_LINE); /* Line in */
- AD1848_REROUTE(SOUND_MIXER_LINE2, SOUND_MIXER_CD);
- AD1848_REROUTE(SOUND_MIXER_LINE3, SOUND_MIXER_SYNTH); /* OPL4 */
- AD1848_REROUTE(SOUND_MIXER_SPEAKER, SOUND_MIXER_ALTPCM); /* SB */
- }
- return 1;
-
-fail:
- release_region(hw_config->io_base, 4);
- release_region(hw_config->io_base + 4, 4);
- return 0;
-}
-
-static int __init probe_trix_sb(struct address_info *hw_config)
-{
-
- int tmp;
- unsigned char conf;
- extern int sb_be_quiet;
- int old_quiet;
- static signed char irq_translate[] = {
- -1, -1, -1, 0, 1, 2, -1, 3
- };
-
- if (trix_boot_len == 0)
- return 0; /* No boot code -> no fun */
-
- if ((hw_config->io_base & 0xffffff8f) != 0x200)
- return 0;
-
- tmp = hw_config->irq;
- if (tmp > 7)
- return 0;
- if (irq_translate[tmp] == -1)
- return 0;
-
- tmp = hw_config->dma;
- if (tmp != 1 && tmp != 3)
- return 0;
-
- if (!request_region(hw_config->io_base, 16, "soundblaster")) {
- printk(KERN_ERR "AudioTrix: SB I/O port conflict (%x)\n", hw_config->io_base);
- return 0;
- }
-
- conf = 0x84; /* DMA and IRQ enable */
- conf |= hw_config->io_base & 0x70; /* I/O address bits */
- conf |= irq_translate[hw_config->irq];
- if (hw_config->dma == 3)
- conf |= 0x08;
- trix_write(0x1b, conf);
-
- download_boot(hw_config->io_base);
-
- hw_config->name = "AudioTrix SB";
- if (!sb_dsp_detect(hw_config, 0, 0, NULL)) {
- release_region(hw_config->io_base, 16);
- return 0;
- }
-
- hw_config->driver_use_1 = SB_NO_MIDI | SB_NO_MIXER | SB_NO_RECORDING;
-
- /* Prevent false alarms */
- old_quiet = sb_be_quiet;
- sb_be_quiet = 1;
-
- sb_dsp_init(hw_config, THIS_MODULE);
-
- sb_be_quiet = old_quiet;
- return 1;
-}
-
-static int __init probe_trix_mpu(struct address_info *hw_config)
-{
- unsigned char conf;
- static int irq_bits[] = {
- -1, -1, -1, 1, 2, 3, -1, 4, -1, 5
- };
-
- if (hw_config->irq > 9)
- {
- printk(KERN_ERR "AudioTrix: Bad MPU IRQ %d\n", hw_config->irq);
- return 0;
- }
- if (irq_bits[hw_config->irq] == -1)
- {
- printk(KERN_ERR "AudioTrix: Bad MPU IRQ %d\n", hw_config->irq);
- return 0;
- }
- switch (hw_config->io_base)
- {
- case 0x330:
- conf = 0x00;
- break;
- case 0x370:
- conf = 0x04;
- break;
- case 0x3b0:
- conf = 0x08;
- break;
- case 0x3f0:
- conf = 0x0c;
- break;
- default:
- return 0; /* Invalid port */
- }
-
- conf |= irq_bits[hw_config->irq] << 4;
- trix_write(0x19, (trix_read(0x19) & 0x83) | conf);
- hw_config->name = "AudioTrix Pro";
- return probe_uart401(hw_config, THIS_MODULE);
-}
-
-static void __exit unload_trix_wss(struct address_info *hw_config)
-{
- int dma2 = hw_config->dma2;
-
- if (dma2 == -1)
- dma2 = hw_config->dma;
-
- release_region(0x390, 2);
- release_region(hw_config->io_base, 4);
-
- ad1848_unload(hw_config->io_base + 4,
- hw_config->irq,
- hw_config->dma,
- dma2,
- 0);
- sound_unload_audiodev(hw_config->slots[0]);
-}
-
-static inline void __exit unload_trix_mpu(struct address_info *hw_config)
-{
- unload_uart401(hw_config);
-}
-
-static inline void __exit unload_trix_sb(struct address_info *hw_config)
-{
- sb_dsp_unload(hw_config, mpu);
-}
-
-static struct address_info cfg;
-static struct address_info cfg2;
-static struct address_info cfg_mpu;
-
-static int sb;
-static int fw_load;
-
-static int __initdata io = -1;
-static int __initdata irq = -1;
-static int __initdata dma = -1;
-static int __initdata dma2 = -1; /* Set this for modules that need it */
-static int __initdata sb_io = -1;
-static int __initdata sb_dma = -1;
-static int __initdata sb_irq = -1;
-static int __initdata mpu_io = -1;
-static int __initdata mpu_irq = -1;
-
-module_param_hw(io, int, ioport, 0);
-module_param_hw(irq, int, irq, 0);
-module_param_hw(dma, int, dma, 0);
-module_param_hw(dma2, int, dma, 0);
-module_param_hw(sb_io, int, ioport, 0);
-module_param_hw(sb_dma, int, dma, 0);
-module_param_hw(sb_irq, int, irq, 0);
-module_param_hw(mpu_io, int, ioport, 0);
-module_param_hw(mpu_irq, int, irq, 0);
-module_param(joystick, bool, 0);
-
-static int __init init_trix(void)
-{
- printk(KERN_INFO "MediaTrix audio driver Copyright (C) by Hannu Savolainen 1993-1996\n");
-
- cfg.io_base = io;
- cfg.irq = irq;
- cfg.dma = dma;
- cfg.dma2 = dma2;
-
- cfg2.io_base = sb_io;
- cfg2.irq = sb_irq;
- cfg2.dma = sb_dma;
-
- cfg_mpu.io_base = mpu_io;
- cfg_mpu.irq = mpu_irq;
-
- if (cfg.io_base == -1 || cfg.dma == -1 || cfg.irq == -1) {
- printk(KERN_INFO "I/O, IRQ, DMA and type are mandatory\n");
- return -EINVAL;
- }
-
- if (cfg2.io_base != -1 && (cfg2.irq == -1 || cfg2.dma == -1)) {
- printk(KERN_INFO "CONFIG_SB_IRQ and CONFIG_SB_DMA must be specified if SB_IO is set.\n");
- return -EINVAL;
- }
- if (cfg_mpu.io_base != -1 && cfg_mpu.irq == -1) {
- printk(KERN_INFO "CONFIG_MPU_IRQ must be specified if MPU_IO is set.\n");
- return -EINVAL;
- }
- if (!trix_boot)
- {
- fw_load = 1;
- trix_boot_len = mod_firmware_load("/etc/sound/trxpro.bin",
- (char **) &trix_boot);
- }
-
- if (!request_region(0x390, 2, "AudioTrix")) {
- printk(KERN_ERR "AudioTrix: Config port I/O conflict\n");
- return -ENODEV;
- }
-
- if (!init_trix_wss(&cfg)) {
- release_region(0x390, 2);
- return -ENODEV;
- }
-
- /*
- * We must attach in the right order to get the firmware
- * loaded up in time.
- */
-
- if (cfg2.io_base != -1) {
- sb = probe_trix_sb(&cfg2);
- }
-
- if (cfg_mpu.io_base != -1)
- mpu = probe_trix_mpu(&cfg_mpu);
-
- return 0;
-}
-
-static void __exit cleanup_trix(void)
-{
- if (fw_load)
- vfree(trix_boot);
- if (sb)
- unload_trix_sb(&cfg2);
- if (mpu)
- unload_trix_mpu(&cfg_mpu);
- unload_trix_wss(&cfg);
-}
-
-module_init(init_trix);
-module_exit(cleanup_trix);
-
-#ifndef MODULE
-static int __init setup_trix (char *str)
-{
- /* io, irq, dma, dma2, sb_io, sb_irq, sb_dma, mpu_io, mpu_irq */
-	int ints[10];
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- io = ints[1];
- irq = ints[2];
- dma = ints[3];
- dma2 = ints[4];
- sb_io = ints[5];
- sb_irq = ints[6];
-	sb_dma	= ints[7];
-	mpu_io	= ints[8];
-	mpu_irq	= ints[9];
-
- return 1;
-}
-
-__setup("trix=", setup_trix);
-#endif
-MODULE_LICENSE("GPL");
diff --git a/sound/oss/tuning.h b/sound/oss/tuning.h
deleted file mode 100644
index 953539931237..000000000000
--- a/sound/oss/tuning.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-static unsigned short semitone_tuning[24] =
-{
-/* 0 */ 10000, 10595, 11225, 11892, 12599, 13348, 14142, 14983,
-/* 8 */ 15874, 16818, 17818, 18877, 20000, 21189, 22449, 23784,
-/* 16 */ 25198, 26697, 28284, 29966, 31748, 33636, 35636, 37755
-};
-
-static unsigned short cent_tuning[100] =
-{
-/* 0 */ 10000, 10006, 10012, 10017, 10023, 10029, 10035, 10041,
-/* 8 */ 10046, 10052, 10058, 10064, 10070, 10075, 10081, 10087,
-/* 16 */ 10093, 10099, 10105, 10110, 10116, 10122, 10128, 10134,
-/* 24 */ 10140, 10145, 10151, 10157, 10163, 10169, 10175, 10181,
-/* 32 */ 10187, 10192, 10198, 10204, 10210, 10216, 10222, 10228,
-/* 40 */ 10234, 10240, 10246, 10251, 10257, 10263, 10269, 10275,
-/* 48 */ 10281, 10287, 10293, 10299, 10305, 10311, 10317, 10323,
-/* 56 */ 10329, 10335, 10341, 10347, 10353, 10359, 10365, 10371,
-/* 64 */ 10377, 10383, 10389, 10395, 10401, 10407, 10413, 10419,
-/* 72 */ 10425, 10431, 10437, 10443, 10449, 10455, 10461, 10467,
-/* 80 */ 10473, 10479, 10485, 10491, 10497, 10503, 10509, 10515,
-/* 88 */ 10521, 10528, 10534, 10540, 10546, 10552, 10558, 10564,
-/* 96 */ 10570, 10576, 10582, 10589
-};
diff --git a/sound/oss/uart401.c b/sound/oss/uart401.c
deleted file mode 100644
index 83dcc85b8688..000000000000
--- a/sound/oss/uart401.c
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * sound/oss/uart401.c
- *
- * MPU-401 UART driver (formerly uart401_midi.c)
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- * Changes:
- * Alan Cox Reformatted, removed sound_mem usage, use normal Linux
- * interrupt allocation. Protect against bogus unload
- * Fixed to allow IRQ > 15
- * Christoph Hellwig Adapted to module_init/module_exit
- * Arnaldo C. de Melo got rid of check_region
- *
- * Status:
- * Untested
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include "sound_config.h"
-
-#include "mpu401.h"
-
-struct uart401_devc
-{
- int base;
- int irq;
- int *osp;
- void (*midi_input_intr) (int dev, unsigned char data);
- int opened, disabled;
- volatile unsigned char input_byte;
- int my_dev;
- int share_irq;
- spinlock_t lock;
-};
-
-#define DATAPORT (devc->base)
-#define COMDPORT (devc->base+1)
-#define STATPORT (devc->base+1)
-
-static int uart401_status(struct uart401_devc *devc)
-{
- return inb(STATPORT);
-}
-
-#define input_avail(devc) (!(uart401_status(devc)&INPUT_AVAIL))
-#define output_ready(devc) (!(uart401_status(devc)&OUTPUT_READY))
-
-static void uart401_cmd(struct uart401_devc *devc, unsigned char cmd)
-{
- outb((cmd), COMDPORT);
-}
-
-static int uart401_read(struct uart401_devc *devc)
-{
- return inb(DATAPORT);
-}
-
-static void uart401_write(struct uart401_devc *devc, unsigned char byte)
-{
- outb((byte), DATAPORT);
-}
-
-#define OUTPUT_READY 0x40
-#define INPUT_AVAIL 0x80
-#define MPU_ACK 0xFE
-#define MPU_RESET 0xFF
-#define UART_MODE_ON 0x3F
-
-static int reset_uart401(struct uart401_devc *devc);
-static void enter_uart_mode(struct uart401_devc *devc);
-
-static void uart401_input_loop(struct uart401_devc *devc)
-{
- int work_limit=30000;
-
- while (input_avail(devc) && --work_limit)
- {
- unsigned char c = uart401_read(devc);
-
- if (c == MPU_ACK)
- devc->input_byte = c;
- else if (devc->opened & OPEN_READ && devc->midi_input_intr)
- devc->midi_input_intr(devc->my_dev, c);
- }
- if(work_limit==0)
- printk(KERN_WARNING "Too much work in interrupt on uart401 (0x%X). UART jabbering ??\n", devc->base);
-}
-
-irqreturn_t uart401intr(int irq, void *dev_id)
-{
- struct uart401_devc *devc = dev_id;
-
- if (devc == NULL)
- {
- printk(KERN_ERR "uart401: bad devc\n");
- return IRQ_NONE;
- }
-
- if (input_avail(devc))
- uart401_input_loop(devc);
- return IRQ_HANDLED;
-}
-
-static int
-uart401_open(int dev, int mode,
- void (*input) (int dev, unsigned char data),
- void (*output) (int dev)
-)
-{
- struct uart401_devc *devc = (struct uart401_devc *)
- midi_devs[dev]->devc;
-
- if (devc->opened)
- return -EBUSY;
-
- /* Flush the UART */
-
- while (input_avail(devc))
- uart401_read(devc);
-
- devc->midi_input_intr = input;
- devc->opened = mode;
- enter_uart_mode(devc);
- devc->disabled = 0;
-
- return 0;
-}
-
-static void uart401_close(int dev)
-{
- struct uart401_devc *devc = (struct uart401_devc *)
- midi_devs[dev]->devc;
-
- reset_uart401(devc);
- devc->opened = 0;
-}
-
-static int uart401_out(int dev, unsigned char midi_byte)
-{
- int timeout;
- unsigned long flags;
- struct uart401_devc *devc = (struct uart401_devc *)
- midi_devs[dev]->devc;
-
- if (devc->disabled)
- return 1;
- /*
- * Test for input since pending input seems to block the output.
- */
-
- spin_lock_irqsave(&devc->lock,flags);
- if (input_avail(devc))
- uart401_input_loop(devc);
-
- spin_unlock_irqrestore(&devc->lock,flags);
-
- /*
- * Sometimes it takes about 13000 loops before the output becomes ready
- * (After reset). Normally it takes just about 10 loops.
- */
-
- for (timeout = 30000; timeout > 0 && !output_ready(devc); timeout--);
-
- if (!output_ready(devc))
- {
- printk(KERN_WARNING "uart401: Timeout - Device not responding\n");
- devc->disabled = 1;
- reset_uart401(devc);
- enter_uart_mode(devc);
- return 1;
- }
- uart401_write(devc, midi_byte);
- return 1;
-}
-
-static inline int uart401_start_read(int dev)
-{
- return 0;
-}
-
-static inline int uart401_end_read(int dev)
-{
- return 0;
-}
-
-static inline void uart401_kick(int dev)
-{
-}
-
-static inline int uart401_buffer_status(int dev)
-{
- return 0;
-}
-
-#define MIDI_SYNTH_NAME "MPU-401 UART"
-#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
-#include "midi_synth.h"
-
-static const struct midi_operations uart401_operations =
-{
- .owner = THIS_MODULE,
- .info = {"MPU-401 (UART) MIDI", 0, 0, SNDCARD_MPU401},
- .converter = &std_midi_synth,
- .in_info = {0},
- .open = uart401_open,
- .close = uart401_close,
- .outputc = uart401_out,
- .start_read = uart401_start_read,
- .end_read = uart401_end_read,
- .kick = uart401_kick,
- .buffer_status = uart401_buffer_status,
-};
-
-static void enter_uart_mode(struct uart401_devc *devc)
-{
- int ok, timeout;
- unsigned long flags;
-
- spin_lock_irqsave(&devc->lock,flags);
- for (timeout = 30000; timeout > 0 && !output_ready(devc); timeout--);
-
- devc->input_byte = 0;
- uart401_cmd(devc, UART_MODE_ON);
-
- ok = 0;
- for (timeout = 50000; timeout > 0 && !ok; timeout--)
- if (devc->input_byte == MPU_ACK)
- ok = 1;
- else if (input_avail(devc))
- if (uart401_read(devc) == MPU_ACK)
- ok = 1;
-
- spin_unlock_irqrestore(&devc->lock,flags);
-}
-
-static int reset_uart401(struct uart401_devc *devc)
-{
- int ok, timeout, n;
-
- /*
- * Send the RESET command. Try again if no success at the first time.
- */
-
- ok = 0;
-
- for (n = 0; n < 2 && !ok; n++)
- {
- for (timeout = 30000; timeout > 0 && !output_ready(devc); timeout--);
- devc->input_byte = 0;
- uart401_cmd(devc, MPU_RESET);
-
- /*
- * Wait at least 25 msec. This method is not accurate so let's make the
-		 * loop a bit longer. Cannot sleep since this is called during boot.
- */
-
- for (timeout = 50000; timeout > 0 && !ok; timeout--)
- {
- if (devc->input_byte == MPU_ACK) /* Interrupt */
- ok = 1;
- else if (input_avail(devc))
- {
- if (uart401_read(devc) == MPU_ACK)
- ok = 1;
- }
- }
- }
-
- /* Flush input before enabling interrupts */
- if (ok)
- uart401_input_loop(devc);
- else
- DDB(printk("Reset UART401 failed - No hardware detected.\n"));
-
- return ok;
-}
-
-int probe_uart401(struct address_info *hw_config, struct module *owner)
-{
- struct uart401_devc *devc;
- char *name = "MPU-401 (UART) MIDI";
- int ok = 0;
- unsigned long flags;
-
- DDB(printk("Entered probe_uart401()\n"));
-
- /* Default to "not found" */
- hw_config->slots[4] = -1;
-
- if (!request_region(hw_config->io_base, 4, "MPU-401 UART")) {
- printk(KERN_INFO "uart401: could not request_region(%d, 4)\n", hw_config->io_base);
- return 0;
- }
-
- devc = kmalloc(sizeof(struct uart401_devc), GFP_KERNEL);
- if (!devc) {
- printk(KERN_WARNING "uart401: Can't allocate memory\n");
- goto cleanup_region;
- }
-
- devc->base = hw_config->io_base;
- devc->irq = hw_config->irq;
- devc->osp = hw_config->osp;
- devc->midi_input_intr = NULL;
- devc->opened = 0;
- devc->input_byte = 0;
- devc->my_dev = 0;
- devc->share_irq = 0;
- spin_lock_init(&devc->lock);
-
- spin_lock_irqsave(&devc->lock,flags);
- ok = reset_uart401(devc);
- spin_unlock_irqrestore(&devc->lock,flags);
-
- if (!ok)
- goto cleanup_devc;
-
- if (hw_config->name)
- name = hw_config->name;
-
- if (devc->irq < 0) {
- devc->share_irq = 1;
- devc->irq *= -1;
- } else
- devc->share_irq = 0;
-
- if (!devc->share_irq)
- if (request_irq(devc->irq, uart401intr, 0, "MPU-401 UART", devc) < 0) {
- printk(KERN_WARNING "uart401: Failed to allocate IRQ%d\n", devc->irq);
- devc->share_irq = 1;
- }
- devc->my_dev = sound_alloc_mididev();
- enter_uart_mode(devc);
-
- if (devc->my_dev == -1) {
- printk(KERN_INFO "uart401: Too many midi devices detected\n");
- goto cleanup_irq;
- }
- conf_printf(name, hw_config);
- midi_devs[devc->my_dev] = kmemdup(&uart401_operations,
- sizeof(struct midi_operations),
- GFP_KERNEL);
- if (!midi_devs[devc->my_dev]) {
- printk(KERN_ERR "uart401: Failed to allocate memory\n");
- goto cleanup_unload_mididev;
- }
-
- if (owner)
- midi_devs[devc->my_dev]->owner = owner;
-
- midi_devs[devc->my_dev]->devc = devc;
- midi_devs[devc->my_dev]->converter = kmemdup(&std_midi_synth,
- sizeof(struct synth_operations),
- GFP_KERNEL);
-
- if (!midi_devs[devc->my_dev]->converter) {
- printk(KERN_WARNING "uart401: Failed to allocate memory\n");
- goto cleanup_midi_devs;
- }
- strcpy(midi_devs[devc->my_dev]->info.name, name);
- midi_devs[devc->my_dev]->converter->id = "UART401";
- midi_devs[devc->my_dev]->converter->midi_dev = devc->my_dev;
-
- if (owner)
- midi_devs[devc->my_dev]->converter->owner = owner;
-
- hw_config->slots[4] = devc->my_dev;
- sequencer_init();
- devc->opened = 0;
- return 1;
-cleanup_midi_devs:
- kfree(midi_devs[devc->my_dev]);
-cleanup_unload_mididev:
- sound_unload_mididev(devc->my_dev);
-cleanup_irq:
- if (!devc->share_irq)
- free_irq(devc->irq, devc);
-cleanup_devc:
- kfree(devc);
-cleanup_region:
- release_region(hw_config->io_base, 4);
- return 0;
-}
-
-void unload_uart401(struct address_info *hw_config)
-{
- struct uart401_devc *devc;
- int n=hw_config->slots[4];
-
- /* Not set up */
- if(n==-1 || midi_devs[n]==NULL)
- return;
-
- /* Not allocated (erm ??) */
-
- devc = midi_devs[hw_config->slots[4]]->devc;
- if (devc == NULL)
- return;
-
- reset_uart401(devc);
- release_region(hw_config->io_base, 4);
-
- if (!devc->share_irq)
- free_irq(devc->irq, devc);
- kfree(midi_devs[devc->my_dev]->converter);
- kfree(midi_devs[devc->my_dev]);
- kfree(devc);
-
- /* This kills midi_devs[x] */
- sound_unload_mididev(hw_config->slots[4]);
-}
-
-EXPORT_SYMBOL(probe_uart401);
-EXPORT_SYMBOL(unload_uart401);
-EXPORT_SYMBOL(uart401intr);
-
-static struct address_info cfg_mpu;
-
-static int io = -1;
-static int irq = -1;
-
-module_param_hw(io, int, ioport, 0444);
-module_param_hw(irq, int, irq, 0444);
-
-
-static int __init init_uart401(void)
-{
- cfg_mpu.irq = irq;
- cfg_mpu.io_base = io;
-
- /* Can be loaded either for module use or to provide functions
- to others */
- if (cfg_mpu.io_base != -1 && cfg_mpu.irq != -1) {
- printk(KERN_INFO "MPU-401 UART driver Copyright (C) Hannu Savolainen 1993-1997");
- if (!probe_uart401(&cfg_mpu, THIS_MODULE))
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void __exit cleanup_uart401(void)
-{
- if (cfg_mpu.io_base != -1 && cfg_mpu.irq != -1)
- unload_uart401(&cfg_mpu);
-}
-
-module_init(init_uart401);
-module_exit(cleanup_uart401);
-
-#ifndef MODULE
-static int __init setup_uart401(char *str)
-{
- /* io, irq */
- int ints[3];
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- io = ints[1];
- irq = ints[2];
-
- return 1;
-}
-
-__setup("uart401=", setup_uart401);
-#endif
-MODULE_LICENSE("GPL");
diff --git a/sound/oss/uart6850.c b/sound/oss/uart6850.c
deleted file mode 100644
index a9d3f7568525..000000000000
--- a/sound/oss/uart6850.c
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * sound/oss/uart6850.c
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- * Extended by Alan Cox for Red Hat Software. Now a loadable MIDI driver.
- * 28/4/97 - (C) Copyright Alan Cox. Released under the GPL version 2.
- *
- * Alan Cox: Updated for new modular code. Removed snd_* irq handling. Now
- * uses native linux resources
- * Christoph Hellwig: Adapted to module_init/module_exit
- * Jeff Garzik: Made it work again, in theory
- * FIXME: If the request_irq() succeeds, the probe succeeds. Ug.
- *
- * Status: Testing required (no shit -jgarzik)
- *
- *
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-/* Mon Nov 22 22:38:35 MET 1993 marco@driq.home.usn.nl:
- * added 6850 support, used with COVOX SoundMaster II and custom cards.
- */
-
-#include "sound_config.h"
-
-static int uart6850_base = 0x330;
-
-static int *uart6850_osp;
-
-#define DATAPORT (uart6850_base)
-#define COMDPORT (uart6850_base+1)
-#define STATPORT (uart6850_base+1)
-
-static int uart6850_status(void)
-{
- return inb(STATPORT);
-}
-
-#define input_avail() (uart6850_status()&INPUT_AVAIL)
-#define output_ready() (uart6850_status()&OUTPUT_READY)
-
-static void uart6850_cmd(unsigned char cmd)
-{
- outb(cmd, COMDPORT);
-}
-
-static int uart6850_read(void)
-{
- return inb(DATAPORT);
-}
-
-static void uart6850_write(unsigned char byte)
-{
- outb(byte, DATAPORT);
-}
-
-#define OUTPUT_READY 0x02 /* Mask for data ready Bit */
-#define INPUT_AVAIL 0x01 /* Mask for Data Send Ready Bit */
-
-#define UART_RESET 0x95
-#define UART_MODE_ON 0x03
-
-static int uart6850_opened;
-static int uart6850_irq;
-static int uart6850_detected;
-static int my_dev;
-static DEFINE_SPINLOCK(lock);
-
-static void (*midi_input_intr) (int dev, unsigned char data);
-static void poll_uart6850(unsigned long dummy);
-
-
-static DEFINE_TIMER(uart6850_timer, poll_uart6850);
-
-static void uart6850_input_loop(void)
-{
- int count = 10;
-
- while (count)
- {
- /*
- * Not timed out
- */
- if (input_avail())
- {
- unsigned char c = uart6850_read();
- count = 100;
- if (uart6850_opened & OPEN_READ)
- midi_input_intr(my_dev, c);
- }
- else
- {
- while (!input_avail() && count)
- count--;
- }
- }
-}
-
-static irqreturn_t m6850intr(int irq, void *dev_id)
-{
- if (input_avail())
- uart6850_input_loop();
- return IRQ_HANDLED;
-}
-
-/*
- * It looks like there are no input interrupts in UART mode. Let's try
- * polling.
- */
-
-static void poll_uart6850(unsigned long dummy)
-{
- unsigned long flags;
-
- if (!(uart6850_opened & OPEN_READ))
- return; /* Device has been closed */
-
- spin_lock_irqsave(&lock,flags);
- if (input_avail())
- uart6850_input_loop();
-
- uart6850_timer.expires = 1 + jiffies;
- add_timer(&uart6850_timer);
-
- /*
- * Come back later
- */
-
- spin_unlock_irqrestore(&lock,flags);
-}
-
-static int uart6850_open(int dev, int mode,
- void (*input) (int dev, unsigned char data),
- void (*output) (int dev)
-)
-{
- if (uart6850_opened)
- {
-/* printk("Midi6850: Midi busy\n");*/
- return -EBUSY;
- }
-
- uart6850_cmd(UART_RESET);
- uart6850_input_loop();
- midi_input_intr = input;
- uart6850_opened = mode;
- poll_uart6850(0); /*
- * Enable input polling
- */
-
- return 0;
-}
-
-static void uart6850_close(int dev)
-{
- uart6850_cmd(UART_MODE_ON);
- del_timer(&uart6850_timer);
- uart6850_opened = 0;
-}
-
-static int uart6850_out(int dev, unsigned char midi_byte)
-{
- int timeout;
- unsigned long flags;
-
- /*
- * Test for input since pending input seems to block the output.
- */
-
- spin_lock_irqsave(&lock,flags);
-
- if (input_avail())
- uart6850_input_loop();
-
- spin_unlock_irqrestore(&lock,flags);
-
- /*
- * Sometimes it takes about 13000 loops before the output becomes ready
- * (After reset). Normally it takes just about 10 loops.
- */
-
- for (timeout = 30000; timeout > 0 && !output_ready(); timeout--); /*
- * Wait
- */
- if (!output_ready())
- {
- printk(KERN_WARNING "Midi6850: Timeout\n");
- return 0;
- }
- uart6850_write(midi_byte);
- return 1;
-}
-
-static inline int uart6850_command(int dev, unsigned char *midi_byte)
-{
- return 1;
-}
-
-static inline int uart6850_start_read(int dev)
-{
- return 0;
-}
-
-static inline int uart6850_end_read(int dev)
-{
- return 0;
-}
-
-static inline void uart6850_kick(int dev)
-{
-}
-
-static inline int uart6850_buffer_status(int dev)
-{
- return 0; /*
- * No data in buffers
- */
-}
-
-#define MIDI_SYNTH_NAME "6850 UART Midi"
-#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
-#include "midi_synth.h"
-
-static struct midi_operations uart6850_operations =
-{
- .owner = THIS_MODULE,
- .info = {"6850 UART", 0, 0, SNDCARD_UART6850},
- .converter = &std_midi_synth,
- .in_info = {0},
- .open = uart6850_open,
- .close = uart6850_close,
- .outputc = uart6850_out,
- .start_read = uart6850_start_read,
- .end_read = uart6850_end_read,
- .kick = uart6850_kick,
- .command = uart6850_command,
- .buffer_status = uart6850_buffer_status
-};
-
-
-static void __init attach_uart6850(struct address_info *hw_config)
-{
- int ok, timeout;
- unsigned long flags;
-
- if (!uart6850_detected)
- return;
-
- if ((my_dev = sound_alloc_mididev()) == -1)
- {
- printk(KERN_INFO "uart6850: Too many midi devices detected\n");
- return;
- }
- uart6850_base = hw_config->io_base;
- uart6850_osp = hw_config->osp;
- uart6850_irq = hw_config->irq;
-
- spin_lock_irqsave(&lock,flags);
-
- for (timeout = 30000; timeout > 0 && !output_ready(); timeout--); /*
- * Wait
- */
- uart6850_cmd(UART_MODE_ON);
- ok = 1;
- spin_unlock_irqrestore(&lock,flags);
-
- conf_printf("6850 Midi Interface", hw_config);
-
- std_midi_synth.midi_dev = my_dev;
- hw_config->slots[4] = my_dev;
- midi_devs[my_dev] = &uart6850_operations;
- sequencer_init();
-}
-
-static inline int reset_uart6850(void)
-{
- uart6850_read();
- return 1; /*
- * OK
- */
-}
-
-static int __init probe_uart6850(struct address_info *hw_config)
-{
- int ok;
-
- uart6850_osp = hw_config->osp;
- uart6850_base = hw_config->io_base;
- uart6850_irq = hw_config->irq;
-
- if (request_irq(uart6850_irq, m6850intr, 0, "MIDI6850", NULL) < 0)
- return 0;
-
- ok = reset_uart6850();
- uart6850_detected = ok;
- return ok;
-}
-
-static void __exit unload_uart6850(struct address_info *hw_config)
-{
- free_irq(hw_config->irq, NULL);
- sound_unload_mididev(hw_config->slots[4]);
-}
-
-static struct address_info cfg_mpu;
-
-static int __initdata io = -1;
-static int __initdata irq = -1;
-
-module_param_hw(io, int, ioport, 0);
-module_param_hw(irq, int, irq, 0);
-
-static int __init init_uart6850(void)
-{
- cfg_mpu.io_base = io;
- cfg_mpu.irq = irq;
-
- if (cfg_mpu.io_base == -1 || cfg_mpu.irq == -1) {
- printk(KERN_INFO "uart6850: irq and io must be set.\n");
- return -EINVAL;
- }
-
- if (probe_uart6850(&cfg_mpu))
- return -ENODEV;
- attach_uart6850(&cfg_mpu);
-
- return 0;
-}
-
-static void __exit cleanup_uart6850(void)
-{
- unload_uart6850(&cfg_mpu);
-}
-
-module_init(init_uart6850);
-module_exit(cleanup_uart6850);
-
-#ifndef MODULE
-static int __init setup_uart6850(char *str)
-{
- /* io, irq */
- int ints[3];
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- io = ints[1];
- irq = ints[2];
-
- return 1;
-}
-__setup("uart6850=", setup_uart6850);
-#endif
-MODULE_LICENSE("GPL");
diff --git a/sound/oss/ulaw.h b/sound/oss/ulaw.h
deleted file mode 100644
index ee898a0f78ce..000000000000
--- a/sound/oss/ulaw.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-static unsigned char ulaw_dsp[] = {
- 3, 7, 11, 15, 19, 23, 27, 31,
- 35, 39, 43, 47, 51, 55, 59, 63,
- 66, 68, 70, 72, 74, 76, 78, 80,
- 82, 84, 86, 88, 90, 92, 94, 96,
- 98, 99, 100, 101, 102, 103, 104, 105,
- 106, 107, 108, 109, 110, 111, 112, 113,
- 113, 114, 114, 115, 115, 116, 116, 117,
- 117, 118, 118, 119, 119, 120, 120, 121,
- 121, 121, 122, 122, 122, 122, 123, 123,
- 123, 123, 124, 124, 124, 124, 125, 125,
- 125, 125, 125, 125, 126, 126, 126, 126,
- 126, 126, 126, 126, 127, 127, 127, 127,
- 127, 127, 127, 127, 127, 127, 127, 127,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 253, 249, 245, 241, 237, 233, 229, 225,
- 221, 217, 213, 209, 205, 201, 197, 193,
- 190, 188, 186, 184, 182, 180, 178, 176,
- 174, 172, 170, 168, 166, 164, 162, 160,
- 158, 157, 156, 155, 154, 153, 152, 151,
- 150, 149, 148, 147, 146, 145, 144, 143,
- 143, 142, 142, 141, 141, 140, 140, 139,
- 139, 138, 138, 137, 137, 136, 136, 135,
- 135, 135, 134, 134, 134, 134, 133, 133,
- 133, 133, 132, 132, 132, 132, 131, 131,
- 131, 131, 131, 131, 130, 130, 130, 130,
- 130, 130, 130, 130, 129, 129, 129, 129,
- 129, 129, 129, 129, 129, 129, 129, 129,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
- 128, 128, 128, 128, 128, 128, 128, 128,
-};
-
-static unsigned char dsp_ulaw[] = {
- 0, 0, 0, 0, 0, 1, 1, 1,
- 1, 2, 2, 2, 2, 3, 3, 3,
- 3, 4, 4, 4, 4, 5, 5, 5,
- 5, 6, 6, 6, 6, 7, 7, 7,
- 7, 8, 8, 8, 8, 9, 9, 9,
- 9, 10, 10, 10, 10, 11, 11, 11,
- 11, 12, 12, 12, 12, 13, 13, 13,
- 13, 14, 14, 14, 14, 15, 15, 15,
- 15, 16, 16, 17, 17, 18, 18, 19,
- 19, 20, 20, 21, 21, 22, 22, 23,
- 23, 24, 24, 25, 25, 26, 26, 27,
- 27, 28, 28, 29, 29, 30, 30, 31,
- 31, 32, 33, 34, 35, 36, 37, 38,
- 39, 40, 41, 42, 43, 44, 45, 46,
- 47, 49, 51, 53, 55, 57, 59, 61,
- 63, 66, 70, 74, 78, 84, 92, 104,
- 254, 231, 219, 211, 205, 201, 197, 193,
- 190, 188, 186, 184, 182, 180, 178, 176,
- 175, 174, 173, 172, 171, 170, 169, 168,
- 167, 166, 165, 164, 163, 162, 161, 160,
- 159, 159, 158, 158, 157, 157, 156, 156,
- 155, 155, 154, 154, 153, 153, 152, 152,
- 151, 151, 150, 150, 149, 149, 148, 148,
- 147, 147, 146, 146, 145, 145, 144, 144,
- 143, 143, 143, 143, 142, 142, 142, 142,
- 141, 141, 141, 141, 140, 140, 140, 140,
- 139, 139, 139, 139, 138, 138, 138, 138,
- 137, 137, 137, 137, 136, 136, 136, 136,
- 135, 135, 135, 135, 134, 134, 134, 134,
- 133, 133, 133, 133, 132, 132, 132, 132,
- 131, 131, 131, 131, 130, 130, 130, 130,
- 129, 129, 129, 129, 128, 128, 128, 128,
-};
diff --git a/sound/oss/v_midi.c b/sound/oss/v_midi.c
deleted file mode 100644
index fc0ba276cc8f..000000000000
--- a/sound/oss/v_midi.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * sound/oss/v_midi.c
- *
- * The low level driver for the Sound Blaster DS chips.
- *
- *
- * Copyright (C) by Hannu Savolainen 1993-1996
- *
- * USS/Lite for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- * ??
- *
- * Changes
- * Alan Cox Modularisation, changed memory allocations
- * Christoph Hellwig Adapted to module_init/module_exit
- *
- * Status
- * Untested
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include "sound_config.h"
-
-#include "v_midi.h"
-
-static vmidi_devc *v_devc[2] = { NULL, NULL};
-static int midi1,midi2;
-static void *midi_mem = NULL;
-
-/*
- * The DSP channel can be used either for input or output. Variable
- * 'sb_irq_mode' will be set when the program calls read or write first time
- * after open. Current version doesn't support mode changes without closing
- * and reopening the device. Support for this feature may be implemented in a
- * future version of this driver.
- */
-
-
-static int v_midi_open (int dev, int mode,
- void (*input) (int dev, unsigned char data),
- void (*output) (int dev)
-)
-{
- vmidi_devc *devc = midi_devs[dev]->devc;
- unsigned long flags;
-
- if (devc == NULL)
- return -ENXIO;
-
- spin_lock_irqsave(&devc->lock,flags);
- if (devc->opened)
- {
- spin_unlock_irqrestore(&devc->lock,flags);
- return -EBUSY;
- }
- devc->opened = 1;
- spin_unlock_irqrestore(&devc->lock,flags);
-
- devc->intr_active = 1;
-
- if (mode & OPEN_READ)
- {
- devc->input_opened = 1;
- devc->midi_input_intr = input;
- }
-
- return 0;
-}
-
-static void v_midi_close (int dev)
-{
- vmidi_devc *devc = midi_devs[dev]->devc;
- unsigned long flags;
-
- if (devc == NULL)
- return;
-
- spin_lock_irqsave(&devc->lock,flags);
- devc->intr_active = 0;
- devc->input_opened = 0;
- devc->opened = 0;
- spin_unlock_irqrestore(&devc->lock,flags);
-}
-
-static int v_midi_out (int dev, unsigned char midi_byte)
-{
- vmidi_devc *devc = midi_devs[dev]->devc;
- vmidi_devc *pdevc;
-
- if (devc == NULL)
- return -ENXIO;
-
- pdevc = midi_devs[devc->pair_mididev]->devc;
- if (pdevc->input_opened > 0){
- if (MIDIbuf_avail(pdevc->my_mididev) > 500)
- return 0;
- pdevc->midi_input_intr (pdevc->my_mididev, midi_byte);
- }
- return 1;
-}
-
-static inline int v_midi_start_read (int dev)
-{
- return 0;
-}
-
-static int v_midi_end_read (int dev)
-{
- vmidi_devc *devc = midi_devs[dev]->devc;
- if (devc == NULL)
- return -ENXIO;
-
- devc->intr_active = 0;
- return 0;
-}
-
-/* why -EPERM and not -EINVAL?? */
-
-static inline int v_midi_ioctl (int dev, unsigned cmd, void __user *arg)
-{
- return -EPERM;
-}
-
-
-#define MIDI_SYNTH_NAME "Loopback MIDI"
-#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
-
-#include "midi_synth.h"
-
-static struct midi_operations v_midi_operations =
-{
- .owner = THIS_MODULE,
- .info = {"Loopback MIDI Port 1", 0, 0, SNDCARD_VMIDI},
- .converter = &std_midi_synth,
- .in_info = {0},
- .open = v_midi_open,
- .close = v_midi_close,
- .ioctl = v_midi_ioctl,
- .outputc = v_midi_out,
- .start_read = v_midi_start_read,
- .end_read = v_midi_end_read,
-};
-
-static struct midi_operations v_midi_operations2 =
-{
- .owner = THIS_MODULE,
- .info = {"Loopback MIDI Port 2", 0, 0, SNDCARD_VMIDI},
- .converter = &std_midi_synth,
- .in_info = {0},
- .open = v_midi_open,
- .close = v_midi_close,
- .ioctl = v_midi_ioctl,
- .outputc = v_midi_out,
- .start_read = v_midi_start_read,
- .end_read = v_midi_end_read,
-};
-
-/*
- * We kmalloc just one of these - it makes life simpler and the code
- * cleaner and the memory handling far more efficient
- */
-
-struct vmidi_memory
-{
- /* Must be first */
- struct midi_operations m_ops[2];
- struct synth_operations s_ops[2];
- struct vmidi_devc v_ops[2];
-};
-
-static void __init attach_v_midi (struct address_info *hw_config)
-{
- struct vmidi_memory *m;
- /* printk("Attaching v_midi device.....\n"); */
-
- midi1 = sound_alloc_mididev();
- if (midi1 == -1)
- {
- printk(KERN_ERR "v_midi: Too many midi devices detected\n");
- return;
- }
-
- m = kmalloc(sizeof(struct vmidi_memory), GFP_KERNEL);
- if (m == NULL)
- {
- printk(KERN_WARNING "Loopback MIDI: Failed to allocate memory\n");
- sound_unload_mididev(midi1);
- return;
- }
-
- midi_mem = m;
-
- midi_devs[midi1] = &m->m_ops[0];
-
-
- midi2 = sound_alloc_mididev();
- if (midi2 == -1)
- {
- printk (KERN_ERR "v_midi: Too many midi devices detected\n");
- kfree(m);
- sound_unload_mididev(midi1);
- return;
- }
-
- midi_devs[midi2] = &m->m_ops[1];
-
- /* printk("VMIDI1: %d VMIDI2: %d\n",midi1,midi2); */
-
- /* for MIDI-1 */
- v_devc[0] = &m->v_ops[0];
- memcpy ((char *) midi_devs[midi1], (char *) &v_midi_operations,
- sizeof (struct midi_operations));
-
- v_devc[0]->my_mididev = midi1;
- v_devc[0]->pair_mididev = midi2;
- v_devc[0]->opened = v_devc[0]->input_opened = 0;
- v_devc[0]->intr_active = 0;
- v_devc[0]->midi_input_intr = NULL;
- spin_lock_init(&v_devc[0]->lock);
-
- midi_devs[midi1]->devc = v_devc[0];
-
- midi_devs[midi1]->converter = &m->s_ops[0];
- std_midi_synth.midi_dev = midi1;
- memcpy ((char *) midi_devs[midi1]->converter, (char *) &std_midi_synth,
- sizeof (struct synth_operations));
- midi_devs[midi1]->converter->id = "V_MIDI 1";
-
- /* for MIDI-2 */
- v_devc[1] = &m->v_ops[1];
-
- memcpy ((char *) midi_devs[midi2], (char *) &v_midi_operations2,
- sizeof (struct midi_operations));
-
- v_devc[1]->my_mididev = midi2;
- v_devc[1]->pair_mididev = midi1;
- v_devc[1]->opened = v_devc[1]->input_opened = 0;
- v_devc[1]->intr_active = 0;
- v_devc[1]->midi_input_intr = NULL;
- spin_lock_init(&v_devc[1]->lock);
-
- midi_devs[midi2]->devc = v_devc[1];
- midi_devs[midi2]->converter = &m->s_ops[1];
-
- std_midi_synth.midi_dev = midi2;
- memcpy ((char *) midi_devs[midi2]->converter, (char *) &std_midi_synth,
- sizeof (struct synth_operations));
- midi_devs[midi2]->converter->id = "V_MIDI 2";
-
- sequencer_init();
- /* printk("Attached v_midi device\n"); */
-}
-
-static inline int __init probe_v_midi(struct address_info *hw_config)
-{
- return(1); /* always OK */
-}
-
-
-static void __exit unload_v_midi(struct address_info *hw_config)
-{
- sound_unload_mididev(midi1);
- sound_unload_mididev(midi2);
- kfree(midi_mem);
-}
-
-static struct address_info cfg; /* dummy */
-
-static int __init init_vmidi(void)
-{
- printk("MIDI Loopback device driver\n");
- if (!probe_v_midi(&cfg))
- return -ENODEV;
- attach_v_midi(&cfg);
-
- return 0;
-}
-
-static void __exit cleanup_vmidi(void)
-{
- unload_v_midi(&cfg);
-}
-
-module_init(init_vmidi);
-module_exit(cleanup_vmidi);
-MODULE_LICENSE("GPL");
diff --git a/sound/oss/v_midi.h b/sound/oss/v_midi.h
deleted file mode 100644
index f4fc2bed07f8..000000000000
--- a/sound/oss/v_midi.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-typedef struct vmidi_devc {
- int dev;
-
- /* State variables */
- int opened;
- spinlock_t lock;
-
- /* MIDI fields */
- int my_mididev;
- int pair_mididev;
- int input_opened;
- int intr_active;
- void (*midi_input_intr) (int dev, unsigned char data);
- } vmidi_devc;
diff --git a/sound/oss/vidc.c b/sound/oss/vidc.c
deleted file mode 100644
index 92ca5bee1860..000000000000
--- a/sound/oss/vidc.c
+++ /dev/null
@@ -1,557 +0,0 @@
-/*
- * linux/drivers/sound/vidc.c
- *
- * Copyright (C) 1997-2000 by Russell King <rmk@arm.linux.org.uk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * VIDC20 audio driver.
- *
- * The VIDC20 sound hardware consists of the VIDC20 itself, a DAC and a DMA
- * engine. The DMA transfers fixed-format (16-bit little-endian linear)
- * samples to the VIDC20, which then transfers this data serially to the
- * DACs. The sample rate is controlled by the VIDC.
- *
- * We support a mixer device, but it is currently non-functional.
- */
-
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-
-#include <mach/hardware.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/hardware/iomd.h>
-#include <asm/irq.h>
-
-#include "sound_config.h"
-#include "vidc.h"
-
-#ifndef _SIOC_TYPE
-#define _SIOC_TYPE(x) _IOC_TYPE(x)
-#endif
-#ifndef _SIOC_NR
-#define _SIOC_NR(x) _IOC_NR(x)
-#endif
-
-#define VIDC_SOUND_CLOCK (250000)
-#define VIDC_SOUND_CLOCK_EXT (176400)
-
-/*
- * When using SERIAL SOUND mode (external DAC), the number of physical
- * channels is fixed at 2.
- */
-static int vidc_busy;
-static int vidc_adev;
-static int vidc_audio_rate;
-static char vidc_audio_format;
-static char vidc_audio_channels;
-
-static unsigned char vidc_level_l[SOUND_MIXER_NRDEVICES] = {
- 85, /* master */
- 50, /* bass */
- 50, /* treble */
- 0, /* synth */
- 75, /* pcm */
- 0, /* speaker */
- 100, /* ext line */
- 0, /* mic */
- 100, /* CD */
- 0,
-};
-
-static unsigned char vidc_level_r[SOUND_MIXER_NRDEVICES] = {
- 85, /* master */
- 50, /* bass */
- 50, /* treble */
- 0, /* synth */
- 75, /* pcm */
- 0, /* speaker */
- 100, /* ext line */
- 0, /* mic */
- 100, /* CD */
- 0,
-};
-
-static unsigned int vidc_audio_volume_l; /* left PCM vol, 0 - 65536 */
-static unsigned int vidc_audio_volume_r; /* right PCM vol, 0 - 65536 */
-
-extern void vidc_update_filler(int bits, int channels);
-extern int softoss_dev;
-
-static void
-vidc_mixer_set(int mdev, unsigned int level)
-{
- unsigned int lev_l = level & 0x007f;
- unsigned int lev_r = (level & 0x7f00) >> 8;
- unsigned int mlev_l, mlev_r;
-
- if (lev_l > 100)
- lev_l = 100;
- if (lev_r > 100)
- lev_r = 100;
-
-#define SCALE(lev,master) ((lev) * (master) * 65536 / 10000)
-
- mlev_l = vidc_level_l[SOUND_MIXER_VOLUME];
- mlev_r = vidc_level_r[SOUND_MIXER_VOLUME];
-
- switch (mdev) {
- case SOUND_MIXER_VOLUME:
- case SOUND_MIXER_PCM:
- vidc_level_l[mdev] = lev_l;
- vidc_level_r[mdev] = lev_r;
-
- vidc_audio_volume_l = SCALE(lev_l, mlev_l);
- vidc_audio_volume_r = SCALE(lev_r, mlev_r);
-/*printk("VIDC: PCM vol %05X %05X\n", vidc_audio_volume_l, vidc_audio_volume_r);*/
- break;
- }
-#undef SCALE
-}
-
-static int vidc_mixer_ioctl(int dev, unsigned int cmd, void __user *arg)
-{
- unsigned int val;
- unsigned int mdev;
-
- if (_SIOC_TYPE(cmd) != 'M')
- return -EINVAL;
-
- mdev = _SIOC_NR(cmd);
-
- if (_SIOC_DIR(cmd) & _SIOC_WRITE) {
- if (get_user(val, (unsigned int __user *)arg))
- return -EFAULT;
-
- if (mdev < SOUND_MIXER_NRDEVICES)
- vidc_mixer_set(mdev, val);
- else
- return -EINVAL;
- }
-
- /*
- * Return parameters
- */
- switch (mdev) {
- case SOUND_MIXER_RECSRC:
- val = 0;
- break;
-
- case SOUND_MIXER_DEVMASK:
- val = SOUND_MASK_VOLUME | SOUND_MASK_PCM | SOUND_MASK_SYNTH;
- break;
-
- case SOUND_MIXER_STEREODEVS:
- val = SOUND_MASK_VOLUME | SOUND_MASK_PCM | SOUND_MASK_SYNTH;
- break;
-
- case SOUND_MIXER_RECMASK:
- val = 0;
- break;
-
- case SOUND_MIXER_CAPS:
- val = 0;
- break;
-
- default:
- if (mdev < SOUND_MIXER_NRDEVICES)
- val = vidc_level_l[mdev] | vidc_level_r[mdev] << 8;
- else
- return -EINVAL;
- }
-
- return put_user(val, (unsigned int __user *)arg) ? -EFAULT : 0;
-}
-
-static unsigned int vidc_audio_set_format(int dev, unsigned int fmt)
-{
- switch (fmt) {
- default:
- fmt = AFMT_S16_LE;
- case AFMT_U8:
- case AFMT_S8:
- case AFMT_S16_LE:
- vidc_audio_format = fmt;
- vidc_update_filler(vidc_audio_format, vidc_audio_channels);
- case AFMT_QUERY:
- break;
- }
- return vidc_audio_format;
-}
-
-#define my_abs(i) ((i)<0 ? -(i) : (i))
-
-static int vidc_audio_set_speed(int dev, int rate)
-{
- if (rate) {
- unsigned int hwctrl, hwrate, hwrate_ext, rate_int, rate_ext;
- unsigned int diff_int, diff_ext;
- unsigned int newsize, new2size;
-
- hwctrl = 0x00000003;
-
- /* Using internal clock */
- hwrate = (((VIDC_SOUND_CLOCK * 2) / rate) + 1) >> 1;
- if (hwrate < 3)
- hwrate = 3;
- if (hwrate > 255)
- hwrate = 255;
-
-		/* Using external clock */
- hwrate_ext = (((VIDC_SOUND_CLOCK_EXT * 2) / rate) + 1) >> 1;
- if (hwrate_ext < 3)
- hwrate_ext = 3;
- if (hwrate_ext > 255)
- hwrate_ext = 255;
-
- rate_int = VIDC_SOUND_CLOCK / hwrate;
- rate_ext = VIDC_SOUND_CLOCK_EXT / hwrate_ext;
-
-		/* Choose between external and internal clock */
- diff_int = my_abs(rate_ext-rate);
- diff_ext = my_abs(rate_int-rate);
- if (diff_ext < diff_int) {
- /*printk("VIDC: external %d %d %d\n", rate, rate_ext, hwrate_ext);*/
- hwrate=hwrate_ext;
- hwctrl=0x00000002;
- /* Allow roughly 0.4% tolerance */
- if (diff_ext > (rate/256))
- rate=rate_ext;
- } else {
- /*printk("VIDC: internal %d %d %d\n", rate, rate_int, hwrate);*/
- hwctrl=0x00000003;
- /* Allow roughly 0.4% tolerance */
- if (diff_int > (rate/256))
- rate=rate_int;
- }
-
- vidc_writel(0xb0000000 | (hwrate - 2));
- vidc_writel(0xb1000000 | hwctrl);
-
- newsize = (10000 / hwrate) & ~3;
- if (newsize < 208)
- newsize = 208;
- if (newsize > 4096)
- newsize = 4096;
- for (new2size = 128; new2size < newsize; new2size <<= 1);
- if (new2size - newsize > newsize - (new2size >> 1))
- new2size >>= 1;
- if (new2size > 4096) {
- printk(KERN_ERR "VIDC: error: dma buffer (%d) %d > 4K\n",
- newsize, new2size);
- new2size = 4096;
- }
- /*printk("VIDC: dma size %d\n", new2size);*/
- dma_bufsize = new2size;
- vidc_audio_rate = rate;
- }
- return vidc_audio_rate;
-}
-
-static short vidc_audio_set_channels(int dev, short channels)
-{
- switch (channels) {
- default:
- channels = 2;
- case 1:
- case 2:
- vidc_audio_channels = channels;
- vidc_update_filler(vidc_audio_format, vidc_audio_channels);
- case 0:
- break;
- }
- return vidc_audio_channels;
-}
-
-/*
- * Open the device
- */
-static int vidc_audio_open(int dev, int mode)
-{
- /* This audio device does not have recording capability */
- if (mode == OPEN_READ)
- return -EPERM;
-
- if (vidc_busy)
- return -EBUSY;
-
- vidc_busy = 1;
- return 0;
-}
-
-/*
- * Close the device
- */
-static void vidc_audio_close(int dev)
-{
- vidc_busy = 0;
-}
-
-/*
- * Output a block via DMA to sound device.
- *
- * We just set the DMA start and count; the DMA interrupt routine
- * will take care of formatting the samples (via the appropriate
- * vidc_filler routine), and flag via vidc_audio_dma_interrupt when
- * more data is required.
- */
-static void
-vidc_audio_output_block(int dev, unsigned long buf, int total_count, int one)
-{
- struct dma_buffparms *dmap = audio_devs[dev]->dmap_out;
- unsigned long flags;
-
- local_irq_save(flags);
- dma_start = buf - (unsigned long)dmap->raw_buf_phys + (unsigned long)dmap->raw_buf;
- dma_count = total_count;
- local_irq_restore(flags);
-}
-
-static void
-vidc_audio_start_input(int dev, unsigned long buf, int count, int intrflag)
-{
-}
-
-static int vidc_audio_prepare_for_input(int dev, int bsize, int bcount)
-{
- return -EINVAL;
-}
-
-static irqreturn_t vidc_audio_dma_interrupt(void)
-{
- DMAbuf_outputintr(vidc_adev, 1);
- return IRQ_HANDLED;
-}
-
-/*
- * Prepare for outputting samples.
- *
- * Each buffer that will be passed will be `bsize' bytes long,
- * with a total of `bcount' buffers.
- */
-static int vidc_audio_prepare_for_output(int dev, int bsize, int bcount)
-{
- struct audio_operations *adev = audio_devs[dev];
-
- dma_interrupt = NULL;
- adev->dmap_out->flags |= DMA_NODMA;
-
- return 0;
-}
-
-/*
- * Stop our current operation.
- */
-static void vidc_audio_reset(int dev)
-{
- dma_interrupt = NULL;
-}
-
-static int vidc_audio_local_qlen(int dev)
-{
- return /*dma_count !=*/ 0;
-}
-
-static void vidc_audio_trigger(int dev, int enable_bits)
-{
- struct audio_operations *adev = audio_devs[dev];
-
- if (enable_bits & PCM_ENABLE_OUTPUT) {
- if (!(adev->dmap_out->flags & DMA_ACTIVE)) {
- unsigned long flags;
-
- local_irq_save(flags);
-
-			/* prevent recursion */
- adev->dmap_out->flags |= DMA_ACTIVE;
-
- dma_interrupt = vidc_audio_dma_interrupt;
- vidc_sound_dma_irq(0, NULL);
- iomd_writeb(DMA_CR_E | 0x10, IOMD_SD0CR);
-
- local_irq_restore(flags);
- }
- }
-}
-
-static struct audio_driver vidc_audio_driver =
-{
- .owner = THIS_MODULE,
- .open = vidc_audio_open,
- .close = vidc_audio_close,
- .output_block = vidc_audio_output_block,
- .start_input = vidc_audio_start_input,
- .prepare_for_input = vidc_audio_prepare_for_input,
- .prepare_for_output = vidc_audio_prepare_for_output,
- .halt_io = vidc_audio_reset,
- .local_qlen = vidc_audio_local_qlen,
- .trigger = vidc_audio_trigger,
- .set_speed = vidc_audio_set_speed,
- .set_bits = vidc_audio_set_format,
- .set_channels = vidc_audio_set_channels
-};
-
-static struct mixer_operations vidc_mixer_operations = {
- .owner = THIS_MODULE,
- .id = "VIDC",
- .name = "VIDCsound",
- .ioctl = vidc_mixer_ioctl
-};
-
-void vidc_update_filler(int format, int channels)
-{
-#define TYPE(fmt,ch) (((fmt)<<2) | ((ch)&3))
-
- switch (TYPE(format, channels)) {
- default:
- case TYPE(AFMT_U8, 1):
- vidc_filler = vidc_fill_1x8_u;
- break;
-
- case TYPE(AFMT_U8, 2):
- vidc_filler = vidc_fill_2x8_u;
- break;
-
- case TYPE(AFMT_S8, 1):
- vidc_filler = vidc_fill_1x8_s;
- break;
-
- case TYPE(AFMT_S8, 2):
- vidc_filler = vidc_fill_2x8_s;
- break;
-
- case TYPE(AFMT_S16_LE, 1):
- vidc_filler = vidc_fill_1x16_s;
- break;
-
- case TYPE(AFMT_S16_LE, 2):
- vidc_filler = vidc_fill_2x16_s;
- break;
- }
-}
-
-static void __init attach_vidc(struct address_info *hw_config)
-{
- char name[32];
- int i, adev;
-
- sprintf(name, "VIDC %d-bit sound", hw_config->card_subtype);
- conf_printf(name, hw_config);
- memset(dma_buf, 0, sizeof(dma_buf));
-
- adev = sound_install_audiodrv(AUDIO_DRIVER_VERSION, name,
- &vidc_audio_driver, sizeof(vidc_audio_driver),
- DMA_AUTOMODE, AFMT_U8 | AFMT_S8 | AFMT_S16_LE,
- NULL, hw_config->dma, hw_config->dma2);
-
- if (adev < 0)
- goto audio_failed;
-
- /*
- * 1024 bytes => 64 buffers
- */
- audio_devs[adev]->min_fragment = 10;
- audio_devs[adev]->mixer_dev = num_mixers;
-
- audio_devs[adev]->mixer_dev =
- sound_install_mixer(MIXER_DRIVER_VERSION,
- name, &vidc_mixer_operations,
- sizeof(vidc_mixer_operations), NULL);
-
- if (audio_devs[adev]->mixer_dev < 0)
- goto mixer_failed;
-
- for (i = 0; i < 2; i++) {
- dma_buf[i] = get_zeroed_page(GFP_KERNEL);
- if (!dma_buf[i]) {
- printk(KERN_ERR "%s: can't allocate required buffers\n",
- name);
- goto mem_failed;
- }
- dma_pbuf[i] = virt_to_phys((void *)dma_buf[i]);
- }
-
- if (sound_alloc_dma(hw_config->dma, hw_config->name)) {
- printk(KERN_ERR "%s: DMA %d is in use\n", name, hw_config->dma);
- goto dma_failed;
- }
-
- if (request_irq(hw_config->irq, vidc_sound_dma_irq, 0,
- hw_config->name, &dma_start)) {
- printk(KERN_ERR "%s: IRQ %d is in use\n", name, hw_config->irq);
- goto irq_failed;
- }
- vidc_adev = adev;
- vidc_mixer_set(SOUND_MIXER_VOLUME, (85 | 85 << 8));
-
- return;
-
-irq_failed:
- sound_free_dma(hw_config->dma);
-dma_failed:
-mem_failed:
- for (i = 0; i < 2; i++)
- free_page(dma_buf[i]);
- sound_unload_mixerdev(audio_devs[adev]->mixer_dev);
-mixer_failed:
- sound_unload_audiodev(adev);
-audio_failed:
- return;
-}
-
-static int __init probe_vidc(struct address_info *hw_config)
-{
- hw_config->irq = IRQ_DMAS0;
- hw_config->dma = DMA_VIRTUAL_SOUND;
- hw_config->dma2 = -1;
- hw_config->card_subtype = 16;
- hw_config->name = "VIDC20";
- return 1;
-}
-
-static void __exit unload_vidc(struct address_info *hw_config)
-{
- int i, adev = vidc_adev;
-
- vidc_adev = -1;
-
- free_irq(hw_config->irq, &dma_start);
- sound_free_dma(hw_config->dma);
-
- if (adev >= 0) {
- sound_unload_mixerdev(audio_devs[adev]->mixer_dev);
- sound_unload_audiodev(adev);
- for (i = 0; i < 2; i++)
- free_page(dma_buf[i]);
- }
-}
-
-static struct address_info cfg;
-
-static int __init init_vidc(void)
-{
- if (probe_vidc(&cfg) == 0)
- return -ENODEV;
-
- attach_vidc(&cfg);
-
- return 0;
-}
-
-static void __exit cleanup_vidc(void)
-{
- unload_vidc(&cfg);
-}
-
-module_init(init_vidc);
-module_exit(cleanup_vidc);
-
-MODULE_AUTHOR("Russell King");
-MODULE_DESCRIPTION("VIDC20 audio driver");
-MODULE_LICENSE("GPL");
diff --git a/sound/oss/vidc.h b/sound/oss/vidc.h
deleted file mode 100644
index 0d1424751ecd..000000000000
--- a/sound/oss/vidc.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * linux/drivers/sound/vidc.h
- *
- * Copyright (C) 1997 Russell King <rmk@arm.linux.org.uk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * VIDC sound function prototypes
- */
-
-/* vidc_fill.S */
-
-/*
- * Filler routines for different channels and sample sizes
- */
-
-extern unsigned long vidc_fill_1x8_u(unsigned long ibuf, unsigned long iend,
- unsigned long obuf, int mask);
-extern unsigned long vidc_fill_2x8_u(unsigned long ibuf, unsigned long iend,
- unsigned long obuf, int mask);
-extern unsigned long vidc_fill_1x8_s(unsigned long ibuf, unsigned long iend,
- unsigned long obuf, int mask);
-extern unsigned long vidc_fill_2x8_s(unsigned long ibuf, unsigned long iend,
- unsigned long obuf, int mask);
-extern unsigned long vidc_fill_1x16_s(unsigned long ibuf, unsigned long iend,
- unsigned long obuf, int mask);
-extern unsigned long vidc_fill_2x16_s(unsigned long ibuf, unsigned long iend,
- unsigned long obuf, int mask);
-
-/*
- * DMA Interrupt handler
- */
-
-extern irqreturn_t vidc_sound_dma_irq(int irqnr, void *ref);
-
-/*
- * Filler routine pointer
- */
-
-extern unsigned long (*vidc_filler) (unsigned long ibuf, unsigned long iend,
- unsigned long obuf, int mask);
-
-/*
- * Virtual DMA buffer exhausted
- */
-
-extern irqreturn_t (*dma_interrupt) (void);
-
-/*
- * Virtual DMA buffer addresses
- */
-
-extern unsigned long dma_start, dma_count, dma_bufsize;
-extern unsigned long dma_buf[2], dma_pbuf[2];
-
-/* vidc_synth.c */
-
-extern void vidc_synth_init(struct address_info *hw_config);
-extern void vidc_synth_exit(struct address_info *hw_config);
-extern int vidc_synth_get_volume(void);
-extern int vidc_synth_set_volume(int vol);
diff --git a/sound/oss/vidc_fill.S b/sound/oss/vidc_fill.S
deleted file mode 100644
index bed34921d176..000000000000
--- a/sound/oss/vidc_fill.S
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * linux/drivers/sound/vidc_fill.S
- *
- * Copyright (C) 1997 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Filler routines for DMA buffers
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <mach/hardware.h>
-#include <asm/hardware/iomd.h>
-
- .text
-
-ENTRY(vidc_fill_1x8_u)
- mov ip, #0xff00
-1: cmp r0, r1
- bge vidc_clear
- ldrb r4, [r0], #1
- eor r4, r4, #0x80
- and r4, ip, r4, lsl #8
- orr r4, r4, r4, lsl #16
- str r4, [r2], #4
- cmp r2, r3
- blt 1b
- mov pc, lr
-
-ENTRY(vidc_fill_2x8_u)
- mov ip, #0xff00
-1: cmp r0, r1
- bge vidc_clear
- ldr r4, [r0], #2
- and r5, r4, ip
- and r4, ip, r4, lsl #8
- orr r4, r4, r5, lsl #16
- orr r4, r4, r4, lsr #8
- str r4, [r2], #4
- cmp r2, r3
- blt 1b
- mov pc, lr
-
-ENTRY(vidc_fill_1x8_s)
- mov ip, #0xff00
-1: cmp r0, r1
- bge vidc_clear
- ldrb r4, [r0], #1
- and r4, ip, r4, lsl #8
- orr r4, r4, r4, lsl #16
- str r4, [r2], #4
- cmp r2, r3
- blt 1b
- mov pc, lr
-
-ENTRY(vidc_fill_2x8_s)
- mov ip, #0xff00
-1: cmp r0, r1
- bge vidc_clear
- ldr r4, [r0], #2
- and r5, r4, ip
- and r4, ip, r4, lsl #8
- orr r4, r4, r5, lsl #16
- orr r4, r4, r4, lsr #8
- str r4, [r2], #4
- cmp r2, r3
- blt 1b
- mov pc, lr
-
-ENTRY(vidc_fill_1x16_s)
- mov ip, #0xff00
- orr ip, ip, ip, lsr #8
-1: cmp r0, r1
- bge vidc_clear
- ldr r5, [r0], #2
- and r4, r5, ip
- orr r4, r4, r4, lsl #16
- str r4, [r2], #4
- cmp r0, r1
- addlt r0, r0, #2
- andlt r4, r5, ip, lsl #16
- orrlt r4, r4, r4, lsr #16
- strlt r4, [r2], #4
- cmp r2, r3
- blt 1b
- mov pc, lr
-
-ENTRY(vidc_fill_2x16_s)
- mov ip, #0xff00
- orr ip, ip, ip, lsr #8
-1: cmp r0, r1
- bge vidc_clear
- ldr r4, [r0], #4
- str r4, [r2], #4
- cmp r0, r1
- ldrlt r4, [r0], #4
- strlt r4, [r2], #4
- cmp r2, r3
- blt 1b
- mov pc, lr
-
-ENTRY(vidc_fill_noaudio)
- mov r0, #0
- mov r1, #0
-2: mov r4, #0
- mov r5, #0
-1: cmp r2, r3
- stmltia r2!, {r0, r1, r4, r5}
- blt 1b
- mov pc, lr
-
-ENTRY(vidc_clear)
- mov r0, #0
- mov r1, #0
- tst r2, #4
- str r0, [r2], #4
- tst r2, #8
- stmia r2!, {r0, r1}
- b 2b
-
-/*
- * Call filler routines with:
- * r0 = phys address
- * r1 = phys end
- * r2 = buffer
- * Returns:
- * r0 = new buffer address
- * r2 = new buffer finish
- * r4 = corrupted
- * r5 = corrupted
- * ip = corrupted
- */
-
-ENTRY(vidc_sound_dma_irq)
- stmfd sp!, {r4 - r8, lr}
- ldr r8, =dma_start
- ldmia r8, {r0, r1, r2, r3, r4, r5}
- teq r1, #0
- adreq r4, vidc_fill_noaudio
- moveq r7, #1 << 31
- movne r7, #0
- mov ip, #IOMD_BASE & 0xff000000
- orr ip, ip, #IOMD_BASE & 0x00ff0000
- ldrb r6, [ip, #IOMD_SD0ST]
- tst r6, #DMA_ST_OFL @ Check for overrun
- eorne r6, r6, #DMA_ST_AB
- tst r6, #DMA_ST_AB
- moveq r2, r3 @ DMAing A, update B
- add r3, r2, r5 @ End of DMA buffer
- add r1, r1, r0 @ End of virtual DMA buffer
- mov lr, pc
- mov pc, r4 @ Call fill routine (uses r4, ip)
- sub r1, r1, r0 @ Remaining length
- stmia r8, {r0, r1}
- mov r0, #0
- tst r2, #4 @ Round buffer up to 4 words
- strne r0, [r2], #4
- tst r2, #8
- strne r0, [r2], #4
- strne r0, [r2], #4
- sub r2, r2, #16
- mov r2, r2, lsl #20
- movs r2, r2, lsr #20
- orreq r2, r2, #1 << 30 @ Set L bit
- orr r2, r2, r7
- ldmdb r8, {r3, r4, r5}
- tst r6, #DMA_ST_AB
- mov ip, #IOMD_BASE & 0xff000000
- orr ip, ip, #IOMD_BASE & 0x00ff0000
- streq r4, [ip, #IOMD_SD0CURB]
- strne r5, [ip, #IOMD_SD0CURA]
- streq r2, [ip, #IOMD_SD0ENDB]
- strne r2, [ip, #IOMD_SD0ENDA]
- ldr lr, [ip, #IOMD_SD0ST]
- tst lr, #DMA_ST_OFL
- bne 1f
- tst r6, #DMA_ST_AB
- strne r4, [ip, #IOMD_SD0CURB]
- streq r5, [ip, #IOMD_SD0CURA]
- strne r2, [ip, #IOMD_SD0ENDB]
- streq r2, [ip, #IOMD_SD0ENDA]
-1: teq r7, #0
- mov r0, #0x10
- strneb r0, [ip, #IOMD_SD0CR]
- ldmfd sp!, {r4 - r8, lr}
- mov r0, #1 @ IRQ_HANDLED
- teq r1, #0 @ If we have no more
- movne pc, lr
- teq r3, #0
- movne pc, r3 @ Call interrupt routine
- mov pc, lr
-
- .data
- .globl dma_interrupt
-dma_interrupt:
- .long 0 @ r3
- .globl dma_pbuf
-dma_pbuf:
- .long 0 @ r4
- .long 0 @ r5
- .globl dma_start
-dma_start:
- .long 0 @ r0
- .globl dma_count
-dma_count:
- .long 0 @ r1
- .globl dma_buf
-dma_buf:
- .long 0 @ r2
- .long 0 @ r3
- .globl vidc_filler
-vidc_filler:
- .long vidc_fill_noaudio @ r4
- .globl dma_bufsize
-dma_bufsize:
- .long 0x1000 @ r5
diff --git a/sound/oss/waveartist.c b/sound/oss/waveartist.c
deleted file mode 100644
index 4f0c3a232e41..000000000000
--- a/sound/oss/waveartist.c
+++ /dev/null
@@ -1,2043 +0,0 @@
-/*
- * linux/sound/oss/waveartist.c
- *
- * The low level driver for the RWA010 Rockwell Wave Artist
- * codec chip used in the Rebel.com NetWinder.
- *
- * Cleaned up and integrated into 2.1 by Russell King (rmk@arm.linux.org.uk)
- * and Pat Beirne (patb@corel.ca)
- *
- *
- * Copyright (C) by Rebel.com 1998-1999
- *
- * RWA010 specs received under NDA from Rockwell
- *
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
- * Version 2 (June 1991). See the "COPYING" file distributed with this software
- * for more info.
- *
- * Changes:
- * 11-10-2000 Bartlomiej Zolnierkiewicz <bkz@linux-ide.org>
- * Added __init to waveartist_init()
- */
-
-/* Debugging */
-#define DEBUG_CMD 1
-#define DEBUG_OUT 2
-#define DEBUG_IN 4
-#define DEBUG_INTR 8
-#define DEBUG_MIXER 16
-#define DEBUG_TRIGGER 32
-
-#define debug_flg (0)
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/bitops.h>
-
-
-#include "sound_config.h"
-#include "waveartist.h"
-
-#ifdef CONFIG_ARM
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-#endif
-
-#ifndef NO_DMA
-#define NO_DMA 255
-#endif
-
-#define SUPPORTED_MIXER_DEVICES (SOUND_MASK_SYNTH |\
- SOUND_MASK_PCM |\
- SOUND_MASK_LINE |\
- SOUND_MASK_MIC |\
- SOUND_MASK_LINE1 |\
- SOUND_MASK_RECLEV |\
- SOUND_MASK_VOLUME |\
- SOUND_MASK_IMIX)
-
-static unsigned short levels[SOUND_MIXER_NRDEVICES] = {
- 0x5555, /* Master Volume */
- 0x0000, /* Bass */
- 0x0000, /* Treble */
- 0x2323, /* Synth (FM) */
- 0x4b4b, /* PCM */
- 0x6464, /* PC Speaker */
- 0x0000, /* Ext Line */
- 0x0000, /* Mic */
- 0x0000, /* CD */
- 0x6464, /* Recording monitor */
- 0x0000, /* SB PCM (ALT PCM) */
- 0x0000, /* Recording level */
- 0x6464, /* Input gain */
- 0x6464, /* Output gain */
- 0x0000, /* Line1 (Aux1) */
- 0x0000, /* Line2 (Aux2) */
- 0x0000, /* Line3 (Aux3) */
- 0x0000, /* Digital1 */
- 0x0000, /* Digital2 */
- 0x0000, /* Digital3 */
- 0x0000, /* Phone In */
- 0x6464, /* Phone Out */
- 0x0000, /* Video */
- 0x0000, /* Radio */
- 0x0000 /* Monitor */
-};
-
-struct wavnc_info {
- struct address_info hw; /* hardware */
- char *chip_name;
-
- int xfer_count;
- int audio_mode;
- int open_mode;
- int audio_flags;
- int record_dev;
- int playback_dev;
- int dev_no;
-
- /* Mixer parameters */
- const struct waveartist_mixer_info *mix;
-
- unsigned short *levels; /* cache of volume settings */
- int recmask; /* currently enabled recording device! */
-
-#ifdef CONFIG_ARCH_NETWINDER
- signed int slider_vol; /* hardware slider volume */
- unsigned int handset_detect :1;
- unsigned int telephone_detect:1;
- unsigned int no_autoselect :1;/* handset/telephone autoselects a path */
- unsigned int spkr_mute_state :1;/* set by ioctl or autoselect */
- unsigned int line_mute_state :1;/* set by ioctl or autoselect */
- unsigned int use_slider :1;/* use slider setting for o/p vol */
-#endif
-};
-
-/*
- * This is the implementation specific mixer information.
- */
-struct waveartist_mixer_info {
- unsigned int supported_devs; /* Supported devices */
-	unsigned int	recording_devs;	   /* Recordable devices */
- unsigned int stereo_devs; /* Stereo devices */
-
- unsigned int (*select_input)(struct wavnc_info *, unsigned int,
- unsigned char *, unsigned char *);
- int (*decode_mixer)(struct wavnc_info *, int,
- unsigned char, unsigned char);
- int (*get_mixer)(struct wavnc_info *, int);
-};
-
-struct wavnc_port_info {
- int open_mode;
- int speed;
- int channels;
- int audio_format;
-};
-
-static int nr_waveartist_devs;
-static struct wavnc_info adev_info[MAX_AUDIO_DEV];
-static DEFINE_SPINLOCK(waveartist_lock);
-
-#ifndef CONFIG_ARCH_NETWINDER
-#define machine_is_netwinder() 0
-#else
-static struct timer_list vnc_timer;
-static void vnc_configure_mixer(struct wavnc_info *devc,
- unsigned int input_mask);
-static int vnc_private_ioctl(int dev, unsigned int cmd, int __user *arg);
-static void vnc_slider_tick(unsigned long data);
-#endif
-
-static inline void
-waveartist_set_ctlr(struct address_info *hw, unsigned char clear, unsigned char set)
-{
- unsigned int ctlr_port = hw->io_base + CTLR;
-
- clear = ~clear & inb(ctlr_port);
-
- outb(clear | set, ctlr_port);
-}
-
-/* Toggle IRQ acknowledge line
- */
-static inline void
-waveartist_iack(struct wavnc_info *devc)
-{
- unsigned int ctlr_port = devc->hw.io_base + CTLR;
- int old_ctlr;
-
- old_ctlr = inb(ctlr_port) & ~IRQ_ACK;
-
- outb(old_ctlr | IRQ_ACK, ctlr_port);
- outb(old_ctlr, ctlr_port);
-}
-
-static inline int
-waveartist_sleep(int timeout_ms)
-{
- unsigned int timeout = msecs_to_jiffies(timeout_ms*100);
- return schedule_timeout_interruptible(timeout);
-}
-
-static int
-waveartist_reset(struct wavnc_info *devc)
-{
- struct address_info *hw = &devc->hw;
- unsigned int timeout, res = -1;
-
- waveartist_set_ctlr(hw, -1, RESET);
- waveartist_sleep(2);
- waveartist_set_ctlr(hw, RESET, 0);
-
- timeout = 500;
- do {
- mdelay(2);
-
- if (inb(hw->io_base + STATR) & CMD_RF) {
- res = inw(hw->io_base + CMDR);
- if (res == 0x55aa)
- break;
- }
- } while (--timeout);
-
- if (timeout == 0) {
- printk(KERN_WARNING "WaveArtist: reset timeout ");
- if (res != (unsigned int)-1)
- printk("(res=%04X)", res);
- printk("\n");
- return 1;
- }
- return 0;
-}
-
-/* Helper function to send and receive words
- * from WaveArtist. It handles all the handshaking
- * and can send or receive multiple words.
- */
-static int
-waveartist_cmd(struct wavnc_info *devc,
- int nr_cmd, unsigned int *cmd,
- int nr_resp, unsigned int *resp)
-{
- unsigned int io_base = devc->hw.io_base;
- unsigned int timed_out = 0;
- unsigned int i;
-
- if (debug_flg & DEBUG_CMD) {
- printk("waveartist_cmd: cmd=");
-
- for (i = 0; i < nr_cmd; i++)
- printk("%04X ", cmd[i]);
-
- printk("\n");
- }
-
- if (inb(io_base + STATR) & CMD_RF) {
- int old_data;
-
- /* flush the port
- */
-
- old_data = inw(io_base + CMDR);
-
- if (debug_flg & DEBUG_CMD)
- printk("flushed %04X...", old_data);
-
- udelay(10);
- }
-
- for (i = 0; !timed_out && i < nr_cmd; i++) {
- int count;
-
- for (count = 5000; count; count--)
- if (inb(io_base + STATR) & CMD_WE)
- break;
-
- if (!count)
- timed_out = 1;
- else
- outw(cmd[i], io_base + CMDR);
- }
-
- for (i = 0; !timed_out && i < nr_resp; i++) {
- int count;
-
- for (count = 5000; count; count--)
- if (inb(io_base + STATR) & CMD_RF)
- break;
-
- if (!count)
- timed_out = 1;
- else
- resp[i] = inw(io_base + CMDR);
- }
-
- if (debug_flg & DEBUG_CMD) {
- if (!timed_out) {
- printk("waveartist_cmd: resp=");
-
- for (i = 0; i < nr_resp; i++)
- printk("%04X ", resp[i]);
-
- printk("\n");
- } else
- printk("waveartist_cmd: timed out\n");
- }
-
- return timed_out ? 1 : 0;
-}
-
-/*
- * Send one command word
- */
-static inline int
-waveartist_cmd1(struct wavnc_info *devc, unsigned int cmd)
-{
- return waveartist_cmd(devc, 1, &cmd, 0, NULL);
-}
-
-/*
- * Send one command, receive one word
- */
-static inline unsigned int
-waveartist_cmd1_r(struct wavnc_info *devc, unsigned int cmd)
-{
- unsigned int ret;
-
- waveartist_cmd(devc, 1, &cmd, 1, &ret);
-
- return ret;
-}
-
-/*
- * Send a double command, receive one
- * word (and throw it away)
- */
-static inline int
-waveartist_cmd2(struct wavnc_info *devc, unsigned int cmd, unsigned int arg)
-{
- unsigned int vals[2];
-
- vals[0] = cmd;
- vals[1] = arg;
-
- return waveartist_cmd(devc, 2, vals, 1, vals);
-}
-
-/*
- * Send a triple command
- */
-static inline int
-waveartist_cmd3(struct wavnc_info *devc, unsigned int cmd,
- unsigned int arg1, unsigned int arg2)
-{
- unsigned int vals[3];
-
- vals[0] = cmd;
- vals[1] = arg1;
- vals[2] = arg2;
-
- return waveartist_cmd(devc, 3, vals, 0, NULL);
-}
-
-static int
-waveartist_getrev(struct wavnc_info *devc, char *rev)
-{
- unsigned int temp[2];
- unsigned int cmd = WACMD_GETREV;
-
- waveartist_cmd(devc, 1, &cmd, 2, temp);
-
- rev[0] = temp[0] >> 8;
- rev[1] = temp[0] & 255;
- rev[2] = '\0';
-
- return temp[0];
-}
-
-static void waveartist_halt_output(int dev);
-static void waveartist_halt_input(int dev);
-static void waveartist_halt(int dev);
-static void waveartist_trigger(int dev, int state);
-
-static int
-waveartist_open(int dev, int mode)
-{
- struct wavnc_info *devc;
- struct wavnc_port_info *portc;
- unsigned long flags;
-
- if (dev < 0 || dev >= num_audiodevs)
- return -ENXIO;
-
- devc = (struct wavnc_info *) audio_devs[dev]->devc;
- portc = (struct wavnc_port_info *) audio_devs[dev]->portc;
-
- spin_lock_irqsave(&waveartist_lock, flags);
- if (portc->open_mode || (devc->open_mode & mode)) {
- spin_unlock_irqrestore(&waveartist_lock, flags);
- return -EBUSY;
- }
-
- devc->audio_mode = 0;
- devc->open_mode |= mode;
- portc->open_mode = mode;
- waveartist_trigger(dev, 0);
-
- if (mode & OPEN_READ)
- devc->record_dev = dev;
- if (mode & OPEN_WRITE)
- devc->playback_dev = dev;
- spin_unlock_irqrestore(&waveartist_lock, flags);
-
- return 0;
-}
-
-static void
-waveartist_close(int dev)
-{
- struct wavnc_info *devc = (struct wavnc_info *)
- audio_devs[dev]->devc;
- struct wavnc_port_info *portc = (struct wavnc_port_info *)
- audio_devs[dev]->portc;
- unsigned long flags;
-
- spin_lock_irqsave(&waveartist_lock, flags);
-
- waveartist_halt(dev);
-
- devc->audio_mode = 0;
- devc->open_mode &= ~portc->open_mode;
- portc->open_mode = 0;
-
- spin_unlock_irqrestore(&waveartist_lock, flags);
-}
-
-static void
-waveartist_output_block(int dev, unsigned long buf, int __count, int intrflag)
-{
- struct wavnc_port_info *portc = (struct wavnc_port_info *)
- audio_devs[dev]->portc;
- struct wavnc_info *devc = (struct wavnc_info *)
- audio_devs[dev]->devc;
- unsigned long flags;
- unsigned int count = __count;
-
- if (debug_flg & DEBUG_OUT)
- printk("waveartist: output block, buf=0x%lx, count=0x%x...\n",
- buf, count);
- /*
- * 16 bit data
- */
- if (portc->audio_format & (AFMT_S16_LE | AFMT_S16_BE))
- count >>= 1;
-
- if (portc->channels > 1)
- count >>= 1;
-
- count -= 1;
-
- if (devc->audio_mode & PCM_ENABLE_OUTPUT &&
- audio_devs[dev]->flags & DMA_AUTOMODE &&
- intrflag &&
- count == devc->xfer_count) {
- devc->audio_mode |= PCM_ENABLE_OUTPUT;
- return; /*
- * Auto DMA mode on. No need to react
- */
- }
-
- spin_lock_irqsave(&waveartist_lock, flags);
-
- /*
- * set sample count
- */
- waveartist_cmd2(devc, WACMD_OUTPUTSIZE, count);
-
- devc->xfer_count = count;
- devc->audio_mode |= PCM_ENABLE_OUTPUT;
-
- spin_unlock_irqrestore(&waveartist_lock, flags);
-}
-
-static void
-waveartist_start_input(int dev, unsigned long buf, int __count, int intrflag)
-{
- struct wavnc_port_info *portc = (struct wavnc_port_info *)
- audio_devs[dev]->portc;
- struct wavnc_info *devc = (struct wavnc_info *)
- audio_devs[dev]->devc;
- unsigned long flags;
- unsigned int count = __count;
-
- if (debug_flg & DEBUG_IN)
- printk("waveartist: start input, buf=0x%lx, count=0x%x...\n",
- buf, count);
-
- if (portc->audio_format & (AFMT_S16_LE | AFMT_S16_BE)) /* 16 bit data */
- count >>= 1;
-
- if (portc->channels > 1)
- count >>= 1;
-
- count -= 1;
-
- if (devc->audio_mode & PCM_ENABLE_INPUT &&
- audio_devs[dev]->flags & DMA_AUTOMODE &&
- intrflag &&
- count == devc->xfer_count) {
- devc->audio_mode |= PCM_ENABLE_INPUT;
- return; /*
- * Auto DMA mode on. No need to react
- */
- }
-
- spin_lock_irqsave(&waveartist_lock, flags);
-
- /*
- * set sample count
- */
- waveartist_cmd2(devc, WACMD_INPUTSIZE, count);
-
- devc->xfer_count = count;
- devc->audio_mode |= PCM_ENABLE_INPUT;
-
- spin_unlock_irqrestore(&waveartist_lock, flags);
-}
-
-static int
-waveartist_ioctl(int dev, unsigned int cmd, void __user * arg)
-{
- return -EINVAL;
-}
-
-static unsigned int
-waveartist_get_speed(struct wavnc_port_info *portc)
-{
- unsigned int speed;
-
- /*
- * program the speed, channels, bits
- */
- if (portc->speed == 8000)
- speed = 0x2E71;
- else if (portc->speed == 11025)
- speed = 0x4000;
- else if (portc->speed == 22050)
- speed = 0x8000;
- else if (portc->speed == 44100)
- speed = 0x0;
- else {
- /*
- * non-standard - just calculate
- */
- speed = portc->speed << 16;
-
- speed = (speed / 44100) & 65535;
- }
-
- return speed;
-}
-
-static unsigned int
-waveartist_get_bits(struct wavnc_port_info *portc)
-{
- unsigned int bits;
-
- if (portc->audio_format == AFMT_S16_LE)
- bits = 1;
- else if (portc->audio_format == AFMT_S8)
- bits = 0;
- else
- bits = 2; //default AFMT_U8
-
- return bits;
-}
-
-static int
-waveartist_prepare_for_input(int dev, int bsize, int bcount)
-{
- unsigned long flags;
- struct wavnc_info *devc = (struct wavnc_info *)
- audio_devs[dev]->devc;
- struct wavnc_port_info *portc = (struct wavnc_port_info *)
- audio_devs[dev]->portc;
- unsigned int speed, bits;
-
- if (devc->audio_mode)
- return 0;
-
- speed = waveartist_get_speed(portc);
- bits = waveartist_get_bits(portc);
-
- spin_lock_irqsave(&waveartist_lock, flags);
-
- if (waveartist_cmd2(devc, WACMD_INPUTFORMAT, bits))
- printk(KERN_WARNING "waveartist: error setting the "
- "record format to %d\n", portc->audio_format);
-
- if (waveartist_cmd2(devc, WACMD_INPUTCHANNELS, portc->channels))
- printk(KERN_WARNING "waveartist: error setting record "
- "to %d channels\n", portc->channels);
-
- /*
- * write cmd SetSampleSpeedTimeConstant
- */
- if (waveartist_cmd2(devc, WACMD_INPUTSPEED, speed))
- printk(KERN_WARNING "waveartist: error setting the record "
- "speed to %dHz.\n", portc->speed);
-
- if (waveartist_cmd2(devc, WACMD_INPUTDMA, 1))
- printk(KERN_WARNING "waveartist: error setting the record "
- "data path to 0x%X\n", 1);
-
- if (waveartist_cmd2(devc, WACMD_INPUTFORMAT, bits))
- printk(KERN_WARNING "waveartist: error setting the record "
- "format to %d\n", portc->audio_format);
-
- devc->xfer_count = 0;
- spin_unlock_irqrestore(&waveartist_lock, flags);
- waveartist_halt_input(dev);
-
- if (debug_flg & DEBUG_INTR) {
- printk("WA CTLR reg: 0x%02X.\n",
- inb(devc->hw.io_base + CTLR));
- printk("WA STAT reg: 0x%02X.\n",
- inb(devc->hw.io_base + STATR));
- printk("WA IRQS reg: 0x%02X.\n",
- inb(devc->hw.io_base + IRQSTAT));
- }
-
- return 0;
-}
-
-static int
-waveartist_prepare_for_output(int dev, int bsize, int bcount)
-{
- unsigned long flags;
- struct wavnc_info *devc = (struct wavnc_info *)
- audio_devs[dev]->devc;
- struct wavnc_port_info *portc = (struct wavnc_port_info *)
- audio_devs[dev]->portc;
- unsigned int speed, bits;
-
- /*
- * program the speed, channels, bits
- */
- speed = waveartist_get_speed(portc);
- bits = waveartist_get_bits(portc);
-
- spin_lock_irqsave(&waveartist_lock, flags);
-
- if (waveartist_cmd2(devc, WACMD_OUTPUTSPEED, speed) &&
- waveartist_cmd2(devc, WACMD_OUTPUTSPEED, speed))
- printk(KERN_WARNING "waveartist: error setting the playback "
- "speed to %dHz.\n", portc->speed);
-
- if (waveartist_cmd2(devc, WACMD_OUTPUTCHANNELS, portc->channels))
- printk(KERN_WARNING "waveartist: error setting the playback "
- "to %d channels\n", portc->channels);
-
- if (waveartist_cmd2(devc, WACMD_OUTPUTDMA, 0))
- printk(KERN_WARNING "waveartist: error setting the playback "
- "data path to 0x%X\n", 0);
-
- if (waveartist_cmd2(devc, WACMD_OUTPUTFORMAT, bits))
- printk(KERN_WARNING "waveartist: error setting the playback "
- "format to %d\n", portc->audio_format);
-
- devc->xfer_count = 0;
- spin_unlock_irqrestore(&waveartist_lock, flags);
- waveartist_halt_output(dev);
-
- if (debug_flg & DEBUG_INTR) {
- printk("WA CTLR reg: 0x%02X.\n",inb(devc->hw.io_base + CTLR));
- printk("WA STAT reg: 0x%02X.\n",inb(devc->hw.io_base + STATR));
- printk("WA IRQS reg: 0x%02X.\n",inb(devc->hw.io_base + IRQSTAT));
- }
-
- return 0;
-}
-
-static void
-waveartist_halt(int dev)
-{
- struct wavnc_port_info *portc = (struct wavnc_port_info *)
- audio_devs[dev]->portc;
- struct wavnc_info *devc;
-
- if (portc->open_mode & OPEN_WRITE)
- waveartist_halt_output(dev);
-
- if (portc->open_mode & OPEN_READ)
- waveartist_halt_input(dev);
-
- devc = (struct wavnc_info *) audio_devs[dev]->devc;
- devc->audio_mode = 0;
-}
-
-static void
-waveartist_halt_input(int dev)
-{
- struct wavnc_info *devc = (struct wavnc_info *)
- audio_devs[dev]->devc;
- unsigned long flags;
-
- spin_lock_irqsave(&waveartist_lock, flags);
-
- /*
- * Stop capture
- */
- waveartist_cmd1(devc, WACMD_INPUTSTOP);
-
- devc->audio_mode &= ~PCM_ENABLE_INPUT;
-
- /*
- * Clear interrupt by toggling
- * the IRQ_ACK bit in CTRL
- */
- if (inb(devc->hw.io_base + STATR) & IRQ_REQ)
- waveartist_iack(devc);
-
-// devc->audio_mode &= ~PCM_ENABLE_INPUT;
-
- spin_unlock_irqrestore(&waveartist_lock, flags);
-}
-
-static void
-waveartist_halt_output(int dev)
-{
- struct wavnc_info *devc = (struct wavnc_info *)
- audio_devs[dev]->devc;
- unsigned long flags;
-
- spin_lock_irqsave(&waveartist_lock, flags);
-
- waveartist_cmd1(devc, WACMD_OUTPUTSTOP);
-
- devc->audio_mode &= ~PCM_ENABLE_OUTPUT;
-
- /*
- * Clear interrupt by toggling
- * the IRQ_ACK bit in CTRL
- */
- if (inb(devc->hw.io_base + STATR) & IRQ_REQ)
- waveartist_iack(devc);
-
-// devc->audio_mode &= ~PCM_ENABLE_OUTPUT;
-
- spin_unlock_irqrestore(&waveartist_lock, flags);
-}
-
-static void
-waveartist_trigger(int dev, int state)
-{
- struct wavnc_info *devc = (struct wavnc_info *)
- audio_devs[dev]->devc;
- struct wavnc_port_info *portc = (struct wavnc_port_info *)
- audio_devs[dev]->portc;
- unsigned long flags;
-
- if (debug_flg & DEBUG_TRIGGER) {
- printk("wavnc: audio trigger ");
- if (state & PCM_ENABLE_INPUT)
- printk("in ");
- if (state & PCM_ENABLE_OUTPUT)
- printk("out");
- printk("\n");
- }
-
- spin_lock_irqsave(&waveartist_lock, flags);
-
- state &= devc->audio_mode;
-
- if (portc->open_mode & OPEN_READ &&
- state & PCM_ENABLE_INPUT)
- /*
- * enable ADC Data Transfer to PC
- */
- waveartist_cmd1(devc, WACMD_INPUTSTART);
-
- if (portc->open_mode & OPEN_WRITE &&
- state & PCM_ENABLE_OUTPUT)
- /*
- * enable DAC data transfer from PC
- */
- waveartist_cmd1(devc, WACMD_OUTPUTSTART);
-
- spin_unlock_irqrestore(&waveartist_lock, flags);
-}
-
-static int
-waveartist_set_speed(int dev, int arg)
-{
- struct wavnc_port_info *portc = (struct wavnc_port_info *)
- audio_devs[dev]->portc;
-
- if (arg <= 0)
- return portc->speed;
-
- if (arg < 5000)
- arg = 5000;
- if (arg > 44100)
- arg = 44100;
-
- portc->speed = arg;
- return portc->speed;
-
-}
-
-static short
-waveartist_set_channels(int dev, short arg)
-{
- struct wavnc_port_info *portc = (struct wavnc_port_info *)
- audio_devs[dev]->portc;
-
- if (arg != 1 && arg != 2)
- return portc->channels;
-
- portc->channels = arg;
- return arg;
-}
-
-static unsigned int
-waveartist_set_bits(int dev, unsigned int arg)
-{
- struct wavnc_port_info *portc = (struct wavnc_port_info *)
- audio_devs[dev]->portc;
-
- if (arg == 0)
- return portc->audio_format;
-
- if ((arg != AFMT_U8) && (arg != AFMT_S16_LE) && (arg != AFMT_S8))
- arg = AFMT_U8;
-
- portc->audio_format = arg;
-
- return arg;
-}
-
-static struct audio_driver waveartist_audio_driver = {
- .owner = THIS_MODULE,
- .open = waveartist_open,
- .close = waveartist_close,
- .output_block = waveartist_output_block,
- .start_input = waveartist_start_input,
- .ioctl = waveartist_ioctl,
- .prepare_for_input = waveartist_prepare_for_input,
- .prepare_for_output = waveartist_prepare_for_output,
- .halt_io = waveartist_halt,
- .halt_input = waveartist_halt_input,
- .halt_output = waveartist_halt_output,
- .trigger = waveartist_trigger,
- .set_speed = waveartist_set_speed,
- .set_bits = waveartist_set_bits,
- .set_channels = waveartist_set_channels
-};
-
-
-static irqreturn_t
-waveartist_intr(int irq, void *dev_id)
-{
- struct wavnc_info *devc = dev_id;
- int irqstatus, status;
-
- spin_lock(&waveartist_lock);
- irqstatus = inb(devc->hw.io_base + IRQSTAT);
- status = inb(devc->hw.io_base + STATR);
-
- if (debug_flg & DEBUG_INTR)
- printk("waveartist_intr: stat=%02x, irqstat=%02x\n",
- status, irqstatus);
-
- if (status & IRQ_REQ) /* Clear interrupt */
- waveartist_iack(devc);
- else
- printk(KERN_WARNING "waveartist: unexpected interrupt\n");
-
- if (irqstatus & 0x01) {
- int temp = 1;
-
- /* PCM buffer done
- */
- if ((status & DMA0) && (devc->audio_mode & PCM_ENABLE_OUTPUT)) {
- DMAbuf_outputintr(devc->playback_dev, 1);
- temp = 0;
- }
- if ((status & DMA1) && (devc->audio_mode & PCM_ENABLE_INPUT)) {
- DMAbuf_inputintr(devc->record_dev);
- temp = 0;
- }
- if (temp) //default:
- printk(KERN_WARNING "waveartist: Unknown interrupt\n");
- }
- if (irqstatus & 0x2)
- // We do not use SB mode natively...
- printk(KERN_WARNING "waveartist: Unexpected SB interrupt...\n");
- spin_unlock(&waveartist_lock);
- return IRQ_HANDLED;
-}
-
-/* -------------------------------------------------------------------------
- * Mixer stuff
- */
-struct mix_ent {
- unsigned char reg_l;
- unsigned char reg_r;
- unsigned char shift;
- unsigned char max;
-};
-
-static const struct mix_ent mix_devs[SOUND_MIXER_NRDEVICES] = {
- { 2, 6, 1, 7 }, /* SOUND_MIXER_VOLUME */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_BASS */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_TREBLE */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_SYNTH */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_PCM */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_SPEAKER */
- { 0, 4, 6, 31 }, /* SOUND_MIXER_LINE */
- { 2, 6, 4, 3 }, /* SOUND_MIXER_MIC */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_CD */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_IMIX */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_ALTPCM */
-#if 0
- { 3, 7, 0, 10 }, /* SOUND_MIXER_RECLEV */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_IGAIN */
-#else
- { 0, 0, 0, 0 }, /* SOUND_MIXER_RECLEV */
- { 3, 7, 0, 7 }, /* SOUND_MIXER_IGAIN */
-#endif
- { 0, 0, 0, 0 }, /* SOUND_MIXER_OGAIN */
- { 0, 4, 1, 31 }, /* SOUND_MIXER_LINE1 */
- { 1, 5, 6, 31 }, /* SOUND_MIXER_LINE2 */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_LINE3 */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_DIGITAL1 */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_DIGITAL2 */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_DIGITAL3 */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_PHONEIN */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_PHONEOUT */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_VIDEO */
- { 0, 0, 0, 0 }, /* SOUND_MIXER_RADIO */
- { 0, 0, 0, 0 } /* SOUND_MIXER_MONITOR */
-};
-
-static void
-waveartist_mixer_update(struct wavnc_info *devc, int whichDev)
-{
- unsigned int lev_left, lev_right;
-
- lev_left = devc->levels[whichDev] & 0xff;
- lev_right = devc->levels[whichDev] >> 8;
-
- if (lev_left > 100)
- lev_left = 100;
- if (lev_right > 100)
- lev_right = 100;
-
-#define SCALE(lev,max) ((lev) * (max) / 100)
-
- if (machine_is_netwinder() && whichDev == SOUND_MIXER_PHONEOUT)
- whichDev = SOUND_MIXER_VOLUME;
-
- if (mix_devs[whichDev].reg_l || mix_devs[whichDev].reg_r) {
- const struct mix_ent *mix = mix_devs + whichDev;
- unsigned int mask, left, right;
-
- mask = mix->max << mix->shift;
- lev_left = SCALE(lev_left, mix->max) << mix->shift;
- lev_right = SCALE(lev_right, mix->max) << mix->shift;
-
- /* read left setting */
- left = waveartist_cmd1_r(devc, WACMD_GET_LEVEL |
- mix->reg_l << 8);
-
- /* read right setting */
- right = waveartist_cmd1_r(devc, WACMD_GET_LEVEL |
- mix->reg_r << 8);
-
- left = (left & ~mask) | (lev_left & mask);
- right = (right & ~mask) | (lev_right & mask);
-
- /* write left,right back */
- waveartist_cmd3(devc, WACMD_SET_MIXER, left, right);
- } else {
- switch(whichDev) {
- case SOUND_MIXER_PCM:
- waveartist_cmd3(devc, WACMD_SET_LEVEL,
- SCALE(lev_left, 32767),
- SCALE(lev_right, 32767));
- break;
-
- case SOUND_MIXER_SYNTH:
- waveartist_cmd3(devc, 0x0100 | WACMD_SET_LEVEL,
- SCALE(lev_left, 32767),
- SCALE(lev_right, 32767));
- break;
- }
- }
-}
-
-/*
- * Set the ADC MUX to the specified values. We do NOT do any
- * checking of the values passed, since we assume that the
- * relevant *_select_input function has done that for us.
- */
-static void
-waveartist_set_adc_mux(struct wavnc_info *devc, char left_dev,
- char right_dev)
-{
- unsigned int reg_08, reg_09;
-
- reg_08 = waveartist_cmd1_r(devc, WACMD_GET_LEVEL | 0x0800);
- reg_09 = waveartist_cmd1_r(devc, WACMD_GET_LEVEL | 0x0900);
-
- reg_08 = (reg_08 & ~0x3f) | right_dev << 3 | left_dev;
-
- waveartist_cmd3(devc, WACMD_SET_MIXER, reg_08, reg_09);
-}
-
-/*
- * Decode a recording mask into a mixer selection as follows:
- *
- * OSS Source WA Source Actual source
- * SOUND_MASK_IMIX Mixer Mixer output (same as AD1848)
- * SOUND_MASK_LINE Line Line in
- * SOUND_MASK_LINE1 Aux 1 Aux 1 in
- * SOUND_MASK_LINE2 Aux 2 Aux 2 in
- * SOUND_MASK_MIC Mic Microphone
- */
-static unsigned int
-waveartist_select_input(struct wavnc_info *devc, unsigned int recmask,
- unsigned char *dev_l, unsigned char *dev_r)
-{
- unsigned int recdev = ADC_MUX_NONE;
-
- if (recmask & SOUND_MASK_IMIX) {
- recmask = SOUND_MASK_IMIX;
- recdev = ADC_MUX_MIXER;
- } else if (recmask & SOUND_MASK_LINE2) {
- recmask = SOUND_MASK_LINE2;
- recdev = ADC_MUX_AUX2;
- } else if (recmask & SOUND_MASK_LINE1) {
- recmask = SOUND_MASK_LINE1;
- recdev = ADC_MUX_AUX1;
- } else if (recmask & SOUND_MASK_LINE) {
- recmask = SOUND_MASK_LINE;
- recdev = ADC_MUX_LINE;
- } else if (recmask & SOUND_MASK_MIC) {
- recmask = SOUND_MASK_MIC;
- recdev = ADC_MUX_MIC;
- }
-
- *dev_l = *dev_r = recdev;
-
- return recmask;
-}
-
-static int
-waveartist_decode_mixer(struct wavnc_info *devc, int dev,
- unsigned char lev_l,
- unsigned char lev_r)
-{
- switch (dev) {
- case SOUND_MIXER_VOLUME:
- case SOUND_MIXER_SYNTH:
- case SOUND_MIXER_PCM:
- case SOUND_MIXER_LINE:
- case SOUND_MIXER_MIC:
- case SOUND_MIXER_IGAIN:
- case SOUND_MIXER_LINE1:
- case SOUND_MIXER_LINE2:
- devc->levels[dev] = lev_l | lev_r << 8;
- break;
-
- case SOUND_MIXER_IMIX:
- break;
-
- default:
- dev = -EINVAL;
- break;
- }
-
- return dev;
-}
-
-static int waveartist_get_mixer(struct wavnc_info *devc, int dev)
-{
- return devc->levels[dev];
-}
-
-static const struct waveartist_mixer_info waveartist_mixer = {
- .supported_devs = SUPPORTED_MIXER_DEVICES | SOUND_MASK_IGAIN,
- .recording_devs = SOUND_MASK_LINE | SOUND_MASK_MIC |
- SOUND_MASK_LINE1 | SOUND_MASK_LINE2 |
- SOUND_MASK_IMIX,
- .stereo_devs = (SUPPORTED_MIXER_DEVICES | SOUND_MASK_IGAIN) & ~
- (SOUND_MASK_SPEAKER | SOUND_MASK_IMIX),
- .select_input = waveartist_select_input,
- .decode_mixer = waveartist_decode_mixer,
- .get_mixer = waveartist_get_mixer,
-};
-
-static void
-waveartist_set_recmask(struct wavnc_info *devc, unsigned int recmask)
-{
- unsigned char dev_l, dev_r;
-
- recmask &= devc->mix->recording_devs;
-
- /*
- * If more than one recording device selected,
- * disable the device that is currently in use.
- */
- if (hweight32(recmask) > 1)
- recmask &= ~devc->recmask;
-
- /*
- * Translate the recording device mask into
- * the ADC multiplexer settings.
- */
- devc->recmask = devc->mix->select_input(devc, recmask,
- &dev_l, &dev_r);
-
- waveartist_set_adc_mux(devc, dev_l, dev_r);
-}
-
-static int
-waveartist_set_mixer(struct wavnc_info *devc, int dev, unsigned int level)
-{
- unsigned int lev_left = level & 0x00ff;
- unsigned int lev_right = (level & 0xff00) >> 8;
-
- if (lev_left > 100)
- lev_left = 100;
- if (lev_right > 100)
- lev_right = 100;
-
- /*
- * Mono devices have their right volume forced to their
- * left volume. (from ALSA driver OSS emulation).
- */
- if (!(devc->mix->stereo_devs & (1 << dev)))
- lev_right = lev_left;
-
- dev = devc->mix->decode_mixer(devc, dev, lev_left, lev_right);
-
- if (dev >= 0)
- waveartist_mixer_update(devc, dev);
-
- return dev < 0 ? dev : 0;
-}
-
-static int
-waveartist_mixer_ioctl(int dev, unsigned int cmd, void __user * arg)
-{
- struct wavnc_info *devc = (struct wavnc_info *)audio_devs[dev]->devc;
- int ret = 0, val, nr;
-
- /*
- * All SOUND_MIXER_* ioctls use type 'M'
- */
- if (((cmd >> 8) & 255) != 'M')
- return -ENOIOCTLCMD;
-
-#ifdef CONFIG_ARCH_NETWINDER
- if (machine_is_netwinder()) {
- ret = vnc_private_ioctl(dev, cmd, arg);
- if (ret != -ENOIOCTLCMD)
- return ret;
- else
- ret = 0;
- }
-#endif
-
- nr = cmd & 0xff;
-
- if (_SIOC_DIR(cmd) & _SIOC_WRITE) {
- if (get_user(val, (int __user *)arg))
- return -EFAULT;
-
- switch (nr) {
- case SOUND_MIXER_RECSRC:
- waveartist_set_recmask(devc, val);
- break;
-
- default:
- ret = -EINVAL;
- if (nr < SOUND_MIXER_NRDEVICES &&
- devc->mix->supported_devs & (1 << nr))
- ret = waveartist_set_mixer(devc, nr, val);
- }
- }
-
- if (ret == 0 && _SIOC_DIR(cmd) & _SIOC_READ) {
- ret = -EINVAL;
-
- switch (nr) {
- case SOUND_MIXER_RECSRC:
- ret = devc->recmask;
- break;
-
- case SOUND_MIXER_DEVMASK:
- ret = devc->mix->supported_devs;
- break;
-
- case SOUND_MIXER_STEREODEVS:
- ret = devc->mix->stereo_devs;
- break;
-
- case SOUND_MIXER_RECMASK:
- ret = devc->mix->recording_devs;
- break;
-
- case SOUND_MIXER_CAPS:
- ret = SOUND_CAP_EXCL_INPUT;
- break;
-
- default:
- if (nr < SOUND_MIXER_NRDEVICES)
- ret = devc->mix->get_mixer(devc, nr);
- break;
- }
-
- if (ret >= 0)
- ret = put_user(ret, (int __user *)arg) ? -EFAULT : 0;
- }
-
- return ret;
-}
-
-static struct mixer_operations waveartist_mixer_operations =
-{
- .owner = THIS_MODULE,
- .id = "WaveArtist",
- .name = "WaveArtist",
- .ioctl = waveartist_mixer_ioctl
-};
-
-static void
-waveartist_mixer_reset(struct wavnc_info *devc)
-{
- int i;
-
- if (debug_flg & DEBUG_MIXER)
- printk("%s: mixer_reset\n", devc->hw.name);
-
- /*
- * reset mixer cmd
- */
- waveartist_cmd1(devc, WACMD_RST_MIXER);
-
- /*
- * set input for ADC to come from 'quiet'
- * turn on default modes
- */
- waveartist_cmd3(devc, WACMD_SET_MIXER, 0x9800, 0xa836);
-
- /*
- * set mixer input select to none, RX filter gains 0 dB
- */
- waveartist_cmd3(devc, WACMD_SET_MIXER, 0x4c00, 0x8c00);
-
- /*
- * set bit 0 reg 2 to 1 - unmute MonoOut
- */
- waveartist_cmd3(devc, WACMD_SET_MIXER, 0x2801, 0x6800);
-
- /* set default input device = internal mic
- * current recording device = none
- */
- waveartist_set_recmask(devc, 0);
-
- for (i = 0; i < SOUND_MIXER_NRDEVICES; i++)
- waveartist_mixer_update(devc, i);
-}
-
-static int __init waveartist_init(struct wavnc_info *devc)
-{
- struct wavnc_port_info *portc;
- char rev[3], dev_name[64];
- int my_dev;
-
- if (waveartist_reset(devc))
- return -ENODEV;
-
- sprintf(dev_name, "%s (%s", devc->hw.name, devc->chip_name);
-
- if (waveartist_getrev(devc, rev)) {
- strcat(dev_name, " rev. ");
- strcat(dev_name, rev);
- }
- strcat(dev_name, ")");
-
- conf_printf2(dev_name, devc->hw.io_base, devc->hw.irq,
- devc->hw.dma, devc->hw.dma2);
-
- portc = kzalloc(sizeof(struct wavnc_port_info), GFP_KERNEL);
- if (portc == NULL)
- goto nomem;
-
- my_dev = sound_install_audiodrv(AUDIO_DRIVER_VERSION, dev_name,
- &waveartist_audio_driver, sizeof(struct audio_driver),
- devc->audio_flags, AFMT_U8 | AFMT_S16_LE | AFMT_S8,
- devc, devc->hw.dma, devc->hw.dma2);
-
- if (my_dev < 0)
- goto free;
-
- audio_devs[my_dev]->portc = portc;
-
- waveartist_mixer_reset(devc);
-
- /*
- * clear any pending interrupt
- */
- waveartist_iack(devc);
-
- if (request_irq(devc->hw.irq, waveartist_intr, 0, devc->hw.name, devc) < 0) {
- printk(KERN_ERR "%s: IRQ %d in use\n",
- devc->hw.name, devc->hw.irq);
- goto uninstall;
- }
-
- if (sound_alloc_dma(devc->hw.dma, devc->hw.name)) {
- printk(KERN_ERR "%s: Can't allocate DMA%d\n",
- devc->hw.name, devc->hw.dma);
- goto uninstall_irq;
- }
-
- if (devc->hw.dma != devc->hw.dma2 && devc->hw.dma2 != NO_DMA)
- if (sound_alloc_dma(devc->hw.dma2, devc->hw.name)) {
- printk(KERN_ERR "%s: can't allocate DMA%d\n",
- devc->hw.name, devc->hw.dma2);
- goto uninstall_dma;
- }
-
- waveartist_set_ctlr(&devc->hw, 0, DMA1_IE | DMA0_IE);
-
- audio_devs[my_dev]->mixer_dev =
- sound_install_mixer(MIXER_DRIVER_VERSION,
- dev_name,
- &waveartist_mixer_operations,
- sizeof(struct mixer_operations),
- devc);
-
- return my_dev;
-
-uninstall_dma:
- sound_free_dma(devc->hw.dma);
-
-uninstall_irq:
- free_irq(devc->hw.irq, devc);
-
-uninstall:
- sound_unload_audiodev(my_dev);
-
-free:
- kfree(portc);
-
-nomem:
- return -1;
-}
-
-static int __init probe_waveartist(struct address_info *hw_config)
-{
- struct wavnc_info *devc = &adev_info[nr_waveartist_devs];
-
- if (nr_waveartist_devs >= MAX_AUDIO_DEV) {
- printk(KERN_WARNING "waveartist: too many audio devices\n");
- return 0;
- }
-
- if (!request_region(hw_config->io_base, 15, hw_config->name)) {
- printk(KERN_WARNING "WaveArtist: I/O port conflict\n");
- return 0;
- }
-
- if (hw_config->irq > 15 || hw_config->irq < 0) {
- release_region(hw_config->io_base, 15);
- printk(KERN_WARNING "WaveArtist: Bad IRQ %d\n",
- hw_config->irq);
- return 0;
- }
-
- if (hw_config->dma != 3) {
- release_region(hw_config->io_base, 15);
- printk(KERN_WARNING "WaveArtist: Bad DMA %d\n",
- hw_config->dma);
- return 0;
- }
-
- hw_config->name = "WaveArtist";
- devc->hw = *hw_config;
- devc->open_mode = 0;
- devc->chip_name = "RWA-010";
-
- return 1;
-}
-
-static void __init
-attach_waveartist(struct address_info *hw, const struct waveartist_mixer_info *mix)
-{
- struct wavnc_info *devc = &adev_info[nr_waveartist_devs];
-
- /*
- * NOTE! If irq < 0, there is another driver which has allocated the
- * IRQ so that this driver doesn't need to allocate/deallocate it.
- * The actually used IRQ is ABS(irq).
- */
- devc->hw = *hw;
- devc->hw.irq = (hw->irq > 0) ? hw->irq : 0;
- devc->open_mode = 0;
- devc->playback_dev = 0;
- devc->record_dev = 0;
- devc->audio_flags = DMA_AUTOMODE;
- devc->levels = levels;
-
- if (hw->dma != hw->dma2 && hw->dma2 != NO_DMA)
- devc->audio_flags |= DMA_DUPLEX;
-
- devc->mix = mix;
- devc->dev_no = waveartist_init(devc);
-
- if (devc->dev_no < 0)
- release_region(hw->io_base, 15);
- else {
-#ifdef CONFIG_ARCH_NETWINDER
- if (machine_is_netwinder()) {
- setup_timer(&vnc_timer, vnc_slider_tick,
- nr_waveartist_devs);
- mod_timer(&vnc_timer, jiffies);
-
- vnc_configure_mixer(devc, 0);
-
- devc->no_autoselect = 1;
- }
-#endif
- nr_waveartist_devs += 1;
- }
-}
-
-static void __exit unload_waveartist(struct address_info *hw)
-{
- struct wavnc_info *devc = NULL;
- int i;
-
- for (i = 0; i < nr_waveartist_devs; i++)
- if (hw->io_base == adev_info[i].hw.io_base) {
- devc = adev_info + i;
- break;
- }
-
- if (devc != NULL) {
- int mixer;
-
-#ifdef CONFIG_ARCH_NETWINDER
- if (machine_is_netwinder())
- del_timer(&vnc_timer);
-#endif
-
- release_region(devc->hw.io_base, 15);
-
- waveartist_set_ctlr(&devc->hw, DMA1_IE|DMA0_IE, 0);
-
- if (devc->hw.irq >= 0)
- free_irq(devc->hw.irq, devc);
-
- sound_free_dma(devc->hw.dma);
-
- if (devc->hw.dma != devc->hw.dma2 &&
- devc->hw.dma2 != NO_DMA)
- sound_free_dma(devc->hw.dma2);
-
- mixer = audio_devs[devc->dev_no]->mixer_dev;
-
- if (mixer >= 0)
- sound_unload_mixerdev(mixer);
-
- if (devc->dev_no >= 0)
- sound_unload_audiodev(devc->dev_no);
-
- nr_waveartist_devs -= 1;
-
- for (; i < nr_waveartist_devs; i++)
- adev_info[i] = adev_info[i + 1];
- } else
- printk(KERN_WARNING "waveartist: can't find device "
- "to unload\n");
-}
-
-#ifdef CONFIG_ARCH_NETWINDER
-
-/*
- * Rebel.com Netwinder specifics...
- */
-
-#include <asm/hardware/dec21285.h>
-
-#define VNC_TIMER_PERIOD (HZ/4) //check slider 4 times/sec
-
-#define MIXER_PRIVATE3_RESET 0x53570000
-#define MIXER_PRIVATE3_READ 0x53570001
-#define MIXER_PRIVATE3_WRITE 0x53570002
-
-#define VNC_MUTE_INTERNAL_SPKR 0x01 //the sw mute on/off control bit
-#define VNC_MUTE_LINE_OUT 0x10
-#define VNC_PHONE_DETECT 0x20
-#define VNC_HANDSET_DETECT 0x40
-#define VNC_DISABLE_AUTOSWITCH 0x80
-
-static inline void
-vnc_mute_spkr(struct wavnc_info *devc)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&nw_gpio_lock, flags);
- nw_cpld_modify(CPLD_UNMUTE, devc->spkr_mute_state ? 0 : CPLD_UNMUTE);
- raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
-}
-
-static void
-vnc_mute_lout(struct wavnc_info *devc)
-{
- unsigned int left, right;
-
- left = waveartist_cmd1_r(devc, WACMD_GET_LEVEL);
- right = waveartist_cmd1_r(devc, WACMD_GET_LEVEL | 0x400);
-
- if (devc->line_mute_state) {
- left &= ~1;
- right &= ~1;
- } else {
- left |= 1;
- right |= 1;
- }
- waveartist_cmd3(devc, WACMD_SET_MIXER, left, right);
-
-}
-
-static int
-vnc_volume_slider(struct wavnc_info *devc)
-{
- static signed int old_slider_volume;
- unsigned long flags;
- signed int volume = 255;
-
- *CSR_TIMER1_LOAD = 0x00ffffff;
-
- spin_lock_irqsave(&waveartist_lock, flags);
-
- outb(0xFF, 0x201);
- *CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_DIV1;
-
- while (volume && (inb(0x201) & 0x01))
- volume--;
-
- *CSR_TIMER1_CNTL = 0;
-
- spin_unlock_irqrestore(&waveartist_lock,flags);
-
- volume = 0x00ffffff - *CSR_TIMER1_VALUE;
-
-
-#ifndef REVERSE
- volume = 150 - (volume >> 5);
-#else
- volume = (volume >> 6) - 25;
-#endif
-
- if (volume < 0)
- volume = 0;
-
- if (volume > 100)
- volume = 100;
-
- /*
- * slider quite often reads +-8, so debounce this random noise
- */
- if (abs(volume - old_slider_volume) > 7) {
- old_slider_volume = volume;
-
- if (debug_flg & DEBUG_MIXER)
- printk(KERN_DEBUG "Slider volume: %d.\n", volume);
- }
-
- return old_slider_volume;
-}
-
-/*
- * Decode a recording mask into a mixer selection on the NetWinder
- * as follows:
- *
- * OSS Source WA Source Actual source
- * SOUND_MASK_IMIX Mixer Mixer output (same as AD1848)
- * SOUND_MASK_LINE Line Line in
- * SOUND_MASK_LINE1 Left Mic Handset
- * SOUND_MASK_PHONEIN Left Aux Telephone microphone
- * SOUND_MASK_MIC Right Mic Builtin microphone
- */
-static unsigned int
-netwinder_select_input(struct wavnc_info *devc, unsigned int recmask,
- unsigned char *dev_l, unsigned char *dev_r)
-{
- unsigned int recdev_l = ADC_MUX_NONE, recdev_r = ADC_MUX_NONE;
-
- if (recmask & SOUND_MASK_IMIX) {
- recmask = SOUND_MASK_IMIX;
- recdev_l = ADC_MUX_MIXER;
- recdev_r = ADC_MUX_MIXER;
- } else if (recmask & SOUND_MASK_LINE) {
- recmask = SOUND_MASK_LINE;
- recdev_l = ADC_MUX_LINE;
- recdev_r = ADC_MUX_LINE;
- } else if (recmask & SOUND_MASK_LINE1) {
- recmask = SOUND_MASK_LINE1;
- waveartist_cmd1(devc, WACMD_SET_MONO); /* left */
- recdev_l = ADC_MUX_MIC;
- recdev_r = ADC_MUX_NONE;
- } else if (recmask & SOUND_MASK_PHONEIN) {
- recmask = SOUND_MASK_PHONEIN;
- waveartist_cmd1(devc, WACMD_SET_MONO); /* left */
- recdev_l = ADC_MUX_AUX1;
- recdev_r = ADC_MUX_NONE;
- } else if (recmask & SOUND_MASK_MIC) {
- recmask = SOUND_MASK_MIC;
- waveartist_cmd1(devc, WACMD_SET_MONO | 0x100); /* right */
- recdev_l = ADC_MUX_NONE;
- recdev_r = ADC_MUX_MIC;
- }
-
- *dev_l = recdev_l;
- *dev_r = recdev_r;
-
- return recmask;
-}
-
-static int
-netwinder_decode_mixer(struct wavnc_info *devc, int dev, unsigned char lev_l,
- unsigned char lev_r)
-{
- switch (dev) {
- case SOUND_MIXER_VOLUME:
- case SOUND_MIXER_SYNTH:
- case SOUND_MIXER_PCM:
- case SOUND_MIXER_LINE:
- case SOUND_MIXER_IGAIN:
- devc->levels[dev] = lev_l | lev_r << 8;
- break;
-
- case SOUND_MIXER_MIC: /* right mic only */
- devc->levels[SOUND_MIXER_MIC] &= 0xff;
- devc->levels[SOUND_MIXER_MIC] |= lev_l << 8;
- break;
-
- case SOUND_MIXER_LINE1: /* left mic only */
- devc->levels[SOUND_MIXER_MIC] &= 0xff00;
- devc->levels[SOUND_MIXER_MIC] |= lev_l;
- dev = SOUND_MIXER_MIC;
- break;
-
- case SOUND_MIXER_PHONEIN: /* left aux only */
- devc->levels[SOUND_MIXER_LINE1] = lev_l;
- dev = SOUND_MIXER_LINE1;
- break;
-
- case SOUND_MIXER_IMIX:
- case SOUND_MIXER_PHONEOUT:
- break;
-
- default:
- dev = -EINVAL;
- break;
- }
- return dev;
-}
-
-static int netwinder_get_mixer(struct wavnc_info *devc, int dev)
-{
- int levels;
-
- switch (dev) {
- case SOUND_MIXER_VOLUME:
- case SOUND_MIXER_SYNTH:
- case SOUND_MIXER_PCM:
- case SOUND_MIXER_LINE:
- case SOUND_MIXER_IGAIN:
- levels = devc->levels[dev];
- break;
-
- case SOUND_MIXER_MIC: /* builtin mic: right mic only */
- levels = devc->levels[SOUND_MIXER_MIC] >> 8;
- levels |= levels << 8;
- break;
-
- case SOUND_MIXER_LINE1: /* handset mic: left mic only */
- levels = devc->levels[SOUND_MIXER_MIC] & 0xff;
- levels |= levels << 8;
- break;
-
- case SOUND_MIXER_PHONEIN: /* phone mic: left aux1 only */
- levels = devc->levels[SOUND_MIXER_LINE1] & 0xff;
- levels |= levels << 8;
- break;
-
- default:
- levels = 0;
- }
-
- return levels;
-}
-
-/*
- * Waveartist specific mixer information.
- */
-static const struct waveartist_mixer_info netwinder_mixer = {
- .supported_devs = SOUND_MASK_VOLUME | SOUND_MASK_SYNTH |
- SOUND_MASK_PCM | SOUND_MASK_SPEAKER |
- SOUND_MASK_LINE | SOUND_MASK_MIC |
- SOUND_MASK_IMIX | SOUND_MASK_LINE1 |
- SOUND_MASK_PHONEIN | SOUND_MASK_PHONEOUT|
- SOUND_MASK_IGAIN,
-
- .recording_devs = SOUND_MASK_LINE | SOUND_MASK_MIC |
- SOUND_MASK_IMIX | SOUND_MASK_LINE1 |
- SOUND_MASK_PHONEIN,
-
- .stereo_devs = SOUND_MASK_VOLUME | SOUND_MASK_SYNTH |
- SOUND_MASK_PCM | SOUND_MASK_LINE |
- SOUND_MASK_IMIX | SOUND_MASK_IGAIN,
-
- .select_input = netwinder_select_input,
- .decode_mixer = netwinder_decode_mixer,
- .get_mixer = netwinder_get_mixer,
-};
-
-static void
-vnc_configure_mixer(struct wavnc_info *devc, unsigned int recmask)
-{
- if (!devc->no_autoselect) {
- if (devc->handset_detect) {
- recmask = SOUND_MASK_LINE1;
- devc->spkr_mute_state = devc->line_mute_state = 1;
- } else if (devc->telephone_detect) {
- recmask = SOUND_MASK_PHONEIN;
- devc->spkr_mute_state = devc->line_mute_state = 1;
- } else {
- /* unless someone has asked for LINE-IN,
- * we default to MIC
- */
- if ((devc->recmask & SOUND_MASK_LINE) == 0)
- devc->recmask = SOUND_MASK_MIC;
- devc->spkr_mute_state = devc->line_mute_state = 0;
- }
- vnc_mute_spkr(devc);
- vnc_mute_lout(devc);
-
- if (recmask != devc->recmask)
- waveartist_set_recmask(devc, recmask);
- }
-}
-
-static int
-vnc_slider(struct wavnc_info *devc)
-{
- signed int slider_volume;
- unsigned int temp, old_hs, old_td;
-
- /*
- * read the "buttons" state.
- * Bit 4 = 0 means handset present
- * Bit 5 = 1 means phone offhook
- */
- temp = inb(0x201);
-
- old_hs = devc->handset_detect;
- old_td = devc->telephone_detect;
-
- devc->handset_detect = !(temp & 0x10);
- devc->telephone_detect = !!(temp & 0x20);
-
- if (!devc->no_autoselect &&
- (old_hs != devc->handset_detect ||
- old_td != devc->telephone_detect))
- vnc_configure_mixer(devc, devc->recmask);
-
- slider_volume = vnc_volume_slider(devc);
-
- /*
- * If we're using software controlled volume, and
- * the slider moves by more than 20%, then we
- * switch back to slider controlled volume.
- */
- if (abs(devc->slider_vol - slider_volume) > 20)
- devc->use_slider = 1;
-
- /*
- * use only left channel
- */
- temp = levels[SOUND_MIXER_VOLUME] & 0xFF;
-
- if (slider_volume != temp && devc->use_slider) {
- devc->slider_vol = slider_volume;
-
- waveartist_set_mixer(devc, SOUND_MIXER_VOLUME,
- slider_volume | slider_volume << 8);
-
- return 1;
- }
-
- return 0;
-}
-
-static void
-vnc_slider_tick(unsigned long data)
-{
- int next_timeout;
-
- if (vnc_slider(adev_info + data))
- next_timeout = 5; // mixer reported change
- else
- next_timeout = VNC_TIMER_PERIOD;
-
- mod_timer(&vnc_timer, jiffies + next_timeout);
-}
-
-static int
-vnc_private_ioctl(int dev, unsigned int cmd, int __user * arg)
-{
- struct wavnc_info *devc = (struct wavnc_info *)audio_devs[dev]->devc;
- int val;
-
- switch (cmd) {
- case SOUND_MIXER_PRIVATE1:
- {
- u_int prev_spkr_mute, prev_line_mute, prev_auto_state;
- int val;
-
- if (get_user(val, arg))
- return -EFAULT;
-
- /* check if parameter is logical */
- if (val & ~(VNC_MUTE_INTERNAL_SPKR |
- VNC_MUTE_LINE_OUT |
- VNC_DISABLE_AUTOSWITCH))
- return -EINVAL;
-
- prev_auto_state = devc->no_autoselect;
- prev_spkr_mute = devc->spkr_mute_state;
- prev_line_mute = devc->line_mute_state;
-
- devc->no_autoselect = (val & VNC_DISABLE_AUTOSWITCH) ? 1 : 0;
- devc->spkr_mute_state = (val & VNC_MUTE_INTERNAL_SPKR) ? 1 : 0;
- devc->line_mute_state = (val & VNC_MUTE_LINE_OUT) ? 1 : 0;
-
- if (prev_spkr_mute != devc->spkr_mute_state)
- vnc_mute_spkr(devc);
-
- if (prev_line_mute != devc->line_mute_state)
- vnc_mute_lout(devc);
-
- if (prev_auto_state != devc->no_autoselect)
- vnc_configure_mixer(devc, devc->recmask);
-
- return 0;
- }
-
- case SOUND_MIXER_PRIVATE2:
- if (get_user(val, arg))
- return -EFAULT;
-
- switch (val) {
-#define VNC_SOUND_PAUSE 0x53 //to pause the DSP
-#define VNC_SOUND_RESUME 0x57 //to unpause the DSP
- case VNC_SOUND_PAUSE:
- waveartist_cmd1(devc, 0x16);
- break;
-
- case VNC_SOUND_RESUME:
- waveartist_cmd1(devc, 0x18);
- break;
-
- default:
- return -EINVAL;
- }
- return 0;
-
- /* private ioctl to allow bulk access to waveartist */
- case SOUND_MIXER_PRIVATE3:
- {
- unsigned long flags;
- int mixer_reg[15], i, val;
-
- if (get_user(val, arg))
- return -EFAULT;
- if (copy_from_user(mixer_reg, (void *)val, sizeof(mixer_reg)))
- return -EFAULT;
-
- switch (mixer_reg[14]) {
- case MIXER_PRIVATE3_RESET:
- waveartist_mixer_reset(devc);
- break;
-
- case MIXER_PRIVATE3_WRITE:
- waveartist_cmd3(devc, WACMD_SET_MIXER, mixer_reg[0], mixer_reg[4]);
- waveartist_cmd3(devc, WACMD_SET_MIXER, mixer_reg[1], mixer_reg[5]);
- waveartist_cmd3(devc, WACMD_SET_MIXER, mixer_reg[2], mixer_reg[6]);
- waveartist_cmd3(devc, WACMD_SET_MIXER, mixer_reg[3], mixer_reg[7]);
- waveartist_cmd3(devc, WACMD_SET_MIXER, mixer_reg[8], mixer_reg[9]);
-
- waveartist_cmd3(devc, WACMD_SET_LEVEL, mixer_reg[10], mixer_reg[11]);
- waveartist_cmd3(devc, WACMD_SET_LEVEL, mixer_reg[12], mixer_reg[13]);
- break;
-
- case MIXER_PRIVATE3_READ:
- spin_lock_irqsave(&waveartist_lock, flags);
-
- for (i = 0x30; i < 14 << 8; i += 1 << 8)
- waveartist_cmd(devc, 1, &i, 1, mixer_reg + (i >> 8));
-
- spin_unlock_irqrestore(&waveartist_lock, flags);
-
- if (copy_to_user((void *)val, mixer_reg, sizeof(mixer_reg)))
- return -EFAULT;
- break;
-
- default:
- return -EINVAL;
- }
- return 0;
- }
-
- /* read back the state from PRIVATE1 */
- case SOUND_MIXER_PRIVATE4:
- val = (devc->spkr_mute_state ? VNC_MUTE_INTERNAL_SPKR : 0) |
- (devc->line_mute_state ? VNC_MUTE_LINE_OUT : 0) |
- (devc->handset_detect ? VNC_HANDSET_DETECT : 0) |
- (devc->telephone_detect ? VNC_PHONE_DETECT : 0) |
- (devc->no_autoselect ? VNC_DISABLE_AUTOSWITCH : 0);
-
- return put_user(val, arg) ? -EFAULT : 0;
- }
-
- if (_SIOC_DIR(cmd) & _SIOC_WRITE) {
- /*
- * special case for master volume: if we
- * received this call - switch from hw
- * volume control to a software volume
- * control, till the hw volume is modified
- * to signal that user wants to be back in
- * hardware...
- */
- if ((cmd & 0xff) == SOUND_MIXER_VOLUME)
- devc->use_slider = 0;
-
- /* speaker output */
- if ((cmd & 0xff) == SOUND_MIXER_SPEAKER) {
- unsigned int val, l, r;
-
- if (get_user(val, arg))
- return -EFAULT;
-
- l = val & 0x7f;
- r = (val & 0x7f00) >> 8;
- val = (l + r) / 2;
- devc->levels[SOUND_MIXER_SPEAKER] = val | (val << 8);
- devc->spkr_mute_state = (val <= 50);
- vnc_mute_spkr(devc);
- return 0;
- }
- }
-
- return -ENOIOCTLCMD;
-}
-
-#endif
-
-static struct address_info cfg;
-
-static int attached;
-
-static int __initdata io = 0;
-static int __initdata irq = 0;
-static int __initdata dma = 0;
-static int __initdata dma2 = 0;
-
-
-static int __init init_waveartist(void)
-{
- const struct waveartist_mixer_info *mix;
-
- if (!io && machine_is_netwinder()) {
- /*
- * The NetWinder WaveArtist is at a fixed address.
- * If the user does not supply an address, use the
- * well-known parameters.
- */
- io = 0x250;
- irq = 12;
- dma = 3;
- dma2 = 7;
- }
-
- mix = &waveartist_mixer;
-#ifdef CONFIG_ARCH_NETWINDER
- if (machine_is_netwinder())
- mix = &netwinder_mixer;
-#endif
-
- cfg.io_base = io;
- cfg.irq = irq;
- cfg.dma = dma;
- cfg.dma2 = dma2;
-
- if (!probe_waveartist(&cfg))
- return -ENODEV;
-
- attach_waveartist(&cfg, mix);
- attached = 1;
-
- return 0;
-}
-
-static void __exit cleanup_waveartist(void)
-{
- if (attached)
- unload_waveartist(&cfg);
-}
-
-module_init(init_waveartist);
-module_exit(cleanup_waveartist);
-
-#ifndef MODULE
-static int __init setup_waveartist(char *str)
-{
- /* io, irq, dma, dma2 */
- int ints[5];
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- io = ints[1];
- irq = ints[2];
- dma = ints[3];
- dma2 = ints[4];
-
- return 1;
-}
-__setup("waveartist=", setup_waveartist);
-#endif
-
-MODULE_DESCRIPTION("Rockwell WaveArtist RWA-010 sound driver");
-module_param_hw(io, int, ioport, 0); /* IO base */
-module_param_hw(irq, int, irq, 0); /* IRQ */
-module_param_hw(dma, int, dma, 0); /* DMA */
-module_param_hw(dma2, int, dma, 0); /* DMA2 */
-MODULE_LICENSE("GPL");
diff --git a/sound/oss/waveartist.h b/sound/oss/waveartist.h
deleted file mode 100644
index f18d74b26483..000000000000
--- a/sound/oss/waveartist.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * linux/sound/oss/waveartist.h
- *
- * def file for Rockwell RWA010 chip set, as installed in Rebel.com NetWinder
- */
-
-//registers
-#define CMDR 0
-#define DATR 2
-#define CTLR 4
-#define STATR 5
-#define IRQSTAT 12
-
-//bit defs
-//reg STATR
-#define CMD_WE 0x80
-#define CMD_RF 0x40
-#define DAT_WE 0x20
-#define DAT_RF 0x10
-
-#define IRQ_REQ 0x08
-#define DMA1 0x04
-#define DMA0 0x02
-
-//bit defs
-//reg CTLR
-#define CMD_WEIE 0x80
-#define CMD_RFIE 0x40
-#define DAT_WEIE 0x20
-#define DAT_RFIE 0x10
-
-#define RESET 0x08
-#define DMA1_IE 0x04
-#define DMA0_IE 0x02
-#define IRQ_ACK 0x01
-
-//commands
-
-#define WACMD_SYSTEMID 0x00
-#define WACMD_GETREV 0x00
-#define WACMD_INPUTFORMAT 0x10 //0-8S, 1-16S, 2-8U
-#define WACMD_INPUTCHANNELS 0x11 //1-Mono, 2-Stereo
-#define WACMD_INPUTSPEED 0x12 //sampling rate
-#define WACMD_INPUTDMA 0x13 //0-8bit, 1-16bit, 2-PIO
-#define WACMD_INPUTSIZE 0x14 //samples to interrupt
-#define WACMD_INPUTSTART 0x15 //start ADC
-#define WACMD_INPUTPAUSE 0x16 //pause ADC
-#define WACMD_INPUTSTOP 0x17 //stop ADC
-#define WACMD_INPUTRESUME 0x18 //resume ADC
-#define WACMD_INPUTPIO 0x19 //PIO ADC
-
-#define WACMD_OUTPUTFORMAT 0x20 //0-8S, 1-16S, 2-8U
-#define WACMD_OUTPUTCHANNELS 0x21 //1-Mono, 2-Stereo
-#define WACMD_OUTPUTSPEED 0x22 //sampling rate
-#define WACMD_OUTPUTDMA 0x23 //0-8bit, 1-16bit, 2-PIO
-#define WACMD_OUTPUTSIZE 0x24 //samples to interrupt
-#define WACMD_OUTPUTSTART 0x25 //start ADC
-#define WACMD_OUTPUTPAUSE 0x26 //pause ADC
-#define WACMD_OUTPUTSTOP 0x27 //stop ADC
-#define WACMD_OUTPUTRESUME 0x28 //resume ADC
-#define WACMD_OUTPUTPIO 0x29 //PIO ADC
-
-#define WACMD_GET_LEVEL 0x30
-#define WACMD_SET_LEVEL 0x31
-#define WACMD_SET_MIXER 0x32
-#define WACMD_RST_MIXER 0x33
-#define WACMD_SET_MONO 0x34
-
-/*
- * Definitions for left/right recording input mux
- */
-#define ADC_MUX_NONE 0
-#define ADC_MUX_MIXER 1
-#define ADC_MUX_LINE 2
-#define ADC_MUX_AUX2 3
-#define ADC_MUX_AUX1 4
-#define ADC_MUX_MIC 5
-
-/*
- * Definitions for mixer gain settings
- */
-#define MIX_GAIN_LINE 0 /* line in */
-#define MIX_GAIN_AUX1 1 /* aux1 */
-#define MIX_GAIN_AUX2 2 /* aux2 */
-#define MIX_GAIN_XMIC 3 /* crossover mic */
-#define MIX_GAIN_MIC 4 /* normal mic */
-#define MIX_GAIN_PREMIC 5 /* preamp mic */
-#define MIX_GAIN_OUT 6 /* output */
-#define MIX_GAIN_MONO 7 /* mono in */
-
-int wa_sendcmd(unsigned int cmd);
-int wa_writecmd(unsigned int cmd, unsigned int arg);
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index 70d023a85bf5..720361455c60 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -573,10 +573,8 @@ static void snd_card_asihpi_pcm_int_start(struct snd_pcm_substream *substream)
static void snd_card_asihpi_pcm_int_stop(struct snd_pcm_substream *substream)
{
- struct snd_card_asihpi_pcm *dpcm;
struct snd_card_asihpi *card;
- dpcm = (struct snd_card_asihpi_pcm *)substream->runtime->private_data;
card = snd_pcm_substream_chip(substream);
hpi_handle_error(hpi_adapter_set_property(card->hpi->adapter->index,
@@ -749,9 +747,9 @@ static inline unsigned int modulo_min(unsigned int a, unsigned int b,
/** Timer function, equivalent to interrupt service routine for cards
*/
-static void snd_card_asihpi_timer_function(unsigned long data)
+static void snd_card_asihpi_timer_function(struct timer_list *t)
{
- struct snd_card_asihpi_pcm *dpcm = (struct snd_card_asihpi_pcm *)data;
+ struct snd_card_asihpi_pcm *dpcm = from_timer(dpcm, t, timer);
struct snd_pcm_substream *substream = dpcm->substream;
struct snd_card_asihpi *card = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime;
@@ -863,7 +861,6 @@ static void snd_card_asihpi_timer_function(unsigned long data)
snd_pcm_group_for_each_entry(s, substream) {
struct snd_card_asihpi_pcm *ds = s->runtime->private_data;
- runtime = s->runtime;
/* don't link Cap and Play */
if (substream->stream != s->stream)
@@ -948,7 +945,7 @@ static void snd_card_asihpi_int_task(unsigned long data)
asihpi = (struct snd_card_asihpi *)a->snd_card->private_data;
if (asihpi->llmode_streampriv)
snd_card_asihpi_timer_function(
- (unsigned long)asihpi->llmode_streampriv);
+ &asihpi->llmode_streampriv->timer);
}
static void snd_card_asihpi_isr(struct hpi_adapter *a)
@@ -1059,8 +1056,7 @@ static int snd_card_asihpi_playback_open(struct snd_pcm_substream *substream)
If internal and other stream playing, can't switch
*/
- setup_timer(&dpcm->timer, snd_card_asihpi_timer_function,
- (unsigned long) dpcm);
+ timer_setup(&dpcm->timer, snd_card_asihpi_timer_function, 0);
dpcm->substream = substream;
runtime->private_data = dpcm;
runtime->private_free = snd_card_asihpi_runtime_free;
@@ -1240,8 +1236,7 @@ static int snd_card_asihpi_capture_open(struct snd_pcm_substream *substream)
if (err)
return -EIO;
- setup_timer(&dpcm->timer, snd_card_asihpi_timer_function,
- (unsigned long) dpcm);
+ timer_setup(&dpcm->timer, snd_card_asihpi_timer_function, 0);
dpcm->substream = substream;
runtime->private_data = dpcm;
runtime->private_free = snd_card_asihpi_runtime_free;
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index c308a4f70550..4083c8b01619 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -2628,7 +2628,7 @@ static void vortex_spdif_init(vortex_t * vortex, int spdif_sr, int spdif_mode)
else
edi = 0x1ffff;
} else {
- i = edi = 0x800;
+ edi = 0x800;
}
/* this_04 and this_08 are the CASp4Src's (samplerate converters) */
vortex_src_setupchannel(vortex, this_04, edi, 0, 1,
diff --git a/sound/pci/ctxfi/cttimer.c b/sound/pci/ctxfi/cttimer.c
index 8f945341720b..08e874e9a7f6 100644
--- a/sound/pci/ctxfi/cttimer.c
+++ b/sound/pci/ctxfi/cttimer.c
@@ -63,9 +63,9 @@ struct ct_timer {
* system-timer-based updates
*/
-static void ct_systimer_callback(unsigned long data)
+static void ct_systimer_callback(struct timer_list *t)
{
- struct ct_timer_instance *ti = (struct ct_timer_instance *)data;
+ struct ct_timer_instance *ti = from_timer(ti, t, timer);
struct snd_pcm_substream *substream = ti->substream;
struct snd_pcm_runtime *runtime = substream->runtime;
struct ct_atc_pcm *apcm = ti->apcm;
@@ -93,8 +93,7 @@ static void ct_systimer_callback(unsigned long data)
static void ct_systimer_init(struct ct_timer_instance *ti)
{
- setup_timer(&ti->timer, ct_systimer_callback,
- (unsigned long)ti);
+ timer_setup(&ti->timer, ct_systimer_callback, 0);
}
static void ct_systimer_start(struct ct_timer_instance *ti)
diff --git a/sound/pci/echoaudio/midi.c b/sound/pci/echoaudio/midi.c
index 8c685ddb1a41..6045a115cffe 100644
--- a/sound/pci/echoaudio/midi.c
+++ b/sound/pci/echoaudio/midi.c
@@ -199,9 +199,9 @@ static int snd_echo_midi_output_open(struct snd_rawmidi_substream *substream)
-static void snd_echo_midi_output_write(unsigned long data)
+static void snd_echo_midi_output_write(struct timer_list *t)
{
- struct echoaudio *chip = (struct echoaudio *)data;
+ struct echoaudio *chip = from_timer(chip, t, timer);
unsigned long flags;
int bytes, sent, time;
unsigned char buf[MIDI_OUT_BUFFER_SIZE - 1];
@@ -257,8 +257,8 @@ static void snd_echo_midi_output_trigger(struct snd_rawmidi_substream *substream
spin_lock_irq(&chip->lock);
if (up) {
if (!chip->tinuse) {
- setup_timer(&chip->timer, snd_echo_midi_output_write,
- (unsigned long)chip);
+ timer_setup(&chip->timer, snd_echo_midi_output_write,
+ 0);
chip->tinuse = 1;
}
} else {
@@ -273,7 +273,7 @@ static void snd_echo_midi_output_trigger(struct snd_rawmidi_substream *substream
spin_unlock_irq(&chip->lock);
if (up && !chip->midi_full)
- snd_echo_midi_output_write((unsigned long)chip);
+ snd_echo_midi_output_write(&chip->timer);
}
diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
index cf05229b569b..bde0d1954f56 100644
--- a/sound/pci/emu10k1/emuproc.c
+++ b/sound/pci/emu10k1/emuproc.c
@@ -280,7 +280,6 @@ static void snd_emu10k1_proc_rates_read(struct snd_info_entry *entry,
struct snd_emu10k1 *emu = entry->private_data;
unsigned int val, tmp, n;
val = snd_emu10k1_ptr20_read(emu, CAPTURE_RATE_STATUS, 0);
- tmp = (val >> 16) & 0x8;
for (n = 0; n < 4; n++) {
tmp = val >> (16 + (n*4));
if (tmp & 0x8) snd_iprintf(buffer, "Channel %d: Rate=%d\n", n, samplerate[tmp & 0x7]);
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index d4cd6451fdca..39f79a6b5283 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -732,7 +732,7 @@ static void snd_es1371_codec_wait(struct snd_ac97 *ac97)
static void snd_es1371_adc_rate(struct ensoniq * ensoniq, unsigned int rate)
{
- unsigned int n, truncm, freq, result;
+ unsigned int n, truncm, freq;
mutex_lock(&ensoniq->src_mutex);
n = rate / 3000;
@@ -740,7 +740,6 @@ static void snd_es1371_adc_rate(struct ensoniq * ensoniq, unsigned int rate)
n--;
truncm = (21 * n - 1) | 1;
freq = ((48000UL << 15) / rate) * n;
- result = (48000UL << 15) / (freq / n);
if (rate >= 24000) {
if (truncm > 239)
truncm = 239;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index a0989d231fd0..c1f8e5479bf3 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -977,7 +977,7 @@ int snd_hda_codec_update_widgets(struct hda_codec *codec)
hda_nid_t fg;
int err;
- err = snd_hdac_refresh_widget_sysfs(&codec->core);
+ err = snd_hdac_refresh_widgets(&codec->core, true);
if (err < 0)
return err;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 28e265a88383..5cc65093d941 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -795,6 +795,8 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
hda_nid_t nid = path->path[i];
nums = snd_hda_get_conn_list(codec, nid, &conn);
+ if (nums < 0)
+ return;
type = get_wcaps_type(get_wcaps(codec, nid));
if (type == AC_WID_PIN ||
(type == AC_WID_AUD_IN && codec->single_adc_amp)) {
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 3e73d5c6ccfc..768ea8651993 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -27,6 +27,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/firmware.h>
+#include <linux/kernel.h>
#include <sound/core.h>
#include "hda_codec.h"
#include "hda_local.h"
@@ -3605,8 +3606,7 @@ static int ca0132_vnode_switch_set(struct snd_kcontrol *kcontrol,
static int ca0132_voicefx_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- unsigned int items = sizeof(ca0132_voicefx_presets)
- / sizeof(struct ct_voicefx_preset);
+ unsigned int items = ARRAY_SIZE(ca0132_voicefx_presets);
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
@@ -3635,10 +3635,8 @@ static int ca0132_voicefx_put(struct snd_kcontrol *kcontrol,
struct ca0132_spec *spec = codec->spec;
int i, err = 0;
int sel = ucontrol->value.enumerated.item[0];
- unsigned int items = sizeof(ca0132_voicefx_presets)
- / sizeof(struct ct_voicefx_preset);
- if (sel >= items)
+ if (sel >= ARRAY_SIZE(ca0132_voicefx_presets))
return 0;
codec_dbg(codec, "ca0132_voicefx_put: sel=%d, preset=%s\n",
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index dce0682c5001..db1a376e27c0 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1803,6 +1803,7 @@ enum {
ALC887_FIXUP_ASUS_BASS,
ALC887_FIXUP_BASS_CHMAP,
ALC1220_FIXUP_GB_DUAL_CODECS,
+ ALC1220_FIXUP_CLEVO_P950,
};
static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2020,6 +2021,23 @@ static void alc1220_fixup_gb_dual_codecs(struct hda_codec *codec,
}
}
+static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ hda_nid_t conn1[1] = { 0x0c };
+
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+
+ alc_update_coef_idx(codec, 0x7, 0, 0x3c3);
+ /* We therefore want to make sure 0x14 (front headphone) and
+ * 0x1b (speakers) use the stereo DAC 0x02
+ */
+ snd_hda_override_conn_list(codec, 0x14, 1, conn1);
+ snd_hda_override_conn_list(codec, 0x1b, 1, conn1);
+}
+
static const struct hda_fixup alc882_fixups[] = {
[ALC882_FIXUP_ABIT_AW9D_MAX] = {
.type = HDA_FIXUP_PINS,
@@ -2260,6 +2278,10 @@ static const struct hda_fixup alc882_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc1220_fixup_gb_dual_codecs,
},
+ [ALC1220_FIXUP_CLEVO_P950] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc1220_fixup_clevo_p950,
+ },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2333,6 +2355,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
+ SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -6366,6 +6389,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
{.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
{.id = ALC233_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
+ {.id = ALC700_FIXUP_INTEL_REFERENCE, .name = "alc700-ref"},
{}
};
#define ALC225_STANDARD_PINS \
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 0e66afa403a3..f1fe497c2f9d 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -2285,7 +2285,7 @@ static unsigned char snd_ice1712_read_i2c(struct snd_ice1712 *ice,
static int snd_ice1712_read_eeprom(struct snd_ice1712 *ice,
const char *modelname)
{
- int dev = 0xa0; /* EEPROM device address */
+ int dev = ICE_I2C_EEPROM_ADDR; /* I2C EEPROM device address */
unsigned int i, size;
struct snd_ice1712_card_info * const *tbl, *c;
diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
index 5cfba09c9761..8ae8742662a5 100644
--- a/sound/pci/ice1712/ice1712.h
+++ b/sound/pci/ice1712/ice1712.h
@@ -218,8 +218,9 @@
/*
- *
+ * I2C EEPROM Address
*/
+#define ICE_I2C_EEPROM_ADDR 0xA0
struct snd_ice1712;
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 04cd71c74e5c..c7b007164c99 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -599,9 +599,9 @@ static void snd_korg1212_SendStopAndWait(struct snd_korg1212 *korg1212)
}
/* timer callback for checking the ack of stop request */
-static void snd_korg1212_timer_func(unsigned long data)
+static void snd_korg1212_timer_func(struct timer_list *t)
{
- struct snd_korg1212 *korg1212 = (struct snd_korg1212 *) data;
+ struct snd_korg1212 *korg1212 = from_timer(korg1212, t, timer);
unsigned long flags;
spin_lock_irqsave(&korg1212->lock, flags);
@@ -2189,8 +2189,7 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
init_waitqueue_head(&korg1212->wait);
spin_lock_init(&korg1212->lock);
mutex_init(&korg1212->open_mutex);
- setup_timer(&korg1212->timer, snd_korg1212_timer_func,
- (unsigned long)korg1212);
+ timer_setup(&korg1212->timer, snd_korg1212_timer_func, 0);
korg1212->irq = -1;
korg1212->clkSource = K1212_CLKIDX_Local;
diff --git a/sound/pci/oxygen/xonar_dg.h b/sound/pci/oxygen/xonar_dg.h
index 7a1e8f9c48e7..24d97721c247 100644
--- a/sound/pci/oxygen/xonar_dg.h
+++ b/sound/pci/oxygen/xonar_dg.h
@@ -52,6 +52,6 @@ void dg_suspend(struct oxygen *chip);
void dg_resume(struct oxygen *chip);
void dg_cleanup(struct oxygen *chip);
-extern struct oxygen_model model_xonar_dg;
+extern const struct oxygen_model model_xonar_dg;
#endif
diff --git a/sound/pci/oxygen/xonar_dg_mixer.c b/sound/pci/oxygen/xonar_dg_mixer.c
index b885dac28a09..d22fbe8aebd0 100644
--- a/sound/pci/oxygen/xonar_dg_mixer.c
+++ b/sound/pci/oxygen/xonar_dg_mixer.c
@@ -449,7 +449,7 @@ static int dg_mixer_init(struct oxygen *chip)
return 0;
}
-struct oxygen_model model_xonar_dg = {
+const struct oxygen_model model_xonar_dg = {
.longname = "C-Media Oxygen HD Audio",
.chip = "CMI8786",
.init = dg_init,
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 9f0f73875f01..1bff4b1b39cd 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -1410,9 +1410,9 @@ static void snd_hdsp_midi_input_trigger(struct snd_rawmidi_substream *substream,
spin_unlock_irqrestore (&hdsp->lock, flags);
}
-static void snd_hdsp_midi_output_timer(unsigned long data)
+static void snd_hdsp_midi_output_timer(struct timer_list *t)
{
- struct hdsp_midi *hmidi = (struct hdsp_midi *) data;
+ struct hdsp_midi *hmidi = from_timer(hmidi, t, timer);
unsigned long flags;
snd_hdsp_midi_output_write(hmidi);
@@ -1439,8 +1439,8 @@ static void snd_hdsp_midi_output_trigger(struct snd_rawmidi_substream *substream
spin_lock_irqsave (&hmidi->lock, flags);
if (up) {
if (!hmidi->istimer) {
- setup_timer(&hmidi->timer, snd_hdsp_midi_output_timer,
- (unsigned long) hmidi);
+ timer_setup(&hmidi->timer, snd_hdsp_midi_output_timer,
+ 0);
mod_timer(&hmidi->timer, 1 + jiffies);
hmidi->istimer++;
}
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index f20d42714e4d..4c59983158e0 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -1946,9 +1946,9 @@ snd_hdspm_midi_input_trigger(struct snd_rawmidi_substream *substream, int up)
spin_unlock_irqrestore (&hdspm->lock, flags);
}
-static void snd_hdspm_midi_output_timer(unsigned long data)
+static void snd_hdspm_midi_output_timer(struct timer_list *t)
{
- struct hdspm_midi *hmidi = (struct hdspm_midi *) data;
+ struct hdspm_midi *hmidi = from_timer(hmidi, t, timer);
unsigned long flags;
snd_hdspm_midi_output_write(hmidi);
@@ -1976,8 +1976,8 @@ snd_hdspm_midi_output_trigger(struct snd_rawmidi_substream *substream, int up)
spin_lock_irqsave (&hmidi->lock, flags);
if (up) {
if (!hmidi->istimer) {
- setup_timer(&hmidi->timer, snd_hdspm_midi_output_timer,
- (unsigned long) hmidi);
+ timer_setup(&hmidi->timer,
+ snd_hdspm_midi_output_timer, 0);
mod_timer(&hmidi->timer, 1 + jiffies);
hmidi->istimer++;
}
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index fdc680ae8aa0..2b26311405a4 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -299,14 +299,14 @@ static void run_spu_dma(struct work_struct *work)
}
}
-static void aica_period_elapsed(unsigned long timer_var)
+static void aica_period_elapsed(struct timer_list *t)
{
+ struct snd_card_aica *dreamcastcard = from_timer(dreamcastcard,
+ t, timer);
+ struct snd_pcm_substream *substream = dreamcastcard->timer_substream;
/*timer function - so cannot sleep */
int play_period;
struct snd_pcm_runtime *runtime;
- struct snd_pcm_substream *substream;
- struct snd_card_aica *dreamcastcard;
- substream = (struct snd_pcm_substream *) timer_var;
runtime = substream->runtime;
dreamcastcard = substream->pcm->private_data;
/* Have we played out an additional period? */
@@ -336,12 +336,12 @@ static void spu_begin_dma(struct snd_pcm_substream *substream)
/*get the queue to do the work */
schedule_work(&(dreamcastcard->spu_dma_work));
/* Timer may already be running */
- if (unlikely(dreamcastcard->timer.data)) {
+ if (unlikely(dreamcastcard->timer_substream)) {
mod_timer(&dreamcastcard->timer, jiffies + 4);
return;
}
- setup_timer(&dreamcastcard->timer, aica_period_elapsed,
- (unsigned long) substream);
+ timer_setup(&dreamcastcard->timer, aica_period_elapsed, 0);
+ dreamcastcard->timer_substream = substream;
mod_timer(&dreamcastcard->timer, jiffies + 4);
}
@@ -379,7 +379,7 @@ static int snd_aicapcm_pcm_close(struct snd_pcm_substream
{
struct snd_card_aica *dreamcastcard = substream->pcm->private_data;
flush_work(&(dreamcastcard->spu_dma_work));
- if (dreamcastcard->timer.data)
+ if (dreamcastcard->timer_substream)
del_timer(&dreamcastcard->timer);
kfree(dreamcastcard->channel);
spu_disable();
@@ -600,7 +600,7 @@ static int snd_aica_probe(struct platform_device *devptr)
{
int err;
struct snd_card_aica *dreamcastcard;
- dreamcastcard = kmalloc(sizeof(struct snd_card_aica), GFP_KERNEL);
+ dreamcastcard = kzalloc(sizeof(struct snd_card_aica), GFP_KERNEL);
if (unlikely(!dreamcastcard))
return -ENOMEM;
err = snd_card_new(&devptr->dev, index, SND_AICA_DRIVER,
@@ -619,8 +619,6 @@ static int snd_aica_probe(struct platform_device *devptr)
err = snd_aicapcmchip(dreamcastcard, 0);
if (unlikely(err < 0))
goto freedreamcast;
- dreamcastcard->timer.data = 0;
- dreamcastcard->channel = NULL;
/* Add basic controls */
err = add_aicamixer_controls(dreamcastcard);
if (unlikely(err < 0))
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index c0abad2067e1..d22758165496 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -36,6 +36,9 @@ config SND_SOC_COMPRESS
config SND_SOC_TOPOLOGY
bool
+config SND_SOC_ACPI
+ tristate
+
# All the supported SoCs
source "sound/soc/adi/Kconfig"
source "sound/soc/amd/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index bf8c1e2ce0bf..5327f4d6c668 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -15,6 +15,12 @@ ifneq ($(CONFIG_SND_SOC_AC97_BUS),)
snd-soc-core-objs += soc-ac97.o
endif
+ifneq ($(CONFIG_SND_SOC_ACPI),)
+snd-soc-acpi-objs := soc-acpi.o
+endif
+
+obj-$(CONFIG_SND_SOC_ACPI) += snd-soc-acpi.o
+
obj-$(CONFIG_SND_SOC) += snd-soc-core.o
obj-$(CONFIG_SND_SOC) += codecs/
obj-$(CONFIG_SND_SOC) += generic/
diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
index 78187eb24f56..d5838402f667 100644
--- a/sound/soc/amd/Kconfig
+++ b/sound/soc/amd/Kconfig
@@ -2,3 +2,10 @@ config SND_SOC_AMD_ACP
tristate "AMD Audio Coprocessor support"
help
This option enables ACP DMA support on AMD platform.
+
+config SND_SOC_AMD_CZ_RT5645_MACH
+ tristate "AMD CZ support for RT5645"
+ select SND_SOC_RT5645
+ depends on SND_SOC_AMD_ACP && I2C
+ help
+ This option enables machine driver for rt5645.
diff --git a/sound/soc/amd/Makefile b/sound/soc/amd/Makefile
index 1a66ec0366b2..f07fd2e2870a 100644
--- a/sound/soc/amd/Makefile
+++ b/sound/soc/amd/Makefile
@@ -1,3 +1,5 @@
-snd-soc-acp-pcm-objs := acp-pcm-dma.o
+acp_audio_dma-objs := acp-pcm-dma.o
+snd-soc-acp-rt5645-mach-objs := acp-rt5645.o
-obj-$(CONFIG_SND_SOC_AMD_ACP) += snd-soc-acp-pcm.o
+obj-$(CONFIG_SND_SOC_AMD_ACP) += acp_audio_dma.o
+obj-$(CONFIG_SND_SOC_AMD_CZ_RT5645_MACH) += snd-soc-acp-rt5645-mach.o
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index 08b1399d1da2..9f521a55d610 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -20,7 +20,7 @@
#include <linux/pm_runtime.h>
#include <sound/soc.h>
-
+#include <drm/amd_asic_type.h>
#include "acp.h"
#define PLAYBACK_MIN_NUM_PERIODS 2
@@ -35,6 +35,13 @@
#define MAX_BUFFER (PLAYBACK_MAX_PERIOD_SIZE * PLAYBACK_MAX_NUM_PERIODS)
#define MIN_BUFFER MAX_BUFFER
+#define ST_PLAYBACK_MAX_PERIOD_SIZE 8192
+#define ST_CAPTURE_MAX_PERIOD_SIZE ST_PLAYBACK_MAX_PERIOD_SIZE
+#define ST_MAX_BUFFER (ST_PLAYBACK_MAX_PERIOD_SIZE * PLAYBACK_MAX_NUM_PERIODS)
+#define ST_MIN_BUFFER ST_MAX_BUFFER
+
+#define DRV_NAME "acp_audio_dma"
+
static const struct snd_pcm_hardware acp_pcm_hardware_playback = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP |
@@ -73,10 +80,42 @@ static const struct snd_pcm_hardware acp_pcm_hardware_capture = {
.periods_max = CAPTURE_MAX_NUM_PERIODS,
};
-struct audio_drv_data {
- struct snd_pcm_substream *play_stream;
- struct snd_pcm_substream *capture_stream;
- void __iomem *acp_mmio;
+static const struct snd_pcm_hardware acp_st_pcm_hardware_playback = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ .buffer_bytes_max = ST_MAX_BUFFER,
+ .period_bytes_min = PLAYBACK_MIN_PERIOD_SIZE,
+ .period_bytes_max = ST_PLAYBACK_MAX_PERIOD_SIZE,
+ .periods_min = PLAYBACK_MIN_NUM_PERIODS,
+ .periods_max = PLAYBACK_MAX_NUM_PERIODS,
+};
+
+static const struct snd_pcm_hardware acp_st_pcm_hardware_capture = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .buffer_bytes_max = ST_MAX_BUFFER,
+ .period_bytes_min = CAPTURE_MIN_PERIOD_SIZE,
+ .period_bytes_max = ST_CAPTURE_MAX_PERIOD_SIZE,
+ .periods_min = CAPTURE_MIN_NUM_PERIODS,
+ .periods_max = CAPTURE_MAX_NUM_PERIODS,
};
static u32 acp_reg_read(void __iomem *acp_mmio, u32 reg)
@@ -143,8 +182,8 @@ static void config_dma_descriptor_in_sram(void __iomem *acp_mmio,
* system memory <-> ACP SRAM
*/
static void set_acp_sysmem_dma_descriptors(void __iomem *acp_mmio,
- u32 size, int direction,
- u32 pte_offset)
+ u32 size, int direction,
+ u32 pte_offset, u32 asic_type)
{
u16 i;
u16 dma_dscr_idx = PLAYBACK_START_DMA_DESCR_CH12;
@@ -154,24 +193,46 @@ static void set_acp_sysmem_dma_descriptors(void __iomem *acp_mmio,
dmadscr[i].xfer_val = 0;
if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
dma_dscr_idx = PLAYBACK_START_DMA_DESCR_CH12 + i;
- dmadscr[i].dest = ACP_SHARED_RAM_BANK_1_ADDRESS +
- (size / 2) - (i * (size/2));
+ dmadscr[i].dest = ACP_SHARED_RAM_BANK_1_ADDRESS
+ + (i * (size/2));
dmadscr[i].src = ACP_INTERNAL_APERTURE_WINDOW_0_ADDRESS
+ (pte_offset * SZ_4K) + (i * (size/2));
- dmadscr[i].xfer_val |=
- (ACP_DMA_ATTRIBUTES_DAGB_ONION_TO_SHAREDMEM << 16) |
- (size / 2);
+ switch (asic_type) {
+ case CHIP_STONEY:
+ dmadscr[i].xfer_val |=
+ (ACP_DMA_ATTRIBUTES_DAGB_GARLIC_TO_SHAREDMEM << 16) |
+ (size / 2);
+ break;
+ default:
+ dmadscr[i].xfer_val |=
+ (ACP_DMA_ATTRIBUTES_DAGB_ONION_TO_SHAREDMEM << 16) |
+ (size / 2);
+ }
} else {
dma_dscr_idx = CAPTURE_START_DMA_DESCR_CH14 + i;
- dmadscr[i].src = ACP_SHARED_RAM_BANK_5_ADDRESS +
- (i * (size/2));
- dmadscr[i].dest = ACP_INTERNAL_APERTURE_WINDOW_0_ADDRESS
- + (pte_offset * SZ_4K) +
- (i * (size/2));
- dmadscr[i].xfer_val |=
- BIT(22) |
- (ACP_DMA_ATTRIBUTES_SHAREDMEM_TO_DAGB_ONION << 16) |
- (size / 2);
+ switch (asic_type) {
+ case CHIP_STONEY:
+ dmadscr[i].src = ACP_SHARED_RAM_BANK_3_ADDRESS +
+ (i * (size/2));
+ dmadscr[i].dest =
+ ACP_INTERNAL_APERTURE_WINDOW_0_ADDRESS +
+ (pte_offset * SZ_4K) + (i * (size/2));
+ dmadscr[i].xfer_val |=
+ BIT(22) |
+ (ACP_DMA_ATTRIBUTES_SHARED_MEM_TO_DAGB_GARLIC << 16) |
+ (size / 2);
+ break;
+ default:
+ dmadscr[i].src = ACP_SHARED_RAM_BANK_5_ADDRESS +
+ (i * (size/2));
+ dmadscr[i].dest =
+ ACP_INTERNAL_APERTURE_WINDOW_0_ADDRESS +
+ (pte_offset * SZ_4K) + (i * (size/2));
+ dmadscr[i].xfer_val |=
+ BIT(22) |
+ (ACP_DMA_ATTRIBUTES_SHAREDMEM_TO_DAGB_ONION << 16) |
+ (size / 2);
+ }
}
config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx,
&dmadscr[i]);
@@ -192,7 +253,8 @@ static void set_acp_sysmem_dma_descriptors(void __iomem *acp_mmio,
* ACP SRAM <-> I2S
*/
static void set_acp_to_i2s_dma_descriptors(void __iomem *acp_mmio,
- u32 size, int direction)
+ u32 size, int direction,
+ u32 asic_type)
{
u16 i;
@@ -213,8 +275,17 @@ static void set_acp_to_i2s_dma_descriptors(void __iomem *acp_mmio,
dma_dscr_idx = CAPTURE_START_DMA_DESCR_CH15 + i;
/* dmadscr[i].src is unused by hardware. */
dmadscr[i].src = 0;
- dmadscr[i].dest = ACP_SHARED_RAM_BANK_5_ADDRESS +
+ switch (asic_type) {
+ case CHIP_STONEY:
+ dmadscr[i].dest =
+ ACP_SHARED_RAM_BANK_3_ADDRESS +
(i * (size / 2));
+ break;
+ default:
+ dmadscr[i].dest =
+ ACP_SHARED_RAM_BANK_5_ADDRESS +
+ (i * (size / 2));
+ }
dmadscr[i].xfer_val |= BIT(22) |
(FROM_ACP_I2S_1 << 16) | (size / 2);
}
@@ -270,7 +341,8 @@ static void acp_pte_config(void __iomem *acp_mmio, struct page *pg,
}
static void config_acp_dma(void __iomem *acp_mmio,
- struct audio_substream_data *audio_config)
+ struct audio_substream_data *audio_config,
+ u32 asic_type)
{
u32 pte_offset;
@@ -284,11 +356,11 @@ static void config_acp_dma(void __iomem *acp_mmio,
/* Configure System memory <-> ACP SRAM DMA descriptors */
set_acp_sysmem_dma_descriptors(acp_mmio, audio_config->size,
- audio_config->direction, pte_offset);
+ audio_config->direction, pte_offset, asic_type);
/* Configure ACP SRAM <-> I2S DMA descriptors */
set_acp_to_i2s_dma_descriptors(acp_mmio, audio_config->size,
- audio_config->direction);
+ audio_config->direction, asic_type);
}
/* Start a given DMA channel transfer */
@@ -425,7 +497,7 @@ static void acp_set_sram_bank_state(void __iomem *acp_mmio, u16 bank,
}
/* Initialize and bring ACP hardware to default state. */
-static int acp_init(void __iomem *acp_mmio)
+static int acp_init(void __iomem *acp_mmio, u32 asic_type)
{
u16 bank;
u32 val, count, sram_pte_offset;
@@ -499,10 +571,21 @@ static int acp_init(void __iomem *acp_mmio)
/* When ACP_TILE_P1 is turned on, all SRAM banks get turned on.
* Now, turn off all of them. This can't be done in 'poweron' of
* ACP pm domain, as this requires ACP to be initialized.
+ * For Stoney, memory gating is disabled, i.e. the SRAM banks
+ * won't be turned off. The default state for SRAM banks is ON.
+ * The SRAM bank state setting code is skipped for the Stoney platform.
*/
- for (bank = 1; bank < 48; bank++)
- acp_set_sram_bank_state(acp_mmio, bank, false);
+ if (asic_type != CHIP_STONEY) {
+ for (bank = 1; bank < 48; bank++)
+ acp_set_sram_bank_state(acp_mmio, bank, false);
+ }
+ /* Stoney supports 16bit resolution */
+ if (asic_type == CHIP_STONEY) {
+ val = acp_reg_read(acp_mmio, mmACP_I2S_16BIT_RESOLUTION_EN);
+ val |= 0x03;
+ acp_reg_write(val, acp_mmio, mmACP_I2S_16BIT_RESOLUTION_EN);
+ }
return 0;
}
@@ -572,9 +655,9 @@ static irqreturn_t dma_irq_handler(int irq, void *arg)
valid_irq = true;
if (acp_reg_read(acp_mmio, mmACP_DMA_CUR_DSCR_13) ==
PLAYBACK_START_DMA_DESCR_CH13)
- dscr_idx = PLAYBACK_START_DMA_DESCR_CH12;
- else
dscr_idx = PLAYBACK_END_DMA_DESCR_CH12;
+ else
+ dscr_idx = PLAYBACK_START_DMA_DESCR_CH12;
config_acp_dma_channel(acp_mmio, SYSRAM_TO_ACP_CH_NUM, dscr_idx,
1, 0);
acp_dma_start(acp_mmio, SYSRAM_TO_ACP_CH_NUM, false);
@@ -626,10 +709,23 @@ static int acp_dma_open(struct snd_pcm_substream *substream)
if (adata == NULL)
return -ENOMEM;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- runtime->hw = acp_pcm_hardware_playback;
- else
- runtime->hw = acp_pcm_hardware_capture;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ switch (intr_data->asic_type) {
+ case CHIP_STONEY:
+ runtime->hw = acp_st_pcm_hardware_playback;
+ break;
+ default:
+ runtime->hw = acp_pcm_hardware_playback;
+ }
+ } else {
+ switch (intr_data->asic_type) {
+ case CHIP_STONEY:
+ runtime->hw = acp_st_pcm_hardware_capture;
+ break;
+ default:
+ runtime->hw = acp_pcm_hardware_capture;
+ }
+ }
ret = snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
@@ -652,14 +748,22 @@ static int acp_dma_open(struct snd_pcm_substream *substream)
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
intr_data->play_stream = substream;
- for (bank = 1; bank <= 4; bank++)
- acp_set_sram_bank_state(intr_data->acp_mmio, bank,
- true);
+ /* For Stoney, memory gating is disabled, i.e. the SRAM banks
+ * won't be turned off. The default state for SRAM banks is ON.
+ * The SRAM bank state setting code is skipped for the Stoney platform.
+ */
+ if (intr_data->asic_type != CHIP_STONEY) {
+ for (bank = 1; bank <= 4; bank++)
+ acp_set_sram_bank_state(intr_data->acp_mmio,
+ bank, true);
+ }
} else {
intr_data->capture_stream = substream;
- for (bank = 5; bank <= 8; bank++)
- acp_set_sram_bank_state(intr_data->acp_mmio, bank,
- true);
+ if (intr_data->asic_type != CHIP_STONEY) {
+ for (bank = 5; bank <= 8; bank++)
+ acp_set_sram_bank_state(intr_data->acp_mmio,
+ bank, true);
+ }
}
return 0;
@@ -673,6 +777,8 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
struct page *pg;
struct snd_pcm_runtime *runtime;
struct audio_substream_data *rtd;
+ struct snd_soc_pcm_runtime *prtd = substream->private_data;
+ struct audio_drv_data *adata = dev_get_drvdata(prtd->platform->dev);
runtime = substream->runtime;
rtd = runtime->private_data;
@@ -700,7 +806,7 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
rtd->num_of_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
rtd->direction = substream->stream;
- config_acp_dma(rtd->acp_mmio, rtd);
+ config_acp_dma(rtd->acp_mmio, rtd, adata->asic_type);
status = 0;
} else {
status = -ENOMEM;
@@ -713,40 +819,48 @@ static int acp_dma_hw_free(struct snd_pcm_substream *substream)
return snd_pcm_lib_free_pages(substream);
}
+static u64 acp_get_byte_count(void __iomem *acp_mmio, int stream)
+{
+ union acp_dma_count playback_dma_count;
+ union acp_dma_count capture_dma_count;
+ u64 bytescount = 0;
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ playback_dma_count.bcount.high = acp_reg_read(acp_mmio,
+ mmACP_I2S_TRANSMIT_BYTE_CNT_HIGH);
+ playback_dma_count.bcount.low = acp_reg_read(acp_mmio,
+ mmACP_I2S_TRANSMIT_BYTE_CNT_LOW);
+ bytescount = playback_dma_count.bytescount;
+ } else {
+ capture_dma_count.bcount.high = acp_reg_read(acp_mmio,
+ mmACP_I2S_RECEIVED_BYTE_CNT_HIGH);
+ capture_dma_count.bcount.low = acp_reg_read(acp_mmio,
+ mmACP_I2S_RECEIVED_BYTE_CNT_LOW);
+ bytescount = capture_dma_count.bytescount;
+ }
+ return bytescount;
+}
+
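+/*
+ * The I2S byte-count registers expose a 64-bit counter as two 32-bit
+ * halves; acp_get_byte_count() folds them together through union
+ * acp_dma_count. acp_dma_pointer() below subtracts the count latched
+ * at trigger time and reduces it modulo the buffer size with do_div()
+ * to obtain the current ring-buffer position.
+ */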
static snd_pcm_uframes_t acp_dma_pointer(struct snd_pcm_substream *substream)
{
- u16 dscr;
- u32 mul, dma_config, period_bytes;
+ u32 buffersize;
u32 pos = 0;
+ u64 bytescount = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
struct audio_substream_data *rtd = runtime->private_data;
- period_bytes = frames_to_bytes(runtime, runtime->period_size);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- dscr = acp_reg_read(rtd->acp_mmio, mmACP_DMA_CUR_DSCR_13);
+ buffersize = frames_to_bytes(runtime, runtime->buffer_size);
+ bytescount = acp_get_byte_count(rtd->acp_mmio, substream->stream);
- if (dscr == PLAYBACK_START_DMA_DESCR_CH13)
- mul = 0;
- else
- mul = 1;
- pos = (mul * period_bytes);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (bytescount > rtd->renderbytescount)
+ bytescount = bytescount - rtd->renderbytescount;
} else {
- dma_config = acp_reg_read(rtd->acp_mmio, mmACP_DMA_CNTL_14);
- if (dma_config != 0) {
- dscr = acp_reg_read(rtd->acp_mmio,
- mmACP_DMA_CUR_DSCR_14);
- if (dscr == CAPTURE_START_DMA_DESCR_CH14)
- mul = 1;
- else
- mul = 2;
- pos = (mul * period_bytes);
- }
-
- if (pos >= (2 * period_bytes))
- pos = 0;
-
+ if (bytescount > rtd->capturebytescount)
+ bytescount = bytescount - rtd->capturebytescount;
}
+ pos = do_div(bytescount, buffersize);
return bytes_to_frames(runtime, pos);
}
@@ -768,23 +882,6 @@ static int acp_dma_prepare(struct snd_pcm_substream *substream)
config_acp_dma_channel(rtd->acp_mmio, ACP_TO_I2S_DMA_CH_NUM,
PLAYBACK_START_DMA_DESCR_CH13,
NUM_DSCRS_PER_CHANNEL, 0);
- /* Fill ACP SRAM (2 periods) with zeros from System RAM
- * which is zero-ed in hw_params
- */
- acp_dma_start(rtd->acp_mmio, SYSRAM_TO_ACP_CH_NUM, false);
-
- /* ACP SRAM (2 periods of buffer size) is intially filled with
- * zeros. Before rendering starts, 2nd half of SRAM will be
- * filled with valid audio data DMA'ed from first half of system
- * RAM and 1st half of SRAM will be filled with Zeros. This is
- * the initial scenario when redering starts from SRAM. Later
- * on, 2nd half of system memory will be DMA'ed to 1st half of
- * SRAM, 1st half of system memory will be DMA'ed to 2nd half of
- * SRAM in ping-pong way till rendering stops.
- */
- config_acp_dma_channel(rtd->acp_mmio, SYSRAM_TO_ACP_CH_NUM,
- PLAYBACK_START_DMA_DESCR_CH12,
- 1, 0);
} else {
config_acp_dma_channel(rtd->acp_mmio, ACP_TO_SYSRAM_CH_NUM,
CAPTURE_START_DMA_DESCR_CH14,
@@ -799,7 +896,8 @@ static int acp_dma_prepare(struct snd_pcm_substream *substream)
static int acp_dma_trigger(struct snd_pcm_substream *substream, int cmd)
{
int ret;
- u32 loops = 1000;
+ u32 loops = 4000;
+ u64 bytescount = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *prtd = substream->private_data;
@@ -811,7 +909,11 @@ static int acp_dma_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
+ bytescount = acp_get_byte_count(rtd->acp_mmio,
+ substream->stream);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (rtd->renderbytescount == 0)
+ rtd->renderbytescount = bytescount;
acp_dma_start(rtd->acp_mmio,
SYSRAM_TO_ACP_CH_NUM, false);
while (acp_reg_read(rtd->acp_mmio, mmACP_DMA_CH_STS) &
@@ -828,6 +930,8 @@ static int acp_dma_trigger(struct snd_pcm_substream *substream, int cmd)
ACP_TO_I2S_DMA_CH_NUM, true);
} else {
+ if (rtd->capturebytescount == 0)
+ rtd->capturebytescount = bytescount;
acp_dma_start(rtd->acp_mmio,
I2S_TO_ACP_DMA_CH_NUM, true);
}
@@ -841,12 +945,15 @@ static int acp_dma_trigger(struct snd_pcm_substream *substream, int cmd)
* channels will stopped automatically after its transfer
* completes : SYSRAM_TO_ACP_CH_NUM / ACP_TO_SYSRAM_CH_NUM
*/
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = acp_dma_stop(rtd->acp_mmio,
ACP_TO_I2S_DMA_CH_NUM);
- else
+ rtd->renderbytescount = 0;
+ } else {
ret = acp_dma_stop(rtd->acp_mmio,
I2S_TO_ACP_DMA_CH_NUM);
+ rtd->capturebytescount = 0;
+ }
break;
default:
ret = -EINVAL;
@@ -857,10 +964,27 @@ static int acp_dma_trigger(struct snd_pcm_substream *substream, int cmd)
static int acp_dma_new(struct snd_soc_pcm_runtime *rtd)
{
- return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
+ int ret;
+ struct audio_drv_data *adata = dev_get_drvdata(rtd->platform->dev);
+
+ switch (adata->asic_type) {
+ case CHIP_STONEY:
+ ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
+ SNDRV_DMA_TYPE_DEV,
+ NULL, ST_MIN_BUFFER,
+ ST_MAX_BUFFER);
+ break;
+ default:
+ ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
SNDRV_DMA_TYPE_DEV,
NULL, MIN_BUFFER,
MAX_BUFFER);
+ break;
+ }
+ if (ret < 0)
+ dev_err(rtd->platform->dev,
+ "buffer preallocation failer error:%d\n", ret);
+ return ret;
}
static int acp_dma_close(struct snd_pcm_substream *substream)
@@ -875,14 +999,23 @@ static int acp_dma_close(struct snd_pcm_substream *substream)
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
adata->play_stream = NULL;
- for (bank = 1; bank <= 4; bank++)
- acp_set_sram_bank_state(adata->acp_mmio, bank,
- false);
- } else {
+ /* For Stoney, memory gating is disabled, i.e. the SRAM banks
+ * won't be turned off. The default state for SRAM banks is ON.
+ * The SRAM bank state setting code is skipped for the Stoney
+ * platform; the condition below applies it to Carrizo only.
+ */
+ if (adata->asic_type != CHIP_STONEY) {
+ for (bank = 1; bank <= 4; bank++)
+ acp_set_sram_bank_state(adata->acp_mmio, bank,
+ false);
+ }
+ } else {
adata->capture_stream = NULL;
- for (bank = 5; bank <= 8; bank++)
- acp_set_sram_bank_state(adata->acp_mmio, bank,
- false);
+ if (adata->asic_type != CHIP_STONEY) {
+ for (bank = 5; bank <= 8; bank++)
+ acp_set_sram_bank_state(adata->acp_mmio, bank,
+ false);
+ }
}
/* Disable ACP irq, when the current stream is being closed and
@@ -916,6 +1049,7 @@ static int acp_audio_probe(struct platform_device *pdev)
int status;
struct audio_drv_data *audio_drv_data;
struct resource *res;
+ const u32 *pdata = pdev->dev.platform_data;
audio_drv_data = devm_kzalloc(&pdev->dev, sizeof(struct audio_drv_data),
GFP_KERNEL);
@@ -932,6 +1066,7 @@ static int acp_audio_probe(struct platform_device *pdev)
audio_drv_data->play_stream = NULL;
audio_drv_data->capture_stream = NULL;
+ audio_drv_data->asic_type = *pdata;
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
@@ -949,7 +1084,7 @@ static int acp_audio_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, audio_drv_data);
/* Initialize the ACP */
- acp_init(audio_drv_data->acp_mmio);
+ acp_init(audio_drv_data->acp_mmio, audio_drv_data->asic_type);
status = snd_soc_register_platform(&pdev->dev, &acp_asoc_platform);
if (status != 0) {
@@ -980,21 +1115,31 @@ static int acp_pcm_resume(struct device *dev)
u16 bank;
struct audio_drv_data *adata = dev_get_drvdata(dev);
- acp_init(adata->acp_mmio);
+ acp_init(adata->acp_mmio, adata->asic_type);
if (adata->play_stream && adata->play_stream->runtime) {
- for (bank = 1; bank <= 4; bank++)
- acp_set_sram_bank_state(adata->acp_mmio, bank,
+ /* For Stoney, memory gating is disabled, i.e. the SRAM banks
+ * won't be turned off. The default state for SRAM banks is ON.
+ * The SRAM bank state setting code is skipped for the Stoney platform.
+ */
+ if (adata->asic_type != CHIP_STONEY) {
+ for (bank = 1; bank <= 4; bank++)
+ acp_set_sram_bank_state(adata->acp_mmio, bank,
true);
+ }
config_acp_dma(adata->acp_mmio,
- adata->play_stream->runtime->private_data);
+ adata->play_stream->runtime->private_data,
+ adata->asic_type);
}
if (adata->capture_stream && adata->capture_stream->runtime) {
- for (bank = 5; bank <= 8; bank++)
- acp_set_sram_bank_state(adata->acp_mmio, bank,
+ if (adata->asic_type != CHIP_STONEY) {
+ for (bank = 5; bank <= 8; bank++)
+ acp_set_sram_bank_state(adata->acp_mmio, bank,
true);
+ }
config_acp_dma(adata->acp_mmio,
- adata->capture_stream->runtime->private_data);
+ adata->capture_stream->runtime->private_data,
+ adata->asic_type);
}
acp_reg_write(1, adata->acp_mmio, mmACP_EXTERNAL_INTR_ENB);
return 0;
@@ -1013,7 +1158,7 @@ static int acp_pcm_runtime_resume(struct device *dev)
{
struct audio_drv_data *adata = dev_get_drvdata(dev);
- acp_init(adata->acp_mmio);
+ acp_init(adata->acp_mmio, adata->asic_type);
acp_reg_write(1, adata->acp_mmio, mmACP_EXTERNAL_INTR_ENB);
return 0;
}
@@ -1028,14 +1173,15 @@ static struct platform_driver acp_dma_driver = {
.probe = acp_audio_probe,
.remove = acp_audio_remove,
.driver = {
- .name = "acp_audio_dma",
+ .name = DRV_NAME,
.pm = &acp_pm_ops,
},
};
module_platform_driver(acp_dma_driver);
+MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
MODULE_AUTHOR("Maruthi.Bayyavarapu@amd.com");
MODULE_DESCRIPTION("AMD ACP PCM Driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:acp-dma-audio");
+MODULE_ALIAS("platform:"DRV_NAME);
diff --git a/sound/soc/amd/acp-rt5645.c b/sound/soc/amd/acp-rt5645.c
new file mode 100644
index 000000000000..941aed6bb364
--- /dev/null
+++ b/sound/soc/amd/acp-rt5645.c
@@ -0,0 +1,199 @@
+/*
+ * Machine driver for AMD ACP Audio engine using Realtek RT5645 codec
+ *
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * This file is modified from rt288 machine driver
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc-dapm.h>
+#include <sound/jack.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+
+#include "../codecs/rt5645.h"
+
+#define CZ_PLAT_CLK 24000000
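+/*
+ * Fixed 24 MHz platform MCLK driving the codec PLL: cz_aif1_hw_params()
+ * requests a PLL1 output of rate * 512, e.g. 48000 * 512 = 24.576 MHz at
+ * 48 kHz, and uses that as the codec system clock.
+ */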
+
+static struct snd_soc_jack cz_jack;
+
+static int cz_aif1_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ int ret = 0;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0, RT5645_PLL1_S_MCLK,
+ CZ_PLAT_CLK, params_rate(params) * 512);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec pll: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5645_SCLK_S_PLL1,
+ params_rate(params) * 512, SND_SOC_CLOCK_OUT);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec sysclk: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int cz_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret;
+ struct snd_soc_card *card;
+ struct snd_soc_codec *codec;
+
+ codec = rtd->codec;
+ card = rtd->card;
+
+ ret = snd_soc_card_jack_new(card, "Headset Jack",
+ SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ &cz_jack, NULL, 0);
+ if (ret) {
+ dev_err(card->dev, "HP jack creation failed %d\n", ret);
+ return ret;
+ }
+
+ rt5645_set_jack_detect(codec, &cz_jack, &cz_jack, &cz_jack);
+
+ return 0;
+}
+
+static struct snd_soc_ops cz_aif1_ops = {
+ .hw_params = cz_aif1_hw_params,
+};
+
+static struct snd_soc_dai_link cz_dai_rt5650[] = {
+ {
+ .name = "amd-rt5645-play",
+ .stream_name = "RT5645_AIF1",
+ .platform_name = "acp_audio_dma.0.auto",
+ .cpu_dai_name = "designware-i2s.1.auto",
+ .codec_dai_name = "rt5645-aif1",
+ .codec_name = "i2c-10EC5650:00",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ .init = cz_init,
+ .ops = &cz_aif1_ops,
+ },
+ {
+ .name = "amd-rt5645-cap",
+ .stream_name = "RT5645_AIF1",
+ .platform_name = "acp_audio_dma.0.auto",
+ .cpu_dai_name = "designware-i2s.2.auto",
+ .codec_dai_name = "rt5645-aif1",
+ .codec_name = "i2c-10EC5650:00",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ .ops = &cz_aif1_ops,
+ },
+};
+
+static const struct snd_soc_dapm_widget cz_widgets[] = {
+ SND_SOC_DAPM_HP("Headphones", NULL),
+ SND_SOC_DAPM_SPK("Speakers", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Int Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route cz_audio_route[] = {
+ {"Headphones", NULL, "HPOL"},
+ {"Headphones", NULL, "HPOR"},
+ {"RECMIXL", NULL, "Headset Mic"},
+ {"RECMIXR", NULL, "Headset Mic"},
+ {"Speakers", NULL, "SPOL"},
+ {"Speakers", NULL, "SPOR"},
+ {"DMIC L2", NULL, "Int Mic"},
+ {"DMIC R2", NULL, "Int Mic"},
+};
+
+static const struct snd_kcontrol_new cz_mc_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphones"),
+ SOC_DAPM_PIN_SWITCH("Speakers"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Int Mic"),
+};
+
+static struct snd_soc_card cz_card = {
+ .name = "acprt5650",
+ .owner = THIS_MODULE,
+ .dai_link = cz_dai_rt5650,
+ .num_links = ARRAY_SIZE(cz_dai_rt5650),
+ .dapm_widgets = cz_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cz_widgets),
+ .dapm_routes = cz_audio_route,
+ .num_dapm_routes = ARRAY_SIZE(cz_audio_route),
+ .controls = cz_mc_controls,
+ .num_controls = ARRAY_SIZE(cz_mc_controls),
+};
+
+static int cz_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct snd_soc_card *card;
+
+ card = &cz_card;
+ cz_card.dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+ ret = devm_snd_soc_register_card(&pdev->dev, &cz_card);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "devm_snd_soc_register_card(%s) failed: %d\n",
+ cz_card.name, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct acpi_device_id cz_audio_acpi_match[] = {
+ { "AMDI1002", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, cz_audio_acpi_match);
+
+static struct platform_driver cz_pcm_driver = {
+ .driver = {
+ .name = "cz-rt5645",
+ .acpi_match_table = ACPI_PTR(cz_audio_acpi_match),
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = cz_probe,
+};
+
+module_platform_driver(cz_pcm_driver);
+
+MODULE_AUTHOR("akshu.agrawal@amd.com");
+MODULE_DESCRIPTION("cz-rt5645 audio support");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/amd/acp.h b/sound/soc/amd/acp.h
index 9d338216c5ae..ecb458935d1e 100644
--- a/sound/soc/amd/acp.h
+++ b/sound/soc/amd/acp.h
@@ -20,6 +20,7 @@
/* Capture SRAM address (as a source in dma descriptor) */
#define ACP_SHARED_RAM_BANK_5_ADDRESS 0x400A000
+#define ACP_SHARED_RAM_BANK_3_ADDRESS 0x4006000
#define ACP_DMA_RESET_TIME 10000
#define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
@@ -68,6 +69,7 @@
#define CAPTURE_START_DMA_DESCR_CH15 6
#define CAPTURE_END_DMA_DESCR_CH15 7
+#define mmACP_I2S_16BIT_RESOLUTION_EN 0x5209
enum acp_dma_priority_level {
/* 0x0 Specifies the DMA channel is given normal priority */
ACP_DMA_PRIORITY_LEVEL_NORMAL = 0x0,
@@ -82,9 +84,26 @@ struct audio_substream_data {
u16 num_of_pages;
u16 direction;
uint64_t size;
+ u64 renderbytescount;
+ u64 capturebytescount;
void __iomem *acp_mmio;
};
+struct audio_drv_data {
+ struct snd_pcm_substream *play_stream;
+ struct snd_pcm_substream *capture_stream;
+ void __iomem *acp_mmio;
+ u32 asic_type;
+};
+
+union acp_dma_count {
+ struct {
+ u32 low;
+ u32 high;
+ } bcount;
+ u64 bytescount;
+};
+
enum {
ACP_TILE_P1 = 0,
ACP_TILE_P2,
diff --git a/sound/soc/bcm/bcm2835-i2s.c b/sound/soc/bcm/bcm2835-i2s.c
index 6ba20498202e..2e449d7173fc 100644
--- a/sound/soc/bcm/bcm2835-i2s.c
+++ b/sound/soc/bcm/bcm2835-i2s.c
@@ -31,6 +31,7 @@
* General Public License for more details.
*/
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -99,6 +100,8 @@
#define BCM2835_I2S_CHWID(v) (v)
#define BCM2835_I2S_CH1(v) ((v) << 16)
#define BCM2835_I2S_CH2(v) (v)
+#define BCM2835_I2S_CH1_POS(v) BCM2835_I2S_CH1(BCM2835_I2S_CHPOS(v))
+#define BCM2835_I2S_CH2_POS(v) BCM2835_I2S_CH2(BCM2835_I2S_CHPOS(v))
#define BCM2835_I2S_TX_PANIC(v) ((v) << 24)
#define BCM2835_I2S_RX_PANIC(v) ((v) << 16)
@@ -110,12 +113,19 @@
#define BCM2835_I2S_INT_RXR BIT(1)
#define BCM2835_I2S_INT_TXW BIT(0)
+/* Frame length register is 10 bit, maximum length 1024 */
+#define BCM2835_I2S_MAX_FRAME_LENGTH 1024
+
/* General device struct */
struct bcm2835_i2s_dev {
struct device *dev;
struct snd_dmaengine_dai_dma_data dma_data[2];
unsigned int fmt;
- unsigned int bclk_ratio;
+ unsigned int tdm_slots;
+ unsigned int rx_mask;
+ unsigned int tx_mask;
+ unsigned int slot_width;
+ unsigned int frame_length;
struct regmap *i2s_regmap;
struct clk *clk;
@@ -225,19 +235,120 @@ static int bcm2835_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai,
unsigned int ratio)
{
struct bcm2835_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
- dev->bclk_ratio = ratio;
+
+ if (!ratio) {
+ dev->tdm_slots = 0;
+ return 0;
+ }
+
+ if (ratio > BCM2835_I2S_MAX_FRAME_LENGTH)
+ return -EINVAL;
+
+ dev->tdm_slots = 2;
+ dev->rx_mask = 0x03;
+ dev->tx_mask = 0x03;
+ dev->slot_width = ratio / 2;
+ dev->frame_length = ratio;
+
return 0;
}
+static int bcm2835_i2s_set_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int width)
+{
+ struct bcm2835_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+ if (slots) {
+ if (slots < 0 || width < 0)
+ return -EINVAL;
+
+ /* Limit masks to available slots */
+ rx_mask &= GENMASK(slots - 1, 0);
+ tx_mask &= GENMASK(slots - 1, 0);
+
+ /*
+ * The driver is limited to 2-channel setups.
+ * Check that exactly 2 bits are set in the masks.
+ */
+ if (hweight_long((unsigned long) rx_mask) != 2
+ || hweight_long((unsigned long) tx_mask) != 2)
+ return -EINVAL;
+
+ if (slots * width > BCM2835_I2S_MAX_FRAME_LENGTH)
+ return -EINVAL;
+ }
+
+ dev->tdm_slots = slots;
+
+ dev->rx_mask = rx_mask;
+ dev->tx_mask = tx_mask;
+ dev->slot_width = width;
+ dev->frame_length = slots * width;
+
+ return 0;
+}
+
+/*
+ * Convert logical slot number into physical slot number.
+ *
+ * If odd_offset is 0 the physical slot number is identical to the logical number.
+ * This is used for DSP modes with slot numbering 0 1 2 3 ...
+ *
+ * Otherwise odd_offset defines the physical offset for odd numbered
+ * slots. This is used for I2S and left/right justified modes to
+ * translate from logical slot numbers 0 1 2 3 ... into physical slot
+ * numbers 0 2 ... 3 4 ...
+ */
+static int bcm2835_i2s_convert_slot(unsigned int slot, unsigned int odd_offset)
+{
+ if (!odd_offset)
+ return slot;
+
+ if (slot & 1)
+ return (slot >> 1) + odd_offset;
+
+ return slot >> 1;
+}
+
+/*
+ * Calculate channel position from mask and slot width.
+ *
+ * Mask must contain exactly 2 set bits.
+ * Lowest set bit is channel 1 position, highest set bit channel 2.
+ * The constant offset is added to both channel positions.
+ *
+ * If odd_offset is > 0 slot positions are translated to
+ * I2S-style TDM slot numbering ( 0 2 ... 3 4 ...) with odd
+ * logical slot numbers starting at physical slot odd_offset.
+ */
+static void bcm2835_i2s_calc_channel_pos(
+ unsigned int *ch1_pos, unsigned int *ch2_pos,
+ unsigned int mask, unsigned int width,
+ unsigned int bit_offset, unsigned int odd_offset)
+{
+ *ch1_pos = bcm2835_i2s_convert_slot((ffs(mask) - 1), odd_offset)
+ * width + bit_offset;
+ *ch2_pos = bcm2835_i2s_convert_slot((fls(mask) - 1), odd_offset)
+ * width + bit_offset;
+}
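+/*
+ * Example: plain 16-bit stereo I2S ends up with slots = 2,
+ * slot_width = 16, mask = 0x03, data_delay = 1 and odd_offset = 1,
+ * so logical slots 0 and 1 stay at physical slots 0 and 1 and the
+ * channel positions come out as 1 and 17.
+ */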
+
static int bcm2835_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct bcm2835_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
- unsigned int sampling_rate = params_rate(params);
- unsigned int data_length, data_delay, bclk_ratio;
- unsigned int ch1pos, ch2pos, mode, format;
+ unsigned int data_length, data_delay, framesync_length;
+ unsigned int slots, slot_width, odd_slot_offset;
+ int frame_length, bclk_rate;
+ unsigned int rx_mask, tx_mask;
+ unsigned int rx_ch1_pos, rx_ch2_pos, tx_ch1_pos, tx_ch2_pos;
+ unsigned int mode, format;
+ bool bit_clock_master = false;
+ bool frame_sync_master = false;
+ bool frame_start_falling_edge = false;
uint32_t csreg;
+ int ret = 0;
/*
* If a stream is already enabled,
@@ -248,42 +359,70 @@ static int bcm2835_i2s_hw_params(struct snd_pcm_substream *substream,
if (csreg & (BCM2835_I2S_TXON | BCM2835_I2S_RXON))
return 0;
- /*
- * Adjust the data length according to the format.
- * We prefill the half frame length with an integer
- * divider of 2400 as explained at the clock settings.
- * Maybe it is overwritten there, if the Integer mode
- * does not apply.
- */
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- data_length = 16;
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- data_length = 24;
+ data_length = params_width(params);
+ data_delay = 0;
+ odd_slot_offset = 0;
+ mode = 0;
+
+ if (dev->tdm_slots) {
+ slots = dev->tdm_slots;
+ slot_width = dev->slot_width;
+ frame_length = dev->frame_length;
+ rx_mask = dev->rx_mask;
+ tx_mask = dev->tx_mask;
+ bclk_rate = dev->frame_length * params_rate(params);
+ } else {
+ slots = 2;
+ slot_width = params_width(params);
+ rx_mask = 0x03;
+ tx_mask = 0x03;
+
+ frame_length = snd_soc_params_to_frame_size(params);
+ if (frame_length < 0)
+ return frame_length;
+
+ bclk_rate = snd_soc_params_to_bclk(params);
+ if (bclk_rate < 0)
+ return bclk_rate;
+ }
+
+ /* Check if data fits into slots */
+ if (data_length > slot_width)
+ return -EINVAL;
+
+ /* Check if CPU is bit clock master */
+ switch (dev->fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBS_CFM:
+ bit_clock_master = true;
break;
- case SNDRV_PCM_FORMAT_S32_LE:
- data_length = 32;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_CBM_CFM:
+ bit_clock_master = false;
break;
default:
return -EINVAL;
}
- /* If bclk_ratio already set, use that one. */
- if (dev->bclk_ratio)
- bclk_ratio = dev->bclk_ratio;
- else
- /* otherwise calculate a fitting block ratio */
- bclk_ratio = 2 * data_length;
-
- /* Clock should only be set up here if CPU is clock master */
+ /* Check if CPU is frame sync master */
switch (dev->fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBM_CFS:
+ frame_sync_master = true;
+ break;
case SND_SOC_DAIFMT_CBS_CFM:
- clk_set_rate(dev->clk, sampling_rate * bclk_ratio);
+ case SND_SOC_DAIFMT_CBM_CFM:
+ frame_sync_master = false;
break;
default:
- break;
+ return -EINVAL;
+ }
+
+ /* Clock should only be set up here if CPU is clock master */
+ if (bit_clock_master) {
+ ret = clk_set_rate(dev->clk, bclk_rate);
+ if (ret)
+ return ret;
}
/* Setup the frame format */
@@ -294,43 +433,94 @@ static int bcm2835_i2s_hw_params(struct snd_pcm_substream *substream,
format |= BCM2835_I2S_CHWID((data_length-8)&0xf);
+ /* CH2 format is the same as for CH1 */
+ format = BCM2835_I2S_CH1(format) | BCM2835_I2S_CH2(format);
+
switch (dev->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
- data_delay = 1;
- break;
- default:
+ /* I2S mode needs an even number of slots */
+ if (slots & 1)
+ return -EINVAL;
+
/*
- * TODO
- * Others are possible but are not implemented at the moment.
+ * Use I2S-style logical slot numbering: even slots
+ * are in first half of frame, odd slots in second half.
*/
- dev_err(dev->dev, "%s:bad format\n", __func__);
- return -EINVAL;
- }
+ odd_slot_offset = slots >> 1;
- ch1pos = data_delay;
- ch2pos = bclk_ratio / 2 + data_delay;
+ /* MSB starts one cycle after frame start */
+ data_delay = 1;
- switch (params_channels(params)) {
- case 2:
- format = BCM2835_I2S_CH1(format) | BCM2835_I2S_CH2(format);
- format |= BCM2835_I2S_CH1(BCM2835_I2S_CHPOS(ch1pos));
- format |= BCM2835_I2S_CH2(BCM2835_I2S_CHPOS(ch2pos));
+ /* Setup frame sync signal for 50% duty cycle */
+ framesync_length = frame_length / 2;
+ frame_start_falling_edge = true;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ if (slots & 1)
+ return -EINVAL;
+
+ odd_slot_offset = slots >> 1;
+ data_delay = 0;
+ framesync_length = frame_length / 2;
+ frame_start_falling_edge = false;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ if (slots & 1)
+ return -EINVAL;
+
+ /* Odd frame lengths aren't supported */
+ if (frame_length & 1)
+ return -EINVAL;
+
+ odd_slot_offset = slots >> 1;
+ data_delay = slot_width - data_length;
+ framesync_length = frame_length / 2;
+ frame_start_falling_edge = false;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ data_delay = 1;
+ framesync_length = 1;
+ frame_start_falling_edge = false;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ data_delay = 0;
+ framesync_length = 1;
+ frame_start_falling_edge = false;
break;
default:
return -EINVAL;
}
+ bcm2835_i2s_calc_channel_pos(&rx_ch1_pos, &rx_ch2_pos,
+ rx_mask, slot_width, data_delay, odd_slot_offset);
+ bcm2835_i2s_calc_channel_pos(&tx_ch1_pos, &tx_ch2_pos,
+ tx_mask, slot_width, data_delay, odd_slot_offset);
+
+ /*
+	 * Transmitting data immediately after frame start, e.g.
+	 * in left-justified or DSP mode A, only works reliably
+	 * if the bcm2835 is the frame clock master.
+ */
+ if ((!rx_ch1_pos || !tx_ch1_pos) && !frame_sync_master)
+ dev_warn(dev->dev,
+ "Unstable slave config detected, L/R may be swapped");
+
/*
* Set format for both streams.
* We cannot set another frame length
* (and therefore word length) anyway,
* so the format will be the same.
*/
- regmap_write(dev->i2s_regmap, BCM2835_I2S_RXC_A_REG, format);
- regmap_write(dev->i2s_regmap, BCM2835_I2S_TXC_A_REG, format);
+ regmap_write(dev->i2s_regmap, BCM2835_I2S_RXC_A_REG,
+ format
+ | BCM2835_I2S_CH1_POS(rx_ch1_pos)
+ | BCM2835_I2S_CH2_POS(rx_ch2_pos));
+ regmap_write(dev->i2s_regmap, BCM2835_I2S_TXC_A_REG,
+ format
+ | BCM2835_I2S_CH1_POS(tx_ch1_pos)
+ | BCM2835_I2S_CH2_POS(tx_ch2_pos));
/* Setup the I2S mode */
- mode = 0;
if (data_length <= 16) {
/*
@@ -342,65 +532,41 @@ static int bcm2835_i2s_hw_params(struct snd_pcm_substream *substream,
mode |= BCM2835_I2S_FTXP | BCM2835_I2S_FRXP;
}
- mode |= BCM2835_I2S_FLEN(bclk_ratio - 1);
- mode |= BCM2835_I2S_FSLEN(bclk_ratio / 2);
+ mode |= BCM2835_I2S_FLEN(frame_length - 1);
+ mode |= BCM2835_I2S_FSLEN(framesync_length);
- /* Master or slave? */
- switch (dev->fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
- /* CPU is master */
- break;
- case SND_SOC_DAIFMT_CBM_CFS:
- /*
- * CODEC is bit clock master
- * CPU is frame master
- */
+ /* CLKM selects bcm2835 clock slave mode */
+ if (!bit_clock_master)
mode |= BCM2835_I2S_CLKM;
- break;
- case SND_SOC_DAIFMT_CBS_CFM:
- /*
- * CODEC is frame master
- * CPU is bit clock master
- */
+
+ /* FSM selects bcm2835 frame sync slave mode */
+ if (!frame_sync_master)
mode |= BCM2835_I2S_FSM;
+
+ /* CLKI selects normal clocking mode, sampling on rising edge */
+ switch (dev->fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ case SND_SOC_DAIFMT_NB_IF:
+ mode |= BCM2835_I2S_CLKI;
break;
- case SND_SOC_DAIFMT_CBM_CFM:
- /* CODEC is master */
- mode |= BCM2835_I2S_CLKM;
- mode |= BCM2835_I2S_FSM;
+ case SND_SOC_DAIFMT_IB_NF:
+ case SND_SOC_DAIFMT_IB_IF:
break;
default:
- dev_err(dev->dev, "%s:bad master\n", __func__);
return -EINVAL;
}
- /*
- * Invert clocks?
- *
- * The BCM approach seems to be inverted to the classical I2S approach.
- */
+ /* FSI selects frame start on falling edge */
switch (dev->fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
- /* None. Therefore, both for BCM */
- mode |= BCM2835_I2S_CLKI;
- mode |= BCM2835_I2S_FSI;
- break;
- case SND_SOC_DAIFMT_IB_IF:
- /* Both. Therefore, none for BCM */
+ case SND_SOC_DAIFMT_IB_NF:
+ if (frame_start_falling_edge)
+ mode |= BCM2835_I2S_FSI;
break;
case SND_SOC_DAIFMT_NB_IF:
- /*
- * Invert only frame sync. Therefore,
- * invert only bit clock for BCM
- */
- mode |= BCM2835_I2S_CLKI;
- break;
- case SND_SOC_DAIFMT_IB_NF:
- /*
- * Invert only bit clock. Therefore,
- * invert only frame sync for BCM
- */
- mode |= BCM2835_I2S_FSI;
+ case SND_SOC_DAIFMT_IB_IF:
+ if (!frame_start_falling_edge)
+ mode |= BCM2835_I2S_FSI;
break;
default:
return -EINVAL;
@@ -423,7 +589,27 @@ static int bcm2835_i2s_hw_params(struct snd_pcm_substream *substream,
/* Clear FIFOs */
bcm2835_i2s_clear_fifos(dev, true, true);
- return 0;
+ dev_dbg(dev->dev,
+ "slots: %d width: %d rx mask: 0x%02x tx_mask: 0x%02x\n",
+ slots, slot_width, rx_mask, tx_mask);
+
+ dev_dbg(dev->dev, "frame len: %d sync len: %d data len: %d\n",
+ frame_length, framesync_length, data_length);
+
+ dev_dbg(dev->dev, "rx pos: %d,%d tx pos: %d,%d\n",
+ rx_ch1_pos, rx_ch2_pos, tx_ch1_pos, tx_ch2_pos);
+
+ dev_dbg(dev->dev, "sampling rate: %d bclk rate: %d\n",
+ params_rate(params), bclk_rate);
+
+ dev_dbg(dev->dev, "CLKM: %d CLKI: %d FSM: %d FSI: %d frame start: %s edge\n",
+ !!(mode & BCM2835_I2S_CLKM),
+ !!(mode & BCM2835_I2S_CLKI),
+ !!(mode & BCM2835_I2S_FSM),
+ !!(mode & BCM2835_I2S_FSI),
+ (mode & BCM2835_I2S_FSI) ? "falling" : "rising");
+
+ return ret;
}
static int bcm2835_i2s_prepare(struct snd_pcm_substream *substream,
@@ -559,6 +745,7 @@ static const struct snd_soc_dai_ops bcm2835_i2s_dai_ops = {
.hw_params = bcm2835_i2s_hw_params,
.set_fmt = bcm2835_i2s_set_dai_fmt,
.set_bclk_ratio = bcm2835_i2s_set_dai_bclk_ratio,
+ .set_tdm_slot = bcm2835_i2s_set_dai_tdm_slot,
};
static int bcm2835_i2s_dai_probe(struct snd_soc_dai *dai)
@@ -578,7 +765,9 @@ static struct snd_soc_dai_driver bcm2835_i2s_dai = {
.playback = {
.channels_min = 2,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 8000,
+ .rate_max = 384000,
.formats = SNDRV_PCM_FMTBIT_S16_LE
| SNDRV_PCM_FMTBIT_S24_LE
| SNDRV_PCM_FMTBIT_S32_LE
@@ -586,13 +775,16 @@ static struct snd_soc_dai_driver bcm2835_i2s_dai = {
.capture = {
.channels_min = 2,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 8000,
+ .rate_max = 384000,
.formats = SNDRV_PCM_FMTBIT_S16_LE
| SNDRV_PCM_FMTBIT_S24_LE
| SNDRV_PCM_FMTBIT_S32_LE
},
.ops = &bcm2835_i2s_dai_ops,
- .symmetric_rates = 1
+ .symmetric_rates = 1,
+ .symmetric_samplebits = 1,
};
static bool bcm2835_i2s_volatile_reg(struct device *dev, unsigned int reg)
@@ -699,9 +891,6 @@ static int bcm2835_i2s_probe(struct platform_device *pdev)
dev->dma_data[SNDRV_PCM_STREAM_CAPTURE].flags =
SND_DMAENGINE_PCM_DAI_FLAG_PACK;
- /* BCLK ratio - use default */
- dev->bclk_ratio = 0;
-
/* Store the pdev */
dev->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, dev);
diff --git a/sound/soc/bcm/cygnus-ssp.c b/sound/soc/bcm/cygnus-ssp.c
index 15c438f0f22d..abafadc0b534 100644
--- a/sound/soc/bcm/cygnus-ssp.c
+++ b/sound/soc/bcm/cygnus-ssp.c
@@ -655,23 +655,10 @@ static int cygnus_ssp_hw_params(struct snd_pcm_substream *substream,
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
value = readl(aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
value &= ~BIT(BF_SRC_CFGX_BUFFER_PAIR_ENABLE);
- /* Configure channels as mono or stereo/TDM */
- if (params_channels(params) == 1)
- value |= BIT(BF_SRC_CFGX_SAMPLE_CH_MODE);
- else
- value &= ~BIT(BF_SRC_CFGX_SAMPLE_CH_MODE);
+ value &= ~BIT(BF_SRC_CFGX_SAMPLE_CH_MODE);
writel(value, aio->cygaud->audio + aio->regs.bf_sourcech_cfg);
switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S8:
- if (aio->port_type == PORT_SPDIF) {
- dev_err(aio->cygaud->dev,
- "SPDIF does not support 8bit format\n");
- return -EINVAL;
- }
- bitres = 8;
- break;
-
case SNDRV_PCM_FORMAT_S16_LE:
bitres = 16;
break;
@@ -842,6 +829,7 @@ int cygnus_ssp_set_custom_fsync_width(struct snd_soc_dai *cpu_dai, int len)
return -EINVAL;
}
}
+EXPORT_SYMBOL_GPL(cygnus_ssp_set_custom_fsync_width);
static int cygnus_ssp_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
{
@@ -998,7 +986,7 @@ static int cygnus_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai,
active_slots = hweight32(tx_mask);
- if ((active_slots < 0) || (active_slots > 16))
+ if (active_slots > 16)
return -EINVAL;
/* Slot value must be even */
@@ -1136,15 +1124,21 @@ static const struct snd_soc_dai_ops cygnus_ssp_dai_ops = {
.set_tdm_slot = cygnus_set_dai_tdm_slot,
};
+static const struct snd_soc_dai_ops cygnus_spdif_dai_ops = {
+ .startup = cygnus_ssp_startup,
+ .shutdown = cygnus_ssp_shutdown,
+ .trigger = cygnus_ssp_trigger,
+ .hw_params = cygnus_ssp_hw_params,
+ .set_sysclk = cygnus_ssp_set_sysclk,
+};
#define INIT_CPU_DAI(num) { \
.name = "cygnus-ssp" #num, \
.playback = { \
- .channels_min = 1, \
+ .channels_min = 2, \
.channels_max = 16, \
.rates = SNDRV_PCM_RATE_KNOT, \
- .formats = SNDRV_PCM_FMTBIT_S8 | \
- SNDRV_PCM_FMTBIT_S16_LE | \
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S32_LE, \
}, \
.capture = { \
@@ -1152,7 +1146,7 @@ static const struct snd_soc_dai_ops cygnus_ssp_dai_ops = {
.channels_max = 16, \
.rates = SNDRV_PCM_RATE_KNOT, \
.formats = SNDRV_PCM_FMTBIT_S16_LE | \
- SNDRV_PCM_FMTBIT_S32_LE, \
+ SNDRV_PCM_FMTBIT_S32_LE, \
}, \
.ops = &cygnus_ssp_dai_ops, \
.suspend = cygnus_ssp_suspend, \
@@ -1174,7 +1168,7 @@ static const struct snd_soc_dai_driver cygnus_spdif_dai_info = {
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S32_LE,
},
- .ops = &cygnus_ssp_dai_ops,
+ .ops = &cygnus_spdif_dai_ops,
.suspend = cygnus_ssp_suspend,
.resume = cygnus_ssp_resume,
};
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index c367d11079bc..a42ddbc93f3d 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -214,9 +214,9 @@ config SND_SOC_ALL_CODECS
select SND_SOC_WM8998 if MFD_WM8998
select SND_SOC_WM9081 if I2C
select SND_SOC_WM9090 if I2C
- select SND_SOC_WM9705 if SND_SOC_AC97_BUS
- select SND_SOC_WM9712 if SND_SOC_AC97_BUS
- select SND_SOC_WM9713 if SND_SOC_AC97_BUS
+ select SND_SOC_WM9705 if (SND_SOC_AC97_BUS || SND_SOC_AC97_BUS_NEW)
+ select SND_SOC_WM9712 if (SND_SOC_AC97_BUS || SND_SOC_AC97_BUS_NEW)
+ select SND_SOC_WM9713 if (SND_SOC_AC97_BUS || SND_SOC_AC97_BUS_NEW)
help
Normally ASoC codec drivers are only built if a machine driver which
uses them is also built since they are only usable with a machine
@@ -749,6 +749,10 @@ config SND_SOC_RT5514
config SND_SOC_RT5514_SPI
tristate
+config SND_SOC_RT5514_SPI_BUILTIN
+ bool # force RT5514_SPI to be built-in to avoid link errors
+ default SND_SOC_RT5514=y && SND_SOC_RT5514_SPI=m
+
config SND_SOC_RT5616
tristate "Realtek RT5616 CODEC"
depends on I2C
@@ -1128,14 +1132,17 @@ config SND_SOC_WM9090
config SND_SOC_WM9705
tristate
select REGMAP_AC97
+ select AC97_BUS_COMPAT if AC97_BUS_NEW
config SND_SOC_WM9712
tristate
select REGMAP_AC97
+ select AC97_BUS_COMPAT if AC97_BUS_NEW
config SND_SOC_WM9713
tristate
select REGMAP_AC97
+ select AC97_BUS_COMPAT if AC97_BUS_NEW
config SND_SOC_ZX_AUD96P22
tristate "ZTE ZX AUD96P22 CODEC"
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 05018b7ca72b..0001069ce2a7 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -360,6 +360,7 @@ obj-$(CONFIG_SND_SOC_RT286) += snd-soc-rt286.o
obj-$(CONFIG_SND_SOC_RT298) += snd-soc-rt298.o
obj-$(CONFIG_SND_SOC_RT5514) += snd-soc-rt5514.o
obj-$(CONFIG_SND_SOC_RT5514_SPI) += snd-soc-rt5514-spi.o
+obj-$(CONFIG_SND_SOC_RT5514_SPI_BUILTIN) += snd-soc-rt5514-spi.o
obj-$(CONFIG_SND_SOC_RT5616) += snd-soc-rt5616.o
obj-$(CONFIG_SND_SOC_RT5631) += snd-soc-rt5631.o
obj-$(CONFIG_SND_SOC_RT5640) += snd-soc-rt5640.o
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index a1149f6a8450..b3375e19598a 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/gcd.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -293,16 +294,99 @@ int arizona_init_gpio(struct snd_soc_codec *codec)
}
EXPORT_SYMBOL_GPL(arizona_init_gpio);
-int arizona_init_notifiers(struct snd_soc_codec *codec)
+int arizona_init_common(struct arizona *arizona)
{
- struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
- struct arizona *arizona = priv->arizona;
+ struct arizona_pdata *pdata = &arizona->pdata;
+ unsigned int val, mask;
+ int i;
BLOCKING_INIT_NOTIFIER_HEAD(&arizona->notifier);
+ for (i = 0; i < ARIZONA_MAX_OUTPUT; ++i) {
+ /* Default is 0 so noop with defaults */
+ if (pdata->out_mono[i])
+ val = ARIZONA_OUT1_MONO;
+ else
+ val = 0;
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_OUTPUT_PATH_CONFIG_1L + (i * 8),
+ ARIZONA_OUT1_MONO, val);
+ }
+
+ for (i = 0; i < ARIZONA_MAX_PDM_SPK; i++) {
+ if (pdata->spk_mute[i])
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_PDM_SPK1_CTRL_1 + (i * 2),
+ ARIZONA_SPK1_MUTE_ENDIAN_MASK |
+ ARIZONA_SPK1_MUTE_SEQ1_MASK,
+ pdata->spk_mute[i]);
+
+ if (pdata->spk_fmt[i])
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_PDM_SPK1_CTRL_2 + (i * 2),
+ ARIZONA_SPK1_FMT_MASK,
+ pdata->spk_fmt[i]);
+ }
+
+ for (i = 0; i < ARIZONA_MAX_INPUT; i++) {
+ /* Default for both is 0 so noop with defaults */
+ val = pdata->dmic_ref[i] << ARIZONA_IN1_DMIC_SUP_SHIFT;
+ if (pdata->inmode[i] & ARIZONA_INMODE_DMIC)
+ val |= 1 << ARIZONA_IN1_MODE_SHIFT;
+
+ switch (arizona->type) {
+ case WM8998:
+ case WM1814:
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ADC_DIGITAL_VOLUME_1L + (i * 8),
+ ARIZONA_IN1L_SRC_SE_MASK,
+ (pdata->inmode[i] & ARIZONA_INMODE_SE)
+ << ARIZONA_IN1L_SRC_SE_SHIFT);
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_ADC_DIGITAL_VOLUME_1R + (i * 8),
+ ARIZONA_IN1R_SRC_SE_MASK,
+ (pdata->inmode[i] & ARIZONA_INMODE_SE)
+ << ARIZONA_IN1R_SRC_SE_SHIFT);
+
+ mask = ARIZONA_IN1_DMIC_SUP_MASK |
+ ARIZONA_IN1_MODE_MASK;
+ break;
+ default:
+ if (pdata->inmode[i] & ARIZONA_INMODE_SE)
+ val |= 1 << ARIZONA_IN1_SINGLE_ENDED_SHIFT;
+
+ mask = ARIZONA_IN1_DMIC_SUP_MASK |
+ ARIZONA_IN1_MODE_MASK |
+ ARIZONA_IN1_SINGLE_ENDED_MASK;
+ break;
+ }
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_IN1L_CONTROL + (i * 8),
+ mask, val);
+ }
+
return 0;
}
-EXPORT_SYMBOL_GPL(arizona_init_notifiers);
+EXPORT_SYMBOL_GPL(arizona_init_common);
+
+int arizona_init_vol_limit(struct arizona *arizona)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(arizona->pdata.out_vol_limit); ++i) {
+ if (arizona->pdata.out_vol_limit[i])
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_DAC_VOLUME_LIMIT_1L + i * 4,
+ ARIZONA_OUT1L_VOL_LIM_MASK,
+ arizona->pdata.out_vol_limit[i]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_init_vol_limit);
const char * const arizona_mixer_texts[ARIZONA_NUM_MIXER_INPUTS] = {
"None",
@@ -2695,6 +2779,80 @@ int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
}
EXPORT_SYMBOL_GPL(arizona_lhpf_coeff_put);
+int arizona_of_get_audio_pdata(struct arizona *arizona)
+{
+ struct arizona_pdata *pdata = &arizona->pdata;
+ struct device_node *np = arizona->dev->of_node;
+ struct property *prop;
+ const __be32 *cur;
+ u32 val;
+ u32 pdm_val[ARIZONA_MAX_PDM_SPK];
+ int ret;
+ int count = 0;
+
+ count = 0;
+ of_property_for_each_u32(np, "wlf,inmode", prop, cur, val) {
+ if (count == ARRAY_SIZE(pdata->inmode))
+ break;
+
+ pdata->inmode[count] = val;
+ count++;
+ }
+
+ count = 0;
+ of_property_for_each_u32(np, "wlf,dmic-ref", prop, cur, val) {
+ if (count == ARRAY_SIZE(pdata->dmic_ref))
+ break;
+
+ pdata->dmic_ref[count] = val;
+ count++;
+ }
+
+ count = 0;
+ of_property_for_each_u32(np, "wlf,out-mono", prop, cur, val) {
+ if (count == ARRAY_SIZE(pdata->out_mono))
+ break;
+
+ pdata->out_mono[count] = !!val;
+ count++;
+ }
+
+ count = 0;
+ of_property_for_each_u32(np, "wlf,max-channels-clocked", prop, cur, val) {
+ if (count == ARRAY_SIZE(pdata->max_channels_clocked))
+ break;
+
+ pdata->max_channels_clocked[count] = val;
+ count++;
+ }
+
+ count = 0;
+ of_property_for_each_u32(np, "wlf,out-volume-limit", prop, cur, val) {
+ if (count == ARRAY_SIZE(pdata->out_vol_limit))
+ break;
+
+ pdata->out_vol_limit[count] = val;
+ count++;
+ }
+
+ ret = of_property_read_u32_array(np, "wlf,spk-fmt",
+ pdm_val, ARRAY_SIZE(pdm_val));
+
+ if (ret >= 0)
+ for (count = 0; count < ARRAY_SIZE(pdata->spk_fmt); ++count)
+ pdata->spk_fmt[count] = pdm_val[count];
+
+ ret = of_property_read_u32_array(np, "wlf,spk-mute",
+ pdm_val, ARRAY_SIZE(pdm_val));
+
+ if (ret >= 0)
+ for (count = 0; count < ARRAY_SIZE(pdata->spk_mute); ++count)
+ pdata->spk_mute[count] = pdm_val[count];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_of_get_audio_pdata);
+
MODULE_DESCRIPTION("ASoC Wolfson Arizona class device support");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 1822e3b3de80..dfdf6d8c9687 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -313,7 +313,9 @@ int arizona_set_fll(struct arizona_fll *fll, int source,
int arizona_init_spk(struct snd_soc_codec *codec);
int arizona_init_gpio(struct snd_soc_codec *codec);
int arizona_init_mono(struct snd_soc_codec *codec);
-int arizona_init_notifiers(struct snd_soc_codec *codec);
+
+int arizona_init_common(struct arizona *arizona);
+int arizona_init_vol_limit(struct arizona *arizona);
int arizona_init_spk_irqs(struct arizona *arizona);
int arizona_free_spk_irqs(struct arizona *arizona);
@@ -350,4 +352,6 @@ static inline int arizona_unregister_notifier(struct snd_soc_codec *codec,
return blocking_notifier_chain_unregister(&arizona->notifier, nb);
}
+int arizona_of_get_audio_pdata(struct arizona *arizona);
+
#endif
diff --git a/sound/soc/codecs/cs43130.c b/sound/soc/codecs/cs43130.c
index 643e37fc218e..5ba0edc19df4 100644
--- a/sound/soc/codecs/cs43130.c
+++ b/sound/soc/codecs/cs43130.c
@@ -909,6 +909,7 @@ static int cs43130_hw_params(struct snd_pcm_substream *substream,
regmap_update_bits(cs43130->regmap, CS43130_DSD_PATH_CTL_2,
CS43130_DSD_SRC_MASK, CS43130_DSD_SRC_XSP <<
CS43130_DSD_SRC_SHIFT);
+ break;
}
if (!sclk && cs43130->dais[dai->id].dai_mode == SND_SOC_DAIFMT_CBM_CFM)
@@ -1039,6 +1040,7 @@ static int cs43130_pcm_ch_put(struct snd_kcontrol *kcontrol,
else
regmap_multi_reg_write(cs43130->regmap, pcm_ch_dis_seq,
ARRAY_SIZE(pcm_ch_dis_seq));
+ break;
}
return snd_soc_put_enum_double(kcontrol, ucontrol);
@@ -1152,6 +1154,7 @@ static int cs43130_dsd_event(struct snd_soc_dapm_widget *w,
case CS4399_CHIP_ID:
regmap_multi_reg_write(cs43130->regmap, dsd_seq,
ARRAY_SIZE(dsd_seq));
+ break;
}
break;
case SND_SOC_DAPM_POST_PMU:
@@ -1162,6 +1165,7 @@ static int cs43130_dsd_event(struct snd_soc_dapm_widget *w,
case CS4399_CHIP_ID:
regmap_multi_reg_write(cs43130->regmap, unmute_seq,
ARRAY_SIZE(unmute_seq));
+ break;
}
break;
case SND_SOC_DAPM_PRE_PMD:
@@ -1184,6 +1188,7 @@ static int cs43130_dsd_event(struct snd_soc_dapm_widget *w,
regmap_update_bits(cs43130->regmap,
CS43130_DSD_PATH_CTL_1,
CS43130_MUTE_MASK, CS43130_MUTE_EN);
+ break;
}
break;
default:
@@ -1206,6 +1211,7 @@ static int cs43130_pcm_event(struct snd_soc_dapm_widget *w,
case CS4399_CHIP_ID:
regmap_multi_reg_write(cs43130->regmap, pcm_seq,
ARRAY_SIZE(pcm_seq));
+ break;
}
break;
case SND_SOC_DAPM_POST_PMU:
@@ -1216,6 +1222,7 @@ static int cs43130_pcm_event(struct snd_soc_dapm_widget *w,
case CS4399_CHIP_ID:
regmap_multi_reg_write(cs43130->regmap, unmute_seq,
ARRAY_SIZE(unmute_seq));
+ break;
}
break;
case SND_SOC_DAPM_PRE_PMD:
@@ -1238,6 +1245,7 @@ static int cs43130_pcm_event(struct snd_soc_dapm_widget *w,
regmap_update_bits(cs43130->regmap,
CS43130_PCM_PATH_CTL_1,
CS43130_MUTE_MASK, CS43130_MUTE_EN);
+ break;
}
break;
default:
@@ -1277,6 +1285,7 @@ static int cs43130_dac_event(struct snd_soc_dapm_widget *w,
case CS43198_CHIP_ID:
regmap_multi_reg_write(cs43130->regmap, pop_free_seq2,
ARRAY_SIZE(pop_free_seq2));
+ break;
}
break;
case SND_SOC_DAPM_POST_PMU:
@@ -1301,6 +1310,7 @@ static int cs43130_dac_event(struct snd_soc_dapm_widget *w,
case CS43198_CHIP_ID:
usleep_range(12000, 12010);
regmap_write(cs43130->regmap, CS43130_DXD13, 0);
+ break;
}
regmap_write(cs43130->regmap, CS43130_DXD1, 0);
@@ -1311,6 +1321,7 @@ static int cs43130_dac_event(struct snd_soc_dapm_widget *w,
case CS4399_CHIP_ID:
regmap_multi_reg_write(cs43130->regmap, dac_postpmd_seq,
ARRAY_SIZE(dac_postpmd_seq));
+ break;
}
break;
default:
@@ -2133,6 +2144,7 @@ exit:
cs43130_hpload_proc(cs43130, hp_dis_cal_seq2,
ARRAY_SIZE(hp_dis_cal_seq2),
CS43130_HPLOAD_OFF_INT, ac_idx);
+ break;
}
regmap_multi_reg_write(cs43130->regmap, hp_cln_seq,
@@ -2543,6 +2555,7 @@ static int cs43130_i2c_probe(struct i2c_client *client,
digital_hp_routes;
soc_codec_dev_cs43130.component_driver.num_dapm_routes =
ARRAY_SIZE(digital_hp_routes);
+ break;
}
ret = snd_soc_register_codec(&client->dev, &soc_codec_dev_cs43130,
@@ -2586,8 +2599,7 @@ static int cs43130_i2c_remove(struct i2c_client *client)
device_remove_file(&client->dev, &dev_attr_hpload_ac_r);
}
- if (cs43130->reset_gpio)
- gpiod_set_value_cansleep(cs43130->reset_gpio, 0);
+ gpiod_set_value_cansleep(cs43130->reset_gpio, 0);
pm_runtime_disable(&client->dev);
regulator_bulk_disable(CS43130_NUM_SUPPLIES, cs43130->supplies);
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index e09fc8f037f1..94c0209977d0 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -1130,7 +1130,6 @@ static int cs47l24_codec_probe(struct snd_soc_codec *codec)
arizona_init_gpio(codec);
arizona_init_mono(codec);
- arizona_init_notifiers(codec);
ret = wm_adsp2_codec_probe(&priv->core.adsp[1], codec);
if (ret)
@@ -1230,6 +1229,14 @@ static int cs47l24_probe(struct platform_device *pdev)
if (!cs47l24)
return -ENOMEM;
+ if (IS_ENABLED(CONFIG_OF)) {
+ if (!dev_get_platdata(arizona->dev)) {
+ ret = arizona_of_get_audio_pdata(arizona);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
platform_set_drvdata(pdev, cs47l24);
cs47l24->core.arizona = arizona;
@@ -1288,6 +1295,11 @@ static int cs47l24_probe(struct platform_device *pdev)
return ret;
}
+ arizona_init_common(arizona);
+
+ ret = arizona_init_vol_limit(arizona);
+ if (ret < 0)
+ goto err_dsp_irq;
ret = arizona_init_spk_irqs(arizona);
if (ret < 0)
goto err_dsp_irq;
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index cc0b2d2eaf15..41d9b1da27c2 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -1220,6 +1220,7 @@ static int da7213_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
struct snd_soc_codec *codec = codec_dai->codec;
struct da7213_priv *da7213 = snd_soc_codec_get_drvdata(codec);
u8 dai_clk_mode = 0, dai_ctrl = 0;
+ u8 dai_offset = 0;
/* Set master/slave mode */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -1234,17 +1235,46 @@ static int da7213_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
}
/* Set clock normal/inverted */
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_NB_NF:
- break;
- case SND_SOC_DAIFMT_NB_IF:
- dai_clk_mode |= DA7213_DAI_WCLK_POL_INV;
- break;
- case SND_SOC_DAIFMT_IB_NF:
- dai_clk_mode |= DA7213_DAI_CLK_POL_INV;
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_LEFT_J:
+ case SND_SOC_DAIFMT_RIGHT_J:
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ dai_clk_mode |= DA7213_DAI_WCLK_POL_INV;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ dai_clk_mode |= DA7213_DAI_CLK_POL_INV;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ dai_clk_mode |= DA7213_DAI_WCLK_POL_INV |
+ DA7213_DAI_CLK_POL_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
break;
- case SND_SOC_DAIFMT_IB_IF:
- dai_clk_mode |= DA7213_DAI_WCLK_POL_INV | DA7213_DAI_CLK_POL_INV;
+ case SND_SOC_DAI_FORMAT_DSP_A:
+ case SND_SOC_DAI_FORMAT_DSP_B:
+ /* The bclk is inverted wrt ASoC conventions */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ dai_clk_mode |= DA7213_DAI_CLK_POL_INV;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ dai_clk_mode |= DA7213_DAI_WCLK_POL_INV |
+ DA7213_DAI_CLK_POL_INV;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ dai_clk_mode |= DA7213_DAI_WCLK_POL_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
break;
default:
return -EINVAL;
@@ -1261,6 +1291,13 @@ static int da7213_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
case SND_SOC_DAIFMT_RIGHT_J:
dai_ctrl |= DA7213_DAI_FORMAT_RIGHT_J;
break;
+ case SND_SOC_DAI_FORMAT_DSP_A: /* L data MSB after FRM LRC */
+ dai_ctrl |= DA7213_DAI_FORMAT_DSP;
+ dai_offset = 1;
+ break;
+ case SND_SOC_DAI_FORMAT_DSP_B: /* L data MSB during FRM LRC */
+ dai_ctrl |= DA7213_DAI_FORMAT_DSP;
+ break;
default:
return -EINVAL;
}
@@ -1271,6 +1308,7 @@ static int da7213_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
snd_soc_write(codec, DA7213_DAI_CLK_MODE, dai_clk_mode);
snd_soc_update_bits(codec, DA7213_DAI_CTRL, DA7213_DAI_FORMAT_MASK,
dai_ctrl);
+ snd_soc_write(codec, DA7213_DAI_OFFSET, dai_offset);
return 0;
}
diff --git a/sound/soc/codecs/da7213.h b/sound/soc/codecs/da7213.h
index 16ef56f77cd4..5a78dba1dcb5 100644
--- a/sound/soc/codecs/da7213.h
+++ b/sound/soc/codecs/da7213.h
@@ -188,6 +188,7 @@
#define DA7213_DAI_FORMAT_I2S_MODE (0x0 << 0)
#define DA7213_DAI_FORMAT_LEFT_J (0x1 << 0)
#define DA7213_DAI_FORMAT_RIGHT_J (0x2 << 0)
+#define DA7213_DAI_FORMAT_DSP (0x3 << 0)
#define DA7213_DAI_FORMAT_MASK (0x3 << 0)
#define DA7213_DAI_WORD_LENGTH_S16_LE (0x0 << 2)
#define DA7213_DAI_WORD_LENGTH_S20_LE (0x1 << 2)
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index e824d47cc22b..f3b4f4dfae6a 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -942,7 +942,8 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
if (!se)
return -ENOMEM;
- sprintf(kc_name, "Pin %d port %d Input", pin->nid, port->id);
+ snprintf(kc_name, NAME_SIZE, "Pin %d port %d Input",
+ pin->nid, port->id);
kc->name = devm_kstrdup(&edev->hdac.dev, kc_name, GFP_KERNEL);
if (!kc->name)
return -ENOMEM;
@@ -1452,6 +1453,8 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
int i, num_nodes;
struct hdac_device *hdac = &edev->hdac;
struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_cvt *temp_cvt, *cvt_next;
+ struct hdac_hdmi_pin *temp_pin, *pin_next;
int ret;
hdac_hdmi_skl_enable_all_pins(hdac);
@@ -1481,32 +1484,54 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
case AC_WID_AUD_OUT:
ret = hdac_hdmi_add_cvt(edev, nid);
if (ret < 0)
- return ret;
+ goto free_widgets;
break;
case AC_WID_PIN:
ret = hdac_hdmi_add_pin(edev, nid);
if (ret < 0)
- return ret;
+ goto free_widgets;
break;
}
}
hdac->end_nid = nid;
- if (!hdmi->num_pin || !hdmi->num_cvt)
- return -EIO;
+ if (!hdmi->num_pin || !hdmi->num_cvt) {
+ ret = -EIO;
+ goto free_widgets;
+ }
ret = hdac_hdmi_create_dais(hdac, dais, hdmi, hdmi->num_cvt);
if (ret) {
dev_err(&hdac->dev, "Failed to create dais with err: %d\n",
ret);
- return ret;
+ goto free_widgets;
}
*num_dais = hdmi->num_cvt;
+ ret = hdac_hdmi_init_dai_map(edev);
+ if (ret < 0)
+ goto free_widgets;
+
+ return ret;
+
+free_widgets:
+ list_for_each_entry_safe(temp_cvt, cvt_next, &hdmi->cvt_list, head) {
+ list_del(&temp_cvt->head);
+ kfree(temp_cvt->name);
+ kfree(temp_cvt);
+ }
+
+ list_for_each_entry_safe(temp_pin, pin_next, &hdmi->pin_list, head) {
+ for (i = 0; i < temp_pin->num_ports; i++)
+ temp_pin->ports[i].pin = NULL;
+ kfree(temp_pin->ports);
+ list_del(&temp_pin->head);
+ kfree(temp_pin);
+ }
- return hdac_hdmi_init_dai_map(edev);
+ return ret;
}
static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
@@ -1894,6 +1919,9 @@ static void hdac_hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
struct hdac_hdmi_port *port;
+ if (!pcm)
+ return;
+
if (list_empty(&pcm->port_list))
return;
@@ -1912,6 +1940,9 @@ static bool is_hdac_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
struct hdac_hdmi_priv *hdmi = edev->private_data;
struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
+ if (!pcm)
+ return false;
+
if (list_empty(&pcm->port_list))
return false;
@@ -1925,6 +1956,9 @@ static int hdac_hdmi_get_spk_alloc(struct hdac_device *hdac, int pcm_idx)
struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
struct hdac_hdmi_port *port;
+ if (!pcm)
+ return 0;
+
if (list_empty(&pcm->port_list))
return 0;
@@ -1978,6 +2012,9 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
hdmi_priv->chmap.ops.is_pcm_attached = is_hdac_hdmi_pcm_attached;
hdmi_priv->chmap.ops.get_spk_alloc = hdac_hdmi_get_spk_alloc;
+ if (!hdac_id)
+ return -ENODEV;
+
if (hdac_id->driver_data)
hdmi_priv->drv_data =
(struct hdac_hdmi_drv_data *)hdac_id->driver_data;
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index 3abf82563408..5672e516bec3 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -303,11 +303,8 @@ enum {
static int hdmi_eld_ctl_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
-
uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
- uinfo->count = sizeof(hcp->eld);
+ uinfo->count = FIELD_SIZEOF(struct hdmi_codec_priv, eld);
return 0;
}
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 13bcfb1ef9b4..f5075d1f79e6 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -2115,7 +2115,7 @@ static void max98090_pll_work(struct work_struct *work)
if (!snd_soc_codec_is_active(codec))
return;
- dev_info(codec->dev, "PLL unlocked\n");
+ dev_info_ratelimited(codec->dev, "PLL unlocked\n");
/* Toggle shutdown OFF then ON */
snd_soc_update_bits(codec, M98090_REG_DEVICE_SHUTDOWN,
diff --git a/sound/soc/codecs/max98925.c b/sound/soc/codecs/max98925.c
index 327eaa25c9bd..921f95fc396d 100644
--- a/sound/soc/codecs/max98925.c
+++ b/sound/soc/codecs/max98925.c
@@ -579,7 +579,7 @@ static int max98925_i2c_probe(struct i2c_client *i2c,
ret = PTR_ERR(max98925->regmap);
dev_err(&i2c->dev,
"Failed to allocate regmap: %d\n", ret);
- goto err_out;
+ return ret;
}
if (!of_property_read_u32(i2c->dev.of_node, "vmon-slot-no", &value)) {
@@ -596,16 +596,20 @@ static int max98925_i2c_probe(struct i2c_client *i2c,
}
max98925->i_slot = value;
}
- ret = regmap_read(max98925->regmap,
- MAX98925_REV_VERSION, &reg);
- if ((ret < 0) ||
- ((reg != MAX98925_VERSION) &&
- (reg != MAX98925_VERSION1))) {
- dev_err(&i2c->dev,
- "device initialization error (%d 0x%02X)\n",
+
+ ret = regmap_read(max98925->regmap, MAX98925_REV_VERSION, &reg);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Read revision failed\n");
+ return ret;
+ }
+
+ if ((reg != MAX98925_VERSION) && (reg != MAX98925_VERSION1)) {
+ ret = -ENODEV;
+ dev_err(&i2c->dev, "Invalid revision (%d 0x%02X)\n",
ret, reg);
- goto err_out;
+ return ret;
}
+
dev_info(&i2c->dev, "device version 0x%02X\n", reg);
ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_max98925,
@@ -613,7 +617,6 @@ static int max98925_i2c_probe(struct i2c_client *i2c,
if (ret < 0)
dev_err(&i2c->dev,
"Failed to register codec: %d\n", ret);
-err_out:
return ret;
}
diff --git a/sound/soc/codecs/max98927.c b/sound/soc/codecs/max98927.c
index d9dbbe72f8ad..a1d39353719d 100644
--- a/sound/soc/codecs/max98927.c
+++ b/sound/soc/codecs/max98927.c
@@ -1,7 +1,7 @@
/*
* max98927.c -- MAX98927 ALSA Soc Audio driver
*
- * Copyright (C) 2016 Maxim Integrated Products
+ * Copyright (C) 2016-2017 Maxim Integrated Products
* Author: Ryan Lee <ryans.lee@maximintegrated.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -146,6 +146,7 @@ static int max98927_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
struct max98927_priv *max98927 = snd_soc_codec_get_drvdata(codec);
unsigned int mode = 0;
unsigned int format = 0;
+ bool use_pdm = false;
unsigned int invert = 0;
dev_dbg(codec->dev, "%s: fmt 0x%08X\n", __func__, fmt);
@@ -187,22 +188,27 @@ static int max98927_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
/* interface format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
- max98927->iface |= SND_SOC_DAIFMT_I2S;
format = MAX98927_PCM_FORMAT_I2S;
break;
case SND_SOC_DAIFMT_LEFT_J:
- max98927->iface |= SND_SOC_DAIFMT_LEFT_J;
format = MAX98927_PCM_FORMAT_LJ;
break;
+ case SND_SOC_DAIFMT_DSP_A:
+ format = MAX98927_PCM_FORMAT_TDM_MODE1;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ format = MAX98927_PCM_FORMAT_TDM_MODE0;
+ break;
case SND_SOC_DAIFMT_PDM:
- max98927->iface |= SND_SOC_DAIFMT_PDM;
+ use_pdm = true;
break;
default:
return -EINVAL;
}
+ max98927->iface = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
- /* pcm channel configuration */
- if (max98927->iface & (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_LEFT_J)) {
+ if (!use_pdm) {
+ /* pcm channel configuration */
regmap_update_bits(max98927->regmap,
MAX98927_R0018_PCM_RX_EN_A,
MAX98927_PCM_RX_CH0_EN | MAX98927_PCM_RX_CH1_EN,
@@ -217,13 +223,11 @@ static int max98927_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
MAX98927_R003B_SPK_SRC_SEL,
MAX98927_SPK_SRC_MASK, 0);
- } else
regmap_update_bits(max98927->regmap,
- MAX98927_R0018_PCM_RX_EN_A,
- MAX98927_PCM_RX_CH0_EN | MAX98927_PCM_RX_CH1_EN, 0);
-
- /* pdm channel configuration */
- if (max98927->iface & SND_SOC_DAIFMT_PDM) {
+ MAX98927_R0035_PDM_RX_CTRL,
+ MAX98927_PDM_RX_EN_MASK, 0);
+ } else {
+ /* pdm channel configuration */
regmap_update_bits(max98927->regmap,
MAX98927_R0035_PDM_RX_CTRL,
MAX98927_PDM_RX_EN_MASK, 1);
@@ -231,10 +235,11 @@ static int max98927_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
regmap_update_bits(max98927->regmap,
MAX98927_R003B_SPK_SRC_SEL,
MAX98927_SPK_SRC_MASK, 3);
- } else
+
regmap_update_bits(max98927->regmap,
- MAX98927_R0035_PDM_RX_CTRL,
- MAX98927_PDM_RX_EN_MASK, 0);
+ MAX98927_R0018_PCM_RX_EN_A,
+ MAX98927_PCM_RX_CH0_EN | MAX98927_PCM_RX_CH1_EN, 0);
+ }
return 0;
}
@@ -245,6 +250,21 @@ static const int rate_table[] = {
13000000, 19200000,
};
+/* BCLKs per LRCLK */
+static const int bclk_sel_table[] = {
+ 32, 48, 64, 96, 128, 192, 256, 384, 512,
+};
+
+static int max98927_get_bclk_sel(int bclk)
+{
+ int i;
+ /* match BCLKs per LRCLK */
+ for (i = 0; i < ARRAY_SIZE(bclk_sel_table); i++) {
+ if (bclk_sel_table[i] == bclk)
+ return i + 2;
+ }
+ return 0;
+}
static int max98927_set_clock(struct max98927_priv *max98927,
struct snd_pcm_hw_params *params)
{
@@ -270,23 +290,20 @@ static int max98927_set_clock(struct max98927_priv *max98927,
i << MAX98927_PCM_MASTER_MODE_MCLK_RATE_SHIFT);
}
- switch (blr_clk_ratio) {
- case 32:
- value = 2;
- break;
- case 48:
- value = 3;
- break;
- case 64:
- value = 4;
- break;
- default:
- return -EINVAL;
+ if (!max98927->tdm_mode) {
+ /* BCLK configuration */
+ value = max98927_get_bclk_sel(blr_clk_ratio);
+ if (!value) {
+ dev_err(codec->dev, "format unsupported %d\n",
+ params_format(params));
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98927->regmap,
+ MAX98927_R0022_PCM_CLK_SETUP,
+ MAX98927_PCM_CLK_SETUP_BSEL_MASK,
+ value);
}
- regmap_update_bits(max98927->regmap,
- MAX98927_R0022_PCM_CLK_SETUP,
- MAX98927_PCM_CLK_SETUP_BSEL_MASK,
- value);
return 0;
}
@@ -386,6 +403,78 @@ err:
return -EINVAL;
}
+static int max98927_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max98927_priv *max98927 = snd_soc_codec_get_drvdata(codec);
+ int bsel = 0;
+ unsigned int chan_sz = 0;
+
+ max98927->tdm_mode = true;
+
+ /* BCLK configuration */
+ bsel = max98927_get_bclk_sel(slots * slot_width);
+ if (bsel == 0) {
+ dev_err(codec->dev, "BCLK %d not supported\n",
+ slots * slot_width);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98927->regmap,
+ MAX98927_R0022_PCM_CLK_SETUP,
+ MAX98927_PCM_CLK_SETUP_BSEL_MASK,
+ bsel);
+
+ /* Channel size configuration */
+ switch (slot_width) {
+ case 16:
+ chan_sz = MAX98927_PCM_MODE_CFG_CHANSZ_16;
+ break;
+ case 24:
+ chan_sz = MAX98927_PCM_MODE_CFG_CHANSZ_24;
+ break;
+ case 32:
+ chan_sz = MAX98927_PCM_MODE_CFG_CHANSZ_32;
+ break;
+ default:
+ dev_err(codec->dev, "format unsupported %d\n",
+ slot_width);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98927->regmap,
+ MAX98927_R0020_PCM_MODE_CFG,
+ MAX98927_PCM_MODE_CFG_CHANSZ_MASK, chan_sz);
+
+ /* Rx slot configuration */
+ regmap_write(max98927->regmap,
+ MAX98927_R0018_PCM_RX_EN_A,
+ rx_mask & 0xFF);
+ regmap_write(max98927->regmap,
+ MAX98927_R0019_PCM_RX_EN_B,
+ (rx_mask & 0xFF00) >> 8);
+
+ /* Tx slot configuration */
+ regmap_write(max98927->regmap,
+ MAX98927_R001A_PCM_TX_EN_A,
+ tx_mask & 0xFF);
+ regmap_write(max98927->regmap,
+ MAX98927_R001B_PCM_TX_EN_B,
+ (tx_mask & 0xFF00) >> 8);
+
+ /* Tx slot Hi-Z configuration */
+ regmap_write(max98927->regmap,
+ MAX98927_R001C_PCM_TX_HIZ_CTRL_A,
+ ~tx_mask & 0xFF);
+ regmap_write(max98927->regmap,
+ MAX98927_R001D_PCM_TX_HIZ_CTRL_B,
+ (~tx_mask & 0xFF00) >> 8);
+
+ return 0;
+}
+
#define MAX98927_RATES SNDRV_PCM_RATE_8000_48000
#define MAX98927_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
@@ -405,6 +494,7 @@ static const struct snd_soc_dai_ops max98927_dai_ops = {
.set_sysclk = max98927_dai_set_sysclk,
.set_fmt = max98927_dai_set_fmt,
.hw_params = max98927_dai_hw_params,
+ .set_tdm_slot = max98927_dai_tdm_slot,
};
static int max98927_dac_event(struct snd_soc_dapm_widget *w,
@@ -414,6 +504,9 @@ static int max98927_dac_event(struct snd_soc_dapm_widget *w,
struct max98927_priv *max98927 = snd_soc_codec_get_drvdata(codec);
switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ max98927->tdm_mode = 0;
+ break;
case SND_SOC_DAPM_POST_PMU:
regmap_update_bits(max98927->regmap,
MAX98927_R003A_AMP_EN,
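
For reference only (not part of the patch): a minimal userspace sketch of the BCLK-per-LRCLK lookup that the hunks above add to max98927.c. The table values and the "+ 2" register offset mirror the driver; the surrounding program is illustrative.

/* Sketch of the max98927 BSEL lookup: map a BCLK/LRCLK ratio to the
 * register field value (index + 2), or 0 if the ratio is unsupported. */
#include <stdio.h>

static const int bclk_sel_table[] = { 32, 48, 64, 96, 128, 192, 256, 384, 512 };

static int get_bclk_sel(int bclk)
{
    unsigned int i;

    for (i = 0; i < sizeof(bclk_sel_table) / sizeof(bclk_sel_table[0]); i++)
        if (bclk_sel_table[i] == bclk)
            return i + 2;
    return 0;
}

int main(void)
{
    printf("ratio 64  -> BSEL %d\n", get_bclk_sel(64));  /* prints 4 */
    printf("ratio 100 -> BSEL %d\n", get_bclk_sel(100)); /* prints 0: unsupported */
    return 0;
}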
diff --git a/sound/soc/codecs/max98927.h b/sound/soc/codecs/max98927.h
index ece6a608cbe1..9ea839735433 100644
--- a/sound/soc/codecs/max98927.h
+++ b/sound/soc/codecs/max98927.h
@@ -1,7 +1,7 @@
/*
* max98927.h -- MAX98927 ALSA Soc Audio driver
*
- * Copyright 2013-15 Maxim Integrated Products
+ * Copyright (C) 2016-2017 Maxim Integrated Products
* Author: Ryan Lee <ryans.lee@maximintegrated.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -161,7 +161,9 @@
#define MAX98927_PCM_MODE_CFG_FORMAT_SHIFT (3)
#define MAX98927_PCM_FORMAT_I2S (0x0 << 0)
#define MAX98927_PCM_FORMAT_LJ (0x1 << 0)
-
+#define MAX98927_PCM_FORMAT_TDM_MODE0 (0x3 << 0)
+#define MAX98927_PCM_FORMAT_TDM_MODE1 (0x4 << 0)
+#define MAX98927_PCM_FORMAT_TDM_MODE2 (0x5 << 0)
#define MAX98927_PCM_MODE_CFG_CHANSZ_MASK (0x3 << 6)
#define MAX98927_PCM_MODE_CFG_CHANSZ_16 (0x1 << 6)
#define MAX98927_PCM_MODE_CFG_CHANSZ_24 (0x2 << 6)
@@ -268,5 +270,6 @@ struct max98927_priv {
unsigned int iface;
unsigned int master;
unsigned int digital_gain;
+ bool tdm_mode;
};
#endif
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index 549c269acc7d..5f3c42c4f74a 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -104,7 +104,7 @@
#define CDC_A_MICB_1_VAL (0xf141)
#define MICB_MIN_VAL 1600
#define MICB_STEP_SIZE 50
-#define MICB_VOLTAGE_REGVAL(v) ((v - MICB_MIN_VAL)/MICB_STEP_SIZE)
+#define MICB_VOLTAGE_REGVAL(v) (((v - MICB_MIN_VAL)/MICB_STEP_SIZE) << 3)
#define MICB_1_VAL_MICB_OUT_VAL_MASK GENMASK(7, 3)
#define MICB_1_VAL_MICB_OUT_VAL_V2P70V ((0x16) << 3)
#define MICB_1_VAL_MICB_OUT_VAL_V1P80V ((0x4) << 3)
@@ -285,7 +285,7 @@ struct pm8916_wcd_analog_priv {
u16 codec_version;
bool mbhc_btn_enabled;
/* special event to detect accessory type */
- bool mbhc_btn0_pressed;
+ int mbhc_btn0_released;
bool detect_accessory_type;
struct clk *mclk;
struct snd_soc_codec *codec;
@@ -349,8 +349,9 @@ static void pm8916_wcd_analog_micbias_enable(struct snd_soc_codec *codec)
| MICB_1_CTL_EXT_PRECHARG_EN_ENABLE);
if (wcd->micbias_mv) {
- snd_soc_write(codec, CDC_A_MICB_1_VAL,
- MICB_VOLTAGE_REGVAL(wcd->micbias_mv));
+ snd_soc_update_bits(codec, CDC_A_MICB_1_VAL,
+ MICB_1_VAL_MICB_OUT_VAL_MASK,
+ MICB_VOLTAGE_REGVAL(wcd->micbias_mv));
/*
* Special headset needs MICBIAS as 2.7V so wait for
* 50 msec for the MICBIAS to reach 2.7 volts.
@@ -443,50 +444,6 @@ static int pm8916_wcd_analog_enable_micbias_int1(struct
wcd->micbias1_cap_mode);
}
-static void pm8916_wcd_setup_mbhc(struct pm8916_wcd_analog_priv *wcd)
-{
- struct snd_soc_codec *codec = wcd->codec;
- u32 plug_type = 0;
- u32 int_en_mask;
-
- snd_soc_write(codec, CDC_A_MBHC_DET_CTL_1,
- CDC_A_MBHC_DET_CTL_L_DET_EN |
- CDC_A_MBHC_DET_CTL_MECH_DET_TYPE_INSERTION |
- CDC_A_MBHC_DET_CTL_MIC_CLAMP_CTL_AUTO |
- CDC_A_MBHC_DET_CTL_MBHC_BIAS_EN);
-
- if (wcd->hphl_jack_type_normally_open)
- plug_type |= CDC_A_HPHL_PLUG_TYPE_NO;
-
- if (wcd->gnd_jack_type_normally_open)
- plug_type |= CDC_A_GND_PLUG_TYPE_NO;
-
- snd_soc_write(codec, CDC_A_MBHC_DET_CTL_2,
- CDC_A_MBHC_DET_CTL_HS_L_DET_PULL_UP_CTRL_I_3P0 |
- CDC_A_MBHC_DET_CTL_HS_L_DET_COMPA_CTRL_V0P9_VDD |
- plug_type |
- CDC_A_MBHC_DET_CTL_HPHL_100K_TO_GND_EN);
-
-
- snd_soc_write(codec, CDC_A_MBHC_DBNC_TIMER,
- CDC_A_MBHC_DBNC_TIMER_INSREM_DBNC_T_256_MS |
- CDC_A_MBHC_DBNC_TIMER_BTN_DBNC_T_16MS);
-
- /* enable MBHC clock */
- snd_soc_update_bits(codec, CDC_D_CDC_DIG_CLK_CTL,
- DIG_CLK_CTL_D_MBHC_CLK_EN_MASK,
- DIG_CLK_CTL_D_MBHC_CLK_EN);
-
- int_en_mask = MBHC_SWITCH_INT;
- if (wcd->mbhc_btn_enabled)
- int_en_mask |= MBHC_BUTTON_PRESS_DET | MBHC_BUTTON_RELEASE_DET;
-
- snd_soc_update_bits(codec, CDC_D_INT_EN_CLR, int_en_mask, 0);
- snd_soc_update_bits(codec, CDC_D_INT_EN_SET, int_en_mask, int_en_mask);
- wcd->mbhc_btn0_pressed = false;
- wcd->detect_accessory_type = true;
-}
-
static int pm8916_mbhc_configure_bias(struct pm8916_wcd_analog_priv *priv,
bool micbias2_enabled)
{
@@ -534,6 +491,56 @@ static int pm8916_mbhc_configure_bias(struct pm8916_wcd_analog_priv *priv,
return 0;
}
+static void pm8916_wcd_setup_mbhc(struct pm8916_wcd_analog_priv *wcd)
+{
+ struct snd_soc_codec *codec = wcd->codec;
+ bool micbias_enabled = false;
+ u32 plug_type = 0;
+ u32 int_en_mask;
+
+ snd_soc_write(codec, CDC_A_MBHC_DET_CTL_1,
+ CDC_A_MBHC_DET_CTL_L_DET_EN |
+ CDC_A_MBHC_DET_CTL_MECH_DET_TYPE_INSERTION |
+ CDC_A_MBHC_DET_CTL_MIC_CLAMP_CTL_AUTO |
+ CDC_A_MBHC_DET_CTL_MBHC_BIAS_EN);
+
+ if (wcd->hphl_jack_type_normally_open)
+ plug_type |= CDC_A_HPHL_PLUG_TYPE_NO;
+
+ if (wcd->gnd_jack_type_normally_open)
+ plug_type |= CDC_A_GND_PLUG_TYPE_NO;
+
+ snd_soc_write(codec, CDC_A_MBHC_DET_CTL_2,
+ CDC_A_MBHC_DET_CTL_HS_L_DET_PULL_UP_CTRL_I_3P0 |
+ CDC_A_MBHC_DET_CTL_HS_L_DET_COMPA_CTRL_V0P9_VDD |
+ plug_type |
+ CDC_A_MBHC_DET_CTL_HPHL_100K_TO_GND_EN);
+
+
+ snd_soc_write(codec, CDC_A_MBHC_DBNC_TIMER,
+ CDC_A_MBHC_DBNC_TIMER_INSREM_DBNC_T_256_MS |
+ CDC_A_MBHC_DBNC_TIMER_BTN_DBNC_T_16MS);
+
+ /* enable MBHC clock */
+ snd_soc_update_bits(codec, CDC_D_CDC_DIG_CLK_CTL,
+ DIG_CLK_CTL_D_MBHC_CLK_EN_MASK,
+ DIG_CLK_CTL_D_MBHC_CLK_EN);
+
+ if (snd_soc_read(codec, CDC_A_MICB_2_EN) & CDC_A_MICB_2_EN_ENABLE)
+ micbias_enabled = true;
+
+ pm8916_mbhc_configure_bias(wcd, micbias_enabled);
+
+ int_en_mask = MBHC_SWITCH_INT;
+ if (wcd->mbhc_btn_enabled)
+ int_en_mask |= MBHC_BUTTON_PRESS_DET | MBHC_BUTTON_RELEASE_DET;
+
+ snd_soc_update_bits(codec, CDC_D_INT_EN_CLR, int_en_mask, 0);
+ snd_soc_update_bits(codec, CDC_D_INT_EN_SET, int_en_mask, int_en_mask);
+ wcd->mbhc_btn0_released = false;
+ wcd->detect_accessory_type = true;
+}
+
static int pm8916_wcd_analog_enable_micbias_int2(struct
snd_soc_dapm_widget
*w, struct snd_kcontrol
@@ -614,6 +621,7 @@ static int pm8916_wcd_analog_enable_adc(struct snd_soc_dapm_widget *w,
case CDC_A_TX_2_EN:
snd_soc_update_bits(codec, CDC_A_MICB_1_CTL,
MICB_1_CTL_CFILT_REF_SEL_MASK, 0);
+ /* fall through */
case CDC_A_TX_3_EN:
snd_soc_update_bits(codec, CDC_D_CDC_CONN_TX2_CTL,
CONN_TX2_SERIAL_TX2_MUX,
@@ -950,7 +958,7 @@ static irqreturn_t mbhc_btn_release_irq_handler(int irq, void *arg)
/* check if it's BTN0 that's released */
if ((val != -1) && !(val & CDC_A_MBHC_RESULT_1_BTN_RESULT_MASK))
- priv->mbhc_btn0_pressed = false;
+ priv->mbhc_btn0_released = true;
} else {
snd_soc_jack_report(priv->jack, 0, btn_mask);
@@ -983,9 +991,7 @@ static irqreturn_t mbhc_btn_press_irq_handler(int irq, void *arg)
break;
case 0x0:
/* handle BTN_0 specially for type detection */
- if (priv->detect_accessory_type)
- priv->mbhc_btn0_pressed = true;
- else
+ if (!priv->detect_accessory_type)
snd_soc_jack_report(priv->jack,
SND_JACK_BTN_0, btn_mask);
break;
@@ -1029,19 +1035,19 @@ static irqreturn_t pm8916_mbhc_switch_irq_handler(int irq, void *arg)
* both press and release event received then its
* a headset.
*/
- if (priv->mbhc_btn0_pressed)
+ if (priv->mbhc_btn0_released)
snd_soc_jack_report(priv->jack,
- SND_JACK_HEADPHONE, hs_jack_mask);
+ SND_JACK_HEADSET, hs_jack_mask);
else
snd_soc_jack_report(priv->jack,
- SND_JACK_HEADSET, hs_jack_mask);
+ SND_JACK_HEADPHONE, hs_jack_mask);
priv->detect_accessory_type = false;
} else { /* removal */
snd_soc_jack_report(priv->jack, 0, hs_jack_mask);
priv->detect_accessory_type = true;
- priv->mbhc_btn0_pressed = false;
+ priv->mbhc_btn0_released = false;
}
return IRQ_HANDLED;
@@ -1241,6 +1247,8 @@ static const struct of_device_id pm8916_wcd_analog_spmi_match_table[] = {
{ }
};
+MODULE_DEVICE_TABLE(of, pm8916_wcd_analog_spmi_match_table);
+
static struct platform_driver pm8916_wcd_analog_spmi_driver = {
.driver = {
.name = "qcom,pm8916-wcd-spmi-codec",
diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
index 66df8f810f0d..a10a724eb448 100644
--- a/sound/soc/codecs/msm8916-wcd-digital.c
+++ b/sound/soc/codecs/msm8916-wcd-digital.c
@@ -238,7 +238,7 @@ static const struct soc_enum rx_mix2_inp1_chain_enum = SOC_ENUM_SINGLE(
static const struct soc_enum rx2_mix1_inp_enum[] = {
SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 0, 6, rx_mix1_text),
SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 3, 6, rx_mix1_text),
- SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 0, 6, rx_mix1_text),
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B2_CTL, 0, 6, rx_mix1_text),
};
/* RX2 MIX2 */
@@ -249,7 +249,7 @@ static const struct soc_enum rx2_mix2_inp1_chain_enum = SOC_ENUM_SINGLE(
static const struct soc_enum rx3_mix1_inp_enum[] = {
SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 0, 6, rx_mix1_text),
SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 3, 6, rx_mix1_text),
- SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 0, 6, rx_mix1_text),
+ SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B2_CTL, 0, 6, rx_mix1_text),
};
/* DEC */
diff --git a/sound/soc/codecs/pcm512x-i2c.c b/sound/soc/codecs/pcm512x-i2c.c
index dbff416e38be..5f9c069569d5 100644
--- a/sound/soc/codecs/pcm512x-i2c.c
+++ b/sound/soc/codecs/pcm512x-i2c.c
@@ -1,7 +1,7 @@
/*
* Driver for the PCM512x CODECs
*
- * Author: Mark Brown <broonie@linaro.org>
+ * Author: Mark Brown <broonie@kernel.org>
* Copyright 2014 Linaro Ltd
*
* This program is free software; you can redistribute it and/or
@@ -75,5 +75,5 @@ static struct i2c_driver pcm512x_i2c_driver = {
module_i2c_driver(pcm512x_i2c_driver);
MODULE_DESCRIPTION("ASoC PCM512x codec driver - I2C");
-MODULE_AUTHOR("Mark Brown <broonie@linaro.org>");
+MODULE_AUTHOR("Mark Brown <broonie@kernel.org>");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm512x-spi.c b/sound/soc/codecs/pcm512x-spi.c
index 712ed6598c48..25c63510ae15 100644
--- a/sound/soc/codecs/pcm512x-spi.c
+++ b/sound/soc/codecs/pcm512x-spi.c
@@ -1,7 +1,7 @@
/*
* Driver for the PCM512x CODECs
*
- * Author: Mark Brown <broonie@linaro.org>
+ * Author: Mark Brown <broonie@kernel.org>
* Copyright 2014 Linaro Ltd
*
* This program is free software; you can redistribute it and/or
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 68feae262476..e0f3556d3872 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -1,7 +1,7 @@
/*
* Driver for the PCM512x CODECs
*
- * Author: Mark Brown <broonie@linaro.org>
+ * Author: Mark Brown <broonie@kernel.org>
* Copyright 2014 Linaro Ltd
*
* This program is free software; you can redistribute it and/or
@@ -1602,5 +1602,5 @@ const struct dev_pm_ops pcm512x_pm_ops = {
EXPORT_SYMBOL_GPL(pcm512x_pm_ops);
MODULE_DESCRIPTION("ASoC PCM512x codec driver");
-MODULE_AUTHOR("Mark Brown <broonie@linaro.org>");
+MODULE_AUTHOR("Mark Brown <broonie@kernel.org>");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm512x.h b/sound/soc/codecs/pcm512x.h
index b7c310207223..d70d9c0c2088 100644
--- a/sound/soc/codecs/pcm512x.h
+++ b/sound/soc/codecs/pcm512x.h
@@ -1,7 +1,7 @@
/*
* Driver for the PCM512x CODECs
*
- * Author: Mark Brown <broonie@linaro.org>
+ * Author: Mark Brown <broonie@kernel.org>
* Copyright 2014 Linaro Ltd
*
* This program is free software; you can redistribute it and/or
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index 7b447d0b173a..974a9040651d 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL_GPL(rl6231_get_pre_div);
*/
int rl6231_calc_dmic_clk(int rate)
{
- int div[] = {2, 3, 4, 6, 8, 12};
+ static const int div[] = {2, 3, 4, 6, 8, 12};
int i;
if (rate < 1000000 * div[0]) {
@@ -189,7 +189,8 @@ EXPORT_SYMBOL_GPL(rl6231_pll_calc);
int rl6231_get_clk_info(int sclk, int rate)
{
- int i, pd[] = {1, 2, 3, 4, 6, 8, 12, 16};
+ int i;
+ static const int pd[] = {1, 2, 3, 4, 6, 8, 12, 16};
if (sclk <= 0 || rate <= 0)
return -EINVAL;
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 12f2ecf3a4fe..2df91db765ac 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -147,8 +147,13 @@ done:
static void rt5514_schedule_copy(struct rt5514_dsp *rt5514_dsp)
{
+ size_t period_bytes;
u8 buf[8];
+ if (!rt5514_dsp->substream)
+ return;
+
+ period_bytes = snd_pcm_lib_period_bytes(rt5514_dsp->substream);
rt5514_dsp->get_size = 0;
/**
@@ -176,6 +181,10 @@ static void rt5514_schedule_copy(struct rt5514_dsp *rt5514_dsp)
rt5514_dsp->buf_size = rt5514_dsp->buf_limit - rt5514_dsp->buf_base;
+ if (rt5514_dsp->buf_size % period_bytes)
+ rt5514_dsp->buf_size = (rt5514_dsp->buf_size / period_bytes) *
+ period_bytes;
+
if (rt5514_dsp->buf_base && rt5514_dsp->buf_limit &&
rt5514_dsp->buf_rp && rt5514_dsp->buf_size)
schedule_delayed_work(&rt5514_dsp->copy_work, 0);
@@ -447,9 +456,45 @@ static int rt5514_spi_probe(struct spi_device *spi)
return ret;
}
+ device_init_wakeup(&spi->dev, true);
+
+ return 0;
+}
+
+static int __maybe_unused rt5514_suspend(struct device *dev)
+{
+ int irq = to_spi_device(dev)->irq;
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(irq);
+
+ return 0;
+}
+
+static int __maybe_unused rt5514_resume(struct device *dev)
+{
+ struct snd_soc_platform *platform = snd_soc_lookup_platform(dev);
+ struct rt5514_dsp *rt5514_dsp =
+ snd_soc_platform_get_drvdata(platform);
+ int irq = to_spi_device(dev)->irq;
+ u8 buf[8];
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(irq);
+
+ if (rt5514_dsp->substream) {
+ rt5514_spi_burst_read(RT5514_IRQ_CTRL, (u8 *)&buf, sizeof(buf));
+ if (buf[0] & RT5514_IRQ_STATUS_BIT)
+ rt5514_schedule_copy(rt5514_dsp);
+ }
+
return 0;
}
+static const struct dev_pm_ops rt5514_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rt5514_suspend, rt5514_resume)
+};
+
static const struct of_device_id rt5514_of_match[] = {
{ .compatible = "realtek,rt5514", },
{},
@@ -459,6 +504,7 @@ MODULE_DEVICE_TABLE(of, rt5514_of_match);
static struct spi_driver rt5514_spi_driver = {
.driver = {
.name = "rt5514",
+ .pm = &rt5514_pm_ops,
.of_match_table = of_match_ptr(rt5514_of_match),
},
.probe = rt5514_spi_probe,
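
For reference only (not part of the patch): the period-alignment arithmetic that rt5514_schedule_copy() now applies to the DSP buffer size, shown as a standalone sketch; the sizes used here are made up.

/* Round a DSP buffer size down to a whole number of ALSA periods, as the
 * rt5514-spi change above does before scheduling the copy work. */
#include <stdio.h>

static size_t round_down_to_periods(size_t buf_size, size_t period_bytes)
{
    if (period_bytes && (buf_size % period_bytes))
        buf_size = (buf_size / period_bytes) * period_bytes;
    return buf_size;
}

int main(void)
{
    /* e.g. a 0x12340-byte buffer with 0x1000-byte periods */
    printf("0x%zx\n", round_down_to_periods(0x12340, 0x1000)); /* prints 0x12000 */
    return 0;
}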
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index d7956ababd11..2a5b5d74e697 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -1143,7 +1143,7 @@ static const struct acpi_device_id rt5514_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, rt5514_acpi_match);
#endif
-static int rt5514_parse_dt(struct rt5514_priv *rt5514, struct device *dev)
+static int rt5514_parse_dp(struct rt5514_priv *rt5514, struct device *dev)
{
device_property_read_u32(dev, "realtek,dmic-init-delay-ms",
&rt5514->pdata.dmic_init_delay);
@@ -1183,8 +1183,8 @@ static int rt5514_i2c_probe(struct i2c_client *i2c,
if (pdata)
rt5514->pdata = *pdata;
- else if (i2c->dev.of_node)
- rt5514_parse_dt(rt5514, &i2c->dev);
+ else
+ rt5514_parse_dp(rt5514, &i2c->dev);
rt5514->i2c_regmap = devm_regmap_init_i2c(i2c, &rt5514_i2c_regmap);
if (IS_ERR(rt5514->i2c_regmap)) {
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index a98647ac497c..f020d2d1eef4 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -55,6 +55,8 @@ MODULE_PARM_DESC(quirk, "RT5645 pdata quirk override");
#define RT5645_HWEQ_NUM 57
+#define TIME_TO_POWER_MS 400
+
static const struct regmap_range_cfg rt5645_ranges[] = {
{
.name = "PR",
@@ -432,6 +434,7 @@ struct rt5645_priv {
int jack_type;
bool en_button_func;
bool hp_on;
+ int v_id;
};
static int rt5645_reset(struct snd_soc_codec *codec)
@@ -2516,9 +2519,7 @@ static const struct snd_soc_dapm_route rt5645_dapm_routes[] = {
{ "SPKVOL L", "Switch", "SPK MIXL" },
{ "SPKVOL R", "Switch", "SPK MIXR" },
- { "SPOL MIX", "DAC R1 Switch", "DAC R1" },
{ "SPOL MIX", "DAC L1 Switch", "DAC L1" },
- { "SPOL MIX", "SPKVOL R Switch", "SPKVOL R" },
{ "SPOL MIX", "SPKVOL L Switch", "SPKVOL L" },
{ "SPOR MIX", "DAC R1 Switch", "DAC R1" },
{ "SPOR MIX", "SPKVOL R Switch", "SPKVOL R" },
@@ -2707,6 +2708,11 @@ static const struct snd_soc_dapm_route rt5645_specific_dapm_routes[] = {
{ "DAC R2 Mux", "IF1 DAC", "RT5645 IF1 DAC2 R Mux" },
};
+static const struct snd_soc_dapm_route rt5645_old_dapm_routes[] = {
+ { "SPOL MIX", "DAC R1 Switch", "DAC R1" },
+ { "SPOL MIX", "SPKVOL R Switch", "SPKVOL R" },
+};
+
static int rt5645_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
@@ -3340,9 +3346,9 @@ static irqreturn_t rt5645_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static void rt5645_btn_check_callback(unsigned long data)
+static void rt5645_btn_check_callback(struct timer_list *t)
{
- struct rt5645_priv *rt5645 = (struct rt5645_priv *)data;
+ struct rt5645_priv *rt5645 = from_timer(rt5645, t, btn_check_timer);
queue_delayed_work(system_power_efficient_wq,
&rt5645->jack_detect_work, msecs_to_jiffies(5));
@@ -3363,6 +3369,11 @@ static int rt5645_probe(struct snd_soc_codec *codec)
snd_soc_dapm_add_routes(dapm,
rt5645_specific_dapm_routes,
ARRAY_SIZE(rt5645_specific_dapm_routes));
+ if (rt5645->v_id < 3) {
+ snd_soc_dapm_add_routes(dapm,
+ rt5645_old_dapm_routes,
+ ARRAY_SIZE(rt5645_old_dapm_routes));
+ }
break;
case CODEC_TYPE_RT5650:
snd_soc_dapm_new_controls(dapm,
@@ -3637,14 +3648,14 @@ static const struct dmi_system_id dmi_platform_gpd_win[] = {
{}
};
-static struct rt5645_platform_data general_platform_data2 = {
+static const struct rt5645_platform_data general_platform_data2 = {
.dmic1_data_pin = RT5645_DMIC_DATA_IN2N,
.dmic2_data_pin = RT5645_DMIC2_DISABLE,
.jd_mode = 3,
.inv_jd1_1 = true,
};
-static struct dmi_system_id dmi_platform_asus_t100ha[] = {
+static const struct dmi_system_id dmi_platform_asus_t100ha[] = {
{
.ident = "ASUS T100HAN",
.matches = {
@@ -3655,11 +3666,11 @@ static struct dmi_system_id dmi_platform_asus_t100ha[] = {
{ }
};
-static struct rt5645_platform_data minix_z83_4_platform_data = {
+static const struct rt5645_platform_data minix_z83_4_platform_data = {
.jd_mode = 3,
};
-static struct dmi_system_id dmi_platform_minix_z83_4[] = {
+static const struct dmi_system_id dmi_platform_minix_z83_4[] = {
{
.ident = "MINIX Z83-4",
.matches = {
@@ -3775,6 +3786,12 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
ret);
return ret;
}
+
+ /*
+ * Read the ID only after 400 msec, as this is the interval required
+ * between power on and the first read.
+ */
+ msleep(TIME_TO_POWER_MS);
regmap_read(regmap, RT5645_VENDOR_ID2, &val);
switch (val) {
@@ -3803,6 +3820,9 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
regmap_write(rt5645->regmap, RT5645_RESET, 0);
+ regmap_read(regmap, RT5645_VENDOR_ID, &val);
+ rt5645->v_id = val & 0xff;
+
ret = regmap_register_patch(rt5645->regmap, init_list,
ARRAY_SIZE(init_list));
if (ret != 0)
@@ -3934,8 +3954,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
}
- setup_timer(&rt5645->btn_check_timer,
- rt5645_btn_check_callback, (unsigned long)rt5645);
+ timer_setup(&rt5645->btn_check_timer, rt5645_btn_check_callback, 0);
INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
INIT_DELAYED_WORK(&rt5645->rcclock_work, rt5645_rcclock_work);
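
For reference only (not part of the patch): a standalone sketch of the container_of idiom behind the timer_setup()/from_timer() conversion above — the callback now receives the timer pointer and recovers the enclosing private struct from it. The struct and field names below are illustrative, not the driver's.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_timer { int dummy; };

struct fake_priv {
    int jack_type;
    struct fake_timer btn_check_timer;
};

/* Callback gets only the timer; container_of recovers the parent struct,
 * which is what from_timer() expands to in the kernel. */
static void btn_check_callback(struct fake_timer *t)
{
    struct fake_priv *priv = container_of(t, struct fake_priv, btn_check_timer);

    printf("jack_type = %d\n", priv->jack_type);
}

int main(void)
{
    struct fake_priv priv = { .jack_type = 3 };

    btn_check_callback(&priv.btn_check_timer);
    return 0;
}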
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index da60b28ba3df..831b297978a4 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -26,10 +27,15 @@
#include <sound/soc-dapm.h>
#include <sound/initval.h>
#include <sound/tlv.h>
+#include <sound/jack.h>
#include "rl6231.h"
#include "rt5651.h"
+#define RT5651_JD_MAP(quirk) ((quirk) & GENMASK(7, 0))
+#define RT5651_IN2_DIFF BIT(16)
+#define RT5651_DMIC_EN BIT(17)
+
#define RT5651_DEVICE_ID_VALUE 0x6281
#define RT5651_PR_RANGE_BASE (0xff + 1)
@@ -37,6 +43,8 @@
#define RT5651_PR_BASE (RT5651_PR_RANGE_BASE + (0 * RT5651_PR_SPACING))
+static unsigned long rt5651_quirk;
+
static const struct regmap_range_cfg rt5651_ranges[] = {
{ .name = "PR", .range_min = RT5651_PR_BASE,
.range_max = RT5651_PR_BASE + 0xb4,
@@ -880,11 +888,14 @@ static const struct snd_soc_dapm_widget rt5651_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("PLL1", RT5651_PWR_ANLG2,
RT5651_PWR_PLL_BIT, 0, NULL, 0),
/* Input Side */
+ SND_SOC_DAPM_SUPPLY("JD Power", RT5651_PWR_ANLG2,
+ RT5651_PWM_JD_M_BIT, 0, NULL, 0),
+
/* micbias */
SND_SOC_DAPM_SUPPLY("LDO", RT5651_PWR_ANLG1,
RT5651_PWR_LDO_BIT, 0, NULL, 0),
- SND_SOC_DAPM_MICBIAS("micbias1", RT5651_PWR_ANLG2,
- RT5651_PWR_MB1_BIT, 0),
+ SND_SOC_DAPM_SUPPLY("micbias1", RT5651_PWR_ANLG2,
+ RT5651_PWR_MB1_BIT, 0, NULL, 0),
/* Input Lines */
SND_SOC_DAPM_INPUT("MIC1"),
SND_SOC_DAPM_INPUT("MIC2"),
@@ -1528,6 +1539,8 @@ static int rt5651_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
static int rt5651_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
+ struct rt5651_priv *rt5651 = snd_soc_codec_get_drvdata(codec);
+
switch (level) {
case SND_SOC_BIAS_PREPARE:
if (SND_SOC_BIAS_STANDBY == snd_soc_codec_get_bias_level(codec)) {
@@ -1556,8 +1569,13 @@ static int rt5651_set_bias_level(struct snd_soc_codec *codec,
snd_soc_write(codec, RT5651_PWR_DIG2, 0x0000);
snd_soc_write(codec, RT5651_PWR_VOL, 0x0000);
snd_soc_write(codec, RT5651_PWR_MIXER, 0x0000);
- snd_soc_write(codec, RT5651_PWR_ANLG1, 0x0000);
- snd_soc_write(codec, RT5651_PWR_ANLG2, 0x0000);
+ if (rt5651->pdata.jd_src) {
+ snd_soc_write(codec, RT5651_PWR_ANLG2, 0x0204);
+ snd_soc_write(codec, RT5651_PWR_ANLG1, 0x0002);
+ } else {
+ snd_soc_write(codec, RT5651_PWR_ANLG1, 0x0000);
+ snd_soc_write(codec, RT5651_PWR_ANLG2, 0x0000);
+ }
break;
default:
@@ -1570,6 +1588,7 @@ static int rt5651_set_bias_level(struct snd_soc_codec *codec,
static int rt5651_probe(struct snd_soc_codec *codec)
{
struct rt5651_priv *rt5651 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
rt5651->codec = codec;
@@ -1585,6 +1604,15 @@ static int rt5651_probe(struct snd_soc_codec *codec)
snd_soc_codec_force_bias_level(codec, SND_SOC_BIAS_OFF);
+ if (rt5651->pdata.jd_src) {
+ snd_soc_dapm_force_enable_pin(dapm, "JD Power");
+ snd_soc_dapm_force_enable_pin(dapm, "LDO");
+ snd_soc_dapm_sync(dapm);
+
+ regmap_update_bits(rt5651->regmap, RT5651_MICBIAS,
+ 0x38, 0x38);
+ }
+
return 0;
}
@@ -1718,15 +1746,130 @@ static const struct i2c_device_id rt5651_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, rt5651_i2c_id);
+static int rt5651_quirk_cb(const struct dmi_system_id *id)
+{
+ rt5651_quirk = (unsigned long) id->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id rt5651_quirk_table[] = {
+ {
+ .callback = rt5651_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "KIANO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "KIANO SlimNote 14.2"),
+ },
+ .driver_data = (unsigned long *) RT5651_JD1_1,
+ },
+ {}
+};
+
static int rt5651_parse_dt(struct rt5651_priv *rt5651, struct device_node *np)
{
- rt5651->pdata.in2_diff = of_property_read_bool(np,
- "realtek,in2-differential");
- rt5651->pdata.dmic_en = of_property_read_bool(np,
- "realtek,dmic-en");
+ if (of_property_read_bool(np, "realtek,in2-differential"))
+ rt5651_quirk |= RT5651_IN2_DIFF;
+ if (of_property_read_bool(np, "realtek,dmic-en"))
+ rt5651_quirk |= RT5651_DMIC_EN;
+
+ return 0;
+}
+
+static void rt5651_set_pdata(struct rt5651_priv *rt5651)
+{
+ if (rt5651_quirk & RT5651_IN2_DIFF)
+ rt5651->pdata.in2_diff = true;
+ if (rt5651_quirk & RT5651_DMIC_EN)
+ rt5651->pdata.dmic_en = true;
+ if (RT5651_JD_MAP(rt5651_quirk))
+ rt5651->pdata.jd_src = RT5651_JD_MAP(rt5651_quirk);
+}
+
+static irqreturn_t rt5651_irq(int irq, void *data)
+{
+ struct rt5651_priv *rt5651 = data;
+
+ queue_delayed_work(system_power_efficient_wq,
+ &rt5651->jack_detect_work, msecs_to_jiffies(250));
+
+ return IRQ_HANDLED;
+}
+
+static int rt5651_jack_detect(struct snd_soc_codec *codec, int jack_insert)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ int jack_type;
+
+ if (jack_insert) {
+ snd_soc_dapm_force_enable_pin(dapm, "LDO");
+ snd_soc_dapm_sync(dapm);
+
+ snd_soc_update_bits(codec, RT5651_MICBIAS,
+ RT5651_MIC1_OVCD_MASK |
+ RT5651_MIC1_OVTH_MASK |
+ RT5651_PWR_CLK12M_MASK |
+ RT5651_PWR_MB_MASK,
+ RT5651_MIC1_OVCD_EN |
+ RT5651_MIC1_OVTH_600UA |
+ RT5651_PWR_MB_PU |
+ RT5651_PWR_CLK12M_PU);
+ msleep(100);
+ if (snd_soc_read(codec, RT5651_IRQ_CTRL2) & RT5651_MB1_OC_CLR)
+ jack_type = SND_JACK_HEADPHONE;
+ else
+ jack_type = SND_JACK_HEADSET;
+ snd_soc_update_bits(codec, RT5651_IRQ_CTRL2,
+ RT5651_MB1_OC_CLR, 0);
+ } else { /* jack out */
+ jack_type = 0;
+
+ snd_soc_update_bits(codec, RT5651_MICBIAS,
+ RT5651_MIC1_OVCD_MASK,
+ RT5651_MIC1_OVCD_DIS);
+ }
+
+ return jack_type;
+}
+
+static void rt5651_jack_detect_work(struct work_struct *work)
+{
+ struct rt5651_priv *rt5651 =
+ container_of(work, struct rt5651_priv, jack_detect_work.work);
+
+ int report, val = 0;
+
+ if (!rt5651->codec)
+ return;
+
+ switch (rt5651->pdata.jd_src) {
+ case RT5651_JD1_1:
+ val = snd_soc_read(rt5651->codec, RT5651_INT_IRQ_ST) & 0x1000;
+ break;
+ case RT5651_JD1_2:
+ val = snd_soc_read(rt5651->codec, RT5651_INT_IRQ_ST) & 0x2000;
+ break;
+ case RT5651_JD2:
+ val = snd_soc_read(rt5651->codec, RT5651_INT_IRQ_ST) & 0x4000;
+ break;
+ default:
+ break;
+ }
+
+ report = rt5651_jack_detect(rt5651->codec, !val);
+
+ snd_soc_jack_report(rt5651->hp_jack, report, SND_JACK_HEADSET);
+}
+
+int rt5651_set_jack_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *hp_jack)
+{
+ struct rt5651_priv *rt5651 = snd_soc_codec_get_drvdata(codec);
+
+ rt5651->hp_jack = hp_jack;
+ rt5651_irq(0, rt5651);
return 0;
}
+EXPORT_SYMBOL_GPL(rt5651_set_jack_detect);
static int rt5651_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
@@ -1746,6 +1889,10 @@ static int rt5651_i2c_probe(struct i2c_client *i2c,
rt5651->pdata = *pdata;
else if (i2c->dev.of_node)
rt5651_parse_dt(rt5651, i2c->dev.of_node);
+ else
+ dmi_check_system(rt5651_quirk_table);
+
+ rt5651_set_pdata(rt5651);
rt5651->regmap = devm_regmap_init_i2c(i2c, &rt5651_regmap);
if (IS_ERR(rt5651->regmap)) {
@@ -1779,6 +1926,59 @@ static int rt5651_i2c_probe(struct i2c_client *i2c,
rt5651->hp_mute = 1;
+ if (rt5651->pdata.jd_src) {
+
+ /* IRQ output on GPIO1 */
+ regmap_update_bits(rt5651->regmap, RT5651_GPIO_CTRL1,
+ RT5651_GP1_PIN_MASK, RT5651_GP1_PIN_IRQ);
+
+ switch (rt5651->pdata.jd_src) {
+ case RT5651_JD1_1:
+ regmap_update_bits(rt5651->regmap, RT5651_JD_CTRL2,
+ RT5651_JD_TRG_SEL_MASK,
+ RT5651_JD_TRG_SEL_JD1_1);
+ regmap_update_bits(rt5651->regmap, RT5651_IRQ_CTRL1,
+ RT5651_JD1_1_IRQ_EN,
+ RT5651_JD1_1_IRQ_EN);
+ break;
+ case RT5651_JD1_2:
+ regmap_update_bits(rt5651->regmap, RT5651_JD_CTRL2,
+ RT5651_JD_TRG_SEL_MASK,
+ RT5651_JD_TRG_SEL_JD1_2);
+ regmap_update_bits(rt5651->regmap, RT5651_IRQ_CTRL1,
+ RT5651_JD1_2_IRQ_EN,
+ RT5651_JD1_2_IRQ_EN);
+ break;
+ case RT5651_JD2:
+ regmap_update_bits(rt5651->regmap, RT5651_JD_CTRL2,
+ RT5651_JD_TRG_SEL_MASK,
+ RT5651_JD_TRG_SEL_JD2);
+ regmap_update_bits(rt5651->regmap, RT5651_IRQ_CTRL1,
+ RT5651_JD2_IRQ_EN,
+ RT5651_JD2_IRQ_EN);
+ break;
+ case RT5651_JD_NULL:
+ break;
+ default:
+ dev_warn(&i2c->dev, "Currently only JD1_1 / JD1_2 / JD2 are supported\n");
+ break;
+ }
+ }
+
+ INIT_DELAYED_WORK(&rt5651->jack_detect_work, rt5651_jack_detect_work);
+
+ if (i2c->irq) {
+ ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+ rt5651_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT, "rt5651", rt5651);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to request IRQ: %d\n", ret);
+ return ret;
+ }
+ }
+
ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5651,
rt5651_dai, ARRAY_SIZE(rt5651_dai));
@@ -1787,6 +1987,9 @@ static int rt5651_i2c_probe(struct i2c_client *i2c,
static int rt5651_i2c_remove(struct i2c_client *i2c)
{
+ struct rt5651_priv *rt5651 = i2c_get_clientdata(i2c);
+
+ cancel_delayed_work_sync(&rt5651->jack_detect_work);
snd_soc_unregister_codec(&i2c->dev);
return 0;
diff --git a/sound/soc/codecs/rt5651.h b/sound/soc/codecs/rt5651.h
index 1bd33cfa6411..4f8b202121d7 100644
--- a/sound/soc/codecs/rt5651.h
+++ b/sound/soc/codecs/rt5651.h
@@ -2062,6 +2062,8 @@ struct rt5651_priv {
struct snd_soc_codec *codec;
struct rt5651_platform_data pdata;
struct regmap *regmap;
+ struct snd_soc_jack *hp_jack;
+ struct delayed_work jack_detect_work;
int sysclk;
int sysclk_src;
@@ -2077,4 +2079,6 @@ struct rt5651_priv {
bool hp_mute;
};
+int rt5651_set_jack_detect(struct snd_soc_codec *codec,
+ struct snd_soc_jack *hp_jack);
#endif /* __RT5651_H__ */
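
For reference only (not part of the patch): how the quirk word used by the new rt5651 DMI handling packs a jack-detect source into bits 7:0 (RT5651_JD_MAP) with feature flags above it, sketched as a userspace program. GENMASK/BIT are redefined locally for the sketch, and the JD source value 1 stands in for RT5651_JD1_1.

#include <stdio.h>

#define BIT(n)          (1UL << (n))
#define GENMASK(h, l)   (((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

#define JD_MAP(quirk)   ((quirk) & GENMASK(7, 0))   /* jack-detect source in bits 7:0 */
#define IN2_DIFF        BIT(16)                     /* feature flags live above it */
#define DMIC_EN         BIT(17)

int main(void)
{
    unsigned long quirk = 1 /* JD1_1 */ | DMIC_EN;

    printf("jd source: %lu, dmic: %s, in2 diff: %s\n",
           JD_MAP(quirk),
           (quirk & DMIC_EN) ? "yes" : "no",
           (quirk & IN2_DIFF) ? "yes" : "no");
    return 0;
}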
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
index fa66b11df8d4..07e7757417bc 100644
--- a/sound/soc/codecs/rt5659.c
+++ b/sound/soc/codecs/rt5659.c
@@ -3385,10 +3385,9 @@ static int rt5659_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return 0;
}
-static int rt5659_set_dai_sysclk(struct snd_soc_dai *dai,
- int clk_id, unsigned int freq, int dir)
+static int rt5659_set_codec_sysclk(struct snd_soc_codec *codec, int clk_id,
+ int source, unsigned int freq, int dir)
{
- struct snd_soc_codec *codec = dai->codec;
struct rt5659_priv *rt5659 = snd_soc_codec_get_drvdata(codec);
unsigned int reg_val = 0;
@@ -3414,20 +3413,21 @@ static int rt5659_set_dai_sysclk(struct snd_soc_dai *dai,
rt5659->sysclk = freq;
rt5659->sysclk_src = clk_id;
- dev_dbg(dai->dev, "Sysclk is %dHz and clock id is %d\n", freq, clk_id);
+ dev_dbg(codec->dev, "Sysclk is %dHz and clock id is %d\n",
+ freq, clk_id);
return 0;
}
-static int rt5659_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int Source,
- unsigned int freq_in, unsigned int freq_out)
+static int rt5659_set_codec_pll(struct snd_soc_codec *codec, int pll_id,
+ int source, unsigned int freq_in,
+ unsigned int freq_out)
{
- struct snd_soc_codec *codec = dai->codec;
struct rt5659_priv *rt5659 = snd_soc_codec_get_drvdata(codec);
struct rl6231_pll_code pll_code;
int ret;
- if (Source == rt5659->pll_src && freq_in == rt5659->pll_in &&
+ if (source == rt5659->pll_src && freq_in == rt5659->pll_in &&
freq_out == rt5659->pll_out)
return 0;
@@ -3441,7 +3441,7 @@ static int rt5659_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int Source,
return 0;
}
- switch (Source) {
+ switch (source) {
case RT5659_PLL1_S_MCLK:
snd_soc_update_bits(codec, RT5659_GLB_CLK,
RT5659_PLL1_SRC_MASK, RT5659_PLL1_SRC_MCLK);
@@ -3459,7 +3459,7 @@ static int rt5659_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int Source,
RT5659_PLL1_SRC_MASK, RT5659_PLL1_SRC_BCLK3);
break;
default:
- dev_err(codec->dev, "Unknown PLL Source %d\n", Source);
+ dev_err(codec->dev, "Unknown PLL source %d\n", source);
return -EINVAL;
}
@@ -3481,7 +3481,7 @@ static int rt5659_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int Source,
rt5659->pll_in = freq_in;
rt5659->pll_out = freq_out;
- rt5659->pll_src = Source;
+ rt5659->pll_src = source;
return 0;
}
@@ -3666,9 +3666,7 @@ static int rt5659_resume(struct snd_soc_codec *codec)
static const struct snd_soc_dai_ops rt5659_aif_dai_ops = {
.hw_params = rt5659_hw_params,
.set_fmt = rt5659_set_dai_fmt,
- .set_sysclk = rt5659_set_dai_sysclk,
.set_tdm_slot = rt5659_set_tdm_slot,
- .set_pll = rt5659_set_dai_pll,
.set_bclk_ratio = rt5659_set_bclk_ratio,
};
@@ -3747,6 +3745,8 @@ static const struct snd_soc_codec_driver soc_codec_dev_rt5659 = {
.dapm_routes = rt5659_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(rt5659_dapm_routes),
},
+ .set_sysclk = rt5659_set_codec_sysclk,
+ .set_pll = rt5659_set_codec_pll,
};
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index e45b895d8279..b036c9dc0c8c 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -38,13 +38,24 @@ enum {
CODEC_VER_0,
};
+struct impedance_mapping_table {
+ unsigned int imp_min;
+ unsigned int imp_max;
+ unsigned int vol;
+ unsigned int dc_offset_l_manual;
+ unsigned int dc_offset_r_manual;
+ unsigned int dc_offset_l_manual_mic;
+ unsigned int dc_offset_r_manual_mic;
+};
+
struct rt5663_priv {
struct snd_soc_codec *codec;
struct rt5663_platform_data pdata;
struct regmap *regmap;
- struct delayed_work jack_detect_work;
+ struct delayed_work jack_detect_work, jd_unplug_work;
struct snd_soc_jack *hs_jack;
struct timer_list btn_check_timer;
+ struct impedance_mapping_table *imp_table;
int codec_ver;
int sysclk;
@@ -1575,6 +1586,9 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert)
rt5663->jack_type = SND_JACK_HEADSET;
rt5663_enable_push_button_irq(codec, true);
+ if (rt5663->pdata.impedance_sensing_num)
+ break;
+
if (rt5663->pdata.dc_offset_l_manual_mic) {
regmap_write(rt5663->regmap, RT5663_MIC_DECRO_2,
rt5663->pdata.dc_offset_l_manual_mic >>
@@ -1596,6 +1610,9 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert)
default:
rt5663->jack_type = SND_JACK_HEADPHONE;
+ if (rt5663->pdata.impedance_sensing_num)
+ break;
+
if (rt5663->pdata.dc_offset_l_manual) {
regmap_write(rt5663->regmap, RT5663_MIC_DECRO_2,
rt5663->pdata.dc_offset_l_manual >> 16);
@@ -1623,6 +1640,177 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert)
return rt5663->jack_type;
}
+static int rt5663_impedance_sensing(struct snd_soc_codec *codec)
+{
+ struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
+ unsigned int value, i, reg84, reg26, reg2fa, reg91, reg10, reg80;
+
+ for (i = 0; i < rt5663->pdata.impedance_sensing_num; i++) {
+ if (rt5663->imp_table[i].vol == 7)
+ break;
+ }
+
+ if (rt5663->jack_type == SND_JACK_HEADSET) {
+ snd_soc_write(codec, RT5663_MIC_DECRO_2,
+ rt5663->imp_table[i].dc_offset_l_manual_mic >> 16);
+ snd_soc_write(codec, RT5663_MIC_DECRO_3,
+ rt5663->imp_table[i].dc_offset_l_manual_mic & 0xffff);
+ snd_soc_write(codec, RT5663_MIC_DECRO_5,
+ rt5663->imp_table[i].dc_offset_r_manual_mic >> 16);
+ snd_soc_write(codec, RT5663_MIC_DECRO_6,
+ rt5663->imp_table[i].dc_offset_r_manual_mic & 0xffff);
+ } else {
+ snd_soc_write(codec, RT5663_MIC_DECRO_2,
+ rt5663->imp_table[i].dc_offset_l_manual >> 16);
+ snd_soc_write(codec, RT5663_MIC_DECRO_3,
+ rt5663->imp_table[i].dc_offset_l_manual & 0xffff);
+ snd_soc_write(codec, RT5663_MIC_DECRO_5,
+ rt5663->imp_table[i].dc_offset_r_manual >> 16);
+ snd_soc_write(codec, RT5663_MIC_DECRO_6,
+ rt5663->imp_table[i].dc_offset_r_manual & 0xffff);
+ }
+
+ reg84 = snd_soc_read(codec, RT5663_ASRC_2);
+ reg26 = snd_soc_read(codec, RT5663_STO1_ADC_MIXER);
+ reg2fa = snd_soc_read(codec, RT5663_DUMMY_1);
+ reg91 = snd_soc_read(codec, RT5663_HP_CHARGE_PUMP_1);
+ reg10 = snd_soc_read(codec, RT5663_RECMIX);
+ reg80 = snd_soc_read(codec, RT5663_GLB_CLK);
+
+ snd_soc_update_bits(codec, RT5663_STO_DRE_1, 0x8000, 0);
+ snd_soc_write(codec, RT5663_ASRC_2, 0);
+ snd_soc_write(codec, RT5663_STO1_ADC_MIXER, 0x4040);
+ snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
+ RT5663_PWR_VREF1_MASK | RT5663_PWR_VREF2_MASK |
+ RT5663_PWR_FV1_MASK | RT5663_PWR_FV2_MASK,
+ RT5663_PWR_VREF1 | RT5663_PWR_VREF2);
+ usleep_range(10000, 10005);
+ snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
+ RT5663_PWR_FV1_MASK | RT5663_PWR_FV2_MASK,
+ RT5663_PWR_FV1 | RT5663_PWR_FV2);
+ snd_soc_update_bits(codec, RT5663_GLB_CLK, RT5663_SCLK_SRC_MASK,
+ RT5663_SCLK_SRC_RCCLK);
+ snd_soc_update_bits(codec, RT5663_RC_CLK, RT5663_DIG_25M_CLK_MASK,
+ RT5663_DIG_25M_CLK_EN);
+ snd_soc_update_bits(codec, RT5663_ADDA_CLK_1, RT5663_I2S_PD1_MASK, 0);
+ snd_soc_write(codec, RT5663_PRE_DIV_GATING_1, 0xff00);
+ snd_soc_write(codec, RT5663_PRE_DIV_GATING_2, 0xfffc);
+ snd_soc_write(codec, RT5663_HP_CHARGE_PUMP_1, 0x1232);
+ snd_soc_write(codec, RT5663_HP_LOGIC_2, 0x0005);
+ snd_soc_write(codec, RT5663_DEPOP_2, 0x3003);
+ snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x0030, 0x0030);
+ snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x0003, 0x0003);
+ snd_soc_update_bits(codec, RT5663_PWR_DIG_2,
+ RT5663_PWR_ADC_S1F | RT5663_PWR_DAC_S1F,
+ RT5663_PWR_ADC_S1F | RT5663_PWR_DAC_S1F);
+ snd_soc_update_bits(codec, RT5663_PWR_DIG_1,
+ RT5663_PWR_DAC_L1 | RT5663_PWR_DAC_R1 |
+ RT5663_PWR_LDO_DACREF_MASK | RT5663_PWR_ADC_L1 |
+ RT5663_PWR_ADC_R1,
+ RT5663_PWR_DAC_L1 | RT5663_PWR_DAC_R1 |
+ RT5663_PWR_LDO_DACREF_ON | RT5663_PWR_ADC_L1 |
+ RT5663_PWR_ADC_R1);
+ msleep(40);
+ snd_soc_update_bits(codec, RT5663_PWR_ANLG_2,
+ RT5663_PWR_RECMIX1 | RT5663_PWR_RECMIX2,
+ RT5663_PWR_RECMIX1 | RT5663_PWR_RECMIX2);
+ msleep(30);
+ snd_soc_write(codec, RT5663_HP_CHARGE_PUMP_2, 0x1371);
+ snd_soc_write(codec, RT5663_STO_DAC_MIXER, 0);
+ snd_soc_write(codec, RT5663_BYPASS_STO_DAC, 0x000c);
+ snd_soc_write(codec, RT5663_HP_BIAS, 0xafaa);
+ snd_soc_write(codec, RT5663_CHARGE_PUMP_1, 0x2224);
+ snd_soc_write(codec, RT5663_HP_OUT_EN, 0x8088);
+ snd_soc_write(codec, RT5663_CHOP_ADC, 0x3000);
+ snd_soc_write(codec, RT5663_ADDA_RST, 0xc000);
+ snd_soc_write(codec, RT5663_STO1_HPF_ADJ1, 0x3320);
+ snd_soc_write(codec, RT5663_HP_CALIB_2, 0x00c9);
+ snd_soc_write(codec, RT5663_DUMMY_1, 0x004c);
+ snd_soc_write(codec, RT5663_ANA_BIAS_CUR_1, 0x7733);
+ snd_soc_write(codec, RT5663_CHARGE_PUMP_2, 0x7777);
+ snd_soc_write(codec, RT5663_STO_DRE_9, 0x0007);
+ snd_soc_write(codec, RT5663_STO_DRE_10, 0x0007);
+ snd_soc_write(codec, RT5663_DUMMY_2, 0x02a4);
+ snd_soc_write(codec, RT5663_RECMIX, 0x0005);
+ snd_soc_write(codec, RT5663_HP_IMP_SEN_1, 0x4334);
+ snd_soc_update_bits(codec, RT5663_IRQ_3, 0x0004, 0x0004);
+ snd_soc_write(codec, RT5663_HP_LOGIC_1, 0x2200);
+ snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x3000, 0x3000);
+ snd_soc_write(codec, RT5663_HP_LOGIC_1, 0x6200);
+
+ for (i = 0; i < 100; i++) {
+ msleep(20);
+ if (snd_soc_read(codec, RT5663_INT_ST_1) & 0x2)
+ break;
+ }
+
+ value = snd_soc_read(codec, RT5663_HP_IMP_SEN_4);
+
+ snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x3000, 0);
+ snd_soc_write(codec, RT5663_INT_ST_1, 0);
+ snd_soc_write(codec, RT5663_HP_LOGIC_1, 0);
+ snd_soc_update_bits(codec, RT5663_RC_CLK, RT5663_DIG_25M_CLK_MASK,
+ RT5663_DIG_25M_CLK_DIS);
+ snd_soc_write(codec, RT5663_GLB_CLK, reg80);
+ snd_soc_write(codec, RT5663_RECMIX, reg10);
+ snd_soc_write(codec, RT5663_DUMMY_2, 0x00a4);
+ snd_soc_write(codec, RT5663_DUMMY_1, reg2fa);
+ snd_soc_write(codec, RT5663_HP_CALIB_2, 0x00c8);
+ snd_soc_write(codec, RT5663_STO1_HPF_ADJ1, 0xb320);
+ snd_soc_write(codec, RT5663_ADDA_RST, 0xe400);
+ snd_soc_write(codec, RT5663_CHOP_ADC, 0x2000);
+ snd_soc_write(codec, RT5663_HP_OUT_EN, 0x0008);
+ snd_soc_update_bits(codec, RT5663_PWR_ANLG_2,
+ RT5663_PWR_RECMIX1 | RT5663_PWR_RECMIX2, 0);
+ snd_soc_update_bits(codec, RT5663_PWR_DIG_1,
+ RT5663_PWR_DAC_L1 | RT5663_PWR_DAC_R1 |
+ RT5663_PWR_LDO_DACREF_MASK | RT5663_PWR_ADC_L1 |
+ RT5663_PWR_ADC_R1, 0);
+ snd_soc_update_bits(codec, RT5663_PWR_DIG_2,
+ RT5663_PWR_ADC_S1F | RT5663_PWR_DAC_S1F, 0);
+ snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x0003, 0);
+ snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x0030, 0);
+ snd_soc_write(codec, RT5663_HP_LOGIC_2, 0);
+ snd_soc_write(codec, RT5663_HP_CHARGE_PUMP_1, reg91);
+ snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
+ RT5663_PWR_VREF1_MASK | RT5663_PWR_VREF2_MASK, 0);
+ snd_soc_write(codec, RT5663_STO1_ADC_MIXER, reg26);
+ snd_soc_write(codec, RT5663_ASRC_2, reg84);
+
+ for (i = 0; i < rt5663->pdata.impedance_sensing_num; i++) {
+ if (value >= rt5663->imp_table[i].imp_min &&
+ value <= rt5663->imp_table[i].imp_max)
+ break;
+ }
+
+ snd_soc_update_bits(codec, RT5663_STO_DRE_9, RT5663_DRE_GAIN_HP_MASK,
+ rt5663->imp_table[i].vol);
+ snd_soc_update_bits(codec, RT5663_STO_DRE_10, RT5663_DRE_GAIN_HP_MASK,
+ rt5663->imp_table[i].vol);
+
+ if (rt5663->jack_type == SND_JACK_HEADSET) {
+ snd_soc_write(codec, RT5663_MIC_DECRO_2,
+ rt5663->imp_table[i].dc_offset_l_manual_mic >> 16);
+ snd_soc_write(codec, RT5663_MIC_DECRO_3,
+ rt5663->imp_table[i].dc_offset_l_manual_mic & 0xffff);
+ snd_soc_write(codec, RT5663_MIC_DECRO_5,
+ rt5663->imp_table[i].dc_offset_r_manual_mic >> 16);
+ snd_soc_write(codec, RT5663_MIC_DECRO_6,
+ rt5663->imp_table[i].dc_offset_r_manual_mic & 0xffff);
+ } else {
+ snd_soc_write(codec, RT5663_MIC_DECRO_2,
+ rt5663->imp_table[i].dc_offset_l_manual >> 16);
+ snd_soc_write(codec, RT5663_MIC_DECRO_3,
+ rt5663->imp_table[i].dc_offset_l_manual & 0xffff);
+ snd_soc_write(codec, RT5663_MIC_DECRO_5,
+ rt5663->imp_table[i].dc_offset_r_manual >> 16);
+ snd_soc_write(codec, RT5663_MIC_DECRO_6,
+ rt5663->imp_table[i].dc_offset_r_manual & 0xffff);
+ }
+
+ return 0;
+}
+
static int rt5663_button_detect(struct snd_soc_codec *codec)
{
int btn_type, val;
@@ -1702,6 +1890,8 @@ static void rt5663_jack_detect_work(struct work_struct *work)
break;
case CODEC_VER_0:
report = rt5663_jack_detect(rt5663->codec, 1);
+ if (rt5663->pdata.impedance_sensing_num)
+ rt5663_impedance_sensing(rt5663->codec);
break;
default:
dev_err(codec->dev, "Unknown CODEC Version\n");
@@ -1751,8 +1941,15 @@ static void rt5663_jack_detect_work(struct work_struct *work)
break;
}
/* button release or spurious interrupt */
- if (btn_type == 0)
+ if (btn_type == 0) {
report = rt5663->jack_type;
+ cancel_delayed_work_sync(
+ &rt5663->jd_unplug_work);
+ } else {
+ queue_delayed_work(system_wq,
+ &rt5663->jd_unplug_work,
+ msecs_to_jiffies(500));
+ }
}
} else {
/* jack out */
@@ -1773,6 +1970,37 @@ static void rt5663_jack_detect_work(struct work_struct *work)
SND_JACK_BTN_2 | SND_JACK_BTN_3);
}
+static void rt5663_jd_unplug_work(struct work_struct *work)
+{
+ struct rt5663_priv *rt5663 =
+ container_of(work, struct rt5663_priv, jd_unplug_work.work);
+ struct snd_soc_codec *codec = rt5663->codec;
+
+ if (!codec)
+ return;
+
+ if (!rt5663_check_jd_status(codec)) {
+ /* jack out */
+ switch (rt5663->codec_ver) {
+ case CODEC_VER_1:
+ rt5663_v2_jack_detect(rt5663->codec, 0);
+ break;
+ case CODEC_VER_0:
+ rt5663_jack_detect(rt5663->codec, 0);
+ break;
+ default:
+ dev_err(codec->dev, "Unknown CODEC Version\n");
+ }
+
+ snd_soc_jack_report(rt5663->hs_jack, 0, SND_JACK_HEADSET |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3);
+ } else {
+ queue_delayed_work(system_wq, &rt5663->jd_unplug_work,
+ msecs_to_jiffies(500));
+ }
+}
+
static const struct snd_kcontrol_new rt5663_snd_controls[] = {
/* DAC Digital Volume */
SOC_DOUBLE_TLV("DAC Playback Volume", RT5663_STO1_DAC_DIG_VOL,
@@ -1797,10 +2025,6 @@ static const struct snd_kcontrol_new rt5663_v2_specific_controls[] = {
};
static const struct snd_kcontrol_new rt5663_specific_controls[] = {
- /* Headphone Output Volume */
- SOC_DOUBLE_R_TLV("Headphone Playback Volume", RT5663_STO_DRE_9,
- RT5663_STO_DRE_10, RT5663_DRE_GAIN_HP_SHIFT, 23, 1,
- rt5663_hp_vol_tlv),
/* Mic Boost Volume*/
SOC_SINGLE_TLV("IN1 Capture Volume", RT5663_CBJ_2,
RT5663_GAIN_BST1_SHIFT, 8, 0, in_bst_tlv),
@@ -1808,6 +2032,13 @@ static const struct snd_kcontrol_new rt5663_specific_controls[] = {
SOC_ENUM("IF1 ADC Data Swap", rt5663_if1_adc_enum),
};
+static const struct snd_kcontrol_new rt5663_hpvol_controls[] = {
+ /* Headphone Output Volume */
+ SOC_DOUBLE_R_TLV("Headphone Playback Volume", RT5663_STO_DRE_9,
+ RT5663_STO_DRE_10, RT5663_DRE_GAIN_HP_SHIFT, 23, 1,
+ rt5663_hp_vol_tlv),
+};
+
static int rt5663_is_sys_clk_from_pll(struct snd_soc_dapm_widget *w,
struct snd_soc_dapm_widget *sink)
{
@@ -2890,6 +3121,10 @@ static int rt5663_probe(struct snd_soc_codec *codec)
ARRAY_SIZE(rt5663_specific_dapm_routes));
snd_soc_add_codec_controls(codec, rt5663_specific_controls,
ARRAY_SIZE(rt5663_specific_controls));
+
+ if (!rt5663->imp_table)
+ snd_soc_add_codec_controls(codec, rt5663_hpvol_controls,
+ ARRAY_SIZE(rt5663_hpvol_controls));
break;
}
@@ -3178,6 +3413,8 @@ static void rt5663_calibrate(struct rt5663_priv *rt5663)
static int rt5663_parse_dp(struct rt5663_priv *rt5663, struct device *dev)
{
+ int table_size;
+
device_property_read_u32(dev, "realtek,dc_offset_l_manual",
&rt5663->pdata.dc_offset_l_manual);
device_property_read_u32(dev, "realtek,dc_offset_r_manual",
@@ -3186,6 +3423,17 @@ static int rt5663_parse_dp(struct rt5663_priv *rt5663, struct device *dev)
&rt5663->pdata.dc_offset_l_manual_mic);
device_property_read_u32(dev, "realtek,dc_offset_r_manual_mic",
&rt5663->pdata.dc_offset_r_manual_mic);
+ device_property_read_u32(dev, "realtek,impedance_sensing_num",
+ &rt5663->pdata.impedance_sensing_num);
+
+ if (rt5663->pdata.impedance_sensing_num) {
+ table_size = sizeof(struct impedance_mapping_table) *
+ rt5663->pdata.impedance_sensing_num;
+ rt5663->imp_table = devm_kzalloc(dev, table_size, GFP_KERNEL);
+ device_property_read_u32_array(dev,
+ "realtek,impedance_sensing_table",
+ (u32 *)rt5663->imp_table, table_size);
+ }
return 0;
}
@@ -3219,7 +3467,16 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
ret);
return ret;
}
- regmap_read(regmap, RT5663_VENDOR_ID_2, &val);
+
+ ret = regmap_read(regmap, RT5663_VENDOR_ID_2, &val);
+ if (ret || (val != RT5663_DEVICE_ID_2 && val != RT5663_DEVICE_ID_1)) {
+ dev_err(&i2c->dev,
+ "Device with ID register %#x is not rt5663, retry one time.\n",
+ val);
+ msleep(100);
+ regmap_read(regmap, RT5663_VENDOR_ID_2, &val);
+ }
+
switch (val) {
case RT5663_DEVICE_ID_2:
rt5663->regmap = devm_regmap_init_i2c(i2c, &rt5663_v2_regmap);
@@ -3338,6 +3595,7 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
}
INIT_DELAYED_WORK(&rt5663->jack_detect_work, rt5663_jack_detect_work);
+ INIT_DELAYED_WORK(&rt5663->jd_unplug_work, rt5663_jd_unplug_work);
if (i2c->irq) {
ret = request_irq(i2c->irq, rt5663_irq,
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 9545764ef3eb..c5094b4399e2 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -34,6 +34,24 @@
#include "rt5670.h"
#include "rt5670-dsp.h"
+#define RT5670_DEV_GPIO BIT(0)
+#define RT5670_IN2_DIFF BIT(1)
+#define RT5670_DMIC_EN BIT(2)
+#define RT5670_DMIC1_IN2P BIT(3)
+#define RT5670_DMIC1_GPIO6 BIT(4)
+#define RT5670_DMIC1_GPIO7 BIT(5)
+#define RT5670_DMIC2_INR BIT(6)
+#define RT5670_DMIC2_GPIO8 BIT(7)
+#define RT5670_DMIC3_GPIO5 BIT(8)
+#define RT5670_JD_MODE1 BIT(9)
+#define RT5670_JD_MODE2 BIT(10)
+#define RT5670_JD_MODE3 BIT(11)
+
+static unsigned long rt5670_quirk;
+static unsigned int quirk_override;
+module_param_named(quirk, quirk_override, uint, 0444);
+MODULE_PARM_DESC(quirk, "Board-specific quirk override");
+
#define RT5670_DEVICE_ID 0x6271
#define RT5670_PR_RANGE_BASE (0xff + 1)
@@ -2582,6 +2600,24 @@ static int rt5670_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
return 0;
}
+static int rt5670_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+ struct snd_soc_codec *codec = dai->codec;
+
+ dev_dbg(codec->dev, "%s ratio=%d\n", __func__, ratio);
+ if (dai->id != RT5670_AIF1)
+ return 0;
+
+ if ((ratio % 50) == 0)
+ snd_soc_update_bits(codec, RT5670_GEN_CTRL3,
+ RT5670_TDM_DATA_MODE_SEL, RT5670_TDM_DATA_MODE_50FS);
+ else
+ snd_soc_update_bits(codec, RT5670_GEN_CTRL3,
+ RT5670_TDM_DATA_MODE_SEL, RT5670_TDM_DATA_MODE_NOR);
+
+ return 0;
+}
+
static int rt5670_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
@@ -2712,6 +2748,7 @@ static const struct snd_soc_dai_ops rt5670_aif_dai_ops = {
.set_fmt = rt5670_set_dai_fmt,
.set_tdm_slot = rt5670_set_tdm_slot,
.set_pll = rt5670_set_dai_pll,
+ .set_bclk_ratio = rt5670_set_bclk_ratio,
};
static struct snd_soc_dai_driver rt5670_dai[] = {
@@ -2808,56 +2845,84 @@ static const struct acpi_device_id rt5670_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, rt5670_acpi_match);
#endif
-static const struct dmi_system_id dmi_platform_intel_braswell[] = {
+static int rt5670_quirk_cb(const struct dmi_system_id *id)
+{
+ rt5670_quirk = (unsigned long)id->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id dmi_platform_intel_quirks[] = {
{
+ .callback = rt5670_quirk_cb,
.ident = "Intel Braswell",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Braswell CRB"),
},
+ .driver_data = (unsigned long *)(RT5670_DMIC_EN |
+ RT5670_DMIC1_IN2P |
+ RT5670_DEV_GPIO |
+ RT5670_JD_MODE1),
},
{
+ .callback = rt5670_quirk_cb,
.ident = "Dell Wyse 3040",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Wyse 3040"),
},
+ .driver_data = (unsigned long *)(RT5670_DMIC_EN |
+ RT5670_DMIC1_IN2P |
+ RT5670_DEV_GPIO |
+ RT5670_JD_MODE1),
},
{
+ .callback = rt5670_quirk_cb,
.ident = "Lenovo Thinkpad Tablet 10",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 10"),
},
+ .driver_data = (unsigned long *)(RT5670_DMIC_EN |
+ RT5670_DMIC1_IN2P |
+ RT5670_DEV_GPIO |
+ RT5670_JD_MODE1),
},
{
+ .callback = rt5670_quirk_cb,
.ident = "Lenovo Thinkpad Tablet 10",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Tablet B"),
},
+ .driver_data = (unsigned long *)(RT5670_DMIC_EN |
+ RT5670_DMIC1_IN2P |
+ RT5670_DEV_GPIO |
+ RT5670_JD_MODE1),
},
- {}
-};
-
-static const struct dmi_system_id dmi_platform_intel_bytcht_jdmode2[] = {
{
+ .callback = rt5670_quirk_cb,
.ident = "Lenovo Thinkpad Tablet 10",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Miix 2 10"),
},
+ .driver_data = (unsigned long *)(RT5670_DMIC_EN |
+ RT5670_DMIC1_IN2P |
+ RT5670_DEV_GPIO |
+ RT5670_JD_MODE2),
},
- {}
-};
-
-static const struct dmi_system_id dmi_platform_intel_bytcht_jdmode3[] = {
{
+ .callback = rt5670_quirk_cb,
.ident = "Dell Venue 8 Pro 5855",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 8 Pro 5855"),
},
+ .driver_data = (unsigned long *)(RT5670_DMIC_EN |
+ RT5670_DMIC2_INR |
+ RT5670_DEV_GPIO |
+ RT5670_JD_MODE3),
},
{}
};
@@ -2881,21 +2946,61 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
if (pdata)
rt5670->pdata = *pdata;
- if (dmi_check_system(dmi_platform_intel_braswell)) {
- rt5670->pdata.dmic_en = true;
- rt5670->pdata.dmic1_data_pin = RT5670_DMIC_DATA_IN2P;
+ dmi_check_system(dmi_platform_intel_quirks);
+ if (quirk_override) {
+ dev_info(&i2c->dev, "Overriding quirk 0x%x => 0x%x\n",
+ (unsigned int)rt5670_quirk, quirk_override);
+ rt5670_quirk = quirk_override;
+ }
+
+ if (rt5670_quirk & RT5670_DEV_GPIO) {
rt5670->pdata.dev_gpio = true;
- rt5670->pdata.jd_mode = 1;
- } else if (dmi_check_system(dmi_platform_intel_bytcht_jdmode2)) {
+ dev_info(&i2c->dev, "quirk dev_gpio\n");
+ }
+ if (rt5670_quirk & RT5670_IN2_DIFF) {
+ rt5670->pdata.in2_diff = true;
+ dev_info(&i2c->dev, "quirk IN2_DIFF\n");
+ }
+ if (rt5670_quirk & RT5670_DMIC_EN) {
rt5670->pdata.dmic_en = true;
+ dev_info(&i2c->dev, "quirk DMIC enabled\n");
+ }
+ if (rt5670_quirk & RT5670_DMIC1_IN2P) {
rt5670->pdata.dmic1_data_pin = RT5670_DMIC_DATA_IN2P;
- rt5670->pdata.dev_gpio = true;
+ dev_info(&i2c->dev, "quirk DMIC1 on IN2P pin\n");
+ }
+ if (rt5670_quirk & RT5670_DMIC1_GPIO6) {
+ rt5670->pdata.dmic1_data_pin = RT5670_DMIC_DATA_GPIO6;
+ dev_info(&i2c->dev, "quirk DMIC1 on GPIO6 pin\n");
+ }
+ if (rt5670_quirk & RT5670_DMIC1_GPIO7) {
+ rt5670->pdata.dmic1_data_pin = RT5670_DMIC_DATA_GPIO7;
+ dev_info(&i2c->dev, "quirk DMIC1 on GPIO7 pin\n");
+ }
+ if (rt5670_quirk & RT5670_DMIC2_INR) {
+ rt5670->pdata.dmic2_data_pin = RT5670_DMIC_DATA_IN3N;
+ dev_info(&i2c->dev, "quirk DMIC2 on INR pin\n");
+ }
+ if (rt5670_quirk & RT5670_DMIC2_GPIO8) {
+ rt5670->pdata.dmic2_data_pin = RT5670_DMIC_DATA_GPIO8;
+ dev_info(&i2c->dev, "quirk DMIC2 on GPIO8 pin\n");
+ }
+ if (rt5670_quirk & RT5670_DMIC3_GPIO5) {
+ rt5670->pdata.dmic3_data_pin = RT5670_DMIC_DATA_GPIO5;
+ dev_info(&i2c->dev, "quirk DMIC3 on GPIO5 pin\n");
+ }
+
+ if (rt5670_quirk & RT5670_JD_MODE1) {
+ rt5670->pdata.jd_mode = 1;
+ dev_info(&i2c->dev, "quirk JD mode 1\n");
+ }
+ if (rt5670_quirk & RT5670_JD_MODE2) {
rt5670->pdata.jd_mode = 2;
- } else if (dmi_check_system(dmi_platform_intel_bytcht_jdmode3)) {
- rt5670->pdata.dmic_en = true;
- rt5670->pdata.dmic1_data_pin = RT5670_DMIC_DATA_IN2P;
- rt5670->pdata.dev_gpio = true;
+ dev_info(&i2c->dev, "quirk JD mode 2\n");
+ }
+ if (rt5670_quirk & RT5670_JD_MODE3) {
rt5670->pdata.jd_mode = 3;
+ dev_info(&i2c->dev, "quirk JD mode 3\n");
}
rt5670->regmap = devm_regmap_init_i2c(i2c, &rt5670_regmap);
diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h
index 5ba485cae4e6..265df80d504e 100644
--- a/sound/soc/codecs/rt5670.h
+++ b/sound/soc/codecs/rt5670.h
@@ -1816,6 +1816,10 @@
#define RT5670_ZCD_HP_DIS (0x0 << 15)
#define RT5670_ZCD_HP_EN (0x1 << 15)
+/* General Control 3 (0xfc) */
+#define RT5670_TDM_DATA_MODE_SEL (0x1 << 11)
+#define RT5670_TDM_DATA_MODE_NOR (0x0 << 11)
+#define RT5670_TDM_DATA_MODE_50FS (0x1 << 11)
/* Codec Private Register definition */
/* 3D Speaker Control (0x63) */
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index 810369f687d7..a09499977be4 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -697,7 +697,8 @@ static int tas571x_i2c_probe(struct i2c_client *client,
return PTR_ERR(priv->mclk);
}
- BUG_ON(priv->chip->num_supply_names > TAS571X_MAX_SUPPLIES);
+ if (WARN_ON(priv->chip->num_supply_names > TAS571X_MAX_SUPPLIES))
+ return -EINVAL;
for (i = 0; i < priv->chip->num_supply_names; i++)
priv->supplies[i].supply = priv->chip->supply_names[i];
diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
index 95e0a7abeb7a..f8dd67ca0744 100644
--- a/sound/soc/codecs/tfa9879.c
+++ b/sound/soc/codecs/tfa9879.c
@@ -312,9 +312,15 @@ static const struct i2c_device_id tfa9879_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tfa9879_i2c_id);
+static const struct of_device_id tfa9879_of_match[] = {
+ { .compatible = "nxp,tfa9879", },
+ { }
+};
+
static struct i2c_driver tfa9879_i2c_driver = {
.driver = {
.name = "tfa9879",
+ .of_match_table = tfa9879_of_match,
},
.probe = tfa9879_i2c_probe,
.remove = tfa9879_i2c_remove,
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index 3d42138a7974..74909211c608 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -454,6 +454,7 @@ static int tlv320aic23_set_dai_fmt(struct snd_soc_dai *codec_dai,
break;
case SND_SOC_DAIFMT_DSP_A:
iface_reg |= TLV320AIC23_LRP_ON;
+ /* fall through */
case SND_SOC_DAIFMT_DSP_B:
iface_reg |= TLV320AIC23_FOR_DSP;
break;
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index 54a87a905eb6..e2862372c26e 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -929,7 +929,7 @@ static int aic31xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
case SND_SOC_DAIFMT_I2S:
break;
case SND_SOC_DAIFMT_DSP_A:
- dsp_a_val = 0x1;
+ dsp_a_val = 0x1; /* fall through */
case SND_SOC_DAIFMT_DSP_B:
/* NOTE: BCLKINV bit value 1 equals NB and 0 equals IB */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index 2e014c80d113..616cd4bebd01 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -274,6 +274,7 @@ static int tpa6130a2_probe(struct i2c_client *client,
default:
dev_warn(dev, "Unknown TPA model (%d). Assuming 6130A2\n",
data->id);
+ /* fall through */
case TPA6130A2:
regulator = "Vdd";
break;
diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c
index 43568435c208..738e04b09116 100644
--- a/sound/soc/codecs/ts3a227e.c
+++ b/sound/soc/codecs/ts3a227e.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
+#include <linux/acpi.h>
#include <sound/core.h>
#include <sound/jack.h>
@@ -374,11 +375,20 @@ static const struct of_device_id ts3a227e_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ts3a227e_of_match);
+#ifdef CONFIG_ACPI
+static struct acpi_device_id ts3a227e_acpi_match[] = {
+ { "104C227E", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, ts3a227e_acpi_match);
+#endif
+
static struct i2c_driver ts3a227e_driver = {
.driver = {
.name = "ts3a227e",
.pm = &ts3a227e_pm,
.of_match_table = of_match_ptr(ts3a227e_of_match),
+ .acpi_match_table = ACPI_PTR(ts3a227e_acpi_match),
},
.probe = ts3a227e_i2c_probe,
.id_table = ts3a227e_i2c_ids,
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 72486bf072f2..4f0481d3c7a7 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1951,7 +1951,6 @@ static int wm5102_codec_probe(struct snd_soc_codec *codec)
return ret;
arizona_init_gpio(codec);
- arizona_init_notifiers(codec);
snd_soc_component_disable_pin(component, "HAPTICS");
@@ -2043,6 +2042,14 @@ static int wm5102_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, wm5102);
+ if (IS_ENABLED(CONFIG_OF)) {
+ if (!dev_get_platdata(arizona->dev)) {
+ ret = arizona_of_get_audio_pdata(arizona);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
mutex_init(&arizona->dac_comp_lock);
wm5102->core.arizona = arizona;
@@ -2098,6 +2105,11 @@ static int wm5102_probe(struct platform_device *pdev)
return ret;
}
+ arizona_init_common(arizona);
+
+ ret = arizona_init_vol_limit(arizona);
+ if (ret < 0)
+ goto err_dsp_irq;
ret = arizona_init_spk_irqs(arizona);
if (ret < 0)
goto err_dsp_irq;
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 858a24fc28e8..6ed1e1f9ce51 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2290,7 +2290,6 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
arizona_init_gpio(codec);
arizona_init_mono(codec);
- arizona_init_notifiers(codec);
for (i = 0; i < WM5110_NUM_ADSP; ++i) {
ret = wm_adsp2_codec_probe(&priv->core.adsp[i], codec);
@@ -2398,6 +2397,14 @@ static int wm5110_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, wm5110);
+ if (IS_ENABLED(CONFIG_OF)) {
+ if (!dev_get_platdata(arizona->dev)) {
+ ret = arizona_of_get_audio_pdata(arizona);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
wm5110->core.arizona = arizona;
wm5110->core.num_inputs = 8;
@@ -2454,6 +2461,11 @@ static int wm5110_probe(struct platform_device *pdev)
return ret;
}
+ arizona_init_common(arizona);
+
+ ret = arizona_init_vol_limit(arizona);
+ if (ret < 0)
+ goto err_dsp_irq;
ret = arizona_init_spk_irqs(arizona);
if (ret < 0)
goto err_dsp_irq;
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index b8c1940f2243..a394dbee77aa 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -196,7 +196,7 @@ static int wm8741_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_codec *codec = dai->codec;
struct wm8741_priv *wm8741 = snd_soc_codec_get_drvdata(codec);
- u16 iface = snd_soc_read(codec, WM8741_FORMAT_CONTROL) & 0x1FC;
+ unsigned int iface;
int i;
/* The set of sample rates that can be supported depends on the
@@ -223,15 +223,16 @@ static int wm8741_hw_params(struct snd_pcm_substream *substream,
/* bit size */
switch (params_width(params)) {
case 16:
+ iface = 0x0;
break;
case 20:
- iface |= 0x0001;
+ iface = 0x1;
break;
case 24:
- iface |= 0x0002;
+ iface = 0x2;
break;
case 32:
- iface |= 0x0003;
+ iface = 0x3;
break;
default:
dev_dbg(codec->dev, "wm8741_hw_params: Unsupported bit size param = %d",
@@ -242,7 +243,9 @@ static int wm8741_hw_params(struct snd_pcm_substream *substream,
dev_dbg(codec->dev, "wm8741_hw_params: bit size param = %d, rate param = %d",
params_width(params), params_rate(params));
- snd_soc_write(codec, WM8741_FORMAT_CONTROL, iface);
+ snd_soc_update_bits(codec, WM8741_FORMAT_CONTROL, WM8741_IWL_MASK,
+ iface);
+
return 0;
}
@@ -295,7 +298,7 @@ static int wm8741_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
- u16 iface = snd_soc_read(codec, WM8741_FORMAT_CONTROL) & 0x1C3;
+ unsigned int iface;
/* check master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -308,18 +311,19 @@ static int wm8741_set_dai_fmt(struct snd_soc_dai *codec_dai,
/* interface format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
- iface |= 0x0008;
+ iface = 0x08;
break;
case SND_SOC_DAIFMT_RIGHT_J:
+ iface = 0x00;
break;
case SND_SOC_DAIFMT_LEFT_J:
- iface |= 0x0004;
+ iface = 0x04;
break;
case SND_SOC_DAIFMT_DSP_A:
- iface |= 0x000C;
+ iface = 0x0C;
break;
case SND_SOC_DAIFMT_DSP_B:
- iface |= 0x001C;
+ iface = 0x1C;
break;
default:
return -EINVAL;
@@ -329,14 +333,14 @@ static int wm8741_set_dai_fmt(struct snd_soc_dai *codec_dai,
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
break;
- case SND_SOC_DAIFMT_IB_IF:
- iface |= 0x0010;
+ case SND_SOC_DAIFMT_NB_IF:
+ iface |= 0x10;
break;
case SND_SOC_DAIFMT_IB_NF:
- iface |= 0x0020;
+ iface |= 0x20;
break;
- case SND_SOC_DAIFMT_NB_IF:
- iface |= 0x0030;
+ case SND_SOC_DAIFMT_IB_IF:
+ iface |= 0x30;
break;
default:
return -EINVAL;
@@ -347,7 +351,10 @@ static int wm8741_set_dai_fmt(struct snd_soc_dai *codec_dai,
fmt & SND_SOC_DAIFMT_FORMAT_MASK,
((fmt & SND_SOC_DAIFMT_INV_MASK)));
- snd_soc_write(codec, WM8741_FORMAT_CONTROL, iface);
+ snd_soc_update_bits(codec, WM8741_FORMAT_CONTROL,
+ WM8741_BCP_MASK | WM8741_LRP_MASK | WM8741_FMT_MASK,
+ iface);
+
return 0;
}
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index d05d76e79c70..0271a5253bd3 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -971,7 +971,7 @@ static int wm8753_pcm_set_dai_fmt(struct snd_soc_codec *codec,
case SND_SOC_DAIFMT_CBS_CFS:
break;
case SND_SOC_DAIFMT_CBM_CFM:
- ioctl |= 0x2;
+ ioctl |= 0x2; /* fall through */
case SND_SOC_DAIFMT_CBM_CFS:
voice |= 0x0040;
break;
@@ -1096,7 +1096,7 @@ static int wm8753_i2s_set_dai_fmt(struct snd_soc_codec *codec,
case SND_SOC_DAIFMT_CBS_CFS:
break;
case SND_SOC_DAIFMT_CBM_CFM:
- ioctl |= 0x1;
+ ioctl |= 0x1; /* fall through */
case SND_SOC_DAIFMT_CBM_CFS:
hifi |= 0x0040;
break;
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 195f7bf6eb22..830ffd80de4a 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -1076,6 +1076,7 @@ static int wm8993_set_sysclk(struct snd_soc_dai *codec_dai,
switch (clk_id) {
case WM8993_SYSCLK_MCLK:
wm8993->mclk_rate = freq;
+ /* fall through */
case WM8993_SYSCLK_FLL:
wm8993->sysclk_source = clk_id;
break;
@@ -1123,6 +1124,7 @@ static int wm8993_set_dai_fmt(struct snd_soc_dai *dai,
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_B:
aif1 |= WM8993_AIF_LRCLK_INV;
+ /* fall through */
case SND_SOC_DAIFMT_DSP_A:
aif1 |= 0x18;
break;
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 3896523b71e9..f91b49e1ece3 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -860,6 +860,7 @@ static void vmid_reference(struct snd_soc_codec *codec)
switch (wm8994->vmid_mode) {
default:
WARN_ON(NULL == "Invalid VMID mode");
+ /* fall through */
case WM8994_VMID_NORMAL:
/* Startup bias, VMID ramp & buffer */
snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
@@ -2654,6 +2655,7 @@ static int wm8994_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
case SND_SOC_DAIFMT_DSP_B:
aif1 |= WM8994_AIF1_LRCLK_INV;
lrclk |= WM8958_AIF1_LRCLK_INV;
+ /* fall through */
case SND_SOC_DAIFMT_DSP_A:
aif1 |= 0x18;
break;
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 49401a8aae64..77f512767273 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -1068,8 +1068,6 @@ static int wm8997_codec_probe(struct snd_soc_codec *codec)
if (ret < 0)
return ret;
- arizona_init_notifiers(codec);
-
snd_soc_component_disable_pin(component, "HAPTICS");
priv->core.arizona->dapm = dapm;
@@ -1136,6 +1134,14 @@ static int wm8997_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, wm8997);
+ if (IS_ENABLED(CONFIG_OF)) {
+ if (!dev_get_platdata(arizona->dev)) {
+ ret = arizona_of_get_audio_pdata(arizona);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
wm8997->core.arizona = arizona;
wm8997->core.num_inputs = 4;
@@ -1168,6 +1174,11 @@ static int wm8997_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_idle(&pdev->dev);
+ arizona_init_common(arizona);
+
+ ret = arizona_init_vol_limit(arizona);
+ if (ret < 0)
+ return ret;
ret = arizona_init_spk_irqs(arizona);
if (ret < 0)
return ret;
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 44f447136e22..2d211dbe7422 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -101,7 +101,7 @@ static int wm8998_asrc_ev(struct snd_soc_dapm_widget *w,
return 0;
}
-static int wm8998_in1mux_put(struct snd_kcontrol *kcontrol,
+static int wm8998_inmux_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
@@ -109,84 +109,38 @@ static int wm8998_in1mux_put(struct snd_kcontrol *kcontrol,
struct wm8998_priv *wm8998 = snd_soc_codec_get_drvdata(codec);
struct arizona *arizona = wm8998->core.arizona;
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned int mux, inmode;
- unsigned int mode_val, src_val;
+ unsigned int mode_reg, mode_index;
+ unsigned int mux, inmode, src_val, mode_val;
mux = ucontrol->value.enumerated.item[0];
if (mux > 1)
return -EINVAL;
- /* L and R registers have same shift and mask */
- inmode = arizona->pdata.inmode[2 * mux];
- src_val = mux << ARIZONA_IN1L_SRC_SHIFT;
- if (inmode & ARIZONA_INMODE_SE)
- src_val |= 1 << ARIZONA_IN1L_SRC_SE_SHIFT;
-
- switch (arizona->pdata.inmode[0]) {
- case ARIZONA_INMODE_DMIC:
- if (mux)
- mode_val = 0; /* B always analogue */
- else
- mode_val = 1 << ARIZONA_IN1_MODE_SHIFT;
-
- snd_soc_update_bits(codec, ARIZONA_IN1L_CONTROL,
- ARIZONA_IN1_MODE_MASK, mode_val);
-
- /* IN1A is digital so L and R must change together */
- /* src_val setting same for both registers */
- snd_soc_update_bits(codec,
- ARIZONA_ADC_DIGITAL_VOLUME_1L,
- ARIZONA_IN1L_SRC_MASK |
- ARIZONA_IN1L_SRC_SE_MASK, src_val);
- snd_soc_update_bits(codec,
- ARIZONA_ADC_DIGITAL_VOLUME_1R,
- ARIZONA_IN1R_SRC_MASK |
- ARIZONA_IN1R_SRC_SE_MASK, src_val);
+ switch (e->reg) {
+ case ARIZONA_ADC_DIGITAL_VOLUME_2L:
+ mode_reg = ARIZONA_IN2L_CONTROL;
+ mode_index = 1 + (2 * mux);
break;
default:
- /* both analogue */
- snd_soc_update_bits(codec,
- e->reg,
- ARIZONA_IN1L_SRC_MASK |
- ARIZONA_IN1L_SRC_SE_MASK,
- src_val);
+ mode_reg = ARIZONA_IN1L_CONTROL;
+ mode_index = (2 * mux);
break;
}
- return snd_soc_dapm_mux_update_power(dapm, kcontrol,
- ucontrol->value.enumerated.item[0],
- e, NULL);
-}
-
-static int wm8998_in2mux_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
- struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
- struct wm8998_priv *wm8998 = snd_soc_codec_get_drvdata(codec);
- struct arizona *arizona = wm8998->core.arizona;
- struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
- unsigned int mux, inmode, src_val, mode_val;
-
- mux = ucontrol->value.enumerated.item[0];
- if (mux > 1)
- return -EINVAL;
-
- inmode = arizona->pdata.inmode[1 + (2 * mux)];
+ inmode = arizona->pdata.inmode[mode_index];
if (inmode & ARIZONA_INMODE_DMIC)
- mode_val = 1 << ARIZONA_IN2_MODE_SHIFT;
+ mode_val = 1 << ARIZONA_IN1_MODE_SHIFT;
else
mode_val = 0;
- src_val = mux << ARIZONA_IN2L_SRC_SHIFT;
+ src_val = mux << ARIZONA_IN1L_SRC_SHIFT;
if (inmode & ARIZONA_INMODE_SE)
- src_val |= 1 << ARIZONA_IN2L_SRC_SE_SHIFT;
+ src_val |= 1 << ARIZONA_IN1L_SRC_SE_SHIFT;
- snd_soc_update_bits(codec, ARIZONA_IN2L_CONTROL,
- ARIZONA_IN2_MODE_MASK, mode_val);
+ snd_soc_update_bits(codec, mode_reg, ARIZONA_IN1_MODE_MASK, mode_val);
- snd_soc_update_bits(codec, ARIZONA_ADC_DIGITAL_VOLUME_2L,
- ARIZONA_IN2L_SRC_MASK | ARIZONA_IN2L_SRC_SE_MASK,
+ snd_soc_update_bits(codec, e->reg,
+ ARIZONA_IN1L_SRC_MASK | ARIZONA_IN1L_SRC_SE_MASK,
src_val);
return snd_soc_dapm_mux_update_power(dapm, kcontrol,
@@ -216,14 +170,14 @@ static SOC_ENUM_SINGLE_DECL(wm8998_in2mux_enum,
static const struct snd_kcontrol_new wm8998_in1mux[2] = {
SOC_DAPM_ENUM_EXT("IN1L Mux", wm8998_in1muxl_enum,
- snd_soc_dapm_get_enum_double, wm8998_in1mux_put),
+ snd_soc_dapm_get_enum_double, wm8998_inmux_put),
SOC_DAPM_ENUM_EXT("IN1R Mux", wm8998_in1muxr_enum,
- snd_soc_dapm_get_enum_double, wm8998_in1mux_put),
+ snd_soc_dapm_get_enum_double, wm8998_inmux_put),
};
static const struct snd_kcontrol_new wm8998_in2mux =
SOC_DAPM_ENUM_EXT("IN2 Mux", wm8998_in2mux_enum,
- snd_soc_dapm_get_enum_double, wm8998_in2mux_put);
+ snd_soc_dapm_get_enum_double, wm8998_inmux_put);
static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
@@ -1330,7 +1284,6 @@ static int wm8998_codec_probe(struct snd_soc_codec *codec)
return ret;
arizona_init_gpio(codec);
- arizona_init_notifiers(codec);
snd_soc_component_disable_pin(component, "HAPTICS");
@@ -1399,6 +1352,14 @@ static int wm8998_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, wm8998);
+ if (IS_ENABLED(CONFIG_OF)) {
+ if (!dev_get_platdata(arizona->dev)) {
+ ret = arizona_of_get_audio_pdata(arizona);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
wm8998->core.arizona = arizona;
wm8998->core.num_inputs = 3; /* IN1L, IN1R, IN2 */
@@ -1423,6 +1384,8 @@ static int wm8998_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_idle(&pdev->dev);
+ arizona_init_common(arizona);
+
ret = arizona_init_spk_irqs(arizona);
if (ret < 0)
return ret;
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index f6d5c0f2aea5..2c09f71fe433 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/mfd/wm97xx.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -18,12 +19,19 @@
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
+#include <sound/ac97/codec.h>
+#include <sound/ac97/compat.h>
#include <sound/initval.h>
#include <sound/soc.h>
#define WM9705_VENDOR_ID 0x574d4c05
#define WM9705_VENDOR_ID_MASK 0xffffffff
+struct wm9705_priv {
+ struct snd_ac97 *ac97;
+ struct wm97xx_platform_data *mfd_pdata;
+};
+
static const struct reg_default wm9705_reg_defaults[] = {
{ 0x02, 0x8000 },
{ 0x04, 0x8000 },
@@ -292,10 +300,10 @@ static int wm9705_soc_suspend(struct snd_soc_codec *codec)
static int wm9705_soc_resume(struct snd_soc_codec *codec)
{
- struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+ struct wm9705_priv *wm9705 = snd_soc_codec_get_drvdata(codec);
int ret;
- ret = snd_ac97_reset(ac97, true, WM9705_VENDOR_ID,
+ ret = snd_ac97_reset(wm9705->ac97, true, WM9705_VENDOR_ID,
WM9705_VENDOR_ID_MASK);
if (ret < 0)
return ret;
@@ -311,38 +319,45 @@ static int wm9705_soc_resume(struct snd_soc_codec *codec)
static int wm9705_soc_probe(struct snd_soc_codec *codec)
{
- struct snd_ac97 *ac97;
+ struct wm9705_priv *wm9705 = snd_soc_codec_get_drvdata(codec);
struct regmap *regmap;
- int ret;
-
- ac97 = snd_soc_new_ac97_codec(codec, WM9705_VENDOR_ID,
- WM9705_VENDOR_ID_MASK);
- if (IS_ERR(ac97)) {
- dev_err(codec->dev, "Failed to register AC97 codec\n");
- return PTR_ERR(ac97);
- }
- regmap = regmap_init_ac97(ac97, &wm9705_regmap_config);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- goto err_free_ac97_codec;
+ if (wm9705->mfd_pdata) {
+ wm9705->ac97 = wm9705->mfd_pdata->ac97;
+ regmap = wm9705->mfd_pdata->regmap;
+ } else {
+#ifdef CONFIG_SND_SOC_AC97_BUS
+ wm9705->ac97 = snd_soc_new_ac97_codec(codec, WM9705_VENDOR_ID,
+ WM9705_VENDOR_ID_MASK);
+ if (IS_ERR(wm9705->ac97)) {
+ dev_err(codec->dev, "Failed to register AC97 codec\n");
+ return PTR_ERR(wm9705->ac97);
+ }
+
+ regmap = regmap_init_ac97(wm9705->ac97, &wm9705_regmap_config);
+ if (IS_ERR(regmap)) {
+ snd_soc_free_ac97_codec(wm9705->ac97);
+ return PTR_ERR(regmap);
+ }
+#endif
}
- snd_soc_codec_set_drvdata(codec, ac97);
+ snd_soc_codec_set_drvdata(codec, wm9705->ac97);
snd_soc_codec_init_regmap(codec, regmap);
return 0;
-err_free_ac97_codec:
- snd_soc_free_ac97_codec(ac97);
- return ret;
}
static int wm9705_soc_remove(struct snd_soc_codec *codec)
{
- struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+#ifdef CONFIG_SND_SOC_AC97_BUS
+ struct wm9705_priv *wm9705 = snd_soc_codec_get_drvdata(codec);
- snd_soc_codec_exit_regmap(codec);
- snd_soc_free_ac97_codec(ac97);
+ if (!wm9705->mfd_pdata) {
+ snd_soc_codec_exit_regmap(codec);
+ snd_soc_free_ac97_codec(wm9705->ac97);
+ }
+#endif
return 0;
}
@@ -364,6 +379,15 @@ static const struct snd_soc_codec_driver soc_codec_dev_wm9705 = {
static int wm9705_probe(struct platform_device *pdev)
{
+ struct wm9705_priv *wm9705;
+
+ wm9705 = devm_kzalloc(&pdev->dev, sizeof(*wm9705), GFP_KERNEL);
+ if (wm9705 == NULL)
+ return -ENOMEM;
+
+ wm9705->mfd_pdata = dev_get_platdata(&pdev->dev);
+ platform_set_drvdata(pdev, wm9705);
+
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_wm9705, wm9705_dai, ARRAY_SIZE(wm9705_dai));
}
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 1a3e1797994a..4f6d1a442bc4 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/mfd/wm97xx.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -19,6 +20,8 @@
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
+#include <sound/ac97/codec.h>
+#include <sound/ac97/compat.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <sound/tlv.h>
@@ -30,6 +33,7 @@ struct wm9712_priv {
struct snd_ac97 *ac97;
unsigned int hp_mixer[2];
struct mutex lock;
+ struct wm97xx_platform_data *mfd_pdata;
};
static const struct reg_default wm9712_reg_defaults[] = {
@@ -636,18 +640,26 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
struct regmap *regmap;
int ret;
- wm9712->ac97 = snd_soc_new_ac97_codec(codec, WM9712_VENDOR_ID,
- WM9712_VENDOR_ID_MASK);
- if (IS_ERR(wm9712->ac97)) {
- ret = PTR_ERR(wm9712->ac97);
- dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
- return ret;
- }
-
- regmap = regmap_init_ac97(wm9712->ac97, &wm9712_regmap_config);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- goto err_free_ac97_codec;
+ if (wm9712->mfd_pdata) {
+ wm9712->ac97 = wm9712->mfd_pdata->ac97;
+ regmap = wm9712->mfd_pdata->regmap;
+ } else {
+#ifdef CONFIG_SND_SOC_AC97_BUS
+ wm9712->ac97 = snd_soc_new_ac97_codec(codec, WM9712_VENDOR_ID,
+ WM9712_VENDOR_ID_MASK);
+ if (IS_ERR(wm9712->ac97)) {
+ ret = PTR_ERR(wm9712->ac97);
+ dev_err(codec->dev,
+ "Failed to register AC97 codec: %d\n", ret);
+ return ret;
+ }
+
+ regmap = regmap_init_ac97(wm9712->ac97, &wm9712_regmap_config);
+ if (IS_ERR(regmap)) {
+ snd_soc_free_ac97_codec(wm9712->ac97);
+ return PTR_ERR(regmap);
+ }
+#endif
}
snd_soc_codec_init_regmap(codec, regmap);
@@ -656,17 +668,18 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
snd_soc_update_bits(codec, AC97_VIDEO, 0x3000, 0x3000);
return 0;
-err_free_ac97_codec:
- snd_soc_free_ac97_codec(wm9712->ac97);
- return ret;
}
static int wm9712_soc_remove(struct snd_soc_codec *codec)
{
+#ifdef CONFIG_SND_SOC_AC97_BUS
struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
- snd_soc_codec_exit_regmap(codec);
- snd_soc_free_ac97_codec(wm9712->ac97);
+ if (!wm9712->mfd_pdata) {
+ snd_soc_codec_exit_regmap(codec);
+ snd_soc_free_ac97_codec(wm9712->ac97);
+ }
+#endif
return 0;
}
@@ -697,6 +710,7 @@ static int wm9712_probe(struct platform_device *pdev)
mutex_init(&wm9712->lock);
+ wm9712->mfd_pdata = dev_get_platdata(&pdev->dev);
platform_set_drvdata(pdev, wm9712);
return snd_soc_register_codec(&pdev->dev,
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 7e4822185feb..df7220656d98 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -17,12 +17,15 @@
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/mfd/wm97xx.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/regmap.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
+#include <sound/ac97/codec.h>
+#include <sound/ac97/compat.h>
#include <sound/initval.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
@@ -38,6 +41,7 @@ struct wm9713_priv {
u32 pll_in; /* PLL input frequency */
unsigned int hp_mixer[2];
struct mutex lock;
+ struct wm97xx_platform_data *mfd_pdata;
};
#define HPL_MIXER 0
@@ -1205,17 +1209,23 @@ static int wm9713_soc_resume(struct snd_soc_codec *codec)
static int wm9713_soc_probe(struct snd_soc_codec *codec)
{
struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
- struct regmap *regmap;
+ struct regmap *regmap = NULL;
- wm9713->ac97 = snd_soc_new_ac97_codec(codec, WM9713_VENDOR_ID,
- WM9713_VENDOR_ID_MASK);
- if (IS_ERR(wm9713->ac97))
- return PTR_ERR(wm9713->ac97);
-
- regmap = regmap_init_ac97(wm9713->ac97, &wm9713_regmap_config);
- if (IS_ERR(regmap)) {
- snd_soc_free_ac97_codec(wm9713->ac97);
- return PTR_ERR(regmap);
+ if (wm9713->mfd_pdata) {
+ wm9713->ac97 = wm9713->mfd_pdata->ac97;
+ regmap = wm9713->mfd_pdata->regmap;
+ } else {
+#ifdef CONFIG_SND_SOC_AC97_BUS
+ wm9713->ac97 = snd_soc_new_ac97_codec(codec, WM9713_VENDOR_ID,
+ WM9713_VENDOR_ID_MASK);
+ if (IS_ERR(wm9713->ac97))
+ return PTR_ERR(wm9713->ac97);
+ regmap = regmap_init_ac97(wm9713->ac97, &wm9713_regmap_config);
+ if (IS_ERR(regmap)) {
+ snd_soc_free_ac97_codec(wm9713->ac97);
+ return PTR_ERR(regmap);
+ }
+#endif
}
snd_soc_codec_init_regmap(codec, regmap);
@@ -1228,10 +1238,14 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
static int wm9713_soc_remove(struct snd_soc_codec *codec)
{
+#ifdef CONFIG_SND_SOC_AC97_BUS
struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
- snd_soc_codec_exit_regmap(codec);
- snd_soc_free_ac97_codec(wm9713->ac97);
+ if (!wm9713->mfd_pdata) {
+ snd_soc_codec_exit_regmap(codec);
+ snd_soc_free_ac97_codec(wm9713->ac97);
+ }
+#endif
return 0;
}
@@ -1262,6 +1276,7 @@ static int wm9713_probe(struct platform_device *pdev)
mutex_init(&wm9713->lock);
+ wm9713->mfd_pdata = dev_get_platdata(&pdev->dev);
platform_set_drvdata(pdev, wm9713);
return snd_soc_register_codec(&pdev->dev,
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index f395bbc7c354..804c6f2bcf21 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1721,7 +1721,8 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp)
PTR_ERR(chan));
return PTR_ERR(chan);
}
- BUG_ON(!chan->device || !chan->device->dev);
+ if (WARN_ON(!chan->device || !chan->device->dev))
+ return -EINVAL;
if (chan->device->dev->of_node)
ret = of_property_read_string(chan->device->dev->of_node,
@@ -1867,6 +1868,10 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
if (irq >= 0) {
irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_common",
dev_name(&pdev->dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err;
+ }
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
davinci_mcasp_common_irq_handler,
IRQF_ONESHOT | IRQF_SHARED,
@@ -1884,6 +1889,10 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
if (irq >= 0) {
irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx",
dev_name(&pdev->dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err;
+ }
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
davinci_mcasp_rx_irq_handler,
IRQF_ONESHOT, irq_name, mcasp);
@@ -1899,6 +1908,10 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
if (irq >= 0) {
irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx",
dev_name(&pdev->dev));
+ if (!irq_name) {
+ ret = -ENOMEM;
+ goto err;
+ }
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
davinci_mcasp_tx_irq_handler,
IRQF_ONESHOT, irq_name, mcasp);
@@ -1982,8 +1995,10 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!mcasp->chconstr[SNDRV_PCM_STREAM_PLAYBACK].list ||
- !mcasp->chconstr[SNDRV_PCM_STREAM_CAPTURE].list)
- return -ENOMEM;
+ !mcasp->chconstr[SNDRV_PCM_STREAM_CAPTURE].list) {
+ ret = -ENOMEM;
+ goto err;
+ }
ret = davinci_mcasp_set_ch_constraints(mcasp);
if (ret)
diff --git a/sound/soc/dwc/Kconfig b/sound/soc/dwc/Kconfig
index c6fd95fa5ca6..aa0c6ec4d93c 100644
--- a/sound/soc/dwc/Kconfig
+++ b/sound/soc/dwc/Kconfig
@@ -4,8 +4,8 @@ config SND_DESIGNWARE_I2S
select SND_SOC_GENERIC_DMAENGINE_PCM
help
Say Y or M if you want to add support for I2S driver for
- Synopsys desigwnware I2S device. The device supports upto
- maximum of 8 channels each for play and record.
+ Synopsys designware I2S device. The device supports up to
+ a maximum of 8 channels each for play and record.
config SND_DESIGNWARE_PCM
bool "PCM PIO extension for I2S driver"
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 2db4d0c80d33..1225e0399de8 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -166,7 +166,7 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
ret = snd_soc_dai_set_sysclk(rtd->cpu_dai, cpu_priv->sysclk_id[tx],
cpu_priv->sysclk_freq[tx],
cpu_priv->sysclk_dir[tx]);
- if (ret) {
+ if (ret && ret != -ENOTSUPP) {
dev_err(dev, "failed to set sysclk for cpu dai\n");
return ret;
}
@@ -174,7 +174,7 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
if (cpu_priv->slot_width) {
ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2,
cpu_priv->slot_width);
- if (ret) {
+ if (ret && ret != -ENOTSUPP) {
dev_err(dev, "failed to set TDM slot for cpu dai\n");
return ret;
}
@@ -270,7 +270,7 @@ static int fsl_asoc_card_set_bias_level(struct snd_soc_card *card,
ret = snd_soc_dai_set_sysclk(codec_dai, codec_priv->fll_id,
pll_out, SND_SOC_CLOCK_IN);
- if (ret) {
+ if (ret && ret != -ENOTSUPP) {
dev_err(dev, "failed to set SYSCLK: %d\n", ret);
return ret;
}
@@ -283,7 +283,7 @@ static int fsl_asoc_card_set_bias_level(struct snd_soc_card *card,
ret = snd_soc_dai_set_sysclk(codec_dai, codec_priv->mclk_id,
codec_priv->mclk_freq,
SND_SOC_CLOCK_IN);
- if (ret) {
+ if (ret && ret != -ENOTSUPP) {
dev_err(dev, "failed to switch away from FLL: %d\n", ret);
return ret;
}
@@ -459,7 +459,7 @@ static int fsl_asoc_card_late_probe(struct snd_soc_card *card)
ret = snd_soc_dai_set_sysclk(codec_dai, codec_priv->mclk_id,
codec_priv->mclk_freq, SND_SOC_CLOCK_IN);
- if (ret) {
+ if (ret && ret != -ENOTSUPP) {
dev_err(dev, "failed to set sysclk in %s\n", __func__);
return ret;
}
@@ -639,6 +639,10 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
devm_kasprintf(&pdev->dev, GFP_KERNEL,
"ac97-codec.%u",
(unsigned int)idx);
+ if (!priv->dai_link[0].codec_name) {
+ ret = -ENOMEM;
+ goto asrc_fail;
+ }
}
priv->dai_link[0].platform_of_node = cpu_np;
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 7e6cc4da0088..4f7469c1864c 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -1110,7 +1110,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
struct clk *clk, u64 savesub,
enum spdif_txrate index, bool round)
{
- const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 };
+ static const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 };
bool is_sysclk = clk_is_match(clk, spdif_priv->sysclk);
u64 rate_ideal, rate_actual, sub;
u32 sysclk_dfmin, sysclk_dfmax;
@@ -1169,7 +1169,7 @@ out:
static int fsl_spdif_probe_txclk(struct fsl_spdif_priv *spdif_priv,
enum spdif_txrate index)
{
- const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 };
+ static const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 };
struct platform_device *pdev = spdif_priv->pdev;
struct device *dev = &pdev->dev;
u64 savesub = 100000, ret;
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 64598d1183f8..f2f51e06e22c 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -197,12 +197,13 @@ struct fsl_ssi_soc_data {
* @use_dma: DMA is used or FIQ with stream filter
* @use_dual_fifo: DMA with support for both FIFOs used
* @fifo_depth: Depth of the SSI FIFOs
+ * @slot_width: width of each DAI slot
+ * @slots: number of slots
* @rxtx_reg_val: Specific register settings for receive/transmit configuration
*
* @clk: SSI clock
* @baudclk: SSI baud clock for master mode
* @baudclk_streams: Active streams that are using baudclk
- * @bitclk_freq: bitclock frequency set by .set_dai_sysclk
*
* @dma_params_tx: DMA transmit parameters
* @dma_params_rx: DMA receive parameters
@@ -233,12 +234,13 @@ struct fsl_ssi_private {
bool use_dual_fifo;
bool has_ipg_clk_name;
unsigned int fifo_depth;
+ unsigned int slot_width;
+ unsigned int slots;
struct fsl_ssi_rxtx_reg_val rxtx_reg_val;
struct clk *clk;
struct clk *baudclk;
unsigned int baudclk_streams;
- unsigned int bitclk_freq;
/* regcache for volatile regs */
u32 regcache_sfcsr;
@@ -700,8 +702,8 @@ static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
* Note: This function can be only called when using SSI as DAI master
*
* Quick instruction for parameters:
- * freq: Output BCLK frequency = samplerate * 32 (fixed) * channels
- * dir: SND_SOC_CLOCK_OUT -> TxBCLK, SND_SOC_CLOCK_IN -> RxBCLK.
+ * freq: Output BCLK frequency = samplerate * slots * slot_width
+ * (In 2-channel I2S Master mode, slot_width is fixed 32)
*/
static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai,
@@ -712,15 +714,21 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
int synchronous = ssi_private->cpu_dai_drv.symmetric_rates, ret;
u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
unsigned long clkrate, baudrate, tmprate;
+ unsigned int slots = params_channels(hw_params);
+ unsigned int slot_width = 32;
u64 sub, savesub = 100000;
unsigned int freq;
bool baudclk_is_used;
- /* Prefer the explicitly set bitclock frequency */
- if (ssi_private->bitclk_freq)
- freq = ssi_private->bitclk_freq;
- else
- freq = params_channels(hw_params) * 32 * params_rate(hw_params);
+ /* Override slots and slot_width if being specifically set... */
+ if (ssi_private->slots)
+ slots = ssi_private->slots;
+ /* ...but keep 32 bits if slots is 2 -- I2S Master mode */
+ if (ssi_private->slot_width && slots != 2)
+ slot_width = ssi_private->slot_width;
+
+ /* Generate bit clock based on the slot number and slot width */
+ freq = slots * slot_width * params_rate(hw_params);
/* Don't apply it to any non-baudclk circumstance */
if (IS_ERR(ssi_private->baudclk))
@@ -805,16 +813,6 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
return 0;
}
-static int fsl_ssi_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
- int clk_id, unsigned int freq, int dir)
-{
- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
-
- ssi_private->bitclk_freq = freq;
-
- return 0;
-}
-
/**
* fsl_ssi_hw_params - program the sample size
*
@@ -1095,6 +1093,12 @@ static int fsl_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask,
struct regmap *regs = ssi_private->regs;
u32 val;
+ /* The word length should be 8, 10, 12, 16, 18, 20, 22 or 24 */
+ if (slot_width & 1 || slot_width < 8 || slot_width > 24) {
+ dev_err(cpu_dai->dev, "invalid slot width: %d\n", slot_width);
+ return -EINVAL;
+ }
+
/* The slot number should be >= 2 if using Network mode or I2S mode */
regmap_read(regs, CCSR_SSI_SCR, &val);
val &= CCSR_SSI_SCR_I2S_MODE_MASK | CCSR_SSI_SCR_NET;
@@ -1121,6 +1125,9 @@ static int fsl_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask,
regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN, val);
+ ssi_private->slot_width = slot_width;
+ ssi_private->slots = slots;
+
return 0;
}
@@ -1191,7 +1198,6 @@ static const struct snd_soc_dai_ops fsl_ssi_dai_ops = {
.hw_params = fsl_ssi_hw_params,
.hw_free = fsl_ssi_hw_free,
.set_fmt = fsl_ssi_set_dai_fmt,
- .set_sysclk = fsl_ssi_set_dai_sysclk,
.set_tdm_slot = fsl_ssi_set_dai_tdm_slot,
.trigger = fsl_ssi_trigger,
};
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 488c52f9405f..1b6164249341 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -29,7 +29,9 @@ struct graph_card_data {
struct graph_dai_props {
struct asoc_simple_dai cpu_dai;
struct asoc_simple_dai codec_dai;
+ unsigned int mclk_fs;
} *dai_props;
+ unsigned int mclk_fs;
struct snd_soc_dai_link *dai_link;
struct gpio_desc *pa_gpio;
};
@@ -95,9 +97,43 @@ static void asoc_graph_card_shutdown(struct snd_pcm_substream *substream)
asoc_simple_card_clk_disable(&dai_props->codec_dai);
}
+static int asoc_graph_card_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
+ unsigned int mclk, mclk_fs = 0;
+ int ret = 0;
+
+ if (priv->mclk_fs)
+ mclk_fs = priv->mclk_fs;
+ else if (dai_props->mclk_fs)
+ mclk_fs = dai_props->mclk_fs;
+
+ if (mclk_fs) {
+ mclk = params_rate(params) * mclk_fs;
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+ SND_SOC_CLOCK_IN);
+ if (ret && ret != -ENOTSUPP)
+ goto err;
+
+ ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
+ SND_SOC_CLOCK_OUT);
+ if (ret && ret != -ENOTSUPP)
+ goto err;
+ }
+ return 0;
+err:
+ return ret;
+}
+
static const struct snd_soc_ops asoc_graph_card_ops = {
.startup = asoc_graph_card_startup,
.shutdown = asoc_graph_card_shutdown,
+ .hw_params = asoc_graph_card_hw_params,
};
static int asoc_graph_card_dai_init(struct snd_soc_pcm_runtime *rtd)
@@ -146,10 +182,7 @@ static int asoc_graph_card_dai_link_of(struct device_node *cpu_port,
if (ret < 0)
goto dai_link_of_err;
- /*
- * we need to consider "mclk-fs" around here
- * see simple-card
- */
+ of_property_read_u32(rcpu_ep, "mclk-fs", &dai_props->mclk_fs);
ret = asoc_simple_card_parse_graph_cpu(cpu_ep, dai_link);
if (ret < 0)
@@ -217,10 +250,8 @@ static int asoc_graph_card_parse_of(struct graph_card_data *priv)
if (ret < 0)
return ret;
- /*
- * we need to consider "mclk-fs" around here
- * see simple-card
- */
+ /* Factor to mclk, used in hw_params() */
+ of_property_read_u32(node, "mclk-fs", &priv->mclk_fs);
of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
ret = asoc_graph_card_dai_link_of(it.node, priv, idx++);
diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c
index 567f9767fb73..d7fbb0a0a28b 100644
--- a/sound/soc/img/img-i2s-in.c
+++ b/sound/soc/img/img-i2s-in.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <sound/core.h>
@@ -60,8 +61,33 @@ struct img_i2s_in {
void __iomem *channel_base;
unsigned int active_channels;
struct snd_soc_dai_driver dai_driver;
+ u32 suspend_ctl;
+ u32 *suspend_ch_ctl;
};
+static int img_i2s_in_runtime_suspend(struct device *dev)
+{
+ struct img_i2s_in *i2s = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(i2s->clk_sys);
+
+ return 0;
+}
+
+static int img_i2s_in_runtime_resume(struct device *dev)
+{
+ struct img_i2s_in *i2s = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(i2s->clk_sys);
+ if (ret) {
+ dev_err(dev, "Unable to enable sys clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static inline void img_i2s_in_writel(struct img_i2s_in *i2s, u32 val, u32 reg)
{
writel(val, i2s->base + reg);
@@ -279,7 +305,7 @@ static int img_i2s_in_hw_params(struct snd_pcm_substream *substream,
static int img_i2s_in_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct img_i2s_in *i2s = snd_soc_dai_get_drvdata(dai);
- int i;
+ int i, ret;
u32 chan_control_mask, lrd_set = 0, blkp_set = 0, chan_control_set = 0;
u32 reg;
@@ -319,6 +345,10 @@ static int img_i2s_in_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
chan_control_mask = IMG_I2S_IN_CH_CTL_CLK_TRANS_MASK;
+ ret = pm_runtime_get_sync(i2s->dev);
+ if (ret < 0)
+ return ret;
+
for (i = 0; i < i2s->active_channels; i++)
img_i2s_in_ch_disable(i2s, i);
@@ -338,6 +368,8 @@ static int img_i2s_in_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
for (i = 0; i < i2s->active_channels; i++)
img_i2s_in_ch_enable(i2s, i);
+ pm_runtime_put(i2s->dev);
+
return 0;
}
@@ -427,9 +459,15 @@ static int img_i2s_in_probe(struct platform_device *pdev)
return PTR_ERR(i2s->clk_sys);
}
- ret = clk_prepare_enable(i2s->clk_sys);
- if (ret)
- return ret;
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = img_i2s_in_runtime_resume(&pdev->dev);
+ if (ret)
+ goto err_pm_disable;
+ }
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto err_suspend;
i2s->active_channels = 1;
i2s->dma_data.addr = res->start + IMG_I2S_IN_RX_FIFO;
@@ -447,7 +485,7 @@ static int img_i2s_in_probe(struct platform_device *pdev)
if (IS_ERR(rst)) {
if (PTR_ERR(rst) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
- goto err_clk_disable;
+ goto err_suspend;
}
dev_dbg(dev, "No top level reset found\n");
@@ -469,42 +507,110 @@ static int img_i2s_in_probe(struct platform_device *pdev)
IMG_I2S_IN_CH_CTL_JUST_MASK |
IMG_I2S_IN_CH_CTL_FW_MASK, IMG_I2S_IN_CH_CTL);
+ pm_runtime_put(&pdev->dev);
+
+ i2s->suspend_ch_ctl = devm_kzalloc(dev,
+ sizeof(*i2s->suspend_ch_ctl) * i2s->max_i2s_chan, GFP_KERNEL);
+ if (!i2s->suspend_ch_ctl) {
+ ret = -ENOMEM;
+ goto err_suspend;
+ }
+
ret = devm_snd_soc_register_component(dev, &img_i2s_in_component,
&i2s->dai_driver, 1);
if (ret)
- goto err_clk_disable;
+ goto err_suspend;
ret = devm_snd_dmaengine_pcm_register(dev, &img_i2s_in_dma_config, 0);
if (ret)
- goto err_clk_disable;
+ goto err_suspend;
return 0;
-err_clk_disable:
- clk_disable_unprepare(i2s->clk_sys);
+err_suspend:
+ if (!pm_runtime_enabled(&pdev->dev))
+ img_i2s_in_runtime_suspend(&pdev->dev);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
return ret;
}
static int img_i2s_in_dev_remove(struct platform_device *pdev)
{
- struct img_i2s_in *i2s = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ img_i2s_in_runtime_suspend(&pdev->dev);
- clk_disable_unprepare(i2s->clk_sys);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int img_i2s_in_suspend(struct device *dev)
+{
+ struct img_i2s_in *i2s = dev_get_drvdata(dev);
+ int i, ret;
+ u32 reg;
+
+ if (pm_runtime_status_suspended(dev)) {
+ ret = img_i2s_in_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < i2s->max_i2s_chan; i++) {
+ reg = img_i2s_in_ch_readl(i2s, i, IMG_I2S_IN_CH_CTL);
+ i2s->suspend_ch_ctl[i] = reg;
+ }
+
+ i2s->suspend_ctl = img_i2s_in_readl(i2s, IMG_I2S_IN_CTL);
+
+ img_i2s_in_runtime_suspend(dev);
return 0;
}
+static int img_i2s_in_resume(struct device *dev)
+{
+ struct img_i2s_in *i2s = dev_get_drvdata(dev);
+ int i, ret;
+ u32 reg;
+
+ ret = img_i2s_in_runtime_resume(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < i2s->max_i2s_chan; i++) {
+ reg = i2s->suspend_ch_ctl[i];
+ img_i2s_in_ch_writel(i2s, i, reg, IMG_I2S_IN_CH_CTL);
+ }
+
+ img_i2s_in_writel(i2s, i2s->suspend_ctl, IMG_I2S_IN_CTL);
+
+ if (pm_runtime_status_suspended(dev))
+ img_i2s_in_runtime_suspend(dev);
+
+ return 0;
+}
+#endif
+
static const struct of_device_id img_i2s_in_of_match[] = {
{ .compatible = "img,i2s-in" },
{}
};
MODULE_DEVICE_TABLE(of, img_i2s_in_of_match);
+static const struct dev_pm_ops img_i2s_in_pm_ops = {
+ SET_RUNTIME_PM_OPS(img_i2s_in_runtime_suspend,
+ img_i2s_in_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(img_i2s_in_suspend, img_i2s_in_resume)
+};
+
static struct platform_driver img_i2s_in_driver = {
.driver = {
.name = "img-i2s-in",
- .of_match_table = img_i2s_in_of_match
+ .of_match_table = img_i2s_in_of_match,
+ .pm = &img_i2s_in_pm_ops
},
.probe = img_i2s_in_probe,
.remove = img_i2s_in_dev_remove
diff --git a/sound/soc/img/img-i2s-out.c b/sound/soc/img/img-i2s-out.c
index 78b7f6cd675b..30a95bcef2db 100644
--- a/sound/soc/img/img-i2s-out.c
+++ b/sound/soc/img/img-i2s-out.c
@@ -63,29 +63,36 @@ struct img_i2s_out {
unsigned int active_channels;
struct reset_control *rst;
struct snd_soc_dai_driver dai_driver;
+ u32 suspend_ctl;
+ u32 *suspend_ch_ctl;
};
-static int img_i2s_out_suspend(struct device *dev)
+static int img_i2s_out_runtime_suspend(struct device *dev)
{
struct img_i2s_out *i2s = dev_get_drvdata(dev);
- if (!i2s->force_clk_active)
- clk_disable_unprepare(i2s->clk_ref);
+ clk_disable_unprepare(i2s->clk_ref);
+ clk_disable_unprepare(i2s->clk_sys);
return 0;
}
-static int img_i2s_out_resume(struct device *dev)
+static int img_i2s_out_runtime_resume(struct device *dev)
{
struct img_i2s_out *i2s = dev_get_drvdata(dev);
int ret;
- if (!i2s->force_clk_active) {
- ret = clk_prepare_enable(i2s->clk_ref);
- if (ret) {
- dev_err(dev, "clk_enable failed: %d\n", ret);
- return ret;
- }
+ ret = clk_prepare_enable(i2s->clk_sys);
+ if (ret) {
+ dev_err(dev, "clk_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(i2s->clk_ref);
+ if (ret) {
+ dev_err(dev, "clk_enable failed: %d\n", ret);
+ clk_disable_unprepare(i2s->clk_sys);
+ return ret;
}
return 0;
@@ -287,7 +294,7 @@ static int img_i2s_out_hw_params(struct snd_pcm_substream *substream,
static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct img_i2s_out *i2s = snd_soc_dai_get_drvdata(dai);
- int i;
+ int i, ret;
bool force_clk_active;
u32 chan_control_mask, control_mask, chan_control_set = 0;
u32 reg, control_set = 0;
@@ -342,6 +349,10 @@ static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
chan_control_mask = IMG_I2S_OUT_CHAN_CTL_CLKT_MASK;
+ ret = pm_runtime_get_sync(i2s->dev);
+ if (ret < 0)
+ return ret;
+
img_i2s_out_disable(i2s);
reg = img_i2s_out_readl(i2s, IMG_I2S_OUT_CTL);
@@ -361,6 +372,7 @@ static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
img_i2s_out_ch_enable(i2s, i);
img_i2s_out_enable(i2s);
+ pm_runtime_put(i2s->dev);
i2s->force_clk_active = force_clk_active;
@@ -467,9 +479,20 @@ static int img_i2s_out_probe(struct platform_device *pdev)
return PTR_ERR(i2s->clk_ref);
}
- ret = clk_prepare_enable(i2s->clk_sys);
- if (ret)
- return ret;
+ i2s->suspend_ch_ctl = devm_kzalloc(dev,
+ sizeof(*i2s->suspend_ch_ctl) * i2s->max_i2s_chan, GFP_KERNEL);
+ if (!i2s->suspend_ch_ctl)
+ return -ENOMEM;
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = img_i2s_out_runtime_resume(&pdev->dev);
+ if (ret)
+ goto err_pm_disable;
+ }
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto err_suspend;
reg = IMG_I2S_OUT_CTL_FRM_SIZE_MASK;
img_i2s_out_writel(i2s, reg, IMG_I2S_OUT_CTL);
@@ -483,13 +506,7 @@ static int img_i2s_out_probe(struct platform_device *pdev)
img_i2s_out_ch_writel(i2s, i, reg, IMG_I2S_OUT_CH_CTL);
img_i2s_out_reset(i2s);
-
- pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev)) {
- ret = img_i2s_out_resume(&pdev->dev);
- if (ret)
- goto err_pm_disable;
- }
+ pm_runtime_put(&pdev->dev);
i2s->active_channels = 1;
i2s->dma_data.addr = res->start + IMG_I2S_OUT_TX_FIFO;
@@ -517,26 +534,70 @@ static int img_i2s_out_probe(struct platform_device *pdev)
err_suspend:
if (!pm_runtime_status_suspended(&pdev->dev))
- img_i2s_out_suspend(&pdev->dev);
+ img_i2s_out_runtime_suspend(&pdev->dev);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(i2s->clk_sys);
return ret;
}
static int img_i2s_out_dev_remove(struct platform_device *pdev)
{
- struct img_i2s_out *i2s = platform_get_drvdata(pdev);
-
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
- img_i2s_out_suspend(&pdev->dev);
+ img_i2s_out_runtime_suspend(&pdev->dev);
- clk_disable_unprepare(i2s->clk_sys);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int img_i2s_out_suspend(struct device *dev)
+{
+ struct img_i2s_out *i2s = dev_get_drvdata(dev);
+ int i, ret;
+ u32 reg;
+
+ if (pm_runtime_status_suspended(dev)) {
+ ret = img_i2s_out_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < i2s->max_i2s_chan; i++) {
+ reg = img_i2s_out_ch_readl(i2s, i, IMG_I2S_OUT_CH_CTL);
+ i2s->suspend_ch_ctl[i] = reg;
+ }
+
+ i2s->suspend_ctl = img_i2s_out_readl(i2s, IMG_I2S_OUT_CTL);
+
+ img_i2s_out_runtime_suspend(dev);
+
+ return 0;
+}
+
+static int img_i2s_out_resume(struct device *dev)
+{
+ struct img_i2s_out *i2s = dev_get_drvdata(dev);
+ int i, ret;
+ u32 reg;
+
+ ret = img_i2s_out_runtime_resume(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < i2s->max_i2s_chan; i++) {
+ reg = i2s->suspend_ch_ctl[i];
+ img_i2s_out_ch_writel(i2s, i, reg, IMG_I2S_OUT_CH_CTL);
+ }
+
+ img_i2s_out_writel(i2s, i2s->suspend_ctl, IMG_I2S_OUT_CTL);
+
+ if (pm_runtime_status_suspended(dev))
+ img_i2s_out_runtime_suspend(dev);
return 0;
}
+#endif
static const struct of_device_id img_i2s_out_of_match[] = {
{ .compatible = "img,i2s-out" },
@@ -545,8 +606,9 @@ static const struct of_device_id img_i2s_out_of_match[] = {
MODULE_DEVICE_TABLE(of, img_i2s_out_of_match);
static const struct dev_pm_ops img_i2s_out_pm_ops = {
- SET_RUNTIME_PM_OPS(img_i2s_out_suspend,
- img_i2s_out_resume, NULL)
+ SET_RUNTIME_PM_OPS(img_i2s_out_runtime_suspend,
+ img_i2s_out_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(img_i2s_out_suspend, img_i2s_out_resume)
};
static struct platform_driver img_i2s_out_driver = {
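The img-i2s-out changes above follow one pattern throughout these drivers: both clocks are gated from the runtime-PM callbacks, and the system-sleep hooks power the block up just long enough to save or restore its registers through those same callbacks. Below is a minimal sketch of that pattern, assuming a hypothetical foo_dev driver with a made-up register offset; nothing in it is taken verbatim from the patch.

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

#define FOO_CTL		0x00	/* illustrative register offset */

struct foo_dev {
	void __iomem *base;
	struct clk *clk_sys;
	struct clk *clk_ref;
	u32 saved_ctl;
};

static int foo_runtime_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	/* gate the clocks in the reverse order of enabling */
	clk_disable_unprepare(foo->clk_ref);
	clk_disable_unprepare(foo->clk_sys);

	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(foo->clk_sys);
	if (ret)
		return ret;

	ret = clk_prepare_enable(foo->clk_ref);
	if (ret) {
		clk_disable_unprepare(foo->clk_sys);
		return ret;
	}

	return 0;
}

static int __maybe_unused foo_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);
	int ret;

	/* power the block up just long enough to capture its state */
	if (pm_runtime_status_suspended(dev)) {
		ret = foo_runtime_resume(dev);
		if (ret)
			return ret;
	}

	foo->saved_ctl = readl(foo->base + FOO_CTL);

	foo_runtime_suspend(dev);

	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);
	int ret;

	ret = foo_runtime_resume(dev);
	if (ret)
		return ret;

	writel(foo->saved_ctl, foo->base + FOO_CTL);

	/* drop back to the runtime-PM state the core expects */
	if (pm_runtime_status_suspended(dev))
		foo_runtime_suspend(dev);

	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};
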
diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c
index 23b0f0f6ec9c..acc005217be0 100644
--- a/sound/soc/img/img-parallel-out.c
+++ b/sound/soc/img/img-parallel-out.c
@@ -153,6 +153,7 @@ static int img_prl_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct img_prl_out *prl = snd_soc_dai_get_drvdata(dai);
u32 reg, control_set = 0;
+ int ret;
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
@@ -164,9 +165,14 @@ static int img_prl_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}
+ ret = pm_runtime_get_sync(prl->dev);
+ if (ret < 0)
+ return ret;
+
reg = img_prl_out_readl(prl, IMG_PRL_OUT_CTL);
reg = (reg & ~IMG_PRL_OUT_CTL_EDGE_MASK) | control_set;
img_prl_out_writel(prl, reg, IMG_PRL_OUT_CTL);
+ pm_runtime_put(prl->dev);
return 0;
}
diff --git a/sound/soc/img/img-spdif-in.c b/sound/soc/img/img-spdif-in.c
index 8adfd65d4390..cedd40c8d1f3 100644
--- a/sound/soc/img/img-spdif-in.c
+++ b/sound/soc/img/img-spdif-in.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <sound/core.h>
@@ -82,11 +83,36 @@ struct img_spdif_in {
unsigned int single_freq;
unsigned int multi_freqs[IMG_SPDIF_IN_NUM_ACLKGEN];
bool active;
+ u32 suspend_clkgen;
+ u32 suspend_ctl;
/* Write-only registers */
unsigned int aclkgen_regs[IMG_SPDIF_IN_NUM_ACLKGEN];
};
+static int img_spdif_in_runtime_suspend(struct device *dev)
+{
+ struct img_spdif_in *spdif = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(spdif->clk_sys);
+
+ return 0;
+}
+
+static int img_spdif_in_runtime_resume(struct device *dev)
+{
+ struct img_spdif_in *spdif = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(spdif->clk_sys);
+ if (ret) {
+ dev_err(dev, "Unable to enable sys clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static inline void img_spdif_in_writel(struct img_spdif_in *spdif,
u32 val, u32 reg)
{
@@ -723,15 +749,21 @@ static int img_spdif_in_probe(struct platform_device *pdev)
return PTR_ERR(spdif->clk_sys);
}
- ret = clk_prepare_enable(spdif->clk_sys);
- if (ret)
- return ret;
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = img_spdif_in_runtime_resume(&pdev->dev);
+ if (ret)
+ goto err_pm_disable;
+ }
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto err_suspend;
rst = devm_reset_control_get_exclusive(&pdev->dev, "rst");
if (IS_ERR(rst)) {
if (PTR_ERR(rst) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
- goto err_clk_disable;
+ goto err_pm_put;
}
dev_dbg(dev, "No top level reset found\n");
img_spdif_in_writel(spdif, IMG_SPDIF_IN_SOFT_RESET_MASK,
@@ -759,42 +791,98 @@ static int img_spdif_in_probe(struct platform_device *pdev)
IMG_SPDIF_IN_CTL_TRK_MASK;
img_spdif_in_writel(spdif, reg, IMG_SPDIF_IN_CTL);
+ pm_runtime_put(&pdev->dev);
+
ret = devm_snd_soc_register_component(&pdev->dev,
&img_spdif_in_component, &img_spdif_in_dai, 1);
if (ret)
- goto err_clk_disable;
+ goto err_suspend;
ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
if (ret)
- goto err_clk_disable;
+ goto err_suspend;
return 0;
-err_clk_disable:
- clk_disable_unprepare(spdif->clk_sys);
+err_pm_put:
+ pm_runtime_put(&pdev->dev);
+err_suspend:
+ if (!pm_runtime_enabled(&pdev->dev))
+ img_spdif_in_runtime_suspend(&pdev->dev);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
return ret;
}
static int img_spdif_in_dev_remove(struct platform_device *pdev)
{
- struct img_spdif_in *spdif = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ img_spdif_in_runtime_suspend(&pdev->dev);
- clk_disable_unprepare(spdif->clk_sys);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int img_spdif_in_suspend(struct device *dev)
+{
+ struct img_spdif_in *spdif = dev_get_drvdata(dev);
+ int ret;
+
+ if (pm_runtime_status_suspended(dev)) {
+ ret = img_spdif_in_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ spdif->suspend_clkgen = img_spdif_in_readl(spdif, IMG_SPDIF_IN_CLKGEN);
+ spdif->suspend_ctl = img_spdif_in_readl(spdif, IMG_SPDIF_IN_CTL);
+
+ img_spdif_in_runtime_suspend(dev);
return 0;
}
+static int img_spdif_in_resume(struct device *dev)
+{
+ struct img_spdif_in *spdif = dev_get_drvdata(dev);
+ int i, ret;
+
+ ret = img_spdif_in_runtime_resume(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < IMG_SPDIF_IN_NUM_ACLKGEN; i++)
+ img_spdif_in_aclkgen_writel(spdif, i);
+
+ img_spdif_in_writel(spdif, spdif->suspend_clkgen, IMG_SPDIF_IN_CLKGEN);
+ img_spdif_in_writel(spdif, spdif->suspend_ctl, IMG_SPDIF_IN_CTL);
+
+ if (pm_runtime_status_suspended(dev))
+ img_spdif_in_runtime_suspend(dev);
+
+ return 0;
+}
+#endif
+
static const struct of_device_id img_spdif_in_of_match[] = {
{ .compatible = "img,spdif-in" },
{}
};
MODULE_DEVICE_TABLE(of, img_spdif_in_of_match);
+static const struct dev_pm_ops img_spdif_in_pm_ops = {
+ SET_RUNTIME_PM_OPS(img_spdif_in_runtime_suspend,
+ img_spdif_in_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(img_spdif_in_suspend, img_spdif_in_resume)
+};
+
static struct platform_driver img_spdif_in_driver = {
.driver = {
.name = "img-spdif-in",
- .of_match_table = img_spdif_in_of_match
+ .of_match_table = img_spdif_in_of_match,
+ .pm = &img_spdif_in_pm_ops
},
.probe = img_spdif_in_probe,
.remove = img_spdif_in_dev_remove
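The spdif-in probe above also shows the unwind order that goes with this conversion: runtime PM is enabled first, the device is resumed by hand when runtime PM is compiled out, a reference is held only for the one-off register setup, and failures walk back through the same steps in reverse. A hedged sketch of that ordering, reusing the hypothetical foo_runtime_* callbacks from the earlier sketch:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* enable runtime PM before the first register access */
	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		/* !CONFIG_PM build: bring the clocks up by hand */
		ret = foo_runtime_resume(&pdev->dev);
		if (ret)
			goto err_pm_disable;
	}

	/* hold a reference only while the initial hardware setup runs */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto err_suspend;

	/* ... initial register programming with the block powered ... */

	pm_runtime_put(&pdev->dev);

	return 0;

err_suspend:
	if (!pm_runtime_status_suspended(&pdev->dev))
		foo_runtime_suspend(&pdev->dev);
err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	return ret;
}
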
diff --git a/sound/soc/img/img-spdif-out.c b/sound/soc/img/img-spdif-out.c
index 383655da2e60..934ed3df2ebf 100644
--- a/sound/soc/img/img-spdif-out.c
+++ b/sound/soc/img/img-spdif-out.c
@@ -47,25 +47,36 @@ struct img_spdif_out {
struct snd_dmaengine_dai_dma_data dma_data;
struct device *dev;
struct reset_control *rst;
+ u32 suspend_ctl;
+ u32 suspend_csl;
+ u32 suspend_csh;
};
-static int img_spdif_out_suspend(struct device *dev)
+static int img_spdif_out_runtime_suspend(struct device *dev)
{
struct img_spdif_out *spdif = dev_get_drvdata(dev);
clk_disable_unprepare(spdif->clk_ref);
+ clk_disable_unprepare(spdif->clk_sys);
return 0;
}
-static int img_spdif_out_resume(struct device *dev)
+static int img_spdif_out_runtime_resume(struct device *dev)
{
struct img_spdif_out *spdif = dev_get_drvdata(dev);
int ret;
+ ret = clk_prepare_enable(spdif->clk_sys);
+ if (ret) {
+ dev_err(dev, "clk_enable failed: %d\n", ret);
+ return ret;
+ }
+
ret = clk_prepare_enable(spdif->clk_ref);
if (ret) {
dev_err(dev, "clk_enable failed: %d\n", ret);
+ clk_disable_unprepare(spdif->clk_sys);
return ret;
}
@@ -355,21 +366,21 @@ static int img_spdif_out_probe(struct platform_device *pdev)
return PTR_ERR(spdif->clk_ref);
}
- ret = clk_prepare_enable(spdif->clk_sys);
- if (ret)
- return ret;
-
- img_spdif_out_writel(spdif, IMG_SPDIF_OUT_CTL_FS_MASK,
- IMG_SPDIF_OUT_CTL);
-
- img_spdif_out_reset(spdif);
-
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev)) {
- ret = img_spdif_out_resume(&pdev->dev);
+ ret = img_spdif_out_runtime_resume(&pdev->dev);
if (ret)
goto err_pm_disable;
}
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto err_suspend;
+
+ img_spdif_out_writel(spdif, IMG_SPDIF_OUT_CTL_FS_MASK,
+ IMG_SPDIF_OUT_CTL);
+
+ img_spdif_out_reset(spdif);
+ pm_runtime_put(&pdev->dev);
spin_lock_init(&spdif->lock);
@@ -393,27 +404,62 @@ static int img_spdif_out_probe(struct platform_device *pdev)
err_suspend:
if (!pm_runtime_status_suspended(&pdev->dev))
- img_spdif_out_suspend(&pdev->dev);
+ img_spdif_out_runtime_suspend(&pdev->dev);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(spdif->clk_sys);
return ret;
}
static int img_spdif_out_dev_remove(struct platform_device *pdev)
{
- struct img_spdif_out *spdif = platform_get_drvdata(pdev);
-
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
- img_spdif_out_suspend(&pdev->dev);
+ img_spdif_out_runtime_suspend(&pdev->dev);
- clk_disable_unprepare(spdif->clk_sys);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int img_spdif_out_suspend(struct device *dev)
+{
+ struct img_spdif_out *spdif = dev_get_drvdata(dev);
+ int ret;
+
+ if (pm_runtime_status_suspended(dev)) {
+ ret = img_spdif_out_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ spdif->suspend_ctl = img_spdif_out_readl(spdif, IMG_SPDIF_OUT_CTL);
+ spdif->suspend_csl = img_spdif_out_readl(spdif, IMG_SPDIF_OUT_CSL);
+ spdif->suspend_csh = img_spdif_out_readl(spdif, IMG_SPDIF_OUT_CSH_UV);
+
+ img_spdif_out_runtime_suspend(dev);
return 0;
}
+static int img_spdif_out_resume(struct device *dev)
+{
+ struct img_spdif_out *spdif = dev_get_drvdata(dev);
+ int ret;
+
+ ret = img_spdif_out_runtime_resume(dev);
+ if (ret)
+ return ret;
+
+ img_spdif_out_writel(spdif, spdif->suspend_ctl, IMG_SPDIF_OUT_CTL);
+ img_spdif_out_writel(spdif, spdif->suspend_csl, IMG_SPDIF_OUT_CSL);
+ img_spdif_out_writel(spdif, spdif->suspend_csh, IMG_SPDIF_OUT_CSH_UV);
+
+ if (pm_runtime_status_suspended(dev))
+ img_spdif_out_runtime_suspend(dev);
+
+ return 0;
+}
+#endif
static const struct of_device_id img_spdif_out_of_match[] = {
{ .compatible = "img,spdif-out" },
{}
@@ -421,8 +467,9 @@ static const struct of_device_id img_spdif_out_of_match[] = {
MODULE_DEVICE_TABLE(of, img_spdif_out_of_match);
static const struct dev_pm_ops img_spdif_out_pm_ops = {
- SET_RUNTIME_PM_OPS(img_spdif_out_suspend,
- img_spdif_out_resume, NULL)
+ SET_RUNTIME_PM_OPS(img_spdif_out_runtime_suspend,
+ img_spdif_out_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(img_spdif_out_suspend, img_spdif_out_resume)
};
static struct platform_driver img_spdif_out_driver = {
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index b3c7f554ec30..bb8be10b8437 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -1,19 +1,3 @@
-config SND_MFLD_MACHINE
- tristate "SOC Machine Audio driver for Intel Medfield MID platform"
- depends on INTEL_SCU_IPC
- select SND_SOC_SN95031
- select SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_PCI
- help
- This adds support for ASoC machine driver for Intel(R) MID Medfield platform
- used as alsa device in audio substem in Intel(R) MID devices
- Say Y if you have such a device.
- If unsure select "N".
-
-config SND_SST_ATOM_HIFI2_PLATFORM
- tristate
- select SND_SOC_COMPRESS
-
config SND_SST_IPC
tristate
@@ -27,10 +11,12 @@ config SND_SST_IPC_ACPI
select SND_SOC_INTEL_SST
select IOSF_MBI
+config SND_SOC_INTEL_COMMON
+ tristate
+
config SND_SOC_INTEL_SST
tristate
select SND_SOC_INTEL_SST_ACPI if ACPI
- select SND_SOC_INTEL_SST_MATCH if ACPI
config SND_SOC_INTEL_SST_FIRMWARE
tristate
@@ -39,280 +25,42 @@ config SND_SOC_INTEL_SST_FIRMWARE
config SND_SOC_INTEL_SST_ACPI
tristate
-config SND_SOC_INTEL_SST_MATCH
+config SND_SOC_ACPI_INTEL_MATCH
tristate
+ select SND_SOC_ACPI if ACPI
+
+config SND_SOC_INTEL_SST_TOPLEVEL
+ tristate "Intel ASoC SST drivers"
+ depends on X86 || COMPILE_TEST
+ select SND_SOC_INTEL_MACH
+ select SND_SOC_INTEL_COMMON
config SND_SOC_INTEL_HASWELL
- tristate
+ tristate "Intel ASoC SST driver for Haswell/Broadwell"
+ depends on SND_SOC_INTEL_SST_TOPLEVEL && SND_DMA_SGBUF
+ depends on DMADEVICES
select SND_SOC_INTEL_SST
select SND_SOC_INTEL_SST_FIRMWARE
config SND_SOC_INTEL_BAYTRAIL
- tristate
- select SND_SOC_INTEL_SST
- select SND_SOC_INTEL_SST_FIRMWARE
-
-config SND_SOC_INTEL_HASWELL_MACH
- tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
- depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
- depends on DMADEVICES
- select SND_SOC_INTEL_HASWELL
- select SND_SOC_RT5640
- help
- This adds support for the Lynxpoint Audio DSP on Intel(R) Haswell
- Ultrabook platforms.
- Say Y if you have such a device.
- If unsure select "N".
-
-config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
- tristate "ASoC Audio driver for Broxton with DA7219 and MAX98357A in I2S Mode"
- depends on X86 && ACPI && I2C
- select SND_SOC_INTEL_SKYLAKE
- select SND_SOC_DA7219
- select SND_SOC_MAX98357A
- select SND_SOC_DMIC
- select SND_SOC_HDAC_HDMI
- select SND_HDA_DSP_LOADER
- help
- This adds support for ASoC machine driver for Broxton-P platforms
- with DA7219 + MAX98357A I2S audio codec.
- Say Y if you have such a device.
- If unsure select "N".
-
-config SND_SOC_INTEL_BXT_RT298_MACH
- tristate "ASoC Audio driver for Broxton with RT298 I2S mode"
- depends on X86 && ACPI && I2C
- select SND_SOC_INTEL_SKYLAKE
- select SND_SOC_RT298
- select SND_SOC_DMIC
- select SND_SOC_HDAC_HDMI
- select SND_HDA_DSP_LOADER
- help
- This adds support for ASoC machine driver for Broxton platforms
- with RT286 I2S audio codec.
- Say Y if you have such a device.
- If unsure select "N".
-
-config SND_SOC_INTEL_BYT_RT5640_MACH
- tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
- depends on X86_INTEL_LPSS && I2C
+ tristate "Intel ASoC SST driver for Baytrail (legacy)"
+ depends on SND_SOC_INTEL_SST_TOPLEVEL
depends on DMADEVICES
- depends on SND_SST_IPC_ACPI = n
- select SND_SOC_INTEL_BAYTRAIL
- select SND_SOC_RT5640
- help
- This adds audio driver for Intel Baytrail platform based boards
- with the RT5640 audio codec. This driver is deprecated, use
- SND_SOC_INTEL_BYTCR_RT5640_MACH instead for better functionality.
-
-config SND_SOC_INTEL_BYT_MAX98090_MACH
- tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
- depends on X86_INTEL_LPSS && I2C
- depends on DMADEVICES
- depends on SND_SST_IPC_ACPI = n
- select SND_SOC_INTEL_BAYTRAIL
- select SND_SOC_MAX98090
- help
- This adds audio driver for Intel Baytrail platform based boards
- with the MAX98090 audio codec.
-
-config SND_SOC_INTEL_BDW_RT5677_MACH
- tristate "ASoC Audio driver for Intel Broadwell with RT5677 codec"
- depends on X86_INTEL_LPSS && GPIOLIB && I2C
- depends on DMADEVICES
- select SND_SOC_INTEL_HASWELL
- select SND_SOC_RT5677
- help
- This adds support for Intel Broadwell platform based boards with
- the RT5677 audio codec.
-
-config SND_SOC_INTEL_BROADWELL_MACH
- tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
- depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
- depends on DMADEVICES
- select SND_SOC_INTEL_HASWELL
- select SND_SOC_RT286
- help
- This adds support for the Wilcatpoint Audio DSP on Intel(R) Broadwell
- Ultrabook platforms.
- Say Y if you have such a device.
- If unsure select "N".
-
-config SND_SOC_INTEL_BYTCR_RT5640_MACH
- tristate "ASoC Audio driver for Intel Baytrail and Baytrail-CR with RT5640 codec"
- depends on X86 && I2C && ACPI
- select SND_SOC_RT5640
- select SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
- select SND_SOC_INTEL_SST_MATCH if ACPI
- help
- This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
- platforms with RT5640 audio codec.
- Say Y if you have such a device.
- If unsure select "N".
-
-config SND_SOC_INTEL_BYTCR_RT5651_MACH
- tristate "ASoC Audio driver for Intel Baytrail and Baytrail-CR with RT5651 codec"
- depends on X86 && I2C && ACPI
- select SND_SOC_RT5651
- select SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
- select SND_SOC_INTEL_SST_MATCH if ACPI
- help
- This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
- platforms with RT5651 audio codec.
- Say Y if you have such a device.
- If unsure select "N".
-
-config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
- tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
- depends on X86_INTEL_LPSS && I2C && ACPI
- select SND_SOC_RT5670
- select SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
- select SND_SOC_INTEL_SST_MATCH if ACPI
- help
- This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
- platforms with RT5672 audio codec.
- Say Y if you have such a device.
- If unsure select "N".
-
-config SND_SOC_INTEL_CHT_BSW_RT5645_MACH
- tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5645/5650 codec"
- depends on X86_INTEL_LPSS && I2C && ACPI
- select SND_SOC_RT5645
- select SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
- select SND_SOC_INTEL_SST_MATCH if ACPI
- help
- This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
- platforms with RT5645/5650 audio codec.
- If unsure select "N".
-
-config SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH
- tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with MAX98090 & TI codec"
- depends on X86_INTEL_LPSS && I2C && ACPI
- select SND_SOC_MAX98090
- select SND_SOC_TS3A227E
- select SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
- select SND_SOC_INTEL_SST_MATCH if ACPI
- help
- This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
- platforms with MAX98090 audio codec it also can support TI jack chip as aux device.
- If unsure select "N".
-
-config SND_SOC_INTEL_BYT_CHT_DA7213_MACH
- tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail with DA7212/7213 codec"
- depends on X86_INTEL_LPSS && I2C && ACPI
- select SND_SOC_DA7213
- select SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
- select SND_SOC_INTEL_SST_MATCH if ACPI
- help
- This adds support for ASoC machine driver for Intel(R) Baytrail & CherryTrail
- platforms with DA7212/7213 audio codec.
- If unsure select "N".
-
-config SND_SOC_INTEL_BYT_CHT_ES8316_MACH
- tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail with ES8316 codec"
- depends on X86_INTEL_LPSS && I2C && ACPI
- select SND_SOC_ES8316
- select SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
- select SND_SOC_INTEL_SST_MATCH if ACPI
- help
- This adds support for ASoC machine driver for Intel(R) Baytrail &
- Cherrytrail platforms with ES8316 audio codec.
- If unsure select "N".
-
-config SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH
- tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail platform with no codec (MinnowBoard MAX, Up)"
- depends on X86_INTEL_LPSS && I2C && ACPI
- select SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
- select SND_SOC_INTEL_SST_MATCH if ACPI
- help
- This adds support for ASoC machine driver for the MinnowBoard Max or
- Up boards and provides access to I2S signals on the Low-Speed
- connector
- If unsure select "N".
-
-config SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH
- tristate "ASoC Audio driver for KBL with RT5663 and MAX98927 in I2S Mode"
- depends on X86_INTEL_LPSS && I2C
select SND_SOC_INTEL_SST
- select SND_SOC_INTEL_SKYLAKE
- select SND_SOC_RT5663
- select SND_SOC_MAX98927
- select SND_SOC_DMIC
- select SND_SOC_HDAC_HDMI
- help
- This adds support for ASoC Onboard Codec I2S machine driver. This will
- create an alsa sound card for RT5663 + MAX98927.
- Say Y if you have such a device.
- If unsure select "N".
+ select SND_SOC_INTEL_SST_FIRMWARE
-config SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH
- tristate "ASoC Audio driver for KBL with RT5663, RT5514 and MAX98927 in I2S Mode"
- depends on X86_INTEL_LPSS && I2C && SPI
- select SND_SOC_INTEL_SST
- select SND_SOC_INTEL_SKYLAKE
- select SND_SOC_RT5663
- select SND_SOC_RT5514
- select SND_SOC_RT5514_SPI
- select SND_SOC_MAX98927
- select SND_SOC_HDAC_HDMI
- help
- This adds support for ASoC Onboard Codec I2S machine driver. This will
- create an alsa sound card for RT5663 + RT5514 + MAX98927.
- Say Y if you have such a device.
- If unsure select "N".
+config SND_SST_ATOM_HIFI2_PLATFORM
+ tristate "Intel ASoC SST driver for HiFi2 platforms (*field, *trail)"
+ depends on SND_SOC_INTEL_SST_TOPLEVEL && X86
+ select SND_SOC_COMPRESS
config SND_SOC_INTEL_SKYLAKE
- tristate
+ tristate "Intel ASoC SST driver for SKL/BXT/KBL/GLK/CNL"
+ depends on SND_SOC_INTEL_SST_TOPLEVEL && PCI && ACPI
select SND_HDA_EXT_CORE
select SND_HDA_DSP_LOADER
select SND_SOC_TOPOLOGY
select SND_SOC_INTEL_SST
-config SND_SOC_INTEL_SKL_RT286_MACH
- tristate "ASoC Audio driver for SKL with RT286 I2S mode"
- depends on X86 && ACPI && I2C
- select SND_SOC_INTEL_SKYLAKE
- select SND_SOC_RT286
- select SND_SOC_DMIC
- select SND_SOC_HDAC_HDMI
- help
- This adds support for ASoC machine driver for Skylake platforms
- with RT286 I2S audio codec.
- Say Y if you have such a device.
- If unsure select "N".
-
-config SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH
- tristate "ASoC Audio driver for SKL with NAU88L25 and SSM4567 in I2S Mode"
- depends on X86_INTEL_LPSS && I2C
- select SND_SOC_INTEL_SKYLAKE
- select SND_SOC_NAU8825
- select SND_SOC_SSM4567
- select SND_SOC_DMIC
- select SND_SOC_HDAC_HDMI
- help
- This adds support for ASoC Onboard Codec I2S machine driver. This will
- create an alsa sound card for NAU88L25 + SSM4567.
- Say Y if you have such a device.
- If unsure select "N".
-
-config SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH
- tristate "ASoC Audio driver for SKL with NAU88L25 and MAX98357A in I2S Mode"
- depends on X86_INTEL_LPSS && I2C
- select SND_SOC_INTEL_SKYLAKE
- select SND_SOC_NAU8825
- select SND_SOC_MAX98357A
- select SND_SOC_DMIC
- select SND_SOC_HDAC_HDMI
- help
- This adds support for ASoC Onboard Codec I2S machine driver. This will
- create an alsa sound card for NAU88L25 + MAX98357A.
- Say Y if you have such a device.
- If unsure select "N".
+# ASoC codec drivers
+source "sound/soc/intel/boards/Kconfig"
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index 62f37ffffdf0..b973d457e834 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Core support
-obj-$(CONFIG_SND_SOC_INTEL_SST) += common/
+obj-$(CONFIG_SND_SOC_INTEL_COMMON) += common/
# Platform Support
obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/
diff --git a/sound/soc/intel/atom/sst-mfld-platform-compress.c b/sound/soc/intel/atom/sst-mfld-platform-compress.c
index 1bead81bb510..1dbcab5a6ff0 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-compress.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-compress.c
@@ -259,7 +259,7 @@ static int sst_platform_compr_set_metadata(struct snd_compr_stream *cstream,
return stream->compr_ops->set_metadata(sst->dev, stream->id, metadata);
}
-struct snd_compr_ops sst_platform_compr_ops = {
+const struct snd_compr_ops sst_platform_compr_ops = {
.open = sst_platform_compr_open,
.free = sst_platform_compr_free,
diff --git a/sound/soc/intel/atom/sst-mfld-platform.h b/sound/soc/intel/atom/sst-mfld-platform.h
index cb32cc7e5ec1..31a58c25472c 100644
--- a/sound/soc/intel/atom/sst-mfld-platform.h
+++ b/sound/soc/intel/atom/sst-mfld-platform.h
@@ -25,7 +25,7 @@
#include "sst-atom-controls.h"
extern struct sst_device *sst;
-extern struct snd_compr_ops sst_platform_compr_ops;
+extern const struct snd_compr_ops sst_platform_compr_ops;
#define SST_MONO 1
#define SST_STEREO 2
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index 0e928d54305d..32d6e02e2104 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
-#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
@@ -41,9 +40,10 @@
#include <acpi/acpi_bus.h>
#include <asm/cpu_device_id.h>
#include <asm/iosf_mbi.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
#include "../sst-mfld-platform.h"
#include "../../common/sst-dsp.h"
-#include "../../common/sst-acpi.h"
#include "sst.h"
/* LPE viewpoint addresses */
@@ -239,19 +239,26 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
return 0;
}
+static int is_byt(void)
+{
+ bool status = false;
+ static const struct x86_cpu_id cpu_ids[] = {
+ { X86_VENDOR_INTEL, 6, 55 }, /* Valleyview, Bay Trail */
+ {}
+ };
+ if (x86_match_cpu(cpu_ids))
+ status = true;
+ return status;
+}
static int is_byt_cr(struct device *dev, bool *bytcr)
{
int status = 0;
if (IS_ENABLED(CONFIG_IOSF_MBI)) {
- static const struct x86_cpu_id cpu_ids[] = {
- { X86_VENDOR_INTEL, 6, 55 }, /* Valleyview, Bay Trail */
- {}
- };
u32 bios_status;
- if (!x86_match_cpu(cpu_ids) || !iosf_mbi_available()) {
+ if (!is_byt() || !iosf_mbi_available()) {
/* bail silently */
return status;
}
@@ -285,7 +292,7 @@ static int sst_acpi_probe(struct platform_device *pdev)
int ret = 0;
struct intel_sst_drv *ctx;
const struct acpi_device_id *id;
- struct sst_acpi_mach *mach;
+ struct snd_soc_acpi_mach *mach;
struct platform_device *mdev;
struct platform_device *plat_dev;
struct sst_platform_info *pdata;
@@ -297,13 +304,17 @@ static int sst_acpi_probe(struct platform_device *pdev)
return -ENODEV;
dev_dbg(dev, "for %s\n", id->id);
- mach = (struct sst_acpi_mach *)id->driver_data;
- mach = sst_acpi_find_machine(mach);
+ mach = (struct snd_soc_acpi_mach *)id->driver_data;
+ mach = snd_soc_acpi_find_machine(mach);
if (mach == NULL) {
dev_err(dev, "No matching machine driver found\n");
return -ENODEV;
}
+ if (is_byt())
+ mach->pdata = &byt_rvp_platform_data;
+ else
+ mach->pdata = &chv_platform_data;
pdata = mach->pdata;
ret = kstrtouint(id->id, 16, &dev_id);
@@ -381,286 +392,9 @@ static int sst_acpi_remove(struct platform_device *pdev)
return 0;
}
-static unsigned long cht_machine_id;
-
-#define CHT_SURFACE_MACH 1
-#define BYT_THINKPAD_10 2
-
-static int cht_surface_quirk_cb(const struct dmi_system_id *id)
-{
- cht_machine_id = CHT_SURFACE_MACH;
- return 1;
-}
-
-static int byt_thinkpad10_quirk_cb(const struct dmi_system_id *id)
-{
- cht_machine_id = BYT_THINKPAD_10;
- return 1;
-}
-
-
-static const struct dmi_system_id byt_table[] = {
- {
- .callback = byt_thinkpad10_quirk_cb,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 10"),
- },
- },
- {
- .callback = byt_thinkpad10_quirk_cb,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Tablet B"),
- },
- },
- {
- .callback = byt_thinkpad10_quirk_cb,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Miix 2 10"),
- },
- },
- { }
-};
-
-static const struct dmi_system_id cht_table[] = {
- {
- .callback = cht_surface_quirk_cb,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
- },
- },
- { }
-};
-
-
-static struct sst_acpi_mach cht_surface_mach = {
- .id = "10EC5640",
- .drv_name = "cht-bsw-rt5645",
- .fw_filename = "intel/fw_sst_22a8.bin",
- .board = "cht-bsw",
- .pdata = &chv_platform_data,
-};
-
-static struct sst_acpi_mach byt_thinkpad_10 = {
- .id = "10EC5640",
- .drv_name = "cht-bsw-rt5672",
- .fw_filename = "intel/fw_sst_0f28.bin",
- .board = "cht-bsw",
- .pdata = &byt_rvp_platform_data,
-};
-
-static struct sst_acpi_mach *cht_quirk(void *arg)
-{
- struct sst_acpi_mach *mach = arg;
-
- dmi_check_system(cht_table);
-
- if (cht_machine_id == CHT_SURFACE_MACH)
- return &cht_surface_mach;
- else
- return mach;
-}
-
-static struct sst_acpi_mach *byt_quirk(void *arg)
-{
- struct sst_acpi_mach *mach = arg;
-
- dmi_check_system(byt_table);
-
- if (cht_machine_id == BYT_THINKPAD_10)
- return &byt_thinkpad_10;
- else
- return mach;
-}
-
-
-static struct sst_acpi_mach sst_acpi_bytcr[] = {
- {
- .id = "10EC5640",
- .drv_name = "bytcr_rt5640",
- .fw_filename = "intel/fw_sst_0f28.bin",
- .board = "bytcr_rt5640",
- .machine_quirk = byt_quirk,
- .pdata = &byt_rvp_platform_data,
- },
- {
- .id = "10EC5642",
- .drv_name = "bytcr_rt5640",
- .fw_filename = "intel/fw_sst_0f28.bin",
- .board = "bytcr_rt5640",
- .pdata = &byt_rvp_platform_data
- },
- {
- .id = "INTCCFFD",
- .drv_name = "bytcr_rt5640",
- .fw_filename = "intel/fw_sst_0f28.bin",
- .board = "bytcr_rt5640",
- .pdata = &byt_rvp_platform_data
- },
- {
- .id = "10EC5651",
- .drv_name = "bytcr_rt5651",
- .fw_filename = "intel/fw_sst_0f28.bin",
- .board = "bytcr_rt5651",
- .pdata = &byt_rvp_platform_data
- },
- {
- .id = "DLGS7212",
- .drv_name = "bytcht_da7213",
- .fw_filename = "intel/fw_sst_0f28.bin",
- .board = "bytcht_da7213",
- .pdata = &byt_rvp_platform_data
- },
- {
- .id = "DLGS7213",
- .drv_name = "bytcht_da7213",
- .fw_filename = "intel/fw_sst_0f28.bin",
- .board = "bytcht_da7213",
- .pdata = &byt_rvp_platform_data
- },
- /* some Baytrail platforms rely on RT5645, use CHT machine driver */
- {
- .id = "10EC5645",
- .drv_name = "cht-bsw-rt5645",
- .fw_filename = "intel/fw_sst_0f28.bin",
- .board = "cht-bsw",
- .pdata = &byt_rvp_platform_data
- },
- {
- .id = "10EC5648",
- .drv_name = "cht-bsw-rt5645",
- .fw_filename = "intel/fw_sst_0f28.bin",
- .board = "cht-bsw",
- .pdata = &byt_rvp_platform_data
- },
-#if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
- /*
- * This is always last in the table so that it is selected only when
- * enabled explicitly and there is no codec-related information in SSDT
- */
- {
- .id = "80860F28",
- .drv_name = "bytcht_nocodec",
- .fw_filename = "intel/fw_sst_0f28.bin",
- .board = "bytcht_nocodec",
- .pdata = &byt_rvp_platform_data
- },
-#endif
- {},
-};
-
-/* Cherryview-based platforms: CherryTrail and Braswell */
-static struct sst_acpi_mach sst_acpi_chv[] = {
- {
- .id = "10EC5670",
- .drv_name = "cht-bsw-rt5672",
- .fw_filename = "intel/fw_sst_22a8.bin",
- .board = "cht-bsw",
- .pdata = &chv_platform_data
- },
- {
- .id = "10EC5672",
- .drv_name = "cht-bsw-rt5672",
- .fw_filename = "intel/fw_sst_22a8.bin",
- .board = "cht-bsw",
- .pdata = &chv_platform_data
- },
- {
- .id = "10EC5645",
- .drv_name = "cht-bsw-rt5645",
- .fw_filename = "intel/fw_sst_22a8.bin",
- .board = "cht-bsw",
- .pdata = &chv_platform_data
- },
- {
- .id = "10EC5650",
- .drv_name = "cht-bsw-rt5645",
- .fw_filename = "intel/fw_sst_22a8.bin",
- .board = "cht-bsw",
- .pdata = &chv_platform_data
- },
- {
- .id = "10EC3270",
- .drv_name = "cht-bsw-rt5645",
- .fw_filename = "intel/fw_sst_22a8.bin",
- .board = "cht-bsw",
- .pdata = &chv_platform_data
- },
-
- {
- .id = "193C9890",
- .drv_name = "cht-bsw-max98090",
- .fw_filename = "intel/fw_sst_22a8.bin",
- .board = "cht-bsw",
- .pdata = &chv_platform_data
- },
- {
- .id = "DLGS7212",
- .drv_name = "bytcht_da7213",
- .fw_filename = "intel/fw_sst_22a8.bin",
- .board = "bytcht_da7213",
- .pdata = &chv_platform_data
- },
- {
- .id = "DLGS7213",
- .drv_name = "bytcht_da7213",
- .fw_filename = "intel/fw_sst_22a8.bin",
- .board = "bytcht_da7213",
- .pdata = &chv_platform_data
- },
- {
- .id = "ESSX8316",
- .drv_name = "bytcht_es8316",
- .fw_filename = "intel/fw_sst_22a8.bin",
- .board = "bytcht_es8316",
- .pdata = &chv_platform_data
- },
- /* some CHT-T platforms rely on RT5640, use Baytrail machine driver */
- {
- .id = "10EC5640",
- .drv_name = "bytcr_rt5640",
- .fw_filename = "intel/fw_sst_22a8.bin",
- .board = "bytcr_rt5640",
- .machine_quirk = cht_quirk,
- .pdata = &chv_platform_data
- },
- {
- .id = "10EC3276",
- .drv_name = "bytcr_rt5640",
- .fw_filename = "intel/fw_sst_22a8.bin",
- .board = "bytcr_rt5640",
- .pdata = &chv_platform_data
- },
- /* some CHT-T platforms rely on RT5651, use Baytrail machine driver */
- {
- .id = "10EC5651",
- .drv_name = "bytcr_rt5651",
- .fw_filename = "intel/fw_sst_22a8.bin",
- .board = "bytcr_rt5651",
- .pdata = &chv_platform_data
- },
-#if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
- /*
- * This is always last in the table so that it is selected only when
- * enabled explicitly and there is no codec-related information in SSDT
- */
- {
- .id = "808622A8",
- .drv_name = "bytcht_nocodec",
- .fw_filename = "intel/fw_sst_22a8.bin",
- .board = "bytcht_nocodec",
- .pdata = &chv_platform_data
- },
-#endif
- {},
-};
-
static const struct acpi_device_id sst_acpi_ids[] = {
- { "80860F28", (unsigned long)&sst_acpi_bytcr},
- { "808622A8", (unsigned long) &sst_acpi_chv},
+ { "80860F28", (unsigned long)&snd_soc_acpi_intel_baytrail_machines},
+ { "808622A8", (unsigned long)&snd_soc_acpi_intel_cherrytrail_machines},
{ },
};
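The sst_acpi changes replace the driver-local machine tables with the shared snd-soc-acpi helpers: the ACPI ID's driver_data now points at a table of snd_soc_acpi_mach entries (kept in snd-soc-acpi-intel-match), and snd_soc_acpi_find_machine() returns the first entry whose codec HID is present in the ACPI namespace. A minimal sketch of that flow with an illustrative one-entry table; the function and table names are hypothetical:

#include <linux/device.h>
#include <sound/soc-acpi.h>

static struct snd_soc_acpi_mach foo_machines[] = {
	{
		.id = "10EC5640",		/* codec ACPI HID to look for */
		.drv_name = "bytcr_rt5640",	/* machine driver to register */
		.fw_filename = "intel/fw_sst_0f28.bin",
	},
	{}
};

static int foo_pick_machine(struct device *dev)
{
	struct snd_soc_acpi_mach *mach;

	/* returns the first entry whose codec HID is present in ACPI */
	mach = snd_soc_acpi_find_machine(foo_machines);
	if (!mach) {
		dev_err(dev, "No matching machine driver found\n");
		return -ENODEV;
	}

	dev_dbg(dev, "using machine driver %s\n", mach->drv_name);
	return 0;
}
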
diff --git a/sound/soc/intel/atom/sst/sst_loader.c b/sound/soc/intel/atom/sst/sst_loader.c
index 33917146d9c4..a686eef2cf7f 100644
--- a/sound/soc/intel/atom/sst/sst_loader.c
+++ b/sound/soc/intel/atom/sst/sst_loader.c
@@ -415,7 +415,6 @@ int sst_load_fw(struct intel_sst_drv *sst_drv_ctx)
return ret_val;
}
- BUG_ON(!sst_drv_ctx->fw_in_mem);
block = sst_create_block(sst_drv_ctx, 0, FW_DWNL_ID);
if (block == NULL)
return -ENOMEM;
diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c
index 83d8dda15233..65e257b17a7e 100644
--- a/sound/soc/intel/atom/sst/sst_stream.c
+++ b/sound/soc/intel/atom/sst/sst_stream.c
@@ -45,7 +45,6 @@ int sst_alloc_stream_mrfld(struct intel_sst_drv *sst_drv_ctx, void *params)
void *data = NULL;
dev_dbg(sst_drv_ctx->dev, "Enter\n");
- BUG_ON(!params);
str_params = (struct snd_sst_params *)params;
memset(&alloc_param, 0, sizeof(alloc_param));
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
new file mode 100644
index 000000000000..6f754708a48c
--- /dev/null
+++ b/sound/soc/intel/boards/Kconfig
@@ -0,0 +1,265 @@
+config SND_SOC_INTEL_MACH
+ tristate "Intel Audio machine drivers"
+ depends on SND_SOC_INTEL_SST_TOPLEVEL
+ select SND_SOC_ACPI_INTEL_MATCH if ACPI
+
+if SND_SOC_INTEL_MACH
+
+config SND_MFLD_MACHINE
+ tristate "SOC Machine Audio driver for Intel Medfield MID platform"
+ depends on INTEL_SCU_IPC
+ select SND_SOC_SN95031
+ depends on SND_SST_ATOM_HIFI2_PLATFORM
+ select SND_SST_IPC_PCI
+ help
+ This adds support for the ASoC machine driver for the Intel(R) MID Medfield platform
+ used as an ALSA device in the audio subsystem in Intel(R) MID devices.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_HASWELL_MACH
+ tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
+ depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
+ depends on SND_SOC_INTEL_HASWELL
+ select SND_SOC_RT5640
+ help
+ This adds support for the Lynxpoint Audio DSP on Intel(R) Haswell
+ Ultrabook platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_BDW_RT5677_MACH
+ tristate "ASoC Audio driver for Intel Broadwell with RT5677 codec"
+ depends on X86_INTEL_LPSS && GPIOLIB && I2C
+ depends on SND_SOC_INTEL_HASWELL
+ select SND_SOC_RT5677
+ help
+ This adds support for Intel Broadwell platform based boards with
+ the RT5677 audio codec.
+
+config SND_SOC_INTEL_BROADWELL_MACH
+ tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
+ depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
+ depends on SND_SOC_INTEL_HASWELL
+ select SND_SOC_RT286
+ help
+ This adds support for the Wildcatpoint Audio DSP on Intel(R) Broadwell
+ Ultrabook platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_BYT_MAX98090_MACH
+ tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
+ depends on X86_INTEL_LPSS && I2C
+ depends on SND_SST_IPC_ACPI = n
+ depends on SND_SOC_INTEL_BAYTRAIL
+ select SND_SOC_MAX98090
+ help
+ This adds audio driver for Intel Baytrail platform based boards
+ with the MAX98090 audio codec.
+
+config SND_SOC_INTEL_BYT_RT5640_MACH
+ tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
+ depends on X86_INTEL_LPSS && I2C
+ depends on SND_SST_IPC_ACPI = n
+ depends on SND_SOC_INTEL_BAYTRAIL
+ select SND_SOC_RT5640
+ help
+ This adds audio driver for Intel Baytrail platform based boards
+ with the RT5640 audio codec. This driver is deprecated, use
+ SND_SOC_INTEL_BYTCR_RT5640_MACH instead for better functionality.
+
+config SND_SOC_INTEL_BYTCR_RT5640_MACH
+ tristate "ASoC Audio driver for Intel Baytrail and Baytrail-CR with RT5640 codec"
+ depends on X86 && I2C && ACPI
+ select SND_SOC_RT5640
+ depends on SND_SST_ATOM_HIFI2_PLATFORM
+ select SND_SST_IPC_ACPI
+ help
+ This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
+ platforms with RT5640 audio codec.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_BYTCR_RT5651_MACH
+ tristate "ASoC Audio driver for Intel Baytrail and Baytrail-CR with RT5651 codec"
+ depends on X86 && I2C && ACPI
+ select SND_SOC_RT5651
+ depends on SND_SST_ATOM_HIFI2_PLATFORM
+ select SND_SST_IPC_ACPI
+ help
+ This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
+ platforms with RT5651 audio codec.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
+ tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
+ depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_RT5670
+ depends on SND_SST_ATOM_HIFI2_PLATFORM
+ select SND_SST_IPC_ACPI
+ help
+ This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
+ platforms with RT5672 audio codec.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_CHT_BSW_RT5645_MACH
+ tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5645/5650 codec"
+ depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_RT5645
+ depends on SND_SST_ATOM_HIFI2_PLATFORM
+ select SND_SST_IPC_ACPI
+ help
+ This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
+ platforms with RT5645/5650 audio codec.
+ If unsure select "N".
+
+config SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH
+ tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with MAX98090 & TI codec"
+ depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_MAX98090
+ select SND_SOC_TS3A227E
+ depends on SND_SST_ATOM_HIFI2_PLATFORM
+ select SND_SST_IPC_ACPI
+ help
+ This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
+ platforms with the MAX98090 audio codec; it can also support the TI jack chip as an aux device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_BYT_CHT_DA7213_MACH
+ tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail with DA7212/7213 codec"
+ depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_DA7213
+ depends on SND_SST_ATOM_HIFI2_PLATFORM
+ select SND_SST_IPC_ACPI
+ help
+ This adds support for ASoC machine driver for Intel(R) Baytrail & CherryTrail
+ platforms with DA7212/7213 audio codec.
+ If unsure select "N".
+
+config SND_SOC_INTEL_BYT_CHT_ES8316_MACH
+ tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail with ES8316 codec"
+ depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_ES8316
+ depends on SND_SST_ATOM_HIFI2_PLATFORM
+ select SND_SST_IPC_ACPI
+ help
+ This adds support for ASoC machine driver for Intel(R) Baytrail &
+ Cherrytrail platforms with ES8316 audio codec.
+ If unsure select "N".
+
+config SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH
+ tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail platform with no codec (MinnowBoard MAX, Up)"
+ depends on X86_INTEL_LPSS && I2C && ACPI
+ depends on SND_SST_ATOM_HIFI2_PLATFORM
+ select SND_SST_IPC_ACPI
+ help
+ This adds support for ASoC machine driver for the MinnowBoard Max or
+ Up boards and provides access to I2S signals on the Low-Speed
+ connector.
+ If unsure select "N".
+
+config SND_SOC_INTEL_SKL_RT286_MACH
+ tristate "ASoC Audio driver for SKL with RT286 I2S mode"
+ depends on X86 && ACPI && I2C
+ depends on SND_SOC_INTEL_SKYLAKE
+ select SND_SOC_RT286
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ help
+ This adds support for ASoC machine driver for Skylake platforms
+ with RT286 I2S audio codec.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH
+ tristate "ASoC Audio driver for SKL with NAU88L25 and SSM4567 in I2S Mode"
+ depends on X86_INTEL_LPSS && I2C
+ depends on SND_SOC_INTEL_SKYLAKE
+ select SND_SOC_NAU8825
+ select SND_SOC_SSM4567
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ help
+ This adds support for ASoC Onboard Codec I2S machine driver. This will
+ create an alsa sound card for NAU88L25 + SSM4567.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH
+ tristate "ASoC Audio driver for SKL with NAU88L25 and MAX98357A in I2S Mode"
+ depends on X86_INTEL_LPSS && I2C
+ depends on SND_SOC_INTEL_SKYLAKE
+ select SND_SOC_NAU8825
+ select SND_SOC_MAX98357A
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ help
+ This adds support for ASoC Onboard Codec I2S machine driver. This will
+ create an alsa sound card for NAU88L25 + MAX98357A.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
+ tristate "ASoC Audio driver for Broxton with DA7219 and MAX98357A in I2S Mode"
+ depends on X86 && ACPI && I2C
+ depends on SND_SOC_INTEL_SKYLAKE
+ select SND_SOC_DA7219
+ select SND_SOC_MAX98357A
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ select SND_HDA_DSP_LOADER
+ help
+ This adds support for ASoC machine driver for Broxton-P platforms
+ with DA7219 + MAX98357A I2S audio codec.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_BXT_RT298_MACH
+ tristate "ASoC Audio driver for Broxton with RT298 I2S mode"
+ depends on X86 && ACPI && I2C
+ depends on SND_SOC_INTEL_SKYLAKE
+ select SND_SOC_RT298
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ select SND_HDA_DSP_LOADER
+ help
+ This adds support for ASoC machine driver for Broxton platforms
+ with the RT298 I2S audio codec.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH
+ tristate "ASoC Audio driver for KBL with RT5663 and MAX98927 in I2S Mode"
+ depends on X86_INTEL_LPSS && I2C
+ select SND_SOC_INTEL_SST
+ depends on SND_SOC_INTEL_SKYLAKE
+ select SND_SOC_RT5663
+ select SND_SOC_MAX98927
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ help
+ This adds support for ASoC Onboard Codec I2S machine driver. This will
+ create an alsa sound card for RT5663 + MAX98927.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH
+ tristate "ASoC Audio driver for KBL with RT5663, RT5514 and MAX98927 in I2S Mode"
+ depends on X86_INTEL_LPSS && I2C && SPI
+ select SND_SOC_INTEL_SST
+ depends on SND_SOC_INTEL_SKYLAKE
+ select SND_SOC_RT5663
+ select SND_SOC_RT5514
+ select SND_SOC_RT5514_SPI
+ select SND_SOC_MAX98927
+ select SND_SOC_HDAC_HDMI
+ help
+ This adds support for ASoC Onboard Codec I2S machine driver. This will
+ create an alsa sound card for RT5663 + RT5514 + MAX98927.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+endif
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index ce35ec7884d1..f8a91a6f2a17 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -55,20 +55,6 @@ enum {
BXT_DPCM_AUDIO_HDMI3_PB,
};
-static inline struct snd_soc_dai *bxt_get_codec_dai(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd;
-
- list_for_each_entry(rtd, &card->rtd_list, list) {
-
- if (!strncmp(rtd->codec_dai->name, BXT_DIALOG_CODEC_DAI,
- strlen(BXT_DIALOG_CODEC_DAI)))
- return rtd->codec_dai;
- }
-
- return NULL;
-}
-
static int platform_clock_control(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
@@ -77,7 +63,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
struct snd_soc_card *card = dapm->card;
struct snd_soc_dai *codec_dai;
- codec_dai = bxt_get_codec_dai(card);
+ codec_dai = snd_soc_card_get_codec_dai(card, BXT_DIALOG_CODEC_DAI);
if (!codec_dai) {
dev_err(card->dev, "Codec dai not found; Unable to set/unset codec pll\n");
return -EIO;
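The board-file changes above swap several open-coded walks of card->rtd_list for snd_soc_card_get_codec_dai(), which looks up a codec DAI on the card by name. A small sketch of the lookup; the DAI name and clock parameters are illustrative only:

#include <sound/soc.h>

static int foo_enable_codec_clock(struct snd_soc_card *card)
{
	struct snd_soc_dai *codec_dai;

	/* look the codec DAI up by name instead of walking rtd_list */
	codec_dai = snd_soc_card_get_codec_dai(card, "da7219-hifi");
	if (!codec_dai) {
		dev_err(card->dev, "codec DAI not found\n");
		return -EIO;
	}

	/* e.g. hand the codec its sysclk once the DAI is known */
	return snd_soc_dai_set_sysclk(codec_dai, 0, 19200000, SND_SOC_CLOCK_IN);
}
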
diff --git a/sound/soc/intel/boards/bytcht_da7213.c b/sound/soc/intel/boards/bytcht_da7213.c
index 18873e23f404..c4d82ad41bd7 100644
--- a/sound/soc/intel/boards/bytcht_da7213.c
+++ b/sound/soc/intel/boards/bytcht_da7213.c
@@ -27,9 +27,9 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/soc-acpi.h>
#include "../../codecs/da7213.h"
#include "../atom/sst-atom-controls.h"
-#include "../common/sst-acpi.h"
static const struct snd_kcontrol_new controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone Jack"),
@@ -185,19 +185,11 @@ static struct snd_soc_dai_link dailink[] = {
.dpcm_playback = 1,
.ops = &aif1_ops,
},
- [MERR_DPCM_COMPR] = {
- .name = "Compressed Port",
- .stream_name = "Compress",
- .cpu_dai_name = "compress-cpu-dai",
- .codec_dai_name = "snd-soc-dummy-dai",
- .codec_name = "snd-soc-dummy",
- .platform_name = "sst-mfld-platform",
- },
/* CODEC<->CODEC link */
/* back ends */
{
.name = "SSP2-Codec",
- .id = 1,
+ .id = 0,
.cpu_dai_name = "ssp2-port",
.platform_name = "sst-mfld-platform",
.no_pcm = 1,
@@ -231,19 +223,18 @@ static char codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
static int bytcht_da7213_probe(struct platform_device *pdev)
{
- int ret_val = 0;
- int i;
struct snd_soc_card *card;
- struct sst_acpi_mach *mach;
+ struct snd_soc_acpi_mach *mach;
const char *i2c_name = NULL;
int dai_index = 0;
+ int ret_val = 0;
+ int i;
mach = (&pdev->dev)->platform_data;
card = &bytcht_da7213_card;
card->dev = &pdev->dev;
/* fix index of codec dai */
- dai_index = MERR_DPCM_COMPR + 1;
for (i = 0; i < ARRAY_SIZE(dailink); i++) {
if (!strcmp(dailink[i].codec_name, "i2c-DLGS7213:00")) {
dai_index = i;
@@ -252,8 +243,8 @@ static int bytcht_da7213_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = sst_acpi_find_name_from_hid(mach->id);
- if (i2c_name != NULL) {
+ i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+ if (i2c_name) {
snprintf(codec_name, sizeof(codec_name),
"%s%s", "i2c-", i2c_name);
dailink[dai_index].codec_name = codec_name;
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index 52635462dac6..8088396717e3 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -29,28 +29,14 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/soc-acpi.h>
#include "../atom/sst-atom-controls.h"
-#include "../common/sst-acpi.h"
#include "../common/sst-dsp.h"
struct byt_cht_es8316_private {
struct clk *mclk;
};
-#define CODEC_DAI1 "ES8316 HiFi"
-
-static inline struct snd_soc_dai *get_codec_dai(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd;
-
- list_for_each_entry(rtd, &card->rtd_list, list) {
- if (!strncmp(rtd->codec_dai->name, CODEC_DAI1,
- strlen(CODEC_DAI1)))
- return rtd->codec_dai;
- }
- return NULL;
-}
-
static const struct snd_soc_dapm_widget byt_cht_es8316_widgets[] = {
SND_SOC_DAPM_HP("Headphone", NULL),
@@ -208,22 +194,13 @@ static struct snd_soc_dai_link byt_cht_es8316_dais[] = {
.ops = &byt_cht_es8316_aif1_ops,
},
- [MERR_DPCM_COMPR] = {
- .name = "Compressed Port",
- .stream_name = "Compress",
- .cpu_dai_name = "compress-cpu-dai",
- .codec_dai_name = "snd-soc-dummy-dai",
- .codec_name = "snd-soc-dummy",
- .platform_name = "sst-mfld-platform",
- },
-
/* back ends */
{
/* Only SSP2 has been tested here, so BYT-CR platforms that
* require SSP0 will not work.
*/
.name = "SSP2-Codec",
- .id = 1,
+ .id = 0,
.cpu_dai_name = "ssp2-port",
.platform_name = "sst-mfld-platform",
.no_pcm = 1,
diff --git a/sound/soc/intel/boards/bytcht_nocodec.c b/sound/soc/intel/boards/bytcht_nocodec.c
index 1dd9441806fa..b80ec027a0e8 100644
--- a/sound/soc/intel/boards/bytcht_nocodec.c
+++ b/sound/soc/intel/boards/bytcht_nocodec.c
@@ -133,19 +133,11 @@ static struct snd_soc_dai_link dais[] = {
.dpcm_playback = 1,
.ops = &aif1_ops,
},
- [MERR_DPCM_COMPR] = {
- .name = "Compressed Port",
- .stream_name = "Compress",
- .cpu_dai_name = "compress-cpu-dai",
- .codec_dai_name = "snd-soc-dummy-dai",
- .codec_name = "snd-soc-dummy",
- .platform_name = "sst-mfld-platform",
- },
/* CODEC<->CODEC link */
/* back ends */
{
.name = "SSP2-LowSpeed Connector",
- .id = 1,
+ .id = 0,
.cpu_dai_name = "ssp2-port",
.platform_name = "sst-mfld-platform",
.no_pcm = 1,
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 4a76b099a508..f2c0fc415e52 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -22,19 +22,19 @@
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/platform_sst_audio.h>
-#include <linux/clk.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/jack.h>
+#include <sound/soc-acpi.h>
#include "../../codecs/rt5640.h"
#include "../atom/sst-atom-controls.h"
-#include "../common/sst-acpi.h"
#include "../common/sst-dsp.h"
enum {
@@ -44,13 +44,13 @@ enum {
BYT_RT5640_IN3_MAP,
};
-#define BYT_RT5640_MAP(quirk) ((quirk) & 0xff)
+#define BYT_RT5640_MAP(quirk) ((quirk) & GENMASK(7, 0))
#define BYT_RT5640_DMIC_EN BIT(16)
#define BYT_RT5640_MONO_SPEAKER BIT(17)
#define BYT_RT5640_DIFF_MIC BIT(18) /* defaut is single-ended */
-#define BYT_RT5640_SSP2_AIF2 BIT(19) /* default is using AIF1 */
-#define BYT_RT5640_SSP0_AIF1 BIT(20)
-#define BYT_RT5640_SSP0_AIF2 BIT(21)
+#define BYT_RT5640_SSP2_AIF2 BIT(19) /* default is using AIF1 */
+#define BYT_RT5640_SSP0_AIF1 BIT(20)
+#define BYT_RT5640_SSP0_AIF2 BIT(21)
#define BYT_RT5640_MCLK_EN BIT(22)
#define BYT_RT5640_MCLK_25MHZ BIT(23)
@@ -145,22 +145,6 @@ static void log_quirks(struct device *dev)
#define BYT_CODEC_DAI1 "rt5640-aif1"
#define BYT_CODEC_DAI2 "rt5640-aif2"
-static inline struct snd_soc_dai *byt_get_codec_dai(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd;
-
- list_for_each_entry(rtd, &card->rtd_list, list) {
- if (!strncmp(rtd->codec_dai->name, BYT_CODEC_DAI1,
- strlen(BYT_CODEC_DAI1)))
- return rtd->codec_dai;
- if (!strncmp(rtd->codec_dai->name, BYT_CODEC_DAI2,
- strlen(BYT_CODEC_DAI2)))
- return rtd->codec_dai;
-
- }
- return NULL;
-}
-
static int platform_clock_control(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
@@ -170,7 +154,10 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
struct byt_rt5640_private *priv = snd_soc_card_get_drvdata(card);
int ret;
- codec_dai = byt_get_codec_dai(card);
+ codec_dai = snd_soc_card_get_codec_dai(card, BYT_CODEC_DAI1);
+ if (!codec_dai)
+ codec_dai = snd_soc_card_get_codec_dai(card, BYT_CODEC_DAI2);
+
if (!codec_dai) {
dev_err(card->dev,
"Codec dai not found; Unable to set platform clock\n");
@@ -178,7 +165,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
}
if (SND_SOC_DAPM_EVENT_ON(event)) {
- if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && priv->mclk) {
+ if (byt_rt5640_quirk & BYT_RT5640_MCLK_EN) {
ret = clk_prepare_enable(priv->mclk);
if (ret < 0) {
dev_err(card->dev,
@@ -199,7 +186,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
48000 * 512,
SND_SOC_CLOCK_IN);
if (!ret) {
- if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && priv->mclk)
+ if (byt_rt5640_quirk & BYT_RT5640_MCLK_EN)
clk_disable_unprepare(priv->mclk);
}
}
@@ -376,8 +363,8 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
},
- .driver_data = (unsigned long *)(BYT_RT5640_IN1_MAP |
- BYT_RT5640_MCLK_EN),
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_MCLK_EN),
},
{
.callback = byt_rt5640_quirk_cb,
@@ -385,12 +372,11 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
},
- .driver_data = (unsigned long *)(BYT_RT5640_IN1_MAP |
- BYT_RT5640_MONO_SPEAKER |
- BYT_RT5640_DIFF_MIC |
- BYT_RT5640_SSP0_AIF2 |
- BYT_RT5640_MCLK_EN
- ),
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_SSP0_AIF2 |
+ BYT_RT5640_MCLK_EN),
},
{
.callback = byt_rt5640_quirk_cb,
@@ -398,9 +384,9 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "DellInc."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Venue 8 Pro 5830"),
},
- .driver_data = (unsigned long *)(BYT_RT5640_DMIC2_MAP |
- BYT_RT5640_DMIC_EN |
- BYT_RT5640_MCLK_EN),
+ .driver_data = (void *)(BYT_RT5640_DMIC2_MAP |
+ BYT_RT5640_DMIC_EN |
+ BYT_RT5640_MCLK_EN),
},
{
.callback = byt_rt5640_quirk_cb,
@@ -408,8 +394,8 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP ElitePad 1000 G2"),
},
- .driver_data = (unsigned long *)(BYT_RT5640_IN1_MAP |
- BYT_RT5640_MCLK_EN),
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_MCLK_EN),
},
{
.callback = byt_rt5640_quirk_cb,
@@ -417,8 +403,8 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Max B3 PLATFORM"),
},
- .driver_data = (unsigned long *)(BYT_RT5640_DMIC1_MAP |
- BYT_RT5640_DMIC_EN),
+ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_DMIC_EN),
},
{
.callback = byt_rt5640_quirk_cb,
@@ -426,9 +412,9 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
},
- .driver_data = (unsigned long *)(BYT_RT5640_IN3_MAP |
- BYT_RT5640_MCLK_EN |
- BYT_RT5640_SSP0_AIF1),
+ .driver_data = (void *)(BYT_RT5640_IN3_MAP |
+ BYT_RT5640_MCLK_EN |
+ BYT_RT5640_SSP0_AIF1),
},
{
.callback = byt_rt5640_quirk_cb,
@@ -436,7 +422,7 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
},
- .driver_data = (unsigned long *)(BYT_RT5640_IN1_MAP |
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
BYT_RT5640_MCLK_EN |
BYT_RT5640_SSP0_AIF1),
@@ -446,9 +432,9 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
},
- .driver_data = (unsigned long *)(BYT_RT5640_IN3_MAP |
- BYT_RT5640_MCLK_EN |
- BYT_RT5640_SSP0_AIF1),
+ .driver_data = (void *)(BYT_RT5640_IN3_MAP |
+ BYT_RT5640_MCLK_EN |
+ BYT_RT5640_SSP0_AIF1),
},
{}
@@ -456,12 +442,12 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
{
- int ret;
- struct snd_soc_codec *codec = runtime->codec;
struct snd_soc_card *card = runtime->card;
- const struct snd_soc_dapm_route *custom_map;
struct byt_rt5640_private *priv = snd_soc_card_get_drvdata(card);
+ struct snd_soc_codec *codec = runtime->codec;
+ const struct snd_soc_dapm_route *custom_map;
int num_routes;
+ int ret;
card->dapm.idle_bias_off = true;
@@ -549,7 +535,7 @@ static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
snd_soc_dapm_ignore_suspend(&card->dapm, "Headphone");
snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker");
- if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && priv->mclk) {
+ if (byt_rt5640_quirk & BYT_RT5640_MCLK_EN) {
/*
* The firmware might enable the clock at
* boot (this information may or may not
@@ -693,18 +679,10 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
.dpcm_playback = 1,
.ops = &byt_rt5640_aif1_ops,
},
- [MERR_DPCM_COMPR] = {
- .name = "Baytrail Compressed Port",
- .stream_name = "Baytrail Compress",
- .cpu_dai_name = "compress-cpu-dai",
- .codec_dai_name = "snd-soc-dummy-dai",
- .codec_name = "snd-soc-dummy",
- .platform_name = "sst-mfld-platform",
- },
/* back ends */
{
.name = "SSP2-Codec",
- .id = 1,
+ .id = 0,
.cpu_dai_name = "ssp2-port", /* overwritten for ssp0 routing */
.platform_name = "sst-mfld-platform",
.no_pcm = 1,
@@ -758,12 +736,12 @@ struct acpi_chan_package { /* ACPICA seems to require 64 bit integers */
static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
{
- int ret_val = 0;
- struct sst_acpi_mach *mach;
+ struct byt_rt5640_private *priv;
+ struct snd_soc_acpi_mach *mach;
const char *i2c_name = NULL;
+ int ret_val = 0;
+ int dai_index = 0;
int i;
- int dai_index;
- struct byt_rt5640_private *priv;
is_bytcr = false;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_ATOMIC);
@@ -776,7 +754,6 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
snd_soc_card_set_drvdata(&byt_rt5640_card, priv);
/* fix index of codec dai */
- dai_index = MERR_DPCM_COMPR + 1;
for (i = 0; i < ARRAY_SIZE(byt_rt5640_dais); i++) {
if (!strcmp(byt_rt5640_dais[i].codec_name, "i2c-10EC5640:00")) {
dai_index = i;
@@ -785,8 +762,8 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = sst_acpi_find_name_from_hid(mach->id);
- if (i2c_name != NULL) {
+ i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+ if (i2c_name) {
snprintf(byt_rt5640_codec_name, sizeof(byt_rt5640_codec_name),
"%s%s", "i2c-", i2c_name);
@@ -819,7 +796,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
/* format specified: 2 64-bit integers */
struct acpi_buffer format = {sizeof("NN"), "NN"};
struct acpi_buffer state = {0, NULL};
- struct sst_acpi_package_context pkg_ctx;
+ struct snd_soc_acpi_package_context pkg_ctx;
bool pkg_found = false;
state.length = sizeof(chan_package);
@@ -831,7 +808,8 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
pkg_ctx.state = &state;
pkg_ctx.data_valid = false;
- pkg_found = sst_acpi_find_package_from_hid(mach->id, &pkg_ctx);
+ pkg_found = snd_soc_acpi_find_package_from_hid(mach->id,
+ &pkg_ctx);
if (pkg_found) {
if (chan_package.aif_value == 1) {
dev_info(&pdev->dev, "BIOS Routing: AIF1 connected\n");
@@ -891,7 +869,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
byt_rt5640_cpu_dai_name;
}
- if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && (is_valleyview())) {
+ if (byt_rt5640_quirk & BYT_RT5640_MCLK_EN) {
priv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
if (IS_ERR(priv->mclk)) {
ret_val = PTR_ERR(priv->mclk);
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 4a3516b38c2c..d955836c6870 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -21,24 +21,124 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/slab.h>
+#include <asm/platform_sst_audio.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/jack.h>
+#include <sound/soc-acpi.h>
#include "../../codecs/rt5651.h"
#include "../atom/sst-atom-controls.h"
+enum {
+ BYT_RT5651_DMIC_MAP,
+ BYT_RT5651_IN1_MAP,
+ BYT_RT5651_IN2_MAP,
+};
+
+#define BYT_RT5651_MAP(quirk) ((quirk) & GENMASK(7, 0))
+#define BYT_RT5651_DMIC_EN BIT(16)
+#define BYT_RT5651_MCLK_EN BIT(17)
+#define BYT_RT5651_MCLK_25MHZ BIT(18)
+
+struct byt_rt5651_private {
+ struct clk *mclk;
+ struct snd_soc_jack jack;
+};
+
+static unsigned long byt_rt5651_quirk = BYT_RT5651_DMIC_MAP |
+ BYT_RT5651_DMIC_EN |
+ BYT_RT5651_MCLK_EN;
+
+static void log_quirks(struct device *dev)
+{
+ if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_DMIC_MAP)
+ dev_info(dev, "quirk DMIC_MAP enabled");
+ if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN1_MAP)
+ dev_info(dev, "quirk IN1_MAP enabled");
+ if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN2_MAP)
+ dev_info(dev, "quirk IN2_MAP enabled");
+ if (byt_rt5651_quirk & BYT_RT5651_DMIC_EN)
+ dev_info(dev, "quirk DMIC enabled");
+ if (byt_rt5651_quirk & BYT_RT5651_MCLK_EN)
+ dev_info(dev, "quirk MCLK_EN enabled");
+ if (byt_rt5651_quirk & BYT_RT5651_MCLK_25MHZ)
+ dev_info(dev, "quirk MCLK_25MHZ enabled");
+}
+
+#define BYT_CODEC_DAI1 "rt5651-aif1"
+
+static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
+ struct byt_rt5651_private *priv = snd_soc_card_get_drvdata(card);
+ int ret;
+
+ codec_dai = snd_soc_card_get_codec_dai(card, BYT_CODEC_DAI1);
+ if (!codec_dai) {
+ dev_err(card->dev,
+ "Codec dai not found; Unable to set platform clock\n");
+ return -EIO;
+ }
+
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ if (byt_rt5651_quirk & BYT_RT5651_MCLK_EN) {
+ ret = clk_prepare_enable(priv->mclk);
+ if (ret < 0) {
+ dev_err(card->dev,
+ "could not configure MCLK state");
+ return ret;
+ }
+ }
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5651_SCLK_S_PLL1,
+ 48000 * 512,
+ SND_SOC_CLOCK_IN);
+ } else {
+ /*
+ * Set codec clock source to internal clock before
+ * turning off the platform clock. Codec needs clock
+ * for Jack detection and button press
+ */
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5651_SCLK_S_RCCLK,
+ 48000 * 512,
+ SND_SOC_CLOCK_IN);
+ if (!ret)
+ if (byt_rt5651_quirk & BYT_RT5651_MCLK_EN)
+ clk_disable_unprepare(priv->mclk);
+ }
+
+ if (ret < 0) {
+ dev_err(card->dev, "can't set codec sysclk: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget byt_rt5651_widgets[] = {
SND_SOC_DAPM_HP("Headphone", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Internal Mic", NULL),
SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ platform_clock_control, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
};
static const struct snd_soc_dapm_route byt_rt5651_audio_map[] = {
+ {"Headphone", NULL, "Platform Clock"},
+ {"Headset Mic", NULL, "Platform Clock"},
+ {"Internal Mic", NULL, "Platform Clock"},
+ {"Speaker", NULL, "Platform Clock"},
+
{"AIF1 Playback", NULL, "ssp2 Tx"},
{"ssp2 Tx", NULL, "codec_out0"},
{"ssp2 Tx", NULL, "codec_out1"},
@@ -47,38 +147,30 @@ static const struct snd_soc_dapm_route byt_rt5651_audio_map[] = {
{"ssp2 Rx", NULL, "AIF1 Capture"},
{"Headset Mic", NULL, "micbias1"}, /* lowercase for rt5651 */
- {"IN2P", NULL, "Headset Mic"},
{"Headphone", NULL, "HPOL"},
{"Headphone", NULL, "HPOR"},
{"Speaker", NULL, "LOUTL"},
{"Speaker", NULL, "LOUTR"},
};
-static const struct snd_soc_dapm_route byt_rt5651_intmic_dmic1_map[] = {
- {"DMIC1", NULL, "Internal Mic"},
-};
-
-static const struct snd_soc_dapm_route byt_rt5651_intmic_dmic2_map[] = {
- {"DMIC2", NULL, "Internal Mic"},
+static const struct snd_soc_dapm_route byt_rt5651_intmic_dmic_map[] = {
+ {"IN2P", NULL, "Headset Mic"},
+ {"DMIC L1", NULL, "Internal Mic"},
+ {"DMIC R1", NULL, "Internal Mic"},
};
static const struct snd_soc_dapm_route byt_rt5651_intmic_in1_map[] = {
{"Internal Mic", NULL, "micbias1"},
+ {"IN2P", NULL, "Headset Mic"},
{"IN1P", NULL, "Internal Mic"},
};
-enum {
- BYT_RT5651_DMIC1_MAP,
- BYT_RT5651_DMIC2_MAP,
- BYT_RT5651_IN1_MAP,
+static const struct snd_soc_dapm_route byt_rt5651_intmic_in2_map[] = {
+ {"Internal Mic", NULL, "micbias1"},
+ {"IN1P", NULL, "Headset Mic"},
+ {"IN2P", NULL, "Internal Mic"},
};
-#define BYT_RT5651_MAP(quirk) ((quirk) & 0xff)
-#define BYT_RT5651_DMIC_EN BIT(16)
-
-static unsigned long byt_rt5651_quirk = BYT_RT5651_DMIC1_MAP |
- BYT_RT5651_DMIC_EN;
-
static const struct snd_kcontrol_new byt_rt5651_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
@@ -86,6 +178,17 @@ static const struct snd_kcontrol_new byt_rt5651_controls[] = {
SOC_DAPM_PIN_SWITCH("Speaker"),
};
+static struct snd_soc_jack_pin bytcr_jack_pins[] = {
+ {
+ .pin = "Headphone",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
static int byt_rt5651_aif1_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
@@ -103,9 +206,26 @@ static int byt_rt5651_aif1_hw_params(struct snd_pcm_substream *substream,
return ret;
}
- ret = snd_soc_dai_set_pll(codec_dai, 0, RT5651_PLL1_S_BCLK1,
- params_rate(params) * 50,
- params_rate(params) * 512);
+ if (!(byt_rt5651_quirk & BYT_RT5651_MCLK_EN)) {
+ /* 2x25 bit slots on SSP2 */
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ RT5651_PLL1_S_BCLK1,
+ params_rate(params) * 50,
+ params_rate(params) * 512);
+ } else {
+ if (byt_rt5651_quirk & BYT_RT5651_MCLK_25MHZ) {
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ RT5651_PLL1_S_MCLK,
+ 25000000,
+ params_rate(params) * 512);
+ } else {
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ RT5651_PLL1_S_MCLK,
+ 19200000,
+ params_rate(params) * 512);
+ }
+ }
+
if (ret < 0) {
dev_err(rtd->dev, "can't set codec pll: %d\n", ret);
return ret;
@@ -114,33 +234,60 @@ static int byt_rt5651_aif1_hw_params(struct snd_pcm_substream *substream,
return 0;
}
+static int byt_rt5651_quirk_cb(const struct dmi_system_id *id)
+{
+ byt_rt5651_quirk = (unsigned long)id->driver_data;
+ return 1;
+}
+
static const struct dmi_system_id byt_rt5651_quirk_table[] = {
+ {
+ .callback = byt_rt5651_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Max B3 PLATFORM"),
+ },
+ .driver_data = (void *)(BYT_RT5651_DMIC_MAP |
+ BYT_RT5651_DMIC_EN),
+ },
+ {
+ .callback = byt_rt5651_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "KIANO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "KIANO SlimNote 14.2"),
+ },
+ .driver_data = (void *)(BYT_RT5651_IN2_MAP),
+ },
{}
};
static int byt_rt5651_init(struct snd_soc_pcm_runtime *runtime)
{
- int ret;
struct snd_soc_card *card = runtime->card;
+ struct snd_soc_codec *codec = runtime->codec;
+ struct byt_rt5651_private *priv = snd_soc_card_get_drvdata(card);
const struct snd_soc_dapm_route *custom_map;
int num_routes;
+ int ret;
card->dapm.idle_bias_off = true;
- dmi_check_system(byt_rt5651_quirk_table);
switch (BYT_RT5651_MAP(byt_rt5651_quirk)) {
case BYT_RT5651_IN1_MAP:
custom_map = byt_rt5651_intmic_in1_map;
num_routes = ARRAY_SIZE(byt_rt5651_intmic_in1_map);
break;
- case BYT_RT5651_DMIC2_MAP:
- custom_map = byt_rt5651_intmic_dmic2_map;
- num_routes = ARRAY_SIZE(byt_rt5651_intmic_dmic2_map);
+ case BYT_RT5651_IN2_MAP:
+ custom_map = byt_rt5651_intmic_in2_map;
+ num_routes = ARRAY_SIZE(byt_rt5651_intmic_in2_map);
break;
default:
- custom_map = byt_rt5651_intmic_dmic1_map;
- num_routes = ARRAY_SIZE(byt_rt5651_intmic_dmic1_map);
+ custom_map = byt_rt5651_intmic_dmic_map;
+ num_routes = ARRAY_SIZE(byt_rt5651_intmic_dmic_map);
}
+ ret = snd_soc_dapm_add_routes(&card->dapm, custom_map, num_routes);
+ if (ret)
+ return ret;
ret = snd_soc_add_card_controls(card, byt_rt5651_controls,
ARRAY_SIZE(byt_rt5651_controls));
@@ -151,6 +298,40 @@ static int byt_rt5651_init(struct snd_soc_pcm_runtime *runtime)
snd_soc_dapm_ignore_suspend(&card->dapm, "Headphone");
snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker");
+ if (byt_rt5651_quirk & BYT_RT5651_MCLK_EN) {
+ /*
+ * The firmware might enable the clock at
+ * boot (this information may or may not
+ * be reflected in the enable clock register).
+ * To change the rate we must disable the clock
+ * first to cover these cases. Due to common
+	 * clock framework restrictions that do not allow
+	 * disabling a clock that has not been enabled,
+	 * we need to enable the clock first.
+ */
+ ret = clk_prepare_enable(priv->mclk);
+ if (!ret)
+ clk_disable_unprepare(priv->mclk);
+
+ if (byt_rt5651_quirk & BYT_RT5651_MCLK_25MHZ)
+ ret = clk_set_rate(priv->mclk, 25000000);
+ else
+ ret = clk_set_rate(priv->mclk, 19200000);
+
+ if (ret)
+ dev_err(card->dev, "unable to set MCLK rate\n");
+ }
+
+ ret = snd_soc_card_jack_new(runtime->card, "Headset",
+ SND_JACK_HEADSET, &priv->jack,
+ bytcr_jack_pins, ARRAY_SIZE(bytcr_jack_pins));
+ if (ret) {
+ dev_err(runtime->dev, "Headset jack creation failed %d\n", ret);
+ return ret;
+ }
+
+ rt5651_set_jack_detect(codec, &priv->jack);
+
return ret;
}
@@ -253,19 +434,11 @@ static struct snd_soc_dai_link byt_rt5651_dais[] = {
.dpcm_playback = 1,
.ops = &byt_rt5651_aif1_ops,
},
- [MERR_DPCM_COMPR] = {
- .name = "Compressed Port",
- .stream_name = "Compress",
- .cpu_dai_name = "compress-cpu-dai",
- .codec_dai_name = "snd-soc-dummy-dai",
- .codec_name = "snd-soc-dummy",
- .platform_name = "sst-mfld-platform",
- },
/* CODEC<->CODEC link */
/* back ends */
{
.name = "SSP2-Codec",
- .id = 1,
+ .id = 0,
.cpu_dai_name = "ssp2-port",
.platform_name = "sst-mfld-platform",
.no_pcm = 1,
@@ -296,13 +469,65 @@ static struct snd_soc_card byt_rt5651_card = {
.fully_routed = true,
};
+static char byt_rt5651_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+
static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
{
+ struct byt_rt5651_private *priv;
+ struct snd_soc_acpi_mach *mach;
+ const char *i2c_name = NULL;
int ret_val = 0;
+ int dai_index = 0;
+ int i;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_ATOMIC);
+ if (!priv)
+ return -ENOMEM;
/* register the soc card */
byt_rt5651_card.dev = &pdev->dev;
+ mach = byt_rt5651_card.dev->platform_data;
+ snd_soc_card_set_drvdata(&byt_rt5651_card, priv);
+
+ /* fix index of codec dai */
+ for (i = 0; i < ARRAY_SIZE(byt_rt5651_dais); i++) {
+ if (!strcmp(byt_rt5651_dais[i].codec_name, "i2c-10EC5651:00")) {
+ dai_index = i;
+ break;
+ }
+ }
+
+ /* fixup codec name based on HID */
+ i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+ if (i2c_name) {
+ snprintf(byt_rt5651_codec_name, sizeof(byt_rt5651_codec_name),
+ "%s%s", "i2c-", i2c_name);
+
+ byt_rt5651_dais[dai_index].codec_name = byt_rt5651_codec_name;
+ }
+
+ /* check quirks before creating card */
+ dmi_check_system(byt_rt5651_quirk_table);
+ log_quirks(&pdev->dev);
+
+ if (byt_rt5651_quirk & BYT_RT5651_MCLK_EN) {
+ priv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+		if (IS_ERR(priv->mclk)) {
+			ret_val = PTR_ERR(priv->mclk);
+			dev_err(&pdev->dev,
+				"Failed to get MCLK from pmc_plt_clk_3: %ld\n",
+				PTR_ERR(priv->mclk));
+ /*
+ * Fall back to bit clock usage for -ENOENT (clock not
+ * available likely due to missing dependencies), bail
+ * for all other errors, including -EPROBE_DEFER
+ */
+ if (ret_val != -ENOENT)
+ return ret_val;
+ byt_rt5651_quirk &= ~BYT_RT5651_MCLK_EN;
+ }
+ }
+
ret_val = devm_snd_soc_register_card(&pdev->dev, &byt_rt5651_card);
if (ret_val) {
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index 20755ecc7f9e..d3e1c7e12004 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -35,15 +36,48 @@
#define CHT_CODEC_DAI "HiFi"
struct cht_mc_private {
+ struct clk *mclk;
struct snd_soc_jack jack;
bool ts3a227e_present;
};
+static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
+ struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
+ int ret;
+
+ codec_dai = snd_soc_card_get_codec_dai(card, CHT_CODEC_DAI);
+ if (!codec_dai) {
+ dev_err(card->dev, "Codec dai not found; Unable to set platform clock\n");
+ return -EIO;
+ }
+
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ ret = clk_prepare_enable(ctx->mclk);
+ if (ret < 0) {
+ dev_err(card->dev,
+ "could not configure MCLK state");
+ return ret;
+ }
+ } else {
+ clk_disable_unprepare(ctx->mclk);
+ }
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Int Mic", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ platform_clock_control, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
};
static const struct snd_soc_dapm_route cht_audio_map[] = {
@@ -60,6 +94,10 @@ static const struct snd_soc_dapm_route cht_audio_map[] = {
{"codec_in0", NULL, "ssp2 Rx" },
{"codec_in1", NULL, "ssp2 Rx" },
{"ssp2 Rx", NULL, "HiFi Capture"},
+ {"Headphone", NULL, "Platform Clock"},
+ {"Headset Mic", NULL, "Platform Clock"},
+ {"Int Mic", NULL, "Platform Clock"},
+ {"Ext Spk", NULL, "Platform Clock"},
};
static const struct snd_kcontrol_new cht_mc_controls[] = {
@@ -109,6 +147,40 @@ static struct notifier_block cht_jack_nb = {
.notifier_call = cht_ti_jack_event,
};
+static struct snd_soc_jack_pin hs_jack_pins[] = {
+ {
+ .pin = "Headphone",
+ .mask = SND_JACK_HEADPHONE,
+ },
+ {
+ .pin = "Headset Mic",
+ .mask = SND_JACK_MICROPHONE,
+ },
+};
+
+static struct snd_soc_jack_gpio hs_jack_gpios[] = {
+ {
+ .name = "hp",
+ .report = SND_JACK_HEADPHONE | SND_JACK_LINEOUT,
+ .debounce_time = 200,
+ },
+ {
+ .name = "mic",
+ .invert = 1,
+ .report = SND_JACK_MICROPHONE,
+ .debounce_time = 200,
+ },
+};
+
+static const struct acpi_gpio_params hp_gpios = { 0, 0, false };
+static const struct acpi_gpio_params mic_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_max98090_gpios[] = {
+ { "hp-gpios", &hp_gpios, 1 },
+ { "mic-gpios", &mic_gpios, 1 },
+ {},
+};
+
static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
{
int ret;
@@ -116,30 +188,55 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
struct snd_soc_jack *jack = &ctx->jack;
- /**
- * TI supports 4 butons headset detection
- * KEY_MEDIA
- * KEY_VOICECOMMAND
- * KEY_VOLUMEUP
- * KEY_VOLUMEDOWN
- */
- if (ctx->ts3a227e_present)
- jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3;
- else
- jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE;
+ if (ctx->ts3a227e_present) {
+ /*
+ * The jack has already been created in the
+ * cht_max98090_headset_init() function.
+ */
+ snd_soc_jack_notifier_register(jack, &cht_jack_nb);
+ return 0;
+ }
- ret = snd_soc_card_jack_new(runtime->card, "Headset Jack",
- jack_type, jack, NULL, 0);
+ jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE;
+ ret = snd_soc_card_jack_new(runtime->card, "Headset Jack",
+ jack_type, jack,
+ hs_jack_pins, ARRAY_SIZE(hs_jack_pins));
if (ret) {
dev_err(runtime->dev, "Headset Jack creation failed %d\n", ret);
return ret;
}
- if (ctx->ts3a227e_present)
- snd_soc_jack_notifier_register(jack, &cht_jack_nb);
+ ret = snd_soc_jack_add_gpiods(runtime->card->dev->parent, jack,
+ ARRAY_SIZE(hs_jack_gpios),
+ hs_jack_gpios);
+ if (ret) {
+ /*
+ * flag error but don't bail if jack detect is broken
+ * due to platform issues or bad BIOS/configuration
+ */
+ dev_err(runtime->dev,
+ "jack detection gpios not added, error %d\n", ret);
+ }
+
+ /*
+ * The firmware might enable the clock at
+ * boot (this information may or may not
+ * be reflected in the enable clock register).
+ * To change the rate we must disable the clock
+ * first to cover these cases. Due to common
+	 * clock framework restrictions that do not allow
+	 * disabling a clock that has not been enabled,
+	 * we need to enable the clock first.
+ */
+ ret = clk_prepare_enable(ctx->mclk);
+ if (!ret)
+ clk_disable_unprepare(ctx->mclk);
+
+ ret = clk_set_rate(ctx->mclk, CHT_PLAT_CLK_3_HZ);
+
+ if (ret)
+ dev_err(runtime->dev, "unable to set MCLK rate\n");
return ret;
}
@@ -160,7 +257,7 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
return ret;
}
- fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF
+ fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBS_CFS;
ret = snd_soc_dai_set_fmt(rtd->cpu_dai, fmt);
@@ -173,8 +270,8 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
rate->min = rate->max = 48000;
channels->min = channels->max = 2;
- /* set SSP2 to 24-bit */
- params_set_format(params, SNDRV_PCM_FORMAT_S24_LE);
+ /* set SSP2 to 16-bit */
+ params_set_format(params, SNDRV_PCM_FORMAT_S16_LE);
return 0;
}
@@ -188,8 +285,29 @@ static int cht_max98090_headset_init(struct snd_soc_component *component)
{
struct snd_soc_card *card = component->card;
struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
+ struct snd_soc_jack *jack = &ctx->jack;
+ int jack_type;
+ int ret;
- return ts3a227e_enable_jack_detect(component, &ctx->jack);
+ /*
+	 * TI supports 4-button headset detection
+ * KEY_MEDIA
+ * KEY_VOICECOMMAND
+ * KEY_VOLUMEUP
+ * KEY_VOLUMEDOWN
+ */
+ jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3;
+
+ ret = snd_soc_card_jack_new(card, "Headset Jack", jack_type,
+ jack, NULL, 0);
+ if (ret) {
+ dev_err(card->dev, "Headset Jack creation failed %d\n", ret);
+ return ret;
+ }
+
+ return ts3a227e_enable_jack_detect(component, jack);
}
static const struct snd_soc_ops cht_aif1_ops = {
@@ -232,18 +350,10 @@ static struct snd_soc_dai_link cht_dailink[] = {
.dpcm_playback = 1,
.ops = &cht_aif1_ops,
},
- [MERR_DPCM_COMPR] = {
- .name = "Compressed Port",
- .stream_name = "Compress",
- .cpu_dai_name = "compress-cpu-dai",
- .codec_dai_name = "snd-soc-dummy-dai",
- .codec_name = "snd-soc-dummy",
- .platform_name = "sst-mfld-platform",
- },
/* back ends */
{
.name = "SSP2-Codec",
- .id = 1,
+ .id = 0,
.cpu_dai_name = "ssp2-port",
.platform_name = "sst-mfld-platform",
.no_pcm = 1,
@@ -277,6 +387,7 @@ static struct snd_soc_card snd_soc_card_cht = {
static int snd_cht_mc_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret_val = 0;
struct cht_mc_private *drv;
@@ -289,11 +400,25 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
/* no need probe TI jack detection chip */
snd_soc_card_cht.aux_dev = NULL;
snd_soc_card_cht.num_aux_devs = 0;
+
+ ret_val = devm_acpi_dev_add_driver_gpios(dev->parent,
+ acpi_max98090_gpios);
+ if (ret_val)
+ dev_dbg(dev, "Unable to add GPIO mapping table\n");
}
/* register the soc card */
snd_soc_card_cht.dev = &pdev->dev;
snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
+
+ drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+ if (IS_ERR(drv->mclk)) {
+ dev_err(&pdev->dev,
+ "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
+ PTR_ERR(drv->mclk));
+ return PTR_ERR(drv->mclk);
+ }
+
ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
if (ret_val) {
dev_err(&pdev->dev,
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 5bcde01d15e6..18d129caa974 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -21,20 +21,20 @@
*/
#include <linux/module.h>
-#include <linux/acpi.h>
#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/platform_sst_audio.h>
-#include <linux/clk.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/jack.h>
+#include <sound/soc-acpi.h>
#include "../../codecs/rt5645.h"
#include "../atom/sst-atom-controls.h"
-#include "../common/sst-acpi.h"
#define CHT_PLAT_CLK_3_HZ 19200000
#define CHT_CODEC_DAI1 "rt5645-aif1"
@@ -53,7 +53,7 @@ struct cht_mc_private {
struct clk *mclk;
};
-#define CHT_RT5645_MAP(quirk) ((quirk) & 0xff)
+#define CHT_RT5645_MAP(quirk) ((quirk) & GENMASK(7, 0))
#define CHT_RT5645_SSP2_AIF2 BIT(16) /* default is using AIF1 */
#define CHT_RT5645_SSP0_AIF1 BIT(17)
#define CHT_RT5645_SSP0_AIF2 BIT(18)
@@ -70,21 +70,6 @@ static void log_quirks(struct device *dev)
dev_info(dev, "quirk SSP0_AIF2 enabled");
}
-static inline struct snd_soc_dai *cht_get_codec_dai(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd;
-
- list_for_each_entry(rtd, &card->rtd_list, list) {
- if (!strncmp(rtd->codec_dai->name, CHT_CODEC_DAI1,
- strlen(CHT_CODEC_DAI1)))
- return rtd->codec_dai;
- if (!strncmp(rtd->codec_dai->name, CHT_CODEC_DAI2,
- strlen(CHT_CODEC_DAI2)))
- return rtd->codec_dai;
- }
- return NULL;
-}
-
static int platform_clock_control(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
@@ -94,20 +79,21 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
int ret;
- codec_dai = cht_get_codec_dai(card);
+ codec_dai = snd_soc_card_get_codec_dai(card, CHT_CODEC_DAI1);
+ if (!codec_dai)
+ codec_dai = snd_soc_card_get_codec_dai(card, CHT_CODEC_DAI2);
+
if (!codec_dai) {
dev_err(card->dev, "Codec dai not found; Unable to set platform clock\n");
return -EIO;
}
if (SND_SOC_DAPM_EVENT_ON(event)) {
- if (ctx->mclk) {
- ret = clk_prepare_enable(ctx->mclk);
- if (ret < 0) {
- dev_err(card->dev,
- "could not configure MCLK state");
- return ret;
- }
+ ret = clk_prepare_enable(ctx->mclk);
+ if (ret < 0) {
+ dev_err(card->dev,
+ "could not configure MCLK state");
+ return ret;
}
} else {
/* Set codec sysclk source to its internal clock because codec PLL will
@@ -122,8 +108,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
return ret;
}
- if (ctx->mclk)
- clk_disable_unprepare(ctx->mclk);
+ clk_disable_unprepare(ctx->mclk);
}
return 0;
@@ -258,11 +243,11 @@ static const struct dmi_system_id cht_rt5645_quirk_table[] = {
static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
{
- int ret;
- int jack_type;
- struct snd_soc_codec *codec = runtime->codec;
struct snd_soc_card *card = runtime->card;
struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
+ struct snd_soc_codec *codec = runtime->codec;
+ int jack_type;
+ int ret;
if ((cht_rt5645_quirk & CHT_RT5645_SSP2_AIF2) ||
(cht_rt5645_quirk & CHT_RT5645_SSP0_AIF2)) {
@@ -320,26 +305,26 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
rt5645_set_jack_detect(codec, &ctx->jack, &ctx->jack, &ctx->jack);
- if (ctx->mclk) {
- /*
- * The firmware might enable the clock at
- * boot (this information may or may not
- * be reflected in the enable clock register).
- * To change the rate we must disable the clock
- * first to cover these cases. Due to common
- * clock framework restrictions that do not allow
- * to disable a clock that has not been enabled,
- * we need to enable the clock first.
- */
- ret = clk_prepare_enable(ctx->mclk);
- if (!ret)
- clk_disable_unprepare(ctx->mclk);
- ret = clk_set_rate(ctx->mclk, CHT_PLAT_CLK_3_HZ);
+ /*
+ * The firmware might enable the clock at
+ * boot (this information may or may not
+ * be reflected in the enable clock register).
+ * To change the rate we must disable the clock
+ * first to cover these cases. Due to common
+	 * clock framework restrictions that do not allow
+	 * disabling a clock that has not been enabled,
+	 * we need to enable the clock first.
+ */
+ ret = clk_prepare_enable(ctx->mclk);
+ if (!ret)
+ clk_disable_unprepare(ctx->mclk);
+
+ ret = clk_set_rate(ctx->mclk, CHT_PLAT_CLK_3_HZ);
+
+ if (ret)
+ dev_err(runtime->dev, "unable to set MCLK rate\n");
- if (ret)
- dev_err(runtime->dev, "unable to set MCLK rate\n");
- }
return ret;
}
@@ -460,19 +445,11 @@ static struct snd_soc_dai_link cht_dailink[] = {
.dpcm_playback = 1,
.ops = &cht_aif1_ops,
},
- [MERR_DPCM_COMPR] = {
- .name = "Compressed Port",
- .stream_name = "Compress",
- .cpu_dai_name = "compress-cpu-dai",
- .codec_dai_name = "snd-soc-dummy-dai",
- .codec_name = "snd-soc-dummy",
- .platform_name = "sst-mfld-platform",
- },
/* CODEC<->CODEC link */
/* back ends */
{
.name = "SSP2-Codec",
- .id = 1,
+ .id = 0,
.cpu_dai_name = "ssp2-port",
.platform_name = "sst-mfld-platform",
.no_pcm = 1,
@@ -545,15 +522,15 @@ struct acpi_chan_package { /* ACPICA seems to require 64 bit integers */
static int snd_cht_mc_probe(struct platform_device *pdev)
{
- int ret_val = 0;
- int i;
- struct cht_mc_private *drv;
struct snd_soc_card *card = snd_soc_cards[0].soc_card;
- struct sst_acpi_mach *mach;
+ struct snd_soc_acpi_mach *mach;
+ struct cht_mc_private *drv;
const char *i2c_name = NULL;
- int dai_index = 0;
bool found = false;
bool is_bytcr = false;
+ int dai_index = 0;
+ int ret_val = 0;
+ int i;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_ATOMIC);
if (!drv)
@@ -589,8 +566,8 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = sst_acpi_find_name_from_hid(mach->id);
- if (i2c_name != NULL) {
+ i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+ if (i2c_name) {
snprintf(cht_rt5645_codec_name, sizeof(cht_rt5645_codec_name),
"%s%s", "i2c-", i2c_name);
cht_dailink[dai_index].codec_name = cht_rt5645_codec_name;
@@ -622,7 +599,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
/* format specified: 2 64-bit integers */
struct acpi_buffer format = {sizeof("NN"), "NN"};
struct acpi_buffer state = {0, NULL};
- struct sst_acpi_package_context pkg_ctx;
+ struct snd_soc_acpi_package_context pkg_ctx;
bool pkg_found = false;
state.length = sizeof(chan_package);
@@ -634,7 +611,8 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
pkg_ctx.state = &state;
pkg_ctx.data_valid = false;
- pkg_found = sst_acpi_find_package_from_hid(mach->id, &pkg_ctx);
+ pkg_found = snd_soc_acpi_find_package_from_hid(mach->id,
+ &pkg_ctx);
if (pkg_found) {
if (chan_package.aif_value == 1) {
dev_info(&pdev->dev, "BIOS Routing: AIF1 connected\n");
@@ -682,14 +660,12 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
cht_rt5645_cpu_dai_name;
}
- if (is_valleyview()) {
- drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
- if (IS_ERR(drv->mclk)) {
- dev_err(&pdev->dev,
- "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
- PTR_ERR(drv->mclk));
- return PTR_ERR(drv->mclk);
- }
+ drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+ if (IS_ERR(drv->mclk)) {
+ dev_err(&pdev->dev,
+ "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
+ PTR_ERR(drv->mclk));
+ return PTR_ERR(drv->mclk);
}
snd_soc_card_set_drvdata(card, drv);
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index f597d5582223..f8f21eee9b2d 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -20,14 +20,14 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
-#include <asm/cpu_device_id.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/jack.h>
+#include <sound/soc-acpi.h>
#include "../../codecs/rt5670.h"
#include "../atom/sst-atom-controls.h"
-#include "../common/sst-acpi.h"
+
/* The platform clock #3 outputs 19.2Mhz clock to codec as I2S MCLK */
#define CHT_PLAT_CLK_3_HZ 19200000
@@ -51,18 +51,6 @@ static struct snd_soc_jack_pin cht_bsw_headset_pins[] = {
},
};
-static inline struct snd_soc_dai *cht_get_codec_dai(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd;
-
- list_for_each_entry(rtd, &card->rtd_list, list) {
- if (!strncmp(rtd->codec_dai->name, CHT_CODEC_DAI,
- strlen(CHT_CODEC_DAI)))
- return rtd->codec_dai;
- }
- return NULL;
-}
-
static int platform_clock_control(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
@@ -72,7 +60,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
int ret;
- codec_dai = cht_get_codec_dai(card);
+ codec_dai = snd_soc_card_get_codec_dai(card, CHT_CODEC_DAI);
if (!codec_dai) {
dev_err(card->dev, "Codec dai not found; Unable to set platform clock\n");
return -EIO;
@@ -315,20 +303,12 @@ static struct snd_soc_dai_link cht_dailink[] = {
.dpcm_playback = 1,
.ops = &cht_aif1_ops,
},
- [MERR_DPCM_COMPR] = {
- .name = "Compressed Port",
- .stream_name = "Compress",
- .cpu_dai_name = "compress-cpu-dai",
- .codec_dai_name = "snd-soc-dummy-dai",
- .codec_name = "snd-soc-dummy",
- .platform_name = "sst-mfld-platform",
- },
/* Back End DAI links */
{
/* SSP2 - Codec */
.name = "SSP2-Codec",
- .id = 1,
+ .id = 0,
.cpu_dai_name = "ssp2-port",
.platform_name = "sst-mfld-platform",
.no_pcm = 1,
@@ -348,9 +328,11 @@ static struct snd_soc_dai_link cht_dailink[] = {
static int cht_suspend_pre(struct snd_soc_card *card)
{
struct snd_soc_component *component;
+ struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
list_for_each_entry(component, &card->component_dev_list, card_list) {
- if (!strcmp(component->name, "i2c-10EC5670:00")) {
+ if (!strncmp(component->name,
+ ctx->codec_name, sizeof(ctx->codec_name))) {
struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
dev_dbg(codec->dev, "disabling jack detect before going to suspend.\n");
@@ -364,9 +346,11 @@ static int cht_suspend_pre(struct snd_soc_card *card)
static int cht_resume_post(struct snd_soc_card *card)
{
struct snd_soc_component *component;
+ struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
list_for_each_entry(component, &card->component_dev_list, card_list) {
- if (!strcmp(component->name, "i2c-10EC5670:00")) {
+ if (!strncmp(component->name,
+ ctx->codec_name, sizeof(ctx->codec_name))) {
struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
dev_dbg(codec->dev, "enabling jack detect for resume.\n");
@@ -380,7 +364,7 @@ static int cht_resume_post(struct snd_soc_card *card)
/* SoC card */
static struct snd_soc_card snd_soc_card_cht = {
- .name = "cherrytrailcraudio",
+ .name = "cht-bsw-rt5672",
.owner = THIS_MODULE,
.dai_link = cht_dailink,
.num_links = ARRAY_SIZE(cht_dailink),
@@ -394,25 +378,13 @@ static struct snd_soc_card snd_soc_card_cht = {
.resume_post = cht_resume_post,
};
-static bool is_valleyview(void)
-{
- static const struct x86_cpu_id cpu_ids[] = {
- { X86_VENDOR_INTEL, 6, 55 }, /* Valleyview, Bay Trail */
- {}
- };
-
- if (!x86_match_cpu(cpu_ids))
- return false;
- return true;
-}
-
#define RT5672_I2C_DEFAULT "i2c-10EC5670:00"
static int snd_cht_mc_probe(struct platform_device *pdev)
{
int ret_val = 0;
struct cht_mc_private *drv;
- struct sst_acpi_mach *mach = pdev->dev.platform_data;
+ struct snd_soc_acpi_mach *mach = pdev->dev.platform_data;
const char *i2c_name;
int i;
@@ -424,7 +396,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
/* fixup codec name based on HID */
if (mach) {
- i2c_name = sst_acpi_find_name_from_hid(mach->id);
+ i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
if (i2c_name) {
snprintf(drv->codec_name, sizeof(drv->codec_name),
"i2c-%s", i2c_name);
@@ -439,14 +411,12 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
}
}
- if (is_valleyview()) {
- drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
- if (IS_ERR(drv->mclk)) {
- dev_err(&pdev->dev,
- "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
- PTR_ERR(drv->mclk));
- return PTR_ERR(drv->mclk);
- }
+ drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+ if (IS_ERR(drv->mclk)) {
+ dev_err(&pdev->dev,
+ "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
+ PTR_ERR(drv->mclk));
+ return PTR_ERR(drv->mclk);
}
snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
index 7f7607420706..6f9a8bcf20f3 100644
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -17,6 +17,7 @@
* GNU General Public License for more details.
*/
+#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/core.h>
@@ -208,6 +209,7 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
int ret;
struct kbl_rt5663_private *ctx = snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_jack *jack;
/*
* Headset buttons map to the google Reference headset.
@@ -221,6 +223,13 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
dev_err(rtd->dev, "Headset Jack creation failed %d\n", ret);
return ret;
}
+
+ jack = &ctx->kabylake_headset;
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+
rt5663_set_jack_detect(codec, &ctx->kabylake_headset);
return ret;
}
@@ -341,13 +350,28 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_soc_dpcm *dpcm = container_of(
+ params, struct snd_soc_dpcm, hw_params);
+ struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
+ struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
- /* The ADSP will convert the FE rate to 48k, stereo */
- rate->min = rate->max = 48000;
- channels->min = channels->max = 2;
- /* set SSP1 to 24 bit */
- snd_mask_none(fmt);
- snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ /*
+ * The ADSP will convert the FE rate to 48k, stereo, 24 bit
+ */
+ if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
+ !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
+ !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+ snd_mask_none(fmt);
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ }
+ /*
+	 * The speaker on SSP0 supports S16_LE and not S24_LE,
+	 * thus changing the mask here
+ */
+ if (!strcmp(be_dai_link->name, "SSP0-Codec"))
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S16_LE);
return 0;
}
@@ -390,6 +414,43 @@ static int kabylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
}
+static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int ret = 0, j;
+
+ for (j = 0; j < rtd->num_codecs; j++) {
+ struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
+
+ if (!strcmp(codec_dai->component->name, MAXIM_DEV0_NAME)) {
+ /*
+			 * Use channels 4 and 5 for the first amp
+ */
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x30, 3, 8, 16);
+ if (ret < 0) {
+ dev_err(rtd->dev, "set TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ if (!strcmp(codec_dai->component->name, MAXIM_DEV1_NAME)) {
+ /*
+			 * Use channels 6 and 7 for the second amp
+ */
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xC0, 3, 8, 16);
+ if (ret < 0) {
+ dev_err(rtd->dev, "set TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ }
+ return ret;
+}
+
+static struct snd_soc_ops kabylake_ssp0_ops = {
+ .hw_params = kabylake_ssp0_hw_params,
+};
+
static unsigned int channels_dmic[] = {
2, 4,
};
@@ -593,12 +654,13 @@ static struct snd_soc_dai_link kabylake_dais[] = {
.no_pcm = 1,
.codecs = max98927_codec_components,
.num_codecs = ARRAY_SIZE(max98927_codec_components),
- .dai_fmt = SND_SOC_DAIFMT_I2S |
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
.ignore_pmdown_time = 1,
.be_hw_params_fixup = kabylake_ssp_fixup,
.dpcm_playback = 1,
+ .ops = &kabylake_ssp0_ops,
},
{
/* SSP1 - Codec */
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index 88ff54220007..6072164f2d43 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -302,6 +302,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
* The ADSP will convert the FE rate to 48k, stereo, 24 bit
*/
if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
+ !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
!strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
rate->min = rate->max = 48000;
channels->min = channels->max = 2;
@@ -604,6 +605,8 @@ static int kabylake_card_late_probe(struct snd_soc_card *card)
list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
codec = pcm->codec_dai->codec;
+ snprintf(jack_name, sizeof(jack_name),
+ "HDMI/DP,pcm=%d Jack", pcm->device);
err = snd_soc_card_jack_new(card, jack_name,
SND_JACK_AVOUT, &ctx->kabylake_hdmi[i],
NULL, 0);
diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
index 5ed0aa27b467..1b5a689dc99b 100644
--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
@@ -54,20 +54,6 @@ enum {
SKL_DPCM_AUDIO_HDMI3_PB,
};
-static inline struct snd_soc_dai *skl_get_codec_dai(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd;
-
- list_for_each_entry(rtd, &card->rtd_list, list) {
-
- if (!strncmp(rtd->codec_dai->name, SKL_NUVOTON_CODEC_DAI,
- strlen(SKL_NUVOTON_CODEC_DAI)))
- return rtd->codec_dai;
- }
-
- return NULL;
-}
-
static int platform_clock_control(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
@@ -76,7 +62,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
struct snd_soc_dai *codec_dai;
int ret;
- codec_dai = skl_get_codec_dai(card);
+ codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI);
if (!codec_dai) {
dev_err(card->dev, "Codec dai not found; Unable to set platform clock\n");
return -EIO;
diff --git a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
index 01b8b140bb08..7bea4bc77481 100644
--- a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
+++ b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
@@ -57,20 +57,6 @@ enum {
SKL_DPCM_AUDIO_HDMI3_PB,
};
-static inline struct snd_soc_dai *skl_get_codec_dai(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd;
-
- list_for_each_entry(rtd, &card->rtd_list, list) {
-
- if (!strncmp(rtd->codec_dai->name, SKL_NUVOTON_CODEC_DAI,
- strlen(SKL_NUVOTON_CODEC_DAI)))
- return rtd->codec_dai;
- }
-
- return NULL;
-}
-
static const struct snd_kcontrol_new skylake_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone Jack"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
@@ -86,7 +72,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
struct snd_soc_dai *codec_dai;
int ret;
- codec_dai = skl_get_codec_dai(card);
+ codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI);
if (!codec_dai) {
dev_err(card->dev, "Codec dai not found\n");
return -EIO;
diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
index 0e029f354f6b..7379d8830c39 100644
--- a/sound/soc/intel/common/Makefile
+++ b/sound/soc/intel/common/Makefile
@@ -1,11 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
snd-soc-sst-dsp-objs := sst-dsp.o
snd-soc-sst-acpi-objs := sst-acpi.o
-snd-soc-sst-match-objs := sst-match-acpi.o
snd-soc-sst-ipc-objs := sst-ipc.o
snd-soc-sst-firmware-objs := sst-firmware.o
+snd-soc-acpi-intel-match-objs := soc-acpi-intel-byt-match.o soc-acpi-intel-cht-match.o soc-acpi-intel-hsw-bdw-match.o
obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o
-obj-$(CONFIG_SND_SOC_INTEL_SST_MATCH) += snd-soc-sst-match.o
obj-$(CONFIG_SND_SOC_INTEL_SST_FIRMWARE) += snd-soc-sst-firmware.o
+obj-$(CONFIG_SND_SOC_ACPI_INTEL_MATCH) += snd-soc-acpi-intel-match.o
diff --git a/sound/soc/intel/common/soc-acpi-intel-byt-match.c b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
new file mode 100644
index 000000000000..bfe1ca68a542
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
@@ -0,0 +1,196 @@
+/*
+ * soc-acpi-intel-byt-match.c - tables and support for BYT ACPI enumeration.
+ *
+ * Copyright (c) 2017, Intel Corporation.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/dmi.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+
+static unsigned long byt_machine_id;
+
+#define BYT_THINKPAD_10 1
+
+static int byt_thinkpad10_quirk_cb(const struct dmi_system_id *id)
+{
+ byt_machine_id = BYT_THINKPAD_10;
+ return 1;
+}
+
+
+static const struct dmi_system_id byt_table[] = {
+ {
+ .callback = byt_thinkpad10_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 10"),
+ },
+ },
+ {
+ .callback = byt_thinkpad10_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Tablet B"),
+ },
+ },
+ {
+ .callback = byt_thinkpad10_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Miix 2 10"),
+ },
+ },
+ { }
+};
+
+static struct snd_soc_acpi_mach byt_thinkpad_10 = {
+ .id = "10EC5640",
+ .drv_name = "cht-bsw-rt5672",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "cht-bsw",
+ .sof_fw_filename = "intel/reef-byt.ri",
+ .sof_tplg_filename = "intel/reef-byt-rt5670.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+};
+
+static struct snd_soc_acpi_mach *byt_quirk(void *arg)
+{
+ struct snd_soc_acpi_mach *mach = arg;
+
+ dmi_check_system(byt_table);
+
+ if (byt_machine_id == BYT_THINKPAD_10)
+ return &byt_thinkpad_10;
+ else
+ return mach;
+}
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_legacy_machines[] = {
+ {
+ .id = "10EC5640",
+ .drv_name = "byt-rt5640",
+ .fw_filename = "intel/fw_sst_0f28.bin-48kHz_i2s_master",
+ },
+ {
+ .id = "193C9890",
+ .drv_name = "byt-max98090",
+ .fw_filename = "intel/fw_sst_0f28.bin-48kHz_i2s_master",
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_baytrail_legacy_machines);
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[] = {
+ {
+ .id = "10EC5640",
+ .drv_name = "bytcr_rt5640",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcr_rt5640",
+ .machine_quirk = byt_quirk,
+ .sof_fw_filename = "intel/reef-byt.ri",
+ .sof_tplg_filename = "intel/reef-byt-rt5640.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "10EC5642",
+ .drv_name = "bytcr_rt5640",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcr_rt5640",
+ .sof_fw_filename = "intel/reef-byt.ri",
+ .sof_tplg_filename = "intel/reef-byt-rt5640.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "INTCCFFD",
+ .drv_name = "bytcr_rt5640",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcr_rt5640",
+ .sof_fw_filename = "intel/reef-byt.ri",
+ .sof_tplg_filename = "intel/reef-byt-rt5640.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "10EC5651",
+ .drv_name = "bytcr_rt5651",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcr_rt5651",
+ .sof_fw_filename = "intel/reef-byt.ri",
+ .sof_tplg_filename = "intel/reef-byt-rt5651.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "DLGS7212",
+ .drv_name = "bytcht_da7213",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcht_da7213",
+ .sof_fw_filename = "intel/reef-byt.ri",
+ .sof_tplg_filename = "intel/reef-byt-da7213.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "DLGS7213",
+ .drv_name = "bytcht_da7213",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcht_da7213",
+ .sof_fw_filename = "intel/reef-byt.ri",
+ .sof_tplg_filename = "intel/reef-byt-da7213.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ /* some Baytrail platforms rely on RT5645, use CHT machine driver */
+ {
+ .id = "10EC5645",
+ .drv_name = "cht-bsw-rt5645",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "cht-bsw",
+ .sof_fw_filename = "intel/reef-byt.ri",
+ .sof_tplg_filename = "intel/reef-byt-rt5645.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "10EC5648",
+ .drv_name = "cht-bsw-rt5645",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "cht-bsw",
+ .sof_fw_filename = "intel/reef-byt.ri",
+ .sof_tplg_filename = "intel/reef-byt-rt5645.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+	/* use CHT driver for Baytrail Chromebooks */
+ {
+ .id = "193C9890",
+ .drv_name = "cht-bsw-max98090",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "cht-bsw",
+ .sof_fw_filename = "intel/reef-byt.ri",
+ .sof_tplg_filename = "intel/reef-byt-max98090.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
+ /*
+ * This is always last in the table so that it is selected only when
+ * enabled explicitly and there is no codec-related information in SSDT
+ */
+ {
+ .id = "80860F28",
+ .drv_name = "bytcht_nocodec",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcht_nocodec",
+ },
+#endif
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_baytrail_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/common/soc-acpi-intel-cht-match.c b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
new file mode 100644
index 000000000000..b50a0d53846b
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-cht-match.c
@@ -0,0 +1,194 @@
+/*
+ * soc-acpi-intel-cht-match.c - tables and support for CHT ACPI enumeration.
+ *
+ * Copyright (c) 2017, Intel Corporation.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/dmi.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+
+static unsigned long cht_machine_id;
+
+#define CHT_SURFACE_MACH 1
+
+static int cht_surface_quirk_cb(const struct dmi_system_id *id)
+{
+ cht_machine_id = CHT_SURFACE_MACH;
+ return 1;
+}
+
+static const struct dmi_system_id cht_table[] = {
+ {
+ .callback = cht_surface_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
+ },
+ },
+ { }
+};
+
+static struct snd_soc_acpi_mach cht_surface_mach = {
+ .id = "10EC5640",
+ .drv_name = "cht-bsw-rt5645",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "cht-bsw",
+ .sof_fw_filename = "intel/reef-cht.ri",
+ .sof_tplg_filename = "intel/reef-cht-rt5645.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+};
+
+static struct snd_soc_acpi_mach *cht_quirk(void *arg)
+{
+ struct snd_soc_acpi_mach *mach = arg;
+
+ dmi_check_system(cht_table);
+
+ if (cht_machine_id == CHT_SURFACE_MACH)
+ return &cht_surface_mach;
+ else
+ return mach;
+}
+
+/* Cherryview-based platforms: CherryTrail and Braswell */
+struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[] = {
+ {
+ .id = "10EC5670",
+ .drv_name = "cht-bsw-rt5672",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "cht-bsw",
+ .sof_fw_filename = "intel/reef-cht.ri",
+ .sof_tplg_filename = "intel/reef-cht-rt5670.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "10EC5672",
+ .drv_name = "cht-bsw-rt5672",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "cht-bsw",
+ .sof_fw_filename = "intel/reef-cht.ri",
+ .sof_tplg_filename = "intel/reef-cht-rt5670.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "10EC5645",
+ .drv_name = "cht-bsw-rt5645",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "cht-bsw",
+ .sof_fw_filename = "intel/reef-cht.ri",
+ .sof_tplg_filename = "intel/reef-cht-rt5645.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "10EC5650",
+ .drv_name = "cht-bsw-rt5645",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "cht-bsw",
+ .sof_fw_filename = "intel/reef-cht.ri",
+ .sof_tplg_filename = "intel/reef-cht-rt5645.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "10EC3270",
+ .drv_name = "cht-bsw-rt5645",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "cht-bsw",
+ .sof_fw_filename = "intel/reef-cht.ri",
+ .sof_tplg_filename = "intel/reef-cht-rt5645.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "193C9890",
+ .drv_name = "cht-bsw-max98090",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "cht-bsw",
+ .sof_fw_filename = "intel/reef-cht.ri",
+ .sof_tplg_filename = "intel/reef-cht-max98090.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "DLGS7212",
+ .drv_name = "bytcht_da7213",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "bytcht_da7213",
+ .sof_fw_filename = "intel/reef-cht.ri",
+ .sof_tplg_filename = "intel/reef-cht-da7213.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "DLGS7213",
+ .drv_name = "bytcht_da7213",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "bytcht_da7213",
+ .sof_fw_filename = "intel/reef-cht.ri",
+ .sof_tplg_filename = "intel/reef-cht-da7213.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "ESSX8316",
+ .drv_name = "bytcht_es8316",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "bytcht_es8316",
+ .sof_fw_filename = "intel/reef-cht.ri",
+ .sof_tplg_filename = "intel/reef-cht-es8316.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ /* some CHT-T platforms rely on RT5640, use Baytrail machine driver */
+ {
+ .id = "10EC5640",
+ .drv_name = "bytcr_rt5640",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "bytcr_rt5640",
+ .machine_quirk = cht_quirk,
+ .sof_fw_filename = "intel/reef-cht.ri",
+ .sof_tplg_filename = "intel/reef-cht-rt5640.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ {
+ .id = "10EC3276",
+ .drv_name = "bytcr_rt5640",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "bytcr_rt5640",
+ .sof_fw_filename = "intel/reef-cht.ri",
+ .sof_tplg_filename = "intel/reef-cht-rt5640.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+ /* some CHT-T platforms rely on RT5651, use Baytrail machine driver */
+ {
+ .id = "10EC5651",
+ .drv_name = "bytcr_rt5651",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "bytcr_rt5651",
+ .sof_fw_filename = "intel/reef-cht.ri",
+ .sof_tplg_filename = "intel/reef-cht-rt5651.tplg",
+ .asoc_plat_name = "sst-mfld-platform",
+ },
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
+ /*
+ * This is always last in the table so that it is selected only when
+ * enabled explicitly and there is no codec-related information in SSDT
+ */
+ {
+ .id = "808622A8",
+ .drv_name = "bytcht_nocodec",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "bytcht_nocodec",
+ },
+#endif
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cherrytrail_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c
new file mode 100644
index 000000000000..e0e8c8c27528
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-hsw-bdw-match.c
@@ -0,0 +1,64 @@
+/*
+ * soc-acpi-intel-hsw-bdw-match.c - tables and support for ACPI enumeration.
+ *
+ * Copyright (c) 2017, Intel Corporation.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/dmi.h>
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_haswell_machines[] = {
+ {
+ .id = "INT33CA",
+ .drv_name = "haswell-audio",
+ .fw_filename = "intel/IntcSST1.bin",
+ .sof_fw_filename = "intel/reef-hsw.ri",
+ .sof_tplg_filename = "intel/reef-hsw.tplg",
+ .asoc_plat_name = "haswell-pcm-audio",
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_haswell_machines);
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[] = {
+ {
+ .id = "INT343A",
+ .drv_name = "broadwell-audio",
+ .fw_filename = "intel/IntcSST2.bin",
+ .sof_fw_filename = "intel/reef-bdw.ri",
+ .sof_tplg_filename = "intel/reef-bdw-rt286.tplg",
+ .asoc_plat_name = "haswell-pcm-audio",
+ },
+ {
+ .id = "RT5677CE",
+ .drv_name = "bdw-rt5677",
+ .fw_filename = "intel/IntcSST2.bin",
+ .sof_fw_filename = "intel/reef-bdw.ri",
+ .sof_tplg_filename = "intel/reef-bdw-rt286.tplg",
+ .asoc_plat_name = "haswell-pcm-audio",
+ },
+ {
+ .id = "INT33CA",
+ .drv_name = "haswell-audio",
+ .fw_filename = "intel/IntcSST2.bin",
+ .sof_fw_filename = "intel/reef-bdw.ri",
+ .sof_tplg_filename = "intel/reef-bdw-rt5640.tplg",
+ .asoc_plat_name = "haswell-pcm-audio",
+ },
+ {}
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_broadwell_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/common/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c
index 1285cc597b6b..cf6fbbd4e378 100644
--- a/sound/soc/intel/common/sst-acpi.c
+++ b/sound/soc/intel/common/sst-acpi.c
@@ -21,7 +21,8 @@
#include <linux/platform_device.h>
#include "sst-dsp.h"
-#include "sst-acpi.h"
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
#define SST_LPT_DSP_DMA_ADDR_OFFSET 0x0F0000
#define SST_WPT_DSP_DMA_ADDR_OFFSET 0x0FE000
@@ -30,7 +31,7 @@
/* Descriptor for setting up SST platform data */
struct sst_acpi_desc {
const char *drv_name;
- struct sst_acpi_mach *machines;
+ struct snd_soc_acpi_mach *machines;
/* Platform resource indexes. Must set to -1 if not used */
int resindex_lpe_base;
int resindex_pcicfg_base;
@@ -49,7 +50,7 @@ struct sst_acpi_priv {
struct platform_device *pdev_pcm;
struct sst_pdata sst_pdata;
struct sst_acpi_desc *desc;
- struct sst_acpi_mach *mach;
+ struct snd_soc_acpi_mach *mach;
};
static void sst_acpi_fw_cb(const struct firmware *fw, void *context)
@@ -59,7 +60,7 @@ static void sst_acpi_fw_cb(const struct firmware *fw, void *context)
struct sst_acpi_priv *sst_acpi = platform_get_drvdata(pdev);
struct sst_pdata *sst_pdata = &sst_acpi->sst_pdata;
struct sst_acpi_desc *desc = sst_acpi->desc;
- struct sst_acpi_mach *mach = sst_acpi->mach;
+ struct snd_soc_acpi_mach *mach = sst_acpi->mach;
sst_pdata->fw = fw;
if (!fw) {
@@ -85,7 +86,7 @@ static int sst_acpi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sst_acpi_priv *sst_acpi;
struct sst_pdata *sst_pdata;
- struct sst_acpi_mach *mach;
+ struct snd_soc_acpi_mach *mach;
struct sst_acpi_desc *desc;
struct resource *mmio;
int ret = 0;
@@ -99,7 +100,7 @@ static int sst_acpi_probe(struct platform_device *pdev)
return -ENODEV;
desc = (struct sst_acpi_desc *)id->driver_data;
- mach = sst_acpi_find_machine(desc->machines);
+ mach = snd_soc_acpi_find_machine(desc->machines);
if (mach == NULL) {
dev_err(dev, "No matching ASoC machine driver found\n");
return -ENODEV;
@@ -179,14 +180,9 @@ static int sst_acpi_remove(struct platform_device *pdev)
return 0;
}
-static struct sst_acpi_mach haswell_machines[] = {
- { "INT33CA", "haswell-audio", "intel/IntcSST1.bin", NULL, NULL, NULL },
- {}
-};
-
static struct sst_acpi_desc sst_acpi_haswell_desc = {
.drv_name = "haswell-pcm-audio",
- .machines = haswell_machines,
+ .machines = snd_soc_acpi_intel_haswell_machines,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = 1,
.resindex_fw_base = -1,
@@ -197,15 +193,9 @@ static struct sst_acpi_desc sst_acpi_haswell_desc = {
.dma_size = SST_LPT_DSP_DMA_SIZE,
};
-static struct sst_acpi_mach broadwell_machines[] = {
- { "INT343A", "broadwell-audio", "intel/IntcSST2.bin", NULL, NULL, NULL },
- { "RT5677CE", "bdw-rt5677", "intel/IntcSST2.bin", NULL, NULL, NULL },
- {}
-};
-
static struct sst_acpi_desc sst_acpi_broadwell_desc = {
.drv_name = "haswell-pcm-audio",
- .machines = broadwell_machines,
+ .machines = snd_soc_acpi_intel_broadwell_machines,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = 1,
.resindex_fw_base = -1,
@@ -217,15 +207,9 @@ static struct sst_acpi_desc sst_acpi_broadwell_desc = {
};
#if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI)
-static struct sst_acpi_mach baytrail_machines[] = {
- { "10EC5640", "byt-rt5640", "intel/fw_sst_0f28.bin-48kHz_i2s_master", NULL, NULL, NULL },
- { "193C9890", "byt-max98090", "intel/fw_sst_0f28.bin-48kHz_i2s_master", NULL, NULL, NULL },
- {}
-};
-
static struct sst_acpi_desc sst_acpi_baytrail_desc = {
.drv_name = "baytrail-pcm-audio",
- .machines = baytrail_machines,
+ .machines = snd_soc_acpi_intel_baytrail_legacy_machines,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = 1,
.resindex_fw_base = 2,
diff --git a/sound/soc/intel/common/sst-acpi.h b/sound/soc/intel/common/sst-acpi.h
deleted file mode 100644
index afe9b87b8bd5..000000000000
--- a/sound/soc/intel/common/sst-acpi.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (C) 2013-15, Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/stddef.h>
-#include <linux/acpi.h>
-
-struct sst_acpi_package_context {
- char *name; /* package name */
- int length; /* number of elements */
- struct acpi_buffer *format;
- struct acpi_buffer *state;
- bool data_valid;
-};
-
-#if IS_ENABLED(CONFIG_ACPI)
-/* translation fron HID to I2C name, needed for DAI codec_name */
-const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN]);
-bool sst_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
- struct sst_acpi_package_context *ctx);
-#else
-static inline const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
-{
- return NULL;
-}
-static inline bool sst_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
- struct sst_acpi_package_context *ctx)
-{
- return false;
-}
-#endif
-
-/* acpi match */
-struct sst_acpi_mach *sst_acpi_find_machine(struct sst_acpi_mach *machines);
-
-/* acpi check hid */
-bool sst_acpi_check_hid(const u8 hid[ACPI_ID_LEN]);
-
-/* Descriptor for SST ASoC machine driver */
-struct sst_acpi_mach {
- /* ACPI ID for the matching machine driver. Audio codec for instance */
- const u8 id[ACPI_ID_LEN];
- /* machine driver name */
- const char *drv_name;
- /* firmware file name */
- const char *fw_filename;
-
- /* board name */
- const char *board;
- struct sst_acpi_mach * (*machine_quirk)(void *arg);
- const void *quirk_data;
- void *pdata;
-};
-
-#define SST_ACPI_MAX_CODECS 3
-
-/**
- * struct sst_codecs: Structure to hold secondary codec information apart from
- * the matched one, this data will be passed to the quirk function to match
- * with the ACPI detected devices
- *
- * @num_codecs: number of secondary codecs used in the platform
- * @codecs: holds the codec IDs
- *
- */
-struct sst_codecs {
- int num_codecs;
- u8 codecs[SST_ACPI_MAX_CODECS][ACPI_ID_LEN];
-};
-
-/* check all codecs */
-struct sst_acpi_mach *sst_acpi_codec_list(void *arg);
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index a086c35f91bb..657afc02f1c4 100644
--- a/sound/soc/intel/common/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -19,6 +19,7 @@
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -274,7 +275,6 @@ int sst_dma_new(struct sst_dsp *sst)
struct sst_pdata *sst_pdata = sst->pdata;
struct sst_dma *dma;
struct resource mem;
- const char *dma_dev_name;
int ret = 0;
if (sst->pdata->resindex_dma_base == -1)
@@ -285,7 +285,6 @@ int sst_dma_new(struct sst_dsp *sst)
* is attached to the ADSP IP. */
switch (sst->pdata->dma_engine) {
case SST_DMA_TYPE_DW:
- dma_dev_name = "dw_dmac";
break;
default:
dev_err(sst->dev, "error: invalid DMA engine %d\n",
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index 89f70133c8e4..61b5bfa79d13 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -613,8 +613,10 @@ skip_buf_size_calc:
}
#define DMA_CONTROL_ID 5
+#define DMA_I2S_BLOB_SIZE 21
-int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
+int skl_dsp_set_dma_control(struct skl_sst *ctx, u32 *caps,
+ u32 caps_size, u32 node_id)
{
struct skl_dma_control *dma_ctrl;
struct skl_ipc_large_config_msg msg = {0};
@@ -624,24 +626,27 @@ int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
/*
* if blob size zero, then return
*/
- if (mconfig->formats_config.caps_size == 0)
+ if (caps_size == 0)
return 0;
msg.large_param_id = DMA_CONTROL_ID;
- msg.param_data_size = sizeof(struct skl_dma_control) +
- mconfig->formats_config.caps_size;
+ msg.param_data_size = sizeof(struct skl_dma_control) + caps_size;
dma_ctrl = kzalloc(msg.param_data_size, GFP_KERNEL);
if (dma_ctrl == NULL)
return -ENOMEM;
- dma_ctrl->node_id = skl_get_node_id(ctx, mconfig);
+ dma_ctrl->node_id = node_id;
- /* size in dwords */
- dma_ctrl->config_length = mconfig->formats_config.caps_size / 4;
+ /*
+ * The NHLT blob may contain additional configs along with the i2s blob.
+ * The firmware expects only the i2s blob size as config_length, so
+ * fix it to the i2s blob size (in dwords).
+ */
+ dma_ctrl->config_length = DMA_I2S_BLOB_SIZE;
- memcpy(dma_ctrl->config_data, mconfig->formats_config.caps,
- mconfig->formats_config.caps_size);
+ memcpy(dma_ctrl->config_data, caps, caps_size);
err = skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)dma_ctrl);
@@ -702,18 +707,11 @@ static void skl_set_updown_mixer_format(struct skl_sst *ctx,
struct skl_module *module = mconfig->module;
struct skl_module_iface *iface = &module->formats[mconfig->fmt_idx];
struct skl_module_fmt *fmt = &iface->outputs[0].fmt;
- int i = 0;
skl_set_base_module_format(ctx, mconfig,
(struct skl_base_cfg *)mixer_mconfig);
mixer_mconfig->out_ch_cfg = fmt->ch_cfg;
-
- /* Select F/W default coefficient */
- mixer_mconfig->coeff_sel = 0x0;
-
- /* User coeff, don't care since we are selecting F/W defaults */
- for (i = 0; i < UP_DOWN_MIXER_MAX_COEFF; i++)
- mixer_mconfig->coeff[i] = 0xDEADBEEF;
+ mixer_mconfig->ch_map = fmt->ch_map;
}
/*
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index e7d766d56c8e..d14c50a60289 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -20,6 +20,8 @@
#include <linux/pci.h>
#include "skl.h"
+#define NHLT_ACPI_HEADER_SIG "NHLT"
+
/* Unique identification for getting NHLT blobs */
static guid_t osc_guid =
GUID_INIT(0xA69F886E, 0x6CEB, 0x4594,
@@ -45,6 +47,13 @@ struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
memremap(nhlt_ptr->min_addr, nhlt_ptr->length,
MEMREMAP_WB);
ACPI_FREE(obj);
+ if (nhlt_table && (strncmp(nhlt_table->header.signature,
+ NHLT_ACPI_HEADER_SIG,
+ strlen(NHLT_ACPI_HEADER_SIG)) != 0)) {
+ memunmap(nhlt_table);
+ dev_err(dev, "NHLT ACPI header signature incorrect\n");
+ return NULL;
+ }
return nhlt_table;
}
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 2b1e513b1680..1dd97479e0c0 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -355,7 +355,8 @@ static void skl_pcm_close(struct snd_pcm_substream *substream,
}
mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
- skl_tplg_d0i3_put(skl, mconfig->d0i3_caps);
+ if (mconfig)
+ skl_tplg_d0i3_put(skl, mconfig->d0i3_caps);
kfree(dma_params);
}
@@ -652,7 +653,7 @@ static const struct snd_soc_dai_ops skl_link_dai_ops = {
.trigger = skl_link_pcm_trigger,
};
-static struct snd_soc_dai_driver skl_platform_dai[] = {
+static struct snd_soc_dai_driver skl_fe_dai[] = {
{
.name = "System Pin",
.ops = &skl_pcm_dai_ops,
@@ -796,8 +797,10 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
.sig_bits = 32,
},
},
+};
/* BE CPU Dais */
+static struct snd_soc_dai_driver skl_platform_dai[] = {
{
.name = "SSP0 Pin",
.ops = &skl_be_ssp_dai_ops,
@@ -975,6 +978,14 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
},
};
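+/* Topology dai_load callback: DAIs created from topology get the standard PCM ops */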
+int skl_dai_load(struct snd_soc_component *cmp,
+ struct snd_soc_dai_driver *pcm_dai)
+{
+ pcm_dai->ops = &skl_pcm_dai_ops;
+
+ return 0;
+}
+
static int skl_platform_open(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -1362,6 +1373,8 @@ int skl_platform_register(struct device *dev)
int ret;
struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
struct skl *skl = ebus_to_skl(ebus);
+ struct snd_soc_dai_driver *dais;
+ int num_dais = ARRAY_SIZE(skl_platform_dai);
INIT_LIST_HEAD(&skl->ppl_list);
INIT_LIST_HEAD(&skl->bind_list);
@@ -1371,14 +1384,38 @@ int skl_platform_register(struct device *dev)
dev_err(dev, "soc platform registration failed %d\n", ret);
return ret;
}
+
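+ /*
+ * Keep a private copy of the BE DAIs; the generic FE DAIs are appended
+ * below unless the topology provides its own PCM DAIs.
+ */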
+ skl->dais = kmemdup(skl_platform_dai, sizeof(skl_platform_dai),
+ GFP_KERNEL);
+ if (!skl->dais) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (!skl->use_tplg_pcm) {
+ dais = krealloc(skl->dais, sizeof(skl_fe_dai) +
+ sizeof(skl_platform_dai), GFP_KERNEL);
+ if (!dais) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ skl->dais = dais;
+ memcpy(&skl->dais[ARRAY_SIZE(skl_platform_dai)], skl_fe_dai,
+ sizeof(skl_fe_dai));
+ num_dais += ARRAY_SIZE(skl_fe_dai);
+ }
+
ret = snd_soc_register_component(dev, &skl_component,
- skl_platform_dai,
- ARRAY_SIZE(skl_platform_dai));
+ skl->dais, num_dais);
if (ret) {
dev_err(dev, "soc component registration failed %d\n", ret);
- snd_soc_unregister_platform(dev);
+ goto err;
}
+ return 0;
+err:
+ snd_soc_unregister_platform(dev);
return ret;
}
@@ -1398,5 +1435,7 @@ int skl_platform_unregister(struct device *dev)
snd_soc_unregister_component(dev);
snd_soc_unregister_platform(dev);
+ kfree(skl->dais);
+
return 0;
}
diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
index 369ef7ce981c..8ff89280d9fd 100644
--- a/sound/soc/intel/skylake/skl-sst-utils.c
+++ b/sound/soc/intel/skylake/skl-sst-utils.c
@@ -251,6 +251,7 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
struct uuid_module *module;
struct firmware stripped_fw;
unsigned int safe_file;
+ int ret = 0;
/* Get the FW pointer to derive ADSP header */
stripped_fw.data = fw->data;
@@ -299,8 +300,10 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
for (i = 0; i < num_entry; i++, mod_entry++) {
module = kzalloc(sizeof(*module), GFP_KERNEL);
- if (!module)
- return -ENOMEM;
+ if (!module) {
+ ret = -ENOMEM;
+ goto free_uuid_list;
+ }
uuid_bin = (uuid_le *)mod_entry->uuid.id;
memcpy(&module->uuid, uuid_bin, sizeof(module->uuid));
@@ -311,8 +314,8 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
size = sizeof(int) * mod_entry->instance_max_count;
module->instance_id = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
if (!module->instance_id) {
- kfree(module);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_uuid_list;
}
list_add_tail(&module->list, &skl->uuid_list);
@@ -323,6 +326,10 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
}
return 0;
+
+free_uuid_list:
+ skl_freeup_uuid_list(skl);
+ return ret;
}
void skl_freeup_uuid_list(struct skl_sst *ctx)
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 22f768ca3c73..a072bcf209d2 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -2036,21 +2036,45 @@ static int skl_tplg_add_pipe(struct device *dev,
return 0;
}
-static int skl_tplg_fill_pin(struct device *dev, u32 tkn,
+static int skl_tplg_get_uuid(struct device *dev, u8 *guid,
+ struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
+{
+ if (uuid_tkn->token == SKL_TKN_UUID) {
+ memcpy(guid, &uuid_tkn->uuid, 16);
+ return 0;
+ }
+
+ dev_err(dev, "Not an UUID token %d\n", uuid_tkn->token);
+
+ return -EINVAL;
+}
+
+static int skl_tplg_fill_pin(struct device *dev,
+ struct snd_soc_tplg_vendor_value_elem *tkn_elem,
struct skl_module_pin *m_pin,
- int pin_index, u32 value)
+ int pin_index)
{
- switch (tkn) {
+ int ret;
+
+ switch (tkn_elem->token) {
case SKL_TKN_U32_PIN_MOD_ID:
- m_pin[pin_index].id.module_id = value;
+ m_pin[pin_index].id.module_id = tkn_elem->value;
break;
case SKL_TKN_U32_PIN_INST_ID:
- m_pin[pin_index].id.instance_id = value;
+ m_pin[pin_index].id.instance_id = tkn_elem->value;
+ break;
+
+ case SKL_TKN_UUID:
+ ret = skl_tplg_get_uuid(dev, m_pin[pin_index].id.mod_uuid.b,
+ (struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem);
+ if (ret < 0)
+ return ret;
+
break;
default:
- dev_err(dev, "%d Not a pin token\n", value);
+ dev_err(dev, "%d Not a pin token\n", tkn_elem->token);
return -EINVAL;
}
@@ -2083,9 +2107,7 @@ static int skl_tplg_fill_pins_info(struct device *dev,
return -EINVAL;
}
- ret = skl_tplg_fill_pin(dev, tkn_elem->token,
- m_pin, pin_count, tkn_elem->value);
-
+ ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count);
if (ret < 0)
return ret;
@@ -2170,19 +2192,6 @@ static int skl_tplg_widget_fill_fmt(struct device *dev,
return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val);
}
-static int skl_tplg_get_uuid(struct device *dev, struct skl_module_cfg *mconfig,
- struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
-{
- if (uuid_tkn->token == SKL_TKN_UUID)
- memcpy(&mconfig->guid, &uuid_tkn->uuid, 16);
- else {
- dev_err(dev, "Not an UUID token tkn %d\n", uuid_tkn->token);
- return -EINVAL;
- }
-
- return 0;
-}
-
static void skl_tplg_fill_pin_dynamic_val(
struct skl_module_pin *mpin, u32 pin_count, u32 value)
{
@@ -2382,7 +2391,7 @@ static int skl_tplg_get_token(struct device *dev,
case SKL_TKN_U32_MAX_MCPS:
case SKL_TKN_U32_OBS:
case SKL_TKN_U32_IBS:
- ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, dir, pin_index);
+ ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir);
if (ret < 0)
return ret;
@@ -2488,6 +2497,7 @@ static int skl_tplg_get_token(struct device *dev,
case SKL_TKN_U32_PIN_MOD_ID:
case SKL_TKN_U32_PIN_INST_ID:
+ case SKL_TKN_UUID:
ret = skl_tplg_fill_pins_info(dev,
mconfig, tkn_elem, dir,
pin_index);
@@ -2550,6 +2560,7 @@ static int skl_tplg_get_tokens(struct device *dev,
struct snd_soc_tplg_vendor_value_elem *tkn_elem;
int tkn_count = 0, ret;
int off = 0, tuple_size = 0;
+ bool is_module_guid = true;
if (block_size <= 0)
return -EINVAL;
@@ -2565,7 +2576,15 @@ static int skl_tplg_get_tokens(struct device *dev,
continue;
case SND_SOC_TPLG_TUPLE_TYPE_UUID:
- ret = skl_tplg_get_uuid(dev, mconfig, array->uuid);
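+ /* first UUID tuple is the module GUID; later ones carry per-pin data */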
+ if (is_module_guid) {
+ ret = skl_tplg_get_uuid(dev, mconfig->guid,
+ array->uuid);
+ is_module_guid = false;
+ } else {
+ ret = skl_tplg_get_token(dev, array->value, skl,
+ mconfig);
+ }
+
if (ret < 0)
return ret;
@@ -3331,6 +3350,7 @@ static struct snd_soc_tplg_ops skl_tplg_ops = {
.io_ops = skl_tplg_kcontrol_ops,
.io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
.manifest = skl_manifest_load,
+ .dai_load = skl_dai_load,
};
/*
@@ -3404,7 +3424,7 @@ int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
ret = request_firmware(&fw, skl->tplg_name, bus->dev);
if (ret < 0) {
- dev_err(bus->dev, "tplg fw %s load failed with %d\n",
+ dev_info(bus->dev, "tplg fw %s load failed with %d, falling back to dfw_sst.bin\n",
skl->tplg_name, ret);
ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
if (ret < 0) {
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index 2717db92036b..b6496513fe55 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -34,7 +34,7 @@
#define MAX_FIXED_DMIC_PARAMS_SIZE 727
/* Maximum number of coefficients up down mixer module */
-#define UP_DOWN_MIXER_MAX_COEFF 6
+#define UP_DOWN_MIXER_MAX_COEFF 8
#define MODULE_MAX_IN_PINS 8
#define MODULE_MAX_OUT_PINS 8
@@ -161,6 +161,7 @@ struct skl_up_down_mixer_cfg {
u32 coeff_sel;
/* Pass the user coeff in this array */
s32 coeff[UP_DOWN_MIXER_MAX_COEFF];
+ u32 ch_map;
} __packed;
struct skl_algo_cfg {
@@ -455,8 +456,8 @@ static inline struct skl *get_skl_ctx(struct device *dev)
int skl_tplg_be_update_params(struct snd_soc_dai *dai,
struct skl_pipe_params *params);
-int skl_dsp_set_dma_control(struct skl_sst *ctx,
- struct skl_module_cfg *mconfig);
+int skl_dsp_set_dma_control(struct skl_sst *ctx, u32 *caps,
+ u32 caps_size, u32 node_id);
void skl_tplg_set_be_dmic_config(struct snd_soc_dai *dai,
struct skl_pipe_params *params, int stream);
int skl_tplg_init(struct snd_soc_platform *platform,
@@ -501,4 +502,7 @@ int skl_pcm_host_dma_prepare(struct device *dev,
struct skl_pipe_params *params);
int skl_pcm_link_dma_prepare(struct device *dev,
struct skl_pipe_params *params);
+
+int skl_dai_load(struct snd_soc_component *cmp,
+ struct snd_soc_dai_driver *pcm_dai);
#endif
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index f94b484abb99..31d8634e8aa1 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -28,7 +28,7 @@
#include <linux/firmware.h>
#include <linux/delay.h>
#include <sound/pcm.h>
-#include "../common/sst-acpi.h"
+#include <sound/soc-acpi.h>
#include <sound/hda_register.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
@@ -439,10 +439,10 @@ static int skl_machine_device_register(struct skl *skl, void *driver_data)
{
struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
struct platform_device *pdev;
- struct sst_acpi_mach *mach = driver_data;
+ struct snd_soc_acpi_mach *mach = driver_data;
int ret;
- mach = sst_acpi_find_machine(mach);
+ mach = snd_soc_acpi_find_machine(mach);
if (mach == NULL) {
dev_err(bus->dev, "No matching machine driver found\n");
return -ENODEV;
@@ -462,8 +462,11 @@ static int skl_machine_device_register(struct skl *skl, void *driver_data)
return -EIO;
}
- if (mach->pdata)
+ if (mach->pdata) {
+ skl->use_tplg_pcm =
+ ((struct skl_machine_pdata *)mach->pdata)->use_tplg_pcm;
dev_set_drvdata(&pdev->dev, mach->pdata);
+ }
skl->i2s_dev = pdev;
@@ -875,33 +878,36 @@ static void skl_remove(struct pci_dev *pci)
dev_set_drvdata(&pci->dev, NULL);
}
-static struct sst_codecs skl_codecs = {
+static struct snd_soc_acpi_codecs skl_codecs = {
.num_codecs = 1,
.codecs = {"10508825"}
};
-static struct sst_codecs kbl_codecs = {
+static struct snd_soc_acpi_codecs kbl_codecs = {
.num_codecs = 1,
.codecs = {"10508825"}
};
-static struct sst_codecs bxt_codecs = {
+static struct snd_soc_acpi_codecs bxt_codecs = {
.num_codecs = 1,
.codecs = {"MX98357A"}
};
-static struct sst_codecs kbl_poppy_codecs = {
+static struct snd_soc_acpi_codecs kbl_poppy_codecs = {
.num_codecs = 1,
.codecs = {"10EC5663"}
};
-static struct sst_codecs kbl_5663_5514_codecs = {
+static struct snd_soc_acpi_codecs kbl_5663_5514_codecs = {
.num_codecs = 2,
.codecs = {"10EC5663", "10EC5514"}
};
+static struct skl_machine_pdata cnl_pdata = {
+ .use_tplg_pcm = true,
+};
-static struct sst_acpi_mach sst_skl_devdata[] = {
+static struct snd_soc_acpi_mach sst_skl_devdata[] = {
{
.id = "INT343A",
.drv_name = "skl_alc286s_i2s",
@@ -911,7 +917,7 @@ static struct sst_acpi_mach sst_skl_devdata[] = {
.id = "INT343B",
.drv_name = "skl_n88l25_s4567",
.fw_filename = "intel/dsp_fw_release.bin",
- .machine_quirk = sst_acpi_codec_list,
+ .machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &skl_codecs,
.pdata = &skl_dmic_data
},
@@ -919,14 +925,14 @@ static struct sst_acpi_mach sst_skl_devdata[] = {
.id = "MX98357A",
.drv_name = "skl_n88l25_m98357a",
.fw_filename = "intel/dsp_fw_release.bin",
- .machine_quirk = sst_acpi_codec_list,
+ .machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &skl_codecs,
.pdata = &skl_dmic_data
},
{}
};
-static struct sst_acpi_mach sst_bxtp_devdata[] = {
+static struct snd_soc_acpi_mach sst_bxtp_devdata[] = {
{
.id = "INT343A",
.drv_name = "bxt_alc298s_i2s",
@@ -936,13 +942,13 @@ static struct sst_acpi_mach sst_bxtp_devdata[] = {
.id = "DLGS7219",
.drv_name = "bxt_da7219_max98357a_i2s",
.fw_filename = "intel/dsp_fw_bxtn.bin",
- .machine_quirk = sst_acpi_codec_list,
+ .machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &bxt_codecs,
},
{}
};
-static struct sst_acpi_mach sst_kbl_devdata[] = {
+static struct snd_soc_acpi_mach sst_kbl_devdata[] = {
{
.id = "INT343A",
.drv_name = "kbl_alc286s_i2s",
@@ -952,7 +958,7 @@ static struct sst_acpi_mach sst_kbl_devdata[] = {
.id = "INT343B",
.drv_name = "kbl_n88l25_s4567",
.fw_filename = "intel/dsp_fw_kbl.bin",
- .machine_quirk = sst_acpi_codec_list,
+ .machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &kbl_codecs,
.pdata = &skl_dmic_data
},
@@ -960,7 +966,7 @@ static struct sst_acpi_mach sst_kbl_devdata[] = {
.id = "MX98357A",
.drv_name = "kbl_n88l25_m98357a",
.fw_filename = "intel/dsp_fw_kbl.bin",
- .machine_quirk = sst_acpi_codec_list,
+ .machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &kbl_codecs,
.pdata = &skl_dmic_data
},
@@ -968,7 +974,7 @@ static struct sst_acpi_mach sst_kbl_devdata[] = {
.id = "MX98927",
.drv_name = "kbl_r5514_5663_max",
.fw_filename = "intel/dsp_fw_kbl.bin",
- .machine_quirk = sst_acpi_codec_list,
+ .machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &kbl_5663_5514_codecs,
.pdata = &skl_dmic_data
},
@@ -976,7 +982,7 @@ static struct sst_acpi_mach sst_kbl_devdata[] = {
.id = "MX98927",
.drv_name = "kbl_rt5663_m98927",
.fw_filename = "intel/dsp_fw_kbl.bin",
- .machine_quirk = sst_acpi_codec_list,
+ .machine_quirk = snd_soc_acpi_codec_list,
.quirk_data = &kbl_poppy_codecs,
.pdata = &skl_dmic_data
},
@@ -989,7 +995,7 @@ static struct sst_acpi_mach sst_kbl_devdata[] = {
{}
};
-static struct sst_acpi_mach sst_glk_devdata[] = {
+static struct snd_soc_acpi_mach sst_glk_devdata[] = {
{
.id = "INT343A",
.drv_name = "glk_alc298s_i2s",
@@ -998,12 +1004,14 @@ static struct sst_acpi_mach sst_glk_devdata[] = {
{}
};
-static const struct sst_acpi_mach sst_cnl_devdata[] = {
+static const struct snd_soc_acpi_mach sst_cnl_devdata[] = {
{
.id = "INT34C2",
.drv_name = "cnl_rt274",
.fw_filename = "intel/dsp_fw_cnl.bin",
+ .pdata = &cnl_pdata,
},
+ {}
};
/* PCI IDs */
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index 8d9d6899f761..e00cde8200dd 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -53,6 +53,7 @@ struct skl {
struct platform_device *dmic_dev;
struct platform_device *i2s_dev;
struct snd_soc_platform *platform;
+ struct snd_soc_dai_driver *dais;
struct nhlt_acpi_table *nhlt; /* nhlt ptr */
struct skl_sst *skl_sst; /* sst skl ctx */
@@ -73,6 +74,7 @@ struct skl {
struct skl_debug *debugfs;
u8 nr_modules;
struct skl_module **modules;
+ bool use_tplg_pcm;
};
#define skl_to_ebus(s) (&(s)->ebus)
@@ -85,9 +87,9 @@ struct skl_dma_params {
u8 stream_tag;
};
-/* to pass dmic data */
struct skl_machine_pdata {
u32 dmic_num;
+ bool use_tplg_pcm; /* use dais and dai links from topology */
};
struct skl_dsp_ops {
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index cf23af159acf..505b0ff03c3b 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -318,7 +318,7 @@ static void kirkwood_dma_free_dma_buffers(struct snd_pcm *pcm)
}
}
-struct snd_soc_platform_driver kirkwood_soc_platform = {
+const struct snd_soc_platform_driver kirkwood_soc_platform = {
.ops = &kirkwood_dma_ops,
.pcm_new = kirkwood_dma_new,
.pcm_free = kirkwood_dma_free_dma_buffers,
diff --git a/sound/soc/kirkwood/kirkwood.h b/sound/soc/kirkwood/kirkwood.h
index 90e32a781424..783cb1a4f30e 100644
--- a/sound/soc/kirkwood/kirkwood.h
+++ b/sound/soc/kirkwood/kirkwood.h
@@ -143,6 +143,6 @@ struct kirkwood_dma_data {
int burst;
};
-extern struct snd_soc_platform_driver kirkwood_soc_platform;
+extern const struct snd_soc_platform_driver kirkwood_soc_platform;
#endif
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
index 6c49f3d6fd96..d40219678700 100644
--- a/sound/soc/omap/ams-delta.c
+++ b/sound/soc/omap/ams-delta.c
@@ -260,7 +260,7 @@ static bool cx81801_cmd_pending;
static bool ams_delta_muted;
static DEFINE_SPINLOCK(ams_delta_lock);
-static void cx81801_timeout(unsigned long data)
+static void cx81801_timeout(struct timer_list *unused)
{
int muted;
@@ -349,7 +349,7 @@ static void cx81801_receive(struct tty_struct *tty,
/* First modem response, complete setup procedure */
/* Initialize timer used for config pulse generation */
- setup_timer(&cx81801_timer, cx81801_timeout, 0);
+ timer_setup(&cx81801_timer, cx81801_timeout, 0);
v253_ops.receive_buf(tty, cp, fp, count);
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
index 3e9cc4842a1d..8eeac7cab1c1 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -362,6 +362,9 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev)
card->name = devm_kasprintf(dev, GFP_KERNEL,
"HDMI %s", dev_name(ad->dssdev));
+ if (!card->name)
+ return -ENOMEM;
+
card->owner = THIS_MODULE;
card->dai_link =
devm_kzalloc(dev, sizeof(*(card->dai_link)), GFP_KERNEL);
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index f49bf02e5ec2..803818aabee9 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -29,21 +29,41 @@
static void pxa2xx_ac97_warm_reset(struct snd_ac97 *ac97)
{
- pxa2xx_ac97_try_warm_reset(ac97);
+ pxa2xx_ac97_try_warm_reset();
- pxa2xx_ac97_finish_reset(ac97);
+ pxa2xx_ac97_finish_reset();
}
static void pxa2xx_ac97_cold_reset(struct snd_ac97 *ac97)
{
- pxa2xx_ac97_try_cold_reset(ac97);
+ pxa2xx_ac97_try_cold_reset();
- pxa2xx_ac97_finish_reset(ac97);
+ pxa2xx_ac97_finish_reset();
+}
+
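+/*
+ * pxa2xx_ac97_read()/write() now return a status; adapt them to the legacy
+ * snd_ac97_bus_ops callbacks, which expect unsigned short read / void write.
+ */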
+static unsigned short pxa2xx_ac97_legacy_read(struct snd_ac97 *ac97,
+ unsigned short reg)
+{
+ int ret;
+
+ ret = pxa2xx_ac97_read(ac97->num, reg);
+ if (ret < 0)
+ return 0;
+ else
+ return (unsigned short)(ret & 0xffff);
+}
+
+static void pxa2xx_ac97_legacy_write(struct snd_ac97 *ac97,
+ unsigned short reg, unsigned short val)
+{
+ int ret;
+
+ ret = pxa2xx_ac97_write(ac97->num, reg, val);
}
static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
- .read = pxa2xx_ac97_read,
- .write = pxa2xx_ac97_write,
+ .read = pxa2xx_ac97_legacy_read,
+ .write = pxa2xx_ac97_legacy_write,
.warm_reset = pxa2xx_ac97_warm_reset,
.reset = pxa2xx_ac97_cold_reset,
};
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index e1945e1772cd..caf71aab8196 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -74,7 +74,6 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
data->i2s_port = cpu_dai->driver->id;
runtime->private_data = data;
- dma_ch = 0;
if (v->alloc_dma_channel)
dma_ch = v->alloc_dma_channel(drvdata, dir);
else
@@ -122,7 +121,6 @@ static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream)
struct lpass_pcm_data *data;
data = runtime->private_data;
- v = drvdata->variant;
drvdata->substream[data->dma_ch] = NULL;
if (v->free_dma_channel)
v->free_dma_channel(drvdata, data->dma_ch);
diff --git a/sound/soc/rockchip/rk3399_gru_sound.c b/sound/soc/rockchip/rk3399_gru_sound.c
index 0513fe480353..d64fbbd50544 100644
--- a/sound/soc/rockchip/rk3399_gru_sound.c
+++ b/sound/soc/rockchip/rk3399_gru_sound.c
@@ -23,6 +23,7 @@
#include <linux/of_gpio.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
+#include <linux/i2c.h>
#include <linux/input.h>
#include <sound/core.h>
#include <sound/jack.h>
@@ -47,18 +48,7 @@ static const struct snd_soc_dapm_widget rockchip_dapm_widgets[] = {
SND_SOC_DAPM_SPK("Speakers", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Int Mic", NULL),
-};
-
-static const struct snd_soc_dapm_route rockchip_dapm_routes[] = {
- /* Input Lines */
- {"MIC", NULL, "Headset Mic"},
- {"DMIC1L", NULL, "Int Mic"},
- {"DMIC1R", NULL, "Int Mic"},
-
- /* Output Lines */
- {"Headphones", NULL, "HPL"},
- {"Headphones", NULL, "HPR"},
- {"Speakers", NULL, "Speaker"},
+ SND_SOC_DAPM_LINE("HDMI", NULL),
};
static const struct snd_kcontrol_new rockchip_controls[] = {
@@ -66,6 +56,7 @@ static const struct snd_kcontrol_new rockchip_controls[] = {
SOC_DAPM_PIN_SWITCH("Speakers"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("HDMI"),
};
static int rockchip_sound_max98357a_hw_params(struct snd_pcm_substream *substream,
@@ -314,8 +305,6 @@ static struct snd_soc_card rockchip_sound_card = {
.owner = THIS_MODULE,
.dapm_widgets = rockchip_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(rockchip_dapm_widgets),
- .dapm_routes = rockchip_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(rockchip_dapm_routes),
.controls = rockchip_controls,
.num_controls = ARRAY_SIZE(rockchip_controls),
};
@@ -329,15 +318,6 @@ enum {
DAILINK_RT5514_DSP,
};
-static const char * const dailink_compat[] = {
- [DAILINK_CDNDP] = "rockchip,rk3399-cdn-dp",
- [DAILINK_DA7219] = "dlg,da7219",
- [DAILINK_DMIC] = "dmic-codec",
- [DAILINK_MAX98357A] = "maxim,max98357a",
- [DAILINK_RT5514] = "realtek,rt5514-i2c",
- [DAILINK_RT5514_DSP] = "realtek,rt5514-spi",
-};
-
static const struct snd_soc_dai_link rockchip_dais[] = {
[DAILINK_CDNDP] = {
.name = "DP",
@@ -391,13 +371,117 @@ static const struct snd_soc_dai_link rockchip_dais[] = {
},
};
+static const struct snd_soc_dapm_route rockchip_sound_cdndp_routes[] = {
+ /* Output */
+ {"HDMI", NULL, "TX"},
+};
+
+static const struct snd_soc_dapm_route rockchip_sound_da7219_routes[] = {
+ /* Output */
+ {"Headphones", NULL, "HPL"},
+ {"Headphones", NULL, "HPR"},
+
+ /* Input */
+ {"MIC", NULL, "Headset Mic"},
+};
+
+static const struct snd_soc_dapm_route rockchip_sound_dmic_routes[] = {
+ /* Input */
+ {"DMic", NULL, "Int Mic"},
+};
+
+static const struct snd_soc_dapm_route rockchip_sound_max98357a_routes[] = {
+ /* Output */
+ {"Speakers", NULL, "Speaker"},
+};
+
+static const struct snd_soc_dapm_route rockchip_sound_rt5514_routes[] = {
+ /* Input */
+ {"DMIC1L", NULL, "Int Mic"},
+ {"DMIC1R", NULL, "Int Mic"},
+};
+
+struct rockchip_sound_route {
+ const struct snd_soc_dapm_route *routes;
+ int num_routes;
+};
+
+static const struct rockchip_sound_route rockchip_routes[] = {
+ [DAILINK_CDNDP] = {
+ .routes = rockchip_sound_cdndp_routes,
+ .num_routes = ARRAY_SIZE(rockchip_sound_cdndp_routes),
+ },
+ [DAILINK_DA7219] = {
+ .routes = rockchip_sound_da7219_routes,
+ .num_routes = ARRAY_SIZE(rockchip_sound_da7219_routes),
+ },
+ [DAILINK_DMIC] = {
+ .routes = rockchip_sound_dmic_routes,
+ .num_routes = ARRAY_SIZE(rockchip_sound_dmic_routes),
+ },
+ [DAILINK_MAX98357A] = {
+ .routes = rockchip_sound_max98357a_routes,
+ .num_routes = ARRAY_SIZE(rockchip_sound_max98357a_routes),
+ },
+ [DAILINK_RT5514] = {
+ .routes = rockchip_sound_rt5514_routes,
+ .num_routes = ARRAY_SIZE(rockchip_sound_rt5514_routes),
+ },
+ [DAILINK_RT5514_DSP] = {},
+};
+
+struct dailink_match_data {
+ const char *compatible;
+ struct bus_type *bus_type;
+};
+
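+/*
+ * rt5514 uses the same compatible string on both I2C and SPI, so the bus
+ * type is needed to tell the codec and its DSP (SPI) device apart.
+ */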
+static const struct dailink_match_data dailink_match[] = {
+ [DAILINK_CDNDP] = {
+ .compatible = "rockchip,rk3399-cdn-dp",
+ },
+ [DAILINK_DA7219] = {
+ .compatible = "dlg,da7219",
+ },
+ [DAILINK_DMIC] = {
+ .compatible = "dmic-codec",
+ },
+ [DAILINK_MAX98357A] = {
+ .compatible = "maxim,max98357a",
+ },
+ [DAILINK_RT5514] = {
+ .compatible = "realtek,rt5514",
+ .bus_type = &i2c_bus_type,
+ },
+ [DAILINK_RT5514_DSP] = {
+ .compatible = "realtek,rt5514",
+ .bus_type = &spi_bus_type,
+ },
+};
+
+static int of_dev_node_match(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
static int rockchip_sound_codec_node_match(struct device_node *np_codec)
{
+ struct device *dev;
int i;
- for (i = 0; i < ARRAY_SIZE(dailink_compat); i++) {
- if (of_device_is_compatible(np_codec, dailink_compat[i]))
- return i;
+ for (i = 0; i < ARRAY_SIZE(dailink_match); i++) {
+ if (!of_device_is_compatible(np_codec,
+ dailink_match[i].compatible))
+ continue;
+
+ if (dailink_match[i].bus_type) {
+ dev = bus_find_device(dailink_match[i].bus_type, NULL,
+ np_codec, of_dev_node_match);
+ if (!dev)
+ continue;
+ put_device(dev);
+ }
+
+ return i;
}
return -1;
}
@@ -408,16 +492,28 @@ static int rockchip_sound_of_parse_dais(struct device *dev,
struct device_node *np_cpu, *np_cpu0, *np_cpu1;
struct device_node *np_codec;
struct snd_soc_dai_link *dai;
+ struct snd_soc_dapm_route *routes;
int i, index;
+ int num_routes;
card->dai_link = devm_kzalloc(dev, sizeof(rockchip_dais),
GFP_KERNEL);
if (!card->dai_link)
return -ENOMEM;
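+ /* allocate room for the worst case: every known DAI link contributes its routes */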
+ num_routes = 0;
+ for (i = 0; i < ARRAY_SIZE(rockchip_routes); i++)
+ num_routes += rockchip_routes[i].num_routes;
+ routes = devm_kzalloc(dev, num_routes * sizeof(*routes),
+ GFP_KERNEL);
+ if (!routes)
+ return -ENOMEM;
+ card->dapm_routes = routes;
+
np_cpu0 = of_parse_phandle(dev->of_node, "rockchip,cpu", 0);
np_cpu1 = of_parse_phandle(dev->of_node, "rockchip,cpu", 1);
+ card->num_dapm_routes = 0;
card->num_links = 0;
for (i = 0; i < ARRAY_SIZE(rockchip_dais); i++) {
np_codec = of_parse_phandle(dev->of_node,
@@ -445,6 +541,17 @@ static int rockchip_sound_of_parse_dais(struct device *dev,
dai->codec_of_node = np_codec;
dai->platform_of_node = np_cpu;
dai->cpu_of_node = np_cpu;
+
+ if (card->num_dapm_routes + rockchip_routes[index].num_routes >
+ num_routes) {
+ dev_err(dev, "Too many routes\n");
+ return -EINVAL;
+ }
+
+ memcpy(routes + card->num_dapm_routes,
+ rockchip_routes[index].routes,
+ rockchip_routes[index].num_routes * sizeof(*routes));
+ card->num_dapm_routes += rockchip_routes[index].num_routes;
}
return 0;
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index b6590467fd14..908211e1d6fc 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -692,7 +692,6 @@ static int rockchip_i2s_remove(struct platform_device *pdev)
if (!pm_runtime_status_suspended(&pdev->dev))
i2s_runtime_suspend(&pdev->dev);
- clk_disable_unprepare(i2s->mclk);
clk_disable_unprepare(i2s->hclk);
return 0;
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 10a4da06c0a1..233f1c9a4b6c 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -552,8 +552,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
}
ret = clk_prepare_enable(i2s->op_clk);
- if (ret)
+ if (ret) {
+ clk_put(i2s->op_clk);
+ i2s->op_clk = NULL;
goto err;
+ }
i2s->rclk_srcrate = clk_get_rate(i2s->op_clk);
/* Over-ride the other's */
@@ -1096,6 +1099,7 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev,
i2s->pdev = pdev;
i2s->pri_dai = NULL;
i2s->sec_dai = NULL;
+ i2s->i2s_dai_drv.id = 1;
i2s->i2s_dai_drv.symmetric_rates = 1;
i2s->i2s_dai_drv.probe = samsung_i2s_dai_probe;
i2s->i2s_dai_drv.remove = samsung_i2s_dai_remove;
@@ -1108,10 +1112,13 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev,
i2s->i2s_dai_drv.playback.formats = SAMSUNG_I2S_FMTS;
if (!sec) {
+ i2s->i2s_dai_drv.name = SAMSUNG_I2S_DAI;
i2s->i2s_dai_drv.capture.channels_min = 1;
i2s->i2s_dai_drv.capture.channels_max = 2;
i2s->i2s_dai_drv.capture.rates = i2s_dai_data->pcm_rates;
i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS;
+ } else {
+ i2s->i2s_dai_drv.name = SAMSUNG_I2S_DAI_SEC;
}
return i2s;
}
@@ -1285,6 +1292,7 @@ static int samsung_i2s_probe(struct platform_device *pdev)
}
}
}
+ quirks &= ~(QUIRK_SEC_DAI | QUIRK_SUPPORTS_IDMA);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pri_dai->addr = devm_ioremap_resource(&pdev->dev, res);
diff --git a/sound/soc/samsung/i2s.h b/sound/soc/samsung/i2s.h
index 21ff24e930db..79781de2f247 100644
--- a/sound/soc/samsung/i2s.h
+++ b/sound/soc/samsung/i2s.h
@@ -13,6 +13,9 @@
#ifndef __SND_SOC_SAMSUNG_I2S_H
#define __SND_SOC_SAMSUNG_I2S_H
+#define SAMSUNG_I2S_DAI "samsung-i2s"
+#define SAMSUNG_I2S_DAI_SEC "samsung-i2s-sec"
+
#define SAMSUNG_I2S_DIV_BCLK 1
#define SAMSUNG_I2S_RCLKSRC_0 0
diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c
index 68698f3d72f9..a55d18703fe7 100644
--- a/sound/soc/samsung/tm2_wm5110.c
+++ b/sound/soc/samsung/tm2_wm5110.c
@@ -383,6 +383,7 @@ static struct snd_soc_dai_link tm2_dai_links[] = {
{
.name = "WM5110 AIF1",
.stream_name = "HiFi Primary",
+ .cpu_dai_name = SAMSUNG_I2S_DAI,
.codec_dai_name = "wm5110-aif1",
.ops = &tm2_aif1_ops,
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
@@ -390,6 +391,7 @@ static struct snd_soc_dai_link tm2_dai_links[] = {
}, {
.name = "WM5110 Voice",
.stream_name = "Voice call",
+ .cpu_dai_name = SAMSUNG_I2S_DAI,
.codec_dai_name = "wm5110-aif2",
.ops = &tm2_aif2_ops,
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
@@ -398,6 +400,7 @@ static struct snd_soc_dai_link tm2_dai_links[] = {
}, {
.name = "WM5110 BT",
.stream_name = "Bluetooth",
+ .cpu_dai_name = SAMSUNG_I2S_DAI,
.codec_dai_name = "wm5110-aif3",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM,
@@ -436,8 +439,7 @@ static int tm2_probe(struct platform_device *pdev)
snd_soc_card_set_drvdata(card, priv);
card->dev = dev;
- priv->gpio_mic_bias = devm_gpiod_get(dev, "mic-bias",
- GPIOF_OUT_INIT_LOW);
+ priv->gpio_mic_bias = devm_gpiod_get(dev, "mic-bias", GPIOD_OUT_HIGH);
if (IS_ERR(priv->gpio_mic_bias)) {
dev_err(dev, "Failed to get mic bias gpio\n");
return PTR_ERR(priv->gpio_mic_bias);
@@ -477,7 +479,6 @@ static int tm2_probe(struct platform_device *pdev)
}
for (i = 0; i < card->num_links; i++) {
- card->dai_link[i].cpu_dai_name = NULL;
card->dai_link[i].cpu_name = NULL;
card->dai_link[i].platform_name = NULL;
card->dai_link[i].codec_of_node = codec_dai_node;
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 6d3c7706d93f..c3aaf4788557 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1932,14 +1932,9 @@ static int fsi_probe(struct platform_device *pdev)
core = NULL;
if (np) {
- const struct of_device_id *of_id;
-
- of_id = of_match_device(fsi_of_match, &pdev->dev);
- if (of_id) {
- core = of_id->data;
- fsi_of_parse("fsia", np, &info.port_a, &pdev->dev);
- fsi_of_parse("fsib", np, &info.port_b, &pdev->dev);
- }
+ core = of_device_get_match_data(&pdev->dev);
+ fsi_of_parse("fsia", np, &info.port_a, &pdev->dev);
+ fsi_of_parse("fsib", np, &info.port_b, &pdev->dev);
} else {
const struct platform_device_id *id_entry = pdev->id_entry;
if (id_entry)
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 938baff86ef2..8ddb08714faa 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -44,7 +44,6 @@ struct rsnd_adg {
#define LRCLK_ASYNC (1 << 0)
#define AUDIO_OUT_48 (1 << 1)
-#define adg_mode_flags(adg) (adg->flags)
#define for_each_rsnd_clk(pos, adg, i) \
for (i = 0; \
@@ -58,6 +57,13 @@ struct rsnd_adg {
i++)
#define rsnd_priv_to_adg(priv) ((struct rsnd_adg *)(priv)->adg)
+static const char * const clk_name[] = {
+ [CLKA] = "clk_a",
+ [CLKB] = "clk_b",
+ [CLKC] = "clk_c",
+ [CLKI] = "clk_i",
+};
+
static u32 rsnd_adg_calculate_rbgx(unsigned long div)
{
int i, ratio;
@@ -280,6 +286,7 @@ static void rsnd_adg_set_ssi_clk(struct rsnd_mod *ssi_mod, u32 val)
struct rsnd_priv *priv = rsnd_mod_to_priv(ssi_mod);
struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
struct rsnd_mod *adg_mod = rsnd_mod_get(adg);
+ struct device *dev = rsnd_priv_to_dev(priv);
int id = rsnd_mod_id(ssi_mod);
int shift = (id % 4) * 8;
u32 mask = 0xFF << shift;
@@ -306,12 +313,13 @@ static void rsnd_adg_set_ssi_clk(struct rsnd_mod *ssi_mod, u32 val)
rsnd_mod_bset(adg_mod, AUDIO_CLK_SEL2, mask, val);
break;
}
+
+ dev_dbg(dev, "AUDIO_CLK_SEL is 0x%x\n", val);
}
int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
{
struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
- struct device *dev = rsnd_priv_to_dev(priv);
struct clk *clk;
int i;
int sel_table[] = {
@@ -321,8 +329,6 @@ int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
[CLKI] = 0x0,
};
- dev_dbg(dev, "request clock = %d\n", rate);
-
/*
* find suitable clock from
* AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
@@ -366,8 +372,8 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
rsnd_adg_set_ssi_clk(ssi_mod, data);
- if (adg_mode_flags(adg) & LRCLK_ASYNC) {
- if (adg_mode_flags(adg) & AUDIO_OUT_48)
+ if (rsnd_flags_has(adg, LRCLK_ASYNC)) {
+ if (rsnd_flags_has(adg, AUDIO_OUT_48))
ckr = 0x80000000;
} else {
if (0 == (rate % 8000))
@@ -378,9 +384,10 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
rsnd_mod_write(adg_mod, BRRA, adg->rbga);
rsnd_mod_write(adg_mod, BRRB, adg->rbgb);
- dev_dbg(dev, "ADG: %s[%d] selects 0x%x for %d\n",
- rsnd_mod_name(ssi_mod), rsnd_mod_id(ssi_mod),
- data, rate);
+ dev_dbg(dev, "CLKOUT is based on BRG%c (= %dHz)\n",
+ (ckr) ? 'B' : 'A',
+ (ckr) ? adg->rbgb_rate_for_48khz :
+ adg->rbga_rate_for_441khz);
return 0;
}
@@ -409,21 +416,12 @@ static void rsnd_adg_get_clkin(struct rsnd_priv *priv,
{
struct device *dev = rsnd_priv_to_dev(priv);
struct clk *clk;
- static const char * const clk_name[] = {
- [CLKA] = "clk_a",
- [CLKB] = "clk_b",
- [CLKC] = "clk_c",
- [CLKI] = "clk_i",
- };
int i;
for (i = 0; i < CLKMAX; i++) {
clk = devm_clk_get(dev, clk_name[i]);
adg->clk[i] = IS_ERR(clk) ? NULL : clk;
}
-
- for_each_rsnd_clk(clk, adg, i)
- dev_dbg(dev, "clk %d : %p : %ld\n", i, clk, clk_get_rate(clk));
}
static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
@@ -479,10 +477,10 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
}
if (req_rate[0] % 48000 == 0)
- adg->flags = AUDIO_OUT_48;
+ rsnd_flags_set(adg, AUDIO_OUT_48);
if (of_get_property(np, "clkout-lr-asynchronous", NULL))
- adg->flags = LRCLK_ASYNC;
+ rsnd_flags_set(adg, LRCLK_ASYNC);
/*
* This driver is assuming that AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC
@@ -512,7 +510,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
adg->rbga_rate_for_441khz = rate / div;
ckr |= brg_table[i] << 20;
if (req_441kHz_rate &&
- !(adg_mode_flags(adg) & AUDIO_OUT_48))
+ !rsnd_flags_has(adg, AUDIO_OUT_48))
parent_clk_name = __clk_get_name(clk);
}
}
@@ -528,7 +526,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
adg->rbgb_rate_for_48khz = rate / div;
ckr |= brg_table[i] << 16;
if (req_48kHz_rate &&
- (adg_mode_flags(adg) & AUDIO_OUT_48))
+ rsnd_flags_has(adg, AUDIO_OUT_48))
parent_clk_name = __clk_get_name(clk);
}
}
@@ -572,12 +570,35 @@ rsnd_adg_get_clkout_end:
adg->ckr = ckr;
adg->rbga = rbga;
adg->rbgb = rbgb;
+}
+
+#ifdef DEBUG
+static void rsnd_adg_clk_dbg_info(struct rsnd_priv *priv, struct rsnd_adg *adg)
+{
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct clk *clk;
+ int i;
+
+ for_each_rsnd_clk(clk, adg, i)
+ dev_dbg(dev, "%s : %p : %ld\n",
+ clk_name[i], clk, clk_get_rate(clk));
- for_each_rsnd_clkout(clk, adg, i)
- dev_dbg(dev, "clkout %d : %p : %ld\n", i, clk, clk_get_rate(clk));
dev_dbg(dev, "BRGCKR = 0x%08x, BRRA/BRRB = 0x%x/0x%x\n",
- ckr, rbga, rbgb);
+ adg->ckr, adg->rbga, adg->rbgb);
+ dev_dbg(dev, "BRGA (for 44100 base) = %d\n", adg->rbga_rate_for_441khz);
+ dev_dbg(dev, "BRGB (for 48000 base) = %d\n", adg->rbgb_rate_for_48khz);
+
+ /*
+ * Actual CLKOUT will be exchanged in rsnd_adg_ssi_clk_try_start()
+ * by BRGCKR::BRGCKR_31
+ */
+ for_each_rsnd_clkout(clk, adg, i)
+ dev_dbg(dev, "clkout %d : %p : %ld\n", i,
+ clk, clk_get_rate(clk));
}
+#else
+#define rsnd_adg_clk_dbg_info(priv, adg)
+#endif
int rsnd_adg_probe(struct rsnd_priv *priv)
{
@@ -596,6 +617,7 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
rsnd_adg_get_clkin(priv, adg);
rsnd_adg_get_clkout(priv, adg);
+ rsnd_adg_clk_dbg_info(priv, adg);
priv->adg = adg;
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 107133297e8d..c70eb2097816 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -121,14 +121,6 @@ void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type)
}
}
-char *rsnd_mod_name(struct rsnd_mod *mod)
-{
- if (!mod || !mod->ops)
- return "unknown";
-
- return mod->ops->name;
-}
-
struct dma_chan *rsnd_mod_dma_req(struct rsnd_dai_stream *io,
struct rsnd_mod *mod)
{
@@ -172,8 +164,7 @@ int rsnd_mod_init(struct rsnd_priv *priv,
void rsnd_mod_quit(struct rsnd_mod *mod)
{
- if (mod->clk)
- clk_unprepare(mod->clk);
+ clk_unprepare(mod->clk);
mod->clk = NULL;
}
@@ -200,7 +191,10 @@ void rsnd_mod_interrupt(struct rsnd_mod *mod,
int rsnd_io_is_working(struct rsnd_dai_stream *io)
{
/* see rsnd_dai_stream_init/quit() */
- return !!io->substream;
+ if (io->substream)
+ return snd_pcm_running(io->substream);
+
+ return 0;
}
int rsnd_runtime_channel_original(struct rsnd_dai_stream *io)
@@ -407,11 +401,9 @@ struct rsnd_mod *rsnd_mod_next(int *iterator,
for (; *iterator < max; (*iterator)++) {
type = (array) ? array[*iterator] : *iterator;
- mod = io->mod[type];
- if (!mod)
- continue;
-
- return mod;
+ mod = rsnd_io_to_mod(io, type);
+ if (mod)
+ return mod;
}
return NULL;
@@ -1242,6 +1234,33 @@ struct rsnd_kctrl_cfg *rsnd_kctrl_init_s(struct rsnd_kctrl_cfg_s *cfg)
return &cfg->cfg;
}
+const char * const volume_ramp_rate[] = {
+ "128 dB/1 step", /* 00000 */
+ "64 dB/1 step", /* 00001 */
+ "32 dB/1 step", /* 00010 */
+ "16 dB/1 step", /* 00011 */
+ "8 dB/1 step", /* 00100 */
+ "4 dB/1 step", /* 00101 */
+ "2 dB/1 step", /* 00110 */
+ "1 dB/1 step", /* 00111 */
+ "0.5 dB/1 step", /* 01000 */
+ "0.25 dB/1 step", /* 01001 */
+ "0.125 dB/1 step", /* 01010 = VOLUME_RAMP_MAX_MIX */
+ "0.125 dB/2 steps", /* 01011 */
+ "0.125 dB/4 steps", /* 01100 */
+ "0.125 dB/8 steps", /* 01101 */
+ "0.125 dB/16 steps", /* 01110 */
+ "0.125 dB/32 steps", /* 01111 */
+ "0.125 dB/64 steps", /* 10000 */
+ "0.125 dB/128 steps", /* 10001 */
+ "0.125 dB/256 steps", /* 10010 */
+ "0.125 dB/512 steps", /* 10011 */
+ "0.125 dB/1024 steps", /* 10100 */
+ "0.125 dB/2048 steps", /* 10101 */
+ "0.125 dB/4096 steps", /* 10110 */
+ "0.125 dB/8192 steps", /* 10111 = VOLUME_RAMP_MAX_DVC */
+};
+
int rsnd_kctrl_new(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct snd_soc_pcm_runtime *rtd,
diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
index e7f53f44165d..d201d551866d 100644
--- a/sound/soc/sh/rcar/ctu.c
+++ b/sound/soc/sh/rcar/ctu.c
@@ -81,8 +81,11 @@ struct rsnd_ctu {
struct rsnd_kctrl_cfg_m sv3;
struct rsnd_kctrl_cfg_s reset;
int channels;
+ u32 flags;
};
+#define KCTRL_INITIALIZED (1 << 0)
+
#define rsnd_ctu_nr(priv) ((priv)->ctu_nr)
#define for_each_rsnd_ctu(pos, priv, i) \
for ((i) = 0; \
@@ -130,7 +133,7 @@ static void rsnd_ctu_value_init(struct rsnd_dai_stream *io,
int i;
for (i = 0; i < RSND_MAX_CHANNELS; i++) {
- u32 val = ctu->pass.val[i];
+ u32 val = rsnd_kctrl_valm(ctu->pass, i);
cpmdr |= val << (28 - (i * 4));
@@ -147,44 +150,44 @@ static void rsnd_ctu_value_init(struct rsnd_dai_stream *io,
rsnd_mod_write(mod, CTU_SCMDR, scmdr);
if (scmdr > 0) {
- rsnd_mod_write(mod, CTU_SV00R, ctu->sv0.val[0]);
- rsnd_mod_write(mod, CTU_SV01R, ctu->sv0.val[1]);
- rsnd_mod_write(mod, CTU_SV02R, ctu->sv0.val[2]);
- rsnd_mod_write(mod, CTU_SV03R, ctu->sv0.val[3]);
- rsnd_mod_write(mod, CTU_SV04R, ctu->sv0.val[4]);
- rsnd_mod_write(mod, CTU_SV05R, ctu->sv0.val[5]);
- rsnd_mod_write(mod, CTU_SV06R, ctu->sv0.val[6]);
- rsnd_mod_write(mod, CTU_SV07R, ctu->sv0.val[7]);
+ rsnd_mod_write(mod, CTU_SV00R, rsnd_kctrl_valm(ctu->sv0, 0));
+ rsnd_mod_write(mod, CTU_SV01R, rsnd_kctrl_valm(ctu->sv0, 1));
+ rsnd_mod_write(mod, CTU_SV02R, rsnd_kctrl_valm(ctu->sv0, 2));
+ rsnd_mod_write(mod, CTU_SV03R, rsnd_kctrl_valm(ctu->sv0, 3));
+ rsnd_mod_write(mod, CTU_SV04R, rsnd_kctrl_valm(ctu->sv0, 4));
+ rsnd_mod_write(mod, CTU_SV05R, rsnd_kctrl_valm(ctu->sv0, 5));
+ rsnd_mod_write(mod, CTU_SV06R, rsnd_kctrl_valm(ctu->sv0, 6));
+ rsnd_mod_write(mod, CTU_SV07R, rsnd_kctrl_valm(ctu->sv0, 7));
}
if (scmdr > 1) {
- rsnd_mod_write(mod, CTU_SV10R, ctu->sv1.val[0]);
- rsnd_mod_write(mod, CTU_SV11R, ctu->sv1.val[1]);
- rsnd_mod_write(mod, CTU_SV12R, ctu->sv1.val[2]);
- rsnd_mod_write(mod, CTU_SV13R, ctu->sv1.val[3]);
- rsnd_mod_write(mod, CTU_SV14R, ctu->sv1.val[4]);
- rsnd_mod_write(mod, CTU_SV15R, ctu->sv1.val[5]);
- rsnd_mod_write(mod, CTU_SV16R, ctu->sv1.val[6]);
- rsnd_mod_write(mod, CTU_SV17R, ctu->sv1.val[7]);
+ rsnd_mod_write(mod, CTU_SV10R, rsnd_kctrl_valm(ctu->sv1, 0));
+ rsnd_mod_write(mod, CTU_SV11R, rsnd_kctrl_valm(ctu->sv1, 1));
+ rsnd_mod_write(mod, CTU_SV12R, rsnd_kctrl_valm(ctu->sv1, 2));
+ rsnd_mod_write(mod, CTU_SV13R, rsnd_kctrl_valm(ctu->sv1, 3));
+ rsnd_mod_write(mod, CTU_SV14R, rsnd_kctrl_valm(ctu->sv1, 4));
+ rsnd_mod_write(mod, CTU_SV15R, rsnd_kctrl_valm(ctu->sv1, 5));
+ rsnd_mod_write(mod, CTU_SV16R, rsnd_kctrl_valm(ctu->sv1, 6));
+ rsnd_mod_write(mod, CTU_SV17R, rsnd_kctrl_valm(ctu->sv1, 7));
}
if (scmdr > 2) {
- rsnd_mod_write(mod, CTU_SV20R, ctu->sv2.val[0]);
- rsnd_mod_write(mod, CTU_SV21R, ctu->sv2.val[1]);
- rsnd_mod_write(mod, CTU_SV22R, ctu->sv2.val[2]);
- rsnd_mod_write(mod, CTU_SV23R, ctu->sv2.val[3]);
- rsnd_mod_write(mod, CTU_SV24R, ctu->sv2.val[4]);
- rsnd_mod_write(mod, CTU_SV25R, ctu->sv2.val[5]);
- rsnd_mod_write(mod, CTU_SV26R, ctu->sv2.val[6]);
- rsnd_mod_write(mod, CTU_SV27R, ctu->sv2.val[7]);
+ rsnd_mod_write(mod, CTU_SV20R, rsnd_kctrl_valm(ctu->sv2, 0));
+ rsnd_mod_write(mod, CTU_SV21R, rsnd_kctrl_valm(ctu->sv2, 1));
+ rsnd_mod_write(mod, CTU_SV22R, rsnd_kctrl_valm(ctu->sv2, 2));
+ rsnd_mod_write(mod, CTU_SV23R, rsnd_kctrl_valm(ctu->sv2, 3));
+ rsnd_mod_write(mod, CTU_SV24R, rsnd_kctrl_valm(ctu->sv2, 4));
+ rsnd_mod_write(mod, CTU_SV25R, rsnd_kctrl_valm(ctu->sv2, 5));
+ rsnd_mod_write(mod, CTU_SV26R, rsnd_kctrl_valm(ctu->sv2, 6));
+ rsnd_mod_write(mod, CTU_SV27R, rsnd_kctrl_valm(ctu->sv2, 7));
}
if (scmdr > 3) {
- rsnd_mod_write(mod, CTU_SV30R, ctu->sv3.val[0]);
- rsnd_mod_write(mod, CTU_SV31R, ctu->sv3.val[1]);
- rsnd_mod_write(mod, CTU_SV32R, ctu->sv3.val[2]);
- rsnd_mod_write(mod, CTU_SV33R, ctu->sv3.val[3]);
- rsnd_mod_write(mod, CTU_SV34R, ctu->sv3.val[4]);
- rsnd_mod_write(mod, CTU_SV35R, ctu->sv3.val[5]);
- rsnd_mod_write(mod, CTU_SV36R, ctu->sv3.val[6]);
- rsnd_mod_write(mod, CTU_SV37R, ctu->sv3.val[7]);
+ rsnd_mod_write(mod, CTU_SV30R, rsnd_kctrl_valm(ctu->sv3, 0));
+ rsnd_mod_write(mod, CTU_SV31R, rsnd_kctrl_valm(ctu->sv3, 1));
+ rsnd_mod_write(mod, CTU_SV32R, rsnd_kctrl_valm(ctu->sv3, 2));
+ rsnd_mod_write(mod, CTU_SV33R, rsnd_kctrl_valm(ctu->sv3, 3));
+ rsnd_mod_write(mod, CTU_SV34R, rsnd_kctrl_valm(ctu->sv3, 4));
+ rsnd_mod_write(mod, CTU_SV35R, rsnd_kctrl_valm(ctu->sv3, 5));
+ rsnd_mod_write(mod, CTU_SV36R, rsnd_kctrl_valm(ctu->sv3, 6));
+ rsnd_mod_write(mod, CTU_SV37R, rsnd_kctrl_valm(ctu->sv3, 7));
}
rsnd_mod_write(mod, CTU_CTUIR, 0);
@@ -196,17 +199,17 @@ static void rsnd_ctu_value_reset(struct rsnd_dai_stream *io,
struct rsnd_ctu *ctu = rsnd_mod_to_ctu(mod);
int i;
- if (!ctu->reset.val)
+ if (!rsnd_kctrl_vals(ctu->reset))
return;
for (i = 0; i < RSND_MAX_CHANNELS; i++) {
- ctu->pass.val[i] = 0;
- ctu->sv0.val[i] = 0;
- ctu->sv1.val[i] = 0;
- ctu->sv2.val[i] = 0;
- ctu->sv3.val[i] = 0;
+ rsnd_kctrl_valm(ctu->pass, i) = 0;
+ rsnd_kctrl_valm(ctu->sv0, i) = 0;
+ rsnd_kctrl_valm(ctu->sv1, i) = 0;
+ rsnd_kctrl_valm(ctu->sv2, i) = 0;
+ rsnd_kctrl_valm(ctu->sv3, i) = 0;
}
- ctu->reset.val = 0;
+ rsnd_kctrl_vals(ctu->reset) = 0;
}
static int rsnd_ctu_init(struct rsnd_mod *mod,
@@ -277,6 +280,9 @@ static int rsnd_ctu_pcm_new(struct rsnd_mod *mod,
struct rsnd_ctu *ctu = rsnd_mod_to_ctu(mod);
int ret;
+ if (rsnd_flags_has(ctu, KCTRL_INITIALIZED))
+ return 0;
+
/* CTU Pass */
ret = rsnd_kctrl_new_m(mod, io, rtd, "CTU Pass",
rsnd_kctrl_accept_anytime,
@@ -326,6 +332,8 @@ static int rsnd_ctu_pcm_new(struct rsnd_mod *mod,
rsnd_ctu_value_reset,
&ctu->reset, 1);
+ rsnd_flags_set(ctu, KCTRL_INITIALIZED);
+
return ret;
}
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 041ec1080d52..fd557abfe390 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -60,6 +60,14 @@ struct rsnd_dma_ctrl {
#define rsnd_dma_to_dmaen(dma) (&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma) (&(dma)->dma.pp)
+/* for DEBUG */
+static struct rsnd_mod_ops mem_ops = {
+ .name = "mem",
+};
+
+static struct rsnd_mod mem = {
+};
+
/*
* Audio DMAC
*/
@@ -211,11 +219,9 @@ static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
dma->mod_from,
dma->mod_to);
if (IS_ERR_OR_NULL(dmaen->chan)) {
- int ret = PTR_ERR(dmaen->chan);
-
dmaen->chan = NULL;
dev_err(dev, "can't get dma channel\n");
- return ret;
+ return -EIO;
}
return 0;
@@ -747,20 +753,22 @@ static void rsnd_dma_of_path(struct rsnd_mod *this,
rsnd_mod_name(this), rsnd_mod_id(this));
for (i = 0; i <= idx; i++) {
dev_dbg(dev, " %s[%d]%s\n",
- rsnd_mod_name(mod[i]), rsnd_mod_id(mod[i]),
- (mod[i] == *mod_from) ? " from" :
- (mod[i] == *mod_to) ? " to" : "");
+ rsnd_mod_name(mod[i] ? mod[i] : &mem),
+ rsnd_mod_id (mod[i] ? mod[i] : &mem),
+ (mod[i] == *mod_from) ? " from" :
+ (mod[i] == *mod_to) ? " to" : "");
}
}
-int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
- struct rsnd_mod **dma_mod)
+static int rsnd_dma_alloc(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
+ struct rsnd_mod **dma_mod)
{
struct rsnd_mod *mod_from = NULL;
struct rsnd_mod *mod_to = NULL;
struct rsnd_priv *priv = rsnd_io_to_priv(io);
struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_dma *dma;
struct rsnd_mod_ops *ops;
enum rsnd_mod_type type;
int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
@@ -800,40 +808,47 @@ int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
type = RSND_MOD_AUDMA;
}
- if (!(*dma_mod)) {
- struct rsnd_dma *dma;
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return -ENOMEM;
- dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
- if (!dma)
- return -ENOMEM;
+ *dma_mod = rsnd_mod_get(dma);
- *dma_mod = rsnd_mod_get(dma);
+ ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
+ rsnd_mod_get_status, type, dma_id);
+ if (ret < 0)
+ return ret;
- ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
- rsnd_mod_get_status, type, dma_id);
- if (ret < 0)
- return ret;
+ dev_dbg(dev, "%s[%d] %s[%d] -> %s[%d]\n",
+ rsnd_mod_name(*dma_mod), rsnd_mod_id(*dma_mod),
+ rsnd_mod_name(mod_from ? mod_from : &mem),
+ rsnd_mod_id (mod_from ? mod_from : &mem),
+ rsnd_mod_name(mod_to ? mod_to : &mem),
+ rsnd_mod_id (mod_to ? mod_to : &mem));
- dev_dbg(dev, "%s[%d] %s[%d] -> %s[%d]\n",
- rsnd_mod_name(*dma_mod), rsnd_mod_id(*dma_mod),
- rsnd_mod_name(mod_from), rsnd_mod_id(mod_from),
- rsnd_mod_name(mod_to), rsnd_mod_id(mod_to));
+ ret = attach(io, dma, mod_from, mod_to);
+ if (ret < 0)
+ return ret;
+
+ dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
+ dma->dst_addr = rsnd_dma_addr(io, mod_to, is_play, 0);
+ dma->mod_from = mod_from;
+ dma->mod_to = mod_to;
+
+ return 0;
+}
+
+int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
+ struct rsnd_mod **dma_mod)
+{
+ if (!(*dma_mod)) {
+ int ret = rsnd_dma_alloc(io, mod, dma_mod);
- ret = attach(io, dma, mod_from, mod_to);
if (ret < 0)
return ret;
-
- dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
- dma->dst_addr = rsnd_dma_addr(io, mod_to, is_play, 0);
- dma->mod_from = mod_from;
- dma->mod_to = mod_to;
}
- ret = rsnd_dai_connect(*dma_mod, io, type);
- if (ret < 0)
- return ret;
-
- return 0;
+ return rsnd_dai_connect(*dma_mod, io, (*dma_mod)->type);
}
int rsnd_dma_probe(struct rsnd_priv *priv)
@@ -866,5 +881,6 @@ int rsnd_dma_probe(struct rsnd_priv *priv)
priv->dma = dmac;
- return 0;
+ /* dummy mem mod for debug */
+ return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, NULL, 0, 0);
}
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index 1743ade3cc55..dbe54f024d68 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -44,8 +44,11 @@ struct rsnd_dvc {
struct rsnd_kctrl_cfg_s ren; /* Ramp Enable */
struct rsnd_kctrl_cfg_s rup; /* Ramp Rate Up */
struct rsnd_kctrl_cfg_s rdown; /* Ramp Rate Down */
+ u32 flags;
};
+#define KCTRL_INITIALIZED (1 << 0)
+
#define rsnd_dvc_get(priv, id) ((struct rsnd_dvc *)(priv->dvc) + id)
#define rsnd_dvc_nr(priv) ((priv)->dvc_nr)
@@ -58,33 +61,6 @@ struct rsnd_dvc {
((pos) = (struct rsnd_dvc *)(priv)->dvc + i); \
i++)
-static const char * const dvc_ramp_rate[] = {
- "128 dB/1 step", /* 00000 */
- "64 dB/1 step", /* 00001 */
- "32 dB/1 step", /* 00010 */
- "16 dB/1 step", /* 00011 */
- "8 dB/1 step", /* 00100 */
- "4 dB/1 step", /* 00101 */
- "2 dB/1 step", /* 00110 */
- "1 dB/1 step", /* 00111 */
- "0.5 dB/1 step", /* 01000 */
- "0.25 dB/1 step", /* 01001 */
- "0.125 dB/1 step", /* 01010 */
- "0.125 dB/2 steps", /* 01011 */
- "0.125 dB/4 steps", /* 01100 */
- "0.125 dB/8 steps", /* 01101 */
- "0.125 dB/16 steps", /* 01110 */
- "0.125 dB/32 steps", /* 01111 */
- "0.125 dB/64 steps", /* 10000 */
- "0.125 dB/128 steps", /* 10001 */
- "0.125 dB/256 steps", /* 10010 */
- "0.125 dB/512 steps", /* 10011 */
- "0.125 dB/1024 steps", /* 10100 */
- "0.125 dB/2048 steps", /* 10101 */
- "0.125 dB/4096 steps", /* 10110 */
- "0.125 dB/8192 steps", /* 10111 */
-};
-
static void rsnd_dvc_activation(struct rsnd_mod *mod)
{
rsnd_mod_write(mod, DVC_SWRSR, 0);
@@ -97,8 +73,9 @@ static void rsnd_dvc_halt(struct rsnd_mod *mod)
rsnd_mod_write(mod, DVC_SWRSR, 0);
}
-#define rsnd_dvc_get_vrpdr(dvc) (dvc->rup.val << 8 | dvc->rdown.val)
-#define rsnd_dvc_get_vrdbr(dvc) (0x3ff - (dvc->volume.val[0] >> 13))
+#define rsnd_dvc_get_vrpdr(dvc) (rsnd_kctrl_vals(dvc->rup) << 8 | \
+ rsnd_kctrl_vals(dvc->rdown))
+#define rsnd_dvc_get_vrdbr(dvc) (0x3ff - (rsnd_kctrl_valm(dvc->volume, 0) >> 13))
static void rsnd_dvc_volume_parameter(struct rsnd_dai_stream *io,
struct rsnd_mod *mod)
@@ -108,12 +85,12 @@ static void rsnd_dvc_volume_parameter(struct rsnd_dai_stream *io,
int i;
/* Enable Ramp */
- if (dvc->ren.val)
+ if (rsnd_kctrl_vals(dvc->ren))
for (i = 0; i < RSND_MAX_CHANNELS; i++)
- val[i] = dvc->volume.cfg.max;
+ val[i] = rsnd_kctrl_max(dvc->volume);
else
for (i = 0; i < RSND_MAX_CHANNELS; i++)
- val[i] = dvc->volume.val[i];
+ val[i] = rsnd_kctrl_valm(dvc->volume, i);
/* Enable Digital Volume */
rsnd_mod_write(mod, DVC_VOL0R, val[0]);
@@ -143,7 +120,7 @@ static void rsnd_dvc_volume_init(struct rsnd_dai_stream *io,
dvucr |= 0x101;
/* Enable Ramp */
- if (dvc->ren.val) {
+ if (rsnd_kctrl_vals(dvc->ren)) {
dvucr |= 0x10;
/*
@@ -185,10 +162,10 @@ static void rsnd_dvc_volume_update(struct rsnd_dai_stream *io,
u32 vrdbr = 0;
int i;
- for (i = 0; i < dvc->mute.cfg.size; i++)
- zcmcr |= (!!dvc->mute.cfg.val[i]) << i;
+ for (i = 0; i < rsnd_kctrl_size(dvc->mute); i++)
+ zcmcr |= (!!rsnd_kctrl_valm(dvc->mute, i)) << i;
- if (dvc->ren.val) {
+ if (rsnd_kctrl_vals(dvc->ren)) {
vrpdr = rsnd_dvc_get_vrpdr(dvc);
vrdbr = rsnd_dvc_get_vrdbr(dvc);
}
@@ -254,6 +231,9 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
int channels = rsnd_rdai_channels_get(rdai);
int ret;
+ if (rsnd_flags_has(dvc, KCTRL_INITIALIZED))
+ return 0;
+
/* Volume */
ret = rsnd_kctrl_new_m(mod, io, rtd,
is_play ?
@@ -292,7 +272,8 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
rsnd_kctrl_accept_anytime,
rsnd_dvc_volume_update,
&dvc->rup,
- dvc_ramp_rate);
+ volume_ramp_rate,
+ VOLUME_RAMP_MAX_DVC);
if (ret < 0)
return ret;
@@ -302,11 +283,14 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
rsnd_kctrl_accept_anytime,
rsnd_dvc_volume_update,
&dvc->rdown,
- dvc_ramp_rate);
+ volume_ramp_rate,
+ VOLUME_RAMP_MAX_DVC);
if (ret < 0)
return ret;
+ rsnd_flags_set(dvc, KCTRL_INITIALIZED);
+
return 0;
}
diff --git a/sound/soc/sh/rcar/mix.c b/sound/soc/sh/rcar/mix.c
index 6c4826c189a4..7998380766f6 100644
--- a/sound/soc/sh/rcar/mix.c
+++ b/sound/soc/sh/rcar/mix.c
@@ -7,6 +7,33 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
+/*
+ * CTUn MIXn
+ * +------+ +------+
+ * [SRC3 / SRC6] -> |CTU n0| -> |MIX n0| ->
+ * [SRC4 / SRC9] -> |CTU n1| -> |MIX n1| ->
+ * [SRC0 / SRC1] -> |CTU n2| -> |MIX n2| ->
+ * [SRC2 / SRC5] -> |CTU n3| -> |MIX n3| ->
+ * +------+ +------+
+ *
+ * ex)
+ * DAI0 : playback = <&src0 &ctu02 &mix0 &dvc0 &ssi0>;
+ * DAI1 : playback = <&src2 &ctu03 &mix0 &dvc0 &ssi0>;
+ *
+ * MIX Volume
+ * amixer set "MIX",0 100% // DAI0 Volume
+ * amixer set "MIX",1 100% // DAI1 Volume
+ *
+ * Volume Ramp
+ * amixer set "MIX Ramp Up Rate" "0.125 dB/1 step"
+ * amixer set "MIX Ramp Down Rate" "4 dB/1 step"
+ * amixer set "MIX Ramp" on
+ * aplay xxx.wav &
+ * amixer set "MIX",0 80% // DAI0 Volume Down
+ * amixer set "MIX",1 100% // DAI1 Volume Up
+ */
+
#include "rsnd.h"
#define MIX_NAME_SIZE 16
@@ -14,8 +41,27 @@
struct rsnd_mix {
struct rsnd_mod mod;
+ struct rsnd_kctrl_cfg_s volumeA; /* MDBAR */
+ struct rsnd_kctrl_cfg_s volumeB; /* MDBBR */
+ struct rsnd_kctrl_cfg_s volumeC; /* MDBCR */
+ struct rsnd_kctrl_cfg_s volumeD; /* MDBDR */
+ struct rsnd_kctrl_cfg_s ren; /* Ramp Enable */
+ struct rsnd_kctrl_cfg_s rup; /* Ramp Rate Up */
+ struct rsnd_kctrl_cfg_s rdw; /* Ramp Rate Down */
+ u32 flags;
};
+#define ONCE_KCTRL_INITIALIZED (1 << 0)
+#define HAS_VOLA (1 << 1)
+#define HAS_VOLB (1 << 2)
+#define HAS_VOLC (1 << 3)
+#define HAS_VOLD (1 << 4)
+
+#define VOL_MAX 0x3ff
+
+#define rsnd_mod_to_mix(_mod) \
+ container_of((_mod), struct rsnd_mix, mod)
+
#define rsnd_mix_get(priv, id) ((struct rsnd_mix *)(priv->mix) + id)
#define rsnd_mix_nr(priv) ((priv)->mix_nr)
#define for_each_rsnd_mix(pos, priv, i) \
@@ -36,26 +82,43 @@ static void rsnd_mix_halt(struct rsnd_mod *mod)
rsnd_mod_write(mod, MIX_SWRSR, 0);
}
+#define rsnd_mix_get_vol(mix, X) \
+ rsnd_flags_has(mix, HAS_VOL##X) ? \
+ (VOL_MAX - rsnd_kctrl_vals(mix->volume##X)) : 0
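As a rough illustration (a sketch, not part of the patch), the macro above inverts the user-visible control value before it is written to the MDB*R registers, and only inputs whose HAS_VOL* flag was set contribute at all; a control left at its VOL_MAX default therefore writes 0:

	/* sketch only: assumes a mix whose A input was registered */
	rsnd_kctrl_vals(mix->volumeA) = VOL_MAX;	/* user maximum */
	rsnd_mix_get_vol(mix, A);	/* -> VOL_MAX - VOL_MAX = 0 */
	rsnd_mix_get_vol(mix, B);	/* HAS_VOLB unset -> 0 */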
static void rsnd_mix_volume_parameter(struct rsnd_dai_stream *io,
struct rsnd_mod *mod)
{
- rsnd_mod_write(mod, MIX_MDBAR, 0);
- rsnd_mod_write(mod, MIX_MDBBR, 0);
- rsnd_mod_write(mod, MIX_MDBCR, 0);
- rsnd_mod_write(mod, MIX_MDBDR, 0);
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_mix *mix = rsnd_mod_to_mix(mod);
+ u32 volA = rsnd_mix_get_vol(mix, A);
+ u32 volB = rsnd_mix_get_vol(mix, B);
+ u32 volC = rsnd_mix_get_vol(mix, C);
+ u32 volD = rsnd_mix_get_vol(mix, D);
+
+ dev_dbg(dev, "MIX A/B/C/D = %02x/%02x/%02x/%02x\n",
+ volA, volB, volC, volD);
+
+ rsnd_mod_write(mod, MIX_MDBAR, volA);
+ rsnd_mod_write(mod, MIX_MDBBR, volB);
+ rsnd_mod_write(mod, MIX_MDBCR, volC);
+ rsnd_mod_write(mod, MIX_MDBDR, volD);
}
static void rsnd_mix_volume_init(struct rsnd_dai_stream *io,
struct rsnd_mod *mod)
{
+ struct rsnd_mix *mix = rsnd_mod_to_mix(mod);
+
rsnd_mod_write(mod, MIX_MIXIR, 1);
/* General Information */
rsnd_mod_write(mod, MIX_ADINR, rsnd_runtime_channel_after_ctu(io));
/* volume step */
- rsnd_mod_write(mod, MIX_MIXMR, 0);
- rsnd_mod_write(mod, MIX_MVPDR, 0);
+ rsnd_mod_write(mod, MIX_MIXMR, rsnd_kctrl_vals(mix->ren));
+ rsnd_mod_write(mod, MIX_MVPDR, rsnd_kctrl_vals(mix->rup) << 8 |
+ rsnd_kctrl_vals(mix->rdw));
/* common volume parameter */
rsnd_mix_volume_parameter(io, mod);
@@ -109,11 +172,94 @@ static int rsnd_mix_quit(struct rsnd_mod *mod,
return 0;
}
+static int rsnd_mix_pcm_new(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_mix *mix = rsnd_mod_to_mix(mod);
+ struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io);
+ struct rsnd_kctrl_cfg_s *volume;
+ int ret;
+
+ switch (rsnd_mod_id(src_mod)) {
+ case 3:
+ case 6: /* MDBAR */
+ volume = &mix->volumeA;
+ rsnd_flags_set(mix, HAS_VOLA);
+ break;
+ case 4:
+ case 9: /* MDBBR */
+ volume = &mix->volumeB;
+ rsnd_flags_set(mix, HAS_VOLB);
+ break;
+ case 0:
+ case 1: /* MDBCR */
+ volume = &mix->volumeC;
+ rsnd_flags_set(mix, HAS_VOLC);
+ break;
+ case 2:
+ case 5: /* MDBDR */
+ volume = &mix->volumeD;
+ rsnd_flags_set(mix, HAS_VOLD);
+ break;
+ default:
+ dev_err(dev, "unknown SRC is connected\n");
+ return -EINVAL;
+ }
+
+ /* Volume */
+ ret = rsnd_kctrl_new_s(mod, io, rtd,
+ "MIX Playback Volume",
+ rsnd_kctrl_accept_anytime,
+ rsnd_mix_volume_update,
+ volume, VOL_MAX);
+ if (ret < 0)
+ return ret;
+ rsnd_kctrl_vals(*volume) = VOL_MAX;
+
+ if (rsnd_flags_has(mix, ONCE_KCTRL_INITIALIZED))
+ return ret;
+
+ /* Ramp */
+ ret = rsnd_kctrl_new_s(mod, io, rtd,
+ "MIX Ramp Switch",
+ rsnd_kctrl_accept_anytime,
+ rsnd_mix_volume_update,
+ &mix->ren, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = rsnd_kctrl_new_e(mod, io, rtd,
+ "MIX Ramp Up Rate",
+ rsnd_kctrl_accept_anytime,
+ rsnd_mix_volume_update,
+ &mix->rup,
+ volume_ramp_rate,
+ VOLUME_RAMP_MAX_MIX);
+ if (ret < 0)
+ return ret;
+
+ ret = rsnd_kctrl_new_e(mod, io, rtd,
+ "MIX Ramp Down Rate",
+ rsnd_kctrl_accept_anytime,
+ rsnd_mix_volume_update,
+ &mix->rdw,
+ volume_ramp_rate,
+ VOLUME_RAMP_MAX_MIX);
+
+ rsnd_flags_set(mix, ONCE_KCTRL_INITIALIZED);
+
+ return ret;
+}
+
static struct rsnd_mod_ops rsnd_mix_ops = {
.name = MIX_NAME,
.probe = rsnd_mix_probe_,
.init = rsnd_mix_init,
.quit = rsnd_mix_quit,
+ .pcm_new = rsnd_mix_pcm_new,
};
struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id)
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index c5de71f2dc8c..57cd2bc773c2 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -355,8 +355,9 @@ struct rsnd_mod {
#define __rsnd_mod_call_nolock_start 0
#define __rsnd_mod_call_nolock_stop 1
-#define rsnd_mod_to_priv(mod) ((mod)->priv)
-#define rsnd_mod_id(mod) ((mod) ? (mod)->id : -1)
+#define rsnd_mod_to_priv(mod) ((mod)->priv)
+#define rsnd_mod_name(mod) ((mod)->ops->name)
+#define rsnd_mod_id(mod) ((mod)->id)
#define rsnd_mod_power_on(mod) clk_enable((mod)->clk)
#define rsnd_mod_power_off(mod) clk_disable((mod)->clk)
#define rsnd_mod_get(ip) (&(ip)->mod)
@@ -371,7 +372,6 @@ int rsnd_mod_init(struct rsnd_priv *priv,
enum rsnd_mod_type type,
int id);
void rsnd_mod_quit(struct rsnd_mod *mod);
-char *rsnd_mod_name(struct rsnd_mod *mod);
struct dma_chan *rsnd_mod_dma_req(struct rsnd_dai_stream *io,
struct rsnd_mod *mod);
void rsnd_mod_interrupt(struct rsnd_mod *mod,
@@ -601,6 +601,10 @@ struct rsnd_priv {
#define rsnd_is_gen1(priv) (((priv)->flags & RSND_GEN_MASK) == RSND_GEN1)
#define rsnd_is_gen2(priv) (((priv)->flags & RSND_GEN_MASK) == RSND_GEN2)
+#define rsnd_flags_has(p, f) ((p)->flags & (f))
+#define rsnd_flags_set(p, f) ((p)->flags |= (f))
+#define rsnd_flags_del(p, f) ((p)->flags &= ~(f))
+
/*
* rsnd_kctrl
*/
@@ -627,6 +631,10 @@ struct rsnd_kctrl_cfg_s {
struct rsnd_kctrl_cfg cfg;
u32 val;
};
+#define rsnd_kctrl_size(x) ((x).cfg.size)
+#define rsnd_kctrl_max(x) ((x).cfg.max)
+#define rsnd_kctrl_valm(x, i) ((x).val[i]) /* = (x).cfg.val[i] */
+#define rsnd_kctrl_vals(x) ((x).val) /* = (x).cfg.val[0] */
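For orientation, a minimal sketch (not part of the patch) of how the new accessors are meant to be read: the _s variant names the single value of the cfg_s structure shown above, while the _m variant indexes a per-channel array, as in the DVC volume path:

	/* sketch only: dvc fields as used elsewhere in this series */
	if (rsnd_kctrl_vals(dvc->ren))			/* single value */
		val = rsnd_kctrl_max(dvc->volume);	/* cfg.max */
	else
		val = rsnd_kctrl_valm(dvc->volume, i);	/* per-channel i */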
int rsnd_kctrl_accept_anytime(struct rsnd_dai_stream *io);
int rsnd_kctrl_accept_runtime(struct rsnd_dai_stream *io);
@@ -652,9 +660,13 @@ int rsnd_kctrl_new(struct rsnd_mod *mod,
rsnd_kctrl_new(mod, io, rtd, name, accept, update, rsnd_kctrl_init_s(cfg), \
NULL, 1, max)
-#define rsnd_kctrl_new_e(mod, io, rtd, name, accept, update, cfg, texts) \
+#define rsnd_kctrl_new_e(mod, io, rtd, name, accept, update, cfg, texts, size) \
rsnd_kctrl_new(mod, io, rtd, name, accept, update, rsnd_kctrl_init_s(cfg), \
- texts, 1, ARRAY_SIZE(texts))
+ texts, 1, size)
+
+extern const char * const volume_ramp_rate[];
+#define VOLUME_RAMP_MAX_DVC (0x17 + 1)
+#define VOLUME_RAMP_MAX_MIX (0x0a + 1)
/*
* R-Car SSI
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index fffc07e72627..fece1e5f582f 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -101,9 +101,6 @@ struct rsnd_ssi {
#define rsnd_ssi_get(priv, id) ((struct rsnd_ssi *)(priv->ssi) + id)
#define rsnd_ssi_nr(priv) ((priv)->ssi_nr)
#define rsnd_mod_to_ssi(_mod) container_of((_mod), struct rsnd_ssi, mod)
-#define rsnd_ssi_flags_has(p, f) ((p)->flags & f)
-#define rsnd_ssi_flags_set(p, f) ((p)->flags |= f)
-#define rsnd_ssi_flags_del(p, f) ((p)->flags = ((p)->flags & ~f))
#define rsnd_ssi_is_parent(ssi, io) ((ssi) == rsnd_io_to_mod_ssip(io))
#define rsnd_ssi_is_multi_slave(mod, io) \
(rsnd_ssi_multi_slaves(io) & (1 << rsnd_mod_id(mod)))
@@ -116,10 +113,10 @@ int rsnd_ssi_hdmi_port(struct rsnd_dai_stream *io)
struct rsnd_mod *mod = rsnd_io_to_mod_ssi(io);
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- if (rsnd_ssi_flags_has(ssi, RSND_SSI_HDMI0))
+ if (rsnd_flags_has(ssi, RSND_SSI_HDMI0))
return RSND_SSI_HDMI_PORT0;
- if (rsnd_ssi_flags_has(ssi, RSND_SSI_HDMI1))
+ if (rsnd_flags_has(ssi, RSND_SSI_HDMI1))
return RSND_SSI_HDMI_PORT1;
return 0;
@@ -134,7 +131,7 @@ int rsnd_ssi_use_busif(struct rsnd_dai_stream *io)
if (!rsnd_ssi_is_dma_mode(mod))
return 0;
- if (!(rsnd_ssi_flags_has(ssi, RSND_SSI_NO_BUSIF)))
+ if (!(rsnd_flags_has(ssi, RSND_SSI_NO_BUSIF)))
use_busif = 1;
if (rsnd_io_to_mod_src(io))
use_busif = 1;
@@ -198,10 +195,15 @@ static u32 rsnd_ssi_run_mods(struct rsnd_dai_stream *io)
{
struct rsnd_mod *ssi_mod = rsnd_io_to_mod_ssi(io);
struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
+ u32 mods;
- return rsnd_ssi_multi_slaves_runtime(io) |
- 1 << rsnd_mod_id(ssi_mod) |
- 1 << rsnd_mod_id(ssi_parent_mod);
+ mods = rsnd_ssi_multi_slaves_runtime(io) |
+ 1 << rsnd_mod_id(ssi_mod);
+
+ if (ssi_parent_mod)
+ mods |= 1 << rsnd_mod_id(ssi_parent_mod);
+
+ return mods;
}
u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io)
@@ -601,15 +603,18 @@ static int rsnd_ssi_stop(struct rsnd_mod *mod,
if (rsnd_ssi_is_parent(mod, io))
return 0;
- /*
- * disable all IRQ,
- * and, wait all data was sent
- */
cr = ssi->cr_own |
ssi->cr_clk;
- rsnd_mod_write(mod, SSICR, cr | EN);
- rsnd_ssi_status_check(mod, DIRQ);
+ /*
+ * disable all IRQ,
+ * Playback: wait until all data has been sent
+ * Capture: it might not receive data. Do nothing
+ */
+ if (rsnd_io_is_play(io)) {
+ rsnd_mod_write(mod, SSICR, cr | EN);
+ rsnd_ssi_status_check(mod, DIRQ);
+ }
/*
* disable SSI,
@@ -793,13 +798,13 @@ static int rsnd_ssi_common_probe(struct rsnd_mod *mod,
* But it doesn't need to call request_irq() many times.
* Let's control it by RSND_SSI_PROBED flag.
*/
- if (!rsnd_ssi_flags_has(ssi, RSND_SSI_PROBED)) {
+ if (!rsnd_flags_has(ssi, RSND_SSI_PROBED)) {
ret = request_irq(ssi->irq,
rsnd_ssi_interrupt,
IRQF_SHARED,
dev_name(dev), mod);
- rsnd_ssi_flags_set(ssi, RSND_SSI_PROBED);
+ rsnd_flags_set(ssi, RSND_SSI_PROBED);
}
return ret;
@@ -817,10 +822,10 @@ static int rsnd_ssi_common_remove(struct rsnd_mod *mod,
return 0;
/* PIO will request IRQ again */
- if (rsnd_ssi_flags_has(ssi, RSND_SSI_PROBED)) {
+ if (rsnd_flags_has(ssi, RSND_SSI_PROBED)) {
free_irq(ssi->irq, mod);
- rsnd_ssi_flags_del(ssi, RSND_SSI_PROBED);
+ rsnd_flags_del(ssi, RSND_SSI_PROBED);
}
return 0;
@@ -1003,13 +1008,13 @@ static void __rsnd_ssi_parse_hdmi_connection(struct rsnd_priv *priv,
ssi = rsnd_mod_to_ssi(mod);
if (strstr(remote_ep->full_name, "hdmi0")) {
- rsnd_ssi_flags_set(ssi, RSND_SSI_HDMI0);
+ rsnd_flags_set(ssi, RSND_SSI_HDMI0);
dev_dbg(dev, "%s[%d] connected to HDMI0\n",
rsnd_mod_name(mod), rsnd_mod_id(mod));
}
if (strstr(remote_ep->full_name, "hdmi1")) {
- rsnd_ssi_flags_set(ssi, RSND_SSI_HDMI1);
+ rsnd_flags_set(ssi, RSND_SSI_HDMI1);
dev_dbg(dev, "%s[%d] connected to HDMI1\n",
rsnd_mod_name(mod), rsnd_mod_id(mod));
}
@@ -1042,7 +1047,7 @@ int __rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod)
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- return !!(rsnd_ssi_flags_has(ssi, RSND_SSI_CLK_PIN_SHARE));
+ return !!(rsnd_flags_has(ssi, RSND_SSI_CLK_PIN_SHARE));
}
static u32 *rsnd_ssi_get_status(struct rsnd_dai_stream *io,
@@ -1112,6 +1117,9 @@ int rsnd_ssi_probe(struct rsnd_priv *priv)
i = 0;
for_each_child_of_node(node, np) {
+ if (!of_device_is_available(np))
+ goto skip;
+
ssi = rsnd_ssi_get(priv, i);
snprintf(name, RSND_SSI_NAME_SIZE, "%s.%d",
@@ -1125,10 +1133,10 @@ int rsnd_ssi_probe(struct rsnd_priv *priv)
}
if (of_get_property(np, "shared-pin", NULL))
- rsnd_ssi_flags_set(ssi, RSND_SSI_CLK_PIN_SHARE);
+ rsnd_flags_set(ssi, RSND_SSI_CLK_PIN_SHARE);
if (of_get_property(np, "no-busif", NULL))
- rsnd_ssi_flags_set(ssi, RSND_SSI_NO_BUSIF);
+ rsnd_flags_set(ssi, RSND_SSI_NO_BUSIF);
ssi->irq = irq_of_parse_and_map(np, 0);
if (!ssi->irq) {
@@ -1148,7 +1156,7 @@ int rsnd_ssi_probe(struct rsnd_priv *priv)
of_node_put(np);
goto rsnd_ssi_probe_done;
}
-
+skip:
i++;
}
diff --git a/sound/soc/intel/common/sst-match-acpi.c b/sound/soc/soc-acpi.c
index 56d26f36a3cb..f21df28bc28e 100644
--- a/sound/soc/intel/common/sst-match-acpi.c
+++ b/sound/soc/soc-acpi.c
@@ -1,5 +1,5 @@
/*
- * sst_match_apci.c - SST (LPE) match for ACPI enumeration.
+ * soc-acpi.c - support for ACPI enumeration.
*
* Copyright (c) 2013-15, Intel Corporation.
*
@@ -14,9 +14,9 @@
* more details.
*/
-#include "sst-acpi.h"
+#include <sound/soc-acpi.h>
-static acpi_status sst_acpi_find_name(acpi_handle handle, u32 level,
+static acpi_status snd_soc_acpi_find_name(acpi_handle handle, u32 level,
void *context, void **ret)
{
struct acpi_device *adev;
@@ -34,12 +34,12 @@ static acpi_status sst_acpi_find_name(acpi_handle handle, u32 level,
return AE_OK;
}
-const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
+const char *snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
{
const char *name = NULL;
acpi_status status;
- status = acpi_get_devices(hid, sst_acpi_find_name, NULL,
+ status = acpi_get_devices(hid, snd_soc_acpi_find_name, NULL,
(void **)&name);
if (ACPI_FAILURE(status) || name[0] == '\0')
@@ -47,9 +47,9 @@ const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
return name;
}
-EXPORT_SYMBOL_GPL(sst_acpi_find_name_from_hid);
+EXPORT_SYMBOL_GPL(snd_soc_acpi_find_name_from_hid);
-static acpi_status sst_acpi_mach_match(acpi_handle handle, u32 level,
+static acpi_status snd_soc_acpi_mach_match(acpi_handle handle, u32 level,
void *context, void **ret)
{
unsigned long long sta;
@@ -63,26 +63,27 @@ static acpi_status sst_acpi_mach_match(acpi_handle handle, u32 level,
return AE_OK;
}
-bool sst_acpi_check_hid(const u8 hid[ACPI_ID_LEN])
+bool snd_soc_acpi_check_hid(const u8 hid[ACPI_ID_LEN])
{
acpi_status status;
bool found = false;
- status = acpi_get_devices(hid, sst_acpi_mach_match, &found, NULL);
+ status = acpi_get_devices(hid, snd_soc_acpi_mach_match, &found, NULL);
if (ACPI_FAILURE(status))
return false;
return found;
}
-EXPORT_SYMBOL_GPL(sst_acpi_check_hid);
+EXPORT_SYMBOL_GPL(snd_soc_acpi_check_hid);
-struct sst_acpi_mach *sst_acpi_find_machine(struct sst_acpi_mach *machines)
+struct snd_soc_acpi_mach *
+snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
{
- struct sst_acpi_mach *mach;
+ struct snd_soc_acpi_mach *mach;
for (mach = machines; mach->id[0]; mach++) {
- if (sst_acpi_check_hid(mach->id) == true) {
+ if (snd_soc_acpi_check_hid(mach->id) == true) {
if (mach->machine_quirk == NULL)
return mach;
@@ -92,14 +93,14 @@ struct sst_acpi_mach *sst_acpi_find_machine(struct sst_acpi_mach *machines)
}
return NULL;
}
-EXPORT_SYMBOL_GPL(sst_acpi_find_machine);
+EXPORT_SYMBOL_GPL(snd_soc_acpi_find_machine);
-static acpi_status sst_acpi_find_package(acpi_handle handle, u32 level,
- void *context, void **ret)
+static acpi_status snd_soc_acpi_find_package(acpi_handle handle, u32 level,
+ void *context, void **ret)
{
struct acpi_device *adev;
acpi_status status = AE_OK;
- struct sst_acpi_package_context *pkg_ctx = context;
+ struct snd_soc_acpi_package_context *pkg_ctx = context;
pkg_ctx->data_valid = false;
@@ -137,37 +138,38 @@ static acpi_status sst_acpi_find_package(acpi_handle handle, u32 level,
return AE_OK;
}
-bool sst_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
- struct sst_acpi_package_context *ctx)
+bool snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
+ struct snd_soc_acpi_package_context *ctx)
{
acpi_status status;
- status = acpi_get_devices(hid, sst_acpi_find_package, ctx, NULL);
+ status = acpi_get_devices(hid, snd_soc_acpi_find_package, ctx, NULL);
if (ACPI_FAILURE(status) || !ctx->data_valid)
return false;
return true;
}
-EXPORT_SYMBOL_GPL(sst_acpi_find_package_from_hid);
+EXPORT_SYMBOL_GPL(snd_soc_acpi_find_package_from_hid);
-struct sst_acpi_mach *sst_acpi_codec_list(void *arg)
+struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
{
- struct sst_acpi_mach *mach = arg;
- struct sst_codecs *codec_list = (struct sst_codecs *) mach->quirk_data;
+ struct snd_soc_acpi_mach *mach = arg;
+ struct snd_soc_acpi_codecs *codec_list =
+ (struct snd_soc_acpi_codecs *) mach->quirk_data;
int i;
if (mach->quirk_data == NULL)
return mach;
for (i = 0; i < codec_list->num_codecs; i++) {
- if (sst_acpi_check_hid(codec_list->codecs[i]) != true)
+ if (snd_soc_acpi_check_hid(codec_list->codecs[i]) != true)
return NULL;
}
return mach;
}
-EXPORT_SYMBOL_GPL(sst_acpi_codec_list);
+EXPORT_SYMBOL_GPL(snd_soc_acpi_codec_list);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Intel Common ACPI Match module");
+MODULE_DESCRIPTION("ALSA SoC ACPI module");
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 2cb8d3b55fbc..d9b1e6417fb9 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -30,8 +30,10 @@ static int soc_compr_open(struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0;
+ int ret = 0, __ret;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -44,7 +46,7 @@ static int soc_compr_open(struct snd_compr_stream *cstream)
}
}
- if (platform->driver->compr_ops && platform->driver->compr_ops->open) {
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->open) {
ret = platform->driver->compr_ops->open(cstream);
if (ret < 0) {
pr_err("compress asoc: can't open platform %s\n",
@@ -53,6 +55,27 @@ static int soc_compr_open(struct snd_compr_stream *cstream)
}
}
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->open)
+ continue;
+
+ __ret = component->driver->compr_ops->open(cstream);
+ if (__ret < 0) {
+ pr_err("compress asoc: can't open platform %s\n",
+ component->name);
+ ret = __ret;
+ }
+ }
+ if (ret < 0)
+ goto machine_err;
+
if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->startup) {
ret = rtd->dai_link->compr_ops->startup(cstream);
if (ret < 0) {
@@ -68,7 +91,21 @@ static int soc_compr_open(struct snd_compr_stream *cstream)
return 0;
machine_err:
- if (platform->driver->compr_ops && platform->driver->compr_ops->free)
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->free)
+ continue;
+
+ component->driver->compr_ops->free(cstream);
+ }
+
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->free)
platform->driver->compr_ops->free(cstream);
plat_err:
if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
@@ -84,11 +121,13 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
struct snd_pcm_substream *fe_substream =
fe->pcm->streams[cstream->direction].substream;
struct snd_soc_platform *platform = fe->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = fe->cpu_dai;
struct snd_soc_dpcm *dpcm;
struct snd_soc_dapm_widget_list *list;
int stream;
- int ret = 0;
+ int ret = 0, __ret;
if (cstream->direction == SND_COMPRESS_PLAYBACK)
stream = SNDRV_PCM_STREAM_PLAYBACK;
@@ -107,7 +146,7 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
}
- if (platform->driver->compr_ops && platform->driver->compr_ops->open) {
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->open) {
ret = platform->driver->compr_ops->open(cstream);
if (ret < 0) {
pr_err("compress asoc: can't open platform %s\n",
@@ -116,6 +155,27 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
}
}
+ for_each_rtdcom(fe, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->open)
+ continue;
+
+ __ret = component->driver->compr_ops->open(cstream);
+ if (__ret < 0) {
+ pr_err("compress asoc: can't open platform %s\n",
+ component->name);
+ ret = __ret;
+ }
+ }
+ if (ret < 0)
+ goto machine_err;
+
if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->startup) {
ret = fe->dai_link->compr_ops->startup(cstream);
if (ret < 0) {
@@ -167,7 +227,21 @@ fe_err:
if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
fe->dai_link->compr_ops->shutdown(cstream);
machine_err:
- if (platform->driver->compr_ops && platform->driver->compr_ops->free)
+ for_each_rtdcom(fe, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->free)
+ continue;
+
+ component->driver->compr_ops->free(cstream);
+ }
+
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->free)
platform->driver->compr_ops->free(cstream);
plat_err:
if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
@@ -210,6 +284,8 @@ static int soc_compr_free(struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
int stream;
@@ -235,7 +311,21 @@ static int soc_compr_free(struct snd_compr_stream *cstream)
if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->shutdown)
rtd->dai_link->compr_ops->shutdown(cstream);
- if (platform->driver->compr_ops && platform->driver->compr_ops->free)
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->free)
+ continue;
+
+ component->driver->compr_ops->free(cstream);
+ }
+
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->free)
platform->driver->compr_ops->free(cstream);
if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
@@ -267,6 +357,8 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *fe = cstream->private_data;
struct snd_soc_platform *platform = fe->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = fe->cpu_dai;
struct snd_soc_dpcm *dpcm;
int stream, ret;
@@ -304,9 +396,23 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
fe->dai_link->compr_ops->shutdown(cstream);
- if (platform->driver->compr_ops && platform->driver->compr_ops->free)
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->free)
platform->driver->compr_ops->free(cstream);
+ for_each_rtdcom(fe, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->free)
+ continue;
+
+ component->driver->compr_ops->free(cstream);
+ }
+
if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
cpu_dai->driver->cops->shutdown(cstream, cpu_dai);
@@ -319,18 +425,38 @@ static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0;
+ int ret = 0, __ret;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
- if (platform->driver->compr_ops && platform->driver->compr_ops->trigger) {
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->trigger) {
ret = platform->driver->compr_ops->trigger(cstream, cmd);
if (ret < 0)
goto out;
}
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->trigger)
+ continue;
+
+ __ret = component->driver->compr_ops->trigger(cstream, cmd);
+ if (__ret < 0)
+ ret = __ret;
+ }
+ if (ret < 0)
+ goto out;
+
if (cpu_dai->driver->cops && cpu_dai->driver->cops->trigger)
cpu_dai->driver->cops->trigger(cstream, cmd, cpu_dai);
@@ -353,16 +479,36 @@ static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
{
struct snd_soc_pcm_runtime *fe = cstream->private_data;
struct snd_soc_platform *platform = fe->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = fe->cpu_dai;
- int ret = 0, stream;
+ int ret = 0, __ret, stream;
if (cmd == SND_COMPR_TRIGGER_PARTIAL_DRAIN ||
cmd == SND_COMPR_TRIGGER_DRAIN) {
- if (platform->driver->compr_ops &&
+ if (platform &&
+ platform->driver->compr_ops &&
platform->driver->compr_ops->trigger)
return platform->driver->compr_ops->trigger(cstream,
cmd);
+
+ for_each_rtdcom(fe, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->trigger)
+ continue;
+
+ __ret = component->driver->compr_ops->trigger(cstream, cmd);
+ if (__ret < 0)
+ ret = __ret;
+ }
+ return ret;
}
if (cstream->direction == SND_COMPRESS_PLAYBACK)
@@ -379,12 +525,30 @@ static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
goto out;
}
- if (platform->driver->compr_ops && platform->driver->compr_ops->trigger) {
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->trigger) {
ret = platform->driver->compr_ops->trigger(cstream, cmd);
if (ret < 0)
goto out;
}
+ for_each_rtdcom(fe, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->trigger)
+ continue;
+
+ __ret = component->driver->compr_ops->trigger(cstream, cmd);
+ if (__ret < 0)
+ ret = __ret;
+ }
+ if (ret < 0)
+ goto out;
+
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
ret = dpcm_be_dai_trigger(fe, stream, cmd);
@@ -415,8 +579,10 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0;
+ int ret = 0, __ret;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -432,12 +598,30 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
goto err;
}
- if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
ret = platform->driver->compr_ops->set_params(cstream, params);
if (ret < 0)
goto err;
}
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->set_params)
+ continue;
+
+ __ret = component->driver->compr_ops->set_params(cstream, params);
+ if (__ret < 0)
+ ret = __ret;
+ }
+ if (ret < 0)
+ goto err;
+
if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->set_params) {
ret = rtd->dai_link->compr_ops->set_params(cstream);
if (ret < 0)
@@ -471,8 +655,10 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
struct snd_pcm_substream *fe_substream =
fe->pcm->streams[cstream->direction].substream;
struct snd_soc_platform *platform = fe->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = fe->cpu_dai;
- int ret = 0, stream;
+ int ret = 0, __ret, stream;
if (cstream->direction == SND_COMPRESS_PLAYBACK)
stream = SNDRV_PCM_STREAM_PLAYBACK;
@@ -487,12 +673,30 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
goto out;
}
- if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
ret = platform->driver->compr_ops->set_params(cstream, params);
if (ret < 0)
goto out;
}
+ for_each_rtdcom(fe, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->set_params)
+ continue;
+
+ __ret = component->driver->compr_ops->set_params(cstream, params);
+ if (__ret < 0)
+ ret = __ret;
+ }
+ if (ret < 0)
+ goto out;
+
if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->set_params) {
ret = fe->dai_link->compr_ops->set_params(cstream);
if (ret < 0)
@@ -531,8 +735,10 @@ static int soc_compr_get_params(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0;
+ int ret = 0, __ret;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -542,8 +748,27 @@ static int soc_compr_get_params(struct snd_compr_stream *cstream,
goto err;
}
- if (platform->driver->compr_ops && platform->driver->compr_ops->get_params)
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->get_params) {
ret = platform->driver->compr_ops->get_params(cstream, params);
+ if (ret < 0)
+ goto err;
+ }
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->get_params)
+ continue;
+
+ __ret = component->driver->compr_ops->get_params(cstream, params);
+ if (__ret < 0)
+ ret = __ret;
+ }
err:
mutex_unlock(&rtd->pcm_mutex);
@@ -555,13 +780,35 @@ static int soc_compr_get_caps(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
- int ret = 0;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
+ int ret = 0, __ret;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
- if (platform->driver->compr_ops && platform->driver->compr_ops->get_caps)
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->get_caps) {
ret = platform->driver->compr_ops->get_caps(cstream, caps);
+ if (ret < 0)
+ goto err;
+ }
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->get_caps)
+ continue;
+
+ __ret = component->driver->compr_ops->get_caps(cstream, caps);
+ if (__ret < 0)
+ ret = __ret;
+ }
+
+err:
mutex_unlock(&rtd->pcm_mutex);
return ret;
}
@@ -571,13 +818,35 @@ static int soc_compr_get_codec_caps(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
- int ret = 0;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
+ int ret = 0, __ret;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
- if (platform->driver->compr_ops && platform->driver->compr_ops->get_codec_caps)
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->get_codec_caps) {
ret = platform->driver->compr_ops->get_codec_caps(cstream, codec);
+ if (ret < 0)
+ goto err;
+ }
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->get_codec_caps)
+ continue;
+
+ __ret = component->driver->compr_ops->get_codec_caps(cstream, codec);
+ if (__ret < 0)
+ ret = __ret;
+ }
+
+err:
mutex_unlock(&rtd->pcm_mutex);
return ret;
}
@@ -586,8 +855,10 @@ static int soc_compr_ack(struct snd_compr_stream *cstream, size_t bytes)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0;
+ int ret = 0, __ret;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -597,8 +868,27 @@ static int soc_compr_ack(struct snd_compr_stream *cstream, size_t bytes)
goto err;
}
- if (platform->driver->compr_ops && platform->driver->compr_ops->ack)
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->ack) {
ret = platform->driver->compr_ops->ack(cstream, bytes);
+ if (ret < 0)
+ goto err;
+ }
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->ack)
+ continue;
+
+ __ret = component->driver->compr_ops->ack(cstream, bytes);
+ if (__ret < 0)
+ ret = __ret;
+ }
err:
mutex_unlock(&rtd->pcm_mutex);
@@ -610,7 +900,9 @@ static int soc_compr_pointer(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
- int ret = 0;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
+ int ret = 0, __ret;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -618,9 +910,29 @@ static int soc_compr_pointer(struct snd_compr_stream *cstream,
if (cpu_dai->driver->cops && cpu_dai->driver->cops->pointer)
cpu_dai->driver->cops->pointer(cstream, tstamp, cpu_dai);
- if (platform->driver->compr_ops && platform->driver->compr_ops->pointer)
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->pointer) {
ret = platform->driver->compr_ops->pointer(cstream, tstamp);
+ if (ret < 0)
+ goto err;
+ }
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->pointer)
+ continue;
+ __ret = component->driver->compr_ops->pointer(cstream, tstamp);
+ if (__ret < 0)
+ ret = __ret;
+ }
+
+err:
mutex_unlock(&rtd->pcm_mutex);
return ret;
}
@@ -630,13 +942,34 @@ static int soc_compr_copy(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
- int ret = 0;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
+ int ret = 0, __ret;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
- if (platform->driver->compr_ops && platform->driver->compr_ops->copy)
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->copy) {
ret = platform->driver->compr_ops->copy(cstream, buf, count);
+ if (ret < 0)
+ goto err;
+ }
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->copy)
+ continue;
+ __ret = component->driver->compr_ops->copy(cstream, buf, count);
+ if (__ret < 0)
+ ret = __ret;
+ }
+err:
mutex_unlock(&rtd->pcm_mutex);
return ret;
}
@@ -646,8 +979,10 @@ static int soc_compr_set_metadata(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0;
+ int ret = 0, __ret;
if (cpu_dai->driver->cops && cpu_dai->driver->cops->set_metadata) {
ret = cpu_dai->driver->cops->set_metadata(cstream, metadata, cpu_dai);
@@ -655,8 +990,27 @@ static int soc_compr_set_metadata(struct snd_compr_stream *cstream,
return ret;
}
- if (platform->driver->compr_ops && platform->driver->compr_ops->set_metadata)
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->set_metadata) {
ret = platform->driver->compr_ops->set_metadata(cstream, metadata);
+ if (ret < 0)
+ return ret;
+ }
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->set_metadata)
+ continue;
+
+ __ret = component->driver->compr_ops->set_metadata(cstream, metadata);
+ if (__ret < 0)
+ ret = __ret;
+ }
return ret;
}
@@ -666,8 +1020,10 @@ static int soc_compr_get_metadata(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0;
+ int ret = 0, __ret;
if (cpu_dai->driver->cops && cpu_dai->driver->cops->get_metadata) {
ret = cpu_dai->driver->cops->get_metadata(cstream, metadata, cpu_dai);
@@ -675,8 +1031,27 @@ static int soc_compr_get_metadata(struct snd_compr_stream *cstream,
return ret;
}
- if (platform->driver->compr_ops && platform->driver->compr_ops->get_metadata)
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->get_metadata) {
ret = platform->driver->compr_ops->get_metadata(cstream, metadata);
+ if (ret < 0)
+ return ret;
+ }
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->get_metadata)
+ continue;
+
+ __ret = component->driver->compr_ops->get_metadata(cstream, metadata);
+ if (__ret < 0)
+ ret = __ret;
+ }
return ret;
}
@@ -723,6 +1098,8 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_compr *compr;
@@ -798,9 +1175,25 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
}
+
/* Add copy callback for not memory mapped DSPs */
- if (platform->driver->compr_ops && platform->driver->compr_ops->copy)
+ if (platform && platform->driver->compr_ops && platform->driver->compr_ops->copy)
+ compr->ops->copy = soc_compr_copy;
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->compr_ops ||
+ !component->driver->compr_ops->copy)
+ continue;
+
compr->ops->copy = soc_compr_copy;
+ }
+
mutex_init(&compr->lock);
ret = snd_compress_new(rtd->card->snd_card, num, direction,
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index fee4b0ef5566..c0edac80df34 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -614,6 +614,8 @@ struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
}
EXPORT_SYMBOL_GPL(snd_soc_get_dai_substream);
+static const struct snd_soc_ops null_snd_soc_ops;
+
static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
struct snd_soc_card *card, struct snd_soc_dai_link *dai_link)
{
@@ -626,6 +628,9 @@ static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
INIT_LIST_HEAD(&rtd->component_list);
rtd->card = card;
rtd->dai_link = dai_link;
+ if (!rtd->dai_link->ops)
+ rtd->dai_link->ops = &null_snd_soc_ops;
+
rtd->codec_dais = kzalloc(sizeof(struct snd_soc_dai *) *
dai_link->num_codecs,
GFP_KERNEL);
@@ -639,8 +644,7 @@ static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
static void soc_free_pcm_runtime(struct snd_soc_pcm_runtime *rtd)
{
- if (rtd && rtd->codec_dais)
- kfree(rtd->codec_dais);
+ kfree(rtd->codec_dais);
snd_soc_rtdcom_del_all(rtd);
kfree(rtd);
}
@@ -2632,7 +2636,7 @@ EXPORT_SYMBOL_GPL(snd_soc_add_dai_controls);
int snd_soc_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id,
unsigned int freq, int dir)
{
- if (dai->driver && dai->driver->ops->set_sysclk)
+ if (dai->driver->ops->set_sysclk)
return dai->driver->ops->set_sysclk(dai, clk_id, freq, dir);
return snd_soc_component_set_sysclk(dai->component, clk_id, 0,
@@ -2700,7 +2704,7 @@ EXPORT_SYMBOL_GPL(snd_soc_component_set_sysclk);
int snd_soc_dai_set_clkdiv(struct snd_soc_dai *dai,
int div_id, int div)
{
- if (dai->driver && dai->driver->ops->set_clkdiv)
+ if (dai->driver->ops->set_clkdiv)
return dai->driver->ops->set_clkdiv(dai, div_id, div);
else
return -EINVAL;
@@ -2720,7 +2724,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_clkdiv);
int snd_soc_dai_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
unsigned int freq_in, unsigned int freq_out)
{
- if (dai->driver && dai->driver->ops->set_pll)
+ if (dai->driver->ops->set_pll)
return dai->driver->ops->set_pll(dai, pll_id, source,
freq_in, freq_out);
@@ -2786,7 +2790,7 @@ EXPORT_SYMBOL_GPL(snd_soc_component_set_pll);
*/
int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
{
- if (dai->driver && dai->driver->ops->set_bclk_ratio)
+ if (dai->driver->ops->set_bclk_ratio)
return dai->driver->ops->set_bclk_ratio(dai, ratio);
else
return -EINVAL;
@@ -2796,7 +2800,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_bclk_ratio);
/**
* snd_soc_dai_set_fmt - configure DAI hardware audio format.
* @dai: DAI
- * @fmt: SND_SOC_DAIFMT_ format value.
+ * @fmt: SND_SOC_DAIFMT_* format value.
*
* Configures the DAI hardware format and clocking.
*/
@@ -2860,7 +2864,7 @@ static int snd_soc_xlate_tdm_slot_mask(unsigned int slots,
int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
{
- if (dai->driver && dai->driver->ops->xlate_tdm_slot_mask)
+ if (dai->driver->ops->xlate_tdm_slot_mask)
dai->driver->ops->xlate_tdm_slot_mask(slots,
&tx_mask, &rx_mask);
else
@@ -2869,7 +2873,7 @@ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
dai->tx_mask = tx_mask;
dai->rx_mask = rx_mask;
- if (dai->driver && dai->driver->ops->set_tdm_slot)
+ if (dai->driver->ops->set_tdm_slot)
return dai->driver->ops->set_tdm_slot(dai, tx_mask, rx_mask,
slots, slot_width);
else
@@ -2893,7 +2897,7 @@ int snd_soc_dai_set_channel_map(struct snd_soc_dai *dai,
unsigned int tx_num, unsigned int *tx_slot,
unsigned int rx_num, unsigned int *rx_slot)
{
- if (dai->driver && dai->driver->ops->set_channel_map)
+ if (dai->driver->ops->set_channel_map)
return dai->driver->ops->set_channel_map(dai, tx_num, tx_slot,
rx_num, rx_slot);
else
@@ -2910,7 +2914,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_channel_map);
*/
int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate)
{
- if (dai->driver && dai->driver->ops->set_tristate)
+ if (dai->driver->ops->set_tristate)
return dai->driver->ops->set_tristate(dai, tristate);
else
return -EINVAL;
@@ -3250,6 +3254,30 @@ static int snd_soc_component_stream_event(struct snd_soc_dapm_context *dapm,
return component->driver->stream_event(component, event);
}
+static int snd_soc_component_drv_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ if (component->driver->pcm_new)
+ return component->driver->pcm_new(rtd);
+
+ return 0;
+}
+
+static void snd_soc_component_drv_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
+{
+ if (component->driver->pcm_free)
+ component->driver->pcm_free(pcm);
+}
+
+static int snd_soc_component_set_bias_level(struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level)
+{
+ struct snd_soc_component *component = dapm->component;
+
+ return component->driver->set_bias_level(component, level);
+}
+
static int snd_soc_component_initialize(struct snd_soc_component *component,
const struct snd_soc_component_driver *driver, struct device *dev)
{
@@ -3270,16 +3298,21 @@ static int snd_soc_component_initialize(struct snd_soc_component *component,
component->set_sysclk = component->driver->set_sysclk;
component->set_pll = component->driver->set_pll;
component->set_jack = component->driver->set_jack;
+ component->pcm_new = snd_soc_component_drv_pcm_new;
+ component->pcm_free = snd_soc_component_drv_pcm_free;
dapm = snd_soc_component_get_dapm(component);
dapm->dev = dev;
dapm->component = component;
dapm->bias_level = SND_SOC_BIAS_OFF;
- dapm->idle_bias_off = true;
+ dapm->idle_bias_off = !driver->idle_bias_on;
+ dapm->suspend_bias_off = driver->suspend_bias_off;
if (driver->seq_notifier)
dapm->seq_notifier = snd_soc_component_seq_notifier;
if (driver->stream_event)
dapm->stream_event = snd_soc_component_stream_event;
+ if (driver->set_bias_level)
+ dapm->set_bias_level = snd_soc_component_set_bias_level;
INIT_LIST_HEAD(&component->dai_list);
mutex_init(&component->io_mutex);
@@ -3371,19 +3404,49 @@ static void snd_soc_component_del_unlocked(struct snd_soc_component *component)
list_del(&component->list);
}
-int snd_soc_register_component(struct device *dev,
- const struct snd_soc_component_driver *component_driver,
- struct snd_soc_dai_driver *dai_drv,
- int num_dai)
+#define ENDIANNESS_MAP(name) \
+ (SNDRV_PCM_FMTBIT_##name##LE | SNDRV_PCM_FMTBIT_##name##BE)
+static u64 endianness_format_map[] = {
+ ENDIANNESS_MAP(S16_),
+ ENDIANNESS_MAP(U16_),
+ ENDIANNESS_MAP(S24_),
+ ENDIANNESS_MAP(U24_),
+ ENDIANNESS_MAP(S32_),
+ ENDIANNESS_MAP(U32_),
+ ENDIANNESS_MAP(S24_3),
+ ENDIANNESS_MAP(U24_3),
+ ENDIANNESS_MAP(S20_3),
+ ENDIANNESS_MAP(U20_3),
+ ENDIANNESS_MAP(S18_3),
+ ENDIANNESS_MAP(U18_3),
+ ENDIANNESS_MAP(FLOAT_),
+ ENDIANNESS_MAP(FLOAT64_),
+ ENDIANNESS_MAP(IEC958_SUBFRAME_),
+};
+
+/*
+ * Fix up the DAI formats for endianness: codecs don't actually see
+ * the endianness of the data, but we're using the CPU format
+ * definitions, which do need to include endianness, so we ensure that
+ * codec DAIs always have both big- and little-endian variants set.
+ */
+static void convert_endianness_formats(struct snd_soc_pcm_stream *stream)
{
- struct snd_soc_component *component;
- int ret;
+ int i;
- component = kzalloc(sizeof(*component), GFP_KERNEL);
- if (!component) {
- dev_err(dev, "ASoC: Failed to allocate memory\n");
- return -ENOMEM;
- }
+ for (i = 0; i < ARRAY_SIZE(endianness_format_map); i++)
+ if (stream->formats & endianness_format_map[i])
+ stream->formats |= endianness_format_map[i];
+}
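A minimal sketch (illustrative, not from the patch) of what the helper does to a stream that declared only a little-endian format:

	struct snd_soc_pcm_stream stream = {
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	};

	convert_endianness_formats(&stream);
	/* stream.formats now also contains SNDRV_PCM_FMTBIT_S16_BE,
	 * because ENDIANNESS_MAP(S16_) pairs the two variants */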
+
+int snd_soc_add_component(struct device *dev,
+ struct snd_soc_component *component,
+ const struct snd_soc_component_driver *component_driver,
+ struct snd_soc_dai_driver *dai_drv,
+ int num_dai)
+{
+ int ret;
+ int i;
ret = snd_soc_component_initialize(component, component_driver, dev);
if (ret)
@@ -3392,7 +3455,15 @@ int snd_soc_register_component(struct device *dev,
component->ignore_pmdown_time = true;
component->registered_as_component = true;
- ret = snd_soc_register_dais(component, dai_drv, num_dai, true);
+ if (component_driver->endianness) {
+ for (i = 0; i < num_dai; i++) {
+ convert_endianness_formats(&dai_drv[i].playback);
+ convert_endianness_formats(&dai_drv[i].capture);
+ }
+ }
+
+ ret = snd_soc_register_dais(component, dai_drv, num_dai,
+ !component_driver->non_legacy_dai_naming);
if (ret < 0) {
dev_err(dev, "ASoC: Failed to register DAIs: %d\n", ret);
goto err_cleanup;
@@ -3408,6 +3479,22 @@ err_free:
kfree(component);
return ret;
}
+EXPORT_SYMBOL_GPL(snd_soc_add_component);
+
+int snd_soc_register_component(struct device *dev,
+ const struct snd_soc_component_driver *component_driver,
+ struct snd_soc_dai_driver *dai_drv,
+ int num_dai)
+{
+ struct snd_soc_component *component;
+
+ component = kzalloc(sizeof(*component), GFP_KERNEL);
+ if (!component)
+ return -ENOMEM;
+
+ return snd_soc_add_component(dev, component, component_driver,
+ dai_drv, num_dai);
+}
EXPORT_SYMBOL_GPL(snd_soc_register_component);
/**
@@ -3448,6 +3535,32 @@ void snd_soc_unregister_component(struct device *dev)
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_component);
+struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
+ const char *driver_name)
+{
+ struct snd_soc_component *component;
+ struct snd_soc_component *ret;
+
+ ret = NULL;
+ mutex_lock(&client_mutex);
+ list_for_each_entry(component, &component_list, list) {
+ if (dev != component->dev)
+ continue;
+
+ if (driver_name &&
+ (driver_name != component->driver->name) &&
+ (strcmp(component->driver->name, driver_name) != 0))
+ continue;
+
+ ret = component;
+ break;
+ }
+ mutex_unlock(&client_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_lookup_component);
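For context, a hedged usage sketch of the new lookup helper; the driver name string here is purely hypothetical:

	struct snd_soc_component *component;

	component = snd_soc_lookup_component(dev, "example-codec-driver");
	if (!component)
		return -EPROBE_DEFER;	/* not registered yet */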
+
static int snd_soc_platform_drv_probe(struct snd_soc_component *component)
{
struct snd_soc_platform *platform = snd_soc_component_to_platform(component);
@@ -3462,6 +3575,26 @@ static void snd_soc_platform_drv_remove(struct snd_soc_component *component)
platform->driver->remove(platform);
}
+static int snd_soc_platform_drv_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_platform *platform = snd_soc_component_to_platform(component);
+
+ if (platform->driver->pcm_new)
+ return platform->driver->pcm_new(rtd);
+
+ return 0;
+}
+
+static void snd_soc_platform_drv_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
+{
+ struct snd_soc_platform *platform = snd_soc_component_to_platform(component);
+
+ if (platform->driver->pcm_free)
+ platform->driver->pcm_free(pcm);
+}
+
/**
* snd_soc_add_platform - Add a platform to the ASoC core
* @dev: The parent device for the platform
@@ -3485,6 +3618,10 @@ int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform,
platform->component.probe = snd_soc_platform_drv_probe;
if (platform_drv->remove)
platform->component.remove = snd_soc_platform_drv_remove;
+ if (platform_drv->pcm_new)
+ platform->component.pcm_new = snd_soc_platform_drv_pcm_new;
+ if (platform_drv->pcm_free)
+ platform->component.pcm_free = snd_soc_platform_drv_pcm_free;
#ifdef CONFIG_DEBUG_FS
platform->component.debugfs_prefix = "platform";
@@ -3582,39 +3719,6 @@ void snd_soc_unregister_platform(struct device *dev)
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_platform);
-static u64 codec_format_map[] = {
- SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE,
- SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE,
- SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE,
- SNDRV_PCM_FMTBIT_U24_LE | SNDRV_PCM_FMTBIT_U24_BE,
- SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE,
- SNDRV_PCM_FMTBIT_U32_LE | SNDRV_PCM_FMTBIT_U32_BE,
- SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_U24_3BE,
- SNDRV_PCM_FMTBIT_U24_3LE | SNDRV_PCM_FMTBIT_U24_3BE,
- SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE,
- SNDRV_PCM_FMTBIT_U20_3LE | SNDRV_PCM_FMTBIT_U20_3BE,
- SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S18_3BE,
- SNDRV_PCM_FMTBIT_U18_3LE | SNDRV_PCM_FMTBIT_U18_3BE,
- SNDRV_PCM_FMTBIT_FLOAT_LE | SNDRV_PCM_FMTBIT_FLOAT_BE,
- SNDRV_PCM_FMTBIT_FLOAT64_LE | SNDRV_PCM_FMTBIT_FLOAT64_BE,
- SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE
- | SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE,
-};
-
-/* Fix up the DAI formats for endianness: codecs don't actually see
- * the endianness of the data but we're using the CPU format
- * definitions which do need to include endianness so we ensure that
- * codec DAIs always have both big and little endian variants set.
- */
-static void fixup_codec_formats(struct snd_soc_pcm_stream *stream)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(codec_format_map); i++)
- if (stream->formats & codec_format_map[i])
- stream->formats |= codec_format_map[i];
-}
-
static int snd_soc_codec_drv_probe(struct snd_soc_component *component)
{
struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
@@ -3765,8 +3869,8 @@ int snd_soc_register_codec(struct device *dev,
codec->component.regmap = codec_drv->get_regmap(dev);
for (i = 0; i < num_dai; i++) {
- fixup_codec_formats(&dai_drv[i].playback);
- fixup_codec_formats(&dai_drv[i].capture);
+ convert_endianness_formats(&dai_drv[i].playback);
+ convert_endianness_formats(&dai_drv[i].capture);
}
ret = snd_soc_register_dais(&codec->component, dai_drv, num_dai, false);
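The new snd_soc_lookup_component() above walks the global component list under client_mutex and matches on the device pointer and, optionally, the driver name. A minimal sketch of how a caller might use it; the function name and deferral policy are illustrative, not taken from this patch:

	static int example_find_component(struct device *dev)
	{
		struct snd_soc_component *component;

		/* NULL driver_name: match on the struct device alone */
		component = snd_soc_lookup_component(dev, NULL);
		if (!component)
			return -EPROBE_DEFER;	/* not registered yet */

		dev_dbg(dev, "found ASoC component %s\n", component->name);
		return 0;
	}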
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index dcef67a9bd48..a10b21cfc31e 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2884,7 +2884,7 @@ int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
{
int i, r, ret = 0;
- mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);
+ mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
for (i = 0; i < num; i++) {
r = snd_soc_dapm_add_route(dapm, route);
if (r < 0) {
@@ -2915,7 +2915,7 @@ int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
{
int i;
- mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);
+ mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
for (i = 0; i < num; i++) {
snd_soc_dapm_del_route(dapm, route);
route++;
@@ -3681,7 +3681,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
substream.stream = SNDRV_PCM_STREAM_CAPTURE;
- if (source->driver->ops && source->driver->ops->startup) {
+ if (source->driver->ops->startup) {
ret = source->driver->ops->startup(&substream, source);
if (ret < 0) {
dev_err(source->dev,
@@ -3695,7 +3695,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
goto out;
substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
- if (sink->driver->ops && sink->driver->ops->startup) {
+ if (sink->driver->ops->startup) {
ret = sink->driver->ops->startup(&substream, sink);
if (ret < 0) {
dev_err(sink->dev,
@@ -3725,13 +3725,13 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
ret = 0;
source->active--;
- if (source->driver->ops && source->driver->ops->shutdown) {
+ if (source->driver->ops->shutdown) {
substream.stream = SNDRV_PCM_STREAM_CAPTURE;
source->driver->ops->shutdown(&substream, source);
}
sink->active--;
- if (sink->driver->ops && sink->driver->ops->shutdown) {
+ if (sink->driver->ops->shutdown) {
substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
sink->driver->ops->shutdown(&substream, sink);
}
@@ -3778,18 +3778,27 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
return 0;
}
-int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
- const struct snd_soc_pcm_stream *params,
- unsigned int num_params,
- struct snd_soc_dapm_widget *source,
- struct snd_soc_dapm_widget *sink)
+static void
+snd_soc_dapm_free_kcontrol(struct snd_soc_card *card,
+ unsigned long *private_value,
+ int num_params,
+ const char **w_param_text)
+{
+ int count;
+
+ devm_kfree(card->dev, (void *)*private_value);
+ for (count = 0 ; count < num_params; count++)
+ devm_kfree(card->dev, (void *)w_param_text[count]);
+ devm_kfree(card->dev, w_param_text);
+}
+
+static struct snd_kcontrol_new *
+snd_soc_dapm_alloc_kcontrol(struct snd_soc_card *card,
+ char *link_name,
+ const struct snd_soc_pcm_stream *params,
+ int num_params, const char **w_param_text,
+ unsigned long *private_value)
{
- struct snd_soc_dapm_widget template;
- struct snd_soc_dapm_widget *w;
- char *link_name;
- int ret, count;
- unsigned long private_value;
- const char **w_param_text;
struct soc_enum w_param_enum[] = {
SOC_ENUM_SINGLE(0, 0, 0, NULL),
};
@@ -3798,19 +3807,9 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
snd_soc_dapm_dai_link_get,
snd_soc_dapm_dai_link_put),
};
+ struct snd_kcontrol_new *kcontrol_news;
const struct snd_soc_pcm_stream *config = params;
-
- w_param_text = devm_kcalloc(card->dev, num_params,
- sizeof(char *), GFP_KERNEL);
- if (!w_param_text)
- return -ENOMEM;
-
- link_name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-%s",
- source->name, sink->name);
- if (!link_name) {
- ret = -ENOMEM;
- goto outfree_w_param;
- }
+ int count;
for (count = 0 ; count < num_params; count++) {
if (!config->stream_name) {
@@ -3821,57 +3820,94 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
devm_kasprintf(card->dev, GFP_KERNEL,
"Anonymous Configuration %d",
count);
- if (!w_param_text[count]) {
- ret = -ENOMEM;
- goto outfree_link_name;
- }
} else {
w_param_text[count] = devm_kmemdup(card->dev,
config->stream_name,
strlen(config->stream_name) + 1,
GFP_KERNEL);
- if (!w_param_text[count]) {
- ret = -ENOMEM;
- goto outfree_link_name;
- }
}
+ if (!w_param_text[count])
+ goto outfree_w_param;
config++;
}
+
w_param_enum[0].items = num_params;
w_param_enum[0].texts = w_param_text;
- memset(&template, 0, sizeof(template));
- template.reg = SND_SOC_NOPM;
- template.id = snd_soc_dapm_dai_link;
- template.name = link_name;
- template.event = snd_soc_dai_link_event;
- template.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD;
- template.num_kcontrols = 1;
- /* duplicate w_param_enum on heap so that memory persists */
- private_value =
+ *private_value =
(unsigned long) devm_kmemdup(card->dev,
(void *)(kcontrol_dai_link[0].private_value),
sizeof(struct soc_enum), GFP_KERNEL);
- if (!private_value) {
+ if (!*private_value) {
dev_err(card->dev, "ASoC: Failed to create control for %s widget\n",
link_name);
- ret = -ENOMEM;
- goto outfree_link_name;
+ goto outfree_w_param;
}
- kcontrol_dai_link[0].private_value = private_value;
+ kcontrol_dai_link[0].private_value = *private_value;
/* duplicate kcontrol_dai_link on heap so that memory persists */
- template.kcontrol_news =
- devm_kmemdup(card->dev, &kcontrol_dai_link[0],
+ kcontrol_news = devm_kmemdup(card->dev, &kcontrol_dai_link[0],
sizeof(struct snd_kcontrol_new),
GFP_KERNEL);
- if (!template.kcontrol_news) {
+ if (!kcontrol_news) {
dev_err(card->dev, "ASoC: Failed to create control for %s widget\n",
link_name);
- ret = -ENOMEM;
- goto outfree_private_value;
+ goto outfree_w_param;
}
+ return kcontrol_news;
+
+outfree_w_param:
+ snd_soc_dapm_free_kcontrol(card, private_value, num_params, w_param_text);
+ return NULL;
+}
+
+int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
+ const struct snd_soc_pcm_stream *params,
+ unsigned int num_params,
+ struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_dapm_widget template;
+ struct snd_soc_dapm_widget *w;
+ const char **w_param_text;
+ unsigned long private_value;
+ char *link_name;
+ int ret;
+
+ link_name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-%s",
+ source->name, sink->name);
+ if (!link_name)
+ return -ENOMEM;
+
+ memset(&template, 0, sizeof(template));
+ template.reg = SND_SOC_NOPM;
+ template.id = snd_soc_dapm_dai_link;
+ template.name = link_name;
+ template.event = snd_soc_dai_link_event;
+ template.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD;
+ template.kcontrol_news = NULL;
+
+ /* allocate memory for control, only in case of multiple configs */
+ if (num_params > 1) {
+ w_param_text = devm_kcalloc(card->dev, num_params,
+ sizeof(char *), GFP_KERNEL);
+ if (!w_param_text) {
+ ret = -ENOMEM;
+ goto param_fail;
+ }
+ template.num_kcontrols = 1;
+ template.kcontrol_news =
+ snd_soc_dapm_alloc_kcontrol(card,
+ link_name, params, num_params,
+ w_param_text, &private_value);
+ if (!template.kcontrol_news) {
+ ret = -ENOMEM;
+ goto param_fail;
+ }
+ } else {
+ w_param_text = NULL;
+ }
dev_dbg(card->dev, "ASoC: adding %s widget\n", link_name);
w = snd_soc_dapm_new_control_unlocked(&card->dapm, &template);
@@ -3903,15 +3939,9 @@ outfree_w:
devm_kfree(card->dev, w);
outfree_kcontrol_news:
devm_kfree(card->dev, (void *)template.kcontrol_news);
-outfree_private_value:
- devm_kfree(card->dev, (void *)private_value);
-outfree_link_name:
+ snd_soc_dapm_free_kcontrol(card, &private_value, num_params, w_param_text);
+param_fail:
devm_kfree(card->dev, link_name);
-outfree_w_param:
- for (count = 0 ; count < num_params; count++)
- devm_kfree(card->dev, (void *)w_param_text[count]);
- devm_kfree(card->dev, w_param_text);
-
return ret;
}
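For reference, snd_soc_dapm_new_pcm() is driven by the params/num_params of a codec-to-codec DAI link, and with the rework above the extra enum kcontrol is only created when more than one configuration is supplied. A hypothetical machine-driver fragment (stream names and rates are illustrative only):

	static const struct snd_soc_pcm_stream example_link_params[] = {
		{
			.stream_name	= "8kHz mono",
			.formats	= SNDRV_PCM_FMTBIT_S16_LE,
			.rate_min	= 8000,
			.rate_max	= 8000,
			.channels_min	= 1,
			.channels_max	= 1,
		},
		{
			.stream_name	= "48kHz stereo",
			.formats	= SNDRV_PCM_FMTBIT_S16_LE,
			.rate_min	= 48000,
			.rate_max	= 48000,
			.channels_min	= 2,
			.channels_max	= 2,
		},
	};

	/* in the corresponding snd_soc_dai_link:
	 *	.params		= example_link_params,
	 *	.num_params	= ARRAY_SIZE(example_link_params),
	 * num_params > 1 makes the code above create the dai_link widget with
	 * an enum control for selecting between the configurations.
	 */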
diff --git a/sound/soc/soc-io.c b/sound/soc/soc-io.c
index 9b3939049cef..20340ade20a7 100644
--- a/sound/soc/soc-io.c
+++ b/sound/soc/soc-io.c
@@ -41,6 +41,20 @@ int snd_soc_component_read(struct snd_soc_component *component,
}
EXPORT_SYMBOL_GPL(snd_soc_component_read);
+unsigned int snd_soc_component_read32(struct snd_soc_component *component,
+ unsigned int reg)
+{
+ unsigned int val;
+ int ret;
+
+ ret = snd_soc_component_read(component, reg, &val);
+ if (ret < 0)
+ return -1;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_read32);
+
/**
* snd_soc_component_write() - Write register value
* @component: Component to write to
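The new snd_soc_component_read32() folds the error path into its return value: on a failed read it returns -1 (all ones) instead of reporting the error separately. A hedged sketch of a caller; the register define and expected ID are hypothetical:

	#define EXAMPLE_REG_CHIP_ID	0x00	/* hypothetical register */

	static bool example_chip_present(struct snd_soc_component *component)
	{
		unsigned int id;

		id = snd_soc_component_read32(component, EXAMPLE_REG_CHIP_ID);

		/* -1 (all ones) doubles as the error indication */
		return id != (unsigned int)-1 && id == 0x1234;
	}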
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 94b88b897c3b..8075856668c2 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -133,16 +133,25 @@ void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream)
*/
bool snd_soc_runtime_ignore_pmdown_time(struct snd_soc_pcm_runtime *rtd)
{
+ struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
int i;
bool ignore = true;
if (!rtd->pmdown_time || rtd->dai_link->ignore_pmdown_time)
return true;
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ ignore &= !component->driver->pmdown_time;
+ }
+
+ /* this will be removed */
for (i = 0; i < rtd->num_codecs; i++)
ignore &= rtd->codec_dais[i]->component->ignore_pmdown_time;
- return rtd->cpu_dai->component->ignore_pmdown_time && ignore;
+ return ignore;
}
/**
@@ -459,7 +468,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai *codec_dai;
const char *codec_dai_name = "multicodec";
- int i, ret = 0;
+ int i, ret = 0, __ret;
pinctrl_pm_select_default_state(cpu_dai->dev);
for (i = 0; i < rtd->num_codecs; i++)
@@ -474,7 +483,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
/* startup the audio subsystem */
- if (cpu_dai->driver->ops && cpu_dai->driver->ops->startup) {
+ if (cpu_dai->driver->ops->startup) {
ret = cpu_dai->driver->ops->startup(substream, cpu_dai);
if (ret < 0) {
dev_err(cpu_dai->dev, "ASoC: can't open interface"
@@ -483,7 +492,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
}
}
- if (platform->driver->ops && platform->driver->ops->open) {
+ if (platform && platform->driver->ops && platform->driver->ops->open) {
ret = platform->driver->ops->open(substream);
if (ret < 0) {
dev_err(platform->dev, "ASoC: can't open platform"
@@ -492,9 +501,32 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
}
}
+ ret = 0;
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->open)
+ continue;
+
+ __ret = component->driver->ops->open(substream);
+ if (__ret < 0) {
+ dev_err(component->dev,
+ "ASoC: can't open component %s: %d\n",
+ component->name, __ret);
+ ret = __ret;
+ }
+ }
+ if (ret < 0)
+ goto component_err;
+
for (i = 0; i < rtd->num_codecs; i++) {
codec_dai = rtd->codec_dais[i];
- if (codec_dai->driver->ops && codec_dai->driver->ops->startup) {
+ if (codec_dai->driver->ops->startup) {
ret = codec_dai->driver->ops->startup(substream,
codec_dai);
if (ret < 0) {
@@ -511,7 +543,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
codec_dai->rx_mask = 0;
}
- if (rtd->dai_link->ops && rtd->dai_link->ops->startup) {
+ if (rtd->dai_link->ops->startup) {
ret = rtd->dai_link->ops->startup(substream);
if (ret < 0) {
pr_err("ASoC: %s startup failed: %d\n",
@@ -585,7 +617,7 @@ dynamic:
return 0;
config_err:
- if (rtd->dai_link->ops && rtd->dai_link->ops->shutdown)
+ if (rtd->dai_link->ops->shutdown)
rtd->dai_link->ops->shutdown(substream);
machine_err:
@@ -598,7 +630,22 @@ codec_dai_err:
codec_dai->driver->ops->shutdown(substream, codec_dai);
}
- if (platform->driver->ops && platform->driver->ops->close)
+component_err:
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->close)
+ continue;
+
+ component->driver->ops->close(substream);
+ }
+
+ if (platform && platform->driver->ops && platform->driver->ops->close)
platform->driver->ops->close(substream);
platform_err:
@@ -692,12 +739,26 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
codec_dai->driver->ops->shutdown(substream, codec_dai);
}
- if (rtd->dai_link->ops && rtd->dai_link->ops->shutdown)
+ if (rtd->dai_link->ops->shutdown)
rtd->dai_link->ops->shutdown(substream);
- if (platform->driver->ops && platform->driver->ops->close)
+ if (platform && platform->driver->ops && platform->driver->ops->close)
platform->driver->ops->close(substream);
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->close)
+ continue;
+
+ component->driver->ops->close(substream);
+ }
+
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (snd_soc_runtime_ignore_pmdown_time(rtd)) {
/* powered down playback stream now */
@@ -745,13 +806,15 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai *codec_dai;
int i, ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
- if (rtd->dai_link->ops && rtd->dai_link->ops->prepare) {
+ if (rtd->dai_link->ops->prepare) {
ret = rtd->dai_link->ops->prepare(substream);
if (ret < 0) {
dev_err(rtd->card->dev, "ASoC: machine prepare error:"
@@ -760,7 +823,7 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
}
}
- if (platform->driver->ops && platform->driver->ops->prepare) {
+ if (platform && platform->driver->ops && platform->driver->ops->prepare) {
ret = platform->driver->ops->prepare(substream);
if (ret < 0) {
dev_err(platform->dev, "ASoC: platform prepare error:"
@@ -769,9 +832,28 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
}
}
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->prepare)
+ continue;
+
+ ret = component->driver->ops->prepare(substream);
+ if (ret < 0) {
+ dev_err(component->dev,
+ "ASoC: platform prepare error: %d\n", ret);
+ goto out;
+ }
+ }
+
for (i = 0; i < rtd->num_codecs; i++) {
codec_dai = rtd->codec_dais[i];
- if (codec_dai->driver->ops && codec_dai->driver->ops->prepare) {
+ if (codec_dai->driver->ops->prepare) {
ret = codec_dai->driver->ops->prepare(substream,
codec_dai);
if (ret < 0) {
@@ -783,7 +865,7 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
}
}
- if (cpu_dai->driver->ops && cpu_dai->driver->ops->prepare) {
+ if (cpu_dai->driver->ops->prepare) {
ret = cpu_dai->driver->ops->prepare(substream, cpu_dai);
if (ret < 0) {
dev_err(cpu_dai->dev,
@@ -829,7 +911,7 @@ int soc_dai_hw_params(struct snd_pcm_substream *substream,
{
int ret;
- if (dai->driver->ops && dai->driver->ops->hw_params) {
+ if (dai->driver->ops->hw_params) {
ret = dai->driver->ops->hw_params(substream, params, dai);
if (ret < 0) {
dev_err(dai->dev, "ASoC: can't set %s hw params: %d\n",
@@ -851,16 +933,13 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int i, ret = 0;
+ int i, ret = 0, __ret;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
-
- ret = soc_pcm_params_symmetry(substream, params);
- if (ret)
- goto out;
-
- if (rtd->dai_link->ops && rtd->dai_link->ops->hw_params) {
+ if (rtd->dai_link->ops->hw_params) {
ret = rtd->dai_link->ops->hw_params(substream, params);
if (ret < 0) {
dev_err(rtd->card->dev, "ASoC: machine hw_params"
@@ -915,7 +994,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
if (ret < 0)
goto interface_err;
- if (platform->driver->ops && platform->driver->ops->hw_params) {
+ if (platform && platform->driver->ops && platform->driver->ops->hw_params) {
ret = platform->driver->ops->hw_params(substream, params);
if (ret < 0) {
dev_err(platform->dev, "ASoC: %s hw params failed: %d\n",
@@ -924,18 +1003,62 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
}
}
+ ret = 0;
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->hw_params)
+ continue;
+
+ __ret = component->driver->ops->hw_params(substream, params);
+ if (__ret < 0) {
+ dev_err(component->dev,
+ "ASoC: %s hw params failed: %d\n",
+ component->name, __ret);
+ ret = __ret;
+ }
+ }
+ if (ret < 0)
+ goto component_err;
+
/* store the parameters for each DAIs */
cpu_dai->rate = params_rate(params);
cpu_dai->channels = params_channels(params);
cpu_dai->sample_bits =
snd_pcm_format_physical_width(params_format(params));
+ ret = soc_pcm_params_symmetry(substream, params);
+ if (ret)
+ goto component_err;
out:
mutex_unlock(&rtd->pcm_mutex);
return ret;
+component_err:
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->hw_free)
+ continue;
+
+ component->driver->ops->hw_free(substream);
+ }
+
+ if (platform && platform->driver->ops && platform->driver->ops->hw_free)
+ platform->driver->ops->hw_free(substream);
+
platform_err:
- if (cpu_dai->driver->ops && cpu_dai->driver->ops->hw_free)
+ if (cpu_dai->driver->ops->hw_free)
cpu_dai->driver->ops->hw_free(substream, cpu_dai);
interface_err:
@@ -944,12 +1067,12 @@ interface_err:
codec_err:
while (--i >= 0) {
struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
- if (codec_dai->driver->ops && codec_dai->driver->ops->hw_free)
+ if (codec_dai->driver->ops->hw_free)
codec_dai->driver->ops->hw_free(substream, codec_dai);
codec_dai->rate = 0;
}
- if (rtd->dai_link->ops && rtd->dai_link->ops->hw_free)
+ if (rtd->dai_link->ops->hw_free)
rtd->dai_link->ops->hw_free(substream);
mutex_unlock(&rtd->pcm_mutex);
@@ -963,6 +1086,8 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai *codec_dai;
bool playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
@@ -995,21 +1120,36 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
}
/* free any machine hw params */
- if (rtd->dai_link->ops && rtd->dai_link->ops->hw_free)
+ if (rtd->dai_link->ops->hw_free)
rtd->dai_link->ops->hw_free(substream);
/* free any DMA resources */
- if (platform->driver->ops && platform->driver->ops->hw_free)
+ if (platform && platform->driver->ops && platform->driver->ops->hw_free)
platform->driver->ops->hw_free(substream);
+ /* free any component resources */
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->hw_free)
+ continue;
+
+ component->driver->ops->hw_free(substream);
+ }
+
/* now free hw params for the DAIs */
for (i = 0; i < rtd->num_codecs; i++) {
codec_dai = rtd->codec_dais[i];
- if (codec_dai->driver->ops && codec_dai->driver->ops->hw_free)
+ if (codec_dai->driver->ops->hw_free)
codec_dai->driver->ops->hw_free(substream, codec_dai);
}
- if (cpu_dai->driver->ops && cpu_dai->driver->ops->hw_free)
+ if (cpu_dai->driver->ops->hw_free)
cpu_dai->driver->ops->hw_free(substream, cpu_dai);
mutex_unlock(&rtd->pcm_mutex);
@@ -1020,13 +1160,15 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai *codec_dai;
int i, ret;
for (i = 0; i < rtd->num_codecs; i++) {
codec_dai = rtd->codec_dais[i];
- if (codec_dai->driver->ops && codec_dai->driver->ops->trigger) {
+ if (codec_dai->driver->ops->trigger) {
ret = codec_dai->driver->ops->trigger(substream,
cmd, codec_dai);
if (ret < 0)
@@ -1034,19 +1176,35 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
}
}
- if (platform->driver->ops && platform->driver->ops->trigger) {
+ if (platform && platform->driver->ops && platform->driver->ops->trigger) {
ret = platform->driver->ops->trigger(substream, cmd);
if (ret < 0)
return ret;
}
- if (cpu_dai->driver->ops && cpu_dai->driver->ops->trigger) {
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->trigger)
+ continue;
+
+ ret = component->driver->ops->trigger(substream, cmd);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (cpu_dai->driver->ops->trigger) {
ret = cpu_dai->driver->ops->trigger(substream, cmd, cpu_dai);
if (ret < 0)
return ret;
}
- if (rtd->dai_link->ops && rtd->dai_link->ops->trigger) {
+ if (rtd->dai_link->ops->trigger) {
ret = rtd->dai_link->ops->trigger(substream, cmd);
if (ret < 0)
return ret;
@@ -1065,8 +1223,7 @@ static int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream,
for (i = 0; i < rtd->num_codecs; i++) {
codec_dai = rtd->codec_dais[i];
- if (codec_dai->driver->ops &&
- codec_dai->driver->ops->bespoke_trigger) {
+ if (codec_dai->driver->ops->bespoke_trigger) {
ret = codec_dai->driver->ops->bespoke_trigger(substream,
cmd, codec_dai);
if (ret < 0)
@@ -1074,7 +1231,7 @@ static int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream,
}
}
- if (cpu_dai->driver->ops && cpu_dai->driver->ops->bespoke_trigger) {
+ if (cpu_dai->driver->ops->bespoke_trigger) {
ret = cpu_dai->driver->ops->bespoke_trigger(substream, cmd, cpu_dai);
if (ret < 0)
return ret;
@@ -1090,6 +1247,8 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai *codec_dai;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -1098,15 +1257,31 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
snd_pcm_sframes_t codec_delay = 0;
int i;
- if (platform->driver->ops && platform->driver->ops->pointer)
+ if (platform && platform->driver->ops && platform->driver->ops->pointer)
offset = platform->driver->ops->pointer(substream);
- if (cpu_dai->driver->ops && cpu_dai->driver->ops->delay)
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->pointer)
+ continue;
+
+ /* FIXME: use 1st pointer */
+ offset = component->driver->ops->pointer(substream);
+ break;
+ }
+
+ if (cpu_dai->driver->ops->delay)
delay += cpu_dai->driver->ops->delay(substream, cpu_dai);
for (i = 0; i < rtd->num_codecs; i++) {
codec_dai = rtd->codec_dais[i];
- if (codec_dai->driver->ops && codec_dai->driver->ops->delay)
+ if (codec_dai->driver->ops->delay)
codec_delay = max(codec_delay,
codec_dai->driver->ops->delay(substream,
codec_dai));
@@ -2285,9 +2460,27 @@ static int soc_pcm_ioctl(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
- if (platform->driver->ops && platform->driver->ops->ioctl)
+ if (platform && platform->driver->ops && platform->driver->ops->ioctl)
return platform->driver->ops->ioctl(substream, cmd, arg);
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ /* ignore duplication for now */
+ if (platform && (component == &platform->component))
+ continue;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->ioctl)
+ continue;
+
+ /* FIXME: use 1st ioctl */
+ return component->driver->ops->ioctl(substream, cmd, arg);
+ }
+
return snd_pcm_lib_ioctl(substream, cmd, arg);
}
@@ -2632,12 +2825,163 @@ static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream)
return ret;
}
+static void soc_pcm_private_free(struct snd_pcm *pcm)
+{
+ struct snd_soc_pcm_runtime *rtd = pcm->private_data;
+ struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
+
+ for_each_rtdcom(rtd, rtdcom) {
+ /* need to sync the delayed work before releasing resources */
+ flush_delayed_work(&rtd->delayed_work);
+ component = rtdcom->component;
+
+ if (component->pcm_free)
+ component->pcm_free(component, pcm);
+ }
+}
+
+static int soc_rtdcom_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->ack)
+ continue;
+
+ /* FIXME. it returns 1st ack now */
+ return component->driver->ops->ack(substream);
+ }
+
+ return -EINVAL;
+}
+
+static int soc_rtdcom_copy_user(struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, void __user *buf,
+ unsigned long bytes)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->copy_user)
+ continue;
+
+ /* FIXME. it returns 1st copy now */
+ return component->driver->ops->copy_user(substream, channel,
+ pos, buf, bytes);
+ }
+
+ return -EINVAL;
+}
+
+static int soc_rtdcom_copy_kernel(struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, void *buf, unsigned long bytes)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->copy_kernel)
+ continue;
+
+ /* FIXME. it returns 1st copy now */
+ return component->driver->ops->copy_kernel(substream, channel,
+ pos, buf, bytes);
+ }
+
+ return -EINVAL;
+}
+
+static int soc_rtdcom_fill_silence(struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, unsigned long bytes)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->fill_silence)
+ continue;
+
+ /* FIXME. it returns 1st silence now */
+ return component->driver->ops->fill_silence(substream, channel,
+ pos, bytes);
+ }
+
+ return -EINVAL;
+}
+
+static struct page *soc_rtdcom_page(struct snd_pcm_substream *substream,
+ unsigned long offset)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
+ struct page *page;
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->page)
+ continue;
+
+ /* FIXME. it returns 1st page now */
+ page = component->driver->ops->page(substream, offset);
+ if (page)
+ return page;
+ }
+
+ return NULL;
+}
+
+static int soc_rtdcom_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
+
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ if (!component->driver->ops ||
+ !component->driver->ops->mmap)
+ continue;
+
+ /* FIXME. it returns 1st mmap now */
+ return component->driver->ops->mmap(substream, vma);
+ }
+
+ return -EINVAL;
+}
+
/* create a new pcm */
int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
{
struct snd_soc_platform *platform = rtd->platform;
struct snd_soc_dai *codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
struct snd_pcm *pcm;
char new_name[64];
int ret = 0, playback = 0, capture = 0;
@@ -2732,7 +3076,28 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
rtd->ops.ioctl = soc_pcm_ioctl;
}
- if (platform->driver->ops) {
+ for_each_rtdcom(rtd, rtdcom) {
+ const struct snd_pcm_ops *ops = rtdcom->component->driver->ops;
+
+ if (!ops)
+ continue;
+
+ if (ops->ack)
+ rtd->ops.ack = soc_rtdcom_ack;
+ if (ops->copy_user)
+ rtd->ops.copy_user = soc_rtdcom_copy_user;
+ if (ops->copy_kernel)
+ rtd->ops.copy_kernel = soc_rtdcom_copy_kernel;
+ if (ops->fill_silence)
+ rtd->ops.fill_silence = soc_rtdcom_fill_silence;
+ if (ops->page)
+ rtd->ops.page = soc_rtdcom_page;
+ if (ops->mmap)
+ rtd->ops.mmap = soc_rtdcom_mmap;
+ }
+
+ /* overwrite */
+ if (platform && platform->driver->ops) {
rtd->ops.ack = platform->driver->ops->ack;
rtd->ops.copy_user = platform->driver->ops->copy_user;
rtd->ops.copy_kernel = platform->driver->ops->copy_kernel;
@@ -2747,17 +3112,22 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
if (capture)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &rtd->ops);
- if (platform->driver->pcm_new) {
- ret = platform->driver->pcm_new(rtd);
+ for_each_rtdcom(rtd, rtdcom) {
+ component = rtdcom->component;
+
+ if (!component->pcm_new)
+ continue;
+
+ ret = component->pcm_new(component, rtd);
if (ret < 0) {
- dev_err(platform->dev,
+ dev_err(component->dev,
"ASoC: pcm constructor failed: %d\n",
ret);
return ret;
}
}
- pcm->private_free = platform->driver->pcm_free;
+ pcm->private_free = soc_pcm_private_free;
out:
dev_info(rtd->card->dev, "%s <-> %s mapping ok\n",
(rtd->num_codecs > 1) ? "multicodec" : rtd->codec_dai->name,
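The for_each_rtdcom() loops above mean a component registered through snd_soc_add_component() can now supply PCM ops directly, without wrapping itself in a snd_soc_platform. A minimal sketch of such a component driver; all names and the empty callbacks are illustrative only:

	static int example_pcm_open(struct snd_pcm_substream *substream)
	{
		/* constrain hw caps, request resources, ... */
		return 0;
	}

	static snd_pcm_uframes_t example_pcm_pointer(struct snd_pcm_substream *substream)
	{
		/* report the current DMA position */
		return 0;
	}

	static const struct snd_pcm_ops example_pcm_ops = {
		.open		= example_pcm_open,
		.pointer	= example_pcm_pointer,
	};

	static const struct snd_soc_component_driver example_component_driver = {
		.name		= "example-pcm",
		.ops		= &example_pcm_ops,
	};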
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c
index 1258bef4dcb3..d6f71a3406e9 100644
--- a/sound/soc/stm/stm32_sai.c
+++ b/sound/soc/stm/stm32_sai.c
@@ -16,6 +16,7 @@
* details.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -27,6 +28,16 @@
#include "stm32_sai.h"
+static LIST_HEAD(sync_providers);
+static DEFINE_MUTEX(sync_mutex);
+
+struct sync_provider {
+ struct list_head link;
+ struct device_node *node;
+ int (*sync_conf)(void *data, int synco);
+ void *data;
+};
+
static const struct stm32_sai_conf stm32_sai_conf_f4 = {
.version = SAI_STM32F4,
};
@@ -41,23 +52,143 @@ static const struct of_device_id stm32_sai_ids[] = {
{}
};
+static int stm32_sai_sync_conf_client(struct stm32_sai_data *sai, int synci)
+{
+ int ret;
+
+ /* Enable peripheral clock to allow GCR register access */
+ ret = clk_prepare_enable(sai->pclk);
+ if (ret) {
+ dev_err(&sai->pdev->dev, "failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ writel_relaxed(FIELD_PREP(SAI_GCR_SYNCIN_MASK, (synci - 1)), sai->base);
+
+ clk_disable_unprepare(sai->pclk);
+
+ return 0;
+}
+
+static int stm32_sai_sync_conf_provider(void *data, int synco)
+{
+ struct stm32_sai_data *sai = (struct stm32_sai_data *)data;
+ u32 prev_synco;
+ int ret;
+
+ /* Enable peripheral clock to allow GCR register access */
+ ret = clk_prepare_enable(sai->pclk);
+ if (ret) {
+ dev_err(&sai->pdev->dev, "failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(&sai->pdev->dev, "Set %s%s as synchro provider\n",
+ sai->pdev->dev.of_node->name,
+ synco == STM_SAI_SYNC_OUT_A ? "A" : "B");
+
+ prev_synco = FIELD_GET(SAI_GCR_SYNCOUT_MASK, readl_relaxed(sai->base));
+ if (prev_synco != STM_SAI_SYNC_OUT_NONE && synco != prev_synco) {
+ dev_err(&sai->pdev->dev, "%s%s already set as sync provider\n",
+ sai->pdev->dev.of_node->name,
+ prev_synco == STM_SAI_SYNC_OUT_A ? "A" : "B");
+ clk_disable_unprepare(sai->pclk);
+ return -EINVAL;
+ }
+
+ writel_relaxed(FIELD_PREP(SAI_GCR_SYNCOUT_MASK, synco), sai->base);
+
+ clk_disable_unprepare(sai->pclk);
+
+ return 0;
+}
+
+static int stm32_sai_set_sync_provider(struct device_node *np, int synco)
+{
+ struct sync_provider *provider;
+ int ret;
+
+ mutex_lock(&sync_mutex);
+ list_for_each_entry(provider, &sync_providers, link) {
+ if (provider->node == np) {
+ ret = provider->sync_conf(provider->data, synco);
+ mutex_unlock(&sync_mutex);
+ return ret;
+ }
+ }
+ mutex_unlock(&sync_mutex);
+
+ /* SAI sync provider not found */
+ return -ENODEV;
+}
+
+static int stm32_sai_set_sync(struct stm32_sai_data *sai,
+ struct device_node *np_provider,
+ int synco, int synci)
+{
+ int ret;
+
+ /* Configure sync client */
+ stm32_sai_sync_conf_client(sai, synci);
+
+ /* Configure sync provider */
+ ret = stm32_sai_set_sync_provider(np_provider, synco);
+
+ return ret;
+}
+
+static int stm32_sai_sync_add_provider(struct platform_device *pdev,
+ void *data)
+{
+ struct sync_provider *sp;
+
+ sp = devm_kzalloc(&pdev->dev, sizeof(*sp), GFP_KERNEL);
+ if (!sp)
+ return -ENOMEM;
+
+ sp->node = of_node_get(pdev->dev.of_node);
+ sp->data = data;
+ sp->sync_conf = &stm32_sai_sync_conf_provider;
+
+ mutex_lock(&sync_mutex);
+ list_add(&sp->link, &sync_providers);
+ mutex_unlock(&sync_mutex);
+
+ return 0;
+}
+
+static void stm32_sai_sync_del_provider(struct device_node *np)
+{
+ struct sync_provider *sp;
+
+ mutex_lock(&sync_mutex);
+ list_for_each_entry(sp, &sync_providers, link) {
+ if (sp->node == np) {
+ list_del(&sp->link);
+ of_node_put(sp->node);
+ break;
+ }
+ }
+ mutex_unlock(&sync_mutex);
+}
+
static int stm32_sai_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct stm32_sai_data *sai;
struct reset_control *rst;
struct resource *res;
- void __iomem *base;
const struct of_device_id *of_id;
+ int ret;
sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL);
if (!sai)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ sai->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sai->base))
+ return PTR_ERR(sai->base);
of_id = of_match_device(stm32_sai_ids, &pdev->dev);
if (of_id)
@@ -65,6 +196,14 @@ static int stm32_sai_probe(struct platform_device *pdev)
else
return -EINVAL;
+ if (!STM_SAI_IS_F4(sai)) {
+ sai->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(sai->pclk)) {
+ dev_err(&pdev->dev, "missing bus clock pclk\n");
+ return PTR_ERR(sai->pclk);
+ }
+ }
+
sai->clk_x8k = devm_clk_get(&pdev->dev, "x8k");
if (IS_ERR(sai->clk_x8k)) {
dev_err(&pdev->dev, "missing x8k parent clock\n");
@@ -85,23 +224,34 @@ static int stm32_sai_probe(struct platform_device *pdev)
}
/* reset */
- rst = reset_control_get_exclusive(&pdev->dev, NULL);
+ rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (!IS_ERR(rst)) {
reset_control_assert(rst);
udelay(2);
reset_control_deassert(rst);
}
+ ret = stm32_sai_sync_add_provider(pdev, sai);
+ if (ret < 0)
+ return ret;
+ sai->set_sync = &stm32_sai_set_sync;
+
sai->pdev = pdev;
platform_set_drvdata(pdev, sai);
- return of_platform_populate(np, NULL, NULL, &pdev->dev);
+ ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
+ if (ret < 0)
+ stm32_sai_sync_del_provider(np);
+
+ return ret;
}
static int stm32_sai_remove(struct platform_device *pdev)
{
of_platform_depopulate(&pdev->dev);
+ stm32_sai_sync_del_provider(pdev->dev.of_node);
+
return 0;
}
diff --git a/sound/soc/stm/stm32_sai.h b/sound/soc/stm/stm32_sai.h
index 889974dc62d9..bb062e70de63 100644
--- a/sound/soc/stm/stm32_sai.h
+++ b/sound/soc/stm/stm32_sai.h
@@ -16,9 +16,11 @@
* details.
*/
+#include <linux/bitfield.h>
+
/******************** SAI Register Map **************************************/
-/* common register */
+/* Global configuration register */
#define STM_SAI_GCR 0x00
/* Sub-block A&B registers offsets, relative to A&B sub-block addresses */
@@ -37,12 +39,13 @@
/******************** Bit definition for SAI_GCR register *******************/
#define SAI_GCR_SYNCIN_SHIFT 0
+#define SAI_GCR_SYNCIN_WDTH 2
#define SAI_GCR_SYNCIN_MASK GENMASK(1, SAI_GCR_SYNCIN_SHIFT)
-#define SAI_GCR_SYNCIN_SET(x) ((x) << SAI_GCR_SYNCIN_SHIFT)
+#define SAI_GCR_SYNCIN_MAX FIELD_GET(SAI_GCR_SYNCIN_MASK,\
+ SAI_GCR_SYNCIN_MASK)
#define SAI_GCR_SYNCOUT_SHIFT 4
#define SAI_GCR_SYNCOUT_MASK GENMASK(5, SAI_GCR_SYNCOUT_SHIFT)
-#define SAI_GCR_SYNCOUT_SET(x) ((x) << SAI_GCR_SYNCOUT_SHIFT)
/******************* Bit definition for SAI_XCR1 register *******************/
#define SAI_XCR1_RX_TX_SHIFT 0
@@ -231,6 +234,12 @@
#define STM_SAI_IS_F4(ip) ((ip)->conf->version == SAI_STM32F4)
#define STM_SAI_IS_H7(ip) ((ip)->conf->version == SAI_STM32H7)
+enum stm32_sai_syncout {
+ STM_SAI_SYNC_OUT_NONE,
+ STM_SAI_SYNC_OUT_A,
+ STM_SAI_SYNC_OUT_B,
+};
+
enum stm32_sai_version {
SAI_STM32F4,
SAI_STM32H7
@@ -247,15 +256,22 @@ struct stm32_sai_conf {
/**
* struct stm32_sai_data - private data of SAI instance driver
* @pdev: device data pointer
+ * @base: common register bank virtual base address
+ * @pclk: SAI bus clock
* @clk_x8k: SAI parent clock for sampling frequencies multiple of 8kHz
* @clk_x11k: SAI parent clock for sampling frequencies multiple of 11kHz
* @version: SOC version
* @irq: SAI interrupt line
+ * @set_sync: pointer to synchro mode configuration callback
*/
struct stm32_sai_data {
struct platform_device *pdev;
+ void __iomem *base;
+ struct clk *pclk;
struct clk *clk_x8k;
struct clk *clk_x11k;
struct stm32_sai_conf *conf;
int irq;
+ int (*set_sync)(struct stm32_sai_data *sai,
+ struct device_node *np_provider, int synco, int synci);
};
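For reference, the SAI_GCR_SYNCIN_MAX definition above evaluates the field mask against itself; a worked expansion:

	/*
	 * SAI_GCR_SYNCIN_MASK = GENMASK(1, 0)                  = 0x3
	 * SAI_GCR_SYNCIN_MAX  = FIELD_GET(0x3, 0x3) = 0x3 >> 0 = 3
	 *
	 * so stm32_sai_sub.c accepts client sync indexes in the range 1..4
	 * (it checks synci against SAI_GCR_SYNCIN_MAX + 1).
	 */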
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index 90d439613899..08583b958430 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -55,6 +55,12 @@
#define STM_SAI_IS_SUB_B(x) ((x)->id == STM_SAI_B_ID)
#define STM_SAI_BLOCK_NAME(x) (((x)->id == STM_SAI_A_ID) ? "A" : "B")
+#define SAI_SYNC_NONE 0x0
+#define SAI_SYNC_INTERNAL 0x1
+#define SAI_SYNC_EXTERNAL 0x2
+
+#define STM_SAI_HAS_EXT_SYNC(x) (!STM_SAI_IS_F4((x)->pdata))
+
/**
* struct stm32_sai_sub_data - private data of SAI sub block (block A or B)
* @pdev: device data pointer
@@ -65,6 +71,7 @@
* @cpu_dai: DAI runtime data pointer
* @substream: PCM substream data pointer
* @pdata: SAI block parent data pointer
+ * @np_sync_provider: synchronization provider node
* @sai_ck: kernel clock feeding the SAI clock generator
* @phys_addr: SAI registers physical base address
* @mclk_rate: SAI block master clock frequency (Hz). set at init
@@ -73,6 +80,8 @@
* @master: SAI block mode flag. (true=master, false=slave) set at init
* @fmt: SAI block format. relevant only for custom protocols. set at init
* @sync: SAI block synchronization mode. (none, internal or external)
+ * @synco: SAI block ext sync source (provider setting). (none, sub-block A/B)
+ * @synci: SAI block ext sync source (client setting). (SAI sync provider index)
* @fs_length: frame synchronization length. depends on protocol settings
* @slots: rx or tx slot number
* @slot_width: rx or tx slot width in bits
@@ -88,6 +97,7 @@ struct stm32_sai_sub_data {
struct snd_soc_dai *cpu_dai;
struct snd_pcm_substream *substream;
struct stm32_sai_data *pdata;
+ struct device_node *np_sync_provider;
struct clk *sai_ck;
dma_addr_t phys_addr;
unsigned int mclk_rate;
@@ -96,6 +106,8 @@ struct stm32_sai_sub_data {
bool master;
int fmt;
int sync;
+ int synco;
+ int synci;
int fs_length;
int slots;
int slot_width;
@@ -184,7 +196,6 @@ static const struct regmap_config stm32_sai_sub_regmap_config_h7 = {
static irqreturn_t stm32_sai_isr(int irq, void *devid)
{
struct stm32_sai_sub_data *sai = (struct stm32_sai_sub_data *)devid;
- struct snd_pcm_substream *substream = sai->substream;
struct platform_device *pdev = sai->pdev;
unsigned int sr, imr, flags;
snd_pcm_state_t status = SNDRV_PCM_STATE_RUNNING;
@@ -199,6 +210,11 @@ static irqreturn_t stm32_sai_isr(int irq, void *devid)
regmap_update_bits(sai->regmap, STM_SAI_CLRFR_REGX, SAI_XCLRFR_MASK,
SAI_XCLRFR_MASK);
+ if (!sai->substream) {
+ dev_err(&pdev->dev, "Device stopped. Spurious IRQ 0x%x\n", sr);
+ return IRQ_NONE;
+ }
+
if (flags & SAI_XIMR_OVRUDRIE) {
dev_err(&pdev->dev, "IRQ %s\n",
STM_SAI_IS_PLAYBACK(sai) ? "underrun" : "overrun");
@@ -227,9 +243,9 @@ static irqreturn_t stm32_sai_isr(int irq, void *devid)
}
if (status != SNDRV_PCM_STATE_RUNNING) {
- snd_pcm_stream_lock(substream);
- snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
- snd_pcm_stream_unlock(substream);
+ snd_pcm_stream_lock(sai->substream);
+ snd_pcm_stop(sai->substream, SNDRV_PCM_STATE_XRUN);
+ snd_pcm_stream_unlock(sai->substream);
}
return IRQ_HANDLED;
@@ -304,12 +320,15 @@ static int stm32_sai_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask,
static int stm32_sai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
{
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
- int cr1 = 0, frcr = 0;
- int cr1_mask = 0, frcr_mask = 0;
+ int cr1, frcr = 0;
+ int cr1_mask, frcr_mask = 0;
int ret;
dev_dbg(cpu_dai->dev, "fmt %x\n", fmt);
+ cr1_mask = SAI_XCR1_PRTCFG_MASK;
+ cr1 = SAI_XCR1_PRTCFG_SET(SAI_FREE_PROTOCOL);
+
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
/* SCK active high for all protocols */
case SND_SOC_DAIFMT_I2S:
@@ -336,7 +355,7 @@ static int stm32_sai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
return -EINVAL;
}
- cr1_mask |= SAI_XCR1_PRTCFG_MASK | SAI_XCR1_CKSTR;
+ cr1_mask |= SAI_XCR1_CKSTR;
frcr_mask |= SAI_XFRCR_FSPOL | SAI_XFRCR_FSOFF |
SAI_XFRCR_FSDEF;
@@ -380,6 +399,14 @@ static int stm32_sai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
fmt & SND_SOC_DAIFMT_MASTER_MASK);
return -EINVAL;
}
+
+ /* Set slave mode if sub-block is synchronized with another SAI */
+ if (sai->sync) {
+ dev_dbg(cpu_dai->dev, "Synchronized SAI configured as slave\n");
+ cr1 |= SAI_XCR1_SLAVE;
+ sai->master = false;
+ }
+
cr1_mask |= SAI_XCR1_SLAVE;
/* do not generate master by default */
@@ -412,8 +439,6 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream,
}
/* Enable ITs */
- regmap_update_bits(sai->regmap, STM_SAI_SR_REGX,
- SAI_XSR_MASK, (unsigned int)~SAI_XSR_MASK);
regmap_update_bits(sai->regmap, STM_SAI_CLRFR_REGX,
SAI_XCLRFR_MASK, SAI_XCLRFR_MASK);
@@ -442,34 +467,33 @@ static int stm32_sai_set_config(struct snd_soc_dai *cpu_dai,
{
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
int cr1, cr1_mask, ret;
- int fth = STM_SAI_FIFO_TH_HALF;
- /* FIFO config */
+ /*
+ * The DMA burst size is set to 4 words.
+ * The SAI FIFO threshold is set to half the FIFO, to keep enough space
+ * for incoming DMA bursts.
+ */
regmap_update_bits(sai->regmap, STM_SAI_CR2_REGX,
SAI_XCR2_FFLUSH | SAI_XCR2_FTH_MASK,
- SAI_XCR2_FFLUSH | SAI_XCR2_FTH_SET(fth));
+ SAI_XCR2_FFLUSH |
+ SAI_XCR2_FTH_SET(STM_SAI_FIFO_TH_HALF));
/* Mode, data format and channel config */
- cr1 = SAI_XCR1_PRTCFG_SET(SAI_FREE_PROTOCOL);
+ cr1_mask = SAI_XCR1_DS_MASK;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S8:
- cr1 |= SAI_XCR1_DS_SET(SAI_DATASIZE_8);
+ cr1 = SAI_XCR1_DS_SET(SAI_DATASIZE_8);
break;
case SNDRV_PCM_FORMAT_S16_LE:
- cr1 |= SAI_XCR1_DS_SET(SAI_DATASIZE_16);
+ cr1 = SAI_XCR1_DS_SET(SAI_DATASIZE_16);
break;
case SNDRV_PCM_FORMAT_S32_LE:
- cr1 |= SAI_XCR1_DS_SET(SAI_DATASIZE_32);
+ cr1 = SAI_XCR1_DS_SET(SAI_DATASIZE_32);
break;
default:
dev_err(cpu_dai->dev, "Data format not supported");
return -EINVAL;
}
- cr1_mask = SAI_XCR1_DS_MASK | SAI_XCR1_PRTCFG_MASK;
-
- cr1_mask |= SAI_XCR1_RX_TX;
- if (STM_SAI_IS_CAPTURE(sai))
- cr1 |= SAI_XCR1_RX_TX;
cr1_mask |= SAI_XCR1_MONO;
if ((sai->slots == 2) && (params_channels(params) == 1))
@@ -481,10 +505,6 @@ static int stm32_sai_set_config(struct snd_soc_dai *cpu_dai,
return ret;
}
- /* DMA config */
- sai->dma_params.maxburst = STM_SAI_FIFO_SIZE * fth / sizeof(u32);
- snd_soc_dai_set_dma_data(cpu_dai, substream, (void *)&sai->dma_params);
-
return 0;
}
@@ -691,6 +711,9 @@ static int stm32_sai_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_STOP:
dev_dbg(cpu_dai->dev, "Disable DMA and SAI\n");
+ regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX,
+ SAI_XIMR_MASK, 0);
+
regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
SAI_XCR1_SAIEN,
(unsigned int)~SAI_XCR1_SAIEN);
@@ -725,9 +748,15 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
{
struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
+ int cr1 = 0, cr1_mask;
sai->dma_params.addr = (dma_addr_t)(sai->phys_addr + STM_SAI_DR_REGX);
- sai->dma_params.maxburst = 1;
+ /*
+ * The DMA supports burst sizes of 4, 8 or 16 words. A burst size of 4 is
+ * the best choice, as it allows byte, half-word and word transfers. (See
+ * the DMA FIFO constraints.)
+ */
+ sai->dma_params.maxburst = 4;
/* Buswidth will be set by framework at runtime */
sai->dma_params.addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
@@ -736,7 +765,21 @@ static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
else
snd_soc_dai_init_dma_data(cpu_dai, NULL, &sai->dma_params);
- return 0;
+ cr1_mask = SAI_XCR1_RX_TX;
+ if (STM_SAI_IS_CAPTURE(sai))
+ cr1 |= SAI_XCR1_RX_TX;
+
+ /* Configure synchronization */
+ if (sai->sync == SAI_SYNC_EXTERNAL) {
+ /* Configure synchro client and provider */
+ sai->pdata->set_sync(sai->pdata, sai->np_sync_provider,
+ sai->synco, sai->synci);
+ }
+
+ cr1_mask |= SAI_XCR1_SYNCEN_MASK;
+ cr1 |= SAI_XCR1_SYNCEN_SET(sai->sync);
+
+ return regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1);
}
static const struct snd_soc_dai_ops stm32_sai_pcm_dai_ops = {
@@ -822,6 +865,8 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
struct device_node *np = pdev->dev.of_node;
struct resource *res;
void __iomem *base;
+ struct of_phandle_args args;
+ int ret;
if (!np)
return -ENODEV;
@@ -855,6 +900,69 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
return -EINVAL;
}
+ /* Get synchronization property */
+ args.np = NULL;
+ ret = of_parse_phandle_with_fixed_args(np, "st,sync", 1, 0, &args);
+ if (ret < 0 && ret != -ENOENT) {
+ dev_err(&pdev->dev, "Failed to get st,sync property\n");
+ return ret;
+ }
+
+ sai->sync = SAI_SYNC_NONE;
+ if (args.np) {
+ if (args.np == np) {
+ dev_err(&pdev->dev, "%s sync own reference\n",
+ np->name);
+ of_node_put(args.np);
+ return -EINVAL;
+ }
+
+ sai->np_sync_provider = of_get_parent(args.np);
+ if (!sai->np_sync_provider) {
+ dev_err(&pdev->dev, "%s parent node not found\n",
+ np->name);
+ of_node_put(args.np);
+ return -ENODEV;
+ }
+
+ sai->sync = SAI_SYNC_INTERNAL;
+ if (sai->np_sync_provider != sai->pdata->pdev->dev.of_node) {
+ if (!STM_SAI_HAS_EXT_SYNC(sai)) {
+ dev_err(&pdev->dev,
+ "External synchro not supported\n");
+ of_node_put(args.np);
+ return -EINVAL;
+ }
+ sai->sync = SAI_SYNC_EXTERNAL;
+
+ sai->synci = args.args[0];
+ if (sai->synci < 1 ||
+ (sai->synci > (SAI_GCR_SYNCIN_MAX + 1))) {
+ dev_err(&pdev->dev, "Wrong SAI index\n");
+ of_node_put(args.np);
+ return -EINVAL;
+ }
+
+ if (of_property_match_string(args.np, "compatible",
+ "st,stm32-sai-sub-a") >= 0)
+ sai->synco = STM_SAI_SYNC_OUT_A;
+
+ if (of_property_match_string(args.np, "compatible",
+ "st,stm32-sai-sub-b") >= 0)
+ sai->synco = STM_SAI_SYNC_OUT_B;
+
+ if (!sai->synco) {
+ dev_err(&pdev->dev, "Unknown SAI sub-block\n");
+ of_node_put(args.np);
+ return -EINVAL;
+ }
+ }
+
+ dev_dbg(&pdev->dev, "%s synchronized with %s\n",
+ pdev->name, args.np->full_name);
+ }
+
+ of_node_put(args.np);
sai->sai_ck = devm_clk_get(&pdev->dev, "sai_ck");
if (IS_ERR(sai->sai_ck)) {
dev_err(&pdev->dev, "Missing kernel clock sai_ck\n");
diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c
index 84cc5678beba..b9bdefcd3e10 100644
--- a/sound/soc/stm/stm32_spdifrx.c
+++ b/sound/soc/stm/stm32_spdifrx.c
@@ -392,6 +392,12 @@ static int stm32_spdifrx_dma_ctrl_register(struct device *dev,
{
int ret;
+ spdifrx->ctrl_chan = dma_request_chan(dev, "rx-ctrl");
+ if (IS_ERR(spdifrx->ctrl_chan)) {
+ dev_err(dev, "dma_request_slave_channel failed\n");
+ return PTR_ERR(spdifrx->ctrl_chan);
+ }
+
spdifrx->dmab = devm_kzalloc(dev, sizeof(struct snd_dma_buffer),
GFP_KERNEL);
if (!spdifrx->dmab)
@@ -406,12 +412,6 @@ static int stm32_spdifrx_dma_ctrl_register(struct device *dev,
return ret;
}
- spdifrx->ctrl_chan = dma_request_chan(dev, "rx-ctrl");
- if (!spdifrx->ctrl_chan) {
- dev_err(dev, "dma_request_slave_channel failed\n");
- return -EINVAL;
- }
-
spdifrx->slave_config.direction = DMA_DEV_TO_MEM;
spdifrx->slave_config.src_addr = (dma_addr_t)(spdifrx->phys_addr +
STM32_SPDIFRX_CSR);
@@ -423,7 +423,6 @@ static int stm32_spdifrx_dma_ctrl_register(struct device *dev,
&spdifrx->slave_config);
if (ret < 0) {
dev_err(dev, "dmaengine_slave_config returned error %d\n", ret);
- dma_release_channel(spdifrx->ctrl_chan);
spdifrx->ctrl_chan = NULL;
}
@@ -750,17 +749,21 @@ static int stm32_spdifrx_hw_params(struct snd_pcm_substream *substream,
switch (data_size) {
case 16:
fmt = SPDIFRX_DRFMT_PACKED;
- spdifrx->dma_params.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
case 32:
fmt = SPDIFRX_DRFMT_LEFT;
- spdifrx->dma_params.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
default:
dev_err(&spdifrx->pdev->dev, "Unexpected data format\n");
return -EINVAL;
}
+ /*
+ * Set buswidth to 4 bytes for all data formats.
+ * Packed format: transfers 2 x 2-byte samples
+ * Left format: transfers 1 x 3-byte sample + 1 dummy byte
+ */
+ spdifrx->dma_params.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
snd_soc_dai_init_dma_data(cpu_dai, NULL, &spdifrx->dma_params);
return regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
@@ -958,7 +961,7 @@ static int stm32_spdifrx_probe(struct platform_device *pdev)
return 0;
error:
- if (spdifrx->ctrl_chan)
+ if (!IS_ERR(spdifrx->ctrl_chan))
dma_release_channel(spdifrx->ctrl_chan);
if (spdifrx->dmab)
snd_dma_free_pages(spdifrx->dmab);
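The reordering above also switches the error check to IS_ERR(): dma_request_chan() returns an ERR_PTR() on failure rather than NULL, so the old NULL test could never fire. A small pattern sketch (the function name is illustrative, the channel name follows the driver):

	static struct dma_chan *example_request_ctrl_chan(struct device *dev)
	{
		struct dma_chan *chan = dma_request_chan(dev, "rx-ctrl");

		if (IS_ERR(chan)) {
			dev_err(dev, "dma_request_chan failed: %ld\n",
				PTR_ERR(chan));
			return NULL;
		}

		return chan;
	}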
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index baa9007464ed..5da4efe7a550 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -346,11 +346,6 @@ static int sun4i_codec_prepare_capture(struct snd_pcm_substream *substream,
0x3 << 8,
0x1 << 8);
- /* Fill most significant bits with valid data MSB */
- regmap_field_update_bits(scodec->reg_adc_fifoc,
- BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE),
- BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE));
-
return 0;
}
@@ -490,6 +485,30 @@ static int sun4i_codec_hw_params_capture(struct sun4i_codec *scodec,
BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN),
0);
+ /* Set the number of sample bits to either 16 or 24 bits */
+ if (hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS)->min == 32) {
+ regmap_field_update_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_RX_SAMPLE_BITS),
+ BIT(SUN4I_CODEC_ADC_FIFOC_RX_SAMPLE_BITS));
+
+ regmap_field_update_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE),
+ 0);
+
+ scodec->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ } else {
+ regmap_field_update_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_RX_SAMPLE_BITS),
+ 0);
+
+ /* Fill most significant bits with valid data MSB */
+ regmap_field_update_bits(scodec->reg_adc_fifoc,
+ BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE),
+ BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE));
+
+ scodec->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ }
+
return 0;
}
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index abfb710df7cb..3dd183be08a4 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -73,6 +73,7 @@
#define SUN8I_SYS_SR_CTRL_AIF2_FS_MASK GENMASK(11, 8)
#define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK GENMASK(5, 4)
#define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK GENMASK(8, 6)
+#define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK GENMASK(12, 9)
struct sun8i_codec {
struct device *dev;
@@ -170,11 +171,11 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
/* clock masters */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS: /* DAI Slave */
- value = 0x0; /* Codec Master */
+ case SND_SOC_DAIFMT_CBS_CFS: /* Codec slave, DAI master */
+ value = 0x1;
break;
- case SND_SOC_DAIFMT_CBM_CFM: /* DAI Master */
- value = 0x1; /* Codec Slave */
+ case SND_SOC_DAIFMT_CBM_CFM: /* Codec Master, DAI slave */
+ value = 0x0;
break;
default:
return -EINVAL;
@@ -197,9 +198,20 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
BIT(SUN8I_AIF1CLK_CTRL_AIF1_BCLK_INV),
value << SUN8I_AIF1CLK_CTRL_AIF1_BCLK_INV);
+
+ /*
+ * It appears that the DAI and the codec don't share the same
+ * polarity for the LRCK signal when they mean 'normal' and
+ * 'inverted' in the datasheet.
+ *
+ * Since the DAI here is our regular i2s driver, which has been
+ * tested with far more codecs than just this one, the codec
+ * probably has it backwards, and we have to invert the value
+ * here.
+ */
regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
BIT(SUN8I_AIF1CLK_CTRL_AIF1_LRCK_INV),
- value << SUN8I_AIF1CLK_CTRL_AIF1_LRCK_INV);
+ !value << SUN8I_AIF1CLK_CTRL_AIF1_LRCK_INV);
/* DAI format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
@@ -226,12 +238,57 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return 0;
}
+struct sun8i_codec_clk_div {
+ u8 div;
+ u8 val;
+};
+
+static const struct sun8i_codec_clk_div sun8i_codec_bclk_div[] = {
+ { .div = 1, .val = 0 },
+ { .div = 2, .val = 1 },
+ { .div = 4, .val = 2 },
+ { .div = 6, .val = 3 },
+ { .div = 8, .val = 4 },
+ { .div = 12, .val = 5 },
+ { .div = 16, .val = 6 },
+ { .div = 24, .val = 7 },
+ { .div = 32, .val = 8 },
+ { .div = 48, .val = 9 },
+ { .div = 64, .val = 10 },
+ { .div = 96, .val = 11 },
+ { .div = 128, .val = 12 },
+ { .div = 192, .val = 13 },
+};
+
+static u8 sun8i_codec_get_bclk_div(struct sun8i_codec *scodec,
+ unsigned int rate,
+ unsigned int word_size)
+{
+ unsigned long clk_rate = clk_get_rate(scodec->clk_module);
+ unsigned int div = clk_rate / rate / word_size / 2;
+ unsigned int best_val = 0, best_diff = ~0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sun8i_codec_bclk_div); i++) {
+ const struct sun8i_codec_clk_div *bdiv = &sun8i_codec_bclk_div[i];
+ unsigned int diff = abs(bdiv->div - div);
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ best_val = bdiv->val;
+ }
+ }
+
+ return best_val;
+}
+
static int sun8i_codec_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct sun8i_codec *scodec = snd_soc_codec_get_drvdata(dai->codec);
int sample_rate;
+ u8 bclk_div;
/*
* The CPU DAI handles only a sample of 16 bits. Configure the
@@ -241,6 +298,11 @@ static int sun8i_codec_hw_params(struct snd_pcm_substream *substream,
SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK,
SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_16);
+ bclk_div = sun8i_codec_get_bclk_div(scodec, params_rate(params), 16);
+ regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
+ SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK,
+ bclk_div << SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV);
+
regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK,
SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_16);
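sun8i_codec_get_bclk_div() above derives the wanted divisor as module_clock / rate / word_size / 2 and keeps the table entry closest to it. A standalone sketch of that closest-match search; the divider table is copied from the hunk, while the 24.576 MHz module clock in main() is only an assumed example:

#include <stdio.h>
#include <stdlib.h>

struct clk_div { unsigned int div; unsigned int val; };

static const struct clk_div bclk_div[] = {
	{ 1, 0 },    { 2, 1 },    { 4, 2 },   { 6, 3 },   { 8, 4 },   { 12, 5 },
	{ 16, 6 },   { 24, 7 },   { 32, 8 },  { 48, 9 },  { 64, 10 }, { 96, 11 },
	{ 128, 12 }, { 192, 13 },
};

/* Pick the register value whose divisor is closest to the wanted one. */
static unsigned int get_bclk_div(unsigned long clk_rate, unsigned int rate,
				 unsigned int word_size)
{
	unsigned int wanted = clk_rate / rate / word_size / 2;
	unsigned int best_val = 0, best_diff = ~0U;
	size_t i;

	for (i = 0; i < sizeof(bclk_div) / sizeof(bclk_div[0]); i++) {
		unsigned int diff = abs((int)(bclk_div[i].div - wanted));

		if (diff < best_diff) {
			best_diff = diff;
			best_val = bclk_div[i].val;
		}
	}

	return best_val;
}

int main(void)
{
	/* Assumed 24.576 MHz module clock, 48 kHz, 16-bit slots, 2 channels. */
	printf("BCLK_DIV register value: %u\n",
	       get_bclk_div(24576000, 48000, 16));
	return 0;
}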
diff --git a/sound/soc/xtensa/xtfpga-i2s.c b/sound/soc/xtensa/xtfpga-i2s.c
index 8382ffa3bcaf..2472144b329e 100644
--- a/sound/soc/xtensa/xtfpga-i2s.c
+++ b/sound/soc/xtensa/xtfpga-i2s.c
@@ -165,7 +165,7 @@ static bool xtfpga_pcm_push_tx(struct xtfpga_i2s *i2s)
tx_substream = rcu_dereference(i2s->tx_substream);
tx_active = tx_substream && snd_pcm_running(tx_substream);
if (tx_active) {
- unsigned tx_ptr = ACCESS_ONCE(i2s->tx_ptr);
+ unsigned tx_ptr = READ_ONCE(i2s->tx_ptr);
unsigned new_tx_ptr = i2s->tx_fn(i2s, tx_substream->runtime,
tx_ptr);
@@ -437,7 +437,7 @@ static int xtfpga_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- ACCESS_ONCE(i2s->tx_ptr) = 0;
+ WRITE_ONCE(i2s->tx_ptr, 0);
rcu_assign_pointer(i2s->tx_substream, substream);
xtfpga_pcm_refill_fifo(i2s);
break;
@@ -459,7 +459,7 @@ static snd_pcm_uframes_t xtfpga_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct xtfpga_i2s *i2s = runtime->private_data;
- snd_pcm_uframes_t pos = ACCESS_ONCE(i2s->tx_ptr);
+ snd_pcm_uframes_t pos = READ_ONCE(i2s->tx_ptr);
return pos < runtime->buffer_size ? pos : 0;
}
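The xtfpga hunks replace ACCESS_ONCE() with READ_ONCE()/WRITE_ONCE(), which force exactly one untorn load or store of a shared scalar and stop the compiler from refetching or caching it. A rough user-space approximation of the two macros (the kernel versions add type checking that this sketch omits):

#include <stdio.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(). */
#define READ_ONCE(x)      (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

static unsigned int tx_ptr;

int main(void)
{
	WRITE_ONCE(tx_ptr, 0);			/* reset: exactly one store */
	unsigned int pos = READ_ONCE(tx_ptr);	/* exactly one load, no refetch */

	printf("tx_ptr snapshot: %u\n", pos);
	return 0;
}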
diff --git a/sound/soc/zte/zx-spdif.c b/sound/soc/zte/zx-spdif.c
index b143f9f682d2..17b6ce35037a 100644
--- a/sound/soc/zte/zx-spdif.c
+++ b/sound/soc/zte/zx-spdif.c
@@ -139,11 +139,11 @@ static int zx_spdif_hw_params(struct snd_pcm_substream *substream,
{
struct zx_spdif_info *zx_spdif = dev_get_drvdata(socdai->dev);
struct zx_spdif_info *spdif = snd_soc_dai_get_drvdata(socdai);
- struct snd_dmaengine_dai_dma_data *dma_data = &zx_spdif->dma_data;
+ struct snd_dmaengine_dai_dma_data *dma_data =
+ snd_soc_dai_get_dma_data(socdai, substream);
u32 val, ch_num, rate;
int ret;
- dma_data = snd_soc_dai_get_dma_data(socdai, substream);
dma_data->addr_width = params_width(params) >> 3;
val = readl_relaxed(zx_spdif->reg_base + ZX_CTRL);
diff --git a/sound/synth/emux/emux.c b/sound/synth/emux/emux.c
index b9981e8c0027..b840ff2dcfbb 100644
--- a/sound/synth/emux/emux.c
+++ b/sound/synth/emux/emux.c
@@ -53,7 +53,7 @@ int snd_emux_new(struct snd_emux **remu)
emu->max_voices = 0;
emu->use_time = 0;
- setup_timer(&emu->tlist, snd_emux_timer_callback, (unsigned long)emu);
+ timer_setup(&emu->tlist, snd_emux_timer_callback, 0);
emu->timer_active = 0;
*remu = emu;
diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
index de19e108974a..764ff4bc2089 100644
--- a/sound/synth/emux/emux_oss.c
+++ b/sound/synth/emux/emux_oss.c
@@ -430,7 +430,6 @@ gusspec_control(struct snd_emux *emu, struct snd_emux_port *port, int cmd,
{
int voice;
unsigned short p1;
- short p2;
int plong;
struct snd_midi_channel *chan;
@@ -445,7 +444,6 @@ gusspec_control(struct snd_emux *emu, struct snd_emux_port *port, int cmd,
chan = &port->chset.channels[voice];
p1 = *(unsigned short *) &event[4];
- p2 = *(short *) &event[6];
plong = *(int*) &event[4];
switch (cmd) {
diff --git a/sound/synth/emux/emux_synth.c b/sound/synth/emux/emux_synth.c
index 599551b5af44..9fa696b0dbde 100644
--- a/sound/synth/emux/emux_synth.c
+++ b/sound/synth/emux/emux_synth.c
@@ -202,9 +202,9 @@ snd_emux_note_off(void *p, int note, int vel, struct snd_midi_channel *chan)
*
* release the pending note-offs
*/
-void snd_emux_timer_callback(unsigned long data)
+void snd_emux_timer_callback(struct timer_list *t)
{
- struct snd_emux *emu = (struct snd_emux *) data;
+ struct snd_emux *emu = from_timer(emu, t, tlist);
struct snd_emux_voice *vp;
unsigned long flags;
int ch, do_again = 0;
diff --git a/sound/synth/emux/emux_voice.h b/sound/synth/emux/emux_voice.h
index a7073c371bcc..326fa8993d7b 100644
--- a/sound/synth/emux/emux_voice.h
+++ b/sound/synth/emux/emux_voice.h
@@ -55,7 +55,7 @@ void snd_emux_update_channel(struct snd_emux_port *port,
struct snd_midi_channel *chan, int update);
void snd_emux_update_port(struct snd_emux_port *port, int update);
-void snd_emux_timer_callback(unsigned long data);
+void snd_emux_timer_callback(struct timer_list *t);
/* emux_effect.c */
#ifdef SNDRV_EMUX_USE_RAW_EFFECT
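The emux conversion follows the tree-wide timer API change: timer_setup() replaces setup_timer(), and the callback now takes a struct timer_list pointer and recovers its containing object with from_timer() instead of casting an unsigned long. A kernel-style sketch of the pattern with an illustrative device structure (not compilable outside a kernel tree):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list tlist;
	int pending;
};

static void my_dev_timer_cb(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the timer_list member. */
	struct my_dev *dev = from_timer(dev, t, tlist);

	dev->pending = 0;
}

static void my_dev_start(struct my_dev *dev)
{
	timer_setup(&dev->tlist, my_dev_timer_cb, 0);
	mod_timer(&dev->tlist, jiffies + msecs_to_jiffies(100));
}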
diff --git a/sound/usb/6fire/chip.c b/sound/usb/6fire/chip.c
index c7641cb50616..17d5e3ee6d73 100644
--- a/sound/usb/6fire/chip.c
+++ b/sound/usb/6fire/chip.c
@@ -174,11 +174,9 @@ destroy_chip:
static void usb6fire_chip_disconnect(struct usb_interface *intf)
{
struct sfire_chip *chip;
- struct snd_card *card;
chip = usb_get_intfdata(intf);
if (chip) { /* if !chip, fw upload has been performed */
- card = chip->card;
chip->intf_count--;
if (!chip->intf_count) {
mutex_lock(&register_mutex);
diff --git a/sound/usb/bcd2000/bcd2000.c b/sound/usb/bcd2000/bcd2000.c
index 7371e5b06035..d6c8b29fe430 100644
--- a/sound/usb/bcd2000/bcd2000.c
+++ b/sound/usb/bcd2000/bcd2000.c
@@ -108,7 +108,7 @@ static void bcd2000_midi_handle_input(struct bcd2000 *bcd2k,
unsigned int payload_length, tocopy;
struct snd_rawmidi_substream *midi_receive_substream;
- midi_receive_substream = ACCESS_ONCE(bcd2k->midi_receive_substream);
+ midi_receive_substream = READ_ONCE(bcd2k->midi_receive_substream);
if (!midi_receive_substream)
return;
@@ -139,7 +139,7 @@ static void bcd2000_midi_send(struct bcd2000 *bcd2k)
BUILD_BUG_ON(sizeof(device_cmd_prefix) >= BUFSIZE);
- midi_out_substream = ACCESS_ONCE(bcd2k->midi_out_substream);
+ midi_out_substream = READ_ONCE(bcd2k->midi_out_substream);
if (!midi_out_substream)
return;
@@ -342,6 +342,13 @@ static int bcd2000_init_midi(struct bcd2000 *bcd2k)
bcd2k->midi_out_buf, BUFSIZE,
bcd2000_output_complete, bcd2k, 1);
+ /* sanity checks of EPs before actually submitting */
+ if (usb_urb_ep_type_check(bcd2k->midi_in_urb) ||
+ usb_urb_ep_type_check(bcd2k->midi_out_urb)) {
+ dev_err(&bcd2k->dev->dev, "invalid MIDI EP\n");
+ return -EINVAL;
+ }
+
bcd2000_init_device(bcd2k);
return 0;
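Several hunks in this series call usb_urb_ep_type_check() between usb_fill_*_urb() and usb_submit_urb(); the helper verifies that the endpoint encoded in the pipe actually exists on the device and has the expected transfer type, so a malformed descriptor cannot trick the driver into submitting to a bogus endpoint. A kernel-style sketch of the pattern, with endpoint 0x81 and the buffer parameters as placeholders:

#include <linux/usb.h>
#include <linux/slab.h>

/* Hypothetical helper; only the pieces needed to show the check. */
static int example_submit_int_urb(struct usb_device *udev, struct urb *urb,
				  void *buf, int len,
				  usb_complete_t complete_fn, void *ctx)
{
	usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, 0x81),
			 buf, len, complete_fn, ctx, 1);

	/* Reject the URB if EP 0x81 is missing or not an interrupt endpoint. */
	if (usb_urb_ep_type_check(urb))
		return -EINVAL;

	return usb_submit_urb(urb, GFP_KERNEL);
}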
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index d8409d9ae55b..d55ca48de3ea 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -461,6 +461,13 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
cdev->midi_out_buf, EP1_BUFSIZE,
snd_usb_caiaq_midi_output_done, cdev);
+ /* sanity checks of EPs before actually submitting */
+ if (usb_urb_ep_type_check(&cdev->ep1_in_urb) ||
+ usb_urb_ep_type_check(&cdev->midi_out_urb)) {
+ dev_err(dev, "invalid EPs\n");
+ return -EINVAL;
+ }
+
init_waitqueue_head(&cdev->ep1_wait_queue);
init_waitqueue_head(&cdev->prepare_wait_queue);
diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c
index 4b3fb91deecd..e883659ea6e7 100644
--- a/sound/usb/caiaq/input.c
+++ b/sound/usb/caiaq/input.c
@@ -718,6 +718,9 @@ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *cdev)
usb_rcvbulkpipe(usb_dev, 0x4),
cdev->ep4_in_buf, EP4_BUFSIZE,
snd_usb_caiaq_ep4_reply_dispatch, cdev);
+ ret = usb_urb_ep_type_check(cdev->ep4_in_urb);
+ if (ret < 0)
+ goto exit_free_idev;
snd_usb_caiaq_set_auto_msg(cdev, 1, 10, 5);
@@ -757,6 +760,9 @@ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *cdev)
usb_rcvbulkpipe(usb_dev, 0x4),
cdev->ep4_in_buf, EP4_BUFSIZE,
snd_usb_caiaq_ep4_reply_dispatch, cdev);
+ ret = usb_urb_ep_type_check(cdev->ep4_in_urb);
+ if (ret < 0)
+ goto exit_free_idev;
snd_usb_caiaq_set_auto_msg(cdev, 1, 10, 5);
@@ -802,6 +808,9 @@ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *cdev)
usb_rcvbulkpipe(usb_dev, 0x4),
cdev->ep4_in_buf, EP4_BUFSIZE,
snd_usb_caiaq_ep4_reply_dispatch, cdev);
+ ret = usb_urb_ep_type_check(cdev->ep4_in_urb);
+ if (ret < 0)
+ goto exit_free_idev;
snd_usb_caiaq_set_auto_msg(cdev, 1, 10, 5);
break;
diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
index 175d8d6b7f59..396c317115b1 100644
--- a/sound/usb/hiface/pcm.c
+++ b/sound/usb/hiface/pcm.c
@@ -541,6 +541,8 @@ static int hiface_pcm_init_urb(struct pcm_urb *urb,
usb_fill_bulk_urb(&urb->instance, chip->dev,
usb_sndbulkpipe(chip->dev, ep), (void *)urb->buffer,
PCM_PACKET_SIZE, handler, urb);
+ if (usb_urb_ep_type_check(&urb->instance))
+ return -EINVAL;
init_usb_anchor(&urb->submitted);
return 0;
@@ -599,9 +601,12 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
mutex_init(&rt->stream_mutex);
spin_lock_init(&rt->playback.lock);
- for (i = 0; i < PCM_N_URBS; i++)
- hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP,
+ for (i = 0; i < PCM_N_URBS; i++) {
+ ret = hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP,
hiface_pcm_out_urb_handler);
+ if (ret < 0)
+ return ret;
+ }
ret = snd_pcm_new(chip->card, "USB-SPDIF Audio", 0, 1, 0, &pcm);
if (ret < 0) {
diff --git a/sound/usb/line6/capture.c b/sound/usb/line6/capture.c
index 7c812565f90d..947d6168f24a 100644
--- a/sound/usb/line6/capture.c
+++ b/sound/usb/line6/capture.c
@@ -248,7 +248,7 @@ static int snd_line6_capture_close(struct snd_pcm_substream *substream)
}
/* capture operators */
-struct snd_pcm_ops snd_line6_capture_ops = {
+const struct snd_pcm_ops snd_line6_capture_ops = {
.open = snd_line6_capture_open,
.close = snd_line6_capture_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/usb/line6/capture.h b/sound/usb/line6/capture.h
index 890b21bff18c..b67ccc39fd25 100644
--- a/sound/usb/line6/capture.h
+++ b/sound/usb/line6/capture.h
@@ -17,7 +17,7 @@
#include "driver.h"
#include "pcm.h"
-extern struct snd_pcm_ops snd_line6_capture_ops;
+extern const struct snd_pcm_ops snd_line6_capture_ops;
extern void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf,
int fsize);
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index c8f723c3a033..4f9613e5fc9e 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -78,6 +78,13 @@ static int line6_start_listen(struct usb_line6 *line6)
line6->buffer_listen, LINE6_BUFSIZE_LISTEN,
line6_data_received, line6);
}
+
+ /* sanity checks of EP before actually submitting */
+ if (usb_urb_ep_type_check(line6->urb_listen)) {
+ dev_err(line6->ifcdev, "invalid control EP\n");
+ return -EINVAL;
+ }
+
line6->urb_listen->actual_length = 0;
err = usb_submit_urb(line6->urb_listen, GFP_ATOMIC);
return err;
@@ -168,26 +175,33 @@ static int line6_send_raw_message_async_part(struct message *msg,
}
msg->done += bytes;
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval < 0) {
- dev_err(line6->ifcdev, "%s: usb_submit_urb failed (%d)\n",
- __func__, retval);
- usb_free_urb(urb);
- kfree(msg);
- return retval;
- }
+ /* sanity checks of EP before actually submitting */
+ retval = usb_urb_ep_type_check(urb);
+ if (retval < 0)
+ goto error;
+
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval < 0)
+ goto error;
return 0;
+
+ error:
+ dev_err(line6->ifcdev, "%s: usb_submit_urb failed (%d)\n",
+ __func__, retval);
+ usb_free_urb(urb);
+ kfree(msg);
+ return retval;
}
/*
Setup and start timer.
*/
void line6_start_timer(struct timer_list *timer, unsigned long msecs,
- void (*function)(unsigned long), unsigned long data)
+ void (*function)(struct timer_list *t))
{
- setup_timer(timer, function, data);
+ timer->function = (TIMER_FUNC_TYPE)function;
mod_timer(timer, jiffies + msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL_GPL(line6_start_timer);
diff --git a/sound/usb/line6/driver.h b/sound/usb/line6/driver.h
index dc97895547be..61425597eb61 100644
--- a/sound/usb/line6/driver.h
+++ b/sound/usb/line6/driver.h
@@ -198,8 +198,7 @@ extern int line6_send_sysex_message(struct usb_line6 *line6,
extern ssize_t line6_set_raw(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
extern void line6_start_timer(struct timer_list *timer, unsigned long msecs,
- void (*function)(unsigned long),
- unsigned long data);
+ void (*function)(struct timer_list *t));
extern int line6_version_request_async(struct usb_line6 *line6);
extern int line6_write_data(struct usb_line6 *line6, unsigned address,
void *data, unsigned datalen);
diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
index 1d3a23b02d68..6d7cde56a355 100644
--- a/sound/usb/line6/midi.c
+++ b/sound/usb/line6/midi.c
@@ -130,16 +130,21 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
transfer_buffer, length, midi_sent, line6,
line6->interval);
urb->actual_length = 0;
- retval = usb_submit_urb(urb, GFP_ATOMIC);
+ retval = usb_urb_ep_type_check(urb);
+ if (retval < 0)
+ goto error;
- if (retval < 0) {
- dev_err(line6->ifcdev, "usb_submit_urb failed\n");
- usb_free_urb(urb);
- return retval;
- }
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval < 0)
+ goto error;
++line6->line6midi->num_active_send_urbs;
return 0;
+
+ error:
+ dev_err(line6->ifcdev, "usb_submit_urb failed\n");
+ usb_free_urb(urb);
+ return retval;
}
static int line6_midi_output_open(struct snd_rawmidi_substream *substream)
diff --git a/sound/usb/line6/playback.c b/sound/usb/line6/playback.c
index 812d18191e01..819e9b2d1d6e 100644
--- a/sound/usb/line6/playback.c
+++ b/sound/usb/line6/playback.c
@@ -393,7 +393,7 @@ static int snd_line6_playback_close(struct snd_pcm_substream *substream)
}
/* playback operators */
-struct snd_pcm_ops snd_line6_playback_ops = {
+const struct snd_pcm_ops snd_line6_playback_ops = {
.open = snd_line6_playback_open,
.close = snd_line6_playback_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/usb/line6/playback.h b/sound/usb/line6/playback.h
index 51fce29e8726..d8d3b8a07a72 100644
--- a/sound/usb/line6/playback.h
+++ b/sound/usb/line6/playback.h
@@ -27,7 +27,7 @@
*/
#define USE_CLEAR_BUFFER_WORKAROUND 1
-extern struct snd_pcm_ops snd_line6_playback_ops;
+extern const struct snd_pcm_ops snd_line6_playback_ops;
extern int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm);
extern int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm);
diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
index 358224cc5638..020c81818951 100644
--- a/sound/usb/line6/pod.c
+++ b/sound/usb/line6/pod.c
@@ -174,7 +174,7 @@ static const char pod_version_header[] = {
};
/* forward declarations: */
-static void pod_startup2(unsigned long data);
+static void pod_startup2(struct timer_list *t);
static void pod_startup3(struct usb_line6_pod *pod);
static char *pod_alloc_sysex_buffer(struct usb_line6_pod *pod, int code,
@@ -286,13 +286,12 @@ static void pod_startup1(struct usb_line6_pod *pod)
CHECK_STARTUP_PROGRESS(pod->startup_progress, POD_STARTUP_INIT);
/* delay startup procedure: */
- line6_start_timer(&pod->startup_timer, POD_STARTUP_DELAY, pod_startup2,
- (unsigned long)pod);
+ line6_start_timer(&pod->startup_timer, POD_STARTUP_DELAY, pod_startup2);
}
-static void pod_startup2(unsigned long data)
+static void pod_startup2(struct timer_list *t)
{
- struct usb_line6_pod *pod = (struct usb_line6_pod *)data;
+ struct usb_line6_pod *pod = from_timer(pod, t, startup_timer);
struct usb_line6 *line6 = &pod->line6;
CHECK_STARTUP_PROGRESS(pod->startup_progress, POD_STARTUP_VERSIONREQ);
@@ -413,7 +412,7 @@ static int pod_init(struct usb_line6 *line6,
line6->process_message = line6_pod_process_message;
line6->disconnect = line6_pod_disconnect;
- init_timer(&pod->startup_timer);
+ timer_setup(&pod->startup_timer, NULL, 0);
INIT_WORK(&pod->startup_work, pod_startup4);
/* create sysfs entries: */
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 451007c27743..36ed9c85c0eb 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -39,7 +39,8 @@ enum {
LINE6_PODHD500_1,
LINE6_PODX3,
LINE6_PODX3LIVE,
- LINE6_PODHD500X
+ LINE6_PODHD500X,
+ LINE6_PODHDDESKTOP
};
struct usb_line6_podhd {
@@ -157,7 +158,7 @@ static struct line6_pcm_properties podx3_pcm_properties = {
};
static struct usb_driver podhd_driver;
-static void podhd_startup_start_workqueue(unsigned long data);
+static void podhd_startup_start_workqueue(struct timer_list *t);
static void podhd_startup_workqueue(struct work_struct *work);
static int podhd_startup_finalize(struct usb_line6_podhd *pod);
@@ -207,12 +208,12 @@ static void podhd_startup(struct usb_line6_podhd *pod)
/* delay startup procedure: */
line6_start_timer(&pod->startup_timer, PODHD_STARTUP_DELAY,
- podhd_startup_start_workqueue, (unsigned long)pod);
+ podhd_startup_start_workqueue);
}
-static void podhd_startup_start_workqueue(unsigned long data)
+static void podhd_startup_start_workqueue(struct timer_list *t)
{
- struct usb_line6_podhd *pod = (struct usb_line6_podhd *)data;
+ struct usb_line6_podhd *pod = from_timer(pod, t, startup_timer);
CHECK_STARTUP_PROGRESS(pod->startup_progress,
PODHD_STARTUP_SCHEDULE_WORKQUEUE);
@@ -318,7 +319,7 @@ static int podhd_init(struct usb_line6 *line6,
line6->disconnect = podhd_disconnect;
- init_timer(&pod->startup_timer);
+ timer_setup(&pod->startup_timer, NULL, 0);
INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
@@ -379,6 +380,7 @@ static const struct usb_device_id podhd_id_table[] = {
{ LINE6_IF_NUM(0x414A, 0), .driver_info = LINE6_PODX3 },
{ LINE6_IF_NUM(0x414B, 0), .driver_info = LINE6_PODX3LIVE },
{ LINE6_IF_NUM(0x4159, 0), .driver_info = LINE6_PODHD500X },
+ { LINE6_IF_NUM(0x4156, 0), .driver_info = LINE6_PODHDDESKTOP },
{}
};
@@ -465,6 +467,18 @@ static const struct line6_properties podhd_properties_table[] = {
.ep_audio_r = 0x86,
.ep_audio_w = 0x02,
},
+ [LINE6_PODHDDESKTOP] = {
+ .id = "PODHDDESKTOP",
+ .name = "POD HDDESKTOP",
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_PCM | LINE6_CAP_HWMON,
+ .altsetting = 1,
+ .ep_ctrl_r = 0x81,
+ .ep_ctrl_w = 0x01,
+ .ctrl_if = 1,
+ .ep_audio_r = 0x86,
+ .ep_audio_w = 0x02,
+ },
};
/*
diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c
index ba7975c0d03d..750467fb95db 100644
--- a/sound/usb/line6/toneport.c
+++ b/sound/usb/line6/toneport.c
@@ -241,9 +241,9 @@ static int snd_toneport_source_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static void toneport_start_pcm(unsigned long arg)
+static void toneport_start_pcm(struct timer_list *t)
{
- struct usb_line6_toneport *toneport = (struct usb_line6_toneport *)arg;
+ struct usb_line6_toneport *toneport = from_timer(toneport, t, timer);
struct usb_line6 *line6 = &toneport->line6;
line6_pcm_acquire(line6->line6pcm, LINE6_STREAM_MONITOR, true);
@@ -415,8 +415,7 @@ static int toneport_init(struct usb_line6 *line6,
struct usb_line6_toneport *toneport = (struct usb_line6_toneport *) line6;
toneport->type = id->driver_info;
- setup_timer(&toneport->timer, toneport_start_pcm,
- (unsigned long)toneport);
+ timer_setup(&toneport->timer, toneport_start_pcm, 0);
line6->disconnect = line6_toneport_disconnect;
diff --git a/sound/usb/line6/variax.c b/sound/usb/line6/variax.c
index 0c4512d0382e..e8c852b2ce35 100644
--- a/sound/usb/line6/variax.c
+++ b/sound/usb/line6/variax.c
@@ -82,9 +82,9 @@ static const char variax_activate[] = {
};
/* forward declarations: */
-static void variax_startup2(unsigned long data);
-static void variax_startup4(unsigned long data);
-static void variax_startup5(unsigned long data);
+static void variax_startup2(struct timer_list *t);
+static void variax_startup4(struct timer_list *t);
+static void variax_startup5(struct timer_list *t);
static void variax_activate_async(struct usb_line6_variax *variax, int a)
{
@@ -106,12 +106,12 @@ static void variax_startup1(struct usb_line6_variax *variax)
/* delay startup procedure: */
line6_start_timer(&variax->startup_timer1, VARIAX_STARTUP_DELAY1,
- variax_startup2, (unsigned long)variax);
+ variax_startup2);
}
-static void variax_startup2(unsigned long data)
+static void variax_startup2(struct timer_list *t)
{
- struct usb_line6_variax *variax = (struct usb_line6_variax *)data;
+ struct usb_line6_variax *variax = from_timer(variax, t, startup_timer1);
struct usb_line6 *line6 = &variax->line6;
/* schedule another startup procedure until startup is complete: */
@@ -120,7 +120,7 @@ static void variax_startup2(unsigned long data)
variax->startup_progress = VARIAX_STARTUP_VERSIONREQ;
line6_start_timer(&variax->startup_timer1, VARIAX_STARTUP_DELAY1,
- variax_startup2, (unsigned long)variax);
+ variax_startup2);
/* request firmware version: */
line6_version_request_async(line6);
@@ -132,12 +132,12 @@ static void variax_startup3(struct usb_line6_variax *variax)
/* delay startup procedure: */
line6_start_timer(&variax->startup_timer2, VARIAX_STARTUP_DELAY3,
- variax_startup4, (unsigned long)variax);
+ variax_startup4);
}
-static void variax_startup4(unsigned long data)
+static void variax_startup4(struct timer_list *t)
{
- struct usb_line6_variax *variax = (struct usb_line6_variax *)data;
+ struct usb_line6_variax *variax = from_timer(variax, t, startup_timer2);
CHECK_STARTUP_PROGRESS(variax->startup_progress,
VARIAX_STARTUP_ACTIVATE);
@@ -145,12 +145,12 @@ static void variax_startup4(unsigned long data)
/* activate device: */
variax_activate_async(variax, 1);
line6_start_timer(&variax->startup_timer2, VARIAX_STARTUP_DELAY4,
- variax_startup5, (unsigned long)variax);
+ variax_startup5);
}
-static void variax_startup5(unsigned long data)
+static void variax_startup5(struct timer_list *t)
{
- struct usb_line6_variax *variax = (struct usb_line6_variax *)data;
+ struct usb_line6_variax *variax = from_timer(variax, t, startup_timer2);
CHECK_STARTUP_PROGRESS(variax->startup_progress,
VARIAX_STARTUP_WORKQUEUE);
@@ -190,7 +190,7 @@ static void line6_variax_process_message(struct usb_line6 *line6)
} else if (memcmp(buf + 1, variax_init_done + 1,
sizeof(variax_init_done) - 1) == 0) {
/* notify of complete initialization: */
- variax_startup4((unsigned long)variax);
+ variax_startup4(&variax->startup_timer2);
}
break;
}
@@ -222,8 +222,8 @@ static int variax_init(struct usb_line6 *line6,
line6->process_message = line6_variax_process_message;
line6->disconnect = line6_variax_disconnect;
- init_timer(&variax->startup_timer1);
- init_timer(&variax->startup_timer2);
+ timer_setup(&variax->startup_timer1, NULL, 0);
+ timer_setup(&variax->startup_timer2, NULL, 0);
INIT_WORK(&variax->startup_work, variax_startup6);
/* initialize USB buffers: */
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index a92e2b2a91ec..2c1aaa3292bf 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -352,9 +352,9 @@ static void snd_usbmidi_out_tasklet(unsigned long data)
}
/* called after transfers had been interrupted due to some USB error */
-static void snd_usbmidi_error_timer(unsigned long data)
+static void snd_usbmidi_error_timer(struct timer_list *t)
{
- struct snd_usb_midi *umidi = (struct snd_usb_midi *)data;
+ struct snd_usb_midi *umidi = from_timer(umidi, t, error_timer);
unsigned int i, j;
spin_lock(&umidi->disc_lock);
@@ -1282,6 +1282,7 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi,
unsigned int pipe;
int length;
unsigned int i;
+ int err;
rep->in = NULL;
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
@@ -1292,8 +1293,8 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi,
for (i = 0; i < INPUT_URBS; ++i) {
ep->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!ep->urbs[i]) {
- snd_usbmidi_in_endpoint_delete(ep);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto error;
}
}
if (ep_info->in_interval)
@@ -1305,8 +1306,8 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi,
buffer = usb_alloc_coherent(umidi->dev, length, GFP_KERNEL,
&ep->urbs[i]->transfer_dma);
if (!buffer) {
- snd_usbmidi_in_endpoint_delete(ep);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto error;
}
if (ep_info->in_interval)
usb_fill_int_urb(ep->urbs[i], umidi->dev,
@@ -1318,10 +1319,20 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi,
pipe, buffer, length,
snd_usbmidi_in_urb_complete, ep);
ep->urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ err = usb_urb_ep_type_check(ep->urbs[i]);
+ if (err < 0) {
+ dev_err(&umidi->dev->dev, "invalid MIDI in EP %x\n",
+ ep_info->in_ep);
+ goto error;
+ }
}
rep->in = ep;
return 0;
+
+ error:
+ snd_usbmidi_in_endpoint_delete(ep);
+ return -ENOMEM;
}
/*
@@ -1357,6 +1368,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi,
unsigned int i;
unsigned int pipe;
void *buffer;
+ int err;
rep->out = NULL;
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
@@ -1367,8 +1379,8 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi,
for (i = 0; i < OUTPUT_URBS; ++i) {
ep->urbs[i].urb = usb_alloc_urb(0, GFP_KERNEL);
if (!ep->urbs[i].urb) {
- snd_usbmidi_out_endpoint_delete(ep);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto error;
}
ep->urbs[i].ep = ep;
}
@@ -1406,8 +1418,8 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi,
ep->max_transfer, GFP_KERNEL,
&ep->urbs[i].urb->transfer_dma);
if (!buffer) {
- snd_usbmidi_out_endpoint_delete(ep);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto error;
}
if (ep_info->out_interval)
usb_fill_int_urb(ep->urbs[i].urb, umidi->dev,
@@ -1419,6 +1431,12 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi,
pipe, buffer, ep->max_transfer,
snd_usbmidi_out_urb_complete,
&ep->urbs[i]);
+ err = usb_urb_ep_type_check(ep->urbs[i].urb);
+ if (err < 0) {
+ dev_err(&umidi->dev->dev, "invalid MIDI out EP %x\n",
+ ep_info->out_ep);
+ goto error;
+ }
ep->urbs[i].urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
}
@@ -1437,6 +1455,10 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi,
rep->out = ep;
return 0;
+
+ error:
+ snd_usbmidi_out_endpoint_delete(ep);
+ return err;
}
/*
@@ -2347,8 +2369,7 @@ int __snd_usbmidi_create(struct snd_card *card,
usb_id = USB_ID(le16_to_cpu(umidi->dev->descriptor.idVendor),
le16_to_cpu(umidi->dev->descriptor.idProduct));
umidi->usb_id = usb_id;
- setup_timer(&umidi->error_timer, snd_usbmidi_error_timer,
- (unsigned long)umidi);
+ timer_setup(&umidi->error_timer, snd_usbmidi_error_timer, 0);
/* detect the endpoint(s) to use */
memset(endpoints, 0, sizeof(endpoints));
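The snd_usbmidi_{in,out}_endpoint_create() changes above route every allocation failure and the new endpoint check through a single error: label, so the partially built endpoint is torn down in one place. A generic user-space sketch of that unwind pattern (names and sizes are illustrative):

#include <stdlib.h>
#include <errno.h>

struct endpoint { void *urb; void *buffer; };

/* Allocate both resources, or free whatever was obtained and report why. */
static int endpoint_create(struct endpoint *ep)
{
	int err;

	ep->urb = NULL;
	ep->buffer = NULL;

	ep->urb = malloc(64);
	if (!ep->urb) {
		err = -ENOMEM;
		goto error;
	}

	ep->buffer = malloc(512);
	if (!ep->buffer) {
		err = -ENOMEM;
		goto error;
	}

	return 0;

error:
	free(ep->urb);
	free(ep->buffer);	/* free(NULL) is a no-op, so partial init is fine */
	ep->urb = ep->buffer = NULL;
	return err;
}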
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 20624320b753..77eecaa4db1f 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1128,30 +1128,24 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
/* devices which do not support reading the sample rate. */
switch (chip->usb_id) {
case USB_ID(0x041E, 0x4080): /* Creative Live Cam VF0610 */
- case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
- case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
- case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
- case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
- case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
- case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
- case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */
- case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
- case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
- case USB_ID(0x047F, 0xC022): /* Plantronics C310 */
- case USB_ID(0x047F, 0xC02F): /* Plantronics P610 */
- case USB_ID(0x047F, 0xC036): /* Plantronics C520-M */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
- case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
- case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
- case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
return true;
}
+
+ /* devices of these vendors don't support reading rate, either */
+ switch (USB_ID_VENDOR(chip->usb_id)) {
+ case 0x045E: /* MS Lifecam */
+ case 0x047F: /* Plantronics */
+ case 0x1de7: /* Phoenix Audio */
+ return true;
+ }
+
return false;
}
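The quirks.c rewrite above collapses a dozen per-device entries into per-vendor checks: USB_ID() packs vendor and product into one 32-bit key and USB_ID_VENDOR() recovers the vendor half, so all Microsoft Lifecam, Plantronics and Phoenix Audio devices match with three case labels. A user-space sketch of the same matching, with the macro packing mirroring sound/usb/usbaudio.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same packing as sound/usb/usbaudio.h. */
#define USB_ID(vendor, product)  (((uint32_t)(vendor) << 16) | (product))
#define USB_ID_VENDOR(id)        ((uint16_t)((id) >> 16))

static bool no_sample_rate_readback(uint32_t usb_id)
{
	switch (usb_id) {
	case USB_ID(0x041E, 0x4080):	/* Creative Live Cam VF0610 */
		return true;
	}

	switch (USB_ID_VENDOR(usb_id)) {
	case 0x045E:	/* Microsoft Lifecam family */
	case 0x047F:	/* Plantronics */
	case 0x1de7:	/* Phoenix Audio */
		return true;
	}

	return false;
}

int main(void)
{
	printf("%d\n", no_sample_rate_readback(USB_ID(0x047F, 0xC036)));
	return 0;
}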
diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
index e229abd21652..b0f8979ff2d2 100644
--- a/sound/usb/usx2y/usb_stream.c
+++ b/sound/usb/usx2y/usb_stream.c
@@ -56,7 +56,7 @@ check:
lb, s->period_size);
}
-static void init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
+static int init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
struct urb **urbs, char *transfer,
struct usb_device *dev, int pipe)
{
@@ -77,6 +77,8 @@ static void init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
urb->interval = 1;
if (usb_pipeout(pipe))
continue;
+ if (usb_urb_ep_type_check(urb))
+ return -EINVAL;
urb->transfer_buffer_length = transfer_length;
desc = urb->iso_frame_desc;
@@ -87,9 +89,11 @@ static void init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
desc[p].length = maxpacket;
}
}
+
+ return 0;
}
-static void init_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
+static int init_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
struct usb_device *dev, int in_pipe, int out_pipe)
{
struct usb_stream *s = sk->s;
@@ -103,9 +107,12 @@ static void init_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
sk->outurb[u] = usb_alloc_urb(sk->n_o_ps, GFP_KERNEL);
}
- init_pipe_urbs(sk, use_packsize, sk->inurb, indata, dev, in_pipe);
- init_pipe_urbs(sk, use_packsize, sk->outurb, sk->write_page, dev,
- out_pipe);
+ if (init_pipe_urbs(sk, use_packsize, sk->inurb, indata, dev, in_pipe) ||
+ init_pipe_urbs(sk, use_packsize, sk->outurb, sk->write_page, dev,
+ out_pipe))
+ return -EINVAL;
+
+ return 0;
}
@@ -226,7 +233,11 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
else
sk->freqn = get_usb_high_speed_rate(sample_rate);
- init_urbs(sk, use_packsize, dev, in_pipe, out_pipe);
+ if (init_urbs(sk, use_packsize, dev, in_pipe, out_pipe) < 0) {
+ usb_stream_free(sk);
+ return NULL;
+ }
+
sk->s->state = usb_stream_stopped;
out:
return sk->s;
diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c
index 4569c0efac0a..0ddf29267d70 100644
--- a/sound/usb/usx2y/usbusx2y.c
+++ b/sound/usb/usx2y/usbusx2y.c
@@ -279,6 +279,9 @@ int usX2Y_AsyncSeq04_init(struct usX2Ydev *usX2Y)
usX2Y->AS04.buffer + URB_DataLen_AsyncSeq*i, 0,
i_usX2Y_Out04Int, usX2Y
);
+ err = usb_urb_ep_type_check(usX2Y->AS04.urb[i]);
+ if (err < 0)
+ break;
}
return err;
}
@@ -298,6 +301,8 @@ int usX2Y_In04_init(struct usX2Ydev *usX2Y)
usX2Y->In04Buf, 21,
i_usX2Y_In04Int, usX2Y,
10);
+ if (usb_urb_ep_type_check(usX2Y->In04urb))
+ return -EINVAL;
return usb_submit_urb(usX2Y->In04urb, GFP_KERNEL);
}
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index f93b355756e6..345e439aa95b 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -677,6 +677,9 @@ static int usX2Y_rate_set(struct usX2Ydev *usX2Y, int rate)
usb_fill_bulk_urb(us->urb[i], usX2Y->dev, usb_sndbulkpipe(usX2Y->dev, 4),
usbdata + i, 2, i_usX2Y_04Int, usX2Y);
}
+ err = usb_urb_ep_type_check(us->urb[0]);
+ if (err < 0)
+ goto cleanup;
us->submitted = 0;
us->len = NOOF_SETRATE_URBS;
usX2Y->US04 = us;
diff --git a/tools/arch/x86/include/asm/atomic.h b/tools/arch/x86/include/asm/atomic.h
index 7d8c3261a50d..1f5e26aae9fc 100644
--- a/tools/arch/x86/include/asm/atomic.h
+++ b/tools/arch/x86/include/asm/atomic.h
@@ -25,7 +25,7 @@
*/
static inline int atomic_read(const atomic_t *v)
{
- return ACCESS_ONCE((v)->counter);
+ return READ_ONCE((v)->counter);
}
/**
diff --git a/tools/gpio/gpio-utils.c b/tools/gpio/gpio-utils.c
index b86a32d90d88..cf7e2f3419ee 100644
--- a/tools/gpio/gpio-utils.c
+++ b/tools/gpio/gpio-utils.c
@@ -76,7 +76,8 @@ int gpiotools_request_linehandle(const char *device_name, unsigned int *lines,
fd = open(chrdev_name, 0);
if (fd == -1) {
ret = -errno;
- fprintf(stderr, "Failed to open %s\n", chrdev_name);
+ fprintf(stderr, "Failed to open %s, %s\n",
+ chrdev_name, strerror(errno));
goto exit_close_error;
}
@@ -92,8 +93,8 @@ int gpiotools_request_linehandle(const char *device_name, unsigned int *lines,
ret = ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req);
if (ret == -1) {
ret = -errno;
- fprintf(stderr, "Failed to issue GET LINEHANDLE IOCTL (%d)\n",
- ret);
+ fprintf(stderr, "Failed to issue %s (%d), %s\n",
+ "GPIO_GET_LINEHANDLE_IOCTL", ret, strerror(errno));
}
exit_close_error:
@@ -118,8 +119,9 @@ int gpiotools_set_values(const int fd, struct gpiohandle_data *data)
ret = ioctl(fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, data);
if (ret == -1) {
ret = -errno;
- fprintf(stderr, "Failed to issue %s (%d)\n",
- "GPIOHANDLE_SET_LINE_VALUES_IOCTL", ret);
+ fprintf(stderr, "Failed to issue %s (%d), %s\n",
+ "GPIOHANDLE_SET_LINE_VALUES_IOCTL", ret,
+ strerror(errno));
}
return ret;
@@ -141,8 +143,9 @@ int gpiotools_get_values(const int fd, struct gpiohandle_data *data)
ret = ioctl(fd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, data);
if (ret == -1) {
ret = -errno;
- fprintf(stderr, "Failed to issue %s (%d)\n",
- "GPIOHANDLE_GET_LINE_VALUES_IOCTL", ret);
+ fprintf(stderr, "Failed to issue %s (%d), %s\n",
+ "GPIOHANDLE_GET_LINE_VALUES_IOCTL", ret,
+ strerror(errno));
}
return ret;
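The gpio-utils changes append strerror(errno) to every failure message so the user sees why an open or ioctl failed, not just the negative return code. A small standalone example of the convention; the device path is only illustrative:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/gpiochip0", 0);

	if (fd == -1) {
		int ret = -errno;

		/* Report both the errno-derived code and its text. */
		fprintf(stderr, "Failed to open %s (%d), %s\n",
			"/dev/gpiochip0", ret, strerror(errno));
		return 1;
	}

	close(fd);
	return 0;
}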
diff --git a/tools/include/asm-generic/atomic-gcc.h b/tools/include/asm-generic/atomic-gcc.h
index 40b231fb95bd..4c1966f7c77a 100644
--- a/tools/include/asm-generic/atomic-gcc.h
+++ b/tools/include/asm-generic/atomic-gcc.h
@@ -22,7 +22,7 @@
*/
static inline int atomic_read(const atomic_t *v)
{
- return ACCESS_ONCE((v)->counter);
+ return READ_ONCE((v)->counter);
}
/**
diff --git a/tools/include/linux/kmemcheck.h b/tools/include/linux/kmemcheck.h
index 2bccd2c7b897..ea32a7d3cf1b 100644
--- a/tools/include/linux/kmemcheck.h
+++ b/tools/include/linux/kmemcheck.h
@@ -1,9 +1 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_KMEMCHECK_H_
-#define _LIBLOCKDEP_LINUX_KMEMCHECK_H_
-
-static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
-{
-}
-
-#endif
diff --git a/tools/include/linux/poison.h b/tools/include/linux/poison.h
index 4bf6777a8a03..9fdcd3eaac3b 100644
--- a/tools/include/linux/poison.h
+++ b/tools/include/linux/poison.h
@@ -15,6 +15,10 @@
# define POISON_POINTER_DELTA 0
#endif
+#ifdef __cplusplus
+#define LIST_POISON1 NULL
+#define LIST_POISON2 NULL
+#else
/*
* These are non-NULL pointers that will result in page faults
* under normal circumstances, used to verify that nobody uses
@@ -22,6 +26,7 @@
*/
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
+#endif
/********** include/linux/timer.h **********/
/*
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 6598fb76d2c2..9816590d3ad2 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -829,6 +829,7 @@ struct drm_i915_gem_exec_fence {
#define I915_EXEC_FENCE_WAIT (1<<0)
#define I915_EXEC_FENCE_SIGNAL (1<<1)
+#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
__u32 flags;
};
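__I915_EXEC_FENCE_UNKNOWN_FLAGS relies on a two's-complement trick: negating (last_flag << 1) sets every bit above the highest defined flag, giving a ready-made mask for rejecting flags the kernel does not know about. A standalone illustration; the rejection check itself is a sketch, not code from the header:

#include <stdint.h>
#include <stdio.h>

#define I915_EXEC_FENCE_WAIT            (1 << 0)
#define I915_EXEC_FENCE_SIGNAL          (1 << 1)
/* All bits above the last known flag: ...11111100 in binary. */
#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))

int main(void)
{
	uint32_t good = I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_SIGNAL;
	uint32_t bad  = good | (1 << 5);	/* a flag from the future */

	printf("mask = 0x%08x\n", (uint32_t)__I915_EXEC_FENCE_UNKNOWN_FLAGS);
	printf("good rejected? %d\n", !!(good & __I915_EXEC_FENCE_UNKNOWN_FLAGS));
	printf("bad  rejected? %d\n", !!(bad  & __I915_EXEC_FENCE_UNKNOWN_FLAGS));
	return 0;
}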
diff --git a/tools/include/uapi/linux/kcmp.h b/tools/include/uapi/linux/kcmp.h
new file mode 100644
index 000000000000..481e103da78e
--- /dev/null
+++ b/tools/include/uapi/linux/kcmp.h
@@ -0,0 +1,27 @@
+#ifndef _UAPI_LINUX_KCMP_H
+#define _UAPI_LINUX_KCMP_H
+
+#include <linux/types.h>
+
+/* Comparison type */
+enum kcmp_type {
+ KCMP_FILE,
+ KCMP_VM,
+ KCMP_FILES,
+ KCMP_FS,
+ KCMP_SIGHAND,
+ KCMP_IO,
+ KCMP_SYSVSEM,
+ KCMP_EPOLL_TFD,
+
+ KCMP_TYPES,
+};
+
+/* Slot for KCMP_EPOLL_TFD */
+struct kcmp_epoll_slot {
+ __u32 efd; /* epoll file descriptor */
+ __u32 tfd; /* target file number */
+ __u32 toff; /* target offset within same numbered sequence */
+};
+
+#endif /* _UAPI_LINUX_KCMP_H */
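The kcmp.h copy gives perf's syscall beautifier the KCMP_* names; the kcmp() syscall itself compares kernel resources of two tasks. A hedged user-space sketch that asks whether two descriptors in the same process share one open file description (assumes a kernel built with CONFIG_CHECKPOINT_RESTORE and a libc that defines SYS_kcmp):

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define KCMP_FILE 0	/* from the header added above */

int main(void)
{
	pid_t pid = getpid();
	int fd1 = open("/dev/null", O_RDONLY);
	int fd2 = dup(fd1);

	/* A return of 0 means both descriptors refer to the same struct file. */
	long same = syscall(SYS_kcmp, pid, pid, KCMP_FILE, fd1, fd2);

	printf("fd1 and fd2 share an open file description: %s\n",
	       same == 0 ? "yes" : "no");
	return 0;
}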
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
new file mode 100644
index 000000000000..a8d0759a9e40
--- /dev/null
+++ b/tools/include/uapi/linux/prctl.h
@@ -0,0 +1,200 @@
+#ifndef _LINUX_PRCTL_H
+#define _LINUX_PRCTL_H
+
+#include <linux/types.h>
+
+/* Values to pass as first argument to prctl() */
+
+#define PR_SET_PDEATHSIG 1 /* Second arg is a signal */
+#define PR_GET_PDEATHSIG 2 /* Second arg is a ptr to return the signal */
+
+/* Get/set current->mm->dumpable */
+#define PR_GET_DUMPABLE 3
+#define PR_SET_DUMPABLE 4
+
+/* Get/set unaligned access control bits (if meaningful) */
+#define PR_GET_UNALIGN 5
+#define PR_SET_UNALIGN 6
+# define PR_UNALIGN_NOPRINT 1 /* silently fix up unaligned user accesses */
+# define PR_UNALIGN_SIGBUS 2 /* generate SIGBUS on unaligned user access */
+
+/* Get/set whether or not to drop capabilities on setuid() away from
+ * uid 0 (as per security/commoncap.c) */
+#define PR_GET_KEEPCAPS 7
+#define PR_SET_KEEPCAPS 8
+
+/* Get/set floating-point emulation control bits (if meaningful) */
+#define PR_GET_FPEMU 9
+#define PR_SET_FPEMU 10
+# define PR_FPEMU_NOPRINT 1 /* silently emulate fp operations accesses */
+# define PR_FPEMU_SIGFPE 2 /* don't emulate fp operations, send SIGFPE instead */
+
+/* Get/set floating-point exception mode (if meaningful) */
+#define PR_GET_FPEXC 11
+#define PR_SET_FPEXC 12
+# define PR_FP_EXC_SW_ENABLE 0x80 /* Use FPEXC for FP exception enables */
+# define PR_FP_EXC_DIV 0x010000 /* floating point divide by zero */
+# define PR_FP_EXC_OVF 0x020000 /* floating point overflow */
+# define PR_FP_EXC_UND 0x040000 /* floating point underflow */
+# define PR_FP_EXC_RES 0x080000 /* floating point inexact result */
+# define PR_FP_EXC_INV 0x100000 /* floating point invalid operation */
+# define PR_FP_EXC_DISABLED 0 /* FP exceptions disabled */
+# define PR_FP_EXC_NONRECOV 1 /* async non-recoverable exc. mode */
+# define PR_FP_EXC_ASYNC 2 /* async recoverable exception mode */
+# define PR_FP_EXC_PRECISE 3 /* precise exception mode */
+
+/* Get/set whether we use statistical process timing or accurate timestamp
+ * based process timing */
+#define PR_GET_TIMING 13
+#define PR_SET_TIMING 14
+# define PR_TIMING_STATISTICAL 0 /* Normal, traditional,
+ statistical process timing */
+# define PR_TIMING_TIMESTAMP 1 /* Accurate timestamp based
+ process timing */
+
+#define PR_SET_NAME 15 /* Set process name */
+#define PR_GET_NAME 16 /* Get process name */
+
+/* Get/set process endian */
+#define PR_GET_ENDIAN 19
+#define PR_SET_ENDIAN 20
+# define PR_ENDIAN_BIG 0
+# define PR_ENDIAN_LITTLE 1 /* True little endian mode */
+# define PR_ENDIAN_PPC_LITTLE 2 /* "PowerPC" pseudo little endian */
+
+/* Get/set process seccomp mode */
+#define PR_GET_SECCOMP 21
+#define PR_SET_SECCOMP 22
+
+/* Get/set the capability bounding set (as per security/commoncap.c) */
+#define PR_CAPBSET_READ 23
+#define PR_CAPBSET_DROP 24
+
+/* Get/set the process' ability to use the timestamp counter instruction */
+#define PR_GET_TSC 25
+#define PR_SET_TSC 26
+# define PR_TSC_ENABLE 1 /* allow the use of the timestamp counter */
+# define PR_TSC_SIGSEGV 2 /* throw a SIGSEGV instead of reading the TSC */
+
+/* Get/set securebits (as per security/commoncap.c) */
+#define PR_GET_SECUREBITS 27
+#define PR_SET_SECUREBITS 28
+
+/*
+ * Get/set the timerslack as used by poll/select/nanosleep
+ * A value of 0 means "use default"
+ */
+#define PR_SET_TIMERSLACK 29
+#define PR_GET_TIMERSLACK 30
+
+#define PR_TASK_PERF_EVENTS_DISABLE 31
+#define PR_TASK_PERF_EVENTS_ENABLE 32
+
+/*
+ * Set early/late kill mode for hwpoison memory corruption.
+ * This influences when the process gets killed on a memory corruption.
+ */
+#define PR_MCE_KILL 33
+# define PR_MCE_KILL_CLEAR 0
+# define PR_MCE_KILL_SET 1
+
+# define PR_MCE_KILL_LATE 0
+# define PR_MCE_KILL_EARLY 1
+# define PR_MCE_KILL_DEFAULT 2
+
+#define PR_MCE_KILL_GET 34
+
+/*
+ * Tune up process memory map specifics.
+ */
+#define PR_SET_MM 35
+# define PR_SET_MM_START_CODE 1
+# define PR_SET_MM_END_CODE 2
+# define PR_SET_MM_START_DATA 3
+# define PR_SET_MM_END_DATA 4
+# define PR_SET_MM_START_STACK 5
+# define PR_SET_MM_START_BRK 6
+# define PR_SET_MM_BRK 7
+# define PR_SET_MM_ARG_START 8
+# define PR_SET_MM_ARG_END 9
+# define PR_SET_MM_ENV_START 10
+# define PR_SET_MM_ENV_END 11
+# define PR_SET_MM_AUXV 12
+# define PR_SET_MM_EXE_FILE 13
+# define PR_SET_MM_MAP 14
+# define PR_SET_MM_MAP_SIZE 15
+
+/*
+ * This structure provides a new memory descriptor
+ * map which mostly modifies /proc/pid/stat[m]
+ * output for a task. This is mostly done for the
+ * sake of checkpoint/restore functionality.
+ */
+struct prctl_mm_map {
+ __u64 start_code; /* code section bounds */
+ __u64 end_code;
+ __u64 start_data; /* data section bounds */
+ __u64 end_data;
+ __u64 start_brk; /* heap for brk() syscall */
+ __u64 brk;
+ __u64 start_stack; /* stack starts at */
+ __u64 arg_start; /* command line arguments bounds */
+ __u64 arg_end;
+ __u64 env_start; /* environment variables bounds */
+ __u64 env_end;
+ __u64 *auxv; /* auxiliary vector */
+ __u32 auxv_size; /* vector size */
+ __u32 exe_fd; /* /proc/$pid/exe link file */
+};
+
+/*
+ * Set specific pid that is allowed to ptrace the current task.
+ * A value of 0 means "no process".
+ */
+#define PR_SET_PTRACER 0x59616d61
+# define PR_SET_PTRACER_ANY ((unsigned long)-1)
+
+#define PR_SET_CHILD_SUBREAPER 36
+#define PR_GET_CHILD_SUBREAPER 37
+
+/*
+ * If no_new_privs is set, then operations that grant new privileges (i.e.
+ * execve) will either fail or not grant them. This affects suid/sgid,
+ * file capabilities, and LSMs.
+ *
+ * Operations that merely manipulate or drop existing privileges (setresuid,
+ * capset, etc.) will still work. Drop those privileges if you want them gone.
+ *
+ * Changing LSM security domain is considered a new privilege. So, for example,
+ * asking selinux for a specific new context (e.g. with runcon) will result
+ * in execve returning -EPERM.
+ *
+ * See Documentation/prctl/no_new_privs.txt for more details.
+ */
+#define PR_SET_NO_NEW_PRIVS 38
+#define PR_GET_NO_NEW_PRIVS 39
+
+#define PR_GET_TID_ADDRESS 40
+
+#define PR_SET_THP_DISABLE 41
+#define PR_GET_THP_DISABLE 42
+
+/*
+ * Tell the kernel to start/stop helping userspace manage bounds tables.
+ */
+#define PR_MPX_ENABLE_MANAGEMENT 43
+#define PR_MPX_DISABLE_MANAGEMENT 44
+
+#define PR_SET_FP_MODE 45
+#define PR_GET_FP_MODE 46
+# define PR_FP_MODE_FR (1 << 0) /* 64b FP registers */
+# define PR_FP_MODE_FRE (1 << 1) /* 32b compatibility */
+
+/* Control the ambient capability set */
+#define PR_CAP_AMBIENT 47
+# define PR_CAP_AMBIENT_IS_SET 1
+# define PR_CAP_AMBIENT_RAISE 2
+# define PR_CAP_AMBIENT_LOWER 3
+# define PR_CAP_AMBIENT_CLEAR_ALL 4
+
+#endif /* _LINUX_PRCTL_H */
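The comment block above describes PR_SET_NO_NEW_PRIVS: once the bit is set, execve() can no longer grant privileges through setuid bits, file capabilities or LSM domain transitions, and the bit can never be cleared. A minimal user-space example that sets the bit and reads it back:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* One-way switch: stays set for the lifetime of the task. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
		perror("PR_SET_NO_NEW_PRIVS");
		return 1;
	}

	printf("no_new_privs = %d\n", prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
	return 0;
}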
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index c0e26ad1fa7e..9b341584eb1b 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1757,11 +1757,14 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
if (insn->dead_end)
return 0;
- insn = next_insn;
- if (!insn) {
+ if (!next_insn) {
+ if (state.cfa.base == CFI_UNDEFINED)
+ return 0;
WARN("%s: unexpected end of section", sec->name);
return 1;
}
+
+ insn = next_insn;
}
return 0;
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index 31e0f9143840..07f329919828 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -70,7 +70,7 @@ static void cmd_usage(void)
printf("\n");
- exit(1);
+ exit(129);
}
static void handle_options(int *argc, const char ***argv)
@@ -86,9 +86,7 @@ static void handle_options(int *argc, const char ***argv)
break;
} else {
fprintf(stderr, "Unknown option: %s\n", cmd);
- fprintf(stderr, "\n Usage: %s\n",
- objtool_usage_string);
- exit(1);
+ cmd_usage();
}
(*argv)++;
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index f709de54707b..e2a897ae3596 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -8,7 +8,8 @@ perf-list - List all symbolic event types
SYNOPSIS
--------
[verse]
-'perf list' [--no-desc] [--long-desc] [hw|sw|cache|tracepoint|pmu|sdt|event_glob]
+'perf list' [--no-desc] [--long-desc]
+ [hw|sw|cache|tracepoint|pmu|sdt|metric|metricgroup|event_glob]
DESCRIPTION
-----------
@@ -47,6 +48,8 @@ counted. The following modifiers exist:
P - use maximum detected precise level
S - read sample value (PERF_SAMPLE_READ)
D - pin the event to the PMU
+ W - group is weak and will fall back to non-group if not schedulable,
+ only supported in 'perf stat' for now.
The 'p' modifier can be used for specifying how precise the instruction
address should be. The 'p' modifier can be specified multiple times:
@@ -201,7 +204,7 @@ For example Intel Core CPUs typically have four generic performance counters
for the core, plus three fixed counters for instructions, cycles and
ref-cycles. Some special events have restrictions on which counter they
can schedule, and may not support multiple instances in a single group.
-When too many events are specified in the group none of them will not
+When too many events are specified in the group some of them will not
be measured.
Globally pinned events can limit the number of counters available for
@@ -246,6 +249,10 @@ To limit the list use:
. 'sdt' to list all Statically Defined Tracepoint events.
+. 'metric' to list metrics
+
+. 'metricgroup' to list metricgroups with metrics.
+
. If none of the above is matched, it will apply the supplied glob to all
events, printing the ones that match.
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 63526f4416ea..5a626ef666c2 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -377,6 +377,8 @@ symbolic names, e.g. on x86, ax, si. To list the available registers use
--intr-regs=\?. To name registers, pass a comma separated list such as
--intr-regs=ax,bx. The list of register is architecture dependent.
+--user-regs::
+Capture user registers at sample time. Same arguments as -I.
--running-time::
Record running and enabled time for read events (:S)
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 383a98d992ed..ddde2b54af57 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -434,7 +434,8 @@ include::itrace.txt[]
--inline::
If a callgraph address belongs to an inlined function, the inline stack
- will be printed. Each entry is function name or file/line.
+ will be printed. Each entry is function name or file/line. Enabled by
+ default, disable with --no-inline.
include::callchain-overhead-calculation.txt[]
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index a092a2499e8f..55b67338548e 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -106,6 +106,14 @@ OPTIONS for 'perf sched timehist'
--max-stack::
Maximum number of functions to display in backtrace, default 5.
+-p=::
+--pid=::
+ Only show events for given process ID (comma separated list).
+
+-t=::
+--tid=::
+ Only show events for given thread ID (comma separated list).
+
-s::
--summary::
Show only a summary of scheduling by thread with min, max, and average
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 18dfcfa38454..2811fcf684cb 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -116,8 +116,8 @@ OPTIONS
--fields::
Comma separated list of fields to print. Options are:
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
- srcline, period, iregs, brstack, brstacksym, flags, bpf-output, brstackinsn, brstackoff,
- callindent, insn, insnlen, synth, phys_addr.
+ srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output, brstackinsn,
+ brstackoff, callindent, insn, insnlen, synth, phys_addr.
Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
@@ -325,9 +325,14 @@ include::itrace.txt[]
Set the maximum number of program blocks to print with brstackasm for
each sample.
+--per-event-dump::
+ Create per-event files with a "perf.data.EVENT.dump" name instead of
+ printing to stdout, useful, for instance, for generating flamegraphs.
+
--inline::
If a callgraph address belongs to an inlined function, the inline stack
- will be printed. Each entry has function name and file/line.
+ will be printed. Each entry has function name and file/line. Enabled by
+ default, disable with --no-inline.
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index c37d61682dfb..823fce7674bb 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -199,6 +199,13 @@ Aggregate counts per processor socket for system-wide mode measurements.
--per-core::
Aggregate counts per physical processor for system-wide mode measurements.
+-M::
+--metrics::
+Print metrics or metricgroups specified in a comma separated list.
+For a group all metrics from the group are added.
+The events from the metrics are automatically measured.
+See perf list output for the possible metrics and metricgroups.
+
-A::
--no-aggr::
Do not aggregate counts across all monitored CPUs.
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index d864ea6fd367..4353262bc462 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -240,6 +240,9 @@ Default is to monitor all CPUS.
--force::
Don't do ownership validation.
+--num-thread-synthesize::
+ The number of threads to run when synthesizing events for existing processes.
+ By default, the number of threads equals the number of online CPUs.
INTERACTIVE PROMPTING KEYS
--------------------------
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 91ef44bfaf3e..68cf1360a3f3 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -173,7 +173,7 @@ AWK = awk
# non-config cases
config := 1
-NON_CONFIG_TARGETS := clean TAGS tags cscope help install-doc install-man install-html install-info install-pdf doc man html info pdf
+NON_CONFIG_TARGETS := clean python-clean TAGS tags cscope help install-doc install-man install-html install-info install-pdf doc man html info pdf
ifdef MAKECMDGOALS
ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
@@ -420,6 +420,13 @@ sndrv_pcm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/sndrv_pcm_ioctl.sh
$(sndrv_pcm_ioctl_array): $(sndrv_pcm_hdr_dir)/asound.h $(sndrv_pcm_ioctl_tbl)
$(Q)$(SHELL) '$(sndrv_pcm_ioctl_tbl)' $(sndrv_pcm_hdr_dir) > $@
+kcmp_type_array := $(beauty_outdir)/kcmp_type_array.c
+kcmp_hdr_dir := $(srctree)/tools/include/uapi/linux/
+kcmp_type_tbl := $(srctree)/tools/perf/trace/beauty/kcmp_type.sh
+
+$(kcmp_type_array): $(kcmp_hdr_dir)/kcmp.h $(kcmp_type_tbl)
+ $(Q)$(SHELL) '$(kcmp_type_tbl)' $(kcmp_hdr_dir) > $@
+
kvm_ioctl_array := $(beauty_ioctl_outdir)/kvm_ioctl_array.c
kvm_hdr_dir := $(srctree)/tools/include/uapi/linux
kvm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/kvm_ioctl.sh
@@ -441,6 +448,20 @@ perf_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/perf_ioctl.sh
$(perf_ioctl_array): $(perf_hdr_dir)/perf_event.h $(perf_ioctl_tbl)
$(Q)$(SHELL) '$(perf_ioctl_tbl)' $(perf_hdr_dir) > $@
+madvise_behavior_array := $(beauty_outdir)/madvise_behavior_array.c
+madvise_hdr_dir := $(srctree)/tools/include/uapi/asm-generic/
+madvise_behavior_tbl := $(srctree)/tools/perf/trace/beauty/madvise_behavior.sh
+
+$(madvise_behavior_array): $(madvise_hdr_dir)/mman-common.h $(madvise_behavior_tbl)
+ $(Q)$(SHELL) '$(madvise_behavior_tbl)' $(madvise_hdr_dir) > $@
+
+prctl_option_array := $(beauty_outdir)/prctl_option_array.c
+prctl_hdr_dir := $(srctree)/tools/include/uapi/linux/
+prctl_option_tbl := $(srctree)/tools/perf/trace/beauty/prctl_option.sh
+
+$(prctl_option_array): $(prctl_hdr_dir)/prctl.h $(prctl_option_tbl)
+ $(Q)$(SHELL) '$(prctl_option_tbl)' $(prctl_hdr_dir) > $@
+
all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
@@ -539,9 +560,12 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
$(pkey_alloc_access_rights_array) \
$(sndrv_pcm_ioctl_array) \
$(sndrv_ctl_ioctl_array) \
+ $(kcmp_type_array) \
$(kvm_ioctl_array) \
$(vhost_virtio_ioctl_array) \
- $(perf_ioctl_array)
+ $(madvise_behavior_array) \
+ $(perf_ioctl_array) \
+ $(prctl_option_array)
$(OUTPUT)%.o: %.c prepare FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
@@ -802,7 +826,10 @@ config-clean:
$(call QUIET_CLEAN, config)
$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ $(if $(OUTPUT),OUTPUT=$(OUTPUT)feature/,) clean >/dev/null
-clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean fixdep-clean
+python-clean:
+ $(python-clean)
+
+clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean config-clean fixdep-clean python-clean
$(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
$(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
$(Q)$(RM) $(OUTPUT).config-detected
@@ -811,15 +838,17 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
$(OUTPUT)util/intel-pt-decoder/inat-tables.c \
$(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \
$(OUTPUT)pmu-events/pmu-events.c \
+ $(OUTPUT)$(madvise_behavior_array) \
$(OUTPUT)$(drm_ioctl_array) \
$(OUTPUT)$(pkey_alloc_access_rights_array) \
$(OUTPUT)$(sndrv_ctl_ioctl_array) \
$(OUTPUT)$(sndrv_pcm_ioctl_array) \
$(OUTPUT)$(kvm_ioctl_array) \
+ $(OUTPUT)$(kcmp_type_array) \
$(OUTPUT)$(vhost_virtio_ioctl_array) \
- $(OUTPUT)$(perf_ioctl_array)
+ $(OUTPUT)$(perf_ioctl_array) \
+ $(OUTPUT)$(prctl_option_array)
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
- $(python-clean)
#
# To provide FEATURE-DUMP into $(FEATURE_DUMP_COPY)
diff --git a/tools/perf/arch/arm/annotate/instructions.c b/tools/perf/arch/arm/annotate/instructions.c
index b39b16395aac..f64516d5b23e 100644
--- a/tools/perf/arch/arm/annotate/instructions.c
+++ b/tools/perf/arch/arm/annotate/instructions.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
#include <sys/types.h>
#include <regex.h>
@@ -24,7 +25,7 @@ static struct ins_ops *arm__associate_instruction_ops(struct arch *arch, const c
return ops;
}
-static int arm__annotate_init(struct arch *arch)
+static int arm__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
struct arm_annotate *arm;
int err;
diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c
index 9a3e0523e2c9..6688977e4ac7 100644
--- a/tools/perf/arch/arm64/annotate/instructions.c
+++ b/tools/perf/arch/arm64/annotate/instructions.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
#include <sys/types.h>
#include <regex.h>
@@ -26,7 +27,7 @@ static struct ins_ops *arm64__associate_instruction_ops(struct arch *arch, const
return ops;
}
-static int arm64__annotate_init(struct arch *arch)
+static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
struct arm64_annotate *arm;
int err;
diff --git a/tools/perf/arch/powerpc/annotate/instructions.c b/tools/perf/arch/powerpc/annotate/instructions.c
index b7bc04980fe8..a3f423c27cae 100644
--- a/tools/perf/arch/powerpc/annotate/instructions.c
+++ b/tools/perf/arch/powerpc/annotate/instructions.c
@@ -1,4 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+
static struct ins_ops *powerpc__associate_instruction_ops(struct arch *arch, const char *name)
{
int i;
@@ -47,7 +49,7 @@ static struct ins_ops *powerpc__associate_instruction_ops(struct arch *arch, con
return ops;
}
-static int powerpc__annotate_init(struct arch *arch)
+static int powerpc__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
if (!arch->initialized) {
arch->initialized = true;
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index c9a81673e8aa..e0e466c650df 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -1,4 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+
static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *name)
{
struct ins_ops *ops = NULL;
@@ -20,7 +22,7 @@ static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *na
return ops;
}
-static int s390__annotate_init(struct arch *arch)
+static int s390__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
if (!arch->initialized) {
arch->initialized = true;
diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c
index 4adfb4ce2864..5bd1ba8c0282 100644
--- a/tools/perf/arch/x86/annotate/instructions.c
+++ b/tools/perf/arch/x86/annotate/instructions.c
@@ -123,3 +123,17 @@ static int x86__cpuid_parse(struct arch *arch, char *cpuid)
return -1;
}
+
+static int x86__annotate_init(struct arch *arch, char *cpuid)
+{
+ int err = 0;
+
+ if (arch->initialized)
+ return 0;
+
+ if (cpuid)
+ err = x86__cpuid_parse(arch, cpuid);
+
+ arch->initialized = true;
+ return err;
+}
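
The hunks above change every arch annotate_init() callback to take the cpuid string, so x86 can parse it while the other arches ignore it via __maybe_unused. A minimal, self-contained sketch of that callback shape follows; struct arch, the __maybe_unused macro and the "vendor,family,model" cpuid format are simplified stand-ins, not the real perf definitions.

/*
 * Sketch of the annotate_init() shape after this change: every arch takes
 * the cpuid string; arches that do not need it mark it __maybe_unused.
 * struct arch and the cpuid format below are simplified stand-ins.
 */
#include <stdio.h>

#define __maybe_unused __attribute__((unused))

struct arch {
        const char *name;
        int initialized;
        int family;             /* filled in by the x86-style parser below */
        int model;
};

/* An arch that ignores cpuid, as arm/arm64/powerpc/s390 do above. */
static int arm_annotate_init(struct arch *arch, char *cpuid __maybe_unused)
{
        if (arch->initialized)
                return 0;
        arch->initialized = 1;
        return 0;
}

/* An arch that parses cpuid when it is available, as x86 now does. */
static int x86_annotate_init(struct arch *arch, char *cpuid)
{
        int err = 0;

        if (arch->initialized)
                return 0;

        if (cpuid)      /* hypothetical "vendor,family,model" format */
                err = sscanf(cpuid, "%*[^,],%d,%d",
                             &arch->family, &arch->model) == 2 ? 0 : -1;

        arch->initialized = 1;
        return err;
}

int main(void)
{
        struct arch arm = { .name = "arm" }, x86 = { .name = "x86" };
        char cpuid[] = "GenuineIntel,6,85";

        printf("arm init: %d\n", arm_annotate_init(&arm, NULL));
        printf("x86 init: %d (family %d, model %d)\n",
               x86_annotate_init(&x86, cpuid), x86.family, x86.model);
        return 0;
}
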
diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h
index 9834fdc7c59e..c1bd979b957b 100644
--- a/tools/perf/arch/x86/include/arch-tests.h
+++ b/tools/perf/arch/x86/include/arch-tests.h
@@ -9,7 +9,6 @@ struct test;
int test__rdpmc(struct test *test __maybe_unused, int subtest);
int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest);
int test__insn_x86(struct test *test __maybe_unused, int subtest);
-int test__intel_cqm_count_nmi_context(struct test *test __maybe_unused, int subtest);
#ifdef HAVE_DWARF_UNWIND_SUPPORT
struct thread;
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index cbb7e978166b..8e2c5a38c3b9 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -5,4 +5,3 @@ libperf-y += arch-tests.o
libperf-y += rdpmc.o
libperf-y += perf-time-to-tsc.o
libperf-$(CONFIG_AUXTRACE) += insn-x86.o
-libperf-y += intel-cqm.o
diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c
index 34a078136a47..cc1802ff5410 100644
--- a/tools/perf/arch/x86/tests/arch-tests.c
+++ b/tools/perf/arch/x86/tests/arch-tests.c
@@ -25,10 +25,6 @@ struct test arch_tests[] = {
},
#endif
{
- .desc = "Intel cqm nmi context read",
- .func = test__intel_cqm_count_nmi_context,
- },
- {
.func = NULL,
},
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 3d32aa45016d..f15731a3d438 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -357,7 +357,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
}
if (total_nr_samples == 0) {
- ui__error("The %s file has no samples!\n", session->file->path);
+ ui__error("The %s file has no samples!\n", session->data->file.path);
goto out;
}
@@ -401,7 +401,7 @@ int cmd_annotate(int argc, const char **argv)
.ordering_requires_timestamps = true,
},
};
- struct perf_data_file file = {
+ struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
struct option options[] = {
@@ -411,7 +411,7 @@ int cmd_annotate(int argc, const char **argv)
"only consider symbols in these dsos"),
OPT_STRING('s', "symbol", &annotate.sym_hist_filter, "symbol",
"symbol to annotate"),
- OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
+ OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('q', "quiet", &quiet, "do now show any message"),
@@ -483,9 +483,9 @@ int cmd_annotate(int argc, const char **argv)
if (quiet)
perf_quiet_option();
- file.path = input_name;
+ data.file.path = input_name;
- annotate.session = perf_session__new(&file, false, &annotate.tool);
+ annotate.session = perf_session__new(&data, false, &annotate.tool);
if (annotate.session == NULL)
return -1;
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 5f53a7ad5ef3..3d354ba6e9c5 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -312,7 +312,7 @@ int cmd_buildid_cache(int argc, const char **argv)
*kcore_filename = NULL;
char sbuf[STRERR_BUFSIZE];
- struct perf_data_file file = {
+ struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
struct perf_session *session = NULL;
@@ -353,10 +353,10 @@ int cmd_buildid_cache(int argc, const char **argv)
nsi = nsinfo__new(ns_id);
if (missing_filename) {
- file.path = missing_filename;
- file.force = force;
+ data.file.path = missing_filename;
+ data.force = force;
- session = perf_session__new(&file, false, NULL);
+ session = perf_session__new(&data, false, NULL);
if (session == NULL)
return -1;
}
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index ec2f327cd79d..78abbe8d9d5f 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -51,10 +51,12 @@ static bool dso__skip_buildid(struct dso *dso, int with_hits)
static int perf_session__list_build_ids(bool force, bool with_hits)
{
struct perf_session *session;
- struct perf_data_file file = {
- .path = input_name,
- .mode = PERF_DATA_MODE_READ,
- .force = force,
+ struct perf_data data = {
+ .file = {
+ .path = input_name,
+ },
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
};
symbol__elf_init();
@@ -64,7 +66,7 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
if (filename__fprintf_build_id(input_name, stdout) > 0)
goto out;
- session = perf_session__new(&file, false, &build_id__mark_dso_hit_ops);
+ session = perf_session__new(&data, false, &build_id__mark_dso_hit_ops);
if (session == NULL)
return -1;
@@ -72,7 +74,7 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
* We take all buildids when the file contains AUX area tracing data
* because we do not decode the trace because it would take too long.
*/
- if (!perf_data_file__is_pipe(&file) &&
+ if (!perf_data__is_pipe(&data) &&
perf_header__has_feat(&session->header, HEADER_AUXTRACE))
with_hits = false;
@@ -80,7 +82,7 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
* in pipe-mode, the only way to get the buildids is to parse
* the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID
*/
- if (with_hits || perf_data_file__is_pipe(&file))
+ if (with_hits || perf_data__is_pipe(&data))
perf_session__process_events(session);
perf_session__fprintf_dsos_buildid(session, stdout, dso__skip_buildid, with_hits);
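
The builtin conversions above all follow the same pattern: the flat struct perf_data_file initializer becomes a struct perf_data with a nested .file member for the path, while mode and force stay at the top level. A standalone sketch of that designated-initializer shape is below; the struct is a trimmed stand-in for illustration, not the full perf definition.

/*
 * Sketch of the perf_data_file -> perf_data conversion pattern used in the
 * builtins above.  The struct is a trimmed stand-in, not the perf one.
 */
#include <stdbool.h>
#include <stdio.h>

enum perf_data_mode { PERF_DATA_MODE_READ, PERF_DATA_MODE_WRITE };

struct perf_data {
        struct {
                const char *path;       /* formerly perf_data_file::path */
                int         fd;
        } file;
        enum perf_data_mode mode;
        bool                force;
};

int main(void)
{
        /* Old style, flat initializer, for comparison:
         *   struct perf_data_file file = {
         *       .path  = "perf.data",
         *       .mode  = PERF_DATA_MODE_READ,
         *       .force = false,
         *   };
         */
        struct perf_data data = {
                .file = {
                        .path = "perf.data",
                },
                .mode  = PERF_DATA_MODE_READ,
                .force = false,
        };

        printf("path=%s mode=%d force=%d\n",
               data.file.path, data.mode, data.force);
        return 0;
}
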
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index fd32ad08c6d4..17855c4626a0 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -2524,7 +2524,7 @@ static int perf_c2c__report(int argc, const char **argv)
{
struct perf_session *session;
struct ui_progress prog;
- struct perf_data_file file = {
+ struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
@@ -2573,8 +2573,8 @@ static int perf_c2c__report(int argc, const char **argv)
if (!input_name || !strlen(input_name))
input_name = "perf.data";
- file.path = input_name;
- file.force = symbol_conf.force;
+ data.file.path = input_name;
+ data.force = symbol_conf.force;
err = setup_display(display);
if (err)
@@ -2592,7 +2592,7 @@ static int perf_c2c__report(int argc, const char **argv)
goto out;
}
- session = perf_session__new(&file, 0, &c2c.tool);
+ session = perf_session__new(&data, 0, &c2c.tool);
if (session == NULL) {
pr_debug("No memory for session\n");
goto out;
@@ -2612,7 +2612,7 @@ static int perf_c2c__report(int argc, const char **argv)
goto out_session;
/* No pipe support at the moment. */
- if (perf_data_file__is_pipe(session->file)) {
+ if (perf_data__is_pipe(session->data)) {
pr_debug("No pipe support at the moment.\n");
goto out_session;
}
@@ -2733,6 +2733,7 @@ static int perf_c2c__record(int argc, const char **argv)
if (!perf_mem_events[j].supported) {
pr_err("failed: event '%s' not supported\n",
perf_mem_events[j].name);
+ free(rec_argv);
return -1;
}
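
The one-line addition to perf_c2c__record() above plugs a leak: rec_argv is heap-allocated before the event loop, so the early return on an unsupported event must free it. A standalone sketch of the pattern follows; the function name and the "supported" check are placeholders, not perf code.

/*
 * Sketch of the rec_argv leak fix: free the heap-allocated argv copy on
 * every early-exit path, not just on success.  Names are placeholders.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

static bool event_supported(const char *name)
{
        return strcmp(name, "unsupported-event") != 0;  /* stand-in check */
}

static int build_record_args(const char **events, int nr_events)
{
        const char **rec_argv;
        int i;

        rec_argv = calloc(nr_events + 2, sizeof(char *));
        if (!rec_argv)
                return -1;

        rec_argv[0] = "record";
        for (i = 0; i < nr_events; i++) {
                if (!event_supported(events[i])) {
                        fprintf(stderr, "failed: event '%s' not supported\n",
                                events[i]);
                        free(rec_argv); /* the fix: don't leak on error */
                        return -1;
                }
                rec_argv[i + 1] = events[i];
        }

        /* ... would normally hand rec_argv to the record command here ... */
        free(rec_argv);
        return 0;
}

int main(void)
{
        const char *good[] = { "mem-loads", "mem-stores" };
        const char *bad[]  = { "mem-loads", "unsupported-event" };

        printf("good: %d\n", build_record_args(good, 2));
        printf("bad:  %d\n", build_record_args(bad, 2));
        return 0;
}
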
diff --git a/tools/perf/builtin-config.c b/tools/perf/builtin-config.c
index abfa49eaf7fd..514f70f95b57 100644
--- a/tools/perf/builtin-config.c
+++ b/tools/perf/builtin-config.c
@@ -35,8 +35,7 @@ static struct option config_options[] = {
OPT_END()
};
-static int set_config(struct perf_config_set *set, const char *file_name,
- const char *var, const char *value)
+static int set_config(struct perf_config_set *set, const char *file_name)
{
struct perf_config_section *section = NULL;
struct perf_config_item *item = NULL;
@@ -50,7 +49,6 @@ static int set_config(struct perf_config_set *set, const char *file_name,
if (!fp)
return -1;
- perf_config_set__collect(set, file_name, var, value);
fprintf(fp, "%s\n", first_line);
/* overwrite configvariables */
@@ -162,6 +160,7 @@ int cmd_config(int argc, const char **argv)
struct perf_config_set *set;
char *user_config = mkpath("%s/.perfconfig", getenv("HOME"));
const char *config_filename;
+ bool changed = false;
argc = parse_options(argc, argv, config_options, config_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
@@ -232,15 +231,26 @@ int cmd_config(int argc, const char **argv)
goto out_err;
}
} else {
- if (set_config(set, config_filename, var, value) < 0) {
- pr_err("Failed to set '%s=%s' on %s\n",
- var, value, config_filename);
+ if (perf_config_set__collect(set, config_filename,
+ var, value) < 0) {
+ pr_err("Failed to add '%s=%s'\n",
+ var, value);
free(arg);
goto out_err;
}
+ changed = true;
}
free(arg);
}
+
+ if (!changed)
+ break;
+
+ if (set_config(set, config_filename) < 0) {
+ pr_err("Failed to set the configs on %s\n",
+ config_filename);
+ goto out_err;
+ }
}
ret = 0;
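
The rework above separates collecting 'var=value' assignments from writing the config file: each argument only updates the in-memory set, a changed flag records whether anything was added, and the file is rewritten once after the loop. A toy standalone sketch of that collect-then-flush flow is below; the "set" here is just an array, unlike the real perf_config_set.

/*
 * Sketch of the collect-then-write flow: gather all assignments first,
 * then rewrite the output once if anything actually changed.  The "set"
 * is a toy array, not the real perf_config_set.
 */
#include <stdio.h>
#include <string.h>

struct toy_set {
        char entries[16][64];
        int  nr;
};

static int toy_set_collect(struct toy_set *set, const char *assignment)
{
        if (set->nr >= 16 || !strchr(assignment, '='))
                return -1;
        snprintf(set->entries[set->nr++], sizeof(set->entries[0]),
                 "%s", assignment);
        return 0;
}

static int toy_set_write(const struct toy_set *set, FILE *fp)
{
        for (int i = 0; i < set->nr; i++)
                if (fprintf(fp, "%s\n", set->entries[i]) < 0)
                        return -1;
        return 0;
}

int main(int argc, char **argv)
{
        struct toy_set set = { .nr = 0 };
        int changed = 0;

        for (int i = 1; i < argc; i++) {
                if (toy_set_collect(&set, argv[i]) < 0) {
                        fprintf(stderr, "Failed to add '%s'\n", argv[i]);
                        return 1;
                }
                changed = 1;
        }

        if (!changed)           /* nothing collected: don't touch the file */
                return 0;

        return toy_set_write(&set, stdout) < 0 ? 1 : 0; /* single flush */
}
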
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 56223bdfa205..d660cb7b222b 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -48,7 +48,7 @@ struct diff_hpp_fmt {
struct data__file {
struct perf_session *session;
- struct perf_data_file file;
+ struct perf_data data;
int idx;
struct hists *hists;
struct diff_hpp_fmt fmt[PERF_HPP_DIFF__MAX_INDEX];
@@ -708,7 +708,7 @@ static void data__fprintf(void)
data__for_each_file(i, d)
fprintf(stdout, "# [%d] %s %s\n",
- d->idx, d->file.path,
+ d->idx, d->data.file.path,
!d->idx ? "(Baseline)" : "");
fprintf(stdout, "#\n");
@@ -777,16 +777,16 @@ static int __cmd_diff(void)
int ret = -EINVAL, i;
data__for_each_file(i, d) {
- d->session = perf_session__new(&d->file, false, &tool);
+ d->session = perf_session__new(&d->data, false, &tool);
if (!d->session) {
- pr_err("Failed to open %s\n", d->file.path);
+ pr_err("Failed to open %s\n", d->data.file.path);
ret = -1;
goto out_delete;
}
ret = perf_session__process_events(d->session);
if (ret) {
- pr_err("Failed to process %s\n", d->file.path);
+ pr_err("Failed to process %s\n", d->data.file.path);
goto out_delete;
}
@@ -1287,11 +1287,11 @@ static int data_init(int argc, const char **argv)
return -ENOMEM;
data__for_each_file(i, d) {
- struct perf_data_file *file = &d->file;
+ struct perf_data *data = &d->data;
- file->path = use_default ? defaults[i] : argv[i];
- file->mode = PERF_DATA_MODE_READ,
- file->force = force,
+ data->file.path = use_default ? defaults[i] : argv[i];
+ data->mode = PERF_DATA_MODE_READ,
+ data->force = force,
d->idx = i;
}
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index cdd145613f60..e06e822ce634 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -22,14 +22,16 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
{
struct perf_session *session;
struct perf_evsel *pos;
- struct perf_data_file file = {
- .path = file_name,
- .mode = PERF_DATA_MODE_READ,
- .force = details->force,
+ struct perf_data data = {
+ .file = {
+ .path = file_name,
+ },
+ .mode = PERF_DATA_MODE_READ,
+ .force = details->force,
};
bool has_tracepoint = false;
- session = perf_session__new(&file, 0, NULL);
+ session = perf_session__new(&data, 0, NULL);
if (session == NULL)
return -1;
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 3e0e73b0dc67..16a28547ca86 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -36,7 +36,7 @@ struct perf_inject {
bool strip;
bool jit_mode;
const char *input_name;
- struct perf_data_file output;
+ struct perf_data output;
u64 bytes_written;
u64 aux_id;
struct list_head samples;
@@ -53,7 +53,7 @@ static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
ssize_t size;
- size = perf_data_file__write(&inject->output, buf, sz);
+ size = perf_data__write(&inject->output, buf, sz);
if (size < 0)
return -errno;
@@ -146,7 +146,7 @@ static s64 perf_event__repipe_auxtrace(struct perf_tool *tool,
if (!inject->output.is_pipe) {
off_t offset;
- offset = lseek(inject->output.fd, 0, SEEK_CUR);
+ offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
if (offset == -1)
return -errno;
ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
@@ -155,11 +155,11 @@ static s64 perf_event__repipe_auxtrace(struct perf_tool *tool,
return ret;
}
- if (perf_data_file__is_pipe(session->file) || !session->one_mmap) {
+ if (perf_data__is_pipe(session->data) || !session->one_mmap) {
ret = output_bytes(inject, event, event->header.size);
if (ret < 0)
return ret;
- ret = copy_bytes(inject, perf_data_file__fd(session->file),
+ ret = copy_bytes(inject, perf_data__fd(session->data),
event->auxtrace.size);
} else {
ret = output_bytes(inject, event,
@@ -638,8 +638,8 @@ static int __cmd_inject(struct perf_inject *inject)
{
int ret = -EINVAL;
struct perf_session *session = inject->session;
- struct perf_data_file *file_out = &inject->output;
- int fd = perf_data_file__fd(file_out);
+ struct perf_data *data_out = &inject->output;
+ int fd = perf_data__fd(data_out);
u64 output_data_offset;
signal(SIGINT, sig_handler);
@@ -694,14 +694,14 @@ static int __cmd_inject(struct perf_inject *inject)
if (!inject->itrace_synth_opts.set)
auxtrace_index__free(&session->auxtrace_index);
- if (!file_out->is_pipe)
+ if (!data_out->is_pipe)
lseek(fd, output_data_offset, SEEK_SET);
ret = perf_session__process_events(session);
if (ret)
return ret;
- if (!file_out->is_pipe) {
+ if (!data_out->is_pipe) {
if (inject->build_ids)
perf_header__set_feat(&session->header,
HEADER_BUILD_ID);
@@ -776,11 +776,13 @@ int cmd_inject(int argc, const char **argv)
.input_name = "-",
.samples = LIST_HEAD_INIT(inject.samples),
.output = {
- .path = "-",
- .mode = PERF_DATA_MODE_WRITE,
+ .file = {
+ .path = "-",
+ },
+ .mode = PERF_DATA_MODE_WRITE,
},
};
- struct perf_data_file file = {
+ struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
int ret;
@@ -790,7 +792,7 @@ int cmd_inject(int argc, const char **argv)
"Inject build-ids into the output stream"),
OPT_STRING('i', "input", &inject.input_name, "file",
"input file name"),
- OPT_STRING('o', "output", &inject.output.path, "file",
+ OPT_STRING('o', "output", &inject.output.file.path, "file",
"output file name"),
OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
"Merge sched-stat and sched-switch for getting events "
@@ -802,7 +804,7 @@ int cmd_inject(int argc, const char **argv)
"be more verbose (show build ids, etc)"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
"kallsyms pathname"),
- OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
+ OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
NULL, "opts", "Instruction Tracing options",
itrace_parse_synth_opts),
@@ -830,15 +832,15 @@ int cmd_inject(int argc, const char **argv)
return -1;
}
- if (perf_data_file__open(&inject.output)) {
+ if (perf_data__open(&inject.output)) {
perror("failed to create output file");
return -1;
}
inject.tool.ordered_events = inject.sched_stat;
- file.path = inject.input_name;
- inject.session = perf_session__new(&file, true, &inject.tool);
+ data.file.path = inject.input_name;
+ inject.session = perf_session__new(&data, true, &inject.tool);
if (inject.session == NULL)
return -1;
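
perf inject keeps both output behaviors visible in the hunks above: when the output is a regular file it seeks the descriptor back to the data offset before re-processing, and when it is a pipe it just streams bytes through. A simplified standalone sketch of that is-pipe split follows; the helper names only mirror the shape of the renamed perf_data__*() API, the implementations are local stand-ins.

/*
 * Sketch of the pipe-vs-file split in `perf inject`: seek a real file back
 * to the data offset, never seek a pipe.  The helpers are stand-ins that
 * only mirror the renamed perf_data__*() API in shape.
 */
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>
#include <fcntl.h>

struct fake_data {
        int  fd;
        bool is_pipe;
};

static bool fake_data_is_pipe(const struct fake_data *d) { return d->is_pipe; }
static int  fake_data_fd(const struct fake_data *d)      { return d->fd; }

static int start_output(struct fake_data *out, off_t data_offset)
{
        if (!fake_data_is_pipe(out) &&
            lseek(fake_data_fd(out), data_offset, SEEK_SET) == (off_t)-1)
                return -1;
        /* a pipe just gets written sequentially from here on */
        return 0;
}

int main(void)
{
        struct fake_data file_out = { .is_pipe = false };
        struct fake_data pipe_out = { .fd = STDOUT_FILENO, .is_pipe = true };

        file_out.fd = open("/tmp/inject-demo.data", O_RDWR | O_CREAT, 0644);
        if (file_out.fd < 0)
                return 1;

        printf("file: %d\n", start_output(&file_out, 4096));
        printf("pipe: %d\n", start_output(&pipe_out, 4096));
        close(file_out.fd);
        return 0;
}
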
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 35d4b9c9a9e8..ae11e4c3516a 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -641,7 +641,6 @@ static const struct {
{ "__GFP_ATOMIC", "_A" },
{ "__GFP_IO", "I" },
{ "__GFP_FS", "F" },
- { "__GFP_COLD", "CO" },
{ "__GFP_NOWARN", "NWR" },
{ "__GFP_RETRY_MAYFAIL", "R" },
{ "__GFP_NOFAIL", "NF" },
@@ -655,7 +654,6 @@ static const struct {
{ "__GFP_RECLAIMABLE", "RC" },
{ "__GFP_MOVABLE", "M" },
{ "__GFP_ACCOUNT", "AC" },
- { "__GFP_NOTRACK", "NT" },
{ "__GFP_WRITE", "WR" },
{ "__GFP_RECLAIM", "R" },
{ "__GFP_DIRECT_RECLAIM", "DR" },
@@ -1894,7 +1892,7 @@ int cmd_kmem(int argc, const char **argv)
{
const char * const default_slab_sort = "frag,hit,bytes";
const char * const default_page_sort = "bytes,hit";
- struct perf_data_file file = {
+ struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
const struct option kmem_options[] = {
@@ -1910,7 +1908,7 @@ int cmd_kmem(int argc, const char **argv)
"page, order, migtype, gfp", parse_sort_opt),
OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
- OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
+ OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
parse_slab_opt),
OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
@@ -1950,9 +1948,9 @@ int cmd_kmem(int argc, const char **argv)
return __cmd_record(argc, argv);
}
- file.path = input_name;
+ data.file.path = input_name;
- kmem_session = session = perf_session__new(&file, false, &perf_kmem);
+ kmem_session = session = perf_session__new(&data, false, &perf_kmem);
if (session == NULL)
return -1;
@@ -1984,7 +1982,8 @@ int cmd_kmem(int argc, const char **argv)
if (perf_time__parse_str(&ptime, time_str) != 0) {
pr_err("Invalid time string\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_delete;
}
if (!strcmp(argv[0], "stat")) {
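
The kmem change above is the same housekeeping as the rec_argv fix: once a session has been created, an invalid --time string must go through out_delete instead of returning directly, so the session is released. A minimal standalone sketch of that goto-based unwind follows; the session type and the time-string parser are placeholders.

/*
 * Sketch of the error-path fix in `perf kmem`: after the session exists,
 * failures jump to a cleanup label instead of returning directly.  The
 * session type and time-string parser are placeholders.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct fake_session { int dummy; };

static struct fake_session *fake_session_new(void)
{
        return calloc(1, sizeof(struct fake_session));
}

static void fake_session_delete(struct fake_session *s)
{
        free(s);
}

static int parse_time_str(const char *s)
{
        return (s && s[0]) ? 0 : -1;    /* stand-in validation */
}

static int cmd_kmem_like(const char *time_str)
{
        struct fake_session *session;
        int ret = 0;

        session = fake_session_new();
        if (!session)
                return -1;

        if (parse_time_str(time_str) != 0) {
                fprintf(stderr, "Invalid time string\n");
                ret = -EINVAL;
                goto out_delete;        /* was: return -EINVAL (leaked session) */
        }

        /* ... normal processing would happen here ... */

out_delete:
        fake_session_delete(session);
        return ret;
}

int main(void)
{
        printf("%d\n", cmd_kmem_like(""));
        printf("%d\n", cmd_kmem_like("10%,20%"));
        return 0;
}
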
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 5fb40368d5d1..0c36f2ac6a0e 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -35,7 +35,6 @@
#include <termios.h>
#include <semaphore.h>
#include <signal.h>
-#include <pthread.h>
#include <math.h>
static const char *get_filename_for_perf_kvm(void)
@@ -1069,10 +1068,12 @@ static int read_events(struct perf_kvm_stat *kvm)
.namespaces = perf_event__process_namespaces,
.ordered_events = true,
};
- struct perf_data_file file = {
- .path = kvm->file_name,
- .mode = PERF_DATA_MODE_READ,
- .force = kvm->force,
+ struct perf_data file = {
+ .file = {
+ .path = kvm->file_name,
+ },
+ .mode = PERF_DATA_MODE_READ,
+ .force = kvm->force,
};
kvm->tool = eops;
@@ -1360,7 +1361,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
"perf kvm stat live [<options>]",
NULL
};
- struct perf_data_file file = {
+ struct perf_data data = {
.mode = PERF_DATA_MODE_WRITE,
};
@@ -1434,7 +1435,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
/*
* perf session
*/
- kvm->session = perf_session__new(&file, false, &kvm->tool);
+ kvm->session = perf_session__new(&data, false, &kvm->tool);
if (kvm->session == NULL) {
err = -1;
goto out;
@@ -1443,7 +1444,8 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
perf_session__set_id_hdr_size(kvm->session);
ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
- kvm->evlist->threads, false, kvm->opts.proc_map_timeout);
+ kvm->evlist->threads, false,
+ kvm->opts.proc_map_timeout, 1);
err = kvm_live_open_events(kvm);
if (err)
goto out;
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index eeedbe433776..ead221e49f00 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -16,6 +16,7 @@
#include "util/cache.h"
#include "util/pmu.h"
#include "util/debug.h"
+#include "util/metricgroup.h"
#include <subcmd/parse-options.h>
static bool desc_flag = true;
@@ -80,6 +81,10 @@ int cmd_list(int argc, const char **argv)
long_desc_flag, details_flag);
else if (strcmp(argv[i], "sdt") == 0)
print_sdt_events(NULL, NULL, raw_dump);
+ else if (strcmp(argv[i], "metric") == 0)
+ metricgroup__print(true, false, NULL, raw_dump);
+ else if (strcmp(argv[i], "metricgroup") == 0)
+ metricgroup__print(false, true, NULL, raw_dump);
else if ((sep = strchr(argv[i], ':')) != NULL) {
int sep_idx;
@@ -97,6 +102,7 @@ int cmd_list(int argc, const char **argv)
s[sep_idx] = '\0';
print_tracepoint_events(s, s + sep_idx + 1, raw_dump);
print_sdt_events(s, s + sep_idx + 1, raw_dump);
+ metricgroup__print(true, true, s, raw_dump);
free(s);
} else {
if (asprintf(&s, "*%s*", argv[i]) < 0) {
@@ -113,6 +119,7 @@ int cmd_list(int argc, const char **argv)
details_flag);
print_tracepoint_events(NULL, s, raw_dump);
print_sdt_events(NULL, s, raw_dump);
+ metricgroup__print(true, true, NULL, raw_dump);
free(s);
}
}
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index fe69cd6b89e1..6e0189df2b3b 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -865,13 +865,15 @@ static int __cmd_report(bool display_info)
.namespaces = perf_event__process_namespaces,
.ordered_events = true,
};
- struct perf_data_file file = {
- .path = input_name,
- .mode = PERF_DATA_MODE_READ,
- .force = force,
+ struct perf_data data = {
+ .file = {
+ .path = input_name,
+ },
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
};
- session = perf_session__new(&file, false, &eops);
+ session = perf_session__new(&data, false, &eops);
if (!session) {
pr_err("Initializing perf session failed\n");
return -1;
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 4db960085273..506564651cda 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -113,6 +113,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
if (!perf_mem_events[j].supported) {
pr_err("failed: event '%s' not supported\n",
perf_mem_events__name(j));
+ free(rec_argv);
return -1;
}
@@ -236,13 +237,15 @@ static int process_sample_event(struct perf_tool *tool,
static int report_raw_events(struct perf_mem *mem)
{
- struct perf_data_file file = {
- .path = input_name,
- .mode = PERF_DATA_MODE_READ,
- .force = mem->force,
+ struct perf_data data = {
+ .file = {
+ .path = input_name,
+ },
+ .mode = PERF_DATA_MODE_READ,
+ .force = mem->force,
};
int ret;
- struct perf_session *session = perf_session__new(&file, false,
+ struct perf_session *session = perf_session__new(&data, false,
&mem->tool);
if (session == NULL)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 0c95ffefb6cc..3d7f33e19df2 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -67,7 +67,7 @@ struct record {
struct perf_tool tool;
struct record_opts opts;
u64 bytes_written;
- struct perf_data_file file;
+ struct perf_data data;
struct auxtrace_record *itr;
struct perf_evlist *evlist;
struct perf_session *session;
@@ -108,7 +108,7 @@ static bool switch_output_time(struct record *rec)
static int record__write(struct record *rec, void *bf, size_t size)
{
- if (perf_data_file__write(rec->session->file, bf, size) < 0) {
+ if (perf_data__write(rec->session->data, bf, size) < 0) {
pr_err("failed to write perf data, error: %m\n");
return -1;
}
@@ -130,107 +130,12 @@ static int process_synthesized_event(struct perf_tool *tool,
return record__write(rec, event, event->header.size);
}
-static int
-backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
+static int record__pushfn(void *to, void *bf, size_t size)
{
- struct perf_event_header *pheader;
- u64 evt_head = head;
- int size = mask + 1;
-
- pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
- pheader = (struct perf_event_header *)(buf + (head & mask));
- *start = head;
- while (true) {
- if (evt_head - head >= (unsigned int)size) {
- pr_debug("Finished reading backward ring buffer: rewind\n");
- if (evt_head - head > (unsigned int)size)
- evt_head -= pheader->size;
- *end = evt_head;
- return 0;
- }
-
- pheader = (struct perf_event_header *)(buf + (evt_head & mask));
-
- if (pheader->size == 0) {
- pr_debug("Finished reading backward ring buffer: get start\n");
- *end = evt_head;
- return 0;
- }
-
- evt_head += pheader->size;
- pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
- }
- WARN_ONCE(1, "Shouldn't get here\n");
- return -1;
-}
-
-static int
-rb_find_range(void *data, int mask, u64 head, u64 old,
- u64 *start, u64 *end, bool backward)
-{
- if (!backward) {
- *start = old;
- *end = head;
- return 0;
- }
-
- return backward_rb_find_range(data, mask, head, start, end);
-}
-
-static int
-record__mmap_read(struct record *rec, struct perf_mmap *md,
- bool overwrite, bool backward)
-{
- u64 head = perf_mmap__read_head(md);
- u64 old = md->prev;
- u64 end = head, start = old;
- unsigned char *data = md->base + page_size;
- unsigned long size;
- void *buf;
- int rc = 0;
-
- if (rb_find_range(data, md->mask, head,
- old, &start, &end, backward))
- return -1;
-
- if (start == end)
- return 0;
+ struct record *rec = to;
rec->samples++;
-
- size = end - start;
- if (size > (unsigned long)(md->mask) + 1) {
- WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
-
- md->prev = head;
- perf_mmap__consume(md, overwrite || backward);
- return 0;
- }
-
- if ((start & md->mask) + size != (end & md->mask)) {
- buf = &data[start & md->mask];
- size = md->mask + 1 - (start & md->mask);
- start += size;
-
- if (record__write(rec, buf, size) < 0) {
- rc = -1;
- goto out;
- }
- }
-
- buf = &data[start & md->mask];
- size = end - start;
- start += size;
-
- if (record__write(rec, buf, size) < 0) {
- rc = -1;
- goto out;
- }
-
- md->prev = head;
- perf_mmap__consume(md, overwrite || backward);
-out:
- return rc;
+ return record__write(rec, bf, size);
}
static volatile int done;
@@ -269,13 +174,13 @@ static int record__process_auxtrace(struct perf_tool *tool,
size_t len1, void *data2, size_t len2)
{
struct record *rec = container_of(tool, struct record, tool);
- struct perf_data_file *file = &rec->file;
+ struct perf_data *data = &rec->data;
size_t padding;
u8 pad[8] = {0};
- if (!perf_data_file__is_pipe(file)) {
+ if (!perf_data__is_pipe(data)) {
off_t file_offset;
- int fd = perf_data_file__fd(file);
+ int fd = perf_data__fd(data);
int err;
file_offset = lseek(fd, 0, SEEK_CUR);
@@ -494,10 +399,10 @@ static int process_sample_event(struct perf_tool *tool,
static int process_buildids(struct record *rec)
{
- struct perf_data_file *file = &rec->file;
+ struct perf_data *data = &rec->data;
struct perf_session *session = rec->session;
- if (file->size == 0)
+ if (data->size == 0)
return 0;
/*
@@ -577,8 +482,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
if (maps[i].base) {
- if (record__mmap_read(rec, &maps[i],
- evlist->overwrite, backward) != 0) {
+ if (perf_mmap__push(&maps[i], evlist->overwrite, backward, rec, record__pushfn) != 0) {
rc = -1;
goto out;
}
@@ -641,14 +545,14 @@ static void record__init_features(struct record *rec)
static void
record__finish_output(struct record *rec)
{
- struct perf_data_file *file = &rec->file;
- int fd = perf_data_file__fd(file);
+ struct perf_data *data = &rec->data;
+ int fd = perf_data__fd(data);
- if (file->is_pipe)
+ if (data->is_pipe)
return;
rec->session->header.data_size += rec->bytes_written;
- file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
+ data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);
if (!rec->no_buildid) {
process_buildids(rec);
@@ -687,7 +591,7 @@ static int record__synthesize(struct record *rec, bool tail);
static int
record__switch_output(struct record *rec, bool at_exit)
{
- struct perf_data_file *file = &rec->file;
+ struct perf_data *data = &rec->data;
int fd, err;
/* Same Size: "2015122520103046"*/
@@ -705,7 +609,7 @@ record__switch_output(struct record *rec, bool at_exit)
return -EINVAL;
}
- fd = perf_data_file__switch(file, timestamp,
+ fd = perf_data__switch(data, timestamp,
rec->session->header.data_offset,
at_exit);
if (fd >= 0 && !at_exit) {
@@ -715,7 +619,7 @@ record__switch_output(struct record *rec, bool at_exit)
if (!quiet)
fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
- file->path, timestamp);
+ data->file.path, timestamp);
/* Output tracking events */
if (!at_exit) {
@@ -790,16 +694,16 @@ static int record__synthesize(struct record *rec, bool tail)
{
struct perf_session *session = rec->session;
struct machine *machine = &session->machines.host;
- struct perf_data_file *file = &rec->file;
+ struct perf_data *data = &rec->data;
struct record_opts *opts = &rec->opts;
struct perf_tool *tool = &rec->tool;
- int fd = perf_data_file__fd(file);
+ int fd = perf_data__fd(data);
int err = 0;
if (rec->opts.tail_synthesize != tail)
return 0;
- if (file->is_pipe) {
+ if (data->is_pipe) {
err = perf_event__synthesize_features(
tool, session, rec->evlist, process_synthesized_event);
if (err < 0) {
@@ -864,7 +768,7 @@ static int record__synthesize(struct record *rec, bool tail)
err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
process_synthesized_event, opts->sample_address,
- opts->proc_map_timeout);
+ opts->proc_map_timeout, 1);
out:
return err;
}
@@ -878,7 +782,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
struct machine *machine;
struct perf_tool *tool = &rec->tool;
struct record_opts *opts = &rec->opts;
- struct perf_data_file *file = &rec->file;
+ struct perf_data *data = &rec->data;
struct perf_session *session;
bool disabled = false, draining = false;
int fd;
@@ -904,20 +808,20 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
signal(SIGUSR2, SIG_IGN);
}
- session = perf_session__new(file, false, tool);
+ session = perf_session__new(data, false, tool);
if (session == NULL) {
pr_err("Perf session creation failed.\n");
return -1;
}
- fd = perf_data_file__fd(file);
+ fd = perf_data__fd(data);
rec->session = session;
record__init_features(rec);
if (forks) {
err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
- argv, file->is_pipe,
+ argv, data->is_pipe,
workload_exec_failed_signal);
if (err < 0) {
pr_err("Couldn't run the workload!\n");
@@ -953,7 +857,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
if (!rec->evlist->nr_groups)
perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
- if (file->is_pipe) {
+ if (data->is_pipe) {
err = perf_header__write_pipe(fd);
if (err < 0)
goto out_child;
@@ -1214,8 +1118,8 @@ out_child:
samples[0] = '\0';
fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
- perf_data_file__size(file) / 1024.0 / 1024.0,
- file->path, postfix, samples);
+ perf_data__size(data) / 1024.0 / 1024.0,
+ data->file.path, postfix, samples);
}
out_delete_session:
@@ -1579,7 +1483,7 @@ static struct option __record_options[] = {
OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
"list of cpus to monitor"),
OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
- OPT_STRING('o', "output", &record.file.path, "file",
+ OPT_STRING('o', "output", &record.data.file.path, "file",
"output file name"),
OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
&record.opts.no_inherit_set,
@@ -1644,6 +1548,9 @@ static struct option __record_options[] = {
OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
"sample selected machine registers on interrupt,"
" use -I ? to list register names", parse_regs),
+ OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
+ "sample selected machine registers on interrupt,"
+ " use -I ? to list register names", parse_regs),
OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
"Record running/enabled time of read (:S) events"),
OPT_CALLBACK('k', "clockid", &record.opts,
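
The record change is the largest here: the open-coded ring-buffer reader (backward_rb_find_range and record__mmap_read) is replaced by a generic perf_mmap__push() that hands contiguous chunks to a push callback, and record only supplies record__pushfn(), which forwards each chunk to record__write(). A standalone sketch of that callback shape over a plain byte buffer follows; the actual mmap walking is simplified away and all names are local stand-ins.

/*
 * Sketch of the push-callback pattern that replaces record__mmap_read():
 * a generic "push" walker hands contiguous chunks to a callback, and the
 * record side only forwards them to its writer.  The ring buffer is
 * reduced to a plain byte range here.
 */
#include <stdio.h>
#include <string.h>

struct fake_record {
        unsigned long chunks;
        unsigned long bytes_written;
};

typedef int (*push_fn)(void *to, void *buf, size_t size);

/* Stand-in for the generic walker: emit the data in fixed-size chunks. */
static int fake_mmap_push(void *data, size_t len, void *to, push_fn push)
{
        const size_t chunk = 8;
        size_t off = 0;

        while (off < len) {
                size_t n = len - off < chunk ? len - off : chunk;

                if (push(to, (char *)data + off, n) < 0)
                        return -1;
                off += n;
        }
        return 0;
}

/* Same role as record__pushfn(): count and forward to the writer. */
static int fake_record_pushfn(void *to, void *buf, size_t size)
{
        struct fake_record *rec = to;

        rec->chunks++;
        rec->bytes_written += size;
        return fwrite(buf, 1, size, stdout) == size ? 0 : -1;
}

int main(void)
{
        struct fake_record rec = { 0, 0 };
        char data[] = "ring buffer contents go here\n";

        if (fake_mmap_push(data, strlen(data), &rec, fake_record_pushfn))
                return 1;
        fprintf(stderr, "chunks=%lu bytes=%lu\n", rec.chunks, rec.bytes_written);
        return 0;
}
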
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index fae4b0340750..1394cd8d96f7 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -258,7 +258,7 @@ static int report__setup_sample_type(struct report *rep)
{
struct perf_session *session = rep->session;
u64 sample_type = perf_evlist__combined_sample_type(session->evlist);
- bool is_pipe = perf_data_file__is_pipe(session->file);
+ bool is_pipe = perf_data__is_pipe(session->data);
if (session->itrace_synth_opts->callchain ||
(!is_pipe &&
@@ -569,7 +569,7 @@ static int __cmd_report(struct report *rep)
int ret;
struct perf_session *session = rep->session;
struct perf_evsel *pos;
- struct perf_data_file *file = session->file;
+ struct perf_data *data = session->data;
signal(SIGINT, sig_handler);
@@ -638,7 +638,7 @@ static int __cmd_report(struct report *rep)
rep->nr_entries += evsel__hists(pos)->nr_entries;
if (rep->nr_entries == 0) {
- ui__error("The %s file has no samples!\n", file->path);
+ ui__error("The %s file has no samples!\n", data->file.path);
return 0;
}
@@ -880,7 +880,7 @@ int cmd_report(int argc, const char **argv)
"Show inline function"),
OPT_END()
};
- struct perf_data_file file = {
+ struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
int ret = hists__init();
@@ -941,11 +941,11 @@ int cmd_report(int argc, const char **argv)
input_name = "perf.data";
}
- file.path = input_name;
- file.force = symbol_conf.force;
+ data.file.path = input_name;
+ data.force = symbol_conf.force;
repeat:
- session = perf_session__new(&file, false, &report.tool);
+ session = perf_session__new(&data, false, &report.tool);
if (session == NULL)
return -1;
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index f380d91ee609..83283fedb00f 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1701,14 +1701,16 @@ static int perf_sched__read_events(struct perf_sched *sched)
{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
};
struct perf_session *session;
- struct perf_data_file file = {
- .path = input_name,
- .mode = PERF_DATA_MODE_READ,
- .force = sched->force,
+ struct perf_data data = {
+ .file = {
+ .path = input_name,
+ },
+ .mode = PERF_DATA_MODE_READ,
+ .force = sched->force,
};
int rc = -1;
- session = perf_session__new(&file, false, &sched->tool);
+ session = perf_session__new(&data, false, &sched->tool);
if (session == NULL) {
pr_debug("No Memory for session\n");
return -1;
@@ -2903,10 +2905,12 @@ static int perf_sched__timehist(struct perf_sched *sched)
const struct perf_evsel_str_handler migrate_handlers[] = {
{ "sched:sched_migrate_task", timehist_migrate_task_event, },
};
- struct perf_data_file file = {
- .path = input_name,
- .mode = PERF_DATA_MODE_READ,
- .force = sched->force,
+ struct perf_data data = {
+ .file = {
+ .path = input_name,
+ },
+ .mode = PERF_DATA_MODE_READ,
+ .force = sched->force,
};
struct perf_session *session;
@@ -2931,7 +2935,7 @@ static int perf_sched__timehist(struct perf_sched *sched)
symbol_conf.use_callchain = sched->show_callchain;
- session = perf_session__new(&file, false, &sched->tool);
+ session = perf_session__new(&data, false, &sched->tool);
if (session == NULL)
return -ENOMEM;
@@ -3364,6 +3368,10 @@ int cmd_sched(int argc, const char **argv)
OPT_STRING(0, "time", &sched.time_str, "str",
"Time span for analysis (start,stop)"),
OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
+ OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
+ "analyze events only for given process id(s)"),
+ OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
+ "analyze events only for given thread id(s)"),
OPT_PARENT(sched_options)
};
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 0fe02758de7d..68f36dc0344f 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -89,6 +89,7 @@ enum perf_output_field {
PERF_OUTPUT_BRSTACKOFF = 1U << 24,
PERF_OUTPUT_SYNTH = 1U << 25,
PERF_OUTPUT_PHYS_ADDR = 1U << 26,
+ PERF_OUTPUT_UREGS = 1U << 27,
};
struct output_option {
@@ -110,6 +111,7 @@ struct output_option {
{.str = "srcline", .field = PERF_OUTPUT_SRCLINE},
{.str = "period", .field = PERF_OUTPUT_PERIOD},
{.str = "iregs", .field = PERF_OUTPUT_IREGS},
+ {.str = "uregs", .field = PERF_OUTPUT_UREGS},
{.str = "brstack", .field = PERF_OUTPUT_BRSTACK},
{.str = "brstacksym", .field = PERF_OUTPUT_BRSTACKSYM},
{.str = "data_src", .field = PERF_OUTPUT_DATA_SRC},
@@ -209,6 +211,51 @@ static struct {
},
};
+struct perf_evsel_script {
+ char *filename;
+ FILE *fp;
+ u64 samples;
+};
+
+static struct perf_evsel_script *perf_evsel_script__new(struct perf_evsel *evsel,
+ struct perf_data *data)
+{
+ struct perf_evsel_script *es = malloc(sizeof(*es));
+
+ if (es != NULL) {
+ if (asprintf(&es->filename, "%s.%s.dump", data->file.path, perf_evsel__name(evsel)) < 0)
+ goto out_free;
+ es->fp = fopen(es->filename, "w");
+ if (es->fp == NULL)
+ goto out_free_filename;
+ es->samples = 0;
+ }
+
+ return es;
+out_free_filename:
+ zfree(&es->filename);
+out_free:
+ free(es);
+ return NULL;
+}
+
+static void perf_evsel_script__delete(struct perf_evsel_script *es)
+{
+ zfree(&es->filename);
+ fclose(es->fp);
+ es->fp = NULL;
+ free(es);
+}
+
+static int perf_evsel_script__fprintf(struct perf_evsel_script *es, FILE *fp)
+{
+ struct stat st;
+
+ fstat(fileno(es->fp), &st);
+ return fprintf(fp, "[ perf script: Wrote %.3f MB %s (%" PRIu64 " samples) ]\n",
+ st.st_size / 1024.0 / 1024.0, es->filename, es->samples);
+}
+
static inline int output_type(unsigned int type)
{
switch (type) {
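
The new perf_evsel_script helpers above give each event its own dump FILE: the name is derived from the perf.data path plus the evsel name, samples are counted as they are written, and a one-line summary is printed from the file's size at the end. A standalone sketch of the same open/count/summarize lifecycle follows, using plain C strings and stdio rather than the perf evsel and data types.

/*
 * Sketch of the per-event dump-file lifecycle added above: open a file
 * named after the data file and the event, count samples as they are
 * written, and report size + sample count at the end.  No perf types
 * are used; everything here is plain stdio.
 */
#define _GNU_SOURCE             /* for asprintf() */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/stat.h>

struct evsel_script {
        char    *filename;
        FILE    *fp;
        uint64_t samples;
};

static struct evsel_script *evsel_script_new(const char *data_path,
                                             const char *evsel_name)
{
        struct evsel_script *es = calloc(1, sizeof(*es));

        if (!es)
                return NULL;
        if (asprintf(&es->filename, "%s.%s.dump", data_path, evsel_name) < 0)
                goto out_free;
        es->fp = fopen(es->filename, "w");
        if (!es->fp)
                goto out_free_filename;
        return es;

out_free_filename:
        free(es->filename);
out_free:
        free(es);
        return NULL;
}

static void evsel_script_delete(struct evsel_script *es)
{
        fclose(es->fp);
        free(es->filename);
        free(es);
}

static int evsel_script_summary(struct evsel_script *es, FILE *fp)
{
        struct stat st;

        fstat(fileno(es->fp), &st);
        return fprintf(fp, "[ wrote %.3f MB %s (%llu samples) ]\n",
                       st.st_size / 1024.0 / 1024.0, es->filename,
                       (unsigned long long)es->samples);
}

int main(void)
{
        struct evsel_script *es = evsel_script_new("perf.data", "cycles");

        if (!es)
                return 1;
        for (int i = 0; i < 3; i++, es->samples++)
                fprintf(es->fp, "sample %d\n", i);
        fflush(es->fp);
        evsel_script_summary(es, stdout);
        evsel_script_delete(es);
        return 0;
}
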
@@ -386,6 +433,11 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
PERF_OUTPUT_IREGS))
return -EINVAL;
+ if (PRINT_FIELD(UREGS) &&
+ perf_evsel__check_stype(evsel, PERF_SAMPLE_REGS_USER, "UREGS",
+ PERF_OUTPUT_UREGS))
+ return -EINVAL;
+
if (PRINT_FIELD(PHYS_ADDR) &&
perf_evsel__check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR",
PERF_OUTPUT_PHYS_ADDR))
@@ -494,51 +546,76 @@ out:
return 0;
}
-static void print_sample_iregs(struct perf_sample *sample,
- struct perf_event_attr *attr)
+static int perf_sample__fprintf_iregs(struct perf_sample *sample,
+ struct perf_event_attr *attr, FILE *fp)
{
struct regs_dump *regs = &sample->intr_regs;
uint64_t mask = attr->sample_regs_intr;
unsigned i = 0, r;
+ int printed = 0;
if (!regs)
- return;
+ return 0;
for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
u64 val = regs->regs[i++];
- printf("%5s:0x%"PRIx64" ", perf_reg_name(r), val);
+ printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r), val);
}
+
+ return printed;
}
-static void print_sample_start(struct perf_sample *sample,
- struct thread *thread,
- struct perf_evsel *evsel)
+static int perf_sample__fprintf_uregs(struct perf_sample *sample,
+ struct perf_event_attr *attr, FILE *fp)
+{
+ struct regs_dump *regs = &sample->user_regs;
+ uint64_t mask = attr->sample_regs_user;
+ unsigned i = 0, r;
+ int printed = 0;
+
+ if (!regs || !regs->regs)
+ return 0;
+
+ printed += fprintf(fp, " ABI:%" PRIu64 " ", regs->abi);
+
+ for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
+ u64 val = regs->regs[i++];
+ printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r), val);
+ }
+
+ return printed;
+}
+
+static int perf_sample__fprintf_start(struct perf_sample *sample,
+ struct thread *thread,
+ struct perf_evsel *evsel, FILE *fp)
{
struct perf_event_attr *attr = &evsel->attr;
unsigned long secs;
unsigned long long nsecs;
+ int printed = 0;
if (PRINT_FIELD(COMM)) {
if (latency_format)
- printf("%8.8s ", thread__comm_str(thread));
+ printed += fprintf(fp, "%8.8s ", thread__comm_str(thread));
else if (PRINT_FIELD(IP) && symbol_conf.use_callchain)
- printf("%s ", thread__comm_str(thread));
+ printed += fprintf(fp, "%s ", thread__comm_str(thread));
else
- printf("%16s ", thread__comm_str(thread));
+ printed += fprintf(fp, "%16s ", thread__comm_str(thread));
}
if (PRINT_FIELD(PID) && PRINT_FIELD(TID))
- printf("%5d/%-5d ", sample->pid, sample->tid);
+ printed += fprintf(fp, "%5d/%-5d ", sample->pid, sample->tid);
else if (PRINT_FIELD(PID))
- printf("%5d ", sample->pid);
+ printed += fprintf(fp, "%5d ", sample->pid);
else if (PRINT_FIELD(TID))
- printf("%5d ", sample->tid);
+ printed += fprintf(fp, "%5d ", sample->tid);
if (PRINT_FIELD(CPU)) {
if (latency_format)
- printf("%3d ", sample->cpu);
+ printed += fprintf(fp, "%3d ", sample->cpu);
else
- printf("[%03d] ", sample->cpu);
+ printed += fprintf(fp, "[%03d] ", sample->cpu);
}
if (PRINT_FIELD(TIME)) {
@@ -547,13 +624,15 @@ static void print_sample_start(struct perf_sample *sample,
nsecs -= secs * NSEC_PER_SEC;
if (nanosecs)
- printf("%5lu.%09llu: ", secs, nsecs);
+ printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs);
else {
char sample_time[32];
timestamp__scnprintf_usec(sample->time, sample_time, sizeof(sample_time));
- printf("%12s: ", sample_time);
+ printed += fprintf(fp, "%12s: ", sample_time);
}
}
+
+ return printed;
}
static inline char
@@ -565,16 +644,17 @@ mispred_str(struct branch_entry *br)
return br->flags.predicted ? 'P' : 'M';
}
-static void print_sample_brstack(struct perf_sample *sample,
- struct thread *thread,
- struct perf_event_attr *attr)
+static int perf_sample__fprintf_brstack(struct perf_sample *sample,
+ struct thread *thread,
+ struct perf_event_attr *attr, FILE *fp)
{
struct branch_stack *br = sample->branch_stack;
struct addr_location alf, alt;
u64 i, from, to;
+ int printed = 0;
if (!(br && br->nr))
- return;
+ return 0;
for (i = 0; i < br->nr; i++) {
from = br->entries[i].from;
@@ -587,38 +667,41 @@ static void print_sample_brstack(struct perf_sample *sample,
thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
}
- printf(" 0x%"PRIx64, from);
+ printed += fprintf(fp, " 0x%"PRIx64, from);
if (PRINT_FIELD(DSO)) {
- printf("(");
- map__fprintf_dsoname(alf.map, stdout);
- printf(")");
+ printed += fprintf(fp, "(");
+ printed += map__fprintf_dsoname(alf.map, fp);
+ printed += fprintf(fp, ")");
}
- printf("/0x%"PRIx64, to);
+ printed += fprintf(fp, "/0x%"PRIx64, to);
if (PRINT_FIELD(DSO)) {
- printf("(");
- map__fprintf_dsoname(alt.map, stdout);
- printf(")");
+ printed += fprintf(fp, "(");
+ printed += map__fprintf_dsoname(alt.map, fp);
+ printed += fprintf(fp, ")");
}
- printf("/%c/%c/%c/%d ",
+ printed += fprintf(fp, "/%c/%c/%c/%d ",
mispred_str( br->entries + i),
br->entries[i].flags.in_tx? 'X' : '-',
br->entries[i].flags.abort? 'A' : '-',
br->entries[i].flags.cycles);
}
+
+ return printed;
}
-static void print_sample_brstacksym(struct perf_sample *sample,
- struct thread *thread,
- struct perf_event_attr *attr)
+static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
+ struct thread *thread,
+ struct perf_event_attr *attr, FILE *fp)
{
struct branch_stack *br = sample->branch_stack;
struct addr_location alf, alt;
u64 i, from, to;
+ int printed = 0;
if (!(br && br->nr))
- return;
+ return 0;
for (i = 0; i < br->nr; i++) {
@@ -635,37 +718,40 @@ static void print_sample_brstacksym(struct perf_sample *sample,
if (alt.map)
alt.sym = map__find_symbol(alt.map, alt.addr);
- symbol__fprintf_symname_offs(alf.sym, &alf, stdout);
+ printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
if (PRINT_FIELD(DSO)) {
- printf("(");
- map__fprintf_dsoname(alf.map, stdout);
- printf(")");
+ printed += fprintf(fp, "(");
+ printed += map__fprintf_dsoname(alf.map, fp);
+ printed += fprintf(fp, ")");
}
- putchar('/');
- symbol__fprintf_symname_offs(alt.sym, &alt, stdout);
+ printed += fprintf(fp, "%c", '/');
+ printed += symbol__fprintf_symname_offs(alt.sym, &alt, fp);
if (PRINT_FIELD(DSO)) {
- printf("(");
- map__fprintf_dsoname(alt.map, stdout);
- printf(")");
+ printed += fprintf(fp, "(");
+ printed += map__fprintf_dsoname(alt.map, fp);
+ printed += fprintf(fp, ")");
}
- printf("/%c/%c/%c/%d ",
+ printed += fprintf(fp, "/%c/%c/%c/%d ",
mispred_str( br->entries + i),
br->entries[i].flags.in_tx? 'X' : '-',
br->entries[i].flags.abort? 'A' : '-',
br->entries[i].flags.cycles);
}
+
+ return printed;
}
-static void print_sample_brstackoff(struct perf_sample *sample,
- struct thread *thread,
- struct perf_event_attr *attr)
+static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
+ struct thread *thread,
+ struct perf_event_attr *attr, FILE *fp)
{
struct branch_stack *br = sample->branch_stack;
struct addr_location alf, alt;
u64 i, from, to;
+ int printed = 0;
if (!(br && br->nr))
- return;
+ return 0;
for (i = 0; i < br->nr; i++) {
@@ -682,24 +768,26 @@ static void print_sample_brstackoff(struct perf_sample *sample,
if (alt.map && !alt.map->dso->adjust_symbols)
to = map__map_ip(alt.map, to);
- printf(" 0x%"PRIx64, from);
+ printed += fprintf(fp, " 0x%"PRIx64, from);
if (PRINT_FIELD(DSO)) {
- printf("(");
- map__fprintf_dsoname(alf.map, stdout);
- printf(")");
+ printed += fprintf(fp, "(");
+ printed += map__fprintf_dsoname(alf.map, fp);
+ printed += fprintf(fp, ")");
}
- printf("/0x%"PRIx64, to);
+ printed += fprintf(fp, "/0x%"PRIx64, to);
if (PRINT_FIELD(DSO)) {
- printf("(");
- map__fprintf_dsoname(alt.map, stdout);
- printf(")");
+ printed += fprintf(fp, "(");
+ printed += map__fprintf_dsoname(alt.map, fp);
+ printed += fprintf(fp, ")");
}
- printf("/%c/%c/%c/%d ",
+ printed += fprintf(fp, "/%c/%c/%c/%d ",
mispred_str(br->entries + i),
br->entries[i].flags.in_tx ? 'X' : '-',
br->entries[i].flags.abort ? 'A' : '-',
br->entries[i].flags.cycles);
}
+
+ return printed;
}
#define MAXBB 16384UL
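
The rest of the script.c conversion is mechanical but consistent: every print_sample_*() helper becomes a perf_sample__fprintf_*() variant that takes a FILE * and returns the number of characters written, accumulating fprintf() return values into printed instead of calling printf() or putchar() directly. A small standalone before/after sketch of that shape, with invented function names for illustration:

/*
 * Sketch of the printf -> fprintf conversion pattern applied throughout
 * builtin-script.c: take a FILE *, accumulate the fprintf() return values,
 * and return the character count instead of printing to stdout directly.
 */
#include <stdio.h>

/* Before: prints to stdout, returns nothing. */
static void print_flags_old(unsigned int flags)
{
        printf(" flags:%#x ", flags);
}

/* After: prints to the given stream, returns how much was written. */
static int fprintf_flags_new(unsigned int flags, FILE *fp)
{
        int printed = 0;

        printed += fprintf(fp, " flags:%#x", flags);
        printed += fprintf(fp, " ");
        return printed;
}

int main(void)
{
        int printed;

        print_flags_old(0x12);
        printed = fprintf_flags_new(0x12, stdout);
        printf("\n(%d chars)\n", printed);
        return 0;
}
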
@@ -727,27 +815,26 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
* but the exit is not. Let the caller patch it up.
*/
if (kernel != machine__kernel_ip(machine, end)) {
- printf("\tblock %" PRIx64 "-%" PRIx64 " transfers between kernel and user\n",
- start, end);
+ pr_debug("\tblock %" PRIx64 "-%" PRIx64 " transfers between kernel and user\n", start, end);
return -ENXIO;
}
memset(&al, 0, sizeof(al));
if (end - start > MAXBB - MAXINSN) {
if (last)
- printf("\tbrstack does not reach to final jump (%" PRIx64 "-%" PRIx64 ")\n", start, end);
+ pr_debug("\tbrstack does not reach to final jump (%" PRIx64 "-%" PRIx64 ")\n", start, end);
else
- printf("\tblock %" PRIx64 "-%" PRIx64 " (%" PRIu64 ") too long to dump\n", start, end, end - start);
+ pr_debug("\tblock %" PRIx64 "-%" PRIx64 " (%" PRIu64 ") too long to dump\n", start, end, end - start);
return 0;
}
thread__find_addr_map(thread, *cpumode, MAP__FUNCTION, start, &al);
if (!al.map || !al.map->dso) {
- printf("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
+ pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
return 0;
}
if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR) {
- printf("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
+ pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
return 0;
}
@@ -760,36 +847,35 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
*is64bit = al.map->dso->is_64_bit;
if (len <= 0)
- printf("\tcannot fetch code for block at %" PRIx64 "-%" PRIx64 "\n",
+ pr_debug("\tcannot fetch code for block at %" PRIx64 "-%" PRIx64 "\n",
start, end);
return len;
}
-static void print_jump(uint64_t ip, struct branch_entry *en,
- struct perf_insn *x, u8 *inbuf, int len,
- int insn)
+static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
+ struct perf_insn *x, u8 *inbuf, int len,
+ int insn, FILE *fp)
{
- printf("\t%016" PRIx64 "\t%-30s\t#%s%s%s%s",
- ip,
- dump_insn(x, ip, inbuf, len, NULL),
- en->flags.predicted ? " PRED" : "",
- en->flags.mispred ? " MISPRED" : "",
- en->flags.in_tx ? " INTX" : "",
- en->flags.abort ? " ABORT" : "");
+ int printed = fprintf(fp, "\t%016" PRIx64 "\t%-30s\t#%s%s%s%s", ip,
+ dump_insn(x, ip, inbuf, len, NULL),
+ en->flags.predicted ? " PRED" : "",
+ en->flags.mispred ? " MISPRED" : "",
+ en->flags.in_tx ? " INTX" : "",
+ en->flags.abort ? " ABORT" : "");
if (en->flags.cycles) {
- printf(" %d cycles", en->flags.cycles);
+ printed += fprintf(fp, " %d cycles", en->flags.cycles);
if (insn)
- printf(" %.2f IPC", (float)insn / en->flags.cycles);
+ printed += fprintf(fp, " %.2f IPC", (float)insn / en->flags.cycles);
}
- putchar('\n');
+ return printed + fprintf(fp, "\n");
}
-static void print_ip_sym(struct thread *thread, u8 cpumode, int cpu,
- uint64_t addr, struct symbol **lastsym,
- struct perf_event_attr *attr)
+static int ip__fprintf_sym(uint64_t addr, struct thread *thread,
+ u8 cpumode, int cpu, struct symbol **lastsym,
+ struct perf_event_attr *attr, FILE *fp)
{
struct addr_location al;
- int off;
+ int off, printed = 0;
memset(&al, 0, sizeof(al));
@@ -798,7 +884,7 @@ static void print_ip_sym(struct thread *thread, u8 cpumode, int cpu,
thread__find_addr_map(thread, cpumode, MAP__VARIABLE,
addr, &al);
if ((*lastsym) && al.addr >= (*lastsym)->start && al.addr < (*lastsym)->end)
- return;
+ return 0;
al.cpu = cpu;
al.sym = NULL;
@@ -806,37 +892,39 @@ static void print_ip_sym(struct thread *thread, u8 cpumode, int cpu,
al.sym = map__find_symbol(al.map, al.addr);
if (!al.sym)
- return;
+ return 0;
if (al.addr < al.sym->end)
off = al.addr - al.sym->start;
else
off = al.addr - al.map->start - al.sym->start;
- printf("\t%s", al.sym->name);
+ printed += fprintf(fp, "\t%s", al.sym->name);
if (off)
- printf("%+d", off);
- putchar(':');
+ printed += fprintf(fp, "%+d", off);
+ printed += fprintf(fp, ":");
if (PRINT_FIELD(SRCLINE))
- map__fprintf_srcline(al.map, al.addr, "\t", stdout);
- putchar('\n');
+ printed += map__fprintf_srcline(al.map, al.addr, "\t", fp);
+ printed += fprintf(fp, "\n");
*lastsym = al.sym;
+
+ return printed;
}
-static void print_sample_brstackinsn(struct perf_sample *sample,
- struct thread *thread,
- struct perf_event_attr *attr,
- struct machine *machine)
+static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
+ struct thread *thread,
+ struct perf_event_attr *attr,
+ struct machine *machine, FILE *fp)
{
struct branch_stack *br = sample->branch_stack;
u64 start, end;
- int i, insn, len, nr, ilen;
+ int i, insn, len, nr, ilen, printed = 0;
struct perf_insn x;
u8 buffer[MAXBB];
unsigned off;
struct symbol *lastsym = NULL;
if (!(br && br->nr))
- return;
+ return 0;
nr = br->nr;
if (max_blocks && nr > max_blocks + 1)
nr = max_blocks + 1;
@@ -844,17 +932,17 @@ static void print_sample_brstackinsn(struct perf_sample *sample,
x.thread = thread;
x.cpu = sample->cpu;
- putchar('\n');
+ printed += fprintf(fp, "%c", '\n');
/* Handle first from jump, of which we don't know the entry. */
len = grab_bb(buffer, br->entries[nr-1].from,
br->entries[nr-1].from,
machine, thread, &x.is64bit, &x.cpumode, false);
if (len > 0) {
- print_ip_sym(thread, x.cpumode, x.cpu,
- br->entries[nr - 1].from, &lastsym, attr);
- print_jump(br->entries[nr - 1].from, &br->entries[nr - 1],
- &x, buffer, len, 0);
+ printed += ip__fprintf_sym(br->entries[nr - 1].from, thread,
+ x.cpumode, x.cpu, &lastsym, attr, fp);
+ printed += ip__fprintf_jump(br->entries[nr - 1].from, &br->entries[nr - 1],
+ &x, buffer, len, 0, fp);
}
/* Print all blocks */
@@ -880,13 +968,13 @@ static void print_sample_brstackinsn(struct perf_sample *sample,
for (off = 0;; off += ilen) {
uint64_t ip = start + off;
- print_ip_sym(thread, x.cpumode, x.cpu, ip, &lastsym, attr);
+ printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
if (ip == end) {
- print_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn);
+ printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp);
break;
} else {
- printf("\t%016" PRIx64 "\t%s\n", ip,
- dump_insn(&x, ip, buffer + off, len - off, &ilen));
+ printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip,
+ dump_insn(&x, ip, buffer + off, len - off, &ilen));
if (ilen == 0)
break;
insn++;
@@ -899,9 +987,9 @@ static void print_sample_brstackinsn(struct perf_sample *sample,
* has not been executed yet.
*/
if (br->entries[0].from == sample->ip)
- return;
+ goto out;
if (br->entries[0].flags.abort)
- return;
+ goto out;
/*
* Print final block upto sample
@@ -909,58 +997,61 @@ static void print_sample_brstackinsn(struct perf_sample *sample,
start = br->entries[0].to;
end = sample->ip;
len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, true);
- print_ip_sym(thread, x.cpumode, x.cpu, start, &lastsym, attr);
+ printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
if (len <= 0) {
/* Print at least last IP if basic block did not work */
len = grab_bb(buffer, sample->ip, sample->ip,
machine, thread, &x.is64bit, &x.cpumode, false);
if (len <= 0)
- return;
+ goto out;
- printf("\t%016" PRIx64 "\t%s\n", sample->ip,
+ printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", sample->ip,
dump_insn(&x, sample->ip, buffer, len, NULL));
- return;
+ goto out;
}
for (off = 0; off <= end - start; off += ilen) {
- printf("\t%016" PRIx64 "\t%s\n", start + off,
- dump_insn(&x, start + off, buffer + off, len - off, &ilen));
+ printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", start + off,
+ dump_insn(&x, start + off, buffer + off, len - off, &ilen));
if (ilen == 0)
break;
}
+out:
+ return printed;
}
-static void print_sample_addr(struct perf_sample *sample,
- struct thread *thread,
- struct perf_event_attr *attr)
+static int perf_sample__fprintf_addr(struct perf_sample *sample,
+ struct thread *thread,
+ struct perf_event_attr *attr, FILE *fp)
{
struct addr_location al;
-
- printf("%16" PRIx64, sample->addr);
+ int printed = fprintf(fp, "%16" PRIx64, sample->addr);
if (!sample_addr_correlates_sym(attr))
- return;
+ goto out;
thread__resolve(thread, &al, sample);
if (PRINT_FIELD(SYM)) {
- printf(" ");
+ printed += fprintf(fp, " ");
if (PRINT_FIELD(SYMOFFSET))
- symbol__fprintf_symname_offs(al.sym, &al, stdout);
+ printed += symbol__fprintf_symname_offs(al.sym, &al, fp);
else
- symbol__fprintf_symname(al.sym, stdout);
+ printed += symbol__fprintf_symname(al.sym, fp);
}
if (PRINT_FIELD(DSO)) {
- printf(" (");
- map__fprintf_dsoname(al.map, stdout);
- printf(")");
+ printed += fprintf(fp, " (");
+ printed += map__fprintf_dsoname(al.map, fp);
+ printed += fprintf(fp, ")");
}
+out:
+ return printed;
}
-static void print_sample_callindent(struct perf_sample *sample,
- struct perf_evsel *evsel,
- struct thread *thread,
- struct addr_location *al)
+static int perf_sample__fprintf_callindent(struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct thread *thread,
+ struct addr_location *al, FILE *fp)
{
struct perf_event_attr *attr = &evsel->attr;
size_t depth = thread_stack__depth(thread);
@@ -995,12 +1086,12 @@ static void print_sample_callindent(struct perf_sample *sample,
}
if (name)
- len = printf("%*s%s", (int)depth * 4, "", name);
+ len = fprintf(fp, "%*s%s", (int)depth * 4, "", name);
else if (ip)
- len = printf("%*s%16" PRIx64, (int)depth * 4, "", ip);
+ len = fprintf(fp, "%*s%16" PRIx64, (int)depth * 4, "", ip);
if (len < 0)
- return;
+ return len;
/*
* Try to keep the output length from changing frequently so that the
@@ -1010,39 +1101,46 @@ static void print_sample_callindent(struct perf_sample *sample,
spacing = round_up(len + 4, 32);
if (len < spacing)
- printf("%*s", spacing - len, "");
+ len += fprintf(fp, "%*s", spacing - len, "");
+
+ return len;
}
-static void print_insn(struct perf_sample *sample,
- struct perf_event_attr *attr,
- struct thread *thread,
- struct machine *machine)
+static int perf_sample__fprintf_insn(struct perf_sample *sample,
+ struct perf_event_attr *attr,
+ struct thread *thread,
+ struct machine *machine, FILE *fp)
{
+ int printed = 0;
+
if (PRINT_FIELD(INSNLEN))
- printf(" ilen: %d", sample->insn_len);
+ printed += fprintf(fp, " ilen: %d", sample->insn_len);
if (PRINT_FIELD(INSN)) {
int i;
- printf(" insn:");
+ printed += fprintf(fp, " insn:");
for (i = 0; i < sample->insn_len; i++)
- printf(" %02x", (unsigned char)sample->insn[i]);
+ printed += fprintf(fp, " %02x", (unsigned char)sample->insn[i]);
}
if (PRINT_FIELD(BRSTACKINSN))
- print_sample_brstackinsn(sample, thread, attr, machine);
+ printed += perf_sample__fprintf_brstackinsn(sample, thread, attr, machine, fp);
+
+ return printed;
}
-static void print_sample_bts(struct perf_sample *sample,
- struct perf_evsel *evsel,
- struct thread *thread,
- struct addr_location *al,
- struct machine *machine)
+static int perf_sample__fprintf_bts(struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct thread *thread,
+ struct addr_location *al,
+ struct machine *machine, FILE *fp)
{
struct perf_event_attr *attr = &evsel->attr;
unsigned int type = output_type(attr->type);
bool print_srcline_last = false;
+ int printed = 0;
if (PRINT_FIELD(CALLINDENT))
- print_sample_callindent(sample, evsel, thread, al);
+ printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, fp);
/* print branch_from information */
if (PRINT_FIELD(IP)) {
@@ -1055,31 +1153,30 @@ static void print_sample_bts(struct perf_sample *sample,
cursor = &callchain_cursor;
if (cursor == NULL) {
- putchar(' ');
+ printed += fprintf(fp, " ");
if (print_opts & EVSEL__PRINT_SRCLINE) {
print_srcline_last = true;
print_opts &= ~EVSEL__PRINT_SRCLINE;
}
} else
- putchar('\n');
+ printed += fprintf(fp, "\n");
- sample__fprintf_sym(sample, al, 0, print_opts, cursor, stdout);
+ printed += sample__fprintf_sym(sample, al, 0, print_opts, cursor, fp);
}
/* print branch_to information */
if (PRINT_FIELD(ADDR) ||
((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
!output[type].user_set)) {
- printf(" => ");
- print_sample_addr(sample, thread, attr);
+ printed += fprintf(fp, " => ");
+ printed += perf_sample__fprintf_addr(sample, thread, attr, fp);
}
if (print_srcline_last)
- map__fprintf_srcline(al->map, al->addr, "\n ", stdout);
-
- print_insn(sample, attr, thread, machine);
+ printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
- printf("\n");
+ printed += perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
+ return printed + fprintf(fp, "\n");
}
static struct {
@@ -1102,7 +1199,7 @@ static struct {
{0, NULL}
};
-static void print_sample_flags(u32 flags)
+static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
{
const char *chars = PERF_IP_FLAG_CHARS;
const int n = strlen(PERF_IP_FLAG_CHARS);
@@ -1129,9 +1226,9 @@ static void print_sample_flags(u32 flags)
str[pos] = 0;
if (name)
- printf(" %-7s%4s ", name, in_tx ? "(x)" : "");
- else
- printf(" %-11s ", str);
+ return fprintf(fp, " %-7s%4s ", name, in_tx ? "(x)" : "");
+
+ return fprintf(fp, " %-11s ", str);
}
struct printer_data {
@@ -1140,40 +1237,40 @@ struct printer_data {
bool is_printable;
};
-static void
-print_sample_bpf_output_printer(enum binary_printer_ops op,
- unsigned int val,
- void *extra)
+static int sample__fprintf_bpf_output(enum binary_printer_ops op,
+ unsigned int val,
+ void *extra, FILE *fp)
{
unsigned char ch = (unsigned char)val;
struct printer_data *printer_data = extra;
+ int printed = 0;
switch (op) {
case BINARY_PRINT_DATA_BEGIN:
- printf("\n");
+ printed += fprintf(fp, "\n");
break;
case BINARY_PRINT_LINE_BEGIN:
- printf("%17s", !printer_data->line_no ? "BPF output:" :
+ printed += fprintf(fp, "%17s", !printer_data->line_no ? "BPF output:" :
" ");
break;
case BINARY_PRINT_ADDR:
- printf(" %04x:", val);
+ printed += fprintf(fp, " %04x:", val);
break;
case BINARY_PRINT_NUM_DATA:
- printf(" %02x", val);
+ printed += fprintf(fp, " %02x", val);
break;
case BINARY_PRINT_NUM_PAD:
- printf(" ");
+ printed += fprintf(fp, " ");
break;
case BINARY_PRINT_SEP:
- printf(" ");
+ printed += fprintf(fp, " ");
break;
case BINARY_PRINT_CHAR_DATA:
if (printer_data->hit_nul && ch)
printer_data->is_printable = false;
if (!isprint(ch)) {
- printf("%c", '.');
+ printed += fprintf(fp, "%c", '.');
if (!printer_data->is_printable)
break;
@@ -1183,154 +1280,154 @@ print_sample_bpf_output_printer(enum binary_printer_ops op,
else
printer_data->is_printable = false;
} else {
- printf("%c", ch);
+ printed += fprintf(fp, "%c", ch);
}
break;
case BINARY_PRINT_CHAR_PAD:
- printf(" ");
+ printed += fprintf(fp, " ");
break;
case BINARY_PRINT_LINE_END:
- printf("\n");
+ printed += fprintf(fp, "\n");
printer_data->line_no++;
break;
case BINARY_PRINT_DATA_END:
default:
break;
}
+
+ return printed;
}
-static void print_sample_bpf_output(struct perf_sample *sample)
+static int perf_sample__fprintf_bpf_output(struct perf_sample *sample, FILE *fp)
{
unsigned int nr_bytes = sample->raw_size;
struct printer_data printer_data = {0, false, true};
-
- print_binary(sample->raw_data, nr_bytes, 8,
- print_sample_bpf_output_printer, &printer_data);
+ int printed = binary__fprintf(sample->raw_data, nr_bytes, 8,
+ sample__fprintf_bpf_output, &printer_data, fp);
if (printer_data.is_printable && printer_data.hit_nul)
- printf("%17s \"%s\"\n", "BPF string:",
- (char *)(sample->raw_data));
+ printed += fprintf(fp, "%17s \"%s\"\n", "BPF string:", (char *)(sample->raw_data));
+
+ return printed;
}
-static void print_sample_spacing(int len, int spacing)
+static int perf_sample__fprintf_spacing(int len, int spacing, FILE *fp)
{
if (len > 0 && len < spacing)
- printf("%*s", spacing - len, "");
+ return fprintf(fp, "%*s", spacing - len, "");
+
+ return 0;
}
-static void print_sample_pt_spacing(int len)
+static int perf_sample__fprintf_pt_spacing(int len, FILE *fp)
{
- print_sample_spacing(len, 34);
+ return perf_sample__fprintf_spacing(len, 34, fp);
}
-static void print_sample_synth_ptwrite(struct perf_sample *sample)
+static int perf_sample__fprintf_synth_ptwrite(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
- return;
+ return 0;
- len = printf(" IP: %u payload: %#" PRIx64 " ",
+ len = fprintf(fp, " IP: %u payload: %#" PRIx64 " ",
data->ip, le64_to_cpu(data->payload));
- print_sample_pt_spacing(len);
+ return len + perf_sample__fprintf_pt_spacing(len, fp);
}
-static void print_sample_synth_mwait(struct perf_sample *sample)
+static int perf_sample__fprintf_synth_mwait(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_mwait *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
- return;
+ return 0;
- len = printf(" hints: %#x extensions: %#x ",
- data->hints, data->extensions);
- print_sample_pt_spacing(len);
+ len = fprintf(fp, " hints: %#x extensions: %#x ",
+ data->hints, data->extensions);
+ return len + perf_sample__fprintf_pt_spacing(len, fp);
}
-static void print_sample_synth_pwre(struct perf_sample *sample)
+static int perf_sample__fprintf_synth_pwre(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_pwre *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
- return;
+ return 0;
- len = printf(" hw: %u cstate: %u sub-cstate: %u ",
- data->hw, data->cstate, data->subcstate);
- print_sample_pt_spacing(len);
+ len = fprintf(fp, " hw: %u cstate: %u sub-cstate: %u ",
+ data->hw, data->cstate, data->subcstate);
+ return len + perf_sample__fprintf_pt_spacing(len, fp);
}
-static void print_sample_synth_exstop(struct perf_sample *sample)
+static int perf_sample__fprintf_synth_exstop(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_exstop *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
- return;
+ return 0;
- len = printf(" IP: %u ", data->ip);
- print_sample_pt_spacing(len);
+ len = fprintf(fp, " IP: %u ", data->ip);
+ return len + perf_sample__fprintf_pt_spacing(len, fp);
}
-static void print_sample_synth_pwrx(struct perf_sample *sample)
+static int perf_sample__fprintf_synth_pwrx(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_pwrx *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
- return;
+ return 0;
- len = printf(" deepest cstate: %u last cstate: %u wake reason: %#x ",
+ len = fprintf(fp, " deepest cstate: %u last cstate: %u wake reason: %#x ",
data->deepest_cstate, data->last_cstate,
data->wake_reason);
- print_sample_pt_spacing(len);
+ return len + perf_sample__fprintf_pt_spacing(len, fp);
}
-static void print_sample_synth_cbr(struct perf_sample *sample)
+static int perf_sample__fprintf_synth_cbr(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_cbr *data = perf_sample__synth_ptr(sample);
unsigned int percent, freq;
int len;
if (perf_sample__bad_synth_size(sample, *data))
- return;
+ return 0;
freq = (le32_to_cpu(data->freq) + 500) / 1000;
- len = printf(" cbr: %2u freq: %4u MHz ", data->cbr, freq);
+ len = fprintf(fp, " cbr: %2u freq: %4u MHz ", data->cbr, freq);
if (data->max_nonturbo) {
percent = (5 + (1000 * data->cbr) / data->max_nonturbo) / 10;
- len += printf("(%3u%%) ", percent);
+ len += fprintf(fp, "(%3u%%) ", percent);
}
- print_sample_pt_spacing(len);
+ return len + perf_sample__fprintf_pt_spacing(len, fp);
}
-static void print_sample_synth(struct perf_sample *sample,
- struct perf_evsel *evsel)
+static int perf_sample__fprintf_synth(struct perf_sample *sample,
+ struct perf_evsel *evsel, FILE *fp)
{
switch (evsel->attr.config) {
case PERF_SYNTH_INTEL_PTWRITE:
- print_sample_synth_ptwrite(sample);
- break;
+ return perf_sample__fprintf_synth_ptwrite(sample, fp);
case PERF_SYNTH_INTEL_MWAIT:
- print_sample_synth_mwait(sample);
- break;
+ return perf_sample__fprintf_synth_mwait(sample, fp);
case PERF_SYNTH_INTEL_PWRE:
- print_sample_synth_pwre(sample);
- break;
+ return perf_sample__fprintf_synth_pwre(sample, fp);
case PERF_SYNTH_INTEL_EXSTOP:
- print_sample_synth_exstop(sample);
- break;
+ return perf_sample__fprintf_synth_exstop(sample, fp);
case PERF_SYNTH_INTEL_PWRX:
- print_sample_synth_pwrx(sample);
- break;
+ return perf_sample__fprintf_synth_pwrx(sample, fp);
case PERF_SYNTH_INTEL_CBR:
- print_sample_synth_cbr(sample);
- break;
+ return perf_sample__fprintf_synth_cbr(sample, fp);
default:
break;
}
+
+ return 0;
}
struct perf_script {
@@ -1341,6 +1438,7 @@ struct perf_script {
bool show_switch_events;
bool show_namespace_events;
bool allocated;
+ bool per_event_dump;
struct cpu_map *cpus;
struct thread_map *threads;
int name_width;
@@ -1362,7 +1460,7 @@ static int perf_evlist__max_name_len(struct perf_evlist *evlist)
return max;
}
-static size_t data_src__printf(u64 data_src)
+static int data_src__fprintf(u64 data_src, FILE *fp)
{
struct mem_info mi = { .data_src.val = data_src };
char decode[100];
@@ -1376,7 +1474,7 @@ static size_t data_src__printf(u64 data_src)
if (maxlen < len)
maxlen = len;
- return printf("%-*s", maxlen, out);
+ return fprintf(fp, "%-*s", maxlen, out);
}
static void process_event(struct perf_script *script,
@@ -1387,14 +1485,18 @@ static void process_event(struct perf_script *script,
struct thread *thread = al->thread;
struct perf_event_attr *attr = &evsel->attr;
unsigned int type = output_type(attr->type);
+ struct perf_evsel_script *es = evsel->priv;
+ FILE *fp = es->fp;
if (output[type].fields == 0)
return;
- print_sample_start(sample, thread, evsel);
+ ++es->samples;
+
+ perf_sample__fprintf_start(sample, thread, evsel, fp);
if (PRINT_FIELD(PERIOD))
- printf("%10" PRIu64 " ", sample->period);
+ fprintf(fp, "%10" PRIu64 " ", sample->period);
if (PRINT_FIELD(EVNAME)) {
const char *evname = perf_evsel__name(evsel);
@@ -1402,33 +1504,33 @@ static void process_event(struct perf_script *script,
if (!script->name_width)
script->name_width = perf_evlist__max_name_len(script->session->evlist);
- printf("%*s: ", script->name_width,
- evname ? evname : "[unknown]");
+ fprintf(fp, "%*s: ", script->name_width, evname ?: "[unknown]");
}
if (print_flags)
- print_sample_flags(sample->flags);
+ perf_sample__fprintf_flags(sample->flags, fp);
if (is_bts_event(attr)) {
- print_sample_bts(sample, evsel, thread, al, machine);
+ perf_sample__fprintf_bts(sample, evsel, thread, al, machine, fp);
return;
}
- if (PRINT_FIELD(TRACE))
- event_format__print(evsel->tp_format, sample->cpu,
- sample->raw_data, sample->raw_size);
+ if (PRINT_FIELD(TRACE)) {
+ event_format__fprintf(evsel->tp_format, sample->cpu,
+ sample->raw_data, sample->raw_size, fp);
+ }
if (attr->type == PERF_TYPE_SYNTH && PRINT_FIELD(SYNTH))
- print_sample_synth(sample, evsel);
+ perf_sample__fprintf_synth(sample, evsel, fp);
if (PRINT_FIELD(ADDR))
- print_sample_addr(sample, thread, attr);
+ perf_sample__fprintf_addr(sample, thread, attr, fp);
if (PRINT_FIELD(DATA_SRC))
- data_src__printf(sample->data_src);
+ data_src__fprintf(sample->data_src, fp);
if (PRINT_FIELD(WEIGHT))
- printf("%16" PRIu64, sample->weight);
+ fprintf(fp, "%16" PRIu64, sample->weight);
if (PRINT_FIELD(IP)) {
struct callchain_cursor *cursor = NULL;
@@ -1438,27 +1540,30 @@ static void process_event(struct perf_script *script,
sample, NULL, NULL, scripting_max_stack) == 0)
cursor = &callchain_cursor;
- putchar(cursor ? '\n' : ' ');
- sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor, stdout);
+ fputc(cursor ? '\n' : ' ', fp);
+ sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor, fp);
}
if (PRINT_FIELD(IREGS))
- print_sample_iregs(sample, attr);
+ perf_sample__fprintf_iregs(sample, attr, fp);
+
+ if (PRINT_FIELD(UREGS))
+ perf_sample__fprintf_uregs(sample, attr, fp);
if (PRINT_FIELD(BRSTACK))
- print_sample_brstack(sample, thread, attr);
+ perf_sample__fprintf_brstack(sample, thread, attr, fp);
else if (PRINT_FIELD(BRSTACKSYM))
- print_sample_brstacksym(sample, thread, attr);
+ perf_sample__fprintf_brstacksym(sample, thread, attr, fp);
else if (PRINT_FIELD(BRSTACKOFF))
- print_sample_brstackoff(sample, thread, attr);
+ perf_sample__fprintf_brstackoff(sample, thread, attr, fp);
if (perf_evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
- print_sample_bpf_output(sample);
- print_insn(sample, attr, thread, machine);
+ perf_sample__fprintf_bpf_output(sample, fp);
+ perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
if (PRINT_FIELD(PHYS_ADDR))
- printf("%16" PRIx64, sample->phys_addr);
- printf("\n");
+ fprintf(fp, "%16" PRIx64, sample->phys_addr);
+ fprintf(fp, "\n");
}
static struct scripting_ops *scripting_ops;
@@ -1632,7 +1737,7 @@ static int process_comm_event(struct perf_tool *tool,
sample->tid = event->comm.tid;
sample->pid = event->comm.pid;
}
- print_sample_start(sample, thread, evsel);
+ perf_sample__fprintf_start(sample, thread, evsel, stdout);
perf_event__fprintf(event, stdout);
ret = 0;
out:
@@ -1667,7 +1772,7 @@ static int process_namespaces_event(struct perf_tool *tool,
sample->tid = event->namespaces.tid;
sample->pid = event->namespaces.pid;
}
- print_sample_start(sample, thread, evsel);
+ perf_sample__fprintf_start(sample, thread, evsel, stdout);
perf_event__fprintf(event, stdout);
ret = 0;
out:
@@ -1700,7 +1805,7 @@ static int process_fork_event(struct perf_tool *tool,
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
- print_sample_start(sample, thread, evsel);
+ perf_sample__fprintf_start(sample, thread, evsel, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
@@ -1729,7 +1834,7 @@ static int process_exit_event(struct perf_tool *tool,
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
- print_sample_start(sample, thread, evsel);
+ perf_sample__fprintf_start(sample, thread, evsel, stdout);
perf_event__fprintf(event, stdout);
if (perf_event__process_exit(tool, event, sample, machine) < 0)
@@ -1764,7 +1869,7 @@ static int process_mmap_event(struct perf_tool *tool,
sample->tid = event->mmap.tid;
sample->pid = event->mmap.pid;
}
- print_sample_start(sample, thread, evsel);
+ perf_sample__fprintf_start(sample, thread, evsel, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
@@ -1795,7 +1900,7 @@ static int process_mmap2_event(struct perf_tool *tool,
sample->tid = event->mmap2.tid;
sample->pid = event->mmap2.pid;
}
- print_sample_start(sample, thread, evsel);
+ perf_sample__fprintf_start(sample, thread, evsel, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
@@ -1821,7 +1926,7 @@ static int process_switch_event(struct perf_tool *tool,
return -1;
}
- print_sample_start(sample, thread, evsel);
+ perf_sample__fprintf_start(sample, thread, evsel, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
@@ -1832,6 +1937,65 @@ static void sig_handler(int sig __maybe_unused)
session_done = 1;
}
+static void perf_script__fclose_per_event_dump(struct perf_script *script)
+{
+ struct perf_evlist *evlist = script->session->evlist;
+ struct perf_evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (!evsel->priv)
+ break;
+ perf_evsel_script__delete(evsel->priv);
+ evsel->priv = NULL;
+ }
+}
+
+static int perf_script__fopen_per_event_dump(struct perf_script *script)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each_entry(script->session->evlist, evsel) {
+ evsel->priv = perf_evsel_script__new(evsel, script->session->data);
+ if (evsel->priv == NULL)
+ goto out_err_fclose;
+ }
+
+ return 0;
+
+out_err_fclose:
+ perf_script__fclose_per_event_dump(script);
+ return -1;
+}
+
+static int perf_script__setup_per_event_dump(struct perf_script *script)
+{
+ struct perf_evsel *evsel;
+ static struct perf_evsel_script es_stdout;
+
+ if (script->per_event_dump)
+ return perf_script__fopen_per_event_dump(script);
+
+ es_stdout.fp = stdout;
+
+ evlist__for_each_entry(script->session->evlist, evsel)
+ evsel->priv = &es_stdout;
+
+ return 0;
+}
+
+static void perf_script__exit_per_event_dump_stats(struct perf_script *script)
+{
+ struct perf_evsel *evsel;
+
+ evlist__for_each_entry(script->session->evlist, evsel) {
+ struct perf_evsel_script *es = evsel->priv;
+
+ perf_evsel_script__fprintf(es, stdout);
+ perf_evsel_script__delete(es);
+ evsel->priv = NULL;
+ }
+}
+
static int __cmd_script(struct perf_script *script)
{
int ret;
@@ -1853,8 +2017,16 @@ static int __cmd_script(struct perf_script *script)
if (script->show_namespace_events)
script->tool.namespaces = process_namespaces_event;
+ if (perf_script__setup_per_event_dump(script)) {
+ pr_err("Couldn't create the per event dump files\n");
+ return -1;
+ }
+
ret = perf_session__process_events(script->session);
+ if (script->per_event_dump)
+ perf_script__exit_per_event_dump_stats(script);
+
if (debug_mode)
pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
@@ -2419,14 +2591,16 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
struct perf_session *session;
- struct perf_data_file file = {
- .path = input_name,
- .mode = PERF_DATA_MODE_READ,
+ struct perf_data data = {
+ .file = {
+ .path = input_name,
+ },
+ .mode = PERF_DATA_MODE_READ,
};
char *temp;
int i = 0;
- session = perf_session__new(&file, false, NULL);
+ session = perf_session__new(&data, false, NULL);
if (!session)
return -1;
@@ -2704,7 +2878,7 @@ int cmd_script(int argc, const char **argv)
.ordering_requires_timestamps = true,
},
};
- struct perf_data_file file = {
+ struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
const struct option options[] = {
@@ -2740,7 +2914,7 @@ int cmd_script(int argc, const char **argv)
"+field to add and -field to remove."
"Valid types: hw,sw,trace,raw,synth. "
"Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
- "addr,symoff,period,iregs,brstack,brstacksym,flags,"
+ "addr,symoff,period,iregs,uregs,brstack,brstacksym,flags,"
"bpf-output,callindent,insn,insnlen,brstackinsn,synth,phys_addr",
parse_output_fields),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
@@ -2772,6 +2946,8 @@ int cmd_script(int argc, const char **argv)
"Show context switch events (if recorded)"),
OPT_BOOLEAN('\0', "show-namespace-events", &script.show_namespace_events,
"Show namespace events (if recorded)"),
+ OPT_BOOLEAN('\0', "per-event-dump", &script.per_event_dump,
+ "Dump trace output to files named by the monitored events"),
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
OPT_INTEGER(0, "max-blocks", &max_blocks,
"Maximum number of code blocks to dump with brstackinsn"),
@@ -2802,13 +2978,15 @@ int cmd_script(int argc, const char **argv)
NULL
};
+ perf_set_singlethreaded();
+
setup_scripting();
argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- file.path = input_name;
- file.force = symbol_conf.force;
+ data.file.path = input_name;
+ data.force = symbol_conf.force;
if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
@@ -2975,7 +3153,7 @@ int cmd_script(int argc, const char **argv)
if (!script_name)
setup_pager();
- session = perf_session__new(&file, false, &script.tool);
+ session = perf_session__new(&data, false, &script.tool);
if (session == NULL)
return -1;
@@ -3016,7 +3194,8 @@ int cmd_script(int argc, const char **argv)
machine__resolve_kernel_addr,
&session->machines.host) < 0) {
pr_err("%s: failed to set libtraceevent function resolver\n", __func__);
- return -1;
+ err = -1;
+ goto out_delete;
}
if (generate_script_lang) {
@@ -3030,7 +3209,7 @@ int cmd_script(int argc, const char **argv)
goto out_delete;
}
- input = open(file.path, O_RDONLY); /* input_name */
+ input = open(data.file.path, O_RDONLY); /* input_name */
if (input < 0) {
err = -errno;
perror("failed to open file");
@@ -3076,7 +3255,8 @@ int cmd_script(int argc, const char **argv)
/* needs to be parsed after looking up reference time */
if (perf_time__parse_str(&script.ptime, script.time_str) != 0) {
pr_err("Invalid time string\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto out_delete;
}
err = __cmd_script(&script);
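
The builtin-script.c changes above all follow one conversion pattern: each print_sample_*() helper that wrote to stdout via printf() becomes a perf_sample__fprintf_*() helper that takes an explicit FILE * and returns the number of characters written, so a caller can both redirect a sample's output and accumulate how much it printed. A minimal standalone sketch of that pattern follows; emit_field() and emit_record() are made-up names for illustration, not the real perf helpers.

#include <stdio.h>

/* Hypothetical field printer: writes to the given stream and reports
 * how many characters it produced, mirroring the fprintf() contract. */
static int emit_field(FILE *fp, const char *name, unsigned long long val)
{
	return fprintf(fp, " %s: %#llx", name, val);
}

/* The caller accumulates a per-record total instead of printing blindly
 * to stdout, which is what allows output to be swapped per event. */
static int emit_record(FILE *fp, unsigned long long ip, unsigned long long addr)
{
	int printed = 0;

	printed += emit_field(fp, "ip", ip);
	printed += emit_field(fp, "addr", addr);
	printed += fprintf(fp, "\n");
	return printed;
}

int main(void)
{
	int total = emit_record(stdout, 0xffffffff811e6aa1ULL, 0x7f0de0ULL);

	fprintf(stderr, "record took %d characters\n", total);
	return 0;
}

With every helper reporting where it wrote and how much, handing a different FILE * to process_event() is all it takes to split the trace per event, which is what the new perf script --per-event-dump option does (per its help text, output goes to files named after the monitored events).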
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 69523ed55894..59af5a8419e2 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -65,6 +65,7 @@
#include "util/tool.h"
#include "util/group.h"
#include "util/string2.h"
+#include "util/metricgroup.h"
#include "asm/bug.h"
#include <linux/time64.h>
@@ -133,6 +134,8 @@ static const char *smi_cost_attrs = {
static struct perf_evlist *evsel_list;
+static struct rblist metric_events;
+
static struct target target = {
.uid = UINT_MAX,
};
@@ -172,7 +175,7 @@ static int print_free_counters_hint;
struct perf_stat {
bool record;
- struct perf_data_file file;
+ struct perf_data data;
struct perf_session *session;
u64 bytes_written;
struct perf_tool tool;
@@ -192,6 +195,11 @@ static struct perf_stat_config stat_config = {
.scale = true,
};
+static bool is_duration_time(struct perf_evsel *evsel)
+{
+ return !strcmp(evsel->name, "duration_time");
+}
+
static inline void diff_timespec(struct timespec *r, struct timespec *a,
struct timespec *b)
{
@@ -245,7 +253,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
* by attr->sample_type != 0, and we can't run it on
* stat sessions.
*/
- if (!(STAT_RECORD && perf_stat.file.is_pipe))
+ if (!(STAT_RECORD && perf_stat.data.is_pipe))
attr->sample_type = PERF_SAMPLE_IDENTIFIER;
/*
@@ -287,7 +295,7 @@ static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
- if (perf_data_file__write(&perf_stat.file, event, event->header.size) < 0) {
+ if (perf_data__write(&perf_stat.data, event, event->header.size) < 0) {
pr_err("failed to write perf data, error: %m\n");
return -1;
}
@@ -407,6 +415,8 @@ static void process_interval(void)
pr_err("failed to write stat round event\n");
}
+ init_stats(&walltime_nsecs_stats);
+ update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000);
print_counters(&rs, 0, NULL);
}
@@ -582,6 +592,32 @@ static bool perf_evsel__should_store_id(struct perf_evsel *counter)
return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
}
+static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
+{
+ struct perf_evsel *c2, *leader;
+ bool is_open = true;
+
+ leader = evsel->leader;
+ pr_debug("Weak group for %s/%d failed\n",
+ leader->name, leader->nr_members);
+
+ /*
+ * for_each_group_member doesn't work here because it doesn't
+ * include the first entry.
+ */
+ evlist__for_each_entry(evsel_list, c2) {
+ if (c2 == evsel)
+ is_open = false;
+ if (c2->leader == leader) {
+ if (is_open)
+ perf_evsel__close(c2);
+ c2->leader = c2;
+ c2->nr_members = 0;
+ }
+ }
+ return leader;
+}
+
static int __run_perf_stat(int argc, const char **argv)
{
int interval = stat_config.interval;
@@ -592,7 +628,7 @@ static int __run_perf_stat(int argc, const char **argv)
size_t l;
int status = 0;
const bool forks = (argc > 0);
- bool is_pipe = STAT_RECORD ? perf_stat.file.is_pipe : false;
+ bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
struct perf_evsel_config_term *err_term;
if (interval) {
@@ -618,6 +654,15 @@ static int __run_perf_stat(int argc, const char **argv)
evlist__for_each_entry(evsel_list, counter) {
try_again:
if (create_perf_stat_counter(counter) < 0) {
+
+ /* Weak group failed. Reset the group. */
+ if ((errno == EINVAL || errno == EBADF) &&
+ counter->leader != counter &&
+ counter->weak_group) {
+ counter = perf_evsel__reset_weak_group(counter);
+ goto try_again;
+ }
+
/*
* PPC returns ENXIO for HW counters until 2.6.37
* (behavior changed with commit b0a873e).
@@ -674,10 +719,10 @@ try_again:
}
if (STAT_RECORD) {
- int err, fd = perf_data_file__fd(&perf_stat.file);
+ int err, fd = perf_data__fd(&perf_stat.data);
if (is_pipe) {
- err = perf_header__write_pipe(perf_data_file__fd(&perf_stat.file));
+ err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
} else {
err = perf_session__write_header(perf_stat.session, evsel_list,
fd, false);
@@ -800,7 +845,7 @@ static void print_noise(struct perf_evsel *evsel, double avg)
if (run_count == 1)
return;
- ps = evsel->priv;
+ ps = evsel->stats;
print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
}
@@ -1199,7 +1244,7 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval,
perf_stat__print_shadow_stats(counter, uval,
first_shadow_cpu(counter, id),
- &out);
+ &out, &metric_events);
if (!csv_output && !metric_only) {
print_noise(counter, noise);
print_running(run, ena);
@@ -1222,8 +1267,7 @@ static void aggr_update_shadow(void)
continue;
val += perf_counts(counter->counts, cpu, 0)->val;
}
- val = val * counter->scale;
- perf_stat__update_shadow_stats(counter, &val,
+ perf_stat__update_shadow_stats(counter, val,
first_shadow_cpu(counter, id));
}
}
@@ -1325,6 +1369,9 @@ static void print_aggr(char *prefix)
ad.id = id = aggr_map->map[s];
first = true;
evlist__for_each_entry(evsel_list, counter) {
+ if (is_duration_time(counter))
+ continue;
+
ad.val = ad.ena = ad.run = 0;
ad.nr = 0;
if (!collect_data(counter, aggr_cb, &ad))
@@ -1384,7 +1431,7 @@ static void counter_aggr_cb(struct perf_evsel *counter, void *data,
bool first __maybe_unused)
{
struct caggr_data *cd = data;
- struct perf_stat_evsel *ps = counter->priv;
+ struct perf_stat_evsel *ps = counter->stats;
cd->avg += avg_stats(&ps->res_stats[0]);
cd->avg_enabled += avg_stats(&ps->res_stats[1]);
@@ -1468,6 +1515,8 @@ static void print_no_aggr_metric(char *prefix)
if (prefix)
fputs(prefix, stat_config.output);
evlist__for_each_entry(evsel_list, counter) {
+ if (is_duration_time(counter))
+ continue;
if (first) {
aggr_printout(counter, cpu, 0);
first = false;
@@ -1522,6 +1571,8 @@ static void print_metric_headers(const char *prefix, bool no_indent)
/* Print metrics headers only */
evlist__for_each_entry(evsel_list, counter) {
+ if (is_duration_time(counter))
+ continue;
os.evsel = counter;
out.ctx = &os;
out.print_metric = print_metric_header;
@@ -1530,7 +1581,8 @@ static void print_metric_headers(const char *prefix, bool no_indent)
os.evsel = counter;
perf_stat__print_shadow_stats(counter, 0,
0,
- &out);
+ &out,
+ &metric_events);
}
fputc('\n', stat_config.output);
}
@@ -1643,7 +1695,7 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
char buf[64], *prefix = NULL;
/* Do not print anything if we record to the pipe. */
- if (STAT_RECORD && perf_stat.file.is_pipe)
+ if (STAT_RECORD && perf_stat.data.is_pipe)
return;
if (interval)
@@ -1668,12 +1720,18 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
print_aggr(prefix);
break;
case AGGR_THREAD:
- evlist__for_each_entry(evsel_list, counter)
+ evlist__for_each_entry(evsel_list, counter) {
+ if (is_duration_time(counter))
+ continue;
print_aggr_thread(counter, prefix);
+ }
break;
case AGGR_GLOBAL:
- evlist__for_each_entry(evsel_list, counter)
+ evlist__for_each_entry(evsel_list, counter) {
+ if (is_duration_time(counter))
+ continue;
print_counter_aggr(counter, prefix);
+ }
if (metric_only)
fputc('\n', stat_config.output);
break;
@@ -1681,8 +1739,11 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
if (metric_only)
print_no_aggr_metric(prefix);
else {
- evlist__for_each_entry(evsel_list, counter)
+ evlist__for_each_entry(evsel_list, counter) {
+ if (is_duration_time(counter))
+ continue;
print_counter(counter, prefix);
+ }
}
break;
case AGGR_UNSET:
@@ -1754,6 +1815,13 @@ static int enable_metric_only(const struct option *opt __maybe_unused,
return 0;
}
+static int parse_metric_groups(const struct option *opt,
+ const char *str,
+ int unset __maybe_unused)
+{
+ return metricgroup__parse_groups(opt, str, &metric_events);
+}
+
static const struct option stat_options[] = {
OPT_BOOLEAN('T', "transaction", &transaction_run,
"hardware transaction statistics"),
@@ -1819,6 +1887,9 @@ static const struct option stat_options[] = {
"measure topdown level 1 statistics"),
OPT_BOOLEAN(0, "smi-cost", &smi_cost,
"measure SMI cost"),
+ OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
+ "monitor specified metrics or metric groups (separated by ,)",
+ parse_metric_groups),
OPT_END()
};
@@ -2334,20 +2405,20 @@ static void init_features(struct perf_session *session)
static int __cmd_record(int argc, const char **argv)
{
struct perf_session *session;
- struct perf_data_file *file = &perf_stat.file;
+ struct perf_data *data = &perf_stat.data;
argc = parse_options(argc, argv, stat_options, stat_record_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (output_name)
- file->path = output_name;
+ data->file.path = output_name;
if (run_count != 1 || forever) {
pr_err("Cannot use -r option with perf stat record.\n");
return -1;
}
- session = perf_session__new(file, false, NULL);
+ session = perf_session__new(data, false, NULL);
if (session == NULL) {
pr_err("Perf session creation failed.\n");
return -1;
@@ -2405,7 +2476,7 @@ int process_stat_config_event(struct perf_tool *tool,
if (st->aggr_mode != AGGR_UNSET)
stat_config.aggr_mode = st->aggr_mode;
- if (perf_stat.file.is_pipe)
+ if (perf_stat.data.is_pipe)
perf_stat_init_aggr_mode();
else
perf_stat_init_aggr_mode_file(st);
@@ -2513,10 +2584,10 @@ static int __cmd_report(int argc, const char **argv)
input_name = "perf.data";
}
- perf_stat.file.path = input_name;
- perf_stat.file.mode = PERF_DATA_MODE_READ;
+ perf_stat.data.file.path = input_name;
+ perf_stat.data.mode = PERF_DATA_MODE_READ;
- session = perf_session__new(&perf_stat.file, false, &perf_stat.tool);
+ session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
if (session == NULL)
return -1;
@@ -2787,7 +2858,7 @@ int cmd_stat(int argc, const char **argv)
* records, but the need to suppress the kptr_restrict messages in older
* tools remain -acme
*/
- int fd = perf_data_file__fd(&perf_stat.file);
+ int fd = perf_data__fd(&perf_stat.data);
int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
process_synthesized_event,
&perf_stat.session->machines.host);
@@ -2801,7 +2872,7 @@ int cmd_stat(int argc, const char **argv)
pr_err("failed to write stat round event\n");
}
- if (!perf_stat.file.is_pipe) {
+ if (!perf_stat.data.is_pipe) {
perf_stat.session->header.data_size += perf_stat.bytes_written;
perf_session__write_header(perf_stat.session, evsel_list, fd, true);
}
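
The weak-group fallback added to builtin-stat.c above hinges on the detail called out in its comment: for_each_group_member() skips the first entry, so the reset walks the whole evlist and uses an is_open flag to separate members that were already opened (and must be closed) from those that were not. A small self-contained sketch of that walk, using a toy counter struct rather than struct perf_evsel:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct perf_evsel: each counter points at its group
 * leader; a counter that is its own leader is ungrouped. */
struct counter {
	const char *name;
	struct counter *leader;
	bool opened;
};

/* Reset a weak group after 'failed' could not be opened: members seen
 * before 'failed' in list order were opened and get closed, the rest
 * were never opened; every member becomes its own leader so it can be
 * retried as a standalone event. */
static struct counter *reset_weak_group(struct counter *list, int n,
					struct counter *failed)
{
	struct counter *leader = failed->leader;
	bool is_open = true;
	int i;

	for (i = 0; i < n; i++) {
		struct counter *c = &list[i];

		if (c == failed)
			is_open = false;
		if (c->leader == leader) {
			if (is_open && c->opened) {
				printf("closing %s\n", c->name);
				c->opened = false;
			}
			c->leader = c;	/* ungroup */
		}
	}
	return leader;	/* the caller retries from the old leader */
}

int main(void)
{
	struct counter evs[3] = {
		{ "cycles",       NULL, true  },
		{ "instructions", NULL, true  },
		{ "exotic-event", NULL, false },	/* open failed */
	};
	int i;

	for (i = 0; i < 3; i++)
		evs[i].leader = &evs[0];	/* one weak group */

	reset_weak_group(evs, 3, &evs[2]);
	return 0;
}

The same file also gains the -M/--metrics option, wired through parse_metric_groups() into metricgroup__parse_groups(); once the JSON metric definitions added below are installed, an invocation along the lines of perf stat -M TopDownL1 -a sleep 1 (illustrative, not taken from the patch) expands a metric group into its component events.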
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 4e2e61695986..813698a9b8c7 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1601,13 +1601,15 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
{ "syscalls:sys_exit_pselect6", process_exit_poll },
{ "syscalls:sys_exit_select", process_exit_poll },
};
- struct perf_data_file file = {
- .path = input_name,
- .mode = PERF_DATA_MODE_READ,
- .force = tchart->force,
+ struct perf_data data = {
+ .file = {
+ .path = input_name,
+ },
+ .mode = PERF_DATA_MODE_READ,
+ .force = tchart->force,
};
- struct perf_session *session = perf_session__new(&file, false,
+ struct perf_session *session = perf_session__new(&data, false,
&tchart->tool);
int ret = -EINVAL;
@@ -1617,7 +1619,7 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
symbol__init(&session->header.env);
(void)perf_header__process_sections(&session->header,
- perf_data_file__fd(session->file),
+ perf_data__fd(session->data),
tchart,
process_header);
@@ -1732,8 +1734,10 @@ static int timechart__io_record(int argc, const char **argv)
if (rec_argv == NULL)
return -ENOMEM;
- if (asprintf(&filter, "common_pid != %d", getpid()) < 0)
+ if (asprintf(&filter, "common_pid != %d", getpid()) < 0) {
+ free(rec_argv);
return -ENOMEM;
+ }
p = rec_argv;
for (i = 0; i < common_args_nr; i++)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ee954bde7e3e..477a8699f0b5 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -958,8 +958,16 @@ static int __cmd_top(struct perf_top *top)
if (perf_session__register_idle_thread(top->session) < 0)
goto out_delete;
+ if (top->nr_threads_synthesize > 1)
+ perf_set_multithreaded();
+
machine__synthesize_threads(&top->session->machines.host, &opts->target,
- top->evlist->threads, false, opts->proc_map_timeout);
+ top->evlist->threads, false,
+ opts->proc_map_timeout,
+ top->nr_threads_synthesize);
+
+ if (top->nr_threads_synthesize > 1)
+ perf_set_singlethreaded();
if (perf_hpp_list.socket) {
ret = perf_env__read_cpu_topology_map(&perf_env);
@@ -1112,6 +1120,7 @@ int cmd_top(int argc, const char **argv)
},
.max_stack = sysctl_perf_event_max_stack,
.sym_pcnt_filter = 5,
+ .nr_threads_synthesize = UINT_MAX,
};
struct record_opts *opts = &top.record_opts;
struct target *target = &opts->target;
@@ -1221,6 +1230,8 @@ int cmd_top(int argc, const char **argv)
OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
"Show entries in a hierarchy"),
OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
+ OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
+ "number of thread to run event synthesize"),
OPT_END()
};
const char * const top_usage[] = {
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 771ddab94bb0..f2757d38c7d7 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -578,7 +578,6 @@ static struct syscall_fmt {
} syscall_fmts[] = {
{ .name = "access",
.arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
- { .name = "arch_prctl", .alias = "prctl", },
{ .name = "bpf",
.arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
{ .name = "brk", .hexret = true,
@@ -634,6 +633,12 @@ static struct syscall_fmt {
#else
[2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#endif
+ { .name = "kcmp", .nr_args = 5,
+ .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, },
+ [1] = { .name = "pid2", .scnprintf = SCA_PID, },
+ [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, },
+ [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, },
+ [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, },
{ .name = "keyctl",
.arg = { [0] = STRARRAY(option, keyctl_options), }, },
{ .name = "kill",
@@ -703,6 +708,10 @@ static struct syscall_fmt {
[3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
{ .name = "poll", .timeout = true, },
{ .name = "ppoll", .timeout = true, },
+ { .name = "prctl", .alias = "arch_prctl",
+ .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ },
+ [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
+ [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
{ .name = "pread", .alias = "pread64", },
{ .name = "preadv", .alias = "pread", },
{ .name = "prlimit64",
@@ -985,6 +994,23 @@ size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
return printed;
}
+size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
+{
+ size_t printed = scnprintf(bf, size, "%d", fd);
+ struct thread *thread = machine__find_thread(trace->host, pid, pid);
+
+ if (thread) {
+ const char *path = thread__fd_path(thread, fd, trace);
+
+ if (path)
+ printed += scnprintf(bf + printed, size - printed, "<%s>", path);
+
+ thread__put(thread);
+ }
+
+ return printed;
+}
+
static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
struct syscall_arg *arg)
{
@@ -1131,13 +1157,21 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
evlist->threads, trace__tool_process, false,
- trace->opts.proc_map_timeout);
+ trace->opts.proc_map_timeout, 1);
if (err)
symbol__exit();
return err;
}
+static void trace__symbols__exit(struct trace *trace)
+{
+ machine__exit(trace->host);
+ trace->host = NULL;
+
+ symbol__exit();
+}
+
static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
int idx;
@@ -1828,16 +1862,14 @@ out_dump:
goto out_put;
}
-static void bpf_output__printer(enum binary_printer_ops op,
- unsigned int val, void *extra)
+static int bpf_output__printer(enum binary_printer_ops op,
+ unsigned int val, void *extra __maybe_unused, FILE *fp)
{
- FILE *output = extra;
unsigned char ch = (unsigned char)val;
switch (op) {
case BINARY_PRINT_CHAR_DATA:
- fprintf(output, "%c", isprint(ch) ? ch : '.');
- break;
+ return fprintf(fp, "%c", isprint(ch) ? ch : '.');
case BINARY_PRINT_DATA_BEGIN:
case BINARY_PRINT_LINE_BEGIN:
case BINARY_PRINT_ADDR:
@@ -1850,13 +1882,15 @@ static void bpf_output__printer(enum binary_printer_ops op,
default:
break;
}
+
+ return 0;
}
static void bpf_output__fprintf(struct trace *trace,
struct perf_sample *sample)
{
- print_binary(sample->raw_data, sample->raw_size, 8,
- bpf_output__printer, trace->output);
+ binary__fprintf(sample->raw_data, sample->raw_size, 8,
+ bpf_output__printer, NULL, trace->output);
}
static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
@@ -2078,6 +2112,7 @@ static int trace__record(struct trace *trace, int argc, const char **argv)
rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
else {
pr_err("Neither raw_syscalls nor syscalls events exist.\n");
+ free(rec_argv);
return -1;
}
}
@@ -2481,6 +2516,8 @@ out_disable:
}
out_delete_evlist:
+ trace__symbols__exit(trace);
+
perf_evlist__delete(evlist);
trace->evlist = NULL;
trace->live = false;
@@ -2528,10 +2565,12 @@ static int trace__replay(struct trace *trace)
const struct perf_evsel_str_handler handlers[] = {
{ "probe:vfs_getname", trace__vfs_getname, },
};
- struct perf_data_file file = {
- .path = input_name,
- .mode = PERF_DATA_MODE_READ,
- .force = trace->force,
+ struct perf_data data = {
+ .file = {
+ .path = input_name,
+ },
+ .mode = PERF_DATA_MODE_READ,
+ .force = trace->force,
};
struct perf_session *session;
struct perf_evsel *evsel;
@@ -2554,7 +2593,7 @@ static int trace__replay(struct trace *trace)
/* add tid to output */
trace->multiple_threads = true;
- session = perf_session__new(&file, false, &trace->tool);
+ session = perf_session__new(&data, false, &trace->tool);
if (session == NULL)
return -1;
@@ -2730,20 +2769,23 @@ DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_event
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
- DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host);
size_t printed = trace__fprintf_threads_header(fp);
struct rb_node *nd;
+ int i;
- if (threads == NULL) {
- fprintf(fp, "%s", "Error sorting output by nr_events!\n");
- return 0;
- }
+ for (i = 0; i < THREADS__TABLE_SIZE; i++) {
+ DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
- resort_rb__for_each_entry(nd, threads)
- printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
+ if (threads == NULL) {
+ fprintf(fp, "%s", "Error sorting output by nr_events!\n");
+ return 0;
+ }
- resort_rb__delete(threads);
+ resort_rb__for_each_entry(nd, threads)
+ printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
+ resort_rb__delete(threads);
+ }
return printed;
}
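
Both builtin-script.c and builtin-trace.c above move their raw-data dumps from print_binary() to binary__fprintf(), whose printer callback now receives the target FILE * and returns the number of characters it wrote (0 for ops it ignores). The sketch below shows a callback in that style together with a tiny driver loop so it runs standalone; the demo_ops enum and demo_dump() driver are simplified stand-ins, not the real print_binary.h interface.

#include <ctype.h>
#include <stdio.h>

/* Mirrors only the shape of the real binary printer ops, enough for a
 * self-contained demo. */
enum demo_ops { OP_LINE_BEGIN, OP_BYTE, OP_CHAR, OP_LINE_END };

/* Printer in the new style: gets the output stream and returns how much
 * it printed, so the driver can sum a total the way the perf helpers do. */
static int demo_printer(enum demo_ops op, unsigned int val, FILE *fp)
{
	unsigned char ch = (unsigned char)val;

	switch (op) {
	case OP_LINE_BEGIN:
		return fprintf(fp, "%08x:", val);
	case OP_BYTE:
		return fprintf(fp, " %02x", ch);
	case OP_CHAR:
		return fprintf(fp, "%c", isprint(ch) ? ch : '.');
	case OP_LINE_END:
		return fprintf(fp, "\n");
	}
	return 0;
}

/* Stand-in driver: 8 bytes per line, hex column then character column. */
static int demo_dump(const unsigned char *data, size_t len, FILE *fp)
{
	int printed = 0;
	size_t i, j;

	for (i = 0; i < len; i += 8) {
		printed += demo_printer(OP_LINE_BEGIN, (unsigned int)i, fp);
		for (j = i; j < i + 8 && j < len; j++)
			printed += demo_printer(OP_BYTE, data[j], fp);
		printed += fprintf(fp, "  ");
		for (j = i; j < i + 8 && j < len; j++)
			printed += demo_printer(OP_CHAR, data[j], fp);
		printed += demo_printer(OP_LINE_END, 0, fp);
	}
	return printed;
}

int main(void)
{
	const unsigned char sample[] = "BPF output\0\x01\x02\x03";

	return demo_dump(sample, sizeof(sample), stdout) < 0;
}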
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 50cd6228f506..77406d25e521 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -5,8 +5,10 @@ HEADERS='
include/uapi/drm/drm.h
include/uapi/drm/i915_drm.h
include/uapi/linux/fcntl.h
+include/uapi/linux/kcmp.h
include/uapi/linux/kvm.h
include/uapi/linux/perf_event.h
+include/uapi/linux/prctl.h
include/uapi/linux/sched.h
include/uapi/linux/stat.h
include/uapi/linux/vhost.h
@@ -58,6 +60,11 @@ check () {
}
+# Check if we have the kernel headers (tools/perf/../../include), else
+# we're probably on a detached tarball, so no point in trying to check
+# differences.
+test -d ../../include || exit 0
+
# simple diff check
for i in $HEADERS; do
check $i -B
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index f75f3dec7485..2357f4ccc9c7 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -66,6 +66,7 @@ struct record_opts {
unsigned int user_freq;
u64 branch_stack;
u64 sample_intr_regs;
+ u64 sample_user_regs;
u64 default_interval;
u64 user_interval;
size_t auxtrace_snapshot_size;
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
new file mode 100644
index 000000000000..00bfdb5c5acb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
@@ -0,0 +1,164 @@
+[
+ {
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline",
+ "MetricName": "UPI"
+ },
+ {
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+ "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
+ "MetricGroup": "Frontend",
+ "MetricName": "IFetch_Line_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
+ "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (threaded)",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "MetricGroup": "Pipeline;Summary",
+ "MetricName": "CPI"
+ },
+ {
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "CLKS"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots",
+ "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricName": "ILP"
+ },
+ {
+ "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
+ "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)",
+ "MetricGroup": "Unknown_Branches",
+ "MetricName": "BAClear_Cost"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any thread is active on the physical core",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "MetricGroup": "Memory_Bound;Memory_Lat",
+ "MetricName": "Load_Miss_Real_Latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricGroup": "Memory_Bound;Memory_BW",
+ "MetricName": "MLP"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED)) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TLB",
+ "MetricName": "Page_Walks_Utilization"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
+ "MetricGroup": "FLOPS;Summary",
+ "MetricName": "GFLOPs"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
+ "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "MetricGroup": "SMT;Summary",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Summary",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
new file mode 100644
index 000000000000..49c5f123d811
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
@@ -0,0 +1,164 @@
+[
+ {
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline",
+ "MetricName": "UPI"
+ },
+ {
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+ "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
+ "MetricGroup": "Frontend",
+ "MetricName": "IFetch_Line_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
+ "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (threaded)",
+ "MetricExpr": "1 / INST_RETIRED.ANY / cycles",
+ "MetricGroup": "Pipeline;Summary",
+ "MetricName": "CPI"
+ },
+ {
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "CLKS"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots",
+ "MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / ( cpu@uops_executed.core\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricName": "ILP"
+ },
+ {
+ "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
+ "MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - ( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED ) ) / RS_EVENTS.EMPTY_END",
+ "MetricGroup": "Unknown_Branches",
+ "MetricName": "BAClear_Cost"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any thread is active on the physical core",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "MetricGroup": "Memory_Bound;Memory_Lat",
+ "MetricName": "Load_Miss_Real_Latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Memory_Bound;Memory_BW",
+ "MetricName": "MLP"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED)) / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
+ "MetricGroup": "TLB",
+ "MetricName": "Page_Walks_Utilization"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 / duration_time",
+ "MetricGroup": "FLOPS;Summary",
+ "MetricName": "GFLOPs"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
+ "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "MetricGroup": "SMT;Summary",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Summary",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
new file mode 100644
index 000000000000..5a7f1ec24200
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
@@ -0,0 +1,164 @@
+[
+ {
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline",
+ "MetricName": "UPI"
+ },
+ {
+ "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
+ "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
+ "MetricGroup": "Frontend",
+ "MetricName": "IFetch_Line_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
+ "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (threaded)",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "MetricGroup": "Pipeline;Summary",
+ "MetricName": "CPI"
+ },
+ {
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "CLKS"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots",
+ "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricName": "ILP"
+ },
+ {
+ "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
+ "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)",
+ "MetricGroup": "Unknown_Branches",
+ "MetricName": "BAClear_Cost"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any thread is active on the physical core",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "MetricGroup": "Memory_Bound;Memory_Lat",
+ "MetricName": "Load_Miss_Real_Latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricGroup": "Memory_Bound;Memory_BW",
+ "MetricName": "MLP"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED) ) / (2*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles))",
+ "MetricGroup": "TLB",
+ "MetricName": "Page_Walks_Utilization"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
+ "MetricGroup": "FLOPS;Summary",
+ "MetricName": "GFLOPs"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
+ "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "MetricGroup": "SMT;Summary",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Summary",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ }
+]
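In the per-event files that follow, each entry's EventCode and UMask pair identifies one hardware event. A minimal sketch, assuming the conventional r<umask><eventcode> raw form that perf stat -e accepts (the helper is illustrative, not code from tools/perf):

    # Illustrative assumption: fold an entry's UMask and EventCode into a raw
    # event string; nothing below is taken from the patch itself.
    def raw_event(event_code: str, umask: str) -> str:
        return "r%02x%02x" % (int(umask, 16), int(event_code, 16))

    # LONGEST_LAT_CACHE.MISS (EventCode 0x2E, UMask 0x41) from the entry below:
    print(raw_event("0x2E", "0x41"))   # r412e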
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
new file mode 100644
index 000000000000..b4791b443a66
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
@@ -0,0 +1,1453 @@
+[
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache request misses"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.",
+ "EventCode": "0x30",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "L2_REJECT_XQ.ALL",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests rejected by the XQ"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
+ "EventCode": "0x31",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "CORE_REJECT_L2Q.ALL",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests rejected by the L2Q"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "DL1.REPLACEMENT",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L1 Cache evictions for dirty data"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.",
+ "EventCode": "0x86",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss."
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "EventCode": "0xB7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts locked memory uops retired. This includes regular locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x21",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Locked load uops retired (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x43",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts the number of load uops retired.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops retired (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts the number of store uops retired.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Store uops retired (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x83",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Memory uops retired (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts load uops retired that hit the L1 data cache.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts load uops retired that hit in the L2 cache.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts load uops retired that miss the L1 data cache.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts load uops retired that miss in the L2 cache.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops retired that missed L2 (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts load uops retired where the cache line containing the data was in the modified state of another core or modules cache (HITM). More specifically, this means that when the load address was checked by other caching agents (typically another processor) in the system, one of those caching agents indicated that they had a dirty copy of the data. Loads that obtain a HITM response incur greater latency than most is typical for a load. In addition, since HITM indicates that some other processor had this data in its cache, it implies that the data was shared between processors, or potentially was a lock or semaphore value. This event is useful for locating sharing, false sharing, and contended locks.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts memory load uops retired where the data is retrieved from the WCB (or fill buffer), indicating that the load found its data while that data was in the process of being brought into the L1 cache. Typically a load will receive this indication when some other load or prefetch missed the L1 cache and was in the process of retrieving the cache line containing the data, but that process had not yet finished (and written the data back to the cache). For example, consider load X and Y, both referencing the same cache line that is not in the L1 cache. If load X misses cache first, it obtains and WCB (or fill buffer) and begins the process of requesting the data. When load Y requests the data, it will either hit the WCB, or the L1 cache, depending on exactly what time the request to Y occurs.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads retired that hit WCB (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts memory load uops retired where the data is retrieved from DRAM. Event is counted at retirement, so the speculative loads are ignored. A memory load can hit (or miss) the L1 cache, hit (or miss) the L2 cache, hit DRAM, hit in the WCB or receive a HITM response.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads retired that came from DRAM (Precise event capable)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010400",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040400",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000400",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000400",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000400",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010800",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040800",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000800",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000800",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000800",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000011000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000041000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200001000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000001000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000001000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000012000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000042000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200002000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000002000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000002000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000014800",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000044800",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200004800",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000004800",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000004800",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000018000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000048000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000008000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000013010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000043010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200003010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000003010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000003010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000013091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000043091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200003091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000003091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000003091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x00000132b7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x00000432b7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x02000032b7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x10000032b7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6, 0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x40000032b7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
+ "PDIR_COUNTER": "na",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ }
+] \ No newline at end of file
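Once this table is built into perf, the OFFCORE_RESPONSE aliases above can be selected by their lowercased EventName; the MSRValue/MSRIndex fields tell perf how to program MSR_OFFCORE_RESP0/1 (0x1a6/0x1a7), so no manual MSR setup should be needed. A minimal usage sketch (assuming a Goldmont Plus system and that the alias resolves through the pmu-events tables as usual):

    # count offcore requests that received any response, system-wide, for one second
    $ perf stat -a -e offcore_response.any_request.any_response -- sleep 1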
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json b/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
new file mode 100644
index 000000000000..a7878965ceab
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
@@ -0,0 +1,62 @@
+[
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is in the ICache (hit). The event strives to count on a cache line basis, so that multiple accesses which hit in a single cache line count as one ICACHE.HIT. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "ICACHE.HIT",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "References per ICache line that are available in the ICache (hit). This event counts differently than Intel processors based on Silvermont microarchitecture"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is not in the ICache (miss). The event strives to count on a cache line basis, so that multiple accesses which miss in a single cache line count as one ICACHE.MISS. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is not in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "ICACHE.MISSES",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "References per ICache line that are not available in the ICache (miss). This event counts differently than Intel processors based on Silvermont microarchitecture"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line. The event strives to count on a cache line basis, so that multiple fetches to a single cache line count as one ICACHE.ACCESS. Specifically, the event counts when accesses from straight line code crosses the cache line boundary, or when a branch target is to a new line.\r\nThis event counts differently than Intel processors based on Silvermont microarchitecture.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "ICACHE.ACCESSES",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "References per ICache line. This event counts differently than Intel processors based on Silvermont microarchitecture"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times the Microcode Sequencer (MS) starts a flow of uops from the MSROM. It does not count every time a uop is read from the MSROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort that initiates a flow of uops. The event will count MS startups for uops that are speculative, and subsequently cleared by branch mispredict or a machine clear.",
+ "EventCode": "0xE7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MS_DECODED.MS_ENTRY",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "MS decode starts"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times the prediction (from the predecode cache) for instruction length is incorrect.",
+ "EventCode": "0xE9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Decode restrictions due to predicting wrong instruction length"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
new file mode 100644
index 000000000000..91e0815f3ffb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
@@ -0,0 +1,38 @@
+[
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts when a memory load of a uop spans a page boundary (a split) is retired.",
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops that split a page (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts when a memory store of a uop spans a page boundary (a split) is retired.",
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Store uops that split a page (Precise event capable)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved - as another core is in the process of modifying the data.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "20003",
+ "BriefDescription": "Machine clears due to memory ordering issue"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/other.json b/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
new file mode 100644
index 000000000000..b860374418ab
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
@@ -0,0 +1,98 @@
+[
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
+ "EventCode": "0x86",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "FETCH_STALL.ALL",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles code-fetch stalled due to any reason."
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
+ "EventCode": "0x86",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles the code-fetch stalls and an ITLB miss is outstanding."
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Unfilled issue slots per cycle"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Unfilled issue slots per cycle to recover"
+ },
+ {
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts hardware interrupts received by the processor.",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "HW_INTERRUPTS.RECEIVED",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "203",
+ "BriefDescription": "Hardware interrupts received"
+ },
+ {
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "HW_INTERRUPTS.MASKED",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles hardware interrupts are masked"
+ },
+ {
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles pending interrupts are masked"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
new file mode 100644
index 000000000000..ccf1aed69197
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
@@ -0,0 +1,544 @@
+[
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 0",
+ "UMask": "0x1",
+ "PEBScounters": "32",
+ "EventName": "INST_RETIRED.ANY",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired (Fixed event)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "PEBScounters": "33",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when core is not halted (Fixed event)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x3",
+ "PEBScounters": "34",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when core is not halted (Fixed event)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "LD_BLOCKS.4K_ALIAS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "LD_BLOCKS.UTLB_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads blocked (Precise event capable)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Uops issued to the back end per cycle"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when core is not halted"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Reference cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when core is not halted"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "UOPS_NOT_DELIVERED.ANY",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Uops requested but not-delivered to the back-end per cycle"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts INST_RETIRED.ANY using the Reduced Skid PEBS feature that reduces the shadow in which events aren't counted allowing for a more unbiased distribution of samples across instructions retired.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired - using Reduced Skid PEBS feature"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts uops which retired.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.ANY",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops retired (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts uops retired that are from the complex flows issued by the micro-sequencer (MS). Counts both the uops from a micro-coded instruction, and the uops that might be generated from a micro-coded assist.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.MS",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "MS uops retired (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of floating point divide uops retired.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.FPDIV",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Floating point divide uops retired (Precise Event Capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of integer divide uops retired.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.IDIV",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Integer divide uops retired (Precise Event Capable)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts machine clears for any reason.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.ALL",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "20003",
+ "BriefDescription": "All machine clears"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "20003",
+ "BriefDescription": "Self-Modifying Code detected"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts machine clears due to floating point (FP) operations needing assists. For instance, if the result was a floating point denormal, the hardware clears the pipeline and reissues uops to produce the correct IEEE compliant denormal result.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "20003",
+ "BriefDescription": "Machine clears due to FP assists"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts machine clears due to memory disambiguation. Memory disambiguation happens when a load which has been issued conflicts with a previous unretired store in the pipeline whose address was not known at issue time, but is later resolved to be the same as the load address.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "20003",
+ "BriefDescription": "Machine clears due to memory disambiguation"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times that the machines clears due to a page fault. Covers both I-side and D-side(Loads/Stores) page faults. A page fault occurs when either page is not present, or an access violation",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.PAGE_FAULT",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "20003",
+ "BriefDescription": "Machines clear due to a page fault"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired branch instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7e",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired conditional branch instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts the number of taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired taken branch instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xbf",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired far branch instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xeb",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near return branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf7",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired near return instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near CALL branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf9",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired near call instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near indirect CALL branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfb",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired near indirect call instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near relative CALL branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfd",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired near relative call instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfe",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7e",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0xeb",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf7",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfb",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfe",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles if either divide unit is busy.",
+ "EventCode": "0xCD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "CYCLES_DIV_BUSY.ALL",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a divider is busy"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles the integer divide unit is busy.",
+ "EventCode": "0xCD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "CYCLES_DIV_BUSY.IDIV",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles the integer divide unit is busy"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles the floating point divide unit is busy.",
+ "EventCode": "0xCD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "CYCLES_DIV_BUSY.FPDIV",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles the FP divide unit is busy"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BACLEARS.ALL",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "BACLEARs asserted for any branch type"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts BACLEARS on return instructions.",
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BACLEARS.RETURN",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "BACLEARs asserted for return branch"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "BACLEARS.COND",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "BACLEARs asserted for conditional branch"
+ }
+] \ No newline at end of file
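The UOPS_NOT_DELIVERED.ANY description above implies a 3-wide allocation (up to 3 issue slots requested per cycle), so a rough front-end bound fraction can be estimated as uops_not_delivered.any / (3 * cycles). A minimal sketch using the generic cycles event for the denominator (./workload is a placeholder, and the alias name is assumed to resolve through the pmu-events tables as usual):

    $ perf stat -e cycles,uops_not_delivered.any -- ./workload
    # front-end bound fraction ~= uops_not_delivered.any / (3 * cycles)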
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
new file mode 100644
index 000000000000..0b53a3b0dfb8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
@@ -0,0 +1,218 @@
+[
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Page walk completed due to a demand load to a 4K page"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Page walk completed due to a demand load to a 2M or 4M page"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 1GB pages. The page walks can end with or without a page fault.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1GB",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Page walk completed due to a demand load to a 1GB page"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts once per cycle for each page walk occurring due to a load (demand data loads or SW prefetches). Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Page walks outstanding due to a demand load every cycle."
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Page walk completed due to a demand data store to a 4K page"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Page walk completed due to a demand data store to a 2M or 4M page"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1GB",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Page walk completed due to a demand data store to a 1GB page"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts once per cycle for each page walk occurring due to a demand data store. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Page walks outstanding due to a demand data store every cycle."
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts once per cycle for each page walk only while traversing the Extended Page Table (EPT), and does not count during the rest of the translation. The EPT is used for translating Guest-Physical Addresses to Physical Addresses for Virtual Machine Monitors (VMMs). Average cycles per walk can be calculated by dividing the count by the number of walks.",
+ "EventCode": "0x4F",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "EPT.WALK_PENDING",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Page walks outstanding due to walking the EPT every cycle"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translations are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
+ "EventCode": "0x81",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "ITLB.MISS",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "ITLB misses"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Page walk completed due to an instruction fetch in a 4K page"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Page walk completed due to an instruction fetch in a 2M or 4M page"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1GB",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Page walk completed due to an instruction fetch in a 1GB page"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts once per cycle for each page walk occurring due to an instruction fetch. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Page walks outstanding due to an instruction fetch every cycle."
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts STLB flushes. The TLBs are flushed on instructions like INVLPG and MOV to CR3.",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "TLB_FLUSHES.STLB_ANY",
+ "PDIR_COUNTER": "na",
+ "SampleAfterValue": "20003",
+ "BriefDescription": "STLB flushes"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts load uops retired that caused a DTLB miss.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x11",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts store uops retired that caused a DTLB miss.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x12",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts uops retired that had a DTLB miss on load, store or either. Note that when two distinct memory operations to the same page miss the DTLB, only one of them will be recorded as a DTLB miss.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x13",
+ "PEBScounters": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
+ }
+] \ No newline at end of file
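The DTLB_LOAD_MISSES events above pair WALK_COMPLETED_* counts with a WALK_PENDING cycle count; as the descriptions say, average cycles per walk is the pending-cycle count divided by the number of completed walks. A minimal sketch of that arithmetic in C, assuming the counter values have already been read (names are illustrative, not a perf API):

    /*
     * Hedged sketch, not part of the patch: WALK_PENDING counts one per
     * cycle for each outstanding walk, so dividing by the completed-walk
     * count gives the average walk latency.  Values are illustrative.
     */
    #include <stdio.h>

    static double avg_load_walk_cycles(unsigned long long walk_pending_cycles,
                                       unsigned long long walks_4k,
                                       unsigned long long walks_2m_4m,
                                       unsigned long long walks_1gb)
    {
        unsigned long long walks = walks_4k + walks_2m_4m + walks_1gb;

        return walks ? (double)walk_pending_cycles / walks : 0.0;
    }

    int main(void)
    {
        /* Example counts, purely illustrative. */
        printf("avg cycles per load walk: %.1f\n",
               avg_load_walk_cycles(120000ULL, 800ULL, 150ULL, 2ULL));
        return 0;
    }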
diff --git a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
new file mode 100644
index 000000000000..5ab5c78fe580
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
@@ -0,0 +1,158 @@
+[
+ {
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline",
+ "MetricName": "UPI"
+ },
+ {
+ "BriefDescription": "Rough estimation of the fraction of fetched line bytes that were likely consumed by program instructions",
+ "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
+ "MetricGroup": "Frontend",
+ "MetricName": "IFetch_Line_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
+ "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (threaded)",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "MetricGroup": "Pipeline;Summary",
+ "MetricName": "CPI"
+ },
+ {
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "CLKS"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots",
+ "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
+ "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricName": "ILP"
+ },
+ {
+ "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
+ "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)",
+ "MetricGroup": "Unknown_Branches",
+ "MetricName": "BAClear_Cost"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any thread is active on the physical core",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "MetricGroup": "Memory_Bound;Memory_Lat",
+ "MetricName": "Load_Miss_Real_Latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least 1 such miss)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricGroup": "Memory_Bound;Memory_BW",
+ "MetricName": "MLP"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TLB",
+ "MetricName": "Page_Walks_Utilization"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
+ "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "MetricGroup": "SMT;Summary",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Summary",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ }
+]
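The MetricExpr strings in this file are arithmetic over event counts; perf evaluates them with its own expression parser, including the #SMT_on conditionals and the cpu@...@ event syntax. As a minimal sketch under the assumption that the raw counts are already available, the simpler metrics reduce to plain ratios (names below are illustrative only):

    #include <stdio.h>

    /* "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD" (the IPC metric above) */
    static double metric_ipc(double inst_retired, double thread_clocks)
    {
        return inst_retired / thread_clocks;
    }

    /* "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY" (the UPI metric above) */
    static double metric_upi(double retire_slots, double inst_retired)
    {
        return retire_slots / inst_retired;
    }

    int main(void)
    {
        /* Example counts, purely illustrative. */
        double inst = 2.0e9, clk = 1.0e9, slots = 2.2e9;
        double ipc = metric_ipc(inst, clk);

        printf("IPC = %.2f, CPI = %.2f, UPI = %.2f\n",
               ipc, 1.0 / ipc, metric_upi(slots, inst));
        return 0;
    }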
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
new file mode 100644
index 000000000000..5ab5c78fe580
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
@@ -0,0 +1,158 @@
+[
+ {
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline",
+ "MetricName": "UPI"
+ },
+ {
+ "BriefDescription": "Rough estimation of the fraction of fetched line bytes that were likely consumed by program instructions",
+ "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
+ "MetricGroup": "Frontend",
+ "MetricName": "IFetch_Line_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
+ "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (threaded)",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "MetricGroup": "Pipeline;Summary",
+ "MetricName": "CPI"
+ },
+ {
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "CLKS"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots",
+ "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
+ "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricName": "ILP"
+ },
+ {
+ "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
+ "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)",
+ "MetricGroup": "Unknown_Branches",
+ "MetricName": "BAClear_Cost"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any thread is active on the physical core",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "MetricGroup": "Memory_Bound;Memory_Lat",
+ "MetricName": "Load_Miss_Real_Latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least 1 such miss)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricGroup": "Memory_Bound;Memory_BW",
+ "MetricName": "MLP"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TLB",
+ "MetricName": "Page_Walks_Utilization"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
+ "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "MetricGroup": "SMT;Summary",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Summary",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
new file mode 100644
index 000000000000..7c2679514efb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
@@ -0,0 +1,164 @@
+[
+ {
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline",
+ "MetricName": "UPI"
+ },
+ {
+ "BriefDescription": "Rough estimation of the fraction of fetched line bytes that were likely consumed by program instructions",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
+ "MetricGroup": "Frontend",
+ "MetricName": "IFetch_Line_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
+ "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (threaded)",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "MetricGroup": "Pipeline;Summary",
+ "MetricName": "CPI"
+ },
+ {
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "CLKS"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots",
+ "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricName": "ILP"
+ },
+ {
+ "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
+ "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)",
+ "MetricGroup": "Unknown_Branches",
+ "MetricName": "BAClear_Cost"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any thread is active on the physical core",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "MetricGroup": "Memory_Bound;Memory_Lat",
+ "MetricName": "Load_Miss_Real_Latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least 1 such miss)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricGroup": "Memory_Bound;Memory_BW",
+ "MetricName": "MLP"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TLB",
+ "MetricName": "Page_Walks_Utilization"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
+ "MetricGroup": "FLOPS;Summary",
+ "MetricName": "GFLOPs"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
+ "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "MetricGroup": "SMT;Summary",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Summary",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
new file mode 100644
index 000000000000..7c2679514efb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
@@ -0,0 +1,164 @@
+[
+ {
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline",
+ "MetricName": "UPI"
+ },
+ {
+ "BriefDescription": "Rough estimation of the fraction of fetched line bytes that were likely consumed by program instructions",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
+ "MetricGroup": "Frontend",
+ "MetricName": "IFetch_Line_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
+ "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (threaded)",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "MetricGroup": "Pipeline;Summary",
+ "MetricName": "CPI"
+ },
+ {
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "CLKS"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots",
+ "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricName": "ILP"
+ },
+ {
+ "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
+ "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)",
+ "MetricGroup": "Unknown_Branches",
+ "MetricName": "BAClear_Cost"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any thread is active on the physical core",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
+ "MetricGroup": "Memory_Bound;Memory_Lat",
+ "MetricName": "Load_Miss_Real_Latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least 1 such miss)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricGroup": "Memory_Bound;Memory_BW",
+ "MetricName": "MLP"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TLB",
+ "MetricName": "Page_Walks_Utilization"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
+ "MetricGroup": "FLOPS;Summary",
+ "MetricName": "GFLOPs"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
+ "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "MetricGroup": "SMT;Summary",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Summary",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
new file mode 100644
index 000000000000..fd7d7c438226
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
@@ -0,0 +1,140 @@
+[
+ {
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline",
+ "MetricName": "UPI"
+ },
+ {
+ "BriefDescription": "Rough estimation of the fraction of fetched line bytes that were likely consumed by program instructions",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
+ "MetricGroup": "Frontend",
+ "MetricName": "IFetch_Line_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
+ "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (threaded)",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "MetricGroup": "Pipeline;Summary",
+ "MetricName": "CPI"
+ },
+ {
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "CLKS"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots",
+ "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
+ "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricName": "ILP"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any thread is active on the physical core",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
+ "MetricGroup": "FLOPS;Summary",
+ "MetricName": "GFLOPs"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
+ "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "MetricGroup": "SMT;Summary",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Summary",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 4ea068366c3e..fe1a2c47cabf 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -9,6 +9,7 @@ GenuineIntel-6-27,v4,bonnell,core
GenuineIntel-6-36,v4,bonnell,core
GenuineIntel-6-35,v4,bonnell,core
GenuineIntel-6-5C,v8,goldmont,core
+GenuineIntel-6-7A,v1,goldmontplus,core
GenuineIntel-6-3C,v24,haswell,core
GenuineIntel-6-45,v24,haswell,core
GenuineIntel-6-46,v24,haswell,core
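Each mapfile.csv row is cpuid,version,event-directory,type; the new GenuineIntel-6-7A line routes Goldmont Plus (family 6, model 0x7A) CPUs to the goldmontplus JSON files added in this patch. A hedged illustration of the lookup idea, assuming an exact cpuid string match; perf's real resolution happens through the generated pmu-events.c and is more involved:

    #include <stdio.h>
    #include <string.h>

    struct map_row {
        const char *cpuid;   /* e.g. "GenuineIntel-6-7A" */
        const char *version; /* e.g. "v1" */
        const char *dir;     /* e.g. "goldmontplus" */
        const char *type;    /* e.g. "core" */
    };

    static const struct map_row rows[] = {
        { "GenuineIntel-6-5C", "v8",  "goldmont",     "core" },
        { "GenuineIntel-6-7A", "v1",  "goldmontplus", "core" },
        { "GenuineIntel-6-3C", "v24", "haswell",      "core" },
    };

    static const char *events_dir_for(const char *cpuid)
    {
        size_t i;

        for (i = 0; i < sizeof(rows) / sizeof(rows[0]); i++)
            if (!strcmp(rows[i].cpuid, cpuid))
                return rows[i].dir;
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", events_dir_for("GenuineIntel-6-7A")); /* goldmontplus */
        return 0;
    }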
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
new file mode 100644
index 000000000000..fd7d7c438226
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
@@ -0,0 +1,140 @@
+[
+ {
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline",
+ "MetricName": "UPI"
+ },
+ {
+ "BriefDescription": "Rough estimation of the fraction of fetched line bytes that were likely consumed by program instructions",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )",
+ "MetricGroup": "Frontend",
+ "MetricName": "IFetch_Line_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
+ "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (threaded)",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "MetricGroup": "Pipeline;Summary",
+ "MetricName": "CPI"
+ },
+ {
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "CLKS"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots",
+ "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
+ "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricName": "ILP"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any thread is active on the physical core",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time",
+ "MetricGroup": "FLOPS;Summary",
+ "MetricName": "GFLOPs"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
+ "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "MetricGroup": "SMT;Summary",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Summary",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ }
+]
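The *_Core_Residency and *_Pkg_Residency metrics above all have the same shape: a C-state residency counter divided by the TSC over the same interval, scaled to percent. A minimal sketch under that assumption (the deltas are illustrative and not read from real MSRs):

    #include <stdio.h>

    static double residency_pct(unsigned long long residency_delta,
                                unsigned long long tsc_delta)
    {
        return tsc_delta ? 100.0 * (double)residency_delta / (double)tsc_delta
                         : 0.0;
    }

    int main(void)
    {
        /* Example deltas, purely illustrative. */
        printf("C6 core residency: %.1f%%\n",
               residency_pct(750000000ULL, 1000000000ULL));
        return 0;
    }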
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
new file mode 100644
index 000000000000..36c903faed0b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -0,0 +1,164 @@
+[
+ {
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline",
+ "MetricName": "UPI"
+ },
+ {
+ "BriefDescription": "Rough estimation of the fraction of fetched line bytes that were likely consumed by program instructions",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
+ "MetricGroup": "Frontend",
+ "MetricName": "IFetch_Line_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
+ "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (threaded)",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "MetricGroup": "Pipeline;Summary",
+ "MetricName": "CPI"
+ },
+ {
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "CLKS"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots",
+ "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricName": "ILP"
+ },
+ {
+ "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
+ "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
+ "MetricGroup": "Unknown_Branches",
+ "MetricName": "BAClear_Cost"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any thread is active on the physical core",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
+ "MetricGroup": "Memory_Bound;Memory_Lat",
+ "MetricName": "Load_Miss_Real_Latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least 1 such miss)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricGroup": "Memory_Bound;Memory_BW",
+ "MetricName": "MLP"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
+ "MetricGroup": "TLB",
+ "MetricName": "Page_Walks_Utilization"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
+ "MetricGroup": "FLOPS;Summary",
+ "MetricName": "GFLOPs"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
+ "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "MetricGroup": "SMT;Summary",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Summary",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ }
+]
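The GFLOPs expression above weights each FP_ARITH_INST_RETIRED count by the number of floating-point operations per retired instruction: 1 for scalar, 2 for 128-bit packed double, 4 for 128-bit packed single and 256-bit packed double, and 8 for 256-bit packed single. A minimal sketch of that arithmetic, with purely illustrative counts:

    #include <stdio.h>

    static double gflops(double scalar_sp, double scalar_dp,
                         double pk128_dp, double pk128_sp,
                         double pk256_dp, double pk256_sp,
                         double seconds)
    {
        double flops = 1.0 * (scalar_sp + scalar_dp) +
                       2.0 * pk128_dp +
                       4.0 * (pk128_sp + pk256_dp) +
                       8.0 * pk256_sp;

        return flops / 1e9 / seconds;
    }

    int main(void)
    {
        /* Example counts over a 1-second window, purely illustrative. */
        printf("GFLOPs = %.2f\n", gflops(1e8, 1e8, 5e7, 5e7, 2e7, 2e7, 1.0));
        return 0;
    }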
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
new file mode 100644
index 000000000000..36c903faed0b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -0,0 +1,164 @@
+[
+ {
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline",
+ "MetricName": "UPI"
+ },
+ {
+ "BriefDescription": "Rough estimation of the fraction of fetched line bytes that were likely consumed by program instructions",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
+ "MetricGroup": "Frontend",
+ "MetricName": "IFetch_Line_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
+ "MetricGroup": "DSB; Frontend_Bandwidth",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (threaded)",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "MetricGroup": "Pipeline;Summary",
+ "MetricName": "CPI"
+ },
+ {
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "CLKS"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots",
+ "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricName": "ILP"
+ },
+ {
+ "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
+ "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
+ "MetricGroup": "Unknown_Branches",
+ "MetricName": "BAClear_Cost"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any thread is active on the physical core",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
+ "MetricGroup": "Memory_Bound;Memory_Lat",
+ "MetricName": "Load_Miss_Real_Latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least 1 such miss)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricGroup": "Memory_Bound;Memory_BW",
+ "MetricName": "MLP"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
+ "MetricGroup": "TLB",
+ "MetricName": "Page_Walks_Utilization"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
+ "MetricGroup": "FLOPS;Summary",
+ "MetricName": "GFLOPs"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
+ "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "MetricGroup": "SMT;Summary",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Summary",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ }
+]
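Each MetricExpr above is an arithmetic expression over the named events that perf evaluates after reading the counters. A minimal C sketch, assuming hypothetical counter readings, of how the CPI and UPI expressions reduce to plain ratios:

#include <stdio.h>

int main(void)
{
	/* hypothetical counter readings for one measurement interval */
	double inst_retired_any   = 1.2e9;  /* INST_RETIRED.ANY */
	double cycles             = 1.5e9;  /* CPU_CLK_UNHALTED.THREAD */
	double uops_retired_slots = 1.6e9;  /* UOPS_RETIRED.RETIRE_SLOTS */

	/* CPI = 1 / (INST_RETIRED.ANY / cycles) */
	double cpi = 1.0 / (inst_retired_any / cycles);
	/* UPI = UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY */
	double upi = uops_retired_slots / inst_retired_any;

	printf("CPI=%.2f UPI=%.2f\n", cpi, upi);
	return 0;
}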
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index d51dc9ca8861..9eb7047bafe4 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -292,7 +292,7 @@ static int print_events_table_entry(void *data, char *name, char *event,
char *desc, char *long_desc,
char *pmu, char *unit, char *perpkg,
char *metric_expr,
- char *metric_name)
+ char *metric_name, char *metric_group)
{
struct perf_entry_data *pd = data;
FILE *outfp = pd->outfp;
@@ -304,8 +304,10 @@ static int print_events_table_entry(void *data, char *name, char *event,
*/
fprintf(outfp, "{\n");
- fprintf(outfp, "\t.name = \"%s\",\n", name);
- fprintf(outfp, "\t.event = \"%s\",\n", event);
+ if (name)
+ fprintf(outfp, "\t.name = \"%s\",\n", name);
+ if (event)
+ fprintf(outfp, "\t.event = \"%s\",\n", event);
fprintf(outfp, "\t.desc = \"%s\",\n", desc);
fprintf(outfp, "\t.topic = \"%s\",\n", topic);
if (long_desc && long_desc[0])
@@ -320,6 +322,8 @@ static int print_events_table_entry(void *data, char *name, char *event,
fprintf(outfp, "\t.metric_expr = \"%s\",\n", metric_expr);
if (metric_name)
fprintf(outfp, "\t.metric_name = \"%s\",\n", metric_name);
+ if (metric_group)
+ fprintf(outfp, "\t.metric_group = \"%s\",\n", metric_group);
fprintf(outfp, "},\n");
return 0;
@@ -357,6 +361,9 @@ static char *real_event(const char *name, char *event)
{
int i;
+ if (!name)
+ return NULL;
+
for (i = 0; fixed[i].name; i++)
if (!strcasecmp(name, fixed[i].name))
return (char *)fixed[i].event;
@@ -369,7 +376,7 @@ int json_events(const char *fn,
char *long_desc,
char *pmu, char *unit, char *perpkg,
char *metric_expr,
- char *metric_name),
+ char *metric_name, char *metric_group),
void *data)
{
int err = -EIO;
@@ -397,6 +404,7 @@ int json_events(const char *fn,
char *unit = NULL;
char *metric_expr = NULL;
char *metric_name = NULL;
+ char *metric_group = NULL;
unsigned long long eventcode = 0;
struct msrmap *msr = NULL;
jsmntok_t *msrval = NULL;
@@ -476,6 +484,8 @@ int json_events(const char *fn,
addfield(map, &perpkg, "", "", val);
} else if (json_streq(map, field, "MetricName")) {
addfield(map, &metric_name, "", "", val);
+ } else if (json_streq(map, field, "MetricGroup")) {
+ addfield(map, &metric_group, "", "", val);
} else if (json_streq(map, field, "MetricExpr")) {
addfield(map, &metric_expr, "", "", val);
for (s = metric_expr; *s; s++)
@@ -501,10 +511,11 @@ int json_events(const char *fn,
addfield(map, &event, ",", filter, NULL);
if (msr != NULL)
addfield(map, &event, ",", msr->pname, msrval);
- fixname(name);
+ if (name)
+ fixname(name);
err = func(data, name, real_event(name, event), desc, long_desc,
- pmu, unit, perpkg, metric_expr, metric_name);
+ pmu, unit, perpkg, metric_expr, metric_name, metric_group);
free(event);
free(desc);
free(name);
@@ -516,6 +527,7 @@ int json_events(const char *fn,
free(unit);
free(metric_expr);
free(metric_name);
+ free(metric_group);
if (err)
break;
tok += j;
diff --git a/tools/perf/pmu-events/jevents.h b/tools/perf/pmu-events/jevents.h
index d87efd2685b8..4684c673c445 100644
--- a/tools/perf/pmu-events/jevents.h
+++ b/tools/perf/pmu-events/jevents.h
@@ -7,7 +7,7 @@ int json_events(const char *fn,
char *long_desc,
char *pmu,
char *unit, char *perpkg, char *metric_expr,
- char *metric_name),
+ char *metric_name, char *metric_group),
void *data);
char *get_cpu_str(void);
diff --git a/tools/perf/pmu-events/pmu-events.h b/tools/perf/pmu-events/pmu-events.h
index e08789ddfe6c..92a4d15ee0b9 100644
--- a/tools/perf/pmu-events/pmu-events.h
+++ b/tools/perf/pmu-events/pmu-events.h
@@ -16,6 +16,7 @@ struct pmu_event {
const char *perpkg;
const char *metric_expr;
const char *metric_name;
+ const char *metric_group;
};
/*
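With the new metric_group field wired through jevents, print_events_table_entry() emits metric-only entries (no .name/.event, since those are NULL for metrics) into the generated pmu-events.c tables. A rough sketch of what one generated entry could look like, with the topic string being a hypothetical placeholder:

{
	.desc = "Cycles Per Instruction (threaded)",
	.topic = "skylakex metrics",          /* hypothetical topic string */
	.metric_expr = "1 / (inst_retired.any / cycles)",
	.metric_name = "CPI",
	.metric_group = "Pipeline;Summary",
},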
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index c180bbcdbef6..0e1367f90af5 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -167,7 +167,7 @@ static int run_dir(const char *d, const char *perf)
snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
d, d, perf, vcnt, v);
- return system(cmd);
+ return system(cmd) ? TEST_FAIL : TEST_OK;
}
int test__attr(struct test *test __maybe_unused, int subtest __maybe_unused)
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index 907b1b2f56ad..ff9b60b99f52 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -238,6 +238,7 @@ class Test(object):
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
+ res_event = {}
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
@@ -254,7 +255,10 @@ class Test(object):
if exp_event.optional():
log.debug(" %s does not match, but is optional" % exp_name)
else:
- exp_event.diff(res_event)
+ if not res_event:
+ log.debug(" res_event is empty");
+ else:
+ exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
index 31e0b1da830b..37940665f736 100644
--- a/tools/perf/tests/attr/base-record
+++ b/tools/perf/tests/attr/base-record
@@ -23,7 +23,7 @@ comm=1
freq=1
inherit_stat=0
enable_on_exec=1
-task=0
+task=1
watermark=0
precise_ip=0|1|2|3
mmap_data=0
diff --git a/tools/perf/tests/attr/test-record-group b/tools/perf/tests/attr/test-record-group
index 6e7961f6f7a5..618ba1c17474 100644
--- a/tools/perf/tests/attr/test-record-group
+++ b/tools/perf/tests/attr/test-record-group
@@ -17,5 +17,6 @@ sample_type=327
read_format=4
mmap=0
comm=0
+task=0
enable_on_exec=0
disabled=0
diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
index ef59afd6d635..f906b793196f 100644
--- a/tools/perf/tests/attr/test-record-group-sampling
+++ b/tools/perf/tests/attr/test-record-group-sampling
@@ -23,7 +23,7 @@ sample_type=343
# PERF_FORMAT_ID | PERF_FORMAT_GROUP
read_format=12
-
+task=0
mmap=0
comm=0
enable_on_exec=0
diff --git a/tools/perf/tests/attr/test-record-group1 b/tools/perf/tests/attr/test-record-group1
index 87a222d014d8..48e8bd12fe46 100644
--- a/tools/perf/tests/attr/test-record-group1
+++ b/tools/perf/tests/attr/test-record-group1
@@ -18,5 +18,6 @@ sample_type=327
read_format=4
mmap=0
comm=0
+task=0
enable_on_exec=0
disabled=0
diff --git a/tools/perf/tests/attr/test-stat-C0 b/tools/perf/tests/attr/test-stat-C0
index 67717fe6a65d..a2c76d10b2bb 100644
--- a/tools/perf/tests/attr/test-stat-C0
+++ b/tools/perf/tests/attr/test-stat-C0
@@ -7,3 +7,4 @@ ret = 1
# events are disabled by default when attached to cpu
disabled=1
enable_on_exec=0
+optional=1
diff --git a/tools/perf/tests/attr/test-stat-basic b/tools/perf/tests/attr/test-stat-basic
index 74e17881f2ba..69867d049fda 100644
--- a/tools/perf/tests/attr/test-stat-basic
+++ b/tools/perf/tests/attr/test-stat-basic
@@ -4,3 +4,4 @@ args = -e cycles kill >/dev/null 2>&1
ret = 1
[event:base-stat]
+optional=1
diff --git a/tools/perf/tests/attr/test-stat-default b/tools/perf/tests/attr/test-stat-default
index e911dbd4eb47..d9e99b3f77e6 100644
--- a/tools/perf/tests/attr/test-stat-default
+++ b/tools/perf/tests/attr/test-stat-default
@@ -32,6 +32,7 @@ config=2
fd=5
type=0
config=0
+optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
[event6:base-stat]
@@ -52,15 +53,18 @@ optional=1
fd=8
type=0
config=1
+optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
[event9:base-stat]
fd=9
type=0
config=4
+optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
[event10:base-stat]
fd=10
type=0
config=5
+optional=1
diff --git a/tools/perf/tests/attr/test-stat-detailed-1 b/tools/perf/tests/attr/test-stat-detailed-1
index b39270a08e74..8b04a055d154 100644
--- a/tools/perf/tests/attr/test-stat-detailed-1
+++ b/tools/perf/tests/attr/test-stat-detailed-1
@@ -33,6 +33,7 @@ config=2
fd=5
type=0
config=0
+optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
[event6:base-stat]
@@ -53,18 +54,21 @@ optional=1
fd=8
type=0
config=1
+optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
[event9:base-stat]
fd=9
type=0
config=4
+optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
[event10:base-stat]
fd=10
type=0
config=5
+optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
@@ -74,6 +78,7 @@ config=5
fd=11
type=3
config=0
+optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
@@ -83,6 +88,7 @@ config=0
fd=12
type=3
config=65536
+optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_LL << 0 |
@@ -92,6 +98,7 @@ config=65536
fd=13
type=3
config=2
+optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_LL << 0 |
@@ -101,3 +108,4 @@ config=2
fd=14
type=3
config=65538
+optional=1
diff --git a/tools/perf/tests/attr/test-stat-detailed-2 b/tools/perf/tests/attr/test-stat-detailed-2
index 45f8e6ea34f8..4fca9f1bfbf8 100644
--- a/tools/perf/tests/attr/test-stat-detailed-2
+++ b/tools/perf/tests/attr/test-stat-detailed-2
@@ -33,6 +33,7 @@ config=2
fd=5
type=0
config=0
+optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
[event6:base-stat]
@@ -53,18 +54,21 @@ optional=1
fd=8
type=0
config=1
+optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
[event9:base-stat]
fd=9
type=0
config=4
+optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
[event10:base-stat]
fd=10
type=0
config=5
+optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
@@ -74,6 +78,7 @@ config=5
fd=11
type=3
config=0
+optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
@@ -83,6 +88,7 @@ config=0
fd=12
type=3
config=65536
+optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_LL << 0 |
@@ -92,6 +98,7 @@ config=65536
fd=13
type=3
config=2
+optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_LL << 0 |
@@ -101,6 +108,7 @@ config=2
fd=14
type=3
config=65538
+optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_L1I << 0 |
@@ -120,6 +128,7 @@ optional=1
fd=16
type=3
config=65537
+optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_DTLB << 0 |
@@ -129,6 +138,7 @@ config=65537
fd=17
type=3
config=3
+optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_DTLB << 0 |
@@ -138,6 +148,7 @@ config=3
fd=18
type=3
config=65539
+optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_ITLB << 0 |
@@ -147,6 +158,7 @@ config=65539
fd=19
type=3
config=4
+optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_ITLB << 0 |
@@ -156,3 +168,4 @@ config=4
fd=20
type=3
config=65540
+optional=1
diff --git a/tools/perf/tests/attr/test-stat-detailed-3 b/tools/perf/tests/attr/test-stat-detailed-3
index 30ae0fb7a3fd..4bb58e1c82a6 100644
--- a/tools/perf/tests/attr/test-stat-detailed-3
+++ b/tools/perf/tests/attr/test-stat-detailed-3
@@ -33,6 +33,7 @@ config=2
fd=5
type=0
config=0
+optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
[event6:base-stat]
@@ -53,18 +54,21 @@ optional=1
fd=8
type=0
config=1
+optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
[event9:base-stat]
fd=9
type=0
config=4
+optional=1
# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
[event10:base-stat]
fd=10
type=0
config=5
+optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
@@ -74,6 +78,7 @@ config=5
fd=11
type=3
config=0
+optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_L1D << 0 |
@@ -83,6 +88,7 @@ config=0
fd=12
type=3
config=65536
+optional=1
# PERF_TYPE_HW_CACHE /
# PERF_COUNT_HW_CACHE_LL << 0 |
@@ -92,6 +98,7 @@ config=65536
fd=13
type=3
config=2
+optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_LL << 0 |
@@ -101,6 +108,7 @@ config=2
fd=14
type=3
config=65538
+optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_L1I << 0 |
@@ -120,6 +128,7 @@ optional=1
fd=16
type=3
config=65537
+optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_DTLB << 0 |
@@ -129,6 +138,7 @@ config=65537
fd=17
type=3
config=3
+optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_DTLB << 0 |
@@ -138,6 +148,7 @@ config=3
fd=18
type=3
config=65539
+optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_ITLB << 0 |
@@ -147,6 +158,7 @@ config=65539
fd=19
type=3
config=4
+optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_ITLB << 0 |
@@ -156,6 +168,7 @@ config=4
fd=20
type=3
config=65540
+optional=1
# PERF_TYPE_HW_CACHE,
# PERF_COUNT_HW_CACHE_L1D << 0 |
diff --git a/tools/perf/tests/attr/test-stat-group b/tools/perf/tests/attr/test-stat-group
index fdc1596a8862..e15d6946e9b3 100644
--- a/tools/perf/tests/attr/test-stat-group
+++ b/tools/perf/tests/attr/test-stat-group
@@ -6,6 +6,7 @@ ret = 1
[event-1:base-stat]
fd=1
group_fd=-1
+read_format=3|15
[event-2:base-stat]
fd=2
@@ -13,3 +14,4 @@ group_fd=1
config=1
disabled=0
enable_on_exec=0
+read_format=3|15
diff --git a/tools/perf/tests/attr/test-stat-group1 b/tools/perf/tests/attr/test-stat-group1
index 2a1f86e4a904..1746751123dc 100644
--- a/tools/perf/tests/attr/test-stat-group1
+++ b/tools/perf/tests/attr/test-stat-group1
@@ -6,6 +6,7 @@ ret = 1
[event-1:base-stat]
fd=1
group_fd=-1
+read_format=3|15
[event-2:base-stat]
fd=2
@@ -13,3 +14,4 @@ group_fd=1
config=1
disabled=0
enable_on_exec=0
+read_format=3|15
diff --git a/tools/perf/tests/attr/test-stat-no-inherit b/tools/perf/tests/attr/test-stat-no-inherit
index d54b2a1e3e28..924fbb9300d1 100644
--- a/tools/perf/tests/attr/test-stat-no-inherit
+++ b/tools/perf/tests/attr/test-stat-no-inherit
@@ -5,3 +5,4 @@ ret = 1
[event:base-stat]
inherit=0
+optional=1
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 53d06f37406a..766573e236e4 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -4,6 +4,7 @@
*
* Builtin regression testing command: ever growing number of sanity tests
*/
+#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 3c3f3e029e33..868d82b501f4 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -132,7 +132,7 @@ static int synth_all(struct machine *machine)
{
return perf_event__synthesize_threads(NULL,
perf_event__process,
- machine, 0, 500);
+ machine, 0, 500, 1);
}
static int synth_process(struct machine *machine)
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index a59db7c45a65..17cb1bb3448c 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -30,12 +30,14 @@ static int get_temp(char *path)
static int session_write_header(char *path)
{
struct perf_session *session;
- struct perf_data_file file = {
- .path = path,
- .mode = PERF_DATA_MODE_WRITE,
+ struct perf_data data = {
+ .file = {
+ .path = path,
+ },
+ .mode = PERF_DATA_MODE_WRITE,
};
- session = perf_session__new(&file, false, NULL);
+ session = perf_session__new(&data, false, NULL);
TEST_ASSERT_VAL("can't get session", session);
session->evlist = perf_evlist__new_default();
@@ -47,7 +49,7 @@ static int session_write_header(char *path)
session->header.data_size += DATA_SIZE;
TEST_ASSERT_VAL("failed to write header",
- !perf_session__write_header(session, session->evlist, file.fd, true));
+ !perf_session__write_header(session, session->evlist, data.file.fd, true));
perf_session__delete(session);
@@ -57,13 +59,15 @@ static int session_write_header(char *path)
static int check_cpu_topology(char *path, struct cpu_map *map)
{
struct perf_session *session;
- struct perf_data_file file = {
- .path = path,
- .mode = PERF_DATA_MODE_READ,
+ struct perf_data data = {
+ .file = {
+ .path = path,
+ },
+ .mode = PERF_DATA_MODE_READ,
};
int i;
- session = perf_session__new(&file, false, NULL);
+ session = perf_session__new(&data, false, NULL);
TEST_ASSERT_VAL("can't get session", session);
for (i = 0; i < session->header.env.nr_cpus_avail; i++) {
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index 175d633c6b49..066bbf0f4a74 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -3,5 +3,7 @@ libperf-y += fcntl.o
ifeq ($(SRCARCH),$(filter $(SRCARCH),x86))
libperf-y += ioctl.o
endif
+libperf-y += kcmp.o
libperf-y += pkey_alloc.o
+libperf-y += prctl.o
libperf-y += statx.o
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index d80655cd1881..a6dfd04beaee 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include <sys/types.h>
struct strarray {
int offset;
@@ -27,6 +28,8 @@ size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const cha
struct trace;
struct thread;
+size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size);
+
/**
* @val: value of syscall argument being formatted
* @args: All the args, use syscall_args__val(arg, nth) to access one
@@ -79,12 +82,27 @@ size_t syscall_arg__scnprintf_fcntl_arg(char *bf, size_t size, struct syscall_ar
size_t syscall_arg__scnprintf_ioctl_cmd(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_IOCTL_CMD syscall_arg__scnprintf_ioctl_cmd
+size_t syscall_arg__scnprintf_kcmp_type(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_KCMP_TYPE syscall_arg__scnprintf_kcmp_type
+
+size_t syscall_arg__scnprintf_kcmp_idx(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_KCMP_IDX syscall_arg__scnprintf_kcmp_idx
+
size_t syscall_arg__scnprintf_pkey_alloc_access_rights(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_PKEY_ALLOC_ACCESS_RIGHTS syscall_arg__scnprintf_pkey_alloc_access_rights
size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
+size_t syscall_arg__scnprintf_prctl_option(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_PRCTL_OPTION syscall_arg__scnprintf_prctl_option
+
+size_t syscall_arg__scnprintf_prctl_arg2(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_PRCTL_ARG2 syscall_arg__scnprintf_prctl_arg2
+
+size_t syscall_arg__scnprintf_prctl_arg3(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_PRCTL_ARG3 syscall_arg__scnprintf_prctl_arg3
+
size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_STATX_FLAGS syscall_arg__scnprintf_statx_flags
diff --git a/tools/perf/trace/beauty/kcmp.c b/tools/perf/trace/beauty/kcmp.c
new file mode 100644
index 000000000000..f62040eb9d5c
--- /dev/null
+++ b/tools/perf/trace/beauty/kcmp.c
@@ -0,0 +1,44 @@
+/*
+ * trace/beauty/kcmp.c
+ *
+ * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include "trace/beauty/beauty.h"
+#include <linux/kernel.h>
+#include <sys/types.h>
+#include <machine.h>
+#include <uapi/linux/kcmp.h>
+
+#include "trace/beauty/generated/kcmp_type_array.c"
+
+size_t syscall_arg__scnprintf_kcmp_idx(char *bf, size_t size, struct syscall_arg *arg)
+{
+ unsigned long fd = arg->val;
+ int type = syscall_arg__val(arg, 2);
+ pid_t pid;
+
+ if (type != KCMP_FILE)
+ return syscall_arg__scnprintf_long(bf, size, arg);
+
+ pid = syscall_arg__val(arg, arg->idx == 3 ? 0 : 1); /* idx1 -> pid1, idx2 -> pid2 */
+ return pid__scnprintf_fd(arg->trace, pid, fd, bf, size);
+}
+
+static size_t kcmp__scnprintf_type(int type, char *bf, size_t size)
+{
+ static DEFINE_STRARRAY(kcmp_types);
+ return strarray__scnprintf(&strarray__kcmp_types, bf, size, "%d", type);
+}
+
+size_t syscall_arg__scnprintf_kcmp_type(char *bf, size_t size, struct syscall_arg *arg)
+{
+ unsigned long type = arg->val;
+
+ if (type != KCMP_FILE)
+ arg->mask |= (1 << 3) | (1 << 4); /* Ignore idx1 and idx2 */
+
+ return kcmp__scnprintf_type(type, bf, size);
+}
diff --git a/tools/perf/trace/beauty/kcmp_type.sh b/tools/perf/trace/beauty/kcmp_type.sh
new file mode 100755
index 000000000000..40d063b8c082
--- /dev/null
+++ b/tools/perf/trace/beauty/kcmp_type.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+header_dir=$1
+
+printf "static const char *kcmp_types[] = {\n"
+regex='^[[:space:]]+(KCMP_(\w+)),'
+egrep $regex ${header_dir}/kcmp.h | grep -v KCMP_TYPES, | \
+ sed -r "s/$regex/\1 \2/g" | \
+ xargs printf "\t[%s]\t= \"%s\",\n"
+printf "};\n"
diff --git a/tools/perf/trace/beauty/madvise_behavior.sh b/tools/perf/trace/beauty/madvise_behavior.sh
new file mode 100755
index 000000000000..60ef8640ee70
--- /dev/null
+++ b/tools/perf/trace/beauty/madvise_behavior.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+header_dir=$1
+
+printf "static const char *madvise_advices[] = {\n"
+regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MADV_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*'
+egrep $regex ${header_dir}/mman-common.h | \
+ sed -r "s/$regex/\2 \1/g" | \
+ sort -n | xargs printf "\t[%s] = \"%s\",\n"
+printf "};\n"
diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c
index 51f1cea406f5..9e1668b2c5d7 100644
--- a/tools/perf/trace/beauty/mmap.c
+++ b/tools/perf/trace/beauty/mmap.c
@@ -95,35 +95,21 @@ static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
#define SCA_MREMAP_FLAGS syscall_arg__scnprintf_mremap_flags
+static size_t madvise__scnprintf_behavior(int behavior, char *bf, size_t size)
+{
+#include "trace/beauty/generated/madvise_behavior_array.c"
+ static DEFINE_STRARRAY(madvise_advices);
+
+ if (behavior < strarray__madvise_advices.nr_entries && strarray__madvise_advices.entries[behavior] != NULL)
+ return scnprintf(bf, size, "MADV_%s", strarray__madvise_advices.entries[behavior]);
+
+ return scnprintf(bf, size, "%#", behavior);
+}
+
static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
struct syscall_arg *arg)
{
- int behavior = arg->val;
-
- switch (behavior) {
-#define P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
- P_MADV_BHV(NORMAL);
- P_MADV_BHV(RANDOM);
- P_MADV_BHV(SEQUENTIAL);
- P_MADV_BHV(WILLNEED);
- P_MADV_BHV(DONTNEED);
- P_MADV_BHV(FREE);
- P_MADV_BHV(REMOVE);
- P_MADV_BHV(DONTFORK);
- P_MADV_BHV(DOFORK);
- P_MADV_BHV(HWPOISON);
- P_MADV_BHV(SOFT_OFFLINE);
- P_MADV_BHV(MERGEABLE);
- P_MADV_BHV(UNMERGEABLE);
- P_MADV_BHV(HUGEPAGE);
- P_MADV_BHV(NOHUGEPAGE);
- P_MADV_BHV(DONTDUMP);
- P_MADV_BHV(DODUMP);
-#undef P_MADV_BHV
- default: break;
- }
-
- return scnprintf(bf, size, "%#x", behavior);
+ return madvise__scnprintf_behavior(arg->val, bf, size);
}
#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
diff --git a/tools/perf/trace/beauty/prctl.c b/tools/perf/trace/beauty/prctl.c
new file mode 100644
index 000000000000..246130dad6c4
--- /dev/null
+++ b/tools/perf/trace/beauty/prctl.c
@@ -0,0 +1,82 @@
+/*
+ * trace/beauty/prctl.c
+ *
+ * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include "trace/beauty/beauty.h"
+#include <linux/kernel.h>
+#include <uapi/linux/prctl.h>
+
+#include "trace/beauty/generated/prctl_option_array.c"
+
+static size_t prctl__scnprintf_option(int option, char *bf, size_t size)
+{
+ static DEFINE_STRARRAY(prctl_options);
+ return strarray__scnprintf(&strarray__prctl_options, bf, size, "%d", option);
+}
+
+static size_t prctl__scnprintf_set_mm(int option, char *bf, size_t size)
+{
+ static DEFINE_STRARRAY(prctl_set_mm_options);
+ return strarray__scnprintf(&strarray__prctl_set_mm_options, bf, size, "%d", option);
+}
+
+size_t syscall_arg__scnprintf_prctl_arg2(char *bf, size_t size, struct syscall_arg *arg)
+{
+ int option = syscall_arg__val(arg, 0);
+
+ if (option == PR_SET_MM)
+ return prctl__scnprintf_set_mm(arg->val, bf, size);
+ /*
+ * We still don't grab the contents of pointers on entry or exit,
+ * so just print them as hex numbers
+ */
+ if (option == PR_SET_NAME)
+ return syscall_arg__scnprintf_hex(bf, size, arg);
+
+ return syscall_arg__scnprintf_long(bf, size, arg);
+}
+
+size_t syscall_arg__scnprintf_prctl_arg3(char *bf, size_t size, struct syscall_arg *arg)
+{
+ int option = syscall_arg__val(arg, 0);
+
+ if (option == PR_SET_MM)
+ return syscall_arg__scnprintf_hex(bf, size, arg);
+
+ return syscall_arg__scnprintf_long(bf, size, arg);
+}
+
+size_t syscall_arg__scnprintf_prctl_option(char *bf, size_t size, struct syscall_arg *arg)
+{
+ unsigned long option = arg->val;
+ enum {
+ SPO_ARG2 = (1 << 1),
+ SPO_ARG3 = (1 << 2),
+ SPO_ARG4 = (1 << 3),
+ SPO_ARG5 = (1 << 4),
+ SPO_ARG6 = (1 << 5),
+ };
+ const u8 all_but2 = SPO_ARG3 | SPO_ARG4 | SPO_ARG5 | SPO_ARG6;
+ const u8 all = SPO_ARG2 | all_but2;
+ const u8 masks[] = {
+ [PR_GET_DUMPABLE] = all,
+ [PR_SET_DUMPABLE] = all_but2,
+ [PR_SET_NAME] = all_but2,
+ [PR_GET_CHILD_SUBREAPER] = all_but2,
+ [PR_SET_CHILD_SUBREAPER] = all_but2,
+ [PR_GET_SECUREBITS] = all,
+ [PR_SET_SECUREBITS] = all_but2,
+ [PR_SET_MM] = SPO_ARG4 | SPO_ARG5 | SPO_ARG6,
+ [PR_GET_PDEATHSIG] = all,
+ [PR_SET_PDEATHSIG] = all_but2,
+ };
+
+ if (option < ARRAY_SIZE(masks))
+ arg->mask |= masks[option];
+
+ return prctl__scnprintf_option(option, bf, size);
+}
diff --git a/tools/perf/trace/beauty/prctl_option.sh b/tools/perf/trace/beauty/prctl_option.sh
new file mode 100755
index 000000000000..0be4138fbe71
--- /dev/null
+++ b/tools/perf/trace/beauty/prctl_option.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+header_dir=$1
+
+printf "static const char *prctl_options[] = {\n"
+regex='^#define[[:space:]]+PR_([GS]ET\w+)[[:space:]]*([[:xdigit:]]+).*'
+egrep $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \
+ sed -r "s/$regex/\2 \1/g" | \
+ sort -n | xargs printf "\t[%s] = \"%s\",\n"
+printf "};\n"
+
+printf "static const char *prctl_set_mm_options[] = {\n"
+regex='^#[[:space:]]+define[[:space:]]+PR_SET_MM_(\w+)[[:space:]]*([[:digit:]]+).*'
+egrep $regex ${header_dir}/prctl.h | \
+ sed -r "s/$regex/\2 \1/g" | \
+ sort -n | xargs printf "\t[%s] = \"%s\",\n"
+printf "};\n"
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 628ad5f7eddb..68146f4620a5 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -155,57 +155,9 @@ static void callchain_list__set_folding(struct callchain_list *cl, bool unfold)
cl->unfolded = unfold ? cl->has_children : false;
}
-static struct inline_node *inline_node__create(struct map *map, u64 ip)
-{
- struct dso *dso;
- struct inline_node *node;
-
- if (map == NULL)
- return NULL;
-
- dso = map->dso;
- if (dso == NULL)
- return NULL;
-
- node = dso__parse_addr_inlines(dso,
- map__rip_2objdump(map, ip));
-
- return node;
-}
-
-static int inline__count_rows(struct inline_node *node)
-{
- struct inline_list *ilist;
- int i = 0;
-
- if (node == NULL)
- return 0;
-
- list_for_each_entry(ilist, &node->val, list) {
- if ((ilist->filename != NULL) || (ilist->funcname != NULL))
- i++;
- }
-
- return i;
-}
-
-static int callchain_list__inline_rows(struct callchain_list *chain)
-{
- struct inline_node *node;
- int rows;
-
- node = inline_node__create(chain->ms.map, chain->ip);
- if (node == NULL)
- return 0;
-
- rows = inline__count_rows(node);
- inline_node__delete(node);
- return rows;
-}
-
static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
{
- int n = 0, inline_rows;
+ int n = 0;
struct rb_node *nd;
for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
@@ -216,12 +168,6 @@ static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
list_for_each_entry(chain, &child->val, list) {
++n;
- if (symbol_conf.inline_name) {
- inline_rows =
- callchain_list__inline_rows(chain);
- n += inline_rows;
- }
-
/* We need this because we may not have children */
folded_sign = callchain_list__folded(chain);
if (folded_sign == '+')
@@ -273,7 +219,7 @@ static int callchain_node__count_rows(struct callchain_node *node)
{
struct callchain_list *chain;
bool unfolded = false;
- int n = 0, inline_rows;
+ int n = 0;
if (callchain_param.mode == CHAIN_FLAT)
return callchain_node__count_flat_rows(node);
@@ -282,10 +228,6 @@ static int callchain_node__count_rows(struct callchain_node *node)
list_for_each_entry(chain, &node->val, list) {
++n;
- if (symbol_conf.inline_name) {
- inline_rows = callchain_list__inline_rows(chain);
- n += inline_rows;
- }
unfolded = chain->unfolded;
}
@@ -433,19 +375,6 @@ static void hist_entry__init_have_children(struct hist_entry *he)
he->init_have_children = true;
}
-static void hist_entry_init_inline_node(struct hist_entry *he)
-{
- if (he->inline_node)
- return;
-
- he->inline_node = inline_node__create(he->ms.map, he->ip);
-
- if (he->inline_node == NULL)
- return;
-
- he->has_children = true;
-}
-
static bool hist_browser__toggle_fold(struct hist_browser *browser)
{
struct hist_entry *he = browser->he_selection;
@@ -477,12 +406,8 @@ static bool hist_browser__toggle_fold(struct hist_browser *browser)
if (he->unfolded) {
if (he->leaf)
- if (he->inline_node)
- he->nr_rows = inline__count_rows(
- he->inline_node);
- else
- he->nr_rows = callchain__count_rows(
- &he->sorted_chain);
+ he->nr_rows = callchain__count_rows(
+ &he->sorted_chain);
else
he->nr_rows = hierarchy_count_rows(browser, he, false);
@@ -842,71 +767,6 @@ static bool hist_browser__check_dump_full(struct hist_browser *browser __maybe_u
#define LEVEL_OFFSET_STEP 3
-static int hist_browser__show_inline(struct hist_browser *browser,
- struct inline_node *node,
- unsigned short row,
- int offset)
-{
- struct inline_list *ilist;
- char buf[1024];
- int color, width, first_row;
-
- first_row = row;
- width = browser->b.width - (LEVEL_OFFSET_STEP + 2);
- list_for_each_entry(ilist, &node->val, list) {
- if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
- color = HE_COLORSET_NORMAL;
- if (ui_browser__is_current_entry(&browser->b, row))
- color = HE_COLORSET_SELECTED;
-
- if (callchain_param.key == CCKEY_ADDRESS ||
- callchain_param.key == CCKEY_SRCLINE) {
- if (ilist->filename != NULL)
- scnprintf(buf, sizeof(buf),
- "%s:%d (inline)",
- ilist->filename,
- ilist->line_nr);
- else
- scnprintf(buf, sizeof(buf), "??");
- } else if (ilist->funcname != NULL)
- scnprintf(buf, sizeof(buf), "%s (inline)",
- ilist->funcname);
- else if (ilist->filename != NULL)
- scnprintf(buf, sizeof(buf),
- "%s:%d (inline)",
- ilist->filename,
- ilist->line_nr);
- else
- scnprintf(buf, sizeof(buf), "??");
-
- ui_browser__set_color(&browser->b, color);
- hist_browser__gotorc(browser, row, 0);
- ui_browser__write_nstring(&browser->b, " ",
- LEVEL_OFFSET_STEP + offset);
- ui_browser__write_nstring(&browser->b, buf, width);
- row++;
- }
- }
-
- return row - first_row;
-}
-
-static size_t show_inline_list(struct hist_browser *browser, struct map *map,
- u64 ip, int row, int offset)
-{
- struct inline_node *node;
- int ret;
-
- node = inline_node__create(map, ip);
- if (node == NULL)
- return 0;
-
- ret = hist_browser__show_inline(browser, node, row, offset);
-
- inline_node__delete(node);
- return ret;
-}
-
static int hist_browser__show_callchain_list(struct hist_browser *browser,
struct callchain_node *node,
struct callchain_list *chain,
@@ -918,7 +778,7 @@ static int hist_browser__show_callchain_list(struct hist_browser *browser,
char bf[1024], *alloc_str;
char buf[64], *alloc_str2;
const char *str;
- int inline_rows = 0, ret = 1;
+ int ret = 1;
if (arg->row_offset != 0) {
arg->row_offset--;
@@ -955,12 +815,7 @@ static int hist_browser__show_callchain_list(struct hist_browser *browser,
free(alloc_str);
free(alloc_str2);
- if (symbol_conf.inline_name) {
- inline_rows = show_inline_list(browser, chain->ms.map,
- chain->ip, row + 1, offset);
- }
-
- return ret + inline_rows;
+ return ret;
}
static bool check_percent_display(struct rb_node *node, u64 parent_total)
@@ -1384,12 +1239,6 @@ static int hist_browser__show_entry(struct hist_browser *browser,
folded_sign = hist_entry__folded(entry);
}
- if (symbol_conf.inline_name &&
- (!entry->has_children)) {
- hist_entry_init_inline_node(entry);
- folded_sign = hist_entry__folded(entry);
- }
-
if (row_offset == 0) {
struct hpp_arg arg = {
.b = &browser->b,
@@ -1421,8 +1270,7 @@ static int hist_browser__show_entry(struct hist_browser *browser,
}
if (first) {
- if (symbol_conf.use_callchain ||
- symbol_conf.inline_name) {
+ if (symbol_conf.use_callchain) {
ui_browser__printf(&browser->b, "%c ", folded_sign);
width -= 2;
}
@@ -1464,15 +1312,11 @@ static int hist_browser__show_entry(struct hist_browser *browser,
.is_current_entry = current_entry,
};
- if (entry->inline_node)
- printed += hist_browser__show_inline(browser,
- entry->inline_node, row, 0);
- else
- printed += hist_browser__show_callchain(browser,
- entry, 1, row,
- hist_browser__show_callchain_entry,
- &arg,
- hist_browser__check_output_full);
+ printed += hist_browser__show_callchain(browser,
+ entry, 1, row,
+ hist_browser__show_callchain_entry,
+ &arg,
+ hist_browser__check_output_full);
}
return printed;
diff --git a/tools/perf/ui/progress.c b/tools/perf/ui/progress.c
index b5a5df14d702..bbfbc91a0fa4 100644
--- a/tools/perf/ui/progress.c
+++ b/tools/perf/ui/progress.c
@@ -28,13 +28,17 @@ void ui_progress__update(struct ui_progress *p, u64 adv)
}
}
-void ui_progress__init(struct ui_progress *p, u64 total, const char *title)
+void __ui_progress__init(struct ui_progress *p, u64 total,
+ const char *title, bool size)
{
p->curr = 0;
p->next = p->step = total / 16 ?: 1;
p->total = total;
p->title = title;
+ p->size = size;
+ if (ui_progress__ops->init)
+ ui_progress__ops->init(p);
}
void ui_progress__finish(void)
diff --git a/tools/perf/ui/progress.h b/tools/perf/ui/progress.h
index 594bbe6935dd..4f52c37b2f09 100644
--- a/tools/perf/ui/progress.h
+++ b/tools/perf/ui/progress.h
@@ -9,12 +9,22 @@ void ui_progress__finish(void);
struct ui_progress {
const char *title;
u64 curr, next, step, total;
+ bool size;
};
-void ui_progress__init(struct ui_progress *p, u64 total, const char *title);
+void __ui_progress__init(struct ui_progress *p, u64 total,
+ const char *title, bool size);
+
+#define ui_progress__init(p, total, title) \
+ __ui_progress__init(p, total, title, false)
+
+#define ui_progress__init_size(p, total, title) \
+ __ui_progress__init(p, total, title, true)
+
void ui_progress__update(struct ui_progress *p, u64 adv);
struct ui_progress_ops {
+ void (*init)(struct ui_progress *p);
void (*update)(struct ui_progress *p);
void (*finish)(void);
};
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index de2810ae16be..25dd1e0ecc58 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -22,64 +22,6 @@ static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
return ret;
}
-static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
- int depth, int depth_mask, FILE *fp)
-{
- struct dso *dso;
- struct inline_node *node;
- struct inline_list *ilist;
- int ret = 0, i;
-
- if (map == NULL)
- return 0;
-
- dso = map->dso;
- if (dso == NULL)
- return 0;
-
- node = dso__parse_addr_inlines(dso,
- map__rip_2objdump(map, ip));
- if (node == NULL)
- return 0;
-
- list_for_each_entry(ilist, &node->val, list) {
- if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
- ret += callchain__fprintf_left_margin(fp, left_margin);
-
- for (i = 0; i < depth; i++) {
- if (depth_mask & (1 << i))
- ret += fprintf(fp, "|");
- else
- ret += fprintf(fp, " ");
- ret += fprintf(fp, " ");
- }
-
- if (callchain_param.key == CCKEY_ADDRESS ||
- callchain_param.key == CCKEY_SRCLINE) {
- if (ilist->filename != NULL)
- ret += fprintf(fp, "%s:%d (inline)",
- ilist->filename,
- ilist->line_nr);
- else
- ret += fprintf(fp, "??");
- } else if (ilist->funcname != NULL)
- ret += fprintf(fp, "%s (inline)",
- ilist->funcname);
- else if (ilist->filename != NULL)
- ret += fprintf(fp, "%s:%d (inline)",
- ilist->filename,
- ilist->line_nr);
- else
- ret += fprintf(fp, "??");
-
- ret += fprintf(fp, "\n");
- }
- }
-
- inline_node__delete(node);
- return ret;
-}
-
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
int left_margin)
{
@@ -138,9 +80,6 @@ static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
fputc('\n', fp);
free(alloc_str);
- if (symbol_conf.inline_name)
- ret += inline__fprintf(chain->ms.map, chain->ip,
- left_margin, depth, depth_mask, fp);
return ret;
}
@@ -315,13 +254,6 @@ static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
if (++entries_printed == callchain_param.print_limit)
break;
-
- if (symbol_conf.inline_name)
- ret += inline__fprintf(chain->ms.map,
- chain->ip,
- left_margin,
- 0, 0,
- fp);
}
root = &cnode->rb_root;
}
@@ -601,7 +533,6 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
{
int ret;
int callchain_ret = 0;
- int inline_ret = 0;
struct perf_hpp hpp = {
.buf = bf,
.size = size,
@@ -623,13 +554,7 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
callchain_ret = hist_entry_callchain__fprintf(he, total_period,
0, fp);
- if (callchain_ret == 0 && symbol_conf.inline_name) {
- inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
- ret += inline_ret;
- if (inline_ret > 0)
- ret += fprintf(fp, "\n");
- } else
- ret += callchain_ret;
+ ret += callchain_ret;
return ret;
}
diff --git a/tools/perf/ui/tui/progress.c b/tools/perf/ui/tui/progress.c
index 236bcb620ae4..bc134b82829d 100644
--- a/tools/perf/ui/tui/progress.c
+++ b/tools/perf/ui/tui/progress.c
@@ -1,13 +1,34 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
#include "../cache.h"
#include "../progress.h"
#include "../libslang.h"
#include "../ui.h"
#include "tui.h"
+#include "units.h"
#include "../browser.h"
+static void __tui_progress__init(struct ui_progress *p)
+{
+ p->next = p->step = p->total / (SLtt_Screen_Cols - 2) ?: 1;
+}
+
+static int get_title(struct ui_progress *p, char *buf, size_t size)
+{
+ char buf_cur[20];
+ char buf_tot[20];
+ int ret;
+
+ ret = unit_number__scnprintf(buf_cur, sizeof(buf_cur), p->curr);
+ ret += unit_number__scnprintf(buf_tot, sizeof(buf_tot), p->total);
+
+ return ret + scnprintf(buf, size, "%s [%s/%s]",
+ p->title, buf_cur, buf_tot);
+}
+
static void tui_progress__update(struct ui_progress *p)
{
+ char buf[100], *title = (char *) p->title;
int bar, y;
/*
* FIXME: We should have a per UI backend way of showing progress,
@@ -19,13 +40,18 @@ static void tui_progress__update(struct ui_progress *p)
if (p->total == 0)
return;
+ if (p->size) {
+ get_title(p, buf, sizeof(buf));
+ title = buf;
+ }
+
ui__refresh_dimensions(false);
pthread_mutex_lock(&ui__lock);
y = SLtt_Screen_Rows / 2 - 2;
SLsmg_set_color(0);
SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols);
SLsmg_gotorc(y++, 1);
- SLsmg_write_string((char *)p->title);
+ SLsmg_write_string(title);
SLsmg_fill_region(y, 1, 1, SLtt_Screen_Cols - 2, ' ');
SLsmg_set_color(HE_COLORSET_SELECTED);
bar = ((SLtt_Screen_Cols - 2) * p->curr) / p->total;
@@ -50,8 +76,8 @@ static void tui_progress__finish(void)
pthread_mutex_unlock(&ui__lock);
}
-static struct ui_progress_ops tui_progress__ops =
-{
+static struct ui_progress_ops tui_progress__ops = {
+ .init = __tui_progress__init,
.update = tui_progress__update,
.finish = tui_progress__finish,
};
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 94518c1bf8b6..a3de7916fe63 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -13,6 +13,7 @@ libperf-y += find_bit.o
libperf-y += kallsyms.o
libperf-y += levenshtein.o
libperf-y += llvm-utils.o
+libperf-y += mmap.o
libperf-y += memswap.o
libperf-y += parse-events.o
libperf-y += perf_regs.o
@@ -34,6 +35,7 @@ libperf-y += dso.o
libperf-y += symbol.o
libperf-y += symbol_fprintf.o
libperf-y += color.o
+libperf-y += metricgroup.o
libperf-y += header.o
libperf-y += callchain.o
libperf-y += values.o
@@ -78,6 +80,7 @@ libperf-y += data.o
libperf-y += tsc.o
libperf-y += cloexec.o
libperf-y += call-path.o
+libperf-y += rwsem.o
libperf-y += thread-stack.o
libperf-$(CONFIG_AUXTRACE) += auxtrace.o
libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index aa66791b1bfc..da1c4c4a0dd8 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -49,10 +49,9 @@ struct arch {
void *priv;
unsigned int model;
unsigned int family;
- int (*init)(struct arch *arch);
+ int (*init)(struct arch *arch, char *cpuid);
bool (*ins_is_fused)(struct arch *arch, const char *ins1,
const char *ins2);
- int (*cpuid_parse)(struct arch *arch, char *cpuid);
struct {
char comment_char;
char skip_functions_char;
@@ -132,10 +131,10 @@ static struct arch architectures[] = {
},
{
.name = "x86",
+ .init = x86__annotate_init,
.instructions = x86__instructions,
.nr_instructions = ARRAY_SIZE(x86__instructions),
.ins_is_fused = x86__ins_is_fused,
- .cpuid_parse = x86__cpuid_parse,
.objdump = {
.comment_char = '#',
},
@@ -1457,16 +1456,13 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
*parch = arch;
if (arch->init) {
- err = arch->init(arch);
+ err = arch->init(arch, cpuid);
if (err) {
pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
return err;
}
}
- if (arch->cpuid_parse && cpuid)
- arch->cpuid_parse(arch, cpuid);
-
pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
symfs_filename, sym->name, map->unmap_ip(map, sym->start),
map->unmap_ip(map, sym->end));
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 5547457566a7..a33491416400 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -208,7 +208,7 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues,
static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
- int fd = perf_data_file__fd(session->file);
+ int fd = perf_data__fd(session->data);
void *p;
ssize_t ret;
@@ -305,7 +305,7 @@ static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
if (session->one_mmap) {
buffer->data = buffer->data_offset - session->one_mmap_offset +
session->one_mmap_addr;
- } else if (perf_data_file__is_pipe(session->file)) {
+ } else if (perf_data__is_pipe(session->data)) {
buffer->data = auxtrace_copy_data(buffer->size, session);
if (!buffer->data)
return -ENOMEM;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 33b5e6cdf38c..d19e11b68de7 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -378,7 +378,7 @@ struct addr_filters {
static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->userpg;
- u64 head = ACCESS_ONCE(pc->aux_head);
+ u64 head = READ_ONCE(pc->aux_head);
/* Ensure all reads are done after we read the head */
rmb();
@@ -389,7 +389,7 @@ static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
- u64 head = ACCESS_ONCE(pc->aux_head);
+ u64 head = READ_ONCE(pc->aux_head);
#else
u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
#endif
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 6031933d811c..082505d08d72 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -567,6 +567,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
call->ip = cursor_node->ip;
call->ms.sym = cursor_node->sym;
call->ms.map = map__get(cursor_node->map);
+ call->srcline = cursor_node->srcline;
if (cursor_node->branch) {
call->branch_count = 1;
@@ -645,103 +646,120 @@ enum match_result {
MATCH_GT,
};
-static enum match_result match_chain_srcline(struct callchain_cursor_node *node,
- struct callchain_list *cnode)
+static enum match_result match_chain_strings(const char *left,
+ const char *right)
{
- char *left = NULL;
- char *right = NULL;
enum match_result ret = MATCH_EQ;
int cmp;
- if (cnode->ms.map)
- left = get_srcline(cnode->ms.map->dso,
- map__rip_2objdump(cnode->ms.map, cnode->ip),
- cnode->ms.sym, true, false);
- if (node->map)
- right = get_srcline(node->map->dso,
- map__rip_2objdump(node->map, node->ip),
- node->sym, true, false);
-
if (left && right)
cmp = strcmp(left, right);
else if (!left && right)
cmp = 1;
else if (left && !right)
cmp = -1;
- else if (cnode->ip == node->ip)
- cmp = 0;
else
- cmp = (cnode->ip < node->ip) ? -1 : 1;
+ return MATCH_ERROR;
if (cmp != 0)
ret = cmp < 0 ? MATCH_LT : MATCH_GT;
- free_srcline(left);
- free_srcline(right);
return ret;
}
+/*
+ * We need to always use relative addresses because we're aggregating
+ * callchains from multiple threads, i.e. different address spaces, so
+ * comparing absolute addresses make no sense as a symbol in a DSO may end up
+ * in a different address when used in a different binary or even the same
+ * binary but with some sort of address randomization technique, thus we need
+ * to compare just relative addresses. -acme
+ */
+static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip,
+ struct map *right_map, u64 right_ip)
+{
+ struct dso *left_dso = left_map ? left_map->dso : NULL;
+ struct dso *right_dso = right_map ? right_map->dso : NULL;
+
+ if (left_dso != right_dso)
+ return left_dso < right_dso ? MATCH_LT : MATCH_GT;
+
+ if (left_ip != right_ip)
+ return left_ip < right_ip ? MATCH_LT : MATCH_GT;
+
+ return MATCH_EQ;
+}
+
static enum match_result match_chain(struct callchain_cursor_node *node,
struct callchain_list *cnode)
{
- struct symbol *sym = node->sym;
- u64 left, right;
- struct dso *left_dso = NULL;
- struct dso *right_dso = NULL;
-
- if (callchain_param.key == CCKEY_SRCLINE) {
- enum match_result match = match_chain_srcline(node, cnode);
+ enum match_result match = MATCH_ERROR;
+ switch (callchain_param.key) {
+ case CCKEY_SRCLINE:
+ match = match_chain_strings(cnode->srcline, node->srcline);
if (match != MATCH_ERROR)
- return match;
- }
-
- if (cnode->ms.sym && sym && callchain_param.key == CCKEY_FUNCTION) {
- left = cnode->ms.sym->start;
- right = sym->start;
- left_dso = cnode->ms.map->dso;
- right_dso = node->map->dso;
- } else {
- left = cnode->ip;
- right = node->ip;
+ break;
+ /* otherwise fall-back to symbol-based comparison below */
+ __fallthrough;
+ case CCKEY_FUNCTION:
+ if (node->sym && cnode->ms.sym) {
+ /*
+ * Compare inlined frames based on their symbol name
+ * because different inlined frames will have the same
+ * symbol start. Otherwise do a faster comparison based
+ * on the symbol start address.
+ */
+ if (cnode->ms.sym->inlined || node->sym->inlined) {
+ match = match_chain_strings(cnode->ms.sym->name,
+ node->sym->name);
+ if (match != MATCH_ERROR)
+ break;
+ } else {
+ match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
+ node->map, node->sym->start);
+ break;
+ }
+ }
+ /* otherwise fall-back to IP-based comparison below */
+ __fallthrough;
+ case CCKEY_ADDRESS:
+ default:
+ match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->map, node->ip);
+ break;
}
- if (left == right && left_dso == right_dso) {
- if (node->branch) {
- cnode->branch_count++;
+ if (match == MATCH_EQ && node->branch) {
+ cnode->branch_count++;
- if (node->branch_from) {
- /*
- * It's "to" of a branch
- */
- cnode->brtype_stat.branch_to = true;
+ if (node->branch_from) {
+ /*
+ * It's "to" of a branch
+ */
+ cnode->brtype_stat.branch_to = true;
- if (node->branch_flags.predicted)
- cnode->predicted_count++;
+ if (node->branch_flags.predicted)
+ cnode->predicted_count++;
- if (node->branch_flags.abort)
- cnode->abort_count++;
+ if (node->branch_flags.abort)
+ cnode->abort_count++;
- branch_type_count(&cnode->brtype_stat,
- &node->branch_flags,
- node->branch_from,
- node->ip);
- } else {
- /*
- * It's "from" of a branch
- */
- cnode->brtype_stat.branch_to = false;
- cnode->cycles_count +=
- node->branch_flags.cycles;
- cnode->iter_count += node->nr_loop_iter;
- cnode->iter_cycles += node->iter_cycles;
- }
+ branch_type_count(&cnode->brtype_stat,
+ &node->branch_flags,
+ node->branch_from,
+ node->ip);
+ } else {
+ /*
+ * It's "from" of a branch
+ */
+ cnode->brtype_stat.branch_to = false;
+ cnode->cycles_count += node->branch_flags.cycles;
+ cnode->iter_count += node->nr_loop_iter;
+ cnode->iter_cycles += node->iter_cycles;
}
-
- return MATCH_EQ;
}
- return left > right ? MATCH_GT : MATCH_LT;
+ return match;
}
/*
@@ -970,7 +988,7 @@ merge_chain_branch(struct callchain_cursor *cursor,
list_for_each_entry_safe(list, next_list, &src->val, list) {
callchain_cursor_append(cursor, list->ip,
list->ms.map, list->ms.sym,
- false, NULL, 0, 0, 0);
+ false, NULL, 0, 0, 0, list->srcline);
list_del(&list->list);
map__zput(list->ms.map);
free(list);
@@ -1010,7 +1028,8 @@ int callchain_merge(struct callchain_cursor *cursor,
int callchain_cursor_append(struct callchain_cursor *cursor,
u64 ip, struct map *map, struct symbol *sym,
bool branch, struct branch_flags *flags,
- int nr_loop_iter, u64 iter_cycles, u64 branch_from)
+ int nr_loop_iter, u64 iter_cycles, u64 branch_from,
+ const char *srcline)
{
struct callchain_cursor_node *node = *cursor->last;
@@ -1029,6 +1048,7 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
node->branch = branch;
node->nr_loop_iter = nr_loop_iter;
node->iter_cycles = iter_cycles;
+ node->srcline = srcline;
if (flags)
memcpy(&node->branch_flags, flags,
@@ -1071,10 +1091,8 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *
{
al->map = node->map;
al->sym = node->sym;
- if (node->map)
- al->addr = node->map->map_ip(node->map, node->ip);
- else
- al->addr = node->ip;
+ al->srcline = node->srcline;
+ al->addr = node->ip;
if (al->sym == NULL) {
if (hide_unresolved)
@@ -1116,16 +1134,15 @@ char *callchain_list__sym_name(struct callchain_list *cl,
int printed;
if (cl->ms.sym) {
- if (show_srcline && cl->ms.map && !cl->srcline)
- cl->srcline = get_srcline(cl->ms.map->dso,
- map__rip_2objdump(cl->ms.map,
- cl->ip),
- cl->ms.sym, false, show_addr);
- if (cl->srcline)
- printed = scnprintf(bf, bfsize, "%s %s",
- cl->ms.sym->name, cl->srcline);
+ const char *inlined = cl->ms.sym->inlined ? " (inlined)" : "";
+
+ if (show_srcline && cl->srcline)
+ printed = scnprintf(bf, bfsize, "%s %s%s",
+ cl->ms.sym->name, cl->srcline,
+ inlined);
else
- printed = scnprintf(bf, bfsize, "%s", cl->ms.sym->name);
+ printed = scnprintf(bf, bfsize, "%s%s",
+ cl->ms.sym->name, inlined);
} else
printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
@@ -1533,7 +1550,7 @@ int callchain_cursor__copy(struct callchain_cursor *dst,
node->branch, &node->branch_flags,
node->nr_loop_iter,
node->iter_cycles,
- node->branch_from);
+ node->branch_from, node->srcline);
if (rc)
break;
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index f967aa47d0a1..b79ef2478a57 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -122,7 +122,7 @@ struct callchain_list {
u64 iter_count;
u64 iter_cycles;
struct branch_type_stat brtype_stat;
- char *srcline;
+ const char *srcline;
struct list_head list;
};
@@ -136,6 +136,7 @@ struct callchain_cursor_node {
u64 ip;
struct map *map;
struct symbol *sym;
+ const char *srcline;
bool branch;
struct branch_flags branch_flags;
u64 branch_from;
@@ -202,7 +203,8 @@ static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
struct map *map, struct symbol *sym,
bool branch, struct branch_flags *flags,
- int nr_loop_iter, u64 iter_cycles, u64 branch_from);
+ int nr_loop_iter, u64 iter_cycles, u64 branch_from,
+ const char *srcline);
/* Close a cursor writing session. Initialize for the reader */
static inline void callchain_cursor_commit(struct callchain_cursor *cursor)
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
index 8808570f8e9c..7798a2cc8a86 100644
--- a/tools/perf/util/comm.c
+++ b/tools/perf/util/comm.c
@@ -6,6 +6,7 @@
#include <stdio.h>
#include <string.h>
#include <linux/refcount.h>
+#include "rwsem.h"
struct comm_str {
char *str;
@@ -15,6 +16,7 @@ struct comm_str {
/* Should perhaps be moved to struct machine */
static struct rb_root comm_str_root;
+static struct rw_semaphore comm_str_lock = {.lock = PTHREAD_RWLOCK_INITIALIZER,};
static struct comm_str *comm_str__get(struct comm_str *cs)
{
@@ -26,7 +28,9 @@ static struct comm_str *comm_str__get(struct comm_str *cs)
static void comm_str__put(struct comm_str *cs)
{
if (cs && refcount_dec_and_test(&cs->refcnt)) {
+ down_write(&comm_str_lock);
rb_erase(&cs->rb_node, &comm_str_root);
+ up_write(&comm_str_lock);
zfree(&cs->str);
free(cs);
}
@@ -51,7 +55,8 @@ static struct comm_str *comm_str__alloc(const char *str)
return cs;
}
-static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root)
+static
+struct comm_str *__comm_str__findnew(const char *str, struct rb_root *root)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
@@ -82,6 +87,17 @@ static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root)
return new;
}
+static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root)
+{
+ struct comm_str *cs;
+
+ down_write(&comm_str_lock);
+ cs = __comm_str__findnew(str, root);
+ up_write(&comm_str_lock);
+
+ return cs;
+}
+
struct comm *comm__new(const char *str, u64 timestamp, bool exec)
{
struct comm *comm = zalloc(sizeof(*comm));
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 4b893c622236..84eb9393c7db 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -701,10 +701,7 @@ struct perf_config_set *perf_config_set__new(void)
if (set) {
INIT_LIST_HEAD(&set->sections);
- if (perf_config_set__init(set) < 0) {
- perf_config_set__delete(set);
- set = NULL;
- }
+ perf_config_set__init(set);
}
return set;
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 2346cecb8ea2..5744c12641a5 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -1577,10 +1577,10 @@ int bt_convert__perf2ctf(const char *input, const char *path,
struct perf_data_convert_opts *opts)
{
struct perf_session *session;
- struct perf_data_file file = {
- .path = input,
- .mode = PERF_DATA_MODE_READ,
- .force = opts->force,
+ struct perf_data data = {
+ .file.path = input,
+ .mode = PERF_DATA_MODE_READ,
+ .force = opts->force,
};
struct convert c = {
.tool = {
@@ -1619,7 +1619,7 @@ int bt_convert__perf2ctf(const char *input, const char *path,
err = -1;
/* perf.data session */
- session = perf_session__new(&file, 0, &c.tool);
+ session = perf_session__new(&data, 0, &c.tool);
if (!session)
goto free_writer;
@@ -1650,7 +1650,7 @@ int bt_convert__perf2ctf(const char *input, const char *path,
fprintf(stderr,
"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
- file.path, path);
+ data.file.path, path);
fprintf(stderr,
"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 79192758bdb3..48094fde0a68 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -4,6 +4,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
+#include <fcntl.h>
#include <unistd.h>
#include <string.h>
@@ -21,56 +22,56 @@
#endif
#endif
-static bool check_pipe(struct perf_data_file *file)
+static bool check_pipe(struct perf_data *data)
{
struct stat st;
bool is_pipe = false;
- int fd = perf_data_file__is_read(file) ?
+ int fd = perf_data__is_read(data) ?
STDIN_FILENO : STDOUT_FILENO;
- if (!file->path) {
+ if (!data->file.path) {
if (!fstat(fd, &st) && S_ISFIFO(st.st_mode))
is_pipe = true;
} else {
- if (!strcmp(file->path, "-"))
+ if (!strcmp(data->file.path, "-"))
is_pipe = true;
}
if (is_pipe)
- file->fd = fd;
+ data->file.fd = fd;
- return file->is_pipe = is_pipe;
+ return data->is_pipe = is_pipe;
}
-static int check_backup(struct perf_data_file *file)
+static int check_backup(struct perf_data *data)
{
struct stat st;
- if (!stat(file->path, &st) && st.st_size) {
+ if (!stat(data->file.path, &st) && st.st_size) {
/* TODO check errors properly */
char oldname[PATH_MAX];
snprintf(oldname, sizeof(oldname), "%s.old",
- file->path);
+ data->file.path);
unlink(oldname);
- rename(file->path, oldname);
+ rename(data->file.path, oldname);
}
return 0;
}
-static int open_file_read(struct perf_data_file *file)
+static int open_file_read(struct perf_data *data)
{
struct stat st;
int fd;
char sbuf[STRERR_BUFSIZE];
- fd = open(file->path, O_RDONLY);
+ fd = open(data->file.path, O_RDONLY);
if (fd < 0) {
int err = errno;
- pr_err("failed to open %s: %s", file->path,
+ pr_err("failed to open %s: %s", data->file.path,
str_error_r(err, sbuf, sizeof(sbuf)));
- if (err == ENOENT && !strcmp(file->path, "perf.data"))
+ if (err == ENOENT && !strcmp(data->file.path, "perf.data"))
pr_err(" (try 'perf record' first)");
pr_err("\n");
return -err;
@@ -79,19 +80,19 @@ static int open_file_read(struct perf_data_file *file)
if (fstat(fd, &st) < 0)
goto out_close;
- if (!file->force && st.st_uid && (st.st_uid != geteuid())) {
+ if (!data->force && st.st_uid && (st.st_uid != geteuid())) {
pr_err("File %s not owned by current user or root (use -f to override)\n",
- file->path);
+ data->file.path);
goto out_close;
}
if (!st.st_size) {
- pr_info("zero-sized file (%s), nothing to do!\n",
- file->path);
+ pr_info("zero-sized data (%s), nothing to do!\n",
+ data->file.path);
goto out_close;
}
- file->size = st.st_size;
+ data->size = st.st_size;
return fd;
out_close:
@@ -99,49 +100,49 @@ static int open_file_read(struct perf_data_file *file)
return -1;
}
-static int open_file_write(struct perf_data_file *file)
+static int open_file_write(struct perf_data *data)
{
int fd;
char sbuf[STRERR_BUFSIZE];
- if (check_backup(file))
+ if (check_backup(data))
return -1;
- fd = open(file->path, O_CREAT|O_RDWR|O_TRUNC|O_CLOEXEC,
+ fd = open(data->file.path, O_CREAT|O_RDWR|O_TRUNC|O_CLOEXEC,
S_IRUSR|S_IWUSR);
if (fd < 0)
- pr_err("failed to open %s : %s\n", file->path,
+ pr_err("failed to open %s : %s\n", data->file.path,
str_error_r(errno, sbuf, sizeof(sbuf)));
return fd;
}
-static int open_file(struct perf_data_file *file)
+static int open_file(struct perf_data *data)
{
int fd;
- fd = perf_data_file__is_read(file) ?
- open_file_read(file) : open_file_write(file);
+ fd = perf_data__is_read(data) ?
+ open_file_read(data) : open_file_write(data);
- file->fd = fd;
+ data->file.fd = fd;
return fd < 0 ? -1 : 0;
}
-int perf_data_file__open(struct perf_data_file *file)
+int perf_data__open(struct perf_data *data)
{
- if (check_pipe(file))
+ if (check_pipe(data))
return 0;
- if (!file->path)
- file->path = "perf.data";
+ if (!data->file.path)
+ data->file.path = "perf.data";
- return open_file(file);
+ return open_file(data);
}
-void perf_data_file__close(struct perf_data_file *file)
+void perf_data__close(struct perf_data *data)
{
- close(file->fd);
+ close(data->file.fd);
}
ssize_t perf_data_file__write(struct perf_data_file *file,
@@ -150,42 +151,48 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
return writen(file->fd, buf, size);
}
-int perf_data_file__switch(struct perf_data_file *file,
+ssize_t perf_data__write(struct perf_data *data,
+ void *buf, size_t size)
+{
+ return perf_data_file__write(&data->file, buf, size);
+}
+
+int perf_data__switch(struct perf_data *data,
const char *postfix,
size_t pos, bool at_exit)
{
char *new_filepath;
int ret;
- if (check_pipe(file))
+ if (check_pipe(data))
return -EINVAL;
- if (perf_data_file__is_read(file))
+ if (perf_data__is_read(data))
return -EINVAL;
- if (asprintf(&new_filepath, "%s.%s", file->path, postfix) < 0)
+ if (asprintf(&new_filepath, "%s.%s", data->file.path, postfix) < 0)
return -ENOMEM;
/*
* Only fire a warning, don't return error, continue fill
* original file.
*/
- if (rename(file->path, new_filepath))
- pr_warning("Failed to rename %s to %s\n", file->path, new_filepath);
+ if (rename(data->file.path, new_filepath))
+ pr_warning("Failed to rename %s to %s\n", data->file.path, new_filepath);
if (!at_exit) {
- close(file->fd);
- ret = perf_data_file__open(file);
+ close(data->file.fd);
+ ret = perf_data__open(data);
if (ret < 0)
goto out;
- if (lseek(file->fd, pos, SEEK_SET) == (off_t)-1) {
+ if (lseek(data->file.fd, pos, SEEK_SET) == (off_t)-1) {
ret = -errno;
pr_debug("Failed to lseek to %zu: %s",
pos, strerror(errno));
goto out;
}
}
- ret = file->fd;
+ ret = data->file.fd;
out:
free(new_filepath);
return ret;
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 80241ba78101..4828f7feea89 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -10,51 +10,57 @@ enum perf_data_mode {
};
struct perf_data_file {
- const char *path;
- int fd;
+ const char *path;
+ int fd;
+};
+
+struct perf_data {
+ struct perf_data_file file;
bool is_pipe;
bool force;
unsigned long size;
enum perf_data_mode mode;
};
-static inline bool perf_data_file__is_read(struct perf_data_file *file)
+static inline bool perf_data__is_read(struct perf_data *data)
{
- return file->mode == PERF_DATA_MODE_READ;
+ return data->mode == PERF_DATA_MODE_READ;
}
-static inline bool perf_data_file__is_write(struct perf_data_file *file)
+static inline bool perf_data__is_write(struct perf_data *data)
{
- return file->mode == PERF_DATA_MODE_WRITE;
+ return data->mode == PERF_DATA_MODE_WRITE;
}
-static inline int perf_data_file__is_pipe(struct perf_data_file *file)
+static inline int perf_data__is_pipe(struct perf_data *data)
{
- return file->is_pipe;
+ return data->is_pipe;
}
-static inline int perf_data_file__fd(struct perf_data_file *file)
+static inline int perf_data__fd(struct perf_data *data)
{
- return file->fd;
+ return data->file.fd;
}
-static inline unsigned long perf_data_file__size(struct perf_data_file *file)
+static inline unsigned long perf_data__size(struct perf_data *data)
{
- return file->size;
+ return data->size;
}
-int perf_data_file__open(struct perf_data_file *file);
-void perf_data_file__close(struct perf_data_file *file);
+int perf_data__open(struct perf_data *data);
+void perf_data__close(struct perf_data *data);
+ssize_t perf_data__write(struct perf_data *data,
+ void *buf, size_t size);
ssize_t perf_data_file__write(struct perf_data_file *file,
void *buf, size_t size);
/*
* If at_exit is set, only rename current perf.data to
- * perf.data.<postfix>, continue write on original file.
+ * perf.data.<postfix>, continue write on original data.
* Set at_exit when flushing the last output.
*
* Return value is fd of new output.
*/
-int perf_data_file__switch(struct perf_data_file *file,
+int perf_data__switch(struct perf_data *data,
const char *postfix,
size_t pos, bool at_exit);
#endif /* __PERF_DATA_H */
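[Editor's note] With data.h reshaped as above, struct perf_data embeds struct perf_data_file and the open/close/fd/size accessors move to the perf_data__* names. A minimal usage sketch against those declarations (not code from this patch; it assumes it is compiled inside tools/perf where "data.h" and its pr_err-based error reporting are available):

#include <stdio.h>
#include "data.h"

static int dump_data_size(const char *path)
{
	struct perf_data data = {
		.file.path = path,
		.mode      = PERF_DATA_MODE_READ,
	};

	if (perf_data__open(&data) < 0)	/* open_file_read() prints its own error */
		return -1;

	printf("%s: %lu bytes (fd %d)\n", path,
	       perf_data__size(&data), perf_data__fd(&data));
	perf_data__close(&data);
	return 0;
}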
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index dc8b53b6950e..f3a71db83947 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -112,50 +112,53 @@ int dump_printf(const char *fmt, ...)
return ret;
}
-static void trace_event_printer(enum binary_printer_ops op,
- unsigned int val, void *extra)
+static int trace_event_printer(enum binary_printer_ops op,
+ unsigned int val, void *extra, FILE *fp)
{
const char *color = PERF_COLOR_BLUE;
union perf_event *event = (union perf_event *)extra;
unsigned char ch = (unsigned char)val;
+ int printed = 0;
switch (op) {
case BINARY_PRINT_DATA_BEGIN:
- printf(".");
- color_fprintf(stdout, color, "\n. ... raw event: size %d bytes\n",
- event->header.size);
+ printed += fprintf(fp, ".");
+ printed += color_fprintf(fp, color, "\n. ... raw event: size %d bytes\n",
+ event->header.size);
break;
case BINARY_PRINT_LINE_BEGIN:
- printf(".");
+ printed += fprintf(fp, ".");
break;
case BINARY_PRINT_ADDR:
- color_fprintf(stdout, color, " %04x: ", val);
+ printed += color_fprintf(fp, color, " %04x: ", val);
break;
case BINARY_PRINT_NUM_DATA:
- color_fprintf(stdout, color, " %02x", val);
+ printed += color_fprintf(fp, color, " %02x", val);
break;
case BINARY_PRINT_NUM_PAD:
- color_fprintf(stdout, color, " ");
+ printed += color_fprintf(fp, color, " ");
break;
case BINARY_PRINT_SEP:
- color_fprintf(stdout, color, " ");
+ printed += color_fprintf(fp, color, " ");
break;
case BINARY_PRINT_CHAR_DATA:
- color_fprintf(stdout, color, "%c",
+ printed += color_fprintf(fp, color, "%c",
isprint(ch) ? ch : '.');
break;
case BINARY_PRINT_CHAR_PAD:
- color_fprintf(stdout, color, " ");
+ printed += color_fprintf(fp, color, " ");
break;
case BINARY_PRINT_LINE_END:
- color_fprintf(stdout, color, "\n");
+ printed += color_fprintf(fp, color, "\n");
break;
case BINARY_PRINT_DATA_END:
- printf("\n");
+ printed += fprintf(fp, "\n");
break;
default:
break;
}
+
+ return printed;
}
void trace_event(union perf_event *event)
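[Editor's note] The debug.c hunk converts the binary printer callback from void to int and threads a FILE * through, so callers can accumulate how many bytes were printed. A sketch of a minimal callback matching that new signature; it handles only the ops it cares about and returns the byte count. The enum values and __maybe_unused are the ones already used above; the include path for the enum is assumed to be perf's print_binary.h.

#include <stdio.h>
#include "print_binary.h"	/* assumed home of enum binary_printer_ops */

static int hex_only_printer(enum binary_printer_ops op,
			    unsigned int val, void *extra __maybe_unused,
			    FILE *fp)
{
	int printed = 0;

	switch (op) {
	case BINARY_PRINT_NUM_DATA:
		printed += fprintf(fp, " %02x", val);
		break;
	case BINARY_PRINT_LINE_END:
		printed += fprintf(fp, "\n");
		break;
	default:
		break;
	}

	return printed;
}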
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 00c98c968cb1..d5b6f7f5baff 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -7,9 +7,11 @@
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
+#include <fcntl.h>
#include "compress.h"
#include "path.h"
#include "symbol.h"
+#include "srcline.h"
#include "dso.h"
#include "machine.h"
#include "auxtrace.h"
@@ -1201,6 +1203,8 @@ struct dso *dso__new(const char *name)
for (i = 0; i < MAP__NR_TYPES; ++i)
dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
dso->data.cache = RB_ROOT;
+ dso->inlined_nodes = RB_ROOT;
+ dso->srclines = RB_ROOT;
dso->data.fd = -1;
dso->data.status = DSO_DATA_STATUS_UNKNOWN;
dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
@@ -1232,6 +1236,10 @@ void dso__delete(struct dso *dso)
if (!RB_EMPTY_NODE(&dso->rb_node))
pr_err("DSO %s is still in rbtree when being deleted!\n",
dso->long_name);
+
+ /* free inlines first, as they reference symbols */
+ inlines__tree_delete(&dso->inlined_nodes);
+ srcline__tree_delete(&dso->srclines);
for (i = 0; i < MAP__NR_TYPES; ++i)
symbols__delete(&dso->symbols[i]);
@@ -1366,9 +1374,9 @@ void __dsos__add(struct dsos *dsos, struct dso *dso)
void dsos__add(struct dsos *dsos, struct dso *dso)
{
- pthread_rwlock_wrlock(&dsos->lock);
+ down_write(&dsos->lock);
__dsos__add(dsos, dso);
- pthread_rwlock_unlock(&dsos->lock);
+ up_write(&dsos->lock);
}
struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
@@ -1387,9 +1395,9 @@ struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
struct dso *dso;
- pthread_rwlock_rdlock(&dsos->lock);
+ down_read(&dsos->lock);
dso = __dsos__find(dsos, name, cmp_short);
- pthread_rwlock_unlock(&dsos->lock);
+ up_read(&dsos->lock);
return dso;
}
@@ -1416,9 +1424,9 @@ struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
struct dso *dsos__findnew(struct dsos *dsos, const char *name)
{
struct dso *dso;
- pthread_rwlock_wrlock(&dsos->lock);
+ down_write(&dsos->lock);
dso = dso__get(__dsos__findnew(dsos, name));
- pthread_rwlock_unlock(&dsos->lock);
+ up_write(&dsos->lock);
return dso;
}
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 926ff2e7f668..c229dbe0277a 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -7,7 +7,7 @@
#include <linux/rbtree.h>
#include <sys/types.h>
#include <stdbool.h>
-#include <pthread.h>
+#include "rwsem.h"
#include <linux/types.h>
#include <linux/bitops.h>
#include "map.h"
@@ -130,7 +130,7 @@ struct dso_cache {
struct dsos {
struct list_head head;
struct rb_root root; /* rbtree root sorted by long name */
- pthread_rwlock_t lock;
+ struct rw_semaphore lock;
};
struct auxtrace_cache;
@@ -142,6 +142,8 @@ struct dso {
struct rb_root *root; /* root of rbtree that rb_node is in */
struct rb_root symbols[MAP__NR_TYPES];
struct rb_root symbol_names[MAP__NR_TYPES];
+ struct rb_root inlined_nodes;
+ struct rb_root srclines;
struct {
u64 addr;
struct symbol *symbol;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index fc690fecbfd6..97a8ef9980db 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
+#include <fcntl.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -678,21 +679,21 @@ out:
return err;
}
-int perf_event__synthesize_threads(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine,
- bool mmap_data,
- unsigned int proc_map_timeout)
+static int __perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data,
+ unsigned int proc_map_timeout,
+ struct dirent **dirent,
+ int start,
+ int num)
{
- DIR *proc;
- char proc_path[PATH_MAX];
- struct dirent *dirent;
union perf_event *comm_event, *mmap_event, *fork_event;
union perf_event *namespaces_event;
int err = -1;
-
- if (machine__is_default_guest(machine))
- return 0;
+ char *end;
+ pid_t pid;
+ int i;
comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
if (comm_event == NULL)
@@ -712,31 +713,25 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
if (namespaces_event == NULL)
goto out_free_fork;
- snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
- proc = opendir(proc_path);
-
- if (proc == NULL)
- goto out_free_namespaces;
-
- while ((dirent = readdir(proc)) != NULL) {
- char *end;
- pid_t pid = strtol(dirent->d_name, &end, 10);
+ for (i = start; i < start + num; i++) {
+ if (!isdigit(dirent[i]->d_name[0]))
+ continue;
- if (*end) /* only interested in proper numerical dirents */
+ pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
+ /* only interested in proper numerical dirents */
+ if (*end)
continue;
/*
- * We may race with exiting thread, so don't stop just because
- * one thread couldn't be synthesized.
- */
+ * We may race with exiting thread, so don't stop just because
+ * one thread couldn't be synthesized.
+ */
__event__synthesize_thread(comm_event, mmap_event, fork_event,
namespaces_event, pid, 1, process,
tool, machine, mmap_data,
proc_map_timeout);
}
-
err = 0;
- closedir(proc);
-out_free_namespaces:
+
free(namespaces_event);
out_free_fork:
free(fork_event);
@@ -748,6 +743,118 @@ out:
return err;
}
+struct synthesize_threads_arg {
+ struct perf_tool *tool;
+ perf_event__handler_t process;
+ struct machine *machine;
+ bool mmap_data;
+ unsigned int proc_map_timeout;
+ struct dirent **dirent;
+ int num;
+ int start;
+};
+
+static void *synthesize_threads_worker(void *arg)
+{
+ struct synthesize_threads_arg *args = arg;
+
+ __perf_event__synthesize_threads(args->tool, args->process,
+ args->machine, args->mmap_data,
+ args->proc_map_timeout, args->dirent,
+ args->start, args->num);
+ return NULL;
+}
+
+int perf_event__synthesize_threads(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine,
+ bool mmap_data,
+ unsigned int proc_map_timeout,
+ unsigned int nr_threads_synthesize)
+{
+ struct synthesize_threads_arg *args = NULL;
+ pthread_t *synthesize_threads = NULL;
+ char proc_path[PATH_MAX];
+ struct dirent **dirent;
+ int num_per_thread;
+ int m, n, i, j;
+ int thread_nr;
+ int base = 0;
+ int err = -1;
+
+
+ if (machine__is_default_guest(machine))
+ return 0;
+
+ snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
+ n = scandir(proc_path, &dirent, 0, alphasort);
+ if (n < 0)
+ return err;
+
+ if (nr_threads_synthesize == UINT_MAX)
+ thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
+ else
+ thread_nr = nr_threads_synthesize;
+
+ if (thread_nr <= 1) {
+ err = __perf_event__synthesize_threads(tool, process,
+ machine, mmap_data,
+ proc_map_timeout,
+ dirent, base, n);
+ goto free_dirent;
+ }
+ if (thread_nr > n)
+ thread_nr = n;
+
+ synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
+ if (synthesize_threads == NULL)
+ goto free_dirent;
+
+ args = calloc(sizeof(*args), thread_nr);
+ if (args == NULL)
+ goto free_threads;
+
+ num_per_thread = n / thread_nr;
+ m = n % thread_nr;
+ for (i = 0; i < thread_nr; i++) {
+ args[i].tool = tool;
+ args[i].process = process;
+ args[i].machine = machine;
+ args[i].mmap_data = mmap_data;
+ args[i].proc_map_timeout = proc_map_timeout;
+ args[i].dirent = dirent;
+ }
+ for (i = 0; i < m; i++) {
+ args[i].num = num_per_thread + 1;
+ args[i].start = i * args[i].num;
+ }
+ if (i != 0)
+ base = args[i-1].start + args[i-1].num;
+ for (j = i; j < thread_nr; j++) {
+ args[j].num = num_per_thread;
+ args[j].start = base + (j - i) * args[i].num;
+ }
+
+ for (i = 0; i < thread_nr; i++) {
+ if (pthread_create(&synthesize_threads[i], NULL,
+ synthesize_threads_worker, &args[i]))
+ goto out_join;
+ }
+ err = 0;
+out_join:
+ for (i = 0; i < thread_nr; i++)
+ pthread_join(synthesize_threads[i], NULL);
+ free(args);
+free_threads:
+ free(synthesize_threads);
+free_dirent:
+ for (i = 0; i < n; i++)
+ free(dirent[i]);
+ free(dirent);
+
+ return err;
+}
+
struct process_symbol_args {
const char *name;
u64 start;
@@ -1498,6 +1605,7 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
al->sym = NULL;
al->cpu = sample->cpu;
al->socket = -1;
+ al->srcline = NULL;
if (al->cpu >= 0) {
struct perf_env *env = machine->env;
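[Editor's note] The new perf_event__synthesize_threads() above splits the n scandir() entries over thread_nr workers: the first m = n % thread_nr workers each take num_per_thread + 1 entries, the rest take num_per_thread, so every entry is covered exactly once. A self-contained sketch of that arithmetic with plain ints (no perf types involved):

#include <stdio.h>

int main(void)
{
	int n = 10, thread_nr = 4;
	int num_per_thread = n / thread_nr;	/* 2 */
	int m = n % thread_nr;			/* 2 */
	int start = 0, i;

	for (i = 0; i < thread_nr; i++) {
		int num = num_per_thread + (i < m ? 1 : 0);

		printf("worker %d: entries [%d, %d)\n", i, start, start + num);
		start += num;
	}
	return 0;
}

For n = 10 and thread_nr = 4 this prints ranges of 3, 3, 2 and 2 entries, matching the start/num assignment loops in the patch.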
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 5524ee69279c..1ae95efbfb95 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -681,7 +681,8 @@ int perf_event__synthesize_cpu_map(struct perf_tool *tool,
int perf_event__synthesize_threads(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine, bool mmap_data,
- unsigned int proc_map_timeout);
+ unsigned int proc_map_timeout,
+ unsigned int nr_threads_synthesize);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 6a0d7ffbeba0..c6c891e154a6 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -33,9 +33,6 @@
#include <linux/log2.h>
#include <linux/err.h>
-static void perf_mmap__munmap(struct perf_mmap *map);
-static void perf_mmap__put(struct perf_mmap *map);
-
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
@@ -704,129 +701,6 @@ static int perf_evlist__resume(struct perf_evlist *evlist)
return perf_evlist__set_paused(evlist, false);
}
-/* When check_messup is true, 'end' must points to a good entry */
-static union perf_event *
-perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
- u64 end, u64 *prev)
-{
- unsigned char *data = md->base + page_size;
- union perf_event *event = NULL;
- int diff = end - start;
-
- if (check_messup) {
- /*
- * If we're further behind than half the buffer, there's a chance
- * the writer will bite our tail and mess up the samples under us.
- *
- * If we somehow ended up ahead of the 'end', we got messed up.
- *
- * In either case, truncate and restart at 'end'.
- */
- if (diff > md->mask / 2 || diff < 0) {
- fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
-
- /*
- * 'end' points to a known good entry, start there.
- */
- start = end;
- diff = 0;
- }
- }
-
- if (diff >= (int)sizeof(event->header)) {
- size_t size;
-
- event = (union perf_event *)&data[start & md->mask];
- size = event->header.size;
-
- if (size < sizeof(event->header) || diff < (int)size) {
- event = NULL;
- goto broken_event;
- }
-
- /*
- * Event straddles the mmap boundary -- header should always
- * be inside due to u64 alignment of output.
- */
- if ((start & md->mask) + size != ((start + size) & md->mask)) {
- unsigned int offset = start;
- unsigned int len = min(sizeof(*event), size), cpy;
- void *dst = md->event_copy;
-
- do {
- cpy = min(md->mask + 1 - (offset & md->mask), len);
- memcpy(dst, &data[offset & md->mask], cpy);
- offset += cpy;
- dst += cpy;
- len -= cpy;
- } while (len);
-
- event = (union perf_event *) md->event_copy;
- }
-
- start += size;
- }
-
-broken_event:
- if (prev)
- *prev = start;
-
- return event;
-}
-
-union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
-{
- u64 head;
- u64 old = md->prev;
-
- /*
- * Check if event was unmapped due to a POLLHUP/POLLERR.
- */
- if (!refcount_read(&md->refcnt))
- return NULL;
-
- head = perf_mmap__read_head(md);
-
- return perf_mmap__read(md, check_messup, old, head, &md->prev);
-}
-
-union perf_event *
-perf_mmap__read_backward(struct perf_mmap *md)
-{
- u64 head, end;
- u64 start = md->prev;
-
- /*
- * Check if event was unmapped due to a POLLHUP/POLLERR.
- */
- if (!refcount_read(&md->refcnt))
- return NULL;
-
- head = perf_mmap__read_head(md);
- if (!head)
- return NULL;
-
- /*
- * 'head' pointer starts from 0. Kernel minus sizeof(record) form
- * it each time when kernel writes to it, so in fact 'head' is
- * negative. 'end' pointer is made manually by adding the size of
- * the ring buffer to 'head' pointer, means the validate data can
- * read is the whole ring buffer. If 'end' is positive, the ring
- * buffer has not fully filled, so we must adjust 'end' to 0.
- *
- * However, since both 'head' and 'end' is unsigned, we can't
- * simply compare 'end' against 0. Here we compare '-head' and
- * the size of the ring buffer, where -head is the number of bytes
- * kernel write to the ring buffer.
- */
- if (-head < (u64)(md->mask + 1))
- end = 0;
- else
- end = head + md->mask + 1;
-
- return perf_mmap__read(md, false, start, end, &md->prev);
-}
-
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
struct perf_mmap *md = &evlist->mmap[idx];
@@ -857,96 +731,16 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
return perf_evlist__mmap_read_forward(evlist, idx);
}
-void perf_mmap__read_catchup(struct perf_mmap *md)
-{
- u64 head;
-
- if (!refcount_read(&md->refcnt))
- return;
-
- head = perf_mmap__read_head(md);
- md->prev = head;
-}
-
void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{
perf_mmap__read_catchup(&evlist->mmap[idx]);
}
-static bool perf_mmap__empty(struct perf_mmap *md)
-{
- return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
-}
-
-static void perf_mmap__get(struct perf_mmap *map)
-{
- refcount_inc(&map->refcnt);
-}
-
-static void perf_mmap__put(struct perf_mmap *md)
-{
- BUG_ON(md->base && refcount_read(&md->refcnt) == 0);
-
- if (refcount_dec_and_test(&md->refcnt))
- perf_mmap__munmap(md);
-}
-
-void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
-{
- if (!overwrite) {
- u64 old = md->prev;
-
- perf_mmap__write_tail(md, old);
- }
-
- if (refcount_read(&md->refcnt) == 1 && perf_mmap__empty(md))
- perf_mmap__put(md);
-}
-
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
}
-int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
- struct auxtrace_mmap_params *mp __maybe_unused,
- void *userpg __maybe_unused,
- int fd __maybe_unused)
-{
- return 0;
-}
-
-void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
-{
-}
-
-void __weak auxtrace_mmap_params__init(
- struct auxtrace_mmap_params *mp __maybe_unused,
- off_t auxtrace_offset __maybe_unused,
- unsigned int auxtrace_pages __maybe_unused,
- bool auxtrace_overwrite __maybe_unused)
-{
-}
-
-void __weak auxtrace_mmap_params__set_idx(
- struct auxtrace_mmap_params *mp __maybe_unused,
- struct perf_evlist *evlist __maybe_unused,
- int idx __maybe_unused,
- bool per_cpu __maybe_unused)
-{
-}
-
-static void perf_mmap__munmap(struct perf_mmap *map)
-{
- if (map->base != NULL) {
- munmap(map->base, perf_mmap__mmap_len(map));
- map->base = NULL;
- map->fd = -1;
- refcount_set(&map->refcnt, 0);
- }
- auxtrace_mmap__munmap(&map->auxtrace_mmap);
-}
-
static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
int i;
@@ -995,48 +789,6 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
return map;
}
-struct mmap_params {
- int prot;
- int mask;
- struct auxtrace_mmap_params auxtrace_mp;
-};
-
-static int perf_mmap__mmap(struct perf_mmap *map,
- struct mmap_params *mp, int fd)
-{
- /*
- * The last one will be done at perf_evlist__mmap_consume(), so that we
- * make sure we don't prevent tools from consuming every last event in
- * the ring buffer.
- *
- * I.e. we can get the POLLHUP meaning that the fd doesn't exist
- * anymore, but the last events for it are still in the ring buffer,
- * waiting to be consumed.
- *
- * Tools can chose to ignore this at their own discretion, but the
- * evlist layer can't just drop it when filtering events in
- * perf_evlist__filter_pollfd().
- */
- refcount_set(&map->refcnt, 2);
- map->prev = 0;
- map->mask = mp->mask;
- map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
- MAP_SHARED, fd, 0);
- if (map->base == MAP_FAILED) {
- pr_debug2("failed to mmap perf event ring buffer, error %d\n",
- errno);
- map->base = NULL;
- return -1;
- }
- map->fd = fd;
-
- if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
- &mp->auxtrace_mp, map->base, fd))
- return -1;
-
- return 0;
-}
-
static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
struct perf_evsel *evsel)
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index c1750a400bb7..e72ae64c11ac 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -7,12 +7,13 @@
#include <linux/refcount.h>
#include <linux/list.h>
#include <api/fd/array.h>
+#include <fcntl.h>
#include <stdio.h>
#include "../perf.h"
#include "event.h"
#include "evsel.h"
+#include "mmap.h"
#include "util.h"
-#include "auxtrace.h"
#include <signal.h>
#include <unistd.h>
@@ -24,55 +25,6 @@ struct record_opts;
#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
-/**
- * struct perf_mmap - perf's ring buffer mmap details
- *
- * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
- */
-struct perf_mmap {
- void *base;
- int mask;
- int fd;
- refcount_t refcnt;
- u64 prev;
- struct auxtrace_mmap auxtrace_mmap;
- char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
-};
-
-static inline size_t
-perf_mmap__mmap_len(struct perf_mmap *map)
-{
- return map->mask + 1 + page_size;
-}
-
-/*
- * State machine of bkw_mmap_state:
- *
- * .________________(forbid)_____________.
- * | V
- * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
- * ^ ^ | ^ |
- * | |__(forbid)____/ |___(forbid)___/|
- * | |
- * \_________________(3)_______________/
- *
- * NOTREADY : Backward ring buffers are not ready
- * RUNNING : Backward ring buffers are recording
- * DATA_PENDING : We are required to collect data from backward ring buffers
- * EMPTY : We have collected data from backward ring buffers.
- *
- * (0): Setup backward ring buffer
- * (1): Pause ring buffers for reading
- * (2): Read from ring buffers
- * (3): Resume ring buffers for recording
- */
-enum bkw_mmap_state {
- BKW_MMAP_NOTREADY,
- BKW_MMAP_RUNNING,
- BKW_MMAP_DATA_PENDING,
- BKW_MMAP_EMPTY,
-};
-
struct perf_evlist {
struct list_head entries;
struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
@@ -177,12 +129,6 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);
-union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
-union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
-
-void perf_mmap__read_catchup(struct perf_mmap *md);
-void perf_mmap__consume(struct perf_mmap *md, bool overwrite);
-
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
@@ -286,25 +232,6 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);
-static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
-{
- struct perf_event_mmap_page *pc = mm->base;
- u64 head = ACCESS_ONCE(pc->data_head);
- rmb();
- return head;
-}
-
-static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
-{
- struct perf_event_mmap_page *pc = md->base;
-
- /*
- * ensure all reads are done before we write the tail out.
- */
- mb();
- pc->data_tail = tail;
-}
-
bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
void perf_evlist__to_front(struct perf_evlist *evlist,
struct perf_evsel *move_evsel);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 0dccdb89572c..f894893c203d 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -683,7 +683,7 @@ void perf_evsel__config_callchain(struct perf_evsel *evsel,
if (!function) {
perf_evsel__set_sample_bit(evsel, REGS_USER);
perf_evsel__set_sample_bit(evsel, STACK_USER);
- attr->sample_regs_user = PERF_REGS_MASK;
+ attr->sample_regs_user |= PERF_REGS_MASK;
attr->sample_stack_user = param->dump_size;
attr->exclude_callchain_user = 1;
} else {
@@ -936,6 +936,11 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
perf_evsel__set_sample_bit(evsel, REGS_INTR);
}
+ if (opts->sample_user_regs) {
+ attr->sample_regs_user |= opts->sample_user_regs;
+ perf_evsel__set_sample_bit(evsel, REGS_USER);
+ }
+
if (target__has_cpu(&opts->target) || opts->sample_cpu)
perf_evsel__set_sample_bit(evsel, CPU);
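[Editor's note] Both evsel.c hunks switch to |= when building attr->sample_regs_user, so a mask requested by the callchain setup and one requested via opts->sample_user_regs accumulate instead of overwriting each other. A tiny sketch of that effect; the mask values are made up for the example.

#include <stdio.h>

int main(void)
{
	unsigned long long sample_regs_user = 0;
	unsigned long long callchain_mask   = 0xff;	/* stand-in for PERF_REGS_MASK */
	unsigned long long user_requested   = 0x300;	/* stand-in for --user-regs bits */

	sample_regs_user |= callchain_mask;
	sample_regs_user |= user_requested;

	printf("sample_regs_user = 0x%llx\n", sample_regs_user);	/* 0x3ff */
	return 0;
}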
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index b4df79d72329..9277df96ffda 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -69,6 +69,8 @@ struct perf_evsel_config_term {
} val;
};
+struct perf_stat_evsel;
+
/** struct perf_evsel - event selector
*
* @evlist - evlist this evsel is in, if it is in one.
@@ -102,6 +104,7 @@ struct perf_evsel {
const char *unit;
struct event_format *tp_format;
off_t id_offset;
+ struct perf_stat_evsel *stats;
void *priv;
u64 db_id;
struct cgroup_sel *cgrp;
@@ -138,6 +141,7 @@ struct perf_evsel {
const char * metric_name;
struct perf_evsel **metric_events;
bool collect_stat;
+ bool weak_group;
};
union u64_swap {
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index 1fd7c2e46db2..06dfb027879d 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -158,7 +158,7 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
}
}
- if (print_dso) {
+ if (print_dso && (!node->sym || !node->sym->inlined)) {
printed += fprintf(fp, " (");
printed += map__fprintf_dsoname(node->map, fp);
printed += fprintf(fp, ")");
@@ -167,41 +167,12 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
if (print_srcline)
printed += map__fprintf_srcline(node->map, addr, "\n ", fp);
+ if (node->sym && node->sym->inlined)
+ printed += fprintf(fp, " (inlined)");
+
if (!print_oneline)
printed += fprintf(fp, "\n");
- if (symbol_conf.inline_name && node->map) {
- struct inline_node *inode;
-
- addr = map__rip_2objdump(node->map, node->ip),
- inode = dso__parse_addr_inlines(node->map->dso, addr);
-
- if (inode) {
- struct inline_list *ilist;
-
- list_for_each_entry(ilist, &inode->val, list) {
- if (print_arrow)
- printed += fprintf(fp, " <-");
-
- /* IP is same, just skip it */
- if (print_ip)
- printed += fprintf(fp, "%c%16s",
- s, "");
- if (print_sym)
- printed += fprintf(fp, " %s",
- ilist->funcname);
- if (print_srcline)
- printed += fprintf(fp, "\n %s:%d",
- ilist->filename,
- ilist->line_nr);
- if (!print_oneline)
- printed += fprintf(fp, "\n");
- }
-
- inline_node__delete(inode);
- }
- }
-
if (symbol_conf.bt_stop_list &&
node->sym &&
strlist__has_entry(symbol_conf.bt_stop_list,
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index ba0cea8fef72..7c0e9d587bfa 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1763,7 +1763,7 @@ process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
session = container_of(ff->ph, struct perf_session, header);
- if (session->file->is_pipe) {
+ if (session->data->is_pipe) {
/* Save events for reading later by print_event_desc,
* since they can't be read again in pipe mode. */
ff->events = events;
@@ -1772,7 +1772,7 @@ process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
for (evsel = events; evsel->attr.size; evsel++)
perf_evlist__set_event_name(session->evlist, evsel);
- if (!session->file->is_pipe)
+ if (!session->data->is_pipe)
free_event_desc(events);
return 0;
@@ -2249,7 +2249,7 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
struct header_print_data hd;
struct perf_header *header = &session->header;
- int fd = perf_data_file__fd(session->file);
+ int fd = perf_data__fd(session->data);
struct stat st;
int ret, bit;
@@ -2265,7 +2265,7 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
perf_header__process_sections(header, fd, &hd,
perf_file_section__fprintf_info);
- if (session->file->is_pipe)
+ if (session->data->is_pipe)
return 0;
fprintf(fp, "# missing features: ");
@@ -2758,7 +2758,7 @@ static int perf_header__read_pipe(struct perf_session *session)
struct perf_pipe_file_header f_header;
if (perf_file_header__read_pipe(&f_header, header,
- perf_data_file__fd(session->file),
+ perf_data__fd(session->data),
session->repipe) < 0) {
pr_debug("incompatible file format\n");
return -EINVAL;
@@ -2861,13 +2861,13 @@ static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
int perf_session__read_header(struct perf_session *session)
{
- struct perf_data_file *file = session->file;
+ struct perf_data *data = session->data;
struct perf_header *header = &session->header;
struct perf_file_header f_header;
struct perf_file_attr f_attr;
u64 f_id;
int nr_attrs, nr_ids, i, j;
- int fd = perf_data_file__fd(file);
+ int fd = perf_data__fd(data);
session->evlist = perf_evlist__new();
if (session->evlist == NULL)
@@ -2875,7 +2875,7 @@ int perf_session__read_header(struct perf_session *session)
session->evlist->env = &header->env;
session->machines.host.env = &header->env;
- if (perf_data_file__is_pipe(file))
+ if (perf_data__is_pipe(data))
return perf_header__read_pipe(session);
if (perf_file_header__read(&f_header, header, fd) < 0)
@@ -2890,7 +2890,7 @@ int perf_session__read_header(struct perf_session *session)
if (f_header.data.size == 0) {
pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
"Was the 'perf record' command properly terminated?\n",
- file->path);
+ data->file.path);
}
nr_attrs = f_header.attrs.size / f_header.attr_size;
@@ -3398,7 +3398,7 @@ int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
struct perf_session *session)
{
ssize_t size_read, padding, size = event->tracing_data.size;
- int fd = perf_data_file__fd(session->file);
+ int fd = perf_data__fd(session->data);
off_t offset = lseek(fd, 0, SEEK_CUR);
char buf[BUFSIZ];
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 097473600d94..b6140950301e 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -597,6 +597,7 @@ __hists__add_entry(struct hists *hists,
.map = al->map,
.sym = al->sym,
},
+ .srcline = al->srcline ? strdup(al->srcline) : NULL,
.socket = al->socket,
.cpu = al->cpu,
.cpumode = al->cpumode,
@@ -951,6 +952,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
.map = al->map,
.sym = al->sym,
},
+ .srcline = al->srcline ? strdup(al->srcline) : NULL,
.parent = iter->parent,
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
@@ -1142,11 +1144,6 @@ void hist_entry__delete(struct hist_entry *he)
zfree(&he->mem_info);
}
- if (he->inline_node) {
- inline_node__delete(he->inline_node);
- he->inline_node = NULL;
- }
-
zfree(&he->stat_acc);
free_srcline(he->srcline);
if (he->srcfile && he->srcfile[0])
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index 218ee2bac9a5..5325e65f9711 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -500,7 +500,7 @@ static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
}
if (!buffer->data) {
- int fd = perf_data_file__fd(btsq->bts->session->file);
+ int fd = perf_data__fd(btsq->bts->session->data);
buffer->data = auxtrace_buffer__get_data(buffer, fd);
if (!buffer->data) {
@@ -664,10 +664,10 @@ static int intel_bts_process_auxtrace_event(struct perf_session *session,
if (!bts->data_queued) {
struct auxtrace_buffer *buffer;
off_t data_offset;
- int fd = perf_data_file__fd(session->file);
+ int fd = perf_data__fd(session->data);
int err;
- if (perf_data_file__is_pipe(session->file)) {
+ if (perf_data__is_pipe(session->data)) {
data_offset = 0;
} else {
data_offset = lseek(fd, 0, SEEK_CUR);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index b58f9fd1e2ee..23f9ba676df0 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -271,7 +271,7 @@ next:
ptq->buffer = buffer;
if (!buffer->data) {
- int fd = perf_data_file__fd(ptq->pt->session->file);
+ int fd = perf_data__fd(ptq->pt->session->data);
buffer->data = auxtrace_buffer__get_data(buffer, fd);
if (!buffer->data)
@@ -2084,10 +2084,10 @@ static int intel_pt_process_auxtrace_event(struct perf_session *session,
if (!pt->data_queued) {
struct auxtrace_buffer *buffer;
off_t data_offset;
- int fd = perf_data_file__fd(session->file);
+ int fd = perf_data__fd(session->data);
int err;
- if (perf_data_file__is_pipe(session->file)) {
+ if (perf_data__is_pipe(session->data)) {
data_offset = 0;
} else {
data_offset = lseek(fd, 0, SEEK_CUR);
diff --git a/tools/perf/util/jit.h b/tools/perf/util/jit.h
index c2582fa9fe21..6817ffc2a059 100644
--- a/tools/perf/util/jit.h
+++ b/tools/perf/util/jit.h
@@ -4,7 +4,7 @@
#include <data.h>
-int jit_process(struct perf_session *session, struct perf_data_file *output,
+int jit_process(struct perf_session *session, struct perf_data *output,
struct machine *machine, char *filename, pid_t pid, u64 *nbytes);
int jit_inject_record(const char *filename);
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 36483db032e8..a1863000e972 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -30,7 +30,7 @@
#include "sane_ctype.h"
struct jit_buf_desc {
- struct perf_data_file *output;
+ struct perf_data *output;
struct perf_session *session;
struct machine *machine;
union jr_entry *entry;
@@ -61,8 +61,8 @@ struct debug_line_info {
struct jit_tool {
struct perf_tool tool;
- struct perf_data_file output;
- struct perf_data_file input;
+ struct perf_data output;
+ struct perf_data input;
u64 bytes_written;
};
@@ -357,7 +357,7 @@ jit_inject_event(struct jit_buf_desc *jd, union perf_event *event)
{
ssize_t size;
- size = perf_data_file__write(jd->output, event, event->header.size);
+ size = perf_data__write(jd->output, event, event->header.size);
if (size < 0)
return -1;
@@ -752,7 +752,7 @@ jit_detect(char *mmap_name, pid_t pid)
int
jit_process(struct perf_session *session,
- struct perf_data_file *output,
+ struct perf_data *output,
struct machine *machine,
char *filename,
pid_t pid,
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index bd5d5b5e2218..6a8d03c3d9b7 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -31,7 +31,21 @@ static void dsos__init(struct dsos *dsos)
{
INIT_LIST_HEAD(&dsos->head);
dsos->root = RB_ROOT;
- pthread_rwlock_init(&dsos->lock, NULL);
+ init_rwsem(&dsos->lock);
+}
+
+static void machine__threads_init(struct machine *machine)
+{
+ int i;
+
+ for (i = 0; i < THREADS__TABLE_SIZE; i++) {
+ struct threads *threads = &machine->threads[i];
+ threads->entries = RB_ROOT;
+ init_rwsem(&threads->lock);
+ threads->nr = 0;
+ INIT_LIST_HEAD(&threads->dead);
+ threads->last_match = NULL;
+ }
}
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
@@ -41,11 +55,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
RB_CLEAR_NODE(&machine->rb_node);
dsos__init(&machine->dsos);
- machine->threads = RB_ROOT;
- pthread_rwlock_init(&machine->threads_lock, NULL);
- machine->nr_threads = 0;
- INIT_LIST_HEAD(&machine->dead_threads);
- machine->last_match = NULL;
+ machine__threads_init(machine);
machine->vdso_info = NULL;
machine->env = NULL;
@@ -121,7 +131,7 @@ static void dsos__purge(struct dsos *dsos)
{
struct dso *pos, *n;
- pthread_rwlock_wrlock(&dsos->lock);
+ down_write(&dsos->lock);
list_for_each_entry_safe(pos, n, &dsos->head, node) {
RB_CLEAR_NODE(&pos->rb_node);
@@ -130,39 +140,49 @@ static void dsos__purge(struct dsos *dsos)
dso__put(pos);
}
- pthread_rwlock_unlock(&dsos->lock);
+ up_write(&dsos->lock);
}
static void dsos__exit(struct dsos *dsos)
{
dsos__purge(dsos);
- pthread_rwlock_destroy(&dsos->lock);
+ exit_rwsem(&dsos->lock);
}
void machine__delete_threads(struct machine *machine)
{
struct rb_node *nd;
+ int i;
- pthread_rwlock_wrlock(&machine->threads_lock);
- nd = rb_first(&machine->threads);
- while (nd) {
- struct thread *t = rb_entry(nd, struct thread, rb_node);
+ for (i = 0; i < THREADS__TABLE_SIZE; i++) {
+ struct threads *threads = &machine->threads[i];
+ down_write(&threads->lock);
+ nd = rb_first(&threads->entries);
+ while (nd) {
+ struct thread *t = rb_entry(nd, struct thread, rb_node);
- nd = rb_next(nd);
- __machine__remove_thread(machine, t, false);
+ nd = rb_next(nd);
+ __machine__remove_thread(machine, t, false);
+ }
+ up_write(&threads->lock);
}
- pthread_rwlock_unlock(&machine->threads_lock);
}
void machine__exit(struct machine *machine)
{
+ int i;
+
machine__destroy_kernel_maps(machine);
map_groups__exit(&machine->kmaps);
dsos__exit(&machine->dsos);
machine__exit_vdso(machine);
zfree(&machine->root_dir);
zfree(&machine->current_tid);
- pthread_rwlock_destroy(&machine->threads_lock);
+
+ for (i = 0; i < THREADS__TABLE_SIZE; i++) {
+ struct threads *threads = &machine->threads[i];
+ exit_rwsem(&threads->lock);
+ }
}
void machine__delete(struct machine *machine)
@@ -380,10 +400,11 @@ out_err:
* lookup/new thread inserted.
*/
static struct thread *____machine__findnew_thread(struct machine *machine,
+ struct threads *threads,
pid_t pid, pid_t tid,
bool create)
{
- struct rb_node **p = &machine->threads.rb_node;
+ struct rb_node **p = &threads->entries.rb_node;
struct rb_node *parent = NULL;
struct thread *th;
@@ -392,14 +413,14 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
* so most of the time we dont have to look up
* the full rbtree:
*/
- th = machine->last_match;
+ th = threads->last_match;
if (th != NULL) {
if (th->tid == tid) {
machine__update_thread_pid(machine, th, pid);
return thread__get(th);
}
- machine->last_match = NULL;
+ threads->last_match = NULL;
}
while (*p != NULL) {
@@ -407,7 +428,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
th = rb_entry(parent, struct thread, rb_node);
if (th->tid == tid) {
- machine->last_match = th;
+ threads->last_match = th;
machine__update_thread_pid(machine, th, pid);
return thread__get(th);
}
@@ -424,7 +445,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
th = thread__new(pid, tid);
if (th != NULL) {
rb_link_node(&th->rb_node, parent, p);
- rb_insert_color(&th->rb_node, &machine->threads);
+ rb_insert_color(&th->rb_node, &threads->entries);
/*
* We have to initialize map_groups separately
@@ -435,7 +456,7 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
* leader and that would screwed the rb tree.
*/
if (thread__init_map_groups(th, machine)) {
- rb_erase_init(&th->rb_node, &machine->threads);
+ rb_erase_init(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
thread__put(th);
return NULL;
@@ -444,8 +465,8 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
* It is now in the rbtree, get a ref
*/
thread__get(th);
- machine->last_match = th;
- ++machine->nr_threads;
+ threads->last_match = th;
+ ++threads->nr;
}
return th;
@@ -453,27 +474,30 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
- return ____machine__findnew_thread(machine, pid, tid, true);
+ return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
pid_t tid)
{
+ struct threads *threads = machine__threads(machine, tid);
struct thread *th;
- pthread_rwlock_wrlock(&machine->threads_lock);
+ down_write(&threads->lock);
th = __machine__findnew_thread(machine, pid, tid);
- pthread_rwlock_unlock(&machine->threads_lock);
+ up_write(&threads->lock);
return th;
}
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
pid_t tid)
{
+ struct threads *threads = machine__threads(machine, tid);
struct thread *th;
- pthread_rwlock_rdlock(&machine->threads_lock);
- th = ____machine__findnew_thread(machine, pid, tid, false);
- pthread_rwlock_unlock(&machine->threads_lock);
+
+ down_read(&threads->lock);
+ th = ____machine__findnew_thread(machine, threads, pid, tid, false);
+ up_read(&threads->lock);
return th;
}
@@ -565,7 +589,7 @@ static struct dso *machine__findnew_module_dso(struct machine *machine,
{
struct dso *dso;
- pthread_rwlock_wrlock(&machine->dsos.lock);
+ down_write(&machine->dsos.lock);
dso = __dsos__find(&machine->dsos, m->name, true);
if (!dso) {
@@ -579,7 +603,7 @@ static struct dso *machine__findnew_module_dso(struct machine *machine,
dso__get(dso);
out_unlock:
- pthread_rwlock_unlock(&machine->dsos.lock);
+ up_write(&machine->dsos.lock);
return dso;
}
@@ -720,21 +744,25 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
- size_t ret;
struct rb_node *nd;
+ size_t ret;
+ int i;
- pthread_rwlock_rdlock(&machine->threads_lock);
+ for (i = 0; i < THREADS__TABLE_SIZE; i++) {
+ struct threads *threads = &machine->threads[i];
- ret = fprintf(fp, "Threads: %u\n", machine->nr_threads);
+ down_read(&threads->lock);
- for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
- struct thread *pos = rb_entry(nd, struct thread, rb_node);
+ ret = fprintf(fp, "Threads: %u\n", threads->nr);
- ret += thread__fprintf(pos, fp);
- }
+ for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ struct thread *pos = rb_entry(nd, struct thread, rb_node);
- pthread_rwlock_unlock(&machine->threads_lock);
+ ret += thread__fprintf(pos, fp);
+ }
+ up_read(&threads->lock);
+ }
return ret;
}
@@ -1293,7 +1321,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
struct dso *kernel = NULL;
struct dso *dso;
- pthread_rwlock_rdlock(&machine->dsos.lock);
+ down_read(&machine->dsos.lock);
list_for_each_entry(dso, &machine->dsos.head, node) {
@@ -1323,7 +1351,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
break;
}
- pthread_rwlock_unlock(&machine->dsos.lock);
+ up_read(&machine->dsos.lock);
if (kernel == NULL)
kernel = machine__findnew_dso(machine, kmmap_prefix);
@@ -1480,23 +1508,25 @@ out_problem:
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
- if (machine->last_match == th)
- machine->last_match = NULL;
+ struct threads *threads = machine__threads(machine, th->tid);
+
+ if (threads->last_match == th)
+ threads->last_match = NULL;
BUG_ON(refcount_read(&th->refcnt) == 0);
if (lock)
- pthread_rwlock_wrlock(&machine->threads_lock);
- rb_erase_init(&th->rb_node, &machine->threads);
+ down_write(&threads->lock);
+ rb_erase_init(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
- --machine->nr_threads;
+ --threads->nr;
/*
* Move it first to the dead_threads list, then drop the reference,
* if this is the last reference, then the thread__delete destructor
* will be called and we will remove it from the dead_threads list.
*/
- list_add_tail(&th->node, &machine->dead_threads);
+ list_add_tail(&th->node, &threads->dead);
if (lock)
- pthread_rwlock_unlock(&machine->threads_lock);
+ up_write(&threads->lock);
thread__put(th);
}
@@ -1680,6 +1710,26 @@ struct mem_info *sample__resolve_mem(struct perf_sample *sample,
return mi;
}
+static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
+{
+ char *srcline = NULL;
+
+ if (!map || callchain_param.key == CCKEY_FUNCTION)
+ return srcline;
+
+ srcline = srcline__tree_find(&map->dso->srclines, ip);
+ if (!srcline) {
+ bool show_sym = false;
+ bool show_addr = callchain_param.key == CCKEY_ADDRESS;
+
+ srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
+ sym, show_sym, show_addr);
+ srcline__tree_insert(&map->dso->srclines, ip, srcline);
+ }
+
+ return srcline;
+}
+
struct iterations {
int nr_loop_iter;
u64 cycles;
@@ -1699,6 +1749,7 @@ static int add_callchain_ip(struct thread *thread,
struct addr_location al;
int nr_loop_iter = 0;
u64 iter_cycles = 0;
+ const char *srcline = NULL;
al.filtered = 0;
al.sym = NULL;
@@ -1754,9 +1805,10 @@ static int add_callchain_ip(struct thread *thread,
iter_cycles = iter->cycles;
}
+ srcline = callchain_srcline(al.map, al.sym, al.addr);
return callchain_cursor_append(cursor, al.addr, al.map, al.sym,
branch, flags, nr_loop_iter,
- iter_cycles, branch_from);
+ iter_cycles, branch_from, srcline);
}
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
@@ -2069,15 +2121,54 @@ check_calls:
return 0;
}
+static int append_inlines(struct callchain_cursor *cursor,
+ struct map *map, struct symbol *sym, u64 ip)
+{
+ struct inline_node *inline_node;
+ struct inline_list *ilist;
+ u64 addr;
+ int ret = 1;
+
+ if (!symbol_conf.inline_name || !map || !sym)
+ return ret;
+
+ addr = map__rip_2objdump(map, ip);
+
+ inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
+ if (!inline_node) {
+ inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
+ if (!inline_node)
+ return ret;
+ inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
+ }
+
+ list_for_each_entry(ilist, &inline_node->val, list) {
+ ret = callchain_cursor_append(cursor, ip, map,
+ ilist->symbol, false,
+ NULL, 0, 0, 0, ilist->srcline);
+
+ if (ret != 0)
+ return ret;
+ }
+
+ return ret;
+}
+
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
struct callchain_cursor *cursor = arg;
+ const char *srcline = NULL;
if (symbol_conf.hide_unresolved && entry->sym == NULL)
return 0;
+
+ if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
+ return 0;
+
+ srcline = callchain_srcline(entry->map, entry->sym, entry->ip);
return callchain_cursor_append(cursor, entry->ip,
entry->map, entry->sym,
- false, NULL, 0, 0, 0);
+ false, NULL, 0, 0, 0, srcline);
}
static int thread__resolve_callchain_unwind(struct thread *thread,
@@ -2141,21 +2232,26 @@ int machine__for_each_thread(struct machine *machine,
int (*fn)(struct thread *thread, void *p),
void *priv)
{
+ struct threads *threads;
struct rb_node *nd;
struct thread *thread;
int rc = 0;
+ int i;
- for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
- thread = rb_entry(nd, struct thread, rb_node);
- rc = fn(thread, priv);
- if (rc != 0)
- return rc;
- }
+ for (i = 0; i < THREADS__TABLE_SIZE; i++) {
+ threads = &machine->threads[i];
+ for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ thread = rb_entry(nd, struct thread, rb_node);
+ rc = fn(thread, priv);
+ if (rc != 0)
+ return rc;
+ }
- list_for_each_entry(thread, &machine->dead_threads, node) {
- rc = fn(thread, priv);
- if (rc != 0)
- return rc;
+ list_for_each_entry(thread, &threads->dead, node) {
+ rc = fn(thread, priv);
+ if (rc != 0)
+ return rc;
+ }
}
return rc;
}
@@ -2184,12 +2280,16 @@ int machines__for_each_thread(struct machines *machines,
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
struct target *target, struct thread_map *threads,
perf_event__handler_t process, bool data_mmap,
- unsigned int proc_map_timeout)
+ unsigned int proc_map_timeout,
+ unsigned int nr_threads_synthesize)
{
if (target__has_task(target))
return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
else if (target__has_cpu(target))
- return perf_event__synthesize_threads(tool, process, machine, data_mmap, proc_map_timeout);
+ return perf_event__synthesize_threads(tool, process,
+ machine, data_mmap,
+ proc_map_timeout,
+ nr_threads_synthesize);
/* command specified */
return 0;
}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index d551aa80a59b..5ce860b64c74 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -7,6 +7,7 @@
#include "map.h"
#include "dso.h"
#include "event.h"
+#include "rwsem.h"
struct addr_location;
struct branch_stack;
@@ -24,6 +25,17 @@ extern const char *ref_reloc_sym_names[];
struct vdso_info;
+#define THREADS__TABLE_BITS 8
+#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)
+
+struct threads {
+ struct rb_root entries;
+ struct rw_semaphore lock;
+ unsigned int nr;
+ struct list_head dead;
+ struct thread *last_match;
+};
+
struct machine {
struct rb_node rb_node;
pid_t pid;
@@ -31,11 +43,7 @@ struct machine {
bool comm_exec;
bool kptr_restrict_warned;
char *root_dir;
- struct rb_root threads;
- pthread_rwlock_t threads_lock;
- unsigned int nr_threads;
- struct list_head dead_threads;
- struct thread *last_match;
+ struct threads threads[THREADS__TABLE_SIZE];
struct vdso_info *vdso_info;
struct perf_env *env;
struct dsos dsos;
@@ -49,6 +57,12 @@ struct machine {
};
};
+static inline struct threads *machine__threads(struct machine *machine, pid_t tid)
+{
+ /* Cast it to handle tid == -1 */
+ return &machine->threads[(unsigned int)tid % THREADS__TABLE_SIZE];
+}
+
static inline
struct map *__machine__kernel_map(struct machine *machine, enum map_type type)
{
@@ -244,15 +258,18 @@ int machines__for_each_thread(struct machines *machines,
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
struct target *target, struct thread_map *threads,
perf_event__handler_t process, bool data_mmap,
- unsigned int proc_map_timeout);
+ unsigned int proc_map_timeout,
+ unsigned int nr_threads_synthesize);
static inline
int machine__synthesize_threads(struct machine *machine, struct target *target,
struct thread_map *threads, bool data_mmap,
- unsigned int proc_map_timeout)
+ unsigned int proc_map_timeout,
+ unsigned int nr_threads_synthesize)
{
return __machine__synthesize_threads(machine, NULL, target, threads,
perf_event__process, data_mmap,
- proc_map_timeout);
+ proc_map_timeout,
+ nr_threads_synthesize);
}
pid_t machine__get_current_tid(struct machine *machine, int cpu);
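[Editor's note] machine__threads() above picks one of the 256 buckets by reducing the tid modulo THREADS__TABLE_SIZE; the cast to unsigned int is what keeps tid == -1 from producing a negative index. A self-contained sketch of just that computation, using the same constants defined in the hunk:

#include <stdio.h>
#include <sys/types.h>

#define THREADS__TABLE_BITS 8
#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)

static unsigned int threads_bucket(pid_t tid)
{
	return (unsigned int)tid % THREADS__TABLE_SIZE;
}

int main(void)
{
	printf("tid 1234 -> bucket %u\n", threads_bucket(1234));	/* 210 */
	printf("tid -1   -> bucket %u\n", threads_bucket(-1));		/* 255 */
	return 0;
}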
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 4e7bd2750122..6d40efd74402 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -489,7 +489,7 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
static void maps__init(struct maps *maps)
{
maps->entries = RB_ROOT;
- pthread_rwlock_init(&maps->lock, NULL);
+ init_rwsem(&maps->lock);
}
void map_groups__init(struct map_groups *mg, struct machine *machine)
@@ -518,9 +518,9 @@ static void __maps__purge(struct maps *maps)
static void maps__exit(struct maps *maps)
{
- pthread_rwlock_wrlock(&maps->lock);
+ down_write(&maps->lock);
__maps__purge(maps);
- pthread_rwlock_unlock(&maps->lock);
+ up_write(&maps->lock);
}
void map_groups__exit(struct map_groups *mg)
@@ -587,7 +587,7 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
struct symbol *sym;
struct rb_node *nd;
- pthread_rwlock_rdlock(&maps->lock);
+ down_read(&maps->lock);
for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
struct map *pos = rb_entry(nd, struct map, rb_node);
@@ -603,7 +603,7 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
sym = NULL;
out:
- pthread_rwlock_unlock(&maps->lock);
+ up_read(&maps->lock);
return sym;
}
@@ -639,7 +639,7 @@ static size_t maps__fprintf(struct maps *maps, FILE *fp)
size_t printed = 0;
struct rb_node *nd;
- pthread_rwlock_rdlock(&maps->lock);
+ down_read(&maps->lock);
for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
struct map *pos = rb_entry(nd, struct map, rb_node);
@@ -651,7 +651,7 @@ static size_t maps__fprintf(struct maps *maps, FILE *fp)
}
}
- pthread_rwlock_unlock(&maps->lock);
+ up_read(&maps->lock);
return printed;
}
@@ -683,7 +683,7 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
struct rb_node *next;
int err = 0;
- pthread_rwlock_wrlock(&maps->lock);
+ down_write(&maps->lock);
root = &maps->entries;
next = rb_first(root);
@@ -751,7 +751,7 @@ put_map:
err = 0;
out:
- pthread_rwlock_unlock(&maps->lock);
+ up_write(&maps->lock);
return err;
}
@@ -772,7 +772,7 @@ int map_groups__clone(struct thread *thread,
struct map *map;
struct maps *maps = &parent->maps[type];
- pthread_rwlock_rdlock(&maps->lock);
+ down_read(&maps->lock);
for (map = maps__first(maps); map; map = map__next(map)) {
struct map *new = map__clone(map);
@@ -789,7 +789,7 @@ int map_groups__clone(struct thread *thread,
err = 0;
out_unlock:
- pthread_rwlock_unlock(&maps->lock);
+ up_read(&maps->lock);
return err;
}
@@ -816,9 +816,9 @@ static void __maps__insert(struct maps *maps, struct map *map)
void maps__insert(struct maps *maps, struct map *map)
{
- pthread_rwlock_wrlock(&maps->lock);
+ down_write(&maps->lock);
__maps__insert(maps, map);
- pthread_rwlock_unlock(&maps->lock);
+ up_write(&maps->lock);
}
static void __maps__remove(struct maps *maps, struct map *map)
@@ -829,9 +829,9 @@ static void __maps__remove(struct maps *maps, struct map *map)
void maps__remove(struct maps *maps, struct map *map)
{
- pthread_rwlock_wrlock(&maps->lock);
+ down_write(&maps->lock);
__maps__remove(maps, map);
- pthread_rwlock_unlock(&maps->lock);
+ up_write(&maps->lock);
}
struct map *maps__find(struct maps *maps, u64 ip)
@@ -839,7 +839,7 @@ struct map *maps__find(struct maps *maps, u64 ip)
struct rb_node **p, *parent = NULL;
struct map *m;
- pthread_rwlock_rdlock(&maps->lock);
+ down_read(&maps->lock);
p = &maps->entries.rb_node;
while (*p != NULL) {
@@ -855,7 +855,7 @@ struct map *maps__find(struct maps *maps, u64 ip)
m = NULL;
out:
- pthread_rwlock_unlock(&maps->lock);
+ up_read(&maps->lock);
return m;
}
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 1fb9b8589adc..edeb7291c8e1 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -10,6 +10,7 @@
#include <stdio.h>
#include <stdbool.h>
#include <linux/types.h>
+#include "rwsem.h"
enum map_type {
MAP__FUNCTION = 0,
@@ -62,7 +63,7 @@ struct kmap {
struct maps {
struct rb_root entries;
- pthread_rwlock_t lock;
+ struct rw_semaphore lock;
};
struct map_groups {
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
new file mode 100644
index 000000000000..0ddd9c199227
--- /dev/null
+++ b/tools/perf/util/metricgroup.c
@@ -0,0 +1,490 @@
+/*
+ * Copyright (c) 2017, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+/* Manage metrics and groups of metrics from JSON files */
+
+#include "metricgroup.h"
+#include "evlist.h"
+#include "strbuf.h"
+#include "pmu.h"
+#include "expr.h"
+#include "rblist.h"
+#include "pmu.h"
+#include <string.h>
+#include <stdbool.h>
+#include <errno.h>
+#include "pmu-events/pmu-events.h"
+#include "strbuf.h"
+#include "strlist.h"
+#include <assert.h>
+#include <ctype.h>
+
+struct metric_event *metricgroup__lookup(struct rblist *metric_events,
+ struct perf_evsel *evsel,
+ bool create)
+{
+ struct rb_node *nd;
+ struct metric_event me = {
+ .evsel = evsel
+ };
+ nd = rblist__find(metric_events, &me);
+ if (nd)
+ return container_of(nd, struct metric_event, nd);
+ if (create) {
+ rblist__add_node(metric_events, &me);
+ nd = rblist__find(metric_events, &me);
+ if (nd)
+ return container_of(nd, struct metric_event, nd);
+ }
+ return NULL;
+}
+
+static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
+{
+ struct metric_event *a = container_of(rb_node,
+ struct metric_event,
+ nd);
+ const struct metric_event *b = entry;
+
+ if (a->evsel == b->evsel)
+ return 0;
+ if ((char *)a->evsel < (char *)b->evsel)
+ return -1;
+ return +1;
+}
+
+static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
+ const void *entry)
+{
+ struct metric_event *me = malloc(sizeof(struct metric_event));
+
+ if (!me)
+ return NULL;
+ memcpy(me, entry, sizeof(struct metric_event));
+ me->evsel = ((struct metric_event *)entry)->evsel;
+ INIT_LIST_HEAD(&me->head);
+ return &me->nd;
+}
+
+static void metricgroup__rblist_init(struct rblist *metric_events)
+{
+ rblist__init(metric_events);
+ metric_events->node_cmp = metric_event_cmp;
+ metric_events->node_new = metric_event_new;
+}
+
+struct egroup {
+ struct list_head nd;
+ int idnum;
+ const char **ids;
+ const char *metric_name;
+ const char *metric_expr;
+};
+
+static struct perf_evsel *find_evsel(struct perf_evlist *perf_evlist,
+ const char **ids,
+ int idnum,
+ struct perf_evsel **metric_events)
+{
+ struct perf_evsel *ev, *start = NULL;
+ int ind = 0;
+
+ evlist__for_each_entry (perf_evlist, ev) {
+ if (!strcmp(ev->name, ids[ind])) {
+ metric_events[ind] = ev;
+ if (ind == 0)
+ start = ev;
+ if (++ind == idnum) {
+ metric_events[ind] = NULL;
+ return start;
+ }
+ } else {
+ ind = 0;
+ start = NULL;
+ }
+ }
+ /*
+ * This can happen when an alias expands to multiple
+ * events, like for uncore events.
+ * We don't support this case for now.
+ */
+ return NULL;
+}
+
+static int metricgroup__setup_events(struct list_head *groups,
+ struct perf_evlist *perf_evlist,
+ struct rblist *metric_events_list)
+{
+ struct metric_event *me;
+ struct metric_expr *expr;
+ int i = 0;
+ int ret = 0;
+ struct egroup *eg;
+ struct perf_evsel *evsel;
+
+ list_for_each_entry (eg, groups, nd) {
+ struct perf_evsel **metric_events;
+
+ metric_events = calloc(sizeof(void *), eg->idnum + 1);
+ if (!metric_events) {
+ ret = -ENOMEM;
+ break;
+ }
+ evsel = find_evsel(perf_evlist, eg->ids, eg->idnum,
+ metric_events);
+ if (!evsel) {
+ pr_debug("Cannot resolve %s: %s\n",
+ eg->metric_name, eg->metric_expr);
+ continue;
+ }
+ for (i = 0; i < eg->idnum; i++)
+ metric_events[i]->collect_stat = true;
+ me = metricgroup__lookup(metric_events_list, evsel, true);
+ if (!me) {
+ ret = -ENOMEM;
+ break;
+ }
+ expr = malloc(sizeof(struct metric_expr));
+ if (!expr) {
+ ret = -ENOMEM;
+ break;
+ }
+ expr->metric_expr = eg->metric_expr;
+ expr->metric_name = eg->metric_name;
+ expr->metric_events = metric_events;
+ list_add(&expr->nd, &me->head);
+ }
+ return ret;
+}
+
+static bool match_metric(const char *n, const char *list)
+{
+ int len;
+ char *m;
+
+ if (!list)
+ return false;
+ if (!strcmp(list, "all"))
+ return true;
+ if (!n)
+ return !strcasecmp(list, "No_group");
+ len = strlen(list);
+ m = strcasestr(n, list);
+ if (!m)
+ return false;
+ if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
+ (m[len] == 0 || m[len] == ';'))
+ return true;
+ return false;
+}
+
+struct mep {
+ struct rb_node nd;
+ const char *name;
+ struct strlist *metrics;
+};
+
+static int mep_cmp(struct rb_node *rb_node, const void *entry)
+{
+ struct mep *a = container_of(rb_node, struct mep, nd);
+ struct mep *b = (struct mep *)entry;
+
+ return strcmp(a->name, b->name);
+}
+
+static struct rb_node *mep_new(struct rblist *rl __maybe_unused,
+ const void *entry)
+{
+ struct mep *me = malloc(sizeof(struct mep));
+
+ if (!me)
+ return NULL;
+ memcpy(me, entry, sizeof(struct mep));
+ me->name = strdup(me->name);
+ if (!me->name)
+ goto out_me;
+ me->metrics = strlist__new(NULL, NULL);
+ if (!me->metrics)
+ goto out_name;
+ return &me->nd;
+out_name:
+ free((char *)me->name);
+out_me:
+ free(me);
+ return NULL;
+}
+
+static struct mep *mep_lookup(struct rblist *groups, const char *name)
+{
+ struct rb_node *nd;
+ struct mep me = {
+ .name = name
+ };
+ nd = rblist__find(groups, &me);
+ if (nd)
+ return container_of(nd, struct mep, nd);
+ rblist__add_node(groups, &me);
+ nd = rblist__find(groups, &me);
+ if (nd)
+ return container_of(nd, struct mep, nd);
+ return NULL;
+}
+
+static void mep_delete(struct rblist *rl __maybe_unused,
+ struct rb_node *nd)
+{
+ struct mep *me = container_of(nd, struct mep, nd);
+
+ strlist__delete(me->metrics);
+ free((void *)me->name);
+ free(me);
+}
+
+static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
+{
+ struct str_node *sn;
+ int n = 0;
+
+ strlist__for_each_entry (sn, metrics) {
+ if (raw)
+ printf("%s%s", n > 0 ? " " : "", sn->s);
+ else
+ printf(" %s\n", sn->s);
+ n++;
+ }
+ if (raw)
+ putchar('\n');
+}
+
+void metricgroup__print(bool metrics, bool metricgroups, char *filter,
+ bool raw)
+{
+ struct pmu_events_map *map = perf_pmu__find_map();
+ struct pmu_event *pe;
+ int i;
+ struct rblist groups;
+ struct rb_node *node, *next;
+ struct strlist *metriclist = NULL;
+
+ if (!map)
+ return;
+
+ if (!metricgroups) {
+ metriclist = strlist__new(NULL, NULL);
+ if (!metriclist)
+ return;
+ }
+
+ rblist__init(&groups);
+ groups.node_new = mep_new;
+ groups.node_cmp = mep_cmp;
+ groups.node_delete = mep_delete;
+ for (i = 0; ; i++) {
+ const char *g;
+ pe = &map->table[i];
+
+ if (!pe->name && !pe->metric_group && !pe->metric_name)
+ break;
+ if (!pe->metric_expr)
+ continue;
+ g = pe->metric_group;
+ if (!g && pe->metric_name) {
+ if (pe->name)
+ continue;
+ g = "No_group";
+ }
+ if (g) {
+ char *omg;
+ char *mg = strdup(g);
+
+ if (!mg)
+ return;
+ omg = mg;
+ while ((g = strsep(&mg, ";")) != NULL) {
+ struct mep *me;
+ char *s;
+
+ if (*g == 0)
+ g = "No_group";
+ while (isspace(*g))
+ g++;
+ if (filter && !strstr(g, filter))
+ continue;
+ if (raw)
+ s = (char *)pe->metric_name;
+ else {
+ if (asprintf(&s, "%s\n\t[%s]",
+ pe->metric_name, pe->desc) < 0)
+ return;
+ }
+
+ if (!s)
+ continue;
+
+ if (!metricgroups) {
+ strlist__add(metriclist, s);
+ } else {
+ me = mep_lookup(&groups, g);
+ if (!me)
+ continue;
+ strlist__add(me->metrics, s);
+ }
+ }
+ free(omg);
+ }
+ }
+
+ if (metricgroups && !raw)
+ printf("\nMetric Groups:\n\n");
+ else if (metrics && !raw)
+ printf("\nMetrics:\n\n");
+
+ for (node = rb_first(&groups.entries); node; node = next) {
+ struct mep *me = container_of(node, struct mep, nd);
+
+ if (metricgroups)
+ printf("%s%s%s", me->name, metrics ? ":" : "", raw ? " " : "\n");
+ if (metrics)
+ metricgroup__print_strlist(me->metrics, raw);
+ next = rb_next(node);
+ rblist__remove_node(&groups, node);
+ }
+ if (!metricgroups)
+ metricgroup__print_strlist(metriclist, raw);
+ strlist__delete(metriclist);
+}
+
+static int metricgroup__add_metric(const char *metric, struct strbuf *events,
+ struct list_head *group_list)
+{
+ struct pmu_events_map *map = perf_pmu__find_map();
+ struct pmu_event *pe;
+ int ret = -EINVAL;
+ int i, j;
+
+ if (!map)
+ return 0;
+
+ for (i = 0; ; i++) {
+ pe = &map->table[i];
+
+ if (!pe->name && !pe->metric_group && !pe->metric_name)
+ break;
+ if (!pe->metric_expr)
+ continue;
+ if (match_metric(pe->metric_group, metric) ||
+ match_metric(pe->metric_name, metric)) {
+ const char **ids;
+ int idnum;
+ struct egroup *eg;
+
+ pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);
+
+ if (expr__find_other(pe->metric_expr,
+ NULL, &ids, &idnum) < 0)
+ continue;
+ if (events->len > 0)
+ strbuf_addf(events, ",");
+ for (j = 0; j < idnum; j++) {
+ pr_debug("found event %s\n", ids[j]);
+ strbuf_addf(events, "%s%s",
+ j == 0 ? "{" : ",",
+ ids[j]);
+ }
+ strbuf_addf(events, "}:W");
+
+ eg = malloc(sizeof(struct egroup));
+ if (!eg) {
+ ret = -ENOMEM;
+ break;
+ }
+ eg->ids = ids;
+ eg->idnum = idnum;
+ eg->metric_name = pe->metric_name;
+ eg->metric_expr = pe->metric_expr;
+ list_add_tail(&eg->nd, group_list);
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
+static int metricgroup__add_metric_list(const char *list, struct strbuf *events,
+ struct list_head *group_list)
+{
+ char *llist, *nlist, *p;
+ int ret = -EINVAL;
+
+ nlist = strdup(list);
+ if (!nlist)
+ return -ENOMEM;
+ llist = nlist;
+
+ strbuf_init(events, 100);
+ strbuf_addf(events, "%s", "");
+
+ while ((p = strsep(&llist, ",")) != NULL) {
+ ret = metricgroup__add_metric(p, events, group_list);
+ if (ret == -EINVAL) {
+ fprintf(stderr, "Cannot find metric or group `%s'\n",
+ p);
+ break;
+ }
+ }
+ free(nlist);
+ return ret;
+}
+
+static void metricgroup__free_egroups(struct list_head *group_list)
+{
+ struct egroup *eg, *egtmp;
+ int i;
+
+ list_for_each_entry_safe (eg, egtmp, group_list, nd) {
+ for (i = 0; i < eg->idnum; i++)
+ free((char *)eg->ids[i]);
+ free(eg->ids);
+ free(eg);
+ }
+}
+
+int metricgroup__parse_groups(const struct option *opt,
+ const char *str,
+ struct rblist *metric_events)
+{
+ struct parse_events_error parse_error;
+ struct perf_evlist *perf_evlist = *(struct perf_evlist **)opt->value;
+ struct strbuf extra_events;
+ LIST_HEAD(group_list);
+ int ret;
+
+ if (metric_events->nr_entries == 0)
+ metricgroup__rblist_init(metric_events);
+ ret = metricgroup__add_metric_list(str, &extra_events, &group_list);
+ if (ret)
+ return ret;
+ pr_debug("adding %s\n", extra_events.buf);
+ memset(&parse_error, 0, sizeof(struct parse_events_error));
+ ret = parse_events(perf_evlist, extra_events.buf, &parse_error);
+ if (ret) {
+ parse_events_print_error(&parse_error, extra_events.buf);
+ goto out;
+ }
+ strbuf_release(&extra_events);
+ ret = metricgroup__setup_events(&group_list, perf_evlist,
+ metric_events);
+out:
+ metricgroup__free_egroups(&group_list);
+ return ret;
+}
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
new file mode 100644
index 000000000000..06854e125ee7
--- /dev/null
+++ b/tools/perf/util/metricgroup.h
@@ -0,0 +1,31 @@
+#ifndef METRICGROUP_H
+#define METRICGROUP_H 1
+
+#include "linux/list.h"
+#include "rblist.h"
+#include <subcmd/parse-options.h>
+#include "evlist.h"
+#include "strbuf.h"
+
+struct metric_event {
+ struct rb_node nd;
+ struct perf_evsel *evsel;
+ struct list_head head; /* list of metric_expr */
+};
+
+struct metric_expr {
+ struct list_head nd;
+ const char *metric_expr;
+ const char *metric_name;
+ struct perf_evsel **metric_events;
+};
+
+struct metric_event *metricgroup__lookup(struct rblist *metric_events,
+ struct perf_evsel *evsel,
+ bool create);
+int metricgroup__parse_groups(const struct option *opt,
+ const char *str,
+ struct rblist *metric_events);
+
+void metricgroup__print(bool metrics, bool groups, char *filter, bool raw);
+#endif
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
new file mode 100644
index 000000000000..9fe5f9c7d577
--- /dev/null
+++ b/tools/perf/util/mmap.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include <sys/mman.h>
+#include <inttypes.h>
+#include <asm/bug.h>
+#include "debug.h"
+#include "event.h"
+#include "mmap.h"
+#include "util.h" /* page_size */
+
+size_t perf_mmap__mmap_len(struct perf_mmap *map)
+{
+ return map->mask + 1 + page_size;
+}
+
+/* When check_messup is true, 'end' must point to a good entry */
+static union perf_event *perf_mmap__read(struct perf_mmap *map, bool check_messup,
+ u64 start, u64 end, u64 *prev)
+{
+ unsigned char *data = map->base + page_size;
+ union perf_event *event = NULL;
+ int diff = end - start;
+
+ if (check_messup) {
+ /*
+ * If we're further behind than half the buffer, there's a chance
+ * the writer will bite our tail and mess up the samples under us.
+ *
+ * If we somehow ended up ahead of the 'end', we got messed up.
+ *
+ * In either case, truncate and restart at 'end'.
+ */
+ if (diff > map->mask / 2 || diff < 0) {
+ fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
+
+ /*
+ * 'end' points to a known good entry, start there.
+ */
+ start = end;
+ diff = 0;
+ }
+ }
+
+ if (diff >= (int)sizeof(event->header)) {
+ size_t size;
+
+ event = (union perf_event *)&data[start & map->mask];
+ size = event->header.size;
+
+ if (size < sizeof(event->header) || diff < (int)size) {
+ event = NULL;
+ goto broken_event;
+ }
+
+ /*
+ * Event straddles the mmap boundary -- header should always
+ * be inside due to u64 alignment of output.
+ */
+ if ((start & map->mask) + size != ((start + size) & map->mask)) {
+ unsigned int offset = start;
+ unsigned int len = min(sizeof(*event), size), cpy;
+ void *dst = map->event_copy;
+
+ do {
+ cpy = min(map->mask + 1 - (offset & map->mask), len);
+ memcpy(dst, &data[offset & map->mask], cpy);
+ offset += cpy;
+ dst += cpy;
+ len -= cpy;
+ } while (len);
+
+ event = (union perf_event *)map->event_copy;
+ }
+
+ start += size;
+ }
+
+broken_event:
+ if (prev)
+ *prev = start;
+
+ return event;
+}
+
+union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup)
+{
+ u64 head;
+ u64 old = map->prev;
+
+ /*
+ * Check if event was unmapped due to a POLLHUP/POLLERR.
+ */
+ if (!refcount_read(&map->refcnt))
+ return NULL;
+
+ head = perf_mmap__read_head(map);
+
+ return perf_mmap__read(map, check_messup, old, head, &map->prev);
+}
+
+union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
+{
+ u64 head, end;
+ u64 start = map->prev;
+
+ /*
+ * Check if event was unmapped due to a POLLHUP/POLLERR.
+ */
+ if (!refcount_read(&map->refcnt))
+ return NULL;
+
+ head = perf_mmap__read_head(map);
+ if (!head)
+ return NULL;
+
+ /*
+ * The 'head' pointer starts at 0 and the kernel subtracts
+ * sizeof(record) from it on each write, so 'head' is in fact
+ * negative. The 'end' pointer is built manually by adding the
+ * ring buffer size to 'head', which means the valid data we
+ * can read spans the whole ring buffer. If 'end' is positive,
+ * the ring buffer has not been fully filled, so adjust 'end' to 0.
+ *
+ * However, since both 'head' and 'end' are unsigned, we can't
+ * simply compare 'end' against 0. Instead we compare '-head'
+ * with the ring buffer size, where -head is the number of
+ * bytes the kernel has written to the ring buffer.
+ */
+ if (-head < (u64)(map->mask + 1))
+ end = 0;
+ else
+ end = head + map->mask + 1;
+
+ return perf_mmap__read(map, false, start, end, &map->prev);
+}
+
+void perf_mmap__read_catchup(struct perf_mmap *map)
+{
+ u64 head;
+
+ if (!refcount_read(&map->refcnt))
+ return;
+
+ head = perf_mmap__read_head(map);
+ map->prev = head;
+}
+
+static bool perf_mmap__empty(struct perf_mmap *map)
+{
+ return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
+}
+
+void perf_mmap__get(struct perf_mmap *map)
+{
+ refcount_inc(&map->refcnt);
+}
+
+void perf_mmap__put(struct perf_mmap *map)
+{
+ BUG_ON(map->base && refcount_read(&map->refcnt) == 0);
+
+ if (refcount_dec_and_test(&map->refcnt))
+ perf_mmap__munmap(map);
+}
+
+void perf_mmap__consume(struct perf_mmap *map, bool overwrite)
+{
+ if (!overwrite) {
+ u64 old = map->prev;
+
+ perf_mmap__write_tail(map, old);
+ }
+
+ if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
+ perf_mmap__put(map);
+}
+
+int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
+ struct auxtrace_mmap_params *mp __maybe_unused,
+ void *userpg __maybe_unused,
+ int fd __maybe_unused)
+{
+ return 0;
+}
+
+void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
+{
+}
+
+void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
+ off_t auxtrace_offset __maybe_unused,
+ unsigned int auxtrace_pages __maybe_unused,
+ bool auxtrace_overwrite __maybe_unused)
+{
+}
+
+void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused,
+ int idx __maybe_unused,
+ bool per_cpu __maybe_unused)
+{
+}
+
+void perf_mmap__munmap(struct perf_mmap *map)
+{
+ if (map->base != NULL) {
+ munmap(map->base, perf_mmap__mmap_len(map));
+ map->base = NULL;
+ map->fd = -1;
+ refcount_set(&map->refcnt, 0);
+ }
+ auxtrace_mmap__munmap(&map->auxtrace_mmap);
+}
+
+int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
+{
+ /*
+ * The last one will be done at perf_evlist__mmap_consume(), so that we
+ * make sure we don't prevent tools from consuming every last event in
+ * the ring buffer.
+ *
+ * I.e. we can get the POLLHUP meaning that the fd doesn't exist
+ * anymore, but the last events for it are still in the ring buffer,
+ * waiting to be consumed.
+ *
+ * Tools can choose to ignore this at their own discretion, but the
+ * evlist layer can't just drop it when filtering events in
+ * perf_evlist__filter_pollfd().
+ */
+ refcount_set(&map->refcnt, 2);
+ map->prev = 0;
+ map->mask = mp->mask;
+ map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
+ MAP_SHARED, fd, 0);
+ if (map->base == MAP_FAILED) {
+ pr_debug2("failed to mmap perf event ring buffer, error %d\n",
+ errno);
+ map->base = NULL;
+ return -1;
+ }
+ map->fd = fd;
+
+ if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
+ &mp->auxtrace_mp, map->base, fd))
+ return -1;
+
+ return 0;
+}
+
+static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
+{
+ struct perf_event_header *pheader;
+ u64 evt_head = head;
+ int size = mask + 1;
+
+ pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
+ pheader = (struct perf_event_header *)(buf + (head & mask));
+ *start = head;
+ while (true) {
+ if (evt_head - head >= (unsigned int)size) {
+ pr_debug("Finished reading backward ring buffer: rewind\n");
+ if (evt_head - head > (unsigned int)size)
+ evt_head -= pheader->size;
+ *end = evt_head;
+ return 0;
+ }
+
+ pheader = (struct perf_event_header *)(buf + (evt_head & mask));
+
+ if (pheader->size == 0) {
+ pr_debug("Finished reading backward ring buffer: get start\n");
+ *end = evt_head;
+ return 0;
+ }
+
+ evt_head += pheader->size;
+ pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
+ }
+ WARN_ONCE(1, "Shouldn't get here\n");
+ return -1;
+}
+
+static int rb_find_range(void *data, int mask, u64 head, u64 old,
+ u64 *start, u64 *end, bool backward)
+{
+ if (!backward) {
+ *start = old;
+ *end = head;
+ return 0;
+ }
+
+ return backward_rb_find_range(data, mask, head, start, end);
+}
+
+int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
+ void *to, int push(void *to, void *buf, size_t size))
+{
+ u64 head = perf_mmap__read_head(md);
+ u64 old = md->prev;
+ u64 end = head, start = old;
+ unsigned char *data = md->base + page_size;
+ unsigned long size;
+ void *buf;
+ int rc = 0;
+
+ if (rb_find_range(data, md->mask, head, old, &start, &end, backward))
+ return -1;
+
+ if (start == end)
+ return 0;
+
+ size = end - start;
+ if (size > (unsigned long)(md->mask) + 1) {
+ WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
+
+ md->prev = head;
+ perf_mmap__consume(md, overwrite || backward);
+ return 0;
+ }
+
+ if ((start & md->mask) + size != (end & md->mask)) {
+ buf = &data[start & md->mask];
+ size = md->mask + 1 - (start & md->mask);
+ start += size;
+
+ if (push(to, buf, size) < 0) {
+ rc = -1;
+ goto out;
+ }
+ }
+
+ buf = &data[start & md->mask];
+ size = end - start;
+ start += size;
+
+ if (push(to, buf, size) < 0) {
+ rc = -1;
+ goto out;
+ }
+
+ md->prev = head;
+ perf_mmap__consume(md, overwrite || backward);
+out:
+ return rc;
+}
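For illustration only, not part of the patch: a toy, self-contained version of the chunked copy that perf_mmap__read() uses when an event straddles the mmap boundary. The 8-byte buffer and 4-byte record are made up for the example; the copy loop mirrors the one in the code above.

#include <stdio.h>
#include <string.h>

/* Toy ring buffer (size must be a power of two, as for the real mmap). */
#define RING_SIZE	8
#define RING_MASK	(RING_SIZE - 1)

int main(void)
{
	unsigned char data[RING_SIZE];
	unsigned char copy[4];
	unsigned int start = 6;		/* record begins 2 bytes before the end */
	unsigned int offset = start;
	size_t len = sizeof(copy), cpy;
	void *dst = copy;
	int i;

	for (i = 0; i < RING_SIZE; i++)
		data[i] = 'A' + i;	/* A B C D E F G H */

	/* Same chunked copy as perf_mmap__read() when a record wraps:
	 * first the tail of the buffer, then the part at the start. */
	do {
		cpy = RING_SIZE - (offset & RING_MASK);
		if (cpy > len)
			cpy = len;
		memcpy(dst, &data[offset & RING_MASK], cpy);
		offset += cpy;
		dst = (unsigned char *)dst + cpy;
		len -= cpy;
	} while (len);

	printf("%.4s\n", copy);		/* prints GHAB */
	return 0;
}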
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
new file mode 100644
index 000000000000..efd78b827b05
--- /dev/null
+++ b/tools/perf/util/mmap.h
@@ -0,0 +1,97 @@
+#ifndef __PERF_MMAP_H
+#define __PERF_MMAP_H 1
+
+#include <linux/compiler.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
+#include <asm/barrier.h>
+#include <stdbool.h>
+#include "auxtrace.h"
+#include "event.h"
+
+/**
+ * struct perf_mmap - perf's ring buffer mmap details
+ *
+ * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
+ */
+struct perf_mmap {
+ void *base;
+ int mask;
+ int fd;
+ refcount_t refcnt;
+ u64 prev;
+ struct auxtrace_mmap auxtrace_mmap;
+ char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+};
+
+/*
+ * State machine of bkw_mmap_state:
+ *
+ * .________________(forbid)_____________.
+ * | V
+ * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
+ * ^ ^ | ^ |
+ * | |__(forbid)____/ |___(forbid)___/|
+ * | |
+ * \_________________(3)_______________/
+ *
+ * NOTREADY : Backward ring buffers are not ready
+ * RUNNING : Backward ring buffers are recording
+ * DATA_PENDING : We are required to collect data from backward ring buffers
+ * EMPTY : We have collected data from backward ring buffers.
+ *
+ * (0): Setup backward ring buffer
+ * (1): Pause ring buffers for reading
+ * (2): Read from ring buffers
+ * (3): Resume ring buffers for recording
+ */
+enum bkw_mmap_state {
+ BKW_MMAP_NOTREADY,
+ BKW_MMAP_RUNNING,
+ BKW_MMAP_DATA_PENDING,
+ BKW_MMAP_EMPTY,
+};
+
+struct mmap_params {
+ int prot, mask;
+ struct auxtrace_mmap_params auxtrace_mp;
+};
+
+int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd);
+void perf_mmap__munmap(struct perf_mmap *map);
+
+void perf_mmap__get(struct perf_mmap *map);
+void perf_mmap__put(struct perf_mmap *map);
+
+void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
+
+void perf_mmap__read_catchup(struct perf_mmap *md);
+
+static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
+{
+ struct perf_event_mmap_page *pc = mm->base;
+ u64 head = ACCESS_ONCE(pc->data_head);
+ rmb();
+ return head;
+}
+
+static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
+{
+ struct perf_event_mmap_page *pc = md->base;
+
+ /*
+ * ensure all reads are done before we write the tail out.
+ */
+ mb();
+ pc->data_tail = tail;
+}
+
+union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
+union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
+
+int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
+ void *to, int push(void *to, void *buf, size_t size));
+
+size_t perf_mmap__mmap_len(struct perf_mmap *map);
+
+#endif /*__PERF_MMAP_H */
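For illustration only, not part of the patch: a sketch of the head/tail protocol that perf_mmap__read_head() and perf_mmap__write_tail() implement. C11 acquire/release atomics stand in for the rmb()/mb() barriers used above, and struct toy_mmap_page is a made-up stand-in for the two fields of struct perf_event_mmap_page that these helpers touch.

#include <stdatomic.h>
#include <stdio.h>

struct toy_mmap_page {
	_Atomic unsigned long data_head;	/* advanced by the producer */
	_Atomic unsigned long data_tail;	/* advanced by the consumer */
};

static unsigned long toy_read_head(struct toy_mmap_page *pc)
{
	/* load-acquire: reads of ring data happen after the head load */
	return atomic_load_explicit(&pc->data_head, memory_order_acquire);
}

static void toy_write_tail(struct toy_mmap_page *pc, unsigned long tail)
{
	/* store-release: ring data reads are done before the tail moves */
	atomic_store_explicit(&pc->data_tail, tail, memory_order_release);
}

int main(void)
{
	struct toy_mmap_page pg = { 0, 0 };
	unsigned long head;

	atomic_store(&pg.data_head, 128);	/* pretend data was produced */
	head = toy_read_head(&pg);
	/* ... consume ring contents up to 'head' here ... */
	toy_write_tail(&pg, head);
	printf("consumed up to %lu\n", head);
	return 0;
}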
diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
index a58e91197729..5be021701f34 100644
--- a/tools/perf/util/namespaces.c
+++ b/tools/perf/util/namespaces.c
@@ -11,6 +11,7 @@
#include "event.h"
#include <sys/types.h>
#include <sys/stat.h>
+#include <fcntl.h>
#include <limits.h>
#include <sched.h>
#include <stdlib.h>
diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
index 05d82601c9a6..760558dcfd18 100644
--- a/tools/perf/util/namespaces.h
+++ b/tools/perf/util/namespaces.h
@@ -9,9 +9,10 @@
#ifndef __PERF_NAMESPACES_H
#define __PERF_NAMESPACES_H
-#include "../perf.h"
-#include <linux/list.h>
+#include <sys/types.h>
+#include <linux/perf_event.h>
#include <linux/refcount.h>
+#include <linux/types.h>
struct namespaces_event;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 56694e3409ea..a7fcd95961ef 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -29,6 +29,7 @@
#include "probe-file.h"
#include "asm/bug.h"
#include "util/parse-branch-options.h"
+#include "metricgroup.h"
#define MAX_NAME_LEN 100
@@ -1220,11 +1221,17 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
struct perf_pmu_info info;
struct perf_pmu *pmu;
struct perf_evsel *evsel;
+ struct parse_events_error *err = parse_state->error;
LIST_HEAD(config_terms);
pmu = perf_pmu__find(name);
- if (!pmu)
+ if (!pmu) {
+ if (asprintf(&err->str,
+ "Cannot find PMU `%s'. Missing kernel support?",
+ name) < 0)
+ err->str = NULL;
return -EINVAL;
+ }
if (pmu->default_config) {
memcpy(&attr, pmu->default_config,
@@ -1368,6 +1375,7 @@ struct event_modifier {
int exclude_GH;
int sample_read;
int pinned;
+ int weak;
};
static int get_event_modifier(struct event_modifier *mod, char *str,
@@ -1386,6 +1394,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
int exclude = eu | ek | eh;
int exclude_GH = evsel ? evsel->exclude_GH : 0;
+ int weak = 0;
memset(mod, 0, sizeof(*mod));
@@ -1423,6 +1432,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
sample_read = 1;
} else if (*str == 'D') {
pinned = 1;
+ } else if (*str == 'W') {
+ weak = 1;
} else
break;
@@ -1453,6 +1464,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
mod->exclude_GH = exclude_GH;
mod->sample_read = sample_read;
mod->pinned = pinned;
+ mod->weak = weak;
return 0;
}
@@ -1466,7 +1478,7 @@ static int check_modifier(char *str)
char *p = str;
/* The sizeof includes 0 byte as well. */
- if (strlen(str) > (sizeof("ukhGHpppPSDI") - 1))
+ if (strlen(str) > (sizeof("ukhGHpppPSDIW") - 1))
return -1;
while (*p) {
@@ -1506,6 +1518,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
evsel->exclude_GH = mod.exclude_GH;
evsel->sample_read = mod.sample_read;
evsel->precise_max = mod.precise_max;
+ evsel->weak_group = mod.weak;
if (perf_evsel__is_group_leader(evsel))
evsel->attr.pinned = mod.pinned;
@@ -1728,8 +1741,8 @@ static int get_term_width(void)
return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
}
-static void parse_events_print_error(struct parse_events_error *err,
- const char *event)
+void parse_events_print_error(struct parse_events_error *err,
+ const char *event)
{
const char *str = "invalid or unsupported event: ";
char _buf[MAX_WIDTH];
@@ -1784,8 +1797,6 @@ static void parse_events_print_error(struct parse_events_error *err,
zfree(&err->str);
zfree(&err->help);
}
-
- fprintf(stderr, "Run 'perf list' for a list of valid events\n");
}
#undef MAX_WIDTH
@@ -1797,8 +1808,10 @@ int parse_events_option(const struct option *opt, const char *str,
struct parse_events_error err = { .idx = 0, };
int ret = parse_events(evlist, str, &err);
- if (ret)
+ if (ret) {
parse_events_print_error(&err, str);
+ fprintf(stderr, "Run 'perf list' for a list of valid events\n");
+ }
return ret;
}
@@ -2376,6 +2389,8 @@ void print_events(const char *event_glob, bool name_only, bool quiet_flag,
print_tracepoint_events(NULL, NULL, name_only);
print_sdt_events(NULL, NULL, name_only);
+
+ metricgroup__print(true, true, NULL, name_only);
}
int parse_events__is_hardcoded_term(struct parse_events_term *term)
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index eed50b54bab3..be337c266697 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -203,6 +203,9 @@ int is_valid_tracepoint(const char *event_string);
int valid_event_mount(const char *eventfs);
char *parse_events_formats_error_string(char *additional_terms);
+void parse_events_print_error(struct parse_events_error *err,
+ const char *event);
+
#ifdef HAVE_LIBELF_SUPPORT
/*
* If the probe point starts with '%',
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 6680e4fb7967..655ecff636a8 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -5,6 +5,7 @@
%option stack
%option bison-locations
%option yylineno
+%option reject
%{
#include <errno.h>
@@ -178,7 +179,7 @@ name [a-zA-Z_*?][a-zA-Z0-9_*?.]*
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]*
drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)?
/* If you add a modifier you need to update check_modifier() */
-modifier_event [ukhpPGHSDI]+
+modifier_event [ukhpPGHSDIW]+
modifier_bp [rwx]{1,3}
%%
@@ -305,6 +306,7 @@ cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COU
alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
+duration_time { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
bpf-output { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUTPUT); }
/*
@@ -339,8 +341,8 @@ r{num_raw_hex} { return raw(yyscanner); }
{num_hex} { return value(yyscanner, 16); }
{modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); }
-{bpf_object} { if (!isbpf(yyscanner)) USER_REJECT; return str(yyscanner, PE_BPF_OBJECT); }
-{bpf_source} { if (!isbpf(yyscanner)) USER_REJECT; return str(yyscanner, PE_BPF_SOURCE); }
+{bpf_object} { if (!isbpf(yyscanner)) { USER_REJECT }; return str(yyscanner, PE_BPF_OBJECT); }
+{bpf_source} { if (!isbpf(yyscanner)) { USER_REJECT }; return str(yyscanner, PE_BPF_SOURCE); }
{name} { return pmu_str_check(yyscanner); }
"/" { BEGIN(config); return '/'; }
- { return '-'; }
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index b10b35a63138..07cb2ac041d7 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -3,6 +3,7 @@
#include <linux/compiler.h>
#include <sys/types.h>
#include <errno.h>
+#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdio.h>
@@ -541,16 +542,8 @@ char * __weak get_cpuid_str(void)
return NULL;
}
-/*
- * From the pmu_events_map, find the table of PMU events that corresponds
- * to the current running CPU. Then, add all PMU events from that table
- * as aliases.
- */
-static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
+static char *perf_pmu__getcpuid(void)
{
- int i;
- struct pmu_events_map *map;
- struct pmu_event *pe;
char *cpuid;
static bool printed;
@@ -560,22 +553,50 @@ static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
if (!cpuid)
cpuid = get_cpuid_str();
if (!cpuid)
- return;
+ return NULL;
if (!printed) {
pr_debug("Using CPUID %s\n", cpuid);
printed = true;
}
+ return cpuid;
+}
+
+struct pmu_events_map *perf_pmu__find_map(void)
+{
+ struct pmu_events_map *map;
+ char *cpuid = perf_pmu__getcpuid();
+ int i;
i = 0;
- while (1) {
+ for (;;) {
map = &pmu_events_map[i++];
- if (!map->table)
- goto out;
+ if (!map->table) {
+ map = NULL;
+ break;
+ }
if (!strcmp(map->cpuid, cpuid))
break;
}
+ free(cpuid);
+ return map;
+}
+
+/*
+ * From the pmu_events_map, find the table of PMU events that corresponds
+ * to the current running CPU. Then, add all PMU events from that table
+ * as aliases.
+ */
+static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
+{
+ int i;
+ struct pmu_events_map *map;
+ struct pmu_event *pe;
+
+ map = perf_pmu__find_map();
+ if (!map)
+ return;
/*
* Found a matching PMU events table. Create aliases
@@ -585,8 +606,11 @@ static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
const char *pname;
pe = &map->table[i++];
- if (!pe->name)
+ if (!pe->name) {
+ if (pe->metric_group || pe->metric_name)
+ continue;
break;
+ }
pname = pe->pmu ? pe->pmu : "cpu";
if (strncmp(pname, name, strlen(pname)))
@@ -600,9 +624,6 @@ static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
(char *)pe->metric_expr,
(char *)pe->metric_name);
}
-
-out:
- free(cpuid);
}
struct perf_event_attr * __weak
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index eca99435f4a0..27c75e635866 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -92,4 +92,6 @@ int perf_pmu__test(void);
struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu);
+struct pmu_events_map *perf_pmu__find_map(void);
+
#endif /* __PMU_H */
diff --git a/tools/perf/util/print_binary.c b/tools/perf/util/print_binary.c
index 779e35c9e566..23e367063446 100644
--- a/tools/perf/util/print_binary.c
+++ b/tools/perf/util/print_binary.c
@@ -3,40 +3,42 @@
#include <linux/log2.h>
#include "sane_ctype.h"
-void print_binary(unsigned char *data, size_t len,
- size_t bytes_per_line, print_binary_t printer,
- void *extra)
+int binary__fprintf(unsigned char *data, size_t len,
+ size_t bytes_per_line, binary__fprintf_t printer,
+ void *extra, FILE *fp)
{
size_t i, j, mask;
+ int printed = 0;
if (!printer)
- return;
+ return 0;
bytes_per_line = roundup_pow_of_two(bytes_per_line);
mask = bytes_per_line - 1;
- printer(BINARY_PRINT_DATA_BEGIN, 0, extra);
+ printed += printer(BINARY_PRINT_DATA_BEGIN, 0, extra, fp);
for (i = 0; i < len; i++) {
if ((i & mask) == 0) {
- printer(BINARY_PRINT_LINE_BEGIN, -1, extra);
- printer(BINARY_PRINT_ADDR, i, extra);
+ printed += printer(BINARY_PRINT_LINE_BEGIN, -1, extra, fp);
+ printed += printer(BINARY_PRINT_ADDR, i, extra, fp);
}
- printer(BINARY_PRINT_NUM_DATA, data[i], extra);
+ printed += printer(BINARY_PRINT_NUM_DATA, data[i], extra, fp);
if (((i & mask) == mask) || i == len - 1) {
for (j = 0; j < mask-(i & mask); j++)
- printer(BINARY_PRINT_NUM_PAD, -1, extra);
+ printed += printer(BINARY_PRINT_NUM_PAD, -1, extra, fp);
- printer(BINARY_PRINT_SEP, i, extra);
+ printer(BINARY_PRINT_SEP, i, extra, fp);
for (j = i & ~mask; j <= i; j++)
- printer(BINARY_PRINT_CHAR_DATA, data[j], extra);
+ printed += printer(BINARY_PRINT_CHAR_DATA, data[j], extra, fp);
for (j = 0; j < mask-(i & mask); j++)
- printer(BINARY_PRINT_CHAR_PAD, i, extra);
- printer(BINARY_PRINT_LINE_END, -1, extra);
+ printed += printer(BINARY_PRINT_CHAR_PAD, i, extra, fp);
+ printed += printer(BINARY_PRINT_LINE_END, -1, extra, fp);
}
}
- printer(BINARY_PRINT_DATA_END, -1, extra);
+ printed += printer(BINARY_PRINT_DATA_END, -1, extra, fp);
+ return printed;
}
int is_printable_array(char *p, unsigned int len)
diff --git a/tools/perf/util/print_binary.h b/tools/perf/util/print_binary.h
index 2be3075e2b05..2a1554afc957 100644
--- a/tools/perf/util/print_binary.h
+++ b/tools/perf/util/print_binary.h
@@ -3,6 +3,7 @@
#define PERF_PRINT_BINARY_H
#include <stddef.h>
+#include <stdio.h>
enum binary_printer_ops {
BINARY_PRINT_DATA_BEGIN,
@@ -17,12 +18,19 @@ enum binary_printer_ops {
BINARY_PRINT_DATA_END,
};
-typedef void (*print_binary_t)(enum binary_printer_ops op,
- unsigned int val, void *extra);
+typedef int (*binary__fprintf_t)(enum binary_printer_ops op,
+ unsigned int val, void *extra, FILE *fp);
-void print_binary(unsigned char *data, size_t len,
- size_t bytes_per_line, print_binary_t printer,
- void *extra);
+int binary__fprintf(unsigned char *data, size_t len,
+ size_t bytes_per_line, binary__fprintf_t printer,
+ void *extra, FILE *fp);
+
+static inline void print_binary(unsigned char *data, size_t len,
+ size_t bytes_per_line, binary__fprintf_t printer,
+ void *extra)
+{
+ binary__fprintf(data, len, bytes_per_line, printer, extra, stdout);
+}
int is_printable_array(char *p, unsigned int len);
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index cdf8d83a484c..4ae1123c6794 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -15,6 +15,7 @@
*
*/
#include <errno.h>
+#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/uio.h>
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index e66dc495809a..b4f2f06722a7 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,6 +10,7 @@ util/ctype.c
util/evlist.c
util/evsel.c
util/cpumap.c
+util/mmap.c
util/namespaces.c
../lib/bitmap.c
../lib/find_bit.c
diff --git a/tools/perf/util/rb_resort.h b/tools/perf/util/rb_resort.h
index 7d8972b33f6b..a920f702a74d 100644
--- a/tools/perf/util/rb_resort.h
+++ b/tools/perf/util/rb_resort.h
@@ -144,7 +144,8 @@ struct __name##_sorted *__name = __name##_sorted__new
__ilist->rblist.nr_entries)
/* For 'struct machine->threads' */
-#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine) \
- DECLARE_RESORT_RB(__name)(&__machine->threads, __machine->nr_threads)
+#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \
+ DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries, \
+ __machine->threads[hash_bucket].nr)
#endif /* _PERF_RESORT_RB_H_ */
diff --git a/tools/perf/util/rwsem.c b/tools/perf/util/rwsem.c
new file mode 100644
index 000000000000..5e52e7baa7b6
--- /dev/null
+++ b/tools/perf/util/rwsem.c
@@ -0,0 +1,32 @@
+#include "util.h"
+#include "rwsem.h"
+
+int init_rwsem(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_init(&sem->lock, NULL);
+}
+
+int exit_rwsem(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_destroy(&sem->lock);
+}
+
+int down_read(struct rw_semaphore *sem)
+{
+ return perf_singlethreaded ? 0 : pthread_rwlock_rdlock(&sem->lock);
+}
+
+int up_read(struct rw_semaphore *sem)
+{
+ return perf_singlethreaded ? 0 : pthread_rwlock_unlock(&sem->lock);
+}
+
+int down_write(struct rw_semaphore *sem)
+{
+ return perf_singlethreaded ? 0 : pthread_rwlock_wrlock(&sem->lock);
+}
+
+int up_write(struct rw_semaphore *sem)
+{
+ return perf_singlethreaded ? 0 : pthread_rwlock_unlock(&sem->lock);
+}
diff --git a/tools/perf/util/rwsem.h b/tools/perf/util/rwsem.h
new file mode 100644
index 000000000000..94565ad4d494
--- /dev/null
+++ b/tools/perf/util/rwsem.h
@@ -0,0 +1,19 @@
+#ifndef _PERF_RWSEM_H
+#define _PERF_RWSEM_H
+
+#include <pthread.h>
+
+struct rw_semaphore {
+ pthread_rwlock_t lock;
+};
+
+int init_rwsem(struct rw_semaphore *sem);
+int exit_rwsem(struct rw_semaphore *sem);
+
+int down_read(struct rw_semaphore *sem);
+int up_read(struct rw_semaphore *sem);
+
+int down_write(struct rw_semaphore *sem);
+int up_write(struct rw_semaphore *sem);
+
+#endif /* _PERF_RWSEM_H */
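For illustration only, not part of the patch: a self-contained sketch of how the new rw_semaphore wrapper is meant to be used. In the real rwsem.c the perf_singlethreaded flag is pulled in via util.h; a local stub stands in for it here, and struct counters is made up for the example.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool perf_singlethreaded = false;	/* stub for the util.h flag */

struct rw_semaphore { pthread_rwlock_t lock; };

static int init_rwsem(struct rw_semaphore *sem)
{
	return pthread_rwlock_init(&sem->lock, NULL);
}

static int down_read(struct rw_semaphore *sem)
{
	/* locking is skipped entirely while perf runs single-threaded */
	return perf_singlethreaded ? 0 : pthread_rwlock_rdlock(&sem->lock);
}

static int up_read(struct rw_semaphore *sem)
{
	return perf_singlethreaded ? 0 : pthread_rwlock_unlock(&sem->lock);
}

/* Typical reader pattern after the conversion, e.g. around a maps or
 * threads traversal. */
struct counters { struct rw_semaphore lock; int nr; };

int main(void)
{
	struct counters c = { .nr = 3 };

	init_rwsem(&c.lock);
	down_read(&c.lock);
	printf("nr = %d\n", c.nr);
	up_read(&c.lock);
	return 0;
}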
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index da55081aefc6..5c412310f266 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -33,14 +33,14 @@ static int perf_session__deliver_event(struct perf_session *session,
static int perf_session__open(struct perf_session *session)
{
- struct perf_data_file *file = session->file;
+ struct perf_data *data = session->data;
if (perf_session__read_header(session) < 0) {
pr_err("incompatible file format (rerun with -v to learn more)\n");
return -1;
}
- if (perf_data_file__is_pipe(file))
+ if (perf_data__is_pipe(data))
return 0;
if (perf_header__has_feat(&session->header, HEADER_STAT))
@@ -121,7 +121,7 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
session->tool, event->file_offset);
}
-struct perf_session *perf_session__new(struct perf_data_file *file,
+struct perf_session *perf_session__new(struct perf_data *data,
bool repipe, struct perf_tool *tool)
{
struct perf_session *session = zalloc(sizeof(*session));
@@ -135,13 +135,13 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
machines__init(&session->machines);
ordered_events__init(&session->ordered_events, ordered_events__deliver_event);
- if (file) {
- if (perf_data_file__open(file))
+ if (data) {
+ if (perf_data__open(data))
goto out_delete;
- session->file = file;
+ session->data = data;
- if (perf_data_file__is_read(file)) {
+ if (perf_data__is_read(data)) {
if (perf_session__open(session) < 0)
goto out_close;
@@ -149,7 +149,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
* set session attributes that are present in perf.data
* but not in pipe-mode.
*/
- if (!file->is_pipe) {
+ if (!data->is_pipe) {
perf_session__set_id_hdr_size(session);
perf_session__set_comm_exec(session);
}
@@ -158,7 +158,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
session->machines.host.env = &perf_env;
}
- if (!file || perf_data_file__is_write(file)) {
+ if (!data || perf_data__is_write(data)) {
/*
* In O_RDONLY mode this will be performed when reading the
* kernel MMAP event, in perf_event__process_mmap().
@@ -171,7 +171,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
* In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
* processed, so perf_evlist__sample_id_all is not meaningful here.
*/
- if ((!file || !file->is_pipe) && tool && tool->ordering_requires_timestamps &&
+ if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
tool->ordered_events = false;
@@ -180,7 +180,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
return session;
out_close:
- perf_data_file__close(file);
+ perf_data__close(data);
out_delete:
perf_session__delete(session);
out:
@@ -202,8 +202,8 @@ void perf_session__delete(struct perf_session *session)
perf_session__delete_threads(session);
perf_env__exit(&session->header.env);
machines__exit(&session->machines);
- if (session->file)
- perf_data_file__close(session->file);
+ if (session->data)
+ perf_data__close(session->data);
free(session);
}
@@ -291,8 +291,8 @@ static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
__maybe_unused)
{
dump_printf(": unhandled!\n");
- if (perf_data_file__is_pipe(session->file))
- skipn(perf_data_file__fd(session->file), event->auxtrace.size);
+ if (perf_data__is_pipe(session->data))
+ skipn(perf_data__fd(session->data), event->auxtrace.size);
return event->auxtrace.size;
}
@@ -1350,7 +1350,7 @@ static s64 perf_session__process_user_event(struct perf_session *session,
{
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
- int fd = perf_data_file__fd(session->file);
+ int fd = perf_data__fd(session->data);
int err;
dump_event(session->evlist, event, file_offset, NULL);
@@ -1450,10 +1450,10 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
goto out_parse_sample;
}
- if (perf_data_file__is_pipe(session->file))
+ if (perf_data__is_pipe(session->data))
return -1;
- fd = perf_data_file__fd(session->file);
+ fd = perf_data__fd(session->data);
hdr_sz = sizeof(struct perf_event_header);
if (buf_sz < hdr_sz)
@@ -1688,7 +1688,7 @@ static int __perf_session__process_pipe_events(struct perf_session *session)
{
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
- int fd = perf_data_file__fd(session->file);
+ int fd = perf_data__fd(session->data);
union perf_event *event;
uint32_t size, cur_size = 0;
void *buf = NULL;
@@ -1829,7 +1829,7 @@ static int __perf_session__process_events(struct perf_session *session,
{
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
- int fd = perf_data_file__fd(session->file);
+ int fd = perf_data__fd(session->data);
u64 head, page_offset, file_offset, file_pos, size;
int err, mmap_prot, mmap_flags, map_idx = 0;
size_t mmap_size;
@@ -1850,7 +1850,7 @@ static int __perf_session__process_events(struct perf_session *session,
if (data_offset + data_size < file_size)
file_size = data_offset + data_size;
- ui_progress__init(&prog, file_size, "Processing events...");
+ ui_progress__init_size(&prog, file_size, "Processing events...");
mmap_size = MMAP_SIZE;
if (mmap_size > file_size) {
@@ -1946,13 +1946,13 @@ out_err:
int perf_session__process_events(struct perf_session *session)
{
- u64 size = perf_data_file__size(session->file);
+ u64 size = perf_data__size(session->data);
int err;
if (perf_session__register_idle_thread(session) < 0)
return -ENOMEM;
- if (!perf_data_file__is_pipe(session->file))
+ if (!perf_data__is_pipe(session->data))
err = __perf_session__process_events(session,
session->header.data_offset,
session->header.data_size, size);
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 41caa098ed15..da1434a7c120 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -33,13 +33,13 @@ struct perf_session {
void *one_mmap_addr;
u64 one_mmap_offset;
struct ordered_events ordered_events;
- struct perf_data_file *file;
+ struct perf_data *data;
struct perf_tool *tool;
};
struct perf_tool;
-struct perf_session *perf_session__new(struct perf_data_file *file,
+struct perf_session *perf_session__new(struct perf_data *data,
bool repipe, struct perf_tool *tool);
void perf_session__delete(struct perf_session *session);
@@ -114,7 +114,7 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
extern volatile int session_done;
-#define session_done() ACCESS_ONCE(session_done)
+#define session_done() READ_ONCE(session_done)
int perf_session__deliver_synth_event(struct perf_session *session,
union perf_event *event,
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 84a33f1e9ec9..a00eacdf02ed 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -226,6 +226,9 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
if (sym_l == sym_r)
return 0;
+ if (sym_l->inlined || sym_r->inlined)
+ return strcmp(sym_l->name, sym_r->name);
+
if (sym_l->start != sym_r->start)
return (int64_t)(sym_r->start - sym_l->start);
@@ -284,6 +287,9 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
width - ret,
sym->name);
+ if (sym->inlined)
+ ret += repsep_snprintf(bf + ret, size - ret,
+ " (inlined)");
}
} else {
size_t len = BITS_PER_LONG / 4;
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index b2b55e5149a7..f5901c10a563 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -130,7 +130,6 @@ struct hist_entry {
};
char *srcline;
char *srcfile;
- struct inline_node *inline_node;
struct symbol *parent;
struct branch_info *branch_info;
struct hists *hists;
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index 4105682afc7a..d19f05c56de6 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -11,7 +11,7 @@
#include "util/debug.h"
#include "util/callchain.h"
#include "srcline.h"
-
+#include "string2.h"
#include "symbol.h"
bool srcline_full_filename;
@@ -34,28 +34,17 @@ static const char *dso__name(struct dso *dso)
return dso_name;
}
-static int inline_list__append(char *filename, char *funcname, int line_nr,
- struct inline_node *node, struct dso *dso)
+static int inline_list__append(struct symbol *symbol, char *srcline,
+ struct inline_node *node)
{
struct inline_list *ilist;
- char *demangled;
ilist = zalloc(sizeof(*ilist));
if (ilist == NULL)
return -1;
- ilist->filename = filename;
- ilist->line_nr = line_nr;
-
- if (dso != NULL) {
- demangled = dso__demangle_sym(dso, 0, funcname);
- if (demangled == NULL) {
- ilist->funcname = funcname;
- } else {
- ilist->funcname = demangled;
- free(funcname);
- }
- }
+ ilist->symbol = symbol;
+ ilist->srcline = srcline;
if (callchain_param.order == ORDER_CALLEE)
list_add_tail(&ilist->list, &node->val);
@@ -65,6 +54,65 @@ static int inline_list__append(char *filename, char *funcname, int line_nr,
return 0;
}
+/* basename version that takes a const input string */
+static const char *gnu_basename(const char *path)
+{
+ const char *base = strrchr(path, '/');
+
+ return base ? base + 1 : path;
+}
+
+static char *srcline_from_fileline(const char *file, unsigned int line)
+{
+ char *srcline;
+
+ if (!file)
+ return NULL;
+
+ if (!srcline_full_filename)
+ file = gnu_basename(file);
+
+ if (asprintf(&srcline, "%s:%u", file, line) < 0)
+ return NULL;
+
+ return srcline;
+}
+
+static struct symbol *new_inline_sym(struct dso *dso,
+ struct symbol *base_sym,
+ const char *funcname)
+{
+ struct symbol *inline_sym;
+ char *demangled = NULL;
+
+ if (dso) {
+ demangled = dso__demangle_sym(dso, 0, funcname);
+ if (demangled)
+ funcname = demangled;
+ }
+
+ if (base_sym && strcmp(funcname, base_sym->name) == 0) {
+ /* reuse the real, existing symbol */
+ inline_sym = base_sym;
+ /* ensure that we don't alias an inlined symbol, which could
+ * lead to double frees in inline_node__delete
+ */
+ assert(!base_sym->inlined);
+ } else {
+ /* create a fake symbol for the inline frame */
+ inline_sym = symbol__new(base_sym ? base_sym->start : 0,
+ base_sym ? base_sym->end : 0,
+ base_sym ? base_sym->binding : 0,
+ funcname);
+ if (inline_sym)
+ inline_sym->inlined = 1;
+ }
+
+ free(demangled);
+
+ return inline_sym;
+}
+
#ifdef HAVE_LIBBFD_SUPPORT
/*
@@ -208,18 +256,23 @@ static void addr2line_cleanup(struct a2l_data *a2l)
#define MAX_INLINE_NEST 1024
static int inline_list__append_dso_a2l(struct dso *dso,
- struct inline_node *node)
+ struct inline_node *node,
+ struct symbol *sym)
{
struct a2l_data *a2l = dso->a2l;
- char *funcname = a2l->funcname ? strdup(a2l->funcname) : NULL;
- char *filename = a2l->filename ? strdup(a2l->filename) : NULL;
+ struct symbol *inline_sym = new_inline_sym(dso, sym, a2l->funcname);
+ char *srcline = NULL;
- return inline_list__append(filename, funcname, a2l->line, node, dso);
+ if (a2l->filename)
+ srcline = srcline_from_fileline(a2l->filename, a2l->line);
+
+ return inline_list__append(inline_sym, srcline, node);
}
static int addr2line(const char *dso_name, u64 addr,
char **file, unsigned int *line, struct dso *dso,
- bool unwind_inlines, struct inline_node *node)
+ bool unwind_inlines, struct inline_node *node,
+ struct symbol *sym)
{
int ret = 0;
struct a2l_data *a2l = dso->a2l;
@@ -245,7 +298,7 @@ static int addr2line(const char *dso_name, u64 addr,
if (unwind_inlines) {
int cnt = 0;
- if (node && inline_list__append_dso_a2l(dso, node))
+ if (node && inline_list__append_dso_a2l(dso, node, sym))
return 0;
while (bfd_find_inliner_info(a2l->abfd, &a2l->filename,
@@ -256,7 +309,7 @@ static int addr2line(const char *dso_name, u64 addr,
a2l->filename = NULL;
if (node != NULL) {
- if (inline_list__append_dso_a2l(dso, node))
+ if (inline_list__append_dso_a2l(dso, node, sym))
return 0;
// found at least one inline frame
ret = 1;
@@ -288,7 +341,7 @@ void dso__free_a2l(struct dso *dso)
}
static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
- struct dso *dso)
+ struct dso *dso, struct symbol *sym)
{
struct inline_node *node;
@@ -301,17 +354,8 @@ static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
INIT_LIST_HEAD(&node->val);
node->addr = addr;
- if (!addr2line(dso_name, addr, NULL, NULL, dso, TRUE, node))
- goto out_free_inline_node;
-
- if (list_empty(&node->val))
- goto out_free_inline_node;
-
+ addr2line(dso_name, addr, NULL, NULL, dso, true, node, sym);
return node;
-
-out_free_inline_node:
- inline_node__delete(node);
- return NULL;
}
#else /* HAVE_LIBBFD_SUPPORT */
@@ -341,7 +385,8 @@ static int addr2line(const char *dso_name, u64 addr,
char **file, unsigned int *line_nr,
struct dso *dso __maybe_unused,
bool unwind_inlines __maybe_unused,
- struct inline_node *node __maybe_unused)
+ struct inline_node *node __maybe_unused,
+ struct symbol *sym __maybe_unused)
{
FILE *fp;
char cmd[PATH_MAX];
@@ -381,16 +426,18 @@ void dso__free_a2l(struct dso *dso __maybe_unused)
}
static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
- struct dso *dso __maybe_unused)
+ struct dso *dso __maybe_unused,
+ struct symbol *sym)
{
FILE *fp;
char cmd[PATH_MAX];
struct inline_node *node;
char *filename = NULL;
- size_t len;
+ char *funcname = NULL;
+ size_t filelen, funclen;
unsigned int line_nr = 0;
- scnprintf(cmd, sizeof(cmd), "addr2line -e %s -i %016"PRIx64,
+ scnprintf(cmd, sizeof(cmd), "addr2line -e %s -i -f %016"PRIx64,
dso_name, addr);
fp = popen(cmd, "r");
@@ -408,26 +455,34 @@ static struct inline_node *addr2inlines(const char *dso_name, u64 addr,
INIT_LIST_HEAD(&node->val);
node->addr = addr;
- while (getline(&filename, &len, fp) != -1) {
- if (filename_split(filename, &line_nr) != 1) {
- free(filename);
+ /* addr2line -f generates two lines for each inlined function */
+ while (getline(&funcname, &funclen, fp) != -1) {
+ char *srcline;
+ struct symbol *inline_sym;
+
+ rtrim(funcname);
+
+ if (getline(&filename, &filelen, fp) == -1)
goto out;
- }
- if (inline_list__append(filename, NULL, line_nr, node,
- NULL) != 0)
+ if (filename_split(filename, &line_nr) != 1)
goto out;
- filename = NULL;
+ srcline = srcline_from_fileline(filename, line_nr);
+ inline_sym = new_inline_sym(dso, sym, funcname);
+
+ if (inline_list__append(inline_sym, srcline, node) != 0) {
+ free(srcline);
+ if (inline_sym && inline_sym->inlined)
+ symbol__delete(inline_sym);
+ goto out;
+ }
}
out:
pclose(fp);
-
- if (list_empty(&node->val)) {
- inline_node__delete(node);
- return NULL;
- }
+ free(filename);
+ free(funcname);
return node;
}
@@ -455,19 +510,18 @@ char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
if (dso_name == NULL)
goto out;
- if (!addr2line(dso_name, addr, &file, &line, dso, unwind_inlines, NULL))
+ if (!addr2line(dso_name, addr, &file, &line, dso,
+ unwind_inlines, NULL, sym))
goto out;
- if (asprintf(&srcline, "%s:%u",
- srcline_full_filename ? file : basename(file),
- line) < 0) {
- free(file);
+ srcline = srcline_from_fileline(file, line);
+ free(file);
+
+ if (!srcline)
goto out;
- }
dso->a2l_fails = 0;
- free(file);
return srcline;
out:
@@ -501,7 +555,74 @@ char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
return __get_srcline(dso, addr, sym, show_sym, show_addr, false);
}
-struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr)
+struct srcline_node {
+ u64 addr;
+ char *srcline;
+ struct rb_node rb_node;
+};
+
+void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline)
+{
+ struct rb_node **p = &tree->rb_node;
+ struct rb_node *parent = NULL;
+ struct srcline_node *i, *node;
+
+ node = zalloc(sizeof(struct srcline_node));
+ if (!node) {
+ perror("not enough memory for the srcline node");
+ return;
+ }
+
+ node->addr = addr;
+ node->srcline = srcline;
+
+ while (*p != NULL) {
+ parent = *p;
+ i = rb_entry(parent, struct srcline_node, rb_node);
+ if (addr < i->addr)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&node->rb_node, parent, p);
+ rb_insert_color(&node->rb_node, tree);
+}
+
+char *srcline__tree_find(struct rb_root *tree, u64 addr)
+{
+ struct rb_node *n = tree->rb_node;
+
+ while (n) {
+ struct srcline_node *i = rb_entry(n, struct srcline_node,
+ rb_node);
+
+ if (addr < i->addr)
+ n = n->rb_left;
+ else if (addr > i->addr)
+ n = n->rb_right;
+ else
+ return i->srcline;
+ }
+
+ return NULL;
+}
+
+void srcline__tree_delete(struct rb_root *tree)
+{
+ struct srcline_node *pos;
+ struct rb_node *next = rb_first(tree);
+
+ while (next) {
+ pos = rb_entry(next, struct srcline_node, rb_node);
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, tree);
+ free_srcline(pos->srcline);
+ zfree(&pos);
+ }
+}
+
+struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr,
+ struct symbol *sym)
{
const char *dso_name;
@@ -509,7 +630,7 @@ struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr)
if (dso_name == NULL)
return NULL;
- return addr2inlines(dso_name, addr, dso);
+ return addr2inlines(dso_name, addr, dso, sym);
}
void inline_node__delete(struct inline_node *node)
@@ -518,10 +639,63 @@ void inline_node__delete(struct inline_node *node)
list_for_each_entry_safe(ilist, tmp, &node->val, list) {
list_del_init(&ilist->list);
- zfree(&ilist->filename);
- zfree(&ilist->funcname);
+ free_srcline(ilist->srcline);
+ /* only the inlined symbols are owned by the list */
+ if (ilist->symbol && ilist->symbol->inlined)
+ symbol__delete(ilist->symbol);
free(ilist);
}
free(node);
}
+
+void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines)
+{
+ struct rb_node **p = &tree->rb_node;
+ struct rb_node *parent = NULL;
+ const u64 addr = inlines->addr;
+ struct inline_node *i;
+
+ while (*p != NULL) {
+ parent = *p;
+ i = rb_entry(parent, struct inline_node, rb_node);
+ if (addr < i->addr)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&inlines->rb_node, parent, p);
+ rb_insert_color(&inlines->rb_node, tree);
+}
+
+struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr)
+{
+ struct rb_node *n = tree->rb_node;
+
+ while (n) {
+ struct inline_node *i = rb_entry(n, struct inline_node,
+ rb_node);
+
+ if (addr < i->addr)
+ n = n->rb_left;
+ else if (addr > i->addr)
+ n = n->rb_right;
+ else
+ return i;
+ }
+
+ return NULL;
+}
+
+void inlines__tree_delete(struct rb_root *tree)
+{
+ struct inline_node *pos;
+ struct rb_node *next = rb_first(tree);
+
+ while (next) {
+ pos = rb_entry(next, struct inline_node, rb_node);
+ next = rb_next(&pos->rb_node);
+ rb_erase(&pos->rb_node, tree);
+ inline_node__delete(pos);
+ }
+}
diff --git a/tools/perf/util/srcline.h b/tools/perf/util/srcline.h
index 8e73f607dfa3..847b7086182c 100644
--- a/tools/perf/util/srcline.h
+++ b/tools/perf/util/srcline.h
@@ -3,6 +3,7 @@
#define PERF_SRCLINE_H
#include <linux/list.h>
+#include <linux/rbtree.h>
#include <linux/types.h>
struct dso;
@@ -15,21 +16,38 @@ char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
bool show_sym, bool show_addr, bool unwind_inlines);
void free_srcline(char *srcline);
+/* insert the srcline into the DSO, which will take ownership */
+void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline);
+/* find previously inserted srcline */
+char *srcline__tree_find(struct rb_root *tree, u64 addr);
+/* delete all srclines within the tree */
+void srcline__tree_delete(struct rb_root *tree);
+
#define SRCLINE_UNKNOWN ((char *) "??:0")
struct inline_list {
- char *filename;
- char *funcname;
- unsigned int line_nr;
+ struct symbol *symbol;
+ char *srcline;
struct list_head list;
};
struct inline_node {
u64 addr;
struct list_head val;
+ struct rb_node rb_node;
};
-struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr);
+/* parse inlined frames for the given address */
+struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr,
+ struct symbol *sym);
+/* free resources associated with the inline node list */
void inline_node__delete(struct inline_node *node);
+/* insert the inline node list into the DSO, which will take ownership */
+void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines);
+/* find previously inserted inline node list */
+struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr);
+/* delete all inline_node entries within the tree */
+void inlines__tree_delete(struct rb_root *tree);
+
#endif /* PERF_SRCLINE_H */
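The header above introduces two small rb-tree caches keyed by address. A rough usage sketch follows (illustration only, not part of the patch; it assumes compilation inside tools/perf so that "srcline.h", "dso.h" and <linux/rbtree.h> resolve, and the helper name and local tree are invented for this example):

#include <linux/rbtree.h>
#include "dso.h"
#include "srcline.h"

/* Look up a cached srcline for addr, computing and caching it on a miss. */
static char *cached_srcline(struct rb_root *srclines, struct dso *dso,
			    u64 addr, struct symbol *sym)
{
	char *srcline = srcline__tree_find(srclines, addr);

	if (srcline)
		return srcline;	/* hit: the tree still owns the string */

	srcline = get_srcline(dso, addr, sym, false, false);
	/* the tree takes ownership; srcline__tree_delete() frees it later */
	srcline__tree_insert(srclines, addr, srcline);
	return srcline;
}

A caller would start from an empty tree (struct rb_root srclines = RB_ROOT;) and tear the cache down with srcline__tree_delete(&srclines); inline_node lists follow the same pattern via inlines__tree_insert()/inlines__tree_find()/inlines__tree_delete().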
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 37363869c9a1..855e35cbb1dc 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -7,6 +7,7 @@
#include "rblist.h"
#include "evlist.h"
#include "expr.h"
+#include "metricgroup.h"
enum {
CTX_BIT_USER = 1 << 0,
@@ -56,7 +57,6 @@ struct saved_value {
struct rb_node rb_node;
struct perf_evsel *evsel;
int cpu;
- int ctx;
struct stats stats;
};
@@ -67,8 +67,6 @@ static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
rb_node);
const struct saved_value *b = entry;
- if (a->ctx != b->ctx)
- return a->ctx - b->ctx;
if (a->cpu != b->cpu)
return a->cpu - b->cpu;
if (a->evsel == b->evsel)
@@ -90,13 +88,12 @@ static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
}
static struct saved_value *saved_value_lookup(struct perf_evsel *evsel,
- int cpu, int ctx,
+ int cpu,
bool create)
{
struct rb_node *nd;
struct saved_value dm = {
.cpu = cpu,
- .ctx = ctx,
.evsel = evsel,
};
nd = rblist__find(&runtime_saved_values, &dm);
@@ -182,59 +179,60 @@ void perf_stat__reset_shadow_stats(void)
* more semantic information such as miss/hit ratios,
* instruction rates, etc:
*/
-void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
+void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
int cpu)
{
int ctx = evsel_context(counter);
+ count *= counter->scale;
+
if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
- update_stats(&runtime_nsecs_stats[cpu], count[0]);
+ update_stats(&runtime_nsecs_stats[cpu], count);
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
- update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_cycles_stats[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
- update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, TRANSACTION_START))
- update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_transaction_stats[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, ELISION_START))
- update_stats(&runtime_elision_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_elision_stats[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
- update_stats(&runtime_topdown_total_slots[ctx][cpu], count[0]);
+ update_stats(&runtime_topdown_total_slots[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
- update_stats(&runtime_topdown_slots_issued[ctx][cpu], count[0]);
+ update_stats(&runtime_topdown_slots_issued[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
- update_stats(&runtime_topdown_slots_retired[ctx][cpu], count[0]);
+ update_stats(&runtime_topdown_slots_retired[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
- update_stats(&runtime_topdown_fetch_bubbles[ctx][cpu],count[0]);
+ update_stats(&runtime_topdown_fetch_bubbles[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
- update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count[0]);
+ update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
- update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
- update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
- update_stats(&runtime_branches_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_branches_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
- update_stats(&runtime_cacherefs_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_cacherefs_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
- update_stats(&runtime_l1_dcache_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_l1_dcache_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
- update_stats(&runtime_ll_cache_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_ll_cache_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
- update_stats(&runtime_ll_cache_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_ll_cache_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
- update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
- update_stats(&runtime_itlb_cache_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_itlb_cache_stats[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, SMI_NUM))
- update_stats(&runtime_smi_num_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_smi_num_stats[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, APERF))
- update_stats(&runtime_aperf_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_aperf_stats[ctx][cpu], count);
if (counter->collect_stat) {
- struct saved_value *v = saved_value_lookup(counter, cpu, ctx,
- true);
- update_stats(&v->stats, count[0]);
+ struct saved_value *v = saved_value_lookup(counter, cpu, true);
+ update_stats(&v->stats, count);
}
}
@@ -628,15 +626,68 @@ static void print_smi_cost(int cpu, struct perf_evsel *evsel,
out->print_metric(out->ctx, NULL, "%4.0f", "SMI#", smi_num);
}
+static void generic_metric(const char *metric_expr,
+ struct perf_evsel **metric_events,
+ char *name,
+ const char *metric_name,
+ double avg,
+ int cpu,
+ struct perf_stat_output_ctx *out)
+{
+ print_metric_t print_metric = out->print_metric;
+ struct parse_ctx pctx;
+ double ratio;
+ int i;
+ void *ctxp = out->ctx;
+
+ expr__ctx_init(&pctx);
+ expr__add_id(&pctx, name, avg);
+ for (i = 0; metric_events[i]; i++) {
+ struct saved_value *v;
+ struct stats *stats;
+ double scale;
+
+ if (!strcmp(metric_events[i]->name, "duration_time")) {
+ stats = &walltime_nsecs_stats;
+ scale = 1e-9;
+ } else {
+ v = saved_value_lookup(metric_events[i], cpu, false);
+ if (!v)
+ break;
+ stats = &v->stats;
+ scale = 1.0;
+ }
+ expr__add_id(&pctx, metric_events[i]->name, avg_stats(stats)*scale);
+ }
+ if (!metric_events[i]) {
+ const char *p = metric_expr;
+
+ if (expr__parse(&ratio, &pctx, &p) == 0)
+ print_metric(ctxp, NULL, "%8.1f",
+ metric_name ?
+ metric_name :
+ out->force_header ? name : "",
+ ratio);
+ else
+ print_metric(ctxp, NULL, NULL,
+ out->force_header ?
+ (metric_name ? metric_name : name) : "", 0);
+ } else
+ print_metric(ctxp, NULL, NULL, "", 0);
+}
+
void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
double avg, int cpu,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct rblist *metric_events)
{
void *ctxp = out->ctx;
print_metric_t print_metric = out->print_metric;
double total, ratio = 0.0, total2;
const char *color = NULL;
int ctx = evsel_context(evsel);
+ struct metric_event *me;
+ int num = 1;
if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
@@ -820,33 +871,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
else
print_metric(ctxp, NULL, NULL, name, 0);
} else if (evsel->metric_expr) {
- struct parse_ctx pctx;
- int i;
-
- expr__ctx_init(&pctx);
- expr__add_id(&pctx, evsel->name, avg);
- for (i = 0; evsel->metric_events[i]; i++) {
- struct saved_value *v;
-
- v = saved_value_lookup(evsel->metric_events[i], cpu, ctx, false);
- if (!v)
- break;
- expr__add_id(&pctx, evsel->metric_events[i]->name,
- avg_stats(&v->stats));
- }
- if (!evsel->metric_events[i]) {
- const char *p = evsel->metric_expr;
-
- if (expr__parse(&ratio, &pctx, &p) == 0)
- print_metric(ctxp, NULL, "%8.1f",
- evsel->metric_name ?
- evsel->metric_name :
- out->force_header ? evsel->name : "",
- ratio);
- else
- print_metric(ctxp, NULL, NULL, "", 0);
- } else
- print_metric(ctxp, NULL, NULL, "", 0);
+ generic_metric(evsel->metric_expr, evsel->metric_events, evsel->name,
+ evsel->metric_name, avg, cpu, out);
} else if (runtime_nsecs_stats[cpu].n != 0) {
char unit = 'M';
char unit_buf[10];
@@ -864,6 +890,20 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
print_smi_cost(cpu, evsel, out);
} else {
- print_metric(ctxp, NULL, NULL, NULL, 0);
+ num = 0;
}
+
+ if ((me = metricgroup__lookup(metric_events, evsel, false)) != NULL) {
+ struct metric_expr *mexp;
+
+ list_for_each_entry (mexp, &me->head, nd) {
+ if (num++ > 0)
+ out->new_line(ctxp);
+ generic_metric(mexp->metric_expr, mexp->metric_events,
+ evsel->name, mexp->metric_name,
+ avg, cpu, out);
+ }
+ }
+ if (num == 0)
+ print_metric(ctxp, NULL, NULL, NULL, 0);
}
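generic_metric() above registers each event's averaged value with the expression parser and prints the resulting ratio. A stand-alone sketch of that flow (illustrative only; it assumes a build inside tools/perf so "expr.h" resolves, and the event names and numbers are invented):

#include <stdio.h>
#include "expr.h"

static void demo_metric_expr(void)
{
	struct parse_ctx pctx;
	const char *p = "insns / cycles";	/* hypothetical metric expression */
	double ratio;

	expr__ctx_init(&pctx);
	/* one expr__add_id() per referenced event, as in generic_metric() */
	expr__add_id(&pctx, "insns", 3000000.0);
	expr__add_id(&pctx, "cycles", 1500000.0);

	if (expr__parse(&ratio, &pctx, &p) == 0)
		printf("metric = %8.1f\n", ratio);	/* prints 2.0 */
}

The "duration_time" special case in generic_metric() simply substitutes walltime_nsecs_stats scaled to seconds instead of a per-event saved value before this same parse step.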
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index c9bae5fb8b47..151e9efd7286 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -70,7 +70,7 @@ double rel_stddev_stats(double stddev, double avg)
bool __perf_evsel_stat__is(struct perf_evsel *evsel,
enum perf_stat_evsel_id id)
{
- struct perf_stat_evsel *ps = evsel->priv;
+ struct perf_stat_evsel *ps = evsel->stats;
return ps->id == id;
}
@@ -94,7 +94,7 @@ static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
void perf_stat_evsel_id_init(struct perf_evsel *evsel)
{
- struct perf_stat_evsel *ps = evsel->priv;
+ struct perf_stat_evsel *ps = evsel->stats;
int i;
/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */
@@ -110,7 +110,7 @@ void perf_stat_evsel_id_init(struct perf_evsel *evsel)
static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
int i;
- struct perf_stat_evsel *ps = evsel->priv;
+ struct perf_stat_evsel *ps = evsel->stats;
for (i = 0; i < 3; i++)
init_stats(&ps->res_stats[i]);
@@ -120,8 +120,8 @@ static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
- evsel->priv = zalloc(sizeof(struct perf_stat_evsel));
- if (evsel->priv == NULL)
+ evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
+ if (evsel->stats == NULL)
return -ENOMEM;
perf_evsel__reset_stat_priv(evsel);
return 0;
@@ -129,11 +129,11 @@ static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
- struct perf_stat_evsel *ps = evsel->priv;
+ struct perf_stat_evsel *ps = evsel->stats;
if (ps)
free(ps->group_data);
- zfree(&evsel->priv);
+ zfree(&evsel->stats);
}
static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
@@ -278,7 +278,9 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
perf_evsel__compute_deltas(evsel, cpu, thread, count);
perf_counts_values__scale(count, config->scale, NULL);
if (config->aggr_mode == AGGR_NONE)
- perf_stat__update_shadow_stats(evsel, count->values, cpu);
+ perf_stat__update_shadow_stats(evsel, count->val, cpu);
+ if (config->aggr_mode == AGGR_THREAD)
+ perf_stat__update_shadow_stats(evsel, count->val, 0);
break;
case AGGR_GLOBAL:
aggr->val += count->val;
@@ -319,9 +321,8 @@ int perf_stat_process_counter(struct perf_stat_config *config,
struct perf_evsel *counter)
{
struct perf_counts_values *aggr = &counter->counts->aggr;
- struct perf_stat_evsel *ps = counter->priv;
+ struct perf_stat_evsel *ps = counter->stats;
u64 *count = counter->counts->aggr.values;
- u64 val;
int i, ret;
aggr->val = aggr->ena = aggr->run = 0;
@@ -361,8 +362,7 @@ int perf_stat_process_counter(struct perf_stat_config *config,
/*
* Save the full runtime - to allow normalization during printout:
*/
- val = counter->scale * *count;
- perf_stat__update_shadow_stats(counter, &val, 0);
+ perf_stat__update_shadow_stats(counter, *count, 0);
return 0;
}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 96326b1f9443..eefca5c981fd 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -83,7 +83,7 @@ typedef void (*new_line_t )(void *ctx);
void perf_stat__init_shadow_stats(void);
void perf_stat__reset_shadow_stats(void);
-void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
+void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
int cpu);
struct perf_stat_output_ctx {
void *ctx;
@@ -92,9 +92,11 @@ struct perf_stat_output_ctx {
bool force_header;
};
+struct rblist;
void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
double avg, int cpu,
- struct perf_stat_output_ctx *out);
+ struct perf_stat_output_ctx *out,
+ struct rblist *metric_events);
void perf_stat__collect_metric_expr(struct perf_evlist *);
int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 6492ef38b090..1b67a8639dfe 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -46,6 +46,7 @@ struct symbol_conf symbol_conf = {
.show_hist_headers = true,
.symfs = "",
.event_group = true,
+ .inline_name = true,
};
static enum dso_binary_type binary_type_symtab[] = {
@@ -227,7 +228,7 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
struct maps *maps = &mg->maps[type];
struct map *next, *curr;
- pthread_rwlock_wrlock(&maps->lock);
+ down_write(&maps->lock);
curr = maps__first(maps);
if (curr == NULL)
@@ -247,7 +248,7 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
curr->end = ~0ULL;
out_unlock:
- pthread_rwlock_unlock(&maps->lock);
+ up_write(&maps->lock);
}
struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
@@ -1672,7 +1673,7 @@ struct map *map_groups__find_by_name(struct map_groups *mg,
struct maps *maps = &mg->maps[type];
struct map *map;
- pthread_rwlock_rdlock(&maps->lock);
+ down_read(&maps->lock);
for (map = maps__first(maps); map; map = map__next(map)) {
if (map->dso && strcmp(map->dso->short_name, name) == 0)
@@ -1682,7 +1683,7 @@ struct map *map_groups__find_by_name(struct map_groups *mg,
map = NULL;
out_unlock:
- pthread_rwlock_unlock(&maps->lock);
+ up_read(&maps->lock);
return map;
}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 6352022593c6..a4f0075b4e5c 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -60,6 +60,7 @@ struct symbol {
u8 binding;
u8 idle:1;
u8 ignore:1;
+ u8 inlined:1;
u8 arch_sym;
char name[0];
};
@@ -209,6 +210,7 @@ struct addr_location {
struct thread *thread;
struct map *map;
struct symbol *sym;
+ const char *srcline;
u64 addr;
char level;
u8 filtered;
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 1dbcd3c8dee0..68b65b10579b 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -46,6 +46,8 @@ struct thread *thread__new(pid_t pid, pid_t tid)
thread->cpu = -1;
INIT_LIST_HEAD(&thread->namespaces_list);
INIT_LIST_HEAD(&thread->comm_list);
+ init_rwsem(&thread->namespaces_lock);
+ init_rwsem(&thread->comm_lock);
comm_str = malloc(32);
if (!comm_str)
@@ -84,18 +86,26 @@ void thread__delete(struct thread *thread)
map_groups__put(thread->mg);
thread->mg = NULL;
}
+ down_write(&thread->namespaces_lock);
list_for_each_entry_safe(namespaces, tmp_namespaces,
&thread->namespaces_list, list) {
list_del(&namespaces->list);
namespaces__free(namespaces);
}
+ up_write(&thread->namespaces_lock);
+
+ down_write(&thread->comm_lock);
list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) {
list_del(&comm->list);
comm__free(comm);
}
+ up_write(&thread->comm_lock);
+
unwind__finish_access(thread);
nsinfo__zput(thread->nsinfo);
+ exit_rwsem(&thread->namespaces_lock);
+ exit_rwsem(&thread->comm_lock);
free(thread);
}
@@ -126,8 +136,8 @@ struct namespaces *thread__namespaces(const struct thread *thread)
return list_first_entry(&thread->namespaces_list, struct namespaces, list);
}
-int thread__set_namespaces(struct thread *thread, u64 timestamp,
- struct namespaces_event *event)
+static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
+ struct namespaces_event *event)
{
struct namespaces *new, *curr = thread__namespaces(thread);
@@ -150,6 +160,17 @@ int thread__set_namespaces(struct thread *thread, u64 timestamp,
return 0;
}
+int thread__set_namespaces(struct thread *thread, u64 timestamp,
+ struct namespaces_event *event)
+{
+ int ret;
+
+ down_write(&thread->namespaces_lock);
+ ret = __thread__set_namespaces(thread, timestamp, event);
+ up_write(&thread->namespaces_lock);
+ return ret;
+}
+
struct comm *thread__comm(const struct thread *thread)
{
if (list_empty(&thread->comm_list))
@@ -171,8 +192,8 @@ struct comm *thread__exec_comm(const struct thread *thread)
return last;
}
-int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
- bool exec)
+static int ____thread__set_comm(struct thread *thread, const char *str,
+ u64 timestamp, bool exec)
{
struct comm *new, *curr = thread__comm(thread);
@@ -196,6 +217,17 @@ int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
return 0;
}
+int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
+ bool exec)
+{
+ int ret;
+
+ down_write(&thread->comm_lock);
+ ret = ____thread__set_comm(thread, str, timestamp, exec);
+ up_write(&thread->comm_lock);
+ return ret;
+}
+
int thread__set_comm_from_proc(struct thread *thread)
{
char path[64];
@@ -213,7 +245,7 @@ int thread__set_comm_from_proc(struct thread *thread)
return err;
}
-const char *thread__comm_str(const struct thread *thread)
+static const char *__thread__comm_str(const struct thread *thread)
{
const struct comm *comm = thread__comm(thread);
@@ -223,6 +255,17 @@ const char *thread__comm_str(const struct thread *thread)
return comm__str(comm);
}
+const char *thread__comm_str(const struct thread *thread)
+{
+ const char *str;
+
+ down_read((struct rw_semaphore *)&thread->comm_lock);
+ str = __thread__comm_str(thread);
+ up_read((struct rw_semaphore *)&thread->comm_lock);
+
+ return str;
+}
+
/* CHECKME: it should probably better return the max comm len from its comm list */
int thread__comm_len(struct thread *thread)
{
@@ -265,7 +308,7 @@ static int __thread__prepare_access(struct thread *thread)
struct maps *maps = &thread->mg->maps[i];
struct map *map;
- pthread_rwlock_rdlock(&maps->lock);
+ down_read(&maps->lock);
for (map = maps__first(maps); map; map = map__next(map)) {
err = unwind__prepare_access(thread, map, &initialized);
@@ -273,7 +316,7 @@ static int __thread__prepare_access(struct thread *thread)
break;
}
- pthread_rwlock_unlock(&maps->lock);
+ up_read(&maps->lock);
}
return err;
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index fdcea7c0cac1..40cfa36c022a 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -10,6 +10,7 @@
#include "symbol.h"
#include <strlist.h>
#include <intlist.h>
+#include "rwsem.h"
struct thread_stack;
struct unwind_libunwind_ops;
@@ -30,7 +31,9 @@ struct thread {
int comm_len;
bool dead; /* if set thread has exited */
struct list_head namespaces_list;
+ struct rw_semaphore namespaces_lock;
struct list_head comm_list;
+ struct rw_semaphore comm_lock;
u64 db_id;
void *priv;
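The comm and namespaces lists above are now guarded by the rw_semaphore wrapper from "rwsem.h". Reduced to a stand-alone sketch (illustration only; the struct and helpers below are invented, but init_rwsem()/down_read()/up_read() are the wrappers the patch uses):

#include <linux/list.h>
#include "rwsem.h"

struct demo_list {
	struct list_head entries;
	struct rw_semaphore lock;
};

static void demo_list__init(struct demo_list *d)
{
	INIT_LIST_HEAD(&d->entries);
	init_rwsem(&d->lock);
}

static int demo_list__count(struct demo_list *d)
{
	struct list_head *pos;
	int n = 0;

	down_read(&d->lock);	/* multiple readers may hold this at once */
	list_for_each(pos, &d->entries)
		n++;
	up_read(&d->lock);

	return n;
}

Writers take down_write()/up_write(), as thread__set_comm() and thread__delete() do above, and exit_rwsem() releases the lock's resources at teardown.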
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 506150a75bd0..9892323cdd7c 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -38,6 +38,7 @@ struct perf_top {
int sym_pcnt_filter;
const char *sym_filter;
float min_percent;
+ unsigned int nr_threads_synthesize;
};
#define CONSOLE_CLEAR ""
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index e7d60d05596d..d7f2113462fb 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -28,7 +28,6 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
-#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 8a9a677f7576..40b425949aa3 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -27,7 +27,6 @@
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/mman.h>
-#include <pthread.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 3687b720327a..a789f952b3e9 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -7,6 +7,7 @@
#include <sys/stat.h>
#include <sys/utsname.h>
#include <dirent.h>
+#include <fcntl.h>
#include <inttypes.h>
#include <signal.h>
#include <stdio.h>
@@ -23,6 +24,19 @@
/*
* XXX We need to find a better place for these things...
*/
+
+bool perf_singlethreaded = true;
+
+void perf_set_singlethreaded(void)
+{
+ perf_singlethreaded = true;
+}
+
+void perf_set_multithreaded(void)
+{
+ perf_singlethreaded = false;
+}
+
unsigned int page_size;
int cacheline_size;
@@ -175,7 +189,7 @@ out:
return err;
}
-int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
+static int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
{
void *ptr;
loff_t pgoff;
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index b52765e6d7b4..01434509c2e9 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -6,7 +6,6 @@
/* glibc 2.20 deprecates _BSD_SOURCE in favour of _DEFAULT_SOURCE */
#define _DEFAULT_SOURCE 1
-#include <fcntl.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
@@ -36,7 +35,6 @@ bool lsdir_no_dot_filter(const char *name, struct dirent *d);
int copyfile(const char *from, const char *to);
int copyfile_mode(const char *from, const char *to, mode_t mode);
int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi);
-int copyfile_offset(int fromfd, loff_t from_ofs, int tofd, loff_t to_ofs, u64 size);
ssize_t readn(int fd, void *buf, size_t n);
ssize_t writen(int fd, const void *buf, size_t n);
@@ -65,4 +63,9 @@ int sched_getcpu(void);
int setns(int fd, int nstype);
#endif
+extern bool perf_singlethreaded;
+
+void perf_set_singlethreaded(void);
+void perf_set_multithreaded(void);
+
#endif /* GIT_COMPAT_UTIL_H */
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index cffcda448c28..0acb1ec0e2f0 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -320,7 +320,7 @@ struct dso *machine__findnew_vdso(struct machine *machine,
struct vdso_info *vdso_info;
struct dso *dso = NULL;
- pthread_rwlock_wrlock(&machine->dsos.lock);
+ down_write(&machine->dsos.lock);
if (!machine->vdso_info)
machine->vdso_info = vdso_info__new();
@@ -348,7 +348,7 @@ struct dso *machine__findnew_vdso(struct machine *machine,
out_unlock:
dso__get(dso);
- pthread_rwlock_unlock(&machine->dsos.lock);
+ up_write(&machine->dsos.lock);
return dso;
}
diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c
index 008fe68d7b76..a725b958cf31 100644
--- a/tools/perf/util/zlib.c
+++ b/tools/perf/util/zlib.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>
diff --git a/tools/power/acpi/tools/acpidump/Makefile b/tools/power/acpi/tools/acpidump/Makefile
index f7c7af1f9258..b436f8675f6a 100644
--- a/tools/power/acpi/tools/acpidump/Makefile
+++ b/tools/power/acpi/tools/acpidump/Makefile
@@ -39,6 +39,7 @@ TOOL_OBJS = \
utnonansi.o\
utprint.o\
utstring.o\
+ utstrsuppt.o\
utstrtoul64.o\
utxferror.o\
oslinuxtbl.o\
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index 60df1fbd4a77..0634449156d8 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -287,8 +287,7 @@ int ap_dump_table_by_address(char *ascii_address)
/* Convert argument to an integer physical address */
- status = acpi_ut_strtoul64(ascii_address, ACPI_STRTOUL_64BIT,
- &long_address);
+ status = acpi_ut_strtoul64(ascii_address, &long_address);
if (ACPI_FAILURE(status)) {
fprintf(stderr, "%s: Could not convert to a physical address\n",
ascii_address);
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 943b6b614683..22c3b4ee1617 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -208,9 +208,7 @@ static int ap_do_options(int argc, char **argv)
case 'r': /* Dump tables from specified RSDP */
status =
- acpi_ut_strtoul64(acpi_gbl_optarg,
- ACPI_STRTOUL_64BIT,
- &gbl_rsdp_base);
+ acpi_ut_strtoul64(acpi_gbl_optarg, &gbl_rsdp_base);
if (ACPI_FAILURE(status)) {
fprintf(stderr,
"%s: Could not convert to a physical address\n",
diff --git a/tools/power/cpupower/.gitignore b/tools/power/cpupower/.gitignore
index d42073f12609..1f9977cc609c 100644
--- a/tools/power/cpupower/.gitignore
+++ b/tools/power/cpupower/.gitignore
@@ -1,7 +1,6 @@
.libs
libcpupower.so
-libcpupower.so.0
-libcpupower.so.0.0.0
+libcpupower.so.*
build/ccdv
cpufreq-info
cpufreq-set
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index d6e1c02ddcfe..da205d1fa03c 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -30,6 +30,8 @@ OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
endif
+include ../../scripts/Makefile.arch
+
# --- CONFIGURATION BEGIN ---
# Set the following to `true' to make an unstripped, unoptimized
@@ -79,7 +81,11 @@ bindir ?= /usr/bin
sbindir ?= /usr/sbin
mandir ?= /usr/man
includedir ?= /usr/include
+ifeq ($(IS_64_BIT), 1)
+libdir ?= /usr/lib64
+else
libdir ?= /usr/lib
+endif
localedir ?= /usr/share/locale
docdir ?= /usr/share/doc/packages/cpupower
confdir ?= /etc/
diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c
index 3e701f0e9c14..df43cd45d810 100644
--- a/tools/power/cpupower/utils/cpufreq-info.c
+++ b/tools/power/cpupower/utils/cpufreq-info.c
@@ -93,8 +93,6 @@ static void print_speed(unsigned long speed)
if (speed > 1000000)
printf("%u.%06u GHz", ((unsigned int) speed/1000000),
((unsigned int) speed%1000000));
- else if (speed > 100000)
- printf("%u MHz", (unsigned int) speed);
else if (speed > 1000)
printf("%u.%03u MHz", ((unsigned int) speed/1000),
(unsigned int) (speed%1000));
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index 06c71178d07d..59245b3d587c 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -618,7 +618,7 @@ static void multiorder_account(void)
__radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12);
__radix_tree_lookup(&tree, 1 << 5, &node, &slot);
assert(node->count == node->exceptional * 2);
- __radix_tree_replace(&tree, node, slot, NULL, NULL, NULL);
+ __radix_tree_replace(&tree, node, slot, NULL, NULL);
assert(node->exceptional == 0);
item_kill_tree(&tree);
diff --git a/tools/testing/scatterlist/Makefile b/tools/testing/scatterlist/Makefile
new file mode 100644
index 000000000000..933c3a6e4d77
--- /dev/null
+++ b/tools/testing/scatterlist/Makefile
@@ -0,0 +1,30 @@
+CFLAGS += -I. -I../../include -g -O2 -Wall -fsanitize=address
+LDFLAGS += -fsanitize=address -fsanitize=undefined
+TARGETS = main
+OFILES = main.o scatterlist.o
+
+ifeq ($(BUILD), 32)
+ CFLAGS += -m32
+ LDFLAGS += -m32
+endif
+
+targets: include $(TARGETS)
+
+main: $(OFILES)
+
+clean:
+ $(RM) $(TARGETS) $(OFILES) scatterlist.c linux/scatterlist.h linux/highmem.h linux/kmemleak.h asm/io.h
+ @rmdir asm
+
+scatterlist.c: ../../../lib/scatterlist.c
+ @sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
+
+.PHONY: include
+
+include: ../../../include/linux/scatterlist.h
+ @mkdir -p linux
+ @mkdir -p asm
+ @touch asm/io.h
+ @touch linux/highmem.h
+ @touch linux/kmemleak.h
+ @cp $< linux/scatterlist.h
diff --git a/tools/testing/scatterlist/linux/mm.h b/tools/testing/scatterlist/linux/mm.h
new file mode 100644
index 000000000000..6f9ac14aa800
--- /dev/null
+++ b/tools/testing/scatterlist/linux/mm.h
@@ -0,0 +1,125 @@
+#ifndef _LINUX_MM_H
+#define _LINUX_MM_H
+
+#include <assert.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+
+typedef unsigned long dma_addr_t;
+
+#define unlikely
+
+#define BUG_ON(x) assert(!(x))
+
+#define WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ unlikely(__ret_warn_on); \
+})
+
+#define WARN_ON_ONCE(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ assert(0); \
+ unlikely(__ret_warn_on); \
+})
+
+#define PAGE_SIZE (4096)
+#define PAGE_SHIFT (12)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+
+#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
+#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+
+#define virt_to_page(x) ((void *)x)
+#define page_address(x) ((void *)x)
+
+static inline unsigned long page_to_phys(struct page *page)
+{
+ assert(0);
+
+ return 0;
+}
+
+#define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
+#define pfn_to_page(pfn) (void *)((pfn) * PAGE_SIZE)
+#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
+
+#define __min(t1, t2, min1, min2, x, y) ({ \
+ t1 min1 = (x); \
+ t2 min2 = (y); \
+ (void) (&min1 == &min2); \
+ min1 < min2 ? min1 : min2; })
+
+#define ___PASTE(a,b) a##b
+#define __PASTE(a,b) ___PASTE(a,b)
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#define min(x, y) \
+ __min(typeof(x), typeof(y), \
+ __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
+ x, y)
+
+#define min_t(type, x, y) \
+ __min(type, type, \
+ __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
+ x, y)
+
+#define preemptible() (1)
+
+static inline void *kmap(struct page *page)
+{
+ assert(0);
+
+ return NULL;
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+ assert(0);
+
+ return NULL;
+}
+
+static inline void kunmap(void *addr)
+{
+ assert(0);
+}
+
+static inline void kunmap_atomic(void *addr)
+{
+ assert(0);
+}
+
+static inline unsigned long __get_free_page(unsigned int flags)
+{
+ return (unsigned long)malloc(PAGE_SIZE);
+}
+
+static inline void free_page(unsigned long page)
+{
+ free((void *)page);
+}
+
+static inline void *kmalloc(unsigned int size, unsigned int flags)
+{
+ return malloc(size);
+}
+
+#define kfree(x) free(x)
+
+#define kmemleak_alloc(a, b, c, d)
+#define kmemleak_free(a)
+
+#define PageSlab(p) (0)
+#define flush_kernel_dcache_page(p)
+
+#endif
diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c
new file mode 100644
index 000000000000..0a1464181226
--- /dev/null
+++ b/tools/testing/scatterlist/main.c
@@ -0,0 +1,79 @@
+#include <stdio.h>
+#include <assert.h>
+
+#include <linux/scatterlist.h>
+
+#define MAX_PAGES (64)
+
+static void set_pages(struct page **pages, const unsigned *array, unsigned num)
+{
+ unsigned int i;
+
+ assert(num < MAX_PAGES);
+ for (i = 0; i < num; i++)
+ pages[i] = (struct page *)(unsigned long)
+ ((1 + array[i]) * PAGE_SIZE);
+}
+
+#define pfn(...) (unsigned []){ __VA_ARGS__ }
+
+int main(void)
+{
+ const unsigned int sgmax = SCATTERLIST_MAX_SEGMENT;
+ struct test {
+ int alloc_ret;
+ unsigned num_pages;
+ unsigned *pfn;
+ unsigned size;
+ unsigned int max_seg;
+ unsigned int expected_segments;
+ } *test, tests[] = {
+ { -EINVAL, 1, pfn(0), PAGE_SIZE, PAGE_SIZE + 1, 1 },
+ { -EINVAL, 1, pfn(0), PAGE_SIZE, 0, 1 },
+ { -EINVAL, 1, pfn(0), PAGE_SIZE, sgmax + 1, 1 },
+ { 0, 1, pfn(0), PAGE_SIZE, sgmax, 1 },
+ { 0, 1, pfn(0), 1, sgmax, 1 },
+ { 0, 2, pfn(0, 1), 2 * PAGE_SIZE, sgmax, 1 },
+ { 0, 2, pfn(1, 0), 2 * PAGE_SIZE, sgmax, 2 },
+ { 0, 3, pfn(0, 1, 2), 3 * PAGE_SIZE, sgmax, 1 },
+ { 0, 3, pfn(0, 2, 1), 3 * PAGE_SIZE, sgmax, 3 },
+ { 0, 3, pfn(0, 1, 3), 3 * PAGE_SIZE, sgmax, 2 },
+ { 0, 3, pfn(1, 2, 4), 3 * PAGE_SIZE, sgmax, 2 },
+ { 0, 3, pfn(1, 3, 4), 3 * PAGE_SIZE, sgmax, 2 },
+ { 0, 4, pfn(0, 1, 3, 4), 4 * PAGE_SIZE, sgmax, 2 },
+ { 0, 5, pfn(0, 1, 3, 4, 5), 5 * PAGE_SIZE, sgmax, 2 },
+ { 0, 5, pfn(0, 1, 3, 4, 6), 5 * PAGE_SIZE, sgmax, 3 },
+ { 0, 5, pfn(0, 1, 2, 3, 4), 5 * PAGE_SIZE, sgmax, 1 },
+ { 0, 5, pfn(0, 1, 2, 3, 4), 5 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
+ { 0, 6, pfn(0, 1, 2, 3, 4, 5), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
+ { 0, 6, pfn(0, 2, 3, 4, 5, 6), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 4 },
+ { 0, 6, pfn(0, 1, 3, 4, 5, 6), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
+ { 0, 0, NULL, 0, 0, 0 },
+ };
+ unsigned int i;
+
+ for (i = 0, test = tests; test->expected_segments; test++, i++) {
+ struct page *pages[MAX_PAGES];
+ struct sg_table st;
+ int ret;
+
+ set_pages(pages, test->pfn, test->num_pages);
+
+ ret = __sg_alloc_table_from_pages(&st, pages, test->num_pages,
+ 0, test->size, test->max_seg,
+ GFP_KERNEL);
+ assert(ret == test->alloc_ret);
+
+ if (test->alloc_ret)
+ continue;
+
+ assert(st.nents == test->expected_segments);
+ assert(st.orig_nents == test->expected_segments);
+
+ sg_free_table(&st);
+ }
+
+ assert(i == (sizeof(tests) / sizeof(tests[0])) - 1);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index bb3c4ad8c59f..bf092b83e453 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -3579,7 +3579,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "helper access to packet: test19, cls helper fail range zero",
+ "helper access to packet: test19, cls helper range zero",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
@@ -3599,8 +3599,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .result = REJECT,
- .errstr = "invalid access to packet",
+ .result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
@@ -4379,10 +4378,10 @@ static struct bpf_test tests[] = {
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
@@ -4486,9 +4485,10 @@ static struct bpf_test tests[] = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
- BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
@@ -4622,13 +4622,14 @@ static struct bpf_test tests[] = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
- .errstr = "R1 min value is outside of the array range",
+ .errstr = "R2 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -4765,13 +4766,14 @@ static struct bpf_test tests[] = {
BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
offsetof(struct test_val, foo), 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
- .errstr = "R1 min value is outside of the array range",
+ .errstr = "R2 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5350,7 +5352,7 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
- .errstr = "invalid stack type R1 off=-64 access_size=0",
+ .errstr = "invalid indirect read from stack off -64+0 size 64",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5505,7 +5507,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "invalid stack type R1 off=-64 access_size=0",
+ .errstr = "invalid indirect read from stack off -64+0 size 64",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@@ -5668,7 +5670,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "helper access to variable memory: size = 0 not allowed on != NULL",
+ "helper access to variable memory: size = 0 allowed on != NULL stack pointer",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
@@ -5681,8 +5683,99 @@ static struct bpf_test tests[] = {
BPF_EMIT_CALL(BPF_FUNC_csum_diff),
BPF_EXIT_INSN(),
},
- .errstr = "invalid stack type R1 off=-8 access_size=0",
- .result = REJECT,
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to variable memory: size = 0 allowed on != NULL map pointer",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to variable memory: size possible = 0 allowed on != NULL map pointer",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_EMIT_CALL(BPF_FUNC_csum_diff),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
diff --git a/tools/testing/selftests/powerpc/dscr/dscr.h b/tools/testing/selftests/powerpc/dscr/dscr.h
index 18ea223bd398..cdb840bc54f2 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr.h
+++ b/tools/testing/selftests/powerpc/dscr/dscr.h
@@ -39,7 +39,7 @@
#define rmb() asm volatile("lwsync":::"memory")
#define wmb() asm volatile("lwsync":::"memory")
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))
/* Prilvilege state DSCR access */
inline unsigned long get_dscr(void)
diff --git a/tools/testing/selftests/powerpc/dscr/dscr_default_test.c b/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
index df17c3bab0a7..9e1a37e93b63 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
+++ b/tools/testing/selftests/powerpc/dscr/dscr_default_test.c
@@ -27,7 +27,7 @@ static void *do_test(void *in)
unsigned long d, cur_dscr, cur_dscr_usr;
unsigned long s1, s2;
- s1 = ACCESS_ONCE(sequence);
+ s1 = READ_ONCE(sequence);
if (s1 & 1)
continue;
rmb();
diff --git a/tools/testing/selftests/rcutorture/bin/config_override.sh b/tools/testing/selftests/rcutorture/bin/config_override.sh
index 49fa51726ce3..ef7fcbac3d42 100755
--- a/tools/testing/selftests/rcutorture/bin/config_override.sh
+++ b/tools/testing/selftests/rcutorture/bin/config_override.sh
@@ -42,7 +42,7 @@ else
exit 1
fi
-T=/tmp/config_override.sh.$$
+T=${TMPDIR-/tmp}/config_override.sh.$$
trap 'rm -rf $T' 0
mkdir $T
diff --git a/tools/testing/selftests/rcutorture/bin/configcheck.sh b/tools/testing/selftests/rcutorture/bin/configcheck.sh
index 70fca318a82b..197deece7c7c 100755
--- a/tools/testing/selftests/rcutorture/bin/configcheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/configcheck.sh
@@ -19,7 +19,7 @@
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-T=/tmp/abat-chk-config.sh.$$
+T=${TMPDIR-/tmp}/abat-chk-config.sh.$$
trap 'rm -rf $T' 0
mkdir $T
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index 3f81a1095206..51f66a7ce876 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -32,7 +32,7 @@
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-T=/tmp/configinit.sh.$$
+T=${TMPDIR-/tmp}/configinit.sh.$$
trap 'rm -rf $T' 0
mkdir $T
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
index 46752c164676..fb66d0173638 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -35,7 +35,7 @@ then
exit 1
fi
-T=/tmp/test-linux.sh.$$
+T=${TMPDIR-/tmp}/test-linux.sh.$$
trap 'rm -rf $T' 0
mkdir $T
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 0af36a721b9c..ab14b97c942c 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -38,7 +38,7 @@
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-T=/tmp/kvm-test-1-run.sh.$$
+T=${TMPDIR-/tmp}/kvm-test-1-run.sh.$$
trap 'rm -rf $T' 0
mkdir $T
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index b55895fb10ed..ccd49e958fd2 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -30,7 +30,7 @@
scriptname=$0
args="$*"
-T=/tmp/kvm.sh.$$
+T=${TMPDIR-/tmp}/kvm.sh.$$
trap 'rm -rf $T' 0
mkdir $T
@@ -222,7 +222,7 @@ do
exit 1
fi
done
-sort -k2nr $T/cfgcpu > $T/cfgcpu.sort
+sort -k2nr $T/cfgcpu -T "$T" > $T/cfgcpu.sort
# Use a greedy bin-packing algorithm, sorting the list accordingly.
awk < $T/cfgcpu.sort > $T/cfgcpu.pack -v ncpus=$cpus '
diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh
index a6b57622c2e5..24fe5f822b28 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh
@@ -28,7 +28,7 @@
F=$1
title=$2
-T=/tmp/parse-build.sh.$$
+T=${TMPDIR-/tmp}/parse-build.sh.$$
trap 'rm -rf $T' 0
mkdir $T
diff --git a/tools/testing/selftests/rcutorture/bin/parse-torture.sh b/tools/testing/selftests/rcutorture/bin/parse-torture.sh
index e3c5f0705696..f12c38909b00 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-torture.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-torture.sh
@@ -27,7 +27,7 @@
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-T=/tmp/parse-torture.sh.$$
+T=${TMPDIR-/tmp}/parse-torture.sh.$$
file="$1"
title="$2"
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
index be3fdd351937..3f95a768a03b 100644
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
@@ -35,8 +35,7 @@
#define rs_smp_mb() do {} while (0)
#endif
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *) &(x))
-#define READ_ONCE(x) ACCESS_ONCE(x)
-#define WRITE_ONCE(x, val) (ACCESS_ONCE(x) = (val))
+#define READ_ONCE(x) (*(volatile typeof(x) *) &(x))
+#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
#endif
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
index d075ea0e5ca1..361466a2eaef 100644
--- a/tools/testing/selftests/x86/entry_from_vm86.c
+++ b/tools/testing/selftests/x86/entry_from_vm86.c
@@ -95,6 +95,27 @@ asm (
"int3\n\t"
"vmcode_int80:\n\t"
"int $0x80\n\t"
+ "vmcode_umip:\n\t"
+ /* addressing via displacements */
+ "smsw (2052)\n\t"
+ "sidt (2054)\n\t"
+ "sgdt (2060)\n\t"
+ /* addressing via registers */
+ "mov $2066, %bx\n\t"
+ "smsw (%bx)\n\t"
+ "mov $2068, %bx\n\t"
+ "sidt (%bx)\n\t"
+ "mov $2074, %bx\n\t"
+ "sgdt (%bx)\n\t"
+ /* register operands, only for smsw */
+ "smsw %ax\n\t"
+ "mov %ax, (2080)\n\t"
+ "int3\n\t"
+ "vmcode_umip_str:\n\t"
+ "str %eax\n\t"
+ "vmcode_umip_sldt:\n\t"
+ "sldt %eax\n\t"
+ "int3\n\t"
".size vmcode, . - vmcode\n\t"
"end_vmcode:\n\t"
".code32\n\t"
@@ -103,7 +124,8 @@ asm (
extern unsigned char vmcode[], end_vmcode[];
extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[],
- vmcode_sti[], vmcode_int3[], vmcode_int80[];
+ vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_umip[],
+ vmcode_umip_str[], vmcode_umip_sldt[];
/* Returns false if the test was skipped. */
static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
@@ -160,6 +182,68 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
return true;
}
+void do_umip_tests(struct vm86plus_struct *vm86, unsigned char *test_mem)
+{
+ struct table_desc {
+ unsigned short limit;
+ unsigned long base;
+ } __attribute__((packed));
+
+ /* Initialize variables with arbitrary values */
+ struct table_desc gdt1 = { .base = 0x3c3c3c3c, .limit = 0x9999 };
+ struct table_desc gdt2 = { .base = 0x1a1a1a1a, .limit = 0xaeae };
+ struct table_desc idt1 = { .base = 0x7b7b7b7b, .limit = 0xf1f1 };
+ struct table_desc idt2 = { .base = 0x89898989, .limit = 0x1313 };
+ unsigned short msw1 = 0x1414, msw2 = 0x2525, msw3 = 3737;
+
+ /* UMIP -- exit with INT3 unless kernel emulation did not trap #GP */
+ do_test(vm86, vmcode_umip - vmcode, VM86_TRAP, 3, "UMIP tests");
+
+ /* Results from displacement-only addressing */
+ msw1 = *(unsigned short *)(test_mem + 2052);
+ memcpy(&idt1, test_mem + 2054, sizeof(idt1));
+ memcpy(&gdt1, test_mem + 2060, sizeof(gdt1));
+
+ /* Results from register-indirect addressing */
+ msw2 = *(unsigned short *)(test_mem + 2066);
+ memcpy(&idt2, test_mem + 2068, sizeof(idt2));
+ memcpy(&gdt2, test_mem + 2074, sizeof(gdt2));
+
+ /* Results when using register operands */
+ msw3 = *(unsigned short *)(test_mem + 2080);
+
+ printf("[INFO]\tResult from SMSW:[0x%04x]\n", msw1);
+ printf("[INFO]\tResult from SIDT: limit[0x%04x]base[0x%08lx]\n",
+ idt1.limit, idt1.base);
+ printf("[INFO]\tResult from SGDT: limit[0x%04x]base[0x%08lx]\n",
+ gdt1.limit, gdt1.base);
+
+ if (msw1 != msw2 || msw1 != msw3)
+ printf("[FAIL]\tAll the results of SMSW should be the same.\n");
+ else
+ printf("[PASS]\tAll the results from SMSW are identical.\n");
+
+ if (memcmp(&gdt1, &gdt2, sizeof(gdt1)))
+ printf("[FAIL]\tAll the results of SGDT should be the same.\n");
+ else
+ printf("[PASS]\tAll the results from SGDT are identical.\n");
+
+ if (memcmp(&idt1, &idt2, sizeof(idt1)))
+ printf("[FAIL]\tAll the results of SIDT should be the same.\n");
+ else
+ printf("[PASS]\tAll the results from SIDT are identical.\n");
+
+ sethandler(SIGILL, sighandler, 0);
+ do_test(vm86, vmcode_umip_str - vmcode, VM86_SIGNAL, 0,
+ "STR instruction");
+ clearhandler(SIGILL);
+
+ sethandler(SIGILL, sighandler, 0);
+ do_test(vm86, vmcode_umip_sldt - vmcode, VM86_SIGNAL, 0,
+ "SLDT instruction");
+ clearhandler(SIGILL);
+}
+
int main(void)
{
struct vm86plus_struct v86;
@@ -218,6 +302,9 @@ int main(void)
v86.regs.eax = (unsigned int)-1;
do_test(&v86, vmcode_int80 - vmcode, VM86_INTx, 0x80, "int80");
+ /* UMIP -- should exit with INTx 0x80 unless UMIP was not disabled */
+ do_umip_tests(&v86, addr);
+
/* Execute a null pointer */
v86.regs.cs = 0;
v86.regs.ss = 0;
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 961e3ee26c27..66e5ce5b91f0 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -115,7 +115,15 @@ static void check_valid_segment(uint16_t index, int ldt,
return;
}
- if (ar != expected_ar) {
+ /* The SDM says "bits 19:16 are undefined". Thanks. */
+ ar &= ~0xF0000;
+
+ /*
+ * NB: Different Linux versions do different things with the
+ * accessed bit in set_thread_area().
+ */
+ if (ar != expected_ar &&
+ (ldt || ar != (expected_ar | AR_ACCESSED))) {
printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n",
(ldt ? "LDT" : "GDT"), index, ar, expected_ar);
nerrs++;
@@ -129,30 +137,51 @@ static void check_valid_segment(uint16_t index, int ldt,
}
}
-static bool install_valid_mode(const struct user_desc *desc, uint32_t ar,
- bool oldmode)
+static bool install_valid_mode(const struct user_desc *d, uint32_t ar,
+ bool oldmode, bool ldt)
{
- int ret = syscall(SYS_modify_ldt, oldmode ? 1 : 0x11,
- desc, sizeof(*desc));
- if (ret < -1)
- errno = -ret;
+ struct user_desc desc = *d;
+ int ret;
+
+ if (!ldt) {
+#ifndef __i386__
+ /* No point testing set_thread_area in a 64-bit build */
+ return false;
+#endif
+ if (!gdt_entry_num)
+ return false;
+ desc.entry_number = gdt_entry_num;
+
+ ret = syscall(SYS_set_thread_area, &desc);
+ } else {
+ ret = syscall(SYS_modify_ldt, oldmode ? 1 : 0x11,
+ &desc, sizeof(desc));
+
+ if (ret < -1)
+ errno = -ret;
+
+ if (ret != 0 && errno == ENOSYS) {
+ printf("[OK]\tmodify_ldt returned -ENOSYS\n");
+ return false;
+ }
+ }
+
if (ret == 0) {
- uint32_t limit = desc->limit;
- if (desc->limit_in_pages)
+ uint32_t limit = desc.limit;
+ if (desc.limit_in_pages)
limit = (limit << 12) + 4095;
- check_valid_segment(desc->entry_number, 1, ar, limit, true);
+ check_valid_segment(desc.entry_number, ldt, ar, limit, true);
return true;
- } else if (errno == ENOSYS) {
- printf("[OK]\tmodify_ldt returned -ENOSYS\n");
- return false;
} else {
- if (desc->seg_32bit) {
- printf("[FAIL]\tUnexpected modify_ldt failure %d\n",
+ if (desc.seg_32bit) {
+ printf("[FAIL]\tUnexpected %s failure %d\n",
+ ldt ? "modify_ldt" : "set_thread_area",
errno);
nerrs++;
return false;
} else {
- printf("[OK]\tmodify_ldt rejected 16 bit segment\n");
+ printf("[OK]\t%s rejected 16 bit segment\n",
+ ldt ? "modify_ldt" : "set_thread_area");
return false;
}
}
@@ -160,7 +189,15 @@ static bool install_valid_mode(const struct user_desc *desc, uint32_t ar,
static bool install_valid(const struct user_desc *desc, uint32_t ar)
{
- return install_valid_mode(desc, ar, false);
+ bool ret = install_valid_mode(desc, ar, false, true);
+
+ if (desc->contents <= 1 && desc->seg_32bit &&
+ !desc->seg_not_present) {
+ /* Should work in the GDT, too. */
+ install_valid_mode(desc, ar, false, false);
+ }
+
+ return ret;
}
static void install_invalid(const struct user_desc *desc, bool oldmode)
@@ -367,9 +404,24 @@ static void do_simple_tests(void)
install_invalid(&desc, false);
desc.seg_not_present = 0;
- desc.read_exec_only = 0;
desc.seg_32bit = 1;
+ desc.read_exec_only = 0;
+ desc.limit = 0xfffff;
+
install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB);
+
+ desc.limit_in_pages = 1;
+
+ install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB | AR_G);
+ desc.read_exec_only = 1;
+ install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P | AR_DB | AR_G);
+ desc.contents = 1;
+ desc.read_exec_only = 0;
+ install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
+ desc.read_exec_only = 1;
+ install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G);
+
+ desc.limit = 0;
install_invalid(&desc, true);
}
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index 555e43ca846b..7a1cc0e56d2d 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -189,17 +189,29 @@ void lots_o_noops_around_write(int *write_to_me)
#define u64 uint64_t
#ifdef __i386__
-#define SYS_mprotect_key 380
-#define SYS_pkey_alloc 381
-#define SYS_pkey_free 382
+
+#ifndef SYS_mprotect_key
+# define SYS_mprotect_key 380
+#endif
+#ifndef SYS_pkey_alloc
+# define SYS_pkey_alloc 381
+# define SYS_pkey_free 382
+#endif
#define REG_IP_IDX REG_EIP
#define si_pkey_offset 0x14
+
#else
-#define SYS_mprotect_key 329
-#define SYS_pkey_alloc 330
-#define SYS_pkey_free 331
+
+#ifndef SYS_mprotect_key
+# define SYS_mprotect_key 329
+#endif
+#ifndef SYS_pkey_alloc
+# define SYS_pkey_alloc 330
+# define SYS_pkey_free 331
+#endif
#define REG_IP_IDX REG_RIP
#define si_pkey_offset 0x20
+
#endif
void dump_mem(void *dumpme, int len_bytes)
diff --git a/tools/usb/usbip/Makefile.am b/tools/usb/usbip/Makefile.am
index da3a430849a8..5961e9c18812 100644
--- a/tools/usb/usbip/Makefile.am
+++ b/tools/usb/usbip/Makefile.am
@@ -2,6 +2,7 @@
SUBDIRS := libsrc src
includedir = @includedir@/usbip
include_HEADERS := $(addprefix libsrc/, \
- usbip_common.h vhci_driver.h usbip_host_driver.h)
+ usbip_common.h vhci_driver.h usbip_host_driver.h \
+ list.h sysfs_utils.h usbip_host_common.h)
dist_man_MANS := $(addprefix doc/, usbip.8 usbipd.8)
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 90b0133004e1..5706e075adf2 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -110,11 +110,15 @@ static inline void busy_wait(void)
barrier();
}
+#if defined(__x86_64__)
+#define smp_mb() asm volatile("lock; addl $0,-128(%%rsp)" ::: "memory", "cc")
+#else
/*
* Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
* with other __ATOMIC_SEQ_CST calls.
*/
#define smp_mb() __sync_synchronize()
+#endif
/*
* This abuses the atomic builtins for thread fences, and
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index b0b7ef6d0de1..f82c2eaa859d 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -84,6 +84,7 @@ int output_lines = -1;
int sort_loss;
int extended_totals;
int show_bytes;
+int unreclaim_only;
/* Debug options */
int sanity;
@@ -133,6 +134,7 @@ static void usage(void)
"-L|--Loss Sort by loss\n"
"-X|--Xtotals Show extended summary information\n"
"-B|--Bytes Show size in bytes\n"
+ "-U|--Unreclaim Show unreclaimable slabs only\n"
"\nValid debug options (FZPUT may be combined)\n"
"a / A Switch on all debug options (=FZUP)\n"
"- Switch off all debug options\n"
@@ -569,6 +571,9 @@ static void slabcache(struct slabinfo *s)
if (strcmp(s->name, "*") == 0)
return;
+ if (unreclaim_only && s->reclaim_account)
+ return;
+
if (actual_slabs == 1) {
report(s);
return;
@@ -1347,6 +1352,7 @@ struct option opts[] = {
{ "Loss", no_argument, NULL, 'L'},
{ "Xtotals", no_argument, NULL, 'X'},
{ "Bytes", no_argument, NULL, 'B'},
+ { "Unreclaim", no_argument, NULL, 'U'},
{ NULL, 0, NULL, 0 }
};
@@ -1358,7 +1364,7 @@ int main(int argc, char *argv[])
page_size = getpagesize();
- while ((c = getopt_long(argc, argv, "aAd::Defhil1noprstvzTSN:LXB",
+ while ((c = getopt_long(argc, argv, "aAd::Defhil1noprstvzTSN:LXBU",
opts, NULL)) != -1)
switch (c) {
case '1':
@@ -1439,6 +1445,9 @@ int main(int argc, char *argv[])
case 'B':
show_bytes = 1;
break;
+ case 'U':
+ unreclaim_only = 1;
+ break;
default:
fatal("%s: Invalid option '%c'\n", argv[0], optopt);
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 95cba0799828..4cf9b91e6c9b 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -652,6 +652,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
preempt_disable();
+ /* Flush FP/SIMD state that can't survive guest entry/exit */
+ kvm_fpsimd_flush_cpu_state();
+
kvm_pmu_flush_hwstate(vcpu);
kvm_timer_flush_hwstate(vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9deb5a245b83..ce507ae1d4f5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2302,7 +2302,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
continue;
} else if (pass && i > last_boosted_vcpu)
break;
- if (!ACCESS_ONCE(vcpu->preempted))
+ if (!READ_ONCE(vcpu->preempted))
continue;
if (vcpu == me)
continue;